-rw-r--r--  .mailmap  1
-rw-r--r--  Documentation/ABI/stable/sysfs-class-infiniband  19
-rw-r--r--  Documentation/ABI/stable/sysfs-driver-ib_srp  2
-rw-r--r--  Documentation/ABI/testing/debugfs-hisi-hpre  57
-rw-r--r--  Documentation/ABI/testing/debugfs-hisi-sec  43
-rw-r--r--  Documentation/ABI/testing/procfs-diskstats  5
-rw-r--r--  Documentation/ABI/testing/sysfs-block  6
-rw-r--r--  Documentation/ABI/testing/sysfs-bus-fsi  16
-rw-r--r--  Documentation/ABI/testing/sysfs-bus-mei  23
-rw-r--r--  Documentation/ABI/testing/sysfs-bus-thunderbolt  36
-rw-r--r--  Documentation/ABI/testing/sysfs-class-mei  10
-rw-r--r--  Documentation/ABI/testing/sysfs-class-net-statistics  16
-rw-r--r--  Documentation/ABI/testing/sysfs-devices-system-cpu  2
-rw-r--r--  Documentation/ABI/testing/sysfs-platform-dfl-fme  132
-rw-r--r--  Documentation/DMA-attributes.txt  18
-rw-r--r--  Documentation/RCU/Design/Data-Structures/Data-Structures.html  1391
-rw-r--r--  Documentation/RCU/Design/Data-Structures/Data-Structures.rst  1163
-rw-r--r--  Documentation/RCU/Design/Expedited-Grace-Periods/Expedited-Grace-Periods.html  668
-rw-r--r--  Documentation/RCU/Design/Expedited-Grace-Periods/Expedited-Grace-Periods.rst  521
-rw-r--r--  Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Diagram.html  9
-rw-r--r--  Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.html  704
-rw-r--r--  Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst  624
-rw-r--r--  Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp.svg  2
-rw-r--r--  Documentation/RCU/Design/Memory-Ordering/TreeRCU-qs.svg  2
-rw-r--r--  Documentation/RCU/Design/Requirements/Requirements.html  3401
-rw-r--r--  Documentation/RCU/Design/Requirements/Requirements.rst  2704
-rw-r--r--  Documentation/RCU/index.rst  7
-rw-r--r--  Documentation/RCU/lockdep.txt  18
-rw-r--r--  Documentation/RCU/whatisRCU.txt  14
-rw-r--r--  Documentation/admin-guide/cgroup-v2.rst  2
-rw-r--r--  Documentation/admin-guide/device-mapper/dm-integrity.rst  5
-rw-r--r--  Documentation/admin-guide/device-mapper/dm-raid.rst  2
-rw-r--r--  Documentation/admin-guide/hw-vuln/index.rst  2
-rw-r--r--  Documentation/admin-guide/hw-vuln/mds.rst  7
-rw-r--r--  Documentation/admin-guide/hw-vuln/multihit.rst  163
-rw-r--r--  Documentation/admin-guide/hw-vuln/tsx_async_abort.rst  279
-rw-r--r--  Documentation/admin-guide/iostats.rst  9
-rw-r--r--  Documentation/admin-guide/kernel-parameters.txt  150
-rw-r--r--  Documentation/admin-guide/perf/imx-ddr.rst  15
-rw-r--r--  Documentation/admin-guide/perf/thunderx2-pmu.rst  20
-rw-r--r--  Documentation/admin-guide/ras.rst  31
-rw-r--r--  Documentation/arm64/booting.rst  3
-rw-r--r--  Documentation/arm64/cpu-feature-registers.rst  19
-rw-r--r--  Documentation/arm64/elf_hwcaps.rst  67
-rw-r--r--  Documentation/arm64/silicon-errata.rst  6
-rw-r--r--  Documentation/asm-annotations.rst  216
-rw-r--r--  Documentation/block/stat.rst  14
-rw-r--r--  Documentation/bpf/index.rst  9
-rw-r--r--  Documentation/bpf/prog_flow_dissector.rst  3
-rw-r--r--  Documentation/bpf/s390.rst  205
-rw-r--r--  Documentation/core-api/printk-formats.rst  46
-rw-r--r--  Documentation/crypto/api-skcipher.rst  29
-rw-r--r--  Documentation/crypto/architecture.rst  4
-rw-r--r--  Documentation/crypto/crypto_engine.rst  4
-rw-r--r--  Documentation/crypto/devel-algos.rst  27
-rw-r--r--  Documentation/dev-tools/index.rst  1
-rw-r--r--  Documentation/dev-tools/kunit/api/index.rst  16
-rw-r--r--  Documentation/dev-tools/kunit/api/test.rst  11
-rw-r--r--  Documentation/dev-tools/kunit/faq.rst  62
-rw-r--r--  Documentation/dev-tools/kunit/index.rst  79
-rw-r--r--  Documentation/dev-tools/kunit/start.rst  180
-rw-r--r--  Documentation/dev-tools/kunit/usage.rst  576
-rw-r--r--  Documentation/devicetree/bindings/arm/coresight.txt  9
-rw-r--r--  Documentation/devicetree/bindings/arm/omap/omap.txt  30
-rw-r--r--  Documentation/devicetree/bindings/cpufreq/ti-cpufreq.txt  6
-rw-r--r--  Documentation/devicetree/bindings/crypto/allwinner,sun8i-ss.yaml  60
-rw-r--r--  Documentation/devicetree/bindings/crypto/amlogic,gxl-crypto.yaml  52
-rw-r--r--  Documentation/devicetree/bindings/devfreq/event/exynos-ppmu.txt  26
-rw-r--r--  Documentation/devicetree/bindings/devfreq/exynos-bus.txt  2
-rw-r--r--  Documentation/devicetree/bindings/dma/renesas,usb-dmac.txt  1
-rw-r--r--  Documentation/devicetree/bindings/fsi/fsi-master-aspeed.txt  24
-rw-r--r--  Documentation/devicetree/bindings/hwmon/adi,ltc2947.yaml  104
-rw-r--r--  Documentation/devicetree/bindings/hwmon/ibm,cffps1.txt  3
-rw-r--r--  Documentation/devicetree/bindings/hwmon/ti,tmp513.yaml  93
-rw-r--r--  Documentation/devicetree/bindings/interconnect/qcom,msm8974.yaml  62
-rw-r--r--  Documentation/devicetree/bindings/media/allwinner,sun8i-h3-deinterlace.yaml  76
-rw-r--r--  Documentation/devicetree/bindings/media/i2c/ad5820.txt  11
-rw-r--r--  Documentation/devicetree/bindings/media/i2c/imx290.txt  57
-rw-r--r--  Documentation/devicetree/bindings/media/i2c/nokia,smia.txt  2
-rw-r--r--  Documentation/devicetree/bindings/media/i2c/ov2659.txt  9
-rw-r--r--  Documentation/devicetree/bindings/media/rc.yaml  1
-rw-r--r--  Documentation/devicetree/bindings/media/renesas,csi2.txt  1
-rw-r--r--  Documentation/devicetree/bindings/media/renesas,vin.txt  5
-rw-r--r--  Documentation/devicetree/bindings/media/sh_mobile_ceu.txt  17
-rw-r--r--  Documentation/devicetree/bindings/media/ti,vpe.yaml  64
-rw-r--r--  Documentation/devicetree/bindings/mfd/da9062.txt  4
-rw-r--r--  Documentation/devicetree/bindings/mips/ralink.txt  14
-rw-r--r--  Documentation/devicetree/bindings/mmc/arasan,sdhci.txt  42
-rw-r--r--  Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt  3
-rw-r--r--  Documentation/devicetree/bindings/mmc/jz4740.txt  8
-rw-r--r--  Documentation/devicetree/bindings/mmc/mmc-controller.yaml  14
-rw-r--r--  Documentation/devicetree/bindings/mmc/owl-mmc.yaml  59
-rw-r--r--  Documentation/devicetree/bindings/mmc/renesas,sdhi.txt  1
-rw-r--r--  Documentation/devicetree/bindings/mmc/sdhci-atmel.txt  5
-rw-r--r--  Documentation/devicetree/bindings/mmc/sdhci-milbeaut.txt  30
-rw-r--r--  Documentation/devicetree/bindings/mtd/cadence-nand-controller.txt  53
-rw-r--r--  Documentation/devicetree/bindings/mtd/intel,ixp4xx-flash.txt  22
-rw-r--r--  Documentation/devicetree/bindings/net/brcm,bcm7445-switch-v4.0.txt  6
-rw-r--r--  Documentation/devicetree/bindings/net/brcm,bcmgenet.txt  2
-rw-r--r--  Documentation/devicetree/bindings/net/broadcom-bluetooth.txt  2
-rw-r--r--  Documentation/devicetree/bindings/net/ethernet-controller.yaml  5
-rw-r--r--  Documentation/devicetree/bindings/net/ethernet-phy.yaml  5
-rw-r--r--  Documentation/devicetree/bindings/net/ftgmac100.txt  8
-rw-r--r--  Documentation/devicetree/bindings/net/lpc-eth.txt  5
-rw-r--r--  Documentation/devicetree/bindings/net/nfc/pn532.txt  46
-rw-r--r--  Documentation/devicetree/bindings/net/nfc/pn533-i2c.txt  29
-rw-r--r--  Documentation/devicetree/bindings/net/qca,ar803x.yaml  111
-rw-r--r--  Documentation/devicetree/bindings/net/renesas,ether.yaml  114
-rw-r--r--  Documentation/devicetree/bindings/net/sh_eth.txt  69
-rw-r--r--  Documentation/devicetree/bindings/net/ti,cpsw-switch.yaml  240
-rw-r--r--  Documentation/devicetree/bindings/net/ti,dp83869.yaml  84
-rw-r--r--  Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt  6
-rw-r--r--  Documentation/devicetree/bindings/net/wireless/ti,wl1251.txt  26
-rw-r--r--  Documentation/devicetree/bindings/nvmem/rockchip-otp.txt  25
-rw-r--r--  Documentation/devicetree/bindings/nvmem/sprd-efuse.txt  39
-rw-r--r--  Documentation/devicetree/bindings/perf/arm-ccn.txt  1
-rw-r--r--  Documentation/devicetree/bindings/perf/fsl-imx-ddr.txt  1
-rw-r--r--  Documentation/devicetree/bindings/phy/allwinner,sun50i-h6-usb3-phy.yaml  47
-rw-r--r--  Documentation/devicetree/bindings/phy/phy-rockchip-inno-usb2.txt  1
-rw-r--r--  Documentation/devicetree/bindings/phy/qcom-qmp-phy.txt  7
-rw-r--r--  Documentation/devicetree/bindings/phy/rcar-gen3-phy-usb2.txt  2
-rw-r--r--  Documentation/devicetree/bindings/phy/rcar-gen3-phy-usb3.txt  2
-rw-r--r--  Documentation/devicetree/bindings/phy/rockchip,px30-dsi-dphy.yaml  75
-rw-r--r--  Documentation/devicetree/bindings/pinctrl/allwinner,sun4i-a10-pinctrl.yaml  243
-rw-r--r--  Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt  164
-rw-r--r--  Documentation/devicetree/bindings/pinctrl/intel,lgm-pinctrl.yaml  116
-rw-r--r--  Documentation/devicetree/bindings/pinctrl/meson,pinctrl.txt  1
-rw-r--r--  Documentation/devicetree/bindings/pinctrl/pincfg-node.yaml  140
-rw-r--r--  Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt  192
-rw-r--r--  Documentation/devicetree/bindings/pinctrl/pinmux-node.yaml  132
-rw-r--r--  Documentation/devicetree/bindings/pinctrl/qcom,msm8976-pinctrl.txt  183
-rw-r--r--  Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.txt  8
-rw-r--r--  Documentation/devicetree/bindings/pinctrl/qcom,pmic-mpp.txt  4
-rw-r--r--  Documentation/devicetree/bindings/pinctrl/renesas,pfc-pinctrl.txt  4
-rw-r--r--  Documentation/devicetree/bindings/pinctrl/rockchip,pinctrl.txt  1
-rw-r--r--  Documentation/devicetree/bindings/power/supply/cpcap-charger.txt  9
-rw-r--r--  Documentation/devicetree/bindings/ptp/ptp-idtcm.yaml  69
-rw-r--r--  Documentation/devicetree/bindings/regulator/fixed-regulator.yaml  4
-rw-r--r--  Documentation/devicetree/bindings/regulator/qcom,rpmh-regulator.txt  4
-rw-r--r--  Documentation/devicetree/bindings/regulator/qcom,smd-rpm-regulator.txt  21
-rw-r--r--  Documentation/devicetree/bindings/regulator/qcom,spmi-regulator.txt  25
-rw-r--r--  Documentation/devicetree/bindings/regulator/regulator.yaml  7
-rw-r--r--  Documentation/devicetree/bindings/rng/atmel-trng.txt  2
-rw-r--r--  Documentation/devicetree/bindings/rng/nuvoton,npcm-rng.txt  12
-rw-r--r--  Documentation/devicetree/bindings/rng/omap3_rom_rng.txt  27
-rw-r--r--  Documentation/devicetree/bindings/rng/samsung,exynos5250-trng.txt  17
-rw-r--r--  Documentation/devicetree/bindings/security/tpm/google,cr50.txt  19
-rw-r--r--  Documentation/devicetree/bindings/sound/adi,adau7118.yaml  85
-rw-r--r--  Documentation/devicetree/bindings/sound/allwinner,sun4i-a10-codec.yaml  267
-rw-r--r--  Documentation/devicetree/bindings/sound/allwinner,sun8i-a23-codec-analog.yaml  38
-rw-r--r--  Documentation/devicetree/bindings/sound/arndale.txt  5
-rw-r--r--  Documentation/devicetree/bindings/sound/fsl,mqs.txt  36
-rw-r--r--  Documentation/devicetree/bindings/sound/google,cros-ec-codec.txt  24
-rw-r--r--  Documentation/devicetree/bindings/sound/mt8183-afe-pcm.txt  6
-rw-r--r--  Documentation/devicetree/bindings/sound/mt8183-mt6358-ts3a227-max98357.txt  7
-rw-r--r--  Documentation/devicetree/bindings/sound/renesas,fsi.txt  31
-rw-r--r--  Documentation/devicetree/bindings/sound/renesas,fsi.yaml  76
-rw-r--r--  Documentation/devicetree/bindings/sound/renesas,rsnd.txt  1
-rw-r--r--  Documentation/devicetree/bindings/sound/rockchip-max98090.txt  27
-rw-r--r--  Documentation/devicetree/bindings/sound/rt1011.txt  10
-rw-r--r--  Documentation/devicetree/bindings/sound/rt5682.txt  6
-rw-r--r--  Documentation/devicetree/bindings/sound/samsung,odroid.txt  54
-rw-r--r--  Documentation/devicetree/bindings/sound/samsung,odroid.yaml  91
-rw-r--r--  Documentation/devicetree/bindings/sound/samsung-i2s.txt  84
-rw-r--r--  Documentation/devicetree/bindings/sound/samsung-i2s.yaml  138
-rw-r--r--  Documentation/devicetree/bindings/sound/sun4i-codec.txt  94
-rw-r--r--  Documentation/devicetree/bindings/sound/sun8i-codec-analog.txt  17
-rw-r--r--  Documentation/devicetree/bindings/sound/tas2562.txt  34
-rw-r--r--  Documentation/devicetree/bindings/sound/tas2770.txt  37
-rw-r--r--  Documentation/devicetree/bindings/sound/ti,pcm3168a.txt  8
-rw-r--r--  Documentation/devicetree/bindings/sound/tlv320aic31xx.txt  5
-rw-r--r--  Documentation/devicetree/bindings/spi/renesas,hspi.yaml  57
-rw-r--r--  Documentation/devicetree/bindings/spi/renesas,rzn1-spi.txt  11
-rw-r--r--  Documentation/devicetree/bindings/spi/renesas,sh-msiof.yaml  159
-rw-r--r--  Documentation/devicetree/bindings/spi/sh-hspi.txt  26
-rw-r--r--  Documentation/devicetree/bindings/spi/sh-msiof.txt  105
-rw-r--r--  Documentation/devicetree/bindings/spi/snps,dw-apb-ssi.txt  3
-rw-r--r--  Documentation/devicetree/bindings/spi/spi-sifive.txt  37
-rw-r--r--  Documentation/devicetree/bindings/spi/spi-sifive.yaml  86
-rw-r--r--  Documentation/devicetree/bindings/spi/spi-stm32-qspi.txt  47
-rw-r--r--  Documentation/devicetree/bindings/spi/spi-xilinx.txt  4
-rw-r--r--  Documentation/devicetree/bindings/spi/st,stm32-qspi.yaml  83
-rw-r--r--  Documentation/devicetree/bindings/usb/renesas,usb3-peri.txt  41
-rw-r--r--  Documentation/devicetree/bindings/usb/renesas,usb3-peri.yaml  86
-rw-r--r--  Documentation/devicetree/bindings/usb/renesas,usbhs.txt  57
-rw-r--r--  Documentation/devicetree/bindings/usb/renesas,usbhs.yaml  126
-rw-r--r--  Documentation/devicetree/bindings/usb/richtek,rt1711h.txt  29
-rw-r--r--  Documentation/devicetree/bindings/usb/ti,hd3ss3220.txt  38
-rw-r--r--  Documentation/devicetree/bindings/usb/ti,j721e-usb.yaml  86
-rw-r--r--  Documentation/devicetree/bindings/usb/usb-xhci.txt  1
-rw-r--r--  Documentation/devicetree/bindings/usb/usb251xb.txt  3
-rw-r--r--  Documentation/devicetree/bindings/vendor-prefixes.yaml  4
-rw-r--r--  Documentation/driver-api/index.rst  1
-rw-r--r--  Documentation/driver-api/infiniband.rst  127
-rw-r--r--  Documentation/driver-api/libata.rst  14
-rw-r--r--  Documentation/driver-api/nvmem.rst  2
-rw-r--r--  Documentation/filesystems/fscrypt.rst  68
-rw-r--r--  Documentation/filesystems/fsverity.rst  12
-rw-r--r--  Documentation/firmware-guide/acpi/namespace.rst  2
-rw-r--r--  Documentation/fpga/dfl.rst  10
-rw-r--r--  Documentation/hwmon/bel-pfe.rst  112
-rw-r--r--  Documentation/hwmon/dell-smm-hwmon.rst  164
-rw-r--r--  Documentation/hwmon/ina3221.rst  12
-rw-r--r--  Documentation/hwmon/index.rst  4
-rw-r--r--  Documentation/hwmon/ltc2947.rst  100
-rw-r--r--  Documentation/hwmon/tmp513.rst  103
-rw-r--r--  Documentation/index.rst  8
-rw-r--r--  Documentation/ioctl/ioctl-number.rst  1
-rw-r--r--  Documentation/livepatch/index.rst  1
-rw-r--r--  Documentation/livepatch/system-state.rst  167
-rw-r--r--  Documentation/media/cec.h.rst.exceptions  89
-rw-r--r--  Documentation/media/kapi/v4l2-controls.rst  9
-rw-r--r--  Documentation/media/uapi/cec/cec-funcs.rst  1
-rw-r--r--  Documentation/media/uapi/cec/cec-ioc-adap-g-caps.rst  6
-rw-r--r--  Documentation/media/uapi/cec/cec-ioc-adap-g-conn-info.rst  105
-rw-r--r--  Documentation/media/uapi/cec/cec-ioc-dqevent.rst  8
-rw-r--r--  Documentation/media/uapi/mediactl/request-api.rst  4
-rw-r--r--  Documentation/media/uapi/v4l/biblio.rst  9
-rw-r--r--  Documentation/media/uapi/v4l/buffer.rst  13
-rw-r--r--  Documentation/media/uapi/v4l/dev-mem2mem.rst  1
-rw-r--r--  Documentation/media/uapi/v4l/dev-stateless-decoder.rst  424
-rw-r--r--  Documentation/media/uapi/v4l/ext-ctrls-codec.rst  569
-rw-r--r--  Documentation/media/uapi/v4l/ext-ctrls-flash.rst  2
-rw-r--r--  Documentation/media/uapi/v4l/ext-ctrls-image-source.rst  10
-rw-r--r--  Documentation/media/uapi/v4l/meta-formats.rst  1
-rw-r--r--  Documentation/media/uapi/v4l/pixfmt-compressed.rst  35
-rw-r--r--  Documentation/media/uapi/v4l/pixfmt-meta-vivid.rst  60
-rw-r--r--  Documentation/media/uapi/v4l/v4l2-selection-targets.rst  4
-rw-r--r--  Documentation/media/uapi/v4l/vidioc-decoder-cmd.rst  10
-rw-r--r--  Documentation/media/uapi/v4l/vidioc-g-ext-ctrls.rst  5
-rw-r--r--  Documentation/media/uapi/v4l/vidioc-g-fbuf.rst  2
-rw-r--r--  Documentation/media/uapi/v4l/vidioc-queryctrl.rst  24
-rw-r--r--  Documentation/media/uapi/v4l/vidioc-reqbufs.rst  6
-rw-r--r--  Documentation/media/v4l-drivers/imx.rst  75
-rw-r--r--  Documentation/media/v4l-drivers/ipu3.rst  53
-rw-r--r--  Documentation/media/v4l-drivers/ipu3_rcb.svg  331
-rw-r--r--  Documentation/media/v4l-drivers/vimc.rst  16
-rw-r--r--  Documentation/media/videodev2.h.rst.exceptions  5
-rw-r--r--  Documentation/networking/af_xdp.rst  277
-rw-r--r--  Documentation/networking/device_drivers/aquantia/atlantic.txt  46
-rw-r--r--  Documentation/networking/device_drivers/freescale/dpaa.txt  12
-rw-r--r--  Documentation/networking/device_drivers/freescale/dpaa2/index.rst  1
-rw-r--r--  Documentation/networking/device_drivers/freescale/dpaa2/mac-phy-support.rst  191
-rw-r--r--  Documentation/networking/device_drivers/mellanox/mlx5.rst  21
-rw-r--r--  Documentation/networking/device_drivers/ti/cpsw_switchdev.txt  209
-rw-r--r--  Documentation/networking/devlink-params-mlx5.txt  17
-rw-r--r--  Documentation/networking/devlink-params-mv88e6xxx.txt  7
-rw-r--r--  Documentation/networking/devlink-params-ti-cpsw-switch.txt  10
-rw-r--r--  Documentation/networking/devlink-params.txt  4
-rw-r--r--  Documentation/networking/devlink-trap.rst  61
-rw-r--r--  Documentation/networking/filter.txt  8
-rw-r--r--  Documentation/networking/index.rst  1
-rw-r--r--  Documentation/networking/ip-sysctl.txt  34
-rw-r--r--  Documentation/networking/nfc.rst (renamed from Documentation/networking/nfc.txt)  74
-rw-r--r--  Documentation/networking/phy.rst  3
-rw-r--r--  Documentation/networking/tls.rst  26
-rw-r--r--  Documentation/power/drivers-testing.rst  7
-rw-r--r--  Documentation/power/freezing-of-tasks.rst  37
-rw-r--r--  Documentation/power/opp.rst  32
-rw-r--r--  Documentation/power/pci.rst  28
-rw-r--r--  Documentation/power/pm_qos_interface.rst  26
-rw-r--r--  Documentation/power/runtime_pm.rst  4
-rw-r--r--  Documentation/power/suspend-and-cpuhotplug.rst  7
-rw-r--r--  Documentation/power/swsusp.rst  14
-rw-r--r--  Documentation/sound/kernel-api/writing-an-alsa-driver.rst  222
-rw-r--r--  Documentation/trace/intel_th.rst  28
-rw-r--r--  Documentation/virt/kvm/api.txt  58
-rw-r--r--  Documentation/virt/kvm/arm/pvtime.rst  80
-rw-r--r--  Documentation/virt/kvm/devices/vcpu.txt  14
-rw-r--r--  Documentation/virt/kvm/devices/xics.txt  14
-rw-r--r--  Documentation/virt/kvm/devices/xive.txt  8
-rw-r--r--  Documentation/x86/boot.rst  174
-rw-r--r--  Documentation/x86/index.rst  1
-rw-r--r--  Documentation/x86/tsx_async_abort.rst  117
-rw-r--r--  MAINTAINERS  281
-rw-r--r--  Makefile  5
-rw-r--r--  arch/Kconfig  21
-rw-r--r--  arch/alpha/kernel/perf_event.c  4
-rw-r--r--  arch/alpha/kernel/vmlinux.lds.S  18
-rw-r--r--  arch/arc/kernel/vmlinux.lds.S  6
-rw-r--r--  arch/arm/Kconfig  1
-rw-r--r--  arch/arm/boot/dts/am3517.dtsi  31
-rw-r--r--  arch/arm/boot/dts/am3517_mt_ventoux.dts  2
-rw-r--r--  arch/arm/boot/dts/am571x-idk.dts  27
-rw-r--r--  arch/arm/boot/dts/am572x-idk.dts  5
-rw-r--r--  arch/arm/boot/dts/am574x-idk.dts  5
-rw-r--r--  arch/arm/boot/dts/am57xx-idk-common.dtsi  5
-rw-r--r--  arch/arm/boot/dts/dra7-l4.dtsi  52
-rw-r--r--  arch/arm/boot/dts/logicpd-som-lv-35xx-devkit.dts  2
-rw-r--r--  arch/arm/boot/dts/logicpd-torpedo-35xx-devkit.dts  2
-rw-r--r--  arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi  6
-rw-r--r--  arch/arm/boot/dts/omap3-beagle-xm.dts  2
-rw-r--r--  arch/arm/boot/dts/omap3-beagle.dts  2
-rw-r--r--  arch/arm/boot/dts/omap3-cm-t3530.dts  2
-rw-r--r--  arch/arm/boot/dts/omap3-cm-t3730.dts  2
-rw-r--r--  arch/arm/boot/dts/omap3-devkit8000-lcd43.dts  2
-rw-r--r--  arch/arm/boot/dts/omap3-devkit8000-lcd70.dts  2
-rw-r--r--  arch/arm/boot/dts/omap3-devkit8000.dts  2
-rw-r--r--  arch/arm/boot/dts/omap3-gta04.dtsi  2
-rw-r--r--  arch/arm/boot/dts/omap3-ha-lcd.dts  2
-rw-r--r--  arch/arm/boot/dts/omap3-ha.dts  2
-rw-r--r--  arch/arm/boot/dts/omap3-igep0020-rev-f.dts  2
-rw-r--r--  arch/arm/boot/dts/omap3-igep0020.dts  2
-rw-r--r--  arch/arm/boot/dts/omap3-igep0030-rev-g.dts  2
-rw-r--r--  arch/arm/boot/dts/omap3-igep0030.dts  2
-rw-r--r--  arch/arm/boot/dts/omap3-ldp.dts  2
-rw-r--r--  arch/arm/boot/dts/omap3-lilly-a83x.dtsi  2
-rw-r--r--  arch/arm/boot/dts/omap3-lilly-dbb056.dts  2
-rw-r--r--  arch/arm/boot/dts/omap3-n9.dts  2
-rw-r--r--  arch/arm/boot/dts/omap3-n900.dts  6
-rw-r--r--  arch/arm/boot/dts/omap3-n950-n9.dtsi  7
-rw-r--r--  arch/arm/boot/dts/omap3-n950.dts  2
-rw-r--r--  arch/arm/boot/dts/omap3-overo-storm-alto35.dts  2
-rw-r--r--  arch/arm/boot/dts/omap3-overo-storm-chestnut43.dts  2
-rw-r--r--  arch/arm/boot/dts/omap3-overo-storm-gallop43.dts  2
-rw-r--r--  arch/arm/boot/dts/omap3-overo-storm-palo35.dts  2
-rw-r--r--  arch/arm/boot/dts/omap3-overo-storm-palo43.dts  2
-rw-r--r--  arch/arm/boot/dts/omap3-overo-storm-summit.dts  2
-rw-r--r--  arch/arm/boot/dts/omap3-overo-storm-tobi.dts  2
-rw-r--r--  arch/arm/boot/dts/omap3-overo-storm-tobiduo.dts  2
-rw-r--r--  arch/arm/boot/dts/omap3-pandora-1ghz.dts  2
-rw-r--r--  arch/arm/boot/dts/omap3-pandora-common.dtsi  36
-rw-r--r--  arch/arm/boot/dts/omap3-sbc-t3530.dts  2
-rw-r--r--  arch/arm/boot/dts/omap3-sbc-t3730.dts  2
-rw-r--r--  arch/arm/boot/dts/omap3-sniper.dts  2
-rw-r--r--  arch/arm/boot/dts/omap3-thunder.dts  2
-rw-r--r--  arch/arm/boot/dts/omap3-zoom3.dts  2
-rw-r--r--  arch/arm/boot/dts/omap3430-sdp.dts  2
-rw-r--r--  arch/arm/boot/dts/omap34xx.dtsi  66
-rw-r--r--  arch/arm/boot/dts/omap36xx.dtsi  65
-rw-r--r--  arch/arm/configs/omap2plus_defconfig  1
-rw-r--r--  arch/arm/crypto/Kconfig  36
-rw-r--r--  arch/arm/crypto/Makefile  49
-rw-r--r--  arch/arm/crypto/chacha-glue.c  343
-rw-r--r--  arch/arm/crypto/chacha-neon-glue.c  202
-rw-r--r--  arch/arm/crypto/chacha-scalar-core.S  460
-rw-r--r--  arch/arm/crypto/crct10dif-ce-core.S  2
-rw-r--r--  arch/arm/crypto/curve25519-core.S  2062
-rw-r--r--  arch/arm/crypto/curve25519-glue.c  127
-rw-r--r--  arch/arm/crypto/ghash-ce-core.S  1
-rw-r--r--  arch/arm/crypto/poly1305-armv4.pl  1236
-rw-r--r--  arch/arm/crypto/poly1305-core.S_shipped  1158
-rw-r--r--  arch/arm/crypto/poly1305-glue.c  276
-rw-r--r--  arch/arm/crypto/sha1-ce-core.S  1
-rw-r--r--  arch/arm/crypto/sha2-ce-core.S  1
-rw-r--r--  arch/arm/include/asm/kvm_arm.h  1
-rw-r--r--  arch/arm/include/asm/kvm_emulate.h  9
-rw-r--r--  arch/arm/include/asm/kvm_host.h  33
-rw-r--r--  arch/arm/include/uapi/asm/kvm.h  3
-rw-r--r--  arch/arm/kernel/vmlinux-xip.lds.S  4
-rw-r--r--  arch/arm/kernel/vmlinux.lds.S  4
-rw-r--r--  arch/arm/kvm/Makefile  2
-rw-r--r--  arch/arm/kvm/guest.c  14
-rw-r--r--  arch/arm/kvm/handle_exit.c  2
-rw-r--r--  arch/arm/mach-imx/cpuidle-imx6q.c  4
-rw-r--r--  arch/arm/mach-omap2/Makefile  3
-rw-r--r--  arch/arm/mach-omap2/common.h  1
-rw-r--r--  arch/arm/mach-omap2/hsmmc.c  171
-rw-r--r--  arch/arm/mach-omap2/hsmmc.h  32
-rw-r--r--  arch/arm/mach-omap2/pdata-quirks.c  119
-rw-r--r--  arch/arm/mach-pxa/icontrol.c  9
-rw-r--r--  arch/arm/mach-pxa/zeus.c  9
-rw-r--r--  arch/arm/mach-tegra/cpuidle-tegra20.c  2
-rw-r--r--  arch/arm/mm/proc-v7-bugs.c  21
-rw-r--r--  arch/arm/plat-pxa/ssp.c  4
-rw-r--r--  arch/arm/xen/mm.c  3
-rw-r--r--  arch/arm64/Kconfig  54
-rw-r--r--  arch/arm64/Makefile  5
-rw-r--r--  arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts  1
-rw-r--r--  arch/arm64/crypto/Kconfig  17
-rw-r--r--  arch/arm64/crypto/Makefile  10
-rw-r--r--  arch/arm64/crypto/aes-neonbs-glue.c  2
-rw-r--r--  arch/arm64/crypto/chacha-neon-glue.c  81
-rw-r--r--  arch/arm64/crypto/ghash-ce-core.S  501
-rw-r--r--  arch/arm64/crypto/ghash-ce-glue.c  293
-rw-r--r--  arch/arm64/crypto/poly1305-armv8.pl  913
-rw-r--r--  arch/arm64/crypto/poly1305-core.S_shipped  835
-rw-r--r--  arch/arm64/crypto/poly1305-glue.c  237
-rw-r--r--  arch/arm64/include/asm/asm-uaccess.h  25
-rw-r--r--  arch/arm64/include/asm/barrier.h  12
-rw-r--r--  arch/arm64/include/asm/cache.h  3
-rw-r--r--  arch/arm64/include/asm/cpucaps.h  4
-rw-r--r--  arch/arm64/include/asm/cpufeature.h  14
-rw-r--r--  arch/arm64/include/asm/daifflags.h  19
-rw-r--r--  arch/arm64/include/asm/exception.h  22
-rw-r--r--  arch/arm64/include/asm/ftrace.h  23
-rw-r--r--  arch/arm64/include/asm/insn.h  3
-rw-r--r--  arch/arm64/include/asm/irqflags.h  19
-rw-r--r--  arch/arm64/include/asm/kvm_arm.h  3
-rw-r--r--  arch/arm64/include/asm/kvm_emulate.h  26
-rw-r--r--  arch/arm64/include/asm/kvm_host.h  40
-rw-r--r--  arch/arm64/include/asm/memory.h  6
-rw-r--r--  arch/arm64/include/asm/module.h  2
-rw-r--r--  arch/arm64/include/asm/paravirt.h  9
-rw-r--r--  arch/arm64/include/asm/pgtable-hwdef.h  2
-rw-r--r--  arch/arm64/include/asm/pgtable.h  16
-rw-r--r--  arch/arm64/include/asm/processor.h  16
-rw-r--r--  arch/arm64/include/asm/pvclock-abi.h  17
-rw-r--r--  arch/arm64/include/asm/syscall_wrapper.h  6
-rw-r--r--  arch/arm64/include/asm/traps.h  10
-rw-r--r--  arch/arm64/include/asm/uaccess.h  27
-rw-r--r--  arch/arm64/include/uapi/asm/kvm.h  5
-rw-r--r--  arch/arm64/kernel/Makefile  6
-rw-r--r--  arch/arm64/kernel/asm-offsets.c  1
-rw-r--r--  arch/arm64/kernel/cpu_errata.c  147
-rw-r--r--  arch/arm64/kernel/cpufeature.c  1
-rw-r--r--  arch/arm64/kernel/cpuinfo.c  2
-rw-r--r--  arch/arm64/kernel/entry-common.c  332
-rw-r--r--  arch/arm64/kernel/entry-ftrace.S  140
-rw-r--r--  arch/arm64/kernel/entry.S  281
-rw-r--r--  arch/arm64/kernel/fpsimd.c  6
-rw-r--r--  arch/arm64/kernel/ftrace.c  123
-rw-r--r--  arch/arm64/kernel/hw_breakpoint.c  8
-rw-r--r--  arch/arm64/kernel/insn.c  13
-rw-r--r--  arch/arm64/kernel/kaslr.c  44
-rw-r--r--  arch/arm64/kernel/module-plts.c  3
-rw-r--r--  arch/arm64/kernel/module.c  57
-rw-r--r--  arch/arm64/kernel/paravirt.c  140
-rw-r--r--  arch/arm64/kernel/perf_event.c  191
-rw-r--r--  arch/arm64/kernel/probes/kprobes.c  4
-rw-r--r--  arch/arm64/kernel/psci.c  15
-rw-r--r--  arch/arm64/kernel/sdei.c  3
-rw-r--r--  arch/arm64/kernel/smp.c  11
-rw-r--r--  arch/arm64/kernel/sys_compat.c  11
-rw-r--r--  arch/arm64/kernel/syscall.c  4
-rw-r--r--  arch/arm64/kernel/time.c  3
-rw-r--r--  arch/arm64/kernel/traps.c  21
-rw-r--r--  arch/arm64/kernel/vmlinux.lds.S  13
-rw-r--r--  arch/arm64/kvm/Kconfig  4
-rw-r--r--  arch/arm64/kvm/Makefile  2
-rw-r--r--  arch/arm64/kvm/guest.c  23
-rw-r--r--  arch/arm64/kvm/handle_exit.c  4
-rw-r--r--  arch/arm64/kvm/hyp/switch.c  52
-rw-r--r--  arch/arm64/kvm/hyp/sysreg-sr.c  35
-rw-r--r--  arch/arm64/kvm/hyp/tlb.c  23
-rw-r--r--  arch/arm64/kvm/inject_fault.c  4
-rw-r--r--  arch/arm64/lib/clear_user.S  2
-rw-r--r--  arch/arm64/lib/copy_from_user.S  2
-rw-r--r--  arch/arm64/lib/copy_in_user.S  2
-rw-r--r--  arch/arm64/lib/copy_to_user.S  2
-rw-r--r--  arch/arm64/lib/uaccess_flushcache.c  6
-rw-r--r--  arch/arm64/mm/fault.c  64
-rw-r--r--  arch/arm64/mm/init.c  91
-rw-r--r--  arch/arm64/mm/mmu.c  7
-rw-r--r--  arch/c6x/kernel/vmlinux.lds.S  8
-rw-r--r--  arch/csky/kernel/vmlinux.lds.S  5
-rw-r--r--  arch/h8300/kernel/vmlinux.lds.S  9
-rw-r--r--  arch/hexagon/kernel/vmlinux.lds.S  5
-rw-r--r--  arch/ia64/kernel/setup.c  2
-rw-r--r--  arch/ia64/kernel/time.c  4
-rw-r--r--  arch/ia64/kernel/vmlinux.lds.S  20
-rw-r--r--  arch/m68k/atari/config.c  27
-rw-r--r--  arch/m68k/configs/amiga_defconfig  14
-rw-r--r--  arch/m68k/configs/apollo_defconfig  8
-rw-r--r--  arch/m68k/configs/atari_defconfig  8
-rw-r--r--  arch/m68k/configs/bvme6000_defconfig  8
-rw-r--r--  arch/m68k/configs/hp300_defconfig  8
-rw-r--r--  arch/m68k/configs/mac_defconfig  8
-rw-r--r--  arch/m68k/configs/multi_defconfig  14
-rw-r--r--  arch/m68k/configs/mvme147_defconfig  8
-rw-r--r--  arch/m68k/configs/mvme16x_defconfig  8
-rw-r--r--  arch/m68k/configs/q40_defconfig  8
-rw-r--r--  arch/m68k/configs/sun3_defconfig  8
-rw-r--r--  arch/m68k/configs/sun3x_defconfig  8
-rw-r--r--  arch/m68k/kernel/vmlinux-nommu.lds  4
-rw-r--r--  arch/m68k/kernel/vmlinux-std.lds  4
-rw-r--r--  arch/m68k/kernel/vmlinux-sun3.lds  4
-rw-r--r--  arch/m68k/q40/config.c  1
-rw-r--r--  arch/microblaze/kernel/vmlinux.lds.S  8
-rw-r--r--  arch/mips/Kbuild.platforms  2
-rw-r--r--  arch/mips/Kconfig  185
-rw-r--r--  arch/mips/Kconfig.debug  3
-rw-r--r--  arch/mips/Makefile  5
-rw-r--r--  arch/mips/Makefile.postlink  10
-rw-r--r--  arch/mips/boot/dts/ingenic/ci20.dts  214
-rw-r--r--  arch/mips/boot/dts/ingenic/jz4780.dtsi  86
-rw-r--r--  arch/mips/boot/dts/ralink/gardena_smart_gateway_mt7688.dts  197
-rw-r--r--  arch/mips/boot/dts/ralink/mt7628a.dtsi  16
-rw-r--r--  arch/mips/cavium-octeon/setup.c  2
-rw-r--r--  arch/mips/configs/fuloong2e_defconfig  2
-rw-r--r--  arch/mips/configs/lemote2f_defconfig  2
-rw-r--r--  arch/mips/configs/loongson3_defconfig  2
-rw-r--r--  arch/mips/crypto/Makefile  18
-rw-r--r--  arch/mips/crypto/chacha-core.S  497
-rw-r--r--  arch/mips/crypto/chacha-glue.c  150
-rw-r--r--  arch/mips/crypto/poly1305-glue.c  203
-rw-r--r--  arch/mips/crypto/poly1305-mips.pl  1273
-rw-r--r--  arch/mips/fw/arc/Makefile  6
-rw-r--r--  arch/mips/fw/arc/cmdline.c  16
-rw-r--r--  arch/mips/fw/arc/env.c  6
-rw-r--r--  arch/mips/fw/arc/file.c  49
-rw-r--r--  arch/mips/fw/arc/identify.c  15
-rw-r--r--  arch/mips/fw/arc/init.c  20
-rw-r--r--  arch/mips/fw/arc/memory.c  9
-rw-r--r--  arch/mips/fw/arc/misc.c  59
-rw-r--r--  arch/mips/fw/arc/promlib.c  25
-rw-r--r--  arch/mips/fw/arc/salone.c  25
-rw-r--r--  arch/mips/fw/arc/time.c  25
-rw-r--r--  arch/mips/fw/arc/tree.c  127
-rw-r--r--  arch/mips/generic/init.c  6
-rw-r--r--  arch/mips/include/asm/atomic.h  571
-rw-r--r--  arch/mips/include/asm/barrier.h  228
-rw-r--r--  arch/mips/include/asm/bitops.h  443
-rw-r--r--  arch/mips/include/asm/bootinfo.h  4
-rw-r--r--  arch/mips/include/asm/bugs.h  18
-rw-r--r--  arch/mips/include/asm/cmpxchg.h  59
-rw-r--r--  arch/mips/include/asm/cop2.h  2
-rw-r--r--  arch/mips/include/asm/cpu-type.h  11
-rw-r--r--  arch/mips/include/asm/cpu.h  10
-rw-r--r--  arch/mips/include/asm/fixmap.h  2
-rw-r--r--  arch/mips/include/asm/futex.h  15
-rw-r--r--  arch/mips/include/asm/hazards.h  2
-rw-r--r--  arch/mips/include/asm/io.h  2
-rw-r--r--  arch/mips/include/asm/irqflags.h  2
-rw-r--r--  arch/mips/include/asm/llsc.h  19
-rw-r--r--  arch/mips/include/asm/mach-ip22/spaces.h  12
-rw-r--r--  arch/mips/include/asm/mach-ip27/mmzone.h  2
-rw-r--r--  arch/mips/include/asm/mach-ip27/topology.h  5
-rw-r--r--  arch/mips/include/asm/mach-ip30/cpu-feature-overrides.h  83
-rw-r--r--  arch/mips/include/asm/mach-ip30/irq.h  87
-rw-r--r--  arch/mips/include/asm/mach-ip30/kernel-entry-init.h  13
-rw-r--r--  arch/mips/include/asm/mach-ip30/mangle-port.h  22
-rw-r--r--  arch/mips/include/asm/mach-ip30/spaces.h  20
-rw-r--r--  arch/mips/include/asm/mach-ip30/war.h  26
-rw-r--r--  arch/mips/include/asm/mach-loongson2ef/cpu-feature-overrides.h  44
-rw-r--r--  arch/mips/include/asm/mach-loongson2ef/cs5536/cs5536.h (renamed from arch/mips/include/asm/mach-loongson64/cs5536/cs5536.h)  0
-rw-r--r--  arch/mips/include/asm/mach-loongson2ef/cs5536/cs5536_mfgpt.h (renamed from arch/mips/include/asm/mach-loongson64/cs5536/cs5536_mfgpt.h)  0
-rw-r--r--  arch/mips/include/asm/mach-loongson2ef/cs5536/cs5536_pci.h (renamed from arch/mips/include/asm/mach-loongson64/cs5536/cs5536_pci.h)  0
-rw-r--r--  arch/mips/include/asm/mach-loongson2ef/cs5536/cs5536_vsm.h (renamed from arch/mips/include/asm/mach-loongson64/cs5536/cs5536_vsm.h)  0
-rw-r--r--  arch/mips/include/asm/mach-loongson2ef/loongson.h  326
-rw-r--r--  arch/mips/include/asm/mach-loongson2ef/machine.h (renamed from arch/mips/include/asm/mach-loongson64/machine.h)  12
-rw-r--r--  arch/mips/include/asm/mach-loongson2ef/mc146818rtc.h  36
-rw-r--r--  arch/mips/include/asm/mach-loongson2ef/mem.h (renamed from arch/mips/include/asm/mach-loongson64/mem.h)  6
-rw-r--r--  arch/mips/include/asm/mach-loongson2ef/pci.h  46
-rw-r--r--  arch/mips/include/asm/mach-loongson2ef/spaces.h  10
-rw-r--r--  arch/mips/include/asm/mach-loongson32/prom.h  20
-rw-r--r--  arch/mips/include/asm/mach-loongson64/cpu-feature-overrides.h  3
-rw-r--r--  arch/mips/include/asm/mach-loongson64/irq.h  4
-rw-r--r--  arch/mips/include/asm/mach-loongson64/kernel-entry-init.h  32
-rw-r--r--  arch/mips/include/asm/mach-loongson64/loongson.h  115
-rw-r--r--  arch/mips/include/asm/mach-loongson64/loongson_regs.h  227
-rw-r--r--  arch/mips/include/asm/mach-loongson64/mmzone.h  29
-rw-r--r--  arch/mips/include/asm/mach-loongson64/pci.h  31
-rw-r--r--  arch/mips/include/asm/mach-loongson64/topology.h  4
-rw-r--r--  arch/mips/include/asm/mipsregs.h  6
-rw-r--r--  arch/mips/include/asm/module.h  12
-rw-r--r--  arch/mips/include/asm/pci/bridge.h  1
-rw-r--r--  arch/mips/include/asm/pgalloc.h  4
-rw-r--r--  arch/mips/include/asm/pgtable-32.h  6
-rw-r--r--  arch/mips/include/asm/pgtable-64.h  44
-rw-r--r--  arch/mips/include/asm/pgtable.h  11
-rw-r--r--  arch/mips/include/asm/pmon.h  46
-rw-r--r--  arch/mips/include/asm/processor.h  2
-rw-r--r--  arch/mips/include/asm/r4kcache.h  362
-rw-r--r--  arch/mips/include/asm/sgi/heart.h  272
-rw-r--r--  arch/mips/include/asm/sgi/sgi.h  48
-rw-r--r--  arch/mips/include/asm/sgialib.h  22
-rw-r--r--  arch/mips/include/asm/sgiarcs.h  103
-rw-r--r--  arch/mips/include/asm/sn/agent.h  2
-rw-r--r--  arch/mips/include/asm/sn/arch.h  31
-rw-r--r--  arch/mips/include/asm/sn/gda.h  4
-rw-r--r--  arch/mips/include/asm/sn/hub.h  4
-rw-r--r--  arch/mips/include/asm/sn/ioc3.h  9
-rw-r--r--  arch/mips/include/asm/sn/mapped_kernel.h  4
-rw-r--r--  arch/mips/include/asm/sn/sn0/arch.h  18
-rw-r--r--  arch/mips/include/asm/sn/sn_private.h  5
-rw-r--r--  arch/mips/include/asm/sn/types.h  4
-rw-r--r--  arch/mips/include/asm/string.h  121
-rw-r--r--  arch/mips/include/asm/sync.h  207
-rw-r--r--  arch/mips/include/asm/unroll.h  77
-rw-r--r--  arch/mips/kernel/Makefile  2
-rw-r--r--  arch/mips/kernel/cpu-probe.c  53
-rw-r--r--  arch/mips/kernel/genex.S  8
-rw-r--r--  arch/mips/kernel/idle.c  7
-rw-r--r--  arch/mips/kernel/perf_event_mipsxx.c  6
-rw-r--r--  arch/mips/kernel/pm-cps.c  20
-rw-r--r--  arch/mips/kernel/r4k-bugs64.c (renamed from arch/mips/kernel/cpu-bugs64.c)  11
-rw-r--r--  arch/mips/kernel/setup.c  137
-rw-r--r--  arch/mips/kernel/smp-bmips.c  1
-rw-r--r--  arch/mips/kernel/syscall.c  3
-rw-r--r--  arch/mips/kernel/traps.c  4
-rw-r--r--  arch/mips/kernel/vmlinux.lds.S  15
-rw-r--r--  arch/mips/kvm/mmu.c  40
-rw-r--r--  arch/mips/kvm/trap_emul.c  4
-rw-r--r--  arch/mips/lib/bitops.c  57
-rw-r--r--  arch/mips/lib/csum_partial.S  4
-rw-r--r--  arch/mips/loongson2ef/Kconfig  95
-rw-r--r--  arch/mips/loongson2ef/Makefile  18
-rw-r--r--  arch/mips/loongson2ef/Platform  32
-rw-r--r--  arch/mips/loongson2ef/common/Makefile (renamed from arch/mips/loongson64/common/Makefile)  3
-rw-r--r--  arch/mips/loongson2ef/common/bonito-irq.c (renamed from arch/mips/loongson64/common/bonito-irq.c)  0
-rw-r--r--  arch/mips/loongson2ef/common/cs5536/Makefile (renamed from arch/mips/loongson64/common/cs5536/Makefile)  0
-rw-r--r--  arch/mips/loongson2ef/common/cs5536/cs5536_acc.c (renamed from arch/mips/loongson64/common/cs5536/cs5536_acc.c)  0
-rw-r--r--  arch/mips/loongson2ef/common/cs5536/cs5536_ehci.c (renamed from arch/mips/loongson64/common/cs5536/cs5536_ehci.c)  0
-rw-r--r--  arch/mips/loongson2ef/common/cs5536/cs5536_ide.c (renamed from arch/mips/loongson64/common/cs5536/cs5536_ide.c)  0
-rw-r--r--  arch/mips/loongson2ef/common/cs5536/cs5536_isa.c (renamed from arch/mips/loongson64/common/cs5536/cs5536_isa.c)  0
-rw-r--r--  arch/mips/loongson2ef/common/cs5536/cs5536_mfgpt.c (renamed from arch/mips/loongson64/common/cs5536/cs5536_mfgpt.c)  0
-rw-r--r--  arch/mips/loongson2ef/common/cs5536/cs5536_ohci.c (renamed from arch/mips/loongson64/common/cs5536/cs5536_ohci.c)  0
-rw-r--r--  arch/mips/loongson2ef/common/cs5536/cs5536_pci.c (renamed from arch/mips/loongson64/common/cs5536/cs5536_pci.c)  0
-rw-r--r--  arch/mips/loongson2ef/common/env.c  53
-rw-r--r--  arch/mips/loongson2ef/common/init.c (renamed from arch/mips/loongson64/common/init.c)  10
-rw-r--r--  arch/mips/loongson2ef/common/irq.c (renamed from arch/mips/loongson64/common/irq.c)  0
-rw-r--r--  arch/mips/loongson2ef/common/machtype.c (renamed from arch/mips/loongson64/common/machtype.c)  1
-rw-r--r--  arch/mips/loongson2ef/common/mem.c  62
-rw-r--r--  arch/mips/loongson2ef/common/pci.c (renamed from arch/mips/loongson64/common/pci.c)  8
-rw-r--r--  arch/mips/loongson2ef/common/platform.c (renamed from arch/mips/loongson64/common/platform.c)  0
-rw-r--r--  arch/mips/loongson2ef/common/pm.c (renamed from arch/mips/loongson64/common/pm.c)  9
-rw-r--r--  arch/mips/loongson2ef/common/reset.c (renamed from arch/mips/loongson64/common/reset.c)  21
-rw-r--r--  arch/mips/loongson2ef/common/rtc.c (renamed from arch/mips/loongson64/common/rtc.c)  0
-rw-r--r--  arch/mips/loongson2ef/common/serial.c  86
-rw-r--r--  arch/mips/loongson2ef/common/setup.c (renamed from arch/mips/loongson64/common/setup.c)  21
-rw-r--r--  arch/mips/loongson2ef/common/time.c (renamed from arch/mips/loongson64/common/time.c)  4
-rw-r--r--  arch/mips/loongson2ef/common/uart_base.c (renamed from arch/mips/loongson64/common/uart_base.c)  19
-rw-r--r--  arch/mips/loongson2ef/fuloong-2e/Makefile (renamed from arch/mips/loongson64/fuloong-2e/Makefile)  0
-rw-r--r--  arch/mips/loongson2ef/fuloong-2e/dma.c (renamed from arch/mips/loongson64/fuloong-2e/dma.c)  0
-rw-r--r--  arch/mips/loongson2ef/fuloong-2e/irq.c (renamed from arch/mips/loongson64/fuloong-2e/irq.c)  0
-rw-r--r--  arch/mips/loongson2ef/fuloong-2e/reset.c (renamed from arch/mips/loongson64/fuloong-2e/reset.c)  0
-rw-r--r--  arch/mips/loongson2ef/lemote-2f/Makefile (renamed from arch/mips/loongson64/lemote-2f/Makefile)  0
-rw-r--r--  arch/mips/loongson2ef/lemote-2f/clock.c (renamed from arch/mips/loongson64/lemote-2f/clock.c)  6
-rw-r--r--  arch/mips/loongson2ef/lemote-2f/dma.c (renamed from arch/mips/loongson64/lemote-2f/dma.c)  0
-rw-r--r--  arch/mips/loongson2ef/lemote-2f/ec_kb3310b.c (renamed from arch/mips/loongson64/lemote-2f/ec_kb3310b.c)  0
-rw-r--r--  arch/mips/loongson2ef/lemote-2f/ec_kb3310b.h (renamed from arch/mips/loongson64/lemote-2f/ec_kb3310b.h)  0
-rw-r--r--  arch/mips/loongson2ef/lemote-2f/irq.c (renamed from arch/mips/loongson64/lemote-2f/irq.c)  0
-rw-r--r--  arch/mips/loongson2ef/lemote-2f/machtype.c (renamed from arch/mips/loongson64/lemote-2f/machtype.c)  0
-rw-r--r--  arch/mips/loongson2ef/lemote-2f/pm.c (renamed from arch/mips/loongson64/lemote-2f/pm.c)  0
-rw-r--r--  arch/mips/loongson2ef/lemote-2f/reset.c (renamed from arch/mips/loongson64/lemote-2f/reset.c)  2
-rw-r--r--  arch/mips/loongson32/Kconfig  2
-rw-r--r--  arch/mips/loongson32/Platform  4
-rw-r--r--  arch/mips/loongson32/common/prom.c  59
-rw-r--r--  arch/mips/loongson32/common/setup.c  11
-rw-r--r--  arch/mips/loongson64/Kconfig  119
-rw-r--r--  arch/mips/loongson64/Makefile  29
-rw-r--r--  arch/mips/loongson64/Platform  35
-rw-r--r--  arch/mips/loongson64/acpi_init.c (renamed from arch/mips/loongson64/loongson-3/acpi_init.c)  0
-rw-r--r--  arch/mips/loongson64/common/cmdline.c  44
-rw-r--r--  arch/mips/loongson64/common/early_printk.c  38
-rw-r--r--  arch/mips/loongson64/common/mem.c  157
-rw-r--r--  arch/mips/loongson64/common/serial.c  117
-rw-r--r--  arch/mips/loongson64/cop2-ex.c (renamed from arch/mips/loongson64/loongson-3/cop2-ex.c)  0
-rw-r--r--  arch/mips/loongson64/dma.c (renamed from arch/mips/loongson64/loongson-3/dma.c)  0
-rw-r--r--  arch/mips/loongson64/env.c (renamed from arch/mips/loongson64/common/env.c)  62
-rw-r--r--  arch/mips/loongson64/hpet.c (renamed from arch/mips/loongson64/loongson-3/hpet.c)  0
-rw-r--r--  arch/mips/loongson64/init.c  46
-rw-r--r--  arch/mips/loongson64/irq.c (renamed from arch/mips/loongson64/loongson-3/irq.c)  8
-rw-r--r--  arch/mips/loongson64/loongson-3/Makefile  11
-rw-r--r--  arch/mips/loongson64/numa.c (renamed from arch/mips/loongson64/loongson-3/numa.c)  11
-rw-r--r--  arch/mips/loongson64/pci.c  51
-rw-r--r--  arch/mips/loongson64/platform.c (renamed from arch/mips/loongson64/loongson-3/platform.c)  0
-rw-r--r--  arch/mips/loongson64/pm.c  104
-rw-r--r--  arch/mips/loongson64/reset.c  64
-rw-r--r--  arch/mips/loongson64/rtc.c  39
-rw-r--r--  arch/mips/loongson64/setup.c  30
-rw-r--r--  arch/mips/loongson64/smp.c (renamed from arch/mips/loongson64/loongson-3/smp.c)  160
-rw-r--r--  arch/mips/loongson64/smp.h (renamed from arch/mips/loongson64/loongson-3/smp.h)  0
-rw-r--r--  arch/mips/loongson64/time.c  29
-rw-r--r--  arch/mips/math-emu/me-debugfs.c  3
-rw-r--r--  arch/mips/mm/c-r3k.c  4
-rw-r--r--  arch/mips/mm/c-r4k.c  51
-rw-r--r--  arch/mips/mm/c-tx39.c  4
-rw-r--r--  arch/mips/mm/fault.c  12
-rw-r--r--  arch/mips/mm/hugetlbpage.c  14
-rw-r--r--  arch/mips/mm/init.c  6
-rw-r--r--  arch/mips/mm/ioremap.c  6
-rw-r--r--  arch/mips/mm/page.c  2
-rw-r--r--  arch/mips/mm/pgtable-32.c  6
-rw-r--r--  arch/mips/mm/tlb-r4k.c  8
-rw-r--r--  arch/mips/mm/tlbex.c  6
-rw-r--r--  arch/mips/oprofile/Makefile  4
-rw-r--r--  arch/mips/oprofile/common.c  6
-rw-r--r--  arch/mips/oprofile/op_model_mipsxx.c  2
-rw-r--r--  arch/mips/pci/Makefile  2
-rw-r--r--  arch/mips/pci/pci-ip27.c  35
-rw-r--r--  arch/mips/pci/pci-xtalk-bridge.c  156
-rw-r--r--  arch/mips/power/cpu.c  8
-rw-r--r--  arch/mips/sgi-ip22/ip22-mc.c  74
-rw-r--r--  arch/mips/sgi-ip27/Kconfig  7
-rw-r--r--  arch/mips/sgi-ip27/ip27-common.h  10
-rw-r--r--  arch/mips/sgi-ip27/ip27-hubio.c  10
-rw-r--r--  arch/mips/sgi-ip27/ip27-init.c  74
-rw-r--r--  arch/mips/sgi-ip27/ip27-irq.c  8
-rw-r--r--  arch/mips/sgi-ip27/ip27-klconfig.c  14
-rw-r--r--  arch/mips/sgi-ip27/ip27-klnuma.c  21
-rw-r--r--  arch/mips/sgi-ip27/ip27-memory.c  77
-rw-r--r--  arch/mips/sgi-ip27/ip27-nmi.c  16
-rw-r--r--  arch/mips/sgi-ip27/ip27-reset.c  8
-rw-r--r--  arch/mips/sgi-ip27/ip27-smp.c  82
-rw-r--r--  arch/mips/sgi-ip27/ip27-timer.c  6
-rw-r--r--  arch/mips/sgi-ip27/ip27-xtalk.c  48
-rw-r--r--  arch/mips/sgi-ip30/Makefile  9
-rw-r--r--  arch/mips/sgi-ip30/Platform  8
-rw-r--r--  arch/mips/sgi-ip30/ip30-common.h  9
-rw-r--r--  arch/mips/sgi-ip30/ip30-console.c  23
-rw-r--r--  arch/mips/sgi-ip30/ip30-irq.c  328
-rw-r--r--  arch/mips/sgi-ip30/ip30-power.c  41
-rw-r--r--  arch/mips/sgi-ip30/ip30-setup.c  138
-rw-r--r--  arch/mips/sgi-ip30/ip30-smp.c  149
-rw-r--r--  arch/mips/sgi-ip30/ip30-timer.c  63
-rw-r--r--  arch/mips/sgi-ip30/ip30-xtalk.c  152
-rw-r--r--  arch/mips/tools/.gitignore  1
-rw-r--r--  arch/mips/tools/Makefile  5
-rw-r--r--  arch/mips/tools/loongson3-llsc-check.c  307
-rw-r--r--  arch/mips/vdso/Makefile  1
-rw-r--r--  arch/nds32/kernel/vmlinux.lds.S  5
-rw-r--r--  arch/nios2/kernel/vmlinux.lds.S  5
-rw-r--r--  arch/openrisc/kernel/vmlinux.lds.S  7
-rw-r--r--  arch/parisc/Makefile  1
-rw-r--r--  arch/parisc/kernel/module.c  10
-rw-r--r--  arch/parisc/kernel/module.lds  7
-rw-r--r--  arch/parisc/kernel/vmlinux.lds.S  11
-rw-r--r--  arch/powerpc/crypto/aes-spe-glue.c  454
-rw-r--r--  arch/powerpc/include/asm/kvm_host.h  1
-rw-r--r--  arch/powerpc/include/asm/kvm_ppc.h  1
-rw-r--r--  arch/powerpc/include/asm/local.h  2
-rw-r--r--  arch/powerpc/include/asm/page.h  9
-rw-r--r--  arch/powerpc/include/asm/reg.h  12
-rw-r--r--  arch/powerpc/include/uapi/asm/kvm.h  3
-rw-r--r--  arch/powerpc/kernel/time.c  6
-rw-r--r--  arch/powerpc/kernel/vmlinux.lds.S  37
-rw-r--r--  arch/powerpc/kvm/book3s.c  27
-rw-r--r--  arch/powerpc/kvm/book3s.h  3
-rw-r--r--  arch/powerpc/kvm/book3s_32_mmu.c  6
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu.c  15
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_hv.c  26
-rw-r--r--  arch/powerpc/kvm/book3s_64_vio.c  2
-rw-r--r--  arch/powerpc/kvm/book3s_hv.c  28
-rw-r--r--  arch/powerpc/kvm/book3s_hv_builtin.c  82
-rw-r--r--  arch/powerpc/kvm/book3s_hv_nested.c  2
-rw-r--r--  arch/powerpc/kvm/book3s_pr.c  40
-rw-r--r--  arch/powerpc/kvm/book3s_xive.c  128
-rw-r--r--  arch/powerpc/kvm/book3s_xive.h  5
-rw-r--r--  arch/powerpc/kvm/book3s_xive_native.c  82
-rw-r--r--  arch/powerpc/kvm/e500_mmu_host.c  6
-rw-r--r--  arch/powerpc/kvm/powerpc.c  2
-rw-r--r--  arch/powerpc/mm/mem.c  20
-rw-r--r--  arch/powerpc/perf/core-book3s.c  18
-rw-r--r--  arch/riscv/Kconfig  2
-rw-r--r--  arch/riscv/kernel/module.c  4
-rw-r--r--  arch/riscv/kernel/vmlinux.lds.S  5
-rw-r--r--  arch/s390/Kconfig  28
-rw-r--r--  arch/s390/boot/startup.c  2
-rw-r--r--  arch/s390/configs/debug_defconfig  1
-rw-r--r--  arch/s390/crypto/aes_s390.c  609
-rw-r--r--  arch/s390/crypto/des_s390.c  419
-rw-r--r--  arch/s390/crypto/paes_s390.c  414
-rw-r--r--  arch/s390/crypto/sha_common.c  7
-rw-r--r--  arch/s390/include/asm/alternative.h  4
-rw-r--r--  arch/s390/include/asm/bug.h  4
-rw-r--r--  arch/s390/include/asm/ctl_reg.h  1
-rw-r--r--  arch/s390/include/asm/kvm_host.h  1
-rw-r--r--  arch/s390/include/asm/page.h  2
-rw-r--r--  arch/s390/include/asm/pgalloc.h  16
-rw-r--r--  arch/s390/include/asm/pgtable.h  97
-rw-r--r--  arch/s390/include/asm/processor.h  2
-rw-r--r--  arch/s390/include/asm/qdio.h  1
-rw-r--r--  arch/s390/include/asm/spinlock.h  2
-rw-r--r--  arch/s390/include/asm/stacktrace.h  2
-rw-r--r--  arch/s390/include/asm/timex.h  17
-rw-r--r--  arch/s390/kernel/dis.c  13
-rw-r--r--  arch/s390/kernel/early.c  38
-rw-r--r--  arch/s390/kernel/head64.S  18
-rw-r--r--  arch/s390/kernel/perf_cpum_cf.c  21
-rw-r--r--  arch/s390/kernel/perf_cpum_cf_diag.c  10
-rw-r--r--  arch/s390/kernel/perf_cpum_sf.c  104
-rw-r--r--  arch/s390/kernel/perf_event.c  8
-rw-r--r--  arch/s390/kernel/process.c  36
-rw-r--r--  arch/s390/kernel/smp.c  80
-rw-r--r--  arch/s390/kernel/time.c  9
-rw-r--r--  arch/s390/kernel/unwind_bc.c  14
-rw-r--r--  arch/s390/kernel/vmlinux.lds.S  12
-rw-r--r--  arch/s390/kernel/vtime.c  4
-rw-r--r--  arch/s390/kvm/diag.c  22
-rw-r--r--  arch/s390/kvm/interrupt.c  5
-rw-r--r--  arch/s390/kvm/kvm-s390.c  19
-rw-r--r--  arch/s390/lib/spinlock.c  4
-rw-r--r--  arch/s390/mm/init.c  1
-rw-r--r--  arch/s390/mm/maccess.c  12
-rw-r--r--  arch/s390/net/bpf_jit_comp.c  502
-rw-r--r--  arch/sh/boards/mach-sdk7786/nmi.c  2
-rw-r--r--  arch/sh/drivers/pci/fixups-sdk7786.c  2
-rw-r--r--  arch/sh/include/cpu-sh4/cpu/sh7734.h  2
-rw-r--r--  arch/sh/kernel/io_trapped.c  2
-rw-r--r--  arch/sh/kernel/setup.c  2
-rw-r--r--  arch/sh/kernel/vmlinux.lds.S  3
-rw-r--r--  arch/sh/mm/consistent.c  5
-rw-r--r--  arch/sparc/crypto/aes_glue.c  310
-rw-r--r--  arch/sparc/crypto/camellia_glue.c  217
-rw-r--r--  arch/sparc/crypto/des_glue.c  499
-rw-r--r--  arch/sparc/kernel/smp_64.c  6
-rw-r--r--  arch/sparc/kernel/vmlinux.lds.S  3
-rw-r--r--  arch/sparc/vdso/Makefile  4
-rw-r--r--  arch/um/configs/kunit_defconfig  3
-rw-r--r--  arch/um/include/asm/common.lds.S  3
-rw-r--r--  arch/unicore32/kernel/vmlinux.lds.S  5
-rw-r--r--  arch/x86/Kconfig  119
-rw-r--r--  arch/x86/Kconfig.cpu  25
-rw-r--r--  arch/x86/Kconfig.debug  4
-rw-r--r--  arch/x86/Makefile_32.cpu  1
-rw-r--r--  arch/x86/boot/Makefile  3
-rw-r--r--  arch/x86/boot/compressed/Makefile  5
-rw-r--r--  arch/x86/boot/compressed/eboot.c  9
-rw-r--r--  arch/x86/boot/compressed/efi_stub_32.S  4
-rw-r--r--  arch/x86/boot/compressed/efi_thunk_64.S  33
-rw-r--r--  arch/x86/boot/compressed/head_32.S  15
-rw-r--r--  arch/x86/boot/compressed/head_64.S  63
-rw-r--r--  arch/x86/boot/compressed/kaslr.c  58
-rw-r--r--  arch/x86/boot/compressed/kernel_info.S  22
-rw-r--r--  arch/x86/boot/compressed/mem_encrypt.S  11
-rw-r--r--  arch/x86/boot/copy.S  16
-rw-r--r--  arch/x86/boot/header.S  3
-rw-r--r--  arch/x86/boot/pmjump.S  10
-rw-r--r--  arch/x86/boot/tools/build.c  5
-rw-r--r--  arch/x86/configs/x86_64_defconfig  1
-rw-r--r--  arch/x86/crypto/Makefile  3
-rw-r--r--  arch/x86/crypto/aegis128-aesni-asm.S  36
-rw-r--r--  arch/x86/crypto/aes_ctrby8_avx-x86_64.S  12
-rw-r--r--  arch/x86/crypto/aesni-intel_asm.S  114
-rw-r--r--  arch/x86/crypto/aesni-intel_avx-x86_64.S  32
-rw-r--r--  arch/x86/crypto/blake2s-core.S  258
-rw-r--r--  arch/x86/crypto/blake2s-glue.c  233
-rw-r--r--  arch/x86/crypto/blowfish-x86_64-asm_64.S  16
-rw-r--r--  arch/x86/crypto/camellia-aesni-avx-asm_64.S  44
-rw-r--r--  arch/x86/crypto/camellia-aesni-avx2-asm_64.S  44
-rw-r--r--  arch/x86/crypto/camellia-x86_64-asm_64.S  16
-rw-r--r--  arch/x86/crypto/cast5-avx-x86_64-asm_64.S  24
-rw-r--r--  arch/x86/crypto/cast6-avx-x86_64-asm_64.S  32
-rw-r--r--  arch/x86/crypto/chacha-avx2-x86_64.S  12
-rw-r--r--  arch/x86/crypto/chacha-avx512vl-x86_64.S  12
-rw-r--r--  arch/x86/crypto/chacha-ssse3-x86_64.S  16
-rw-r--r--  arch/x86/crypto/chacha_glue.c  184
-rw-r--r--  arch/x86/crypto/crc32-pclmul_asm.S  4
-rw-r--r--  arch/x86/crypto/crc32c-pcl-intel-asm_64.S  4
-rw-r--r--  arch/x86/crypto/crct10dif-pcl-asm_64.S  4
-rw-r--r--  arch/x86/crypto/curve25519-x86_64.c  2475
-rw-r--r--  arch/x86/crypto/des3_ede-asm_64.S  8
-rw-r--r--  arch/x86/crypto/ghash-clmulni-intel_asm.S  12
-rw-r--r--  arch/x86/crypto/nh-avx2-x86_64.S  4
-rw-r--r--  arch/x86/crypto/nh-sse2-x86_64.S  4
-rw-r--r--  arch/x86/crypto/poly1305-avx2-x86_64.S  4
-rw-r--r--  arch/x86/crypto/poly1305-sse2-x86_64.S  8
-rw-r--r--  arch/x86/crypto/poly1305_glue.c  199
-rw-r--r--  arch/x86/crypto/serpent-avx-x86_64-asm_64.S  32
-rw-r--r--  arch/x86/crypto/serpent-avx2-asm_64.S  32
-rw-r--r--  arch/x86/crypto/serpent-sse2-i586-asm_32.S  8
-rw-r--r--  arch/x86/crypto/serpent-sse2-x86_64-asm_64.S  8
-rw-r--r--  arch/x86/crypto/sha1_avx2_x86_64_asm.S  4
-rw-r--r--  arch/x86/crypto/sha1_ni_asm.S  4
-rw-r--r--  arch/x86/crypto/sha1_ssse3_asm.S  4
-rw-r--r--  arch/x86/crypto/sha256-avx-asm.S  4
-rw-r--r--  arch/x86/crypto/sha256-avx2-asm.S  4
-rw-r--r--  arch/x86/crypto/sha256-ssse3-asm.S  4
-rw-r--r--  arch/x86/crypto/sha256_ni_asm.S  4
-rw-r--r--  arch/x86/crypto/sha512-avx-asm.S  4
-rw-r--r--  arch/x86/crypto/sha512-avx2-asm.S  4
-rw-r--r--  arch/x86/crypto/sha512-ssse3-asm.S  4
-rw-r--r--  arch/x86/crypto/twofish-avx-x86_64-asm_64.S  32
-rw-r--r--  arch/x86/crypto/twofish-i586-asm_32.S  8
-rw-r--r--  arch/x86/crypto/twofish-x86_64-asm_64-3way.S  8
-rw-r--r--  arch/x86/crypto/twofish-x86_64-asm_64.S  8
-rw-r--r--  arch/x86/entry/calling.h  2
-rw-r--r--  arch/x86/entry/common.c  4
-rw-r--r--  arch/x86/entry/entry_32.S  379
-rw-r--r--  arch/x86/entry/entry_64.S  112
-rw-r--r--  arch/x86/entry/entry_64_compat.S  16
-rw-r--r--  arch/x86/entry/syscall_32.c  8
-rw-r--r--  arch/x86/entry/syscall_64.c  14
-rw-r--r--  arch/x86/entry/syscalls/syscall_32.tbl  8
-rw-r--r--  arch/x86/entry/thunk_32.S  4
-rw-r--r--  arch/x86/entry/thunk_64.S  7
-rw-r--r--  arch/x86/entry/vdso/Makefile  2
-rw-r--r--  arch/x86/entry/vdso/vdso32/system_call.S  2
-rw-r--r--  arch/x86/events/amd/core.c  13
-rw-r--r--  arch/x86/events/core.c  8
-rw-r--r--  arch/x86/events/intel/bts.c  8
-rw-r--r--  arch/x86/events/intel/core.c  23
-rw-r--r--  arch/x86/events/intel/lbr.c  23
-rw-r--r--  arch/x86/events/intel/p4.c  5
-rw-r--r--  arch/x86/events/intel/pt.c  203
-rw-r--r--  arch/x86/events/intel/pt.h  12
-rw-r--r--  arch/x86/events/perf_event.h  11
-rw-r--r--  arch/x86/hyperv/hv_apic.c  16
-rw-r--r--  arch/x86/hyperv/hv_init.c  6
-rw-r--r--  arch/x86/ia32/ia32_signal.c  5
-rw-r--r--  arch/x86/include/asm/asm.h  14
-rw-r--r--  arch/x86/include/asm/calgary.h  57
-rw-r--r--  arch/x86/include/asm/cpu_entry_area.h  18
-rw-r--r--  arch/x86/include/asm/cpufeatures.h  3
-rw-r--r--  arch/x86/include/asm/crash.h  9
-rw-r--r--  arch/x86/include/asm/disabled-features.h  2
-rw-r--r--  arch/x86/include/asm/e820/types.h  8
-rw-r--r--  arch/x86/include/asm/efi.h  17
-rw-r--r--  arch/x86/include/asm/emulate_prefix.h  14
-rw-r--r--  arch/x86/include/asm/fixmap.h  2
-rw-r--r--  arch/x86/include/asm/hyperv-tlfs.h  5
-rw-r--r--  arch/x86/include/asm/insn.h  6
-rw-r--r--  arch/x86/include/asm/io_bitmap.h  29
-rw-r--r--  arch/x86/include/asm/kexec.h  10
-rw-r--r--  arch/x86/include/asm/kvm_host.h  38
-rw-r--r--  arch/x86/include/asm/linkage.h  4
-rw-r--r--  arch/x86/include/asm/module.h  2
-rw-r--r--  arch/x86/include/asm/msr-index.h  18
-rw-r--r--  arch/x86/include/asm/nospec-branch.h  4
-rw-r--r--  arch/x86/include/asm/paravirt.h  4
-rw-r--r--  arch/x86/include/asm/paravirt_types.h  2
-rw-r--r--  arch/x86/include/asm/pci.h  7
-rw-r--r--  arch/x86/include/asm/pci_64.h  28
-rw-r--r--  arch/x86/include/asm/pgtable-3level.h  46
-rw-r--r--  arch/x86/include/asm/pgtable.h  6
-rw-r--r--  arch/x86/include/asm/pgtable_32_types.h  8
-rw-r--r--  arch/x86/include/asm/processor.h  130
-rw-r--r--  arch/x86/include/asm/ptrace.h  6
-rw-r--r--  arch/x86/include/asm/purgatory.h  10
-rw-r--r--  arch/x86/include/asm/refcount.h  126
-rw-r--r--  arch/x86/include/asm/rio.h  64
-rw-r--r--  arch/x86/include/asm/sections.h  1
-rw-r--r--  arch/x86/include/asm/segment.h  12
-rw-r--r--  arch/x86/include/asm/switch_to.h  10
-rw-r--r--  arch/x86/include/asm/syscall_wrapper.h  76
-rw-r--r--  arch/x86/include/asm/tce.h  35
-rw-r--r--  arch/x86/include/asm/text-patching.h  24
-rw-r--r--  arch/x86/include/asm/thread_info.h  14
-rw-r--r--  arch/x86/include/asm/trace/hyperv.h  15
-rw-r--r--  arch/x86/include/asm/umip.h  4
-rw-r--r--  arch/x86/include/asm/uv/bios.h  2
-rw-r--r--  arch/x86/include/asm/uv/uv.h  16
-rw-r--r--  arch/x86/include/asm/uv/uv_hub.h  61
-rw-r--r--  arch/x86/include/asm/xen/hypervisor.h  2
-rw-r--r--  arch/x86/include/asm/xen/interface.h  11
-rw-r--r--  arch/x86/include/uapi/asm/bootparam.h  41
-rw-r--r--  arch/x86/kernel/Makefile  3
-rw-r--r--  arch/x86/kernel/acpi/wakeup_32.S  9
-rw-r--r--  arch/x86/kernel/acpi/wakeup_64.S  10
-rw-r--r--  arch/x86/kernel/alternative.c  132
-rw-r--r--  arch/x86/kernel/amd_gart_64.c  12
-rw-r--r--  arch/x86/kernel/apic/apic.c  43
-rw-r--r--  arch/x86/kernel/apic/io_apic.c  25
-rw-r--r--  arch/x86/kernel/apic/x2apic_uv_x.c  184
-rw-r--r--  arch/x86/kernel/cpu/Makefile  2
-rw-r--r--  arch/x86/kernel/cpu/bugs.c  185
-rw-r--r--  arch/x86/kernel/cpu/common.c  292
-rw-r--r--  arch/x86/kernel/cpu/cpu.h  18
-rw-r--r--  arch/x86/kernel/cpu/intel.c  13
-rw-r--r--  arch/x86/kernel/cpu/mce/amd.c  2
-rw-r--r--  arch/x86/kernel/cpu/mce/core.c  93
-rw-r--r--  arch/x86/kernel/cpu/mce/intel.c  11
-rw-r--r--  arch/x86/kernel/cpu/mce/internal.h  6
-rw-r--r--  arch/x86/kernel/cpu/mce/therm_throt.c  251
-rw-r--r--  arch/x86/kernel/cpu/microcode/amd.c  4
-rw-r--r--  arch/x86/kernel/cpu/microcode/core.c  36
-rw-r--r--  arch/x86/kernel/cpu/microcode/intel.c  5
-rw-r--r--  arch/x86/kernel/cpu/mshyperv.c  7
-rw-r--r--  arch/x86/kernel/cpu/rdrand.c  22
-rw-r--r--  arch/x86/kernel/cpu/resctrl/rdtgroup.c  4
-rw-r--r--  arch/x86/kernel/cpu/tsx.c  140
-rw-r--r--  arch/x86/kernel/crash.c  128
-rw-r--r--  arch/x86/kernel/doublefault.c  5
-rw-r--r--  arch/x86/kernel/e820.c  23
-rw-r--r--  arch/x86/kernel/early-quirks.c  2
-rw-r--r--  arch/x86/kernel/fpu/xstate.c  22
-rw-r--r--  arch/x86/kernel/ftrace_32.S  23
-rw-r--r--  arch/x86/kernel/ftrace_64.S  47
-rw-r--r--  arch/x86/kernel/head_32.S  72
-rw-r--r--  arch/x86/kernel/head_64.S  113
-rw-r--r--  arch/x86/kernel/ioport.c  209
-rw-r--r--  arch/x86/kernel/irqflags.S  8
-rw-r--r--  arch/x86/kernel/jailhouse.c  136
-rw-r--r--  arch/x86/kernel/jump_label.c  9
-rw-r--r--  arch/x86/kernel/kdebugfs.c  21
-rw-r--r--  arch/x86/kernel/kprobes/core.c  4
-rw-r--r--  arch/x86/kernel/kprobes/opt.c  11
-rw-r--r--  arch/x86/kernel/ksysfs.c  31
-rw-r--r--  arch/x86/kernel/kvm.c  1
-rw-r--r--  arch/x86/kernel/machine_kexec_64.c  47
-rw-r--r--  arch/x86/kernel/paravirt.c  2
-rw-r--r--  arch/x86/kernel/pci-calgary_64.c  1586
-rw-r--r--  arch/x86/kernel/pci-dma.c  6
-rw-r--r--  arch/x86/kernel/process.c  205
-rw-r--r--  arch/x86/kernel/process_32.c  77
-rw-r--r--  arch/x86/kernel/process_64.c  86
-rw-r--r--  arch/x86/kernel/ptrace.c  12
-rw-r--r--  arch/x86/kernel/relocate_kernel_32.S  13
-rw-r--r--  arch/x86/kernel/relocate_kernel_64.S  13
-rw-r--r--  arch/x86/kernel/setup.c  42
-rw-r--r--  arch/x86/kernel/setup_percpu.c  4
-rw-r--r--  arch/x86/kernel/tboot.c  15
-rw-r--r--  arch/x86/kernel/tce_64.c  177
-rw-r--r--  arch/x86/kernel/traps.c  5
-rw-r--r--  arch/x86/kernel/tsc_sync.c  8
-rw-r--r--  arch/x86/kernel/umip.c  18
-rw-r--r--  arch/x86/kernel/uprobes.c  2
-rw-r--r--  arch/x86/kernel/verify_cpu.S  4
-rw-r--r--  arch/x86/kernel/vmlinux.lds.S  16
-rw-r--r--  arch/x86/kernel/x86_init.c  24
-rw-r--r--  arch/x86/kvm/Makefile  4
-rw-r--r--  arch/x86/kvm/cpuid.c  8
-rw-r--r--  arch/x86/kvm/emulate.c  6
-rw-r--r--  arch/x86/kvm/ioapic.c  34
-rw-r--r--  arch/x86/kvm/kvm_cache_regs.h  51
-rw-r--r--  arch/x86/kvm/lapic.c  111
-rw-r--r--  arch/x86/kvm/lapic.h  3
-rw-r--r--  arch/x86/kvm/mmu.h  4
-rw-r--r--  arch/x86/kvm/mmu/mmu.c (renamed from arch/x86/kvm/mmu.c)  284
-rw-r--r--  arch/x86/kvm/mmu/page_track.c (renamed from arch/x86/kvm/page_track.c)  0
-rw-r--r--  arch/x86/kvm/mmu/paging_tmpl.h (renamed from arch/x86/kvm/paging_tmpl.h)  29
-rw-r--r--  arch/x86/kvm/pmu.c  128
-rw-r--r--  arch/x86/kvm/pmu.h  29
-rw-r--r--  arch/x86/kvm/pmu_amd.c  24
-rw-r--r--  arch/x86/kvm/svm.c  140
-rw-r--r--  arch/x86/kvm/vmx/nested.c  252
-rw-r--r--  arch/x86/kvm/vmx/nested.h  9
-rw-r--r--  arch/x86/kvm/vmx/pmu_intel.c  34
-rw-r--r--  arch/x86/kvm/vmx/vmenter.S  12
-rw-r--r--  arch/x86/kvm/vmx/vmx.c  370
-rw-r--r--  arch/x86/kvm/vmx/vmx.h  23
-rw-r--r--  arch/x86/kvm/x86.c  315
-rw-r--r--  arch/x86/kvm/x86.h  15
-rw-r--r--  arch/x86/lib/atomic64_386_32.S  4
-rw-r--r--  arch/x86/lib/atomic64_cx8_32.S  32
-rw-r--r--  arch/x86/lib/checksum_32.S  16
-rw-r--r--  arch/x86/lib/clear_page_64.S  12
-rw-r--r--  arch/x86/lib/cmpxchg16b_emu.S  4
-rw-r--r--  arch/x86/lib/cmpxchg8b_emu.S  4
-rw-r--r--  arch/x86/lib/copy_page_64.S  8
-rw-r--r--  arch/x86/lib/copy_user_64.S  21
-rw-r--r--  arch/x86/lib/csum-copy_64.S  4
-rw-r--r--  arch/x86/lib/getuser.S  22
-rw-r--r--  arch/x86/lib/hweight.S  8
-rw-r--r--  arch/x86/lib/insn.c  34
-rw-r--r--  arch/x86/lib/iomap_copy_64.S  4
-rw-r--r--  arch/x86/lib/memcpy_64.S  20
-rw-r--r--  arch/x86/lib/memmove_64.S  8
-rw-r--r--  arch/x86/lib/memset_64.S  16
-rw-r--r--  arch/x86/lib/msr-reg.S  8
-rw-r--r--  arch/x86/lib/putuser.S  19
-rw-r--r--  arch/x86/lib/retpoline.S  4
-rw-r--r--  arch/x86/lib/x86-opcode-map.txt  18
-rw-r--r--  arch/x86/math-emu/div_Xsig.S  4
-rw-r--r--  arch/x86/math-emu/div_small.S  4
-rw-r--r--  arch/x86/math-emu/fpu_system.h  6
-rw-r--r--  arch/x86/math-emu/mul_Xsig.S  12
-rw-r--r--  arch/x86/math-emu/polynom_Xsig.S  4
-rw-r--r--  arch/x86/math-emu/reg_ld_str.c  6
-rw-r--r--  arch/x86/math-emu/reg_norm.S  8
-rw-r--r--  arch/x86/math-emu/reg_round.S  4
-rw-r--r--  arch/x86/math-emu/reg_u_add.S  4
-rw-r--r--  arch/x86/math-emu/reg_u_div.S  4
-rw-r--r--  arch/x86/math-emu/reg_u_mul.S  4
-rw-r--r--  arch/x86/math-emu/reg_u_sub.S  4
-rw-r--r--  arch/x86/math-emu/round_Xsig.S  8
-rw-r--r--  arch/x86/math-emu/shr_Xsig.S  4
-rw-r--r--  arch/x86/math-emu/wm_shrx.S  8
-rw-r--r--  arch/x86/math-emu/wm_sqrt.S  4
-rw-r--r--  arch/x86/mm/Makefile  4
-rw-r--r--  arch/x86/mm/cpu_entry_area.c  12
-rw-r--r--  arch/x86/mm/extable.c  49
-rw-r--r--  arch/x86/mm/init.c  8
-rw-r--r--  arch/x86/mm/init_64.c  16
-rw-r--r--  arch/x86/mm/ioremap.c  11
-rw-r--r--  arch/x86/mm/kmmio.c  7
-rw-r--r--  arch/x86/mm/maccess.c  43
-rw-r--r--  arch/x86/mm/mem_encrypt_boot.S  8
-rw-r--r--  arch/x86/mm/mmio-mod.c  6
-rw-r--r--  arch/x86/mm/numa.c  2
-rw-r--r--  arch/x86/mm/numa_emulation.c  4
-rw-r--r--  arch/x86/mm/pat.c  8
-rw-r--r--  arch/x86/mm/pat_internal.h  20
-rw-r--r--  arch/x86/mm/pat_interval.c  185
-rw-r--r--  arch/x86/mm/pat_rbtree.c  268
-rw-r--r--  arch/x86/mm/pgtable.c  4
-rw-r--r--  arch/x86/mm/pti.c  2
-rw-r--r--  arch/x86/mm/testmmiotrace.c  6
-rw-r--r--  arch/x86/net/bpf_jit_comp.c  620
-rw-r--r--  arch/x86/oprofile/op_x86_model.h  6
-rw-r--r--  arch/x86/platform/efi/efi.c  54
-rw-r--r--  arch/x86/platform/efi/efi_stub_32.S  4
-rw-r--r--  arch/x86/platform/efi/efi_stub_64.S  4
-rw-r--r--  arch/x86/platform/efi/efi_thunk_64.S  16
-rw-r--r--  arch/x86/platform/efi/quirks.c  3
-rw-r--r--  arch/x86/platform/olpc/olpc-xo15-sci.c  2
-rw-r--r--  arch/x86/platform/olpc/xo1-wakeup.S  3
-rw-r--r--  arch/x86/platform/pvh/head.S  18
-rw-r--r--  arch/x86/platform/sfi/sfi.c  3
-rw-r--r--  arch/x86/platform/uv/bios_uv.c  9
-rw-r--r--  arch/x86/power/hibernate_asm_32.S  14
-rw-r--r--  arch/x86/power/hibernate_asm_64.S  14
-rw-r--r--  arch/x86/purgatory/entry64.S  24
-rw-r--r--  arch/x86/purgatory/purgatory.c  19
-rw-r--r--  arch/x86/purgatory/setup-x86_64.S  14
-rw-r--r--  arch/x86/purgatory/stack.S  7
-rw-r--r--  arch/x86/realmode/init.c  2
-rw-r--r--  arch/x86/realmode/rm/header.S  8
-rw-r--r--  arch/x86/realmode/rm/realmode.lds.S  1
-rw-r--r--  arch/x86/realmode/rm/reboot.S  13
-rw-r--r--  arch/x86/realmode/rm/stack.S  14
-rw-r--r--  arch/x86/realmode/rm/trampoline_32.S  16
-rw-r--r--  arch/x86/realmode/rm/trampoline_64.S  29
-rw-r--r--  arch/x86/realmode/rm/trampoline_common.S  2
-rw-r--r--  arch/x86/realmode/rm/wakeup_asm.S  17
-rw-r--r--  arch/x86/realmode/rmpiggy.S  10
-rw-r--r--  arch/x86/tools/gen-insn-attr-x86.awk  4
-rw-r--r--  arch/x86/um/vdso/vdso.S  6
-rw-r--r--  arch/x86/xen/enlighten_pv.c  10
-rw-r--r--  arch/x86/xen/setup.c  2
-rw-r--r--  arch/x86/xen/xen-asm.S  28
-rw-r--r--  arch/x86/xen/xen-asm_32.S  80
-rw-r--r--  arch/x86/xen/xen-asm_64.S  34
-rw-r--r--  arch/x86/xen/xen-head.S  8
-rw-r--r--  arch/xtensa/kernel/vmlinux.lds.S  8
-rw-r--r--  block/Kconfig  4
-rw-r--r--  block/Kconfig.iosched  1
-rw-r--r--  block/Makefile  1
-rw-r--r--  block/bfq-cgroup.c  85
-rw-r--r--  block/bfq-iosched.c  36
-rw-r--r--  block/bfq-iosched.h  10
-rw-r--r--  block/bio.c  2
-rw-r--r--  block/blk-cgroup-rwstat.c  129
-rw-r--r--  block/blk-cgroup-rwstat.h  149
-rw-r--r--  block/blk-cgroup.c  304
-rw-r--r--  block/blk-core.c  16
-rw-r--r--  block/blk-exec.c  2
-rw-r--r--  block/blk-flush.c  15
-rw-r--r--  block/blk-iocost.c  8
-rw-r--r--  block/blk-merge.c  17
-rw-r--r--  block/blk-mq-sysfs.c  31
-rw-r--r--  block/blk-mq-tag.c  8
-rw-r--r--  block/blk-mq-tag.h  1
-rw-r--r--  block/blk-mq.c  136
-rw-r--r--  block/blk-mq.h  9
-rw-r--r--  block/blk-softirq.c  4
-rw-r--r--  block/blk-stat.c  7
-rw-r--r--  block/blk-sysfs.c  8
-rw-r--r--  block/blk-throttle.c  71
-rw-r--r--  block/blk-zoned.c  453
-rw-r--r--  block/blk.h  7
-rw-r--r--  block/elevator.c  9
-rw-r--r--  block/genhd.c  8
-rw-r--r--  block/ioctl.c  42
-rw-r--r--  block/opal_proto.h  6
-rw-r--r--  block/partition-generic.c  231
-rw-r--r--  block/sed-opal.c  318
-rw-r--r--  block/t10-pi.c  8
-rw-r--r--  crypto/Kconfig  171
-rw-r--r--  crypto/Makefile  11
-rw-r--r--  crypto/ablkcipher.c  407
-rw-r--r--  crypto/adiantum.c  5
-rw-r--r--  crypto/aead.c  165
-rw-r--r--  crypto/aegis128-core.c  125
-rw-r--r--  crypto/aegis128-neon-inner.c  50
-rw-r--r--  crypto/aegis128-neon.c  21
-rw-r--r--  crypto/af_alg.c  2
-rw-r--r--  crypto/algapi.c  26
-rw-r--r--  crypto/algif_skcipher.c  2
-rw-r--r--  crypto/api.c  3
-rw-r--r--  crypto/asymmetric_keys/asym_tpm.c  101
-rw-r--r--  crypto/blake2b_generic.c  320
-rw-r--r--  crypto/blake2s_generic.c  171
-rw-r--r--  crypto/blkcipher.c  548
-rw-r--r--  crypto/chacha_generic.c  94
-rw-r--r--  crypto/cryptd.c  2
-rw-r--r--  crypto/crypto_engine.c  29
-rw-r--r--  crypto/crypto_user_base.c  4
-rw-r--r--  crypto/crypto_user_stat.c  8
-rw-r--r--  crypto/curve25519-generic.c  90
-rw-r--r--  crypto/ecc.c  5
-rw-r--r--  crypto/essiv.c  9
-rw-r--r--  crypto/geniv.c  176
-rw-r--r--  crypto/jitterentropy-kcapi.c  8
-rw-r--r--  crypto/jitterentropy.c  13
-rw-r--r--  crypto/jitterentropy.h  17
-rw-r--r--  crypto/nhpoly1305.c  3
-rw-r--r--  crypto/poly1305_generic.c  228
-rw-r--r--  crypto/skcipher.c  230
-rw-r--r--  crypto/tcrypt.c  4
-rw-r--r--  crypto/testmgr.c  82
-rw-r--r--  crypto/testmgr.h  2124
-rw-r--r--  crypto/tgr192.c  4
-rw-r--r--  drivers/acpi/Kconfig  23
-rw-r--r--  drivers/acpi/Makefile  8
-rw-r--r--  drivers/acpi/acpi_configfs.c  4
-rw-r--r--  drivers/acpi/acpi_lpss.c  48
-rw-r--r--  drivers/acpi/acpi_platform.c  43
-rw-r--r--  drivers/acpi/acpi_video.c  8
-rw-r--r--  drivers/acpi/acpica/acdebug.h  2
-rw-r--r--  drivers/acpi/acpica/acstruct.h  10
-rw-r--r--  drivers/acpi/acpica/acutils.h  9
-rw-r--r--  drivers/acpi/acpica/dbconvert.c  4
-rw-r--r--  drivers/acpi/acpica/dbdisply.c  2
-rw-r--r--  drivers/acpi/acpica/dbfileio.c  2
-rw-r--r--  drivers/acpi/acpica/dbinput.c  36
-rw-r--r--  drivers/acpi/acpica/dbmethod.c  4
-rw-r--r--  drivers/acpi/acpica/dbnames.c  114
-rw-r--r--  drivers/acpi/acpica/dbobject.c  1
-rw-r--r--  drivers/acpi/acpica/dscontrol.c  2
-rw-r--r--  drivers/acpi/acpica/dsfield.c  12
-rw-r--r--  drivers/acpi/acpica/evgpeblk.c  11
-rw-r--r--  drivers/acpi/acpica/evgpeinit.c  3
-rw-r--r--  drivers/acpi/acpica/evmisc.c  12
-rw-r--r--  drivers/acpi/acpica/evregion.c  4
-rw-r--r--  drivers/acpi/acpica/evrgnini.c  1
-rw-r--r--  drivers/acpi/acpica/hwxfsleep.c  3
-rw-r--r--  drivers/acpi/acpica/nsconvert.c  2
-rw-r--r--  drivers/acpi/acpica/nsdump.c  6
-rw-r--r--  drivers/acpi/acpica/nsxfname.c  4
-rw-r--r--  drivers/acpi/acpica/psobject.c  7
-rw-r--r--  drivers/acpi/acpica/rscreate.c  3
-rw-r--r--  drivers/acpi/acpica/tbdata.c  3
-rw-r--r--  drivers/acpi/acpica/tbxfload.c  40
-rw-r--r--  drivers/acpi/acpica/utbuffer.c  52
-rw-r--r--  drivers/acpi/acpica/utids.c  2
-rw-r--r--  drivers/acpi/acpica/uttrack.c  2
-rw-r--r--  drivers/acpi/apei/apei-base.c  44
-rw-r--r--  drivers/acpi/apei/einj.c  4
-rw-r--r--  drivers/acpi/apei/erst-dbg.c  5
-rw-r--r--  drivers/acpi/apei/ghes.c  25
-rw-r--r--  drivers/acpi/apei/hest.c  14
-rw-r--r--  drivers/acpi/battery.c  2
-rw-r--r--  drivers/acpi/button.c  139
-rw-r--r--  drivers/acpi/ec.c  195
-rw-r--r--  drivers/acpi/hmat/Makefile  2
-rw-r--r--  drivers/acpi/internal.h  3
-rw-r--r--  drivers/acpi/numa/Kconfig (renamed from drivers/acpi/hmat/Kconfig)  7
-rw-r--r--  drivers/acpi/numa/Makefile  3
-rw-r--r--  drivers/acpi/numa/hmat.c (renamed from drivers/acpi/hmat/hmat.c)  158
-rw-r--r--drivers/acpi/numa/srat.c (renamed from drivers/acpi/numa.c)0
-rw-r--r--drivers/acpi/osi.c6
-rw-r--r--drivers/acpi/pmic/intel_pmic.c20
-rw-r--r--drivers/acpi/pmic/intel_pmic_bytcrc.c (renamed from drivers/acpi/pmic/intel_pmic_crc.c)4
-rw-r--r--drivers/acpi/pmic/intel_pmic_chtcrc.c44
-rw-r--r--drivers/acpi/processor_idle.c21
-rw-r--r--drivers/acpi/property.c48
-rw-r--r--drivers/acpi/resource.c4
-rw-r--r--drivers/acpi/scan.c1
-rw-r--r--drivers/acpi/utils.c32
-rw-r--r--drivers/android/binder.c6
-rw-r--r--drivers/android/binder_alloc.c42
-rw-r--r--drivers/ata/acard-ahci.c6
-rw-r--r--drivers/ata/ahci.c13
-rw-r--r--drivers/ata/ahci_tegra.c6
-rw-r--r--drivers/ata/ata_piix.c14
-rw-r--r--drivers/ata/libahci.c6
-rw-r--r--drivers/ata/libata-core.c12
-rw-r--r--drivers/ata/libata-sff.c12
-rw-r--r--drivers/ata/pata_artop.c4
-rw-r--r--drivers/ata/pata_falcon.c42
-rw-r--r--drivers/ata/pata_macio.c6
-rw-r--r--drivers/ata/pata_pxa.c8
-rw-r--r--drivers/ata/pdc_adma.c7
-rw-r--r--drivers/ata/sata_fsl.c4
-rw-r--r--drivers/ata/sata_inic162x.c4
-rw-r--r--drivers/ata/sata_mv.c34
-rw-r--r--drivers/ata/sata_nv.c18
-rw-r--r--drivers/ata/sata_promise.c6
-rw-r--r--drivers/ata/sata_qstor.c8
-rw-r--r--drivers/ata/sata_rcar.c6
-rw-r--r--drivers/ata/sata_sil.c8
-rw-r--r--drivers/ata/sata_sil24.c6
-rw-r--r--drivers/ata/sata_sx4.c6
-rw-r--r--drivers/atm/firestream.c2
-rw-r--r--drivers/base/cpu.c17
-rw-r--r--drivers/base/memory.c36
-rw-r--r--drivers/base/power/common.c20
-rw-r--r--drivers/base/power/domain.c40
-rw-r--r--drivers/base/power/power.h30
-rw-r--r--drivers/base/power/wakeirq.c4
-rw-r--r--drivers/base/property.c83
-rw-r--r--drivers/base/regmap/regmap-w1.c4
-rw-r--r--drivers/base/swnode.c258
-rw-r--r--drivers/bcma/driver_chipcommon_pmu.c24
-rw-r--r--drivers/block/drbd/drbd_nl.c13
-rw-r--r--drivers/block/loop.c39
-rw-r--r--drivers/block/mtip32xx/mtip32xx.c2
-rw-r--r--drivers/block/nbd.c6
-rw-r--r--drivers/block/null_blk.h19
-rw-r--r--drivers/block/null_blk_main.c125
-rw-r--r--drivers/block/null_blk_zoned.c87
-rw-r--r--drivers/block/rbd.c2
-rw-r--r--drivers/block/rsxx/core.c2
-rw-r--r--drivers/bluetooth/Kconfig11
-rw-r--r--drivers/bluetooth/Makefile1
-rw-r--r--drivers/bluetooth/btbcm.c10
-rw-r--r--drivers/bluetooth/btintel.c45
-rw-r--r--drivers/bluetooth/btintel.h5
-rw-r--r--drivers/bluetooth/btmtksdio.c1
-rw-r--r--drivers/bluetooth/btqca.c92
-rw-r--r--drivers/bluetooth/btqca.h32
-rw-r--r--drivers/bluetooth/btrtl.c4
-rw-r--r--drivers/bluetooth/btusb.c57
-rw-r--r--drivers/bluetooth/btwilink.c337
-rw-r--r--drivers/bluetooth/hci_bcm.c4
-rw-r--r--drivers/bluetooth/hci_bcsp.c3
-rw-r--r--drivers/bluetooth/hci_ll.c39
-rw-r--r--drivers/bluetooth/hci_nokia.c2
-rw-r--r--drivers/bluetooth/hci_qca.c278
-rw-r--r--drivers/bus/fsl-mc/dprc-driver.c6
-rw-r--r--drivers/bus/fsl-mc/dprc.c53
-rw-r--r--drivers/bus/fsl-mc/fsl-mc-bus.c43
-rw-r--r--drivers/bus/fsl-mc/fsl-mc-private.h42
-rw-r--r--drivers/cdrom/gdrom.c4
-rw-r--r--drivers/char/Kconfig6
-rw-r--r--drivers/char/agp/Kconfig2
-rw-r--r--drivers/char/hw_random/Kconfig46
-rw-r--r--drivers/char/hw_random/Makefile2
-rw-r--r--drivers/char/hw_random/atmel-rng.c43
-rw-r--r--drivers/char/hw_random/bcm2835-rng.c5
-rw-r--r--drivers/char/hw_random/core.c66
-rw-r--r--drivers/char/hw_random/exynos-trng.c4
-rw-r--r--drivers/char/hw_random/hisi-rng.c4
-rw-r--r--drivers/char/hw_random/hisi-trng-v2.c99
-rw-r--r--drivers/char/hw_random/iproc-rng200.c9
-rw-r--r--drivers/char/hw_random/ks-sa-rng.c44
-rw-r--r--drivers/char/hw_random/meson-rng.c4
-rw-r--r--drivers/char/hw_random/mtk-rng.c9
-rw-r--r--drivers/char/hw_random/npcm-rng.c184
-rw-r--r--drivers/char/hw_random/omap-rng.c13
-rw-r--r--drivers/char/hw_random/omap3-rom-rng.c168
-rw-r--r--drivers/char/hw_random/pasemi-rng.c4
-rw-r--r--drivers/char/hw_random/pic32-rng.c4
-rw-r--r--drivers/char/hw_random/st-rng.c4
-rw-r--r--drivers/char/hw_random/tx4939-rng.c4
-rw-r--r--drivers/char/hw_random/xgene-rng.c4
-rw-r--r--drivers/char/ipmi/Kconfig98
-rw-r--r--drivers/char/ipmi/bt-bmc.c4
-rw-r--r--drivers/char/ipmi/ipmb_dev_int.c37
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c55
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c40
-rw-r--r--drivers/char/lp.c4
-rw-r--r--drivers/char/ppdev.c16
-rw-r--r--drivers/char/random.c4
-rw-r--r--drivers/char/tpm/Kconfig7
-rw-r--r--drivers/char/tpm/Makefile4
-rw-r--r--drivers/char/tpm/tpm-interface.c64
-rw-r--r--drivers/char/tpm/tpm-sysfs.c45
-rw-r--r--drivers/char/tpm/tpm.h248
-rw-r--r--drivers/char/tpm/tpm1-cmd.c15
-rw-r--r--drivers/char/tpm/tpm2-cmd.c311
-rw-r--r--drivers/char/tpm/tpm_crb.c123
-rw-r--r--drivers/char/tpm/tpm_tis.c2
-rw-r--r--drivers/char/tpm/tpm_tis_core.c79
-rw-r--r--drivers/char/tpm/tpm_tis_spi.c143
-rw-r--r--drivers/char/tpm/tpm_tis_spi.h53
-rw-r--r--drivers/char/tpm/tpm_tis_spi_cr50.c322
-rw-r--r--drivers/char/virtio_console.c28
-rw-r--r--drivers/char/xillybus/xillybus_of.c5
-rw-r--r--drivers/clocksource/hyperv_timer.c154
-rw-r--r--drivers/clocksource/samsung_pwm_timer.c3
-rw-r--r--drivers/cpufreq/Kconfig.arm12
-rw-r--r--drivers/cpufreq/Makefile2
-rw-r--r--drivers/cpufreq/arm_big_little.c658
-rw-r--r--drivers/cpufreq/arm_big_little.h43
-rw-r--r--drivers/cpufreq/cpufreq-dt-platdev.c2
-rw-r--r--drivers/cpufreq/cpufreq.c35
-rw-r--r--drivers/cpufreq/cpufreq_governor.c6
-rw-r--r--drivers/cpufreq/imx-cpufreq-dt.c20
-rw-r--r--drivers/cpufreq/intel_pstate.c30
-rw-r--r--drivers/cpufreq/loongson2_cpufreq.c2
-rw-r--r--drivers/cpufreq/powernv-cpufreq.c17
-rw-r--r--drivers/cpufreq/s3c64xx-cpufreq.c7
-rw-r--r--drivers/cpufreq/scpi-cpufreq.c2
-rw-r--r--drivers/cpufreq/sun50i-cpufreq-nvmem.c25
-rw-r--r--drivers/cpufreq/ti-cpufreq.c119
-rw-r--r--drivers/cpufreq/vexpress-spc-cpufreq.c584
-rw-r--r--drivers/cpuidle/cpuidle-powernv.c7
-rw-r--r--drivers/cpuidle/cpuidle.c72
-rw-r--r--drivers/cpuidle/driver.c72
-rw-r--r--drivers/cpuidle/governor.c7
-rw-r--r--drivers/cpuidle/governors/haltpoll.c7
-rw-r--r--drivers/cpuidle/governors/ladder.c29
-rw-r--r--drivers/cpuidle/governors/menu.c131
-rw-r--r--drivers/cpuidle/governors/teo.c182
-rw-r--r--drivers/cpuidle/poll_state.c2
-rw-r--r--drivers/cpuidle/sysfs.c71
-rw-r--r--drivers/crypto/Kconfig92
-rw-r--r--drivers/crypto/Makefile3
-rw-r--r--drivers/crypto/allwinner/Kconfig87
-rw-r--r--drivers/crypto/allwinner/Makefile3
-rw-r--r--drivers/crypto/allwinner/sun4i-ss/Makefile (renamed from drivers/crypto/sunxi-ss/Makefile)0
-rw-r--r--drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c (renamed from drivers/crypto/sunxi-ss/sun4i-ss-cipher.c)34
-rw-r--r--drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c (renamed from drivers/crypto/sunxi-ss/sun4i-ss-core.c)139
-rw-r--r--drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c (renamed from drivers/crypto/sunxi-ss/sun4i-ss-hash.c)47
-rw-r--r--drivers/crypto/allwinner/sun4i-ss/sun4i-ss-prng.c (renamed from drivers/crypto/sunxi-ss/sun4i-ss-prng.c)9
-rw-r--r--drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h (renamed from drivers/crypto/sunxi-ss/sun4i-ss.h)2
-rw-r--r--drivers/crypto/allwinner/sun8i-ce/Makefile2
-rw-r--r--drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c438
-rw-r--r--drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c676
-rw-r--r--drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h254
-rw-r--r--drivers/crypto/allwinner/sun8i-ss/Makefile2
-rw-r--r--drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c436
-rw-r--r--drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c642
-rw-r--r--drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h218
-rw-r--r--drivers/crypto/amcc/crypto4xx_core.c6
-rw-r--r--drivers/crypto/amlogic/Kconfig24
-rw-r--r--drivers/crypto/amlogic/Makefile2
-rw-r--r--drivers/crypto/amlogic/amlogic-gxl-cipher.c382
-rw-r--r--drivers/crypto/amlogic/amlogic-gxl-core.c332
-rw-r--r--drivers/crypto/amlogic/amlogic-gxl.h161
-rw-r--r--drivers/crypto/atmel-aes.c590
-rw-r--r--drivers/crypto/atmel-authenc.h2
-rw-r--r--drivers/crypto/atmel-sha.c4
-rw-r--r--drivers/crypto/atmel-tdes.c469
-rw-r--r--drivers/crypto/bcm/cipher.c373
-rw-r--r--drivers/crypto/bcm/cipher.h10
-rw-r--r--drivers/crypto/bcm/spu2.c6
-rw-r--r--drivers/crypto/caam/Kconfig6
-rw-r--r--drivers/crypto/caam/caampkc.c72
-rw-r--r--drivers/crypto/caam/caampkc.h8
-rw-r--r--drivers/crypto/caam/ctrl.c222
-rw-r--r--drivers/crypto/caam/intern.h4
-rw-r--r--drivers/crypto/caam/qi.c8
-rw-r--r--drivers/crypto/caam/qi.h1
-rw-r--r--drivers/crypto/cavium/cpt/cptvf_algs.c292
-rw-r--r--drivers/crypto/cavium/nitrox/Kconfig2
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_aead.c39
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_dev.h15
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_main.c9
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_mbx.c8
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_req.h4
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_skcipher.c134
-rw-r--r--drivers/crypto/ccp/Kconfig2
-rw-r--r--drivers/crypto/ccp/ccp-crypto-aes-galois.c7
-rw-r--r--drivers/crypto/ccp/ccp-crypto-aes-xts.c94
-rw-r--r--drivers/crypto/ccp/ccp-crypto-aes.c169
-rw-r--r--drivers/crypto/ccp/ccp-crypto-des3.c100
-rw-r--r--drivers/crypto/ccp/ccp-crypto-main.c14
-rw-r--r--drivers/crypto/ccp/ccp-crypto.h13
-rw-r--r--drivers/crypto/ccp/ccp-dev-v5.c14
-rw-r--r--drivers/crypto/ccp/ccp-dev.c15
-rw-r--r--drivers/crypto/ccp/ccp-dmaengine.c1
-rw-r--r--drivers/crypto/ccp/ccp-ops.c3
-rw-r--r--drivers/crypto/ccp/psp-dev.c68
-rw-r--r--drivers/crypto/ccp/psp-dev.h1
-rw-r--r--drivers/crypto/ccree/cc_aead.c3
-rw-r--r--drivers/crypto/ccree/cc_cipher.c4
-rw-r--r--drivers/crypto/chelsio/Kconfig2
-rw-r--r--drivers/crypto/chelsio/chcr_algo.c334
-rw-r--r--drivers/crypto/chelsio/chcr_algo.h2
-rw-r--r--drivers/crypto/chelsio/chcr_crypto.h16
-rw-r--r--drivers/crypto/chelsio/chcr_ipsec.c27
-rw-r--r--drivers/crypto/chelsio/chtls/chtls.h5
-rw-r--r--drivers/crypto/chelsio/chtls/chtls_io.c15
-rw-r--r--drivers/crypto/chelsio/chtls/chtls_main.c20
-rw-r--r--drivers/crypto/geode-aes.c433
-rw-r--r--drivers/crypto/geode-aes.h15
-rw-r--r--drivers/crypto/hifn_795x.c183
-rw-r--r--drivers/crypto/hisilicon/Kconfig45
-rw-r--r--drivers/crypto/hisilicon/Makefile6
-rw-r--r--drivers/crypto/hisilicon/hpre/Makefile2
-rw-r--r--drivers/crypto/hisilicon/hpre/hpre.h83
-rw-r--r--drivers/crypto/hisilicon/hpre/hpre_crypto.c1137
-rw-r--r--drivers/crypto/hisilicon/hpre/hpre_main.c1052
-rw-r--r--drivers/crypto/hisilicon/qm.c142
-rw-r--r--drivers/crypto/hisilicon/qm.h17
-rw-r--r--drivers/crypto/hisilicon/sec2/Makefile2
-rw-r--r--drivers/crypto/hisilicon/sec2/sec.h156
-rw-r--r--drivers/crypto/hisilicon/sec2/sec_crypto.c889
-rw-r--r--drivers/crypto/hisilicon/sec2/sec_crypto.h198
-rw-r--r--drivers/crypto/hisilicon/sec2/sec_main.c1095
-rw-r--r--drivers/crypto/hisilicon/sgl.c184
-rw-r--r--drivers/crypto/hisilicon/sgl.h24
-rw-r--r--drivers/crypto/hisilicon/zip/zip.h1
-rw-r--r--drivers/crypto/hisilicon/zip/zip_crypto.c46
-rw-r--r--drivers/crypto/hisilicon/zip/zip_main.c294
-rw-r--r--drivers/crypto/inside-secure/safexcel.c329
-rw-r--r--drivers/crypto/inside-secure/safexcel.h131
-rw-r--r--drivers/crypto/inside-secure/safexcel_cipher.c1506
-rw-r--r--drivers/crypto/inside-secure/safexcel_hash.c1475
-rw-r--r--drivers/crypto/inside-secure/safexcel_ring.c5
-rw-r--r--drivers/crypto/ixp4xx_crypto.c228
-rw-r--r--drivers/crypto/marvell/cesa.h6
-rw-r--r--drivers/crypto/marvell/cipher.c14
-rw-r--r--drivers/crypto/mediatek/mtk-aes.c250
-rw-r--r--drivers/crypto/mxs-dcp.c140
-rw-r--r--drivers/crypto/n2_core.c206
-rw-r--r--drivers/crypto/nx/nx-aes-cbc.c81
-rw-r--r--drivers/crypto/nx/nx-aes-ccm.c45
-rw-r--r--drivers/crypto/nx/nx-aes-ctr.c87
-rw-r--r--drivers/crypto/nx/nx-aes-ecb.c76
-rw-r--r--drivers/crypto/nx/nx-aes-gcm.c29
-rw-r--r--drivers/crypto/nx/nx.c64
-rw-r--r--drivers/crypto/nx/nx.h19
-rw-r--r--drivers/crypto/nx/nx_debugfs.c18
-rw-r--r--drivers/crypto/omap-aes.c209
-rw-r--r--drivers/crypto/omap-aes.h4
-rw-r--r--drivers/crypto/omap-des.c232
-rw-r--r--drivers/crypto/padlock-aes.c157
-rw-r--r--drivers/crypto/picoxcell_crypto.c386
-rw-r--r--drivers/crypto/qat/Kconfig2
-rw-r--r--drivers/crypto/qat/qat_common/qat_algs.c304
-rw-r--r--drivers/crypto/qat/qat_common/qat_crypto.h4
-rw-r--r--drivers/crypto/qce/Makefile2
-rw-r--r--drivers/crypto/qce/cipher.h8
-rw-r--r--drivers/crypto/qce/common.c12
-rw-r--r--drivers/crypto/qce/common.h3
-rw-r--r--drivers/crypto/qce/core.c2
-rw-r--r--drivers/crypto/qce/dma.c4
-rw-r--r--drivers/crypto/qce/sha.c2
-rw-r--r--drivers/crypto/qce/skcipher.c (renamed from drivers/crypto/qce/ablkcipher.c)172
-rw-r--r--drivers/crypto/rockchip/Makefile2
-rw-r--r--drivers/crypto/rockchip/rk3288_crypto.c8
-rw-r--r--drivers/crypto/rockchip/rk3288_crypto.h3
-rw-r--r--drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c556
-rw-r--r--drivers/crypto/rockchip/rk3288_crypto_skcipher.c538
-rw-r--r--drivers/crypto/s5p-sss.c187
-rw-r--r--drivers/crypto/sahara.c156
-rw-r--r--drivers/crypto/stm32/stm32-cryp.c338
-rw-r--r--drivers/crypto/talitos.c314
-rw-r--r--drivers/crypto/ux500/Kconfig2
-rw-r--r--drivers/crypto/ux500/cryp/cryp_core.c371
-rw-r--r--drivers/crypto/ux500/hash/hash_core.c3
-rw-r--r--drivers/crypto/virtio/Kconfig2
-rw-r--r--drivers/crypto/virtio/virtio_crypto_algs.c192
-rw-r--r--drivers/crypto/virtio/virtio_crypto_common.h2
-rw-r--r--drivers/crypto/vmx/Makefile6
-rw-r--r--drivers/dax/Kconfig27
-rw-r--r--drivers/dax/Makefile2
-rw-r--r--drivers/dax/bus.c2
-rw-r--r--drivers/dax/bus.h2
-rw-r--r--drivers/dax/dax-private.h2
-rw-r--r--drivers/dax/hmem.c56
-rw-r--r--drivers/devfreq/devfreq.c33
-rw-r--r--drivers/devfreq/event/exynos-ppmu.c1
-rw-r--r--drivers/devfreq/governor.h3
-rw-r--r--drivers/devfreq/tegra30-devfreq.c417
-rw-r--r--drivers/edac/altera_edac.c152
-rw-r--r--drivers/edac/amd64_edac.c217
-rw-r--r--drivers/edac/amd64_edac.h2
-rw-r--r--drivers/edac/aspeed_edac.c7
-rw-r--r--drivers/edac/edac_device.c50
-rw-r--r--drivers/edac/edac_device.h54
-rw-r--r--drivers/edac/edac_mc.c138
-rw-r--r--drivers/edac/edac_mc_sysfs.c49
-rw-r--r--drivers/edac/ghes_edac.c128
-rw-r--r--drivers/edac/i10nm_base.c3
-rw-r--r--drivers/edac/i3200_edac.c3
-rw-r--r--drivers/edac/i5000_edac.c5
-rw-r--r--drivers/edac/i5100_edac.c16
-rw-r--r--drivers/edac/i5400_edac.c18
-rw-r--r--drivers/edac/i7300_edac.c7
-rw-r--r--drivers/edac/i7core_edac.c3
-rw-r--r--drivers/edac/ie31200_edac.c7
-rw-r--r--drivers/edac/pnd2_edac.c4
-rw-r--r--drivers/edac/sb_edac.c23
-rw-r--r--drivers/edac/skx_base.c54
-rw-r--r--drivers/edac/skx_common.c65
-rw-r--r--drivers/edac/skx_common.h4
-rw-r--r--drivers/edac/ti_edac.c2
-rw-r--r--drivers/extcon/extcon-axp288.c38
-rw-r--r--drivers/extcon/extcon-intel-cht-wc.c16
-rw-r--r--drivers/extcon/extcon-sm5502.c6
-rw-r--r--drivers/extcon/extcon-sm5502.h2
-rw-r--r--drivers/firewire/net.c6
-rw-r--r--drivers/firmware/arm_sdei.c12
-rw-r--r--drivers/firmware/broadcom/Kconfig8
-rw-r--r--drivers/firmware/broadcom/Makefile1
-rw-r--r--drivers/firmware/broadcom/tee_bnxt_fw.c279
-rw-r--r--drivers/firmware/efi/Kconfig21
-rw-r--r--drivers/firmware/efi/Makefile5
-rw-r--r--drivers/firmware/efi/apple-properties.c18
-rw-r--r--drivers/firmware/efi/arm-init.c9
-rw-r--r--drivers/firmware/efi/arm-runtime.c24
-rw-r--r--drivers/firmware/efi/efi.c15
-rw-r--r--drivers/firmware/efi/esrt.c3
-rw-r--r--drivers/firmware/efi/fake_mem.c26
-rw-r--r--drivers/firmware/efi/fake_mem.h10
-rw-r--r--drivers/firmware/efi/libstub/Makefile5
-rw-r--r--drivers/firmware/efi/libstub/arm-stub.c2
-rw-r--r--drivers/firmware/efi/libstub/arm32-stub.c5
-rw-r--r--drivers/firmware/efi/libstub/efi-stub-helper.c19
-rw-r--r--drivers/firmware/efi/libstub/efistub.h2
-rw-r--r--drivers/firmware/efi/libstub/random.c27
-rw-r--r--drivers/firmware/efi/x86_fake_mem.c69
-rw-r--r--drivers/firmware/psci/psci.c24
-rw-r--r--drivers/firmware/stratix10-rsu.c42
-rw-r--r--drivers/firmware/stratix10-svc.c18
-rw-r--r--drivers/fpga/Kconfig2
-rw-r--r--drivers/fpga/dfl-fme-main.c385
-rw-r--r--drivers/fpga/zynq-fpga.c4
-rw-r--r--drivers/fsi/Kconfig8
-rw-r--r--drivers/fsi/Makefile1
-rw-r--r--drivers/fsi/fsi-core.c67
-rw-r--r--drivers/fsi/fsi-master-aspeed.c544
-rw-r--r--drivers/fsi/fsi-master-hub.c46
-rw-r--r--drivers/fsi/fsi-master.h71
-rw-r--r--drivers/gpio/Kconfig2
-rw-r--r--drivers/gpio/gpio-bd70528.c6
-rw-r--r--drivers/gpio/gpio-loongson.c2
-rw-r--r--drivers/gpio/gpio-max77620.c6
-rw-r--r--drivers/gpio/gpio-menz127.c1
-rw-r--r--drivers/gpio/gpiolib-acpi.c17
-rw-r--r--drivers/gpio/gpiolib-devres.c33
-rw-r--r--drivers/gpio/gpiolib.c48
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c38
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c9
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c2
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c23
-rw-r--r--drivers/gpu/drm/amd/powerplay/navi10_ppt.c6
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c11
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi.c41
-rw-r--r--drivers/gpu/drm/drm_connector.c2
-rw-r--r--drivers/gpu/drm/i915/Kconfig.debug1
-rw-r--r--drivers/gpu/drm/i915/display/intel_atomic.c1
-rw-r--r--drivers/gpu/drm/i915/display/intel_color.c61
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.c9
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power.c3
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_types.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbdev.c9
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_context.c7
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_context_types.h7
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c111
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_shrinker.c6
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_userptr.c22
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_pm.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_pool.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_types.h13
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_pm.c8
-rw-r--r--drivers/gpu/drm/i915/gt/intel_mocs.c10
-rw-r--r--drivers/gpu/drm/i915/gvt/dmabuf.c4
-rw-r--r--drivers/gpu/drm/i915/i915_cmd_parser.c435
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c7
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h31
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c16
-rw-r--r--drivers/gpu/drm/i915/i915_getparam.c2
-rw-r--r--drivers/gpu/drm/i915/i915_pmu.c4
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h10
-rw-r--r--drivers/gpu/drm/i915/i915_request.c2
-rw-r--r--drivers/gpu/drm/i915/i915_scheduler.c50
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c122
-rw-r--r--drivers/gpu/drm/i915/intel_pm.h3
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.c2
-rw-r--r--drivers/greybus/connection.c3
-rw-r--r--drivers/hv/hv.c4
-rw-r--r--drivers/hv/vmbus_drv.c30
-rw-r--r--drivers/hwmon/Kconfig38
-rw-r--r--drivers/hwmon/Makefile4
-rw-r--r--drivers/hwmon/abituguru.c2
-rw-r--r--drivers/hwmon/applesmc.c38
-rw-r--r--drivers/hwmon/aspeed-pwm-tacho.c7
-rw-r--r--drivers/hwmon/dell-smm-hwmon.c115
-rw-r--r--drivers/hwmon/ina3221.c163
-rw-r--r--drivers/hwmon/ltc2947-core.c1183
-rw-r--r--drivers/hwmon/ltc2947-i2c.c49
-rw-r--r--drivers/hwmon/ltc2947-spi.c50
-rw-r--r--drivers/hwmon/ltc2947.h12
-rw-r--r--drivers/hwmon/pmbus/Kconfig9
-rw-r--r--drivers/hwmon/pmbus/Makefile1
-rw-r--r--drivers/hwmon/pmbus/bel-pfe.c131
-rw-r--r--drivers/hwmon/pmbus/ibm-cffps.c74
-rw-r--r--drivers/hwmon/tmp421.c3
-rw-r--r--drivers/hwmon/tmp513.c772
-rw-r--r--drivers/hwmon/w83793.c2
-rw-r--r--drivers/hwtracing/coresight/Kconfig1
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x-sysfs.c312
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x.c351
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x.h81
-rw-r--r--drivers/hwtracing/coresight/coresight-funnel.c37
-rw-r--r--drivers/hwtracing/coresight/coresight-replicator.c36
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-etf.c26
-rw-r--r--drivers/hwtracing/coresight/coresight.c51
-rw-r--r--drivers/hwtracing/intel_th/core.c8
-rw-r--r--drivers/hwtracing/intel_th/pci.c10
-rw-r--r--drivers/hwtracing/stm/policy.c4
-rw-r--r--drivers/i2c/i2c-core-acpi.c28
-rw-r--r--drivers/i2c/i2c-core-of.c4
-rw-r--r--drivers/i3c/master.c4
-rw-r--r--drivers/ide/falconide.c60
-rw-r--r--drivers/ide/tx4938ide.c2
-rw-r--r--drivers/ide/tx4939ide.c6
-rw-r--r--drivers/iio/adc/men_z188_adc.c1
-rw-r--r--drivers/iio/imu/adis.c24
-rw-r--r--drivers/infiniband/Kconfig1
-rw-r--r--drivers/infiniband/core/Makefile2
-rw-r--r--drivers/infiniband/core/cache.c8
-rw-r--r--drivers/infiniband/core/cm.c66
-rw-r--r--drivers/infiniband/core/cm_msgs.h32
-rw-r--r--drivers/infiniband/core/cma.c107
-rw-r--r--drivers/infiniband/core/core_priv.h11
-rw-r--r--drivers/infiniband/core/counters.c40
-rw-r--r--drivers/infiniband/core/device.c50
-rw-r--r--drivers/infiniband/core/ib_core_uverbs.c335
-rw-r--r--drivers/infiniband/core/iwpm_util.h5
-rw-r--r--drivers/infiniband/core/mad.c31
-rw-r--r--drivers/infiniband/core/nldev.c141
-rw-r--r--drivers/infiniband/core/rdma_core.c1
-rw-r--r--drivers/infiniband/core/restrack.c20
-rw-r--r--drivers/infiniband/core/restrack.h1
-rw-r--r--drivers/infiniband/core/rw.c25
-rw-r--r--drivers/infiniband/core/sa_query.c2
-rw-r--r--drivers/infiniband/core/sysfs.c12
-rw-r--r--drivers/infiniband/core/umem.c12
-rw-r--r--drivers/infiniband/core/umem_odp.c38
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c2
-rw-r--r--drivers/infiniband/core/uverbs_ioctl.c3
-rw-r--r--drivers/infiniband/core/uverbs_main.c84
-rw-r--r--drivers/infiniband/core/verbs.c12
-rw-r--r--drivers/infiniband/hw/Makefile1
-rw-r--r--drivers/infiniband/hw/bnxt_re/Kconfig12
-rw-r--r--drivers/infiniband/hw/bnxt_re/bnxt_re.h1
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.c28
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.h3
-rw-r--r--drivers/infiniband/hw/bnxt_re/main.c143
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.c5
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_res.h8
-rw-r--r--drivers/infiniband/hw/cxgb3/Kconfig19
-rw-r--r--drivers/infiniband/hw/cxgb3/Makefile7
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_hal.c1312
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_hal.h204
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_resource.c344
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_resource.h69
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_wr.h802
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch.c282
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch.h155
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_cm.c2258
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_cm.h233
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_cq.c230
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_ev.c232
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_mem.c101
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_provider.c1321
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_provider.h347
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_qp.c1082
-rw-r--r--drivers/infiniband/hw/cxgb3/tcb.h632
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c4
-rw-r--r--drivers/infiniband/hw/cxgb4/mem.c2
-rw-r--r--drivers/infiniband/hw/cxgb4/provider.c7
-rw-r--r--drivers/infiniband/hw/efa/efa.h13
-rw-r--r--drivers/infiniband/hw/efa/efa_admin_cmds_defs.h29
-rw-r--r--drivers/infiniband/hw/efa/efa_com.c5
-rw-r--r--drivers/infiniband/hw/efa/efa_com_cmd.c40
-rw-r--r--drivers/infiniband/hw/efa/efa_com_cmd.h19
-rw-r--r--drivers/infiniband/hw/efa/efa_main.c17
-rw-r--r--drivers/infiniband/hw/efa/efa_verbs.c370
-rw-r--r--drivers/infiniband/hw/hfi1/init.c1
-rw-r--r--drivers/infiniband/hw/hfi1/mad.c17
-rw-r--r--drivers/infiniband/hw/hfi1/pcie.c4
-rw-r--r--drivers/infiniband/hw/hfi1/platform.c2
-rw-r--r--drivers/infiniband/hw/hfi1/rc.c16
-rw-r--r--drivers/infiniband/hw/hfi1/sdma.c4
-rw-r--r--drivers/infiniband/hw/hfi1/tid_rdma.c57
-rw-r--r--drivers/infiniband/hw/hfi1/tid_rdma.h3
-rw-r--r--drivers/infiniband/hw/hfi1/verbs.h5
-rw-r--r--drivers/infiniband/hw/hns/Kconfig17
-rw-r--r--drivers/infiniband/hw/hns/Makefile8
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_ah.c14
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_alloc.c4
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_cmd.h14
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_cq.c300
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_db.c2
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_device.h55
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hem.h2
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v1.c38
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.c76
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.h4
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_main.c21
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_mr.c69
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_pd.c2
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_qp.c54
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_restrack.c10
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_srq.c88
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_cm.c2
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_verbs.c2
-rw-r--r--drivers/infiniband/hw/mlx4/cq.c2
-rw-r--r--drivers/infiniband/hw/mlx4/doorbell.c2
-rw-r--r--drivers/infiniband/hw/mlx4/mad.c30
-rw-r--r--drivers/infiniband/hw/mlx4/main.c18
-rw-r--r--drivers/infiniband/hw/mlx4/mlx4_ib.h8
-rw-r--r--drivers/infiniband/hw/mlx4/mr.c2
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c5
-rw-r--r--drivers/infiniband/hw/mlx4/srq.c2
-rw-r--r--drivers/infiniband/hw/mlx5/Makefile2
-rw-r--r--drivers/infiniband/hw/mlx5/cq.c37
-rw-r--r--drivers/infiniband/hw/mlx5/devx.c25
-rw-r--r--drivers/infiniband/hw/mlx5/doorbell.c2
-rw-r--r--drivers/infiniband/hw/mlx5/flow.c29
-rw-r--r--drivers/infiniband/hw/mlx5/gsi.c2
-rw-r--r--drivers/infiniband/hw/mlx5/ib_rep.c2
-rw-r--r--drivers/infiniband/hw/mlx5/ib_rep.h2
-rw-r--r--drivers/infiniband/hw/mlx5/ib_virt.c24
-rw-r--r--drivers/infiniband/hw/mlx5/mad.c124
-rw-r--r--drivers/infiniband/hw/mlx5/main.c116
-rw-r--r--drivers/infiniband/hw/mlx5/mem.c199
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h64
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c177
-rw-r--r--drivers/infiniband/hw/mlx5/odp.c989
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c60
-rw-r--r--drivers/infiniband/hw/mlx5/restrack.c90
-rw-r--r--drivers/infiniband/hw/mlx5/srq.c2
-rw-r--r--drivers/infiniband/hw/mthca/mthca_dev.h12
-rw-r--r--drivers/infiniband/hw/mthca/mthca_mad.c74
-rw-r--r--drivers/infiniband/hw/mthca/mthca_provider.c4
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_ah.c33
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_ah.h11
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_main.c1
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_sli.h2
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_stats.c9
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_stats.h3
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.c8
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.h2
-rw-r--r--drivers/infiniband/hw/qedr/main.c5
-rw-r--r--drivers/infiniband/hw/qedr/qedr.h72
-rw-r--r--drivers/infiniband/hw/qedr/qedr_iw_cm.c150
-rw-r--r--drivers/infiniband/hw/qedr/verbs.c643
-rw-r--r--drivers/infiniband/hw/qedr/verbs.h12
-rw-r--r--drivers/infiniband/hw/qib/qib_iba6120.c1
-rw-r--r--drivers/infiniband/hw/qib/qib_mad.c38
-rw-r--r--drivers/infiniband/hw/qib/qib_sysfs.c6
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.h5
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c2
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h15
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c2
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c119
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c2
-rw-r--r--drivers/infiniband/sw/rdmavt/ah.c1
-rw-r--r--drivers/infiniband/sw/rdmavt/cq.c2
-rw-r--r--drivers/infiniband/sw/rdmavt/mr.c2
-rw-r--r--drivers/infiniband/sw/rdmavt/qp.c30
-rw-r--r--drivers/infiniband/sw/rdmavt/vt.c3
-rw-r--r--drivers/infiniband/sw/rxe/rxe.c13
-rw-r--r--drivers/infiniband/sw/rxe/rxe_mr.c2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_param.h13
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.c7
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.h1
-rw-r--r--drivers/infiniband/sw/siw/siw.h31
-rw-r--r--drivers/infiniband/sw/siw/siw_cm.c45
-rw-r--r--drivers/infiniband/sw/siw/siw_main.c35
-rw-r--r--drivers/infiniband/sw/siw/siw_verbs.c338
-rw-r--r--drivers/infiniband/sw/siw/siw_verbs.h1
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c10
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.c5
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.h34
-rw-r--r--drivers/infiniband/ulp/iser/iser_initiator.c5
-rw-r--r--drivers/infiniband/ulp/iser/iser_memory.c6
-rw-r--r--drivers/infiniband/ulp/iser/iser_verbs.c72
-rw-r--r--drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h8
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c47
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.h4
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c247
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.h58
-rw-r--r--drivers/input/ff-memless.c9
-rw-r--r--drivers/input/rmi4/rmi_f11.c9
-rw-r--r--drivers/input/rmi4/rmi_f12.c32
-rw-r--r--drivers/input/rmi4/rmi_f54.c5
-rw-r--r--drivers/input/touchscreen/cyttsp4_core.c7
-rw-r--r--drivers/interconnect/qcom/Kconfig9
-rw-r--r--drivers/interconnect/qcom/Makefile2
-rw-r--r--drivers/interconnect/qcom/msm8974.c784
-rw-r--r--drivers/iommu/amd_iommu.c30
-rw-r--r--drivers/irqchip/irq-gic-v3.c20
-rw-r--r--drivers/irqchip/irq-gic-v4.c7
-rw-r--r--drivers/isdn/hardware/mISDN/Kconfig2
-rw-r--r--drivers/isdn/hardware/mISDN/avmfritz.c16
-rw-r--r--drivers/isdn/hardware/mISDN/hfcmulti.c8
-rw-r--r--drivers/isdn/hardware/mISDN/hfcpci.c3
-rw-r--r--drivers/isdn/hardware/mISDN/hfcsusb.c4
-rw-r--r--drivers/isdn/hardware/mISDN/hfcsusb.h4
-rw-r--r--drivers/isdn/hardware/mISDN/mISDNipac.c4
-rw-r--r--drivers/isdn/hardware/mISDN/mISDNisar.c11
-rw-r--r--drivers/isdn/hardware/mISDN/netjet.c8
-rw-r--r--drivers/isdn/hardware/mISDN/w6692.c12
-rw-r--r--drivers/isdn/mISDN/hwchannel.c7
-rw-r--r--drivers/leds/trigger/ledtrig-activity.c14
-rw-r--r--drivers/macintosh/rack-meter.c7
-rw-r--r--drivers/macintosh/windfarm_fcu_controls.c4
-rw-r--r--drivers/macintosh/windfarm_lm87_sensor.c4
-rw-r--r--drivers/macintosh/windfarm_pm72.c22
-rw-r--r--drivers/macintosh/windfarm_rm31.c6
-rw-r--r--drivers/mcb/mcb-core.c28
-rw-r--r--drivers/mcb/mcb-lpc.c1
-rw-r--r--drivers/mcb/mcb-parse.c2
-rw-r--r--drivers/mcb/mcb-pci.c1
-rw-r--r--drivers/md/Kconfig54
-rw-r--r--drivers/md/bcache/Makefile2
-rw-r--r--drivers/md/bcache/alloc.c5
-rw-r--r--drivers/md/bcache/bcache.h4
-rw-r--r--drivers/md/bcache/bset.c17
-rw-r--r--drivers/md/bcache/btree.c19
-rw-r--r--drivers/md/bcache/closure.c7
-rw-r--r--drivers/md/bcache/request.c12
-rw-r--r--drivers/md/bcache/super.c56
-rw-r--r--drivers/md/bcache/sysfs.c7
-rw-r--r--drivers/md/bcache/writeback.c4
-rw-r--r--drivers/md/dm-bio-prison-v1.c27
-rw-r--r--drivers/md/dm-bio-prison-v2.c26
-rw-r--r--drivers/md/dm-cache-target.c77
-rw-r--r--drivers/md/dm-clone-metadata.c29
-rw-r--r--drivers/md/dm-clone-metadata.h4
-rw-r--r--drivers/md/dm-clone-target.c62
-rw-r--r--drivers/md/dm-crypt.c9
-rw-r--r--drivers/md/dm-dust.c97
-rw-r--r--drivers/md/dm-flakey.c25
-rw-r--r--drivers/md/dm-integrity.c28
-rw-r--r--drivers/md/dm-linear.c22
-rw-r--r--drivers/md/dm-raid.c164
-rw-r--r--drivers/md/dm-stripe.c15
-rw-r--r--drivers/md/dm-table.c27
-rw-r--r--drivers/md/dm-thin.c118
-rw-r--r--drivers/md/dm-writecache.c5
-rw-r--r--drivers/md/dm-zoned-metadata.c166
-rw-r--r--drivers/md/dm-zoned-reclaim.c8
-rw-r--r--drivers/md/dm-zoned-target.c54
-rw-r--r--drivers/md/dm-zoned.h2
-rw-r--r--drivers/md/dm.c135
-rw-r--r--drivers/md/md-bitmap.c2
-rw-r--r--drivers/md/md-linear.c5
-rw-r--r--drivers/md/md-multipath.c5
-rw-r--r--drivers/md/md.c57
-rw-r--r--drivers/md/md.h4
-rw-r--r--drivers/md/raid0.c7
-rw-r--r--drivers/md/raid1.c6
-rw-r--r--drivers/md/raid10.c7
-rw-r--r--drivers/md/raid5-ppl.c2
-rw-r--r--drivers/md/raid5.c8
-rw-r--r--drivers/media/cec/cec-adap.c12
-rw-r--r--drivers/media/cec/cec-api.c20
-rw-r--r--drivers/media/cec/cec-core.c5
-rw-r--r--drivers/media/cec/cec-pin.c10
-rw-r--r--drivers/media/common/siano/smscoreapi.c4
-rw-r--r--drivers/media/common/siano/smscoreapi.h4
-rw-r--r--drivers/media/common/siano/smsir.h2
-rw-r--r--drivers/media/common/videobuf2/videobuf2-v4l2.c12
-rw-r--r--drivers/media/dvb-frontends/cxd2820r_c.c2
-rw-r--r--drivers/media/dvb-frontends/cxd2820r_t.c2
-rw-r--r--drivers/media/dvb-frontends/cxd2820r_t2.c2
-rw-r--r--drivers/media/dvb-frontends/cxd2841er.c12
-rw-r--r--drivers/media/dvb-frontends/drx39xyj/drxj.c2
-rw-r--r--drivers/media/dvb-frontends/mb86a20s.c54
-rw-r--r--drivers/media/dvb-frontends/mt312.c13
-rw-r--r--drivers/media/dvb-frontends/si2168.h47
-rw-r--r--drivers/media/dvb-frontends/si2168_priv.h10
-rw-r--r--drivers/media/dvb-frontends/tc90522.c27
-rw-r--r--drivers/media/dvb-frontends/tc90522.h3
-rw-r--r--drivers/media/i2c/Kconfig80
-rw-r--r--drivers/media/i2c/Makefile2
-rw-r--r--drivers/media/i2c/ad5820.c35
-rw-r--r--drivers/media/i2c/adv7180.c6
-rw-r--r--drivers/media/i2c/adv7842.c4
-rw-r--r--drivers/media/i2c/bt819.c2
-rw-r--r--drivers/media/i2c/hi556.c1200
-rw-r--r--drivers/media/i2c/imx214.c9
-rw-r--r--drivers/media/i2c/imx290.c884
-rw-r--r--drivers/media/i2c/lm3646.c2
-rw-r--r--drivers/media/i2c/max2175.c4
-rw-r--r--drivers/media/i2c/max2175.h4
-rw-r--r--drivers/media/i2c/mt9m001.c2
-rw-r--r--drivers/media/i2c/ov2659.c139
-rw-r--r--drivers/media/i2c/ov5640.c33
-rw-r--r--drivers/media/i2c/ov5695.c2
-rw-r--r--drivers/media/i2c/ov6650.c266
-rw-r--r--drivers/media/i2c/saa711x_regs.h2
-rw-r--r--drivers/media/i2c/smiapp/smiapp-core.c326
-rw-r--r--drivers/media/i2c/smiapp/smiapp-reg.h36
-rw-r--r--drivers/media/i2c/smiapp/smiapp.h3
-rw-r--r--drivers/media/i2c/st-mipid02.c5
-rw-r--r--drivers/media/i2c/tda1997x_regs.h2
-rw-r--r--drivers/media/i2c/tvp5150_reg.h2
-rw-r--r--drivers/media/i2c/vpx3220.c2
-rw-r--r--drivers/media/mc/mc-device.c65
-rw-r--r--drivers/media/pci/cx18/cx18-ioctl.c2
-rw-r--r--drivers/media/pci/cx23885/cx23888-ir.c5
-rw-r--r--drivers/media/pci/cx88/cx88-cards.c43
-rw-r--r--drivers/media/pci/cx88/cx88-dvb.c1
-rw-r--r--drivers/media/pci/cx88/cx88-video.c11
-rw-r--r--drivers/media/pci/cx88/cx88.h1
-rw-r--r--drivers/media/pci/dm1105/dm1105.c1
-rw-r--r--drivers/media/pci/ivtv/ivtv-vbi.c2
-rw-r--r--drivers/media/pci/mantis/hopper_cards.c4
-rw-r--r--drivers/media/pci/mantis/mantis_cards.c4
-rw-r--r--drivers/media/pci/saa7164/saa7164-core.c166
-rw-r--r--drivers/media/pci/smipcie/smipcie.h1
-rw-r--r--drivers/media/pci/solo6x10/solo6x10-g723.c2
-rw-r--r--drivers/media/pci/tw686x/tw686x-audio.c2
-rw-r--r--drivers/media/platform/Kconfig17
-rw-r--r--drivers/media/platform/Makefile4
-rw-r--r--drivers/media/platform/am437x/am437x-vpfe.c861
-rw-r--r--drivers/media/platform/am437x/am437x-vpfe.h43
-rw-r--r--drivers/media/platform/am437x/am437x-vpfe_regs.h10
-rw-r--r--drivers/media/platform/aspeed-video.c58
-rw-r--r--drivers/media/platform/cadence/cdns-csi2rx.c2
-rw-r--r--drivers/media/platform/cec-gpio/cec-gpio.c41
-rw-r--r--drivers/media/platform/coda/coda-common.c13
-rw-r--r--drivers/media/platform/coda/coda.h1
-rw-r--r--drivers/media/platform/exynos4-is/fimc-isp-video.c2
-rw-r--r--drivers/media/platform/exynos4-is/media-dev.c7
-rw-r--r--drivers/media/platform/meson/ao-cec-g12a.c36
-rw-r--r--drivers/media/platform/meson/ao-cec.c30
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c20
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c8
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c1
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c1
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c1
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec_vpu_if.c9
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec_vpu_if.h9
-rw-r--r--drivers/media/platform/mtk-vpu/mtk_vpu.c4
-rw-r--r--drivers/media/platform/qcom/venus/core.c56
-rw-r--r--drivers/media/platform/qcom/venus/core.h30
-rw-r--r--drivers/media/platform/qcom/venus/helpers.c247
-rw-r--r--drivers/media/platform/qcom/venus/helpers.h3
-rw-r--r--drivers/media/platform/qcom/venus/hfi_venus.c6
-rw-r--r--drivers/media/platform/qcom/venus/vdec.c11
-rw-r--r--drivers/media/platform/qcom/venus/venc.c7
-rw-r--r--drivers/media/platform/rcar-vin/rcar-core.c17
-rw-r--r--drivers/media/platform/rcar-vin/rcar-csi2.c4
-rw-r--r--drivers/media/platform/rcar-vin/rcar-dma.c63
-rw-r--r--drivers/media/platform/rcar-vin/rcar-v4l2.c156
-rw-r--r--drivers/media/platform/rcar-vin/rcar-vin.h6
-rw-r--r--drivers/media/platform/rcar_drif.c1
-rw-r--r--drivers/media/platform/rcar_fdp1.c2
-rw-r--r--drivers/media/platform/s3c-camif/camif-regs.c2
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-core.c1
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-core.h2
-rw-r--r--drivers/media/platform/seco-cec/seco-cec.c1
-rw-r--r--drivers/media/platform/sti/bdisp/bdisp-v4l2.c3
-rw-r--r--drivers/media/platform/sunxi/Makefile1
-rw-r--r--drivers/media/platform/sunxi/sun8i-di/Makefile2
-rw-r--r--drivers/media/platform/sunxi/sun8i-di/sun8i-di.c1028
-rw-r--r--drivers/media/platform/sunxi/sun8i-di/sun8i-di.h237
-rw-r--r--drivers/media/platform/ti-vpe/csc.c254
-rw-r--r--drivers/media/platform/ti-vpe/csc.h4
-rw-r--r--drivers/media/platform/ti-vpe/vpdma.c13
-rw-r--r--drivers/media/platform/ti-vpe/vpdma.h2
-rw-r--r--drivers/media/platform/ti-vpe/vpdma_priv.h5
-rw-r--r--drivers/media/platform/ti-vpe/vpe.c396
-rw-r--r--drivers/media/platform/vicodec/vicodec-core.c4
-rw-r--r--drivers/media/platform/vim2m.c8
-rw-r--r--drivers/media/platform/vimc/Makefile7
-rw-r--r--drivers/media/platform/vimc/vimc-capture.c107
-rw-r--r--drivers/media/platform/vimc/vimc-common.c171
-rw-r--r--drivers/media/platform/vimc/vimc-common.h120
-rw-r--r--drivers/media/platform/vimc/vimc-core.c215
-rw-r--r--drivers/media/platform/vimc/vimc-debayer.c182
-rw-r--r--drivers/media/platform/vimc/vimc-scaler.c102
-rw-r--r--drivers/media/platform/vimc/vimc-sensor.c109
-rw-r--r--drivers/media/platform/vimc/vimc-streamer.c19
-rw-r--r--drivers/media/platform/vivid/Makefile2
-rw-r--r--drivers/media/platform/vivid/vivid-cec.c7
-rw-r--r--drivers/media/platform/vivid/vivid-core.c368
-rw-r--r--drivers/media/platform/vivid/vivid-core.h25
-rw-r--r--drivers/media/platform/vivid/vivid-ctrls.c89
-rw-r--r--drivers/media/platform/vivid/vivid-kthread-cap.c62
-rw-r--r--drivers/media/platform/vivid/vivid-kthread-out.c57
-rw-r--r--drivers/media/platform/vivid/vivid-meta-cap.c201
-rw-r--r--drivers/media/platform/vivid/vivid-meta-cap.h29
-rw-r--r--drivers/media/platform/vivid/vivid-meta-out.c174
-rw-r--r--drivers/media/platform/vivid/vivid-meta-out.h25
-rw-r--r--drivers/media/platform/vivid/vivid-sdr-cap.c8
-rw-r--r--drivers/media/platform/vivid/vivid-vid-cap.c8
-rw-r--r--drivers/media/platform/vivid/vivid-vid-out.c8
-rw-r--r--drivers/media/platform/xilinx/xilinx-dma.h2
-rw-r--r--drivers/media/platform/xilinx/xilinx-vip.h2
-rw-r--r--drivers/media/platform/xilinx/xilinx-vipp.h2
-rw-r--r--drivers/media/platform/xilinx/xilinx-vtc.h2
-rw-r--r--drivers/media/radio/radio-wl1273.c3
-rw-r--r--drivers/media/radio/si470x/radio-si470x-i2c.c2
-rw-r--r--drivers/media/rc/imon.c64
-rw-r--r--drivers/media/rc/imon_raw.c22
-rw-r--r--drivers/media/rc/ir-rcmm-decoder.c6
-rw-r--r--drivers/media/rc/ite-cir.c2
-rw-r--r--drivers/media/rc/keymaps/Makefile2
-rw-r--r--drivers/media/rc/keymaps/rc-beelink-gs1.c84
-rw-r--r--drivers/media/rc/keymaps/rc-vega-s9x.c54
-rw-r--r--drivers/media/rc/mceusb.c141
-rw-r--r--drivers/media/rc/rc-core-priv.h2
-rw-r--r--drivers/media/rc/rc-main.c1
-rw-r--r--drivers/media/rc/tango-ir.c14
-rw-r--r--drivers/media/tuners/qm1d1c0042.c2
-rw-r--r--drivers/media/tuners/si2157.c6
-rw-r--r--drivers/media/tuners/si2157.h33
-rw-r--r--drivers/media/tuners/si2157_priv.h5
-rw-r--r--drivers/media/tuners/tuner-xc2028-types.h2
-rw-r--r--drivers/media/tuners/tuner-xc2028.h2
-rw-r--r--drivers/media/usb/b2c2/flexcop-usb.c13
-rw-r--r--drivers/media/usb/cx231xx/Kconfig2
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-417.c508
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-audio.c1
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-avcore.c2
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-cards.c6
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-vbi.c172
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-vbi.h2
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-video.c795
-rw-r--r--drivers/media/usb/cx231xx/cx231xx.h30
-rw-r--r--drivers/media/usb/dvb-usb-v2/af9035.c37
-rw-r--r--drivers/media/usb/dvb-usb-v2/dvb_usb.h2
-rw-r--r--drivers/media/usb/dvb-usb-v2/dvb_usb_core.c1
-rw-r--r--drivers/media/usb/dvb-usb-v2/dvbsky.c28
-rw-r--r--drivers/media/usb/dvb-usb-v2/gl861.c391
-rw-r--r--drivers/media/usb/dvb-usb-v2/gl861.h14
-rw-r--r--drivers/media/usb/dvb-usb-v2/rtl28xxu.c6
-rw-r--r--drivers/media/usb/dvb-usb/af9005.c5
-rw-r--r--drivers/media/usb/dvb-usb/cxusb.c3
-rw-r--r--drivers/media/usb/em28xx/em28xx-audio.c1
-rw-r--r--drivers/media/usb/em28xx/em28xx-cards.c20
-rw-r--r--drivers/media/usb/em28xx/em28xx-dvb.c30
-rw-r--r--drivers/media/usb/em28xx/em28xx-i2c.c4
-rw-r--r--drivers/media/usb/em28xx/em28xx.h1
-rw-r--r--drivers/media/usb/gspca/sq905.c3
-rw-r--r--drivers/media/usb/gspca/sq905c.c3
-rw-r--r--drivers/media/usb/gspca/stv0680.c2
-rw-r--r--drivers/media/usb/gspca/stv06xx/stv06xx_st6422.c2
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-v4l2.c9
-rw-r--r--drivers/media/usb/tm6000/tm6000-regs.h2
-rw-r--r--drivers/media/usb/tm6000/tm6000-usb-isoc.h2
-rw-r--r--drivers/media/usb/tm6000/tm6000.h2
-rw-r--r--drivers/media/usb/usbtv/usbtv-audio.c3
-rw-r--r--drivers/media/usb/usbvision/usbvision-video.c29
-rw-r--r--drivers/media/usb/uvc/uvc_debugfs.c10
-rw-r--r--drivers/media/usb/uvc/uvc_driver.c28
-rw-r--r--drivers/media/usb/uvc/uvc_metadata.c4
-rw-r--r--drivers/media/usb/uvc/uvc_queue.c2
-rw-r--r--drivers/media/usb/uvc/uvcvideo.h2
-rw-r--r--drivers/media/usb/zr364xx/zr364xx.c7
-rw-r--r--drivers/media/v4l2-core/v4l2-common.c128
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls.c199
-rw-r--r--drivers/media/v4l2-core/v4l2-dev.c112
-rw-r--r--drivers/media/v4l2-core/v4l2-dv-timings.c4
-rw-r--r--drivers/media/v4l2-core/v4l2-fwnode.c1
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c77
-rw-r--r--drivers/media/v4l2-core/v4l2-mem2mem.c190
-rw-r--r--drivers/media/v4l2-core/v4l2-subdev.c6
-rw-r--r--drivers/memstick/core/Kconfig18
-rw-r--r--drivers/memstick/host/Kconfig4
-rw-r--r--drivers/memstick/host/jmb38x_ms.c12
-rw-r--r--drivers/mfd/intel_soc_pmic_crc.c2
-rw-r--r--drivers/mfd/tps6105x.c34
-rw-r--r--drivers/misc/Kconfig17
-rw-r--r--drivers/misc/atmel_tclib.c4
-rw-r--r--drivers/misc/cardreader/Makefile2
-rw-r--r--drivers/misc/cardreader/rts5260.c3
-rw-r--r--drivers/misc/cardreader/rts5261.c792
-rw-r--r--drivers/misc/cardreader/rts5261.h233
-rw-r--r--drivers/misc/cardreader/rtsx_pcr.c43
-rw-r--r--drivers/misc/cardreader/rtsx_pcr.h1
-rw-r--r--drivers/misc/eeprom/eeprom.c4
-rw-r--r--drivers/misc/fastrpc.c209
-rw-r--r--drivers/misc/habanalabs/command_submission.c127
-rw-r--r--drivers/misc/habanalabs/debugfs.c112
-rw-r--r--drivers/misc/habanalabs/device.c18
-rw-r--r--drivers/misc/habanalabs/firmware_if.c5
-rw-r--r--drivers/misc/habanalabs/goya/goya.c78
-rw-r--r--drivers/misc/habanalabs/goya/goyaP.h2
-rw-r--r--drivers/misc/habanalabs/goya/goya_coresight.c53
-rw-r--r--drivers/misc/habanalabs/goya/goya_hwmgr.c31
-rw-r--r--drivers/misc/habanalabs/habanalabs.h171
-rw-r--r--drivers/misc/habanalabs/habanalabs_ioctl.c73
-rw-r--r--drivers/misc/habanalabs/hw_queue.c249
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/goya_masks.h2
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/goya_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/psoc_etr_regs.h114
-rw-r--r--drivers/misc/habanalabs/include/hl_boot_if.h2
-rw-r--r--drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h7
-rw-r--r--drivers/misc/habanalabs/include/qman_if.h12
-rw-r--r--drivers/misc/habanalabs/memory.c392
-rw-r--r--drivers/misc/habanalabs/mmu.c204
-rw-r--r--drivers/misc/hpilo.h2
-rw-r--r--drivers/misc/ibmvmc.h4
-rw-r--r--drivers/misc/lis3lv02d/lis3lv02d.c80
-rw-r--r--drivers/misc/lis3lv02d/lis3lv02d.h4
-rw-r--r--drivers/misc/lkdtm/refcount.c11
-rw-r--r--drivers/misc/mei/bus-fixup.c9
-rw-r--r--drivers/misc/mei/bus.c42
-rw-r--r--drivers/misc/mei/client.h36
-rw-r--r--drivers/misc/mei/hw-me-regs.h4
-rw-r--r--drivers/misc/mei/hw-me.c74
-rw-r--r--drivers/misc/mei/hw-me.h12
-rw-r--r--drivers/misc/mei/hw-txe.c10
-rw-r--r--drivers/misc/mei/init.c6
-rw-r--r--drivers/misc/mei/main.c24
-rw-r--r--drivers/misc/mei/mei_dev.h18
-rw-r--r--drivers/misc/mei/pci-me.c16
-rw-r--r--drivers/misc/mic/Kconfig16
-rw-r--r--drivers/misc/ocxl/ocxl_internal.h2
-rw-r--r--drivers/misc/ocxl/trace.h2
-rw-r--r--drivers/misc/sgi-gru/gruprocfs.c11
-rw-r--r--drivers/misc/ti-st/st_core.c4
-rw-r--r--drivers/misc/vmw_vmci/vmci_driver.c67
-rw-r--r--drivers/misc/vmw_vmci/vmci_driver.h2
-rw-r--r--drivers/misc/vmw_vmci/vmci_guest.c2
-rw-r--r--drivers/misc/vmw_vmci/vmci_host.c7
-rw-r--r--drivers/mmc/core/block.c151
-rw-r--r--drivers/mmc/core/core.c12
-rw-r--r--drivers/mmc/core/core.h2
-rw-r--r--drivers/mmc/core/mmc.c9
-rw-r--r--drivers/mmc/core/quirks.h7
-rw-r--r--drivers/mmc/core/sdio.c28
-rw-r--r--drivers/mmc/core/sdio_bus.c9
-rw-r--r--drivers/mmc/host/Kconfig21
-rw-r--r--drivers/mmc/host/Makefile2
-rw-r--r--drivers/mmc/host/atmel-mci.c3
-rw-r--r--drivers/mmc/host/bcm2835.c4
-rw-r--r--drivers/mmc/host/cavium-octeon.c15
-rw-r--r--drivers/mmc/host/dw_mmc.c4
-rw-r--r--drivers/mmc/host/jz4740_mmc.c41
-rw-r--r--drivers/mmc/host/mmc_spi.c2
-rw-r--r--drivers/mmc/host/mmci.c198
-rw-r--r--drivers/mmc/host/mmci.h5
-rw-r--r--drivers/mmc/host/mmci_stm32_sdmmc.c46
-rw-r--r--drivers/mmc/host/moxart-mmc.c4
-rw-r--r--drivers/mmc/host/omap_hsmmc.c31
-rw-r--r--drivers/mmc/host/owl-mmc.c696
-rw-r--r--drivers/mmc/host/renesas_sdhi_internal_dmac.c1
-rw-r--r--drivers/mmc/host/sdhci-acpi.c49
-rw-r--r--drivers/mmc/host/sdhci-esdhc.h14
-rw-r--r--drivers/mmc/host/sdhci-milbeaut.c362
-rw-r--r--drivers/mmc/host/sdhci-of-arasan.c493
-rw-r--r--drivers/mmc/host/sdhci-of-aspeed.c12
-rw-r--r--drivers/mmc/host/sdhci-of-at91.c21
-rw-r--r--drivers/mmc/host/sdhci-of-esdhc.c257
-rw-r--r--drivers/mmc/host/sdhci-pci-core.c53
-rw-r--r--drivers/mmc/host/sdhci-pci.h2
-rw-r--r--drivers/mmc/host/sdhci.c15
-rw-r--r--drivers/mmc/host/sdhci_am654.c71
-rw-r--r--drivers/mmc/host/sdhci_f_sdh30.c26
-rw-r--r--drivers/mmc/host/sdhci_f_sdh30.h32
-rw-r--r--drivers/mmc/host/tmio_mmc.h1
-rw-r--r--drivers/mmc/host/tmio_mmc_core.c12
-rw-r--r--drivers/mmc/host/vub300.c7
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0001.c10
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0002.c79
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0020.c8
-rw-r--r--drivers/mtd/chips/cfi_util.c2
-rw-r--r--drivers/mtd/devices/mchp23k256.c20
-rw-r--r--drivers/mtd/devices/spear_smi.c42
-rw-r--r--drivers/mtd/devices/st_spi_fsm.c1
-rw-r--r--drivers/mtd/maps/Kconfig11
-rw-r--r--drivers/mtd/maps/Makefile1
-rw-r--r--drivers/mtd/maps/l440gx.c2
-rw-r--r--drivers/mtd/maps/physmap-core.c5
-rw-r--r--drivers/mtd/maps/physmap-ixp4xx.c132
-rw-r--r--drivers/mtd/maps/physmap-ixp4xx.h17
-rw-r--r--drivers/mtd/mtdchar.c10
-rw-r--r--drivers/mtd/mtdcore.c26
-rw-r--r--drivers/mtd/mtdswap.c8
-rw-r--r--drivers/mtd/nand/raw/Kconfig7
-rw-r--r--drivers/mtd/nand/raw/Makefile1
-rw-r--r--drivers/mtd/nand/raw/brcmnand/brcmnand.c23
-rw-r--r--drivers/mtd/nand/raw/cadence-nand-controller.c3030
-rw-r--r--drivers/mtd/nand/raw/denali_dt.c59
-rw-r--r--drivers/mtd/nand/raw/hisi504_nand.c4
-rw-r--r--drivers/mtd/nand/raw/lpc32xx_mlc.c1
-rw-r--r--drivers/mtd/nand/raw/marvell_nand.c4
-rw-r--r--drivers/mtd/nand/raw/meson_nand.c4
-rw-r--r--drivers/mtd/nand/raw/mtk_ecc.c4
-rw-r--r--drivers/mtd/nand/raw/mtk_nand.c1
-rw-r--r--drivers/mtd/nand/raw/mxic_nand.c4
-rw-r--r--drivers/mtd/nand/raw/nand_base.c8
-rw-r--r--drivers/mtd/nand/raw/nand_micron.c4
-rw-r--r--drivers/mtd/nand/raw/omap2.c8
-rw-r--r--drivers/mtd/nand/raw/sh_flctl.c4
-rw-r--r--drivers/mtd/nand/raw/stm32_fmc2_nand.c5
-rw-r--r--drivers/mtd/nand/raw/sunxi_nand.c4
-rw-r--r--drivers/mtd/spi-nor/aspeed-smc.c23
-rw-r--r--drivers/mtd/spi-nor/cadence-quadspi.c58
-rw-r--r--drivers/mtd/spi-nor/hisi-sfc.c23
-rw-r--r--drivers/mtd/spi-nor/intel-spi-pci.c6
-rw-r--r--drivers/mtd/spi-nor/intel-spi.c58
-rw-r--r--drivers/mtd/spi-nor/mtk-quadspi.c25
-rw-r--r--drivers/mtd/spi-nor/nxp-spifi.c23
-rw-r--r--drivers/mtd/spi-nor/spi-nor.c1491
-rw-r--r--drivers/mtd/ubi/debug.c131
-rw-r--r--drivers/net/Kconfig64
-rw-r--r--drivers/net/bonding/bond_main.c138
-rw-r--r--drivers/net/caif/Kconfig46
-rw-r--r--drivers/net/can/c_can/c_can_platform.c21
-rw-r--r--drivers/net/can/dev.c5
-rw-r--r--drivers/net/can/flexcan.c131
-rw-r--r--drivers/net/can/grcan.c4
-rw-r--r--drivers/net/can/ifi_canfd/ifi_canfd.c4
-rw-r--r--drivers/net/can/m_can/m_can.c54
-rw-r--r--drivers/net/can/m_can/m_can_platform.c4
-rw-r--r--drivers/net/can/peak_canfd/peak_canfd.c25
-rw-r--r--drivers/net/can/peak_canfd/peak_canfd_user.h3
-rw-r--r--drivers/net/can/peak_canfd/peak_pciefd_main.c6
-rw-r--r--drivers/net/can/rcar/rcar_can.c4
-rw-r--r--drivers/net/can/rcar/rcar_canfd.c4
-rw-r--r--drivers/net/can/rx-offload.c122
-rw-r--r--drivers/net/can/slcan.c1
-rw-r--r--drivers/net/can/spi/mcp251x.c75
-rw-r--r--drivers/net/can/sun4i_can.c4
-rw-r--r--drivers/net/can/ti_hecc.c26
-rw-r--r--drivers/net/can/xilinx_can.c102
-rw-r--r--drivers/net/dsa/Kconfig3
-rw-r--r--drivers/net/dsa/Makefile1
-rw-r--r--drivers/net/dsa/b53/b53_common.c73
-rw-r--r--drivers/net/dsa/b53/b53_priv.h8
-rw-r--r--drivers/net/dsa/bcm_sf2.c37
-rw-r--r--drivers/net/dsa/bcm_sf2.h3
-rw-r--r--drivers/net/dsa/bcm_sf2_cfp.c6
-rw-r--r--drivers/net/dsa/dsa_loop.c5
-rw-r--r--drivers/net/dsa/lan9303-core.c4
-rw-r--r--drivers/net/dsa/lantiq_gswip.c4
-rw-r--r--drivers/net/dsa/microchip/ksz9477_i2c.c1
-rw-r--r--drivers/net/dsa/microchip/ksz_common.c12
-rw-r--r--drivers/net/dsa/mt7530.c23
-rw-r--r--drivers/net/dsa/mv88e6060.c4
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c519
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.h25
-rw-r--r--drivers/net/dsa/mv88e6xxx/global1.c60
-rw-r--r--drivers/net/dsa/mv88e6xxx/global1.h12
-rw-r--r--drivers/net/dsa/mv88e6xxx/global1_atu.c37
-rw-r--r--drivers/net/dsa/mv88e6xxx/global2.c13
-rw-r--r--drivers/net/dsa/mv88e6xxx/global2.h25
-rw-r--r--drivers/net/dsa/mv88e6xxx/port.c37
-rw-r--r--drivers/net/dsa/mv88e6xxx/port.h3
-rw-r--r--drivers/net/dsa/mv88e6xxx/ptp.c13
-rw-r--r--drivers/net/dsa/ocelot/Kconfig11
-rw-r--r--drivers/net/dsa/ocelot/Makefile6
-rw-r--r--drivers/net/dsa/ocelot/felix.c530
-rw-r--r--drivers/net/dsa/ocelot/felix.h37
-rw-r--r--drivers/net/dsa/ocelot/felix_vsc9959.c583
-rw-r--r--drivers/net/dsa/qca8k.c14
-rw-r--r--drivers/net/dsa/realtek-smi-core.c5
-rw-r--r--drivers/net/dsa/sja1105/Kconfig1
-rw-r--r--drivers/net/dsa/sja1105/sja1105.h61
-rw-r--r--drivers/net/dsa/sja1105/sja1105_clocking.c65
-rw-r--r--drivers/net/dsa/sja1105/sja1105_dynamic_config.c12
-rw-r--r--drivers/net/dsa/sja1105/sja1105_ethtool.c16
-rw-r--r--drivers/net/dsa/sja1105/sja1105_main.c418
-rw-r--r--drivers/net/dsa/sja1105/sja1105_ptp.c630
-rw-r--r--drivers/net/dsa/sja1105/sja1105_ptp.h113
-rw-r--r--drivers/net/dsa/sja1105/sja1105_spi.c409
-rw-r--r--drivers/net/dsa/sja1105/sja1105_tas.c432
-rw-r--r--drivers/net/dsa/sja1105/sja1105_tas.h27
-rw-r--r--drivers/net/dsa/vitesse-vsc73xx-core.c5
-rw-r--r--drivers/net/dummy.c36
-rw-r--r--drivers/net/ethernet/Kconfig1
-rw-r--r--drivers/net/ethernet/Makefile1
-rw-r--r--drivers/net/ethernet/altera/altera_tse_main.c6
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_ethtool.c35
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c158
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.h14
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/Makefile10
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_cfg.h13
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c270
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ethtool.h1
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_filters.c17
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_hw.h72
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c1
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_main.c120
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.c328
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.h44
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c100
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_phy.c147
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_phy.h32
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ptp.c1392
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ptp.h140
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ring.c63
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ring.h14
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_vec.c6
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c43
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c439
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h9
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c122
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h69
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h277
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c212
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h396
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c322
-rw-r--r--drivers/net/ethernet/arc/emac_arc.c15
-rw-r--r--drivers/net/ethernet/arc/emac_rockchip.c7
-rw-r--r--drivers/net/ethernet/atheros/ag71xx.c5
-rw-r--r--drivers/net/ethernet/aurora/nb8800.c4
-rw-r--r--drivers/net/ethernet/aurora/nb8800.h2
-rw-r--r--drivers/net/ethernet/broadcom/b44.c3
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c3
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h132
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c351
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h6
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c16
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c328
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h48
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c59
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h1
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c66
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h4
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h95
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c8
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c416
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h20
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c14
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h3
-rw-r--r--drivers/net/ethernet/broadcom/cnic.c2
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c97
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.h1
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmmii.c55
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c4
-rw-r--r--drivers/net/ethernet/cadence/Kconfig2
-rw-r--r--drivers/net/ethernet/cadence/macb.h9
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c491
-rw-r--r--drivers/net/ethernet/calxeda/xgmac.c2
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_main.c9
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.c12
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/Makefile3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c38
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h129
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c55
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c16
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c120
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c796
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c52
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c354
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.h49
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c650
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.h43
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c36
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c131
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h39
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/l2t.c8
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sched.c265
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sched.h11
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c1036
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c11
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_msg.h5
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h50
-rw-r--r--drivers/net/ethernet/cirrus/ep93xx_eth.c5
-rw-r--r--drivers/net/ethernet/cortina/gemini.c1
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.c56
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.c370
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.h12
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c6
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c68
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/Kconfig1
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/Makefile2
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c182
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h11
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c40
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c375
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h38
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpmac-cmd.h73
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpmac.c183
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpmac.h226
-rw-r--r--drivers/net/ethernet/freescale/enetc/Kconfig10
-rw-r--r--drivers/net/ethernet/freescale/enetc/Makefile2
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.c39
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.h17
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_cbdr.c5
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_ethtool.c27
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_hw.h93
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_pf.c7
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_qos.c300
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c23
-rw-r--r--drivers/net/ethernet/freescale/fman/fman.c6
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_port.c17
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_port.h2
-rw-r--r--drivers/net/ethernet/freescale/fman/mac.c6
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/Kconfig8
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c7
-rw-r--r--drivers/net/ethernet/freescale/gianfar.h8
-rw-r--r--drivers/net/ethernet/google/gve/gve_tx.c9
-rw-r--r--drivers/net/ethernet/hisilicon/hip04_eth.c7
-rw-r--r--drivers/net/ethernet/hisilicon/hix5hd2_gmac.c5
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h5
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.h27
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c93
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c588
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.h36
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c50
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c7
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h39
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c21
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c188
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c563
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h24
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c100
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c47
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h8
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c10
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c114
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h3
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c30
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_qmr.c5
-rw-r--r--drivers/net/ethernet/ibm/emac/core.c5
-rw-r--r--drivers/net/ethernet/ibm/emac/core.h2
-rw-r--r--drivers/net/ethernet/ibm/emac/zmii.c3
-rw-r--r--drivers/net/ethernet/ibm/emac/zmii.h3
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c26
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c11
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c4
-rw-r--r--drivers/net/ethernet/intel/e1000e/ethtool.c4
-rw-r--r--drivers/net/ethernet/intel/e1000e/hw.h12
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c7
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c276
-rw-r--r--drivers/net/ethernet/intel/e1000e/ptp.c2
-rw-r--r--drivers/net/ethernet/intel/e1000e/regs.h4
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k.h3
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_iov.c48
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_main.c2
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_netdev.c1
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_pci.c3
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_tlv.h6
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_type.h1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.c71
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h8
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c118
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb.c4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb.h3
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_devids.h2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c41
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c36
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_nvm.c61
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_prototype.h36
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c12
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_type.h1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c93
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h3
-rw-r--r--drivers/net/ethernet/intel/ice/Makefile5
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h71
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adminq_cmd.h79
-rw-r--r--drivers/net/ethernet/intel/ice/ice_base.c859
-rw-r--r--drivers/net/ethernet/intel/ice/ice_base.h31
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.c205
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.h12
-rw-r--r--drivers/net/ethernet/intel/ice/ice_controlq.c65
-rw-r--r--drivers/net/ethernet/intel/ice/ice_controlq.h7
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb.c6
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_lib.c313
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_lib.h28
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_nl.c933
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_nl.h19
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool.c524
-rw-r--r--drivers/net/ethernet/intel/ice/ice_hw_autogen.h3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c1327
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.h55
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c810
-rw-r--r--drivers/net/ethernet/intel/ice/ice_nvm.c51
-rw-r--r--drivers/net/ethernet/intel/ice/ice_nvm.h8
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sched.c1267
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sched.h39
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.c16
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.h5
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.c600
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.h140
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx_lib.c273
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx_lib.h59
-rw-r--r--drivers/net/ethernet/intel/ice/ice_type.h69
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c535
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h12
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.c1181
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.h72
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.h1
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c28
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ptp.c17
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c4
-rw-r--r--drivers/net/ethernet/intel/igc/igc.h1
-rw-r--r--drivers/net/ethernet/intel/igc/igc_defines.h8
-rw-r--r--drivers/net/ethernet/intel/igc/igc_hw.h1
-rw-r--r--drivers/net/ethernet/intel/igc/igc_mac.c104
-rw-r--r--drivers/net/ethernet/intel/igc/igc_mac.h2
-rw-r--r--drivers/net/ethernet/intel/igc/igc_main.c231
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c8
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c30
-rw-r--r--drivers/net/ethernet/marvell/Kconfig1
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c7
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c639
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c51
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/Kconfig9
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/Makefile2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.c60
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.h17
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/common.h20
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.c87
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.h32
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/npc.h99
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h14918
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.c116
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.h221
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c130
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c1711
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c876
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c55
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c187
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h32
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h40
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c4
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_path.c6
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c60
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.h2
-rw-r--r--drivers/net/ethernet/mediatek/mtk_sgmii.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c33
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Makefile2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/devlink.c42
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c44
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c25
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c47
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c194
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c590
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.h91
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c273
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c61
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c228
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/health.c30
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag.c65
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c78
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag_mp.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mr.c28
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sriov.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_crc32.c98
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c123
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c28
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c158
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h29
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h24
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vport.c27
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/wq.c38
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/wq.h25
-rw-r--r--drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.c229
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.h20
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_env.c23
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c66
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_thermal.c40
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/emad.h7
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/i2c.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/minimal.c34
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.c18
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci_hw.h7
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/port.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/reg.h78
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/resources.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c506
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h33
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c57
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c198
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c100
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/switchib.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/switchx2.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/trap.h15
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ptp.c303
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ptp.h27
-rw-r--r--drivers/net/ethernet/mscc/ocelot.c1154
-rw-r--r--drivers/net/ethernet/mscc/ocelot.h482
-rw-r--r--drivers/net/ethernet/mscc/ocelot_ace.h4
-rw-r--r--drivers/net/ethernet/mscc/ocelot_board.c154
-rw-r--r--drivers/net/ethernet/mscc/ocelot_flower.c32
-rw-r--r--drivers/net/ethernet/mscc/ocelot_io.c14
-rw-r--r--drivers/net/ethernet/mscc/ocelot_police.c36
-rw-r--r--drivers/net/ethernet/mscc/ocelot_police.h4
-rw-r--r--drivers/net/ethernet/mscc/ocelot_regs.c3
-rw-r--r--drivers/net/ethernet/mscc/ocelot_tc.c56
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/jit.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/offload.c8
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c3
-rw-r--r--drivers/net/ethernet/ni/nixge.c5
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c59
-rw-r--r--drivers/net/ethernet/nxp/lpc_eth.c28
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic.h4
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_dev.c60
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_dev.h12
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_devlink.c9
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_ethtool.c128
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_if.h196
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.c41
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.h13
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_main.c24
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_txrx.c290
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_int.h4
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sriov.h2
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_filter.c2
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_main.c8
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac.c6
-rw-r--r--drivers/net/ethernet/qualcomm/qca_spi.c11
-rw-r--r--drivers/net/ethernet/qualcomm/qca_spi.h1
-rw-r--r--drivers/net/ethernet/realtek/r8169_firmware.c19
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c1030
-rw-r--r--drivers/net/ethernet/renesas/ravb.h3
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c30
-rw-r--r--drivers/net/ethernet/renesas/ravb_ptp.c11
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c7
-rw-r--r--drivers/net/ethernet/rocker/rocker_main.c9
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c5
-rw-r--r--drivers/net/ethernet/sfc/ef10.c22
-rw-r--r--drivers/net/ethernet/sfc/efx.c283
-rw-r--r--drivers/net/ethernet/sfc/efx.h22
-rw-r--r--drivers/net/ethernet/sfc/ethtool.c33
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h84
-rw-r--r--drivers/net/ethernet/sfc/ptp.c3
-rw-r--r--drivers/net/ethernet/sfc/rx.c220
-rw-r--r--drivers/net/ethernet/sfc/tx.c92
-rw-r--r--drivers/net/ethernet/sgi/ioc3-eth.c62
-rw-r--r--drivers/net/ethernet/socionext/netsec.c9
-rw-r--r--drivers/net/ethernet/socionext/sni_ave.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c10
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c36
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c9
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c31
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c9
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4.h29
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c119
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c25
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c62
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac5.h2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c17
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c19
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/hwif.h4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c290
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c58
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c114
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c2
-rw-r--r--drivers/net/ethernet/ti/Kconfig20
-rw-r--r--drivers/net/ethernet/ti/Makefile2
-rw-r--r--drivers/net/ethernet/ti/cpsw.c1285
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.c150
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.h11
-rw-r--r--drivers/net/ethernet/ti/cpsw_new.c2048
-rw-r--r--drivers/net/ethernet/ti/cpsw_priv.c1246
-rw-r--r--drivers/net/ethernet/ti/cpsw_priv.h81
-rw-r--r--drivers/net/ethernet/ti/cpsw_switchdev.c589
-rw-r--r--drivers/net/ethernet/ti/cpsw_switchdev.h15
-rw-r--r--drivers/net/ethernet/ti/cpts.c2
-rw-r--r--drivers/net/ethernet/ti/netcp_ethss.c5
-rw-r--r--drivers/net/ethernet/xilinx/Kconfig4
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c18
-rw-r--r--drivers/net/hyperv/hyperv_net.h7
-rw-r--r--drivers/net/hyperv/netvsc.c38
-rw-r--r--drivers/net/hyperv/netvsc_drv.c11
-rw-r--r--drivers/net/hyperv/rndis_filter.c9
-rw-r--r--drivers/net/ieee802154/Kconfig12
-rw-r--r--drivers/net/ieee802154/cc2520.c3
-rw-r--r--drivers/net/ipvlan/ipvlan_main.c4
-rw-r--r--drivers/net/loopback.c38
-rw-r--r--drivers/net/macvlan.c3
-rw-r--r--drivers/net/netdevsim/Makefile2
-rw-r--r--drivers/net/netdevsim/bus.c1
-rw-r--r--drivers/net/netdevsim/dev.c393
-rw-r--r--drivers/net/netdevsim/fib.c176
-rw-r--r--drivers/net/netdevsim/health.c319
-rw-r--r--drivers/net/netdevsim/netdev.c10
-rw-r--r--drivers/net/netdevsim/netdevsim.h33
-rw-r--r--drivers/net/nlmon.c28
-rw-r--r--drivers/net/phy/Kconfig17
-rw-r--r--drivers/net/phy/Makefile1
-rw-r--r--drivers/net/phy/at803x.c312
-rw-r--r--drivers/net/phy/broadcom.c89
-rw-r--r--drivers/net/phy/dp83640.c16
-rw-r--r--drivers/net/phy/dp83867.c152
-rw-r--r--drivers/net/phy/dp83869.c431
-rw-r--r--drivers/net/phy/marvell.c255
-rw-r--r--drivers/net/phy/marvell10g.c25
-rw-r--r--drivers/net/phy/mdio-sun4i.c3
-rw-r--r--drivers/net/phy/mdio_bus.c17
-rw-r--r--drivers/net/phy/mscc.c208
-rw-r--r--drivers/net/phy/phy-core.c44
-rw-r--r--drivers/net/phy/phy.c67
-rw-r--r--drivers/net/phy/phy_device.c220
-rw-r--r--drivers/net/phy/phylink.c95
-rw-r--r--drivers/net/phy/sfp-bus.c216
-rw-r--r--drivers/net/phy/sfp.c630
-rw-r--r--drivers/net/slip/slip.c2
-rw-r--r--drivers/net/team/team.c5
-rw-r--r--drivers/net/tun.c53
-rw-r--r--drivers/net/usb/ax88172a.c2
-rw-r--r--drivers/net/usb/ax88179_178a.c35
-rw-r--r--drivers/net/usb/cdc_ether.c7
-rw-r--r--drivers/net/usb/cdc_ncm.c2
-rw-r--r--drivers/net/usb/lan78xx.c3
-rw-r--r--drivers/net/usb/qmi_wwan.c2
-rw-r--r--drivers/net/usb/r8152.c1307
-rw-r--r--drivers/net/veth.c43
-rw-r--r--drivers/net/virtio_net.c7
-rw-r--r--drivers/net/vsockmon.c31
-rw-r--r--drivers/net/vxlan.c29
-rw-r--r--drivers/net/wimax/i2400m/debugfs.c6
-rw-r--r--drivers/net/wimax/i2400m/usb.c2
-rw-r--r--drivers/net/wireless/admtek/adm8211.c6
-rw-r--r--drivers/net/wireless/ath/Kconfig12
-rw-r--r--drivers/net/wireless/ath/ar5523/Kconfig14
-rw-r--r--drivers/net/wireless/ath/ar5523/ar5523.c3
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.c5
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c55
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h9
-rw-r--r--drivers/net/wireless/ath/ath10k/coredump.c38
-rw-r--r--drivers/net/wireless/ath/ath10k/coredump.h1
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.c3
-rw-r--r--drivers/net/wireless/ath/ath10k/debugfs_sta.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c4
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.c4
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.h3
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c188
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.h1
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c62
-rw-r--r--drivers/net/wireless/ath/ath10k/qmi.c55
-rw-r--r--drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.c22
-rw-r--r--drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.h1
-rw-r--r--drivers/net/wireless/ath/ath10k/sdio.c3
-rw-r--r--drivers/net/wireless/ath/ath10k/snoc.c387
-rw-r--r--drivers/net/wireless/ath/ath10k/snoc.h30
-rw-r--r--drivers/net/wireless/ath/ath10k/txrx.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/usb.c9
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.c82
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.h68
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c98
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.h27
-rw-r--r--drivers/net/wireless/ath/ath5k/eeprom.c4
-rw-r--r--drivers/net/wireless/ath/ath5k/pci.c3
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/Kconfig58
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_hw.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_txrx.c23
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c5
-rw-r--r--drivers/net/wireless/ath/carl9170/main.c3
-rw-r--r--drivers/net/wireless/ath/regd.c50
-rw-r--r--drivers/net/wireless/ath/wcn36xx/hal.h2
-rw-r--r--drivers/net/wireless/ath/wcn36xx/main.c7
-rw-r--r--drivers/net/wireless/ath/wil6210/boot_loader.h13
-rw-r--r--drivers/net/wireless/ath/wil6210/cfg80211.c13
-rw-r--r--drivers/net/wireless/ath/wil6210/debug.c13
-rw-r--r--drivers/net/wireless/ath/wil6210/debugfs.c13
-rw-r--r--drivers/net/wireless/ath/wil6210/ethtool.c13
-rw-r--r--drivers/net/wireless/ath/wil6210/fw.c13
-rw-r--r--drivers/net/wireless/ath/wil6210/fw.h13
-rw-r--r--drivers/net/wireless/ath/wil6210/fw_inc.c13
-rw-r--r--drivers/net/wireless/ath/wil6210/interrupt.c13
-rw-r--r--drivers/net/wireless/ath/wil6210/main.c13
-rw-r--r--drivers/net/wireless/ath/wil6210/netdev.c13
-rw-r--r--drivers/net/wireless/ath/wil6210/p2p.c13
-rw-r--r--drivers/net/wireless/ath/wil6210/pcie_bus.c19
-rw-r--r--drivers/net/wireless/ath/wil6210/pm.c13
-rw-r--r--drivers/net/wireless/ath/wil6210/pmc.c13
-rw-r--r--drivers/net/wireless/ath/wil6210/pmc.h17
-rw-r--r--drivers/net/wireless/ath/wil6210/rx_reorder.c13
-rw-r--r--drivers/net/wireless/ath/wil6210/trace.c13
-rw-r--r--drivers/net/wireless/ath/wil6210/trace.h13
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.c13
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.h13
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx_edma.c13
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx_edma.h13
-rw-r--r--drivers/net/wireless/ath/wil6210/wil6210.h13
-rw-r--r--drivers/net/wireless/ath/wil6210/wil_crash_dump.c13
-rw-r--r--drivers/net/wireless/ath/wil6210/wil_platform.c15
-rw-r--r--drivers/net/wireless/ath/wil6210/wil_platform.h13
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.c21
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.h13
-rw-r--r--drivers/net/wireless/atmel/Kconfig42
-rw-r--r--drivers/net/wireless/atmel/atmel_cs.c2
-rw-r--r--drivers/net/wireless/broadcom/b43/dma.c4
-rw-r--r--drivers/net/wireless/broadcom/b43/main.c6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c53
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c81
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c5
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c3
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h13
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/channel.c10
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c3
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c16
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/include/brcmu_wifi.h2
-rw-r--r--drivers/net/wireless/cisco/Kconfig2
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2100.c3
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2200.c3
-rw-r--r--drivers/net/wireless/intel/ipw2x00/libipw_rx.c4
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945-mac.c8
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965-mac.c5
-rw-r--r--drivers/net/wireless/intel/iwlegacy/common.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/Makefile3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/22000.c85
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/9000.c25
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/led.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/rs.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/tx.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.c287
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.h84
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/d3.h8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h514
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h33
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rx.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/scan.h258
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/sta.h10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h82
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/tx.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.c811
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.h47
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/debugfs.c35
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/error-dump.h63
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/file.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/img.h30
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/runtime.h69
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h31
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-csr.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c891
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h22
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-fh.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c35
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-prph.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.h40
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c31
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c392
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/led.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c33
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c55
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h56
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c42
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/power.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c16
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rx.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c29
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/scan.c625
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-event.c189
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-event.h21
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tt.c43
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c18
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/utils.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c77
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/internal.h9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/rx.c59
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c180
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c66
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx.c16
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c51
-rw-r--r--drivers/net/wireless/marvell/libertas/if_sdio.c5
-rw-r--r--drivers/net/wireless/marvell/libertas/mesh.c1
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.c5
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.h1
-rw-r--r--drivers/net/wireless/marvell/mwifiex/pcie.c9
-rw-r--r--drivers/net/wireless/marvell/mwifiex/scan.c14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sdio.c33
-rw-r--r--drivers/net/wireless/marvell/mwl8k.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/Makefile2
-rw-r--r--drivers/net/wireless/mediatek/mt76/agg-rx.c20
-rw-r--r--drivers/net/wireless/mediatek/mt76/airtime.c326
-rw-r--r--drivers/net/wireless/mediatek/mt76/debugfs.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.c11
-rw-r--r--drivers/net/wireless/mediatek/mt76/mac80211.c194
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76.h113
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/debugfs.c38
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/dma.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/init.c14
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mac.c141
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/main.c24
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h11
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/regs.h5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c100
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/dma.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c18
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/eeprom.h3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/init.c43
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mac.c187
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/main.c51
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mcu.c16
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h11
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/pci.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/regs.h57
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/init.c27
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/main.c9
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/pci.c29
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/phy.c13
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/usb.c16
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02.h3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mac.c119
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mac.h8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c10
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_usb.h1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c31
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_util.c23
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/mac.h1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2u.h1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c30
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c26
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c27
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c9
-rw-r--r--drivers/net/wireless/mediatek/mt76/tx.c23
-rw-r--r--drivers/net/wireless/mediatek/mt76/usb.c44
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/debugfs.c2
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/main.c3
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/phy.c2
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/bus.h23
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/cfg80211.c58
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/commands.c198
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/commands.h4
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/core.c151
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/core.h3
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/event.c47
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c12
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pcie/pcie_priv.h4
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c85
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c35
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/qlink.h133
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/switchdev.h24
-rw-r--r--drivers/net/wireless/ralink/rt2x00/Kconfig44
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800lib.c9
-rw-r--r--drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225se.c42
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h93
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c6
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c9
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c509
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/base.c3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c9
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c9
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/efuse.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/ps.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/regd.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c8
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c35
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c8
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c9
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c10
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c25
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c21
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c11
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/def.h619
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c31
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c189
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c27
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c18
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723com/phy_common.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c19
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/usb.c5
-rw-r--r--drivers/net/wireless/realtek/rtw88/Makefile1
-rw-r--r--drivers/net/wireless/realtek/rtw88/bf.c400
-rw-r--r--drivers/net/wireless/realtek/rtw88/bf.h92
-rw-r--r--drivers/net/wireless/realtek/rtw88/coex.c38
-rw-r--r--drivers/net/wireless/realtek/rtw88/debug.c174
-rw-r--r--drivers/net/wireless/realtek/rtw88/debug.h2
-rw-r--r--drivers/net/wireless/realtek/rtw88/fw.c227
-rw-r--r--drivers/net/wireless/realtek/rtw88/fw.h80
-rw-r--r--drivers/net/wireless/realtek/rtw88/hci.h12
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac.c138
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac.h6
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac80211.c263
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.c320
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.h239
-rw-r--r--drivers/net/wireless/realtek/rtw88/pci.c236
-rw-r--r--drivers/net/wireless/realtek/rtw88/pci.h16
-rw-r--r--drivers/net/wireless/realtek/rtw88/phy.c171
-rw-r--r--drivers/net/wireless/realtek/rtw88/phy.h30
-rw-r--r--drivers/net/wireless/realtek/rtw88/ps.c191
-rw-r--r--drivers/net/wireless/realtek/rtw88/ps.h18
-rw-r--r--drivers/net/wireless/realtek/rtw88/reg.h7
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822b.c469
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822b.h12
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822b_table.c829
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822b_table.h2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c.c376
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c.h12
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c_table.c94
-rw-r--r--drivers/net/wireless/realtek/rtw88/rx.c101
-rw-r--r--drivers/net/wireless/realtek/rtw88/rx.h11
-rw-r--r--drivers/net/wireless/realtek/rtw88/sec.c21
-rw-r--r--drivers/net/wireless/realtek/rtw88/sec.h1
-rw-r--r--drivers/net/wireless/realtek/rtw88/tx.c135
-rw-r--r--drivers/net/wireless/realtek/rtw88/tx.h8
-rw-r--r--drivers/net/wireless/realtek/rtw88/util.c27
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mac80211.c3
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mgmt.c1
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_usb.c4
-rw-r--r--drivers/net/wireless/st/cw1200/fwio.c6
-rw-r--r--drivers/net/wireless/st/cw1200/queue.c3
-rw-r--r--drivers/net/wireless/st/cw1200/scan.c3
-rw-r--r--drivers/net/wireless/ti/wl1251/sdio.c25
-rw-r--r--drivers/net/wireless/ti/wl12xx/Kconfig8
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c15
-rw-r--r--drivers/net/wireless/ti/wlcore/sdio.c8
-rw-r--r--drivers/net/wireless/ti/wlcore/spi.c2
-rw-r--r--drivers/net/wireless/virt_wifi.c4
-rw-r--r--drivers/net/xen-netback/interface.c114
-rw-r--r--drivers/nfc/nfcmrvl/Kconfig2
-rw-r--r--drivers/nfc/nfcmrvl/i2c.c1
-rw-r--r--drivers/nfc/nxp-nci/i2c.c6
-rw-r--r--drivers/nfc/pn533/Kconfig11
-rw-r--r--drivers/nfc/pn533/Makefile2
-rw-r--r--drivers/nfc/pn533/i2c.c32
-rw-r--r--drivers/nfc/pn533/pn533.c287
-rw-r--r--drivers/nfc/pn533/pn533.h40
-rw-r--r--drivers/nfc/pn533/uart.c330
-rw-r--r--drivers/nfc/pn533/usb.c16
-rw-r--r--drivers/nfc/port100.c2
-rw-r--r--drivers/nfc/s3fwrn5/i2c.c1
-rw-r--r--drivers/nubus/nubus.c2
-rw-r--r--drivers/nvdimm/Kconfig1
-rw-r--r--drivers/nvdimm/core.c1
-rw-r--r--drivers/nvdimm/nd-core.h1
-rw-r--r--drivers/nvdimm/region_devs.c13
-rw-r--r--drivers/nvme/host/Kconfig10
-rw-r--r--drivers/nvme/host/Makefile1
-rw-r--r--drivers/nvme/host/core.c42
-rw-r--r--drivers/nvme/host/fc.c49
-rw-r--r--drivers/nvme/host/hwmon.c259
-rw-r--r--drivers/nvme/host/multipath.c13
-rw-r--r--drivers/nvme/host/nvme.h33
-rw-r--r--drivers/nvme/host/pci.c9
-rw-r--r--drivers/nvme/host/rdma.c16
-rw-r--r--drivers/nvme/host/tcp.c4
-rw-r--r--drivers/nvme/target/admin-cmd.c133
-rw-r--r--drivers/nvme/target/core.c20
-rw-r--r--drivers/nvme/target/discovery.c70
-rw-r--r--drivers/nvme/target/fabrics-cmd.c15
-rw-r--r--drivers/nvme/target/fc.c31
-rw-r--r--drivers/nvme/target/io-cmd-bdev.c43
-rw-r--r--drivers/nvme/target/io-cmd-file.c20
-rw-r--r--drivers/nvme/target/loop.c7
-rw-r--r--drivers/nvme/target/nvmet.h10
-rw-r--r--drivers/nvme/target/rdma.c8
-rw-r--r--drivers/nvme/target/tcp.c14
-rw-r--r--drivers/nvmem/Kconfig23
-rw-r--r--drivers/nvmem/Makefile4
-rw-r--r--drivers/nvmem/core.c61
-rw-r--r--drivers/nvmem/imx-ocotp-scu.c120
-rw-r--r--drivers/nvmem/imx-ocotp.c4
-rw-r--r--drivers/nvmem/rockchip-otp.c268
-rw-r--r--drivers/nvmem/sc27xx-efuse.c13
-rw-r--r--drivers/nvmem/sprd-efuse.c424
-rw-r--r--drivers/of/fdt.c20
-rw-r--r--drivers/of/of_mdio.c4
-rw-r--r--drivers/of/of_net.c16
-rw-r--r--drivers/of/property.c16
-rw-r--r--drivers/opp/core.c69
-rw-r--r--drivers/oprofile/oprofile_perf.c8
-rw-r--r--drivers/parport/daisy.c40
-rw-r--r--drivers/parport/probe.c2
-rw-r--r--drivers/parport/share.c26
-rw-r--r--drivers/pci/pci.c18
-rw-r--r--drivers/pcmcia/cardbus.c2
-rw-r--r--drivers/pcmcia/cistpl.c1
-rw-r--r--drivers/pcmcia/i82092.c34
-rw-r--r--drivers/pcmcia/i82092aa.h2
-rw-r--r--drivers/pcmcia/yenta_socket.c3
-rw-r--r--drivers/perf/arm-cci.c4
-rw-r--r--drivers/perf/arm-ccn.c4
-rw-r--r--drivers/perf/arm_smmuv3_pmu.c5
-rw-r--r--drivers/perf/fsl_imx8_ddr_perf.c124
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c5
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_hha_pmu.c4
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c4
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_pmu.c26
-rw-r--r--drivers/perf/thunderx2_pmu.c267
-rw-r--r--drivers/perf/xgene_pmu.c14
-rw-r--r--drivers/phy/allwinner/Kconfig11
-rw-r--r--drivers/phy/allwinner/Makefile1
-rw-r--r--drivers/phy/allwinner/phy-sun50i-usb3.c190
-rw-r--r--drivers/phy/broadcom/phy-brcm-usb-init.c10
-rw-r--r--drivers/phy/hisilicon/phy-hisi-inno-usb2.c4
-rw-r--r--drivers/phy/hisilicon/phy-histb-combphy.c4
-rw-r--r--drivers/phy/lantiq/phy-lantiq-vrx200-pcie.c3
-rw-r--r--drivers/phy/marvell/phy-mvebu-a3700-utmi.c9
-rw-r--r--drivers/phy/phy-xgene.c2
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp.c120
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp.h96
-rw-r--r--drivers/phy/qualcomm/phy-qcom-usb-hs.c7
-rw-r--r--drivers/phy/renesas/phy-rcar-gen2.c5
-rw-r--r--drivers/phy/renesas/phy-rcar-gen3-usb2.c7
-rw-r--r--drivers/phy/rockchip/Kconfig8
-rw-r--r--drivers/phy/rockchip/Makefile1
-rw-r--r--drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c805
-rw-r--r--drivers/phy/rockchip/phy-rockchip-inno-usb2.c1
-rw-r--r--drivers/phy/tegra/xusb-tegra186.c23
-rw-r--r--drivers/phy/tegra/xusb-tegra210.c137
-rw-r--r--drivers/phy/tegra/xusb.c93
-rw-r--r--drivers/phy/tegra/xusb.h4
-rw-r--r--drivers/phy/ti/Kconfig4
-rw-r--r--drivers/phy/ti/phy-dm816x-usb.c3
-rw-r--r--drivers/phy/ti/phy-gmii-sel.c2
-rw-r--r--drivers/pinctrl/Kconfig36
-rw-r--r--drivers/pinctrl/Makefile1
-rw-r--r--drivers/pinctrl/actions/pinctrl-owl.c4
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm281xx.c4
-rw-r--r--drivers/pinctrl/bcm/pinctrl-cygnus-mux.c7
-rw-r--r--drivers/pinctrl/bcm/pinctrl-iproc-gpio.c5
-rw-r--r--drivers/pinctrl/bcm/pinctrl-ns2-mux.c6
-rw-r--r--drivers/pinctrl/bcm/pinctrl-nsp-gpio.c119
-rw-r--r--drivers/pinctrl/bcm/pinctrl-nsp-mux.c6
-rw-r--r--drivers/pinctrl/devicetree.c50
-rw-r--r--drivers/pinctrl/devicetree.h7
-rw-r--r--drivers/pinctrl/freescale/Kconfig12
-rw-r--r--drivers/pinctrl/intel/Kconfig7
-rw-r--r--drivers/pinctrl/intel/Makefile1
-rw-r--r--drivers/pinctrl/intel/pinctrl-cherryview.c6
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.c119
-rw-r--r--drivers/pinctrl/intel/pinctrl-lewisburg.c171
-rw-r--r--drivers/pinctrl/intel/pinctrl-tigerlake.c454
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-common.c4
-rw-r--r--drivers/pinctrl/meson/Kconfig6
-rw-r--r--drivers/pinctrl/meson/Makefile1
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-a1.c942
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-axg.c1
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-g12a.c9
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-gxbb.c1
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-gxl.c1
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson.c38
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson.h7
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson8.c1
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson8b.c1
-rw-r--r--drivers/pinctrl/mvebu/Kconfig10
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-37xx.c40
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-mvebu.c4
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-orion.c7
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c12
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik.c81
-rw-r--r--drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c32
-rw-r--r--drivers/pinctrl/pinctrl-amd.c3
-rw-r--r--drivers/pinctrl/pinctrl-artpec6.c4
-rw-r--r--drivers/pinctrl/pinctrl-at91-pio4.c65
-rw-r--r--drivers/pinctrl/pinctrl-at91.c55
-rw-r--r--drivers/pinctrl/pinctrl-bm1880.c4
-rw-r--r--drivers/pinctrl/pinctrl-coh901.c54
-rw-r--r--drivers/pinctrl/pinctrl-da850-pupd.c4
-rw-r--r--drivers/pinctrl/pinctrl-digicolor.c4
-rw-r--r--drivers/pinctrl/pinctrl-equilibrium.c944
-rw-r--r--drivers/pinctrl/pinctrl-equilibrium.h144
-rw-r--r--drivers/pinctrl/pinctrl-ingenic.c50
-rw-r--r--drivers/pinctrl/pinctrl-lpc18xx.c4
-rw-r--r--drivers/pinctrl/pinctrl-ocelot.c23
-rw-r--r--drivers/pinctrl/pinctrl-oxnas.c29
-rw-r--r--drivers/pinctrl/pinctrl-pic32.c29
-rw-r--r--drivers/pinctrl/pinctrl-pistachio.c30
-rw-r--r--drivers/pinctrl/pinctrl-rockchip.c382
-rw-r--r--drivers/pinctrl/pinctrl-rza1.c8
-rw-r--r--drivers/pinctrl/pinctrl-rza2.c8
-rw-r--r--drivers/pinctrl/pinctrl-rzn1.c2
-rw-r--r--drivers/pinctrl/pinctrl-st.c53
-rw-r--r--drivers/pinctrl/pinctrl-stmfx.c21
-rw-r--r--drivers/pinctrl/pinctrl-tb10x.c4
-rw-r--r--drivers/pinctrl/pinctrl-u300.c4
-rw-r--r--drivers/pinctrl/pinctrl-xway.c4
-rw-r--r--drivers/pinctrl/pxa/pinctrl-pxa25x.c13
-rw-r--r--drivers/pinctrl/pxa/pinctrl-pxa27x.c13
-rw-r--r--drivers/pinctrl/qcom/Kconfig101
-rw-r--r--drivers/pinctrl/qcom/Makefile1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8976.c1127
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sc7180.c18
-rw-r--r--drivers/pinctrl/qcom/pinctrl-spmi-gpio.c5
-rw-r--r--drivers/pinctrl/qcom/pinctrl-spmi-mpp.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c121
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos.c14
-rw-r--r--drivers/pinctrl/samsung/pinctrl-s3c24xx.c6
-rw-r--r--drivers/pinctrl/samsung/pinctrl-s3c64xx.c6
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.c10
-rw-r--r--drivers/pinctrl/sh-pfc/Kconfig12
-rw-r--r--drivers/pinctrl/sh-pfc/Makefile4
-rw-r--r--drivers/pinctrl/sh-pfc/core.c32
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7795-es1.c2
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7795.c2
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7796.c35
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a77965.c863
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a77990.c57
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7734.c4
-rw-r--r--drivers/pinctrl/sh-pfc/sh_pfc.h8
-rw-r--r--drivers/pinctrl/sirf/pinctrl-atlas7.c41
-rw-r--r--drivers/pinctrl/sirf/pinctrl-sirf.c43
-rw-r--r--drivers/pinctrl/spear/pinctrl-plgpio.c51
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear.c4
-rw-r--r--drivers/pinctrl/sprd/pinctrl-sprd.c23
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.c4
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra-xusb.c10
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra.c3
-rw-r--r--drivers/pinctrl/ti/pinctrl-ti-iodelay.c2
-rw-r--r--drivers/pinctrl/vt8500/pinctrl-wmt.c4
-rw-r--r--drivers/pinctrl/zte/pinctrl-zx.c4
-rw-r--r--drivers/platform/chrome/cros_ec_trace.c5
-rw-r--r--drivers/platform/goldfish/Kconfig3
-rw-r--r--drivers/platform/mips/Kconfig4
-rw-r--r--drivers/platform/mips/cpu_hwmon.c17
-rw-r--r--drivers/platform/x86/asus-laptop.c2
-rw-r--r--drivers/platform/x86/eeepc-laptop.c2
-rw-r--r--drivers/platform/x86/intel_oaktrail.c10
-rw-r--r--drivers/power/avs/smartreflex.c2
-rw-r--r--drivers/power/reset/at91-reset.c6
-rw-r--r--drivers/power/reset/at91-sama5d2_shdwc.c8
-rw-r--r--drivers/power/supply/ab8500_btemp.c9
-rw-r--r--drivers/power/supply/ab8500_charger.c7
-rw-r--r--drivers/power/supply/ab8500_fg.c26
-rw-r--r--drivers/power/supply/abx500_chargalg.c8
-rw-r--r--drivers/power/supply/axp20x_usb_power.c8
-rw-r--r--drivers/power/supply/bd70528-charger.c1
-rw-r--r--drivers/power/supply/cpcap-battery.c271
-rw-r--r--drivers/power/supply/cpcap-charger.c222
-rw-r--r--drivers/power/supply/test_power.c61
-rw-r--r--drivers/powercap/intel_rapl_common.c2
-rw-r--r--drivers/ptp/Kconfig12
-rw-r--r--drivers/ptp/Makefile1
-rw-r--r--drivers/ptp/idt8a340_reg.h659
-rw-r--r--drivers/ptp/ptp_chardev.c20
-rw-r--r--drivers/ptp/ptp_clockmatrix.c1427
-rw-r--r--drivers/ptp/ptp_clockmatrix.h104
-rw-r--r--drivers/ptp/ptp_dte.c4
-rw-r--r--drivers/regulator/Kconfig8
-rw-r--r--drivers/regulator/ab8500.c17
-rw-r--r--drivers/regulator/bd70528-regulator.c1
-rw-r--r--drivers/regulator/bd718x7-regulator.c1
-rw-r--r--drivers/regulator/core.c19
-rw-r--r--drivers/regulator/da9062-regulator.c63
-rw-r--r--drivers/regulator/da9063-regulator.c9
-rw-r--r--drivers/regulator/da9211-regulator.c12
-rw-r--r--drivers/regulator/fan53555.c2
-rw-r--r--drivers/regulator/fixed.c2
-rw-r--r--drivers/regulator/internal.h1
-rw-r--r--drivers/regulator/max77686-regulator.c5
-rw-r--r--drivers/regulator/max8907-regulator.c15
-rw-r--r--drivers/regulator/pbias-regulator.c75
-rw-r--r--drivers/regulator/pcap-regulator.c4
-rw-r--r--drivers/regulator/qcom-rpmh-regulator.c62
-rw-r--r--drivers/regulator/qcom_smd-regulator.c92
-rw-r--r--drivers/regulator/qcom_spmi-regulator.c41
-rw-r--r--drivers/regulator/rk808-regulator.c29
-rw-r--r--drivers/regulator/rn5t618-regulator.c2
-rw-r--r--drivers/regulator/s2mps11.c7
-rw-r--r--drivers/regulator/s5m8767.c7
-rw-r--r--drivers/regulator/slg51000-regulator.c13
-rw-r--r--drivers/regulator/stm32-vrefbuf.c4
-rw-r--r--drivers/regulator/stpmic1_regulator.c6
-rw-r--r--drivers/regulator/tps6105x-regulator.c2
-rw-r--r--drivers/regulator/tps65090-regulator.c26
-rw-r--r--drivers/regulator/tps65132-regulator.c17
-rw-r--r--drivers/regulator/uniphier-regulator.c4
-rw-r--r--drivers/regulator/vexpress-regulator.c5
-rw-r--r--drivers/s390/block/dasd_genhd.c4
-rw-r--r--drivers/s390/cio/Makefile4
-rw-r--r--drivers/s390/cio/qdio.h27
-rw-r--r--drivers/s390/cio/qdio_main.c57
-rw-r--r--drivers/s390/cio/qdio_setup.c2
-rw-r--r--drivers/s390/cio/qdio_thinint.c41
-rw-r--r--drivers/s390/cio/vfio_ccw_cp.h1
-rw-r--r--drivers/s390/cio/vfio_ccw_fsm.c11
-rw-r--r--drivers/s390/cio/vfio_ccw_private.h1
-rw-r--r--drivers/s390/cio/vfio_ccw_trace.c14
-rw-r--r--drivers/s390/cio/vfio_ccw_trace.h76
-rw-r--r--drivers/s390/crypto/pkey_api.c26
-rw-r--r--drivers/s390/net/ism.h2
-rw-r--r--drivers/s390/net/qeth_core.h20
-rw-r--r--drivers/s390/net/qeth_core_main.c200
-rw-r--r--drivers/s390/net/qeth_core_mpc.h5
-rw-r--r--drivers/s390/net/qeth_core_sys.c80
-rw-r--r--drivers/s390/net/qeth_ethtool.c2
-rw-r--r--drivers/s390/net/qeth_l2_main.c68
-rw-r--r--drivers/s390/net/qeth_l2_sys.c43
-rw-r--r--drivers/s390/net/qeth_l3.h27
-rw-r--r--drivers/s390/net/qeth_l3_main.c257
-rw-r--r--drivers/s390/net/qeth_l3_sys.c96
-rw-r--r--drivers/scsi/a3000.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c8
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c8
-rw-r--r--drivers/scsi/scsi.c4
-rw-r--r--drivers/scsi/scsi_lib.c3
-rw-r--r--drivers/scsi/scsi_sysfs.c8
-rw-r--r--drivers/scsi/sd.c15
-rw-r--r--drivers/scsi/sd.h12
-rw-r--r--drivers/scsi/sd_zbc.c278
-rw-r--r--drivers/sh/intc/core.c4
-rw-r--r--drivers/soc/fsl/qbman/qman.c7
-rw-r--r--drivers/soundwire/Kconfig2
-rw-r--r--drivers/soundwire/bus.c7
-rw-r--r--drivers/soundwire/cadence_master.c292
-rw-r--r--drivers/soundwire/cadence_master.h39
-rw-r--r--drivers/soundwire/intel.c201
-rw-r--r--drivers/soundwire/intel_init.c1
-rw-r--r--drivers/soundwire/slave.c98
-rw-r--r--drivers/spi/Kconfig19
-rw-r--r--drivers/spi/spi-at91-usart.c4
-rw-r--r--drivers/spi/spi-atmel.c219
-rw-r--r--drivers/spi/spi-axi-spi-engine.c16
-rw-r--r--drivers/spi/spi-bcm-qspi.c7
-rw-r--r--drivers/spi/spi-bcm2835.c2
-rw-r--r--drivers/spi/spi-bcm63xx-hsspi.c3
-rw-r--r--drivers/spi/spi-bcm63xx.c2
-rw-r--r--drivers/spi/spi-cavium.c3
-rw-r--r--drivers/spi/spi-dw-mmio.c6
-rw-r--r--drivers/spi/spi-dw-pci.c24
-rw-r--r--drivers/spi/spi-dw.c4
-rw-r--r--drivers/spi/spi-dw.h1
-rw-r--r--drivers/spi/spi-falcon.c2
-rw-r--r--drivers/spi/spi-fsl-cpm.c3
-rw-r--r--drivers/spi/spi-fsl-dspi.c43
-rw-r--r--drivers/spi/spi-fsl-espi.c19
-rw-r--r--drivers/spi/spi-fsl-lpspi.c8
-rw-r--r--drivers/spi/spi-fsl-qspi.c55
-rw-r--r--drivers/spi/spi-fsl-spi.c3
-rw-r--r--drivers/spi/spi-gpio.c9
-rw-r--r--drivers/spi/spi-img-spfi.c2
-rw-r--r--drivers/spi/spi-imx.c4
-rw-r--r--drivers/spi/spi-lantiq-ssc.c10
-rw-r--r--drivers/spi/spi-loopback-test.c12
-rw-r--r--drivers/spi/spi-mem.c2
-rw-r--r--drivers/spi/spi-mpc512x-psc.c3
-rw-r--r--drivers/spi/spi-mpc52xx-psc.c3
-rw-r--r--drivers/spi/spi-mt65xx.c23
-rw-r--r--drivers/spi/spi-mxic.c8
-rw-r--r--drivers/spi/spi-npcm-pspi.c3
-rw-r--r--drivers/spi/spi-nxp-fspi.c2
-rw-r--r--drivers/spi/spi-omap-100k.c7
-rw-r--r--drivers/spi/spi-omap2-mcspi.c105
-rw-r--r--drivers/spi/spi-orion.c9
-rw-r--r--drivers/spi/spi-pic32.c46
-rw-r--r--drivers/spi/spi-pl022.c29
-rw-r--r--drivers/spi/spi-pxa2xx.c95
-rw-r--r--drivers/spi/spi-qup.c4
-rw-r--r--drivers/spi/spi-rspi.c8
-rw-r--r--drivers/spi/spi-s3c64xx.c6
-rw-r--r--drivers/spi/spi-sc18is602.c3
-rw-r--r--drivers/spi/spi-sh-hspi.c3
-rw-r--r--drivers/spi/spi-sifive.c11
-rw-r--r--drivers/spi/spi-slave-mt27xx.c12
-rw-r--r--drivers/spi/spi-sprd-adi.c8
-rw-r--r--drivers/spi/spi-sprd.c15
-rw-r--r--drivers/spi/spi-st-ssc4.c3
-rw-r--r--drivers/spi/spi-stm32-qspi.c3
-rw-r--r--drivers/spi/spi-tegra114.c42
-rw-r--r--drivers/spi/spi-tegra20-sflash.c5
-rw-r--r--drivers/spi/spi-tegra20-slink.c8
-rw-r--r--drivers/spi/spi-topcliff-pch.c7
-rw-r--r--drivers/spi/spi-txx9.c78
-rw-r--r--drivers/spi/spi-xcomm.c3
-rw-r--r--drivers/spi/spi-xilinx.c7
-rw-r--r--drivers/spi/spi-xtensa-xtfpga.c10
-rw-r--r--drivers/spi/spi-zynq-qspi.c84
-rw-r--r--drivers/spi/spi.c332
-rw-r--r--drivers/spi/spidev.c9
-rw-r--r--drivers/staging/Kconfig2
-rw-r--r--drivers/staging/Makefile2
-rw-r--r--drivers/staging/hp/Kconfig (renamed from drivers/net/ethernet/hp/Kconfig)0
-rw-r--r--drivers/staging/hp/Makefile (renamed from drivers/net/ethernet/hp/Makefile)0
-rw-r--r--drivers/staging/hp/hp100.c (renamed from drivers/net/ethernet/hp/hp100.c)0
-rw-r--r--drivers/staging/hp/hp100.h (renamed from drivers/net/ethernet/hp/hp100.h)0
-rw-r--r--drivers/staging/media/hantro/hantro.h20
-rw-r--r--drivers/staging/media/hantro/hantro_drv.c16
-rw-r--r--drivers/staging/media/hantro/hantro_g1_h264_dec.c52
-rw-r--r--drivers/staging/media/hantro/hantro_g1_mpeg2_dec.c11
-rw-r--r--drivers/staging/media/hantro/hantro_g1_vp8_dec.c11
-rw-r--r--drivers/staging/media/hantro/hantro_h1_jpeg_enc.c4
-rw-r--r--drivers/staging/media/hantro/hantro_h264.c120
-rw-r--r--drivers/staging/media/hantro/hantro_hw.h7
-rw-r--r--drivers/staging/media/hantro/hantro_v4l2.c48
-rw-r--r--drivers/staging/media/hantro/rk3288_vpu_hw.c20
-rw-r--r--drivers/staging/media/hantro/rk3399_vpu_hw.c12
-rw-r--r--drivers/staging/media/hantro/rk3399_vpu_hw_jpeg_enc.c4
-rw-r--r--drivers/staging/media/hantro/rk3399_vpu_hw_mpeg2_dec.c11
-rw-r--r--drivers/staging/media/hantro/rk3399_vpu_hw_vp8_dec.c12
-rw-r--r--drivers/staging/media/imx/imx-ic-prp.c25
-rw-r--r--drivers/staging/media/imx/imx-ic-prpencvf.c51
-rw-r--r--drivers/staging/media/imx/imx-media-capture.c21
-rw-r--r--drivers/staging/media/imx/imx-media-csi.c41
-rw-r--r--drivers/staging/media/imx/imx-media-utils.c10
-rw-r--r--drivers/staging/media/imx/imx-media-vdic.c27
-rw-r--r--drivers/staging/media/imx/imx6-mipi-csi2.c27
-rw-r--r--drivers/staging/media/imx/imx7-media-csi.c38
-rw-r--r--drivers/staging/media/imx/imx7-mipi-csis.c36
-rw-r--r--drivers/staging/media/ipu3/Makefile6
-rw-r--r--drivers/staging/media/ipu3/TODO5
-rw-r--r--drivers/staging/media/ipu3/include/intel-ipu3.h5
-rw-r--r--drivers/staging/media/omap4iss/iss.c6
-rw-r--r--drivers/staging/media/omap4iss/iss_video.c4
-rw-r--r--drivers/staging/media/sunxi/cedrus/Makefile2
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus.c64
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus.h33
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_dec.c9
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_h264.c147
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_h265.c616
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_hw.c33
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_hw.h2
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_mpeg2.c2
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_regs.h318
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_video.c102
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_video.h1
-rw-r--r--drivers/staging/most/sound/sound.c8
-rw-r--r--drivers/staging/vboxsf/Kconfig10
-rw-r--r--drivers/staging/vboxsf/Makefile5
-rw-r--r--drivers/staging/vboxsf/TODO7
-rw-r--r--drivers/staging/vboxsf/dir.c418
-rw-r--r--drivers/staging/vboxsf/file.c370
-rw-r--r--drivers/staging/vboxsf/shfl_hostintf.h901
-rw-r--r--drivers/staging/vboxsf/super.c501
-rw-r--r--drivers/staging/vboxsf/utils.c551
-rw-r--r--drivers/staging/vboxsf/vboxsf_wrappers.c371
-rw-r--r--drivers/staging/vboxsf/vfsmod.h137
-rw-r--r--drivers/thunderbolt/cap.c6
-rw-r--r--drivers/thunderbolt/ctl.c8
-rw-r--r--drivers/thunderbolt/eeprom.c11
-rw-r--r--drivers/thunderbolt/icm.c157
-rw-r--r--drivers/thunderbolt/lc.c193
-rw-r--r--drivers/thunderbolt/path.c52
-rw-r--r--drivers/thunderbolt/switch.c586
-rw-r--r--drivers/thunderbolt/tb.c340
-rw-r--r--drivers/thunderbolt/tb.h81
-rw-r--r--drivers/thunderbolt/tb_msgs.h2
-rw-r--r--drivers/thunderbolt/tb_regs.h97
-rw-r--r--drivers/thunderbolt/tunnel.c364
-rw-r--r--drivers/thunderbolt/tunnel.h10
-rw-r--r--drivers/thunderbolt/xdomain.c5
-rw-r--r--drivers/tty/serial/8250/8250_men_mcb.c1
-rw-r--r--drivers/tty/serial/men_z135_uart.c1
-rw-r--r--drivers/tty/tty_ldsem.c8
-rw-r--r--drivers/uio/uio_dmem_genirq.c14
-rw-r--r--drivers/usb/cdns3/Kconfig10
-rw-r--r--drivers/usb/cdns3/Makefile1
-rw-r--r--drivers/usb/cdns3/cdns3-ti.c236
-rw-r--r--drivers/usb/chipidea/ci_hdrc_imx.c79
-rw-r--r--drivers/usb/chipidea/ci_hdrc_imx.h2
-rw-r--r--drivers/usb/chipidea/ci_hdrc_tegra.c22
-rw-r--r--drivers/usb/chipidea/core.c2
-rw-r--r--drivers/usb/chipidea/debug.c2
-rw-r--r--drivers/usb/chipidea/udc.c75
-rw-r--r--drivers/usb/chipidea/usbmisc_imx.c31
-rw-r--r--drivers/usb/core/config.c12
-rw-r--r--drivers/usb/core/devio.c19
-rw-r--r--drivers/usb/core/hub.c201
-rw-r--r--drivers/usb/dwc2/core.c2
-rw-r--r--drivers/usb/dwc2/core.h2
-rw-r--r--drivers/usb/dwc2/debugfs.c2
-rw-r--r--drivers/usb/dwc3/Kconfig30
-rw-r--r--drivers/usb/dwc3/core.c37
-rw-r--r--drivers/usb/dwc3/debug.h4
-rw-r--r--drivers/usb/dwc3/debugfs.c2
-rw-r--r--drivers/usb/dwc3/dwc3-of-simple.c28
-rw-r--r--drivers/usb/gadget/composite.c6
-rw-r--r--drivers/usb/gadget/configfs.c1
-rw-r--r--drivers/usb/gadget/function/f_acm.c21
-rw-r--r--drivers/usb/gadget/function/f_obex.c2
-rw-r--r--drivers/usb/gadget/function/f_serial.c21
-rw-r--r--drivers/usb/gadget/function/f_tcm.c13
-rw-r--r--drivers/usb/gadget/function/u_audio.c2
-rw-r--r--drivers/usb/gadget/function/u_serial.c516
-rw-r--r--drivers/usb/gadget/function/u_serial.h8
-rw-r--r--drivers/usb/gadget/legacy/Kconfig26
-rw-r--r--drivers/usb/gadget/legacy/acm_ms.c3
-rw-r--r--drivers/usb/gadget/legacy/mass_storage.c3
-rw-r--r--drivers/usb/gadget/legacy/serial.c49
-rw-r--r--drivers/usb/gadget/udc/Kconfig19
-rw-r--r--drivers/usb/gadget/udc/Makefile1
-rw-r--r--drivers/usb/gadget/udc/at91_udc.c4
-rw-r--r--drivers/usb/gadget/udc/atmel_usba_udc.c3
-rw-r--r--drivers/usb/gadget/udc/bcm63xx_udc.c9
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_core.c4
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_udc.c2
-rw-r--r--drivers/usb/gadget/udc/dummy_hcd.c10
-rw-r--r--drivers/usb/gadget/udc/fsl_qe_udc.h4
-rw-r--r--drivers/usb/gadget/udc/fsl_udc_core.c12
-rw-r--r--drivers/usb/gadget/udc/gr_udc.c7
-rw-r--r--drivers/usb/gadget/udc/lpc32xx_udc.c6
-rw-r--r--drivers/usb/gadget/udc/mv_u3d.h2
-rw-r--r--drivers/usb/gadget/udc/pch_udc.c1
-rw-r--r--drivers/usb/gadget/udc/pxa25x_udc.c4
-rw-r--r--drivers/usb/gadget/udc/pxa27x_udc.c6
-rw-r--r--drivers/usb/gadget/udc/r8a66597-udc.c5
-rw-r--r--drivers/usb/gadget/udc/renesas_usb3.c21
-rw-r--r--drivers/usb/gadget/udc/s3c-hsudc.c5
-rw-r--r--drivers/usb/gadget/udc/s3c2410_udc.c3
-rw-r--r--drivers/usb/gadget/udc/tegra-xudc.c3810
-rw-r--r--drivers/usb/host/Kconfig106
-rw-r--r--drivers/usb/host/bcma-hcd.c5
-rw-r--r--drivers/usb/host/fotg210-hcd.c8
-rw-r--r--drivers/usb/host/imx21-dbg.c2
-rw-r--r--drivers/usb/host/isp1362-hcd.c5
-rw-r--r--drivers/usb/host/ohci-at91.c8
-rw-r--r--drivers/usb/host/ohci-nxp.c2
-rw-r--r--drivers/usb/host/oxu210hp-hcd.c14
-rw-r--r--drivers/usb/host/u132-hcd.c2
-rw-r--r--drivers/usb/host/xhci-pci.c4
-rw-r--r--drivers/usb/host/xhci-ring.c68
-rw-r--r--drivers/usb/host/xhci-tegra.c126
-rw-r--r--drivers/usb/host/xhci-trace.h26
-rw-r--r--drivers/usb/host/xhci.c3
-rw-r--r--drivers/usb/host/xhci.h29
-rw-r--r--drivers/usb/image/microtek.c3
-rw-r--r--drivers/usb/isp1760/isp1760-hcd.c2
-rw-r--r--drivers/usb/misc/Kconfig22
-rw-r--r--drivers/usb/misc/appledisplay.c8
-rw-r--r--drivers/usb/misc/chaoskey.c24
-rw-r--r--drivers/usb/misc/ftdi-elan.c6
-rw-r--r--drivers/usb/misc/idmouse.c36
-rw-r--r--drivers/usb/misc/legousbtower.c303
-rw-r--r--drivers/usb/misc/sisusbvga/Kconfig2
-rw-r--r--drivers/usb/misc/usb251xb.c66
-rw-r--r--drivers/usb/mtu3/mtu3_gadget_ep0.c35
-rw-r--r--drivers/usb/musb/musb_core.c4
-rw-r--r--drivers/usb/musb/musb_debugfs.c2
-rw-r--r--drivers/usb/musb/musb_dsps.c2
-rw-r--r--drivers/usb/musb/musb_gadget.c5
-rw-r--r--drivers/usb/phy/phy-keystone.c4
-rw-r--r--drivers/usb/phy/phy-mxs-usb.c4
-rw-r--r--drivers/usb/renesas_usbhs/common.c5
-rw-r--r--drivers/usb/renesas_usbhs/common.h3
-rw-r--r--drivers/usb/renesas_usbhs/fifo.c4
-rw-r--r--drivers/usb/renesas_usbhs/mod.c19
-rw-r--r--drivers/usb/renesas_usbhs/mod_gadget.c12
-rw-r--r--drivers/usb/roles/class.c21
-rw-r--r--drivers/usb/serial/Kconfig48
-rw-r--r--drivers/usb/serial/ch341.c97
-rw-r--r--drivers/usb/serial/cp210x.c1
-rw-r--r--drivers/usb/serial/ftdi_sio.c3
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h7
-rw-r--r--drivers/usb/serial/mos7720.c4
-rw-r--r--drivers/usb/serial/mos7840.c762
-rw-r--r--drivers/usb/serial/option.c7
-rw-r--r--drivers/usb/serial/pl2303.c124
-rw-r--r--drivers/usb/serial/pl2303.h6
-rw-r--r--drivers/usb/storage/scsiglue.c4
-rw-r--r--drivers/usb/storage/uas.c10
-rw-r--r--drivers/usb/storage/unusual_uas.h7
-rw-r--r--drivers/usb/typec/Kconfig11
-rw-r--r--drivers/usb/typec/Makefile1
-rw-r--r--drivers/usb/typec/class.c42
-rw-r--r--drivers/usb/typec/hd3ss3220.c269
-rw-r--r--drivers/usb/typec/tcpm/tcpm.c135
-rw-r--r--drivers/usb/typec/tps6598x.c49
-rw-r--r--drivers/usb/typec/ucsi/displayport.c40
-rw-r--r--drivers/usb/typec/ucsi/trace.c11
-rw-r--r--drivers/usb/typec/ucsi/trace.h79
-rw-r--r--drivers/usb/typec/ucsi/ucsi.c609
-rw-r--r--drivers/usb/typec/ucsi/ucsi.h417
-rw-r--r--drivers/usb/typec/ucsi/ucsi_acpi.c91
-rw-r--r--drivers/usb/typec/ucsi/ucsi_ccg.c166
-rw-r--r--drivers/usb/usbip/Kconfig1
-rw-r--r--drivers/usb/usbip/stub_rx.c50
-rw-r--r--drivers/usb/usbip/stub_tx.c2
-rw-r--r--drivers/vhost/vsock.c102
-rw-r--r--drivers/video/console/vgacon.c6
-rw-r--r--drivers/virtio/virtio_balloon.c20
-rw-r--r--drivers/virtio/virtio_ring.c4
-rw-r--r--drivers/w1/masters/sgi_w1.c4
-rw-r--r--drivers/w1/slaves/Kconfig8
-rw-r--r--drivers/w1/slaves/Makefile1
-rw-r--r--drivers/w1/slaves/w1_ds2430.c295
-rw-r--r--drivers/watchdog/menz69_wdt.c1
-rw-r--r--drivers/xen/Kconfig63
-rw-r--r--drivers/xen/mcelog.c14
-rw-r--r--fs/Kconfig3
-rw-r--r--fs/Makefile1
-rw-r--r--fs/affs/affs.h16
-rw-r--r--fs/affs/super.c10
-rw-r--r--fs/afs/callback.c1
-rw-r--r--fs/afs/dir.c7
-rw-r--r--fs/afs/flock.c4
-rw-r--r--fs/afs/inode.c13
-rw-r--r--fs/afs/rxrpc.c1
-rw-r--r--fs/afs/super.c1
-rw-r--r--fs/afs/vl_list.c4
-rw-r--r--fs/afs/yfsclient.c4
-rw-r--r--fs/aio.c10
-rw-r--r--fs/autofs/expire.c5
-rw-r--r--fs/block_dev.c69
-rw-r--r--fs/btrfs/Kconfig2
-rw-r--r--fs/btrfs/async-thread.c113
-rw-r--r--fs/btrfs/async-thread.h37
-rw-r--r--fs/btrfs/block-group.c589
-rw-r--r--fs/btrfs/block-group.h51
-rw-r--r--fs/btrfs/btrfs_inode.h3
-rw-r--r--fs/btrfs/compression.c269
-rw-r--r--fs/btrfs/compression.h46
-rw-r--r--fs/btrfs/ctree.c287
-rw-r--r--fs/btrfs/ctree.h51
-rw-r--r--fs/btrfs/delalloc-space.c21
-rw-r--r--fs/btrfs/delayed-inode.c18
-rw-r--r--fs/btrfs/dev-replace.c2
-rw-r--r--fs/btrfs/dev-replace.h2
-rw-r--r--fs/btrfs/disk-io.c365
-rw-r--r--fs/btrfs/disk-io.h4
-rw-r--r--fs/btrfs/export.c4
-rw-r--r--fs/btrfs/extent-io-tree.h248
-rw-r--r--fs/btrfs/extent-tree.c146
-rw-r--r--fs/btrfs/extent_io.c120
-rw-r--r--fs/btrfs/extent_io.h231
-rw-r--r--fs/btrfs/extent_map.c6
-rw-r--r--fs/btrfs/extent_map.h11
-rw-r--r--fs/btrfs/file-item.c1
-rw-r--r--fs/btrfs/file.c74
-rw-r--r--fs/btrfs/free-space-cache.c118
-rw-r--r--fs/btrfs/free-space-cache.h39
-rw-r--r--fs/btrfs/free-space-tree.c133
-rw-r--r--fs/btrfs/free-space-tree.h18
-rw-r--r--fs/btrfs/inode.c185
-rw-r--r--fs/btrfs/ioctl.c49
-rw-r--r--fs/btrfs/locking.c309
-rw-r--r--fs/btrfs/locking.h13
-rw-r--r--fs/btrfs/lzo.c53
-rw-r--r--fs/btrfs/misc.h11
-rw-r--r--fs/btrfs/ordered-data.c7
-rw-r--r--fs/btrfs/ordered-data.h2
-rw-r--r--fs/btrfs/print-tree.c6
-rw-r--r--fs/btrfs/props.c6
-rw-r--r--fs/btrfs/qgroup.c11
-rw-r--r--fs/btrfs/qgroup.h2
-rw-r--r--fs/btrfs/raid56.c101
-rw-r--r--fs/btrfs/reada.c19
-rw-r--r--fs/btrfs/relocation.c43
-rw-r--r--fs/btrfs/scrub.c100
-rw-r--r--fs/btrfs/send.c45
-rw-r--r--fs/btrfs/space-info.c8
-rw-r--r--fs/btrfs/space-info.h3
-rw-r--r--fs/btrfs/super.c26
-rw-r--r--fs/btrfs/sysfs.c47
-rw-r--r--fs/btrfs/sysfs.h2
-rw-r--r--fs/btrfs/tests/btrfs-tests.c11
-rw-r--r--fs/btrfs/tests/btrfs-tests.h4
-rw-r--r--fs/btrfs/tests/free-space-tests.c15
-rw-r--r--fs/btrfs/tests/free-space-tree-tests.c101
-rw-r--r--fs/btrfs/transaction.c98
-rw-r--r--fs/btrfs/transaction.h5
-rw-r--r--fs/btrfs/tree-checker.c211
-rw-r--r--fs/btrfs/tree-log.c136
-rw-r--r--fs/btrfs/volumes.c494
-rw-r--r--fs/btrfs/volumes.h24
-rw-r--r--fs/btrfs/zlib.c52
-rw-r--r--fs/btrfs/zstd.c47
-rw-r--r--fs/ceph/file.c29
-rw-r--r--fs/cramfs/inode.c4
-rw-r--r--fs/crypto/bio.c29
-rw-r--r--fs/crypto/crypto.c124
-rw-r--r--fs/crypto/fscrypt_private.h25
-rw-r--r--fs/crypto/keyring.c6
-rw-r--r--fs/crypto/keysetup.c158
-rw-r--r--fs/crypto/keysetup_v1.c4
-rw-r--r--fs/crypto/policy.c41
-rw-r--r--fs/dcache.c2
-rw-r--r--fs/ecryptfs/inode.c84
-rw-r--r--fs/exec.c2
-rw-r--r--fs/exportfs/expfs.c31
-rw-r--r--fs/ext4/Kconfig17
-rw-r--r--fs/ext4/Makefile1
-rw-r--r--fs/ext4/ext4.h2
-rw-r--r--fs/ext4/inode-test.c272
-rw-r--r--fs/ext4/inode.c5
-rw-r--r--fs/ext4/super.c14
-rw-r--r--fs/f2fs/file.c5
-rw-r--r--fs/f2fs/segment.c3
-rw-r--r--fs/f2fs/super.c77
-rw-r--r--fs/fcntl.c2
-rw-r--r--fs/io-wq.c1065
-rw-r--r--fs/io-wq.h74
-rw-r--r--fs/io_uring.c2227
-rw-r--r--fs/jbd2/transaction.c4
-rw-r--r--fs/kernfs/dir.c105
-rw-r--r--fs/kernfs/file.c4
-rw-r--r--fs/kernfs/inode.c4
-rw-r--r--fs/kernfs/kernfs-internal.h2
-rw-r--r--fs/kernfs/mount.c102
-rw-r--r--fs/namespace.c15
-rw-r--r--fs/ocfs2/dlmglue.c2
-rw-r--r--fs/ocfs2/xattr.c56
-rw-r--r--fs/pipe.c6
-rw-r--r--fs/proc/stat.c56
-rw-r--r--include/Kbuild4
-rw-r--r--include/acpi/acpi_bus.h8
-rw-r--r--include/acpi/acpixf.h8
-rw-r--r--include/acpi/button.h12
-rw-r--r--include/asm-generic/vmlinux.lds.h67
-rw-r--r--include/clocksource/hyperv_timer.h7
-rw-r--r--include/crypto/aead.h2
-rw-r--r--include/crypto/algapi.h149
-rw-r--r--include/crypto/blake2s.h106
-rw-r--r--include/crypto/chacha.h83
-rw-r--r--include/crypto/chacha20poly1305.h48
-rw-r--r--include/crypto/curve25519.h71
-rw-r--r--include/crypto/engine.h4
-rw-r--r--include/crypto/hash.h2
-rw-r--r--include/crypto/internal/blake2s.h24
-rw-r--r--include/crypto/internal/chacha.h43
-rw-r--r--include/crypto/internal/des.h12
-rw-r--r--include/crypto/internal/poly1305.h58
-rw-r--r--include/crypto/internal/skcipher.h62
-rw-r--r--include/crypto/poly1305.h69
-rw-r--r--include/crypto/skcipher.h49
-rw-r--r--include/drm/bridge/dw_hdmi.h4
-rw-r--r--include/drm/drm_cache.h2
-rw-r--r--include/dt-bindings/gpio/meson-a1-gpio.h73
-rw-r--r--include/dt-bindings/interconnect/qcom,msm8974.h146
-rw-r--r--include/dt-bindings/net/qca-ar803x.h13
-rw-r--r--include/dt-bindings/net/ti-dp83869.h42
-rw-r--r--include/dt-bindings/pinctrl/at91.h4
-rw-r--r--include/dt-bindings/pmu/exynos_ppmu.h25
-rw-r--r--include/dt-bindings/regulator/dlg,da9063-regulator.h16
-rw-r--r--include/dt-bindings/sound/samsung-i2s.h12
-rw-r--r--include/keys/trusted_tpm.h (renamed from include/keys/trusted.h)49
-rw-r--r--include/kunit/assert.h356
-rw-r--r--include/kunit/string-stream.h51
-rw-r--r--include/kunit/test.h1490
-rw-r--r--include/kunit/try-catch.h75
-rw-r--r--include/kvm/arm_hypercalls.h43
-rw-r--r--include/kvm/arm_psci.h2
-rw-r--r--include/kvm/arm_vgic.h8
-rw-r--r--include/linux/acpi.h8
-rw-r--r--include/linux/arm-smccc.h75
-rw-r--r--include/linux/arm_sdei.h6
-rw-r--r--include/linux/blk-cgroup.h199
-rw-r--r--include/linux/blk-mq.h300
-rw-r--r--include/linux/blk_types.h28
-rw-r--r--include/linux/blkdev.h31
-rw-r--r--include/linux/bpf.h296
-rw-r--r--include/linux/bpf_types.h77
-rw-r--r--include/linux/bpf_verifier.h12
-rw-r--r--include/linux/brcmphy.h10
-rw-r--r--include/linux/btf.h33
-rw-r--r--include/linux/can/core.h1
-rw-r--r--include/linux/can/platform/mcp251x.h22
-rw-r--r--include/linux/can/rx-offload.h7
-rw-r--r--include/linux/cgroup-defs.h19
-rw-r--r--include/linux/cgroup.h27
-rw-r--r--include/linux/compat.h2
-rw-r--r--include/linux/context_tracking.h30
-rw-r--r--include/linux/context_tracking_state.h21
-rw-r--r--include/linux/coresight.h6
-rw-r--r--include/linux/cpu.h37
-rw-r--r--include/linux/cpuhotplug.h2
-rw-r--r--include/linux/cpuidle.h27
-rw-r--r--include/linux/crypto.h861
-rw-r--r--include/linux/device-mapper.h27
-rw-r--r--include/linux/dim.h63
-rw-r--r--include/linux/dma-direct.h2
-rw-r--r--include/linux/dma-mapping.h5
-rw-r--r--include/linux/edac.h146
-rw-r--r--include/linux/efi.h18
-rw-r--r--include/linux/errname.h16
-rw-r--r--include/linux/exportfs.h5
-rw-r--r--include/linux/extable.h10
-rw-r--r--include/linux/filter.h27
-rw-r--r--include/linux/firmware/broadcom/tee_bnxt_fw.h14
-rw-r--r--include/linux/firmware/intel/stratix10-svc-client.h8
-rw-r--r--include/linux/firmware/xlnx-zynqmp.h13
-rw-r--r--include/linux/fs.h4
-rw-r--r--include/linux/fscrypt.h35
-rw-r--r--include/linux/fsl/mc.h2
-rw-r--r--include/linux/ftrace.h40
-rw-r--r--include/linux/futex.h40
-rw-r--r--include/linux/fwnode.h10
-rw-r--r--include/linux/genhd.h5
-rw-r--r--include/linux/gpio/consumer.h54
-rw-r--r--include/linux/icmp.h15
-rw-r--r--include/linux/icmpv6.h14
-rw-r--r--include/linux/intel-iommu.h6
-rw-r--r--include/linux/ioport.h1
-rw-r--r--include/linux/ipmi_smi.h12
-rw-r--r--include/linux/irqchip/arm-gic-v3.h2
-rw-r--r--include/linux/irqchip/arm-gic-v4.h4
-rw-r--r--include/linux/jbd2.h2
-rw-r--r--include/linux/kernel.h7
-rw-r--r--include/linux/kernel_stat.h18
-rw-r--r--include/linux/kernfs.h57
-rw-r--r--include/linux/kvm_host.h48
-rw-r--r--include/linux/kvm_types.h2
-rw-r--r--include/linux/libata.h13
-rw-r--r--include/linux/linkage.h249
-rw-r--r--include/linux/linkmode.h6
-rw-r--r--include/linux/livepatch.h17
-rw-r--r--include/linux/lockdep.h21
-rw-r--r--include/linux/lsm_hooks.h15
-rw-r--r--include/linux/memory.h1
-rw-r--r--include/linux/memregion.h23
-rw-r--r--include/linux/miscdevice.h1
-rw-r--r--include/linux/mlx5/driver.h20
-rw-r--r--include/linux/mlx5/fs.h3
-rw-r--r--include/linux/mlx5/mlx5_ifc.h3
-rw-r--r--include/linux/mmc/card.h3
-rw-r--r--include/linux/mmc/sdio_ids.h2
-rw-r--r--include/linux/mmzone.h45
-rw-r--r--include/linux/mroute_base.h28
-rw-r--r--include/linux/mtd/spi-nor.h64
-rw-r--r--include/linux/netdevice.h43
-rw-r--r--include/linux/netfilter.h41
-rw-r--r--include/linux/netfilter/ipset/ip_set.h196
-rw-r--r--include/linux/netfilter/ipset/ip_set_bitmap.h14
-rw-r--r--include/linux/netfilter/ipset/ip_set_getport.h3
-rw-r--r--include/linux/nvme-fc.h182
-rw-r--r--include/linux/nvme.h60
-rw-r--r--include/linux/nvmem-consumer.h11
-rw-r--r--include/linux/of_net.h7
-rw-r--r--include/linux/parport.h1
-rw-r--r--include/linux/pci.h3
-rw-r--r--include/linux/percpu-rwsem.h4
-rw-r--r--include/linux/perf_event.h74
-rw-r--r--include/linux/phy.h26
-rw-r--r--include/linux/phy/phy.h3
-rw-r--r--include/linux/phy/tegra/xusb.h4
-rw-r--r--include/linux/phylink.h25
-rw-r--r--include/linux/pid.h7
-rw-r--r--include/linux/pid_namespace.h2
-rw-r--r--include/linux/platform_data/cros_ec_commands.h285
-rw-r--r--include/linux/platform_data/hsmmc-omap.h3
-rw-r--r--include/linux/platform_data/intel-spi.h1
-rw-r--r--include/linux/platform_data/spi-mt65xx.h1
-rw-r--r--include/linux/pm.h2
-rw-r--r--include/linux/pm_domain.h5
-rw-r--r--include/linux/pm_opp.h13
-rw-r--r--include/linux/power/smartreflex.h3
-rw-r--r--include/linux/property.h106
-rw-r--r--include/linux/psci.h9
-rw-r--r--include/linux/pxa2xx_ssp.h2
-rw-r--r--include/linux/rculist_bl.h28
-rw-r--r--include/linux/rcupdate.h20
-rw-r--r--include/linux/rcutiny.h1
-rw-r--r--include/linux/rcutree.h1
-rw-r--r--include/linux/refcount.h269
-rw-r--r--include/linux/regulator/ab8500.h3
-rw-r--r--include/linux/regulator/fixed.h1
-rw-r--r--include/linux/rtsx_pci.h1
-rw-r--r--include/linux/rwlock_api_smp.h16
-rw-r--r--include/linux/sbitmap.h9
-rw-r--r--include/linux/sched.h13
-rw-r--r--include/linux/sched/mm.h6
-rw-r--r--include/linux/sched/task.h3
-rw-r--r--include/linux/security.h39
-rw-r--r--include/linux/sed-opal.h1
-rw-r--r--include/linux/seqlock.h4
-rw-r--r--include/linux/sfp.h31
-rw-r--r--include/linux/skbuff.h18
-rw-r--r--include/linux/skmsg.h12
-rw-r--r--include/linux/socket.h3
-rw-r--r--include/linux/soundwire/sdw.h7
-rw-r--r--include/linux/spi/spi.h132
-rw-r--r--include/linux/spinlock_api_smp.h8
-rw-r--r--include/linux/stat.h3
-rw-r--r--include/linux/stmmac.h4
-rw-r--r--include/linux/sxgbe_platform.h4
-rw-r--r--include/linux/tcp.h2
-rw-r--r--include/linux/tick.h9
-rw-r--r--include/linux/tpm.h250
-rw-r--r--include/linux/u64_stats_sync.h51
-rw-r--r--include/linux/uaccess.h16
-rw-r--r--include/linux/usb/role.h3
-rw-r--r--include/linux/usb/tcpm.h41
-rw-r--r--include/linux/usb/typec.h41
-rw-r--r--include/linux/virtio_vsock.h18
-rw-r--r--include/linux/vm_sockets.h15
-rw-r--r--include/linux/vmalloc.h1
-rw-r--r--include/linux/vmw_vmci_api.h2
-rw-r--r--include/linux/vtime.h59
-rw-r--r--include/linux/w1.h1
-rw-r--r--include/linux/ww_mutex.h2
-rw-r--r--include/media/cec-pin.h10
-rw-r--r--include/media/cec.h31
-rw-r--r--include/media/dvb-usb-ids.h1
-rw-r--r--include/media/hevc-ctrls.h212
-rw-r--r--include/media/i2c/smiapp.h1
-rw-r--r--include/media/rc-map.h24
-rw-r--r--include/media/v4l2-common.h33
-rw-r--r--include/media/v4l2-ctrls.h87
-rw-r--r--include/media/v4l2-device.h2
-rw-r--r--include/media/v4l2-mem2mem.h44
-rw-r--r--include/media/videobuf2-core.h3
-rw-r--r--include/media/videobuf2-v4l2.h5
-rw-r--r--include/net/act_api.h47
-rw-r--r--include/net/addrconf.h6
-rw-r--r--include/net/af_vsock.h45
-rw-r--r--include/net/arp.h4
-rw-r--r--include/net/cfg80211.h13
-rw-r--r--include/net/devlink.h71
-rw-r--r--include/net/dsa.h108
-rw-r--r--include/net/fib_notifier.h13
-rw-r--r--include/net/fib_rules.h3
-rw-r--r--include/net/flow_dissector.h33
-rw-r--r--include/net/gen_stats.h6
-rw-r--r--include/net/genetlink.h20
-rw-r--r--include/net/ip.h8
-rw-r--r--include/net/ip6_fib.h50
-rw-r--r--include/net/ip_fib.h21
-rw-r--r--include/net/ip_vs.h2
-rw-r--r--include/net/ipv6.h5
-rw-r--r--include/net/mac80211.h90
-rw-r--r--include/net/ndisc.h8
-rw-r--r--include/net/neighbour.h2
-rw-r--r--include/net/net_namespace.h6
-rw-r--r--include/net/netfilter/nf_conntrack_extend.h10
-rw-r--r--include/net/netfilter/nf_flow_table.h64
-rw-r--r--include/net/netfilter/nf_tables.h22
-rw-r--r--include/net/netfilter/nf_tables_offload.h1
-rw-r--r--include/net/netns/ipv6.h3
-rw-r--r--include/net/netns/mib.h3
-rw-r--r--include/net/netns/sctp.h14
-rw-r--r--include/net/netprio_cgroup.h2
-rw-r--r--include/net/page_pool.h85
-rw-r--r--include/net/route.h4
-rw-r--r--include/net/sch_generic.h18
-rw-r--r--include/net/sctp/constants.h12
-rw-r--r--include/net/sctp/structs.h16
-rw-r--r--include/net/sctp/ulpevent.h16
-rw-r--r--include/net/smc.h7
-rw-r--r--include/net/snmp.h6
-rw-r--r--include/net/sock.h27
-rw-r--r--include/net/tcp.h12
-rw-r--r--include/net/tls.h71
-rw-r--r--include/net/tls_toe.h77
-rw-r--r--include/net/vsock_addr.h2
-rw-r--r--include/net/xdp_priv.h4
-rw-r--r--include/net/xdp_sock.h51
-rw-r--r--include/net/xfrm.h7
-rw-r--r--include/rdma/ib_cm.h32
-rw-r--r--include/rdma/ib_mad.h40
-rw-r--r--include/rdma/ib_umem.h4
-rw-r--r--include/rdma/ib_umem_odp.h18
-rw-r--r--include/rdma/ib_verbs.h79
-rw-r--r--include/rdma/restrack.h5
-rw-r--r--include/soc/fsl/qman.h11
-rw-r--r--include/soc/mscc/ocelot.h550
-rw-r--r--include/soc/mscc/ocelot_sys.h (renamed from drivers/net/ethernet/mscc/ocelot_sys.h)0
-rw-r--r--include/sound/core.h1
-rw-r--r--include/sound/dmaengine_pcm.h5
-rw-r--r--include/sound/hda_codec.h1
-rw-r--r--include/sound/intel-dsp-config.h34
-rw-r--r--include/sound/memalloc.h2
-rw-r--r--include/sound/pcm.h20
-rw-r--r--include/sound/pxa2xx-lib.h26
-rw-r--r--include/sound/rt5682.h1
-rw-r--r--include/sound/simple_card_utils.h1
-rw-r--r--include/sound/soc-acpi-intel-match.h3
-rw-r--r--include/sound/soc-acpi.h4
-rw-r--r--include/sound/soc-component.h52
-rw-r--r--include/sound/soc-dpcm.h18
-rw-r--r--include/sound/soc.h38
-rw-r--r--include/sound/sof.h3
-rw-r--r--include/sound/sof/dai-imx.h34
-rw-r--r--include/sound/sof/dai.h2
-rw-r--r--include/sound/sof/header.h2
-rw-r--r--include/sound/sof/pm.h8
-rw-r--r--include/sound/sof/stream.h4
-rw-r--r--include/sound/timer.h6
-rw-r--r--include/sound/wm8904.h2
-rw-r--r--include/trace/bpf_probe.h3
-rw-r--r--include/trace/events/bridge.h12
-rw-r--r--include/trace/events/btrfs.h131
-rw-r--r--include/trace/events/cgroup.h6
-rw-r--r--include/trace/events/fsi.h6
-rw-r--r--include/trace/events/fsi_master_aspeed.h77
-rw-r--r--include/trace/events/io_uring.h358
-rw-r--r--include/trace/events/page_pool.h44
-rw-r--r--include/trace/events/rcu.h47
-rw-r--r--include/trace/events/tcp.h2
-rw-r--r--include/trace/events/timer.h3
-rw-r--r--include/trace/events/wbt.h12
-rw-r--r--include/trace/events/writeback.h140
-rw-r--r--include/trace/events/xdp.h21
-rw-r--r--include/uapi/linux/blkzoned.h17
-rw-r--r--include/uapi/linux/bpf.h188
-rw-r--r--include/uapi/linux/btrfs.h5
-rw-r--r--include/uapi/linux/btrfs_tree.h23
-rw-r--r--include/uapi/linux/cec-funcs.h34
-rw-r--r--include/uapi/linux/cec.h133
-rw-r--r--include/uapi/linux/dcbnl.h2
-rw-r--r--include/uapi/linux/devlink.h5
-rw-r--r--include/uapi/linux/ethtool.h6
-rw-r--r--include/uapi/linux/fcntl.h9
-rw-r--r--include/uapi/linux/fscrypt.h3
-rw-r--r--include/uapi/linux/gen_stats.h5
-rw-r--r--include/uapi/linux/if.h1
-rw-r--r--include/uapi/linux/if_link.h2
-rw-r--r--include/uapi/linux/io_uring.h24
-rw-r--r--include/uapi/linux/kvm.h11
-rw-r--r--include/uapi/linux/lwtunnel.h41
-rw-r--r--include/uapi/linux/netfilter/ipset/ip_set.h2
-rw-r--r--include/uapi/linux/netfilter/nf_tables.h4
-rw-r--r--include/uapi/linux/netfilter_arp/arp_tables.h2
-rw-r--r--include/uapi/linux/netfilter_bridge/ebtables.h2
-rw-r--r--include/uapi/linux/netfilter_ipv4/ip_tables.h2
-rw-r--r--include/uapi/linux/netfilter_ipv6/ip6_tables.h2
-rw-r--r--include/uapi/linux/nl80211.h34
-rw-r--r--include/uapi/linux/openvswitch.h4
-rw-r--r--include/uapi/linux/perf_event.h10
-rw-r--r--include/uapi/linux/pkt_cls.h34
-rw-r--r--include/uapi/linux/pkt_sched.h22
-rw-r--r--include/uapi/linux/psp-sev.h3
-rw-r--r--include/uapi/linux/ptp_clock.h5
-rw-r--r--include/uapi/linux/rtnetlink.h7
-rw-r--r--include/uapi/linux/sched.h64
-rw-r--r--include/uapi/linux/sctp.h31
-rw-r--r--include/uapi/linux/sed-opal.h20
-rw-r--r--include/uapi/linux/snmp.h17
-rw-r--r--include/uapi/linux/stat.h2
-rw-r--r--include/uapi/linux/tc_act/tc_tunnel_key.h29
-rw-r--r--include/uapi/linux/tcp.h10
-rw-r--r--include/uapi/linux/tipc.h22
-rw-r--r--include/uapi/linux/tipc_config.h4
-rw-r--r--include/uapi/linux/tipc_netlink.h4
-rw-r--r--include/uapi/linux/v4l2-controls.h1
-rw-r--r--include/uapi/linux/videodev2.h22
-rw-r--r--include/uapi/linux/virtio_ring.h2
-rw-r--r--include/uapi/misc/fastrpc.h15
-rw-r--r--include/uapi/misc/habanalabs.h48
-rw-r--r--include/uapi/rdma/cxgb3-abi.h82
-rw-r--r--include/uapi/rdma/efa-abi.h6
-rw-r--r--include/uapi/rdma/ib_user_ioctl_verbs.h22
-rw-r--r--include/uapi/rdma/mlx5_user_ioctl_cmds.h1
-rw-r--r--include/uapi/rdma/nes-abi.h115
-rw-r--r--include/uapi/rdma/qedr-abi.h25
-rw-r--r--include/uapi/rdma/rdma_user_ioctl_cmds.h22
-rw-r--r--include/uapi/rdma/vmw_pvrdma-abi.h5
-rw-r--r--include/uapi/sound/compress_params.h10
-rw-r--r--include/uapi/sound/sof/abi.h2
-rw-r--r--include/uapi/sound/sof/tokens.h11
-rw-r--r--include/xen/interface/xen-mca.h10
-rw-r--r--init/Kconfig5
-rw-r--r--init/do_mounts.c49
-rw-r--r--init/initramfs.c8
-rw-r--r--kernel/Kconfig.preempt2
-rw-r--r--kernel/Makefile2
-rw-r--r--kernel/audit_watch.c2
-rw-r--r--kernel/bpf/Makefile1
-rw-r--r--kernel/bpf/arraymap.c263
-rw-r--r--kernel/bpf/btf.c796
-rw-r--r--kernel/bpf/cgroup.c4
-rw-r--r--kernel/bpf/core.c129
-rw-r--r--kernel/bpf/devmap.c74
-rw-r--r--kernel/bpf/helpers.c2
-rw-r--r--kernel/bpf/inode.c7
-rw-r--r--kernel/bpf/local_storage.c2
-rw-r--r--kernel/bpf/map_in_map.c7
-rw-r--r--kernel/bpf/offload.c4
-rw-r--r--kernel/bpf/stackmap.c9
-rw-r--r--kernel/bpf/syscall.c382
-rw-r--r--kernel/bpf/trampoline.c253
-rw-r--r--kernel/bpf/verifier.c543
-rw-r--r--kernel/bpf/xskmap.c118
-rw-r--r--kernel/cgroup/cgroup-internal.h5
-rw-r--r--kernel/cgroup/cgroup-v1.c5
-rw-r--r--kernel/cgroup/cgroup.c330
-rw-r--r--kernel/cgroup/cpuset.c2
-rw-r--r--kernel/cgroup/freezer.c9
-rw-r--r--kernel/cgroup/pids.c11
-rw-r--r--kernel/cgroup/rstat.c46
-rw-r--r--kernel/context_tracking.c6
-rw-r--r--kernel/cpu.c29
-rw-r--r--kernel/dma/debug.c2
-rw-r--r--kernel/dma/direct.c13
-rw-r--r--kernel/events/core.c404
-rw-r--r--kernel/events/internal.h1
-rw-r--r--kernel/events/ring_buffer.c60
-rw-r--r--kernel/exit.c32
-rw-r--r--kernel/extable.c2
-rw-r--r--kernel/fork.c146
-rw-r--r--kernel/futex.c326
-rw-r--r--kernel/livepatch/Makefile2
-rw-r--r--kernel/livepatch/core.c44
-rw-r--r--kernel/livepatch/core.h5
-rw-r--r--kernel/livepatch/state.c119
-rw-r--r--kernel/livepatch/state.h9
-rw-r--r--kernel/livepatch/transition.c12
-rw-r--r--kernel/locking/lockdep.c7
-rw-r--r--kernel/locking/locktorture.c9
-rw-r--r--kernel/locking/mutex.c8
-rw-r--r--kernel/locking/rtmutex.c6
-rw-r--r--kernel/locking/rwsem.c10
-rw-r--r--kernel/module.c2
-rw-r--r--kernel/panic.c11
-rw-r--r--kernel/pid.c86
-rw-r--r--kernel/pid_namespace.c2
-rw-r--r--kernel/power/qos.c8
-rw-r--r--kernel/power/snapshot.c9
-rw-r--r--kernel/printk/printk.c10
-rw-r--r--kernel/rcu/rcu.h4
-rw-r--r--kernel/rcu/rcu_segcblist.c6
-rw-r--r--kernel/rcu/rcuperf.c16
-rw-r--r--kernel/rcu/rcutorture.c44
-rw-r--r--kernel/rcu/tree.c73
-rw-r--r--kernel/rcu/tree.h1
-rw-r--r--kernel/rcu/tree_plugin.h2
-rw-r--r--kernel/sched/core.c41
-rw-r--r--kernel/sched/cputime.c288
-rw-r--r--kernel/sched/deadline.c12
-rw-r--r--kernel/sched/fair.c1416
-rw-r--r--kernel/sched/features.h1
-rw-r--r--kernel/sched/idle.c34
-rw-r--r--kernel/sched/rt.c12
-rw-r--r--kernel/sched/sched.h25
-rw-r--r--kernel/sched/stop_task.c9
-rw-r--r--kernel/sched/topology.c9
-rw-r--r--kernel/signal.c2
-rw-r--r--kernel/stacktrace.c4
-rw-r--r--kernel/stop_machine.c1
-rw-r--r--kernel/sysctl-test.c392
-rw-r--r--kernel/time/ntp.c2
-rw-r--r--kernel/time/tick-sched.c13
-rw-r--r--kernel/trace/blktrace.c84
-rw-r--r--kernel/trace/bpf_trace.c227
-rw-r--r--kernel/trace/ftrace.c6
-rw-r--r--kernel/trace/trace_benchmark.c4
-rw-r--r--kernel/trace/trace_event_perf.c15
-rw-r--r--kernel/workqueue.c97
-rw-r--r--lib/Kconfig3
-rw-r--r--lib/Kconfig.debug40
-rw-r--r--lib/Makefile10
-rw-r--r--lib/cpu_rmap.c2
-rw-r--r--lib/crypto/Kconfig130
-rw-r--r--lib/crypto/Makefile42
-rw-r--r--lib/crypto/blake2s-generic.c111
-rw-r--r--lib/crypto/blake2s-selftest.c622
-rw-r--r--lib/crypto/blake2s.c126
-rw-r--r--lib/crypto/chacha.c (renamed from lib/chacha.c)20
-rw-r--r--lib/crypto/chacha20poly1305-selftest.c7393
-rw-r--r--lib/crypto/chacha20poly1305.c369
-rw-r--r--lib/crypto/curve25519-fiat32.c864
-rw-r--r--lib/crypto/curve25519-hacl64.c788
-rw-r--r--lib/crypto/curve25519.c25
-rw-r--r--lib/crypto/libchacha.c35
-rw-r--r--lib/crypto/poly1305.c232
-rw-r--r--lib/errname.c223
-rw-r--r--lib/kunit/Kconfig36
-rw-r--r--lib/kunit/Makefile9
-rw-r--r--lib/kunit/assert.c141
-rw-r--r--lib/kunit/example-test.c88
-rw-r--r--lib/kunit/string-stream-test.c52
-rw-r--r--lib/kunit/string-stream.c217
-rw-r--r--lib/kunit/test-test.c331
-rw-r--r--lib/kunit/test.c478
-rw-r--r--lib/kunit/try-catch.c118
-rw-r--r--lib/list-test.c746
-rw-r--r--lib/livepatch/Makefile5
-rw-r--r--lib/livepatch/test_klp_state.c162
-rw-r--r--lib/livepatch/test_klp_state2.c191
-rw-r--r--lib/livepatch/test_klp_state3.c5
-rw-r--r--lib/locking-selftest.c24
-rw-r--r--lib/memregion.c18
-rw-r--r--lib/refcount.c255
-rw-r--r--lib/sbitmap.c17
-rw-r--r--lib/smp_processor_id.c2
-rw-r--r--lib/test_bpf.c112
-rw-r--r--lib/test_printf.c53
-rw-r--r--lib/ubsan.c7
-rw-r--r--lib/ubsan.h2
-rw-r--r--lib/vsprintf.c120
-rw-r--r--lib/xz/xz_dec_lzma2.c1
-rw-r--r--mm/debug.c31
-rw-r--r--mm/hugetlb_cgroup.c2
-rw-r--r--mm/khugepaged.c28
-rw-r--r--mm/ksm.c14
-rw-r--r--mm/maccess.c70
-rw-r--r--mm/madvise.c16
-rw-r--r--mm/memcontrol.c4
-rw-r--r--mm/memory.c104
-rw-r--r--mm/memory_hotplug.c59
-rw-r--r--mm/mempolicy.c14
-rw-r--r--mm/nommu.c15
-rw-r--r--mm/page_io.c6
-rw-r--r--mm/slub.c39
-rw-r--r--mm/vmalloc.c20
-rw-r--r--net/Kconfig26
-rw-r--r--net/atm/clip.c6
-rw-r--r--net/atm/signaling.c2
-rw-r--r--net/atm/svc.c2
-rw-r--r--net/ax25/af_ax25.c2
-rw-r--r--net/ax25/ax25_in.c2
-rw-r--r--net/batman-adv/bat_v.c1
-rw-r--r--net/batman-adv/bat_v_ogm.c34
-rw-r--r--net/batman-adv/main.h2
-rw-r--r--net/batman-adv/multicast.c2
-rw-r--r--net/batman-adv/soft-interface.c5
-rw-r--r--net/batman-adv/types.h3
-rw-r--r--net/bluetooth/Kconfig2
-rw-r--r--net/bluetooth/af_bluetooth.c4
-rw-r--r--net/bluetooth/hci_conn.c12
-rw-r--r--net/bluetooth/hci_core.c39
-rw-r--r--net/bluetooth/hci_request.c19
-rw-r--r--net/bluetooth/l2cap_core.c4
-rw-r--r--net/bluetooth/smp.c6
-rw-r--r--net/bpf/test_run.c52
-rw-r--r--net/bridge/br_device.c36
-rw-r--r--net/bridge/br_fdb.c157
-rw-r--r--net/bridge/br_input.c7
-rw-r--r--net/bridge/br_private.h24
-rw-r--r--net/bridge/br_switchdev.c12
-rw-r--r--net/caif/Kconfig10
-rw-r--r--net/can/af_can.c3
-rw-r--r--net/can/j1939/main.c9
-rw-r--r--net/can/j1939/socket.c94
-rw-r--r--net/can/j1939/transport.c36
-rw-r--r--net/core/bpf_sk_storage.c2
-rw-r--r--net/core/dev.c417
-rw-r--r--net/core/devlink.c356
-rw-r--r--net/core/fib_notifier.c95
-rw-r--r--net/core/fib_rules.c23
-rw-r--r--net/core/filter.c33
-rw-r--r--net/core/flow_dissector.c155
-rw-r--r--net/core/gen_estimator.c4
-rw-r--r--net/core/gen_stats.c12
-rw-r--r--net/core/neighbour.c8
-rw-r--r--net/core/net-procfs.c4
-rw-r--r--net/core/net-sysfs.c25
-rw-r--r--net/core/netprio_cgroup.c8
-rw-r--r--net/core/page_pool.c189
-rw-r--r--net/core/pktgen.c1
-rw-r--r--net/core/rtnetlink.c243
-rw-r--r--net/core/skmsg.c13
-rw-r--r--net/core/sock.c18
-rw-r--r--net/core/sock_reuseport.c4
-rw-r--r--net/core/xdp.c128
-rw-r--r--net/dccp/proto.c2
-rw-r--r--net/decnet/af_decnet.c2
-rw-r--r--net/decnet/dn_nsp_in.c2
-rw-r--r--net/dsa/Kconfig9
-rw-r--r--net/dsa/Makefile1
-rw-r--r--net/dsa/dsa.c93
-rw-r--r--net/dsa/dsa2.c384
-rw-r--r--net/dsa/dsa_priv.h27
-rw-r--r--net/dsa/port.c32
-rw-r--r--net/dsa/slave.c25
-rw-r--r--net/dsa/switch.c4
-rw-r--r--net/dsa/tag_8021q.c22
-rw-r--r--net/dsa/tag_ocelot.c241
-rw-r--r--net/ethernet/eth.c7
-rw-r--r--net/ieee802154/nl802154.c39
-rw-r--r--net/ipv4/Kconfig218
-rw-r--r--net/ipv4/af_inet.c2
-rw-r--r--net/ipv4/fib_frontend.c10
-rw-r--r--net/ipv4/fib_notifier.c13
-rw-r--r--net/ipv4/fib_rules.c5
-rw-r--r--net/ipv4/fib_trie.c44
-rw-r--r--net/ipv4/icmp.c14
-rw-r--r--net/ipv4/igmp.c2
-rw-r--r--net/ipv4/inet_connection_sock.c2
-rw-r--r--net/ipv4/inet_diag.c15
-rw-r--r--net/ipv4/inetpeer.c12
-rw-r--r--net/ipv4/ip_gre.c6
-rw-r--r--net/ipv4/ip_input.c38
-rw-r--r--net/ipv4/ip_output.c4
-rw-r--r--net/ipv4/ip_tunnel_core.c440
-rw-r--r--net/ipv4/ipconfig.c13
-rw-r--r--net/ipv4/ipmr.c16
-rw-r--r--net/ipv4/ipmr_base.c30
-rw-r--r--net/ipv4/netfilter/nf_flow_table_ipv4.c2
-rw-r--r--net/ipv4/netfilter/nf_socket_ipv4.c10
-rw-r--r--net/ipv4/nexthop.c1
-rw-r--r--net/ipv4/route.c47
-rw-r--r--net/ipv4/syncookies.c4
-rw-r--r--net/ipv4/sysctl_net_ipv4.c6
-rw-r--r--net/ipv4/tcp.c32
-rw-r--r--net/ipv4/tcp_cong.c6
-rw-r--r--net/ipv4/tcp_diag.c4
-rw-r--r--net/ipv4/tcp_fastopen.c5
-rw-r--r--net/ipv4/tcp_input.c4
-rw-r--r--net/ipv4/tcp_ipv4.c8
-rw-r--r--net/ipv4/tcp_output.c2
-rw-r--r--net/ipv4/tcp_ulp.c3
-rw-r--r--net/ipv4/udp.c29
-rw-r--r--net/ipv4/xfrm4_output.c2
-rw-r--r--net/ipv6/addrconf.c7
-rw-r--r--net/ipv6/fib6_notifier.c11
-rw-r--r--net/ipv6/fib6_rules.c5
-rw-r--r--net/ipv6/icmp.c22
-rw-r--r--net/ipv6/ip6_fib.c54
-rw-r--r--net/ipv6/ip6_input.c29
-rw-r--r--net/ipv6/ip6_output.c4
-rw-r--r--net/ipv6/ip6mr.c13
-rw-r--r--net/ipv6/ipv6_sockglue.c4
-rw-r--r--net/ipv6/netfilter/Kconfig28
-rw-r--r--net/ipv6/netfilter/nf_flow_table_ipv6.c2
-rw-r--r--net/ipv6/netfilter/nf_tproxy_ipv6.c2
-rw-r--r--net/ipv6/route.c14
-rw-r--r--net/ipv6/seg6_local.c33
-rw-r--r--net/ipv6/tcp_ipv6.c2
-rw-r--r--net/ipv6/xfrm6_output.c2
-rw-r--r--net/llc/af_llc.c4
-rw-r--r--net/mac80211/Makefile3
-rw-r--r--net/mac80211/agg-tx.c9
-rw-r--r--net/mac80211/airtime.c597
-rw-r--r--net/mac80211/cfg.c2
-rw-r--r--net/mac80211/debugfs.c88
-rw-r--r--net/mac80211/debugfs_sta.c43
-rw-r--r--net/mac80211/ibss.c9
-rw-r--r--net/mac80211/ieee80211_i.h8
-rw-r--r--net/mac80211/main.c10
-rw-r--r--net/mac80211/mlme.c103
-rw-r--r--net/mac80211/rc80211_minstrel.c48
-rw-r--r--net/mac80211/rc80211_minstrel.h57
-rw-r--r--net/mac80211/rc80211_minstrel_debugfs.c8
-rw-r--r--net/mac80211/rc80211_minstrel_ht.c73
-rw-r--r--net/mac80211/rc80211_minstrel_ht.h2
-rw-r--r--net/mac80211/rc80211_minstrel_ht_debugfs.c8
-rw-r--r--net/mac80211/sta_info.c52
-rw-r--r--net/mac80211/sta_info.h12
-rw-r--r--net/mac80211/status.c39
-rw-r--r--net/mac80211/tx.c136
-rw-r--r--net/netfilter/Makefile3
-rw-r--r--net/netfilter/core.c20
-rw-r--r--net/netfilter/ipset/ip_set_bitmap_gen.h2
-rw-r--r--net/netfilter/ipset/ip_set_bitmap_ip.c26
-rw-r--r--net/netfilter/ipset/ip_set_bitmap_ipmac.c18
-rw-r--r--net/netfilter/ipset/ip_set_bitmap_port.c41
-rw-r--r--net/netfilter/ipset/ip_set_core.c212
-rw-r--r--net/netfilter/ipset/ip_set_getport.c28
-rw-r--r--net/netfilter/ipset/ip_set_hash_gen.h4
-rw-r--r--net/netfilter/ipset/ip_set_hash_ip.c10
-rw-r--r--net/netfilter/ipset/ip_set_hash_ipmac.c8
-rw-r--r--net/netfilter/ipset/ip_set_hash_ipmark.c8
-rw-r--r--net/netfilter/ipset/ip_set_hash_ipport.c8
-rw-r--r--net/netfilter/ipset/ip_set_hash_ipportip.c8
-rw-r--r--net/netfilter/ipset/ip_set_hash_ipportnet.c24
-rw-r--r--net/netfilter/ipset/ip_set_hash_mac.c6
-rw-r--r--net/netfilter/ipset/ip_set_hash_net.c24
-rw-r--r--net/netfilter/ipset/ip_set_hash_netiface.c47
-rw-r--r--net/netfilter/ipset/ip_set_hash_netnet.c28
-rw-r--r--net/netfilter/ipset/ip_set_hash_netport.c24
-rw-r--r--net/netfilter/ipset/ip_set_hash_netportnet.c28
-rw-r--r--net/netfilter/ipset/ip_set_list_set.c4
-rw-r--r--net/netfilter/ipvs/ip_vs_core.c47
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c12
-rw-r--r--net/netfilter/ipvs/ip_vs_ovf.c2
-rw-r--r--net/netfilter/ipvs/ip_vs_xmit.c18
-rw-r--r--net/netfilter/nf_conntrack_core.c2
-rw-r--r--net/netfilter/nf_conntrack_ecache.c23
-rw-r--r--net/netfilter/nf_conntrack_extend.c21
-rw-r--r--net/netfilter/nf_conntrack_netlink.c76
-rw-r--r--net/netfilter/nf_conntrack_proto_icmp.c6
-rw-r--r--net/netfilter/nf_flow_table_core.c177
-rw-r--r--net/netfilter/nf_flow_table_inet.c25
-rw-r--r--net/netfilter/nf_flow_table_offload.c851
-rw-r--r--net/netfilter/nf_tables_api.c619
-rw-r--r--net/netfilter/nf_tables_offload.c275
-rw-r--r--net/netfilter/nft_chain_filter.c45
-rw-r--r--net/netfilter/nft_cmp.c6
-rw-r--r--net/netfilter/nft_flow_offload.c5
-rw-r--r--net/netfilter/nft_meta.c18
-rw-r--r--net/netfilter/nft_payload.c94
-rw-r--r--net/netfilter/xt_HMARK.c6
-rw-r--r--net/netfilter/xt_time.c19
-rw-r--r--net/netlink/genetlink.c303
-rw-r--r--net/nfc/hci/Kconfig14
-rw-r--r--net/nfc/netlink.c17
-rw-r--r--net/openvswitch/actions.c2
-rw-r--r--net/openvswitch/conntrack.c21
-rw-r--r--net/openvswitch/datapath.c113
-rw-r--r--net/openvswitch/datapath.h12
-rw-r--r--net/openvswitch/flow.c20
-rw-r--r--net/openvswitch/flow.h10
-rw-r--r--net/openvswitch/flow_netlink.c87
-rw-r--r--net/openvswitch/flow_table.c381
-rw-r--r--net/openvswitch/flow_table.h19
-rw-r--r--net/openvswitch/vport.c5
-rw-r--r--net/packet/af_packet.c12
-rw-r--r--net/qrtr/tun.c6
-rw-r--r--net/rds/ib.c10
-rw-r--r--net/rds/ib.h15
-rw-r--r--net/rds/ib_cm.c190
-rw-r--r--net/rds/ib_recv.c13
-rw-r--r--net/rds/ib_send.c19
-rw-r--r--net/rfkill/core.c9
-rw-r--r--net/rose/af_rose.c4
-rw-r--r--net/rxrpc/Kconfig2
-rw-r--r--net/rxrpc/peer_object.c2
-rw-r--r--net/sched/act_api.c60
-rw-r--r--net/sched/act_bpf.c5
-rw-r--r--net/sched/act_connmark.c4
-rw-r--r--net/sched/act_csum.c14
-rw-r--r--net/sched/act_ct.c20
-rw-r--r--net/sched/act_ctinfo.c8
-rw-r--r--net/sched/act_gact.c21
-rw-r--r--net/sched/act_ife.c7
-rw-r--r--net/sched/act_ipt.c12
-rw-r--r--net/sched/act_mirred.c23
-rw-r--r--net/sched/act_mpls.c8
-rw-r--r--net/sched/act_nat.c8
-rw-r--r--net/sched/act_pedit.c17
-rw-r--r--net/sched/act_police.c20
-rw-r--r--net/sched/act_sample.c8
-rw-r--r--net/sched/act_simple.c7
-rw-r--r--net/sched/act_skbedit.c8
-rw-r--r--net/sched/act_skbmod.c4
-rw-r--r--net/sched/act_tunnel_key.c220
-rw-r--r--net/sched/act_vlan.c18
-rw-r--r--net/sched/cls_flower.c254
-rw-r--r--net/sched/em_meta.c4
-rw-r--r--net/sched/sch_fq.c3
-rw-r--r--net/sched/sch_fq_codel.c1
-rw-r--r--net/sched/sch_generic.c18
-rw-r--r--net/sched/sch_pie.c120
-rw-r--r--net/sched/sch_taprio.c28
-rw-r--r--net/sctp/associola.c61
-rw-r--r--net/sctp/chunk.c40
-rw-r--r--net/sctp/diag.c4
-rw-r--r--net/sctp/endpointola.c3
-rw-r--r--net/sctp/input.c4
-rw-r--r--net/sctp/protocol.c6
-rw-r--r--net/sctp/sm_sideeffect.c5
-rw-r--r--net/sctp/sm_statefuns.c4
-rw-r--r--net/sctp/socket.c151
-rw-r--r--net/sctp/sysctl.c22
-rw-r--r--net/sctp/ulpevent.c57
-rw-r--r--net/smc/af_smc.c27
-rw-r--r--net/smc/smc.h1
-rw-r--r--net/smc/smc_cdc.c7
-rw-r--r--net/smc/smc_clc.c2
-rw-r--r--net/smc/smc_close.c97
-rw-r--r--net/smc/smc_close.h2
-rw-r--r--net/smc/smc_core.c444
-rw-r--r--net/smc/smc_core.h16
-rw-r--r--net/smc/smc_ib.c24
-rw-r--r--net/smc/smc_ib.h4
-rw-r--r--net/smc/smc_ism.c27
-rw-r--r--net/smc/smc_llc.c11
-rw-r--r--net/smc/smc_pnet.c5
-rw-r--r--net/smc/smc_rx.c10
-rw-r--r--net/smc/smc_tx.c28
-rw-r--r--net/smc/smc_wr.c45
-rw-r--r--net/smc/smc_wr.h10
-rw-r--r--net/socket.c66
-rw-r--r--net/tipc/Kconfig15
-rw-r--r--net/tipc/Makefile1
-rw-r--r--net/tipc/bcast.c6
-rw-r--r--net/tipc/bcast.h2
-rw-r--r--net/tipc/bearer.c49
-rw-r--r--net/tipc/bearer.h6
-rw-r--r--net/tipc/core.c32
-rw-r--r--net/tipc/core.h20
-rw-r--r--net/tipc/crypto.c1986
-rw-r--r--net/tipc/crypto.h167
-rw-r--r--net/tipc/discover.c4
-rw-r--r--net/tipc/link.c109
-rw-r--r--net/tipc/link.h1
-rw-r--r--net/tipc/monitor.c15
-rw-r--r--net/tipc/monitor.h1
-rw-r--r--net/tipc/msg.c221
-rw-r--r--net/tipc/msg.h77
-rw-r--r--net/tipc/name_distr.c2
-rw-r--r--net/tipc/name_table.c51
-rw-r--r--net/tipc/name_table.h4
-rw-r--r--net/tipc/net.c2
-rw-r--r--net/tipc/netlink.c39
-rw-r--r--net/tipc/netlink.h1
-rw-r--r--net/tipc/netlink_compat.c28
-rw-r--r--net/tipc/node.c496
-rw-r--r--net/tipc/node.h25
-rw-r--r--net/tipc/socket.c131
-rw-r--r--net/tipc/sysctl.c11
-rw-r--r--net/tipc/udp_media.c7
-rw-r--r--net/tls/Kconfig10
-rw-r--r--net/tls/Makefile5
-rw-r--r--net/tls/tls_device.c46
-rw-r--r--net/tls/tls_main.c173
-rw-r--r--net/tls/tls_proc.c49
-rw-r--r--net/tls/tls_sw.c29
-rw-r--r--net/tls/tls_toe.c139
-rw-r--r--net/tls/trace.c10
-rw-r--r--net/tls/trace.h202
-rw-r--r--net/unix/af_unix.c6
-rw-r--r--net/vmw_vsock/af_vsock.c397
-rw-r--r--net/vmw_vsock/hyperv_transport.c94
-rw-r--r--net/vmw_vsock/virtio_transport.c177
-rw-r--r--net/vmw_vsock/virtio_transport_common.c223
-rw-r--r--net/vmw_vsock/vmci_transport.c142
-rw-r--r--net/vmw_vsock/vmci_transport.h3
-rw-r--r--net/vmw_vsock/vmci_transport_notify.h1
-rw-r--r--net/wireless/nl80211.c17
-rw-r--r--net/wireless/reg.h2
-rw-r--r--net/x25/af_x25.c4
-rw-r--r--net/xdp/xsk.c41
-rw-r--r--net/xfrm/Kconfig12
-rw-r--r--net/xfrm/xfrm_algo.c4
-rw-r--r--net/xfrm/xfrm_input.c3
-rw-r--r--net/xfrm/xfrm_interface.c23
-rw-r--r--net/xfrm/xfrm_state.c2
-rw-r--r--samples/Kconfig7
-rw-r--r--samples/Makefile1
-rw-r--r--samples/bpf/Makefile171
-rw-r--r--samples/bpf/Makefile.target75
-rw-r--r--samples/bpf/README.rst49
-rw-r--r--samples/bpf/hbm.c2
-rw-r--r--samples/bpf/hbm_kern.h27
-rw-r--r--samples/bpf/map_perf_test_kern.c28
-rw-r--r--samples/bpf/offwaketime_kern.c1
-rw-r--r--samples/bpf/parse_ldabs.c1
-rw-r--r--samples/bpf/sampleip_kern.c1
-rw-r--r--samples/bpf/sockex1_kern.c13
-rw-r--r--samples/bpf/sockex2_kern.c13
-rw-r--r--samples/bpf/sockex3_kern.c1
-rw-r--r--samples/bpf/spintest_kern.c1
-rw-r--r--samples/bpf/tcbpf1_kern.c1
-rw-r--r--samples/bpf/test_map_in_map_kern.c20
-rw-r--r--samples/bpf/test_overhead_kprobe_kern.c1
-rw-r--r--samples/bpf/test_probe_write_user_kern.c3
-rw-r--r--samples/bpf/trace_event_kern.c1
-rw-r--r--samples/bpf/tracex1_kern.c1
-rw-r--r--samples/bpf/tracex2_kern.c1
-rw-r--r--samples/bpf/tracex3_kern.c1
-rw-r--r--samples/bpf/tracex4_kern.c1
-rw-r--r--samples/bpf/tracex5_kern.c1
-rw-r--r--samples/bpf/xdp1_kern.c12
-rw-r--r--samples/bpf/xdp1_user.c2
-rw-r--r--samples/bpf/xdp2_kern.c12
-rw-r--r--samples/bpf/xdp_adjust_tail_kern.c19
-rw-r--r--samples/bpf/xdp_adjust_tail_user.c29
-rw-r--r--samples/bpf/xdp_fwd_kern.c13
-rw-r--r--samples/bpf/xdp_redirect_cpu_kern.c108
-rw-r--r--samples/bpf/xdp_redirect_kern.c24
-rw-r--r--samples/bpf/xdp_redirect_map_kern.c24
-rw-r--r--samples/bpf/xdp_router_ipv4_kern.c64
-rw-r--r--samples/bpf/xdp_rxq_info_kern.c37
-rw-r--r--samples/bpf/xdp_rxq_info_user.c6
-rw-r--r--samples/bpf/xdp_sample_pkts_user.c2
-rw-r--r--samples/bpf/xdp_tx_iptunnel_kern.c26
-rw-r--r--samples/bpf/xdp_tx_iptunnel_user.c2
-rw-r--r--samples/bpf/xdpsock.h11
-rw-r--r--samples/bpf/xdpsock_kern.c24
-rw-r--r--samples/bpf/xdpsock_user.c163
-rw-r--r--samples/mei/Makefile12
-rw-r--r--samples/pktgen/README.rst2
-rw-r--r--samples/pktgen/functions.sh154
-rw-r--r--samples/pktgen/parameters.sh2
-rwxr-xr-xsamples/pktgen/pktgen_bench_xmit_mode_netif_receive.sh15
-rwxr-xr-xsamples/pktgen/pktgen_bench_xmit_mode_queue_xmit.sh15
-rwxr-xr-xsamples/pktgen/pktgen_sample01_simple.sh23
-rwxr-xr-xsamples/pktgen/pktgen_sample02_multiqueue.sh23
-rwxr-xr-xsamples/pktgen/pktgen_sample03_burst_single_flow.sh15
-rwxr-xr-xsamples/pktgen/pktgen_sample04_many_flows.sh22
-rwxr-xr-xsamples/pktgen/pktgen_sample05_flow_per_thread.sh15
-rwxr-xr-xsamples/pktgen/pktgen_sample06_numa_awared_queue_irq_affinity.sh23
-rwxr-xr-xscripts/bpf_helpers_doc.py155
-rwxr-xr-xscripts/checkpatch.pl9
-rwxr-xr-xscripts/tools-support-relr.sh8
-rw-r--r--security/keys/Makefile2
-rw-r--r--security/keys/trusted-keys/Makefile8
-rw-r--r--security/keys/trusted-keys/trusted_tpm1.c (renamed from security/keys/trusted.c)98
-rw-r--r--security/keys/trusted-keys/trusted_tpm2.c314
-rw-r--r--security/safesetid/securityfs.c4
-rw-r--r--security/security.c27
-rw-r--r--security/selinux/hooks.c69
-rw-r--r--security/selinux/include/classmap.h2
-rw-r--r--security/selinux/include/objsec.h6
-rw-r--r--security/selinux/nlmsgtab.c4
-rw-r--r--sound/aoa/soundbus/i2sbus/pcm.c2
-rw-r--r--sound/arm/pxa2xx-pcm-lib.c80
-rw-r--r--sound/core/Kconfig28
-rw-r--r--sound/core/init.c1
-rw-r--r--sound/core/memalloc.c25
-rw-r--r--sound/core/pcm_dmaengine.c83
-rw-r--r--sound/core/pcm_lib.c8
-rw-r--r--sound/core/pcm_local.h7
-rw-r--r--sound/core/pcm_memory.c88
-rw-r--r--sound/core/pcm_native.c68
-rw-r--r--sound/core/seq/seq_timer.c18
-rw-r--r--sound/core/timer.c182
-rw-r--r--sound/drivers/Kconfig21
-rw-r--r--sound/drivers/aloop.c665
-rw-r--r--sound/drivers/dummy.c2
-rw-r--r--sound/drivers/ml403-ac97cr.c2
-rw-r--r--sound/drivers/pcsp/pcsp_lib.c4
-rw-r--r--sound/drivers/vx/vx_pcm.c10
-rw-r--r--sound/firewire/Kconfig6
-rw-r--r--sound/firewire/amdtp-stream.c407
-rw-r--r--sound/firewire/amdtp-stream.h28
-rw-r--r--sound/firewire/bebob/bebob.h4
-rw-r--r--sound/firewire/bebob/bebob_midi.c2
-rw-r--r--sound/firewire/bebob/bebob_pcm.c80
-rw-r--r--sound/firewire/bebob/bebob_stream.c92
-rw-r--r--sound/firewire/dice/dice-midi.c2
-rw-r--r--sound/firewire/dice/dice-pcm.c83
-rw-r--r--sound/firewire/dice/dice-stream.c11
-rw-r--r--sound/firewire/dice/dice.h4
-rw-r--r--sound/firewire/digi00x/digi00x-midi.c2
-rw-r--r--sound/firewire/digi00x/digi00x-pcm.c66
-rw-r--r--sound/firewire/digi00x/digi00x-stream.c14
-rw-r--r--sound/firewire/digi00x/digi00x.h4
-rw-r--r--sound/firewire/fireface/ff-pcm.c60
-rw-r--r--sound/firewire/fireface/ff-stream.c22
-rw-r--r--sound/firewire/fireface/ff.h4
-rw-r--r--sound/firewire/fireworks/fireworks.h4
-rw-r--r--sound/firewire/fireworks/fireworks_midi.c2
-rw-r--r--sound/firewire/fireworks/fireworks_pcm.c72
-rw-r--r--sound/firewire/fireworks/fireworks_stream.c14
-rw-r--r--sound/firewire/isight.c8
-rw-r--r--sound/firewire/motu/motu-midi.c2
-rw-r--r--sound/firewire/motu/motu-pcm.c63
-rw-r--r--sound/firewire/motu/motu-proc.c4
-rw-r--r--sound/firewire/motu/motu-protocol-v2.c142
-rw-r--r--sound/firewire/motu/motu-protocol-v3.c4
-rw-r--r--sound/firewire/motu/motu-stream.c14
-rw-r--r--sound/firewire/motu/motu.c34
-rw-r--r--sound/firewire/motu/motu.h10
-rw-r--r--sound/firewire/oxfw/oxfw-midi.c4
-rw-r--r--sound/firewire/oxfw/oxfw-pcm.c80
-rw-r--r--sound/firewire/oxfw/oxfw-stream.c15
-rw-r--r--sound/firewire/oxfw/oxfw.h4
-rw-r--r--sound/firewire/tascam/tascam-pcm.c65
-rw-r--r--sound/firewire/tascam/tascam-stream.c14
-rw-r--r--sound/firewire/tascam/tascam.h4
-rw-r--r--sound/hda/Kconfig10
-rw-r--r--sound/hda/Makefile5
-rw-r--r--sound/hda/hdac_regmap.c1
-rw-r--r--sound/hda/intel-dsp-config.c357
-rw-r--r--sound/hda/intel-nhlt.c3
-rw-r--r--sound/isa/Kconfig18
-rw-r--r--sound/isa/cs423x/cs4236.c3
-rw-r--r--sound/mips/Kconfig12
-rw-r--r--sound/mips/hal2.c3
-rw-r--r--sound/mips/sgio2audio.c12
-rw-r--r--sound/pci/Kconfig2
-rw-r--r--sound/pci/ad1889.c6
-rw-r--r--sound/pci/ali5451/ali5451.c2
-rw-r--r--sound/pci/als300.c3
-rw-r--r--sound/pci/als4000.c3
-rw-r--r--sound/pci/asihpi/asihpi.c4
-rw-r--r--sound/pci/atiixp.c6
-rw-r--r--sound/pci/atiixp_modem.c4
-rw-r--r--sound/pci/au88x0/au88x0_pcm.c3
-rw-r--r--sound/pci/aw2/aw2-alsa.c6
-rw-r--r--sound/pci/azt3328.c8
-rw-r--r--sound/pci/bt87x.c5
-rw-r--r--sound/pci/ca0106/ca0106_main.c6
-rw-r--r--sound/pci/cmipci.c6
-rw-r--r--sound/pci/cs4281.c3
-rw-r--r--sound/pci/cs46xx/cs46xx_lib.c16
-rw-r--r--sound/pci/cs5535audio/cs5535audio_pcm.c6
-rw-r--r--sound/pci/ctxfi/ctpcm.c5
-rw-r--r--sound/pci/ctxfi/ctvmem.c2
-rw-r--r--sound/pci/echoaudio/echoaudio.c24
-rw-r--r--sound/pci/emu10k1/emu10k1.c5
-rw-r--r--sound/pci/emu10k1/emu10k1x.c6
-rw-r--r--sound/pci/emu10k1/emufx.c2
-rw-r--r--sound/pci/emu10k1/emupcm.c12
-rw-r--r--sound/pci/emu10k1/memory.c4
-rw-r--r--sound/pci/emu10k1/p16v.c4
-rw-r--r--sound/pci/ens1370.c8
-rw-r--r--sound/pci/es1938.c3
-rw-r--r--sound/pci/es1968.c4
-rw-r--r--sound/pci/fm801.c2
-rw-r--r--sound/pci/hda/Kconfig11
-rw-r--r--sound/pci/hda/hda_bind.c4
-rw-r--r--sound/pci/hda/hda_controller.c1
-rw-r--r--sound/pci/hda/hda_intel.c69
-rw-r--r--sound/pci/hda/hda_jack.c151
-rw-r--r--sound/pci/hda/hda_jack.h107
-rw-r--r--sound/pci/hda/patch_conexant.c1
-rw-r--r--sound/pci/hda/patch_hdmi.c328
-rw-r--r--sound/pci/hda/patch_realtek.c63
-rw-r--r--sound/pci/ice1712/ice1712.c9
-rw-r--r--sound/pci/ice1712/ice1724.c6
-rw-r--r--sound/pci/intel8x0.c4
-rw-r--r--sound/pci/intel8x0m.c4
-rw-r--r--sound/pci/korg1212/korg1212.c8
-rw-r--r--sound/pci/lola/lola.c2
-rw-r--r--sound/pci/lola/lola_pcm.c5
-rw-r--r--sound/pci/lx6464es/lx6464es.c2
-rw-r--r--sound/pci/maestro3.c3
-rw-r--r--sound/pci/mixart/mixart.c7
-rw-r--r--sound/pci/oxygen/oxygen_pcm.c10
-rw-r--r--sound/pci/pcxhr/pcxhr.c4
-rw-r--r--sound/pci/riptide/riptide.c6
-rw-r--r--sound/pci/rme32.c4
-rw-r--r--sound/pci/rme9652/hdsp.c7
-rw-r--r--sound/pci/rme9652/hdspm.c3
-rw-r--r--sound/pci/rme9652/rme9652.c7
-rw-r--r--sound/pci/sis7019.c3
-rw-r--r--sound/pci/sonicvibes.c3
-rw-r--r--sound/pci/trident/trident_main.c24
-rw-r--r--sound/pci/via82xx.c17
-rw-r--r--sound/pci/via82xx_modem.c6
-rw-r--r--sound/pci/ymfpci/ymfpci_main.c16
-rw-r--r--sound/pcmcia/pdaudiocf/pdaudiocf_pcm.c9
-rw-r--r--sound/sh/aica.c2
-rw-r--r--sound/sh/sh_dac_audio.c2
-rw-r--r--sound/soc/amd/acp-pcm-dma.c63
-rw-r--r--sound/soc/amd/raven/acp3x-pcm-dma.c56
-rw-r--r--sound/soc/atmel/atmel-pcm-pdc.c48
-rw-r--r--sound/soc/au1x/dbdma2.c64
-rw-r--r--sound/soc/au1x/dma.c65
-rw-r--r--sound/soc/bcm/cygnus-pcm.c56
-rw-r--r--sound/soc/cirrus/Kconfig14
-rw-r--r--sound/soc/codecs/Kconfig74
-rw-r--r--sound/soc/codecs/Makefile10
-rw-r--r--sound/soc/codecs/adau1761.c129
-rw-r--r--sound/soc/codecs/adau7118-hw.c43
-rw-r--r--sound/soc/codecs/adau7118-i2c.c82
-rw-r--r--sound/soc/codecs/adau7118.c586
-rw-r--r--sound/soc/codecs/adau7118.h24
-rw-r--r--sound/soc/codecs/cros_ec_codec.c1128
-rw-r--r--sound/soc/codecs/cx2072x.c2
-rw-r--r--sound/soc/codecs/hdac_hda.c114
-rw-r--r--sound/soc/codecs/hdac_hda.h13
-rw-r--r--sound/soc/codecs/madera.h2
-rw-r--r--sound/soc/codecs/msm8916-wcd-analog.c54
-rw-r--r--sound/soc/codecs/mt6358.c105
-rw-r--r--sound/soc/codecs/pcm3168a.c143
-rw-r--r--sound/soc/codecs/rt1011.c249
-rw-r--r--sound/soc/codecs/rt1011.h24
-rw-r--r--sound/soc/codecs/rt5514-spi.c48
-rw-r--r--sound/soc/codecs/rt5645.c19
-rw-r--r--sound/soc/codecs/rt5663.c4
-rw-r--r--sound/soc/codecs/rt5677-spi.c398
-rw-r--r--sound/soc/codecs/rt5677-spi.h1
-rw-r--r--sound/soc/codecs/rt5677.c445
-rw-r--r--sound/soc/codecs/rt5677.h11
-rw-r--r--sound/soc/codecs/rt5682.c43
-rw-r--r--sound/soc/codecs/tas2562.c590
-rw-r--r--sound/soc/codecs/tas2562.h85
-rw-r--r--sound/soc/codecs/tas2770.c819
-rw-r--r--sound/soc/codecs/tas2770.h143
-rw-r--r--sound/soc/codecs/tlv320aic31xx.c45
-rw-r--r--sound/soc/codecs/tlv320aic31xx.h8
-rw-r--r--sound/soc/codecs/tlv320aic32x4.c3
-rw-r--r--sound/soc/codecs/wcd9335.c10
-rw-r--r--sound/soc/codecs/wm2200.c5
-rw-r--r--sound/soc/codecs/wm5100.c2
-rw-r--r--sound/soc/codecs/wm8904.c73
-rw-r--r--sound/soc/codecs/wm8904.h1
-rw-r--r--sound/soc/codecs/wm8958-dsp2.c22
-rw-r--r--sound/soc/codecs/wm8994.c156
-rw-r--r--sound/soc/codecs/wm8994.h10
-rw-r--r--sound/soc/codecs/wm_adsp.c81
-rw-r--r--sound/soc/codecs/wm_adsp.h4
-rw-r--r--sound/soc/dwc/dwc-pcm.c50
-rw-r--r--sound/soc/fsl/Kconfig10
-rw-r--r--sound/soc/fsl/Makefile2
-rw-r--r--sound/soc/fsl/fsl_asrc.c110
-rw-r--r--sound/soc/fsl/fsl_asrc.h7
-rw-r--r--sound/soc/fsl/fsl_asrc_dma.c120
-rw-r--r--sound/soc/fsl/fsl_audmix.c6
-rw-r--r--sound/soc/fsl/fsl_audmix.h1
-rw-r--r--sound/soc/fsl/fsl_dma.c54
-rw-r--r--sound/soc/fsl/fsl_esai.c12
-rw-r--r--sound/soc/fsl/fsl_mqs.c335
-rw-r--r--sound/soc/fsl/imx-pcm-fiq.c56
-rw-r--r--sound/soc/fsl/mpc5200_dma.c51
-rw-r--r--sound/soc/generic/audio-graph-card.c4
-rw-r--r--sound/soc/generic/simple-card.c4
-rw-r--r--sound/soc/intel/Kconfig21
-rw-r--r--sound/soc/intel/atom/sst-mfld-platform-pcm.c30
-rw-r--r--sound/soc/intel/baytrail/sst-baytrail-pcm.c52
-rw-r--r--sound/soc/intel/boards/Kconfig100
-rw-r--r--sound/soc/intel/boards/Makefile14
-rw-r--r--sound/soc/intel/boards/bdw-rt5677.c51
-rw-r--r--sound/soc/intel/boards/bxt_da7219_max98357a.c11
-rw-r--r--sound/soc/intel/boards/bxt_rt298.c11
-rw-r--r--sound/soc/intel/boards/bytcr_rt5640.c10
-rw-r--r--sound/soc/intel/boards/cht_bsw_rt5645.c26
-rw-r--r--sound/soc/intel/boards/cml_rt1011_rt5682.c487
-rw-r--r--sound/soc/intel/boards/glk_rt5682_max98357a.c11
-rw-r--r--sound/soc/intel/boards/hda_dsp_common.c85
-rw-r--r--sound/soc/intel/boards/hda_dsp_common.h32
-rw-r--r--sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c149
-rw-r--r--sound/soc/intel/boards/skl_hda_dsp_common.c6
-rw-r--r--sound/soc/intel/boards/skl_hda_dsp_common.h27
-rw-r--r--sound/soc/intel/boards/skl_hda_dsp_generic.c3
-rw-r--r--sound/soc/intel/boards/sof_rt5682.c13
-rw-r--r--sound/soc/intel/common/Makefile4
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-cfl-match.c18
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-cml-match.c56
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-cnl-match.c31
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-jsl-match.c18
-rw-r--r--sound/soc/intel/haswell/sst-haswell-pcm.c50
-rw-r--r--sound/soc/intel/skylake/skl-pcm.c57
-rw-r--r--sound/soc/intel/skylake/skl.c19
-rw-r--r--sound/soc/jz4740/jz4740-i2s.c6
-rw-r--r--sound/soc/kirkwood/kirkwood-dma.c52
-rw-r--r--sound/soc/mediatek/Kconfig1
-rw-r--r--sound/soc/mediatek/common/mtk-afe-platform-driver.c28
-rw-r--r--sound/soc/mediatek/common/mtk-afe-platform-driver.h10
-rw-r--r--sound/soc/mediatek/common/mtk-btcvsd.c76
-rw-r--r--sound/soc/mediatek/mt6797/mt6797-afe-pcm.c11
-rw-r--r--sound/soc/mediatek/mt8183/mt8183-afe-pcm.c26
-rw-r--r--sound/soc/mediatek/mt8183/mt8183-mt6358-ts3a227-max98357.c70
-rw-r--r--sound/soc/meson/axg-fifo.c56
-rw-r--r--sound/soc/meson/axg-fifo.h20
-rw-r--r--sound/soc/meson/axg-frddr.c24
-rw-r--r--sound/soc/meson/axg-toddr.c24
-rw-r--r--sound/soc/pxa/Kconfig16
-rw-r--r--sound/soc/pxa/mmp-pcm.c62
-rw-r--r--sound/soc/pxa/mmp-sspa.c2
-rw-r--r--sound/soc/pxa/poodle.c2
-rw-r--r--sound/soc/pxa/pxa-ssp.c24
-rw-r--r--sound/soc/pxa/pxa2xx-ac97.c14
-rw-r--r--sound/soc/pxa/pxa2xx-i2s.c14
-rw-r--r--sound/soc/pxa/pxa2xx-pcm.c14
-rw-r--r--sound/soc/qcom/Kconfig20
-rw-r--r--sound/soc/qcom/lpass-platform.c70
-rw-r--r--sound/soc/qcom/qdsp6/q6asm-dai.c106
-rw-r--r--sound/soc/qcom/qdsp6/q6asm.c55
-rw-r--r--sound/soc/qcom/qdsp6/q6asm.h15
-rw-r--r--sound/soc/qcom/qdsp6/q6routing.c14
-rw-r--r--sound/soc/rockchip/Kconfig3
-rw-r--r--sound/soc/rockchip/rockchip_max98090.c313
-rw-r--r--sound/soc/samsung/Kconfig12
-rw-r--r--sound/soc/samsung/Makefile4
-rw-r--r--sound/soc/samsung/arndale.c217
-rw-r--r--sound/soc/samsung/arndale_rt5631.c164
-rw-r--r--sound/soc/samsung/idma.c58
-rw-r--r--sound/soc/samsung/s3c-i2s-v2.c6
-rw-r--r--sound/soc/sh/dma-sh7760.c48
-rw-r--r--sound/soc/sh/fsi.c31
-rw-r--r--sound/soc/sh/rcar/core.c54
-rw-r--r--sound/soc/sh/rcar/dma.c30
-rw-r--r--sound/soc/sh/siu_pcm.c44
-rw-r--r--sound/soc/soc-component.c142
-rw-r--r--sound/soc/soc-compress.c52
-rw-r--r--sound/soc/soc-core.c1169
-rw-r--r--sound/soc/soc-generic-dmaengine-pcm.c165
-rw-r--r--sound/soc/soc-jack.c3
-rw-r--r--sound/soc/soc-ops.c11
-rw-r--r--sound/soc/soc-pcm.c241
-rw-r--r--sound/soc/soc-topology.c17
-rw-r--r--sound/soc/soc-utils.c11
-rw-r--r--sound/soc/sof/Kconfig43
-rw-r--r--sound/soc/sof/control.c34
-rw-r--r--sound/soc/sof/core.c44
-rw-r--r--sound/soc/sof/debug.c16
-rw-r--r--sound/soc/sof/imx/Kconfig20
-rw-r--r--sound/soc/sof/imx/imx8.c7
-rw-r--r--sound/soc/sof/intel/Kconfig74
-rw-r--r--sound/soc/sof/intel/apl.c8
-rw-r--r--sound/soc/sof/intel/bdw.c31
-rw-r--r--sound/soc/sof/intel/byt.c44
-rw-r--r--sound/soc/sof/intel/cnl.c67
-rw-r--r--sound/soc/sof/intel/hda-codec.c22
-rw-r--r--sound/soc/sof/intel/hda-dsp.c137
-rw-r--r--sound/soc/sof/intel/hda-ipc.c6
-rw-r--r--sound/soc/sof/intel/hda-ipc.h51
-rw-r--r--sound/soc/sof/intel/hda-loader.c25
-rw-r--r--sound/soc/sof/intel/hda-pcm.c15
-rw-r--r--sound/soc/sof/intel/hda-stream.c24
-rw-r--r--sound/soc/sof/intel/hda.c25
-rw-r--r--sound/soc/sof/intel/hda.h23
-rw-r--r--sound/soc/sof/ipc.c10
-rw-r--r--sound/soc/sof/ops.h10
-rw-r--r--sound/soc/sof/pcm.c123
-rw-r--r--sound/soc/sof/pm.c130
-rw-r--r--sound/soc/sof/sof-acpi-dev.c12
-rw-r--r--sound/soc/sof/sof-pci-dev.c94
-rw-r--r--sound/soc/sof/sof-priv.h54
-rw-r--r--sound/soc/sof/topology.c498
-rw-r--r--sound/soc/sof/trace.c17
-rw-r--r--sound/soc/sprd/sprd-pcm-dma.c74
-rw-r--r--sound/soc/stm/stm32_adfsdm.c42
-rw-r--r--sound/soc/stm/stm32_sai.c2
-rw-r--r--sound/soc/stm/stm32_spdifrx.c18
-rw-r--r--sound/soc/sunxi/sun4i-codec.c6
-rw-r--r--sound/soc/tegra/tegra30_i2s.c56
-rw-r--r--sound/soc/ti/davinci-mcasp.c2
-rw-r--r--sound/soc/txx9/txx9aclc.c48
-rw-r--r--sound/soc/uniphier/aio-dma.c51
-rw-r--r--sound/soc/ux500/ux500_msp_i2s.c3
-rw-r--r--sound/soc/xilinx/Kconfig20
-rw-r--r--sound/soc/xilinx/xlnx_formatter_pcm.c63
-rw-r--r--sound/soc/xtensa/xtfpga-i2s.c30
-rw-r--r--sound/soc/zte/Kconfig12
-rw-r--r--sound/sparc/amd7930.c2
-rw-r--r--sound/sparc/dbri.c2
-rw-r--r--sound/usb/6fire/pcm.c16
-rw-r--r--sound/usb/Kconfig32
-rw-r--r--sound/usb/caiaq/audio.c8
-rw-r--r--sound/usb/card.c3
-rw-r--r--sound/usb/clock.c10
-rw-r--r--sound/usb/endpoint.c3
-rw-r--r--sound/usb/hiface/pcm.c9
-rw-r--r--sound/usb/line6/pcm.c4
-rw-r--r--sound/usb/misc/ua101.c14
-rw-r--r--sound/usb/mixer.c7
-rw-r--r--sound/usb/mixer_scarlett.c23
-rw-r--r--sound/usb/mixer_scarlett_gen2.c36
-rw-r--r--sound/usb/pcm.c50
-rw-r--r--sound/usb/quirks.c4
-rw-r--r--sound/usb/usbaudio.h1
-rw-r--r--sound/usb/usx2y/usbusx2yaudio.c4
-rw-r--r--sound/usb/usx2y/usx2yhwdeppcm.c4
-rw-r--r--sound/usb/validate.c29
-rw-r--r--sound/x86/intel_hdmi_audio.c4
-rw-r--r--tools/arch/x86/include/asm/disabled-features.h2
-rw-r--r--tools/arch/x86/include/asm/emulate_prefix.h14
-rw-r--r--tools/arch/x86/include/asm/insn.h6
-rw-r--r--tools/arch/x86/include/asm/irq_vectors.h146
-rw-r--r--tools/arch/x86/include/asm/msr-index.h857
-rw-r--r--tools/arch/x86/lib/insn.c34
-rw-r--r--tools/arch/x86/lib/x86-opcode-map.txt18
-rw-r--r--tools/arch/x86/tools/gen-insn-attr-x86.awk4
-rw-r--r--tools/bpf/Makefile6
-rw-r--r--tools/bpf/bpf_exp.y14
-rw-r--r--tools/bpf/bpftool/btf.c57
-rw-r--r--tools/bpf/bpftool/main.c4
-rw-r--r--tools/bpf/bpftool/main.h2
-rw-r--r--tools/bpf/bpftool/prog.c22
-rw-r--r--tools/gpio/Build1
-rw-r--r--tools/gpio/Makefile10
-rw-r--r--tools/include/uapi/linux/bpf.h188
-rw-r--r--tools/include/uapi/linux/fcntl.h9
-rw-r--r--tools/include/uapi/linux/if_link.h2
-rw-r--r--tools/include/uapi/linux/perf_event.h10
-rw-r--r--tools/lib/api/debug-internal.h4
-rw-r--r--tools/lib/api/debug.c4
-rw-r--r--tools/lib/api/fs/fs.c4
-rw-r--r--tools/lib/bpf/.gitignore4
-rw-r--r--tools/lib/bpf/Makefile58
-rw-r--r--tools/lib/bpf/bpf.c11
-rw-r--r--tools/lib/bpf/bpf.h10
-rw-r--r--tools/lib/bpf/bpf_core_read.h263
-rw-r--r--tools/lib/bpf/bpf_endian.h (renamed from tools/testing/selftests/bpf/bpf_endian.h)0
-rw-r--r--tools/lib/bpf/bpf_helpers.h47
-rw-r--r--tools/lib/bpf/bpf_prog_linfo.c14
-rw-r--r--tools/lib/bpf/bpf_tracing.h195
-rw-r--r--tools/lib/bpf/btf.c97
-rw-r--r--tools/lib/bpf/btf.h6
-rw-r--r--tools/lib/bpf/btf_dump.c37
-rw-r--r--tools/lib/bpf/libbpf.c2104
-rw-r--r--tools/lib/bpf/libbpf.h99
-rw-r--r--tools/lib/bpf/libbpf.map18
-rw-r--r--tools/lib/bpf/libbpf_internal.h63
-rw-r--r--tools/lib/bpf/libbpf_probes.c1
-rw-r--r--tools/lib/bpf/netlink.c87
-rw-r--r--tools/lib/bpf/nlattr.c10
-rw-r--r--tools/lib/bpf/test_libbpf.c (renamed from tools/lib/bpf/test_libbpf.cpp)14
-rw-r--r--tools/lib/bpf/xsk.c173
-rw-r--r--tools/lib/lockdep/include/liblockdep/common.h3
-rw-r--r--tools/lib/lockdep/include/liblockdep/mutex.h2
-rw-r--r--tools/lib/lockdep/include/liblockdep/rwlock.h2
-rw-r--r--tools/lib/lockdep/preload.c16
-rw-r--r--tools/lib/subcmd/Makefile9
-rw-r--r--tools/lib/traceevent/Makefile8
-rw-r--r--tools/lib/traceevent/event-parse.c7
-rw-r--r--tools/lib/traceevent/parse-filter.c9
-rw-r--r--tools/memory-model/Documentation/explanation.txt602
-rw-r--r--tools/memory-model/linux-kernel.cat2
-rw-r--r--tools/objtool/check.c2
-rwxr-xr-xtools/objtool/sync-check.sh3
-rw-r--r--tools/perf/Documentation/intel-pt.txt59
-rw-r--r--tools/perf/Documentation/perf-config.txt5
-rw-r--r--tools/perf/Documentation/perf-diff.txt5
-rw-r--r--tools/perf/Documentation/perf-list.txt3
-rw-r--r--tools/perf/Documentation/perf-record.txt16
-rw-r--r--tools/perf/Documentation/perf-report.txt11
-rw-r--r--tools/perf/Documentation/perf-stat.txt11
-rw-r--r--tools/perf/Documentation/perf-trace.txt14
-rw-r--r--tools/perf/Documentation/perf.data-directory-format.txt63
-rw-r--r--tools/perf/Documentation/perf.txt2
-rw-r--r--tools/perf/Makefile.config33
-rw-r--r--tools/perf/Makefile.perf21
-rw-r--r--tools/perf/arch/arm/util/Build2
-rw-r--r--tools/perf/arch/arm/util/perf_regs.c6
-rw-r--r--tools/perf/arch/arm64/util/Build1
-rw-r--r--tools/perf/arch/arm64/util/perf_regs.c6
-rw-r--r--tools/perf/arch/arm64/util/sym-handling.c3
-rw-r--r--tools/perf/arch/csky/util/Build2
-rw-r--r--tools/perf/arch/csky/util/perf_regs.c6
-rw-r--r--tools/perf/arch/powerpc/util/kvm-stat.c4
-rw-r--r--tools/perf/arch/riscv/util/Build2
-rw-r--r--tools/perf/arch/riscv/util/perf_regs.c6
-rw-r--r--tools/perf/arch/s390/annotate/instructions.c8
-rw-r--r--tools/perf/arch/s390/util/Build1
-rw-r--r--tools/perf/arch/s390/util/perf_regs.c6
-rw-r--r--tools/perf/arch/x86/tests/insn-x86-dat-32.c52
-rw-r--r--tools/perf/arch/x86/tests/insn-x86-dat-64.c62
-rw-r--r--tools/perf/arch/x86/tests/insn-x86-dat-src.c109
-rw-r--r--tools/perf/arch/x86/tests/perf-time-to-tsc.c9
-rw-r--r--tools/perf/arch/x86/util/auxtrace.c4
-rw-r--r--tools/perf/arch/x86/util/event.c2
-rw-r--r--tools/perf/arch/x86/util/intel-bts.c5
-rw-r--r--tools/perf/arch/x86/util/intel-pt.c81
-rw-r--r--tools/perf/builtin-annotate.c8
-rw-r--r--tools/perf/builtin-diff.c258
-rw-r--r--tools/perf/builtin-inject.c83
-rw-r--r--tools/perf/builtin-kmem.c4
-rw-r--r--tools/perf/builtin-kvm.c13
-rw-r--r--tools/perf/builtin-list.c14
-rw-r--r--tools/perf/builtin-record.c160
-rw-r--r--tools/perf/builtin-report.c81
-rw-r--r--tools/perf/builtin-sched.c2
-rw-r--r--tools/perf/builtin-script.c10
-rw-r--r--tools/perf/builtin-stat.c60
-rw-r--r--tools/perf/builtin-top.c29
-rw-r--r--tools/perf/builtin-trace.c993
-rwxr-xr-xtools/perf/check-headers.sh5
-rw-r--r--tools/perf/lib/Build1
-rw-r--r--tools/perf/lib/Makefile6
-rw-r--r--tools/perf/lib/core.c3
-rw-r--r--tools/perf/lib/evlist.c357
-rw-r--r--tools/perf/lib/evsel.c3
-rw-r--r--tools/perf/lib/include/internal/evlist.h43
-rw-r--r--tools/perf/lib/include/internal/evsel.h1
-rw-r--r--tools/perf/lib/include/internal/mmap.h45
-rw-r--r--tools/perf/lib/include/internal/tests.h20
-rw-r--r--tools/perf/lib/include/perf/core.h3
-rw-r--r--tools/perf/lib/include/perf/evlist.h15
-rw-r--r--tools/perf/lib/include/perf/evsel.h2
-rw-r--r--tools/perf/lib/include/perf/mmap.h15
-rw-r--r--tools/perf/lib/internal.h5
-rw-r--r--tools/perf/lib/libperf.map10
-rw-r--r--tools/perf/lib/mmap.c275
-rw-r--r--tools/perf/lib/tests/Makefile6
-rw-r--r--tools/perf/lib/tests/test-cpumap.c2
-rw-r--r--tools/perf/lib/tests/test-evlist.c219
-rw-r--r--tools/perf/lib/tests/test-evsel.c2
-rw-r--r--tools/perf/lib/tests/test-threadmap.c2
-rw-r--r--tools/perf/pmu-events/arch/arm64/ampere/emag/branch.json8
-rw-r--r--tools/perf/pmu-events/arch/arm64/ampere/emag/bus.json14
-rw-r--r--tools/perf/pmu-events/arch/arm64/ampere/emag/cache.json28
-rw-r--r--tools/perf/pmu-events/arch/arm64/ampere/emag/clock.json2
-rw-r--r--tools/perf/pmu-events/arch/arm64/ampere/emag/exception.json26
-rw-r--r--tools/perf/pmu-events/arch/arm64/ampere/emag/instruction.json28
-rw-r--r--tools/perf/pmu-events/arch/arm64/ampere/emag/intrinsic.json10
-rw-r--r--tools/perf/pmu-events/arch/arm64/ampere/emag/memory.json12
-rw-r--r--tools/perf/pmu-events/arch/arm64/ampere/emag/pipeline.json2
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a53/branch.json2
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a53/bus.json4
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a53/other.json4
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a57-a72/core-imp-def.json120
-rw-r--r--tools/perf/pmu-events/arch/arm64/armv8-recommended.json158
-rw-r--r--tools/perf/pmu-events/arch/arm64/cavium/thunderx2/core-imp-def.json74
-rw-r--r--tools/perf/pmu-events/arch/arm64/hisilicon/hip08/core-imp-def.json60
-rw-r--r--tools/perf/pmu-events/arch/arm64/hisilicon/hip08/uncore-ddrc.json30
-rw-r--r--tools/perf/pmu-events/arch/arm64/hisilicon/hip08/uncore-hha.json37
-rw-r--r--tools/perf/pmu-events/arch/arm64/hisilicon/hip08/uncore-l3c.json66
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power8/cache.json60
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power8/floating-point.json6
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power8/frontend.json158
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power8/marked.json266
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power8/memory.json72
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power8/other.json1150
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power8/pipeline.json118
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power8/pmc.json48
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power8/translation.json60
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power9/cache.json44
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power9/floating-point.json14
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power9/frontend.json142
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power9/marked.json250
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power9/memory.json52
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power9/other.json934
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power9/pipeline.json212
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power9/pmc.json48
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power9/translation.json92
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwell/bdw-metrics.json178
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/bdx-metrics.json184
-rw-r--r--tools/perf/pmu-events/arch/x86/cascadelakex/cache.json12012
-rw-r--r--tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json210
-rw-r--r--tools/perf/pmu-events/arch/x86/cascadelakex/floating-point.json92
-rw-r--r--tools/perf/pmu-events/arch/x86/cascadelakex/frontend.json656
-rw-r--r--tools/perf/pmu-events/arch/x86/cascadelakex/memory.json11386
-rw-r--r--tools/perf/pmu-events/arch/x86/cascadelakex/other.json9574
-rw-r--r--tools/perf/pmu-events/arch/x86/cascadelakex/pipeline.json1222
-rw-r--r--tools/perf/pmu-events/arch/x86/cascadelakex/uncore-memory.json191
-rw-r--r--tools/perf/pmu-events/arch/x86/cascadelakex/uncore-other.json1585
-rw-r--r--tools/perf/pmu-events/arch/x86/cascadelakex/virtual-memory.json339
-rw-r--r--tools/perf/pmu-events/arch/x86/haswell/hsw-metrics.json164
-rw-r--r--tools/perf/pmu-events/arch/x86/haswellx/hsx-metrics.json170
-rw-r--r--tools/perf/pmu-events/arch/x86/ivybridge/ivb-metrics.json170
-rw-r--r--tools/perf/pmu-events/arch/x86/ivytown/ivt-metrics.json172
-rw-r--r--tools/perf/pmu-events/arch/x86/jaketown/jkt-metrics.json114
-rw-r--r--tools/perf/pmu-events/arch/x86/sandybridge/snb-metrics.json112
-rw-r--r--tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json188
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json204
-rw-r--r--tools/perf/pmu-events/jevents.c39
-rw-r--r--tools/perf/pmu-events/jevents.h3
-rw-r--r--tools/perf/pmu-events/pmu-events.h1
-rwxr-xr-xtools/perf/scripts/python/exported-sql-viewer.py1565
-rw-r--r--tools/perf/tests/attr/base-record2
-rw-r--r--tools/perf/tests/attr/base-stat2
-rw-r--r--tools/perf/tests/backward-ring-buffer.c16
-rw-r--r--tools/perf/tests/bp_account.c20
-rw-r--r--tools/perf/tests/bp_signal.c15
-rw-r--r--tools/perf/tests/bpf.c7
-rw-r--r--tools/perf/tests/builtin-test.c2
-rw-r--r--tools/perf/tests/code-reading.c9
-rw-r--r--tools/perf/tests/dwarf-unwind.c2
-rw-r--r--tools/perf/tests/keep-tracking.c9
-rw-r--r--tools/perf/tests/map_groups.c11
-rw-r--r--tools/perf/tests/mmap-basic.c9
-rw-r--r--tools/perf/tests/openat-syscall-tp-fields.c9
-rw-r--r--tools/perf/tests/parse-events.c3
-rw-r--r--tools/perf/tests/perf-record.c9
-rw-r--r--tools/perf/tests/sample-parsing.c16
-rw-r--r--tools/perf/tests/sw-clock.c9
-rw-r--r--tools/perf/tests/switch-tracking.c9
-rw-r--r--tools/perf/tests/task-exit.c18
-rw-r--r--tools/perf/tests/tests.h1
-rw-r--r--tools/perf/tests/vmlinux-kallsyms.c6
-rw-r--r--tools/perf/trace/beauty/Build1
-rw-r--r--tools/perf/trace/beauty/beauty.h35
-rw-r--r--tools/perf/trace/beauty/mmap.c4
-rw-r--r--tools/perf/trace/beauty/tracepoints/Build2
-rw-r--r--tools/perf/trace/beauty/tracepoints/x86_irq_vectors.c29
-rwxr-xr-xtools/perf/trace/beauty/tracepoints/x86_irq_vectors.sh27
-rw-r--r--tools/perf/trace/beauty/tracepoints/x86_msr.c39
-rwxr-xr-xtools/perf/trace/beauty/tracepoints/x86_msr.sh40
-rw-r--r--tools/perf/ui/browsers/annotate.c25
-rw-r--r--tools/perf/ui/browsers/hists.c105
-rw-r--r--tools/perf/ui/browsers/hists.h2
-rw-r--r--tools/perf/ui/gtk/annotate.c27
-rw-r--r--tools/perf/ui/stdio/hist.c29
-rw-r--r--tools/perf/util/Build2
-rw-r--r--tools/perf/util/annotate.c305
-rw-r--r--tools/perf/util/annotate.h24
-rw-r--r--tools/perf/util/auxtrace.c350
-rw-r--r--tools/perf/util/auxtrace.h44
-rw-r--r--tools/perf/util/block-info.c477
-rw-r--r--tools/perf/util/block-info.h79
-rw-r--r--tools/perf/util/callchain.c40
-rw-r--r--tools/perf/util/callchain.h5
-rw-r--r--tools/perf/util/cpumap.c18
-rw-r--r--tools/perf/util/cpumap.h3
-rw-r--r--tools/perf/util/cs-etm.c4
-rw-r--r--tools/perf/util/data.c46
-rw-r--r--tools/perf/util/data.h12
-rw-r--r--tools/perf/util/db-export.c16
-rw-r--r--tools/perf/util/debug.c2
-rw-r--r--tools/perf/util/debug.h9
-rw-r--r--tools/perf/util/dso.c159
-rw-r--r--tools/perf/util/dso.h20
-rw-r--r--tools/perf/util/dsos.c97
-rw-r--r--tools/perf/util/dsos.h14
-rw-r--r--tools/perf/util/dwarf-aux.c142
-rw-r--r--tools/perf/util/dwarf-aux.h3
-rw-r--r--tools/perf/util/env.c56
-rw-r--r--tools/perf/util/env.h7
-rw-r--r--tools/perf/util/event.c6
-rw-r--r--tools/perf/util/event.h6
-rw-r--r--tools/perf/util/evlist.c334
-rw-r--r--tools/perf/util/evlist.h13
-rw-r--r--tools/perf/util/evsel.c76
-rw-r--r--tools/perf/util/evsel_config.h13
-rw-r--r--tools/perf/util/evsel_fprintf.c29
-rw-r--r--tools/perf/util/header.h4
-rw-r--r--tools/perf/util/hist.c71
-rw-r--r--tools/perf/util/hist.h18
-rw-r--r--tools/perf/util/intel-pt.c109
-rw-r--r--tools/perf/util/llvm-utils.c5
-rw-r--r--tools/perf/util/machine.c125
-rw-r--r--tools/perf/util/machine.h4
-rw-r--r--tools/perf/util/map.c178
-rw-r--r--tools/perf/util/map.h17
-rw-r--r--tools/perf/util/map_groups.h31
-rw-r--r--tools/perf/util/map_symbol.h5
-rw-r--r--tools/perf/util/mem-events.c2
-rw-r--r--tools/perf/util/metricgroup.c2
-rw-r--r--tools/perf/util/mmap.c260
-rw-r--r--tools/perf/util/mmap.h28
-rw-r--r--tools/perf/util/parse-events.c308
-rw-r--r--tools/perf/util/parse-events.h10
-rw-r--r--tools/perf/util/parse-events.l1
-rw-r--r--tools/perf/util/parse-events.y391
-rw-r--r--tools/perf/util/parse-regs-options.c8
-rw-r--r--tools/perf/util/perf_event_attr_fprintf.c3
-rw-r--r--tools/perf/util/perf_regs.c4
-rw-r--r--tools/perf/util/perf_regs.h4
-rw-r--r--tools/perf/util/pmu.c59
-rw-r--r--tools/perf/util/pmu.h6
-rw-r--r--tools/perf/util/probe-event.c21
-rw-r--r--tools/perf/util/probe-event.h3
-rw-r--r--tools/perf/util/probe-file.c14
-rw-r--r--tools/perf/util/probe-file.h2
-rw-r--r--tools/perf/util/probe-finder.c193
-rw-r--r--tools/perf/util/probe-finder.h1
-rw-r--r--tools/perf/util/python.c8
-rw-r--r--tools/perf/util/record.c31
-rw-r--r--tools/perf/util/record.h3
-rw-r--r--tools/perf/util/scripting-engines/trace-event-perl.c16
-rw-r--r--tools/perf/util/scripting-engines/trace-event-python.c18
-rw-r--r--tools/perf/util/session.c119
-rw-r--r--tools/perf/util/session.h11
-rw-r--r--tools/perf/util/sort.c113
-rw-r--r--tools/perf/util/sort.h4
-rw-r--r--tools/perf/util/spark.c34
-rw-r--r--tools/perf/util/spark.h8
-rw-r--r--tools/perf/util/stat-display.c15
-rw-r--r--tools/perf/util/stat.c11
-rw-r--r--tools/perf/util/stat.h3
-rw-r--r--tools/perf/util/string2.h3
-rw-r--r--tools/perf/util/symbol-elf.c2
-rw-r--r--tools/perf/util/symbol.c160
-rw-r--r--tools/perf/util/symbol.h26
-rw-r--r--tools/perf/util/symbol_conf.h1
-rw-r--r--tools/perf/util/synthetic-events.c14
-rw-r--r--tools/perf/util/thread.c2
-rw-r--r--tools/perf/util/time-utils.c27
-rw-r--r--tools/perf/util/time-utils.h5
-rw-r--r--tools/perf/util/unwind-libdw.c7
-rw-r--r--tools/perf/util/unwind-libunwind-local.c7
-rw-r--r--tools/perf/util/unwind.h8
-rw-r--r--tools/perf/util/util.c19
-rw-r--r--tools/perf/util/vdso.c4
-rw-r--r--tools/power/cpupower/ToDo14
-rw-r--r--tools/power/cpupower/utils/cpupower-info.c9
-rw-r--r--tools/power/cpupower/utils/cpupower-set.c9
-rw-r--r--tools/power/cpupower/utils/helpers/cpuid.c4
-rw-r--r--tools/power/cpupower/utils/helpers/helpers.h1
-rw-r--r--tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c2
-rw-r--r--tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c2
-rw-r--r--tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c2
-rw-r--r--tools/power/cpupower/utils/idle_monitor/cpupower-monitor.h5
-rw-r--r--tools/power/cpupower/utils/idle_monitor/hsw_ext_idle.c3
-rw-r--r--tools/power/cpupower/utils/idle_monitor/mperf_monitor.c64
-rw-r--r--tools/power/cpupower/utils/idle_monitor/nhm_idle.c2
-rw-r--r--tools/power/cpupower/utils/idle_monitor/snb_idle.c2
-rw-r--r--tools/testing/kunit/.gitignore3
-rw-r--r--tools/testing/kunit/configs/all_tests.config3
-rwxr-xr-xtools/testing/kunit/kunit.py138
-rw-r--r--tools/testing/kunit/kunit_config.py66
-rw-r--r--tools/testing/kunit/kunit_kernel.py149
-rw-r--r--tools/testing/kunit/kunit_parser.py310
-rwxr-xr-xtools/testing/kunit/kunit_tool_test.py206
-rw-r--r--tools/testing/kunit/test_data/test_is_test_passed-all_passed.log32
-rw-r--r--tools/testing/kunit/test_data/test_is_test_passed-crash.log69
-rw-r--r--tools/testing/kunit/test_data/test_is_test_passed-failure.log36
-rw-r--r--tools/testing/kunit/test_data/test_is_test_passed-no_tests_run.log75
-rw-r--r--tools/testing/kunit/test_data/test_output_isolated_correctly.log106
-rw-r--r--tools/testing/kunit/test_data/test_read_from_file.kconfig17
-rw-r--r--tools/testing/selftests/Makefile10
-rw-r--r--tools/testing/selftests/arm64/Makefile64
-rw-r--r--tools/testing/selftests/arm64/README25
-rw-r--r--tools/testing/selftests/arm64/signal/.gitignore3
-rw-r--r--tools/testing/selftests/arm64/signal/Makefile32
-rw-r--r--tools/testing/selftests/arm64/signal/README59
-rw-r--r--tools/testing/selftests/arm64/signal/signals.S64
-rw-r--r--tools/testing/selftests/arm64/signal/test_signals.c29
-rw-r--r--tools/testing/selftests/arm64/signal/test_signals.h100
-rw-r--r--tools/testing/selftests/arm64/signal/test_signals_utils.c328
-rw-r--r--tools/testing/selftests/arm64/signal/test_signals_utils.h120
-rw-r--r--tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_bad_magic.c52
-rw-r--r--tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_bad_size.c77
-rw-r--r--tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_bad_size_for_magic0.c46
-rw-r--r--tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_duplicated_fpsimd.c50
-rw-r--r--tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_misaligned_sp.c37
-rw-r--r--tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_missing_fpsimd.c50
-rw-r--r--tools/testing/selftests/arm64/signal/testcases/mangle_pstate_invalid_compat_toggle.c31
-rw-r--r--tools/testing/selftests/arm64/signal/testcases/mangle_pstate_invalid_daif_bits.c35
-rw-r--r--tools/testing/selftests/arm64/signal/testcases/mangle_pstate_invalid_mode_el1h.c15
-rw-r--r--tools/testing/selftests/arm64/signal/testcases/mangle_pstate_invalid_mode_el1t.c15
-rw-r--r--tools/testing/selftests/arm64/signal/testcases/mangle_pstate_invalid_mode_el2h.c15
-rw-r--r--tools/testing/selftests/arm64/signal/testcases/mangle_pstate_invalid_mode_el2t.c15
-rw-r--r--tools/testing/selftests/arm64/signal/testcases/mangle_pstate_invalid_mode_el3h.c15
-rw-r--r--tools/testing/selftests/arm64/signal/testcases/mangle_pstate_invalid_mode_el3t.c15
-rw-r--r--tools/testing/selftests/arm64/signal/testcases/mangle_pstate_invalid_mode_template.h28
-rw-r--r--tools/testing/selftests/arm64/signal/testcases/testcases.c196
-rw-r--r--tools/testing/selftests/arm64/signal/testcases/testcases.h104
-rw-r--r--tools/testing/selftests/arm64/tags/.gitignore (renamed from tools/testing/selftests/arm64/.gitignore)0
-rw-r--r--tools/testing/selftests/arm64/tags/Makefile7
-rwxr-xr-xtools/testing/selftests/arm64/tags/run_tags_test.sh (renamed from tools/testing/selftests/arm64/run_tags_test.sh)0
-rw-r--r--tools/testing/selftests/arm64/tags/tags_test.c (renamed from tools/testing/selftests/arm64/tags_test.c)0
-rw-r--r--tools/testing/selftests/bpf/.gitignore6
-rw-r--r--tools/testing/selftests/bpf/Makefile396
-rw-r--r--tools/testing/selftests/bpf/bpf_helpers.h535
-rw-r--r--tools/testing/selftests/bpf/bpf_legacy.h39
-rw-r--r--tools/testing/selftests/bpf/bpf_trace_helpers.h58
-rw-r--r--tools/testing/selftests/bpf/cgroup_helpers.c4
-rw-r--r--tools/testing/selftests/bpf/prog_tests/attach_probe.c44
-rw-r--r--tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c4
-rw-r--r--tools/testing/selftests/bpf/prog_tests/btf_dump.c (renamed from tools/testing/selftests/bpf/test_btf_dump.c)88
-rw-r--r--tools/testing/selftests/bpf/prog_tests/core_reloc.c261
-rw-r--r--tools/testing/selftests/bpf/prog_tests/fentry_fexit.c90
-rw-r--r--tools/testing/selftests/bpf/prog_tests/fentry_test.c64
-rw-r--r--tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c76
-rw-r--r--tools/testing/selftests/bpf/prog_tests/fexit_stress.c76
-rw-r--r--tools/testing/selftests/bpf/prog_tests/fexit_test.c64
-rw-r--r--tools/testing/selftests/bpf/prog_tests/flow_dissector_reattach.c140
-rw-r--r--tools/testing/selftests/bpf/prog_tests/kfree_skb.c154
-rw-r--r--tools/testing/selftests/bpf/prog_tests/mmap.c220
-rw-r--r--tools/testing/selftests/bpf/prog_tests/pinning.c224
-rw-r--r--tools/testing/selftests/bpf/prog_tests/probe_user.c78
-rw-r--r--tools/testing/selftests/bpf/prog_tests/rdonly_maps.c95
-rw-r--r--tools/testing/selftests/bpf/prog_tests/reference_tracking.c19
-rw-r--r--tools/testing/selftests/bpf/prog_tests/section_names.c (renamed from tools/testing/selftests/bpf/test_section_names.c)90
-rw-r--r--tools/testing/selftests/bpf/prog_tests/skb_ctx.c5
-rw-r--r--tools/testing/selftests/bpf/prog_tests/tailcalls.c487
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_overhead.c142
-rw-r--r--tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___err_wrong_val_type.c3
-rw-r--r--tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___err_wrong_val_type1.c3
-rw-r--r--tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___err_wrong_val_type2.c3
-rw-r--r--tools/testing/selftests/bpf/progs/btf__core_reloc_bitfields.c3
-rw-r--r--tools/testing/selftests/bpf/progs/btf__core_reloc_bitfields___bit_sz_change.c3
-rw-r--r--tools/testing/selftests/bpf/progs/btf__core_reloc_bitfields___bitfield_vs_int.c3
-rw-r--r--tools/testing/selftests/bpf/progs/btf__core_reloc_bitfields___err_too_big_bitfield.c3
-rw-r--r--tools/testing/selftests/bpf/progs/btf__core_reloc_bitfields___just_big_enough.c3
-rw-r--r--tools/testing/selftests/bpf/progs/btf__core_reloc_existence.c3
-rw-r--r--tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_arr_kind.c3
-rw-r--r--tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_arr_value_type.c3
-rw-r--r--tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_kind.c3
-rw-r--r--tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_sz.c3
-rw-r--r--tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_type.c3
-rw-r--r--tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_struct_type.c3
-rw-r--r--tools/testing/selftests/bpf/progs/btf__core_reloc_existence___minimal.c3
-rw-r--r--tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_bitfield.c3
-rw-r--r--tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_wrong_sz_16.c3
-rw-r--r--tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_wrong_sz_32.c3
-rw-r--r--tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_wrong_sz_64.c3
-rw-r--r--tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_wrong_sz_8.c3
-rw-r--r--tools/testing/selftests/bpf/progs/btf__core_reloc_size.c3
-rw-r--r--tools/testing/selftests/bpf/progs/btf__core_reloc_size___diff_sz.c3
-rw-r--r--tools/testing/selftests/bpf/progs/btf_dump_test_case_padding.c5
-rw-r--r--tools/testing/selftests/bpf/progs/core_reloc_types.h238
-rw-r--r--tools/testing/selftests/bpf/progs/fentry_test.c54
-rw-r--r--tools/testing/selftests/bpf/progs/fexit_bpf2bpf.c82
-rw-r--r--tools/testing/selftests/bpf/progs/fexit_test.c57
-rw-r--r--tools/testing/selftests/bpf/progs/kfree_skb.c153
-rw-r--r--tools/testing/selftests/bpf/progs/loop1.c1
-rw-r--r--tools/testing/selftests/bpf/progs/loop2.c1
-rw-r--r--tools/testing/selftests/bpf/progs/loop3.c1
-rw-r--r--tools/testing/selftests/bpf/progs/pyperf.h67
-rw-r--r--tools/testing/selftests/bpf/progs/sockopt_sk.c13
-rw-r--r--tools/testing/selftests/bpf/progs/strobemeta.h36
-rw-r--r--tools/testing/selftests/bpf/progs/tailcall1.c48
-rw-r--r--tools/testing/selftests/bpf/progs/tailcall2.c59
-rw-r--r--tools/testing/selftests/bpf/progs/tailcall3.c31
-rw-r--r--tools/testing/selftests/bpf/progs/tailcall4.c33
-rw-r--r--tools/testing/selftests/bpf/progs/tailcall5.c40
-rw-r--r--tools/testing/selftests/bpf/progs/tcp_rtt.c13
-rw-r--r--tools/testing/selftests/bpf/progs/test_attach_probe.c1
-rw-r--r--tools/testing/selftests/bpf/progs/test_btf_haskv.c5
-rw-r--r--tools/testing/selftests/bpf/progs/test_btf_newkv.c5
-rw-r--r--tools/testing/selftests/bpf/progs/test_btf_nokv.c4
-rw-r--r--tools/testing/selftests/bpf/progs/test_core_reloc_arrays.c15
-rw-r--r--tools/testing/selftests/bpf/progs/test_core_reloc_bitfields_direct.c63
-rw-r--r--tools/testing/selftests/bpf/progs/test_core_reloc_bitfields_probed.c57
-rw-r--r--tools/testing/selftests/bpf/progs/test_core_reloc_existence.c79
-rw-r--r--tools/testing/selftests/bpf/progs/test_core_reloc_flavors.c13
-rw-r--r--tools/testing/selftests/bpf/progs/test_core_reloc_ints.c23
-rw-r--r--tools/testing/selftests/bpf/progs/test_core_reloc_kernel.c70
-rw-r--r--tools/testing/selftests/bpf/progs/test_core_reloc_misc.c13
-rw-r--r--tools/testing/selftests/bpf/progs/test_core_reloc_mods.c23
-rw-r--r--tools/testing/selftests/bpf/progs/test_core_reloc_nesting.c11
-rw-r--r--tools/testing/selftests/bpf/progs/test_core_reloc_primitives.c17
-rw-r--r--tools/testing/selftests/bpf/progs/test_core_reloc_ptr_as_arr.c9
-rw-r--r--tools/testing/selftests/bpf/progs/test_core_reloc_size.c51
-rw-r--r--tools/testing/selftests/bpf/progs/test_get_stack_rawtp.c4
-rw-r--r--tools/testing/selftests/bpf/progs/test_mmap.c45
-rw-r--r--tools/testing/selftests/bpf/progs/test_overhead.c39
-rw-r--r--tools/testing/selftests/bpf/progs/test_perf_buffer.c1
-rw-r--r--tools/testing/selftests/bpf/progs/test_pinning.c31
-rw-r--r--tools/testing/selftests/bpf/progs/test_pinning_invalid.c16
-rw-r--r--tools/testing/selftests/bpf/progs/test_pkt_access.c38
-rw-r--r--tools/testing/selftests/bpf/progs/test_probe_user.c26
-rw-r--r--tools/testing/selftests/bpf/progs/test_queue_stack_map.h (renamed from tools/testing/selftests/bpf/test_queue_stack_map.h)0
-rw-r--r--tools/testing/selftests/bpf/progs/test_rdonly_maps.c83
-rw-r--r--tools/testing/selftests/bpf/progs/test_seg6_loop.c4
-rw-r--r--tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c18
-rw-r--r--tools/testing/selftests/bpf/progs/test_skb_ctx.c1
-rw-r--r--tools/testing/selftests/bpf/progs/test_stacktrace_map.c1
-rw-r--r--tools/testing/selftests/bpf/progs/test_sysctl_loop1.c5
-rw-r--r--tools/testing/selftests/bpf/progs/test_tcp_estats.c2
-rwxr-xr-xtools/testing/selftests/bpf/test_bpftool_build.sh30
-rwxr-xr-xtools/testing/selftests/bpf/test_flow_dissector.sh48
-rwxr-xr-xtools/testing/selftests/bpf/test_libbpf.sh43
-rw-r--r--tools/testing/selftests/bpf/test_libbpf_open.c144
-rw-r--r--tools/testing/selftests/bpf/test_maps.c12
-rwxr-xr-xtools/testing/selftests/bpf/test_offload.py20
-rw-r--r--tools/testing/selftests/bpf/test_progs.c68
-rw-r--r--tools/testing/selftests/bpf/test_progs.h10
-rw-r--r--tools/testing/selftests/bpf/test_stub.c4
-rw-r--r--tools/testing/selftests/bpf/test_sysctl.c23
-rwxr-xr-xtools/testing/selftests/bpf/test_tc_tunnel.sh5
-rw-r--r--tools/testing/selftests/bpf/verifier/jmp32.c83
-rw-r--r--tools/testing/selftests/bpf/verifier/loops1.c17
-rw-r--r--tools/testing/selftests/breakpoints/breakpoint_test_arm64.c2
-rw-r--r--tools/testing/selftests/cgroup/Makefile4
-rw-r--r--tools/testing/selftests/cgroup/cgroup_util.c42
-rw-r--r--tools/testing/selftests/cgroup/cgroup_util.h6
-rw-r--r--tools/testing/selftests/cgroup/test_core.c146
-rw-r--r--tools/testing/selftests/cgroup/test_freezer.c3
-rwxr-xr-xtools/testing/selftests/cgroup/test_stress.sh4
-rwxr-xr-xtools/testing/selftests/cgroup/with_stress.sh101
-rw-r--r--tools/testing/selftests/clone3/.gitignore3
-rw-r--r--tools/testing/selftests/clone3/Makefile6
-rw-r--r--tools/testing/selftests/clone3/clone3.c202
-rw-r--r--tools/testing/selftests/clone3/clone3_clear_sighand.c129
-rw-r--r--tools/testing/selftests/clone3/clone3_selftests.h63
-rw-r--r--tools/testing/selftests/clone3/clone3_set_tid.c397
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/devlink_trap_l2_drops.sh68
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/devlink_trap_l3_drops.sh563
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/devlink_trap_l3_exceptions.sh557
-rw-r--r--tools/testing/selftests/drivers/net/mlxsw/spectrum-2/mirror_gre_scale.sh16
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/spectrum-2/resource_scale.sh54
-rw-r--r--tools/testing/selftests/drivers/net/mlxsw/spectrum-2/router_scale.sh18
-rw-r--r--tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower_scale.sh20
-rw-r--r--tools/testing/selftests/drivers/net/mlxsw/spectrum/mirror_gre_scale.sh7
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/vxlan.sh8
-rwxr-xr-xtools/testing/selftests/drivers/net/netdevsim/devlink.sh303
-rwxr-xr-xtools/testing/selftests/drivers/net/netdevsim/devlink_in_netns.sh72
-rwxr-xr-xtools/testing/selftests/gen_kselftest_tar.sh21
-rwxr-xr-xtools/testing/selftests/kselftest/module.sh (renamed from tools/testing/selftests/kselftest_module.sh)0
-rwxr-xr-xtools/testing/selftests/kselftest_install.sh24
-rw-r--r--tools/testing/selftests/kvm/.gitignore1
-rw-r--r--tools/testing/selftests/kvm/Makefile1
-rw-r--r--tools/testing/selftests/kvm/include/x86_64/processor.h7
-rw-r--r--tools/testing/selftests/kvm/kvm_create_max_vcpus.c7
-rw-r--r--tools/testing/selftests/kvm/lib/assert.c4
-rw-r--r--tools/testing/selftests/kvm/lib/x86_64/processor.c72
-rw-r--r--tools/testing/selftests/kvm/s390x/sync_regs_test.c15
-rw-r--r--tools/testing/selftests/kvm/x86_64/xss_msr_test.c76
-rwxr-xr-xtools/testing/selftests/lib/bitmap.sh2
-rwxr-xr-xtools/testing/selftests/lib/prime_numbers.sh2
-rwxr-xr-xtools/testing/selftests/lib/printf.sh2
-rwxr-xr-xtools/testing/selftests/lib/strscpy.sh2
-rw-r--r--tools/testing/selftests/livepatch/Makefile3
-rw-r--r--tools/testing/selftests/livepatch/settings1
-rwxr-xr-xtools/testing/selftests/livepatch/test-state.sh180
-rw-r--r--tools/testing/selftests/net/Makefile2
-rwxr-xr-xtools/testing/selftests/net/altnames.sh75
-rwxr-xr-xtools/testing/selftests/net/fib_tests.sh52
-rw-r--r--tools/testing/selftests/net/forwarding/devlink_lib.sh55
-rwxr-xr-xtools/testing/selftests/net/forwarding/ethtool.sh318
-rwxr-xr-xtools/testing/selftests/net/forwarding/ethtool_lib.sh69
-rw-r--r--tools/testing/selftests/net/forwarding/lib.sh36
-rw-r--r--tools/testing/selftests/net/forwarding/tc_common.sh11
-rw-r--r--tools/testing/selftests/net/so_txtime.c4
-rw-r--r--tools/testing/selftests/net/tcp_mmap.c73
-rwxr-xr-xtools/testing/selftests/net/traceroute.sh322
-rw-r--r--tools/testing/selftests/net/udpgso.c3
-rw-r--r--tools/testing/selftests/net/udpgso_bench_tx.c3
-rw-r--r--tools/testing/selftests/netfilter/Makefile2
-rwxr-xr-xtools/testing/selftests/netfilter/ipvs.sh228
-rw-r--r--tools/testing/selftests/pidfd/Makefile2
-rw-r--r--tools/testing/selftests/pidfd/pidfd_fdinfo_test.c296
-rw-r--r--tools/testing/selftests/proc/proc-self-map-files-002.c6
-rw-r--r--tools/testing/selftests/ptp/testptp.c53
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TASKS033
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TREE023
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TREE043
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TREE063
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TREE083
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TREE093
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TRIVIAL3
-rw-r--r--tools/testing/selftests/rcutorture/doc/TREE_RCU-kconfig.txt1
-rw-r--r--tools/testing/selftests/sync/sync.c6
-rw-r--r--tools/testing/selftests/tc-testing/config10
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/csum.json24
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/ct.json96
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/gact.json24
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/mirred.json24
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/mpls.json145
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/pedit.json749
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json24
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/vlan.json24
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/filters/basic.json325
-rw-r--r--tools/testing/selftests/vm/Makefile5
-rwxr-xr-xtools/testing/selftests/vm/run_vmtests10
-rw-r--r--tools/testing/selftests/x86/ioperm.c16
-rw-r--r--tools/testing/selftests/x86/iopl.c129
-rw-r--r--tools/testing/selftests/x86/mov_ss_trap.c3
-rw-r--r--tools/testing/selftests/x86/sigreturn.c13
-rw-r--r--tools/usb/usbip/libsrc/usbip_host_common.c2
-rw-r--r--virt/kvm/arm/arch_timer.c8
-rw-r--r--virt/kvm/arm/arm.c49
-rw-r--r--virt/kvm/arm/hypercalls.c71
-rw-r--r--virt/kvm/arm/mmio.c9
-rw-r--r--virt/kvm/arm/psci.c84
-rw-r--r--virt/kvm/arm/pvtime.c131
-rw-r--r--virt/kvm/arm/vgic/vgic-init.c1
-rw-r--r--virt/kvm/arm/vgic/vgic-its.c3
-rw-r--r--virt/kvm/arm/vgic/vgic-v3.c12
-rw-r--r--virt/kvm/arm/vgic/vgic-v4.c59
-rw-r--r--virt/kvm/arm/vgic/vgic.c4
-rw-r--r--virt/kvm/arm/vgic/vgic.h2
-rw-r--r--virt/kvm/coalesced_mmio.c8
-rw-r--r--virt/kvm/kvm_main.c209
5593 files changed, 310013 insertions, 130639 deletions
diff --git a/.mailmap b/.mailmap
index fd6219293057..e7f0341a6dbf 100644
--- a/.mailmap
+++ b/.mailmap
@@ -32,6 +32,7 @@ Andy Adamson <andros@citi.umich.edu>
Antoine Tenart <antoine.tenart@free-electrons.com>
Antonio Ospite <ao2@ao2.it> <ao2@amarulasolutions.com>
Archit Taneja <archit@ti.com>
+Ard Biesheuvel <ardb@kernel.org> <ard.biesheuvel@linaro.org>
Arnaud Patard <arnaud.patard@rtp-net.org>
Arnd Bergmann <arnd@arndb.de>
Axel Dyks <xl@xlsigned.net>
diff --git a/Documentation/ABI/stable/sysfs-class-infiniband b/Documentation/ABI/stable/sysfs-class-infiniband
index aed21b8916a2..96dfe1926b76 100644
--- a/Documentation/ABI/stable/sysfs-class-infiniband
+++ b/Documentation/ABI/stable/sysfs-class-infiniband
@@ -314,25 +314,6 @@ Description:
board_id: (RO) Manufacturing board ID
-sysfs interface for Chelsio T3 RDMA Driver (cxgb3)
---------------------------------------------------
-
-What: /sys/class/infiniband/cxgb3_X/hw_rev
-What: /sys/class/infiniband/cxgb3_X/hca_type
-What: /sys/class/infiniband/cxgb3_X/board_id
-Date: Feb, 2007
-KernelVersion: v2.6.21
-Contact: linux-rdma@vger.kernel.org
-Description:
- hw_rev: (RO) Hardware revision number
-
- hca_type: (RO) HCA type. Here it is a driver short name.
- It should normally match the name in its bus
- driver structure (e.g. pci_driver::name).
-
- board_id: (RO) Manufacturing board id
-
-
sysfs interface for Mellanox ConnectX HCA IB driver (mlx4)
----------------------------------------------------------
diff --git a/Documentation/ABI/stable/sysfs-driver-ib_srp b/Documentation/ABI/stable/sysfs-driver-ib_srp
index 7049a2b50359..84972a57caae 100644
--- a/Documentation/ABI/stable/sysfs-driver-ib_srp
+++ b/Documentation/ABI/stable/sysfs-driver-ib_srp
@@ -67,6 +67,8 @@ Description: Interface for making ib_srp connect to a new target.
initiator is allowed to queue per SCSI host. The default
value for this parameter is 62. The lowest supported value
is 2.
+ * max_it_iu_size, a decimal number specifying the maximum
+ initiator to target information unit length.
What: /sys/class/infiniband_srp/srp-<hca>-<port_number>/ibdev
Date: January 2, 2006
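
As a minimal userspace sketch of the connection interface above: a new
target is created by writing a comma-separated parameter string, which may
now include max_it_iu_size, to the add_target attribute. All identifiers
below (the srp-mlx5_0-1 name, the target IDs and the 8260-byte size) are
placeholder examples, not values taken from this patch.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *path =
		"/sys/class/infiniband_srp/srp-mlx5_0-1/add_target";
	const char *params =
		"id_ext=200100e08b000000,ioc_guid=00a0b80200402bd5,"
		"dgid=fe800000000000000002c90200402bd5,pkey=ffff,"
		"service_id=200100e08b000000,max_it_iu_size=8260";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* One write creates the target; max_it_iu_size caps the IU length. */
	if (write(fd, params, strlen(params)) < 0) {
		perror("write");
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}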
diff --git a/Documentation/ABI/testing/debugfs-hisi-hpre b/Documentation/ABI/testing/debugfs-hisi-hpre
new file mode 100644
index 000000000000..ec4a79e3a807
--- /dev/null
+++ b/Documentation/ABI/testing/debugfs-hisi-hpre
@@ -0,0 +1,57 @@
+What: /sys/kernel/debug/hisi_hpre/<bdf>/cluster[0-3]/regs
+Date: Sep 2019
+Contact: linux-crypto@vger.kernel.org
+Description: Dump debug registers from the HPRE cluster.
+ Only available for PF.
+
+What: /sys/kernel/debug/hisi_hpre/<bdf>/cluster[0-3]/cluster_ctrl
+Date: Sep 2019
+Contact: linux-crypto@vger.kernel.org
+Description: Write the HPRE core selection in the cluster into this file;
+ the debug information of that core can then be read.
+ Only available for PF.
+
+What: /sys/kernel/debug/hisi_hpre/<bdf>/rdclr_en
+Date: Sep 2019
+Contact: linux-crypto@vger.kernel.org
+Description: Read clear control for the HPRE cores' debug registers.
+ 1 means register read clear is enabled, otherwise 0. Writing
+ to this file only enables or disables clearing of the counters
+ after these registers are read; it has no other functional effect.
+ Only available for PF.
+
+What: /sys/kernel/debug/hisi_hpre/<bdf>/current_qm
+Date: Sep 2019
+Contact: linux-crypto@vger.kernel.org
+Description: One HPRE controller has one PF and multiple VFs, and each
+ function has a QM. Select the QM that the qm directory below
+ refers to.
+ Only available for PF.
+
+What: /sys/kernel/debug/hisi_hpre/<bdf>/regs
+Date: Sep 2019
+Contact: linux-crypto@vger.kernel.org
+Description: Dump debug registers from the HPRE.
+ Only available for PF.
+
+What: /sys/kernel/debug/hisi_hpre/<bdf>/qm/qm_regs
+Date: Sep 2019
+Contact: linux-crypto@vger.kernel.org
+Description: Dump debug registers from the QM.
+ Available for PF and VF in host. VF in guest currently only
+ has one debug register.
+
+What: /sys/kernel/debug/hisi_hpre/<bdf>/qm/current_q
+Date: Sep 2019
+Contact: linux-crypto@vger.kernel.org
+Description: One QM may contain multiple queues. Select a specific queue
+ to show its debug registers in the qm_regs file above.
+ Only available for PF.
+
+What: /sys/kernel/debug/hisi_hpre/<bdf>/qm/clear_enable
+Date: Sep 2019
+Contact: linux-crypto@vger.kernel.org
+Description: Read clear control for the QM debug registers (qm_regs).
+ 1 means register read clear is enabled, otherwise 0.
+ Writing to this file only enables or disables clearing of the
+ counters after these registers are read; it has no other
+ functional effect.
+ Only available for PF.
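
A minimal userspace sketch of the read-clear flow described above, assuming
debugfs is mounted at /sys/kernel/debug; the 0000:75:00.0 device address is
a placeholder example.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *base = "/sys/kernel/debug/hisi_hpre/0000:75:00.0";
	char path[256], buf[4096];
	ssize_t n;
	int fd;

	/* "1" enables clearing of the counters after they are read. */
	snprintf(path, sizeof(path), "%s/qm/clear_enable", base);
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return 1;
	if (write(fd, "1", 1) != 1) {
		close(fd);
		return 1;
	}
	close(fd);

	/* This read dumps the QM registers and clears the counters. */
	snprintf(path, sizeof(path), "%s/qm/qm_regs", base);
	fd = open(path, O_RDONLY);
	if (fd < 0)
		return 1;
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);
	close(fd);
	return 0;
}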
diff --git a/Documentation/ABI/testing/debugfs-hisi-sec b/Documentation/ABI/testing/debugfs-hisi-sec
new file mode 100644
index 000000000000..06adb899495e
--- /dev/null
+++ b/Documentation/ABI/testing/debugfs-hisi-sec
@@ -0,0 +1,43 @@
+What: /sys/kernel/debug/hisi_sec/<bdf>/sec_dfx
+Date: Oct 2019
+Contact: linux-crypto@vger.kernel.org
+Description: Dump the debug registers of SEC cores.
+ Only available for PF.
+
+What: /sys/kernel/debug/hisi_sec/<bdf>/clear_enable
+Date: Oct 2019
+Contact: linux-crypto@vger.kernel.org
+Description: Enabling/disabling of clear action after reading
+ the SEC debug registers.
+ 0: disable, 1: enable.
+ Only available for PF, and has no other effect on SEC.
+
+What: /sys/kernel/debug/hisi_sec/<bdf>/current_qm
+Date: Oct 2019
+Contact: linux-crypto@vger.kernel.org
+Description: One SEC controller has one PF and multiple VFs, each function
+ has a QM. This file can be used to select the QM that the
+ qm directory below refers to.
+ Only available for PF.
+
+What: /sys/kernel/debug/hisi_sec/<bdf>/qm/qm_regs
+Date: Oct 2019
+Contact: linux-crypto@vger.kernel.org
+Description: Dump of QM related debug registers.
+ Available for PF and VF in host. VF in guest currently only
+ has one debug register.
+
+What: /sys/kernel/debug/hisi_sec/<bdf>/qm/current_q
+Date: Oct 2019
+Contact: linux-crypto@vger.kernel.org
+Description: One QM of SEC may contain multiple queues. Select a specific
+ queue to show its debug registers in the 'qm_regs' file above.
+ Only available for PF.
+
+What: /sys/kernel/debug/hisi_sec/<bdf>/qm/clear_enable
+Date: Oct 2019
+Contact: linux-crypto@vger.kernel.org
+Description: Enabling/disabling of clear action after reading
+ the SEC's QM debug registers.
+ 0: disable, 1: enable.
+ Only available for PF, and has no other effect on SEC.
diff --git a/Documentation/ABI/testing/procfs-diskstats b/Documentation/ABI/testing/procfs-diskstats
index 2c44b4f1b060..70dcaf2481f4 100644
--- a/Documentation/ABI/testing/procfs-diskstats
+++ b/Documentation/ABI/testing/procfs-diskstats
@@ -29,4 +29,9 @@ Description:
17 - sectors discarded
18 - time spent discarding
+ Kernel 5.5+ appends two more fields for flush requests:
+
+ 19 - flush requests completed successfully
+ 20 - time spent flushing
+
For more details refer to Documentation/admin-guide/iostats.rst
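
A minimal sketch of parsing the extended format described above; lines from
kernels older than 5.5 simply carry fewer fields and are skipped.

#include <stdio.h>

int main(void)
{
	char line[512], name[64];
	unsigned int major, minor;
	unsigned long long f[17];	/* fields 4..20 after the name */
	FILE *fp = fopen("/proc/diskstats", "r");

	if (!fp)
		return 1;
	while (fgets(line, sizeof(line), fp)) {
		int n = sscanf(line,
			"%u %u %63s %llu %llu %llu %llu %llu %llu %llu %llu"
			" %llu %llu %llu %llu %llu %llu %llu %llu %llu",
			&major, &minor, name,
			&f[0], &f[1], &f[2], &f[3], &f[4], &f[5], &f[6],
			&f[7], &f[8], &f[9], &f[10], &f[11], &f[12], &f[13],
			&f[14], &f[15], &f[16]);
		if (n == 20)	/* fields 19 and 20 are present (5.5+) */
			printf("%s: %llu flushes, %llu ms flushing\n",
			       name, f[15], f[16]);
	}
	fclose(fp);
	return 0;
}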
diff --git a/Documentation/ABI/testing/sysfs-block b/Documentation/ABI/testing/sysfs-block
index f8c7c7126bb1..ed8c14f161ee 100644
--- a/Documentation/ABI/testing/sysfs-block
+++ b/Documentation/ABI/testing/sysfs-block
@@ -15,6 +15,12 @@ Description:
9 - I/Os currently in progress
10 - time spent doing I/Os (ms)
11 - weighted time spent doing I/Os (ms)
+ 12 - discards completed
+ 13 - discards merged
+ 14 - sectors discarded
+ 15 - time spent discarding (ms)
+ 16 - flush requests completed
+ 17 - time spent flushing (ms)
For more details refer to Documentation/admin-guide/iostats.rst
diff --git a/Documentation/ABI/testing/sysfs-bus-fsi b/Documentation/ABI/testing/sysfs-bus-fsi
index 57c806350d6c..320697bdf41d 100644
--- a/Documentation/ABI/testing/sysfs-bus-fsi
+++ b/Documentation/ABI/testing/sysfs-bus-fsi
@@ -1,25 +1,25 @@
-What: /sys/bus/platform/devices/fsi-master/rescan
+What: /sys/bus/platform/devices/../fsi-master/fsi0/rescan
Date: May 2017
KernelVersion: 4.12
-Contact: cbostic@linux.vnet.ibm.com
+Contact: linux-fsi@lists.ozlabs.org
Description:
Initiates an FSI master scan for all connected slave devices
on its links.
-What: /sys/bus/platform/devices/fsi-master/break
+What: /sys/bus/platform/devices/../fsi-master/fsi0/break
Date: May 2017
KernelVersion: 4.12
-Contact: cbostic@linux.vnet.ibm.com
+Contact: linux-fsi@lists.ozlabs.org
Description:
Sends an FSI BREAK command on a master's communication
link to any connected slaves. A BREAK resets the connected
device's logic and prepares it to receive further commands
from the master.
-What: /sys/bus/platform/devices/fsi-master/slave@00:00/term
+What: /sys/bus/platform/devices/../fsi-master/fsi0/slave@00:00/term
Date: May 2017
KernelVersion: 4.12
-Contact: cbostic@linux.vnet.ibm.com
+Contact: linux-fsi@lists.ozlabs.org
Description:
Sends an FSI terminate command from the master to its
connected slave. A terminate resets the slave's state machines
@@ -29,10 +29,10 @@ Description:
ongoing operation in case of an expired 'Master Time Out'
timer.
-What: /sys/bus/platform/devices/fsi-master/slave@00:00/raw
+What: /sys/bus/platform/devices/../fsi-master/fsi0/slave@00:00/raw
Date: May 2017
KernelVersion: 4.12
-Contact: cbostic@linux.vnet.ibm.com
+Contact: linux-fsi@lists.ozlabs.org
Description:
Provides a means of reading/writing a 32 bit value from/to a
specified FSI bus address.
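
A minimal sketch of the raw attribute described above, assuming the file
offset selects the FSI bus address; the device path and the 0x2860 address
are placeholder examples.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *raw =
		"/sys/bus/platform/devices/fsi-master/fsi0/slave@00:00/raw";
	uint32_t val;
	int fd = open(raw, O_RDONLY);

	if (fd < 0)
		return 1;
	/* Read the 32-bit word at FSI bus address 0x2860. */
	if (pread(fd, &val, sizeof(val), 0x2860) != sizeof(val)) {
		close(fd);
		return 1;
	}
	printf("0x2860: 0x%08x\n", val);
	close(fd);
	return 0;
}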
diff --git a/Documentation/ABI/testing/sysfs-bus-mei b/Documentation/ABI/testing/sysfs-bus-mei
index 6bd45346ac7e..3d37e2796d5a 100644
--- a/Documentation/ABI/testing/sysfs-bus-mei
+++ b/Documentation/ABI/testing/sysfs-bus-mei
@@ -4,7 +4,7 @@ KernelVersion: 3.10
Contact: Samuel Ortiz <sameo@linux.intel.com>
linux-mei@linux.intel.com
Description: Stores the same MODALIAS value emitted by uevent
- Format: mei:<mei device name>:<device uuid>:
+ Format: mei:<mei device name>:<device uuid>:<protocol version>
What: /sys/bus/mei/devices/.../name
Date: May 2015
@@ -26,3 +26,24 @@ KernelVersion: 4.3
Contact: Tomas Winkler <tomas.winkler@intel.com>
Description: Stores mei client protocol version
Format: %d
+
+What: /sys/bus/mei/devices/.../max_conn
+Date: Nov 2019
+KernelVersion: 5.5
+Contact: Tomas Winkler <tomas.winkler@intel.com>
+Description: Stores mei client maximum number of connections
+ Format: %d
+
+What: /sys/bus/mei/devices/.../fixed
+Date: Nov 2019
+KernelVersion: 5.5
+Contact: Tomas Winkler <tomas.winkler@intel.com>
+Description: Stores mei client fixed address, if any
+ Format: %d
+
+What: /sys/bus/mei/devices/.../max_len
+Date: Nov 2019
+KernelVersion: 5.5
+Contact: Tomas Winkler <tomas.winkler@intel.com>
+Description: Stores mei client maximum message length
+ Format: %d
diff --git a/Documentation/ABI/testing/sysfs-bus-thunderbolt b/Documentation/ABI/testing/sysfs-bus-thunderbolt
index b21fba14689b..82e80de78dd0 100644
--- a/Documentation/ABI/testing/sysfs-bus-thunderbolt
+++ b/Documentation/ABI/testing/sysfs-bus-thunderbolt
@@ -80,6 +80,14 @@ Contact: thunderbolt-software@lists.01.org
Description: This attribute contains 1 if Thunderbolt device was already
authorized on boot and 0 otherwise.
+What: /sys/bus/thunderbolt/devices/.../generation
+Date: Jan 2020
+KernelVersion: 5.5
+Contact: Christian Kellner <christian@kellner.me>
+Description: This attribute contains the generation of the Thunderbolt
+ controller associated with the device. It will contain 4
+ for USB4.
+
What: /sys/bus/thunderbolt/devices/.../key
Date: Sep 2017
KernelVersion: 4.13
@@ -104,6 +112,34 @@ Contact: thunderbolt-software@lists.01.org
Description: This attribute contains name of this device extracted from
the device DROM.
+What: /sys/bus/thunderbolt/devices/.../rx_speed
+Date: Jan 2020
+KernelVersion: 5.5
+Contact: Mika Westerberg <mika.westerberg@linux.intel.com>
+Description: This attribute reports the device RX speed per lane.
+ All RX lanes run at the same speed.
+
+What: /sys/bus/thunderbolt/devices/.../rx_lanes
+Date: Jan 2020
+KernelVersion: 5.5
+Contact: Mika Westerberg <mika.westerberg@linux.intel.com>
+Description: This attribute reports the number of RX lanes the device is
+ using simultaneously through its upstream port.
+
+What: /sys/bus/thunderbolt/devices/.../tx_speed
+Date: Jan 2020
+KernelVersion: 5.5
+Contact: Mika Westerberg <mika.westerberg@linux.intel.com>
+Description: This attribute reports the TX speed per lane.
+ All TX lanes run at the same speed.
+
+What: /sys/bus/thunderbolt/devices/.../tx_lanes
+Date: Jan 2020
+KernelVersion: 5.5
+Contact: Mika Westerberg <mika.westerberg@linux.intel.com>
+Description: This attribute reports the number of TX lanes the device is
+ using simultaneously through its upstream port.
+
What: /sys/bus/thunderbolt/devices/.../vendor
Date: Sep 2017
KernelVersion: 4.13
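
A minimal sketch combining the attributes above: since all lanes of a
direction run at the same speed, total TX bandwidth is tx_speed times
tx_lanes. The 0-1 device path is a placeholder, and the speed file is
assumed to begin with a per-lane rate in Gb/s (e.g. "20.0 Gb/s").

#include <stdio.h>

static int read_uint(const char *path, unsigned int *val)
{
	FILE *fp = fopen(path, "r");
	int ok;

	if (!fp)
		return -1;
	ok = fscanf(fp, "%u", val);
	fclose(fp);
	return ok == 1 ? 0 : -1;
}

int main(void)
{
	const char *dev = "/sys/bus/thunderbolt/devices/0-1";
	char path[128];
	unsigned int speed, lanes;

	snprintf(path, sizeof(path), "%s/tx_speed", dev);
	if (read_uint(path, &speed))
		return 1;
	snprintf(path, sizeof(path), "%s/tx_lanes", dev);
	if (read_uint(path, &lanes))
		return 1;
	printf("TX: %u lane(s) x %u Gb/s = %u Gb/s\n",
	       lanes, speed, speed * lanes);
	return 0;
}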
diff --git a/Documentation/ABI/testing/sysfs-class-mei b/Documentation/ABI/testing/sysfs-class-mei
index a92d844f806e..e9dc110650ae 100644
--- a/Documentation/ABI/testing/sysfs-class-mei
+++ b/Documentation/ABI/testing/sysfs-class-mei
@@ -80,3 +80,13 @@ Description: Display the ME device state.
DISABLED
POWER_DOWN
POWER_UP
+
+What: /sys/class/mei/meiN/trc
+Date: Nov 2019
+KernelVersion: 5.5
+Contact: Tomas Winkler <tomas.winkler@intel.com>
+Description: Display trc status register content
+
+ The ME FW writes Glitch Detection HW (TRC)
+ status information into the trc status register
+ for the BIOS and OS to monitor FW health.
diff --git a/Documentation/ABI/testing/sysfs-class-net-statistics b/Documentation/ABI/testing/sysfs-class-net-statistics
index 397118de7b5e..55db27815361 100644
--- a/Documentation/ABI/testing/sysfs-class-net-statistics
+++ b/Documentation/ABI/testing/sysfs-class-net-statistics
@@ -51,6 +51,14 @@ Description:
packet processing. See the network driver for the exact
meaning of this value.
+What: /sys/class/<iface>/statistics/rx_errors
+Date: April 2005
+KernelVersion: 2.6.12
+Contact: netdev@vger.kernel.org
+Description:
+ Indicates the number of receive errors on this network device.
+ See the network driver for the exact meaning of this value.
+
What: /sys/class/<iface>/statistics/rx_fifo_errors
Date: April 2005
KernelVersion: 2.6.12
@@ -88,6 +96,14 @@ Description:
due to lack of capacity in the receive side. See the network
driver for the exact meaning of this value.
+What: /sys/class/<iface>/statistics/rx_nohandler
+Date: February 2016
+KernelVersion: 4.6
+Contact: netdev@vger.kernel.org
+Description:
+ Indicates the number of received packets that were dropped on
+ an inactive device by the network core.
+
What: /sys/class/<iface>/statistics/rx_over_errors
Date: April 2005
KernelVersion: 2.6.12
diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu
index 06d0931119cc..fc20cde63d1e 100644
--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
+++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
@@ -486,6 +486,8 @@ What: /sys/devices/system/cpu/vulnerabilities
/sys/devices/system/cpu/vulnerabilities/spec_store_bypass
/sys/devices/system/cpu/vulnerabilities/l1tf
/sys/devices/system/cpu/vulnerabilities/mds
+ /sys/devices/system/cpu/vulnerabilities/tsx_async_abort
+ /sys/devices/system/cpu/vulnerabilities/itlb_multihit
Date: January 2018
Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org>
Description: Information about CPU vulnerabilities
diff --git a/Documentation/ABI/testing/sysfs-platform-dfl-fme b/Documentation/ABI/testing/sysfs-platform-dfl-fme
index 72634d3ae4f4..3683cb1cdc3d 100644
--- a/Documentation/ABI/testing/sysfs-platform-dfl-fme
+++ b/Documentation/ABI/testing/sysfs-platform-dfl-fme
@@ -106,3 +106,135 @@ KernelVersion: 5.4
Contact: Wu Hao <hao.wu@intel.com>
Description: Read-only. Read this file to get the second error detected by
hardware.
+
+What: /sys/bus/platform/devices/dfl-fme.0/hwmon/hwmonX/name
+Date: October 2019
+KernelVersion: 5.5
+Contact: Wu Hao <hao.wu@intel.com>
+Description: Read-Only. Read this file to get the name of the hwmon
+ device. It supports the values:
+ 'dfl_fme_thermal' - thermal hwmon device name
+ 'dfl_fme_power' - power hwmon device name
+
+What: /sys/bus/platform/devices/dfl-fme.0/hwmon/hwmonX/temp1_input
+Date: October 2019
+KernelVersion: 5.5
+Contact: Wu Hao <hao.wu@intel.com>
+Description: Read-Only. It returns FPGA device temperature in millidegrees
+ Celsius.
+
+What: /sys/bus/platform/devices/dfl-fme.0/hwmon/hwmonX/temp1_max
+Date: October 2019
+KernelVersion: 5.5
+Contact: Wu Hao <hao.wu@intel.com>
+Description: Read-Only. It returns hardware threshold1 temperature in
+ millidegrees Celsius. If temperature rises at or above this
+ threshold, hardware starts 50% or 90% throttling (see
+ 'temp1_max_policy').
+
+What: /sys/bus/platform/devices/dfl-fme.0/hwmon/hwmonX/temp1_crit
+Date: October 2019
+KernelVersion: 5.5
+Contact: Wu Hao <hao.wu@intel.com>
+Description: Read-Only. It returns hardware threshold2 temperature in
+ millidegrees Celsius. If temperature rises at or above this
+ threshold, hardware starts 100% throttling.
+
+What: /sys/bus/platform/devices/dfl-fme.0/hwmon/hwmonX/temp1_emergency
+Date: October 2019
+KernelVersion: 5.5
+Contact: Wu Hao <hao.wu@intel.com>
+Description: Read-Only. It returns hardware trip threshold temperature in
+ millidegrees Celsius. If temperature rises at or above this
+ threshold, a fatal event will be triggered to board management
+ controller (BMC) to shutdown FPGA.
+
+What: /sys/bus/platform/devices/dfl-fme.0/hwmon/hwmonX/temp1_max_alarm
+Date: October 2019
+KernelVersion: 5.5
+Contact: Wu Hao <hao.wu@intel.com>
+Description: Read-only. It returns 1 if temperature is currently at or above
+ hardware threshold1 (see 'temp1_max'), otherwise 0.
+
+What: /sys/bus/platform/devices/dfl-fme.0/hwmon/hwmonX/temp1_crit_alarm
+Date: October 2019
+KernelVersion: 5.5
+Contact: Wu Hao <hao.wu@intel.com>
+Description: Read-only. It returns 1 if temperature is currently at or above
+ hardware threshold2 (see 'temp1_crit'), otherwise 0.
+
+What: /sys/bus/platform/devices/dfl-fme.0/hwmon/hwmonX/temp1_max_policy
+Date: October 2019
+KernelVersion: 5.5
+Contact: Wu Hao <hao.wu@intel.com>
+Description: Read-Only. Read this file to get the policy of hardware threshold1
+ (see 'temp1_max'). It only supports two values (policies):
+ 0 - AP2 state (90% throttling)
+ 1 - AP1 state (50% throttling)
+
+What: /sys/bus/platform/devices/dfl-fme.0/hwmon/hwmonX/power1_input
+Date: October 2019
+KernelVersion: 5.5
+Contact: Wu Hao <hao.wu@intel.com>
+Description: Read-Only. It returns current FPGA power consumption in uW.
+
+What: /sys/bus/platform/devices/dfl-fme.0/hwmon/hwmonX/power1_max
+Date: October 2019
+KernelVersion: 5.5
+Contact: Wu Hao <hao.wu@intel.com>
+Description: Read-Write. Read this file to get current hardware power
+ threshold1 in uW. If power consumption rises at or above
+ this threshold, hardware starts 50% throttling.
+ Write this file to set current hardware power threshold1 in uW.
+ As the hardware only accepts values in whole Watts, the input
+ value is rounded down to the nearest Watt (the sub-Watt part
+ is discarded) and clamped to the range 0 to 127 Watts. The
+ write fails with -EINVAL if input parsing fails.
+
+What: /sys/bus/platform/devices/dfl-fme.0/hwmon/hwmonX/power1_crit
+Date: October 2019
+KernelVersion: 5.5
+Contact: Wu Hao <hao.wu@intel.com>
+Description: Read-Write. Read this file to get current hardware power
+ threshold2 in uW. If power consumption rises at or above
+ this threshold, hardware starts 90% throttling.
+ Write this file to set current hardware power threshold2 in uW.
+ As the hardware only accepts values in whole Watts, the input
+ value is rounded down to the nearest Watt (the sub-Watt part
+ is discarded) and clamped to the range 0 to 127 Watts. The
+ write fails with -EINVAL if input parsing fails.
+
+What: /sys/bus/platform/devices/dfl-fme.0/hwmon/hwmonX/power1_max_alarm
+Date: October 2019
+KernelVersion: 5.5
+Contact: Wu Hao <hao.wu@intel.com>
+Description: Read-only. It returns 1 if power consumption is currently at or
+ above hardware threshold1 (see 'power1_max'), otherwise 0.
+
+What: /sys/bus/platform/devices/dfl-fme.0/hwmon/hwmonX/power1_crit_alarm
+Date: October 2019
+KernelVersion: 5.5
+Contact: Wu Hao <hao.wu@intel.com>
+Description: Read-only. It returns 1 if power consumption is currently at or
+ above hardware threshold2 (see 'power1_crit'), otherwise 0.
+
+What: /sys/bus/platform/devices/dfl-fme.0/hwmon/hwmonX/power1_xeon_limit
+Date: October 2019
+KernelVersion: 5.5
+Contact: Wu Hao <hao.wu@intel.com>
+Description: Read-Only. It returns power limit for XEON in uW.
+
+What: /sys/bus/platform/devices/dfl-fme.0/hwmon/hwmonX/power1_fpga_limit
+Date: October 2019
+KernelVersion: 5.5
+Contact: Wu Hao <hao.wu@intel.com>
+Description: Read-Only. It returns power limit for FPGA in uW.
+
+What: /sys/bus/platform/devices/dfl-fme.0/hwmon/hwmonX/power1_ltr
+Date: October 2019
+KernelVersion: 5.5
+Contact: Wu Hao <hao.wu@intel.com>
+Description: Read-only. Read this file to get the current Latency Tolerance
+ Reporting (ltr) value. It returns 1 if all Accelerated
+ Function Units (AFUs) can tolerate latency >= 40us for memory
+ access or 0 if any AFU is latency sensitive (< 40us).
diff --git a/Documentation/DMA-attributes.txt b/Documentation/DMA-attributes.txt
index 8f8d97f65d73..29dcbe8826e8 100644
--- a/Documentation/DMA-attributes.txt
+++ b/Documentation/DMA-attributes.txt
@@ -5,24 +5,6 @@ DMA attributes
This document describes the semantics of the DMA attributes that are
defined in linux/dma-mapping.h.
-DMA_ATTR_WRITE_BARRIER
-----------------------
-
-DMA_ATTR_WRITE_BARRIER is a (write) barrier attribute for DMA. DMA
-to a memory region with the DMA_ATTR_WRITE_BARRIER attribute forces
-all pending DMA writes to complete, and thus provides a mechanism to
-strictly order DMA from a device across all intervening busses and
-bridges. This barrier is not specific to a particular type of
-interconnect, it applies to the system as a whole, and so its
-implementation must account for the idiosyncrasies of the system all
-the way from the DMA device to memory.
-
-As an example of a situation where DMA_ATTR_WRITE_BARRIER would be
-useful, suppose that a device does a DMA write to indicate that data is
-ready and available in memory. The DMA of the "completion indication"
-could race with data DMA. Mapping the memory used for completion
-indications with DMA_ATTR_WRITE_BARRIER would prevent the race.
-
DMA_ATTR_WEAK_ORDERING
----------------------
diff --git a/Documentation/RCU/Design/Data-Structures/Data-Structures.html b/Documentation/RCU/Design/Data-Structures/Data-Structures.html
deleted file mode 100644
index c30c1957c7e6..000000000000
--- a/Documentation/RCU/Design/Data-Structures/Data-Structures.html
+++ /dev/null
@@ -1,1391 +0,0 @@
-<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
- "http://www.w3.org/TR/html4/loose.dtd">
- <html>
- <head><title>A Tour Through TREE_RCU's Data Structures [LWN.net]</title>
- <meta HTTP-EQUIV="Content-Type" CONTENT="text/html; charset=iso-8859-1">
-
- <p>December 18, 2016</p>
- <p>This article was contributed by Paul E.&nbsp;McKenney</p>
-
-<h3>Introduction</h3>
-
-This document describes RCU's major data structures and their relationship
-to each other.
-
-<ol>
-<li> <a href="#Data-Structure Relationships">
- Data-Structure Relationships</a>
-<li> <a href="#The rcu_state Structure">
- The <tt>rcu_state</tt> Structure</a>
-<li> <a href="#The rcu_node Structure">
- The <tt>rcu_node</tt> Structure</a>
-<li> <a href="#The rcu_segcblist Structure">
- The <tt>rcu_segcblist</tt> Structure</a>
-<li> <a href="#The rcu_data Structure">
- The <tt>rcu_data</tt> Structure</a>
-<li> <a href="#The rcu_head Structure">
- The <tt>rcu_head</tt> Structure</a>
-<li> <a href="#RCU-Specific Fields in the task_struct Structure">
- RCU-Specific Fields in the <tt>task_struct</tt> Structure</a>
-<li> <a href="#Accessor Functions">
- Accessor Functions</a>
-</ol>
-
-<h3><a name="Data-Structure Relationships">Data-Structure Relationships</a></h3>
-
-<p>RCU is for all intents and purposes a large state machine, and its
-data structures maintain the state in such a way as to allow RCU readers
-to execute extremely quickly, while also processing the RCU grace periods
-requested by updaters in an efficient and extremely scalable fashion.
-The efficiency and scalability of RCU updaters is provided primarily
-by a combining tree, as shown below:
-
-</p><p><img src="BigTreeClassicRCU.svg" alt="BigTreeClassicRCU.svg" width="30%">
-
-</p><p>This diagram shows an enclosing <tt>rcu_state</tt> structure
-containing a tree of <tt>rcu_node</tt> structures.
-Each leaf node of the <tt>rcu_node</tt> tree has up to 16
-<tt>rcu_data</tt> structures associated with it, so that there
-are <tt>NR_CPUS</tt> <tt>rcu_data</tt> structures,
-one for each possible CPU.
-This structure is adjusted at boot time, if needed, to handle the
-common case where <tt>nr_cpu_ids</tt> is much less than
-<tt>NR_CPUS</tt>.
-For example, a number of Linux distributions set <tt>NR_CPUS=4096</tt>,
-which results in a three-level <tt>rcu_node</tt> tree.
-If the actual hardware has only 16 CPUs, RCU will adjust itself
-at boot time, resulting in an <tt>rcu_node</tt> tree with only a single node.
-
-</p><p>The purpose of this combining tree is to allow per-CPU events
-such as quiescent states, dyntick-idle transitions,
-and CPU hotplug operations to be processed efficiently
-and scalably.
-Quiescent states are recorded by the per-CPU <tt>rcu_data</tt> structures,
-and other events are recorded by the leaf-level <tt>rcu_node</tt>
-structures.
-All of these events are combined at each level of the tree until finally
-grace periods are completed at the tree's root <tt>rcu_node</tt>
-structure.
-A grace period can be completed at the root once every CPU
-(or, in the case of <tt>CONFIG_PREEMPT_RCU</tt>, task)
-has passed through a quiescent state.
-Once a grace period has completed, record of that fact is propagated
-back down the tree.
-
-</p><p>As can be seen from the diagram, on a 64-bit system
-a two-level tree with 64 leaves can accommodate 1,024 CPUs, with a fanout
-of 64 at the root and a fanout of 16 at the leaves.
-
-<table>
-<tr><th>&nbsp;</th></tr>
-<tr><th align="left">Quick Quiz:</th></tr>
-<tr><td>
- Why isn't the fanout at the leaves also 64?
-</td></tr>
-<tr><th align="left">Answer:</th></tr>
-<tr><td bgcolor="#ffffff"><font color="ffffff">
- Because there are more types of events that affect the leaf-level
- <tt>rcu_node</tt> structures than further up the tree.
- Therefore, if the leaf <tt>rcu_node</tt> structures have a fanout of
- 64, the contention on these structures' <tt>-&gt;lock</tt> fields
- becomes excessive.
- Experimentation on a wide variety of systems has shown that a fanout
- of 16 works well for the leaves of the <tt>rcu_node</tt> tree.
- </font>
-
- <p><font color="ffffff">Of course, further experience with
- systems having hundreds or thousands of CPUs may demonstrate
- that the fanout for the non-leaf <tt>rcu_node</tt> structures
- must also be reduced.
- Such reduction can be easily carried out when and if it proves
- necessary.
- In the meantime, if you are using such a system and running into
- contention problems on the non-leaf <tt>rcu_node</tt> structures,
- you may use the <tt>CONFIG_RCU_FANOUT</tt> kernel configuration
- parameter to reduce the non-leaf fanout as needed.
- </font>
-
- <p><font color="ffffff">Kernels built for systems with
- strong NUMA characteristics might also need to adjust
- <tt>CONFIG_RCU_FANOUT</tt> so that the domains of the
- <tt>rcu_node</tt> structures align with hardware boundaries.
- However, there has thus far been no need for this.
-</font></td></tr>
-<tr><td>&nbsp;</td></tr>
-</table>
-
-<p>If your system has more than 1,024 CPUs (or more than 512 CPUs on
-a 32-bit system), then RCU will automatically add more levels to the
-tree.
-For example, if you are crazy enough to build a 64-bit system with 65,536
-CPUs, RCU would configure the <tt>rcu_node</tt> tree as follows:
-
-</p><p><img src="HugeTreeClassicRCU.svg" alt="HugeTreeClassicRCU.svg" width="50%">
-
-</p><p>RCU currently permits up to a four-level tree, which on a 64-bit system
-accommodates up to 4,194,304 CPUs, though only a mere 524,288 CPUs for
-32-bit systems.
-On the other hand, you can set both <tt>CONFIG_RCU_FANOUT</tt> and
-<tt>CONFIG_RCU_FANOUT_LEAF</tt> to be as small as 2, which would result
-in a 16-CPU test using a 4-level tree.
-This can be useful for testing large-system capabilities on small test
-machines.
-
-</p><p>This multi-level combining tree allows us to get most of the
-performance and scalability
-benefits of partitioning, even though RCU grace-period detection is
-inherently a global operation.
-The trick here is that only the last CPU to report a quiescent state
-into a given <tt>rcu_node</tt> structure need advance to the <tt>rcu_node</tt>
-structure at the next level up the tree.
-This means that at the leaf-level <tt>rcu_node</tt> structure, only
-one access out of sixteen will progress up the tree.
-For the internal <tt>rcu_node</tt> structures, the situation is even
-more extreme: Only one access out of sixty-four will progress up
-the tree.
-Because the vast majority of the CPUs do not progress up the tree,
-the lock contention remains roughly constant up the tree.
-No matter how many CPUs there are in the system, at most 64 quiescent-state
-reports per grace period will progress all the way to the root
-<tt>rcu_node</tt> structure, thus ensuring that the lock contention
-on that root <tt>rcu_node</tt> structure remains acceptably low.
-
-</p><p>In effect, the combining tree acts like a big shock absorber,
-keeping lock contention under control at all tree levels regardless
-of the level of loading on the system.
-
-</p><p>RCU updaters wait for normal grace periods by registering
-RCU callbacks, either directly via <tt>call_rcu()</tt>
-or indirectly via <tt>synchronize_rcu()</tt> and friends.
-RCU callbacks are represented by <tt>rcu_head</tt> structures,
-which are queued on <tt>rcu_data</tt> structures while they are
-waiting for a grace period to elapse, as shown in the following figure:
-
-</p><p><img src="BigTreePreemptRCUBHdyntickCB.svg" alt="BigTreePreemptRCUBHdyntickCB.svg" width="40%">
-
-</p><p>This figure shows how <tt>TREE_RCU</tt>'s and
-<tt>PREEMPT_RCU</tt>'s major data structures are related.
-Lesser data structures will be introduced with the algorithms that
-make use of them.
-
-</p><p>Note that each of the data structures in the above figure has
-its own synchronization:
-
-<p><ol>
-<li> Each <tt>rcu_state</tt> structure has a lock and a mutex,
- and some fields are protected by the corresponding root
- <tt>rcu_node</tt> structure's lock.
-<li> Each <tt>rcu_node</tt> structure has a spinlock.
-<li> The fields in <tt>rcu_data</tt> are private to the corresponding
- CPU, although a few can be read and written by other CPUs.
-</ol>
-
-<p>It is important to note that different data structures can have
-very different ideas about the state of RCU at any given time.
-For but one example, awareness of the start or end of a given RCU
-grace period propagates slowly through the data structures.
-This slow propagation is absolutely necessary for RCU to have good
-read-side performance.
-If this balkanized implementation seems foreign to you, one useful
-trick is to consider each instance of these data structures to be
-a different person, each having the usual slightly different
-view of reality.
-
-</p><p>The general role of each of these data structures is as
-follows:
-
-</p><ol>
-<li> <tt>rcu_state</tt>:
- This structure forms the interconnection between the
- <tt>rcu_node</tt> and <tt>rcu_data</tt> structures,
- tracks grace periods, serves as short-term repository
- for callbacks orphaned by CPU-hotplug events,
- maintains <tt>rcu_barrier()</tt> state,
- tracks expedited grace-period state,
- and maintains state used to force quiescent states when
- grace periods extend too long,
-<li> <tt>rcu_node</tt>: This structure forms the combining
- tree that propagates quiescent-state
- information from the leaves to the root, and also propagates
- grace-period information from the root to the leaves.
- It provides local copies of the grace-period state in order
- to allow this information to be accessed in a synchronized
- manner without suffering the scalability limitations that
- would otherwise be imposed by global locking.
- In <tt>CONFIG_PREEMPT_RCU</tt> kernels, it manages the lists
- of tasks that have blocked while in their current
- RCU read-side critical section.
- In <tt>CONFIG_PREEMPT_RCU</tt> with
- <tt>CONFIG_RCU_BOOST</tt>, it manages the
- per-<tt>rcu_node</tt> priority-boosting
- kernel threads (kthreads) and state.
- Finally, it records CPU-hotplug state in order to determine
- which CPUs should be ignored during a given grace period.
-<li> <tt>rcu_data</tt>: This per-CPU structure is the
- focus of quiescent-state detection and RCU callback queuing.
- It also tracks its relationship to the corresponding leaf
- <tt>rcu_node</tt> structure to allow more-efficient
- propagation of quiescent states up the <tt>rcu_node</tt>
- combining tree.
- Like the <tt>rcu_node</tt> structure, it provides a local
- copy of the grace-period information, allowing essentially
- free synchronized access to this information from the
- corresponding CPU.
- Finally, this structure records past dyntick-idle state
- for the corresponding CPU and also tracks statistics.
-<li> <tt>rcu_head</tt>:
- This structure represents RCU callbacks, and is the
- only structure allocated and managed by RCU users.
- The <tt>rcu_head</tt> structure is normally embedded
- within the RCU-protected data structure.
-</ol>
-
-<p>If all you wanted from this article was a general notion of how
-RCU's data structures are related, you are done.
-Otherwise, each of the following sections give more details on
-the <tt>rcu_state</tt>, <tt>rcu_node</tt> and <tt>rcu_data</tt> data
-structures.
-
-<h3><a name="The rcu_state Structure">
-The <tt>rcu_state</tt> Structure</a></h3>
-
-<p>The <tt>rcu_state</tt> structure is the base structure that
-represents the state of RCU in the system.
-This structure forms the interconnection between the
-<tt>rcu_node</tt> and <tt>rcu_data</tt> structures,
-tracks grace periods, contains the lock used to
-synchronize with CPU-hotplug events,
-and maintains state used to force quiescent states when
-grace periods extend too long,
-
-</p><p>A few of the <tt>rcu_state</tt> structure's fields are discussed,
-singly and in groups, in the following sections.
-The more specialized fields are covered in the discussion of their
-use.
-
-<h5>Relationship to rcu_node and rcu_data Structures</h5>
-
-This portion of the <tt>rcu_state</tt> structure is declared
-as follows:
-
-<pre>
- 1 struct rcu_node node[NUM_RCU_NODES];
- 2 struct rcu_node *level[NUM_RCU_LVLS + 1];
- 3 struct rcu_data __percpu *rda;
-</pre>
-
-<table>
-<tr><th>&nbsp;</th></tr>
-<tr><th align="left">Quick Quiz:</th></tr>
-<tr><td>
- Wait a minute!
- You said that the <tt>rcu_node</tt> structures formed a tree,
- but they are declared as a flat array!
- What gives?
-</td></tr>
-<tr><th align="left">Answer:</th></tr>
-<tr><td bgcolor="#ffffff"><font color="ffffff">
- The tree is laid out in the array.
- The first node in the array is the head, the next set of nodes in the
- array are children of the head node, and so on until the last set of
- nodes in the array are the leaves.
- </font>
-
- <p><font color="ffffff">See the following diagrams to see how
- this works.
-</font></td></tr>
-<tr><td>&nbsp;</td></tr>
-</table>
-
-<p>The <tt>rcu_node</tt> tree is embedded into the
-<tt>-&gt;node[]</tt> array as shown in the following figure:
-
-</p><p><img src="TreeMapping.svg" alt="TreeMapping.svg" width="40%">
-
-</p><p>One interesting consequence of this mapping is that a
-breadth-first traversal of the tree is implemented as a simple
-linear scan of the array, which is in fact what the
-<tt>rcu_for_each_node_breadth_first()</tt> macro does.
-This macro is used at the beginnings and ends of grace periods.
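-
-</p><p>As a toy illustration (not kernel code), embedding a three-node,
-fanout-of-two tree in an array makes breadth-first order identical to
-array order:
-
-<pre>
- struct node { int level; };
-
- /* Root at [0]; its two children, the leaves, at [1] and [2]. */
- static struct node tree[3] = {
-         { .level = 0 }, { .level = 1 }, { .level = 1 },
- };
-
- #define for_each_node_breadth_first(np) \
-         for ((np) = &amp;tree[0]; (np) &lt; &amp;tree[3]; (np)++)
-</pre>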
-
-</p><p>Each entry of the <tt>-&gt;level</tt> array references
-the first <tt>rcu_node</tt> structure on the corresponding level
-of the tree, for example, as shown below:
-
-</p><p><img src="TreeMappingLevel.svg" alt="TreeMappingLevel.svg" width="40%">
-
-</p><p>The zero<sup>th</sup> element of the array references the root
-<tt>rcu_node</tt> structure, the first element references the
-first child of the root <tt>rcu_node</tt>, and finally the second
-element references the first leaf <tt>rcu_node</tt> structure.
-
-</p><p>For whatever it is worth, if you draw the tree to be tree-shaped
-rather than array-shaped, it is easy to draw a planar representation:
-
-</p><p><img src="TreeLevel.svg" alt="TreeLevel.svg" width="60%">
-
-</p><p>Finally, the <tt>-&gt;rda</tt> field references a per-CPU
-pointer to the corresponding CPU's <tt>rcu_data</tt> structure.
-
-</p><p>All of these fields are constant once initialization is complete,
-and therefore need no protection.
-
-<h5>Grace-Period Tracking</h5>
-
-<p>This portion of the <tt>rcu_state</tt> structure is declared
-as follows:
-
-<pre>
- 1 unsigned long gp_seq;
-</pre>
-
-<p>RCU grace periods are numbered, and
-the <tt>-&gt;gp_seq</tt> field contains the current grace-period
-sequence number.
-The bottom two bits are the state of the current grace period,
-which can be zero for not yet started or one for in progress.
-In other words, if the bottom two bits of <tt>-&gt;gp_seq</tt> are
-zero, then RCU is idle.
-Any other value in the bottom two bits indicates that something is broken.
-This field is protected by the root <tt>rcu_node</tt> structure's
-<tt>-&gt;lock</tt> field.
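-
-</p><p>For illustration only, here is a minimal sketch of how such a
-sequence number might be queried (the names are made up; in the kernel,
-the <tt>rcu_seq_*()</tt> helper family plays this role):
-
-<pre>
- #include &lt;stdbool.h&gt;
-
- #define SEQ_STATE_MASK 0x3UL /* bottom two bits: 0 idle, 1 in progress */
-
- static bool seq_gp_in_progress(unsigned long gp_seq)
- {
-         return (gp_seq &amp; SEQ_STATE_MASK) != 0;
- }
-
- /* Modulo-arithmetic comparison, so that counter wrap is harmless. */
- static bool seq_after(unsigned long a, unsigned long b)
- {
-         return (long)(a - b) &gt; 0;
- }
-</pre>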
-
-</p><p>There are <tt>-&gt;gp_seq</tt> fields
-in the <tt>rcu_node</tt> and <tt>rcu_data</tt> structures
-as well.
-The fields in the <tt>rcu_state</tt> structure represent the
-most current value, and those of the other structures are compared
-in order to detect the beginnings and ends of grace periods in a distributed
-fashion.
-The values flow from <tt>rcu_state</tt> to <tt>rcu_node</tt>
-(down the tree from the root to the leaves) to <tt>rcu_data</tt>.
-
-<h5>Miscellaneous</h5>
-
-<p>This portion of the <tt>rcu_state</tt> structure is declared
-as follows:
-
-<pre>
- 1 unsigned long gp_max;
- 2 char abbr;
- 3 char *name;
-</pre>
-
-<p>The <tt>-&gt;gp_max</tt> field tracks the duration of the longest
-grace period in jiffies.
-It is protected by the root <tt>rcu_node</tt>'s <tt>-&gt;lock</tt>.
-
-<p>The <tt>-&gt;name</tt> and <tt>-&gt;abbr</tt> fields distinguish
-between preemptible RCU (&ldquo;rcu_preempt&rdquo; and &ldquo;p&rdquo;)
-and non-preemptible RCU (&ldquo;rcu_sched&rdquo; and &ldquo;s&rdquo;).
-These fields are used for diagnostic and tracing purposes.
-
-<h3><a name="The rcu_node Structure">
-The <tt>rcu_node</tt> Structure</a></h3>
-
-<p>The <tt>rcu_node</tt> structures form the combining
-tree that propagates quiescent-state
-information from the leaves to the root and also that propagates
-grace-period information from the root down to the leaves.
-They provides local copies of the grace-period state in order
-to allow this information to be accessed in a synchronized
-manner without suffering the scalability limitations that
-would otherwise be imposed by global locking.
-In <tt>CONFIG_PREEMPT_RCU</tt> kernels, they manage the lists
-of tasks that have blocked while in their current
-RCU read-side critical section.
-In <tt>CONFIG_PREEMPT_RCU</tt> with
-<tt>CONFIG_RCU_BOOST</tt>, they manage the
-per-<tt>rcu_node</tt> priority-boosting
-kernel threads (kthreads) and state.
-Finally, they record CPU-hotplug state in order to determine
-which CPUs should be ignored during a given grace period.
-
-</p><p>The <tt>rcu_node</tt> structure's fields are discussed,
-singly and in groups, in the following sections.
-
-<h5>Connection to Combining Tree</h5>
-
-<p>This portion of the <tt>rcu_node</tt> structure is declared
-as follows:
-
-<pre>
- 1 struct rcu_node *parent;
- 2 u8 level;
- 3 u8 grpnum;
- 4 unsigned long grpmask;
- 5 int grplo;
- 6 int grphi;
-</pre>
-
-<p>The <tt>-&gt;parent</tt> pointer references the <tt>rcu_node</tt>
-one level up in the tree, and is <tt>NULL</tt> for the root
-<tt>rcu_node</tt>.
-The RCU implementation makes heavy use of this field to push quiescent
-states up the tree.
-The <tt>-&gt;level</tt> field gives the level in the tree, with
-the root being at level zero, its children at level one, and so on.
-The <tt>-&gt;grpnum</tt> field gives this node's position within
-the children of its parent, so this number can range between 0 and 31
-on 32-bit systems and between 0 and 63 on 64-bit systems.
-The <tt>-&gt;level</tt> and <tt>-&gt;grpnum</tt> fields are
-used only during initialization and for tracing.
-The <tt>-&gt;grpmask</tt> field is the bitmask counterpart of
-<tt>-&gt;grpnum</tt>, and therefore always has exactly one bit set.
-This mask is used to clear the bit corresponding to this <tt>rcu_node</tt>
-structure in its parent's bitmasks, which are described later.
-Finally, the <tt>-&gt;grplo</tt> and <tt>-&gt;grphi</tt> fields
-contain the lowest and highest numbered CPU served by this
-<tt>rcu_node</tt> structure, respectively.
-
-</p><p>All of these fields are constant, and thus do not require any
-synchronization.
-
-<h5>Synchronization</h5>
-
-<p>This field of the <tt>rcu_node</tt> structure is declared
-as follows:
-
-<pre>
- 1 raw_spinlock_t lock;
-</pre>
-
-<p>This field is used to protect the remaining fields in this structure,
-unless otherwise stated.
-That said, all of the fields in this structure can be accessed without
-locking for tracing purposes.
-Yes, this can result in confusing traces, but better some tracing confusion
-than to be heisenbugged out of existence.
-
-<h5>Grace-Period Tracking</h5>
-
-<p>This portion of the <tt>rcu_node</tt> structure is declared
-as follows:
-
-<pre>
- 1 unsigned long gp_seq;
- 2 unsigned long gp_seq_needed;
-</pre>
-
-<p>The <tt>rcu_node</tt> structures' <tt>-&gt;gp_seq</tt> fields are
-the counterparts of the field of the same name in the <tt>rcu_state</tt>
-structure.
-They each may lag up to one step behind their <tt>rcu_state</tt>
-counterpart.
-If the bottom two bits of a given <tt>rcu_node</tt> structure's
-<tt>-&gt;gp_seq</tt> field is zero, then this <tt>rcu_node</tt>
-structure believes that RCU is idle.
-</p><p>The <tt>-&gt;gp_seq</tt> field of each <tt>rcu_node</tt>
-structure is updated at the beginning and the end
-of each grace period.
-
-<p>The <tt>-&gt;gp_seq_needed</tt> fields record the
-furthest-in-the-future grace period request seen by the corresponding
-<tt>rcu_node</tt> structure. The request is considered fulfilled when
-the value of the <tt>-&gt;gp_seq</tt> field equals or exceeds that of
-the <tt>-&gt;gp_seq_needed</tt> field.
-
-<table>
-<tr><th>&nbsp;</th></tr>
-<tr><th align="left">Quick Quiz:</th></tr>
-<tr><td>
- Suppose that this <tt>rcu_node</tt> structure doesn't see
- a request for a very long time.
- Won't wrapping of the <tt>-&gt;gp_seq</tt> field cause
- problems?
-</td></tr>
-<tr><th align="left">Answer:</th></tr>
-<tr><td bgcolor="#ffffff"><font color="ffffff">
- No, because if the <tt>-&gt;gp_seq_needed</tt> field lags behind the
- <tt>-&gt;gp_seq</tt> field, the <tt>-&gt;gp_seq_needed</tt> field
- will be updated at the end of the grace period.
- Modulo-arithmetic comparisons therefore will always get the
- correct answer, even with wrapping.
-</font></td></tr>
-<tr><td>&nbsp;</td></tr>
-</table>
-
-<h5>Quiescent-State Tracking</h5>
-
-<p>These fields manage the propagation of quiescent states up the
-combining tree.
-
-</p><p>This portion of the <tt>rcu_node</tt> structure has fields
-as follows:
-
-<pre>
- 1 unsigned long qsmask;
- 2 unsigned long expmask;
- 3 unsigned long qsmaskinit;
- 4 unsigned long expmaskinit;
-</pre>
-
-<p>The <tt>-&gt;qsmask</tt> field tracks which of this
-<tt>rcu_node</tt> structure's children still need to report
-quiescent states for the current normal grace period.
-Such children will have a value of 1 in their corresponding bit.
-Note that the leaf <tt>rcu_node</tt> structures should be
-thought of as having <tt>rcu_data</tt> structures as their
-children.
-Similarly, the <tt>-&gt;expmask</tt> field tracks which
-of this <tt>rcu_node</tt> structure's children still need to report
-quiescent states for the current expedited grace period.
-An expedited grace period has
-the same conceptual properties as a normal grace period, but the
-expedited implementation accepts extreme CPU overhead to obtain
-much lower grace-period latency, for example, consuming a few
-tens of microseconds worth of CPU time to reduce grace-period
-duration from milliseconds to tens of microseconds.
-The <tt>-&gt;qsmaskinit</tt> field tracks which of this
-<tt>rcu_node</tt> structure's children cover for at least
-one online CPU.
-This mask is used to initialize <tt>-&gt;qsmask</tt>,
-and <tt>-&gt;expmaskinit</tt> is used to initialize
-<tt>-&gt;expmask</tt>, at the beginnings of the
-normal and expedited grace periods, respectively.
-
-<table>
-<tr><th>&nbsp;</th></tr>
-<tr><th align="left">Quick Quiz:</th></tr>
-<tr><td>
- Why are these bitmasks protected by locking?
- Come on, haven't you heard of atomic instructions???
-</td></tr>
-<tr><th align="left">Answer:</th></tr>
-<tr><td bgcolor="#ffffff"><font color="ffffff">
- Lockless grace-period computation! Such a tantalizing possibility!
- </font>
-
- <p><font color="ffffff">But consider the following sequence of events:
- </font>
-
- <ol>
- <li> <font color="ffffff">CPU&nbsp;0 has been in dyntick-idle
- mode for quite some time.
- When it wakes up, it notices that the current RCU
- grace period needs it to report in, so it sets a
- flag where the scheduling clock interrupt will find it.
- </font><p>
- <li> <font color="ffffff">Meanwhile, CPU&nbsp;1 is running
- <tt>force_quiescent_state()</tt>,
- and notices that CPU&nbsp;0 has been in dyntick idle mode,
- which qualifies as an extended quiescent state.
- </font><p>
- <li> <font color="ffffff">CPU&nbsp;0's scheduling clock
- interrupt fires in the
- middle of an RCU read-side critical section, and notices
- that the RCU core needs something, so commences RCU softirq
- processing.
- </font>
- <p>
- <li> <font color="ffffff">CPU&nbsp;0's softirq handler
- executes and is just about ready
- to report its quiescent state up the <tt>rcu_node</tt>
- tree.
- </font><p>
- <li> <font color="ffffff">But CPU&nbsp;1 beats it to the punch,
- completing the current
- grace period and starting a new one.
- </font><p>
- <li> <font color="ffffff">CPU&nbsp;0 now reports its quiescent
- state for the wrong
- grace period.
- That grace period might now end before the RCU read-side
- critical section.
- If that happens, disaster will ensue.
- </font>
- </ol>
-
- <p><font color="ffffff">So the locking is absolutely required in
- order to coordinate clearing of the bits with updating of the
- grace-period sequence number in <tt>-&gt;gp_seq</tt>.
-</font></td></tr>
-<tr><td>&nbsp;</td></tr>
-</table>
-
-<h5>Blocked-Task Management</h5>
-
-<p><tt>PREEMPT_RCU</tt> allows tasks to be preempted in the
-midst of their RCU read-side critical sections, and these tasks
-must be tracked explicitly.
-The details of exactly why and how they are tracked will be covered
-in a separate article on RCU read-side processing.
-For now, it is enough to know that the <tt>rcu_node</tt>
-structure tracks them.
-
-<pre>
- 1 struct list_head blkd_tasks;
- 2 struct list_head *gp_tasks;
- 3 struct list_head *exp_tasks;
- 4 bool wait_blkd_tasks;
-</pre>
-
-<p>The <tt>-&gt;blkd_tasks</tt> field is a list header for
-the list of blocked and preempted tasks.
-As tasks undergo context switches within RCU read-side critical
-sections, their <tt>task_struct</tt> structures are enqueued
-(via the <tt>task_struct</tt>'s <tt>-&gt;rcu_node_entry</tt>
-field) onto the head of the <tt>-&gt;blkd_tasks</tt> list for the
-leaf <tt>rcu_node</tt> structure corresponding to the CPU
-on which the outgoing context switch executed.
-As these tasks later exit their RCU read-side critical sections,
-they remove themselves from the list.
-This list is therefore in reverse time order, so that if one of the tasks
-is blocking the current grace period, all subsequent tasks must
-also be blocking that same grace period.
-Therefore, a single pointer into this list suffices to track
-all tasks blocking a given grace period.
-That pointer is stored in <tt>-&gt;gp_tasks</tt> for normal
-grace periods and in <tt>-&gt;exp_tasks</tt> for expedited
-grace periods.
-These last two fields are <tt>NULL</tt> if either there is
-no grace period in flight or if there are no blocked tasks
-preventing that grace period from completing.
-If either of these two pointers is referencing a task that
-removes itself from the <tt>-&gt;blkd_tasks</tt> list,
-then that task must advance the pointer to the next task on
-the list, or set the pointer to <tt>NULL</tt> if there
-are no subsequent tasks on the list.
-
-</p><p>For example, suppose that tasks&nbsp;T1, T2, and&nbsp;T3 are
-all hard-affinitied to the largest-numbered CPU in the system.
-Then if task&nbsp;T1 blocked in an RCU read-side
-critical section, then an expedited grace period started,
-then task&nbsp;T2 blocked in an RCU read-side critical section,
-then a normal grace period started, and finally task&nbsp;T3 blocked
-in an RCU read-side critical section, then the state of the
-last leaf <tt>rcu_node</tt> structure's blocked-task list
-would be as shown below:
-
-</p><p><img src="blkd_task.svg" alt="blkd_task.svg" width="60%">
-
-</p><p>Task&nbsp;T1 is blocking both grace periods, task&nbsp;T2 is
-blocking only the normal grace period, and task&nbsp;T3 is blocking
-neither grace period.
-Note that these tasks will not remove themselves from this list
-immediately upon resuming execution.
-They will instead remain on the list until they execute the outermost
-<tt>rcu_read_unlock()</tt> that ends their RCU read-side critical
-section.
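-
-</p><p>A sketch of the advance-or-<tt>NULL</tt> rule described above
-(illustrative only; the names are made up, and the kernel's real code
-must also handle <tt>-&gt;exp_tasks</tt> and locking):
-
-<pre>
- #include &lt;stddef.h&gt;
-
- struct list_head { struct list_head *next, *prev; };
-
- static void list_del(struct list_head *e)
- {
-         e-&gt;next-&gt;prev = e-&gt;prev;
-         e-&gt;prev-&gt;next = e-&gt;next;
- }
-
- /* Remove "entry" from the list headed by "head", advancing the
-  * grace-period pointer if it currently references "entry". */
- static void blkd_task_dequeue(struct list_head *head,
-                               struct list_head **gp_tasks,
-                               struct list_head *entry)
- {
-         if (*gp_tasks == entry)
-                 *gp_tasks = entry-&gt;next == head ? NULL : entry-&gt;next;
-         list_del(entry);
- }
-</pre>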
-
-<p>
-The <tt>-&gt;wait_blkd_tasks</tt> field indicates whether or not
-the current grace period is waiting on a blocked task.
-
-<h5>Sizing the <tt>rcu_node</tt> Array</h5>
-
-<p>The <tt>rcu_node</tt> array is sized via a series of
-C-preprocessor expressions as follows:
-
-<pre>
- 1 #ifdef CONFIG_RCU_FANOUT
- 2 #define RCU_FANOUT CONFIG_RCU_FANOUT
- 3 #else
- 4 # ifdef CONFIG_64BIT
- 5 # define RCU_FANOUT 64
- 6 # else
- 7 # define RCU_FANOUT 32
- 8 # endif
- 9 #endif
-10
-11 #ifdef CONFIG_RCU_FANOUT_LEAF
-12 #define RCU_FANOUT_LEAF CONFIG_RCU_FANOUT_LEAF
-13 #else
-14 # ifdef CONFIG_64BIT
-15 # define RCU_FANOUT_LEAF 64
-16 # else
-17 # define RCU_FANOUT_LEAF 32
-18 # endif
-19 #endif
-20
-21 #define RCU_FANOUT_1 (RCU_FANOUT_LEAF)
-22 #define RCU_FANOUT_2 (RCU_FANOUT_1 * RCU_FANOUT)
-23 #define RCU_FANOUT_3 (RCU_FANOUT_2 * RCU_FANOUT)
-24 #define RCU_FANOUT_4 (RCU_FANOUT_3 * RCU_FANOUT)
-25
-26 #if NR_CPUS &lt;= RCU_FANOUT_1
-27 # define RCU_NUM_LVLS 1
-28 # define NUM_RCU_LVL_0 1
-29 # define NUM_RCU_NODES NUM_RCU_LVL_0
-30 # define NUM_RCU_LVL_INIT { NUM_RCU_LVL_0 }
-31 # define RCU_NODE_NAME_INIT { "rcu_node_0" }
-32 # define RCU_FQS_NAME_INIT { "rcu_node_fqs_0" }
-33 # define RCU_EXP_NAME_INIT { "rcu_node_exp_0" }
-34 #elif NR_CPUS &lt;= RCU_FANOUT_2
-35 # define RCU_NUM_LVLS 2
-36 # define NUM_RCU_LVL_0 1
-37 # define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
-38 # define NUM_RCU_NODES (NUM_RCU_LVL_0 + NUM_RCU_LVL_1)
-39 # define NUM_RCU_LVL_INIT { NUM_RCU_LVL_0, NUM_RCU_LVL_1 }
-40 # define RCU_NODE_NAME_INIT { "rcu_node_0", "rcu_node_1" }
-41 # define RCU_FQS_NAME_INIT { "rcu_node_fqs_0", "rcu_node_fqs_1" }
-42 # define RCU_EXP_NAME_INIT { "rcu_node_exp_0", "rcu_node_exp_1" }
-43 #elif NR_CPUS &lt;= RCU_FANOUT_3
-44 # define RCU_NUM_LVLS 3
-45 # define NUM_RCU_LVL_0 1
-46 # define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_2)
-47 # define NUM_RCU_LVL_2 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
-48 # define NUM_RCU_NODES (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2)
-49 # define NUM_RCU_LVL_INIT { NUM_RCU_LVL_0, NUM_RCU_LVL_1, NUM_RCU_LVL_2 }
-50 # define RCU_NODE_NAME_INIT { "rcu_node_0", "rcu_node_1", "rcu_node_2" }
-51 # define RCU_FQS_NAME_INIT { "rcu_node_fqs_0", "rcu_node_fqs_1", "rcu_node_fqs_2" }
-52 # define RCU_EXP_NAME_INIT { "rcu_node_exp_0", "rcu_node_exp_1", "rcu_node_exp_2" }
-53 #elif NR_CPUS &lt;= RCU_FANOUT_4
-54 # define RCU_NUM_LVLS 4
-55 # define NUM_RCU_LVL_0 1
-56 # define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_3)
-57 # define NUM_RCU_LVL_2 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_2)
-58 # define NUM_RCU_LVL_3 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
-59 # define NUM_RCU_NODES (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2 + NUM_RCU_LVL_3)
-60 # define NUM_RCU_LVL_INIT { NUM_RCU_LVL_0, NUM_RCU_LVL_1, NUM_RCU_LVL_2, NUM_RCU_LVL_3 }
-61 # define RCU_NODE_NAME_INIT { "rcu_node_0", "rcu_node_1", "rcu_node_2", "rcu_node_3" }
-62 # define RCU_FQS_NAME_INIT { "rcu_node_fqs_0", "rcu_node_fqs_1", "rcu_node_fqs_2", "rcu_node_fqs_3" }
-63 # define RCU_EXP_NAME_INIT { "rcu_node_exp_0", "rcu_node_exp_1", "rcu_node_exp_2", "rcu_node_exp_3" }
-64 #else
-65 # error "CONFIG_RCU_FANOUT insufficient for NR_CPUS"
-66 #endif
-</pre>
-
-<p>The maximum number of levels in the <tt>rcu_node</tt> structure
-is currently limited to four, as specified by lines&nbsp;21-24
-and the structure of the subsequent &ldquo;if&rdquo; statement.
-For 32-bit systems, this allows 16*32*32*32=524,288 CPUs, which
-should be sufficient for the next few years at least.
-For 64-bit systems, 16*64*64*64=4,194,304 CPUs is allowed, which
-should see us through the next decade or so.
-This four-level tree also allows kernels built with
-<tt>CONFIG_RCU_FANOUT=8</tt> to support up to 4096 CPUs,
-which might be useful in very large systems having eight CPUs per
-socket (but please note that no one has yet shown any measurable
-performance degradation due to misaligned socket and <tt>rcu_node</tt>
-boundaries).
-In addition, building kernels with a full four levels of <tt>rcu_node</tt>
-tree permits better testing of RCU's combining-tree code.
-
-</p><p>The <tt>RCU_FANOUT</tt> symbol controls how many children
-are permitted at each non-leaf level of the <tt>rcu_node</tt> tree.
-If the <tt>CONFIG_RCU_FANOUT</tt> Kconfig option is not specified,
-it is set based on the word size of the system, which is also
-the Kconfig default.
-
-</p><p>The <tt>RCU_FANOUT_LEAF</tt> symbol controls how many CPUs are
-handled by each leaf <tt>rcu_node</tt> structure.
-Experience has shown that allowing a given leaf <tt>rcu_node</tt>
-structure to handle 64 CPUs, as permitted by the number of bits in
-the <tt>-&gt;qsmask</tt> field on a 64-bit system, results in
-excessive contention for the leaf <tt>rcu_node</tt> structures'
-<tt>-&gt;lock</tt> fields.
-The number of CPUs per leaf <tt>rcu_node</tt> structure is therefore
-limited to 16 given the default value of <tt>CONFIG_RCU_FANOUT_LEAF</tt>.
-If <tt>CONFIG_RCU_FANOUT_LEAF</tt> is unspecified, the value
-selected is based on the word size of the system, just as for
-<tt>CONFIG_RCU_FANOUT</tt>.
-Lines&nbsp;11-19 perform this computation.
-
-</p><p>Lines&nbsp;21-24 compute the maximum number of CPUs supported by
-a single-level (which contains a single <tt>rcu_node</tt> structure),
-two-level, three-level, and four-level <tt>rcu_node</tt> tree,
-respectively, given the fanout specified by <tt>RCU_FANOUT</tt>
-and <tt>RCU_FANOUT_LEAF</tt>.
-These numbers of CPUs are retained in the
-<tt>RCU_FANOUT_1</tt>,
-<tt>RCU_FANOUT_2</tt>,
-<tt>RCU_FANOUT_3</tt>, and
-<tt>RCU_FANOUT_4</tt>
-C-preprocessor variables, respectively.
-
-</p><p>These variables are used to control the C-preprocessor <tt>#if</tt>
-statement spanning lines&nbsp;26-66 that computes the number of
-<tt>rcu_node</tt> structures required for each level of the tree,
-as well as the number of levels required.
-The number of levels is placed in the <tt>NUM_RCU_LVLS</tt>
-C-preprocessor variable by lines&nbsp;27, 35, 44, and&nbsp;54.
-The number of <tt>rcu_node</tt> structures for the topmost level
-of the tree is always exactly one, and this value is unconditionally
-placed into <tt>NUM_RCU_LVL_0</tt> by lines&nbsp;28, 36, 45, and&nbsp;55.
-The rest of the levels (if any) of the <tt>rcu_node</tt> tree
-are computed by dividing the maximum number of CPUs by the
-fanout supported by the number of levels from the current level down,
-rounding up. This computation is performed by lines&nbsp;37,
-46-47, and&nbsp;56-58.
-Lines&nbsp;31-33, 40-42, 50-52, and&nbsp;62-63 create initializers
-for lockdep lock-class names.
-Finally, lines&nbsp;64-66 produce an error if the maximum number of
-CPUs is too large for the specified fanout.
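-
-<p>As a worked example (with numbers chosen purely for illustration),
-consider <tt>NR_CPUS=1000</tt>, <tt>RCU_FANOUT_LEAF=16</tt>, and
-<tt>RCU_FANOUT=64</tt>.
-Then <tt>RCU_FANOUT_1=16</tt> and <tt>RCU_FANOUT_2=1024</tt>, so the
-two-level case applies, giving
-<tt>NUM_RCU_LVL_1=DIV_ROUND_UP(1000,16)=63</tt> leaf structures plus
-the single root, for a total of <tt>NUM_RCU_NODES=64</tt>.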
-
-<h3><a name="The rcu_segcblist Structure">
-The <tt>rcu_segcblist</tt> Structure</a></h3>
-
-The <tt>rcu_segcblist</tt> structure maintains a segmented list of
-callbacks as follows:
-
-<pre>
- 1 #define RCU_DONE_TAIL 0
- 2 #define RCU_WAIT_TAIL 1
- 3 #define RCU_NEXT_READY_TAIL 2
- 4 #define RCU_NEXT_TAIL 3
- 5 #define RCU_CBLIST_NSEGS 4
- 6
- 7 struct rcu_segcblist {
- 8 struct rcu_head *head;
- 9 struct rcu_head **tails[RCU_CBLIST_NSEGS];
-10 unsigned long gp_seq[RCU_CBLIST_NSEGS];
-11 long len;
-12 long len_lazy;
-13 };
-</pre>
-
-<p>
-The segments are as follows:
-
-<ol>
-<li> <tt>RCU_DONE_TAIL</tt>: Callbacks whose grace periods have elapsed.
- These callbacks are ready to be invoked.
-<li> <tt>RCU_WAIT_TAIL</tt>: Callbacks that are waiting for the
- current grace period.
- Note that different CPUs can have different ideas about which
- grace period is current, hence the <tt>-&gt;gp_seq</tt> field.
-<li> <tt>RCU_NEXT_READY_TAIL</tt>: Callbacks waiting for the next
- grace period to start.
-<li> <tt>RCU_NEXT_TAIL</tt>: Callbacks that have not yet been
- associated with a grace period.
-</ol>
-
-<p>
-The <tt>-&gt;head</tt> pointer references the first callback or
-is <tt>NULL</tt> if the list contains no callbacks (which is
-<i>not</i> the same as being empty).
-Each element of the <tt>-&gt;tails[]</tt> array references the
-<tt>-&gt;next</tt> pointer of the last callback in the corresponding
-segment of the list, or the list's <tt>-&gt;head</tt> pointer if
-that segment and all previous segments are empty.
-If the corresponding segment is empty but some previous segment is
-not empty, then the array element is identical to its predecessor.
-Older callbacks are closer to the head of the list, and new callbacks
-are added at the tail.
-This relationship between the <tt>-&gt;head</tt> pointer, the
-<tt>-&gt;tails[]</tt> array, and the callbacks is shown in this
-diagram:
-
-</p><p><img src="nxtlist.svg" alt="nxtlist.svg" width="40%">
-
-</p><p>In this figure, the <tt>-&gt;head</tt> pointer references the
-first
-RCU callback in the list.
-The <tt>-&gt;tails[RCU_DONE_TAIL]</tt> array element references
-the <tt>-&gt;head</tt> pointer itself, indicating that none
-of the callbacks is ready to invoke.
-The <tt>-&gt;tails[RCU_WAIT_TAIL]</tt> array element references callback
-CB&nbsp;2's <tt>-&gt;next</tt> pointer, which indicates that
-CB&nbsp;1 and CB&nbsp;2 are both waiting on the current grace period,
-give or take possible disagreements about exactly which grace period
-is the current one.
-The <tt>-&gt;tails[RCU_NEXT_READY_TAIL]</tt> array element
-references the same RCU callback that <tt>-&gt;tails[RCU_WAIT_TAIL]</tt>
-does, which indicates that there are no callbacks waiting on the next
-RCU grace period.
-The <tt>-&gt;tails[RCU_NEXT_TAIL]</tt> array element references
-CB&nbsp;4's <tt>-&gt;next</tt> pointer, indicating that all the
-remaining RCU callbacks have not yet been assigned to an RCU grace
-period.
-Note that the <tt>-&gt;tails[RCU_NEXT_TAIL]</tt> array element
-always references the last RCU callback's <tt>-&gt;next</tt> pointer
-unless the callback list is empty, in which case it references
-the <tt>-&gt;head</tt> pointer.
-
-<p>
-There is one additional important special case for the
-<tt>-&gt;tails[RCU_NEXT_TAIL]</tt> array element: It can be <tt>NULL</tt>
-when this list is <i>disabled</i>.
-Lists are disabled when the corresponding CPU is offline or when
-the corresponding CPU's callbacks are offloaded to a kthread,
-both of which are described elsewhere.
-
-</p><p>CPUs advance their callbacks from the
-<tt>RCU_NEXT_TAIL</tt> to the <tt>RCU_NEXT_READY_TAIL</tt> to the
-<tt>RCU_WAIT_TAIL</tt> to the <tt>RCU_DONE_TAIL</tt> list segments
-as grace periods advance.
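-
-</p><p>As a self-contained sketch (illustrative only, not the kernel's
-<tt>rcu_segcblist</tt> code), enqueuing a new callback preserves the
-tail invariants described above:
-
-<pre>
- #include &lt;stddef.h&gt;
-
- struct cb { struct cb *next; };
-
- struct seglist {
-         struct cb *head;
-         struct cb **tails[4]; /* each initialized to &amp;head */
-         long len;
- };
-
- /* New callbacks always land in the RCU_NEXT_TAIL (last) segment. */
- static void seglist_enqueue(struct seglist *l, struct cb *c)
- {
-         c-&gt;next = NULL;
-         *l-&gt;tails[3] = c;       /* link after the current last callback */
-         l-&gt;tails[3] = &amp;c-&gt;next; /* tail again references last -&gt;next */
-         l-&gt;len++;
- }
-</pre>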
-
-</p><p>The <tt>-&gt;gp_seq[]</tt> array records grace-period
-numbers corresponding to the list segments.
-This is what allows different CPUs to have different ideas as to
-which is the current grace period while still avoiding premature
-invocation of their callbacks.
-In particular, this allows CPUs that go idle for extended periods
-to determine which of their callbacks are ready to be invoked after
-reawakening.
-
-</p><p>The <tt>-&gt;len</tt> counter contains the number of
-callbacks in <tt>-&gt;head</tt>, and the
-<tt>-&gt;len_lazy</tt> contains the number of those callbacks that
-are known to only free memory, and whose invocation can therefore
-be safely deferred.
-
-<p><b>Important note</b>: It is the <tt>-&gt;len</tt> field that
-determines whether or not there are callbacks associated with
-this <tt>rcu_segcblist</tt> structure, <i>not</i> the <tt>-&gt;head</tt>
-pointer.
-The reason for this is that all the ready-to-invoke callbacks
-(that is, those in the <tt>RCU_DONE_TAIL</tt> segment) are extracted
-all at once at callback-invocation time (<tt>rcu_do_batch()</tt>),
-so that <tt>-&gt;head</tt> may be set to <tt>NULL</tt> if there are
-no not-yet-done callbacks remaining in the <tt>rcu_segcblist</tt>.
-If callback invocation must be postponed, for example, because a
-high-priority process just woke up on this CPU, then the remaining
-callbacks are placed back on the <tt>RCU_DONE_TAIL</tt> segment and
-<tt>-&gt;head</tt> once again points to the start of the segment.
-In short, the head field can briefly be <tt>NULL</tt> even though the
-CPU has callbacks present the entire time.
-Therefore, it is not appropriate to test the <tt>-&gt;head</tt> pointer
-for <tt>NULL</tt>.
-
-<p>In contrast, the <tt>-&gt;len</tt> and <tt>-&gt;len_lazy</tt> counts
-are adjusted only after the corresponding callbacks have been invoked.
-This means that the <tt>-&gt;len</tt> count is zero only if
-the <tt>rcu_segcblist</tt> structure really is devoid of callbacks.
-Of course, off-CPU sampling of the <tt>-&gt;len</tt> count requires
-careful use of appropriate synchronization, for example, memory barriers.
-This synchronization can be a bit subtle, particularly in the case
-of <tt>rcu_barrier()</tt>.
-
-<h3><a name="The rcu_data Structure">
-The <tt>rcu_data</tt> Structure</a></h3>
-
-<p>The <tt>rcu_data</tt> structure maintains the per-CPU state for the RCU subsystem.
-The fields in this structure may be accessed only from the corresponding
-CPU (and from tracing) unless otherwise stated.
-This structure is the
-focus of quiescent-state detection and RCU callback queuing.
-It also tracks its relationship to the corresponding leaf
-<tt>rcu_node</tt> structure to allow more-efficient
-propagation of quiescent states up the <tt>rcu_node</tt>
-combining tree.
-Like the <tt>rcu_node</tt> structure, it provides a local
-copy of the grace-period information, allowing essentially
-free synchronized access to this information from the
-corresponding CPU.
-Finally, this structure records past dyntick-idle state
-for the corresponding CPU and also tracks statistics.
-
-</p><p>The <tt>rcu_data</tt> structure's fields are discussed,
-singly and in groups, in the following sections.
-
-<h5>Connection to Other Data Structures</h5>
-
-<p>This portion of the <tt>rcu_data</tt> structure is declared
-as follows:
-
-<pre>
- 1 int cpu;
- 2 struct rcu_node *mynode;
- 3 unsigned long grpmask;
- 4 bool beenonline;
-</pre>
-
-<p>The <tt>-&gt;cpu</tt> field contains the number of the
-corresponding CPU and the <tt>-&gt;mynode</tt> field references the
-corresponding <tt>rcu_node</tt> structure.
-The <tt>-&gt;mynode</tt> is used to propagate quiescent states
-up the combining tree.
-These two fields are constant and therefore do not require synchronization.
-
-<p>The <tt>-&gt;grpmask</tt> field indicates the bit in
-the <tt>-&gt;mynode-&gt;qsmask</tt> corresponding to this
-<tt>rcu_data</tt> structure, and is also used when propagating
-quiescent states.
-The <tt>-&gt;beenonline</tt> flag is set whenever the corresponding
-CPU comes online, which means that the debugfs tracing need not dump
-out any <tt>rcu_data</tt> structure for which this flag is not set.
-
-<h5>Quiescent-State and Grace-Period Tracking</h5>
-
-<p>This portion of the <tt>rcu_data</tt> structure is declared
-as follows:
-
-<pre>
- 1 unsigned long gp_seq;
- 2 unsigned long gp_seq_needed;
- 3 bool cpu_no_qs;
- 4 bool core_needs_qs;
- 5 bool gpwrap;
-</pre>
-
-<p>The <tt>-&gt;gp_seq</tt> field is the counterpart of the field of the same
-name in the <tt>rcu_state</tt> and <tt>rcu_node</tt> structures. The
-<tt>-&gt;gp_seq_needed</tt> field is the counterpart of the field of the same
-name in the <tt>rcu_node</tt> structure.
-They may each lag up to one step behind their <tt>rcu_node</tt>
-counterparts, but in <tt>CONFIG_NO_HZ_IDLE</tt> and
-<tt>CONFIG_NO_HZ_FULL</tt> kernels can lag
-arbitrarily far behind for CPUs in dyntick-idle mode (but these counters
-will catch up upon exit from dyntick-idle mode).
-If the lower two bits of a given <tt>rcu_data</tt> structure's
-<tt>-&gt;gp_seq</tt> are zero, then this <tt>rcu_data</tt>
-structure believes that RCU is idle.
-
-<table>
-<tr><th>&nbsp;</th></tr>
-<tr><th align="left">Quick Quiz:</th></tr>
-<tr><td>
- All this replication of the grace period numbers can only cause
- massive confusion.
- Why not just keep a global sequence number and be done with it???
-</td></tr>
-<tr><th align="left">Answer:</th></tr>
-<tr><td bgcolor="#ffffff"><font color="ffffff">
- Because if there were only a single global sequence
- number, there would need to be a single global lock to allow
- it to be safely accessed and updated.
- And if we are not going to have a single global lock, we need
- to carefully manage the numbers on a per-node basis.
- Recall from the answer to a previous Quick Quiz that the consequences
- of applying a previously sampled quiescent state to the wrong
- grace period are quite severe.
-</font></td></tr>
-<tr><td>&nbsp;</td></tr>
-</table>
-
-<p>The <tt>-&gt;cpu_no_qs</tt> flag indicates that the
-CPU has not yet passed through a quiescent state,
-while the <tt>-&gt;core_needs_qs</tt> flag indicates that the
-RCU core needs a quiescent state from the corresponding CPU.
-The <tt>-&gt;gpwrap</tt> field indicates that the corresponding
-CPU has remained idle for so long that the
-<tt>gp_seq</tt> counter is in danger of overflow, which
-will cause the CPU to disregard the values of its counters on
-its next exit from idle.
-
-<h5>RCU Callback Handling</h5>
-
-<p>In the absence of CPU-hotplug events, RCU callbacks are invoked by
-the same CPU that registered them.
-This is strictly a cache-locality optimization: callbacks can and
-do get invoked on CPUs other than the one that registered them.
-After all, if the CPU that registered a given callback has gone
-offline before the callback can be invoked, there really is no other
-choice.
-
-</p><p>This portion of the <tt>rcu_data</tt> structure is declared
-as follows:
-
-<pre>
- 1 struct rcu_segcblist cblist;
- 2 long qlen_last_fqs_check;
- 3 unsigned long n_cbs_invoked;
- 4 unsigned long n_nocbs_invoked;
- 5 unsigned long n_cbs_orphaned;
- 6 unsigned long n_cbs_adopted;
- 7 unsigned long n_force_qs_snap;
- 8 long blimit;
-</pre>
-
-<p>The <tt>-&gt;cblist</tt> structure is the segmented callback list
-described earlier.
-The CPU advances the callbacks in its <tt>rcu_data</tt> structure
-whenever it notices that another RCU grace period has completed.
-The CPU detects the completion of an RCU grace period by noticing
-that the value of its <tt>rcu_data</tt> structure's
-<tt>-&gt;gp_seq</tt> field differs from that of its leaf
-<tt>rcu_node</tt> structure.
-Recall that each <tt>rcu_node</tt> structure's
-<tt>-&gt;gp_seq</tt> field is updated at the beginnings and ends of each
-grace period.
-
-<p>
-The <tt>-&gt;qlen_last_fqs_check</tt> and
-<tt>-&gt;n_force_qs_snap</tt> coordinate the forcing of quiescent
-states from <tt>call_rcu()</tt> and friends when callback
-lists grow excessively long.
-
-</p><p>The <tt>-&gt;n_cbs_invoked</tt>,
-<tt>-&gt;n_cbs_orphaned</tt>, and <tt>-&gt;n_cbs_adopted</tt>
-fields count the number of callbacks invoked,
-sent to other CPUs when this CPU goes offline,
-and received from other CPUs when those other CPUs go offline.
-The <tt>-&gt;n_nocbs_invoked</tt> counter is used when the CPU's callbacks
-are offloaded to a kthread.
-
-<p>
-Finally, the <tt>-&gt;blimit</tt> counter is the maximum number of
-RCU callbacks that may be invoked at a given time.
-
-<h5>Dyntick-Idle Handling</h5>
-
-<p>This portion of the <tt>rcu_data</tt> structure is declared
-as follows:
-
-<pre>
- 1 int dynticks_snap;
- 2 unsigned long dynticks_fqs;
-</pre>
-
-The <tt>-&gt;dynticks_snap</tt> field is used to take a snapshot
-of the corresponding CPU's dyntick-idle state when forcing
-quiescent states, and is therefore accessed from other CPUs.
-Finally, the <tt>-&gt;dynticks_fqs</tt> field is used to
-count the number of times this CPU is determined to be in
-dyntick-idle state, and is used for tracing and debugging purposes.
-
-<p>
-This portion of the <tt>rcu_data</tt> structure is declared as follows:
-
-<pre>
- 1 long dynticks_nesting;
- 2 long dynticks_nmi_nesting;
- 3 atomic_t dynticks;
- 4 bool rcu_need_heavy_qs;
- 5 bool rcu_urgent_qs;
-</pre>
-
-<p>These fields in the <tt>rcu_data</tt> structure maintain the per-CPU dyntick-idle
-state for the corresponding CPU.
-The fields may be accessed only from the corresponding CPU (and from tracing)
-unless otherwise stated.
-
-<p>The <tt>-&gt;dynticks_nesting</tt> field counts the
-nesting depth of process execution, so that in normal circumstances
-this counter has value zero or one.
-NMIs, irqs, and tracers are counted by the <tt>-&gt;dynticks_nmi_nesting</tt>
-field.
-Because NMIs cannot be masked, changes to this variable have to be
-undertaken carefully using an algorithm provided by Andy Lutomirski.
-The initial transition from idle adds one, and nested transitions
-add two, so that a nesting level of five is represented by a
-<tt>-&gt;dynticks_nmi_nesting</tt> value of nine.
-This counter can therefore be thought of as counting the number
-of reasons why this CPU cannot be permitted to enter dyntick-idle
-mode, aside from process-level transitions.
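-
-<p>A sketch of the increment discipline just described (illustrative
-only, and ignoring the misnesting correction discussed next):
-
-<pre>
- static long nmi_nesting; /* stands in for -&gt;dynticks_nmi_nesting */
-
- static void my_nmi_enter(void)
- {
-         /* The first entry from idle adds one and nested entries add
-          * two, so a nesting depth of five yields 1 + 4*2 = 9. */
-         nmi_nesting += nmi_nesting ? 2 : 1;
- }
-
- static void my_nmi_exit(void)
- {
-         nmi_nesting -= nmi_nesting == 1 ? 1 : 2;
- }
-</pre>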
-
-<p>However, it turns out that when running in non-idle kernel context,
-the Linux kernel is fully capable of entering interrupt handlers that
-never exit and perhaps also vice versa.
-Therefore, whenever the <tt>-&gt;dynticks_nesting</tt> field is
-incremented up from zero, the <tt>-&gt;dynticks_nmi_nesting</tt> field
-is set to a large positive number, and whenever the
-<tt>-&gt;dynticks_nesting</tt> field is decremented down to zero,
-the <tt>-&gt;dynticks_nmi_nesting</tt> field is set to zero.
-Assuming that the number of misnested interrupts is not sufficient
-to overflow the counter, this approach corrects the
-<tt>-&gt;dynticks_nmi_nesting</tt> field every time the corresponding
-CPU enters the idle loop from process context.
-
-</p><p>The <tt>-&gt;dynticks</tt> field counts the corresponding
-CPU's transitions to and from either dyntick-idle or user mode, so
-that this counter has an even value when the CPU is in dyntick-idle
-mode or user mode and an odd value otherwise. The transitions to/from
-user mode need to be counted for user mode adaptive-ticks support
-(see timers/NO_HZ.txt).
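-
-</p><p>Illustrative only (not the kernel's actual accessors): with this
-convention, a remote CPU's extended-quiescent-state status can be
-sampled with a single atomic load:
-
-<pre>
- #include &lt;stdatomic.h&gt;
- #include &lt;stdbool.h&gt;
-
- /* Even value: dyntick-idle or user mode; odd value: non-idle kernel. */
- static bool cpu_in_eqs(atomic_long *dynticks)
- {
-         return (atomic_load(dynticks) &amp; 1) == 0;
- }
-</pre>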
-
-</p><p>The <tt>-&gt;rcu_need_heavy_qs</tt> field is used
-to record the fact that the RCU core code would really like to
-see a quiescent state from the corresponding CPU, so much so that
-it is willing to call for heavy-weight dyntick-counter operations.
-This flag is checked by RCU's context-switch and <tt>cond_resched()</tt>
-code, which provide a momentary idle sojourn in response.
-
-</p><p>Finally, the <tt>-&gt;rcu_urgent_qs</tt> field is used to record
-the fact that the RCU core code would really like to see a quiescent state from
-the corresponding CPU, with the various other fields indicating just how badly
-RCU wants this quiescent state.
-This flag is checked by RCU's context-switch path
-(<tt>rcu_note_context_switch()</tt>) and the <tt>cond_resched()</tt> code.
-
-<table>
-<tr><th>&nbsp;</th></tr>
-<tr><th align="left">Quick Quiz:</th></tr>
-<tr><td>
- Why not simply combine the <tt>-&gt;dynticks_nesting</tt>
- and <tt>-&gt;dynticks_nmi_nesting</tt> counters into a
- single counter that just counts the number of reasons that
- the corresponding CPU is non-idle?
-</td></tr>
-<tr><th align="left">Answer:</th></tr>
-<tr><td bgcolor="#ffffff"><font color="ffffff">
- Because this would fail in the presence of interrupts whose
- handlers never return and of handlers that manage to return
- from a made-up interrupt.
-</font></td></tr>
-<tr><td>&nbsp;</td></tr>
-</table>
-
-<p>Additional fields are present for some special-purpose
-builds, and are discussed separately.
-
-<h3><a name="The rcu_head Structure">
-The <tt>rcu_head</tt> Structure</a></h3>
-
-<p>Each <tt>rcu_head</tt> structure represents an RCU callback.
-These structures are normally embedded within RCU-protected data
-structures whose algorithms use asynchronous grace periods.
-In contrast, when using algorithms that block waiting for RCU grace periods,
-RCU users need not provide <tt>rcu_head</tt> structures.
-
-</p><p>The <tt>rcu_head</tt> structure has fields as follows:
-
-<pre>
- 1 struct rcu_head *next;
- 2 void (*func)(struct rcu_head *head);
-</pre>
-
-<p>The <tt>-&gt;next</tt> field is used
-to link the <tt>rcu_head</tt> structures together in the
-lists within the <tt>rcu_data</tt> structures.
-The <tt>-&gt;func</tt> field is a pointer to the function
-to be called when the callback is ready to be invoked, and
-this function is passed a pointer to the <tt>rcu_head</tt>
-structure.
-However, <tt>kfree_rcu()</tt> uses the <tt>-&gt;func</tt>
-field to record the offset of the <tt>rcu_head</tt>
-structure within the enclosing RCU-protected data structure.
-
-</p><p>Both of these fields are used internally by RCU.
-From the viewpoint of RCU users, this structure is an
-opaque &ldquo;cookie&rdquo;.
-
-<table>
-<tr><th>&nbsp;</th></tr>
-<tr><th align="left">Quick Quiz:</th></tr>
-<tr><td>
- Given that the callback function <tt>-&gt;func</tt>
- is passed a pointer to the <tt>rcu_head</tt> structure,
- how is that function supposed to find the beginning of the
- enclosing RCU-protected data structure?
-</td></tr>
-<tr><th align="left">Answer:</th></tr>
-<tr><td bgcolor="#ffffff"><font color="ffffff">
- In actual practice, there is a separate callback function per
- type of RCU-protected data structure.
- The callback function can therefore use the <tt>container_of()</tt>
- macro in the Linux kernel (or other pointer-manipulation facilities
- in other software environments) to find the beginning of the
- enclosing structure.
-</font></td></tr>
-<tr><td>&nbsp;</td></tr>
-</table>
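-
-<p>For example, a hedged sketch using this pattern (the structure,
-field, and function names are invented for illustration):
-
-<pre>
- #include &lt;stddef.h&gt;
-
- struct rcu_head {
-         struct rcu_head *next;
-         void (*func)(struct rcu_head *head);
- };
-
- struct foo {
-         int a;
-         struct rcu_head rh;
- };
-
- #define container_of(ptr, type, member) \
-         ((type *)((char *)(ptr) - offsetof(type, member)))
-
- extern void free_foo(struct foo *fp); /* assumed reclamation routine */
-
- /* Invoked once a grace period has elapsed for the enclosing foo. */
- static void foo_rcu_cb(struct rcu_head *head)
- {
-         struct foo *fp = container_of(head, struct foo, rh);
-
-         free_foo(fp);
- }
-</pre>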
-
-<h3><a name="RCU-Specific Fields in the task_struct Structure">
-RCU-Specific Fields in the <tt>task_struct</tt> Structure</a></h3>
-
-<p>The <tt>CONFIG_PREEMPT_RCU</tt> implementation uses some
-additional fields in the <tt>task_struct</tt> structure:
-
-<pre>
- 1 #ifdef CONFIG_PREEMPT_RCU
- 2 int rcu_read_lock_nesting;
- 3 union rcu_special rcu_read_unlock_special;
- 4 struct list_head rcu_node_entry;
- 5 struct rcu_node *rcu_blocked_node;
- 6 #endif /* #ifdef CONFIG_PREEMPT_RCU */
- 7 #ifdef CONFIG_TASKS_RCU
- 8 unsigned long rcu_tasks_nvcsw;
- 9 bool rcu_tasks_holdout;
-10 struct list_head rcu_tasks_holdout_list;
-11 int rcu_tasks_idle_cpu;
-12 #endif /* #ifdef CONFIG_TASKS_RCU */
-</pre>
-
-<p>The <tt>-&gt;rcu_read_lock_nesting</tt> field records the
-nesting level for RCU read-side critical sections, and
-the <tt>-&gt;rcu_read_unlock_special</tt> field is a bitmask
-that records special conditions that require <tt>rcu_read_unlock()</tt>
-to do additional work.
-The <tt>-&gt;rcu_node_entry</tt> field is used to form lists of
-tasks that have blocked within preemptible-RCU read-side critical
-sections and the <tt>-&gt;rcu_blocked_node</tt> field references
-the <tt>rcu_node</tt> structure whose list this task is a member of,
-or <tt>NULL</tt> if it is not blocked within a preemptible-RCU
-read-side critical section.
-
-<p>The <tt>-&gt;rcu_tasks_nvcsw</tt> field tracks the number of
-voluntary context switches that this task had undergone at the
-beginning of the current tasks-RCU grace period,
-<tt>-&gt;rcu_tasks_holdout</tt> is set if the current tasks-RCU
-grace period is waiting on this task, <tt>-&gt;rcu_tasks_holdout_list</tt>
-is a list element enqueuing this task on the holdout list,
-and <tt>-&gt;rcu_tasks_idle_cpu</tt> tracks which CPU this
-idle task is running on, but only if the task is currently running,
-that is, if the CPU is currently idle.
-
-<h3><a name="Accessor Functions">
-Accessor Functions</a></h3>
-
-<p>The following listing shows the
-<tt>rcu_get_root()</tt>, <tt>rcu_for_each_node_breadth_first()</tt>, and
-<tt>rcu_for_each_leaf_node()</tt> function and macros:
-
-<pre>
- 1 static struct rcu_node *rcu_get_root(struct rcu_state *rsp)
- 2 {
- 3 return &amp;rsp-&gt;node[0];
- 4 }
- 5
- 6 #define rcu_for_each_node_breadth_first(rsp, rnp) \
- 7 for ((rnp) = &amp;(rsp)-&gt;node[0]; \
- 8 (rnp) &lt; &amp;(rsp)-&gt;node[NUM_RCU_NODES]; (rnp)++)
- 9
- 10 #define rcu_for_each_leaf_node(rsp, rnp) \
- 11 for ((rnp) = (rsp)-&gt;level[NUM_RCU_LVLS - 1]; \
- 12 (rnp) &lt; &amp;(rsp)-&gt;node[NUM_RCU_NODES]; (rnp)++)
-</pre>
-
-<p>The <tt>rcu_get_root()</tt> simply returns a pointer to the
-first element of the specified <tt>rcu_state</tt> structure's
-<tt>-&gt;node[]</tt> array, which is the root <tt>rcu_node</tt>
-structure.
-
-</p><p>As noted earlier, the <tt>rcu_for_each_node_breadth_first()</tt>
-macro takes advantage of the layout of the <tt>rcu_node</tt>
-structures in the <tt>rcu_state</tt> structure's
-<tt>-&gt;node[]</tt> array, performing a breadth-first traversal by
-simply traversing the array in order.
-Similarly, the <tt>rcu_for_each_leaf_node()</tt> macro traverses only
-the last part of the array, thus traversing only the leaf
-<tt>rcu_node</tt> structures.
-
-<table>
-<tr><th>&nbsp;</th></tr>
-<tr><th align="left">Quick Quiz:</th></tr>
-<tr><td>
- What does
- <tt>rcu_for_each_leaf_node()</tt> do if the <tt>rcu_node</tt> tree
- contains only a single node?
-</td></tr>
-<tr><th align="left">Answer:</th></tr>
-<tr><td bgcolor="#ffffff"><font color="ffffff">
- In the single-node case,
- <tt>rcu_for_each_leaf_node()</tt> traverses the single node.
-</font></td></tr>
-<tr><td>&nbsp;</td></tr>
-</table>
-
-<h3><a name="Summary">
-Summary</a></h3>
-
-So the state of RCU is represented by an <tt>rcu_state</tt> structure,
-which contains a combining tree of <tt>rcu_node</tt> and
-<tt>rcu_data</tt> structures.
-Finally, in <tt>CONFIG_NO_HZ_IDLE</tt> kernels, each CPU's dyntick-idle
-state is tracked by dynticks-related fields in the <tt>rcu_data</tt> structure.
-
-If you made it this far, you are well prepared to read the code
-walkthroughs in the other articles in this series.
-
-<h3><a name="Acknowledgments">
-Acknowledgments</a></h3>
-
-I owe thanks to Cyrill Gorcunov, Mathieu Desnoyers, Dhaval Giani, Paul
-Turner, Abhishek Srivastava, Matt Kowalczyk, and Serge Hallyn
-for helping me get this document into a more human-readable state.
-
-<h3><a name="Legal Statement">
-Legal Statement</a></h3>
-
-<p>This work represents the view of the author and does not necessarily
-represent the view of IBM.
-
-</p><p>Linux is a registered trademark of Linus Torvalds.
-
-</p><p>Other company, product, and service names may be trademarks or
-service marks of others.
-
-</body></html>
diff --git a/Documentation/RCU/Design/Data-Structures/Data-Structures.rst b/Documentation/RCU/Design/Data-Structures/Data-Structures.rst
new file mode 100644
index 000000000000..4a48e20a46f2
--- /dev/null
+++ b/Documentation/RCU/Design/Data-Structures/Data-Structures.rst
@@ -0,0 +1,1163 @@
+===================================================
+A Tour Through TREE_RCU's Data Structures [LWN.net]
+===================================================
+
+December 18, 2016
+
+This article was contributed by Paul E. McKenney
+
+Introduction
+============
+
+This document describes RCU's major data structures and their relationship
+to each other.
+
+Data-Structure Relationships
+============================
+
+RCU is for all intents and purposes a large state machine, and its
+data structures maintain the state in such a way as to allow RCU readers
+to execute extremely quickly, while also processing the RCU grace periods
+requested by updaters in an efficient and extremely scalable fashion.
+The efficiency and scalability of RCU updaters is provided primarily
+by a combining tree, as shown below:
+
+.. kernel-figure:: BigTreeClassicRCU.svg
+
+This diagram shows an enclosing ``rcu_state`` structure containing a tree
+of ``rcu_node`` structures. Each leaf node of the ``rcu_node`` tree has up
+to 16 ``rcu_data`` structures associated with it, so that there are
+``NR_CPUS`` ``rcu_data`` structures, one for each possible CPU.
+This structure is adjusted at boot time, if needed, to handle the common
+case where ``nr_cpu_ids`` is much less than ``NR_CPUS``.
+For example, a number of Linux distributions set ``NR_CPUS=4096``,
+which results in a three-level ``rcu_node`` tree.
+If the actual hardware has only 16 CPUs, RCU will adjust itself
+at boot time, resulting in an ``rcu_node`` tree with only a single node.
+
+The purpose of this combining tree is to allow per-CPU events
+such as quiescent states, dyntick-idle transitions,
+and CPU hotplug operations to be processed efficiently
+and scalably.
+Quiescent states are recorded by the per-CPU ``rcu_data`` structures,
+and other events are recorded by the leaf-level ``rcu_node``
+structures.
+All of these events are combined at each level of the tree until finally
+grace periods are completed at the tree's root ``rcu_node``
+structure.
+A grace period can be completed at the root once every CPU
+(or, in the case of ``CONFIG_PREEMPT_RCU``, task)
+has passed through a quiescent state.
+Once a grace period has completed, record of that fact is propagated
+back down the tree.
+
+As can be seen from the diagram, on a 64-bit system
+a two-level tree with 64 leaves can accommodate 1,024 CPUs, with a fanout
+of 64 at the root and a fanout of 16 at the leaves.
+
++-----------------------------------------------------------------------+
+| **Quick Quiz**: |
++-----------------------------------------------------------------------+
+| Why isn't the fanout at the leaves also 64? |
++-----------------------------------------------------------------------+
+| **Answer**: |
++-----------------------------------------------------------------------+
+| Because there are more types of events that affect the leaf-level |
+| ``rcu_node`` structures than further up the tree. Therefore, if the |
+| leaf ``rcu_node`` structures have fanout of 64, the contention on |
+| these structures' ``->lock`` fields becomes excessive. Experimentation |
+| on a wide variety of systems has shown that a fanout of 16 works well |
+| for the leaves of the ``rcu_node`` tree. |
+| |
+| Of course, further experience with systems having hundreds or |
+| thousands of CPUs may demonstrate that the fanout for the non-leaf |
+| ``rcu_node`` structures must also be reduced. Such reduction can be |
+| easily carried out when and if it proves necessary. In the meantime, |
+| if you are using such a system and running into contention problems |
+| on the non-leaf ``rcu_node`` structures, you may use the |
+| ``CONFIG_RCU_FANOUT`` kernel configuration parameter to reduce the |
+| non-leaf fanout as needed. |
+| |
+| Kernels built for systems with strong NUMA characteristics might |
+| also need to adjust ``CONFIG_RCU_FANOUT`` so that the domains of |
+| the ``rcu_node`` structures align with hardware boundaries. |
+| However, there has thus far been no need for this. |
++-----------------------------------------------------------------------+
+
+If your system has more than 1,024 CPUs (or more than 512 CPUs on a
+32-bit system), then RCU will automatically add more levels to the tree.
+For example, if you are crazy enough to build a 64-bit system with
+65,536 CPUs, RCU would configure the ``rcu_node`` tree as follows:
+
+.. kernel-figure:: HugeTreeClassicRCU.svg
+
+RCU currently permits up to a four-level tree, which on a 64-bit system
+accommodates up to 4,194,304 CPUs, though only a mere 524,288 CPUs for
+32-bit systems. On the other hand, you can set both
+``CONFIG_RCU_FANOUT`` and ``CONFIG_RCU_FANOUT_LEAF`` to be as small as
+2, which would result in a 16-CPU test using a four-level tree. This can be
+useful for testing large-system capabilities on small test machines.
+
+This multi-level combining tree allows us to get most of the performance
+and scalability benefits of partitioning, even though RCU grace-period
+detection is inherently a global operation. The trick here is that only
+the last CPU to report a quiescent state into a given ``rcu_node``
+structure need advance to the ``rcu_node`` structure at the next level
+up the tree. This means that at the leaf-level ``rcu_node`` structure,
+only one access out of sixteen will progress up the tree. For the
+internal ``rcu_node`` structures, the situation is even more extreme:
+Only one access out of sixty-four will progress up the tree. Because the
+vast majority of the CPUs do not progress up the tree, the lock
+contention remains roughly constant up the tree. No matter how many CPUs
+there are in the system, at most 64 quiescent-state reports per grace
+period will progress all the way to the root ``rcu_node`` structure,
+thus ensuring that the lock contention on that root ``rcu_node``
+structure remains acceptably low.
+
+In effect, the combining tree acts like a big shock absorber, keeping
+lock contention under control at all tree levels regardless of the level
+of loading on the system.
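+
+The following greatly simplified sketch (illustrative only, not the
+kernel's actual code) shows the essence of this trick: a report clears
+the reporting child's bit and climbs to the parent only when the
+current level's mask becomes empty:
+
+::
+
+   /*
+    * Sketch: report a quiescent state on behalf of the child
+    * identified by @mask, climbing the tree only when this level's
+    * ->qsmask becomes empty. Grace-period bookkeeping is omitted.
+    */
+   static void report_qs_sketch(struct rcu_node *rnp, unsigned long mask)
+   {
+           unsigned long flags;
+
+           for (;;) {
+                   raw_spin_lock_irqsave(&rnp->lock, flags);
+                   rnp->qsmask &= ~mask;
+                   if (rnp->qsmask != 0 || rnp->parent == NULL) {
+                           /* Others still pending, or we are the root. */
+                           raw_spin_unlock_irqrestore(&rnp->lock, flags);
+                           break;
+                   }
+                   mask = rnp->grpmask;  /* Our bit in the parent's masks. */
+                   raw_spin_unlock_irqrestore(&rnp->lock, flags);
+                   rnp = rnp->parent;
+           }
+           /* Clearing the root's last bit allows the grace period to end. */
+   }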
+
+RCU updaters wait for normal grace periods by registering RCU callbacks,
+either directly via ``call_rcu()`` or indirectly via
+``synchronize_rcu()`` and friends. RCU callbacks are represented by
+``rcu_head`` structures, which are queued on ``rcu_data`` structures
+while they are waiting for a grace period to elapse, as shown in the
+following figure:
+
+.. kernel-figure:: BigTreePreemptRCUBHdyntickCB.svg
+
+This figure shows how ``TREE_RCU``'s and ``PREEMPT_RCU``'s major data
+structures are related. Lesser data structures will be introduced with
+the algorithms that make use of them.
+
+Note that each of the data structures in the above figure has its own
+synchronization:
+
+#. Each ``rcu_state`` structure has a lock and a mutex, and some fields
+ are protected by the corresponding root ``rcu_node`` structure's lock.
+#. Each ``rcu_node`` structure has a spinlock.
+#. The fields in ``rcu_data`` are private to the corresponding CPU,
+ although a few can be read and written by other CPUs.
+
+It is important to note that different data structures can have very
+different ideas about the state of RCU at any given time. For but one
+example, awareness of the start or end of a given RCU grace period
+propagates slowly through the data structures. This slow propagation is
+absolutely necessary for RCU to have good read-side performance. If this
+balkanized implementation seems foreign to you, one useful trick is to
+consider each instance of these data structures to be a different
+person, each having the usual slightly different view of reality.
+
+The general role of each of these data structures is as follows:
+
+#. ``rcu_state``: This structure forms the interconnection between the
+ ``rcu_node`` and ``rcu_data`` structures, tracks grace periods,
+ serves as short-term repository for callbacks orphaned by CPU-hotplug
+ events, maintains ``rcu_barrier()`` state, tracks expedited
+ grace-period state, and maintains state used to force quiescent
+   states when grace periods extend too long.
+#. ``rcu_node``: This structure forms the combining tree that propagates
+ quiescent-state information from the leaves to the root, and also
+ propagates grace-period information from the root to the leaves. It
+ provides local copies of the grace-period state in order to allow
+ this information to be accessed in a synchronized manner without
+ suffering the scalability limitations that would otherwise be imposed
+ by global locking. In ``CONFIG_PREEMPT_RCU`` kernels, it manages the
+ lists of tasks that have blocked while in their current RCU read-side
+ critical section. In ``CONFIG_PREEMPT_RCU`` with
+ ``CONFIG_RCU_BOOST``, it manages the per-\ ``rcu_node``
+ priority-boosting kernel threads (kthreads) and state. Finally, it
+ records CPU-hotplug state in order to determine which CPUs should be
+ ignored during a given grace period.
+#. ``rcu_data``: This per-CPU structure is the focus of quiescent-state
+ detection and RCU callback queuing. It also tracks its relationship
+ to the corresponding leaf ``rcu_node`` structure to allow
+ more-efficient propagation of quiescent states up the ``rcu_node``
+ combining tree. Like the ``rcu_node`` structure, it provides a local
+   copy of the grace-period information to allow synchronized access to
+   this information from the corresponding CPU essentially for free.
+   Finally, this
+ structure records past dyntick-idle state for the corresponding CPU
+ and also tracks statistics.
+#. ``rcu_head``: This structure represents RCU callbacks, and is the
+ only structure allocated and managed by RCU users. The ``rcu_head``
+ structure is normally embedded within the RCU-protected data
+ structure.
+
+If all you wanted from this article was a general notion of how RCU's
+data structures are related, you are done. Otherwise, each of the
+following sections gives more details on the ``rcu_state``, ``rcu_node``,
+and ``rcu_data`` data structures.
+
+The ``rcu_state`` Structure
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The ``rcu_state`` structure is the base structure that represents the
+state of RCU in the system. This structure forms the interconnection
+between the ``rcu_node`` and ``rcu_data`` structures, tracks grace
+periods, contains the lock used to synchronize with CPU-hotplug events,
+and maintains state used to force quiescent states when grace periods
+extend too long.
+
+A few of the ``rcu_state`` structure's fields are discussed, singly and
+in groups, in the following sections. The more specialized fields are
+covered in the discussion of their use.
+
+Relationship to rcu_node and rcu_data Structures
+''''''''''''''''''''''''''''''''''''''''''''''''
+
+This portion of the ``rcu_state`` structure is declared as follows:
+
+::
+
+ 1 struct rcu_node node[NUM_RCU_NODES];
+ 2 struct rcu_node *level[NUM_RCU_LVLS + 1];
+ 3 struct rcu_data __percpu *rda;
+
++-----------------------------------------------------------------------+
+| **Quick Quiz**: |
++-----------------------------------------------------------------------+
+| Wait a minute! You said that the ``rcu_node`` structures formed a |
+| tree, but they are declared as a flat array! What gives? |
++-----------------------------------------------------------------------+
+| **Answer**: |
++-----------------------------------------------------------------------+
+| The tree is laid out in the array. The first node in the array is the |
+| head, the next set of nodes in the array are children of the head |
+| node, and so on until the last set of nodes in the array are the |
+| leaves. |
+| See the following diagrams to see how this works. |
++-----------------------------------------------------------------------+
+
+The ``rcu_node`` tree is embedded into the ``->node[]`` array as shown
+in the following figure:
+
+.. kernel-figure:: TreeMapping.svg
+
+One interesting consequence of this mapping is that a breadth-first
+traversal of the tree is implemented as a simple linear scan of the
+array, which is in fact what the ``rcu_for_each_node_breadth_first()``
+macro does. This macro is used at the beginning and ends of grace
+periods.
+
+Each entry of the ``->level`` array references the first ``rcu_node``
+structure on the corresponding level of the tree, for example, as shown
+below:
+
+.. kernel-figure:: TreeMappingLevel.svg
+
+The zero\ :sup:`th` element of the array references the root
+``rcu_node`` structure, the first element references the first child of
+the root ``rcu_node``, and finally the second element references the
+first leaf ``rcu_node`` structure.
+
+For whatever it is worth, if you draw the tree to be tree-shaped rather
+than array-shaped, it is easy to draw a planar representation:
+
+.. kernel-figure:: TreeLevel.svg
+
+Finally, the ``->rda`` field references a per-CPU pointer to the
+corresponding CPU's ``rcu_data`` structure.
+
+All of these fields are constant once initialization is complete, and
+therefore need no protection.
+
+Grace-Period Tracking
+'''''''''''''''''''''
+
+This portion of the ``rcu_state`` structure is declared as follows:
+
+::
+
+ 1 unsigned long gp_seq;
+
+RCU grace periods are numbered, and the ``->gp_seq`` field contains the
+current grace-period sequence number. The bottom two bits are the state
+of the current grace period, which can be zero for not yet started or
+one for in progress. In other words, if the bottom two bits of
+``->gp_seq`` are zero, then RCU is idle. Any value other than zero or
+one in the bottom two bits indicates that something is broken. This
+field is protected by the root ``rcu_node`` structure's ``->lock``
+field.
+
+There are ``->gp_seq`` fields in the ``rcu_node`` and ``rcu_data``
+structures as well. The fields in the ``rcu_state`` structure represent
+the most current value, and those of the other structures are compared
+in order to detect the beginnings and ends of grace periods in a
+distributed fashion. The values flow from ``rcu_state`` to ``rcu_node``
+(down the tree from the root to the leaves) to ``rcu_data``.
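+
+A minimal sketch of this encoding, using illustrative helper names
+rather than the kernel's exact ones:
+
+::
+
+   #define GP_SEQ_STATE_MASK 0x3UL  /* Bottom two bits hold the state. */
+
+   /* Zero if idle, one if a grace period is in progress. */
+   static unsigned long gp_seq_state(unsigned long gp_seq)
+   {
+           return gp_seq & GP_SEQ_STATE_MASK;
+   }
+
+   /* The grace-period counter proper, excluding the state bits. */
+   static unsigned long gp_seq_ctr(unsigned long gp_seq)
+   {
+           return gp_seq >> 2;
+   }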
+
+Miscellaneous
+'''''''''''''
+
+This portion of the ``rcu_state`` structure is declared as follows:
+
+::
+
+ 1 unsigned long gp_max;
+ 2 char abbr;
+ 3 char *name;
+
+The ``->gp_max`` field tracks the duration of the longest grace period
+in jiffies. It is protected by the root ``rcu_node``'s ``->lock``.
+
+The ``->name`` and ``->abbr`` fields distinguish between preemptible RCU
+(“rcu_preempt” and “p”) and non-preemptible RCU (“rcu_sched” and “s”).
+These fields are used for diagnostic and tracing purposes.
+
+The ``rcu_node`` Structure
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The ``rcu_node`` structures form the combining tree that propagates
+quiescent-state information from the leaves to the root, and also
+propagates grace-period information from the root down to the leaves.
+They provide local copies of the grace-period state in order to allow
+this information to be accessed in a synchronized manner without
+suffering the scalability limitations that would otherwise be imposed by
+global locking. In ``CONFIG_PREEMPT_RCU`` kernels, they manage the lists
+of tasks that have blocked while in their current RCU read-side critical
+section. In ``CONFIG_PREEMPT_RCU`` with ``CONFIG_RCU_BOOST``, they
+manage the per-\ ``rcu_node`` priority-boosting kernel threads
+(kthreads) and state. Finally, they record CPU-hotplug state in order to
+determine which CPUs should be ignored during a given grace period.
+
+The ``rcu_node`` structure's fields are discussed, singly and in groups,
+in the following sections.
+
+Connection to Combining Tree
+''''''''''''''''''''''''''''
+
+This portion of the ``rcu_node`` structure is declared as follows:
+
+::
+
+ 1 struct rcu_node *parent;
+ 2 u8 level;
+ 3 u8 grpnum;
+ 4 unsigned long grpmask;
+ 5 int grplo;
+ 6 int grphi;
+
+The ``->parent`` pointer references the ``rcu_node`` one level up in the
+tree, and is ``NULL`` for the root ``rcu_node``. The RCU implementation
+makes heavy use of this field to push quiescent states up the tree. The
+``->level`` field gives the level in the tree, with the root being at
+level zero, its children at level one, and so on. The ``->grpnum`` field
+gives this node's position within the children of its parent, so this
+number can range between 0 and 31 on 32-bit systems and between 0 and 63
+on 64-bit systems. The ``->level`` and ``->grpnum`` fields are used only
+during initialization and for tracing. The ``->grpmask`` field is the
+bitmask counterpart of ``->grpnum``, and therefore always has exactly
+one bit set. This mask is used to clear the bit corresponding to this
+``rcu_node`` structure in its parent's bitmasks, which are described
+later. Finally, the ``->grplo`` and ``->grphi`` fields contain the
+lowest and highest numbered CPU served by this ``rcu_node`` structure,
+respectively.
+
+All of these fields are constant, and thus do not require any
+synchronization.
+
+Synchronization
+'''''''''''''''
+
+This field of the ``rcu_node`` structure is declared as follows:
+
+::
+
+ 1 raw_spinlock_t lock;
+
+This field is used to protect the remaining fields in this structure,
+unless otherwise stated. That said, all of the fields in this structure
+can be accessed without locking for tracing purposes. Yes, this can
+result in confusing traces, but better some tracing confusion than to be
+heisenbugged out of existence.
+
+.. _grace-period-tracking-1:
+
+Grace-Period Tracking
+'''''''''''''''''''''
+
+This portion of the ``rcu_node`` structure is declared as follows:
+
+::
+
+ 1 unsigned long gp_seq;
+ 2 unsigned long gp_seq_needed;
+
+The ``rcu_node`` structures' ``->gp_seq`` fields are the counterparts of
+the field of the same name in the ``rcu_state`` structure. They each may
+lag up to one step behind their ``rcu_state`` counterpart. If the bottom
+two bits of a given ``rcu_node`` structure's ``->gp_seq`` field are zero,
+then this ``rcu_node`` structure believes that RCU is idle.
+
+The ``->gp_seq`` field of each ``rcu_node`` structure is updated at the
+beginning and the end of each grace period.
+
+The ``->gp_seq_needed`` fields record the furthest-in-the-future grace
+period request seen by the corresponding ``rcu_node`` structure. The
+request is considered fulfilled when the value of the ``->gp_seq`` field
+equals or exceeds that of the ``->gp_seq_needed`` field.
+
++-----------------------------------------------------------------------+
+| **Quick Quiz**: |
++-----------------------------------------------------------------------+
+| Suppose that this ``rcu_node`` structure doesn't see a request for a |
+| very long time. Won't wrapping of the ``->gp_seq`` field cause |
+| problems? |
++-----------------------------------------------------------------------+
+| **Answer**: |
++-----------------------------------------------------------------------+
+| No, because if the ``->gp_seq_needed`` field lags behind the |
+| ``->gp_seq`` field, the ``->gp_seq_needed`` field will be updated at |
+| the end of the grace period. Modulo-arithmetic comparisons therefore |
+| will always get the correct answer, even with wrapping. |
++-----------------------------------------------------------------------+
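+
+Such a wrap-tolerant comparison can be sketched as follows (an
+illustrative helper; the kernel uses macros of this general form):
+
+::
+
+   /* True if gp_seq has reached or passed gp_seq_needed, courtesy of
+    * unsigned subtraction followed by a signed comparison. */
+   static bool gp_seq_done(unsigned long gp_seq, unsigned long gp_seq_needed)
+   {
+           return (long)(gp_seq - gp_seq_needed) >= 0;
+   }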
+
+Quiescent-State Tracking
+''''''''''''''''''''''''
+
+These fields manage the propagation of quiescent states up the combining
+tree.
+
+This portion of the ``rcu_node`` structure has fields as follows:
+
+::
+
+ 1 unsigned long qsmask;
+ 2 unsigned long expmask;
+ 3 unsigned long qsmaskinit;
+ 4 unsigned long expmaskinit;
+
+The ``->qsmask`` field tracks which of this ``rcu_node`` structure's
+children still need to report quiescent states for the current normal
+grace period. Such children will have a value of 1 in their
+corresponding bit. Note that the leaf ``rcu_node`` structures should be
+thought of as having ``rcu_data`` structures as their children.
+Similarly, the ``->expmask`` field tracks which of this ``rcu_node``
+structure's children still need to report quiescent states for the
+current expedited grace period. An expedited grace period has the same
+conceptual properties as a normal grace period, but the expedited
+implementation accepts extreme CPU overhead to obtain much lower
+grace-period latency, for example, consuming a few tens of microseconds
+worth of CPU time to reduce grace-period duration from milliseconds to
+tens of microseconds. The ``->qsmaskinit`` field tracks which of this
+``rcu_node`` structure's children cover for at least one online CPU.
+This mask is used to initialize ``->qsmask``, and ``->expmaskinit`` is
+used to initialize ``->expmask`` at the beginning of the normal and
+expedited grace periods, respectively.
+
++-----------------------------------------------------------------------+
+| **Quick Quiz**: |
++-----------------------------------------------------------------------+
+| Why are these bitmasks protected by locking? Come on, haven't you |
+| heard of atomic instructions??? |
++-----------------------------------------------------------------------+
+| **Answer**: |
++-----------------------------------------------------------------------+
+| Lockless grace-period computation! Such a tantalizing possibility! |
+| But consider the following sequence of events: |
+| |
+| #. CPU 0 has been in dyntick-idle mode for quite some time. When it |
+| wakes up, it notices that the current RCU grace period needs it to |
+| report in, so it sets a flag where the scheduling clock interrupt |
+| will find it. |
+| #. Meanwhile, CPU 1 is running ``force_quiescent_state()``, and |
+| notices that CPU 0 has been in dyntick idle mode, which qualifies |
+| as an extended quiescent state. |
+| #. CPU 0's scheduling clock interrupt fires in the middle of an RCU |
+| read-side critical section, and notices that the RCU core needs |
+| something, so commences RCU softirq processing. |
+| #. CPU 0's softirq handler executes and is just about ready to report |
+| its quiescent state up the ``rcu_node`` tree. |
+| #. But CPU 1 beats it to the punch, completing the current grace |
+| period and starting a new one. |
+| #. CPU 0 now reports its quiescent state for the wrong grace period. |
+| That grace period might now end before the RCU read-side critical |
+| section. If that happens, disaster will ensue. |
+| |
+| So the locking is absolutely required in order to coordinate clearing |
+| of the bits with updating of the grace-period sequence number in |
+| ``->gp_seq``. |
++-----------------------------------------------------------------------+
+
+Blocked-Task Management
+'''''''''''''''''''''''
+
+``PREEMPT_RCU`` allows tasks to be preempted in the midst of their RCU
+read-side critical sections, and these tasks must be tracked explicitly.
+The details of exactly why and how they are tracked will be covered in a
+separate article on RCU read-side processing. For now, it is enough to
+know that the ``rcu_node`` structure tracks them.
+
+::
+
+ 1 struct list_head blkd_tasks;
+ 2 struct list_head *gp_tasks;
+ 3 struct list_head *exp_tasks;
+ 4 bool wait_blkd_tasks;
+
+The ``->blkd_tasks`` field is a list header for the list of blocked and
+preempted tasks. As tasks undergo context switches within RCU read-side
+critical sections, their ``task_struct`` structures are enqueued (via
+the ``task_struct``'s ``->rcu_node_entry`` field) onto the head of the
+``->blkd_tasks`` list for the leaf ``rcu_node`` structure corresponding
+to the CPU on which the outgoing context switch executed. As these tasks
+later exit their RCU read-side critical sections, they remove themselves
+from the list. This list is therefore in reverse time order, so that if
+one of the tasks is blocking the current grace period, all subsequent
+tasks must also be blocking that same grace period. Therefore, a single
+pointer into this list suffices to track all tasks blocking a given
+grace period. That pointer is stored in ``->gp_tasks`` for normal grace
+periods and in ``->exp_tasks`` for expedited grace periods. These last
+two fields are ``NULL`` if either there is no grace period in flight or
+if there are no blocked tasks preventing that grace period from
+completing. If either of these two pointers is referencing a task that
+removes itself from the ``->blkd_tasks`` list, then that task must
+advance the pointer to the next task on the list, or set the pointer to
+``NULL`` if there are no subsequent tasks on the list.
+
+For example, suppose that tasks T1, T2, and T3 are all hard-affinitied
+to the largest-numbered CPU in the system. Then if task T1 blocked in an
+RCU read-side critical section, then an expedited grace period started,
+then task T2 blocked in an RCU read-side critical section, then a normal
+grace period started, and finally task T3 blocked in an RCU read-side
+critical section, then the state of the last leaf ``rcu_node``
+structure's blocked-task list would be as shown below:
+
+.. kernel-figure:: blkd_task.svg
+
+Task T1 is blocking both grace periods, task T2 is blocking only the
+normal grace period, and task T3 is blocking neither grace period. Note
+that these tasks will not remove themselves from this list immediately
+upon resuming execution. They will instead remain on the list until they
+execute the outermost ``rcu_read_unlock()`` that ends their RCU
+read-side critical section.
+
+The ``->wait_blkd_tasks`` field indicates whether or not the current
+grace period is waiting on a blocked task.
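+
+The pointer-advancement rule described above can be sketched as follows
+(a hypothetical helper, with locking and ``->exp_tasks`` handling
+omitted):
+
+::
+
+   /* Sketch: task @t is exiting its RCU read-side critical section and
+    * removing itself from rnp->blkd_tasks. */
+   static void remove_blocked_task(struct rcu_node *rnp, struct task_struct *t)
+   {
+           struct list_head *np = t->rcu_node_entry.next;
+
+           if (np == &rnp->blkd_tasks)
+                   np = NULL;  /* No subsequent tasks on the list. */
+           if (rnp->gp_tasks == &t->rcu_node_entry)
+                   rnp->gp_tasks = np;  /* Advance (or NULL) the pointer. */
+           list_del_init(&t->rcu_node_entry);
+   }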
+
+Sizing the ``rcu_node`` Array
+'''''''''''''''''''''''''''''
+
+The ``rcu_node`` array is sized via a series of C-preprocessor
+expressions as follows:
+
+::
+
+ 1 #ifdef CONFIG_RCU_FANOUT
+ 2 #define RCU_FANOUT CONFIG_RCU_FANOUT
+ 3 #else
+ 4 # ifdef CONFIG_64BIT
+ 5 # define RCU_FANOUT 64
+ 6 # else
+ 7 # define RCU_FANOUT 32
+ 8 # endif
+ 9 #endif
+ 10
+ 11 #ifdef CONFIG_RCU_FANOUT_LEAF
+ 12 #define RCU_FANOUT_LEAF CONFIG_RCU_FANOUT_LEAF
+ 13 #else
+ 14 # ifdef CONFIG_64BIT
+ 15 # define RCU_FANOUT_LEAF 64
+ 16 # else
+ 17 # define RCU_FANOUT_LEAF 32
+ 18 # endif
+ 19 #endif
+ 20
+ 21 #define RCU_FANOUT_1 (RCU_FANOUT_LEAF)
+ 22 #define RCU_FANOUT_2 (RCU_FANOUT_1 * RCU_FANOUT)
+ 23 #define RCU_FANOUT_3 (RCU_FANOUT_2 * RCU_FANOUT)
+ 24 #define RCU_FANOUT_4 (RCU_FANOUT_3 * RCU_FANOUT)
+ 25
+ 26 #if NR_CPUS <= RCU_FANOUT_1
+ 27 # define RCU_NUM_LVLS 1
+ 28 # define NUM_RCU_LVL_0 1
+ 29 # define NUM_RCU_NODES NUM_RCU_LVL_0
+ 30 # define NUM_RCU_LVL_INIT { NUM_RCU_LVL_0 }
+ 31 # define RCU_NODE_NAME_INIT { "rcu_node_0" }
+ 32 # define RCU_FQS_NAME_INIT { "rcu_node_fqs_0" }
+ 33 # define RCU_EXP_NAME_INIT { "rcu_node_exp_0" }
+ 34 #elif NR_CPUS <= RCU_FANOUT_2
+ 35 # define RCU_NUM_LVLS 2
+ 36 # define NUM_RCU_LVL_0 1
+ 37 # define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
+ 38 # define NUM_RCU_NODES (NUM_RCU_LVL_0 + NUM_RCU_LVL_1)
+ 39 # define NUM_RCU_LVL_INIT { NUM_RCU_LVL_0, NUM_RCU_LVL_1 }
+ 40 # define RCU_NODE_NAME_INIT { "rcu_node_0", "rcu_node_1" }
+ 41 # define RCU_FQS_NAME_INIT { "rcu_node_fqs_0", "rcu_node_fqs_1" }
+ 42 # define RCU_EXP_NAME_INIT { "rcu_node_exp_0", "rcu_node_exp_1" }
+ 43 #elif NR_CPUS <= RCU_FANOUT_3
+ 44 # define RCU_NUM_LVLS 3
+ 45 # define NUM_RCU_LVL_0 1
+ 46 # define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_2)
+ 47 # define NUM_RCU_LVL_2 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
+ 48 # define NUM_RCU_NODES (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2)
+ 49 # define NUM_RCU_LVL_INIT { NUM_RCU_LVL_0, NUM_RCU_LVL_1, NUM_RCU_LVL_2 }
+ 50 # define RCU_NODE_NAME_INIT { "rcu_node_0", "rcu_node_1", "rcu_node_2" }
+ 51 # define RCU_FQS_NAME_INIT { "rcu_node_fqs_0", "rcu_node_fqs_1", "rcu_node_fqs_2" }
+ 52 # define RCU_EXP_NAME_INIT { "rcu_node_exp_0", "rcu_node_exp_1", "rcu_node_exp_2" }
+ 53 #elif NR_CPUS <= RCU_FANOUT_4
+ 54 # define RCU_NUM_LVLS 4
+ 55 # define NUM_RCU_LVL_0 1
+ 56 # define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_3)
+ 57 # define NUM_RCU_LVL_2 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_2)
+ 58 # define NUM_RCU_LVL_3 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
+ 59 # define NUM_RCU_NODES (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2 + NUM_RCU_LVL_3)
+ 60 # define NUM_RCU_LVL_INIT { NUM_RCU_LVL_0, NUM_RCU_LVL_1, NUM_RCU_LVL_2, NUM_RCU_LVL_3 }
+ 61 # define RCU_NODE_NAME_INIT { "rcu_node_0", "rcu_node_1", "rcu_node_2", "rcu_node_3" }
+ 62 # define RCU_FQS_NAME_INIT { "rcu_node_fqs_0", "rcu_node_fqs_1", "rcu_node_fqs_2", "rcu_node_fqs_3" }
+ 63 # define RCU_EXP_NAME_INIT { "rcu_node_exp_0", "rcu_node_exp_1", "rcu_node_exp_2", "rcu_node_exp_3" }
+ 64 #else
+ 65 # error "CONFIG_RCU_FANOUT insufficient for NR_CPUS"
+ 66 #endif
+
+The maximum number of levels in the ``rcu_node`` structure is currently
+limited to four, as specified by lines 21-24 and the structure of the
+subsequent “if” statement. For 32-bit systems, this allows
+16*32*32*32=524,288 CPUs, which should be sufficient for the next few
+years at least. For 64-bit systems, 16*64*64*64=4,194,304 CPUs is
+allowed, which should see us through the next decade or so. This
+four-level tree also allows kernels built with ``CONFIG_RCU_FANOUT=8``
+to support up to 4096 CPUs, which might be useful in very large systems
+having eight CPUs per socket (but please note that no one has yet shown
+any measurable performance degradation due to misaligned socket and
+``rcu_node`` boundaries). In addition, building kernels with a full four
+levels of ``rcu_node`` tree permits better testing of RCU's
+combining-tree code.
+
+The ``RCU_FANOUT`` symbol controls how many children are permitted at
+each non-leaf level of the ``rcu_node`` tree. If the
+``CONFIG_RCU_FANOUT`` Kconfig option is not specified, it is set based
+on the word size of the system, which is also the Kconfig default.
+
+The ``RCU_FANOUT_LEAF`` symbol controls how many CPUs are handled by
+each leaf ``rcu_node`` structure. Experience has shown that allowing a
+given leaf ``rcu_node`` structure to handle 64 CPUs, as permitted by the
+number of bits in the ``->qsmask`` field on a 64-bit system, results in
+excessive contention for the leaf ``rcu_node`` structures' ``->lock``
+fields. The number of CPUs per leaf ``rcu_node`` structure is therefore
+limited to 16 given the default value of ``CONFIG_RCU_FANOUT_LEAF``. If
+``CONFIG_RCU_FANOUT_LEAF`` is unspecified, the value selected is based
+on the word size of the system, just as for ``CONFIG_RCU_FANOUT``.
+Lines 11-19 perform this computation.
+
+Lines 21-24 compute the maximum number of CPUs supported by a
+single-level (which contains a single ``rcu_node`` structure),
+two-level, three-level, and four-level ``rcu_node`` tree, respectively,
+given the fanout specified by ``RCU_FANOUT`` and ``RCU_FANOUT_LEAF``.
+These numbers of CPUs are retained in the ``RCU_FANOUT_1``,
+``RCU_FANOUT_2``, ``RCU_FANOUT_3``, and ``RCU_FANOUT_4`` C-preprocessor
+variables, respectively.
+
+These variables are used to control the C-preprocessor ``#if`` statement
+spanning lines 26-66 that computes the number of ``rcu_node`` structures
+required for each level of the tree, as well as the number of levels
+required. The number of levels is placed in the ``NUM_RCU_LVLS``
+C-preprocessor variable by lines 27, 35, 44, and 54. The number of
+``rcu_node`` structures for the topmost level of the tree is always
+exactly one, and this value is unconditionally placed into
+``NUM_RCU_LVL_0`` by lines 28, 36, 45, and 55. The rest of the levels
+(if any) of the ``rcu_node`` tree are computed by dividing the maximum
+number of CPUs by the fanout supported by the number of levels from the
+current level down, rounding up. This computation is performed by
+lines 37, 46-47, and 56-58. Lines 31-33, 40-42, 50-52, and 61-63 create
+initializers for lockdep lock-class names. Finally, lines 64-66 produce
+an error if the maximum number of CPUs is too large for the specified
+fanout.
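+
+As a worked example (not additional kernel code), a 64-bit system built
+with ``NR_CPUS=1024`` and the default ``CONFIG_RCU_FANOUT=64`` and
+``CONFIG_RCU_FANOUT_LEAF=16`` takes the two-level branch, so that the
+above expressions reduce to:
+
+::
+
+   #define RCU_FANOUT_1  16    /* CPUs per leaf rcu_node structure. */
+   #define RCU_FANOUT_2  1024  /* 16 * 64: CPUs per two-level tree. */
+   /* NR_CPUS (1024) <= RCU_FANOUT_2, therefore: */
+   #define RCU_NUM_LVLS  2
+   #define NUM_RCU_LVL_0 1     /* The root. */
+   #define NUM_RCU_LVL_1 64    /* DIV_ROUND_UP(1024, 16) leaves. */
+   #define NUM_RCU_NODES 65    /* 1 + 64. */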
+
+The ``rcu_segcblist`` Structure
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The ``rcu_segcblist`` structure maintains a segmented list of callbacks
+as follows:
+
+::
+
+ 1 #define RCU_DONE_TAIL 0
+ 2 #define RCU_WAIT_TAIL 1
+ 3 #define RCU_NEXT_READY_TAIL 2
+ 4 #define RCU_NEXT_TAIL 3
+ 5 #define RCU_CBLIST_NSEGS 4
+ 6
+ 7 struct rcu_segcblist {
+ 8 struct rcu_head *head;
+ 9 struct rcu_head **tails[RCU_CBLIST_NSEGS];
+ 10 unsigned long gp_seq[RCU_CBLIST_NSEGS];
+ 11 long len;
+ 12 long len_lazy;
+ 13 };
+
+The segments are as follows:
+
+#. ``RCU_DONE_TAIL``: Callbacks whose grace periods have elapsed. These
+ callbacks are ready to be invoked.
+#. ``RCU_WAIT_TAIL``: Callbacks that are waiting for the current grace
+ period. Note that different CPUs can have different ideas about which
+ grace period is current, hence the ``->gp_seq`` field.
+#. ``RCU_NEXT_READY_TAIL``: Callbacks waiting for the next grace period
+ to start.
+#. ``RCU_NEXT_TAIL``: Callbacks that have not yet been associated with a
+ grace period.
+
+The ``->head`` pointer references the first callback or is ``NULL`` if
+the list contains no callbacks (which is *not* the same as being empty).
+Each element of the ``->tails[]`` array references the ``->next``
+pointer of the last callback in the corresponding segment of the list,
+or the list's ``->head`` pointer if that segment and all previous
+segments are empty. If the corresponding segment is empty but some
+previous segment is not empty, then the array element is identical to
+its predecessor. Older callbacks are closer to the head of the list, and
+new callbacks are added at the tail. This relationship between the
+``->head`` pointer, the ``->tails[]`` array, and the callbacks is shown
+in this diagram:
+
+.. kernel-figure:: nxtlist.svg
+
+In this figure, the ``->head`` pointer references the first RCU callback
+in the list. The ``->tails[RCU_DONE_TAIL]`` array element references the
+``->head`` pointer itself, indicating that none of the callbacks is
+ready to invoke. The ``->tails[RCU_WAIT_TAIL]`` array element references
+callback CB 2's ``->next`` pointer, which indicates that CB 1 and CB 2
+are both waiting on the current grace period, give or take possible
+disagreements about exactly which grace period is the current one. The
+``->tails[RCU_NEXT_READY_TAIL]`` array element references the same RCU
+callback that ``->tails[RCU_WAIT_TAIL]`` does, which indicates that
+there are no callbacks waiting on the next RCU grace period. The
+``->tails[RCU_NEXT_TAIL]`` array element references CB 4's ``->next``
+pointer, indicating that all the remaining RCU callbacks have not yet
+been assigned to an RCU grace period. Note that the
+``->tails[RCU_NEXT_TAIL]`` array element always references the last RCU
+callback's ``->next`` pointer unless the callback list is empty, in
+which case it references the ``->head`` pointer.
+
+There is one additional important special case for the
+``->tails[RCU_NEXT_TAIL]`` array element: It can be ``NULL`` when this
+list is *disabled*. Lists are disabled when the corresponding CPU is
+offline or when the corresponding CPU's callbacks are offloaded to a
+kthread, both of which are described elsewhere.
+
+CPUs advance their callbacks from the ``RCU_NEXT_TAIL`` to the
+``RCU_NEXT_READY_TAIL`` to the ``RCU_WAIT_TAIL`` to the
+``RCU_DONE_TAIL`` list segments as grace periods advance.
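+
+For example, enqueuing a new callback onto the ``RCU_NEXT_TAIL``
+segment can be sketched as follows (simplified: the kernel's real
+enqueue code must also handle disabled lists and use the proper atomic
+primitives):
+
+::
+
+   /* Sketch: add callback @rhp to the RCU_NEXT_TAIL segment of @rsclp. */
+   static void segcblist_enqueue_sketch(struct rcu_segcblist *rsclp,
+                                        struct rcu_head *rhp)
+   {
+           rsclp->len++;
+           rhp->next = NULL;
+           *rsclp->tails[RCU_NEXT_TAIL] = rhp;        /* Link after last CB. */
+           rsclp->tails[RCU_NEXT_TAIL] = &rhp->next;  /* New last ->next. */
+   }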
+
+The ``->gp_seq[]`` array records grace-period numbers corresponding to
+the list segments. This is what allows different CPUs to have different
+ideas as to which is the current grace period while still avoiding
+premature invocation of their callbacks. In particular, this allows CPUs
+that go idle for extended periods to determine which of their callbacks
+are ready to be invoked after reawakening.
+
+The ``->len`` counter contains the number of callbacks in ``->head``,
+and the ``->len_lazy`` counter contains the number of those callbacks that are
+known to only free memory, and whose invocation can therefore be safely
+deferred.
+
+.. important::
+
+ It is the ``->len`` field that determines whether or
+ not there are callbacks associated with this ``rcu_segcblist``
+ structure, *not* the ``->head`` pointer. The reason for this is that all
+ the ready-to-invoke callbacks (that is, those in the ``RCU_DONE_TAIL``
+ segment) are extracted all at once at callback-invocation time
+ (``rcu_do_batch``), due to which ``->head`` may be set to NULL if there
+ are no not-done callbacks remaining in the ``rcu_segcblist``. If
+ callback invocation must be postponed, for example, because a
+ high-priority process just woke up on this CPU, then the remaining
+ callbacks are placed back on the ``RCU_DONE_TAIL`` segment and
+ ``->head`` once again points to the start of the segment. In short, the
+ head field can briefly be ``NULL`` even though the CPU has callbacks
+ present the entire time. Therefore, it is not appropriate to test the
+ ``->head`` pointer for ``NULL``.
+
+In contrast, the ``->len`` and ``->len_lazy`` counts are adjusted only
+after the corresponding callbacks have been invoked. This means that the
+``->len`` count is zero only if the ``rcu_segcblist`` structure really
+is devoid of callbacks. Of course, off-CPU sampling of the ``->len``
+count requires careful use of appropriate synchronization, for example,
+memory barriers. This synchronization can be a bit subtle, particularly
+in the case of ``rcu_barrier()``.
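+
+In other words, code checking for the presence of callbacks should
+follow this sketch (illustrative helper name):
+
+::
+
+   /* Sketch: does this rcu_segcblist have any callbacks queued? */
+   static bool segcblist_has_cbs(struct rcu_segcblist *rsclp)
+   {
+           return READ_ONCE(rsclp->len) != 0;  /* Not rsclp->head != NULL! */
+   }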
+
+The ``rcu_data`` Structure
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The ``rcu_data`` structure maintains the per-CPU state for the RCU subsystem. The
+fields in this structure may be accessed only from the corresponding CPU
+(and from tracing) unless otherwise stated. This structure is the focus
+of quiescent-state detection and RCU callback queuing. It also tracks
+its relationship to the corresponding leaf ``rcu_node`` structure to
+allow more-efficient propagation of quiescent states up the ``rcu_node``
+combining tree. Like the ``rcu_node`` structure, it provides a local
+copy of the grace-period information to allow synchronized access to
+this information from the corresponding CPU essentially for free.
+Finally, this
+structure records past dyntick-idle state for the corresponding CPU and
+also tracks statistics.
+
+The ``rcu_data`` structure's fields are discussed, singly and in groups,
+in the following sections.
+
+Connection to Other Data Structures
+'''''''''''''''''''''''''''''''''''
+
+This portion of the ``rcu_data`` structure is declared as follows:
+
+::
+
+ 1 int cpu;
+ 2 struct rcu_node *mynode;
+ 3 unsigned long grpmask;
+ 4 bool beenonline;
+
+The ``->cpu`` field contains the number of the corresponding CPU and the
+``->mynode`` field references the corresponding ``rcu_node`` structure.
+The ``->mynode`` is used to propagate quiescent states up the combining
+tree. These two fields are constant and therefore do not require
+synchronization.
+
+The ``->grpmask`` field indicates the bit in the ``->mynode->qsmask``
+corresponding to this ``rcu_data`` structure, and is also used when
+propagating quiescent states. The ``->beenonline`` flag is set whenever
+the corresponding CPU comes online, which means that the debugfs tracing
+need not dump out any ``rcu_data`` structure for which this flag is not
+set.
+
+Quiescent-State and Grace-Period Tracking
+'''''''''''''''''''''''''''''''''''''''''
+
+This portion of the ``rcu_data`` structure is declared as follows:
+
+::
+
+ 1 unsigned long gp_seq;
+ 2 unsigned long gp_seq_needed;
+ 3 bool cpu_no_qs;
+ 4 bool core_needs_qs;
+ 5 bool gpwrap;
+
+The ``->gp_seq`` field is the counterpart of the field of the same name
+in the ``rcu_state`` and ``rcu_node`` structures. The
+``->gp_seq_needed`` field is the counterpart of the field of the same
+name in the ``rcu_node`` structure. They may each lag up to one step behind their
+``rcu_node`` counterparts, but in ``CONFIG_NO_HZ_IDLE`` and
+``CONFIG_NO_HZ_FULL`` kernels can lag arbitrarily far behind for CPUs in
+dyntick-idle mode (but these counters will catch up upon exit from
+dyntick-idle mode). If the lower two bits of a given ``rcu_data``
+structure's ``->gp_seq`` are zero, then this ``rcu_data`` structure
+believes that RCU is idle.
+
++-----------------------------------------------------------------------+
+| **Quick Quiz**: |
++-----------------------------------------------------------------------+
+| All this replication of the grace period numbers can only cause |
+| massive confusion. Why not just keep a global sequence number and be |
+| done with it??? |
++-----------------------------------------------------------------------+
+| **Answer**: |
++-----------------------------------------------------------------------+
+| Because if there were only a single global sequence number, there |
+| would need to be a single global lock to allow safely accessing and |
+| updating it. And if we are not going to have a single global lock, we |
+| need to carefully manage the numbers on a per-node basis. Recall from |
+| the answer to a previous Quick Quiz that the consequences of applying |
+| a previously sampled quiescent state to the wrong grace period are |
+| quite severe. |
++-----------------------------------------------------------------------+
+
+The ``->cpu_no_qs`` flag indicates that the CPU has not yet passed
+through a quiescent state, while the ``->core_needs_qs`` flag indicates
+that the RCU core needs a quiescent state from the corresponding CPU.
+The ``->gpwrap`` field indicates that the corresponding CPU has remained
+idle for so long that the ``gp_seq`` counter is in danger of overflow,
+which will cause the CPU to disregard the values of its counters on its
+next exit from idle.
+
+RCU Callback Handling
+'''''''''''''''''''''
+
+In the absence of CPU-hotplug events, RCU callbacks are invoked by the
+same CPU that registered them. This is strictly a cache-locality
+optimization: callbacks can and do get invoked on CPUs other than the
+one that registered them. After all, if the CPU that registered a given
+callback has gone offline before the callback can be invoked, there
+really is no other choice.
+
+This portion of the ``rcu_data`` structure is declared as follows:
+
+::
+
+ 1 struct rcu_segcblist cblist;
+ 2 long qlen_last_fqs_check;
+ 3 unsigned long n_cbs_invoked;
+ 4 unsigned long n_nocbs_invoked;
+ 5 unsigned long n_cbs_orphaned;
+ 6 unsigned long n_cbs_adopted;
+ 7 unsigned long n_force_qs_snap;
+ 8 long blimit;
+
+The ``->cblist`` structure is the segmented callback list described
+earlier. The CPU advances the callbacks in its ``rcu_data`` structure
+whenever it notices that another RCU grace period has completed. The CPU
+detects the completion of an RCU grace period by noticing that the value
+of its ``rcu_data`` structure's ``->gp_seq`` field differs from that of
+its leaf ``rcu_node`` structure. Recall that each ``rcu_node``
+structure's ``->gp_seq`` field is updated at the beginnings and ends of
+each grace period.
+
+The ``->qlen_last_fqs_check`` and ``->n_force_qs_snap`` fields coordinate the
+forcing of quiescent states from ``call_rcu()`` and friends when
+callback lists grow excessively long.
+
+The ``->n_cbs_invoked``, ``->n_cbs_orphaned``, and ``->n_cbs_adopted``
+fields count the number of callbacks invoked, sent to other CPUs when
+this CPU goes offline, and received from other CPUs when those other
+CPUs go offline. The ``->n_nocbs_invoked`` field is used when the CPU's
+callbacks are offloaded to a kthread.
+
+Finally, the ``->blimit`` counter is the maximum number of RCU callbacks
+that may be invoked at a given time.
+
+Dyntick-Idle Handling
+'''''''''''''''''''''
+
+This portion of the ``rcu_data`` structure is declared as follows:
+
+::
+
+ 1 int dynticks_snap;
+ 2 unsigned long dynticks_fqs;
+
+The ``->dynticks_snap`` field is used to take a snapshot of the
+corresponding CPU's dyntick-idle state when forcing quiescent states,
+and is therefore accessed from other CPUs. Finally, the
+``->dynticks_fqs`` field is used to count the number of times this CPU
+is determined to be in dyntick-idle state, and is used for tracing and
+debugging purposes.
+
+This portion of the ``rcu_data`` structure is declared as follows:
+
+::
+
+ 1 long dynticks_nesting;
+ 2 long dynticks_nmi_nesting;
+ 3 atomic_t dynticks;
+ 4 bool rcu_need_heavy_qs;
+ 5 bool rcu_urgent_qs;
+
+These fields in the ``rcu_data`` structure maintain the per-CPU dyntick-idle
+state for the corresponding CPU. The fields may be accessed only from
+the corresponding CPU (and from tracing) unless otherwise stated.
+
+The ``->dynticks_nesting`` field counts the nesting depth of process
+execution, so that in normal circumstances this counter has value zero
+or one. NMIs, irqs, and tracers are counted by the
+``->dynticks_nmi_nesting`` field. Because NMIs cannot be masked, changes
+to this variable have to be undertaken carefully using an algorithm
+provided by Andy Lutomirski. The initial transition from idle adds one,
+and nested transitions add two, so that a nesting level of five is
+represented by a ``->dynticks_nmi_nesting`` value of nine. This counter
+can therefore be thought of as counting the number of reasons why this
+CPU cannot be permitted to enter dyntick-idle mode, aside from
+process-level transitions.
+
+However, it turns out that when running in non-idle kernel context, the
+Linux kernel is fully capable of entering interrupt handlers that never
+exit and perhaps also vice versa. Therefore, whenever the
+``->dynticks_nesting`` field is incremented up from zero, the
+``->dynticks_nmi_nesting`` field is set to a large positive number, and
+whenever the ``->dynticks_nesting`` field is decremented down to zero,
+the ``->dynticks_nmi_nesting`` field is set to zero. Assuming that
+the number of misnested interrupts is not sufficient to overflow the
+counter, this approach corrects the ``->dynticks_nmi_nesting`` field
+every time the corresponding CPU enters the idle loop from process
+context.
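+
+The add-one-then-two rule can be sketched as follows (a hypothetical
+helper, ignoring the large-positive-number fixup described above):
+
+::
+
+   /* Sketch: entering NMI/irq context. The first entry from idle adds
+    * one and nested entries add two, so nesting depth five yields a
+    * ->dynticks_nmi_nesting value of 1 + 4 * 2 = 9. */
+   static void nmi_enter_sketch(struct rcu_data *rdp, bool was_idle)
+   {
+           rdp->dynticks_nmi_nesting += was_idle ? 1 : 2;
+   }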
+
+The ``->dynticks`` field counts the corresponding CPU's transitions to
+and from either dyntick-idle or user mode, so that this counter has an
+even value when the CPU is in dyntick-idle mode or user mode and an odd
+value otherwise. The transitions to/from user mode need to be counted
+for user mode adaptive-ticks support (see timers/NO_HZ.txt).
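+
+This even/odd convention allows other CPUs to sample a given CPU's
+state, as in this sketch (illustrative helper name):
+
+::
+
+   /* Sketch: an even ->dynticks value means the CPU is in dyntick-idle
+    * or user mode, that is, in an extended quiescent state. */
+   static bool cpu_in_eqs_sketch(struct rcu_data *rdp)
+   {
+           return !(atomic_read(&rdp->dynticks) & 0x1);
+   }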
+
+The ``->rcu_need_heavy_qs`` field is used to record the fact that the
+RCU core code would really like to see a quiescent state from the
+corresponding CPU, so much so that it is willing to call for
+heavy-weight dyntick-counter operations. This flag is checked by RCU's
+context-switch and ``cond_resched()`` code, which provide a momentary
+idle sojourn in response.
+
+Finally, the ``->rcu_urgent_qs`` field is used to record the fact that
+the RCU core code would really like to see a quiescent state from the
+corresponding CPU, with the various other fields indicating just how
+badly RCU wants this quiescent state. This flag is checked by RCU's
+context-switch path (``rcu_note_context_switch()``) and the
+``cond_resched()`` code.
+
++-----------------------------------------------------------------------+
+| **Quick Quiz**: |
++-----------------------------------------------------------------------+
+| Why not simply combine the ``->dynticks_nesting`` and |
+| ``->dynticks_nmi_nesting`` counters into a single counter that just |
+| counts the number of reasons that the corresponding CPU is non-idle? |
++-----------------------------------------------------------------------+
+| **Answer**: |
++-----------------------------------------------------------------------+
+| Because this would fail in the presence of interrupts whose handlers |
+| never return and of handlers that manage to return from a made-up |
+| interrupt. |
++-----------------------------------------------------------------------+
+
+Additional fields are present for some special-purpose builds, and are
+discussed separately.
+
+The ``rcu_head`` Structure
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Each ``rcu_head`` structure represents an RCU callback. These structures
+are normally embedded within RCU-protected data structures whose
+algorithms use asynchronous grace periods. In contrast, when using
+algorithms that block waiting for RCU grace periods, RCU users need not
+provide ``rcu_head`` structures.
+
+The ``rcu_head`` structure has fields as follows:
+
+::
+
+ 1 struct rcu_head *next;
+ 2 void (*func)(struct rcu_head *head);
+
+The ``->next`` field is used to link the ``rcu_head`` structures
+together in the lists within the ``rcu_data`` structures. The ``->func``
+field is a pointer to the function to be called when the callback is
+ready to be invoked, and this function is passed a pointer to the
+``rcu_head`` structure. However, ``kfree_rcu()`` uses the ``->func``
+field to record the offset of the ``rcu_head`` structure within the
+enclosing RCU-protected data structure.
+
+Both of these fields are used internally by RCU. From the viewpoint of
+RCU users, this structure is an opaque “cookie”.
+
++-----------------------------------------------------------------------+
+| **Quick Quiz**: |
++-----------------------------------------------------------------------+
+| Given that the callback function ``->func`` is passed a pointer to |
+| the ``rcu_head`` structure, how is that function supposed to find the |
+| beginning of the enclosing RCU-protected data structure? |
++-----------------------------------------------------------------------+
+| **Answer**: |
++-----------------------------------------------------------------------+
+| In actual practice, there is a separate callback function per type of |
+| RCU-protected data structure. The callback function can therefore use |
+| the ``container_of()`` macro in the Linux kernel (or other |
+| pointer-manipulation facilities in other software environments) to |
+| find the beginning of the enclosing structure. |
++-----------------------------------------------------------------------+
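+
+For example, a hypothetical RCU-protected ``struct foo`` might be
+freed as follows:
+
+::
+
+   struct foo {
+           int data;
+           struct rcu_head rh;
+   };
+
+   /* Invoked after a grace period following call_rcu(&fp->rh, foo_rcu_cb). */
+   static void foo_rcu_cb(struct rcu_head *rhp)
+   {
+           struct foo *fp = container_of(rhp, struct foo, rh);
+
+           kfree(fp);
+   }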
+
+RCU-Specific Fields in the ``task_struct`` Structure
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The ``CONFIG_PREEMPT_RCU`` implementation uses some additional fields in
+the ``task_struct`` structure:
+
+::
+
+ 1 #ifdef CONFIG_PREEMPT_RCU
+ 2 int rcu_read_lock_nesting;
+ 3 union rcu_special rcu_read_unlock_special;
+ 4 struct list_head rcu_node_entry;
+ 5 struct rcu_node *rcu_blocked_node;
+ 6 #endif /* #ifdef CONFIG_PREEMPT_RCU */
+ 7 #ifdef CONFIG_TASKS_RCU
+ 8 unsigned long rcu_tasks_nvcsw;
+ 9 bool rcu_tasks_holdout;
+ 10 struct list_head rcu_tasks_holdout_list;
+ 11 int rcu_tasks_idle_cpu;
+ 12 #endif /* #ifdef CONFIG_TASKS_RCU */
+
+The ``->rcu_read_lock_nesting`` field records the nesting level for RCU
+read-side critical sections, and the ``->rcu_read_unlock_special`` field
+is a bitmask that records special conditions that require
+``rcu_read_unlock()`` to do additional work. The ``->rcu_node_entry``
+field is used to form lists of tasks that have blocked within
+preemptible-RCU read-side critical sections and the
+``->rcu_blocked_node`` field references the ``rcu_node`` structure whose
+list this task is a member of, or ``NULL`` if it is not blocked within a
+preemptible-RCU read-side critical section.
+
+The ``->rcu_tasks_nvcsw`` field tracks the number of voluntary context
+switches that this task had undergone at the beginning of the current
+tasks-RCU grace period, ``->rcu_tasks_holdout`` is set if the current
+tasks-RCU grace period is waiting on this task,
+``->rcu_tasks_holdout_list`` is a list element enqueuing this task on
+the holdout list, and ``->rcu_tasks_idle_cpu`` tracks which CPU this
+idle task is running on, but only if the task is currently running, that
+is, if the CPU is currently idle.
+
+Accessor Functions
+~~~~~~~~~~~~~~~~~~
+
+The following listing shows the ``rcu_get_root()`` function and the
+``rcu_for_each_node_breadth_first()`` and ``rcu_for_each_leaf_node()``
+macros:
+
+::
+
+ 1 static struct rcu_node *rcu_get_root(struct rcu_state *rsp)
+ 2 {
+ 3 return &rsp->node[0];
+ 4 }
+ 5
+ 6 #define rcu_for_each_node_breadth_first(rsp, rnp) \
+ 7 for ((rnp) = &(rsp)->node[0]; \
+ 8 (rnp) < &(rsp)->node[NUM_RCU_NODES]; (rnp)++)
+ 9
+ 10 #define rcu_for_each_leaf_node(rsp, rnp) \
+ 11 for ((rnp) = (rsp)->level[NUM_RCU_LVLS - 1]; \
+ 12 (rnp) < &(rsp)->node[NUM_RCU_NODES]; (rnp)++)
+
+The ``rcu_get_root()`` function simply returns a pointer to the first element of
+the specified ``rcu_state`` structure's ``->node[]`` array, which is the
+root ``rcu_node`` structure.
+
+As noted earlier, the ``rcu_for_each_node_breadth_first()`` macro takes
+advantage of the layout of the ``rcu_node`` structures in the
+``rcu_state`` structure's ``->node[]`` array, performing a breadth-first
+traversal by simply traversing the array in order. Similarly, the
+``rcu_for_each_leaf_node()`` macro traverses only the last part of the
+array, thus traversing only the leaf ``rcu_node`` structures.
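+
+For example, the following sketch (not taken from the kernel) uses
+``rcu_for_each_leaf_node()`` to print the range of CPUs covered by
+each leaf:
+
+::
+
+   static void print_leaf_ranges(struct rcu_state *rsp)
+   {
+           struct rcu_node *rnp;
+
+           rcu_for_each_leaf_node(rsp, rnp)
+                   pr_info("leaf rcu_node covers CPUs %d-%d\n",
+                           rnp->grplo, rnp->grphi);
+   }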
+
++-----------------------------------------------------------------------+
+| **Quick Quiz**: |
++-----------------------------------------------------------------------+
+| What does ``rcu_for_each_leaf_node()`` do if the ``rcu_node`` tree |
+| contains only a single node? |
++-----------------------------------------------------------------------+
+| **Answer**: |
++-----------------------------------------------------------------------+
+| In the single-node case, ``rcu_for_each_leaf_node()`` traverses the |
+| single node. |
++-----------------------------------------------------------------------+
+
+Summary
+~~~~~~~
+
+So the state of RCU is represented by an ``rcu_state`` structure, which
+contains a combining tree of ``rcu_node`` and ``rcu_data`` structures.
+Finally, in ``CONFIG_NO_HZ_IDLE`` kernels, each CPU's dyntick-idle state
+is tracked by dynticks-related fields in the ``rcu_data`` structure. If
+you made it this far, you are well prepared to read the code
+walkthroughs in the other articles in this series.
+
+Acknowledgments
+~~~~~~~~~~~~~~~
+
+I owe thanks to Cyrill Gorcunov, Mathieu Desnoyers, Dhaval Giani, Paul
+Turner, Abhishek Srivastava, Matt Kowalczyk, and Serge Hallyn for
+helping me get this document into a more human-readable state.
+
+Legal Statement
+~~~~~~~~~~~~~~~
+
+This work represents the view of the author and does not necessarily
+represent the view of IBM.
+
+Linux is a registered trademark of Linus Torvalds.
+
+Other company, product, and service names may be trademarks or service
+marks of others.
diff --git a/Documentation/RCU/Design/Expedited-Grace-Periods/Expedited-Grace-Periods.html b/Documentation/RCU/Design/Expedited-Grace-Periods/Expedited-Grace-Periods.html
deleted file mode 100644
index 57300db4b5ff..000000000000
--- a/Documentation/RCU/Design/Expedited-Grace-Periods/Expedited-Grace-Periods.html
+++ /dev/null
@@ -1,668 +0,0 @@
-<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
- "http://www.w3.org/TR/html4/loose.dtd">
- <html>
- <head><title>A Tour Through TREE_RCU's Expedited Grace Periods</title>
- <meta HTTP-EQUIV="Content-Type" CONTENT="text/html; charset=iso-8859-1">
-
-<h2>Introduction</h2>
-
-This document describes RCU's expedited grace periods.
-Unlike RCU's normal grace periods, which accept long latencies to attain
-high efficiency and minimal disturbance, expedited grace periods accept
-lower efficiency and significant disturbance to attain shorter latencies.
-
-<p>
-There are two flavors of RCU (RCU-preempt and RCU-sched), with an earlier
-third RCU-bh flavor having been implemented in terms of the other two.
-Each of the two implementations is covered in its own section.
-
-<ol>
-<li> <a href="#Expedited Grace Period Design">
- Expedited Grace Period Design</a>
-<li> <a href="#RCU-preempt Expedited Grace Periods">
- RCU-preempt Expedited Grace Periods</a>
-<li> <a href="#RCU-sched Expedited Grace Periods">
- RCU-sched Expedited Grace Periods</a>
-<li> <a href="#Expedited Grace Period and CPU Hotplug">
- Expedited Grace Period and CPU Hotplug</a>
-<li> <a href="#Expedited Grace Period Refinements">
- Expedited Grace Period Refinements</a>
-</ol>
-
-<h2><a name="Expedited Grace Period Design">
-Expedited Grace Period Design</a></h2>
-
-<p>
-The expedited RCU grace periods cannot be accused of being subtle,
-given that they for all intents and purposes hammer every CPU that
-has not yet provided a quiescent state for the current expedited
-grace period.
-The one saving grace is that the hammer has grown a bit smaller
-over time: The old call to <tt>try_stop_cpus()</tt> has been
-replaced with a set of calls to <tt>smp_call_function_single()</tt>,
-each of which results in an IPI to the target CPU.
-The corresponding handler function checks the CPU's state, motivating
-a faster quiescent state where possible, and triggering a report
-of that quiescent state.
-As always for RCU, once everything has spent some time in a quiescent
-state, the expedited grace period has completed.
-
-<p>
-The details of the <tt>smp_call_function_single()</tt> handler's
-operation depend on the RCU flavor, as described in the following
-sections.
-
-<h2><a name="RCU-preempt Expedited Grace Periods">
-RCU-preempt Expedited Grace Periods</a></h2>
-
-<p>
-<tt>CONFIG_PREEMPT=y</tt> kernels implement RCU-preempt.
-The overall flow of the handling of a given CPU by an RCU-preempt
-expedited grace period is shown in the following diagram:
-
-<p><img src="ExpRCUFlow.svg" alt="ExpRCUFlow.svg" width="55%">
-
-<p>
-The solid arrows denote direct action, for example, a function call.
-The dotted arrows denote indirect action, for example, an IPI
-or a state that is reached after some time.
-
-<p>
-If a given CPU is offline or idle, <tt>synchronize_rcu_expedited()</tt>
-will ignore it because idle and offline CPUs are already residing
-in quiescent states.
-Otherwise, the expedited grace period will use
-<tt>smp_call_function_single()</tt> to send the CPU an IPI, which
-is handled by <tt>rcu_exp_handler()</tt>.
-
-<p>
-However, because this is preemptible RCU, <tt>rcu_exp_handler()</tt>
-can check to see if the CPU is currently running in an RCU read-side
-critical section.
-If not, the handler can immediately report a quiescent state.
-Otherwise, it sets flags so that the outermost <tt>rcu_read_unlock()</tt>
-invocation will provide the needed quiescent-state report.
-This flag-setting avoids the previous forced preemption of all
-CPUs that might have RCU read-side critical sections.
-In addition, this flag-setting is done so as to avoid increasing
-the overhead of the common-case fastpath through the scheduler.
-
-<p>
-Again because this is preemptible RCU, an RCU read-side critical section
-can be preempted.
-When that happens, RCU will enqueue the task, which will the continue to
-block the current expedited grace period until it resumes and finds its
-outermost <tt>rcu_read_unlock()</tt>.
-The CPU will report a quiescent state just after enqueuing the task because
-the CPU is no longer blocking the grace period.
-It is instead the preempted task doing the blocking.
-The list of blocked tasks is managed by <tt>rcu_preempt_ctxt_queue()</tt>,
-which is called from <tt>rcu_preempt_note_context_switch()</tt>, which
-in turn is called from <tt>rcu_note_context_switch()</tt>, which in
-turn is called from the scheduler.
-
-<table>
-<tr><th>&nbsp;</th></tr>
-<tr><th align="left">Quick Quiz:</th></tr>
-<tr><td>
- Why not just have the expedited grace period check the
- state of all the CPUs?
- After all, that would avoid all those real-time-unfriendly IPIs.
-</td></tr>
-<tr><th align="left">Answer:</th></tr>
-<tr><td bgcolor="#ffffff"><font color="ffffff">
- Because we want the RCU read-side critical sections to run fast,
- which means no memory barriers.
- Therefore, it is not possible to safely check the state from some
- other CPU.
- And even if it was possible to safely check the state, it would
- still be necessary to IPI the CPU to safely interact with the
- upcoming <tt>rcu_read_unlock()</tt> invocation, which means that
- the remote state testing would not help the worst-case
- latency that real-time applications care about.
-
- <p><font color="ffffff">One way to prevent your real-time
- application from getting hit with these IPIs is to
- build your kernel with <tt>CONFIG_NO_HZ_FULL=y</tt>.
- RCU would then perceive the CPU running your application
- as being idle, and it would be able to safely detect that
- state without needing to IPI the CPU.
-</font></td></tr>
-<tr><td>&nbsp;</td></tr>
-</table>
-
-<p>
-Please note that this is just the overall flow:
-Additional complications can arise due to races with CPUs going idle
-or offline, among other things.
-
-<h2><a name="RCU-sched Expedited Grace Periods">
-RCU-sched Expedited Grace Periods</a></h2>
-
-<p>
-<tt>CONFIG_PREEMPT=n</tt> kernels implement RCU-sched.
-The overall flow of the handling of a given CPU by an RCU-sched
-expedited grace period is shown in the following diagram:
-
-<p><img src="ExpSchedFlow.svg" alt="ExpSchedFlow.svg" width="55%">
-
-<p>
-As with RCU-preempt, RCU-sched's
-<tt>synchronize_rcu_expedited()</tt> ignores offline and
-idle CPUs, again because they are in remotely detectable
-quiescent states.
-However, because the
-<tt>rcu_read_lock_sched()</tt> and <tt>rcu_read_unlock_sched()</tt>
-leave no trace of their invocation, in general it is not possible to tell
-whether or not the current CPU is in an RCU read-side critical section.
-The best that RCU-sched's <tt>rcu_exp_handler()</tt> can do is to check
-for idle, on the off-chance that the CPU went idle while the IPI
-was in flight.
-If the CPU is idle, then <tt>rcu_exp_handler()</tt> reports
-the quiescent state.
-
-<p> Otherwise, the handler forces a future context switch by setting the
-NEED_RESCHED flag of the current task's thread flag and the CPU preempt
-counter.
-At the time of the context switch, the CPU reports the quiescent state.
-Should the CPU go offline first, it will report the quiescent state
-at that time.
-
-<h2><a name="Expedited Grace Period and CPU Hotplug">
-Expedited Grace Period and CPU Hotplug</a></h2>
-
-<p>
-The expedited nature of expedited grace periods require a much tighter
-interaction with CPU hotplug operations than is required for normal
-grace periods.
-In addition, attempting to IPI offline CPUs will result in splats, but
-failing to IPI online CPUs can result in too-short grace periods.
-Neither option is acceptable in production kernels.
-
-<p>
-The interaction between expedited grace periods and CPU hotplug operations
-is carried out at several levels:
-
-<ol>
-<li> The number of CPUs that have ever been online is tracked
- by the <tt>rcu_state</tt> structure's <tt>-&gt;ncpus</tt>
- field.
- The <tt>rcu_state</tt> structure's <tt>-&gt;ncpus_snap</tt>
- field tracks the number of CPUs that have ever been online
- at the beginning of an RCU expedited grace period.
- Note that this number never decreases, at least in the absence
- of a time machine.
-<li> The identities of the CPUs that have ever been online is
- tracked by the <tt>rcu_node</tt> structure's
- <tt>-&gt;expmaskinitnext</tt> field.
- The <tt>rcu_node</tt> structure's <tt>-&gt;expmaskinit</tt>
- field tracks the identities of the CPUs that were online
- at least once at the beginning of the most recent RCU
- expedited grace period.
- The <tt>rcu_state</tt> structure's <tt>-&gt;ncpus</tt> and
- <tt>-&gt;ncpus_snap</tt> fields are used to detect when
- new CPUs have come online for the first time, that is,
- when the <tt>rcu_node</tt> structure's <tt>-&gt;expmaskinitnext</tt>
- field has changed since the beginning of the last RCU
- expedited grace period, which triggers an update of each
- <tt>rcu_node</tt> structure's <tt>-&gt;expmaskinit</tt>
- field from its <tt>-&gt;expmaskinitnext</tt> field.
-<li> Each <tt>rcu_node</tt> structure's <tt>-&gt;expmaskinit</tt>
- field is used to initialize that structure's
- <tt>-&gt;expmask</tt> at the beginning of each RCU
- expedited grace period.
- This means that only those CPUs that have been online at least
- once will be considered for a given grace period.
-<li> Any CPU that goes offline will clear its bit in its leaf
- <tt>rcu_node</tt> structure's <tt>-&gt;qsmaskinitnext</tt>
- field, so any CPU with that bit clear can safely be ignored.
- However, it is possible for a CPU coming online or going offline
- to have this bit set for some time while <tt>cpu_online</tt>
- returns <tt>false</tt>.
-<li> For each non-idle CPU that RCU believes is currently online, the grace
- period invokes <tt>smp_call_function_single()</tt>.
- If this succeeds, the CPU was fully online.
- Failure indicates that the CPU is in the process of coming online
- or going offline, in which case it is necessary to wait for a
- short time period and try again.
- The purpose of this wait (or series of waits, as the case may be)
- is to permit a concurrent CPU-hotplug operation to complete.
-<li> In the case of RCU-sched, one of the last acts of an outgoing CPU
- is to invoke <tt>rcu_report_dead()</tt>, which
- reports a quiescent state for that CPU.
- However, this is likely paranoia-induced redundancy. <!-- @@@ -->
-</ol>
-
-<table>
-<tr><th>&nbsp;</th></tr>
-<tr><th align="left">Quick Quiz:</th></tr>
-<tr><td>
- Why all the dancing around with multiple counters and masks
- tracking CPUs that were once online?
- Why not just have a single set of masks tracking the currently
- online CPUs and be done with it?
-</td></tr>
-<tr><th align="left">Answer:</th></tr>
-<tr><td bgcolor="#ffffff"><font color="ffffff">
- Maintaining single set of masks tracking the online CPUs <i>sounds</i>
- easier, at least until you try working out all the race conditions
- between grace-period initialization and CPU-hotplug operations.
- For example, suppose initialization is progressing down the
- tree while a CPU-offline operation is progressing up the tree.
- This situation can result in bits set at the top of the tree
- that have no counterparts at the bottom of the tree.
- Those bits will never be cleared, which will result in
- grace-period hangs.
- In short, that way lies madness, to say nothing of a great many
- bugs, hangs, and deadlocks.
-
- <p><font color="ffffff">
- In contrast, the current multi-mask multi-counter scheme ensures
- that grace-period initialization will always see consistent masks
- up and down the tree, which brings significant simplifications
- over the single-mask method.
-
- <p><font color="ffffff">
- This is an instance of
- <a href="http://www.cs.columbia.edu/~library/TR-repository/reports/reports-1992/cucs-039-92.ps.gz"><font color="ffffff">
- deferring work in order to avoid synchronization</a>.
- Lazily recording CPU-hotplug events at the beginning of the next
- grace period greatly simplifies maintenance of the CPU-tracking
- bitmasks in the <tt>rcu_node</tt> tree.
-</font></td></tr>
-<tr><td>&nbsp;</td></tr>
-</table>
-
-<h2><a name="Expedited Grace Period Refinements">
-Expedited Grace Period Refinements</a></h2>
-
-<ol>
-<li> <a href="#Idle-CPU Checks">Idle-CPU checks</a>.
-<li> <a href="#Batching via Sequence Counter">
- Batching via sequence counter</a>.
-<li> <a href="#Funnel Locking and Wait/Wakeup">
- Funnel locking and wait/wakeup</a>.
-<li> <a href="#Use of Workqueues">Use of Workqueues</a>.
-<li> <a href="#Stall Warnings">Stall warnings</a>.
-<li> <a href="#Mid-Boot Operation">Mid-boot operation</a>.
-</ol>
-
-<h3><a name="Idle-CPU Checks">Idle-CPU Checks</a></h3>
-
-<p>
-Each expedited grace period checks for idle CPUs when initially forming
-the mask of CPUs to be IPIed and again just before IPIing a CPU
-(both checks are carried out by <tt>sync_rcu_exp_select_cpus()</tt>).
-If the CPU is idle at any time between those two times, the CPU will
-not be IPIed.
-Instead, the task pushing the grace period forward will include the
-idle CPUs in the mask passed to <tt>rcu_report_exp_cpu_mult()</tt>.
-
-<p>
-For RCU-sched, there is an additional check:
-If the IPI has interrupted the idle loop, then
-<tt>rcu_exp_handler()</tt> invokes <tt>rcu_report_exp_rdp()</tt>
-to report the corresponding quiescent state.
-
-<p>
-For RCU-preempt, there is no specific check for idle in the
-IPI handler (<tt>rcu_exp_handler()</tt>), but because
-RCU read-side critical sections are not permitted within the
-idle loop, if <tt>rcu_exp_handler()</tt> sees that the CPU is within
-RCU read-side critical section, the CPU cannot possibly be idle.
-Otherwise, <tt>rcu_exp_handler()</tt> invokes
-<tt>rcu_report_exp_rdp()</tt> to report the corresponding quiescent
-state, regardless of whether or not that quiescent state was due to
-the CPU being idle.
-
-<p>
-In summary, RCU expedited grace periods check for idle when building
-the bitmask of CPUs that must be IPIed, just before sending each IPI,
-and (either explicitly or implicitly) within the IPI handler.
-
-<h3><a name="Batching via Sequence Counter">
-Batching via Sequence Counter</a></h3>
-
-<p>
-If each grace-period request was carried out separately, expedited
-grace periods would have abysmal scalability and
-problematic high-load characteristics.
-Because each grace-period operation can serve an unlimited number of
-updates, it is important to <i>batch</i> requests, so that a single
-expedited grace-period operation will cover all requests in the
-corresponding batch.
-
-<p>
-This batching is controlled by a sequence counter named
-<tt>-&gt;expedited_sequence</tt> in the <tt>rcu_state</tt> structure.
-This counter has an odd value when there is an expedited grace period
-in progress and an even value otherwise, so that dividing the counter
-value by two gives the number of completed grace periods.
-During any given update request, the counter must transition from
-even to odd and then back to even, thus indicating that a grace
-period has elapsed.
-Therefore, if the initial value of the counter is <tt>s</tt>,
-the updater must wait until the counter reaches at least the
-value <tt>(s+3)&amp;~0x1</tt>.
-This counter is managed by the following access functions:
-
-<ol>
-<li> <tt>rcu_exp_gp_seq_start()</tt>, which marks the start of
- an expedited grace period.
-<li> <tt>rcu_exp_gp_seq_end()</tt>, which marks the end of an
- expedited grace period.
-<li> <tt>rcu_exp_gp_seq_snap()</tt>, which obtains a snapshot of
- the counter.
-<li> <tt>rcu_exp_gp_seq_done()</tt>, which returns <tt>true</tt>
- if a full expedited grace period has elapsed since the
- corresponding call to <tt>rcu_exp_gp_seq_snap()</tt>.
-</ol>
-
-<p>
-Again, only one request in a given batch need actually carry out
-a grace-period operation, which means there must be an efficient
-way to identify which of many concurrent reqeusts will initiate
-the grace period, and that there be an efficient way for the
-remaining requests to wait for that grace period to complete.
-However, that is the topic of the next section.
-
-<h3><a name="Funnel Locking and Wait/Wakeup">
-Funnel Locking and Wait/Wakeup</a></h3>
-
-<p>
-The natural way to sort out which of a batch of updaters will initiate
-the expedited grace period is to use the <tt>rcu_node</tt> combining
-tree, as implemented by the <tt>exp_funnel_lock()</tt> function.
-The first updater corresponding to a given grace period arriving
-at a given <tt>rcu_node</tt> structure records its desired grace-period
-sequence number in the <tt>-&gt;exp_seq_rq</tt> field and moves up
-to the next level in the tree.
-Otherwise, if the <tt>-&gt;exp_seq_rq</tt> field already contains
-the sequence number for the desired grace period or some later one,
-the updater blocks on one of four wait queues in the
-<tt>-&gt;exp_wq[]</tt> array, using the second-from-bottom
-and third-from bottom bits as an index.
-An <tt>-&gt;exp_lock</tt> field in the <tt>rcu_node</tt> structure
-synchronizes access to these fields.
-
-<p>
-An empty <tt>rcu_node</tt> tree is shown in the following diagram,
-with the white cells representing the <tt>-&gt;exp_seq_rq</tt> field
-and the red cells representing the elements of the
-<tt>-&gt;exp_wq[]</tt> array.
-
-<p><img src="Funnel0.svg" alt="Funnel0.svg" width="75%">
-
-<p>
-The next diagram shows the situation after the arrival of Task&nbsp;A
-and Task&nbsp;B at the leftmost and rightmost leaf <tt>rcu_node</tt>
-structures, respectively.
-The current value of the <tt>rcu_state</tt> structure's
-<tt>-&gt;expedited_sequence</tt> field is zero, so adding three and
-clearing the bottom bit results in the value two, which both tasks
-record in the <tt>-&gt;exp_seq_rq</tt> field of their respective
-<tt>rcu_node</tt> structures:
-
-<p><img src="Funnel1.svg" alt="Funnel1.svg" width="75%">
-
-<p>
-Each of Tasks&nbsp;A and&nbsp;B will move up to the root
-<tt>rcu_node</tt> structure.
-Suppose that Task&nbsp;A wins, recording its desired grace-period sequence
-number and resulting in the state shown below:
-
-<p><img src="Funnel2.svg" alt="Funnel2.svg" width="75%">
-
-<p>
-Task&nbsp;A now advances to initiate a new grace period, while Task&nbsp;B
-moves up to the root <tt>rcu_node</tt> structure, and, seeing that
-its desired sequence number is already recorded, blocks on
-<tt>-&gt;exp_wq[1]</tt>.
-
-<table>
-<tr><th>&nbsp;</th></tr>
-<tr><th align="left">Quick Quiz:</th></tr>
-<tr><td>
- Why <tt>-&gt;exp_wq[1]</tt>?
- Given that the value of these tasks' desired sequence number is
- two, so shouldn't they instead block on <tt>-&gt;exp_wq[2]</tt>?
-</td></tr>
-<tr><th align="left">Answer:</th></tr>
-<tr><td bgcolor="#ffffff"><font color="ffffff">
- No.
-
- <p><font color="ffffff">
- Recall that the bottom bit of the desired sequence number indicates
- whether or not a grace period is currently in progress.
- It is therefore necessary to shift the sequence number right one
- bit position to obtain the number of the grace period.
- This results in <tt>-&gt;exp_wq[1]</tt>.
-</font></td></tr>
-<tr><td>&nbsp;</td></tr>
-</table>
-
-<p>
-If Tasks&nbsp;C and&nbsp;D also arrive at this point, they will compute the
-same desired grace-period sequence number, and see that both leaf
-<tt>rcu_node</tt> structures already have that value recorded.
-They will therefore block on their respective <tt>rcu_node</tt>
-structures' <tt>-&gt;exp_wq[1]</tt> fields, as shown below:
-
-<p><img src="Funnel3.svg" alt="Funnel3.svg" width="75%">
-
-<p>
-Task&nbsp;A now acquires the <tt>rcu_state</tt> structure's
-<tt>-&gt;exp_mutex</tt> and initiates the grace period, which
-increments <tt>-&gt;expedited_sequence</tt>.
-Therefore, if Tasks&nbsp;E and&nbsp;F arrive, they will compute
-a desired sequence number of 4 and will record this value as
-shown below:
-
-<p><img src="Funnel4.svg" alt="Funnel4.svg" width="75%">
-
-<p>
-Tasks&nbsp;E and&nbsp;F will propagate up the <tt>rcu_node</tt>
-combining tree, with Task&nbsp;F blocking on the root <tt>rcu_node</tt>
-structure and Task&nbsp;E wait for Task&nbsp;A to finish so that
-it can start the next grace period.
-The resulting state is as shown below:
-
-<p><img src="Funnel5.svg" alt="Funnel5.svg" width="75%">
-
-<p>
-Once the grace period completes, Task&nbsp;A
-starts waking up the tasks waiting for this grace period to complete,
-increments the <tt>-&gt;expedited_sequence</tt>,
-acquires the <tt>-&gt;exp_wake_mutex</tt> and then releases the
-<tt>-&gt;exp_mutex</tt>.
-This results in the following state:
-
-<p><img src="Funnel6.svg" alt="Funnel6.svg" width="75%">
-
-<p>
-Task&nbsp;E can then acquire <tt>-&gt;exp_mutex</tt> and increment
-<tt>-&gt;expedited_sequence</tt> to the value three.
-If new tasks&nbsp;G and&nbsp;H arrive and moves up the combining tree at the
-same time, the state will be as follows:
-
-<p><img src="Funnel7.svg" alt="Funnel7.svg" width="75%">
-
-<p>
-Note that three of the root <tt>rcu_node</tt> structure's
-waitqueues are now occupied.
-However, at some point, Task&nbsp;A will wake up the
-tasks blocked on the <tt>-&gt;exp_wq</tt> waitqueues, resulting
-in the following state:
-
-<p><img src="Funnel8.svg" alt="Funnel8.svg" width="75%">
-
-<p>
-Execution will continue with Tasks&nbsp;E and&nbsp;H completing
-their grace periods and carrying out their wakeups.
-
-<table>
-<tr><th>&nbsp;</th></tr>
-<tr><th align="left">Quick Quiz:</th></tr>
-<tr><td>
- What happens if Task&nbsp;A takes so long to do its wakeups
- that Task&nbsp;E's grace period completes?
-</td></tr>
-<tr><th align="left">Answer:</th></tr>
-<tr><td bgcolor="#ffffff"><font color="ffffff">
- Then Task&nbsp;E will block on the <tt>-&gt;exp_wake_mutex</tt>,
- which will also prevent it from releasing <tt>-&gt;exp_mutex</tt>,
- which in turn will prevent the next grace period from starting.
- This last is important in preventing overflow of the
- <tt>-&gt;exp_wq[]</tt> array.
-</font></td></tr>
-<tr><td>&nbsp;</td></tr>
-</table>
-
-<h3><a name="Use of Workqueues">Use of Workqueues</a></h3>
-
-<p>
-In earlier implementations, the task requesting the expedited
-grace period also drove it to completion.
-This straightforward approach had the disadvantage of needing to
-account for POSIX signals sent to user tasks,
-so more recent implemementations use the Linux kernel's
-<a href="https://www.kernel.org/doc/Documentation/core-api/workqueue.rst">workqueues</a>.
-
-<p>
-The requesting task still does counter snapshotting and funnel-lock
-processing, but the task reaching the top of the funnel lock
-does a <tt>schedule_work()</tt> (from <tt>_synchronize_rcu_expedited()</tt>
-so that a workqueue kthread does the actual grace-period processing.
-Because workqueue kthreads do not accept POSIX signals, grace-period-wait
-processing need not allow for POSIX signals.
-
-In addition, this approach allows wakeups for the previous expedited
-grace period to be overlapped with processing for the next expedited
-grace period.
-Because there are only four sets of waitqueues, it is necessary to
-ensure that the previous grace period's wakeups complete before the
-next grace period's wakeups start.
-This is handled by having the <tt>-&gt;exp_mutex</tt>
-guard expedited grace-period processing and the
-<tt>-&gt;exp_wake_mutex</tt> guard wakeups.
-The key point is that the <tt>-&gt;exp_mutex</tt> is not released
-until the first wakeup is complete, which means that the
-<tt>-&gt;exp_wake_mutex</tt> has already been acquired at that point.
-This approach ensures that the previous grace period's wakeups can
-be carried out while the current grace period is in process, but
-that these wakeups will complete before the next grace period starts.
-This means that only three waitqueues are required, guaranteeing that
-the four that are provided are sufficient.
-
-<h3><a name="Stall Warnings">Stall Warnings</a></h3>
-
-<p>
-Expediting grace periods does nothing to speed things up when RCU
-readers take too long, and therefore expedited grace periods check
-for stalls just as normal grace periods do.
-
-<table>
-<tr><th>&nbsp;</th></tr>
-<tr><th align="left">Quick Quiz:</th></tr>
-<tr><td>
- But why not just let the normal grace-period machinery
- detect the stalls, given that a given reader must block
- both normal and expedited grace periods?
-</td></tr>
-<tr><th align="left">Answer:</th></tr>
-<tr><td bgcolor="#ffffff"><font color="ffffff">
- Because it is quite possible that at a given time there
- is no normal grace period in progress, in which case the
- normal grace period cannot emit a stall warning.
-</font></td></tr>
-<tr><td>&nbsp;</td></tr>
-</table>
-
-The <tt>synchronize_sched_expedited_wait()</tt> function loops waiting
-for the expedited grace period to end, but with a timeout set to the
-current RCU CPU stall-warning time.
-If this time is exceeded, any CPUs or <tt>rcu_node</tt> structures
-blocking the current grace period are printed.
-Each stall warning results in another pass through the loop, but the
-second and subsequent passes use longer stall times.
-
-<h3><a name="Mid-Boot Operation">Mid-boot operation</a></h3>
-
-<p>
-The use of workqueues has the advantage that the expedited
-grace-period code need not worry about POSIX signals.
-Unfortunately, it has the
-corresponding disadvantage that workqueues cannot be used until
-they are initialized, which does not happen until some time after
-the scheduler spawns the first task.
-Given that there are parts of the kernel that really do want to
-execute grace periods during this mid-boot &ldquo;dead zone&rdquo;,
-expedited grace periods must do something else during thie time.
-
-<p>
-What they do is to fall back to the old practice of requiring that the
-requesting task drive the expedited grace period, as was the case
-before the use of workqueues.
-However, the requesting task is only required to drive the grace period
-during the mid-boot dead zone.
-Before mid-boot, a synchronous grace period is a no-op.
-Some time after mid-boot, workqueues are used.
-
-<p>
-Non-expedited non-SRCU synchronous grace periods must also operate
-normally during mid-boot.
-This is handled by causing non-expedited grace periods to take the
-expedited code path during mid-boot.
-
-<p>
-The current code assumes that there are no POSIX signals during
-the mid-boot dead zone.
-However, if an overwhelming need for POSIX signals somehow arises,
-appropriate adjustments can be made to the expedited stall-warning code.
-One such adjustment would reinstate the pre-workqueue stall-warning
-checks, but only during the mid-boot dead zone.
-
-<p>
-With this refinement, synchronous grace periods can now be used from
-task context pretty much any time during the life of the kernel.
-That is, aside from some points in the suspend, hibernate, or shutdown
-code path.
-
-<h3><a name="Summary">
-Summary</a></h3>
-
-<p>
-Expedited grace periods use a sequence-number approach to promote
-batching, so that a single grace-period operation can serve numerous
-requests.
-A funnel lock is used to efficiently identify the one task out of
-a concurrent group that will request the grace period.
-All members of the group will block on waitqueues provided in
-the <tt>rcu_node</tt> structure.
-The actual grace-period processing is carried out by a workqueue.
-
-<p>
-CPU-hotplug operations are noted lazily in order to prevent the need
-for tight synchronization between expedited grace periods and
-CPU-hotplug operations.
-The dyntick-idle counters are used to avoid sending IPIs to idle CPUs,
-at least in the common case.
-RCU-preempt and RCU-sched use different IPI handlers and different
-code to respond to the state changes carried out by those handlers,
-but otherwise use common code.
-
-<p>
-Quiescent states are tracked using the <tt>rcu_node</tt> tree,
-and once all necessary quiescent states have been reported,
-all tasks waiting on this expedited grace period are awakened.
-A pair of mutexes are used to allow one grace period's wakeups
-to proceed concurrently with the next grace period's processing.
-
-<p>
-This combination of mechanisms allows expedited grace periods to
-run reasonably efficiently.
-However, for non-time-critical tasks, normal grace periods should be
-used instead because their longer duration permits much higher
-degrees of batching, and thus much lower per-request overheads.
-
-</body></html>
diff --git a/Documentation/RCU/Design/Expedited-Grace-Periods/Expedited-Grace-Periods.rst b/Documentation/RCU/Design/Expedited-Grace-Periods/Expedited-Grace-Periods.rst
new file mode 100644
index 000000000000..72f0f6fbd53c
--- /dev/null
+++ b/Documentation/RCU/Design/Expedited-Grace-Periods/Expedited-Grace-Periods.rst
@@ -0,0 +1,521 @@
+=================================================
+A Tour Through TREE_RCU's Expedited Grace Periods
+=================================================
+
+Introduction
+============
+
+This document describes RCU's expedited grace periods.
+Unlike RCU's normal grace periods, which accept long latencies to attain
+high efficiency and minimal disturbance, expedited grace periods accept
+lower efficiency and significant disturbance to attain shorter latencies.
+
+There are two flavors of RCU (RCU-preempt and RCU-sched), with an earlier
+third RCU-bh flavor having been implemented in terms of the other two.
+Each of the two implementations is covered in its own section.
+
+Expedited Grace Period Design
+=============================
+
+The expedited RCU grace periods cannot be accused of being subtle,
+given that they for all intents and purposes hammer every CPU that
+has not yet provided a quiescent state for the current expedited
+grace period.
+The one saving grace is that the hammer has grown a bit smaller
+over time: The old call to ``try_stop_cpus()`` has been
+replaced with a set of calls to ``smp_call_function_single()``,
+each of which results in an IPI to the target CPU.
+The corresponding handler function checks the CPU's state, motivating
+a faster quiescent state where possible, and triggering a report
+of that quiescent state.
+As always for RCU, once everything has spent some time in a quiescent
+state, the expedited grace period has completed.
+
+The details of the ``smp_call_function_single()`` handler's
+operation depend on the RCU flavor, as described in the following
+sections.
+
+RCU-preempt Expedited Grace Periods
+===================================
+
+``CONFIG_PREEMPT=y`` kernels implement RCU-preempt.
+The overall flow of the handling of a given CPU by an RCU-preempt
+expedited grace period is shown in the following diagram:
+
+.. kernel-figure:: ExpRCUFlow.svg
+
+The solid arrows denote direct action, for example, a function call.
+The dotted arrows denote indirect action, for example, an IPI
+or a state that is reached after some time.
+
+If a given CPU is offline or idle, ``synchronize_rcu_expedited()``
+will ignore it because idle and offline CPUs are already residing
+in quiescent states.
+Otherwise, the expedited grace period will use
+``smp_call_function_single()`` to send the CPU an IPI, which
+is handled by ``rcu_exp_handler()``.
+
+However, because this is preemptible RCU, ``rcu_exp_handler()``
+can check to see if the CPU is currently running in an RCU read-side
+critical section.
+If not, the handler can immediately report a quiescent state.
+Otherwise, it sets flags so that the outermost ``rcu_read_unlock()``
+invocation will provide the needed quiescent-state report.
+This flag-setting avoids the previous forced preemption of all
+CPUs that might have RCU read-side critical sections.
+In addition, this flag-setting is done so as to avoid increasing
+the overhead of the common-case fastpath through the scheduler.
+
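+A hedged, simplified sketch of this check-and-flag logic (the actual
+``CONFIG_PREEMPT=y`` handler lives in ``kernel/rcu/tree_exp.h`` and
+copes with additional races):
+
+::
+
+   static void rcu_exp_handler(void *unused)
+   {
+           struct task_struct *t = current;
+
+           if (!t->rcu_read_lock_nesting) {
+                   /* No read-side critical section: report the QS now. */
+                   rcu_report_exp_rdp(this_cpu_ptr(&rcu_data));
+           } else {
+                   /* Make the outermost rcu_read_unlock() do the report. */
+                   t->rcu_read_unlock_special.b.exp_hint = true;
+           }
+   }
+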
+Again because this is preemptible RCU, an RCU read-side critical section
+can be preempted.
+When that happens, RCU will enqueue the task, which will then continue to
+block the current expedited grace period until it resumes and finds its
+outermost ``rcu_read_unlock()``.
+The CPU will report a quiescent state just after enqueuing the task because
+the CPU is no longer blocking the grace period.
+It is instead the preempted task doing the blocking.
+The list of blocked tasks is managed by ``rcu_preempt_ctxt_queue()``,
+which is called from ``rcu_preempt_note_context_switch()``, which
+in turn is called from ``rcu_note_context_switch()``, which in
+turn is called from the scheduler.
+
+
++-----------------------------------------------------------------------+
+| **Quick Quiz**: |
++-----------------------------------------------------------------------+
+| Why not just have the expedited grace period check the state of all |
+| the CPUs? After all, that would avoid all those real-time-unfriendly |
+| IPIs. |
++-----------------------------------------------------------------------+
+| **Answer**: |
++-----------------------------------------------------------------------+
+| Because we want the RCU read-side critical sections to run fast, |
+| which means no memory barriers. Therefore, it is not possible to |
+| safely check the state from some other CPU. And even if it was |
+| possible to safely check the state, it would still be necessary to |
+| IPI the CPU to safely interact with the upcoming |
+| ``rcu_read_unlock()`` invocation, which means that the remote state |
+| testing would not help the worst-case latency that real-time |
+| applications care about. |
+| |
+| One way to prevent your real-time application from getting hit with |
+| these IPIs is to build your kernel with ``CONFIG_NO_HZ_FULL=y``. RCU |
+| would then perceive the CPU running your application as being idle, |
+| and it would be able to safely detect that state without needing to |
+| IPI the CPU. |
++-----------------------------------------------------------------------+
+
+Please note that this is just the overall flow: Additional complications
+can arise due to races with CPUs going idle or offline, among other
+things.
+
+RCU-sched Expedited Grace Periods
+=================================
+
+``CONFIG_PREEMPT=n`` kernels implement RCU-sched. The overall flow of
+the handling of a given CPU by an RCU-sched expedited grace period is
+shown in the following diagram:
+
+.. kernel-figure:: ExpSchedFlow.svg
+
+As with RCU-preempt, RCU-sched's ``synchronize_rcu_expedited()`` ignores
+offline and idle CPUs, again because they are in remotely detectable
+quiescent states. However, because the ``rcu_read_lock_sched()`` and
+``rcu_read_unlock_sched()`` leave no trace of their invocation, in
+general it is not possible to tell whether or not the current CPU is in
+an RCU read-side critical section. The best that RCU-sched's
+``rcu_exp_handler()`` can do is to check for idle, on the off-chance
+that the CPU went idle while the IPI was in flight. If the CPU is idle,
+then ``rcu_exp_handler()`` reports the quiescent state.
+
+Otherwise, the handler forces a future context switch by setting the
+NEED_RESCHED flag in the current task's thread flags and in the CPU's
+preempt counter. At the time of the context switch, the CPU reports the
+quiescent state. Should the CPU go offline first, it will report the
+quiescent state at that time.
+
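+A hedged, simplified sketch of this handler (the actual
+``CONFIG_PREEMPT=n`` ``rcu_exp_handler()`` also copes with quiescent
+states that are already pending):
+
+::
+
+   static void rcu_exp_handler(void *unused)
+   {
+           if (is_idle_task(current)) {
+                   /* The IPI interrupted the idle loop: QS right now. */
+                   rcu_report_exp_rdp(this_cpu_ptr(&rcu_data));
+                   return;
+           }
+           /* Otherwise, force a context switch that reports the QS. */
+           set_tsk_need_resched(current);
+           set_preempt_need_resched();
+   }
+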
+Expedited Grace Period and CPU Hotplug
+--------------------------------------
+
+The expedited nature of expedited grace periods requires a much tighter
+interaction with CPU hotplug operations than is required for normal
+grace periods. In addition, attempting to IPI offline CPUs will result
+in splats, but failing to IPI online CPUs can result in too-short grace
+periods. Neither option is acceptable in production kernels.
+
+The interaction between expedited grace periods and CPU hotplug
+operations is carried out at several levels:
+
+#. The number of CPUs that have ever been online is tracked by the
+ ``rcu_state`` structure's ``->ncpus`` field. The ``rcu_state``
+ structure's ``->ncpus_snap`` field tracks the number of CPUs that
+ have ever been online at the beginning of an RCU expedited grace
+ period. Note that this number never decreases, at least in the
+ absence of a time machine.
+#. The identities of the CPUs that have ever been online are tracked by
+ the ``rcu_node`` structure's ``->expmaskinitnext`` field. The
+ ``rcu_node`` structure's ``->expmaskinit`` field tracks the
+ identities of the CPUs that were online at least once at the
+ beginning of the most recent RCU expedited grace period. The
+ ``rcu_state`` structure's ``->ncpus`` and ``->ncpus_snap`` fields are
+ used to detect when new CPUs have come online for the first time,
+ that is, when the ``rcu_node`` structure's ``->expmaskinitnext``
+ field has changed since the beginning of the last RCU expedited grace
+ period, which triggers an update of each ``rcu_node`` structure's
+ ``->expmaskinit`` field from its ``->expmaskinitnext`` field.
+#. Each ``rcu_node`` structure's ``->expmaskinit`` field is used to
+ initialize that structure's ``->expmask`` at the beginning of each
+ RCU expedited grace period. This means that only those CPUs that have
+ been online at least once will be considered for a given grace
+ period.
+#. Any CPU that goes offline will clear its bit in its leaf ``rcu_node``
+ structure's ``->qsmaskinitnext`` field, so any CPU with that bit
+ clear can safely be ignored. However, it is possible for a CPU coming
+ online or going offline to have this bit set for some time while
+ ``cpu_online`` returns ``false``.
+#. For each non-idle CPU that RCU believes is currently online, the
+ grace period invokes ``smp_call_function_single()``. If this
+ succeeds, the CPU was fully online. Failure indicates that the CPU is
+ in the process of coming online or going offline, in which case it is
+ necessary to wait for a short time period and try again. The purpose
+ of this wait (or series of waits, as the case may be) is to permit a
+ concurrent CPU-hotplug operation to complete.
+#. In the case of RCU-sched, one of the last acts of an outgoing CPU is
+ to invoke ``rcu_report_dead()``, which reports a quiescent state for
+ that CPU. However, this is likely paranoia-induced redundancy.
+
++-----------------------------------------------------------------------+
+| **Quick Quiz**: |
++-----------------------------------------------------------------------+
+| Why all the dancing around with multiple counters and masks tracking |
+| CPUs that were once online? Why not just have a single set of masks |
+| tracking the currently online CPUs and be done with it? |
++-----------------------------------------------------------------------+
+| **Answer**: |
++-----------------------------------------------------------------------+
+| Maintaining a single set of masks tracking the online CPUs *sounds*   |
+| easier, at least until you try working out all the race conditions |
+| between grace-period initialization and CPU-hotplug operations. For |
+| example, suppose initialization is progressing down the tree while a |
+| CPU-offline operation is progressing up the tree. This situation can |
+| result in bits set at the top of the tree that have no counterparts |
+| at the bottom of the tree. Those bits will never be cleared, which |
+| will result in grace-period hangs. In short, that way lies madness, |
+| to say nothing of a great many bugs, hangs, and deadlocks.            |
+|                                                                       |
+| In contrast, the current multi-mask multi-counter scheme ensures that |
+| grace-period initialization will always see consistent masks up and |
+| down the tree, which brings significant simplifications over the |
+| single-mask method. |
+| |
+| This is an instance of `deferring work in order to avoid |
+| synchronization <http://www.cs.columbia.edu/~library/TR-repository/re |
+| ports/reports-1992/cucs-039-92.ps.gz>`__. |
+| Lazily recording CPU-hotplug events at the beginning of the next |
+| grace period greatly simplifies maintenance of the CPU-tracking |
+| bitmasks in the ``rcu_node`` tree. |
++-----------------------------------------------------------------------+
+
+Expedited Grace Period Refinements
+==================================
+
+Idle-CPU Checks
+~~~~~~~~~~~~~~~
+
+Each expedited grace period checks for idle CPUs when initially forming
+the mask of CPUs to be IPIed and again just before IPIing a CPU (both
+checks are carried out by ``sync_rcu_exp_select_cpus()``). If the CPU is
+idle at any time between those two times, the CPU will not be IPIed.
+Instead, the task pushing the grace period forward will include the idle
+CPUs in the mask passed to ``rcu_report_exp_cpu_mult()``.
+
+For RCU-sched, there is an additional check: If the IPI has interrupted
+the idle loop, then ``rcu_exp_handler()`` invokes
+``rcu_report_exp_rdp()`` to report the corresponding quiescent state.
+
+For RCU-preempt, there is no specific check for idle in the IPI handler
+(``rcu_exp_handler()``), but because RCU read-side critical sections are
+not permitted within the idle loop, if ``rcu_exp_handler()`` sees that
+the CPU is within RCU read-side critical section, the CPU cannot
+possibly be idle. Otherwise, ``rcu_exp_handler()`` invokes
+``rcu_report_exp_rdp()`` to report the corresponding quiescent state,
+regardless of whether or not that quiescent state was due to the CPU
+being idle.
+
+In summary, RCU expedited grace periods check for idle when building the
+bitmask of CPUs that must be IPIed, just before sending each IPI, and
+(either explicitly or implicitly) within the IPI handler.
+
+Batching via Sequence Counter
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+If each grace-period request was carried out separately, expedited grace
+periods would have abysmal scalability and problematic high-load
+characteristics. Because each grace-period operation can serve an
+unlimited number of updates, it is important to *batch* requests, so
+that a single expedited grace-period operation will cover all requests
+in the corresponding batch.
+
+This batching is controlled by a sequence counter named
+``->expedited_sequence`` in the ``rcu_state`` structure. This counter
+has an odd value when there is an expedited grace period in progress and
+an even value otherwise, so that dividing the counter value by two gives
+the number of completed grace periods. During any given update request,
+the counter must transition from even to odd and then back to even, thus
+indicating that a grace period has elapsed. Therefore, if the initial
+value of the counter is ``s``, the updater must wait until the counter
+reaches at least the value ``(s+3)&~0x1``. This counter is managed by
+the following access functions:
+
+#. ``rcu_exp_gp_seq_start()``, which marks the start of an expedited
+ grace period.
+#. ``rcu_exp_gp_seq_end()``, which marks the end of an expedited grace
+ period.
+#. ``rcu_exp_gp_seq_snap()``, which obtains a snapshot of the counter.
+#. ``rcu_exp_gp_seq_done()``, which returns ``true`` if a full expedited
+ grace period has elapsed since the corresponding call to
+ ``rcu_exp_gp_seq_snap()``.
+
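+As a worked sketch of this arithmetic (hypothetical helper names; the
+kernel's equivalents live in ``kernel/rcu/rcu.h``):
+
+::
+
+   /* Value the counter must reach before a full GP has elapsed. */
+   static unsigned long exp_seq_snap(unsigned long s)
+   {
+           return (s + 3) & ~0x1UL;  /* For example, 0 -> 2 and 1 -> 4. */
+   }
+
+   /* Wraparound-safe: has the counter reached the snapshot value? */
+   static bool exp_seq_done(unsigned long cur, unsigned long snap)
+   {
+           return (long)(cur - snap) >= 0;
+   }
+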
+Again, only one request in a given batch need actually carry out a
+grace-period operation, which means there must be an efficient way to
+identify which of many concurrent requests will initiate the grace
+period, and that there be an efficient way for the remaining requests to
+wait for that grace period to complete. However, that is the topic of
+the next section.
+
+Funnel Locking and Wait/Wakeup
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The natural way to sort out which of a batch of updaters will initiate
+the expedited grace period is to use the ``rcu_node`` combining tree, as
+implemented by the ``exp_funnel_lock()`` function. The first updater
+corresponding to a given grace period arriving at a given ``rcu_node``
+structure records its desired grace-period sequence number in the
+``->exp_seq_rq`` field and moves up to the next level in the tree.
+Otherwise, if the ``->exp_seq_rq`` field already contains the sequence
+number for the desired grace period or some later one, the updater
+blocks on one of four wait queues in the ``->exp_wq[]`` array, using the
+second-from-bottom and third-from-bottom bits as an index. An
+``->exp_lock`` field in the ``rcu_node`` structure synchronizes access
+to these fields.
+
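+A hedged one-line helper capturing the index computation just described
+(hypothetical name; the kernel open-codes the equivalent):
+
+::
+
+   /* Map a desired GP sequence number to one of the four wait queues. */
+   static int exp_wq_index(unsigned long s)
+   {
+           return (s >> 1) & 0x3;  /* Second- and third-from-bottom bits. */
+   }
+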
+An empty ``rcu_node`` tree is shown in the following diagram, with the
+white cells representing the ``->exp_seq_rq`` field and the red cells
+representing the elements of the ``->exp_wq[]`` array.
+
+.. kernel-figure:: Funnel0.svg
+
+The next diagram shows the situation after the arrival of Task A and
+Task B at the leftmost and rightmost leaf ``rcu_node`` structures,
+respectively. The current value of the ``rcu_state`` structure's
+``->expedited_sequence`` field is zero, so adding three and clearing the
+bottom bit results in the value two, which both tasks record in the
+``->exp_seq_rq`` field of their respective ``rcu_node`` structures:
+
+.. kernel-figure:: Funnel1.svg
+
+Each of Tasks A and B will move up to the root ``rcu_node`` structure.
+Suppose that Task A wins, recording its desired grace-period sequence
+number and resulting in the state shown below:
+
+.. kernel-figure:: Funnel2.svg
+
+Task A now advances to initiate a new grace period, while Task B moves
+up to the root ``rcu_node`` structure, and, seeing that its desired
+sequence number is already recorded, blocks on ``->exp_wq[1]``.
+
++-----------------------------------------------------------------------+
+| **Quick Quiz**: |
++-----------------------------------------------------------------------+
+| Why ``->exp_wq[1]``? Given that the value of these tasks' desired |
+| sequence number is two, shouldn't they instead block on               |
+| ``->exp_wq[2]``? |
++-----------------------------------------------------------------------+
+| **Answer**: |
++-----------------------------------------------------------------------+
+| No.                                                                   |
+|                                                                       |
+| Recall that the bottom bit of the desired sequence number indicates |
+| whether or not a grace period is currently in progress. It is |
+| therefore necessary to shift the sequence number right one bit |
+| position to obtain the number of the grace period. This results in |
+| ``->exp_wq[1]``. |
++-----------------------------------------------------------------------+
+
+If Tasks C and D also arrive at this point, they will compute the same
+desired grace-period sequence number, and see that both leaf
+``rcu_node`` structures already have that value recorded. They will
+therefore block on their respective ``rcu_node`` structures'
+``->exp_wq[1]`` fields, as shown below:
+
+.. kernel-figure:: Funnel3.svg
+
+Task A now acquires the ``rcu_state`` structure's ``->exp_mutex`` and
+initiates the grace period, which increments ``->expedited_sequence``.
+Therefore, if Tasks E and F arrive, they will compute a desired sequence
+number of 4 and will record this value as shown below:
+
+.. kernel-figure:: Funnel4.svg
+
+Tasks E and F will propagate up the ``rcu_node`` combining tree, with
+Task F blocking on the root ``rcu_node`` structure and Task E waiting
+for Task A to finish so that it can start the next grace period. The
+resulting state is as shown below:
+
+.. kernel-figure:: Funnel5.svg
+
+Once the grace period completes, Task A starts waking up the tasks
+waiting for this grace period to complete, increments the
+``->expedited_sequence``, acquires the ``->exp_wake_mutex`` and then
+releases the ``->exp_mutex``. This results in the following state:
+
+.. kernel-figure:: Funnel6.svg
+
+Task E can then acquire ``->exp_mutex`` and increment
+``->expedited_sequence`` to the value three. If new tasks G and H
+arrive and move up the combining tree at the same time, the state will
+be as follows:
+
+.. kernel-figure:: Funnel7.svg
+
+Note that three of the root ``rcu_node`` structure's waitqueues are now
+occupied. However, at some point, Task A will wake up the tasks blocked
+on the ``->exp_wq`` waitqueues, resulting in the following state:
+
+.. kernel-figure:: Funnel8.svg
+
+Execution will continue with Tasks E and H completing their grace
+periods and carrying out their wakeups.
+
++-----------------------------------------------------------------------+
+| **Quick Quiz**: |
++-----------------------------------------------------------------------+
+| What happens if Task A takes so long to do its wakeups that Task E's |
+| grace period completes? |
++-----------------------------------------------------------------------+
+| **Answer**: |
++-----------------------------------------------------------------------+
+| Then Task E will block on the ``->exp_wake_mutex``, which will also |
+| prevent it from releasing ``->exp_mutex``, which in turn will prevent |
+| the next grace period from starting. This last is important in |
+| preventing overflow of the ``->exp_wq[]`` array. |
++-----------------------------------------------------------------------+
+
+Use of Workqueues
+~~~~~~~~~~~~~~~~~
+
+In earlier implementations, the task requesting the expedited grace
+period also drove it to completion. This straightforward approach had
+the disadvantage of needing to account for POSIX signals sent to user
+tasks, so more recent implementations use the Linux kernel's
+`workqueues <https://www.kernel.org/doc/Documentation/core-api/workqueue.rst>`__.
+
+The requesting task still does counter snapshotting and funnel-lock
+processing, but the task reaching the top of the funnel lock does a
+``schedule_work()`` (from ``_synchronize_rcu_expedited()``) so that a
+workqueue kthread does the actual grace-period processing. Because
+workqueue kthreads do not accept POSIX signals, grace-period-wait
+processing need not allow for POSIX signals. In addition, this approach
+allows wakeups for the previous expedited grace period to be overlapped
+with processing for the next expedited grace period. Because there are
+only four sets of waitqueues, it is necessary to ensure that the
+previous grace period's wakeups complete before the next grace period's
+wakeups start. This is handled by having the ``->exp_mutex`` guard
+expedited grace-period processing and the ``->exp_wake_mutex`` guard
+wakeups. The key point is that the ``->exp_mutex`` is not released until
+the first wakeup is complete, which means that the ``->exp_wake_mutex``
+has already been acquired at that point. This approach ensures that the
+previous grace period's wakeups can be carried out while the current
+grace period is in process, but that these wakeups will complete before
+the next grace period starts. This means that only three waitqueues are
+required, guaranteeing that the four that are provided are sufficient.
+
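+A hedged sketch of this hand-off, using hypothetical structure and
+function names (the kernel's actual code lives in
+``kernel/rcu/tree_exp.h``):
+
+::
+
+   struct exp_gp_work {
+           struct work_struct work;
+           unsigned long s;        /* Desired GP sequence number. */
+   };
+
+   static void exp_gp_workfn(struct work_struct *wp)
+   {
+           /* Workqueue kthread context: no POSIX signals to handle. */
+           /* ... drive the expedited grace period to completion ... */
+   }
+
+   static void queue_exp_gp(struct exp_gp_work *egw)
+   {
+           INIT_WORK(&egw->work, exp_gp_workfn);
+           schedule_work(&egw->work);
+   }
+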
+Stall Warnings
+~~~~~~~~~~~~~~
+
+Expediting grace periods does nothing to speed things up when RCU
+readers take too long, and therefore expedited grace periods check for
+stalls just as normal grace periods do.
+
++-----------------------------------------------------------------------+
+| **Quick Quiz**: |
++-----------------------------------------------------------------------+
+| But why not just let the normal grace-period machinery detect the |
+| stalls, given that a given reader must block both normal and |
+| expedited grace periods? |
++-----------------------------------------------------------------------+
+| **Answer**: |
++-----------------------------------------------------------------------+
+| Because it is quite possible that at a given time there is no normal |
+| grace period in progress, in which case the normal grace period |
+| cannot emit a stall warning. |
++-----------------------------------------------------------------------+
+
+The ``synchronize_sched_expedited_wait()`` function loops waiting for
+the expedited grace period to end, but with a timeout set to the current
+RCU CPU stall-warning time. If this time is exceeded, any CPUs or
+``rcu_node`` structures blocking the current grace period are printed.
+Each stall warning results in another pass through the loop, but the
+second and subsequent passes use longer stall times.
+
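+In outline (a hedged sketch with hypothetical helpers, not the
+kernel's actual loop):
+
+::
+
+   unsigned long timeout = exp_stall_timeout();    /* Hypothetical. */
+
+   for (;;) {
+           if (wait_event_timeout(wq, exp_gp_done(s), timeout))
+                   break;                  /* Grace period ended in time. */
+           print_exp_stall_report();       /* CPUs/rcu_node blocking GP. */
+           timeout *= 2;                   /* Later passes wait longer. */
+   }
+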
+Mid-Boot Operation
+~~~~~~~~~~~~~~~~~~
+
+The use of workqueues has the advantage that the expedited grace-period
+code need not worry about POSIX signals. Unfortunately, it has the
+corresponding disadvantage that workqueues cannot be used until they are
+initialized, which does not happen until some time after the scheduler
+spawns the first task. Given that there are parts of the kernel that
+really do want to execute grace periods during this mid-boot “dead
+zone”, expedited grace periods must do something else during this time.
+
+What they do is to fall back to the old practice of requiring that the
+requesting task drive the expedited grace period, as was the case before
+the use of workqueues. However, the requesting task is only required to
+drive the grace period during the mid-boot dead zone. Before mid-boot, a
+synchronous grace period is a no-op. Some time after mid-boot,
+workqueues are used.
+
+Non-expedited non-SRCU synchronous grace periods must also operate
+normally during mid-boot. This is handled by causing non-expedited grace
+periods to take the expedited code path during mid-boot.
+
+The current code assumes that there are no POSIX signals during the
+mid-boot dead zone. However, if an overwhelming need for POSIX signals
+somehow arises, appropriate adjustments can be made to the expedited
+stall-warning code. One such adjustment would reinstate the
+pre-workqueue stall-warning checks, but only during the mid-boot dead
+zone.
+
+With this refinement, synchronous grace periods can now be used from
+task context pretty much any time during the life of the kernel, aside
+from some points in the suspend, hibernate, or shutdown code path.
+
+Summary
+~~~~~~~
+
+Expedited grace periods use a sequence-number approach to promote
+batching, so that a single grace-period operation can serve numerous
+requests. A funnel lock is used to efficiently identify the one task out
+of a concurrent group that will request the grace period. All members of
+the group will block on waitqueues provided in the ``rcu_node``
+structure. The actual grace-period processing is carried out by a
+workqueue.
+
+CPU-hotplug operations are noted lazily in order to prevent the need for
+tight synchronization between expedited grace periods and CPU-hotplug
+operations. The dyntick-idle counters are used to avoid sending IPIs to
+idle CPUs, at least in the common case. RCU-preempt and RCU-sched use
+different IPI handlers and different code to respond to the state
+changes carried out by those handlers, but otherwise use common code.
+
+Quiescent states are tracked using the ``rcu_node`` tree, and once all
+necessary quiescent states have been reported, all tasks waiting on this
+expedited grace period are awakened. A pair of mutexes are used to allow
+one grace period's wakeups to proceed concurrently with the next grace
+period's processing.
+
+This combination of mechanisms allows expedited grace periods to run
+reasonably efficiently. However, for non-time-critical tasks, normal
+grace periods should be used instead because their longer duration
+permits much higher degrees of batching, and thus much lower per-request
+overheads.
diff --git a/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Diagram.html b/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Diagram.html
deleted file mode 100644
index e5b42a798ff3..000000000000
--- a/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Diagram.html
+++ /dev/null
@@ -1,9 +0,0 @@
-<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
- "http://www.w3.org/TR/html4/loose.dtd">
- <html>
- <head><title>A Diagram of TREE_RCU's Grace-Period Memory Ordering</title>
- <meta HTTP-EQUIV="Content-Type" CONTENT="text/html; charset=iso-8859-1">
-
-<p><img src="TreeRCU-gp.svg" alt="TreeRCU-gp.svg">
-
-</body></html>
diff --git a/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.html b/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.html
deleted file mode 100644
index c64f8d26609f..000000000000
--- a/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.html
+++ /dev/null
@@ -1,704 +0,0 @@
-<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
- "http://www.w3.org/TR/html4/loose.dtd">
- <html>
- <head><title>A Tour Through TREE_RCU's Grace-Period Memory Ordering</title>
- <meta HTTP-EQUIV="Content-Type" CONTENT="text/html; charset=iso-8859-1">
-
- <p>August 8, 2017</p>
- <p>This article was contributed by Paul E.&nbsp;McKenney</p>
-
-<h3>Introduction</h3>
-
-<p>This document gives a rough visual overview of how Tree RCU's
-grace-period memory ordering guarantee is provided.
-
-<ol>
-<li> <a href="#What Is Tree RCU's Grace Period Memory Ordering Guarantee?">
- What Is Tree RCU's Grace Period Memory Ordering Guarantee?</a>
-<li> <a href="#Tree RCU Grace Period Memory Ordering Building Blocks">
- Tree RCU Grace Period Memory Ordering Building Blocks</a>
-<li> <a href="#Tree RCU Grace Period Memory Ordering Components">
- Tree RCU Grace Period Memory Ordering Components</a>
-<li> <a href="#Putting It All Together">Putting It All Together</a>
-</ol>
-
-<h3><a name="What Is Tree RCU's Grace Period Memory Ordering Guarantee?">
-What Is Tree RCU's Grace Period Memory Ordering Guarantee?</a></h3>
-
-<p>RCU grace periods provide extremely strong memory-ordering guarantees
-for non-idle non-offline code.
-Any code that happens after the end of a given RCU grace period is guaranteed
-to see the effects of all accesses prior to the beginning of that grace
-period that are within RCU read-side critical sections.
-Similarly, any code that happens before the beginning of a given RCU grace
-period is guaranteed to see the effects of all accesses following the end
-of that grace period that are within RCU read-side critical sections.
-
-<p>Note well that RCU-sched read-side critical sections include any region
-of code for which preemption is disabled.
-Given that each individual machine instruction can be thought of as
-an extremely small region of preemption-disabled code, one can think of
-<tt>synchronize_rcu()</tt> as <tt>smp_mb()</tt> on steroids.
-
-<p>RCU updaters use this guarantee by splitting their updates into
-two phases, one of which is executed before the grace period and
-the other of which is executed after the grace period.
-In the most common use case, phase one removes an element from
-a linked RCU-protected data structure, and phase two frees that element.
-For this to work, any readers that have witnessed state prior to the
-phase-one update (in the common case, removal) must not witness state
-following the phase-two update (in the common case, freeing).
-
-<p>The RCU implementation provides this guarantee using a network
-of lock-based critical sections, memory barriers, and per-CPU
-processing, as is described in the following sections.
-
-<h3><a name="Tree RCU Grace Period Memory Ordering Building Blocks">
-Tree RCU Grace Period Memory Ordering Building Blocks</a></h3>
-
-<p>The workhorse for RCU's grace-period memory ordering is the
-critical section for the <tt>rcu_node</tt> structure's
-<tt>-&gt;lock</tt>.
-These critical sections use helper functions for lock acquisition, including
-<tt>raw_spin_lock_rcu_node()</tt>,
-<tt>raw_spin_lock_irq_rcu_node()</tt>, and
-<tt>raw_spin_lock_irqsave_rcu_node()</tt>.
-Their lock-release counterparts are
-<tt>raw_spin_unlock_rcu_node()</tt>,
-<tt>raw_spin_unlock_irq_rcu_node()</tt>, and
-<tt>raw_spin_unlock_irqrestore_rcu_node()</tt>,
-respectively.
-For completeness, a
-<tt>raw_spin_trylock_rcu_node()</tt>
-is also provided.
-The key point is that the lock-acquisition functions, including
-<tt>raw_spin_trylock_rcu_node()</tt>, all invoke
-<tt>smp_mb__after_unlock_lock()</tt> immediately after successful
-acquisition of the lock.
-
-<p>Therefore, for any given <tt>rcu_node</tt> structure, any access
-happening before one of the above lock-release functions will be seen
-by all CPUs as happening before any access happening after a later
-one of the above lock-acquisition functions.
-Furthermore, any access happening before one of the
-above lock-release function on any given CPU will be seen by all
-CPUs as happening before any access happening after a later one
-of the above lock-acquisition functions executing on that same CPU,
-even if the lock-release and lock-acquisition functions are operating
-on different <tt>rcu_node</tt> structures.
-Tree RCU uses these two ordering guarantees to form an ordering
-network among all CPUs that were in any way involved in the grace
-period, including any CPUs that came online or went offline during
-the grace period in question.
-
-<p>The following litmus test exhibits the ordering effects of these
-lock-acquisition and lock-release functions:
-
-<pre>
- 1 int x, y, z;
- 2
- 3 void task0(void)
- 4 {
- 5 raw_spin_lock_rcu_node(rnp);
- 6 WRITE_ONCE(x, 1);
- 7 r1 = READ_ONCE(y);
- 8 raw_spin_unlock_rcu_node(rnp);
- 9 }
-10
-11 void task1(void)
-12 {
-13 raw_spin_lock_rcu_node(rnp);
-14 WRITE_ONCE(y, 1);
-15 r2 = READ_ONCE(z);
-16 raw_spin_unlock_rcu_node(rnp);
-17 }
-18
-19 void task2(void)
-20 {
-21 WRITE_ONCE(z, 1);
-22 smp_mb();
-23 r3 = READ_ONCE(x);
-24 }
-25
-26 WARN_ON(r1 == 0 &amp;&amp; r2 == 0 &amp;&amp; r3 == 0);
-</pre>
-
-<p>The <tt>WARN_ON()</tt> is evaluated at &ldquo;the end of time&rdquo;,
-after all changes have propagated throughout the system.
-Without the <tt>smp_mb__after_unlock_lock()</tt> provided by the
-acquisition functions, this <tt>WARN_ON()</tt> could trigger, for example
-on PowerPC.
-The <tt>smp_mb__after_unlock_lock()</tt> invocations prevent this
-<tt>WARN_ON()</tt> from triggering.
-
-<p>This approach must be extended to include idle CPUs, which need
-RCU's grace-period memory ordering guarantee to extend to any
-RCU read-side critical sections preceding and following the current
-idle sojourn.
-This case is handled by calls to the strongly ordered
-<tt>atomic_add_return()</tt> read-modify-write atomic operation that
-is invoked within <tt>rcu_dynticks_eqs_enter()</tt> at idle-entry
-time and within <tt>rcu_dynticks_eqs_exit()</tt> at idle-exit time.
-The grace-period kthread invokes <tt>rcu_dynticks_snap()</tt> and
-<tt>rcu_dynticks_in_eqs_since()</tt> (both of which invoke
-an <tt>atomic_add_return()</tt> of zero) to detect idle CPUs.
-
-<table>
-<tr><th>&nbsp;</th></tr>
-<tr><th align="left">Quick Quiz:</th></tr>
-<tr><td>
- But what about CPUs that remain offline for the entire
- grace period?
-</td></tr>
-<tr><th align="left">Answer:</th></tr>
-<tr><td bgcolor="#ffffff"><font color="ffffff">
- Such CPUs will be offline at the beginning of the grace period,
- so the grace period won't expect quiescent states from them.
- Races between grace-period start and CPU-hotplug operations
- are mediated by the CPU's leaf <tt>rcu_node</tt> structure's
- <tt>-&gt;lock</tt> as described above.
-</font></td></tr>
-<tr><td>&nbsp;</td></tr>
-</table>
-
-<p>The approach must be extended to handle one final case, that
-of waking a task blocked in <tt>synchronize_rcu()</tt>.
-This task might be affinitied to a CPU that is not yet aware that
-the grace period has ended, and thus might not yet be subject to
-the grace period's memory ordering.
-Therefore, there is an <tt>smp_mb()</tt> after the return from
-<tt>wait_for_completion()</tt> in the <tt>synchronize_rcu()</tt>
-code path.
-
-<table>
-<tr><th>&nbsp;</th></tr>
-<tr><th align="left">Quick Quiz:</th></tr>
-<tr><td>
- What? Where???
- I don't see any <tt>smp_mb()</tt> after the return from
- <tt>wait_for_completion()</tt>!!!
-</td></tr>
-<tr><th align="left">Answer:</th></tr>
-<tr><td bgcolor="#ffffff"><font color="ffffff">
- That would be because I spotted the need for that
- <tt>smp_mb()</tt> during the creation of this documentation,
- and it is therefore unlikely to hit mainline before v4.14.
- Kudos to Lance Roy, Will Deacon, Peter Zijlstra, and
- Jonathan Cameron for asking questions that sensitized me
- to the rather elaborate sequence of events that demonstrate
- the need for this memory barrier.
-</font></td></tr>
-<tr><td>&nbsp;</td></tr>
-</table>
-
-<p>Tree RCU's grace--period memory-ordering guarantees rely most
-heavily on the <tt>rcu_node</tt> structure's <tt>-&gt;lock</tt>
-field, so much so that it is necessary to abbreviate this pattern
-in the diagrams in the next section.
-For example, consider the <tt>rcu_prepare_for_idle()</tt> function
-shown below, which is one of several functions that enforce ordering
-of newly arrived RCU callbacks against future grace periods:
-
-<pre>
- 1 static void rcu_prepare_for_idle(void)
- 2 {
- 3 bool needwake;
- 4 struct rcu_data *rdp;
- 5 struct rcu_dynticks *rdtp = this_cpu_ptr(&amp;rcu_dynticks);
- 6 struct rcu_node *rnp;
- 7 struct rcu_state *rsp;
- 8 int tne;
- 9
-10 if (IS_ENABLED(CONFIG_RCU_NOCB_CPU_ALL) ||
-11 rcu_is_nocb_cpu(smp_processor_id()))
-12 return;
-13 tne = READ_ONCE(tick_nohz_active);
-14 if (tne != rdtp-&gt;tick_nohz_enabled_snap) {
-15 if (rcu_cpu_has_callbacks(NULL))
-16 invoke_rcu_core();
-17 rdtp-&gt;tick_nohz_enabled_snap = tne;
-18 return;
-19 }
-20 if (!tne)
-21 return;
-22 if (rdtp-&gt;all_lazy &amp;&amp;
-23 rdtp-&gt;nonlazy_posted != rdtp-&gt;nonlazy_posted_snap) {
-24 rdtp-&gt;all_lazy = false;
-25 rdtp-&gt;nonlazy_posted_snap = rdtp-&gt;nonlazy_posted;
-26 invoke_rcu_core();
-27 return;
-28 }
-29 if (rdtp-&gt;last_accelerate == jiffies)
-30 return;
-31 rdtp-&gt;last_accelerate = jiffies;
-32 for_each_rcu_flavor(rsp) {
-33 rdp = this_cpu_ptr(rsp-&gt;rda);
-34 if (rcu_segcblist_pend_cbs(&amp;rdp-&gt;cblist))
-35 continue;
-36 rnp = rdp-&gt;mynode;
-37 raw_spin_lock_rcu_node(rnp);
-38 needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
-39 raw_spin_unlock_rcu_node(rnp);
-40 if (needwake)
-41 rcu_gp_kthread_wake(rsp);
-42 }
-43 }
-</pre>
-
-<p>But the only part of <tt>rcu_prepare_for_idle()</tt> that really
-matters for this discussion are lines&nbsp;37&ndash;39.
-We will therefore abbreviate this function as follows:
-
-</p><p><img src="rcu_node-lock.svg" alt="rcu_node-lock.svg">
-
-<p>The box represents the <tt>rcu_node</tt> structure's <tt>-&gt;lock</tt>
-critical section, with the double line on top representing the additional
-<tt>smp_mb__after_unlock_lock()</tt>.
-
-<h3><a name="Tree RCU Grace Period Memory Ordering Components">
-Tree RCU Grace Period Memory Ordering Components</a></h3>
-
-<p>Tree RCU's grace-period memory-ordering guarantee is provided by
-a number of RCU components:
-
-<ol>
-<li> <a href="#Callback Registry">Callback Registry</a>
-<li> <a href="#Grace-Period Initialization">Grace-Period Initialization</a>
-<li> <a href="#Self-Reported Quiescent States">
- Self-Reported Quiescent States</a>
-<li> <a href="#Dynamic Tick Interface">Dynamic Tick Interface</a>
-<li> <a href="#CPU-Hotplug Interface">CPU-Hotplug Interface</a>
-<li> <a href="Forcing Quiescent States">Forcing Quiescent States</a>
-<li> <a href="Grace-Period Cleanup">Grace-Period Cleanup</a>
-<li> <a href="Callback Invocation">Callback Invocation</a>
-</ol>
-
-<p>Each of the following section looks at the corresponding component
-in detail.
-
-<h4><a name="Callback Registry">Callback Registry</a></h4>
-
-<p>If RCU's grace-period guarantee is to mean anything at all, any
-access that happens before a given invocation of <tt>call_rcu()</tt>
-must also happen before the corresponding grace period.
-The implementation of this portion of RCU's grace period guarantee
-is shown in the following figure:
-
-</p><p><img src="TreeRCU-callback-registry.svg" alt="TreeRCU-callback-registry.svg">
-
-<p>Because <tt>call_rcu()</tt> normally acts only on CPU-local state,
-it provides no ordering guarantees, either for itself or for
-phase one of the update (which again will usually be removal of
-an element from an RCU-protected data structure).
-It simply enqueues the <tt>rcu_head</tt> structure on a per-CPU list,
-which cannot become associated with a grace period until a later
-call to <tt>rcu_accelerate_cbs()</tt>, as shown in the diagram above.
-
-<p>One set of code paths shown on the left invokes
-<tt>rcu_accelerate_cbs()</tt> via
-<tt>note_gp_changes()</tt>, either directly from <tt>call_rcu()</tt> (if
-the current CPU is inundated with queued <tt>rcu_head</tt> structures)
-or more likely from an <tt>RCU_SOFTIRQ</tt> handler.
-Another code path in the middle is taken only in kernels built with
-<tt>CONFIG_RCU_FAST_NO_HZ=y</tt>, which invokes
-<tt>rcu_accelerate_cbs()</tt> via <tt>rcu_prepare_for_idle()</tt>.
-The final code path on the right is taken only in kernels built with
-<tt>CONFIG_HOTPLUG_CPU=y</tt>, which invokes
-<tt>rcu_accelerate_cbs()</tt> via
-<tt>rcu_advance_cbs()</tt>, <tt>rcu_migrate_callbacks</tt>,
-<tt>rcutree_migrate_callbacks()</tt>, and <tt>takedown_cpu()</tt>,
-which in turn is invoked on a surviving CPU after the outgoing
-CPU has been completely offlined.
-
-<p>There are a few other code paths within grace-period processing
-that opportunistically invoke <tt>rcu_accelerate_cbs()</tt>.
-However, either way, all of the CPU's recently queued <tt>rcu_head</tt>
-structures are associated with a future grace-period number under
-the protection of the CPU's lead <tt>rcu_node</tt> structure's
-<tt>-&gt;lock</tt>.
-In all cases, there is full ordering against any prior critical section
-for that same <tt>rcu_node</tt> structure's <tt>-&gt;lock</tt>, and
-also full ordering against any of the current task's or CPU's prior critical
-sections for any <tt>rcu_node</tt> structure's <tt>-&gt;lock</tt>.
-
-<p>The next section will show how this ordering ensures that any
-accesses prior to the <tt>call_rcu()</tt> (particularly including phase
-one of the update)
-happen before the start of the corresponding grace period.
-
-<table>
-<tr><th>&nbsp;</th></tr>
-<tr><th align="left">Quick Quiz:</th></tr>
-<tr><td>
- But what about <tt>synchronize_rcu()</tt>?
-</td></tr>
-<tr><th align="left">Answer:</th></tr>
-<tr><td bgcolor="#ffffff"><font color="ffffff">
- The <tt>synchronize_rcu()</tt> passes <tt>call_rcu()</tt>
- to <tt>wait_rcu_gp()</tt>, which invokes it.
- So either way, it eventually comes down to <tt>call_rcu()</tt>.
-</font></td></tr>
-<tr><td>&nbsp;</td></tr>
-</table>
-
-<h4><a name="Grace-Period Initialization">Grace-Period Initialization</a></h4>
-
-<p>Grace-period initialization is carried out by
-the grace-period kernel thread, which makes several passes over the
-<tt>rcu_node</tt> tree within the <tt>rcu_gp_init()</tt> function.
-This means that showing the full flow of ordering through the
-grace-period computation will require duplicating this tree.
-If you find this confusing, please note that the state of the
-<tt>rcu_node</tt> changes over time, just like Heraclitus's river.
-However, to keep the <tt>rcu_node</tt> river tractable, the
-grace-period kernel thread's traversals are presented in multiple
-parts, starting in this section with the various phases of
-grace-period initialization.
-
-<p>The first ordering-related grace-period initialization action is to
-advance the <tt>rcu_state</tt> structure's <tt>-&gt;gp_seq</tt>
-grace-period-number counter, as shown below:
-
-</p><p><img src="TreeRCU-gp-init-1.svg" alt="TreeRCU-gp-init-1.svg" width="75%">
-
-<p>The actual increment is carried out using <tt>smp_store_release()</tt>,
-which helps reject false-positive RCU CPU stall detection.
-Note that only the root <tt>rcu_node</tt> structure is touched.
-
-<p>The first pass through the <tt>rcu_node</tt> tree updates bitmasks
-based on CPUs having come online or gone offline since the start of
-the previous grace period.
-In the common case where the number of online CPUs for this <tt>rcu_node</tt>
-structure has not transitioned to or from zero,
-this pass will scan only the leaf <tt>rcu_node</tt> structures.
-However, if the number of online CPUs for a given leaf <tt>rcu_node</tt>
-structure has transitioned from zero,
-<tt>rcu_init_new_rnp()</tt> will be invoked for the first incoming CPU.
-Similarly, if the number of online CPUs for a given leaf <tt>rcu_node</tt>
-structure has transitioned to zero,
-<tt>rcu_cleanup_dead_rnp()</tt> will be invoked for the last outgoing CPU.
-The diagram below shows the path of ordering if the leftmost
-<tt>rcu_node</tt> structure onlines its first CPU and if the next
-<tt>rcu_node</tt> structure has no online CPUs
-(or, alternatively if the leftmost <tt>rcu_node</tt> structure offlines
-its last CPU and if the next <tt>rcu_node</tt> structure has no online CPUs).
-
-</p><p><img src="TreeRCU-gp-init-2.svg" alt="TreeRCU-gp-init-1.svg" width="75%">
-
-<p>The final <tt>rcu_gp_init()</tt> pass through the <tt>rcu_node</tt>
-tree traverses breadth-first, setting each <tt>rcu_node</tt> structure's
-<tt>-&gt;gp_seq</tt> field to the newly advanced value from the
-<tt>rcu_state</tt> structure, as shown in the following diagram.
-
-</p><p><img src="TreeRCU-gp-init-3.svg" alt="TreeRCU-gp-init-1.svg" width="75%">
-
-<p>This change will also cause each CPU's next call to
-<tt>__note_gp_changes()</tt>
-to notice that a new grace period has started, as described in the next
-section.
-But because the grace-period kthread started the grace period at the
-root (with the advancing of the <tt>rcu_state</tt> structure's
-<tt>-&gt;gp_seq</tt> field) before setting each leaf <tt>rcu_node</tt>
-structure's <tt>-&gt;gp_seq</tt> field, each CPU's observation of
-the start of the grace period will happen after the actual start
-of the grace period.
-
-<table>
-<tr><th>&nbsp;</th></tr>
-<tr><th align="left">Quick Quiz:</th></tr>
-<tr><td>
- But what about the CPU that started the grace period?
- Why wouldn't it see the start of the grace period right when
- it started that grace period?
-</td></tr>
-<tr><th align="left">Answer:</th></tr>
-<tr><td bgcolor="#ffffff"><font color="ffffff">
- In some deep philosophical and overly anthromorphized
- sense, yes, the CPU starting the grace period is immediately
- aware of having done so.
- However, if we instead assume that RCU is not self-aware,
- then even the CPU starting the grace period does not really
- become aware of the start of this grace period until its
- first call to <tt>__note_gp_changes()</tt>.
- On the other hand, this CPU potentially gets early notification
- because it invokes <tt>__note_gp_changes()</tt> during its
- last <tt>rcu_gp_init()</tt> pass through its leaf
- <tt>rcu_node</tt> structure.
-</font></td></tr>
-<tr><td>&nbsp;</td></tr>
-</table>
-
-<h4><a name="Self-Reported Quiescent States">
-Self-Reported Quiescent States</a></h4>
-
-<p>When all entities that might block the grace period have reported
-quiescent states (or as described in a later section, had quiescent
-states reported on their behalf), the grace period can end.
-Online non-idle CPUs report their own quiescent states, as shown
-in the following diagram:
-
-</p><p><img src="TreeRCU-qs.svg" alt="TreeRCU-qs.svg" width="75%">
-
-<p>This is for the last CPU to report a quiescent state, which signals
-the end of the grace period.
-Earlier quiescent states would push up the <tt>rcu_node</tt> tree
-only until they encountered an <tt>rcu_node</tt> structure that
-is waiting for additional quiescent states.
-However, ordering is nevertheless preserved because some later quiescent
-state will acquire that <tt>rcu_node</tt> structure's <tt>-&gt;lock</tt>.
-
-<p>Any number of events can lead up to a CPU invoking
-<tt>note_gp_changes</tt> (or alternatively, directly invoking
-<tt>__note_gp_changes()</tt>), at which point that CPU will notice
-the start of a new grace period while holding its leaf
-<tt>rcu_node</tt> lock.
-Therefore, all execution shown in this diagram happens after the
-start of the grace period.
-In addition, this CPU will consider any RCU read-side critical
-section that started before the invocation of <tt>__note_gp_changes()</tt>
-to have started before the grace period, and thus a critical
-section that the grace period must wait on.
-
-<table>
-<tr><th>&nbsp;</th></tr>
-<tr><th align="left">Quick Quiz:</th></tr>
-<tr><td>
- But a RCU read-side critical section might have started
- after the beginning of the grace period
- (the advancing of <tt>-&gt;gp_seq</tt> from earlier), so why should
- the grace period wait on such a critical section?
-</td></tr>
-<tr><th align="left">Answer:</th></tr>
-<tr><td bgcolor="#ffffff"><font color="ffffff">
- It is indeed not necessary for the grace period to wait on such
- a critical section.
- However, it is permissible to wait on it.
- And it is furthermore important to wait on it, as this
- lazy approach is far more scalable than a &ldquo;big bang&rdquo;
- all-at-once grace-period start could possibly be.
-</font></td></tr>
-<tr><td>&nbsp;</td></tr>
-</table>
-
-<p>If the CPU does a context switch, a quiescent state will be
-noted by <tt>rcu_node_context_switch()</tt> on the left.
-On the other hand, if the CPU takes a scheduler-clock interrupt
-while executing in usermode, a quiescent state will be noted by
-<tt>rcu_sched_clock_irq()</tt> on the right.
-Either way, the passage through a quiescent state will be noted
-in a per-CPU variable.
-
-<p>The next time an <tt>RCU_SOFTIRQ</tt> handler executes on
-this CPU (for example, after the next scheduler-clock
-interrupt), <tt>rcu_core()</tt> will invoke
-<tt>rcu_check_quiescent_state()</tt>, which will notice the
-recorded quiescent state, and invoke
-<tt>rcu_report_qs_rdp()</tt>.
-If <tt>rcu_report_qs_rdp()</tt> verifies that the quiescent state
-really does apply to the current grace period, it invokes
-<tt>rcu_report_rnp()</tt> which traverses up the <tt>rcu_node</tt>
-tree as shown at the bottom of the diagram, clearing bits from
-each <tt>rcu_node</tt> structure's <tt>-&gt;qsmask</tt> field,
-and propagating up the tree when the result is zero.
-
-<p>Note that traversal passes upwards out of a given <tt>rcu_node</tt>
-structure only if the current CPU is reporting the last quiescent
-state for the subtree headed by that <tt>rcu_node</tt> structure.
-A key point is that if a CPU's traversal stops at a given <tt>rcu_node</tt>
-structure, then there will be a later traversal by another CPU
-(or perhaps the same one) that proceeds upwards
-from that point, and the <tt>rcu_node</tt> <tt>-&gt;lock</tt>
-guarantees that the first CPU's quiescent state happens before the
-remainder of the second CPU's traversal.
-Applying this line of thought repeatedly shows that all CPUs'
-quiescent states happen before the last CPU traverses through
-the root <tt>rcu_node</tt> structure, the &ldquo;last CPU&rdquo;
-being the one that clears the last bit in the root <tt>rcu_node</tt>
-structure's <tt>-&gt;qsmask</tt> field.
-
-<h4><a name="Dynamic Tick Interface">Dynamic Tick Interface</a></h4>
-
-<p>Due to energy-efficiency considerations, RCU is forbidden from
-disturbing idle CPUs.
-CPUs are therefore required to notify RCU when entering or leaving idle
-state, which they do via fully ordered value-returning atomic operations
-on a per-CPU variable.
-The ordering effects are as shown below:
-
-</p><p><img src="TreeRCU-dyntick.svg" alt="TreeRCU-dyntick.svg" width="50%">
-
-<p>The RCU grace-period kernel thread samples the per-CPU idleness
-variable while holding the corresponding CPU's leaf <tt>rcu_node</tt>
-structure's <tt>-&gt;lock</tt>.
-This means that any RCU read-side critical sections that precede the
-idle period (the oval near the top of the diagram above) will happen
-before the end of the current grace period.
-Similarly, the beginning of the current grace period will happen before
-any RCU read-side critical sections that follow the
-idle period (the oval near the bottom of the diagram above).
-
-<p>Plumbing this into the full grace-period execution is described
-<a href="#Forcing Quiescent States">below</a>.
-
-<h4><a name="CPU-Hotplug Interface">CPU-Hotplug Interface</a></h4>
-
-<p>RCU is also forbidden from disturbing offline CPUs, which might well
-be powered off and removed from the system completely.
-CPUs are therefore required to notify RCU of their comings and goings
-as part of the corresponding CPU hotplug operations.
-The ordering effects are shown below:
-
-</p><p><img src="TreeRCU-hotplug.svg" alt="TreeRCU-hotplug.svg" width="50%">
-
-<p>Because CPU hotplug operations are much less frequent than idle transitions,
-they are heavier weight, and thus acquire the CPU's leaf <tt>rcu_node</tt>
-structure's <tt>-&gt;lock</tt> and update this structure's
-<tt>-&gt;qsmaskinitnext</tt>.
-The RCU grace-period kernel thread samples this mask to detect CPUs
-having gone offline since the beginning of this grace period.
-
-<p>Plumbing this into the full grace-period execution is described
-<a href="#Forcing Quiescent States">below</a>.
-
-<h4><a name="Forcing Quiescent States">Forcing Quiescent States</a></h4>
-
-<p>As noted above, idle and offline CPUs cannot report their own
-quiescent states, and therefore the grace-period kernel thread
-must do the reporting on their behalf.
-This process is called &ldquo;forcing quiescent states&rdquo;, it is
-repeated every few jiffies, and its ordering effects are shown below:
-
-</p><p><img src="TreeRCU-gp-fqs.svg" alt="TreeRCU-gp-fqs.svg" width="100%">
-
-<p>Each pass of quiescent state forcing is guaranteed to traverse the
-leaf <tt>rcu_node</tt> structures, and if there are no new quiescent
-states due to recently idled and/or offlined CPUs, then only the
-leaves are traversed.
-However, if there is a newly offlined CPU as illustrated on the left
-or a newly idled CPU as illustrated on the right, the corresponding
-quiescent state will be driven up towards the root.
-As with self-reported quiescent states, the upwards driving stops
-once it reaches an <tt>rcu_node</tt> structure that has quiescent
-states outstanding from other CPUs.
-
-<table>
-<tr><th>&nbsp;</th></tr>
-<tr><th align="left">Quick Quiz:</th></tr>
-<tr><td>
- The leftmost drive to root stopped before it reached
- the root <tt>rcu_node</tt> structure, which means that
- there are still CPUs subordinate to that structure on
- which the current grace period is waiting.
- Given that, how is it possible that the rightmost drive
- to root ended the grace period?
-</td></tr>
-<tr><th align="left">Answer:</th></tr>
-<tr><td bgcolor="#ffffff"><font color="ffffff">
- Good analysis!
- It is in fact impossible in the absence of bugs in RCU.
- But this diagram is complex enough as it is, so simplicity
- overrode accuracy.
- You can think of it as poetic license, or you can think of
- it as misdirection that is resolved in the
- <a href="#Putting It All Together">stitched-together diagram</a>.
-</font></td></tr>
-<tr><td>&nbsp;</td></tr>
-</table>
-
-<h4><a name="Grace-Period Cleanup">Grace-Period Cleanup</a></h4>
-
-<p>Grace-period cleanup first scans the <tt>rcu_node</tt> tree
-breadth-first advancing all the <tt>-&gt;gp_seq</tt> fields, then it
-advances the <tt>rcu_state</tt> structure's <tt>-&gt;gp_seq</tt> field.
-The ordering effects are shown below:
-
-</p><p><img src="TreeRCU-gp-cleanup.svg" alt="TreeRCU-gp-cleanup.svg" width="75%">
-
-<p>As indicated by the oval at the bottom of the diagram, once
-grace-period cleanup is complete, the next grace period can begin.
-
-<table>
-<tr><th>&nbsp;</th></tr>
-<tr><th align="left">Quick Quiz:</th></tr>
-<tr><td>
- But when precisely does the grace period end?
-</td></tr>
-<tr><th align="left">Answer:</th></tr>
-<tr><td bgcolor="#ffffff"><font color="ffffff">
- There is no useful single point at which the grace period
- can be said to end.
- The earliest reasonable candidate is as soon as the last
- CPU has reported its quiescent state, but it may be some
- milliseconds before RCU becomes aware of this.
- The latest reasonable candidate is once the <tt>rcu_state</tt>
- structure's <tt>-&gt;gp_seq</tt> field has been updated,
- but it is quite possible that some CPUs have already completed
- phase two of their updates by that time.
- In short, if you are going to work with RCU, you need to
- learn to embrace uncertainty.
-</font></td></tr>
-<tr><td>&nbsp;</td></tr>
-</table>
-
-
-<h4><a name="Callback Invocation">Callback Invocation</a></h4>
-
-<p>Once a given CPU's leaf <tt>rcu_node</tt> structure's
-<tt>-&gt;gp_seq</tt> field has been updated, that CPU can begin
-invoking its RCU callbacks that were waiting for this grace period
-to end.
-These callbacks are identified by <tt>rcu_advance_cbs()</tt>,
-which is usually invoked by <tt>__note_gp_changes()</tt>.
-As shown in the diagram below, this invocation can be triggered by
-the scheduling-clock interrupt (<tt>rcu_sched_clock_irq()</tt> on
-the left) or by idle entry (<tt>rcu_cleanup_after_idle()</tt> on
-the right, but only for kernels build with
-<tt>CONFIG_RCU_FAST_NO_HZ=y</tt>).
-Either way, <tt>RCU_SOFTIRQ</tt> is raised, which results in
-<tt>rcu_do_batch()</tt> invoking the callbacks, which in turn
-allows those callbacks to carry out (either directly or indirectly
-via wakeup) the needed phase-two processing for each update.
-
-</p><p><img src="TreeRCU-callback-invocation.svg" alt="TreeRCU-callback-invocation.svg" width="60%">
-
-<p>Please note that callback invocation can also be prompted by any
-number of corner-case code paths, for example, when a CPU notes that
-it has excessive numbers of callbacks queued.
-In all cases, the CPU acquires its leaf <tt>rcu_node</tt> structure's
-<tt>-&gt;lock</tt> before invoking callbacks, which preserves the
-required ordering against the newly completed grace period.
-
-<p>However, if the callback function communicates to other CPUs,
-for example, doing a wakeup, then it is that function's responsibility
-to maintain ordering.
-For example, if the callback function wakes up a task that runs on
-some other CPU, proper ordering must in place in both the callback
-function and the task being awakened.
-To see why this is important, consider the top half of the
-<a href="#Grace-Period Cleanup">grace-period cleanup</a> diagram.
-The callback might be running on a CPU corresponding to the leftmost
-leaf <tt>rcu_node</tt> structure, and awaken a task that is to run on
-a CPU corresponding to the rightmost leaf <tt>rcu_node</tt> structure,
-and the grace-period kernel thread might not yet have reached the
-rightmost leaf.
-In this case, the grace period's memory ordering might not yet have
-reached that CPU, so again the callback function and the awakened
-task must supply proper ordering.
-
-<h3><a name="Putting It All Together">Putting It All Together</a></h3>
-
-<p>A stitched-together diagram is
-<a href="Tree-RCU-Diagram.html">here</a>.
-
-<h3><a name="Legal Statement">
-Legal Statement</a></h3>
-
-<p>This work represents the view of the author and does not necessarily
-represent the view of IBM.
-
-</p><p>Linux is a registered trademark of Linus Torvalds.
-
-</p><p>Other company, product, and service names may be trademarks or
-service marks of others.
-
-</body></html>
diff --git a/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst b/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst
new file mode 100644
index 000000000000..1a8b129cfc04
--- /dev/null
+++ b/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst
@@ -0,0 +1,624 @@
+======================================================
+A Tour Through TREE_RCU's Grace-Period Memory Ordering
+======================================================
+
+August 8, 2017
+
+This article was contributed by Paul E. McKenney
+
+Introduction
+============
+
+This document gives a rough visual overview of how Tree RCU's
+grace-period memory ordering guarantee is provided.
+
+What Is Tree RCU's Grace Period Memory Ordering Guarantee?
+==========================================================
+
+RCU grace periods provide extremely strong memory-ordering guarantees
+for non-idle non-offline code.
+Any code that happens after the end of a given RCU grace period is guaranteed
+to see the effects of all accesses prior to the beginning of that grace
+period that are within RCU read-side critical sections.
+Similarly, any code that happens before the beginning of a given RCU grace
+period is guaranteed to see the effects of all accesses following the end
+of that grace period that are within RCU read-side critical sections.
+
+Note well that RCU-sched read-side critical sections include any region
+of code for which preemption is disabled.
+Given that each individual machine instruction can be thought of as
+an extremely small region of preemption-disabled code, one can think of
+``synchronize_rcu()`` as ``smp_mb()`` on steroids.
+
+RCU updaters use this guarantee by splitting their updates into
+two phases, one of which is executed before the grace period and
+the other of which is executed after the grace period.
+In the most common use case, phase one removes an element from
+a linked RCU-protected data structure, and phase two frees that element.
+For this to work, any readers that have witnessed state prior to the
+phase-one update (in the common case, removal) must not witness state
+following the phase-two update (in the common case, freeing).
+
+The RCU implementation provides this guarantee using a network
+of lock-based critical sections, memory barriers, and per-CPU
+processing, as is described in the following sections.
+
+Tree RCU Grace Period Memory Ordering Building Blocks
+=====================================================
+
+The workhorse for RCU's grace-period memory ordering is the
+critical section for the ``rcu_node`` structure's
+``->lock``. These critical sections use helper functions for lock
+acquisition, including ``raw_spin_lock_rcu_node()``,
+``raw_spin_lock_irq_rcu_node()``, and ``raw_spin_lock_irqsave_rcu_node()``.
+Their lock-release counterparts are ``raw_spin_unlock_rcu_node()``,
+``raw_spin_unlock_irq_rcu_node()``, and
+``raw_spin_unlock_irqrestore_rcu_node()``, respectively.
+For completeness, a ``raw_spin_trylock_rcu_node()`` is also provided.
+The key point is that the lock-acquisition functions, including
+``raw_spin_trylock_rcu_node()``, all invoke ``smp_mb__after_unlock_lock()``
+immediately after successful acquisition of the lock.
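+
+For example, the acquisition side follows this pattern (a simplified
+sketch of the kernel's macros, which also handle private-field access)::
+
+   #define raw_spin_lock_rcu_node(rnp)                  \
+   do {                                                 \
+           raw_spin_lock(&(rnp)->lock);                 \
+           smp_mb__after_unlock_lock(); /* Order it. */ \
+   } while (0)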
+
+Therefore, for any given ``rcu_node`` structure, any access
+happening before one of the above lock-release functions will be seen
+by all CPUs as happening before any access happening after a later
+one of the above lock-acquisition functions.
+Furthermore, any access happening before one of the
+above lock-release functions on any given CPU will be seen by all
+CPUs as happening before any access happening after a later one
+of the above lock-acquisition functions executing on that same CPU,
+even if the lock-release and lock-acquisition functions are operating
+on different ``rcu_node`` structures.
+Tree RCU uses these two ordering guarantees to form an ordering
+network among all CPUs that were in any way involved in the grace
+period, including any CPUs that came online or went offline during
+the grace period in question.
+
+The following litmus test exhibits the ordering effects of these
+lock-acquisition and lock-release functions::
+
+   int x, y, z;
+
+   void task0(void)
+   {
+           raw_spin_lock_rcu_node(rnp);
+           WRITE_ONCE(x, 1);
+           r1 = READ_ONCE(y);
+           raw_spin_unlock_rcu_node(rnp);
+   }
+
+   void task1(void)
+   {
+           raw_spin_lock_rcu_node(rnp);
+           WRITE_ONCE(y, 1);
+           r2 = READ_ONCE(z);
+           raw_spin_unlock_rcu_node(rnp);
+   }
+
+   void task2(void)
+   {
+           WRITE_ONCE(z, 1);
+           smp_mb();
+           r3 = READ_ONCE(x);
+   }
+
+   WARN_ON(r1 == 0 && r2 == 0 && r3 == 0);
+
+The ``WARN_ON()`` is evaluated at “the end of time”,
+after all changes have propagated throughout the system.
+Without the ``smp_mb__after_unlock_lock()`` provided by the
+acquisition functions, this ``WARN_ON()`` could trigger, for example
+on PowerPC.
+The ``smp_mb__after_unlock_lock()`` invocations prevent this
+``WARN_ON()`` from triggering.
+
+This approach must be extended to include idle CPUs, which need
+RCU's grace-period memory ordering guarantee to extend to any
+RCU read-side critical sections preceding and following the current
+idle sojourn.
+This case is handled by calls to the strongly ordered
+``atomic_add_return()`` read-modify-write atomic operation that
+is invoked within ``rcu_dynticks_eqs_enter()`` at idle-entry
+time and within ``rcu_dynticks_eqs_exit()`` at idle-exit time.
+The grace-period kthread invokes ``rcu_dynticks_snap()`` and
+``rcu_dynticks_in_eqs_since()`` (both of which invoke
+an ``atomic_add_return()`` of zero) to detect idle CPUs.
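+
+The following sketch illustrates the idea, assuming a simplified
+encoding in which an even counter value means that the CPU is idle
+(the kernel's actual encoding and helper names differ in detail)::
+
+   static DEFINE_PER_CPU(atomic_t, dynticks_sketch);
+
+   static void eqs_enter_sketch(void)   /* Idle entry; exit is symmetric. */
+   {
+           /* Fully ordered RMW: prior critical sections are seen as
+            * having happened before the CPU is sampled as idle. */
+           atomic_add_return(1, this_cpu_ptr(&dynticks_sketch));
+   }
+
+   static int dynticks_snap_sketch(int cpu)   /* GP kthread's sampling. */
+   {
+           /* Adding zero yields a fully ordered read. */
+           return atomic_add_return(0, per_cpu_ptr(&dynticks_sketch, cpu));
+   }
+
+   static bool in_eqs_sketch(int snap)
+   {
+           return !(snap & 0x1);   /* Even means idle. */
+   }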
+
++-----------------------------------------------------------------------+
+| **Quick Quiz**: |
++-----------------------------------------------------------------------+
+| But what about CPUs that remain offline for the entire grace period? |
++-----------------------------------------------------------------------+
+| **Answer**: |
++-----------------------------------------------------------------------+
+| Such CPUs will be offline at the beginning of the grace period, so |
+| the grace period won't expect quiescent states from them. Races |
+| between grace-period start and CPU-hotplug operations are mediated |
+| by the CPU's leaf ``rcu_node`` structure's ``->lock`` as described |
+| above. |
++-----------------------------------------------------------------------+
+
+The approach must be extended to handle one final case, that of waking a
+task blocked in ``synchronize_rcu()``. This task might be affinitized to
+a CPU that is not yet aware that the grace period has ended, and thus
+might not yet be subject to the grace period's memory ordering.
+Therefore, there is an ``smp_mb()`` after the return from
+``wait_for_completion()`` in the ``synchronize_rcu()`` code path.
+
++-----------------------------------------------------------------------+
+| **Quick Quiz**: |
++-----------------------------------------------------------------------+
+| What? Where??? I don't see any ``smp_mb()`` after the return from |
+| ``wait_for_completion()``!!! |
++-----------------------------------------------------------------------+
+| **Answer**: |
++-----------------------------------------------------------------------+
+| That would be because I spotted the need for that ``smp_mb()`` during |
+| the creation of this documentation, and it is therefore unlikely to |
+| hit mainline before v4.14. Kudos to Lance Roy, Will Deacon, Peter |
+| Zijlstra, and Jonathan Cameron for asking questions that sensitized |
+| me to the rather elaborate sequence of events that demonstrate the |
+| need for this memory barrier. |
++-----------------------------------------------------------------------+
+
+Tree RCU's grace-period memory-ordering guarantees rely most heavily on
+the ``rcu_node`` structure's ``->lock`` field, so much so that it is
+necessary to abbreviate this pattern in the diagrams in the next
+section. For example, consider the ``rcu_prepare_for_idle()`` function
+shown below, which is one of several functions that enforce ordering of
+newly arrived RCU callbacks against future grace periods:
+
+::
+
+ 1 static void rcu_prepare_for_idle(void)
+ 2 {
+ 3 bool needwake;
+ 4 struct rcu_data *rdp;
+ 5 struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
+ 6 struct rcu_node *rnp;
+ 7 struct rcu_state *rsp;
+ 8 int tne;
+ 9
+ 10 if (IS_ENABLED(CONFIG_RCU_NOCB_CPU_ALL) ||
+ 11 rcu_is_nocb_cpu(smp_processor_id()))
+ 12 return;
+ 13 tne = READ_ONCE(tick_nohz_active);
+ 14 if (tne != rdtp->tick_nohz_enabled_snap) {
+ 15 if (rcu_cpu_has_callbacks(NULL))
+ 16 invoke_rcu_core();
+ 17 rdtp->tick_nohz_enabled_snap = tne;
+ 18 return;
+ 19 }
+ 20 if (!tne)
+ 21 return;
+ 22 if (rdtp->all_lazy &&
+ 23 rdtp->nonlazy_posted != rdtp->nonlazy_posted_snap) {
+ 24 rdtp->all_lazy = false;
+ 25 rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted;
+ 26 invoke_rcu_core();
+ 27 return;
+ 28 }
+ 29 if (rdtp->last_accelerate == jiffies)
+ 30 return;
+ 31 rdtp->last_accelerate = jiffies;
+ 32 for_each_rcu_flavor(rsp) {
+ 33 rdp = this_cpu_ptr(rsp->rda);
+ 34 if (rcu_segcblist_pend_cbs(&rdp->cblist))
+ 35 continue;
+ 36 rnp = rdp->mynode;
+ 37 raw_spin_lock_rcu_node(rnp);
+ 38 needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
+ 39 raw_spin_unlock_rcu_node(rnp);
+ 40 if (needwake)
+ 41 rcu_gp_kthread_wake(rsp);
+ 42 }
+ 43 }
+
+But the only part of ``rcu_prepare_for_idle()`` that really matters for
+this discussion is lines 37–39. We will therefore abbreviate this
+function as follows:
+
+.. kernel-figure:: rcu_node-lock.svg
+
+The box represents the ``rcu_node`` structure's ``->lock`` critical
+section, with the double line on top representing the additional
+``smp_mb__after_unlock_lock()``.
+
+Tree RCU Grace Period Memory Ordering Components
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Tree RCU's grace-period memory-ordering guarantee is provided by a
+number of RCU components:
+
+#. `Callback Registry`_
+#. `Grace-Period Initialization`_
+#. `Self-Reported Quiescent States`_
+#. `Dynamic Tick Interface`_
+#. `CPU-Hotplug Interface`_
+#. `Forcing Quiescent States`_
+#. `Grace-Period Cleanup`_
+#. `Callback Invocation`_
+
+Each of the following sections looks at the corresponding component in
+detail.
+
+Callback Registry
+^^^^^^^^^^^^^^^^^
+
+If RCU's grace-period guarantee is to mean anything at all, any access
+that happens before a given invocation of ``call_rcu()`` must also
+happen before the corresponding grace period. The implementation of this
+portion of RCU's grace period guarantee is shown in the following
+figure:
+
+.. kernel-figure:: TreeRCU-callback-registry.svg
+
+Because ``call_rcu()`` normally acts only on CPU-local state, it
+provides no ordering guarantees, either for itself or for phase one of
+the update (which again will usually be removal of an element from an
+RCU-protected data structure). It simply enqueues the ``rcu_head``
+structure on a per-CPU list, which cannot become associated with a grace
+period until a later call to ``rcu_accelerate_cbs()``, as shown in the
+diagram above.
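+
+A hypothetical minimal sketch of this CPU-local enqueue (the real
+``call_rcu()`` uses the segmented callback list and handles many more
+details)::
+
+   struct cb {
+           struct cb *next;
+           void (*func)(struct cb *);
+   };
+   static DEFINE_PER_CPU(struct cb *, cb_list);
+
+   static void call_rcu_sketch(struct cb *head, void (*func)(struct cb *))
+   {
+           unsigned long flags;
+
+           head->func = func;
+           local_irq_save(flags);   /* CPU-local state only. */
+           head->next = __this_cpu_read(cb_list);
+           __this_cpu_write(cb_list, head);
+           local_irq_restore(flags);   /* Note: no ordering provided. */
+   }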
+
+One set of code paths shown on the left invokes ``rcu_accelerate_cbs()``
+via ``note_gp_changes()``, either directly from ``call_rcu()`` (if the
+current CPU is inundated with queued ``rcu_head`` structures) or more
+likely from an ``RCU_SOFTIRQ`` handler. Another code path in the middle
+is taken only in kernels built with ``CONFIG_RCU_FAST_NO_HZ=y``, which
+invokes ``rcu_accelerate_cbs()`` via ``rcu_prepare_for_idle()``. The
+final code path on the right is taken only in kernels built with
+``CONFIG_HOTPLUG_CPU=y``, which invokes ``rcu_accelerate_cbs()`` via
+``rcu_advance_cbs()``, ``rcu_migrate_callbacks()``,
+``rcutree_migrate_callbacks()``, and ``takedown_cpu()``, which in turn
+is invoked on a surviving CPU after the outgoing CPU has been completely
+offlined.
+
+There are a few other code paths within grace-period processing that
+opportunistically invoke ``rcu_accelerate_cbs()``. However, either way,
+all of the CPU's recently queued ``rcu_head`` structures are associated
+with a future grace-period number under the protection of the CPU's leaf
+``rcu_node`` structure's ``->lock``. In all cases, there is full
+ordering against any prior critical section for that same ``rcu_node``
+structure's ``->lock``, and also full ordering against any of the
+current task's or CPU's prior critical sections for any ``rcu_node``
+structure's ``->lock``.
+
+The next section will show how this ordering ensures that any accesses
+prior to the ``call_rcu()`` (particularly including phase one of the
+update) happen before the start of the corresponding grace period.
+
++-----------------------------------------------------------------------+
+| **Quick Quiz**: |
++-----------------------------------------------------------------------+
+| But what about ``synchronize_rcu()``? |
++-----------------------------------------------------------------------+
+| **Answer**: |
++-----------------------------------------------------------------------+
+| The ``synchronize_rcu()`` passes ``call_rcu()`` to ``wait_rcu_gp()``, |
+| which invokes it. So either way, it eventually comes down to |
+| ``call_rcu()``. |
++-----------------------------------------------------------------------+
+
+Grace-Period Initialization
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Grace-period initialization is carried out by the grace-period kernel
+thread, which makes several passes over the ``rcu_node`` tree within the
+``rcu_gp_init()`` function. This means that showing the full flow of
+ordering through the grace-period computation will require duplicating
+this tree. If you find this confusing, please note that the state of the
+``rcu_node`` tree changes over time, just like Heraclitus's river. However,
+to keep the ``rcu_node`` river tractable, the grace-period kernel
+thread's traversals are presented in multiple parts, starting in this
+section with the various phases of grace-period initialization.
+
+The first ordering-related grace-period initialization action is to
+advance the ``rcu_state`` structure's ``->gp_seq`` grace-period-number
+counter, as shown below:
+
+.. kernel-figure:: TreeRCU-gp-init-1.svg
+
+The actual increment is carried out using ``smp_store_release()``, which
+helps reject false-positive RCU CPU stall detection. Note that only the
+root ``rcu_node`` structure is touched.
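+
+A sketch of this step (the kernel's actual helper also manages
+low-order state bits, which are elided here)::
+
+   /* The release store orders this advance against later ordered
+    * readers of ->gp_seq, helping reject false-positive stalls. */
+   smp_store_release(&rcu_state.gp_seq, rcu_state.gp_seq + 1);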
+
+The first pass through the ``rcu_node`` tree updates bitmasks based on
+CPUs having come online or gone offline since the start of the previous
+grace period. In the common case where the number of online CPUs for
+this ``rcu_node`` structure has not transitioned to or from zero, this
+pass will scan only the leaf ``rcu_node`` structures. However, if the
+number of online CPUs for a given leaf ``rcu_node`` structure has
+transitioned from zero, ``rcu_init_new_rnp()`` will be invoked for the
+first incoming CPU. Similarly, if the number of online CPUs for a given
+leaf ``rcu_node`` structure has transitioned to zero,
+``rcu_cleanup_dead_rnp()`` will be invoked for the last outgoing CPU.
+The diagram below shows the path of ordering if the leftmost
+``rcu_node`` structure onlines its first CPU and if the next
+``rcu_node`` structure has no online CPUs (or, alternatively if the
+leftmost ``rcu_node`` structure offlines its last CPU and if the next
+``rcu_node`` structure has no online CPUs).
+
+.. kernel-figure:: TreeRCU-gp-init-2.svg
+
+The final ``rcu_gp_init()`` pass through the ``rcu_node`` tree traverses
+breadth-first, setting each ``rcu_node`` structure's ``->gp_seq`` field
+to the newly advanced value from the ``rcu_state`` structure, as shown
+in the following diagram.
+
+.. kernel-figure:: TreeRCU-gp-init-3.svg
+
+This change will also cause each CPU's next call to
+``__note_gp_changes()`` to notice that a new grace period has started,
+as described in the next section. But because the grace-period kthread
+started the grace period at the root (with the advancing of the
+``rcu_state`` structure's ``->gp_seq`` field) before setting each leaf
+``rcu_node`` structure's ``->gp_seq`` field, each CPU's observation of
+the start of the grace period will happen after the actual start of the
+grace period.
+
++-----------------------------------------------------------------------+
+| **Quick Quiz**: |
++-----------------------------------------------------------------------+
+| But what about the CPU that started the grace period? Why wouldn't it |
+| see the start of the grace period right when it started that grace |
+| period? |
++-----------------------------------------------------------------------+
+| **Answer**: |
++-----------------------------------------------------------------------+
+| In some deep philosophical and overly anthropomorphized sense, yes,   |
+| the CPU starting the grace period is immediately aware of having      |
+| done so.                                                              |
+| However, if we instead assume that RCU is not self-aware, then even |
+| the CPU starting the grace period does not really become aware of the |
+| start of this grace period until its first call to |
+| ``__note_gp_changes()``. On the other hand, this CPU potentially gets |
+| early notification because it invokes ``__note_gp_changes()`` during |
+| its last ``rcu_gp_init()`` pass through its leaf ``rcu_node`` |
+| structure. |
++-----------------------------------------------------------------------+
+
+Self-Reported Quiescent States
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+When all entities that might block the grace period have reported
+quiescent states (or as described in a later section, had quiescent
+states reported on their behalf), the grace period can end. Online
+non-idle CPUs report their own quiescent states, as shown in the
+following diagram:
+
+.. kernel-figure:: TreeRCU-qs.svg
+
+This is for the last CPU to report a quiescent state, which signals the
+end of the grace period. Earlier quiescent states would push up the
+``rcu_node`` tree only until they encountered an ``rcu_node`` structure
+that is waiting for additional quiescent states. However, ordering is
+nevertheless preserved because some later quiescent state will acquire
+that ``rcu_node`` structure's ``->lock``.
+
+Any number of events can lead up to a CPU invoking ``note_gp_changes()``
+(or alternatively, directly invoking ``__note_gp_changes()``), at which
+point that CPU will notice the start of a new grace period while holding
+its leaf ``rcu_node`` lock. Therefore, all execution shown in this
+diagram happens after the start of the grace period. In addition, this
+CPU will consider any RCU read-side critical section that started before
+the invocation of ``__note_gp_changes()`` to have started before the
+grace period, and thus a critical section that the grace period must
+wait on.
+
++-----------------------------------------------------------------------+
+| **Quick Quiz**: |
++-----------------------------------------------------------------------+
+| But an RCU read-side critical section might have started after the    |
+| beginning of the grace period (the advancing of ``->gp_seq`` from |
+| earlier), so why should the grace period wait on such a critical |
+| section? |
++-----------------------------------------------------------------------+
+| **Answer**: |
++-----------------------------------------------------------------------+
+| It is indeed not necessary for the grace period to wait on such a |
+| critical section. However, it is permissible to wait on it. And it is |
+| furthermore important to wait on it, as this lazy approach is far |
+| more scalable than a “big bang” all-at-once grace-period start could |
+| possibly be. |
++-----------------------------------------------------------------------+
+
+If the CPU does a context switch, a quiescent state will be noted by
+``rcu_note_context_switch()`` on the left. On the other hand, if the CPU
+takes a scheduler-clock interrupt while executing in usermode, a
+quiescent state will be noted by ``rcu_sched_clock_irq()`` on the right.
+Either way, the passage through a quiescent state will be noted in a
+per-CPU variable.
+
+The next time an ``RCU_SOFTIRQ`` handler executes on this CPU (for
+example, after the next scheduler-clock interrupt), ``rcu_core()`` will
+invoke ``rcu_check_quiescent_state()``, which will notice the recorded
+quiescent state, and invoke ``rcu_report_qs_rdp()``. If
+``rcu_report_qs_rdp()`` verifies that the quiescent state really does
+apply to the current grace period, it invokes ``rcu_report_rnp()`` which
+traverses up the ``rcu_node`` tree as shown at the bottom of the
+diagram, clearing bits from each ``rcu_node`` structure's ``->qsmask``
+field, and propagating up the tree when the result is zero.
+
+Note that traversal passes upwards out of a given ``rcu_node`` structure
+only if the current CPU is reporting the last quiescent state for the
+subtree headed by that ``rcu_node`` structure. A key point is that if a
+CPU's traversal stops at a given ``rcu_node`` structure, then there will
+be a later traversal by another CPU (or perhaps the same one) that
+proceeds upwards from that point, and the ``rcu_node`` ``->lock``
+guarantees that the first CPU's quiescent state happens before the
+remainder of the second CPU's traversal. Applying this line of thought
+repeatedly shows that all CPUs' quiescent states happen before the last
+CPU traverses through the root ``rcu_node`` structure, the “last CPU”
+being the one that clears the last bit in the root ``rcu_node``
+structure's ``->qsmask`` field.
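+
+A hypothetical sketch of this upward report (compare
+``rcu_report_qs_rnp()``), using the lock helpers described earlier::
+
+   static void report_qs_sketch(struct rcu_node *rnp, unsigned long mask)
+   {
+           unsigned long flags;
+
+           for (;;) {
+                   raw_spin_lock_irqsave_rcu_node(rnp, flags);
+                   rnp->qsmask &= ~mask;
+                   if (rnp->qsmask || !rnp->parent) {
+                           /* Either more quiescent states are needed at
+                            * this level (a later one continues upward),
+                            * or this is the root and the GP can end. */
+                           raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+                           return;
+                   }
+                   mask = rnp->grpmask;   /* Our bit in the parent. */
+                   raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+                   rnp = rnp->parent;
+           }
+   }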
+
+Dynamic Tick Interface
+^^^^^^^^^^^^^^^^^^^^^^
+
+Due to energy-efficiency considerations, RCU is forbidden from
+disturbing idle CPUs. CPUs are therefore required to notify RCU when
+entering or leaving idle state, which they do via fully ordered
+value-returning atomic operations on a per-CPU variable. The ordering
+effects are as shown below:
+
+.. kernel-figure:: TreeRCU-dyntick.svg
+
+The RCU grace-period kernel thread samples the per-CPU idleness variable
+while holding the corresponding CPU's leaf ``rcu_node`` structure's
+``->lock``. This means that any RCU read-side critical sections that
+precede the idle period (the oval near the top of the diagram above)
+will happen before the end of the current grace period. Similarly, the
+beginning of the current grace period will happen before any RCU
+read-side critical sections that follow the idle period (the oval near
+the bottom of the diagram above).
+
+Plumbing this into the full grace-period execution is described
+below in `Forcing Quiescent States`_.
+
+CPU-Hotplug Interface
+^^^^^^^^^^^^^^^^^^^^^
+
+RCU is also forbidden from disturbing offline CPUs, which might well be
+powered off and removed from the system completely. CPUs are therefore
+required to notify RCU of their comings and goings as part of the
+corresponding CPU hotplug operations. The ordering effects are shown
+below:
+
+.. kernel-figure:: TreeRCU-hotplug.svg
+
+Because CPU hotplug operations are much less frequent than idle
+transitions, they are heavier weight, and thus acquire the CPU's leaf
+``rcu_node`` structure's ``->lock`` and update this structure's
+``->qsmaskinitnext``. The RCU grace-period kernel thread samples this
+mask to detect CPUs having gone offline since the beginning of this
+grace period.
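+
+A sketch of the online-notification side, assuming the caller has
+already mapped the incoming CPU to its leaf ``rcu_node`` structure and
+mask bit::
+
+   static void cpu_online_sketch(struct rcu_node *rnp, unsigned long cpubit)
+   {
+           unsigned long flags;
+
+           /* The leaf ->lock mediates races with grace-period start. */
+           raw_spin_lock_irqsave_rcu_node(rnp, flags);
+           rnp->qsmaskinitnext |= cpubit;   /* Offlining would clear it. */
+           raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+   }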
+
+Plumbing this into the full grace-period execution is described
+below in `Forcing Quiescent States`_.
+
+Forcing Quiescent States
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+As noted above, idle and offline CPUs cannot report their own quiescent
+states, and therefore the grace-period kernel thread must do the
+reporting on their behalf. This process is called “forcing quiescent
+states”, it is repeated every few jiffies, and its ordering effects are
+shown below:
+
+.. kernel-figure:: TreeRCU-gp-fqs.svg
+
+Each pass of quiescent state forcing is guaranteed to traverse the leaf
+``rcu_node`` structures, and if there are no new quiescent states due to
+recently idled and/or offlined CPUs, then only the leaves are traversed.
+However, if there is a newly offlined CPU as illustrated on the left or
+a newly idled CPU as illustrated on the right, the corresponding
+quiescent state will be driven up towards the root. As with
+self-reported quiescent states, the upwards driving stops once it
+reaches an ``rcu_node`` structure that has quiescent states outstanding
+from other CPUs.
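+
+A hypothetical sketch of one forcing pass over the leaves, reusing
+``report_qs_sketch()`` from the self-reported case; the predicate
+``idle_or_offline_sketch()`` stands in for the dynticks and
+``->qsmaskinitnext`` checks described above::
+
+   static void force_qs_sketch(struct rcu_node *leaves[], int nleaves)
+   {
+           unsigned long bit, mask;
+           int i;
+
+           for (i = 0; i < nleaves; i++) {
+                   mask = 0;
+                   for_each_set_bit(bit, &leaves[i]->qsmask, BITS_PER_LONG)
+                           if (idle_or_offline_sketch(leaves[i], bit))
+                                   mask |= 1UL << bit;
+                   if (mask)   /* Report on those CPUs' behalf. */
+                           report_qs_sketch(leaves[i], mask);
+           }
+   }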
+
++-----------------------------------------------------------------------+
+| **Quick Quiz**: |
++-----------------------------------------------------------------------+
+| The leftmost drive to root stopped before it reached the root |
+| ``rcu_node`` structure, which means that there are still CPUs |
+| subordinate to that structure on which the current grace period is |
+| waiting. Given that, how is it possible that the rightmost drive to |
+| root ended the grace period? |
++-----------------------------------------------------------------------+
+| **Answer**: |
++-----------------------------------------------------------------------+
+| Good analysis! It is in fact impossible in the absence of bugs in |
+| RCU. But this diagram is complex enough as it is, so simplicity |
+| overrode accuracy. You can think of it as poetic license, or you can |
+| think of it as misdirection that is resolved in the stitched-together |
+| diagram in `Putting It All Together`_.                                |
++-----------------------------------------------------------------------+
+
+Grace-Period Cleanup
+^^^^^^^^^^^^^^^^^^^^
+
+Grace-period cleanup first scans the ``rcu_node`` tree breadth-first,
+advancing all the ``->gp_seq`` fields, then it advances the
+``rcu_state`` structure's ``->gp_seq`` field. The ordering effects are
+shown below:
+
+.. kernel-figure:: TreeRCU-gp-cleanup.svg
+
+As indicated by the oval at the bottom of the diagram, once grace-period
+cleanup is complete, the next grace period can begin.
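+
+A sketch of the ordering-relevant steps (simplified from the kernel's
+cleanup code; the sequence-number state bits are again elided)::
+
+   struct rcu_node *rnp;
+   unsigned long new_seq = rcu_state.gp_seq + 1;   /* Simplified. */
+
+   rcu_for_each_node_breadth_first(rnp) {
+           raw_spin_lock_irq_rcu_node(rnp);
+           WRITE_ONCE(rnp->gp_seq, new_seq);
+           raw_spin_unlock_irq_rcu_node(rnp);
+   }
+   smp_store_release(&rcu_state.gp_seq, new_seq);   /* Root last. */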
+
++-----------------------------------------------------------------------+
+| **Quick Quiz**: |
++-----------------------------------------------------------------------+
+| But when precisely does the grace period end? |
++-----------------------------------------------------------------------+
+| **Answer**: |
++-----------------------------------------------------------------------+
+| There is no useful single point at which the grace period can be said |
+| to end. The earliest reasonable candidate is as soon as the last CPU |
+| has reported its quiescent state, but it may be some milliseconds |
+| before RCU becomes aware of this. The latest reasonable candidate is |
+| once the ``rcu_state`` structure's ``->gp_seq`` field has been |
+| updated, but it is quite possible that some CPUs have already |
+| completed phase two of their updates by that time. In short, if you |
+| are going to work with RCU, you need to learn to embrace uncertainty. |
++-----------------------------------------------------------------------+
+
+Callback Invocation
+^^^^^^^^^^^^^^^^^^^
+
+Once a given CPU's leaf ``rcu_node`` structure's ``->gp_seq`` field has
+been updated, that CPU can begin invoking its RCU callbacks that were
+waiting for this grace period to end. These callbacks are identified by
+``rcu_advance_cbs()``, which is usually invoked by
+``__note_gp_changes()``. As shown in the diagram below, this invocation
+can be triggered by the scheduling-clock interrupt
+(``rcu_sched_clock_irq()`` on the left) or by idle entry
+(``rcu_cleanup_after_idle()`` on the right, but only for kernels built
+with ``CONFIG_RCU_FAST_NO_HZ=y``). Either way, ``RCU_SOFTIRQ`` is
+raised, which results in ``rcu_do_batch()`` invoking the callbacks,
+which in turn allows those callbacks to carry out (either directly or
+indirectly via wakeup) the needed phase-two processing for each update.
+
+.. kernel-figure:: TreeRCU-callback-invocation.svg
+
+Please note that callback invocation can also be prompted by any number
+of corner-case code paths, for example, when a CPU notes that it has
+excessive numbers of callbacks queued. In all cases, the CPU acquires
+its leaf ``rcu_node`` structure's ``->lock`` before invoking callbacks,
+which preserves the required ordering against the newly completed grace
+period.
+
+However, if the callback function communicates with other CPUs, for
+example, by doing a wakeup, then it is that function's responsibility to
+maintain ordering. For example, if the callback function wakes up a task
+that runs on some other CPU, proper ordering must be in place in both the
+callback function and the task being awakened. To see why this is
+important, consider the top half of the `grace-period
+cleanup <#Grace-Period%20Cleanup>`__ diagram. The callback might be
+running on a CPU corresponding to the leftmost leaf ``rcu_node``
+structure, and awaken a task that is to run on a CPU corresponding to
+the rightmost leaf ``rcu_node`` structure, and the grace-period kernel
+thread might not yet have reached the rightmost leaf. In this case, the
+grace period's memory ordering might not yet have reached that CPU, so
+again the callback function and the awakened task must supply proper
+ordering.
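+
+One way to supply that ordering is a release store in the callback
+paired with an acquire load in the awakened task, as in the following
+sketch, in which ``done_flag`` and ``my_wq`` are hypothetical::
+
+    /* RCU callback registered via call_rcu(), possibly on another CPU. */
+    static void my_cb(struct rcu_head *rhp)
+    {
+        smp_store_release(&done_flag, true); /* Order prior accesses. */
+        wake_up(&my_wq);
+    }
+
+    /* Awakened task: the acquire pairs with the callback's release. */
+    wait_event(my_wq, smp_load_acquire(&done_flag));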
+
+Putting It All Together
+~~~~~~~~~~~~~~~~~~~~~~~
+
+A stitched-together diagram is here:
+
+.. kernel-figure:: TreeRCU-gp.svg
+
+Legal Statement
+~~~~~~~~~~~~~~~
+
+This work represents the view of the author and does not necessarily
+represent the view of IBM.
+
+Linux is a registered trademark of Linus Torvalds.
+
+Other company, product, and service names may be trademarks or service
+marks of others.
diff --git a/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp.svg b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp.svg
index 2bcd742d6e49..069f6f8371c2 100644
--- a/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp.svg
+++ b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp.svg
@@ -3880,7 +3880,7 @@
font-style="normal"
y="-4418.6582"
x="3745.7725"
- xml:space="preserve">rcu_node_context_switch()</text>
+ xml:space="preserve">rcu_note_context_switch()</text>
</g>
<g
transform="translate(1881.1886,54048.57)"
diff --git a/Documentation/RCU/Design/Memory-Ordering/TreeRCU-qs.svg b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-qs.svg
index 779c9ac31a52..7d6c5f7e505c 100644
--- a/Documentation/RCU/Design/Memory-Ordering/TreeRCU-qs.svg
+++ b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-qs.svg
@@ -753,7 +753,7 @@
font-style="normal"
y="-4418.6582"
x="3745.7725"
- xml:space="preserve">rcu_node_context_switch()</text>
+ xml:space="preserve">rcu_note_context_switch()</text>
</g>
<g
transform="translate(3131.2648,-585.6713)"
diff --git a/Documentation/RCU/Design/Requirements/Requirements.html b/Documentation/RCU/Design/Requirements/Requirements.html
deleted file mode 100644
index 467251f7fef6..000000000000
--- a/Documentation/RCU/Design/Requirements/Requirements.html
+++ /dev/null
@@ -1,3401 +0,0 @@
-<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
- "http://www.w3.org/TR/html4/loose.dtd">
- <html>
- <head><title>A Tour Through RCU's Requirements [LWN.net]</title>
- <meta HTTP-EQUIV="Content-Type" CONTENT="text/html; charset=utf-8">
-
-<h1>A Tour Through RCU's Requirements</h1>
-
-<p>Copyright IBM Corporation, 2015</p>
-<p>Author: Paul E.&nbsp;McKenney</p>
-<p><i>The initial version of this document appeared in the
-<a href="https://lwn.net/">LWN</a> articles
-<a href="https://lwn.net/Articles/652156/">here</a>,
-<a href="https://lwn.net/Articles/652677/">here</a>, and
-<a href="https://lwn.net/Articles/653326/">here</a>.</i></p>
-
-<h2>Introduction</h2>
-
-<p>
-Read-copy update (RCU) is a synchronization mechanism that is often
-used as a replacement for reader-writer locking.
-RCU is unusual in that updaters do not block readers,
-which means that RCU's read-side primitives can be exceedingly fast
-and scalable.
-In addition, updaters can make useful forward progress concurrently
-with readers.
-However, all this concurrency between RCU readers and updaters does raise
-the question of exactly what RCU readers are doing, which in turn
-raises the question of exactly what RCU's requirements are.
-
-<p>
-This document therefore summarizes RCU's requirements, and can be thought
-of as an informal, high-level specification for RCU.
-It is important to understand that RCU's specification is primarily
-empirical in nature;
-in fact, I learned about many of these requirements the hard way.
-This situation might cause some consternation. However, not only
-has this learning process been a lot of fun, but it has also been
-a great privilege to work with so many people willing to apply
-technologies in interesting new ways.
-
-<p>
-All that aside, here are the categories of currently known RCU requirements:
-</p>
-
-<ol>
-<li> <a href="#Fundamental Requirements">
- Fundamental Requirements</a>
-<li> <a href="#Fundamental Non-Requirements">Fundamental Non-Requirements</a>
-<li> <a href="#Parallelism Facts of Life">
- Parallelism Facts of Life</a>
-<li> <a href="#Quality-of-Implementation Requirements">
- Quality-of-Implementation Requirements</a>
-<li> <a href="#Linux Kernel Complications">
- Linux Kernel Complications</a>
-<li> <a href="#Software-Engineering Requirements">
- Software-Engineering Requirements</a>
-<li> <a href="#Other RCU Flavors">
- Other RCU Flavors</a>
-<li> <a href="#Possible Future Changes">
- Possible Future Changes</a>
-</ol>
-
-<p>
-This is followed by a <a href="#Summary">summary</a>.
-However, the answer to each quick quiz immediately follows the quiz.
-Select the big white space with your mouse to see the answer.
-
-<h2><a name="Fundamental Requirements">Fundamental Requirements</a></h2>
-
-<p>
-RCU's fundamental requirements are the closest thing RCU has to hard
-mathematical requirements.
-These are:
-
-<ol>
-<li> <a href="#Grace-Period Guarantee">
- Grace-Period Guarantee</a>
-<li> <a href="#Publish-Subscribe Guarantee">
- Publish-Subscribe Guarantee</a>
-<li> <a href="#Memory-Barrier Guarantees">
- Memory-Barrier Guarantees</a>
-<li> <a href="#RCU Primitives Guaranteed to Execute Unconditionally">
- RCU Primitives Guaranteed to Execute Unconditionally</a>
-<li> <a href="#Guaranteed Read-to-Write Upgrade">
- Guaranteed Read-to-Write Upgrade</a>
-</ol>
-
-<h3><a name="Grace-Period Guarantee">Grace-Period Guarantee</a></h3>
-
-<p>
-RCU's grace-period guarantee is unusual in being premeditated:
-Jack Slingwine and I had this guarantee firmly in mind when we started
-work on RCU (then called &ldquo;rclock&rdquo;) in the early 1990s.
-That said, the past two decades of experience with RCU have produced
-a much more detailed understanding of this guarantee.
-
-<p>
-RCU's grace-period guarantee allows updaters to wait for the completion
-of all pre-existing RCU read-side critical sections.
-An RCU read-side critical section
-begins with the marker <tt>rcu_read_lock()</tt> and ends with
-the marker <tt>rcu_read_unlock()</tt>.
-These markers may be nested, and RCU treats a nested set as one
-big RCU read-side critical section.
-Production-quality implementations of <tt>rcu_read_lock()</tt> and
-<tt>rcu_read_unlock()</tt> are extremely lightweight, and in
-fact have exactly zero overhead in Linux kernels built for production
-use with <tt>CONFIG_PREEMPT=n</tt>.
-
-<p>
-This guarantee allows ordering to be enforced with extremely low
-overhead to readers, for example:
-
-<blockquote>
-<pre>
- 1 int x, y;
- 2
- 3 void thread0(void)
- 4 {
- 5 rcu_read_lock();
- 6 r1 = READ_ONCE(x);
- 7 r2 = READ_ONCE(y);
- 8 rcu_read_unlock();
- 9 }
-10
-11 void thread1(void)
-12 {
-13 WRITE_ONCE(x, 1);
-14 synchronize_rcu();
-15 WRITE_ONCE(y, 1);
-16 }
-</pre>
-</blockquote>
-
-<p>
-Because the <tt>synchronize_rcu()</tt> on line&nbsp;14 waits for
-all pre-existing readers, any instance of <tt>thread0()</tt> that
-loads a value of zero from <tt>x</tt> must complete before
-<tt>thread1()</tt> stores to <tt>y</tt>, so that instance must
-also load a value of zero from <tt>y</tt>.
-Similarly, any instance of <tt>thread0()</tt> that loads a value of
-one from <tt>y</tt> must have started after the
-<tt>synchronize_rcu()</tt> started, and must therefore also load
-a value of one from <tt>x</tt>.
-Therefore, the outcome:
-<blockquote>
-<pre>
-(r1 == 0 &amp;&amp; r2 == 1)
-</pre>
-</blockquote>
-cannot happen.
-
-<table>
-<tr><th>&nbsp;</th></tr>
-<tr><th align="left">Quick Quiz:</th></tr>
-<tr><td>
- Wait a minute!
- You said that updaters can make useful forward progress concurrently
- with readers, but pre-existing readers will block
- <tt>synchronize_rcu()</tt>!!!
- Just who are you trying to fool???
-</td></tr>
-<tr><th align="left">Answer:</th></tr>
-<tr><td bgcolor="#ffffff"><font color="ffffff">
- First, if updaters do not wish to be blocked by readers, they can use
- <tt>call_rcu()</tt> or <tt>kfree_rcu()</tt>, which will
- be discussed later.
- Second, even when using <tt>synchronize_rcu()</tt>, the other
- update-side code does run concurrently with readers, whether
- pre-existing or not.
-</font></td></tr>
-<tr><td>&nbsp;</td></tr>
-</table>
-
-<p>
-This scenario resembles one of the first uses of RCU in
-<a href="https://en.wikipedia.org/wiki/DYNIX">DYNIX/ptx</a>,
-which managed a distributed lock manager's transition into
-a state suitable for handling recovery from node failure,
-more or less as follows:
-
-<blockquote>
-<pre>
- 1 #define STATE_NORMAL 0
- 2 #define STATE_WANT_RECOVERY 1
- 3 #define STATE_RECOVERING 2
- 4 #define STATE_WANT_NORMAL 3
- 5
- 6 int state = STATE_NORMAL;
- 7
- 8 void do_something_dlm(void)
- 9 {
-10 int state_snap;
-11
-12 rcu_read_lock();
-13 state_snap = READ_ONCE(state);
-14 if (state_snap == STATE_NORMAL)
-15 do_something();
-16 else
-17 do_something_carefully();
-18 rcu_read_unlock();
-19 }
-20
-21 void start_recovery(void)
-22 {
-23 WRITE_ONCE(state, STATE_WANT_RECOVERY);
-24 synchronize_rcu();
-25 WRITE_ONCE(state, STATE_RECOVERING);
-26 recovery();
-27 WRITE_ONCE(state, STATE_WANT_NORMAL);
-28 synchronize_rcu();
-29 WRITE_ONCE(state, STATE_NORMAL);
-30 }
-</pre>
-</blockquote>
-
-<p>
-The RCU read-side critical section in <tt>do_something_dlm()</tt>
-works with the <tt>synchronize_rcu()</tt> in <tt>start_recovery()</tt>
-to guarantee that <tt>do_something()</tt> never runs concurrently
-with <tt>recovery()</tt>, but with little or no synchronization
-overhead in <tt>do_something_dlm()</tt>.
-
-<table>
-<tr><th>&nbsp;</th></tr>
-<tr><th align="left">Quick Quiz:</th></tr>
-<tr><td>
- Why is the <tt>synchronize_rcu()</tt> on line&nbsp;28 needed?
-</td></tr>
-<tr><th align="left">Answer:</th></tr>
-<tr><td bgcolor="#ffffff"><font color="ffffff">
- Without that extra grace period, memory reordering could result in
- <tt>do_something_dlm()</tt> executing <tt>do_something()</tt>
- concurrently with the last bits of <tt>recovery()</tt>.
-</font></td></tr>
-<tr><td>&nbsp;</td></tr>
-</table>
-
-<p>
-In order to avoid fatal problems such as deadlocks,
-an RCU read-side critical section must not contain calls to
-<tt>synchronize_rcu()</tt>.
-Similarly, an RCU read-side critical section must not
-contain anything that waits, directly or indirectly, on completion of
-an invocation of <tt>synchronize_rcu()</tt>.
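-
-<p>
-For example, the following sketch self-deadlocks (or worse), because
-the <tt>synchronize_rcu()</tt> waits for an RCU read-side critical
-section that cannot end until that same <tt>synchronize_rcu()</tt>
-returns:
-
-<blockquote>
-<pre>
- 1 rcu_read_lock();
- 2 synchronize_rcu(); /* BUG: Waits on our own reader. */
- 3 rcu_read_unlock();
</pre>
-</blockquote>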
-
-<p>
-Although RCU's grace-period guarantee is useful in and of itself, with
-<a href="https://lwn.net/Articles/573497/">quite a few use cases</a>,
-it would be good to be able to use RCU to coordinate read-side
-access to linked data structures.
-For this, the grace-period guarantee is not sufficient, as can
-be seen in function <tt>add_gp_buggy()</tt> below.
-We will look at the reader's code later, but in the meantime, just think of
-the reader as locklessly picking up the <tt>gp</tt> pointer,
-and, if the value loaded is non-<tt>NULL</tt>, locklessly accessing the
-<tt>-&gt;a</tt> and <tt>-&gt;b</tt> fields.
-
-<blockquote>
-<pre>
- 1 bool add_gp_buggy(int a, int b)
- 2 {
- 3 p = kmalloc(sizeof(*p), GFP_KERNEL);
- 4 if (!p)
- 5     return false; /* kmalloc() failed. */
- 6 spin_lock(&amp;gp_lock);
- 7 if (rcu_access_pointer(gp)) {
- 8 spin_unlock(&amp;gp_lock);
- 9 return false;
-10 }
-11 p-&gt;a = a;
-12   p-&gt;b = b;
-13 gp = p; /* ORDERING BUG */
-14 spin_unlock(&amp;gp_lock);
-15 return true;
-16 }
-</pre>
-</blockquote>
-
-<p>
-The problem is that both the compiler and weakly ordered CPUs are within
-their rights to reorder this code as follows:
-
-<blockquote>
-<pre>
- 1 bool add_gp_buggy_optimized(int a, int b)
- 2 {
- 3 p = kmalloc(sizeof(*p), GFP_KERNEL);
- 4 if (!p)
- 5     return false; /* kmalloc() failed. */
- 6 spin_lock(&amp;gp_lock);
- 7 if (rcu_access_pointer(gp)) {
- 8 spin_unlock(&amp;gp_lock);
- 9 return false;
-10 }
-<b>11 gp = p; /* ORDERING BUG */
-12 p-&gt;a = a;
-13   p-&gt;b = b;</b>
-14 spin_unlock(&amp;gp_lock);
-15 return true;
-16 }
-</pre>
-</blockquote>
-
-<p>
-If an RCU reader fetches <tt>gp</tt> just after
-<tt>add_gp_buggy_optimized</tt> executes line&nbsp;11,
-it will see garbage in the <tt>-&gt;a</tt> and <tt>-&gt;b</tt>
-fields.
-And this is but one of many ways in which compiler and hardware optimizations
-could cause trouble.
-Therefore, we clearly need some way to prevent the compiler and the CPU from
-reordering in this manner, which brings us to the publish-subscribe
-guarantee discussed in the next section.
-
-<h3><a name="Publish-Subscribe Guarantee">Publish/Subscribe Guarantee</a></h3>
-
-<p>
-RCU's publish-subscribe guarantee allows data to be inserted
-into a linked data structure without disrupting RCU readers.
-The updater uses <tt>rcu_assign_pointer()</tt> to insert the
-new data, and readers use <tt>rcu_dereference()</tt> to
-access data, whether new or old.
-The following shows an example of insertion:
-
-<blockquote>
-<pre>
- 1 bool add_gp(int a, int b)
- 2 {
- 3 p = kmalloc(sizeof(*p), GFP_KERNEL);
- 4 if (!p)
- 5     return false; /* kmalloc() failed. */
- 6 spin_lock(&amp;gp_lock);
- 7 if (rcu_access_pointer(gp)) {
- 8 spin_unlock(&amp;gp_lock);
- 9 return false;
-10 }
-11 p-&gt;a = a;
-12   p-&gt;b = b;
-13 rcu_assign_pointer(gp, p);
-14 spin_unlock(&amp;gp_lock);
-15 return true;
-16 }
-</pre>
-</blockquote>
-
-<p>
-The <tt>rcu_assign_pointer()</tt> on line&nbsp;13 is conceptually
-equivalent to a simple assignment statement, but also guarantees
-that its assignment will
-happen after the two assignments in lines&nbsp;11 and&nbsp;12,
-similar to the C11 <tt>memory_order_release</tt> store operation.
-It also prevents any number of &ldquo;interesting&rdquo; compiler
-optimizations, for example, the use of <tt>gp</tt> as a scratch
-location immediately preceding the assignment.
-
-<table>
-<tr><th>&nbsp;</th></tr>
-<tr><th align="left">Quick Quiz:</th></tr>
-<tr><td>
- But <tt>rcu_assign_pointer()</tt> does nothing to prevent the
- two assignments to <tt>p-&gt;a</tt> and <tt>p-&gt;b</tt>
- from being reordered.
- Can't that also cause problems?
-</td></tr>
-<tr><th align="left">Answer:</th></tr>
-<tr><td bgcolor="#ffffff"><font color="ffffff">
- No, it cannot.
- The readers cannot see either of these two fields until
- the assignment to <tt>gp</tt>, by which time both fields are
- fully initialized.
- So reordering the assignments
- to <tt>p-&gt;a</tt> and <tt>p-&gt;b</tt> cannot possibly
- cause any problems.
-</font></td></tr>
-<tr><td>&nbsp;</td></tr>
-</table>
-
-<p>
-It is tempting to assume that the reader need not do anything special
-to control its accesses to the RCU-protected data,
-as shown in <tt>do_something_gp_buggy()</tt> below:
-
-<blockquote>
-<pre>
- 1 bool do_something_gp_buggy(void)
- 2 {
- 3 rcu_read_lock();
- 4 p = gp; /* OPTIMIZATIONS GALORE!!! */
- 5 if (p) {
- 6 do_something(p-&gt;a, p-&gt;b);
- 7 rcu_read_unlock();
- 8 return true;
- 9 }
-10 rcu_read_unlock();
-11 return false;
-12 }
-</pre>
-</blockquote>
-
-<p>
-However, this temptation must be resisted because there are a
-surprisingly large number of ways that the compiler
-(to say nothing of
-<a href="https://h71000.www7.hp.com/wizard/wiz_2637.html">DEC Alpha CPUs</a>)
-can trip this code up.
-For but one example, if the compiler were short of registers, it
-might choose to refetch from <tt>gp</tt> rather than keeping
-a separate copy in <tt>p</tt> as follows:
-
-<blockquote>
-<pre>
- 1 bool do_something_gp_buggy_optimized(void)
- 2 {
- 3 rcu_read_lock();
- 4 if (gp) { /* OPTIMIZATIONS GALORE!!! */
-<b> 5 do_something(gp-&gt;a, gp-&gt;b);</b>
- 6 rcu_read_unlock();
- 7 return true;
- 8 }
- 9 rcu_read_unlock();
-10 return false;
-11 }
-</pre>
-</blockquote>
-
-<p>
-If this function ran concurrently with a series of updates that
-replaced the current structure with a new one,
-the fetches of <tt>gp-&gt;a</tt>
-and <tt>gp-&gt;b</tt> might well come from two different structures,
-which could cause serious confusion.
-To prevent this (and much else besides), <tt>do_something_gp()</tt> uses
-<tt>rcu_dereference()</tt> to fetch from <tt>gp</tt>:
-
-<blockquote>
-<pre>
- 1 bool do_something_gp(void)
- 2 {
- 3 rcu_read_lock();
- 4 p = rcu_dereference(gp);
- 5 if (p) {
- 6 do_something(p-&gt;a, p-&gt;b);
- 7 rcu_read_unlock();
- 8 return true;
- 9 }
-10 rcu_read_unlock();
-11 return false;
-12 }
-</pre>
-</blockquote>
-
-<p>
-The <tt>rcu_dereference()</tt> uses volatile casts and (for DEC Alpha)
-memory barriers in the Linux kernel.
-Should a
-<a href="http://www.rdrop.com/users/paulmck/RCU/consume.2015.07.13a.pdf">high-quality implementation of C11 <tt>memory_order_consume</tt> [PDF]</a>
-ever appear, then <tt>rcu_dereference()</tt> could be implemented
-as a <tt>memory_order_consume</tt> load.
-Regardless of the exact implementation, a pointer fetched by
-<tt>rcu_dereference()</tt> may not be used outside of the
-outermost RCU read-side critical section containing that
-<tt>rcu_dereference()</tt>, unless protection of
-the corresponding data element has been passed from RCU to some
-other synchronization mechanism, most commonly locking or
-<a href="https://www.kernel.org/doc/Documentation/RCU/rcuref.txt">reference counting</a>.
-
-<p>
-In short, updaters use <tt>rcu_assign_pointer()</tt> and readers
-use <tt>rcu_dereference()</tt>, and these two RCU API elements
-work together to ensure that readers have a consistent view of
-newly added data elements.
-
-<p>
-Of course, it is also necessary to remove elements from RCU-protected
-data structures, for example, using the following process:
-
-<ol>
-<li> Remove the data element from the enclosing structure.
-<li> Wait for all pre-existing RCU read-side critical sections
- to complete (because only pre-existing readers can possibly have
- a reference to the newly removed data element).
-<li> At this point, only the updater has a reference to the
- newly removed data element, so it can safely reclaim
- the data element, for example, by passing it to <tt>kfree()</tt>.
-</ol>
-
-This process is implemented by <tt>remove_gp_synchronous()</tt>:
-
-<blockquote>
-<pre>
- 1 bool remove_gp_synchronous(void)
- 2 {
- 3 struct foo *p;
- 4
- 5 spin_lock(&amp;gp_lock);
- 6 p = rcu_access_pointer(gp);
- 7 if (!p) {
- 8 spin_unlock(&amp;gp_lock);
- 9 return false;
-10 }
-11 rcu_assign_pointer(gp, NULL);
-12 spin_unlock(&amp;gp_lock);
-13 synchronize_rcu();
-14 kfree(p);
-15 return true;
-16 }
-</pre>
-</blockquote>
-
-<p>
-This function is straightforward, with line&nbsp;13 waiting for a grace
-period before line&nbsp;14 frees the old data element.
-This waiting ensures that readers will reach line&nbsp;7 of
-<tt>do_something_gp()</tt> before the data element referenced by
-<tt>p</tt> is freed.
-The <tt>rcu_access_pointer()</tt> on line&nbsp;6 is similar to
-<tt>rcu_dereference()</tt>, except that:
-
-<ol>
-<li> The value returned by <tt>rcu_access_pointer()</tt>
- cannot be dereferenced.
- If you want to access the value pointed to as well as
- the pointer itself, use <tt>rcu_dereference()</tt>
- instead of <tt>rcu_access_pointer()</tt>.
-<li> The call to <tt>rcu_access_pointer()</tt> need not be
- protected.
- In contrast, <tt>rcu_dereference()</tt> must either be
- within an RCU read-side critical section or in a code
- segment where the pointer cannot change, for example, in
- code protected by the corresponding update-side lock.
-</ol>
-
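-<p>
-As a brief sketch of the distinction (reusing <tt>gp</tt> and
-<tt>gp_lock</tt> from the earlier examples; <tt>do_update()</tt> is
-hypothetical):
-
-<blockquote>
-<pre>
- 1 /* Update side: Only the pointer value itself is needed. */
- 2 spin_lock(&amp;gp_lock);
- 3 if (rcu_access_pointer(gp))
- 4   do_update();
- 5 spin_unlock(&amp;gp_lock);
- 6
- 7 /* Read side: The pointed-to fields will be dereferenced. */
- 8 rcu_read_lock();
- 9 p = rcu_dereference(gp);
-10 if (p)
-11   do_something(p-&gt;a, p-&gt;b);
-12 rcu_read_unlock();
-</pre>
-</blockquote>
-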
-<table>
-<tr><th>&nbsp;</th></tr>
-<tr><th align="left">Quick Quiz:</th></tr>
-<tr><td>
- Without the <tt>rcu_dereference()</tt> or the
- <tt>rcu_access_pointer()</tt>, what destructive optimizations
- might the compiler make use of?
-</td></tr>
-<tr><th align="left">Answer:</th></tr>
-<tr><td bgcolor="#ffffff"><font color="ffffff">
- Let's start with what happens to <tt>do_something_gp()</tt>
- if it fails to use <tt>rcu_dereference()</tt>.
- It could reuse a value formerly fetched from this same pointer.
- It could also fetch the pointer from <tt>gp</tt> in a byte-at-a-time
-	manner, resulting in <i>load tearing</i>, in turn resulting in a bytewise
- mash-up of two distinct pointer values.
- It might even use value-speculation optimizations, where it makes
- a wrong guess, but by the time it gets around to checking the
- value, an update has changed the pointer to match the wrong guess.
- Too bad about any dereferences that returned pre-initialization garbage
- in the meantime!
- </font>
-
- <p><font color="ffffff">
- For <tt>remove_gp_synchronous()</tt>, as long as all modifications
- to <tt>gp</tt> are carried out while holding <tt>gp_lock</tt>,
- the above optimizations are harmless.
- However, <tt>sparse</tt> will complain if you
- define <tt>gp</tt> with <tt>__rcu</tt> and then
- access it without using
- either <tt>rcu_access_pointer()</tt> or <tt>rcu_dereference()</tt>.
-</font></td></tr>
-<tr><td>&nbsp;</td></tr>
-</table>
-
-<p>
-In short, RCU's publish-subscribe guarantee is provided by the combination
-of <tt>rcu_assign_pointer()</tt> and <tt>rcu_dereference()</tt>.
-This guarantee allows data elements to be safely added to RCU-protected
-linked data structures without disrupting RCU readers.
-This guarantee can be used in combination with the grace-period
-guarantee to also allow data elements to be removed from RCU-protected
-linked data structures, again without disrupting RCU readers.
-
-<p>
-This guarantee was only partially premeditated.
-DYNIX/ptx used an explicit memory barrier for publication, but had nothing
-resembling <tt>rcu_dereference()</tt> for subscription, nor did it
-have anything resembling the <tt>smp_read_barrier_depends()</tt>
-that was later subsumed into <tt>rcu_dereference()</tt> and later
-still into <tt>READ_ONCE()</tt>.
-The need for these operations made itself known quite suddenly at a
-late-1990s meeting with the DEC Alpha architects, back in the days when
-DEC was still a free-standing company.
-It took the Alpha architects a good hour to convince me that any sort
-of barrier would ever be needed, and it then took me a good <i>two</i> hours
-to convince them that their documentation did not make this point clear.
-More recent work with the C and C++ standards committees has provided
-much education on tricks and traps from the compiler.
-In short, compilers were much less tricky in the early 1990s, but in
-2015, don't even think about omitting <tt>rcu_dereference()</tt>!
-
-<h3><a name="Memory-Barrier Guarantees">Memory-Barrier Guarantees</a></h3>
-
-<p>
-The previous section's simple linked-data-structure scenario clearly
-demonstrates the need for RCU's stringent memory-ordering guarantees on
-systems with more than one CPU:
-
-<ol>
-<li> Each CPU that has an RCU read-side critical section that
- begins before <tt>synchronize_rcu()</tt> starts is
- guaranteed to execute a full memory barrier between the time
- that the RCU read-side critical section ends and the time that
- <tt>synchronize_rcu()</tt> returns.
- Without this guarantee, a pre-existing RCU read-side critical section
- might hold a reference to the newly removed <tt>struct foo</tt>
- after the <tt>kfree()</tt> on line&nbsp;14 of
- <tt>remove_gp_synchronous()</tt>.
-<li> Each CPU that has an RCU read-side critical section that ends
- after <tt>synchronize_rcu()</tt> returns is guaranteed
- to execute a full memory barrier between the time that
- <tt>synchronize_rcu()</tt> begins and the time that the RCU
- read-side critical section begins.
- Without this guarantee, a later RCU read-side critical section
- running after the <tt>kfree()</tt> on line&nbsp;14 of
- <tt>remove_gp_synchronous()</tt> might
- later run <tt>do_something_gp()</tt> and find the
- newly deleted <tt>struct foo</tt>.
-<li> If the task invoking <tt>synchronize_rcu()</tt> remains
- on a given CPU, then that CPU is guaranteed to execute a full
- memory barrier sometime during the execution of
- <tt>synchronize_rcu()</tt>.
- This guarantee ensures that the <tt>kfree()</tt> on
- line&nbsp;14 of <tt>remove_gp_synchronous()</tt> really does
- execute after the removal on line&nbsp;11.
-<li> If the task invoking <tt>synchronize_rcu()</tt> migrates
- among a group of CPUs during that invocation, then each of the
- CPUs in that group is guaranteed to execute a full memory barrier
- sometime during the execution of <tt>synchronize_rcu()</tt>.
-	This guarantee also ensures that the <tt>kfree()</tt> on
-	line&nbsp;14 of <tt>remove_gp_synchronous()</tt> really does
-	execute after the removal on line&nbsp;11, even in the case
-	where the thread executing the <tt>synchronize_rcu()</tt>
-	migrates in the meantime.
-</ol>
-
-<table>
-<tr><th>&nbsp;</th></tr>
-<tr><th align="left">Quick Quiz:</th></tr>
-<tr><td>
- Given that multiple CPUs can start RCU read-side critical sections
- at any time without any ordering whatsoever, how can RCU possibly
- tell whether or not a given RCU read-side critical section starts
- before a given instance of <tt>synchronize_rcu()</tt>?
-</td></tr>
-<tr><th align="left">Answer:</th></tr>
-<tr><td bgcolor="#ffffff"><font color="ffffff">
- If RCU cannot tell whether or not a given
- RCU read-side critical section starts before a
- given instance of <tt>synchronize_rcu()</tt>,
- then it must assume that the RCU read-side critical section
- started first.
- In other words, a given instance of <tt>synchronize_rcu()</tt>
- can avoid waiting on a given RCU read-side critical section only
- if it can prove that <tt>synchronize_rcu()</tt> started first.
- </font>
-
- <p><font color="ffffff">
- A related question is &ldquo;When <tt>rcu_read_lock()</tt>
- doesn't generate any code, why does it matter how it relates
- to a grace period?&rdquo;
- The answer is that it is not the relationship of
- <tt>rcu_read_lock()</tt> itself that is important, but rather
- the relationship of the code within the enclosed RCU read-side
- critical section to the code preceding and following the
- grace period.
- If we take this viewpoint, then a given RCU read-side critical
- section begins before a given grace period when some access
- preceding the grace period observes the effect of some access
- within the critical section, in which case none of the accesses
- within the critical section may observe the effects of any
- access following the grace period.
- </font>
-
- <p><font color="ffffff">
- As of late 2016, mathematical models of RCU take this
- viewpoint, for example, see slides&nbsp;62 and&nbsp;63
- of the
- <a href="http://www2.rdrop.com/users/paulmck/scalability/paper/LinuxMM.2016.10.04c.LCE.pdf">2016 LinuxCon EU</a>
- presentation.
-</font></td></tr>
-<tr><td>&nbsp;</td></tr>
-</table>
-
-<table>
-<tr><th>&nbsp;</th></tr>
-<tr><th align="left">Quick Quiz:</th></tr>
-<tr><td>
- The first and second guarantees require unbelievably strict ordering!
-	Are all these memory barriers <i>really</i> required?
-</td></tr>
-<tr><th align="left">Answer:</th></tr>
-<tr><td bgcolor="#ffffff"><font color="ffffff">
- Yes, they really are required.
- To see why the first guarantee is required, consider the following
- sequence of events:
- </font>
-
- <ol>
- <li> <font color="ffffff">
- CPU 1: <tt>rcu_read_lock()</tt>
- </font>
- <li> <font color="ffffff">
- CPU 1: <tt>q = rcu_dereference(gp);
- /* Very likely to return p. */</tt>
- </font>
- <li> <font color="ffffff">
- CPU 0: <tt>list_del_rcu(p);</tt>
- </font>
- <li> <font color="ffffff">
- CPU 0: <tt>synchronize_rcu()</tt> starts.
- </font>
- <li> <font color="ffffff">
- CPU 1: <tt>do_something_with(q-&gt;a);
- /* No smp_mb(), so might happen after kfree(). */</tt>
- </font>
- <li> <font color="ffffff">
- CPU 1: <tt>rcu_read_unlock()</tt>
- </font>
- <li> <font color="ffffff">
- CPU 0: <tt>synchronize_rcu()</tt> returns.
- </font>
- <li> <font color="ffffff">
- CPU 0: <tt>kfree(p);</tt>
- </font>
- </ol>
-
- <p><font color="ffffff">
- Therefore, there absolutely must be a full memory barrier between the
- end of the RCU read-side critical section and the end of the
- grace period.
- </font>
-
- <p><font color="ffffff">
- The sequence of events demonstrating the necessity of the second rule
- is roughly similar:
- </font>
-
- <ol>
- <li> <font color="ffffff">CPU 0: <tt>list_del_rcu(p);</tt>
- </font>
- <li> <font color="ffffff">CPU 0: <tt>synchronize_rcu()</tt> starts.
- </font>
- <li> <font color="ffffff">CPU 1: <tt>rcu_read_lock()</tt>
- </font>
- <li> <font color="ffffff">CPU 1: <tt>q = rcu_dereference(gp);
- /* Might return p if no memory barrier. */</tt>
- </font>
- <li> <font color="ffffff">CPU 0: <tt>synchronize_rcu()</tt> returns.
- </font>
- <li> <font color="ffffff">CPU 0: <tt>kfree(p);</tt>
- </font>
- <li> <font color="ffffff">
- CPU 1: <tt>do_something_with(q-&gt;a); /* Boom!!! */</tt>
- </font>
- <li> <font color="ffffff">CPU 1: <tt>rcu_read_unlock()</tt>
- </font>
- </ol>
-
- <p><font color="ffffff">
- And similarly, without a memory barrier between the beginning of the
- grace period and the beginning of the RCU read-side critical section,
- CPU&nbsp;1 might end up accessing the freelist.
- </font>
-
- <p><font color="ffffff">
- The &ldquo;as if&rdquo; rule of course applies, so that any
- implementation that acts as if the appropriate memory barriers
- were in place is a correct implementation.
- That said, it is much easier to fool yourself into believing
- that you have adhered to the as-if rule than it is to actually
- adhere to it!
-</font></td></tr>
-<tr><td>&nbsp;</td></tr>
-</table>
-
-<table>
-<tr><th>&nbsp;</th></tr>
-<tr><th align="left">Quick Quiz:</th></tr>
-<tr><td>
- You claim that <tt>rcu_read_lock()</tt> and <tt>rcu_read_unlock()</tt>
- generate absolutely no code in some kernel builds.
- This means that the compiler might arbitrarily rearrange consecutive
- RCU read-side critical sections.
- Given such rearrangement, if a given RCU read-side critical section
- is done, how can you be sure that all prior RCU read-side critical
- sections are done?
- Won't the compiler rearrangements make that impossible to determine?
-</td></tr>
-<tr><th align="left">Answer:</th></tr>
-<tr><td bgcolor="#ffffff"><font color="ffffff">
- In cases where <tt>rcu_read_lock()</tt> and <tt>rcu_read_unlock()</tt>
- generate absolutely no code, RCU infers quiescent states only at
- special locations, for example, within the scheduler.
- Because calls to <tt>schedule()</tt> had better prevent calling-code
- accesses to shared variables from being rearranged across the call to
- <tt>schedule()</tt>, if RCU detects the end of a given RCU read-side
- critical section, it will necessarily detect the end of all prior
- RCU read-side critical sections, no matter how aggressively the
- compiler scrambles the code.
- </font>
-
- <p><font color="ffffff">
- Again, this all assumes that the compiler cannot scramble code across
- calls to the scheduler, out of interrupt handlers, into the idle loop,
- into user-mode code, and so on.
- But if your kernel build allows that sort of scrambling, you have broken
- far more than just RCU!
-</font></td></tr>
-<tr><td>&nbsp;</td></tr>
-</table>
-
-<p>
-Note that these memory-barrier requirements do not replace the fundamental
-RCU requirement that a grace period wait for all pre-existing readers.
-On the contrary, the memory barriers called out in this section must operate in
-such a way as to <i>enforce</i> this fundamental requirement.
-Of course, different implementations enforce this requirement in different
-ways, but enforce it they must.
-
-<h3><a name="RCU Primitives Guaranteed to Execute Unconditionally">RCU Primitives Guaranteed to Execute Unconditionally</a></h3>
-
-<p>
-The common-case RCU primitives are unconditional.
-They are invoked, they do their job, and they return, with no possibility
-of error, and no need to retry.
-This is a key RCU design philosophy.
-
-<p>
-However, this philosophy is pragmatic rather than pigheaded.
-If someone comes up with a good justification for a particular conditional
-RCU primitive, it might well be implemented and added.
-After all, this guarantee was reverse-engineered, not premeditated.
-The unconditional nature of the RCU primitives was initially an
-accident of implementation, and later experience with conditional
-synchronization primitives caused me to elevate this
-accident to a guarantee.
-Therefore, the justification for adding a conditional primitive to
-RCU would need to be based on detailed and compelling use cases.
-
-<h3><a name="Guaranteed Read-to-Write Upgrade">Guaranteed Read-to-Write Upgrade</a></h3>
-
-<p>
-As far as RCU is concerned, it is always possible to carry out an
-update within an RCU read-side critical section.
-For example, that RCU read-side critical section might search for
-a given data element, and then might acquire the update-side
-spinlock in order to update that element, all while remaining
-in that RCU read-side critical section.
-Of course, it is necessary to exit the RCU read-side critical section
-before invoking <tt>synchronize_rcu()</tt>. However, this
-inconvenience can be avoided through use of the
-<tt>call_rcu()</tt> and <tt>kfree_rcu()</tt> API members
-described later in this document.
-
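-<p>
-A minimal sketch of such an upgrade, in which the per-element
-<tt>-&gt;lock</tt> is illustrative:
-
-<blockquote>
-<pre>
- 1 rcu_read_lock();
- 2 p = rcu_dereference(gp);
- 3 if (p) {
- 4   spin_lock(&amp;p-&gt;lock); /* Read-to-write upgrade. */
- 5   p-&gt;a++; /* Update while still within the reader. */
- 6   spin_unlock(&amp;p-&gt;lock);
- 7 }
- 8 rcu_read_unlock();
-</pre>
-</blockquote>
-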
-<table>
-<tr><th>&nbsp;</th></tr>
-<tr><th align="left">Quick Quiz:</th></tr>
-<tr><td>
- But how does the upgrade-to-write operation exclude other readers?
-</td></tr>
-<tr><th align="left">Answer:</th></tr>
-<tr><td bgcolor="#ffffff"><font color="ffffff">
- It doesn't, just like normal RCU updates, which also do not exclude
- RCU readers.
-</font></td></tr>
-<tr><td>&nbsp;</td></tr>
-</table>
-
-<p>
-This guarantee allows lookup code to be shared between read-side
-and update-side code, and was premeditated, appearing in the earliest
-DYNIX/ptx RCU documentation.
-
-<h2><a name="Fundamental Non-Requirements">Fundamental Non-Requirements</a></h2>
-
-<p>
-RCU provides extremely lightweight readers, and its read-side guarantees,
-though quite useful, are correspondingly lightweight.
-It is therefore all too easy to assume that RCU is guaranteeing more
-than it really is.
-Of course, the list of things that RCU does not guarantee is infinitely
-long. However, the following sections list a few non-guarantees that
-have caused confusion.
-Except where otherwise noted, these non-guarantees were premeditated.
-
-<ol>
-<li> <a href="#Readers Impose Minimal Ordering">
- Readers Impose Minimal Ordering</a>
-<li> <a href="#Readers Do Not Exclude Updaters">
- Readers Do Not Exclude Updaters</a>
-<li> <a href="#Updaters Only Wait For Old Readers">
- Updaters Only Wait For Old Readers</a>
-<li> <a href="#Grace Periods Don't Partition Read-Side Critical Sections">
- Grace Periods Don't Partition Read-Side Critical Sections</a>
-<li> <a href="#Read-Side Critical Sections Don't Partition Grace Periods">
- Read-Side Critical Sections Don't Partition Grace Periods</a>
-</ol>
-
-<h3><a name="Readers Impose Minimal Ordering">Readers Impose Minimal Ordering</a></h3>
-
-<p>
-Reader-side markers such as <tt>rcu_read_lock()</tt> and
-<tt>rcu_read_unlock()</tt> provide absolutely no ordering guarantees
-except through their interaction with the grace-period APIs such as
-<tt>synchronize_rcu()</tt>.
-To see this, consider the following pair of threads:
-
-<blockquote>
-<pre>
- 1 void thread0(void)
- 2 {
- 3 rcu_read_lock();
- 4 WRITE_ONCE(x, 1);
- 5 rcu_read_unlock();
- 6 rcu_read_lock();
- 7 WRITE_ONCE(y, 1);
- 8 rcu_read_unlock();
- 9 }
-10
-11 void thread1(void)
-12 {
-13 rcu_read_lock();
-14 r1 = READ_ONCE(y);
-15 rcu_read_unlock();
-16 rcu_read_lock();
-17 r2 = READ_ONCE(x);
-18 rcu_read_unlock();
-19 }
-</pre>
-</blockquote>
-
-<p>
-After <tt>thread0()</tt> and <tt>thread1()</tt> execute
-concurrently, it is quite possible to have
-
-<blockquote>
-<pre>
-(r1 == 1 &amp;&amp; r2 == 0)
-</pre>
-</blockquote>
-
-(that is, <tt>y</tt> appears to have been assigned before <tt>x</tt>),
-which would not be possible if <tt>rcu_read_lock()</tt> and
-<tt>rcu_read_unlock()</tt> had much in the way of ordering
-properties.
-But they do not, so the CPU is within its rights
-to do significant reordering.
-This is by design: Any significant ordering constraints would slow down
-these fast-path APIs.
-
-<table>
-<tr><th>&nbsp;</th></tr>
-<tr><th align="left">Quick Quiz:</th></tr>
-<tr><td>
- Can't the compiler also reorder this code?
-</td></tr>
-<tr><th align="left">Answer:</th></tr>
-<tr><td bgcolor="#ffffff"><font color="ffffff">
- No, the volatile casts in <tt>READ_ONCE()</tt> and
- <tt>WRITE_ONCE()</tt> prevent the compiler from reordering in
- this particular case.
-</font></td></tr>
-<tr><td>&nbsp;</td></tr>
-</table>
-
-<h3><a name="Readers Do Not Exclude Updaters">Readers Do Not Exclude Updaters</a></h3>
-
-<p>
-Neither <tt>rcu_read_lock()</tt> nor <tt>rcu_read_unlock()</tt>
-exclude updates.
-All they do is to prevent grace periods from ending.
-The following example illustrates this:
-
-<blockquote>
-<pre>
- 1 void thread0(void)
- 2 {
- 3 rcu_read_lock();
- 4 r1 = READ_ONCE(y);
- 5 if (r1) {
- 6 do_something_with_nonzero_x();
- 7 r2 = READ_ONCE(x);
- 8 WARN_ON(!r2); /* BUG!!! */
- 9 }
-10 rcu_read_unlock();
-11 }
-12
-13 void thread1(void)
-14 {
-15 spin_lock(&amp;my_lock);
-16 WRITE_ONCE(x, 1);
-17 WRITE_ONCE(y, 1);
-18 spin_unlock(&amp;my_lock);
-19 }
-</pre>
-</blockquote>
-
-<p>
-If the <tt>thread0()</tt> function's <tt>rcu_read_lock()</tt>
-excluded the <tt>thread1()</tt> function's update,
-the <tt>WARN_ON()</tt> could never fire.
-But the fact is that <tt>rcu_read_lock()</tt> does not exclude
-much of anything aside from subsequent grace periods, of which
-<tt>thread1()</tt> has none, so the
-<tt>WARN_ON()</tt> can and does fire.
-
-<h3><a name="Updaters Only Wait For Old Readers">Updaters Only Wait For Old Readers</a></h3>
-
-<p>
-It might be tempting to assume that after <tt>synchronize_rcu()</tt>
-completes, there are no readers executing.
-This temptation must be avoided because
-new readers can start immediately after <tt>synchronize_rcu()</tt>
-starts, and <tt>synchronize_rcu()</tt> is under no
-obligation to wait for these new readers.
-
-<table>
-<tr><th>&nbsp;</th></tr>
-<tr><th align="left">Quick Quiz:</th></tr>
-<tr><td>
- Suppose that synchronize_rcu() did wait until <i>all</i>
- readers had completed instead of waiting only on
- pre-existing readers.
- For how long would the updater be able to rely on there
- being no readers?
-</td></tr>
-<tr><th align="left">Answer:</th></tr>
-<tr><td bgcolor="#ffffff"><font color="ffffff">
- For no time at all.
- Even if <tt>synchronize_rcu()</tt> were to wait until
- all readers had completed, a new reader might start immediately after
- <tt>synchronize_rcu()</tt> completed.
- Therefore, the code following
- <tt>synchronize_rcu()</tt> can <i>never</i> rely on there being
- no readers.
-</font></td></tr>
-<tr><td>&nbsp;</td></tr>
-</table>
-
-<h3><a name="Grace Periods Don't Partition Read-Side Critical Sections">
-Grace Periods Don't Partition Read-Side Critical Sections</a></h3>
-
-<p>
-It is tempting to assume that if any part of one RCU read-side critical
-section precedes a given grace period, and if any part of another RCU
-read-side critical section follows that same grace period, then all of
-the first RCU read-side critical section must precede all of the second.
-However, this just isn't the case: A single grace period does not
-partition the set of RCU read-side critical sections.
-An example of this situation can be illustrated as follows, where
-<tt>a</tt>, <tt>b</tt>, and <tt>c</tt> are initially all zero:
-
-<blockquote>
-<pre>
- 1 void thread0(void)
- 2 {
- 3 rcu_read_lock();
- 4 WRITE_ONCE(a, 1);
- 5 WRITE_ONCE(b, 1);
- 6 rcu_read_unlock();
- 7 }
- 8
- 9 void thread1(void)
-10 {
-11 r1 = READ_ONCE(a);
-12 synchronize_rcu();
-13 WRITE_ONCE(c, 1);
-14 }
-15
-16 void thread2(void)
-17 {
-18 rcu_read_lock();
-19 r2 = READ_ONCE(b);
-20 r3 = READ_ONCE(c);
-21 rcu_read_unlock();
-22 }
-</pre>
-</blockquote>
-
-<p>
-It turns out that the outcome:
-
-<blockquote>
-<pre>
-(r1 == 1 &amp;&amp; r2 == 0 &amp;&amp; r3 == 1)
-</pre>
-</blockquote>
-
-is entirely possible.
-The following figure shows how this can happen, with each circled
-<tt>QS</tt> indicating the point at which RCU recorded a
-<i>quiescent state</i> for each thread, that is, a state in which
-RCU knows that the thread cannot be in the midst of an RCU read-side
-critical section that started before the current grace period:
-
-<p><img src="GPpartitionReaders1.svg" alt="GPpartitionReaders1.svg" width="60%"></p>
-
-<p>
-If it is necessary to partition RCU read-side critical sections in this
-manner, it is necessary to use two grace periods, where the first
-grace period is known to end before the second grace period starts:
-
-<blockquote>
-<pre>
- 1 void thread0(void)
- 2 {
- 3 rcu_read_lock();
- 4 WRITE_ONCE(a, 1);
- 5 WRITE_ONCE(b, 1);
- 6 rcu_read_unlock();
- 7 }
- 8
- 9 void thread1(void)
-10 {
-11 r1 = READ_ONCE(a);
-12 synchronize_rcu();
-13 WRITE_ONCE(c, 1);
-14 }
-15
-16 void thread2(void)
-17 {
-18 r2 = READ_ONCE(c);
-19 synchronize_rcu();
-20 WRITE_ONCE(d, 1);
-21 }
-22
-23 void thread3(void)
-24 {
-25 rcu_read_lock();
-26 r3 = READ_ONCE(b);
-27 r4 = READ_ONCE(d);
-28 rcu_read_unlock();
-29 }
-</pre>
-</blockquote>
-
-<p>
-Here, if <tt>(r1 == 1)</tt>, then
-<tt>thread0()</tt>'s write to <tt>b</tt> must happen
-before the end of <tt>thread1()</tt>'s grace period.
-If in addition <tt>(r4 == 1)</tt>, then
-<tt>thread3()</tt>'s read from <tt>b</tt> must happen
-after the beginning of <tt>thread2()</tt>'s grace period.
-If it is also the case that <tt>(r2 == 1)</tt>, then the
-end of <tt>thread1()</tt>'s grace period must precede the
-beginning of <tt>thread2()</tt>'s grace period.
-This means that the two RCU read-side critical sections cannot overlap,
-guaranteeing that <tt>(r3 == 1)</tt>.
-As a result, the outcome:
-
-<blockquote>
-<pre>
-(r1 == 1 &amp;&amp; r2 == 1 &amp;&amp; r3 == 0 &amp;&amp; r4 == 1)
-</pre>
-</blockquote>
-
-cannot happen.
-
-<p>
-This non-requirement was also non-premeditated, but became apparent
-when studying RCU's interaction with memory ordering.
-
-<h3><a name="Read-Side Critical Sections Don't Partition Grace Periods">
-Read-Side Critical Sections Don't Partition Grace Periods</a></h3>
-
-<p>
-It is also tempting to assume that if an RCU read-side critical section
-happens between a pair of grace periods, then those grace periods cannot
-overlap.
-However, this temptation leads nowhere good, as can be illustrated by
-the following, with all variables initially zero:
-
-<blockquote>
-<pre>
- 1 void thread0(void)
- 2 {
- 3 rcu_read_lock();
- 4 WRITE_ONCE(a, 1);
- 5 WRITE_ONCE(b, 1);
- 6 rcu_read_unlock();
- 7 }
- 8
- 9 void thread1(void)
-10 {
-11 r1 = READ_ONCE(a);
-12 synchronize_rcu();
-13 WRITE_ONCE(c, 1);
-14 }
-15
-16 void thread2(void)
-17 {
-18 rcu_read_lock();
-19 WRITE_ONCE(d, 1);
-20 r2 = READ_ONCE(c);
-21 rcu_read_unlock();
-22 }
-23
-24 void thread3(void)
-25 {
-26 r3 = READ_ONCE(d);
-27 synchronize_rcu();
-28 WRITE_ONCE(e, 1);
-29 }
-30
-31 void thread4(void)
-32 {
-33 rcu_read_lock();
-34 r4 = READ_ONCE(b);
-35 r5 = READ_ONCE(e);
-36 rcu_read_unlock();
-37 }
-</pre>
-</blockquote>
-
-<p>
-In this case, the outcome:
-
-<blockquote>
-<pre>
-(r1 == 1 &amp;&amp; r2 == 1 &amp;&amp; r3 == 1 &amp;&amp; r4 == 0 &amp;&amp; r5 == 1)
-</pre>
-</blockquote>
-
-is entirely possible, as illustrated below:
-
-<p><img src="ReadersPartitionGP1.svg" alt="ReadersPartitionGP1.svg" width="100%"></p>
-
-<p>
-Again, an RCU read-side critical section can overlap almost all of a
-given grace period, just so long as it does not overlap the entire
-grace period.
-As a result, an RCU read-side critical section cannot partition a pair
-of RCU grace periods.
-
-<table>
-<tr><th>&nbsp;</th></tr>
-<tr><th align="left">Quick Quiz:</th></tr>
-<tr><td>
- How long a sequence of grace periods, each separated by an RCU
- read-side critical section, would be required to partition the RCU
- read-side critical sections at the beginning and end of the chain?
-</td></tr>
-<tr><th align="left">Answer:</th></tr>
-<tr><td bgcolor="#ffffff"><font color="ffffff">
- In theory, an infinite number.
- In practice, an unknown number that is sensitive to both implementation
- details and timing considerations.
- Therefore, even in practice, RCU users must abide by the
- theoretical rather than the practical answer.
-</font></td></tr>
-<tr><td>&nbsp;</td></tr>
-</table>
-
-<h2><a name="Parallelism Facts of Life">Parallelism Facts of Life</a></h2>
-
-<p>
-These parallelism facts of life are by no means specific to RCU, but
-the RCU implementation must abide by them.
-They therefore bear repeating:
-
-<ol>
-<li> Any CPU or task may be delayed at any time,
- and any attempts to avoid these delays by disabling
- preemption, interrupts, or whatever are completely futile.
- This is most obvious in preemptible user-level
- environments and in virtualized environments (where
- a given guest OS's VCPUs can be preempted at any time by
- the underlying hypervisor), but can also happen in bare-metal
- environments due to ECC errors, NMIs, and other hardware
- events.
- Although a delay of more than about 20 seconds can result
- in splats, the RCU implementation is obligated to use
- algorithms that can tolerate extremely long delays, but where
- &ldquo;extremely long&rdquo; is not long enough to allow
- wrap-around when incrementing a 64-bit counter.
-<li> Both the compiler and the CPU can reorder memory accesses.
- Where it matters, RCU must use compiler directives and
- memory-barrier instructions to preserve ordering.
-<li> Conflicting writes to memory locations in any given cache line
- will result in expensive cache misses.
- Greater numbers of concurrent writes and more-frequent
- concurrent writes will result in more dramatic slowdowns.
- RCU is therefore obligated to use algorithms that have
- sufficient locality to avoid significant performance and
- scalability problems.
-<li> As a rough rule of thumb, only one CPU's worth of processing
- may be carried out under the protection of any given exclusive
- lock.
- RCU must therefore use scalable locking designs.
-<li> Counters are finite, especially on 32-bit systems.
- RCU's use of counters must therefore tolerate counter wrap,
- or be designed such that counter wrap would take way more
- time than a single system is likely to run.
- An uptime of ten years is quite possible, a runtime
- of a century much less so.
- As an example of the latter, RCU's dyntick-idle nesting counter
- allows 54 bits for interrupt nesting level (this counter
- is 64 bits even on a 32-bit system).
- Overflowing this counter requires 2<sup>54</sup>
- half-interrupts on a given CPU without that CPU ever going idle.
-	If a half-interrupt happened every microsecond, it would take
-	570 years of runtime to overflow this counter (a quick check of
-	this figure appears just after this list), which is currently
-	believed to be an acceptably long time.
-<li> Linux systems can have thousands of CPUs running a single
- Linux kernel in a single shared-memory environment.
- RCU must therefore pay close attention to high-end scalability.
-</ol>
-
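-<p>
-As a quick check of the fifth item's 570-year figure (pure arithmetic,
-expressed here as a C fragment):
-
-<blockquote>
-<pre>
- 1 double us = 1ULL &lt;&lt; 54; /* Half-interrupts at one per microsecond. */
- 2 double years = us / 1e6 / 3600 / 24 / 365;
- 3 /* years is roughly 571, comfortably matching the figure above. */
-</pre>
-</blockquote>
-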
-<p>
-This last parallelism fact of life means that RCU must pay special
-attention to the preceding facts of life.
-The idea that Linux might scale to systems with thousands of CPUs would
-have been met with some skepticism in the 1990s, but these requirements
-would otherwise have been unsurprising, even in the early 1990s.
-
-<h2><a name="Quality-of-Implementation Requirements">Quality-of-Implementation Requirements</a></h2>
-
-<p>
-These sections list quality-of-implementation requirements.
-Although an RCU implementation that ignores these requirements could
-still be used, it would likely be subject to limitations that would
-make it inappropriate for industrial-strength production use.
-Classes of quality-of-implementation requirements are as follows:
-
-<ol>
-<li> <a href="#Specialization">Specialization</a>
-<li> <a href="#Performance and Scalability">Performance and Scalability</a>
-<li> <a href="#Forward Progress">Forward Progress</a>
-<li> <a href="#Composability">Composability</a>
-<li> <a href="#Corner Cases">Corner Cases</a>
-</ol>
-
-<p>
-These classes are covered in the following sections.
-
-<h3><a name="Specialization">Specialization</a></h3>
-
-<p>
-RCU is and always has been intended primarily for read-mostly situations,
-which means that RCU's read-side primitives are optimized, often at the
-expense of its update-side primitives.
-Experience thus far is captured by the following list of situations:
-
-<ol>
-<li> Read-mostly data, where stale and inconsistent data is not
- a problem: RCU works great!
-<li> Read-mostly data, where data must be consistent:
- RCU works well.
-<li> Read-write data, where data must be consistent:
- RCU <i>might</i> work OK.
- Or not.
-<li> Write-mostly data, where data must be consistent:
- RCU is very unlikely to be the right tool for the job,
- with the following exceptions, where RCU can provide:
- <ol type=a>
- <li> Existence guarantees for update-friendly mechanisms.
- <li> Wait-free read-side primitives for real-time use.
- </ol>
-</ol>
-
-<p>
-This focus on read-mostly situations means that RCU must interoperate
-with other synchronization primitives.
-For example, the <tt>add_gp()</tt> and <tt>remove_gp_synchronous()</tt>
-examples discussed earlier use RCU to protect readers and locking to
-coordinate updaters.
-However, the need extends much farther, requiring that a variety of
-synchronization primitives be legal within RCU read-side critical sections,
-including spinlocks, sequence locks, atomic operations, reference
-counters, and memory barriers.
-
-<table>
-<tr><th>&nbsp;</th></tr>
-<tr><th align="left">Quick Quiz:</th></tr>
-<tr><td>
- What about sleeping locks?
-</td></tr>
-<tr><th align="left">Answer:</th></tr>
-<tr><td bgcolor="#ffffff"><font color="ffffff">
- These are forbidden within Linux-kernel RCU read-side critical
- sections because it is not legal to place a quiescent state
- (in this case, voluntary context switch) within an RCU read-side
- critical section.
- However, sleeping locks may be used within userspace RCU read-side
- critical sections, and also within Linux-kernel sleepable RCU
- <a href="#Sleepable RCU"><font color="ffffff">(SRCU)</font></a>
- read-side critical sections.
-	In addition, the -rt patchset turns spinlocks into
-	sleeping locks so that the corresponding critical sections
-	can be preempted, which also means that these sleeplockified
-	spinlocks (but not other sleeping locks!) may be acquired within
- -rt-Linux-kernel RCU read-side critical sections.
- </font>
-
- <p><font color="ffffff">
- Note that it <i>is</i> legal for a normal RCU read-side
-	critical section to conditionally acquire a sleeping lock
-	(as in <tt>mutex_trylock()</tt>), but only as long as it does
-	not loop indefinitely attempting to conditionally acquire that
-	sleeping lock.
- The key point is that things like <tt>mutex_trylock()</tt>
- either return with the mutex held, or return an error indication if
- the mutex was not immediately available.
- Either way, <tt>mutex_trylock()</tt> returns immediately without
- sleeping.
-</font></td></tr>
-<tr><td>&nbsp;</td></tr>
-</table>
-
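-<p>
-As a quick sketch of the trylock pattern described in the answer above
-(<tt>m</tt> and <tt>do_update()</tt> are hypothetical):
-
-<blockquote>
-<pre>
- 1 rcu_read_lock();
- 2 if (mutex_trylock(&amp;m)) { /* Returns immediately, never sleeps. */
- 3   do_update();
- 4   mutex_unlock(&amp;m); /* Also never sleeps. */
- 5 }
- 6 rcu_read_unlock();
-</pre>
-</blockquote>
-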
-<p>
-It often comes as a surprise just how many algorithms do not require a
-consistent view of data, with network routing being the poster child.
-Internet routing algorithms take significant time to propagate
-updates, so that by the time an update arrives at a given system,
-that system has been sending network traffic the wrong way for
-a considerable length of time.
-Having a few threads continue to send traffic the wrong way for a
-few more milliseconds is clearly not a problem: In the worst case,
-TCP retransmissions will eventually get the data where it needs to go.
-In general, when tracking the state of the universe outside of the
-computer, some level of inconsistency must be tolerated due to
-speed-of-light delays if nothing else.
-
-<p>
-Furthermore, uncertainty about external state is inherent in many cases.
-For example, a pair of veterinarians might use heartbeat to determine
-whether or not a given cat was alive.
-But how long should they wait after the last heartbeat to decide that
-the cat is in fact dead?
-Waiting less than 400 milliseconds makes no sense because this would
-mean that a relaxed cat would be considered to cycle between death
-and life more than 100 times per minute.
-Moreover, just as with human beings, a cat's heart might stop for
-some period of time, so the exact wait period is a judgment call.
-One of our pair of veterinarians might wait 30 seconds before pronouncing
-the cat dead, while the other might insist on waiting a full minute.
-The two veterinarians would then disagree on the state of the cat during
-the final 30 seconds of the minute following the last heartbeat.
-
-<p>
-Interestingly enough, this same situation applies to hardware.
-When push comes to shove, how do we tell whether or not some
-external server has failed?
-We send messages to it periodically, and declare it failed if we
-don't receive a response within a given period of time.
-Policy decisions can usually tolerate short
-periods of inconsistency.
-The policy was decided some time ago, and is only now being put into
-effect, so a few milliseconds of delay is normally inconsequential.
-
-<p>
-However, there are algorithms that absolutely must see consistent data.
-For example, the translation between a user-level SystemV semaphore
-ID to the corresponding in-kernel data structure is protected by RCU,
-but it is absolutely forbidden to update a semaphore that has just been
-removed.
-In the Linux kernel, this need for consistency is accommodated by acquiring
-spinlocks located in the in-kernel data structure from within
-the RCU read-side critical section, as in the sketch below.
-Many other techniques may be used, and are in fact used within the
-Linux kernel.
-
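-<p>
-In this sketch, the <tt>-&gt;deleted</tt> flag and the lookup helper
-are illustrative stand-ins for the real code in <tt>ipc/sem.c</tt>:
-
-<blockquote>
-<pre>
- 1 rcu_read_lock();
- 2 sma = sem_lookup(semid); /* Illustrative RCU-protected lookup. */
- 3 if (sma) {
- 4   spin_lock(&amp;sma-&gt;lock);
- 5   if (!sma-&gt;deleted) /* Reject just-removed semaphores. */
- 6     do_semaphore_update(sma);
- 7   spin_unlock(&amp;sma-&gt;lock);
- 8 }
- 9 rcu_read_unlock();
-</pre>
-</blockquote>
-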
-<p>
-In short, RCU is not required to maintain consistency, and other
-mechanisms may be used in concert with RCU when consistency is required.
-RCU's specialization allows it to do its job extremely well, and its
-ability to interoperate with other synchronization mechanisms allows
-the right mix of synchronization tools to be used for a given job.
-
-<h3><a name="Performance and Scalability">Performance and Scalability</a></h3>
-
-<p>
-Energy efficiency is a critical component of performance today,
-and Linux-kernel RCU implementations must therefore avoid unnecessarily
-awakening idle CPUs.
-I cannot claim that this requirement was premeditated.
-In fact, I learned of it during a telephone conversation in which I
-was given &ldquo;frank and open&rdquo; feedback on the importance
-of energy efficiency in battery-powered systems and on specific
-energy-efficiency shortcomings of the Linux-kernel RCU implementation.
-In my experience, the battery-powered embedded community will consider
-any unnecessary wakeups to be extremely unfriendly acts.
-So much so that mere Linux-kernel-mailing-list posts are
-insufficient to vent their ire.
-
-<p>
-Memory consumption is not particularly important in most
-situations, and has become decreasingly
-so as memory sizes have expanded and memory
-costs have plummeted.
-However, as I learned from Matt Mackall's
-<a href="http://elinux.org/Linux_Tiny-FAQ">bloatwatch</a>
-efforts, memory footprint is critically important on single-CPU systems with
-non-preemptible (<tt>CONFIG_PREEMPT=n</tt>) kernels, and thus
-<a href="https://lkml.kernel.org/g/20090113221724.GA15307@linux.vnet.ibm.com">tiny RCU</a>
-was born.
-Josh Triplett has since taken over the small-memory banner with his
-<a href="https://tiny.wiki.kernel.org/">Linux kernel tinification</a>
-project, which resulted in
-<a href="#Sleepable RCU">SRCU</a>
-becoming optional for those kernels not needing it.
-
-<p>
-The remaining performance requirements are, for the most part,
-unsurprising.
-For example, in keeping with RCU's read-side specialization,
-<tt>rcu_dereference()</tt> should have negligible overhead (for
-example, suppression of a few minor compiler optimizations).
-Similarly, in non-preemptible environments, <tt>rcu_read_lock()</tt> and
-<tt>rcu_read_unlock()</tt> should have exactly zero overhead.
-
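-<p>
-For example, in <tt>CONFIG_PREEMPT=n</tt> kernels, the read-side
-markers reduce to something like the following simplified sketch,
-which omits lockdep instrumentation and is not the exact kernel
-source:
-
-<blockquote>
-<pre>
- 1 static inline void rcu_read_lock(void)
- 2 {
- 3   preempt_disable(); /* just a compiler barrier: zero instructions */
- 4 }
- 5
- 6 static inline void rcu_read_unlock(void)
- 7 {
- 8   preempt_enable(); /* likewise emits no instructions */
- 9 }
-</pre>
-</blockquote>
-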
-<p>
-In preemptible environments, in the case where the RCU read-side
-critical section was not preempted (as will be the case for the
-highest-priority real-time process), <tt>rcu_read_lock()</tt> and
-<tt>rcu_read_unlock()</tt> should have minimal overhead.
-In particular, they should not contain atomic read-modify-write
-operations, memory-barrier instructions, preemption disabling,
-interrupt disabling, or backwards branches.
-However, in the case where the RCU read-side critical section was preempted,
-<tt>rcu_read_unlock()</tt> may acquire spinlocks and disable interrupts.
-This is why it is better to nest an RCU read-side critical section
-within a preempt-disable region than vice versa, at least in cases
-where that critical section is short enough to avoid unduly degrading
-real-time latencies.
-
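-<p>
-The preferred nesting therefore looks as follows, with
-<tt>do_brief_thing()</tt> standing in for a suitably short operation:
-
-<blockquote>
-<pre>
- 1 preempt_disable();
- 2 rcu_read_lock();    /* reader cannot be preempted, so... */
- 3 do_brief_thing();
- 4 rcu_read_unlock();  /* ...this remains lightweight */
- 5 preempt_enable();
-</pre>
-</blockquote>
-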
-<p>
-The <tt>synchronize_rcu()</tt> grace-period-wait primitive is
-optimized for throughput.
-It may therefore incur several milliseconds of latency in addition to
-the duration of the longest RCU read-side critical section.
-On the other hand, multiple concurrent invocations of
-<tt>synchronize_rcu()</tt> are required to use batching optimizations
-so that they can be satisfied by a single underlying grace-period-wait
-operation.
-For example, in the Linux kernel, it is not unusual for a single
-grace-period-wait operation to serve more than
-<a href="https://www.usenix.org/conference/2004-usenix-annual-technical-conference/making-rcu-safe-deep-sub-millisecond-response">1,000 separate invocations</a>
-of <tt>synchronize_rcu()</tt>, thus amortizing the per-invocation
-overhead down to nearly zero.
-However, the grace-period optimization is also required to avoid
-measurable degradation of real-time scheduling and interrupt latencies.
-
-<p>
-In some cases, the multi-millisecond <tt>synchronize_rcu()</tt>
-latencies are unacceptable.
-In these cases, <tt>synchronize_rcu_expedited()</tt> may be used
-instead, reducing the grace-period latency down to a few tens of
-microseconds on small systems, at least in cases where the RCU read-side
-critical sections are short.
-There are currently no special latency requirements for
-<tt>synchronize_rcu_expedited()</tt> on large systems, but,
-consistent with the empirical nature of the RCU specification,
-that is subject to change.
-However, there most definitely are scalability requirements:
-A storm of <tt>synchronize_rcu_expedited()</tt> invocations on 4096
-CPUs should at least make reasonable forward progress.
-In return for its shorter latencies, <tt>synchronize_rcu_expedited()</tt>
-is permitted to impose modest degradation of real-time latency
-on non-idle online CPUs.
-Here, &ldquo;modest&rdquo; means roughly the same latency
-degradation as a scheduling-clock interrupt.
-
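-<p>
-For example, an updater might choose between the two as in the
-following sketch, where <tt>latency_sensitive</tt> is a hypothetical
-mode flag:
-
-<blockquote>
-<pre>
- 1 /* p was previously removed from the RCU-protected structure. */
- 2 if (latency_sensitive)
- 3   synchronize_rcu_expedited(); /* tens of microseconds */
- 4 else
- 5   synchronize_rcu();           /* optimized for throughput */
- 6 kfree(p);
-</pre>
-</blockquote>
-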
-<p>
-There are a number of situations where even
-<tt>synchronize_rcu_expedited()</tt>'s reduced grace-period
-latency is unacceptable.
-In these situations, the asynchronous <tt>call_rcu()</tt> can be
-used in place of <tt>synchronize_rcu()</tt> as follows:
-
-<blockquote>
-<pre>
- 1 struct foo {
- 2 int a;
- 3 int b;
- 4 struct rcu_head rh;
- 5 };
- 6
- 7 static void remove_gp_cb(struct rcu_head *rhp)
- 8 {
- 9 struct foo *p = container_of(rhp, struct foo, rh);
-10
-11 kfree(p);
-12 }
-13
-14 bool remove_gp_asynchronous(void)
-15 {
-16 struct foo *p;
-17
-18 spin_lock(&amp;gp_lock);
-19 p = rcu_access_pointer(gp);
-20 if (!p) {
-21 spin_unlock(&amp;gp_lock);
-22 return false;
-23 }
-24 rcu_assign_pointer(gp, NULL);
-25 call_rcu(&amp;p-&gt;rh, remove_gp_cb);
-26 spin_unlock(&amp;gp_lock);
-27 return true;
-28 }
-</pre>
-</blockquote>
-
-<p>
-A definition of <tt>struct foo</tt> is finally needed, and appears
-on lines&nbsp;1-5.
-The function <tt>remove_gp_cb()</tt> is passed to <tt>call_rcu()</tt>
-on line&nbsp;25, and will be invoked after the end of a subsequent
-grace period.
-This gets the same effect as <tt>remove_gp_synchronous()</tt>,
-but without forcing the updater to wait for a grace period to elapse.
-The <tt>call_rcu()</tt> function may be used in a number of
-situations where neither <tt>synchronize_rcu()</tt> nor
-<tt>synchronize_rcu_expedited()</tt> would be legal,
-including within preempt-disable code, <tt>local_bh_disable()</tt> code,
-interrupt-disable code, and interrupt handlers.
-However, even <tt>call_rcu()</tt> is illegal within NMI handlers
-and from idle and offline CPUs.
-The callback function (<tt>remove_gp_cb()</tt> in this case) will be
-executed within a softirq (software interrupt) environment within the
-Linux kernel,
-either within a real softirq handler or under the protection
-of <tt>local_bh_disable()</tt>.
-In both the Linux kernel and in userspace, it is bad practice to
-write an RCU callback function that takes too long.
-Long-running operations should be relegated to separate threads or
-(in the Linux kernel) workqueues.
-
-<table>
-<tr><th>&nbsp;</th></tr>
-<tr><th align="left">Quick Quiz:</th></tr>
-<tr><td>
- Why does line&nbsp;19 use <tt>rcu_access_pointer()</tt>?
- After all, <tt>call_rcu()</tt> on line&nbsp;25 stores into the
- structure, which would interact badly with concurrent insertions.
- Doesn't this mean that <tt>rcu_dereference()</tt> is required?
-</td></tr>
-<tr><th align="left">Answer:</th></tr>
-<tr><td bgcolor="#ffffff"><font color="ffffff">
- Presumably the <tt>-&gt;gp_lock</tt> acquired on line&nbsp;18 excludes
- any changes, including any insertions that <tt>rcu_dereference()</tt>
- would protect against.
- Therefore, any insertions will be delayed until after
- <tt>-&gt;gp_lock</tt>
- is released on line&nbsp;25, which in turn means that
- <tt>rcu_access_pointer()</tt> suffices.
-</font></td></tr>
-<tr><td>&nbsp;</td></tr>
-</table>
-
-<p>
-However, all that <tt>remove_gp_cb()</tt> is doing is
-invoking <tt>kfree()</tt> on the data element.
-This is a common idiom, and is supported by <tt>kfree_rcu()</tt>,
-which allows &ldquo;fire and forget&rdquo; operation as shown below:
-
-<blockquote>
-<pre>
- 1 struct foo {
- 2 int a;
- 3 int b;
- 4 struct rcu_head rh;
- 5 };
- 6
- 7 bool remove_gp_faf(void)
- 8 {
- 9 struct foo *p;
-10
-11 spin_lock(&amp;gp_lock);
-12 p = rcu_dereference(gp);
-13 if (!p) {
-14 spin_unlock(&amp;gp_lock);
-15 return false;
-16 }
-17 rcu_assign_pointer(gp, NULL);
-18 kfree_rcu(p, rh);
-19 spin_unlock(&amp;gp_lock);
-20 return true;
-21 }
-</pre>
-</blockquote>
-
-<p>
-Note that <tt>remove_gp_faf()</tt> simply invokes
-<tt>kfree_rcu()</tt> and proceeds, without any need to pay any
-further attention to the subsequent grace period and <tt>kfree()</tt>.
-It is permissible to invoke <tt>kfree_rcu()</tt> from the same
-environments as for <tt>call_rcu()</tt>.
-Interestingly enough, DYNIX/ptx had the equivalents of
-<tt>call_rcu()</tt> and <tt>kfree_rcu()</tt>, but not
-<tt>synchronize_rcu()</tt>.
-This was due to the fact that RCU was not heavily used within DYNIX/ptx,
-so the very few places that needed something like
-<tt>synchronize_rcu()</tt> simply open-coded it.
-
-<table>
-<tr><th>&nbsp;</th></tr>
-<tr><th align="left">Quick Quiz:</th></tr>
-<tr><td>
- Earlier it was claimed that <tt>call_rcu()</tt> and
- <tt>kfree_rcu()</tt> allowed updaters to avoid being blocked
- by readers.
- But how can that be correct, given that the invocation of the callback
- and the freeing of the memory (respectively) must still wait for
- a grace period to elapse?
-</td></tr>
-<tr><th align="left">Answer:</th></tr>
-<tr><td bgcolor="#ffffff"><font color="ffffff">
- We could define things this way, but keep in mind that this sort of
- definition would say that updates in garbage-collected languages
- cannot complete until the next time the garbage collector runs,
- which does not seem at all reasonable.
- The key point is that in most cases, an updater using either
- <tt>call_rcu()</tt> or <tt>kfree_rcu()</tt> can proceed to the
- next update as soon as it has invoked <tt>call_rcu()</tt> or
- <tt>kfree_rcu()</tt>, without having to wait for a subsequent
- grace period.
-</font></td></tr>
-<tr><td>&nbsp;</td></tr>
-</table>
-
-<p>
-But what if the updater must wait for the completion of code to be
-executed after the end of the grace period, but has other tasks
-that can be carried out in the meantime?
-The polling-style <tt>get_state_synchronize_rcu()</tt> and
-<tt>cond_synchronize_rcu()</tt> functions may be used for this
-purpose, as shown below:
-
-<blockquote>
-<pre>
- 1 bool remove_gp_poll(void)
- 2 {
- 3 struct foo *p;
- 4 unsigned long s;
- 5
- 6 spin_lock(&amp;gp_lock);
- 7 p = rcu_access_pointer(gp);
- 8 if (!p) {
- 9 spin_unlock(&amp;gp_lock);
-10 return false;
-11 }
-12 rcu_assign_pointer(gp, NULL);
-13 spin_unlock(&amp;gp_lock);
-14 s = get_state_synchronize_rcu();
-15 do_something_while_waiting();
-16 cond_synchronize_rcu(s);
-17 kfree(p);
-18 return true;
-19 }
-</pre>
-</blockquote>
-
-<p>
-On line&nbsp;14, <tt>get_state_synchronize_rcu()</tt> obtains a
-&ldquo;cookie&rdquo; from RCU,
-then line&nbsp;15 carries out other tasks,
-and finally, line&nbsp;16 returns immediately if a grace period has
-elapsed in the meantime, but otherwise waits as required.
-The need for <tt>get_state_synchronize_rcu()</tt> and
-<tt>cond_synchronize_rcu()</tt> has appeared quite recently,
-so it is too early to tell whether they will stand the test of time.
-
-<p>
-RCU thus provides a range of tools to allow updaters to strike the
-required tradeoff between latency, flexibility, and CPU overhead.
-
-<h3><a name="Forward Progress">Forward Progress</a></h3>
-
-<p>
-In theory, delaying grace-period completion and callback invocation
-is harmless.
-In practice, not only are memory sizes finite but also callbacks sometimes
-do wakeups, and sufficiently deferred wakeups can be difficult
-to distinguish from system hangs.
-Therefore, RCU must provide a number of mechanisms to promote forward
-progress.
-
-<p>
-These mechanisms are not foolproof, nor can they be.
-For one simple example, an infinite loop in an RCU read-side critical
-section must by definition prevent later grace periods from ever completing.
-For a more involved example, consider a 64-CPU system built with
-<tt>CONFIG_RCU_NOCB_CPU=y</tt> and booted with <tt>rcu_nocbs=1-63</tt>,
-where CPUs&nbsp;1 through&nbsp;63 spin in tight loops that invoke
-<tt>call_rcu()</tt>.
-Even if these tight loops also contain calls to <tt>cond_resched()</tt>
-(thus allowing grace periods to complete), CPU&nbsp;0 simply will
-not be able to invoke callbacks as fast as the other 63 CPUs can
-register them, at least not until the system runs out of memory.
-In both of these examples, the Spiderman principle applies: With great
-power comes great responsibility.
-However, short of this level of abuse, RCU is required to
-ensure timely completion of grace periods and timely invocation of
-callbacks.
-
-<p>
-RCU takes the following steps to encourage timely completion of
-grace periods:
-
-<ol>
-<li> If a grace period fails to complete within 100&nbsp;milliseconds,
- RCU causes future invocations of <tt>cond_resched()</tt> on
- the holdout CPUs to provide an RCU quiescent state.
-	RCU also causes those CPUs' <tt>need_resched()</tt> invocations
-	to return <tt>true</tt>, but only after the corresponding CPU's
-	next scheduling-clock interrupt.
-<li> CPUs mentioned in the <tt>nohz_full</tt> kernel boot parameter
- can run indefinitely in the kernel without scheduling-clock
- interrupts, which defeats the above <tt>need_resched()</tt>
-	stratagem.
- RCU will therefore invoke <tt>resched_cpu()</tt> on any
- <tt>nohz_full</tt> CPUs still holding out after
- 109&nbsp;milliseconds.
-<li> In kernels built with <tt>CONFIG_RCU_BOOST=y</tt>, if a given
- task that has been preempted within an RCU read-side critical
- section is holding out for more than 500&nbsp;milliseconds,
- RCU will resort to priority boosting.
-<li> If a CPU is still holding out 10&nbsp;seconds into the grace
- period, RCU will invoke <tt>resched_cpu()</tt> on it regardless
- of its <tt>nohz_full</tt> state.
-</ol>
-
-<p>
-The above values are defaults for systems running with <tt>HZ=1000</tt>.
-They will vary as the value of <tt>HZ</tt> varies, and can also be
-changed using the relevant Kconfig options and kernel boot parameters.
-RCU currently does not do much sanity checking of these
-parameters, so please use caution when changing them.
-Note that these forward-progress measures are provided only for RCU,
-not for
-<a href="#Sleepable RCU">SRCU</a> or
-<a href="#Tasks RCU">Tasks RCU</a>.
-
-<p>
-RCU takes the following steps in <tt>call_rcu()</tt> to encourage timely
-invocation of callbacks when any given non-<tt>rcu_nocbs</tt> CPU has
-10,000 callbacks, or has 10,000 more callbacks than it had the last time
-encouragement was provided:
-
-<ol>
-<li> Starts a grace period, if one is not already in progress.
-<li> Forces immediate checking for quiescent states, rather than
- waiting for three milliseconds to have elapsed since the
- beginning of the grace period.
-<li> Immediately tags the CPU's callbacks with their grace period
- completion numbers, rather than waiting for the <tt>RCU_SOFTIRQ</tt>
- handler to get around to it.
-<li> Lifts callback-execution batch limits, which speeds up callback
- invocation at the expense of degrading realtime response.
-</ol>
-
-<p>
-Again, these are default values when running at <tt>HZ=1000</tt>,
-and can be overridden.
-Again, these forward-progress measures are provided only for RCU,
-not for
-<a href="#Sleepable RCU">SRCU</a> or
-<a href="#Tasks RCU">Tasks RCU</a>.
-Even for RCU, callback-invocation forward progress for <tt>rcu_nocbs</tt>
-CPUs is much less well-developed, in part because workloads benefiting
-from <tt>rcu_nocbs</tt> CPUs tend to invoke <tt>call_rcu()</tt>
-relatively infrequently.
-If workloads emerge that need both <tt>rcu_nocbs</tt> CPUs and high
-<tt>call_rcu()</tt> invocation rates, then additional forward-progress
-work will be required.
-
-<h3><a name="Composability">Composability</a></h3>
-
-<p>
-Composability has received much attention in recent years, perhaps in part
-due to the collision of multicore hardware with object-oriented techniques
-designed in single-threaded environments for single-threaded use.
-And in theory, RCU read-side critical sections may be composed, and in
-fact may be nested arbitrarily deeply.
-In practice, as with all real-world implementations of composable
-constructs, there are limitations.
-
-<p>
-Implementations of RCU for which <tt>rcu_read_lock()</tt>
-and <tt>rcu_read_unlock()</tt> generate no code, such as
-Linux-kernel RCU when <tt>CONFIG_PREEMPT=n</tt>, can be
-nested arbitrarily deeply.
-After all, there is no overhead.
-Except that if all these instances of <tt>rcu_read_lock()</tt>
-and <tt>rcu_read_unlock()</tt> are visible to the compiler,
-compilation will eventually fail due to exhausting memory,
-mass storage, or user patience, whichever comes first.
-If the nesting is not visible to the compiler, as is the case with
-mutually recursive functions each in its own translation unit,
-stack overflow will result.
-If the nesting takes the form of loops, perhaps in the guise of tail
-recursion, either the control variable
-will overflow or (in the Linux kernel) you will get an RCU CPU stall warning.
-Nevertheless, this class of RCU implementations is one
-of the most composable constructs in existence.
-
-<p>
-RCU implementations that explicitly track nesting depth
-are limited by the nesting-depth counter.
-For example, the Linux kernel's preemptible RCU limits nesting to
-<tt>INT_MAX</tt>.
-This should suffice for almost all practical purposes.
-That said, a consecutive pair of RCU read-side critical sections
-between which there is an operation that waits for a grace period
-cannot be enclosed in another RCU read-side critical section.
-This is because it is not legal to wait for a grace period within
-an RCU read-side critical section: To do so would result either
-in deadlock or
-in RCU implicitly splitting the enclosing RCU read-side critical
-section, neither of which is conducive to a long-lived and prosperous
-kernel.
-
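-<p>
-The underlying problem is illustrated by the following sketch, in
-which the helper functions are hypothetical:
-
-<blockquote>
-<pre>
- 1 rcu_read_lock();    /* enclosing reader */
- 2 do_one_thing();
- 3 synchronize_rcu();  /* BUG: waits for a grace period that cannot
- 4                        complete until the enclosing reader does */
- 5 do_another_thing();
- 6 rcu_read_unlock();
-</pre>
-</blockquote>
-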
-<p>
-It is worth noting that RCU is not alone in limiting composability.
-For example, many transactional-memory implementations prohibit
-composing a pair of transactions separated by an irrevocable
-operation (for example, a network receive operation).
-For another example, lock-based critical sections can be composed
-surprisingly freely, but only if deadlock is avoided.
-
-<p>
-In short, although RCU read-side critical sections are highly composable,
-care is required in some situations, just as is the case for any other
-composable synchronization mechanism.
-
-<h3><a name="Corner Cases">Corner Cases</a></h3>
-
-<p>
-A given RCU workload might have an endless and intense stream of
-RCU read-side critical sections, perhaps even so intense that there
-was never a point in time during which there was not at least one
-RCU read-side critical section in flight.
-RCU cannot allow this situation to block grace periods: As long as
-all the RCU read-side critical sections are finite, grace periods
-must also be finite.
-
-<p>
-That said, preemptible RCU implementations could potentially result
-in RCU read-side critical sections being preempted for long durations,
-which has the effect of creating a long-duration RCU read-side
-critical section.
-This situation can arise only in heavily loaded systems, but systems using
-real-time priorities are of course more vulnerable.
-Therefore, RCU priority boosting is provided to help deal with this
-case.
-That said, the exact requirements on RCU priority boosting will likely
-evolve as more experience accumulates.
-
-<p>
-Other workloads might have very high update rates.
-Although one can argue that such workloads should instead use
-something other than RCU, the fact remains that RCU must
-handle such workloads gracefully.
-This requirement is another factor driving batching of grace periods,
-but it is also the driving force behind the checks for large numbers
-of queued RCU callbacks in the <tt>call_rcu()</tt> code path.
-Finally, high update rates should not delay RCU read-side critical
-sections, although some small read-side delays can occur when using
-<tt>synchronize_rcu_expedited()</tt>, courtesy of this function's use
-of <tt>smp_call_function_single()</tt>.
-
-<p>
-Although all three of these corner cases were understood in the early
-1990s, a simple user-level test consisting of <tt>close(open(path))</tt>
-in a tight loop suddenly provided a much deeper appreciation of the
-high-update-rate corner case in the early 2000s.
-This test also motivated addition of some RCU code to react to high
-update rates: for example, if a given CPU finds itself with more than
-10,000 RCU callbacks queued, RCU will take evasive action by
-more aggressively starting grace periods and more aggressively forcing
-completion of grace-period processing.
-This evasive action causes the grace period to complete more quickly,
-but at the cost of restricting RCU's batching optimizations, thus
-increasing the CPU overhead incurred by that grace period.
-
-<h2><a name="Software-Engineering Requirements">
-Software-Engineering Requirements</a></h2>
-
-<p>
-Between Murphy's Law and &ldquo;To err is human&rdquo;, it is necessary to
-guard against mishaps and misuse:
-
-<ol>
-<li> It is all too easy to forget to use <tt>rcu_read_lock()</tt>
- everywhere that it is needed, so kernels built with
- <tt>CONFIG_PROVE_RCU=y</tt> will splat if
- <tt>rcu_dereference()</tt> is used outside of an
- RCU read-side critical section.
-	Update-side code can use <tt>rcu_dereference_protected()</tt>,
-	which takes a
-	<a href="https://lwn.net/Articles/371986/">lockdep expression</a>
-	to indicate what is providing the protection, as shown in the
-	sketch following this list.
-	If the indicated protection is not provided, a lockdep splat
-	is emitted.
-
- <p>
- Code shared between readers and updaters can use
- <tt>rcu_dereference_check()</tt>, which also takes a
- lockdep expression, and emits a lockdep splat if neither
- <tt>rcu_read_lock()</tt> nor the indicated protection
- is in place.
- In addition, <tt>rcu_dereference_raw()</tt> is used in those
- (hopefully rare) cases where the required protection cannot
- be easily described.
- Finally, <tt>rcu_read_lock_held()</tt> is provided to
- allow a function to verify that it has been invoked within
- an RCU read-side critical section.
- I was made aware of this set of requirements shortly after Thomas
- Gleixner audited a number of RCU uses.
-<li> A given function might wish to check for RCU-related preconditions
- upon entry, before using any other RCU API.
-	The <tt>rcu_lockdep_assert()</tt> macro does this job,
- asserting the expression in kernels having lockdep enabled
- and doing nothing otherwise.
-<li> It is also easy to forget to use <tt>rcu_assign_pointer()</tt>
- and <tt>rcu_dereference()</tt>, perhaps (incorrectly)
- substituting a simple assignment.
- To catch this sort of error, a given RCU-protected pointer may be
- tagged with <tt>__rcu</tt>, after which sparse
- will complain about simple-assignment accesses to that pointer.
- Arnd Bergmann made me aware of this requirement, and also
- supplied the needed
- <a href="https://lwn.net/Articles/376011/">patch series</a>.
-<li> Kernels built with <tt>CONFIG_DEBUG_OBJECTS_RCU_HEAD=y</tt>
- will splat if a data element is passed to <tt>call_rcu()</tt>
- twice in a row, without a grace period in between.
- (This error is similar to a double free.)
- The corresponding <tt>rcu_head</tt> structures that are
- dynamically allocated are automatically tracked, but
- <tt>rcu_head</tt> structures allocated on the stack
- must be initialized with <tt>init_rcu_head_on_stack()</tt>
- and cleaned up with <tt>destroy_rcu_head_on_stack()</tt>.
- Similarly, statically allocated non-stack <tt>rcu_head</tt>
- structures must be initialized with <tt>init_rcu_head()</tt>
- and cleaned up with <tt>destroy_rcu_head()</tt>.
- Mathieu Desnoyers made me aware of this requirement, and also
- supplied the needed
- <a href="https://lkml.kernel.org/g/20100319013024.GA28456@Krystal">patch</a>.
-<li> An infinite loop in an RCU read-side critical section will
- eventually trigger an RCU CPU stall warning splat, with
- the duration of &ldquo;eventually&rdquo; being controlled by the
- <tt>RCU_CPU_STALL_TIMEOUT</tt> <tt>Kconfig</tt> option, or,
- alternatively, by the
- <tt>rcupdate.rcu_cpu_stall_timeout</tt> boot/sysfs
- parameter.
- However, RCU is not obligated to produce this splat
- unless there is a grace period waiting on that particular
- RCU read-side critical section.
- <p>
- Some extreme workloads might intentionally delay
- RCU grace periods, and systems running those workloads can
- be booted with <tt>rcupdate.rcu_cpu_stall_suppress</tt>
- to suppress the splats.
- This kernel parameter may also be set via <tt>sysfs</tt>.
- Furthermore, RCU CPU stall warnings are counter-productive
- during sysrq dumps and during panics.
- RCU therefore supplies the <tt>rcu_sysrq_start()</tt> and
- <tt>rcu_sysrq_end()</tt> API members to be called before
- and after long sysrq dumps.
- RCU also supplies the <tt>rcu_panic()</tt> notifier that is
- automatically invoked at the beginning of a panic to suppress
- further RCU CPU stall warnings.
-
- <p>
- This requirement made itself known in the early 1990s, pretty
- much the first time that it was necessary to debug a CPU stall.
- That said, the initial implementation in DYNIX/ptx was quite
- generic in comparison with that of Linux.
-<li> Although it would be very good to detect pointers leaking out
- of RCU read-side critical sections, there is currently no
- good way of doing this.
- One complication is the need to distinguish between pointers
- leaking and pointers that have been handed off from RCU to
- some other synchronization mechanism, for example, reference
- counting.
-<li> In kernels built with <tt>CONFIG_RCU_TRACE=y</tt>, RCU-related
- information is provided via event tracing.
-<li> Open-coded use of <tt>rcu_assign_pointer()</tt> and
- <tt>rcu_dereference()</tt> to create typical linked
- data structures can be surprisingly error-prone.
- Therefore, RCU-protected
- <a href="https://lwn.net/Articles/609973/#RCU List APIs">linked lists</a>
- and, more recently, RCU-protected
- <a href="https://lwn.net/Articles/612100/">hash tables</a>
- are available.
- Many other special-purpose RCU-protected data structures are
- available in the Linux kernel and the userspace RCU library.
-<li> Some linked structures are created at compile time, but still
- require <tt>__rcu</tt> checking.
- The <tt>RCU_POINTER_INITIALIZER()</tt> macro serves this
- purpose.
-<li> It is not necessary to use <tt>rcu_assign_pointer()</tt>
- when creating linked structures that are to be published via
- a single external pointer.
- The <tt>RCU_INIT_POINTER()</tt> macro is provided for
- this task and also for assigning <tt>NULL</tt> pointers
- at runtime.
-</ol>
-
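-<p>
-The following sketch pulls several of these diagnostics together.
-The <tt>gp_lock</tt>-protected pointer and the helper functions are
-hypothetical, but the APIs are those described in the above list:
-
-<blockquote>
-<pre>
- 1 struct foo __rcu *gp;  /* sparse checks accesses to gp */
- 2
- 3 /* Update side: protection is supplied by gp_lock. */
- 4 static struct foo *get_gp_locked(void)
- 5 {
- 6   return rcu_dereference_protected(gp,
- 7                                    lockdep_is_held(&amp;gp_lock));
- 8 }
- 9
-10 /* Initialization: no concurrent readers, no ordering needed. */
-11 static void init_gp(struct foo *p)
-12 {
-13   RCU_INIT_POINTER(gp, p);
-14 }
-</pre>
-</blockquote>
-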
-<p>
-This is not a hard-and-fast list: RCU's diagnostic capabilities will
-continue to be guided by the number and type of usage bugs found
-in real-world RCU usage.
-
-<h2><a name="Linux Kernel Complications">Linux Kernel Complications</a></h2>
-
-<p>
-The Linux kernel provides an interesting environment for all kinds of
-software, including RCU.
-Some of the relevant points of interest are as follows:
-
-<ol>
-<li> <a href="#Configuration">Configuration</a>.
-<li> <a href="#Firmware Interface">Firmware Interface</a>.
-<li> <a href="#Early Boot">Early Boot</a>.
-<li> <a href="#Interrupts and NMIs">
- Interrupts and non-maskable interrupts (NMIs)</a>.
-<li> <a href="#Loadable Modules">Loadable Modules</a>.
-<li> <a href="#Hotplug CPU">Hotplug CPU</a>.
-<li> <a href="#Scheduler and RCU">Scheduler and RCU</a>.
-<li> <a href="#Tracing and RCU">Tracing and RCU</a>.
-<li> <a href="#Accesses to User Memory and RCU">
-Accesses to User Memory and RCU</a>.
-<li> <a href="#Energy Efficiency">Energy Efficiency</a>.
-<li> <a href="#Scheduling-Clock Interrupts and RCU">
- Scheduling-Clock Interrupts and RCU</a>.
-<li> <a href="#Memory Efficiency">Memory Efficiency</a>.
-<li> <a href="#Performance, Scalability, Response Time, and Reliability">
- Performance, Scalability, Response Time, and Reliability</a>.
-</ol>
-
-<p>
-This list is probably incomplete, but it does give a feel for the
-most notable Linux-kernel complications.
-Each of the following sections covers one of the above topics.
-
-<h3><a name="Configuration">Configuration</a></h3>
-
-<p>
-RCU's goal is automatic configuration, so that almost nobody
-needs to worry about RCU's <tt>Kconfig</tt> options.
-And for almost all users, RCU does in fact work well
-&ldquo;out of the box.&rdquo;
-
-<p>
-However, there are specialized use cases that are handled by
-kernel boot parameters and <tt>Kconfig</tt> options.
-Unfortunately, the <tt>Kconfig</tt> system will explicitly ask users
-about new <tt>Kconfig</tt> options, which requires that almost all of
-them be hidden behind a <tt>CONFIG_RCU_EXPERT</tt> <tt>Kconfig</tt> option.
-
-<p>
-This all should be quite obvious, but the fact remains that
-Linus Torvalds recently had to
-<a href="https://lkml.kernel.org/g/CA+55aFy4wcCwaL4okTs8wXhGZ5h-ibecy_Meg9C4MNQrUnwMcg@mail.gmail.com">remind</a>
-me of this requirement.
-
-<h3><a name="Firmware Interface">Firmware Interface</a></h3>
-
-<p>
-In many cases, the kernel obtains information about the system from the
-firmware, and sometimes things are lost in translation.
-Or the translation is accurate, but the original message is bogus.
-
-<p>
-For example, some systems' firmware overreports the number of CPUs,
-sometimes by a large factor.
-If RCU naively believed the firmware, as it used to do,
-it would create too many per-CPU kthreads.
-Although the resulting system will still run correctly, the extra
-kthreads needlessly consume memory and can cause confusion
-when they show up in <tt>ps</tt> listings.
-
-<p>
-RCU must therefore wait for a given CPU to actually come online before
-it can allow itself to believe that the CPU actually exists.
-The resulting &ldquo;ghost CPUs&rdquo; (which are never going to
-come online) cause a number of
-<a href="https://paulmck.livejournal.com/37494.html">interesting complications</a>.
-
-<h3><a name="Early Boot">Early Boot</a></h3>
-
-<p>
-The Linux kernel's boot sequence is an interesting process,
-and RCU is used early, even before <tt>rcu_init()</tt>
-is invoked.
-In fact, a number of RCU's primitives can be used as soon as the
-initial task's <tt>task_struct</tt> is available and the
-boot CPU's per-CPU variables are set up.
-The read-side primitives (<tt>rcu_read_lock()</tt>,
-<tt>rcu_read_unlock()</tt>, <tt>rcu_dereference()</tt>,
-and <tt>rcu_access_pointer()</tt>) will operate normally very early on,
-as will <tt>rcu_assign_pointer()</tt>.
-
-<p>
-Although <tt>call_rcu()</tt> may be invoked at any
-time during boot, callbacks are not guaranteed to be invoked until after
-all of RCU's kthreads have been spawned, which occurs at
-<tt>early_initcall()</tt> time.
-This delay in callback invocation is due to the fact that RCU does not
-invoke callbacks until it is fully initialized, and this full initialization
-cannot occur until after the scheduler has initialized itself to the
-point where RCU can spawn and run its kthreads.
-In theory, it would be possible to invoke callbacks earlier;
-however, this is not a panacea because there would be severe restrictions
-on what operations those callbacks could invoke.
-
-<p>
-Perhaps surprisingly, <tt>synchronize_rcu()</tt> and
-<tt>synchronize_rcu_expedited()</tt>
-will operate normally
-during very early boot, the reason being that there is only one CPU
-and preemption is disabled.
-This means that a call to <tt>synchronize_rcu()</tt> (or one of its
-friends) is itself a quiescent
-state and thus a grace period, so the early-boot implementation can
-be a no-op.
-
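-<p>
-Conceptually, that no-op can be expressed as the following sketch,
-which is emphatically not the actual kernel code:
-
-<blockquote>
-<pre>
- 1 void synchronize_rcu(void)
- 2 {
- 3   /* One CPU with preemption disabled: this call is itself
- 4      a quiescent state, so all prior readers have finished. */
- 5 }
-</pre>
-</blockquote>
-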
-<p>
-However, once the scheduler has spawned its first kthread, this early
-boot trick fails for <tt>synchronize_rcu()</tt> (as well as for
-<tt>synchronize_rcu_expedited()</tt>) in <tt>CONFIG_PREEMPT=y</tt>
-kernels.
-The reason is that an RCU read-side critical section might be preempted,
-which means that a subsequent <tt>synchronize_rcu()</tt> really does have
-to wait for something, as opposed to simply returning immediately.
-Unfortunately, <tt>synchronize_rcu()</tt> can't do this until all of
-its kthreads are spawned, which doesn't happen until some time during
-<tt>early_initcall()</tt> time.
-But this is no excuse: RCU is nevertheless required to correctly handle
-synchronous grace periods during this time period.
-Once all of its kthreads are up and running, RCU starts running
-normally.
-
-<table>
-<tr><th>&nbsp;</th></tr>
-<tr><th align="left">Quick Quiz:</th></tr>
-<tr><td>
- How can RCU possibly handle grace periods before all of its
- kthreads have been spawned???
-</td></tr>
-<tr><th align="left">Answer:</th></tr>
-<tr><td bgcolor="#ffffff"><font color="ffffff">
- Very carefully!
- </font>
-
- <p><font color="ffffff">
- During the &ldquo;dead zone&rdquo; between the time that the
- scheduler spawns the first task and the time that all of RCU's
- kthreads have been spawned, all synchronous grace periods are
- handled by the expedited grace-period mechanism.
- At runtime, this expedited mechanism relies on workqueues, but
- during the dead zone the requesting task itself drives the
- desired expedited grace period.
- Because dead-zone execution takes place within task context,
- everything works.
- Once the dead zone ends, expedited grace periods go back to
- using workqueues, as is required to avoid problems that would
- otherwise occur when a user task received a POSIX signal while
- driving an expedited grace period.
- </font>
-
- <p><font color="ffffff">
- And yes, this does mean that it is unhelpful to send POSIX
- signals to random tasks between the time that the scheduler
- spawns its first kthread and the time that RCU's kthreads
- have all been spawned.
- If there ever turns out to be a good reason for sending POSIX
- signals during that time, appropriate adjustments will be made.
- (If it turns out that POSIX signals are sent during this time for
- no good reason, other adjustments will be made, appropriate
- or otherwise.)
-</font></td></tr>
-<tr><td>&nbsp;</td></tr>
-</table>
-
-<p>
-I learned of these boot-time requirements as a result of a series of
-system hangs.
-
-<h3><a name="Interrupts and NMIs">Interrupts and NMIs</a></h3>
-
-<p>
-The Linux kernel has interrupts, and RCU read-side critical sections are
-legal within interrupt handlers and within interrupt-disabled regions
-of code, as are invocations of <tt>call_rcu()</tt>.
-
-<p>
-Some Linux-kernel architectures can enter an interrupt handler from
-non-idle process context, and then just never leave it, instead stealthily
-transitioning back to process context.
-This trick is sometimes used to invoke system calls from inside the kernel.
-These &ldquo;half-interrupts&rdquo; mean that RCU has to be very careful
-about how it counts interrupt nesting levels.
-I learned of this requirement the hard way during a rewrite
-of RCU's dyntick-idle code.
-
-<p>
-The Linux kernel has non-maskable interrupts (NMIs), and
-RCU read-side critical sections are legal within NMI handlers.
-Thankfully, RCU update-side primitives, including
-<tt>call_rcu()</tt>, are prohibited within NMI handlers.
-
-<p>
-The name notwithstanding, some Linux-kernel architectures
-can have nested NMIs, which RCU must handle correctly.
-Andy Lutomirski
-<a href="https://lkml.kernel.org/r/CALCETrXLq1y7e_dKFPgou-FKHB6Pu-r8+t-6Ds+8=va7anBWDA@mail.gmail.com">surprised me</a>
-with this requirement;
-he also kindly surprised me with
-<a href="https://lkml.kernel.org/r/CALCETrXSY9JpW3uE6H8WYk81sg56qasA2aqmjMPsq5dOtzso=g@mail.gmail.com">an algorithm</a>
-that meets this requirement.
-
-<p>
-Furthermore, NMI handlers can be interrupted by what appear to RCU
-to be normal interrupts.
-One way that this can happen is for code that directly invokes
-<tt>rcu_irq_enter()</tt> and <tt>rcu_irq_exit()</tt> to be called
-from an NMI handler.
-This astonishing fact of life prompted the current code structure,
-which has <tt>rcu_irq_enter()</tt> invoking <tt>rcu_nmi_enter()</tt>
-and <tt>rcu_irq_exit()</tt> invoking <tt>rcu_nmi_exit()</tt>.
-And yes, I also learned of this requirement the hard way.
-
-<h3><a name="Loadable Modules">Loadable Modules</a></h3>
-
-<p>
-The Linux kernel has loadable modules, and these modules can
-also be unloaded.
-After a given module has been unloaded, any attempt to call
-one of its functions results in a segmentation fault.
-The module-unload functions must therefore cancel any
-delayed calls to loadable-module functions, for example,
-any outstanding <tt>mod_timer()</tt> must be dealt with
-via <tt>del_timer_sync()</tt> or similar.
-
-<p>
-Unfortunately, there is no way to cancel an RCU callback;
-once you invoke <tt>call_rcu()</tt>, the callback function is
-eventually going to be invoked, unless the system goes down first.
-Because it is normally considered socially irresponsible to crash the system
-in response to a module unload request, we need some other way
-to deal with in-flight RCU callbacks.
-
-<p>
-RCU therefore provides
-<tt><a href="https://lwn.net/Articles/217484/">rcu_barrier()</a></tt>,
-which waits until all in-flight RCU callbacks have been invoked.
-If a module uses <tt>call_rcu()</tt>, its exit function should therefore
-prevent any future invocation of <tt>call_rcu()</tt>, then invoke
-<tt>rcu_barrier()</tt>.
-In theory, the underlying module-unload code could invoke
-<tt>rcu_barrier()</tt> unconditionally, but in practice this would
-incur unacceptable latencies.
-
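-<p>
-A module's exit function might therefore look something like the
-following sketch, in which <tt>unregister_my_hooks()</tt> is a
-hypothetical stand-in for whatever prevents future invocations of
-<tt>call_rcu()</tt>:
-
-<blockquote>
-<pre>
- 1 static void __exit mymod_exit(void)
- 2 {
- 3   unregister_my_hooks(); /* no new call_rcu() after this point */
- 4   rcu_barrier();         /* wait for in-flight callbacks */
- 5   /* Now the module's text and data may safely vanish. */
- 6 }
- 7 module_exit(mymod_exit);
-</pre>
-</blockquote>
-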
-<p>
-Nikita Danilov noted this requirement for an analogous filesystem-unmount
-situation, and Dipankar Sarma incorporated <tt>rcu_barrier()</tt> into RCU.
-The need for <tt>rcu_barrier()</tt> for module unloading became
-apparent later.
-
-<p>
-<b>Important note</b>: The <tt>rcu_barrier()</tt> function is not,
-repeat, <i>not</i>, obligated to wait for a grace period.
-It is instead only required to wait for RCU callbacks that have
-already been posted.
-Therefore, if there are no RCU callbacks posted anywhere in the system,
-<tt>rcu_barrier()</tt> is within its rights to return immediately.
-Even if there are callbacks posted, <tt>rcu_barrier()</tt> does not
-necessarily need to wait for a grace period.
-
-<table>
-<tr><th>&nbsp;</th></tr>
-<tr><th align="left">Quick Quiz:</th></tr>
-<tr><td>
- Wait a minute!
-	Each RCU callback must wait for a grace period to complete,
- and <tt>rcu_barrier()</tt> must wait for each pre-existing
- callback to be invoked.
- Doesn't <tt>rcu_barrier()</tt> therefore need to wait for
- a full grace period if there is even one callback posted anywhere
- in the system?
-</td></tr>
-<tr><th align="left">Answer:</th></tr>
-<tr><td bgcolor="#ffffff"><font color="ffffff">
- Absolutely not!!!
- </font>
-
- <p><font color="ffffff">
-	Yes, each RCU callback must wait for a grace period to complete,
- but it might well be partly (or even completely) finished waiting
- by the time <tt>rcu_barrier()</tt> is invoked.
- In that case, <tt>rcu_barrier()</tt> need only wait for the
- remaining portion of the grace period to elapse.
- So even if there are quite a few callbacks posted,
- <tt>rcu_barrier()</tt> might well return quite quickly.
- </font>
-
- <p><font color="ffffff">
- So if you need to wait for a grace period as well as for all
- pre-existing callbacks, you will need to invoke both
- <tt>synchronize_rcu()</tt> and <tt>rcu_barrier()</tt>.
- If latency is a concern, you can always use workqueues
- to invoke them concurrently.
-</font></td></tr>
-<tr><td>&nbsp;</td></tr>
-</table>
-
-<h3><a name="Hotplug CPU">Hotplug CPU</a></h3>
-
-<p>
-The Linux kernel supports CPU hotplug, which means that CPUs
-can come and go.
-It is of course illegal to use any RCU API member from an offline CPU,
-with the exception of <a href="#Sleepable RCU">SRCU</a> read-side
-critical sections.
-This requirement was present from day one in DYNIX/ptx, but
-on the other hand, the Linux kernel's CPU-hotplug implementation
-is &ldquo;interesting.&rdquo;
-
-<p>
-The Linux-kernel CPU-hotplug implementation has notifiers that
-are used to allow the various kernel subsystems (including RCU)
-to respond appropriately to a given CPU-hotplug operation.
-Most RCU operations may be invoked from CPU-hotplug notifiers,
-including even synchronous grace-period operations such as
-<tt>synchronize_rcu()</tt> and <tt>synchronize_rcu_expedited()</tt>.
-
-<p>
-However, all-callback-wait operations such as
-<tt>rcu_barrier()</tt> are not supported, due to the
-fact that there are phases of CPU-hotplug operations where
-the outgoing CPU's callbacks will not be invoked until after
-the CPU-hotplug operation ends, which could result in deadlock.
-Furthermore, <tt>rcu_barrier()</tt> blocks CPU-hotplug operations
-during its execution, which results in another type of deadlock
-when invoked from a CPU-hotplug notifier.
-
-<h3><a name="Scheduler and RCU">Scheduler and RCU</a></h3>
-
-<p>
-RCU depends on the scheduler, and the scheduler uses RCU to
-protect some of its data structures.
-The preemptible-RCU <tt>rcu_read_unlock()</tt>
-implementation must therefore be written carefully to avoid deadlocks
-involving the scheduler's runqueue and priority-inheritance locks.
-In particular, <tt>rcu_read_unlock()</tt> must tolerate an
-interrupt where the interrupt handler invokes both
-<tt>rcu_read_lock()</tt> and <tt>rcu_read_unlock()</tt>.
-This possibility requires <tt>rcu_read_unlock()</tt> to use
-negative nesting levels to avoid destructive recursion via
-the interrupt handler's use of RCU.
-
-<p>
-This scheduler-RCU requirement came as a
-<a href="https://lwn.net/Articles/453002/">complete surprise</a>.
-
-<p>
-As noted above, RCU makes use of kthreads, and it is necessary to
-avoid excessive CPU-time accumulation by these kthreads.
-This requirement was no surprise, but RCU's violation of it
-when running context-switch-heavy workloads when built with
-<tt>CONFIG_NO_HZ_FULL=y</tt>
-<a href="http://www.rdrop.com/users/paulmck/scalability/paper/BareMetal.2015.01.15b.pdf">did come as a surprise [PDF]</a>.
-RCU has made good progress towards meeting this requirement, even
-for context-switch-heavy <tt>CONFIG_NO_HZ_FULL=y</tt> workloads,
-but there is room for further improvement.
-
-<p>
-It is forbidden to hold any of scheduler's runqueue or priority-inheritance
-spinlocks across an <tt>rcu_read_unlock()</tt> unless interrupts have been
-disabled across the entire RCU read-side critical section, that is,
-up to and including the matching <tt>rcu_read_lock()</tt>.
-Violating this restriction can result in deadlocks involving these
-scheduler spinlocks.
-There was hope that this restriction might be lifted when interrupt-disabled
-calls to <tt>rcu_read_unlock()</tt> started deferring the reporting of
-the resulting RCU-preempt quiescent state until the end of the corresponding
-interrupts-disabled region.
-Unfortunately, timely reporting of the corresponding quiescent state
-to expedited grace periods requires a call to <tt>raise_softirq()</tt>,
-which can acquire these scheduler spinlocks.
-In addition, real-time systems using RCU priority boosting
-need this restriction to remain in effect because deferred
-quiescent-state reporting would also defer deboosting, which in turn
-would degrade real-time latencies.
-
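-<p>
-In other words, the following sketch is legal only because interrupts
-are disabled before the reader begins, with <tt>some_rq_lock</tt>
-standing in for a scheduler runqueue or priority-inheritance lock:
-
-<blockquote>
-<pre>
- 1 local_irq_save(flags);
- 2 rcu_read_lock();
- 3 do_something();
- 4 raw_spin_lock(&amp;some_rq_lock);
- 5 rcu_read_unlock();  /* scheduler lock held: irqs must be off */
- 6 raw_spin_unlock(&amp;some_rq_lock);
- 7 local_irq_restore(flags);
-</pre>
-</blockquote>
-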
-<p>
-In theory, if a given RCU read-side critical section could be
-guaranteed to be less than one second in duration, holding a scheduler
-spinlock across that critical section's <tt>rcu_read_unlock()</tt>
-would require only that preemption be disabled across the entire
-RCU read-side critical section, not interrupts.
-Unfortunately, given the possibility of vCPU preemption, long-running
-interrupts, and so on, it is not possible in practice to guarantee
-that a given RCU read-side critical section will complete in less than
-one second.
-Therefore, as noted above, if scheduler spinlocks are held across
-a given call to <tt>rcu_read_unlock()</tt>, interrupts must be
-disabled across the entire RCU read-side critical section.
-
-<h3><a name="Tracing and RCU">Tracing and RCU</a></h3>
-
-<p>
-It is possible to use tracing on RCU code, but tracing itself
-uses RCU.
-For this reason, <tt>rcu_dereference_raw_check()</tt>
-is provided for use by tracing, which avoids the destructive
-recursion that could otherwise ensue.
-This API is also used by virtualization in some architectures,
-where RCU readers execute in environments in which tracing
-cannot be used.
-The tracing folks both located the requirement and provided the
-needed fix, so this surprise requirement was relatively painless.
-
-<h3><a name="Accesses to User Memory and RCU">
-Accesses to User Memory and RCU</a></h3>
-
-<p>
-The kernel needs to access user-space memory, for example, to access
-data referenced by system-call parameters.
-The <tt>get_user()</tt> macro does this job.
-
-<p>
-However, user-space memory might well be paged out, which means
-that <tt>get_user()</tt> might well page-fault and thus block while
-waiting for the resulting I/O to complete.
-It would be a very bad thing for the compiler to reorder
-a <tt>get_user()</tt> invocation into an RCU read-side critical
-section.
-For example, suppose that the source code looked like this:
-
-<blockquote>
-<pre>
- 1 rcu_read_lock();
- 2 p = rcu_dereference(gp);
- 3 v = p-&gt;value;
- 4 rcu_read_unlock();
- 5 get_user(user_v, user_p);
- 6 do_something_with(v, user_v);
-</pre>
-</blockquote>
-
-<p>
-The compiler must not be permitted to transform this source code into
-the following:
-
-<blockquote>
-<pre>
- 1 rcu_read_lock();
- 2 p = rcu_dereference(gp);
- 3 get_user(user_v, user_p); // BUG: POSSIBLE PAGE FAULT!!!
- 4 v = p-&gt;value;
- 5 rcu_read_unlock();
- 6 do_something_with(v, user_v);
-</pre>
-</blockquote>
-
-<p>
-If the compiler did make this transformation in a
-<tt>CONFIG_PREEMPT=n</tt> kernel build, and if <tt>get_user()</tt> did
-page fault, the result would be a quiescent state in the middle
-of an RCU read-side critical section.
-This misplaced quiescent state could result in line&nbsp;4 being
-a use-after-free access, which could be bad for your kernel's
-actuarial statistics.
-Similar examples can be constructed with the call to <tt>get_user()</tt>
-preceding the <tt>rcu_read_lock()</tt>.
-
-<p>
-Unfortunately, <tt>get_user()</tt> doesn't have any particular
-ordering properties, and in some architectures the underlying <tt>asm</tt>
-isn't even marked <tt>volatile</tt>.
-And even if it were marked <tt>volatile</tt>, the above access to
-<tt>p-&gt;value</tt> is not volatile, so the compiler would not have any
-reason to keep those two accesses in order.
-
-<p>
-Therefore, the Linux-kernel definitions of <tt>rcu_read_lock()</tt>
-and <tt>rcu_read_unlock()</tt> must act as compiler barriers,
-at least for outermost instances of <tt>rcu_read_lock()</tt> and
-<tt>rcu_read_unlock()</tt> within a nested set of RCU read-side critical
-sections.
-
-<h3><a name="Energy Efficiency">Energy Efficiency</a></h3>
-
-<p>
-Interrupting idle CPUs is considered socially unacceptable,
-especially by people with battery-powered embedded systems.
-RCU therefore conserves energy by detecting which CPUs are
-idle, including tracking CPUs that have been interrupted from idle.
-This is a large part of the energy-efficiency requirement,
-so I learned of this via an irate phone call.
-
-<p>
-Because RCU avoids interrupting idle CPUs, it is illegal to
-execute an RCU read-side critical section on an idle CPU.
-(Kernels built with <tt>CONFIG_PROVE_RCU=y</tt> will splat
-if you try it.)
-The <tt>RCU_NONIDLE()</tt> macro and <tt>_rcuidle</tt>
-event tracing are provided to work around this restriction.
-In addition, <tt>rcu_is_watching()</tt> may be used to
-test whether or not it is currently legal to run RCU read-side
-critical sections on this CPU.
-I learned of the need for diagnostics on the one hand
-and <tt>RCU_NONIDLE()</tt> on the other while inspecting
-idle-loop code.
-Steven Rostedt supplied <tt>_rcuidle</tt> event tracing,
-which is used quite heavily in the idle loop.
-However, there are some restrictions on the code placed within
-<tt>RCU_NONIDLE()</tt>:
-
-<ol>
-<li> Blocking is prohibited.
- In practice, this is not a serious restriction given that idle
- tasks are prohibited from blocking to begin with.
-<li>	Although nesting of <tt>RCU_NONIDLE()</tt> invocations is
-	permitted, they cannot nest indefinitely deeply.
- However, given that they can be nested on the order of a million
- deep, even on 32-bit systems, this should not be a serious
- restriction.
- This nesting limit would probably be reached long after the
- compiler OOMed or the stack overflowed.
-<li> Any code path that enters <tt>RCU_NONIDLE()</tt> must sequence
- out of that same <tt>RCU_NONIDLE()</tt>.
- For example, the following is grossly illegal:
-
- <blockquote>
- <pre>
- 1 RCU_NONIDLE({
- 2 do_something();
- 3 goto bad_idea; /* BUG!!! */
- 4 do_something_else();});
- 5 bad_idea:
- </pre>
- </blockquote>
-
- <p>
- It is just as illegal to transfer control into the middle of
- <tt>RCU_NONIDLE()</tt>'s argument.
- Yes, in theory, you could transfer in as long as you also
- transferred out, but in practice you could also expect to get sharply
- worded review comments.
-</ol>
-
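-<p>
-For example, code that might run on an idle CPU can combine the two
-APIs as in the following sketch, where
-<tt>do_rcu_read_side_work()</tt> is a hypothetical function containing
-an RCU read-side critical section:
-
-<blockquote>
-<pre>
- 1 if (rcu_is_watching())
- 2   do_rcu_read_side_work();              /* RCU is paying attention */
- 3 else
- 4   RCU_NONIDLE(do_rcu_read_side_work()); /* make RCU pay attention */
-</pre>
-</blockquote>
-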
-<p>
-It is similarly socially unacceptable to interrupt a
-<tt>nohz_full</tt> CPU running in userspace.
-RCU must therefore track <tt>nohz_full</tt> userspace
-execution, which means that it must be able to sample state at two
-points in time and to determine whether or not some other CPU spent
-any time idle and/or executing in userspace in the interim.
-
-<p>
-These energy-efficiency requirements have proven quite difficult to
-understand and to meet: for example, there have been more than five
-clean-sheet rewrites of RCU's energy-efficiency code, the last of
-which was finally able to demonstrate
-<a href="http://www.rdrop.com/users/paulmck/realtime/paper/AMPenergy.2013.04.19a.pdf">real energy savings running on real hardware [PDF]</a>.
-As noted earlier,
-I learned of many of these requirements via angry phone calls:
-Flaming me on the Linux-kernel mailing list was apparently not
-sufficient to fully vent their ire at RCU's energy-efficiency bugs!
-
-<h3><a name="Scheduling-Clock Interrupts and RCU">
-Scheduling-Clock Interrupts and RCU</a></h3>
-
-<p>
-The kernel transitions between in-kernel non-idle execution, userspace
-execution, and the idle loop.
-Depending on kernel configuration, RCU handles these states differently:
-
-<table border=3>
-<tr><th><tt>HZ</tt> Kconfig</th>
- <th>In-Kernel</th>
- <th>Usermode</th>
- <th>Idle</th></tr>
-<tr><th align="left"><tt>HZ_PERIODIC</tt></th>
- <td>Can rely on scheduling-clock interrupt.</td>
- <td>Can rely on scheduling-clock interrupt and its
- detection of interrupt from usermode.</td>
- <td>Can rely on RCU's dyntick-idle detection.</td></tr>
-<tr><th align="left"><tt>NO_HZ_IDLE</tt></th>
- <td>Can rely on scheduling-clock interrupt.</td>
- <td>Can rely on scheduling-clock interrupt and its
- detection of interrupt from usermode.</td>
- <td>Can rely on RCU's dyntick-idle detection.</td></tr>
-<tr><th align="left"><tt>NO_HZ_FULL</tt></th>
- <td>Can only sometimes rely on scheduling-clock interrupt.
- In other cases, it is necessary to bound kernel execution
- times and/or use IPIs.</td>
- <td>Can rely on RCU's dyntick-idle detection.</td>
- <td>Can rely on RCU's dyntick-idle detection.</td></tr>
-</table>
-
-<table>
-<tr><th>&nbsp;</th></tr>
-<tr><th align="left">Quick Quiz:</th></tr>
-<tr><td>
- Why can't <tt>NO_HZ_FULL</tt> in-kernel execution rely on the
- scheduling-clock interrupt, just like <tt>HZ_PERIODIC</tt>
- and <tt>NO_HZ_IDLE</tt> do?
-</td></tr>
-<tr><th align="left">Answer:</th></tr>
-<tr><td bgcolor="#ffffff"><font color="ffffff">
- Because, as a performance optimization, <tt>NO_HZ_FULL</tt>
- does not necessarily re-enable the scheduling-clock interrupt
- on entry to each and every system call.
-</font></td></tr>
-<tr><td>&nbsp;</td></tr>
-</table>
-
-<p>
-However, RCU must be reliably informed as to whether any given
-CPU is currently in the idle loop, and, for <tt>NO_HZ_FULL</tt>,
-also whether that CPU is executing in usermode, as discussed
-<a href="#Energy Efficiency">earlier</a>.
-It also requires that the scheduling-clock interrupt be enabled when
-RCU needs it to be:
-
-<ol>
-<li> If a CPU is either idle or executing in usermode, and RCU believes
- it is non-idle, the scheduling-clock tick had better be running.
- Otherwise, you will get RCU CPU stall warnings. Or at best,
- very long (11-second) grace periods, with a pointless IPI waking
- the CPU from time to time.
-<li> If a CPU is in a portion of the kernel that executes RCU read-side
- critical sections, and RCU believes this CPU to be idle, you will get
- random memory corruption. <b>DON'T DO THIS!!!</b>
-
- <br>This is one reason to test with lockdep, which will complain
- about this sort of thing.
-<li> If a CPU is in a portion of the kernel that is absolutely
- positively no-joking guaranteed to never execute any RCU read-side
-	critical sections, and RCU believes this CPU to be idle,
- no problem. This sort of thing is used by some architectures
- for light-weight exception handlers, which can then avoid the
- overhead of <tt>rcu_irq_enter()</tt> and <tt>rcu_irq_exit()</tt>
- at exception entry and exit, respectively.
- Some go further and avoid the entireties of <tt>irq_enter()</tt>
- and <tt>irq_exit()</tt>.
-
- <br>Just make very sure you are running some of your tests with
- <tt>CONFIG_PROVE_RCU=y</tt>, just in case one of your code paths
- was in fact joking about not doing RCU read-side critical sections.
-<li> If a CPU is executing in the kernel with the scheduling-clock
- interrupt disabled and RCU believes this CPU to be non-idle,
- and if the CPU goes idle (from an RCU perspective) every few
- jiffies, no problem. It is usually OK for there to be the
- occasional gap between idle periods of up to a second or so.
-
- <br>If the gap grows too long, you get RCU CPU stall warnings.
-<li> If a CPU is either idle or executing in usermode, and RCU believes
- it to be idle, of course no problem.
-<li> If a CPU is executing in the kernel, the kernel code
- path is passing through quiescent states at a reasonable
- frequency (preferably about once per few jiffies, but the
- occasional excursion to a second or so is usually OK) and the
- scheduling-clock interrupt is enabled, of course no problem.
-
- <br>If the gap between a successive pair of quiescent states grows
- too long, you get RCU CPU stall warnings.
-</ol>
-
-<table>
-<tr><th>&nbsp;</th></tr>
-<tr><th align="left">Quick Quiz:</th></tr>
-<tr><td>
- But what if my driver has a hardware interrupt handler
- that can run for many seconds?
-	I cannot invoke <tt>schedule()</tt> from a hardware
- interrupt handler, after all!
-</td></tr>
-<tr><th align="left">Answer:</th></tr>
-<tr><td bgcolor="#ffffff"><font color="ffffff">
- One approach is to do <tt>rcu_irq_exit();rcu_irq_enter();</tt>
- every so often.
- But given that long-running interrupt handlers can cause
- other problems, not least for response time, shouldn't you
- work to keep your interrupt handler's runtime within reasonable
- bounds?
-</font></td></tr>
-<tr><td>&nbsp;</td></tr>
-</table>
-
-<p>
-But as long as RCU is properly informed of kernel state transitions between
-in-kernel execution, usermode execution, and idle, and as long as the
-scheduling-clock interrupt is enabled when RCU needs it to be, you
-can rest assured that the bugs you encounter will be in some other
-part of RCU or some other part of the kernel!
-
-<h3><a name="Memory Efficiency">Memory Efficiency</a></h3>
-
-<p>
-Although small-memory non-realtime systems can simply use Tiny RCU,
-code size is only one aspect of memory efficiency.
-Another aspect is the size of the <tt>rcu_head</tt> structure
-used by <tt>call_rcu()</tt> and <tt>kfree_rcu()</tt>.
-Although this structure contains nothing more than a pair of pointers,
-it does appear in many RCU-protected data structures, including
-some that are size critical.
-The <tt>page</tt> structure is a case in point, as evidenced by
-the many occurrences of the <tt>union</tt> keyword within that structure.
-
-<p>
-This need for memory efficiency is one reason that RCU uses hand-crafted
-singly linked lists to track the <tt>rcu_head</tt> structures that
-are waiting for a grace period to elapse.
-It is also the reason why <tt>rcu_head</tt> structures do not contain
-debug information, such as fields tracking the file and line of the
-<tt>call_rcu()</tt> or <tt>kfree_rcu()</tt> that posted them.
-Although this information might appear in debug-only kernel builds at some
-point, in the meantime, the <tt>-&gt;func</tt> field will often provide
-the needed debug information.
-
-<p>
-However, in some cases, the need for memory efficiency leads to even
-more extreme measures.
-Returning to the <tt>page</tt> structure, the <tt>rcu_head</tt> field
-shares storage with a great many other structures that are used at
-various points in the corresponding page's lifetime.
-In order to correctly resolve certain
-<a href="https://lkml.kernel.org/g/1439976106-137226-1-git-send-email-kirill.shutemov@linux.intel.com">race conditions</a>,
-the Linux kernel's memory-management subsystem needs a particular bit
-to remain zero during all phases of grace-period processing,
-and that bit happens to map to the bottom bit of the
-<tt>rcu_head</tt> structure's <tt>-&gt;next</tt> field.
-RCU makes this guarantee as long as <tt>call_rcu()</tt>
-is used to post the callback, as opposed to <tt>kfree_rcu()</tt>
-or some future &ldquo;lazy&rdquo;
-variant of <tt>call_rcu()</tt> that might one day be created for
-energy-efficiency purposes.
-
-<p>
-That said, there are limits.
-RCU requires that the <tt>rcu_head</tt> structure be aligned to a
-two-byte boundary, and passing a misaligned <tt>rcu_head</tt>
-structure to one of the <tt>call_rcu()</tt> family of functions
-will result in a splat.
-It is therefore necessary to exercise caution when packing
-structures containing fields of type <tt>rcu_head</tt>.
-Why not a four-byte or even eight-byte alignment requirement?
-Because the m68k architecture provides only two-byte alignment,
-and thus acts as alignment's least common denominator.
-
-<p>
-The reason for reserving the bottom bit of pointers to
-<tt>rcu_head</tt> structures is to leave the door open to
-&ldquo;lazy&rdquo; callbacks whose invocations can safely be deferred.
-Deferring invocation could potentially have energy-efficiency
-benefits, but only if the rate of non-lazy callbacks decreases
-significantly for some important workload.
-In the meantime, reserving the bottom bit keeps this option open
-in case it one day becomes useful.
-
-<h3><a name="Performance, Scalability, Response Time, and Reliability">
-Performance, Scalability, Response Time, and Reliability</a></h3>
-
-<p>
-Expanding on the
-<a href="#Performance and Scalability">earlier discussion</a>,
-RCU is used heavily by hot code paths in performance-critical
-portions of the Linux kernel's networking, security, virtualization,
-and scheduling code paths.
-RCU must therefore use efficient implementations, especially in its
-read-side primitives.
-To that end, it would be good if preemptible RCU's implementation
-of <tt>rcu_read_lock()</tt> could be inlined; however, doing
-so requires resolving <tt>#include</tt> issues with the
-<tt>task_struct</tt> structure.
-
-<p>
-The Linux kernel supports hardware configurations with up to
-4096 CPUs, which means that RCU must be extremely scalable.
-Algorithms that involve frequent acquisitions of global locks or
-frequent atomic operations on global variables simply cannot be
-tolerated within the RCU implementation.
-RCU therefore makes heavy use of a combining tree based on the
-<tt>rcu_node</tt> structure.
-RCU is required to tolerate all CPUs continuously invoking any
-combination of RCU's runtime primitives with minimal per-operation
-overhead.
-In fact, in many cases, increasing load must <i>decrease</i> the
-per-operation overhead, as witnessed by the batching optimizations for
-<tt>synchronize_rcu()</tt>, <tt>call_rcu()</tt>,
-<tt>synchronize_rcu_expedited()</tt>, and <tt>rcu_barrier()</tt>.
-As a general rule, RCU must cheerfully accept whatever the
-rest of the Linux kernel decides to throw at it.
-
-<p>
-The Linux kernel is used for real-time workloads, especially
-in conjunction with the
-<a href="https://rt.wiki.kernel.org/index.php/Main_Page">-rt patchset</a>.
-The real-time-latency response requirements are such that the
-traditional approach of disabling preemption across RCU
-read-side critical sections is inappropriate.
-Kernels built with <tt>CONFIG_PREEMPT=y</tt> therefore
-use an RCU implementation that allows RCU read-side critical
-sections to be preempted.
-This requirement made its presence known after users made it
-clear that an earlier
-<a href="https://lwn.net/Articles/107930/">real-time patch</a>
-did not meet their needs, in conjunction with some
-<a href="https://lkml.kernel.org/g/20050318002026.GA2693@us.ibm.com">RCU issues</a>
-encountered by a very early version of the -rt patchset.
-
-<p>
-In addition, RCU must make do with a sub-100-microsecond real-time latency
-budget.
-In fact, on smaller systems with the -rt patchset, the Linux kernel
-provides sub-20-microsecond real-time latencies for the whole kernel,
-including RCU.
-RCU's scalability and latency must therefore be sufficient for
-these sorts of configurations.
-To my surprise, the sub-100-microsecond real-time latency budget
-<a href="http://www.rdrop.com/users/paulmck/realtime/paper/bigrt.2013.01.31a.LCA.pdf">
-applies to even the largest systems [PDF]</a>,
-up to and including systems with 4096 CPUs.
-This real-time requirement motivated the grace-period kthread, which
-also simplified handling of a number of race conditions.
-
-<p>
-RCU must avoid degrading real-time response for CPU-bound threads, whether
-executing in usermode (which is one use case for
-<tt>CONFIG_NO_HZ_FULL=y</tt>) or in the kernel.
-That said, CPU-bound loops in the kernel must execute
-<tt>cond_resched()</tt> at least once per few tens of milliseconds
-in order to avoid receiving an IPI from RCU.
-
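-<p>
-For example, a CPU-bound in-kernel loop might be structured as follows,
-where <tt>do_a_unit_of_work()</tt> is a hypothetical stand-in for the
-loop body:
-
-<blockquote>
-<pre>
- 1 while (!done) {
- 2   done = do_a_unit_of_work();
- 3   cond_resched(); /* Supply RCU with a quiescent state. */
- 4 }
-</pre>
-</blockquote>
-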
-<p>
-Finally, RCU's status as a synchronization primitive means that
-any RCU failure can result in arbitrary memory corruption that can be
-extremely difficult to debug.
-This means that RCU must be extremely reliable, which in
-practice also means that RCU must have an aggressive stress-test
-suite.
-This stress-test suite is called <tt>rcutorture</tt>.
-
-<p>
-Although the need for <tt>rcutorture</tt> was no surprise,
-the current immense popularity of the Linux kernel is posing
-interesting&mdash;and perhaps unprecedented&mdash;validation
-challenges.
-To see this, keep in mind that there are well over one billion
-instances of the Linux kernel running today, given Android
-smartphones, Linux-powered televisions, and servers.
-This number can be expected to increase sharply with the advent of
-the celebrated Internet of Things.
-
-<p>
-Suppose that RCU contains a race condition that manifests on average
-once per million years of runtime.
-This bug will occur about three times per <i>day</i> across
-the installed base.
-RCU could simply hide behind hardware error rates, given that no one
-should really expect their smartphone to last for a million years.
-However, anyone taking too much comfort from this thought should
-consider the fact that in most jurisdictions, a successful multi-year
-test of a given mechanism, which might include a Linux kernel,
-suffices for a number of types of safety-critical certifications.
-In fact, rumor has it that the Linux kernel is already being used
-in production for safety-critical applications.
-I don't know about you, but I would feel quite bad if a bug in RCU
-killed someone.
-Which might explain my recent focus on validation and verification.
-
-<h2><a name="Other RCU Flavors">Other RCU Flavors</a></h2>
-
-<p>
-One of the more surprising things about RCU is that there are now
-no fewer than five <i>flavors</i>, or API families.
-In addition, the primary flavor that has been the sole focus up to
-this point has two different implementations, non-preemptible and
-preemptible.
-The other four flavors are listed below, with requirements for each
-described in a separate section.
-
-<ol>
-<li> <a href="#Bottom-Half Flavor">Bottom-Half Flavor (Historical)</a>
-<li> <a href="#Sched Flavor">Sched Flavor (Historical)</a>
-<li> <a href="#Sleepable RCU">Sleepable RCU</a>
-<li> <a href="#Tasks RCU">Tasks RCU</a>
-</ol>
-
-<h3><a name="Bottom-Half Flavor">Bottom-Half Flavor (Historical)</a></h3>
-
-<p>
-The RCU-bh flavor of RCU has since been expressed in terms of
-the other RCU flavors as part of a consolidation of the three
-flavors into a single flavor.
-The read-side API remains, and continues to disable softirq and to
-be accounted for by lockdep.
-Much of the material in this section is therefore strictly historical
-in nature.
-
-<p>
-The softirq-disable (AKA &ldquo;bottom-half&rdquo;,
-hence the &ldquo;_bh&rdquo; abbreviations)
-flavor of RCU, or <i>RCU-bh</i>, was developed by
-Dipankar Sarma to provide a flavor of RCU that could withstand the
-network-based denial-of-service attacks researched by Robert
-Olsson.
-These attacks placed so much networking load on the system
-that some of the CPUs never exited softirq execution,
-which in turn prevented those CPUs from ever executing a context switch,
-which, in the RCU implementation of that time, prevented grace periods
-from ever ending.
-The result was an out-of-memory condition and a system hang.
-
-<p>
-The solution was the creation of RCU-bh, which does
-<tt>local_bh_disable()</tt>
-across its read-side critical sections, and which uses the transition
-from one type of softirq processing to another as a quiescent state
-in addition to context switch, idle, user mode, and offline.
-This means that RCU-bh grace periods can complete even when some of
-the CPUs execute in softirq indefinitely, thus allowing algorithms
-based on RCU-bh to withstand network-based denial-of-service attacks.
-
-<p>
-Because
-<tt>rcu_read_lock_bh()</tt> and <tt>rcu_read_unlock_bh()</tt>
-disable and re-enable softirq handlers, any attempt to start a softirq
-handler during the
-RCU-bh read-side critical section will be deferred.
-In this case, <tt>rcu_read_unlock_bh()</tt>
-will invoke softirq processing, which can take considerable time.
-One can of course argue that this softirq overhead should be associated
-with the code following the RCU-bh read-side critical section rather
-than <tt>rcu_read_unlock_bh()</tt>, but the fact
-is that most profiling tools cannot be expected to make this sort
-of fine distinction.
-For example, suppose that a three-millisecond-long RCU-bh read-side
-critical section executes during a time of heavy networking load.
-There will very likely be an attempt to invoke at least one softirq
-handler during that three milliseconds, but any such invocation will
-be delayed until the time of the <tt>rcu_read_unlock_bh()</tt>.
-This can of course make it appear at first glance as if
-<tt>rcu_read_unlock_bh()</tt> was executing very slowly.
-
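-<p>
-For example, a minimal RCU-bh reader might be sketched as follows,
-where <tt>gp</tt> and <tt>handle()</tt> are hypothetical:
-
-<blockquote>
-<pre>
- 1 rcu_read_lock_bh();
- 2 p = rcu_dereference_bh(gp);
- 3 if (p)
- 4   handle(p);
- 5 rcu_read_unlock_bh(); /* Might run deferred softirq handlers. */
-</pre>
-</blockquote>
-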
-<p>
-The
-<a href="https://lwn.net/Articles/609973/#RCU Per-Flavor API Table">RCU-bh API</a>
-includes
-<tt>rcu_read_lock_bh()</tt>,
-<tt>rcu_read_unlock_bh()</tt>,
-<tt>rcu_dereference_bh()</tt>,
-<tt>rcu_dereference_bh_check()</tt>,
-<tt>synchronize_rcu_bh()</tt>,
-<tt>synchronize_rcu_bh_expedited()</tt>,
-<tt>call_rcu_bh()</tt>,
-<tt>rcu_barrier_bh()</tt>, and
-<tt>rcu_read_lock_bh_held()</tt>.
-However, the update-side APIs are now simple wrappers for other RCU
-flavors, namely RCU-sched in <tt>CONFIG_PREEMPT=n</tt> kernels and RCU-preempt
-otherwise.
-
-<h3><a name="Sched Flavor">Sched Flavor (Historical)</a></h3>
-
-<p>
-The RCU-sched flavor of RCU has since been expressed in terms of
-the other RCU flavors as part of a consolidation of the three
-flavors into a single flavor.
-The read-side API remains, and continues to disable preemption and to
-be accounted for by lockdep.
-Much of the material in this section is therefore strictly historical
-in nature.
-
-<p>
-Before preemptible RCU, waiting for an RCU grace period had the
-side effect of also waiting for all pre-existing interrupt
-and NMI handlers.
-However, there are legitimate preemptible-RCU implementations that
-do not have this property, given that any point in the code outside
-of an RCU read-side critical section can be a quiescent state.
-Therefore, <i>RCU-sched</i> was created, which follows &ldquo;classic&rdquo;
-RCU in that an RCU-sched grace period waits for pre-existing
-interrupt and NMI handlers.
-In kernels built with <tt>CONFIG_PREEMPT=n</tt>, the RCU and RCU-sched
-APIs have identical implementations, while kernels built with
-<tt>CONFIG_PREEMPT=y</tt> provide a separate implementation for each.
-
-<p>
-Note well that in <tt>CONFIG_PREEMPT=y</tt> kernels,
-<tt>rcu_read_lock_sched()</tt> and <tt>rcu_read_unlock_sched()</tt>
-disable and re-enable preemption, respectively.
-This means that if there was a preemption attempt during the
-RCU-sched read-side critical section, <tt>rcu_read_unlock_sched()</tt>
-will enter the scheduler, with all the latency and overhead entailed.
-Just as with <tt>rcu_read_unlock_bh()</tt>, this can make it look
-as if <tt>rcu_read_unlock_sched()</tt> was executing very slowly.
-However, the highest-priority task won't be preempted, so that task
-will enjoy low-overhead <tt>rcu_read_unlock_sched()</tt> invocations.
-
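-<p>
-A minimal RCU-sched reader follows the same pattern, again with
-hypothetical <tt>gp</tt> and <tt>handle()</tt>:
-
-<blockquote>
-<pre>
- 1 rcu_read_lock_sched(); /* Disables preemption. */
- 2 p = rcu_dereference_sched(gp);
- 3 if (p)
- 4   handle(p);
- 5 rcu_read_unlock_sched(); /* Might enter the scheduler. */
-</pre>
-</blockquote>
-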
-<p>
-The
-<a href="https://lwn.net/Articles/609973/#RCU Per-Flavor API Table">RCU-sched API</a>
-includes
-<tt>rcu_read_lock_sched()</tt>,
-<tt>rcu_read_unlock_sched()</tt>,
-<tt>rcu_read_lock_sched_notrace()</tt>,
-<tt>rcu_read_unlock_sched_notrace()</tt>,
-<tt>rcu_dereference_sched()</tt>,
-<tt>rcu_dereference_sched_check()</tt>,
-<tt>synchronize_sched()</tt>,
-<tt>synchronize_sched_expedited()</tt>,
-<tt>call_rcu_sched()</tt>,
-<tt>rcu_barrier_sched()</tt>, and
-<tt>rcu_read_lock_sched_held()</tt>.
-However, anything that disables preemption also marks an RCU-sched
-read-side critical section, including
-<tt>preempt_disable()</tt> and <tt>preempt_enable()</tt>,
-<tt>local_irq_save()</tt> and <tt>local_irq_restore()</tt>,
-and so on.
-
-<h3><a name="Sleepable RCU">Sleepable RCU</a></h3>
-
-<p>
-For well over a decade, someone saying &ldquo;I need to block within
-an RCU read-side critical section&rdquo; was a reliable indication
-that this someone did not understand RCU.
-After all, if you are always blocking in an RCU read-side critical
-section, you can probably afford to use a higher-overhead synchronization
-mechanism.
-However, that changed with the advent of the Linux kernel's notifiers,
-whose RCU read-side critical
-sections almost never sleep, but sometimes need to.
-This resulted in the introduction of
-<a href="https://lwn.net/Articles/202847/">sleepable RCU</a>,
-or <i>SRCU</i>.
-
-<p>
-SRCU allows different domains to be defined, with each such domain
-defined by an instance of an <tt>srcu_struct</tt> structure.
-A pointer to this structure must be passed in to each SRCU function,
-for example, <tt>synchronize_srcu(&amp;ss)</tt>, where
-<tt>ss</tt> is the <tt>srcu_struct</tt> structure.
-The key benefit of these domains is that a slow SRCU reader in one
-domain does not delay an SRCU grace period in some other domain.
-That said, one consequence of these domains is that read-side code
-must pass a &ldquo;cookie&rdquo; from <tt>srcu_read_lock()</tt>
-to <tt>srcu_read_unlock()</tt>, for example, as follows:
-
-<blockquote>
-<pre>
- 1 int idx;
- 2
- 3 idx = srcu_read_lock(&amp;ss);
- 4 do_something();
- 5 srcu_read_unlock(&amp;ss, idx);
-</pre>
-</blockquote>
-
-<p>
-As noted above, it is legal to block within SRCU read-side critical sections,
-however, with great power comes great responsibility.
-If you block forever in one of a given domain's SRCU read-side critical
-sections, then that domain's grace periods will also be blocked forever.
-Of course, one good way to block forever is to deadlock, which can
-happen if any operation in a given domain's SRCU read-side critical
-section can wait, either directly or indirectly, for that domain's
-grace period to elapse.
-For example, this results in a self-deadlock:
-
-<blockquote>
-<pre>
- 1 int idx;
- 2
- 3 idx = srcu_read_lock(&amp;ss);
- 4 do_something();
- 5 synchronize_srcu(&amp;ss);
- 6 srcu_read_unlock(&amp;ss, idx);
-</pre>
-</blockquote>
-
-<p>
-However, if line&nbsp;5 acquired a mutex that was held across
-a <tt>synchronize_srcu()</tt> for domain <tt>ss</tt>,
-deadlock would still be possible.
-Furthermore, if line&nbsp;5 acquired a mutex that was held across
-a <tt>synchronize_srcu()</tt> for some other domain <tt>ss1</tt>,
-and if an <tt>ss1</tt>-domain SRCU read-side critical section
-acquired another mutex that was held across an <tt>ss</tt>-domain
-<tt>synchronize_srcu()</tt>,
-deadlock would again be possible.
-Such a deadlock cycle could extend across an arbitrarily large number
-of different SRCU domains.
-Again, with great power comes great responsibility.
-
-<p>
-Unlike the other RCU flavors, SRCU read-side critical sections can
-run on idle and even offline CPUs.
-This ability requires that <tt>srcu_read_lock()</tt> and
-<tt>srcu_read_unlock()</tt> contain memory barriers, which means
-that SRCU readers will run a bit slower than would RCU readers.
-It also motivates the <tt>smp_mb__after_srcu_read_unlock()</tt>
-API, which, in combination with <tt>srcu_read_unlock()</tt>,
-guarantees a full memory barrier.
-
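-<p>
-For example, in the following sketch, the combination of lines&nbsp;3
-and&nbsp;4 guarantees that the reader's critical section is ordered
-before the store on line&nbsp;5 (the <tt>flag</tt> variable is
-hypothetical):
-
-<blockquote>
-<pre>
- 1 idx = srcu_read_lock(&amp;ss);
- 2 do_something();
- 3 srcu_read_unlock(&amp;ss, idx);
- 4 smp_mb__after_srcu_read_unlock();
- 5 WRITE_ONCE(flag, 1);
-</pre>
-</blockquote>
-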
-<p>
-Also unlike other RCU flavors, <tt>synchronize_srcu()</tt> may <b>not</b>
-be invoked from CPU-hotplug notifiers, because SRCU grace
-periods make use of timers, and because those timers can be temporarily
-&ldquo;stranded&rdquo; on the outgoing CPU.
-This stranding of timers means that timers posted to the outgoing CPU
-will not fire until late in the CPU-hotplug process.
-The problem is that if a notifier is waiting on an SRCU grace period,
-that grace period is waiting on a timer, and that timer is stranded on the
-outgoing CPU, then the notifier will never be awakened, in other words,
-deadlock has occurred.
-This same situation of course also prohibits <tt>srcu_barrier()</tt>
-from being invoked from CPU-hotplug notifiers.
-
-<p>
-SRCU also differs from other RCU flavors in that SRCU's expedited and
-non-expedited grace periods are implemented by the same mechanism.
-This means that in the current SRCU implementation, expediting a
-future grace period has the side effect of expediting all prior
-grace periods that have not yet completed.
-(But please note that this is a property of the current implementation,
-not necessarily of future implementations.)
-In addition, if SRCU has been idle for longer than the interval
-specified by the <tt>srcutree.exp_holdoff</tt> kernel boot parameter
-(25&nbsp;microseconds by default),
-and if a <tt>synchronize_srcu()</tt> invocation ends this idle period,
-that invocation will be automatically expedited.
-
-<p>
-As of v4.12, SRCU's callbacks are maintained per-CPU, eliminating
-a locking bottleneck present in prior kernel versions.
-Although this will allow users to put much heavier stress on
-<tt>call_srcu()</tt>, it is important to note that SRCU does not
-yet take any special steps to deal with callback flooding.
-So if you are posting (say) 10,000 SRCU callbacks per second per CPU,
-you are probably totally OK, but if you intend to post (say) 1,000,000
-SRCU callbacks per second per CPU, please run some tests first.
-SRCU just might need a few adjustments to deal with that sort of load.
-Of course, your mileage may vary based on the speed of your CPUs and
-the size of your memory.
-
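-<p>
-For reference, posting an SRCU callback might be sketched as follows,
-where <tt>struct foo</tt>, its <tt>rh</tt> field, and
-<tt>free_foo_cb()</tt> are hypothetical:
-
-<blockquote>
-<pre>
- 1 static void free_foo_cb(struct rcu_head *rhp)
- 2 {
- 3   kfree(container_of(rhp, struct foo, rh));
- 4 }
- 5
- 6 call_srcu(&amp;ss, &amp;p-&gt;rh, free_foo_cb);
-</pre>
-</blockquote>
-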
-<p>
-The
-<a href="https://lwn.net/Articles/609973/#RCU Per-Flavor API Table">SRCU API</a>
-includes
-<tt>srcu_read_lock()</tt>,
-<tt>srcu_read_unlock()</tt>,
-<tt>srcu_dereference()</tt>,
-<tt>srcu_dereference_check()</tt>,
-<tt>synchronize_srcu()</tt>,
-<tt>synchronize_srcu_expedited()</tt>,
-<tt>call_srcu()</tt>,
-<tt>srcu_barrier()</tt>, and
-<tt>srcu_read_lock_held()</tt>.
-It also includes
-<tt>DEFINE_SRCU()</tt>,
-<tt>DEFINE_STATIC_SRCU()</tt>, and
-<tt>init_srcu_struct()</tt>
-APIs for defining and initializing <tt>srcu_struct</tt> structures.
-
-<h3><a name="Tasks RCU">Tasks RCU</a></h3>
-
-<p>
-Some forms of tracing use &ldquo;trampolines&rdquo; to handle the
-binary rewriting required to install different types of probes.
-It would be good to be able to free old trampolines, which sounds
-like a job for some form of RCU.
-However, because it is necessary to be able to install a trace
-anywhere in the code, it is not possible to use read-side markers
-such as <tt>rcu_read_lock()</tt> and <tt>rcu_read_unlock()</tt>.
-In addition, it does not work to have these markers in the trampoline
-itself, because there would need to be instructions following
-<tt>rcu_read_unlock()</tt>.
-Although <tt>synchronize_rcu()</tt> would guarantee that execution
-reached the <tt>rcu_read_unlock()</tt>, it would not be able to
-guarantee that execution had completely left the trampoline.
-
-<p>
-The solution, in the form of
-<a href="https://lwn.net/Articles/607117/"><i>Tasks RCU</i></a>,
-is to have implicit
-read-side critical sections that are delimited by voluntary context
-switches, that is, calls to <tt>schedule()</tt>,
-<tt>cond_resched()</tt>, and
-<tt>synchronize_rcu_tasks()</tt>.
-In addition, transitions to and from userspace execution also delimit
-tasks-RCU read-side critical sections.
-
-<p>
-The tasks-RCU API is quite compact, consisting only of
-<tt>call_rcu_tasks()</tt>,
-<tt>synchronize_rcu_tasks()</tt>, and
-<tt>rcu_barrier_tasks()</tt>.
-In <tt>CONFIG_PREEMPT=n</tt> kernels, trampolines cannot be preempted,
-so these APIs map to
-<tt>call_rcu()</tt>,
-<tt>synchronize_rcu()</tt>, and
-<tt>rcu_barrier()</tt>, respectively.
-In <tt>CONFIG_PREEMPT=y</tt> kernels, trampolines can be preempted,
-and these three APIs are therefore implemented by separate functions
-that check for voluntary context switches.
-
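-<p>
-For example, freeing an old trampoline might be sketched as follows,
-where <tt>unlink_trampoline()</tt> and <tt>free_trampoline()</tt> are
-hypothetical:
-
-<blockquote>
-<pre>
- 1 unlink_trampoline(tramp); /* No new tasks can enter. */
- 2 synchronize_rcu_tasks(); /* Wait out tasks already inside. */
- 3 free_trampoline(tramp); /* Now safe to free. */
-</pre>
-</blockquote>
-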
-<h2><a name="Possible Future Changes">Possible Future Changes</a></h2>
-
-<p>
-One of the tricks that RCU uses to attain update-side scalability is
-to increase grace-period latency with increasing numbers of CPUs.
-If this becomes a serious problem, it will be necessary to rework the
-grace-period state machine so as to avoid the need for the additional
-latency.
-
-<p>
-RCU disables CPU hotplug in a few places, perhaps most notably in the
-<tt>rcu_barrier()</tt> operations.
-If there is a strong reason to use <tt>rcu_barrier()</tt> in CPU-hotplug
-notifiers, it will be necessary to avoid disabling CPU hotplug.
-This would introduce some complexity, so there had better be a <i>very</i>
-good reason.
-
-<p>
-The tradeoff between grace-period latency on the one hand and interruptions
-of other CPUs on the other hand may need to be re-examined.
-The desire is of course for zero grace-period latency as well as zero
-interprocessor interrupts undertaken during an expedited grace period
-operation.
-While this ideal is unlikely to be achievable, it is quite possible that
-further improvements can be made.
-
-<p>
-The multiprocessor implementations of RCU use a combining tree that
-groups CPUs so as to reduce lock contention and increase cache locality.
-However, this combining tree does not spread its memory across NUMA
-nodes nor does it align the CPU groups with hardware features such
-as sockets or cores.
-Such spreading and alignment is currently believed to be unnecessary
-because the hotpath read-side primitives do not access the combining
-tree, nor does <tt>call_rcu()</tt> in the common case.
-If you believe that your architecture needs such spreading and alignment,
-then your architecture should also benefit from the
-<tt>rcutree.rcu_fanout_leaf</tt> boot parameter, which can be set
-to the number of CPUs in a socket, NUMA node, or whatever.
-If the number of CPUs is too large, use a fraction of the number of
-CPUs.
-If the number of CPUs is a large prime number, well, that certainly
-is an &ldquo;interesting&rdquo; architectural choice!
-More flexible arrangements might be considered, but only if
-<tt>rcutree.rcu_fanout_leaf</tt> has proven inadequate, and only
-if the inadequacy has been demonstrated by a carefully run and
-realistic system-level workload.
-
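-<p>
-For example, on a hypothetical system having 16 CPUs per socket, one
-might add the following to the kernel boot command line:
-
-<blockquote>
-<pre>
-rcutree.rcu_fanout_leaf=16
-</pre>
-</blockquote>
-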
-<p>
-Please note that arrangements that require RCU to remap CPU numbers will
-require extremely good demonstration of need and full exploration of
-alternatives.
-
-<p>
-RCU's various kthreads are reasonably recent additions.
-It is quite likely that adjustments will be required to more gracefully
-handle extreme loads.
-It might also be necessary to be able to relate CPU utilization by
-RCU's kthreads and softirq handlers to the code that instigated this
-CPU utilization.
-For example, RCU callback overhead might be charged back to the
-originating <tt>call_rcu()</tt> instance, though probably not
-in production kernels.
-
-<p>
-Additional work may be required to provide reasonable forward-progress
-guarantees under heavy load for grace periods and for callback
-invocation.
-
-<h2><a name="Summary">Summary</a></h2>
-
-<p>
-This document has presented more than two decades' worth of RCU
-requirements.
-Given that the requirements keep changing, this will not be the last
-word on this subject, but at least it serves to get an important
-subset of the requirements set forth.
-
-<h2><a name="Acknowledgments">Acknowledgments</a></h2>
-
-I am grateful to Steven Rostedt, Lai Jiangshan, Ingo Molnar,
-Oleg Nesterov, Borislav Petkov, Peter Zijlstra, Boqun Feng, and
-Andy Lutomirski for their help in rendering
-this article human readable, and to Michelle Rankin for her support
-of this effort.
-Other contributions are acknowledged in the Linux kernel's git archive.
-
-</body></html>
diff --git a/Documentation/RCU/Design/Requirements/Requirements.rst b/Documentation/RCU/Design/Requirements/Requirements.rst
new file mode 100644
index 000000000000..fd5e2cbc4935
--- /dev/null
+++ b/Documentation/RCU/Design/Requirements/Requirements.rst
@@ -0,0 +1,2704 @@
+=================================
+A Tour Through RCU's Requirements
+=================================
+
+Copyright IBM Corporation, 2015
+
+Author: Paul E. McKenney
+
+The initial version of this document appeared on
+`LWN <https://lwn.net/>`_ in the following articles:
+`part 1 <https://lwn.net/Articles/652156/>`_,
+`part 2 <https://lwn.net/Articles/652677/>`_, and
+`part 3 <https://lwn.net/Articles/653326/>`_.
+
+Introduction
+------------
+
+Read-copy update (RCU) is a synchronization mechanism that is often used
+as a replacement for reader-writer locking. RCU is unusual in that
+updaters do not block readers, which means that RCU's read-side
+primitives can be exceedingly fast and scalable. In addition, updaters
+can make useful forward progress concurrently with readers. However, all
+this concurrency between RCU readers and updaters does raise the
+question of exactly what RCU readers are doing, which in turn raises the
+question of exactly what RCU's requirements are.
+
+This document therefore summarizes RCU's requirements, and can be
+thought of as an informal, high-level specification for RCU. It is
+important to understand that RCU's specification is primarily empirical
+in nature; in fact, I learned about many of these requirements the hard
+way. This situation might cause some consternation. However, not only
+has this learning process been a lot of fun, but it has also been a
+great privilege to work with so many people willing to apply
+technologies in interesting new ways.
+
+All that aside, here are the categories of currently known RCU
+requirements:
+
+#. `Fundamental Requirements`_
+#. `Fundamental Non-Requirements`_
+#. `Parallelism Facts of Life`_
+#. `Quality-of-Implementation Requirements`_
+#. `Linux Kernel Complications`_
+#. `Software-Engineering Requirements`_
+#. `Other RCU Flavors`_
+#. `Possible Future Changes`_
+
+This is followed by a `summary <#Summary>`__; however, the answer to
+each quick quiz immediately follows the quiz itself.
+
+Fundamental Requirements
+------------------------
+
+RCU's fundamental requirements are the closest thing RCU has to hard
+mathematical requirements. These are:
+
+#. `Grace-Period Guarantee`_
+#. `Publish/Subscribe Guarantee`_
+#. `Memory-Barrier Guarantees`_
+#. `RCU Primitives Guaranteed to Execute Unconditionally`_
+#. `Guaranteed Read-to-Write Upgrade`_
+
+Grace-Period Guarantee
+~~~~~~~~~~~~~~~~~~~~~~
+
+RCU's grace-period guarantee is unusual in being premeditated: Jack
+Slingwine and I had this guarantee firmly in mind when we started work
+on RCU (then called “rclock”) in the early 1990s. That said, the past
+two decades of experience with RCU have produced a much more detailed
+understanding of this guarantee.
+
+RCU's grace-period guarantee allows updaters to wait for the completion
+of all pre-existing RCU read-side critical sections. An RCU read-side
+critical section begins with the marker ``rcu_read_lock()`` and ends
+with the marker ``rcu_read_unlock()``. These markers may be nested, and
+RCU treats a nested set as one big RCU read-side critical section.
+Production-quality implementations of ``rcu_read_lock()`` and
+``rcu_read_unlock()`` are extremely lightweight, and in fact have
+exactly zero overhead in Linux kernels built for production use with
+``CONFIG_PREEMPT=n``.
+
+This guarantee allows ordering to be enforced with extremely low
+overhead to readers, for example:
+
+ ::
+
+ 1 int x, y;
+ 2
+ 3 void thread0(void)
+ 4 {
+ 5 rcu_read_lock();
+ 6 r1 = READ_ONCE(x);
+ 7 r2 = READ_ONCE(y);
+ 8 rcu_read_unlock();
+ 9 }
+ 10
+ 11 void thread1(void)
+ 12 {
+ 13 WRITE_ONCE(x, 1);
+ 14 synchronize_rcu();
+ 15 WRITE_ONCE(y, 1);
+ 16 }
+
+Because the ``synchronize_rcu()`` on line 14 waits for all pre-existing
+readers, any instance of ``thread0()`` that loads a value of zero from
+``x`` must complete before ``thread1()`` stores to ``y``, so that
+instance must also load a value of zero from ``y``. Similarly, any
+instance of ``thread0()`` that loads a value of one from ``y`` must have
+started after the ``synchronize_rcu()`` started, and must therefore also
+load a value of one from ``x``. Therefore, the outcome:
+
+ ::
+
+ (r1 == 0 && r2 == 1)
+
+cannot happen.
+
++-----------------------------------------------------------------------+
+| **Quick Quiz**: |
++-----------------------------------------------------------------------+
+| Wait a minute! You said that updaters can make useful forward |
+| progress concurrently with readers, but pre-existing readers will |
+| block ``synchronize_rcu()``!!! |
+| Just who are you trying to fool??? |
++-----------------------------------------------------------------------+
+| **Answer**: |
++-----------------------------------------------------------------------+
+| First, if updaters do not wish to be blocked by readers, they can use |
+| ``call_rcu()`` or ``kfree_rcu()``, which will be discussed later. |
+| Second, even when using ``synchronize_rcu()``, the other update-side |
+| code does run concurrently with readers, whether pre-existing or not. |
++-----------------------------------------------------------------------+
+
+This scenario resembles one of the first uses of RCU in
+`DYNIX/ptx <https://en.wikipedia.org/wiki/DYNIX>`__, which managed a
+distributed lock manager's transition into a state suitable for handling
+recovery from node failure, more or less as follows:
+
+ ::
+
+ 1 #define STATE_NORMAL 0
+ 2 #define STATE_WANT_RECOVERY 1
+ 3 #define STATE_RECOVERING 2
+ 4 #define STATE_WANT_NORMAL 3
+ 5
+ 6 int state = STATE_NORMAL;
+ 7
+ 8 void do_something_dlm(void)
+ 9 {
+ 10 int state_snap;
+ 11
+ 12 rcu_read_lock();
+ 13 state_snap = READ_ONCE(state);
+ 14 if (state_snap == STATE_NORMAL)
+ 15 do_something();
+ 16 else
+ 17 do_something_carefully();
+ 18 rcu_read_unlock();
+ 19 }
+ 20
+ 21 void start_recovery(void)
+ 22 {
+ 23 WRITE_ONCE(state, STATE_WANT_RECOVERY);
+ 24 synchronize_rcu();
+ 25 WRITE_ONCE(state, STATE_RECOVERING);
+ 26 recovery();
+ 27 WRITE_ONCE(state, STATE_WANT_NORMAL);
+ 28 synchronize_rcu();
+ 29 WRITE_ONCE(state, STATE_NORMAL);
+ 30 }
+
+The RCU read-side critical section in ``do_something_dlm()`` works with
+the ``synchronize_rcu()`` in ``start_recovery()`` to guarantee that
+``do_something()`` never runs concurrently with ``recovery()``, but with
+little or no synchronization overhead in ``do_something_dlm()``.
+
++-----------------------------------------------------------------------+
+| **Quick Quiz**: |
++-----------------------------------------------------------------------+
+| Why is the ``synchronize_rcu()`` on line 28 needed? |
++-----------------------------------------------------------------------+
+| **Answer**: |
++-----------------------------------------------------------------------+
+| Without that extra grace period, memory reordering could result in |
+| ``do_something_dlm()`` executing ``do_something()`` concurrently with |
+| the last bits of ``recovery()``. |
++-----------------------------------------------------------------------+
+
+In order to avoid fatal problems such as deadlocks, an RCU read-side
+critical section must not contain calls to ``synchronize_rcu()``.
+Similarly, an RCU read-side critical section must not contain anything
+that waits, directly or indirectly, on completion of an invocation of
+``synchronize_rcu()``.
+
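+For example, the following sketch illustrates the forbidden pattern, in
+which the ``synchronize_rcu()`` waits for a grace period that cannot
+complete until the enclosing reader completes:
+
+   ::
+
+     1 rcu_read_lock();
+     2 synchronize_rcu(); /* BUG: Self-deadlock!!! */
+     3 rcu_read_unlock();
+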
+Although RCU's grace-period guarantee is useful in and of itself, with
+`quite a few use cases <https://lwn.net/Articles/573497/>`__, it would
+be good to be able to use RCU to coordinate read-side access to linked
+data structures. For this, the grace-period guarantee is not sufficient,
+as can be seen in function ``add_gp_buggy()`` below. We will look at the
+reader's code later, but in the meantime, just think of the reader as
+locklessly picking up the ``gp`` pointer, and, if the value loaded is
+non-\ ``NULL``, locklessly accessing the ``->a`` and ``->b`` fields.
+
+ ::
+
+ 1 bool add_gp_buggy(int a, int b)
+ 2 {
+ 3 p = kmalloc(sizeof(*p), GFP_KERNEL);
+ 4 if (!p)
+     5     return false; /* Allocation failed. */
+ 6 spin_lock(&gp_lock);
+ 7 if (rcu_access_pointer(gp)) {
+ 8 spin_unlock(&gp_lock);
+ 9 return false;
+ 10 }
+ 11 p->a = a;
+    12   p->b = b;
+ 13 gp = p; /* ORDERING BUG */
+ 14 spin_unlock(&gp_lock);
+ 15 return true;
+ 16 }
+
+The problem is that both the compiler and weakly ordered CPUs are within
+their rights to reorder this code as follows:
+
+ ::
+
+ 1 bool add_gp_buggy_optimized(int a, int b)
+ 2 {
+ 3 p = kmalloc(sizeof(*p), GFP_KERNEL);
+ 4 if (!p)
+     5     return false; /* Allocation failed. */
+ 6 spin_lock(&gp_lock);
+ 7 if (rcu_access_pointer(gp)) {
+ 8 spin_unlock(&gp_lock);
+ 9 return false;
+ 10 }
+ 11 gp = p; /* ORDERING BUG */
+ 12 p->a = a;
+    13   p->b = b;
+ 14 spin_unlock(&gp_lock);
+ 15 return true;
+ 16 }
+
+If an RCU reader fetches ``gp`` just after ``add_gp_buggy_optimized``
+executes line 11, it will see garbage in the ``->a`` and ``->b`` fields.
+And this is but one of many ways in which compiler and hardware
+optimizations could cause trouble. Therefore, we clearly need some way
+to prevent the compiler and the CPU from reordering in this manner,
+which brings us to the publish-subscribe guarantee discussed in the next
+section.
+
+Publish/Subscribe Guarantee
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+RCU's publish-subscribe guarantee allows data to be inserted into a
+linked data structure without disrupting RCU readers. The updater uses
+``rcu_assign_pointer()`` to insert the new data, and readers use
+``rcu_dereference()`` to access data, whether new or old. The following
+shows an example of insertion:
+
+ ::
+
+ 1 bool add_gp(int a, int b)
+ 2 {
+ 3 p = kmalloc(sizeof(*p), GFP_KERNEL);
+ 4 if (!p)
+     5     return false; /* Allocation failed. */
+ 6 spin_lock(&gp_lock);
+ 7 if (rcu_access_pointer(gp)) {
+ 8 spin_unlock(&gp_lock);
+ 9 return false;
+ 10 }
+ 11 p->a = a;
+    12   p->b = b;
+ 13 rcu_assign_pointer(gp, p);
+ 14 spin_unlock(&gp_lock);
+ 15 return true;
+ 16 }
+
+The ``rcu_assign_pointer()`` on line 13 is conceptually equivalent to a
+simple assignment statement, but also guarantees that its assignment
+will happen after the two assignments in lines 11 and 12, similar to the
+C11 ``memory_order_release`` store operation. It also prevents any
+number of “interesting” compiler optimizations, for example, the use of
+``gp`` as a scratch location immediately preceding the assignment.
+
++-----------------------------------------------------------------------+
+| **Quick Quiz**: |
++-----------------------------------------------------------------------+
+| But ``rcu_assign_pointer()`` does nothing to prevent the two |
+| assignments to ``p->a`` and ``p->b`` from being reordered. Can't that |
+| also cause problems? |
++-----------------------------------------------------------------------+
+| **Answer**: |
++-----------------------------------------------------------------------+
+| No, it cannot. The readers cannot see either of these two fields |
+| until the assignment to ``gp``, by which time both fields are fully |
+| initialized. So reordering the assignments to ``p->a`` and ``p->b`` |
+| cannot possibly cause any problems. |
++-----------------------------------------------------------------------+
+
+It is tempting to assume that the reader need not do anything special to
+control its accesses to the RCU-protected data, as shown in
+``do_something_gp_buggy()`` below:
+
+ ::
+
+ 1 bool do_something_gp_buggy(void)
+ 2 {
+ 3 rcu_read_lock();
+ 4 p = gp; /* OPTIMIZATIONS GALORE!!! */
+ 5 if (p) {
+ 6 do_something(p->a, p->b);
+ 7 rcu_read_unlock();
+ 8 return true;
+ 9 }
+ 10 rcu_read_unlock();
+ 11 return false;
+ 12 }
+
+However, this temptation must be resisted because there are a
+surprisingly large number of ways that the compiler (to say nothing of
+`DEC Alpha CPUs <https://h71000.www7.hp.com/wizard/wiz_2637.html>`__)
+can trip this code up. For but one example, if the compiler were short
+of registers, it might choose to refetch from ``gp`` rather than keeping
+a separate copy in ``p`` as follows:
+
+ ::
+
+ 1 bool do_something_gp_buggy_optimized(void)
+ 2 {
+ 3 rcu_read_lock();
+ 4 if (gp) { /* OPTIMIZATIONS GALORE!!! */
+ 5 do_something(gp->a, gp->b);
+ 6 rcu_read_unlock();
+ 7 return true;
+ 8 }
+ 9 rcu_read_unlock();
+ 10 return false;
+ 11 }
+
+If this function ran concurrently with a series of updates that replaced
+the current structure with a new one, the fetches of ``gp->a`` and
+``gp->b`` might well come from two different structures, which could
+cause serious confusion. To prevent this (and much else besides),
+``do_something_gp()`` uses ``rcu_dereference()`` to fetch from ``gp``:
+
+ ::
+
+ 1 bool do_something_gp(void)
+ 2 {
+ 3 rcu_read_lock();
+ 4 p = rcu_dereference(gp);
+ 5 if (p) {
+ 6 do_something(p->a, p->b);
+ 7 rcu_read_unlock();
+ 8 return true;
+ 9 }
+ 10 rcu_read_unlock();
+ 11 return false;
+ 12 }
+
+The ``rcu_dereference()`` uses volatile casts and (for DEC Alpha) memory
+barriers in the Linux kernel. Should a `high-quality implementation of
+C11 ``memory_order_consume``
+[PDF] <http://www.rdrop.com/users/paulmck/RCU/consume.2015.07.13a.pdf>`__
+ever appear, then ``rcu_dereference()`` could be implemented as a
+``memory_order_consume`` load. Regardless of the exact implementation, a
+pointer fetched by ``rcu_dereference()`` may not be used outside of the
+outermost RCU read-side critical section containing that
+``rcu_dereference()``, unless protection of the corresponding data
+element has been passed from RCU to some other synchronization
+mechanism, most commonly locking or `reference
+counting <https://www.kernel.org/doc/Documentation/RCU/rcuref.txt>`__.
+
+In short, updaters use ``rcu_assign_pointer()`` and readers use
+``rcu_dereference()``, and these two RCU API elements work together to
+ensure that readers have a consistent view of newly added data elements.
+
+Of course, it is also necessary to remove elements from RCU-protected
+data structures, for example, using the following process:
+
+#. Remove the data element from the enclosing structure.
+#. Wait for all pre-existing RCU read-side critical sections to complete
+ (because only pre-existing readers can possibly have a reference to
+ the newly removed data element).
+#. At this point, only the updater has a reference to the newly removed
+ data element, so it can safely reclaim the data element, for example,
+ by passing it to ``kfree()``.
+
+This process is implemented by ``remove_gp_synchronous()``:
+
+ ::
+
+ 1 bool remove_gp_synchronous(void)
+ 2 {
+ 3 struct foo *p;
+ 4
+ 5 spin_lock(&gp_lock);
+ 6 p = rcu_access_pointer(gp);
+ 7 if (!p) {
+ 8 spin_unlock(&gp_lock);
+ 9 return false;
+ 10 }
+ 11 rcu_assign_pointer(gp, NULL);
+ 12 spin_unlock(&gp_lock);
+ 13 synchronize_rcu();
+ 14 kfree(p);
+ 15 return true;
+ 16 }
+
+This function is straightforward, with line 13 waiting for a grace
+period before line 14 frees the old data element. This waiting ensures
+that readers will reach line 7 of ``do_something_gp()`` before the data
+element referenced by ``p`` is freed. The ``rcu_access_pointer()`` on
+line 6 is similar to ``rcu_dereference()``, except that:
+
+#. The value returned by ``rcu_access_pointer()`` cannot be
+ dereferenced. If you want to access the value pointed to as well as
+ the pointer itself, use ``rcu_dereference()`` instead of
+ ``rcu_access_pointer()``.
+#. The call to ``rcu_access_pointer()`` need not be protected. In
+ contrast, ``rcu_dereference()`` must either be within an RCU
+ read-side critical section or in a code segment where the pointer
+ cannot change, for example, in code protected by the corresponding
+ update-side lock.
+
++-----------------------------------------------------------------------+
+| **Quick Quiz**: |
++-----------------------------------------------------------------------+
+| Without the ``rcu_dereference()`` or the ``rcu_access_pointer()``, |
+| what destructive optimizations might the compiler make use of? |
++-----------------------------------------------------------------------+
+| **Answer**: |
++-----------------------------------------------------------------------+
+| Let's start with what happens to ``do_something_gp()`` if it fails to |
+| use ``rcu_dereference()``. It could reuse a value formerly fetched |
+| from this same pointer. It could also fetch the pointer from ``gp`` |
+| in a byte-at-a-time manner, resulting in *load tearing*, in turn |
+| yielding a bytewise mash-up of two distinct pointer values. It might  |
+| even use value-speculation optimizations, where it makes a wrong |
+| guess, but by the time it gets around to checking the value, an |
+| update has changed the pointer to match the wrong guess. Too bad |
+| about any dereferences that returned pre-initialization garbage in |
+| the meantime! |
+| For ``remove_gp_synchronous()``, as long as all modifications to |
+| ``gp`` are carried out while holding ``gp_lock``, the above |
+| optimizations are harmless. However, ``sparse`` will complain if you |
+| define ``gp`` with ``__rcu`` and then access it without using either |
+| ``rcu_access_pointer()`` or ``rcu_dereference()``. |
++-----------------------------------------------------------------------+
+
+In short, RCU's publish-subscribe guarantee is provided by the
+combination of ``rcu_assign_pointer()`` and ``rcu_dereference()``. This
+guarantee allows data elements to be safely added to RCU-protected
+linked data structures without disrupting RCU readers. This guarantee
+can be used in combination with the grace-period guarantee to also allow
+data elements to be removed from RCU-protected linked data structures,
+again without disrupting RCU readers.
+
+This guarantee was only partially premeditated. DYNIX/ptx used an
+explicit memory barrier for publication, but had nothing resembling
+``rcu_dereference()`` for subscription, nor did it have anything
+resembling the ``smp_read_barrier_depends()`` that was later subsumed
+into ``rcu_dereference()`` and later still into ``READ_ONCE()``. The
+need for these operations made itself known quite suddenly at a
+late-1990s meeting with the DEC Alpha architects, back in the days when
+DEC was still a free-standing company. It took the Alpha architects a
+good hour to convince me that any sort of barrier would ever be needed,
+and it then took me a good *two* hours to convince them that their
+documentation did not make this point clear. More recent work with the C
+and C++ standards committees has provided much education on tricks and
+traps from the compiler. In short, compilers were much less tricky in
+the early 1990s, but in 2015, don't even think about omitting
+``rcu_dereference()``!
+
+Memory-Barrier Guarantees
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The previous section's simple linked-data-structure scenario clearly
+demonstrates the need for RCU's stringent memory-ordering guarantees on
+systems with more than one CPU:
+
+#. Each CPU that has an RCU read-side critical section that begins
+ before ``synchronize_rcu()`` starts is guaranteed to execute a full
+ memory barrier between the time that the RCU read-side critical
+ section ends and the time that ``synchronize_rcu()`` returns. Without
+ this guarantee, a pre-existing RCU read-side critical section might
+ hold a reference to the newly removed ``struct foo`` after the
+ ``kfree()`` on line 14 of ``remove_gp_synchronous()``.
+#. Each CPU that has an RCU read-side critical section that ends after
+ ``synchronize_rcu()`` returns is guaranteed to execute a full memory
+ barrier between the time that ``synchronize_rcu()`` begins and the
+ time that the RCU read-side critical section begins. Without this
+ guarantee, a later RCU read-side critical section running after the
+ ``kfree()`` on line 14 of ``remove_gp_synchronous()`` might later run
+ ``do_something_gp()`` and find the newly deleted ``struct foo``.
+#. If the task invoking ``synchronize_rcu()`` remains on a given CPU,
+ then that CPU is guaranteed to execute a full memory barrier sometime
+ during the execution of ``synchronize_rcu()``. This guarantee ensures
+ that the ``kfree()`` on line 14 of ``remove_gp_synchronous()`` really
+ does execute after the removal on line 11.
+#. If the task invoking ``synchronize_rcu()`` migrates among a group of
+ CPUs during that invocation, then each of the CPUs in that group is
+ guaranteed to execute a full memory barrier sometime during the
+ execution of ``synchronize_rcu()``. This guarantee also ensures that
+ the ``kfree()`` on line 14 of ``remove_gp_synchronous()`` really does
+ execute after the removal on line 11, but also in the case where the
+ thread executing the ``synchronize_rcu()`` migrates in the meantime.
+
++-----------------------------------------------------------------------+
+| **Quick Quiz**: |
++-----------------------------------------------------------------------+
+| Given that multiple CPUs can start RCU read-side critical sections at |
+| any time without any ordering whatsoever, how can RCU possibly tell |
+| whether or not a given RCU read-side critical section starts before a |
+| given instance of ``synchronize_rcu()``? |
++-----------------------------------------------------------------------+
+| **Answer**: |
++-----------------------------------------------------------------------+
+| If RCU cannot tell whether or not a given RCU read-side critical |
+| section starts before a given instance of ``synchronize_rcu()``, then |
+| it must assume that the RCU read-side critical section started first. |
+| In other words, a given instance of ``synchronize_rcu()`` can avoid |
+| waiting on a given RCU read-side critical section only if it can |
+| prove that ``synchronize_rcu()`` started first. |
+| A related question is “When ``rcu_read_lock()`` doesn't generate any |
+| code, why does it matter how it relates to a grace period?” The |
+| answer is that it is not the relationship of ``rcu_read_lock()`` |
+| itself that is important, but rather the relationship of the code |
+| within the enclosed RCU read-side critical section to the code |
+| preceding and following the grace period. If we take this viewpoint, |
+| then a given RCU read-side critical section begins before a given |
+| grace period when some access preceding the grace period observes the |
+| effect of some access within the critical section, in which case none |
+| of the accesses within the critical section may observe the effects |
+| of any access following the grace period. |
+| |
+| As of late 2016, mathematical models of RCU take this viewpoint, for |
+| example, see slides 62 and 63 of the `2016 LinuxCon |
+| EU <http://www2.rdrop.com/users/paulmck/scalability/paper/LinuxMM.201 |
+| 6.10.04c.LCE.pdf>`__ |
+| presentation. |
++-----------------------------------------------------------------------+
+
++-----------------------------------------------------------------------+
+| **Quick Quiz**: |
++-----------------------------------------------------------------------+
+| The first and second guarantees require unbelievably strict ordering! |
+| Are all these memory barriers *really* required? |
++-----------------------------------------------------------------------+
+| **Answer**: |
++-----------------------------------------------------------------------+
+| Yes, they really are required. To see why the first guarantee is |
+| required, consider the following sequence of events: |
+| |
+| #. CPU 1: ``rcu_read_lock()`` |
+| #. CPU 1: ``q = rcu_dereference(gp); /* Very likely to return p. */`` |
+| #. CPU 0: ``list_del_rcu(p);`` |
+| #. CPU 0: ``synchronize_rcu()`` starts. |
+| #. CPU 1: ``do_something_with(q->a);`` |
+| ``/* No smp_mb(), so might happen after kfree(). */`` |
+| #. CPU 1: ``rcu_read_unlock()`` |
+| #. CPU 0: ``synchronize_rcu()`` returns. |
+| #. CPU 0: ``kfree(p);`` |
+| |
+| Therefore, there absolutely must be a full memory barrier between the |
+| end of the RCU read-side critical section and the end of the grace |
+| period. |
+| |
+| The sequence of events demonstrating the necessity of the second rule |
+| is roughly similar: |
+| |
+| #. CPU 0: ``list_del_rcu(p);`` |
+| #. CPU 0: ``synchronize_rcu()`` starts. |
+| #. CPU 1: ``rcu_read_lock()`` |
+| #. CPU 1: ``q = rcu_dereference(gp);`` |
+| ``/* Might return p if no memory barrier. */`` |
+| #. CPU 0: ``synchronize_rcu()`` returns. |
+| #. CPU 0: ``kfree(p);`` |
+| #. CPU 1: ``do_something_with(q->a); /* Boom!!! */`` |
+| #. CPU 1: ``rcu_read_unlock()`` |
+| |
+| And similarly, without a memory barrier between the beginning of the |
+| grace period and the beginning of the RCU read-side critical section, |
+| CPU 1 might end up accessing the freelist. |
+| |
+| The “as if” rule of course applies, so that any implementation that |
+| acts as if the appropriate memory barriers were in place is a correct |
+| implementation. That said, it is much easier to fool yourself into |
+| believing that you have adhered to the as-if rule than it is to |
+| actually adhere to it! |
++-----------------------------------------------------------------------+
+
++-----------------------------------------------------------------------+
+| **Quick Quiz**: |
++-----------------------------------------------------------------------+
+| You claim that ``rcu_read_lock()`` and ``rcu_read_unlock()`` generate |
+| absolutely no code in some kernel builds. This means that the |
+| compiler might arbitrarily rearrange consecutive RCU read-side |
+| critical sections. Given such rearrangement, if a given RCU read-side |
+| critical section is done, how can you be sure that all prior RCU |
+| read-side critical sections are done? Won't the compiler |
+| rearrangements make that impossible to determine? |
++-----------------------------------------------------------------------+
+| **Answer**: |
++-----------------------------------------------------------------------+
+| In cases where ``rcu_read_lock()`` and ``rcu_read_unlock()`` generate |
+| absolutely no code, RCU infers quiescent states only at special |
+| locations, for example, within the scheduler. Because calls to |
+| ``schedule()`` had better prevent calling-code accesses to shared |
+| variables from being rearranged across the call to ``schedule()``, if |
+| RCU detects the end of a given RCU read-side critical section, it |
+| will necessarily detect the end of all prior RCU read-side critical |
+| sections, no matter how aggressively the compiler scrambles the code. |
+| Again, this all assumes that the compiler cannot scramble code across |
+| calls to the scheduler, out of interrupt handlers, into the idle |
+| loop, into user-mode code, and so on. But if your kernel build allows |
+| that sort of scrambling, you have broken far more than just RCU! |
++-----------------------------------------------------------------------+
+
+Note that these memory-barrier requirements do not replace the
+fundamental RCU requirement that a grace period wait for all
+pre-existing readers. On the contrary, the memory barriers called out in
+this section must operate in such a way as to *enforce* this fundamental
+requirement. Of course, different implementations enforce this
+requirement in different ways, but enforce it they must.
+
+RCU Primitives Guaranteed to Execute Unconditionally
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The common-case RCU primitives are unconditional. They are invoked, they
+do their job, and they return, with no possibility of error, and no need
+to retry. This is a key RCU design philosophy.
+
+However, this philosophy is pragmatic rather than pigheaded. If someone
+comes up with a good justification for a particular conditional RCU
+primitive, it might well be implemented and added. After all, this
+guarantee was reverse-engineered, not premeditated. The unconditional
+nature of the RCU primitives was initially an accident of
+implementation, and later experience with synchronization primitives
+with conditional primitives caused me to elevate this accident to a
+guarantee. Therefore, the justification for adding a conditional
+primitive to RCU would need to be based on detailed and compelling use
+cases.
+
+Guaranteed Read-to-Write Upgrade
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+As far as RCU is concerned, it is always possible to carry out an update
+within an RCU read-side critical section. For example, that RCU
+read-side critical section might search for a given data element, and
+then might acquire the update-side spinlock in order to update that
+element, all while remaining in that RCU read-side critical section. Of
+course, it is necessary to exit the RCU read-side critical section
+before invoking ``synchronize_rcu()``; however, this inconvenience can
+be avoided through use of the ``call_rcu()`` and ``kfree_rcu()`` API
+members described later in this document.
+
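+For example, a reader might carry out an upgrade as follows. This is a
+minimal sketch: the ``gp`` pointer and ``gp_lock`` match the earlier
+examples, and real code would typically recheck under the lock that the
+element is still reachable:
+
+   ::
+
+     1 rcu_read_lock();
+     2 p = rcu_dereference(gp);
+     3 if (p) {
+     4   spin_lock(&gp_lock);
+     5   p->a++; /* Update while still within the reader. */
+     6   spin_unlock(&gp_lock);
+     7 }
+     8 rcu_read_unlock();
+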
++-----------------------------------------------------------------------+
+| **Quick Quiz**: |
++-----------------------------------------------------------------------+
+| But how does the upgrade-to-write operation exclude other readers? |
++-----------------------------------------------------------------------+
+| **Answer**: |
++-----------------------------------------------------------------------+
+| It doesn't, just like normal RCU updates, which also do not exclude |
+| RCU readers. |
++-----------------------------------------------------------------------+
+
+This guarantee allows lookup code to be shared between read-side and
+update-side code, and was premeditated, appearing in the earliest
+DYNIX/ptx RCU documentation.
+
+Fundamental Non-Requirements
+----------------------------
+
+RCU provides extremely lightweight readers, and its read-side
+guarantees, though quite useful, are correspondingly lightweight. It is
+therefore all too easy to assume that RCU is guaranteeing more than it
+really is. Of course, the list of things that RCU does not guarantee is
+infinitely long; however, the following sections list a few
+non-guarantees that have caused confusion. Except where otherwise noted,
+these non-guarantees were premeditated.
+
+#. `Readers Impose Minimal Ordering`_
+#. `Readers Do Not Exclude Updaters`_
+#. `Updaters Only Wait For Old Readers`_
+#. `Grace Periods Don't Partition Read-Side Critical Sections`_
+#. `Read-Side Critical Sections Don't Partition Grace Periods`_
+
+Readers Impose Minimal Ordering
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Reader-side markers such as ``rcu_read_lock()`` and
+``rcu_read_unlock()`` provide absolutely no ordering guarantees except
+through their interaction with the grace-period APIs such as
+``synchronize_rcu()``. To see this, consider the following pair of
+threads:
+
+ ::
+
+ 1 void thread0(void)
+ 2 {
+ 3 rcu_read_lock();
+ 4 WRITE_ONCE(x, 1);
+ 5 rcu_read_unlock();
+ 6 rcu_read_lock();
+ 7 WRITE_ONCE(y, 1);
+ 8 rcu_read_unlock();
+ 9 }
+ 10
+ 11 void thread1(void)
+ 12 {
+ 13 rcu_read_lock();
+ 14 r1 = READ_ONCE(y);
+ 15 rcu_read_unlock();
+ 16 rcu_read_lock();
+ 17 r2 = READ_ONCE(x);
+ 18 rcu_read_unlock();
+ 19 }
+
+After ``thread0()`` and ``thread1()`` execute concurrently, it is quite
+possible to have
+
+ ::
+
+ (r1 == 1 && r2 == 0)
+
+(that is, ``y`` appears to have been assigned before ``x``), which would
+not be possible if ``rcu_read_lock()`` and ``rcu_read_unlock()`` had
+much in the way of ordering properties. But they do not, so the CPU is
+within its rights to do significant reordering. This is by design: Any
+significant ordering constraints would slow down these fast-path APIs.
+
++-----------------------------------------------------------------------+
+| **Quick Quiz**: |
++-----------------------------------------------------------------------+
+| Can't the compiler also reorder this code? |
++-----------------------------------------------------------------------+
+| **Answer**: |
++-----------------------------------------------------------------------+
+| No, the volatile casts in ``READ_ONCE()`` and ``WRITE_ONCE()`` |
+| prevent the compiler from reordering in this particular case. |
++-----------------------------------------------------------------------+
+
+Readers Do Not Exclude Updaters
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Neither ``rcu_read_lock()`` nor ``rcu_read_unlock()`` exclude updates.
+All they do is to prevent grace periods from ending. The following
+example illustrates this:
+
+ ::
+
+ 1 void thread0(void)
+ 2 {
+ 3 rcu_read_lock();
+ 4 r1 = READ_ONCE(y);
+ 5 if (r1) {
+ 6 do_something_with_nonzero_x();
+ 7 r2 = READ_ONCE(x);
+ 8 WARN_ON(!r2); /* BUG!!! */
+ 9 }
+ 10 rcu_read_unlock();
+ 11 }
+ 12
+ 13 void thread1(void)
+ 14 {
+ 15 spin_lock(&my_lock);
+ 16 WRITE_ONCE(x, 1);
+ 17 WRITE_ONCE(y, 1);
+ 18 spin_unlock(&my_lock);
+ 19 }
+
+If the ``thread0()`` function's ``rcu_read_lock()`` excluded the
+``thread1()`` function's update, the ``WARN_ON()`` could never fire. But
+the fact is that ``rcu_read_lock()`` does not exclude much of anything
+aside from subsequent grace periods, of which ``thread1()`` has none, so
+the ``WARN_ON()`` can and does fire.
+
+Updaters Only Wait For Old Readers
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+It might be tempting to assume that after ``synchronize_rcu()``
+completes, there are no readers executing. This temptation must be
+avoided because new readers can start immediately after
+``synchronize_rcu()`` starts, and ``synchronize_rcu()`` is under no
+obligation to wait for these new readers.
+
++-----------------------------------------------------------------------+
+| **Quick Quiz**: |
++-----------------------------------------------------------------------+
+| Suppose that synchronize_rcu() did wait until *all* readers had |
+| completed instead of waiting only on pre-existing readers. For how |
+| long would the updater be able to rely on there being no readers? |
++-----------------------------------------------------------------------+
+| **Answer**: |
++-----------------------------------------------------------------------+
+| For no time at all. Even if ``synchronize_rcu()`` were to wait until |
+| all readers had completed, a new reader might start immediately after |
+| ``synchronize_rcu()`` completed. Therefore, the code following |
+| ``synchronize_rcu()`` can *never* rely on there being no readers. |
++-----------------------------------------------------------------------+
+
+Grace Periods Don't Partition Read-Side Critical Sections
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+It is tempting to assume that if any part of one RCU read-side critical
+section precedes a given grace period, and if any part of another RCU
+read-side critical section follows that same grace period, then all of
+the first RCU read-side critical section must precede all of the second.
+However, this just isn't the case: A single grace period does not
+partition the set of RCU read-side critical sections. An example of this
+situation can be illustrated as follows, where ``a``, ``b``, and ``c``
+are initially all zero:
+
+ ::
+
+ 1 void thread0(void)
+ 2 {
+ 3 rcu_read_lock();
+ 4 WRITE_ONCE(a, 1);
+ 5 WRITE_ONCE(b, 1);
+ 6 rcu_read_unlock();
+ 7 }
+ 8
+ 9 void thread1(void)
+ 10 {
+ 11 r1 = READ_ONCE(a);
+ 12 synchronize_rcu();
+ 13 WRITE_ONCE(c, 1);
+ 14 }
+ 15
+ 16 void thread2(void)
+ 17 {
+ 18 rcu_read_lock();
+ 19 r2 = READ_ONCE(b);
+ 20 r3 = READ_ONCE(c);
+ 21 rcu_read_unlock();
+ 22 }
+
+It turns out that the outcome:
+
+ ::
+
+ (r1 == 1 && r2 == 0 && r3 == 1)
+
+is entirely possible. The following figure shows how this can happen,
+with each circled ``QS`` indicating the point at which RCU recorded a
+*quiescent state* for each thread, that is, a state in which RCU knows
+that the thread cannot be in the midst of an RCU read-side critical
+section that started before the current grace period:
+
+.. kernel-figure:: GPpartitionReaders1.svg
+
+If it is necessary to partition RCU read-side critical sections in this
+manner, it is necessary to use two grace periods, where the first grace
+period is known to end before the second grace period starts:
+
+ ::
+
+ 1 void thread0(void)
+ 2 {
+ 3 rcu_read_lock();
+ 4 WRITE_ONCE(a, 1);
+ 5 WRITE_ONCE(b, 1);
+ 6 rcu_read_unlock();
+ 7 }
+ 8
+ 9 void thread1(void)
+ 10 {
+ 11 r1 = READ_ONCE(a);
+ 12 synchronize_rcu();
+ 13 WRITE_ONCE(c, 1);
+ 14 }
+ 15
+ 16 void thread2(void)
+ 17 {
+ 18 r2 = READ_ONCE(c);
+ 19 synchronize_rcu();
+ 20 WRITE_ONCE(d, 1);
+ 21 }
+ 22
+ 23 void thread3(void)
+ 24 {
+ 25 rcu_read_lock();
+ 26 r3 = READ_ONCE(b);
+ 27 r4 = READ_ONCE(d);
+ 28 rcu_read_unlock();
+ 29 }
+
+Here, if ``(r1 == 1)``, then ``thread0()``'s write to ``b`` must happen
+before the end of ``thread1()``'s grace period. If in addition
+``(r4 == 1)``, then ``thread3()``'s read from ``b`` must happen after
+the beginning of ``thread2()``'s grace period. If it is also the case
+that ``(r2 == 1)``, then the end of ``thread1()``'s grace period must
+precede the beginning of ``thread2()``'s grace period. This means that
+the two RCU read-side critical sections cannot overlap, guaranteeing
+that ``(r3 == 1)``. As a result, the outcome:
+
+ ::
+
+ (r1 == 1 && r2 == 1 && r3 == 0 && r4 == 1)
+
+cannot happen.
+
+This non-requirement was also non-premeditated, but became apparent when
+studying RCU's interaction with memory ordering.
+
+Read-Side Critical Sections Don't Partition Grace Periods
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+It is also tempting to assume that if an RCU read-side critical section
+happens between a pair of grace periods, then those grace periods cannot
+overlap. However, this temptation leads nowhere good, as can be
+illustrated by the following, with all variables initially zero:
+
+ ::
+
+ 1 void thread0(void)
+ 2 {
+ 3 rcu_read_lock();
+ 4 WRITE_ONCE(a, 1);
+ 5 WRITE_ONCE(b, 1);
+ 6 rcu_read_unlock();
+ 7 }
+ 8
+ 9 void thread1(void)
+ 10 {
+ 11 r1 = READ_ONCE(a);
+ 12 synchronize_rcu();
+ 13 WRITE_ONCE(c, 1);
+ 14 }
+ 15
+ 16 void thread2(void)
+ 17 {
+ 18 rcu_read_lock();
+ 19 WRITE_ONCE(d, 1);
+ 20 r2 = READ_ONCE(c);
+ 21 rcu_read_unlock();
+ 22 }
+ 23
+ 24 void thread3(void)
+ 25 {
+ 26 r3 = READ_ONCE(d);
+ 27 synchronize_rcu();
+ 28 WRITE_ONCE(e, 1);
+ 29 }
+ 30
+ 31 void thread4(void)
+ 32 {
+ 33 rcu_read_lock();
+ 34 r4 = READ_ONCE(b);
+ 35 r5 = READ_ONCE(e);
+ 36 rcu_read_unlock();
+ 37 }
+
+In this case, the outcome:
+
+ ::
+
+ (r1 == 1 && r2 == 1 && r3 == 1 && r4 == 0 && r5 == 1)
+
+is entirely possible, as illustrated below:
+
+.. kernel-figure:: ReadersPartitionGP1.svg
+
+Again, an RCU read-side critical section can overlap almost all of a
+given grace period, just so long as it does not overlap the entire grace
+period. As a result, an RCU read-side critical section cannot partition
+a pair of RCU grace periods.
+
++-----------------------------------------------------------------------+
+| **Quick Quiz**: |
++-----------------------------------------------------------------------+
+| How long a sequence of grace periods, each separated by an RCU |
+| read-side critical section, would be required to partition the RCU |
+| read-side critical sections at the beginning and end of the chain? |
++-----------------------------------------------------------------------+
+| **Answer**: |
++-----------------------------------------------------------------------+
+| In theory, an infinite number. In practice, an unknown number that is |
+| sensitive to both implementation details and timing considerations. |
+| Therefore, even in practice, RCU users must abide by the theoretical |
+| rather than the practical answer. |
++-----------------------------------------------------------------------+
+
+Parallelism Facts of Life
+-------------------------
+
+These parallelism facts of life are by no means specific to RCU, but the
+RCU implementation must abide by them. They therefore bear repeating:
+
+#. Any CPU or task may be delayed at any time, and any attempts to avoid
+ these delays by disabling preemption, interrupts, or whatever are
+ completely futile. This is most obvious in preemptible user-level
+ environments and in virtualized environments (where a given guest
+ OS's VCPUs can be preempted at any time by the underlying
+ hypervisor), but can also happen in bare-metal environments due to
+ ECC errors, NMIs, and other hardware events. Although a delay of more
+ than about 20 seconds can result in splats, the RCU implementation is
+ obligated to use algorithms that can tolerate extremely long delays,
+ but where “extremely long” is not long enough to allow wrap-around
+ when incrementing a 64-bit counter.
+#. Both the compiler and the CPU can reorder memory accesses. Where it
+ matters, RCU must use compiler directives and memory-barrier
+ instructions to preserve ordering.
+#. Conflicting writes to memory locations in any given cache line will
+ result in expensive cache misses. Greater numbers of concurrent
+ writes and more-frequent concurrent writes will result in more
+ dramatic slowdowns. RCU is therefore obligated to use algorithms that
+ have sufficient locality to avoid significant performance and
+ scalability problems.
+#. As a rough rule of thumb, only one CPU's worth of processing may be
+ carried out under the protection of any given exclusive lock. RCU
+ must therefore use scalable locking designs.
+#. Counters are finite, especially on 32-bit systems. RCU's use of
+ counters must therefore tolerate counter wrap, or be designed such
+ that counter wrap would take way more time than a single system is
+ likely to run. An uptime of ten years is quite possible, a runtime of
+ a century much less so. As an example of the latter, RCU's
+ dyntick-idle nesting counter allows 54 bits for interrupt nesting
+ level (this counter is 64 bits even on a 32-bit system). Overflowing
+ this counter requires 2\ :sup:`54` half-interrupts on a given CPU
+ without that CPU ever going idle. If a half-interrupt happened every
+   microsecond, it would take 570 years of runtime to overflow this
+   counter, which is currently believed to be an acceptably long time
+   (see the arithmetic check following this list).
+#. Linux systems can have thousands of CPUs running a single Linux
+ kernel in a single shared-memory environment. RCU must therefore pay
+ close attention to high-end scalability.
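+
+As a rough check of the 570-year figure above: 2\ :sup:`54`
+microseconds is about 1.8 x 10\ :sup:`16` microseconds, which is
+about 1.8 x 10\ :sup:`10` seconds, and dividing by roughly
+3.2 x 10\ :sup:`7` seconds per year does indeed yield approximately
+570 years.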
+
+This last parallelism fact of life means that RCU must pay special
+attention to the preceding facts of life. The idea that Linux might
+scale to systems with thousands of CPUs would have been met with some
+skepticism in the 1990s, but these requirements would otherwise have
+been unsurprising, even in the early 1990s.
+
+Quality-of-Implementation Requirements
+--------------------------------------
+
+These sections list quality-of-implementation requirements. Although an
+RCU implementation that ignores these requirements could still be used,
+it would likely be subject to limitations that would make it
+inappropriate for industrial-strength production use. Classes of
+quality-of-implementation requirements are as follows:
+
+#. `Specialization`_
+#. `Performance and Scalability`_
+#. `Forward Progress`_
+#. `Composability`_
+#. `Corner Cases`_
+
+These classes are covered in the following sections.
+
+Specialization
+~~~~~~~~~~~~~~
+
+RCU is and always has been intended primarily for read-mostly
+situations, which means that RCU's read-side primitives are optimized,
+often at the expense of its update-side primitives. Experience thus far
+is captured by the following list of situations:
+
+#. Read-mostly data, where stale and inconsistent data is not a problem:
+ RCU works great!
+#. Read-mostly data, where data must be consistent: RCU works well.
+#. Read-write data, where data must be consistent: RCU *might* work OK.
+ Or not.
+#. Write-mostly data, where data must be consistent: RCU is very
+ unlikely to be the right tool for the job, with the following
+ exceptions, where RCU can provide:
+
+ a. Existence guarantees for update-friendly mechanisms.
+ b. Wait-free read-side primitives for real-time use.
+
+This focus on read-mostly situations means that RCU must interoperate
+with other synchronization primitives. For example, the ``add_gp()`` and
+``remove_gp_synchronous()`` examples discussed earlier use RCU to
+protect readers and locking to coordinate updaters. However, the need
+extends much farther, requiring that a variety of synchronization
+primitives be legal within RCU read-side critical sections, including
+spinlocks, sequence locks, atomic operations, reference counters, and
+memory barriers.
+
++-----------------------------------------------------------------------+
+| **Quick Quiz**: |
++-----------------------------------------------------------------------+
+| What about sleeping locks? |
++-----------------------------------------------------------------------+
+| **Answer**: |
++-----------------------------------------------------------------------+
+| These are forbidden within Linux-kernel RCU read-side critical |
+| sections because it is not legal to place a quiescent state (in this |
+| case, voluntary context switch) within an RCU read-side critical |
+| section. However, sleeping locks may be used within userspace RCU |
+| read-side critical sections, and also within Linux-kernel sleepable |
+| RCU `(SRCU) <#Sleepable%20RCU>`__ read-side critical sections. In |
+| addition, the -rt patchset turns spinlocks into sleeping locks so     |
+| that the corresponding critical sections can be preempted, which also |
+| means that these sleeplockified spinlocks (but not other sleeping     |
+| locks!) may be acquired within -rt-Linux-kernel RCU read-side         |
+| critical sections.                                                    |
+| Note that it *is* legal for a normal RCU read-side critical section  |
+| to conditionally acquire a sleeping lock (as in                       |
+| ``mutex_trylock()``), but only as long as it does not loop            |
+| indefinitely attempting to conditionally acquire that sleeping lock.  |
+| The key point is that things like ``mutex_trylock()`` either return |
+| with the mutex held, or return an error indication if the mutex was |
+| not immediately available. Either way, ``mutex_trylock()`` returns |
+| immediately without sleeping. |
++-----------------------------------------------------------------------+
+
+It often comes as a surprise that many algorithms can function
+without a consistent view of data, with network routing being the
+poster child. Internet routing algorithms take
+significant time to propagate updates, so that by the time an update
+arrives at a given system, that system has been sending network traffic
+the wrong way for a considerable length of time. Having a few threads
+continue to send traffic the wrong way for a few more milliseconds is
+clearly not a problem: In the worst case, TCP retransmissions will
+eventually get the data where it needs to go. In general, when tracking
+the state of the universe outside of the computer, some level of
+inconsistency must be tolerated due to speed-of-light delays if nothing
+else.
+
+Furthermore, uncertainty about external state is inherent in many cases.
+For example, a pair of veterinarians might use heartbeat to determine
+whether or not a given cat was alive. But how long should they wait
+after the last heartbeat to decide that the cat is in fact dead? Waiting
+less than 400 milliseconds makes no sense because this would mean that a
+relaxed cat would be considered to cycle between death and life more
+than 100 times per minute. Moreover, just as with human beings, a cat's
+heart might stop for some period of time, so the exact wait period is a
+judgment call. One of our pair of veterinarians might wait 30 seconds
+before pronouncing the cat dead, while the other might insist on waiting
+a full minute. The two veterinarians would then disagree on the state of
+the cat during the final 30 seconds of the minute following the last
+heartbeat.
+
+Interestingly enough, this same situation applies to hardware. When push
+comes to shove, how do we tell whether or not some external server has
+failed? We send messages to it periodically, and declare it failed if we
+don't receive a response within a given period of time. Policy decisions
+can usually tolerate short periods of inconsistency. The policy was
+decided some time ago, and is only now being put into effect, so a few
+milliseconds of delay is normally inconsequential.
+
+However, there are algorithms that absolutely must see consistent data.
+For example, the translation between a user-level SystemV semaphore ID
+to the corresponding in-kernel data structure is protected by RCU, but
+it is absolutely forbidden to update a semaphore that has just been
+removed. In the Linux kernel, this need for consistency is accommodated
+by acquiring spinlocks located in the in-kernel data structure from
+within the RCU read-side critical section, and this is indicated by the
+green box in the figure above. Many other techniques may be used, and
+are in fact used within the Linux kernel.
+
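+As a concrete illustration of this technique, consider the following
+minimal sketch (not the actual SystemV semaphore code), in which the
+hypothetical ``sem_lookup()`` function carries out an RCU-protected
+search and a per-element ``->deleted`` flag, guarded by a per-element
+spinlock, rejects elements that have already been removed:
+
+   ::
+
+      struct my_sem {
+        int id;
+        spinlock_t lock;
+        bool deleted;
+      };
+
+      bool update_sem(int id)
+      {
+        struct my_sem *sp;
+        bool ret = false;
+
+        rcu_read_lock();
+        sp = sem_lookup(id);  /* Hypothetical RCU-protected lookup. */
+        if (sp) {
+          spin_lock(&sp->lock);
+          if (!sp->deleted) {
+            do_sem_update(sp);  /* Hypothetical update operation. */
+            ret = true;
+          }
+          spin_unlock(&sp->lock);
+        }
+        rcu_read_unlock();
+        return ret;
+      }
+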
+In short, RCU is not required to maintain consistency, and other
+mechanisms may be used in concert with RCU when consistency is required.
+RCU's specialization allows it to do its job extremely well, and its
+ability to interoperate with other synchronization mechanisms allows the
+right mix of synchronization tools to be used for a given job.
+
+Performance and Scalability
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Energy efficiency is a critical component of performance today, and
+Linux-kernel RCU implementations must therefore avoid unnecessarily
+awakening idle CPUs. I cannot claim that this requirement was
+premeditated. In fact, I learned of it during a telephone conversation
+in which I was given “frank and open” feedback on the importance of
+energy efficiency in battery-powered systems and on specific
+energy-efficiency shortcomings of the Linux-kernel RCU implementation.
+In my experience, the battery-powered embedded community will consider
+any unnecessary wakeups to be extremely unfriendly acts. So much so that
+mere Linux-kernel-mailing-list posts are insufficient to vent their ire.
+
+Memory consumption is not particularly important in most situations,
+and has become decreasingly so as memory sizes have expanded and memory
+costs have plummeted. However, as I learned from Matt Mackall's
+`bloatwatch <http://elinux.org/Linux_Tiny-FAQ>`__ efforts, memory
+footprint is critically important on single-CPU systems with
+non-preemptible (``CONFIG_PREEMPT=n``) kernels, and thus `tiny
+RCU <https://lkml.kernel.org/g/20090113221724.GA15307@linux.vnet.ibm.com>`__
+was born. Josh Triplett has since taken over the small-memory banner
+with his `Linux kernel tinification <https://tiny.wiki.kernel.org/>`__
+project, which resulted in `SRCU <#Sleepable%20RCU>`__ becoming optional
+for those kernels not needing it.
+
+The remaining performance requirements are, for the most part,
+unsurprising. For example, in keeping with RCU's read-side
+specialization, ``rcu_dereference()`` should have negligible overhead
+(for example, suppression of a few minor compiler optimizations).
+Similarly, in non-preemptible environments, ``rcu_read_lock()`` and
+``rcu_read_unlock()`` should have exactly zero overhead.
+
+In preemptible environments, in the case where the RCU read-side
+critical section was not preempted (as will be the case for the
+highest-priority real-time process), ``rcu_read_lock()`` and
+``rcu_read_unlock()`` should have minimal overhead. In particular, they
+should not contain atomic read-modify-write operations, memory-barrier
+instructions, preemption disabling, interrupt disabling, or backwards
+branches. However, in the case where the RCU read-side critical section
+was preempted, ``rcu_read_unlock()`` may acquire spinlocks and disable
+interrupts. This is why it is better to nest an RCU read-side critical
+section within a preempt-disable region than vice versa, at least in
+cases where that critical section is short enough to avoid unduly
+degrading real-time latencies.
+
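+For example, the preferred nesting order looks something like the
+following sketch, with ``do_something_quickly()`` standing in for a
+suitably short critical section:
+
+   ::
+
+      preempt_disable();
+      rcu_read_lock();  /* Reader nests within preempt-disable region. */
+      do_something_quickly();
+      rcu_read_unlock();  /* Cannot have been preempted, so stays cheap. */
+      preempt_enable();
+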
+The ``synchronize_rcu()`` grace-period-wait primitive is optimized for
+throughput. It may therefore incur several milliseconds of latency in
+addition to the duration of the longest RCU read-side critical section.
+On the other hand, multiple concurrent invocations of
+``synchronize_rcu()`` are required to use batching optimizations so that
+they can be satisfied by a single underlying grace-period-wait
+operation. For example, in the Linux kernel, it is not unusual for a
+single grace-period-wait operation to serve more than `1,000 separate
+invocations <https://www.usenix.org/conference/2004-usenix-annual-technical-conference/making-rcu-safe-deep-sub-millisecond-response>`__
+of ``synchronize_rcu()``, thus amortizing the per-invocation overhead
+down to nearly zero. However, the grace-period optimization is also
+required to avoid measurable degradation of real-time scheduling and
+interrupt latencies.
+
+In some cases, the multi-millisecond ``synchronize_rcu()`` latencies are
+unacceptable. In these cases, ``synchronize_rcu_expedited()`` may be
+used instead, reducing the grace-period latency down to a few tens of
+microseconds on small systems, at least in cases where the RCU read-side
+critical sections are short. There are currently no special latency
+requirements for ``synchronize_rcu_expedited()`` on large systems, but,
+consistent with the empirical nature of the RCU specification, that is
+subject to change. However, there most definitely are scalability
+requirements: A storm of ``synchronize_rcu_expedited()`` invocations on
+4096 CPUs should at least make reasonable forward progress. In return
+for its shorter latencies, ``synchronize_rcu_expedited()`` is permitted
+to impose modest degradation of real-time latency on non-idle online
+CPUs. Here, “modest” means roughly the same latency degradation as a
+scheduling-clock interrupt.
+
+There are a number of situations where even
+``synchronize_rcu_expedited()``'s reduced grace-period latency is
+unacceptable. In these situations, the asynchronous ``call_rcu()`` can
+be used in place of ``synchronize_rcu()`` as follows:
+
+ ::
+
+ 1 struct foo {
+ 2 int a;
+ 3 int b;
+ 4 struct rcu_head rh;
+ 5 };
+ 6
+ 7 static void remove_gp_cb(struct rcu_head *rhp)
+ 8 {
+ 9 struct foo *p = container_of(rhp, struct foo, rh);
+ 10
+ 11 kfree(p);
+ 12 }
+ 13
+ 14 bool remove_gp_asynchronous(void)
+ 15 {
+ 16 struct foo *p;
+ 17
+ 18 spin_lock(&gp_lock);
+ 19 p = rcu_access_pointer(gp);
+ 20 if (!p) {
+ 21 spin_unlock(&gp_lock);
+ 22 return false;
+ 23 }
+ 24 rcu_assign_pointer(gp, NULL);
+ 25 call_rcu(&p->rh, remove_gp_cb);
+ 26 spin_unlock(&gp_lock);
+ 27 return true;
+ 28 }
+
+A definition of ``struct foo`` is finally needed, and appears on
+lines 1-5. The function ``remove_gp_cb()`` is passed to ``call_rcu()``
+on line 25, and will be invoked after the end of a subsequent grace
+period. This gets the same effect as ``remove_gp_synchronous()``, but
+without forcing the updater to wait for a grace period to elapse. The
+``call_rcu()`` function may be used in a number of situations where
+neither ``synchronize_rcu()`` nor ``synchronize_rcu_expedited()`` would
+be legal, including within preempt-disable code, ``local_bh_disable()``
+code, interrupt-disable code, and interrupt handlers. However, even
+``call_rcu()`` is illegal within NMI handlers and from idle and offline
+CPUs. The callback function (``remove_gp_cb()`` in this case) will be
+executed within a softirq (software interrupt) environment within the
+Linux kernel, either within a real softirq handler or under the
+protection of ``local_bh_disable()``. In both the Linux kernel and in
+userspace, it is bad practice to write an RCU callback function that
+takes too long. Long-running operations should be relegated to separate
+threads or (in the Linux kernel) workqueues.
+
++-----------------------------------------------------------------------+
+| **Quick Quiz**: |
++-----------------------------------------------------------------------+
+| Why does line 19 use ``rcu_access_pointer()``? After all, |
+| ``call_rcu()`` on line 25 stores into the structure, which would |
+| interact badly with concurrent insertions. Doesn't this mean that |
+| ``rcu_dereference()`` is required? |
++-----------------------------------------------------------------------+
+| **Answer**: |
++-----------------------------------------------------------------------+
+| Presumably the ``gp_lock`` acquired on line 18 excludes any           |
+| changes, including any insertions that ``rcu_dereference()`` would    |
+| protect against. Therefore, any insertions will be delayed until      |
+| after ``gp_lock`` is released on line 26, which in turn means that    |
+| ``rcu_access_pointer()`` suffices.                                    |
++-----------------------------------------------------------------------+
+
+However, all that ``remove_gp_cb()`` is doing is invoking ``kfree()`` on
+the data element. This is a common idiom, and is supported by
+``kfree_rcu()``, which allows “fire and forget” operation as shown
+below:
+
+ ::
+
+ 1 struct foo {
+ 2 int a;
+ 3 int b;
+ 4 struct rcu_head rh;
+ 5 };
+ 6
+ 7 bool remove_gp_faf(void)
+ 8 {
+ 9 struct foo *p;
+ 10
+ 11 spin_lock(&gp_lock);
+ 12 p = rcu_dereference(gp);
+ 13 if (!p) {
+ 14 spin_unlock(&gp_lock);
+ 15 return false;
+ 16 }
+ 17 rcu_assign_pointer(gp, NULL);
+ 18 kfree_rcu(p, rh);
+ 19 spin_unlock(&gp_lock);
+ 20 return true;
+ 21 }
+
+Note that ``remove_gp_faf()`` simply invokes ``kfree_rcu()`` and
+proceeds, without any need to pay any further attention to the
+subsequent grace period and ``kfree()``. It is permissible to invoke
+``kfree_rcu()`` from the same environments as for ``call_rcu()``.
+Interestingly enough, DYNIX/ptx had the equivalents of ``call_rcu()``
+and ``kfree_rcu()``, but not ``synchronize_rcu()``. This was due to the
+fact that RCU was not heavily used within DYNIX/ptx, so the very few
+places that needed something like ``synchronize_rcu()`` simply
+open-coded it.
+
++-----------------------------------------------------------------------+
+| **Quick Quiz**: |
++-----------------------------------------------------------------------+
+| Earlier it was claimed that ``call_rcu()`` and ``kfree_rcu()`` |
+| allowed updaters to avoid being blocked by readers. But how can that |
+| be correct, given that the invocation of the callback and the freeing |
+| of the memory (respectively) must still wait for a grace period to |
+| elapse? |
++-----------------------------------------------------------------------+
+| **Answer**: |
++-----------------------------------------------------------------------+
+| We could define things this way, but keep in mind that this sort of |
+| definition would say that updates in garbage-collected languages |
+| cannot complete until the next time the garbage collector runs, which |
+| does not seem at all reasonable. The key point is that in most cases, |
+| an updater using either ``call_rcu()`` or ``kfree_rcu()`` can proceed |
+| to the next update as soon as it has invoked ``call_rcu()`` or |
+| ``kfree_rcu()``, without having to wait for a subsequent grace |
+| period. |
++-----------------------------------------------------------------------+
+
+But what if the updater must wait for the completion of code to be
+executed after the end of the grace period, but has other tasks that can
+be carried out in the meantime? The polling-style
+``get_state_synchronize_rcu()`` and ``cond_synchronize_rcu()`` functions
+may be used for this purpose, as shown below:
+
+ ::
+
+ 1 bool remove_gp_poll(void)
+ 2 {
+ 3 struct foo *p;
+ 4 unsigned long s;
+ 5
+ 6 spin_lock(&gp_lock);
+ 7 p = rcu_access_pointer(gp);
+ 8 if (!p) {
+ 9 spin_unlock(&gp_lock);
+ 10 return false;
+ 11 }
+ 12 rcu_assign_pointer(gp, NULL);
+ 13 spin_unlock(&gp_lock);
+ 14 s = get_state_synchronize_rcu();
+ 15 do_something_while_waiting();
+ 16 cond_synchronize_rcu(s);
+ 17 kfree(p);
+ 18 return true;
+ 19 }
+
+On line 14, ``get_state_synchronize_rcu()`` obtains a “cookie” from RCU,
+then line 15 carries out other tasks, and finally, line 16 returns
+immediately if a grace period has elapsed in the meantime, but otherwise
+waits as required. The need for ``get_state_synchronize_rcu()`` and
+``cond_synchronize_rcu()`` has appeared quite recently, so it is too
+early to tell whether they will stand the test of time.
+
+RCU thus provides a range of tools to allow updaters to strike the
+required tradeoff between latency, flexibility, and CPU overhead.
+
+Forward Progress
+~~~~~~~~~~~~~~~~
+
+In theory, delaying grace-period completion and callback invocation is
+harmless. In practice, not only are memory sizes finite but also
+callbacks sometimes do wakeups, and sufficiently deferred wakeups can be
+difficult to distinguish from system hangs. Therefore, RCU must provide
+a number of mechanisms to promote forward progress.
+
+These mechanisms are not foolproof, nor can they be. For one simple
+example, an infinite loop in an RCU read-side critical section must by
+definition prevent later grace periods from ever completing. For a more
+involved example, consider a 64-CPU system built with
+``CONFIG_RCU_NOCB_CPU=y`` and booted with ``rcu_nocbs=1-63``, where
+CPUs 1 through 63 spin in tight loops that invoke ``call_rcu()``. Even
+if these tight loops also contain calls to ``cond_resched()`` (thus
+allowing grace periods to complete), CPU 0 simply will not be able to
+invoke callbacks as fast as the other 63 CPUs can register them, at
+least not until the system runs out of memory. In both of these
+examples, the Spiderman principle applies: With great power comes great
+responsibility. However, short of this level of abuse, RCU is required
+to ensure timely completion of grace periods and timely invocation of
+callbacks.
+
+RCU takes the following steps to encourage timely completion of grace
+periods:
+
+#. If a grace period fails to complete within 100 milliseconds, RCU
+ causes future invocations of ``cond_resched()`` on the holdout CPUs
+ to provide an RCU quiescent state. RCU also causes those CPUs'
+ ``need_resched()`` invocations to return ``true``, but only after the
+   corresponding CPU's next scheduling-clock interrupt.
+#. CPUs mentioned in the ``nohz_full`` kernel boot parameter can run
+ indefinitely in the kernel without scheduling-clock interrupts, which
+   defeats the above ``need_resched()`` stratagem. RCU will therefore
+ invoke ``resched_cpu()`` on any ``nohz_full`` CPUs still holding out
+ after 109 milliseconds.
+#. In kernels built with ``CONFIG_RCU_BOOST=y``, if a given task that
+ has been preempted within an RCU read-side critical section is
+ holding out for more than 500 milliseconds, RCU will resort to
+ priority boosting.
+#. If a CPU is still holding out 10 seconds into the grace period, RCU
+ will invoke ``resched_cpu()`` on it regardless of its ``nohz_full``
+ state.
+
+The above values are defaults for systems running with ``HZ=1000``. They
+will vary as the value of ``HZ`` varies, and can also be changed using
+the relevant Kconfig options and kernel boot parameters. RCU currently
+does not do much sanity checking of these parameters, so please use
+caution when changing them. Note that these forward-progress measures
+are provided only for RCU, not for `SRCU <#Sleepable%20RCU>`__ or `Tasks
+RCU <#Tasks%20RCU>`__.
+
+RCU takes the following steps in ``call_rcu()`` to encourage timely
+invocation of callbacks when any given non-\ ``rcu_nocbs`` CPU has
+10,000 callbacks, or has 10,000 more callbacks than it had the last time
+encouragement was provided:
+
+#. Starts a grace period, if one is not already in progress.
+#. Forces immediate checking for quiescent states, rather than waiting
+ for three milliseconds to have elapsed since the beginning of the
+ grace period.
+#. Immediately tags the CPU's callbacks with their grace period
+ completion numbers, rather than waiting for the ``RCU_SOFTIRQ``
+ handler to get around to it.
+#. Lifts callback-execution batch limits, which speeds up callback
+ invocation at the expense of degrading realtime response.
+
+Again, these are default values when running at ``HZ=1000``, and can be
+overridden. Again, these forward-progress measures are provided only for
+RCU, not for `SRCU <#Sleepable%20RCU>`__ or `Tasks
+RCU <#Tasks%20RCU>`__. Even for RCU, callback-invocation forward
+progress for ``rcu_nocbs`` CPUs is much less well-developed, in part
+because workloads benefiting from ``rcu_nocbs`` CPUs tend to invoke
+``call_rcu()`` relatively infrequently. If workloads emerge that need
+both ``rcu_nocbs`` CPUs and high ``call_rcu()`` invocation rates, then
+additional forward-progress work will be required.
+
+Composability
+~~~~~~~~~~~~~
+
+Composability has received much attention in recent years, perhaps in
+part due to the collision of multicore hardware with object-oriented
+techniques designed in single-threaded environments for single-threaded
+use. And in theory, RCU read-side critical sections may be composed, and
+in fact may be nested arbitrarily deeply. In practice, as with all
+real-world implementations of composable constructs, there are
+limitations.
+
+Implementations of RCU for which ``rcu_read_lock()`` and
+``rcu_read_unlock()`` generate no code, such as Linux-kernel RCU when
+``CONFIG_PREEMPT=n``, can be nested arbitrarily deeply. After all, there
+is no overhead. Except that if all these instances of
+``rcu_read_lock()`` and ``rcu_read_unlock()`` are visible to the
+compiler, compilation will eventually fail due to exhausting memory,
+mass storage, or user patience, whichever comes first. If the nesting is
+not visible to the compiler, as is the case with mutually recursive
+functions each in its own translation unit, stack overflow will result.
+If the nesting takes the form of loops, perhaps in the guise of tail
+recursion, either the control variable will overflow or (in the Linux
+kernel) you will get an RCU CPU stall warning. Nevertheless, this class
+of RCU implementations is one of the most composable constructs in
+existence.
+
+RCU implementations that explicitly track nesting depth are limited by
+the nesting-depth counter. For example, the Linux kernel's preemptible
+RCU limits nesting to ``INT_MAX``. This should suffice for almost all
+practical purposes. That said, a consecutive pair of RCU read-side
+critical sections between which there is an operation that waits for a
+grace period cannot be enclosed in another RCU read-side critical
+section. This is because it is not legal to wait for a grace period
+within an RCU read-side critical section: To do so would result either
+in deadlock or in RCU implicitly splitting the enclosing RCU read-side
+critical section, neither of which is conducive to a long-lived and
+prosperous kernel.
+
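+The following sketch contrasts legal nesting with the illegal
+enclosure just described:
+
+   ::
+
+      rcu_read_lock();  /* Begin outer critical section. */
+      rcu_read_lock();  /* Legal: readers nest freely. */
+      rcu_read_unlock();
+      synchronize_rcu();  /* BUG: grace-period wait while the */
+                          /* outer critical section is active. */
+      rcu_read_lock();
+      rcu_read_unlock();
+      rcu_read_unlock();  /* End outer critical section. */
+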
+It is worth noting that RCU is not alone in limiting composability. For
+example, many transactional-memory implementations prohibit composing a
+pair of transactions separated by an irrevocable operation (for example,
+a network receive operation). For another example, lock-based critical
+sections can be composed surprisingly freely, but only if deadlock is
+avoided.
+
+In short, although RCU read-side critical sections are highly
+composable, care is required in some situations, just as is the case for
+any other composable synchronization mechanism.
+
+Corner Cases
+~~~~~~~~~~~~
+
+A given RCU workload might have an endless and intense stream of RCU
+read-side critical sections, perhaps even so intense that there was
+never a point in time during which there was not at least one RCU
+read-side critical section in flight. RCU cannot allow this situation to
+block grace periods: As long as all the RCU read-side critical sections
+are finite, grace periods must also be finite.
+
+That said, preemptible RCU implementations could potentially result in
+RCU read-side critical sections being preempted for long durations,
+which has the effect of creating a long-duration RCU read-side critical
+section. This situation can arise only in heavily loaded systems, but
+systems using real-time priorities are of course more vulnerable.
+Therefore, RCU priority boosting is provided to help deal with this
+case. That said, the exact requirements on RCU priority boosting will
+likely evolve as more experience accumulates.
+
+Other workloads might have very high update rates. Although one can
+argue that such workloads should instead use something other than RCU,
+the fact remains that RCU must handle such workloads gracefully. This
+requirement is another factor driving batching of grace periods, but it
+is also the driving force behind the checks for large numbers of queued
+RCU callbacks in the ``call_rcu()`` code path. Finally, high update
+rates should not delay RCU read-side critical sections, although some
+small read-side delays can occur when using
+``synchronize_rcu_expedited()``, courtesy of this function's use of
+``smp_call_function_single()``.
+
+Although all three of these corner cases were understood in the early
+1990s, a simple user-level test consisting of ``close(open(path))`` in a
+tight loop in the early 2000s suddenly provided a much deeper
+appreciation of the high-update-rate corner case. This test also
+motivated addition of some RCU code to react to high update rates, for
+example, if a given CPU finds itself with more than 10,000 RCU callbacks
+queued, it will cause RCU to take evasive action by more aggressively
+starting grace periods and more aggressively forcing completion of
+grace-period processing. This evasive action causes the grace period to
+complete more quickly, but at the cost of restricting RCU's batching
+optimizations, thus increasing the CPU overhead incurred by that grace
+period.
+
+Software-Engineering Requirements
+---------------------------------
+
+Between Murphy's Law and “To err is human”, it is necessary to guard
+against mishaps and misuse:
+
+#. It is all too easy to forget to use ``rcu_read_lock()`` everywhere
+ that it is needed, so kernels built with ``CONFIG_PROVE_RCU=y`` will
+ splat if ``rcu_dereference()`` is used outside of an RCU read-side
+ critical section. Update-side code can use
+ ``rcu_dereference_protected()``, which takes a `lockdep
+ expression <https://lwn.net/Articles/371986/>`__ to indicate what is
+ providing the protection. If the indicated protection is not
+ provided, a lockdep splat is emitted.
+ Code shared between readers and updaters can use
+ ``rcu_dereference_check()``, which also takes a lockdep expression,
+ and emits a lockdep splat if neither ``rcu_read_lock()`` nor the
+ indicated protection is in place. In addition,
+ ``rcu_dereference_raw()`` is used in those (hopefully rare) cases
+ where the required protection cannot be easily described. Finally,
+ ``rcu_read_lock_held()`` is provided to allow a function to verify
+ that it has been invoked within an RCU read-side critical section. I
+ was made aware of this set of requirements shortly after Thomas
+ Gleixner audited a number of RCU uses.
+#. A given function might wish to check for RCU-related preconditions
+ upon entry, before using any other RCU API. The
+   ``rcu_lockdep_assert()`` macro does this job, asserting the
+   expression in kernels having lockdep enabled and doing nothing
+   otherwise.
+#. It is also easy to forget to use ``rcu_assign_pointer()`` and
+ ``rcu_dereference()``, perhaps (incorrectly) substituting a simple
+ assignment. To catch this sort of error, a given RCU-protected
+ pointer may be tagged with ``__rcu``, after which sparse will
+ complain about simple-assignment accesses to that pointer. Arnd
+ Bergmann made me aware of this requirement, and also supplied the
+ needed `patch series <https://lwn.net/Articles/376011/>`__.
+#. Kernels built with ``CONFIG_DEBUG_OBJECTS_RCU_HEAD=y`` will splat if
+ a data element is passed to ``call_rcu()`` twice in a row, without a
+ grace period in between. (This error is similar to a double free.)
+ The corresponding ``rcu_head`` structures that are dynamically
+ allocated are automatically tracked, but ``rcu_head`` structures
+ allocated on the stack must be initialized with
+ ``init_rcu_head_on_stack()`` and cleaned up with
+ ``destroy_rcu_head_on_stack()``. Similarly, statically allocated
+ non-stack ``rcu_head`` structures must be initialized with
+ ``init_rcu_head()`` and cleaned up with ``destroy_rcu_head()``.
+ Mathieu Desnoyers made me aware of this requirement, and also
+ supplied the needed
+ `patch <https://lkml.kernel.org/g/20100319013024.GA28456@Krystal>`__.
+#. An infinite loop in an RCU read-side critical section will eventually
+ trigger an RCU CPU stall warning splat, with the duration of
+ “eventually” being controlled by the ``RCU_CPU_STALL_TIMEOUT``
+ ``Kconfig`` option, or, alternatively, by the
+ ``rcupdate.rcu_cpu_stall_timeout`` boot/sysfs parameter. However, RCU
+ is not obligated to produce this splat unless there is a grace period
+ waiting on that particular RCU read-side critical section.
+
+ Some extreme workloads might intentionally delay RCU grace periods,
+ and systems running those workloads can be booted with
+ ``rcupdate.rcu_cpu_stall_suppress`` to suppress the splats. This
+ kernel parameter may also be set via ``sysfs``. Furthermore, RCU CPU
+ stall warnings are counter-productive during sysrq dumps and during
+ panics. RCU therefore supplies the ``rcu_sysrq_start()`` and
+ ``rcu_sysrq_end()`` API members to be called before and after long
+ sysrq dumps. RCU also supplies the ``rcu_panic()`` notifier that is
+ automatically invoked at the beginning of a panic to suppress further
+ RCU CPU stall warnings.
+
+ This requirement made itself known in the early 1990s, pretty much
+ the first time that it was necessary to debug a CPU stall. That said,
+ the initial implementation in DYNIX/ptx was quite generic in
+ comparison with that of Linux.
+
+#. Although it would be very good to detect pointers leaking out of RCU
+ read-side critical sections, there is currently no good way of doing
+ this. One complication is the need to distinguish between pointers
+ leaking and pointers that have been handed off from RCU to some other
+ synchronization mechanism, for example, reference counting.
+#. In kernels built with ``CONFIG_RCU_TRACE=y``, RCU-related information
+ is provided via event tracing.
+#. Open-coded use of ``rcu_assign_pointer()`` and ``rcu_dereference()``
+ to create typical linked data structures can be surprisingly
+ error-prone. Therefore, RCU-protected `linked
+ lists <https://lwn.net/Articles/609973/#RCU%20List%20APIs>`__ and,
+ more recently, RCU-protected `hash
+ tables <https://lwn.net/Articles/612100/>`__ are available. Many
+ other special-purpose RCU-protected data structures are available in
+ the Linux kernel and the userspace RCU library.
+#. Some linked structures are created at compile time, but still require
+ ``__rcu`` checking. The ``RCU_POINTER_INITIALIZER()`` macro serves
+ this purpose.
+#. It is not necessary to use ``rcu_assign_pointer()`` when creating
+ linked structures that are to be published via a single external
+ pointer. The ``RCU_INIT_POINTER()`` macro is provided for this task
+ and also for assigning ``NULL`` pointers at runtime.
+
+This is not a hard-and-fast list: RCU's diagnostic capabilities will
+continue to be guided by the number and type of usage bugs found in
+real-world RCU usage.
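+
+As a brief illustration of some of these diagnostic APIs, consider
+the following sketch, which uses a hypothetical ``myptr`` pointer
+protected by a hypothetical ``my_lock`` spinlock:
+
+   ::
+
+      struct foo __rcu *myptr;  /* __rcu enables sparse checking. */
+      DEFINE_SPINLOCK(my_lock);
+
+      /* For code shared between readers and updaters. */
+      struct foo *myptr_fetch(void)
+      {
+        return rcu_dereference_check(myptr,
+                                     lockdep_is_held(&my_lock));
+      }
+
+      /* For update-side code, which must hold my_lock. */
+      struct foo *myptr_fetch_locked(void)
+      {
+        return rcu_dereference_protected(myptr,
+                                         lockdep_is_held(&my_lock));
+      }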
+
+Linux Kernel Complications
+--------------------------
+
+The Linux kernel provides an interesting environment for all kinds of
+software, including RCU. Some of the relevant points of interest are as
+follows:
+
+#. `Configuration`_
+#. `Firmware Interface`_
+#. `Early Boot`_
+#. `Interrupts and NMIs`_
+#. `Loadable Modules`_
+#. `Hotplug CPU`_
+#. `Scheduler and RCU`_
+#. `Tracing and RCU`_
+#. `Accesses to User Memory and RCU`_
+#. `Energy Efficiency`_
+#. `Scheduling-Clock Interrupts and RCU`_
+#. `Memory Efficiency`_
+#. `Performance, Scalability, Response Time, and Reliability`_
+
+This list is probably incomplete, but it does give a feel for the most
+notable Linux-kernel complications. Each of the following sections
+covers one of the above topics.
+
+Configuration
+~~~~~~~~~~~~~
+
+RCU's goal is automatic configuration, so that almost nobody needs to
+worry about RCU's ``Kconfig`` options. And for almost all users, RCU
+does in fact work well “out of the box.”
+
+However, there are specialized use cases that are handled by kernel boot
+parameters and ``Kconfig`` options. Unfortunately, the ``Kconfig``
+system will explicitly ask users about new ``Kconfig`` options, which
+requires almost all of them be hidden behind a ``CONFIG_RCU_EXPERT``
+``Kconfig`` option.
+
+This all should be quite obvious, but the fact remains that Linus
+Torvalds recently had to
+`remind <https://lkml.kernel.org/g/CA+55aFy4wcCwaL4okTs8wXhGZ5h-ibecy_Meg9C4MNQrUnwMcg@mail.gmail.com>`__
+me of this requirement.
+
+Firmware Interface
+~~~~~~~~~~~~~~~~~~
+
+In many cases, the kernel obtains information about the system from the
+firmware, and sometimes things are lost in translation. Or the
+translation is accurate, but the original message is bogus.
+
+For example, some systems' firmware overreports the number of CPUs,
+sometimes by a large factor. If RCU naively believed the firmware, as it
+used to do, it would create too many per-CPU kthreads. Although the
+resulting system will still run correctly, the extra kthreads needlessly
+consume memory and can cause confusion when they show up in ``ps``
+listings.
+
+RCU must therefore wait for a given CPU to actually come online before
+it can allow itself to believe that the CPU actually exists. The
+resulting “ghost CPUs” (which are never going to come online) cause a
+number of `interesting
+complications <https://paulmck.livejournal.com/37494.html>`__.
+
+Early Boot
+~~~~~~~~~~
+
+The Linux kernel's boot sequence is an interesting process, and RCU is
+used early, even before ``rcu_init()`` is invoked. In fact, a number of
+RCU's primitives can be used as soon as the initial task's
+``task_struct`` is available and the boot CPU's per-CPU variables are
+set up. The read-side primitives (``rcu_read_lock()``,
+``rcu_read_unlock()``, ``rcu_dereference()``, and
+``rcu_access_pointer()``) will operate normally very early on, as will
+``rcu_assign_pointer()``.
+
+Although ``call_rcu()`` may be invoked at any time during boot,
+callbacks are not guaranteed to be invoked until after all of RCU's
+kthreads have been spawned, which occurs at ``early_initcall()`` time.
+This delay in callback invocation is due to the fact that RCU does not
+invoke callbacks until it is fully initialized, and this full
+initialization cannot occur until after the scheduler has initialized
+itself to the point where RCU can spawn and run its kthreads. In theory,
+it would be possible to invoke callbacks earlier, however, this is not a
+panacea because there would be severe restrictions on what operations
+those callbacks could invoke.
+
+Perhaps surprisingly, ``synchronize_rcu()`` and
+``synchronize_rcu_expedited()`` will operate normally during very early
+boot, the reason being that there is only one CPU and preemption is
+disabled. This means that a call to ``synchronize_rcu()`` (or friends)
+is itself a quiescent state and thus a grace period, so the early-boot
+implementation can be a no-op.
+
+However, once the scheduler has spawned its first kthread, this early
+boot trick fails for ``synchronize_rcu()`` (as well as for
+``synchronize_rcu_expedited()``) in ``CONFIG_PREEMPT=y`` kernels. The
+reason is that an RCU read-side critical section might be preempted,
+which means that a subsequent ``synchronize_rcu()`` really does have to
+wait for something, as opposed to simply returning immediately.
+Unfortunately, ``synchronize_rcu()`` can't do this until all of its
+kthreads are spawned, which doesn't happen until some time during
+``early_initcall()`` time. But this is no excuse: RCU is nevertheless
+required to correctly handle synchronous grace periods during this time
+period. Once all of its kthreads are up and running, RCU starts running
+normally.
+
++-----------------------------------------------------------------------+
+| **Quick Quiz**: |
++-----------------------------------------------------------------------+
+| How can RCU possibly handle grace periods before all of its kthreads |
+| have been spawned??? |
++-----------------------------------------------------------------------+
+| **Answer**: |
++-----------------------------------------------------------------------+
+| Very carefully! |
+| During the “dead zone” between the time that the scheduler spawns the |
+| first task and the time that all of RCU's kthreads have been spawned, |
+| all synchronous grace periods are handled by the expedited |
+| grace-period mechanism. At runtime, this expedited mechanism relies |
+| on workqueues, but during the dead zone the requesting task itself |
+| drives the desired expedited grace period. Because dead-zone |
+| execution takes place within task context, everything works. Once the |
+| dead zone ends, expedited grace periods go back to using workqueues, |
+| as is required to avoid problems that would otherwise occur when a |
+| user task received a POSIX signal while driving an expedited grace |
+| period. |
+| |
+| And yes, this does mean that it is unhelpful to send POSIX signals to |
+| random tasks between the time that the scheduler spawns its first |
+| kthread and the time that RCU's kthreads have all been spawned. If |
+| there ever turns out to be a good reason for sending POSIX signals |
+| during that time, appropriate adjustments will be made. (If it turns |
+| out that POSIX signals are sent during this time for no good reason, |
+| other adjustments will be made, appropriate or otherwise.) |
++-----------------------------------------------------------------------+
+
+I learned of these boot-time requirements as a result of a series of
+system hangs.
+
+Interrupts and NMIs
+~~~~~~~~~~~~~~~~~~~
+
+The Linux kernel has interrupts, and RCU read-side critical sections are
+legal within interrupt handlers and within interrupt-disabled regions of
+code, as are invocations of ``call_rcu()``.
+
+Some Linux-kernel architectures can enter an interrupt handler from
+non-idle process context, and then just never leave it, instead
+stealthily transitioning back to process context. This trick is
+sometimes used to invoke system calls from inside the kernel. These
+“half-interrupts” mean that RCU has to be very careful about how it
+counts interrupt nesting levels. I learned of this requirement the hard
+way during a rewrite of RCU's dyntick-idle code.
+
+The Linux kernel has non-maskable interrupts (NMIs), and RCU read-side
+critical sections are legal within NMI handlers. Thankfully, RCU
+update-side primitives, including ``call_rcu()``, are prohibited within
+NMI handlers.
+
+The name notwithstanding, some Linux-kernel architectures can have
+nested NMIs, which RCU must handle correctly. Andy Lutomirski `surprised
+me <https://lkml.kernel.org/r/CALCETrXLq1y7e_dKFPgou-FKHB6Pu-r8+t-6Ds+8=va7anBWDA@mail.gmail.com>`__
+with this requirement; he also kindly surprised me with `an
+algorithm <https://lkml.kernel.org/r/CALCETrXSY9JpW3uE6H8WYk81sg56qasA2aqmjMPsq5dOtzso=g@mail.gmail.com>`__
+that meets this requirement.
+
+Furthermore, NMI handlers can be interrupted by what appear to RCU to be
+normal interrupts. One way that this can happen is for code that
+directly invokes ``rcu_irq_enter()`` and ``rcu_irq_exit()`` to be called
+from an NMI handler. This astonishing fact of life prompted the current
+code structure, which has ``rcu_irq_enter()`` invoking
+``rcu_nmi_enter()`` and ``rcu_irq_exit()`` invoking ``rcu_nmi_exit()``.
+And yes, I also learned of this requirement the hard way.
+
+Loadable Modules
+~~~~~~~~~~~~~~~~
+
+The Linux kernel has loadable modules, and these modules can also be
+unloaded. After a given module has been unloaded, any attempt to call
+one of its functions results in a segmentation fault. The module-unload
+functions must therefore cancel any delayed calls to loadable-module
+functions, for example, any outstanding ``mod_timer()`` must be dealt
+with via ``del_timer_sync()`` or similar.
+
+Unfortunately, there is no way to cancel an RCU callback; once you
+invoke ``call_rcu()``, the callback function is eventually going to be
+invoked, unless the system goes down first. Because it is normally
+considered socially irresponsible to crash the system in response to a
+module unload request, we need some other way to deal with in-flight RCU
+callbacks.
+
+RCU therefore provides ``rcu_barrier()``, which waits until all
+in-flight RCU callbacks have been invoked. If a module uses
+``call_rcu()``, its exit function should therefore prevent any future
+invocation of ``call_rcu()``, then invoke ``rcu_barrier()``. In theory,
+the underlying module-unload code could invoke ``rcu_barrier()``
+unconditionally, but in practice this would incur unacceptable
+latencies.
+
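+A module's exit function might therefore look something like the
+following sketch, in which the hypothetical ``remove_my_hooks()``
+function stands in for whatever prevents subsequent ``call_rcu()``
+invocations:
+
+   ::
+
+      static void __exit my_module_exit(void)
+      {
+        remove_my_hooks();  /* Prevent future call_rcu() invocations. */
+        rcu_barrier();      /* Wait for all pending RCU callbacks. */
+        /* Only now is it safe for this module to be unloaded. */
+      }
+      module_exit(my_module_exit);
+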
+Nikita Danilov noted this requirement for an analogous
+filesystem-unmount situation, and Dipankar Sarma incorporated
+``rcu_barrier()`` into RCU. The need for ``rcu_barrier()`` for module
+unloading became apparent later.
+
+.. important::
+
+ The ``rcu_barrier()`` function is not, repeat,
+ *not*, obligated to wait for a grace period. It is instead only required
+ to wait for RCU callbacks that have already been posted. Therefore, if
+ there are no RCU callbacks posted anywhere in the system,
+ ``rcu_barrier()`` is within its rights to return immediately. Even if
+ there are callbacks posted, ``rcu_barrier()`` does not necessarily need
+ to wait for a grace period.
+
++-----------------------------------------------------------------------+
+| **Quick Quiz**: |
++-----------------------------------------------------------------------+
+| Wait a minute! Each RCU callback must wait for a grace period to      |
+| complete, and ``rcu_barrier()`` must wait for each pre-existing |
+| callback to be invoked. Doesn't ``rcu_barrier()`` therefore need to |
+| wait for a full grace period if there is even one callback posted |
+| anywhere in the system? |
++-----------------------------------------------------------------------+
+| **Answer**: |
++-----------------------------------------------------------------------+
+| Absolutely not!!! |
+| Yes, each RCU callback must wait for a grace period to complete, but  |
+| it might well be partly (or even completely) finished waiting by the |
+| time ``rcu_barrier()`` is invoked. In that case, ``rcu_barrier()`` |
+| need only wait for the remaining portion of the grace period to |
+| elapse. So even if there are quite a few callbacks posted, |
+| ``rcu_barrier()`` might well return quite quickly. |
+| |
+| So if you need to wait for a grace period as well as for all |
+| pre-existing callbacks, you will need to invoke both |
+| ``synchronize_rcu()`` and ``rcu_barrier()``. If latency is a concern, |
+| you can always use workqueues to invoke them concurrently. |
++-----------------------------------------------------------------------+
+
+Hotplug CPU
+~~~~~~~~~~~
+
+The Linux kernel supports CPU hotplug, which means that CPUs can come
+and go. It is of course illegal to use any RCU API member from an
+offline CPU, with the exception of `SRCU <#Sleepable%20RCU>`__ read-side
+critical sections. This requirement was present from day one in
+DYNIX/ptx, but on the other hand, the Linux kernel's CPU-hotplug
+implementation is “interesting.”
+
+The Linux-kernel CPU-hotplug implementation has notifiers that are used
+to allow the various kernel subsystems (including RCU) to respond
+appropriately to a given CPU-hotplug operation. Most RCU operations may
+be invoked from CPU-hotplug notifiers, including even synchronous
+grace-period operations such as ``synchronize_rcu()`` and
+``synchronize_rcu_expedited()``.
+
+However, all-callback-wait operations such as ``rcu_barrier()`` are not
+supported, due to the fact that there are phases of CPU-hotplug
+operations where the outgoing CPU's callbacks will not be invoked until
+after the CPU-hotplug operation ends, which could result in deadlock.
+Furthermore, ``rcu_barrier()`` blocks CPU-hotplug operations
+during its execution, which results in another type of deadlock when
+invoked from a CPU-hotplug notifier.
+
+Scheduler and RCU
+~~~~~~~~~~~~~~~~~
+
+RCU depends on the scheduler, and the scheduler uses RCU to protect some
+of its data structures. The preemptible-RCU ``rcu_read_unlock()``
+implementation must therefore be written carefully to avoid deadlocks
+involving the scheduler's runqueue and priority-inheritance locks. In
+particular, ``rcu_read_unlock()`` must tolerate an interrupt where the
+interrupt handler invokes both ``rcu_read_lock()`` and
+``rcu_read_unlock()``. This possibility requires ``rcu_read_unlock()``
+to use negative nesting levels to avoid destructive recursion via the
+interrupt handler's use of RCU.
+
+This scheduler-RCU requirement came as a `complete
+surprise <https://lwn.net/Articles/453002/>`__.
+
+As noted above, RCU makes use of kthreads, and it is necessary to avoid
+excessive CPU-time accumulation by these kthreads. This requirement was
+no surprise, but RCU's violation of it when running context-switch-heavy
+workloads when built with ``CONFIG_NO_HZ_FULL=y`` `did come as a
+surprise
+[PDF] <http://www.rdrop.com/users/paulmck/scalability/paper/BareMetal.2015.01.15b.pdf>`__.
+RCU has made good progress towards meeting this requirement, even for
+context-switch-heavy ``CONFIG_NO_HZ_FULL=y`` workloads, but there is
+room for further improvement.
+
+It is forbidden to hold any of the scheduler's runqueue or
+priority-inheritance spinlocks across an ``rcu_read_unlock()`` unless
+interrupts have been disabled across the entire RCU read-side critical
+section, that is, up to and including the matching ``rcu_read_lock()``.
+Violating this restriction can result in deadlocks involving these
+scheduler spinlocks. There was hope that this restriction might be
+lifted when interrupt-disabled calls to ``rcu_read_unlock()`` started
+deferring the reporting of the resulting RCU-preempt quiescent state
+until the end of the corresponding interrupts-disabled region.
+Unfortunately, timely reporting of the corresponding quiescent state to
+expedited grace periods requires a call to ``raise_softirq()``, which
+can acquire these scheduler spinlocks. In addition, real-time systems
+using RCU priority boosting need this restriction to remain in effect
+because deferred quiescent-state reporting would also defer deboosting,
+which in turn would degrade real-time latencies.
+
+In theory, if a given RCU read-side critical section could be guaranteed
+to be less than one second in duration, holding a scheduler spinlock
+across that critical section's ``rcu_read_unlock()`` would require only
+that preemption be disabled across the entire RCU read-side critical
+section, not interrupts. Unfortunately, given the possibility of vCPU
+preemption, long-running interrupts, and so on, it is not possible in
+practice to guarantee that a given RCU read-side critical section will
+complete in less than one second. Therefore, as noted above, if
+scheduler spinlocks are held across a given call to
+``rcu_read_unlock()``, interrupts must be disabled across the entire RCU
+read-side critical section.
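+
+A minimal sketch of the resulting rule is shown below, with
+``some_rq_lock`` standing in for one of the scheduler's runqueue or
+priority-inheritance locks:
+
+ ::
+
+ 1 local_irq_save(flags); /* Must precede rcu_read_lock(). */
+ 2 rcu_read_lock();
+ 3 do_something();
+ 4 raw_spin_lock(&some_rq_lock); /* Hypothetical scheduler lock. */
+ 5 rcu_read_unlock(); /* Safe: irqs disabled throughout. */
+ 6 raw_spin_unlock(&some_rq_lock);
+ 7 local_irq_restore(flags);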
+
+Tracing and RCU
+~~~~~~~~~~~~~~~
+
+It is possible to use tracing on RCU code, but tracing itself uses RCU.
+For this reason, ``rcu_dereference_raw_check()`` is provided for use
+by tracing, which avoids the destructive recursion that could otherwise
+ensue. This API is also used by virtualization in some architectures,
+where RCU readers execute in environments in which tracing cannot be
+used. The tracing folks both located the requirement and provided the
+needed fix, so this surprise requirement was relatively painless.
+
+Accesses to User Memory and RCU
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The kernel needs to access user-space memory, for example, to access data
+referenced by system-call parameters. The ``get_user()`` macro does this job.
+
+However, user-space memory might well be paged out, which means that
+``get_user()`` might well page-fault and thus block while waiting for the
+resulting I/O to complete. It would be a very bad thing for the compiler to
+reorder a ``get_user()`` invocation into an RCU read-side critical section.
+
+For example, suppose that the source code looked like this:
+
+ ::
+
+ 1 rcu_read_lock();
+ 2 p = rcu_dereference(gp);
+ 3 v = p->value;
+ 4 rcu_read_unlock();
+ 5 get_user(user_v, user_p);
+ 6 do_something_with(v, user_v);
+
+The compiler must not be permitted to transform this source code into
+the following:
+
+ ::
+
+ 1 rcu_read_lock();
+ 2 p = rcu_dereference(gp);
+ 3 get_user(user_v, user_p); // BUG: POSSIBLE PAGE FAULT!!!
+ 4 v = p->value;
+ 5 rcu_read_unlock();
+ 6 do_something_with(v, user_v);
+
+If the compiler did make this transformation in a ``CONFIG_PREEMPT=n`` kernel
+build, and if ``get_user()`` did page fault, the result would be a quiescent
+state in the middle of an RCU read-side critical section. This misplaced
+quiescent state could result in line 4 being a use-after-free access,
+which could be bad for your kernel's actuarial statistics. Similar examples
+can be constructed with the call to ``get_user()`` preceding the
+``rcu_read_lock()``.
+
+Unfortunately, ``get_user()`` doesn't have any particular ordering properties,
+and in some architectures the underlying ``asm`` isn't even marked
+``volatile``. And even if it were marked ``volatile``, the above access to
+``p->value`` is not volatile, so the compiler would not have any reason to keep
+those two accesses in order.
+
+Therefore, the Linux-kernel definitions of ``rcu_read_lock()`` and
+``rcu_read_unlock()`` must act as compiler barriers, at least for outermost
+instances of ``rcu_read_lock()`` and ``rcu_read_unlock()`` within a nested set
+of RCU read-side critical sections.
+
+Energy Efficiency
+~~~~~~~~~~~~~~~~~
+
+Interrupting idle CPUs is considered socially unacceptable, especially
+by people with battery-powered embedded systems. RCU therefore conserves
+energy by detecting which CPUs are idle, including tracking CPUs that
+have been interrupted from idle. This is a large part of the
+energy-efficiency requirement, so I learned of this via an irate phone
+call.
+
+Because RCU avoids interrupting idle CPUs, it is illegal to execute an
+RCU read-side critical section on an idle CPU. (Kernels built with
+``CONFIG_PROVE_RCU=y`` will splat if you try it.) The ``RCU_NONIDLE()``
+macro and ``_rcuidle`` event tracing are provided to work around this
+restriction. In addition, ``rcu_is_watching()`` may be used to test
+whether or not it is currently legal to run RCU read-side critical
+sections on this CPU. I learned of the need for diagnostics on the one
+hand and ``RCU_NONIDLE()`` on the other while inspecting idle-loop code.
+Steven Rostedt supplied ``_rcuidle`` event tracing, which is used quite
+heavily in the idle loop. However, there are some restrictions on the
+code placed within ``RCU_NONIDLE()``:
+
+#. Blocking is prohibited. In practice, this is not a serious
+ restriction given that idle tasks are prohibited from blocking to
+ begin with.
+#. Although ``RCU_NONIDLE()`` invocations may nest, they cannot nest
+ indefinitely deeply. However, given that they can be nested on the
+ order of a million deep, even on 32-bit systems, this should not be a
+ serious restriction. This nesting limit would probably be reached
+ long after the compiler OOMed or the stack overflowed.
+#. Any code path that enters ``RCU_NONIDLE()`` must sequence out of that
+ same ``RCU_NONIDLE()``. For example, the following is grossly
+ illegal:
+
+ ::
+
+ 1 RCU_NONIDLE({
+ 2 do_something();
+ 3 goto bad_idea; /* BUG!!! */
+ 4 do_something_else();});
+ 5 bad_idea:
+
+
+ It is just as illegal to transfer control into the middle of
+ ``RCU_NONIDLE()``'s argument. Yes, in theory, you could transfer in
+ as long as you also transferred out, but in practice you could also
+ expect to get sharply worded review comments.
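+
+By way of contrast, a minimal legal use of ``RCU_NONIDLE()`` simply
+wraps a complete statement, as in the following sketch, in which
+``do_event_trace()`` is hypothetical:
+
+ ::
+
+ 1 RCU_NONIDLE(do_event_trace());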
+
+It is similarly socially unacceptable to interrupt an ``nohz_full`` CPU
+running in userspace. RCU must therefore track ``nohz_full`` userspace
+execution. In particular, RCU must be able to sample state at two
+points in time, and be able to determine whether or not some other CPU
+spent any time idle and/or executing in userspace in the interim.
+
+These energy-efficiency requirements have proven quite difficult to
+understand and to meet. For example, there have been more than five
+clean-sheet rewrites of RCU's energy-efficiency code, the last of which
+was finally able to demonstrate `real energy savings running on real
+hardware
+[PDF] <http://www.rdrop.com/users/paulmck/realtime/paper/AMPenergy.2013.04.19a.pdf>`__.
+As noted earlier, I learned of many of these requirements via angry
+phone calls: Flaming me on the Linux-kernel mailing list was apparently
+not sufficient to fully vent their ire at RCU's energy-efficiency bugs!
+
+Scheduling-Clock Interrupts and RCU
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The kernel transitions between in-kernel non-idle execution, userspace
+execution, and the idle loop. Depending on kernel configuration, RCU
+handles these states differently:
+
++-----------------+------------------+------------------+-----------------+
+| ``HZ`` Kconfig | In-Kernel | Usermode | Idle |
++=================+==================+==================+=================+
+| ``HZ_PERIODIC`` | Can rely on | Can rely on | Can rely on |
+| | scheduling-clock | scheduling-clock | RCU's |
+| | interrupt. | interrupt and | dyntick-idle |
+| | | its detection | detection. |
+| | | of interrupt | |
+| | | from usermode. | |
++-----------------+------------------+------------------+-----------------+
+| ``NO_HZ_IDLE`` | Can rely on | Can rely on | Can rely on |
+| | scheduling-clock | scheduling-clock | RCU's |
+| | interrupt. | interrupt and | dyntick-idle |
+| | | its detection | detection. |
+| | | of interrupt | |
+| | | from usermode. | |
++-----------------+------------------+------------------+-----------------+
+| ``NO_HZ_FULL`` | Can only | Can rely on | Can rely on |
+| | sometimes rely | RCU's | RCU's |
+| | on | dyntick-idle | dyntick-idle |
+| | scheduling-clock | detection. | detection. |
+| | interrupt. In | | |
+| | other cases, it | | |
+| | is necessary to | | |
+| | bound kernel | | |
+| | execution times | | |
+| | and/or use | | |
+| | IPIs. | | |
++-----------------+------------------+------------------+-----------------+
+
++-----------------------------------------------------------------------+
+| **Quick Quiz**: |
++-----------------------------------------------------------------------+
+| Why can't ``NO_HZ_FULL`` in-kernel execution rely on the |
+| scheduling-clock interrupt, just like ``HZ_PERIODIC`` and |
+| ``NO_HZ_IDLE`` do? |
++-----------------------------------------------------------------------+
+| **Answer**: |
++-----------------------------------------------------------------------+
+| Because, as a performance optimization, ``NO_HZ_FULL`` does not |
+| necessarily re-enable the scheduling-clock interrupt on entry to each |
+| and every system call. |
++-----------------------------------------------------------------------+
+
+However, RCU must be reliably informed as to whether any given CPU is
+currently in the idle loop, and, for ``NO_HZ_FULL``, also whether that
+CPU is executing in usermode, as discussed
+`earlier <#Energy%20Efficiency>`__. It also requires that the
+scheduling-clock interrupt be enabled when RCU needs it to be:
+
+#. If a CPU is either idle or executing in usermode, and RCU believes it
+ is non-idle, the scheduling-clock tick had better be running.
+ Otherwise, you will get RCU CPU stall warnings. Or at best, very long
+ (11-second) grace periods, with a pointless IPI waking the CPU from
+ time to time.
+#. If a CPU is in a portion of the kernel that executes RCU read-side
+ critical sections, and RCU believes this CPU to be idle, you will get
+ random memory corruption. **DON'T DO THIS!!!**
+ This is one reason to test with lockdep, which will complain about
+ this sort of thing.
+#. If a CPU is in a portion of the kernel that is absolutely positively
+ no-joking guaranteed to never execute any RCU read-side critical
+ sections, and RCU believes this CPU to be idle, no problem. This
+ sort of thing is used by some architectures for light-weight
+ exception handlers, which can then avoid the overhead of
+ ``rcu_irq_enter()`` and ``rcu_irq_exit()`` at exception entry and
+ exit, respectively. Some go further and avoid the entireties of
+ ``irq_enter()`` and ``irq_exit()``.
+ Just make very sure you are running some of your tests with
+ ``CONFIG_PROVE_RCU=y``, just in case one of your code paths was in
+ fact joking about not doing RCU read-side critical sections.
+#. If a CPU is executing in the kernel with the scheduling-clock
+ interrupt disabled and RCU believes this CPU to be non-idle, and if
+ the CPU goes idle (from an RCU perspective) every few jiffies, no
+ problem. It is usually OK for there to be the occasional gap between
+ idle periods of up to a second or so.
+ If the gap grows too long, you get RCU CPU stall warnings.
+#. If a CPU is either idle or executing in usermode, and RCU believes it
+ to be idle, of course no problem.
+#. If a CPU is executing in the kernel, the kernel code path is passing
+ through quiescent states at a reasonable frequency (preferably about
+ once per few jiffies, but the occasional excursion to a second or so
+ is usually OK) and the scheduling-clock interrupt is enabled, of
+ course no problem.
+ If the gap between a successive pair of quiescent states grows too
+ long, you get RCU CPU stall warnings.
+
++-----------------------------------------------------------------------+
+| **Quick Quiz**: |
++-----------------------------------------------------------------------+
+| But what if my driver has a hardware interrupt handler that can run |
+| for many seconds? I cannot invoke ``schedule()`` from a hardware |
+| interrupt handler, after all! |
++-----------------------------------------------------------------------+
+| **Answer**: |
++-----------------------------------------------------------------------+
+| One approach is to do ``rcu_irq_exit();rcu_irq_enter();`` every so |
+| often. But given that long-running interrupt handlers can cause other |
+| problems, not least for response time, shouldn't you work to keep |
+| your interrupt handler's runtime within reasonable bounds? |
++-----------------------------------------------------------------------+
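+
+For definiteness, such a long-running handler might be structured as in
+the following sketch, in which ``more_work()`` and ``do_one_unit()``
+are hypothetical:
+
+ ::
+
+ 1 static irqreturn_t my_long_handler(int irq, void *dev)
+ 2 {
+ 3 while (more_work(dev)) {
+ 4 do_one_unit(dev);
+ 5 rcu_irq_exit(); /* Momentarily let RCU see this CPU as idle... */
+ 6 rcu_irq_enter(); /* ...then tell RCU the handler is back. */
+ 7 }
+ 8 return IRQ_HANDLED;
+ 9 }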
+
+But as long as RCU is properly informed of kernel state transitions
+between in-kernel execution, usermode execution, and idle, and as long
+as the scheduling-clock interrupt is enabled when RCU needs it to be,
+you can rest assured that the bugs you encounter will be in some other
+part of RCU or some other part of the kernel!
+
+Memory Efficiency
+~~~~~~~~~~~~~~~~~
+
+Although small-memory non-realtime systems can simply use Tiny RCU, code
+size is only one aspect of memory efficiency. Another aspect is the size
+of the ``rcu_head`` structure used by ``call_rcu()`` and
+``kfree_rcu()``. Although this structure contains nothing more than a
+pair of pointers, it does appear in many RCU-protected data structures,
+including some that are size critical. The ``page`` structure is a case
+in point, as evidenced by the many occurrences of the ``union`` keyword
+within that structure.
+
+This need for memory efficiency is one reason that RCU uses hand-crafted
+singly linked lists to track the ``rcu_head`` structures that are
+waiting for a grace period to elapse. It is also the reason why
+``rcu_head`` structures do not contain debug information, such as fields
+tracking the file and line of the ``call_rcu()`` or ``kfree_rcu()`` that
+posted them. Although this information might appear in debug-only kernel
+builds at some point, in the meantime, the ``->func`` field will often
+provide the needed debug information.
+
+However, in some cases, the need for memory efficiency leads to even
+more extreme measures. Returning to the ``page`` structure, the
+``rcu_head`` field shares storage with a great many other structures
+that are used at various points in the corresponding page's lifetime. In
+order to correctly resolve certain `race
+conditions <https://lkml.kernel.org/g/1439976106-137226-1-git-send-email-kirill.shutemov@linux.intel.com>`__,
+the Linux kernel's memory-management subsystem needs a particular bit to
+remain zero during all phases of grace-period processing, and that bit
+happens to map to the bottom bit of the ``rcu_head`` structure's
+``->next`` field. RCU makes this guarantee as long as ``call_rcu()`` is
+used to post the callback, as opposed to ``kfree_rcu()`` or some future
+“lazy” variant of ``call_rcu()`` that might one day be created for
+energy-efficiency purposes.
+
+That said, there are limits. RCU requires that the ``rcu_head``
+structure be aligned to a two-byte boundary, and passing a misaligned
+``rcu_head`` structure to one of the ``call_rcu()`` family of functions
+will result in a splat. It is therefore necessary to exercise caution
+when packing structures containing fields of type ``rcu_head``. Why not
+a four-byte or even eight-byte alignment requirement? Because the m68k
+architecture provides only two-byte alignment, and thus acts as
+alignment's least common denominator.
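+
+For reference, the usual pattern embeds the ``rcu_head`` structure in
+the RCU-protected structure and uses ``container_of()`` in the
+callback, as in this sketch (``struct foo`` is hypothetical):
+
+ ::
+
+ 1 struct foo {
+ 2 int data;
+ 3 struct rcu_head rh; /* Naturally aligned, so no splat. */
+ 4 };
+ 5
+ 6 static void foo_reclaim(struct rcu_head *rhp)
+ 7 {
+ 8 kfree(container_of(rhp, struct foo, rh));
+ 9 }
+ 10
+ 11 /* After making fp unreachable to new readers: */
+ 12 call_rcu(&fp->rh, foo_reclaim);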
+
+The reason for reserving the bottom bit of pointers to ``rcu_head``
+structures is to leave the door open to “lazy” callbacks whose
+invocations can safely be deferred. Deferring invocation could
+potentially have energy-efficiency benefits, but only if the rate of
+non-lazy callbacks decreases significantly for some important workload.
+In the meantime, reserving the bottom bit keeps this option open in case
+it one day becomes useful.
+
+Performance, Scalability, Response Time, and Reliability
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Expanding on the `earlier
+discussion <#Performance%20and%20Scalability>`__, RCU is used heavily by
+hot code paths in performance-critical portions of the Linux kernel's
+networking, security, virtualization, and scheduling code paths. RCU
+must therefore use efficient implementations, especially in its
+read-side primitives. To that end, it would be good if preemptible RCU's
+implementation of ``rcu_read_lock()`` could be inlined; however, doing
+this requires resolving ``#include`` issues with the ``task_struct``
+structure.
+
+The Linux kernel supports hardware configurations with up to 4096 CPUs,
+which means that RCU must be extremely scalable. Algorithms that involve
+frequent acquisitions of global locks or frequent atomic operations on
+global variables simply cannot be tolerated within the RCU
+implementation. RCU therefore makes heavy use of a combining tree based
+on the ``rcu_node`` structure. RCU is required to tolerate all CPUs
+continuously invoking any combination of RCU's runtime primitives with
+minimal per-operation overhead. In fact, in many cases, increasing load
+must *decrease* the per-operation overhead; witness the batching
+optimizations for ``synchronize_rcu()``, ``call_rcu()``,
+``synchronize_rcu_expedited()``, and ``rcu_barrier()``. As a general
+rule, RCU must cheerfully accept whatever the rest of the Linux kernel
+decides to throw at it.
+
+The Linux kernel is used for real-time workloads, especially in
+conjunction with the `-rt
+patchset <https://rt.wiki.kernel.org/index.php/Main_Page>`__. The
+real-time-latency response requirements are such that the traditional
+approach of disabling preemption across RCU read-side critical sections
+is inappropriate. Kernels built with ``CONFIG_PREEMPT=y`` therefore use
+an RCU implementation that allows RCU read-side critical sections to be
+preempted. This requirement made its presence known after users made it
+clear that an earlier `real-time
+patch <https://lwn.net/Articles/107930/>`__ did not meet their needs, in
+conjunction with some `RCU
+issues <https://lkml.kernel.org/g/20050318002026.GA2693@us.ibm.com>`__
+encountered by a very early version of the -rt patchset.
+
+In addition, RCU must make do with a sub-100-microsecond real-time
+latency budget. In fact, on smaller systems with the -rt patchset, the
+Linux kernel provides sub-20-microsecond real-time latencies for the
+whole kernel, including RCU. RCU's scalability and latency must
+therefore be sufficient for these sorts of configurations. To my
+surprise, the sub-100-microsecond real-time latency budget `applies to
+even the largest systems
+[PDF] <http://www.rdrop.com/users/paulmck/realtime/paper/bigrt.2013.01.31a.LCA.pdf>`__,
+up to and including systems with 4096 CPUs. This real-time requirement
+motivated the grace-period kthread, which also simplified handling of a
+number of race conditions.
+
+RCU must avoid degrading real-time response for CPU-bound threads,
+whether executing in usermode (which is one use case for
+``CONFIG_NO_HZ_FULL=y``) or in the kernel. That said, CPU-bound loops in
+the kernel must execute ``cond_resched()`` at least once per few tens of
+milliseconds in order to avoid receiving an IPI from RCU.
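+
+A CPU-bound in-kernel loop might therefore be written along the lines
+of the following sketch, in which ``do_unit_of_work()`` is
+hypothetical:
+
+ ::
+
+ 1 for (i = 0; i < n; i++) {
+ 2 do_unit_of_work(i);
+ 3 if ((i & 1023) == 0)
+ 4 cond_resched(); /* Supply a quiescent state. */
+ 5 }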
+
+Finally, RCU's status as a synchronization primitive means that any RCU
+failure can result in arbitrary memory corruption that can be extremely
+difficult to debug. This means that RCU must be extremely reliable,
+which in practice also means that RCU must have an aggressive
+stress-test suite. This stress-test suite is called ``rcutorture``.
+
+Although the need for ``rcutorture`` was no surprise, the current
+immense popularity of the Linux kernel is posing interesting—and perhaps
+unprecedented—validation challenges. To see this, keep in mind that
+there are well over one billion instances of the Linux kernel running
+today, given Android smartphones, Linux-powered televisions, and
+servers. This number can be expected to increase sharply with the advent
+of the celebrated Internet of Things.
+
+Suppose that RCU contains a race condition that manifests on average
+once per million years of runtime. This bug will be occurring about
+three times per *day* across the installed base. RCU could simply hide
+behind hardware error rates, given that no one should really expect
+their smartphone to last for a million years. However, anyone taking too
+much comfort from this thought should consider the fact that in most
+jurisdictions, a successful multi-year test of a given mechanism, which
+might include a Linux kernel, suffices for a number of types of
+safety-critical certifications. In fact, rumor has it that the Linux
+kernel is already being used in production for safety-critical
+applications. I don't know about you, but I would feel quite bad if a
+bug in RCU killed someone. Which might explain my recent focus on
+validation and verification.
+
+Other RCU Flavors
+-----------------
+
+One of the more surprising things about RCU is that there are now no
+fewer than five *flavors*, or API families. In addition, the primary
+flavor that has been the sole focus up to this point has two different
+implementations, non-preemptible and preemptible. The other four flavors
+are listed below, with requirements for each described in a separate
+section.
+
+#. `Bottom-Half Flavor (Historical)`_
+#. `Sched Flavor (Historical)`_
+#. `Sleepable RCU`_
+#. `Tasks RCU`_
+
+Bottom-Half Flavor (Historical)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The RCU-bh flavor of RCU has since been expressed in terms of the other
+RCU flavors as part of a consolidation of the three flavors into a
+single flavor. The read-side API remains, and continues to disable
+softirq and to be accounted for by lockdep. Much of the material in this
+section is therefore strictly historical in nature.
+
+The softirq-disable (AKA “bottom-half”, hence the “_bh” abbreviations)
+flavor of RCU, or *RCU-bh*, was developed by Dipankar Sarma to provide a
+flavor of RCU that could withstand the network-based denial-of-service
+attacks researched by Robert Olsson. These attacks placed so much
+networking load on the system that some of the CPUs never exited softirq
+execution, which in turn prevented those CPUs from ever executing a
+context switch, which, in the RCU implementation of that time, prevented
+grace periods from ever ending. The result was an out-of-memory
+condition and a system hang.
+
+The solution was the creation of RCU-bh, which does
+``local_bh_disable()`` across its read-side critical sections, and which
+uses the transition from one type of softirq processing to another as a
+quiescent state in addition to context switch, idle, user mode, and
+offline. This means that RCU-bh grace periods can complete even when
+some of the CPUs execute in softirq indefinitely, thus allowing
+algorithms based on RCU-bh to withstand network-based denial-of-service
+attacks.
+
+Because ``rcu_read_lock_bh()`` and ``rcu_read_unlock_bh()`` disable and
+re-enable softirq handlers, any attempt to start a softirq handler
+during the RCU-bh read-side critical section will be deferred. In this
+case, ``rcu_read_unlock_bh()`` will invoke softirq processing, which can
+take considerable time. One can of course argue that this softirq
+overhead should be associated with the code following the RCU-bh
+read-side critical section rather than ``rcu_read_unlock_bh()``, but the
+fact is that most profiling tools cannot be expected to make this sort
+of fine distinction. For example, suppose that a three-millisecond-long
+RCU-bh read-side critical section executes during a time of heavy
+networking load. There will very likely be an attempt to invoke at least
+one softirq handler during that three milliseconds, but any such
+invocation will be delayed until the time of the
+``rcu_read_unlock_bh()``. This can of course make it appear at first
+glance as if ``rcu_read_unlock_bh()`` was executing very slowly.
+
+The `RCU-bh
+API <https://lwn.net/Articles/609973/#RCU%20Per-Flavor%20API%20Table>`__
+includes ``rcu_read_lock_bh()``, ``rcu_read_unlock_bh()``,
+``rcu_dereference_bh()``, ``rcu_dereference_bh_check()``,
+``synchronize_rcu_bh()``, ``synchronize_rcu_bh_expedited()``,
+``call_rcu_bh()``, ``rcu_barrier_bh()``, and
+``rcu_read_lock_bh_held()``. However, the update-side APIs are now
+simple wrappers for other RCU flavors, namely RCU-sched in
+``CONFIG_PREEMPT=n`` kernels and RCU-preempt otherwise.
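+
+A minimal RCU-bh reader therefore looks as follows, with ``gp`` a
+hypothetical RCU-protected global pointer:
+
+ ::
+
+ 1 rcu_read_lock_bh();
+ 2 p = rcu_dereference_bh(gp);
+ 3 do_something_with(p);
+ 4 rcu_read_unlock_bh();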
+
+Sched Flavor (Historical)
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The RCU-sched flavor of RCU has since been expressed in terms of the
+other RCU flavors as part of a consolidation of the three flavors into a
+single flavor. The read-side API remains, and continues to disable
+preemption and to be accounted for by lockdep. Much of the material in
+this section is therefore strictly historical in nature.
+
+Before preemptible RCU, waiting for an RCU grace period had the side
+effect of also waiting for all pre-existing interrupt and NMI handlers.
+However, there are legitimate preemptible-RCU implementations that do
+not have this property, given that any point in the code outside of an
+RCU read-side critical section can be a quiescent state. Therefore,
+*RCU-sched* was created, which follows “classic” RCU in that an
+RCU-sched grace period waits for pre-existing interrupt and NMI
+handlers. In kernels built with ``CONFIG_PREEMPT=n``, the RCU and
+RCU-sched APIs have identical implementations, while kernels built with
+``CONFIG_PREEMPT=y`` provide a separate implementation for each.
+
+Note well that in ``CONFIG_PREEMPT=y`` kernels,
+``rcu_read_lock_sched()`` and ``rcu_read_unlock_sched()`` disable and
+re-enable preemption, respectively. This means that if there was a
+preemption attempt during the RCU-sched read-side critical section,
+``rcu_read_unlock_sched()`` will enter the scheduler, with all the
+latency and overhead entailed. Just as with ``rcu_read_unlock_bh()``,
+this can make it look as if ``rcu_read_unlock_sched()`` was executing
+very slowly. However, the highest-priority task won't be preempted, so
+that task will enjoy low-overhead ``rcu_read_unlock_sched()``
+invocations.
+
+The `RCU-sched
+API <https://lwn.net/Articles/609973/#RCU%20Per-Flavor%20API%20Table>`__
+includes ``rcu_read_lock_sched()``, ``rcu_read_unlock_sched()``,
+``rcu_read_lock_sched_notrace()``, ``rcu_read_unlock_sched_notrace()``,
+``rcu_dereference_sched()``, ``rcu_dereference_sched_check()``,
+``synchronize_sched()``, ``synchronize_sched_expedited()``,
+``call_rcu_sched()``, ``rcu_barrier_sched()``, and
+``rcu_read_lock_sched_held()``. However, anything that disables
+preemption also marks an RCU-sched read-side critical section, including
+``preempt_disable()`` and ``preempt_enable()``, ``local_irq_save()`` and
+``local_irq_restore()``, and so on.
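+
+For example, either of the following sketches marks an RCU-sched
+read-side critical section around the hypothetical
+``do_something_with()``:
+
+ ::
+
+ 1 rcu_read_lock_sched();
+ 2 p = rcu_dereference_sched(gp);
+ 3 do_something_with(p);
+ 4 rcu_read_unlock_sched();
+ 5
+ 6 preempt_disable(); /* Equally valid as an RCU-sched reader. */
+ 7 p = rcu_dereference_sched(gp);
+ 8 do_something_with(p);
+ 9 preempt_enable();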
+
+Sleepable RCU
+~~~~~~~~~~~~~
+
+For well over a decade, someone saying “I need to block within an RCU
+read-side critical section” was a reliable indication that this someone
+did not understand RCU. After all, if you are always blocking in an RCU
+read-side critical section, you can probably afford to use a
+higher-overhead synchronization mechanism. However, that changed with
+the advent of the Linux kernel's notifiers, whose RCU read-side critical
+sections almost never sleep, but sometimes need to. This resulted in the
+introduction of `sleepable RCU <https://lwn.net/Articles/202847/>`__, or
+*SRCU*.
+
+SRCU allows different domains to be defined, with each such domain
+defined by an instance of an ``srcu_struct`` structure. A pointer to
+this structure must be passed in to each SRCU function, for example,
+``synchronize_srcu(&ss)``, where ``ss`` is the ``srcu_struct``
+structure. The key benefit of these domains is that a slow SRCU reader
+in one domain does not delay an SRCU grace period in some other domain.
+That said, one consequence of these domains is that read-side code must
+pass a “cookie” from ``srcu_read_lock()`` to ``srcu_read_unlock()``, for
+example, as follows:
+
+ ::
+
+ 1 int idx;
+ 2
+ 3 idx = srcu_read_lock(&ss);
+ 4 do_something();
+ 5 srcu_read_unlock(&ss, idx);
+
+As noted above, it is legal to block within SRCU read-side critical
+sections; however, with great power comes great responsibility. If you
+block forever in one of a given domain's SRCU read-side critical
+sections, then that domain's grace periods will also be blocked forever.
+Of course, one good way to block forever is to deadlock, which can
+happen if any operation in a given domain's SRCU read-side critical
+section can wait, either directly or indirectly, for that domain's grace
+period to elapse. For example, this results in a self-deadlock:
+
+ ::
+
+ 1 int idx;
+ 2
+ 3 idx = srcu_read_lock(&ss);
+ 4 do_something();
+ 5 synchronize_srcu(&ss);
+ 6 srcu_read_unlock(&ss, idx);
+
+However, if line 5 acquired a mutex that was held across a
+``synchronize_srcu()`` for domain ``ss``, deadlock would still be
+possible. Furthermore, if line 5 acquired a mutex that was held across a
+``synchronize_srcu()`` for some other domain ``ss1``, and if an
+``ss1``-domain SRCU read-side critical section acquired another mutex
+that was held across an ``ss``-domain ``synchronize_srcu()``, deadlock
+would again be possible. Such a deadlock cycle could extend across an
+arbitrarily large number of different SRCU domains. Again, with great
+power comes great responsibility.
+
+Unlike the other RCU flavors, SRCU read-side critical sections can run
+on idle and even offline CPUs. This ability requires that
+``srcu_read_lock()`` and ``srcu_read_unlock()`` contain memory barriers,
+which means that SRCU readers will run a bit slower than would RCU
+readers. It also motivates the ``smp_mb__after_srcu_read_unlock()`` API,
+which, in combination with ``srcu_read_unlock()``, guarantees a full
+memory barrier.
+
+Also unlike other RCU flavors, ``synchronize_srcu()`` may **not** be
+invoked from CPU-hotplug notifiers, due to the fact that SRCU grace
+periods make use of timers and the possibility of timers being
+temporarily “stranded” on the outgoing CPU. This stranding of timers
+means that timers posted to the outgoing CPU will not fire until late in
+the CPU-hotplug process. The problem is that if a notifier is waiting on
+an SRCU grace period, that grace period is waiting on a timer, and that
+timer is stranded on the outgoing CPU, then the notifier will never be
+awakened, in other words, deadlock has occurred. This same situation of
+course also prohibits ``srcu_barrier()`` from being invoked from
+CPU-hotplug notifiers.
+
+SRCU also differs from other RCU flavors in that SRCU's expedited and
+non-expedited grace periods are implemented by the same mechanism. This
+means that in the current SRCU implementation, expediting a future grace
+period has the side effect of expediting all prior grace periods that
+have not yet completed. (But please note that this is a property of the
+current implementation, not necessarily of future implementations.) In
+addition, if SRCU has been idle for longer than the interval specified
+by the ``srcutree.exp_holdoff`` kernel boot parameter (25 microseconds
+by default), and if a ``synchronize_srcu()`` invocation ends this idle
+period, that invocation will be automatically expedited.
+
+As of v4.12, SRCU's callbacks are maintained per-CPU, eliminating a
+locking bottleneck present in prior kernel versions. Although this will
+allow users to put much heavier stress on ``call_srcu()``, it is
+important to note that SRCU does not yet take any special steps to deal
+with callback flooding. So if you are posting (say) 10,000 SRCU
+callbacks per second per CPU, you are probably totally OK, but if you
+intend to post (say) 1,000,000 SRCU callbacks per second per CPU, please
+run some tests first. SRCU just might need a few adjustments to deal with
+that sort of load. Of course, your mileage may vary based on the speed
+of your CPUs and the size of your memory.
+
+The `SRCU
+API <https://lwn.net/Articles/609973/#RCU%20Per-Flavor%20API%20Table>`__
+includes ``srcu_read_lock()``, ``srcu_read_unlock()``,
+``srcu_dereference()``, ``srcu_dereference_check()``,
+``synchronize_srcu()``, ``synchronize_srcu_expedited()``,
+``call_srcu()``, ``srcu_barrier()``, and ``srcu_read_lock_held()``. It
+also includes ``DEFINE_SRCU()``, ``DEFINE_STATIC_SRCU()``, and
+``init_srcu_struct()`` APIs for defining and initializing
+``srcu_struct`` structures.
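+
+For example, an SRCU domain may be created either at build time or at
+run time, as in the following sketch (the domain names are
+hypothetical):
+
+ ::
+
+ 1 DEFINE_SRCU(my_srcu); /* Build-time domain. */
+ 2
+ 3 struct srcu_struct dyn_srcu; /* Run-time domain, which must be... */
+ 4 init_srcu_struct(&dyn_srcu); /* ...initialized before first use. */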
+
+Tasks RCU
+~~~~~~~~~
+
+Some forms of tracing use “trampolines” to handle the binary rewriting
+required to install different types of probes. It would be good to be
+able to free old trampolines, which sounds like a job for some form of
+RCU. However, because it is necessary to be able to install a trace
+anywhere in the code, it is not possible to use read-side markers such
+as ``rcu_read_lock()`` and ``rcu_read_unlock()``. In addition, it does
+not work to have these markers in the trampoline itself, because there
+would need to be instructions following ``rcu_read_unlock()``. Although
+``synchronize_rcu()`` would guarantee that execution reached the
+``rcu_read_unlock()``, it would not be able to guarantee that execution
+had completely left the trampoline.
+
+The solution, in the form of `Tasks
+RCU <https://lwn.net/Articles/607117/>`__, is to have implicit read-side
+critical sections that are delimited by voluntary context switches, that
+is, calls to ``schedule()``, ``cond_resched()``, and
+``synchronize_rcu_tasks()``. In addition, transitions to and from
+userspace execution also delimit tasks-RCU read-side critical sections.
+
+The tasks-RCU API is quite compact, consisting only of
+``call_rcu_tasks()``, ``synchronize_rcu_tasks()``, and
+``rcu_barrier_tasks()``. In ``CONFIG_PREEMPT=n`` kernels, trampolines
+cannot be preempted, so these APIs map to ``call_rcu()``,
+``synchronize_rcu()``, and ``rcu_barrier()``, respectively. In
+``CONFIG_PREEMPT=y`` kernels, trampolines can be preempted, and these
+three APIs are therefore implemented by separate functions that check
+for voluntary context switches.
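+
+A tracing subsystem might therefore free an old trampoline using
+something like the following sketch, in which everything other than
+``synchronize_rcu_tasks()`` is hypothetical:
+
+ ::
+
+ 1 disconnect_trampoline(tp); /* Prevent new entries into tp. */
+ 2 synchronize_rcu_tasks(); /* Wait for all tasks to leave tp. */
+ 3 free_trampoline(tp);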
+
+Possible Future Changes
+-----------------------
+
+One of the tricks that RCU uses to attain update-side scalability is to
+increase grace-period latency with increasing numbers of CPUs. If this
+becomes a serious problem, it will be necessary to rework the
+grace-period state machine so as to avoid the need for the additional
+latency.
+
+RCU disables CPU hotplug in a few places, perhaps most notably in the
+``rcu_barrier()`` operations. If there is a strong reason to use
+``rcu_barrier()`` in CPU-hotplug notifiers, it will be necessary to
+avoid disabling CPU hotplug. This would introduce some complexity, so
+there had better be a *very* good reason.
+
+The tradeoff between grace-period latency on the one hand and
+interruptions of other CPUs on the other hand may need to be
+re-examined. The desire is of course for zero grace-period latency as
+well as zero interprocessor interrupts undertaken during an expedited
+grace period operation. While this ideal is unlikely to be achievable,
+it is quite possible that further improvements can be made.
+
+The multiprocessor implementations of RCU use a combining tree that
+groups CPUs so as to reduce lock contention and increase cache locality.
+However, this combining tree does not spread its memory across NUMA
+nodes nor does it align the CPU groups with hardware features such as
+sockets or cores. Such spreading and alignment is currently believed to
+be unnecessary because the hotpath read-side primitives do not access
+the combining tree, nor does ``call_rcu()`` in the common case. If you
+believe that your architecture needs such spreading and alignment, then
+your architecture should also benefit from the
+``rcutree.rcu_fanout_leaf`` boot parameter, which can be set to the
+number of CPUs in a socket, NUMA node, or whatever. If the number of
+CPUs is too large, use a fraction of the number of CPUs. If the number
+of CPUs is a large prime number, well, that certainly is an
+“interesting” architectural choice! More flexible arrangements might be
+considered, but only if ``rcutree.rcu_fanout_leaf`` has proven
+inadequate, and only if the inadequacy has been demonstrated by a
+carefully run and realistic system-level workload.
+
+Please note that arrangements that require RCU to remap CPU numbers will
+require extremely good demonstration of need and full exploration of
+alternatives.
+
+RCU's various kthreads are reasonably recent additions. It is quite
+likely that adjustments will be required to more gracefully handle
+extreme loads. It might also be necessary to be able to relate CPU
+utilization by RCU's kthreads and softirq handlers to the code that
+instigated this CPU utilization. For example, RCU callback overhead
+might be charged back to the originating ``call_rcu()`` instance, though
+probably not in production kernels.
+
+Additional work may be required to provide reasonable forward-progress
+guarantees under heavy load for grace periods and for callback
+invocation.
+
+Summary
+-------
+
+This document has presented more than two decades' worth of RCU
+requirements. Given that the requirements keep changing, this will not
+be the last word on this subject, but at least it serves to get an
+important subset of the requirements set forth.
+
+Acknowledgments
+---------------
+
+I am grateful to Steven Rostedt, Lai Jiangshan, Ingo Molnar, Oleg
+Nesterov, Borislav Petkov, Peter Zijlstra, Boqun Feng, and Andy
+Lutomirski for their help in rendering this article human readable, and
+to Michelle Rankin for her support of this effort. Other contributions
+are acknowledged in the Linux kernel's git archive.
diff --git a/Documentation/RCU/index.rst b/Documentation/RCU/index.rst
index 340a9725676c..5c99185710fa 100644
--- a/Documentation/RCU/index.rst
+++ b/Documentation/RCU/index.rst
@@ -5,12 +5,17 @@ RCU concepts
============
.. toctree::
- :maxdepth: 1
+ :maxdepth: 3
rcu
listRCU
UP
+ Design/Memory-Ordering/Tree-RCU-Memory-Ordering
+ Design/Expedited-Grace-Periods/Expedited-Grace-Periods
+ Design/Requirements/Requirements
+ Design/Data-Structures/Data-Structures
+
.. only:: subproject and html
Indices
diff --git a/Documentation/RCU/lockdep.txt b/Documentation/RCU/lockdep.txt
index da51d3068850..89db949eeca0 100644
--- a/Documentation/RCU/lockdep.txt
+++ b/Documentation/RCU/lockdep.txt
@@ -96,7 +96,17 @@ other flavors of rcu_dereference(). On the other hand, it is illegal
to use rcu_dereference_protected() if either the RCU-protected pointer
or the RCU-protected data that it points to can change concurrently.
-There are currently only "universal" versions of the rcu_assign_pointer()
-and RCU list-/tree-traversal primitives, which do not (yet) check for
-being in an RCU read-side critical section. In the future, separate
-versions of these primitives might be created.
+Like rcu_dereference(), when lockdep is enabled, RCU list and hlist
+traversal primitives check for being called from within an RCU read-side
+critical section. However, a lockdep expression can be passed to them
+as an additional, optional argument. With this lockdep expression, these
+traversal primitives will complain only if the lockdep expression is
+false and they are called from outside any RCU read-side critical section.
+
+For example, the workqueue for_each_pwq() macro is intended to be used
+either within an RCU read-side critical section or with wq->mutex held.
+It is thus implemented as follows:
+
+ #define for_each_pwq(pwq, wq) \
+ list_for_each_entry_rcu((pwq), &(wq)->pwqs, pwqs_node, \
+ lock_is_held(&(wq->mutex).dep_map))
diff --git a/Documentation/RCU/whatisRCU.txt b/Documentation/RCU/whatisRCU.txt
index 7e1a8721637a..58ba05c4d97f 100644
--- a/Documentation/RCU/whatisRCU.txt
+++ b/Documentation/RCU/whatisRCU.txt
@@ -290,7 +290,7 @@ rcu_dereference()
at any time, including immediately after the rcu_dereference().
And, again like rcu_assign_pointer(), rcu_dereference() is
typically used indirectly, via the _rcu list-manipulation
- primitives, such as list_for_each_entry_rcu().
+ primitives, such as list_for_each_entry_rcu() [2].
[1] The variant rcu_dereference_protected() can be used outside
of an RCU read-side critical section as long as the usage is
@@ -302,9 +302,17 @@ rcu_dereference()
must prohibit. The rcu_dereference_protected() variant takes
a lockdep expression to indicate which locks must be acquired
by the caller. If the indicated protection is not provided,
- a lockdep splat is emitted. See RCU/Design/Requirements/Requirements.html
+ a lockdep splat is emitted. See Documentation/RCU/Design/Requirements/Requirements.rst
and the API's code comments for more details and example usage.
+ [2] If the list_for_each_entry_rcu() instance might be used by
+ update-side code as well as by RCU readers, then an additional
+ lockdep expression can be added to its list of arguments.
+ For example, given an additional "lock_is_held(&mylock)" argument,
+ the RCU lockdep code would complain only if this instance was
+ invoked outside of an RCU read-side critical section and without
+ the protection of mylock.
+
The following diagram shows how each API communicates among the
reader, updater, and reclaimer.
@@ -630,7 +638,7 @@ been able to write-acquire the lock otherwise. The smp_mb__after_spinlock()
promotes synchronize_rcu() to a full memory barrier in compliance with
the "Memory-Barrier Guarantees" listed in:
- Documentation/RCU/Design/Requirements/Requirements.html.
+ Documentation/RCU/Design/Requirements/Requirements.rst
It is possible to nest rcu_read_lock(), since reader-writer locks may
be recursively acquired. Note also that rcu_read_lock() is immune
diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst
index 5361ebec3361..007ba86aef78 100644
--- a/Documentation/admin-guide/cgroup-v2.rst
+++ b/Documentation/admin-guide/cgroup-v2.rst
@@ -1334,7 +1334,7 @@ PAGE_SIZE multiple when read back.
pgdeactivate
- Amount of pages moved to the inactive LRU lis
+ Amount of pages moved to the inactive LRU list
pglazyfree
diff --git a/Documentation/admin-guide/device-mapper/dm-integrity.rst b/Documentation/admin-guide/device-mapper/dm-integrity.rst
index a30aa91b5fbe..594095b54b29 100644
--- a/Documentation/admin-guide/device-mapper/dm-integrity.rst
+++ b/Documentation/admin-guide/device-mapper/dm-integrity.rst
@@ -177,6 +177,11 @@ bitmap_flush_interval:number
The bitmap flush interval in milliseconds. The metadata buffers
are synchronized when this interval expires.
+fix_padding
+ Use a smaller padding of the tag area that is more
+ space-efficient. If this option is not present, large padding is
+ used; this maintains compatibility with older kernels.
+
The journal mode (D/J), buffer_sectors, journal_watermark, commit_time can
be changed when reloading the target (load an inactive table and swap the
diff --git a/Documentation/admin-guide/device-mapper/dm-raid.rst b/Documentation/admin-guide/device-mapper/dm-raid.rst
index 2fe255b130fb..f6344675e395 100644
--- a/Documentation/admin-guide/device-mapper/dm-raid.rst
+++ b/Documentation/admin-guide/device-mapper/dm-raid.rst
@@ -417,3 +417,5 @@ Version History
deadlock/potential data corruption. Update superblock when
specific devices are requested via rebuild. Fix RAID leg
rebuild errors.
+ 1.15.0 Fix size extensions not being synchronized in case of new MD bitmap
+ pages allocated; also fix those not occurring after previous reductions
diff --git a/Documentation/admin-guide/hw-vuln/index.rst b/Documentation/admin-guide/hw-vuln/index.rst
index 49311f3da6f2..0795e3c2643f 100644
--- a/Documentation/admin-guide/hw-vuln/index.rst
+++ b/Documentation/admin-guide/hw-vuln/index.rst
@@ -12,3 +12,5 @@ are configurable at compile, boot or run time.
spectre
l1tf
mds
+ tsx_async_abort
+ multihit.rst
diff --git a/Documentation/admin-guide/hw-vuln/mds.rst b/Documentation/admin-guide/hw-vuln/mds.rst
index e3a796c0d3a2..2d19c9f4c1fe 100644
--- a/Documentation/admin-guide/hw-vuln/mds.rst
+++ b/Documentation/admin-guide/hw-vuln/mds.rst
@@ -265,8 +265,11 @@ time with the option "mds=". The valid arguments for this option are:
============ =============================================================
-Not specifying this option is equivalent to "mds=full".
-
+Not specifying this option is equivalent to "mds=full". For processors
+that are affected by both TAA (TSX Asynchronous Abort) and MDS,
+specifying just "mds=off" without an accompanying "tsx_async_abort=off"
+will have no effect as the same mitigation is used for both
+vulnerabilities.
Mitigation selection guide
--------------------------
diff --git a/Documentation/admin-guide/hw-vuln/multihit.rst b/Documentation/admin-guide/hw-vuln/multihit.rst
new file mode 100644
index 000000000000..ba9988d8bce5
--- /dev/null
+++ b/Documentation/admin-guide/hw-vuln/multihit.rst
@@ -0,0 +1,163 @@
+iTLB multihit
+=============
+
+iTLB multihit is an erratum where some processors may incur a machine check
+error, possibly resulting in an unrecoverable CPU lockup, when an
+instruction fetch hits multiple entries in the instruction TLB. This can
+occur when the page size is changed along with either the physical address
+or cache type. A malicious guest running on a virtualized system can
+exploit this erratum to perform a denial of service attack.
+
+
+Affected processors
+-------------------
+
+Variations of this erratum are present on most Intel Core and Xeon processor
+models. The erratum is not present on:
+
+ - non-Intel processors
+
+ - Some Atoms (Airmont, Bonnell, Goldmont, GoldmontPlus, Saltwell, Silvermont)
+
+ - Intel processors that have the PSCHANGE_MC_NO bit set in the
+ IA32_ARCH_CAPABILITIES MSR.
+
+
+Related CVEs
+------------
+
+The following CVE entry is related to this issue:
+
+ ============== =================================================
+ CVE-2018-12207 Machine Check Error Avoidance on Page Size Change
+ ============== =================================================
+
+
+Problem
+-------
+
+Privileged software, including the OS and virtual machine managers (VMM), is in
+charge of memory management. A key component in memory management is the control
+of the page tables. Modern processors use virtual memory, a technique that creates
+the illusion of a very large memory space. This virtual space is split
+into pages of a given size. Page tables translate virtual addresses to physical
+addresses.
+
+To reduce latency when performing a virtual to physical address translation,
+processors include a structure, called TLB, that caches recent translations.
+There are separate TLBs for instruction (iTLB) and data (dTLB).
+
+Under this erratum, instructions are fetched from a linear address translated
+using a 4 KB translation cached in the iTLB. Privileged software then modifies
+the paging structure so that the same linear address is mapped using a large
+page size (2 MB, 4 MB, 1 GB) with a different physical address or memory type.
+After the paging-structure modification but before the software invalidates
+any iTLB entries for the linear address, a code fetch on the same linear
+address may cause a machine-check error which can result in a system hang or
+shutdown.
+
+
+Attack scenarios
+----------------
+
+Attacks against the iTLB multihit erratum can be mounted from malicious
+guests in a virtualized system.
+
+
+iTLB multihit system information
+--------------------------------
+
+The Linux kernel provides a sysfs interface to enumerate the current iTLB
+multihit status of the system: whether the system is vulnerable and which
+mitigations are active. The relevant sysfs file is:
+
+/sys/devices/system/cpu/vulnerabilities/itlb_multihit
+
+The possible values in this file are:
+
+.. list-table::
+
+ * - Not affected
+ - The processor is not vulnerable.
+ * - KVM: Mitigation: Split huge pages
+ - Software changes mitigate this issue.
+ * - KVM: Vulnerable
+ - The processor is vulnerable, but no mitigation is enabled.
+
+
+Enumeration of the erratum
+--------------------------------
+
+A new bit has been allocated in the IA32_ARCH_CAPABILITIES MSR
+(PSCHANGE_MC_NO) and will be set on CPUs which are mitigated against this issue.
+
+ ======================================= =========== ===============================
+ IA32_ARCH_CAPABILITIES MSR Not present Possibly vulnerable, check model
+ IA32_ARCH_CAPABILITIES[PSCHANGE_MC_NO] '0' Likely vulnerable, check model
+ IA32_ARCH_CAPABILITIES[PSCHANGE_MC_NO] '1' Not vulnerable
+ ======================================= =========== ===============================
+
+
+Mitigation mechanism
+-------------------------
+
+This erratum can be mitigated by restricting the use of large page sizes to
+non-executable pages. This forces all iTLB entries to be 4K, and removes
+the possibility of multiple hits.
+
+In order to mitigate the vulnerability, KVM initially marks all huge pages
+as non-executable. If the guest attempts to execute in one of those pages,
+the page is broken down into 4K pages, which are then marked executable.
+
+If EPT is disabled or not available on the host, KVM is in control of TLB
+flushes and the problematic situation cannot happen. However, the shadow
+EPT paging mechanism used by nested virtualization is vulnerable, because
+the nested guest can trigger multiple iTLB hits by modifying its own
+(non-nested) page tables. For simplicity, KVM will make large pages
+non-executable in all shadow paging modes.
+
+Mitigation control on the kernel command line and KVM - module parameter
+------------------------------------------------------------------------
+
+The KVM hypervisor mitigation mechanism for marking huge pages as
+non-executable can be controlled with a module parameter "nx_huge_pages=".
+The kernel command line allows control of the iTLB multihit mitigations at
+boot time with the option "kvm.nx_huge_pages=".
+
+The valid arguments for these options are:
+
+ ========== ================================================================
+ force Mitigation is enabled. In this case, the mitigation implements
+ non-executable huge pages in Linux kernel KVM module. All huge
+ pages in the EPT are marked as non-executable.
+ If a guest attempts to execute in one of those pages, the page is
+ broken down into 4K pages, which are then marked executable.
+
+ off Mitigation is disabled.
+
+ auto Enable mitigation only if the platform is affected and the kernel
+ was not booted with the "mitigations=off" command line parameter.
+ This is the default option.
+ ========== ================================================================
+
+
+Mitigation selection guide
+--------------------------
+
+1. No virtualization in use
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ The system is protected by the kernel unconditionally and no further
+ action is required.
+
+2. Virtualization with trusted guests
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ If the guest comes from a trusted source, you may assume that the guest will
+ not attempt to maliciously exploit these errata and no further action is
+ required.
+
+3. Virtualization with untrusted guests
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ If the guest comes from an untrusted source, the host kernel will need
+ to apply iTLB multihit mitigation via the kernel command line or kvm
+ module parameter.
diff --git a/Documentation/admin-guide/hw-vuln/tsx_async_abort.rst b/Documentation/admin-guide/hw-vuln/tsx_async_abort.rst
new file mode 100644
index 000000000000..af6865b822d2
--- /dev/null
+++ b/Documentation/admin-guide/hw-vuln/tsx_async_abort.rst
@@ -0,0 +1,279 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+TAA - TSX Asynchronous Abort
+======================================
+
+TAA is a hardware vulnerability that allows unprivileged speculative access to
+data which is available in various CPU internal buffers by using asynchronous
+aborts within an Intel TSX transactional region.
+
+Affected processors
+-------------------
+
+This vulnerability only affects Intel processors that support Intel
+Transactional Synchronization Extensions (TSX) when the TAA_NO bit (bit 8)
+is 0 in the IA32_ARCH_CAPABILITIES MSR. On processors where the MDS_NO bit
+(bit 5) is 0 in the IA32_ARCH_CAPABILITIES MSR, the existing MDS mitigations
+also mitigate against TAA.
+
+Whether a processor is affected or not can be read out from the TAA
+vulnerability file in sysfs. See :ref:`tsx_async_abort_sys_info`.
+
+Related CVEs
+------------
+
+The following CVE entry is related to this TAA issue:
+
+ ============== ===== ===================================================
+ CVE-2019-11135 TAA TSX Asynchronous Abort (TAA) condition on some
+ microprocessors utilizing speculative execution may
+ allow an authenticated user to potentially enable
+ information disclosure via a side channel with
+ local access.
+ ============== ===== ===================================================
+
+Problem
+-------
+
+When performing store, load or L1 refill operations, processors write
+data into temporary microarchitectural structures (buffers). The data in
+those buffers can be forwarded to load operations as an optimization.
+
+Intel TSX is an extension to the x86 instruction set architecture that adds
+hardware transactional memory support to improve performance of multi-threaded
+software. TSX lets the processor expose and exploit concurrency hidden in an
+application by dynamically avoiding unnecessary synchronization.
+
+TSX supports atomic memory transactions that are either committed (success) or
+aborted. During an abort, operations that happened within the transactional region
+are rolled back. An asynchronous abort takes place, among other causes, when a
+different thread accesses a cache line that is also used within the transactional
+region when that access might lead to a data race.
+
+Immediately after an uncompleted asynchronous abort, certain speculatively
+executed loads may read data from those internal buffers and pass it to dependent
+operations. This can then be used to infer the value via a cache side channel
+attack.
+
+Because the buffers are potentially shared between Hyper-Threads,
+cross-Hyper-Thread attacks are possible.
+
+The victim of a malicious actor does not need to make use of TSX. Only the
+attacker needs to begin a TSX transaction and raise an asynchronous abort
+which in turn potentially leaks data stored in the buffers.
+
+More detailed technical information is available in the TAA specific x86
+architecture section: :ref:`Documentation/x86/tsx_async_abort.rst <tsx_async_abort>`.
+
+
+Attack scenarios
+----------------
+
+Attacks against the TAA vulnerability can be implemented from unprivileged
+applications running on hosts or guests.
+
+As for MDS, the attacker has no control over the memory addresses that can
+be leaked. Only the victim is responsible for bringing data to the CPU. As
+a result, the malicious actor has to sample as much data as possible and
+then postprocess it to try to infer any useful information from it.
+
+A potential attacker only has read access to the data. Also, there is no direct
+privilege escalation by using this technique.
+
+
+.. _tsx_async_abort_sys_info:
+
+TAA system information
+-----------------------
+
+The Linux kernel provides a sysfs interface to enumerate the current TAA status
+of mitigated systems. The relevant sysfs file is:
+
+/sys/devices/system/cpu/vulnerabilities/tsx_async_abort
+
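+The current state can be queried with a plain read of that file, for
+example::
+
+    cat /sys/devices/system/cpu/vulnerabilities/tsx_async_abort
+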
+The possible values in this file are:
+
+.. list-table::
+
+ * - 'Vulnerable'
+ - The CPU is affected by this vulnerability and the microcode and kernel mitigation are not applied.
+ * - 'Vulnerable: Clear CPU buffers attempted, no microcode'
+ - The system tries to clear the buffers but the microcode might not support the operation.
+ * - 'Mitigation: Clear CPU buffers'
+ - The microcode has been updated to clear the buffers. TSX is still enabled.
+ * - 'Mitigation: TSX disabled'
+ - TSX is disabled.
+ * - 'Not affected'
+ - The CPU is not affected by this issue.
+
+.. _ucode_needed:
+
+Best effort mitigation mode
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+If the processor is vulnerable, but the availability of the microcode-based
+mitigation mechanism is not advertised via CPUID, the kernel selects a best
+effort mitigation mode. This mode invokes the mitigation instructions
+without a guarantee that they clear the CPU buffers.
+
+This is done to address virtualization scenarios where the host has the
+microcode update applied, but the hypervisor is not yet updated to expose the
+CPUID to the guest. If the host has updated microcode the protection takes
+effect; otherwise a few CPU cycles are wasted.
+
+The state in the tsx_async_abort sysfs file reflects this situation
+accordingly.
+
+
+Mitigation mechanism
+--------------------
+
+The kernel detects the affected CPUs and the presence of the microcode which is
+required. If a CPU is affected and the microcode is available, then the kernel
+enables the mitigation by default.
+
+
+The mitigation can be controlled at boot time via a kernel command line option.
+See :ref:`taa_mitigation_control_command_line`.
+
+.. _virt_mechanism:
+
+Virtualization mitigation
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Affected systems where the host has the TAA microcode and TAA is mitigated by
+having previously disabled TSX are not vulnerable, regardless of the status
+of the VMs.
+
+In all other cases, if the host either does not have the TAA microcode or
+the kernel is not mitigated, the system might be vulnerable.
+
+
+.. _taa_mitigation_control_command_line:
+
+Mitigation control on the kernel command line
+---------------------------------------------
+
+The kernel command line allows controlling the TAA mitigations at boot time with
+the option "tsx_async_abort=". The valid arguments for this option are:
+
+ ============ =============================================================
+ off This option disables the TAA mitigation on affected platforms.
+ If the system has TSX enabled (see next parameter) and the CPU
+ is affected, the system is vulnerable.
+
+ full TAA mitigation is enabled. If TSX is enabled, on an affected
+ system it will clear CPU buffers on ring transitions. On
+ systems which are MDS-affected and deploy MDS mitigation,
+ TAA is also mitigated. Specifying this option on those
+ systems will have no effect.
+
+ full,nosmt The same as tsx_async_abort=full, with SMT disabled on
+ vulnerable CPUs that have TSX enabled. This is the complete
+ mitigation. When TSX is disabled, SMT is not disabled because
+              the CPU is not vulnerable to cross-thread TAA attacks.
+ ============ =============================================================
+
+Not specifying this option is equivalent to "tsx_async_abort=full". For
+processors that are affected by both TAA and MDS, specifying just
+"tsx_async_abort=off" without an accompanying "mds=off" will have no
+effect, as the same mitigation is used for both vulnerabilities.
+
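+For example, to actually turn the mitigation off on a processor affected by
+both issues, both options have to be specified together on the kernel
+command line::
+
+    tsx_async_abort=off mds=off
+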
+The kernel command line also allows controlling the TSX feature using the
+parameter "tsx=" on CPUs which support TSX control. MSR_IA32_TSX_CTRL is used
+to control the TSX feature and the enumeration of the TSX feature bits (RTM
+and HLE) in CPUID.
+
+The valid options are:
+
+ ============ =============================================================
+ off Disables TSX on the system.
+
+ Note that this option takes effect only on newer CPUs which are
+ not vulnerable to MDS, i.e., have MSR_IA32_ARCH_CAPABILITIES.MDS_NO=1
+ and which get the new IA32_TSX_CTRL MSR through a microcode
+ update. This new MSR allows for the reliable deactivation of
+ the TSX functionality.
+
+ on Enables TSX.
+
+ Although there are mitigations for all known security
+ vulnerabilities, TSX has been known to be an accelerator for
+ several previous speculation-related CVEs, and so there may be
+ unknown security risks associated with leaving it enabled.
+
+ auto Disables TSX if X86_BUG_TAA is present, otherwise enables TSX
+ on the system.
+ ============ =============================================================
+
+Not specifying this option is equivalent to "tsx=off".
+
+The following combinations of the "tsx_async_abort" and "tsx" options are
+possible. For affected platforms, tsx=auto is equivalent to tsx=off and the
+result will be:
+
+ ========= ========================== =========================================
+ tsx=on tsx_async_abort=full The system will use VERW to clear CPU
+ buffers. Cross-thread attacks are still
+ possible on SMT machines.
+ tsx=on tsx_async_abort=full,nosmt As above, cross-thread attacks on SMT
+ mitigated.
+ tsx=on tsx_async_abort=off The system is vulnerable.
+ tsx=off tsx_async_abort=full TSX might be disabled if microcode
+ provides a TSX control MSR. If so,
+ system is not vulnerable.
+ tsx=off tsx_async_abort=full,nosmt Ditto
+ tsx=off   tsx_async_abort=off        Ditto
+ ========= ========================== =========================================
+
+
+For unaffected platforms "tsx=on" and "tsx_async_abort=full" do not clear CPU
+buffers. For platforms without TSX control (MSR_IA32_ARCH_CAPABILITIES.MDS_NO=0)
+the "tsx" command line argument has no effect.
+
+For the affected platforms, the table below indicates the mitigation status
+for the combinations of CPUID bit MD_CLEAR and the IA32_ARCH_CAPABILITIES MSR
+bits MDS_NO and TSX_CTRL_MSR.
+
+ ======= ========= ============= ========================================
+ MDS_NO MD_CLEAR TSX_CTRL_MSR Status
+ ======= ========= ============= ========================================
+ 0 0 0 Vulnerable (needs microcode)
+ 0 1 0 MDS and TAA mitigated via VERW
+ 1 1 0 MDS fixed, TAA vulnerable if TSX enabled
+ because MD_CLEAR has no meaning and
+ VERW is not guaranteed to clear buffers
+ 1 X 1 MDS fixed, TAA can be mitigated by
+ VERW or TSX_CTRL_MSR
+ ======= ========= ============= ========================================
+
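+The IA32_ARCH_CAPABILITIES MSR bits can be inspected from userspace, for
+example with the rdmsr utility from msr-tools (a sketch; it needs the msr
+kernel module, and IA32_ARCH_CAPABILITIES is MSR 0x10a, with MDS_NO at bit 5
+and TAA_NO at bit 8 as noted above, and TSX_CTRL at bit 7)::
+
+    # load the MSR access driver, then dump IA32_ARCH_CAPABILITIES
+    modprobe msr
+    rdmsr 0x10a
+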
+Mitigation selection guide
+--------------------------
+
+1. Trusted userspace and guests
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+If all user space applications are from a trusted source and do not execute
+untrusted code which is supplied externally, then the mitigation can be
+disabled. The same applies to virtualized environments with trusted guests.
+
+
+2. Untrusted userspace and guests
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+If there are untrusted applications or guests on the system, enabling TSX
+might allow a malicious actor to leak data from the host or from other
+processes running on the same physical core.
+
+If the microcode is available and TSX is disabled on the host, attacks
+are prevented in a virtualized environment as well, even if the VMs do not
+explicitly enable the mitigation.
+
+
+.. _taa_default_mitigations:
+
+Default mitigations
+-------------------
+
+The kernel's default action for vulnerable processors is:
+
+ - Deploy TSX disable mitigation (tsx_async_abort=full tsx=off).
diff --git a/Documentation/admin-guide/iostats.rst b/Documentation/admin-guide/iostats.rst
index 5d63b18bd6d1..4f0462af3ca7 100644
--- a/Documentation/admin-guide/iostats.rst
+++ b/Documentation/admin-guide/iostats.rst
@@ -121,6 +121,15 @@ Field 15 -- # of milliseconds spent discarding
This is the total number of milliseconds spent by all discards (as
measured from __make_request() to end_that_request_last()).
+Field 16 -- # of flush requests completed
+ This is the total number of flush requests completed successfully.
+
+    The block layer combines flush requests and executes at most one at a
+    time. This counts flush requests executed by the disk. Not tracked for
+    partitions.
+
+Field 17 -- # of milliseconds spent flushing
+ This is the total number of milliseconds spent by all flush requests.
+
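+For example, the two flush fields can be extracted per device with awk;
+/proc/diskstats prefixes each line with the major and minor numbers and the
+device name, so field 16 is column 19 and field 17 is column 20 (kernels
+without these fields print fewer columns)::
+
+    awk '{ print $3, $19, $20 }' /proc/diskstats
+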
To avoid introducing performance bottlenecks, no locks are held while
modifying these counters. This implies that minor inaccuracies may be
introduced when changes collide, so (for instance) adding up all the
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index a84a83f8881e..1fda057434c3 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -1168,7 +1168,8 @@
Format: {"off" | "on" | "skip[mbr]"}
efi= [EFI]
- Format: { "old_map", "nochunk", "noruntime", "debug" }
+ Format: { "old_map", "nochunk", "noruntime", "debug",
+ "nosoftreserve" }
old_map [X86-64]: switch to the old ioremap-based EFI
runtime services mapping. 32-bit still uses this one by
default.
@@ -1177,6 +1178,12 @@
firmware implementations.
noruntime : disable EFI runtime services support
debug: enable misc debug output
+ nosoftreserve: The EFI_MEMORY_SP (Specific Purpose)
+ attribute may cause the kernel to reserve the
+ memory range for a memory mapping driver to
+ claim. Specify efi=nosoftreserve to disable this
+ reservation and treat the memory by its base type
+ (i.e. EFI_CONVENTIONAL_MEMORY / "System RAM").
efi_no_storage_paranoia [EFI; X86]
Using this parameter you can use more than 50% of
@@ -1189,15 +1196,21 @@
updating original EFI memory map.
Region of memory which aa attribute is added to is
from ss to ss+nn.
+
If efi_fake_mem=2G@4G:0x10000,2G@0x10a0000000:0x10000
is specified, EFI_MEMORY_MORE_RELIABLE(0x10000)
attribute is added to range 0x100000000-0x180000000 and
0x10a0000000-0x1120000000.
+ If efi_fake_mem=8G@9G:0x40000 is specified, the
+ EFI_MEMORY_SP(0x40000) attribute is added to
+ range 0x240000000-0x43fffffff.
+
Using this parameter you can do debugging of EFI memmap
- related feature. For example, you can do debugging of
+ related features. For example, you can do debugging of
Address Range Mirroring feature even if your box
- doesn't support it.
+ doesn't support it, or mark specific memory as
+ "soft reserved".
efivar_ssdt= [EFI; X86] Name of an EFI variable that contains an SSDT
that is to be dynamically loaded by Linux. If there are
@@ -2055,6 +2068,25 @@
KVM MMU at runtime.
Default is 0 (off)
+ kvm.nx_huge_pages=
+ [KVM] Controls the software workaround for the
+ X86_BUG_ITLB_MULTIHIT bug.
+ force : Always deploy workaround.
+ off : Never deploy workaround.
+ auto : Deploy workaround based on the presence of
+ X86_BUG_ITLB_MULTIHIT.
+
+ Default is 'auto'.
+
+ If the software workaround is enabled for the host,
+			guests need not enable it for nested guests.
+
+ kvm.nx_huge_pages_recovery_ratio=
+ [KVM] Controls how many 4KiB pages are periodically zapped
+			back to huge pages. 0 disables the recovery; otherwise, if
+			the value is N, KVM will zap 1/Nth of the 4KiB pages every
+ minute. The default is 60.
+
kvm-amd.nested= [KVM,AMD] Allow nested virtualization in KVM/SVM.
Default is 1 (enabled)
@@ -2454,6 +2486,12 @@
SMT on vulnerable CPUs
off - Unconditionally disable MDS mitigation
+ On TAA-affected machines, mds=off can be prevented by
+ an active TAA mitigation as both vulnerabilities are
+			mitigated with the same mechanism, so in order to disable
+ this mitigation, you need to specify tsx_async_abort=off
+ too.
+
Not specifying this option is equivalent to
mds=full.
@@ -2636,6 +2674,13 @@
ssbd=force-off [ARM64]
l1tf=off [X86]
mds=off [X86]
+ tsx_async_abort=off [X86]
+ kvm.nx_huge_pages=off [X86]
+
+ Exceptions:
+ This does not have any effect on
+ kvm.nx_huge_pages when
+ kvm.nx_huge_pages=force.
auto (default)
Mitigate all CPU vulnerabilities, but leave SMT
@@ -2651,6 +2696,7 @@
be fully mitigated, even if it means losing SMT.
Equivalent to: l1tf=flush,nosmt [X86]
mds=full,nosmt [X86]
+ tsx_async_abort=full,nosmt [X86]
mminit_loglevel=
[KNL] When CONFIG_DEBUG_MEMORY_INIT is set, this
@@ -3083,9 +3129,9 @@
[X86,PV_OPS] Disable paravirtualized VMware scheduler
clock and use the default one.
- no-steal-acc [X86,KVM] Disable paravirtualized steal time accounting.
- steal time is computed, but won't influence scheduler
- behaviour
+ no-steal-acc [X86,KVM,ARM64] Disable paravirtualized steal time
+			accounting. Steal time is computed, but won't
+ influence scheduler behaviour
nolapic [X86-32,APIC] Do not enable or use the local APIC.
@@ -4848,6 +4894,76 @@
interruptions from clocksource watchdog are not
acceptable).
+ tsx= [X86] Control Transactional Synchronization
+ Extensions (TSX) feature in Intel processors that
+ support TSX control.
+
+ This parameter controls the TSX feature. The options are:
+
+ on - Enable TSX on the system. Although there are
+ mitigations for all known security vulnerabilities,
+ TSX has been known to be an accelerator for
+ several previous speculation-related CVEs, and
+ so there may be unknown security risks associated
+ with leaving it enabled.
+
+ off - Disable TSX on the system. (Note that this
+ option takes effect only on newer CPUs which are
+ not vulnerable to MDS, i.e., have
+ MSR_IA32_ARCH_CAPABILITIES.MDS_NO=1 and which get
+ the new IA32_TSX_CTRL MSR through a microcode
+ update. This new MSR allows for the reliable
+ deactivation of the TSX functionality.)
+
+ auto - Disable TSX if X86_BUG_TAA is present,
+ otherwise enable TSX on the system.
+
+ Not specifying this option is equivalent to tsx=off.
+
+ See Documentation/admin-guide/hw-vuln/tsx_async_abort.rst
+ for more details.
+
+ tsx_async_abort= [X86,INTEL] Control mitigation for the TSX Async
+ Abort (TAA) vulnerability.
+
+			Similar to Micro-architectural Data Sampling (MDS),
+ certain CPUs that support Transactional
+ Synchronization Extensions (TSX) are vulnerable to an
+ exploit against CPU internal buffers which can forward
+ information to a disclosure gadget under certain
+ conditions.
+
+ In vulnerable processors, the speculatively forwarded
+ data can be used in a cache side channel attack, to
+ access data to which the attacker does not have direct
+ access.
+
+ This parameter controls the TAA mitigation. The
+ options are:
+
+ full - Enable TAA mitigation on vulnerable CPUs
+ if TSX is enabled.
+
+ full,nosmt - Enable TAA mitigation and disable SMT on
+ vulnerable CPUs. If TSX is disabled, SMT
+				    is not disabled because the CPU is not
+ vulnerable to cross-thread TAA attacks.
+ off - Unconditionally disable TAA mitigation
+
+ On MDS-affected machines, tsx_async_abort=off can be
+ prevented by an active MDS mitigation as both vulnerabilities
+			are mitigated with the same mechanism, so in order to disable
+ this mitigation, you need to specify mds=off too.
+
+ Not specifying this option is equivalent to
+ tsx_async_abort=full. On CPUs which are MDS affected
+ and deploy MDS mitigation, TAA mitigation is not
+ required and doesn't provide any additional
+ mitigation.
+
+ For details see:
+ Documentation/admin-guide/hw-vuln/tsx_async_abort.rst
+
turbografx.map[2|3]= [HW,JOY]
TurboGraFX parallel port interface
Format:
@@ -4998,13 +5114,13 @@
Flags is a set of characters, each corresponding
to a common usb-storage quirk flag as follows:
a = SANE_SENSE (collect more than 18 bytes
- of sense data);
+ of sense data, not on uas);
b = BAD_SENSE (don't collect more than 18
- bytes of sense data);
+ bytes of sense data, not on uas);
c = FIX_CAPACITY (decrease the reported
device capacity by one sector);
d = NO_READ_DISC_INFO (don't use
- READ_DISC_INFO command);
+ READ_DISC_INFO command, not on uas);
e = NO_READ_CAPACITY_16 (don't use
READ_CAPACITY_16 command);
f = NO_REPORT_OPCODES (don't use report opcodes
@@ -5019,17 +5135,18 @@
j = NO_REPORT_LUNS (don't use report luns
command, uas only);
l = NOT_LOCKABLE (don't try to lock and
- unlock ejectable media);
+ unlock ejectable media, not on uas);
m = MAX_SECTORS_64 (don't transfer more
- than 64 sectors = 32 KB at a time);
+ than 64 sectors = 32 KB at a time,
+ not on uas);
n = INITIAL_READ10 (force a retry of the
- initial READ(10) command);
+ initial READ(10) command, not on uas);
o = CAPACITY_OK (accept the capacity
- reported by the device);
+ reported by the device, not on uas);
p = WRITE_CACHE (the device cache is ON
- by default);
+ by default, not on uas);
r = IGNORE_RESIDUE (the device reports
- bogus residue values);
+ bogus residue values, not on uas);
s = SINGLE_LUN (the device has only one
Logical Unit);
t = NO_ATA_1X (don't allow ATA(12) and ATA(16)
@@ -5038,7 +5155,8 @@
w = NO_WP_DETECT (don't test whether the
medium is write-protected).
y = ALWAYS_SYNC (issue a SYNCHRONIZE_CACHE
- even if the device claims no cache)
+ even if the device claims no cache,
+ not on uas)
Example: quirks=0419:aaf5:rl,0421:0433:rc
user_debug= [KNL,ARM]
diff --git a/Documentation/admin-guide/perf/imx-ddr.rst b/Documentation/admin-guide/perf/imx-ddr.rst
index 517a205abad6..90056e4e8859 100644
--- a/Documentation/admin-guide/perf/imx-ddr.rst
+++ b/Documentation/admin-guide/perf/imx-ddr.rst
@@ -17,7 +17,8 @@ The "format" directory describes format of the config (event ID) and config1
(AXI filtering) fields of the perf_event_attr structure, see /sys/bus/event_source/
devices/imx8_ddr0/format/. The "events" directory describes the event types
supported by the hardware that can be used with the perf tool, see /sys/bus/event_source/
-devices/imx8_ddr0/events/.
+devices/imx8_ddr0/events/. The "caps" directory describes the filter features
+implemented in the DDR PMU, see /sys/bus/event_source/devices/imx8_ddr0/caps/.
e.g.::
perf stat -a -e imx8_ddr0/cycles/ cmd
perf stat -a -e imx8_ddr0/read/,imx8_ddr0/write/ cmd
@@ -25,9 +26,12 @@ devices/imx8_ddr0/events/.
AXI filtering is only used by CSV modes 0x41 (axid-read) and 0x42 (axid-write)
to count read or write accesses that match the filter setting. The filter
setting varies between DRAM controller implementations, which is distinguished by quirks
-in the driver.
+in the driver. The filter capabilities can also be read from userspace: the
+"filter" entry in the "caps" directory indicates whether the PMU supports AXI ID
+filtering, and "enhanced_filter" indicates whether it supports enhanced AXI ID
+filtering. A value of 0 means unsupported, a value of 1 means supported.
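+
+The capability flags can be read directly from sysfs, for example::
+
+    cat /sys/bus/event_source/devices/imx8_ddr0/caps/filter
+    cat /sys/bus/event_source/devices/imx8_ddr0/caps/enhanced_filter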
-* With DDR_CAP_AXI_ID_FILTER quirk.
+* With DDR_CAP_AXI_ID_FILTER quirk (filter: 1, enhanced_filter: 0).
Filter is defined with two configuration parts:
--AXI_ID defines AxID matching value.
--AXI_MASKING defines which bits of AxID are meaningful for the matching.
@@ -50,3 +54,8 @@ in the driver.
axi_id to monitor a specific id, rather than having to specify axi_mask.
e.g.::
perf stat -a -e imx8_ddr0/axid-read,axi_id=0x12/ cmd, which will monitor ARID=0x12
+
+* With DDR_CAP_AXI_ID_FILTER_ENHANCED quirk (filter: 1, enhanced_filter: 1).
+ This is an extension to the DDR_CAP_AXI_ID_FILTER quirk which permits
+ counting the number of bytes (as opposed to the number of bursts) from DDR
+ read and write transactions concurrently with another set of data counters.
diff --git a/Documentation/admin-guide/perf/thunderx2-pmu.rst b/Documentation/admin-guide/perf/thunderx2-pmu.rst
index 08e33675853a..01f158238ae1 100644
--- a/Documentation/admin-guide/perf/thunderx2-pmu.rst
+++ b/Documentation/admin-guide/perf/thunderx2-pmu.rst
@@ -3,24 +3,26 @@ Cavium ThunderX2 SoC Performance Monitoring Unit (PMU UNCORE)
=============================================================
The ThunderX2 SoC PMU consists of independent, system-wide, per-socket
-PMUs such as the Level 3 Cache (L3C) and DDR4 Memory Controller (DMC).
+PMUs such as the Level 3 Cache (L3C), DDR4 Memory Controller (DMC) and
+Cavium Coherent Processor Interconnect (CCPI2).
The DMC has 8 interleaved channels and the L3C has 16 interleaved tiles.
Events are counted for the default channel (i.e. channel 0) and prorated
to the total number of channels/tiles.
-The DMC and L3C support up to 4 counters. Counters are independently
-programmable and can be started and stopped individually. Each counter
-can be set to a different event. Counters are 32-bit and do not support
-an overflow interrupt; they are read every 2 seconds.
+The DMC and L3C support up to 4 counters, while the CCPI2 supports up to 8
+counters. Counters are independently programmable to different events and
+can be started and stopped individually. None of the counters support an
+overflow interrupt. DMC and L3C counters are 32-bit and read every 2 seconds.
+The CCPI2 counters are 64-bit and assumed not to overflow in normal operation.
PMU UNCORE (perf) driver:
The thunderx2_pmu driver registers per-socket perf PMUs for the DMC and
-L3C devices. Each PMU can be used to count up to 4 events
-simultaneously. The PMUs provide a description of their available events
-and configuration options under sysfs, see
-/sys/devices/uncore_<l3c_S/dmc_S/>; S is the socket id.
+L3C devices. Each PMU can be used to count up to 4 (DMC/L3C) or up to 8
+(CCPI2) events simultaneously. The PMUs provide a description of their
+available events and configuration options under sysfs, see
+/sys/devices/uncore_<l3c_S/dmc_S/ccpi2_S/>; S is the socket id.
The driver does not support sampling, therefore "perf record" will not
work. Per-task perf sessions are also not supported.
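+
+Counting events with perf stat works as usual; a sketch (the uncore_dmc_0
+instance name and the cnt_cycles event are examples only -- the actual names
+are listed under the sysfs paths above)::
+
+    perf stat -a -e uncore_dmc_0/cnt_cycles/ sleep 1
+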
diff --git a/Documentation/admin-guide/ras.rst b/Documentation/admin-guide/ras.rst
index 2b20f5f7380d..0310db624964 100644
--- a/Documentation/admin-guide/ras.rst
+++ b/Documentation/admin-guide/ras.rst
@@ -330,9 +330,12 @@ There can be multiple csrows and multiple channels.
.. [#f4] Nowadays, the term DIMM (Dual In-line Memory Module) is widely
used to refer to a memory module, although there are other memory
- packaging alternatives, like SO-DIMM, SIMM, etc. Along this document,
- and inside the EDAC system, the term "dimm" is used for all memory
- modules, even when they use a different kind of packaging.
+ packaging alternatives, like SO-DIMM, SIMM, etc. The UEFI
+ specification (Version 2.7) defines a memory module in the Common
+ Platform Error Record (CPER) section to be an SMBIOS Memory Device
+   (Type 17). Throughout this document, and inside the EDAC subsystem, the
+   term "dimm" is used for all memory modules, even when they use a
+ different kind of packaging.
Memory controllers allow for several csrows, with 8 csrows being a
typical value. Yet, the actual number of csrows depends on the layout of
@@ -349,12 +352,14 @@ controllers. The following example will assume 2 channels:
| | ``ch0`` | ``ch1`` |
+============+===========+===========+
| ``csrow0`` | DIMM_A0 | DIMM_B0 |
- +------------+ | |
- | ``csrow1`` | | |
+ | | rank0 | rank0 |
+ +------------+ - | - |
+ | ``csrow1`` | rank1 | rank1 |
+------------+-----------+-----------+
| ``csrow2`` | DIMM_A1 | DIMM_B1 |
- +------------+ | |
- | ``csrow3`` | | |
+ | | rank0 | rank0 |
+ +------------+ - | - |
+ | ``csrow3`` | rank1 | rank1 |
+------------+-----------+-----------+
In the above example, there are 4 physical slots on the motherboard
@@ -374,11 +379,13 @@ which the memory DIMM is placed. Thus, when 1 DIMM is placed in each
Channel, the csrows cross both DIMMs.
Memory DIMMs come single or dual "ranked". A rank is a populated csrow.
-Thus, 2 single ranked DIMMs, placed in slots DIMM_A0 and DIMM_B0 above
-will have just one csrow (csrow0). csrow1 will be empty. On the other
-hand, when 2 dual ranked DIMMs are similarly placed, then both csrow0
-and csrow1 will be populated. The pattern repeats itself for csrow2 and
-csrow3.
+In the example above, 2 dual-ranked DIMMs are similarly placed. Thus,
+both csrow0 and csrow1 are populated. On the other hand, when 2
+single-ranked DIMMs are placed in slots DIMM_A0 and DIMM_B0, they will
+have just one csrow (csrow0) and csrow1 will be empty. The pattern
+repeats itself for csrow2 and csrow3. Also note that some memory
+controllers don't have any logic to identify the memory module; see the
+``rankX`` directories below.
The representation of the above is reflected in the directory
tree in EDAC's sysfs interface. Starting in directory
diff --git a/Documentation/arm64/booting.rst b/Documentation/arm64/booting.rst
index d3f3a60fbf25..5d78a6f5b0ae 100644
--- a/Documentation/arm64/booting.rst
+++ b/Documentation/arm64/booting.rst
@@ -213,6 +213,9 @@ Before jumping into the kernel, the following conditions must be met:
- ICC_SRE_EL3.Enable (bit 3) must be initialised to 0b1.
- ICC_SRE_EL3.SRE (bit 0) must be initialised to 0b1.
+ - ICC_CTLR_EL3.PMHE (bit 6) must be set to the same value across
+ all CPUs the kernel is executing on, and must stay constant
+ for the lifetime of the kernel.
- If the kernel is entered at EL1:
diff --git a/Documentation/arm64/cpu-feature-registers.rst b/Documentation/arm64/cpu-feature-registers.rst
index 2955287e9acc..b6e44884e3ad 100644
--- a/Documentation/arm64/cpu-feature-registers.rst
+++ b/Documentation/arm64/cpu-feature-registers.rst
@@ -168,8 +168,15 @@ infrastructure:
+------------------------------+---------+---------+
- 3) MIDR_EL1 - Main ID Register
+ 3) ID_AA64PFR1_EL1 - Processor Feature Register 1
+ +------------------------------+---------+---------+
+ | Name | bits | visible |
+ +------------------------------+---------+---------+
+ | SSBS | [7-4] | y |
+ +------------------------------+---------+---------+
+
+ 4) MIDR_EL1 - Main ID Register
+------------------------------+---------+---------+
| Name | bits | visible |
+------------------------------+---------+---------+
@@ -188,11 +195,15 @@ infrastructure:
as available on the CPU where it is fetched and is not a system
wide safe value.
- 4) ID_AA64ISAR1_EL1 - Instruction set attribute register 1
+ 5) ID_AA64ISAR1_EL1 - Instruction set attribute register 1
+------------------------------+---------+---------+
| Name | bits | visible |
+------------------------------+---------+---------+
+ | SB | [39-36] | y |
+ +------------------------------+---------+---------+
+ | FRINTTS | [35-32] | y |
+ +------------------------------+---------+---------+
| GPI | [31-28] | y |
+------------------------------+---------+---------+
| GPA | [27-24] | y |
@@ -210,7 +221,7 @@ infrastructure:
| DPB | [3-0] | y |
+------------------------------+---------+---------+
- 5) ID_AA64MMFR2_EL1 - Memory model feature register 2
+ 6) ID_AA64MMFR2_EL1 - Memory model feature register 2
+------------------------------+---------+---------+
| Name | bits | visible |
@@ -218,7 +229,7 @@ infrastructure:
| AT | [35-32] | y |
+------------------------------+---------+---------+
- 6) ID_AA64ZFR0_EL1 - SVE feature ID register 0
+ 7) ID_AA64ZFR0_EL1 - SVE feature ID register 0
+------------------------------+---------+---------+
| Name | bits | visible |
diff --git a/Documentation/arm64/elf_hwcaps.rst b/Documentation/arm64/elf_hwcaps.rst
index 91f79529c58c..7fa3d215ae6a 100644
--- a/Documentation/arm64/elf_hwcaps.rst
+++ b/Documentation/arm64/elf_hwcaps.rst
@@ -119,10 +119,6 @@ HWCAP_LRCPC
HWCAP_DCPOP
Functionality implied by ID_AA64ISAR1_EL1.DPB == 0b0001.
-HWCAP2_DCPODP
-
- Functionality implied by ID_AA64ISAR1_EL1.DPB == 0b0010.
-
HWCAP_SHA3
Functionality implied by ID_AA64ISAR0_EL1.SHA3 == 0b0001.
@@ -141,30 +137,6 @@ HWCAP_SHA512
HWCAP_SVE
Functionality implied by ID_AA64PFR0_EL1.SVE == 0b0001.
-HWCAP2_SVE2
-
- Functionality implied by ID_AA64ZFR0_EL1.SVEVer == 0b0001.
-
-HWCAP2_SVEAES
-
- Functionality implied by ID_AA64ZFR0_EL1.AES == 0b0001.
-
-HWCAP2_SVEPMULL
-
- Functionality implied by ID_AA64ZFR0_EL1.AES == 0b0010.
-
-HWCAP2_SVEBITPERM
-
- Functionality implied by ID_AA64ZFR0_EL1.BitPerm == 0b0001.
-
-HWCAP2_SVESHA3
-
- Functionality implied by ID_AA64ZFR0_EL1.SHA3 == 0b0001.
-
-HWCAP2_SVESM4
-
- Functionality implied by ID_AA64ZFR0_EL1.SM4 == 0b0001.
-
HWCAP_ASIMDFHM
Functionality implied by ID_AA64ISAR0_EL1.FHM == 0b0001.
@@ -180,13 +152,12 @@ HWCAP_ILRCPC
HWCAP_FLAGM
Functionality implied by ID_AA64ISAR0_EL1.TS == 0b0001.
-HWCAP2_FLAGM2
-
- Functionality implied by ID_AA64ISAR0_EL1.TS == 0b0010.
-
HWCAP_SSBS
Functionality implied by ID_AA64PFR1_EL1.SSBS == 0b0010.
+HWCAP_SB
+ Functionality implied by ID_AA64ISAR1_EL1.SB == 0b0001.
+
HWCAP_PACA
Functionality implied by ID_AA64ISAR1_EL1.APA == 0b0001 or
ID_AA64ISAR1_EL1.API == 0b0001, as described by
@@ -197,6 +168,38 @@ HWCAP_PACG
ID_AA64ISAR1_EL1.GPI == 0b0001, as described by
Documentation/arm64/pointer-authentication.rst.
+HWCAP2_DCPODP
+
+ Functionality implied by ID_AA64ISAR1_EL1.DPB == 0b0010.
+
+HWCAP2_SVE2
+
+ Functionality implied by ID_AA64ZFR0_EL1.SVEVer == 0b0001.
+
+HWCAP2_SVEAES
+
+ Functionality implied by ID_AA64ZFR0_EL1.AES == 0b0001.
+
+HWCAP2_SVEPMULL
+
+ Functionality implied by ID_AA64ZFR0_EL1.AES == 0b0010.
+
+HWCAP2_SVEBITPERM
+
+ Functionality implied by ID_AA64ZFR0_EL1.BitPerm == 0b0001.
+
+HWCAP2_SVESHA3
+
+ Functionality implied by ID_AA64ZFR0_EL1.SHA3 == 0b0001.
+
+HWCAP2_SVESM4
+
+ Functionality implied by ID_AA64ZFR0_EL1.SM4 == 0b0001.
+
+HWCAP2_FLAGM2
+
+ Functionality implied by ID_AA64ISAR0_EL1.TS == 0b0010.
+
HWCAP2_FRINT
Functionality implied by ID_AA64ISAR1_EL1.FRINTTS == 0b0001.
diff --git a/Documentation/arm64/silicon-errata.rst b/Documentation/arm64/silicon-errata.rst
index 5a09661330fc..99b2545455ff 100644
--- a/Documentation/arm64/silicon-errata.rst
+++ b/Documentation/arm64/silicon-errata.rst
@@ -70,8 +70,12 @@ stable kernels.
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A57 | #834220 | ARM64_ERRATUM_834220 |
+----------------+-----------------+-----------------+-----------------------------+
+| ARM | Cortex-A57 | #1319537 | ARM64_ERRATUM_1319367 |
++----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A72 | #853709 | N/A |
+----------------+-----------------+-----------------+-----------------------------+
+| ARM | Cortex-A72 | #1319367 | ARM64_ERRATUM_1319367 |
++----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A73 | #858921 | ARM64_ERRATUM_858921 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A55 | #1024718 | ARM64_ERRATUM_1024718 |
@@ -88,6 +92,8 @@ stable kernels.
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Neoverse-N1 | #1349291 | N/A |
+----------------+-----------------+-----------------+-----------------------------+
+| ARM | Neoverse-N1 | #1542419 | ARM64_ERRATUM_1542419 |
++----------------+-----------------+-----------------+-----------------------------+
| ARM | MMU-500 | #841119,826419 | N/A |
+----------------+-----------------+-----------------+-----------------------------+
+----------------+-----------------+-----------------+-----------------------------+
diff --git a/Documentation/asm-annotations.rst b/Documentation/asm-annotations.rst
new file mode 100644
index 000000000000..f55c2bb74d00
--- /dev/null
+++ b/Documentation/asm-annotations.rst
@@ -0,0 +1,216 @@
+Assembler Annotations
+=====================
+
+Copyright (c) 2017-2019 Jiri Slaby
+
+This document describes the new macros for annotation of data and code in
+assembly. In particular, it contains information about ``SYM_FUNC_START``,
+``SYM_FUNC_END``, ``SYM_CODE_START``, and similar.
+
+Rationale
+---------
+Some code like entries, trampolines, or boot code needs to be written in
+assembly. As in C, such code is grouped into functions and accompanied by
+data. Standard assemblers do not force users to precisely mark these pieces
+as code or data, or even to specify their length. Nevertheless, assemblers
+provide such annotations to aid debuggers throughout the assembly code. On
+top of that, developers also want to mark some functions as *global* in
+order to make them visible outside of their translation units.
+
+Over time, the Linux kernel has adopted macros from various projects (like
+``binutils``) to facilitate such annotations. So for historic reasons,
+developers have been using ``ENTRY``, ``END``, ``ENDPROC``, and other
+annotations in assembly. Due to the lack of documentation, the macros have
+been used in the wrong contexts in some places. Clearly, ``ENTRY`` was
+intended to denote the beginning of global symbols (be it data or code).
+``END`` used to mark the end of data or end of special functions with
+*non-standard* calling convention. In contrast, ``ENDPROC`` should annotate
+only ends of *standard* functions.
+
+When these macros are used correctly, they help assemblers generate a nice
+object with both sizes and types set correctly. For example, the result of
+``arch/x86/lib/putuser.S``::
+
+ Num: Value Size Type Bind Vis Ndx Name
+ 25: 0000000000000000 33 FUNC GLOBAL DEFAULT 1 __put_user_1
+ 29: 0000000000000030 37 FUNC GLOBAL DEFAULT 1 __put_user_2
+ 32: 0000000000000060 36 FUNC GLOBAL DEFAULT 1 __put_user_4
+ 35: 0000000000000090 37 FUNC GLOBAL DEFAULT 1 __put_user_8
+
+This is not only important for debugging purposes. When there are properly
+annotated objects like this, tools can be run on them to generate more useful
+information. In particular, on properly annotated objects, ``objtool`` can be
+run to check and fix the object if needed. Currently, ``objtool`` can report
+missing frame pointer setup/destruction in functions. It can also
+automatically generate annotations for :doc:`ORC unwinder <x86/orc-unwinder>`
+for most code. Both of these are especially important to support reliable
+stack traces which are in turn necessary for :doc:`Kernel live patching
+<livepatch/livepatch>`.
+
+Caveat and Discussion
+---------------------
+As one might realize, there were only three macros previously. That is indeed
+insufficient to cover all the combinations of cases:
+
+* standard/non-standard function
+* code/data
+* global/local symbol
+
+There was a discussion_, and instead of extending the current ``ENTRY/END*``
+macros, it was decided that brand new macros should be introduced::
+
+ So how about using macro names that actually show the purpose, instead
+ of importing all the crappy, historic, essentially randomly chosen
+ debug symbol macro names from the binutils and older kernels?
+
+.. _discussion: https://lkml.kernel.org/r/20170217104757.28588-1-jslaby@suse.cz
+
+Macros Description
+------------------
+
+The new macros all share the ``SYM_`` prefix and can be divided into
+three main groups:
+
+1. ``SYM_FUNC_*`` -- to annotate C-like functions. This means functions with
+ standard C calling conventions, i.e. the stack contains a return address at
+ the predefined place and a return from the function can happen in a
+   standard way. When frame pointers are enabled, saving and restoring the
+   frame pointer shall also happen at the start and end of the function,
+   respectively.
+
+ Checking tools like ``objtool`` should ensure such marked functions conform
+ to these rules. The tools can also easily annotate these functions with
+ debugging information (like *ORC data*) automatically.
+
+2. ``SYM_CODE_*`` -- special functions called with a special stack, be they
+   interrupt handlers with special stack content, trampolines, or startup
+   functions.
+
+   Checking tools mostly skip these functions, but some debug information
+   can still be generated automatically. For correct debug data,
+ this code needs hints like ``UNWIND_HINT_REGS`` provided by developers.
+
+3. ``SYM_DATA*`` -- obviously data belonging to ``.data`` sections and not to
+ ``.text``. Data do not contain instructions, so they have to be treated
+ specially by the tools: they should not treat the bytes as instructions,
+ nor assign any debug information to them.
+
+Instruction Macros
+~~~~~~~~~~~~~~~~~~
+This section covers ``SYM_FUNC_*`` and ``SYM_CODE_*`` enumerated above.
+
+* ``SYM_FUNC_START`` and ``SYM_FUNC_START_LOCAL`` are supposed to be **the
+ most frequent markings**. They are used for functions with standard calling
+ conventions -- global and local. Like in C, they both align the functions to
+ architecture specific ``__ALIGN`` bytes. There are also ``_NOALIGN`` variants
+ for special cases where developers do not want this implicit alignment.
+
+ ``SYM_FUNC_START_WEAK`` and ``SYM_FUNC_START_WEAK_NOALIGN`` markings are
+ also offered as an assembler counterpart to the *weak* attribute known from
+ C.
+
+  All of these **shall** be coupled with ``SYM_FUNC_END``. First, it marks
+  the sequence of instructions as a function and emits its size into the
+  generated object file. Second, it eases checking and processing of such
+  object files, as the tools can trivially find exact function boundaries.
+
+  So in most cases, developers should write something like the following
+  example, with some asm instructions between the macros, of course::
+
+ SYM_FUNC_START(memset)
+ ... asm insns ...
+ SYM_FUNC_END(memset)
+
+ In fact, this kind of annotation corresponds to the now deprecated ``ENTRY``
+ and ``ENDPROC`` macros.
+
+* ``SYM_FUNC_START_ALIAS`` and ``SYM_FUNC_START_LOCAL_ALIAS`` serve for those
+ who decided to have two or more names for one function. The typical use is::
+
+ SYM_FUNC_START_ALIAS(__memset)
+ SYM_FUNC_START(memset)
+ ... asm insns ...
+ SYM_FUNC_END(memset)
+ SYM_FUNC_END_ALIAS(__memset)
+
+ In this example, one can call ``__memset`` or ``memset`` with the same
+  result, except the debug information for the instructions is generated in
+  the object file only once -- for the non-``ALIAS`` case.
+
+* ``SYM_CODE_START`` and ``SYM_CODE_START_LOCAL`` should be used only in
+ special cases -- if you know what you are doing. This is used exclusively
+ for interrupt handlers and similar where the calling convention is not the C
+ one. ``_NOALIGN`` variants exist too. The use is the same as for the ``FUNC``
+ category above::
+
+ SYM_CODE_START_LOCAL(bad_put_user)
+ ... asm insns ...
+ SYM_CODE_END(bad_put_user)
+
+ Again, every ``SYM_CODE_START*`` **shall** be coupled by ``SYM_CODE_END``.
+
+  To some extent, this category corresponds to the deprecated ``ENTRY`` and
+  ``END``, except that ``END`` had several other meanings too.
+
+* ``SYM_INNER_LABEL*`` is used to denote a label inside some
+  ``SYM_{CODE,FUNC}_START`` and ``SYM_{CODE,FUNC}_END`` pair. Such labels are
+  very similar to C labels, except they can be made global. An example of use::
+
+ SYM_CODE_START(ftrace_caller)
+ /* save_mcount_regs fills in first two parameters */
+ ...
+
+ SYM_INNER_LABEL(ftrace_caller_op_ptr, SYM_L_GLOBAL)
+ /* Load the ftrace_ops into the 3rd parameter */
+ ...
+
+ SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL)
+ call ftrace_stub
+ ...
+ retq
+ SYM_CODE_END(ftrace_caller)
+
+Data Macros
+~~~~~~~~~~~
+Similar to instructions, there are a couple of macros to describe data in
+assembly.
+
+* ``SYM_DATA_START`` and ``SYM_DATA_START_LOCAL`` mark the start of some data
+ and shall be used in conjunction with either ``SYM_DATA_END``, or
+  ``SYM_DATA_END_LABEL``. The latter also adds a label to the end, so that
+ people can use ``lstack`` and (local) ``lstack_end`` in the following
+ example::
+
+ SYM_DATA_START_LOCAL(lstack)
+ .skip 4096
+ SYM_DATA_END_LABEL(lstack, SYM_L_LOCAL, lstack_end)
+
+* ``SYM_DATA`` and ``SYM_DATA_LOCAL`` are variants for simple, mostly one-line
+ data::
+
+ SYM_DATA(HEAP, .long rm_heap)
+ SYM_DATA(heap_end, .long rm_stack)
+
+ In the end, they expand to ``SYM_DATA_START`` with ``SYM_DATA_END``
+ internally.
+
+Support Macros
+~~~~~~~~~~~~~~
+All the above eventually reduce to some invocation of ``SYM_START``,
+``SYM_END``, or ``SYM_ENTRY``. Normally, developers should avoid using
+these directly.
+
+Further, in the above examples, one could see ``SYM_L_LOCAL``. There are also
+``SYM_L_GLOBAL`` and ``SYM_L_WEAK``. All of these denote the linkage of the
+symbol they mark. They are used either in ``_LABEL`` variants of the earlier
+macros, or in ``SYM_START``.
+
+
+Overriding Macros
+~~~~~~~~~~~~~~~~~
+Architectures can also override any of the macros in their own
+``asm/linkage.h``, including macros specifying the type of a symbol
+(``SYM_T_FUNC``, ``SYM_T_OBJECT``, and ``SYM_T_NONE``). As every macro
+described in this file is surrounded by ``#ifdef`` + ``#endif``, it is enough
+to define the macros differently in the aforementioned architecture-dependent
+header.
diff --git a/Documentation/block/stat.rst b/Documentation/block/stat.rst
index 9c07bc22b0bc..77311335c08b 100644
--- a/Documentation/block/stat.rst
+++ b/Documentation/block/stat.rst
@@ -41,6 +41,8 @@ discard I/Os requests number of discard I/Os processed
discard merges requests number of discard I/Os merged with in-queue I/O
discard sectors sectors number of sectors discarded
discard ticks milliseconds total wait time for discard requests
+flush I/Os requests number of flush I/Os processed
+flush ticks milliseconds total wait time for flush requests
=============== ============= =================================================
read I/Os, write I/Os, discard I/Os
@@ -48,6 +50,14 @@ read I/Os, write I/Os, discard I/0s
These values increment when an I/O request completes.
+flush I/Os
+==========
+
+These values increment when a flush I/O request completes.
+
+The block layer combines flush requests and executes at most one at a time.
+This counts flush requests executed by the disk. Not tracked for partitions.
+
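+The whole per-device stat file can be read in one go; for example (sda is
+just an example device name)::
+
+    cat /sys/block/sda/stat
+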
read merges, write merges, discard merges
=========================================
@@ -62,8 +72,8 @@ discarded from this block device. The "sectors" in question are the
standard UNIX 512-byte sectors, not any device- or filesystem-specific
block size. The counters are incremented when the I/O completes.
-read ticks, write ticks, discard ticks
-======================================
+read ticks, write ticks, discard ticks, flush ticks
+===================================================
These values count the number of milliseconds that I/O requests have
waited on this block device. If there are multiple I/O requests waiting,
diff --git a/Documentation/bpf/index.rst b/Documentation/bpf/index.rst
index 801a6ed3f2e5..4f5410b61441 100644
--- a/Documentation/bpf/index.rst
+++ b/Documentation/bpf/index.rst
@@ -47,6 +47,15 @@ Program types
prog_flow_dissector
+Testing BPF
+===========
+
+.. toctree::
+ :maxdepth: 1
+
+ s390
+
+
.. Links:
.. _Documentation/networking/filter.txt: ../networking/filter.txt
.. _man-pages: https://www.kernel.org/doc/man-pages/
diff --git a/Documentation/bpf/prog_flow_dissector.rst b/Documentation/bpf/prog_flow_dissector.rst
index a78bf036cadd..4d86780ab0f1 100644
--- a/Documentation/bpf/prog_flow_dissector.rst
+++ b/Documentation/bpf/prog_flow_dissector.rst
@@ -142,3 +142,6 @@ BPF flow dissector doesn't support exporting all the metadata that in-kernel
C-based implementation can export. Notable example is single VLAN (802.1Q)
and double VLAN (802.1AD) tags. Please refer to the ``struct bpf_flow_keys``
for the set of information that can currently be exported from the BPF context.
+
+When BPF flow dissector is attached to the root network namespace (machine-wide
+policy), users can't override it in their child network namespaces.
diff --git a/Documentation/bpf/s390.rst b/Documentation/bpf/s390.rst
new file mode 100644
index 000000000000..21ecb309daea
--- /dev/null
+++ b/Documentation/bpf/s390.rst
@@ -0,0 +1,205 @@
+===================
+Testing BPF on s390
+===================
+
+1. Introduction
+***************
+
+IBM Z are mainframe computers, descendants of the IBM System/360 from 1964.
+They are supported by the Linux kernel under the name "s390". This document
+describes how to test BPF in an s390 QEMU guest.
+
+2. One-time setup
+*****************
+
+The following is required to build and run the test suite:
+
+ * s390 GCC
+ * s390 development headers and libraries
+ * Clang with BPF support
+ * QEMU with s390 support
+ * Disk image with s390 rootfs
+
+Debian supports installing the compiler and libraries for s390 out of the box.
+Users of other distros may use debootstrap in order to set up a Debian chroot::
+
+ sudo debootstrap \
+ --variant=minbase \
+ --include=sudo \
+ testing \
+ ./s390-toolchain
+ sudo mount --rbind /dev ./s390-toolchain/dev
+ sudo mount --rbind /proc ./s390-toolchain/proc
+ sudo mount --rbind /sys ./s390-toolchain/sys
+ sudo chroot ./s390-toolchain
+
+Once on Debian, the build prerequisites can be installed as follows::
+
+ sudo dpkg --add-architecture s390x
+ sudo apt-get update
+ sudo apt-get install \
+ bc \
+ bison \
+ cmake \
+ debootstrap \
+ dwarves \
+ flex \
+ g++ \
+ gcc \
+ g++-s390x-linux-gnu \
+ gcc-s390x-linux-gnu \
+ gdb-multiarch \
+ git \
+ make \
+ python3 \
+ qemu-system-misc \
+ qemu-utils \
+ rsync \
+ libcap-dev:s390x \
+ libelf-dev:s390x \
+ libncurses-dev
+
+The latest Clang targeting BPF can be installed as follows::
+
+ git clone https://github.com/llvm/llvm-project.git
+ ln -s ../../clang llvm-project/llvm/tools/
+ mkdir llvm-project-build
+ cd llvm-project-build
+ cmake \
+ -DLLVM_TARGETS_TO_BUILD=BPF \
+ -DCMAKE_BUILD_TYPE=Release \
+ -DCMAKE_INSTALL_PREFIX=/opt/clang-bpf \
+ ../llvm-project/llvm
+ make
+ sudo make install
+ export PATH=/opt/clang-bpf/bin:$PATH
+
+The disk image can be prepared using a loopback mount and debootstrap::
+
+ qemu-img create -f raw ./s390.img 1G
+ sudo losetup -f ./s390.img
+ sudo mkfs.ext4 /dev/loopX
+ mkdir ./s390.rootfs
+ sudo mount /dev/loopX ./s390.rootfs
+ sudo debootstrap \
+ --foreign \
+ --arch=s390x \
+ --variant=minbase \
+ --include=" \
+ iproute2, \
+ iputils-ping, \
+ isc-dhcp-client, \
+ kmod, \
+ libcap2, \
+ libelf1, \
+ netcat, \
+ procps" \
+ testing \
+ ./s390.rootfs
+ sudo umount ./s390.rootfs
+ sudo losetup -d /dev/loopX
+
+3. Compilation
+**************
+
+In addition to the usual Kconfig options required to run the BPF test suite, it
+is also helpful to select::
+
+ CONFIG_NET_9P=y
+ CONFIG_9P_FS=y
+ CONFIG_NET_9P_VIRTIO=y
+ CONFIG_VIRTIO_PCI=y
+
+as these options enable a very easy way to share files with the s390 virtual
+machine.
+
+Compiling the kernel, modules and test suite, as well as preparing gdb scripts
+simplify debugging, can be done using the following commands::
+
+ make ARCH=s390 CROSS_COMPILE=s390x-linux-gnu- menuconfig
+ make ARCH=s390 CROSS_COMPILE=s390x-linux-gnu- bzImage modules scripts_gdb
+ make ARCH=s390 CROSS_COMPILE=s390x-linux-gnu- \
+ -C tools/testing/selftests \
+ TARGETS=bpf \
+ INSTALL_PATH=$PWD/tools/testing/selftests/kselftest_install \
+ install
+
+4. Running the test suite
+*************************
+
+The virtual machine can be started as follows::
+
+ qemu-system-s390x \
+ -cpu max,zpci=on \
+ -smp 2 \
+ -m 4G \
+ -kernel linux/arch/s390/boot/compressed/vmlinux \
+ -drive file=./s390.img,if=virtio,format=raw \
+ -nographic \
+ -append 'root=/dev/vda rw console=ttyS1' \
+ -virtfs local,path=./linux,security_model=none,mount_tag=linux \
+ -object rng-random,filename=/dev/urandom,id=rng0 \
+ -device virtio-rng-ccw,rng=rng0 \
+ -netdev user,id=net0 \
+ -device virtio-net-ccw,netdev=net0
+
+When using this on a real IBM Z, ``-enable-kvm`` may be added for better
+performance. When starting the virtual machine for the first time, disk image
+setup must be finalized using the following command::
+
+ /debootstrap/debootstrap --second-stage
+
+The directory with the code built on the host, as well as ``/proc`` and
+``/sys``, need to be mounted as follows::
+
+ mkdir -p /linux
+ mount -t 9p linux /linux
+ mount -t proc proc /proc
+ mount -t sysfs sys /sys
+
+After that, the test suite can be run using the following commands::
+
+ cd /linux/tools/testing/selftests/kselftest_install
+ ./run_kselftest.sh
+
+As usual, tests can also be run individually::
+
+ cd /linux/tools/testing/selftests/bpf
+ ./test_verifier
+
+5. Debugging
+************
+
+It is possible to debug the s390 kernel using the QEMU GDB stub, which is
+activated by passing ``-s`` to QEMU.
+
+It is preferable to turn KASLR off, so that gdb knows where to find the
+kernel image in memory, by building the kernel with::
+
+ RANDOMIZE_BASE=n
+
+GDB can then be attached using the following command::
+
+ gdb-multiarch -ex 'target remote localhost:1234' ./vmlinux
+
+6. Network
+**********
+
+In case one needs to use the network in the virtual machine, e.g. in order
+to install additional packages, it can be configured using::
+
+ dhclient eth0
+
+7. Links
+********
+
+This document is a compilation of techniques, whose more comprehensive
+descriptions can be found by following these links:
+
+- `Debootstrap <https://wiki.debian.org/EmDebian/CrossDebootstrap>`_
+- `Multiarch <https://wiki.debian.org/Multiarch/HOWTO>`_
+- `Building LLVM <https://llvm.org/docs/CMake.html>`_
+- `Cross-compiling the kernel <https://wiki.gentoo.org/wiki/Embedded_Handbook/General/Cross-compiling_the_kernel>`_
+- `QEMU s390x Guest Support <https://wiki.qemu.org/Documentation/Platforms/S390X>`_
+- `Plan 9 folder sharing over Virtio <https://wiki.qemu.org/Documentation/9psetup>`_
+- `Using GDB with QEMU <https://wiki.osdev.org/Kernel_Debugging#Use_GDB_with_QEMU>`_
diff --git a/Documentation/core-api/printk-formats.rst b/Documentation/core-api/printk-formats.rst
index ecbebf4ca8e7..ea21dd4b9bad 100644
--- a/Documentation/core-api/printk-formats.rst
+++ b/Documentation/core-api/printk-formats.rst
@@ -79,6 +79,18 @@ has the added benefit of providing a unique identifier. On 64-bit machines
the first 32 bits are zeroed. The kernel will print ``(ptrval)`` until it
gathers enough entropy. If you *really* want the address see %px below.
+Error Pointers
+--------------
+
+::
+
+ %pe -ENOSPC
+
+For printing error pointers (i.e. a pointer for which IS_ERR() is true)
+as a symbolic error name. Error values for which no symbolic name is
+known are printed in decimal, while a non-ERR_PTR passed as the
+argument to %pe gets treated as ordinary %p.
+
Symbols/Function Pointers
-------------------------
@@ -86,8 +98,6 @@ Symbols/Function Pointers
%pS versatile_init+0x0/0x110
%ps versatile_init
- %pF versatile_init+0x0/0x110
- %pf versatile_init
%pSR versatile_init+0x9/0x110
(with __builtin_extract_return_addr() translation)
%pB prev_fn_of_versatile_init+0x88/0x88
@@ -97,14 +107,6 @@ The ``S`` and ``s`` specifiers are used for printing a pointer in symbolic
format. They result in the symbol name with (S) or without (s)
offsets. If KALLSYMS are disabled then the symbol address is printed instead.
-Note, that the ``F`` and ``f`` specifiers are identical to ``S`` (``s``)
-and thus deprecated. We have ``F`` and ``f`` because on ia64, ppc64 and
-parisc64 function pointers are indirect and, in fact, are function
-descriptors, which require additional dereferencing before we can lookup
-the symbol. As of now, ``S`` and ``s`` perform dereferencing on those
-platforms (when needed), so ``F`` and ``f`` exist for compatibility
-reasons only.
-
The ``B`` specifier results in the symbol name with offsets and should be
used when printing stack backtraces. The specifier takes into
consideration the effect of compiler optimisations which may occur
@@ -428,6 +430,30 @@ Examples::
Passed by reference.
+Fwnode handles
+--------------
+
+::
+
+ %pfw[fP]
+
+For printing information on fwnode handles. The default is to print the full
+node name, including the path. The modifiers are functionally equivalent to
+%pOF above.
+
+ - f - full name of the node, including the path
+ - P - the name of the node including an address (if there is one)
+
+Examples (ACPI)::
+
+ %pfwf \_SB.PCI0.CIO2.port@1.endpoint@0 - Full node name
+ %pfwP endpoint@0 - Node name
+
+Examples (OF)::
+
+ %pfwf /ocp@68000000/i2c@48072000/camera@10/port/endpoint - Full name
+ %pfwP endpoint - Node name
+
Time and date (struct rtc_time)
-------------------------------
diff --git a/Documentation/crypto/api-skcipher.rst b/Documentation/crypto/api-skcipher.rst
index 20ba08dddf2e..1aaf8985894b 100644
--- a/Documentation/crypto/api-skcipher.rst
+++ b/Documentation/crypto/api-skcipher.rst
@@ -5,7 +5,7 @@ Block Cipher Algorithm Definitions
:doc: Block Cipher Algorithm Definitions
.. kernel-doc:: include/linux/crypto.h
- :functions: crypto_alg ablkcipher_alg blkcipher_alg cipher_alg compress_alg
+ :functions: crypto_alg cipher_alg compress_alg
Symmetric Key Cipher API
------------------------
@@ -33,30 +33,3 @@ Single Block Cipher API
.. kernel-doc:: include/linux/crypto.h
:functions: crypto_alloc_cipher crypto_free_cipher crypto_has_cipher crypto_cipher_blocksize crypto_cipher_setkey crypto_cipher_encrypt_one crypto_cipher_decrypt_one
-
-Asynchronous Block Cipher API - Deprecated
-------------------------------------------
-
-.. kernel-doc:: include/linux/crypto.h
- :doc: Asynchronous Block Cipher API
-
-.. kernel-doc:: include/linux/crypto.h
- :functions: crypto_free_ablkcipher crypto_has_ablkcipher crypto_ablkcipher_ivsize crypto_ablkcipher_blocksize crypto_ablkcipher_setkey crypto_ablkcipher_reqtfm crypto_ablkcipher_encrypt crypto_ablkcipher_decrypt
-
-Asynchronous Cipher Request Handle - Deprecated
------------------------------------------------
-
-.. kernel-doc:: include/linux/crypto.h
- :doc: Asynchronous Cipher Request Handle
-
-.. kernel-doc:: include/linux/crypto.h
- :functions: crypto_ablkcipher_reqsize ablkcipher_request_set_tfm ablkcipher_request_alloc ablkcipher_request_free ablkcipher_request_set_callback ablkcipher_request_set_crypt
-
-Synchronous Block Cipher API - Deprecated
------------------------------------------
-
-.. kernel-doc:: include/linux/crypto.h
- :doc: Synchronous Block Cipher API
-
-.. kernel-doc:: include/linux/crypto.h
- :functions: crypto_alloc_blkcipher crypto_free_blkcipher crypto_has_blkcipher crypto_blkcipher_name crypto_blkcipher_ivsize crypto_blkcipher_blocksize crypto_blkcipher_setkey crypto_blkcipher_encrypt crypto_blkcipher_encrypt_iv crypto_blkcipher_decrypt crypto_blkcipher_decrypt_iv crypto_blkcipher_set_iv crypto_blkcipher_get_iv
diff --git a/Documentation/crypto/architecture.rst b/Documentation/crypto/architecture.rst
index 3eae1ae7f798..646c3380a7ed 100644
--- a/Documentation/crypto/architecture.rst
+++ b/Documentation/crypto/architecture.rst
@@ -201,10 +201,6 @@ the aforementioned cipher types:
- CRYPTO_ALG_TYPE_AEAD Authenticated Encryption with Associated Data
(MAC)
-- CRYPTO_ALG_TYPE_BLKCIPHER Synchronous multi-block cipher
-
-- CRYPTO_ALG_TYPE_ABLKCIPHER Asynchronous multi-block cipher
-
- CRYPTO_ALG_TYPE_KPP Key-agreement Protocol Primitive (KPP) such as
an ECDH or DH implementation
diff --git a/Documentation/crypto/crypto_engine.rst b/Documentation/crypto/crypto_engine.rst
index 3baa23c2cd08..25cf9836c336 100644
--- a/Documentation/crypto/crypto_engine.rst
+++ b/Documentation/crypto/crypto_engine.rst
@@ -63,8 +63,6 @@ request by using:
When your driver receives a crypto_request, you must transfer it to
the crypto engine via one of:
-* crypto_transfer_ablkcipher_request_to_engine()
-
* crypto_transfer_aead_request_to_engine()
* crypto_transfer_akcipher_request_to_engine()
@@ -75,8 +73,6 @@ the crypto engine via one of:
At the end of the request process, a call to one of the following functions is needed:
-* crypto_finalize_ablkcipher_request()
-
* crypto_finalize_aead_request()
* crypto_finalize_akcipher_request()
diff --git a/Documentation/crypto/devel-algos.rst b/Documentation/crypto/devel-algos.rst
index c45c6f400dbd..f9d288015acc 100644
--- a/Documentation/crypto/devel-algos.rst
+++ b/Documentation/crypto/devel-algos.rst
@@ -128,25 +128,20 @@ process requests that are unaligned. This implies, however, additional
overhead as the kernel crypto API needs to perform the realignment of
the data which may imply moving of data.
-Cipher Definition With struct blkcipher_alg and ablkcipher_alg
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Cipher Definition With struct skcipher_alg
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Struct blkcipher_alg defines a synchronous block cipher whereas struct
-ablkcipher_alg defines an asynchronous block cipher.
+Struct skcipher_alg defines a multi-block cipher, or more generally, a
+length-preserving symmetric cipher algorithm.
-Please refer to the single block cipher description for schematics of
-the block cipher usage.
+Scatterlist handling
+~~~~~~~~~~~~~~~~~~~~
-Specifics Of Asynchronous Multi-Block Cipher
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-There are a couple of specifics to the asynchronous interface.
-
-First of all, some of the drivers will want to use the Generic
-ScatterWalk in case the hardware needs to be fed separate chunks of the
-scatterlist which contains the plaintext and will contain the
-ciphertext. Please refer to the ScatterWalk interface offered by the
-Linux kernel scatter / gather list implementation.
+Some drivers will want to use the Generic ScatterWalk in case the
+hardware needs to be fed separate chunks of the scatterlist which
+contains the plaintext and will contain the ciphertext. Please refer
+to the ScatterWalk interface offered by the Linux kernel scatter /
+gather list implementation.
Hashing [HASH]
--------------
diff --git a/Documentation/dev-tools/index.rst b/Documentation/dev-tools/index.rst
index b0522a4dd107..09dee10d2592 100644
--- a/Documentation/dev-tools/index.rst
+++ b/Documentation/dev-tools/index.rst
@@ -24,6 +24,7 @@ whole; patches welcome!
gdb-kernel-debugging
kgdb
kselftest
+ kunit/index
.. only:: subproject and html
diff --git a/Documentation/dev-tools/kunit/api/index.rst b/Documentation/dev-tools/kunit/api/index.rst
new file mode 100644
index 000000000000..9b9bffe5d41a
--- /dev/null
+++ b/Documentation/dev-tools/kunit/api/index.rst
@@ -0,0 +1,16 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=============
+API Reference
+=============
+.. toctree::
+
+ test
+
+This section documents the KUnit kernel testing API. It is divided into the
+following sections:
+
+================================= ==============================================
+:doc:`test` documents all of the standard testing API
+ excluding mocking or mocking related features.
+================================= ==============================================
diff --git a/Documentation/dev-tools/kunit/api/test.rst b/Documentation/dev-tools/kunit/api/test.rst
new file mode 100644
index 000000000000..aaa97f17e5b3
--- /dev/null
+++ b/Documentation/dev-tools/kunit/api/test.rst
@@ -0,0 +1,11 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+========
+Test API
+========
+
+This file documents all of the standard testing API excluding mocking or mocking
+related features.
+
+.. kernel-doc:: include/kunit/test.h
+ :internal:
diff --git a/Documentation/dev-tools/kunit/faq.rst b/Documentation/dev-tools/kunit/faq.rst
new file mode 100644
index 000000000000..bf2095112d89
--- /dev/null
+++ b/Documentation/dev-tools/kunit/faq.rst
@@ -0,0 +1,62 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+==========================
+Frequently Asked Questions
+==========================
+
+How is this different from Autotest, kselftest, etc?
+====================================================
+KUnit is a unit testing framework. Autotest, kselftest (and some others) are
+not.
+
+A `unit test <https://martinfowler.com/bliki/UnitTest.html>`_ is supposed to
+test a single unit of code in isolation, hence the name. A unit test should be
+the finest granularity of testing and as such should allow all possible code
+paths to be tested in the code under test; this is only possible if the code
+under test is very small and does not have any external dependencies outside of
+the test's control like hardware.
+
+There are no testing frameworks currently available for the kernel that do not
+require installing the kernel on a test machine or in a VM, and all of them
+require tests to be written in userspace and run on the kernel under test;
+this is true for Autotest, kselftest, and some others, disqualifying any of
+them from being considered unit testing frameworks.
+
+Does KUnit support running on architectures other than UML?
+===========================================================
+
+Yes, well, mostly.
+
+For the most part, the KUnit core framework (what you use to write the tests)
+can compile to any architecture; it compiles like just another part of the
+kernel and runs when the kernel boots. However, there is some infrastructure,
+like the KUnit Wrapper (``tools/testing/kunit/kunit.py``) that does not support
+other architectures.
+
+In short, this means that, yes, you can run KUnit on other architectures, but
+it might require more work than using KUnit on UML.
+
+For more information, see :ref:`kunit-on-non-uml`.
+
+What is the difference between a unit test and these other kinds of tests?
+==========================================================================
+Most existing tests for the Linux kernel would be categorized as an integration
+test, or an end-to-end test.
+
+- A unit test is supposed to test a single unit of code in isolation, hence the
+ name. A unit test should be the finest granularity of testing and as such
+ should allow all possible code paths to be tested in the code under test; this
+ is only possible if the code under test is very small and does not have any
+ external dependencies outside of the test's control like hardware.
+- An integration test tests the interaction between a minimal set of components,
+ usually just two or three. For example, someone might write an integration
+ test to test the interaction between a driver and a piece of hardware, or to
+ test the interaction between the userspace libraries the kernel provides and
+ the kernel itself; however, one of these tests would probably not test the
+ entire kernel along with hardware interactions and interactions with the
+ userspace.
+- An end-to-end test usually tests the entire system from the perspective of the
+ code under test. For example, someone might write an end-to-end test for the
+ kernel by installing a production configuration of the kernel on production
+ hardware with a production userspace and then trying to exercise some behavior
+ that depends on interactions between the hardware, the kernel, and userspace.
diff --git a/Documentation/dev-tools/kunit/index.rst b/Documentation/dev-tools/kunit/index.rst
new file mode 100644
index 000000000000..26ffb46bdf99
--- /dev/null
+++ b/Documentation/dev-tools/kunit/index.rst
@@ -0,0 +1,79 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=========================================
+KUnit - Unit Testing for the Linux Kernel
+=========================================
+
+.. toctree::
+ :maxdepth: 2
+
+ start
+ usage
+ api/index
+ faq
+
+What is KUnit?
+==============
+
+KUnit is a lightweight unit testing and mocking framework for the Linux kernel.
+These tests are able to be run locally on a developer's workstation without a VM
+or special hardware.
+
+KUnit is heavily inspired by JUnit, Python's unittest.mock, and
+Googletest/Googlemock for C++. KUnit provides facilities for defining unit test
+cases, grouping related test cases into test suites, providing common
+infrastructure for running tests, and much more.
+
+Get started now: :doc:`start`
+
+Why KUnit?
+==========
+
+A unit test is supposed to test a single unit of code in isolation, hence the
+name. A unit test should be the finest granularity of testing and as such should
+allow all possible code paths to be tested in the code under test; this is only
+possible if the code under test is very small and does not have any external
+dependencies outside of the test's control like hardware.
+
+Outside of KUnit, there are no testing frameworks currently available for the
+kernel that do not require installing the kernel on a test machine or in a VM,
+and all of them require tests to be written in userspace and run on the kernel
+under test; this is true for Autotest and kselftest, disqualifying either of
+them from being considered a unit testing framework.
+
+KUnit addresses the problem of being able to run tests without needing a virtual
+machine or actual hardware with User Mode Linux. User Mode Linux is a Linux
+architecture, like ARM or x86; however, unlike other architectures it compiles
+to a standalone program that can be run like any other program directly inside
+of a host operating system; to be clear, it does not require any virtualization
+support; it is just a regular program.
+
+KUnit is fast. Excluding build time, from invocation to completion KUnit can run
+several dozen tests in only 10 to 20 seconds; this might not sound like a big
+deal to some people, but having such fast and easy to run tests fundamentally
+changes the way you go about testing and even writing code in the first place.
+Linus himself said in his `git talk at Google
+<https://gist.github.com/lorn/1272686/revisions#diff-53c65572127855f1b003db4064a94573R874>`_:
+
+ "... a lot of people seem to think that performance is about doing the
+ same thing, just doing it faster, and that is not true. That is not what
+ performance is all about. If you can do something really fast, really
+ well, people will start using it differently."
+
+In this context Linus was talking about branching and merging,
+but this point also applies to testing. If your tests are slow, unreliable,
+difficult to write, and require a special setup or special hardware to run,
+then you wait a lot longer to write tests, and you wait a lot longer to run
+tests; this means that tests are likely to break, unlikely to test a lot of
+things, and are unlikely to be rerun once they pass. If your tests are really
+fast, you run them all the time, every time you make a change, and every time
+someone sends you some code. Why trust that someone ran all their tests
+correctly on every change when you can just run them yourself in less time than
+it takes to read their test log?
+
+How do I use it?
+================
+
+* :doc:`start` - for new users of KUnit
+* :doc:`usage` - for a more detailed explanation of KUnit features
+* :doc:`api/index` - for the list of KUnit APIs used for testing
diff --git a/Documentation/dev-tools/kunit/start.rst b/Documentation/dev-tools/kunit/start.rst
new file mode 100644
index 000000000000..aeeddfafeea2
--- /dev/null
+++ b/Documentation/dev-tools/kunit/start.rst
@@ -0,0 +1,180 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+===============
+Getting Started
+===============
+
+Installing dependencies
+=======================
+KUnit has the same dependencies as the Linux kernel. As long as you can build
+the kernel, you can run KUnit.
+
+KUnit Wrapper
+=============
+Included with KUnit is a simple Python wrapper that makes KUnit output easy to
+use and read. It handles building and running the kernel, as well as
+formatting the output.
+
+The wrapper can be run with:
+
+.. code-block:: bash
+
+ ./tools/testing/kunit/kunit.py run
+
+Creating a kunitconfig
+======================
+The Python script is a thin wrapper around Kbuild; as such, it needs to be
+configured with a ``kunitconfig`` file. This file essentially contains the
+regular kernel config, plus the specific test targets.
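+
+For example, a minimal ``kunitconfig`` (using the same options that appear
+later in this guide) might contain just:
+
+.. code-block:: none
+
+ CONFIG_KUNIT=y
+ CONFIG_KUNIT_EXAMPLE_TEST=y
+
+One way to obtain a ready-made ``kunitconfig`` is: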
+
+.. code-block:: bash
+
+ git clone -b master https://kunit.googlesource.com/kunitconfig $PATH_TO_KUNITCONFIG_REPO
+ cd $PATH_TO_LINUX_REPO
+ ln -s $PATH_TO_KUNITCONFIG_REPO/kunitconfig kunitconfig
+
+You may want to add kunitconfig to your local gitignore.
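+
+For example, one way to ignore it in your local checkout only (this uses
+git's per-repository exclude file instead of the shared ``.gitignore``) is:
+
+.. code-block:: bash
+
+ echo kunitconfig >> .git/info/exclude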
+
+Verifying KUnit Works
+---------------------
+
+To make sure that everything is set up correctly, simply invoke the Python
+wrapper from your kernel repo:
+
+.. code-block:: bash
+
+ ./tools/testing/kunit/kunit.py run
+
+.. note::
+ You may want to run ``make mrproper`` first.
+
+If everything worked correctly, you should see the following:
+
+.. code-block:: bash
+
+ Generating .config ...
+ Building KUnit Kernel ...
+ Starting KUnit Kernel ...
+
+followed by a list of tests that are run. All of them should be passing.
+
+.. note::
+ Because it is building a lot of sources for the first time, the ``Building
+ KUnit Kernel`` step may take a while.
+
+Writing your first test
+=======================
+
+In your kernel repo let's add some code that we can test. Create a file
+``drivers/misc/example.h`` with the contents:
+
+.. code-block:: c
+
+ int misc_example_add(int left, int right);
+
+Then create a file ``drivers/misc/example.c``:
+
+.. code-block:: c
+
+ #include <linux/errno.h>
+
+ #include "example.h"
+
+ int misc_example_add(int left, int right)
+ {
+ return left + right;
+ }
+
+Now add the following lines to ``drivers/misc/Kconfig``:
+
+.. code-block:: kconfig
+
+ config MISC_EXAMPLE
+ bool "My example"
+
+and the following lines to ``drivers/misc/Makefile``:
+
+.. code-block:: make
+
+ obj-$(CONFIG_MISC_EXAMPLE) += example.o
+
+Now we are ready to write the test. The test will be in
+``drivers/misc/example-test.c``:
+
+.. code-block:: c
+
+ #include <kunit/test.h>
+ #include "example.h"
+
+ /* Define the test cases. */
+
+ static void misc_example_add_test_basic(struct kunit *test)
+ {
+ KUNIT_EXPECT_EQ(test, 1, misc_example_add(1, 0));
+ KUNIT_EXPECT_EQ(test, 2, misc_example_add(1, 1));
+ KUNIT_EXPECT_EQ(test, 0, misc_example_add(-1, 1));
+ KUNIT_EXPECT_EQ(test, INT_MAX, misc_example_add(0, INT_MAX));
+ KUNIT_EXPECT_EQ(test, -1, misc_example_add(INT_MAX, INT_MIN));
+ }
+
+ static void misc_example_test_failure(struct kunit *test)
+ {
+ KUNIT_FAIL(test, "This test never passes.");
+ }
+
+ static struct kunit_case misc_example_test_cases[] = {
+ KUNIT_CASE(misc_example_add_test_basic),
+ KUNIT_CASE(misc_example_test_failure),
+ {}
+ };
+
+ static struct kunit_suite misc_example_test_suite = {
+ .name = "misc-example",
+ .test_cases = misc_example_test_cases,
+ };
+ kunit_test_suite(misc_example_test_suite);
+
+Now add the following to ``drivers/misc/Kconfig``:
+
+.. code-block:: kconfig
+
+ config MISC_EXAMPLE_TEST
+ bool "Test for my example"
+ depends on MISC_EXAMPLE && KUNIT
+
+and the following to ``drivers/misc/Makefile``:
+
+.. code-block:: make
+
+ obj-$(CONFIG_MISC_EXAMPLE_TEST) += example-test.o
+
+Now add it to your ``kunitconfig``:
+
+.. code-block:: none
+
+ CONFIG_MISC_EXAMPLE=y
+ CONFIG_MISC_EXAMPLE_TEST=y
+
+Now you can run the test:
+
+.. code-block:: bash
+
+ ./tools/testing/kunit/kunit.py run
+
+You should see the following failure:
+
+.. code-block:: none
+
+ ...
+ [16:08:57] [PASSED] misc-example:misc_example_add_test_basic
+ [16:08:57] [FAILED] misc-example:misc_example_test_failure
+ [16:08:57] EXPECTATION FAILED at drivers/misc/example-test.c:17
+ [16:08:57] This test never passes.
+ ...
+
+Congrats! You just wrote your first KUnit test!
+
+Next Steps
+==========
+* Check out the :doc:`usage` page for a more
+ in-depth explanation of KUnit.
diff --git a/Documentation/dev-tools/kunit/usage.rst b/Documentation/dev-tools/kunit/usage.rst
new file mode 100644
index 000000000000..c6e69634e274
--- /dev/null
+++ b/Documentation/dev-tools/kunit/usage.rst
@@ -0,0 +1,576 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+===========
+Using KUnit
+===========
+
+The purpose of this document is to describe what KUnit is, how it works, how it
+is intended to be used, and all the concepts and terminology that are needed to
+understand it. This guide assumes a working knowledge of the Linux kernel and
+some basic knowledge of testing.
+
+For a high level introduction to KUnit, including setting up KUnit for your
+project, see :doc:`start`.
+
+Organization of this document
+=============================
+
+This document is organized into two main sections: Testing and Isolating
+Behavior. The first covers what a unit test is and how to use KUnit to write
+them. The second covers how to use KUnit to isolate code and make it possible
+to unit test code that was otherwise un-unit-testable.
+
+Testing
+=======
+
+What is KUnit?
+--------------
+
+"K" is short for "kernel" so "KUnit" is the "(Linux) Kernel Unit Testing
+Framework." KUnit is intended first and foremost for writing unit tests; it is
+general enough that it can be used to write integration tests; however, this is
+a secondary goal. KUnit has no ambition of being the only testing framework for
+the kernel; for example, it does not intend to be an end-to-end testing
+framework.
+
+What is Unit Testing?
+---------------------
+
+A `unit test <https://martinfowler.com/bliki/UnitTest.html>`_ is a test that
+tests code at the smallest possible scope, a *unit* of code. In the C
+programming language that's a function.
+
+Unit tests should be written for all the publicly exposed functions in a
+compilation unit; that is, all functions that are exported as part of a
+*class* (defined below) and all functions which are **not** static.
+
+Writing Tests
+-------------
+
+Test Cases
+~~~~~~~~~~
+
+The fundamental unit in KUnit is the test case. A test case is a function with
+the signature ``void (*)(struct kunit *test)``. It calls a function to be tested
+and then sets *expectations* for what should happen. For example:
+
+.. code-block:: c
+
+ void example_test_success(struct kunit *test)
+ {
+ }
+
+ void example_test_failure(struct kunit *test)
+ {
+ KUNIT_FAIL(test, "This test never passes.");
+ }
+
+In the above example ``example_test_success`` always passes because it does
+nothing; no expectations are set, so all expectations pass. On the other hand
+``example_test_failure`` always fails because it calls ``KUNIT_FAIL``, which is
+a special expectation that logs a message and causes the test case to fail.
+
+Expectations
+~~~~~~~~~~~~
+An *expectation* is a way to specify that you expect a piece of code to do
+something in a test. An expectation is called like a function. A test is made
+by setting expectations about the behavior of a piece of code under test; when
+one or more of the expectations fail, the test case fails and information about
+the failure is logged. For example:
+
+.. code-block:: c
+
+ void add_test_basic(struct kunit *test)
+ {
+ KUNIT_EXPECT_EQ(test, 1, add(1, 0));
+ KUNIT_EXPECT_EQ(test, 2, add(1, 1));
+ }
+
+In the above example ``add_test_basic`` sets a number of expectations about the
+behavior of a function called ``add``; the first parameter is always of type
+``struct kunit *``, which contains information about the current test context;
+the second parameter, in this case, is what the value is expected to be; the
+last value is what the value actually is. If ``add`` passes all of these
+expectations, the test case ``add_test_basic`` will pass; if any one of these
+expectations fails, the test case will fail.
+
+It is important to understand that a test case *fails* when any expectation is
+violated; however, the test will continue running, potentially trying other
+expectations until the test case ends or is otherwise terminated. This is as
+opposed to *assertions*, which are discussed later.
+
+To learn about more expectations supported by KUnit, see :doc:`api/test`.
+
+.. note::
+ A single test case should be short, easy to understand, and focused on a
+ single behavior.
+
+For example, if we wanted to properly test the add function above, we would
+create additional test cases, each testing a different property that an add
+function should have, like this:
+
+.. code-block:: c
+
+ void add_test_basic(struct kunit *test)
+ {
+ KUNIT_EXPECT_EQ(test, 1, add(1, 0));
+ KUNIT_EXPECT_EQ(test, 2, add(1, 1));
+ }
+
+ void add_test_negative(struct kunit *test)
+ {
+ KUNIT_EXPECT_EQ(test, 0, add(-1, 1));
+ }
+
+ void add_test_max(struct kunit *test)
+ {
+ KUNIT_EXPECT_EQ(test, INT_MAX, add(0, INT_MAX));
+ KUNIT_EXPECT_EQ(test, -1, add(INT_MAX, INT_MIN));
+ }
+
+ void add_test_overflow(struct kunit *test)
+ {
+ KUNIT_EXPECT_EQ(test, INT_MIN, add(INT_MAX, 1));
+ }
+
+Notice how it is immediately obvious what properties we are testing for.
+
+Assertions
+~~~~~~~~~~
+
+KUnit also has the concept of an *assertion*. An assertion is just like an
+expectation except the assertion immediately terminates the test case if it is
+not satisfied.
+
+For example:
+
+.. code-block:: c
+
+ static void mock_test_do_expect_default_return(struct kunit *test)
+ {
+ struct mock_test_context *ctx = test->priv;
+ struct mock *mock = ctx->mock;
+ int param0 = 5, param1 = -5;
+ const char *two_param_types[] = {"int", "int"};
+ const void *two_params[] = {&param0, &param1};
+ const void *ret;
+
+ ret = mock->do_expect(mock,
+ "test_printk", test_printk,
+ two_param_types, two_params,
+ ARRAY_SIZE(two_params));
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ret);
+ KUNIT_EXPECT_EQ(test, -4, *((int *) ret));
+ }
+
+In this example, the method under test should return a pointer to a value, so
+if the pointer returned by the method is null or an errno, we don't want to
+bother continuing the test since the following expectation could crash the test
+case. ``KUNIT_ASSERT_NOT_ERR_OR_NULL(...)`` allows us to bail out of the test
+case if the preconditions needed to complete the test have not been satisfied.
+
+Test Suites
+~~~~~~~~~~~
+
+Now obviously one unit test isn't very helpful; the power comes from having
+many test cases covering all of a unit's behaviors. Consequently it is common
+to have many *similar* tests; in order to reduce duplication in these closely
+related tests, most unit testing frameworks (KUnit included) provide the
+concept of a *test suite*: a collection of test cases for a unit of code, with
+a set up function that gets invoked before every test case and a tear down
+function that gets invoked after every test case completes.
+
+Example:
+
+.. code-block:: c
+
+ static struct kunit_case example_test_cases[] = {
+ KUNIT_CASE(example_test_foo),
+ KUNIT_CASE(example_test_bar),
+ KUNIT_CASE(example_test_baz),
+ {}
+ };
+
+ static struct kunit_suite example_test_suite = {
+ .name = "example",
+ .init = example_test_init,
+ .exit = example_test_exit,
+ .test_cases = example_test_cases,
+ };
+ kunit_test_suite(example_test_suite);
+
+In the above example the test suite, ``example_test_suite``, would run the test
+cases ``example_test_foo``, ``example_test_bar``, and ``example_test_baz``,
+each would have ``example_test_init`` called immediately before it and would
+have ``example_test_exit`` called immediately after it.
+``kunit_test_suite(example_test_suite)`` registers the test suite with the
+KUnit test framework.
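+
+For reference, here is a sketch of what ``example_test_init`` and
+``example_test_exit`` might look like; the bodies are hypothetical, but the
+signatures match what ``struct kunit_suite`` expects (``init`` returns 0 on
+success):
+
+.. code-block:: c
+
+ static int example_test_init(struct kunit *test)
+ {
+ /* Set up state shared by the test cases, e.g. via test->priv. */
+ return 0;
+ }
+
+ static void example_test_exit(struct kunit *test)
+ {
+ /* Clean up anything set up in example_test_init(). */
+ }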
+
+.. note::
+ A test case will only be run if it is associated with a test suite.
+
+For more information on these types of things, see :doc:`api/test`.
+
+Isolating Behavior
+==================
+
+The most important aspect of unit testing that other forms of testing do not
+provide is the ability to limit the amount of code under test to a single
+unit. In practice, this is only possible by being able to control what code
+gets run when the unit under test calls a function. This is usually
+accomplished through some sort of indirection, where a function is exposed as
+part of an API such that the definition of that function can be changed
+without affecting the rest of the code base. In the kernel this primarily
+comes from two constructs: classes, which are structs that contain function
+pointers provided by the implementer, and architecture specific functions,
+which have definitions selected at compile time.
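+
+As a hypothetical sketch of the second construct, the definition of a function
+can be selected by the architecture configuration at compile time (the
+function name below is made up for illustration):
+
+.. code-block:: c
+
+ #ifdef CONFIG_X86
+ void flush_my_cache(void)
+ {
+ /* x86-specific implementation. */
+ }
+ #else
+ void flush_my_cache(void)
+ {
+ /* Generic fallback implementation. */
+ }
+ #endif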
+
+Classes
+-------
+
+Classes are not a construct that is built into the C programming language;
+however, it is an easily derived concept. Accordingly, pretty much every project
+that does not use a standardized object oriented library (like GNOME's GObject)
+has its own slightly different way of doing object oriented programming; the
+Linux kernel is no exception.
+
+The central concept in kernel object oriented programming is the class. In the
+kernel, a *class* is a struct that contains function pointers. This creates a
+contract between *implementers* and *users* since it forces them to use the
+same function signature without having to call the function directly. In order
+for it to truly be a class, the function pointers must specify that a pointer
+to the class, known as a *class handle*, be one of the parameters; this makes
+it possible for the member functions (also known as *methods*) to have access
+to member variables (more commonly known as *fields*) allowing the same
+implementation to have multiple *instances*.
+
+Typically a class can be *overridden* by *child classes* by embedding the
+*parent class* in the child class. Then when a method provided by the child
+class is called, the child implementation knows that the pointer passed to it is
+of a parent contained within the child; because of this, the child can compute
+the pointer to itself because the pointer to the parent is always a fixed offset
+from the pointer to the child; this offset is the offset of the parent contained
+in the child struct. For example:
+
+.. code-block:: c
+
+ struct shape {
+ int (*area)(struct shape *this);
+ };
+
+ struct rectangle {
+ struct shape parent;
+ int length;
+ int width;
+ };
+
+ int rectangle_area(struct shape *this)
+ {
+ struct rectangle *self = container_of(this, struct rectangle, parent);
+
+ return self->length * self->width;
+ }
+
+ void rectangle_new(struct rectangle *self, int length, int width)
+ {
+ self->parent.area = rectangle_area;
+ self->length = length;
+ self->width = width;
+ }
+
+In this example (as in most kernel code) the operation of computing the pointer
+to the child from the pointer to the parent is done by ``container_of``.
+
+Faking Classes
+~~~~~~~~~~~~~~
+
+In order to unit test a piece of code that calls a method in a class, the
+behavior of the method must be controllable, otherwise the test ceases to be a
+unit test and becomes an integration test.
+
+A fake just provides an implementation of a piece of code that is different than
+what runs in a production instance, but behaves identically from the standpoint
+of the callers; this is usually done to replace a dependency that is hard to
+deal with, or is slow.
+
+A good example of this might be implementing a fake EEPROM that just stores the
+"contents" in an internal buffer. For example, let's assume we have a class that
+represents an EEPROM:
+
+.. code-block:: c
+
+ struct eeprom {
+ ssize_t (*read)(struct eeprom *this, size_t offset, char *buffer, size_t count);
+ ssize_t (*write)(struct eeprom *this, size_t offset, const char *buffer, size_t count);
+ };
+
+And we want to test some code that buffers writes to the EEPROM:
+
+.. code-block:: c
+
+ struct eeprom_buffer {
+ ssize_t (*write)(struct eeprom_buffer *this, const char *buffer, size_t count);
+ int (*flush)(struct eeprom_buffer *this);
+ size_t flush_count; /* Flushes when buffer exceeds flush_count. */
+ };
+
+ struct eeprom_buffer *new_eeprom_buffer(struct eeprom *eeprom);
+ void destroy_eeprom_buffer(struct eeprom_buffer *eeprom_buffer);
+
+We can easily test this code by *faking out* the underlying EEPROM:
+
+.. code-block:: c
+
+ struct fake_eeprom {
+ struct eeprom parent;
+ char contents[FAKE_EEPROM_CONTENTS_SIZE];
+ };
+
+ ssize_t fake_eeprom_read(struct eeprom *parent, size_t offset, char *buffer, size_t count)
+ {
+ struct fake_eeprom *this = container_of(parent, struct fake_eeprom, parent);
+
+ count = min(count, FAKE_EEPROM_CONTENTS_SIZE - offset);
+ memcpy(buffer, this->contents + offset, count);
+
+ return count;
+ }
+
+ ssize_t fake_eeprom_write(struct eeprom *parent, size_t offset, const char *buffer, size_t count)
+ {
+ struct fake_eeprom *this = container_of(parent, struct fake_eeprom, parent);
+
+ count = min(count, FAKE_EEPROM_CONTENTS_SIZE - offset);
+ memcpy(this->contents + offset, buffer, count);
+
+ return count;
+ }
+
+ void fake_eeprom_init(struct fake_eeprom *this)
+ {
+ this->parent.read = fake_eeprom_read;
+ this->parent.write = fake_eeprom_write;
+ memset(this->contents, 0, FAKE_EEPROM_CONTENTS_SIZE);
+ }
+
+We can now use it to test ``struct eeprom_buffer``:
+
+.. code-block:: c
+
+ struct eeprom_buffer_test {
+ struct fake_eeprom *fake_eeprom;
+ struct eeprom_buffer *eeprom_buffer;
+ };
+
+ static void eeprom_buffer_test_does_not_write_until_flush(struct kunit *test)
+ {
+ struct eeprom_buffer_test *ctx = test->priv;
+ struct eeprom_buffer *eeprom_buffer = ctx->eeprom_buffer;
+ struct fake_eeprom *fake_eeprom = ctx->fake_eeprom;
+ char buffer[] = {0xff};
+
+ eeprom_buffer->flush_count = SIZE_MAX;
+
+ eeprom_buffer->write(eeprom_buffer, buffer, 1);
+ KUNIT_EXPECT_EQ(test, fake_eeprom->contents[0], 0);
+
+ eeprom_buffer->write(eeprom_buffer, buffer, 1);
+ KUNIT_EXPECT_EQ(test, fake_eeprom->contents[1], 0);
+
+ eeprom_buffer->flush(eeprom_buffer);
+ KUNIT_EXPECT_EQ(test, fake_eeprom->contents[0], 0xff);
+ KUNIT_EXPECT_EQ(test, fake_eeprom->contents[1], 0xff);
+ }
+
+ static void eeprom_buffer_test_flushes_after_flush_count_met(struct kunit *test)
+ {
+ struct eeprom_buffer_test *ctx = test->priv;
+ struct eeprom_buffer *eeprom_buffer = ctx->eeprom_buffer;
+ struct fake_eeprom *fake_eeprom = ctx->fake_eeprom;
+ char buffer[] = {0xff};
+
+ eeprom_buffer->flush_count = 2;
+
+ eeprom_buffer->write(eeprom_buffer, buffer, 1);
+ KUNIT_EXPECT_EQ(test, fake_eeprom->contents[0], 0);
+
+ eeprom_buffer->write(eeprom_buffer, buffer, 1);
+ KUNIT_EXPECT_EQ(test, fake_eeprom->contents[0], 0xff);
+ KUNIT_EXPECT_EQ(test, fake_eeprom->contents[1], 0xff);
+ }
+
+ static void eeprom_buffer_test_flushes_increments_of_flush_count(struct kunit *test)
+ {
+ struct eeprom_buffer_test *ctx = test->priv;
+ struct eeprom_buffer *eeprom_buffer = ctx->eeprom_buffer;
+ struct fake_eeprom *fake_eeprom = ctx->fake_eeprom;
+ char buffer[] = {0xff, 0xff};
+
+ eeprom_buffer->flush_count = 2;
+
+ eeprom_buffer->write(eeprom_buffer, buffer, 1);
+ KUNIT_EXPECT_EQ(test, fake_eeprom->contents[0], 0);
+
+ eeprom_buffer->write(eeprom_buffer, buffer, 2);
+ KUNIT_EXPECT_EQ(test, fake_eeprom->contents[0], 0xff);
+ KUNIT_EXPECT_EQ(test, fake_eeprom->contents[1], 0xff);
+ /* Should have only flushed the first two bytes. */
+ KUNIT_EXPECT_EQ(test, fake_eeprom->contents[2], 0);
+ }
+
+ static int eeprom_buffer_test_init(struct kunit *test)
+ {
+ struct eeprom_buffer_test *ctx;
+
+ ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+
+ ctx->fake_eeprom = kunit_kzalloc(test, sizeof(*ctx->fake_eeprom), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx->fake_eeprom);
+ fake_eeprom_init(ctx->fake_eeprom);
+
+ ctx->eeprom_buffer = new_eeprom_buffer(&ctx->fake_eeprom->parent);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx->eeprom_buffer);
+
+ test->priv = ctx;
+
+ return 0;
+ }
+
+ static void eeprom_buffer_test_exit(struct kunit *test)
+ {
+ struct eeprom_buffer_test *ctx = test->priv;
+
+ destroy_eeprom_buffer(ctx->eeprom_buffer);
+ }
+
+.. _kunit-on-non-uml:
+
+KUnit on non-UML architectures
+==============================
+
+By default KUnit uses UML as a way to provide dependencies for code under test.
+Under most circumstances KUnit's usage of UML should be treated as an
+implementation detail of how KUnit works under the hood. Nevertheless, there
+are instances where being able to run architecture specific code, or test
+against real hardware is desirable. For these reasons KUnit supports running on
+other architectures.
+
+Running existing KUnit tests on non-UML architectures
+-----------------------------------------------------
+
+There are some special considerations when running existing KUnit tests on
+non-UML architectures:
+
+* Hardware may not be deterministic, so a test that always passes or fails
+ when run under UML may not always do so on real hardware.
+* Hardware and VM environments may not be hermetic. KUnit tries its best to
+ provide a hermetic environment to run tests; however, it cannot manage state
+ that it doesn't know about outside of the kernel. Consequently, tests that
+ may be hermetic on UML may not be hermetic on other architectures.
+* Some features and tooling may not be supported outside of UML.
+* Hardware and VMs are slower than UML.
+
+None of these are reasons not to run your KUnit tests on real hardware; they are
+only things to be aware of when doing so.
+
+The biggest impediment will likely be that certain KUnit features and
+infrastructure may not support your target environment. For example, at this
+time the KUnit Wrapper (``tools/testing/kunit/kunit.py``) does not work outside
+of UML. Unfortunately, there is no way around this. Using UML (or even just a
+particular architecture) allows us to make a lot of assumptions that make it
+possible to do things which might otherwise be impossible.
+
+Nevertheless, all core KUnit framework features are fully supported on all
+architectures, and using them is straightforward: all you need to do is take
+your kunitconfig (the Kconfig options for the tests you would like to run) and
+merge it into whatever config you are using for your platform. That's it!
+
+For example, let's say you have the following kunitconfig:
+
+.. code-block:: none
+
+ CONFIG_KUNIT=y
+ CONFIG_KUNIT_EXAMPLE_TEST=y
+
+If you wanted to run this test on an x86 VM, you might add the following config
+options to your ``.config``:
+
+.. code-block:: none
+
+ CONFIG_KUNIT=y
+ CONFIG_KUNIT_EXAMPLE_TEST=y
+ CONFIG_SERIAL_8250=y
+ CONFIG_SERIAL_8250_CONSOLE=y
+
+All these new options do is enable support for a common serial console needed
+for logging.
+
+Next, you could build a kernel with these tests as follows:
+
+
+.. code-block:: bash
+
+ make ARCH=x86 olddefconfig
+ make ARCH=x86
+
+Once you have built a kernel, you could run it on QEMU as follows:
+
+.. code-block:: bash
+
+ qemu-system-x86_64 -enable-kvm \
+ -m 1024 \
+ -kernel arch/x86_64/boot/bzImage \
+ -append 'console=ttyS0' \
+ --nographic
+
+Interspersed in the kernel logs you might see the following:
+
+.. code-block:: none
+
+ TAP version 14
+ # Subtest: example
+ 1..1
+ # example_simple_test: initializing
+ ok 1 - example_simple_test
+ ok 1 - example
+
+Congratulations, you just ran a KUnit test on the x86 architecture!
+
+Writing new tests for other architectures
+-----------------------------------------
+
+The first thing you must do is ask yourself whether it is necessary to write a
+KUnit test for a specific architecture, and then whether it is necessary to
+write that test for a particular piece of hardware. In general, writing a test
+that depends on having access to a particular piece of hardware or software (not
+included in the Linux source repo) should be avoided at all costs.
+
+Even if you only ever plan on running your KUnit test on your hardware
+configuration, other people may want to run your tests and may not have access
+to your hardware. If you write your test to run on UML, then anyone can run your
+tests without knowing anything about your particular setup, and you can still
+run your tests on your hardware setup just by compiling for your architecture.
+
+.. important::
+ Always prefer tests that run on UML to tests that only run under a particular
+ architecture, and always prefer tests that run under QEMU or another software
+ environment that is easy (and monetarily free) to obtain over tests that
+ require a specific piece of hardware.
+
+Nevertheless, there are still valid reasons to write an architecture or hardware
+specific test: for example, you might want to test some code that really belongs
+in ``arch/some-arch/*``. Even so, try your best to write the test so that it
+does not depend on physical hardware: if some of your test cases don't need the
+hardware, only require the hardware for tests that actually need it.
+
+Now that you have narrowed down exactly what bits are hardware specific, the
+actual procedure for writing and running the tests is pretty much the same as
+writing normal KUnit tests. One special caveat is that you have to reset
+hardware state in between test cases; if this is not possible, you may only be
+able to run one test case per invocation.
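+
+A sketch of one way to handle this, assuming a hypothetical ``my_hw_reset()``
+helper provided by your driver or platform code, is to reset the hardware in
+the suite's ``init`` function so that every test case starts from a known
+state:
+
+.. code-block:: c
+
+ static int my_hw_test_init(struct kunit *test)
+ {
+ /* Hypothetical helper: put the device back into a known state. */
+ my_hw_reset();
+ return 0;
+ }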
+
+.. TODO(brendanhiggins@google.com): Add an actual example of an architecture
+ dependent KUnit test.
diff --git a/Documentation/devicetree/bindings/arm/coresight.txt b/Documentation/devicetree/bindings/arm/coresight.txt
index fcc3bacfd8bc..d02c42d21f2f 100644
--- a/Documentation/devicetree/bindings/arm/coresight.txt
+++ b/Documentation/devicetree/bindings/arm/coresight.txt
@@ -87,6 +87,15 @@ its hardware characteristcs.
* port or ports: see "Graph bindings for Coresight" below.
+* Optional properties for all components:
+
+ * arm,coresight-loses-context-with-cpu : boolean. Indicates that the
+ hardware will lose register context on CPU power down (e.g. CPUIdle).
+ An example of where this may be needed are systems which contain a
+ coresight component and CPU in the same power domain. When the CPU
+ powers down the coresight component also powers down and loses its
+ context. This property is currently only used for the ETM 4.x driver.
+
* Optional properties for ETM/PTMs:
* arm,cp14: must be present if the system accesses ETM/PTM management
diff --git a/Documentation/devicetree/bindings/arm/omap/omap.txt b/Documentation/devicetree/bindings/arm/omap/omap.txt
index b301f753ed2c..e77635c5422c 100644
--- a/Documentation/devicetree/bindings/arm/omap/omap.txt
+++ b/Documentation/devicetree/bindings/arm/omap/omap.txt
@@ -43,7 +43,7 @@ SoC Families:
- OMAP2 generic - defaults to OMAP2420
compatible = "ti,omap2"
-- OMAP3 generic - defaults to OMAP3430
+- OMAP3 generic
compatible = "ti,omap3"
- OMAP4 generic - defaults to OMAP4430
compatible = "ti,omap4"
@@ -51,6 +51,8 @@ SoC Families:
compatible = "ti,omap5"
- DRA7 generic - defaults to DRA742
compatible = "ti,dra7"
+- AM33x generic
+ compatible = "ti,am33xx"
- AM43x generic - defaults to AM4372
compatible = "ti,am43"
@@ -63,12 +65,14 @@ SoCs:
- OMAP3430
compatible = "ti,omap3430", "ti,omap3"
+ legacy: "ti,omap34xx" - please do not use any more
- AM3517
compatible = "ti,am3517", "ti,omap3"
- OMAP3630
- compatible = "ti,omap36xx", "ti,omap3"
-- AM33xx
- compatible = "ti,am33xx", "ti,omap3"
+ compatible = "ti,omap3630", "ti,omap3"
+ legacy: "ti,omap36xx" - please do not use any more
+- AM335x
+ compatible = "ti,am33xx"
- OMAP4430
compatible = "ti,omap4430", "ti,omap4"
@@ -110,19 +114,19 @@ SoCs:
- AM4372
compatible = "ti,am4372", "ti,am43"
-Boards:
+Boards (incomplete list of examples):
- OMAP3 BeagleBoard : Low cost community board
- compatible = "ti,omap3-beagle", "ti,omap3"
+ compatible = "ti,omap3-beagle", "ti,omap3430", "ti,omap3"
- OMAP3 Tobi with Overo : Commercial expansion board with daughter board
- compatible = "gumstix,omap3-overo-tobi", "gumstix,omap3-overo", "ti,omap3"
+ compatible = "gumstix,omap3-overo-tobi", "gumstix,omap3-overo", "ti,omap3430", "ti,omap3"
- OMAP4 SDP : Software Development Board
- compatible = "ti,omap4-sdp", "ti,omap4430"
+ compatible = "ti,omap4-sdp", "ti,omap4430", "ti,omap4"
- OMAP4 PandaBoard : Low cost community board
- compatible = "ti,omap4-panda", "ti,omap4430"
+ compatible = "ti,omap4-panda", "ti,omap4430", "ti,omap4"
- OMAP4 DuoVero with Parlor : Commercial expansion board with daughter board
compatible = "gumstix,omap4-duovero-parlor", "gumstix,omap4-duovero", "ti,omap4430", "ti,omap4";
@@ -134,16 +138,16 @@ Boards:
compatible = "variscite,var-dvk-om44", "variscite,var-som-om44", "ti,omap4460", "ti,omap4";
- OMAP3 EVM : Software Development Board for OMAP35x, AM/DM37x
- compatible = "ti,omap3-evm", "ti,omap3"
+ compatible = "ti,omap3-evm", "ti,omap3630", "ti,omap3"
- AM335X EVM : Software Development Board for AM335x
- compatible = "ti,am335x-evm", "ti,am33xx", "ti,omap3"
+ compatible = "ti,am335x-evm", "ti,am33xx"
- AM335X Bone : Low cost community board
- compatible = "ti,am335x-bone", "ti,am33xx", "ti,omap3"
+ compatible = "ti,am335x-bone", "ti,am33xx"
- AM3359 ICEv2 : Low cost Industrial Communication Engine EVM.
- compatible = "ti,am3359-icev2", "ti,am33xx", "ti,omap3"
+ compatible = "ti,am3359-icev2", "ti,am33xx"
- AM335X OrionLXm : Substation Automation Platform
compatible = "novatech,am335x-lxm", "ti,am33xx"
diff --git a/Documentation/devicetree/bindings/cpufreq/ti-cpufreq.txt b/Documentation/devicetree/bindings/cpufreq/ti-cpufreq.txt
index 0c38e4b8fc51..1758051798fe 100644
--- a/Documentation/devicetree/bindings/cpufreq/ti-cpufreq.txt
+++ b/Documentation/devicetree/bindings/cpufreq/ti-cpufreq.txt
@@ -15,12 +15,16 @@ In 'cpus' nodes:
In 'operating-points-v2' table:
- compatible: Should be
- - 'operating-points-v2-ti-cpu' for am335x, am43xx, and dra7xx/am57xx SoCs
+ - 'operating-points-v2-ti-cpu' for am335x, am43xx, and dra7xx/am57xx,
+ omap34xx, omap36xx and am3517 SoCs
- syscon: A phandle pointing to a syscon node representing the control module
register space of the SoC.
Optional properties:
--------------------
+- "vdd-supply", "vbb-supply": to define two regulators for dra7xx
+- "cpu0-supply", "vbb-supply": to define two regulators for omap36xx
+
For each opp entry in 'operating-points-v2' table:
- opp-supported-hw: Two bitfields indicating:
1. Which revision of the SoC the OPP is supported by
diff --git a/Documentation/devicetree/bindings/crypto/allwinner,sun8i-ss.yaml b/Documentation/devicetree/bindings/crypto/allwinner,sun8i-ss.yaml
new file mode 100644
index 000000000000..8a29d36edf26
--- /dev/null
+++ b/Documentation/devicetree/bindings/crypto/allwinner,sun8i-ss.yaml
@@ -0,0 +1,60 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/crypto/allwinner,sun8i-ss.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Allwinner Security System v2 driver
+
+maintainers:
+ - Corentin Labbe <corentin.labbe@gmail.com>
+
+properties:
+ compatible:
+ enum:
+ - allwinner,sun8i-a83t-crypto
+ - allwinner,sun9i-a80-crypto
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: Bus clock
+ - description: Module clock
+
+ clock-names:
+ items:
+ - const: bus
+ - const: mod
+
+ resets:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+ - resets
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/clock/sun8i-a83t-ccu.h>
+ #include <dt-bindings/reset/sun8i-a83t-ccu.h>
+
+ crypto: crypto@1c15000 {
+ compatible = "allwinner,sun8i-a83t-crypto";
+ reg = <0x01c15000 0x1000>;
+ interrupts = <GIC_SPI 94 IRQ_TYPE_LEVEL_HIGH>;
+ resets = <&ccu RST_BUS_SS>;
+ clocks = <&ccu CLK_BUS_SS>, <&ccu CLK_SS>;
+ clock-names = "bus", "mod";
+ };
diff --git a/Documentation/devicetree/bindings/crypto/amlogic,gxl-crypto.yaml b/Documentation/devicetree/bindings/crypto/amlogic,gxl-crypto.yaml
new file mode 100644
index 000000000000..5becc60a0e28
--- /dev/null
+++ b/Documentation/devicetree/bindings/crypto/amlogic,gxl-crypto.yaml
@@ -0,0 +1,52 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/crypto/amlogic,gxl-crypto.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Amlogic GXL Cryptographic Offloader
+
+maintainers:
+ - Corentin Labbe <clabbe@baylibre.com>
+
+properties:
+ compatible:
+ items:
+ - const: amlogic,gxl-crypto
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ items:
+ - description: "Interrupt for flow 0"
+ - description: "Interrupt for flow 1"
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ const: blkmv
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/clock/gxbb-clkc.h>
+
+ crypto: crypto-engine@c883e000 {
+ compatible = "amlogic,gxl-crypto";
+ reg = <0x0 0xc883e000 0x0 0x36>;
+ interrupts = <GIC_SPI 188 IRQ_TYPE_EDGE_RISING>, <GIC_SPI 189 IRQ_TYPE_EDGE_RISING>;
+ clocks = <&clkc CLKID_BLKMV>;
+ clock-names = "blkmv";
+ };
diff --git a/Documentation/devicetree/bindings/devfreq/event/exynos-ppmu.txt b/Documentation/devicetree/bindings/devfreq/event/exynos-ppmu.txt
index 3e36c1d11386..fb46b491791c 100644
--- a/Documentation/devicetree/bindings/devfreq/event/exynos-ppmu.txt
+++ b/Documentation/devicetree/bindings/devfreq/event/exynos-ppmu.txt
@@ -10,14 +10,23 @@ The Exynos PPMU driver uses the devfreq-event class to provide event data
to various devfreq devices. The devfreq devices would use the event data when
determining the current state of each IP.
-Required properties:
+Required properties for PPMU device:
- compatible: Should be "samsung,exynos-ppmu" or "samsung,exynos-ppmu-v2".
- reg: physical base address of each PPMU and length of memory mapped region.
-Optional properties:
+Optional properties for PPMU device:
- clock-names : the name of clock used by the PPMU, "ppmu"
- clocks : phandles for clock specified in "clock-names" property
+Required properties for 'events' child node of PPMU device:
+- event-name : the unique event name among PPMU device
+Optional properties for 'events' child node of PPMU device:
+- event-data-type : Define the type of data which shall be counted
+by the counter. You can check include/dt-bindings/pmu/exynos_ppmu.h for
+all possible types, e.g. count read requests, count write data in bytes,
+etc. This field is optional and when it is missing, the driver code
+will use a default data type.
+
Example1 : PPMUv1 nodes in exynos3250.dtsi are listed below.
ppmu_dmc0: ppmu_dmc0@106a0000 {
@@ -145,3 +154,16 @@ Example3 : PPMUv2 nodes in exynos5433.dtsi are listed below.
reg = <0x104d0000 0x2000>;
status = "disabled";
};
+
+Example4 : 'event-data-type' in exynos4412-ppmu-common.dtsi are listed below.
+
+ &ppmu_dmc0 {
+ status = "okay";
+ events {
+ ppmu_dmc0_3: ppmu-event3-dmc0 {
+ event-name = "ppmu-event3-dmc0";
+ event-data-type = <(PPMU_RO_DATA_CNT |
+ PPMU_WO_DATA_CNT)>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/devfreq/exynos-bus.txt b/Documentation/devicetree/bindings/devfreq/exynos-bus.txt
index f8e946471a58..e71f752cc18f 100644
--- a/Documentation/devicetree/bindings/devfreq/exynos-bus.txt
+++ b/Documentation/devicetree/bindings/devfreq/exynos-bus.txt
@@ -50,8 +50,6 @@ Required properties only for passive bus device:
Optional properties only for parent bus device:
- exynos,saturation-ratio: the percentage value which is used to calibrate
the performance count against total cycle count.
-- exynos,voltage-tolerance: the percentage value for bus voltage tolerance
- which is used to calculate the max voltage.
Detailed correlation between sub-blocks and power line according to Exynos SoC:
- In case of Exynos3250, there are two power line as following:
diff --git a/Documentation/devicetree/bindings/dma/renesas,usb-dmac.txt b/Documentation/devicetree/bindings/dma/renesas,usb-dmac.txt
index 372f0eeb5a2a..f1f95f678739 100644
--- a/Documentation/devicetree/bindings/dma/renesas,usb-dmac.txt
+++ b/Documentation/devicetree/bindings/dma/renesas,usb-dmac.txt
@@ -8,6 +8,7 @@ Required Properties:
- "renesas,r8a7745-usb-dmac" (RZ/G1E)
- "renesas,r8a77470-usb-dmac" (RZ/G1C)
- "renesas,r8a774a1-usb-dmac" (RZ/G2M)
+ - "renesas,r8a774b1-usb-dmac" (RZ/G2N)
- "renesas,r8a774c0-usb-dmac" (RZ/G2E)
- "renesas,r8a7790-usb-dmac" (R-Car H2)
- "renesas,r8a7791-usb-dmac" (R-Car M2-W)
diff --git a/Documentation/devicetree/bindings/fsi/fsi-master-aspeed.txt b/Documentation/devicetree/bindings/fsi/fsi-master-aspeed.txt
new file mode 100644
index 000000000000..b758f91914f7
--- /dev/null
+++ b/Documentation/devicetree/bindings/fsi/fsi-master-aspeed.txt
@@ -0,0 +1,24 @@
+Device-tree bindings for AST2600 FSI master
+-------------------------------------------
+
+The AST2600 contains two identical FSI masters. They share a clock, and each
+has a separate interrupt line and output pins.
+
+Required properties:
+ - compatible: "aspeed,ast2600-fsi-master"
+ - reg: base address and length
+ - clocks: phandle and clock number
+ - interrupts: platform dependent interrupt description
+ - pinctrl-0: phandle to pinctrl node
+ - pinctrl-names: pinctrl state
+
+Examples:
+
+ fsi-master {
+ compatible = "aspeed,ast2600-fsi-master", "fsi-master";
+ reg = <0x1e79b000 0x94>;
+ interrupts = <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_fsi1_default>;
+ clocks = <&syscon ASPEED_CLK_GATE_FSICLK>;
+ };
diff --git a/Documentation/devicetree/bindings/hwmon/adi,ltc2947.yaml b/Documentation/devicetree/bindings/hwmon/adi,ltc2947.yaml
new file mode 100644
index 000000000000..ae04903f34bf
--- /dev/null
+++ b/Documentation/devicetree/bindings/hwmon/adi,ltc2947.yaml
@@ -0,0 +1,104 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/bindings/hwmon/adi,ltc2947.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Analog Devices LTC2947 high precision power and energy monitor
+
+maintainers:
+ - Nuno Sá <nuno.sa@analog.com>
+
+description: |
+ Analog Devices LTC2947 high precision power and energy monitor over SPI or I2C.
+
+ https://www.analog.com/media/en/technical-documentation/data-sheets/LTC2947.pdf
+
+properties:
+ compatible:
+ enum:
+ - adi,ltc2947
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ description:
+ The LTC2947 uses either a trimmed internal oscillator or an external clock
+ as the time base for determining the integration period to represent time,
+ charge and energy. When an external clock is used, this property must be
+ set accordingly.
+ maxItems: 1
+
+ adi,accumulator-ctl-pol:
+ description:
+ This property controls the polarity of current that is accumulated to
+ calculate charge and energy, so that, for example, accumulation can be
+ restricted to positive current only. Since there are two sets of registers
+ for the accumulated values, this entry can also have two items, which set
+ energy1/charge1 and energy2/charge2 respectively. Check table 12 of the
+ datasheet for more information on the supported options.
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32-array
+ - minItems: 2
+ maxItems: 2
+ items:
+ enum: [0, 1, 2, 3]
+ default: 0
+
+ adi,accumulation-deadband-microamp:
+ description:
+ This property controls the Accumulation Deadband, which allows setting the
+ level of current below which no accumulation takes place.
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ maximum: 255
+ default: 0
+
+ adi,gpio-out-pol:
+ description:
+ This property controls the GPIO polarity. Setting it to one makes the GPIO
+ active high, setting it to zero makes it active low. When this property
+ is present, the GPIO is automatically configured as output and set to
+ control a fan as a function of measured temperature.
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [0, 1]
+ default: 0
+
+ adi,gpio-in-accum:
+ description:
+ When set, this property sets the GPIO as input. It is then used to control
+ the accumulation of charge, energy and time. This function can be
+ enabled/configured separately for each of the two sets of accumulation
+ registers. Check table 13 of the datasheet for more information on the
+ supported options. This property cannot be used together with
+ adi,gpio-out-pol.
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32-array
+ - minItems: 2
+ maxItems: 2
+ items:
+ enum: [0, 1, 2]
+ default: 0
+
+required:
+ - compatible
+ - reg
+
+
+examples:
+ - |
+ spi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ltc2947_spi: ltc2947@0 {
+ compatible = "adi,ltc2947";
+ reg = <0>;
+ /* accumulation takes place always for energ1/charge1. */
+ /* accumulation only on positive current for energy2/charge2. */
+ adi,accumulator-ctl-pol = <0 1>;
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/hwmon/ibm,cffps1.txt b/Documentation/devicetree/bindings/hwmon/ibm,cffps1.txt
index 1036f65fb778..d9a2719f9243 100644
--- a/Documentation/devicetree/bindings/hwmon/ibm,cffps1.txt
+++ b/Documentation/devicetree/bindings/hwmon/ibm,cffps1.txt
@@ -5,6 +5,9 @@ Required properties:
- compatible : Must be one of the following:
"ibm,cffps1"
"ibm,cffps2"
+ or "ibm,cffps" if the system
+ must support any version of the
+ power supply
- reg = < I2C bus address >; : Address of the power supply on the
I2C bus.
diff --git a/Documentation/devicetree/bindings/hwmon/ti,tmp513.yaml b/Documentation/devicetree/bindings/hwmon/ti,tmp513.yaml
new file mode 100644
index 000000000000..168235ad5d81
--- /dev/null
+++ b/Documentation/devicetree/bindings/hwmon/ti,tmp513.yaml
@@ -0,0 +1,93 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+
+$id: http://devicetree.org/schemas/hwmon/ti,tmp513.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: TMP513/512 system monitor sensor
+
+maintainers:
+ - Eric Tremblay <etremblay@distech-controls.com>
+
+description: |
+ The TMP512 (dual-channel) and TMP513 (triple-channel) are system monitors
+ that include remote sensors, a local temperature sensor, and a high-side
+ current shunt monitor. These system monitors have the capability of measuring
+ remote temperatures, on-chip temperatures, and system voltage/power/current
+ consumption.
+
+ Datasheets:
+ http://www.ti.com/lit/gpn/tmp513
+ http://www.ti.com/lit/gpn/tmp512
+
+
+properties:
+ compatible:
+ enum:
+ - ti,tmp512
+ - ti,tmp513
+
+ reg:
+ maxItems: 1
+
+ shunt-resistor-micro-ohms:
+ description: |
+ If 0, the calibration process will be skipped and the current and power
+ measurement engine will not work. Temperature and voltage measurement
+ will continue to work. The shunt value also needs to satisfy:
+ rshunt <= pga-gain * 40 * 1000 * 1000.
+ If not, it is not possible to compute a valid calibration value.
+ default: 1000
+
+ ti,pga-gain:
+ description: |
+ The gain value for the PGA function. This is 8, 4, 2 or 1.
+ The PGA gain affects the shunt voltage range.
+ The range will be equal to: pga-gain * 40mV
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [1, 2, 4, 8]
+ default: 8
+
+ ti,bus-range-microvolt:
+ description: |
+ This is the operating range of the bus voltage in microvolt
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [16000000, 32000000]
+ default: 32000000
+
+ ti,nfactor:
+ description: |
+ Array of three (TMP513) or two (TMP512) n-Factor values, one for each
+ remote temperature channel.
+ See datasheet Table 11 for n-Factor range list and value interpretation.
+ allOf:
+ - $ref: /schemas/types.yaml#definitions/uint32-array
+ - minItems: 2
+ maxItems: 3
+ items:
+ default: 0x00
+ minimum: 0x00
+ maximum: 0xFF
+
+required:
+ - compatible
+ - reg
+
+examples:
+ - |
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ tmp513@5c {
+ compatible = "ti,tmp513";
+ reg = <0x5C>;
+ shunt-resistor-micro-ohms = <330000>;
+ ti,bus-range-microvolt = <32000000>;
+ ti,pga-gain = <8>;
+ ti,nfactor = <0x1 0xF3 0x00>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/interconnect/qcom,msm8974.yaml b/Documentation/devicetree/bindings/interconnect/qcom,msm8974.yaml
new file mode 100644
index 000000000000..9af3c6e59cff
--- /dev/null
+++ b/Documentation/devicetree/bindings/interconnect/qcom,msm8974.yaml
@@ -0,0 +1,62 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interconnect/qcom,msm8974.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm MSM8974 Network-On-Chip Interconnect
+
+maintainers:
+ - Brian Masney <masneyb@onstation.org>
+
+description: |
+ The Qualcomm MSM8974 interconnect providers support setting system
+ bandwidth requirements between various network-on-chip fabrics.
+
+properties:
+ reg:
+ maxItems: 1
+
+ compatible:
+ enum:
+ - qcom,msm8974-bimc
+ - qcom,msm8974-cnoc
+ - qcom,msm8974-mmssnoc
+ - qcom,msm8974-ocmemnoc
+ - qcom,msm8974-pnoc
+ - qcom,msm8974-snoc
+
+ '#interconnect-cells':
+ const: 1
+
+ clock-names:
+ items:
+ - const: bus
+ - const: bus_a
+
+ clocks:
+ items:
+ - description: Bus Clock
+ - description: Bus A Clock
+
+required:
+ - compatible
+ - reg
+ - '#interconnect-cells'
+ - clock-names
+ - clocks
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/qcom,rpmcc.h>
+
+ bimc: interconnect@fc380000 {
+ reg = <0xfc380000 0x6a000>;
+ compatible = "qcom,msm8974-bimc";
+ #interconnect-cells = <1>;
+ clock-names = "bus", "bus_a";
+ clocks = <&rpmcc RPM_SMD_BIMC_CLK>,
+ <&rpmcc RPM_SMD_BIMC_A_CLK>;
+ };
diff --git a/Documentation/devicetree/bindings/media/allwinner,sun8i-h3-deinterlace.yaml b/Documentation/devicetree/bindings/media/allwinner,sun8i-h3-deinterlace.yaml
new file mode 100644
index 000000000000..2e40f700e84f
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/allwinner,sun8i-h3-deinterlace.yaml
@@ -0,0 +1,76 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/media/allwinner,sun8i-h3-deinterlace.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Allwinner H3 Deinterlace Device Tree Bindings
+
+maintainers:
+ - Jernej Skrabec <jernej.skrabec@siol.net>
+ - Chen-Yu Tsai <wens@csie.org>
+ - Maxime Ripard <mripard@kernel.org>
+
+description: |-
+ The Allwinner H3 and later SoCs have a deinterlace core used for
+ deinterlacing interlaced video content.
+
+properties:
+ compatible:
+ const: allwinner,sun8i-h3-deinterlace
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: Deinterlace interface clock
+ - description: Deinterlace module clock
+ - description: Deinterlace DRAM clock
+
+ clock-names:
+ items:
+ - const: bus
+ - const: mod
+ - const: ram
+
+ resets:
+ maxItems: 1
+
+ interconnects:
+ maxItems: 1
+
+ interconnect-names:
+ const: dma-mem
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/clock/sun8i-h3-ccu.h>
+ #include <dt-bindings/reset/sun8i-h3-ccu.h>
+
+ deinterlace: deinterlace@1400000 {
+ compatible = "allwinner,sun8i-h3-deinterlace";
+ reg = <0x01400000 0x20000>;
+ clocks = <&ccu CLK_BUS_DEINTERLACE>,
+ <&ccu CLK_DEINTERLACE>,
+ <&ccu CLK_DRAM_DEINTERLACE>;
+ clock-names = "bus", "mod", "ram";
+ resets = <&ccu RST_BUS_DEINTERLACE>;
+ interrupts = <GIC_SPI 93 IRQ_TYPE_LEVEL_HIGH>;
+ interconnects = <&mbus 9>;
+ interconnect-names = "dma-mem";
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/media/i2c/ad5820.txt b/Documentation/devicetree/bindings/media/i2c/ad5820.txt
index 5940ca11c021..5764cbedf9b7 100644
--- a/Documentation/devicetree/bindings/media/i2c/ad5820.txt
+++ b/Documentation/devicetree/bindings/media/i2c/ad5820.txt
@@ -2,12 +2,20 @@
Required Properties:
- - compatible: Must contain "adi,ad5820"
+ - compatible: Must contain one of:
+ - "adi,ad5820"
+ - "adi,ad5821"
+ - "adi,ad5823"
- reg: I2C slave address
- VANA-supply: supply of voltage for VANA pin
+Optional properties:
+
+ - enable-gpios : GPIO spec for the XSHUTDOWN pin. The XSHUTDOWN signal is
+active low; a high level on the pin enables the device.
+
Example:
ad5820: coil@c {
@@ -15,5 +23,6 @@ Example:
reg = <0x0c>;
VANA-supply = <&vaux4>;
+ enable-gpios = <&msmgpio 26 GPIO_ACTIVE_HIGH>;
};
diff --git a/Documentation/devicetree/bindings/media/i2c/imx290.txt b/Documentation/devicetree/bindings/media/i2c/imx290.txt
new file mode 100644
index 000000000000..a3cc21410f7c
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/i2c/imx290.txt
@@ -0,0 +1,57 @@
+* Sony IMX290 1/2.8-Inch CMOS Image Sensor
+
+The Sony IMX290 is a 1/2.8-inch CMOS solid-state image sensor with square
+pixels, intended for color cameras. It is programmable through I2C and 4-wire
+interfaces. The sensor output is available via CMOS logic parallel SDR output,
+low-voltage LVDS DDR output and CSI-2 serial data output. The CSI-2 bus is the
+default. No bindings have been defined for the other buses.
+
+Required Properties:
+- compatible: Should be "sony,imx290"
+- reg: I2C bus address of the device
+- clocks: Reference to the xclk clock.
+- clock-names: Should be "xclk".
+- clock-frequency: Frequency of the xclk clock in Hz.
+- vdddo-supply: Sensor digital IO regulator.
+- vdda-supply: Sensor analog regulator.
+- vddd-supply: Sensor digital core regulator.
+
+Optional Properties:
+- reset-gpios: Sensor reset GPIO
+
+The imx290 device node should contain one 'port' child node with
+an 'endpoint' subnode. For further reading on port node refer to
+Documentation/devicetree/bindings/media/video-interfaces.txt.
+
+Required Properties on endpoint:
+- data-lanes: check ../video-interfaces.txt
+- link-frequencies: check ../video-interfaces.txt
+- remote-endpoint: check ../video-interfaces.txt
+
+Example:
+ &i2c1 {
+ ...
+ imx290: camera-sensor@1a {
+ compatible = "sony,imx290";
+ reg = <0x1a>;
+
+ reset-gpios = <&msmgpio 35 GPIO_ACTIVE_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&camera_rear_default>;
+
+ clocks = <&gcc GCC_CAMSS_MCLK0_CLK>;
+ clock-names = "xclk";
+ clock-frequency = <37125000>;
+
+ vdddo-supply = <&camera_vdddo_1v8>;
+ vdda-supply = <&camera_vdda_2v8>;
+ vddd-supply = <&camera_vddd_1v5>;
+
+ port {
+ imx290_ep: endpoint {
+ data-lanes = <1 2 3 4>;
+ link-frequencies = /bits/ 64 <445500000>;
+ remote-endpoint = <&csiphy0_ep>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/media/i2c/nokia,smia.txt b/Documentation/devicetree/bindings/media/i2c/nokia,smia.txt
index c3c3479233c4..10ece8108081 100644
--- a/Documentation/devicetree/bindings/media/i2c/nokia,smia.txt
+++ b/Documentation/devicetree/bindings/media/i2c/nokia,smia.txt
@@ -27,8 +27,6 @@ Mandatory properties
Optional properties
-------------------
-- nokia,nvm-size: The size of the NVM, in bytes. If the size is not given,
- the NVM contents will not be read.
- reset-gpios: XSHUTDOWN GPIO
- flash-leds: See ../video-interfaces.txt
- lens-focus: See ../video-interfaces.txt
diff --git a/Documentation/devicetree/bindings/media/i2c/ov2659.txt b/Documentation/devicetree/bindings/media/i2c/ov2659.txt
index cabc7d827dfb..92989a619f29 100644
--- a/Documentation/devicetree/bindings/media/i2c/ov2659.txt
+++ b/Documentation/devicetree/bindings/media/i2c/ov2659.txt
@@ -12,6 +12,12 @@ Required Properties:
- clock-names: should be "xvclk".
- link-frequencies: target pixel clock frequency.
+Optional Properties:
+- powerdown-gpios: reference to the GPIO connected to the pwdn pin, if any.
+ Active high with internal pull down resistor.
+- reset-gpios: reference to the GPIO connected to the resetb pin, if any.
+ Active low with internal pull up resistor.
+
For further reading on port node refer to
Documentation/devicetree/bindings/media/video-interfaces.txt.
@@ -27,6 +33,9 @@ Example:
clocks = <&clk_ov2659 0>;
clock-names = "xvclk";
+ powerdown-gpios = <&gpio6 14 GPIO_ACTIVE_HIGH>;
+ reset-gpios = <&gpio6 15 GPIO_ACTIVE_LOW>;
+
port {
ov2659_0: endpoint {
remote-endpoint = <&vpfe_ep>;
diff --git a/Documentation/devicetree/bindings/media/rc.yaml b/Documentation/devicetree/bindings/media/rc.yaml
index 9054555e6608..cdfc8ee023ac 100644
--- a/Documentation/devicetree/bindings/media/rc.yaml
+++ b/Documentation/devicetree/bindings/media/rc.yaml
@@ -39,6 +39,7 @@ properties:
- rc-avermedia-rm-ks
- rc-avertv-303
- rc-azurewave-ad-tu700
+ - rc-beelink-gs1
- rc-behold
- rc-behold-columbus
- rc-budget-ci-old
diff --git a/Documentation/devicetree/bindings/media/renesas,csi2.txt b/Documentation/devicetree/bindings/media/renesas,csi2.txt
index 331409259752..2da6f60b2b56 100644
--- a/Documentation/devicetree/bindings/media/renesas,csi2.txt
+++ b/Documentation/devicetree/bindings/media/renesas,csi2.txt
@@ -9,6 +9,7 @@ Mandatory properties
--------------------
- compatible: Must be one or more of the following
- "renesas,r8a774a1-csi2" for the R8A774A1 device.
+ - "renesas,r8a774b1-csi2" for the R8A774B1 device.
- "renesas,r8a774c0-csi2" for the R8A774C0 device.
- "renesas,r8a7795-csi2" for the R8A7795 device.
- "renesas,r8a7796-csi2" for the R8A7796 device.
diff --git a/Documentation/devicetree/bindings/media/renesas,vin.txt b/Documentation/devicetree/bindings/media/renesas,vin.txt
index aa217b096279..e30b0d4eefdd 100644
--- a/Documentation/devicetree/bindings/media/renesas,vin.txt
+++ b/Documentation/devicetree/bindings/media/renesas,vin.txt
@@ -14,6 +14,7 @@ on Gen3 and RZ/G2 platforms to a CSI-2 receiver.
- "renesas,vin-r8a7744" for the R8A7744 device
- "renesas,vin-r8a7745" for the R8A7745 device
- "renesas,vin-r8a774a1" for the R8A774A1 device
+ - "renesas,vin-r8a774b1" for the R8A774B1 device
- "renesas,vin-r8a774c0" for the R8A774C0 device
- "renesas,vin-r8a7778" for the R8A7778 device
- "renesas,vin-r8a7779" for the R8A7779 device
@@ -43,7 +44,7 @@ on Gen3 and RZ/G2 platforms to a CSI-2 receiver.
Additionally, an alias named vinX will need to be created to specify
which video input device this is.
-The per-board settings Gen2 platforms:
+The per-board settings for Gen2 and RZ/G1 platforms:
- port - sub-node describing a single endpoint connected to the VIN
from external SoC pins as described in video-interfaces.txt[1].
@@ -63,7 +64,7 @@ The per-board settings Gen2 platforms:
- data-enable-active: polarity of CLKENB signal, see [1] for
description. Default is active high.
-The per-board settings Gen3 and RZ/G2 platforms:
+The per-board settings for Gen3 and RZ/G2 platforms:
Gen3 and RZ/G2 platforms can support both a single connected parallel input
source from external SoC pins (port@0) and/or multiple parallel input sources
diff --git a/Documentation/devicetree/bindings/media/sh_mobile_ceu.txt b/Documentation/devicetree/bindings/media/sh_mobile_ceu.txt
deleted file mode 100644
index cfa4ffada8ae..000000000000
--- a/Documentation/devicetree/bindings/media/sh_mobile_ceu.txt
+++ /dev/null
@@ -1,17 +0,0 @@
-Bindings, specific for the sh_mobile_ceu_camera.c driver:
- - compatible: Should be "renesas,sh-mobile-ceu"
- - reg: register base and size
- - interrupts: the interrupt number
- - renesas,max-width: maximum image width, supported on this SoC
- - renesas,max-height: maximum image height, supported on this SoC
-
-Example:
-
-ceu0: ceu@fe910000 {
- compatible = "renesas,sh-mobile-ceu";
- reg = <0xfe910000 0xa0>;
- interrupt-parent = <&intcs>;
- interrupts = <0x880>;
- renesas,max-width = <8188>;
- renesas,max-height = <8188>;
-};
diff --git a/Documentation/devicetree/bindings/media/ti,vpe.yaml b/Documentation/devicetree/bindings/media/ti,vpe.yaml
new file mode 100644
index 000000000000..f3a8a350e85f
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/ti,vpe.yaml
@@ -0,0 +1,64 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/media/ti,vpe.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Texas Instruments DRA7x Video Processing Engine (VPE) Device Tree Bindings
+
+maintainers:
+ - Benoit Parrot <bparrot@ti.com>
+
+description: |-
+ The Video Processing Engine (VPE) is a key component for image
+ post-processing applications. VPE consists of a single memory-to-memory
+ path which can perform chroma up/down sampling, deinterlacing,
+ scaling and color space conversion.
+
+properties:
+ compatible:
+ const: ti,dra7-vpe
+
+ reg:
+ items:
+ - description: The VPE main register region
+ - description: Scaler (SC) register region
+ - description: Color Space Conversion (CSC) register region
+ - description: Video Port Direct Memory Access (VPDMA) register region
+
+ reg-names:
+ items:
+ - const: vpe_top
+ - const: sc
+ - const: csc
+ - const: vpdma
+
+ interrupts:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - reg-names
+ - interrupts
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ vpe: vpe@489d0000 {
+ compatible = "ti,dra7-vpe";
+ reg = <0x489d0000 0x120>,
+ <0x489d0700 0x80>,
+ <0x489d5700 0x18>,
+ <0x489dd000 0x400>;
+ reg-names = "vpe_top",
+ "sc",
+ "csc",
+ "vpdma";
+ interrupts = <GIC_SPI 354 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/mfd/da9062.txt b/Documentation/devicetree/bindings/mfd/da9062.txt
index edca653a5777..bc4b59de6a55 100644
--- a/Documentation/devicetree/bindings/mfd/da9062.txt
+++ b/Documentation/devicetree/bindings/mfd/da9062.txt
@@ -66,6 +66,9 @@ Sub-nodes:
details of individual regulator device can be found in:
Documentation/devicetree/bindings/regulator/regulator.txt
+ regulator-initial-mode may be specified for buck regulators using mode values
+ from include/dt-bindings/regulator/dlg,da9063-regulator.h.
+
- rtc : This node defines settings required for the Real-Time Clock associated
with the DA9062. There are currently no entries in this binding, however
compatible = "dlg,da9062-rtc" should be added if a node is created.
@@ -96,6 +99,7 @@ Example:
regulator-max-microvolt = <1570000>;
regulator-min-microamp = <500000>;
regulator-max-microamp = <2000000>;
+ regulator-initial-mode = <DA9063_BUCK_MODE_SYNC>;
regulator-boot-on;
};
DA9062_LDO1: ldo1 {
diff --git a/Documentation/devicetree/bindings/mips/ralink.txt b/Documentation/devicetree/bindings/mips/ralink.txt
index a16e8d7fe56c..8cc0ab41578c 100644
--- a/Documentation/devicetree/bindings/mips/ralink.txt
+++ b/Documentation/devicetree/bindings/mips/ralink.txt
@@ -16,3 +16,17 @@ value must be one of the following values:
ralink,mt7620a-soc
ralink,mt7620n-soc
ralink,mt7628a-soc
+ ralink,mt7688a-soc
+
+2. Boards
+
+GARDENA smart Gateway (MT7688)
+
+This board is based on the MediaTek MT7688 and equipped with 128 MiB
+of DDR, 8 MiB of SPI NOR flash, and an additional 128 MiB of SPI NAND
+storage.
+
+------------------------------
+Required root node properties:
+- compatible = "gardena,smart-gateway-mt7688", "ralink,mt7688a-soc",
+ "ralink,mt7628a-soc";
diff --git a/Documentation/devicetree/bindings/mmc/arasan,sdhci.txt b/Documentation/devicetree/bindings/mmc/arasan,sdhci.txt
index 7ca0aa7ccc0b..428685eb2ded 100644
--- a/Documentation/devicetree/bindings/mmc/arasan,sdhci.txt
+++ b/Documentation/devicetree/bindings/mmc/arasan,sdhci.txt
@@ -15,10 +15,15 @@ Required Properties:
- "arasan,sdhci-5.1": generic Arasan SDHCI 5.1 PHY
- "rockchip,rk3399-sdhci-5.1", "arasan,sdhci-5.1": rk3399 eMMC PHY
For this device it is strongly suggested to include arasan,soc-ctl-syscon.
+ - "xlnx,zynqmp-8.9a": ZynqMP SDHCI 8.9a PHY
+ For this device it is strongly suggested to include clock-output-names and
+ #clock-cells.
- "ti,am654-sdhci-5.1", "arasan,sdhci-5.1": TI AM654 MMC PHY
Note: This binding has been deprecated and moved to [5].
- "intel,lgm-sdhci-5.1-emmc", "arasan,sdhci-5.1": Intel LGM eMMC PHY
For this device it is strongly suggested to include arasan,soc-ctl-syscon.
+ - "intel,lgm-sdhci-5.1-sdxc", "arasan,sdhci-5.1": Intel LGM SDXC PHY
+ For this device it is strongly suggested to include arasan,soc-ctl-syscon.
[5] Documentation/devicetree/bindings/mmc/sdhci-am654.txt
@@ -38,15 +43,19 @@ Optional Properties:
- clock-output-names: If specified, this will be the name of the card clock
which will be exposed by this device. Required if #clock-cells is
specified.
- - #clock-cells: If specified this should be the value <0>. With this property
- in place we will export a clock representing the Card Clock. This clock
- is expected to be consumed by our PHY. You must also specify
+ - #clock-cells: If specified, this should be the value <0> or <1>. With this
+ property in place, one or two clocks representing the Card Clock will be
+ exported. These clocks are expected to be consumed by our PHY.
- xlnx,fails-without-test-cd: when present, the controller doesn't work when
 the CD line is not connected properly. Test mode can be used to force the
 controller to function.
- xlnx,int-clock-stable-broken: when present, the controller always reports
that the internal clock is stable even when it is not.
+ - xlnx,mio-bank: When specified, this indicates the MIO bank number in
+ which the command and data lines are configured. If not specified, the
+ driver assumes bank 0 (see the ZynqMP example below).
+
Example:
sdhci@e0100000 {
compatible = "arasan,sdhci-8.9a";
@@ -83,6 +92,18 @@ Example:
#clock-cells = <0>;
};
+ sdhci: mmc@ff160000 {
+ compatible = "xlnx,zynqmp-8.9a", "arasan,sdhci-8.9a";
+ interrupt-parent = <&gic>;
+ interrupts = <0 48 4>;
+ reg = <0x0 0xff160000 0x0 0x1000>;
+ clocks = <&clk200>, <&clk200>;
+ clock-names = "clk_xin", "clk_ahb";
+ clock-output-names = "clk_out_sd0", "clk_in_sd0";
+ #clock-cells = <1>;
+ clk-phase-sd-hs = <63>, <72>;
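+ /* Optional: MIO bank routing the CMD/DATA lines (value is illustrative) */
+ xlnx,mio-bank = <0>;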
+ };
+
emmc: sdhci@ec700000 {
compatible = "intel,lgm-sdhci-5.1-emmc", "arasan,sdhci-5.1";
reg = <0xec700000 0x300>;
@@ -97,3 +118,18 @@ Example:
phy-names = "phy_arasan";
arasan,soc-ctl-syscon = <&sysconf>;
};
+
+ sdxc: sdhci@ec600000 {
+ compatible = "intel,lgm-sdhci-5.1-sdxc", "arasan,sdhci-5.1";
+ reg = <0xec600000 0x300>;
+ interrupt-parent = <&ioapic1>;
+ interrupts = <43 1>;
+ clocks = <&cgu0 LGM_CLK_SDIO>, <&cgu0 LGM_CLK_NGI>,
+ <&cgu0 LGM_GCLK_SDXC>;
+ clock-names = "clk_xin", "clk_ahb", "gate";
+ clock-output-names = "sdxc_cardclock";
+ #clock-cells = <0>;
+ phys = <&sdxc_phy>;
+ phy-names = "phy_arasan";
+ arasan,soc-ctl-syscon = <&sysconf>;
+ };
diff --git a/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt b/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt
index f707b8bee304..2fb466ca2a9d 100644
--- a/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt
+++ b/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt
@@ -18,6 +18,9 @@ Required properties:
"fsl,imx6ull-usdhc"
"fsl,imx7d-usdhc"
"fsl,imx7ulp-usdhc"
+ "fsl,imx8mq-usdhc"
+ "fsl,imx8mm-usdhc"
+ "fsl,imx8mn-usdhc"
"fsl,imx8qxp-usdhc"
Optional properties:
diff --git a/Documentation/devicetree/bindings/mmc/jz4740.txt b/Documentation/devicetree/bindings/mmc/jz4740.txt
index 8a6f87f13114..453d3b9d145d 100644
--- a/Documentation/devicetree/bindings/mmc/jz4740.txt
+++ b/Documentation/devicetree/bindings/mmc/jz4740.txt
@@ -1,14 +1,16 @@
-* Ingenic JZ47xx MMC controllers
+* Ingenic XBurst MMC controllers
This file documents the device tree properties used for the MMC controller in
-Ingenic JZ4740/JZ4780 SoCs. These are in addition to the core MMC properties
-described in mmc.txt.
+Ingenic JZ4740/JZ4760/JZ4780/X1000 SoCs. These are in addition to the core MMC
+properties described in mmc.txt.
Required properties:
- compatible: Should be one of the following:
- "ingenic,jz4740-mmc" for the JZ4740
- "ingenic,jz4725b-mmc" for the JZ4725B
+ - "ingenic,jz4760-mmc" for the JZ4760
- "ingenic,jz4780-mmc" for the JZ4780
+ - "ingenic,x1000-mmc" for the X1000
- reg: Should contain the MMC controller registers location and length.
- interrupts: Should contain the interrupt specifier of the MMC controller.
- clocks: Clock for the MMC controller.
diff --git a/Documentation/devicetree/bindings/mmc/mmc-controller.yaml b/Documentation/devicetree/bindings/mmc/mmc-controller.yaml
index 080754e0ef35..b130450c3b34 100644
--- a/Documentation/devicetree/bindings/mmc/mmc-controller.yaml
+++ b/Documentation/devicetree/bindings/mmc/mmc-controller.yaml
@@ -333,6 +333,19 @@ patternProperties:
required:
- reg
+ "^clk-phase-(legacy|sd-hs|mmc-(hs|hs[24]00|ddr52)|uhs-(sdr(12|25|50|104)|ddr50))$":
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32-array
+ minItems: 2
+ maxItems: 2
+ items:
+ minimum: 0
+ maximum: 359
+ description:
+ Set the clock (phase) delays to be configured in the controller while
+ switching to a particular speed mode. The two values are specified in
+ degrees.
+
dependencies:
cd-debounce-delay-ms: [ cd-gpios ]
fixed-emmc-driver-type: [ non-removable ]
@@ -351,6 +364,7 @@ examples:
keep-power-in-suspend;
wakeup-source;
mmc-pwrseq = <&sdhci0_pwrseq>;
+ clk-phase-sd-hs = <63>, <72>;
};
- |
diff --git a/Documentation/devicetree/bindings/mmc/owl-mmc.yaml b/Documentation/devicetree/bindings/mmc/owl-mmc.yaml
new file mode 100644
index 000000000000..12b40213426d
--- /dev/null
+++ b/Documentation/devicetree/bindings/mmc/owl-mmc.yaml
@@ -0,0 +1,59 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mmc/owl-mmc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Actions Semi Owl SoCs SD/MMC/SDIO controller
+
+allOf:
+ - $ref: "mmc-controller.yaml"
+
+maintainers:
+ - Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+
+properties:
+ compatible:
+ const: actions,owl-mmc
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ minItems: 1
+
+ resets:
+ maxItems: 1
+
+ dmas:
+ maxItems: 1
+
+ dma-names:
+ const: mmc
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - resets
+ - dmas
+ - dma-names
+
+examples:
+ - |
+ mmc0: mmc@e0330000 {
+ compatible = "actions,owl-mmc";
+ reg = <0x0 0xe0330000 0x0 0x4000>;
+ interrupts = <0 42 4>;
+ clocks = <&cmu 56>;
+ resets = <&cmu 23>;
+ dmas = <&dma 2>;
+ dma-names = "mmc";
+ bus-width = <4>;
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/mmc/renesas,sdhi.txt b/Documentation/devicetree/bindings/mmc/renesas,sdhi.txt
index dd08d038a65c..bc08fc43a9be 100644
--- a/Documentation/devicetree/bindings/mmc/renesas,sdhi.txt
+++ b/Documentation/devicetree/bindings/mmc/renesas,sdhi.txt
@@ -11,6 +11,7 @@ Required properties:
"renesas,sdhi-r8a7744" - SDHI IP on R8A7744 SoC
"renesas,sdhi-r8a7745" - SDHI IP on R8A7745 SoC
"renesas,sdhi-r8a774a1" - SDHI IP on R8A774A1 SoC
+ "renesas,sdhi-r8a774b1" - SDHI IP on R8A774B1 SoC
"renesas,sdhi-r8a774c0" - SDHI IP on R8A774C0 SoC
"renesas,sdhi-r8a77470" - SDHI IP on R8A77470 SoC
"renesas,sdhi-mmc-r8a77470" - SDHI/MMC IP on R8A77470 SoC
diff --git a/Documentation/devicetree/bindings/mmc/sdhci-atmel.txt b/Documentation/devicetree/bindings/mmc/sdhci-atmel.txt
index 1b662d7171a0..503c6dbac1b2 100644
--- a/Documentation/devicetree/bindings/mmc/sdhci-atmel.txt
+++ b/Documentation/devicetree/bindings/mmc/sdhci-atmel.txt
@@ -9,6 +9,11 @@ Required properties:
- clocks: Phandlers to the clocks.
- clock-names: Must be "hclock", "multclk", "baseclk";
+Optional properties:
+- microchip,sdcal-inverted: when present, polarity on the SDCAL SoC pin is
+ inverted. The default polarity for this signal is described in the datasheet.
+ For instance on SAMA5D2, the pin is usually tied to the GND with a resistor
+ and a capacitor (see "SDMMC I/O Calibration" chapter).
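+
+ A board with the inverted wiring would simply add the boolean flag to the
+ controller node, as in this sketch:
+
+ microchip,sdcal-inverted;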
Example:
diff --git a/Documentation/devicetree/bindings/mmc/sdhci-milbeaut.txt b/Documentation/devicetree/bindings/mmc/sdhci-milbeaut.txt
new file mode 100644
index 000000000000..627ee89c125b
--- /dev/null
+++ b/Documentation/devicetree/bindings/mmc/sdhci-milbeaut.txt
@@ -0,0 +1,30 @@
+* SOCIONEXT Milbeaut SDHCI controller
+
+This file documents differences between the core properties in mmc.txt
+and the properties used by the sdhci_milbeaut driver.
+
+Required properties:
+- compatible: "socionext,milbeaut-m10v-sdhci-3.0"
+- clocks: Must contain an entry for each entry in clock-names. It is a
+ list of phandles and clock-specifier pairs.
+ See ../clocks/clock-bindings.txt for details.
+- clock-names: Should contain the following two entries:
+ "iface" - clock used for sdhci interface
+ "core" - core clock for sdhci controller
+
+Optional properties:
+- fujitsu,cmd-dat-delay-select: boolean property indicating that this host
+ requires the CMD_DAT_DELAY control to be enabled.
+
+Example:
+ sdhci3: mmc@1b010000 {
+ compatible = "socionext,milbeaut-m10v-sdhci-3.0";
+ reg = <0x1b010000 0x10000>;
+ interrupts = <0 265 0x4>;
+ voltage-ranges = <3300 3300>;
+ bus-width = <4>;
+ clocks = <&clk 7>, <&ahb_clk>;
+ clock-names = "core", "iface";
+ cap-sdio-irq;
+ fujitsu,cmd-dat-delay-select;
+ };
diff --git a/Documentation/devicetree/bindings/mtd/cadence-nand-controller.txt b/Documentation/devicetree/bindings/mtd/cadence-nand-controller.txt
new file mode 100644
index 000000000000..f3893c4d3c6a
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/cadence-nand-controller.txt
@@ -0,0 +1,53 @@
+* Cadence NAND controller
+
+Required properties:
+ - compatible : "cdns,hp-nfc"
+ - reg : Contains two entries, each of which is a tuple consisting of a
+ physical address and length. The first entry is the address and
+ length of the controller register set. The second entry is the
+ address and length of the Slave DMA data port.
+ - reg-names: should contain "reg" and "sdma"
+ - #address-cells: should be 1. The cell encodes the chip select connection.
+ - #size-cells : should be 0.
+ - interrupts : The interrupt number.
+ - clocks: phandle of the controller core clock (nf_clk).
+
+Optional properties:
+ - dmas: shall reference DMA channel associated to the NAND controller
+ - cdns,board-delay-ps : Estimated Board delay. The value includes the total
+ round trip delay for the signals and is used for deciding on values
+ associated with data read capture. The example formula for SDR mode is
+ the following:
+ board delay = RE#PAD delay + PCB trace to device + PCB trace from device
+ + DQ PAD delay
+
+Child nodes represent the available NAND chips.
+
+Required properties of NAND chips:
+ - reg: shall contain the native Chip Select ids, from 0 up to the maximum
+ supported by the Cadence NAND flash controller
+
+See Documentation/devicetree/bindings/mtd/nand.txt for more details on
+generic bindings.
+
+Example:
+
+nand_controller: nand-controller@60000000 {
+ compatible = "cdns,hp-nfc";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x60000000 0x10000>, <0x80000000 0x10000>;
+ reg-names = "reg", "sdma";
+ clocks = <&nf_clk>;
+ cdns,board-delay-ps = <4830>;
+ interrupts = <2 0>;
+ nand@0 {
+ reg = <0>;
+ label = "nand-1";
+ };
+ nand@1 {
+ reg = <1>;
+ label = "nand-2";
+ };
+
+};
diff --git a/Documentation/devicetree/bindings/mtd/intel,ixp4xx-flash.txt b/Documentation/devicetree/bindings/mtd/intel,ixp4xx-flash.txt
new file mode 100644
index 000000000000..4bdcb92ae381
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/intel,ixp4xx-flash.txt
@@ -0,0 +1,22 @@
+Flash device on Intel IXP4xx SoC
+
+These are regular CFI-compatible (Intel or AMD extended) flash chips with a
+specific big-endian or mixed-endian memory access pattern.
+
+Required properties:
+- compatible : must be "intel,ixp4xx-flash", "cfi-flash";
+- reg : memory address for the flash chip
+- bank-width : width in bytes of flash interface, should be <2>
+
+For the rest of the properties, see mtd-physmap.txt.
+
+The device tree may optionally contain sub-nodes describing partitions of the
+address space. See partition.txt for more detail.
+
+Example:
+
+flash@50000000 {
+ compatible = "intel,ixp4xx-flash", "cfi-flash";
+ reg = <0x50000000 0x01000000>;
+ bank-width = <2>;
+};
diff --git a/Documentation/devicetree/bindings/net/brcm,bcm7445-switch-v4.0.txt b/Documentation/devicetree/bindings/net/brcm,bcm7445-switch-v4.0.txt
index b7336b9d6a3c..48a7f916c5e4 100644
--- a/Documentation/devicetree/bindings/net/brcm,bcm7445-switch-v4.0.txt
+++ b/Documentation/devicetree/bindings/net/brcm,bcm7445-switch-v4.0.txt
@@ -44,6 +44,12 @@ Optional properties:
Admission Control Block supports reporting the number of packets in-flight in a
switch queue
+- resets: a single phandle and reset identifier pair. See
+ Documentation/devicetree/bindings/reset/reset.txt for details.
+
+- reset-names: If the "resets" property is specified, this property should
+ have the value "switch" to denote the switch reset line, as shown in the
+ sketch below.
+
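+A sketch of these properties in a switch node (the reset controller phandle
+and reset index are illustrative):
+
+ resets = <&reset 18>;
+ reset-names = "switch";
+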
Port subnodes:
Optional properties:
diff --git a/Documentation/devicetree/bindings/net/brcm,bcmgenet.txt b/Documentation/devicetree/bindings/net/brcm,bcmgenet.txt
index 3956af1d30f3..33a0d67e4ce5 100644
--- a/Documentation/devicetree/bindings/net/brcm,bcmgenet.txt
+++ b/Documentation/devicetree/bindings/net/brcm,bcmgenet.txt
@@ -2,7 +2,7 @@
Required properties:
- compatible: should contain one of "brcm,genet-v1", "brcm,genet-v2",
- "brcm,genet-v3", "brcm,genet-v4", "brcm,genet-v5".
+ "brcm,genet-v3", "brcm,genet-v4", "brcm,genet-v5", "brcm,bcm2711-genet-v5".
- reg: address and length of the register set for the device
- interrupts and/or interrupts-extended: must be two cells, the first cell
is the general purpose interrupt line, while the second cell is the
diff --git a/Documentation/devicetree/bindings/net/broadcom-bluetooth.txt b/Documentation/devicetree/bindings/net/broadcom-bluetooth.txt
index 4fa00e2eafcf..f16b99571af1 100644
--- a/Documentation/devicetree/bindings/net/broadcom-bluetooth.txt
+++ b/Documentation/devicetree/bindings/net/broadcom-bluetooth.txt
@@ -14,6 +14,8 @@ Required properties:
* "brcm,bcm4330-bt"
* "brcm,bcm43438-bt"
* "brcm,bcm4345c5"
+ * "brcm,bcm43540-bt"
+ * "brcm,bcm4335a0"
Optional properties:
diff --git a/Documentation/devicetree/bindings/net/ethernet-controller.yaml b/Documentation/devicetree/bindings/net/ethernet-controller.yaml
index 0e7c31794ae6..ac471b60ed6a 100644
--- a/Documentation/devicetree/bindings/net/ethernet-controller.yaml
+++ b/Documentation/devicetree/bindings/net/ethernet-controller.yaml
@@ -121,6 +121,11 @@ properties:
and is useful for determining certain configuration settings
such as flow control thresholds.
+ sfp:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description:
+ Specifies a reference to a node representing an SFP cage.
+
tx-fifo-depth:
$ref: /schemas/types.yaml#definitions/uint32
description:
diff --git a/Documentation/devicetree/bindings/net/ethernet-phy.yaml b/Documentation/devicetree/bindings/net/ethernet-phy.yaml
index f70f18ff821f..8927941c74bb 100644
--- a/Documentation/devicetree/bindings/net/ethernet-phy.yaml
+++ b/Documentation/devicetree/bindings/net/ethernet-phy.yaml
@@ -153,6 +153,11 @@ properties:
Delay after the reset was deasserted in microseconds. If
this property is missing the delay will be skipped.
+ sfp:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description:
+ Specifies a reference to a node representing an SFP cage.
+
required:
- reg
diff --git a/Documentation/devicetree/bindings/net/ftgmac100.txt b/Documentation/devicetree/bindings/net/ftgmac100.txt
index 72e7aaf7242e..f878c1103463 100644
--- a/Documentation/devicetree/bindings/net/ftgmac100.txt
+++ b/Documentation/devicetree/bindings/net/ftgmac100.txt
@@ -9,6 +9,7 @@ Required properties:
- "aspeed,ast2400-mac"
- "aspeed,ast2500-mac"
+ - "aspeed,ast2600-mac"
- reg: Address and length of the register set for the device
- interrupts: Should contain ethernet controller interrupt
@@ -23,6 +24,13 @@ Optional properties:
- no-hw-checksum: Used to disable HW checksum support. Here for backward
compatibility as the driver now should have correct defaults based on
the SoC.
+- clocks: In accordance with the generic clock bindings. Must describe the MAC
+ IP clock, and optionally an RMII RCLK gate for the AST2500/AST2600. The
+ required MAC clock must be the first entry.
+- clock-names:
+
+ - "MACCLK": The MAC IP clock
+ - "RCLK": Clock gate for the RMII RCLK
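+
+A possible clock hookup for an AST2600 MAC (a sketch; the clock indices are
+assumed from dt-bindings/clock/ast2600-clock.h):
+
+ clocks = <&syscon ASPEED_CLK_GATE_MAC1CLK>,
+ <&syscon ASPEED_CLK_MAC1RCLK>;
+ clock-names = "MACCLK", "RCLK";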
Example:
diff --git a/Documentation/devicetree/bindings/net/lpc-eth.txt b/Documentation/devicetree/bindings/net/lpc-eth.txt
index b92e927808b6..cfe0e5991d46 100644
--- a/Documentation/devicetree/bindings/net/lpc-eth.txt
+++ b/Documentation/devicetree/bindings/net/lpc-eth.txt
@@ -10,6 +10,11 @@ Optional properties:
absent, "rmii" is assumed.
- use-iram: Use LPC32xx internal SRAM (IRAM) for DMA buffering
+Optional subnodes:
+- mdio : specifies the MDIO bus, used as a container for PHY nodes according
+ to phy.txt in the same directory; see the sketch below.
+
+
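+A sketch of the mdio container inside the MAC node (the PHY address is
+illustrative):
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ phy1: ethernet-phy@1 {
+ reg = <1>;
+ };
+ };
+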
Example:
mac: ethernet@31060000 {
diff --git a/Documentation/devicetree/bindings/net/nfc/pn532.txt b/Documentation/devicetree/bindings/net/nfc/pn532.txt
new file mode 100644
index 000000000000..a5507dc499bc
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/nfc/pn532.txt
@@ -0,0 +1,46 @@
+* NXP Semiconductors PN532 NFC Controller
+
+Required properties:
+- compatible: Should be
+ - "nxp,pn532": place a node with this compatible inside the devicetree node
+ of the bus to which the NFC chip is connected.
+ Currently the kernel has phy bindings for uart and i2c.
+ - "nxp,pn532-i2c" (DEPRECATED) only works for the i2c binding.
+ - "nxp,pn533-i2c" (DEPRECATED) only works for the i2c binding.
+
+Required properties if connected on i2c:
+- clock-frequency: I²C work frequency.
+- reg: the I²C bus address. This is fixed at 0x24 for the PN532.
+- interrupts: GPIO interrupt to which the chip is connected
+
+Optional SoC Specific Properties:
+- pinctrl-names: Contains only one value - "default".
+- pinctrl-0: Specifies the pin control groups used for this controller.
+
+Example (for ARM-based BeagleBone with PN532 on I2C2):
+
+&i2c2 {
+ pn532: nfc@24 {
+ compatible = "nxp,pn532";
+ reg = <0x24>;
+ clock-frequency = <400000>;
+
+ interrupt-parent = <&gpio1>;
+ interrupts = <17 IRQ_TYPE_EDGE_FALLING>;
+ };
+};
+
+Example (for PN532 connected via uart):
+
+uart4: serial@49042000 {
+ compatible = "ti,omap3-uart";
+
+ pn532: nfc {
+ compatible = "nxp,pn532";
+ };
+};
diff --git a/Documentation/devicetree/bindings/net/nfc/pn533-i2c.txt b/Documentation/devicetree/bindings/net/nfc/pn533-i2c.txt
deleted file mode 100644
index 2efe3886b95b..000000000000
--- a/Documentation/devicetree/bindings/net/nfc/pn533-i2c.txt
+++ /dev/null
@@ -1,29 +0,0 @@
-* NXP Semiconductors PN532 NFC Controller
-
-Required properties:
-- compatible: Should be "nxp,pn532-i2c" or "nxp,pn533-i2c".
-- clock-frequency: I²C work frequency.
-- reg: address on the bus
-- interrupts: GPIO interrupt to which the chip is connected
-
-Optional SoC Specific Properties:
-- pinctrl-names: Contains only one value - "default".
-- pintctrl-0: Specifies the pin control groups used for this controller.
-
-Example (for ARM-based BeagleBone with PN532 on I2C2):
-
-&i2c2 {
-
-
- pn532: pn532@24 {
-
- compatible = "nxp,pn532-i2c";
-
- reg = <0x24>;
- clock-frequency = <400000>;
-
- interrupt-parent = <&gpio1>;
- interrupts = <17 IRQ_TYPE_EDGE_FALLING>;
-
- };
-};
diff --git a/Documentation/devicetree/bindings/net/qca,ar803x.yaml b/Documentation/devicetree/bindings/net/qca,ar803x.yaml
new file mode 100644
index 000000000000..5a6c9d20c0ba
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/qca,ar803x.yaml
@@ -0,0 +1,111 @@
+# SPDX-License-Identifier: GPL-2.0+
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/qca,ar803x.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Atheros AR803x PHY
+
+maintainers:
+ - Andrew Lunn <andrew@lunn.ch>
+ - Florian Fainelli <f.fainelli@gmail.com>
+ - Heiner Kallweit <hkallweit1@gmail.com>
+
+description: |
+ Bindings for Qualcomm Atheros AR803x PHYs
+
+allOf:
+ - $ref: ethernet-phy.yaml#
+
+properties:
+ qca,clk-out-frequency:
+ description: Clock output frequency in Hertz.
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - enum: [ 25000000, 50000000, 62500000, 125000000 ]
+
+ qca,clk-out-strength:
+ description: Clock output driver strength.
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - enum: [ 0, 1, 2 ]
+
+ qca,keep-pll-enabled:
+ description: |
+ If set, keep the PLL enabled even if there is no link. Useful if you
+ want to use the clock output without an ethernet link.
+
+ Only supported on the AR8031.
+ type: boolean
+
+ vddio-supply:
+ description: |
+ RGMII I/O voltage regulator (see regulator/regulator.yaml).
+
+ The PHY supports RGMII I/O voltages of 1.5V, 1.8V and 2.5V. You can
+ either connect this to the vddio-regulator (1.5V / 1.8V) or the
+ vddh-regulator (2.5V).
+
+ Only supported on the AR8031.
+
+ vddio-regulator:
+ type: object
+ description:
+ Initial data for the VDDIO regulator. Set this to 1.5V or 1.8V.
+ allOf:
+ - $ref: /schemas/regulator/regulator.yaml
+
+ vddh-regulator:
+ type: object
+ description:
+ Dummy subnode to model the external connection of the PHY VDDH
+ regulator to VDDIO.
+ allOf:
+ - $ref: /schemas/regulator/regulator.yaml
+
+
+examples:
+ - |
+ #include <dt-bindings/net/qca-ar803x.h>
+
+ ethernet {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ phy-mode = "rgmii-id";
+
+ ethernet-phy@0 {
+ reg = <0>;
+
+ qca,clk-out-frequency = <125000000>;
+ qca,clk-out-strength = <AR803X_STRENGTH_FULL>;
+
+ vddio-supply = <&vddio>;
+
+ vddio: vddio-regulator {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ };
+ };
+ - |
+ #include <dt-bindings/net/qca-ar803x.h>
+
+ ethernet {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ phy-mode = "rgmii-id";
+
+ ethernet-phy@0 {
+ reg = <0>;
+
+ qca,clk-out-frequency = <50000000>;
+ qca,keep-pll-enabled;
+
+ vddio-supply = <&vddh>;
+
+ vddh: vddh-regulator {
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/renesas,ether.yaml b/Documentation/devicetree/bindings/net/renesas,ether.yaml
new file mode 100644
index 000000000000..7f84df9790e2
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/renesas,ether.yaml
@@ -0,0 +1,114 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/renesas,ether.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Renesas Electronics SH EtherMAC
+
+allOf:
+ - $ref: ethernet-controller.yaml#
+
+maintainers:
+ - Sergei Shtylyov <sergei.shtylyov@cogentembedded.com>
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - enum:
+ - renesas,gether-r8a7740 # device is a part of R8A7740 SoC
+ - renesas,gether-r8a77980 # device is a part of R8A77980 SoC
+ - renesas,ether-r7s72100 # device is a part of R7S72100 SoC
+ - renesas,ether-r7s9210 # device is a part of R7S9210 SoC
+ - items:
+ - enum:
+ - renesas,ether-r8a7778 # device is a part of R8A7778 SoC
+ - renesas,ether-r8a7779 # device is a part of R8A7779 SoC
+ - enum:
+ - renesas,rcar-gen1-ether # a generic R-Car Gen1 device
+ - items:
+ - enum:
+ - renesas,ether-r8a7745 # device is a part of R8A7745 SoC
+ - renesas,ether-r8a7743 # device is a part of R8A7743 SoC
+ - renesas,ether-r8a7790 # device is a part of R8A7790 SoC
+ - renesas,ether-r8a7791 # device is a part of R8A7791 SoC
+ - renesas,ether-r8a7793 # device is a part of R8A7793 SoC
+ - renesas,ether-r8a7794 # device is a part of R8A7794 SoC
+ - enum:
+ - renesas,rcar-gen2-ether # a generic R-Car Gen2 or RZ/G1 device
+
+ reg:
+ items:
+ - description: E-DMAC/feLic registers
+ - description: TSU registers
+ minItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ '#address-cells':
+ description: number of address cells for the MDIO bus
+ const: 1
+
+ '#size-cells':
+ description: number of size cells on the MDIO bus
+ const: 0
+
+ clocks:
+ maxItems: 1
+
+ pinctrl-0: true
+
+ pinctrl-names: true
+
+ renesas,no-ether-link:
+ type: boolean
+ description:
+ specify when a board does not provide a proper Ether LINK signal
+
+ renesas,ether-link-active-low:
+ type: boolean
+ description:
+ specify when the Ether LINK signal is active-low instead of normal
+ active-high
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - phy-mode
+ - phy-handle
+ - '#address-cells'
+ - '#size-cells'
+ - clocks
+ - pinctrl-0
+
+examples:
+ # Lager board
+ - |
+ #include <dt-bindings/clock/r8a7790-clock.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ ethernet@ee700000 {
+ compatible = "renesas,ether-r8a7790", "renesas,rcar-gen2-ether";
+ reg = <0 0xee700000 0 0x400>;
+ interrupt-parent = <&gic>;
+ interrupts = <0 162 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp8_clks R8A7790_CLK_ETHER>;
+ phy-mode = "rmii";
+ phy-handle = <&phy1>;
+ pinctrl-0 = <&ether_pins>;
+ pinctrl-names = "default";
+ renesas,ether-link-active-low;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ phy1: ethernet-phy@1 {
+ reg = <1>;
+ interrupt-parent = <&irqc0>;
+ interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-0 = <&phy1_pins>;
+ pinctrl-names = "default";
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/sh_eth.txt b/Documentation/devicetree/bindings/net/sh_eth.txt
deleted file mode 100644
index abc36274227c..000000000000
--- a/Documentation/devicetree/bindings/net/sh_eth.txt
+++ /dev/null
@@ -1,69 +0,0 @@
-* Renesas Electronics SH EtherMAC
-
-This file provides information on what the device node for the SH EtherMAC
-interface contains.
-
-Required properties:
-- compatible: Must contain one or more of the following:
- "renesas,gether-r8a7740" if the device is a part of R8A7740 SoC.
- "renesas,ether-r8a7743" if the device is a part of R8A7743 SoC.
- "renesas,ether-r8a7745" if the device is a part of R8A7745 SoC.
- "renesas,ether-r8a7778" if the device is a part of R8A7778 SoC.
- "renesas,ether-r8a7779" if the device is a part of R8A7779 SoC.
- "renesas,ether-r8a7790" if the device is a part of R8A7790 SoC.
- "renesas,ether-r8a7791" if the device is a part of R8A7791 SoC.
- "renesas,ether-r8a7793" if the device is a part of R8A7793 SoC.
- "renesas,ether-r8a7794" if the device is a part of R8A7794 SoC.
- "renesas,gether-r8a77980" if the device is a part of R8A77980 SoC.
- "renesas,ether-r7s72100" if the device is a part of R7S72100 SoC.
- "renesas,ether-r7s9210" if the device is a part of R7S9210 SoC.
- "renesas,rcar-gen1-ether" for a generic R-Car Gen1 device.
- "renesas,rcar-gen2-ether" for a generic R-Car Gen2 or RZ/G1
- device.
-
- When compatible with the generic version, nodes must list
- the SoC-specific version corresponding to the platform
- first followed by the generic version.
-
-- reg: offset and length of (1) the E-DMAC/feLic register block (required),
- (2) the TSU register block (optional).
-- interrupts: interrupt specifier for the sole interrupt.
-- phy-mode: see ethernet.txt file in the same directory.
-- phy-handle: see ethernet.txt file in the same directory.
-- #address-cells: number of address cells for the MDIO bus, must be equal to 1.
-- #size-cells: number of size cells on the MDIO bus, must be equal to 0.
-- clocks: clock phandle and specifier pair.
-- pinctrl-0: phandle, referring to a default pin configuration node.
-
-Optional properties:
-- pinctrl-names: pin configuration state name ("default").
-- renesas,no-ether-link: boolean, specify when a board does not provide a proper
- Ether LINK signal.
-- renesas,ether-link-active-low: boolean, specify when the Ether LINK signal is
- active-low instead of normal active-high.
-
-Example (Lager board):
-
- ethernet@ee700000 {
- compatible = "renesas,ether-r8a7790",
- "renesas,rcar-gen2-ether";
- reg = <0 0xee700000 0 0x400>;
- interrupt-parent = <&gic>;
- interrupts = <0 162 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&mstp8_clks R8A7790_CLK_ETHER>;
- phy-mode = "rmii";
- phy-handle = <&phy1>;
- pinctrl-0 = <&ether_pins>;
- pinctrl-names = "default";
- renesas,ether-link-active-low;
- #address-cells = <1>;
- #size-cells = <0>;
-
- phy1: ethernet-phy@1 {
- reg = <1>;
- interrupt-parent = <&irqc0>;
- interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
- pinctrl-0 = <&phy1_pins>;
- pinctrl-names = "default";
- };
- };
diff --git a/Documentation/devicetree/bindings/net/ti,cpsw-switch.yaml b/Documentation/devicetree/bindings/net/ti,cpsw-switch.yaml
new file mode 100644
index 000000000000..81ae8cafabc1
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/ti,cpsw-switch.yaml
@@ -0,0 +1,240 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/ti,cpsw-switch.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: TI SoC Ethernet Switch Controller (CPSW) Device Tree Bindings
+
+maintainers:
+ - Grygorii Strashko <grygorii.strashko@ti.com>
+ - Sekhar Nori <nsekhar@ti.com>
+
+description:
+ The 3-port switch gigabit ethernet subsystem provides ethernet packet
+ communication and can be configured as an ethernet switch. It provides the
+ gigabit media independent interface (GMII), reduced gigabit media
+ independent interface (RGMII), reduced media independent interface (RMII),
+ and the management data input output (MDIO) for physical layer device (PHY)
+ management.
+
+properties:
+ compatible:
+ oneOf:
+ - const: ti,cpsw-switch
+ - items:
+ - const: ti,am335x-cpsw-switch
+ - const: ti,cpsw-switch
+ - items:
+ - const: ti,am4372-cpsw-switch
+ - const: ti,cpsw-switch
+ - items:
+ - const: ti,dra7-cpsw-switch
+ - const: ti,cpsw-switch
+
+ reg:
+ maxItems: 1
+ description:
+ The physical base address and size of the full CPSW module IO range
+
+ ranges: true
+
+ clocks:
+ maxItems: 1
+ description: CPSW functional clock
+
+ clock-names:
+ maxItems: 1
+ items:
+ - const: fck
+
+ interrupts:
+ items:
+ - description: RX_THRESH interrupt
+ - description: RX interrupt
+ - description: TX interrupt
+ - description: MISC interrupt
+
+ interrupt-names:
+ items:
+ - const: "rx_thresh"
+ - const: "rx"
+ - const: "tx"
+ - const: "misc"
+
+ pinctrl-names: true
+
+ syscon:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description:
+ Phandle to the system control device node which provides access to the
+ efuse IO range containing the MAC addresses
+
+
+ ethernet-ports:
+ type: object
+ properties:
+ '#address-cells':
+ const: 1
+ '#size-cells':
+ const: 0
+
+ patternProperties:
+ "^port@[0-9]+$":
+ type: object
+ minItems: 1
+ maxItems: 2
+ description: CPSW external ports
+
+ allOf:
+ - $ref: ethernet-controller.yaml#
+
+ properties:
+ reg:
+ maxItems: 1
+ enum: [1, 2]
+ description: CPSW port number
+
+ phys:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ maxItems: 1
+ description: phandle of the phy-gmii-sel PHY
+
+ label:
+ $ref: /schemas/types.yaml#/definitions/string-array
+ maxItems: 1
+ description: label associated with this port
+
+ ti,dual-emac-pvid:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ maxItems: 1
+ minimum: 1
+ maximum: 1024
+ description:
+ Specifies the default port VID used to segregate ports. The default
+ value is the CPSW port number.
+
+ required:
+ - reg
+ - phys
+
+ mdio:
+ type: object
+ allOf:
+ - $ref: "ti,davinci-mdio.yaml#"
+ description:
+ CPSW MDIO bus.
+
+ cpts:
+ type: object
+ description:
+ The Common Platform Time Sync (CPTS) module
+
+ properties:
+ clocks:
+ maxItems: 1
+ description: CPTS reference clock
+
+ clock-names:
+ maxItems: 1
+ items:
+ - const: cpts
+
+ cpts_clock_mult:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ Numerator to convert input clock ticks into ns
+
+ cpts_clock_shift:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ Denominator to convert input clock ticks into ns.
+ Mult and shift will be calculated based on the CPTS rftclk frequency
+ if both the cpts_clock_shift and cpts_clock_mult properties are absent.
+
+ required:
+ - clocks
+ - clock-names
+
+required:
+ - compatible
+ - reg
+ - ranges
+ - clocks
+ - clock-names
+ - interrupts
+ - interrupt-names
+ - '#address-cells'
+ - '#size-cells'
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/clock/dra7.h>
+
+ mac_sw: switch@0 {
+ compatible = "ti,dra7-cpsw-switch","ti,cpsw-switch";
+ reg = <0x0 0x4000>;
+ ranges = <0 0 0x4000>;
+ clocks = <&gmac_main_clk>;
+ clock-names = "fck";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ syscon = <&scm_conf>;
+ pinctrl-names = "default", "sleep";
+
+ interrupts = <GIC_SPI 334 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 335 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 336 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 337 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "rx_thresh", "rx", "tx", "misc";
+
+ ethernet-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpsw_port1: port@1 {
+ reg = <1>;
+ label = "port1";
+ mac-address = [ 00 00 00 00 00 00 ];
+ phys = <&phy_gmii_sel 1>;
+ phy-handle = <&ethphy0_sw>;
+ phy-mode = "rgmii";
+ ti,dual-emac-pvid = <1>;
+ };
+
+ cpsw_port2: port@2 {
+ reg = <2>;
+ label = "wan";
+ mac-address = [ 00 00 00 00 00 00 ];
+ phys = <&phy_gmii_sel 2>;
+ phy-handle = <&ethphy1_sw>;
+ phy-mode = "rgmii";
+ ti,dual-emac-pvid = <2>;
+ };
+ };
+
+ davinci_mdio_sw: mdio@1000 {
+ compatible = "ti,cpsw-mdio","ti,davinci_mdio";
+ reg = <0x1000 0x100>;
+ clocks = <&gmac_clkctrl DRA7_GMAC_GMAC_CLKCTRL 0>;
+ clock-names = "fck";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ bus_freq = <1000000>;
+
+ ethphy0_sw: ethernet-phy@0 {
+ reg = <0>;
+ };
+
+ ethphy1_sw: ethernet-phy@1 {
+ reg = <1>;
+ };
+ };
+
+ cpts {
+ clocks = <&gmac_clkctrl DRA7_GMAC_GMAC_CLKCTRL 25>;
+ clock-names = "cpts";
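+ /* Optional overrides for the tick-to-ns conversion; values are
+ illustrative */
+ cpts_clock_mult = <0x80000000>;
+ cpts_clock_shift = <29>;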
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/ti,dp83869.yaml b/Documentation/devicetree/bindings/net/ti,dp83869.yaml
new file mode 100644
index 000000000000..6fe3e451da8a
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/ti,dp83869.yaml
@@ -0,0 +1,84 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (C) 2019 Texas Instruments Incorporated
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/net/ti,dp83869.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: TI DP83869 ethernet PHY
+
+allOf:
+ - $ref: "ethernet-controller.yaml#"
+
+maintainers:
+ - Dan Murphy <dmurphy@ti.com>
+
+description: |
+ The DP83869HM device is a robust, fully-featured Gigabit PHY transceiver
+ with integrated PMD sublayers that supports 10BASE-Te, 100BASE-TX and
+ 1000BASE-T Ethernet protocols. The DP83869 also supports 1000BASE-X and
+ 100BASE-FX Fiber protocols.
+ This device interfaces to the MAC layer through Reduced GMII (RGMII) and
+ SGMII. The DP83869HM supports Media Conversion in Managed mode. In this
+ mode, the DP83869HM can run 1000BASE-X-to-1000BASE-T and
+ 100BASE-FX-to-100BASE-TX conversions. The DP83869HM can also support
+ Bridge Conversion from RGMII to SGMII and SGMII to RGMII.
+
+ Specifications about the PHY can be found at:
+ http://www.ti.com/lit/ds/symlink/dp83869hm.pdf
+
+properties:
+ reg:
+ maxItems: 1
+
+ ti,min-output-impedance:
+ type: boolean
+ description: |
+ MAC Interface Impedance control to set the programmable output impedance
+ to a minimum value (35 ohms).
+
+ ti,max-output-impedance:
+ type: boolean
+ description: |
+ MAC Interface Impedance control to set the programmable output impedance
+ to a maximum value (70 ohms).
+
+ tx-fifo-depth:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: |
+ Transmit FIFO depth; see dt-bindings/net/ti-dp83869.h for values.
+
+ rx-fifo-depth:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: |
+ Receive FIFO depth; see dt-bindings/net/ti-dp83869.h for values.
+
+ ti,clk-output-sel:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: |
+ Muxing option for the CLK_OUT pin; see dt-bindings/net/ti-dp83869.h for
+ values.
+
+ ti,op-mode:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: |
+ Operational mode for the PHY. If this is not set, the operational mode
+ is set by the straps. See dt-bindings/net/ti-dp83869.h for values.
+
+required:
+ - reg
+
+examples:
+ - |
+ #include <dt-bindings/net/ti-dp83869.h>
+ mdio0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ ethphy0: ethernet-phy@0 {
+ reg = <0>;
+ tx-fifo-depth = <DP83869_PHYCR_FIFO_DEPTH_4_B_NIB>;
+ rx-fifo-depth = <DP83869_PHYCR_FIFO_DEPTH_4_B_NIB>;
+ ti,op-mode = <DP83869_RGMII_COPPER_ETHERNET>;
+ ti,max-output-impedance;
+ ti,clk-output-sel = <DP83869_CLK_O_SEL_CHN_A_RCLK>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt b/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt
index ae661e65354e..017128394a3e 100644
--- a/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt
+++ b/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt
@@ -81,6 +81,12 @@ Optional properties:
Definition: Name of external front end module used. Some valid FEM names
for example: "microsemi-lx5586", "sky85703-11"
and "sky85803" etc.
+- qcom,snoc-host-cap-8bit-quirk:
+ Usage: Optional
+ Value type: <empty>
+ Definition: Quirk specifying that the firmware expects the 8-bit version
+ of the host capability QMI request
+- qcom,xo-cal-data: XO calibration offset to be configured in the XO trim
+ register.
Example (to supply PCI based wifi block details):
diff --git a/Documentation/devicetree/bindings/net/wireless/ti,wl1251.txt b/Documentation/devicetree/bindings/net/wireless/ti,wl1251.txt
index bb2fcde6f7ff..f38950560982 100644
--- a/Documentation/devicetree/bindings/net/wireless/ti,wl1251.txt
+++ b/Documentation/devicetree/bindings/net/wireless/ti,wl1251.txt
@@ -35,3 +35,29 @@ Examples:
ti,power-gpio = <&gpio3 23 GPIO_ACTIVE_HIGH>; /* 87 */
};
};
+
+&mmc3 {
+ vmmc-supply = <&wlan_en>;
+
+ bus-width = <4>;
+ non-removable;
+ ti,non-removable;
+ cap-power-off-card;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc3_pins>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ wlan: wifi@1 {
+ compatible = "ti,wl1251";
+
+ reg = <1>;
+
+ interrupt-parent = <&gpio1>;
+ interrupts = <21 IRQ_TYPE_LEVEL_HIGH>; /* GPIO_21 */
+
+ ti,wl1251-has-eeprom;
+ };
+};
diff --git a/Documentation/devicetree/bindings/nvmem/rockchip-otp.txt b/Documentation/devicetree/bindings/nvmem/rockchip-otp.txt
new file mode 100644
index 000000000000..40f649f7c2e5
--- /dev/null
+++ b/Documentation/devicetree/bindings/nvmem/rockchip-otp.txt
@@ -0,0 +1,25 @@
+Rockchip internal OTP (One Time Programmable) memory device tree bindings
+
+Required properties:
+- compatible: Should be one of the following.
+ - "rockchip,px30-otp" - for PX30 SoCs.
+ - "rockchip,rk3308-otp" - for RK3308 SoCs.
+- reg: Should contain the registers location and size
+- clocks: Must contain an entry for each entry in clock-names.
+- clock-names: Should be "otp", "apb_pclk" and "phy".
+- resets: Must contain an entry for each entry in reset-names.
+ See ../../reset/reset.txt for details.
+- reset-names: Should be "phy".
+
+See nvmem.txt for more information.
+
+Example:
+ otp: otp@ff290000 {
+ compatible = "rockchip,px30-otp";
+ reg = <0x0 0xff290000 0x0 0x4000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ clocks = <&cru SCLK_OTP_USR>, <&cru PCLK_OTP_NS>,
+ <&cru PCLK_OTP_PHY>;
+ clock-names = "otp", "apb_pclk", "phy";
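+ /* resets/reset-names are required by this binding; the reset id is
+ assumed from the PX30 CRU binding header */
+ resets = <&cru SRST_OTP_PHY>;
+ reset-names = "phy";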
+ };
diff --git a/Documentation/devicetree/bindings/nvmem/sprd-efuse.txt b/Documentation/devicetree/bindings/nvmem/sprd-efuse.txt
new file mode 100644
index 000000000000..96b6feec27f0
--- /dev/null
+++ b/Documentation/devicetree/bindings/nvmem/sprd-efuse.txt
@@ -0,0 +1,39 @@
+= Spreadtrum eFuse device tree bindings =
+
+Required properties:
+- compatible: Should be "sprd,ums312-efuse".
+- reg: Address offset of the eFuse controller.
+- clock-names: Should be "enable".
+- clocks: The phandle and specifier referencing the controller's clock.
+- hwlocks: Reference to a phandle of a hwlock provider node.
+
+= Data cells =
+Data cells are child nodes of the eFuse node, with bindings as described in
+bindings/nvmem/nvmem.txt.
+
+Example:
+
+ ap_efuse: efuse@32240000 {
+ compatible = "sprd,ums312-efuse";
+ reg = <0 0x32240000 0 0x10000>;
+ clock-names = "enable";
+ hwlocks = <&hwlock 8>;
+ clocks = <&aonapb_gate CLK_EFUSE_EB>;
+
+ /* Data cells */
+ thermal_calib: calib@10 {
+ reg = <0x10 0x2>;
+ };
+ };
+
+= Data consumers =
+Data consumers are device nodes which consume nvmem data cells.
+
+Example:
+
+ thermal {
+ ...
+
+ nvmem-cells = <&thermal_calib>;
+ nvmem-cell-names = "calibration";
+ };
diff --git a/Documentation/devicetree/bindings/perf/arm-ccn.txt b/Documentation/devicetree/bindings/perf/arm-ccn.txt
index 43b5a71a5a9d..1c53b5aa3317 100644
--- a/Documentation/devicetree/bindings/perf/arm-ccn.txt
+++ b/Documentation/devicetree/bindings/perf/arm-ccn.txt
@@ -6,6 +6,7 @@ Required properties:
"arm,ccn-502"
"arm,ccn-504"
"arm,ccn-508"
+ "arm,ccn-512"
- reg: (standard registers property) physical address and size
(16MB) of the configuration registers block
diff --git a/Documentation/devicetree/bindings/perf/fsl-imx-ddr.txt b/Documentation/devicetree/bindings/perf/fsl-imx-ddr.txt
index d77e3f26f9e6..7822a806ea0a 100644
--- a/Documentation/devicetree/bindings/perf/fsl-imx-ddr.txt
+++ b/Documentation/devicetree/bindings/perf/fsl-imx-ddr.txt
@@ -5,6 +5,7 @@ Required properties:
- compatible: should be one of:
"fsl,imx8-ddr-pmu"
"fsl,imx8m-ddr-pmu"
+ "fsl,imx8mp-ddr-pmu"
- reg: physical address and size
diff --git a/Documentation/devicetree/bindings/phy/allwinner,sun50i-h6-usb3-phy.yaml b/Documentation/devicetree/bindings/phy/allwinner,sun50i-h6-usb3-phy.yaml
new file mode 100644
index 000000000000..e5922b427342
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/allwinner,sun50i-h6-usb3-phy.yaml
@@ -0,0 +1,47 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+# Copyright 2019 Ondrej Jirman <megous@megous.com>
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/phy/allwinner,sun50i-h6-usb3-phy.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Allwinner H6 USB3 PHY
+
+maintainers:
+ - Ondrej Jirman <megous@megous.com>
+
+properties:
+ compatible:
+ enum:
+ - allwinner,sun50i-h6-usb3-phy
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ resets:
+ maxItems: 1
+
+ "#phy-cells":
+ const: 0
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - resets
+ - "#phy-cells"
+
+examples:
+ - |
+ #include <dt-bindings/clock/sun50i-h6-ccu.h>
+ #include <dt-bindings/reset/sun50i-h6-ccu.h>
+ phy@5210000 {
+ compatible = "allwinner,sun50i-h6-usb3-phy";
+ reg = <0x5210000 0x10000>;
+ clocks = <&ccu CLK_USB_PHY1>;
+ resets = <&ccu RST_USB_PHY1>;
+ #phy-cells = <0>;
+ };
diff --git a/Documentation/devicetree/bindings/phy/phy-rockchip-inno-usb2.txt b/Documentation/devicetree/bindings/phy/phy-rockchip-inno-usb2.txt
index 00639baae74a..541f5298827c 100644
--- a/Documentation/devicetree/bindings/phy/phy-rockchip-inno-usb2.txt
+++ b/Documentation/devicetree/bindings/phy/phy-rockchip-inno-usb2.txt
@@ -2,6 +2,7 @@ ROCKCHIP USB2.0 PHY WITH INNO IP BLOCK
Required properties (phy (parent) node):
- compatible : should be one of the listed compatibles:
+ * "rockchip,px30-usb2phy"
* "rockchip,rk3228-usb2phy"
* "rockchip,rk3328-usb2phy"
* "rockchip,rk3366-usb2phy"
diff --git a/Documentation/devicetree/bindings/phy/qcom-qmp-phy.txt b/Documentation/devicetree/bindings/phy/qcom-qmp-phy.txt
index 085fbd676cfc..eac9ad3cbbc8 100644
--- a/Documentation/devicetree/bindings/phy/qcom-qmp-phy.txt
+++ b/Documentation/devicetree/bindings/phy/qcom-qmp-phy.txt
@@ -14,7 +14,8 @@ Required properties:
"qcom,msm8998-qmp-pcie-phy" for PCIe QMP phy on msm8998,
"qcom,sdm845-qmp-usb3-phy" for USB3 QMP V3 phy on sdm845,
"qcom,sdm845-qmp-usb3-uni-phy" for USB3 QMP V3 UNI phy on sdm845,
- "qcom,sdm845-qmp-ufs-phy" for UFS QMP phy on sdm845.
+ "qcom,sdm845-qmp-ufs-phy" for UFS QMP phy on sdm845,
+ "qcom,sm8150-qmp-ufs-phy" for UFS QMP phy on sm8150.
- reg:
- index 0: address and length of register set for PHY's common
@@ -57,6 +58,8 @@ Required properties:
"aux", "cfg_ahb", "ref", "com_aux".
For "qcom,sdm845-qmp-ufs-phy" must contain:
"ref", "ref_aux".
+ For "qcom,sm8150-qmp-ufs-phy" must contain:
+ "ref", "ref_aux".
- resets: a list of phandles and reset controller specifier pairs,
one for each entry in reset-names.
@@ -83,6 +86,8 @@ Required properties:
"phy", "common".
For "qcom,sdm845-qmp-ufs-phy": must contain:
"ufsphy".
+ For "qcom,sm8150-qmp-ufs-phy": must contain:
+ "ufsphy".
- vdda-phy-supply: Phandle to a regulator supply to PHY core block.
- vdda-pll-supply: Phandle to 1.8V regulator supply to PHY refclk pll block.
diff --git a/Documentation/devicetree/bindings/phy/rcar-gen3-phy-usb2.txt b/Documentation/devicetree/bindings/phy/rcar-gen3-phy-usb2.txt
index 503a8cfb3184..7734b219d9aa 100644
--- a/Documentation/devicetree/bindings/phy/rcar-gen3-phy-usb2.txt
+++ b/Documentation/devicetree/bindings/phy/rcar-gen3-phy-usb2.txt
@@ -10,6 +10,8 @@ Required properties:
SoC.
"renesas,usb2-phy-r8a774a1" if the device is a part of an R8A774A1
SoC.
+ "renesas,usb2-phy-r8a774b1" if the device is a part of an R8A774B1
+ SoC.
"renesas,usb2-phy-r8a774c0" if the device is a part of an R8A774C0
SoC.
"renesas,usb2-phy-r8a7795" if the device is a part of an R8A7795
diff --git a/Documentation/devicetree/bindings/phy/rcar-gen3-phy-usb3.txt b/Documentation/devicetree/bindings/phy/rcar-gen3-phy-usb3.txt
index 9d9826609c2f..0fe433b9a592 100644
--- a/Documentation/devicetree/bindings/phy/rcar-gen3-phy-usb3.txt
+++ b/Documentation/devicetree/bindings/phy/rcar-gen3-phy-usb3.txt
@@ -9,6 +9,8 @@ need this driver.
Required properties:
- compatible: "renesas,r8a774a1-usb3-phy" if the device is a part of an R8A774A1
SoC.
+ "renesas,r8a774b1-usb3-phy" if the device is a part of an R8A774B1
+ SoC.
"renesas,r8a7795-usb3-phy" if the device is a part of an R8A7795
SoC.
"renesas,r8a7796-usb3-phy" if the device is a part of an R8A7796
diff --git a/Documentation/devicetree/bindings/phy/rockchip,px30-dsi-dphy.yaml b/Documentation/devicetree/bindings/phy/rockchip,px30-dsi-dphy.yaml
new file mode 100644
index 000000000000..bb0da87bcd84
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/rockchip,px30-dsi-dphy.yaml
@@ -0,0 +1,75 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/rockchip,px30-dsi-dphy.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Rockchip MIPI DPHY with additional LVDS/TTL modes
+
+maintainers:
+ - Heiko Stuebner <heiko@sntech.de>
+
+properties:
+ "#phy-cells":
+ const: 0
+
+ "#clock-cells":
+ const: 0
+
+ compatible:
+ enum:
+ - rockchip,px30-dsi-dphy
+ - rockchip,rk3128-dsi-dphy
+ - rockchip,rk3368-dsi-dphy
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: PLL reference clock
+ - description: Module clock
+
+ clock-names:
+ items:
+ - const: ref
+ - const: pclk
+
+ power-domains:
+ maxItems: 1
+ description: phandle to the associated power domain
+
+ resets:
+ items:
+ - description: exclusive PHY reset line
+
+ reset-names:
+ items:
+ - const: apb
+
+required:
+ - "#phy-cells"
+ - "#clock-cells"
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+ - resets
+ - reset-names
+
+additionalProperties: false
+
+examples:
+ - |
+ dsi_dphy: phy@ff2e0000 {
+ compatible = "rockchip,px30-video-phy";
+ reg = <0x0 0xff2e0000 0x0 0x10000>;
+ clocks = <&pmucru 13>, <&cru 12>;
+ clock-names = "ref", "pclk";
+ #clock-cells = <0>;
+ resets = <&cru 12>;
+ reset-names = "apb";
+ #phy-cells = <0>;
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/pinctrl/allwinner,sun4i-a10-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/allwinner,sun4i-a10-pinctrl.yaml
new file mode 100644
index 000000000000..cd0503b6fe36
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/allwinner,sun4i-a10-pinctrl.yaml
@@ -0,0 +1,243 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pinctrl/allwinner,sun4i-a10-pinctrl.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Allwinner A10 Pin Controller Device Tree Bindings
+
+maintainers:
+ - Chen-Yu Tsai <wens@csie.org>
+ - Maxime Ripard <maxime.ripard@bootlin.com>
+
+properties:
+ "#gpio-cells":
+ const: 3
+ description:
+ GPIO consumers must use three arguments, first the number of the
+ bank, then the pin number inside that bank, and finally the GPIO
+ flags.
+
+ "#interrupt-cells":
+ const: 3
+ description:
+ Interrupt consumers must use three arguments, first the number
+ of the bank, then the pin number inside that bank, and finally
+ the interrupt flags.
+
+ compatible:
+ enum:
+ - allwinner,sun4i-a10-pinctrl
+ - allwinner,sun5i-a10s-pinctrl
+ - allwinner,sun5i-a13-pinctrl
+ - allwinner,sun6i-a31-pinctrl
+ - allwinner,sun6i-a31-r-pinctrl
+ - allwinner,sun6i-a31s-pinctrl
+ - allwinner,sun7i-a20-pinctrl
+ - allwinner,sun8i-a23-pinctrl
+ - allwinner,sun8i-a23-r-pinctrl
+ - allwinner,sun8i-a33-pinctrl
+ - allwinner,sun8i-a83t-pinctrl
+ - allwinner,sun8i-a83t-r-pinctrl
+ - allwinner,sun8i-h3-pinctrl
+ - allwinner,sun8i-h3-r-pinctrl
+ - allwinner,sun8i-r40-pinctrl
+ - allwinner,sun8i-v3-pinctrl
+ - allwinner,sun8i-v3s-pinctrl
+ - allwinner,sun9i-a80-pinctrl
+ - allwinner,sun9i-a80-r-pinctrl
+ - allwinner,sun50i-a64-pinctrl
+ - allwinner,sun50i-a64-r-pinctrl
+ - allwinner,sun50i-h5-pinctrl
+ - allwinner,sun50i-h6-pinctrl
+ - allwinner,sun50i-h6-r-pinctrl
+ - allwinner,suniv-f1c100s-pinctrl
+ - nextthing,gr8-pinctrl
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ minItems: 1
+ maxItems: 5
+ description:
+ One interrupt per external interrupt bank supported on the
+ controller, sorted by bank number in ascending order.
+
+ clocks:
+ items:
+ - description: Bus Clock
+ - description: High Frequency Oscillator
+ - description: Low Frequency Oscillator
+
+ clock-names:
+ items:
+ - const: apb
+ - const: hosc
+ - const: losc
+
+ resets:
+ maxItems: 1
+
+ gpio-controller: true
+ interrupt-controller: true
+ gpio-line-names: true
+
+ input-debounce:
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32-array
+ - minItems: 1
+ maxItems: 5
+ description:
+ Debouncing periods in microseconds, one period per interrupt
+ bank found in the controller
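+ # For instance, a controller with two interrupt banks might use
+ # input-debounce = <200>, <0>; to debounce only the first bank
+ # (illustrative values, not taken from a real board).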
+
+patternProperties:
+ # It's pretty scary, but the basic idea is that:
+ # - One node name can start with either s- or r- for PRCM nodes,
+ # - Then, the name itself can be any repetition of <string>- (to
+ # accommodate nodes like uart4-rts-cts-pins), where each
+ # string can be either starting with 'p' but in a string longer
+ # than 3, or something that doesn't start with 'p',
+ # - Then, the bank name is optional and will be between pa and pi,
+ # pl or pm. Some pin groups that have several options will have
+ # the pin numbers then,
+ # - Finally, the name will end with either -pin or -pins.
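+ # For example (illustrative names only), "uart0-pins", "r-uart0-pins",
+ # "uart4-rts-cts-pins" and "mmc0-pb-pins" all match this pattern.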
+
+ "^([rs]-)?(([a-z0-9]{3,}|[a-oq-z][a-z0-9]*?)?-)+?(p[a-ilm][0-9]*?-)??pins?$":
+ type: object
+
+ properties:
+ pins: true
+ function: true
+ bias-disable: true
+ bias-pull-up: true
+ bias-pull-down: true
+
+ drive-strength:
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - enum: [ 10, 20, 30, 40 ]
+
+ required:
+ - pins
+ - function
+
+ additionalProperties: false
+
+ "^vcc-p[a-hlm]-supply$":
+ description:
+ Power supplies for pin banks.
+
+required:
+ - "#gpio-cells"
+ - "#interrupt-cells"
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+ - gpio-controller
+ - interrupt-controller
+
+allOf:
+ # FIXME: We should have the pin bank supplies here, but not a lot of
+ # boards are defining it at the moment so it would generate a lot of
+ # warnings.
+
+ - if:
+ properties:
+ compatible:
+ enum:
+ - allwinner,sun9i-a80-pinctrl
+
+ then:
+ properties:
+ interrupts:
+ minItems: 5
+ maxItems: 5
+
+ else:
+ if:
+ properties:
+ compatible:
+ enum:
+ - allwinner,sun6i-a31-pinctrl
+ - allwinner,sun6i-a31s-pinctrl
+ - allwinner,sun50i-h6-pinctrl
+
+ then:
+ properties:
+ interrupts:
+ minItems: 4
+ maxItems: 4
+
+ else:
+ if:
+ properties:
+ compatible:
+ enum:
+ - allwinner,sun8i-a23-pinctrl
+ - allwinner,sun8i-a83t-pinctrl
+ - allwinner,sun50i-a64-pinctrl
+ - allwinner,sun50i-h5-pinctrl
+ - allwinner,suniv-f1c100s-pinctrl
+
+ then:
+ properties:
+ interrupts:
+ minItems: 3
+ maxItems: 3
+
+ else:
+ if:
+ properties:
+ compatible:
+ enum:
+ - allwinner,sun6i-a31-r-pinctrl
+ - allwinner,sun8i-a33-pinctrl
+ - allwinner,sun8i-h3-pinctrl
+ - allwinner,sun8i-v3-pinctrl
+ - allwinner,sun8i-v3s-pinctrl
+ - allwinner,sun9i-a80-r-pinctrl
+ - allwinner,sun50i-h6-r-pinctrl
+
+ then:
+ properties:
+ interrupts:
+ minItems: 2
+ maxItems: 2
+
+ else:
+ properties:
+ interrupts:
+ minItems: 1
+ maxItems: 1
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/sun5i-ccu.h>
+
+ pio: pinctrl@1c20800 {
+ compatible = "allwinner,sun5i-a13-pinctrl";
+ reg = <0x01c20800 0x400>;
+ interrupts = <28>;
+ clocks = <&ccu CLK_APB0_PIO>, <&osc24M>, <&osc32k>;
+ clock-names = "apb", "hosc", "losc";
+ gpio-controller;
+ interrupt-controller;
+ #interrupt-cells = <3>;
+ #gpio-cells = <3>;
+
+ uart1_pe_pins: uart1-pe-pins {
+ pins = "PE10", "PE11";
+ function = "uart1";
+ };
+
+ uart1_pg_pins: uart1-pg-pins {
+ pins = "PG3", "PG4";
+ function = "uart1";
+ };
+ };
diff --git a/Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt
deleted file mode 100644
index 328585c6da58..000000000000
--- a/Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt
+++ /dev/null
@@ -1,164 +0,0 @@
-* Allwinner A1X Pin Controller
-
-The pins controlled by sunXi pin controller are organized in banks,
-each bank has 32 pins. Each pin has 7 multiplexing functions, with
-the first two functions being GPIO in and out. The configuration on
-the pins includes drive strength and pull-up.
-
-Required properties:
-- compatible: Should be one of the following (depending on your SoC):
- "allwinner,sun4i-a10-pinctrl"
- "allwinner,sun5i-a10s-pinctrl"
- "allwinner,sun5i-a13-pinctrl"
- "allwinner,sun6i-a31-pinctrl"
- "allwinner,sun6i-a31s-pinctrl"
- "allwinner,sun6i-a31-r-pinctrl"
- "allwinner,sun7i-a20-pinctrl"
- "allwinner,sun8i-a23-pinctrl"
- "allwinner,sun8i-a23-r-pinctrl"
- "allwinner,sun8i-a33-pinctrl"
- "allwinner,sun9i-a80-pinctrl"
- "allwinner,sun9i-a80-r-pinctrl"
- "allwinner,sun8i-a83t-pinctrl"
- "allwinner,sun8i-a83t-r-pinctrl"
- "allwinner,sun8i-h3-pinctrl"
- "allwinner,sun8i-h3-r-pinctrl"
- "allwinner,sun8i-r40-pinctrl"
- "allwinner,sun8i-v3-pinctrl"
- "allwinner,sun8i-v3s-pinctrl"
- "allwinner,sun50i-a64-pinctrl"
- "allwinner,sun50i-a64-r-pinctrl"
- "allwinner,sun50i-h5-pinctrl"
- "allwinner,sun50i-h6-pinctrl"
- "allwinner,sun50i-h6-r-pinctrl"
- "allwinner,suniv-f1c100s-pinctrl"
- "nextthing,gr8-pinctrl"
-
-- reg: Should contain the register physical address and length for the
- pin controller.
-
-- clocks: phandle to the clocks feeding the pin controller:
- - "apb": the gated APB parent clock
- - "hosc": the high frequency oscillator in the system
- - "losc": the low frequency oscillator in the system
-
-Note: For backward compatibility reasons, the hosc and losc clocks are only
-required if you need to use the optional input-debounce property. Any new
-device tree should set them.
-
-Each pin bank, depending on the SoC, can have an associated regulator:
-
-- vcc-pa-supply: for the A10, A20, A31, A31s, A80 and R40 SoCs
-- vcc-pb-supply: for the A31, A31s, A80 and V3s SoCs
-- vcc-pc-supply: for the A10, A20, A31, A31s, A64, A80, H5, R40 and V3s SoCs
-- vcc-pd-supply: for the A23, A31, A31s, A64, A80, A83t, H3, H5 and R40 SoCs
-- vcc-pe-supply: for the A10, A20, A31, A31s, A64, A80, R40 and V3s SoCs
-- vcc-pf-supply: for the A10, A20, A31, A31s, A80, R40 and V3s SoCs
-- vcc-pg-supply: for the A10, A20, A31, A31s, A64, A80, H3, H5, R40 and V3s SoCs
-- vcc-ph-supply: for the A31, A31s and A80 SoCs
-- vcc-pl-supply: for the r-pinctrl of the A64, A80 and A83t SoCs
-- vcc-pm-supply: for the r-pinctrl of the A31, A31s and A80 SoCs
-
-Optional properties:
- - input-debounce: Array of debouncing periods in microseconds. One period per
- irq bank found in the controller. 0 if no setup required.
-
-
-Please refer to pinctrl-bindings.txt in this directory for details of the
-common pinctrl bindings used by client devices.
-
-A pinctrl node should contain at least one subnodes representing the
-pinctrl groups available on the machine. Each subnode will list the
-pins it needs, and how they should be configured, with regard to muxer
-configuration, drive strength and pullups. If one of these options is
-not set, its actual value will be unspecified.
-
-Allwinner A1X Pin Controller supports the generic pin multiplexing and
-configuration bindings. For details on each properties, you can refer to
- ./pinctrl-bindings.txt.
-
-Required sub-node properties:
- - pins
- - function
-
-Optional sub-node properties:
- - bias-disable
- - bias-pull-up
- - bias-pull-down
- - drive-strength
-
-*** Deprecated pin configuration and multiplexing binding
-
-Required subnode-properties:
-
-- allwinner,pins: List of strings containing the pin name.
-- allwinner,function: Function to mux the pins listed above to.
-
-Optional subnode-properties:
-- allwinner,drive: Integer. Represents the current sent to the pin
- 0: 10 mA
- 1: 20 mA
- 2: 30 mA
- 3: 40 mA
-- allwinner,pull: Integer.
- 0: No resistor
- 1: Pull-up resistor
- 2: Pull-down resistor
-
-Examples:
-
-pio: pinctrl@1c20800 {
- compatible = "allwinner,sun5i-a13-pinctrl";
- reg = <0x01c20800 0x400>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- uart1_pins_a: uart1@0 {
- allwinner,pins = "PE10", "PE11";
- allwinner,function = "uart1";
- allwinner,drive = <0>;
- allwinner,pull = <0>;
- };
-
- uart1_pins_b: uart1@1 {
- allwinner,pins = "PG3", "PG4";
- allwinner,function = "uart1";
- allwinner,drive = <0>;
- allwinner,pull = <0>;
- };
-};
-
-
-GPIO and interrupt controller
------------------------------
-
-This hardware also acts as a GPIO controller and an interrupt
-controller.
-
-Consumers that would want to refer to one or the other (or both)
-should provide through the usual *-gpios and interrupts properties a
-cell with 3 arguments, first the number of the bank, then the pin
-inside that bank, and finally the flags for the GPIO/interrupts.
-
-Example:
-
-xio: gpio@38 {
- compatible = "nxp,pcf8574a";
- reg = <0x38>;
-
- gpio-controller;
- #gpio-cells = <2>;
-
- interrupt-parent = <&pio>;
- interrupts = <6 0 IRQ_TYPE_EDGE_FALLING>;
- interrupt-controller;
- #interrupt-cells = <2>;
-};
-
-reg_usb1_vbus: usb1-vbus {
- compatible = "regulator-fixed";
- regulator-name = "usb1-vbus";
- regulator-min-microvolt = <5000000>;
- regulator-max-microvolt = <5000000>;
- gpio = <&pio 7 6 GPIO_ACTIVE_HIGH>;
-};
diff --git a/Documentation/devicetree/bindings/pinctrl/intel,lgm-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/intel,lgm-pinctrl.yaml
new file mode 100644
index 000000000000..240d429f773b
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/intel,lgm-pinctrl.yaml
@@ -0,0 +1,116 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pinctrl/intel,lgm-pinctrl.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Intel Lightning Mountain SoC pinmux & GPIO controller binding
+
+maintainers:
+ - Rahul Tanwar <rahul.tanwar@linux.intel.com>
+
+description: |
+ Pinmux & GPIO controller controls pin multiplexing & configuration including
+ GPIO function selection & GPIO attributes configuration.
+
+ Please refer to [1] for details of the common pinctrl bindings used by the
+ client devices.
+
+ [1] Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
+
+properties:
+ compatible:
+ const: intel,lgm-io
+
+ reg:
+ maxItems: 1
+
+# Client device subnode's properties
+patternProperties:
+ '-pins$':
+ type: object
+ description:
+ The pin controller's client devices use subnodes to request the desired
+ pin configuration. Client device subnodes use the standard properties
+ below.
+
+ properties:
+ function:
+ $ref: /schemas/types.yaml#/definitions/string
+ description:
+ A string containing the name of the function to mux to the group.
+
+ groups:
+ $ref: /schemas/types.yaml#/definitions/string-array
+ description:
+ An array of strings identifying the list of groups.
+
+ pins:
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ description:
+ List of pins to select with this function.
+
+ pinmux:
+ description: The applicable mux group.
+ allOf:
+ - $ref: "/schemas/types.yaml#/definitions/uint32-array"
+
+ bias-pull-up:
+ type: boolean
+
+ bias-pull-down:
+ type: boolean
+
+ drive-strength:
+ description: |
+ Selects the drive strength for the specified pins in mA.
+ 0: 2 mA
+ 1: 4 mA
+ 2: 8 mA
+ 3: 12 mA
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - enum: [0, 1, 2, 3]
+
+ slew-rate:
+ description: |
+ Sets the slew rate for the specified pins.
+ 0: slow slew
+ 1: fast slew
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - enum: [0, 1]
+
+ drive-open-drain:
+ type: boolean
+
+ output-enable:
+ type: boolean
+
+ required:
+ - function
+ - groups
+
+ additionalProperties: false
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ # Pinmux controller node
+ - |
+ pinctrl: pinctrl@e2880000 {
+ compatible = "intel,lgm-pinctrl";
+ reg = <0xe2880000 0x100000>;
+
+ uart0-pins {
+ pins = <64>, /* UART_RX0 */
+ <65>; /* UART_TX0 */
+ function = "CONSOLE_UART0";
+ pinmux = <1>,
+ <1>;
+ groups = "CONSOLE_UART0";
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/pinctrl/meson,pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/meson,pinctrl.txt
index 10dc4f7176ca..0aff1f28495c 100644
--- a/Documentation/devicetree/bindings/pinctrl/meson,pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/meson,pinctrl.txt
@@ -15,6 +15,7 @@ Required properties for the root node:
"amlogic,meson-axg-aobus-pinctrl"
"amlogic,meson-g12a-periphs-pinctrl"
"amlogic,meson-g12a-aobus-pinctrl"
+ "amlogic,meson-a1-periphs-pinctrl"
- reg: address and size of registers controlling irq functionality
=== GPIO sub-nodes ===
diff --git a/Documentation/devicetree/bindings/pinctrl/pincfg-node.yaml b/Documentation/devicetree/bindings/pinctrl/pincfg-node.yaml
new file mode 100644
index 000000000000..13b7ab9dd6d5
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/pincfg-node.yaml
@@ -0,0 +1,140 @@
+# SPDX-License-Identifier: GPL-2.0-only
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pinctrl/pincfg-node.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Generic pin configuration node schema
+
+maintainers:
+ - Linus Walleij <linus.walleij@linaro.org>
+
+description:
+ Many data items that are represented in a pin configuration node are common
+ and generic. Pin control bindings should use the properties defined below
+ where they are applicable; not all of these properties are relevant or useful
+ for all hardware or binding structures. Each individual binding document
+ should state which of these generic properties, if any, are used, and the
+ structure of the DT nodes that contain these properties.
+
+properties:
+ bias-disable:
+ type: boolean
+ description: disable any pin bias
+
+ bias-high-impedance:
+ type: boolean
+ description: high impedance mode ("third-state", "floating")
+
+ bias-bus-hold:
+ type: boolean
+ description: latch weakly
+
+ bias-pull-up:
+ oneOf:
+ - type: boolean
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ description: pull up the pin. On hardware supporting it, takes the
+ pull strength in Ohm as an optional argument.
+
+ bias-pull-down:
+ oneOf:
+ - type: boolean
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ description: pull down the pin. On hardware supporting it, takes the
+ pull strength in Ohm as an optional argument.
+
+ bias-pull-pin-default:
+ oneOf:
+ - type: boolean
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ description: use pin-default pull state. On hardware supporting it,
+ takes the pull strength in Ohm as an optional argument.
+
+ drive-push-pull:
+ type: boolean
+ description: drive actively high and low
+
+ drive-open-drain:
+ type: boolean
+ description: drive with open drain
+
+ drive-open-source:
+ type: boolean
+ description: drive with open source
+
+ drive-strength:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: sink or source at most X mA
+
+ drive-strength-microamp:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: sink or source at most X uA
+
+ input-enable:
+ type: boolean
+ description: enable input on pin (no effect on output, such as
+ enabling an input buffer)
+
+ input-disable:
+ type: boolean
+ description: disable input on pin (no effect on output, such as
+ disabling an input buffer)
+
+ input-schmitt-enable:
+ type: boolean
+ description: enable schmitt-trigger mode
+
+ input-schmitt-disable:
+ type: boolean
+ description: disable schmitt-trigger mode
+
+ input-debounce:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: Takes the debounce time in usec as argument or 0 to disable
+ debouncing
+
+ power-source:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: select between different power supplies
+
+ low-power-enable:
+ type: boolean
+ description: enable low power mode
+
+ low-power-disable:
+ type: boolean
+ description: disable low power mode
+
+ output-disable:
+ type: boolean
+ description: disable output on a pin (such as disable an output buffer)
+
+ output-enable:
+ type: boolean
+ description: enable output on a pin without actively driving it
+ (such as enabling an output buffer)
+
+ output-low:
+ type: boolean
+ description: set the pin to output mode with low level
+
+ output-high:
+ type: boolean
+ description: set the pin to output mode with high level
+
+ sleep-hardware-state:
+ type: boolean
+ description: indicates a sleep-related state that will be
+ programmed into the registers for the sleep state.
+
+ slew-rate:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: set the slew rate
+
+ skew-delay:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ this affects the expected clock skew on input pins
+ and the delay before latching a value to an output
+ pin. Typically indicates how many double-inverters are
+ used to delay the signal.
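+
+# For illustration only (not part of the schema), a pin configuration
+# node using a few of these generic properties might look like:
+#
+#    uart0-cts-rxd-pins {
+#        pins = "GPIO0_AJ5", "GPIO2_AH4";
+#        bias-pull-up;
+#        input-schmitt-enable;
+#    };
+#
+# The node and pin names above are hypothetical; each controller
+# binding defines its own valid values.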
diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt b/Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
index fcd37e93ed4d..4613bb17ace3 100644
--- a/Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
+++ b/Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
@@ -141,196 +141,8 @@ controller device.
== Generic pin multiplexing node content ==
-pin multiplexing nodes:
-
-function - the mux function to select
-groups - the list of groups to select with this function
- (either this or "pins" must be specified)
-pins - the list of pins to select with this function (either
- this or "groups" must be specified)
-
-Example:
-
-state_0_node_a {
- uart0 {
- function = "uart0";
- groups = "u0rxtx", "u0rtscts";
- };
-};
-state_1_node_a {
- spi0 {
- function = "spi0";
- groups = "spi0pins";
- };
-};
-state_2_node_a {
- function = "i2c0";
- pins = "mfio29", "mfio30";
-};
-
-Optionally an alternative binding can be used if more suitable depending on the
-pin controller hardware. For hardware where there is a large number of identical
-pin controller instances, naming each pin and function can easily become
-unmaintainable. This is especially the case if the same controller is used for
-different pins and functions depending on the SoC revision and packaging.
-
-For cases like this, the pin controller driver may use pinctrl-pin-array helper
-binding with a hardware based index and a number of pin configuration values:
-
-pincontroller {
- ... /* Standard DT properties for the device itself elided */
- #pinctrl-cells = <2>;
-
- state_0_node_a {
- pinctrl-pin-array = <
- 0 A_DELAY_PS(0) G_DELAY_PS(120)
- 4 A_DELAY_PS(0) G_DELAY_PS(360)
- ...
- >;
- };
- ...
-};
-
-Above #pinctrl-cells specifies the number of value cells in addition to the
-index of the registers. This is similar to the interrupts-extended binding with
-one exception. There is no need to specify the phandle for each entry as that
-is already known as the defined pins are always children of the pin controller
-node. Further having the phandle pointing to another pin controller would not
-currently work as the pinctrl framework uses named modes to group pins for each
-pin control device.
-
-The index for pinctrl-pin-array must relate to the hardware for the pinctrl
-registers, and must not be a virtual index of pin instances. The reason for
-this is to avoid mapping of the index in the dts files and the pin controller
-driver as it can change.
-
-For hardware where pin multiplexing configurations have to be specified for
-each single pin the number of required sub-nodes containing "pin" and
-"function" properties can quickly escalate and become hard to write and
-maintain.
-
-For cases like this, the pin controller driver may use the pinmux helper
-property, where the pin identifier is provided with mux configuration settings
-in a pinmux group. A pinmux group consists of the pin identifier and mux
-settings represented as a single integer or an array of integers.
-
-The pinmux property accepts an array of pinmux groups, each of them describing
-a single pin multiplexing configuration.
-
-pincontroller {
- state_0_node_a {
- pinmux = <PINMUX_GROUP>, <PINMUX_GROUP>, ...;
- };
-};
-
-Each individual pin controller driver bindings documentation shall specify
-how pin IDs and pin multiplexing configuration are defined and assembled
-together in a pinmux group.
+See pinmux-node.yaml
== Generic pin configuration node content ==
-Many data items that are represented in a pin configuration node are common
-and generic. Pin control bindings should use the properties defined below
-where they are applicable; not all of these properties are relevant or useful
-for all hardware or binding structures. Each individual binding document
-should state which of these generic properties, if any, are used, and the
-structure of the DT nodes that contain these properties.
-
-Supported generic properties are:
-
-pins - the list of pins that properties in the node
- apply to (either this, "group" or "pinmux" has to be
- specified)
-group - the group to apply the properties to, if the driver
- supports configuration of whole groups rather than
- individual pins (either this, "pins" or "pinmux" has
- to be specified)
-pinmux - the list of numeric pin ids and their mux settings
- that properties in the node apply to (either this,
- "pins" or "groups" have to be specified)
-bias-disable - disable any pin bias
-bias-high-impedance - high impedance mode ("third-state", "floating")
-bias-bus-hold - latch weakly
-bias-pull-up - pull up the pin
-bias-pull-down - pull down the pin
-bias-pull-pin-default - use pin-default pull state
-drive-push-pull - drive actively high and low
-drive-open-drain - drive with open drain
-drive-open-source - drive with open source
-drive-strength - sink or source at most X mA
-drive-strength-microamp - sink or source at most X uA
-input-enable - enable input on pin (no effect on output, such as
- enabling an input buffer)
-input-disable - disable input on pin (no effect on output, such as
- disabling an input buffer)
-input-schmitt-enable - enable schmitt-trigger mode
-input-schmitt-disable - disable schmitt-trigger mode
-input-debounce - debounce mode with debound time X
-power-source - select between different power supplies
-low-power-enable - enable low power mode
-low-power-disable - disable low power mode
-output-disable - disable output on a pin (such as disable an output
- buffer)
-output-enable - enable output on a pin without actively driving it
- (such as enabling an output buffer)
-output-low - set the pin to output mode with low level
-output-high - set the pin to output mode with high level
-sleep-hardware-state - indicate this is sleep related state which will be programmed
- into the registers for the sleep state.
-slew-rate - set the slew rate
-skew-delay - this affects the expected clock skew on input pins
- and the delay before latching a value to an output
- pin. Typically indicates how many double-inverters are
- used to delay the signal.
-
-For example:
-
-state_0_node_a {
- cts_rxd {
- pins = "GPIO0_AJ5", "GPIO2_AH4"; /* CTS+RXD */
- bias-pull-up;
- };
-};
-state_1_node_a {
- rts_txd {
- pins = "GPIO1_AJ3", "GPIO3_AH3"; /* RTS+TXD */
- output-high;
- };
-};
-state_2_node_a {
- foo {
- group = "foo-group";
- bias-pull-up;
- };
-};
-state_3_node_a {
- mux {
- pinmux = <GPIOx_PINm_MUXn>, <GPIOx_PINj_MUXk)>;
- input-enable;
- };
-};
-
-Some of the generic properties take arguments. For those that do, the
-arguments are described below.
-
-- pins takes a list of pin names or IDs as a required argument. The specific
- binding for the hardware defines:
- - Whether the entries are integers or strings, and their meaning.
-
-- pinmux takes a list of pin IDs and mux settings as required argument. The
- specific bindings for the hardware defines:
- - How pin IDs and mux settings are defined and assembled together in a single
- integer or an array of integers.
-
-- bias-pull-up, -down and -pin-default take as optional argument on hardware
- supporting it the pull strength in Ohm. bias-disable will disable the pull.
-
-- drive-strength takes as argument the target strength in mA.
-
-- drive-strength-microamp takes as argument the target strength in uA.
-
-- input-debounce takes the debounce time in usec as argument
- or 0 to disable debouncing
-
-More in-depth documentation on these parameters can be found in
-<include/linux/pinctrl/pinconf-generic.h>
+See pincfg-node.yaml
diff --git a/Documentation/devicetree/bindings/pinctrl/pinmux-node.yaml b/Documentation/devicetree/bindings/pinctrl/pinmux-node.yaml
new file mode 100644
index 000000000000..777623a57fd5
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/pinmux-node.yaml
@@ -0,0 +1,132 @@
+# SPDX-License-Identifier: GPL-2.0-only
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pinctrl/pinmux-node.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Generic pin multiplexing node schema
+
+maintainers:
+ - Linus Walleij <linus.walleij@linaro.org>
+
+description: |
+ The contents of the pin configuration child nodes are defined by the binding
+ for the individual pin controller device. The pin configuration nodes need not
+ be direct children of the pin controller device; they may be grandchildren,
+ for example. Whether this is legal, and whether there is any interaction
+ between the child and intermediate parent nodes, is again defined entirely by
+ the binding for the individual pin controller device.
+
+ While not required to be used, there are 3 generic forms of pin muxing nodes
+ which pin controller devices can use.
+
+ pin multiplexing nodes:
+
+ Example:
+
+ state_0_node_a {
+ uart0 {
+ function = "uart0";
+ groups = "u0rxtx", "u0rtscts";
+ };
+ };
+ state_1_node_a {
+ spi0 {
+ function = "spi0";
+ groups = "spi0pins";
+ };
+ };
+ state_2_node_a {
+ function = "i2c0";
+ pins = "mfio29", "mfio30";
+ };
+
+ Optionally an alternative binding can be used if more suitable depending on the
+ pin controller hardware. For hardware where there is a large number of identical
+ pin controller instances, naming each pin and function can easily become
+ unmaintainable. This is especially the case if the same controller is used for
+ different pins and functions depending on the SoC revision and packaging.
+
+ For cases like this, the pin controller driver may use pinctrl-pin-array helper
+ binding with a hardware based index and a number of pin configuration values:
+
+ pincontroller {
+ ... /* Standard DT properties for the device itself elided */
+ #pinctrl-cells = <2>;
+
+ state_0_node_a {
+ pinctrl-pin-array = <
+ 0 A_DELAY_PS(0) G_DELAY_PS(120)
+ 4 A_DELAY_PS(0) G_DELAY_PS(360)
+ ...
+ >;
+ };
+ ...
+ };
+
+ The #pinctrl-cells above specifies the number of value cells in addition to the
+ index of the registers. This is similar to the interrupts-extended binding with
+ one exception. There is no need to specify the phandle for each entry, as it
+ is already known: the defined pins are always children of the pin controller
+ node. Further, having the phandle point to another pin controller would not
+ currently work as the pinctrl framework uses named modes to group pins for each
+ pin control device.
+
+ The index for pinctrl-pin-array must relate to the hardware for the pinctrl
+ registers, and must not be a virtual index of pin instances. The reason for
+ this is to avoid mapping of the index in the dts files and the pin controller
+ driver as it can change.
+
+ For hardware where pin multiplexing configurations have to be specified for
+ each single pin the number of required sub-nodes containing "pin" and
+ "function" properties can quickly escalate and become hard to write and
+ maintain.
+
+ For cases like this, the pin controller driver may use the pinmux helper
+ property, where the pin identifier is provided with mux configuration settings
+ in a pinmux group. A pinmux group consists of the pin identifier and mux
+ settings represented as a single integer or an array of integers.
+
+ The pinmux property accepts an array of pinmux groups, each of them describing
+ a single pin multiplexing configuration.
+
+ pincontroller {
+ state_0_node_a {
+ pinmux = <PINMUX_GROUP>, <PINMUX_GROUP>, ...;
+ };
+ };
+
+ Each individual pin controller driver bindings documentation shall specify
+ how pin IDs and pin multiplexing configuration are defined and assembled
+ together in a pinmux group.
+
+properties:
+ function:
+ $ref: /schemas/types.yaml#/definitions/string
+ description: The mux function to select
+
+ pins:
+ oneOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32-array
+ - $ref: /schemas/types.yaml#/definitions/string-array
+ description:
+ The list of pin identifiers that properties in the node apply to. The
+ specific binding for the hardware defines whether the entries are integers
+ or strings, and their meaning.
+
+ group:
+ $ref: /schemas/types.yaml#/definitions/string-array
+ description:
+ The group to apply the properties to, if the driver supports
+ configuration of whole groups rather than individual pins (either
+ this, "pins" or "pinmux" has to be specified)
+
+ pinmux:
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32-array
+ description:
+ The list of numeric pin ids and their mux settings that properties in the
+ node apply to (either this, "pins" or "groups" has to be specified)
+
+ pinctrl-pin-array:
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ description:
+ Array of hardware pin indices and their configuration values, as
+ described for the pinctrl-pin-array helper binding above (one index
+ followed by #pinctrl-cells configuration cells per entry).
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,msm8976-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/qcom,msm8976-pinctrl.txt
new file mode 100644
index 000000000000..70d04d12f136
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,msm8976-pinctrl.txt
@@ -0,0 +1,183 @@
+Qualcomm MSM8976 TLMM block
+
+This binding describes the Top Level Mode Multiplexer block found in the
+MSM8956 and MSM8976 platforms.
+
+- compatible:
+ Usage: required
+ Value type: <string>
+ Definition: must be "qcom,msm8976-pinctrl"
+
+- reg:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: the base address and size of the TLMM register space.
+
+- interrupts:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: should specify the TLMM summary IRQ.
+
+- interrupt-controller:
+ Usage: required
+ Value type: <none>
+ Definition: identifies this node as an interrupt controller
+
+- #interrupt-cells:
+ Usage: required
+ Value type: <u32>
+ Definition: must be 2. Specifying the pin number and flags, as defined
+ in <dt-bindings/interrupt-controller/irq.h>
+
+- gpio-controller:
+ Usage: required
+ Value type: <none>
+ Definition: identifies this node as a gpio controller
+
+- #gpio-cells:
+ Usage: required
+ Value type: <u32>
+ Definition: must be 2. Specifying the pin number and flags, as defined
+ in <dt-bindings/gpio/gpio.h>
+
+- gpio-ranges:
+ Usage: required
+ Definition: see ../gpio/gpio.txt
+
+- gpio-reserved-ranges:
+ Usage: optional
+ Definition: see ../gpio/gpio.txt
+
+Please refer to ../gpio/gpio.txt and ../interrupt-controller/interrupts.txt for
+a general description of GPIO and interrupt bindings.
+
+Please refer to pinctrl-bindings.txt in this directory for details of the
+common pinctrl bindings used by client devices, including the meaning of the
+phrase "pin configuration node".
+
+The pin configuration nodes act as a container for an arbitrary number of
+subnodes. Each of these subnodes represents some desired configuration for a
+pin, a group, or a list of pins or groups. This configuration can include the
+mux function to select on those pin(s)/group(s), and various pin configuration
+parameters, such as pull-up, drive strength, etc.
+
+
+PIN CONFIGURATION NODES:
+
+The name of each subnode is not important; all subnodes should be enumerated
+and processed purely based on their content.
+
+Each subnode only affects those parameters that are explicitly listed. In
+other words, a subnode that lists a mux function but no pin configuration
+parameters implies no information about any pin configuration parameters.
+Similarly, a pin subnode that describes a pullup parameter implies no
+information about e.g. the mux function.
+
+
+The following generic properties as defined in pinctrl-bindings.txt are valid
+to specify in a pin configuration subnode:
+
+- pins:
+ Usage: required
+ Value type: <string-array>
+ Definition: List of gpio pins affected by the properties specified in
+ this subnode.
+
+ Valid pins are:
+ gpio0-gpio145
+ Supports mux, bias and drive-strength
+
+ sdc1_clk, sdc1_cmd, sdc1_data,
+ sdc2_clk, sdc2_cmd, sdc2_data,
+ sdc3_clk, sdc3_cmd, sdc3_data
+ Supports bias and drive-strength
+
+- function:
+ Usage: required
+ Value type: <string>
+ Definition: Specify the alternative function to be configured for the
+ specified pins. Functions are only valid for gpio pins.
+ Valid values are:
+
+ gpio, blsp_uart1, blsp_spi1, smb_int, blsp_i2c1, blsp_spi2,
+ blsp_uart2, blsp_i2c2, gcc_gp1_clk_b, blsp_spi3,
+ qdss_tracedata_b, blsp_i2c3, gcc_gp2_clk_b, gcc_gp3_clk_b,
+ blsp_spi4, cap_int, blsp_i2c4, blsp_spi5, blsp_uart5,
+ qdss_traceclk_a, m_voc, blsp_i2c5, qdss_tracectl_a,
+ qdss_tracedata_a, blsp_spi6, blsp_uart6, qdss_tracectl_b,
+ blsp_i2c6, qdss_traceclk_b, mdp_vsync, pri_mi2s_mclk_a,
+ sec_mi2s_mclk_a, cam_mclk, cci0_i2c, cci1_i2c, blsp1_spi,
+ blsp3_spi, gcc_gp1_clk_a, gcc_gp2_clk_a, gcc_gp3_clk_a,
+ uim_batt, sd_write, uim1_data, uim1_clk, uim1_reset,
+ uim1_present, uim2_data, uim2_clk, uim2_reset,
+ uim2_present, ts_xvdd, mipi_dsi0, us_euro, ts_resout,
+ ts_sample, sec_mi2s_mclk_b, pri_mi2s, codec_reset,
+ cdc_pdm0, us_emitter, pri_mi2s_mclk_b, pri_mi2s_mclk_c,
+ lpass_slimbus, lpass_slimbus0, lpass_slimbus1, codec_int1,
+ codec_int2, wcss_bt, sdc3, wcss_wlan2, wcss_wlan1,
+ wcss_wlan0, wcss_wlan, wcss_fm, key_volp, key_snapshot,
+ key_focus, key_home, pwr_down, dmic0_clk, hdmi_int,
+ dmic0_data, wsa_vi, wsa_en, blsp_spi8, wsa_irq, blsp_i2c8,
+ pa_indicator, modem_tsync, ssbi_wtr1, gsm1_tx, gsm0_tx,
+ sdcard_det, sec_mi2s, ss_switch,
+
+- bias-disable:
+ Usage: optional
+ Value type: <none>
+ Definition: The specified pins should be configured as no pull.
+
+- bias-pull-down:
+ Usage: optional
+ Value type: <none>
+ Definition: The specified pins should be configured as pull down.
+
+- bias-pull-up:
+ Usage: optional
+ Value type: <none>
+ Definition: The specified pins should be configured as pull up.
+
+- output-high:
+ Usage: optional
+ Value type: <none>
+ Definition: The specified pins are configured in output mode, driven
+ high.
+ Not valid for sdc pins.
+
+- output-low:
+ Usage: optional
+ Value type: <none>
+ Definition: The specified pins are configured in output mode, driven
+ low.
+ Not valid for sdc pins.
+
+- drive-strength:
+ Usage: optional
+ Value type: <u32>
+ Definition: Selects the drive strength for the specified pins, in mA.
+ Valid values are: 2, 4, 6, 8, 10, 12, 14 and 16
+
+Example:
+
+ tlmm: pinctrl@1000000 {
+ compatible = "qcom,msm8976-pinctrl";
+ reg = <0x1000000 0x300000>;
+ interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&tlmm 0 0 145>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ blsp1_uart2_active: blsp1_uart2_active {
+ mux {
+ pins = "gpio4", "gpio5", "gpio6", "gpio7";
+ function = "blsp_uart2";
+ };
+
+ config {
+ pins = "gpio4", "gpio5", "gpio6", "gpio7";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.txt b/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.txt
index c32bf3237545..7be5de8d253f 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.txt
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.txt
@@ -15,14 +15,18 @@ PMIC's from Qualcomm.
"qcom,pm8917-gpio"
"qcom,pm8921-gpio"
"qcom,pm8941-gpio"
+ "qcom,pm8950-gpio"
"qcom,pm8994-gpio"
"qcom,pm8998-gpio"
"qcom,pma8084-gpio"
+ "qcom,pmi8950-gpio"
"qcom,pmi8994-gpio"
"qcom,pmi8998-gpio"
"qcom,pms405-gpio"
"qcom,pm8150-gpio"
"qcom,pm8150b-gpio"
+ "qcom,pm6150-gpio"
+ "qcom,pm6150l-gpio"
And must contain either "qcom,spmi-gpio" or "qcom,ssbi-gpio"
if the device is on an spmi bus or an ssbi bus respectively
@@ -91,15 +95,19 @@ to specify in a pin configuration subnode:
gpio1-gpio38 for pm8917
gpio1-gpio44 for pm8921
gpio1-gpio36 for pm8941
+ gpio1-gpio8 for pm8950 (hole on gpio3)
gpio1-gpio22 for pm8994
gpio1-gpio26 for pm8998
gpio1-gpio22 for pma8084
+ gpio1-gpio2 for pmi8950
gpio1-gpio10 for pmi8994
gpio1-gpio12 for pms405 (holes on gpio1, gpio9 and gpio10)
gpio1-gpio10 for pm8150 (holes on gpio2, gpio5, gpio7
and gpio8)
gpio1-gpio12 for pm8150b (holes on gpio3, gpio4, gpio7)
gpio1-gpio12 for pm8150l (hole on gpio7)
+ gpio1-gpio10 for pm6150
+ gpio1-gpio12 for pm6150l
- function:
Usage: required
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,pmic-mpp.txt b/Documentation/devicetree/bindings/pinctrl/qcom,pmic-mpp.txt
index 2ab95bc26066..448d36a85730 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,pmic-mpp.txt
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,pmic-mpp.txt
@@ -16,6 +16,8 @@ of PMIC's from Qualcomm.
"qcom,pm8917-mpp",
"qcom,pm8921-mpp",
"qcom,pm8941-mpp",
+ "qcom,pm8950-mpp",
+ "qcom,pmi8950-mpp",
"qcom,pm8994-mpp",
"qcom,pma8084-mpp",
@@ -80,6 +82,8 @@ to specify in a pin configuration subnode:
mpp1-mpp4 for pm8841
mpp1-mpp4 for pm8916
mpp1-mpp8 for pm8941
+ mpp1-mpp4 for pm8950
+ mpp1-mpp4 for pmi8950
mpp1-mpp4 for pma8084
- function:
diff --git a/Documentation/devicetree/bindings/pinctrl/renesas,pfc-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/renesas,pfc-pinctrl.txt
index 3902efa18fd0..6eada23eaa31 100644
--- a/Documentation/devicetree/bindings/pinctrl/renesas,pfc-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/renesas,pfc-pinctrl.txt
@@ -18,6 +18,7 @@ Required Properties:
- "renesas,pfc-r8a7745": for R8A7745 (RZ/G1E) compatible pin-controller.
- "renesas,pfc-r8a77470": for R8A77470 (RZ/G1C) compatible pin-controller.
- "renesas,pfc-r8a774a1": for R8A774A1 (RZ/G2M) compatible pin-controller.
+ - "renesas,pfc-r8a774b1": for R8A774B1 (RZ/G2N) compatible pin-controller.
- "renesas,pfc-r8a774c0": for R8A774C0 (RZ/G2E) compatible pin-controller.
- "renesas,pfc-r8a7778": for R8A7778 (R-Car M1) compatible pin-controller.
- "renesas,pfc-r8a7779": for R8A7779 (R-Car H1) compatible pin-controller.
@@ -27,7 +28,8 @@ Required Properties:
- "renesas,pfc-r8a7793": for R8A7793 (R-Car M2-N) compatible pin-controller.
- "renesas,pfc-r8a7794": for R8A7794 (R-Car E2) compatible pin-controller.
- "renesas,pfc-r8a7795": for R8A7795 (R-Car H3) compatible pin-controller.
- - "renesas,pfc-r8a7796": for R8A7796 (R-Car M3-W) compatible pin-controller.
+ - "renesas,pfc-r8a7796": for R8A77960 (R-Car M3-W) compatible pin-controller.
+ - "renesas,pfc-r8a77961": for R8A77961 (R-Car M3-W+) compatible pin-controller.
- "renesas,pfc-r8a77965": for R8A77965 (R-Car M3-N) compatible pin-controller.
- "renesas,pfc-r8a77970": for R8A77970 (R-Car V3M) compatible pin-controller.
- "renesas,pfc-r8a77980": for R8A77980 (R-Car V3H) compatible pin-controller.
diff --git a/Documentation/devicetree/bindings/pinctrl/rockchip,pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/rockchip,pinctrl.txt
index 0919db294c17..2113cfaa26e6 100644
--- a/Documentation/devicetree/bindings/pinctrl/rockchip,pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/rockchip,pinctrl.txt
@@ -29,6 +29,7 @@ Required properties for iomux controller:
"rockchip,rk3188-pinctrl": for Rockchip RK3188
"rockchip,rk3228-pinctrl": for Rockchip RK3228
"rockchip,rk3288-pinctrl": for Rockchip RK3288
+ "rockchip,rk3308-pinctrl": for Rockchip RK3308
"rockchip,rk3328-pinctrl": for Rockchip RK3328
"rockchip,rk3368-pinctrl": for Rockchip RK3368
"rockchip,rk3399-pinctrl": for Rockchip RK3399
diff --git a/Documentation/devicetree/bindings/power/supply/cpcap-charger.txt b/Documentation/devicetree/bindings/power/supply/cpcap-charger.txt
index 80bd873c3b1d..6048f636783f 100644
--- a/Documentation/devicetree/bindings/power/supply/cpcap-charger.txt
+++ b/Documentation/devicetree/bindings/power/supply/cpcap-charger.txt
@@ -5,7 +5,8 @@ Required properties:
- interrupts: Interrupt specifier for each name in interrupt-names
- interrupt-names: Should contain the following entries:
"chrg_det", "rvrs_chrg", "chrg_se1b", "se0conn",
- "rvrs_mode", "chrgcurr1", "vbusvld", "battdetb"
+ "rvrs_mode", "chrgcurr2", "chrgcurr1", "vbusvld",
+ "battdetb"
- io-channels: IIO ADC channel specifier for each name in io-channel-names
- io-channel-names: Should contain the following entries:
"battdetb", "battp", "vbus", "chg_isense", "batti"
@@ -21,11 +22,13 @@ cpcap_charger: charger {
compatible = "motorola,mapphone-cpcap-charger";
interrupts-extended = <
&cpcap 13 0 &cpcap 12 0 &cpcap 29 0 &cpcap 28 0
- &cpcap 22 0 &cpcap 20 0 &cpcap 19 0 &cpcap 54 0
+ &cpcap 22 0 &cpcap 21 0 &cpcap 20 0 &cpcap 19 0
+ &cpcap 54 0
>;
interrupt-names =
"chrg_det", "rvrs_chrg", "chrg_se1b", "se0conn",
- "rvrs_mode", "chrgcurr1", "vbusvld", "battdetb";
+ "rvrs_mode", "chrgcurr2", "chrgcurr1", "vbusvld",
+ "battdetb";
mode-gpios = <&gpio3 29 GPIO_ACTIVE_LOW
&gpio3 23 GPIO_ACTIVE_LOW>;
io-channels = <&cpcap_adc 0 &cpcap_adc 1
diff --git a/Documentation/devicetree/bindings/ptp/ptp-idtcm.yaml b/Documentation/devicetree/bindings/ptp/ptp-idtcm.yaml
new file mode 100644
index 000000000000..9e21b83d717e
--- /dev/null
+++ b/Documentation/devicetree/bindings/ptp/ptp-idtcm.yaml
@@ -0,0 +1,69 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/ptp/ptp-idtcm.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: IDT ClockMatrix (TM) PTP Clock Device Tree Bindings
+
+maintainers:
+ - Vincent Cheng <vincent.cheng.xh@renesas.com>
+
+properties:
+ compatible:
+ enum:
+ # For System Synchronizer
+ - idt,8a34000
+ - idt,8a34001
+ - idt,8a34002
+ - idt,8a34003
+ - idt,8a34004
+ - idt,8a34005
+ - idt,8a34006
+ - idt,8a34007
+ - idt,8a34008
+ - idt,8a34009
+ # For Port Synchronizer
+ - idt,8a34010
+ - idt,8a34011
+ - idt,8a34012
+ - idt,8a34013
+ - idt,8a34014
+ - idt,8a34015
+ - idt,8a34016
+ - idt,8a34017
+ - idt,8a34018
+ - idt,8a34019
+ # For Universal Frequency Translator (UFT)
+ - idt,8a34040
+ - idt,8a34041
+ - idt,8a34042
+ - idt,8a34043
+ - idt,8a34044
+ - idt,8a34045
+ - idt,8a34046
+ - idt,8a34047
+ - idt,8a34048
+ - idt,8a34049
+
+ reg:
+ maxItems: 1
+ description:
+ I2C slave address of the device.
+
+required:
+ - compatible
+ - reg
+
+examples:
+ - |
+ i2c@1 {
+ compatible = "abc,acme-1234";
+ reg = <0x01 0x400>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ phc@5b {
+ compatible = "idt,8a34000";
+ reg = <0x5b>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/regulator/fixed-regulator.yaml b/Documentation/devicetree/bindings/regulator/fixed-regulator.yaml
index f32416968197..59b4b73d4051 100644
--- a/Documentation/devicetree/bindings/regulator/fixed-regulator.yaml
+++ b/Documentation/devicetree/bindings/regulator/fixed-regulator.yaml
@@ -50,6 +50,10 @@ properties:
description: startup time in microseconds
$ref: /schemas/types.yaml#/definitions/uint32
+ off-on-delay-us:
+ description: minimum off time in microseconds before the regulator may be enabled again
+ $ref: /schemas/types.yaml#/definitions/uint32
+
enable-active-high:
description:
Polarity of GPIO is Active high. If this property is missing,
diff --git a/Documentation/devicetree/bindings/regulator/qcom,rpmh-regulator.txt b/Documentation/devicetree/bindings/regulator/qcom,rpmh-regulator.txt
index bab9f71140b8..97c3e0b7611c 100644
--- a/Documentation/devicetree/bindings/regulator/qcom,rpmh-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/qcom,rpmh-regulator.txt
@@ -28,6 +28,8 @@ Supported regulator node names:
PM8150L: smps1 - smps8, ldo1 - ldo11, bob, flash, rgb
PM8998: smps1 - smps13, ldo1 - ldo28, lvs1 - lvs2
PMI8998: bob
+ PM6150: smps1 - smps5, ldo1 - ldo19
+ PM6150L: smps1 - smps8, ldo1 - ldo11, bob
========================
First Level Nodes - PMIC
@@ -43,6 +45,8 @@ First Level Nodes - PMIC
"qcom,pm8150l-rpmh-regulators"
"qcom,pm8998-rpmh-regulators"
"qcom,pmi8998-rpmh-regulators"
+ "qcom,pm6150-rpmh-regulators"
+ "qcom,pm6150l-rpmh-regulators"
- qcom,pmic-id
Usage: required
diff --git a/Documentation/devicetree/bindings/regulator/qcom,smd-rpm-regulator.txt b/Documentation/devicetree/bindings/regulator/qcom,smd-rpm-regulator.txt
index 45025b5b67f6..d126df043403 100644
--- a/Documentation/devicetree/bindings/regulator/qcom,smd-rpm-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/qcom,smd-rpm-regulator.txt
@@ -22,6 +22,7 @@ Regulator nodes are identified by their compatible:
"qcom,rpm-pm8841-regulators"
"qcom,rpm-pm8916-regulators"
"qcom,rpm-pm8941-regulators"
+ "qcom,rpm-pm8950-regulators"
"qcom,rpm-pm8994-regulators"
"qcom,rpm-pm8998-regulators"
"qcom,rpm-pma8084-regulators"
@@ -57,6 +58,26 @@ Regulator nodes are identified by their compatible:
- vdd_s1-supply:
- vdd_s2-supply:
- vdd_s3-supply:
+- vdd_s4-supply:
+- vdd_s5-supply:
+- vdd_s6-supply:
+- vdd_l1_l19-supply:
+- vdd_l2_l23-supply:
+- vdd_l3-supply:
+- vdd_l4_l5_l6_l7_l16-supply:
+- vdd_l8_l11_l12_l17_l22-supply:
+- vdd_l9_l10_l13_l14_l15_l18-supply:
+- vdd_l20-supply:
+- vdd_l21-supply:
+ Usage: optional (pm8950 only)
+ Value type: <phandle>
+ Definition: reference to regulator supplying the input pin, as
+ described in the data sheet
+
+- vdd_s1-supply:
+- vdd_s2-supply:
+- vdd_s3-supply:
- vdd_l1_l3-supply:
- vdd_l2_lvs1_2_3-supply:
- vdd_l4_l11-supply:
diff --git a/Documentation/devicetree/bindings/regulator/qcom,spmi-regulator.txt b/Documentation/devicetree/bindings/regulator/qcom,spmi-regulator.txt
index 430b8622bda1..f5cdac8b2847 100644
--- a/Documentation/devicetree/bindings/regulator/qcom,spmi-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/qcom,spmi-regulator.txt
@@ -4,10 +4,12 @@ Qualcomm SPMI Regulators
Usage: required
Value type: <string>
Definition: must be one of:
+ "qcom,pm8004-regulators"
"qcom,pm8005-regulators"
"qcom,pm8841-regulators"
"qcom,pm8916-regulators"
"qcom,pm8941-regulators"
+ "qcom,pm8950-regulators"
"qcom,pm8994-regulators"
"qcom,pmi8994-regulators"
"qcom,pms405-regulators"
@@ -76,6 +78,26 @@ Qualcomm SPMI Regulators
- vdd_s2-supply:
- vdd_s3-supply:
- vdd_s4-supply:
+- vdd_s5-supply:
+- vdd_s6-supply:
+- vdd_l1_l19-supply:
+- vdd_l2_l23-supply:
+- vdd_l3-supply:
+- vdd_l4_l5_l6_l7_l16-supply:
+- vdd_l8_l11_l12_l17_l22-supply:
+- vdd_l9_l10_l13_l14_l15_l18-supply:
+- vdd_l20-supply:
+- vdd_l21-supply:
+ Usage: optional (pm8950 only)
+ Value type: <phandle>
+ Definition: reference to regulator supplying the input pin, as
+ described in the data sheet
+
+- vdd_s1-supply:
+- vdd_s2-supply:
+- vdd_s3-supply:
+- vdd_s4-supply:
- vdd_s5-supply:
- vdd_s6-supply:
- vdd_s7-supply:
@@ -140,6 +162,9 @@ sub-node is identified using the node's name, with valid values listed for each
of the PMICs below.
+pm8004:
+ s2, s5
+
pm8005:
s1, s2, s3, s4
pm8841:
diff --git a/Documentation/devicetree/bindings/regulator/regulator.yaml b/Documentation/devicetree/bindings/regulator/regulator.yaml
index 02c3043ce419..92ff2e8ad572 100644
--- a/Documentation/devicetree/bindings/regulator/regulator.yaml
+++ b/Documentation/devicetree/bindings/regulator/regulator.yaml
@@ -38,7 +38,12 @@ properties:
type: boolean
regulator-boot-on:
- description: bootloader/firmware enabled regulator
+ description: bootloader/firmware enabled regulator.
+ It's expected that this regulator was left on by the bootloader.
+ If the bootloader didn't leave it on, the OS should turn it on
+ at boot but shouldn't prevent it from being turned off later.
+ This property is intended to only be used for regulators where
+ software cannot read the state of the regulator.
type: boolean
regulator-allow-bypass:
diff --git a/Documentation/devicetree/bindings/rng/atmel-trng.txt b/Documentation/devicetree/bindings/rng/atmel-trng.txt
index 4ac5aaa2d024..3900ee4f3532 100644
--- a/Documentation/devicetree/bindings/rng/atmel-trng.txt
+++ b/Documentation/devicetree/bindings/rng/atmel-trng.txt
@@ -1,7 +1,7 @@
Atmel TRNG (True Random Number Generator) block
Required properties:
-- compatible : Should be "atmel,at91sam9g45-trng"
+- compatible : Should be "atmel,at91sam9g45-trng" or "microchip,sam9x60-trng"
- reg : Offset and length of the register set of this block
- interrupts : the interrupt number for the TRNG block
- clocks: should contain the TRNG clk source
diff --git a/Documentation/devicetree/bindings/rng/nuvoton,npcm-rng.txt b/Documentation/devicetree/bindings/rng/nuvoton,npcm-rng.txt
new file mode 100644
index 000000000000..65c04172fc8c
--- /dev/null
+++ b/Documentation/devicetree/bindings/rng/nuvoton,npcm-rng.txt
@@ -0,0 +1,12 @@
+NPCM SoC Random Number Generator
+
+Required properties:
+- compatible : "nuvoton,npcm750-rng" for the NPCM7XX BMC.
+- reg : Specifies physical base address and size of the registers.
+
+Example:
+
+rng: rng@f000b000 {
+ compatible = "nuvoton,npcm750-rng";
+ reg = <0xf000b000 0x8>;
+};
diff --git a/Documentation/devicetree/bindings/rng/omap3_rom_rng.txt b/Documentation/devicetree/bindings/rng/omap3_rom_rng.txt
new file mode 100644
index 000000000000..f315c9723bd2
--- /dev/null
+++ b/Documentation/devicetree/bindings/rng/omap3_rom_rng.txt
@@ -0,0 +1,27 @@
+OMAP ROM RNG driver binding
+
+Secure SoCs may provide a RNG via secure ROM calls, as the Nokia N900 does. The
+implementation can depend on the SoC secure ROM used.
+
+- compatible:
+ Usage: required
+ Value type: <string>
+ Definition: must be "nokia,n900-rom-rng"
+
+- clocks:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: reference to the RNG interface clock
+
+- clock-names:
+ Usage: required
+ Value type: <stringlist>
+ Definition: must be "ick"
+
+Example:
+
+ rom_rng: rng {
+ compatible = "nokia,n900-rom-rng";
+ clocks = <&rng_ick>;
+ clock-names = "ick";
+ };
diff --git a/Documentation/devicetree/bindings/rng/samsung,exynos5250-trng.txt b/Documentation/devicetree/bindings/rng/samsung,exynos5250-trng.txt
new file mode 100644
index 000000000000..5a613a4ec780
--- /dev/null
+++ b/Documentation/devicetree/bindings/rng/samsung,exynos5250-trng.txt
@@ -0,0 +1,17 @@
+Exynos True Random Number Generator
+
+Required properties:
+
+- compatible : Should be "samsung,exynos5250-trng".
+- reg : Specifies base physical address and size of the registers map.
+- clocks : Phandle to clock-controller plus clock-specifier pair.
+- clock-names : "secss" as a clock name.
+
+Example:
+
+ rng@10830600 {
+ compatible = "samsung,exynos5250-trng";
+ reg = <0x10830600 0x100>;
+ clocks = <&clock CLK_SSS>;
+ clock-names = "secss";
+ };
diff --git a/Documentation/devicetree/bindings/security/tpm/google,cr50.txt b/Documentation/devicetree/bindings/security/tpm/google,cr50.txt
new file mode 100644
index 000000000000..cd69c2efdd37
--- /dev/null
+++ b/Documentation/devicetree/bindings/security/tpm/google,cr50.txt
@@ -0,0 +1,19 @@
+* H1 Secure Microcontroller with Cr50 Firmware on SPI Bus.
+
+The H1 Secure Microcontroller running Cr50 firmware provides several
+functions, including TPM-like functionality. It communicates over
+SPI using the FIFO protocol described in the PTP Spec, section 6.
+
+Required properties:
+- compatible: Should be "google,cr50".
+- spi-max-frequency: Maximum SPI frequency.
+
+Example:
+
+&spi0 {
+ tpm@0 {
+ compatible = "google,cr50";
+ reg = <0>;
+ spi-max-frequency = <800000>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/sound/adi,adau7118.yaml b/Documentation/devicetree/bindings/sound/adi,adau7118.yaml
new file mode 100644
index 000000000000..75e0cbe6be70
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/adi,adau7118.yaml
@@ -0,0 +1,85 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/adi,adau7118.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Analog Devices ADAU7118 8 Channel PDM to I2S/TDM Converter
+
+maintainers:
+ - Nuno Sá <nuno.sa@analog.com>
+
+description: |
+ Analog Devices ADAU7118 8 Channel PDM to I2S/TDM Converter, controlled over
+ I2C or running in hardware standalone mode.
+ https://www.analog.com/media/en/technical-documentation/data-sheets/ADAU7118.pdf
+
+properties:
+ compatible:
+ enum:
+ - adi,adau7118
+
+ reg:
+ maxItems: 1
+
+ "#sound-dai-cells":
+ const: 0
+
+ iovdd-supply:
+ description: Digital Input/Output Power Supply.
+
+ dvdd-supply:
+ description: Internal Core Digital Power Supply.
+
+ adi,decimation-ratio:
+ description: |
+ This property sets the decimation ratio of PDM to PCM audio data.
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - enum: [64, 32, 16]
+ default: 64
+
+ adi,pdm-clk-map:
+ description: |
+ The ADAU7118 has two PDM clocks for the four inputs. Each input must be
+ assigned to one of these two clocks. This property sets the mapping
+ between the clocks and the inputs.
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32-array
+ - minItems: 4
+ maxItems: 4
+ items:
+ maximum: 1
+ default: [0, 0, 1, 1]
+
+required:
+ - "#sound-dai-cells"
+ - compatible
+ - iovdd-supply
+ - dvdd-supply
+
+examples:
+ - |
+ i2c {
+ /* example with i2c support */
+ #address-cells = <1>;
+ #size-cells = <0>;
+ adau7118_codec: audio-codec@14 {
+ compatible = "adi,adau7118";
+ reg = <0x14>;
+ #sound-dai-cells = <0>;
+ iovdd-supply = <&supply>;
+ dvdd-supply = <&supply>;
+ adi,pdm-clk-map = <1 1 0 0>;
+ adi,decimation-ratio = <16>;
+ };
+ };
+
+ /* example with hw standalone mode */
+ adau7118_codec_hw: adau7118-codec-hw {
+ compatible = "adi,adau7118";
+ #sound-dai-cells = <0>;
+ iovdd-supply = <&supply>;
+ dvdd-supply = <&supply>;
+ };
diff --git a/Documentation/devicetree/bindings/sound/allwinner,sun4i-a10-codec.yaml b/Documentation/devicetree/bindings/sound/allwinner,sun4i-a10-codec.yaml
new file mode 100644
index 000000000000..b8f89c7258eb
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/allwinner,sun4i-a10-codec.yaml
@@ -0,0 +1,267 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/allwinner,sun4i-a10-codec.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Allwinner A10 Codec Device Tree Bindings
+
+maintainers:
+ - Chen-Yu Tsai <wens@csie.org>
+ - Maxime Ripard <maxime.ripard@bootlin.com>
+
+properties:
+ "#sound-dai-cells":
+ const: 0
+
+ compatible:
+ enum:
+ - allwinner,sun4i-a10-codec
+ - allwinner,sun6i-a31-codec
+ - allwinner,sun7i-a20-codec
+ - allwinner,sun8i-a23-codec
+ - allwinner,sun8i-h3-codec
+ - allwinner,sun8i-v3s-codec
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: Bus Clock
+ - description: Module Clock
+
+ clock-names:
+ items:
+ - const: apb
+ - const: codec
+
+ dmas:
+ items:
+ - description: RX DMA Channel
+ - description: TX DMA Channel
+
+ dma-names:
+ items:
+ - const: rx
+ - const: tx
+
+ resets:
+ maxItems: 1
+
+ allwinner,audio-routing:
+ description: |-
+ A list of the connections between audio components. Each entry
+ is a pair of strings, the first being the connection's sink, the
+ second being the connection's source.
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/non-unique-string-array
+ - minItems: 2
+ maxItems: 18
+ items:
+ enum:
+ # Audio Pins on the SoC
+ - HP
+ - HPCOM
+ - LINEIN
+ - LINEOUT
+ - MIC1
+ - MIC2
+ - MIC3
+
+ # Microphone Biases from the SoC
+ - HBIAS
+ - MBIAS
+
+ # Board Connectors
+ - Headphone
+ - Headset Mic
+ - Line In
+ - Line Out
+ - Mic
+ - Speaker
+
+ allwinner,codec-analog-controls:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description: Phandle to the codec analog controls in the PRCM
+
+ allwinner,pa-gpios:
+ description: GPIO to enable the external amplifier
+
+required:
+ - "#sound-dai-cells"
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+ - dmas
+ - dma-names
+
+allOf:
+ - if:
+ properties:
+ compatible:
+ enum:
+ - allwinner,sun6i-a31-codec
+ - allwinner,sun8i-a23-codec
+ - allwinner,sun8i-h3-codec
+ - allwinner,sun8i-v3s-codec
+
+ then:
+ if:
+ properties:
+ compatible:
+ const: allwinner,sun6i-a31-codec
+
+ then:
+ required:
+ - resets
+ - allwinner,audio-routing
+
+ else:
+ required:
+ - resets
+ - allwinner,audio-routing
+ - allwinner,codec-analog-controls
+
+ - if:
+ properties:
+ compatible:
+ enum:
+ - allwinner,sun6i-a31-codec
+
+ then:
+ properties:
+ allwinner,audio-routing:
+ items:
+ enum:
+ - HP
+ - HPCOM
+ - LINEIN
+ - LINEOUT
+ - MIC1
+ - MIC2
+ - MIC3
+ - HBIAS
+ - MBIAS
+ - Headphone
+ - Headset Mic
+ - Line In
+ - Line Out
+ - Mic
+ - Speaker
+
+ - if:
+ properties:
+ compatible:
+ enum:
+ - allwinner,sun8i-a23-codec
+
+ then:
+ properties:
+ allwinner,audio-routing:
+ items:
+ enum:
+ - HP
+ - HPCOM
+ - LINEIN
+ - MIC1
+ - MIC2
+ - HBIAS
+ - MBIAS
+ - Headphone
+ - Headset Mic
+ - Line In
+ - Line Out
+ - Mic
+ - Speaker
+
+ - if:
+ properties:
+ compatible:
+ enum:
+ - allwinner,sun8i-h3-codec
+
+ then:
+ properties:
+ allwinner,audio-routing:
+ items:
+ enum:
+ - HP
+ - HPCOM
+ - LINEIN
+ - LINEOUT
+ - MIC1
+ - MIC2
+ - HBIAS
+ - MBIAS
+ - Headphone
+ - Headset Mic
+ - Line In
+ - Line Out
+ - Mic
+ - Speaker
+
+ - if:
+ properties:
+ compatible:
+ enum:
+ - allwinner,sun8i-v3s-codec
+
+ then:
+ properties:
+ allwinner,audio-routing:
+ items:
+ enum:
+ - HP
+ - HPCOM
+ - MIC1
+ - HBIAS
+ - Headphone
+ - Headset Mic
+ - Line In
+ - Line Out
+ - Mic
+ - Speaker
+
+additionalProperties: false
+
+examples:
+ - |
+ codec@1c22c00 {
+ #sound-dai-cells = <0>;
+ compatible = "allwinner,sun7i-a20-codec";
+ reg = <0x01c22c00 0x40>;
+ interrupts = <0 30 4>;
+ clocks = <&apb0_gates 0>, <&codec_clk>;
+ clock-names = "apb", "codec";
+ dmas = <&dma 0 19>, <&dma 0 19>;
+ dma-names = "rx", "tx";
+ };
+
+ - |
+ codec@1c22c00 {
+ #sound-dai-cells = <0>;
+ compatible = "allwinner,sun6i-a31-codec";
+ reg = <0x01c22c00 0x98>;
+ interrupts = <0 29 4>;
+ clocks = <&ccu 61>, <&ccu 135>;
+ clock-names = "apb", "codec";
+ resets = <&ccu 42>;
+ dmas = <&dma 15>, <&dma 15>;
+ dma-names = "rx", "tx";
+ allwinner,audio-routing =
+ "Headphone", "HP",
+ "Speaker", "LINEOUT",
+ "LINEIN", "Line In",
+ "MIC1", "MBIAS",
+ "MIC1", "Mic",
+ "MIC2", "HBIAS",
+ "MIC2", "Headset Mic";
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/sound/allwinner,sun8i-a23-codec-analog.yaml b/Documentation/devicetree/bindings/sound/allwinner,sun8i-a23-codec-analog.yaml
new file mode 100644
index 000000000000..85305b4c2729
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/allwinner,sun8i-a23-codec-analog.yaml
@@ -0,0 +1,38 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/allwinner,sun8i-a23-codec-analog.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Allwinner A23 Analog Codec Device Tree Bindings
+
+maintainers:
+ - Chen-Yu Tsai <wens@csie.org>
+ - Maxime Ripard <maxime.ripard@bootlin.com>
+
+properties:
+ compatible:
+ enum:
+ # FIXME: This is documented in the PRCM binding, but needs to be
+ # migrated here at some point
+ # - allwinner,sun8i-a23-codec-analog
+ - allwinner,sun8i-h3-codec-analog
+ - allwinner,sun8i-v3s-codec-analog
+
+ reg:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ codec_analog: codec-analog@1f015c0 {
+ compatible = "allwinner,sun8i-h3-codec-analog";
+ reg = <0x01f015c0 0x4>;
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/sound/arndale.txt b/Documentation/devicetree/bindings/sound/arndale.txt
index 0e76946385ae..17530120ccfc 100644
--- a/Documentation/devicetree/bindings/sound/arndale.txt
+++ b/Documentation/devicetree/bindings/sound/arndale.txt
@@ -1,8 +1,9 @@
Audio Binding for Arndale boards
Required properties:
-- compatible : Can be the following,
- "samsung,arndale-rt5631"
+- compatible : Can be one of the following:
+ "samsung,arndale-rt5631",
+ "samsung,arndale-wm1811"
- samsung,audio-cpu: The phandle of the Samsung I2S controller
- samsung,audio-codec: The phandle of the audio codec
diff --git a/Documentation/devicetree/bindings/sound/fsl,mqs.txt b/Documentation/devicetree/bindings/sound/fsl,mqs.txt
new file mode 100644
index 000000000000..40353fc30255
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/fsl,mqs.txt
@@ -0,0 +1,36 @@
+fsl,mqs audio CODEC
+
+Required properties:
+ - compatible : Must contain one of "fsl,imx6sx-mqs", "fsl,codec-mqs",
+ "fsl,imx8qm-mqs", "fsl,imx8qxp-mqs".
+ - clocks : A list of phandles + clock-specifiers, one for each entry in
+ clock-names
+ - clock-names : "mclk" - must required.
+ "core" - required if compatible is "fsl,imx8qm-mqs", it
+ is for register access.
+ - gpr : A phandle of General Purpose Registers in IOMUX Controller.
+ Required if compatible is "fsl,imx6sx-mqs".
+
+Required if compatible is "fsl,imx8qm-mqs":
+ - power-domains: A phandle of PM domain provider node.
+ - reg: Offset and length of the register set for the device.
+
+Example:
+
+mqs: mqs {
+ compatible = "fsl,imx6sx-mqs";
+ gpr = <&gpr>;
+ clocks = <&clks IMX6SX_CLK_SAI1>;
+ clock-names = "mclk";
+ status = "disabled";
+};
+
+mqs: mqs@59850000 {
+ compatible = "fsl,imx8qm-mqs";
+ reg = <0x59850000 0x10000>;
+ clocks = <&clk IMX8QM_AUD_MQS_IPG>,
+ <&clk IMX8QM_AUD_MQS_HMCLK>;
+ clock-names = "core", "mclk";
+ power-domains = <&pd_mqs0>;
+ status = "disabled";
+};
diff --git a/Documentation/devicetree/bindings/sound/google,cros-ec-codec.txt b/Documentation/devicetree/bindings/sound/google,cros-ec-codec.txt
index 1084f7f22eea..8ca52dcc5572 100644
--- a/Documentation/devicetree/bindings/sound/google,cros-ec-codec.txt
+++ b/Documentation/devicetree/bindings/sound/google,cros-ec-codec.txt
@@ -1,4 +1,4 @@
-* Audio codec controlled by ChromeOS EC
+Audio codec controlled by ChromeOS EC
Google's ChromeOS EC codec is a digital mic codec provided by the
Embedded Controller (EC) and is controlled via a host-command interface.
@@ -9,10 +9,27 @@ Documentation/devicetree/bindings/mfd/cros-ec.txt).
Required properties:
- compatible: Must contain "google,cros-ec-codec"
- #sound-dai-cells: Should be 1. The cell specifies number of DAIs.
-- max-dmic-gain: A number for maximum gain in dB on digital microphone.
+
+Optional properties:
+- reg: Physical base address and length of the shared memory region from EC.
+ It contains 3 unsigned 32-bit integers. The first 2 integers
+ combine to form an unsigned 64-bit physical address. The last
+ integer is the length of the shared memory.
+- memory-region: Shared memory region to EC. A "shared-dma-pool". See
+ ../reserved-memory/reserved-memory.txt for details.
Example:
+{
+ ...
+
+ reserved_mem: reserved_mem {
+ compatible = "shared-dma-pool";
+ reg = <0 0x52800000 0 0x100000>;
+ no-map;
+ };
+}
+
cros-ec@0 {
compatible = "google,cros-ec-spi";
@@ -21,6 +38,7 @@ cros-ec@0 {
cros_ec_codec: ec-codec {
compatible = "google,cros-ec-codec";
#sound-dai-cells = <1>;
- max-dmic-gain = <43>;
+ reg = <0x0 0x10500000 0x80000>;
+ memory-region = <&reserved_mem>;
};
};
diff --git a/Documentation/devicetree/bindings/sound/mt8183-afe-pcm.txt b/Documentation/devicetree/bindings/sound/mt8183-afe-pcm.txt
index 396ba38619f6..1f1cba4152ce 100644
--- a/Documentation/devicetree/bindings/sound/mt8183-afe-pcm.txt
+++ b/Documentation/devicetree/bindings/sound/mt8183-afe-pcm.txt
@@ -4,6 +4,10 @@ Required properties:
- compatible = "mediatek,mt68183-audio";
- reg: register location and size
- interrupts: should contain AFE interrupt
+- resets: Must contain an entry for each entry in reset-names
+ See ../reset/reset.txt for details.
+- reset-names: should have these reset names:
+ "audiosys";
- power-domains: should define the power domain
- clocks: Must contain an entry for each entry in clock-names
- clock-names: should have these clock names:
@@ -20,6 +24,8 @@ Example:
compatible = "mediatek,mt8183-audio";
reg = <0 0x11220000 0 0x1000>;
interrupts = <GIC_SPI 161 IRQ_TYPE_LEVEL_LOW>;
+ resets = <&watchdog MT8183_TOPRGU_AUDIO_SW_RST>;
+ reset-names = "audiosys";
power-domains = <&scpsys MT8183_POWER_DOMAIN_AUDIO>;
clocks = <&infrasys CLK_INFRA_AUDIO>,
<&infrasys CLK_INFRA_AUDIO_26M_BCLK>,
diff --git a/Documentation/devicetree/bindings/sound/mt8183-mt6358-ts3a227-max98357.txt b/Documentation/devicetree/bindings/sound/mt8183-mt6358-ts3a227-max98357.txt
index d6d5207fa996..decaa013a07e 100644
--- a/Documentation/devicetree/bindings/sound/mt8183-mt6358-ts3a227-max98357.txt
+++ b/Documentation/devicetree/bindings/sound/mt8183-mt6358-ts3a227-max98357.txt
@@ -2,14 +2,19 @@ MT8183 with MT6358, TS3A227 and MAX98357 CODECS
Required properties:
- compatible : "mediatek,mt8183_mt6358_ts3a227_max98357"
-- mediatek,headset-codec: the phandles of ts3a227 codecs
- mediatek,platform: the phandle of MT8183 ASoC platform
+Optional properties:
+- mediatek,headset-codec: the phandles of ts3a227 codecs
+- mediatek,ec-codec: the phandle of the EC codec.
+ See google,cros-ec-codec.txt for more details.
+
Example:
sound {
compatible = "mediatek,mt8183_mt6358_ts3a227_max98357";
mediatek,headset-codec = <&ts3a227>;
+ mediatek,ec-codec = <&ec_codec>;
mediatek,platform = <&afe>;
};
diff --git a/Documentation/devicetree/bindings/sound/renesas,fsi.txt b/Documentation/devicetree/bindings/sound/renesas,fsi.txt
deleted file mode 100644
index 0cf0f819b823..000000000000
--- a/Documentation/devicetree/bindings/sound/renesas,fsi.txt
+++ /dev/null
@@ -1,31 +0,0 @@
-Renesas FSI
-
-Required properties:
-- compatible : "renesas,fsi2-<soctype>",
- "renesas,sh_fsi2" or "renesas,sh_fsi" as
- fallback.
- Examples with soctypes are:
- - "renesas,fsi2-r8a7740" (R-Mobile A1)
- - "renesas,fsi2-sh73a0" (SH-Mobile AG5)
-- reg : Should contain the register physical address and length
-- interrupts : Should contain FSI interrupt
-
-- fsia,spdif-connection : FSI is connected by S/PDIF
-- fsia,stream-mode-support : FSI supports 16bit stream mode.
-- fsia,use-internal-clock : FSI uses internal clock when master mode.
-
-- fsib,spdif-connection : same as fsia
-- fsib,stream-mode-support : same as fsia
-- fsib,use-internal-clock : same as fsia
-
-Example:
-
-sh_fsi2: sh_fsi2@ec230000 {
- compatible = "renesas,sh_fsi2";
- reg = <0xec230000 0x400>;
- interrupts = <0 146 0x4>;
-
- fsia,spdif-connection;
- fsia,stream-mode-support;
- fsia,use-internal-clock;
-};
diff --git a/Documentation/devicetree/bindings/sound/renesas,fsi.yaml b/Documentation/devicetree/bindings/sound/renesas,fsi.yaml
new file mode 100644
index 000000000000..140a37fc3c0b
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/renesas,fsi.yaml
@@ -0,0 +1,76 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/renesas,fsi.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Renesas FSI Sound Driver Device Tree Bindings
+
+maintainers:
+ - Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+
+properties:
+ $nodename:
+ pattern: "^sound@.*"
+
+ compatible:
+ oneOf:
+ # for FSI2 SoC
+ - items:
+ - enum:
+ - renesas,fsi2-sh73a0
+ - renesas,fsi2-r8a7740
+ - enum:
+ - renesas,sh_fsi2
+ # for Generic
+ - items:
+ - enum:
+ - renesas,sh_fsi
+ - renesas,sh_fsi2
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ fsia,spdif-connection:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description: FSI is connected by S/PDIF
+
+ fsia,stream-mode-support:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description: FSI supports 16bit stream mode
+
+ fsia,use-internal-clock:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description: FSI uses internal clock when master mode
+
+ fsib,spdif-connection:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description: same as fsia
+
+ fsib,stream-mode-support:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description: same as fsia
+
+ fsib,use-internal-clock:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description: same as fsia
+
+required:
+ - compatible
+ - reg
+ - interrupts
+
+examples:
+ - |
+ sh_fsi2: sound@ec230000 {
+ compatible = "renesas,fsi2-r8a7740", "renesas,sh_fsi2";
+ reg = <0xec230000 0x400>;
+ interrupts = <0 146 0x4>;
+
+ fsia,spdif-connection;
+ fsia,stream-mode-support;
+ fsia,use-internal-clock;
+ };
diff --git a/Documentation/devicetree/bindings/sound/renesas,rsnd.txt b/Documentation/devicetree/bindings/sound/renesas,rsnd.txt
index 5c52182f7dcf..797fd035434c 100644
--- a/Documentation/devicetree/bindings/sound/renesas,rsnd.txt
+++ b/Documentation/devicetree/bindings/sound/renesas,rsnd.txt
@@ -268,6 +268,7 @@ Required properties:
- "renesas,rcar_sound-r8a7745" (RZ/G1E)
- "renesas,rcar_sound-r8a77470" (RZ/G1C)
- "renesas,rcar_sound-r8a774a1" (RZ/G2M)
+ - "renesas,rcar_sound-r8a774b1" (RZ/G2N)
- "renesas,rcar_sound-r8a774c0" (RZ/G2E)
- "renesas,rcar_sound-r8a7778" (R-Car M1A)
- "renesas,rcar_sound-r8a7779" (R-Car H1)
diff --git a/Documentation/devicetree/bindings/sound/rockchip-max98090.txt b/Documentation/devicetree/bindings/sound/rockchip-max98090.txt
index a805aa99ad75..e9c58b204399 100644
--- a/Documentation/devicetree/bindings/sound/rockchip-max98090.txt
+++ b/Documentation/devicetree/bindings/sound/rockchip-max98090.txt
@@ -5,15 +5,38 @@ Required properties:
- rockchip,model: The user-visible name of this sound complex
- rockchip,i2s-controller: The phandle of the Rockchip I2S controller that's
connected to the CODEC
-- rockchip,audio-codec: The phandle of the MAX98090 audio codec
-- rockchip,headset-codec: The phandle of Ext chip for jack detection
+
+Optional properties:
+- rockchip,audio-codec: The phandle of the MAX98090 audio codec.
+- rockchip,headset-codec: The phandle of the external chip used for jack
+ detection. Required if rockchip,audio-codec is present.
+- rockchip,hdmi-codec: The phandle of the HDMI device for the HDMI codec.
Example:
+/* For max98090-only board. */
+sound {
+ compatible = "rockchip,rockchip-audio-max98090";
+ rockchip,model = "ROCKCHIP-I2S";
+ rockchip,i2s-controller = <&i2s>;
+ rockchip,audio-codec = <&max98090>;
+ rockchip,headset-codec = <&headsetcodec>;
+};
+
+/* For HDMI-only board. */
+sound {
+ compatible = "rockchip,rockchip-audio-max98090";
+ rockchip,model = "ROCKCHIP-I2S";
+ rockchip,i2s-controller = <&i2s>;
+ rockchip,hdmi-codec = <&hdmi>;
+};
+
+/* For max98090 plus HDMI board. */
sound {
compatible = "rockchip,rockchip-audio-max98090";
rockchip,model = "ROCKCHIP-I2S";
rockchip,i2s-controller = <&i2s>;
rockchip,audio-codec = <&max98090>;
rockchip,headset-codec = <&headsetcodec>;
+ rockchip,hdmi-codec = <&hdmi>;
};
diff --git a/Documentation/devicetree/bindings/sound/rt1011.txt b/Documentation/devicetree/bindings/sound/rt1011.txt
index 35a23e60d679..02d53b9aa247 100644
--- a/Documentation/devicetree/bindings/sound/rt1011.txt
+++ b/Documentation/devicetree/bindings/sound/rt1011.txt
@@ -20,6 +20,14 @@ Required properties:
| 1 | 1 | 0x3b |
-------------------------------------
+Optional properties:
+
+- realtek,temperature_calib
+ u32. The temperature measured during calibration. Unit: degrees Celsius
+
+- realtek,r0_calib
+ u32. The r0 calibration data measured in factory mode.
+
Pins on the device (for linking into audio routes) for RT1011:
* SPO
@@ -29,4 +37,6 @@ Example:
rt1011: codec@38 {
compatible = "realtek,rt1011";
reg = <0x38>;
+ realtek,temperature_calib = <25>;
+ realtek,r0_calib = <0x224050>;
};
diff --git a/Documentation/devicetree/bindings/sound/rt5682.txt b/Documentation/devicetree/bindings/sound/rt5682.txt
index 312e9a129530..30e927a28369 100644
--- a/Documentation/devicetree/bindings/sound/rt5682.txt
+++ b/Documentation/devicetree/bindings/sound/rt5682.txt
@@ -27,6 +27,11 @@ Optional properties:
- realtek,ldo1-en-gpios : The GPIO that controls the CODEC's LDO1_EN pin.
+- realtek,btndet-delay
+ The debounce delay for the push button.
+ The delay time is the realtek,btndet-delay value multiplied by 8.192 ms.
+ If absent, the default is 16.
+
Pins on the device (for linking into audio routes) for RT5682:
* DMIC L1
@@ -47,4 +52,5 @@ rt5682 {
realtek,dmic1-data-pin = <1>;
realtek,dmic1-clk-pin = <1>;
realtek,jd-src = <1>;
+ realtek,btndet-delay = <16>;
};
diff --git a/Documentation/devicetree/bindings/sound/samsung,odroid.txt b/Documentation/devicetree/bindings/sound/samsung,odroid.txt
deleted file mode 100644
index e9da2200e173..000000000000
--- a/Documentation/devicetree/bindings/sound/samsung,odroid.txt
+++ /dev/null
@@ -1,54 +0,0 @@
-Samsung Exynos Odroid XU3/XU4 audio complex with MAX98090 codec
-
-Required properties:
-
- - compatible - "hardkernel,odroid-xu3-audio" - for Odroid XU3 board,
- "hardkernel,odroid-xu4-audio" - for Odroid XU4 board (deprecated),
- "samsung,odroid-xu3-audio" - for Odroid XU3 board (deprecated),
- "samsung,odroid-xu4-audio" - for Odroid XU4 board (deprecated)
- - model - the user-visible name of this sound complex
- - clocks - should contain entries matching clock names in the clock-names
- property
- - samsung,audio-widgets - this property specifies off-codec audio elements
- like headphones or speakers, for details see widgets.txt
- - samsung,audio-routing - a list of the connections between audio
- components; each entry is a pair of strings, the first being the
- connection's sink, the second being the connection's source;
- valid names for sources and sinks are the MAX98090's pins (as
- documented in its binding), and the jacks on the board
-
- For Odroid X2:
- "Headphone Jack", "Mic Jack", "DMIC"
-
- For Odroid U3, XU3:
- "Headphone Jack", "Speakers"
-
- For Odroid XU4:
- no entries
-
-Required sub-nodes:
-
- - 'cpu' subnode with a 'sound-dai' property containing the phandle of the I2S
- controller
- - 'codec' subnode with a 'sound-dai' property containing list of phandles
- to the CODEC nodes, first entry must be corresponding to the MAX98090
- CODEC and the second entry must be the phandle of the HDMI IP block node
-
-Example:
-
-sound {
- compatible = "hardkernel,odroid-xu3-audio";
- model = "Odroid-XU3";
- samsung,audio-routing =
- "Headphone Jack", "HPL",
- "Headphone Jack", "HPR",
- "IN1", "Mic Jack",
- "Mic Jack", "MICBIAS";
-
- cpu {
- sound-dai = <&i2s0 0>;
- };
- codec {
- sound-dai = <&hdmi>, <&max98090>;
- };
-};
diff --git a/Documentation/devicetree/bindings/sound/samsung,odroid.yaml b/Documentation/devicetree/bindings/sound/samsung,odroid.yaml
new file mode 100644
index 000000000000..c6b244352d05
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/samsung,odroid.yaml
@@ -0,0 +1,91 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/samsung,odroid.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Samsung Exynos Odroid XU3/XU4 audio complex with MAX98090 codec
+
+maintainers:
+ - Krzysztof Kozlowski <krzk@kernel.org>
+ - Sylwester Nawrocki <s.nawrocki@samsung.com>
+
+properties:
+ compatible:
+ oneOf:
+ - const: hardkernel,odroid-xu3-audio
+
+ - const: hardkernel,odroid-xu4-audio
+ deprecated: true
+
+ - const: samsung,odroid-xu3-audio
+ deprecated: true
+
+ - const: samsung,odroid-xu4-audio
+ deprecated: true
+
+ model:
+ $ref: /schemas/types.yaml#/definitions/string
+ description: The user-visible name of this sound complex.
+
+ cpu:
+ type: object
+ properties:
+ sound-dai:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ description: phandles to the I2S controllers
+
+ codec:
+ type: object
+ properties:
+ sound-dai:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ description: |
+ List of phandles to the CODEC nodes,
+ first entry must be corresponding to the MAX98090 CODEC and
+ the second entry must be the phandle of the HDMI IP block node.
+
+ samsung,audio-routing:
+ $ref: /schemas/types.yaml#/definitions/non-unique-string-array
+ description: |
+ List of the connections between audio
+ components; each entry is a pair of strings, the first being the
+ connection's sink, the second being the connection's source;
+ valid names for sources and sinks are the MAX98090's pins (as
+ documented in its binding), and the jacks on the board.
+ For Odroid X2: "Headphone Jack", "Mic Jack", "DMIC"
+ For Odroid U3, XU3: "Headphone Jack", "Speakers"
+ For Odroid XU4: no entries
+
+ samsung,audio-widgets:
+ $ref: /schemas/types.yaml#/definitions/non-unique-string-array
+ description: |
+ This property specifies off-codec audio elements
+ like headphones or speakers, for details see widgets.txt
+
+required:
+ - compatible
+ - model
+ - cpu
+ - codec
+
+examples:
+ - |
+ sound {
+ compatible = "hardkernel,odroid-xu3-audio";
+ model = "Odroid-XU3";
+ samsung,audio-routing =
+ "Headphone Jack", "HPL",
+ "Headphone Jack", "HPR",
+ "IN1", "Mic Jack",
+ "Mic Jack", "MICBIAS";
+
+ cpu {
+ sound-dai = <&i2s0 0>;
+ };
+
+ codec {
+ sound-dai = <&hdmi>, <&max98090>;
+ };
+ };
+
diff --git a/Documentation/devicetree/bindings/sound/samsung-i2s.txt b/Documentation/devicetree/bindings/sound/samsung-i2s.txt
deleted file mode 100644
index a88cb00fa096..000000000000
--- a/Documentation/devicetree/bindings/sound/samsung-i2s.txt
+++ /dev/null
@@ -1,84 +0,0 @@
-* Samsung I2S controller
-
-Required SoC Specific Properties:
-
-- compatible : should be one of the following.
- - samsung,s3c6410-i2s: for 8/16/24bit stereo I2S.
- - samsung,s5pv210-i2s: for 8/16/24bit multichannel(5.1) I2S with
- secondary fifo, s/w reset control and internal mux for root clk src.
- - samsung,exynos5420-i2s: for 8/16/24bit multichannel(5.1) I2S for
- playback, stereo channel capture, secondary fifo using internal
- or external dma, s/w reset control, internal mux for root clk src
- and 7.1 channel TDM support for playback. TDM (Time division multiplexing)
- is to allow transfer of multiple channel audio data on single data line.
- - samsung,exynos7-i2s: with all the available features of exynos5 i2s,
- exynos7 I2S has 7.1 channel TDM support for capture, secondary fifo
- with only external dma and more no.of root clk sampling frequencies.
- - samsung,exynos7-i2s1: I2S1 on previous samsung platforms supports
- stereo channels. exynos7 i2s1 upgraded to 5.1 multichannel with
- slightly modified bit offsets.
-
-- reg: physical base address of the controller and length of memory mapped
- region.
-- dmas: list of DMA controller phandle and DMA request line ordered pairs.
-- dma-names: identifier string for each DMA request line in the dmas property.
- These strings correspond 1:1 with the ordered pairs in dmas.
-- clocks: Handle to iis clock and RCLK source clk.
-- clock-names:
- i2s0 uses some base clocks from CMU and some are from audio subsystem internal
- clock controller. The clock names for i2s0 should be "iis", "i2s_opclk0" and
- "i2s_opclk1" as shown in the example below.
- i2s1 and i2s2 uses clocks from CMU. The clock names for i2s1 and i2s2 should
- be "iis" and "i2s_opclk0".
- "iis" is the i2s bus clock and i2s_opclk0, i2s_opclk1 are sources of the root
- clk. i2s0 has internal mux to select the source of root clk and i2s1 and i2s2
- doesn't have any such mux.
-- #clock-cells: should be 1, this property must be present if the I2S device
- is a clock provider in terms of the common clock bindings, described in
- ../clock/clock-bindings.txt.
-- clock-output-names (deprecated): from the common clock bindings, names of
- the CDCLK I2S output clocks, suggested values are "i2s_cdclk0", "i2s_cdclk1",
- "i2s_cdclk3" for the I2S0, I2S1, I2S2 devices respectively.
-
-There are following clocks available at the I2S device nodes:
- CLK_I2S_CDCLK - the CDCLK (CODECLKO) gate clock,
- CLK_I2S_RCLK_PSR - the RCLK prescaler divider clock (corresponding to the
- IISPSR register),
- CLK_I2S_RCLK_SRC - the RCLKSRC mux clock (corresponding to RCLKSRC bit in
- IISMOD register).
-
-Refer to the SoC datasheet for availability of the above clocks.
-The CLK_I2S_RCLK_PSR and CLK_I2S_RCLK_SRC clocks are usually only available
-in the IIS Multi Audio Interface.
-
-Note: Old DTs may not have the #clock-cells property and then not use the I2S
-node as a clock supplier.
-
-Optional SoC Specific Properties:
-
-- samsung,idma-addr: Internal DMA register base address of the audio
- sub system(used in secondary sound source).
-- pinctrl-0: Should specify pin control groups used for this controller.
-- pinctrl-names: Should contain only one value - "default".
-- #sound-dai-cells: should be 1.
-
-
-Example:
-
-i2s0: i2s@3830000 {
- compatible = "samsung,s5pv210-i2s";
- reg = <0x03830000 0x100>;
- dmas = <&pdma0 10
- &pdma0 9
- &pdma0 8>;
- dma-names = "tx", "rx", "tx-sec";
- clocks = <&clock_audss EXYNOS_I2S_BUS>,
- <&clock_audss EXYNOS_I2S_BUS>,
- <&clock_audss EXYNOS_SCLK_I2S>;
- clock-names = "iis", "i2s_opclk0", "i2s_opclk1";
- #clock-cells = <1>;
- samsung,idma-addr = <0x03000000>;
- pinctrl-names = "default";
- pinctrl-0 = <&i2s0_bus>;
- #sound-dai-cells = <1>;
-};
diff --git a/Documentation/devicetree/bindings/sound/samsung-i2s.yaml b/Documentation/devicetree/bindings/sound/samsung-i2s.yaml
new file mode 100644
index 000000000000..53e3bad4178c
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/samsung-i2s.yaml
@@ -0,0 +1,138 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/samsung-i2s.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Samsung SoC I2S controller
+
+maintainers:
+ - Krzysztof Kozlowski <krzk@kernel.org>
+ - Sylwester Nawrocki <s.nawrocki@samsung.com>
+
+properties:
+ compatible:
+ description: |
+ samsung,s3c6410-i2s: for 8/16/24bit stereo I2S.
+
+ samsung,s5pv210-i2s: for 8/16/24bit multichannel (5.1) I2S with
+ secondary FIFO, s/w reset control and internal mux for root clock
+ source.
+
+ samsung,exynos5420-i2s: for 8/16/24bit multichannel (5.1) I2S for
+ playback, stereo channel capture, secondary FIFO using internal
+ or external DMA, s/w reset control, internal mux for root clock
+ source and 7.1 channel TDM support for playback; TDM (Time division
+ multiplexing) is to allow transfer of multiple channel audio data on
+ single data line.
+
+ samsung,exynos7-i2s: with all the available features of Exynos5 I2S.
+ Exynos7 I2S has 7.1 channel TDM support for capture, a secondary FIFO
+ with external DMA only, and a larger number of root clock sampling
+ frequencies.
+
+ samsung,exynos7-i2s1: I2S1 on previous Samsung platforms supports
+ stereo channels. Exynos7 I2S1 is upgraded to 5.1 multichannel with
+ slightly modified bit offsets.
+ enum:
+ - samsung,s3c6410-i2s
+ - samsung,s5pv210-i2s
+ - samsung,exynos5420-i2s
+ - samsung,exynos7-i2s
+ - samsung,exynos7-i2s1
+
+ reg:
+ maxItems: 1
+
+ dmas:
+ minItems: 2
+ maxItems: 3
+
+ dma-names:
+ oneOf:
+ - items:
+ - const: tx
+ - const: rx
+ - items:
+ - const: tx
+ - const: rx
+ - const: tx-sec
+
+ clocks:
+ minItems: 1
+ maxItems: 3
+
+ clock-names:
+ oneOf:
+ - items:
+ - const: iis
+ - items: # for I2S0
+ - const: iis
+ - const: i2s_opclk0
+ - const: i2s_opclk1
+ - items: # for I2S1 and I2S2
+ - const: iis
+ - const: i2s_opclk0
+ description: |
+ "iis" is the I2S bus clock and i2s_opclk0, i2s_opclk1 are sources
+ of the root clock. I2S0 has internal mux to select the source
+ of root clock and I2S1 and I2S2 doesn't have any such mux.
+
+ "#clock-cells":
+ const: 1
+
+ clock-output-names:
+ deprecated: true
+ oneOf:
+ - items: # for I2S0
+ - const: i2s_cdclk0
+ - items: # for I2S1
+ - const: i2s_cdclk1
+ - items: # for I2S2
+ - const: i2s_cdclk2
+ description: Names of the CDCLK I2S output clocks.
+
+ samsung,idma-addr:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: |
+ Internal DMA register base address of the audio
+ subsystem (used in secondary sound source).
+
+ pinctrl-0:
+ description: Should specify pin control groups used for this controller.
+
+ pinctrl-names:
+ const: default
+
+ "#sound-dai-cells":
+ const: 1
+
+required:
+ - compatible
+ - reg
+ - dmas
+ - dma-names
+ - clocks
+ - clock-names
+
+examples:
+ - |
+ #include <dt-bindings/clock/exynos-audss-clk.h>
+
+ i2s0: i2s@3830000 {
+ compatible = "samsung,s5pv210-i2s";
+ reg = <0x03830000 0x100>;
+ dmas = <&pdma0 10>,
+ <&pdma0 9>,
+ <&pdma0 8>;
+ dma-names = "tx", "rx", "tx-sec";
+ clocks = <&clock_audss EXYNOS_I2S_BUS>,
+ <&clock_audss EXYNOS_I2S_BUS>,
+ <&clock_audss EXYNOS_SCLK_I2S>;
+ clock-names = "iis", "i2s_opclk0", "i2s_opclk1";
+ #clock-cells = <1>;
+ samsung,idma-addr = <0x03000000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2s0_bus>;
+ #sound-dai-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/sound/sun4i-codec.txt b/Documentation/devicetree/bindings/sound/sun4i-codec.txt
deleted file mode 100644
index 66579bbd3294..000000000000
--- a/Documentation/devicetree/bindings/sound/sun4i-codec.txt
+++ /dev/null
@@ -1,94 +0,0 @@
-* Allwinner A10 Codec
-
-Required properties:
-- compatible: must be one of the following compatibles:
- - "allwinner,sun4i-a10-codec"
- - "allwinner,sun6i-a31-codec"
- - "allwinner,sun7i-a20-codec"
- - "allwinner,sun8i-a23-codec"
- - "allwinner,sun8i-h3-codec"
- - "allwinner,sun8i-v3s-codec"
-- reg: must contain the registers location and length
-- interrupts: must contain the codec interrupt
-- dmas: DMA channels for tx and rx dma. See the DMA client binding,
- Documentation/devicetree/bindings/dma/dma.txt
-- dma-names: should include "tx" and "rx".
-- clocks: a list of phandle + clock-specifer pairs, one for each entry
- in clock-names.
-- clock-names: should contain the following:
- - "apb": the parent APB clock for this controller
- - "codec": the parent module clock
-
-Optional properties:
-- allwinner,pa-gpios: gpio to enable external amplifier
-
-Required properties for the following compatibles:
- - "allwinner,sun6i-a31-codec"
- - "allwinner,sun8i-a23-codec"
- - "allwinner,sun8i-h3-codec"
- - "allwinner,sun8i-v3s-codec"
-- resets: phandle to the reset control for this device
-- allwinner,audio-routing: A list of the connections between audio components.
- Each entry is a pair of strings, the first being the
- connection's sink, the second being the connection's
- source. Valid names include:
-
- Audio pins on the SoC:
- "HP"
- "HPCOM"
- "LINEIN" (not on sun8i-v3s)
- "LINEOUT" (not on sun8i-a23 or sun8i-v3s)
- "MIC1"
- "MIC2" (not on sun8i-v3s)
- "MIC3" (sun6i-a31 only)
-
- Microphone biases from the SoC:
- "HBIAS"
- "MBIAS" (not on sun8i-v3s)
-
- Board connectors:
- "Headphone"
- "Headset Mic"
- "Line In"
- "Line Out"
- "Mic"
- "Speaker"
-
-Required properties for the following compatibles:
- - "allwinner,sun8i-a23-codec"
- - "allwinner,sun8i-h3-codec"
- - "allwinner,sun8i-v3s-codec"
-- allwinner,codec-analog-controls: A phandle to the codec analog controls
- block in the PRCM.
-
-Example:
-codec: codec@1c22c00 {
- #sound-dai-cells = <0>;
- compatible = "allwinner,sun7i-a20-codec";
- reg = <0x01c22c00 0x40>;
- interrupts = <0 30 4>;
- clocks = <&apb0_gates 0>, <&codec_clk>;
- clock-names = "apb", "codec";
- dmas = <&dma 0 19>, <&dma 0 19>;
- dma-names = "rx", "tx";
-};
-
-codec: codec@1c22c00 {
- #sound-dai-cells = <0>;
- compatible = "allwinner,sun6i-a31-codec";
- reg = <0x01c22c00 0x98>;
- interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&ccu CLK_APB1_CODEC>, <&ccu CLK_CODEC>;
- clock-names = "apb", "codec";
- resets = <&ccu RST_APB1_CODEC>;
- dmas = <&dma 15>, <&dma 15>;
- dma-names = "rx", "tx";
- allwinner,audio-routing =
- "Headphone", "HP",
- "Speaker", "LINEOUT",
- "LINEIN", "Line In",
- "MIC1", "MBIAS",
- "MIC1", "Mic",
- "MIC2", "HBIAS",
- "MIC2", "Headset Mic";
-};
diff --git a/Documentation/devicetree/bindings/sound/sun8i-codec-analog.txt b/Documentation/devicetree/bindings/sound/sun8i-codec-analog.txt
deleted file mode 100644
index 07356758bd91..000000000000
--- a/Documentation/devicetree/bindings/sound/sun8i-codec-analog.txt
+++ /dev/null
@@ -1,17 +0,0 @@
-* Allwinner Codec Analog Controls
-
-Required properties:
-- compatible: must be one of the following compatibles:
- - "allwinner,sun8i-a23-codec-analog"
- - "allwinner,sun8i-h3-codec-analog"
- - "allwinner,sun8i-v3s-codec-analog"
-
-Required properties if not a sub-node of the PRCM node:
-- reg: must contain the registers location and length
-
-Example:
-prcm: prcm@1f01400 {
- codec_analog: codec-analog {
- compatible = "allwinner,sun8i-a23-codec-analog";
- };
-};
diff --git a/Documentation/devicetree/bindings/sound/tas2562.txt b/Documentation/devicetree/bindings/sound/tas2562.txt
new file mode 100644
index 000000000000..658e1fb18a99
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/tas2562.txt
@@ -0,0 +1,34 @@
+Texas Instruments TAS2562 Smart PA
+
+The TAS2562 is a mono, digital input Class-D audio amplifier optimized for
+efficiently driving high peak power into small loudspeakers.
+Integrated speaker voltage and current sense provides for
+real time monitoring of loudspeaker behavior.
+
+Required properties:
+ - #address-cells - Should be <1>.
+ - #size-cells - Should be <0>.
+ - compatible: - Should contain "ti,tas2562".
+ - reg: - The I2C address. Should be 0x4c, 0x4d, 0x4e or 0x4f.
+ - ti,imon-slot-no:- TDM TX current sense time slot.
+
+Optional properties:
+- interrupt-parent: phandle to the interrupt controller which provides
+ the interrupt.
+- interrupts: (GPIO) interrupt to which the chip is connected.
+- shut-down: GPIO used to control the state of the device.
+
+Examples:
+tas2562@4c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "ti,tas2562";
+ reg = <0x4c>;
+
+ interrupt-parent = <&gpio1>;
+ interrupts = <14>;
+
+ shut-down = <&gpio1 15 0>;
+ ti,imon-slot-no = <0>;
+};
+
diff --git a/Documentation/devicetree/bindings/sound/tas2770.txt b/Documentation/devicetree/bindings/sound/tas2770.txt
new file mode 100644
index 000000000000..ede6bb3d9637
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/tas2770.txt
@@ -0,0 +1,37 @@
+Texas Instruments TAS2770 Smart PA
+
+The TAS2770 is a mono, digital input Class-D audio amplifier optimized for
+efficiently driving high peak power into small loudspeakers.
+Integrated speaker voltage and current sense provides for
+real time monitoring of loudspeaker behavior.
+
+Required properties:
+
+ - compatible: - Should contain "ti,tas2770".
+ - reg: - The I2C address. Should contain <0x4c>, <0x4d>, <0x4e>, or <0x4f>.
+ - #address-cells - Should be <1>.
+ - #size-cells - Should be <0>.
+ - ti,asi-format: - Sets TDM RX capture edge. 0->Rising; 1->Falling.
+ - ti,imon-slot-no:- TDM TX current sense time slot.
+ - ti,vmon-slot-no:- TDM TX voltage sense time slot.
+
+Optional properties:
+
+- interrupt-parent: the phandle to the interrupt controller which provides
+ the interrupt.
+- interrupts: interrupt specification for data-ready.
+
+Examples:
+
+ tas2770@4c {
+ compatible = "ti,tas2770";
+ reg = <0x4c>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupt-parent = <&msm_gpio>;
+ interrupts = <97 0>;
+ ti,asi-format = <0>;
+ ti,imon-slot-no = <0>;
+ ti,vmon-slot-no = <2>;
+ };
+
diff --git a/Documentation/devicetree/bindings/sound/ti,pcm3168a.txt b/Documentation/devicetree/bindings/sound/ti,pcm3168a.txt
index 5d9cb84c661d..a02ecaab5183 100644
--- a/Documentation/devicetree/bindings/sound/ti,pcm3168a.txt
+++ b/Documentation/devicetree/bindings/sound/ti,pcm3168a.txt
@@ -25,6 +25,13 @@ Required properties:
For required properties on SPI/I2C, consult SPI/I2C device tree documentation
+Optional properties:
+
+ - reset-gpios : Optional reset GPIO line connected to the RST pin of the codec.
+ The RST line is active low:
+ RST = low: device power-down
+ RST = high: device is enabled
+
Examples:
i2c0: i2c0@0 {
@@ -34,6 +41,7 @@ i2c0: i2c0@0 {
pcm3168a: audio-codec@44 {
compatible = "ti,pcm3168a";
reg = <0x44>;
+ reset-gpios = <&gpio0 4 GPIO_ACTIVE_LOW>;
clocks = <&clk_core CLK_AUDIO>;
clock-names = "scki";
VDD1-supply = <&supply3v3>;
diff --git a/Documentation/devicetree/bindings/sound/tlv320aic31xx.txt b/Documentation/devicetree/bindings/sound/tlv320aic31xx.txt
index 5b3c33bb99e5..e372303697dc 100644
--- a/Documentation/devicetree/bindings/sound/tlv320aic31xx.txt
+++ b/Documentation/devicetree/bindings/sound/tlv320aic31xx.txt
@@ -29,6 +29,11 @@ Optional properties:
3 or MICBIAS_AVDD - MICBIAS output is connected to AVDD
If this node is not mentioned or if the value is unknown, then
micbias is set to 2.0V.
+- ai31xx-ocmv - output common-mode voltage setting
+ 0 - 1.35V,
+ 1 - 1.5V,
+ 2 - 1.65V,
+ 3 - 1.8V
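+ For instance (an illustrative fragment; the chosen value is only an example):
+ ai31xx-ocmv = <2>; /* 1.65V output common-mode voltage */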
Deprecated properties:
diff --git a/Documentation/devicetree/bindings/spi/renesas,hspi.yaml b/Documentation/devicetree/bindings/spi/renesas,hspi.yaml
new file mode 100644
index 000000000000..c429cf4bea5b
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/renesas,hspi.yaml
@@ -0,0 +1,57 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/spi/renesas,hspi.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Renesas HSPI
+
+maintainers:
+ - Geert Uytterhoeven <geert+renesas@glider.be>
+
+allOf:
+ - $ref: spi-controller.yaml#
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - renesas,hspi-r8a7778 # R-Car M1A
+ - renesas,hspi-r8a7779 # R-Car H1
+ - const: renesas,hspi
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ power-domains:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - '#address-cells'
+ - '#size-cells'
+
+examples:
+ - |
+ #include <dt-bindings/clock/r8a7778-clock.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ hspi0: spi@fffc7000 {
+ compatible = "renesas,hspi-r8a7778", "renesas,hspi";
+ reg = <0xfffc7000 0x18>;
+ interrupts = <0 63 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp0_clks R8A7778_CLK_HSPI>;
+ power-domains = <&cpg_clocks>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
diff --git a/Documentation/devicetree/bindings/spi/renesas,rzn1-spi.txt b/Documentation/devicetree/bindings/spi/renesas,rzn1-spi.txt
new file mode 100644
index 000000000000..fb1a6728638d
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/renesas,rzn1-spi.txt
@@ -0,0 +1,11 @@
+Renesas RZ/N1 SPI Controller
+
+This controller is based on the Synopsys DW Synchronous Serial Interface and
+inherits all properties defined in snps,dw-apb-ssi.txt except for the
+compatible property.
+
+Required properties:
+- compatible : The device-specific string followed by the generic RZ/N1 string.
+ It must therefore be one of:
+ "renesas,r9a06g032-spi", "renesas,rzn1-spi"
+ "renesas,r9a06g033-spi", "renesas,rzn1-spi"
diff --git a/Documentation/devicetree/bindings/spi/renesas,sh-msiof.yaml b/Documentation/devicetree/bindings/spi/renesas,sh-msiof.yaml
new file mode 100644
index 000000000000..b6c1dd2a9c5e
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/renesas,sh-msiof.yaml
@@ -0,0 +1,159 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/spi/renesas,sh-msiof.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Renesas MSIOF SPI controller
+
+maintainers:
+ - Geert Uytterhoeven <geert+renesas@glider.be>
+
+allOf:
+ - $ref: spi-controller.yaml#
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - const: renesas,msiof-sh73a0 # SH-Mobile AG5
+ - const: renesas,sh-mobile-msiof # generic SH-Mobile compatible
+ # device
+ - items:
+ - enum:
+ - renesas,msiof-r8a7743 # RZ/G1M
+ - renesas,msiof-r8a7744 # RZ/G1N
+ - renesas,msiof-r8a7745 # RZ/G1E
+ - renesas,msiof-r8a77470 # RZ/G1C
+ - renesas,msiof-r8a7790 # R-Car H2
+ - renesas,msiof-r8a7791 # R-Car M2-W
+ - renesas,msiof-r8a7792 # R-Car V2H
+ - renesas,msiof-r8a7793 # R-Car M2-N
+ - renesas,msiof-r8a7794 # R-Car E2
+ - const: renesas,rcar-gen2-msiof # generic R-Car Gen2 and RZ/G1
+ # compatible device
+ - items:
+ - enum:
+ - renesas,msiof-r8a774a1 # RZ/G2M
+ - renesas,msiof-r8a774b1 # RZ/G2N
+ - renesas,msiof-r8a774c0 # RZ/G2E
+ - renesas,msiof-r8a7795 # R-Car H3
+ - renesas,msiof-r8a7796 # R-Car M3-W
+ - renesas,msiof-r8a77965 # R-Car M3-N
+ - renesas,msiof-r8a77970 # R-Car V3M
+ - renesas,msiof-r8a77980 # R-Car V3H
+ - renesas,msiof-r8a77990 # R-Car E3
+ - renesas,msiof-r8a77995 # R-Car D3
+ - const: renesas,rcar-gen3-msiof # generic R-Car Gen3 and RZ/G2
+ # compatible device
+ - items:
+ - const: renesas,sh-msiof # deprecated
+
+ reg:
+ minItems: 1
+ maxItems: 2
+ oneOf:
+ - items:
+ - description: CPU and DMA engine registers
+ - items:
+ - description: CPU registers
+ - description: DMA engine registers
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ num-cs:
+ description: |
+ Total number of chip selects (default is 1).
+ Up to 3 native chip selects are supported:
+ 0: MSIOF_SYNC
+ 1: MSIOF_SS1
+ 2: MSIOF_SS2
+ Hardware limitations related to chip selects:
+ - Native chip selects are always deasserted in between transfers
+ that are part of the same message. Use cs-gpios to work around
+ this (see the second example below).
+ - All slaves using native chip selects must use the same spi-cs-high
+ configuration. Use cs-gpios to work around this.
+ - When using GPIO chip selects, at least one native chip select must
+ be left unused, as it will be driven anyway.
+ minimum: 1
+ maximum: 3
+ default: 1
+
+ dmas:
+ minItems: 2
+ maxItems: 4
+
+ dma-names:
+ minItems: 2
+ maxItems: 4
+ items:
+ enum: [ tx, rx ]
+
+ renesas,dtdl:
+ description: delay sync signal (setup) in transmit mode.
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - enum:
+ - 0 # no bit delay
+ - 50 # 0.5-clock-cycle delay
+ - 100 # 1-clock-cycle delay
+ - 150 # 1.5-clock-cycle delay
+ - 200 # 2-clock-cycle delay
+
+ renesas,syncdl:
+ description: delay sync signal (hold) in transmit mode
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - enum:
+ - 0 # no bit delay
+ - 50 # 0.5-clock-cycle delay
+ - 100 # 1-clock-cycle delay
+ - 150 # 1.5-clock-cycle delay
+ - 200 # 2-clock-cycle delay
+ - 300 # 3-clock-cycle delay
+
+ renesas,tx-fifo-size:
+ # deprecated for soctype-specific bindings
+ description: |
+ Override the default TX fifo size. Unit is words. Ignored if 0.
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - maxItems: 1
+ default: 64
+
+ renesas,rx-fifo-size:
+ # deprecated for soctype-specific bindings
+ description: |
+ Override the default RX fifo size. Unit is words. Ignored if 0.
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - maxItems: 1
+ default: 64
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - '#address-cells'
+ - '#size-cells'
+
+examples:
+ - |
+ #include <dt-bindings/clock/r8a7791-clock.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ msiof0: spi@e6e20000 {
+ compatible = "renesas,msiof-r8a7791", "renesas,rcar-gen2-msiof";
+ reg = <0 0xe6e20000 0 0x0064>;
+ interrupts = <0 156 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp0_clks R8A7791_CLK_MSIOF0>;
+ dmas = <&dmac0 0x51>, <&dmac0 0x52>;
+ dma-names = "tx", "rx";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
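+
+ - |
+ #include <dt-bindings/clock/r8a7791-clock.h>
+ #include <dt-bindings/gpio/gpio.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ /* A hedged sketch of the cs-gpios workaround described under num-cs;
+ * the unit address, interrupt number, clock specifier and GPIO phandle
+ * are illustrative placeholders. A <0> entry in cs-gpios leaves that
+ * slot on the native chip select.
+ */
+ msiof1: spi@e6e10000 {
+ compatible = "renesas,msiof-r8a7791", "renesas,rcar-gen2-msiof";
+ reg = <0 0xe6e10000 0 0x0064>;
+ interrupts = <0 157 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp2_clks R8A7791_CLK_MSIOF1>;
+ num-cs = <2>;
+ cs-gpios = <&gpio1 8 GPIO_ACTIVE_LOW>, <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };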
diff --git a/Documentation/devicetree/bindings/spi/sh-hspi.txt b/Documentation/devicetree/bindings/spi/sh-hspi.txt
deleted file mode 100644
index b9d1e4d11a77..000000000000
--- a/Documentation/devicetree/bindings/spi/sh-hspi.txt
+++ /dev/null
@@ -1,26 +0,0 @@
-Renesas HSPI.
-
-Required properties:
-- compatible : "renesas,hspi-<soctype>", "renesas,hspi" as fallback.
- Examples with soctypes are:
- - "renesas,hspi-r8a7778" (R-Car M1)
- - "renesas,hspi-r8a7779" (R-Car H1)
-- reg : Offset and length of the register set for the device
-- interrupts : Interrupt specifier
-- #address-cells : Must be <1>
-- #size-cells : Must be <0>
-
-Pinctrl properties might be needed, too. See
-Documentation/devicetree/bindings/pinctrl/renesas,*.
-
-Example:
-
- hspi0: spi@fffc7000 {
- compatible = "renesas,hspi-r8a7778", "renesas,hspi";
- reg = <0xfffc7000 0x18>;
- interrupt-parent = <&gic>;
- interrupts = <0 63 IRQ_TYPE_LEVEL_HIGH>;
- #address-cells = <1>;
- #size-cells = <0>;
- };
-
diff --git a/Documentation/devicetree/bindings/spi/sh-msiof.txt b/Documentation/devicetree/bindings/spi/sh-msiof.txt
deleted file mode 100644
index 18e14ee257b2..000000000000
--- a/Documentation/devicetree/bindings/spi/sh-msiof.txt
+++ /dev/null
@@ -1,105 +0,0 @@
-Renesas MSIOF spi controller
-
-Required properties:
-- compatible : "renesas,msiof-r8a7743" (RZ/G1M)
- "renesas,msiof-r8a7744" (RZ/G1N)
- "renesas,msiof-r8a7745" (RZ/G1E)
- "renesas,msiof-r8a77470" (RZ/G1C)
- "renesas,msiof-r8a774a1" (RZ/G2M)
- "renesas,msiof-r8a774c0" (RZ/G2E)
- "renesas,msiof-r8a7790" (R-Car H2)
- "renesas,msiof-r8a7791" (R-Car M2-W)
- "renesas,msiof-r8a7792" (R-Car V2H)
- "renesas,msiof-r8a7793" (R-Car M2-N)
- "renesas,msiof-r8a7794" (R-Car E2)
- "renesas,msiof-r8a7795" (R-Car H3)
- "renesas,msiof-r8a7796" (R-Car M3-W)
- "renesas,msiof-r8a77965" (R-Car M3-N)
- "renesas,msiof-r8a77970" (R-Car V3M)
- "renesas,msiof-r8a77980" (R-Car V3H)
- "renesas,msiof-r8a77990" (R-Car E3)
- "renesas,msiof-r8a77995" (R-Car D3)
- "renesas,msiof-sh73a0" (SH-Mobile AG5)
- "renesas,sh-mobile-msiof" (generic SH-Mobile compatibile device)
- "renesas,rcar-gen2-msiof" (generic R-Car Gen2 and RZ/G1 compatible device)
- "renesas,rcar-gen3-msiof" (generic R-Car Gen3 and RZ/G2 compatible device)
- "renesas,sh-msiof" (deprecated)
-
- When compatible with the generic version, nodes
- must list the SoC-specific version corresponding
- to the platform first followed by the generic
- version.
-
-- reg : A list of offsets and lengths of the register sets for
- the device.
- If only one register set is present, it is to be used
- by both the CPU and the DMA engine.
- If two register sets are present, the first is to be
- used by the CPU, and the second is to be used by the
- DMA engine.
-- interrupts : Interrupt specifier
-- #address-cells : Must be <1>
-- #size-cells : Must be <0>
-
-Optional properties:
-- clocks : Must contain a reference to the functional clock.
-- num-cs : Total number of chip selects (default is 1).
- Up to 3 native chip selects are supported:
- 0: MSIOF_SYNC
- 1: MSIOF_SS1
- 2: MSIOF_SS2
- Hardware limitations related to chip selects:
- - Native chip selects are always deasserted in
- between transfers that are part of the same
- message. Use cs-gpios to work around this.
- - All slaves using native chip selects must use the
- same spi-cs-high configuration. Use cs-gpios to
- work around this.
- - When using GPIO chip selects, at least one native
- chip select must be left unused, as it will be
- driven anyway.
-- dmas : Must contain a list of two references to DMA
- specifiers, one for transmission, and one for
- reception.
-- dma-names : Must contain a list of two DMA names, "tx" and "rx".
-- spi-slave : Empty property indicating the SPI controller is used
- in slave mode.
-- renesas,dtdl : delay sync signal (setup) in transmit mode.
- Must contain one of the following values:
- 0 (no bit delay)
- 50 (0.5-clock-cycle delay)
- 100 (1-clock-cycle delay)
- 150 (1.5-clock-cycle delay)
- 200 (2-clock-cycle delay)
-
-- renesas,syncdl : delay sync signal (hold) in transmit mode.
- Must contain one of the following values:
- 0 (no bit delay)
- 50 (0.5-clock-cycle delay)
- 100 (1-clock-cycle delay)
- 150 (1.5-clock-cycle delay)
- 200 (2-clock-cycle delay)
- 300 (3-clock-cycle delay)
-
-Optional properties, deprecated for soctype-specific bindings:
-- renesas,tx-fifo-size : Overrides the default tx fifo size given in words
- (default is 64)
-- renesas,rx-fifo-size : Overrides the default rx fifo size given in words
- (default is 64)
-
-Pinctrl properties might be needed, too. See
-Documentation/devicetree/bindings/pinctrl/renesas,*.
-
-Example:
-
- msiof0: spi@e6e20000 {
- compatible = "renesas,msiof-r8a7791",
- "renesas,rcar-gen2-msiof";
- reg = <0 0xe6e20000 0 0x0064>;
- interrupts = <0 156 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&mstp0_clks R8A7791_CLK_MSIOF0>;
- dmas = <&dmac0 0x51>, <&dmac0 0x52>;
- dma-names = "tx", "rx";
- #address-cells = <1>;
- #size-cells = <0>;
- };
diff --git a/Documentation/devicetree/bindings/spi/snps,dw-apb-ssi.txt b/Documentation/devicetree/bindings/spi/snps,dw-apb-ssi.txt
index f54c8c36395e..3ed08ee9feba 100644
--- a/Documentation/devicetree/bindings/spi/snps,dw-apb-ssi.txt
+++ b/Documentation/devicetree/bindings/spi/snps,dw-apb-ssi.txt
@@ -16,7 +16,8 @@ Required properties:
Optional properties:
- clock-names : Contains the names of the clocks:
"ssi_clk", for the core clock used to generate the external SPI clock.
- "pclk", the interface clock, required for register access.
+ "pclk", the interface clock, required for register access. If a clock domain
+ used to enable this clock then it should be named "pclk_clkdomain".
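+ For example (an illustrative fragment; the clock phandles are placeholders):
+ clocks = <&spi_m_clk>, <&spi_pclk_gate>;
+ clock-names = "ssi_clk", "pclk_clkdomain";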
- cs-gpios : Specifies the gpio pins to be used for chipselects.
- num-cs : The number of chipselects. If omitted, this will default to 4.
- reg-io-width : The I/O register width (in bytes) implemented by this
diff --git a/Documentation/devicetree/bindings/spi/spi-sifive.txt b/Documentation/devicetree/bindings/spi/spi-sifive.txt
deleted file mode 100644
index 3f5c6e438972..000000000000
--- a/Documentation/devicetree/bindings/spi/spi-sifive.txt
+++ /dev/null
@@ -1,37 +0,0 @@
-SiFive SPI controller Device Tree Bindings
-------------------------------------------
-
-Required properties:
-- compatible : Should be "sifive,<chip>-spi" and "sifive,spi<version>".
- Supported compatible strings are:
- "sifive,fu540-c000-spi" for the SiFive SPI v0 as integrated
- onto the SiFive FU540 chip, and "sifive,spi0" for the SiFive
- SPI v0 IP block with no chip integration tweaks.
- Please refer to sifive-blocks-ip-versioning.txt for details
-- reg : Physical base address and size of SPI registers map
- A second (optional) range can indicate memory mapped flash
-- interrupts : Must contain one entry
-- interrupt-parent : Must be core interrupt controller
-- clocks : Must reference the frequency given to the controller
-- #address-cells : Must be '1', indicating which CS to use
-- #size-cells : Must be '0'
-
-Optional properties:
-- sifive,fifo-depth : Depth of hardware queues; defaults to 8
-- sifive,max-bits-per-word : Maximum bits per word; defaults to 8
-
-SPI RTL that corresponds to the IP block version numbers can be found here:
-https://github.com/sifive/sifive-blocks/tree/master/src/main/scala/devices/spi
-
-Example:
- spi: spi@10040000 {
- compatible = "sifive,fu540-c000-spi", "sifive,spi0";
- reg = <0x0 0x10040000 0x0 0x1000 0x0 0x20000000 0x0 0x10000000>;
- interrupt-parent = <&plic>;
- interrupts = <51>;
- clocks = <&tlclk>;
- #address-cells = <1>;
- #size-cells = <0>;
- sifive,fifo-depth = <8>;
- sifive,max-bits-per-word = <8>;
- };
diff --git a/Documentation/devicetree/bindings/spi/spi-sifive.yaml b/Documentation/devicetree/bindings/spi/spi-sifive.yaml
new file mode 100644
index 000000000000..140e4351a19f
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/spi-sifive.yaml
@@ -0,0 +1,86 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/spi/spi-sifive.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: SiFive SPI controller
+
+maintainers:
+ - Pragnesh Patel <pragnesh.patel@sifive.com>
+ - Paul Walmsley <paul.walmsley@sifive.com>
+ - Palmer Dabbelt <palmer@sifive.com>
+
+allOf:
+ - $ref: "spi-controller.yaml#"
+
+properties:
+ compatible:
+ items:
+ - const: sifive,fu540-c000-spi
+ - const: sifive,spi0
+
+ description:
+ Should be "sifive,<chip>-spi" and "sifive,spi<version>".
+ Supported compatible strings are -
+ "sifive,fu540-c000-spi" for the SiFive SPI v0 as integrated
+ onto the SiFive FU540 chip, and "sifive,spi0" for the SiFive
+ SPI v0 IP block with no chip integration tweaks.
+ Please refer to sifive-blocks-ip-versioning.txt for details
+
+ SPI RTL that corresponds to the IP block version numbers can be found here -
+ https://github.com/sifive/sifive-blocks/tree/master/src/main/scala/devices/spi
+
+ reg:
+ maxItems: 1
+
+ description:
+ Physical base address and size of SPI registers map
+ A second (optional) range can indicate memory mapped flash
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ description:
+ Must reference the frequency given to the controller
+
+ sifive,fifo-depth:
+ description:
+ Depth of hardware queues; defaults to 8
+ allOf:
+ - $ref: "/schemas/types.yaml#/definitions/uint32"
+ - enum: [ 8 ]
+ - default: 8
+
+ sifive,max-bits-per-word:
+ description:
+ Maximum bits per word; defaults to 8
+ allOf:
+ - $ref: "/schemas/types.yaml#/definitions/uint32"
+ - enum: [ 0, 1, 2, 3, 4, 5, 6, 7, 8 ]
+ - default: 8
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+
+examples:
+ - |
+ spi: spi@10040000 {
+ compatible = "sifive,fu540-c000-spi", "sifive,spi0";
+ reg = <0x0 0x10040000 0x0 0x1000 0x0 0x20000000 0x0 0x10000000>;
+ interrupt-parent = <&plic>;
+ interrupts = <51>;
+ clocks = <&tlclk>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ sifive,fifo-depth = <8>;
+ sifive,max-bits-per-word = <8>;
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/spi/spi-stm32-qspi.txt b/Documentation/devicetree/bindings/spi/spi-stm32-qspi.txt
deleted file mode 100644
index bfc038b9478d..000000000000
--- a/Documentation/devicetree/bindings/spi/spi-stm32-qspi.txt
+++ /dev/null
@@ -1,47 +0,0 @@
-* STMicroelectronics Quad Serial Peripheral Interface(QSPI)
-
-Required properties:
-- compatible: should be "st,stm32f469-qspi"
-- reg: the first contains the register location and length.
- the second contains the memory mapping address and length
-- reg-names: should contain the reg names "qspi" "qspi_mm"
-- interrupts: should contain the interrupt for the device
-- clocks: the phandle of the clock needed by the QSPI controller
-- A pinctrl must be defined to set pins in mode of operation for QSPI transfer
-
-Optional properties:
-- resets: must contain the phandle to the reset controller.
-
-A spi flash (NOR/NAND) must be a child of spi node and could have some
-properties. Also see jedec,spi-nor.txt.
-
-Required properties:
-- reg: chip-Select number (QSPI controller may connect 2 flashes)
-- spi-max-frequency: max frequency of spi bus
-
-Optional properties:
-- spi-rx-bus-width: see ./spi-bus.txt for the description
-- dmas: DMA specifiers for tx and rx dma. See the DMA client binding,
-Documentation/devicetree/bindings/dma/dma.txt.
-- dma-names: DMA request names should include "tx" and "rx" if present.
-
-Example:
-
-qspi: spi@a0001000 {
- compatible = "st,stm32f469-qspi";
- reg = <0xa0001000 0x1000>, <0x90000000 0x10000000>;
- reg-names = "qspi", "qspi_mm";
- interrupts = <91>;
- resets = <&rcc STM32F4_AHB3_RESET(QSPI)>;
- clocks = <&rcc 0 STM32F4_AHB3_CLOCK(QSPI)>;
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_qspi0>;
-
- flash@0 {
- compatible = "jedec,spi-nor";
- reg = <0>;
- spi-rx-bus-width = <4>;
- spi-max-frequency = <108000000>;
- ...
- };
-};
diff --git a/Documentation/devicetree/bindings/spi/spi-xilinx.txt b/Documentation/devicetree/bindings/spi/spi-xilinx.txt
index dc924a5f71db..5f4ed3e5c994 100644
--- a/Documentation/devicetree/bindings/spi/spi-xilinx.txt
+++ b/Documentation/devicetree/bindings/spi/spi-xilinx.txt
@@ -8,7 +8,8 @@ Required properties:
number.
Optional properties:
-- xlnx,num-ss-bits : Number of chip selects used.
+- xlnx,num-ss-bits : Number of chip selects used.
+- xlnx,num-transfer-bits : Number of bits per transfer. This will be 8 if not specified.
Example:
axi_quad_spi@41e00000 {
@@ -17,5 +18,6 @@ Example:
interrupts = <0 31 1>;
reg = <0x41e00000 0x10000>;
xlnx,num-ss-bits = <0x1>;
+ xlnx,num-transfer-bits = <32>;
};
diff --git a/Documentation/devicetree/bindings/spi/st,stm32-qspi.yaml b/Documentation/devicetree/bindings/spi/st,stm32-qspi.yaml
new file mode 100644
index 000000000000..3665a5fe6b7f
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/st,stm32-qspi.yaml
@@ -0,0 +1,83 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/spi/st,stm32-qspi.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: STMicroelectronics STM32 Quad Serial Peripheral Interface (QSPI) bindings
+
+maintainers:
+ - Christophe Kerello <christophe.kerello@st.com>
+ - Patrice Chotard <patrice.chotard@st.com>
+
+allOf:
+ - $ref: "spi-controller.yaml#"
+
+properties:
+ compatible:
+ const: st,stm32f469-qspi
+
+ reg:
+ items:
+ - description: registers
+ - description: memory mapping
+
+ reg-names:
+ items:
+ - const: qspi
+ - const: qspi_mm
+
+ clocks:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ resets:
+ maxItems: 1
+
+ dmas:
+ items:
+ - description: tx DMA channel
+ - description: rx DMA channel
+
+ dma-names:
+ items:
+ - const: tx
+ - const: rx
+
+required:
+ - compatible
+ - reg
+ - reg-names
+ - clocks
+ - interrupts
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/clock/stm32mp1-clks.h>
+ #include <dt-bindings/reset/stm32mp1-resets.h>
+ spi@58003000 {
+ compatible = "st,stm32f469-qspi";
+ reg = <0x58003000 0x1000>, <0x70000000 0x10000000>;
+ reg-names = "qspi", "qspi_mm";
+ interrupts = <GIC_SPI 92 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&mdma1 22 0x10 0x100002 0x0 0x0>,
+ <&mdma1 22 0x10 0x100008 0x0 0x0>;
+ dma-names = "tx", "rx";
+ clocks = <&rcc QSPI_K>;
+ resets = <&rcc QSPI_R>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ flash@0 {
+ compatible = "jedec,spi-nor";
+ reg = <0>;
+ spi-rx-bus-width = <4>;
+ spi-max-frequency = <108000000>;
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/usb/renesas,usb3-peri.txt b/Documentation/devicetree/bindings/usb/renesas,usb3-peri.txt
deleted file mode 100644
index 35039e720515..000000000000
--- a/Documentation/devicetree/bindings/usb/renesas,usb3-peri.txt
+++ /dev/null
@@ -1,41 +0,0 @@
-Renesas Electronics USB3.0 Peripheral driver
-
-Required properties:
- - compatible: Must contain one of the following:
- - "renesas,r8a774a1-usb3-peri"
- - "renesas,r8a774c0-usb3-peri"
- - "renesas,r8a7795-usb3-peri"
- - "renesas,r8a7796-usb3-peri"
- - "renesas,r8a77965-usb3-peri"
- - "renesas,r8a77990-usb3-peri"
- - "renesas,rcar-gen3-usb3-peri" for a generic R-Car Gen3 or RZ/G2
- compatible device
-
- When compatible with the generic version, nodes must list the
- SoC-specific version corresponding to the platform first
- followed by the generic version.
-
- - reg: Base address and length of the register for the USB3.0 Peripheral
- - interrupts: Interrupt specifier for the USB3.0 Peripheral
- - clocks: clock phandle and specifier pair
-
-Optional properties:
- - phys: phandle + phy specifier pair
- - phy-names: must be "usb"
-
-Example of R-Car H3 ES1.x:
- usb3_peri0: usb@ee020000 {
- compatible = "renesas,r8a7795-usb3-peri",
- "renesas,rcar-gen3-usb3-peri";
- reg = <0 0xee020000 0 0x400>;
- interrupts = <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&cpg CPG_MOD 328>;
- };
-
- usb3_peri1: usb@ee060000 {
- compatible = "renesas,r8a7795-usb3-peri",
- "renesas,rcar-gen3-usb3-peri";
- reg = <0 0xee060000 0 0x400>;
- interrupts = <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&cpg CPG_MOD 327>;
- };
diff --git a/Documentation/devicetree/bindings/usb/renesas,usb3-peri.yaml b/Documentation/devicetree/bindings/usb/renesas,usb3-peri.yaml
new file mode 100644
index 000000000000..92d8631b9aa6
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/renesas,usb3-peri.yaml
@@ -0,0 +1,86 @@
+# SPDX-License-Identifier: GPL-2.0-only
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/usb/renesas,usb3-peri.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Renesas USB 3.0 Peripheral controller
+
+maintainers:
+ - Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - renesas,r8a774a1-usb3-peri # RZ/G2M
+ - renesas,r8a774b1-usb3-peri # RZ/G2N
+ - renesas,r8a774c0-usb3-peri # RZ/G2E
+ - renesas,r8a7795-usb3-peri # R-Car H3
+ - renesas,r8a7796-usb3-peri # R-Car M3-W
+ - renesas,r8a77965-usb3-peri # R-Car M3-N
+ - renesas,r8a77990-usb3-peri # R-Car E3
+ - const: renesas,rcar-gen3-usb3-peri
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ phys:
+ maxItems: 1
+
+ phy-names:
+ const: usb
+
+ power-domains:
+ maxItems: 1
+
+ resets:
+ maxItems: 1
+
+ usb-role-switch:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description: Support role switch.
+
+ companion:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description: phandle of a companion.
+
+ port:
+ description: |
+      Any connector to the data bus of this controller should be modelled
+      using the OF graph bindings specified, if the "usb-role-switch"
+      property is used.
+
+required:
+ - compatible
+ - interrupts
+ - clocks
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/r8a774c0-cpg-mssr.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/power/r8a774c0-sysc.h>
+
+ usb3_peri0: usb@ee020000 {
+ compatible = "renesas,r8a774c0-usb3-peri", "renesas,rcar-gen3-usb3-peri";
+ reg = <0 0xee020000 0 0x400>;
+ interrupts = <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 328>;
+ companion = <&xhci0>;
+ usb-role-switch;
+
+ port {
+ usb3_role_switch: endpoint {
+ remote-endpoint = <&hd3ss3220_ep>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/usb/renesas,usbhs.txt b/Documentation/devicetree/bindings/usb/renesas,usbhs.txt
deleted file mode 100644
index e39255ea6e4f..000000000000
--- a/Documentation/devicetree/bindings/usb/renesas,usbhs.txt
+++ /dev/null
@@ -1,57 +0,0 @@
-Renesas Electronics USBHS driver
-
-Required properties:
- - compatible: Must contain one or more of the following:
-
- - "renesas,usbhs-r8a7743" for r8a7743 (RZ/G1M) compatible device
- - "renesas,usbhs-r8a7744" for r8a7744 (RZ/G1N) compatible device
- - "renesas,usbhs-r8a7745" for r8a7745 (RZ/G1E) compatible device
- - "renesas,usbhs-r8a77470" for r8a77470 (RZ/G1C) compatible device
- - "renesas,usbhs-r8a774a1" for r8a774a1 (RZ/G2M) compatible device
- - "renesas,usbhs-r8a774c0" for r8a774c0 (RZ/G2E) compatible device
- - "renesas,usbhs-r8a7790" for r8a7790 (R-Car H2) compatible device
- - "renesas,usbhs-r8a7791" for r8a7791 (R-Car M2-W) compatible device
- - "renesas,usbhs-r8a7792" for r8a7792 (R-Car V2H) compatible device
- - "renesas,usbhs-r8a7793" for r8a7793 (R-Car M2-N) compatible device
- - "renesas,usbhs-r8a7794" for r8a7794 (R-Car E2) compatible device
- - "renesas,usbhs-r8a7795" for r8a7795 (R-Car H3) compatible device
- - "renesas,usbhs-r8a7796" for r8a7796 (R-Car M3-W) compatible device
- - "renesas,usbhs-r8a77965" for r8a77965 (R-Car M3-N) compatible device
- - "renesas,usbhs-r8a77990" for r8a77990 (R-Car E3) compatible device
- - "renesas,usbhs-r8a77995" for r8a77995 (R-Car D3) compatible device
- - "renesas,usbhs-r7s72100" for r7s72100 (RZ/A1) compatible device
- - "renesas,usbhs-r7s9210" for r7s9210 (RZ/A2) compatible device
- - "renesas,rcar-gen2-usbhs" for R-Car Gen2 or RZ/G1 compatible devices
- - "renesas,rcar-gen3-usbhs" for R-Car Gen3 or RZ/G2 compatible devices
- - "renesas,rza1-usbhs" for RZ/A1 compatible device
- - "renesas,rza2-usbhs" for RZ/A2 compatible device
-
- When compatible with the generic version, nodes must list the
- SoC-specific version corresponding to the platform first followed
- by the generic version.
-
- - reg: Base address and length of the register for the USBHS
- - interrupts: Interrupt specifier for the USBHS
- - clocks: A list of phandle + clock specifier pairs.
- - In case of "renesas,rcar-gen3-usbhs", two clocks are required.
- First clock should be peripheral and second one should be host.
- - In case of except above, one clock is required. First clock
- should be peripheral.
-
-Optional properties:
- - renesas,buswait: Integer to use BUSWAIT register
- - renesas,enable-gpio: A gpio specifier to check GPIO determining if USB
- function should be enabled
- - phys: phandle + phy specifier pair
- - phy-names: must be "usb"
- - dmas: Must contain a list of references to DMA specifiers.
- - dma-names : named "ch%d", where %d is the channel number ranging from zero
- to the number of channels (DnFIFOs) minus one.
-
-Example:
- usbhs: usb@e6590000 {
- compatible = "renesas,usbhs-r8a7790", "renesas,rcar-gen2-usbhs";
- reg = <0 0xe6590000 0 0x100>;
- interrupts = <0 107 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&mstp7_clks R8A7790_CLK_HSUSB>;
- };
diff --git a/Documentation/devicetree/bindings/usb/renesas,usbhs.yaml b/Documentation/devicetree/bindings/usb/renesas,usbhs.yaml
new file mode 100644
index 000000000000..469affa872d3
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/renesas,usbhs.yaml
@@ -0,0 +1,126 @@
+# SPDX-License-Identifier: GPL-2.0-only
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/usb/renesas,usbhs.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Renesas USBHS (HS-USB) controller
+
+maintainers:
+ - Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - const: renesas,usbhs-r7s72100 # RZ/A1
+ - const: renesas,rza1-usbhs
+
+ - items:
+ - const: renesas,usbhs-r7s9210 # RZ/A2
+ - const: renesas,rza2-usbhs
+
+ - items:
+ - enum:
+ - renesas,usbhs-r8a7743 # RZ/G1M
+ - renesas,usbhs-r8a7744 # RZ/G1N
+ - renesas,usbhs-r8a7745 # RZ/G1E
+ - renesas,usbhs-r8a77470 # RZ/G1C
+ - renesas,usbhs-r8a7790 # R-Car H2
+ - renesas,usbhs-r8a7791 # R-Car M2-W
+ - renesas,usbhs-r8a7792 # R-Car V2H
+ - renesas,usbhs-r8a7793 # R-Car M2-N
+ - renesas,usbhs-r8a7794 # R-Car E2
+ - const: renesas,rcar-gen2-usbhs
+
+ - items:
+ - enum:
+ - renesas,usbhs-r8a774a1 # RZ/G2M
+ - renesas,usbhs-r8a774b1 # RZ/G2N
+ - renesas,usbhs-r8a774c0 # RZ/G2E
+ - renesas,usbhs-r8a7795 # R-Car H3
+ - renesas,usbhs-r8a7796 # R-Car M3-W
+ - renesas,usbhs-r8a77965 # R-Car M3-N
+ - renesas,usbhs-r8a77990 # R-Car E3
+ - renesas,usbhs-r8a77995 # R-Car D3
+ - const: renesas,rcar-gen3-usbhs
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ minItems: 1
+ maxItems: 3
+ items:
+ - description: USB 2.0 host
+ - description: USB 2.0 peripheral
+ - description: USB 2.0 clock selector
+
+ interrupts:
+ maxItems: 1
+
+ renesas,buswait:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: |
+ Integer to use BUSWAIT register.
+
+ renesas,enable-gpio:
+ description: |
+ gpio specifier to check GPIO determining if USB function should be
+ enabled.
+
+ phys:
+ maxItems: 1
+ items:
+ - description: phandle + phy specifier pair.
+
+ phy-names:
+ maxItems: 1
+ items:
+ - const: usb
+
+ dmas:
+ minItems: 2
+ maxItems: 4
+
+ dma-names:
+ minItems: 2
+ maxItems: 4
+ items:
+ - const: ch0
+ - const: ch1
+ - const: ch2
+ - const: ch3
+
+ dr_mode: true
+
+ power-domains:
+ maxItems: 1
+
+ resets:
+ minItems: 1
+ maxItems: 2
+ items:
+ - description: USB 2.0 host
+ - description: USB 2.0 peripheral
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - interrupts
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/r8a7790-cpg-mssr.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/power/r8a7790-sysc.h>
+
+ usbhs: usb@e6590000 {
+ compatible = "renesas,usbhs-r8a7790", "renesas,rcar-gen2-usbhs";
+ reg = <0 0xe6590000 0 0x100>;
+ interrupts = <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 704>;
+ };
diff --git a/Documentation/devicetree/bindings/usb/richtek,rt1711h.txt b/Documentation/devicetree/bindings/usb/richtek,rt1711h.txt
index d4cf53c071d9..e3fc57e605ed 100644
--- a/Documentation/devicetree/bindings/usb/richtek,rt1711h.txt
+++ b/Documentation/devicetree/bindings/usb/richtek,rt1711h.txt
@@ -6,10 +6,39 @@ Required properties:
- interrupts : <a b> where a is the interrupt number and b represents an
encoding of the sense and level information for the interrupt.
+Required sub-node:
+- connector: The "usb-c-connector" attached to the tcpci chip; the bindings
+  of the connector node are specified in
+ Documentation/devicetree/bindings/connector/usb-connector.txt
+
Example :
rt1711h@4e {
compatible = "richtek,rt1711h";
reg = <0x4e>;
interrupt-parent = <&gpio26>;
interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+
+ usb_con: connector {
+ compatible = "usb-c-connector";
+ label = "USB-C";
+ data-role = "dual";
+ power-role = "dual";
+ try-power-role = "sink";
+ source-pdos = <PDO_FIXED(5000, 2000, PDO_FIXED_USB_COMM)>;
+ sink-pdos = <PDO_FIXED(5000, 2000, PDO_FIXED_USB_COMM)
+ PDO_VAR(5000, 12000, 2000)>;
+ op-sink-microwatt = <10000000>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@1 {
+ reg = <1>;
+ usb_con_ss: endpoint {
+ remote-endpoint = <&usb3_data_ss>;
+ };
+ };
+ };
+ };
};
diff --git a/Documentation/devicetree/bindings/usb/ti,hd3ss3220.txt b/Documentation/devicetree/bindings/usb/ti,hd3ss3220.txt
new file mode 100644
index 000000000000..25780e945b15
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/ti,hd3ss3220.txt
@@ -0,0 +1,38 @@
+TI HD3SS3220 TypeC DRP Port Controller.
+
+Required properties:
+ - compatible: Must be "ti,hd3ss3220".
+ - reg: I2C slave address, must be 0x47 or 0x67 based on ADDR pin.
+ - interrupts: An interrupt specifier.
+
+Required sub-node:
+ - connector: The "usb-c-connector" attached to the hd3ss3220 chip. The
+ bindings of the connector node are specified in:
+
+ Documentation/devicetree/bindings/connector/usb-connector.txt
+
+Example:
+hd3ss3220@47 {
+ compatible = "ti,hd3ss3220";
+ reg = <0x47>;
+ interrupt-parent = <&gpio6>;
+ interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
+
+ connector {
+ compatible = "usb-c-connector";
+ label = "USB-C";
+ data-role = "dual";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@1 {
+ reg = <1>;
+ hd3ss3220_ep: endpoint {
+ remote-endpoint = <&usb3_role_switch>;
+ };
+ };
+ };
+ };
+};
diff --git a/Documentation/devicetree/bindings/usb/ti,j721e-usb.yaml b/Documentation/devicetree/bindings/usb/ti,j721e-usb.yaml
new file mode 100644
index 000000000000..5f5264b2e9ad
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/ti,j721e-usb.yaml
@@ -0,0 +1,86 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/usb/ti,j721e-usb.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Bindings for the TI wrapper module for the Cadence USBSS-DRD controller
+
+maintainers:
+ - Roger Quadros <rogerq@ti.com>
+
+properties:
+ compatible:
+ items:
+ - const: ti,j721e-usb
+
+ reg:
+ description: module registers
+
+ power-domains:
+ description:
+ PM domain provider node and an args specifier containing
+ the USB device id value. See,
+ Documentation/devicetree/bindings/soc/ti/sci-pm-domain.txt
+
+ clocks:
+ description: Clock phandles to usb2_refclk and lpm_clk
+ minItems: 2
+ maxItems: 2
+
+ clock-names:
+ items:
+ - const: ref
+ - const: lpm
+
+ ti,usb2-only:
+ description:
+ If present, it restricts the controller to USB2.0 mode of
+ operation. Must be present if USB3 PHY is not available
+ for USB.
+ type: boolean
+
+ ti,vbus-divider:
+ description:
+ Should be present if USB VBUS line is connected to the
+ VBUS pin of the SoC via a 1/3 voltage divider.
+ type: boolean
+
+required:
+ - compatible
+ - reg
+ - power-domains
+ - clocks
+ - clock-names
+
+examples:
+ - |
+ #include <dt-bindings/soc/ti,sci_pm_domain.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ cdns_usb@4104000 {
+ compatible = "ti,j721e-usb";
+ reg = <0x00 0x4104000 0x00 0x100>;
+ power-domains = <&k3_pds 288 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&k3_clks 288 15>, <&k3_clks 288 3>;
+ clock-names = "ref", "lpm";
+ assigned-clocks = <&k3_clks 288 15>; /* USB2_REFCLK */
+ assigned-clock-parents = <&k3_clks 288 16>; /* HFOSC0 */
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ usb@6000000 {
+ compatible = "cdns,usb3";
+ reg = <0x00 0x6000000 0x00 0x10000>,
+ <0x00 0x6010000 0x00 0x10000>,
+ <0x00 0x6020000 0x00 0x10000>;
+ reg-names = "otg", "xhci", "dev";
+ interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>, /* irq.0 */
+ <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>, /* irq.6 */
+ <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>; /* otgirq.0 */
+ interrupt-names = "host",
+ "peripheral",
+ "otg";
+ maximum-speed = "super-speed";
+ dr_mode = "otg";
+ };
+ };
diff --git a/Documentation/devicetree/bindings/usb/usb-xhci.txt b/Documentation/devicetree/bindings/usb/usb-xhci.txt
index b49b819571f9..3f378951d624 100644
--- a/Documentation/devicetree/bindings/usb/usb-xhci.txt
+++ b/Documentation/devicetree/bindings/usb/usb-xhci.txt
@@ -10,6 +10,7 @@ Required properties:
- "renesas,xhci-r8a7743" for r8a7743 SoC
- "renesas,xhci-r8a7744" for r8a7744 SoC
- "renesas,xhci-r8a774a1" for r8a774a1 SoC
+ - "renesas,xhci-r8a774b1" for r8a774b1 SoC
- "renesas,xhci-r8a774c0" for r8a774c0 SoC
- "renesas,xhci-r8a7790" for r8a7790 SoC
- "renesas,xhci-r8a7791" for r8a7791 SoC
diff --git a/Documentation/devicetree/bindings/usb/usb251xb.txt b/Documentation/devicetree/bindings/usb/usb251xb.txt
index 17915f64b8ee..1a934eab175e 100644
--- a/Documentation/devicetree/bindings/usb/usb251xb.txt
+++ b/Documentation/devicetree/bindings/usb/usb251xb.txt
@@ -7,11 +7,12 @@ Required properties :
- compatible : Should be "microchip,usb251xb" or one of the specific types:
"microchip,usb2512b", "microchip,usb2512bi", "microchip,usb2513b",
"microchip,usb2513bi", "microchip,usb2514b", "microchip,usb2514bi",
- "microchip,usb2517", "microchip,usb2517i"
+ "microchip,usb2517", "microchip,usb2517i", "microchip,usb2422"
- reg : I2C address on the selected bus (default is <0x2C>)
Optional properties :
- reset-gpios : Should specify the gpio for hub reset
+ - vdd-supply : Should specify the phandle to the regulator supplying vdd
- skip-config : Skip Hub configuration, but only send the USB-Attach command
- vendor-id : Set USB Vendor ID of the hub (16 bit, default is 0x0424)
- product-id : Set USB Product ID of the hub (16 bit, default depends on type)
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.yaml b/Documentation/devicetree/bindings/vendor-prefixes.yaml
index 967e78c5ec0a..fd6fa07c45b8 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.yaml
+++ b/Documentation/devicetree/bindings/vendor-prefixes.yaml
@@ -16,7 +16,7 @@ properties: {}
patternProperties:
# Prefixes which are not vendors, but followed the pattern
# DO NOT ADD NEW PROPERTIES TO THIS LIST
- "^(at25|devbus|dmacap|dsa|exynos|gpio-fan|gpio|gpmc|hdmi|i2c-gpio),.*": true
+ "^(at25|devbus|dmacap|dsa|exynos|fsi[ab]|gpio-fan|gpio|gpmc|hdmi|i2c-gpio),.*": true
"^(keypad|m25p|max8952|max8997|max8998|mpmc),.*": true
"^(pinctrl-single|#pinctrl-single|PowerPC),.*": true
"^(pl022|pxa-mmc|rcar_sound|rotary-encoder|s5m8767|sdhci),.*": true
@@ -343,6 +343,8 @@ patternProperties:
description: Freescale Semiconductor
"^fujitsu,.*":
description: Fujitsu Ltd.
+ "^gardena,.*":
+ description: GARDENA GmbH
"^gateworks,.*":
description: Gateworks Corporation
"^gcw,.*":
diff --git a/Documentation/driver-api/index.rst b/Documentation/driver-api/index.rst
index 38e638abe3eb..d0efa35052e3 100644
--- a/Documentation/driver-api/index.rst
+++ b/Documentation/driver-api/index.rst
@@ -26,6 +26,7 @@ available subsections can be seen below.
device_link
component
message-based
+ infiniband
sound
frame-buffer
regulator
diff --git a/Documentation/driver-api/infiniband.rst b/Documentation/driver-api/infiniband.rst
new file mode 100644
index 000000000000..1a3116f32ff0
--- /dev/null
+++ b/Documentation/driver-api/infiniband.rst
@@ -0,0 +1,127 @@
+===========================================
+InfiniBand and Remote DMA (RDMA) Interfaces
+===========================================
+
+Introduction and Overview
+=========================
+
+TBD
+
+InfiniBand core interfaces
+==========================
+
+.. kernel-doc:: drivers/infiniband/core/iwpm_util.h
+ :internal:
+
+.. kernel-doc:: drivers/infiniband/core/cq.c
+ :export:
+
+.. kernel-doc:: drivers/infiniband/core/cm.c
+ :export:
+
+.. kernel-doc:: drivers/infiniband/core/rw.c
+ :export:
+
+.. kernel-doc:: drivers/infiniband/core/device.c
+ :export:
+
+.. kernel-doc:: drivers/infiniband/core/verbs.c
+ :export:
+
+.. kernel-doc:: drivers/infiniband/core/packer.c
+ :export:
+
+.. kernel-doc:: drivers/infiniband/core/sa_query.c
+ :export:
+
+.. kernel-doc:: drivers/infiniband/core/ud_header.c
+ :export:
+
+.. kernel-doc:: drivers/infiniband/core/fmr_pool.c
+ :export:
+
+.. kernel-doc:: drivers/infiniband/core/umem.c
+ :export:
+
+.. kernel-doc:: drivers/infiniband/core/umem_odp.c
+ :export:
+
+RDMA Verbs transport library
+============================
+
+.. kernel-doc:: drivers/infiniband/sw/rdmavt/mr.c
+ :export:
+
+.. kernel-doc:: drivers/infiniband/sw/rdmavt/rc.c
+ :export:
+
+.. kernel-doc:: drivers/infiniband/sw/rdmavt/ah.c
+ :export:
+
+.. kernel-doc:: drivers/infiniband/sw/rdmavt/vt.c
+ :export:
+
+.. kernel-doc:: drivers/infiniband/sw/rdmavt/cq.c
+ :export:
+
+.. kernel-doc:: drivers/infiniband/sw/rdmavt/qp.c
+ :export:
+
+.. kernel-doc:: drivers/infiniband/sw/rdmavt/mcast.c
+ :export:
+
+Upper Layer Protocols
+=====================
+
+iSCSI Extensions for RDMA (iSER)
+--------------------------------
+
+.. kernel-doc:: drivers/infiniband/ulp/iser/iscsi_iser.h
+ :internal:
+
+.. kernel-doc:: drivers/infiniband/ulp/iser/iscsi_iser.c
+ :functions: iscsi_iser_pdu_alloc iser_initialize_task_headers \
+ iscsi_iser_task_init iscsi_iser_mtask_xmit iscsi_iser_task_xmit \
+ iscsi_iser_cleanup_task iscsi_iser_check_protection \
+ iscsi_iser_conn_create iscsi_iser_conn_bind \
+ iscsi_iser_conn_start iscsi_iser_conn_stop \
+ iscsi_iser_session_destroy iscsi_iser_session_create \
+ iscsi_iser_set_param iscsi_iser_ep_connect iscsi_iser_ep_poll \
+ iscsi_iser_ep_disconnect
+
+.. kernel-doc:: drivers/infiniband/ulp/iser/iser_initiator.c
+ :internal:
+
+.. kernel-doc:: drivers/infiniband/ulp/iser/iser_verbs.c
+ :internal:
+
+Omni-Path (OPA) Virtual NIC support
+-----------------------------------
+
+.. kernel-doc:: drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h
+ :internal:
+
+.. kernel-doc:: drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.h
+ :internal:
+
+.. kernel-doc:: drivers/infiniband/ulp/opa_vnic/opa_vnic_vema_iface.c
+ :internal:
+
+.. kernel-doc:: drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c
+ :internal:
+
+InfiniBand SCSI RDMA protocol target support
+--------------------------------------------
+
+.. kernel-doc:: drivers/infiniband/ulp/srpt/ib_srpt.h
+ :internal:
+
+.. kernel-doc:: drivers/infiniband/ulp/srpt/ib_srpt.c
+ :internal:
+
+iSCSI Extensions for RDMA (iSER) target support
+-----------------------------------------------
+
+.. kernel-doc:: drivers/infiniband/ulp/isert/ib_isert.c
+ :internal:
+
diff --git a/Documentation/driver-api/libata.rst b/Documentation/driver-api/libata.rst
index 70e180e6b93d..207f0d24de69 100644
--- a/Documentation/driver-api/libata.rst
+++ b/Documentation/driver-api/libata.rst
@@ -250,23 +250,23 @@ High-level taskfile hooks
::
- void (*qc_prep) (struct ata_queued_cmd *qc);
+ enum ata_completion_errors (*qc_prep) (struct ata_queued_cmd *qc);
int (*qc_issue) (struct ata_queued_cmd *qc);
-Higher-level hooks, these two hooks can potentially supercede several of
+Higher-level hooks, these two hooks can potentially supersede several of
the above taskfile/DMA engine hooks. ``->qc_prep`` is called after the
buffers have been DMA-mapped, and is typically used to populate the
-hardware's DMA scatter-gather table. Most drivers use the standard
-:c:func:`ata_qc_prep` helper function, but more advanced drivers roll their
-own.
+hardware's DMA scatter-gather table. Some drivers use the standard
+:c:func:`ata_bmdma_qc_prep` and :c:func:`ata_bmdma_dumb_qc_prep` helper
+functions, but more advanced drivers roll their own.
``->qc_issue`` is used to make a command active, once the hardware and S/G
tables have been prepared. IDE BMDMA drivers use the helper function
-:c:func:`ata_qc_issue_prot` for taskfile protocol-based dispatch. More
+:c:func:`ata_sff_qc_issue` for taskfile protocol-based dispatch. More
advanced drivers implement their own ``->qc_issue``.
-:c:func:`ata_qc_issue_prot` calls ``->tf_load()``, ``->bmdma_setup()``, and
+:c:func:`ata_sff_qc_issue` calls ``->sff_tf_load()``, ``->bmdma_setup()``, and
``->bmdma_start()`` as necessary to initiate a transfer.
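+
+As a hedged illustration (not taken from libata itself; the structure name
+is made up, and the explicit assignments are redundant with ``.inherits``),
+a BMDMA driver relying on the standard helpers might be wired as::
+
+    static struct ata_port_operations my_port_ops = {
+        .inherits	= &ata_bmdma_port_ops,
+        .qc_prep	= ata_bmdma_qc_prep,
+        .qc_issue	= ata_sff_qc_issue,
+    };
+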
Exception and probe handling (EH)
diff --git a/Documentation/driver-api/nvmem.rst b/Documentation/driver-api/nvmem.rst
index d9d958d5c824..287e86819640 100644
--- a/Documentation/driver-api/nvmem.rst
+++ b/Documentation/driver-api/nvmem.rst
@@ -129,6 +129,8 @@ To facilitate such consumers NVMEM framework provides below apis::
struct nvmem_device *nvmem_device_get(struct device *dev, const char *name);
struct nvmem_device *devm_nvmem_device_get(struct device *dev,
const char *name);
+ struct nvmem_device *nvmem_device_find(void *data,
+ int (*match)(struct device *dev, const void *data));
void nvmem_device_put(struct nvmem_device *nvmem);
int nvmem_device_read(struct nvmem_device *nvmem, unsigned int offset,
size_t bytes, void *buf);
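+
+  /*
+   * Hedged usage sketch, not part of the API listing above: find a
+   * device by name; the "mac-eeprom" name and the match callback are
+   * illustrative only.
+   */
+  static int match_by_name(struct device *dev, const void *data)
+  {
+          return sysfs_streq(dev_name(dev), data);
+  }
+
+  struct nvmem_device *nvmem =
+          nvmem_device_find((void *)"mac-eeprom", match_by_name);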
diff --git a/Documentation/filesystems/fscrypt.rst b/Documentation/filesystems/fscrypt.rst
index 8a0700af9596..471a511c7508 100644
--- a/Documentation/filesystems/fscrypt.rst
+++ b/Documentation/filesystems/fscrypt.rst
@@ -256,13 +256,8 @@ alternative master keys or to support rotating master keys. Instead,
the master keys may be wrapped in userspace, e.g. as is done by the
`fscrypt <https://github.com/google/fscrypt>`_ tool.
-Including the inode number in the IVs was considered. However, it was
-rejected as it would have prevented ext4 filesystems from being
-resized, and by itself still wouldn't have been sufficient to prevent
-the same key from being directly reused for both XTS and CTS-CBC.
-
-DIRECT_KEY and per-mode keys
-----------------------------
+DIRECT_KEY policies
+-------------------
The Adiantum encryption mode (see `Encryption modes and usage`_) is
suitable for both contents and filenames encryption, and it accepts
@@ -285,6 +280,21 @@ IV. Moreover:
key derived using the KDF. Users may use the same master key for
other v2 encryption policies.
+IV_INO_LBLK_64 policies
+-----------------------
+
+When FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64 is set in the fscrypt policy,
+the encryption keys are derived from the master key, encryption mode
+number, and filesystem UUID. This normally results in all files
+protected by the same master key sharing a single contents encryption
+key and a single filenames encryption key. To still encrypt different
+files' data differently, inode numbers are included in the IVs.
+Consequently, shrinking the filesystem may not be allowed.
+
+This format is optimized for use with inline encryption hardware
+compliant with the UFS or eMMC standards, which support only 64 IV
+bits per I/O request and may have only a small number of keyslots.
+
Key identifiers
---------------
@@ -308,8 +318,9 @@ If unsure, you should use the (AES-256-XTS, AES-256-CTS-CBC) pair.
AES-128-CBC was added only for low-powered embedded devices with
crypto accelerators such as CAAM or CESA that do not support XTS. To
-use AES-128-CBC, CONFIG_CRYPTO_SHA256 (or another SHA-256
-implementation) must be enabled so that ESSIV can be used.
+use AES-128-CBC, CONFIG_CRYPTO_ESSIV and CONFIG_CRYPTO_SHA256 (or
+another SHA-256 implementation) must be enabled so that ESSIV can be
+used.
Adiantum is a (primarily) stream cipher-based mode that is fast even
on CPUs without dedicated crypto instructions. It's also a true
@@ -341,10 +352,16 @@ a little endian number, except that:
is encrypted with AES-256 where the AES-256 key is the SHA-256 hash
of the file's data encryption key.
-- In the "direct key" configuration (FSCRYPT_POLICY_FLAG_DIRECT_KEY
- set in the fscrypt_policy), the file's nonce is also appended to the
- IV. Currently this is only allowed with the Adiantum encryption
- mode.
+- With `DIRECT_KEY policies`_, the file's nonce is appended to the IV.
+ Currently this is only allowed with the Adiantum encryption mode.
+
+- With `IV_INO_LBLK_64 policies`_, the logical block number is limited
+ to 32 bits and is placed in bits 0-31 of the IV. The inode number
+ (which is also limited to 32 bits) is placed in bits 32-63.
+
+Note that because file logical block numbers are included in the IVs,
+filesystems must enforce that blocks are never shifted around within
+encrypted files, e.g. via "collapse range" or "insert range".
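+
+As a rough sketch (illustrative only, not the kernel's implementation),
+the IV_INO_LBLK_64 layout described above amounts to::
+
+    /* bits 0-31: logical block number; bits 32-63: inode number */
+    __le64 iv = cpu_to_le64(((u64)inode_number << 32) | logical_block);
+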
Filenames encryption
--------------------
@@ -354,10 +371,10 @@ the requirements to retain support for efficient directory lookups and
filenames of up to 255 bytes, the same IV is used for every filename
in a directory.
-However, each encrypted directory still uses a unique key; or
-alternatively (for the "direct key" configuration) has the file's
-nonce included in the IVs. Thus, IV reuse is limited to within a
-single directory.
+However, each encrypted directory still uses a unique key, or
+alternatively has the file's nonce (for `DIRECT_KEY policies`_) or
+inode number (for `IV_INO_LBLK_64 policies`_) included in the IVs.
+Thus, IV reuse is limited to within a single directory.
With CTS-CBC, the IV reuse means that when the plaintext filenames
share a common prefix at least as long as the cipher block size (16
@@ -431,12 +448,15 @@ This structure must be initialized as follows:
(1) for ``contents_encryption_mode`` and FSCRYPT_MODE_AES_256_CTS
(4) for ``filenames_encryption_mode``.
-- ``flags`` must contain a value from ``<linux/fscrypt.h>`` which
- identifies the amount of NUL-padding to use when encrypting
- filenames. If unsure, use FSCRYPT_POLICY_FLAGS_PAD_32 (0x3).
- Additionally, if the encryption modes are both
- FSCRYPT_MODE_ADIANTUM, this can contain
- FSCRYPT_POLICY_FLAG_DIRECT_KEY; see `DIRECT_KEY and per-mode keys`_.
+- ``flags`` contains optional flags from ``<linux/fscrypt.h>``:
+
+ - FSCRYPT_POLICY_FLAGS_PAD_*: The amount of NUL padding to use when
+ encrypting filenames. If unsure, use FSCRYPT_POLICY_FLAGS_PAD_32
+ (0x3).
+ - FSCRYPT_POLICY_FLAG_DIRECT_KEY: See `DIRECT_KEY policies`_.
+ - FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64: See `IV_INO_LBLK_64
+ policies`_. This is mutually exclusive with DIRECT_KEY and is not
+ supported on v1 policies.
- For v2 encryption policies, ``__reserved`` must be zeroed.
@@ -1089,7 +1109,7 @@ policy structs (see `Setting an encryption policy`_), except that the
context structs also contain a nonce. The nonce is randomly generated
by the kernel and is used as KDF input or as a tweak to cause
different files to be encrypted differently; see `Per-file keys`_ and
-`DIRECT_KEY and per-mode keys`_.
+`DIRECT_KEY policies`_.
Data path changes
-----------------
diff --git a/Documentation/filesystems/fsverity.rst b/Documentation/filesystems/fsverity.rst
index 42a0b6dd9e0b..a95536b6443c 100644
--- a/Documentation/filesystems/fsverity.rst
+++ b/Documentation/filesystems/fsverity.rst
@@ -226,6 +226,14 @@ To do so, check for FS_VERITY_FL (0x00100000) in the returned flags.
The verity flag is not settable via FS_IOC_SETFLAGS. You must use
FS_IOC_ENABLE_VERITY instead, since parameters must be provided.
+statx
+-----
+
+Since Linux v5.5, the statx() system call sets STATX_ATTR_VERITY if
+the file has fs-verity enabled. This can perform better than
+FS_IOC_GETFLAGS and FS_IOC_MEASURE_VERITY because it doesn't require
+opening the file, and opening verity files can be expensive.
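+
+A hedged userspace sketch (assumes glibc 2.28+ for statx(); the fallback
+define uses the STATX_ATTR_VERITY value from linux/stat.h)::
+
+    #include <fcntl.h>
+    #include <stdio.h>
+    #include <sys/stat.h>
+
+    #ifndef STATX_ATTR_VERITY
+    #define STATX_ATTR_VERITY 0x00100000
+    #endif
+
+    int main(int argc, char **argv)
+    {
+        struct statx stx;
+
+        if (argc < 2 || statx(AT_FDCWD, argv[1], 0, 0, &stx) != 0) {
+            perror("statx");
+            return 1;
+        }
+        printf("verity: %s\n",
+               (stx.stx_attributes & STATX_ATTR_VERITY) ? "yes" : "no");
+        return 0;
+    }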
+
Accessing verity files
======================
@@ -398,7 +406,7 @@ pages have been read into the pagecache. (See `Verifying data`_.)
ext4
----
-ext4 supports fs-verity since Linux TODO and e2fsprogs v1.45.2.
+ext4 supports fs-verity since Linux v5.4 and e2fsprogs v1.45.2.
To create verity files on an ext4 filesystem, the filesystem must have
been formatted with ``-O verity`` or had ``tune2fs -O verity`` run on
@@ -434,7 +442,7 @@ also only supports extent-based files.
f2fs
----
-f2fs supports fs-verity since Linux TODO and f2fs-tools v1.11.0.
+f2fs supports fs-verity since Linux v5.4 and f2fs-tools v1.11.0.
To create verity files on an f2fs filesystem, the filesystem must have
been formatted with ``-O verity``.
diff --git a/Documentation/firmware-guide/acpi/namespace.rst b/Documentation/firmware-guide/acpi/namespace.rst
index 835521baeb89..3eb763d6656d 100644
--- a/Documentation/firmware-guide/acpi/namespace.rst
+++ b/Documentation/firmware-guide/acpi/namespace.rst
@@ -261,7 +261,7 @@ Description Tables contain information used for the creation of the
struct acpi_device objects represented by the given row (xSDT means DSDT
or SSDT).
-The forth column of the above table indicates the 'bus_id' generation
+The fourth column of the above table indicates the 'bus_id' generation
rule of the struct acpi_device object:
_HID:
diff --git a/Documentation/fpga/dfl.rst b/Documentation/fpga/dfl.rst
index 6fa483fc823e..094fc8aacd8e 100644
--- a/Documentation/fpga/dfl.rst
+++ b/Documentation/fpga/dfl.rst
@@ -108,6 +108,16 @@ More functions are exposed through sysfs
error reporting sysfs interfaces allow user to read errors detected by the
hardware, and clear the logged errors.
+ Power management (dfl_fme_power hwmon)
+ power management hwmon sysfs interfaces allow user to read power management
+ information (power consumption, thresholds, threshold status, limits, etc.)
+ and configure power thresholds for different throttling levels.
+
+ Thermal management (dfl_fme_thermal hwmon)
+ thermal management hwmon sysfs interfaces allow user to read thermal
+ management information (current temperature, thresholds, threshold status,
+ etc.).
+
FIU - PORT
==========
diff --git a/Documentation/hwmon/bel-pfe.rst b/Documentation/hwmon/bel-pfe.rst
new file mode 100644
index 000000000000..4b4a7d67854c
--- /dev/null
+++ b/Documentation/hwmon/bel-pfe.rst
@@ -0,0 +1,112 @@
+Kernel driver bel-pfe
+======================
+
+Supported chips:
+
+ * BEL PFE1100
+
+ Prefixes: 'pfe1100'
+
+ Addresses scanned: -
+
+ Datasheet: https://www.belfuse.com/resources/datasheets/powersolutions/ds-bps-pfe1100-12-054xa.pdf
+
+ * BEL PFE3000
+
+ Prefixes: 'pfe3000'
+
+ Addresses scanned: -
+
+ Datasheet: https://www.belfuse.com/resources/datasheets/powersolutions/ds-bps-pfe3000-series.pdf
+
+Author: Tao Ren <rentao.bupt@gmail.com>
+
+
+Description
+-----------
+
+This driver supports hardware monitoring for the power supply devices
+listed below, which support the PMBus protocol:
+
+ * BEL PFE1100
+
+ 1100 Watt AC to DC power-factor-corrected (PFC) power supply.
+ PMBus Communication Manual is not publicly available.
+
+ * BEL PFE3000
+
+ 3000 Watt AC/DC power-factor-corrected (PFC) and DC-DC power supply.
+ PMBus Communication Manual is not publicly available.
+
+The driver is a client driver to the core PMBus driver. Please see
+Documentation/hwmon/pmbus.rst for details on PMBus client drivers.
+
+
+Usage Notes
+-----------
+
+This driver does not auto-detect devices. You will have to instantiate the
+devices explicitly. Please see Documentation/i2c/instantiating-devices.rst for
+details.
+
+Example: the following will load the driver for a PFE3000 at address 0x20
+on I2C bus #1::
+
+ $ modprobe bel-pfe
+ $ echo pfe3000 0x20 > /sys/bus/i2c/devices/i2c-1/new_device
+
+
+Platform data support
+---------------------
+
+The driver supports standard PMBus driver platform data.
+
+
+Sysfs entries
+-------------
+
+======================= =======================================================
+curr1_label "iin"
+curr1_input Measured input current
+curr1_max Input current max value
+curr1_max_alarm Input current max alarm
+
+curr[2-3]_label "iout[1-2]"
+curr[2-3]_input Measured output current
+curr[2-3]_max Output current max value
+curr[2-3]_max_alarm Output current max alarm
+
+fan[1-2]_input Fan 1 and 2 speed in RPM
+fan1_target Set fan speed reference for both fans
+
+in1_label "vin"
+in1_input Measured input voltage
+in1_crit Input voltage critical max value
+in1_crit_alarm Input voltage critical max alarm
+in1_lcrit Input voltage critical min value
+in1_lcrit_alarm Input voltage critical min alarm
+in1_max Input voltage max value
+in1_max_alarm Input voltage max alarm
+
+in2_label "vcap"
+in2_input Hold up capacitor voltage
+
+in[3-8]_label "vout[1-3,5-7]"
+in[3-8]_input Measured output voltage
+in[3-4]_alarm vout[1-2] output voltage alarm
+
+power[1-2]_label "pin[1-2]"
+power[1-2]_input Measured input power
+power[1-2]_alarm Input power high alarm
+
+power[3-4]_label "pout[1-2]"
+power[3-4]_input Measured output power
+
+temp[1-3]_input Measured temperature
+temp[1-3]_alarm Temperature alarm
+======================= =======================================================
+
+.. note::
+
+ - curr3, fan2, vout[2-7], vcap, pin2, pout2 and temp3 attributes only
+ exist for PFE3000.
diff --git a/Documentation/hwmon/dell-smm-hwmon.rst b/Documentation/hwmon/dell-smm-hwmon.rst
new file mode 100644
index 000000000000..3bf77a5df995
--- /dev/null
+++ b/Documentation/hwmon/dell-smm-hwmon.rst
@@ -0,0 +1,164 @@
+.. SPDX-License-Identifier: GPL-2.0-or-later
+
+.. include:: <isonum.txt>
+
+Kernel driver dell-smm-hwmon
+============================
+
+:Copyright: |copy| 2002-2005 Massimo Dal Zotto <dz@debian.org>
+:Copyright: |copy| 2019 Giovanni Mascellani <gio@debian.org>
+
+Description
+-----------
+
+On many Dell laptops the System Management Mode (SMM) BIOS can be
+queried for the status of fans and temperature sensors. Userspace
+utilities like ``sensors`` can be used to return the readings. The
+userspace suite `i8kutils`__ can also be used to read the sensors and
+automatically adjust fan speed (please notice that it currently uses
+the deprecated ``/proc/i8k`` interface).
+
+ __ https://github.com/vitorafsr/i8kutils
+
+``sysfs`` interface
+-------------------
+
+Temperature sensors and fans can be queried and set via the standard
+``hwmon`` interface on ``sysfs``, under the directory
+``/sys/class/hwmon/hwmonX`` for some value of ``X`` (search for the
+``X`` such that ``/sys/class/hwmon/hwmonX/name`` has content
+``dell_smm``). A number of other attributes can be read or written:
+
+=============================== ======= =======================================
+Name Perm Description
+=============================== ======= =======================================
+fan[1-3]_input RO Fan speed in RPM.
+fan[1-3]_label RO Fan label.
+pwm[1-3] RW Control the fan PWM duty-cycle.
+pwm1_enable WO Enable or disable automatic BIOS fan
+ control (not supported on all laptops,
+ see below for details).
+temp[1-10]_input RO Temperature reading in milli-degrees
+ Celsius.
+temp[1-10]_label RO Temperature sensor label.
+=============================== ======= =======================================
+
+Disabling automatic BIOS fan control
+------------------------------------
+
+On some laptops the BIOS automatically sets fan speed every few
+seconds. Therefore the fan speed set by means of this driver is quickly
+overwritten.
+
+There is experimental support for disabling automatic BIOS fan
+control, at least on laptops where the corresponding SMM command is
+known, by writing the value ``1`` in the attribute ``pwm1_enable``
+(writing ``2`` enables automatic BIOS control again). Even if you have
+more than one fan, automatic fan control is enabled or disabled for
+all of them at the same time; notwithstanding the name, ``pwm1_enable``
+sets automatic control for all fans.
+
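+As a hedged sketch (the hwmon index varies per machine, so ``hwmon3``
+below is made up), automatic fan control could be disabled from C via
+the sysfs attribute::
+
+    #include <stdio.h>
+
+    int main(void)
+    {
+        /* Writing "1" disables BIOS fan control; "2" re-enables it. */
+        FILE *f = fopen("/sys/class/hwmon/hwmon3/pwm1_enable", "w");
+
+        if (!f)
+            return 1;
+        fputs("1", f);
+        return fclose(f) ? 1 : 0;
+    }
+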
+If ``pwm1_enable`` is not available, then it means that SMM codes for
+enabling and disabling automatic BIOS fan control are not whitelisted
+for your hardware. It is possible that codes that work for other
+laptops actually work for yours as well, or that you have to discover
+new codes.
+
+Check the list ``i8k_whitelist_fan_control`` in file
+``drivers/hwmon/dell-smm-hwmon.c`` in the kernel tree: as a first
+attempt you can try to add your machine and use an already-known code
+pair. If, after recompiling the kernel, you see that ``pwm1_enable``
+is present and works (i.e., you can manually control the fan speed),
+then please submit your finding as a kernel patch, so that other users
+can benefit from it. Please see
+:ref:`Documentation/process/submitting-patches.rst <submittingpatches>`
+for information on submitting patches.
+
+If no known code works on your machine, you need to resort to some
+probing, because unfortunately Dell does not publish datasheets for
+its SMM. You can experiment with the code in `this repository`__ to
+probe the BIOS on your machine and discover the appropriate codes.
+
+ __ https://github.com/clopez/dellfan/
+
+Again, when you find new codes, we'd be happy to have your patches!
+
+Module parameters
+-----------------
+
+* force:bool
+ Force loading without checking for supported
+ models. (default: 0)
+
+* ignore_dmi:bool
+ Continue probing hardware even if DMI data does not
+ match. (default: 0)
+
+* restricted:bool
+ Allow fan control only to processes with the
+ ``CAP_SYS_ADMIN`` capability set or processes run
+ as root when using the legacy ``/proc/i8k``
+ interface. In this case normal users will be able
+ to read temperature and fan status but not to
+ control the fan. If your notebook is shared with
+ other users and you don't trust them you may want
+ to use this option. (default: 1, only available
+ with ``CONFIG_I8K``)
+
+* power_status:bool
+ Report AC status in ``/proc/i8k``. (default: 0,
+ only available with ``CONFIG_I8K``)
+
+* fan_mult:uint
+ Factor to multiply fan speed with. (default:
+ autodetect)
+
+* fan_max:uint
+ Maximum configurable fan speed. (default:
+ autodetect)
+
+Legacy ``/proc`` interface
+--------------------------
+
+.. warning:: This interface is obsolete and deprecated and should not be
+             used in new applications. It is only available when the
+             kernel is compiled with the ``CONFIG_I8K`` option.
+
+The information provided by the kernel driver can be accessed by
+simply reading the ``/proc/i8k`` file. For example::
+
+ $ cat /proc/i8k
+ 1.0 A17 2J59L02 52 2 1 8040 6420 1 2
+
+The fields read from ``/proc/i8k`` are::
+
+ 1.0 A17 2J59L02 52 2 1 8040 6420 1 2
+ | | | | | | | | | |
+ | | | | | | | | | +------- 10. buttons status
+ | | | | | | | | +--------- 9. AC status
+ | | | | | | | +-------------- 8. fan0 RPM
+ | | | | | | +------------------- 7. fan1 RPM
+ | | | | | +--------------------- 6. fan0 status
+ | | | | +----------------------- 5. fan1 status
+ | | | +-------------------------- 4. temp0 reading (Celsius)
+ | | +---------------------------------- 3. Dell service tag (later known as 'serial number')
+ | +-------------------------------------- 2. BIOS version
+ +------------------------------------------ 1. /proc/i8k format version
+
+A negative value, for example -22, indicates that the BIOS doesn't
+return the corresponding information. This is normal on some
+models/BIOSes.
+
+For performance reasons ``/proc/i8k`` doesn't report the AC status by
+default, since this SMM call takes a long time to execute and is not
+really needed. If you want to see the AC status in ``/proc/i8k`` you
+must explicitly enable this option by passing the ``power_status=1``
+parameter to insmod. If the AC status is not available, -1 is printed
+instead.
+
+The driver also provides an ioctl interface which can be used to
+obtain the same information and to control the fan status. The ioctl
+interface can be accessed from C programs or from the shell using the
+i8kctl utility. See the ``i8kutils`` sources for more information on
+how to use the ioctl interface.
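+
+A hedged C sketch of parsing the documented fields (field order as in
+the diagram above; error handling kept minimal)::
+
+    #include <stdio.h>
+
+    int main(void)
+    {
+        char ver[16], bios[16], serial[16];
+        int temp0, fan1_stat, fan0_stat, fan1_rpm, fan0_rpm, ac, buttons;
+        FILE *f = fopen("/proc/i8k", "r");
+
+        if (!f || fscanf(f, "%15s %15s %15s %d %d %d %d %d %d %d",
+                         ver, bios, serial, &temp0, &fan1_stat,
+                         &fan0_stat, &fan1_rpm, &fan0_rpm, &ac,
+                         &buttons) != 10)
+            return 1;
+        fclose(f);
+        printf("temp0=%dC fan0=%d RPM fan1=%d RPM\n",
+               temp0, fan0_rpm, fan1_rpm);
+        return 0;
+    }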
diff --git a/Documentation/hwmon/ina3221.rst b/Documentation/hwmon/ina3221.rst
index f6007ae8f4e2..297f7323b441 100644
--- a/Documentation/hwmon/ina3221.rst
+++ b/Documentation/hwmon/ina3221.rst
@@ -41,6 +41,18 @@ curr[123]_max Warning alert current(mA) setting, activates the
average is above this value.
curr[123]_max_alarm Warning alert current limit exceeded
in[456]_input Shunt voltage(uV) for channels 1, 2, and 3 respectively
+in7_input Sum of shunt voltage(uV) channels
+in7_label Channel label for sum of shunt voltage
+curr4_input Sum of current(mA) measurement channels,
+ (only available when all channels use the same resistor
+ value for their shunt resistors)
+curr4_crit Critical alert current(mA) setting for sum of current
+ measurements, activates the corresponding alarm
+ when the respective current is above this value
+ (only effective when all channels use the same resistor
+ value for their shunt resistors)
+curr4_crit_alarm Critical alert current limit exceeded for sum of
+ current measurements.
samples Number of samples using in the averaging mode.
Supports the list of number of samples:
diff --git a/Documentation/hwmon/index.rst b/Documentation/hwmon/index.rst
index 230ad59b462b..43cc605741ea 100644
--- a/Documentation/hwmon/index.rst
+++ b/Documentation/hwmon/index.rst
@@ -41,9 +41,11 @@ Hardware Monitoring Kernel Drivers
asb100
asc7621
aspeed-pwm-tacho
+ bel-pfe
coretemp
da9052
da9055
+ dell-smm-hwmon
dme1737
ds1621
ds620
@@ -90,6 +92,7 @@ Hardware Monitoring Kernel Drivers
lm95245
lochnagar
ltc2945
+ ltc2947
ltc2978
ltc2990
ltc3815
@@ -153,6 +156,7 @@ Hardware Monitoring Kernel Drivers
tmp108
tmp401
tmp421
+ tmp513
tps40422
twl4030-madc-hwmon
ucd9000
diff --git a/Documentation/hwmon/ltc2947.rst b/Documentation/hwmon/ltc2947.rst
new file mode 100644
index 000000000000..419fc84fe934
--- /dev/null
+++ b/Documentation/hwmon/ltc2947.rst
@@ -0,0 +1,100 @@
+Kernel drivers ltc2947-i2c and ltc2947-spi
+==========================================
+
+Supported chips:
+
+ * Analog Devices LTC2947
+
+ Prefix: 'ltc2947'
+
+ Addresses scanned: -
+
+ Datasheet:
+
+ https://www.analog.com/media/en/technical-documentation/data-sheets/LTC2947.pdf
+
+Author: Nuno Sá <nuno.sa@analog.com>
+
+Description
+___________
+
+The LTC2947 is a high precision power and energy monitor that measures current,
+voltage, power, temperature, charge and energy. The device supports both SPI
+and I2C depending on the chip configuration.
+The device also measures accumulated quantities such as energy. It has two
+banks of registers to read/set energy related values. These banks can be
+configured independently for setups like: energy1 always accumulates and
+energy2 only accumulates if the current is positive (to check battery
+charging efficiency, for example). The device also supports a GPIO pin that
+can be configured as an output to control a fan as a function of measured
+temperature. The GPIO becomes active as soon as a temperature reading is
+higher than a defined threshold. The temp2 channel is used to control these
+thresholds and to read the respective alarms.
+
+Sysfs entries
+_____________
+
+The following attributes are supported. Limits are read-write, reset_history
+is write-only and all the other attributes are read-only.
+
+======================= ==========================================
+in0_input VP-VM voltage (mV).
+in0_min Undervoltage threshold
+in0_max Overvoltage threshold
+in0_lowest Lowest measured voltage
+in0_highest Highest measured voltage
+in0_reset_history	Write 1 to reset in0 history
+in0_min_alarm Undervoltage alarm
+in0_max_alarm Overvoltage alarm
+in0_label Channel label (VP-VM)
+
+in1_input DVCC voltage (mV)
+in1_min Undervoltage threshold
+in1_max Overvoltage threshold
+in1_lowest Lowest measured voltage
+in1_highest Highest measured voltage
+in1_reset_history	Write 1 to reset in1 history
+in1_min_alarm Undervoltage alarm
+in1_max_alarm Overvoltage alarm
+in1_label Channel label (DVCC)
+
+curr1_input IP-IM Sense current (mA)
+curr1_min Undercurrent threshold
+curr1_max Overcurrent threshold
+curr1_lowest Lowest measured current
+curr1_highest Highest measured current
+curr1_reset_history Write 1 to reset curr1 history
+curr1_min_alarm Undercurrent alarm
+curr1_max_alarm Overcurrent alarm
+curr1_label Channel label (IP-IM)
+
+power1_input Power (in uW)
+power1_min Low power threshold
+power1_max High power threshold
+power1_input_lowest Historical minimum power use
+power1_input_highest Historical maximum power use
+power1_reset_history Write 1 to reset power1 history
+power1_min_alarm Low power alarm
+power1_max_alarm High power alarm
+power1_label Channel label (Power)
+
+temp1_input Chip Temperature (in milliC)
+temp1_min Low temperature threshold
+temp1_max High temperature threshold
+temp1_input_lowest	Historical minimum temperature
+temp1_input_highest	Historical maximum temperature
+temp1_reset_history Write 1 to reset temp1 history
+temp1_min_alarm Low temperature alarm
+temp1_max_alarm High temperature alarm
+temp1_label Channel label (Ambient)
+
+temp2_min Low temperature threshold for fan control
+temp2_max High temperature threshold for fan control
+temp2_min_alarm Low temperature fan control alarm
+temp2_max_alarm High temperature fan control alarm
+temp2_label Channel label (TEMPFAN)
+
+energy1_input Measured energy over time (in microJoule)
+
+energy2_input Measured energy over time (in microJoule)
+======================= ==========================================
diff --git a/Documentation/hwmon/tmp513.rst b/Documentation/hwmon/tmp513.rst
new file mode 100644
index 000000000000..6c8fae4b1a75
--- /dev/null
+++ b/Documentation/hwmon/tmp513.rst
@@ -0,0 +1,103 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Kernel driver tmp513
+====================
+
+Supported chips:
+
+ * Texas Instruments TMP512
+
+ Prefix: 'tmp512'
+
+ Datasheet: http://www.ti.com/lit/ds/symlink/tmp512.pdf
+
+ * Texas Instruments TMP513
+
+ Prefix: 'tmp513'
+
+ Datasheet: http://www.ti.com/lit/ds/symlink/tmp513.pdf
+
+Authors:
+
+ Eric Tremblay <etremblay@distech-controls.com>
+
+Description
+-----------
+
+This driver implements support for the Texas Instruments TMP512 and TMP513.
+The TMP512 (dual-channel) and TMP513 (triple-channel) are system monitors
+that include remote sensors, a local temperature sensor, and a high-side current
+shunt monitor. These system monitors have the capability of measuring remote
+temperatures, on-chip temperatures, and system voltage/power/current
+consumption.
+
+The temperatures are measured in degrees Celsius with a range of
+-40 to +125 degrees and a resolution of 0.0625 degrees C.
+
+For the hysteresis value, only the first channel is writable. Writing to
+it will affect all other values since all channels share the same
+hysteresis value. The hysteresis is in degrees Celsius with a range of
+0 to 127.5 degrees and a resolution of 0.5 degrees.
+
+The driver exports the temperature values via the following sysfs files:
+
+**temp[1-4]_input**
+
+**temp[1-4]_crit**
+
+**temp[1-4]_crit_alarm**
+
+**temp[1-4]_crit_hyst**
+
+The driver reads the shunt voltage from the chip and converts it to current.
+The readable range depends on the "ti,pga-gain" property (defaults to 8) and
+the shunt resistor value. The value resolution is equal to 10uV/Rshunt.
+
+The driver exports the shunt current values via the following sysfs files:
+
+**curr1_input**
+
+**curr1_lcrit**
+
+**curr1_lcrit_alarm**
+
+**curr1_crit**
+
+**curr1_crit_alarm**
+
+The bus voltage is read from the chip with a resolution of 4mV. The chip
+can be configured for two different ranges (32V or 16V) using the
+ti,bus-range-microvolt property in the devicetree.
+
+The driver exports the bus voltage values via the following sysfs files:
+
+**in0_input**
+
+**in0_lcrit**
+
+**in0_lcrit_alarm**
+
+**in0_crit**
+
+**in0_crit_alarm**
+
+The range and resolution of the bus power and bus current depend on the
+calibration register value. Those values are calculated by the hardware
+using the following formulas::
+
+  Current = (ShuntVoltage * CalibrationRegister) / 4096
+  Power   = (Current * BusVoltage) / 5000
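+
+A hedged sketch of the same conversion in C (names and integer units are
+illustrative; the datasheet defines the exact register scaling)::
+
+  static long tmp513_current(long shunt_voltage, long calibration_register)
+  {
+          return (shunt_voltage * calibration_register) / 4096;
+  }
+
+  static long tmp513_power(long current, long bus_voltage)
+  {
+          return (current * bus_voltage) / 5000;
+  }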
+
+The driver exports the bus current and bus power values via the following
+sysfs files:
+
+**curr2_input**
+
+**power1_input**
+
+**power1_crit**
+
+**power1_crit_alarm**
+
+The calibration process follows the procedure of the datasheet (without
+overflow) and depends on the shunt resistor value and the pga_gain value.
diff --git a/Documentation/index.rst b/Documentation/index.rst
index b843e313d2f2..2ceab197246f 100644
--- a/Documentation/index.rst
+++ b/Documentation/index.rst
@@ -135,6 +135,14 @@ needed).
mic/index
scheduler/index
+Architecture-agnostic documentation
+-----------------------------------
+
+.. toctree::
+ :maxdepth: 2
+
+ asm-annotations
+
Architecture-specific documentation
-----------------------------------
diff --git a/Documentation/ioctl/ioctl-number.rst b/Documentation/ioctl/ioctl-number.rst
index bef79cd4c6b4..4ef86433bd67 100644
--- a/Documentation/ioctl/ioctl-number.rst
+++ b/Documentation/ioctl/ioctl-number.rst
@@ -233,6 +233,7 @@ Code Seq# Include File Comments
'f' 00-0F fs/ext4/ext4.h conflict!
'f' 00-0F linux/fs.h conflict!
'f' 00-0F fs/ocfs2/ocfs2_fs.h conflict!
+'f' 13-27 linux/fscrypt.h
'f' 81-8F linux/fsverity.h
'g' 00-0F linux/usb/gadgetfs.h
'g' 20-2F linux/usb/g_printer.h
diff --git a/Documentation/livepatch/index.rst b/Documentation/livepatch/index.rst
index 17674a9e21b2..525944063be7 100644
--- a/Documentation/livepatch/index.rst
+++ b/Documentation/livepatch/index.rst
@@ -12,6 +12,7 @@ Kernel Livepatching
cumulative-patches
module-elf-format
shadow-vars
+ system-state
.. only:: subproject and html
diff --git a/Documentation/livepatch/system-state.rst b/Documentation/livepatch/system-state.rst
new file mode 100644
index 000000000000..c6d127c2d9aa
--- /dev/null
+++ b/Documentation/livepatch/system-state.rst
@@ -0,0 +1,167 @@
+====================
+System State Changes
+====================
+
+Some users are really reluctant to reboot a system. This brings the need
+to provide more livepatches and maintain some compatibility between them.
+
+Maintaining more livepatches is much easier with cumulative livepatches.
+Each new livepatch completely replaces any older one. It can keep,
+add, and even remove fixes. And it is typically safe to replace any version
+of the livepatch with any other one thanks to the atomic replace feature.
+
+The problems might come with shadow variables and callbacks. They might
+change the system behavior or state so that it is no longer safe to
+go back and use an older livepatch or the original kernel code. Also
+any new livepatch must be able to detect what changes have already been
+done by the already installed livepatches.
+
+This is where the livepatch system state tracking gets useful. It
+allows one to:
+
+ - store data needed to manipulate and restore the system state
+
+ - define compatibility between livepatches using a change id
+ and version
+
+
+1. Livepatch system state API
+=============================
+
+The state of the system might get modified either by several livepatch callbacks
+or by the newly used code. Also it must be possible to find changes done by
+already installed livepatches.
+
+Each modified state is described by struct klp_state, see
+include/linux/livepatch.h.
+
+Each livepatch defines an array of struct klp_state entries. It lists
+all the states that the livepatch modifies.
+
+The livepatch author must define the following two fields for each
+struct klp_state:
+
+ - *id*
+
+ - Non-zero number used to identify the affected system state.
+
+ - *version*
+
+ - Number describing the variant of the system state change that
+ is supported by the given livepatch.
+
+The state can be manipulated using two functions:
+
+ - *klp_get_state(patch, id)*
+
+ - Get struct klp_state associated with the given livepatch
+ and state id.
+
+ - *klp_get_prev_state(id)*
+
+ - Get struct klp_state associated with the given feature id and
+ already installed livepatches.
+
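+A minimal definition sketch putting these pieces together (the change id
+value and the ``objs`` array are made-up placeholders):
+
+.. code-block:: c
+
+    #define LIVEPATCH_FIX_STATE 1   /* hypothetical change id */
+
+    static struct klp_state states[] = {
+        { .id = LIVEPATCH_FIX_STATE, .version = 2, },
+        {}      /* zero-terminated */
+    };
+
+    static struct klp_patch patch = {
+        .mod = THIS_MODULE,
+        .objs = objs,   /* struct klp_object array, defined elsewhere */
+        .states = states,
+        .replace = true,
+    };
+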
+2. Livepatch compatibility
+==========================
+
+The system state version is used to prevent loading incompatible livepatches.
+The check is done when the livepatch is enabled. The rules are:
+
+ - Any completely new system state modification is allowed.
+
+ - System state modifications with the same or higher version are allowed
+ for already modified system states.
+
+ - Cumulative livepatches must handle all system state modifications from
+ already installed livepatches.
+
+ - Non-cumulative livepatches are allowed to touch already modified
+ system states.
+
+3. Supported scenarios
+======================
+
+Livepatches have a life cycle, and the same is true for the system
+state changes. Every compatible livepatch has to support the following
+scenarios:
+
+  - Modify the system state when the livepatch gets enabled and the state
+    has not already been modified by the livepatches that are being
+    replaced.
+
+  - Take over or update the system state modification when it has already
+    been done by a livepatch that is being replaced.
+
+ - Restore the original state when the livepatch is disabled.
+
+ - Restore the previous state when the transition is reverted.
+ It might be the original system state or the state modification
+ done by livepatches that were being replaced.
+
+  - Remove any already made changes when an error occurs and the livepatch
+    cannot get enabled.
+
+4. Expected usage
+=================
+
+System states are usually modified by livepatch callbacks. The expected
+role of each callback is as follows:
+
+*pre_patch()*
+
+ - Allocate *state->data* when necessary. The allocation might fail
+ and *pre_patch()* is the only callback that could stop loading
+ of the livepatch. The allocation is not needed when the data
+ are already provided by previously installed livepatches.
+
+ - Do any other preparatory action that is needed by
+ the new code even before the transition gets finished.
+ For example, initialize *state->data*.
+
+ The system state itself is typically modified in *post_patch()*
+ when the entire system is able to handle it.
+
+  - Clean up its own mess in case of error. It might be done by custom
+    code or by calling *post_unpatch()* explicitly.
+
+*post_patch()*
+
+ - Copy *state->data* from the previous livepatch when they are
+ compatible.
+
+  - Do the actual system state modification. Finally, allow
+    the new code to use it.
+
+ - Make sure that *state->data* has all necessary information.
+
+  - Free *state->data* from replaced livepatches when it is no longer
+    needed.
+
+*pre_unpatch()*
+
+  - Prevent the code added by the livepatch from relying on the system
+    state change.
+
+  - Revert the system state modification.
+
+*post_unpatch()*
+
+  - Distinguish a transition reversal from livepatch disabling by
+    checking *klp_get_prev_state()*.
+
+  - In case of a transition reversal, restore the previous system
+    state. It might mean doing nothing.
+
+  - Remove any settings or data that are no longer needed.
+
+.. note::
+
+   *pre_unpatch()* typically performs operations symmetric to
+   *post_patch()*, except that it is called only when the livepatch is
+   being disabled. Therefore it does not need to care about any previously
+   installed livepatch.
+
+   *post_unpatch()* typically performs operations symmetric to
+   *pre_patch()*. It might also be called during a transition reversal.
+   Therefore it has to handle the state of the previously installed
+   livepatches.
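+
+A rough sketch of these callback roles (error handling trimmed; the change
+id and data layout are hypothetical):
+
+.. code-block:: c
+
+    static int pre_patch_callback(struct klp_object *obj)
+    {
+        struct klp_state *state = klp_get_state(&patch, LIVEPATCH_FIX_STATE);
+
+        /* Data might already be provided by the replaced livepatch. */
+        if (klp_get_prev_state(LIVEPATCH_FIX_STATE))
+            return 0;
+
+        state->data = kzalloc(sizeof(unsigned long), GFP_KERNEL);
+        return state->data ? 0 : -ENOMEM;
+    }
+
+    static void post_unpatch_callback(struct klp_object *obj)
+    {
+        /* Keep the data when an older livepatch still uses the state. */
+        if (klp_get_prev_state(LIVEPATCH_FIX_STATE))
+            return;
+
+        kfree(klp_get_state(&patch, LIVEPATCH_FIX_STATE)->data);
+    }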
diff --git a/Documentation/media/cec.h.rst.exceptions b/Documentation/media/cec.h.rst.exceptions
index 014816d04b9e..d83790ccac8e 100644
--- a/Documentation/media/cec.h.rst.exceptions
+++ b/Documentation/media/cec.h.rst.exceptions
@@ -335,6 +335,95 @@ ignore define CEC_OP_MENU_STATE_DEACTIVATED
ignore define CEC_MSG_USER_CONTROL_PRESSED
+ignore define CEC_OP_UI_CMD_SELECT
+ignore define CEC_OP_UI_CMD_UP
+ignore define CEC_OP_UI_CMD_DOWN
+ignore define CEC_OP_UI_CMD_LEFT
+ignore define CEC_OP_UI_CMD_RIGHT
+ignore define CEC_OP_UI_CMD_RIGHT_UP
+ignore define CEC_OP_UI_CMD_RIGHT_DOWN
+ignore define CEC_OP_UI_CMD_LEFT_UP
+ignore define CEC_OP_UI_CMD_LEFT_DOWN
+ignore define CEC_OP_UI_CMD_DEVICE_ROOT_MENU
+ignore define CEC_OP_UI_CMD_DEVICE_SETUP_MENU
+ignore define CEC_OP_UI_CMD_CONTENTS_MENU
+ignore define CEC_OP_UI_CMD_FAVORITE_MENU
+ignore define CEC_OP_UI_CMD_BACK
+ignore define CEC_OP_UI_CMD_MEDIA_TOP_MENU
+ignore define CEC_OP_UI_CMD_MEDIA_CONTEXT_SENSITIVE_MENU
+ignore define CEC_OP_UI_CMD_NUMBER_ENTRY_MODE
+ignore define CEC_OP_UI_CMD_NUMBER_11
+ignore define CEC_OP_UI_CMD_NUMBER_12
+ignore define CEC_OP_UI_CMD_NUMBER_0_OR_NUMBER_10
+ignore define CEC_OP_UI_CMD_NUMBER_1
+ignore define CEC_OP_UI_CMD_NUMBER_2
+ignore define CEC_OP_UI_CMD_NUMBER_3
+ignore define CEC_OP_UI_CMD_NUMBER_4
+ignore define CEC_OP_UI_CMD_NUMBER_5
+ignore define CEC_OP_UI_CMD_NUMBER_6
+ignore define CEC_OP_UI_CMD_NUMBER_7
+ignore define CEC_OP_UI_CMD_NUMBER_8
+ignore define CEC_OP_UI_CMD_NUMBER_9
+ignore define CEC_OP_UI_CMD_DOT
+ignore define CEC_OP_UI_CMD_ENTER
+ignore define CEC_OP_UI_CMD_CLEAR
+ignore define CEC_OP_UI_CMD_NEXT_FAVORITE
+ignore define CEC_OP_UI_CMD_CHANNEL_UP
+ignore define CEC_OP_UI_CMD_CHANNEL_DOWN
+ignore define CEC_OP_UI_CMD_PREVIOUS_CHANNEL
+ignore define CEC_OP_UI_CMD_SOUND_SELECT
+ignore define CEC_OP_UI_CMD_INPUT_SELECT
+ignore define CEC_OP_UI_CMD_DISPLAY_INFORMATION
+ignore define CEC_OP_UI_CMD_HELP
+ignore define CEC_OP_UI_CMD_PAGE_UP
+ignore define CEC_OP_UI_CMD_PAGE_DOWN
+ignore define CEC_OP_UI_CMD_POWER
+ignore define CEC_OP_UI_CMD_VOLUME_UP
+ignore define CEC_OP_UI_CMD_VOLUME_DOWN
+ignore define CEC_OP_UI_CMD_MUTE
+ignore define CEC_OP_UI_CMD_PLAY
+ignore define CEC_OP_UI_CMD_STOP
+ignore define CEC_OP_UI_CMD_PAUSE
+ignore define CEC_OP_UI_CMD_RECORD
+ignore define CEC_OP_UI_CMD_REWIND
+ignore define CEC_OP_UI_CMD_FAST_FORWARD
+ignore define CEC_OP_UI_CMD_EJECT
+ignore define CEC_OP_UI_CMD_SKIP_FORWARD
+ignore define CEC_OP_UI_CMD_SKIP_BACKWARD
+ignore define CEC_OP_UI_CMD_STOP_RECORD
+ignore define CEC_OP_UI_CMD_PAUSE_RECORD
+ignore define CEC_OP_UI_CMD_ANGLE
+ignore define CEC_OP_UI_CMD_SUB_PICTURE
+ignore define CEC_OP_UI_CMD_VIDEO_ON_DEMAND
+ignore define CEC_OP_UI_CMD_ELECTRONIC_PROGRAM_GUIDE
+ignore define CEC_OP_UI_CMD_TIMER_PROGRAMMING
+ignore define CEC_OP_UI_CMD_INITIAL_CONFIGURATION
+ignore define CEC_OP_UI_CMD_SELECT_BROADCAST_TYPE
+ignore define CEC_OP_UI_CMD_SELECT_SOUND_PRESENTATION
+ignore define CEC_OP_UI_CMD_AUDIO_DESCRIPTION
+ignore define CEC_OP_UI_CMD_INTERNET
+ignore define CEC_OP_UI_CMD_3D_MODE
+ignore define CEC_OP_UI_CMD_PLAY_FUNCTION
+ignore define CEC_OP_UI_CMD_PAUSE_PLAY_FUNCTION
+ignore define CEC_OP_UI_CMD_RECORD_FUNCTION
+ignore define CEC_OP_UI_CMD_PAUSE_RECORD_FUNCTION
+ignore define CEC_OP_UI_CMD_STOP_FUNCTION
+ignore define CEC_OP_UI_CMD_MUTE_FUNCTION
+ignore define CEC_OP_UI_CMD_RESTORE_VOLUME_FUNCTION
+ignore define CEC_OP_UI_CMD_TUNE_FUNCTION
+ignore define CEC_OP_UI_CMD_SELECT_MEDIA_FUNCTION
+ignore define CEC_OP_UI_CMD_SELECT_AV_INPUT_FUNCTION
+ignore define CEC_OP_UI_CMD_SELECT_AUDIO_INPUT_FUNCTION
+ignore define CEC_OP_UI_CMD_POWER_TOGGLE_FUNCTION
+ignore define CEC_OP_UI_CMD_POWER_OFF_FUNCTION
+ignore define CEC_OP_UI_CMD_POWER_ON_FUNCTION
+ignore define CEC_OP_UI_CMD_F1_BLUE
+ignore define CEC_OP_UI_CMD_F2_RED
+ignore define CEC_OP_UI_CMD_F3_GREEN
+ignore define CEC_OP_UI_CMD_F4_YELLOW
+ignore define CEC_OP_UI_CMD_F5
+ignore define CEC_OP_UI_CMD_DATA
+
ignore define CEC_OP_UI_BCAST_TYPE_TOGGLE_ALL
ignore define CEC_OP_UI_BCAST_TYPE_TOGGLE_DIG_ANA
ignore define CEC_OP_UI_BCAST_TYPE_ANALOGUE
diff --git a/Documentation/media/kapi/v4l2-controls.rst b/Documentation/media/kapi/v4l2-controls.rst
index ebe2a55908be..b20800cae3f2 100644
--- a/Documentation/media/kapi/v4l2-controls.rst
+++ b/Documentation/media/kapi/v4l2-controls.rst
@@ -140,6 +140,15 @@ Menu controls with a driver specific menu are added by calling
const struct v4l2_ctrl_ops *ops, u32 id, s32 max,
s32 skip_mask, s32 def, const char * const *qmenu);
+Standard compound controls can be added by calling
+:c:func:`v4l2_ctrl_new_std_compound`:
+
+.. code-block:: c
+
+ struct v4l2_ctrl *v4l2_ctrl_new_std_compound(struct v4l2_ctrl_handler *hdl,
+ const struct v4l2_ctrl_ops *ops, u32 id,
+ const union v4l2_ctrl_ptr p_def);
+
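+A hypothetical usage sketch (the handler, ops and default value are just
+examples, not taken from an existing driver):
+
+.. code-block:: c
+
+    static struct v4l2_area unit_cell = { .width = 1120, .height = 1120 };
+
+    static int register_unit_cell(struct v4l2_ctrl_handler *hdl,
+                                  const struct v4l2_ctrl_ops *ops)
+    {
+        union v4l2_ctrl_ptr p_def;
+
+        p_def.p_area = &unit_cell;
+        v4l2_ctrl_new_std_compound(hdl, ops, V4L2_CID_UNIT_CELL_SIZE, p_def);
+        return hdl->error;
+    }
+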
Integer menu controls with a driver specific menu can be added by calling
:c:func:`v4l2_ctrl_new_int_menu`:
diff --git a/Documentation/media/uapi/cec/cec-funcs.rst b/Documentation/media/uapi/cec/cec-funcs.rst
index 620590b168c9..dc6da9c639a8 100644
--- a/Documentation/media/uapi/cec/cec-funcs.rst
+++ b/Documentation/media/uapi/cec/cec-funcs.rst
@@ -24,6 +24,7 @@ Function Reference
cec-ioc-adap-g-caps
cec-ioc-adap-g-log-addrs
cec-ioc-adap-g-phys-addr
+ cec-ioc-adap-g-conn-info
cec-ioc-dqevent
cec-ioc-g-mode
cec-ioc-receive
diff --git a/Documentation/media/uapi/cec/cec-ioc-adap-g-caps.rst b/Documentation/media/uapi/cec/cec-ioc-adap-g-caps.rst
index 0c44f31a9b59..76761a98c312 100644
--- a/Documentation/media/uapi/cec/cec-ioc-adap-g-caps.rst
+++ b/Documentation/media/uapi/cec/cec-ioc-adap-g-caps.rst
@@ -135,8 +135,12 @@ returns the information to the application. The ioctl never fails.
- The CEC hardware can monitor CEC pin changes from low to high voltage
and vice versa. When in pin monitoring mode the application will
receive ``CEC_EVENT_PIN_CEC_LOW`` and ``CEC_EVENT_PIN_CEC_HIGH`` events.
+ * .. _`CEC-CAP-CONNECTOR-INFO`:
-
+ - ``CEC_CAP_CONNECTOR_INFO``
+ - 0x00000100
+ - If this capability is set, then :ref:`CEC_ADAP_G_CONNECTOR_INFO` can
+ be used.
Return Value
============
diff --git a/Documentation/media/uapi/cec/cec-ioc-adap-g-conn-info.rst b/Documentation/media/uapi/cec/cec-ioc-adap-g-conn-info.rst
new file mode 100644
index 000000000000..a21659d55c6b
--- /dev/null
+++ b/Documentation/media/uapi/cec/cec-ioc-adap-g-conn-info.rst
@@ -0,0 +1,105 @@
+.. SPDX-License-Identifier: GPL-2.0
+..
+.. Copyright 2019 Google LLC
+..
+.. _CEC_ADAP_G_CONNECTOR_INFO:
+
+*******************************
+ioctl CEC_ADAP_G_CONNECTOR_INFO
+*******************************
+
+Name
+====
+
+CEC_ADAP_G_CONNECTOR_INFO - Query HDMI connector information
+
+Synopsis
+========
+
+.. c:function:: int ioctl( int fd, CEC_ADAP_G_CONNECTOR_INFO, struct cec_connector_info *argp )
+ :name: CEC_ADAP_G_CONNECTOR_INFO
+
+Arguments
+=========
+
+``fd``
+ File descriptor returned by :c:func:`open() <cec-open>`.
+
+``argp``
+    Pointer to struct :c:type:`cec_connector_info` to be filled by the
+    driver.
+
+Description
+===========
+
+Using this ioctl an application can learn which HDMI connector this CEC
+device corresponds to. When calling this ioctl, the application should
+provide a pointer to a struct cec_connector_info, which will be populated
+by the kernel with the info provided by the adapter's driver. This ioctl
+is only available if the ``CEC_CAP_CONNECTOR_INFO`` capability is set.
+
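+A minimal usage sketch (assuming the adapter was opened as ``fd``):
+
+.. code-block:: c
+
+    struct cec_connector_info info = {};
+
+    if (ioctl(fd, CEC_ADAP_G_CONNECTOR_INFO, &info) == 0 &&
+        info.type == CEC_CONNECTOR_TYPE_DRM)
+        printf("DRM card %u, connector %u\n",
+               info.drm.card_no, info.drm.connector_id);
+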
+.. tabularcolumns:: |p{1.0cm}|p{4.4cm}|p{2.5cm}|p{9.6cm}|
+
+.. c:type:: cec_connector_info
+
+.. flat-table:: struct cec_connector_info
+ :header-rows: 0
+ :stub-columns: 0
+ :widths: 1 1 1 8
+
+ * - __u32
+ - ``type``
+ - The type of connector this adapter is associated with.
+ * - union
+ - ``(anonymous)``
+ -
+ * -
+ - ``struct cec_drm_connector_info``
+ - drm
+ - :ref:`cec-drm-connector-info`
+
+
+.. tabularcolumns:: |p{4.4cm}|p{2.5cm}|p{10.6cm}|
+
+.. _connector-type:
+
+.. flat-table:: Connector types
+ :header-rows: 0
+ :stub-columns: 0
+ :widths: 3 1 8
+
+ * .. _`CEC-CONNECTOR-TYPE-NO-CONNECTOR`:
+
+ - ``CEC_CONNECTOR_TYPE_NO_CONNECTOR``
+ - 0
+ - No connector is associated with the adapter/the information is not
+ provided by the driver.
+ * .. _`CEC-CONNECTOR-TYPE-DRM`:
+
+ - ``CEC_CONNECTOR_TYPE_DRM``
+ - 1
+ - Indicates that a DRM connector is associated with this adapter.
+ Information about the connector can be found in
+ :ref:`cec-drm-connector-info`.
+
+.. tabularcolumns:: |p{4.4cm}|p{2.5cm}|p{10.6cm}|
+
+.. c:type:: cec_drm_connector_info
+
+.. _cec-drm-connector-info:
+
+.. flat-table:: struct cec_drm_connector_info
+ :header-rows: 0
+ :stub-columns: 0
+ :widths: 3 1 8
+
+ * .. _`CEC-DRM-CONNECTOR-TYPE-CARD-NO`:
+
+ - __u32
+ - ``card_no``
+      - DRM card number: the number from a card's path, e.g. 0 in case of
+        /dev/dri/card0.
+ * .. _`CEC-DRM-CONNECTOR-TYPE-CONNECTOR_ID`:
+
+ - __u32
+ - ``connector_id``
+ - DRM connector ID.
diff --git a/Documentation/media/uapi/cec/cec-ioc-dqevent.rst b/Documentation/media/uapi/cec/cec-ioc-dqevent.rst
index 46a1c99a595e..5e21b1fbfc01 100644
--- a/Documentation/media/uapi/cec/cec-ioc-dqevent.rst
+++ b/Documentation/media/uapi/cec/cec-ioc-dqevent.rst
@@ -70,6 +70,14 @@ it is guaranteed that the state did change in between the two events.
addresses are claimed or if ``phys_addr`` is ``CEC_PHYS_ADDR_INVALID``.
If bit 15 is set (``1 << CEC_LOG_ADDR_UNREGISTERED``) then this device
has the unregistered logical address. In that case all other bits are 0.
+ * - __u16
+ - ``have_conn_info``
+ - If non-zero, then HDMI connector information is available.
+ This field is only valid if ``CEC_CAP_CONNECTOR_INFO`` is set. If that
+ capability is set and ``have_conn_info`` is zero, then that indicates
+ that the HDMI connector device is not instantiated, either because
+ the HDMI driver is still configuring the device or because the HDMI
+ device was unbound.
.. c:type:: cec_event_lost_msgs
diff --git a/Documentation/media/uapi/mediactl/request-api.rst b/Documentation/media/uapi/mediactl/request-api.rst
index a74c82d95609..01abe8103bdd 100644
--- a/Documentation/media/uapi/mediactl/request-api.rst
+++ b/Documentation/media/uapi/mediactl/request-api.rst
@@ -53,8 +53,8 @@ with different configurations in advance, knowing that the configuration will be
applied when needed to get the expected result. Configuration values at the time
of request completion are also available for reading.
-Usage
-=====
+General Usage
+-------------
The Request API extends the Media Controller API and cooperates with
subsystem-specific APIs to support request usage. At the Media Controller
diff --git a/Documentation/media/uapi/v4l/biblio.rst b/Documentation/media/uapi/v4l/biblio.rst
index ad2ff258afa8..8095f57d3d75 100644
--- a/Documentation/media/uapi/v4l/biblio.rst
+++ b/Documentation/media/uapi/v4l/biblio.rst
@@ -131,6 +131,15 @@ ITU-T Rec. H.264 Specification (04/2017 Edition)
:author: International Telecommunication Union (http://www.itu.ch)
+.. _hevc:
+
+ITU H.265/HEVC
+==============
+
+:title: ITU-T Rec. H.265 | ISO/IEC 23008-2 "High Efficiency Video Coding"
+
+:author: International Telecommunication Union (http://www.itu.ch), International Organisation for Standardisation (http://www.iso.ch)
+
.. _jfif:
JFIF
diff --git a/Documentation/media/uapi/v4l/buffer.rst b/Documentation/media/uapi/v4l/buffer.rst
index 1cbd9cde57f3..9149b57728e5 100644
--- a/Documentation/media/uapi/v4l/buffer.rst
+++ b/Documentation/media/uapi/v4l/buffer.rst
@@ -607,6 +607,19 @@ Buffer Flags
applications shall use this flag for output buffers if the data in
this buffer has not been created by the CPU but by some
DMA-capable unit, in which case caches have not been used.
+ * .. _`V4L2-BUF-FLAG-M2M-HOLD-CAPTURE-BUF`:
+
+ - ``V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF``
+ - 0x00000200
+ - Only valid if ``V4L2_BUF_CAP_SUPPORTS_M2M_HOLD_CAPTURE_BUF`` is
+ set. It is typically used with stateless decoders where multiple
+ output buffers each decode to a slice of the decoded frame.
+ Applications can set this flag when queueing the output buffer
+ to prevent the driver from dequeueing the capture buffer after
+ the output buffer has been decoded (i.e. the capture buffer is
+ 'held'). If the timestamp of this output buffer differs from that
+ of the previous output buffer, then that indicates the start of a
+ new frame and the previously held capture buffer is dequeued.
* .. _`V4L2-BUF-FLAG-LAST`:
- ``V4L2_BUF_FLAG_LAST``
diff --git a/Documentation/media/uapi/v4l/dev-mem2mem.rst b/Documentation/media/uapi/v4l/dev-mem2mem.rst
index caa05f5f6380..70953958cee6 100644
--- a/Documentation/media/uapi/v4l/dev-mem2mem.rst
+++ b/Documentation/media/uapi/v4l/dev-mem2mem.rst
@@ -46,3 +46,4 @@ devices are given in the following sections.
:maxdepth: 1
dev-decoder
+ dev-stateless-decoder
diff --git a/Documentation/media/uapi/v4l/dev-stateless-decoder.rst b/Documentation/media/uapi/v4l/dev-stateless-decoder.rst
new file mode 100644
index 000000000000..4a26646eeec5
--- /dev/null
+++ b/Documentation/media/uapi/v4l/dev-stateless-decoder.rst
@@ -0,0 +1,424 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+.. _stateless_decoder:
+
+**************************************************
+Memory-to-memory Stateless Video Decoder Interface
+**************************************************
+
+A stateless decoder is a decoder that works without retaining any kind of state
+between processed frames. This means that each frame is decoded independently
+of any previous and future frames, and that the client is responsible for
+maintaining the decoding state and providing it to the decoder with each
+decoding request. This is in contrast to the stateful video decoder interface,
+where the hardware and driver maintain the decoding state and all the client
+has to do is to provide the raw encoded stream and dequeue decoded frames in
+display order.
+
+This section describes how user-space ("the client") is expected to communicate
+with stateless decoders in order to successfully decode an encoded stream.
+Compared to stateful codecs, the decoder/client sequence is simpler, but
+the cost of this simplicity is extra complexity in the client, which is
+responsible for maintaining a consistent decoding state.
+
+Stateless decoders make use of the :ref:`media-request-api`. A stateless
+decoder must expose the ``V4L2_BUF_CAP_SUPPORTS_REQUESTS`` capability on its
+``OUTPUT`` queue when :c:func:`VIDIOC_REQBUFS` or :c:func:`VIDIOC_CREATE_BUFS`
+are invoked.
+
+Depending on the encoded formats supported by the decoder, a single decoded
+frame may be the result of several decode requests (for instance, H.264 streams
+with multiple slices per frame). Decoders that support such formats must also
+expose the ``V4L2_BUF_CAP_SUPPORTS_M2M_HOLD_CAPTURE_BUF`` capability on their
+``OUTPUT`` queue.
+
+Querying capabilities
+=====================
+
+1. To enumerate the set of coded formats supported by the decoder, the client
+ calls :c:func:`VIDIOC_ENUM_FMT` on the ``OUTPUT`` queue.
+
+ * The driver must always return the full set of supported ``OUTPUT`` formats,
+ irrespective of the format currently set on the ``CAPTURE`` queue.
+
+   * Simultaneously, the driver must restrict the set of values returned by
+     codec-specific capability controls (such as H.264 profiles) to the set
+     actually supported by the hardware.
+
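+   A sketch of this enumeration (multi-planar buffer type assumed):
+
+   .. code-block:: c
+
+       struct v4l2_fmtdesc desc = { .type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE };
+
+       while (ioctl(fd, VIDIOC_ENUM_FMT, &desc) == 0) {
+           printf("coded format: %.4s\n", (char *)&desc.pixelformat);
+           desc.index++;
+       }
+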
+2. To enumerate the set of supported raw formats, the client calls
+ :c:func:`VIDIOC_ENUM_FMT` on the ``CAPTURE`` queue.
+
+ * The driver must return only the formats supported for the format currently
+ active on the ``OUTPUT`` queue.
+
+ * Depending on the currently set ``OUTPUT`` format, the set of supported raw
+ formats may depend on the value of some codec-dependent controls.
+ The client is responsible for making sure that these controls are set
+ before querying the ``CAPTURE`` queue. Failure to do so will result in the
+ default values for these controls being used, and a returned set of formats
+ that may not be usable for the media the client is trying to decode.
+
+3. The client may use :c:func:`VIDIOC_ENUM_FRAMESIZES` to detect supported
+ resolutions for a given format, passing desired pixel format in
+ :c:type:`v4l2_frmsizeenum`'s ``pixel_format``.
+
+4. Supported profiles and levels for the current ``OUTPUT`` format, if
+ applicable, may be queried using their respective controls via
+ :c:func:`VIDIOC_QUERYCTRL`.
+
+Initialization
+==============
+
+1. Set the coded format on the ``OUTPUT`` queue via :c:func:`VIDIOC_S_FMT`.
+
+ * **Required fields:**
+
+ ``type``
+ a ``V4L2_BUF_TYPE_*`` enum appropriate for ``OUTPUT``.
+
+ ``pixelformat``
+ a coded pixel format.
+
+ ``width``, ``height``
+ coded width and height parsed from the stream.
+
+ other fields
+ follow standard semantics.
+
+ .. note::
+
+ Changing the ``OUTPUT`` format may change the currently set ``CAPTURE``
+ format. The driver will derive a new ``CAPTURE`` format from the
+ ``OUTPUT`` format being set, including resolution, colorimetry
+ parameters, etc. If the client needs a specific ``CAPTURE`` format,
+ it must adjust it afterwards.
+
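+   A minimal sketch of this step (multi-planar API; the H.264 slice format
+   and the resolution are example values):
+
+   .. code-block:: c
+
+       struct v4l2_format fmt = {};
+
+       fmt.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+       fmt.fmt.pix_mp.pixelformat = V4L2_PIX_FMT_H264_SLICE;
+       fmt.fmt.pix_mp.width = 1920;    /* parsed from the stream */
+       fmt.fmt.pix_mp.height = 1088;
+       ioctl(fd, VIDIOC_S_FMT, &fmt);
+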
+2. Call :c:func:`VIDIOC_S_EXT_CTRLS` to set all the controls (parsed headers,
+ etc.) required by the ``OUTPUT`` format to enumerate the ``CAPTURE`` formats.
+
+3. Call :c:func:`VIDIOC_G_FMT` for ``CAPTURE`` queue to get the format for the
+ destination buffers parsed/decoded from the bytestream.
+
+ * **Required fields:**
+
+ ``type``
+ a ``V4L2_BUF_TYPE_*`` enum appropriate for ``CAPTURE``.
+
+ * **Returned fields:**
+
+ ``width``, ``height``
+ frame buffer resolution for the decoded frames.
+
+ ``pixelformat``
+ pixel format for decoded frames.
+
+ ``num_planes`` (for _MPLANE ``type`` only)
+ number of planes for pixelformat.
+
+ ``sizeimage``, ``bytesperline``
+ as per standard semantics; matching frame buffer format.
+
+ .. note::
+
+ The value of ``pixelformat`` may be any pixel format supported for the
+ ``OUTPUT`` format, based on the hardware capabilities. It is suggested
+ that the driver chooses the preferred/optimal format for the current
+ configuration. For example, a YUV format may be preferred over an RGB
+ format, if an additional conversion step would be required for RGB.
+
+4. *[optional]* Enumerate ``CAPTURE`` formats via :c:func:`VIDIOC_ENUM_FMT` on
+ the ``CAPTURE`` queue. The client may use this ioctl to discover which
+ alternative raw formats are supported for the current ``OUTPUT`` format and
+ select one of them via :c:func:`VIDIOC_S_FMT`.
+
+ .. note::
+
+ The driver will return only formats supported for the currently selected
+ ``OUTPUT`` format and currently set controls, even if more formats may be
+ supported by the decoder in general.
+
+ For example, a decoder may support YUV and RGB formats for
+ resolutions 1920x1088 and lower, but only YUV for higher resolutions (due
+ to hardware limitations). After setting a resolution of 1920x1088 or lower
+ as the ``OUTPUT`` format, :c:func:`VIDIOC_ENUM_FMT` may return a set of
+ YUV and RGB pixel formats, but after setting a resolution higher than
+ 1920x1088, the driver will not return RGB pixel formats, since they are
+ unsupported for this resolution.
+
+5. *[optional]* Choose a different ``CAPTURE`` format than suggested via
+ :c:func:`VIDIOC_S_FMT` on ``CAPTURE`` queue. It is possible for the client to
+ choose a different format than selected/suggested by the driver in
+ :c:func:`VIDIOC_G_FMT`.
+
+ * **Required fields:**
+
+ ``type``
+ a ``V4L2_BUF_TYPE_*`` enum appropriate for ``CAPTURE``.
+
+ ``pixelformat``
+ a raw pixel format.
+
+ ``width``, ``height``
+ frame buffer resolution of the decoded stream; typically unchanged from
+ what was returned with :c:func:`VIDIOC_G_FMT`, but it may be different
+ if the hardware supports composition and/or scaling.
+
+   After performing this step, the client must perform step 3 again in order
+   to obtain up-to-date information about the buffer size and layout.
+
+6. Allocate source (bytestream) buffers via :c:func:`VIDIOC_REQBUFS` on
+ ``OUTPUT`` queue.
+
+ * **Required fields:**
+
+ ``count``
+ requested number of buffers to allocate; greater than zero.
+
+ ``type``
+ a ``V4L2_BUF_TYPE_*`` enum appropriate for ``OUTPUT``.
+
+ ``memory``
+ follows standard semantics.
+
+ * **Return fields:**
+
+ ``count``
+ actual number of buffers allocated.
+
+   * If required, the driver will adjust ``count`` to be equal to or bigger
+     than the minimum number of ``OUTPUT`` buffers required for the given
+     format and the requested count. The client must check this value after
+     the ioctl returns to get the actual number of buffers allocated.
+
+7. Allocate destination (raw format) buffers via :c:func:`VIDIOC_REQBUFS` on the
+ ``CAPTURE`` queue.
+
+ * **Required fields:**
+
+ ``count``
+        requested number of buffers to allocate; greater than zero. The
+        client is responsible for deducing the minimum number of buffers
+        required for the stream to be properly decoded (taking e.g.
+        reference frames into account) and passing an equal or bigger
+        number.
+
+ ``type``
+ a ``V4L2_BUF_TYPE_*`` enum appropriate for ``CAPTURE``.
+
+ ``memory``
+ follows standard semantics. ``V4L2_MEMORY_USERPTR`` is not supported
+ for ``CAPTURE`` buffers.
+
+ * **Return fields:**
+
+ ``count``
+ adjusted to allocated number of buffers, in case the codec requires
+ more buffers than requested.
+
+   * The driver must adjust ``count`` to the minimum number of ``CAPTURE``
+     buffers required for the current format, stream configuration and
+     requested count. The client must check this value after the ioctl
+     returns to get the number of buffers allocated.
+
+8. Allocate requests (likely one per ``OUTPUT`` buffer) via
+ :c:func:`MEDIA_IOC_REQUEST_ALLOC` on the media device.
+
+9. Start streaming on both ``OUTPUT`` and ``CAPTURE`` queues via
+ :c:func:`VIDIOC_STREAMON`.
+
+Decoding
+========
+
+For each frame, the client is responsible for submitting at least one request to
+which the following is attached:
+
+* The amount of encoded data expected by the codec for its current
+ configuration, as a buffer submitted to the ``OUTPUT`` queue. Typically, this
+ corresponds to one frame worth of encoded data, but some formats may allow (or
+ require) different amounts per unit.
+* All the metadata needed to decode the submitted encoded data, in the form of
+ controls relevant to the format being decoded.
+
+The amount of data and contents of the source ``OUTPUT`` buffer, as well as the
+controls that must be set on the request, depend on the active coded pixel
+format and might be affected by codec-specific extended controls, as stated in
+documentation of each format.
+
+If there is a possibility that the decoded frame will require one or more
+decode requests after the current one in order to be produced, then the client
+must set the ``V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF`` flag on the ``OUTPUT``
+buffer. This will result in the (potentially partially) decoded ``CAPTURE``
+buffer not being made available for dequeueing, and being reused for the
+next decode request if the timestamp of the next ``OUTPUT`` buffer has not
+changed.
+
+A typical frame would thus be decoded using the following sequence:
+
+1. Queue an ``OUTPUT`` buffer containing one unit of encoded bytestream data for
+ the decoding request, using :c:func:`VIDIOC_QBUF`.
+
+ * **Required fields:**
+
+ ``index``
+ index of the buffer being queued.
+
+ ``type``
+ type of the buffer.
+
+ ``bytesused``
+ number of bytes taken by the encoded data frame in the buffer.
+
+ ``flags``
+        the ``V4L2_BUF_FLAG_REQUEST_FD`` flag must be set. Additionally, if
+        the client is not sure that the current decode request is the last
+        one needed to produce a fully decoded frame, then
+        ``V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF`` must also be set.
+
+ ``request_fd``
+ must be set to the file descriptor of the decoding request.
+
+ ``timestamp``
+        must be set to a unique value per frame. This value will be
+        propagated into the decoded frame's buffer and can also be used to
+        reference this frame from another one. If using multiple decode
+        requests per frame, then the timestamps of all the ``OUTPUT``
+        buffers for a given frame must be identical. If the timestamp
+        changes, then the currently held ``CAPTURE`` buffer will be made
+        available for dequeuing and the current request will work on a new
+        ``CAPTURE`` buffer.
+
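+   A sketch of queueing one slice for a decode request (multi-planar API;
+   ``slice_size``, ``req_fd`` and ``frame_num`` are placeholders):
+
+   .. code-block:: c
+
+       struct v4l2_plane plane = { .bytesused = slice_size };
+       struct v4l2_buffer buf = {};
+
+       buf.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+       buf.index = 0;
+       buf.m.planes = &plane;
+       buf.length = 1;
+       buf.flags = V4L2_BUF_FLAG_REQUEST_FD |
+                   V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF;
+       buf.request_fd = req_fd;
+       buf.timestamp.tv_usec = frame_num;  /* unique per frame */
+       ioctl(fd, VIDIOC_QBUF, &buf);
+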
+2. Set the codec-specific controls for the decoding request, using
+ :c:func:`VIDIOC_S_EXT_CTRLS`.
+
+ * **Required fields:**
+
+ ``which``
+ must be ``V4L2_CTRL_WHICH_REQUEST_VAL``.
+
+ ``request_fd``
+ must be set to the file descriptor of the decoding request.
+
+ other fields
+ other fields are set as usual when setting controls. The ``controls``
+ array must contain all the codec-specific controls required to decode
+ a frame.
+
+ .. note::
+
+ It is possible to specify the controls in different invocations of
+ :c:func:`VIDIOC_S_EXT_CTRLS`, or to overwrite a previously set control, as
+ long as ``request_fd`` and ``which`` are properly set. The controls state
+ at the moment of request submission is the one that will be considered.
+
+ .. note::
+
+ The order in which steps 1 and 2 take place is interchangeable.
+
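+   A sketch of attaching one codec-specific control to the request (the
+   H.264 slice parameters control is just an example; ``slice_params`` and
+   ``req_fd`` are placeholders):
+
+   .. code-block:: c
+
+       struct v4l2_ext_control ctrl = {};
+       struct v4l2_ext_controls ctrls = {};
+
+       ctrl.id = V4L2_CID_MPEG_VIDEO_H264_SLICE_PARAMS;
+       ctrl.ptr = &slice_params;
+       ctrl.size = sizeof(slice_params);
+
+       ctrls.which = V4L2_CTRL_WHICH_REQUEST_VAL;
+       ctrls.request_fd = req_fd;
+       ctrls.controls = &ctrl;
+       ctrls.count = 1;
+       ioctl(fd, VIDIOC_S_EXT_CTRLS, &ctrls);
+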
+3. Submit the request by invoking :c:func:`MEDIA_REQUEST_IOC_QUEUE` on the
+ request FD.
+
+ If the request is submitted without an ``OUTPUT`` buffer, or if some of the
+ required controls are missing from the request, then
+ :c:func:`MEDIA_REQUEST_IOC_QUEUE` will return ``-ENOENT``. If more than one
+ ``OUTPUT`` buffer is queued, then it will return ``-EINVAL``.
+ :c:func:`MEDIA_REQUEST_IOC_QUEUE` returning non-zero means that no
+ ``CAPTURE`` buffer will be produced for this request.
+
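+   A sketch of the request life cycle (``media_fd`` is the media device
+   file descriptor):
+
+   .. code-block:: c
+
+       int req_fd;
+
+       ioctl(media_fd, MEDIA_IOC_REQUEST_ALLOC, &req_fd);
+       /* ... queue the OUTPUT buffer and set controls against req_fd ... */
+       ioctl(req_fd, MEDIA_REQUEST_IOC_QUEUE);
+       /* Once the request has completed, it can be recycled: */
+       ioctl(req_fd, MEDIA_REQUEST_IOC_REINIT);
+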
+``CAPTURE`` buffers must not be part of the request, and are queued
+independently. They are returned in decode order (i.e. the same order as coded
+frames were submitted to the ``OUTPUT`` queue).
+
+Runtime decoding errors are signaled by the dequeued ``CAPTURE`` buffers
+carrying the ``V4L2_BUF_FLAG_ERROR`` flag. If a decoded reference frame has an
+error, then all following decoded frames that refer to it also have the
+``V4L2_BUF_FLAG_ERROR`` flag set, although the decoder will still try to
+produce (likely corrupted) frames.
+
+Buffer management while decoding
+================================
+
+Unlike stateful decoders, a stateless decoder does not perform any kind of
+buffer management: it only guarantees that dequeued ``CAPTURE`` buffers can be
+used by the client for as long as they are not queued again. "Used" here
+encompasses using the buffer for compositing or display.
+
+A dequeued capture buffer can also be used as the reference frame of another
+buffer.
+
+A frame is specified as a reference by converting its timestamp into nanoseconds,
+and storing it into the relevant member of a codec-dependent control structure.
+The :c:func:`v4l2_timeval_to_ns` function must be used to perform that
+conversion. The timestamp of a frame can be used to reference it as soon as all
+its units of encoded data are successfully submitted to the ``OUTPUT`` queue.
+
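+The computation below is a sketch of that conversion for a dequeued struct
+v4l2_buffer; it should match what :c:func:`v4l2_timeval_to_ns` does:
+
+.. code-block:: c
+
+    __u64 ref_ts = (__u64)buf.timestamp.tv_sec * 1000000000ULL
+                 + (__u64)buf.timestamp.tv_usec * 1000ULL;
+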
+A decoded buffer containing a reference frame must not be reused as a decoding
+target until all the frames referencing it have been decoded. The safest way to
+achieve this is to refrain from queueing a reference buffer until all the
+decoded frames referencing it have been dequeued. However, if the driver can
+guarantee that buffers queued to the ``CAPTURE`` queue are processed in queued
+order, then user-space can take advantage of this guarantee and queue a
+reference buffer when the following conditions are met:
+
+1. All the requests for frames affected by the reference frame have been
+ queued, and
+
+2. A sufficient number of ``CAPTURE`` buffers to cover all the decoded
+ referencing frames have been queued.
+
+When queuing a decoding request, the driver will increase the reference count of
+all the resources associated with reference frames. This means that the client
+can e.g. close the DMABUF file descriptors of reference frame buffers if it
+won't need them afterwards.
+
+Seeking
+=======
+
+In order to seek, the client just needs to submit requests using input buffers
+corresponding to the new stream position. It must, however, be aware that the
+resolution may have changed and follow the dynamic resolution change sequence
+in that case. Also, depending on the codec used, picture parameters (e.g.
+SPS/PPS for H.264) may have changed and the client is responsible for making
+sure that a valid state is sent to the decoder.
+
+The client is then free to ignore any returned ``CAPTURE`` buffer that comes
+from the pre-seek position.
+
+Pausing
+=======
+
+In order to pause, the client can just cease queuing buffers onto the ``OUTPUT``
+queue. Without source bytestream data, there is no data to process and the codec
+will remain idle.
+
+Dynamic resolution change
+=========================
+
+If the client detects a resolution change in the stream, it will need to perform
+the initialization sequence again with the new resolution:
+
+1. If the last submitted request resulted in a ``CAPTURE`` buffer being
+ held by the use of the ``V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF`` flag, then the
+ last frame is not available on the ``CAPTURE`` queue. In this case, a
+ ``V4L2_DEC_CMD_FLUSH`` command shall be sent. This will make the driver
+ dequeue the held ``CAPTURE`` buffer.
+
+2. Wait until all submitted requests have completed and dequeue the
+ corresponding output buffers.
+
+3. Call :c:func:`VIDIOC_STREAMOFF` on both the ``OUTPUT`` and ``CAPTURE``
+ queues.
+
+4. Free all ``CAPTURE`` buffers by calling :c:func:`VIDIOC_REQBUFS` on the
+ ``CAPTURE`` queue with a buffer count of zero.
+
+5. Perform the initialization sequence again (minus the allocation of
+ ``OUTPUT`` buffers), with the new resolution set on the ``OUTPUT`` queue.
+ Note that due to resolution constraints, a different format may need to be
+ picked on the ``CAPTURE`` queue.
+
+Drain
+=====
+
+If the last submitted request resulted in a ``CAPTURE`` buffer being
+held by the use of the ``V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF`` flag, then the
+last frame is not available on the ``CAPTURE`` queue. In this case, a
+``V4L2_DEC_CMD_FLUSH`` command shall be sent. This will make the driver
+dequeue the held ``CAPTURE`` buffer.
+
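+A minimal sketch of sending that command:
+
+.. code-block:: c
+
+    struct v4l2_decoder_cmd cmd = { .cmd = V4L2_DEC_CMD_FLUSH };
+
+    ioctl(fd, VIDIOC_DECODER_CMD, &cmd);
+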
+After that, in order to drain the stream on a stateless decoder, the client
+just needs to wait until all the submitted requests are completed.
diff --git a/Documentation/media/uapi/v4l/ext-ctrls-codec.rst b/Documentation/media/uapi/v4l/ext-ctrls-codec.rst
index bc5dd8e76567..28313c0f4e7c 100644
--- a/Documentation/media/uapi/v4l/ext-ctrls-codec.rst
+++ b/Documentation/media/uapi/v4l/ext-ctrls-codec.rst
@@ -1713,10 +1713,14 @@ enum v4l2_mpeg_video_h264_hierarchical_coding_type -
* - __u8
- ``scaling_list_4x4[6][16]``
- -
+ - Scaling matrix after applying the inverse scanning process.
+ Expected list order is Intra Y, Intra Cb, Intra Cr, Inter Y,
+ Inter Cb, Inter Cr.
* - __u8
- ``scaling_list_8x8[6][64]``
- -
+ - Scaling matrix after applying the inverse scanning process.
+ Expected list order is Intra Y, Inter Y, Intra Cb, Inter Cb,
+ Intra Cr, Inter Cr.
``V4L2_CID_MPEG_VIDEO_H264_SLICE_PARAMS (struct)``
Specifies the slice parameters (as extracted from the bitstream)
@@ -1796,7 +1800,7 @@ enum v4l2_mpeg_video_h264_hierarchical_coding_type -
-
* - __u32
- ``dec_ref_pic_marking_bit_size``
- -
+ - Size in bits of the dec_ref_pic_marking() syntax element.
* - __u32
- ``pic_order_cnt_bit_size``
-
@@ -1820,10 +1824,12 @@ enum v4l2_mpeg_video_h264_hierarchical_coding_type -
-
* - __u8
- ``num_ref_idx_l0_active_minus1``
- -
+ - If num_ref_idx_active_override_flag is not set, this field must be
+ set to the value of num_ref_idx_l0_default_active_minus1.
* - __u8
- ``num_ref_idx_l1_active_minus1``
- -
+ - If num_ref_idx_active_override_flag is not set, this field must be
+ set to the value of num_ref_idx_l1_default_active_minus1.
* - __u32
- ``slice_group_change_cycle``
-
@@ -1983,9 +1989,9 @@ enum v4l2_mpeg_video_h264_hierarchical_coding_type -
- ``reference_ts``
- Timestamp of the V4L2 capture buffer to use as reference, used
with B-coded and P-coded frames. The timestamp refers to the
- ``timestamp`` field in struct :c:type:`v4l2_buffer`. Use the
- :c:func:`v4l2_timeval_to_ns()` function to convert the struct
- :c:type:`timeval` in struct :c:type:`v4l2_buffer` to a __u64.
+ ``timestamp`` field in struct :c:type:`v4l2_buffer`. Use the
+ :c:func:`v4l2_timeval_to_ns()` function to convert the struct
+ :c:type:`timeval` in struct :c:type:`v4l2_buffer` to a __u64.
* - __u16
- ``frame_num``
-
@@ -3693,3 +3699,550 @@ enum v4l2_mpeg_video_hevc_size_of_length_field -
Indicates whether to generate SPS and PPS at every IDR. Setting it to 0
disables generating SPS and PPS at every IDR. Setting it to one enables
generating SPS and PPS at every IDR.
+
+.. _v4l2-mpeg-hevc:
+
+``V4L2_CID_MPEG_VIDEO_HEVC_SPS (struct)``
+ Specifies the Sequence Parameter Set fields (as extracted from the
+ bitstream) for the associated HEVC slice data.
+ These bitstream parameters are defined according to :ref:`hevc`.
+ They are described in section 7.4.3.2 "Sequence parameter set RBSP
+ semantics" of the specification.
+
+.. c:type:: v4l2_ctrl_hevc_sps
+
+.. cssclass:: longtable
+
+.. flat-table:: struct v4l2_ctrl_hevc_sps
+ :header-rows: 0
+ :stub-columns: 0
+ :widths: 1 1 2
+
+ * - __u16
+ - ``pic_width_in_luma_samples``
+ -
+ * - __u16
+ - ``pic_height_in_luma_samples``
+ -
+ * - __u8
+ - ``bit_depth_luma_minus8``
+ -
+ * - __u8
+ - ``bit_depth_chroma_minus8``
+ -
+ * - __u8
+ - ``log2_max_pic_order_cnt_lsb_minus4``
+ -
+ * - __u8
+ - ``sps_max_dec_pic_buffering_minus1``
+ -
+ * - __u8
+ - ``sps_max_num_reorder_pics``
+ -
+ * - __u8
+ - ``sps_max_latency_increase_plus1``
+ -
+ * - __u8
+ - ``log2_min_luma_coding_block_size_minus3``
+ -
+ * - __u8
+ - ``log2_diff_max_min_luma_coding_block_size``
+ -
+ * - __u8
+ - ``log2_min_luma_transform_block_size_minus2``
+ -
+ * - __u8
+ - ``log2_diff_max_min_luma_transform_block_size``
+ -
+ * - __u8
+ - ``max_transform_hierarchy_depth_inter``
+ -
+ * - __u8
+ - ``max_transform_hierarchy_depth_intra``
+ -
+ * - __u8
+ - ``pcm_sample_bit_depth_luma_minus1``
+ -
+ * - __u8
+ - ``pcm_sample_bit_depth_chroma_minus1``
+ -
+ * - __u8
+ - ``log2_min_pcm_luma_coding_block_size_minus3``
+ -
+ * - __u8
+ - ``log2_diff_max_min_pcm_luma_coding_block_size``
+ -
+ * - __u8
+ - ``num_short_term_ref_pic_sets``
+ -
+ * - __u8
+ - ``num_long_term_ref_pics_sps``
+ -
+ * - __u8
+ - ``chroma_format_idc``
+ -
+ * - __u64
+ - ``flags``
+ - See :ref:`Sequence Parameter Set Flags <hevc_sps_flags>`
+
+.. _hevc_sps_flags:
+
+``Sequence Parameter Set Flags``
+
+.. cssclass:: longtable
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+ :widths: 1 1 2
+
+ * - ``V4L2_HEVC_SPS_FLAG_SEPARATE_COLOUR_PLANE``
+ - 0x00000001
+ -
+ * - ``V4L2_HEVC_SPS_FLAG_SCALING_LIST_ENABLED``
+ - 0x00000002
+ -
+ * - ``V4L2_HEVC_SPS_FLAG_AMP_ENABLED``
+ - 0x00000004
+ -
+ * - ``V4L2_HEVC_SPS_FLAG_SAMPLE_ADAPTIVE_OFFSET``
+ - 0x00000008
+ -
+ * - ``V4L2_HEVC_SPS_FLAG_PCM_ENABLED``
+ - 0x00000010
+ -
+ * - ``V4L2_HEVC_SPS_FLAG_PCM_LOOP_FILTER_DISABLED``
+ - 0x00000020
+ -
+ * - ``V4L2_HEVC_SPS_FLAG_LONG_TERM_REF_PICS_PRESENT``
+ - 0x00000040
+ -
+ * - ``V4L2_HEVC_SPS_FLAG_SPS_TEMPORAL_MVP_ENABLED``
+ - 0x00000080
+ -
+ * - ``V4L2_HEVC_SPS_FLAG_STRONG_INTRA_SMOOTHING_ENABLED``
+ - 0x00000100
+ -
+
+``V4L2_CID_MPEG_VIDEO_HEVC_PPS (struct)``
+ Specifies the Picture Parameter Set fields (as extracted from the
+ bitstream) for the associated HEVC slice data.
+ These bitstream parameters are defined according to :ref:`hevc`.
+ They are described in section 7.4.3.3 "Picture parameter set RBSP
+ semantics" of the specification.
+
+.. c:type:: v4l2_ctrl_hevc_pps
+
+.. cssclass:: longtable
+
+.. flat-table:: struct v4l2_ctrl_hevc_pps
+ :header-rows: 0
+ :stub-columns: 0
+ :widths: 1 1 2
+
+ * - __u8
+ - ``num_extra_slice_header_bits``
+ -
+ * - __s8
+ - ``init_qp_minus26``
+ -
+ * - __u8
+ - ``diff_cu_qp_delta_depth``
+ -
+ * - __s8
+ - ``pps_cb_qp_offset``
+ -
+ * - __s8
+ - ``pps_cr_qp_offset``
+ -
+ * - __u8
+ - ``num_tile_columns_minus1``
+ -
+ * - __u8
+ - ``num_tile_rows_minus1``
+ -
+ * - __u8
+ - ``column_width_minus1[20]``
+ -
+ * - __u8
+ - ``row_height_minus1[22]``
+ -
+ * - __s8
+ - ``pps_beta_offset_div2``
+ -
+ * - __s8
+ - ``pps_tc_offset_div2``
+ -
+ * - __u8
+ - ``log2_parallel_merge_level_minus2``
+ -
+ * - __u8
+ - ``padding[4]``
+ - Applications and drivers must set this to zero.
+ * - __u64
+ - ``flags``
+ - See :ref:`Picture Parameter Set Flags <hevc_pps_flags>`
+
+.. _hevc_pps_flags:
+
+``Picture Parameter Set Flags``
+
+.. cssclass:: longtable
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+ :widths: 1 1 2
+
+ * - ``V4L2_HEVC_PPS_FLAG_DEPENDENT_SLICE_SEGMENT``
+ - 0x00000001
+ -
+ * - ``V4L2_HEVC_PPS_FLAG_OUTPUT_FLAG_PRESENT``
+ - 0x00000002
+ -
+ * - ``V4L2_HEVC_PPS_FLAG_SIGN_DATA_HIDING_ENABLED``
+ - 0x00000004
+ -
+ * - ``V4L2_HEVC_PPS_FLAG_CABAC_INIT_PRESENT``
+ - 0x00000008
+ -
+ * - ``V4L2_HEVC_PPS_FLAG_CONSTRAINED_INTRA_PRED``
+ - 0x00000010
+ -
+ * - ``V4L2_HEVC_PPS_FLAG_TRANSFORM_SKIP_ENABLED``
+ - 0x00000020
+ -
+ * - ``V4L2_HEVC_PPS_FLAG_CU_QP_DELTA_ENABLED``
+ - 0x00000040
+ -
+ * - ``V4L2_HEVC_PPS_FLAG_PPS_SLICE_CHROMA_QP_OFFSETS_PRESENT``
+ - 0x00000080
+ -
+ * - ``V4L2_HEVC_PPS_FLAG_WEIGHTED_PRED``
+ - 0x00000100
+ -
+ * - ``V4L2_HEVC_PPS_FLAG_WEIGHTED_BIPRED``
+ - 0x00000200
+ -
+ * - ``V4L2_HEVC_PPS_FLAG_TRANSQUANT_BYPASS_ENABLED``
+ - 0x00000400
+ -
+ * - ``V4L2_HEVC_PPS_FLAG_TILES_ENABLED``
+ - 0x00000800
+ -
+ * - ``V4L2_HEVC_PPS_FLAG_ENTROPY_CODING_SYNC_ENABLED``
+ - 0x00001000
+ -
+ * - ``V4L2_HEVC_PPS_FLAG_LOOP_FILTER_ACROSS_TILES_ENABLED``
+ - 0x00002000
+ -
+ * - ``V4L2_HEVC_PPS_FLAG_PPS_LOOP_FILTER_ACROSS_SLICES_ENABLED``
+ - 0x00004000
+ -
+ * - ``V4L2_HEVC_PPS_FLAG_DEBLOCKING_FILTER_OVERRIDE_ENABLED``
+ - 0x00008000
+ -
+ * - ``V4L2_HEVC_PPS_FLAG_PPS_DISABLE_DEBLOCKING_FILTER``
+ - 0x00010000
+ -
+ * - ``V4L2_HEVC_PPS_FLAG_LISTS_MODIFICATION_PRESENT``
+ - 0x00020000
+ -
+ * - ``V4L2_HEVC_PPS_FLAG_SLICE_SEGMENT_HEADER_EXTENSION_PRESENT``
+ - 0x00040000
+ -
+
+``V4L2_CID_MPEG_VIDEO_HEVC_SLICE_PARAMS (struct)``
+ Specifies various slice-specific parameters, especially from the NAL unit
+ header, general slice segment header and weighted prediction parameter
+ parts of the bitstream.
+ These bitstream parameters are defined according to :ref:`hevc`.
+ They are described in section 7.4.7 "General slice segment header
+ semantics" of the specification.
+
+.. c:type:: v4l2_ctrl_hevc_slice_params
+
+.. cssclass:: longtable
+
+.. flat-table:: struct v4l2_ctrl_hevc_slice_params
+ :header-rows: 0
+ :stub-columns: 0
+ :widths: 1 1 2
+
+ * - __u32
+ - ``bit_size``
+ - Size (in bits) of the current slice data.
+ * - __u32
+ - ``data_bit_offset``
+ - Offset (in bits) to the video data in the current slice data.
+ * - __u8
+ - ``nal_unit_type``
+ -
+ * - __u8
+ - ``nuh_temporal_id_plus1``
+ -
+ * - __u8
+ - ``slice_type``
+ -
+ (V4L2_HEVC_SLICE_TYPE_I, V4L2_HEVC_SLICE_TYPE_P or
+ V4L2_HEVC_SLICE_TYPE_B).
+ * - __u8
+ - ``colour_plane_id``
+ -
+ * - __u16
+ - ``slice_pic_order_cnt``
+ -
+ * - __u8
+ - ``num_ref_idx_l0_active_minus1``
+ -
+ * - __u8
+ - ``num_ref_idx_l1_active_minus1``
+ -
+ * - __u8
+ - ``collocated_ref_idx``
+ -
+ * - __u8
+ - ``five_minus_max_num_merge_cand``
+ -
+ * - __s8
+ - ``slice_qp_delta``
+ -
+ * - __s8
+ - ``slice_cb_qp_offset``
+ -
+ * - __s8
+ - ``slice_cr_qp_offset``
+ -
+ * - __s8
+ - ``slice_act_y_qp_offset``
+ -
+ * - __s8
+ - ``slice_act_cb_qp_offset``
+ -
+ * - __s8
+ - ``slice_act_cr_qp_offset``
+ -
+ * - __s8
+ - ``slice_beta_offset_div2``
+ -
+ * - __s8
+ - ``slice_tc_offset_div2``
+ -
+ * - __u8
+ - ``pic_struct``
+ -
+ * - __u8
+ - ``num_active_dpb_entries``
+ - The number of entries in ``dpb``.
+ * - __u8
+ - ``ref_idx_l0[V4L2_HEVC_DPB_ENTRIES_NUM_MAX]``
+ - The list of L0 reference elements as indices in the DPB.
+ * - __u8
+ - ``ref_idx_l1[V4L2_HEVC_DPB_ENTRIES_NUM_MAX]``
+ - The list of L1 reference elements as indices in the DPB.
+ * - __u8
+ - ``num_rps_poc_st_curr_before``
+ - The number of reference pictures in the short-term set that come before
+ the current frame.
+ * - __u8
+ - ``num_rps_poc_st_curr_after``
+ - The number of reference pictures in the short-term set that come after
+ the current frame.
+ * - __u8
+ - ``num_rps_poc_lt_curr``
+ - The number of reference pictures in the long-term set.
+ * - __u8
+ - ``padding[7]``
+ - Applications and drivers must set this to zero.
+ * - struct :c:type:`v4l2_hevc_dpb_entry`
+ - ``dpb[V4L2_HEVC_DPB_ENTRIES_NUM_MAX]``
+ - The decoded picture buffer, for meta-data about reference frames.
+ * - struct :c:type:`v4l2_hevc_pred_weight_table`
+ - ``pred_weight_table``
+ - The prediction weight coefficients for inter-picture prediction.
+ * - __u64
+ - ``flags``
+ - See :ref:`Slice Parameters Flags <hevc_slice_params_flags>`
+
+.. _hevc_slice_params_flags:
+
+``Slice Parameters Flags``
+
+.. cssclass:: longtable
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+ :widths: 1 1 2
+
+ * - ``V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_SAO_LUMA``
+ - 0x00000001
+ -
+ * - ``V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_SAO_CHROMA``
+ - 0x00000002
+ -
+ * - ``V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_TEMPORAL_MVP_ENABLED``
+ - 0x00000004
+ -
+ * - ``V4L2_HEVC_SLICE_PARAMS_FLAG_MVD_L1_ZERO``
+ - 0x00000008
+ -
+ * - ``V4L2_HEVC_SLICE_PARAMS_FLAG_CABAC_INIT``
+ - 0x00000010
+ -
+ * - ``V4L2_HEVC_SLICE_PARAMS_FLAG_COLLOCATED_FROM_L0``
+ - 0x00000020
+ -
+ * - ``V4L2_HEVC_SLICE_PARAMS_FLAG_USE_INTEGER_MV``
+ - 0x00000040
+ -
+ * - ``V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_DEBLOCKING_FILTER_DISABLED``
+ - 0x00000080
+ -
+ * - ``V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_LOOP_FILTER_ACROSS_SLICES_ENABLED``
+ - 0x00000100
+ -
+
+.. c:type:: v4l2_hevc_dpb_entry
+
+.. cssclass:: longtable
+
+.. flat-table:: struct v4l2_hevc_dpb_entry
+ :header-rows: 0
+ :stub-columns: 0
+ :widths: 1 1 2
+
+ * - __u64
+ - ``timestamp``
+ - Timestamp of the V4L2 capture buffer to use as reference, used
+ with B-coded and P-coded frames. The timestamp refers to the
+ ``timestamp`` field in struct :c:type:`v4l2_buffer`. Use the
+ :c:func:`v4l2_timeval_to_ns()` function to convert the struct
+ :c:type:`timeval` in struct :c:type:`v4l2_buffer` to a __u64.
+ * - __u8
+ - ``rps``
+ - The reference set for the reference frame
+ (V4L2_HEVC_DPB_ENTRY_RPS_ST_CURR_BEFORE,
+ V4L2_HEVC_DPB_ENTRY_RPS_ST_CURR_AFTER or
+ V4L2_HEVC_DPB_ENTRY_RPS_LT_CURR)
+ * - __u8
+ - ``field_pic``
+ - Whether the reference is a field picture or a frame.
+ * - __u16
+ - ``pic_order_cnt[2]``
+ - The picture order count of the reference. Only the first element of the
+ array is used for frame pictures, while the first element identifies the
+ top field and the second the bottom field in field-coded pictures.
+ * - __u8
+ - ``padding[2]``
+ - Applications and drivers must set this to zero.
+
+.. c:type:: v4l2_hevc_pred_weight_table
+
+.. cssclass:: longtable
+
+.. flat-table:: struct v4l2_hevc_pred_weight_table
+ :header-rows: 0
+ :stub-columns: 0
+ :widths: 1 1 2
+
+ * - __u8
+ - ``luma_log2_weight_denom``
+ -
+ * - __s8
+ - ``delta_chroma_log2_weight_denom``
+ -
+ * - __s8
+ - ``delta_luma_weight_l0[V4L2_HEVC_DPB_ENTRIES_NUM_MAX]``
+ -
+ * - __s8
+ - ``luma_offset_l0[V4L2_HEVC_DPB_ENTRIES_NUM_MAX]``
+ -
+ * - __s8
+ - ``delta_chroma_weight_l0[V4L2_HEVC_DPB_ENTRIES_NUM_MAX][2]``
+ -
+ * - __s8
+ - ``chroma_offset_l0[V4L2_HEVC_DPB_ENTRIES_NUM_MAX][2]``
+ -
+ * - __s8
+ - ``delta_luma_weight_l1[V4L2_HEVC_DPB_ENTRIES_NUM_MAX]``
+ -
+ * - __s8
+ - ``luma_offset_l1[V4L2_HEVC_DPB_ENTRIES_NUM_MAX]``
+ -
+ * - __s8
+ - ``delta_chroma_weight_l1[V4L2_HEVC_DPB_ENTRIES_NUM_MAX][2]``
+ -
+ * - __s8
+ - ``chroma_offset_l1[V4L2_HEVC_DPB_ENTRIES_NUM_MAX][2]``
+ -
+ * - __u8
+ - ``padding[6]``
+ - Applications and drivers must set this to zero.
+
+``V4L2_CID_MPEG_VIDEO_HEVC_DECODE_MODE (enum)``
+ Specifies the decoding mode to use. Currently exposes slice-based and
+ frame-based decoding but new modes might be added later on.
+ This control is used as a modifier for V4L2_PIX_FMT_HEVC_SLICE
+ pixel format. Applications that support V4L2_PIX_FMT_HEVC_SLICE
+ are required to set this control in order to specify the decoding mode
+ that is expected for the buffer.
+ Drivers may expose a single or multiple decoding modes, depending
+ on what they can support.
+
+ .. note::
+
+ This menu control is not yet part of the public kernel API and
+ it is expected to change.
+
+.. c:type:: v4l2_mpeg_video_hevc_decode_mode
+
+.. cssclass:: longtable
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+ :widths: 1 1 2
+
+ * - ``V4L2_MPEG_VIDEO_HEVC_DECODE_MODE_SLICE_BASED``
+ - 0
+ - Decoding is done at the slice granularity.
+ The OUTPUT buffer must contain a single slice.
+ * - ``V4L2_MPEG_VIDEO_HEVC_DECODE_MODE_FRAME_BASED``
+ - 1
+ - Decoding is done at the frame granularity.
+ The OUTPUT buffer must contain all slices needed to decode the
+ frame. The OUTPUT buffer must also contain both fields.
+
+``V4L2_CID_MPEG_VIDEO_HEVC_START_CODE (enum)``
+ Specifies the HEVC slice start code expected for each slice.
+ This control is used as a modifier for V4L2_PIX_FMT_HEVC_SLICE
+ pixel format. Applications that support V4L2_PIX_FMT_HEVC_SLICE
+ are required to set this control in order to specify the start code
+ that is expected for the buffer.
+ Drivers may expose a single or multiple start codes, depending
+ on what they can support.
+
+ .. note::
+
+ This menu control is not yet part of the public kernel API and
+ it is expected to change.
+
+.. c:type:: v4l2_mpeg_video_hevc_start_code
+
+.. cssclass:: longtable
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+ :widths: 1 1 2
+
+ * - ``V4L2_MPEG_VIDEO_HEVC_START_CODE_NONE``
+ - 0
+ - Selecting this value specifies that HEVC slices are passed
+ to the driver without any start code.
+ * - ``V4L2_MPEG_VIDEO_HEVC_START_CODE_ANNEX_B``
+ - 1
+ - Selecting this value specifies that HEVC slices are expected
+        to be prefixed by Annex B start codes. According to :ref:`hevc`,
+        valid start codes can be the 3-byte 0x000001 or the 4-byte
+        0x00000001.
diff --git a/Documentation/media/uapi/v4l/ext-ctrls-flash.rst b/Documentation/media/uapi/v4l/ext-ctrls-flash.rst
index eff056b17167..b9a6b08fbf32 100644
--- a/Documentation/media/uapi/v4l/ext-ctrls-flash.rst
+++ b/Documentation/media/uapi/v4l/ext-ctrls-flash.rst
@@ -98,7 +98,7 @@ Flash Control IDs
V4L2_CID_FLASH_STROBE control.
* - ``V4L2_FLASH_STROBE_SOURCE_EXTERNAL``
- The flash strobe is triggered by an external source. Typically
- this is a sensor, which makes it possible to synchronises the
+ this is a sensor, which makes it possible to synchronise the
flash strobe start to exposure start.
diff --git a/Documentation/media/uapi/v4l/ext-ctrls-image-source.rst b/Documentation/media/uapi/v4l/ext-ctrls-image-source.rst
index 2c3ab5796d76..2d3e2b83d6dd 100644
--- a/Documentation/media/uapi/v4l/ext-ctrls-image-source.rst
+++ b/Documentation/media/uapi/v4l/ext-ctrls-image-source.rst
@@ -55,3 +55,13 @@ Image Source Control IDs
``V4L2_CID_TEST_PATTERN_GREENB (integer)``
Test pattern green (next to blue) colour component.
+
+``V4L2_CID_UNIT_CELL_SIZE (struct)``
+    This control returns the unit cell size in nanometers. The struct
+    :c:type:`v4l2_area` provides the width and the height in separate
+    fields to take asymmetric pixels into account.
+    This control does not take any possible hardware binning into account.
+    The unit cell consists of the whole area of the pixel, both sensitive
+    and non-sensitive.
+    This control is required for automatic calibration of sensors/cameras.
diff --git a/Documentation/media/uapi/v4l/meta-formats.rst b/Documentation/media/uapi/v4l/meta-formats.rst
index b10ca9ee3968..74c8659ee9d6 100644
--- a/Documentation/media/uapi/v4l/meta-formats.rst
+++ b/Documentation/media/uapi/v4l/meta-formats.rst
@@ -24,3 +24,4 @@ These formats are used for the :ref:`metadata` interface only.
pixfmt-meta-uvc
pixfmt-meta-vsp1-hgo
pixfmt-meta-vsp1-hgt
+ pixfmt-meta-vivid
diff --git a/Documentation/media/uapi/v4l/pixfmt-compressed.rst b/Documentation/media/uapi/v4l/pixfmt-compressed.rst
index 292fdc116c77..561bda112809 100644
--- a/Documentation/media/uapi/v4l/pixfmt-compressed.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-compressed.rst
@@ -61,10 +61,10 @@ Compressed Formats
- ``V4L2_PIX_FMT_H264_SLICE``
- 'S264'
- - H264 parsed slice data, without the start code and as
- extracted from the H264 bitstream. This format is adapted for
- stateless video decoders that implement an H264 pipeline
- (using the :ref:`mem2mem` and :ref:`media-request-api`).
+ - H264 parsed slice data, including slice headers, either with or
+ without the start code, as extracted from the H264 bitstream.
+ This format is adapted for stateless video decoders that implement an
+ H264 pipeline (using the :ref:`mem2mem` and :ref:`media-request-api`).
This pixelformat has two modifiers that must be set at least once
through the ``V4L2_CID_MPEG_VIDEO_H264_DECODE_MODE``
and ``V4L2_CID_MPEG_VIDEO_H264_START_CODE`` controls.
@@ -80,6 +80,10 @@ Compressed Formats
appropriate number of macroblocks to decode a full
corresponding frame to the matching capture buffer.
+ The syntax for this format is documented in :ref:`h264`, section
+ 7.3.2.8 "Slice layer without partitioning RBSP syntax" and the following
+ sections.
+
.. note::
This format is not yet part of the public kernel API and it
@@ -188,6 +192,29 @@ Compressed Formats
If :ref:`VIDIOC_ENUM_FMT` reports ``V4L2_FMT_FLAG_CONTINUOUS_BYTESTREAM``
then the decoder has no requirements since it can parse all the
information from the raw bytestream.
+ * .. _V4L2-PIX-FMT-HEVC-SLICE:
+
+ - ``V4L2_PIX_FMT_HEVC_SLICE``
+ - 'S265'
+ - HEVC parsed slice data, as extracted from the HEVC bitstream.
+        This format is adapted for stateless video decoders that implement an
+        HEVC pipeline (using the :ref:`mem2mem` and :ref:`media-request-api`).
+ This pixelformat has two modifiers that must be set at least once
+ through the ``V4L2_CID_MPEG_VIDEO_HEVC_DECODE_MODE``
+ and ``V4L2_CID_MPEG_VIDEO_HEVC_START_CODE`` controls.
+        Metadata associated with the frame to decode is required to be passed
+        through the following controls:
+ * ``V4L2_CID_MPEG_VIDEO_HEVC_SPS``
+ * ``V4L2_CID_MPEG_VIDEO_HEVC_PPS``
+ * ``V4L2_CID_MPEG_VIDEO_HEVC_SLICE_PARAMS``
+ See the :ref:`associated Codec Control IDs <v4l2-mpeg-hevc>`.
+ Buffers associated with this pixel format must contain the appropriate
+ number of macroblocks to decode a full corresponding frame.
+
+ .. note::
+
+ This format is not yet part of the public kernel API and it
+ is expected to change.
* .. _V4L2-PIX-FMT-FWHT:
- ``V4L2_PIX_FMT_FWHT``
diff --git a/Documentation/media/uapi/v4l/pixfmt-meta-vivid.rst b/Documentation/media/uapi/v4l/pixfmt-meta-vivid.rst
new file mode 100644
index 000000000000..eed20eaefe24
--- /dev/null
+++ b/Documentation/media/uapi/v4l/pixfmt-meta-vivid.rst
@@ -0,0 +1,60 @@
+.. This file is dual-licensed: you can use it either under the terms
+.. of the GPL 2.0 or the GFDL 1.1+ license, at your option. Note that this
+.. dual licensing only applies to this file, and not this project as a
+.. whole.
+..
+.. a) This file is free software; you can redistribute it and/or
+.. modify it under the terms of the GNU General Public License as
+.. published by the Free Software Foundation version 2 of
+.. the License.
+..
+.. This file is distributed in the hope that it will be useful,
+.. but WITHOUT ANY WARRANTY; without even the implied warranty of
+.. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+.. GNU General Public License for more details.
+..
+.. Or, alternatively,
+..
+.. b) Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections
+
+.. _v4l2-meta-fmt-vivid:
+
+*******************************
+V4L2_META_FMT_VIVID ('VIVD')
+*******************************
+
+VIVID Metadata Format
+
+
+Description
+===========
+
+This describes the metadata format used by the vivid driver.
+
+It carries the Brightness, Saturation, Contrast and Hue values; each maps to
+the corresponding vivid driver control with respect to its range and default
+value.
+
+It contains the following fields:
+
+.. flat-table:: VIVID Metadata
+ :widths: 1 4
+ :header-rows: 1
+ :stub-columns: 0
+
+ * - Field
+ - Description
+    * - u16 brightness;
+      - Image brightness. The value is in the range 0 to 255, with a default of 128.
+    * - u16 contrast;
+      - Image contrast. The value is in the range 0 to 255, with a default of 128.
+    * - u16 saturation;
+      - Image color saturation. The value is in the range 0 to 255, with a default of 128.
+    * - s16 hue;
+      - Image color balance. The value is in the range -128 to 128, with a default of 0.
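+
+As a minimal sketch (the struct name below is illustrative, not a UAPI
+definition; the field layout is taken from the table above), the metadata
+buffer could be interpreted as:
+
+.. code-block:: c
+
+    #include <stdint.h>
+
+    /* Hypothetical layout matching the table above. */
+    struct vivid_meta {
+            uint16_t brightness;   /* 0..255, default 128 */
+            uint16_t contrast;     /* 0..255, default 128 */
+            uint16_t saturation;   /* 0..255, default 128 */
+            int16_t  hue;          /* -128..128, default 0 */
+    };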
diff --git a/Documentation/media/uapi/v4l/v4l2-selection-targets.rst b/Documentation/media/uapi/v4l/v4l2-selection-targets.rst
index f74f239b0510..aae0c0013eb1 100644
--- a/Documentation/media/uapi/v4l/v4l2-selection-targets.rst
+++ b/Documentation/media/uapi/v4l/v4l2-selection-targets.rst
@@ -38,8 +38,10 @@ of the two interfaces they are used.
* - ``V4L2_SEL_TGT_CROP_DEFAULT``
- 0x0001
- Suggested cropping rectangle that covers the "whole picture".
+	This includes only the active pixels and excludes non-active pixels
+	such as black pixels.
+ - Yes
- Yes
- - No
* - ``V4L2_SEL_TGT_CROP_BOUNDS``
- 0x0002
- Bounds of the crop rectangle. All valid crop rectangles fit inside
diff --git a/Documentation/media/uapi/v4l/vidioc-decoder-cmd.rst b/Documentation/media/uapi/v4l/vidioc-decoder-cmd.rst
index 57f0066f4cff..f1a504836f31 100644
--- a/Documentation/media/uapi/v4l/vidioc-decoder-cmd.rst
+++ b/Documentation/media/uapi/v4l/vidioc-decoder-cmd.rst
@@ -208,7 +208,15 @@ introduced in Linux 3.3. They are, however, mandatory for stateful mem2mem decod
been started yet, the driver will return an ``EPERM`` error code. When
the decoder is already running, this command does nothing. No
flags are defined for this command.
-
+ * - ``V4L2_DEC_CMD_FLUSH``
+ - 4
+ - Flush any held capture buffers. Only valid for stateless decoders.
+	This command is typically used when the application has reached the
+	end of the stream and the last output buffer had the
+	``V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF`` flag set. That flag prevents
+	dequeueing the capture buffer containing the last decoded frame, so
+	this command can be used to explicitly flush that final decoded
+	frame. This command does nothing if there are no held capture buffers.
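+
+As a minimal sketch (``fd`` is an assumed open decoder file handle; error
+handling is omitted), the flush command can be issued as follows:
+
+.. code-block:: c
+
+    #include <string.h>
+    #include <sys/ioctl.h>
+    #include <linux/videodev2.h>
+
+    void flush_last_frame(int fd)
+    {
+            struct v4l2_decoder_cmd cmd;
+
+            /* No flags are defined for V4L2_DEC_CMD_FLUSH. */
+            memset(&cmd, 0, sizeof(cmd));
+            cmd.cmd = V4L2_DEC_CMD_FLUSH;
+
+            ioctl(fd, VIDIOC_DECODER_CMD, &cmd);
+    }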
Return Value
============
diff --git a/Documentation/media/uapi/v4l/vidioc-g-ext-ctrls.rst b/Documentation/media/uapi/v4l/vidioc-g-ext-ctrls.rst
index 13dc1a986249..271cac18afbb 100644
--- a/Documentation/media/uapi/v4l/vidioc-g-ext-ctrls.rst
+++ b/Documentation/media/uapi/v4l/vidioc-g-ext-ctrls.rst
@@ -199,6 +199,11 @@ still cause this situation.
- A pointer to a matrix control of unsigned 32-bit values. Valid if
this control is of type ``V4L2_CTRL_TYPE_U32``.
* -
+ - :c:type:`v4l2_area` *
+ - ``p_area``
+ - A pointer to a struct :c:type:`v4l2_area`. Valid if this control is
+ of type ``V4L2_CTRL_TYPE_AREA``.
+ * -
- void *
- ``ptr``
- A pointer to a compound type which can be an N-dimensional array
diff --git a/Documentation/media/uapi/v4l/vidioc-g-fbuf.rst b/Documentation/media/uapi/v4l/vidioc-g-fbuf.rst
index 7b6179627803..2d197e6bba8f 100644
--- a/Documentation/media/uapi/v4l/vidioc-g-fbuf.rst
+++ b/Documentation/media/uapi/v4l/vidioc-g-fbuf.rst
@@ -63,7 +63,7 @@ EINVAL error code when overlays are not supported.
To set the parameters for a *Video Output Overlay*, applications must
initialize the ``flags`` field of a struct
-struct :c:type:`v4l2_framebuffer`. Since the framebuffer is
+:c:type:`v4l2_framebuffer`. Since the framebuffer is
implemented on the TV card all other parameters are determined by the
driver. When an application calls :ref:`VIDIOC_S_FBUF <VIDIOC_G_FBUF>` with a pointer to
this structure, the driver prepares for the overlay and returns the
diff --git a/Documentation/media/uapi/v4l/vidioc-queryctrl.rst b/Documentation/media/uapi/v4l/vidioc-queryctrl.rst
index a3d56ffbf4cc..6690928e657b 100644
--- a/Documentation/media/uapi/v4l/vidioc-queryctrl.rst
+++ b/Documentation/media/uapi/v4l/vidioc-queryctrl.rst
@@ -443,6 +443,12 @@ See also the examples in :ref:`control`.
- n/a
- A struct :c:type:`v4l2_ctrl_mpeg2_quantization`, containing MPEG-2
quantization matrices for stateless video decoders.
+ * - ``V4L2_CTRL_TYPE_AREA``
+ - n/a
+ - n/a
+ - n/a
+ - A struct :c:type:`v4l2_area`, containing the width and the height
+ of a rectangular area. Units depend on the use case.
* - ``V4L2_CTRL_TYPE_H264_SPS``
- n/a
- n/a
@@ -473,6 +479,24 @@ See also the examples in :ref:`control`.
- n/a
- A struct :c:type:`v4l2_ctrl_h264_decode_params`, containing H264
decode parameters for stateless video decoders.
+ * - ``V4L2_CTRL_TYPE_HEVC_SPS``
+ - n/a
+ - n/a
+ - n/a
+ - A struct :c:type:`v4l2_ctrl_hevc_sps`, containing HEVC Sequence
+ Parameter Set for stateless video decoders.
+ * - ``V4L2_CTRL_TYPE_HEVC_PPS``
+ - n/a
+ - n/a
+ - n/a
+ - A struct :c:type:`v4l2_ctrl_hevc_pps`, containing HEVC Picture
+ Parameter Set for stateless video decoders.
+ * - ``V4L2_CTRL_TYPE_HEVC_SLICE_PARAMS``
+ - n/a
+ - n/a
+ - n/a
+ - A struct :c:type:`v4l2_ctrl_hevc_slice_params`, containing HEVC
+ slice parameters for stateless video decoders.
.. tabularcolumns:: |p{6.6cm}|p{2.2cm}|p{8.7cm}|
diff --git a/Documentation/media/uapi/v4l/vidioc-reqbufs.rst b/Documentation/media/uapi/v4l/vidioc-reqbufs.rst
index d7faef10e39b..d0c643db477a 100644
--- a/Documentation/media/uapi/v4l/vidioc-reqbufs.rst
+++ b/Documentation/media/uapi/v4l/vidioc-reqbufs.rst
@@ -125,6 +125,7 @@ aborting or finishing any DMA in progress, an implicit
.. _V4L2-BUF-CAP-SUPPORTS-DMABUF:
.. _V4L2-BUF-CAP-SUPPORTS-REQUESTS:
.. _V4L2-BUF-CAP-SUPPORTS-ORPHANED-BUFS:
+.. _V4L2-BUF-CAP-SUPPORTS-M2M-HOLD-CAPTURE-BUF:
.. cssclass:: longtable
@@ -150,6 +151,11 @@ aborting or finishing any DMA in progress, an implicit
- The kernel allows calling :ref:`VIDIOC_REQBUFS` while buffers are still
mapped or exported via DMABUF. These orphaned buffers will be freed
when they are unmapped or when the exported DMABUF fds are closed.
+ * - ``V4L2_BUF_CAP_SUPPORTS_M2M_HOLD_CAPTURE_BUF``
+ - 0x00000020
+ - Only valid for stateless decoders. If set, then userspace can set the
+ ``V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF`` flag to hold off on returning the
+ capture buffer until the OUTPUT timestamp changes.
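+
+As a minimal sketch (``fd`` is an assumed open m2m decoder file handle,
+assuming a multiplanar OUTPUT queue; error handling is omitted), the
+capability can be checked after :ref:`VIDIOC_REQBUFS`:
+
+.. code-block:: c
+
+    #include <string.h>
+    #include <sys/ioctl.h>
+    #include <linux/videodev2.h>
+
+    int supports_hold_capture_buf(int fd)
+    {
+            struct v4l2_requestbuffers reqbufs;
+
+            memset(&reqbufs, 0, sizeof(reqbufs));
+            reqbufs.count = 8;
+            reqbufs.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+            reqbufs.memory = V4L2_MEMORY_MMAP;
+
+            if (ioctl(fd, VIDIOC_REQBUFS, &reqbufs) < 0)
+                    return 0;
+
+            /* If set, userspace may queue OUTPUT buffers with the
+             * V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF flag. */
+            return !!(reqbufs.capabilities &
+                      V4L2_BUF_CAP_SUPPORTS_M2M_HOLD_CAPTURE_BUF);
+    }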
Return Value
============
diff --git a/Documentation/media/v4l-drivers/imx.rst b/Documentation/media/v4l-drivers/imx.rst
index 1d7eb8c7bd5c..1246573c1019 100644
--- a/Documentation/media/v4l-drivers/imx.rst
+++ b/Documentation/media/v4l-drivers/imx.rst
@@ -515,10 +515,10 @@ Streaming can then begin independently on the capture device nodes
be used to select any supported YUV pixelformat on the capture device
nodes, including planar.
-SabreAuto with ADV7180 decoder
-------------------------------
+i.MX6Q SabreAuto with ADV7180 decoder
+-------------------------------------
-On the SabreAuto, an on-board ADV7180 SD decoder is connected to the
+On the i.MX6Q SabreAuto, an on-board ADV7180 SD decoder is connected to the
parallel bus input on the internal video mux to IPU1 CSI0.
The following example configures a pipeline to capture from the ADV7180
@@ -547,8 +547,6 @@ This example configures a pipeline to capture from the ADV7180
video decoder, assuming PAL 720x576 input signals, with Motion
Compensated de-interlacing. The adv7180 must output sequential or
alternating fields (field type 'seq-tb' for PAL, or 'alternate').
-$outputfmt can be any format supported by the ipu1_ic_prpvf entity
-at its output pad:
.. code-block:: none
@@ -565,11 +563,70 @@ at its output pad:
media-ctl -V "'ipu1_csi0':1 [fmt:AYUV32/720x576]"
media-ctl -V "'ipu1_vdic':2 [fmt:AYUV32/720x576 field:none]"
media-ctl -V "'ipu1_ic_prp':2 [fmt:AYUV32/720x576 field:none]"
- media-ctl -V "'ipu1_ic_prpvf':1 [fmt:$outputfmt field:none]"
+ media-ctl -V "'ipu1_ic_prpvf':1 [fmt:AYUV32/720x576 field:none]"
+ # Configure "ipu1_ic_prpvf capture" interface (assumed at /dev/video2)
+ v4l2-ctl -d2 --set-fmt-video=field=none
+
+Streaming can then begin on /dev/video2. The v4l2-ctl tool can also be
+used to select any supported YUV pixelformat on /dev/video2.
+
+This platform accepts Composite Video analog inputs to the ADV7180 on
+Ain1 (connector J42).
+
+i.MX6DL SabreAuto with ADV7180 decoder
+--------------------------------------
+
+On the i.MX6DL SabreAuto, an on-board ADV7180 SD decoder is connected to the
+parallel bus input on the internal video mux to IPU1 CSI0.
+
+The following example configures a pipeline to capture from the ADV7180
+video decoder, assuming NTSC 720x480 input signals, using simple
+interweave (unconverted and without motion compensation). The adv7180
+must output sequential or alternating fields (field type 'seq-bt' for
+NTSC, or 'alternate'):
+
+.. code-block:: none
+
+ # Setup links
+ media-ctl -l "'adv7180 4-0021':0 -> 'ipu1_csi0_mux':4[1]"
+ media-ctl -l "'ipu1_csi0_mux':5 -> 'ipu1_csi0':0[1]"
+ media-ctl -l "'ipu1_csi0':2 -> 'ipu1_csi0 capture':0[1]"
+ # Configure pads
+ media-ctl -V "'adv7180 4-0021':0 [fmt:UYVY2X8/720x480 field:seq-bt]"
+ media-ctl -V "'ipu1_csi0_mux':5 [fmt:UYVY2X8/720x480]"
+ media-ctl -V "'ipu1_csi0':2 [fmt:AYUV32/720x480]"
+ # Configure "ipu1_csi0 capture" interface (assumed at /dev/video0)
+ v4l2-ctl -d0 --set-fmt-video=field=interlaced_bt
+
+Streaming can then begin on /dev/video0. The v4l2-ctl tool can also be
+used to select any supported YUV pixelformat on /dev/video0.
+
+This example configures a pipeline to capture from the ADV7180
+video decoder, assuming PAL 720x576 input signals, with Motion
+Compensated de-interlacing. The adv7180 must output sequential or
+alternating fields (field type 'seq-tb' for PAL, or 'alternate').
+
+.. code-block:: none
+
+ # Setup links
+ media-ctl -l "'adv7180 4-0021':0 -> 'ipu1_csi0_mux':4[1]"
+ media-ctl -l "'ipu1_csi0_mux':5 -> 'ipu1_csi0':0[1]"
+ media-ctl -l "'ipu1_csi0':1 -> 'ipu1_vdic':0[1]"
+ media-ctl -l "'ipu1_vdic':2 -> 'ipu1_ic_prp':0[1]"
+ media-ctl -l "'ipu1_ic_prp':2 -> 'ipu1_ic_prpvf':0[1]"
+ media-ctl -l "'ipu1_ic_prpvf':1 -> 'ipu1_ic_prpvf capture':0[1]"
+ # Configure pads
+ media-ctl -V "'adv7180 4-0021':0 [fmt:UYVY2X8/720x576 field:seq-tb]"
+ media-ctl -V "'ipu1_csi0_mux':5 [fmt:UYVY2X8/720x576]"
+ media-ctl -V "'ipu1_csi0':1 [fmt:AYUV32/720x576]"
+ media-ctl -V "'ipu1_vdic':2 [fmt:AYUV32/720x576 field:none]"
+ media-ctl -V "'ipu1_ic_prp':2 [fmt:AYUV32/720x576 field:none]"
+ media-ctl -V "'ipu1_ic_prpvf':1 [fmt:AYUV32/720x576 field:none]"
+ # Configure "ipu1_ic_prpvf capture" interface (assumed at /dev/video2)
+ v4l2-ctl -d2 --set-fmt-video=field=none
-Streaming can then begin on the capture device node at
-"ipu1_ic_prpvf capture". The v4l2-ctl tool can be used to select any
-supported YUV or RGB pixelformat on the capture device node.
+Streaming can then begin on /dev/video2. The v4l2-ctl tool can also be
+used to select any supported YUV pixelformat on /dev/video2.
This platform accepts Composite Video analog inputs to the ADV7180 on
Ain1 (connector J42).
diff --git a/Documentation/media/v4l-drivers/ipu3.rst b/Documentation/media/v4l-drivers/ipu3.rst
index c9f780404eee..e4904ab44e60 100644
--- a/Documentation/media/v4l-drivers/ipu3.rst
+++ b/Documentation/media/v4l-drivers/ipu3.rst
@@ -265,19 +265,56 @@ below.
yavta -w "0x009819A1 1" /dev/v4l-subdev7
-RAW Bayer frames go through the following ImgU pipeline HW blocks to have the
+Certain hardware blocks in the ImgU pipeline can change the frame resolution
+by cropping or scaling. These blocks are the Input Feeder (IF), the Bayer
+Down Scaler (BDS) and the Geometric Distortion Correction (GDC).
+The YUV Scaler can also change the frame resolution, but it is only
+applicable to the secondary output.
+
+RAW Bayer frames go through these ImgU pipeline hardware blocks to have the final
processed image output to the DDR memory.
-RAW Bayer frame -> Input Feeder -> Bayer Down Scaling (BDS) -> Geometric
-Distortion Correction (GDC) -> DDR
+.. kernel-figure:: ipu3_rcb.svg
+ :alt: ipu3 resolution blocks image
-The ImgU V4L2 subdev has to be configured with the supported resolutions in all
-the above HW blocks, for a given input resolution.
+ IPU3 resolution change hardware blocks
+
+**Input Feeder**
+
+The Input Feeder gets the Bayer frame data from the sensor. It can crop
+lines and columns from the frame and then store the pixels in the device's
+internal pixel buffer, ready to be read out by the following blocks.
+
+**Bayer Down Scaler**
+
+The Bayer Down Scaler is capable of performing image scaling in the Bayer
+domain. The downscale factor can be configured from 1X to 1/4X in each
+axis, in steps of 0.03125 (1/32).
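+
+For example (a sketch, not taken from the driver), an output width for a
+given factor ``num``/32 could be computed as:
+
+.. code-block:: c
+
+    /* BDS downscale factor is num/32, num = 8 (1/4X) .. 32 (1X),
+     * i.e. steps of 1/32. Returns -1 for an out-of-range factor. */
+    static int bds_output_width(int input_width, int num)
+    {
+            if (num < 8 || num > 32)
+                    return -1;
+            return input_width * num / 32;
+    }
+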
+**Geometric Distortion Correction**
+
+Geometric Distortion Correction is used to correct distortions and to
+perform image filtering. It needs some extra filter and envelope padding
+pixels to work, so the input resolution of the GDC must be larger than
+its output resolution.
+
+**YUV Scaler**
+
+The YUV Scaler is similar to the BDS, but it mainly performs image
+downscaling in the YUV domain. It supports downscaling by up to 1/12X,
+but it cannot be applied to the main output.
+
+The ImgU V4L2 subdev has to be configured with the supported resolutions in all
+the above hardware blocks, for a given input resolution.
For a given supported resolution for an input frame, the Input Feeder, Bayer
-Down Scaling and GDC blocks should be configured with the supported resolutions.
-This information can be obtained by looking at the following IPU3 ImgU
-configuration table.
+Down Scaler and GDC blocks should be configured with the supported resolutions,
+as each hardware block has its own alignment requirement.
+
+You must configure the output resolution of these hardware blocks carefully
+to meet the hardware requirements while keeping the maximum field of view.
+The intermediate resolutions can be generated by a specific tool, and this
+information can be obtained by looking at the following IPU3 ImgU
+configuration table:
https://chromium.googlesource.com/chromiumos/overlays/board-overlays/+/master
diff --git a/Documentation/media/v4l-drivers/ipu3_rcb.svg b/Documentation/media/v4l-drivers/ipu3_rcb.svg
new file mode 100644
index 000000000000..d878421b42a0
--- /dev/null
+++ b/Documentation/media/v4l-drivers/ipu3_rcb.svg
@@ -0,0 +1,331 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" width="774pt" height="152pt" viewBox="0 0 774 152" version="1.1">
+<defs>
+<g>
+<symbol overflow="visible" id="glyph0-0">
+<path style="stroke:none;" d="M 1 0 L 1 -15 L 9 -15 L 9 0 Z M 8 -1 L 8 -14 L 2 -14 L 2 -1 Z M 8 -1 "/>
+</symbol>
+<symbol overflow="visible" id="glyph0-1">
+<path style="stroke:none;" d="M 4.6875 -1.15625 C 5.519531 -1.15625 6.15625 -1.316406 6.59375 -1.640625 C 7.039062 -1.960938 7.265625 -2.441406 7.265625 -3.078125 C 7.265625 -3.460938 7.179688 -3.789062 7.015625 -4.0625 C 6.859375 -4.34375 6.644531 -4.582031 6.375 -4.78125 C 6.113281 -4.988281 5.816406 -5.171875 5.484375 -5.328125 C 5.148438 -5.484375 4.804688 -5.628906 4.453125 -5.765625 C 4.054688 -5.921875 3.675781 -6.097656 3.3125 -6.296875 C 2.945312 -6.492188 2.617188 -6.726562 2.328125 -7 C 2.046875 -7.269531 1.820312 -7.582031 1.65625 -7.9375 C 1.488281 -8.300781 1.40625 -8.726562 1.40625 -9.21875 C 1.40625 -10.300781 1.742188 -11.144531 2.421875 -11.75 C 3.097656 -12.351562 4.046875 -12.65625 5.265625 -12.65625 C 5.597656 -12.65625 5.925781 -12.628906 6.25 -12.578125 C 6.570312 -12.535156 6.875 -12.476562 7.15625 -12.40625 C 7.4375 -12.34375 7.6875 -12.265625 7.90625 -12.171875 C 8.125 -12.085938 8.300781 -12 8.4375 -11.90625 L 7.921875 -10.515625 C 7.648438 -10.679688 7.28125 -10.84375 6.8125 -11 C 6.351562 -11.15625 5.835938 -11.234375 5.265625 -11.234375 C 4.660156 -11.234375 4.140625 -11.082031 3.703125 -10.78125 C 3.265625 -10.488281 3.046875 -10.039062 3.046875 -9.4375 C 3.046875 -9.09375 3.109375 -8.800781 3.234375 -8.5625 C 3.359375 -8.320312 3.53125 -8.109375 3.75 -7.921875 C 3.96875 -7.742188 4.222656 -7.582031 4.515625 -7.4375 C 4.804688 -7.289062 5.128906 -7.144531 5.484375 -7 C 5.984375 -6.789062 6.441406 -6.578125 6.859375 -6.359375 C 7.285156 -6.148438 7.648438 -5.894531 7.953125 -5.59375 C 8.253906 -5.300781 8.488281 -4.953125 8.65625 -4.546875 C 8.820312 -4.148438 8.90625 -3.664062 8.90625 -3.09375 C 8.90625 -2.019531 8.539062 -1.191406 7.8125 -0.609375 C 7.082031 -0.0234375 6.039062 0.265625 4.6875 0.265625 C 4.238281 0.265625 3.820312 0.234375 3.4375 0.171875 C 3.050781 0.109375 2.707031 0.03125 2.40625 -0.0625 C 2.101562 -0.15625 1.835938 -0.25 1.609375 -0.34375 C 1.390625 -0.4375 1.21875 -0.519531 1.09375 -0.59375 L 1.59375 -1.953125 C 1.863281 -1.804688 2.257812 -1.632812 2.78125 -1.4375 C 3.300781 -1.25 3.9375 -1.15625 4.6875 -1.15625 Z M 4.6875 -1.15625 "/>
+</symbol>
+<symbol overflow="visible" id="glyph0-2">
+<path style="stroke:none;" d="M 5.1875 -9.5 C 6.4375 -9.5 7.398438 -9.109375 8.078125 -8.328125 C 8.753906 -7.546875 9.09375 -6.363281 9.09375 -4.78125 L 9.09375 -4.203125 L 2.453125 -4.203125 C 2.523438 -3.242188 2.84375 -2.515625 3.40625 -2.015625 C 3.976562 -1.515625 4.773438 -1.265625 5.796875 -1.265625 C 6.390625 -1.265625 6.890625 -1.3125 7.296875 -1.40625 C 7.710938 -1.5 8.023438 -1.597656 8.234375 -1.703125 L 8.453125 -0.296875 C 8.253906 -0.191406 7.894531 -0.0820312 7.375 0.03125 C 6.851562 0.15625 6.269531 0.21875 5.625 0.21875 C 4.820312 0.21875 4.113281 0.0976562 3.5 -0.140625 C 2.894531 -0.390625 2.394531 -0.726562 2 -1.15625 C 1.601562 -1.582031 1.300781 -2.09375 1.09375 -2.6875 C 0.894531 -3.28125 0.796875 -3.925781 0.796875 -4.625 C 0.796875 -5.445312 0.921875 -6.164062 1.171875 -6.78125 C 1.429688 -7.394531 1.765625 -7.898438 2.171875 -8.296875 C 2.585938 -8.703125 3.054688 -9.003906 3.578125 -9.203125 C 4.097656 -9.398438 4.632812 -9.5 5.1875 -9.5 Z M 7.421875 -5.546875 C 7.421875 -6.328125 7.210938 -6.945312 6.796875 -7.40625 C 6.390625 -7.863281 5.84375 -8.09375 5.15625 -8.09375 C 4.769531 -8.09375 4.421875 -8.019531 4.109375 -7.875 C 3.796875 -7.726562 3.523438 -7.535156 3.296875 -7.296875 C 3.066406 -7.054688 2.882812 -6.78125 2.75 -6.46875 C 2.625 -6.164062 2.539062 -5.859375 2.5 -5.546875 Z M 7.421875 -5.546875 "/>
+</symbol>
+<symbol overflow="visible" id="glyph0-3">
+<path style="stroke:none;" d="M 1.421875 -9.015625 C 2.015625 -9.160156 2.609375 -9.273438 3.203125 -9.359375 C 3.796875 -9.441406 4.351562 -9.484375 4.875 -9.484375 C 6.113281 -9.484375 7.050781 -9.160156 7.6875 -8.515625 C 8.320312 -7.878906 8.640625 -6.851562 8.640625 -5.4375 L 8.640625 0 L 7 0 L 7 -5.140625 C 7 -5.742188 6.945312 -6.226562 6.84375 -6.59375 C 6.738281 -6.96875 6.585938 -7.257812 6.390625 -7.46875 C 6.191406 -7.675781 5.957031 -7.816406 5.6875 -7.890625 C 5.414062 -7.972656 5.117188 -8.015625 4.796875 -8.015625 C 4.535156 -8.015625 4.253906 -8 3.953125 -7.96875 C 3.648438 -7.9375 3.359375 -7.894531 3.078125 -7.84375 L 3.078125 0 L 1.421875 0 Z M 1.421875 -9.015625 "/>
+</symbol>
+<symbol overflow="visible" id="glyph0-4">
+<path style="stroke:none;" d="M 7.015625 -2.3125 C 7.015625 -2.644531 6.878906 -2.914062 6.609375 -3.125 C 6.335938 -3.34375 6 -3.53125 5.59375 -3.6875 C 5.1875 -3.851562 4.742188 -4.015625 4.265625 -4.171875 C 3.785156 -4.328125 3.335938 -4.515625 2.921875 -4.734375 C 2.515625 -4.960938 2.175781 -5.242188 1.90625 -5.578125 C 1.632812 -5.910156 1.5 -6.34375 1.5 -6.875 C 1.5 -7.625 1.800781 -8.25 2.40625 -8.75 C 3.007812 -9.25 3.960938 -9.5 5.265625 -9.5 C 5.765625 -9.5 6.285156 -9.460938 6.828125 -9.390625 C 7.367188 -9.316406 7.832031 -9.21875 8.21875 -9.09375 L 7.921875 -7.625 C 7.816406 -7.675781 7.671875 -7.726562 7.484375 -7.78125 C 7.296875 -7.84375 7.082031 -7.894531 6.84375 -7.9375 C 6.601562 -7.988281 6.34375 -8.023438 6.0625 -8.046875 C 5.789062 -8.078125 5.53125 -8.09375 5.28125 -8.09375 C 3.84375 -8.09375 3.125 -7.703125 3.125 -6.921875 C 3.125 -6.640625 3.257812 -6.398438 3.53125 -6.203125 C 3.800781 -6.015625 4.144531 -5.835938 4.5625 -5.671875 C 4.976562 -5.515625 5.425781 -5.351562 5.90625 -5.1875 C 6.382812 -5.019531 6.828125 -4.816406 7.234375 -4.578125 C 7.648438 -4.335938 7.992188 -4.046875 8.265625 -3.703125 C 8.546875 -3.367188 8.6875 -2.941406 8.6875 -2.421875 C 8.6875 -1.578125 8.359375 -0.925781 7.703125 -0.46875 C 7.046875 -0.0078125 6.007812 0.21875 4.59375 0.21875 C 3.957031 0.21875 3.375 0.164062 2.84375 0.0625 C 2.3125 -0.0390625 1.800781 -0.203125 1.3125 -0.421875 L 1.640625 -1.921875 C 2.109375 -1.703125 2.597656 -1.523438 3.109375 -1.390625 C 3.617188 -1.253906 4.171875 -1.1875 4.765625 -1.1875 C 6.265625 -1.1875 7.015625 -1.5625 7.015625 -2.3125 Z M 7.015625 -2.3125 "/>
+</symbol>
+<symbol overflow="visible" id="glyph0-5">
+<path style="stroke:none;" d="M 9.203125 -4.640625 C 9.203125 -3.910156 9.097656 -3.25 8.890625 -2.65625 C 8.679688 -2.0625 8.390625 -1.550781 8.015625 -1.125 C 7.640625 -0.695312 7.191406 -0.363281 6.671875 -0.125 C 6.160156 0.101562 5.597656 0.21875 4.984375 0.21875 C 4.378906 0.21875 3.820312 0.101562 3.3125 -0.125 C 2.800781 -0.363281 2.359375 -0.695312 1.984375 -1.125 C 1.609375 -1.550781 1.316406 -2.0625 1.109375 -2.65625 C 0.898438 -3.25 0.796875 -3.910156 0.796875 -4.640625 C 0.796875 -5.367188 0.898438 -6.035156 1.109375 -6.640625 C 1.316406 -7.242188 1.609375 -7.753906 1.984375 -8.171875 C 2.359375 -8.585938 2.800781 -8.910156 3.3125 -9.140625 C 3.820312 -9.378906 4.378906 -9.5 4.984375 -9.5 C 5.597656 -9.5 6.160156 -9.378906 6.671875 -9.140625 C 7.191406 -8.910156 7.640625 -8.585938 8.015625 -8.171875 C 8.390625 -7.753906 8.679688 -7.242188 8.890625 -6.640625 C 9.097656 -6.035156 9.203125 -5.367188 9.203125 -4.640625 Z M 7.5 -4.640625 C 7.5 -5.691406 7.269531 -6.519531 6.8125 -7.125 C 6.363281 -7.738281 5.753906 -8.046875 4.984375 -8.046875 C 4.222656 -8.046875 3.617188 -7.738281 3.171875 -7.125 C 2.722656 -6.519531 2.5 -5.691406 2.5 -4.640625 C 2.5 -3.597656 2.722656 -2.773438 3.171875 -2.171875 C 3.617188 -1.566406 4.222656 -1.265625 4.984375 -1.265625 C 5.753906 -1.265625 6.363281 -1.566406 6.8125 -2.171875 C 7.269531 -2.773438 7.5 -3.597656 7.5 -4.640625 Z M 7.5 -4.640625 "/>
+</symbol>
+<symbol overflow="visible" id="glyph0-6">
+<path style="stroke:none;" d="M 2.140625 0 L 2.140625 -8.78125 C 3.503906 -9.25 4.878906 -9.484375 6.265625 -9.484375 C 6.691406 -9.484375 7.097656 -9.460938 7.484375 -9.421875 C 7.867188 -9.390625 8.296875 -9.320312 8.765625 -9.21875 L 8.453125 -7.765625 C 8.023438 -7.878906 7.648438 -7.953125 7.328125 -7.984375 C 7.003906 -8.023438 6.648438 -8.046875 6.265625 -8.046875 C 5.453125 -8.046875 4.625 -7.929688 3.78125 -7.703125 L 3.78125 0 Z M 2.140625 0 "/>
+</symbol>
+<symbol overflow="visible" id="glyph0-7">
+<path style="stroke:none;" d="M 5.8125 -10.984375 L 5.8125 -1.40625 L 8.21875 -1.40625 L 8.21875 0 L 1.78125 0 L 1.78125 -1.40625 L 4.1875 -1.40625 L 4.1875 -10.984375 L 1.78125 -10.984375 L 1.78125 -12.375 L 8.21875 -12.375 L 8.21875 -10.984375 Z M 5.8125 -10.984375 "/>
+</symbol>
+<symbol overflow="visible" id="glyph0-8">
+<path style="stroke:none;" d="M 1.8125 0 L 1.8125 -12.375 L 8.84375 -12.375 L 8.84375 -10.984375 L 3.453125 -10.984375 L 3.453125 -7.125 L 8.203125 -7.125 L 8.203125 -5.734375 L 3.453125 -5.734375 L 3.453125 0 Z M 1.8125 0 "/>
+</symbol>
+<symbol overflow="visible" id="glyph0-9">
+<path style="stroke:none;" d="M 4.078125 0.09375 C 3.878906 0.09375 3.644531 0.0859375 3.375 0.078125 C 3.113281 0.0664062 2.847656 0.0507812 2.578125 0.03125 C 2.316406 0.0078125 2.050781 -0.0195312 1.78125 -0.0625 C 1.507812 -0.101562 1.273438 -0.148438 1.078125 -0.203125 L 1.078125 -12.203125 C 1.273438 -12.253906 1.503906 -12.300781 1.765625 -12.34375 C 2.023438 -12.382812 2.289062 -12.410156 2.5625 -12.421875 C 2.84375 -12.441406 3.113281 -12.457031 3.375 -12.46875 C 3.632812 -12.488281 3.867188 -12.5 4.078125 -12.5 C 4.691406 -12.5 5.265625 -12.445312 5.796875 -12.34375 C 6.328125 -12.238281 6.789062 -12.054688 7.1875 -11.796875 C 7.582031 -11.546875 7.890625 -11.210938 8.109375 -10.796875 C 8.328125 -10.390625 8.4375 -9.878906 8.4375 -9.265625 C 8.4375 -8.960938 8.390625 -8.675781 8.296875 -8.40625 C 8.203125 -8.132812 8.070312 -7.878906 7.90625 -7.640625 C 7.738281 -7.398438 7.546875 -7.1875 7.328125 -7 C 7.109375 -6.820312 6.875 -6.6875 6.625 -6.59375 C 7.300781 -6.40625 7.867188 -6.0625 8.328125 -5.5625 C 8.785156 -5.0625 9.015625 -4.414062 9.015625 -3.625 C 9.015625 -2.394531 8.617188 -1.46875 7.828125 -0.84375 C 7.046875 -0.21875 5.796875 0.09375 4.078125 0.09375 Z M 2.71875 -5.78125 L 2.71875 -1.359375 C 2.75 -1.347656 2.898438 -1.332031 3.171875 -1.3125 C 3.441406 -1.289062 3.785156 -1.28125 4.203125 -1.28125 C 4.609375 -1.28125 5 -1.3125 5.375 -1.375 C 5.757812 -1.445312 6.097656 -1.570312 6.390625 -1.75 C 6.691406 -1.925781 6.929688 -2.160156 7.109375 -2.453125 C 7.285156 -2.753906 7.375 -3.132812 7.375 -3.59375 C 7.375 -4.007812 7.289062 -4.359375 7.125 -4.640625 C 6.957031 -4.921875 6.738281 -5.144531 6.46875 -5.3125 C 6.195312 -5.476562 5.878906 -5.597656 5.515625 -5.671875 C 5.160156 -5.742188 4.789062 -5.78125 4.40625 -5.78125 Z M 2.71875 -7.140625 L 4.015625 -7.140625 C 4.347656 -7.140625 4.679688 -7.171875 5.015625 -7.234375 C 5.347656 -7.304688 5.644531 -7.414062 5.90625 -7.5625 C 6.175781 -7.707031 6.390625 -7.90625 6.546875 -8.15625 C 6.710938 -8.414062 6.796875 -8.738281 6.796875 -9.125 C 6.796875 -9.476562 6.722656 -9.78125 6.578125 -10.03125 C 6.429688 -10.289062 6.238281 -10.5 6 -10.65625 C 5.757812 -10.820312 5.484375 -10.9375 5.171875 -11 C 4.859375 -11.0625 4.53125 -11.09375 4.1875 -11.09375 C 3.832031 -11.09375 3.523438 -11.085938 3.265625 -11.078125 C 3.003906 -11.078125 2.820312 -11.066406 2.71875 -11.046875 Z M 2.71875 -7.140625 "/>
+</symbol>
+<symbol overflow="visible" id="glyph0-10">
+<path style="stroke:none;" d="M 9.203125 -6.203125 C 9.203125 -5.054688 9.054688 -4.082031 8.765625 -3.28125 C 8.484375 -2.476562 8.09375 -1.828125 7.59375 -1.328125 C 7.09375 -0.828125 6.5 -0.460938 5.8125 -0.234375 C 5.125 -0.015625 4.378906 0.09375 3.578125 0.09375 C 2.753906 0.09375 1.921875 -0.00390625 1.078125 -0.203125 L 1.078125 -12.203125 C 1.921875 -12.398438 2.753906 -12.5 3.578125 -12.5 C 4.378906 -12.5 5.125 -12.382812 5.8125 -12.15625 C 6.5 -11.925781 7.09375 -11.554688 7.59375 -11.046875 C 8.09375 -10.546875 8.484375 -9.894531 8.765625 -9.09375 C 9.054688 -8.300781 9.203125 -7.335938 9.203125 -6.203125 Z M 2.71875 -1.375 C 3.050781 -1.332031 3.390625 -1.3125 3.734375 -1.3125 C 4.335938 -1.3125 4.875 -1.398438 5.34375 -1.578125 C 5.8125 -1.765625 6.203125 -2.054688 6.515625 -2.453125 C 6.835938 -2.847656 7.082031 -3.351562 7.25 -3.96875 C 7.425781 -4.59375 7.515625 -5.335938 7.515625 -6.203125 C 7.515625 -7.878906 7.191406 -9.109375 6.546875 -9.890625 C 5.898438 -10.679688 4.945312 -11.078125 3.6875 -11.078125 C 3.507812 -11.078125 3.335938 -11.070312 3.171875 -11.0625 C 3.003906 -11.0625 2.851562 -11.046875 2.71875 -11.015625 Z M 2.71875 -1.375 "/>
+</symbol>
+<symbol overflow="visible" id="glyph0-11">
+<path style="stroke:none;" d="M 7.453125 -6.09375 L 9.09375 -6.09375 L 9.09375 -0.296875 C 8.84375 -0.203125 8.4375 -0.0859375 7.875 0.046875 C 7.320312 0.191406 6.664062 0.265625 5.90625 0.265625 C 5.15625 0.265625 4.472656 0.125 3.859375 -0.15625 C 3.242188 -0.445312 2.71875 -0.863281 2.28125 -1.40625 C 1.851562 -1.957031 1.519531 -2.632812 1.28125 -3.4375 C 1.039062 -4.25 0.921875 -5.171875 0.921875 -6.203125 C 0.921875 -7.242188 1.050781 -8.160156 1.3125 -8.953125 C 1.582031 -9.753906 1.945312 -10.425781 2.40625 -10.96875 C 2.863281 -11.519531 3.398438 -11.9375 4.015625 -12.21875 C 4.628906 -12.507812 5.289062 -12.65625 6 -12.65625 C 6.457031 -12.65625 6.859375 -12.617188 7.203125 -12.546875 C 7.546875 -12.484375 7.835938 -12.40625 8.078125 -12.3125 C 8.328125 -12.226562 8.53125 -12.132812 8.6875 -12.03125 C 8.851562 -11.925781 8.976562 -11.847656 9.0625 -11.796875 L 8.515625 -10.421875 C 8.210938 -10.660156 7.847656 -10.851562 7.421875 -11 C 7.003906 -11.15625 6.5625 -11.234375 6.09375 -11.234375 C 5.59375 -11.234375 5.125 -11.113281 4.6875 -10.875 C 4.257812 -10.632812 3.890625 -10.296875 3.578125 -9.859375 C 3.273438 -9.421875 3.035156 -8.890625 2.859375 -8.265625 C 2.679688 -7.648438 2.59375 -6.960938 2.59375 -6.203125 C 2.59375 -5.453125 2.671875 -4.769531 2.828125 -4.15625 C 2.984375 -3.539062 3.207031 -3.015625 3.5 -2.578125 C 3.789062 -2.140625 4.148438 -1.796875 4.578125 -1.546875 C 5.015625 -1.304688 5.515625 -1.1875 6.078125 -1.1875 C 6.460938 -1.1875 6.757812 -1.210938 6.96875 -1.265625 C 7.1875 -1.316406 7.347656 -1.367188 7.453125 -1.421875 Z M 7.453125 -6.09375 "/>
+</symbol>
+<symbol overflow="visible" id="glyph0-12">
+<path style="stroke:none;" d="M 9.203125 -0.515625 C 8.734375 -0.253906 8.234375 -0.0625 7.703125 0.0625 C 7.179688 0.195312 6.617188 0.265625 6.015625 0.265625 C 5.285156 0.265625 4.609375 0.132812 3.984375 -0.125 C 3.367188 -0.382812 2.832031 -0.773438 2.375 -1.296875 C 1.925781 -1.828125 1.570312 -2.5 1.3125 -3.3125 C 1.050781 -4.132812 0.921875 -5.097656 0.921875 -6.203125 C 0.921875 -7.253906 1.054688 -8.179688 1.328125 -8.984375 C 1.597656 -9.785156 1.96875 -10.457031 2.4375 -11 C 2.90625 -11.539062 3.453125 -11.953125 4.078125 -12.234375 C 4.703125 -12.515625 5.367188 -12.65625 6.078125 -12.65625 C 6.566406 -12.65625 7.066406 -12.585938 7.578125 -12.453125 C 8.097656 -12.328125 8.601562 -12.109375 9.09375 -11.796875 L 8.625 -10.4375 C 7.738281 -10.945312 6.910156 -11.203125 6.140625 -11.203125 C 5.585938 -11.203125 5.09375 -11.082031 4.65625 -10.84375 C 4.226562 -10.613281 3.859375 -10.28125 3.546875 -9.84375 C 3.242188 -9.40625 3.007812 -8.878906 2.84375 -8.265625 C 2.675781 -7.648438 2.59375 -6.960938 2.59375 -6.203125 C 2.59375 -5.347656 2.679688 -4.609375 2.859375 -3.984375 C 3.046875 -3.359375 3.296875 -2.835938 3.609375 -2.421875 C 3.929688 -2.003906 4.316406 -1.695312 4.765625 -1.5 C 5.210938 -1.300781 5.695312 -1.203125 6.21875 -1.203125 C 6.601562 -1.203125 7.007812 -1.25 7.4375 -1.34375 C 7.863281 -1.445312 8.304688 -1.625 8.765625 -1.875 Z M 9.203125 -0.515625 "/>
+</symbol>
+<symbol overflow="visible" id="glyph1-0">
+<path style="stroke:none;" d="M 0.59375 0 L 0.59375 -9 L 5.40625 -9 L 5.40625 0 Z M 4.796875 -0.59375 L 4.796875 -8.40625 L 1.203125 -8.40625 L 1.203125 -0.59375 Z M 4.796875 -0.59375 "/>
+</symbol>
+<symbol overflow="visible" id="glyph1-1">
+<path style="stroke:none;" d="M 2.515625 0 L 2.515625 -2.765625 C 2.023438 -3.554688 1.582031 -4.332031 1.1875 -5.09375 C 0.789062 -5.851562 0.445312 -6.628906 0.15625 -7.421875 L 1.265625 -7.421875 C 1.492188 -6.753906 1.757812 -6.113281 2.0625 -5.5 C 2.363281 -4.882812 2.6875 -4.253906 3.03125 -3.609375 C 3.394531 -4.285156 3.71875 -4.929688 4 -5.546875 C 4.28125 -6.160156 4.539062 -6.785156 4.78125 -7.421875 L 5.859375 -7.421875 C 5.554688 -6.640625 5.207031 -5.875 4.8125 -5.125 C 4.414062 -4.382812 3.976562 -3.601562 3.5 -2.78125 L 3.5 0 Z M 2.515625 0 "/>
+</symbol>
+<symbol overflow="visible" id="glyph1-2">
+<path style="stroke:none;" d="M 3 0.15625 C 2.5625 0.15625 2.1875 0.09375 1.875 -0.03125 C 1.570312 -0.164062 1.320312 -0.347656 1.125 -0.578125 C 0.9375 -0.804688 0.796875 -1.085938 0.703125 -1.421875 C 0.617188 -1.765625 0.578125 -2.144531 0.578125 -2.5625 L 0.578125 -7.421875 L 1.5625 -7.421875 L 1.5625 -2.65625 C 1.5625 -2.28125 1.59375 -1.96875 1.65625 -1.71875 C 1.726562 -1.46875 1.828125 -1.265625 1.953125 -1.109375 C 2.078125 -0.960938 2.222656 -0.859375 2.390625 -0.796875 C 2.566406 -0.734375 2.769531 -0.703125 3 -0.703125 C 3.226562 -0.703125 3.425781 -0.734375 3.59375 -0.796875 C 3.769531 -0.859375 3.921875 -0.960938 4.046875 -1.109375 C 4.171875 -1.265625 4.265625 -1.46875 4.328125 -1.71875 C 4.398438 -1.96875 4.4375 -2.28125 4.4375 -2.65625 L 4.4375 -7.421875 L 5.421875 -7.421875 L 5.421875 -2.5625 C 5.421875 -2.144531 5.375 -1.765625 5.28125 -1.421875 C 5.195312 -1.085938 5.054688 -0.804688 4.859375 -0.578125 C 4.671875 -0.347656 4.421875 -0.164062 4.109375 -0.03125 C 3.804688 0.09375 3.4375 0.15625 3 0.15625 Z M 3 0.15625 "/>
+</symbol>
+<symbol overflow="visible" id="glyph1-3">
+<path style="stroke:none;" d="M 1.21875 -7.421875 C 1.320312 -6.921875 1.445312 -6.375 1.59375 -5.78125 C 1.738281 -5.1875 1.890625 -4.585938 2.046875 -3.984375 C 2.210938 -3.390625 2.378906 -2.820312 2.546875 -2.28125 C 2.722656 -1.738281 2.882812 -1.265625 3.03125 -0.859375 C 3.15625 -1.265625 3.300781 -1.742188 3.46875 -2.296875 C 3.644531 -2.847656 3.816406 -3.421875 3.984375 -4.015625 C 4.148438 -4.609375 4.304688 -5.203125 4.453125 -5.796875 C 4.609375 -6.390625 4.734375 -6.929688 4.828125 -7.421875 L 5.859375 -7.421875 C 5.796875 -7.109375 5.691406 -6.679688 5.546875 -6.140625 C 5.398438 -5.597656 5.226562 -4.992188 5.03125 -4.328125 C 4.832031 -3.660156 4.609375 -2.953125 4.359375 -2.203125 C 4.117188 -1.453125 3.863281 -0.71875 3.59375 0 L 2.375 0 C 2.125 -0.71875 1.878906 -1.445312 1.640625 -2.1875 C 1.410156 -2.9375 1.195312 -3.644531 1 -4.3125 C 0.800781 -4.976562 0.628906 -5.582031 0.484375 -6.125 C 0.335938 -6.675781 0.226562 -7.109375 0.15625 -7.421875 Z M 1.21875 -7.421875 "/>
+</symbol>
+<symbol overflow="visible" id="glyph1-4">
+<path style="stroke:none;" d=""/>
+</symbol>
+<symbol overflow="visible" id="glyph1-5">
+<path style="stroke:none;" d="M 5.515625 -3.71875 C 5.515625 -3.03125 5.425781 -2.445312 5.25 -1.96875 C 5.082031 -1.488281 4.847656 -1.097656 4.546875 -0.796875 C 4.253906 -0.492188 3.898438 -0.273438 3.484375 -0.140625 C 3.078125 -0.00390625 2.628906 0.0625 2.140625 0.0625 C 1.648438 0.0625 1.148438 0 0.640625 -0.125 L 0.640625 -7.3125 C 1.148438 -7.4375 1.648438 -7.5 2.140625 -7.5 C 2.628906 -7.5 3.078125 -7.429688 3.484375 -7.296875 C 3.898438 -7.160156 4.253906 -6.941406 4.546875 -6.640625 C 4.847656 -6.335938 5.082031 -5.941406 5.25 -5.453125 C 5.425781 -4.972656 5.515625 -4.394531 5.515625 -3.71875 Z M 1.625 -0.828125 C 1.832031 -0.804688 2.039062 -0.796875 2.25 -0.796875 C 2.601562 -0.796875 2.921875 -0.847656 3.203125 -0.953125 C 3.484375 -1.054688 3.71875 -1.226562 3.90625 -1.46875 C 4.101562 -1.707031 4.253906 -2.007812 4.359375 -2.375 C 4.460938 -2.75 4.515625 -3.195312 4.515625 -3.71875 C 4.515625 -4.726562 4.316406 -5.46875 3.921875 -5.9375 C 3.535156 -6.40625 2.960938 -6.640625 2.203125 -6.640625 C 2.097656 -6.640625 1.992188 -6.640625 1.890625 -6.640625 C 1.796875 -6.640625 1.707031 -6.628906 1.625 -6.609375 Z M 1.625 -0.828125 "/>
+</symbol>
+<symbol overflow="visible" id="glyph1-6">
+<path style="stroke:none;" d="M 5.515625 -2.78125 C 5.515625 -2.34375 5.453125 -1.945312 5.328125 -1.59375 C 5.203125 -1.238281 5.023438 -0.929688 4.796875 -0.671875 C 4.578125 -0.410156 4.3125 -0.210938 4 -0.078125 C 3.695312 0.0546875 3.359375 0.125 2.984375 0.125 C 2.628906 0.125 2.296875 0.0546875 1.984375 -0.078125 C 1.679688 -0.210938 1.414062 -0.410156 1.1875 -0.671875 C 0.96875 -0.929688 0.796875 -1.238281 0.671875 -1.59375 C 0.546875 -1.945312 0.484375 -2.34375 0.484375 -2.78125 C 0.484375 -3.21875 0.546875 -3.617188 0.671875 -3.984375 C 0.796875 -4.347656 0.96875 -4.65625 1.1875 -4.90625 C 1.414062 -5.15625 1.679688 -5.347656 1.984375 -5.484375 C 2.296875 -5.628906 2.628906 -5.703125 2.984375 -5.703125 C 3.359375 -5.703125 3.695312 -5.628906 4 -5.484375 C 4.3125 -5.347656 4.578125 -5.15625 4.796875 -4.90625 C 5.023438 -4.65625 5.203125 -4.347656 5.328125 -3.984375 C 5.453125 -3.617188 5.515625 -3.21875 5.515625 -2.78125 Z M 4.5 -2.78125 C 4.5 -3.414062 4.363281 -3.914062 4.09375 -4.28125 C 3.820312 -4.644531 3.453125 -4.828125 2.984375 -4.828125 C 2.523438 -4.828125 2.160156 -4.644531 1.890625 -4.28125 C 1.628906 -3.914062 1.5 -3.414062 1.5 -2.78125 C 1.5 -2.15625 1.628906 -1.660156 1.890625 -1.296875 C 2.160156 -0.929688 2.523438 -0.75 2.984375 -0.75 C 3.453125 -0.75 3.820312 -0.929688 4.09375 -1.296875 C 4.363281 -1.660156 4.5 -2.15625 4.5 -2.78125 Z M 4.5 -2.78125 "/>
+</symbol>
+<symbol overflow="visible" id="glyph1-7">
+<path style="stroke:none;" d="M 4.109375 0 C 3.992188 -0.269531 3.890625 -0.515625 3.796875 -0.734375 C 3.710938 -0.960938 3.628906 -1.1875 3.546875 -1.40625 C 3.460938 -1.632812 3.378906 -1.867188 3.296875 -2.109375 C 3.210938 -2.359375 3.113281 -2.640625 3 -2.953125 C 2.882812 -2.640625 2.78125 -2.359375 2.6875 -2.109375 C 2.601562 -1.867188 2.519531 -1.632812 2.4375 -1.40625 C 2.351562 -1.1875 2.265625 -0.960938 2.171875 -0.734375 C 2.085938 -0.515625 1.984375 -0.269531 1.859375 0 L 1.109375 0 C 0.890625 -0.976562 0.707031 -1.953125 0.5625 -2.921875 C 0.414062 -3.890625 0.304688 -4.769531 0.234375 -5.5625 L 1.15625 -5.5625 C 1.1875 -5.25 1.210938 -4.941406 1.234375 -4.640625 C 1.265625 -4.347656 1.300781 -4.035156 1.34375 -3.703125 C 1.382812 -3.378906 1.429688 -3.023438 1.484375 -2.640625 C 1.535156 -2.253906 1.59375 -1.820312 1.65625 -1.34375 C 1.78125 -1.664062 1.882812 -1.945312 1.96875 -2.1875 C 2.0625 -2.425781 2.144531 -2.648438 2.21875 -2.859375 C 2.289062 -3.078125 2.359375 -3.296875 2.421875 -3.515625 C 2.492188 -3.742188 2.570312 -4 2.65625 -4.28125 L 3.390625 -4.28125 C 3.472656 -4 3.546875 -3.742188 3.609375 -3.515625 C 3.671875 -3.296875 3.738281 -3.078125 3.8125 -2.859375 C 3.882812 -2.648438 3.957031 -2.425781 4.03125 -2.1875 C 4.113281 -1.945312 4.21875 -1.671875 4.34375 -1.359375 C 4.414062 -1.796875 4.476562 -2.203125 4.53125 -2.578125 C 4.59375 -2.953125 4.640625 -3.304688 4.671875 -3.640625 C 4.710938 -3.972656 4.75 -4.296875 4.78125 -4.609375 C 4.820312 -4.921875 4.851562 -5.238281 4.875 -5.5625 L 5.765625 -5.5625 C 5.734375 -5.164062 5.6875 -4.738281 5.625 -4.28125 C 5.570312 -3.820312 5.503906 -3.351562 5.421875 -2.875 C 5.335938 -2.394531 5.25 -1.910156 5.15625 -1.421875 C 5.0625 -0.929688 4.960938 -0.457031 4.859375 0 Z M 4.109375 0 "/>
+</symbol>
+<symbol overflow="visible" id="glyph1-8">
+<path style="stroke:none;" d="M 0.859375 -5.40625 C 1.210938 -5.5 1.566406 -5.566406 1.921875 -5.609375 C 2.273438 -5.660156 2.609375 -5.6875 2.921875 -5.6875 C 3.671875 -5.6875 4.234375 -5.492188 4.609375 -5.109375 C 4.992188 -4.722656 5.1875 -4.109375 5.1875 -3.265625 L 5.1875 0 L 4.203125 0 L 4.203125 -3.078125 C 4.203125 -3.441406 4.171875 -3.734375 4.109375 -3.953125 C 4.046875 -4.179688 3.953125 -4.359375 3.828125 -4.484375 C 3.710938 -4.609375 3.570312 -4.691406 3.40625 -4.734375 C 3.25 -4.785156 3.070312 -4.8125 2.875 -4.8125 C 2.71875 -4.8125 2.546875 -4.800781 2.359375 -4.78125 C 2.179688 -4.757812 2.007812 -4.734375 1.84375 -4.703125 L 1.84375 0 L 0.859375 0 Z M 0.859375 -5.40625 "/>
+</symbol>
+<symbol overflow="visible" id="glyph1-9">
+<path style="stroke:none;" d="M 4.21875 -1.390625 C 4.21875 -1.585938 4.132812 -1.75 3.96875 -1.875 C 3.800781 -2.007812 3.59375 -2.125 3.34375 -2.21875 C 3.101562 -2.3125 2.835938 -2.40625 2.546875 -2.5 C 2.265625 -2.59375 2 -2.707031 1.75 -2.84375 C 1.507812 -2.976562 1.304688 -3.144531 1.140625 -3.34375 C 0.984375 -3.539062 0.90625 -3.800781 0.90625 -4.125 C 0.90625 -4.570312 1.082031 -4.945312 1.4375 -5.25 C 1.800781 -5.550781 2.375 -5.703125 3.15625 -5.703125 C 3.457031 -5.703125 3.769531 -5.675781 4.09375 -5.625 C 4.414062 -5.582031 4.695312 -5.523438 4.9375 -5.453125 L 4.75 -4.578125 C 4.6875 -4.609375 4.597656 -4.640625 4.484375 -4.671875 C 4.367188 -4.710938 4.238281 -4.742188 4.09375 -4.765625 C 3.957031 -4.796875 3.804688 -4.816406 3.640625 -4.828125 C 3.472656 -4.847656 3.316406 -4.859375 3.171875 -4.859375 C 2.304688 -4.859375 1.875 -4.625 1.875 -4.15625 C 1.875 -3.988281 1.953125 -3.84375 2.109375 -3.71875 C 2.273438 -3.601562 2.484375 -3.5 2.734375 -3.40625 C 2.984375 -3.3125 3.25 -3.210938 3.53125 -3.109375 C 3.820312 -3.015625 4.09375 -2.894531 4.34375 -2.75 C 4.59375 -2.601562 4.796875 -2.425781 4.953125 -2.21875 C 5.117188 -2.019531 5.203125 -1.765625 5.203125 -1.453125 C 5.203125 -0.953125 5.003906 -0.5625 4.609375 -0.28125 C 4.222656 -0.0078125 3.609375 0.125 2.765625 0.125 C 2.378906 0.125 2.023438 0.09375 1.703125 0.03125 C 1.378906 -0.03125 1.078125 -0.125 0.796875 -0.25 L 0.984375 -1.15625 C 1.265625 -1.019531 1.554688 -0.910156 1.859375 -0.828125 C 2.171875 -0.742188 2.503906 -0.703125 2.859375 -0.703125 C 3.765625 -0.703125 4.21875 -0.929688 4.21875 -1.390625 Z M 4.21875 -1.390625 "/>
+</symbol>
+<symbol overflow="visible" id="glyph1-10">
+<path style="stroke:none;" d="M 0.59375 -2.765625 C 0.59375 -3.273438 0.671875 -3.710938 0.828125 -4.078125 C 0.984375 -4.441406 1.203125 -4.742188 1.484375 -4.984375 C 1.765625 -5.234375 2.09375 -5.414062 2.46875 -5.53125 C 2.84375 -5.644531 3.238281 -5.703125 3.65625 -5.703125 C 3.925781 -5.703125 4.195312 -5.679688 4.46875 -5.640625 C 4.738281 -5.609375 5.023438 -5.546875 5.328125 -5.453125 L 5.09375 -4.59375 C 4.832031 -4.6875 4.59375 -4.75 4.375 -4.78125 C 4.15625 -4.8125 3.929688 -4.828125 3.703125 -4.828125 C 3.421875 -4.828125 3.148438 -4.785156 2.890625 -4.703125 C 2.640625 -4.628906 2.414062 -4.507812 2.21875 -4.34375 C 2.03125 -4.1875 1.878906 -3.976562 1.765625 -3.71875 C 1.660156 -3.457031 1.609375 -3.140625 1.609375 -2.765625 C 1.609375 -2.421875 1.660156 -2.117188 1.765625 -1.859375 C 1.867188 -1.609375 2.015625 -1.398438 2.203125 -1.234375 C 2.390625 -1.078125 2.613281 -0.957031 2.875 -0.875 C 3.144531 -0.789062 3.4375 -0.75 3.75 -0.75 C 4.007812 -0.75 4.253906 -0.765625 4.484375 -0.796875 C 4.722656 -0.828125 4.984375 -0.890625 5.265625 -0.984375 L 5.40625 -0.15625 C 5.125 -0.0507812 4.835938 0.0195312 4.546875 0.0625 C 4.265625 0.101562 3.957031 0.125 3.625 0.125 C 3.175781 0.125 2.765625 0.0664062 2.390625 -0.046875 C 2.023438 -0.171875 1.707031 -0.351562 1.4375 -0.59375 C 1.164062 -0.832031 0.957031 -1.132812 0.8125 -1.5 C 0.664062 -1.863281 0.59375 -2.285156 0.59375 -2.765625 Z M 0.59375 -2.765625 "/>
+</symbol>
+<symbol overflow="visible" id="glyph1-11">
+<path style="stroke:none;" d="M 3.0625 -0.703125 C 3.3125 -0.703125 3.53125 -0.707031 3.71875 -0.71875 C 3.914062 -0.738281 4.082031 -0.765625 4.21875 -0.796875 L 4.21875 -2.453125 C 4.082031 -2.492188 3.925781 -2.523438 3.75 -2.546875 C 3.570312 -2.566406 3.382812 -2.578125 3.1875 -2.578125 C 3 -2.578125 2.816406 -2.5625 2.640625 -2.53125 C 2.460938 -2.507812 2.304688 -2.460938 2.171875 -2.390625 C 2.035156 -2.316406 1.921875 -2.222656 1.828125 -2.109375 C 1.742188 -1.992188 1.703125 -1.847656 1.703125 -1.671875 C 1.703125 -1.304688 1.820312 -1.050781 2.0625 -0.90625 C 2.3125 -0.769531 2.644531 -0.703125 3.0625 -0.703125 Z M 2.96875 -5.703125 C 3.382812 -5.703125 3.734375 -5.648438 4.015625 -5.546875 C 4.296875 -5.441406 4.523438 -5.296875 4.703125 -5.109375 C 4.878906 -4.929688 5.003906 -4.707031 5.078125 -4.4375 C 5.148438 -4.175781 5.1875 -3.890625 5.1875 -3.578125 L 5.1875 -0.09375 C 4.957031 -0.0507812 4.648438 -0.00390625 4.265625 0.046875 C 3.890625 0.0976562 3.5 0.125 3.09375 0.125 C 2.789062 0.125 2.492188 0.0976562 2.203125 0.046875 C 1.921875 -0.00390625 1.664062 -0.09375 1.4375 -0.21875 C 1.21875 -0.351562 1.039062 -0.535156 0.90625 -0.765625 C 0.769531 -0.992188 0.703125 -1.289062 0.703125 -1.65625 C 0.703125 -1.976562 0.769531 -2.25 0.90625 -2.46875 C 1.039062 -2.6875 1.21875 -2.863281 1.4375 -3 C 1.664062 -3.132812 1.921875 -3.234375 2.203125 -3.296875 C 2.484375 -3.359375 2.769531 -3.390625 3.0625 -3.390625 C 3.445312 -3.390625 3.832031 -3.34375 4.21875 -3.25 L 4.21875 -3.53125 C 4.21875 -3.695312 4.195312 -3.859375 4.15625 -4.015625 C 4.125 -4.171875 4.054688 -4.3125 3.953125 -4.4375 C 3.847656 -4.5625 3.707031 -4.660156 3.53125 -4.734375 C 3.363281 -4.816406 3.144531 -4.859375 2.875 -4.859375 C 2.53125 -4.859375 2.226562 -4.832031 1.96875 -4.78125 C 1.71875 -4.738281 1.523438 -4.691406 1.390625 -4.640625 L 1.265625 -5.453125 C 1.398438 -5.523438 1.625 -5.582031 1.9375 -5.625 C 2.257812 -5.675781 2.601562 -5.703125 2.96875 -5.703125 Z M 2.96875 -5.703125 "/>
+</symbol>
+<symbol overflow="visible" id="glyph1-12">
+<path style="stroke:none;" d="M 4.0625 0.125 C 3.707031 0.125 3.410156 0.078125 3.171875 -0.015625 C 2.941406 -0.109375 2.757812 -0.25 2.625 -0.4375 C 2.488281 -0.632812 2.390625 -0.875 2.328125 -1.15625 C 2.273438 -1.4375 2.25 -1.765625 2.25 -2.140625 L 2.25 -7.421875 L 0.640625 -7.421875 L 0.640625 -8.25 L 3.234375 -8.25 L 3.234375 -2.140625 C 3.234375 -1.867188 3.25 -1.644531 3.28125 -1.46875 C 3.320312 -1.289062 3.378906 -1.144531 3.453125 -1.03125 C 3.535156 -0.925781 3.628906 -0.851562 3.734375 -0.8125 C 3.847656 -0.769531 3.984375 -0.75 4.140625 -0.75 C 4.367188 -0.75 4.582031 -0.773438 4.78125 -0.828125 C 4.988281 -0.890625 5.144531 -0.953125 5.25 -1.015625 L 5.40625 -0.1875 C 5.351562 -0.15625 5.28125 -0.117188 5.1875 -0.078125 C 5.101562 -0.046875 5 -0.015625 4.875 0.015625 C 4.757812 0.046875 4.628906 0.0703125 4.484375 0.09375 C 4.347656 0.113281 4.207031 0.125 4.0625 0.125 Z M 4.0625 0.125 "/>
+</symbol>
+<symbol overflow="visible" id="glyph1-13">
+<path style="stroke:none;" d="M 2.515625 -6.4375 C 2.304688 -6.4375 2.125 -6.503906 1.96875 -6.640625 C 1.8125 -6.785156 1.734375 -6.984375 1.734375 -7.234375 C 1.734375 -7.484375 1.8125 -7.679688 1.96875 -7.828125 C 2.125 -7.972656 2.304688 -8.046875 2.515625 -8.046875 C 2.722656 -8.046875 2.898438 -7.972656 3.046875 -7.828125 C 3.203125 -7.679688 3.28125 -7.484375 3.28125 -7.234375 C 3.28125 -6.984375 3.203125 -6.785156 3.046875 -6.640625 C 2.898438 -6.503906 2.722656 -6.4375 2.515625 -6.4375 Z M 2.25 -4.734375 L 0.640625 -4.734375 L 0.640625 -5.5625 L 3.234375 -5.5625 L 3.234375 -2.140625 C 3.234375 -1.585938 3.3125 -1.21875 3.46875 -1.03125 C 3.625 -0.84375 3.851562 -0.75 4.15625 -0.75 C 4.382812 -0.75 4.597656 -0.773438 4.796875 -0.828125 C 4.992188 -0.890625 5.144531 -0.953125 5.25 -1.015625 L 5.40625 -0.1875 C 5.351562 -0.15625 5.28125 -0.117188 5.1875 -0.078125 C 5.101562 -0.046875 5.003906 -0.015625 4.890625 0.015625 C 4.773438 0.046875 4.644531 0.0703125 4.5 0.09375 C 4.363281 0.113281 4.21875 0.125 4.0625 0.125 C 3.71875 0.125 3.425781 0.078125 3.1875 -0.015625 C 2.957031 -0.109375 2.769531 -0.25 2.625 -0.4375 C 2.488281 -0.632812 2.390625 -0.875 2.328125 -1.15625 C 2.273438 -1.4375 2.25 -1.765625 2.25 -2.140625 Z M 2.25 -4.734375 "/>
+</symbol>
+<symbol overflow="visible" id="glyph1-14">
+<path style="stroke:none;" d="M 4.15625 -0.515625 C 4.039062 -0.453125 3.863281 -0.382812 3.625 -0.3125 C 3.394531 -0.238281 3.128906 -0.203125 2.828125 -0.203125 C 2.503906 -0.203125 2.195312 -0.253906 1.90625 -0.359375 C 1.625 -0.472656 1.378906 -0.640625 1.171875 -0.859375 C 0.960938 -1.078125 0.796875 -1.351562 0.671875 -1.6875 C 0.546875 -2.03125 0.484375 -2.4375 0.484375 -2.90625 C 0.484375 -3.3125 0.539062 -3.679688 0.65625 -4.015625 C 0.769531 -4.359375 0.9375 -4.65625 1.15625 -4.90625 C 1.375 -5.15625 1.644531 -5.347656 1.96875 -5.484375 C 2.289062 -5.628906 2.65625 -5.703125 3.0625 -5.703125 C 3.539062 -5.703125 3.945312 -5.664062 4.28125 -5.59375 C 4.625 -5.53125 4.910156 -5.46875 5.140625 -5.40625 L 5.140625 -0.4375 C 5.140625 0.425781 4.921875 1.050781 4.484375 1.4375 C 4.054688 1.820312 3.398438 2.015625 2.515625 2.015625 C 2.160156 2.015625 1.835938 1.984375 1.546875 1.921875 C 1.253906 1.867188 0.992188 1.804688 0.765625 1.734375 L 0.953125 0.859375 C 1.160156 0.941406 1.394531 1.007812 1.65625 1.0625 C 1.925781 1.125 2.222656 1.15625 2.546875 1.15625 C 3.117188 1.15625 3.53125 1.035156 3.78125 0.796875 C 4.03125 0.566406 4.15625 0.191406 4.15625 -0.328125 Z M 4.15625 -4.6875 C 4.0625 -4.71875 3.925781 -4.75 3.75 -4.78125 C 3.582031 -4.8125 3.359375 -4.828125 3.078125 -4.828125 C 2.554688 -4.828125 2.160156 -4.648438 1.890625 -4.296875 C 1.628906 -3.941406 1.5 -3.472656 1.5 -2.890625 C 1.5 -2.566406 1.535156 -2.289062 1.609375 -2.0625 C 1.691406 -1.84375 1.796875 -1.65625 1.921875 -1.5 C 2.054688 -1.351562 2.207031 -1.242188 2.375 -1.171875 C 2.539062 -1.109375 2.722656 -1.078125 2.921875 -1.078125 C 3.160156 -1.078125 3.390625 -1.113281 3.609375 -1.1875 C 3.835938 -1.257812 4.019531 -1.34375 4.15625 -1.4375 Z M 4.15625 -4.6875 "/>
+</symbol>
+<symbol overflow="visible" id="glyph1-15">
+<path style="stroke:none;" d="M 2.8125 -0.703125 C 3.3125 -0.703125 3.691406 -0.796875 3.953125 -0.984375 C 4.222656 -1.171875 4.359375 -1.457031 4.359375 -1.84375 C 4.359375 -2.082031 4.304688 -2.28125 4.203125 -2.4375 C 4.109375 -2.601562 3.984375 -2.75 3.828125 -2.875 C 3.671875 -3 3.488281 -3.109375 3.28125 -3.203125 C 3.082031 -3.296875 2.878906 -3.378906 2.671875 -3.453125 C 2.429688 -3.546875 2.203125 -3.648438 1.984375 -3.765625 C 1.765625 -3.890625 1.566406 -4.03125 1.390625 -4.1875 C 1.222656 -4.351562 1.085938 -4.546875 0.984375 -4.765625 C 0.890625 -4.984375 0.84375 -5.238281 0.84375 -5.53125 C 0.84375 -6.175781 1.046875 -6.679688 1.453125 -7.046875 C 1.859375 -7.410156 2.425781 -7.59375 3.15625 -7.59375 C 3.351562 -7.59375 3.550781 -7.578125 3.75 -7.546875 C 3.945312 -7.523438 4.128906 -7.492188 4.296875 -7.453125 C 4.460938 -7.410156 4.609375 -7.359375 4.734375 -7.296875 C 4.867188 -7.242188 4.976562 -7.191406 5.0625 -7.140625 L 4.75 -6.3125 C 4.59375 -6.40625 4.375 -6.5 4.09375 -6.59375 C 3.8125 -6.695312 3.5 -6.75 3.15625 -6.75 C 2.789062 -6.75 2.476562 -6.65625 2.21875 -6.46875 C 1.957031 -6.289062 1.828125 -6.019531 1.828125 -5.65625 C 1.828125 -5.457031 1.863281 -5.285156 1.9375 -5.140625 C 2.007812 -4.992188 2.113281 -4.863281 2.25 -4.75 C 2.382812 -4.644531 2.535156 -4.546875 2.703125 -4.453125 C 2.878906 -4.367188 3.070312 -4.285156 3.28125 -4.203125 C 3.59375 -4.078125 3.875 -3.945312 4.125 -3.8125 C 4.375 -3.6875 4.585938 -3.535156 4.765625 -3.359375 C 4.953125 -3.179688 5.09375 -2.972656 5.1875 -2.734375 C 5.289062 -2.492188 5.34375 -2.203125 5.34375 -1.859375 C 5.34375 -1.210938 5.125 -0.710938 4.6875 -0.359375 C 4.25 -0.015625 3.625 0.15625 2.8125 0.15625 C 2.539062 0.15625 2.289062 0.132812 2.0625 0.09375 C 1.832031 0.0625 1.625 0.0195312 1.4375 -0.03125 C 1.257812 -0.09375 1.101562 -0.148438 0.96875 -0.203125 C 0.832031 -0.253906 0.726562 -0.304688 0.65625 -0.359375 L 0.953125 -1.171875 C 1.117188 -1.085938 1.359375 -0.988281 1.671875 -0.875 C 1.984375 -0.757812 2.363281 -0.703125 2.8125 -0.703125 Z M 2.8125 -0.703125 "/>
+</symbol>
+<symbol overflow="visible" id="glyph1-16">
+<path style="stroke:none;" d="M 3.109375 -5.703125 C 3.859375 -5.703125 4.4375 -5.46875 4.84375 -5 C 5.25 -4.53125 5.453125 -3.820312 5.453125 -2.875 L 5.453125 -2.515625 L 1.46875 -2.515625 C 1.507812 -1.941406 1.703125 -1.503906 2.046875 -1.203125 C 2.390625 -0.898438 2.867188 -0.75 3.484375 -0.75 C 3.835938 -0.75 4.132812 -0.773438 4.375 -0.828125 C 4.625 -0.890625 4.8125 -0.953125 4.9375 -1.015625 L 5.078125 -0.1875 C 4.953125 -0.113281 4.734375 -0.046875 4.421875 0.015625 C 4.109375 0.0859375 3.757812 0.125 3.375 0.125 C 2.894531 0.125 2.472656 0.0507812 2.109375 -0.09375 C 1.742188 -0.238281 1.441406 -0.4375 1.203125 -0.6875 C 0.960938 -0.945312 0.78125 -1.253906 0.65625 -1.609375 C 0.539062 -1.960938 0.484375 -2.347656 0.484375 -2.765625 C 0.484375 -3.265625 0.554688 -3.695312 0.703125 -4.0625 C 0.859375 -4.4375 1.0625 -4.742188 1.3125 -4.984375 C 1.5625 -5.222656 1.835938 -5.398438 2.140625 -5.515625 C 2.453125 -5.640625 2.773438 -5.703125 3.109375 -5.703125 Z M 4.453125 -3.328125 C 4.453125 -3.796875 4.328125 -4.164062 4.078125 -4.4375 C 3.828125 -4.71875 3.5 -4.859375 3.09375 -4.859375 C 2.863281 -4.859375 2.65625 -4.8125 2.46875 -4.71875 C 2.28125 -4.632812 2.117188 -4.519531 1.984375 -4.375 C 1.847656 -4.226562 1.738281 -4.0625 1.65625 -3.875 C 1.570312 -3.695312 1.519531 -3.515625 1.5 -3.328125 Z M 4.453125 -3.328125 "/>
+</symbol>
+<symbol overflow="visible" id="glyph1-17">
+<path style="stroke:none;" d="M 4.15625 -4.390625 C 4.039062 -4.492188 3.875 -4.59375 3.65625 -4.6875 C 3.445312 -4.78125 3.222656 -4.828125 2.984375 -4.828125 C 2.722656 -4.828125 2.5 -4.773438 2.3125 -4.671875 C 2.125 -4.566406 1.96875 -4.421875 1.84375 -4.234375 C 1.726562 -4.054688 1.640625 -3.84375 1.578125 -3.59375 C 1.523438 -3.34375 1.5 -3.070312 1.5 -2.78125 C 1.5 -2.132812 1.648438 -1.632812 1.953125 -1.28125 C 2.253906 -0.925781 2.648438 -0.75 3.140625 -0.75 C 3.390625 -0.75 3.597656 -0.757812 3.765625 -0.78125 C 3.941406 -0.8125 4.070312 -0.835938 4.15625 -0.859375 Z M 4.15625 -8.140625 L 5.140625 -8.3125 L 5.140625 -0.15625 C 4.929688 -0.09375 4.65625 -0.03125 4.3125 0.03125 C 3.976562 0.09375 3.585938 0.125 3.140625 0.125 C 2.742188 0.125 2.378906 0.0546875 2.046875 -0.078125 C 1.722656 -0.210938 1.441406 -0.40625 1.203125 -0.65625 C 0.972656 -0.90625 0.796875 -1.207031 0.671875 -1.5625 C 0.546875 -1.925781 0.484375 -2.332031 0.484375 -2.78125 C 0.484375 -3.21875 0.535156 -3.613281 0.640625 -3.96875 C 0.742188 -4.320312 0.898438 -4.625 1.109375 -4.875 C 1.316406 -5.132812 1.566406 -5.335938 1.859375 -5.484375 C 2.148438 -5.628906 2.488281 -5.703125 2.875 -5.703125 C 3.164062 -5.703125 3.421875 -5.664062 3.640625 -5.59375 C 3.867188 -5.519531 4.039062 -5.441406 4.15625 -5.359375 Z M 4.15625 -8.140625 "/>
+</symbol>
+<symbol overflow="visible" id="glyph1-18">
+<path style="stroke:none;" d="M 1.28125 0 L 1.28125 -5.265625 C 2.101562 -5.546875 2.925781 -5.6875 3.75 -5.6875 C 4.007812 -5.6875 4.253906 -5.675781 4.484375 -5.65625 C 4.722656 -5.632812 4.976562 -5.59375 5.25 -5.53125 L 5.078125 -4.65625 C 4.816406 -4.726562 4.585938 -4.773438 4.390625 -4.796875 C 4.203125 -4.816406 3.988281 -4.828125 3.75 -4.828125 C 3.269531 -4.828125 2.773438 -4.757812 2.265625 -4.625 L 2.265625 0 Z M 1.28125 0 "/>
+</symbol>
+<symbol overflow="visible" id="glyph1-19">
+<path style="stroke:none;" d="M 0.609375 1.046875 C 0.679688 1.085938 0.78125 1.117188 0.90625 1.140625 C 1.039062 1.160156 1.164062 1.171875 1.28125 1.171875 C 1.675781 1.171875 1.984375 1.082031 2.203125 0.90625 C 2.421875 0.738281 2.625 0.460938 2.8125 0.078125 C 2.363281 -0.773438 1.941406 -1.6875 1.546875 -2.65625 C 1.148438 -3.625 0.828125 -4.59375 0.578125 -5.5625 L 1.65625 -5.5625 C 1.738281 -5.25 1.832031 -4.90625 1.9375 -4.53125 C 2.039062 -4.15625 2.160156 -3.769531 2.296875 -3.375 C 2.441406 -2.988281 2.585938 -2.597656 2.734375 -2.203125 C 2.890625 -1.804688 3.054688 -1.425781 3.234375 -1.0625 C 3.367188 -1.4375 3.488281 -1.800781 3.59375 -2.15625 C 3.707031 -2.519531 3.8125 -2.882812 3.90625 -3.25 C 4.007812 -3.613281 4.109375 -3.984375 4.203125 -4.359375 C 4.296875 -4.742188 4.394531 -5.144531 4.5 -5.5625 L 5.53125 -5.5625 C 5.269531 -4.53125 4.984375 -3.523438 4.671875 -2.546875 C 4.359375 -1.566406 4.019531 -0.660156 3.65625 0.171875 C 3.519531 0.484375 3.375 0.753906 3.21875 0.984375 C 3.0625 1.222656 2.890625 1.414062 2.703125 1.5625 C 2.523438 1.71875 2.316406 1.832031 2.078125 1.90625 C 1.847656 1.976562 1.585938 2.015625 1.296875 2.015625 C 1.140625 2.015625 0.972656 1.992188 0.796875 1.953125 C 0.617188 1.910156 0.5 1.875 0.4375 1.84375 Z M 0.609375 1.046875 "/>
+</symbol>
+<symbol overflow="visible" id="glyph1-20">
+<path style="stroke:none;" d="M 0.34375 -3.71875 C 0.34375 -4.382812 0.40625 -4.960938 0.53125 -5.453125 C 0.664062 -5.941406 0.847656 -6.34375 1.078125 -6.65625 C 1.304688 -6.96875 1.582031 -7.203125 1.90625 -7.359375 C 2.238281 -7.515625 2.601562 -7.59375 3 -7.59375 C 3.394531 -7.59375 3.753906 -7.515625 4.078125 -7.359375 C 4.410156 -7.203125 4.691406 -6.96875 4.921875 -6.65625 C 5.148438 -6.34375 5.328125 -5.941406 5.453125 -5.453125 C 5.585938 -4.960938 5.65625 -4.382812 5.65625 -3.71875 C 5.65625 -3.050781 5.585938 -2.472656 5.453125 -1.984375 C 5.328125 -1.503906 5.148438 -1.101562 4.921875 -0.78125 C 4.691406 -0.457031 4.410156 -0.21875 4.078125 -0.0625 C 3.753906 0.0820312 3.394531 0.15625 3 0.15625 C 2.601562 0.15625 2.238281 0.0820312 1.90625 -0.0625 C 1.582031 -0.21875 1.304688 -0.457031 1.078125 -0.78125 C 0.847656 -1.101562 0.664062 -1.503906 0.53125 -1.984375 C 0.40625 -2.472656 0.34375 -3.050781 0.34375 -3.71875 Z M 1.359375 -3.71875 C 1.359375 -2.738281 1.488281 -1.988281 1.75 -1.46875 C 2.007812 -0.957031 2.414062 -0.703125 2.96875 -0.703125 C 3.53125 -0.703125 3.953125 -0.957031 4.234375 -1.46875 C 4.515625 -1.988281 4.65625 -2.738281 4.65625 -3.71875 C 4.65625 -4.695312 4.515625 -5.445312 4.234375 -5.96875 C 3.953125 -6.488281 3.53125 -6.75 2.96875 -6.75 C 2.414062 -6.75 2.007812 -6.488281 1.75 -5.96875 C 1.488281 -5.445312 1.359375 -4.695312 1.359375 -3.71875 Z M 1.359375 -3.71875 "/>
+</symbol>
+<symbol overflow="visible" id="glyph1-21">
+<path style="stroke:none;" d="M 5.140625 -0.15625 C 4.929688 -0.101562 4.644531 -0.046875 4.28125 0.015625 C 3.925781 0.0859375 3.507812 0.125 3.03125 0.125 C 2.613281 0.125 2.265625 0.0625 1.984375 -0.0625 C 1.703125 -0.1875 1.472656 -0.363281 1.296875 -0.59375 C 1.117188 -0.820312 0.992188 -1.09375 0.921875 -1.40625 C 0.847656 -1.71875 0.8125 -2.0625 0.8125 -2.4375 L 0.8125 -5.5625 L 1.796875 -5.5625 L 1.796875 -2.65625 C 1.796875 -1.96875 1.894531 -1.476562 2.09375 -1.1875 C 2.300781 -0.894531 2.644531 -0.75 3.125 -0.75 C 3.226562 -0.75 3.332031 -0.753906 3.4375 -0.765625 C 3.550781 -0.773438 3.65625 -0.785156 3.75 -0.796875 C 3.851562 -0.804688 3.9375 -0.816406 4 -0.828125 C 4.070312 -0.847656 4.125 -0.859375 4.15625 -0.859375 L 4.15625 -5.5625 L 5.140625 -5.5625 Z M 5.140625 -0.15625 "/>
+</symbol>
+<symbol overflow="visible" id="glyph1-22">
+<path style="stroke:none;" d="M 2.921875 -5.5625 L 5.265625 -5.5625 L 5.265625 -4.734375 L 2.921875 -4.734375 L 2.921875 -2.140625 C 2.921875 -1.867188 2.9375 -1.644531 2.96875 -1.46875 C 3.007812 -1.289062 3.078125 -1.144531 3.171875 -1.03125 C 3.265625 -0.925781 3.382812 -0.851562 3.53125 -0.8125 C 3.675781 -0.769531 3.851562 -0.75 4.0625 -0.75 C 4.34375 -0.75 4.570312 -0.773438 4.75 -0.828125 C 4.925781 -0.878906 5.09375 -0.941406 5.25 -1.015625 L 5.40625 -0.1875 C 5.289062 -0.132812 5.109375 -0.0703125 4.859375 0 C 4.617188 0.0820312 4.316406 0.125 3.953125 0.125 C 3.546875 0.125 3.207031 0.078125 2.9375 -0.015625 C 2.675781 -0.109375 2.46875 -0.25 2.3125 -0.4375 C 2.164062 -0.632812 2.066406 -0.875 2.015625 -1.15625 C 1.960938 -1.4375 1.9375 -1.765625 1.9375 -2.140625 L 1.9375 -4.734375 L 0.75 -4.734375 L 0.75 -5.5625 L 1.9375 -5.5625 L 1.9375 -7.125 L 2.921875 -7.296875 Z M 2.921875 -5.5625 "/>
+</symbol>
+<symbol overflow="visible" id="glyph1-23">
+<path style="stroke:none;" d="M 4.5 -2.765625 C 4.5 -3.421875 4.347656 -3.925781 4.046875 -4.28125 C 3.742188 -4.632812 3.347656 -4.8125 2.859375 -4.8125 C 2.585938 -4.8125 2.375 -4.796875 2.21875 -4.765625 C 2.0625 -4.742188 1.9375 -4.71875 1.84375 -4.6875 L 1.84375 -1.171875 C 1.957031 -1.066406 2.117188 -0.96875 2.328125 -0.875 C 2.546875 -0.789062 2.773438 -0.75 3.015625 -0.75 C 3.273438 -0.75 3.5 -0.800781 3.6875 -0.90625 C 3.875 -1.007812 4.023438 -1.148438 4.140625 -1.328125 C 4.265625 -1.515625 4.351562 -1.726562 4.40625 -1.96875 C 4.46875 -2.21875 4.5 -2.484375 4.5 -2.765625 Z M 5.515625 -2.765625 C 5.515625 -2.347656 5.460938 -1.957031 5.359375 -1.59375 C 5.253906 -1.238281 5.097656 -0.929688 4.890625 -0.671875 C 4.679688 -0.421875 4.429688 -0.222656 4.140625 -0.078125 C 3.847656 0.0546875 3.507812 0.125 3.125 0.125 C 2.832031 0.125 2.570312 0.0859375 2.34375 0.015625 C 2.125 -0.046875 1.957031 -0.125 1.84375 -0.21875 L 1.84375 1.984375 L 0.859375 1.984375 L 0.859375 -5.40625 C 1.066406 -5.46875 1.34375 -5.53125 1.6875 -5.59375 C 2.03125 -5.65625 2.421875 -5.6875 2.859375 -5.6875 C 3.253906 -5.6875 3.613281 -5.617188 3.9375 -5.484375 C 4.269531 -5.347656 4.550781 -5.15625 4.78125 -4.90625 C 5.019531 -4.65625 5.203125 -4.347656 5.328125 -3.984375 C 5.453125 -3.617188 5.515625 -3.210938 5.515625 -2.765625 Z M 5.515625 -2.765625 "/>
+</symbol>
+<symbol overflow="visible" id="glyph1-24">
+<path style="stroke:none;" d="M 3.015625 -3.734375 L 4.15625 -7.421875 L 5.09375 -7.421875 C 5.25 -6.253906 5.359375 -5.054688 5.421875 -3.828125 C 5.492188 -2.609375 5.554688 -1.332031 5.609375 0 L 4.65625 0 C 4.644531 -0.425781 4.632812 -0.890625 4.625 -1.390625 C 4.625 -1.898438 4.613281 -2.421875 4.59375 -2.953125 C 4.582031 -3.492188 4.570312 -4.039062 4.5625 -4.59375 C 4.550781 -5.144531 4.539062 -5.679688 4.53125 -6.203125 L 3.4375 -2.8125 L 2.578125 -2.8125 L 1.46875 -6.203125 C 1.46875 -5.679688 1.457031 -5.144531 1.4375 -4.59375 C 1.425781 -4.050781 1.414062 -3.507812 1.40625 -2.96875 C 1.394531 -2.425781 1.382812 -1.898438 1.375 -1.390625 C 1.363281 -0.890625 1.351562 -0.425781 1.34375 0 L 0.390625 0 C 0.410156 -0.601562 0.4375 -1.222656 0.46875 -1.859375 C 0.5 -2.503906 0.535156 -3.144531 0.578125 -3.78125 C 0.617188 -4.414062 0.671875 -5.039062 0.734375 -5.65625 C 0.796875 -6.269531 0.863281 -6.859375 0.9375 -7.421875 L 1.84375 -7.421875 Z M 3.015625 -3.734375 "/>
+</symbol>
+<symbol overflow="visible" id="glyph2-0">
+<path style="stroke:none;" d="M 0.640625 2.296875 L 0.640625 -9.171875 L 7.140625 -9.171875 L 7.140625 2.296875 Z M 1.375 1.578125 L 6.421875 1.578125 L 6.421875 -8.4375 L 1.375 -8.4375 Z M 1.375 1.578125 "/>
+</symbol>
+<symbol overflow="visible" id="glyph2-1">
+<path style="stroke:none;" d="M 6.34375 -6.84375 L 6.34375 -5.75 C 6.007812 -5.925781 5.675781 -6.0625 5.34375 -6.15625 C 5.007812 -6.25 4.675781 -6.296875 4.34375 -6.296875 C 3.582031 -6.296875 2.992188 -6.050781 2.578125 -5.5625 C 2.160156 -5.082031 1.953125 -4.410156 1.953125 -3.546875 C 1.953125 -2.679688 2.160156 -2.007812 2.578125 -1.53125 C 2.992188 -1.050781 3.582031 -0.8125 4.34375 -0.8125 C 4.675781 -0.8125 5.007812 -0.851562 5.34375 -0.9375 C 5.675781 -1.03125 6.007812 -1.171875 6.34375 -1.359375 L 6.34375 -0.265625 C 6.019531 -0.117188 5.679688 -0.0078125 5.328125 0.0625 C 4.984375 0.144531 4.613281 0.1875 4.21875 0.1875 C 3.144531 0.1875 2.289062 -0.144531 1.65625 -0.8125 C 1.03125 -1.488281 0.71875 -2.398438 0.71875 -3.546875 C 0.71875 -4.703125 1.035156 -5.613281 1.671875 -6.28125 C 2.304688 -6.945312 3.179688 -7.28125 4.296875 -7.28125 C 4.648438 -7.28125 5 -7.242188 5.34375 -7.171875 C 5.6875 -7.097656 6.019531 -6.988281 6.34375 -6.84375 Z M 6.34375 -6.84375 "/>
+</symbol>
+<symbol overflow="visible" id="glyph2-2">
+<path style="stroke:none;" d="M 5.34375 -6.015625 C 5.207031 -6.085938 5.0625 -6.140625 4.90625 -6.171875 C 4.757812 -6.210938 4.59375 -6.234375 4.40625 -6.234375 C 3.75 -6.234375 3.242188 -6.019531 2.890625 -5.59375 C 2.535156 -5.164062 2.359375 -4.550781 2.359375 -3.75 L 2.359375 0 L 1.1875 0 L 1.1875 -7.109375 L 2.359375 -7.109375 L 2.359375 -6 C 2.597656 -6.4375 2.914062 -6.757812 3.3125 -6.96875 C 3.707031 -7.175781 4.1875 -7.28125 4.75 -7.28125 C 4.832031 -7.28125 4.921875 -7.273438 5.015625 -7.265625 C 5.109375 -7.253906 5.21875 -7.238281 5.34375 -7.21875 Z M 5.34375 -6.015625 "/>
+</symbol>
+<symbol overflow="visible" id="glyph2-3">
+<path style="stroke:none;" d="M 3.984375 -6.296875 C 3.359375 -6.296875 2.863281 -6.050781 2.5 -5.5625 C 2.132812 -5.070312 1.953125 -4.398438 1.953125 -3.546875 C 1.953125 -2.691406 2.128906 -2.019531 2.484375 -1.53125 C 2.847656 -1.050781 3.347656 -0.8125 3.984375 -0.8125 C 4.597656 -0.8125 5.085938 -1.054688 5.453125 -1.546875 C 5.816406 -2.035156 6 -2.703125 6 -3.546875 C 6 -4.390625 5.816406 -5.054688 5.453125 -5.546875 C 5.085938 -6.046875 4.597656 -6.296875 3.984375 -6.296875 Z M 3.984375 -7.28125 C 4.992188 -7.28125 5.789062 -6.945312 6.375 -6.28125 C 6.957031 -5.625 7.25 -4.710938 7.25 -3.546875 C 7.25 -2.378906 6.957031 -1.460938 6.375 -0.796875 C 5.789062 -0.140625 4.992188 0.1875 3.984375 0.1875 C 2.960938 0.1875 2.160156 -0.140625 1.578125 -0.796875 C 1.003906 -1.460938 0.71875 -2.378906 0.71875 -3.546875 C 0.71875 -4.710938 1.003906 -5.625 1.578125 -6.28125 C 2.160156 -6.945312 2.960938 -7.28125 3.984375 -7.28125 Z M 3.984375 -7.28125 "/>
+</symbol>
+<symbol overflow="visible" id="glyph2-4">
+<path style="stroke:none;" d="M 2.359375 -1.0625 L 2.359375 2.703125 L 1.1875 2.703125 L 1.1875 -7.109375 L 2.359375 -7.109375 L 2.359375 -6.03125 C 2.597656 -6.457031 2.90625 -6.769531 3.28125 -6.96875 C 3.65625 -7.175781 4.101562 -7.28125 4.625 -7.28125 C 5.488281 -7.28125 6.191406 -6.9375 6.734375 -6.25 C 7.273438 -5.5625 7.546875 -4.660156 7.546875 -3.546875 C 7.546875 -2.429688 7.273438 -1.53125 6.734375 -0.84375 C 6.191406 -0.15625 5.488281 0.1875 4.625 0.1875 C 4.101562 0.1875 3.65625 0.0820312 3.28125 -0.125 C 2.90625 -0.332031 2.597656 -0.644531 2.359375 -1.0625 Z M 6.328125 -3.546875 C 6.328125 -4.410156 6.148438 -5.082031 5.796875 -5.5625 C 5.441406 -6.050781 4.957031 -6.296875 4.34375 -6.296875 C 3.726562 -6.296875 3.242188 -6.050781 2.890625 -5.5625 C 2.535156 -5.082031 2.359375 -4.410156 2.359375 -3.546875 C 2.359375 -2.691406 2.535156 -2.019531 2.890625 -1.53125 C 3.242188 -1.039062 3.726562 -0.796875 4.34375 -0.796875 C 4.957031 -0.796875 5.441406 -1.039062 5.796875 -1.53125 C 6.148438 -2.019531 6.328125 -2.691406 6.328125 -3.546875 Z M 6.328125 -3.546875 "/>
+</symbol>
+<symbol overflow="visible" id="glyph2-5">
+<path style="stroke:none;" d="M 5.75 -6.90625 L 5.75 -5.796875 C 5.425781 -5.960938 5.085938 -6.085938 4.734375 -6.171875 C 4.378906 -6.253906 4.007812 -6.296875 3.625 -6.296875 C 3.039062 -6.296875 2.601562 -6.207031 2.3125 -6.03125 C 2.03125 -5.851562 1.890625 -5.585938 1.890625 -5.234375 C 1.890625 -4.960938 1.988281 -4.75 2.1875 -4.59375 C 2.394531 -4.445312 2.816406 -4.300781 3.453125 -4.15625 L 3.84375 -4.0625 C 4.675781 -3.882812 5.265625 -3.632812 5.609375 -3.3125 C 5.960938 -2.988281 6.140625 -2.539062 6.140625 -1.96875 C 6.140625 -1.300781 5.878906 -0.773438 5.359375 -0.390625 C 4.835938 -0.00390625 4.117188 0.1875 3.203125 0.1875 C 2.816406 0.1875 2.414062 0.148438 2 0.078125 C 1.59375 0.00390625 1.160156 -0.109375 0.703125 -0.265625 L 0.703125 -1.46875 C 1.140625 -1.238281 1.566406 -1.066406 1.984375 -0.953125 C 2.398438 -0.847656 2.8125 -0.796875 3.21875 -0.796875 C 3.769531 -0.796875 4.191406 -0.890625 4.484375 -1.078125 C 4.785156 -1.265625 4.9375 -1.53125 4.9375 -1.875 C 4.9375 -2.1875 4.828125 -2.425781 4.609375 -2.59375 C 4.398438 -2.769531 3.9375 -2.9375 3.21875 -3.09375 L 2.8125 -3.1875 C 2.082031 -3.34375 1.554688 -3.578125 1.234375 -3.890625 C 0.910156 -4.203125 0.75 -4.632812 0.75 -5.1875 C 0.75 -5.851562 0.984375 -6.367188 1.453125 -6.734375 C 1.929688 -7.097656 2.609375 -7.28125 3.484375 -7.28125 C 3.910156 -7.28125 4.316406 -7.25 4.703125 -7.1875 C 5.085938 -7.125 5.4375 -7.03125 5.75 -6.90625 Z M 5.75 -6.90625 "/>
+</symbol>
+<symbol overflow="visible" id="glyph2-6">
+<path style="stroke:none;" d="M 4.453125 -3.578125 C 3.515625 -3.578125 2.863281 -3.46875 2.5 -3.25 C 2.132812 -3.03125 1.953125 -2.660156 1.953125 -2.140625 C 1.953125 -1.734375 2.085938 -1.40625 2.359375 -1.15625 C 2.628906 -0.914062 3 -0.796875 3.46875 -0.796875 C 4.113281 -0.796875 4.632812 -1.023438 5.03125 -1.484375 C 5.425781 -1.941406 5.625 -2.550781 5.625 -3.3125 L 5.625 -3.578125 Z M 6.78125 -4.0625 L 6.78125 0 L 5.625 0 L 5.625 -1.078125 C 5.351562 -0.648438 5.019531 -0.332031 4.625 -0.125 C 4.226562 0.0820312 3.738281 0.1875 3.15625 0.1875 C 2.425781 0.1875 1.847656 -0.015625 1.421875 -0.421875 C 0.992188 -0.835938 0.78125 -1.382812 0.78125 -2.0625 C 0.78125 -2.863281 1.046875 -3.46875 1.578125 -3.875 C 2.117188 -4.28125 2.921875 -4.484375 3.984375 -4.484375 L 5.625 -4.484375 L 5.625 -4.609375 C 5.625 -5.140625 5.445312 -5.550781 5.09375 -5.84375 C 4.738281 -6.144531 4.238281 -6.296875 3.59375 -6.296875 C 3.1875 -6.296875 2.789062 -6.242188 2.40625 -6.140625 C 2.019531 -6.046875 1.648438 -5.898438 1.296875 -5.703125 L 1.296875 -6.78125 C 1.722656 -6.945312 2.140625 -7.070312 2.546875 -7.15625 C 2.953125 -7.238281 3.34375 -7.28125 3.71875 -7.28125 C 4.75 -7.28125 5.515625 -7.015625 6.015625 -6.484375 C 6.523438 -5.953125 6.78125 -5.144531 6.78125 -4.0625 Z M 6.78125 -4.0625 "/>
+</symbol>
+<symbol overflow="visible" id="glyph2-7">
+<path style="stroke:none;" d="M 1.21875 -9.875 L 2.390625 -9.875 L 2.390625 0 L 1.21875 0 Z M 1.21875 -9.875 "/>
+</symbol>
+<symbol overflow="visible" id="glyph2-8">
+<path style="stroke:none;" d="M 7.3125 -3.84375 L 7.3125 -3.28125 L 1.9375 -3.28125 C 1.988281 -2.46875 2.226562 -1.851562 2.65625 -1.4375 C 3.09375 -1.019531 3.695312 -0.8125 4.46875 -0.8125 C 4.914062 -0.8125 5.347656 -0.863281 5.765625 -0.96875 C 6.191406 -1.082031 6.613281 -1.25 7.03125 -1.46875 L 7.03125 -0.359375 C 6.613281 -0.179688 6.179688 -0.046875 5.734375 0.046875 C 5.296875 0.140625 4.851562 0.1875 4.40625 0.1875 C 3.269531 0.1875 2.367188 -0.140625 1.703125 -0.796875 C 1.046875 -1.460938 0.71875 -2.359375 0.71875 -3.484375 C 0.71875 -4.648438 1.03125 -5.570312 1.65625 -6.25 C 2.289062 -6.9375 3.140625 -7.28125 4.203125 -7.28125 C 5.160156 -7.28125 5.914062 -6.972656 6.46875 -6.359375 C 7.03125 -5.742188 7.3125 -4.90625 7.3125 -3.84375 Z M 6.140625 -4.1875 C 6.128906 -4.820312 5.945312 -5.332031 5.59375 -5.71875 C 5.25 -6.101562 4.789062 -6.296875 4.21875 -6.296875 C 3.5625 -6.296875 3.035156 -6.109375 2.640625 -5.734375 C 2.253906 -5.367188 2.03125 -4.851562 1.96875 -4.1875 Z M 6.140625 -4.1875 "/>
+</symbol>
+<symbol overflow="visible" id="glyph2-9">
+<path style="stroke:none;" d="M 6.328125 -3.546875 C 6.328125 -4.410156 6.148438 -5.082031 5.796875 -5.5625 C 5.441406 -6.050781 4.957031 -6.296875 4.34375 -6.296875 C 3.726562 -6.296875 3.242188 -6.050781 2.890625 -5.5625 C 2.535156 -5.082031 2.359375 -4.410156 2.359375 -3.546875 C 2.359375 -2.691406 2.535156 -2.019531 2.890625 -1.53125 C 3.242188 -1.039062 3.726562 -0.796875 4.34375 -0.796875 C 4.957031 -0.796875 5.441406 -1.039062 5.796875 -1.53125 C 6.148438 -2.019531 6.328125 -2.691406 6.328125 -3.546875 Z M 2.359375 -6.03125 C 2.597656 -6.457031 2.90625 -6.769531 3.28125 -6.96875 C 3.65625 -7.175781 4.101562 -7.28125 4.625 -7.28125 C 5.488281 -7.28125 6.191406 -6.9375 6.734375 -6.25 C 7.273438 -5.5625 7.546875 -4.660156 7.546875 -3.546875 C 7.546875 -2.429688 7.273438 -1.53125 6.734375 -0.84375 C 6.191406 -0.15625 5.488281 0.1875 4.625 0.1875 C 4.101562 0.1875 3.65625 0.0820312 3.28125 -0.125 C 2.90625 -0.332031 2.597656 -0.644531 2.359375 -1.0625 L 2.359375 0 L 1.1875 0 L 1.1875 -9.875 L 2.359375 -9.875 Z M 2.359375 -6.03125 "/>
+</symbol>
+<symbol overflow="visible" id="glyph2-10">
+<path style="stroke:none;" d="M 1.21875 -7.109375 L 2.390625 -7.109375 L 2.390625 0 L 1.21875 0 Z M 1.21875 -9.875 L 2.390625 -9.875 L 2.390625 -8.390625 L 1.21875 -8.390625 Z M 1.21875 -9.875 "/>
+</symbol>
+<symbol overflow="visible" id="glyph2-11">
+<path style="stroke:none;" d="M 7.140625 -4.296875 L 7.140625 0 L 5.96875 0 L 5.96875 -4.25 C 5.96875 -4.925781 5.835938 -5.429688 5.578125 -5.765625 C 5.316406 -6.097656 4.921875 -6.265625 4.390625 -6.265625 C 3.765625 -6.265625 3.269531 -6.0625 2.90625 -5.65625 C 2.539062 -5.257812 2.359375 -4.710938 2.359375 -4.015625 L 2.359375 0 L 1.1875 0 L 1.1875 -7.109375 L 2.359375 -7.109375 L 2.359375 -6 C 2.640625 -6.425781 2.96875 -6.742188 3.34375 -6.953125 C 3.71875 -7.171875 4.15625 -7.28125 4.65625 -7.28125 C 5.46875 -7.28125 6.082031 -7.023438 6.5 -6.515625 C 6.925781 -6.015625 7.140625 -5.273438 7.140625 -4.296875 Z M 7.140625 -4.296875 "/>
+</symbol>
+<symbol overflow="visible" id="glyph2-12">
+<path style="stroke:none;" d="M 5.90625 -3.640625 C 5.90625 -4.484375 5.726562 -5.132812 5.375 -5.59375 C 5.03125 -6.0625 4.539062 -6.296875 3.90625 -6.296875 C 3.28125 -6.296875 2.789062 -6.0625 2.4375 -5.59375 C 2.09375 -5.132812 1.921875 -4.484375 1.921875 -3.640625 C 1.921875 -2.796875 2.09375 -2.140625 2.4375 -1.671875 C 2.789062 -1.210938 3.28125 -0.984375 3.90625 -0.984375 C 4.539062 -0.984375 5.03125 -1.210938 5.375 -1.671875 C 5.726562 -2.140625 5.90625 -2.796875 5.90625 -3.640625 Z M 7.078125 -0.875 C 7.078125 0.332031 6.804688 1.226562 6.265625 1.8125 C 5.722656 2.40625 4.898438 2.703125 3.796875 2.703125 C 3.390625 2.703125 3.003906 2.671875 2.640625 2.609375 C 2.273438 2.546875 1.921875 2.453125 1.578125 2.328125 L 1.578125 1.1875 C 1.921875 1.375 2.257812 1.507812 2.59375 1.59375 C 2.925781 1.6875 3.265625 1.734375 3.609375 1.734375 C 4.378906 1.734375 4.953125 1.535156 5.328125 1.140625 C 5.710938 0.742188 5.90625 0.140625 5.90625 -0.671875 L 5.90625 -1.25 C 5.664062 -0.832031 5.351562 -0.519531 4.96875 -0.3125 C 4.59375 -0.101562 4.144531 0 3.625 0 C 2.75 0 2.046875 -0.332031 1.515625 -1 C 0.984375 -1.664062 0.71875 -2.546875 0.71875 -3.640625 C 0.71875 -4.734375 0.984375 -5.613281 1.515625 -6.28125 C 2.046875 -6.945312 2.75 -7.28125 3.625 -7.28125 C 4.144531 -7.28125 4.59375 -7.175781 4.96875 -6.96875 C 5.351562 -6.757812 5.664062 -6.445312 5.90625 -6.03125 L 5.90625 -7.109375 L 7.078125 -7.109375 Z M 7.078125 -0.875 "/>
+</symbol>
+<symbol overflow="visible" id="glyph2-13">
+<path style="stroke:none;" d="M 5.125 -8.609375 C 4.1875 -8.609375 3.441406 -8.257812 2.890625 -7.5625 C 2.347656 -6.875 2.078125 -5.929688 2.078125 -4.734375 C 2.078125 -3.535156 2.347656 -2.585938 2.890625 -1.890625 C 3.441406 -1.203125 4.1875 -0.859375 5.125 -0.859375 C 6.050781 -0.859375 6.785156 -1.203125 7.328125 -1.890625 C 7.878906 -2.585938 8.15625 -3.535156 8.15625 -4.734375 C 8.15625 -5.929688 7.878906 -6.875 7.328125 -7.5625 C 6.785156 -8.257812 6.050781 -8.609375 5.125 -8.609375 Z M 5.125 -9.65625 C 6.445312 -9.65625 7.503906 -9.207031 8.296875 -8.3125 C 9.097656 -7.414062 9.5 -6.222656 9.5 -4.734375 C 9.5 -3.234375 9.097656 -2.035156 8.296875 -1.140625 C 7.503906 -0.253906 6.445312 0.1875 5.125 0.1875 C 3.789062 0.1875 2.722656 -0.253906 1.921875 -1.140625 C 1.128906 -2.035156 0.734375 -3.234375 0.734375 -4.734375 C 0.734375 -6.222656 1.128906 -7.414062 1.921875 -8.3125 C 2.722656 -9.207031 3.789062 -9.65625 5.125 -9.65625 Z M 5.125 -9.65625 "/>
+</symbol>
+<symbol overflow="visible" id="glyph2-14">
+<path style="stroke:none;" d="M 1.109375 -2.8125 L 1.109375 -7.109375 L 2.265625 -7.109375 L 2.265625 -2.84375 C 2.265625 -2.175781 2.394531 -1.671875 2.65625 -1.328125 C 2.925781 -0.992188 3.320312 -0.828125 3.84375 -0.828125 C 4.476562 -0.828125 4.976562 -1.023438 5.34375 -1.421875 C 5.707031 -1.828125 5.890625 -2.378906 5.890625 -3.078125 L 5.890625 -7.109375 L 7.0625 -7.109375 L 7.0625 0 L 5.890625 0 L 5.890625 -1.09375 C 5.609375 -0.65625 5.28125 -0.332031 4.90625 -0.125 C 4.53125 0.0820312 4.09375 0.1875 3.59375 0.1875 C 2.78125 0.1875 2.160156 -0.0664062 1.734375 -0.578125 C 1.316406 -1.085938 1.109375 -1.832031 1.109375 -2.8125 Z M 4.046875 -7.28125 Z M 4.046875 -7.28125 "/>
+</symbol>
+<symbol overflow="visible" id="glyph2-15">
+<path style="stroke:none;" d="M 2.375 -9.125 L 2.375 -7.109375 L 4.78125 -7.109375 L 4.78125 -6.203125 L 2.375 -6.203125 L 2.375 -2.34375 C 2.375 -1.757812 2.453125 -1.382812 2.609375 -1.21875 C 2.773438 -1.0625 3.101562 -0.984375 3.59375 -0.984375 L 4.78125 -0.984375 L 4.78125 0 L 3.59375 0 C 2.6875 0 2.0625 -0.164062 1.71875 -0.5 C 1.375 -0.84375 1.203125 -1.457031 1.203125 -2.34375 L 1.203125 -6.203125 L 0.34375 -6.203125 L 0.34375 -7.109375 L 1.203125 -7.109375 L 1.203125 -9.125 Z M 2.375 -9.125 "/>
+</symbol>
+<symbol overflow="visible" id="glyph2-16">
+<path style="stroke:none;" d=""/>
+</symbol>
+<symbol overflow="visible" id="glyph2-17">
+<path style="stroke:none;" d="M 1.28125 -9.484375 L 6.71875 -9.484375 L 6.71875 -8.390625 L 2.5625 -8.390625 L 2.5625 -5.609375 L 6.3125 -5.609375 L 6.3125 -4.53125 L 2.5625 -4.53125 L 2.5625 0 L 1.28125 0 Z M 1.28125 -9.484375 "/>
+</symbol>
+<symbol overflow="visible" id="glyph2-18">
+<path style="stroke:none;" d="M 6.765625 -5.75 C 7.054688 -6.269531 7.40625 -6.65625 7.8125 -6.90625 C 8.21875 -7.15625 8.695312 -7.28125 9.25 -7.28125 C 9.988281 -7.28125 10.554688 -7.019531 10.953125 -6.5 C 11.359375 -5.976562 11.5625 -5.242188 11.5625 -4.296875 L 11.5625 0 L 10.390625 0 L 10.390625 -4.25 C 10.390625 -4.9375 10.265625 -5.441406 10.015625 -5.765625 C 9.773438 -6.097656 9.410156 -6.265625 8.921875 -6.265625 C 8.316406 -6.265625 7.835938 -6.0625 7.484375 -5.65625 C 7.128906 -5.257812 6.953125 -4.710938 6.953125 -4.015625 L 6.953125 0 L 5.78125 0 L 5.78125 -4.25 C 5.78125 -4.9375 5.660156 -5.441406 5.421875 -5.765625 C 5.179688 -6.097656 4.804688 -6.265625 4.296875 -6.265625 C 3.703125 -6.265625 3.226562 -6.0625 2.875 -5.65625 C 2.53125 -5.25 2.359375 -4.703125 2.359375 -4.015625 L 2.359375 0 L 1.1875 0 L 1.1875 -7.109375 L 2.359375 -7.109375 L 2.359375 -6 C 2.617188 -6.4375 2.9375 -6.757812 3.3125 -6.96875 C 3.6875 -7.175781 4.128906 -7.28125 4.640625 -7.28125 C 5.160156 -7.28125 5.597656 -7.148438 5.953125 -6.890625 C 6.316406 -6.628906 6.585938 -6.25 6.765625 -5.75 Z M 6.765625 -5.75 "/>
+</symbol>
+<symbol overflow="visible" id="glyph2-19">
+<path style="stroke:none;" d="M 6.953125 -9.171875 L 6.953125 -7.921875 C 6.472656 -8.148438 6.015625 -8.320312 5.578125 -8.4375 C 5.148438 -8.550781 4.734375 -8.609375 4.328125 -8.609375 C 3.628906 -8.609375 3.085938 -8.472656 2.703125 -8.203125 C 2.328125 -7.929688 2.140625 -7.546875 2.140625 -7.046875 C 2.140625 -6.628906 2.265625 -6.3125 2.515625 -6.09375 C 2.773438 -5.882812 3.253906 -5.710938 3.953125 -5.578125 L 4.734375 -5.421875 C 5.679688 -5.234375 6.382812 -4.910156 6.84375 -4.453125 C 7.300781 -3.992188 7.53125 -3.378906 7.53125 -2.609375 C 7.53125 -1.691406 7.222656 -0.992188 6.609375 -0.515625 C 5.992188 -0.046875 5.085938 0.1875 3.890625 0.1875 C 3.441406 0.1875 2.960938 0.132812 2.453125 0.03125 C 1.953125 -0.0703125 1.429688 -0.222656 0.890625 -0.421875 L 0.890625 -1.734375 C 1.410156 -1.441406 1.921875 -1.222656 2.421875 -1.078125 C 2.921875 -0.929688 3.410156 -0.859375 3.890625 -0.859375 C 4.628906 -0.859375 5.195312 -1 5.59375 -1.28125 C 5.988281 -1.570312 6.1875 -1.984375 6.1875 -2.515625 C 6.1875 -2.984375 6.039062 -3.347656 5.75 -3.609375 C 5.46875 -3.867188 5.003906 -4.066406 4.359375 -4.203125 L 3.578125 -4.359375 C 2.617188 -4.546875 1.925781 -4.84375 1.5 -5.25 C 1.070312 -5.65625 0.859375 -6.21875 0.859375 -6.9375 C 0.859375 -7.78125 1.148438 -8.441406 1.734375 -8.921875 C 2.328125 -9.410156 3.144531 -9.65625 4.1875 -9.65625 C 4.625 -9.65625 5.070312 -9.613281 5.53125 -9.53125 C 6 -9.445312 6.472656 -9.328125 6.953125 -9.171875 Z M 6.953125 -9.171875 "/>
+</symbol>
+<symbol overflow="visible" id="glyph2-20">
+<path style="stroke:none;" d="M 4.1875 0.65625 C 3.851562 1.507812 3.53125 2.0625 3.21875 2.3125 C 2.90625 2.570312 2.488281 2.703125 1.96875 2.703125 L 1.03125 2.703125 L 1.03125 1.734375 L 1.71875 1.734375 C 2.039062 1.734375 2.289062 1.65625 2.46875 1.5 C 2.644531 1.34375 2.835938 0.984375 3.046875 0.421875 L 3.265625 -0.109375 L 0.390625 -7.109375 L 1.625 -7.109375 L 3.84375 -1.546875 L 6.0625 -7.109375 L 7.3125 -7.109375 Z M 4.1875 0.65625 "/>
+</symbol>
+</g>
+</defs>
+<g id="surface268880">
+<rect x="0" y="0" width="774" height="152" style="fill:rgb(100%,100%,100%);fill-opacity:1;stroke:none;"/>
+<path style="fill-rule:evenodd;fill:rgb(100%,100%,100%);fill-opacity:1;stroke-width:0.1;stroke-linecap:butt;stroke-linejoin:miter;stroke:rgb(0%,0%,0%);stroke-opacity:1;stroke-miterlimit:10;" d="M 21.75297 10.408118 L 26.433829 10.408118 L 26.433829 12.281165 L 21.75297 12.281165 Z M 21.75297 10.408118 " transform="matrix(20,0,0,20,-434.059401,-172.47877)"/>
+<path style="fill-rule:evenodd;fill:rgb(100%,100%,100%);fill-opacity:1;stroke-width:0.1;stroke-linecap:butt;stroke-linejoin:miter;stroke:rgb(0%,0%,0%);stroke-opacity:1;stroke-miterlimit:10;" d="M 29.079728 10.51222 L 32.829728 10.51222 L 32.829728 12.149915 L 29.079728 12.149915 Z M 29.079728 10.51222 " transform="matrix(20,0,0,20,-434.059401,-172.47877)"/>
+<g style="fill:rgb(0%,0%,0%);fill-opacity:1;">
+ <use xlink:href="#glyph0-1" x="20.171875" y="57.705621"/>
+ <use xlink:href="#glyph0-2" x="30.171875" y="57.705621"/>
+ <use xlink:href="#glyph0-3" x="40.171875" y="57.705621"/>
+ <use xlink:href="#glyph0-4" x="50.171875" y="57.705621"/>
+ <use xlink:href="#glyph0-5" x="60.171875" y="57.705621"/>
+ <use xlink:href="#glyph0-6" x="70.171875" y="57.705621"/>
+</g>
+<g style="fill:rgb(0%,0%,0%);fill-opacity:1;">
+ <use xlink:href="#glyph0-7" x="174.203125" y="60.053277"/>
+ <use xlink:href="#glyph0-8" x="184.203125" y="60.053277"/>
+</g>
+<path style="fill-rule:evenodd;fill:rgb(100%,100%,100%);fill-opacity:1;stroke-width:0.1;stroke-linecap:butt;stroke-linejoin:miter;stroke:rgb(0%,0%,0%);stroke-opacity:1;stroke-miterlimit:10;" d="M 40.925236 10.544446 L 44.675236 10.544446 L 44.675236 12.090345 L 40.925236 12.090345 Z M 40.925236 10.544446 " transform="matrix(20,0,0,20,-434.059401,-172.47877)"/>
+<path style="fill-rule:evenodd;fill:rgb(100%,100%,100%);fill-opacity:1;stroke-width:0.1;stroke-linecap:butt;stroke-linejoin:miter;stroke:rgb(0%,0%,0%);stroke-opacity:1;stroke-miterlimit:10;" d="M 34.883439 10.536634 L 38.633439 10.536634 L 38.633439 12.120032 L 34.883439 12.120032 Z M 34.883439 10.536634 " transform="matrix(20,0,0,20,-434.059401,-172.47877)"/>
+<path style="fill-rule:evenodd;fill:rgb(100%,100%,100%);fill-opacity:1;stroke-width:0.1;stroke-linecap:butt;stroke-linejoin:miter;stroke:rgb(0%,0%,0%);stroke-opacity:1;stroke-miterlimit:10;" d="M 47.084806 10.484876 L 52.045743 10.484876 L 52.045743 12.130774 L 47.084806 12.130774 Z M 47.084806 10.484876 " transform="matrix(20,0,0,20,-434.059401,-172.47877)"/>
+<path style="fill-rule:evenodd;fill:rgb(100%,100%,100%);fill-opacity:1;stroke-width:0.1;stroke-linecap:butt;stroke-linejoin:miter;stroke:rgb(0%,0%,0%);stroke-opacity:1;stroke-miterlimit:10;" d="M 53.980118 10.376868 L 59.866642 10.376868 L 59.866642 12.279603 L 53.980118 12.279603 Z M 53.980118 10.376868 " transform="matrix(20,0,0,20,-434.059401,-172.47877)"/>
+<path style="fill-rule:evenodd;fill:rgb(100%,100%,100%);fill-opacity:1;stroke-width:0.1;stroke-linecap:butt;stroke-linejoin:miter;stroke:rgb(0%,0%,0%);stroke-opacity:1;stroke-miterlimit:10;" d="M 54.048478 13.825501 L 59.868009 13.825501 L 59.868009 15.490345 L 54.048478 15.490345 Z M 54.048478 13.825501 " transform="matrix(20,0,0,20,-434.059401,-172.47877)"/>
+<path style="fill:none;stroke-width:0.1;stroke-linecap:butt;stroke-linejoin:miter;stroke:rgb(0%,0%,0%);stroke-opacity:1;stroke-miterlimit:10;" d="M 26.481876 11.338001 L 28.593009 11.332337 " transform="matrix(20,0,0,20,-434.059401,-172.47877)"/>
+<path style="fill-rule:evenodd;fill:rgb(0%,0%,0%);fill-opacity:1;stroke-width:0.1;stroke-linecap:butt;stroke-linejoin:miter;stroke:rgb(0%,0%,0%);stroke-opacity:1;stroke-miterlimit:10;" d="M 28.968009 11.33136 L 28.468595 11.582728 L 28.593009 11.332337 L 28.467228 11.082728 Z M 28.968009 11.33136 " transform="matrix(20,0,0,20,-434.059401,-172.47877)"/>
+<path style="fill:none;stroke-width:0.1;stroke-linecap:butt;stroke-linejoin:miter;stroke:rgb(0%,0%,0%);stroke-opacity:1;stroke-miterlimit:10;" d="M 32.876798 11.329798 L 34.396525 11.328626 " transform="matrix(20,0,0,20,-434.059401,-172.47877)"/>
+<path style="fill-rule:evenodd;fill:rgb(0%,0%,0%);fill-opacity:1;stroke-width:0.1;stroke-linecap:butt;stroke-linejoin:miter;stroke:rgb(0%,0%,0%);stroke-opacity:1;stroke-miterlimit:10;" d="M 34.771525 11.328431 L 34.27172 11.578821 L 34.396525 11.328626 L 34.271329 11.078821 Z M 34.771525 11.328431 " transform="matrix(20,0,0,20,-434.059401,-172.47877)"/>
+<path style="fill:none;stroke-width:0.1;stroke-linecap:butt;stroke-linejoin:miter;stroke:rgb(0%,0%,0%);stroke-opacity:1;stroke-miterlimit:10;" d="M 38.633439 11.328431 L 40.438517 11.319642 " transform="matrix(20,0,0,20,-434.059401,-172.47877)"/>
+<path style="fill-rule:evenodd;fill:rgb(0%,0%,0%);fill-opacity:1;stroke-width:0.1;stroke-linecap:butt;stroke-linejoin:miter;stroke:rgb(0%,0%,0%);stroke-opacity:1;stroke-miterlimit:10;" d="M 40.813517 11.317884 L 40.314689 11.570228 L 40.438517 11.319642 L 40.312345 11.070423 Z M 40.813517 11.317884 " transform="matrix(20,0,0,20,-434.059401,-172.47877)"/>
+<path style="fill:none;stroke-width:0.1;stroke-linecap:butt;stroke-linejoin:miter;stroke:rgb(0%,0%,0%);stroke-opacity:1;stroke-miterlimit:10;" d="M 44.675236 11.317298 L 46.597892 11.309876 " transform="matrix(20,0,0,20,-434.059401,-172.47877)"/>
+<path style="fill-rule:evenodd;fill:rgb(0%,0%,0%);fill-opacity:1;stroke-width:0.1;stroke-linecap:butt;stroke-linejoin:miter;stroke:rgb(0%,0%,0%);stroke-opacity:1;stroke-miterlimit:10;" d="M 46.972892 11.308313 L 46.473868 11.560267 L 46.597892 11.309876 L 46.471915 11.060267 Z M 46.972892 11.308313 " transform="matrix(20,0,0,20,-434.059401,-172.47877)"/>
+<path style="fill:none;stroke-width:0.1;stroke-linecap:butt;stroke-linejoin:miter;stroke:rgb(0%,0%,0%);stroke-opacity:1;stroke-miterlimit:10;" d="M 52.045743 11.307923 L 53.4934 11.323157 " transform="matrix(20,0,0,20,-434.059401,-172.47877)"/>
+<path style="fill-rule:evenodd;fill:rgb(0%,0%,0%);fill-opacity:1;stroke-width:0.1;stroke-linecap:butt;stroke-linejoin:miter;stroke:rgb(0%,0%,0%);stroke-opacity:1;stroke-miterlimit:10;" d="M 53.8684 11.327063 L 53.365861 11.57179 L 53.4934 11.323157 L 53.370939 11.07179 Z M 53.8684 11.327063 " transform="matrix(20,0,0,20,-434.059401,-172.47877)"/>
+<g style="fill:rgb(0%,0%,0%);fill-opacity:1;">
+ <use xlink:href="#glyph0-9" x="286.757813" y="60.611871"/>
+ <use xlink:href="#glyph0-10" x="296.757813" y="60.611871"/>
+ <use xlink:href="#glyph0-1" x="306.757813" y="60.611871"/>
+</g>
+<g style="fill:rgb(0%,0%,0%);fill-opacity:1;">
+ <use xlink:href="#glyph0-11" x="405.660156" y="59.904839"/>
+ <use xlink:href="#glyph0-10" x="415.660156" y="59.904839"/>
+ <use xlink:href="#glyph0-12" x="425.660156" y="59.904839"/>
+</g>
+<g style="fill:rgb(0%,0%,0%);fill-opacity:1;">
+ <use xlink:href="#glyph1-1" x="511.308594" y="58.064616"/>
+ <use xlink:href="#glyph1-2" x="517.308757" y="58.064616"/>
+ <use xlink:href="#glyph1-3" x="523.308919" y="58.064616"/>
+ <use xlink:href="#glyph1-4" x="529.309082" y="58.064616"/>
+ <use xlink:href="#glyph1-5" x="535.309245" y="58.064616"/>
+ <use xlink:href="#glyph1-6" x="541.309408" y="58.064616"/>
+ <use xlink:href="#glyph1-7" x="547.30957" y="58.064616"/>
+ <use xlink:href="#glyph1-8" x="553.309733" y="58.064616"/>
+ <use xlink:href="#glyph1-9" x="559.309896" y="58.064616"/>
+ <use xlink:href="#glyph1-10" x="565.310059" y="58.064616"/>
+ <use xlink:href="#glyph1-11" x="571.310221" y="58.064616"/>
+ <use xlink:href="#glyph1-12" x="577.310384" y="58.064616"/>
+ <use xlink:href="#glyph1-13" x="583.310547" y="58.064616"/>
+ <use xlink:href="#glyph1-8" x="589.31071" y="58.064616"/>
+ <use xlink:href="#glyph1-14" x="595.310872" y="58.064616"/>
+</g>
+<path style="fill:none;stroke-width:0.1;stroke-linecap:butt;stroke-linejoin:miter;stroke:rgb(0%,0%,0%);stroke-opacity:1;stroke-miterlimit:10;" d="M 45.671915 11.342298 L 45.655704 11.342298 L 45.655704 14.657923 L 53.561759 14.657923 " transform="matrix(20,0,0,20,-434.059401,-172.47877)"/>
+<path style="fill-rule:evenodd;fill:rgb(0%,0%,0%);fill-opacity:1;stroke-width:0.1;stroke-linecap:butt;stroke-linejoin:miter;stroke:rgb(0%,0%,0%);stroke-opacity:1;stroke-miterlimit:10;" d="M 53.936759 14.657923 L 53.436759 14.907923 L 53.561759 14.657923 L 53.436759 14.407923 Z M 53.936759 14.657923 " transform="matrix(20,0,0,20,-434.059401,-172.47877)"/>
+<g style="fill:rgb(0%,0%,0%);fill-opacity:1;">
+ <use xlink:href="#glyph1-15" x="657.078125" y="57.724772"/>
+ <use xlink:href="#glyph1-16" x="663.078288" y="57.724772"/>
+ <use xlink:href="#glyph1-10" x="669.078451" y="57.724772"/>
+ <use xlink:href="#glyph1-6" x="675.078613" y="57.724772"/>
+ <use xlink:href="#glyph1-8" x="681.078776" y="57.724772"/>
+ <use xlink:href="#glyph1-17" x="687.078939" y="57.724772"/>
+ <use xlink:href="#glyph1-11" x="693.079102" y="57.724772"/>
+ <use xlink:href="#glyph1-18" x="699.079264" y="57.724772"/>
+ <use xlink:href="#glyph1-19" x="705.079427" y="57.724772"/>
+ <use xlink:href="#glyph1-4" x="711.07959" y="57.724772"/>
+ <use xlink:href="#glyph1-20" x="717.079753" y="57.724772"/>
+ <use xlink:href="#glyph1-21" x="723.079915" y="57.724772"/>
+ <use xlink:href="#glyph1-22" x="729.080078" y="57.724772"/>
+ <use xlink:href="#glyph1-23" x="735.080241" y="57.724772"/>
+ <use xlink:href="#glyph1-21" x="741.080404" y="57.724772"/>
+ <use xlink:href="#glyph1-22" x="747.080566" y="57.724772"/>
+</g>
+<g style="fill:rgb(0%,0%,0%);fill-opacity:1;">
+ <use xlink:href="#glyph1-24" x="673.335938" y="124.170085"/>
+ <use xlink:href="#glyph1-11" x="679.3361" y="124.170085"/>
+ <use xlink:href="#glyph1-13" x="685.336263" y="124.170085"/>
+ <use xlink:href="#glyph1-8" x="691.336426" y="124.170085"/>
+ <use xlink:href="#glyph1-4" x="697.336589" y="124.170085"/>
+ <use xlink:href="#glyph1-20" x="703.336751" y="124.170085"/>
+ <use xlink:href="#glyph1-21" x="709.336914" y="124.170085"/>
+ <use xlink:href="#glyph1-22" x="715.337077" y="124.170085"/>
+ <use xlink:href="#glyph1-23" x="721.33724" y="124.170085"/>
+ <use xlink:href="#glyph1-21" x="727.337402" y="124.170085"/>
+ <use xlink:href="#glyph1-22" x="733.337565" y="124.170085"/>
+</g>
+<g style="fill:rgb(0%,0%,0%);fill-opacity:1;">
+ <use xlink:href="#glyph2-1" x="168.71875" y="31.959093"/>
+ <use xlink:href="#glyph2-2" x="175.866102" y="31.959093"/>
+ <use xlink:href="#glyph2-3" x="180.92551" y="31.959093"/>
+ <use xlink:href="#glyph2-4" x="188.879069" y="31.959093"/>
+</g>
+<g style="fill:rgb(0%,0%,0%);fill-opacity:1;">
+ <use xlink:href="#glyph2-5" x="288.109375" y="31.681749"/>
+ <use xlink:href="#glyph2-1" x="294.882378" y="31.681749"/>
+ <use xlink:href="#glyph2-6" x="302.029731" y="31.681749"/>
+ <use xlink:href="#glyph2-7" x="309.996039" y="31.681749"/>
+ <use xlink:href="#glyph2-8" x="313.607964" y="31.681749"/>
+</g>
+<g style="fill:rgb(0%,0%,0%);fill-opacity:1;">
+ <use xlink:href="#glyph2-5" x="535.988281" y="33.365343"/>
+ <use xlink:href="#glyph2-1" x="542.761285" y="33.365343"/>
+ <use xlink:href="#glyph2-6" x="549.908637" y="33.365343"/>
+ <use xlink:href="#glyph2-7" x="557.874946" y="33.365343"/>
+ <use xlink:href="#glyph2-8" x="561.486871" y="33.365343"/>
+</g>
+<g style="fill:rgb(0%,0%,0%);fill-opacity:1;">
+ <use xlink:href="#glyph2-9" x="26.695313" y="32.365343"/>
+ <use xlink:href="#glyph2-10" x="34.947266" y="32.365343"/>
+ <use xlink:href="#glyph2-11" x="38.559191" y="32.365343"/>
+ <use xlink:href="#glyph2-11" x="46.798394" y="32.365343"/>
+ <use xlink:href="#glyph2-10" x="55.037598" y="32.365343"/>
+ <use xlink:href="#glyph2-11" x="58.649523" y="32.365343"/>
+ <use xlink:href="#glyph2-12" x="66.888726" y="32.365343"/>
+</g>
+<path style="fill:none;stroke-width:0.1;stroke-linecap:butt;stroke-linejoin:miter;stroke:rgb(0%,0%,0%);stroke-opacity:1;stroke-dasharray:0.14,0.14;stroke-miterlimit:10;" d="M 45.300431 9.486438 L 60.373478 9.486438 L 60.373478 16.175696 L 45.300431 16.175696 Z M 45.300431 9.486438 " transform="matrix(20,0,0,20,-434.059401,-172.47877)"/>
+<g style="fill:rgb(0%,0%,0%);fill-opacity:1;">
+ <use xlink:href="#glyph2-13" x="532.003906" y="11.904405"/>
+ <use xlink:href="#glyph2-14" x="542.236382" y="11.904405"/>
+ <use xlink:href="#glyph2-15" x="550.475586" y="11.904405"/>
+ <use xlink:href="#glyph2-4" x="555.5727" y="11.904405"/>
+ <use xlink:href="#glyph2-14" x="563.824653" y="11.904405"/>
+ <use xlink:href="#glyph2-15" x="572.063856" y="11.904405"/>
+ <use xlink:href="#glyph2-16" x="577.16097" y="11.904405"/>
+ <use xlink:href="#glyph2-17" x="581.293186" y="11.904405"/>
+ <use xlink:href="#glyph2-3" x="588.307346" y="11.904405"/>
+ <use xlink:href="#glyph2-2" x="596.260905" y="11.904405"/>
+ <use xlink:href="#glyph2-18" x="601.377279" y="11.904405"/>
+ <use xlink:href="#glyph2-6" x="614.040853" y="11.904405"/>
+ <use xlink:href="#glyph2-15" x="622.007161" y="11.904405"/>
+ <use xlink:href="#glyph2-15" x="627.104275" y="11.904405"/>
+ <use xlink:href="#glyph2-8" x="632.201389" y="11.904405"/>
+ <use xlink:href="#glyph2-2" x="640.199436" y="11.904405"/>
+ <use xlink:href="#glyph2-16" x="645.544217" y="11.904405"/>
+ <use xlink:href="#glyph2-19" x="649.676432" y="11.904405"/>
+ <use xlink:href="#glyph2-20" x="657.928385" y="11.904405"/>
+ <use xlink:href="#glyph2-5" x="665.621799" y="11.904405"/>
+ <use xlink:href="#glyph2-15" x="672.394803" y="11.904405"/>
+ <use xlink:href="#glyph2-8" x="677.491916" y="11.904405"/>
+ <use xlink:href="#glyph2-18" x="685.489963" y="11.904405"/>
+</g>
+</g>
+</svg>
diff --git a/Documentation/media/v4l-drivers/vimc.rst b/Documentation/media/v4l-drivers/vimc.rst
index 406417680db5..8f5d7f8d83bb 100644
--- a/Documentation/media/v4l-drivers/vimc.rst
+++ b/Documentation/media/v4l-drivers/vimc.rst
@@ -76,27 +76,19 @@ vimc-capture:
* 1 Pad sink
* 1 Pad source
-Module options
----------------
-Vimc has a few module parameters to configure the driver. You should pass
-those arguments to each subdevice, not to the vimc module. For example::
+Module options
+--------------
- vimc_subdevice.param=value
+Vimc has a module parameter to configure the driver.
-* ``vimc_scaler.sca_mult=<unsigned int>``
+* ``sca_mult=<unsigned int>``
Image size multiplier factor to be used to multiply both width and
height, so the image size will be ``sca_mult^2`` bigger than the
original one. Currently, only supports scaling up (the default value
is 3).
-* ``vimc_debayer.deb_mean_win_size=<unsigned int>``
-
- Window size to calculate the mean. Note: the window size needs to be an
- odd number, as the main pixel stays in the center of the window,
- otherwise the next odd number is considered (the default value is 3).
-
Source code documentation
-------------------------
diff --git a/Documentation/media/videodev2.h.rst.exceptions b/Documentation/media/videodev2.h.rst.exceptions
index adeb6b7a15cb..cb6ccf91776e 100644
--- a/Documentation/media/videodev2.h.rst.exceptions
+++ b/Documentation/media/videodev2.h.rst.exceptions
@@ -141,6 +141,10 @@ replace symbol V4L2_CTRL_TYPE_H264_PPS :c:type:`v4l2_ctrl_type`
replace symbol V4L2_CTRL_TYPE_H264_SCALING_MATRIX :c:type:`v4l2_ctrl_type`
replace symbol V4L2_CTRL_TYPE_H264_SLICE_PARAMS :c:type:`v4l2_ctrl_type`
replace symbol V4L2_CTRL_TYPE_H264_DECODE_PARAMS :c:type:`v4l2_ctrl_type`
+replace symbol V4L2_CTRL_TYPE_HEVC_SPS :c:type:`v4l2_ctrl_type`
+replace symbol V4L2_CTRL_TYPE_HEVC_PPS :c:type:`v4l2_ctrl_type`
+replace symbol V4L2_CTRL_TYPE_HEVC_SLICE_PARAMS :c:type:`v4l2_ctrl_type`
+replace symbol V4L2_CTRL_TYPE_AREA :c:type:`v4l2_ctrl_type`
# V4L2 capability defines
replace define V4L2_CAP_VIDEO_CAPTURE device-capabilities
@@ -434,6 +438,7 @@ replace define V4L2_DEC_CMD_START decoder-cmds
replace define V4L2_DEC_CMD_STOP decoder-cmds
replace define V4L2_DEC_CMD_PAUSE decoder-cmds
replace define V4L2_DEC_CMD_RESUME decoder-cmds
+replace define V4L2_DEC_CMD_FLUSH decoder-cmds
replace define V4L2_DEC_CMD_START_MUTE_AUDIO decoder-cmds
replace define V4L2_DEC_CMD_PAUSE_TO_BLACK decoder-cmds
diff --git a/Documentation/networking/af_xdp.rst b/Documentation/networking/af_xdp.rst
index 83f7ae5fc045..5bc55a4e3bce 100644
--- a/Documentation/networking/af_xdp.rst
+++ b/Documentation/networking/af_xdp.rst
@@ -40,13 +40,13 @@ allocates memory for this UMEM using whatever means it feels is most
appropriate (malloc, mmap, huge pages, etc). This memory area is then
registered with the kernel using the new setsockopt XDP_UMEM_REG. The
UMEM also has two rings: the FILL ring and the COMPLETION ring. The
-fill ring is used by the application to send down addr for the kernel
+FILL ring is used by the application to send down addr for the kernel
to fill in with RX packet data. References to these frames will then
appear in the RX ring once each packet has been received. The
-completion ring, on the other hand, contains frame addr that the
+COMPLETION ring, on the other hand, contains frame addr that the
kernel has transmitted completely and can now be used again by user
space, for either TX or RX. Thus, the frame addrs appearing in the
-completion ring are addrs that were previously transmitted using the
+COMPLETION ring are addrs that were previously transmitted using the
TX ring. In summary, the RX and FILL rings are used for the RX path
and the TX and COMPLETION rings are used for the TX path.
@@ -91,11 +91,16 @@ Concepts
========
In order to use an AF_XDP socket, a number of associated objects need
-to be setup.
+to be set up. These objects and their options are explained in the
+following sections.
-Jonathan Corbet has also written an excellent article on LWN,
-"Accelerating networking with AF_XDP". It can be found at
-https://lwn.net/Articles/750845/.
+For an overview on how AF_XDP works, you can also take a look at the
+Linux Plumbers paper from 2018 on the subject:
+http://vger.kernel.org/lpc_net2018_talks/lpc18_paper_af_xdp_perf-v2.pdf. Do
+NOT consult the paper from 2017 on "AF_PACKET v4", the first attempt
+at AF_XDP. Nearly everything changed since then. Jonathan Corbet has
+also written an excellent article on LWN, "Accelerating networking
+with AF_XDP". It can be found at https://lwn.net/Articles/750845/.
UMEM
----
@@ -113,22 +118,22 @@ the next socket B can do this by setting the XDP_SHARED_UMEM flag in
struct sockaddr_xdp member sxdp_flags, and passing the file descriptor
of A to struct sockaddr_xdp member sxdp_shared_umem_fd.
-The UMEM has two single-producer/single-consumer rings, that are used
+The UMEM has two single-producer/single-consumer rings that are used
to transfer ownership of UMEM frames between the kernel and the
user-space application.
Rings
-----
-There are a four different kind of rings: Fill, Completion, RX and
+There are four different kinds of rings: FILL, COMPLETION, RX and
TX. All rings are single-producer/single-consumer, so the user-space
application need explicit synchronization of multiple
processes/threads are reading/writing to them.
-The UMEM uses two rings: Fill and Completion. Each socket associated
+The UMEM uses two rings: FILL and COMPLETION. Each socket associated
with the UMEM must have an RX queue, TX queue or both. Say, that there
is a setup with four sockets (all doing TX and RX). Then there will be
-one Fill ring, one Completion ring, four TX rings and four RX rings.
+one FILL ring, one COMPLETION ring, four TX rings and four RX rings.
The rings are head(producer)/tail(consumer) based rings. A producer
writes the data ring at the index pointed out by struct xdp_ring
@@ -146,7 +151,7 @@ The size of the rings need to be of size power of two.
UMEM Fill Ring
~~~~~~~~~~~~~~
-The Fill ring is used to transfer ownership of UMEM frames from
+The FILL ring is used to transfer ownership of UMEM frames from
user-space to kernel-space. The UMEM addrs are passed in the ring. As
an example, if the UMEM is 64k and each chunk is 4k, then the UMEM has
16 chunks and can pass addrs between 0 and 64k.
@@ -164,8 +169,8 @@ chunks mode, then the incoming addr will be left untouched.
UMEM Completion Ring
~~~~~~~~~~~~~~~~~~~~
-The Completion Ring is used transfer ownership of UMEM frames from
-kernel-space to user-space. Just like the Fill ring, UMEM indicies are
+The COMPLETION ring is used to transfer ownership of UMEM frames from
+kernel-space to user-space. Just like the FILL ring, UMEM indices are
used.
Frames passed from the kernel to user-space are frames that has been
@@ -181,7 +186,7 @@ The RX ring is the receiving side of a socket. Each entry in the ring
is a struct xdp_desc descriptor. The descriptor contains UMEM offset
(addr) and the length of the data (len).
-If no frames have been passed to kernel via the Fill ring, no
+If no frames have been passed to kernel via the FILL ring, no
descriptors will (or can) appear on the RX ring.
The user application consumes struct xdp_desc descriptors from this
@@ -199,8 +204,24 @@ be relaxed in the future.
The user application produces struct xdp_desc descriptors to this
ring.
+Libbpf
+======
+
+Libbpf is a helper library for eBPF and XDP that makes using these
+technologies a lot simpler. It also contains specific helper functions
+in tools/lib/bpf/xsk.h for facilitating the use of AF_XDP. It
+contains two types of functions: those that can be used to make the
+setup of an AF_XDP socket easier and ones that can be used in the data
+plane to access the rings safely and quickly. To see an example on how
+to use this API, please take a look at the sample application in
+samples/bpf/xdpsock_user.c which uses libbpf for both setup and data
+plane operations.
+
+We recommend that you use this library unless you have become a power
+user. It will make your program a lot simpler.
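+
+As a rough sketch only (this is not the sample application; the
+interface name, queue id and sizes below are placeholder assumptions,
+and error handling is elided), a setup using these helpers could look
+like:
+
+.. code-block:: c
+
+    struct xsk_ring_prod fill, tx;
+    struct xsk_ring_cons comp, rx;
+    struct xsk_umem *umem;
+    struct xsk_socket *xsk;
+    void *bufs;
+
+    /* 4096 frames of 2K each; both values are arbitrary examples. */
+    posix_memalign(&bufs, getpagesize(), 4096 * 2048);
+    xsk_umem__create(&umem, bufs, 4096 * 2048, &fill, &comp, NULL);
+    /* Bind to queue 16 of eth0, using the default configuration. */
+    xsk_socket__create(&xsk, "eth0", 16, umem, &rx, &tx, NULL);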
+
XSKMAP / BPF_MAP_TYPE_XSKMAP
-----------------------------
+============================
On XDP side there is a BPF map type BPF_MAP_TYPE_XSKMAP (XSKMAP) that
is used in conjunction with bpf_redirect_map() to pass the ingress
@@ -216,21 +237,202 @@ queue 17. Only the XDP program executing for eth0 and queue 17 will
successfully pass data to the socket. Please refer to the sample
application (samples/bpf/) in for an example.
+Configuration Flags and Socket Options
+======================================
+
+These are the various configuration flags that can be used to control
+and monitor the behavior of AF_XDP sockets.
+
+XDP_COPY and XDP_ZERO_COPY bind flags
+-------------------------------------
+
+When you bind to a socket, the kernel will first try to use zero-copy
+mode. If zero-copy is not supported, it will fall back on using copy
+mode, i.e. copying all packets out to user space. But if you would
+like to force a certain mode, you can use the following flags. If you
+pass the XDP_COPY flag to the bind call, the kernel will force the
+socket into copy mode. If it cannot use copy mode, the bind call will
+fail with an error. Conversely, the XDP_ZERO_COPY flag will force the
+socket into zero-copy mode or fail.
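+
+As an illustration (fd and ifindex are assumed to come from the
+surrounding setup code), forcing zero-copy mode with the raw bind call
+could be sketched as:
+
+.. code-block:: c
+
+    struct sockaddr_xdp sxdp = {};
+
+    sxdp.sxdp_family = AF_XDP;
+    sxdp.sxdp_ifindex = ifindex; /* assumed known */
+    sxdp.sxdp_queue_id = 16;     /* example queue id */
+    sxdp.sxdp_flags = XDP_ZERO_COPY; /* or XDP_COPY to force copy mode */
+
+    if (bind(fd, (struct sockaddr *)&sxdp, sizeof(sxdp)))
+        perror("bind"); /* fails if zero-copy is unsupported */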
+
+XDP_SHARED_UMEM bind flag
+-------------------------
+
+This flag enables you to bind multiple sockets to the same UMEM, but
+only if they share the same queue id. In this mode, each socket has
+its own RX and TX rings, but the UMEM (tied to the first socket
+created) only has a single FILL ring and a single COMPLETION
+ring. To use this mode, create the first socket and bind it in the normal
+way. Create a second socket and create an RX and a TX ring, or at
+least one of them, but no FILL or COMPLETION rings as the ones from
+the first socket will be used. In the bind call, set the
+XDP_SHARED_UMEM option and provide the initial socket's fd in the
+sxdp_shared_umem_fd field. You can attach an arbitrary number of extra
+sockets this way.
+
+Which socket will a packet then arrive on? This is decided by the XDP
+program. Put all the sockets in the XSK_MAP and just indicate which
+index in the array you would like to send each packet to. A simple
+round-robin example of distributing packets is shown below:
+
+.. code-block:: c
+
+ #include <linux/bpf.h>
+ #include "bpf_helpers.h"
+
+ #define MAX_SOCKS 16
+
+ struct {
+ __uint(type, BPF_MAP_TYPE_XSKMAP);
+ __uint(max_entries, MAX_SOCKS);
+ __uint(key_size, sizeof(int));
+ __uint(value_size, sizeof(int));
+ } xsks_map SEC(".maps");
+
+ static unsigned int rr;
+
+ SEC("xdp_sock") int xdp_sock_prog(struct xdp_md *ctx)
+ {
+ rr = (rr + 1) & (MAX_SOCKS - 1);
+
+ return bpf_redirect_map(&xsks_map, rr, XDP_DROP);
+ }
+
+Note that, since there is only a single set of FILL and COMPLETION
+rings, and they are single producer, single consumer rings, you need
+to make sure that multiple processes or threads do not use these rings
+concurrently. There are no synchronization primitives in the
+libbpf code that protect multiple users at this point in time.
+
+Libbpf uses this mode if you create more than one socket tied to the
+same umem. However, note that you need to supply the
+XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD libbpf_flag with the
+xsk_socket__create calls and load your own XDP program as there is no
+built-in one in libbpf that will route the traffic for you.
+
+XDP_USE_NEED_WAKEUP bind flag
+-----------------------------
+
+This option adds support for a new flag called need_wakeup that is
+present in the FILL ring and the TX ring, the rings for which user
+space is a producer. When this option is set in the bind call, the
+need_wakeup flag will be set if the kernel needs to be explicitly
+woken up by a syscall to continue processing packets. If the flag is
+zero, no syscall is needed.
+
+If the flag is set on the FILL ring, the application needs to call
+poll() to be able to continue to receive packets on the RX ring. This
+can happen, for example, when the kernel has detected that there are no
+more buffers on the FILL ring and no buffers left on the RX HW ring of
+the NIC. In this case, interrupts are turned off as the NIC cannot
+receive any packets (as there are no buffers to put them in), and the
+need_wakeup flag is set so that user space can put buffers on the
+FILL ring and then call poll() so that the kernel driver can put these
+buffers on the HW ring and start to receive packets.
+
+If the flag is set for the TX ring, it means that the application
+needs to explicitly notify the kernel to send any packets put on the
+TX ring. This can be accomplished either by a poll() call, as in the
+RX path, or by calling sendto().
+
+An example of how to use this flag can be found in
+samples/bpf/xdpsock_user.c. An example with the use of libbpf helpers
+would look like this for the TX path:
+
+.. code-block:: c
+
+ if (xsk_ring_prod__needs_wakeup(&my_tx_ring))
+ sendto(xsk_socket__fd(xsk_handle), NULL, 0, MSG_DONTWAIT, NULL, 0);
+
+I.e., only use the syscall if the flag is set.
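+
+A corresponding sketch for the RX path, assuming a pollfd array fds
+that already contains the socket's fd, could be:
+
+.. code-block:: c
+
+    if (xsk_ring_prod__needs_wakeup(&my_fill_ring))
+        poll(fds, 1, -1); /* wake the kernel so it can refill the HW ring */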
+
+We recommend that you always enable this mode as it usually leads to
+better performance especially if you run the application and the
+driver on the same core, but also if you use different cores for the
+application and the kernel driver, as it reduces the number of
+syscalls needed for the TX path.
+
+XDP_{RX|TX|UMEM_FILL|UMEM_COMPLETION}_RING setsockopts
+------------------------------------------------------
+
+These setsockopts set the number of descriptors that the RX, TX,
+FILL, and COMPLETION rings respectively should have. It is mandatory
+to set the size of at least one of the RX and TX rings. If you set
+both, you will be able to both receive and send traffic from your
+application, but if you only want to do one of them, you can save
+resources by only setting up one of them. Both the FILL ring and the
+COMPLETION ring are mandatory as you need to have a UMEM tied to your
+socket. But if the XDP_SHARED_UMEM flag is used, any socket after the
+first one does not have a UMEM and should in that case not have any
+FILL or COMPLETION rings created as the ones from the shared umem will
+be used. Note that the rings are single-producer single-consumer, so
+do not try to access them from multiple processes at the same
+time. See the XDP_SHARED_UMEM section.
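+
+A sketch of the raw setsockopt calls (fd is the AF_XDP socket, and the
+ring size is an arbitrary example; it must be a power of two):
+
+.. code-block:: c
+
+    int ndescs = 2048; /* example size, must be a power of two */
+
+    setsockopt(fd, SOL_XDP, XDP_RX_RING, &ndescs, sizeof(ndescs));
+    setsockopt(fd, SOL_XDP, XDP_UMEM_FILL_RING, &ndescs, sizeof(ndescs));
+    setsockopt(fd, SOL_XDP, XDP_UMEM_COMPLETION_RING, &ndescs, sizeof(ndescs));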
+
+In libbpf, you can create Rx-only and Tx-only sockets by supplying
+NULL to the rx and tx arguments, respectively, to the
+xsk_socket__create function.
+
+If you create a Tx-only socket, we recommend that you do not put any
+packets on the FILL ring. If you do this, drivers might think you are
+going to receive something when you in fact will not, and this can
+negatively impact performance.
+
+XDP_UMEM_REG setsockopt
+-----------------------
+
+This setsockopt registers a UMEM to a socket. This is the area that
+contains all the buffers that packets can reside in. The call takes a
+pointer to the beginning of this area and the size of it. Moreover, it
+also has a parameter called chunk_size that is the size that the UMEM is
+divided into. It can only be 2K or 4K at the moment. If you have a
+UMEM area that is 128K and a chunk size of 2K, this means that you
+will be able to hold a maximum of 128K / 2K = 64 packets in your UMEM
+area and that your largest packet size can be 2K.
+
+There is also an option to set the headroom of each single buffer in
+the UMEM. If you set this to N bytes, it means that the packet will
+start N bytes into the buffer leaving the first N bytes for the
+application to use. The final option is the flags field, but it will
+be dealt with in separate sections for each UMEM flag.
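+
+A registration sketch using the numbers from the example above (bufs
+is assumed to point to the start of the allocated area, and fd to be
+the AF_XDP socket):
+
+.. code-block:: c
+
+    struct xdp_umem_reg mr = {};
+
+    mr.addr = (__u64)(unsigned long)bufs;
+    mr.len = 128 * 1024;  /* the 128K area from the example */
+    mr.chunk_size = 2048; /* 2K chunks, so at most 64 packets */
+    mr.headroom = 0;
+
+    setsockopt(fd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr));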
+
+XDP_STATISTICS getsockopt
+-------------------------
+
+Gets drop statistics of a socket, which can be useful for debugging
+purposes. The supported statistics are shown below:
+
+.. code-block:: c
+
+ struct xdp_statistics {
+ __u64 rx_dropped; /* Dropped for reasons other than invalid desc */
+ __u64 rx_invalid_descs; /* Dropped due to invalid descriptor */
+ __u64 tx_invalid_descs; /* Dropped due to invalid descriptor */
+ };
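+
+Reading them is a plain getsockopt call, sketched here with fd being
+the AF_XDP socket:
+
+.. code-block:: c
+
+    struct xdp_statistics stats;
+    socklen_t optlen = sizeof(stats);
+
+    if (!getsockopt(fd, SOL_XDP, XDP_STATISTICS, &stats, &optlen))
+        printf("rx_dropped: %llu\n", (unsigned long long)stats.rx_dropped);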
+
+XDP_OPTIONS getsockopt
+----------------------
+
+Gets options from an XDP socket. The only one supported so far is
+XDP_OPTIONS_ZEROCOPY which tells you if zero-copy is on or not.
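+
+A sketch of querying it, again with fd being the AF_XDP socket:
+
+.. code-block:: c
+
+    struct xdp_options opts;
+    socklen_t optlen = sizeof(opts);
+
+    if (!getsockopt(fd, SOL_XDP, XDP_OPTIONS, &opts, &optlen) &&
+        (opts.flags & XDP_OPTIONS_ZEROCOPY))
+        printf("zero-copy is enabled\n");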
+
Usage
=====
-In order to use AF_XDP sockets there are two parts needed. The
+In order to use AF_XDP sockets, two parts are needed: the
user-space application and the XDP program. For a complete setup and
usage example, please refer to the sample application. The user-space
side is xdpsock_user.c and the XDP side is part of libbpf.
-The XDP code sample included in tools/lib/bpf/xsk.c is the following::
+The XDP code sample included in tools/lib/bpf/xsk.c is the following:
+
+.. code-block:: c
SEC("xdp_sock") int xdp_sock_prog(struct xdp_md *ctx)
{
int index = ctx->rx_queue_index;
- // A set entry here means that the correspnding queue_id
+ // A set entry here means that the corresponding queue_id
// has an active AF_XDP socket bound to it.
if (bpf_map_lookup_elem(&xsks_map, &index))
return bpf_redirect_map(&xsks_map, index, 0);
@@ -238,7 +440,10 @@ The XDP code sample included in tools/lib/bpf/xsk.c is the following::
return XDP_PASS;
}
-Naive ring dequeue and enqueue could look like this::
+A simple but not so performant ring dequeue and enqueue could look
+like this:
+
+.. code-block:: c
// struct xdp_rxtx_ring {
// __u32 *producer;
@@ -287,17 +492,16 @@ Naive ring dequeue and enqueue could look like this::
return 0;
}
-
-For a more optimized version, please refer to the sample application.
+But please use the libbpf functions as they are optimized and ready to
+use. They will make your life easier.
Sample application
==================
There is a xdpsock benchmarking/test application included that
-demonstrates how to use AF_XDP sockets with both private and shared
-UMEMs. Say that you would like your UDP traffic from port 4242 to end
-up in queue 16, that we will enable AF_XDP on. Here, we use ethtool
-for this::
+demonstrates how to use AF_XDP sockets with private UMEMs. Say that
+you would like your UDP traffic from port 4242 to end up in queue 16,
+that we will enable AF_XDP on. Here, we use ethtool for this::
ethtool -N p3p2 rx-flow-hash udp4 fn
ethtool -N p3p2 flow-type udp4 src-port 4242 dst-port 4242 \
@@ -311,13 +515,18 @@ using::
For XDP_SKB mode, use the switch "-S" instead of "-N" and all options
can be displayed with "-h", as usual.
+This sample application uses libbpf to make the setup and usage of
+AF_XDP simpler. If you want to know how the raw uapi of AF_XDP is
+really used to make something more advanced, take a look at the libbpf
+code in tools/lib/bpf/xsk.[ch].
+
FAQ
=======
Q: I am not seeing any traffic on the socket. What am I doing wrong?
A: When a netdev of a physical NIC is initialized, Linux usually
- allocates one Rx and Tx queue pair per core. So on a 8 core system,
+ allocates one RX and TX queue pair per core. So on an 8 core system,
queue ids 0 to 7 will be allocated, one per core. In the AF_XDP
bind call or the xsk_socket__create libbpf function call, you
specify a specific queue id to bind to and it is only the traffic
@@ -343,9 +552,21 @@ A: When a netdev of a physical NIC is initialized, Linux usually
sudo ethtool -N <interface> flow-type udp4 src-port 4242 dst-port \
4242 action 2
- A number of other ways are possible all up to the capabilitites of
+ A number of other ways are possible, all up to the capabilities of
the NIC you have.
+Q: Can I use the XSKMAP to implement a switch between different umems
+ in copy mode?
+
+A: The short answer is no, that is not supported at the moment. The
+ XSKMAP can only be used to switch traffic coming in on queue id X
+ to sockets bound to the same queue id X. The XSKMAP can contain
+ sockets bound to different queue ids, for example X and Y, but only
+ traffic coming in from queue id Y can be directed to sockets bound
+ to the same queue id Y. In zero-copy mode, you should use the
+ switch, or other distribution mechanism, in your NIC to direct
+ traffic to the correct queue id and socket.
+
Credits
=======
diff --git a/Documentation/networking/device_drivers/aquantia/atlantic.txt b/Documentation/networking/device_drivers/aquantia/atlantic.txt
index d235cbaeccc6..2013fcedc2da 100644
--- a/Documentation/networking/device_drivers/aquantia/atlantic.txt
+++ b/Documentation/networking/device_drivers/aquantia/atlantic.txt
@@ -1,5 +1,5 @@
-aQuantia AQtion Driver for the aQuantia Multi-Gigabit PCI Express Family of
-Ethernet Adapters
+Marvell (Aquantia) AQtion Driver for the aQuantia Multi-Gigabit PCI Express
+Family of Ethernet Adapters
=============================================================================
Contents
@@ -325,6 +325,46 @@ Supported ethtool options
Example:
ethtool -N eth0 flow-type udp4 action 0 loc 32
+ UDP GSO hardware offload
+ ---------------------------------
+ UDP GSO allows boosting UDP tx rates by offloading UDP header allocation
+ into hardware. A special userspace socket option is required for this; it
+ can be validated with /kernel/tools/testing/selftests/net/
+
+ udpgso_bench_tx -u -4 -D 10.0.1.1 -s 6300 -S 100
+
+ This will send out 100-byte UDP packets formed from a single
+ 6300-byte user buffer.
+
+ UDP GSO is configured by:
+
+ ethtool -K eth0 tx-udp-segmentation on
+
+ Private flags (testing)
+ ---------------------------------
+
+ Atlantic driver supports private flags for hardware custom features:
+
+ $ ethtool --show-priv-flags ethX
+
+ Private flags for ethX:
+ DMASystemLoopback : off
+ PKTSystemLoopback : off
+ DMANetworkLoopback : off
+ PHYInternalLoopback: off
+ PHYExternalLoopback: off
+
+ Example:
+
+ $ ethtool --set-priv-flags ethX DMASystemLoopback on
+
+ DMASystemLoopback: DMA Host loopback.
+ PKTSystemLoopback: Packet buffer host loopback.
+ DMANetworkLoopback: Network side loopback on DMA block.
+ PHYInternalLoopback: Internal loopback on Phy.
+ PHYExternalLoopback: External loopback on Phy (with loopback ethernet cable).
+
+
Command Line Parameters
=======================
The following command line parameters are available on atlantic driver:
@@ -426,7 +466,7 @@ Support
If an issue is identified with the released source code on the supported
kernel with a supported adapter, email the specific information related
-to the issue to support@aquantia.com
+to the issue to aqn_support@marvell.com
License
=======
diff --git a/Documentation/networking/device_drivers/freescale/dpaa.txt b/Documentation/networking/device_drivers/freescale/dpaa.txt
index f88194f71c54..b06601ff9200 100644
--- a/Documentation/networking/device_drivers/freescale/dpaa.txt
+++ b/Documentation/networking/device_drivers/freescale/dpaa.txt
@@ -129,9 +129,9 @@ CONFIG_AQUANTIA_PHY=y
DPAA Ethernet Frame Processing
==============================
-On Rx, buffers for the incoming frames are retrieved from one of the three
-existing buffers pools. The driver initializes and seeds these, each with
-buffers of different sizes: 1KB, 2KB and 4KB.
+On Rx, buffers for the incoming frames are retrieved from the buffers found
+in the dedicated interface buffer pool. The driver initializes and seeds these
+with one page buffers.
On Tx, all transmitted frames are returned to the driver through Tx
confirmation frame queues. The driver is then responsible for freeing the
@@ -254,7 +254,7 @@ The following statistics are exported for each interface through ethtool:
The driver also exports the following information in sysfs:
- the FQ IDs for each FQ type
- /sys/devices/platform/dpaa-ethernet.0/net/<int>/fqids
+ /sys/devices/platform/soc/<addr>.fman/<addr>.ethernet/dpaa-ethernet.<id>/net/fm<nr>-mac<nr>/fqids
- - the IDs of the buffer pools in use
- /sys/devices/platform/dpaa-ethernet.0/net/<int>/bpids
+ - the ID of the buffer pool in use
+ /sys/devices/platform/soc/<addr>.fman/<addr>.ethernet/dpaa-ethernet.<id>/net/fm<nr>-mac<nr>/bpids
diff --git a/Documentation/networking/device_drivers/freescale/dpaa2/index.rst b/Documentation/networking/device_drivers/freescale/dpaa2/index.rst
index 67bd87fe6c53..ee40fcc5ddff 100644
--- a/Documentation/networking/device_drivers/freescale/dpaa2/index.rst
+++ b/Documentation/networking/device_drivers/freescale/dpaa2/index.rst
@@ -8,3 +8,4 @@ DPAA2 Documentation
overview
dpio-driver
ethernet-driver
+ mac-phy-support
diff --git a/Documentation/networking/device_drivers/freescale/dpaa2/mac-phy-support.rst b/Documentation/networking/device_drivers/freescale/dpaa2/mac-phy-support.rst
new file mode 100644
index 000000000000..51e6624fb774
--- /dev/null
+++ b/Documentation/networking/device_drivers/freescale/dpaa2/mac-phy-support.rst
@@ -0,0 +1,191 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. include:: <isonum.txt>
+
+=======================
+DPAA2 MAC / PHY support
+=======================
+
+:Copyright: |copy| 2019 NXP
+
+Overview
+--------
+
+The DPAA2 MAC / PHY support consists of a set of APIs that help DPAA2 network
+drivers (dpaa2-eth, dpaa2-ethsw) interact with the PHY library.
+
+DPAA2 Software Architecture
+---------------------------
+
+Among other DPAA2 objects, the fsl-mc bus exports DPNI objects (abstracting a
+network interface) and DPMAC objects (abstracting a MAC). The dpaa2-eth driver
+probes on the DPNI object and connects to and configures a DPMAC object with
+the help of phylink.
+
+Data connections may be established between a DPNI and a DPMAC, or between two
+DPNIs. Depending on the connection type, the netif_carrier_[on/off] is handled
+directly by the dpaa2-eth driver or by phylink.
+
+.. code-block:: none
+
+ Sources of abstracted link state information presented by the MC firmware
+
+ +--------------------------------------+
+ +------------+ +---------+ | xgmac_mdio |
+ | net_device | | phylink |--| +-----+ +-----+ +-----+ +-----+ |
+ +------------+ +---------+ | | PHY | | PHY | | PHY | | PHY | |
+ | | | +-----+ +-----+ +-----+ +-----+ |
+ +------------------------------------+ | External MDIO bus |
+ | dpaa2-eth | +--------------------------------------+
+ +------------------------------------+
+ | | Linux
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ | | MC firmware
+ | /| V
+ +----------+ / | +----------+
+ | | / | | |
+ | | | | | |
+ | DPNI |<------| |<------| DPMAC |
+ | | | | | |
+ | | \ |<---+ | |
+ +----------+ \ | | +----------+
+ \| |
+ |
+ +--------------------------------------+
+ | MC firmware polling MAC PCS for link |
+ | +-----+ +-----+ +-----+ +-----+ |
+ | | PCS | | PCS | | PCS | | PCS | |
+ | +-----+ +-----+ +-----+ +-----+ |
+ | Internal MDIO bus |
+ +--------------------------------------+
+
+
+Depending on an MC firmware configuration setting, each MAC may be in one of two modes:
+
+- DPMAC_LINK_TYPE_FIXED: the link state management is handled exclusively by
+ the MC firmware by polling the MAC PCS. Without the need to register a
+ phylink instance, the dpaa2-eth driver will not bind to the connected dpmac
+ object at all.
+
+- DPMAC_LINK_TYPE_PHY: The MC firmware is left waiting for link state update
+ events, but those are in fact passed strictly between the dpaa2-mac (based on
+ phylink) and its attached net_device driver (dpaa2-eth, dpaa2-ethsw),
+ effectively bypassing the firmware.
+
+Implementation
+--------------
+
+At probe time or when a DPNI's endpoint is dynamically changed, the dpaa2-eth
+driver is responsible for finding out whether the peer object is a DPMAC and,
+if so, for integrating it with PHYLINK using the dpaa2_mac_connect() API,
+which will do the following:
+
+ - look up the device tree for a PHYLINK-compatible binding (phy-handle)
+ - create a PHYLINK instance associated with the received net_device
+ - connect to the PHY using phylink_of_phy_connect()
+
+The following phylink_mac_ops callbacks are implemented:
+
+ - .validate() will populate the supported linkmodes with the MAC capabilities
+ only when the phy_interface_t is RGMII_* (at the moment, this is the only
+ link type supported by the driver).
+
+ - .mac_config() will configure the MAC in the new configuration using the
+ dpmac_set_link_state() MC firmware API.
+
+ - .mac_link_up() / .mac_link_down() will update the MAC link using the same
+ API described above.
+
+At driver unbind() or when the DPNI object is disconnected from the DPMAC, the
+dpaa2-eth driver calls dpaa2_mac_disconnect() which will, in turn, disconnect
+from the PHY and destroy the PHYLINK instance.
+
+In case of a DPNI-DPMAC connection, an 'ip link set dev eth0 up' would start
+the following sequence of operations:
+
+(1) phylink_start() called from .dev_open().
+(2) The .mac_config() and .mac_link_up() callbacks are called by PHYLINK.
+(3) In order to configure the HW MAC, the MC Firmware API
+ dpmac_set_link_state() is called.
+(4) The firmware will eventually set up the HW MAC in the new configuration.
+(5) A netif_carrier_on() call is made directly from PHYLINK on the associated
+ net_device.
+(6) The dpaa2-eth driver handles the LINK_STATE_CHANGE irq in order to
+ enable/disable Rx taildrop based on the pause frame settings.
+
+.. code-block:: none
+
+ +---------+ +---------+
+ | PHYLINK |-------------->| eth0 |
+ +---------+ (5) +---------+
+ (1) ^ |
+ | |
+ | v (2)
+ +-----------------------------------+
+ | dpaa2-eth |
+ +-----------------------------------+
+ | ^ (6)
+ | |
+ v (3) |
+ +---------+---------------+---------+
+ | DPMAC | | DPNI |
+ +---------+ +---------+
+ | MC Firmware |
+ +-----------------------------------+
+ |
+ |
+ v (4)
+ +-----------------------------------+
+ | HW MAC |
+ +-----------------------------------+
+
+In case of a DPNI-DPNI connection, a usual sequence of operations looks like
+the following:
+
+(1) ip link set dev eth0 up
+(2) The dpni_enable() MC API is called on the associated fsl_mc_device.
+(3) ip link set dev eth1 up
+(4) The dpni_enable() MC API is called on the associated fsl_mc_device.
+(5) The LINK_STATE_CHANGED irq is received by both instances of the dpaa2-eth
+ driver because now the operational link state is up.
+(6) The netif_carrier_on() is called on the exported net_device from
+ link_state_update().
+
+.. code-block:: none
+
+ +---------+ +---------+
+ | eth0 | | eth1 |
+ +---------+ +---------+
+ | ^ ^ |
+ | | | |
+ (1) v | (6) (6) | v (3)
+ +---------+ +---------+
+ |dpaa2-eth| |dpaa2-eth|
+ +---------+ +---------+
+ | ^ ^ |
+ | | | |
+ (2) v | (5) (5) | v (4)
+ +---------+---------------+---------+
+ | DPNI | | DPNI |
+ +---------+ +---------+
+ | MC Firmware |
+ +-----------------------------------+
+
+
+Exported API
+------------
+
+Any DPAA2 driver that drives endpoints of DPMAC objects should service its
+_EVENT_ENDPOINT_CHANGED irq and connect/disconnect from the associated DPMAC
+when necessary using the below listed API::
+
+ - int dpaa2_mac_connect(struct dpaa2_mac *mac);
+ - void dpaa2_mac_disconnect(struct dpaa2_mac *mac);
+
+A phylink integration is necessary only when the partner DPMAC is not of TYPE_FIXED.
+One can check for this condition using the below API::
+
+ - bool dpaa2_mac_is_type_fixed(struct fsl_mc_device *dpmac_dev,
+                                struct fsl_mc_io *mc_io);
+
+Before connecting to a MAC, the caller must allocate and populate the
+dpaa2_mac structure with the associated net_device, a pointer to the MC portal
+to be used and the actual fsl_mc_device structure of the DPMAC.
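+
+A hedged sketch of this sequence (the dpaa2_mac field names below are
+assumptions for illustration, not a definitive API)::
+
+  struct dpaa2_mac *mac;
+
+  if (dpaa2_mac_is_type_fixed(dpmac_dev, mc_io))
+          return 0;       /* the MC firmware handles the link itself */
+
+  mac = kzalloc(sizeof(*mac), GFP_KERNEL);
+  if (!mac)
+          return -ENOMEM;
+
+  mac->mc_dev = dpmac_dev;        /* fsl_mc_device of the DPMAC */
+  mac->mc_io = mc_io;             /* MC portal to be used */
+  mac->net_dev = net_dev;         /* associated net_device */
+
+  err = dpaa2_mac_connect(mac);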
diff --git a/Documentation/networking/device_drivers/mellanox/mlx5.rst b/Documentation/networking/device_drivers/mellanox/mlx5.rst
index d071c6b49e1f..7599dceba9f1 100644
--- a/Documentation/networking/device_drivers/mellanox/mlx5.rst
+++ b/Documentation/networking/device_drivers/mellanox/mlx5.rst
@@ -154,6 +154,27 @@ User command examples:
values:
cmode runtime value smfs
+enable_roce: RoCE enablement state
+----------------------------------
+RoCE enablement state controls driver support for RoCE traffic.
+When RoCE is disabled, there is no gid table, only raw ethernet QPs are
+supported and traffic on the well-known UDP RoCE port is handled as raw
+ethernet traffic.
+
+To change the RoCE enablement state, a user must change the driverinit cmode
+value and run devlink reload.
+
+User command examples:
+
+- Disable RoCE::
+
+ $ devlink dev param set pci/0000:06:00.0 name enable_roce value false cmode driverinit
+ $ devlink dev reload pci/0000:06:00.0
+
+- Read RoCE enablement state::
+
+ $ devlink dev param show pci/0000:06:00.0 name enable_roce
+ pci/0000:06:00.0:
+ name enable_roce type generic
+ values:
+ cmode driverinit value true
Devlink health reporters
========================
diff --git a/Documentation/networking/device_drivers/ti/cpsw_switchdev.txt b/Documentation/networking/device_drivers/ti/cpsw_switchdev.txt
new file mode 100644
index 000000000000..5c8cee17fca9
--- /dev/null
+++ b/Documentation/networking/device_drivers/ti/cpsw_switchdev.txt
@@ -0,0 +1,209 @@
+* Texas Instruments CPSW switchdev based ethernet driver 2.0
+
+- Port renaming
+On older udev versions, renaming of ethX to swXpY is not automatically
+supported. To rename via udev, first obtain the switch id:
+
+ip -d link show dev sw0p1 | grep switchid
+
+and use it in a udev rule such as:
+
+SUBSYSTEM=="net", ACTION=="add", ATTR{phys_switch_id}==<switchid>, \
+ ATTR{phys_port_name}!="", NAME="sw0$attr{phys_port_name}"
+
+
+====================
+# Dual mac mode
+====================
+- The new (cpsw_new.c) driver is operating in dual-emac mode by default, thus
+working as 2 individual network interfaces. Main differences from legacy CPSW
+driver are:
+ - optimized promiscuous mode: The P0_UNI_FLOOD (both ports) is enabled in
+addition to ALLMULTI (current port) instead of ALE_BYPASS.
+So, ports in promiscuous mode keep the ability to do mcast and vlan
+filtering, which provides significant benefits when ports are joined to the
+same bridge without enabling "switch" mode, or to different bridges.
+ - learning is disabled on ports as it makes little sense for
+   segregated ports - there is no forwarding in HW.
+ - enabled basic support for devlink.
+
+ devlink dev show
+ platform/48484000.switch
+
+ devlink dev param show
+ platform/48484000.switch:
+ name switch_mode type driver-specific
+ values:
+ cmode runtime value false
+ name ale_bypass type driver-specific
+ values:
+ cmode runtime value false
+
+Devlink configuration parameters
+====================
+See Documentation/networking/devlink-params-ti-cpsw-switch.txt
+
+====================
+# Bridging in dual mac mode
+====================
+The dual_mac mode requires two vids to be reserved for internal purposes,
+which, by default, equal the CPSW port numbers. As a result, the bridge has
+to be configured in vlan unaware mode or the default_pvid has to be adjusted.
+
+ ip link add name br0 type bridge
+ ip link set dev br0 type bridge vlan_filtering 0
+ echo 0 > /sys/class/net/br0/bridge/default_pvid
+ ip link set dev sw0p1 master br0
+ ip link set dev sw0p2 master br0
+ - or -
+ ip link add name br0 type bridge
+ ip link set dev br0 type bridge vlan_filtering 0
+ echo 100 > /sys/class/net/br0/bridge/default_pvid
+ ip link set dev br0 type bridge vlan_filtering 1
+ ip link set dev sw0p1 master br0
+ ip link set dev sw0p2 master br0
+
+====================
+# Enabling "switch"
+====================
+The Switch mode can be enabled by configuring devlink driver parameter
+"switch_mode" to 1/true:
+ devlink dev param set platform/48484000.switch \
+ name switch_mode value 1 cmode runtime
+
+This can be done regardless of the state of the port netdev devices (UP/DOWN),
+but the port netdev devices have to be UP before joining the bridge to avoid
+overwriting the bridge configuration, as the CPSW switch driver completely
+reloads its configuration when the first port changes its state to UP.
+
+When both interfaces have joined the bridge, the CPSW switch driver will
+enable marking packets with the offload_fwd_mark flag unless "ale_bypass=0".
+
+All configuration is implemented via switchdev API.
+
+====================
+# Bridge setup
+====================
+ devlink dev param set platform/48484000.switch \
+ name switch_mode value 1 cmode runtime
+
+ ip link add name br0 type bridge
+ ip link set dev br0 type bridge ageing_time 1000
+ ip link set dev sw0p1 up
+ ip link set dev sw0p2 up
+ ip link set dev sw0p1 master br0
+ ip link set dev sw0p2 master br0
+ [*] bridge vlan add dev br0 vid 1 pvid untagged self
+
+[*] if vlan_filtering=1. where default_pvid=1
+
+=================
+# On/off STP
+=================
+ip link set dev BRDEV type bridge stp_state 1/0
+
+Note. Steps [*] are mandatory.
+
+====================
+# VLAN configuration
+====================
+bridge vlan add dev br0 vid 1 pvid untagged self <---- add cpu port to VLAN 1
+
+Note. This step is mandatory for bridge/default_pvid.
+
+=================
+# Add extra VLANs
+=================
+ 1. untagged:
+ bridge vlan add dev sw0p1 vid 100 pvid untagged master
+ bridge vlan add dev sw0p2 vid 100 pvid untagged master
+ bridge vlan add dev br0 vid 100 pvid untagged self <---- Add cpu port to VLAN100
+
+ 2. tagged:
+ bridge vlan add dev sw0p1 vid 100 master
+ bridge vlan add dev sw0p2 vid 100 master
+ bridge vlan add dev br0 vid 100 pvid tagged self <---- Add cpu port to VLAN100
+
+====
+FDBs
+====
+FDBs are automatically added on the appropriate switch port upon detection
+
+Manually adding FDBs:
+bridge fdb add aa:bb:cc:dd:ee:ff dev sw0p1 master vlan 100
+bridge fdb add aa:bb:cc:dd:ee:fe dev sw0p2 master <---- Add on all VLANs
+
+====
+MDBs
+====
+MDBs are automatically added on the appropriate switch port upon detection
+
+Manually adding MDBs:
+bridge mdb add dev br0 port sw0p1 grp 239.1.1.1 permanent vid 100
+bridge mdb add dev br0 port sw0p1 grp 239.1.1.1 permanent <---- Add on all VLANs
+
+==================
+Multicast flooding
+==================
+CPU port mcast_flooding is always on
+
+Turning flooding on/off on switch ports:
+bridge link set dev sw0p1 mcast_flood on/off
+
+==================
+Access and Trunk port
+==================
+ bridge vlan add dev sw0p1 vid 100 pvid untagged master
+ bridge vlan add dev sw0p2 vid 100 master
+
+
+ bridge vlan add dev br0 vid 100 self
+ ip link add link br0 name br0.100 type vlan id 100
+
+ Note. Setting a PVID on the bridge device itself works only for the
+ default VLAN (default_pvid).
+
+=====================
+ NFS
+=====================
+The only way for NFS to work is by chrooting to a minimal environment when
+a switch configuration that will affect connectivity is needed.
+Assume you are booting over NFS with the eth1 interface (the script is hacky;
+it's just there to prove NFS is doable).
+
+setup.sh:
+#!/bin/sh
+mkdir proc
+mount -t proc none /proc
+ifconfig br0 > /dev/null
+if [ $? -ne 0 ]; then
+ echo "Setting up bridge"
+ ip link add name br0 type bridge
+ ip link set dev br0 type bridge ageing_time 1000
+ ip link set dev br0 type bridge vlan_filtering 1
+
+ ip link set eth1 down
+ ip link set eth1 name sw0p1
+ ip link set dev sw0p1 up
+ ip link set dev sw0p2 up
+ ip link set dev sw0p2 master br0
+ ip link set dev sw0p1 master br0
+ bridge vlan add dev br0 vid 1 pvid untagged self
+ ifconfig sw0p1 0.0.0.0
+    udhcpc -i br0
+fi
+umount /proc
+
+run_nfs.sh:
+#!/bin/sh
+mkdir /tmp/root/bin -p
+mkdir /tmp/root/lib -p
+
+cp -r /lib/ /tmp/root/
+cp -r /bin/ /tmp/root/
+cp /sbin/ip /tmp/root/bin
+cp /sbin/bridge /tmp/root/bin
+cp /sbin/ifconfig /tmp/root/bin
+cp /sbin/udhcpc /tmp/root/bin
+cp /path/to/setup.sh /tmp/root/bin
+chroot /tmp/root/ busybox sh /bin/setup.sh
+
+run ./run_nfs.sh
diff --git a/Documentation/networking/devlink-params-mlx5.txt b/Documentation/networking/devlink-params-mlx5.txt
new file mode 100644
index 000000000000..5071467118bd
--- /dev/null
+++ b/Documentation/networking/devlink-params-mlx5.txt
@@ -0,0 +1,17 @@
+flow_steering_mode [DEVICE, DRIVER-SPECIFIC]
+ Controls the flow steering mode of the driver.
+ Two modes are supported:
+ 1. 'dmfs' - Device managed flow steering.
+	2. 'smfs' - Software/Driver managed flow steering.
+ In DMFS mode, the HW steering entities are created and
+ managed through the Firmware.
+	In SMFS mode, the HW steering entities are created and
+	managed directly by the driver, without firmware
+	intervention.
+ Type: String
+ Configuration mode: runtime
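+
+	A hedged usage example (the device handle follows the
+	mlx5.rst examples):
+
+	$ devlink dev param set pci/0000:06:00.0 \
+	      name flow_steering_mode value smfs cmode runtime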
+
+enable_roce [DEVICE, GENERIC]
+ Enable handling of RoCE traffic in the device.
+	Enabled by default.
+ Configuration mode: driverinit
diff --git a/Documentation/networking/devlink-params-mv88e6xxx.txt b/Documentation/networking/devlink-params-mv88e6xxx.txt
new file mode 100644
index 000000000000..21c4b3556ef2
--- /dev/null
+++ b/Documentation/networking/devlink-params-mv88e6xxx.txt
@@ -0,0 +1,7 @@
+ATU_hash [DEVICE, DRIVER-SPECIFIC]
+ Select one of four possible hashing algorithms for
+ MAC addresses in the Address Translation Unit.
+ A value of 3 seems to work better than the default of
+ 1 when many MAC addresses have the same OUI.
+ Configuration mode: runtime
+ Type: u8. 0-3 valid.
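+
+	A hedged usage example (the devlink device handle is
+	illustrative and depends on the platform):
+
+	$ devlink dev param set <bus>/<device> name ATU_hash \
+	      value 3 cmode runtime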
diff --git a/Documentation/networking/devlink-params-ti-cpsw-switch.txt b/Documentation/networking/devlink-params-ti-cpsw-switch.txt
new file mode 100644
index 000000000000..4037458499f7
--- /dev/null
+++ b/Documentation/networking/devlink-params-ti-cpsw-switch.txt
@@ -0,0 +1,10 @@
+ale_bypass [DEVICE, DRIVER-SPECIFIC]
+	Enables the ALE_CONTROL(4).BYPASS mode for debug purposes.
+	When enabled, all packets will be sent to the host port only.
+ Type: bool
+ Configuration mode: runtime
+
+switch_mode [DEVICE, DRIVER-SPECIFIC]
+ Enable switch mode
+ Type: bool
+ Configuration mode: runtime
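+
+	A hedged usage example (device handle as in cpsw_switchdev.txt):
+
+	$ devlink dev param set platform/48484000.switch \
+	      name ale_bypass value 1 cmode runtime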
diff --git a/Documentation/networking/devlink-params.txt b/Documentation/networking/devlink-params.txt
index ddba3e9b55b1..04e234e9acc9 100644
--- a/Documentation/networking/devlink-params.txt
+++ b/Documentation/networking/devlink-params.txt
@@ -65,3 +65,7 @@ reset_dev_on_drv_probe [DEVICE, GENERIC]
Reset only if device firmware can be found in the
filesystem.
Type: u8
+
+enable_roce [DEVICE, GENERIC]
+ Enable handling of RoCE traffic in the device.
+ Type: Boolean
diff --git a/Documentation/networking/devlink-trap.rst b/Documentation/networking/devlink-trap.rst
index 8e90a85f3bd5..dc9659ca06fa 100644
--- a/Documentation/networking/devlink-trap.rst
+++ b/Documentation/networking/devlink-trap.rst
@@ -162,6 +162,67 @@ be added to the following table:
- ``drop``
- Traps packets that the device decided to drop because they could not be
enqueued to a transmission queue which is full
+ * - ``non_ip``
+ - ``drop``
+ - Traps packets that the device decided to drop because they need to
+ undergo a layer 3 lookup, but are not IP or MPLS packets
+ * - ``uc_dip_over_mc_dmac``
+ - ``drop``
+ - Traps packets that the device decided to drop because they need to be
+ routed and they have a unicast destination IP and a multicast destination
+ MAC
+ * - ``dip_is_loopback_address``
+ - ``drop``
+ - Traps packets that the device decided to drop because they need to be
+ routed and their destination IP is the loopback address (i.e., 127.0.0.0/8
+ and ::1/128)
+ * - ``sip_is_mc``
+ - ``drop``
+ - Traps packets that the device decided to drop because they need to be
+ routed and their source IP is multicast (i.e., 224.0.0.0/8 and ff::/8)
+ * - ``sip_is_loopback_address``
+ - ``drop``
+ - Traps packets that the device decided to drop because they need to be
+ routed and their source IP is the loopback address (i.e., 127.0.0.0/8 and ::1/128)
+ * - ``ip_header_corrupted``
+ - ``drop``
+ - Traps packets that the device decided to drop because they need to be
+ routed and their IP header is corrupted: wrong checksum, wrong IP version
+ or too short Internet Header Length (IHL)
+ * - ``ipv4_sip_is_limited_bc``
+ - ``drop``
+ - Traps packets that the device decided to drop because they need to be
+ routed and their source IP is limited broadcast (i.e., 255.255.255.255/32)
+ * - ``ipv6_mc_dip_reserved_scope``
+ - ``drop``
+ - Traps IPv6 packets that the device decided to drop because they need to
+ be routed and their IPv6 multicast destination IP has a reserved scope
+ (i.e., ffx0::/16)
+ * - ``ipv6_mc_dip_interface_local_scope``
+ - ``drop``
+ - Traps IPv6 packets that the device decided to drop because they need to
+ be routed and their IPv6 multicast destination IP has an interface-local scope
+ (i.e., ffx1::/16)
+ * - ``mtu_value_is_too_small``
+ - ``exception``
+ - Traps packets that should have been routed by the device, but were bigger
+ than the MTU of the egress interface
+ * - ``unresolved_neigh``
+ - ``exception``
+ - Traps packets that did not have a matching IP neighbour after routing
+ * - ``mc_reverse_path_forwarding``
+ - ``exception``
+ - Traps multicast IP packets that failed reverse-path forwarding (RPF)
+ check during multicast routing
+ * - ``reject_route``
+ - ``exception``
+ - Traps packets that hit reject routes (i.e., "unreachable", "prohibit")
+ * - ``ipv4_lpm_miss``
+ - ``exception``
+ - Traps unicast IPv4 packets that did not match any route
+ * - ``ipv6_lpm_miss``
+ - ``exception``
+ - Traps unicast IPv6 packets that did not match any route
Driver-specific Packet Traps
============================
diff --git a/Documentation/networking/filter.txt b/Documentation/networking/filter.txt
index 319e5e041f38..c4a328f2d57a 100644
--- a/Documentation/networking/filter.txt
+++ b/Documentation/networking/filter.txt
@@ -770,10 +770,10 @@ Some core changes of the new internal format:
callq foo
mov %rax,%r13
mov %rbx,%rdi
- mov $0x2,%esi
- mov $0x3,%edx
- mov $0x4,%ecx
- mov $0x5,%r8d
+ mov $0x6,%esi
+ mov $0x7,%edx
+ mov $0x8,%ecx
+ mov $0x9,%r8d
callq bar
add %r13,%rax
mov -0x228(%rbp),%rbx
diff --git a/Documentation/networking/index.rst b/Documentation/networking/index.rst
index d4dca42910d0..5acab1290e03 100644
--- a/Documentation/networking/index.rst
+++ b/Documentation/networking/index.rst
@@ -33,6 +33,7 @@ Contents:
scaling
tls
tls-offload
+ nfc
.. only:: subproject and html
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 8d4ad1d1ae26..099a55bd1432 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -2091,6 +2091,28 @@ pf_enable - INTEGER
Default: 1
+pf_expose - INTEGER
+	Unset or enable/disable pf (pf is short for potentially failed) state
+	exposure. Applications can control the exposure of the PF path state
+	in the SCTP_PEER_ADDR_CHANGE event and the SCTP_GET_PEER_ADDR_INFO
+	sockopt. When it's unset, no SCTP_PEER_ADDR_CHANGE event with
+	SCTP_ADDR_PF state will be sent, but a SCTP_PF-state transport info
+	can still be retrieved via the SCTP_GET_PEER_ADDR_INFO sockopt. When
+	it's enabled, a SCTP_PEER_ADDR_CHANGE event will be sent for a
+	transport becoming SCTP_PF state, and a SCTP_PF-state transport info
+	can be retrieved via the SCTP_GET_PEER_ADDR_INFO sockopt. When it's
+	disabled, no SCTP_PEER_ADDR_CHANGE event will be sent, and -EACCES is
+	returned when trying to get a SCTP_PF-state transport info via the
+	SCTP_GET_PEER_ADDR_INFO sockopt.
+
+	0: Unset pf state exposure, compatible with old applications.
+
+ 1: Disable pf state exposure.
+
+ 2: Enable pf state exposure.
+
+ Default: 0
+
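+	For example, to enable pf state exposure (assuming the sysctl is
+	exposed under net.sctp, like the other SCTP parameters in this file):
+
+		# sysctl -w net.sctp.pf_expose=2
+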
addip_noauth_enable - BOOLEAN
Dynamic Address Reconfiguration (ADD-IP) requires the use of
authentication to protect the operations of adding or removing new
@@ -2173,6 +2195,18 @@ pf_retrans - INTEGER
Default: 0
+ps_retrans - INTEGER
+	Primary.Switchover.Max.Retrans (PSMR) is a tunable parameter defined
+	in section 5 "Primary Path Switchover" of RFC 7829. The primary path
+	will be changed to another active path when the path error counter on
+	the old primary path exceeds PSMR, so that "the SCTP sender is allowed
+	to continue data transmission on a new working path even when the old
+	primary destination address becomes active again". Note this feature
+	is disabled by default ('ps_retrans' is initialized per netns to
+	0xffff), and its value can't be set below 'pf_retrans' via sysctl.
+
+ Default: 0xffff
+
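+	For example, to switch the primary path over after 5 path errors
+	(again assuming the net.sctp sysctl prefix):
+
+		# sysctl -w net.sctp.ps_retrans=5
+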
rto_initial - INTEGER
The initial round trip timeout value in milliseconds that will be used
in calculating round trip times. This is the initial time interval
diff --git a/Documentation/networking/nfc.txt b/Documentation/networking/nfc.rst
index b24c29bdae27..9aab3a88c9b2 100644
--- a/Documentation/networking/nfc.txt
+++ b/Documentation/networking/nfc.rst
@@ -1,3 +1,4 @@
+===================
Linux NFC subsystem
===================
@@ -8,7 +9,7 @@ This document covers the architecture overview, the device driver interface
description and the userspace interface description.
Architecture overview
----------------------
+=====================
The NFC subsystem is responsible for:
- NFC adapters management;
@@ -25,33 +26,34 @@ The control operations are available to userspace via generic netlink.
The low-level data exchange interface is provided by the new socket family
PF_NFC. The NFC_SOCKPROTO_RAW performs raw communication with NFC targets.
-
- +--------------------------------------+
- | USER SPACE |
- +--------------------------------------+
- ^ ^
- | low-level | control
- | data exchange | operations
- | |
- | v
- | +-----------+
- | AF_NFC | netlink |
- | socket +-----------+
- | raw ^
- | |
- v v
- +---------+ +-----------+
- | rawsock | <--------> | core |
- +---------+ +-----------+
- ^
- |
- v
- +-----------+
- | driver |
- +-----------+
+.. code-block:: none
+
+ +--------------------------------------+
+ | USER SPACE |
+ +--------------------------------------+
+ ^ ^
+ | low-level | control
+ | data exchange | operations
+ | |
+ | v
+ | +-----------+
+ | AF_NFC | netlink |
+ | socket +-----------+
+ | raw ^
+ | |
+ v v
+ +---------+ +-----------+
+ | rawsock | <--------> | core |
+ +---------+ +-----------+
+ ^
+ |
+ v
+ +-----------+
+ | driver |
+ +-----------+
Device Driver Interface
------------------------
+=======================
When registering on the NFC subsystem, the device driver must inform the core
of the set of supported NFC protocols and the set of ops callbacks. The ops
@@ -64,7 +66,7 @@ callbacks that must be implemented are the following:
* data_exchange - send data and receive the response (transceive operation)
Userspace interface
---------------------
+===================
The userspace interface is divided in control operations and low-level data
exchange operation.
@@ -82,7 +84,7 @@ The operations are composed by commands and events, all listed below:
* NFC_EVENT_DEVICE_ADDED - reports an NFC device addition
* NFC_EVENT_DEVICE_REMOVED - reports an NFC device removal
* NFC_EVENT_TARGETS_FOUND - reports START_POLL results when 1 or more targets
-are found
+ are found
The user must call START_POLL to poll for NFC targets, passing the desired NFC
protocols through NFC_ATTR_PROTOCOLS attribute. The device remains in polling
@@ -101,14 +103,14 @@ it's closed.
LOW-LEVEL DATA EXCHANGE:
The userspace must use PF_NFC sockets to perform any data communication with
-targets. All NFC sockets use AF_NFC:
-
-struct sockaddr_nfc {
- sa_family_t sa_family;
- __u32 dev_idx;
- __u32 target_idx;
- __u32 nfc_protocol;
-};
+targets. All NFC sockets use AF_NFC::
+
+ struct sockaddr_nfc {
+ sa_family_t sa_family;
+ __u32 dev_idx;
+ __u32 target_idx;
+ __u32 nfc_protocol;
+ };
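+
+A hedged userspace sketch filling this address and connecting the raw
+socket (device/target indexes and the protocol value are illustrative;
+see the 'connect' description below)::
+
+  struct sockaddr_nfc addr = {
+          .sa_family = AF_NFC,
+          .dev_idx = 0,               /* NFC adapter index */
+          .target_idx = 1,            /* from NFC_EVENT_TARGETS_FOUND */
+          .nfc_protocol = NFC_PROTO_MIFARE,
+  };
+  int fd = socket(AF_NFC, SOCK_SEQPACKET, NFC_SOCKPROTO_RAW);
+
+  if (fd >= 0)
+          connect(fd, (struct sockaddr *)&addr, sizeof(addr));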
To establish a connection with one target, the user must create an
NFC_SOCKPROTO_RAW socket and call the 'connect' syscall with the sockaddr_nfc
diff --git a/Documentation/networking/phy.rst b/Documentation/networking/phy.rst
index a689966bc4be..cda1c0a0492a 100644
--- a/Documentation/networking/phy.rst
+++ b/Documentation/networking/phy.rst
@@ -352,7 +352,8 @@ Fills the phydev structure with up-to-date information about the current
settings in the PHY.
::
- int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd);
+ int phy_ethtool_ksettings_set(struct phy_device *phydev,
+ const struct ethtool_link_ksettings *cmd);
Ethtool convenience functions.
::
diff --git a/Documentation/networking/tls.rst b/Documentation/networking/tls.rst
index 5bcbf75e2025..8cb2cd4e2a80 100644
--- a/Documentation/networking/tls.rst
+++ b/Documentation/networking/tls.rst
@@ -213,3 +213,29 @@ A patchset to OpenSSL to use ktls as the record layer is
of calling send directly after a handshake using gnutls.
Since it doesn't implement a full record layer, control
messages are not supported.
+
+Statistics
+==========
+
+TLS implementation exposes the following per-namespace statistics
+(``/proc/net/tls_stat``):
+
+- ``TlsCurrTxSw``, ``TlsCurrRxSw`` -
+ number of TX and RX sessions currently installed where host handles
+ cryptography
+
+- ``TlsCurrTxDevice``, ``TlsCurrRxDevice`` -
+ number of TX and RX sessions currently installed where NIC handles
+ cryptography
+
+- ``TlsTxSw``, ``TlsRxSw`` -
+ number of TX and RX sessions opened with host cryptography
+
+- ``TlsTxDevice``, ``TlsRxDevice`` -
+ number of TX and RX sessions opened with NIC cryptography
+
+- ``TlsDecryptError`` -
+ record decryption failed (e.g. due to incorrect authentication tag)
+
+- ``TlsDeviceRxResync`` -
+ number of RX resyncs sent to NICs handling cryptography
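+
+The current counters can be read from the proc file directly, e.g.::
+
+  $ cat /proc/net/tls_stat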
diff --git a/Documentation/power/drivers-testing.rst b/Documentation/power/drivers-testing.rst
index e53f1999fc39..d77d2894f9fe 100644
--- a/Documentation/power/drivers-testing.rst
+++ b/Documentation/power/drivers-testing.rst
@@ -39,9 +39,10 @@ c) Compile the driver directly into the kernel and try the test modes of
d) Attempt to hibernate with the driver compiled directly into the kernel
in the "reboot", "shutdown" and "platform" modes.
-e) Try the test modes of suspend (see: Documentation/power/basic-pm-debugging.rst,
- 2). [As far as the STR tests are concerned, it should not matter whether or
- not the driver is built as a module.]
+e) Try the test modes of suspend (see:
+ Documentation/power/basic-pm-debugging.rst, 2). [As far as the STR tests are
+ concerned, it should not matter whether or not the driver is built as a
+ module.]
f) Attempt to suspend to RAM using the s2ram tool with the driver loaded
(see: Documentation/power/basic-pm-debugging.rst, 2).
diff --git a/Documentation/power/freezing-of-tasks.rst b/Documentation/power/freezing-of-tasks.rst
index ef110fe55e82..8bd693399834 100644
--- a/Documentation/power/freezing-of-tasks.rst
+++ b/Documentation/power/freezing-of-tasks.rst
@@ -215,30 +215,31 @@ VI. Are there any precautions to be taken to prevent freezing failures?
Yes, there are.
-First of all, grabbing the 'system_transition_mutex' lock to mutually exclude a piece of code
-from system-wide sleep such as suspend/hibernation is not encouraged.
-If possible, that piece of code must instead hook onto the suspend/hibernation
-notifiers to achieve mutual exclusion. Look at the CPU-Hotplug code
-(kernel/cpu.c) for an example.
-
-However, if that is not feasible, and grabbing 'system_transition_mutex' is deemed necessary,
-it is strongly discouraged to directly call mutex_[un]lock(&system_transition_mutex) since
-that could lead to freezing failures, because if the suspend/hibernate code
-successfully acquired the 'system_transition_mutex' lock, and hence that other entity failed
-to acquire the lock, then that task would get blocked in TASK_UNINTERRUPTIBLE
-state. As a consequence, the freezer would not be able to freeze that task,
-leading to freezing failure.
+First of all, grabbing the 'system_transition_mutex' lock to mutually exclude a
+piece of code from system-wide sleep such as suspend/hibernation is not
+encouraged. If possible, that piece of code must instead hook onto the
+suspend/hibernation notifiers to achieve mutual exclusion. Look at the
+CPU-Hotplug code (kernel/cpu.c) for an example.
+
+However, if that is not feasible, and grabbing 'system_transition_mutex' is
+deemed necessary, it is strongly discouraged to directly call
+mutex_[un]lock(&system_transition_mutex) since that could lead to freezing
+failures, because if the suspend/hibernate code successfully acquired the
+'system_transition_mutex' lock, and hence that other entity failed to acquire
+the lock, then that task would get blocked in TASK_UNINTERRUPTIBLE state. As a
+consequence, the freezer would not be able to freeze that task, leading to
+freezing failure.
However, the [un]lock_system_sleep() APIs are safe to use in this scenario,
since they ask the freezer to skip freezing this task, since it is anyway
-"frozen enough" as it is blocked on 'system_transition_mutex', which will be released
-only after the entire suspend/hibernation sequence is complete.
-So, to summarize, use [un]lock_system_sleep() instead of directly using
+"frozen enough" as it is blocked on 'system_transition_mutex', which will be
+released only after the entire suspend/hibernation sequence is complete. So, to
+summarize, use [un]lock_system_sleep() instead of directly using
mutex_[un]lock(&system_transition_mutex). That would prevent freezing failures.
V. Miscellaneous
================
/sys/power/pm_freeze_timeout controls how long it will cost at most to freeze
-all user space processes or all freezable kernel threads, in unit of millisecond.
-The default value is 20000, with range of unsigned integer.
+all user space processes or all freezable kernel threads, in unit of
+millisecond. The default value is 20000, with range of unsigned integer.
diff --git a/Documentation/power/opp.rst b/Documentation/power/opp.rst
index 209c7613f5a4..e3cc4f349ea8 100644
--- a/Documentation/power/opp.rst
+++ b/Documentation/power/opp.rst
@@ -73,19 +73,21 @@ factors. Example usage: Thermal management or other exceptional situations where
SoC framework might choose to disable a higher frequency OPP to safely continue
operations until that OPP could be re-enabled if possible.
-OPP library facilitates this concept in it's implementation. The following
+OPP library facilitates this concept in its implementation. The following
operational functions operate only on available opps:
-opp_find_freq_{ceil, floor}, dev_pm_opp_get_voltage, dev_pm_opp_get_freq, dev_pm_opp_get_opp_count
+opp_find_freq_{ceil, floor}, dev_pm_opp_get_voltage, dev_pm_opp_get_freq,
+dev_pm_opp_get_opp_count
-dev_pm_opp_find_freq_exact is meant to be used to find the opp pointer which can then
-be used for dev_pm_opp_enable/disable functions to make an opp available as required.
+dev_pm_opp_find_freq_exact is meant to be used to find the opp pointer
+which can then be used for dev_pm_opp_enable/disable functions to make an
+opp available as required.
WARNING: Users of OPP library should refresh their availability count using
-get_opp_count if dev_pm_opp_enable/disable functions are invoked for a device, the
-exact mechanism to trigger these or the notification mechanism to other
-dependent subsystems such as cpufreq are left to the discretion of the SoC
-specific framework which uses the OPP library. Similar care needs to be taken
-care to refresh the cpufreq table in cases of these operations.
+get_opp_count if dev_pm_opp_enable/disable functions are invoked for a
+device, the exact mechanism to trigger these or the notification mechanism
+to other dependent subsystems such as cpufreq are left to the discretion of
+the SoC specific framework which uses the OPP library. Similar care needs
+to be taken to refresh the cpufreq table in case of these operations.
2. Initial OPP List Registration
================================
@@ -99,11 +101,11 @@ OPPs dynamically using the dev_pm_opp_enable / disable functions.
dev_pm_opp_add
Add a new OPP for a specific domain represented by the device pointer.
The OPP is defined using the frequency and voltage. Once added, the OPP
- is assumed to be available and control of it's availability can be done
- with the dev_pm_opp_enable/disable functions. OPP library internally stores
- and manages this information in the opp struct. This function may be
- used by SoC framework to define a optimal list as per the demands of
- SoC usage environment.
+ is assumed to be available and control of its availability can be done
+ with the dev_pm_opp_enable/disable functions. OPP library
+ internally stores and manages this information in the opp struct.
+ This function may be used by the SoC framework to define an optimal
+ list as per the demands of the SoC usage environment.
WARNING:
Do not use this function in interrupt context.
@@ -354,7 +356,7 @@ struct dev_pm_opp
struct device
This is used to identify a domain to the OPP layer. The
- nature of the device and it's implementation is left to the user of
+ nature of the device and its implementation is left to the user of
OPP library such as the SoC framework.
Overall, in a simplistic view, the data structure operations is represented as
diff --git a/Documentation/power/pci.rst b/Documentation/power/pci.rst
index 0e2ef7429304..51e0a493d284 100644
--- a/Documentation/power/pci.rst
+++ b/Documentation/power/pci.rst
@@ -426,12 +426,12 @@ pm->runtime_idle() callback.
2.4. System-Wide Power Transitions
----------------------------------
There are a few different types of system-wide power transitions, described in
-Documentation/driver-api/pm/devices.rst. Each of them requires devices to be handled
-in a specific way and the PM core executes subsystem-level power management
-callbacks for this purpose. They are executed in phases such that each phase
-involves executing the same subsystem-level callback for every device belonging
-to the given subsystem before the next phase begins. These phases always run
-after tasks have been frozen.
+Documentation/driver-api/pm/devices.rst. Each of them requires devices to be
+handled in a specific way and the PM core executes subsystem-level power
+management callbacks for this purpose. They are executed in phases such that
+each phase involves executing the same subsystem-level callback for every device
+belonging to the given subsystem before the next phase begins. These phases
+always run after tasks have been frozen.
2.4.1. System Suspend
^^^^^^^^^^^^^^^^^^^^^
@@ -636,12 +636,12 @@ System restore requires a hibernation image to be loaded into memory and the
pre-hibernation memory contents to be restored before the pre-hibernation system
activity can be resumed.
-As described in Documentation/driver-api/pm/devices.rst, the hibernation image is loaded
-into memory by a fresh instance of the kernel, called the boot kernel, which in
-turn is loaded and run by a boot loader in the usual way. After the boot kernel
-has loaded the image, it needs to replace its own code and data with the code
-and data of the "hibernated" kernel stored within the image, called the image
-kernel. For this purpose all devices are frozen just like before creating
+As described in Documentation/driver-api/pm/devices.rst, the hibernation image
+is loaded into memory by a fresh instance of the kernel, called the boot kernel,
+which in turn is loaded and run by a boot loader in the usual way. After the
+boot kernel has loaded the image, it needs to replace its own code and data with
+the code and data of the "hibernated" kernel stored within the image, called the
+image kernel. For this purpose all devices are frozen just like before creating
the image during hibernation, in the
prepare, freeze, freeze_noirq
@@ -691,8 +691,8 @@ controlling the runtime power management of their devices.
At the time of this writing there are two ways to define power management
callbacks for a PCI device driver, the recommended one, based on using a
-dev_pm_ops structure described in Documentation/driver-api/pm/devices.rst, and the
-"legacy" one, in which the .suspend(), .suspend_late(), .resume_early(), and
+dev_pm_ops structure described in Documentation/driver-api/pm/devices.rst, and
+the "legacy" one, in which the .suspend(), .suspend_late(), .resume_early(), and
.resume() callbacks from struct pci_driver are used. The legacy approach,
however, doesn't allow one to define runtime power management callbacks and is
not really suitable for any new drivers. Therefore it is not covered by this
diff --git a/Documentation/power/pm_qos_interface.rst b/Documentation/power/pm_qos_interface.rst
index 3097694fba69..0d62d506caf0 100644
--- a/Documentation/power/pm_qos_interface.rst
+++ b/Documentation/power/pm_qos_interface.rst
@@ -8,8 +8,8 @@ one of the parameters.
Two different PM QoS frameworks are available:
1. PM QoS classes for cpu_dma_latency
-2. the per-device PM QoS framework provides the API to manage the per-device latency
-constraints and PM QoS flags.
+2. The per-device PM QoS framework provides the API to manage the
+ per-device latency constraints and PM QoS flags.
Each parameters have defined units:
@@ -47,14 +47,14 @@ void pm_qos_add_request(handle, param_class, target_value):
pm_qos API functions.
void pm_qos_update_request(handle, new_target_value):
- Will update the list element pointed to by the handle with the new target value
- and recompute the new aggregated target, calling the notification tree if the
- target is changed.
+ Will update the list element pointed to by the handle with the new target
+ value and recompute the new aggregated target, calling the notification tree
+ if the target is changed.
void pm_qos_remove_request(handle):
- Will remove the element. After removal it will update the aggregate target and
- call the notification tree if the target was changed as a result of removing
- the request.
+ Will remove the element. After removal it will update the aggregate target
+ and call the notification tree if the target was changed as a result of
+ removing the request.
int pm_qos_request(param_class):
Returns the aggregated value for a given PM QoS class.
@@ -167,9 +167,9 @@ int dev_pm_qos_expose_flags(device, value)
change the value of the PM_QOS_FLAG_NO_POWER_OFF flag.
void dev_pm_qos_hide_flags(device)
- Drop the request added by dev_pm_qos_expose_flags() from the device's PM QoS list
- of flags and remove sysfs attribute pm_qos_no_power_off from the device's power
- directory.
+ Drop the request added by dev_pm_qos_expose_flags() from the device's PM QoS
+ list of flags and remove sysfs attribute pm_qos_no_power_off from the device's
+ power directory.
Notification mechanisms:
@@ -179,8 +179,8 @@ int dev_pm_qos_add_notifier(device, notifier, type):
Adds a notification callback function for the device for a particular request
type.
- The callback is called when the aggregated value of the device constraints list
- is changed.
+ The callback is called when the aggregated value of the device constraints
+ list is changed.
int dev_pm_qos_remove_notifier(device, notifier, type):
Removes the notification callback function for the device.
diff --git a/Documentation/power/runtime_pm.rst b/Documentation/power/runtime_pm.rst
index 2c2ec99b5088..ab8406c84254 100644
--- a/Documentation/power/runtime_pm.rst
+++ b/Documentation/power/runtime_pm.rst
@@ -268,8 +268,8 @@ defined in include/linux/pm.h:
`unsigned int runtime_auto;`
- if set, indicates that the user space has allowed the device driver to
power manage the device at run time via the /sys/devices/.../power/control
- `interface;` it may only be modified with the help of the pm_runtime_allow()
- and pm_runtime_forbid() helper functions
+ `interface;` it may only be modified with the help of the
+ pm_runtime_allow() and pm_runtime_forbid() helper functions
`unsigned int no_callbacks;`
- indicates that the device does not use the runtime PM callbacks (see
diff --git a/Documentation/power/suspend-and-cpuhotplug.rst b/Documentation/power/suspend-and-cpuhotplug.rst
index 7ac8e1f549f4..572d968c5375 100644
--- a/Documentation/power/suspend-and-cpuhotplug.rst
+++ b/Documentation/power/suspend-and-cpuhotplug.rst
@@ -106,8 +106,8 @@ execution during resume):
* Release system_transition_mutex lock.
-It is to be noted here that the system_transition_mutex lock is acquired at the very
-beginning, when we are just starting out to suspend, and then released only
+It is to be noted here that the system_transition_mutex lock is acquired at the
+very beginning, when we are just starting out to suspend, and then released only
after the entire cycle is complete (i.e., suspend + resume).
::
@@ -165,7 +165,8 @@ Important files and functions/entry points:
- kernel/power/process.c : freeze_processes(), thaw_processes()
- kernel/power/suspend.c : suspend_prepare(), suspend_enter(), suspend_finish()
-- kernel/cpu.c: cpu_[up|down](), _cpu_[up|down](), [disable|enable]_nonboot_cpus()
+- kernel/cpu.c: cpu_[up|down](), _cpu_[up|down](),
+ [disable|enable]_nonboot_cpus()
diff --git a/Documentation/power/swsusp.rst b/Documentation/power/swsusp.rst
index d000312f6965..8524f079e05c 100644
--- a/Documentation/power/swsusp.rst
+++ b/Documentation/power/swsusp.rst
@@ -118,7 +118,8 @@ In a really perfect world::
echo 1 > /proc/acpi/sleep # for standby
echo 2 > /proc/acpi/sleep # for suspend to ram
- echo 3 > /proc/acpi/sleep # for suspend to ram, but with more power conservative
+ echo 3 > /proc/acpi/sleep # for suspend to ram, but with more power
+ # conservative
echo 4 > /proc/acpi/sleep # for suspend to disk
echo 5 > /proc/acpi/sleep # for shutdown unfriendly the system
@@ -192,8 +193,8 @@ Q:
A:
The freezing of tasks is a mechanism by which user space processes and some
- kernel threads are controlled during hibernation or system-wide suspend (on some
- architectures). See freezing-of-tasks.txt for details.
+ kernel threads are controlled during hibernation or system-wide suspend (on
+ some architectures). See freezing-of-tasks.txt for details.
Q:
What is the difference between "platform" and "shutdown"?
@@ -282,7 +283,8 @@ A:
suspend(PMSG_FREEZE): devices are frozen so that they don't interfere
with state snapshot
- state snapshot: copy of whole used memory is taken with interrupts disabled
+ state snapshot: copy of whole used memory is taken with interrupts
+ disabled
resume(): devices are woken up so that we can write image to swap
@@ -353,8 +355,8 @@ Q:
A:
Generally, yes, you can. However, it requires you to use the "resume=" and
- "resume_offset=" kernel command line parameters, so the resume from a swap file
- cannot be initiated from an initrd or initramfs image. See
+ "resume_offset=" kernel command line parameters, so the resume from a swap
+ file cannot be initiated from an initrd or initramfs image. See
swsusp-and-swap-files.txt for details.
Q:
diff --git a/Documentation/sound/kernel-api/writing-an-alsa-driver.rst b/Documentation/sound/kernel-api/writing-an-alsa-driver.rst
index 132f5eb9b530..f169d58ca019 100644
--- a/Documentation/sound/kernel-api/writing-an-alsa-driver.rst
+++ b/Documentation/sound/kernel-api/writing-an-alsa-driver.rst
@@ -805,6 +805,7 @@ destructor and PCI entries. Example code is shown first, below.
return -EBUSY;
}
chip->irq = pci->irq;
+ card->sync_irq = chip->irq;
/* (2) initialization of the chip hardware */
.... /* (not implemented in this document) */
@@ -965,6 +966,15 @@ usually like the following:
return IRQ_HANDLED;
}
+After requesting the IRQ, you can pass it to the ``card->sync_irq``
+field:
+::
+
+  card->sync_irq = chip->irq;
+
+This allows the PCM core to perform :c:func:`synchronize_irq()`
+automatically at the necessary timing, e.g. at ``hw_free``.
+See the later section `sync_stop callback`_ for details.
Now let's write the corresponding destructor for the resources above.
The role of destructor is simple: disable the hardware (if already
@@ -1270,21 +1280,23 @@ shows only the skeleton, how to build up the PCM interfaces.
/* the hardware-specific codes will be here */
....
return 0;
-
}
/* hw_params callback */
static int snd_mychip_pcm_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *hw_params)
{
- return snd_pcm_lib_malloc_pages(substream,
- params_buffer_bytes(hw_params));
+ /* the hardware-specific codes will be here */
+ ....
+ return 0;
}
/* hw_free callback */
static int snd_mychip_pcm_hw_free(struct snd_pcm_substream *substream)
{
- return snd_pcm_lib_free_pages(substream);
+ /* the hardware-specific codes will be here */
+ ....
+ return 0;
}
/* prepare callback */
@@ -1339,7 +1351,6 @@ shows only the skeleton, how to build up the PCM interfaces.
static struct snd_pcm_ops snd_mychip_playback_ops = {
.open = snd_mychip_playback_open,
.close = snd_mychip_playback_close,
- .ioctl = snd_pcm_lib_ioctl,
.hw_params = snd_mychip_pcm_hw_params,
.hw_free = snd_mychip_pcm_hw_free,
.prepare = snd_mychip_pcm_prepare,
@@ -1351,7 +1362,6 @@ shows only the skeleton, how to build up the PCM interfaces.
static struct snd_pcm_ops snd_mychip_capture_ops = {
.open = snd_mychip_capture_open,
.close = snd_mychip_capture_close,
- .ioctl = snd_pcm_lib_ioctl,
.hw_params = snd_mychip_pcm_hw_params,
.hw_free = snd_mychip_pcm_hw_free,
.prepare = snd_mychip_pcm_prepare,
@@ -1382,9 +1392,9 @@ shows only the skeleton, how to build up the PCM interfaces.
&snd_mychip_capture_ops);
/* pre-allocation of buffers */
/* NOTE: this may fail */
- snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(chip->pci),
- 64*1024, 64*1024);
+ snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_DEV,
+ &chip->pci->dev,
+ 64*1024, 64*1024);
return 0;
}
@@ -1454,7 +1464,6 @@ The operators are defined typically like this:
static struct snd_pcm_ops snd_mychip_playback_ops = {
.open = snd_mychip_pcm_open,
.close = snd_mychip_pcm_close,
- .ioctl = snd_pcm_lib_ioctl,
.hw_params = snd_mychip_pcm_hw_params,
.hw_free = snd_mychip_pcm_hw_free,
.prepare = snd_mychip_pcm_prepare,
@@ -1465,13 +1474,14 @@ The operators are defined typically like this:
All the callbacks are described in the Operators_ subsection.
After setting the operators, you probably will want to pre-allocate the
-buffer. For the pre-allocation, simply call the following:
+buffer and set up the managed allocation mode.
+For that, simply call the following:
::
- snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(chip->pci),
- 64*1024, 64*1024);
+ snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_DEV,
+ &chip->pci->dev,
+ 64*1024, 64*1024);
It will allocate a buffer up to 64kB as default. Buffer management
details will be described in the later section `Buffer and Memory
@@ -1621,8 +1631,7 @@ For the operators (callbacks) of each sound driver, most of these
records are supposed to be read-only. Only the PCM middle-layer changes
/ updates them. The exceptions are the hardware description (hw) DMA
buffer information and the private data. Besides, if you use the
-standard buffer allocation method via
-:c:func:`snd_pcm_lib_malloc_pages()`, you don't need to set the
+standard managed buffer allocation mode, you don't need to set the
DMA buffer information by yourself.
In the sections below, important records are explained.
@@ -1776,8 +1785,8 @@ the physical address of the buffer. This field is specified only when
the buffer is a linear buffer. ``dma_bytes`` holds the size of buffer
in bytes. ``dma_private`` is used for the ALSA DMA allocator.
-If you use a standard ALSA function,
-:c:func:`snd_pcm_lib_malloc_pages()`, for allocating the buffer,
+If you use either the managed buffer allocation mode or the standard
+API function :c:func:`snd_pcm_lib_malloc_pages()` for allocating the buffer,
these fields are set by the ALSA middle layer, and you should *not*
change them by yourself. You can read them but not write them. On the
other hand, if you want to allocate the buffer by yourself, you'll
@@ -1911,7 +1920,10 @@ ioctl callback
~~~~~~~~~~~~~~
This is used for any special call to pcm ioctls. But usually you can
-pass a generic ioctl callback, :c:func:`snd_pcm_lib_ioctl()`.
+leave it as NULL; then the PCM core calls the generic ioctl callback
+function :c:func:`snd_pcm_lib_ioctl()`. If you need to deal with a
+unique setup of channel info or a reset procedure, you can pass your
+own callback function here.
hw_params callback
~~~~~~~~~~~~~~~~~~~
@@ -1929,8 +1941,12 @@ Many hardware setups should be done in this callback, including the
allocation of buffers.
Parameters to be initialized are retrieved by
-:c:func:`params_xxx()` macros. To allocate buffer, you can call a
-helper function,
+:c:func:`params_xxx()` macros.
+
+When you set up the managed buffer allocation mode for the substream,
+a buffer is already allocated before this callback gets
+called. Alternatively, you can call a helper function below for
+allocating the buffer, too.
::
@@ -1964,18 +1980,23 @@ hw_free callback
static int snd_xxx_hw_free(struct snd_pcm_substream *substream);
This is called to release the resources allocated via
-``hw_params``. For example, releasing the buffer via
-:c:func:`snd_pcm_lib_malloc_pages()` is done by calling the
-following:
-
-::
-
- snd_pcm_lib_free_pages(substream);
+``hw_params``.
This function is always called before the close callback is called.
Also, the callback may be called multiple times, too. Keep track
whether the resource was already released.
+When you have set up the managed buffer allocation mode for the PCM
+substream, the allocated PCM buffer will be automatically released
+after this callback gets called. Otherwise you'll have to release the
+buffer manually. Typically, when the buffer was allocated from the
+pre-allocated pool with :c:func:`snd_pcm_lib_malloc_pages()`, you
+release it with the standard API function
+:c:func:`snd_pcm_lib_free_pages()` like:
+
+::
+
+ snd_pcm_lib_free_pages(substream);
+
prepare callback
~~~~~~~~~~~~~~~~
@@ -2048,6 +2069,37 @@ flag set, and you cannot call functions which may sleep. The
triggering the DMA. The other stuff should be initialized
``hw_params`` and ``prepare`` callbacks properly beforehand.
+sync_stop callback
+~~~~~~~~~~~~~~~~~~
+
+::
+
+ static int snd_xxx_sync_stop(struct snd_pcm_substream *substream);
+
+This callback is optional, and NULL can be passed. It's called after
+the PCM core stops the stream and changes the stream state, i.e. at
+``prepare``, ``hw_params`` or ``hw_free``.
+Since the IRQ handler might be still pending, we need to wait until
+the pending task finishes before moving to the next step; otherwise it
+might lead to a crash due to resource conflicts or access to the freed
+resources. A typical behavior is to call a synchronization function
+like :c:func:`synchronize_irq()` here.
+
+For the majority of drivers that need only a call of
+:c:func:`synchronize_irq()`, there is a simpler setup, too.
+While keeping the ``sync_stop`` PCM callback NULL, the driver can set
+the ``card->sync_irq`` field to the valid interrupt number after
+requesting an IRQ, instead. Then the PCM core will call
+:c:func:`synchronize_irq()` with the given IRQ appropriately.
+
+If the IRQ handler is released at the card destructor, you don't need
+to clear ``card->sync_irq``, as the card itself is being released.
+So, usually you'll need to add just a single line for assigning
+``card->sync_irq`` in the driver code unless the driver re-acquires
+the IRQ. When the driver frees and re-acquires the IRQ dynamically
+(e.g. for suspend/resume), it needs to clear and re-set
+``card->sync_irq`` again appropriately.
+
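+A hedged example of such a driver-specific callback, following the
+``mychip`` pattern used throughout this document::
+
+    static int snd_mychip_sync_stop(struct snd_pcm_substream *substream)
+    {
+            struct mychip *chip = snd_pcm_substream_chip(substream);
+
+            /* wait until any pending interrupt handler has finished */
+            synchronize_irq(chip->irq);
+            return 0;
+    }
+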
pointer callback
~~~~~~~~~~~~~~~~
@@ -2095,10 +2147,12 @@ This callback is atomic as default.
page callback
~~~~~~~~~~~~~
-This callback is optional too. This callback is used mainly for
-non-contiguous buffers. The mmap calls this callback to get the page
-address. Some examples will be explained in the later section `Buffer
-and Memory Management`_, too.
+This callback is optional too. The mmap fault handler calls this
+callback to get the page at the given offset.
+
+Since the recent changes, no special callback is needed any longer for
+the standard SG-buffer or vmalloc-buffer. Hence this callback should
+be rarely needed.
mmap callback
~~~~~~~~~~~~~~
@@ -3512,7 +3566,7 @@ bus).
::
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(pci), size, max);
+ &pci->dev, size, max);
where ``size`` is the byte size to be pre-allocated and ``max`` is
the maximum size changeable via the ``prealloc`` proc file. The
@@ -3523,12 +3577,14 @@ The second argument (type) and the third argument (device pointer) are
dependent on the bus. For normal devices, pass the device pointer
(typically identical as ``card->dev``) to the third argument with
``SNDRV_DMA_TYPE_DEV`` type. A continuous buffer unrelated to the
-bus can be pre-allocated with ``SNDRV_DMA_TYPE_CONTINUOUS`` type and the
-``snd_dma_continuous_data(GFP_KERNEL)`` device pointer, where
-``GFP_KERNEL`` is the kernel allocation flag to use. For the
-scatter-gather buffers, use ``SNDRV_DMA_TYPE_DEV_SG`` with the device
-pointer (see the `Non-Contiguous Buffers`_
-section).
+bus can be pre-allocated with the ``SNDRV_DMA_TYPE_CONTINUOUS`` type.
+You can pass NULL as the device pointer in that case, which is the
+default mode implying allocation with the ``GFP_KERNEL`` flag.
+If you need a different GFP flag, you can pass it by encoding the flag
+into the device pointer via the special macro
+:c:func:`snd_dma_continuous_data()`.
+For the scatter-gather buffers, use ``SNDRV_DMA_TYPE_DEV_SG`` with the
+device pointer (see the `Non-Contiguous Buffers`_ section).
Once the buffer is pre-allocated, you can use the allocator in the
``hw_params`` callback:
@@ -3539,6 +3595,25 @@ Once the buffer is pre-allocated, you can use the allocator in the
Note that you have to pre-allocate to use this function.
+Most drivers, though, use the newly introduced "managed buffer
+allocation mode" instead of manual allocation and release.
+This is done by calling :c:func:`snd_pcm_set_managed_buffer_all()`
+instead of :c:func:`snd_pcm_lib_preallocate_pages_for_all()`.
+
+::
+
+ snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_DEV,
+ &pci->dev, size, max);
+
+where the passed arguments are identical for both functions.
+The difference in the managed mode is that the PCM core calls
+:c:func:`snd_pcm_lib_malloc_pages()` internally before invoking the
+PCM ``hw_params`` callback, and calls :c:func:`snd_pcm_lib_free_pages()`
+automatically after the PCM ``hw_free`` callback. So the driver no
+longer has to call these functions explicitly in its callbacks. This
+lets many drivers have NULL ``hw_params`` and ``hw_free`` entries.
+
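+With the managed mode, a PCM ops table can end up as small as the
+following sketch (the callback names are illustrative):
+
+::
+
+  static const struct snd_pcm_ops snd_mychip_playback_ops = {
+          .open =    snd_mychip_playback_open,
+          .close =   snd_mychip_playback_close,
+          .prepare = snd_mychip_pcm_prepare,
+          .trigger = snd_mychip_pcm_trigger,
+          .pointer = snd_mychip_pcm_pointer,
+          /* no hw_params / hw_free: the PCM core manages the buffer */
+  };
+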
External Hardware Buffers
-------------------------
@@ -3693,20 +3768,26 @@ provides an interface for handling SG-buffers. The API is provided in
``<sound/pcm.h>``.
For creating the SG-buffer handler, call
-:c:func:`snd_pcm_lib_preallocate_pages()` or
-:c:func:`snd_pcm_lib_preallocate_pages_for_all()` with
+:c:func:`snd_pcm_set_managed_buffer()` or
+:c:func:`snd_pcm_set_managed_buffer_all()` with
``SNDRV_DMA_TYPE_DEV_SG`` in the PCM constructor like other PCI
-pre-allocator. You need to pass ``snd_dma_pci_data(pci)``, where pci is
+pre-allocator. You need to pass ``&pci->dev``, where pci is
the :c:type:`struct pci_dev <pci_dev>` pointer of the chip as
-well. The ``struct snd_sg_buf`` instance is created as
-``substream->dma_private``. You can cast the pointer like:
+well.
+
+::
+
+ snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_DEV_SG,
+ &pci->dev, size, max);
+
+The ``struct snd_sg_buf`` instance is created as
+``substream->dma_private`` internally. You can cast the pointer like:
::
struct snd_sg_buf *sgbuf = (struct snd_sg_buf *)substream->dma_private;
-Then call :c:func:`snd_pcm_lib_malloc_pages()` in the ``hw_params``
-callback as well as in the case of normal PCI buffer. The SG-buffer
+Then, in the :c:func:`snd_pcm_lib_malloc_pages()` call, the common SG-buffer
handler will allocate the non-contiguous kernel pages of the given size
and map them onto the virtually contiguous memory. The virtual pointer
is addressed in runtime->dma_area. The physical address
@@ -3715,41 +3796,40 @@ physically non-contiguous. The physical address table is set up in
``sgbuf->table``. You can get the physical address at a certain offset
via :c:func:`snd_pcm_sgbuf_get_addr()`.
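
For example, a minimal usage sketch, where ofs is a byte offset within
the buffer:

::

  dma_addr_t addr = snd_pcm_sgbuf_get_addr(substream, ofs);
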
-When a SG-handler is used, you need to set
-:c:func:`snd_pcm_sgbuf_ops_page()` as the ``page`` callback. (See
-`page callback`_ section.)
-
-To release the data, call :c:func:`snd_pcm_lib_free_pages()` in
-the ``hw_free`` callback as usual.
+If you need to release the SG-buffer data explicitly, call the
+standard API function :c:func:`snd_pcm_lib_free_pages()` as usual.
Vmalloc'ed Buffers
------------------
It's possible to use a buffer allocated via :c:func:`vmalloc()`, for
-example, for an intermediate buffer. Since the allocated pages are not
-contiguous, you need to set the ``page`` callback to obtain the physical
-address at every offset.
+example, for an intermediate buffer. In recent kernel versions, you
+can simply allocate it via the standard
+:c:func:`snd_pcm_lib_malloc_pages()` and co. after setting up the
+buffer preallocation with the ``SNDRV_DMA_TYPE_VMALLOC`` type.
-The easiest way to achieve it would be to use
-:c:func:`snd_pcm_lib_alloc_vmalloc_buffer()` for allocating the buffer
-via :c:func:`vmalloc()`, and set :c:func:`snd_pcm_sgbuf_ops_page()` to
-the ``page`` callback. At release, you need to call
-:c:func:`snd_pcm_lib_free_vmalloc_buffer()`.
+::
-If you want to implementation the ``page`` manually, it would be like
-this:
+ snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_VMALLOC,
+ NULL, 0, 0);
-::
+NULL is passed as the device pointer argument, which indicates
+that the default pages (``GFP_KERNEL`` and ``GFP_HIGHMEM``) will be
+allocated.
- #include <linux/vmalloc.h>
+Also, note that zero is passed to both the size and the max size
+arguments here. Since each vmalloc call should succeed at any time,
+we don't need to pre-allocate the buffers as for other continuous
+pages.
- /* get the physical page pointer on the given offset */
- static struct page *mychip_page(struct snd_pcm_substream *substream,
- unsigned long offset)
- {
- void *pageptr = substream->runtime->dma_area + offset;
- return vmalloc_to_page(pageptr);
- }
+If you need a 32-bit DMA allocation, pass the device pointer encoded
+by :c:func:`snd_dma_continuous_data()` with the
+``GFP_KERNEL|__GFP_DMA32`` argument.
+
+::
+
+ snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_VMALLOC,
+ snd_dma_continuous_data(GFP_KERNEL | __GFP_DMA32), 0, 0);
Proc Interface
==============
diff --git a/Documentation/trace/intel_th.rst b/Documentation/trace/intel_th.rst
index baa12eb09ef4..70b7126eaeeb 100644
--- a/Documentation/trace/intel_th.rst
+++ b/Documentation/trace/intel_th.rst
@@ -44,7 +44,8 @@ Documentation/trace/stm.rst for more information on that.
MSU can be configured to collect trace data into a system memory
buffer, which can later on be read from its device nodes via read() or
-mmap() interface.
+mmap() interface, or directed to a "software sink" driver that will
+consume the data and/or relay it further.
On the whole, Intel(R) Trace Hub does not require any special
userspace software to function; everything can be configured, started
@@ -122,3 +123,28 @@ In order to enable the host mode, set the 'host_mode' parameter of the
will show up on the intel_th bus. Also, trace configuration and
capture controlling attribute groups of the 'gth' device will not be
exposed. The 'sth' device will operate as usual.
+
+Software Sinks
+--------------
+
+The Memory Storage Unit (MSU) driver provides an in-kernel API for
+drivers to register themselves as software sinks for the trace data.
+Such drivers can further export the data via other devices, such as
+USB device controllers or network cards.
+
+The API has two main parts:
+ - notifying the software sink that a particular window is full, and
+ "locking" that window, that is, making it unavailable for the trace
+ collection; when this happens, the MSU driver will automatically
+ switch to the next window in the buffer if it is unlocked, or stop
+ the trace capture if it's not;
+ - tracking the "locked" state of windows and providing a way for the
+ software sink driver to notify the MSU driver when a window is
+ unlocked and can be used again to collect trace data.
+
+An example sink driver, msu-sink, illustrates the implementation of a
+software sink. Functionally, it simply unlocks windows as soon as they
+are full, keeping the MSU running in a circular buffer mode. Unlike the
+"multi" mode, it will fill out all the windows in the buffer as opposed
+to just the first one. It can be enabled by writing "sink" to the "mode"
+file (assuming msu-sink.ko is loaded).
diff --git a/Documentation/virt/kvm/api.txt b/Documentation/virt/kvm/api.txt
index 4833904d32a5..49183add44e7 100644
--- a/Documentation/virt/kvm/api.txt
+++ b/Documentation/virt/kvm/api.txt
@@ -1002,12 +1002,18 @@ Specifying exception.has_esr on a system that does not support it will return
-EINVAL. Setting anything other than the lower 24bits of exception.serror_esr
will return -EINVAL.
+It is not possible to read back a pending external abort (injected via
+KVM_SET_VCPU_EVENTS or otherwise) because such an exception is always delivered
+directly to the virtual CPU.
+
+
struct kvm_vcpu_events {
struct {
__u8 serror_pending;
__u8 serror_has_esr;
+ __u8 ext_dabt_pending;
/* Align it to 8 bytes */
- __u8 pad[6];
+ __u8 pad[5];
__u64 serror_esr;
} exception;
__u32 reserved[12];
@@ -1051,9 +1057,23 @@ contain a valid state and shall be written into the VCPU.
ARM/ARM64:
+User space may need to inject several types of events to the guest.
+
Set the pending SError exception state for this VCPU. It is not possible to
'cancel' an SError that has been made pending.
+If the guest performed an access to I/O memory which could not be handled by
+userspace, for example because of missing instruction syndrome decode
+information or because there is no device mapped at the accessed IPA, then
+userspace can ask the kernel to inject an external abort using the address
+from the exiting fault on the VCPU. It is a programming error to set
+ext_dabt_pending after an exit which was not either KVM_EXIT_MMIO or
+KVM_EXIT_ARM_NISV. This feature is only available if the system supports
+KVM_CAP_ARM_INJECT_EXT_DABT. This is a helper which provides commonality in
+how userspace reports accesses for the above cases to guests, across different
+userspace implementations. Nevertheless, userspace can still emulate all Arm
+exceptions by manipulating individual registers using the KVM_SET_ONE_REG API.
+
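+A minimal userspace sketch of the injection (vcpu_fd is the VCPU file
+descriptor; error handling omitted):
+
+  struct kvm_vcpu_events events = {};
+
+  events.exception.ext_dabt_pending = 1;
+  ioctl(vcpu_fd, KVM_SET_VCPU_EVENTS, &events);
+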
See KVM_GET_VCPU_EVENTS for the data structure.
@@ -2982,6 +3002,9 @@ can be determined by querying the KVM_CAP_GUEST_DEBUG_HW_BPS and
KVM_CAP_GUEST_DEBUG_HW_WPS capabilities which return a positive number
indicating the number of supported registers.
+For ppc, the KVM_CAP_PPC_GUEST_DEBUG_SSTEP capability indicates whether
+the single-step debug event (KVM_GUESTDBG_SINGLESTEP) is supported.
+
When debug events exit the main run loop with the reason
KVM_EXIT_DEBUG with the kvm_debug_exit_arch part of the kvm_run
structure containing architecture specific debug information.
@@ -4468,6 +4491,39 @@ Hyper-V SynIC state change. Notification is used to remap SynIC
event/message pages and to enable/disable SynIC messages/events processing
in userspace.
+ /* KVM_EXIT_ARM_NISV */
+ struct {
+ __u64 esr_iss;
+ __u64 fault_ipa;
+ } arm_nisv;
+
+Used on arm and arm64 systems. If a guest accesses memory not in a memslot,
+KVM will typically return to userspace and ask it to do MMIO emulation on its
+behalf. However, for certain classes of instructions, no instruction decode
+(direction, length of memory access) is provided, and fetching and decoding
+the instruction from the VM is overly complicated to live in the kernel.
+
+Historically, when this situation occurred, KVM would print a warning and kill
+the VM. KVM assumed that if the guest accessed non-memslot memory, it was
+trying to do I/O, which just couldn't be emulated, and the warning message was
+phrased accordingly. However, what happened more often was that a guest bug
+caused access outside the guest memory areas which should lead to a more
+meaningful warning message and an external abort in the guest, if the access
+did not fall within an I/O window.
+
+Userspace implementations can query for KVM_CAP_ARM_NISV_TO_USER, and enable
+this capability at VM creation. Once this is done, these types of errors will
+instead return to userspace with KVM_EXIT_ARM_NISV, with the valid bits from
+the HSR (arm) and ESR_EL2 (arm64) in the esr_iss field, and the faulting IPA
+in the fault_ipa field. Userspace can either fix up the access if it's
+actually an I/O access by decoding the instruction from guest memory (if it's
+very brave) and continue executing the guest, or it can decide to suspend,
+dump, or restart the guest.
+
+Note that KVM does not skip the faulting instruction as it does for
+KVM_EXIT_MMIO, but userspace has to emulate any change to the processing state
+if it decides to decode and emulate the instruction.
+
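+A minimal sketch of handling this exit in a userspace run loop (the
+emulate_nisv() helper is hypothetical):
+
+  switch (run->exit_reason) {
+  case KVM_EXIT_ARM_NISV:
+          /* esr_iss holds the valid ISS bits, fault_ipa the address */
+          if (!emulate_nisv(run->arm_nisv.esr_iss, run->arm_nisv.fault_ipa))
+                  abort();
+          break;
+  }
+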
/* Fix the size of the union. */
char padding[256];
};
diff --git a/Documentation/virt/kvm/arm/pvtime.rst b/Documentation/virt/kvm/arm/pvtime.rst
new file mode 100644
index 000000000000..2357dd2d8655
--- /dev/null
+++ b/Documentation/virt/kvm/arm/pvtime.rst
@@ -0,0 +1,80 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Paravirtualized time support for arm64
+======================================
+
+Arm specification DEN0057/A defines a standard for paravirtualised time
+support for AArch64 guests:
+
+https://developer.arm.com/docs/den0057/a
+
+KVM/arm64 implements the stolen time part of this specification by providing
+some hypervisor service calls to support a paravirtualized guest obtaining a
+view of the amount of time stolen from its execution.
+
+Two new SMCCC compatible hypercalls are defined:
+
+* PV_TIME_FEATURES: 0xC5000020
+* PV_TIME_ST: 0xC5000021
+
+These are only available in the SMC64/HVC64 calling convention as
+paravirtualized time is not available to 32-bit Arm guests. The existence of
+the PV_TIME_FEATURES hypercall should be probed using the SMCCC 1.1 ARCH_FEATURES
+mechanism before calling it.
+
+PV_TIME_FEATURES
+ ============= ======== ==========
+ Function ID: (uint32) 0xC5000020
+ PV_call_id: (uint32) The function to query for support.
+ Currently only PV_TIME_ST is supported.
+ Return value: (int64) NOT_SUPPORTED (-1) or SUCCESS (0) if the relevant
+ PV-time feature is supported by the hypervisor.
+ ============= ======== ==========
+
+PV_TIME_ST
+ ============= ======== ==========
+ Function ID: (uint32) 0xC5000021
+ Return value: (int64) IPA of the stolen time data structure for this
+ VCPU. On failure:
+ NOT_SUPPORTED (-1)
+ ============= ======== ==========
+
+The IPA returned by PV_TIME_ST should be mapped by the guest as normal memory
+with inner and outer write back caching attributes, in the inner shareable
+domain. A total of 16 bytes from the IPA returned are guaranteed to be
+meaningfully filled by the hypervisor (see structure below).
+
+PV_TIME_ST returns the structure for the calling VCPU.
+
+Stolen Time
+-----------
+
+The structure pointed to by the PV_TIME_ST hypercall is as follows:
+
++-------------+-------------+-------------+----------------------------+
+| Field | Byte Length | Byte Offset | Description |
++=============+=============+=============+============================+
+| Revision | 4 | 0 | Must be 0 for version 1.0 |
++-------------+-------------+-------------+----------------------------+
+| Attributes | 4 | 4 | Must be 0 |
++-------------+-------------+-------------+----------------------------+
+| Stolen time | 8 | 8 | Stolen time in unsigned |
+| | | | nanoseconds indicating how |
+| | | | much time this VCPU thread |
+| | | | was involuntarily not |
+| | | | running on a physical CPU. |
++-------------+-------------+-------------+----------------------------+
+
+All values in the structure are stored little-endian.
+
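+In C terms, the layout could be described with the following sketch (the
+struct name is illustrative; all fields are little-endian)::
+
+  struct pvtime_stolen_time {
+          __le32 revision;     /* must be 0 for version 1.0 */
+          __le32 attributes;   /* must be 0 */
+          __le64 stolen_time;  /* stolen time in nanoseconds */
+  };
+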
+The structure will be updated by the hypervisor prior to scheduling a VCPU. It
+will be present within a reserved region of the normal memory given to the
+guest. The guest should not attempt to write into this memory. There is a
+structure per VCPU of the guest.
+
+It is advisable that one or more 64k pages are set aside for the purpose of
+these structures and not used for other purposes. This enables the guest to map
+the region using 64k pages and avoids conflicting attributes with other memory.
+
+For the user space interface see Documentation/virt/kvm/devices/vcpu.txt
+section "3. GROUP: KVM_ARM_VCPU_PVTIME_CTRL".
diff --git a/Documentation/virt/kvm/devices/vcpu.txt b/Documentation/virt/kvm/devices/vcpu.txt
index 2b5dab16c4f2..6f3bd64a05b0 100644
--- a/Documentation/virt/kvm/devices/vcpu.txt
+++ b/Documentation/virt/kvm/devices/vcpu.txt
@@ -60,3 +60,17 @@ time to use the number provided for a given timer, overwriting any previously
configured values on other VCPUs. Userspace should configure the interrupt
numbers on at least one VCPU after creating all VCPUs and before running any
VCPUs.
+
+3. GROUP: KVM_ARM_VCPU_PVTIME_CTRL
+Architectures: ARM64
+
+3.1 ATTRIBUTE: KVM_ARM_VCPU_PVTIME_IPA
+Parameters: 64-bit base address
+Returns: -ENXIO: Stolen time not implemented
+ -EEXIST: Base address already set for this VCPU
+ -EINVAL: Base address not 64 byte aligned
+
+Specifies the base address of the stolen time structure for this VCPU. The
+base address must be 64 byte aligned and exist within a valid guest memory
+region. See Documentation/virt/kvm/arm/pvtime.txt for more information
+including the layout of the stolen time structure.
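+
+A minimal userspace sketch of setting the base address (vcpu_fd is the
+VCPU file descriptor and the address is an arbitrary example; error
+handling omitted):
+
+  __u64 ipa = 0x80000000;  /* must be 64 byte aligned */
+  struct kvm_device_attr attr = {
+          .group = KVM_ARM_VCPU_PVTIME_CTRL,
+          .attr  = KVM_ARM_VCPU_PVTIME_IPA,
+          .addr  = (__u64)&ipa,
+  };
+
+  ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);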
diff --git a/Documentation/virt/kvm/devices/xics.txt b/Documentation/virt/kvm/devices/xics.txt
index 42864935ac5d..423332dda7bc 100644
--- a/Documentation/virt/kvm/devices/xics.txt
+++ b/Documentation/virt/kvm/devices/xics.txt
@@ -3,9 +3,19 @@ XICS interrupt controller
Device type supported: KVM_DEV_TYPE_XICS
Groups:
- KVM_DEV_XICS_SOURCES
+ 1. KVM_DEV_XICS_GRP_SOURCES
Attributes: One per interrupt source, indexed by the source number.
+ 2. KVM_DEV_XICS_GRP_CTRL
+ Attributes:
+ 2.1 KVM_DEV_XICS_NR_SERVERS (write only)
+ The kvm_device_attr.addr points to a __u32 value which is the number of
+    interrupt server numbers (i.e., the highest possible vcpu id plus one).
+ Errors:
+ -EINVAL: Value greater than KVM_MAX_VCPU_ID.
+ -EFAULT: Invalid user pointer for attr->addr.
+ -EBUSY: A vcpu is already connected to the device.
+
This device emulates the XICS (eXternal Interrupt Controller
Specification) defined in PAPR. The XICS has a set of interrupt
sources, each identified by a 20-bit source number, and a set of
@@ -38,7 +48,7 @@ least-significant end of the word:
Each source has 64 bits of state that can be read and written using
the KVM_GET_DEVICE_ATTR and KVM_SET_DEVICE_ATTR ioctls, specifying the
-KVM_DEV_XICS_SOURCES attribute group, with the attribute number being
+KVM_DEV_XICS_GRP_SOURCES attribute group, with the attribute number being
the interrupt source number. The 64 bit state word has the following
bitfields, starting from the least-significant end of the word:
diff --git a/Documentation/virt/kvm/devices/xive.txt b/Documentation/virt/kvm/devices/xive.txt
index 9a24a4525253..f5d1d6b5af61 100644
--- a/Documentation/virt/kvm/devices/xive.txt
+++ b/Documentation/virt/kvm/devices/xive.txt
@@ -78,6 +78,14 @@ the legacy interrupt mode, referred as XICS (POWER7/8).
migrating the VM.
Errors: none
+ 1.3 KVM_DEV_XIVE_NR_SERVERS (write only)
+ The kvm_device_attr.addr points to a __u32 value which is the number of
+    interrupt server numbers (i.e., the highest possible vcpu id plus one).
+ Errors:
+ -EINVAL: Value greater than KVM_MAX_VCPU_ID.
+ -EFAULT: Invalid user pointer for attr->addr.
+ -EBUSY: A vCPU is already connected to the device.
+
2. KVM_DEV_XIVE_GRP_SOURCE (write only)
Initializes a new source in the XIVE device and mask it.
Attributes:
diff --git a/Documentation/x86/boot.rst b/Documentation/x86/boot.rst
index 08a2f100c0e6..90bb8f5ab384 100644
--- a/Documentation/x86/boot.rst
+++ b/Documentation/x86/boot.rst
@@ -68,8 +68,25 @@ Protocol 2.12 (Kernel 3.8) Added the xloadflags field and extension fields
Protocol 2.13 (Kernel 3.14) Support 32- and 64-bit flags being set in
xloadflags to support booting a 64-bit kernel from 32-bit
EFI
+
+Protocol 2.14: BURNT BY INCORRECT COMMIT ae7e1238e68f2a472a125673ab506d49158c1889
+ (x86/boot: Add ACPI RSDP address to setup_header)
+ DO NOT USE!!! ASSUME SAME AS 2.13.
+
+Protocol 2.15: (Kernel 5.5) Added the kernel_info and kernel_info.setup_type_max.
============= ============================================================
+.. note::
+ The protocol version number should be changed only if the setup header
+ is changed. There is no need to update the version number if boot_params
+ or kernel_info are changed. Additionally, it is recommended to use
+ xloadflags (in this case the protocol version number should not be
+ updated either) or kernel_info to communicate supported Linux kernel
+	features to the boot loader. Due to the very limited space available in
+	the original setup header, every update to it should be considered
+	with great care. Starting from protocol 2.15, the primary way to
+	communicate things to the boot loader is the kernel_info.
+
Memory Layout
=============
@@ -207,6 +224,7 @@ Offset/Size Proto Name Meaning
0258/8 2.10+ pref_address Preferred loading address
0260/4 2.10+ init_size Linear memory required during initialization
0264/4 2.11+ handover_offset Offset of handover entry point
+0268/4 2.15+ kernel_info_offset Offset of the kernel_info
=========== ======== ===================== ============================================
.. note::
@@ -809,6 +827,47 @@ Protocol: 2.09+
sure to consider the case where the linked list already contains
entries.
+ The setup_data is a bit awkward to use for extremely large data objects,
+ both because the setup_data header has to be adjacent to the data object
+ and because it has a 32-bit length field. However, it is important that
+ intermediate stages of the boot process have a way to identify which
+ chunks of memory are occupied by kernel data.
+
+  Thus the setup_indirect struct and the SETUP_INDIRECT type were introduced
+  in protocol 2.15.
+
+ struct setup_indirect {
+ __u32 type;
+ __u32 reserved; /* Reserved, must be set to zero. */
+ __u64 len;
+ __u64 addr;
+ };
+
+  The type member is a SETUP_INDIRECT | SETUP_* type. However, it cannot be
+  SETUP_INDIRECT itself, since making the setup_indirect a tree structure
+  could require a lot of stack space in something that needs to parse it,
+  and stack space can be limited in boot contexts.
+
+  Let's give an example of how to point to SETUP_E820_EXT data using
+  setup_indirect. In this case the setup_data and setup_indirect will look
+  like this:
+
+ struct setup_data {
+ __u64 next = 0 or <addr_of_next_setup_data_struct>;
+ __u32 type = SETUP_INDIRECT;
+ __u32 len = sizeof(setup_data);
+ __u8 data[sizeof(setup_indirect)] = struct setup_indirect {
+ __u32 type = SETUP_INDIRECT | SETUP_E820_EXT;
+ __u32 reserved = 0;
+ __u64 len = <len_of_SETUP_E820_EXT_data>;
+ __u64 addr = <addr_of_SETUP_E820_EXT_data>;
+ }
+ }
+
+.. note::
+  SETUP_INDIRECT | SETUP_NONE objects cannot be properly distinguished
+  from SETUP_INDIRECT itself. So, such objects cannot be provided by
+  the bootloaders.
+
============ ============
Field name: pref_address
Type: read (reloc)
@@ -855,6 +914,121 @@ Offset/size: 0x264/4
See EFI HANDOVER PROTOCOL below for more details.
+============ ==================
+Field name: kernel_info_offset
+Type: read
+Offset/size: 0x268/4
+Protocol: 2.15+
+============ ==================
+
+ This field is the offset from the beginning of the kernel image to the
+ kernel_info. The kernel_info structure is embedded in the Linux image
+ in the uncompressed protected mode region.
+
+
+The kernel_info
+===============
+
+The relationships between the headers are analogous to the various data
+sections:
+
+ setup_header = .data
+ boot_params/setup_data = .bss
+
+What is missing from the above list? That's right:
+
+ kernel_info = .rodata
+
+We have been (ab)using .data for things that could go into .rodata or .bss for
+a long time, for lack of alternatives and -- especially early on -- inertia.
+Also, the BIOS stub is responsible for creating boot_params, so it isn't
+available to a BIOS-based loader (setup_data is, though).
+
+setup_header is permanently limited to 144 bytes due to the reach of the
+2-byte jump field, which doubles as a length field for the structure, combined
+with the size of the "hole" in struct boot_params that a protected-mode loader
+or the BIOS stub has to copy it into. It is currently 119 bytes long, which
+leaves us with 25 very precious bytes. This isn't something that can be fixed
+without revising the boot protocol entirely, breaking backwards compatibility.
+
+boot_params proper is limited to 4096 bytes, but can be arbitrarily extended
+by adding setup_data entries. It cannot be used to communicate properties of
+the kernel image, because it is .bss and has no image-provided content.
+
+kernel_info solves this by providing an extensible place for information about
+the kernel image. It is readonly, because the kernel cannot rely on a
+bootloader copying its contents anywhere, but that is OK; if it becomes
+necessary it can still contain data items that an enabled bootloader would be
+expected to copy into a setup_data chunk.
+
+All kernel_info data should be part of this structure. Fixed size data have to
+be put before the kernel_info_var_len_data label. Variable size data have to be
+put after the kernel_info_var_len_data label. Each chunk of variable size data
+has to be prefixed with a header/magic and its size, e.g.:
+
+ kernel_info:
+ .ascii "LToP" /* Header, Linux top (structure). */
+ .long kernel_info_var_len_data - kernel_info
+ .long kernel_info_end - kernel_info
+ .long 0x01234567 /* Some fixed size data for the bootloaders. */
+ kernel_info_var_len_data:
+ example_struct: /* Some variable size data for the bootloaders. */
+ .ascii "0123" /* Header/Magic. */
+ .long example_struct_end - example_struct
+ .ascii "Struct"
+ .long 0x89012345
+ example_struct_end:
+ example_strings: /* Some variable size data for the bootloaders. */
+ .ascii "ABCD" /* Header/Magic. */
+ .long example_strings_end - example_strings
+ .asciz "String_0"
+ .asciz "String_1"
+ example_strings_end:
+ kernel_info_end:
+
+This way the kernel_info is a self-contained blob.
+
+.. note::
+ Each variable size data header/magic can be any 4-character string,
+ without \0 at the end of the string, which does not collide with
+ existing variable length data headers/magics.
+
+
+Details of the kernel_info Fields
+=================================
+
+============ ========
+Field name: header
+Offset/size: 0x0000/4
+============ ========
+
+ Contains the magic number "LToP" (0x506f544c).
+
+============ ========
+Field name: size
+Offset/size: 0x0004/4
+============ ========
+
+  This field contains the size of the kernel_info including kernel_info.header.
+  It does not count the kernel_info.kernel_info_var_len_data size. This field
+  should be used by the bootloaders to detect the supported fixed size fields
+  in the kernel_info and the beginning of kernel_info.kernel_info_var_len_data.
+
+============ ========
+Field name: size_total
+Offset/size: 0x0008/4
+============ ========
+
+ This field contains the size of the kernel_info including kernel_info.header
+ and kernel_info.kernel_info_var_len_data.
+
+============ ==============
+Field name: setup_type_max
+Offset/size: 0x000c/4
+============ ==============
+
+  This field contains the maximal allowed type for setup_data and
+  setup_indirect structs.
+
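+A bootloader-side sketch of locating and validating the fixed part of the
+kernel_info (a minimal illustration; image points to the loaded kernel
+image and hdr to its parsed setup_header):
+
+  struct kernel_info {
+          __u32 header;          /* "LToP" = 0x506f544c */
+          __u32 size;            /* fixed size part only */
+          __u32 size_total;      /* including variable size data */
+          __u32 setup_type_max;
+  };
+
+  const struct kernel_info *ki =
+          (const void *)(image + hdr->kernel_info_offset);
+
+  if (ki->header != 0x506f544c)
+          return -EINVAL;        /* no kernel_info: assume protocol < 2.15 */
+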
The Image Checksum
==================
diff --git a/Documentation/x86/index.rst b/Documentation/x86/index.rst
index af64c4bb4447..a8de2fbc1caa 100644
--- a/Documentation/x86/index.rst
+++ b/Documentation/x86/index.rst
@@ -27,6 +27,7 @@ x86-specific Documentation
mds
microcode
resctrl_ui
+ tsx_async_abort
usb-legacy-support
i386/index
x86_64/index
diff --git a/Documentation/x86/tsx_async_abort.rst b/Documentation/x86/tsx_async_abort.rst
new file mode 100644
index 000000000000..583ddc185ba2
--- /dev/null
+++ b/Documentation/x86/tsx_async_abort.rst
@@ -0,0 +1,117 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+TSX Async Abort (TAA) mitigation
+================================
+
+.. _tsx_async_abort:
+
+Overview
+--------
+
+TSX Async Abort (TAA) is a side channel attack on internal buffers in some
+Intel processors, similar to Microarchitectural Data Sampling (MDS). In this
+case, certain loads may speculatively pass invalid data to dependent operations
+when an asynchronous abort condition is pending in a Transactional
+Synchronization Extensions (TSX) transaction. This includes loads with no
+fault or assist condition. Such loads may speculatively expose stale data from
+the same uarch data structures as in MDS, with the same scope of exposure, i.e.
+same-thread and cross-thread. This issue affects all current processors that
+support TSX.
+
+Mitigation strategy
+-------------------
+
+a) TSX disable - one of the mitigations is to disable TSX. A new MSR,
+IA32_TSX_CTRL, is available in future processors and in current processors
+after a microcode update, and can be used to disable TSX. In addition, it
+controls the enumeration of the TSX feature bits (RTM and HLE) in CPUID.
+
+b) Clear CPU buffers - similar to MDS, clearing the CPU buffers mitigates this
+vulnerability. More details on this approach can be found in
+:ref:`Documentation/admin-guide/hw-vuln/mds.rst <mds>`.
+
+Kernel internal mitigation modes
+--------------------------------
+
+ ============= ============================================================
+ off Mitigation is disabled. Either the CPU is not affected or
+ tsx_async_abort=off is supplied on the kernel command line.
+
+ tsx disabled Mitigation is enabled. TSX feature is disabled by default at
+ bootup on processors that support TSX control.
+
+ verw Mitigation is enabled. CPU is affected and MD_CLEAR is
+ advertised in CPUID.
+
+ ucode needed Mitigation is enabled. CPU is affected and MD_CLEAR is not
+ advertised in CPUID. That is mainly for virtualization
+ scenarios where the host has the updated microcode but the
+ hypervisor does not expose MD_CLEAR in CPUID. It's a best
+ effort approach without guarantee.
+ ============= ============================================================
+
+If the CPU is affected and the "tsx_async_abort" kernel command line parameter
+is not provided, then the kernel selects an appropriate mitigation depending on
+the status of the RTM and MD_CLEAR CPUID bits.
+
+The tables below indicate the impact of the tsx=on|off|auto cmdline options on
+the state of the TAA mitigation, VERW behavior and the TSX feature for various
+combinations of MSR_IA32_ARCH_CAPABILITIES bits.
+
+1. "tsx=off"
+
+========= ========= ============ ============ ============== =================== ======================
+MSR_IA32_ARCH_CAPABILITIES bits Result with cmdline tsx=off
+---------------------------------- -------------------------------------------------------------------------
+TAA_NO MDS_NO TSX_CTRL_MSR TSX state VERW can clear TAA mitigation TAA mitigation
+ after bootup CPU buffers tsx_async_abort=off tsx_async_abort=full
+========= ========= ============ ============ ============== =================== ======================
+ 0 0 0 HW default Yes Same as MDS Same as MDS
+ 0 0 1 Invalid case Invalid case Invalid case Invalid case
+ 0 1 0 HW default No Need ucode update Need ucode update
+ 0 1 1 Disabled Yes TSX disabled TSX disabled
+ 1 X 1 Disabled X None needed None needed
+========= ========= ============ ============ ============== =================== ======================
+
+2. "tsx=on"
+
+========= ========= ============ ============ ============== =================== ======================
+MSR_IA32_ARCH_CAPABILITIES bits Result with cmdline tsx=on
+---------------------------------- -------------------------------------------------------------------------
+TAA_NO MDS_NO TSX_CTRL_MSR TSX state VERW can clear TAA mitigation TAA mitigation
+ after bootup CPU buffers tsx_async_abort=off tsx_async_abort=full
+========= ========= ============ ============ ============== =================== ======================
+ 0 0 0 HW default Yes Same as MDS Same as MDS
+ 0 0 1 Invalid case Invalid case Invalid case Invalid case
+ 0 1 0 HW default No Need ucode update Need ucode update
+ 0 1 1 Enabled Yes None Same as MDS
+ 1 X 1 Enabled X None needed None needed
+========= ========= ============ ============ ============== =================== ======================
+
+3. "tsx=auto"
+
+========= ========= ============ ============ ============== =================== ======================
+MSR_IA32_ARCH_CAPABILITIES bits Result with cmdline tsx=auto
+---------------------------------- -------------------------------------------------------------------------
+TAA_NO MDS_NO TSX_CTRL_MSR TSX state VERW can clear TAA mitigation TAA mitigation
+ after bootup CPU buffers tsx_async_abort=off tsx_async_abort=full
+========= ========= ============ ============ ============== =================== ======================
+ 0 0 0 HW default Yes Same as MDS Same as MDS
+ 0 0 1 Invalid case Invalid case Invalid case Invalid case
+ 0 1 0 HW default No Need ucode update Need ucode update
+ 0 1 1 Disabled Yes TSX disabled TSX disabled
+ 1 X 1 Enabled X None needed None needed
+========= ========= ============ ============ ============== =================== ======================
+
+In the tables, TSX_CTRL_MSR is a new bit in MSR_IA32_ARCH_CAPABILITIES that
+indicates whether MSR_IA32_TSX_CTRL is supported.
+
+There are two control bits in IA32_TSX_CTRL MSR:
+
+    Bit 0: When set, it disables the Restricted Transactional Memory (RTM)
+ sub-feature of TSX (will force all transactions to abort on the
+ XBEGIN instruction).
+
+    Bit 1: When set, it disables the enumeration of the RTM and HLE feature
+ (i.e. it will make CPUID(EAX=7).EBX{bit4} and
+ CPUID(EAX=7).EBX{bit11} read as 0).
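+
+In kernel-internal terms, disabling TSX via this MSR boils down to
+something like the following sketch (mirroring the bit layout above;
+the MSR index is 0x122)::
+
+  #define MSR_IA32_TSX_CTRL     0x00000122
+  #define TSX_CTRL_RTM_DISABLE  BIT(0)  /* force RTM transactions to abort */
+  #define TSX_CTRL_CPUID_CLEAR  BIT(1)  /* hide the RTM and HLE CPUID bits */
+
+  static void tsx_disable(void)
+  {
+          u64 tsx;
+
+          rdmsrl(MSR_IA32_TSX_CTRL, tsx);
+          tsx |= TSX_CTRL_RTM_DISABLE | TSX_CTRL_CPUID_CLEAR;
+          wrmsrl(MSR_IA32_TSX_CTRL, tsx);
+  }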
diff --git a/MAINTAINERS b/MAINTAINERS
index 99def8472f3f..b16b72ae6cd6 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -643,7 +643,7 @@ F: drivers/net/ethernet/alacritech/*
FORCEDETH GIGABIT ETHERNET DRIVER
M: Rain River <rain.1986.08.12@gmail.com>
-M: Zhu Yanjun <yanjun.zhu@oracle.com>
+M: Zhu Yanjun <zyjzyj2000@gmail.com>
L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/ethernet/nvidia/*
@@ -682,11 +682,11 @@ S: Maintained
F: Documentation/devicetree/bindings/opp/sun50i-nvmem-cpufreq.txt
F: drivers/cpufreq/sun50i-cpufreq-nvmem.c
-ALLWINNER SECURITY SYSTEM
+ALLWINNER CRYPTO DRIVERS
M: Corentin Labbe <clabbe.montjoie@gmail.com>
L: linux-crypto@vger.kernel.org
S: Maintained
-F: drivers/crypto/sunxi-ss/
+F: drivers/crypto/allwinner/
ALLWINNER VPU DRIVER
M: Maxime Ripard <mripard@kernel.org>
@@ -1010,6 +1010,7 @@ F: drivers/media/i2c/adv7842*
ANALOG DEVICES INC ASOC CODEC DRIVERS
M: Lars-Peter Clausen <lars@metafoo.de>
+M: Nuno Sá <nuno.sa@analog.com>
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
W: http://wiki.analog.com/
W: http://ez.analog.com/community/linux-device-drivers
@@ -1190,14 +1191,21 @@ S: Maintained
F: drivers/media/i2c/aptina-pll.*
AQUANTIA ETHERNET DRIVER (atlantic)
-M: Igor Russkikh <igor.russkikh@aquantia.com>
+M: Igor Russkikh <irusskikh@marvell.com>
L: netdev@vger.kernel.org
S: Supported
-W: http://www.aquantia.com
+W: https://www.marvell.com/
Q: http://patchwork.ozlabs.org/project/netdev/list/
F: drivers/net/ethernet/aquantia/atlantic/
F: Documentation/networking/device_drivers/aquantia/atlantic.txt
+AQUANTIA ETHERNET DRIVER PTP SUBSYSTEM
+M: Egor Pomozov <epomozov@marvell.com>
+L: netdev@vger.kernel.org
+S: Supported
+W: http://www.aquantia.com
+F: drivers/net/ethernet/aquantia/atlantic/aq_ptp*
+
ARC FRAMEBUFFER DRIVER
M: Jaya Kumar <jayalk@intworks.biz>
S: Maintained
@@ -1399,6 +1407,7 @@ F: drivers/clk/actions/
F: drivers/clocksource/timer-owl*
F: drivers/dma/owl-dma.c
F: drivers/i2c/busses/i2c-owl.c
+F: drivers/mmc/host/owl-mmc.c
F: drivers/pinctrl/actions/*
F: drivers/soc/actions/
F: include/dt-bindings/power/owl-*
@@ -1407,6 +1416,7 @@ F: Documentation/devicetree/bindings/arm/actions.yaml
F: Documentation/devicetree/bindings/clock/actions,owl-cmu.txt
F: Documentation/devicetree/bindings/dma/owl-dma.txt
F: Documentation/devicetree/bindings/i2c/i2c-owl.txt
+F: Documentation/devicetree/bindings/mmc/owl-mmc.yaml
F: Documentation/devicetree/bindings/pinctrl/actions,s900-pinctrl.txt
F: Documentation/devicetree/bindings/power/actions,owl-sps.txt
F: Documentation/devicetree/bindings/timer/actions,owl-timer.txt
@@ -1478,6 +1488,14 @@ F: drivers/soc/amlogic/
F: drivers/rtc/rtc-meson*
N: meson
+ARM/Amlogic Meson SoC Crypto Drivers
+M: Corentin Labbe <clabbe@baylibre.com>
+L: linux-crypto@vger.kernel.org
+L: linux-amlogic@lists.infradead.org
+S: Maintained
+F: drivers/crypto/amlogic/
+F: Documentation/devicetree/bindings/crypto/amlogic*
+
ARM/Amlogic Meson SoC Sound Drivers
M: Jerome Brunet <jbrunet@baylibre.com>
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
@@ -2620,6 +2638,7 @@ S: Maintained
F: arch/arm64/
X: arch/arm64/boot/dts/
F: Documentation/arm64/
+F: tools/testing/selftests/arm64/
AS3645A LED FLASH CONTROLLER DRIVER
M: Sakari Ailus <sakari.ailus@iki.fi>
@@ -3270,7 +3289,6 @@ S: Maintained
F: drivers/cpufreq/bmips-cpufreq.c
BROADCOM BMIPS MIPS ARCHITECTURE
-M: Kevin Cernekee <cernekee@gmail.com>
M: Florian Fainelli <f.fainelli@gmail.com>
L: bcm-kernel-feedback-list@broadcom.com
L: linux-mips@vger.kernel.org
@@ -3542,7 +3560,7 @@ BUS FREQUENCY DRIVER FOR SAMSUNG EXYNOS
M: Chanwoo Choi <cw00.choi@samsung.com>
L: linux-pm@vger.kernel.org
L: linux-samsung-soc@vger.kernel.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/mzx/devfreq.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/chanwoo/linux.git
S: Maintained
F: drivers/devfreq/exynos-bus.c
F: Documentation/devicetree/bindings/devfreq/exynos-bus.txt
@@ -3605,6 +3623,13 @@ S: Maintained
F: Documentation/devicetree/bindings/media/cdns,*.txt
F: drivers/media/platform/cadence/cdns-csi2*
+CADENCE NAND DRIVER
+M: Piotr Sroka <piotrs@cadence.com>
+L: linux-mtd@lists.infradead.org
+S: Maintained
+F: drivers/mtd/nand/raw/cadence-nand-controller.c
+F: Documentation/devicetree/bindings/mtd/cadence-nand-controller.txt
+
CADET FM/AM RADIO RECEIVER DRIVER
M: Hans Verkuil <hverkuil@xs4all.nl>
L: linux-media@vger.kernel.org
@@ -3636,16 +3661,6 @@ L: cake@lists.bufferbloat.net (moderated for non-subscribers)
S: Maintained
F: net/sched/sch_cake.c
-CALGARY x86-64 IOMMU
-M: Muli Ben-Yehuda <mulix@mulix.org>
-M: Jon Mason <jdmason@kudzu.us>
-L: iommu@lists.linux-foundation.org
-S: Maintained
-F: arch/x86/kernel/pci-calgary_64.c
-F: arch/x86/kernel/tce_64.c
-F: arch/x86/include/asm/calgary.h
-F: arch/x86/include/asm/tce.h
-
CAN NETWORK DRIVERS
M: Wolfgang Grandegger <wg@grandegger.com>
M: Marc Kleine-Budde <mkl@pengutronix.de>
@@ -4279,14 +4294,13 @@ F: include/linux/cpufreq.h
F: include/linux/sched/cpufreq.h
F: tools/testing/selftests/cpufreq/
-CPU FREQUENCY DRIVERS - ARM BIG LITTLE
+CPU FREQUENCY DRIVERS - VEXPRESS SPC ARM BIG LITTLE
M: Viresh Kumar <viresh.kumar@linaro.org>
M: Sudeep Holla <sudeep.holla@arm.com>
L: linux-pm@vger.kernel.org
W: http://www.arm.com/products/processors/technologies/biglittleprocessing.php
S: Maintained
-F: drivers/cpufreq/arm_big_little.h
-F: drivers/cpufreq/arm_big_little.c
+F: drivers/cpufreq/vexpress-spc-cpufreq.c
CPU POWER MONITORING SUBSYSTEM
M: Thomas Renninger <trenn@suse.com>
@@ -4465,14 +4479,6 @@ W: http://www.chelsio.com
S: Supported
F: drivers/scsi/cxgbi/cxgb3i
-CXGB3 IWARP RNIC DRIVER (IW_CXGB3)
-M: Potnuri Bharat Teja <bharat@chelsio.com>
-L: linux-rdma@vger.kernel.org
-W: http://www.openfabrics.org
-S: Supported
-F: drivers/infiniband/hw/cxgb3/
-F: include/uapi/rdma/cxgb3-abi.h
-
CXGB4 CRYPTO DRIVER (chcr)
M: Atul Gupta <atul.gupta@chelsio.com>
L: linux-crypto@vger.kernel.org
@@ -4647,6 +4653,14 @@ M: "Maciej W. Rozycki" <macro@linux-mips.org>
S: Maintained
F: drivers/net/fddi/defxx.*
+DEINTERLACE DRIVERS FOR ALLWINNER H3
+M: Jernej Skrabec <jernej.skrabec@siol.net>
+L: linux-media@vger.kernel.org
+T: git git://linuxtv.org/media_tree.git
+S: Maintained
+F: drivers/media/platform/sunxi/sun8i-di/
+F: Documentation/devicetree/bindings/media/allwinner,sun8i-h3-deinterlace.yaml
+
DELL SMBIOS DRIVER
M: Pali Rohár <pali.rohar@gmail.com>
M: Mario Limonciello <mario.limonciello@dell.com>
@@ -4771,9 +4785,9 @@ F: include/linux/devcoredump.h
DEVICE FREQUENCY (DEVFREQ)
M: MyungJoo Ham <myungjoo.ham@samsung.com>
M: Kyungmin Park <kyungmin.park@samsung.com>
-R: Chanwoo Choi <cw00.choi@samsung.com>
+M: Chanwoo Choi <cw00.choi@samsung.com>
L: linux-pm@vger.kernel.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/mzx/devfreq.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/chanwoo/linux.git
S: Maintained
F: drivers/devfreq/
F: include/linux/devfreq.h
@@ -4783,10 +4797,11 @@ F: include/trace/events/devfreq.h
DEVICE FREQUENCY EVENT (DEVFREQ-EVENT)
M: Chanwoo Choi <cw00.choi@samsung.com>
L: linux-pm@vger.kernel.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/mzx/devfreq.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/chanwoo/linux.git
S: Supported
F: drivers/devfreq/event/
F: drivers/devfreq/devfreq-event.c
+F: include/dt-bindings/pmu/exynos_ppmu.h
F: include/linux/devfreq-event.h
F: Documentation/devicetree/bindings/devfreq/event/
@@ -5056,10 +5071,14 @@ M: Ioana Radulescu <ruxandra.radulescu@nxp.com>
L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/ethernet/freescale/dpaa2/dpaa2-eth*
+F: drivers/net/ethernet/freescale/dpaa2/dpaa2-mac*
F: drivers/net/ethernet/freescale/dpaa2/dpni*
+F: drivers/net/ethernet/freescale/dpaa2/dpmac*
F: drivers/net/ethernet/freescale/dpaa2/dpkg.h
F: drivers/net/ethernet/freescale/dpaa2/Makefile
F: drivers/net/ethernet/freescale/dpaa2/Kconfig
+F: Documentation/networking/device_drivers/freescale/dpaa2/ethernet-driver.rst
+F: Documentation/networking/device_drivers/freescale/dpaa2/mac-phy-support.rst
DPAA2 ETHERNET SWITCH DRIVER
M: Ioana Radulescu <ruxandra.radulescu@nxp.com>
@@ -6012,14 +6031,14 @@ F: sound/usb/misc/ua101.c
EFI TEST DRIVER
L: linux-efi@vger.kernel.org
M: Ivan Hu <ivan.hu@canonical.com>
-M: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+M: Ard Biesheuvel <ardb@kernel.org>
S: Maintained
F: drivers/firmware/efi/test/
EFI VARIABLE FILESYSTEM
M: Matthew Garrett <matthew.garrett@nebula.com>
M: Jeremy Kerr <jk@ozlabs.org>
-M: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+M: Ard Biesheuvel <ardb@kernel.org>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/efi/efi.git
L: linux-efi@vger.kernel.org
S: Maintained
@@ -6153,10 +6172,12 @@ S: Maintained
F: Documentation/ABI/testing/sysfs-class-net-phydev
F: Documentation/devicetree/bindings/net/ethernet-phy.yaml
F: Documentation/devicetree/bindings/net/mdio*
+F: Documentation/devicetree/bindings/net/qca,ar803x.yaml
F: Documentation/networking/phy.rst
F: drivers/net/phy/
F: drivers/of/of_mdio.c
F: drivers/of/of_net.c
+F: include/dt-bindings/net/qca-ar803x.h
F: include/linux/*mdio*.h
F: include/linux/of_net.h
F: include/linux/phy.h
@@ -6199,7 +6220,7 @@ S: Supported
F: security/integrity/evm/
EXTENSIBLE FIRMWARE INTERFACE (EFI)
-M: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+M: Ard Biesheuvel <ardb@kernel.org>
L: linux-efi@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/efi/efi.git
S: Maintained
@@ -7375,6 +7396,25 @@ F: include/uapi/linux/if_hippi.h
F: net/802/hippi.c
F: drivers/net/hippi/
+HISILICON SECURITY ENGINE V2 DRIVER (SEC2)
+M: Zaibo Xu <xuzaibo@huawei.com>
+L: linux-crypto@vger.kernel.org
+S: Maintained
+F: drivers/crypto/hisilicon/sec2/sec_crypto.c
+F: drivers/crypto/hisilicon/sec2/sec_main.c
+F: drivers/crypto/hisilicon/sec2/sec_crypto.h
+F: drivers/crypto/hisilicon/sec2/sec.h
+F: Documentation/ABI/testing/debugfs-hisi-sec
+
+HISILICON HIGH PERFORMANCE RSA ENGINE DRIVER (HPRE)
+M: Zaibo Xu <xuzaibo@huawei.com>
+L: linux-crypto@vger.kernel.org
+S: Maintained
+F: drivers/crypto/hisilicon/hpre/hpre_crypto.c
+F: drivers/crypto/hisilicon/hpre/hpre_main.c
+F: drivers/crypto/hisilicon/hpre/hpre.h
+F: Documentation/ABI/testing/debugfs-hisi-hpre
+
HISILICON NETWORK SUBSYSTEM 3 DRIVER (HNS3)
M: Yisen Zhuang <yisen.zhuang@huawei.com>
M: Salil Mehta <salil.mehta@huawei.com>
@@ -7383,6 +7423,11 @@ W: http://www.hisilicon.com
S: Maintained
F: drivers/net/ethernet/hisilicon/hns3/
+HISILICON TRUE RANDOM NUMBER GENERATOR V2 SUPPORT
+M: Zaibo Xu <xuzaibo@huawei.com>
+S: Maintained
+F: drivers/char/hw_random/hisi-trng-v2.c
+
HISILICON LPC BUS DRIVER
M: john.garry@huawei.com
W: http://www.hisilicon.com
@@ -7428,7 +7473,6 @@ S: Maintained
F: drivers/crypto/hisilicon/qm.c
F: drivers/crypto/hisilicon/qm.h
F: drivers/crypto/hisilicon/sgl.c
-F: drivers/crypto/hisilicon/sgl.h
F: drivers/crypto/hisilicon/zip/
F: Documentation/ABI/testing/debugfs-hisi-zip
@@ -7454,8 +7498,8 @@ F: drivers/platform/x86/tc1100-wmi.c
HP100: Driver for HP 10/100 Mbit/s Voice Grade Network Adapter Series
M: Jaroslav Kysela <perex@perex.cz>
-S: Maintained
-F: drivers/net/ethernet/hp/hp100.*
+S: Obsolete
+F: drivers/staging/hp/hp100.*
HPET: High Precision Event Timers driver
M: Clemens Ladisch <clemens@ladisch.de>
@@ -7556,6 +7600,13 @@ L: linux-kernel@vger.kernel.org
S: Maintained
F: arch/x86/kernel/cpu/hygon.c
+HYNIX HI556 SENSOR DRIVER
+M: Shawn Tu <shawnx.tu@intel.com>
+L: linux-media@vger.kernel.org
+T: git git://linuxtv.org/media_tree.git
+S: Maintained
+F: drivers/media/i2c/hi556.c
+
Hyper-V CORE AND DRIVERS
M: "K. Y. Srinivasan" <kys@microsoft.com>
M: Haiyang Zhang <haiyangz@microsoft.com>
@@ -7740,7 +7791,7 @@ F: drivers/i2c/i2c-stub.c
I3C SUBSYSTEM
M: Boris Brezillon <bbrezillon@kernel.org>
-L: linux-i3c@lists.infradead.org
+L: linux-i3c@lists.infradead.org (moderated for non-subscribers)
C: irc://chat.freenode.net/linux-i3c
T: git git://git.kernel.org/pub/scm/linux/kernel/git/i3c/linux.git
S: Maintained
@@ -7756,6 +7807,12 @@ S: Maintained
F: Documentation/devicetree/bindings/i3c/snps,dw-i3c-master.txt
F: drivers/i3c/master/dw*
+I3C DRIVER FOR CADENCE I3C MASTER IP
+M: Przemysław Gaj <pgaj@cadence.com>
+S: Maintained
+F: Documentation/devicetree/bindings/i3c/cdns,i3c-master.txt
+F: drivers/i3c/master/i3c-master-cdns.c
+
IA64 (Itanium) PLATFORM
M: Tony Luck <tony.luck@intel.com>
M: Fenghua Yu <fenghua.yu@intel.com>
@@ -8309,11 +8366,14 @@ F: drivers/hid/intel-ish-hid/
INTEL IOMMU (VT-d)
M: David Woodhouse <dwmw2@infradead.org>
+M: Lu Baolu <baolu.lu@linux.intel.com>
L: iommu@lists.linux-foundation.org
-T: git git://git.infradead.org/iommu-2.6.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu.git
S: Supported
-F: drivers/iommu/intel-iommu.c
+F: drivers/iommu/dmar.c
+F: drivers/iommu/intel*.[ch]
F: include/linux/intel-iommu.h
+F: include/linux/intel-svm.h
INTEL IOP-ADMA DMA DRIVER
R: Dan Williams <dan.j.williams@intel.com>
@@ -8337,6 +8397,7 @@ S: Maintained
F: drivers/staging/media/ipu3/
F: Documentation/media/uapi/v4l/pixfmt-meta-intel-ipu3.rst
F: Documentation/media/v4l-drivers/ipu3.rst
+F: Documentation/media/v4l-drivers/ipu3_rcb.svg
INTEL IXP4XX QMGR, NPE, ETHERNET and HSS SUPPORT
M: Krzysztof Halasa <khalasa@piap.pl>
@@ -8572,12 +8633,13 @@ F: include/linux/iova.h
IO_URING
M: Jens Axboe <axboe@kernel.dk>
-L: linux-block@vger.kernel.org
-L: linux-fsdevel@vger.kernel.org
+L: io-uring@vger.kernel.org
T: git git://git.kernel.dk/linux-block
T: git git://git.kernel.dk/liburing
S: Maintained
F: fs/io_uring.c
+F: fs/io-wq.c
+F: fs/io-wq.h
F: include/uapi/linux/io_uring.h
IPMI SUBSYSTEM
@@ -8928,6 +8990,17 @@ S: Maintained
F: tools/testing/selftests/
F: Documentation/dev-tools/kselftest*
+KERNEL UNIT TESTING FRAMEWORK (KUnit)
+M: Brendan Higgins <brendanhiggins@google.com>
+L: linux-kselftest@vger.kernel.org
+L: kunit-dev@googlegroups.com
+W: https://google.github.io/kunit-docs/third_party/kernel/docs/
+S: Maintained
+F: Documentation/dev-tools/kunit/
+F: include/kunit/
+F: lib/kunit/
+F: tools/testing/kunit/
+
KERNEL USERMODE HELPER
M: Luis Chamberlain <mcgrof@kernel.org>
L: linux-kernel@vger.kernel.org
@@ -9505,6 +9578,13 @@ F: Documentation/misc-devices/lis3lv02d.rst
F: drivers/misc/lis3lv02d/
F: drivers/platform/x86/hp_accel.c
+LIST KUNIT TEST
+M: David Gow <davidgow@google.com>
+L: linux-kselftest@vger.kernel.org
+L: kunit-dev@googlegroups.com
+S: Maintained
+F: lib/list-test.c
+
LIVE PATCHING
M: Josh Poimboeuf <jpoimboe@redhat.com>
M: Jiri Kosina <jikos@kernel.org>
@@ -9648,6 +9728,17 @@ S: Maintained
F: Documentation/hwmon/ltc4261.rst
F: drivers/hwmon/ltc4261.c
+LTC2947 HARDWARE MONITOR DRIVER
+M: Nuno Sá <nuno.sa@analog.com>
+W: http://ez.analog.com/community/linux-device-drivers
+L: linux-hwmon@vger.kernel.org
+S: Supported
+F: drivers/hwmon/ltc2947-core.c
+F: drivers/hwmon/ltc2947-spi.c
+F: drivers/hwmon/ltc2947-i2c.c
+F: drivers/hwmon/ltc2947.h
+F: Documentation/devicetree/bindings/hwmon/adi,ltc2947.yaml
+
LTC4306 I2C MULTIPLEXER DRIVER
M: Michael Hennerich <michael.hennerich@analog.com>
W: http://ez.analog.com/community/linux-device-drivers
@@ -9756,6 +9847,7 @@ S: Maintained
F: drivers/net/dsa/mv88e6xxx/
F: include/linux/platform_data/mv88e6xxx.h
F: Documentation/devicetree/bindings/net/dsa/marvell.txt
+F: Documentation/networking/devlink-params-mv88e6xxx.txt
MARVELL ARMADA DRM SUPPORT
M: Russell King <linux@armlinux.org.uk>
@@ -9880,7 +9972,7 @@ F: Documentation/hwmon/max16065.rst
F: drivers/hwmon/max16065.c
MAX2175 SDR TUNER DRIVER
-M: Ramesh Shanmugasundaram <ramesh.shanmugasundaram@bp.renesas.com>
+M: Ramesh Shanmugasundaram <rashanmu@gmail.com>
L: linux-media@vger.kernel.org
T: git git://linuxtv.org/media_tree.git
S: Maintained
@@ -10142,7 +10234,7 @@ F: drivers/media/platform/renesas-ceu.c
F: include/media/drv-intf/renesas-ceu.h
MEDIA DRIVERS FOR RENESAS - DRIF
-M: Ramesh Shanmugasundaram <ramesh.shanmugasundaram@bp.renesas.com>
+M: Ramesh Shanmugasundaram <rashanmu@gmail.com>
L: linux-media@vger.kernel.org
L: linux-renesas-soc@vger.kernel.org
T: git git://linuxtv.org/media_tree.git
@@ -10552,15 +10644,13 @@ F: include/linux/vmalloc.h
F: mm/
MEMORY TECHNOLOGY DEVICES (MTD)
-M: David Woodhouse <dwmw2@infradead.org>
-M: Brian Norris <computersforpeace@gmail.com>
-M: Marek Vasut <marek.vasut@gmail.com>
M: Miquel Raynal <miquel.raynal@bootlin.com>
M: Richard Weinberger <richard@nod.at>
M: Vignesh Raghavendra <vigneshr@ti.com>
L: linux-mtd@lists.infradead.org
W: http://www.linux-mtd.infradead.org/
Q: http://patchwork.ozlabs.org/project/linux-mtd/list/
+C: irc://irc.oftc.net/mtd
T: git git://git.kernel.org/pub/scm/linux/kernel/git/mtd/linux.git mtd/fixes
T: git git://git.kernel.org/pub/scm/linux/kernel/git/mtd/linux.git mtd/next
S: Maintained
@@ -10605,7 +10695,7 @@ W: http://linux-meson.com/
S: Supported
F: drivers/media/platform/meson/ao-cec.c
F: drivers/media/platform/meson/ao-cec-g12a.c
-F: Documentation/devicetree/bindings/media/meson-ao-cec.txt
+F: Documentation/devicetree/bindings/media/amlogic,meson-gx-ao-cec.yaml
T: git git://linuxtv.org/media_tree.git
MESON NAND CONTROLLER DRIVER FOR AMLOGIC SOCS
@@ -10837,6 +10927,7 @@ M: Microchip Linux Driver Support <UNGLinuxDriver@microchip.com>
L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/mscc/
+F: include/soc/mscc/ocelot*
MICROSOFT SURFACE PRO 3 BUTTON DRIVER
M: Chen Yu <yu.c.chen@intel.com>
@@ -10891,18 +10982,18 @@ F: arch/mips/include/asm/mach-loongson32/
F: drivers/*/*loongson1*
F: drivers/*/*/*loongson1*
-MIPS/LOONGSON2 ARCHITECTURE
+MIPS/LOONGSON2EF ARCHITECTURE
M: Jiaxun Yang <jiaxun.yang@flygoat.com>
L: linux-mips@vger.kernel.org
S: Maintained
-F: arch/mips/loongson64/fuloong-2e/
-F: arch/mips/loongson64/lemote-2f/
-F: arch/mips/include/asm/mach-loongson64/
+F: arch/mips/loongson2ef/
+F: arch/mips/include/asm/mach-loongson2ef/
F: drivers/*/*loongson2*
F: drivers/*/*/*loongson2*
-MIPS/LOONGSON3 ARCHITECTURE
+MIPS/LOONGSON64 ARCHITECTURE
M: Huacai Chen <chenhc@lemote.com>
+M: Jiaxun Yang <jiaxun.yang@flygoat.com>
L: linux-mips@vger.kernel.org
S: Maintained
F: arch/mips/loongson64/
@@ -11653,6 +11744,7 @@ F: drivers/nvme/target/fcloop.c
NVM EXPRESS TARGET DRIVER
M: Christoph Hellwig <hch@lst.de>
M: Sagi Grimberg <sagi@grimberg.me>
+M: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>
L: linux-nvme@lists.infradead.org
T: git://git.infradead.org/nvme.git
W: http://git.infradead.org/nvme.git
@@ -12794,6 +12886,13 @@ F: arch/*/events/*
F: arch/*/events/*/*
F: tools/perf/
+PERFORMANCE EVENTS SUBSYSTEM ARM64 PMU EVENTS
+R: John Garry <john.garry@huawei.com>
+R: Will Deacon <will@kernel.org>
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S: Supported
+F: tools/perf/pmu-events/arch/arm64/
+
PERSONALITY HANDLING
M: Christoph Hellwig <hch@infradead.org>
L: linux-abi-devel@lists.sourceforge.net
@@ -12851,6 +12950,7 @@ S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/brauner/linux.git
F: samples/pidfd/
F: tools/testing/selftests/pidfd/
+F: tools/testing/selftests/clone3/
K: (?i)pidfd
K: (?i)clone3
K: \b(clone_args|kernel_clone_args)\b
@@ -12886,7 +12986,7 @@ F: Documentation/devicetree/bindings/pinctrl/fsl,*
PIN CONTROLLER - INTEL
M: Mika Westerberg <mika.westerberg@linux.intel.com>
-M: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+M: Andy Shevchenko <andy@kernel.org>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/pinctrl/intel.git
S: Maintained
F: drivers/pinctrl/intel/
@@ -13016,6 +13116,15 @@ L: linux-scsi@vger.kernel.org
S: Supported
F: drivers/scsi/pm8001/
+PM-GRAPH UTILITY
+M: "Todd E Brandt" <todd.e.brandt@linux.intel.com>
+L: linux-pm@vger.kernel.org
+W: https://01.org/pm-graph
+B: https://bugzilla.kernel.org/buglist.cgi?component=pm-graph&product=Tools
+T: git git://github.com/intel/pm-graph
+S: Supported
+F: tools/power/pm-graph
+
PNP SUPPORT
M: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
S: Maintained
@@ -13148,12 +13257,14 @@ F: Documentation/filesystems/proc.txt
PROC SYSCTL
M: Luis Chamberlain <mcgrof@kernel.org>
M: Kees Cook <keescook@chromium.org>
+M: Iurii Zaikin <yzaikin@google.com>
L: linux-kernel@vger.kernel.org
L: linux-fsdevel@vger.kernel.org
S: Maintained
F: fs/proc/proc_sysctl.c
F: include/linux/sysctl.h
F: kernel/sysctl.c
+F: kernel/sysctl-test.c
F: tools/testing/selftests/sysctl/
PS3 NETWORK SUPPORT
@@ -13837,7 +13948,7 @@ R: Sergei Shtylyov <sergei.shtylyov@cogentembedded.com>
L: netdev@vger.kernel.org
L: linux-renesas-soc@vger.kernel.org
F: Documentation/devicetree/bindings/net/renesas,*.txt
-F: Documentation/devicetree/bindings/net/sh_eth.txt
+F: Documentation/devicetree/bindings/net/renesas,*.yaml
F: drivers/net/ethernet/renesas/
F: include/linux/sh_eth.h
@@ -15039,7 +15150,7 @@ F: include/media/soc_camera.h
F: drivers/staging/media/soc_camera/
SOCIONEXT SYNQUACER I2C DRIVER
-M: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+M: Ard Biesheuvel <ardb@kernel.org>
L: linux-i2c@vger.kernel.org
S: Maintained
F: drivers/i2c/busses/i2c-synquacer.c
@@ -15174,6 +15285,14 @@ S: Maintained
F: drivers/media/i2c/imx274.c
F: Documentation/devicetree/bindings/media/i2c/imx274.txt
+SONY IMX290 SENSOR DRIVER
+M: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+L: linux-media@vger.kernel.org
+T: git git://linuxtv.org/media_tree.git
+S: Maintained
+F: drivers/media/i2c/imx290.c
+F: Documentation/devicetree/bindings/media/i2c/imx290.txt
+
SONY IMX319 SENSOR DRIVER
M: Bingbu Cao <bingbu.cao@intel.com>
L: linux-media@vger.kernel.org
@@ -15321,7 +15440,6 @@ F: arch/arm/boot/dts/spear*
F: arch/arm/mach-spear/
SPI NOR SUBSYSTEM
-M: Marek Vasut <marek.vasut@gmail.com>
M: Tudor Ambarus <tudor.ambarus@microchip.com>
L: linux-mtd@lists.infradead.org
W: http://www.linux-mtd.infradead.org/
@@ -16351,6 +16469,7 @@ W: http://linuxtv.org/
Q: http://patchwork.linuxtv.org/project/linux-media/list/
S: Maintained
F: drivers/media/platform/ti-vpe/
+F: Documentation/devicetree/bindings/media/ti,vpe.yaml
TI WILINK WIRELESS DRIVERS
L: linux-wireless@vger.kernel.org
@@ -16421,6 +16540,13 @@ S: Maintained
F: Documentation/hwmon/tmp401.rst
F: drivers/hwmon/tmp401.c
+TMP513 HARDWARE MONITOR DRIVER
+M: Eric Tremblay <etremblay@distech-controls.com>
+L: linux-hwmon@vger.kernel.org
+S: Maintained
+F: Documentation/hwmon/tmp513.rst
+F: drivers/hwmon/tmp513.c
+
TMPFS (SHMEM FILESYSTEM)
M: Hugh Dickins <hughd@google.com>
L: linux-mm@kvack.org
@@ -16632,10 +16758,9 @@ F: drivers/media/pci/tw686x/
UBI FILE SYSTEM (UBIFS)
M: Richard Weinberger <richard@nod.at>
-M: Artem Bityutskiy <dedekind1@gmail.com>
-M: Adrian Hunter <adrian.hunter@intel.com>
L: linux-mtd@lists.infradead.org
-T: git git://git.infradead.org/ubifs-2.6.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/rw/ubifs.git next
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/rw/ubifs.git fixes
W: http://www.linux-mtd.infradead.org/doc/ubifs.html
S: Supported
F: Documentation/filesystems/ubifs.txt
@@ -16750,11 +16875,11 @@ S: Maintained
F: drivers/scsi/ufs/ufs-mediatek*
UNSORTED BLOCK IMAGES (UBI)
-M: Artem Bityutskiy <dedekind1@gmail.com>
M: Richard Weinberger <richard@nod.at>
W: http://www.linux-mtd.infradead.org/
L: linux-mtd@lists.infradead.org
-T: git git://git.infradead.org/ubifs-2.6.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/rw/ubifs.git next
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/rw/ubifs.git fixes
S: Supported
F: drivers/mtd/ubi/
F: include/linux/mtd/ubi.h
@@ -17241,6 +17366,7 @@ F: include/media/videobuf2-*
VIMC VIRTUAL MEDIA CONTROLLER DRIVER
M: Helen Koike <helen.koike@collabora.com>
+R: Shuah Khan <skhan@linuxfoundation.org>
L: linux-media@vger.kernel.org
T: git git://linuxtv.org/media_tree.git
W: https://linuxtv.org
@@ -17256,6 +17382,7 @@ F: virt/lib/
VIRTIO AND VHOST VSOCK DRIVER
M: Stefan Hajnoczi <stefanha@redhat.com>
+M: Stefano Garzarella <sgarzare@redhat.com>
L: kvm@vger.kernel.org
L: virtualization@lists.linux-foundation.org
L: netdev@vger.kernel.org
@@ -17381,18 +17508,20 @@ F: include/linux/vbox_utils.h
F: include/uapi/linux/vbox*.h
F: drivers/virt/vboxguest/
-VIRTUAL BOX SHARED FOLDER VFS DRIVER:
-M: Hans de Goede <hdegoede@redhat.com>
-L: linux-fsdevel@vger.kernel.org
-S: Maintained
-F: drivers/staging/vboxsf/*
-
VIRTUAL SERIO DEVICE DRIVER
M: Stephen Chandler Paul <thatslyude@gmail.com>
S: Maintained
F: drivers/input/serio/userio.c
F: include/uapi/linux/userio.h
+VITESSE FELIX ETHERNET SWITCH DRIVER
+M: Vladimir Oltean <vladimir.oltean@nxp.com>
+M: Claudiu Manoil <claudiu.manoil@nxp.com>
+L: netdev@vger.kernel.org
+S: Maintained
+F: drivers/net/dsa/ocelot/*
+F: net/dsa/tag_ocelot.c
+
VIVID VIRTUAL VIDEO DRIVER
M: Hans Verkuil <hverkuil@xs4all.nl>
L: linux-media@vger.kernel.org
@@ -17493,6 +17622,18 @@ S: Maintained
F: drivers/net/vrf.c
F: Documentation/networking/vrf.txt
+VSPRINTF
+M: Petr Mladek <pmladek@suse.com>
+M: Steven Rostedt <rostedt@goodmis.org>
+M: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
+R: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+R: Rasmus Villemoes <linux@rasmusvillemoes.dk>
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/pmladek/printk.git
+S: Maintained
+F: lib/vsprintf.c
+F: lib/test_printf.c
+F: Documentation/core-api/printk-formats.rst
+
VT1211 HARDWARE MONITOR DRIVER
M: Juerg Haefliger <juergh@gmail.com>
L: linux-hwmon@vger.kernel.org
@@ -17507,10 +17648,8 @@ S: Maintained
F: drivers/hwmon/vt8231.c
VUB300 USB to SDIO/SD/MMC bridge chip
-M: Tony Olech <tony.olech@elandigitalsystems.com>
L: linux-mmc@vger.kernel.org
-L: linux-usb@vger.kernel.org
-S: Supported
+S: Orphan
F: drivers/mmc/host/vub300.c
W1 DALLAS'S 1-WIRE BUS
diff --git a/Makefile b/Makefile
index 1d5298356ea8..d4d36c61940b 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
VERSION = 5
PATCHLEVEL = 4
SUBLEVEL = 0
-EXTRAVERSION = -rc7
+EXTRAVERSION =
NAME = Kleptomaniac Octopus
# *DOCUMENTATION*
@@ -917,6 +917,9 @@ ifeq ($(CONFIG_RELR),y)
LDFLAGS_vmlinux += --pack-dyn-relocs=relr
endif
+# make the checker run with the right architecture
+CHECKFLAGS += --arch=$(ARCH)
+
# ensure the checker runs with the right endianness
CHECKFLAGS += $(if $(CONFIG_CPU_BIG_ENDIAN),-mbig-endian,-mlittle-endian)
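
With both flags in place, sparse (invoked via "make C=1") knows the target architecture and endianness, which is what lets its __bitwise annotations catch byte-order bugs. The sketch below is illustrative only — a minimal example of the class of error this enables sparse to report, assuming the usual <linux/types.h> endian typedefs:

/*
 * Illustrative only: the kind of byte-order misuse sparse reports
 * once CHECKFLAGS carries the right --arch and -m*-endian flags.
 */
#include <linux/types.h>
#include <asm/byteorder.h>

struct pkt_hdr {
	__be32 len;			/* big-endian on the wire */
};

static void fill_hdr(struct pkt_hdr *h, u32 host_len)
{
	h->len = host_len;		/* sparse: incorrect type in assignment */
	h->len = cpu_to_be32(host_len);	/* correct: explicit conversion */
}
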
diff --git a/arch/Kconfig b/arch/Kconfig
index 5f8a5d84dbbe..8bcc1c746142 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -892,27 +892,6 @@ config STRICT_MODULE_RWX
config ARCH_HAS_PHYS_TO_DMA
bool
-config ARCH_HAS_REFCOUNT
- bool
- help
- An architecture selects this when it has implemented refcount_t
- using open coded assembly primitives that provide an optimized
- refcount_t implementation, possibly at the expense of some full
- refcount state checks of CONFIG_REFCOUNT_FULL=y.
-
- The refcount overflow check behavior, however, must be retained.
- Catching overflows is the primary security concern for protecting
- against bugs in reference counts.
-
-config REFCOUNT_FULL
- bool "Perform full reference count validation at the expense of speed"
- help
- Enabling this switches the refcounting infrastructure from a fast
- unchecked atomic_t implementation to a fully state checked
- implementation, which can be (slightly) slower but provides protections
- against various use-after-free conditions that can be used in
- security flaw exploits.
-
config HAVE_ARCH_COMPILER_H
bool
help
diff --git a/arch/alpha/kernel/perf_event.c b/arch/alpha/kernel/perf_event.c
index 4341ccf5c0c4..e7a59d927d78 100644
--- a/arch/alpha/kernel/perf_event.c
+++ b/arch/alpha/kernel/perf_event.c
@@ -824,7 +824,7 @@ static void alpha_perf_event_irq_handler(unsigned long la_ptr,
if (unlikely(la_ptr >= alpha_pmu->num_pmcs)) {
/* This should never occur! */
irq_err_count++;
- pr_warning("PMI: silly index %ld\n", la_ptr);
+ pr_warn("PMI: silly index %ld\n", la_ptr);
wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
return;
}
@@ -847,7 +847,7 @@ static void alpha_perf_event_irq_handler(unsigned long la_ptr,
if (unlikely(!event)) {
/* This should never occur! */
irq_err_count++;
- pr_warning("PMI: No event at index %d!\n", idx);
+ pr_warn("PMI: No event at index %d!\n", idx);
wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
return;
}
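
These conversions are part of retiring pr_warning(), which — as far as we recall from <linux/printk.h> — was only ever a thin alias for pr_warn(); once every caller is converted, the alias can be deleted. Roughly:

/* From memory of <linux/printk.h>; shown for orientation only. */
#define pr_warn(fmt, ...) \
	printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
#define pr_warning pr_warn	/* the alias being phased out */
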
diff --git a/arch/alpha/kernel/vmlinux.lds.S b/arch/alpha/kernel/vmlinux.lds.S
index c4b5ceceab52..bc6f727278fd 100644
--- a/arch/alpha/kernel/vmlinux.lds.S
+++ b/arch/alpha/kernel/vmlinux.lds.S
@@ -1,4 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */
+
+#define EMITS_PT_NOTE
+#define RO_EXCEPTION_TABLE_ALIGN 16
+
#include <asm-generic/vmlinux.lds.h>
#include <asm/thread_info.h>
#include <asm/cache.h>
@@ -8,7 +12,7 @@
OUTPUT_FORMAT("elf64-alpha")
OUTPUT_ARCH(alpha)
ENTRY(__start)
-PHDRS { kernel PT_LOAD; note PT_NOTE; }
+PHDRS { text PT_LOAD; note PT_NOTE; }
jiffies = jiffies_64;
SECTIONS
{
@@ -27,17 +31,11 @@ SECTIONS
LOCK_TEXT
*(.fixup)
*(.gnu.warning)
- } :kernel
+ } :text
swapper_pg_dir = SWAPPER_PGD;
_etext = .; /* End of text section */
- NOTES :kernel :note
- .dummy : {
- *(.dummy)
- } :kernel
-
- RODATA
- EXCEPTION_TABLE(16)
+ RO_DATA(4096)
/* Will be freed after init */
__init_begin = ALIGN(PAGE_SIZE);
@@ -52,7 +50,7 @@ SECTIONS
_sdata = .; /* Start of rw data section */
_data = .;
- RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
+ RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
.got : {
*(.got)
diff --git a/arch/arc/kernel/vmlinux.lds.S b/arch/arc/kernel/vmlinux.lds.S
index 6c693a9d29b6..54139a6f469b 100644
--- a/arch/arc/kernel/vmlinux.lds.S
+++ b/arch/arc/kernel/vmlinux.lds.S
@@ -95,13 +95,13 @@ SECTIONS
_etext = .;
_sdata = .;
- RO_DATA_SECTION(PAGE_SIZE)
+ RO_DATA(PAGE_SIZE)
/*
* 1. this is .data essentially
* 2. THREAD_SIZE for init.task, must be kernel-stk sz aligned
*/
- RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
+ RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
_edata = .;
@@ -118,8 +118,6 @@ SECTIONS
/DISCARD/ : { *(.eh_frame) }
#endif
- NOTES
-
. = ALIGN(PAGE_SIZE);
_end = . ;
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 8a50efb559f3..0d3c5d7cceb7 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -117,7 +117,6 @@ config ARM
select OLD_SIGSUSPEND3
select PCI_SYSCALL if PCI
select PERF_USE_VMALLOC
- select REFCOUNT_FULL
select RTC_LIB
select SYS_SUPPORTS_APM_EMULATION
# Above selects are sorted alphabetically; please add new ones
diff --git a/arch/arm/boot/dts/am3517.dtsi b/arch/arm/boot/dts/am3517.dtsi
index bf3002009b00..76f819f4ba48 100644
--- a/arch/arm/boot/dts/am3517.dtsi
+++ b/arch/arm/boot/dts/am3517.dtsi
@@ -16,6 +16,37 @@
can = &hecc;
};
+ cpus {
+ cpu: cpu@0 {
+ /* Based on OMAP3630 variants OPP50 and OPP100 */
+ operating-points-v2 = <&cpu0_opp_table>;
+
+ clock-latency = <300000>; /* From legacy driver */
+ };
+ };
+
+ cpu0_opp_table: opp-table {
+ compatible = "operating-points-v2-ti-cpu";
+ syscon = <&scm_conf>;
+ /*
+		 * The AM3517 TRM only lists 600MHz @ 1.2V, but omap36xx
+		 * parts appear to operate at 300MHz as well. Since AM3517
+		 * only lists one operating voltage, it remains fixed at 1.2V.
+ */
+ opp50-300000000 {
+ opp-hz = /bits/ 64 <300000000>;
+ opp-microvolt = <1200000>;
+ opp-supported-hw = <0xffffffff 0xffffffff>;
+ opp-suspend;
+ };
+
+ opp100-600000000 {
+ opp-hz = /bits/ 64 <600000000>;
+ opp-microvolt = <1200000>;
+ opp-supported-hw = <0xffffffff 0xffffffff>;
+ };
+ };
+
ocp@68000000 {
am35x_otg_hs: am35x_otg_hs@5c040000 {
compatible = "ti,omap3-musb";
diff --git a/arch/arm/boot/dts/am3517_mt_ventoux.dts b/arch/arm/boot/dts/am3517_mt_ventoux.dts
index e507e4ae0d88..e7d7124a34ba 100644
--- a/arch/arm/boot/dts/am3517_mt_ventoux.dts
+++ b/arch/arm/boot/dts/am3517_mt_ventoux.dts
@@ -8,7 +8,7 @@
/ {
model = "TeeJet Mt.Ventoux";
- compatible = "teejet,mt_ventoux", "ti,omap3";
+ compatible = "teejet,mt_ventoux", "ti,am3517", "ti,omap3";
memory@80000000 {
device_type = "memory";
diff --git a/arch/arm/boot/dts/am571x-idk.dts b/arch/arm/boot/dts/am571x-idk.dts
index 0aaacea1d887..820ce3b60bb6 100644
--- a/arch/arm/boot/dts/am571x-idk.dts
+++ b/arch/arm/boot/dts/am571x-idk.dts
@@ -186,3 +186,30 @@
pinctrl-1 = <&mmc2_pins_hs>;
pinctrl-2 = <&mmc2_pins_ddr_rev20 &mmc2_iodelay_ddr_conf>;
};
+
+&mac_sw {
+ pinctrl-names = "default", "sleep";
+ status = "okay";
+};
+
+&cpsw_port1 {
+ phy-handle = <&ethphy0_sw>;
+ phy-mode = "rgmii";
+ ti,dual-emac-pvid = <1>;
+};
+
+&cpsw_port2 {
+ phy-handle = <&ethphy1_sw>;
+ phy-mode = "rgmii";
+ ti,dual-emac-pvid = <2>;
+};
+
+&davinci_mdio_sw {
+ ethphy0_sw: ethernet-phy@0 {
+ reg = <0>;
+ };
+
+ ethphy1_sw: ethernet-phy@1 {
+ reg = <1>;
+ };
+};
diff --git a/arch/arm/boot/dts/am572x-idk.dts b/arch/arm/boot/dts/am572x-idk.dts
index ea1c119feaa5..c3d966904d64 100644
--- a/arch/arm/boot/dts/am572x-idk.dts
+++ b/arch/arm/boot/dts/am572x-idk.dts
@@ -27,3 +27,8 @@
pinctrl-1 = <&mmc2_pins_hs>;
pinctrl-2 = <&mmc2_pins_ddr_rev20>;
};
+
+&mac {
+ status = "okay";
+ dual_emac;
+};
diff --git a/arch/arm/boot/dts/am574x-idk.dts b/arch/arm/boot/dts/am574x-idk.dts
index 7935d70874ce..fa0088025b2c 100644
--- a/arch/arm/boot/dts/am574x-idk.dts
+++ b/arch/arm/boot/dts/am574x-idk.dts
@@ -35,3 +35,8 @@
pinctrl-1 = <&mmc2_pins_default>;
pinctrl-2 = <&mmc2_pins_default>;
};
+
+&mac {
+ status = "okay";
+ dual_emac;
+};
diff --git a/arch/arm/boot/dts/am57xx-idk-common.dtsi b/arch/arm/boot/dts/am57xx-idk-common.dtsi
index 423855a2a2d6..398721c7201c 100644
--- a/arch/arm/boot/dts/am57xx-idk-common.dtsi
+++ b/arch/arm/boot/dts/am57xx-idk-common.dtsi
@@ -363,11 +363,6 @@
ext-clk-src;
};
-&mac {
- status = "okay";
- dual_emac;
-};
-
&cpsw_emac0 {
phy-handle = <&ethphy0>;
phy-mode = "rgmii";
diff --git a/arch/arm/boot/dts/dra7-l4.dtsi b/arch/arm/boot/dts/dra7-l4.dtsi
index 5cac2dd58241..37e048771b0f 100644
--- a/arch/arm/boot/dts/dra7-l4.dtsi
+++ b/arch/arm/boot/dts/dra7-l4.dtsi
@@ -3079,6 +3079,58 @@
phys = <&phy_gmii_sel 2>;
};
};
+
+ mac_sw: switch@0 {
+ compatible = "ti,dra7-cpsw-switch","ti,cpsw-switch";
+ reg = <0x0 0x4000>;
+ ranges = <0 0 0x4000>;
+ clocks = <&gmac_main_clk>;
+ clock-names = "fck";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ syscon = <&scm_conf>;
+ status = "disabled";
+
+ interrupts = <GIC_SPI 334 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 335 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 336 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 337 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "rx_thresh", "rx", "tx", "misc";
+
+ ethernet-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpsw_port1: port@1 {
+ reg = <1>;
+ label = "port1";
+ mac-address = [ 00 00 00 00 00 00 ];
+ phys = <&phy_gmii_sel 1>;
+ };
+
+ cpsw_port2: port@2 {
+ reg = <2>;
+ label = "port2";
+ mac-address = [ 00 00 00 00 00 00 ];
+ phys = <&phy_gmii_sel 2>;
+ };
+ };
+
+ davinci_mdio_sw: mdio@1000 {
+ compatible = "ti,cpsw-mdio","ti,davinci_mdio";
+ clocks = <&gmac_main_clk>;
+ clock-names = "fck";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ bus_freq = <1000000>;
+ reg = <0x1000 0x100>;
+ };
+
+ cpts {
+ clocks = <&gmac_clkctrl DRA7_GMAC_GMAC_CLKCTRL 25>;
+ clock-names = "cpts";
+ };
+ };
};
};
};
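
The new switch@0 node groups its ports under an ethernet-ports container. A driver binding against "ti,cpsw-switch" would typically walk that container with the generic OF helpers; the sketch below is an assumption about how such a walk looks, not the actual cpsw-switch driver code:

#include <linux/errno.h>
#include <linux/of.h>

/* Hypothetical walk over the ethernet-ports container added above. */
static int count_cpsw_ports(struct device_node *sw)
{
	struct device_node *ports, *port;
	u32 reg;
	int n = 0;

	ports = of_get_child_by_name(sw, "ethernet-ports");
	if (!ports)
		return -ENOENT;

	for_each_available_child_of_node(ports, port) {
		if (!of_property_read_u32(port, "reg", &reg))
			n++;	/* port@1, port@2, ... */
	}
	of_node_put(ports);
	return n;
}
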
diff --git a/arch/arm/boot/dts/logicpd-som-lv-35xx-devkit.dts b/arch/arm/boot/dts/logicpd-som-lv-35xx-devkit.dts
index f7a841a28865..2a0a98fe67f0 100644
--- a/arch/arm/boot/dts/logicpd-som-lv-35xx-devkit.dts
+++ b/arch/arm/boot/dts/logicpd-som-lv-35xx-devkit.dts
@@ -9,5 +9,5 @@
/ {
model = "LogicPD Zoom OMAP35xx SOM-LV Development Kit";
- compatible = "logicpd,dm3730-som-lv-devkit", "ti,omap3";
+ compatible = "logicpd,dm3730-som-lv-devkit", "ti,omap3430", "ti,omap3";
};
diff --git a/arch/arm/boot/dts/logicpd-torpedo-35xx-devkit.dts b/arch/arm/boot/dts/logicpd-torpedo-35xx-devkit.dts
index 7675bc3fa868..57bae2aa910e 100644
--- a/arch/arm/boot/dts/logicpd-torpedo-35xx-devkit.dts
+++ b/arch/arm/boot/dts/logicpd-torpedo-35xx-devkit.dts
@@ -9,5 +9,5 @@
/ {
model = "LogicPD Zoom OMAP35xx Torpedo Development Kit";
- compatible = "logicpd,dm3730-torpedo-devkit", "ti,omap3";
+ compatible = "logicpd,dm3730-torpedo-devkit", "ti,omap3430", "ti,omap3";
};
diff --git a/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi b/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi
index d1eae47b83f6..08bae935605c 100644
--- a/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi
+++ b/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi
@@ -43,11 +43,13 @@
compatible = "motorola,mapphone-cpcap-charger";
interrupts-extended = <
&cpcap 13 0 &cpcap 12 0 &cpcap 29 0 &cpcap 28 0
- &cpcap 22 0 &cpcap 20 0 &cpcap 19 0 &cpcap 54 0
+ &cpcap 22 0 &cpcap 21 0 &cpcap 20 0 &cpcap 19 0
+ &cpcap 54 0
>;
interrupt-names =
"chrg_det", "rvrs_chrg", "chrg_se1b", "se0conn",
- "rvrs_mode", "chrgcurr1", "vbusvld", "battdetb";
+ "rvrs_mode", "chrgcurr2", "chrgcurr1", "vbusvld",
+ "battdetb";
mode-gpios = <&gpio3 29 GPIO_ACTIVE_LOW
&gpio3 23 GPIO_ACTIVE_LOW>;
io-channels = <&cpcap_adc 0 &cpcap_adc 1
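
The charger node gains a "chrgcurr2" line, and the interrupts-extended entries stay paired with interrupt-names so consumers can resolve them by name. A hedged sketch of that lookup — platform_get_irq_byname() is the standard helper, but the surrounding function is hypothetical:

#include <linux/platform_device.h>

/* Hedged sketch: how a driver would pick up the new named IRQ. */
static int cpcap_charger_get_irqs(struct platform_device *pdev)
{
	int irq = platform_get_irq_byname(pdev, "chrgcurr2");

	if (irq < 0)
		return irq;	/* name missing or malformed */
	/* ... request_threaded_irq(irq, ...) as for the other lines ... */
	return 0;
}
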
diff --git a/arch/arm/boot/dts/omap3-beagle-xm.dts b/arch/arm/boot/dts/omap3-beagle-xm.dts
index 1aa99fc1487a..125ed933ca75 100644
--- a/arch/arm/boot/dts/omap3-beagle-xm.dts
+++ b/arch/arm/boot/dts/omap3-beagle-xm.dts
@@ -8,7 +8,7 @@
/ {
model = "TI OMAP3 BeagleBoard xM";
- compatible = "ti,omap3-beagle-xm", "ti,omap36xx", "ti,omap3";
+ compatible = "ti,omap3-beagle-xm", "ti,omap3630", "ti,omap36xx", "ti,omap3";
cpus {
cpu@0 {
diff --git a/arch/arm/boot/dts/omap3-beagle.dts b/arch/arm/boot/dts/omap3-beagle.dts
index e3df3c166902..4ed3f93f5841 100644
--- a/arch/arm/boot/dts/omap3-beagle.dts
+++ b/arch/arm/boot/dts/omap3-beagle.dts
@@ -8,7 +8,7 @@
/ {
model = "TI OMAP3 BeagleBoard";
- compatible = "ti,omap3-beagle", "ti,omap3";
+ compatible = "ti,omap3-beagle", "ti,omap3430", "ti,omap3";
cpus {
cpu@0 {
diff --git a/arch/arm/boot/dts/omap3-cm-t3530.dts b/arch/arm/boot/dts/omap3-cm-t3530.dts
index 76e52c78cbb4..32dbaeaed147 100644
--- a/arch/arm/boot/dts/omap3-cm-t3530.dts
+++ b/arch/arm/boot/dts/omap3-cm-t3530.dts
@@ -9,7 +9,7 @@
/ {
model = "CompuLab CM-T3530";
- compatible = "compulab,omap3-cm-t3530", "ti,omap34xx", "ti,omap3";
+ compatible = "compulab,omap3-cm-t3530", "ti,omap3430", "ti,omap34xx", "ti,omap3";
/* Regulator to trigger the reset signal of the Wifi module */
mmc2_sdio_reset: regulator-mmc2-sdio-reset {
diff --git a/arch/arm/boot/dts/omap3-cm-t3730.dts b/arch/arm/boot/dts/omap3-cm-t3730.dts
index 6e944dfa0f3d..683819bf0915 100644
--- a/arch/arm/boot/dts/omap3-cm-t3730.dts
+++ b/arch/arm/boot/dts/omap3-cm-t3730.dts
@@ -9,7 +9,7 @@
/ {
model = "CompuLab CM-T3730";
- compatible = "compulab,omap3-cm-t3730", "ti,omap36xx", "ti,omap3";
+ compatible = "compulab,omap3-cm-t3730", "ti,omap3630", "ti,omap36xx", "ti,omap3";
wl12xx_vmmc2: wl12xx_vmmc2 {
compatible = "regulator-fixed";
diff --git a/arch/arm/boot/dts/omap3-devkit8000-lcd43.dts b/arch/arm/boot/dts/omap3-devkit8000-lcd43.dts
index a80fc60bc773..afed85078ad8 100644
--- a/arch/arm/boot/dts/omap3-devkit8000-lcd43.dts
+++ b/arch/arm/boot/dts/omap3-devkit8000-lcd43.dts
@@ -11,7 +11,7 @@
#include "omap3-devkit8000-lcd-common.dtsi"
/ {
model = "TimLL OMAP3 Devkit8000 with 4.3'' LCD panel";
- compatible = "timll,omap3-devkit8000", "ti,omap3";
+ compatible = "timll,omap3-devkit8000", "ti,omap3430", "ti,omap3";
lcd0: display {
panel-timing {
diff --git a/arch/arm/boot/dts/omap3-devkit8000-lcd70.dts b/arch/arm/boot/dts/omap3-devkit8000-lcd70.dts
index 0753776071f8..07c51a105c0d 100644
--- a/arch/arm/boot/dts/omap3-devkit8000-lcd70.dts
+++ b/arch/arm/boot/dts/omap3-devkit8000-lcd70.dts
@@ -11,7 +11,7 @@
#include "omap3-devkit8000-lcd-common.dtsi"
/ {
model = "TimLL OMAP3 Devkit8000 with 7.0'' LCD panel";
- compatible = "timll,omap3-devkit8000", "ti,omap3";
+ compatible = "timll,omap3-devkit8000", "ti,omap3430", "ti,omap3";
lcd0: display {
panel-timing {
diff --git a/arch/arm/boot/dts/omap3-devkit8000.dts b/arch/arm/boot/dts/omap3-devkit8000.dts
index faafc48d8f61..162d0726b008 100644
--- a/arch/arm/boot/dts/omap3-devkit8000.dts
+++ b/arch/arm/boot/dts/omap3-devkit8000.dts
@@ -7,7 +7,7 @@
#include "omap3-devkit8000-common.dtsi"
/ {
model = "TimLL OMAP3 Devkit8000";
- compatible = "timll,omap3-devkit8000", "ti,omap3";
+ compatible = "timll,omap3-devkit8000", "ti,omap3430", "ti,omap3";
aliases {
display1 = &dvi0;
diff --git a/arch/arm/boot/dts/omap3-gta04.dtsi b/arch/arm/boot/dts/omap3-gta04.dtsi
index b6ef1a7ac8a4..409a758c99f1 100644
--- a/arch/arm/boot/dts/omap3-gta04.dtsi
+++ b/arch/arm/boot/dts/omap3-gta04.dtsi
@@ -11,7 +11,7 @@
/ {
model = "OMAP3 GTA04";
- compatible = "ti,omap3-gta04", "ti,omap36xx", "ti,omap3";
+ compatible = "ti,omap3-gta04", "ti,omap3630", "ti,omap36xx", "ti,omap3";
cpus {
cpu@0 {
diff --git a/arch/arm/boot/dts/omap3-ha-lcd.dts b/arch/arm/boot/dts/omap3-ha-lcd.dts
index badb9b3c8897..c9ecbc45c8e2 100644
--- a/arch/arm/boot/dts/omap3-ha-lcd.dts
+++ b/arch/arm/boot/dts/omap3-ha-lcd.dts
@@ -8,7 +8,7 @@
/ {
model = "TI OMAP3 HEAD acoustics LCD-baseboard with TAO3530 SOM";
- compatible = "headacoustics,omap3-ha-lcd", "technexion,omap3-tao3530", "ti,omap34xx", "ti,omap3";
+ compatible = "headacoustics,omap3-ha-lcd", "technexion,omap3-tao3530", "ti,omap3430", "ti,omap34xx", "ti,omap3";
};
&omap3_pmx_core {
diff --git a/arch/arm/boot/dts/omap3-ha.dts b/arch/arm/boot/dts/omap3-ha.dts
index a5365252bfbe..35c4e15abeb7 100644
--- a/arch/arm/boot/dts/omap3-ha.dts
+++ b/arch/arm/boot/dts/omap3-ha.dts
@@ -8,7 +8,7 @@
/ {
model = "TI OMAP3 HEAD acoustics baseboard with TAO3530 SOM";
- compatible = "headacoustics,omap3-ha", "technexion,omap3-tao3530", "ti,omap34xx", "ti,omap3";
+ compatible = "headacoustics,omap3-ha", "technexion,omap3-tao3530", "ti,omap3430", "ti,omap34xx", "ti,omap3";
};
&omap3_pmx_core {
diff --git a/arch/arm/boot/dts/omap3-igep0020-rev-f.dts b/arch/arm/boot/dts/omap3-igep0020-rev-f.dts
index 03dcd05fb8a0..d134ce1cffc0 100644
--- a/arch/arm/boot/dts/omap3-igep0020-rev-f.dts
+++ b/arch/arm/boot/dts/omap3-igep0020-rev-f.dts
@@ -10,7 +10,7 @@
/ {
model = "IGEPv2 Rev. F (TI OMAP AM/DM37x)";
- compatible = "isee,omap3-igep0020-rev-f", "ti,omap36xx", "ti,omap3";
+ compatible = "isee,omap3-igep0020-rev-f", "ti,omap3630", "ti,omap36xx", "ti,omap3";
/* Regulator to trigger the WL_EN signal of the Wifi module */
lbep5clwmc_wlen: regulator-lbep5clwmc-wlen {
diff --git a/arch/arm/boot/dts/omap3-igep0020.dts b/arch/arm/boot/dts/omap3-igep0020.dts
index 6d0519e3dfd0..e341535a7162 100644
--- a/arch/arm/boot/dts/omap3-igep0020.dts
+++ b/arch/arm/boot/dts/omap3-igep0020.dts
@@ -10,7 +10,7 @@
/ {
model = "IGEPv2 Rev. C (TI OMAP AM/DM37x)";
- compatible = "isee,omap3-igep0020", "ti,omap36xx", "ti,omap3";
+ compatible = "isee,omap3-igep0020", "ti,omap3630", "ti,omap36xx", "ti,omap3";
vmmcsdio_fixed: fixedregulator-mmcsdio {
compatible = "regulator-fixed";
diff --git a/arch/arm/boot/dts/omap3-igep0030-rev-g.dts b/arch/arm/boot/dts/omap3-igep0030-rev-g.dts
index 060acd1e803a..9ca1d0f61964 100644
--- a/arch/arm/boot/dts/omap3-igep0030-rev-g.dts
+++ b/arch/arm/boot/dts/omap3-igep0030-rev-g.dts
@@ -10,7 +10,7 @@
/ {
model = "IGEP COM MODULE Rev. G (TI OMAP AM/DM37x)";
- compatible = "isee,omap3-igep0030-rev-g", "ti,omap36xx", "ti,omap3";
+ compatible = "isee,omap3-igep0030-rev-g", "ti,omap3630", "ti,omap36xx", "ti,omap3";
/* Regulator to trigger the WL_EN signal of the Wifi module */
lbep5clwmc_wlen: regulator-lbep5clwmc-wlen {
diff --git a/arch/arm/boot/dts/omap3-igep0030.dts b/arch/arm/boot/dts/omap3-igep0030.dts
index 25170bd3c573..32f31035daa2 100644
--- a/arch/arm/boot/dts/omap3-igep0030.dts
+++ b/arch/arm/boot/dts/omap3-igep0030.dts
@@ -10,7 +10,7 @@
/ {
model = "IGEP COM MODULE Rev. E (TI OMAP AM/DM37x)";
- compatible = "isee,omap3-igep0030", "ti,omap36xx", "ti,omap3";
+ compatible = "isee,omap3-igep0030", "ti,omap3630", "ti,omap36xx", "ti,omap3";
vmmcsdio_fixed: fixedregulator-mmcsdio {
compatible = "regulator-fixed";
diff --git a/arch/arm/boot/dts/omap3-ldp.dts b/arch/arm/boot/dts/omap3-ldp.dts
index 9a5fde2d9bce..ec9ba04ef43b 100644
--- a/arch/arm/boot/dts/omap3-ldp.dts
+++ b/arch/arm/boot/dts/omap3-ldp.dts
@@ -10,7 +10,7 @@
/ {
model = "TI OMAP3430 LDP (Zoom1 Labrador)";
- compatible = "ti,omap3-ldp", "ti,omap3";
+ compatible = "ti,omap3-ldp", "ti,omap3430", "ti,omap3";
memory@80000000 {
device_type = "memory";
diff --git a/arch/arm/boot/dts/omap3-lilly-a83x.dtsi b/arch/arm/boot/dts/omap3-lilly-a83x.dtsi
index c22833d4e568..73d477898ec2 100644
--- a/arch/arm/boot/dts/omap3-lilly-a83x.dtsi
+++ b/arch/arm/boot/dts/omap3-lilly-a83x.dtsi
@@ -7,7 +7,7 @@
/ {
model = "INCOstartec LILLY-A83X module (DM3730)";
- compatible = "incostartec,omap3-lilly-a83x", "ti,omap36xx", "ti,omap3";
+ compatible = "incostartec,omap3-lilly-a83x", "ti,omap3630", "ti,omap36xx", "ti,omap3";
chosen {
bootargs = "console=ttyO0,115200n8 vt.global_cursor_default=0 consoleblank=0";
diff --git a/arch/arm/boot/dts/omap3-lilly-dbb056.dts b/arch/arm/boot/dts/omap3-lilly-dbb056.dts
index fec335400074..ecb4ef738e07 100644
--- a/arch/arm/boot/dts/omap3-lilly-dbb056.dts
+++ b/arch/arm/boot/dts/omap3-lilly-dbb056.dts
@@ -8,7 +8,7 @@
/ {
model = "INCOstartec LILLY-DBB056 (DM3730)";
- compatible = "incostartec,omap3-lilly-dbb056", "incostartec,omap3-lilly-a83x", "ti,omap36xx", "ti,omap3";
+ compatible = "incostartec,omap3-lilly-dbb056", "incostartec,omap3-lilly-a83x", "ti,omap3630", "ti,omap36xx", "ti,omap3";
};
&twl {
diff --git a/arch/arm/boot/dts/omap3-n9.dts b/arch/arm/boot/dts/omap3-n9.dts
index 74c0ff2350d3..2495a696cec6 100644
--- a/arch/arm/boot/dts/omap3-n9.dts
+++ b/arch/arm/boot/dts/omap3-n9.dts
@@ -12,7 +12,7 @@
/ {
model = "Nokia N9";
- compatible = "nokia,omap3-n9", "ti,omap36xx", "ti,omap3";
+ compatible = "nokia,omap3-n9", "ti,omap3630", "ti,omap36xx", "ti,omap3";
};
&i2c2 {
diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts
index 84a5ade1e865..63659880eeb3 100644
--- a/arch/arm/boot/dts/omap3-n900.dts
+++ b/arch/arm/boot/dts/omap3-n900.dts
@@ -155,6 +155,12 @@
pwms = <&pwm9 0 26316 0>; /* 38000 Hz */
};
+ rom_rng: rng {
+ compatible = "nokia,n900-rom-rng";
+ clocks = <&rng_ick>;
+ clock-names = "ick";
+ };
+
/* controlled (enabled/disabled) directly by bcm2048 and wl1251 */
vctcxo: vctcxo {
compatible = "fixed-clock";
diff --git a/arch/arm/boot/dts/omap3-n950-n9.dtsi b/arch/arm/boot/dts/omap3-n950-n9.dtsi
index 6681d4519e97..a075b63f3087 100644
--- a/arch/arm/boot/dts/omap3-n950-n9.dtsi
+++ b/arch/arm/boot/dts/omap3-n950-n9.dtsi
@@ -11,13 +11,6 @@
cpus {
cpu@0 {
cpu0-supply = <&vcc>;
- operating-points = <
- /* kHz uV */
- 300000 1012500
- 600000 1200000
- 800000 1325000
- 1000000 1375000
- >;
};
};
diff --git a/arch/arm/boot/dts/omap3-n950.dts b/arch/arm/boot/dts/omap3-n950.dts
index 9886bf8b90ab..31d47a1fad84 100644
--- a/arch/arm/boot/dts/omap3-n950.dts
+++ b/arch/arm/boot/dts/omap3-n950.dts
@@ -12,7 +12,7 @@
/ {
model = "Nokia N950";
- compatible = "nokia,omap3-n950", "ti,omap36xx", "ti,omap3";
+ compatible = "nokia,omap3-n950", "ti,omap3630", "ti,omap36xx", "ti,omap3";
keys {
compatible = "gpio-keys";
diff --git a/arch/arm/boot/dts/omap3-overo-storm-alto35.dts b/arch/arm/boot/dts/omap3-overo-storm-alto35.dts
index 18338576c41d..7f04dfad8203 100644
--- a/arch/arm/boot/dts/omap3-overo-storm-alto35.dts
+++ b/arch/arm/boot/dts/omap3-overo-storm-alto35.dts
@@ -14,5 +14,5 @@
/ {
model = "OMAP36xx/AM37xx/DM37xx Gumstix Overo on Alto35";
- compatible = "gumstix,omap3-overo-alto35", "gumstix,omap3-overo", "ti,omap36xx", "ti,omap3";
+ compatible = "gumstix,omap3-overo-alto35", "gumstix,omap3-overo", "ti,omap3630", "ti,omap36xx", "ti,omap3";
};
diff --git a/arch/arm/boot/dts/omap3-overo-storm-chestnut43.dts b/arch/arm/boot/dts/omap3-overo-storm-chestnut43.dts
index f204c8af8281..bc5a04e03336 100644
--- a/arch/arm/boot/dts/omap3-overo-storm-chestnut43.dts
+++ b/arch/arm/boot/dts/omap3-overo-storm-chestnut43.dts
@@ -14,7 +14,7 @@
/ {
model = "OMAP36xx/AM37xx/DM37xx Gumstix Overo on Chestnut43";
- compatible = "gumstix,omap3-overo-chestnut43", "gumstix,omap3-overo", "ti,omap36xx", "ti,omap3";
+ compatible = "gumstix,omap3-overo-chestnut43", "gumstix,omap3-overo", "ti,omap3630", "ti,omap36xx", "ti,omap3";
};
&omap3_pmx_core2 {
diff --git a/arch/arm/boot/dts/omap3-overo-storm-gallop43.dts b/arch/arm/boot/dts/omap3-overo-storm-gallop43.dts
index c633f7cee68e..065c31cbf0e2 100644
--- a/arch/arm/boot/dts/omap3-overo-storm-gallop43.dts
+++ b/arch/arm/boot/dts/omap3-overo-storm-gallop43.dts
@@ -14,7 +14,7 @@
/ {
model = "OMAP36xx/AM37xx/DM37xx Gumstix Overo on Gallop43";
- compatible = "gumstix,omap3-overo-gallop43", "gumstix,omap3-overo", "ti,omap36xx", "ti,omap3";
+ compatible = "gumstix,omap3-overo-gallop43", "gumstix,omap3-overo", "ti,omap3630", "ti,omap36xx", "ti,omap3";
};
&omap3_pmx_core2 {
diff --git a/arch/arm/boot/dts/omap3-overo-storm-palo35.dts b/arch/arm/boot/dts/omap3-overo-storm-palo35.dts
index fb88ebc9858c..e38c1c51392c 100644
--- a/arch/arm/boot/dts/omap3-overo-storm-palo35.dts
+++ b/arch/arm/boot/dts/omap3-overo-storm-palo35.dts
@@ -14,7 +14,7 @@
/ {
model = "OMAP36xx/AM37xx/DM37xx Gumstix Overo on Palo35";
- compatible = "gumstix,omap3-overo-palo35", "gumstix,omap3-overo", "ti,omap36xx", "ti,omap3";
+ compatible = "gumstix,omap3-overo-palo35", "gumstix,omap3-overo", "ti,omap3630", "ti,omap36xx", "ti,omap3";
};
&omap3_pmx_core2 {
diff --git a/arch/arm/boot/dts/omap3-overo-storm-palo43.dts b/arch/arm/boot/dts/omap3-overo-storm-palo43.dts
index 76cca00d97b6..e6dc23159c4d 100644
--- a/arch/arm/boot/dts/omap3-overo-storm-palo43.dts
+++ b/arch/arm/boot/dts/omap3-overo-storm-palo43.dts
@@ -14,7 +14,7 @@
/ {
model = "OMAP36xx/AM37xx/DM37xx Gumstix Overo on Palo43";
- compatible = "gumstix,omap3-overo-palo43", "gumstix,omap3-overo", "ti,omap36xx", "ti,omap3";
+ compatible = "gumstix,omap3-overo-palo43", "gumstix,omap3-overo", "ti,omap3630", "ti,omap36xx", "ti,omap3";
};
&omap3_pmx_core2 {
diff --git a/arch/arm/boot/dts/omap3-overo-storm-summit.dts b/arch/arm/boot/dts/omap3-overo-storm-summit.dts
index cc081a9e4c1e..587c08ce282d 100644
--- a/arch/arm/boot/dts/omap3-overo-storm-summit.dts
+++ b/arch/arm/boot/dts/omap3-overo-storm-summit.dts
@@ -14,7 +14,7 @@
/ {
model = "OMAP36xx/AM37xx/DM37xx Gumstix Overo on Summit";
- compatible = "gumstix,omap3-overo-summit", "gumstix,omap3-overo", "ti,omap36xx", "ti,omap3";
+ compatible = "gumstix,omap3-overo-summit", "gumstix,omap3-overo", "ti,omap3630", "ti,omap36xx", "ti,omap3";
};
&omap3_pmx_core2 {
diff --git a/arch/arm/boot/dts/omap3-overo-storm-tobi.dts b/arch/arm/boot/dts/omap3-overo-storm-tobi.dts
index 1de41c0826e0..f57de6010994 100644
--- a/arch/arm/boot/dts/omap3-overo-storm-tobi.dts
+++ b/arch/arm/boot/dts/omap3-overo-storm-tobi.dts
@@ -14,6 +14,6 @@
/ {
model = "OMAP36xx/AM37xx/DM37xx Gumstix Overo on Tobi";
- compatible = "gumstix,omap3-overo-tobi", "gumstix,omap3-overo", "ti,omap36xx", "ti,omap3";
+ compatible = "gumstix,omap3-overo-tobi", "gumstix,omap3-overo", "ti,omap3630", "ti,omap36xx", "ti,omap3";
};
diff --git a/arch/arm/boot/dts/omap3-overo-storm-tobiduo.dts b/arch/arm/boot/dts/omap3-overo-storm-tobiduo.dts
index 9ed13118ed8e..281af6c113be 100644
--- a/arch/arm/boot/dts/omap3-overo-storm-tobiduo.dts
+++ b/arch/arm/boot/dts/omap3-overo-storm-tobiduo.dts
@@ -14,5 +14,5 @@
/ {
model = "OMAP36xx/AM37xx/DM37xx Gumstix Overo on TobiDuo";
- compatible = "gumstix,omap3-overo-tobiduo", "gumstix,omap3-overo", "ti,omap36xx", "ti,omap3";
+ compatible = "gumstix,omap3-overo-tobiduo", "gumstix,omap3-overo", "ti,omap3630", "ti,omap36xx", "ti,omap3";
};
diff --git a/arch/arm/boot/dts/omap3-pandora-1ghz.dts b/arch/arm/boot/dts/omap3-pandora-1ghz.dts
index 81b957f33c9f..ea509956d7ac 100644
--- a/arch/arm/boot/dts/omap3-pandora-1ghz.dts
+++ b/arch/arm/boot/dts/omap3-pandora-1ghz.dts
@@ -16,7 +16,7 @@
/ {
model = "Pandora Handheld Console 1GHz";
- compatible = "openpandora,omap3-pandora-1ghz", "ti,omap36xx", "ti,omap3";
+ compatible = "openpandora,omap3-pandora-1ghz", "ti,omap3630", "ti,omap36xx", "ti,omap3";
};
&omap3_pmx_core2 {
diff --git a/arch/arm/boot/dts/omap3-pandora-common.dtsi b/arch/arm/boot/dts/omap3-pandora-common.dtsi
index ec5891718ae6..150d5be42d27 100644
--- a/arch/arm/boot/dts/omap3-pandora-common.dtsi
+++ b/arch/arm/boot/dts/omap3-pandora-common.dtsi
@@ -226,6 +226,17 @@
gpio = <&gpio6 4 GPIO_ACTIVE_HIGH>; /* GPIO_164 */
};
+ /* wl1251 wifi+bt module */
+ wlan_en: fixed-regulator-wg7210_en {
+ compatible = "regulator-fixed";
+ regulator-name = "vwlan";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ startup-delay-us = <50000>;
+ enable-active-high;
+ gpio = <&gpio1 23 GPIO_ACTIVE_HIGH>;
+ };
+
/* wg7210 (wifi+bt module) 32k clock buffer */
wg7210_32k: fixed-regulator-wg7210_32k {
compatible = "regulator-fixed";
@@ -522,9 +533,30 @@
/*wp-gpios = <&gpio4 31 GPIO_ACTIVE_HIGH>;*/ /* GPIO_127 */
};
-/* mmc3 is probed using pdata-quirks to pass wl1251 card data */
&mmc3 {
- status = "disabled";
+ vmmc-supply = <&wlan_en>;
+
+ bus-width = <4>;
+ non-removable;
+ ti,non-removable;
+ cap-power-off-card;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc3_pins>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ wlan: wifi@1 {
+ compatible = "ti,wl1251";
+
+ reg = <1>;
+
+ interrupt-parent = <&gpio1>;
+ interrupts = <21 IRQ_TYPE_LEVEL_HIGH>; /* GPIO_21 */
+
+ ti,wl1251-has-eeprom;
+ };
};
/* bluetooth */
diff --git a/arch/arm/boot/dts/omap3-sbc-t3530.dts b/arch/arm/boot/dts/omap3-sbc-t3530.dts
index ae96002abb3b..24bf3fd86641 100644
--- a/arch/arm/boot/dts/omap3-sbc-t3530.dts
+++ b/arch/arm/boot/dts/omap3-sbc-t3530.dts
@@ -8,7 +8,7 @@
/ {
model = "CompuLab SBC-T3530 with CM-T3530";
- compatible = "compulab,omap3-sbc-t3530", "compulab,omap3-cm-t3530", "ti,omap34xx", "ti,omap3";
+ compatible = "compulab,omap3-sbc-t3530", "compulab,omap3-cm-t3530", "ti,omap3430", "ti,omap34xx", "ti,omap3";
aliases {
display0 = &dvi0;
diff --git a/arch/arm/boot/dts/omap3-sbc-t3730.dts b/arch/arm/boot/dts/omap3-sbc-t3730.dts
index 7de6df16fc17..eb3893b9535e 100644
--- a/arch/arm/boot/dts/omap3-sbc-t3730.dts
+++ b/arch/arm/boot/dts/omap3-sbc-t3730.dts
@@ -8,7 +8,7 @@
/ {
model = "CompuLab SBC-T3730 with CM-T3730";
- compatible = "compulab,omap3-sbc-t3730", "compulab,omap3-cm-t3730", "ti,omap36xx", "ti,omap3";
+ compatible = "compulab,omap3-sbc-t3730", "compulab,omap3-cm-t3730", "ti,omap3630", "ti,omap36xx", "ti,omap3";
aliases {
display0 = &dvi0;
diff --git a/arch/arm/boot/dts/omap3-sniper.dts b/arch/arm/boot/dts/omap3-sniper.dts
index 40a87330e8c3..b6879cdc5c13 100644
--- a/arch/arm/boot/dts/omap3-sniper.dts
+++ b/arch/arm/boot/dts/omap3-sniper.dts
@@ -9,7 +9,7 @@
/ {
model = "LG Optimus Black";
- compatible = "lg,omap3-sniper", "ti,omap36xx", "ti,omap3";
+ compatible = "lg,omap3-sniper", "ti,omap3630", "ti,omap36xx", "ti,omap3";
cpus {
cpu@0 {
diff --git a/arch/arm/boot/dts/omap3-thunder.dts b/arch/arm/boot/dts/omap3-thunder.dts
index 6276e7079b36..64221e3b3477 100644
--- a/arch/arm/boot/dts/omap3-thunder.dts
+++ b/arch/arm/boot/dts/omap3-thunder.dts
@@ -8,7 +8,7 @@
/ {
model = "TI OMAP3 Thunder baseboard with TAO3530 SOM";
- compatible = "technexion,omap3-thunder", "technexion,omap3-tao3530", "ti,omap34xx", "ti,omap3";
+ compatible = "technexion,omap3-thunder", "technexion,omap3-tao3530", "ti,omap3430", "ti,omap34xx", "ti,omap3";
};
&omap3_pmx_core {
diff --git a/arch/arm/boot/dts/omap3-zoom3.dts b/arch/arm/boot/dts/omap3-zoom3.dts
index db3a2fe84e99..d240e39f2151 100644
--- a/arch/arm/boot/dts/omap3-zoom3.dts
+++ b/arch/arm/boot/dts/omap3-zoom3.dts
@@ -9,7 +9,7 @@
/ {
model = "TI Zoom3";
- compatible = "ti,omap3-zoom3", "ti,omap36xx", "ti,omap3";
+ compatible = "ti,omap3-zoom3", "ti,omap3630", "ti,omap36xx", "ti,omap3";
cpus {
cpu@0 {
diff --git a/arch/arm/boot/dts/omap3430-sdp.dts b/arch/arm/boot/dts/omap3430-sdp.dts
index 0abd61108a53..7bfde8aac7ae 100644
--- a/arch/arm/boot/dts/omap3430-sdp.dts
+++ b/arch/arm/boot/dts/omap3430-sdp.dts
@@ -8,7 +8,7 @@
/ {
model = "TI OMAP3430 SDP";
- compatible = "ti,omap3430-sdp", "ti,omap3";
+ compatible = "ti,omap3430-sdp", "ti,omap3430", "ti,omap3";
memory@80000000 {
device_type = "memory";
diff --git a/arch/arm/boot/dts/omap34xx.dtsi b/arch/arm/boot/dts/omap34xx.dtsi
index 7b09cbee8bb8..c4dd9801840d 100644
--- a/arch/arm/boot/dts/omap34xx.dtsi
+++ b/arch/arm/boot/dts/omap34xx.dtsi
@@ -16,19 +16,67 @@
/ {
cpus {
cpu: cpu@0 {
- /* OMAP343x/OMAP35xx variants OPP1-5 */
- operating-points = <
- /* kHz uV */
- 125000 975000
- 250000 1075000
- 500000 1200000
- 550000 1270000
- 600000 1350000
- >;
+ /* OMAP343x/OMAP35xx variants OPP1-6 */
+ operating-points-v2 = <&cpu0_opp_table>;
+
clock-latency = <300000>; /* From legacy driver */
};
};
+ /* see Documentation/devicetree/bindings/opp/opp.txt */
+ cpu0_opp_table: opp-table {
+ compatible = "operating-points-v2-ti-cpu";
+ syscon = <&scm_conf>;
+
+ opp1-125000000 {
+ opp-hz = /bits/ 64 <125000000>;
+ /*
+			 * We currently only select the max voltage from
+			 * Table 3-3 of the omap3530 data sheet (SPRS507F).
+ * Format is: <target min max>
+ */
+ opp-microvolt = <975000 975000 975000>;
+ /*
+ * first value is silicon revision bit mask
+ * second one 720MHz Device Identification bit mask
+ */
+ opp-supported-hw = <0xffffffff 3>;
+ };
+
+ opp2-250000000 {
+ opp-hz = /bits/ 64 <250000000>;
+ opp-microvolt = <1075000 1075000 1075000>;
+ opp-supported-hw = <0xffffffff 3>;
+ opp-suspend;
+ };
+
+ opp3-500000000 {
+ opp-hz = /bits/ 64 <500000000>;
+ opp-microvolt = <1200000 1200000 1200000>;
+ opp-supported-hw = <0xffffffff 3>;
+ };
+
+ opp4-550000000 {
+ opp-hz = /bits/ 64 <550000000>;
+ opp-microvolt = <1275000 1275000 1275000>;
+ opp-supported-hw = <0xffffffff 3>;
+ };
+
+ opp5-600000000 {
+ opp-hz = /bits/ 64 <600000000>;
+ opp-microvolt = <1350000 1350000 1350000>;
+ opp-supported-hw = <0xffffffff 3>;
+ };
+
+ opp6-720000000 {
+ opp-hz = /bits/ 64 <720000000>;
+ opp-microvolt = <1350000 1350000 1350000>;
+ /* only high-speed grade omap3530 devices */
+ opp-supported-hw = <0xffffffff 2>;
+ turbo-mode;
+ };
+ };
+
ocp@68000000 {
omap3_pmx_core2: pinmux@480025d8 {
compatible = "ti,omap3-padconf", "pinctrl-single";
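
The opp-supported-hw masks above only take effect if platform code tells the OPP core which hardware version it is running on. The sketch below shows the generic dev_pm_opp_set_supported_hw() hookup under that assumption; the mask values and the helper function are illustrative, not the actual ti-cpufreq driver:

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/pm_opp.h>

/*
 * Hedged sketch: advertise the hardware version so only OPP nodes
 * whose opp-supported-hw mask matches are enabled. The two-word
 * layout mirrors the comments in the table above (silicon revision
 * mask, then the speed/ID bit mask).
 */
static int enable_matching_opps(struct device *cpu_dev)
{
	u32 versions[2] = { 0x1 /* this revision */, 0x2 /* speed binned */ };
	struct opp_table *table;

	table = dev_pm_opp_set_supported_hw(cpu_dev, versions,
					    ARRAY_SIZE(versions));
	if (IS_ERR(table))
		return PTR_ERR(table);

	return dev_pm_opp_get_opp_count(cpu_dev);
}
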
diff --git a/arch/arm/boot/dts/omap36xx.dtsi b/arch/arm/boot/dts/omap36xx.dtsi
index 1e552f08f120..c618cb257d00 100644
--- a/arch/arm/boot/dts/omap36xx.dtsi
+++ b/arch/arm/boot/dts/omap36xx.dtsi
@@ -19,16 +19,65 @@
};
cpus {
- /* OMAP3630/OMAP37xx 'standard device' variants OPP50 to OPP130 */
+ /* OMAP3630/OMAP37xx variants OPP50 to OPP130 and OPP1G */
cpu: cpu@0 {
- operating-points = <
- /* kHz uV */
- 300000 1012500
- 600000 1200000
- 800000 1325000
- >;
- clock-latency = <300000>; /* From legacy driver */
+ operating-points-v2 = <&cpu0_opp_table>;
+
+ vbb-supply = <&abb_mpu_iva>;
+ clock-latency = <300000>; /* From omap-cpufreq driver */
+ };
+ };
+
+ /* see Documentation/devicetree/bindings/opp/opp.txt */
+ cpu0_opp_table: opp-table {
+ compatible = "operating-points-v2-ti-cpu";
+ syscon = <&scm_conf>;
+
+ opp50-300000000 {
+ opp-hz = /bits/ 64 <300000000>;
+ /*
+			 * We currently only select the max voltage from
+			 * Table 4-19 of the DM3730 data sheet (SPRS685B).
+ * Format is: cpu0-supply: <target min max>
+ * vbb-supply: <target min max>
+ */
+ opp-microvolt = <1012500 1012500 1012500>,
+ <1012500 1012500 1012500>;
+ /*
+ * first value is silicon revision bit mask
+ * second one is "speed binned" bit mask
+ */
+ opp-supported-hw = <0xffffffff 3>;
+ opp-suspend;
+ };
+
+ opp100-600000000 {
+ opp-hz = /bits/ 64 <600000000>;
+ opp-microvolt = <1200000 1200000 1200000>,
+ <1200000 1200000 1200000>;
+ opp-supported-hw = <0xffffffff 3>;
+ };
+
+ opp130-800000000 {
+ opp-hz = /bits/ 64 <800000000>;
+ opp-microvolt = <1325000 1325000 1325000>,
+ <1325000 1325000 1325000>;
+ opp-supported-hw = <0xffffffff 3>;
};
+
+ opp1g-1000000000 {
+ opp-hz = /bits/ 64 <1000000000>;
+ opp-microvolt = <1375000 1375000 1375000>,
+ <1375000 1375000 1375000>;
+ /* only on am/dm37x with speed-binned bit set */
+ opp-supported-hw = <0xffffffff 2>;
+ turbo-mode;
+ };
+ };
+
+ opp_supply_mpu_iva: opp_supply {
+ compatible = "ti,omap-opp-supply";
+ ti,absolute-max-voltage-uv = <1375000>;
};
ocp@68000000 {
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index 40d7f1a4fc45..89cce8d4bc6b 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -554,3 +554,4 @@ CONFIG_DEBUG_INFO_DWARF4=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_SCHEDSTATS=y
# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_TI_CPSW_SWITCHDEV=y
diff --git a/arch/arm/crypto/Kconfig b/arch/arm/crypto/Kconfig
index 043b0b18bf7e..2674de6ada1f 100644
--- a/arch/arm/crypto/Kconfig
+++ b/arch/arm/crypto/Kconfig
@@ -30,7 +30,7 @@ config CRYPTO_SHA1_ARM_NEON
config CRYPTO_SHA1_ARM_CE
tristate "SHA1 digest algorithm (ARM v8 Crypto Extensions)"
- depends on KERNEL_MODE_NEON
+ depends on KERNEL_MODE_NEON && (CC_IS_CLANG || GCC_VERSION >= 40800)
select CRYPTO_SHA1_ARM
select CRYPTO_HASH
help
@@ -39,7 +39,7 @@ config CRYPTO_SHA1_ARM_CE
config CRYPTO_SHA2_ARM_CE
tristate "SHA-224/256 digest algorithm (ARM v8 Crypto Extensions)"
- depends on KERNEL_MODE_NEON
+ depends on KERNEL_MODE_NEON && (CC_IS_CLANG || GCC_VERSION >= 40800)
select CRYPTO_SHA256_ARM
select CRYPTO_HASH
help
@@ -81,7 +81,7 @@ config CRYPTO_AES_ARM
config CRYPTO_AES_ARM_BS
tristate "Bit sliced AES using NEON instructions"
depends on KERNEL_MODE_NEON
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_LIB_AES
select CRYPTO_SIMD
help
@@ -96,8 +96,8 @@ config CRYPTO_AES_ARM_BS
config CRYPTO_AES_ARM_CE
tristate "Accelerated AES using ARMv8 Crypto Extensions"
- depends on KERNEL_MODE_NEON
- select CRYPTO_BLKCIPHER
+ depends on KERNEL_MODE_NEON && (CC_IS_CLANG || GCC_VERSION >= 40800)
+ select CRYPTO_SKCIPHER
select CRYPTO_LIB_AES
select CRYPTO_SIMD
help
@@ -106,7 +106,7 @@ config CRYPTO_AES_ARM_CE
config CRYPTO_GHASH_ARM_CE
tristate "PMULL-accelerated GHASH using NEON/ARMv8 Crypto Extensions"
- depends on KERNEL_MODE_NEON
+ depends on KERNEL_MODE_NEON && (CC_IS_CLANG || GCC_VERSION >= 40800)
select CRYPTO_HASH
select CRYPTO_CRYPTD
select CRYPTO_GF128MUL
@@ -118,23 +118,35 @@ config CRYPTO_GHASH_ARM_CE
config CRYPTO_CRCT10DIF_ARM_CE
tristate "CRCT10DIF digest algorithm using PMULL instructions"
- depends on KERNEL_MODE_NEON && CRC_T10DIF
+ depends on KERNEL_MODE_NEON && (CC_IS_CLANG || GCC_VERSION >= 40800)
+ depends on CRC_T10DIF
select CRYPTO_HASH
config CRYPTO_CRC32_ARM_CE
tristate "CRC32(C) digest algorithm using CRC and/or PMULL instructions"
- depends on KERNEL_MODE_NEON && CRC32
+ depends on KERNEL_MODE_NEON && (CC_IS_CLANG || GCC_VERSION >= 40800)
+ depends on CRC32
select CRYPTO_HASH
config CRYPTO_CHACHA20_NEON
- tristate "NEON accelerated ChaCha stream cipher algorithms"
- depends on KERNEL_MODE_NEON
- select CRYPTO_BLKCIPHER
- select CRYPTO_CHACHA20
+ tristate "NEON and scalar accelerated ChaCha stream cipher algorithms"
+ select CRYPTO_SKCIPHER
+ select CRYPTO_ARCH_HAVE_LIB_CHACHA
+
+config CRYPTO_POLY1305_ARM
+ tristate "Accelerated scalar and SIMD Poly1305 hash implementations"
+ select CRYPTO_HASH
+ select CRYPTO_ARCH_HAVE_LIB_POLY1305
config CRYPTO_NHPOLY1305_NEON
tristate "NEON accelerated NHPoly1305 hash function (for Adiantum)"
depends on KERNEL_MODE_NEON
select CRYPTO_NHPOLY1305
+config CRYPTO_CURVE25519_NEON
+ tristate "NEON accelerated Curve25519 scalar multiplication library"
+ depends on KERNEL_MODE_NEON
+ select CRYPTO_LIB_CURVE25519_GENERIC
+ select CRYPTO_ARCH_HAVE_LIB_CURVE25519
+
endif
diff --git a/arch/arm/crypto/Makefile b/arch/arm/crypto/Makefile
index 4180f3a13512..b745c17d356f 100644
--- a/arch/arm/crypto/Makefile
+++ b/arch/arm/crypto/Makefile
@@ -10,34 +10,16 @@ obj-$(CONFIG_CRYPTO_SHA1_ARM_NEON) += sha1-arm-neon.o
obj-$(CONFIG_CRYPTO_SHA256_ARM) += sha256-arm.o
obj-$(CONFIG_CRYPTO_SHA512_ARM) += sha512-arm.o
obj-$(CONFIG_CRYPTO_CHACHA20_NEON) += chacha-neon.o
+obj-$(CONFIG_CRYPTO_POLY1305_ARM) += poly1305-arm.o
obj-$(CONFIG_CRYPTO_NHPOLY1305_NEON) += nhpoly1305-neon.o
+obj-$(CONFIG_CRYPTO_CURVE25519_NEON) += curve25519-neon.o
-ce-obj-$(CONFIG_CRYPTO_AES_ARM_CE) += aes-arm-ce.o
-ce-obj-$(CONFIG_CRYPTO_SHA1_ARM_CE) += sha1-arm-ce.o
-ce-obj-$(CONFIG_CRYPTO_SHA2_ARM_CE) += sha2-arm-ce.o
-ce-obj-$(CONFIG_CRYPTO_GHASH_ARM_CE) += ghash-arm-ce.o
-ce-obj-$(CONFIG_CRYPTO_CRCT10DIF_ARM_CE) += crct10dif-arm-ce.o
-crc-obj-$(CONFIG_CRYPTO_CRC32_ARM_CE) += crc32-arm-ce.o
-
-ifneq ($(crc-obj-y)$(crc-obj-m),)
-ifeq ($(call as-instr,.arch armv8-a\n.arch_extension crc,y,n),y)
-ce-obj-y += $(crc-obj-y)
-ce-obj-m += $(crc-obj-m)
-else
-$(warning These CRC Extensions modules need binutils 2.23 or higher)
-$(warning $(crc-obj-y) $(crc-obj-m))
-endif
-endif
-
-ifneq ($(ce-obj-y)$(ce-obj-m),)
-ifeq ($(call as-instr,.fpu crypto-neon-fp-armv8,y,n),y)
-obj-y += $(ce-obj-y)
-obj-m += $(ce-obj-m)
-else
-$(warning These ARMv8 Crypto Extensions modules need binutils 2.23 or higher)
-$(warning $(ce-obj-y) $(ce-obj-m))
-endif
-endif
+obj-$(CONFIG_CRYPTO_AES_ARM_CE) += aes-arm-ce.o
+obj-$(CONFIG_CRYPTO_SHA1_ARM_CE) += sha1-arm-ce.o
+obj-$(CONFIG_CRYPTO_SHA2_ARM_CE) += sha2-arm-ce.o
+obj-$(CONFIG_CRYPTO_GHASH_ARM_CE) += ghash-arm-ce.o
+obj-$(CONFIG_CRYPTO_CRCT10DIF_ARM_CE) += crct10dif-arm-ce.o
+obj-$(CONFIG_CRYPTO_CRC32_ARM_CE) += crc32-arm-ce.o
aes-arm-y := aes-cipher-core.o aes-cipher-glue.o
aes-arm-bs-y := aes-neonbs-core.o aes-neonbs-glue.o
@@ -53,13 +35,19 @@ aes-arm-ce-y := aes-ce-core.o aes-ce-glue.o
ghash-arm-ce-y := ghash-ce-core.o ghash-ce-glue.o
crct10dif-arm-ce-y := crct10dif-ce-core.o crct10dif-ce-glue.o
crc32-arm-ce-y:= crc32-ce-core.o crc32-ce-glue.o
-chacha-neon-y := chacha-neon-core.o chacha-neon-glue.o
+chacha-neon-y := chacha-scalar-core.o chacha-glue.o
+chacha-neon-$(CONFIG_KERNEL_MODE_NEON) += chacha-neon-core.o
+poly1305-arm-y := poly1305-core.o poly1305-glue.o
nhpoly1305-neon-y := nh-neon-core.o nhpoly1305-neon-glue.o
+curve25519-neon-y := curve25519-core.o curve25519-glue.o
ifdef REGENERATE_ARM_CRYPTO
quiet_cmd_perl = PERL $@
cmd_perl = $(PERL) $(<) > $(@)
+$(src)/poly1305-core.S_shipped: $(src)/poly1305-armv4.pl
+ $(call cmd,perl)
+
$(src)/sha256-core.S_shipped: $(src)/sha256-armv4.pl
$(call cmd,perl)
@@ -67,4 +55,9 @@ $(src)/sha512-core.S_shipped: $(src)/sha512-armv4.pl
$(call cmd,perl)
endif
-clean-files += sha256-core.S sha512-core.S
+clean-files += poly1305-core.S sha256-core.S sha512-core.S
+
+# massage the perlasm code a bit so we only get the NEON routine if we need it
+poly1305-aflags-$(CONFIG_CPU_V7) := -U__LINUX_ARM_ARCH__ -D__LINUX_ARM_ARCH__=5
+poly1305-aflags-$(CONFIG_KERNEL_MODE_NEON) := -U__LINUX_ARM_ARCH__ -D__LINUX_ARM_ARCH__=7
+AFLAGS_poly1305-core.o += $(poly1305-aflags-y)
diff --git a/arch/arm/crypto/chacha-glue.c b/arch/arm/crypto/chacha-glue.c
new file mode 100644
index 000000000000..3f0c057aa050
--- /dev/null
+++ b/arch/arm/crypto/chacha-glue.c
@@ -0,0 +1,343 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ARM NEON accelerated ChaCha and XChaCha stream ciphers,
+ * including ChaCha20 (RFC7539)
+ *
+ * Copyright (C) 2016-2019 Linaro, Ltd. <ard.biesheuvel@linaro.org>
+ * Copyright (C) 2015 Martin Willi
+ */
+
+#include <crypto/algapi.h>
+#include <crypto/internal/chacha.h>
+#include <crypto/internal/simd.h>
+#include <crypto/internal/skcipher.h>
+#include <linux/jump_label.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include <asm/cputype.h>
+#include <asm/hwcap.h>
+#include <asm/neon.h>
+#include <asm/simd.h>
+
+asmlinkage void chacha_block_xor_neon(const u32 *state, u8 *dst, const u8 *src,
+ int nrounds);
+asmlinkage void chacha_4block_xor_neon(const u32 *state, u8 *dst, const u8 *src,
+ int nrounds);
+asmlinkage void hchacha_block_arm(const u32 *state, u32 *out, int nrounds);
+asmlinkage void hchacha_block_neon(const u32 *state, u32 *out, int nrounds);
+
+asmlinkage void chacha_doarm(u8 *dst, const u8 *src, unsigned int bytes,
+ const u32 *state, int nrounds);
+
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(use_neon);
+
+static inline bool neon_usable(void)
+{
+ return static_branch_likely(&use_neon) && crypto_simd_usable();
+}
+
+static void chacha_doneon(u32 *state, u8 *dst, const u8 *src,
+ unsigned int bytes, int nrounds)
+{
+ u8 buf[CHACHA_BLOCK_SIZE];
+
+ while (bytes >= CHACHA_BLOCK_SIZE * 4) {
+ chacha_4block_xor_neon(state, dst, src, nrounds);
+ bytes -= CHACHA_BLOCK_SIZE * 4;
+ src += CHACHA_BLOCK_SIZE * 4;
+ dst += CHACHA_BLOCK_SIZE * 4;
+ state[12] += 4;
+ }
+ while (bytes >= CHACHA_BLOCK_SIZE) {
+ chacha_block_xor_neon(state, dst, src, nrounds);
+ bytes -= CHACHA_BLOCK_SIZE;
+ src += CHACHA_BLOCK_SIZE;
+ dst += CHACHA_BLOCK_SIZE;
+ state[12]++;
+ }
+ if (bytes) {
+ memcpy(buf, src, bytes);
+ chacha_block_xor_neon(state, buf, buf, nrounds);
+ memcpy(dst, buf, bytes);
+ }
+}
+
+void hchacha_block_arch(const u32 *state, u32 *stream, int nrounds)
+{
+ if (!IS_ENABLED(CONFIG_KERNEL_MODE_NEON) || !neon_usable()) {
+ hchacha_block_arm(state, stream, nrounds);
+ } else {
+ kernel_neon_begin();
+ hchacha_block_neon(state, stream, nrounds);
+ kernel_neon_end();
+ }
+}
+EXPORT_SYMBOL(hchacha_block_arch);
+
+void chacha_init_arch(u32 *state, const u32 *key, const u8 *iv)
+{
+ chacha_init_generic(state, key, iv);
+}
+EXPORT_SYMBOL(chacha_init_arch);
+
+void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src, unsigned int bytes,
+ int nrounds)
+{
+ if (!IS_ENABLED(CONFIG_KERNEL_MODE_NEON) || !neon_usable() ||
+ bytes <= CHACHA_BLOCK_SIZE) {
+ chacha_doarm(dst, src, bytes, state, nrounds);
+ state[12] += DIV_ROUND_UP(bytes, CHACHA_BLOCK_SIZE);
+ return;
+ }
+
+ kernel_neon_begin();
+ chacha_doneon(state, dst, src, bytes, nrounds);
+ kernel_neon_end();
+}
+EXPORT_SYMBOL(chacha_crypt_arch);
+
+static int chacha_stream_xor(struct skcipher_request *req,
+ const struct chacha_ctx *ctx, const u8 *iv,
+ bool neon)
+{
+ struct skcipher_walk walk;
+ u32 state[16];
+ int err;
+
+ err = skcipher_walk_virt(&walk, req, false);
+
+ chacha_init_generic(state, ctx->key, iv);
+
+ while (walk.nbytes > 0) {
+ unsigned int nbytes = walk.nbytes;
+
+ if (nbytes < walk.total)
+ nbytes = round_down(nbytes, walk.stride);
+
+ if (!neon) {
+ chacha_doarm(walk.dst.virt.addr, walk.src.virt.addr,
+ nbytes, state, ctx->nrounds);
+ state[12] += DIV_ROUND_UP(nbytes, CHACHA_BLOCK_SIZE);
+ } else {
+ kernel_neon_begin();
+ chacha_doneon(state, walk.dst.virt.addr,
+ walk.src.virt.addr, nbytes, ctx->nrounds);
+ kernel_neon_end();
+ }
+ err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
+ }
+
+ return err;
+}
+
+static int do_chacha(struct skcipher_request *req, bool neon)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ return chacha_stream_xor(req, ctx, req->iv, neon);
+}
+
+static int chacha_arm(struct skcipher_request *req)
+{
+ return do_chacha(req, false);
+}
+
+static int chacha_neon(struct skcipher_request *req)
+{
+ return do_chacha(req, neon_usable());
+}
+
+static int do_xchacha(struct skcipher_request *req, bool neon)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct chacha_ctx subctx;
+ u32 state[16];
+ u8 real_iv[16];
+
+ chacha_init_generic(state, ctx->key, req->iv);
+
+ if (!neon) {
+ hchacha_block_arm(state, subctx.key, ctx->nrounds);
+ } else {
+ kernel_neon_begin();
+ hchacha_block_neon(state, subctx.key, ctx->nrounds);
+ kernel_neon_end();
+ }
+ subctx.nrounds = ctx->nrounds;
+
+ memcpy(&real_iv[0], req->iv + 24, 8);
+ memcpy(&real_iv[8], req->iv + 16, 8);
+ return chacha_stream_xor(req, &subctx, real_iv, neon);
+}
+
+static int xchacha_arm(struct skcipher_request *req)
+{
+ return do_xchacha(req, false);
+}
+
+static int xchacha_neon(struct skcipher_request *req)
+{
+ return do_xchacha(req, neon_usable());
+}
+
+static struct skcipher_alg arm_algs[] = {
+ {
+ .base.cra_name = "chacha20",
+ .base.cra_driver_name = "chacha20-arm",
+ .base.cra_priority = 200,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct chacha_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = CHACHA_KEY_SIZE,
+ .max_keysize = CHACHA_KEY_SIZE,
+ .ivsize = CHACHA_IV_SIZE,
+ .chunksize = CHACHA_BLOCK_SIZE,
+ .setkey = chacha20_setkey,
+ .encrypt = chacha_arm,
+ .decrypt = chacha_arm,
+ }, {
+ .base.cra_name = "xchacha20",
+ .base.cra_driver_name = "xchacha20-arm",
+ .base.cra_priority = 200,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct chacha_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = CHACHA_KEY_SIZE,
+ .max_keysize = CHACHA_KEY_SIZE,
+ .ivsize = XCHACHA_IV_SIZE,
+ .chunksize = CHACHA_BLOCK_SIZE,
+ .setkey = chacha20_setkey,
+ .encrypt = xchacha_arm,
+ .decrypt = xchacha_arm,
+ }, {
+ .base.cra_name = "xchacha12",
+ .base.cra_driver_name = "xchacha12-arm",
+ .base.cra_priority = 200,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct chacha_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = CHACHA_KEY_SIZE,
+ .max_keysize = CHACHA_KEY_SIZE,
+ .ivsize = XCHACHA_IV_SIZE,
+ .chunksize = CHACHA_BLOCK_SIZE,
+ .setkey = chacha12_setkey,
+ .encrypt = xchacha_arm,
+ .decrypt = xchacha_arm,
+ },
+};
+
+static struct skcipher_alg neon_algs[] = {
+ {
+ .base.cra_name = "chacha20",
+ .base.cra_driver_name = "chacha20-neon",
+ .base.cra_priority = 300,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct chacha_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = CHACHA_KEY_SIZE,
+ .max_keysize = CHACHA_KEY_SIZE,
+ .ivsize = CHACHA_IV_SIZE,
+ .chunksize = CHACHA_BLOCK_SIZE,
+ .walksize = 4 * CHACHA_BLOCK_SIZE,
+ .setkey = chacha20_setkey,
+ .encrypt = chacha_neon,
+ .decrypt = chacha_neon,
+ }, {
+ .base.cra_name = "xchacha20",
+ .base.cra_driver_name = "xchacha20-neon",
+ .base.cra_priority = 300,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct chacha_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = CHACHA_KEY_SIZE,
+ .max_keysize = CHACHA_KEY_SIZE,
+ .ivsize = XCHACHA_IV_SIZE,
+ .chunksize = CHACHA_BLOCK_SIZE,
+ .walksize = 4 * CHACHA_BLOCK_SIZE,
+ .setkey = chacha20_setkey,
+ .encrypt = xchacha_neon,
+ .decrypt = xchacha_neon,
+ }, {
+ .base.cra_name = "xchacha12",
+ .base.cra_driver_name = "xchacha12-neon",
+ .base.cra_priority = 300,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct chacha_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = CHACHA_KEY_SIZE,
+ .max_keysize = CHACHA_KEY_SIZE,
+ .ivsize = XCHACHA_IV_SIZE,
+ .chunksize = CHACHA_BLOCK_SIZE,
+ .walksize = 4 * CHACHA_BLOCK_SIZE,
+ .setkey = chacha12_setkey,
+ .encrypt = xchacha_neon,
+ .decrypt = xchacha_neon,
+ }
+};
+
+static int __init chacha_simd_mod_init(void)
+{
+ int err;
+
+ err = crypto_register_skciphers(arm_algs, ARRAY_SIZE(arm_algs));
+ if (err)
+ return err;
+
+ if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && (elf_hwcap & HWCAP_NEON)) {
+ int i;
+
+ switch (read_cpuid_part()) {
+ case ARM_CPU_PART_CORTEX_A7:
+ case ARM_CPU_PART_CORTEX_A5:
+ /*
+ * The Cortex-A7 and Cortex-A5 do not perform well with
+ * the NEON implementation but do incredibly with the
+			 * the NEON implementation but do incredibly well with
+			 * the scalar one and use less power.
+ for (i = 0; i < ARRAY_SIZE(neon_algs); i++)
+ neon_algs[i].base.cra_priority = 0;
+ break;
+ default:
+ static_branch_enable(&use_neon);
+ }
+
+ err = crypto_register_skciphers(neon_algs, ARRAY_SIZE(neon_algs));
+ if (err)
+ crypto_unregister_skciphers(arm_algs, ARRAY_SIZE(arm_algs));
+ }
+ return err;
+}
+
+static void __exit chacha_simd_mod_fini(void)
+{
+ crypto_unregister_skciphers(arm_algs, ARRAY_SIZE(arm_algs));
+ if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && (elf_hwcap & HWCAP_NEON))
+ crypto_unregister_skciphers(neon_algs, ARRAY_SIZE(neon_algs));
+}
+
+module_init(chacha_simd_mod_init);
+module_exit(chacha_simd_mod_fini);
+
+MODULE_DESCRIPTION("ChaCha and XChaCha stream ciphers (scalar and NEON accelerated)");
+MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS_CRYPTO("chacha20");
+MODULE_ALIAS_CRYPTO("chacha20-arm");
+MODULE_ALIAS_CRYPTO("xchacha20");
+MODULE_ALIAS_CRYPTO("xchacha20-arm");
+MODULE_ALIAS_CRYPTO("xchacha12");
+MODULE_ALIAS_CRYPTO("xchacha12-arm");
+#ifdef CONFIG_KERNEL_MODE_NEON
+MODULE_ALIAS_CRYPTO("chacha20-neon");
+MODULE_ALIAS_CRYPTO("xchacha20-neon");
+MODULE_ALIAS_CRYPTO("xchacha12-neon");
+#endif
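
Beyond the registered skciphers, the new glue code also exports a library-style interface — chacha_init_arch(), chacha_crypt_arch() and hchacha_block_arch(), with the signatures shown above. A minimal caller, assuming a 256-bit key laid out as eight u32 words and a 16-byte IV, might look like this sketch:

#include <crypto/chacha.h>

/*
 * Hedged sketch of a library-interface caller; the exported functions
 * and their signatures are as declared in the glue code above, but the
 * buffer handling here is illustrative.
 */
static void encrypt_buf(u8 *dst, const u8 *src, unsigned int len,
			const u32 key[8], const u8 iv[16])
{
	u32 state[16];

	chacha_init_arch(state, key, iv);
	/* 20-round ChaCha; the NEON path is selected automatically. */
	chacha_crypt_arch(state, dst, src, len, 20);
}
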
diff --git a/arch/arm/crypto/chacha-neon-glue.c b/arch/arm/crypto/chacha-neon-glue.c
deleted file mode 100644
index a8e9b534c8da..000000000000
--- a/arch/arm/crypto/chacha-neon-glue.c
+++ /dev/null
@@ -1,202 +0,0 @@
-/*
- * ARM NEON accelerated ChaCha and XChaCha stream ciphers,
- * including ChaCha20 (RFC7539)
- *
- * Copyright (C) 2016 Linaro, Ltd. <ard.biesheuvel@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Based on:
- * ChaCha20 256-bit cipher algorithm, RFC7539, SIMD glue code
- *
- * Copyright (C) 2015 Martin Willi
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <crypto/algapi.h>
-#include <crypto/chacha.h>
-#include <crypto/internal/simd.h>
-#include <crypto/internal/skcipher.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-
-#include <asm/hwcap.h>
-#include <asm/neon.h>
-#include <asm/simd.h>
-
-asmlinkage void chacha_block_xor_neon(const u32 *state, u8 *dst, const u8 *src,
- int nrounds);
-asmlinkage void chacha_4block_xor_neon(const u32 *state, u8 *dst, const u8 *src,
- int nrounds);
-asmlinkage void hchacha_block_neon(const u32 *state, u32 *out, int nrounds);
-
-static void chacha_doneon(u32 *state, u8 *dst, const u8 *src,
- unsigned int bytes, int nrounds)
-{
- u8 buf[CHACHA_BLOCK_SIZE];
-
- while (bytes >= CHACHA_BLOCK_SIZE * 4) {
- chacha_4block_xor_neon(state, dst, src, nrounds);
- bytes -= CHACHA_BLOCK_SIZE * 4;
- src += CHACHA_BLOCK_SIZE * 4;
- dst += CHACHA_BLOCK_SIZE * 4;
- state[12] += 4;
- }
- while (bytes >= CHACHA_BLOCK_SIZE) {
- chacha_block_xor_neon(state, dst, src, nrounds);
- bytes -= CHACHA_BLOCK_SIZE;
- src += CHACHA_BLOCK_SIZE;
- dst += CHACHA_BLOCK_SIZE;
- state[12]++;
- }
- if (bytes) {
- memcpy(buf, src, bytes);
- chacha_block_xor_neon(state, buf, buf, nrounds);
- memcpy(dst, buf, bytes);
- }
-}
-
-static int chacha_neon_stream_xor(struct skcipher_request *req,
- const struct chacha_ctx *ctx, const u8 *iv)
-{
- struct skcipher_walk walk;
- u32 state[16];
- int err;
-
- err = skcipher_walk_virt(&walk, req, false);
-
- crypto_chacha_init(state, ctx, iv);
-
- while (walk.nbytes > 0) {
- unsigned int nbytes = walk.nbytes;
-
- if (nbytes < walk.total)
- nbytes = round_down(nbytes, walk.stride);
-
- kernel_neon_begin();
- chacha_doneon(state, walk.dst.virt.addr, walk.src.virt.addr,
- nbytes, ctx->nrounds);
- kernel_neon_end();
- err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
- }
-
- return err;
-}
-
-static int chacha_neon(struct skcipher_request *req)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
-
- if (req->cryptlen <= CHACHA_BLOCK_SIZE || !crypto_simd_usable())
- return crypto_chacha_crypt(req);
-
- return chacha_neon_stream_xor(req, ctx, req->iv);
-}
-
-static int xchacha_neon(struct skcipher_request *req)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct chacha_ctx subctx;
- u32 state[16];
- u8 real_iv[16];
-
- if (req->cryptlen <= CHACHA_BLOCK_SIZE || !crypto_simd_usable())
- return crypto_xchacha_crypt(req);
-
- crypto_chacha_init(state, ctx, req->iv);
-
- kernel_neon_begin();
- hchacha_block_neon(state, subctx.key, ctx->nrounds);
- kernel_neon_end();
- subctx.nrounds = ctx->nrounds;
-
- memcpy(&real_iv[0], req->iv + 24, 8);
- memcpy(&real_iv[8], req->iv + 16, 8);
- return chacha_neon_stream_xor(req, &subctx, real_iv);
-}
-
-static struct skcipher_alg algs[] = {
- {
- .base.cra_name = "chacha20",
- .base.cra_driver_name = "chacha20-neon",
- .base.cra_priority = 300,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct chacha_ctx),
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = CHACHA_KEY_SIZE,
- .max_keysize = CHACHA_KEY_SIZE,
- .ivsize = CHACHA_IV_SIZE,
- .chunksize = CHACHA_BLOCK_SIZE,
- .walksize = 4 * CHACHA_BLOCK_SIZE,
- .setkey = crypto_chacha20_setkey,
- .encrypt = chacha_neon,
- .decrypt = chacha_neon,
- }, {
- .base.cra_name = "xchacha20",
- .base.cra_driver_name = "xchacha20-neon",
- .base.cra_priority = 300,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct chacha_ctx),
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = CHACHA_KEY_SIZE,
- .max_keysize = CHACHA_KEY_SIZE,
- .ivsize = XCHACHA_IV_SIZE,
- .chunksize = CHACHA_BLOCK_SIZE,
- .walksize = 4 * CHACHA_BLOCK_SIZE,
- .setkey = crypto_chacha20_setkey,
- .encrypt = xchacha_neon,
- .decrypt = xchacha_neon,
- }, {
- .base.cra_name = "xchacha12",
- .base.cra_driver_name = "xchacha12-neon",
- .base.cra_priority = 300,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct chacha_ctx),
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = CHACHA_KEY_SIZE,
- .max_keysize = CHACHA_KEY_SIZE,
- .ivsize = XCHACHA_IV_SIZE,
- .chunksize = CHACHA_BLOCK_SIZE,
- .walksize = 4 * CHACHA_BLOCK_SIZE,
- .setkey = crypto_chacha12_setkey,
- .encrypt = xchacha_neon,
- .decrypt = xchacha_neon,
- }
-};
-
-static int __init chacha_simd_mod_init(void)
-{
- if (!(elf_hwcap & HWCAP_NEON))
- return -ENODEV;
-
- return crypto_register_skciphers(algs, ARRAY_SIZE(algs));
-}
-
-static void __exit chacha_simd_mod_fini(void)
-{
- crypto_unregister_skciphers(algs, ARRAY_SIZE(algs));
-}
-
-module_init(chacha_simd_mod_init);
-module_exit(chacha_simd_mod_fini);
-
-MODULE_DESCRIPTION("ChaCha and XChaCha stream ciphers (NEON accelerated)");
-MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS_CRYPTO("chacha20");
-MODULE_ALIAS_CRYPTO("chacha20-neon");
-MODULE_ALIAS_CRYPTO("xchacha20");
-MODULE_ALIAS_CRYPTO("xchacha20-neon");
-MODULE_ALIAS_CRYPTO("xchacha12");
-MODULE_ALIAS_CRYPTO("xchacha12-neon");
diff --git a/arch/arm/crypto/chacha-scalar-core.S b/arch/arm/crypto/chacha-scalar-core.S
new file mode 100644
index 000000000000..2985b80a45b5
--- /dev/null
+++ b/arch/arm/crypto/chacha-scalar-core.S
@@ -0,0 +1,460 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Google, Inc.
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+/*
+ * Design notes:
+ *
+ * 16 registers would be needed to hold the state matrix, but only 14 are
+ * available because 'sp' and 'pc' cannot be used. So we spill the elements
+ * (x8, x9) to the stack and swap them out with (x10, x11). This adds one
+ * 'ldrd' and one 'strd' instruction per round.
+ *
+ * All rotates are performed using the implicit rotate operand accepted by the
+ * 'add' and 'eor' instructions. This is faster than using explicit rotate
+ * instructions. To make this work, we allow the values in the second and last
+ * rows of the ChaCha state matrix (rows 'b' and 'd') to temporarily have the
+ * wrong rotation amount. The rotation amount is then fixed up just in time
+ * when the values are used. 'brot' is the number of bits the values in row 'b'
+ * need to be rotated right to arrive at the correct values, and 'drot'
+ * similarly for row 'd'. (brot, drot) start out as (0, 0) but we make it such
+ * that they end up as (25, 24) after every round.
+ */
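+
+/*
+ * A rough C sketch of one such step (using the kernel's ror32() helper
+ * from <linux/bitops.h>): with 'b' and 'd' held in deferred-rotation
+ * form, "a += b; d ^= a; d = rol32(d, 16)" becomes
+ *
+ *	a += ror32(b, brot);	// 'add a, a, b, ror #brot'
+ *	d = a ^ ror32(d, drot);	// 'eor d, a, d, ror #drot'
+ *
+ * No rotate of 'd' is ever emitted: the result is simply left rotated
+ * by 16, so drot becomes 32 - 16 == 16 at the point of next use.
+ */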
+
+ // ChaCha state registers
+ X0 .req r0
+ X1 .req r1
+ X2 .req r2
+ X3 .req r3
+ X4 .req r4
+ X5 .req r5
+ X6 .req r6
+ X7 .req r7
+ X8_X10 .req r8 // shared by x8 and x10
+ X9_X11 .req r9 // shared by x9 and x11
+ X12 .req r10
+ X13 .req r11
+ X14 .req r12
+ X15 .req r14
+
+.macro __rev out, in, t0, t1, t2
+.if __LINUX_ARM_ARCH__ >= 6
+ rev \out, \in
+.else
+ lsl \t0, \in, #24
+ and \t1, \in, #0xff00
+ and \t2, \in, #0xff0000
+ orr \out, \t0, \in, lsr #24
+ orr \out, \out, \t1, lsl #8
+ orr \out, \out, \t2, lsr #8
+.endif
+.endm
+
+.macro _le32_bswap x, t0, t1, t2
+#ifdef __ARMEB__
+ __rev \x, \x, \t0, \t1, \t2
+#endif
+.endm
+
+.macro _le32_bswap_4x a, b, c, d, t0, t1, t2
+ _le32_bswap \a, \t0, \t1, \t2
+ _le32_bswap \b, \t0, \t1, \t2
+ _le32_bswap \c, \t0, \t1, \t2
+ _le32_bswap \d, \t0, \t1, \t2
+.endm
+
+.macro __ldrd a, b, src, offset
+#if __LINUX_ARM_ARCH__ >= 6
+ ldrd \a, \b, [\src, #\offset]
+#else
+ ldr \a, [\src, #\offset]
+ ldr \b, [\src, #\offset + 4]
+#endif
+.endm
+
+.macro __strd a, b, dst, offset
+#if __LINUX_ARM_ARCH__ >= 6
+ strd \a, \b, [\dst, #\offset]
+#else
+ str \a, [\dst, #\offset]
+ str \b, [\dst, #\offset + 4]
+#endif
+.endm
+
+.macro _halfround a1, b1, c1, d1, a2, b2, c2, d2
+
+ // a += b; d ^= a; d = rol(d, 16);
+ add \a1, \a1, \b1, ror #brot
+ add \a2, \a2, \b2, ror #brot
+ eor \d1, \a1, \d1, ror #drot
+ eor \d2, \a2, \d2, ror #drot
+ // drot == 32 - 16 == 16
+
+ // c += d; b ^= c; b = rol(b, 12);
+ add \c1, \c1, \d1, ror #16
+ add \c2, \c2, \d2, ror #16
+ eor \b1, \c1, \b1, ror #brot
+ eor \b2, \c2, \b2, ror #brot
+ // brot == 32 - 12 == 20
+
+ // a += b; d ^= a; d = rol(d, 8);
+ add \a1, \a1, \b1, ror #20
+ add \a2, \a2, \b2, ror #20
+ eor \d1, \a1, \d1, ror #16
+ eor \d2, \a2, \d2, ror #16
+ // drot == 32 - 8 == 24
+
+ // c += d; b ^= c; b = rol(b, 7);
+ add \c1, \c1, \d1, ror #24
+ add \c2, \c2, \d2, ror #24
+ eor \b1, \c1, \b1, ror #20
+ eor \b2, \c2, \b2, ror #20
+ // brot == 32 - 7 == 25
+.endm
+
+.macro _doubleround
+
+ // column round
+
+ // quarterrounds: (x0, x4, x8, x12) and (x1, x5, x9, x13)
+ _halfround X0, X4, X8_X10, X12, X1, X5, X9_X11, X13
+
+ // save (x8, x9); restore (x10, x11)
+ __strd X8_X10, X9_X11, sp, 0
+ __ldrd X8_X10, X9_X11, sp, 8
+
+ // quarterrounds: (x2, x6, x10, x14) and (x3, x7, x11, x15)
+ _halfround X2, X6, X8_X10, X14, X3, X7, X9_X11, X15
+
+ .set brot, 25
+ .set drot, 24
+
+ // diagonal round
+
+ // quarterrounds: (x0, x5, x10, x15) and (x1, x6, x11, x12)
+ _halfround X0, X5, X8_X10, X15, X1, X6, X9_X11, X12
+
+ // save (x10, x11); restore (x8, x9)
+ __strd X8_X10, X9_X11, sp, 8
+ __ldrd X8_X10, X9_X11, sp, 0
+
+ // quarterrounds: (x2, x7, x8, x13) and (x3, x4, x9, x14)
+ _halfround X2, X7, X8_X10, X13, X3, X4, X9_X11, X14
+.endm
+
+.macro _chacha_permute nrounds
+ .set brot, 0
+ .set drot, 0
+ .rept \nrounds / 2
+ _doubleround
+ .endr
+.endm
+
+.macro _chacha nrounds
+
+.Lnext_block\@:
+ // Stack: unused0-unused1 x10-x11 x0-x15 OUT IN LEN
+ // Registers contain x0-x9,x12-x15.
+
+ // Do the core ChaCha permutation to update x0-x15.
+ _chacha_permute \nrounds
+
+ add sp, #8
+ // Stack: x10-x11 orig_x0-orig_x15 OUT IN LEN
+ // Registers contain x0-x9,x12-x15.
+ // x4-x7 are rotated by 'brot'; x12-x15 are rotated by 'drot'.
+
+ // Free up some registers (r8-r12,r14) by pushing (x8-x9,x12-x15).
+ push {X8_X10, X9_X11, X12, X13, X14, X15}
+
+ // Load (OUT, IN, LEN).
+ ldr r14, [sp, #96]
+ ldr r12, [sp, #100]
+ ldr r11, [sp, #104]
+
+ orr r10, r14, r12
+
+ // Use slow path if fewer than 64 bytes remain.
+ cmp r11, #64
+ blt .Lxor_slowpath\@
+
+ // Use slow path if IN and/or OUT isn't 4-byte aligned. Needed even on
+ // ARMv6+, since ldmia and stmia (used below) still require alignment.
+ tst r10, #3
+ bne .Lxor_slowpath\@
+
+ // Fast path: XOR 64 bytes of aligned data.
+
+ // Stack: x8-x9 x12-x15 x10-x11 orig_x0-orig_x15 OUT IN LEN
+ // Registers: r0-r7 are x0-x7; r8-r11 are free; r12 is IN; r14 is OUT.
+ // x4-x7 are rotated by 'brot'; x12-x15 are rotated by 'drot'.
+
+ // x0-x3
+ __ldrd r8, r9, sp, 32
+ __ldrd r10, r11, sp, 40
+ add X0, X0, r8
+ add X1, X1, r9
+ add X2, X2, r10
+ add X3, X3, r11
+ _le32_bswap_4x X0, X1, X2, X3, r8, r9, r10
+ ldmia r12!, {r8-r11}
+ eor X0, X0, r8
+ eor X1, X1, r9
+ eor X2, X2, r10
+ eor X3, X3, r11
+ stmia r14!, {X0-X3}
+
+ // x4-x7
+ __ldrd r8, r9, sp, 48
+ __ldrd r10, r11, sp, 56
+ add X4, r8, X4, ror #brot
+ add X5, r9, X5, ror #brot
+ ldmia r12!, {X0-X3}
+ add X6, r10, X6, ror #brot
+ add X7, r11, X7, ror #brot
+ _le32_bswap_4x X4, X5, X6, X7, r8, r9, r10
+ eor X4, X4, X0
+ eor X5, X5, X1
+ eor X6, X6, X2
+ eor X7, X7, X3
+ stmia r14!, {X4-X7}
+
+ // x8-x15
+ pop {r0-r7} // (x8-x9,x12-x15,x10-x11)
+ __ldrd r8, r9, sp, 32
+ __ldrd r10, r11, sp, 40
+ add r0, r0, r8 // x8
+ add r1, r1, r9 // x9
+ add r6, r6, r10 // x10
+ add r7, r7, r11 // x11
+ _le32_bswap_4x r0, r1, r6, r7, r8, r9, r10
+ ldmia r12!, {r8-r11}
+ eor r0, r0, r8 // x8
+ eor r1, r1, r9 // x9
+ eor r6, r6, r10 // x10
+ eor r7, r7, r11 // x11
+ stmia r14!, {r0,r1,r6,r7}
+ ldmia r12!, {r0,r1,r6,r7}
+ __ldrd r8, r9, sp, 48
+ __ldrd r10, r11, sp, 56
+ add r2, r8, r2, ror #drot // x12
+ add r3, r9, r3, ror #drot // x13
+ add r4, r10, r4, ror #drot // x14
+ add r5, r11, r5, ror #drot // x15
+ _le32_bswap_4x r2, r3, r4, r5, r9, r10, r11
+ ldr r9, [sp, #72] // load LEN
+ eor r2, r2, r0 // x12
+ eor r3, r3, r1 // x13
+ eor r4, r4, r6 // x14
+ eor r5, r5, r7 // x15
+ subs r9, #64 // decrement and check LEN
+ stmia r14!, {r2-r5}
+
+ beq .Ldone\@
+
+.Lprepare_for_next_block\@:
+
+ // Stack: x0-x15 OUT IN LEN
+
+ // Increment block counter (x12)
+ add r8, #1
+
+ // Store updated (OUT, IN, LEN)
+ str r14, [sp, #64]
+ str r12, [sp, #68]
+ str r9, [sp, #72]
+
+ mov r14, sp
+
+ // Store updated block counter (x12)
+ str r8, [sp, #48]
+
+ sub sp, #16
+
+ // Reload state and do next block
+ ldmia r14!, {r0-r11} // load x0-x11
+ __strd r10, r11, sp, 8 // store x10-x11 before state
+ ldmia r14, {r10-r12,r14} // load x12-x15
+ b .Lnext_block\@
+
+.Lxor_slowpath\@:
+ // Slow path: < 64 bytes remaining, or unaligned input or output buffer.
+ // We handle it by storing the 64 bytes of keystream to the stack, then
+ // XOR-ing the needed portion with the data.
+
+ // Allocate keystream buffer
+ sub sp, #64
+ mov r14, sp
+
+ // Stack: ks0-ks15 x8-x9 x12-x15 x10-x11 orig_x0-orig_x15 OUT IN LEN
+ // Registers: r0-r7 are x0-x7; r8-r11 are free; r12 is IN; r14 is &ks0.
+ // x4-x7 are rotated by 'brot'; x12-x15 are rotated by 'drot'.
+
+ // Save keystream for x0-x3
+ __ldrd r8, r9, sp, 96
+ __ldrd r10, r11, sp, 104
+ add X0, X0, r8
+ add X1, X1, r9
+ add X2, X2, r10
+ add X3, X3, r11
+ _le32_bswap_4x X0, X1, X2, X3, r8, r9, r10
+ stmia r14!, {X0-X3}
+
+ // Save keystream for x4-x7
+ __ldrd r8, r9, sp, 112
+ __ldrd r10, r11, sp, 120
+ add X4, r8, X4, ror #brot
+ add X5, r9, X5, ror #brot
+ add X6, r10, X6, ror #brot
+ add X7, r11, X7, ror #brot
+ _le32_bswap_4x X4, X5, X6, X7, r8, r9, r10
+ add r8, sp, #64
+ stmia r14!, {X4-X7}
+
+ // Save keystream for x8-x15
+ ldm r8, {r0-r7} // (x8-x9,x12-x15,x10-x11)
+ __ldrd r8, r9, sp, 128
+ __ldrd r10, r11, sp, 136
+ add r0, r0, r8 // x8
+ add r1, r1, r9 // x9
+ add r6, r6, r10 // x10
+ add r7, r7, r11 // x11
+ _le32_bswap_4x r0, r1, r6, r7, r8, r9, r10
+ stmia r14!, {r0,r1,r6,r7}
+ __ldrd r8, r9, sp, 144
+ __ldrd r10, r11, sp, 152
+ add r2, r8, r2, ror #drot // x12
+ add r3, r9, r3, ror #drot // x13
+ add r4, r10, r4, ror #drot // x14
+ add r5, r11, r5, ror #drot // x15
+ _le32_bswap_4x r2, r3, r4, r5, r9, r10, r11
+ stmia r14, {r2-r5}
+
+ // Stack: ks0-ks15 unused0-unused7 x0-x15 OUT IN LEN
+ // Registers: r8 is block counter, r12 is IN.
+
+ ldr r9, [sp, #168] // LEN
+ ldr r14, [sp, #160] // OUT
+ cmp r9, #64
+ mov r0, sp
+ movle r1, r9
+ movgt r1, #64
+ // r1 is number of bytes to XOR, in range [1, 64]
+
+.if __LINUX_ARM_ARCH__ < 6
+ orr r2, r12, r14
+ tst r2, #3 // IN or OUT misaligned?
+ bne .Lxor_next_byte\@
+.endif
+
+ // XOR a word at a time
+.rept 16
+ subs r1, #4
+ blt .Lxor_words_done\@
+ ldr r2, [r12], #4
+ ldr r3, [r0], #4
+ eor r2, r2, r3
+ str r2, [r14], #4
+.endr
+ b .Lxor_slowpath_done\@
+.Lxor_words_done\@:
+ ands r1, r1, #3
+ beq .Lxor_slowpath_done\@
+
+ // XOR a byte at a time
+.Lxor_next_byte\@:
+ ldrb r2, [r12], #1
+ ldrb r3, [r0], #1
+ eor r2, r2, r3
+ strb r2, [r14], #1
+ subs r1, #1
+ bne .Lxor_next_byte\@
+
+.Lxor_slowpath_done\@:
+ subs r9, #64
+ add sp, #96
+ bgt .Lprepare_for_next_block\@
+
+.Ldone\@:
+.endm // _chacha
+
+/*
+ * void chacha_doarm(u8 *dst, const u8 *src, unsigned int bytes,
+ * const u32 *state, int nrounds);
+ */
+ENTRY(chacha_doarm)
+ cmp r2, #0 // len == 0?
+ reteq lr
+
+	ldr	ip, [sp]		// nrounds (fifth argument)
+	cmp	ip, #12			// ChaCha12?
+
+ push {r0-r2,r4-r11,lr}
+
+ // Push state x0-x15 onto stack.
+ // Also store an extra copy of x10-x11 just before the state.
+
+ add X12, r3, #48
+ ldm X12, {X12,X13,X14,X15}
+ push {X12,X13,X14,X15}
+ sub sp, sp, #64
+
+ __ldrd X8_X10, X9_X11, r3, 40
+ __strd X8_X10, X9_X11, sp, 8
+ __strd X8_X10, X9_X11, sp, 56
+ ldm r3, {X0-X9_X11}
+ __strd X0, X1, sp, 16
+ __strd X2, X3, sp, 24
+ __strd X4, X5, sp, 32
+ __strd X6, X7, sp, 40
+ __strd X8_X10, X9_X11, sp, 48
+
+ beq 1f
+ _chacha 20
+
+0: add sp, #76
+ pop {r4-r11, pc}
+
+1: _chacha 12
+ b 0b
+ENDPROC(chacha_doarm)
+
+/*
+ * void hchacha_block_arm(const u32 state[16], u32 out[8], int nrounds);
+ */
+ENTRY(hchacha_block_arm)
+ push {r1,r4-r11,lr}
+
+ cmp r2, #12 // ChaCha12 ?
+
+ mov r14, r0
+ ldmia r14!, {r0-r11} // load x0-x11
+ push {r10-r11} // store x10-x11 to stack
+ ldm r14, {r10-r12,r14} // load x12-x15
+ sub sp, #8
+
+ beq 1f
+ _chacha_permute 20
+
+ // Skip over (unused0-unused1, x10-x11)
+0: add sp, #16
+
+ // Fix up rotations of x12-x15
+ ror X12, X12, #drot
+ ror X13, X13, #drot
+ pop {r4} // load 'out'
+ ror X14, X14, #drot
+ ror X15, X15, #drot
+
+ // Store (x0-x3,x12-x15) to 'out'
+ stm r4, {X0,X1,X2,X3,X12,X13,X14,X15}
+
+ pop {r4-r11,pc}
+
+1: _chacha_permute 12
+ b 0b
+ENDPROC(hchacha_block_arm)
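
Taken together, the two entry points above are all the C glue layer needs:
hchacha_block_arm() derives an XChaCha subkey and chacha_doarm() generates
and XORs the keystream. A minimal caller sketch, assuming the standard
ChaCha state layout (four constant words, eight key words, one block
counter word, three nonce words); the helper name and the local
CHACHA_BLK_SIZE define are stand-ins, not names from this patch:

	#include <linux/kernel.h>
	#include <linux/linkage.h>
	#include <linux/types.h>

	#define CHACHA_BLK_SIZE	64	/* one ChaCha block */

	asmlinkage void chacha_doarm(u8 *dst, const u8 *src,
				     unsigned int bytes, const u32 *state,
				     int nrounds);

	/* Hypothetical glue helper: XOR 'bytes' of keystream into dst. */
	static void chacha_arm_xor(u32 state[16], u8 *dst, const u8 *src,
				   unsigned int bytes, int nrounds)
	{
		chacha_doarm(dst, src, bytes, state, nrounds);
		/* chacha_doarm() works on a private stack copy and never
		 * writes 'state' back, so advance the counter here. */
		state[12] += DIV_ROUND_UP(bytes, CHACHA_BLK_SIZE);
	}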
diff --git a/arch/arm/crypto/crct10dif-ce-core.S b/arch/arm/crypto/crct10dif-ce-core.S
index 86be258a803f..46c02c518a30 100644
--- a/arch/arm/crypto/crct10dif-ce-core.S
+++ b/arch/arm/crypto/crct10dif-ce-core.S
@@ -72,7 +72,7 @@
#endif
.text
- .arch armv7-a
+ .arch armv8-a
.fpu crypto-neon-fp-armv8
init_crc .req r0
diff --git a/arch/arm/crypto/curve25519-core.S b/arch/arm/crypto/curve25519-core.S
new file mode 100644
index 000000000000..be18af52e7dc
--- /dev/null
+++ b/arch/arm/crypto/curve25519-core.S
@@ -0,0 +1,2062 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ *
+ * Based on public domain code from Daniel J. Bernstein and Peter Schwabe. This
+ * began from SUPERCOP's curve25519/neon2/scalarmult.s, but has subsequently been
+ * manually reworked for use in kernel space.
+ */
+
+#include <linux/linkage.h>
+
+.text
+.fpu neon
+.arch armv7-a
+.align 4
+
+ENTRY(curve25519_neon)
+ push {r4-r11, lr}
+ mov ip, sp
+	sub r3, sp, #704
+	and r3, r3, #0xfffffff0
+	mov sp, r3			// switch to a 704-byte, 16-byte-aligned frame
+	movw r4, #0			// previous scalar bit (for the conditional swap)
+	movw r5, #254			// bit index: walk the scalar from bit 254 down to 0
+ vmov.i32 q0, #1
+ vshr.u64 q1, q0, #7
+ vshr.u64 q0, q0, #8
+ vmov.i32 d4, #19
+ vmov.i32 d5, #38
+ add r6, sp, #480
+ vst1.8 {d2-d3}, [r6, : 128]!
+ vst1.8 {d0-d1}, [r6, : 128]!
+ vst1.8 {d4-d5}, [r6, : 128]
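+	// constants now cached at sp+480: 2^25 and 2^24 (signed-carry
+	// rounding biases) and 19/38 (reduction multipliers mod 2^255-19)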
+ add r6, r3, #0
+ vmov.i32 q2, #0
+ vst1.8 {d4-d5}, [r6, : 128]!
+ vst1.8 {d4-d5}, [r6, : 128]!
+ vst1.8 d4, [r6, : 64]
+ add r6, r3, #0
+ movw r7, #960
+ sub r7, r7, #2
+ neg r7, r7
+ sub r7, r7, r7, LSL #7
+ str r7, [r6]
+ add r6, sp, #672
+ vld1.8 {d4-d5}, [r1]!
+ vld1.8 {d6-d7}, [r1]
+ vst1.8 {d4-d5}, [r6, : 128]!
+ vst1.8 {d6-d7}, [r6, : 128]
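+	// clamp the private scalar copy as usual for X25519 (RFC 7748):
+	// clear bits 0-2 and bit 255, set bit 254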
+ sub r1, r6, #16
+ ldrb r6, [r1]
+ and r6, r6, #248
+ strb r6, [r1]
+ ldrb r6, [r1, #31]
+ and r6, r6, #127
+ orr r6, r6, #64
+ strb r6, [r1, #31]
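+	// unpack the 32-byte u-coordinate at r2 into ten alternating
+	// 26/25-bit limbs, masked with 2^26-1 (q2) and 2^25-1 (q3)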
+ vmov.i64 q2, #0xffffffff
+ vshr.u64 q3, q2, #7
+ vshr.u64 q2, q2, #6
+ vld1.8 {d8}, [r2]
+ vld1.8 {d10}, [r2]
+ add r2, r2, #6
+ vld1.8 {d12}, [r2]
+ vld1.8 {d14}, [r2]
+ add r2, r2, #6
+ vld1.8 {d16}, [r2]
+ add r2, r2, #4
+ vld1.8 {d18}, [r2]
+ vld1.8 {d20}, [r2]
+ add r2, r2, #6
+ vld1.8 {d22}, [r2]
+ add r2, r2, #2
+ vld1.8 {d24}, [r2]
+ vld1.8 {d26}, [r2]
+ vshr.u64 q5, q5, #26
+ vshr.u64 q6, q6, #3
+ vshr.u64 q7, q7, #29
+ vshr.u64 q8, q8, #6
+ vshr.u64 q10, q10, #25
+ vshr.u64 q11, q11, #3
+ vshr.u64 q12, q12, #12
+ vshr.u64 q13, q13, #38
+ vand q4, q4, q2
+ vand q6, q6, q2
+ vand q8, q8, q2
+ vand q10, q10, q2
+ vand q2, q12, q2
+ vand q5, q5, q3
+ vand q7, q7, q3
+ vand q9, q9, q3
+ vand q11, q11, q3
+ vand q3, q13, q3
+ add r2, r3, #48
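+	// carry/reduce the limbs: bias with 2^25 or 2^24, arithmetic-shift
+	// by 26 or 25, and fold the top carry back into limb 0 times 19
+	// (computed as 16 + 2 + 1 with shifts and adds)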
+ vadd.i64 q12, q4, q1
+ vadd.i64 q13, q10, q1
+ vshr.s64 q12, q12, #26
+ vshr.s64 q13, q13, #26
+ vadd.i64 q5, q5, q12
+ vshl.i64 q12, q12, #26
+ vadd.i64 q14, q5, q0
+ vadd.i64 q11, q11, q13
+ vshl.i64 q13, q13, #26
+ vadd.i64 q15, q11, q0
+ vsub.i64 q4, q4, q12
+ vshr.s64 q12, q14, #25
+ vsub.i64 q10, q10, q13
+ vshr.s64 q13, q15, #25
+ vadd.i64 q6, q6, q12
+ vshl.i64 q12, q12, #25
+ vadd.i64 q14, q6, q1
+ vadd.i64 q2, q2, q13
+ vsub.i64 q5, q5, q12
+ vshr.s64 q12, q14, #26
+ vshl.i64 q13, q13, #25
+ vadd.i64 q14, q2, q1
+ vadd.i64 q7, q7, q12
+ vshl.i64 q12, q12, #26
+ vadd.i64 q15, q7, q0
+ vsub.i64 q11, q11, q13
+ vshr.s64 q13, q14, #26
+ vsub.i64 q6, q6, q12
+ vshr.s64 q12, q15, #25
+ vadd.i64 q3, q3, q13
+ vshl.i64 q13, q13, #26
+ vadd.i64 q14, q3, q0
+ vadd.i64 q8, q8, q12
+ vshl.i64 q12, q12, #25
+ vadd.i64 q15, q8, q1
+ add r2, r2, #8
+ vsub.i64 q2, q2, q13
+ vshr.s64 q13, q14, #25
+ vsub.i64 q7, q7, q12
+ vshr.s64 q12, q15, #26
+ vadd.i64 q14, q13, q13
+ vadd.i64 q9, q9, q12
+ vtrn.32 d12, d14
+ vshl.i64 q12, q12, #26
+ vtrn.32 d13, d15
+ vadd.i64 q0, q9, q0
+ vadd.i64 q4, q4, q14
+ vst1.8 d12, [r2, : 64]!
+ vshl.i64 q6, q13, #4
+ vsub.i64 q7, q8, q12
+ vshr.s64 q0, q0, #25
+ vadd.i64 q4, q4, q6
+ vadd.i64 q6, q10, q0
+ vshl.i64 q0, q0, #25
+ vadd.i64 q8, q6, q1
+ vadd.i64 q4, q4, q13
+ vshl.i64 q10, q13, #25
+ vadd.i64 q1, q4, q1
+ vsub.i64 q0, q9, q0
+ vshr.s64 q8, q8, #26
+ vsub.i64 q3, q3, q10
+ vtrn.32 d14, d0
+ vshr.s64 q1, q1, #26
+ vtrn.32 d15, d1
+ vadd.i64 q0, q11, q8
+ vst1.8 d14, [r2, : 64]
+ vshl.i64 q7, q8, #26
+ vadd.i64 q5, q5, q1
+ vtrn.32 d4, d6
+ vshl.i64 q1, q1, #26
+ vtrn.32 d5, d7
+ vsub.i64 q3, q6, q7
+ add r2, r2, #16
+ vsub.i64 q1, q4, q1
+ vst1.8 d4, [r2, : 64]
+ vtrn.32 d6, d0
+ vtrn.32 d7, d1
+ sub r2, r2, #8
+ vtrn.32 d2, d10
+ vtrn.32 d3, d11
+ vst1.8 d6, [r2, : 64]
+ sub r2, r2, #24
+ vst1.8 d2, [r2, : 64]
+ add r2, r3, #96
+ vmov.i32 q0, #0
+ vmov.i64 d2, #0xff
+ vmov.i64 d3, #0
+ vshr.u32 q1, q1, #7
+ vst1.8 {d2-d3}, [r2, : 128]!
+ vst1.8 {d0-d1}, [r2, : 128]!
+ vst1.8 d0, [r2, : 64]
+ add r2, r3, #144
+ vmov.i32 q0, #0
+ vst1.8 {d0-d1}, [r2, : 128]!
+ vst1.8 {d0-d1}, [r2, : 128]!
+ vst1.8 d0, [r2, : 64]
+ add r2, r3, #240
+ vmov.i32 q0, #0
+ vmov.i64 d2, #0xff
+ vmov.i64 d3, #0
+ vshr.u32 q1, q1, #7
+ vst1.8 {d2-d3}, [r2, : 128]!
+ vst1.8 {d0-d1}, [r2, : 128]!
+ vst1.8 d0, [r2, : 64]
+ add r2, r3, #48
+ add r6, r3, #192
+ vld1.8 {d0-d1}, [r2, : 128]!
+ vld1.8 {d2-d3}, [r2, : 128]!
+ vld1.8 {d4}, [r2, : 64]
+ vst1.8 {d0-d1}, [r6, : 128]!
+ vst1.8 {d2-d3}, [r6, : 128]!
+ vst1.8 d4, [r6, : 64]
+.Lmainloop:
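+	// one Montgomery-ladder step per scalar bit, from bit 254 down to
+	// bit 0: extract the bit, XOR it with the previous bit (r4), and
+	// turn the result into an all-ones/all-zeros mask so the two
+	// working points can be swapped in constant time with vand/veor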
+ mov r2, r5, LSR #3
+ and r6, r5, #7
+ ldrb r2, [r1, r2]
+ mov r2, r2, LSR r6
+ and r2, r2, #1
+ str r5, [sp, #456]
+ eor r4, r4, r2
+ str r2, [sp, #460]
+ neg r2, r4
+ add r4, r3, #96
+ add r5, r3, #192
+ add r6, r3, #144
+ vld1.8 {d8-d9}, [r4, : 128]!
+ add r7, r3, #240
+ vld1.8 {d10-d11}, [r5, : 128]!
+ veor q6, q4, q5
+ vld1.8 {d14-d15}, [r6, : 128]!
+ vdup.i32 q8, r2
+ vld1.8 {d18-d19}, [r7, : 128]!
+ veor q10, q7, q9
+ vld1.8 {d22-d23}, [r4, : 128]!
+ vand q6, q6, q8
+ vld1.8 {d24-d25}, [r5, : 128]!
+ vand q10, q10, q8
+ vld1.8 {d26-d27}, [r6, : 128]!
+ veor q4, q4, q6
+ vld1.8 {d28-d29}, [r7, : 128]!
+ veor q5, q5, q6
+ vld1.8 {d0}, [r4, : 64]
+ veor q6, q7, q10
+ vld1.8 {d2}, [r5, : 64]
+ veor q7, q9, q10
+ vld1.8 {d4}, [r6, : 64]
+ veor q9, q11, q12
+ vld1.8 {d6}, [r7, : 64]
+ veor q10, q0, q1
+ sub r2, r4, #32
+ vand q9, q9, q8
+ sub r4, r5, #32
+ vand q10, q10, q8
+ sub r5, r6, #32
+ veor q11, q11, q9
+ sub r6, r7, #32
+ veor q0, q0, q10
+ veor q9, q12, q9
+ veor q1, q1, q10
+ veor q10, q13, q14
+ veor q12, q2, q3
+ vand q10, q10, q8
+ vand q8, q12, q8
+ veor q12, q13, q10
+ veor q2, q2, q8
+ veor q10, q14, q10
+ veor q3, q3, q8
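+	// with the swap done, form the sums and differences (x + z, x - z)
+	// of both working points for the differential add-and-double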
+ vadd.i32 q8, q4, q6
+ vsub.i32 q4, q4, q6
+ vst1.8 {d16-d17}, [r2, : 128]!
+ vadd.i32 q6, q11, q12
+ vst1.8 {d8-d9}, [r5, : 128]!
+ vsub.i32 q4, q11, q12
+ vst1.8 {d12-d13}, [r2, : 128]!
+ vadd.i32 q6, q0, q2
+ vst1.8 {d8-d9}, [r5, : 128]!
+ vsub.i32 q0, q0, q2
+ vst1.8 d12, [r2, : 64]
+ vadd.i32 q2, q5, q7
+ vst1.8 d0, [r5, : 64]
+ vsub.i32 q0, q5, q7
+ vst1.8 {d4-d5}, [r4, : 128]!
+ vadd.i32 q2, q9, q10
+ vst1.8 {d0-d1}, [r6, : 128]!
+ vsub.i32 q0, q9, q10
+ vst1.8 {d4-d5}, [r4, : 128]!
+ vadd.i32 q2, q1, q3
+ vst1.8 {d0-d1}, [r6, : 128]!
+ vsub.i32 q0, q1, q3
+ vst1.8 d4, [r4, : 64]
+ vst1.8 d0, [r6, : 64]
+ add r2, sp, #512
+ add r4, r3, #96
+ add r5, r3, #144
+ vld1.8 {d0-d1}, [r2, : 128]
+ vld1.8 {d2-d3}, [r4, : 128]!
+ vld1.8 {d4-d5}, [r5, : 128]!
+ vzip.i32 q1, q2
+ vld1.8 {d6-d7}, [r4, : 128]!
+ vld1.8 {d8-d9}, [r5, : 128]!
+ vshl.i32 q5, q1, #1
+ vzip.i32 q3, q4
+ vshl.i32 q6, q2, #1
+ vld1.8 {d14}, [r4, : 64]
+ vshl.i32 q8, q3, #1
+ vld1.8 {d15}, [r5, : 64]
+ vshl.i32 q9, q4, #1
+ vmul.i32 d21, d7, d1
+ vtrn.32 d14, d15
+ vmul.i32 q11, q4, q0
+ vmul.i32 q0, q7, q0
+ vmull.s32 q12, d2, d2
+ vmlal.s32 q12, d11, d1
+ vmlal.s32 q12, d12, d0
+ vmlal.s32 q12, d13, d23
+ vmlal.s32 q12, d16, d22
+ vmlal.s32 q12, d7, d21
+ vmull.s32 q10, d2, d11
+ vmlal.s32 q10, d4, d1
+ vmlal.s32 q10, d13, d0
+ vmlal.s32 q10, d6, d23
+ vmlal.s32 q10, d17, d22
+ vmull.s32 q13, d10, d4
+ vmlal.s32 q13, d11, d3
+ vmlal.s32 q13, d13, d1
+ vmlal.s32 q13, d16, d0
+ vmlal.s32 q13, d17, d23
+ vmlal.s32 q13, d8, d22
+ vmull.s32 q1, d10, d5
+ vmlal.s32 q1, d11, d4
+ vmlal.s32 q1, d6, d1
+ vmlal.s32 q1, d17, d0
+ vmlal.s32 q1, d8, d23
+ vmull.s32 q14, d10, d6
+ vmlal.s32 q14, d11, d13
+ vmlal.s32 q14, d4, d4
+ vmlal.s32 q14, d17, d1
+ vmlal.s32 q14, d18, d0
+ vmlal.s32 q14, d9, d23
+ vmull.s32 q11, d10, d7
+ vmlal.s32 q11, d11, d6
+ vmlal.s32 q11, d12, d5
+ vmlal.s32 q11, d8, d1
+ vmlal.s32 q11, d19, d0
+ vmull.s32 q15, d10, d8
+ vmlal.s32 q15, d11, d17
+ vmlal.s32 q15, d12, d6
+ vmlal.s32 q15, d13, d5
+ vmlal.s32 q15, d19, d1
+ vmlal.s32 q15, d14, d0
+ vmull.s32 q2, d10, d9
+ vmlal.s32 q2, d11, d8
+ vmlal.s32 q2, d12, d7
+ vmlal.s32 q2, d13, d6
+ vmlal.s32 q2, d14, d1
+ vmull.s32 q0, d15, d1
+ vmlal.s32 q0, d10, d14
+ vmlal.s32 q0, d11, d19
+ vmlal.s32 q0, d12, d8
+ vmlal.s32 q0, d13, d17
+ vmlal.s32 q0, d6, d6
+ add r2, sp, #480
+ vld1.8 {d18-d19}, [r2, : 128]!
+ vmull.s32 q3, d16, d7
+ vmlal.s32 q3, d10, d15
+ vmlal.s32 q3, d11, d14
+ vmlal.s32 q3, d12, d9
+ vmlal.s32 q3, d13, d8
+ vld1.8 {d8-d9}, [r2, : 128]
+ vadd.i64 q5, q12, q9
+ vadd.i64 q6, q15, q9
+ vshr.s64 q5, q5, #26
+ vshr.s64 q6, q6, #26
+ vadd.i64 q7, q10, q5
+ vshl.i64 q5, q5, #26
+ vadd.i64 q8, q7, q4
+ vadd.i64 q2, q2, q6
+ vshl.i64 q6, q6, #26
+ vadd.i64 q10, q2, q4
+ vsub.i64 q5, q12, q5
+ vshr.s64 q8, q8, #25
+ vsub.i64 q6, q15, q6
+ vshr.s64 q10, q10, #25
+ vadd.i64 q12, q13, q8
+ vshl.i64 q8, q8, #25
+ vadd.i64 q13, q12, q9
+ vadd.i64 q0, q0, q10
+ vsub.i64 q7, q7, q8
+ vshr.s64 q8, q13, #26
+ vshl.i64 q10, q10, #25
+ vadd.i64 q13, q0, q9
+ vadd.i64 q1, q1, q8
+ vshl.i64 q8, q8, #26
+ vadd.i64 q15, q1, q4
+ vsub.i64 q2, q2, q10
+ vshr.s64 q10, q13, #26
+ vsub.i64 q8, q12, q8
+ vshr.s64 q12, q15, #25
+ vadd.i64 q3, q3, q10
+ vshl.i64 q10, q10, #26
+ vadd.i64 q13, q3, q4
+ vadd.i64 q14, q14, q12
+ add r2, r3, #288
+ vshl.i64 q12, q12, #25
+ add r4, r3, #336
+ vadd.i64 q15, q14, q9
+ add r2, r2, #8
+ vsub.i64 q0, q0, q10
+ add r4, r4, #8
+ vshr.s64 q10, q13, #25
+ vsub.i64 q1, q1, q12
+ vshr.s64 q12, q15, #26
+ vadd.i64 q13, q10, q10
+ vadd.i64 q11, q11, q12
+ vtrn.32 d16, d2
+ vshl.i64 q12, q12, #26
+ vtrn.32 d17, d3
+ vadd.i64 q1, q11, q4
+ vadd.i64 q4, q5, q13
+ vst1.8 d16, [r2, : 64]!
+ vshl.i64 q5, q10, #4
+ vst1.8 d17, [r4, : 64]!
+ vsub.i64 q8, q14, q12
+ vshr.s64 q1, q1, #25
+ vadd.i64 q4, q4, q5
+ vadd.i64 q5, q6, q1
+ vshl.i64 q1, q1, #25
+ vadd.i64 q6, q5, q9
+ vadd.i64 q4, q4, q10
+ vshl.i64 q10, q10, #25
+ vadd.i64 q9, q4, q9
+ vsub.i64 q1, q11, q1
+ vshr.s64 q6, q6, #26
+ vsub.i64 q3, q3, q10
+ vtrn.32 d16, d2
+ vshr.s64 q9, q9, #26
+ vtrn.32 d17, d3
+ vadd.i64 q1, q2, q6
+ vst1.8 d16, [r2, : 64]
+ vshl.i64 q2, q6, #26
+ vst1.8 d17, [r4, : 64]
+ vadd.i64 q6, q7, q9
+ vtrn.32 d0, d6
+ vshl.i64 q7, q9, #26
+ vtrn.32 d1, d7
+ vsub.i64 q2, q5, q2
+ add r2, r2, #16
+ vsub.i64 q3, q4, q7
+ vst1.8 d0, [r2, : 64]
+ add r4, r4, #16
+ vst1.8 d1, [r4, : 64]
+ vtrn.32 d4, d2
+ vtrn.32 d5, d3
+ sub r2, r2, #8
+ sub r4, r4, #8
+ vtrn.32 d6, d12
+ vtrn.32 d7, d13
+ vst1.8 d4, [r2, : 64]
+ vst1.8 d5, [r4, : 64]
+ sub r2, r2, #24
+ sub r4, r4, #24
+ vst1.8 d6, [r2, : 64]
+ vst1.8 d7, [r4, : 64]
+ add r2, r3, #240
+ add r4, r3, #96
+ vld1.8 {d0-d1}, [r4, : 128]!
+ vld1.8 {d2-d3}, [r4, : 128]!
+ vld1.8 {d4}, [r4, : 64]
+ add r4, r3, #144
+ vld1.8 {d6-d7}, [r4, : 128]!
+ vtrn.32 q0, q3
+ vld1.8 {d8-d9}, [r4, : 128]!
+ vshl.i32 q5, q0, #4
+ vtrn.32 q1, q4
+ vshl.i32 q6, q3, #4
+ vadd.i32 q5, q5, q0
+ vadd.i32 q6, q6, q3
+ vshl.i32 q7, q1, #4
+ vld1.8 {d5}, [r4, : 64]
+ vshl.i32 q8, q4, #4
+ vtrn.32 d4, d5
+ vadd.i32 q7, q7, q1
+ vadd.i32 q8, q8, q4
+ vld1.8 {d18-d19}, [r2, : 128]!
+ vshl.i32 q10, q2, #4
+ vld1.8 {d22-d23}, [r2, : 128]!
+ vadd.i32 q10, q10, q2
+ vld1.8 {d24}, [r2, : 64]
+ vadd.i32 q5, q5, q0
+ add r2, r3, #192
+ vld1.8 {d26-d27}, [r2, : 128]!
+ vadd.i32 q6, q6, q3
+ vld1.8 {d28-d29}, [r2, : 128]!
+ vadd.i32 q8, q8, q4
+ vld1.8 {d25}, [r2, : 64]
+ vadd.i32 q10, q10, q2
+ vtrn.32 q9, q13
+ vadd.i32 q7, q7, q1
+ vadd.i32 q5, q5, q0
+ vtrn.32 q11, q14
+ vadd.i32 q6, q6, q3
+ add r2, sp, #528
+ vadd.i32 q10, q10, q2
+ vtrn.32 d24, d25
+ vst1.8 {d12-d13}, [r2, : 128]!
+ vshl.i32 q6, q13, #1
+ vst1.8 {d20-d21}, [r2, : 128]!
+ vshl.i32 q10, q14, #1
+ vst1.8 {d12-d13}, [r2, : 128]!
+ vshl.i32 q15, q12, #1
+ vadd.i32 q8, q8, q4
+ vext.32 d10, d31, d30, #0
+ vadd.i32 q7, q7, q1
+ vst1.8 {d16-d17}, [r2, : 128]!
+ vmull.s32 q8, d18, d5
+ vmlal.s32 q8, d26, d4
+ vmlal.s32 q8, d19, d9
+ vmlal.s32 q8, d27, d3
+ vmlal.s32 q8, d22, d8
+ vmlal.s32 q8, d28, d2
+ vmlal.s32 q8, d23, d7
+ vmlal.s32 q8, d29, d1
+ vmlal.s32 q8, d24, d6
+ vmlal.s32 q8, d25, d0
+ vst1.8 {d14-d15}, [r2, : 128]!
+ vmull.s32 q2, d18, d4
+ vmlal.s32 q2, d12, d9
+ vmlal.s32 q2, d13, d8
+ vmlal.s32 q2, d19, d3
+ vmlal.s32 q2, d22, d2
+ vmlal.s32 q2, d23, d1
+ vmlal.s32 q2, d24, d0
+ vst1.8 {d20-d21}, [r2, : 128]!
+ vmull.s32 q7, d18, d9
+ vmlal.s32 q7, d26, d3
+ vmlal.s32 q7, d19, d8
+ vmlal.s32 q7, d27, d2
+ vmlal.s32 q7, d22, d7
+ vmlal.s32 q7, d28, d1
+ vmlal.s32 q7, d23, d6
+ vmlal.s32 q7, d29, d0
+ vst1.8 {d10-d11}, [r2, : 128]!
+ vmull.s32 q5, d18, d3
+ vmlal.s32 q5, d19, d2
+ vmlal.s32 q5, d22, d1
+ vmlal.s32 q5, d23, d0
+ vmlal.s32 q5, d12, d8
+ vst1.8 {d16-d17}, [r2, : 128]
+ vmull.s32 q4, d18, d8
+ vmlal.s32 q4, d26, d2
+ vmlal.s32 q4, d19, d7
+ vmlal.s32 q4, d27, d1
+ vmlal.s32 q4, d22, d6
+ vmlal.s32 q4, d28, d0
+ vmull.s32 q8, d18, d7
+ vmlal.s32 q8, d26, d1
+ vmlal.s32 q8, d19, d6
+ vmlal.s32 q8, d27, d0
+ add r2, sp, #544
+ vld1.8 {d20-d21}, [r2, : 128]
+ vmlal.s32 q7, d24, d21
+ vmlal.s32 q7, d25, d20
+ vmlal.s32 q4, d23, d21
+ vmlal.s32 q4, d29, d20
+ vmlal.s32 q8, d22, d21
+ vmlal.s32 q8, d28, d20
+ vmlal.s32 q5, d24, d20
+ vst1.8 {d14-d15}, [r2, : 128]
+ vmull.s32 q7, d18, d6
+ vmlal.s32 q7, d26, d0
+ add r2, sp, #624
+ vld1.8 {d30-d31}, [r2, : 128]
+ vmlal.s32 q2, d30, d21
+ vmlal.s32 q7, d19, d21
+ vmlal.s32 q7, d27, d20
+ add r2, sp, #592
+ vld1.8 {d26-d27}, [r2, : 128]
+ vmlal.s32 q4, d25, d27
+ vmlal.s32 q8, d29, d27
+ vmlal.s32 q8, d25, d26
+ vmlal.s32 q7, d28, d27
+ vmlal.s32 q7, d29, d26
+ add r2, sp, #576
+ vld1.8 {d28-d29}, [r2, : 128]
+ vmlal.s32 q4, d24, d29
+ vmlal.s32 q8, d23, d29
+ vmlal.s32 q8, d24, d28
+ vmlal.s32 q7, d22, d29
+ vmlal.s32 q7, d23, d28
+ vst1.8 {d8-d9}, [r2, : 128]
+ add r2, sp, #528
+ vld1.8 {d8-d9}, [r2, : 128]
+ vmlal.s32 q7, d24, d9
+ vmlal.s32 q7, d25, d31
+ vmull.s32 q1, d18, d2
+ vmlal.s32 q1, d19, d1
+ vmlal.s32 q1, d22, d0
+ vmlal.s32 q1, d24, d27
+ vmlal.s32 q1, d23, d20
+ vmlal.s32 q1, d12, d7
+ vmlal.s32 q1, d13, d6
+ vmull.s32 q6, d18, d1
+ vmlal.s32 q6, d19, d0
+ vmlal.s32 q6, d23, d27
+ vmlal.s32 q6, d22, d20
+ vmlal.s32 q6, d24, d26
+ vmull.s32 q0, d18, d0
+ vmlal.s32 q0, d22, d27
+ vmlal.s32 q0, d23, d26
+ vmlal.s32 q0, d24, d31
+ vmlal.s32 q0, d19, d20
+ add r2, sp, #608
+ vld1.8 {d18-d19}, [r2, : 128]
+ vmlal.s32 q2, d18, d7
+ vmlal.s32 q5, d18, d6
+ vmlal.s32 q1, d18, d21
+ vmlal.s32 q0, d18, d28
+ vmlal.s32 q6, d18, d29
+ vmlal.s32 q2, d19, d6
+ vmlal.s32 q5, d19, d21
+ vmlal.s32 q1, d19, d29
+ vmlal.s32 q0, d19, d9
+ vmlal.s32 q6, d19, d28
+ add r2, sp, #560
+ vld1.8 {d18-d19}, [r2, : 128]
+ add r2, sp, #480
+ vld1.8 {d22-d23}, [r2, : 128]
+ vmlal.s32 q5, d19, d7
+ vmlal.s32 q0, d18, d21
+ vmlal.s32 q0, d19, d29
+ vmlal.s32 q6, d18, d6
+ add r2, sp, #496
+ vld1.8 {d6-d7}, [r2, : 128]
+ vmlal.s32 q6, d19, d21
+ add r2, sp, #544
+ vld1.8 {d18-d19}, [r2, : 128]
+ vmlal.s32 q0, d30, d8
+ add r2, sp, #640
+ vld1.8 {d20-d21}, [r2, : 128]
+ vmlal.s32 q5, d30, d29
+ add r2, sp, #576
+ vld1.8 {d24-d25}, [r2, : 128]
+ vmlal.s32 q1, d30, d28
+ vadd.i64 q13, q0, q11
+ vadd.i64 q14, q5, q11
+ vmlal.s32 q6, d30, d9
+ vshr.s64 q4, q13, #26
+ vshr.s64 q13, q14, #26
+ vadd.i64 q7, q7, q4
+ vshl.i64 q4, q4, #26
+ vadd.i64 q14, q7, q3
+ vadd.i64 q9, q9, q13
+ vshl.i64 q13, q13, #26
+ vadd.i64 q15, q9, q3
+ vsub.i64 q0, q0, q4
+ vshr.s64 q4, q14, #25
+ vsub.i64 q5, q5, q13
+ vshr.s64 q13, q15, #25
+ vadd.i64 q6, q6, q4
+ vshl.i64 q4, q4, #25
+ vadd.i64 q14, q6, q11
+ vadd.i64 q2, q2, q13
+ vsub.i64 q4, q7, q4
+ vshr.s64 q7, q14, #26
+ vshl.i64 q13, q13, #25
+ vadd.i64 q14, q2, q11
+ vadd.i64 q8, q8, q7
+ vshl.i64 q7, q7, #26
+ vadd.i64 q15, q8, q3
+ vsub.i64 q9, q9, q13
+ vshr.s64 q13, q14, #26
+ vsub.i64 q6, q6, q7
+ vshr.s64 q7, q15, #25
+ vadd.i64 q10, q10, q13
+ vshl.i64 q13, q13, #26
+ vadd.i64 q14, q10, q3
+ vadd.i64 q1, q1, q7
+ add r2, r3, #144
+ vshl.i64 q7, q7, #25
+ add r4, r3, #96
+ vadd.i64 q15, q1, q11
+ add r2, r2, #8
+ vsub.i64 q2, q2, q13
+ add r4, r4, #8
+ vshr.s64 q13, q14, #25
+ vsub.i64 q7, q8, q7
+ vshr.s64 q8, q15, #26
+ vadd.i64 q14, q13, q13
+ vadd.i64 q12, q12, q8
+ vtrn.32 d12, d14
+ vshl.i64 q8, q8, #26
+ vtrn.32 d13, d15
+ vadd.i64 q3, q12, q3
+ vadd.i64 q0, q0, q14
+ vst1.8 d12, [r2, : 64]!
+ vshl.i64 q7, q13, #4
+ vst1.8 d13, [r4, : 64]!
+ vsub.i64 q1, q1, q8
+ vshr.s64 q3, q3, #25
+ vadd.i64 q0, q0, q7
+ vadd.i64 q5, q5, q3
+ vshl.i64 q3, q3, #25
+ vadd.i64 q6, q5, q11
+ vadd.i64 q0, q0, q13
+ vshl.i64 q7, q13, #25
+ vadd.i64 q8, q0, q11
+ vsub.i64 q3, q12, q3
+ vshr.s64 q6, q6, #26
+ vsub.i64 q7, q10, q7
+ vtrn.32 d2, d6
+ vshr.s64 q8, q8, #26
+ vtrn.32 d3, d7
+ vadd.i64 q3, q9, q6
+ vst1.8 d2, [r2, : 64]
+ vshl.i64 q6, q6, #26
+ vst1.8 d3, [r4, : 64]
+ vadd.i64 q1, q4, q8
+ vtrn.32 d4, d14
+ vshl.i64 q4, q8, #26
+ vtrn.32 d5, d15
+ vsub.i64 q5, q5, q6
+ add r2, r2, #16
+ vsub.i64 q0, q0, q4
+ vst1.8 d4, [r2, : 64]
+ add r4, r4, #16
+ vst1.8 d5, [r4, : 64]
+ vtrn.32 d10, d6
+ vtrn.32 d11, d7
+ sub r2, r2, #8
+ sub r4, r4, #8
+ vtrn.32 d0, d2
+ vtrn.32 d1, d3
+ vst1.8 d10, [r2, : 64]
+ vst1.8 d11, [r4, : 64]
+ sub r2, r2, #24
+ sub r4, r4, #24
+ vst1.8 d0, [r2, : 64]
+ vst1.8 d1, [r4, : 64]
+ add r2, r3, #288
+ add r4, r3, #336
+ vld1.8 {d0-d1}, [r2, : 128]!
+ vld1.8 {d2-d3}, [r4, : 128]!
+ vsub.i32 q0, q0, q1
+ vld1.8 {d2-d3}, [r2, : 128]!
+ vld1.8 {d4-d5}, [r4, : 128]!
+ vsub.i32 q1, q1, q2
+ add r5, r3, #240
+ vld1.8 {d4}, [r2, : 64]
+ vld1.8 {d6}, [r4, : 64]
+ vsub.i32 q2, q2, q3
+ vst1.8 {d0-d1}, [r5, : 128]!
+ vst1.8 {d2-d3}, [r5, : 128]!
+ vst1.8 d4, [r5, : 64]
+ add r2, r3, #144
+ add r4, r3, #96
+ add r5, r3, #144
+ add r6, r3, #192
+ vld1.8 {d0-d1}, [r2, : 128]!
+ vld1.8 {d2-d3}, [r4, : 128]!
+ vsub.i32 q2, q0, q1
+ vadd.i32 q0, q0, q1
+ vld1.8 {d2-d3}, [r2, : 128]!
+ vld1.8 {d6-d7}, [r4, : 128]!
+ vsub.i32 q4, q1, q3
+ vadd.i32 q1, q1, q3
+ vld1.8 {d6}, [r2, : 64]
+ vld1.8 {d10}, [r4, : 64]
+ vsub.i32 q6, q3, q5
+ vadd.i32 q3, q3, q5
+ vst1.8 {d4-d5}, [r5, : 128]!
+ vst1.8 {d0-d1}, [r6, : 128]!
+ vst1.8 {d8-d9}, [r5, : 128]!
+ vst1.8 {d2-d3}, [r6, : 128]!
+ vst1.8 d12, [r5, : 64]
+ vst1.8 d6, [r6, : 64]
+ add r2, r3, #0
+ add r4, r3, #240
+ vld1.8 {d0-d1}, [r4, : 128]!
+ vld1.8 {d2-d3}, [r4, : 128]!
+ vld1.8 {d4}, [r4, : 64]
+ add r4, r3, #336
+ vld1.8 {d6-d7}, [r4, : 128]!
+ vtrn.32 q0, q3
+ vld1.8 {d8-d9}, [r4, : 128]!
+ vshl.i32 q5, q0, #4
+ vtrn.32 q1, q4
+ vshl.i32 q6, q3, #4
+ vadd.i32 q5, q5, q0
+ vadd.i32 q6, q6, q3
+ vshl.i32 q7, q1, #4
+ vld1.8 {d5}, [r4, : 64]
+ vshl.i32 q8, q4, #4
+ vtrn.32 d4, d5
+ vadd.i32 q7, q7, q1
+ vadd.i32 q8, q8, q4
+ vld1.8 {d18-d19}, [r2, : 128]!
+ vshl.i32 q10, q2, #4
+ vld1.8 {d22-d23}, [r2, : 128]!
+ vadd.i32 q10, q10, q2
+ vld1.8 {d24}, [r2, : 64]
+ vadd.i32 q5, q5, q0
+ add r2, r3, #288
+ vld1.8 {d26-d27}, [r2, : 128]!
+ vadd.i32 q6, q6, q3
+ vld1.8 {d28-d29}, [r2, : 128]!
+ vadd.i32 q8, q8, q4
+ vld1.8 {d25}, [r2, : 64]
+ vadd.i32 q10, q10, q2
+ vtrn.32 q9, q13
+ vadd.i32 q7, q7, q1
+ vadd.i32 q5, q5, q0
+ vtrn.32 q11, q14
+ vadd.i32 q6, q6, q3
+ add r2, sp, #528
+ vadd.i32 q10, q10, q2
+ vtrn.32 d24, d25
+ vst1.8 {d12-d13}, [r2, : 128]!
+ vshl.i32 q6, q13, #1
+ vst1.8 {d20-d21}, [r2, : 128]!
+ vshl.i32 q10, q14, #1
+ vst1.8 {d12-d13}, [r2, : 128]!
+ vshl.i32 q15, q12, #1
+ vadd.i32 q8, q8, q4
+ vext.32 d10, d31, d30, #0
+ vadd.i32 q7, q7, q1
+ vst1.8 {d16-d17}, [r2, : 128]!
+ vmull.s32 q8, d18, d5
+ vmlal.s32 q8, d26, d4
+ vmlal.s32 q8, d19, d9
+ vmlal.s32 q8, d27, d3
+ vmlal.s32 q8, d22, d8
+ vmlal.s32 q8, d28, d2
+ vmlal.s32 q8, d23, d7
+ vmlal.s32 q8, d29, d1
+ vmlal.s32 q8, d24, d6
+ vmlal.s32 q8, d25, d0
+ vst1.8 {d14-d15}, [r2, : 128]!
+ vmull.s32 q2, d18, d4
+ vmlal.s32 q2, d12, d9
+ vmlal.s32 q2, d13, d8
+ vmlal.s32 q2, d19, d3
+ vmlal.s32 q2, d22, d2
+ vmlal.s32 q2, d23, d1
+ vmlal.s32 q2, d24, d0
+ vst1.8 {d20-d21}, [r2, : 128]!
+ vmull.s32 q7, d18, d9
+ vmlal.s32 q7, d26, d3
+ vmlal.s32 q7, d19, d8
+ vmlal.s32 q7, d27, d2
+ vmlal.s32 q7, d22, d7
+ vmlal.s32 q7, d28, d1
+ vmlal.s32 q7, d23, d6
+ vmlal.s32 q7, d29, d0
+ vst1.8 {d10-d11}, [r2, : 128]!
+ vmull.s32 q5, d18, d3
+ vmlal.s32 q5, d19, d2
+ vmlal.s32 q5, d22, d1
+ vmlal.s32 q5, d23, d0
+ vmlal.s32 q5, d12, d8
+ vst1.8 {d16-d17}, [r2, : 128]!
+ vmull.s32 q4, d18, d8
+ vmlal.s32 q4, d26, d2
+ vmlal.s32 q4, d19, d7
+ vmlal.s32 q4, d27, d1
+ vmlal.s32 q4, d22, d6
+ vmlal.s32 q4, d28, d0
+ vmull.s32 q8, d18, d7
+ vmlal.s32 q8, d26, d1
+ vmlal.s32 q8, d19, d6
+ vmlal.s32 q8, d27, d0
+ add r2, sp, #544
+ vld1.8 {d20-d21}, [r2, : 128]
+ vmlal.s32 q7, d24, d21
+ vmlal.s32 q7, d25, d20
+ vmlal.s32 q4, d23, d21
+ vmlal.s32 q4, d29, d20
+ vmlal.s32 q8, d22, d21
+ vmlal.s32 q8, d28, d20
+ vmlal.s32 q5, d24, d20
+ vst1.8 {d14-d15}, [r2, : 128]
+ vmull.s32 q7, d18, d6
+ vmlal.s32 q7, d26, d0
+ add r2, sp, #624
+ vld1.8 {d30-d31}, [r2, : 128]
+ vmlal.s32 q2, d30, d21
+ vmlal.s32 q7, d19, d21
+ vmlal.s32 q7, d27, d20
+ add r2, sp, #592
+ vld1.8 {d26-d27}, [r2, : 128]
+ vmlal.s32 q4, d25, d27
+ vmlal.s32 q8, d29, d27
+ vmlal.s32 q8, d25, d26
+ vmlal.s32 q7, d28, d27
+ vmlal.s32 q7, d29, d26
+ add r2, sp, #576
+ vld1.8 {d28-d29}, [r2, : 128]
+ vmlal.s32 q4, d24, d29
+ vmlal.s32 q8, d23, d29
+ vmlal.s32 q8, d24, d28
+ vmlal.s32 q7, d22, d29
+ vmlal.s32 q7, d23, d28
+ vst1.8 {d8-d9}, [r2, : 128]
+ add r2, sp, #528
+ vld1.8 {d8-d9}, [r2, : 128]
+ vmlal.s32 q7, d24, d9
+ vmlal.s32 q7, d25, d31
+ vmull.s32 q1, d18, d2
+ vmlal.s32 q1, d19, d1
+ vmlal.s32 q1, d22, d0
+ vmlal.s32 q1, d24, d27
+ vmlal.s32 q1, d23, d20
+ vmlal.s32 q1, d12, d7
+ vmlal.s32 q1, d13, d6
+ vmull.s32 q6, d18, d1
+ vmlal.s32 q6, d19, d0
+ vmlal.s32 q6, d23, d27
+ vmlal.s32 q6, d22, d20
+ vmlal.s32 q6, d24, d26
+ vmull.s32 q0, d18, d0
+ vmlal.s32 q0, d22, d27
+ vmlal.s32 q0, d23, d26
+ vmlal.s32 q0, d24, d31
+ vmlal.s32 q0, d19, d20
+ add r2, sp, #608
+ vld1.8 {d18-d19}, [r2, : 128]
+ vmlal.s32 q2, d18, d7
+ vmlal.s32 q5, d18, d6
+ vmlal.s32 q1, d18, d21
+ vmlal.s32 q0, d18, d28
+ vmlal.s32 q6, d18, d29
+ vmlal.s32 q2, d19, d6
+ vmlal.s32 q5, d19, d21
+ vmlal.s32 q1, d19, d29
+ vmlal.s32 q0, d19, d9
+ vmlal.s32 q6, d19, d28
+ add r2, sp, #560
+ vld1.8 {d18-d19}, [r2, : 128]
+ add r2, sp, #480
+ vld1.8 {d22-d23}, [r2, : 128]
+ vmlal.s32 q5, d19, d7
+ vmlal.s32 q0, d18, d21
+ vmlal.s32 q0, d19, d29
+ vmlal.s32 q6, d18, d6
+ add r2, sp, #496
+ vld1.8 {d6-d7}, [r2, : 128]
+ vmlal.s32 q6, d19, d21
+ add r2, sp, #544
+ vld1.8 {d18-d19}, [r2, : 128]
+ vmlal.s32 q0, d30, d8
+ add r2, sp, #640
+ vld1.8 {d20-d21}, [r2, : 128]
+ vmlal.s32 q5, d30, d29
+ add r2, sp, #576
+ vld1.8 {d24-d25}, [r2, : 128]
+ vmlal.s32 q1, d30, d28
+ vadd.i64 q13, q0, q11
+ vadd.i64 q14, q5, q11
+ vmlal.s32 q6, d30, d9
+ vshr.s64 q4, q13, #26
+ vshr.s64 q13, q14, #26
+ vadd.i64 q7, q7, q4
+ vshl.i64 q4, q4, #26
+ vadd.i64 q14, q7, q3
+ vadd.i64 q9, q9, q13
+ vshl.i64 q13, q13, #26
+ vadd.i64 q15, q9, q3
+ vsub.i64 q0, q0, q4
+ vshr.s64 q4, q14, #25
+ vsub.i64 q5, q5, q13
+ vshr.s64 q13, q15, #25
+ vadd.i64 q6, q6, q4
+ vshl.i64 q4, q4, #25
+ vadd.i64 q14, q6, q11
+ vadd.i64 q2, q2, q13
+ vsub.i64 q4, q7, q4
+ vshr.s64 q7, q14, #26
+ vshl.i64 q13, q13, #25
+ vadd.i64 q14, q2, q11
+ vadd.i64 q8, q8, q7
+ vshl.i64 q7, q7, #26
+ vadd.i64 q15, q8, q3
+ vsub.i64 q9, q9, q13
+ vshr.s64 q13, q14, #26
+ vsub.i64 q6, q6, q7
+ vshr.s64 q7, q15, #25
+ vadd.i64 q10, q10, q13
+ vshl.i64 q13, q13, #26
+ vadd.i64 q14, q10, q3
+ vadd.i64 q1, q1, q7
+ add r2, r3, #288
+ vshl.i64 q7, q7, #25
+ add r4, r3, #96
+ vadd.i64 q15, q1, q11
+ add r2, r2, #8
+ vsub.i64 q2, q2, q13
+ add r4, r4, #8
+ vshr.s64 q13, q14, #25
+ vsub.i64 q7, q8, q7
+ vshr.s64 q8, q15, #26
+ vadd.i64 q14, q13, q13
+ vadd.i64 q12, q12, q8
+ vtrn.32 d12, d14
+ vshl.i64 q8, q8, #26
+ vtrn.32 d13, d15
+ vadd.i64 q3, q12, q3
+ vadd.i64 q0, q0, q14
+ vst1.8 d12, [r2, : 64]!
+ vshl.i64 q7, q13, #4
+ vst1.8 d13, [r4, : 64]!
+ vsub.i64 q1, q1, q8
+ vshr.s64 q3, q3, #25
+ vadd.i64 q0, q0, q7
+ vadd.i64 q5, q5, q3
+ vshl.i64 q3, q3, #25
+ vadd.i64 q6, q5, q11
+ vadd.i64 q0, q0, q13
+ vshl.i64 q7, q13, #25
+ vadd.i64 q8, q0, q11
+ vsub.i64 q3, q12, q3
+ vshr.s64 q6, q6, #26
+ vsub.i64 q7, q10, q7
+ vtrn.32 d2, d6
+ vshr.s64 q8, q8, #26
+ vtrn.32 d3, d7
+ vadd.i64 q3, q9, q6
+ vst1.8 d2, [r2, : 64]
+ vshl.i64 q6, q6, #26
+ vst1.8 d3, [r4, : 64]
+ vadd.i64 q1, q4, q8
+ vtrn.32 d4, d14
+ vshl.i64 q4, q8, #26
+ vtrn.32 d5, d15
+ vsub.i64 q5, q5, q6
+ add r2, r2, #16
+ vsub.i64 q0, q0, q4
+ vst1.8 d4, [r2, : 64]
+ add r4, r4, #16
+ vst1.8 d5, [r4, : 64]
+ vtrn.32 d10, d6
+ vtrn.32 d11, d7
+ sub r2, r2, #8
+ sub r4, r4, #8
+ vtrn.32 d0, d2
+ vtrn.32 d1, d3
+ vst1.8 d10, [r2, : 64]
+ vst1.8 d11, [r4, : 64]
+ sub r2, r2, #24
+ sub r4, r4, #24
+ vst1.8 d0, [r2, : 64]
+ vst1.8 d1, [r4, : 64]
+ add r2, sp, #512
+ add r4, r3, #144
+ add r5, r3, #192
+ vld1.8 {d0-d1}, [r2, : 128]
+ vld1.8 {d2-d3}, [r4, : 128]!
+ vld1.8 {d4-d5}, [r5, : 128]!
+ vzip.i32 q1, q2
+ vld1.8 {d6-d7}, [r4, : 128]!
+ vld1.8 {d8-d9}, [r5, : 128]!
+ vshl.i32 q5, q1, #1
+ vzip.i32 q3, q4
+ vshl.i32 q6, q2, #1
+ vld1.8 {d14}, [r4, : 64]
+ vshl.i32 q8, q3, #1
+ vld1.8 {d15}, [r5, : 64]
+ vshl.i32 q9, q4, #1
+ vmul.i32 d21, d7, d1
+ vtrn.32 d14, d15
+ vmul.i32 q11, q4, q0
+ vmul.i32 q0, q7, q0
+ vmull.s32 q12, d2, d2
+ vmlal.s32 q12, d11, d1
+ vmlal.s32 q12, d12, d0
+ vmlal.s32 q12, d13, d23
+ vmlal.s32 q12, d16, d22
+ vmlal.s32 q12, d7, d21
+ vmull.s32 q10, d2, d11
+ vmlal.s32 q10, d4, d1
+ vmlal.s32 q10, d13, d0
+ vmlal.s32 q10, d6, d23
+ vmlal.s32 q10, d17, d22
+ vmull.s32 q13, d10, d4
+ vmlal.s32 q13, d11, d3
+ vmlal.s32 q13, d13, d1
+ vmlal.s32 q13, d16, d0
+ vmlal.s32 q13, d17, d23
+ vmlal.s32 q13, d8, d22
+ vmull.s32 q1, d10, d5
+ vmlal.s32 q1, d11, d4
+ vmlal.s32 q1, d6, d1
+ vmlal.s32 q1, d17, d0
+ vmlal.s32 q1, d8, d23
+ vmull.s32 q14, d10, d6
+ vmlal.s32 q14, d11, d13
+ vmlal.s32 q14, d4, d4
+ vmlal.s32 q14, d17, d1
+ vmlal.s32 q14, d18, d0
+ vmlal.s32 q14, d9, d23
+ vmull.s32 q11, d10, d7
+ vmlal.s32 q11, d11, d6
+ vmlal.s32 q11, d12, d5
+ vmlal.s32 q11, d8, d1
+ vmlal.s32 q11, d19, d0
+ vmull.s32 q15, d10, d8
+ vmlal.s32 q15, d11, d17
+ vmlal.s32 q15, d12, d6
+ vmlal.s32 q15, d13, d5
+ vmlal.s32 q15, d19, d1
+ vmlal.s32 q15, d14, d0
+ vmull.s32 q2, d10, d9
+ vmlal.s32 q2, d11, d8
+ vmlal.s32 q2, d12, d7
+ vmlal.s32 q2, d13, d6
+ vmlal.s32 q2, d14, d1
+ vmull.s32 q0, d15, d1
+ vmlal.s32 q0, d10, d14
+ vmlal.s32 q0, d11, d19
+ vmlal.s32 q0, d12, d8
+ vmlal.s32 q0, d13, d17
+ vmlal.s32 q0, d6, d6
+ add r2, sp, #480
+ vld1.8 {d18-d19}, [r2, : 128]!
+ vmull.s32 q3, d16, d7
+ vmlal.s32 q3, d10, d15
+ vmlal.s32 q3, d11, d14
+ vmlal.s32 q3, d12, d9
+ vmlal.s32 q3, d13, d8
+ vld1.8 {d8-d9}, [r2, : 128]
+ vadd.i64 q5, q12, q9
+ vadd.i64 q6, q15, q9
+ vshr.s64 q5, q5, #26
+ vshr.s64 q6, q6, #26
+ vadd.i64 q7, q10, q5
+ vshl.i64 q5, q5, #26
+ vadd.i64 q8, q7, q4
+ vadd.i64 q2, q2, q6
+ vshl.i64 q6, q6, #26
+ vadd.i64 q10, q2, q4
+ vsub.i64 q5, q12, q5
+ vshr.s64 q8, q8, #25
+ vsub.i64 q6, q15, q6
+ vshr.s64 q10, q10, #25
+ vadd.i64 q12, q13, q8
+ vshl.i64 q8, q8, #25
+ vadd.i64 q13, q12, q9
+ vadd.i64 q0, q0, q10
+ vsub.i64 q7, q7, q8
+ vshr.s64 q8, q13, #26
+ vshl.i64 q10, q10, #25
+ vadd.i64 q13, q0, q9
+ vadd.i64 q1, q1, q8
+ vshl.i64 q8, q8, #26
+ vadd.i64 q15, q1, q4
+ vsub.i64 q2, q2, q10
+ vshr.s64 q10, q13, #26
+ vsub.i64 q8, q12, q8
+ vshr.s64 q12, q15, #25
+ vadd.i64 q3, q3, q10
+ vshl.i64 q10, q10, #26
+ vadd.i64 q13, q3, q4
+ vadd.i64 q14, q14, q12
+ add r2, r3, #144
+ vshl.i64 q12, q12, #25
+ add r4, r3, #192
+ vadd.i64 q15, q14, q9
+ add r2, r2, #8
+ vsub.i64 q0, q0, q10
+ add r4, r4, #8
+ vshr.s64 q10, q13, #25
+ vsub.i64 q1, q1, q12
+ vshr.s64 q12, q15, #26
+ vadd.i64 q13, q10, q10
+ vadd.i64 q11, q11, q12
+ vtrn.32 d16, d2
+ vshl.i64 q12, q12, #26
+ vtrn.32 d17, d3
+ vadd.i64 q1, q11, q4
+ vadd.i64 q4, q5, q13
+ vst1.8 d16, [r2, : 64]!
+ vshl.i64 q5, q10, #4
+ vst1.8 d17, [r4, : 64]!
+ vsub.i64 q8, q14, q12
+ vshr.s64 q1, q1, #25
+ vadd.i64 q4, q4, q5
+ vadd.i64 q5, q6, q1
+ vshl.i64 q1, q1, #25
+ vadd.i64 q6, q5, q9
+ vadd.i64 q4, q4, q10
+ vshl.i64 q10, q10, #25
+ vadd.i64 q9, q4, q9
+ vsub.i64 q1, q11, q1
+ vshr.s64 q6, q6, #26
+ vsub.i64 q3, q3, q10
+ vtrn.32 d16, d2
+ vshr.s64 q9, q9, #26
+ vtrn.32 d17, d3
+ vadd.i64 q1, q2, q6
+ vst1.8 d16, [r2, : 64]
+ vshl.i64 q2, q6, #26
+ vst1.8 d17, [r4, : 64]
+ vadd.i64 q6, q7, q9
+ vtrn.32 d0, d6
+ vshl.i64 q7, q9, #26
+ vtrn.32 d1, d7
+ vsub.i64 q2, q5, q2
+ add r2, r2, #16
+ vsub.i64 q3, q4, q7
+ vst1.8 d0, [r2, : 64]
+ add r4, r4, #16
+ vst1.8 d1, [r4, : 64]
+ vtrn.32 d4, d2
+ vtrn.32 d5, d3
+ sub r2, r2, #8
+ sub r4, r4, #8
+ vtrn.32 d6, d12
+ vtrn.32 d7, d13
+ vst1.8 d4, [r2, : 64]
+ vst1.8 d5, [r4, : 64]
+ sub r2, r2, #24
+ sub r4, r4, #24
+ vst1.8 d6, [r2, : 64]
+ vst1.8 d7, [r4, : 64]
+ add r2, r3, #336
+ add r4, r3, #288
+ vld1.8 {d0-d1}, [r2, : 128]!
+ vld1.8 {d2-d3}, [r4, : 128]!
+ vadd.i32 q0, q0, q1
+ vld1.8 {d2-d3}, [r2, : 128]!
+ vld1.8 {d4-d5}, [r4, : 128]!
+ vadd.i32 q1, q1, q2
+ add r5, r3, #288
+ vld1.8 {d4}, [r2, : 64]
+ vld1.8 {d6}, [r4, : 64]
+ vadd.i32 q2, q2, q3
+ vst1.8 {d0-d1}, [r5, : 128]!
+ vst1.8 {d2-d3}, [r5, : 128]!
+ vst1.8 d4, [r5, : 64]
+ add r2, r3, #48
+ add r4, r3, #144
+ vld1.8 {d0-d1}, [r4, : 128]!
+ vld1.8 {d2-d3}, [r4, : 128]!
+ vld1.8 {d4}, [r4, : 64]
+ add r4, r3, #288
+ vld1.8 {d6-d7}, [r4, : 128]!
+ vtrn.32 q0, q3
+ vld1.8 {d8-d9}, [r4, : 128]!
+ vshl.i32 q5, q0, #4
+ vtrn.32 q1, q4
+ vshl.i32 q6, q3, #4
+ vadd.i32 q5, q5, q0
+ vadd.i32 q6, q6, q3
+ vshl.i32 q7, q1, #4
+ vld1.8 {d5}, [r4, : 64]
+ vshl.i32 q8, q4, #4
+ vtrn.32 d4, d5
+ vadd.i32 q7, q7, q1
+ vadd.i32 q8, q8, q4
+ vld1.8 {d18-d19}, [r2, : 128]!
+ vshl.i32 q10, q2, #4
+ vld1.8 {d22-d23}, [r2, : 128]!
+ vadd.i32 q10, q10, q2
+ vld1.8 {d24}, [r2, : 64]
+ vadd.i32 q5, q5, q0
+ add r2, r3, #240
+ vld1.8 {d26-d27}, [r2, : 128]!
+ vadd.i32 q6, q6, q3
+ vld1.8 {d28-d29}, [r2, : 128]!
+ vadd.i32 q8, q8, q4
+ vld1.8 {d25}, [r2, : 64]
+ vadd.i32 q10, q10, q2
+ vtrn.32 q9, q13
+ vadd.i32 q7, q7, q1
+ vadd.i32 q5, q5, q0
+ vtrn.32 q11, q14
+ vadd.i32 q6, q6, q3
+ add r2, sp, #528
+ vadd.i32 q10, q10, q2
+ vtrn.32 d24, d25
+ vst1.8 {d12-d13}, [r2, : 128]!
+ vshl.i32 q6, q13, #1
+ vst1.8 {d20-d21}, [r2, : 128]!
+ vshl.i32 q10, q14, #1
+ vst1.8 {d12-d13}, [r2, : 128]!
+ vshl.i32 q15, q12, #1
+ vadd.i32 q8, q8, q4
+ vext.32 d10, d31, d30, #0
+ vadd.i32 q7, q7, q1
+ vst1.8 {d16-d17}, [r2, : 128]!
+ vmull.s32 q8, d18, d5
+ vmlal.s32 q8, d26, d4
+ vmlal.s32 q8, d19, d9
+ vmlal.s32 q8, d27, d3
+ vmlal.s32 q8, d22, d8
+ vmlal.s32 q8, d28, d2
+ vmlal.s32 q8, d23, d7
+ vmlal.s32 q8, d29, d1
+ vmlal.s32 q8, d24, d6
+ vmlal.s32 q8, d25, d0
+ vst1.8 {d14-d15}, [r2, : 128]!
+ vmull.s32 q2, d18, d4
+ vmlal.s32 q2, d12, d9
+ vmlal.s32 q2, d13, d8
+ vmlal.s32 q2, d19, d3
+ vmlal.s32 q2, d22, d2
+ vmlal.s32 q2, d23, d1
+ vmlal.s32 q2, d24, d0
+ vst1.8 {d20-d21}, [r2, : 128]!
+ vmull.s32 q7, d18, d9
+ vmlal.s32 q7, d26, d3
+ vmlal.s32 q7, d19, d8
+ vmlal.s32 q7, d27, d2
+ vmlal.s32 q7, d22, d7
+ vmlal.s32 q7, d28, d1
+ vmlal.s32 q7, d23, d6
+ vmlal.s32 q7, d29, d0
+ vst1.8 {d10-d11}, [r2, : 128]!
+ vmull.s32 q5, d18, d3
+ vmlal.s32 q5, d19, d2
+ vmlal.s32 q5, d22, d1
+ vmlal.s32 q5, d23, d0
+ vmlal.s32 q5, d12, d8
+ vst1.8 {d16-d17}, [r2, : 128]!
+ vmull.s32 q4, d18, d8
+ vmlal.s32 q4, d26, d2
+ vmlal.s32 q4, d19, d7
+ vmlal.s32 q4, d27, d1
+ vmlal.s32 q4, d22, d6
+ vmlal.s32 q4, d28, d0
+ vmull.s32 q8, d18, d7
+ vmlal.s32 q8, d26, d1
+ vmlal.s32 q8, d19, d6
+ vmlal.s32 q8, d27, d0
+ add r2, sp, #544
+ vld1.8 {d20-d21}, [r2, : 128]
+ vmlal.s32 q7, d24, d21
+ vmlal.s32 q7, d25, d20
+ vmlal.s32 q4, d23, d21
+ vmlal.s32 q4, d29, d20
+ vmlal.s32 q8, d22, d21
+ vmlal.s32 q8, d28, d20
+ vmlal.s32 q5, d24, d20
+ vst1.8 {d14-d15}, [r2, : 128]
+ vmull.s32 q7, d18, d6
+ vmlal.s32 q7, d26, d0
+ add r2, sp, #624
+ vld1.8 {d30-d31}, [r2, : 128]
+ vmlal.s32 q2, d30, d21
+ vmlal.s32 q7, d19, d21
+ vmlal.s32 q7, d27, d20
+ add r2, sp, #592
+ vld1.8 {d26-d27}, [r2, : 128]
+ vmlal.s32 q4, d25, d27
+ vmlal.s32 q8, d29, d27
+ vmlal.s32 q8, d25, d26
+ vmlal.s32 q7, d28, d27
+ vmlal.s32 q7, d29, d26
+ add r2, sp, #576
+ vld1.8 {d28-d29}, [r2, : 128]
+ vmlal.s32 q4, d24, d29
+ vmlal.s32 q8, d23, d29
+ vmlal.s32 q8, d24, d28
+ vmlal.s32 q7, d22, d29
+ vmlal.s32 q7, d23, d28
+ vst1.8 {d8-d9}, [r2, : 128]
+ add r2, sp, #528
+ vld1.8 {d8-d9}, [r2, : 128]
+ vmlal.s32 q7, d24, d9
+ vmlal.s32 q7, d25, d31
+ vmull.s32 q1, d18, d2
+ vmlal.s32 q1, d19, d1
+ vmlal.s32 q1, d22, d0
+ vmlal.s32 q1, d24, d27
+ vmlal.s32 q1, d23, d20
+ vmlal.s32 q1, d12, d7
+ vmlal.s32 q1, d13, d6
+ vmull.s32 q6, d18, d1
+ vmlal.s32 q6, d19, d0
+ vmlal.s32 q6, d23, d27
+ vmlal.s32 q6, d22, d20
+ vmlal.s32 q6, d24, d26
+ vmull.s32 q0, d18, d0
+ vmlal.s32 q0, d22, d27
+ vmlal.s32 q0, d23, d26
+ vmlal.s32 q0, d24, d31
+ vmlal.s32 q0, d19, d20
+ add r2, sp, #608
+ vld1.8 {d18-d19}, [r2, : 128]
+ vmlal.s32 q2, d18, d7
+ vmlal.s32 q5, d18, d6
+ vmlal.s32 q1, d18, d21
+ vmlal.s32 q0, d18, d28
+ vmlal.s32 q6, d18, d29
+ vmlal.s32 q2, d19, d6
+ vmlal.s32 q5, d19, d21
+ vmlal.s32 q1, d19, d29
+ vmlal.s32 q0, d19, d9
+ vmlal.s32 q6, d19, d28
+ add r2, sp, #560
+ vld1.8 {d18-d19}, [r2, : 128]
+ add r2, sp, #480
+ vld1.8 {d22-d23}, [r2, : 128]
+ vmlal.s32 q5, d19, d7
+ vmlal.s32 q0, d18, d21
+ vmlal.s32 q0, d19, d29
+ vmlal.s32 q6, d18, d6
+ add r2, sp, #496
+ vld1.8 {d6-d7}, [r2, : 128]
+ vmlal.s32 q6, d19, d21
+ add r2, sp, #544
+ vld1.8 {d18-d19}, [r2, : 128]
+ vmlal.s32 q0, d30, d8
+ add r2, sp, #640
+ vld1.8 {d20-d21}, [r2, : 128]
+ vmlal.s32 q5, d30, d29
+ add r2, sp, #576
+ vld1.8 {d24-d25}, [r2, : 128]
+ vmlal.s32 q1, d30, d28
+ vadd.i64 q13, q0, q11
+ vadd.i64 q14, q5, q11
+ vmlal.s32 q6, d30, d9
+ vshr.s64 q4, q13, #26
+ vshr.s64 q13, q14, #26
+ vadd.i64 q7, q7, q4
+ vshl.i64 q4, q4, #26
+ vadd.i64 q14, q7, q3
+ vadd.i64 q9, q9, q13
+ vshl.i64 q13, q13, #26
+ vadd.i64 q15, q9, q3
+ vsub.i64 q0, q0, q4
+ vshr.s64 q4, q14, #25
+ vsub.i64 q5, q5, q13
+ vshr.s64 q13, q15, #25
+ vadd.i64 q6, q6, q4
+ vshl.i64 q4, q4, #25
+ vadd.i64 q14, q6, q11
+ vadd.i64 q2, q2, q13
+ vsub.i64 q4, q7, q4
+ vshr.s64 q7, q14, #26
+ vshl.i64 q13, q13, #25
+ vadd.i64 q14, q2, q11
+ vadd.i64 q8, q8, q7
+ vshl.i64 q7, q7, #26
+ vadd.i64 q15, q8, q3
+ vsub.i64 q9, q9, q13
+ vshr.s64 q13, q14, #26
+ vsub.i64 q6, q6, q7
+ vshr.s64 q7, q15, #25
+ vadd.i64 q10, q10, q13
+ vshl.i64 q13, q13, #26
+ vadd.i64 q14, q10, q3
+ vadd.i64 q1, q1, q7
+ add r2, r3, #240
+ vshl.i64 q7, q7, #25
+ add r4, r3, #144
+ vadd.i64 q15, q1, q11
+ add r2, r2, #8
+ vsub.i64 q2, q2, q13
+ add r4, r4, #8
+ vshr.s64 q13, q14, #25
+ vsub.i64 q7, q8, q7
+ vshr.s64 q8, q15, #26
+ vadd.i64 q14, q13, q13
+ vadd.i64 q12, q12, q8
+ vtrn.32 d12, d14
+ vshl.i64 q8, q8, #26
+ vtrn.32 d13, d15
+ vadd.i64 q3, q12, q3
+ vadd.i64 q0, q0, q14
+ vst1.8 d12, [r2, : 64]!
+ vshl.i64 q7, q13, #4
+ vst1.8 d13, [r4, : 64]!
+ vsub.i64 q1, q1, q8
+ vshr.s64 q3, q3, #25
+ vadd.i64 q0, q0, q7
+ vadd.i64 q5, q5, q3
+ vshl.i64 q3, q3, #25
+ vadd.i64 q6, q5, q11
+ vadd.i64 q0, q0, q13
+ vshl.i64 q7, q13, #25
+ vadd.i64 q8, q0, q11
+ vsub.i64 q3, q12, q3
+ vshr.s64 q6, q6, #26
+ vsub.i64 q7, q10, q7
+ vtrn.32 d2, d6
+ vshr.s64 q8, q8, #26
+ vtrn.32 d3, d7
+ vadd.i64 q3, q9, q6
+ vst1.8 d2, [r2, : 64]
+ vshl.i64 q6, q6, #26
+ vst1.8 d3, [r4, : 64]
+ vadd.i64 q1, q4, q8
+ vtrn.32 d4, d14
+ vshl.i64 q4, q8, #26
+ vtrn.32 d5, d15
+ vsub.i64 q5, q5, q6
+ add r2, r2, #16
+ vsub.i64 q0, q0, q4
+ vst1.8 d4, [r2, : 64]
+ add r4, r4, #16
+ vst1.8 d5, [r4, : 64]
+ vtrn.32 d10, d6
+ vtrn.32 d11, d7
+ sub r2, r2, #8
+ sub r4, r4, #8
+ vtrn.32 d0, d2
+ vtrn.32 d1, d3
+ vst1.8 d10, [r2, : 64]
+ vst1.8 d11, [r4, : 64]
+ sub r2, r2, #24
+ sub r4, r4, #24
+ vst1.8 d0, [r2, : 64]
+ vst1.8 d1, [r4, : 64]
+ ldr r2, [sp, #456]
+ ldr r4, [sp, #460]
+ subs r5, r2, #1
+ bge .Lmainloop
+ add r1, r3, #144
+ add r2, r3, #336
+ vld1.8 {d0-d1}, [r1, : 128]!
+ vld1.8 {d2-d3}, [r1, : 128]!
+ vld1.8 {d4}, [r1, : 64]
+ vst1.8 {d0-d1}, [r2, : 128]!
+ vst1.8 {d2-d3}, [r2, : 128]!
+ vst1.8 d4, [r2, : 64]
+ movw r1, #0
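+	// invert Z: raise it to 2^255 - 21 (Fermat) with the standard
+	// curve25519 square-and-multiply chain; r1 indexes the 12 chain
+	// steps and r5 picks the number of squarings in each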
+.Linvertloop:
+ add r2, r3, #144
+ movw r4, #0
+ movw r5, #2
+ cmp r1, #1
+ moveq r5, #1
+ addeq r2, r3, #336
+ addeq r4, r3, #48
+ cmp r1, #2
+ moveq r5, #1
+ addeq r2, r3, #48
+ cmp r1, #3
+ moveq r5, #5
+ addeq r4, r3, #336
+ cmp r1, #4
+ moveq r5, #10
+ cmp r1, #5
+ moveq r5, #20
+ cmp r1, #6
+ moveq r5, #10
+ addeq r2, r3, #336
+ addeq r4, r3, #336
+ cmp r1, #7
+ moveq r5, #50
+ cmp r1, #8
+ moveq r5, #100
+ cmp r1, #9
+ moveq r5, #50
+ addeq r2, r3, #336
+ cmp r1, #10
+ moveq r5, #5
+ addeq r2, r3, #48
+ cmp r1, #11
+ moveq r5, #0
+ addeq r2, r3, #96
+ add r6, r3, #144
+ add r7, r3, #288
+ vld1.8 {d0-d1}, [r6, : 128]!
+ vld1.8 {d2-d3}, [r6, : 128]!
+ vld1.8 {d4}, [r6, : 64]
+ vst1.8 {d0-d1}, [r7, : 128]!
+ vst1.8 {d2-d3}, [r7, : 128]!
+ vst1.8 d4, [r7, : 64]
+ cmp r5, #0
+ beq .Lskipsquaringloop
+.Lsquaringloop:
+ add r6, r3, #288
+ add r7, r3, #288
+ add r8, r3, #288
+ vmov.i32 q0, #19
+ vmov.i32 q1, #0
+ vmov.i32 q2, #1
+ vzip.i32 q1, q2
+ vld1.8 {d4-d5}, [r7, : 128]!
+ vld1.8 {d6-d7}, [r7, : 128]!
+ vld1.8 {d9}, [r7, : 64]
+ vld1.8 {d10-d11}, [r6, : 128]!
+ add r7, sp, #384
+ vld1.8 {d12-d13}, [r6, : 128]!
+ vmul.i32 q7, q2, q0
+ vld1.8 {d8}, [r6, : 64]
+ vext.32 d17, d11, d10, #1
+ vmul.i32 q9, q3, q0
+ vext.32 d16, d10, d8, #1
+ vshl.u32 q10, q5, q1
+ vext.32 d22, d14, d4, #1
+ vext.32 d24, d18, d6, #1
+ vshl.u32 q13, q6, q1
+ vshl.u32 d28, d8, d2
+ vrev64.i32 d22, d22
+ vmul.i32 d1, d9, d1
+ vrev64.i32 d24, d24
+ vext.32 d29, d8, d13, #1
+ vext.32 d0, d1, d9, #1
+ vrev64.i32 d0, d0
+ vext.32 d2, d9, d1, #1
+ vext.32 d23, d15, d5, #1
+ vmull.s32 q4, d20, d4
+ vrev64.i32 d23, d23
+ vmlal.s32 q4, d21, d1
+ vrev64.i32 d2, d2
+ vmlal.s32 q4, d26, d19
+ vext.32 d3, d5, d15, #1
+ vmlal.s32 q4, d27, d18
+ vrev64.i32 d3, d3
+ vmlal.s32 q4, d28, d15
+ vext.32 d14, d12, d11, #1
+ vmull.s32 q5, d16, d23
+ vext.32 d15, d13, d12, #1
+ vmlal.s32 q5, d17, d4
+ vst1.8 d8, [r7, : 64]!
+ vmlal.s32 q5, d14, d1
+ vext.32 d12, d9, d8, #0
+ vmlal.s32 q5, d15, d19
+ vmov.i64 d13, #0
+ vmlal.s32 q5, d29, d18
+ vext.32 d25, d19, d7, #1
+ vmlal.s32 q6, d20, d5
+ vrev64.i32 d25, d25
+ vmlal.s32 q6, d21, d4
+ vst1.8 d11, [r7, : 64]!
+ vmlal.s32 q6, d26, d1
+ vext.32 d9, d10, d10, #0
+ vmlal.s32 q6, d27, d19
+ vmov.i64 d8, #0
+ vmlal.s32 q6, d28, d18
+ vmlal.s32 q4, d16, d24
+ vmlal.s32 q4, d17, d5
+ vmlal.s32 q4, d14, d4
+ vst1.8 d12, [r7, : 64]!
+ vmlal.s32 q4, d15, d1
+ vext.32 d10, d13, d12, #0
+ vmlal.s32 q4, d29, d19
+ vmov.i64 d11, #0
+ vmlal.s32 q5, d20, d6
+ vmlal.s32 q5, d21, d5
+ vmlal.s32 q5, d26, d4
+ vext.32 d13, d8, d8, #0
+ vmlal.s32 q5, d27, d1
+ vmov.i64 d12, #0
+ vmlal.s32 q5, d28, d19
+ vst1.8 d9, [r7, : 64]!
+ vmlal.s32 q6, d16, d25
+ vmlal.s32 q6, d17, d6
+ vst1.8 d10, [r7, : 64]
+ vmlal.s32 q6, d14, d5
+ vext.32 d8, d11, d10, #0
+ vmlal.s32 q6, d15, d4
+ vmov.i64 d9, #0
+ vmlal.s32 q6, d29, d1
+ vmlal.s32 q4, d20, d7
+ vmlal.s32 q4, d21, d6
+ vmlal.s32 q4, d26, d5
+ vext.32 d11, d12, d12, #0
+ vmlal.s32 q4, d27, d4
+ vmov.i64 d10, #0
+ vmlal.s32 q4, d28, d1
+ vmlal.s32 q5, d16, d0
+ sub r6, r7, #32
+ vmlal.s32 q5, d17, d7
+ vmlal.s32 q5, d14, d6
+ vext.32 d30, d9, d8, #0
+ vmlal.s32 q5, d15, d5
+ vld1.8 {d31}, [r6, : 64]!
+ vmlal.s32 q5, d29, d4
+ vmlal.s32 q15, d20, d0
+ vext.32 d0, d6, d18, #1
+ vmlal.s32 q15, d21, d25
+ vrev64.i32 d0, d0
+ vmlal.s32 q15, d26, d24
+ vext.32 d1, d7, d19, #1
+ vext.32 d7, d10, d10, #0
+ vmlal.s32 q15, d27, d23
+ vrev64.i32 d1, d1
+ vld1.8 {d6}, [r6, : 64]
+ vmlal.s32 q15, d28, d22
+ vmlal.s32 q3, d16, d4
+ add r6, r6, #24
+ vmlal.s32 q3, d17, d2
+ vext.32 d4, d31, d30, #0
+ vmov d17, d11
+ vmlal.s32 q3, d14, d1
+ vext.32 d11, d13, d13, #0
+ vext.32 d13, d30, d30, #0
+ vmlal.s32 q3, d15, d0
+ vext.32 d1, d8, d8, #0
+ vmlal.s32 q3, d29, d3
+ vld1.8 {d5}, [r6, : 64]
+ sub r6, r6, #16
+ vext.32 d10, d6, d6, #0
+ vmov.i32 q1, #0xffffffff
+ vshl.i64 q4, q1, #25
+ add r7, sp, #480
+ vld1.8 {d14-d15}, [r7, : 128]
+ vadd.i64 q9, q2, q7
+ vshl.i64 q1, q1, #26
+ vshr.s64 q10, q9, #26
+ vld1.8 {d0}, [r6, : 64]!
+ vadd.i64 q5, q5, q10
+ vand q9, q9, q1
+ vld1.8 {d16}, [r6, : 64]!
+ add r6, sp, #496
+ vld1.8 {d20-d21}, [r6, : 128]
+ vadd.i64 q11, q5, q10
+ vsub.i64 q2, q2, q9
+ vshr.s64 q9, q11, #25
+ vext.32 d12, d5, d4, #0
+ vand q11, q11, q4
+ vadd.i64 q0, q0, q9
+ vmov d19, d7
+ vadd.i64 q3, q0, q7
+ vsub.i64 q5, q5, q11
+ vshr.s64 q11, q3, #26
+ vext.32 d18, d11, d10, #0
+ vand q3, q3, q1
+ vadd.i64 q8, q8, q11
+ vadd.i64 q11, q8, q10
+ vsub.i64 q0, q0, q3
+ vshr.s64 q3, q11, #25
+ vand q11, q11, q4
+ vadd.i64 q3, q6, q3
+ vadd.i64 q6, q3, q7
+ vsub.i64 q8, q8, q11
+ vshr.s64 q11, q6, #26
+ vand q6, q6, q1
+ vadd.i64 q9, q9, q11
+ vadd.i64 d25, d19, d21
+ vsub.i64 q3, q3, q6
+ vshr.s64 d23, d25, #25
+ vand q4, q12, q4
+ vadd.i64 d21, d23, d23
+ vshl.i64 d25, d23, #4
+ vadd.i64 d21, d21, d23
+ vadd.i64 d25, d25, d21
+ vadd.i64 d4, d4, d25
+ vzip.i32 q0, q8
+ vadd.i64 d12, d4, d14
+ add r6, r8, #8
+ vst1.8 d0, [r6, : 64]
+ vsub.i64 d19, d19, d9
+ add r6, r6, #16
+ vst1.8 d16, [r6, : 64]
+ vshr.s64 d22, d12, #26
+ vand q0, q6, q1
+ vadd.i64 d10, d10, d22
+ vzip.i32 q3, q9
+ vsub.i64 d4, d4, d0
+ sub r6, r6, #8
+ vst1.8 d6, [r6, : 64]
+ add r6, r6, #16
+ vst1.8 d18, [r6, : 64]
+ vzip.i32 q2, q5
+ sub r6, r6, #32
+ vst1.8 d4, [r6, : 64]
+ subs r5, r5, #1
+ bhi .Lsquaringloop
+.Lskipsquaringloop:
+	mov r2, r2			// no-op (kept from the SUPERCOP original)
+ add r5, r3, #288
+ add r6, r3, #144
+ vmov.i32 q0, #19
+ vmov.i32 q1, #0
+ vmov.i32 q2, #1
+ vzip.i32 q1, q2
+ vld1.8 {d4-d5}, [r5, : 128]!
+ vld1.8 {d6-d7}, [r5, : 128]!
+ vld1.8 {d9}, [r5, : 64]
+ vld1.8 {d10-d11}, [r2, : 128]!
+ add r5, sp, #384
+ vld1.8 {d12-d13}, [r2, : 128]!
+ vmul.i32 q7, q2, q0
+ vld1.8 {d8}, [r2, : 64]
+ vext.32 d17, d11, d10, #1
+ vmul.i32 q9, q3, q0
+ vext.32 d16, d10, d8, #1
+ vshl.u32 q10, q5, q1
+ vext.32 d22, d14, d4, #1
+ vext.32 d24, d18, d6, #1
+ vshl.u32 q13, q6, q1
+ vshl.u32 d28, d8, d2
+ vrev64.i32 d22, d22
+ vmul.i32 d1, d9, d1
+ vrev64.i32 d24, d24
+ vext.32 d29, d8, d13, #1
+ vext.32 d0, d1, d9, #1
+ vrev64.i32 d0, d0
+ vext.32 d2, d9, d1, #1
+ vext.32 d23, d15, d5, #1
+ vmull.s32 q4, d20, d4
+ vrev64.i32 d23, d23
+ vmlal.s32 q4, d21, d1
+ vrev64.i32 d2, d2
+ vmlal.s32 q4, d26, d19
+ vext.32 d3, d5, d15, #1
+ vmlal.s32 q4, d27, d18
+ vrev64.i32 d3, d3
+ vmlal.s32 q4, d28, d15
+ vext.32 d14, d12, d11, #1
+ vmull.s32 q5, d16, d23
+ vext.32 d15, d13, d12, #1
+ vmlal.s32 q5, d17, d4
+ vst1.8 d8, [r5, : 64]!
+ vmlal.s32 q5, d14, d1
+ vext.32 d12, d9, d8, #0
+ vmlal.s32 q5, d15, d19
+ vmov.i64 d13, #0
+ vmlal.s32 q5, d29, d18
+ vext.32 d25, d19, d7, #1
+ vmlal.s32 q6, d20, d5
+ vrev64.i32 d25, d25
+ vmlal.s32 q6, d21, d4
+ vst1.8 d11, [r5, : 64]!
+ vmlal.s32 q6, d26, d1
+ vext.32 d9, d10, d10, #0
+ vmlal.s32 q6, d27, d19
+ vmov.i64 d8, #0
+ vmlal.s32 q6, d28, d18
+ vmlal.s32 q4, d16, d24
+ vmlal.s32 q4, d17, d5
+ vmlal.s32 q4, d14, d4
+ vst1.8 d12, [r5, : 64]!
+ vmlal.s32 q4, d15, d1
+ vext.32 d10, d13, d12, #0
+ vmlal.s32 q4, d29, d19
+ vmov.i64 d11, #0
+ vmlal.s32 q5, d20, d6
+ vmlal.s32 q5, d21, d5
+ vmlal.s32 q5, d26, d4
+ vext.32 d13, d8, d8, #0
+ vmlal.s32 q5, d27, d1
+ vmov.i64 d12, #0
+ vmlal.s32 q5, d28, d19
+ vst1.8 d9, [r5, : 64]!
+ vmlal.s32 q6, d16, d25
+ vmlal.s32 q6, d17, d6
+ vst1.8 d10, [r5, : 64]
+ vmlal.s32 q6, d14, d5
+ vext.32 d8, d11, d10, #0
+ vmlal.s32 q6, d15, d4
+ vmov.i64 d9, #0
+ vmlal.s32 q6, d29, d1
+ vmlal.s32 q4, d20, d7
+ vmlal.s32 q4, d21, d6
+ vmlal.s32 q4, d26, d5
+ vext.32 d11, d12, d12, #0
+ vmlal.s32 q4, d27, d4
+ vmov.i64 d10, #0
+ vmlal.s32 q4, d28, d1
+ vmlal.s32 q5, d16, d0
+ sub r2, r5, #32
+ vmlal.s32 q5, d17, d7
+ vmlal.s32 q5, d14, d6
+ vext.32 d30, d9, d8, #0
+ vmlal.s32 q5, d15, d5
+ vld1.8 {d31}, [r2, : 64]!
+ vmlal.s32 q5, d29, d4
+ vmlal.s32 q15, d20, d0
+ vext.32 d0, d6, d18, #1
+ vmlal.s32 q15, d21, d25
+ vrev64.i32 d0, d0
+ vmlal.s32 q15, d26, d24
+ vext.32 d1, d7, d19, #1
+ vext.32 d7, d10, d10, #0
+ vmlal.s32 q15, d27, d23
+ vrev64.i32 d1, d1
+ vld1.8 {d6}, [r2, : 64]
+ vmlal.s32 q15, d28, d22
+ vmlal.s32 q3, d16, d4
+ add r2, r2, #24
+ vmlal.s32 q3, d17, d2
+ vext.32 d4, d31, d30, #0
+ vmov d17, d11
+ vmlal.s32 q3, d14, d1
+ vext.32 d11, d13, d13, #0
+ vext.32 d13, d30, d30, #0
+ vmlal.s32 q3, d15, d0
+ vext.32 d1, d8, d8, #0
+ vmlal.s32 q3, d29, d3
+ vld1.8 {d5}, [r2, : 64]
+ sub r2, r2, #16
+ vext.32 d10, d6, d6, #0
+ vmov.i32 q1, #0xffffffff
+ vshl.i64 q4, q1, #25
+ add r5, sp, #480
+ vld1.8 {d14-d15}, [r5, : 128]
+ vadd.i64 q9, q2, q7
+ vshl.i64 q1, q1, #26
+ vshr.s64 q10, q9, #26
+ vld1.8 {d0}, [r2, : 64]!
+ vadd.i64 q5, q5, q10
+ vand q9, q9, q1
+ vld1.8 {d16}, [r2, : 64]!
+ add r2, sp, #496
+ vld1.8 {d20-d21}, [r2, : 128]
+ vadd.i64 q11, q5, q10
+ vsub.i64 q2, q2, q9
+ vshr.s64 q9, q11, #25
+ vext.32 d12, d5, d4, #0
+ vand q11, q11, q4
+ vadd.i64 q0, q0, q9
+ vmov d19, d7
+ vadd.i64 q3, q0, q7
+ vsub.i64 q5, q5, q11
+ vshr.s64 q11, q3, #26
+ vext.32 d18, d11, d10, #0
+ vand q3, q3, q1
+ vadd.i64 q8, q8, q11
+ vadd.i64 q11, q8, q10
+ vsub.i64 q0, q0, q3
+ vshr.s64 q3, q11, #25
+ vand q11, q11, q4
+ vadd.i64 q3, q6, q3
+ vadd.i64 q6, q3, q7
+ vsub.i64 q8, q8, q11
+ vshr.s64 q11, q6, #26
+ vand q6, q6, q1
+ vadd.i64 q9, q9, q11
+ vadd.i64 d25, d19, d21
+ vsub.i64 q3, q3, q6
+ vshr.s64 d23, d25, #25
+ vand q4, q12, q4
+ vadd.i64 d21, d23, d23
+ vshl.i64 d25, d23, #4
+ vadd.i64 d21, d21, d23
+ vadd.i64 d25, d25, d21
+ vadd.i64 d4, d4, d25
+ vzip.i32 q0, q8
+ vadd.i64 d12, d4, d14
+ add r2, r6, #8
+ vst1.8 d0, [r2, : 64]
+ vsub.i64 d19, d19, d9
+ add r2, r2, #16
+ vst1.8 d16, [r2, : 64]
+ vshr.s64 d22, d12, #26
+ vand q0, q6, q1
+ vadd.i64 d10, d10, d22
+ vzip.i32 q3, q9
+ vsub.i64 d4, d4, d0
+ sub r2, r2, #8
+ vst1.8 d6, [r2, : 64]
+ add r2, r2, #16
+ vst1.8 d18, [r2, : 64]
+ vzip.i32 q2, q5
+ sub r2, r2, #32
+ vst1.8 d4, [r2, : 64]
+ cmp r4, #0
+ beq .Lskippostcopy
+ add r2, r3, #144
+ mov r4, r4
+ vld1.8 {d0-d1}, [r2, : 128]!
+ vld1.8 {d2-d3}, [r2, : 128]!
+ vld1.8 {d4}, [r2, : 64]
+ vst1.8 {d0-d1}, [r4, : 128]!
+ vst1.8 {d2-d3}, [r4, : 128]!
+ vst1.8 d4, [r4, : 64]
+.Lskippostcopy:
+ cmp r1, #1
+ bne .Lskipfinalcopy
+ add r2, r3, #288
+ add r4, r3, #144
+ vld1.8 {d0-d1}, [r2, : 128]!
+ vld1.8 {d2-d3}, [r2, : 128]!
+ vld1.8 {d4}, [r2, : 64]
+ vst1.8 {d0-d1}, [r4, : 128]!
+ vst1.8 {d2-d3}, [r4, : 128]!
+ vst1.8 d4, [r4, : 64]
+.Lskipfinalcopy:
+ add r1, r1, #1
+ cmp r1, #12
+ blo .Linvertloop
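+	@ Inversion done. What follows is the final reduction and packing:
+	@ the ten 25/26-bit limbs are loaded, carried end to end (the top
+	@ limb folding back through the factor 19, since 2^255 = 19 mod
+	@ 2^255-19, e.g. r11 = 19*h9 below via x + 16x + 2x), and the
+	@ result is packed into eight 32-bit words stored at r0.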
+ add r1, r3, #144
+ ldr r2, [r1], #4
+ ldr r3, [r1], #4
+ ldr r4, [r1], #4
+ ldr r5, [r1], #4
+ ldr r6, [r1], #4
+ ldr r7, [r1], #4
+ ldr r8, [r1], #4
+ ldr r9, [r1], #4
+ ldr r10, [r1], #4
+ ldr r1, [r1]
+ add r11, r1, r1, LSL #4
+ add r11, r11, r1, LSL #1
+ add r11, r11, #16777216
+ mov r11, r11, ASR #25
+ add r11, r11, r2
+ mov r11, r11, ASR #26
+ add r11, r11, r3
+ mov r11, r11, ASR #25
+ add r11, r11, r4
+ mov r11, r11, ASR #26
+ add r11, r11, r5
+ mov r11, r11, ASR #25
+ add r11, r11, r6
+ mov r11, r11, ASR #26
+ add r11, r11, r7
+ mov r11, r11, ASR #25
+ add r11, r11, r8
+ mov r11, r11, ASR #26
+ add r11, r11, r9
+ mov r11, r11, ASR #25
+ add r11, r11, r10
+ mov r11, r11, ASR #26
+ add r11, r11, r1
+ mov r11, r11, ASR #25
+ add r2, r2, r11
+ add r2, r2, r11, LSL #1
+ add r2, r2, r11, LSL #4
+ mov r11, r2, ASR #26
+ add r3, r3, r11
+ sub r2, r2, r11, LSL #26
+ mov r11, r3, ASR #25
+ add r4, r4, r11
+ sub r3, r3, r11, LSL #25
+ mov r11, r4, ASR #26
+ add r5, r5, r11
+ sub r4, r4, r11, LSL #26
+ mov r11, r5, ASR #25
+ add r6, r6, r11
+ sub r5, r5, r11, LSL #25
+ mov r11, r6, ASR #26
+ add r7, r7, r11
+ sub r6, r6, r11, LSL #26
+ mov r11, r7, ASR #25
+ add r8, r8, r11
+ sub r7, r7, r11, LSL #25
+ mov r11, r8, ASR #26
+ add r9, r9, r11
+ sub r8, r8, r11, LSL #26
+ mov r11, r9, ASR #25
+ add r10, r10, r11
+ sub r9, r9, r11, LSL #25
+ mov r11, r10, ASR #26
+ add r1, r1, r11
+ sub r10, r10, r11, LSL #26
+ mov r11, r1, ASR #25
+ sub r1, r1, r11, LSL #25
+ add r2, r2, r3, LSL #26
+ mov r3, r3, LSR #6
+ add r3, r3, r4, LSL #19
+ mov r4, r4, LSR #13
+ add r4, r4, r5, LSL #13
+ mov r5, r5, LSR #19
+ add r5, r5, r6, LSL #6
+ add r6, r7, r8, LSL #25
+ mov r7, r8, LSR #7
+ add r7, r7, r9, LSL #19
+ mov r8, r9, LSR #13
+ add r8, r8, r10, LSL #12
+ mov r9, r10, LSR #20
+ add r1, r9, r1, LSL #6
+ str r2, [r0]
+ str r3, [r0, #4]
+ str r4, [r0, #8]
+ str r5, [r0, #12]
+ str r6, [r0, #16]
+ str r7, [r0, #20]
+ str r8, [r0, #24]
+ str r1, [r0, #28]
+ movw r0, #0
+ mov sp, ip
+ pop {r4-r11, pc}
+ENDPROC(curve25519_neon)
diff --git a/arch/arm/crypto/curve25519-glue.c b/arch/arm/crypto/curve25519-glue.c
new file mode 100644
index 000000000000..2e9e12d2f642
--- /dev/null
+++ b/arch/arm/crypto/curve25519-glue.c
@@ -0,0 +1,127 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ *
+ * Based on public domain code from Daniel J. Bernstein and Peter Schwabe. This
+ * began from SUPERCOP's curve25519/neon2/scalarmult.s, but has subsequently been
+ * manually reworked for use in kernel space.
+ */
+
+#include <asm/hwcap.h>
+#include <asm/neon.h>
+#include <asm/simd.h>
+#include <crypto/internal/kpp.h>
+#include <crypto/internal/simd.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/jump_label.h>
+#include <crypto/curve25519.h>
+
+asmlinkage void curve25519_neon(u8 mypublic[CURVE25519_KEY_SIZE],
+ const u8 secret[CURVE25519_KEY_SIZE],
+ const u8 basepoint[CURVE25519_KEY_SIZE]);
+
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon);
+
+void curve25519_arch(u8 out[CURVE25519_KEY_SIZE],
+ const u8 scalar[CURVE25519_KEY_SIZE],
+ const u8 point[CURVE25519_KEY_SIZE])
+{
+ if (static_branch_likely(&have_neon) && crypto_simd_usable()) {
+ kernel_neon_begin();
+ curve25519_neon(out, scalar, point);
+ kernel_neon_end();
+ } else {
+ curve25519_generic(out, scalar, point);
+ }
+}
+EXPORT_SYMBOL(curve25519_arch);
+
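+/*
+ * Usage sketch (hypothetical caller): given two 32-byte buffers, an
+ * X25519 shared secret can be computed through the library interface
+ * directly:
+ *
+ *	u8 shared[CURVE25519_KEY_SIZE];
+ *
+ *	curve25519_arch(shared, my_secret, their_public);
+ *
+ * my_secret and their_public are placeholder names for the local
+ * private key and the peer's public key.
+ */
+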
+static int curve25519_set_secret(struct crypto_kpp *tfm, const void *buf,
+ unsigned int len)
+{
+ u8 *secret = kpp_tfm_ctx(tfm);
+
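+	/*
+	 * A zero-length buffer requests a freshly generated secret;
+	 * otherwise only a full-size key that differs from the all-zero
+	 * point (the crypto_memneq() check) is accepted.
+	 */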
+ if (!len)
+ curve25519_generate_secret(secret);
+ else if (len == CURVE25519_KEY_SIZE &&
+ crypto_memneq(buf, curve25519_null_point, CURVE25519_KEY_SIZE))
+ memcpy(secret, buf, CURVE25519_KEY_SIZE);
+ else
+ return -EINVAL;
+ return 0;
+}
+
+static int curve25519_compute_value(struct kpp_request *req)
+{
+ struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
+ const u8 *secret = kpp_tfm_ctx(tfm);
+ u8 public_key[CURVE25519_KEY_SIZE];
+ u8 buf[CURVE25519_KEY_SIZE];
+ int copied, nbytes;
+ u8 const *bp;
+
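+	/*
+	 * For compute_shared_secret() the peer's public key arrives via
+	 * req->src; for generate_public_key() req->src is NULL and the
+	 * canonical base point (u = 9) is used instead.
+	 */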
+ if (req->src) {
+ copied = sg_copy_to_buffer(req->src,
+ sg_nents_for_len(req->src,
+ CURVE25519_KEY_SIZE),
+ public_key, CURVE25519_KEY_SIZE);
+ if (copied != CURVE25519_KEY_SIZE)
+ return -EINVAL;
+ bp = public_key;
+ } else {
+ bp = curve25519_base_point;
+ }
+
+ curve25519_arch(buf, secret, bp);
+
+ /* might want less than we've got */
+ nbytes = min_t(size_t, CURVE25519_KEY_SIZE, req->dst_len);
+ copied = sg_copy_from_buffer(req->dst, sg_nents_for_len(req->dst,
+ nbytes),
+ buf, nbytes);
+ if (copied != nbytes)
+ return -EINVAL;
+ return 0;
+}
+
+static unsigned int curve25519_max_size(struct crypto_kpp *tfm)
+{
+ return CURVE25519_KEY_SIZE;
+}
+
+static struct kpp_alg curve25519_alg = {
+ .base.cra_name = "curve25519",
+ .base.cra_driver_name = "curve25519-neon",
+ .base.cra_priority = 200,
+ .base.cra_module = THIS_MODULE,
+ .base.cra_ctxsize = CURVE25519_KEY_SIZE,
+
+ .set_secret = curve25519_set_secret,
+ .generate_public_key = curve25519_compute_value,
+ .compute_shared_secret = curve25519_compute_value,
+ .max_size = curve25519_max_size,
+};
+
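+/*
+ * Registration happens only when NEON is available (see mod_init below);
+ * the crypto API then prefers this implementation over the generic C one
+ * by virtue of its higher cra_priority. Without NEON, curve25519_arch()
+ * above still returns correct results via curve25519_generic().
+ */
+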
+static int __init mod_init(void)
+{
+ if (elf_hwcap & HWCAP_NEON) {
+ static_branch_enable(&have_neon);
+ return crypto_register_kpp(&curve25519_alg);
+ }
+ return 0;
+}
+
+static void __exit mod_exit(void)
+{
+ if (elf_hwcap & HWCAP_NEON)
+ crypto_unregister_kpp(&curve25519_alg);
+}
+
+module_init(mod_init);
+module_exit(mod_exit);
+
+MODULE_ALIAS_CRYPTO("curve25519");
+MODULE_ALIAS_CRYPTO("curve25519-neon");
+MODULE_LICENSE("GPL v2");
diff --git a/arch/arm/crypto/ghash-ce-core.S b/arch/arm/crypto/ghash-ce-core.S
index c47fe81abcb0..534c9647726d 100644
--- a/arch/arm/crypto/ghash-ce-core.S
+++ b/arch/arm/crypto/ghash-ce-core.S
@@ -88,6 +88,7 @@
T3_H .req d17
.text
+ .arch armv8-a
.fpu crypto-neon-fp-armv8
.macro __pmull_p64, rd, rn, rm, b1, b2, b3, b4
diff --git a/arch/arm/crypto/poly1305-armv4.pl b/arch/arm/crypto/poly1305-armv4.pl
new file mode 100644
index 000000000000..6d79498d3115
--- /dev/null
+++ b/arch/arm/crypto/poly1305-armv4.pl
@@ -0,0 +1,1236 @@
+#!/usr/bin/env perl
+# SPDX-License-Identifier: GPL-1.0+ OR BSD-3-Clause
+#
+# ====================================================================
+# Written by Andy Polyakov, @dot-asm, initially for the OpenSSL
+# project.
+# ====================================================================
+#
+# IALU(*)/gcc-4.4 NEON
+#
+# ARM11xx(ARMv6) 7.78/+100% -
+# Cortex-A5 6.35/+130% 3.00
+# Cortex-A8 6.25/+115% 2.36
+# Cortex-A9 5.10/+95% 2.55
+# Cortex-A15 3.85/+85% 1.25(**)
+# Snapdragon S4 5.70/+100% 1.48(**)
+#
+# (*)	this is for -march=armv6, i.e. with a bunch of ldrb loads of data;
+# (**)	these are trade-off results; they can be improved by ~8%, but at
+#	the cost of a 15/12% regression on Cortex-A5/A7. It is even possible
+#	to improve the Cortex-A9 result, but then A5/A7 lose more than 20%;
+
+$flavour = shift;
+if ($flavour=~/\w[\w\-]*\.\w+$/) { $output=$flavour; undef $flavour; }
+else { while (($output=shift) && ($output!~/\w[\w\-]*\.\w+$/)) {} }
+
+if ($flavour && $flavour ne "void") {
+ $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+ ( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
+ ( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate) or
+ die "can't locate arm-xlate.pl";
+
+ open STDOUT,"| \"$^X\" $xlate $flavour $output";
+} else {
+ open STDOUT,">$output";
+}
+
+($ctx,$inp,$len,$padbit)=map("r$_",(0..3));
+
+$code.=<<___;
+#ifndef __KERNEL__
+# include "arm_arch.h"
+#else
+# define __ARM_ARCH__ __LINUX_ARM_ARCH__
+# define __ARM_MAX_ARCH__ __LINUX_ARM_ARCH__
+# define poly1305_init poly1305_init_arm
+# define poly1305_blocks poly1305_blocks_arm
+# define poly1305_emit poly1305_emit_arm
+.globl poly1305_blocks_neon
+#endif
+
+#if defined(__thumb2__)
+.syntax unified
+.thumb
+#else
+.code 32
+#endif
+
+.text
+
+.globl poly1305_emit
+.globl poly1305_blocks
+.globl poly1305_init
+.type poly1305_init,%function
+.align 5
+poly1305_init:
+.Lpoly1305_init:
+ stmdb sp!,{r4-r11}
+
+ eor r3,r3,r3
+ cmp $inp,#0
+ str r3,[$ctx,#0] @ zero hash value
+ str r3,[$ctx,#4]
+ str r3,[$ctx,#8]
+ str r3,[$ctx,#12]
+ str r3,[$ctx,#16]
+ str r3,[$ctx,#36] @ clear is_base2_26
+ add $ctx,$ctx,#20
+
+#ifdef __thumb2__
+ it eq
+#endif
+ moveq r0,#0
+ beq .Lno_key
+
+#if __ARM_MAX_ARCH__>=7
+ mov r3,#-1
+ str r3,[$ctx,#28] @ impossible key power value
+# ifndef __KERNEL__
+ adr r11,.Lpoly1305_init
+ ldr r12,.LOPENSSL_armcap
+# endif
+#endif
+ ldrb r4,[$inp,#0]
+ mov r10,#0x0fffffff
+ ldrb r5,[$inp,#1]
+ and r3,r10,#-4 @ 0x0ffffffc
+ ldrb r6,[$inp,#2]
+ ldrb r7,[$inp,#3]
+ orr r4,r4,r5,lsl#8
+ ldrb r5,[$inp,#4]
+ orr r4,r4,r6,lsl#16
+ ldrb r6,[$inp,#5]
+ orr r4,r4,r7,lsl#24
+ ldrb r7,[$inp,#6]
+ and r4,r4,r10
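+	@ The interleaved ldrb loads assemble the four little-endian words
+	@ of the key's r half; the masks r10/r3 implement the standard
+	@ Poly1305 clamp, r &= 0x0ffffffc0ffffffc0ffffffc0fffffff.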
+
+#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
+# if !defined(_WIN32)
+ ldr r12,[r11,r12] @ OPENSSL_armcap_P
+# endif
+# if defined(__APPLE__) || defined(_WIN32)
+ ldr r12,[r12]
+# endif
+#endif
+ ldrb r8,[$inp,#7]
+ orr r5,r5,r6,lsl#8
+ ldrb r6,[$inp,#8]
+ orr r5,r5,r7,lsl#16
+ ldrb r7,[$inp,#9]
+ orr r5,r5,r8,lsl#24
+ ldrb r8,[$inp,#10]
+ and r5,r5,r3
+
+#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
+ tst r12,#ARMV7_NEON @ check for NEON
+# ifdef __thumb2__
+ adr r9,.Lpoly1305_blocks_neon
+ adr r11,.Lpoly1305_blocks
+ it ne
+ movne r11,r9
+ adr r12,.Lpoly1305_emit
+ orr r11,r11,#1 @ thumb-ify addresses
+ orr r12,r12,#1
+# else
+ add r12,r11,#(.Lpoly1305_emit-.Lpoly1305_init)
+ ite eq
+ addeq r11,r11,#(.Lpoly1305_blocks-.Lpoly1305_init)
+ addne r11,r11,#(.Lpoly1305_blocks_neon-.Lpoly1305_init)
+# endif
+#endif
+ ldrb r9,[$inp,#11]
+ orr r6,r6,r7,lsl#8
+ ldrb r7,[$inp,#12]
+ orr r6,r6,r8,lsl#16
+ ldrb r8,[$inp,#13]
+ orr r6,r6,r9,lsl#24
+ ldrb r9,[$inp,#14]
+ and r6,r6,r3
+
+ ldrb r10,[$inp,#15]
+ orr r7,r7,r8,lsl#8
+ str r4,[$ctx,#0]
+ orr r7,r7,r9,lsl#16
+ str r5,[$ctx,#4]
+ orr r7,r7,r10,lsl#24
+ str r6,[$ctx,#8]
+ and r7,r7,r3
+ str r7,[$ctx,#12]
+#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
+ stmia r2,{r11,r12} @ fill functions table
+ mov r0,#1
+#else
+ mov r0,#0
+#endif
+.Lno_key:
+ ldmia sp!,{r4-r11}
+#if __ARM_ARCH__>=5
+ ret @ bx lr
+#else
+ tst lr,#1
+ moveq pc,lr @ be binary compatible with V4, yet
+ bx lr @ interoperable with Thumb ISA:-)
+#endif
+.size poly1305_init,.-poly1305_init
+___
+{
+my ($h0,$h1,$h2,$h3,$h4,$r0,$r1,$r2,$r3)=map("r$_",(4..12));
+my ($s1,$s2,$s3)=($r1,$r2,$r3);
+
+$code.=<<___;
+.type poly1305_blocks,%function
+.align 5
+poly1305_blocks:
+.Lpoly1305_blocks:
+ stmdb sp!,{r3-r11,lr}
+
+ ands $len,$len,#-16
+ beq .Lno_data
+
+ add $len,$len,$inp @ end pointer
+ sub sp,sp,#32
+
+#if __ARM_ARCH__<7
+ ldmia $ctx,{$h0-$r3} @ load context
+ add $ctx,$ctx,#20
+ str $len,[sp,#16] @ offload stuff
+ str $ctx,[sp,#12]
+#else
+ ldr lr,[$ctx,#36] @ is_base2_26
+ ldmia $ctx!,{$h0-$h4} @ load hash value
+ str $len,[sp,#16] @ offload stuff
+ str $ctx,[sp,#12]
+
+ adds $r0,$h0,$h1,lsl#26 @ base 2^26 -> base 2^32
+ mov $r1,$h1,lsr#6
+ adcs $r1,$r1,$h2,lsl#20
+ mov $r2,$h2,lsr#12
+ adcs $r2,$r2,$h3,lsl#14
+ mov $r3,$h3,lsr#18
+ adcs $r3,$r3,$h4,lsl#8
+ mov $len,#0
+ teq lr,#0
+ str $len,[$ctx,#16] @ clear is_base2_26
+ adc $len,$len,$h4,lsr#24
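+	@ i.e. the base 2^26 value h0 + h1*2^26 + h2*2^52 + h3*2^78 +
+	@ h4*2^104 is repacked as $r0 = h0|h1<<26, $r1 = h1>>6|h2<<20,
+	@ $r2 = h2>>12|h3<<14, $r3 = h3>>18|h4<<8, with inter-word carries
+	@ propagated by the adcs chain and h4>>24 collected last.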
+
+ itttt ne
+ movne $h0,$r0 @ choose between radixes
+ movne $h1,$r1
+ movne $h2,$r2
+ movne $h3,$r3
+ ldmia $ctx,{$r0-$r3} @ load key
+ it ne
+ movne $h4,$len
+#endif
+
+ mov lr,$inp
+ cmp $padbit,#0
+ str $r1,[sp,#20]
+ str $r2,[sp,#24]
+ str $r3,[sp,#28]
+ b .Loop
+
+.align 4
+.Loop:
+#if __ARM_ARCH__<7
+ ldrb r0,[lr],#16 @ load input
+# ifdef __thumb2__
+ it hi
+# endif
+ addhi $h4,$h4,#1 @ 1<<128
+ ldrb r1,[lr,#-15]
+ ldrb r2,[lr,#-14]
+ ldrb r3,[lr,#-13]
+ orr r1,r0,r1,lsl#8
+ ldrb r0,[lr,#-12]
+ orr r2,r1,r2,lsl#16
+ ldrb r1,[lr,#-11]
+ orr r3,r2,r3,lsl#24
+ ldrb r2,[lr,#-10]
+ adds $h0,$h0,r3 @ accumulate input
+
+ ldrb r3,[lr,#-9]
+ orr r1,r0,r1,lsl#8
+ ldrb r0,[lr,#-8]
+ orr r2,r1,r2,lsl#16
+ ldrb r1,[lr,#-7]
+ orr r3,r2,r3,lsl#24
+ ldrb r2,[lr,#-6]
+ adcs $h1,$h1,r3
+
+ ldrb r3,[lr,#-5]
+ orr r1,r0,r1,lsl#8
+ ldrb r0,[lr,#-4]
+ orr r2,r1,r2,lsl#16
+ ldrb r1,[lr,#-3]
+ orr r3,r2,r3,lsl#24
+ ldrb r2,[lr,#-2]
+ adcs $h2,$h2,r3
+
+ ldrb r3,[lr,#-1]
+ orr r1,r0,r1,lsl#8
+ str lr,[sp,#8] @ offload input pointer
+ orr r2,r1,r2,lsl#16
+ add $s1,$r1,$r1,lsr#2
+ orr r3,r2,r3,lsl#24
+#else
+ ldr r0,[lr],#16 @ load input
+ it hi
+ addhi $h4,$h4,#1 @ padbit
+ ldr r1,[lr,#-12]
+ ldr r2,[lr,#-8]
+ ldr r3,[lr,#-4]
+# ifdef __ARMEB__
+ rev r0,r0
+ rev r1,r1
+ rev r2,r2
+ rev r3,r3
+# endif
+ adds $h0,$h0,r0 @ accumulate input
+ str lr,[sp,#8] @ offload input pointer
+ adcs $h1,$h1,r1
+ add $s1,$r1,$r1,lsr#2
+ adcs $h2,$h2,r2
+#endif
+ add $s2,$r2,$r2,lsr#2
+ adcs $h3,$h3,r3
+ add $s3,$r3,$r3,lsr#2
+
+ umull r2,r3,$h1,$r0
+ adc $h4,$h4,#0
+ umull r0,r1,$h0,$r0
+ umlal r2,r3,$h4,$s1
+ umlal r0,r1,$h3,$s1
+ ldr $r1,[sp,#20] @ reload $r1
+ umlal r2,r3,$h2,$s3
+ umlal r0,r1,$h1,$s3
+ umlal r2,r3,$h3,$s2
+ umlal r0,r1,$h2,$s2
+ umlal r2,r3,$h0,$r1
+ str r0,[sp,#0] @ future $h0
+ mul r0,$s2,$h4
+ ldr $r2,[sp,#24] @ reload $r2
+ adds r2,r2,r1 @ d1+=d0>>32
+ eor r1,r1,r1
+ adc lr,r3,#0 @ future $h2
+ str r2,[sp,#4] @ future $h1
+
+ mul r2,$s3,$h4
+ eor r3,r3,r3
+ umlal r0,r1,$h3,$s3
+ ldr $r3,[sp,#28] @ reload $r3
+ umlal r2,r3,$h3,$r0
+ umlal r0,r1,$h2,$r0
+ umlal r2,r3,$h2,$r1
+ umlal r0,r1,$h1,$r1
+ umlal r2,r3,$h1,$r2
+ umlal r0,r1,$h0,$r2
+ umlal r2,r3,$h0,$r3
+ ldr $h0,[sp,#0]
+ mul $h4,$r0,$h4
+ ldr $h1,[sp,#4]
+
+ adds $h2,lr,r0 @ d2+=d1>>32
+ ldr lr,[sp,#8] @ reload input pointer
+ adc r1,r1,#0
+ adds $h3,r2,r1 @ d3+=d2>>32
+ ldr r0,[sp,#16] @ reload end pointer
+ adc r3,r3,#0
+ add $h4,$h4,r3 @ h4+=d3>>32
+
+ and r1,$h4,#-4
+ and $h4,$h4,#3
+ add r1,r1,r1,lsr#2 @ *=5
+ adds $h0,$h0,r1
+ adcs $h1,$h1,#0
+ adcs $h2,$h2,#0
+ adcs $h3,$h3,#0
+ adc $h4,$h4,#0
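+	@ The fold above uses 2^130 = 5 (mod 2^130-5): the bits of h4 at
+	@ 2^130 and above (h4 & ~3, in units of 2^128) are multiplied by
+	@ 5/4 via x + x>>2 and added back into the lowest word.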
+
+ cmp r0,lr @ done yet?
+ bhi .Loop
+
+ ldr $ctx,[sp,#12]
+ add sp,sp,#32
+ stmdb $ctx,{$h0-$h4} @ store the result
+
+.Lno_data:
+#if __ARM_ARCH__>=5
+ ldmia sp!,{r3-r11,pc}
+#else
+ ldmia sp!,{r3-r11,lr}
+ tst lr,#1
+ moveq pc,lr @ be binary compatible with V4, yet
+ bx lr @ interoperable with Thumb ISA:-)
+#endif
+.size poly1305_blocks,.-poly1305_blocks
+___
+}
+{
+my ($ctx,$mac,$nonce)=map("r$_",(0..2));
+my ($h0,$h1,$h2,$h3,$h4,$g0,$g1,$g2,$g3)=map("r$_",(3..11));
+my $g4=$ctx;
+
+$code.=<<___;
+.type poly1305_emit,%function
+.align 5
+poly1305_emit:
+.Lpoly1305_emit:
+ stmdb sp!,{r4-r11}
+
+ ldmia $ctx,{$h0-$h4}
+
+#if __ARM_ARCH__>=7
+ ldr ip,[$ctx,#36] @ is_base2_26
+
+ adds $g0,$h0,$h1,lsl#26 @ base 2^26 -> base 2^32
+ mov $g1,$h1,lsr#6
+ adcs $g1,$g1,$h2,lsl#20
+ mov $g2,$h2,lsr#12
+ adcs $g2,$g2,$h3,lsl#14
+ mov $g3,$h3,lsr#18
+ adcs $g3,$g3,$h4,lsl#8
+ mov $g4,#0
+ adc $g4,$g4,$h4,lsr#24
+
+ tst ip,ip
+ itttt ne
+ movne $h0,$g0
+ movne $h1,$g1
+ movne $h2,$g2
+ movne $h3,$g3
+ it ne
+ movne $h4,$g4
+#endif
+
+ adds $g0,$h0,#5 @ compare to modulus
+ adcs $g1,$h1,#0
+ adcs $g2,$h2,#0
+ adcs $g3,$h3,#0
+ adc $g4,$h4,#0
+ tst $g4,#4 @ did it carry/borrow?
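+	@ g = h+5 overflows into bit 130 (bit 2 of $g4) exactly when
+	@ h >= p = 2^130-5; the conditional moves below then select g,
+	@ whose low 128 bits equal h-p, before the nonce is added.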
+
+#ifdef __thumb2__
+ it ne
+#endif
+ movne $h0,$g0
+ ldr $g0,[$nonce,#0]
+#ifdef __thumb2__
+ it ne
+#endif
+ movne $h1,$g1
+ ldr $g1,[$nonce,#4]
+#ifdef __thumb2__
+ it ne
+#endif
+ movne $h2,$g2
+ ldr $g2,[$nonce,#8]
+#ifdef __thumb2__
+ it ne
+#endif
+ movne $h3,$g3
+ ldr $g3,[$nonce,#12]
+
+ adds $h0,$h0,$g0
+ adcs $h1,$h1,$g1
+ adcs $h2,$h2,$g2
+ adc $h3,$h3,$g3
+
+#if __ARM_ARCH__>=7
+# ifdef __ARMEB__
+ rev $h0,$h0
+ rev $h1,$h1
+ rev $h2,$h2
+ rev $h3,$h3
+# endif
+ str $h0,[$mac,#0]
+ str $h1,[$mac,#4]
+ str $h2,[$mac,#8]
+ str $h3,[$mac,#12]
+#else
+ strb $h0,[$mac,#0]
+ mov $h0,$h0,lsr#8
+ strb $h1,[$mac,#4]
+ mov $h1,$h1,lsr#8
+ strb $h2,[$mac,#8]
+ mov $h2,$h2,lsr#8
+ strb $h3,[$mac,#12]
+ mov $h3,$h3,lsr#8
+
+ strb $h0,[$mac,#1]
+ mov $h0,$h0,lsr#8
+ strb $h1,[$mac,#5]
+ mov $h1,$h1,lsr#8
+ strb $h2,[$mac,#9]
+ mov $h2,$h2,lsr#8
+ strb $h3,[$mac,#13]
+ mov $h3,$h3,lsr#8
+
+ strb $h0,[$mac,#2]
+ mov $h0,$h0,lsr#8
+ strb $h1,[$mac,#6]
+ mov $h1,$h1,lsr#8
+ strb $h2,[$mac,#10]
+ mov $h2,$h2,lsr#8
+ strb $h3,[$mac,#14]
+ mov $h3,$h3,lsr#8
+
+ strb $h0,[$mac,#3]
+ strb $h1,[$mac,#7]
+ strb $h2,[$mac,#11]
+ strb $h3,[$mac,#15]
+#endif
+ ldmia sp!,{r4-r11}
+#if __ARM_ARCH__>=5
+ ret @ bx lr
+#else
+ tst lr,#1
+ moveq pc,lr @ be binary compatible with V4, yet
+ bx lr @ interoperable with Thumb ISA:-)
+#endif
+.size poly1305_emit,.-poly1305_emit
+___
+{
+my ($R0,$R1,$S1,$R2,$S2,$R3,$S3,$R4,$S4) = map("d$_",(0..9));
+my ($D0,$D1,$D2,$D3,$D4, $H0,$H1,$H2,$H3,$H4) = map("q$_",(5..14));
+my ($T0,$T1,$MASK) = map("q$_",(15,4,0));
+
+my ($in2,$zeros,$tbl0,$tbl1) = map("r$_",(4..7));
+
+$code.=<<___;
+#if __ARM_MAX_ARCH__>=7
+.fpu neon
+
+.type poly1305_init_neon,%function
+.align 5
+poly1305_init_neon:
+.Lpoly1305_init_neon:
+ ldr r3,[$ctx,#48] @ first table element
+ cmp r3,#-1 @ is value impossible?
+ bne .Lno_init_neon
+
+ ldr r4,[$ctx,#20] @ load key base 2^32
+ ldr r5,[$ctx,#24]
+ ldr r6,[$ctx,#28]
+ ldr r7,[$ctx,#32]
+
+ and r2,r4,#0x03ffffff @ base 2^32 -> base 2^26
+ mov r3,r4,lsr#26
+ mov r4,r5,lsr#20
+ orr r3,r3,r5,lsl#6
+ mov r5,r6,lsr#14
+ orr r4,r4,r6,lsl#12
+ mov r6,r7,lsr#8
+ orr r5,r5,r7,lsl#18
+ and r3,r3,#0x03ffffff
+ and r4,r4,#0x03ffffff
+ and r5,r5,#0x03ffffff
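+	@ r2-r6 now hold the five 26-bit limbs of r, i.e.
+	@ r = r2 + r3*2^26 + r4*2^52 + r5*2^78 + r6*2^104, small enough
+	@ for the 32x32->64 vmlal.u32 multiply-accumulates that follow.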
+
+ vdup.32 $R0,r2 @ r^1 in both lanes
+ add r2,r3,r3,lsl#2 @ *5
+ vdup.32 $R1,r3
+ add r3,r4,r4,lsl#2
+ vdup.32 $S1,r2
+ vdup.32 $R2,r4
+ add r4,r5,r5,lsl#2
+ vdup.32 $S2,r3
+ vdup.32 $R3,r5
+ add r5,r6,r6,lsl#2
+ vdup.32 $S3,r4
+ vdup.32 $R4,r6
+ vdup.32 $S4,r5
+
+ mov $zeros,#2 @ counter
+
+.Lsquare_neon:
+ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+ @ d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
+ @ d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4
+ @ d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
+ @ d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4
+ @ d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4
+
+ vmull.u32 $D0,$R0,${R0}[1]
+ vmull.u32 $D1,$R1,${R0}[1]
+ vmull.u32 $D2,$R2,${R0}[1]
+ vmull.u32 $D3,$R3,${R0}[1]
+ vmull.u32 $D4,$R4,${R0}[1]
+
+ vmlal.u32 $D0,$R4,${S1}[1]
+ vmlal.u32 $D1,$R0,${R1}[1]
+ vmlal.u32 $D2,$R1,${R1}[1]
+ vmlal.u32 $D3,$R2,${R1}[1]
+ vmlal.u32 $D4,$R3,${R1}[1]
+
+ vmlal.u32 $D0,$R3,${S2}[1]
+ vmlal.u32 $D1,$R4,${S2}[1]
+ vmlal.u32 $D3,$R1,${R2}[1]
+ vmlal.u32 $D2,$R0,${R2}[1]
+ vmlal.u32 $D4,$R2,${R2}[1]
+
+ vmlal.u32 $D0,$R2,${S3}[1]
+ vmlal.u32 $D3,$R0,${R3}[1]
+ vmlal.u32 $D1,$R3,${S3}[1]
+ vmlal.u32 $D2,$R4,${S3}[1]
+ vmlal.u32 $D4,$R1,${R3}[1]
+
+ vmlal.u32 $D3,$R4,${S4}[1]
+ vmlal.u32 $D0,$R1,${S4}[1]
+ vmlal.u32 $D1,$R2,${S4}[1]
+ vmlal.u32 $D2,$R3,${S4}[1]
+ vmlal.u32 $D4,$R0,${R4}[1]
+
+ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+ @ lazy reduction as discussed in "NEON crypto" by D.J. Bernstein
+ @ and P. Schwabe
+ @
+ @ H0>>+H1>>+H2>>+H3>>+H4
+ @ H3>>+H4>>*5+H0>>+H1
+ @
+ @ Trivia.
+ @
+	@ The result of multiplying an n-bit number by an m-bit number is
+	@ n+m bits wide. However! Even though 2^n is an (n+1)-bit number,
+	@ an m-bit number multiplied by 2^n is still only n+m bits wide.
+	@
+	@ The sum of two n-bit numbers is n+1 bits wide; the sum of three,
+	@ and likewise of four, is n+2. The sum of 2^m (n-m)-bit numbers
+	@ and one n-bit number is n+1 bits wide.
+	@
+	@ >>+ denotes Hnext += Hn>>26, Hn &= 0x3ffffff. This means that
+	@ H0, H2, H3 are guaranteed to be 26 bits wide, while H1 and H4
+	@ can be 27. However! In cases where their width exceeds 26 bits,
+	@ they are bounded by 2^26+2^6. This in turn means that the *sum*
+	@ of the products with these values can still be viewed as a sum
+	@ of 52-bit numbers as long as the number of addends is not a
+	@ power of 2. For example,
+	@
+	@ H4 = H4*R0 + H3*R1 + H2*R2 + H1*R3 + H0 * R4,
+	@
+	@ which can't be larger than 5 * (2^26 + 2^6) * (2^26 + 2^6), or
+	@ 5 * (2^52 + 2*2^32 + 2^12), which in turn is smaller than
+	@ 8 * (2^52) or 2^55. However, the value is then multiplied
+	@ by 5, so we should be looking at 5 * 5 * (2^52 + 2^33 + 2^12),
+	@ which is less than 32 * (2^52) or 2^57. And when processing
+	@ data we are looking at three times as many addends...
+	@
+	@ In the key setup procedure, pre-reduced H0 is limited by 5*4+1
+	@ and 5*H4 by 5*5 52-bit addends, or 57 bits. But when hashing
+	@ the input, H0 is limited by (5*4+1)*3 addends, or 58 bits, while
+	@ 5*H4 is limited by 5*5*3, or 59[!] bits. How is this relevant?
+	@ The vmlal.u32 instruction accepts 2x32-bit inputs and writes a
+	@ 2x64-bit result. This means that the result of the reduction has
+	@ to be compressed upon loop wrap-around. This can be done in the
+	@ process of reduction to minimize the number of instructions [as
+	@ well as the number of 128-bit instructions, which benefits
+	@ low-end processors], but one has to ensure that H2 (which is
+	@ narrower than H0) and 5*H4 are not wider than 58 bits, so that
+	@ the result of the right shift by 26 bits fits in 32 bits. This
+	@ is also useful on x86, because it allows paddd to be used in
+	@ place of paddq, which benefits Atom, where paddq is ridiculously
+	@ slow.
+
+ vshr.u64 $T0,$D3,#26
+ vmovn.i64 $D3#lo,$D3
+ vshr.u64 $T1,$D0,#26
+ vmovn.i64 $D0#lo,$D0
+ vadd.i64 $D4,$D4,$T0 @ h3 -> h4
+ vbic.i32 $D3#lo,#0xfc000000 @ &=0x03ffffff
+ vadd.i64 $D1,$D1,$T1 @ h0 -> h1
+ vbic.i32 $D0#lo,#0xfc000000
+
+ vshrn.u64 $T0#lo,$D4,#26
+ vmovn.i64 $D4#lo,$D4
+ vshr.u64 $T1,$D1,#26
+ vmovn.i64 $D1#lo,$D1
+ vadd.i64 $D2,$D2,$T1 @ h1 -> h2
+ vbic.i32 $D4#lo,#0xfc000000
+ vbic.i32 $D1#lo,#0xfc000000
+
+ vadd.i32 $D0#lo,$D0#lo,$T0#lo
+ vshl.u32 $T0#lo,$T0#lo,#2
+ vshrn.u64 $T1#lo,$D2,#26
+ vmovn.i64 $D2#lo,$D2
+ vadd.i32 $D0#lo,$D0#lo,$T0#lo @ h4 -> h0
+ vadd.i32 $D3#lo,$D3#lo,$T1#lo @ h2 -> h3
+ vbic.i32 $D2#lo,#0xfc000000
+
+ vshr.u32 $T0#lo,$D0#lo,#26
+ vbic.i32 $D0#lo,#0xfc000000
+ vshr.u32 $T1#lo,$D3#lo,#26
+ vbic.i32 $D3#lo,#0xfc000000
+ vadd.i32 $D1#lo,$D1#lo,$T0#lo @ h0 -> h1
+ vadd.i32 $D4#lo,$D4#lo,$T1#lo @ h3 -> h4
+
+ subs $zeros,$zeros,#1
+ beq .Lsquare_break_neon
+
+ add $tbl0,$ctx,#(48+0*9*4)
+ add $tbl1,$ctx,#(48+1*9*4)
+
+ vtrn.32 $R0,$D0#lo @ r^2:r^1
+ vtrn.32 $R2,$D2#lo
+ vtrn.32 $R3,$D3#lo
+ vtrn.32 $R1,$D1#lo
+ vtrn.32 $R4,$D4#lo
+
+ vshl.u32 $S2,$R2,#2 @ *5
+ vshl.u32 $S3,$R3,#2
+ vshl.u32 $S1,$R1,#2
+ vshl.u32 $S4,$R4,#2
+ vadd.i32 $S2,$S2,$R2
+ vadd.i32 $S1,$S1,$R1
+ vadd.i32 $S3,$S3,$R3
+ vadd.i32 $S4,$S4,$R4
+
+ vst4.32 {${R0}[0],${R1}[0],${S1}[0],${R2}[0]},[$tbl0]!
+ vst4.32 {${R0}[1],${R1}[1],${S1}[1],${R2}[1]},[$tbl1]!
+ vst4.32 {${S2}[0],${R3}[0],${S3}[0],${R4}[0]},[$tbl0]!
+ vst4.32 {${S2}[1],${R3}[1],${S3}[1],${R4}[1]},[$tbl1]!
+ vst1.32 {${S4}[0]},[$tbl0,:32]
+ vst1.32 {${S4}[1]},[$tbl1,:32]
+
+ b .Lsquare_neon
+
+.align 4
+.Lsquare_break_neon:
+ add $tbl0,$ctx,#(48+2*4*9)
+ add $tbl1,$ctx,#(48+3*4*9)
+
+ vmov $R0,$D0#lo @ r^4:r^3
+ vshl.u32 $S1,$D1#lo,#2 @ *5
+ vmov $R1,$D1#lo
+ vshl.u32 $S2,$D2#lo,#2
+ vmov $R2,$D2#lo
+ vshl.u32 $S3,$D3#lo,#2
+ vmov $R3,$D3#lo
+ vshl.u32 $S4,$D4#lo,#2
+ vmov $R4,$D4#lo
+ vadd.i32 $S1,$S1,$D1#lo
+ vadd.i32 $S2,$S2,$D2#lo
+ vadd.i32 $S3,$S3,$D3#lo
+ vadd.i32 $S4,$S4,$D4#lo
+
+ vst4.32 {${R0}[0],${R1}[0],${S1}[0],${R2}[0]},[$tbl0]!
+ vst4.32 {${R0}[1],${R1}[1],${S1}[1],${R2}[1]},[$tbl1]!
+ vst4.32 {${S2}[0],${R3}[0],${S3}[0],${R4}[0]},[$tbl0]!
+ vst4.32 {${S2}[1],${R3}[1],${S3}[1],${R4}[1]},[$tbl1]!
+ vst1.32 {${S4}[0]},[$tbl0]
+ vst1.32 {${S4}[1]},[$tbl1]
+
+.Lno_init_neon:
+ ret @ bx lr
+.size poly1305_init_neon,.-poly1305_init_neon
+
+.type poly1305_blocks_neon,%function
+.align 5
+poly1305_blocks_neon:
+.Lpoly1305_blocks_neon:
+ ldr ip,[$ctx,#36] @ is_base2_26
+
+ cmp $len,#64
+ blo .Lpoly1305_blocks
+
+ stmdb sp!,{r4-r7}
+ vstmdb sp!,{d8-d15} @ ABI specification says so
+
+ tst ip,ip @ is_base2_26?
+ bne .Lbase2_26_neon
+
+ stmdb sp!,{r1-r3,lr}
+ bl .Lpoly1305_init_neon
+
+ ldr r4,[$ctx,#0] @ load hash value base 2^32
+ ldr r5,[$ctx,#4]
+ ldr r6,[$ctx,#8]
+ ldr r7,[$ctx,#12]
+ ldr ip,[$ctx,#16]
+
+ and r2,r4,#0x03ffffff @ base 2^32 -> base 2^26
+ mov r3,r4,lsr#26
+ veor $D0#lo,$D0#lo,$D0#lo
+ mov r4,r5,lsr#20
+ orr r3,r3,r5,lsl#6
+ veor $D1#lo,$D1#lo,$D1#lo
+ mov r5,r6,lsr#14
+ orr r4,r4,r6,lsl#12
+ veor $D2#lo,$D2#lo,$D2#lo
+ mov r6,r7,lsr#8
+ orr r5,r5,r7,lsl#18
+ veor $D3#lo,$D3#lo,$D3#lo
+ and r3,r3,#0x03ffffff
+ orr r6,r6,ip,lsl#24
+ veor $D4#lo,$D4#lo,$D4#lo
+ and r4,r4,#0x03ffffff
+ mov r1,#1
+ and r5,r5,#0x03ffffff
+ str r1,[$ctx,#36] @ set is_base2_26
+
+ vmov.32 $D0#lo[0],r2
+ vmov.32 $D1#lo[0],r3
+ vmov.32 $D2#lo[0],r4
+ vmov.32 $D3#lo[0],r5
+ vmov.32 $D4#lo[0],r6
+ adr $zeros,.Lzeros
+
+ ldmia sp!,{r1-r3,lr}
+ b .Lhash_loaded
+
+.align 4
+.Lbase2_26_neon:
+ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+ @ load hash value
+
+ veor $D0#lo,$D0#lo,$D0#lo
+ veor $D1#lo,$D1#lo,$D1#lo
+ veor $D2#lo,$D2#lo,$D2#lo
+ veor $D3#lo,$D3#lo,$D3#lo
+ veor $D4#lo,$D4#lo,$D4#lo
+ vld4.32 {$D0#lo[0],$D1#lo[0],$D2#lo[0],$D3#lo[0]},[$ctx]!
+ adr $zeros,.Lzeros
+ vld1.32 {$D4#lo[0]},[$ctx]
+ sub $ctx,$ctx,#16 @ rewind
+
+.Lhash_loaded:
+ add $in2,$inp,#32
+ mov $padbit,$padbit,lsl#24
+ tst $len,#31
+ beq .Leven
+
+ vld4.32 {$H0#lo[0],$H1#lo[0],$H2#lo[0],$H3#lo[0]},[$inp]!
+ vmov.32 $H4#lo[0],$padbit
+ sub $len,$len,#16
+ add $in2,$inp,#32
+
+# ifdef __ARMEB__
+ vrev32.8 $H0,$H0
+ vrev32.8 $H3,$H3
+ vrev32.8 $H1,$H1
+ vrev32.8 $H2,$H2
+# endif
+ vsri.u32 $H4#lo,$H3#lo,#8 @ base 2^32 -> base 2^26
+ vshl.u32 $H3#lo,$H3#lo,#18
+
+ vsri.u32 $H3#lo,$H2#lo,#14
+ vshl.u32 $H2#lo,$H2#lo,#12
+ vadd.i32 $H4#hi,$H4#lo,$D4#lo @ add hash value and move to #hi
+
+ vbic.i32 $H3#lo,#0xfc000000
+ vsri.u32 $H2#lo,$H1#lo,#20
+ vshl.u32 $H1#lo,$H1#lo,#6
+
+ vbic.i32 $H2#lo,#0xfc000000
+ vsri.u32 $H1#lo,$H0#lo,#26
+ vadd.i32 $H3#hi,$H3#lo,$D3#lo
+
+ vbic.i32 $H0#lo,#0xfc000000
+ vbic.i32 $H1#lo,#0xfc000000
+ vadd.i32 $H2#hi,$H2#lo,$D2#lo
+
+ vadd.i32 $H0#hi,$H0#lo,$D0#lo
+ vadd.i32 $H1#hi,$H1#lo,$D1#lo
+
+ mov $tbl1,$zeros
+ add $tbl0,$ctx,#48
+
+ cmp $len,$len
+ b .Long_tail
+
+.align 4
+.Leven:
+ subs $len,$len,#64
+ it lo
+ movlo $in2,$zeros
+
+ vmov.i32 $H4,#1<<24 @ padbit, yes, always
+ vld4.32 {$H0#lo,$H1#lo,$H2#lo,$H3#lo},[$inp] @ inp[0:1]
+ add $inp,$inp,#64
+ vld4.32 {$H0#hi,$H1#hi,$H2#hi,$H3#hi},[$in2] @ inp[2:3] (or 0)
+ add $in2,$in2,#64
+ itt hi
+ addhi $tbl1,$ctx,#(48+1*9*4)
+ addhi $tbl0,$ctx,#(48+3*9*4)
+
+# ifdef __ARMEB__
+ vrev32.8 $H0,$H0
+ vrev32.8 $H3,$H3
+ vrev32.8 $H1,$H1
+ vrev32.8 $H2,$H2
+# endif
+ vsri.u32 $H4,$H3,#8 @ base 2^32 -> base 2^26
+ vshl.u32 $H3,$H3,#18
+
+ vsri.u32 $H3,$H2,#14
+ vshl.u32 $H2,$H2,#12
+
+ vbic.i32 $H3,#0xfc000000
+ vsri.u32 $H2,$H1,#20
+ vshl.u32 $H1,$H1,#6
+
+ vbic.i32 $H2,#0xfc000000
+ vsri.u32 $H1,$H0,#26
+
+ vbic.i32 $H0,#0xfc000000
+ vbic.i32 $H1,#0xfc000000
+
+ bls .Lskip_loop
+
+ vld4.32 {${R0}[1],${R1}[1],${S1}[1],${R2}[1]},[$tbl1]! @ load r^2
+ vld4.32 {${R0}[0],${R1}[0],${S1}[0],${R2}[0]},[$tbl0]! @ load r^4
+ vld4.32 {${S2}[1],${R3}[1],${S3}[1],${R4}[1]},[$tbl1]!
+ vld4.32 {${S2}[0],${R3}[0],${S3}[0],${R4}[0]},[$tbl0]!
+ b .Loop_neon
+
+.align 5
+.Loop_neon:
+ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+ @ ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2
+ @ ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^3+inp[7]*r
+ @ \___________________/
+ @ ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2+inp[8])*r^2
+ @ ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^4+inp[7]*r^2+inp[9])*r
+ @ \___________________/ \____________________/
+ @
+	@ Note that we start with inp[2:3]*r^2. This is because it
+	@ doesn't depend on the reduction in the previous iteration.
+ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+ @ d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4
+ @ d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4
+ @ d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
+ @ d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4
+ @ d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
+
+ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+ @ inp[2:3]*r^2
+
+ vadd.i32 $H2#lo,$H2#lo,$D2#lo @ accumulate inp[0:1]
+ vmull.u32 $D2,$H2#hi,${R0}[1]
+ vadd.i32 $H0#lo,$H0#lo,$D0#lo
+ vmull.u32 $D0,$H0#hi,${R0}[1]
+ vadd.i32 $H3#lo,$H3#lo,$D3#lo
+ vmull.u32 $D3,$H3#hi,${R0}[1]
+ vmlal.u32 $D2,$H1#hi,${R1}[1]
+ vadd.i32 $H1#lo,$H1#lo,$D1#lo
+ vmull.u32 $D1,$H1#hi,${R0}[1]
+
+ vadd.i32 $H4#lo,$H4#lo,$D4#lo
+ vmull.u32 $D4,$H4#hi,${R0}[1]
+ subs $len,$len,#64
+ vmlal.u32 $D0,$H4#hi,${S1}[1]
+ it lo
+ movlo $in2,$zeros
+ vmlal.u32 $D3,$H2#hi,${R1}[1]
+ vld1.32 ${S4}[1],[$tbl1,:32]
+ vmlal.u32 $D1,$H0#hi,${R1}[1]
+ vmlal.u32 $D4,$H3#hi,${R1}[1]
+
+ vmlal.u32 $D0,$H3#hi,${S2}[1]
+ vmlal.u32 $D3,$H1#hi,${R2}[1]
+ vmlal.u32 $D4,$H2#hi,${R2}[1]
+ vmlal.u32 $D1,$H4#hi,${S2}[1]
+ vmlal.u32 $D2,$H0#hi,${R2}[1]
+
+ vmlal.u32 $D3,$H0#hi,${R3}[1]
+ vmlal.u32 $D0,$H2#hi,${S3}[1]
+ vmlal.u32 $D4,$H1#hi,${R3}[1]
+ vmlal.u32 $D1,$H3#hi,${S3}[1]
+ vmlal.u32 $D2,$H4#hi,${S3}[1]
+
+ vmlal.u32 $D3,$H4#hi,${S4}[1]
+ vmlal.u32 $D0,$H1#hi,${S4}[1]
+ vmlal.u32 $D4,$H0#hi,${R4}[1]
+ vmlal.u32 $D1,$H2#hi,${S4}[1]
+ vmlal.u32 $D2,$H3#hi,${S4}[1]
+
+ vld4.32 {$H0#hi,$H1#hi,$H2#hi,$H3#hi},[$in2] @ inp[2:3] (or 0)
+ add $in2,$in2,#64
+
+ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+ @ (hash+inp[0:1])*r^4 and accumulate
+
+ vmlal.u32 $D3,$H3#lo,${R0}[0]
+ vmlal.u32 $D0,$H0#lo,${R0}[0]
+ vmlal.u32 $D4,$H4#lo,${R0}[0]
+ vmlal.u32 $D1,$H1#lo,${R0}[0]
+ vmlal.u32 $D2,$H2#lo,${R0}[0]
+ vld1.32 ${S4}[0],[$tbl0,:32]
+
+ vmlal.u32 $D3,$H2#lo,${R1}[0]
+ vmlal.u32 $D0,$H4#lo,${S1}[0]
+ vmlal.u32 $D4,$H3#lo,${R1}[0]
+ vmlal.u32 $D1,$H0#lo,${R1}[0]
+ vmlal.u32 $D2,$H1#lo,${R1}[0]
+
+ vmlal.u32 $D3,$H1#lo,${R2}[0]
+ vmlal.u32 $D0,$H3#lo,${S2}[0]
+ vmlal.u32 $D4,$H2#lo,${R2}[0]
+ vmlal.u32 $D1,$H4#lo,${S2}[0]
+ vmlal.u32 $D2,$H0#lo,${R2}[0]
+
+ vmlal.u32 $D3,$H0#lo,${R3}[0]
+ vmlal.u32 $D0,$H2#lo,${S3}[0]
+ vmlal.u32 $D4,$H1#lo,${R3}[0]
+ vmlal.u32 $D1,$H3#lo,${S3}[0]
+ vmlal.u32 $D3,$H4#lo,${S4}[0]
+
+ vmlal.u32 $D2,$H4#lo,${S3}[0]
+ vmlal.u32 $D0,$H1#lo,${S4}[0]
+ vmlal.u32 $D4,$H0#lo,${R4}[0]
+ vmov.i32 $H4,#1<<24 @ padbit, yes, always
+ vmlal.u32 $D1,$H2#lo,${S4}[0]
+ vmlal.u32 $D2,$H3#lo,${S4}[0]
+
+ vld4.32 {$H0#lo,$H1#lo,$H2#lo,$H3#lo},[$inp] @ inp[0:1]
+ add $inp,$inp,#64
+# ifdef __ARMEB__
+ vrev32.8 $H0,$H0
+ vrev32.8 $H1,$H1
+ vrev32.8 $H2,$H2
+ vrev32.8 $H3,$H3
+# endif
+
+ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+ @ lazy reduction interleaved with base 2^32 -> base 2^26 of
+ @ inp[0:3] previously loaded to $H0-$H3 and smashed to $H0-$H4.
+
+ vshr.u64 $T0,$D3,#26
+ vmovn.i64 $D3#lo,$D3
+ vshr.u64 $T1,$D0,#26
+ vmovn.i64 $D0#lo,$D0
+ vadd.i64 $D4,$D4,$T0 @ h3 -> h4
+ vbic.i32 $D3#lo,#0xfc000000
+ vsri.u32 $H4,$H3,#8 @ base 2^32 -> base 2^26
+ vadd.i64 $D1,$D1,$T1 @ h0 -> h1
+ vshl.u32 $H3,$H3,#18
+ vbic.i32 $D0#lo,#0xfc000000
+
+ vshrn.u64 $T0#lo,$D4,#26
+ vmovn.i64 $D4#lo,$D4
+ vshr.u64 $T1,$D1,#26
+ vmovn.i64 $D1#lo,$D1
+ vadd.i64 $D2,$D2,$T1 @ h1 -> h2
+ vsri.u32 $H3,$H2,#14
+ vbic.i32 $D4#lo,#0xfc000000
+ vshl.u32 $H2,$H2,#12
+ vbic.i32 $D1#lo,#0xfc000000
+
+ vadd.i32 $D0#lo,$D0#lo,$T0#lo
+ vshl.u32 $T0#lo,$T0#lo,#2
+ vbic.i32 $H3,#0xfc000000
+ vshrn.u64 $T1#lo,$D2,#26
+ vmovn.i64 $D2#lo,$D2
+ vaddl.u32 $D0,$D0#lo,$T0#lo @ h4 -> h0 [widen for a sec]
+ vsri.u32 $H2,$H1,#20
+ vadd.i32 $D3#lo,$D3#lo,$T1#lo @ h2 -> h3
+ vshl.u32 $H1,$H1,#6
+ vbic.i32 $D2#lo,#0xfc000000
+ vbic.i32 $H2,#0xfc000000
+
+ vshrn.u64 $T0#lo,$D0,#26 @ re-narrow
+ vmovn.i64 $D0#lo,$D0
+ vsri.u32 $H1,$H0,#26
+ vbic.i32 $H0,#0xfc000000
+ vshr.u32 $T1#lo,$D3#lo,#26
+ vbic.i32 $D3#lo,#0xfc000000
+ vbic.i32 $D0#lo,#0xfc000000
+ vadd.i32 $D1#lo,$D1#lo,$T0#lo @ h0 -> h1
+ vadd.i32 $D4#lo,$D4#lo,$T1#lo @ h3 -> h4
+ vbic.i32 $H1,#0xfc000000
+
+ bhi .Loop_neon
+
+.Lskip_loop:
+ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+ @ multiply (inp[0:1]+hash) or inp[2:3] by r^2:r^1
+
+ add $tbl1,$ctx,#(48+0*9*4)
+ add $tbl0,$ctx,#(48+1*9*4)
+ adds $len,$len,#32
+ it ne
+ movne $len,#0
+ bne .Long_tail
+
+ vadd.i32 $H2#hi,$H2#lo,$D2#lo @ add hash value and move to #hi
+ vadd.i32 $H0#hi,$H0#lo,$D0#lo
+ vadd.i32 $H3#hi,$H3#lo,$D3#lo
+ vadd.i32 $H1#hi,$H1#lo,$D1#lo
+ vadd.i32 $H4#hi,$H4#lo,$D4#lo
+
+.Long_tail:
+ vld4.32 {${R0}[1],${R1}[1],${S1}[1],${R2}[1]},[$tbl1]! @ load r^1
+ vld4.32 {${R0}[0],${R1}[0],${S1}[0],${R2}[0]},[$tbl0]! @ load r^2
+
+ vadd.i32 $H2#lo,$H2#lo,$D2#lo @ can be redundant
+ vmull.u32 $D2,$H2#hi,$R0
+ vadd.i32 $H0#lo,$H0#lo,$D0#lo
+ vmull.u32 $D0,$H0#hi,$R0
+ vadd.i32 $H3#lo,$H3#lo,$D3#lo
+ vmull.u32 $D3,$H3#hi,$R0
+ vadd.i32 $H1#lo,$H1#lo,$D1#lo
+ vmull.u32 $D1,$H1#hi,$R0
+ vadd.i32 $H4#lo,$H4#lo,$D4#lo
+ vmull.u32 $D4,$H4#hi,$R0
+
+ vmlal.u32 $D0,$H4#hi,$S1
+ vld4.32 {${S2}[1],${R3}[1],${S3}[1],${R4}[1]},[$tbl1]!
+ vmlal.u32 $D3,$H2#hi,$R1
+ vld4.32 {${S2}[0],${R3}[0],${S3}[0],${R4}[0]},[$tbl0]!
+ vmlal.u32 $D1,$H0#hi,$R1
+ vmlal.u32 $D4,$H3#hi,$R1
+ vmlal.u32 $D2,$H1#hi,$R1
+
+ vmlal.u32 $D3,$H1#hi,$R2
+ vld1.32 ${S4}[1],[$tbl1,:32]
+ vmlal.u32 $D0,$H3#hi,$S2
+ vld1.32 ${S4}[0],[$tbl0,:32]
+ vmlal.u32 $D4,$H2#hi,$R2
+ vmlal.u32 $D1,$H4#hi,$S2
+ vmlal.u32 $D2,$H0#hi,$R2
+
+ vmlal.u32 $D3,$H0#hi,$R3
+ it ne
+ addne $tbl1,$ctx,#(48+2*9*4)
+ vmlal.u32 $D0,$H2#hi,$S3
+ it ne
+ addne $tbl0,$ctx,#(48+3*9*4)
+ vmlal.u32 $D4,$H1#hi,$R3
+ vmlal.u32 $D1,$H3#hi,$S3
+ vmlal.u32 $D2,$H4#hi,$S3
+
+ vmlal.u32 $D3,$H4#hi,$S4
+ vorn $MASK,$MASK,$MASK @ all-ones, can be redundant
+ vmlal.u32 $D0,$H1#hi,$S4
+ vshr.u64 $MASK,$MASK,#38
+ vmlal.u32 $D4,$H0#hi,$R4
+ vmlal.u32 $D1,$H2#hi,$S4
+ vmlal.u32 $D2,$H3#hi,$S4
+
+ beq .Lshort_tail
+
+ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+ @ (hash+inp[0:1])*r^4:r^3 and accumulate
+
+ vld4.32 {${R0}[1],${R1}[1],${S1}[1],${R2}[1]},[$tbl1]! @ load r^3
+ vld4.32 {${R0}[0],${R1}[0],${S1}[0],${R2}[0]},[$tbl0]! @ load r^4
+
+ vmlal.u32 $D2,$H2#lo,$R0
+ vmlal.u32 $D0,$H0#lo,$R0
+ vmlal.u32 $D3,$H3#lo,$R0
+ vmlal.u32 $D1,$H1#lo,$R0
+ vmlal.u32 $D4,$H4#lo,$R0
+
+ vmlal.u32 $D0,$H4#lo,$S1
+ vld4.32 {${S2}[1],${R3}[1],${S3}[1],${R4}[1]},[$tbl1]!
+ vmlal.u32 $D3,$H2#lo,$R1
+ vld4.32 {${S2}[0],${R3}[0],${S3}[0],${R4}[0]},[$tbl0]!
+ vmlal.u32 $D1,$H0#lo,$R1
+ vmlal.u32 $D4,$H3#lo,$R1
+ vmlal.u32 $D2,$H1#lo,$R1
+
+ vmlal.u32 $D3,$H1#lo,$R2
+ vld1.32 ${S4}[1],[$tbl1,:32]
+ vmlal.u32 $D0,$H3#lo,$S2
+ vld1.32 ${S4}[0],[$tbl0,:32]
+ vmlal.u32 $D4,$H2#lo,$R2
+ vmlal.u32 $D1,$H4#lo,$S2
+ vmlal.u32 $D2,$H0#lo,$R2
+
+ vmlal.u32 $D3,$H0#lo,$R3
+ vmlal.u32 $D0,$H2#lo,$S3
+ vmlal.u32 $D4,$H1#lo,$R3
+ vmlal.u32 $D1,$H3#lo,$S3
+ vmlal.u32 $D2,$H4#lo,$S3
+
+ vmlal.u32 $D3,$H4#lo,$S4
+ vorn $MASK,$MASK,$MASK @ all-ones
+ vmlal.u32 $D0,$H1#lo,$S4
+ vshr.u64 $MASK,$MASK,#38
+ vmlal.u32 $D4,$H0#lo,$R4
+ vmlal.u32 $D1,$H2#lo,$S4
+ vmlal.u32 $D2,$H3#lo,$S4
+
+.Lshort_tail:
+ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+ @ horizontal addition
+
+ vadd.i64 $D3#lo,$D3#lo,$D3#hi
+ vadd.i64 $D0#lo,$D0#lo,$D0#hi
+ vadd.i64 $D4#lo,$D4#lo,$D4#hi
+ vadd.i64 $D1#lo,$D1#lo,$D1#hi
+ vadd.i64 $D2#lo,$D2#lo,$D2#hi
+
+ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+ @ lazy reduction, but without narrowing
+
+ vshr.u64 $T0,$D3,#26
+ vand.i64 $D3,$D3,$MASK
+ vshr.u64 $T1,$D0,#26
+ vand.i64 $D0,$D0,$MASK
+ vadd.i64 $D4,$D4,$T0 @ h3 -> h4
+ vadd.i64 $D1,$D1,$T1 @ h0 -> h1
+
+ vshr.u64 $T0,$D4,#26
+ vand.i64 $D4,$D4,$MASK
+ vshr.u64 $T1,$D1,#26
+ vand.i64 $D1,$D1,$MASK
+ vadd.i64 $D2,$D2,$T1 @ h1 -> h2
+
+ vadd.i64 $D0,$D0,$T0
+ vshl.u64 $T0,$T0,#2
+ vshr.u64 $T1,$D2,#26
+ vand.i64 $D2,$D2,$MASK
+ vadd.i64 $D0,$D0,$T0 @ h4 -> h0
+ vadd.i64 $D3,$D3,$T1 @ h2 -> h3
+
+ vshr.u64 $T0,$D0,#26
+ vand.i64 $D0,$D0,$MASK
+ vshr.u64 $T1,$D3,#26
+ vand.i64 $D3,$D3,$MASK
+ vadd.i64 $D1,$D1,$T0 @ h0 -> h1
+ vadd.i64 $D4,$D4,$T1 @ h3 -> h4
+
+ cmp $len,#0
+ bne .Leven
+
+ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+ @ store hash value
+
+ vst4.32 {$D0#lo[0],$D1#lo[0],$D2#lo[0],$D3#lo[0]},[$ctx]!
+ vst1.32 {$D4#lo[0]},[$ctx]
+
+ vldmia sp!,{d8-d15} @ epilogue
+ ldmia sp!,{r4-r7}
+ ret @ bx lr
+.size poly1305_blocks_neon,.-poly1305_blocks_neon
+
+.align 5
+.Lzeros:
+.long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
+#ifndef __KERNEL__
+.LOPENSSL_armcap:
+# ifdef _WIN32
+.word OPENSSL_armcap_P
+# else
+.word OPENSSL_armcap_P-.Lpoly1305_init
+# endif
+.comm OPENSSL_armcap_P,4,4
+.hidden OPENSSL_armcap_P
+#endif
+#endif
+___
+} }
+$code.=<<___;
+.asciz "Poly1305 for ARMv4/NEON, CRYPTOGAMS by \@dot-asm"
+.align 2
+___
+
+foreach (split("\n",$code)) {
+ s/\`([^\`]*)\`/eval $1/geo;
+
+ s/\bq([0-9]+)#(lo|hi)/sprintf "d%d",2*$1+($2 eq "hi")/geo or
+ s/\bret\b/bx lr/go or
+ s/\bbx\s+lr\b/.word\t0xe12fff1e/go; # make it possible to compile with -march=armv4
+
+ print $_,"\n";
+}
+close STDOUT; # enforce flush
diff --git a/arch/arm/crypto/poly1305-core.S_shipped b/arch/arm/crypto/poly1305-core.S_shipped
new file mode 100644
index 000000000000..37b71d990293
--- /dev/null
+++ b/arch/arm/crypto/poly1305-core.S_shipped
@@ -0,0 +1,1158 @@
+#ifndef __KERNEL__
+# include "arm_arch.h"
+#else
+# define __ARM_ARCH__ __LINUX_ARM_ARCH__
+# define __ARM_MAX_ARCH__ __LINUX_ARM_ARCH__
+# define poly1305_init poly1305_init_arm
+# define poly1305_blocks poly1305_blocks_arm
+# define poly1305_emit poly1305_emit_arm
+.globl poly1305_blocks_neon
+#endif
+
+#if defined(__thumb2__)
+.syntax unified
+.thumb
+#else
+.code 32
+#endif
+
+.text
+
+.globl poly1305_emit
+.globl poly1305_blocks
+.globl poly1305_init
+.type poly1305_init,%function
+.align 5
+poly1305_init:
+.Lpoly1305_init:
+ stmdb sp!,{r4-r11}
+
+ eor r3,r3,r3
+ cmp r1,#0
+ str r3,[r0,#0] @ zero hash value
+ str r3,[r0,#4]
+ str r3,[r0,#8]
+ str r3,[r0,#12]
+ str r3,[r0,#16]
+ str r3,[r0,#36] @ clear is_base2_26
+ add r0,r0,#20
+
+#ifdef __thumb2__
+ it eq
+#endif
+ moveq r0,#0
+ beq .Lno_key
+
+#if __ARM_MAX_ARCH__>=7
+ mov r3,#-1
+ str r3,[r0,#28] @ impossible key power value
+# ifndef __KERNEL__
+ adr r11,.Lpoly1305_init
+ ldr r12,.LOPENSSL_armcap
+# endif
+#endif
+ ldrb r4,[r1,#0]
+ mov r10,#0x0fffffff
+ ldrb r5,[r1,#1]
+ and r3,r10,#-4 @ 0x0ffffffc
+ ldrb r6,[r1,#2]
+ ldrb r7,[r1,#3]
+ orr r4,r4,r5,lsl#8
+ ldrb r5,[r1,#4]
+ orr r4,r4,r6,lsl#16
+ ldrb r6,[r1,#5]
+ orr r4,r4,r7,lsl#24
+ ldrb r7,[r1,#6]
+ and r4,r4,r10
+
+#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
+# if !defined(_WIN32)
+ ldr r12,[r11,r12] @ OPENSSL_armcap_P
+# endif
+# if defined(__APPLE__) || defined(_WIN32)
+ ldr r12,[r12]
+# endif
+#endif
+ ldrb r8,[r1,#7]
+ orr r5,r5,r6,lsl#8
+ ldrb r6,[r1,#8]
+ orr r5,r5,r7,lsl#16
+ ldrb r7,[r1,#9]
+ orr r5,r5,r8,lsl#24
+ ldrb r8,[r1,#10]
+ and r5,r5,r3
+
+#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
+ tst r12,#ARMV7_NEON @ check for NEON
+# ifdef __thumb2__
+ adr r9,.Lpoly1305_blocks_neon
+ adr r11,.Lpoly1305_blocks
+ it ne
+ movne r11,r9
+ adr r12,.Lpoly1305_emit
+ orr r11,r11,#1 @ thumb-ify addresses
+ orr r12,r12,#1
+# else
+ add r12,r11,#(.Lpoly1305_emit-.Lpoly1305_init)
+ ite eq
+ addeq r11,r11,#(.Lpoly1305_blocks-.Lpoly1305_init)
+ addne r11,r11,#(.Lpoly1305_blocks_neon-.Lpoly1305_init)
+# endif
+#endif
+ ldrb r9,[r1,#11]
+ orr r6,r6,r7,lsl#8
+ ldrb r7,[r1,#12]
+ orr r6,r6,r8,lsl#16
+ ldrb r8,[r1,#13]
+ orr r6,r6,r9,lsl#24
+ ldrb r9,[r1,#14]
+ and r6,r6,r3
+
+ ldrb r10,[r1,#15]
+ orr r7,r7,r8,lsl#8
+ str r4,[r0,#0]
+ orr r7,r7,r9,lsl#16
+ str r5,[r0,#4]
+ orr r7,r7,r10,lsl#24
+ str r6,[r0,#8]
+ and r7,r7,r3
+ str r7,[r0,#12]
+#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
+ stmia r2,{r11,r12} @ fill functions table
+ mov r0,#1
+#else
+ mov r0,#0
+#endif
+.Lno_key:
+ ldmia sp!,{r4-r11}
+#if __ARM_ARCH__>=5
+ bx lr @ bx lr
+#else
+ tst lr,#1
+ moveq pc,lr @ be binary compatible with V4, yet
+ .word 0xe12fff1e @ interoperable with Thumb ISA:-)
+#endif
+.size poly1305_init,.-poly1305_init
+.type poly1305_blocks,%function
+.align 5
+poly1305_blocks:
+.Lpoly1305_blocks:
+ stmdb sp!,{r3-r11,lr}
+
+ ands r2,r2,#-16
+ beq .Lno_data
+
+ add r2,r2,r1 @ end pointer
+ sub sp,sp,#32
+
+#if __ARM_ARCH__<7
+ ldmia r0,{r4-r12} @ load context
+ add r0,r0,#20
+ str r2,[sp,#16] @ offload stuff
+ str r0,[sp,#12]
+#else
+ ldr lr,[r0,#36] @ is_base2_26
+ ldmia r0!,{r4-r8} @ load hash value
+ str r2,[sp,#16] @ offload stuff
+ str r0,[sp,#12]
+
+ adds r9,r4,r5,lsl#26 @ base 2^26 -> base 2^32
+ mov r10,r5,lsr#6
+ adcs r10,r10,r6,lsl#20
+ mov r11,r6,lsr#12
+ adcs r11,r11,r7,lsl#14
+ mov r12,r7,lsr#18
+ adcs r12,r12,r8,lsl#8
+ mov r2,#0
+ teq lr,#0
+ str r2,[r0,#16] @ clear is_base2_26
+ adc r2,r2,r8,lsr#24
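+	@ i.e. the base 2^26 hash r4 + r5*2^26 + r6*2^52 + r7*2^78 +
+	@ r8*2^104 is repacked into r9-r12 in base 2^32, with the
+	@ leftover r8>>24 bits collected into r2.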
+
+ itttt ne
+ movne r4,r9 @ choose between radixes
+ movne r5,r10
+ movne r6,r11
+ movne r7,r12
+ ldmia r0,{r9-r12} @ load key
+ it ne
+ movne r8,r2
+#endif
+
+ mov lr,r1
+ cmp r3,#0
+ str r10,[sp,#20]
+ str r11,[sp,#24]
+ str r12,[sp,#28]
+ b .Loop
+
+.align 4
+.Loop:
+#if __ARM_ARCH__<7
+ ldrb r0,[lr],#16 @ load input
+# ifdef __thumb2__
+ it hi
+# endif
+ addhi r8,r8,#1 @ 1<<128
+ ldrb r1,[lr,#-15]
+ ldrb r2,[lr,#-14]
+ ldrb r3,[lr,#-13]
+ orr r1,r0,r1,lsl#8
+ ldrb r0,[lr,#-12]
+ orr r2,r1,r2,lsl#16
+ ldrb r1,[lr,#-11]
+ orr r3,r2,r3,lsl#24
+ ldrb r2,[lr,#-10]
+ adds r4,r4,r3 @ accumulate input
+
+ ldrb r3,[lr,#-9]
+ orr r1,r0,r1,lsl#8
+ ldrb r0,[lr,#-8]
+ orr r2,r1,r2,lsl#16
+ ldrb r1,[lr,#-7]
+ orr r3,r2,r3,lsl#24
+ ldrb r2,[lr,#-6]
+ adcs r5,r5,r3
+
+ ldrb r3,[lr,#-5]
+ orr r1,r0,r1,lsl#8
+ ldrb r0,[lr,#-4]
+ orr r2,r1,r2,lsl#16
+ ldrb r1,[lr,#-3]
+ orr r3,r2,r3,lsl#24
+ ldrb r2,[lr,#-2]
+ adcs r6,r6,r3
+
+ ldrb r3,[lr,#-1]
+ orr r1,r0,r1,lsl#8
+ str lr,[sp,#8] @ offload input pointer
+ orr r2,r1,r2,lsl#16
+ add r10,r10,r10,lsr#2
+ orr r3,r2,r3,lsl#24
+#else
+ ldr r0,[lr],#16 @ load input
+ it hi
+ addhi r8,r8,#1 @ padbit
+ ldr r1,[lr,#-12]
+ ldr r2,[lr,#-8]
+ ldr r3,[lr,#-4]
+# ifdef __ARMEB__
+ rev r0,r0
+ rev r1,r1
+ rev r2,r2
+ rev r3,r3
+# endif
+ adds r4,r4,r0 @ accumulate input
+ str lr,[sp,#8] @ offload input pointer
+ adcs r5,r5,r1
+ add r10,r10,r10,lsr#2
+ adcs r6,r6,r2
+#endif
+ add r11,r11,r11,lsr#2
+ adcs r7,r7,r3
+ add r12,r12,r12,lsr#2
+
+ umull r2,r3,r5,r9
+ adc r8,r8,#0
+ umull r0,r1,r4,r9
+ umlal r2,r3,r8,r10
+ umlal r0,r1,r7,r10
+ ldr r10,[sp,#20] @ reload r10
+ umlal r2,r3,r6,r12
+ umlal r0,r1,r5,r12
+ umlal r2,r3,r7,r11
+ umlal r0,r1,r6,r11
+ umlal r2,r3,r4,r10
+ str r0,[sp,#0] @ future r4
+ mul r0,r11,r8
+ ldr r11,[sp,#24] @ reload r11
+ adds r2,r2,r1 @ d1+=d0>>32
+ eor r1,r1,r1
+ adc lr,r3,#0 @ future r6
+ str r2,[sp,#4] @ future r5
+
+ mul r2,r12,r8
+ eor r3,r3,r3
+ umlal r0,r1,r7,r12
+ ldr r12,[sp,#28] @ reload r12
+ umlal r2,r3,r7,r9
+ umlal r0,r1,r6,r9
+ umlal r2,r3,r6,r10
+ umlal r0,r1,r5,r10
+ umlal r2,r3,r5,r11
+ umlal r0,r1,r4,r11
+ umlal r2,r3,r4,r12
+ ldr r4,[sp,#0]
+ mul r8,r9,r8
+ ldr r5,[sp,#4]
+
+ adds r6,lr,r0 @ d2+=d1>>32
+ ldr lr,[sp,#8] @ reload input pointer
+ adc r1,r1,#0
+ adds r7,r2,r1 @ d3+=d2>>32
+ ldr r0,[sp,#16] @ reload end pointer
+ adc r3,r3,#0
+ add r8,r8,r3 @ h4+=d3>>32
+
+ and r1,r8,#-4
+ and r8,r8,#3
+ add r1,r1,r1,lsr#2 @ *=5
+ adds r4,r4,r1
+ adcs r5,r5,#0
+ adcs r6,r6,#0
+ adcs r7,r7,#0
+ adc r8,r8,#0
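+	@ As in the perl source above: bits of r8 at 2^130 and above are
+	@ folded back via 2^130 = 5 (mod 2^130-5), computing 5*(r8>>2)
+	@ as x + x>>2.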
+
+ cmp r0,lr @ done yet?
+ bhi .Loop
+
+ ldr r0,[sp,#12]
+ add sp,sp,#32
+ stmdb r0,{r4-r8} @ store the result
+
+.Lno_data:
+#if __ARM_ARCH__>=5
+ ldmia sp!,{r3-r11,pc}
+#else
+ ldmia sp!,{r3-r11,lr}
+ tst lr,#1
+ moveq pc,lr @ be binary compatible with V4, yet
+ .word 0xe12fff1e @ interoperable with Thumb ISA:-)
+#endif
+.size poly1305_blocks,.-poly1305_blocks
+.type poly1305_emit,%function
+.align 5
+poly1305_emit:
+.Lpoly1305_emit:
+ stmdb sp!,{r4-r11}
+
+ ldmia r0,{r3-r7}
+
+#if __ARM_ARCH__>=7
+ ldr ip,[r0,#36] @ is_base2_26
+
+ adds r8,r3,r4,lsl#26 @ base 2^26 -> base 2^32
+ mov r9,r4,lsr#6
+ adcs r9,r9,r5,lsl#20
+ mov r10,r5,lsr#12
+ adcs r10,r10,r6,lsl#14
+ mov r11,r6,lsr#18
+ adcs r11,r11,r7,lsl#8
+ mov r0,#0
+ adc r0,r0,r7,lsr#24
+
+ tst ip,ip
+ itttt ne
+ movne r3,r8
+ movne r4,r9
+ movne r5,r10
+ movne r6,r11
+ it ne
+ movne r7,r0
+#endif
+
+ adds r8,r3,#5 @ compare to modulus
+ adcs r9,r4,#0
+ adcs r10,r5,#0
+ adcs r11,r6,#0
+ adc r0,r7,#0
+ tst r0,#4 @ did it carry/borrow?
+
+#ifdef __thumb2__
+ it ne
+#endif
+ movne r3,r8
+ ldr r8,[r2,#0]
+#ifdef __thumb2__
+ it ne
+#endif
+ movne r4,r9
+ ldr r9,[r2,#4]
+#ifdef __thumb2__
+ it ne
+#endif
+ movne r5,r10
+ ldr r10,[r2,#8]
+#ifdef __thumb2__
+ it ne
+#endif
+ movne r6,r11
+ ldr r11,[r2,#12]
+
+ adds r3,r3,r8
+ adcs r4,r4,r9
+ adcs r5,r5,r10
+ adc r6,r6,r11
+
+#if __ARM_ARCH__>=7
+# ifdef __ARMEB__
+ rev r3,r3
+ rev r4,r4
+ rev r5,r5
+ rev r6,r6
+# endif
+ str r3,[r1,#0]
+ str r4,[r1,#4]
+ str r5,[r1,#8]
+ str r6,[r1,#12]
+#else
+ strb r3,[r1,#0]
+ mov r3,r3,lsr#8
+ strb r4,[r1,#4]
+ mov r4,r4,lsr#8
+ strb r5,[r1,#8]
+ mov r5,r5,lsr#8
+ strb r6,[r1,#12]
+ mov r6,r6,lsr#8
+
+ strb r3,[r1,#1]
+ mov r3,r3,lsr#8
+ strb r4,[r1,#5]
+ mov r4,r4,lsr#8
+ strb r5,[r1,#9]
+ mov r5,r5,lsr#8
+ strb r6,[r1,#13]
+ mov r6,r6,lsr#8
+
+ strb r3,[r1,#2]
+ mov r3,r3,lsr#8
+ strb r4,[r1,#6]
+ mov r4,r4,lsr#8
+ strb r5,[r1,#10]
+ mov r5,r5,lsr#8
+ strb r6,[r1,#14]
+ mov r6,r6,lsr#8
+
+ strb r3,[r1,#3]
+ strb r4,[r1,#7]
+ strb r5,[r1,#11]
+ strb r6,[r1,#15]
+#endif
+ ldmia sp!,{r4-r11}
+#if __ARM_ARCH__>=5
+ bx lr @ bx lr
+#else
+ tst lr,#1
+ moveq pc,lr @ be binary compatible with V4, yet
+ .word 0xe12fff1e @ interoperable with Thumb ISA:-)
+#endif
+.size poly1305_emit,.-poly1305_emit
+#if __ARM_MAX_ARCH__>=7
+.fpu neon
+
+.type poly1305_init_neon,%function
+.align 5
+poly1305_init_neon:
+.Lpoly1305_init_neon:
+ ldr r3,[r0,#48] @ first table element
+ cmp r3,#-1 @ is value impossible?
+ bne .Lno_init_neon
+
+ ldr r4,[r0,#20] @ load key base 2^32
+ ldr r5,[r0,#24]
+ ldr r6,[r0,#28]
+ ldr r7,[r0,#32]
+
+ and r2,r4,#0x03ffffff @ base 2^32 -> base 2^26
+ mov r3,r4,lsr#26
+ mov r4,r5,lsr#20
+ orr r3,r3,r5,lsl#6
+ mov r5,r6,lsr#14
+ orr r4,r4,r6,lsl#12
+ mov r6,r7,lsr#8
+ orr r5,r5,r7,lsl#18
+ and r3,r3,#0x03ffffff
+ and r4,r4,#0x03ffffff
+ and r5,r5,#0x03ffffff
+
+ vdup.32 d0,r2 @ r^1 in both lanes
+ add r2,r3,r3,lsl#2 @ *5
+ vdup.32 d1,r3
+ add r3,r4,r4,lsl#2
+ vdup.32 d2,r2
+ vdup.32 d3,r4
+ add r4,r5,r5,lsl#2
+ vdup.32 d4,r3
+ vdup.32 d5,r5
+ add r5,r6,r6,lsl#2
+ vdup.32 d6,r4
+ vdup.32 d7,r6
+ vdup.32 d8,r5
+
+ mov r5,#2 @ counter
+
+.Lsquare_neon:
+ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+ @ d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
+ @ d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4
+ @ d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
+ @ d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4
+ @ d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4
+
+ vmull.u32 q5,d0,d0[1]
+ vmull.u32 q6,d1,d0[1]
+ vmull.u32 q7,d3,d0[1]
+ vmull.u32 q8,d5,d0[1]
+ vmull.u32 q9,d7,d0[1]
+
+ vmlal.u32 q5,d7,d2[1]
+ vmlal.u32 q6,d0,d1[1]
+ vmlal.u32 q7,d1,d1[1]
+ vmlal.u32 q8,d3,d1[1]
+ vmlal.u32 q9,d5,d1[1]
+
+ vmlal.u32 q5,d5,d4[1]
+ vmlal.u32 q6,d7,d4[1]
+ vmlal.u32 q8,d1,d3[1]
+ vmlal.u32 q7,d0,d3[1]
+ vmlal.u32 q9,d3,d3[1]
+
+ vmlal.u32 q5,d3,d6[1]
+ vmlal.u32 q8,d0,d5[1]
+ vmlal.u32 q6,d5,d6[1]
+ vmlal.u32 q7,d7,d6[1]
+ vmlal.u32 q9,d1,d5[1]
+
+ vmlal.u32 q8,d7,d8[1]
+ vmlal.u32 q5,d1,d8[1]
+ vmlal.u32 q6,d3,d8[1]
+ vmlal.u32 q7,d5,d8[1]
+ vmlal.u32 q9,d0,d7[1]
+
+ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+ @ lazy reduction as discussed in "NEON crypto" by D.J. Bernstein
+ @ and P. Schwabe
+ @
+ @ H0>>+H1>>+H2>>+H3>>+H4
+ @ H3>>+H4>>*5+H0>>+H1
+ @
+ @ Trivia.
+ @
+	@ The result of multiplying an n-bit number by an m-bit number is
+	@ n+m bits wide. However! Even though 2^n is an (n+1)-bit number,
+	@ an m-bit number multiplied by 2^n is still only n+m bits wide.
+	@
+	@ The sum of two n-bit numbers is n+1 bits wide; the sum of three,
+	@ and likewise of four, is n+2. The sum of 2^m (n-m)-bit numbers
+	@ and one n-bit number is n+1 bits wide.
+	@
+	@ >>+ denotes Hnext += Hn>>26, Hn &= 0x3ffffff. This means that
+	@ H0, H2, H3 are guaranteed to be 26 bits wide, while H1 and H4
+	@ can be 27. However! In cases where their width exceeds 26 bits,
+	@ they are bounded by 2^26+2^6. This in turn means that the *sum*
+	@ of the products with these values can still be viewed as a sum
+	@ of 52-bit numbers as long as the number of addends is not a
+	@ power of 2. For example,
+	@
+	@ H4 = H4*R0 + H3*R1 + H2*R2 + H1*R3 + H0 * R4,
+	@
+	@ which can't be larger than 5 * (2^26 + 2^6) * (2^26 + 2^6), or
+	@ 5 * (2^52 + 2*2^32 + 2^12), which in turn is smaller than
+	@ 8 * (2^52) or 2^55. However, the value is then multiplied
+	@ by 5, so we should be looking at 5 * 5 * (2^52 + 2^33 + 2^12),
+	@ which is less than 32 * (2^52) or 2^57. And when processing
+	@ data we are looking at three times as many addends...
+	@
+	@ In the key setup procedure, pre-reduced H0 is limited by 5*4+1
+	@ and 5*H4 by 5*5 52-bit addends, or 57 bits. But when hashing
+	@ the input, H0 is limited by (5*4+1)*3 addends, or 58 bits, while
+	@ 5*H4 is limited by 5*5*3, or 59[!] bits. How is this relevant?
+	@ The vmlal.u32 instruction accepts 2x32-bit inputs and writes a
+	@ 2x64-bit result. This means that the result of the reduction has
+	@ to be compressed upon loop wrap-around. This can be done in the
+	@ process of reduction to minimize the number of instructions [as
+	@ well as the number of 128-bit instructions, which benefits
+	@ low-end processors], but one has to ensure that H2 (which is
+	@ narrower than H0) and 5*H4 are not wider than 58 bits, so that
+	@ the result of the right shift by 26 bits fits in 32 bits. This
+	@ is also useful on x86, because it allows paddd to be used in
+	@ place of paddq, which benefits Atom, where paddq is ridiculously
+	@ slow.
+
+ vshr.u64 q15,q8,#26
+ vmovn.i64 d16,q8
+ vshr.u64 q4,q5,#26
+ vmovn.i64 d10,q5
+ vadd.i64 q9,q9,q15 @ h3 -> h4
+ vbic.i32 d16,#0xfc000000 @ &=0x03ffffff
+ vadd.i64 q6,q6,q4 @ h0 -> h1
+ vbic.i32 d10,#0xfc000000
+
+ vshrn.u64 d30,q9,#26
+ vmovn.i64 d18,q9
+ vshr.u64 q4,q6,#26
+ vmovn.i64 d12,q6
+ vadd.i64 q7,q7,q4 @ h1 -> h2
+ vbic.i32 d18,#0xfc000000
+ vbic.i32 d12,#0xfc000000
+
+ vadd.i32 d10,d10,d30
+ vshl.u32 d30,d30,#2
+ vshrn.u64 d8,q7,#26
+ vmovn.i64 d14,q7
+ vadd.i32 d10,d10,d30 @ h4 -> h0
+ vadd.i32 d16,d16,d8 @ h2 -> h3
+ vbic.i32 d14,#0xfc000000
+
+ vshr.u32 d30,d10,#26
+ vbic.i32 d10,#0xfc000000
+ vshr.u32 d8,d16,#26
+ vbic.i32 d16,#0xfc000000
+ vadd.i32 d12,d12,d30 @ h0 -> h1
+ vadd.i32 d18,d18,d8 @ h3 -> h4
+
+ subs r5,r5,#1
+ beq .Lsquare_break_neon
+
+ add r6,r0,#(48+0*9*4)
+ add r7,r0,#(48+1*9*4)
+
+ vtrn.32 d0,d10 @ r^2:r^1
+ vtrn.32 d3,d14
+ vtrn.32 d5,d16
+ vtrn.32 d1,d12
+ vtrn.32 d7,d18
+
+ vshl.u32 d4,d3,#2 @ *5
+ vshl.u32 d6,d5,#2
+ vshl.u32 d2,d1,#2
+ vshl.u32 d8,d7,#2
+ vadd.i32 d4,d4,d3
+ vadd.i32 d2,d2,d1
+ vadd.i32 d6,d6,d5
+ vadd.i32 d8,d8,d7
+
+ vst4.32 {d0[0],d1[0],d2[0],d3[0]},[r6]!
+ vst4.32 {d0[1],d1[1],d2[1],d3[1]},[r7]!
+ vst4.32 {d4[0],d5[0],d6[0],d7[0]},[r6]!
+ vst4.32 {d4[1],d5[1],d6[1],d7[1]},[r7]!
+ vst1.32 {d8[0]},[r6,:32]
+ vst1.32 {d8[1]},[r7,:32]
+
+ b .Lsquare_neon
+
+.align 4
+.Lsquare_break_neon:
+ add r6,r0,#(48+2*4*9)
+ add r7,r0,#(48+3*4*9)
+
+ vmov d0,d10 @ r^4:r^3
+ vshl.u32 d2,d12,#2 @ *5
+ vmov d1,d12
+ vshl.u32 d4,d14,#2
+ vmov d3,d14
+ vshl.u32 d6,d16,#2
+ vmov d5,d16
+ vshl.u32 d8,d18,#2
+ vmov d7,d18
+ vadd.i32 d2,d2,d12
+ vadd.i32 d4,d4,d14
+ vadd.i32 d6,d6,d16
+ vadd.i32 d8,d8,d18
+
+ vst4.32 {d0[0],d1[0],d2[0],d3[0]},[r6]!
+ vst4.32 {d0[1],d1[1],d2[1],d3[1]},[r7]!
+ vst4.32 {d4[0],d5[0],d6[0],d7[0]},[r6]!
+ vst4.32 {d4[1],d5[1],d6[1],d7[1]},[r7]!
+ vst1.32 {d8[0]},[r6]
+ vst1.32 {d8[1]},[r7]
+
+.Lno_init_neon:
+ bx lr @ bx lr
+.size poly1305_init_neon,.-poly1305_init_neon
+
+.type poly1305_blocks_neon,%function
+.align 5
+poly1305_blocks_neon:
+.Lpoly1305_blocks_neon:
+ ldr ip,[r0,#36] @ is_base2_26
+
+ cmp r2,#64
+ blo .Lpoly1305_blocks
+
+ stmdb sp!,{r4-r7}
+ vstmdb sp!,{d8-d15} @ ABI specification says so
+
+ tst ip,ip @ is_base2_26?
+ bne .Lbase2_26_neon
+
+ stmdb sp!,{r1-r3,lr}
+ bl .Lpoly1305_init_neon
+
+ ldr r4,[r0,#0] @ load hash value base 2^32
+ ldr r5,[r0,#4]
+ ldr r6,[r0,#8]
+ ldr r7,[r0,#12]
+ ldr ip,[r0,#16]
+
+ and r2,r4,#0x03ffffff @ base 2^32 -> base 2^26
+ mov r3,r4,lsr#26
+ veor d10,d10,d10
+ mov r4,r5,lsr#20
+ orr r3,r3,r5,lsl#6
+ veor d12,d12,d12
+ mov r5,r6,lsr#14
+ orr r4,r4,r6,lsl#12
+ veor d14,d14,d14
+ mov r6,r7,lsr#8
+ orr r5,r5,r7,lsl#18
+ veor d16,d16,d16
+ and r3,r3,#0x03ffffff
+ orr r6,r6,ip,lsl#24
+ veor d18,d18,d18
+ and r4,r4,#0x03ffffff
+ mov r1,#1
+ and r5,r5,#0x03ffffff
+ str r1,[r0,#36] @ set is_base2_26
+
+ vmov.32 d10[0],r2
+ vmov.32 d12[0],r3
+ vmov.32 d14[0],r4
+ vmov.32 d16[0],r5
+ vmov.32 d18[0],r6
+ adr r5,.Lzeros
+
+ ldmia sp!,{r1-r3,lr}
+ b .Lhash_loaded
+
+.align 4
+.Lbase2_26_neon:
+ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+ @ load hash value
+
+ veor d10,d10,d10
+ veor d12,d12,d12
+ veor d14,d14,d14
+ veor d16,d16,d16
+ veor d18,d18,d18
+ vld4.32 {d10[0],d12[0],d14[0],d16[0]},[r0]!
+ adr r5,.Lzeros
+ vld1.32 {d18[0]},[r0]
+ sub r0,r0,#16 @ rewind
+
+.Lhash_loaded:
+ add r4,r1,#32
+ mov r3,r3,lsl#24
+ tst r2,#31
+ beq .Leven
+
+ vld4.32 {d20[0],d22[0],d24[0],d26[0]},[r1]!
+ vmov.32 d28[0],r3
+ sub r2,r2,#16
+ add r4,r1,#32
+
+# ifdef __ARMEB__
+ vrev32.8 q10,q10
+ vrev32.8 q13,q13
+ vrev32.8 q11,q11
+ vrev32.8 q12,q12
+# endif
+ vsri.u32 d28,d26,#8 @ base 2^32 -> base 2^26
+ vshl.u32 d26,d26,#18
+
+ vsri.u32 d26,d24,#14
+ vshl.u32 d24,d24,#12
+ vadd.i32 d29,d28,d18 @ add hash value and move to #hi
+
+ vbic.i32 d26,#0xfc000000
+ vsri.u32 d24,d22,#20
+ vshl.u32 d22,d22,#6
+
+ vbic.i32 d24,#0xfc000000
+ vsri.u32 d22,d20,#26
+ vadd.i32 d27,d26,d16
+
+ vbic.i32 d20,#0xfc000000
+ vbic.i32 d22,#0xfc000000
+ vadd.i32 d25,d24,d14
+
+ vadd.i32 d21,d20,d10
+ vadd.i32 d23,d22,d12
+
+ mov r7,r5
+ add r6,r0,#48
+
+ cmp r2,r2
+ b .Long_tail
+
+.align 4
+.Leven:
+ subs r2,r2,#64
+ it lo
+ movlo r4,r5
+
+ vmov.i32 q14,#1<<24 @ padbit, yes, always
+ vld4.32 {d20,d22,d24,d26},[r1] @ inp[0:1]
+ add r1,r1,#64
+ vld4.32 {d21,d23,d25,d27},[r4] @ inp[2:3] (or 0)
+ add r4,r4,#64
+ itt hi
+ addhi r7,r0,#(48+1*9*4)
+ addhi r6,r0,#(48+3*9*4)
+
+# ifdef __ARMEB__
+ vrev32.8 q10,q10
+ vrev32.8 q13,q13
+ vrev32.8 q11,q11
+ vrev32.8 q12,q12
+# endif
+ vsri.u32 q14,q13,#8 @ base 2^32 -> base 2^26
+ vshl.u32 q13,q13,#18
+
+ vsri.u32 q13,q12,#14
+ vshl.u32 q12,q12,#12
+
+ vbic.i32 q13,#0xfc000000
+ vsri.u32 q12,q11,#20
+ vshl.u32 q11,q11,#6
+
+ vbic.i32 q12,#0xfc000000
+ vsri.u32 q11,q10,#26
+
+ vbic.i32 q10,#0xfc000000
+ vbic.i32 q11,#0xfc000000
+
+ bls .Lskip_loop
+
+ vld4.32 {d0[1],d1[1],d2[1],d3[1]},[r7]! @ load r^2
+ vld4.32 {d0[0],d1[0],d2[0],d3[0]},[r6]! @ load r^4
+ vld4.32 {d4[1],d5[1],d6[1],d7[1]},[r7]!
+ vld4.32 {d4[0],d5[0],d6[0],d7[0]},[r6]!
+ b .Loop_neon
+
+.align 5
+.Loop_neon:
+ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+ @ ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2
+ @ ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^3+inp[7]*r
+ @ ___________________/
+ @ ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2+inp[8])*r^2
+ @ ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^4+inp[7]*r^2+inp[9])*r
+ @ ___________________/ ____________________/
+ @
+ @ Note that we start with inp[2:3]*r^2. This is because it
+ @ doesn't depend on the reduction in the previous iteration.
+ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+ @ d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4
+ @ d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4
+ @ d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
+ @ d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4
+ @ d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
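+ @
+ @ The *5 multipliers follow from 2^130 = 5 (mod 2^130-5): any limb
+ @ product spilling past bit 130 wraps around scaled by 5, which is
+ @ why r1..r4 are also kept pre-multiplied by 5.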
+
+ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+ @ inp[2:3]*r^2
+
+ vadd.i32 d24,d24,d14 @ accumulate inp[0:1]
+ vmull.u32 q7,d25,d0[1]
+ vadd.i32 d20,d20,d10
+ vmull.u32 q5,d21,d0[1]
+ vadd.i32 d26,d26,d16
+ vmull.u32 q8,d27,d0[1]
+ vmlal.u32 q7,d23,d1[1]
+ vadd.i32 d22,d22,d12
+ vmull.u32 q6,d23,d0[1]
+
+ vadd.i32 d28,d28,d18
+ vmull.u32 q9,d29,d0[1]
+ subs r2,r2,#64
+ vmlal.u32 q5,d29,d2[1]
+ it lo
+ movlo r4,r5
+ vmlal.u32 q8,d25,d1[1]
+ vld1.32 d8[1],[r7,:32]
+ vmlal.u32 q6,d21,d1[1]
+ vmlal.u32 q9,d27,d1[1]
+
+ vmlal.u32 q5,d27,d4[1]
+ vmlal.u32 q8,d23,d3[1]
+ vmlal.u32 q9,d25,d3[1]
+ vmlal.u32 q6,d29,d4[1]
+ vmlal.u32 q7,d21,d3[1]
+
+ vmlal.u32 q8,d21,d5[1]
+ vmlal.u32 q5,d25,d6[1]
+ vmlal.u32 q9,d23,d5[1]
+ vmlal.u32 q6,d27,d6[1]
+ vmlal.u32 q7,d29,d6[1]
+
+ vmlal.u32 q8,d29,d8[1]
+ vmlal.u32 q5,d23,d8[1]
+ vmlal.u32 q9,d21,d7[1]
+ vmlal.u32 q6,d25,d8[1]
+ vmlal.u32 q7,d27,d8[1]
+
+ vld4.32 {d21,d23,d25,d27},[r4] @ inp[2:3] (or 0)
+ add r4,r4,#64
+
+ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+ @ (hash+inp[0:1])*r^4 and accumulate
+
+ vmlal.u32 q8,d26,d0[0]
+ vmlal.u32 q5,d20,d0[0]
+ vmlal.u32 q9,d28,d0[0]
+ vmlal.u32 q6,d22,d0[0]
+ vmlal.u32 q7,d24,d0[0]
+ vld1.32 d8[0],[r6,:32]
+
+ vmlal.u32 q8,d24,d1[0]
+ vmlal.u32 q5,d28,d2[0]
+ vmlal.u32 q9,d26,d1[0]
+ vmlal.u32 q6,d20,d1[0]
+ vmlal.u32 q7,d22,d1[0]
+
+ vmlal.u32 q8,d22,d3[0]
+ vmlal.u32 q5,d26,d4[0]
+ vmlal.u32 q9,d24,d3[0]
+ vmlal.u32 q6,d28,d4[0]
+ vmlal.u32 q7,d20,d3[0]
+
+ vmlal.u32 q8,d20,d5[0]
+ vmlal.u32 q5,d24,d6[0]
+ vmlal.u32 q9,d22,d5[0]
+ vmlal.u32 q6,d26,d6[0]
+ vmlal.u32 q8,d28,d8[0]
+
+ vmlal.u32 q7,d28,d6[0]
+ vmlal.u32 q5,d22,d8[0]
+ vmlal.u32 q9,d20,d7[0]
+ vmov.i32 q14,#1<<24 @ padbit, yes, always
+ vmlal.u32 q6,d24,d8[0]
+ vmlal.u32 q7,d26,d8[0]
+
+ vld4.32 {d20,d22,d24,d26},[r1] @ inp[0:1]
+ add r1,r1,#64
+# ifdef __ARMEB__
+ vrev32.8 q10,q10
+ vrev32.8 q11,q11
+ vrev32.8 q12,q12
+ vrev32.8 q13,q13
+# endif
+
+ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+ @ lazy reduction interleaved with base 2^32 -> base 2^26 of
+ @ inp[0:3] previously loaded to q10-q13 and smashed to q10-q14.
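+ @ A single partial carry pass suffices here: 26-bit limbs summed in
+ @ 64-bit lanes leave enough headroom that full canonical reduction
+ @ can be deferred to the very end.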
+
+ vshr.u64 q15,q8,#26
+ vmovn.i64 d16,q8
+ vshr.u64 q4,q5,#26
+ vmovn.i64 d10,q5
+ vadd.i64 q9,q9,q15 @ h3 -> h4
+ vbic.i32 d16,#0xfc000000
+ vsri.u32 q14,q13,#8 @ base 2^32 -> base 2^26
+ vadd.i64 q6,q6,q4 @ h0 -> h1
+ vshl.u32 q13,q13,#18
+ vbic.i32 d10,#0xfc000000
+
+ vshrn.u64 d30,q9,#26
+ vmovn.i64 d18,q9
+ vshr.u64 q4,q6,#26
+ vmovn.i64 d12,q6
+ vadd.i64 q7,q7,q4 @ h1 -> h2
+ vsri.u32 q13,q12,#14
+ vbic.i32 d18,#0xfc000000
+ vshl.u32 q12,q12,#12
+ vbic.i32 d12,#0xfc000000
+
+ vadd.i32 d10,d10,d30
+ vshl.u32 d30,d30,#2
+ vbic.i32 q13,#0xfc000000
+ vshrn.u64 d8,q7,#26
+ vmovn.i64 d14,q7
+ vaddl.u32 q5,d10,d30 @ h4 -> h0 [widen for a sec]
+ vsri.u32 q12,q11,#20
+ vadd.i32 d16,d16,d8 @ h2 -> h3
+ vshl.u32 q11,q11,#6
+ vbic.i32 d14,#0xfc000000
+ vbic.i32 q12,#0xfc000000
+
+ vshrn.u64 d30,q5,#26 @ re-narrow
+ vmovn.i64 d10,q5
+ vsri.u32 q11,q10,#26
+ vbic.i32 q10,#0xfc000000
+ vshr.u32 d8,d16,#26
+ vbic.i32 d16,#0xfc000000
+ vbic.i32 d10,#0xfc000000
+ vadd.i32 d12,d12,d30 @ h0 -> h1
+ vadd.i32 d18,d18,d8 @ h3 -> h4
+ vbic.i32 q11,#0xfc000000
+
+ bhi .Loop_neon
+
+.Lskip_loop:
+ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+ @ multiply (inp[0:1]+hash) or inp[2:3] by r^2:r^1
+
+ add r7,r0,#(48+0*9*4)
+ add r6,r0,#(48+1*9*4)
+ adds r2,r2,#32
+ it ne
+ movne r2,#0
+ bne .Long_tail
+
+ vadd.i32 d25,d24,d14 @ add hash value and move to #hi
+ vadd.i32 d21,d20,d10
+ vadd.i32 d27,d26,d16
+ vadd.i32 d23,d22,d12
+ vadd.i32 d29,d28,d18
+
+.Long_tail:
+ vld4.32 {d0[1],d1[1],d2[1],d3[1]},[r7]! @ load r^1
+ vld4.32 {d0[0],d1[0],d2[0],d3[0]},[r6]! @ load r^2
+
+ vadd.i32 d24,d24,d14 @ can be redundant
+ vmull.u32 q7,d25,d0
+ vadd.i32 d20,d20,d10
+ vmull.u32 q5,d21,d0
+ vadd.i32 d26,d26,d16
+ vmull.u32 q8,d27,d0
+ vadd.i32 d22,d22,d12
+ vmull.u32 q6,d23,d0
+ vadd.i32 d28,d28,d18
+ vmull.u32 q9,d29,d0
+
+ vmlal.u32 q5,d29,d2
+ vld4.32 {d4[1],d5[1],d6[1],d7[1]},[r7]!
+ vmlal.u32 q8,d25,d1
+ vld4.32 {d4[0],d5[0],d6[0],d7[0]},[r6]!
+ vmlal.u32 q6,d21,d1
+ vmlal.u32 q9,d27,d1
+ vmlal.u32 q7,d23,d1
+
+ vmlal.u32 q8,d23,d3
+ vld1.32 d8[1],[r7,:32]
+ vmlal.u32 q5,d27,d4
+ vld1.32 d8[0],[r6,:32]
+ vmlal.u32 q9,d25,d3
+ vmlal.u32 q6,d29,d4
+ vmlal.u32 q7,d21,d3
+
+ vmlal.u32 q8,d21,d5
+ it ne
+ addne r7,r0,#(48+2*9*4)
+ vmlal.u32 q5,d25,d6
+ it ne
+ addne r6,r0,#(48+3*9*4)
+ vmlal.u32 q9,d23,d5
+ vmlal.u32 q6,d27,d6
+ vmlal.u32 q7,d29,d6
+
+ vmlal.u32 q8,d29,d8
+ vorn q0,q0,q0 @ all-ones, can be redundant
+ vmlal.u32 q5,d23,d8
+ vshr.u64 q0,q0,#38
+ vmlal.u32 q9,d21,d7
+ vmlal.u32 q6,d25,d8
+ vmlal.u32 q7,d27,d8
+
+ beq .Lshort_tail
+
+ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+ @ (hash+inp[0:1])*r^4:r^3 and accumulate
+
+ vld4.32 {d0[1],d1[1],d2[1],d3[1]},[r7]! @ load r^3
+ vld4.32 {d0[0],d1[0],d2[0],d3[0]},[r6]! @ load r^4
+
+ vmlal.u32 q7,d24,d0
+ vmlal.u32 q5,d20,d0
+ vmlal.u32 q8,d26,d0
+ vmlal.u32 q6,d22,d0
+ vmlal.u32 q9,d28,d0
+
+ vmlal.u32 q5,d28,d2
+ vld4.32 {d4[1],d5[1],d6[1],d7[1]},[r7]!
+ vmlal.u32 q8,d24,d1
+ vld4.32 {d4[0],d5[0],d6[0],d7[0]},[r6]!
+ vmlal.u32 q6,d20,d1
+ vmlal.u32 q9,d26,d1
+ vmlal.u32 q7,d22,d1
+
+ vmlal.u32 q8,d22,d3
+ vld1.32 d8[1],[r7,:32]
+ vmlal.u32 q5,d26,d4
+ vld1.32 d8[0],[r6,:32]
+ vmlal.u32 q9,d24,d3
+ vmlal.u32 q6,d28,d4
+ vmlal.u32 q7,d20,d3
+
+ vmlal.u32 q8,d20,d5
+ vmlal.u32 q5,d24,d6
+ vmlal.u32 q9,d22,d5
+ vmlal.u32 q6,d26,d6
+ vmlal.u32 q7,d28,d6
+
+ vmlal.u32 q8,d28,d8
+ vorn q0,q0,q0 @ all-ones
+ vmlal.u32 q5,d22,d8
+ vshr.u64 q0,q0,#38
+ vmlal.u32 q9,d20,d7
+ vmlal.u32 q6,d24,d8
+ vmlal.u32 q7,d26,d8
+
+.Lshort_tail:
+ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+ @ horizontal addition
+
+ vadd.i64 d16,d16,d17
+ vadd.i64 d10,d10,d11
+ vadd.i64 d18,d18,d19
+ vadd.i64 d12,d12,d13
+ vadd.i64 d14,d14,d15
+
+ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+ @ lazy reduction, but without narrowing
+
+ vshr.u64 q15,q8,#26
+ vand.i64 q8,q8,q0
+ vshr.u64 q4,q5,#26
+ vand.i64 q5,q5,q0
+ vadd.i64 q9,q9,q15 @ h3 -> h4
+ vadd.i64 q6,q6,q4 @ h0 -> h1
+
+ vshr.u64 q15,q9,#26
+ vand.i64 q9,q9,q0
+ vshr.u64 q4,q6,#26
+ vand.i64 q6,q6,q0
+ vadd.i64 q7,q7,q4 @ h1 -> h2
+
+ vadd.i64 q5,q5,q15
+ vshl.u64 q15,q15,#2
+ vshr.u64 q4,q7,#26
+ vand.i64 q7,q7,q0
+ vadd.i64 q5,q5,q15 @ h4 -> h0
+ vadd.i64 q8,q8,q4 @ h2 -> h3
+
+ vshr.u64 q15,q5,#26
+ vand.i64 q5,q5,q0
+ vshr.u64 q4,q8,#26
+ vand.i64 q8,q8,q0
+ vadd.i64 q6,q6,q15 @ h0 -> h1
+ vadd.i64 q9,q9,q4 @ h3 -> h4
+
+ cmp r2,#0
+ bne .Leven
+
+ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+ @ store hash value
+
+ vst4.32 {d10[0],d12[0],d14[0],d16[0]},[r0]!
+ vst1.32 {d18[0]},[r0]
+
+ vldmia sp!,{d8-d15} @ epilogue
+ ldmia sp!,{r4-r7}
+ bx lr @ bx lr
+.size poly1305_blocks_neon,.-poly1305_blocks_neon
+
+.align 5
+.Lzeros:
+.long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
+#ifndef __KERNEL__
+.LOPENSSL_armcap:
+# ifdef _WIN32
+.word OPENSSL_armcap_P
+# else
+.word OPENSSL_armcap_P-.Lpoly1305_init
+# endif
+.comm OPENSSL_armcap_P,4,4
+.hidden OPENSSL_armcap_P
+#endif
+#endif
+.asciz "Poly1305 for ARMv4/NEON, CRYPTOGAMS by @dot-asm"
+.align 2
diff --git a/arch/arm/crypto/poly1305-glue.c b/arch/arm/crypto/poly1305-glue.c
new file mode 100644
index 000000000000..74a725ac89c9
--- /dev/null
+++ b/arch/arm/crypto/poly1305-glue.c
@@ -0,0 +1,276 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * OpenSSL/Cryptogams accelerated Poly1305 transform for ARM
+ *
+ * Copyright (C) 2019 Linaro Ltd. <ard.biesheuvel@linaro.org>
+ */
+
+#include <asm/hwcap.h>
+#include <asm/neon.h>
+#include <asm/simd.h>
+#include <asm/unaligned.h>
+#include <crypto/algapi.h>
+#include <crypto/internal/hash.h>
+#include <crypto/internal/poly1305.h>
+#include <crypto/internal/simd.h>
+#include <linux/cpufeature.h>
+#include <linux/crypto.h>
+#include <linux/jump_label.h>
+#include <linux/module.h>
+
+void poly1305_init_arm(void *state, const u8 *key);
+void poly1305_blocks_arm(void *state, const u8 *src, u32 len, u32 hibit);
+void poly1305_emit_arm(void *state, __le32 *digest, const u32 *nonce);
+
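+/*
+ * Weak stub so scalar-only builds still link; when the NEON object is
+ * built in, its definition of poly1305_blocks_neon() overrides this one.
+ */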
+void __weak poly1305_blocks_neon(void *state, const u8 *src, u32 len, u32 hibit)
+{
+}
+
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon);
+
+void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 *key)
+{
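+ /* The asm init clamps and stores r; s is kept here for the final
+ * (h + s) addition performed by poly1305_emit_arm. */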
+ poly1305_init_arm(&dctx->h, key);
+ dctx->s[0] = get_unaligned_le32(key + 16);
+ dctx->s[1] = get_unaligned_le32(key + 20);
+ dctx->s[2] = get_unaligned_le32(key + 24);
+ dctx->s[3] = get_unaligned_le32(key + 28);
+ dctx->buflen = 0;
+}
+EXPORT_SYMBOL(poly1305_init_arch);
+
+static int arm_poly1305_init(struct shash_desc *desc)
+{
+ struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
+
+ dctx->buflen = 0;
+ dctx->rset = 0;
+ dctx->sset = false;
+
+ return 0;
+}
+
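+/*
+ * The shash interface carries no setkey, so the first two blocks of
+ * input double as the key: r (clamped by poly1305_init_arm), then s.
+ */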
+static void arm_poly1305_blocks(struct poly1305_desc_ctx *dctx, const u8 *src,
+ u32 len, u32 hibit, bool do_neon)
+{
+ if (unlikely(!dctx->sset)) {
+ if (!dctx->rset) {
+ poly1305_init_arm(&dctx->h, src);
+ src += POLY1305_BLOCK_SIZE;
+ len -= POLY1305_BLOCK_SIZE;
+ dctx->rset = 1;
+ }
+ if (len >= POLY1305_BLOCK_SIZE) {
+ dctx->s[0] = get_unaligned_le32(src + 0);
+ dctx->s[1] = get_unaligned_le32(src + 4);
+ dctx->s[2] = get_unaligned_le32(src + 8);
+ dctx->s[3] = get_unaligned_le32(src + 12);
+ src += POLY1305_BLOCK_SIZE;
+ len -= POLY1305_BLOCK_SIZE;
+ dctx->sset = true;
+ }
+ if (len < POLY1305_BLOCK_SIZE)
+ return;
+ }
+
+ len &= ~(POLY1305_BLOCK_SIZE - 1);
+
+ if (static_branch_likely(&have_neon) && likely(do_neon))
+ poly1305_blocks_neon(&dctx->h, src, len, hibit);
+ else
+ poly1305_blocks_arm(&dctx->h, src, len, hibit);
+}
+
+static void arm_poly1305_do_update(struct poly1305_desc_ctx *dctx,
+ const u8 *src, u32 len, bool do_neon)
+{
+ if (unlikely(dctx->buflen)) {
+ u32 bytes = min(len, POLY1305_BLOCK_SIZE - dctx->buflen);
+
+ memcpy(dctx->buf + dctx->buflen, src, bytes);
+ src += bytes;
+ len -= bytes;
+ dctx->buflen += bytes;
+
+ if (dctx->buflen == POLY1305_BLOCK_SIZE) {
+ arm_poly1305_blocks(dctx, dctx->buf,
+ POLY1305_BLOCK_SIZE, 1, false);
+ dctx->buflen = 0;
+ }
+ }
+
+ if (likely(len >= POLY1305_BLOCK_SIZE)) {
+ arm_poly1305_blocks(dctx, src, len, 1, do_neon);
+ src += round_down(len, POLY1305_BLOCK_SIZE);
+ len %= POLY1305_BLOCK_SIZE;
+ }
+
+ if (unlikely(len)) {
+ dctx->buflen = len;
+ memcpy(dctx->buf, src, len);
+ }
+}
+
+static int arm_poly1305_update(struct shash_desc *desc,
+ const u8 *src, unsigned int srclen)
+{
+ struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
+
+ arm_poly1305_do_update(dctx, src, srclen, false);
+ return 0;
+}
+
+static int __maybe_unused arm_poly1305_update_neon(struct shash_desc *desc,
+ const u8 *src,
+ unsigned int srclen)
+{
+ struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
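+ /* Heuristic threshold: the NEON path only pays off on longer inputs. */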
+ bool do_neon = crypto_simd_usable() && srclen > 128;
+
+ if (static_branch_likely(&have_neon) && do_neon)
+ kernel_neon_begin();
+ arm_poly1305_do_update(dctx, src, srclen, do_neon);
+ if (static_branch_likely(&have_neon) && do_neon)
+ kernel_neon_end();
+ return 0;
+}
+
+void poly1305_update_arch(struct poly1305_desc_ctx *dctx, const u8 *src,
+ unsigned int nbytes)
+{
+ bool do_neon = IS_ENABLED(CONFIG_KERNEL_MODE_NEON) &&
+ crypto_simd_usable();
+
+ if (unlikely(dctx->buflen)) {
+ u32 bytes = min(nbytes, POLY1305_BLOCK_SIZE - dctx->buflen);
+
+ memcpy(dctx->buf + dctx->buflen, src, bytes);
+ src += bytes;
+ nbytes -= bytes;
+ dctx->buflen += bytes;
+
+ if (dctx->buflen == POLY1305_BLOCK_SIZE) {
+ poly1305_blocks_arm(&dctx->h, dctx->buf,
+ POLY1305_BLOCK_SIZE, 1);
+ dctx->buflen = 0;
+ }
+ }
+
+ if (likely(nbytes >= POLY1305_BLOCK_SIZE)) {
+ unsigned int len = round_down(nbytes, POLY1305_BLOCK_SIZE);
+
+ if (static_branch_likely(&have_neon) && do_neon) {
+ kernel_neon_begin();
+ poly1305_blocks_neon(&dctx->h, src, len, 1);
+ kernel_neon_end();
+ } else {
+ poly1305_blocks_arm(&dctx->h, src, len, 1);
+ }
+ src += len;
+ nbytes %= POLY1305_BLOCK_SIZE;
+ }
+
+ if (unlikely(nbytes)) {
+ dctx->buflen = nbytes;
+ memcpy(dctx->buf, src, nbytes);
+ }
+}
+EXPORT_SYMBOL(poly1305_update_arch);
+
+void poly1305_final_arch(struct poly1305_desc_ctx *dctx, u8 *dst)
+{
+ __le32 digest[4];
+ u64 f = 0;
+
+ if (unlikely(dctx->buflen)) {
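+ /* Append the 0x01 pad byte, zero-fill, and process the final
+ * partial block with the high bit clear. */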
+ dctx->buf[dctx->buflen++] = 1;
+ memset(dctx->buf + dctx->buflen, 0,
+ POLY1305_BLOCK_SIZE - dctx->buflen);
+ poly1305_blocks_arm(&dctx->h, dctx->buf, POLY1305_BLOCK_SIZE, 0);
+ }
+
+ poly1305_emit_arm(&dctx->h, digest, dctx->s);
+
+ /* mac = (h + s) % (2^128) */
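+ /* f carries each 32-bit limb sum into the next word. */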
+ f = (f >> 32) + le32_to_cpu(digest[0]);
+ put_unaligned_le32(f, dst);
+ f = (f >> 32) + le32_to_cpu(digest[1]);
+ put_unaligned_le32(f, dst + 4);
+ f = (f >> 32) + le32_to_cpu(digest[2]);
+ put_unaligned_le32(f, dst + 8);
+ f = (f >> 32) + le32_to_cpu(digest[3]);
+ put_unaligned_le32(f, dst + 12);
+
+ *dctx = (struct poly1305_desc_ctx){};
+}
+EXPORT_SYMBOL(poly1305_final_arch);
+
+static int arm_poly1305_final(struct shash_desc *desc, u8 *dst)
+{
+ struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
+
+ if (unlikely(!dctx->sset))
+ return -ENOKEY;
+
+ poly1305_final_arch(dctx, dst);
+ return 0;
+}
+
+static struct shash_alg arm_poly1305_algs[] = {{
+ .init = arm_poly1305_init,
+ .update = arm_poly1305_update,
+ .final = arm_poly1305_final,
+ .digestsize = POLY1305_DIGEST_SIZE,
+ .descsize = sizeof(struct poly1305_desc_ctx),
+
+ .base.cra_name = "poly1305",
+ .base.cra_driver_name = "poly1305-arm",
+ .base.cra_priority = 150,
+ .base.cra_blocksize = POLY1305_BLOCK_SIZE,
+ .base.cra_module = THIS_MODULE,
+#ifdef CONFIG_KERNEL_MODE_NEON
+}, {
+ .init = arm_poly1305_init,
+ .update = arm_poly1305_update_neon,
+ .final = arm_poly1305_final,
+ .digestsize = POLY1305_DIGEST_SIZE,
+ .descsize = sizeof(struct poly1305_desc_ctx),
+
+ .base.cra_name = "poly1305",
+ .base.cra_driver_name = "poly1305-neon",
+ .base.cra_priority = 200,
+ .base.cra_blocksize = POLY1305_BLOCK_SIZE,
+ .base.cra_module = THIS_MODULE,
+#endif
+}};
+
+static int __init arm_poly1305_mod_init(void)
+{
+ if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) &&
+ (elf_hwcap & HWCAP_NEON))
+ static_branch_enable(&have_neon);
+ else
+ /* register only the first entry */
+ return crypto_register_shash(&arm_poly1305_algs[0]);
+
+ return crypto_register_shashes(arm_poly1305_algs,
+ ARRAY_SIZE(arm_poly1305_algs));
+}
+
+static void __exit arm_poly1305_mod_exit(void)
+{
+ if (!static_branch_likely(&have_neon)) {
+ crypto_unregister_shash(&arm_poly1305_algs[0]);
+ return;
+ }
+ crypto_unregister_shashes(arm_poly1305_algs,
+ ARRAY_SIZE(arm_poly1305_algs));
+}
+
+module_init(arm_poly1305_mod_init);
+module_exit(arm_poly1305_mod_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS_CRYPTO("poly1305");
+MODULE_ALIAS_CRYPTO("poly1305-arm");
+MODULE_ALIAS_CRYPTO("poly1305-neon");
diff --git a/arch/arm/crypto/sha1-ce-core.S b/arch/arm/crypto/sha1-ce-core.S
index 49a74a441aec..8a702e051738 100644
--- a/arch/arm/crypto/sha1-ce-core.S
+++ b/arch/arm/crypto/sha1-ce-core.S
@@ -10,6 +10,7 @@
#include <asm/assembler.h>
.text
+ .arch armv8-a
.fpu crypto-neon-fp-armv8
k0 .req q0
diff --git a/arch/arm/crypto/sha2-ce-core.S b/arch/arm/crypto/sha2-ce-core.S
index 4ad517577e23..b6369d2440a1 100644
--- a/arch/arm/crypto/sha2-ce-core.S
+++ b/arch/arm/crypto/sha2-ce-core.S
@@ -10,6 +10,7 @@
#include <asm/assembler.h>
.text
+ .arch armv8-a
.fpu crypto-neon-fp-armv8
k0 .req q7
diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h
index 0125aa059d5b..9c04bd810d07 100644
--- a/arch/arm/include/asm/kvm_arm.h
+++ b/arch/arm/include/asm/kvm_arm.h
@@ -162,6 +162,7 @@
#define HSR_ISV (_AC(1, UL) << HSR_ISV_SHIFT)
#define HSR_SRT_SHIFT (16)
#define HSR_SRT_MASK (0xf << HSR_SRT_SHIFT)
+#define HSR_CM (1 << 8)
#define HSR_FSC (0x3f)
#define HSR_FSC_TYPE (0x3c)
#define HSR_SSE (1 << 21)
diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
index 40002416efec..9b118516d2db 100644
--- a/arch/arm/include/asm/kvm_emulate.h
+++ b/arch/arm/include/asm/kvm_emulate.h
@@ -95,12 +95,12 @@ static inline unsigned long *vcpu_hcr(const struct kvm_vcpu *vcpu)
return (unsigned long *)&vcpu->arch.hcr;
}
-static inline void vcpu_clear_wfe_traps(struct kvm_vcpu *vcpu)
+static inline void vcpu_clear_wfx_traps(struct kvm_vcpu *vcpu)
{
vcpu->arch.hcr &= ~HCR_TWE;
}
-static inline void vcpu_set_wfe_traps(struct kvm_vcpu *vcpu)
+static inline void vcpu_set_wfx_traps(struct kvm_vcpu *vcpu)
{
vcpu->arch.hcr |= HCR_TWE;
}
@@ -167,6 +167,11 @@ static inline bool kvm_vcpu_dabt_isvalid(struct kvm_vcpu *vcpu)
return kvm_vcpu_get_hsr(vcpu) & HSR_ISV;
}
+static inline unsigned long kvm_vcpu_dabt_iss_nisv_sanitized(const struct kvm_vcpu *vcpu)
+{
+ return kvm_vcpu_get_hsr(vcpu) & (HSR_CM | HSR_WNR | HSR_FSC);
+}
+
static inline bool kvm_vcpu_dabt_iswrite(struct kvm_vcpu *vcpu)
{
return kvm_vcpu_get_hsr(vcpu) & HSR_WNR;
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 8a37c8e89777..556cd818eccf 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -7,6 +7,7 @@
#ifndef __ARM_KVM_HOST_H__
#define __ARM_KVM_HOST_H__
+#include <linux/arm-smccc.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/kvm_types.h>
@@ -38,6 +39,7 @@
KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_IRQ_PENDING KVM_ARCH_REQ(1)
#define KVM_REQ_VCPU_RESET KVM_ARCH_REQ(2)
+#define KVM_REQ_RECORD_STEAL KVM_ARCH_REQ(3)
DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
@@ -76,6 +78,14 @@ struct kvm_arch {
/* Mandated version of PSCI */
u32 psci_version;
+
+ /*
+ * If we encounter a data abort without valid instruction syndrome
+ * information, report this to user space. User space can (and
+ * should) opt in to this feature if KVM_CAP_ARM_NISV_TO_USER is
+ * supported.
+ */
+ bool return_nisv_io_abort_to_user;
};
#define KVM_NR_MEM_OBJS 40
@@ -323,6 +333,29 @@ static inline int kvm_arch_vm_ioctl_check_extension(struct kvm *kvm, long ext)
int kvm_perf_init(void);
int kvm_perf_teardown(void);
+static inline long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu)
+{
+ return SMCCC_RET_NOT_SUPPORTED;
+}
+
+static inline gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu)
+{
+ return GPA_INVALID;
+}
+
+static inline void kvm_update_stolen_time(struct kvm_vcpu *vcpu)
+{
+}
+
+static inline void kvm_arm_pvtime_vcpu_init(struct kvm_vcpu_arch *vcpu_arch)
+{
+}
+
+static inline bool kvm_arm_is_pvtime_enabled(struct kvm_vcpu_arch *vcpu_arch)
+{
+ return false;
+}
+
void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot);
struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
diff --git a/arch/arm/include/uapi/asm/kvm.h b/arch/arm/include/uapi/asm/kvm.h
index 2769360f195c..03cd7c19a683 100644
--- a/arch/arm/include/uapi/asm/kvm.h
+++ b/arch/arm/include/uapi/asm/kvm.h
@@ -131,8 +131,9 @@ struct kvm_vcpu_events {
struct {
__u8 serror_pending;
__u8 serror_has_esr;
+ __u8 ext_dabt_pending;
/* Align it to 8 bytes */
- __u8 pad[6];
+ __u8 pad[5];
__u64 serror_esr;
} exception;
__u32 reserved[12];
diff --git a/arch/arm/kernel/vmlinux-xip.lds.S b/arch/arm/kernel/vmlinux-xip.lds.S
index 8c74037ade22..21b8b271c80d 100644
--- a/arch/arm/kernel/vmlinux-xip.lds.S
+++ b/arch/arm/kernel/vmlinux-xip.lds.S
@@ -70,8 +70,6 @@ SECTIONS
ARM_UNWIND_SECTIONS
#endif
- NOTES
-
_etext = .; /* End of text and rodata section */
ARM_VECTORS
@@ -114,7 +112,7 @@ SECTIONS
. = ALIGN(THREAD_SIZE);
_sdata = .;
- RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
+ RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
.data.ro_after_init : AT(ADDR(.data.ro_after_init) - LOAD_OFFSET) {
*(.data..ro_after_init)
}
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 23150c0f0f4d..319ccb10846a 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -81,8 +81,6 @@ SECTIONS
ARM_UNWIND_SECTIONS
#endif
- NOTES
-
#ifdef CONFIG_STRICT_KERNEL_RWX
. = ALIGN(1<<SECTION_SHIFT);
#else
@@ -143,7 +141,7 @@ SECTIONS
__init_end = .;
_sdata = .;
- RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
+ RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
_edata = .;
BSS_SECTION(0, 0, 0)
diff --git a/arch/arm/kvm/Makefile b/arch/arm/kvm/Makefile
index b76b75bd9e00..e442d82821df 100644
--- a/arch/arm/kvm/Makefile
+++ b/arch/arm/kvm/Makefile
@@ -24,7 +24,7 @@ obj-y += kvm-arm.o init.o interrupts.o
obj-y += handle_exit.o guest.o emulate.o reset.o
obj-y += coproc.o coproc_a15.o coproc_a7.o vgic-v3-coproc.o
obj-y += $(KVM)/arm/arm.o $(KVM)/arm/mmu.o $(KVM)/arm/mmio.o
-obj-y += $(KVM)/arm/psci.o $(KVM)/arm/perf.o
+obj-y += $(KVM)/arm/psci.o $(KVM)/arm/perf.o $(KVM)/arm/hypercalls.o
obj-y += $(KVM)/arm/aarch32.o
obj-y += $(KVM)/arm/vgic/vgic.o
diff --git a/arch/arm/kvm/guest.c b/arch/arm/kvm/guest.c
index 684cf64b4033..0e6f23504c26 100644
--- a/arch/arm/kvm/guest.c
+++ b/arch/arm/kvm/guest.c
@@ -21,6 +21,10 @@
#define VCPU_STAT(x) { #x, offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU }
struct kvm_stats_debugfs_item debugfs_entries[] = {
+ VCPU_STAT(halt_successful_poll),
+ VCPU_STAT(halt_attempted_poll),
+ VCPU_STAT(halt_poll_invalid),
+ VCPU_STAT(halt_wakeup),
VCPU_STAT(hvc_exit_stat),
VCPU_STAT(wfe_exit_stat),
VCPU_STAT(wfi_exit_stat),
@@ -255,6 +259,12 @@ int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
{
events->exception.serror_pending = !!(*vcpu_hcr(vcpu) & HCR_VA);
+ /*
+ * We never return a pending ext_dabt here because we deliver it to
+ * the virtual CPU directly when setting the event and it's no longer
+ * 'pending' at this point.
+ */
+
return 0;
}
@@ -263,12 +273,16 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
{
bool serror_pending = events->exception.serror_pending;
bool has_esr = events->exception.serror_has_esr;
+ bool ext_dabt_pending = events->exception.ext_dabt_pending;
if (serror_pending && has_esr)
return -EINVAL;
else if (serror_pending)
kvm_inject_vabt(vcpu);
+ if (ext_dabt_pending)
+ kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
+
return 0;
}
diff --git a/arch/arm/kvm/handle_exit.c b/arch/arm/kvm/handle_exit.c
index 2a6a1394d26e..e58a89d2f13f 100644
--- a/arch/arm/kvm/handle_exit.c
+++ b/arch/arm/kvm/handle_exit.c
@@ -9,7 +9,7 @@
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_mmu.h>
-#include <kvm/arm_psci.h>
+#include <kvm/arm_hypercalls.h>
#include <trace/events/kvm.h>
#include "trace.h"
diff --git a/arch/arm/mach-imx/cpuidle-imx6q.c b/arch/arm/mach-imx/cpuidle-imx6q.c
index 39a7d9393641..24dd5bbe60e4 100644
--- a/arch/arm/mach-imx/cpuidle-imx6q.c
+++ b/arch/arm/mach-imx/cpuidle-imx6q.c
@@ -62,13 +62,13 @@ static struct cpuidle_driver imx6q_cpuidle_driver = {
*/
void imx6q_cpuidle_fec_irqs_used(void)
{
- imx6q_cpuidle_driver.states[1].disabled = true;
+ cpuidle_driver_state_disabled(&imx6q_cpuidle_driver, 1, true);
}
EXPORT_SYMBOL_GPL(imx6q_cpuidle_fec_irqs_used);
void imx6q_cpuidle_fec_irqs_unused(void)
{
- imx6q_cpuidle_driver.states[1].disabled = false;
+ cpuidle_driver_state_disabled(&imx6q_cpuidle_driver, 1, false);
}
EXPORT_SYMBOL_GPL(imx6q_cpuidle_fec_irqs_unused);
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
index 8f208197988f..1e1e86d17fc5 100644
--- a/arch/arm/mach-omap2/Makefile
+++ b/arch/arm/mach-omap2/Makefile
@@ -216,9 +216,6 @@ obj-$(CONFIG_MACH_NOKIA_N8X0) += board-n8x0.o
# Platform specific device init code
-omap-hsmmc-$(CONFIG_MMC_OMAP_HS) := hsmmc.o
-obj-y += $(omap-hsmmc-m) $(omap-hsmmc-y)
-
obj-y += omap_phy_internal.o
obj-$(CONFIG_MACH_OMAP2_TUSB6010) += usb-tusb6010.o
diff --git a/arch/arm/mach-omap2/common.h b/arch/arm/mach-omap2/common.h
index 6316da3623b3..223b37c48389 100644
--- a/arch/arm/mach-omap2/common.h
+++ b/arch/arm/mach-omap2/common.h
@@ -352,7 +352,6 @@ void omap_pcs_legacy_init(int irq, void (*rearm)(void));
struct omap_sdrc_params;
extern void omap_sdrc_init(struct omap_sdrc_params *sdrc_cs0,
struct omap_sdrc_params *sdrc_cs1);
-struct omap2_hsmmc_info;
extern void omap_reserve(void);
struct omap_hwmod;
diff --git a/arch/arm/mach-omap2/hsmmc.c b/arch/arm/mach-omap2/hsmmc.c
deleted file mode 100644
index 63423ea6a240..000000000000
--- a/arch/arm/mach-omap2/hsmmc.c
+++ /dev/null
@@ -1,171 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * linux/arch/arm/mach-omap2/hsmmc.c
- *
- * Copyright (C) 2007-2008 Texas Instruments
- * Copyright (C) 2008 Nokia Corporation
- * Author: Texas Instruments
- */
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/delay.h>
-#include <linux/mmc/host.h>
-#include <linux/platform_data/hsmmc-omap.h>
-
-#include "soc.h"
-#include "omap_device.h"
-
-#include "hsmmc.h"
-#include "control.h"
-
-#if IS_ENABLED(CONFIG_MMC_OMAP_HS)
-
-static u16 control_pbias_offset;
-static u16 control_devconf1_offset;
-
-#define HSMMC_NAME_LEN 9
-
-static int __init omap_hsmmc_pdata_init(struct omap2_hsmmc_info *c,
- struct omap_hsmmc_platform_data *mmc)
-{
- char *hc_name;
-
- hc_name = kzalloc(HSMMC_NAME_LEN + 1, GFP_KERNEL);
- if (!hc_name)
- return -ENOMEM;
-
- snprintf(hc_name, (HSMMC_NAME_LEN + 1), "mmc%islot%i", c->mmc, 1);
- mmc->name = hc_name;
- mmc->caps = c->caps;
- mmc->reg_offset = 0;
-
- return 0;
-}
-
-static int omap_hsmmc_done;
-
-void omap_hsmmc_late_init(struct omap2_hsmmc_info *c)
-{
- struct platform_device *pdev;
- int res;
-
- if (omap_hsmmc_done)
- return;
-
- omap_hsmmc_done = 1;
-
- for (; c->mmc; c++) {
- pdev = c->pdev;
- if (!pdev)
- continue;
- res = omap_device_register(pdev);
- if (res)
- pr_err("Could not late init MMC\n");
- }
-}
-
-#define MAX_OMAP_MMC_HWMOD_NAME_LEN 16
-
-static void __init omap_hsmmc_init_one(struct omap2_hsmmc_info *hsmmcinfo,
- int ctrl_nr)
-{
- struct omap_hwmod *oh;
- struct omap_hwmod *ohs[1];
- struct omap_device *od;
- struct platform_device *pdev;
- char oh_name[MAX_OMAP_MMC_HWMOD_NAME_LEN];
- struct omap_hsmmc_platform_data *mmc_data;
- struct omap_hsmmc_dev_attr *mmc_dev_attr;
- char *name;
- int res;
-
- mmc_data = kzalloc(sizeof(*mmc_data), GFP_KERNEL);
- if (!mmc_data)
- return;
-
- res = omap_hsmmc_pdata_init(hsmmcinfo, mmc_data);
- if (res < 0)
- goto free_mmc;
-
- name = "omap_hsmmc";
- res = snprintf(oh_name, MAX_OMAP_MMC_HWMOD_NAME_LEN,
- "mmc%d", ctrl_nr);
- WARN(res >= MAX_OMAP_MMC_HWMOD_NAME_LEN,
- "String buffer overflow in MMC%d device setup\n", ctrl_nr);
-
- oh = omap_hwmod_lookup(oh_name);
- if (!oh) {
- pr_err("Could not look up %s\n", oh_name);
- goto free_name;
- }
- ohs[0] = oh;
- if (oh->dev_attr != NULL) {
- mmc_dev_attr = oh->dev_attr;
- mmc_data->controller_flags = mmc_dev_attr->flags;
- }
-
- pdev = platform_device_alloc(name, ctrl_nr - 1);
- if (!pdev) {
- pr_err("Could not allocate pdev for %s\n", name);
- goto free_name;
- }
- dev_set_name(&pdev->dev, "%s.%d", pdev->name, pdev->id);
-
- od = omap_device_alloc(pdev, ohs, 1);
- if (IS_ERR(od)) {
- pr_err("Could not allocate od for %s\n", name);
- goto put_pdev;
- }
-
- res = platform_device_add_data(pdev, mmc_data,
- sizeof(struct omap_hsmmc_platform_data));
- if (res) {
- pr_err("Could not add pdata for %s\n", name);
- goto put_pdev;
- }
-
- hsmmcinfo->pdev = pdev;
-
- res = omap_device_register(pdev);
- if (res) {
- pr_err("Could not register od for %s\n", name);
- goto free_od;
- }
-
- goto free_mmc;
-
-free_od:
- omap_device_delete(od);
-
-put_pdev:
- platform_device_put(pdev);
-
-free_name:
- kfree(mmc_data->name);
-
-free_mmc:
- kfree(mmc_data);
-}
-
-void __init omap_hsmmc_init(struct omap2_hsmmc_info *controllers)
-{
- if (omap_hsmmc_done)
- return;
-
- omap_hsmmc_done = 1;
-
- if (cpu_is_omap2430()) {
- control_pbias_offset = OMAP243X_CONTROL_PBIAS_LITE;
- control_devconf1_offset = OMAP243X_CONTROL_DEVCONF1;
- } else {
- control_pbias_offset = OMAP343X_CONTROL_PBIAS_LITE;
- control_devconf1_offset = OMAP343X_CONTROL_DEVCONF1;
- }
-
- for (; controllers->mmc; controllers++)
- omap_hsmmc_init_one(controllers, controllers->mmc);
-
-}
-
-#endif
diff --git a/arch/arm/mach-omap2/hsmmc.h b/arch/arm/mach-omap2/hsmmc.h
deleted file mode 100644
index 76c5ed2afa72..000000000000
--- a/arch/arm/mach-omap2/hsmmc.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * MMC definitions for OMAP2
- */
-
-struct mmc_card;
-
-struct omap2_hsmmc_info {
- u8 mmc; /* controller 1/2/3 */
- u32 caps; /* 4/8 wires and any additional host
- * capabilities OR'd (ref. linux/mmc/host.h) */
- struct platform_device *pdev; /* mmc controller instance */
- /* init some special card */
- void (*init_card)(struct mmc_card *card);
-};
-
-#if IS_ENABLED(CONFIG_MMC_OMAP_HS)
-
-void omap_hsmmc_init(struct omap2_hsmmc_info *);
-void omap_hsmmc_late_init(struct omap2_hsmmc_info *);
-
-#else
-
-static inline void omap_hsmmc_init(struct omap2_hsmmc_info *info)
-{
-}
-
-static inline void omap_hsmmc_late_init(struct omap2_hsmmc_info *info)
-{
-}
-
-#endif
diff --git a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c
index 2efd18e8824c..c47a2afc91e5 100644
--- a/arch/arm/mach-omap2/pdata-quirks.c
+++ b/arch/arm/mach-omap2/pdata-quirks.c
@@ -7,7 +7,6 @@
#include <linux/clk.h>
#include <linux/davinci_emac.h>
#include <linux/gpio.h>
-#include <linux/gpio/machine.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/of_platform.h>
@@ -33,7 +32,6 @@
#include "omap_device.h"
#include "omap-secure.h"
#include "soc.h"
-#include "hsmmc.h"
static struct omap_hsmmc_platform_data __maybe_unused mmc_pdata[2];
@@ -269,21 +267,13 @@ static void __init am3517_evm_legacy_init(void)
am35xx_emac_reset();
}
-static struct platform_device omap3_rom_rng_device = {
- .name = "omap3-rom-rng",
- .id = -1,
- .dev = {
- .platform_data = rx51_secure_rng_call,
- },
-};
-
static void __init nokia_n900_legacy_init(void)
{
hsmmc2_internal_input_clk();
mmc_pdata[0].name = "external";
mmc_pdata[1].name = "internal";
- if (omap_type() == OMAP2_DEVICE_TYPE_SEC) {
+ if (omap_type() != OMAP2_DEVICE_TYPE_GP) {
if (IS_ENABLED(CONFIG_ARM_ERRATA_430973)) {
pr_info("RX-51: Enabling ARM errata 430973 workaround\n");
/* set IBE to 1 */
@@ -292,9 +282,6 @@ static void __init nokia_n900_legacy_init(void)
pr_warn("RX-51: Not enabling ARM errata 430973 workaround\n");
pr_warn("Thumb binaries may crash randomly without this workaround\n");
}
-
- pr_info("RX-51: Registering OMAP3 HWRNG device\n");
- platform_device_register(&omap3_rom_rng_device);
}
}
@@ -311,118 +298,15 @@ static void __init omap3_logicpd_torpedo_init(void)
}
/* omap3pandora legacy devices */
-#define PANDORA_WIFI_IRQ_GPIO 21
-#define PANDORA_WIFI_NRESET_GPIO 23
static struct platform_device pandora_backlight = {
.name = "pandora-backlight",
.id = -1,
};
-static struct regulator_consumer_supply pandora_vmmc3_supply[] = {
- REGULATOR_SUPPLY("vmmc", "omap_hsmmc.2"),
-};
-
-static struct regulator_init_data pandora_vmmc3 = {
- .constraints = {
- .valid_ops_mask = REGULATOR_CHANGE_STATUS,
- },
- .num_consumer_supplies = ARRAY_SIZE(pandora_vmmc3_supply),
- .consumer_supplies = pandora_vmmc3_supply,
-};
-
-static struct fixed_voltage_config pandora_vwlan = {
- .supply_name = "vwlan",
- .microvolts = 1800000, /* 1.8V */
- .startup_delay = 50000, /* 50ms */
- .init_data = &pandora_vmmc3,
-};
-
-static struct platform_device pandora_vwlan_device = {
- .name = "reg-fixed-voltage",
- .id = 1,
- .dev = {
- .platform_data = &pandora_vwlan,
- },
-};
-
-static struct gpiod_lookup_table pandora_vwlan_gpiod_table = {
- .dev_id = "reg-fixed-voltage.1",
- .table = {
- /*
- * As this is a low GPIO number it should be at the first
- * GPIO bank.
- */
- GPIO_LOOKUP("gpio-0-31", PANDORA_WIFI_NRESET_GPIO,
- NULL, GPIO_ACTIVE_HIGH),
- { },
- },
-};
-
-static void pandora_wl1251_init_card(struct mmc_card *card)
-{
- /*
- * We have TI wl1251 attached to MMC3. Pass this information to
- * SDIO core because it can't be probed by normal methods.
- */
- if (card->type == MMC_TYPE_SDIO || card->type == MMC_TYPE_SD_COMBO) {
- card->quirks |= MMC_QUIRK_NONSTD_SDIO;
- card->cccr.wide_bus = 1;
- card->cis.vendor = 0x104c;
- card->cis.device = 0x9066;
- card->cis.blksize = 512;
- card->cis.max_dtr = 24000000;
- card->ocr = 0x80;
- }
-}
-
-static struct omap2_hsmmc_info pandora_mmc3[] = {
- {
- .mmc = 3,
- .caps = MMC_CAP_4_BIT_DATA | MMC_CAP_POWER_OFF_CARD,
- .init_card = pandora_wl1251_init_card,
- },
- {} /* Terminator */
-};
-
-static void __init pandora_wl1251_init(void)
-{
- struct wl1251_platform_data pandora_wl1251_pdata;
- int ret;
-
- memset(&pandora_wl1251_pdata, 0, sizeof(pandora_wl1251_pdata));
-
- pandora_wl1251_pdata.power_gpio = -1;
-
- ret = gpio_request_one(PANDORA_WIFI_IRQ_GPIO, GPIOF_IN, "wl1251 irq");
- if (ret < 0)
- goto fail;
-
- pandora_wl1251_pdata.irq = gpio_to_irq(PANDORA_WIFI_IRQ_GPIO);
- if (pandora_wl1251_pdata.irq < 0)
- goto fail_irq;
-
- pandora_wl1251_pdata.use_eeprom = true;
- ret = wl1251_set_platform_data(&pandora_wl1251_pdata);
- if (ret < 0)
- goto fail_irq;
-
- return;
-
-fail_irq:
- gpio_free(PANDORA_WIFI_IRQ_GPIO);
-fail:
- pr_err("wl1251 board initialisation failed\n");
-}
-
static void __init omap3_pandora_legacy_init(void)
{
platform_device_register(&pandora_backlight);
- gpiod_add_lookup_table(&pandora_vwlan_gpiod_table);
- platform_device_register(&pandora_vwlan_device);
- omap_hsmmc_init(pandora_mmc3);
- omap_hsmmc_late_init(pandora_mmc3);
- pandora_wl1251_init();
}
#endif /* CONFIG_ARCH_OMAP3 */
@@ -638,6 +522,7 @@ static struct of_dev_auxdata omap_auxdata_lookup[] = {
OF_DEV_AUXDATA("ti,davinci_mdio", 0x5c030000, "davinci_mdio.0", NULL),
OF_DEV_AUXDATA("ti,am3517-emac", 0x5c000000, "davinci_emac.0",
&am35xx_emac_pdata),
+ OF_DEV_AUXDATA("nokia,n900-rom-rng", 0, NULL, rx51_secure_rng_call),
/* McBSP modules with sidetone core */
#if IS_ENABLED(CONFIG_SND_SOC_OMAP_MCBSP)
OF_DEV_AUXDATA("ti,omap3-mcbsp", 0x49022000, "49022000.mcbsp", &mcbsp_pdata),
diff --git a/arch/arm/mach-pxa/icontrol.c b/arch/arm/mach-pxa/icontrol.c
index 865b10344ea2..0474a4b1394d 100644
--- a/arch/arm/mach-pxa/icontrol.c
+++ b/arch/arm/mach-pxa/icontrol.c
@@ -12,6 +12,7 @@
#include <linux/irq.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/gpio.h>
#include <asm/mach-types.h>
@@ -22,7 +23,6 @@
#include <linux/spi/spi.h>
#include <linux/spi/pxa2xx_spi.h>
-#include <linux/can/platform/mcp251x.h>
#include <linux/regulator/machine.h>
#include "generic.h"
@@ -69,8 +69,9 @@ static struct pxa2xx_spi_chip mcp251x_chip_info4 = {
.gpio_cs = ICONTROL_MCP251x_nCS4
};
-static struct mcp251x_platform_data mcp251x_info = {
- .oscillator_frequency = 16E6,
+static const struct property_entry mcp251x_properties[] = {
+ PROPERTY_ENTRY_U32("clock-frequency", 16000000),
+ {}
};
static struct spi_board_info mcp251x_board_info[] = {
@@ -79,7 +80,7 @@ static struct spi_board_info mcp251x_board_info[] = {
.max_speed_hz = 6500000,
.bus_num = 3,
.chip_select = 0,
- .platform_data = &mcp251x_info,
+ .properties = mcp251x_properties,
.controller_data = &mcp251x_chip_info1,
.irq = PXA_GPIO_TO_IRQ(ICONTROL_MCP251x_nIRQ1)
},
diff --git a/arch/arm/mach-pxa/zeus.c b/arch/arm/mach-pxa/zeus.c
index da113c8eefbf..b27fc7ac9cea 100644
--- a/arch/arm/mach-pxa/zeus.c
+++ b/arch/arm/mach-pxa/zeus.c
@@ -13,6 +13,7 @@
#include <linux/leds.h>
#include <linux/irq.h>
#include <linux/pm.h>
+#include <linux/property.h>
#include <linux/gpio.h>
#include <linux/gpio/machine.h>
#include <linux/serial_8250.h>
@@ -27,7 +28,6 @@
#include <linux/platform_data/i2c-pxa.h>
#include <linux/platform_data/pca953x.h>
#include <linux/apm-emulation.h>
-#include <linux/can/platform/mcp251x.h>
#include <linux/regulator/fixed.h>
#include <linux/regulator/machine.h>
@@ -428,14 +428,15 @@ static struct gpiod_lookup_table can_regulator_gpiod_table = {
},
};
-static struct mcp251x_platform_data zeus_mcp2515_pdata = {
- .oscillator_frequency = 16*1000*1000,
+static const struct property_entry mcp251x_properties[] = {
+ PROPERTY_ENTRY_U32("clock-frequency", 16000000),
+ {}
};
static struct spi_board_info zeus_spi_board_info[] = {
[0] = {
.modalias = "mcp2515",
- .platform_data = &zeus_mcp2515_pdata,
+ .properties = mcp251x_properties,
.irq = PXA_GPIO_TO_IRQ(ZEUS_CAN_GPIO),
.max_speed_hz = 1*1000*1000,
.bus_num = 3,
diff --git a/arch/arm/mach-tegra/cpuidle-tegra20.c b/arch/arm/mach-tegra/cpuidle-tegra20.c
index 2447427cb4a8..69f3fa270fbe 100644
--- a/arch/arm/mach-tegra/cpuidle-tegra20.c
+++ b/arch/arm/mach-tegra/cpuidle-tegra20.c
@@ -203,7 +203,7 @@ void tegra20_cpuidle_pcie_irqs_in_use(void)
{
pr_info_once(
"Disabling cpuidle LP2 state, since PCIe IRQs are in use\n");
- tegra_idle_driver.states[1].disabled = true;
+ cpuidle_driver_state_disabled(&tegra_idle_driver, 1, true);
}
int __init tegra20_cpuidle_init(void)
diff --git a/arch/arm/mm/proc-v7-bugs.c b/arch/arm/mm/proc-v7-bugs.c
index 9a07916af8dd..7c90b4c615a5 100644
--- a/arch/arm/mm/proc-v7-bugs.c
+++ b/arch/arm/mm/proc-v7-bugs.c
@@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/arm-smccc.h>
#include <linux/kernel.h>
-#include <linux/psci.h>
#include <linux/smp.h>
#include <asm/cp15.h>
@@ -75,26 +74,20 @@ static void cpu_v7_spectre_init(void)
case ARM_CPU_PART_CORTEX_A72: {
struct arm_smccc_res res;
- if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
- break;
+ arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
+ ARM_SMCCC_ARCH_WORKAROUND_1, &res);
+ if ((int)res.a0 != 0)
+ return;
- switch (psci_ops.conduit) {
- case PSCI_CONDUIT_HVC:
- arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
- ARM_SMCCC_ARCH_WORKAROUND_1, &res);
- if ((int)res.a0 != 0)
- break;
+ switch (arm_smccc_1_1_get_conduit()) {
+ case SMCCC_CONDUIT_HVC:
per_cpu(harden_branch_predictor_fn, cpu) =
call_hvc_arch_workaround_1;
cpu_do_switch_mm = cpu_v7_hvc_switch_mm;
spectre_v2_method = "hypervisor";
break;
- case PSCI_CONDUIT_SMC:
- arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
- ARM_SMCCC_ARCH_WORKAROUND_1, &res);
- if ((int)res.a0 != 0)
- break;
+ case SMCCC_CONDUIT_SMC:
per_cpu(harden_branch_predictor_fn, cpu) =
call_smc_arch_workaround_1;
cpu_do_switch_mm = cpu_v7_smc_switch_mm;
diff --git a/arch/arm/plat-pxa/ssp.c b/arch/arm/plat-pxa/ssp.c
index 9a6e4923bd69..563440315acd 100644
--- a/arch/arm/plat-pxa/ssp.c
+++ b/arch/arm/plat-pxa/ssp.c
@@ -89,7 +89,7 @@ void pxa_ssp_free(struct ssp_device *ssp)
ssp->use_count--;
ssp->label = NULL;
} else
- dev_err(&ssp->pdev->dev, "device already free\n");
+ dev_err(ssp->dev, "device already free\n");
mutex_unlock(&ssp_lock);
}
EXPORT_SYMBOL(pxa_ssp_free);
@@ -118,7 +118,7 @@ static int pxa_ssp_probe(struct platform_device *pdev)
if (ssp == NULL)
return -ENOMEM;
- ssp->pdev = pdev;
+ ssp->dev = dev;
ssp->clk = devm_clk_get(dev, NULL);
if (IS_ERR(ssp->clk))
diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c
index 38fa917c8585..3c7645d7b9b4 100644
--- a/arch/arm/xen/mm.c
+++ b/arch/arm/xen/mm.c
@@ -15,6 +15,7 @@
#include <xen/interface/grant_table.h>
#include <xen/interface/memory.h>
#include <xen/page.h>
+#include <xen/xen-ops.h>
#include <xen/swiotlb-xen.h>
#include <asm/cacheflush.h>
@@ -133,7 +134,7 @@ void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
return;
}
-int __init xen_mm_init(void)
+static int __init xen_mm_init(void)
{
struct gnttab_cache_flush cflush;
if (!xen_initial_domain())
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 3f047afb982c..afe6412fe769 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -67,7 +67,7 @@ config ARM64
select ARCH_USE_QUEUED_SPINLOCKS
select ARCH_SUPPORTS_MEMORY_FAILURE
select ARCH_SUPPORTS_ATOMIC_RMW
- select ARCH_SUPPORTS_INT128 if GCC_VERSION >= 50000 || CC_IS_CLANG
+ select ARCH_SUPPORTS_INT128 if CC_HAS_INT128 && (GCC_VERSION >= 50000 || CC_IS_CLANG)
select ARCH_SUPPORTS_NUMA_BALANCING
select ARCH_WANT_COMPAT_IPC_PARSE_VERSION if COMPAT
select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
@@ -143,6 +143,8 @@ config ARM64
select HAVE_DEBUG_KMEMLEAK
select HAVE_DMA_CONTIGUOUS
select HAVE_DYNAMIC_FTRACE
+ select HAVE_DYNAMIC_FTRACE_WITH_REGS \
+ if $(cc-option,-fpatchable-function-entry=2)
select HAVE_EFFICIENT_UNALIGNED_ACCESS
select HAVE_FAST_GUP
select HAVE_FTRACE_MCOUNT_RECORD
@@ -180,7 +182,6 @@ config ARM64
select PCI_SYSCALL if PCI
select POWER_RESET
select POWER_SUPPLY
- select REFCOUNT_FULL
select SPARSE_IRQ
select SWIOTLB
select SYSCTL_EXCEPTION_TRACE
@@ -266,6 +267,10 @@ config GENERIC_CSUM
config GENERIC_CALIBRATE_DELAY
def_bool y
+config ZONE_DMA
+ bool "Support DMA zone" if EXPERT
+ default y
+
config ZONE_DMA32
bool "Support DMA32 zone" if EXPERT
default y
@@ -538,6 +543,16 @@ config ARM64_ERRATUM_1286807
invalidated has been observed by other observers. The
workaround repeats the TLBI+DSB operation.
+config ARM64_ERRATUM_1319367
+ bool "Cortex-A57/A72: Speculative AT instruction using out-of-context translation regime could cause subsequent request to generate an incorrect translation"
+ default y
+ help
+ This option adds a workaround for ARM Cortex-A57 erratum 1319537
+ and Cortex-A72 erratum 1319367.
+
+ Cortex-A57 and A72 cores could end up with corrupted TLBs by
+ speculating an AT instruction during a guest context switch.
+
If unsure, say Y.
config ARM64_ERRATUM_1463225
@@ -558,6 +573,22 @@ config ARM64_ERRATUM_1463225
If unsure, say Y.
+config ARM64_ERRATUM_1542419
+ bool "Neoverse-N1: workaround mis-ordering of instruction fetches"
+ default y
+ help
+ This option adds a workaround for ARM Neoverse-N1 erratum
+ 1542419.
+
+ Affected Neoverse-N1 cores could execute a stale instruction when it
+ is modified by another CPU. The workaround depends on a firmware
+ counterpart.
+
+ Work around the issue by hiding the DIC feature from EL0. This
+ forces user-space to perform cache maintenance.
+
+ If unsure, say Y.
+
config CAVIUM_ERRATUM_22375
bool "Cavium erratum 22375, 24313"
default y
@@ -845,10 +876,26 @@ config ARM64_PA_BITS
default 48 if ARM64_PA_BITS_48
default 52 if ARM64_PA_BITS_52
+choice
+ prompt "Endianness"
+ default CPU_LITTLE_ENDIAN
+ help
+ Select the endianness of data accesses performed by the CPU. Userspace
+ applications will need to be compiled and linked for the endianness
+ that is selected here.
+
config CPU_BIG_ENDIAN
bool "Build big-endian kernel"
help
- Say Y if you plan on running a kernel in big-endian mode.
+ Say Y if you plan on running a kernel with a big-endian userspace.
+
+config CPU_LITTLE_ENDIAN
+ bool "Build little-endian kernel"
+ help
+ Say Y if you plan on running a kernel with a little-endian userspace.
+ This is usually the case for distributions targeting arm64.
+
+endchoice
config SCHED_MC
bool "Multi-core scheduler support"
@@ -1597,6 +1644,7 @@ config CMDLINE
config CMDLINE_FORCE
bool "Always use the default kernel command string"
+ depends on CMDLINE != ""
help
Always use the default kernel command string, even if the boot
loader passes other arguments to the kernel.
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 2c0238ce0551..1fbe24d4fdb6 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -95,6 +95,11 @@ ifeq ($(CONFIG_ARM64_MODULE_PLTS),y)
KBUILD_LDS_MODULE += $(srctree)/arch/arm64/kernel/module.lds
endif
+ifeq ($(CONFIG_DYNAMIC_FTRACE_WITH_REGS),y)
+ KBUILD_CPPFLAGS += -DCC_USING_PATCHABLE_FUNCTION_ENTRY
+ CC_FLAGS_FTRACE := -fpatchable-function-entry=2
+endif
+
# Default value
head-y := arch/arm64/kernel/head.o
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts b/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts
index 1d05d570142f..ce4b0679839d 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts
@@ -252,6 +252,7 @@
};
&r_ir {
+ linux,rc-map-name = "rc-beelink-gs1";
status = "okay";
};
diff --git a/arch/arm64/crypto/Kconfig b/arch/arm64/crypto/Kconfig
index 4922c4451e7c..b8eb0453123d 100644
--- a/arch/arm64/crypto/Kconfig
+++ b/arch/arm64/crypto/Kconfig
@@ -86,7 +86,7 @@ config CRYPTO_AES_ARM64_CE_CCM
config CRYPTO_AES_ARM64_CE_BLK
tristate "AES in ECB/CBC/CTR/XTS modes using ARMv8 Crypto Extensions"
depends on KERNEL_MODE_NEON
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_AES_ARM64_CE
select CRYPTO_AES_ARM64
select CRYPTO_SIMD
@@ -94,7 +94,7 @@ config CRYPTO_AES_ARM64_CE_BLK
config CRYPTO_AES_ARM64_NEON_BLK
tristate "AES in ECB/CBC/CTR/XTS modes using NEON instructions"
depends on KERNEL_MODE_NEON
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_AES_ARM64
select CRYPTO_LIB_AES
select CRYPTO_SIMD
@@ -102,8 +102,15 @@ config CRYPTO_AES_ARM64_NEON_BLK
config CRYPTO_CHACHA20_NEON
tristate "ChaCha20, XChaCha20, and XChaCha12 stream ciphers using NEON instructions"
depends on KERNEL_MODE_NEON
- select CRYPTO_BLKCIPHER
- select CRYPTO_CHACHA20
+ select CRYPTO_SKCIPHER
+ select CRYPTO_LIB_CHACHA_GENERIC
+ select CRYPTO_ARCH_HAVE_LIB_CHACHA
+
+config CRYPTO_POLY1305_NEON
+ tristate "Poly1305 hash function using scalar or NEON instructions"
+ depends on KERNEL_MODE_NEON
+ select CRYPTO_HASH
+ select CRYPTO_ARCH_HAVE_LIB_POLY1305
config CRYPTO_NHPOLY1305_NEON
tristate "NHPoly1305 hash function using NEON instructions (for Adiantum)"
@@ -113,7 +120,7 @@ config CRYPTO_NHPOLY1305_NEON
config CRYPTO_AES_ARM64_BS
tristate "AES in ECB/CBC/CTR/XTS modes using bit-sliced NEON algorithm"
depends on KERNEL_MODE_NEON
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_AES_ARM64_NEON_BLK
select CRYPTO_AES_ARM64
select CRYPTO_LIB_AES
diff --git a/arch/arm64/crypto/Makefile b/arch/arm64/crypto/Makefile
index 0435f2a0610e..d0901e610df3 100644
--- a/arch/arm64/crypto/Makefile
+++ b/arch/arm64/crypto/Makefile
@@ -50,6 +50,10 @@ sha512-arm64-y := sha512-glue.o sha512-core.o
obj-$(CONFIG_CRYPTO_CHACHA20_NEON) += chacha-neon.o
chacha-neon-y := chacha-neon-core.o chacha-neon-glue.o
+obj-$(CONFIG_CRYPTO_POLY1305_NEON) += poly1305-neon.o
+poly1305-neon-y := poly1305-core.o poly1305-glue.o
+AFLAGS_poly1305-core.o += -Dpoly1305_init=poly1305_init_arm64
+
obj-$(CONFIG_CRYPTO_NHPOLY1305_NEON) += nhpoly1305-neon.o
nhpoly1305-neon-y := nh-neon-core.o nhpoly1305-neon-glue.o
@@ -68,11 +72,15 @@ ifdef REGENERATE_ARM64_CRYPTO
quiet_cmd_perlasm = PERLASM $@
cmd_perlasm = $(PERL) $(<) void $(@)
+$(src)/poly1305-core.S_shipped: $(src)/poly1305-armv8.pl
+ $(call cmd,perlasm)
+
$(src)/sha256-core.S_shipped: $(src)/sha512-armv8.pl
$(call cmd,perlasm)
$(src)/sha512-core.S_shipped: $(src)/sha512-armv8.pl
$(call cmd,perlasm)
+
endif
-clean-files += sha256-core.S sha512-core.S
+clean-files += poly1305-core.S sha256-core.S sha512-core.S
diff --git a/arch/arm64/crypto/aes-neonbs-glue.c b/arch/arm64/crypto/aes-neonbs-glue.c
index ea873b8904c4..e3e27349a9fe 100644
--- a/arch/arm64/crypto/aes-neonbs-glue.c
+++ b/arch/arm64/crypto/aes-neonbs-glue.c
@@ -384,7 +384,7 @@ static int __xts_crypt(struct skcipher_request *req, bool encrypt,
goto xts_tail;
kernel_neon_end();
- skcipher_walk_done(&walk, nbytes);
+ err = skcipher_walk_done(&walk, nbytes);
}
if (err || likely(!tail))
diff --git a/arch/arm64/crypto/chacha-neon-glue.c b/arch/arm64/crypto/chacha-neon-glue.c
index 1495d2b18518..b08029d7bde6 100644
--- a/arch/arm64/crypto/chacha-neon-glue.c
+++ b/arch/arm64/crypto/chacha-neon-glue.c
@@ -1,5 +1,5 @@
/*
- * ARM NEON accelerated ChaCha and XChaCha stream ciphers,
+ * ARM NEON and scalar accelerated ChaCha and XChaCha stream ciphers,
* including ChaCha20 (RFC7539)
*
* Copyright (C) 2016 - 2017 Linaro, Ltd. <ard.biesheuvel@linaro.org>
@@ -20,9 +20,10 @@
*/
#include <crypto/algapi.h>
-#include <crypto/chacha.h>
+#include <crypto/internal/chacha.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
+#include <linux/jump_label.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -36,6 +37,8 @@ asmlinkage void chacha_4block_xor_neon(u32 *state, u8 *dst, const u8 *src,
int nrounds, int bytes);
asmlinkage void hchacha_block_neon(const u32 *state, u32 *out, int nrounds);
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon);
+
static void chacha_doneon(u32 *state, u8 *dst, const u8 *src,
int bytes, int nrounds)
{
@@ -59,6 +62,37 @@ static void chacha_doneon(u32 *state, u8 *dst, const u8 *src,
}
}
+void hchacha_block_arch(const u32 *state, u32 *stream, int nrounds)
+{
+ if (!static_branch_likely(&have_neon) || !crypto_simd_usable()) {
+ hchacha_block_generic(state, stream, nrounds);
+ } else {
+ kernel_neon_begin();
+ hchacha_block_neon(state, stream, nrounds);
+ kernel_neon_end();
+ }
+}
+EXPORT_SYMBOL(hchacha_block_arch);
+
+void chacha_init_arch(u32 *state, const u32 *key, const u8 *iv)
+{
+ chacha_init_generic(state, key, iv);
+}
+EXPORT_SYMBOL(chacha_init_arch);
+
+void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src, unsigned int bytes,
+ int nrounds)
+{
+ if (!static_branch_likely(&have_neon) || bytes <= CHACHA_BLOCK_SIZE ||
+ !crypto_simd_usable())
+ return chacha_crypt_generic(state, dst, src, bytes, nrounds);
+
+ kernel_neon_begin();
+ chacha_doneon(state, dst, src, bytes, nrounds);
+ kernel_neon_end();
+}
+EXPORT_SYMBOL(chacha_crypt_arch);
+
static int chacha_neon_stream_xor(struct skcipher_request *req,
const struct chacha_ctx *ctx, const u8 *iv)
{
@@ -68,7 +102,7 @@ static int chacha_neon_stream_xor(struct skcipher_request *req,
err = skcipher_walk_virt(&walk, req, false);
- crypto_chacha_init(state, ctx, iv);
+ chacha_init_generic(state, ctx->key, iv);
while (walk.nbytes > 0) {
unsigned int nbytes = walk.nbytes;
@@ -76,10 +110,17 @@ static int chacha_neon_stream_xor(struct skcipher_request *req,
if (nbytes < walk.total)
nbytes = rounddown(nbytes, walk.stride);
- kernel_neon_begin();
- chacha_doneon(state, walk.dst.virt.addr, walk.src.virt.addr,
- nbytes, ctx->nrounds);
- kernel_neon_end();
+ if (!static_branch_likely(&have_neon) ||
+ !crypto_simd_usable()) {
+ chacha_crypt_generic(state, walk.dst.virt.addr,
+ walk.src.virt.addr, nbytes,
+ ctx->nrounds);
+ } else {
+ kernel_neon_begin();
+ chacha_doneon(state, walk.dst.virt.addr,
+ walk.src.virt.addr, nbytes, ctx->nrounds);
+ kernel_neon_end();
+ }
err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
}
@@ -91,9 +132,6 @@ static int chacha_neon(struct skcipher_request *req)
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
- if (req->cryptlen <= CHACHA_BLOCK_SIZE || !crypto_simd_usable())
- return crypto_chacha_crypt(req);
-
return chacha_neon_stream_xor(req, ctx, req->iv);
}
@@ -105,14 +143,8 @@ static int xchacha_neon(struct skcipher_request *req)
u32 state[16];
u8 real_iv[16];
- if (req->cryptlen <= CHACHA_BLOCK_SIZE || !crypto_simd_usable())
- return crypto_xchacha_crypt(req);
-
- crypto_chacha_init(state, ctx, req->iv);
-
- kernel_neon_begin();
- hchacha_block_neon(state, subctx.key, ctx->nrounds);
- kernel_neon_end();
+ chacha_init_generic(state, ctx->key, req->iv);
+ hchacha_block_arch(state, subctx.key, ctx->nrounds);
subctx.nrounds = ctx->nrounds;
memcpy(&real_iv[0], req->iv + 24, 8);
@@ -134,7 +166,7 @@ static struct skcipher_alg algs[] = {
.ivsize = CHACHA_IV_SIZE,
.chunksize = CHACHA_BLOCK_SIZE,
.walksize = 5 * CHACHA_BLOCK_SIZE,
- .setkey = crypto_chacha20_setkey,
+ .setkey = chacha20_setkey,
.encrypt = chacha_neon,
.decrypt = chacha_neon,
}, {
@@ -150,7 +182,7 @@ static struct skcipher_alg algs[] = {
.ivsize = XCHACHA_IV_SIZE,
.chunksize = CHACHA_BLOCK_SIZE,
.walksize = 5 * CHACHA_BLOCK_SIZE,
- .setkey = crypto_chacha20_setkey,
+ .setkey = chacha20_setkey,
.encrypt = xchacha_neon,
.decrypt = xchacha_neon,
}, {
@@ -166,7 +198,7 @@ static struct skcipher_alg algs[] = {
.ivsize = XCHACHA_IV_SIZE,
.chunksize = CHACHA_BLOCK_SIZE,
.walksize = 5 * CHACHA_BLOCK_SIZE,
- .setkey = crypto_chacha12_setkey,
+ .setkey = chacha12_setkey,
.encrypt = xchacha_neon,
.decrypt = xchacha_neon,
}
@@ -175,14 +207,17 @@ static struct skcipher_alg algs[] = {
static int __init chacha_simd_mod_init(void)
{
if (!cpu_have_named_feature(ASIMD))
- return -ENODEV;
+ return 0;
+
+ static_branch_enable(&have_neon);
return crypto_register_skciphers(algs, ARRAY_SIZE(algs));
}
static void __exit chacha_simd_mod_fini(void)
{
- crypto_unregister_skciphers(algs, ARRAY_SIZE(algs));
+ if (cpu_have_named_feature(ASIMD))
+ crypto_unregister_skciphers(algs, ARRAY_SIZE(algs));
}
module_init(chacha_simd_mod_init);
diff --git a/arch/arm64/crypto/ghash-ce-core.S b/arch/arm64/crypto/ghash-ce-core.S
index 410e8afcf5a7..a791c4adf8e6 100644
--- a/arch/arm64/crypto/ghash-ce-core.S
+++ b/arch/arm64/crypto/ghash-ce-core.S
@@ -13,8 +13,8 @@
T1 .req v2
T2 .req v3
MASK .req v4
- XL .req v5
- XM .req v6
+ XM .req v5
+ XL .req v6
XH .req v7
IN1 .req v7
@@ -358,20 +358,37 @@ ENTRY(pmull_ghash_update_p8)
__pmull_ghash p8
ENDPROC(pmull_ghash_update_p8)
- KS0 .req v12
- KS1 .req v13
- INP0 .req v14
- INP1 .req v15
-
- .macro load_round_keys, rounds, rk
- cmp \rounds, #12
- blo 2222f /* 128 bits */
- beq 1111f /* 192 bits */
- ld1 {v17.4s-v18.4s}, [\rk], #32
-1111: ld1 {v19.4s-v20.4s}, [\rk], #32
-2222: ld1 {v21.4s-v24.4s}, [\rk], #64
- ld1 {v25.4s-v28.4s}, [\rk], #64
- ld1 {v29.4s-v31.4s}, [\rk]
+ KS0 .req v8
+ KS1 .req v9
+ KS2 .req v10
+ KS3 .req v11
+
+ INP0 .req v21
+ INP1 .req v22
+ INP2 .req v23
+ INP3 .req v24
+
+ K0 .req v25
+ K1 .req v26
+ K2 .req v27
+ K3 .req v28
+ K4 .req v12
+ K5 .req v13
+ K6 .req v4
+ K7 .req v5
+ K8 .req v14
+ K9 .req v15
+ KK .req v29
+ KL .req v30
+ KM .req v31
+
+ .macro load_round_keys, rounds, rk, tmp
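+ /*
+ * Load the first six round keys up front; the last three always sit
+ * at \rk + 16 * \rounds - 32, i.e. keys[rounds-2 .. rounds].
+ */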
+ add \tmp, \rk, #64
+ ld1 {K0.4s-K3.4s}, [\rk]
+ ld1 {K4.4s-K5.4s}, [\tmp]
+ add \tmp, \rk, \rounds, lsl #4
+ sub \tmp, \tmp, #32
+ ld1 {KK.4s-KM.4s}, [\tmp]
.endm
.macro enc_round, state, key
@@ -379,197 +396,367 @@ ENDPROC(pmull_ghash_update_p8)
aesmc \state\().16b, \state\().16b
.endm
- .macro enc_block, state, rounds
- cmp \rounds, #12
- b.lo 2222f /* 128 bits */
- b.eq 1111f /* 192 bits */
- enc_round \state, v17
- enc_round \state, v18
-1111: enc_round \state, v19
- enc_round \state, v20
-2222: .irp key, v21, v22, v23, v24, v25, v26, v27, v28, v29
+ .macro enc_qround, s0, s1, s2, s3, key
+ enc_round \s0, \key
+ enc_round \s1, \key
+ enc_round \s2, \key
+ enc_round \s3, \key
+ .endm
+
+ .macro enc_block, state, rounds, rk, tmp
+ add \tmp, \rk, #96
+ ld1 {K6.4s-K7.4s}, [\tmp], #32
+ .irp key, K0, K1, K2, K3, K4, K5
enc_round \state, \key
.endr
- aese \state\().16b, v30.16b
- eor \state\().16b, \state\().16b, v31.16b
+
+ tbnz \rounds, #2, .Lnot128_\@
+.Lout256_\@:
+ enc_round \state, K6
+ enc_round \state, K7
+
+.Lout192_\@:
+ enc_round \state, KK
+ aese \state\().16b, KL.16b
+ eor \state\().16b, \state\().16b, KM.16b
+
+ .subsection 1
+.Lnot128_\@:
+ ld1 {K8.4s-K9.4s}, [\tmp], #32
+ enc_round \state, K6
+ enc_round \state, K7
+ ld1 {K6.4s-K7.4s}, [\tmp]
+ enc_round \state, K8
+ enc_round \state, K9
+ tbz \rounds, #1, .Lout192_\@
+ b .Lout256_\@
+ .previous
.endm
+ .align 6
.macro pmull_gcm_do_crypt, enc
- ld1 {SHASH.2d}, [x4], #16
- ld1 {HH.2d}, [x4]
- ld1 {XL.2d}, [x1]
- ldr x8, [x5, #8] // load lower counter
+ stp x29, x30, [sp, #-32]!
+ mov x29, sp
+ str x19, [sp, #24]
+
+ load_round_keys x7, x6, x8
+
+ ld1 {SHASH.2d}, [x3], #16
+ ld1 {HH.2d-HH4.2d}, [x3]
- movi MASK.16b, #0xe1
trn1 SHASH2.2d, SHASH.2d, HH.2d
trn2 T1.2d, SHASH.2d, HH.2d
-CPU_LE( rev x8, x8 )
- shl MASK.2d, MASK.2d, #57
eor SHASH2.16b, SHASH2.16b, T1.16b
- .if \enc == 1
- ldr x10, [sp]
- ld1 {KS0.16b-KS1.16b}, [x10]
- .endif
+ trn1 HH34.2d, HH3.2d, HH4.2d
+ trn2 T1.2d, HH3.2d, HH4.2d
+ eor HH34.16b, HH34.16b, T1.16b
- cbnz x6, 4f
+ ld1 {XL.2d}, [x4]
-0: ld1 {INP0.16b-INP1.16b}, [x3], #32
+ cbz x0, 3f // tag only?
- rev x9, x8
- add x11, x8, #1
- add x8, x8, #2
+ ldr w8, [x5, #12] // load lower counter
+CPU_LE( rev w8, w8 )
- .if \enc == 1
- eor INP0.16b, INP0.16b, KS0.16b // encrypt input
- eor INP1.16b, INP1.16b, KS1.16b
+0: mov w9, #4 // max blocks per round
+ add x10, x0, #0xf
+ lsr x10, x10, #4 // remaining blocks
+
+ subs x0, x0, #64
+ csel w9, w10, w9, mi
+ add w8, w8, w9
+
+ bmi 1f
+ ld1 {INP0.16b-INP3.16b}, [x2], #64
+ .subsection 1
+ /*
+ * Populate the four input registers right to left with up to 63 bytes
+ * of data, using overlapping loads to avoid branches.
+ *
+ * INP0 INP1 INP2 INP3
+ * 1 byte | | | |x |
+ * 16 bytes | | | |xxxxxxxx|
+ * 17 bytes | | |xxxxxxxx|x |
+ * 47 bytes | |xxxxxxxx|xxxxxxxx|xxxxxxx |
+ * etc etc
+ *
+ * Note that this code may read up to 15 bytes before the start of
+ * the input. It is up to the calling code to ensure this is safe if
+ * this happens in the first iteration of the loop (i.e., when the
+ * input size is < 16 bytes)
+ */
+1: mov x15, #16
+ ands x19, x0, #0xf
+ csel x19, x19, x15, ne
+ adr_l x17, .Lpermute_table + 16
+
+ sub x11, x15, x19
+ add x12, x17, x11
+ sub x17, x17, x11
+ ld1 {T1.16b}, [x12]
+ sub x10, x1, x11
+ sub x11, x2, x11
+
+ cmp x0, #-16
+ csel x14, x15, xzr, gt
+ cmp x0, #-32
+ csel x15, x15, xzr, gt
+ cmp x0, #-48
+ csel x16, x19, xzr, gt
+ csel x1, x1, x10, gt
+ csel x2, x2, x11, gt
+
+ ld1 {INP0.16b}, [x2], x14
+ ld1 {INP1.16b}, [x2], x15
+ ld1 {INP2.16b}, [x2], x16
+ ld1 {INP3.16b}, [x2]
+ tbl INP3.16b, {INP3.16b}, T1.16b
+ b 2f
+ .previous
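A userspace model of the right-aligned tail load described in the comment block above. The real code uses overlapping ld1s plus .Lpermute_table to do this branchlessly (and may read up to 15 bytes before the input); this sketch reproduces only the resulting lane layout with an explicit loop, for tails of 1..63 bytes:

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    static void load_tail(uint8_t lanes[4][16], const uint8_t *src, size_t n)
    {
        size_t rem = n % 16 ? n % 16 : 16;   /* bytes in the last chunk */
        size_t full = (n - rem) / 16;        /* complete 16-byte chunks */
        size_t i;

        memset(lanes, 0, 4 * 16);
        for (i = 0; i < full; i++)           /* fill right to left: ... INP2 */
            memcpy(lanes[3 - full + i], src + 16 * i, 16);
        memcpy(lanes[3], src + 16 * full, rem);  /* tail chunk lands in INP3 */
    }

    int main(void)
    {
        uint8_t in[63], lanes[4][16];
        size_t i;

        for (i = 0; i < sizeof(in); i++)
            in[i] = (uint8_t)i;
        load_tail(lanes, in, 17);            /* the "17 bytes" diagram row */
        assert(lanes[2][15] == 15 && lanes[3][0] == 16);
        return 0;
    }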
+
+2: .if \enc == 0
+ bl pmull_gcm_ghash_4x
.endif
- ld1 {KS0.8b}, [x5] // load upper counter
- rev x11, x11
- sub w0, w0, #2
- mov KS1.8b, KS0.8b
- ins KS0.d[1], x9 // set lower counter
- ins KS1.d[1], x11
+ bl pmull_gcm_enc_4x
- rev64 T1.16b, INP1.16b
+ tbnz x0, #63, 6f
+ st1 {INP0.16b-INP3.16b}, [x1], #64
+ .if \enc == 1
+ bl pmull_gcm_ghash_4x
+ .endif
+ bne 0b
- cmp w7, #12
- b.ge 2f // AES-192/256?
+3: ldp x19, x10, [sp, #24]
+ cbz x10, 5f // output tag?
-1: enc_round KS0, v21
- ext IN1.16b, T1.16b, T1.16b, #8
+ ld1 {INP3.16b}, [x10] // load lengths[]
+ mov w9, #1
+ bl pmull_gcm_ghash_4x
- enc_round KS1, v21
- pmull2 XH2.1q, SHASH.2d, IN1.2d // a1 * b1
+ mov w11, #(0x1 << 24) // BE '1U'
+ ld1 {KS0.16b}, [x5]
+ mov KS0.s[3], w11
- enc_round KS0, v22
- eor T1.16b, T1.16b, IN1.16b
+ enc_block KS0, x7, x6, x12
- enc_round KS1, v22
- pmull XL2.1q, SHASH.1d, IN1.1d // a0 * b0
+ ext XL.16b, XL.16b, XL.16b, #8
+ rev64 XL.16b, XL.16b
+ eor XL.16b, XL.16b, KS0.16b
+ st1 {XL.16b}, [x10] // store tag
- enc_round KS0, v23
- pmull XM2.1q, SHASH2.1d, T1.1d // (a1 + a0)(b1 + b0)
+4: ldp x29, x30, [sp], #32
+ ret
- enc_round KS1, v23
- rev64 T1.16b, INP0.16b
- ext T2.16b, XL.16b, XL.16b, #8
+5:
+CPU_LE( rev w8, w8 )
+ str w8, [x5, #12] // store lower counter
+ st1 {XL.2d}, [x4]
+ b 4b
+
+6: ld1 {T1.16b-T2.16b}, [x17], #32 // permute vectors
+ sub x17, x17, x19, lsl #1
+
+ cmp w9, #1
+ beq 7f
+ .subsection 1
+7: ld1 {INP2.16b}, [x1]
+ tbx INP2.16b, {INP3.16b}, T1.16b
+ mov INP3.16b, INP2.16b
+ b 8f
+ .previous
+
+ st1 {INP0.16b}, [x1], x14
+ st1 {INP1.16b}, [x1], x15
+ st1 {INP2.16b}, [x1], x16
+ tbl INP3.16b, {INP3.16b}, T1.16b
+ tbx INP3.16b, {INP2.16b}, T2.16b
+8: st1 {INP3.16b}, [x1]
- enc_round KS0, v24
- ext IN1.16b, T1.16b, T1.16b, #8
- eor T1.16b, T1.16b, T2.16b
+ .if \enc == 1
+ ld1 {T1.16b}, [x17]
+ tbl INP3.16b, {INP3.16b}, T1.16b // clear non-data bits
+ bl pmull_gcm_ghash_4x
+ .endif
+ b 3b
+ .endm
- enc_round KS1, v24
- eor XL.16b, XL.16b, IN1.16b
+ /*
+ * void pmull_gcm_encrypt(int blocks, u8 dst[], const u8 src[],
+ * struct ghash_key const *k, u64 dg[], u8 ctr[],
+ * int rounds, u8 tag)
+ */
+ENTRY(pmull_gcm_encrypt)
+ pmull_gcm_do_crypt 1
+ENDPROC(pmull_gcm_encrypt)
- enc_round KS0, v25
- eor T1.16b, T1.16b, XL.16b
+ /*
+ * void pmull_gcm_decrypt(int blocks, u8 dst[], const u8 src[],
+ * struct ghash_key const *k, u64 dg[], u8 ctr[],
+ * int rounds, u8 tag)
+ */
+ENTRY(pmull_gcm_decrypt)
+ pmull_gcm_do_crypt 0
+ENDPROC(pmull_gcm_decrypt)
- enc_round KS1, v25
- pmull2 XH.1q, HH.2d, XL.2d // a1 * b1
+pmull_gcm_ghash_4x:
+ movi MASK.16b, #0xe1
+ shl MASK.2d, MASK.2d, #57
- enc_round KS0, v26
- pmull XL.1q, HH.1d, XL.1d // a0 * b0
+ rev64 T1.16b, INP0.16b
+ rev64 T2.16b, INP1.16b
+ rev64 TT3.16b, INP2.16b
+ rev64 TT4.16b, INP3.16b
- enc_round KS1, v26
- pmull2 XM.1q, SHASH2.2d, T1.2d // (a1 + a0)(b1 + b0)
+ ext XL.16b, XL.16b, XL.16b, #8
- enc_round KS0, v27
- eor XL.16b, XL.16b, XL2.16b
- eor XH.16b, XH.16b, XH2.16b
+ tbz w9, #2, 0f // <4 blocks?
+ .subsection 1
+0: movi XH2.16b, #0
+ movi XM2.16b, #0
+ movi XL2.16b, #0
- enc_round KS1, v27
- eor XM.16b, XM.16b, XM2.16b
- ext T1.16b, XL.16b, XH.16b, #8
+ tbz w9, #0, 1f // 2 blocks?
+ tbz w9, #1, 2f // 1 block?
- enc_round KS0, v28
- eor T2.16b, XL.16b, XH.16b
- eor XM.16b, XM.16b, T1.16b
+ eor T2.16b, T2.16b, XL.16b
+ ext T1.16b, T2.16b, T2.16b, #8
+ b .Lgh3
- enc_round KS1, v28
- eor XM.16b, XM.16b, T2.16b
+1: eor TT3.16b, TT3.16b, XL.16b
+ ext T2.16b, TT3.16b, TT3.16b, #8
+ b .Lgh2
- enc_round KS0, v29
- pmull T2.1q, XL.1d, MASK.1d
+2: eor TT4.16b, TT4.16b, XL.16b
+ ext IN1.16b, TT4.16b, TT4.16b, #8
+ b .Lgh1
+ .previous
- enc_round KS1, v29
- mov XH.d[0], XM.d[1]
- mov XM.d[1], XL.d[0]
+ eor T1.16b, T1.16b, XL.16b
+ ext IN1.16b, T1.16b, T1.16b, #8
- aese KS0.16b, v30.16b
- eor XL.16b, XM.16b, T2.16b
+ pmull2 XH2.1q, HH4.2d, IN1.2d // a1 * b1
+ eor T1.16b, T1.16b, IN1.16b
+ pmull XL2.1q, HH4.1d, IN1.1d // a0 * b0
+ pmull2 XM2.1q, HH34.2d, T1.2d // (a1 + a0)(b1 + b0)
- aese KS1.16b, v30.16b
- ext T2.16b, XL.16b, XL.16b, #8
+ ext T1.16b, T2.16b, T2.16b, #8
+.Lgh3: eor T2.16b, T2.16b, T1.16b
+ pmull2 XH.1q, HH3.2d, T1.2d // a1 * b1
+ pmull XL.1q, HH3.1d, T1.1d // a0 * b0
+ pmull XM.1q, HH34.1d, T2.1d // (a1 + a0)(b1 + b0)
- eor KS0.16b, KS0.16b, v31.16b
- pmull XL.1q, XL.1d, MASK.1d
- eor T2.16b, T2.16b, XH.16b
+ eor XH2.16b, XH2.16b, XH.16b
+ eor XL2.16b, XL2.16b, XL.16b
+ eor XM2.16b, XM2.16b, XM.16b
- eor KS1.16b, KS1.16b, v31.16b
- eor XL.16b, XL.16b, T2.16b
+ ext T2.16b, TT3.16b, TT3.16b, #8
+.Lgh2: eor TT3.16b, TT3.16b, T2.16b
+ pmull2 XH.1q, HH.2d, T2.2d // a1 * b1
+ pmull XL.1q, HH.1d, T2.1d // a0 * b0
+ pmull2 XM.1q, SHASH2.2d, TT3.2d // (a1 + a0)(b1 + b0)
- .if \enc == 0
- eor INP0.16b, INP0.16b, KS0.16b
- eor INP1.16b, INP1.16b, KS1.16b
- .endif
+ eor XH2.16b, XH2.16b, XH.16b
+ eor XL2.16b, XL2.16b, XL.16b
+ eor XM2.16b, XM2.16b, XM.16b
- st1 {INP0.16b-INP1.16b}, [x2], #32
+ ext IN1.16b, TT4.16b, TT4.16b, #8
+.Lgh1: eor TT4.16b, TT4.16b, IN1.16b
+ pmull XL.1q, SHASH.1d, IN1.1d // a0 * b0
+ pmull2 XH.1q, SHASH.2d, IN1.2d // a1 * b1
+ pmull XM.1q, SHASH2.1d, TT4.1d // (a1 + a0)(b1 + b0)
- cbnz w0, 0b
+ eor XH.16b, XH.16b, XH2.16b
+ eor XL.16b, XL.16b, XL2.16b
+ eor XM.16b, XM.16b, XM2.16b
-CPU_LE( rev x8, x8 )
- st1 {XL.2d}, [x1]
- str x8, [x5, #8] // store lower counter
+ eor T2.16b, XL.16b, XH.16b
+ ext T1.16b, XL.16b, XH.16b, #8
+ eor XM.16b, XM.16b, T2.16b
- .if \enc == 1
- st1 {KS0.16b-KS1.16b}, [x10]
- .endif
+ __pmull_reduce_p64
+
+ eor T2.16b, T2.16b, XH.16b
+ eor XL.16b, XL.16b, T2.16b
ret
+ENDPROC(pmull_gcm_ghash_4x)
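The new pmull_gcm_ghash_4x above folds four ciphertext blocks per call using the precomputed powers SHASH = H, HH = H^2, HH3 = H^3 and HH4 = H^4 (SHASH2/HH34 hold the premixed Karatsuba halves). In formula form, over GF(2^128), one call computes

    X' = (X \oplus C_1)\cdot H^4 \;\oplus\; C_2\cdot H^3 \;\oplus\; C_3\cdot H^2 \;\oplus\; C_4\cdot H

where each product follows the Karatsuba split named in the // comments:

    a\cdot b = a_1 b_1\,x^{128} \;\oplus\; \bigl[(a_1\oplus a_0)(b_1\oplus b_0)\oplus a_1 b_1\oplus a_0 b_0\bigr]\,x^{64} \;\oplus\; a_0 b_0

with XH/XM/XL accumulating the three partial products before __pmull_reduce_p64 brings the result back to 128 bits. (Notation is ours; the asm comments are the authoritative mapping.)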
+
+pmull_gcm_enc_4x:
+ ld1 {KS0.16b}, [x5] // load upper counter
+ sub w10, w8, #4
+ sub w11, w8, #3
+ sub w12, w8, #2
+ sub w13, w8, #1
+ rev w10, w10
+ rev w11, w11
+ rev w12, w12
+ rev w13, w13
+ mov KS1.16b, KS0.16b
+ mov KS2.16b, KS0.16b
+ mov KS3.16b, KS0.16b
+ ins KS0.s[3], w10 // set lower counter
+ ins KS1.s[3], w11
+ ins KS2.s[3], w12
+ ins KS3.s[3], w13
+
+ add x10, x6, #96 // round key pointer
+ ld1 {K6.4s-K7.4s}, [x10], #32
+ .irp key, K0, K1, K2, K3, K4, K5
+ enc_qround KS0, KS1, KS2, KS3, \key
+ .endr
-2: b.eq 3f // AES-192?
- enc_round KS0, v17
- enc_round KS1, v17
- enc_round KS0, v18
- enc_round KS1, v18
-3: enc_round KS0, v19
- enc_round KS1, v19
- enc_round KS0, v20
- enc_round KS1, v20
- b 1b
+ tbnz x7, #2, .Lnot128
+ .subsection 1
+.Lnot128:
+ ld1 {K8.4s-K9.4s}, [x10], #32
+ .irp key, K6, K7
+ enc_qround KS0, KS1, KS2, KS3, \key
+ .endr
+ ld1 {K6.4s-K7.4s}, [x10]
+ .irp key, K8, K9
+ enc_qround KS0, KS1, KS2, KS3, \key
+ .endr
+ tbz x7, #1, .Lout192
+ b .Lout256
+ .previous
-4: load_round_keys w7, x6
- b 0b
- .endm
+.Lout256:
+ .irp key, K6, K7
+ enc_qround KS0, KS1, KS2, KS3, \key
+ .endr
- /*
- * void pmull_gcm_encrypt(int blocks, u64 dg[], u8 dst[], const u8 src[],
- * struct ghash_key const *k, u8 ctr[],
- * int rounds, u8 ks[])
- */
-ENTRY(pmull_gcm_encrypt)
- pmull_gcm_do_crypt 1
-ENDPROC(pmull_gcm_encrypt)
+.Lout192:
+ enc_qround KS0, KS1, KS2, KS3, KK
- /*
- * void pmull_gcm_decrypt(int blocks, u64 dg[], u8 dst[], const u8 src[],
- * struct ghash_key const *k, u8 ctr[],
- * int rounds)
- */
-ENTRY(pmull_gcm_decrypt)
- pmull_gcm_do_crypt 0
-ENDPROC(pmull_gcm_decrypt)
+ aese KS0.16b, KL.16b
+ aese KS1.16b, KL.16b
+ aese KS2.16b, KL.16b
+ aese KS3.16b, KL.16b
+
+ eor KS0.16b, KS0.16b, KM.16b
+ eor KS1.16b, KS1.16b, KM.16b
+ eor KS2.16b, KS2.16b, KM.16b
+ eor KS3.16b, KS3.16b, KM.16b
+
+ eor INP0.16b, INP0.16b, KS0.16b
+ eor INP1.16b, INP1.16b, KS1.16b
+ eor INP2.16b, INP2.16b, KS2.16b
+ eor INP3.16b, INP3.16b, KS3.16b
- /*
- * void pmull_gcm_encrypt_block(u8 dst[], u8 src[], u8 rk[], int rounds)
- */
-ENTRY(pmull_gcm_encrypt_block)
- cbz x2, 0f
- load_round_keys w3, x2
-0: ld1 {v0.16b}, [x1]
- enc_block v0, w3
- st1 {v0.16b}, [x0]
ret
-ENDPROC(pmull_gcm_encrypt_block)
+ENDPROC(pmull_gcm_enc_4x)
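pmull_gcm_enc_4x derives its four counter values by subtraction because the caller has already advanced w8 by this round's block count; on a short final round this lines the valid counters up with the right-aligned data lanes. A sketch of the counter-block construction for the full four-block case (illustrative helper names):

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    static void put_be32(uint8_t *p, uint32_t v)   /* the asm rev + ins pair */
    {
        p[0] = v >> 24; p[1] = v >> 16; p[2] = v >> 8; p[3] = (uint8_t)v;
    }

    static void make_ctr_blocks(uint8_t blk[4][16], const uint8_t iv[12],
                                uint32_t ctr_after)  /* already advanced by 4 */
    {
        for (int i = 0; i < 4; i++) {
            memcpy(blk[i], iv, 12);
            put_be32(blk[i] + 12, ctr_after - 4 + i);  /* sub w1x, w8, #4..#1 */
        }
    }

    int main(void)
    {
        uint8_t iv[12] = { 0 }, blk[4][16];

        make_ctr_blocks(blk, iv, 6);         /* blocks use counters 2..5 */
        assert(blk[0][15] == 2 && blk[3][15] == 5);
        return 0;
    }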
+
+ .section ".rodata", "a"
+ .align 6
+.Lpermute_table:
+ .byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+ .byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+ .byte 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7
+ .byte 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf
+ .byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+ .byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+ .byte 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7
+ .byte 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf
+ .previous
diff --git a/arch/arm64/crypto/ghash-ce-glue.c b/arch/arm64/crypto/ghash-ce-glue.c
index 70b1469783f9..522cf004ce65 100644
--- a/arch/arm64/crypto/ghash-ce-glue.c
+++ b/arch/arm64/crypto/ghash-ce-glue.c
@@ -58,17 +58,15 @@ asmlinkage void pmull_ghash_update_p8(int blocks, u64 dg[], const char *src,
struct ghash_key const *k,
const char *head);
-asmlinkage void pmull_gcm_encrypt(int blocks, u64 dg[], u8 dst[],
- const u8 src[], struct ghash_key const *k,
+asmlinkage void pmull_gcm_encrypt(int bytes, u8 dst[], const u8 src[],
+ struct ghash_key const *k, u64 dg[],
u8 ctr[], u32 const rk[], int rounds,
- u8 ks[]);
+ u8 tag[]);
-asmlinkage void pmull_gcm_decrypt(int blocks, u64 dg[], u8 dst[],
- const u8 src[], struct ghash_key const *k,
- u8 ctr[], u32 const rk[], int rounds);
-
-asmlinkage void pmull_gcm_encrypt_block(u8 dst[], u8 const src[],
- u32 const rk[], int rounds);
+asmlinkage void pmull_gcm_decrypt(int bytes, u8 dst[], const u8 src[],
+ struct ghash_key const *k, u64 dg[],
+ u8 ctr[], u32 const rk[], int rounds,
+ u8 tag[]);
static int ghash_init(struct shash_desc *desc)
{
@@ -85,7 +83,7 @@ static void ghash_do_update(int blocks, u64 dg[], const char *src,
struct ghash_key const *k,
const char *head))
{
- if (likely(crypto_simd_usable())) {
+ if (likely(crypto_simd_usable() && simd_update)) {
kernel_neon_begin();
simd_update(blocks, dg, src, key, head);
kernel_neon_end();
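This hunk makes the simd_update callback optional: glue code can now pass NULL to force the scalar fallback even when SIMD is usable, as the rewritten gcm_encrypt/gcm_decrypt below do on their non-NEON paths. The dispatch shape, as a standalone sketch with stand-in names:

    #include <stdbool.h>
    #include <stdio.h>

    typedef void (*simd_fn)(int blocks, const char *src);

    static bool simd_usable(void) { return true; }   /* assumption */
    static void scalar_update(int blocks, const char *src)
    {
        printf("scalar: %d blocks at %p\n", blocks, (const void *)src);
    }

    static void do_update(int blocks, const char *src, simd_fn simd_update)
    {
        if (simd_usable() && simd_update)
            simd_update(blocks, src);   /* kernel_neon_begin/end around this */
        else
            scalar_update(blocks, src);
    }

    int main(void)
    {
        do_update(2, "xx", NULL);       /* forced scalar, as in the new glue */
        return 0;
    }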
@@ -398,136 +396,112 @@ static void gcm_calculate_auth_mac(struct aead_request *req, u64 dg[])
}
}
-static void gcm_final(struct aead_request *req, struct gcm_aes_ctx *ctx,
- u64 dg[], u8 tag[], int cryptlen)
-{
- u8 mac[AES_BLOCK_SIZE];
- u128 lengths;
-
- lengths.a = cpu_to_be64(req->assoclen * 8);
- lengths.b = cpu_to_be64(cryptlen * 8);
-
- ghash_do_update(1, dg, (void *)&lengths, &ctx->ghash_key, NULL,
- pmull_ghash_update_p64);
-
- put_unaligned_be64(dg[1], mac);
- put_unaligned_be64(dg[0], mac + 8);
-
- crypto_xor(tag, mac, AES_BLOCK_SIZE);
-}
-
static int gcm_encrypt(struct aead_request *req)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct gcm_aes_ctx *ctx = crypto_aead_ctx(aead);
+ int nrounds = num_rounds(&ctx->aes_key);
struct skcipher_walk walk;
+ u8 buf[AES_BLOCK_SIZE];
u8 iv[AES_BLOCK_SIZE];
- u8 ks[2 * AES_BLOCK_SIZE];
- u8 tag[AES_BLOCK_SIZE];
u64 dg[2] = {};
- int nrounds = num_rounds(&ctx->aes_key);
+ u128 lengths;
+ u8 *tag;
int err;
+ lengths.a = cpu_to_be64(req->assoclen * 8);
+ lengths.b = cpu_to_be64(req->cryptlen * 8);
+
if (req->assoclen)
gcm_calculate_auth_mac(req, dg);
memcpy(iv, req->iv, GCM_IV_SIZE);
- put_unaligned_be32(1, iv + GCM_IV_SIZE);
+ put_unaligned_be32(2, iv + GCM_IV_SIZE);
err = skcipher_walk_aead_encrypt(&walk, req, false);
- if (likely(crypto_simd_usable() && walk.total >= 2 * AES_BLOCK_SIZE)) {
- u32 const *rk = NULL;
-
- kernel_neon_begin();
- pmull_gcm_encrypt_block(tag, iv, ctx->aes_key.key_enc, nrounds);
- put_unaligned_be32(2, iv + GCM_IV_SIZE);
- pmull_gcm_encrypt_block(ks, iv, NULL, nrounds);
- put_unaligned_be32(3, iv + GCM_IV_SIZE);
- pmull_gcm_encrypt_block(ks + AES_BLOCK_SIZE, iv, NULL, nrounds);
- put_unaligned_be32(4, iv + GCM_IV_SIZE);
-
+ if (likely(crypto_simd_usable())) {
do {
- int blocks = walk.nbytes / (2 * AES_BLOCK_SIZE) * 2;
+ const u8 *src = walk.src.virt.addr;
+ u8 *dst = walk.dst.virt.addr;
+ int nbytes = walk.nbytes;
+
+ tag = (u8 *)&lengths;
- if (rk)
- kernel_neon_begin();
+ if (unlikely(nbytes > 0 && nbytes < AES_BLOCK_SIZE)) {
+ src = dst = memcpy(buf + sizeof(buf) - nbytes,
+ src, nbytes);
+ } else if (nbytes < walk.total) {
+ nbytes &= ~(AES_BLOCK_SIZE - 1);
+ tag = NULL;
+ }
- pmull_gcm_encrypt(blocks, dg, walk.dst.virt.addr,
- walk.src.virt.addr, &ctx->ghash_key,
- iv, rk, nrounds, ks);
+ kernel_neon_begin();
+ pmull_gcm_encrypt(nbytes, dst, src, &ctx->ghash_key, dg,
+ iv, ctx->aes_key.key_enc, nrounds,
+ tag);
kernel_neon_end();
- err = skcipher_walk_done(&walk,
- walk.nbytes % (2 * AES_BLOCK_SIZE));
+ if (unlikely(!nbytes))
+ break;
- rk = ctx->aes_key.key_enc;
- } while (walk.nbytes >= 2 * AES_BLOCK_SIZE);
- } else {
- aes_encrypt(&ctx->aes_key, tag, iv);
- put_unaligned_be32(2, iv + GCM_IV_SIZE);
+ if (unlikely(nbytes > 0 && nbytes < AES_BLOCK_SIZE))
+ memcpy(walk.dst.virt.addr,
+ buf + sizeof(buf) - nbytes, nbytes);
- while (walk.nbytes >= (2 * AES_BLOCK_SIZE)) {
- const int blocks =
- walk.nbytes / (2 * AES_BLOCK_SIZE) * 2;
+ err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
+ } while (walk.nbytes);
+ } else {
+ while (walk.nbytes >= AES_BLOCK_SIZE) {
+ int blocks = walk.nbytes / AES_BLOCK_SIZE;
+ const u8 *src = walk.src.virt.addr;
u8 *dst = walk.dst.virt.addr;
- u8 *src = walk.src.virt.addr;
int remaining = blocks;
do {
- aes_encrypt(&ctx->aes_key, ks, iv);
- crypto_xor_cpy(dst, src, ks, AES_BLOCK_SIZE);
+ aes_encrypt(&ctx->aes_key, buf, iv);
+ crypto_xor_cpy(dst, src, buf, AES_BLOCK_SIZE);
crypto_inc(iv, AES_BLOCK_SIZE);
dst += AES_BLOCK_SIZE;
src += AES_BLOCK_SIZE;
} while (--remaining > 0);
- ghash_do_update(blocks, dg,
- walk.dst.virt.addr, &ctx->ghash_key,
- NULL, pmull_ghash_update_p64);
+ ghash_do_update(blocks, dg, walk.dst.virt.addr,
+ &ctx->ghash_key, NULL, NULL);
err = skcipher_walk_done(&walk,
- walk.nbytes % (2 * AES_BLOCK_SIZE));
- }
- if (walk.nbytes) {
- aes_encrypt(&ctx->aes_key, ks, iv);
- if (walk.nbytes > AES_BLOCK_SIZE) {
- crypto_inc(iv, AES_BLOCK_SIZE);
- aes_encrypt(&ctx->aes_key, ks + AES_BLOCK_SIZE, iv);
- }
+ walk.nbytes % AES_BLOCK_SIZE);
}
- }
- /* handle the tail */
- if (walk.nbytes) {
- u8 buf[GHASH_BLOCK_SIZE];
- unsigned int nbytes = walk.nbytes;
- u8 *dst = walk.dst.virt.addr;
- u8 *head = NULL;
+ /* handle the tail */
+ if (walk.nbytes) {
+ aes_encrypt(&ctx->aes_key, buf, iv);
- crypto_xor_cpy(walk.dst.virt.addr, walk.src.virt.addr, ks,
- walk.nbytes);
+ crypto_xor_cpy(walk.dst.virt.addr, walk.src.virt.addr,
+ buf, walk.nbytes);
- if (walk.nbytes > GHASH_BLOCK_SIZE) {
- head = dst;
- dst += GHASH_BLOCK_SIZE;
- nbytes %= GHASH_BLOCK_SIZE;
+ memcpy(buf, walk.dst.virt.addr, walk.nbytes);
+ memset(buf + walk.nbytes, 0, sizeof(buf) - walk.nbytes);
}
- memcpy(buf, dst, nbytes);
- memset(buf + nbytes, 0, GHASH_BLOCK_SIZE - nbytes);
- ghash_do_update(!!nbytes, dg, buf, &ctx->ghash_key, head,
- pmull_ghash_update_p64);
+ tag = (u8 *)&lengths;
+ ghash_do_update(1, dg, tag, &ctx->ghash_key,
+ walk.nbytes ? buf : NULL, NULL);
- err = skcipher_walk_done(&walk, 0);
+ if (walk.nbytes)
+ err = skcipher_walk_done(&walk, 0);
+
+ put_unaligned_be64(dg[1], tag);
+ put_unaligned_be64(dg[0], tag + 8);
+ put_unaligned_be32(1, iv + GCM_IV_SIZE);
+ aes_encrypt(&ctx->aes_key, iv, iv);
+ crypto_xor(tag, iv, AES_BLOCK_SIZE);
}
if (err)
return err;
- gcm_final(req, ctx, dg, tag, req->cryptlen);
-
/* copy authtag to end of dst */
scatterwalk_map_and_copy(tag, req->dst, req->assoclen + req->cryptlen,
crypto_aead_authsize(aead), 1);
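Note the counter discipline in the rewritten gcm_encrypt: bulk encryption starts at counter value 2 (put_unaligned_be32(2, ...) before the walk), and counter value 1 is regenerated at the end to compute E(K, IV||1), which masks the GHASH output to form the tag. A toy illustration of the numbering for the 96-bit IV case (sketch only):

    /* Block i of the plaintext is XORed with AES_K(IV || i + 2),
     * while AES_K(IV || 1) masks the GHASH output for the tag. */
    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    static void put_be32(uint8_t *p, uint32_t v)
    {
        p[0] = v >> 24; p[1] = v >> 16; p[2] = v >> 8; p[3] = (uint8_t)v;
    }

    static void ctr_block(uint8_t blk[16], const uint8_t iv[12], uint32_t ctr)
    {
        memcpy(blk, iv, 12);
        put_be32(blk + 12, ctr);
    }

    int main(void)
    {
        uint8_t iv[12] = { 0 }, blk[16];

        ctr_block(blk, iv, 2);   /* first data block, as in gcm_encrypt() */
        ctr_block(blk, iv, 1);   /* reserved for the tag mask */
        assert(blk[15] == 1);
        return 0;
    }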
@@ -540,75 +514,65 @@ static int gcm_decrypt(struct aead_request *req)
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct gcm_aes_ctx *ctx = crypto_aead_ctx(aead);
unsigned int authsize = crypto_aead_authsize(aead);
+ int nrounds = num_rounds(&ctx->aes_key);
struct skcipher_walk walk;
- u8 iv[2 * AES_BLOCK_SIZE];
- u8 tag[AES_BLOCK_SIZE];
- u8 buf[2 * GHASH_BLOCK_SIZE];
+ u8 buf[AES_BLOCK_SIZE];
+ u8 iv[AES_BLOCK_SIZE];
u64 dg[2] = {};
- int nrounds = num_rounds(&ctx->aes_key);
+ u128 lengths;
+ u8 *tag;
int err;
+ lengths.a = cpu_to_be64(req->assoclen * 8);
+ lengths.b = cpu_to_be64((req->cryptlen - authsize) * 8);
+
if (req->assoclen)
gcm_calculate_auth_mac(req, dg);
memcpy(iv, req->iv, GCM_IV_SIZE);
- put_unaligned_be32(1, iv + GCM_IV_SIZE);
+ put_unaligned_be32(2, iv + GCM_IV_SIZE);
err = skcipher_walk_aead_decrypt(&walk, req, false);
- if (likely(crypto_simd_usable() && walk.total >= 2 * AES_BLOCK_SIZE)) {
- u32 const *rk = NULL;
-
- kernel_neon_begin();
- pmull_gcm_encrypt_block(tag, iv, ctx->aes_key.key_enc, nrounds);
- put_unaligned_be32(2, iv + GCM_IV_SIZE);
-
+ if (likely(crypto_simd_usable())) {
do {
- int blocks = walk.nbytes / (2 * AES_BLOCK_SIZE) * 2;
- int rem = walk.total - blocks * AES_BLOCK_SIZE;
-
- if (rk)
- kernel_neon_begin();
-
- pmull_gcm_decrypt(blocks, dg, walk.dst.virt.addr,
- walk.src.virt.addr, &ctx->ghash_key,
- iv, rk, nrounds);
-
- /* check if this is the final iteration of the loop */
- if (rem < (2 * AES_BLOCK_SIZE)) {
- u8 *iv2 = iv + AES_BLOCK_SIZE;
-
- if (rem > AES_BLOCK_SIZE) {
- memcpy(iv2, iv, AES_BLOCK_SIZE);
- crypto_inc(iv2, AES_BLOCK_SIZE);
- }
+ const u8 *src = walk.src.virt.addr;
+ u8 *dst = walk.dst.virt.addr;
+ int nbytes = walk.nbytes;
- pmull_gcm_encrypt_block(iv, iv, NULL, nrounds);
+ tag = (u8 *)&lengths;
- if (rem > AES_BLOCK_SIZE)
- pmull_gcm_encrypt_block(iv2, iv2, NULL,
- nrounds);
+ if (unlikely(nbytes > 0 && nbytes < AES_BLOCK_SIZE)) {
+ src = dst = memcpy(buf + sizeof(buf) - nbytes,
+ src, nbytes);
+ } else if (nbytes < walk.total) {
+ nbytes &= ~(AES_BLOCK_SIZE - 1);
+ tag = NULL;
}
+ kernel_neon_begin();
+ pmull_gcm_decrypt(nbytes, dst, src, &ctx->ghash_key, dg,
+ iv, ctx->aes_key.key_enc, nrounds,
+ tag);
kernel_neon_end();
- err = skcipher_walk_done(&walk,
- walk.nbytes % (2 * AES_BLOCK_SIZE));
+ if (unlikely(!nbytes))
+ break;
- rk = ctx->aes_key.key_enc;
- } while (walk.nbytes >= 2 * AES_BLOCK_SIZE);
- } else {
- aes_encrypt(&ctx->aes_key, tag, iv);
- put_unaligned_be32(2, iv + GCM_IV_SIZE);
+ if (unlikely(nbytes > 0 && nbytes < AES_BLOCK_SIZE))
+ memcpy(walk.dst.virt.addr,
+ buf + sizeof(buf) - nbytes, nbytes);
- while (walk.nbytes >= (2 * AES_BLOCK_SIZE)) {
- int blocks = walk.nbytes / (2 * AES_BLOCK_SIZE) * 2;
+ err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
+ } while (walk.nbytes);
+ } else {
+ while (walk.nbytes >= AES_BLOCK_SIZE) {
+ int blocks = walk.nbytes / AES_BLOCK_SIZE;
+ const u8 *src = walk.src.virt.addr;
u8 *dst = walk.dst.virt.addr;
- u8 *src = walk.src.virt.addr;
ghash_do_update(blocks, dg, walk.src.virt.addr,
- &ctx->ghash_key, NULL,
- pmull_ghash_update_p64);
+ &ctx->ghash_key, NULL, NULL);
do {
aes_encrypt(&ctx->aes_key, buf, iv);
@@ -620,49 +584,38 @@ static int gcm_decrypt(struct aead_request *req)
} while (--blocks > 0);
err = skcipher_walk_done(&walk,
- walk.nbytes % (2 * AES_BLOCK_SIZE));
+ walk.nbytes % AES_BLOCK_SIZE);
}
- if (walk.nbytes) {
- if (walk.nbytes > AES_BLOCK_SIZE) {
- u8 *iv2 = iv + AES_BLOCK_SIZE;
-
- memcpy(iv2, iv, AES_BLOCK_SIZE);
- crypto_inc(iv2, AES_BLOCK_SIZE);
- aes_encrypt(&ctx->aes_key, iv2, iv2);
- }
- aes_encrypt(&ctx->aes_key, iv, iv);
+ /* handle the tail */
+ if (walk.nbytes) {
+ memcpy(buf, walk.src.virt.addr, walk.nbytes);
+ memset(buf + walk.nbytes, 0, sizeof(buf) - walk.nbytes);
}
- }
- /* handle the tail */
- if (walk.nbytes) {
- const u8 *src = walk.src.virt.addr;
- const u8 *head = NULL;
- unsigned int nbytes = walk.nbytes;
+ tag = (u8 *)&lengths;
+ ghash_do_update(1, dg, tag, &ctx->ghash_key,
+ walk.nbytes ? buf : NULL, NULL);
- if (walk.nbytes > GHASH_BLOCK_SIZE) {
- head = src;
- src += GHASH_BLOCK_SIZE;
- nbytes %= GHASH_BLOCK_SIZE;
- }
+ if (walk.nbytes) {
+ aes_encrypt(&ctx->aes_key, buf, iv);
- memcpy(buf, src, nbytes);
- memset(buf + nbytes, 0, GHASH_BLOCK_SIZE - nbytes);
- ghash_do_update(!!nbytes, dg, buf, &ctx->ghash_key, head,
- pmull_ghash_update_p64);
+ crypto_xor_cpy(walk.dst.virt.addr, walk.src.virt.addr,
+ buf, walk.nbytes);
- crypto_xor_cpy(walk.dst.virt.addr, walk.src.virt.addr, iv,
- walk.nbytes);
+ err = skcipher_walk_done(&walk, 0);
+ }
- err = skcipher_walk_done(&walk, 0);
+ put_unaligned_be64(dg[1], tag);
+ put_unaligned_be64(dg[0], tag + 8);
+ put_unaligned_be32(1, iv + GCM_IV_SIZE);
+ aes_encrypt(&ctx->aes_key, iv, iv);
+ crypto_xor(tag, iv, AES_BLOCK_SIZE);
}
if (err)
return err;
- gcm_final(req, ctx, dg, tag, req->cryptlen - authsize);
-
/* compare calculated auth tag with the stored one */
scatterwalk_map_and_copy(buf, req->src,
req->assoclen + req->cryptlen - authsize,
@@ -675,7 +628,7 @@ static int gcm_decrypt(struct aead_request *req)
static struct aead_alg gcm_aes_alg = {
.ivsize = GCM_IV_SIZE,
- .chunksize = 2 * AES_BLOCK_SIZE,
+ .chunksize = AES_BLOCK_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
.setkey = gcm_setkey,
.setauthsize = gcm_setauthsize,
diff --git a/arch/arm64/crypto/poly1305-armv8.pl b/arch/arm64/crypto/poly1305-armv8.pl
new file mode 100644
index 000000000000..6e5576d19af8
--- /dev/null
+++ b/arch/arm64/crypto/poly1305-armv8.pl
@@ -0,0 +1,913 @@
+#!/usr/bin/env perl
+# SPDX-License-Identifier: GPL-1.0+ OR BSD-3-Clause
+#
+# ====================================================================
+# Written by Andy Polyakov, @dot-asm, initially for the OpenSSL
+# project.
+# ====================================================================
+#
+# This module implements Poly1305 hash for ARMv8.
+#
+# June 2015
+#
+# Numbers are cycles per processed byte with poly1305_blocks alone.
+#
+# IALU/gcc-4.9 NEON
+#
+# Apple A7 1.86/+5% 0.72
+# Cortex-A53 2.69/+58% 1.47
+# Cortex-A57 2.70/+7% 1.14
+# Denver 1.64/+50% 1.18(*)
+# X-Gene 2.13/+68% 2.27
+# Mongoose 1.77/+75% 1.12
+# Kryo 2.70/+55% 1.13
+# ThunderX2 1.17/+95% 1.36
+#
+# (*) estimate based on resource availability is less than 1.0,
+# i.e. the measured result is worse than expected, presumably the
+# binary translator is not almighty;
+
+$flavour=shift;
+$output=shift;
+
+if ($flavour && $flavour ne "void") {
+ $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+ ( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
+ ( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate) or
+ die "can't locate arm-xlate.pl";
+
+ open STDOUT,"| \"$^X\" $xlate $flavour $output";
+} else {
+ open STDOUT,">$output";
+}
+
+my ($ctx,$inp,$len,$padbit) = map("x$_",(0..3));
+my ($mac,$nonce)=($inp,$len);
+
+my ($h0,$h1,$h2,$r0,$r1,$s1,$t0,$t1,$d0,$d1,$d2) = map("x$_",(4..14));
+
+$code.=<<___;
+#ifndef __KERNEL__
+# include "arm_arch.h"
+.extern OPENSSL_armcap_P
+#endif
+
+.text
+
+// forward "declarations" are required for Apple
+.globl poly1305_blocks
+.globl poly1305_emit
+
+.globl poly1305_init
+.type poly1305_init,%function
+.align 5
+poly1305_init:
+ cmp $inp,xzr
+ stp xzr,xzr,[$ctx] // zero hash value
+ stp xzr,xzr,[$ctx,#16] // [along with is_base2_26]
+
+ csel x0,xzr,x0,eq
+ b.eq .Lno_key
+
+#ifndef __KERNEL__
+ adrp x17,OPENSSL_armcap_P
+ ldr w17,[x17,#:lo12:OPENSSL_armcap_P]
+#endif
+
+ ldp $r0,$r1,[$inp] // load key
+ mov $s1,#0xfffffffc0fffffff
+ movk $s1,#0x0fff,lsl#48
+#ifdef __AARCH64EB__
+ rev $r0,$r0 // flip bytes
+ rev $r1,$r1
+#endif
+ and $r0,$r0,$s1 // &=0ffffffc0fffffff
+ and $s1,$s1,#-4
+ and $r1,$r1,$s1 // &=0ffffffc0ffffffc
+ mov w#$s1,#-1
+ stp $r0,$r1,[$ctx,#32] // save key value
+ str w#$s1,[$ctx,#48] // impossible key power value
+
+#ifndef __KERNEL__
+ tst w17,#ARMV7_NEON
+
+ adr $d0,.Lpoly1305_blocks
+ adr $r0,.Lpoly1305_blocks_neon
+ adr $d1,.Lpoly1305_emit
+
+ csel $d0,$d0,$r0,eq
+
+# ifdef __ILP32__
+ stp w#$d0,w#$d1,[$len]
+# else
+ stp $d0,$d1,[$len]
+# endif
+#endif
+ mov x0,#1
+.Lno_key:
+ ret
+.size poly1305_init,.-poly1305_init
+
+.type poly1305_blocks,%function
+.align 5
+poly1305_blocks:
+.Lpoly1305_blocks:
+ ands $len,$len,#-16
+ b.eq .Lno_data
+
+ ldp $h0,$h1,[$ctx] // load hash value
+ ldp $h2,x17,[$ctx,#16] // [along with is_base2_26]
+ ldp $r0,$r1,[$ctx,#32] // load key value
+
+#ifdef __AARCH64EB__
+ lsr $d0,$h0,#32
+ mov w#$d1,w#$h0
+ lsr $d2,$h1,#32
+ mov w15,w#$h1
+ lsr x16,$h2,#32
+#else
+ mov w#$d0,w#$h0
+ lsr $d1,$h0,#32
+ mov w#$d2,w#$h1
+ lsr x15,$h1,#32
+ mov w16,w#$h2
+#endif
+
+ add $d0,$d0,$d1,lsl#26 // base 2^26 -> base 2^64
+ lsr $d1,$d2,#12
+ adds $d0,$d0,$d2,lsl#52
+ add $d1,$d1,x15,lsl#14
+ adc $d1,$d1,xzr
+ lsr $d2,x16,#24
+ adds $d1,$d1,x16,lsl#40
+ adc $d2,$d2,xzr
+
+ cmp x17,#0 // is_base2_26?
+ add $s1,$r1,$r1,lsr#2 // s1 = r1 + (r1 >> 2)
+ csel $h0,$h0,$d0,eq // choose between radixes
+ csel $h1,$h1,$d1,eq
+ csel $h2,$h2,$d2,eq
+
+.Loop:
+ ldp $t0,$t1,[$inp],#16 // load input
+ sub $len,$len,#16
+#ifdef __AARCH64EB__
+ rev $t0,$t0
+ rev $t1,$t1
+#endif
+ adds $h0,$h0,$t0 // accumulate input
+ adcs $h1,$h1,$t1
+
+ mul $d0,$h0,$r0 // h0*r0
+ adc $h2,$h2,$padbit
+ umulh $d1,$h0,$r0
+
+ mul $t0,$h1,$s1 // h1*5*r1
+ umulh $t1,$h1,$s1
+
+ adds $d0,$d0,$t0
+ mul $t0,$h0,$r1 // h0*r1
+ adc $d1,$d1,$t1
+ umulh $d2,$h0,$r1
+
+ adds $d1,$d1,$t0
+ mul $t0,$h1,$r0 // h1*r0
+ adc $d2,$d2,xzr
+ umulh $t1,$h1,$r0
+
+ adds $d1,$d1,$t0
+ mul $t0,$h2,$s1 // h2*5*r1
+ adc $d2,$d2,$t1
+ mul $t1,$h2,$r0 // h2*r0
+
+ adds $d1,$d1,$t0
+ adc $d2,$d2,$t1
+
+ and $t0,$d2,#-4 // final reduction
+ and $h2,$d2,#3
+ add $t0,$t0,$d2,lsr#2
+ adds $h0,$d0,$t0
+ adcs $h1,$d1,xzr
+ adc $h2,$h2,xzr
+
+ cbnz $len,.Loop
+
+ stp $h0,$h1,[$ctx] // store hash value
+ stp $h2,xzr,[$ctx,#16] // [and clear is_base2_26]
+
+.Lno_data:
+ ret
+.size poly1305_blocks,.-poly1305_blocks
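The "final reduction" sequence above relies on 2^130 ≡ 5 (mod 2^130 − 5): the bits of the top limb d2 above the low two are folded back multiplied by 5, computed branch-free as (d2 & ~3) + (d2 >> 2) == 5 * (d2 >> 2), while `and h2, d2, #3` keeps the low two bits in place. A quick standalone check of that identity:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t d2;

        for (d2 = 0; d2 < 1000000; d2 += 7)   /* and, lsr #2, add in the asm */
            assert((d2 & ~3ULL) + (d2 >> 2) == 5 * (d2 >> 2));
        return 0;
    }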
+
+.type poly1305_emit,%function
+.align 5
+poly1305_emit:
+.Lpoly1305_emit:
+ ldp $h0,$h1,[$ctx] // load hash base 2^64
+ ldp $h2,$r0,[$ctx,#16] // [along with is_base2_26]
+ ldp $t0,$t1,[$nonce] // load nonce
+
+#ifdef __AARCH64EB__
+ lsr $d0,$h0,#32
+ mov w#$d1,w#$h0
+ lsr $d2,$h1,#32
+ mov w15,w#$h1
+ lsr x16,$h2,#32
+#else
+ mov w#$d0,w#$h0
+ lsr $d1,$h0,#32
+ mov w#$d2,w#$h1
+ lsr x15,$h1,#32
+ mov w16,w#$h2
+#endif
+
+ add $d0,$d0,$d1,lsl#26 // base 2^26 -> base 2^64
+ lsr $d1,$d2,#12
+ adds $d0,$d0,$d2,lsl#52
+ add $d1,$d1,x15,lsl#14
+ adc $d1,$d1,xzr
+ lsr $d2,x16,#24
+ adds $d1,$d1,x16,lsl#40
+ adc $d2,$d2,xzr
+
+ cmp $r0,#0 // is_base2_26?
+ csel $h0,$h0,$d0,eq // choose between radixes
+ csel $h1,$h1,$d1,eq
+ csel $h2,$h2,$d2,eq
+
+ adds $d0,$h0,#5 // compare to modulus
+ adcs $d1,$h1,xzr
+ adc $d2,$h2,xzr
+
+ tst $d2,#-4 // see if it's carried/borrowed
+
+ csel $h0,$h0,$d0,eq
+ csel $h1,$h1,$d1,eq
+
+#ifdef __AARCH64EB__
+ ror $t0,$t0,#32 // flip nonce words
+ ror $t1,$t1,#32
+#endif
+ adds $h0,$h0,$t0 // accumulate nonce
+ adc $h1,$h1,$t1
+#ifdef __AARCH64EB__
+ rev $h0,$h0 // flip output bytes
+ rev $h1,$h1
+#endif
+ stp $h0,$h1,[$mac] // write result
+
+ ret
+.size poly1305_emit,.-poly1305_emit
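poly1305_emit's comparison to the modulus works by adding 5 and testing whether the sum reaches bit 130 (the `tst $d2,#-4`): if it does, h was >= 2^130 − 5 and the reduced value is selected; only the low 128 bits matter, since the nonce addition and the output are mod 2^128. A C model of that selection (sketch):

    #include <assert.h>
    #include <stdint.h>

    static void final_reduce(uint64_t h[3])   /* h[2]:h[1]:h[0], bit 128 up in h[2] */
    {
        uint64_t t0 = h[0] + 5;
        uint64_t t1 = h[1] + (t0 < h[0]);     /* propagate the carries */
        uint64_t t2 = h[2] + (t1 < h[1]);

        if (t2 & ~(uint64_t)3) {              /* 'tst #-4': sum reached 2^130 */
            h[0] = t0;                        /* keep only the low 128 bits, */
            h[1] = t1;                        /* as the csel pair does */
        }
    }

    int main(void)
    {
        /* h == p == 2^130 - 5 must reduce to zero (mod 2^128) */
        uint64_t h[3] = { (uint64_t)-5, (uint64_t)-1, 3 };

        final_reduce(h);
        assert(h[0] == 0 && h[1] == 0);
        return 0;
    }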
+___
+my ($R0,$R1,$S1,$R2,$S2,$R3,$S3,$R4,$S4) = map("v$_.4s",(0..8));
+my ($IN01_0,$IN01_1,$IN01_2,$IN01_3,$IN01_4) = map("v$_.2s",(9..13));
+my ($IN23_0,$IN23_1,$IN23_2,$IN23_3,$IN23_4) = map("v$_.2s",(14..18));
+my ($ACC0,$ACC1,$ACC2,$ACC3,$ACC4) = map("v$_.2d",(19..23));
+my ($H0,$H1,$H2,$H3,$H4) = map("v$_.2s",(24..28));
+my ($T0,$T1,$MASK) = map("v$_",(29..31));
+
+my ($in2,$zeros)=("x16","x17");
+my $is_base2_26 = $zeros; # borrow
+
+$code.=<<___;
+.type poly1305_mult,%function
+.align 5
+poly1305_mult:
+ mul $d0,$h0,$r0 // h0*r0
+ umulh $d1,$h0,$r0
+
+ mul $t0,$h1,$s1 // h1*5*r1
+ umulh $t1,$h1,$s1
+
+ adds $d0,$d0,$t0
+ mul $t0,$h0,$r1 // h0*r1
+ adc $d1,$d1,$t1
+ umulh $d2,$h0,$r1
+
+ adds $d1,$d1,$t0
+ mul $t0,$h1,$r0 // h1*r0
+ adc $d2,$d2,xzr
+ umulh $t1,$h1,$r0
+
+ adds $d1,$d1,$t0
+ mul $t0,$h2,$s1 // h2*5*r1
+ adc $d2,$d2,$t1
+ mul $t1,$h2,$r0 // h2*r0
+
+ adds $d1,$d1,$t0
+ adc $d2,$d2,$t1
+
+ and $t0,$d2,#-4 // final reduction
+ and $h2,$d2,#3
+ add $t0,$t0,$d2,lsr#2
+ adds $h0,$d0,$t0
+ adcs $h1,$d1,xzr
+ adc $h2,$h2,xzr
+
+ ret
+.size poly1305_mult,.-poly1305_mult
+
+.type poly1305_splat,%function
+.align 4
+poly1305_splat:
+ and x12,$h0,#0x03ffffff // base 2^64 -> base 2^26
+ ubfx x13,$h0,#26,#26
+ extr x14,$h1,$h0,#52
+ and x14,x14,#0x03ffffff
+ ubfx x15,$h1,#14,#26
+ extr x16,$h2,$h1,#40
+
+ str w12,[$ctx,#16*0] // r0
+ add w12,w13,w13,lsl#2 // r1*5
+ str w13,[$ctx,#16*1] // r1
+ add w13,w14,w14,lsl#2 // r2*5
+ str w12,[$ctx,#16*2] // s1
+ str w14,[$ctx,#16*3] // r2
+ add w14,w15,w15,lsl#2 // r3*5
+ str w13,[$ctx,#16*4] // s2
+ str w15,[$ctx,#16*5] // r3
+ add w15,w16,w16,lsl#2 // r4*5
+ str w14,[$ctx,#16*6] // s3
+ str w16,[$ctx,#16*7] // r4
+ str w15,[$ctx,#16*8] // s4
+
+ ret
+.size poly1305_splat,.-poly1305_splat
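poly1305_splat packs three 64-bit limbs into five 26-bit limbs (storing each r_i alongside 5*r_i for the NEON lanes); the "base 2^26 -> base 2^64" blocks elsewhere in this file perform the inverse. Both conversions in C, with a round-trip check (standalone sketch):

    #include <assert.h>
    #include <stdint.h>

    static void to26(uint32_t r[5], const uint64_t h[3])
    {
        r[0] = h[0] & 0x3ffffff;                          /* and  #0x03ffffff */
        r[1] = (h[0] >> 26) & 0x3ffffff;                  /* ubfx #26,#26     */
        r[2] = ((h[0] >> 52) | (h[1] << 12)) & 0x3ffffff; /* extr #52         */
        r[3] = (h[1] >> 14) & 0x3ffffff;                  /* ubfx #14,#26     */
        r[4] = (uint32_t)((h[1] >> 40) | (h[2] << 24));   /* extr #40         */
    }

    static void to64(uint64_t h[3], const uint32_t r[5])
    {
        h[0] = r[0] | ((uint64_t)r[1] << 26) | ((uint64_t)r[2] << 52);
        h[1] = ((uint64_t)r[2] >> 12) | ((uint64_t)r[3] << 14) |
               ((uint64_t)r[4] << 40);
        h[2] = (uint64_t)r[4] >> 24;
    }

    int main(void)
    {
        uint64_t h[3] = { 0x0123456789abcdef, 0xfedcba9876543210, 3 }, g[3];
        uint32_t r[5];

        to26(r, h);    /* canonical limbs, so ORs and adds coincide */
        to64(g, r);
        assert(g[0] == h[0] && g[1] == h[1] && g[2] == h[2]);
        return 0;
    }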
+
+#ifdef __KERNEL__
+.globl poly1305_blocks_neon
+#endif
+.type poly1305_blocks_neon,%function
+.align 5
+poly1305_blocks_neon:
+.Lpoly1305_blocks_neon:
+ ldr $is_base2_26,[$ctx,#24]
+ cmp $len,#128
+ b.lo .Lpoly1305_blocks
+
+ .inst 0xd503233f // paciasp
+ stp x29,x30,[sp,#-80]!
+ add x29,sp,#0
+
+ stp d8,d9,[sp,#16] // meet ABI requirements
+ stp d10,d11,[sp,#32]
+ stp d12,d13,[sp,#48]
+ stp d14,d15,[sp,#64]
+
+ cbz $is_base2_26,.Lbase2_64_neon
+
+ ldp w10,w11,[$ctx] // load hash value base 2^26
+ ldp w12,w13,[$ctx,#8]
+ ldr w14,[$ctx,#16]
+
+ tst $len,#31
+ b.eq .Leven_neon
+
+ ldp $r0,$r1,[$ctx,#32] // load key value
+
+ add $h0,x10,x11,lsl#26 // base 2^26 -> base 2^64
+ lsr $h1,x12,#12
+ adds $h0,$h0,x12,lsl#52
+ add $h1,$h1,x13,lsl#14
+ adc $h1,$h1,xzr
+ lsr $h2,x14,#24
+ adds $h1,$h1,x14,lsl#40
+ adc $d2,$h2,xzr // can be partially reduced...
+
+ ldp $d0,$d1,[$inp],#16 // load input
+ sub $len,$len,#16
+ add $s1,$r1,$r1,lsr#2 // s1 = r1 + (r1 >> 2)
+
+#ifdef __AARCH64EB__
+ rev $d0,$d0
+ rev $d1,$d1
+#endif
+ adds $h0,$h0,$d0 // accumulate input
+ adcs $h1,$h1,$d1
+ adc $h2,$h2,$padbit
+
+ bl poly1305_mult
+
+ and x10,$h0,#0x03ffffff // base 2^64 -> base 2^26
+ ubfx x11,$h0,#26,#26
+ extr x12,$h1,$h0,#52
+ and x12,x12,#0x03ffffff
+ ubfx x13,$h1,#14,#26
+ extr x14,$h2,$h1,#40
+
+ b .Leven_neon
+
+.align 4
+.Lbase2_64_neon:
+ ldp $r0,$r1,[$ctx,#32] // load key value
+
+ ldp $h0,$h1,[$ctx] // load hash value base 2^64
+ ldr $h2,[$ctx,#16]
+
+ tst $len,#31
+ b.eq .Linit_neon
+
+ ldp $d0,$d1,[$inp],#16 // load input
+ sub $len,$len,#16
+ add $s1,$r1,$r1,lsr#2 // s1 = r1 + (r1 >> 2)
+#ifdef __AARCH64EB__
+ rev $d0,$d0
+ rev $d1,$d1
+#endif
+ adds $h0,$h0,$d0 // accumulate input
+ adcs $h1,$h1,$d1
+ adc $h2,$h2,$padbit
+
+ bl poly1305_mult
+
+.Linit_neon:
+ ldr w17,[$ctx,#48] // first table element
+ and x10,$h0,#0x03ffffff // base 2^64 -> base 2^26
+ ubfx x11,$h0,#26,#26
+ extr x12,$h1,$h0,#52
+ and x12,x12,#0x03ffffff
+ ubfx x13,$h1,#14,#26
+ extr x14,$h2,$h1,#40
+
+ cmp w17,#-1 // is value impossible?
+ b.ne .Leven_neon
+
+ fmov ${H0},x10
+ fmov ${H1},x11
+ fmov ${H2},x12
+ fmov ${H3},x13
+ fmov ${H4},x14
+
+ ////////////////////////////////// initialize r^n table
+ mov $h0,$r0 // r^1
+ add $s1,$r1,$r1,lsr#2 // s1 = r1 + (r1 >> 2)
+ mov $h1,$r1
+ mov $h2,xzr
+ add $ctx,$ctx,#48+12
+ bl poly1305_splat
+
+ bl poly1305_mult // r^2
+ sub $ctx,$ctx,#4
+ bl poly1305_splat
+
+ bl poly1305_mult // r^3
+ sub $ctx,$ctx,#4
+ bl poly1305_splat
+
+ bl poly1305_mult // r^4
+ sub $ctx,$ctx,#4
+ bl poly1305_splat
+ sub $ctx,$ctx,#48 // restore original $ctx
+ b .Ldo_neon
+
+.align 4
+.Leven_neon:
+ fmov ${H0},x10
+ fmov ${H1},x11
+ fmov ${H2},x12
+ fmov ${H3},x13
+ fmov ${H4},x14
+
+.Ldo_neon:
+ ldp x8,x12,[$inp,#32] // inp[2:3]
+ subs $len,$len,#64
+ ldp x9,x13,[$inp,#48]
+ add $in2,$inp,#96
+ adr $zeros,.Lzeros
+
+ lsl $padbit,$padbit,#24
+ add x15,$ctx,#48
+
+#ifdef __AARCH64EB__
+ rev x8,x8
+ rev x12,x12
+ rev x9,x9
+ rev x13,x13
+#endif
+ and x4,x8,#0x03ffffff // base 2^64 -> base 2^26
+ and x5,x9,#0x03ffffff
+ ubfx x6,x8,#26,#26
+ ubfx x7,x9,#26,#26
+ add x4,x4,x5,lsl#32 // bfi x4,x5,#32,#32
+ extr x8,x12,x8,#52
+ extr x9,x13,x9,#52
+ add x6,x6,x7,lsl#32 // bfi x6,x7,#32,#32
+ fmov $IN23_0,x4
+ and x8,x8,#0x03ffffff
+ and x9,x9,#0x03ffffff
+ ubfx x10,x12,#14,#26
+ ubfx x11,x13,#14,#26
+ add x12,$padbit,x12,lsr#40
+ add x13,$padbit,x13,lsr#40
+ add x8,x8,x9,lsl#32 // bfi x8,x9,#32,#32
+ fmov $IN23_1,x6
+ add x10,x10,x11,lsl#32 // bfi x10,x11,#32,#32
+ add x12,x12,x13,lsl#32 // bfi x12,x13,#32,#32
+ fmov $IN23_2,x8
+ fmov $IN23_3,x10
+ fmov $IN23_4,x12
+
+ ldp x8,x12,[$inp],#16 // inp[0:1]
+ ldp x9,x13,[$inp],#48
+
+ ld1 {$R0,$R1,$S1,$R2},[x15],#64
+ ld1 {$S2,$R3,$S3,$R4},[x15],#64
+ ld1 {$S4},[x15]
+
+#ifdef __AARCH64EB__
+ rev x8,x8
+ rev x12,x12
+ rev x9,x9
+ rev x13,x13
+#endif
+ and x4,x8,#0x03ffffff // base 2^64 -> base 2^26
+ and x5,x9,#0x03ffffff
+ ubfx x6,x8,#26,#26
+ ubfx x7,x9,#26,#26
+ add x4,x4,x5,lsl#32 // bfi x4,x5,#32,#32
+ extr x8,x12,x8,#52
+ extr x9,x13,x9,#52
+ add x6,x6,x7,lsl#32 // bfi x6,x7,#32,#32
+ fmov $IN01_0,x4
+ and x8,x8,#0x03ffffff
+ and x9,x9,#0x03ffffff
+ ubfx x10,x12,#14,#26
+ ubfx x11,x13,#14,#26
+ add x12,$padbit,x12,lsr#40
+ add x13,$padbit,x13,lsr#40
+ add x8,x8,x9,lsl#32 // bfi x8,x9,#32,#32
+ fmov $IN01_1,x6
+ add x10,x10,x11,lsl#32 // bfi x10,x11,#32,#32
+ add x12,x12,x13,lsl#32 // bfi x12,x13,#32,#32
+ movi $MASK.2d,#-1
+ fmov $IN01_2,x8
+ fmov $IN01_3,x10
+ fmov $IN01_4,x12
+ ushr $MASK.2d,$MASK.2d,#38
+
+ b.ls .Lskip_loop
+
+.align 4
+.Loop_neon:
+ ////////////////////////////////////////////////////////////////
+ // ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2
+ // ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^3+inp[7]*r
+ // \___________________/
+ // ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2+inp[8])*r^2
+ // ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^4+inp[7]*r^2+inp[9])*r
+ // \___________________/ \____________________/
+ //
+ // Note that we start with inp[2:3]*r^2. This is because it
+ // doesn't depend on reduction in previous iteration.
+ ////////////////////////////////////////////////////////////////
+ // d4 = h0*r4 + h1*r3 + h2*r2 + h3*r1 + h4*r0
+ // d3 = h0*r3 + h1*r2 + h2*r1 + h3*r0 + h4*5*r4
+ // d2 = h0*r2 + h1*r1 + h2*r0 + h3*5*r4 + h4*5*r3
+ // d1 = h0*r1 + h1*r0 + h2*5*r4 + h3*5*r3 + h4*5*r2
+ // d0 = h0*r0 + h1*5*r4 + h2*5*r3 + h3*5*r2 + h4*5*r1
+
+ subs $len,$len,#64
+ umull $ACC4,$IN23_0,${R4}[2]
+ csel $in2,$zeros,$in2,lo
+ umull $ACC3,$IN23_0,${R3}[2]
+ umull $ACC2,$IN23_0,${R2}[2]
+ ldp x8,x12,[$in2],#16 // inp[2:3] (or zero)
+ umull $ACC1,$IN23_0,${R1}[2]
+ ldp x9,x13,[$in2],#48
+ umull $ACC0,$IN23_0,${R0}[2]
+#ifdef __AARCH64EB__
+ rev x8,x8
+ rev x12,x12
+ rev x9,x9
+ rev x13,x13
+#endif
+
+ umlal $ACC4,$IN23_1,${R3}[2]
+ and x4,x8,#0x03ffffff // base 2^64 -> base 2^26
+ umlal $ACC3,$IN23_1,${R2}[2]
+ and x5,x9,#0x03ffffff
+ umlal $ACC2,$IN23_1,${R1}[2]
+ ubfx x6,x8,#26,#26
+ umlal $ACC1,$IN23_1,${R0}[2]
+ ubfx x7,x9,#26,#26
+ umlal $ACC0,$IN23_1,${S4}[2]
+ add x4,x4,x5,lsl#32 // bfi x4,x5,#32,#32
+
+ umlal $ACC4,$IN23_2,${R2}[2]
+ extr x8,x12,x8,#52
+ umlal $ACC3,$IN23_2,${R1}[2]
+ extr x9,x13,x9,#52
+ umlal $ACC2,$IN23_2,${R0}[2]
+ add x6,x6,x7,lsl#32 // bfi x6,x7,#32,#32
+ umlal $ACC1,$IN23_2,${S4}[2]
+ fmov $IN23_0,x4
+ umlal $ACC0,$IN23_2,${S3}[2]
+ and x8,x8,#0x03ffffff
+
+ umlal $ACC4,$IN23_3,${R1}[2]
+ and x9,x9,#0x03ffffff
+ umlal $ACC3,$IN23_3,${R0}[2]
+ ubfx x10,x12,#14,#26
+ umlal $ACC2,$IN23_3,${S4}[2]
+ ubfx x11,x13,#14,#26
+ umlal $ACC1,$IN23_3,${S3}[2]
+ add x8,x8,x9,lsl#32 // bfi x8,x9,#32,#32
+ umlal $ACC0,$IN23_3,${S2}[2]
+ fmov $IN23_1,x6
+
+ add $IN01_2,$IN01_2,$H2
+ add x12,$padbit,x12,lsr#40
+ umlal $ACC4,$IN23_4,${R0}[2]
+ add x13,$padbit,x13,lsr#40
+ umlal $ACC3,$IN23_4,${S4}[2]
+ add x10,x10,x11,lsl#32 // bfi x10,x11,#32,#32
+ umlal $ACC2,$IN23_4,${S3}[2]
+ add x12,x12,x13,lsl#32 // bfi x12,x13,#32,#32
+ umlal $ACC1,$IN23_4,${S2}[2]
+ fmov $IN23_2,x8
+ umlal $ACC0,$IN23_4,${S1}[2]
+ fmov $IN23_3,x10
+
+ ////////////////////////////////////////////////////////////////
+ // (hash+inp[0:1])*r^4 and accumulate
+
+ add $IN01_0,$IN01_0,$H0
+ fmov $IN23_4,x12
+ umlal $ACC3,$IN01_2,${R1}[0]
+ ldp x8,x12,[$inp],#16 // inp[0:1]
+ umlal $ACC0,$IN01_2,${S3}[0]
+ ldp x9,x13,[$inp],#48
+ umlal $ACC4,$IN01_2,${R2}[0]
+ umlal $ACC1,$IN01_2,${S4}[0]
+ umlal $ACC2,$IN01_2,${R0}[0]
+#ifdef __AARCH64EB__
+ rev x8,x8
+ rev x12,x12
+ rev x9,x9
+ rev x13,x13
+#endif
+
+ add $IN01_1,$IN01_1,$H1
+ umlal $ACC3,$IN01_0,${R3}[0]
+ umlal $ACC4,$IN01_0,${R4}[0]
+ and x4,x8,#0x03ffffff // base 2^64 -> base 2^26
+ umlal $ACC2,$IN01_0,${R2}[0]
+ and x5,x9,#0x03ffffff
+ umlal $ACC0,$IN01_0,${R0}[0]
+ ubfx x6,x8,#26,#26
+ umlal $ACC1,$IN01_0,${R1}[0]
+ ubfx x7,x9,#26,#26
+
+ add $IN01_3,$IN01_3,$H3
+ add x4,x4,x5,lsl#32 // bfi x4,x5,#32,#32
+ umlal $ACC3,$IN01_1,${R2}[0]
+ extr x8,x12,x8,#52
+ umlal $ACC4,$IN01_1,${R3}[0]
+ extr x9,x13,x9,#52
+ umlal $ACC0,$IN01_1,${S4}[0]
+ add x6,x6,x7,lsl#32 // bfi x6,x7,#32,#32
+ umlal $ACC2,$IN01_1,${R1}[0]
+ fmov $IN01_0,x4
+ umlal $ACC1,$IN01_1,${R0}[0]
+ and x8,x8,#0x03ffffff
+
+ add $IN01_4,$IN01_4,$H4
+ and x9,x9,#0x03ffffff
+ umlal $ACC3,$IN01_3,${R0}[0]
+ ubfx x10,x12,#14,#26
+ umlal $ACC0,$IN01_3,${S2}[0]
+ ubfx x11,x13,#14,#26
+ umlal $ACC4,$IN01_3,${R1}[0]
+ add x8,x8,x9,lsl#32 // bfi x8,x9,#32,#32
+ umlal $ACC1,$IN01_3,${S3}[0]
+ fmov $IN01_1,x6
+ umlal $ACC2,$IN01_3,${S4}[0]
+ add x12,$padbit,x12,lsr#40
+
+ umlal $ACC3,$IN01_4,${S4}[0]
+ add x13,$padbit,x13,lsr#40
+ umlal $ACC0,$IN01_4,${S1}[0]
+ add x10,x10,x11,lsl#32 // bfi x10,x11,#32,#32
+ umlal $ACC4,$IN01_4,${R0}[0]
+ add x12,x12,x13,lsl#32 // bfi x12,x13,#32,#32
+ umlal $ACC1,$IN01_4,${S2}[0]
+ fmov $IN01_2,x8
+ umlal $ACC2,$IN01_4,${S3}[0]
+ fmov $IN01_3,x10
+ fmov $IN01_4,x12
+
+ /////////////////////////////////////////////////////////////////
+ // lazy reduction as discussed in "NEON crypto" by D.J. Bernstein
+ // and P. Schwabe
+ //
+ // [see discussion in poly1305-armv4 module]
+
+ ushr $T0.2d,$ACC3,#26
+ xtn $H3,$ACC3
+ ushr $T1.2d,$ACC0,#26
+ and $ACC0,$ACC0,$MASK.2d
+ add $ACC4,$ACC4,$T0.2d // h3 -> h4
+ bic $H3,#0xfc,lsl#24 // &=0x03ffffff
+ add $ACC1,$ACC1,$T1.2d // h0 -> h1
+
+ ushr $T0.2d,$ACC4,#26
+ xtn $H4,$ACC4
+ ushr $T1.2d,$ACC1,#26
+ xtn $H1,$ACC1
+ bic $H4,#0xfc,lsl#24
+ add $ACC2,$ACC2,$T1.2d // h1 -> h2
+
+ add $ACC0,$ACC0,$T0.2d
+ shl $T0.2d,$T0.2d,#2
+ shrn $T1.2s,$ACC2,#26
+ xtn $H2,$ACC2
+ add $ACC0,$ACC0,$T0.2d // h4 -> h0
+ bic $H1,#0xfc,lsl#24
+ add $H3,$H3,$T1.2s // h2 -> h3
+ bic $H2,#0xfc,lsl#24
+
+ shrn $T0.2s,$ACC0,#26
+ xtn $H0,$ACC0
+ ushr $T1.2s,$H3,#26
+ bic $H3,#0xfc,lsl#24
+ bic $H0,#0xfc,lsl#24
+ add $H1,$H1,$T0.2s // h0 -> h1
+ add $H4,$H4,$T1.2s // h3 -> h4
+
+ b.hi .Loop_neon
+
+.Lskip_loop:
+ dup $IN23_2,${IN23_2}[0]
+ add $IN01_2,$IN01_2,$H2
+
+ ////////////////////////////////////////////////////////////////
+ // multiply (inp[0:1]+hash) or inp[2:3] by r^2:r^1
+
+ adds $len,$len,#32
+ b.ne .Long_tail
+
+ dup $IN23_2,${IN01_2}[0]
+ add $IN23_0,$IN01_0,$H0
+ add $IN23_3,$IN01_3,$H3
+ add $IN23_1,$IN01_1,$H1
+ add $IN23_4,$IN01_4,$H4
+
+.Long_tail:
+ dup $IN23_0,${IN23_0}[0]
+ umull2 $ACC0,$IN23_2,${S3}
+ umull2 $ACC3,$IN23_2,${R1}
+ umull2 $ACC4,$IN23_2,${R2}
+ umull2 $ACC2,$IN23_2,${R0}
+ umull2 $ACC1,$IN23_2,${S4}
+
+ dup $IN23_1,${IN23_1}[0]
+ umlal2 $ACC0,$IN23_0,${R0}
+ umlal2 $ACC2,$IN23_0,${R2}
+ umlal2 $ACC3,$IN23_0,${R3}
+ umlal2 $ACC4,$IN23_0,${R4}
+ umlal2 $ACC1,$IN23_0,${R1}
+
+ dup $IN23_3,${IN23_3}[0]
+ umlal2 $ACC0,$IN23_1,${S4}
+ umlal2 $ACC3,$IN23_1,${R2}
+ umlal2 $ACC2,$IN23_1,${R1}
+ umlal2 $ACC4,$IN23_1,${R3}
+ umlal2 $ACC1,$IN23_1,${R0}
+
+ dup $IN23_4,${IN23_4}[0]
+ umlal2 $ACC3,$IN23_3,${R0}
+ umlal2 $ACC4,$IN23_3,${R1}
+ umlal2 $ACC0,$IN23_3,${S2}
+ umlal2 $ACC1,$IN23_3,${S3}
+ umlal2 $ACC2,$IN23_3,${S4}
+
+ umlal2 $ACC3,$IN23_4,${S4}
+ umlal2 $ACC0,$IN23_4,${S1}
+ umlal2 $ACC4,$IN23_4,${R0}
+ umlal2 $ACC1,$IN23_4,${S2}
+ umlal2 $ACC2,$IN23_4,${S3}
+
+ b.eq .Lshort_tail
+
+ ////////////////////////////////////////////////////////////////
+ // (hash+inp[0:1])*r^4:r^3 and accumulate
+
+ add $IN01_0,$IN01_0,$H0
+ umlal $ACC3,$IN01_2,${R1}
+ umlal $ACC0,$IN01_2,${S3}
+ umlal $ACC4,$IN01_2,${R2}
+ umlal $ACC1,$IN01_2,${S4}
+ umlal $ACC2,$IN01_2,${R0}
+
+ add $IN01_1,$IN01_1,$H1
+ umlal $ACC3,$IN01_0,${R3}
+ umlal $ACC0,$IN01_0,${R0}
+ umlal $ACC4,$IN01_0,${R4}
+ umlal $ACC1,$IN01_0,${R1}
+ umlal $ACC2,$IN01_0,${R2}
+
+ add $IN01_3,$IN01_3,$H3
+ umlal $ACC3,$IN01_1,${R2}
+ umlal $ACC0,$IN01_1,${S4}
+ umlal $ACC4,$IN01_1,${R3}
+ umlal $ACC1,$IN01_1,${R0}
+ umlal $ACC2,$IN01_1,${R1}
+
+ add $IN01_4,$IN01_4,$H4
+ umlal $ACC3,$IN01_3,${R0}
+ umlal $ACC0,$IN01_3,${S2}
+ umlal $ACC4,$IN01_3,${R1}
+ umlal $ACC1,$IN01_3,${S3}
+ umlal $ACC2,$IN01_3,${S4}
+
+ umlal $ACC3,$IN01_4,${S4}
+ umlal $ACC0,$IN01_4,${S1}
+ umlal $ACC4,$IN01_4,${R0}
+ umlal $ACC1,$IN01_4,${S2}
+ umlal $ACC2,$IN01_4,${S3}
+
+.Lshort_tail:
+ ////////////////////////////////////////////////////////////////
+ // horizontal add
+
+ addp $ACC3,$ACC3,$ACC3
+ ldp d8,d9,[sp,#16] // meet ABI requirements
+ addp $ACC0,$ACC0,$ACC0
+ ldp d10,d11,[sp,#32]
+ addp $ACC4,$ACC4,$ACC4
+ ldp d12,d13,[sp,#48]
+ addp $ACC1,$ACC1,$ACC1
+ ldp d14,d15,[sp,#64]
+ addp $ACC2,$ACC2,$ACC2
+ ldr x30,[sp,#8]
+ .inst 0xd50323bf // autiasp
+
+ ////////////////////////////////////////////////////////////////
+ // lazy reduction, but without narrowing
+
+ ushr $T0.2d,$ACC3,#26
+ and $ACC3,$ACC3,$MASK.2d
+ ushr $T1.2d,$ACC0,#26
+ and $ACC0,$ACC0,$MASK.2d
+
+ add $ACC4,$ACC4,$T0.2d // h3 -> h4
+ add $ACC1,$ACC1,$T1.2d // h0 -> h1
+
+ ushr $T0.2d,$ACC4,#26
+ and $ACC4,$ACC4,$MASK.2d
+ ushr $T1.2d,$ACC1,#26
+ and $ACC1,$ACC1,$MASK.2d
+ add $ACC2,$ACC2,$T1.2d // h1 -> h2
+
+ add $ACC0,$ACC0,$T0.2d
+ shl $T0.2d,$T0.2d,#2
+ ushr $T1.2d,$ACC2,#26
+ and $ACC2,$ACC2,$MASK.2d
+ add $ACC0,$ACC0,$T0.2d // h4 -> h0
+ add $ACC3,$ACC3,$T1.2d // h2 -> h3
+
+ ushr $T0.2d,$ACC0,#26
+ and $ACC0,$ACC0,$MASK.2d
+ ushr $T1.2d,$ACC3,#26
+ and $ACC3,$ACC3,$MASK.2d
+ add $ACC1,$ACC1,$T0.2d // h0 -> h1
+ add $ACC4,$ACC4,$T1.2d // h3 -> h4
+
+ ////////////////////////////////////////////////////////////////
+ // write the result, can be partially reduced
+
+ st4 {$ACC0,$ACC1,$ACC2,$ACC3}[0],[$ctx],#16
+ mov x4,#1
+ st1 {$ACC4}[0],[$ctx]
+ str x4,[$ctx,#8] // set is_base2_26
+
+ ldr x29,[sp],#80
+ ret
+.size poly1305_blocks_neon,.-poly1305_blocks_neon
+
+.align 5
+.Lzeros:
+.long 0,0,0,0,0,0,0,0
+.asciz "Poly1305 for ARMv8, CRYPTOGAMS by \@dot-asm"
+.align 2
+#if !defined(__KERNEL__) && !defined(_WIN64)
+.comm OPENSSL_armcap_P,4,4
+.hidden OPENSSL_armcap_P
+#endif
+___
+
+foreach (split("\n",$code)) {
+ s/\b(shrn\s+v[0-9]+)\.[24]d/$1.2s/ or
+ s/\b(fmov\s+)v([0-9]+)[^,]*,\s*x([0-9]+)/$1d$2,x$3/ or
+ (m/\bdup\b/ and (s/\.[24]s/.2d/g or 1)) or
+ (m/\b(eor|and)/ and (s/\.[248][sdh]/.16b/g or 1)) or
+ (m/\bum(ul|la)l\b/ and (s/\.4s/.2s/g or 1)) or
+ (m/\bum(ul|la)l2\b/ and (s/\.2s/.4s/g or 1)) or
+ (m/\bst[1-4]\s+{[^}]+}\[/ and (s/\.[24]d/.s/g or 1));
+
+ s/\.[124]([sd])\[/.$1\[/;
+ s/w#x([0-9]+)/w$1/g;
+
+ print $_,"\n";
+}
+close STDOUT;
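For reference, the interleaving diagrammed in the .Loop_neon comment above can be written out as an equation: the Horner evaluation of ten blocks m_0..m_9 modulo p = 2^{130} − 5 splits into two chains, each multiplied by r^4 per iteration with terms spaced r^2 apart, living in the two halves of each NEON register:

    \sum_{i=0}^{9} m_i\,r^{10-i}
      = \bigl((m_0 r^4 + m_2 r^2 + m_4)\,r^4 + m_6 r^2 + m_8\bigr)\,r^2
      + \bigl((m_1 r^4 + m_3 r^2 + m_5)\,r^4 + m_7 r^2 + m_9\bigr)\,r

which is why the power table holds r, r^2, r^3 and r^4, and why each iteration can start with inp[2:3]*r^2 before the previous reduction completes. (A LaTeX restatement of the in-code comment, not part of the patch.)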
diff --git a/arch/arm64/crypto/poly1305-core.S_shipped b/arch/arm64/crypto/poly1305-core.S_shipped
new file mode 100644
index 000000000000..8d1c4e420ccd
--- /dev/null
+++ b/arch/arm64/crypto/poly1305-core.S_shipped
@@ -0,0 +1,835 @@
+#ifndef __KERNEL__
+# include "arm_arch.h"
+.extern OPENSSL_armcap_P
+#endif
+
+.text
+
+// forward "declarations" are required for Apple
+.globl poly1305_blocks
+.globl poly1305_emit
+
+.globl poly1305_init
+.type poly1305_init,%function
+.align 5
+poly1305_init:
+ cmp x1,xzr
+ stp xzr,xzr,[x0] // zero hash value
+ stp xzr,xzr,[x0,#16] // [along with is_base2_26]
+
+ csel x0,xzr,x0,eq
+ b.eq .Lno_key
+
+#ifndef __KERNEL__
+ adrp x17,OPENSSL_armcap_P
+ ldr w17,[x17,#:lo12:OPENSSL_armcap_P]
+#endif
+
+ ldp x7,x8,[x1] // load key
+ mov x9,#0xfffffffc0fffffff
+ movk x9,#0x0fff,lsl#48
+#ifdef __AARCH64EB__
+ rev x7,x7 // flip bytes
+ rev x8,x8
+#endif
+ and x7,x7,x9 // &=0ffffffc0fffffff
+ and x9,x9,#-4
+ and x8,x8,x9 // &=0ffffffc0ffffffc
+ mov w9,#-1
+ stp x7,x8,[x0,#32] // save key value
+ str w9,[x0,#48] // impossible key power value
+
+#ifndef __KERNEL__
+ tst w17,#ARMV7_NEON
+
+ adr x12,.Lpoly1305_blocks
+ adr x7,.Lpoly1305_blocks_neon
+ adr x13,.Lpoly1305_emit
+
+ csel x12,x12,x7,eq
+
+# ifdef __ILP32__
+ stp w12,w13,[x2]
+# else
+ stp x12,x13,[x2]
+# endif
+#endif
+ mov x0,#1
+.Lno_key:
+ ret
+.size poly1305_init,.-poly1305_init
+
+.type poly1305_blocks,%function
+.align 5
+poly1305_blocks:
+.Lpoly1305_blocks:
+ ands x2,x2,#-16
+ b.eq .Lno_data
+
+ ldp x4,x5,[x0] // load hash value
+ ldp x6,x17,[x0,#16] // [along with is_base2_26]
+ ldp x7,x8,[x0,#32] // load key value
+
+#ifdef __AARCH64EB__
+ lsr x12,x4,#32
+ mov w13,w4
+ lsr x14,x5,#32
+ mov w15,w5
+ lsr x16,x6,#32
+#else
+ mov w12,w4
+ lsr x13,x4,#32
+ mov w14,w5
+ lsr x15,x5,#32
+ mov w16,w6
+#endif
+
+ add x12,x12,x13,lsl#26 // base 2^26 -> base 2^64
+ lsr x13,x14,#12
+ adds x12,x12,x14,lsl#52
+ add x13,x13,x15,lsl#14
+ adc x13,x13,xzr
+ lsr x14,x16,#24
+ adds x13,x13,x16,lsl#40
+ adc x14,x14,xzr
+
+ cmp x17,#0 // is_base2_26?
+ add x9,x8,x8,lsr#2 // s1 = r1 + (r1 >> 2)
+ csel x4,x4,x12,eq // choose between radixes
+ csel x5,x5,x13,eq
+ csel x6,x6,x14,eq
+
+.Loop:
+ ldp x10,x11,[x1],#16 // load input
+ sub x2,x2,#16
+#ifdef __AARCH64EB__
+ rev x10,x10
+ rev x11,x11
+#endif
+ adds x4,x4,x10 // accumulate input
+ adcs x5,x5,x11
+
+ mul x12,x4,x7 // h0*r0
+ adc x6,x6,x3
+ umulh x13,x4,x7
+
+ mul x10,x5,x9 // h1*5*r1
+ umulh x11,x5,x9
+
+ adds x12,x12,x10
+ mul x10,x4,x8 // h0*r1
+ adc x13,x13,x11
+ umulh x14,x4,x8
+
+ adds x13,x13,x10
+ mul x10,x5,x7 // h1*r0
+ adc x14,x14,xzr
+ umulh x11,x5,x7
+
+ adds x13,x13,x10
+ mul x10,x6,x9 // h2*5*r1
+ adc x14,x14,x11
+ mul x11,x6,x7 // h2*r0
+
+ adds x13,x13,x10
+ adc x14,x14,x11
+
+ and x10,x14,#-4 // final reduction
+ and x6,x14,#3
+ add x10,x10,x14,lsr#2
+ adds x4,x12,x10
+ adcs x5,x13,xzr
+ adc x6,x6,xzr
+
+ cbnz x2,.Loop
+
+ stp x4,x5,[x0] // store hash value
+ stp x6,xzr,[x0,#16] // [and clear is_base2_26]
+
+.Lno_data:
+ ret
+.size poly1305_blocks,.-poly1305_blocks
+
+.type poly1305_emit,%function
+.align 5
+poly1305_emit:
+.Lpoly1305_emit:
+ ldp x4,x5,[x0] // load hash base 2^64
+ ldp x6,x7,[x0,#16] // [along with is_base2_26]
+ ldp x10,x11,[x2] // load nonce
+
+#ifdef __AARCH64EB__
+ lsr x12,x4,#32
+ mov w13,w4
+ lsr x14,x5,#32
+ mov w15,w5
+ lsr x16,x6,#32
+#else
+ mov w12,w4
+ lsr x13,x4,#32
+ mov w14,w5
+ lsr x15,x5,#32
+ mov w16,w6
+#endif
+
+ add x12,x12,x13,lsl#26 // base 2^26 -> base 2^64
+ lsr x13,x14,#12
+ adds x12,x12,x14,lsl#52
+ add x13,x13,x15,lsl#14
+ adc x13,x13,xzr
+ lsr x14,x16,#24
+ adds x13,x13,x16,lsl#40
+ adc x14,x14,xzr
+
+ cmp x7,#0 // is_base2_26?
+ csel x4,x4,x12,eq // choose between radixes
+ csel x5,x5,x13,eq
+ csel x6,x6,x14,eq
+
+ adds x12,x4,#5 // compare to modulus
+ adcs x13,x5,xzr
+ adc x14,x6,xzr
+
+ tst x14,#-4 // see if it's carried/borrowed
+
+ csel x4,x4,x12,eq
+ csel x5,x5,x13,eq
+
+#ifdef __AARCH64EB__
+ ror x10,x10,#32 // flip nonce words
+ ror x11,x11,#32
+#endif
+ adds x4,x4,x10 // accumulate nonce
+ adc x5,x5,x11
+#ifdef __AARCH64EB__
+ rev x4,x4 // flip output bytes
+ rev x5,x5
+#endif
+ stp x4,x5,[x1] // write result
+
+ ret
+.size poly1305_emit,.-poly1305_emit
+.type poly1305_mult,%function
+.align 5
+poly1305_mult:
+ mul x12,x4,x7 // h0*r0
+ umulh x13,x4,x7
+
+ mul x10,x5,x9 // h1*5*r1
+ umulh x11,x5,x9
+
+ adds x12,x12,x10
+ mul x10,x4,x8 // h0*r1
+ adc x13,x13,x11
+ umulh x14,x4,x8
+
+ adds x13,x13,x10
+ mul x10,x5,x7 // h1*r0
+ adc x14,x14,xzr
+ umulh x11,x5,x7
+
+ adds x13,x13,x10
+ mul x10,x6,x9 // h2*5*r1
+ adc x14,x14,x11
+ mul x11,x6,x7 // h2*r0
+
+ adds x13,x13,x10
+ adc x14,x14,x11
+
+ and x10,x14,#-4 // final reduction
+ and x6,x14,#3
+ add x10,x10,x14,lsr#2
+ adds x4,x12,x10
+ adcs x5,x13,xzr
+ adc x6,x6,xzr
+
+ ret
+.size poly1305_mult,.-poly1305_mult
+
+.type poly1305_splat,%function
+.align 4
+poly1305_splat:
+ and x12,x4,#0x03ffffff // base 2^64 -> base 2^26
+ ubfx x13,x4,#26,#26
+ extr x14,x5,x4,#52
+ and x14,x14,#0x03ffffff
+ ubfx x15,x5,#14,#26
+ extr x16,x6,x5,#40
+
+ str w12,[x0,#16*0] // r0
+ add w12,w13,w13,lsl#2 // r1*5
+ str w13,[x0,#16*1] // r1
+ add w13,w14,w14,lsl#2 // r2*5
+ str w12,[x0,#16*2] // s1
+ str w14,[x0,#16*3] // r2
+ add w14,w15,w15,lsl#2 // r3*5
+ str w13,[x0,#16*4] // s2
+ str w15,[x0,#16*5] // r3
+ add w15,w16,w16,lsl#2 // r4*5
+ str w14,[x0,#16*6] // s3
+ str w16,[x0,#16*7] // r4
+ str w15,[x0,#16*8] // s4
+
+ ret
+.size poly1305_splat,.-poly1305_splat
+
+#ifdef __KERNEL__
+.globl poly1305_blocks_neon
+#endif
+.type poly1305_blocks_neon,%function
+.align 5
+poly1305_blocks_neon:
+.Lpoly1305_blocks_neon:
+ ldr x17,[x0,#24]
+ cmp x2,#128
+ b.lo .Lpoly1305_blocks
+
+ .inst 0xd503233f // paciasp
+ stp x29,x30,[sp,#-80]!
+ add x29,sp,#0
+
+ stp d8,d9,[sp,#16] // meet ABI requirements
+ stp d10,d11,[sp,#32]
+ stp d12,d13,[sp,#48]
+ stp d14,d15,[sp,#64]
+
+ cbz x17,.Lbase2_64_neon
+
+ ldp w10,w11,[x0] // load hash value base 2^26
+ ldp w12,w13,[x0,#8]
+ ldr w14,[x0,#16]
+
+ tst x2,#31
+ b.eq .Leven_neon
+
+ ldp x7,x8,[x0,#32] // load key value
+
+ add x4,x10,x11,lsl#26 // base 2^26 -> base 2^64
+ lsr x5,x12,#12
+ adds x4,x4,x12,lsl#52
+ add x5,x5,x13,lsl#14
+ adc x5,x5,xzr
+ lsr x6,x14,#24
+ adds x5,x5,x14,lsl#40
+ adc x14,x6,xzr // can be partially reduced...
+
+ ldp x12,x13,[x1],#16 // load input
+ sub x2,x2,#16
+ add x9,x8,x8,lsr#2 // s1 = r1 + (r1 >> 2)
+
+#ifdef __AARCH64EB__
+ rev x12,x12
+ rev x13,x13
+#endif
+ adds x4,x4,x12 // accumulate input
+ adcs x5,x5,x13
+ adc x6,x6,x3
+
+ bl poly1305_mult
+
+ and x10,x4,#0x03ffffff // base 2^64 -> base 2^26
+ ubfx x11,x4,#26,#26
+ extr x12,x5,x4,#52
+ and x12,x12,#0x03ffffff
+ ubfx x13,x5,#14,#26
+ extr x14,x6,x5,#40
+
+ b .Leven_neon
+
+.align 4
+.Lbase2_64_neon:
+ ldp x7,x8,[x0,#32] // load key value
+
+ ldp x4,x5,[x0] // load hash value base 2^64
+ ldr x6,[x0,#16]
+
+ tst x2,#31
+ b.eq .Linit_neon
+
+ ldp x12,x13,[x1],#16 // load input
+ sub x2,x2,#16
+ add x9,x8,x8,lsr#2 // s1 = r1 + (r1 >> 2)
+#ifdef __AARCH64EB__
+ rev x12,x12
+ rev x13,x13
+#endif
+ adds x4,x4,x12 // accumulate input
+ adcs x5,x5,x13
+ adc x6,x6,x3
+
+ bl poly1305_mult
+
+.Linit_neon:
+ ldr w17,[x0,#48] // first table element
+ and x10,x4,#0x03ffffff // base 2^64 -> base 2^26
+ ubfx x11,x4,#26,#26
+ extr x12,x5,x4,#52
+ and x12,x12,#0x03ffffff
+ ubfx x13,x5,#14,#26
+ extr x14,x6,x5,#40
+
+ cmp w17,#-1 // is value impossible?
+ b.ne .Leven_neon
+
+ fmov d24,x10
+ fmov d25,x11
+ fmov d26,x12
+ fmov d27,x13
+ fmov d28,x14
+
+ ////////////////////////////////// initialize r^n table
+ mov x4,x7 // r^1
+ add x9,x8,x8,lsr#2 // s1 = r1 + (r1 >> 2)
+ mov x5,x8
+ mov x6,xzr
+ add x0,x0,#48+12
+ bl poly1305_splat
+
+ bl poly1305_mult // r^2
+ sub x0,x0,#4
+ bl poly1305_splat
+
+ bl poly1305_mult // r^3
+ sub x0,x0,#4
+ bl poly1305_splat
+
+ bl poly1305_mult // r^4
+ sub x0,x0,#4
+ bl poly1305_splat
+ sub x0,x0,#48 // restore original x0
+ b .Ldo_neon
+
+.align 4
+.Leven_neon:
+ fmov d24,x10
+ fmov d25,x11
+ fmov d26,x12
+ fmov d27,x13
+ fmov d28,x14
+
+.Ldo_neon:
+ ldp x8,x12,[x1,#32] // inp[2:3]
+ subs x2,x2,#64
+ ldp x9,x13,[x1,#48]
+ add x16,x1,#96
+ adr x17,.Lzeros
+
+ lsl x3,x3,#24
+ add x15,x0,#48
+
+#ifdef __AARCH64EB__
+ rev x8,x8
+ rev x12,x12
+ rev x9,x9
+ rev x13,x13
+#endif
+ and x4,x8,#0x03ffffff // base 2^64 -> base 2^26
+ and x5,x9,#0x03ffffff
+ ubfx x6,x8,#26,#26
+ ubfx x7,x9,#26,#26
+ add x4,x4,x5,lsl#32 // bfi x4,x5,#32,#32
+ extr x8,x12,x8,#52
+ extr x9,x13,x9,#52
+ add x6,x6,x7,lsl#32 // bfi x6,x7,#32,#32
+ fmov d14,x4
+ and x8,x8,#0x03ffffff
+ and x9,x9,#0x03ffffff
+ ubfx x10,x12,#14,#26
+ ubfx x11,x13,#14,#26
+ add x12,x3,x12,lsr#40
+ add x13,x3,x13,lsr#40
+ add x8,x8,x9,lsl#32 // bfi x8,x9,#32,#32
+ fmov d15,x6
+ add x10,x10,x11,lsl#32 // bfi x10,x11,#32,#32
+ add x12,x12,x13,lsl#32 // bfi x12,x13,#32,#32
+ fmov d16,x8
+ fmov d17,x10
+ fmov d18,x12
+
+ ldp x8,x12,[x1],#16 // inp[0:1]
+ ldp x9,x13,[x1],#48
+
+ ld1 {v0.4s,v1.4s,v2.4s,v3.4s},[x15],#64
+ ld1 {v4.4s,v5.4s,v6.4s,v7.4s},[x15],#64
+ ld1 {v8.4s},[x15]
+
+#ifdef __AARCH64EB__
+ rev x8,x8
+ rev x12,x12
+ rev x9,x9
+ rev x13,x13
+#endif
+ and x4,x8,#0x03ffffff // base 2^64 -> base 2^26
+ and x5,x9,#0x03ffffff
+ ubfx x6,x8,#26,#26
+ ubfx x7,x9,#26,#26
+ add x4,x4,x5,lsl#32 // bfi x4,x5,#32,#32
+ extr x8,x12,x8,#52
+ extr x9,x13,x9,#52
+ add x6,x6,x7,lsl#32 // bfi x6,x7,#32,#32
+ fmov d9,x4
+ and x8,x8,#0x03ffffff
+ and x9,x9,#0x03ffffff
+ ubfx x10,x12,#14,#26
+ ubfx x11,x13,#14,#26
+ add x12,x3,x12,lsr#40
+ add x13,x3,x13,lsr#40
+ add x8,x8,x9,lsl#32 // bfi x8,x9,#32,#32
+ fmov d10,x6
+ add x10,x10,x11,lsl#32 // bfi x10,x11,#32,#32
+ add x12,x12,x13,lsl#32 // bfi x12,x13,#32,#32
+ movi v31.2d,#-1
+ fmov d11,x8
+ fmov d12,x10
+ fmov d13,x12
+ ushr v31.2d,v31.2d,#38
+
+ b.ls .Lskip_loop
+
+.align 4
+.Loop_neon:
+ ////////////////////////////////////////////////////////////////
+ // ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2
+ // ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^3+inp[7]*r
+// \___________________/
+ // ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2+inp[8])*r^2
+ // ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^4+inp[7]*r^2+inp[9])*r
+// \___________________/ \____________________/
+ //
+ // Note that we start with inp[2:3]*r^2. This is because it
+ // doesn't depend on reduction in previous iteration.
+ ////////////////////////////////////////////////////////////////
+ // d4 = h0*r4 + h1*r3 + h2*r2 + h3*r1 + h4*r0
+ // d3 = h0*r3 + h1*r2 + h2*r1 + h3*r0 + h4*5*r4
+ // d2 = h0*r2 + h1*r1 + h2*r0 + h3*5*r4 + h4*5*r3
+ // d1 = h0*r1 + h1*r0 + h2*5*r4 + h3*5*r3 + h4*5*r2
+ // d0 = h0*r0 + h1*5*r4 + h2*5*r3 + h3*5*r2 + h4*5*r1
+
+ subs x2,x2,#64
+ umull v23.2d,v14.2s,v7.s[2]
+ csel x16,x17,x16,lo
+ umull v22.2d,v14.2s,v5.s[2]
+ umull v21.2d,v14.2s,v3.s[2]
+ ldp x8,x12,[x16],#16 // inp[2:3] (or zero)
+ umull v20.2d,v14.2s,v1.s[2]
+ ldp x9,x13,[x16],#48
+ umull v19.2d,v14.2s,v0.s[2]
+#ifdef __AARCH64EB__
+ rev x8,x8
+ rev x12,x12
+ rev x9,x9
+ rev x13,x13
+#endif
+
+ umlal v23.2d,v15.2s,v5.s[2]
+ and x4,x8,#0x03ffffff // base 2^64 -> base 2^26
+ umlal v22.2d,v15.2s,v3.s[2]
+ and x5,x9,#0x03ffffff
+ umlal v21.2d,v15.2s,v1.s[2]
+ ubfx x6,x8,#26,#26
+ umlal v20.2d,v15.2s,v0.s[2]
+ ubfx x7,x9,#26,#26
+ umlal v19.2d,v15.2s,v8.s[2]
+ add x4,x4,x5,lsl#32 // bfi x4,x5,#32,#32
+
+ umlal v23.2d,v16.2s,v3.s[2]
+ extr x8,x12,x8,#52
+ umlal v22.2d,v16.2s,v1.s[2]
+ extr x9,x13,x9,#52
+ umlal v21.2d,v16.2s,v0.s[2]
+ add x6,x6,x7,lsl#32 // bfi x6,x7,#32,#32
+ umlal v20.2d,v16.2s,v8.s[2]
+ fmov d14,x4
+ umlal v19.2d,v16.2s,v6.s[2]
+ and x8,x8,#0x03ffffff
+
+ umlal v23.2d,v17.2s,v1.s[2]
+ and x9,x9,#0x03ffffff
+ umlal v22.2d,v17.2s,v0.s[2]
+ ubfx x10,x12,#14,#26
+ umlal v21.2d,v17.2s,v8.s[2]
+ ubfx x11,x13,#14,#26
+ umlal v20.2d,v17.2s,v6.s[2]
+ add x8,x8,x9,lsl#32 // bfi x8,x9,#32,#32
+ umlal v19.2d,v17.2s,v4.s[2]
+ fmov d15,x6
+
+ add v11.2s,v11.2s,v26.2s
+ add x12,x3,x12,lsr#40
+ umlal v23.2d,v18.2s,v0.s[2]
+ add x13,x3,x13,lsr#40
+ umlal v22.2d,v18.2s,v8.s[2]
+ add x10,x10,x11,lsl#32 // bfi x10,x11,#32,#32
+ umlal v21.2d,v18.2s,v6.s[2]
+ add x12,x12,x13,lsl#32 // bfi x12,x13,#32,#32
+ umlal v20.2d,v18.2s,v4.s[2]
+ fmov d16,x8
+ umlal v19.2d,v18.2s,v2.s[2]
+ fmov d17,x10
+
+ ////////////////////////////////////////////////////////////////
+ // (hash+inp[0:1])*r^4 and accumulate
+
+ add v9.2s,v9.2s,v24.2s
+ fmov d18,x12
+ umlal v22.2d,v11.2s,v1.s[0]
+ ldp x8,x12,[x1],#16 // inp[0:1]
+ umlal v19.2d,v11.2s,v6.s[0]
+ ldp x9,x13,[x1],#48
+ umlal v23.2d,v11.2s,v3.s[0]
+ umlal v20.2d,v11.2s,v8.s[0]
+ umlal v21.2d,v11.2s,v0.s[0]
+#ifdef __AARCH64EB__
+ rev x8,x8
+ rev x12,x12
+ rev x9,x9
+ rev x13,x13
+#endif
+
+ add v10.2s,v10.2s,v25.2s
+ umlal v22.2d,v9.2s,v5.s[0]
+ umlal v23.2d,v9.2s,v7.s[0]
+ and x4,x8,#0x03ffffff // base 2^64 -> base 2^26
+ umlal v21.2d,v9.2s,v3.s[0]
+ and x5,x9,#0x03ffffff
+ umlal v19.2d,v9.2s,v0.s[0]
+ ubfx x6,x8,#26,#26
+ umlal v20.2d,v9.2s,v1.s[0]
+ ubfx x7,x9,#26,#26
+
+ add v12.2s,v12.2s,v27.2s
+ add x4,x4,x5,lsl#32 // bfi x4,x5,#32,#32
+ umlal v22.2d,v10.2s,v3.s[0]
+ extr x8,x12,x8,#52
+ umlal v23.2d,v10.2s,v5.s[0]
+ extr x9,x13,x9,#52
+ umlal v19.2d,v10.2s,v8.s[0]
+ add x6,x6,x7,lsl#32 // bfi x6,x7,#32,#32
+ umlal v21.2d,v10.2s,v1.s[0]
+ fmov d9,x4
+ umlal v20.2d,v10.2s,v0.s[0]
+ and x8,x8,#0x03ffffff
+
+ add v13.2s,v13.2s,v28.2s
+ and x9,x9,#0x03ffffff
+ umlal v22.2d,v12.2s,v0.s[0]
+ ubfx x10,x12,#14,#26
+ umlal v19.2d,v12.2s,v4.s[0]
+ ubfx x11,x13,#14,#26
+ umlal v23.2d,v12.2s,v1.s[0]
+ add x8,x8,x9,lsl#32 // bfi x8,x9,#32,#32
+ umlal v20.2d,v12.2s,v6.s[0]
+ fmov d10,x6
+ umlal v21.2d,v12.2s,v8.s[0]
+ add x12,x3,x12,lsr#40
+
+ umlal v22.2d,v13.2s,v8.s[0]
+ add x13,x3,x13,lsr#40
+ umlal v19.2d,v13.2s,v2.s[0]
+ add x10,x10,x11,lsl#32 // bfi x10,x11,#32,#32
+ umlal v23.2d,v13.2s,v0.s[0]
+ add x12,x12,x13,lsl#32 // bfi x12,x13,#32,#32
+ umlal v20.2d,v13.2s,v4.s[0]
+ fmov d11,x8
+ umlal v21.2d,v13.2s,v6.s[0]
+ fmov d12,x10
+ fmov d13,x12
+
+ /////////////////////////////////////////////////////////////////
+ // lazy reduction as discussed in "NEON crypto" by D.J. Bernstein
+ // and P. Schwabe
+ //
+ // [see discussion in poly1305-armv4 module]
+
+ ushr v29.2d,v22.2d,#26
+ xtn v27.2s,v22.2d
+ ushr v30.2d,v19.2d,#26
+ and v19.16b,v19.16b,v31.16b
+ add v23.2d,v23.2d,v29.2d // h3 -> h4
+ bic v27.2s,#0xfc,lsl#24 // &=0x03ffffff
+ add v20.2d,v20.2d,v30.2d // h0 -> h1
+
+ ushr v29.2d,v23.2d,#26
+ xtn v28.2s,v23.2d
+ ushr v30.2d,v20.2d,#26
+ xtn v25.2s,v20.2d
+ bic v28.2s,#0xfc,lsl#24
+ add v21.2d,v21.2d,v30.2d // h1 -> h2
+
+ add v19.2d,v19.2d,v29.2d
+ shl v29.2d,v29.2d,#2
+ shrn v30.2s,v21.2d,#26
+ xtn v26.2s,v21.2d
+ add v19.2d,v19.2d,v29.2d // h4 -> h0
+ bic v25.2s,#0xfc,lsl#24
+ add v27.2s,v27.2s,v30.2s // h2 -> h3
+ bic v26.2s,#0xfc,lsl#24
+
+ shrn v29.2s,v19.2d,#26
+ xtn v24.2s,v19.2d
+ ushr v30.2s,v27.2s,#26
+ bic v27.2s,#0xfc,lsl#24
+ bic v24.2s,#0xfc,lsl#24
+ add v25.2s,v25.2s,v29.2s // h0 -> h1
+ add v28.2s,v28.2s,v30.2s // h3 -> h4
+
+ b.hi .Loop_neon
+
+.Lskip_loop:
+ dup v16.2d,v16.d[0]
+ add v11.2s,v11.2s,v26.2s
+
+ ////////////////////////////////////////////////////////////////
+ // multiply (inp[0:1]+hash) or inp[2:3] by r^2:r^1
+
+ adds x2,x2,#32
+ b.ne .Long_tail
+
+ dup v16.2d,v11.d[0]
+ add v14.2s,v9.2s,v24.2s
+ add v17.2s,v12.2s,v27.2s
+ add v15.2s,v10.2s,v25.2s
+ add v18.2s,v13.2s,v28.2s
+
+.Long_tail:
+ dup v14.2d,v14.d[0]
+ umull2 v19.2d,v16.4s,v6.4s
+ umull2 v22.2d,v16.4s,v1.4s
+ umull2 v23.2d,v16.4s,v3.4s
+ umull2 v21.2d,v16.4s,v0.4s
+ umull2 v20.2d,v16.4s,v8.4s
+
+ dup v15.2d,v15.d[0]
+ umlal2 v19.2d,v14.4s,v0.4s
+ umlal2 v21.2d,v14.4s,v3.4s
+ umlal2 v22.2d,v14.4s,v5.4s
+ umlal2 v23.2d,v14.4s,v7.4s
+ umlal2 v20.2d,v14.4s,v1.4s
+
+ dup v17.2d,v17.d[0]
+ umlal2 v19.2d,v15.4s,v8.4s
+ umlal2 v22.2d,v15.4s,v3.4s
+ umlal2 v21.2d,v15.4s,v1.4s
+ umlal2 v23.2d,v15.4s,v5.4s
+ umlal2 v20.2d,v15.4s,v0.4s
+
+ dup v18.2d,v18.d[0]
+ umlal2 v22.2d,v17.4s,v0.4s
+ umlal2 v23.2d,v17.4s,v1.4s
+ umlal2 v19.2d,v17.4s,v4.4s
+ umlal2 v20.2d,v17.4s,v6.4s
+ umlal2 v21.2d,v17.4s,v8.4s
+
+ umlal2 v22.2d,v18.4s,v8.4s
+ umlal2 v19.2d,v18.4s,v2.4s
+ umlal2 v23.2d,v18.4s,v0.4s
+ umlal2 v20.2d,v18.4s,v4.4s
+ umlal2 v21.2d,v18.4s,v6.4s
+
+ b.eq .Lshort_tail
+
+ ////////////////////////////////////////////////////////////////
+ // (hash+inp[0:1])*r^4:r^3 and accumulate
+
+ add v9.2s,v9.2s,v24.2s
+ umlal v22.2d,v11.2s,v1.2s
+ umlal v19.2d,v11.2s,v6.2s
+ umlal v23.2d,v11.2s,v3.2s
+ umlal v20.2d,v11.2s,v8.2s
+ umlal v21.2d,v11.2s,v0.2s
+
+ add v10.2s,v10.2s,v25.2s
+ umlal v22.2d,v9.2s,v5.2s
+ umlal v19.2d,v9.2s,v0.2s
+ umlal v23.2d,v9.2s,v7.2s
+ umlal v20.2d,v9.2s,v1.2s
+ umlal v21.2d,v9.2s,v3.2s
+
+ add v12.2s,v12.2s,v27.2s
+ umlal v22.2d,v10.2s,v3.2s
+ umlal v19.2d,v10.2s,v8.2s
+ umlal v23.2d,v10.2s,v5.2s
+ umlal v20.2d,v10.2s,v0.2s
+ umlal v21.2d,v10.2s,v1.2s
+
+ add v13.2s,v13.2s,v28.2s
+ umlal v22.2d,v12.2s,v0.2s
+ umlal v19.2d,v12.2s,v4.2s
+ umlal v23.2d,v12.2s,v1.2s
+ umlal v20.2d,v12.2s,v6.2s
+ umlal v21.2d,v12.2s,v8.2s
+
+ umlal v22.2d,v13.2s,v8.2s
+ umlal v19.2d,v13.2s,v2.2s
+ umlal v23.2d,v13.2s,v0.2s
+ umlal v20.2d,v13.2s,v4.2s
+ umlal v21.2d,v13.2s,v6.2s
+
+.Lshort_tail:
+ ////////////////////////////////////////////////////////////////
+ // horizontal add
+
+ addp v22.2d,v22.2d,v22.2d
+ ldp d8,d9,[sp,#16] // meet ABI requirements
+ addp v19.2d,v19.2d,v19.2d
+ ldp d10,d11,[sp,#32]
+ addp v23.2d,v23.2d,v23.2d
+ ldp d12,d13,[sp,#48]
+ addp v20.2d,v20.2d,v20.2d
+ ldp d14,d15,[sp,#64]
+ addp v21.2d,v21.2d,v21.2d
+ ldr x30,[sp,#8]
+ .inst 0xd50323bf // autiasp
+
+ ////////////////////////////////////////////////////////////////
+ // lazy reduction, but without narrowing
+
+ ushr v29.2d,v22.2d,#26
+ and v22.16b,v22.16b,v31.16b
+ ushr v30.2d,v19.2d,#26
+ and v19.16b,v19.16b,v31.16b
+
+ add v23.2d,v23.2d,v29.2d // h3 -> h4
+ add v20.2d,v20.2d,v30.2d // h0 -> h1
+
+ ushr v29.2d,v23.2d,#26
+ and v23.16b,v23.16b,v31.16b
+ ushr v30.2d,v20.2d,#26
+ and v20.16b,v20.16b,v31.16b
+ add v21.2d,v21.2d,v30.2d // h1 -> h2
+
+ add v19.2d,v19.2d,v29.2d
+ shl v29.2d,v29.2d,#2
+ ushr v30.2d,v21.2d,#26
+ and v21.16b,v21.16b,v31.16b
+ add v19.2d,v19.2d,v29.2d // h4 -> h0
+ add v22.2d,v22.2d,v30.2d // h2 -> h3
+
+ ushr v29.2d,v19.2d,#26
+ and v19.16b,v19.16b,v31.16b
+ ushr v30.2d,v22.2d,#26
+ and v22.16b,v22.16b,v31.16b
+ add v20.2d,v20.2d,v29.2d // h0 -> h1
+ add v23.2d,v23.2d,v30.2d // h3 -> h4
+
+ ////////////////////////////////////////////////////////////////
+ // write the result, can be partially reduced
+
+ st4 {v19.s,v20.s,v21.s,v22.s}[0],[x0],#16
+ mov x4,#1
+ st1 {v23.s}[0],[x0]
+ str x4,[x0,#8] // set is_base2_26
+
+ ldr x29,[sp],#80
+ ret
+.size poly1305_blocks_neon,.-poly1305_blocks_neon
+
+.align 5
+.Lzeros:
+.long 0,0,0,0,0,0,0,0
+.asciz "Poly1305 for ARMv8, CRYPTOGAMS by @dot-asm"
+.align 2
+#if !defined(__KERNEL__) && !defined(_WIN64)
+.comm OPENSSL_armcap_P,4,4
+.hidden OPENSSL_armcap_P
+#endif
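For orientation, here is the scalar arithmetic that the vector code above interleaves: splitting each 128-bit message word into five 26-bit limbs (the and/ubfx/extr sequences), and the lazy carry propagation in which the h4 carry wraps back into h0 multiplied by 5, since 2^130 mod (2^130 - 5) = 5. A hedged C sketch for illustration only; the function names are not part of the patch, and the NEON code performs the carries in a slightly different interleaved order.

    #include <stdint.h>

    /* Split a 128-bit little-endian block (lo, hi) into five 26-bit limbs,
     * mirroring the base 2^64 -> base 2^26 conversion above. The 2^128
     * "hibit" padding is added to h[4] separately. */
    static void to_base26(uint64_t lo, uint64_t hi, uint64_t h[5])
    {
            h[0] = lo & 0x03ffffff;                        /* bits   0..25  */
            h[1] = (lo >> 26) & 0x03ffffff;                /* bits  26..51  */
            h[2] = ((lo >> 52) | (hi << 12)) & 0x03ffffff; /* bits  52..77  */
            h[3] = (hi >> 14) & 0x03ffffff;                /* bits  78..103 */
            h[4] = hi >> 40;                               /* bits 104..127 */
    }

    /* One lazy-reduction pass over the 64-bit accumulators d[0..4]: each
     * limb keeps its low 26 bits and passes the carry upwards; the h4
     * carry folds into h0 as carry*5, again because 2^130 = 5 mod p. */
    static void lazy_reduce(uint64_t d[5])
    {
            uint64_t c;

            c = d[3] >> 26; d[3] &= 0x03ffffff; d[4] += c;     /* h3 -> h4 */
            c = d[0] >> 26; d[0] &= 0x03ffffff; d[1] += c;     /* h0 -> h1 */
            c = d[4] >> 26; d[4] &= 0x03ffffff; d[0] += c * 5; /* h4 -> h0 */
            c = d[1] >> 26; d[1] &= 0x03ffffff; d[2] += c;     /* h1 -> h2 */
            c = d[2] >> 26; d[2] &= 0x03ffffff; d[3] += c;     /* h2 -> h3 */
            c = d[0] >> 26; d[0] &= 0x03ffffff; d[1] += c;     /* h0 -> h1 */
    }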
diff --git a/arch/arm64/crypto/poly1305-glue.c b/arch/arm64/crypto/poly1305-glue.c
new file mode 100644
index 000000000000..dd843d0ee83a
--- /dev/null
+++ b/arch/arm64/crypto/poly1305-glue.c
@@ -0,0 +1,237 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * OpenSSL/Cryptogams accelerated Poly1305 transform for arm64
+ *
+ * Copyright (C) 2019 Linaro Ltd. <ard.biesheuvel@linaro.org>
+ */
+
+#include <asm/hwcap.h>
+#include <asm/neon.h>
+#include <asm/simd.h>
+#include <asm/unaligned.h>
+#include <crypto/algapi.h>
+#include <crypto/internal/hash.h>
+#include <crypto/internal/poly1305.h>
+#include <crypto/internal/simd.h>
+#include <linux/cpufeature.h>
+#include <linux/crypto.h>
+#include <linux/jump_label.h>
+#include <linux/module.h>
+
+asmlinkage void poly1305_init_arm64(void *state, const u8 *key);
+asmlinkage void poly1305_blocks(void *state, const u8 *src, u32 len, u32 hibit);
+asmlinkage void poly1305_blocks_neon(void *state, const u8 *src, u32 len, u32 hibit);
+asmlinkage void poly1305_emit(void *state, __le32 *digest, const u32 *nonce);
+
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon);
+
+void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 *key)
+{
+ poly1305_init_arm64(&dctx->h, key);
+ dctx->s[0] = get_unaligned_le32(key + 16);
+ dctx->s[1] = get_unaligned_le32(key + 20);
+ dctx->s[2] = get_unaligned_le32(key + 24);
+ dctx->s[3] = get_unaligned_le32(key + 28);
+ dctx->buflen = 0;
+}
+EXPORT_SYMBOL(poly1305_init_arch);
+
+static int neon_poly1305_init(struct shash_desc *desc)
+{
+ struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
+
+ dctx->buflen = 0;
+ dctx->rset = 0;
+ dctx->sset = false;
+
+ return 0;
+}
+
+static void neon_poly1305_blocks(struct poly1305_desc_ctx *dctx, const u8 *src,
+ u32 len, u32 hibit, bool do_neon)
+{
+ if (unlikely(!dctx->sset)) {
+ if (!dctx->rset) {
+ poly1305_init_arch(dctx, src);
+ src += POLY1305_BLOCK_SIZE;
+ len -= POLY1305_BLOCK_SIZE;
+ dctx->rset = 1;
+ }
+ if (len >= POLY1305_BLOCK_SIZE) {
+ dctx->s[0] = get_unaligned_le32(src + 0);
+ dctx->s[1] = get_unaligned_le32(src + 4);
+ dctx->s[2] = get_unaligned_le32(src + 8);
+ dctx->s[3] = get_unaligned_le32(src + 12);
+ src += POLY1305_BLOCK_SIZE;
+ len -= POLY1305_BLOCK_SIZE;
+ dctx->sset = true;
+ }
+ if (len < POLY1305_BLOCK_SIZE)
+ return;
+ }
+
+ len &= ~(POLY1305_BLOCK_SIZE - 1);
+
+ if (static_branch_likely(&have_neon) && likely(do_neon))
+ poly1305_blocks_neon(&dctx->h, src, len, hibit);
+ else
+ poly1305_blocks(&dctx->h, src, len, hibit);
+}
+
+static void neon_poly1305_do_update(struct poly1305_desc_ctx *dctx,
+ const u8 *src, u32 len, bool do_neon)
+{
+ if (unlikely(dctx->buflen)) {
+ u32 bytes = min(len, POLY1305_BLOCK_SIZE - dctx->buflen);
+
+ memcpy(dctx->buf + dctx->buflen, src, bytes);
+ src += bytes;
+ len -= bytes;
+ dctx->buflen += bytes;
+
+ if (dctx->buflen == POLY1305_BLOCK_SIZE) {
+ neon_poly1305_blocks(dctx, dctx->buf,
+ POLY1305_BLOCK_SIZE, 1, false);
+ dctx->buflen = 0;
+ }
+ }
+
+ if (likely(len >= POLY1305_BLOCK_SIZE)) {
+ neon_poly1305_blocks(dctx, src, len, 1, do_neon);
+ src += round_down(len, POLY1305_BLOCK_SIZE);
+ len %= POLY1305_BLOCK_SIZE;
+ }
+
+ if (unlikely(len)) {
+ dctx->buflen = len;
+ memcpy(dctx->buf, src, len);
+ }
+}
+
+static int neon_poly1305_update(struct shash_desc *desc,
+ const u8 *src, unsigned int srclen)
+{
+ bool do_neon = crypto_simd_usable() && srclen > 128;
+ struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
+
+ if (static_branch_likely(&have_neon) && do_neon)
+ kernel_neon_begin();
+ neon_poly1305_do_update(dctx, src, srclen, do_neon);
+ if (static_branch_likely(&have_neon) && do_neon)
+ kernel_neon_end();
+ return 0;
+}
+
+void poly1305_update_arch(struct poly1305_desc_ctx *dctx, const u8 *src,
+ unsigned int nbytes)
+{
+ if (unlikely(dctx->buflen)) {
+ u32 bytes = min(nbytes, POLY1305_BLOCK_SIZE - dctx->buflen);
+
+ memcpy(dctx->buf + dctx->buflen, src, bytes);
+ src += bytes;
+ nbytes -= bytes;
+ dctx->buflen += bytes;
+
+ if (dctx->buflen == POLY1305_BLOCK_SIZE) {
+ poly1305_blocks(&dctx->h, dctx->buf, POLY1305_BLOCK_SIZE, 1);
+ dctx->buflen = 0;
+ }
+ }
+
+ if (likely(nbytes >= POLY1305_BLOCK_SIZE)) {
+ unsigned int len = round_down(nbytes, POLY1305_BLOCK_SIZE);
+
+ if (static_branch_likely(&have_neon) && crypto_simd_usable()) {
+ kernel_neon_begin();
+ poly1305_blocks_neon(&dctx->h, src, len, 1);
+ kernel_neon_end();
+ } else {
+ poly1305_blocks(&dctx->h, src, len, 1);
+ }
+ src += len;
+ nbytes %= POLY1305_BLOCK_SIZE;
+ }
+
+ if (unlikely(nbytes)) {
+ dctx->buflen = nbytes;
+ memcpy(dctx->buf, src, nbytes);
+ }
+}
+EXPORT_SYMBOL(poly1305_update_arch);
+
+void poly1305_final_arch(struct poly1305_desc_ctx *dctx, u8 *dst)
+{
+ __le32 digest[4];
+ u64 f = 0;
+
+ if (unlikely(dctx->buflen)) {
+ dctx->buf[dctx->buflen++] = 1;
+ memset(dctx->buf + dctx->buflen, 0,
+ POLY1305_BLOCK_SIZE - dctx->buflen);
+ poly1305_blocks(&dctx->h, dctx->buf, POLY1305_BLOCK_SIZE, 0);
+ }
+
+ poly1305_emit(&dctx->h, digest, dctx->s);
+
+ /* mac = (h + s) % (2^128) */
+ f = (f >> 32) + le32_to_cpu(digest[0]);
+ put_unaligned_le32(f, dst);
+ f = (f >> 32) + le32_to_cpu(digest[1]);
+ put_unaligned_le32(f, dst + 4);
+ f = (f >> 32) + le32_to_cpu(digest[2]);
+ put_unaligned_le32(f, dst + 8);
+ f = (f >> 32) + le32_to_cpu(digest[3]);
+ put_unaligned_le32(f, dst + 12);
+
+ *dctx = (struct poly1305_desc_ctx){};
+}
+EXPORT_SYMBOL(poly1305_final_arch);
+
+static int neon_poly1305_final(struct shash_desc *desc, u8 *dst)
+{
+ struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
+
+ if (unlikely(!dctx->sset))
+ return -ENOKEY;
+
+ poly1305_final_arch(dctx, dst);
+ return 0;
+}
+
+static struct shash_alg neon_poly1305_alg = {
+ .init = neon_poly1305_init,
+ .update = neon_poly1305_update,
+ .final = neon_poly1305_final,
+ .digestsize = POLY1305_DIGEST_SIZE,
+ .descsize = sizeof(struct poly1305_desc_ctx),
+
+ .base.cra_name = "poly1305",
+ .base.cra_driver_name = "poly1305-neon",
+ .base.cra_priority = 200,
+ .base.cra_blocksize = POLY1305_BLOCK_SIZE,
+ .base.cra_module = THIS_MODULE,
+};
+
+static int __init neon_poly1305_mod_init(void)
+{
+ if (!cpu_have_named_feature(ASIMD))
+ return 0;
+
+ static_branch_enable(&have_neon);
+
+ return crypto_register_shash(&neon_poly1305_alg);
+}
+
+static void __exit neon_poly1305_mod_exit(void)
+{
+ if (cpu_have_named_feature(ASIMD))
+ crypto_unregister_shash(&neon_poly1305_alg);
+}
+
+module_init(neon_poly1305_mod_init);
+module_exit(neon_poly1305_mod_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS_CRYPTO("poly1305");
+MODULE_ALIAS_CRYPTO("poly1305-neon");
diff --git a/arch/arm64/include/asm/asm-uaccess.h b/arch/arm64/include/asm/asm-uaccess.h
index 5bf963830b17..f68a0e64482a 100644
--- a/arch/arm64/include/asm/asm-uaccess.h
+++ b/arch/arm64/include/asm/asm-uaccess.h
@@ -58,29 +58,4 @@ alternative_else_nop_endif
.endm
#endif
-/*
- * These macros are no-ops when UAO is present.
- */
- .macro uaccess_disable_not_uao, tmp1, tmp2
- uaccess_ttbr0_disable \tmp1, \tmp2
-alternative_if ARM64_ALT_PAN_NOT_UAO
- SET_PSTATE_PAN(1)
-alternative_else_nop_endif
- .endm
-
- .macro uaccess_enable_not_uao, tmp1, tmp2, tmp3
- uaccess_ttbr0_enable \tmp1, \tmp2, \tmp3
-alternative_if ARM64_ALT_PAN_NOT_UAO
- SET_PSTATE_PAN(0)
-alternative_else_nop_endif
- .endm
-
-/*
- * Remove the address tag from a virtual address, if present.
- */
- .macro untagged_addr, dst, addr
- sbfx \dst, \addr, #0, #56
- and \dst, \dst, \addr
- .endm
-
#endif
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index e0e2b1946f42..7d9cc5ec4971 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -29,6 +29,18 @@
SB_BARRIER_INSN"nop\n", \
ARM64_HAS_SB))
+#ifdef CONFIG_ARM64_PSEUDO_NMI
+#define pmr_sync() \
+ do { \
+ extern struct static_key_false gic_pmr_sync; \
+ \
+ if (static_branch_unlikely(&gic_pmr_sync)) \
+ dsb(sy); \
+ } while (0)
+#else
+#define pmr_sync() do {} while (0)
+#endif
+
#define mb() dsb(sy)
#define rmb() dsb(ld)
#define wmb() dsb(st)
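pmr_sync() expands to a dsb(sy) only when gic_pmr_sync is set, i.e. when the GIC advertises via ICC_CTLR_EL1.PMHE that a barrier is required for PMR writes to influence interrupt forwarding. Callers pair it with a PMR write; a sketch of the unmask sequence used by arch_local_irq_enable() below:

            gic_write_pmr(GIC_PRIO_IRQON);  /* unmask at the CPU interface */
            pmr_sync();                     /* dsb(sy) only if the GIC uses
                                             * PMR hints (PMHE) for routing */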
diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h
index 43da6dd29592..806e9dc2a852 100644
--- a/arch/arm64/include/asm/cache.h
+++ b/arch/arm64/include/asm/cache.h
@@ -11,6 +11,7 @@
#define CTR_L1IP_MASK 3
#define CTR_DMINLINE_SHIFT 16
#define CTR_IMINLINE_SHIFT 0
+#define CTR_IMINLINE_MASK 0xf
#define CTR_ERG_SHIFT 20
#define CTR_CWG_SHIFT 24
#define CTR_CWG_MASK 15
@@ -18,7 +19,7 @@
#define CTR_DIC_SHIFT 29
#define CTR_CACHE_MINLINE_MASK \
- (0xf << CTR_DMINLINE_SHIFT | 0xf << CTR_IMINLINE_SHIFT)
+ (0xf << CTR_DMINLINE_SHIFT | CTR_IMINLINE_MASK << CTR_IMINLINE_SHIFT)
#define CTR_L1IP(ctr) (((ctr) >> CTR_L1IP_SHIFT) & CTR_L1IP_MASK)
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
index ac1dbca3d0cd..b92683871119 100644
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -54,7 +54,9 @@
#define ARM64_WORKAROUND_1463225 44
#define ARM64_WORKAROUND_CAVIUM_TX2_219_TVM 45
#define ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM 46
+#define ARM64_WORKAROUND_1542419 47
+#define ARM64_WORKAROUND_1319367 48
-#define ARM64_NCAPS 47
+#define ARM64_NCAPS 49
#endif /* __ASM_CPUCAPS_H */
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 9cde5d2e768f..4261d55e8506 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -659,6 +659,20 @@ static inline u32 id_aa64mmfr0_parange_to_phys_shift(int parange)
default: return CONFIG_ARM64_PA_BITS;
}
}
+
+/* Check whether hardware update of the Access flag is supported */
+static inline bool cpu_has_hw_af(void)
+{
+ u64 mmfr1;
+
+ if (!IS_ENABLED(CONFIG_ARM64_HW_AFDBM))
+ return false;
+
+ mmfr1 = read_cpuid(ID_AA64MMFR1_EL1);
+ return cpuid_feature_extract_unsigned_field(mmfr1,
+ ID_AA64MMFR1_HADBS_SHIFT);
+}
+
#endif /* __ASSEMBLY__ */
#endif
diff --git a/arch/arm64/include/asm/daifflags.h b/arch/arm64/include/asm/daifflags.h
index 063c964af705..72acd2db167f 100644
--- a/arch/arm64/include/asm/daifflags.h
+++ b/arch/arm64/include/asm/daifflags.h
@@ -8,7 +8,9 @@
#include <linux/irqflags.h>
#include <asm/arch_gicv3.h>
+#include <asm/barrier.h>
#include <asm/cpufeature.h>
+#include <asm/ptrace.h>
#define DAIF_PROCCTX 0
#define DAIF_PROCCTX_NOIRQ PSR_I_BIT
@@ -65,7 +67,7 @@ static inline void local_daif_restore(unsigned long flags)
if (system_uses_irq_prio_masking()) {
gic_write_pmr(GIC_PRIO_IRQON);
- dsb(sy);
+ pmr_sync();
}
} else if (system_uses_irq_prio_masking()) {
u64 pmr;
@@ -109,4 +111,19 @@ static inline void local_daif_restore(unsigned long flags)
trace_hardirqs_off();
}
+/*
+ * Called by synchronous exception handlers to restore the DAIF bits that were
+ * modified by taking an exception.
+ */
+static inline void local_daif_inherit(struct pt_regs *regs)
+{
+ unsigned long flags = regs->pstate & DAIF_MASK;
+
+ /*
+ * We can't use local_daif_restore(regs->pstate) here as
+ * system_has_prio_mask_debugging() won't restore the I bit if it can
+ * use the pmr instead.
+ */
+ write_sysreg(flags, daif);
+}
#endif
diff --git a/arch/arm64/include/asm/exception.h b/arch/arm64/include/asm/exception.h
index a17393ff6677..4d5f3b5f50cd 100644
--- a/arch/arm64/include/asm/exception.h
+++ b/arch/arm64/include/asm/exception.h
@@ -8,14 +8,15 @@
#define __ASM_EXCEPTION_H
#include <asm/esr.h>
+#include <asm/kprobes.h>
+#include <asm/ptrace.h>
#include <linux/interrupt.h>
-#define __exception __attribute__((section(".exception.text")))
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#define __exception_irq_entry __irq_entry
#else
-#define __exception_irq_entry __exception
+#define __exception_irq_entry __kprobes
#endif
static inline u32 disr_to_esr(u64 disr)
@@ -31,5 +32,22 @@ static inline u32 disr_to_esr(u64 disr)
}
asmlinkage void enter_from_user_mode(void);
+void do_mem_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs);
+void do_sp_pc_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs);
+void do_undefinstr(struct pt_regs *regs);
+asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr);
+void do_debug_exception(unsigned long addr_if_watchpoint, unsigned int esr,
+ struct pt_regs *regs);
+void do_fpsimd_acc(unsigned int esr, struct pt_regs *regs);
+void do_sve_acc(unsigned int esr, struct pt_regs *regs);
+void do_fpsimd_exc(unsigned int esr, struct pt_regs *regs);
+void do_sysinstr(unsigned int esr, struct pt_regs *regs);
+void do_sp_pc_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs);
+void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr);
+void do_cp15instr(unsigned int esr, struct pt_regs *regs);
+void el0_svc_handler(struct pt_regs *regs);
+void el0_svc_compat_handler(struct pt_regs *regs);
+void do_el0_ia_bp_hardening(unsigned long addr, unsigned int esr,
+ struct pt_regs *regs);
#endif /* __ASM_EXCEPTION_H */
diff --git a/arch/arm64/include/asm/ftrace.h b/arch/arm64/include/asm/ftrace.h
index d48667b04c41..91fa4baa1a93 100644
--- a/arch/arm64/include/asm/ftrace.h
+++ b/arch/arm64/include/asm/ftrace.h
@@ -11,9 +11,20 @@
#include <asm/insn.h>
#define HAVE_FUNCTION_GRAPH_FP_TEST
+
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+#define ARCH_SUPPORTS_FTRACE_OPS 1
+#else
#define MCOUNT_ADDR ((unsigned long)_mcount)
+#endif
+
+/* The BL at the callsite's adjusted rec->ip */
#define MCOUNT_INSN_SIZE AARCH64_INSN_SIZE
+#define FTRACE_PLT_IDX 0
+#define FTRACE_REGS_PLT_IDX 1
+#define NR_FTRACE_PLTS 2
+
/*
* Currently, gcc tends to save the link register after the local variables
* on the stack. This causes the max stack tracer to report the function
@@ -44,12 +55,24 @@ extern void return_to_handler(void);
static inline unsigned long ftrace_call_adjust(unsigned long addr)
{
/*
+ * Adjust addr to point at the BL in the callsite.
+ * See ftrace_init_nop() for the callsite sequence.
+ */
+ if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
+ return addr + AARCH64_INSN_SIZE;
+ /*
* addr is the address of the mcount call instruction.
* recordmcount does the necessary offset calculation.
*/
return addr;
}
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+struct dyn_ftrace;
+int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec);
+#define ftrace_init_nop ftrace_init_nop
+#endif
+
#define ftrace_return_address(n) return_address(n)
/*
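A short worked example of the adjustment: with -fpatchable-function-entry=2 the compiler places two NOPs at the very start of each instrumented function, rec->ip initially records the first slot, and the BL lives in the second. The sketch below is illustrative; rec is hypothetical.

            unsigned long ip = ftrace_call_adjust(rec->ip);
            /*
             * func+0: nop -> "mov x9, x30"  (rec->ip records this slot)
             * func+4: nop -> "bl <entry>"   (ip == rec->ip + AARCH64_INSN_SIZE)
             * func+8: regular prologue follows
             */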
diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
index 39e7780bedd6..bb313dde58a4 100644
--- a/arch/arm64/include/asm/insn.h
+++ b/arch/arm64/include/asm/insn.h
@@ -440,6 +440,9 @@ u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst,
int shift,
enum aarch64_insn_variant variant,
enum aarch64_insn_logic_type type);
+u32 aarch64_insn_gen_move_reg(enum aarch64_insn_register dst,
+ enum aarch64_insn_register src,
+ enum aarch64_insn_variant variant);
u32 aarch64_insn_gen_logical_immediate(enum aarch64_insn_logic_type type,
enum aarch64_insn_variant variant,
enum aarch64_insn_register Rn,
diff --git a/arch/arm64/include/asm/irqflags.h b/arch/arm64/include/asm/irqflags.h
index 1a59f0ed1ae3..aa4b6521ef14 100644
--- a/arch/arm64/include/asm/irqflags.h
+++ b/arch/arm64/include/asm/irqflags.h
@@ -6,6 +6,7 @@
#define __ASM_IRQFLAGS_H
#include <asm/alternative.h>
+#include <asm/barrier.h>
#include <asm/ptrace.h>
#include <asm/sysreg.h>
@@ -34,14 +35,14 @@ static inline void arch_local_irq_enable(void)
}
asm volatile(ALTERNATIVE(
- "msr daifclr, #2 // arch_local_irq_enable\n"
- "nop",
- __msr_s(SYS_ICC_PMR_EL1, "%0")
- "dsb sy",
+ "msr daifclr, #2 // arch_local_irq_enable",
+ __msr_s(SYS_ICC_PMR_EL1, "%0"),
ARM64_HAS_IRQ_PRIO_MASKING)
:
: "r" ((unsigned long) GIC_PRIO_IRQON)
: "memory");
+
+ pmr_sync();
}
static inline void arch_local_irq_disable(void)
@@ -116,14 +117,14 @@ static inline unsigned long arch_local_irq_save(void)
static inline void arch_local_irq_restore(unsigned long flags)
{
asm volatile(ALTERNATIVE(
- "msr daif, %0\n"
- "nop",
- __msr_s(SYS_ICC_PMR_EL1, "%0")
- "dsb sy",
- ARM64_HAS_IRQ_PRIO_MASKING)
+ "msr daif, %0",
+ __msr_s(SYS_ICC_PMR_EL1, "%0"),
+ ARM64_HAS_IRQ_PRIO_MASKING)
:
: "r" (flags)
: "memory");
+
+ pmr_sync();
}
#endif /* __ASM_IRQFLAGS_H */
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index ddf9d762ac62..6e5d839f42b5 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -61,7 +61,6 @@
* RW: 64bit by default, can be overridden for 32bit VMs
* TAC: Trap ACTLR
* TSC: Trap SMC
- * TVM: Trap VM ops (until M+C set in SCTLR_EL1)
* TSW: Trap cache operations by set/way
* TWE: Trap WFE
* TWI: Trap WFI
@@ -74,7 +73,7 @@
* SWIO: Turn set/way invalidates into set/way clean+invalidate
*/
#define HCR_GUEST_FLAGS (HCR_TSC | HCR_TSW | HCR_TWE | HCR_TWI | HCR_VM | \
- HCR_TVM | HCR_BSU_IS | HCR_FB | HCR_TAC | \
+ HCR_BSU_IS | HCR_FB | HCR_TAC | \
HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW | HCR_TLOR | \
HCR_FMO | HCR_IMO)
#define HCR_VIRT_EXCP_MASK (HCR_VSE | HCR_VI | HCR_VF)
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index d69c1efc63e7..5efe5ca8fecf 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -53,8 +53,18 @@ static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
/* trap error record accesses */
vcpu->arch.hcr_el2 |= HCR_TERR;
}
- if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
+
+ if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) {
vcpu->arch.hcr_el2 |= HCR_FWB;
+ } else {
+ /*
+ * For non-FWB CPUs, we trap VM ops (HCR_EL2.TVM) until M+C
+ * get set in SCTLR_EL1 such that we can detect when the guest
+ * MMU gets turned on and do the necessary cache maintenance
+ * then.
+ */
+ vcpu->arch.hcr_el2 |= HCR_TVM;
+ }
if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features))
vcpu->arch.hcr_el2 &= ~HCR_RW;
@@ -77,14 +87,19 @@ static inline unsigned long *vcpu_hcr(struct kvm_vcpu *vcpu)
return (unsigned long *)&vcpu->arch.hcr_el2;
}
-static inline void vcpu_clear_wfe_traps(struct kvm_vcpu *vcpu)
+static inline void vcpu_clear_wfx_traps(struct kvm_vcpu *vcpu)
{
vcpu->arch.hcr_el2 &= ~HCR_TWE;
+ if (atomic_read(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count))
+ vcpu->arch.hcr_el2 &= ~HCR_TWI;
+ else
+ vcpu->arch.hcr_el2 |= HCR_TWI;
}
-static inline void vcpu_set_wfe_traps(struct kvm_vcpu *vcpu)
+static inline void vcpu_set_wfx_traps(struct kvm_vcpu *vcpu)
{
vcpu->arch.hcr_el2 |= HCR_TWE;
+ vcpu->arch.hcr_el2 |= HCR_TWI;
}
static inline void vcpu_ptrauth_enable(struct kvm_vcpu *vcpu)
@@ -258,6 +273,11 @@ static inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_ISV);
}
+static inline unsigned long kvm_vcpu_dabt_iss_nisv_sanitized(const struct kvm_vcpu *vcpu)
+{
+ return kvm_vcpu_get_hsr(vcpu) & (ESR_ELx_CM | ESR_ELx_WNR | ESR_ELx_FSC);
+}
+
static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
{
return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SSE);
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index f656169db8c3..c61260cf63c5 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -44,6 +44,7 @@
KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_IRQ_PENDING KVM_ARCH_REQ(1)
#define KVM_REQ_VCPU_RESET KVM_ARCH_REQ(2)
+#define KVM_REQ_RECORD_STEAL KVM_ARCH_REQ(3)
DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
@@ -83,6 +84,14 @@ struct kvm_arch {
/* Mandated version of PSCI */
u32 psci_version;
+
+ /*
+ * If we encounter a data abort without valid instruction syndrome
+ * information, report this to user space. User space can (and
+ * should) opt in to this feature if KVM_CAP_ARM_NISV_TO_USER is
+ * supported.
+ */
+ bool return_nisv_io_abort_to_user;
};
#define KVM_NR_MEM_OBJS 40
@@ -338,6 +347,13 @@ struct kvm_vcpu_arch {
/* True when deferrable sysregs are loaded on the physical CPU,
* see kvm_vcpu_load_sysregs and kvm_vcpu_put_sysregs. */
bool sysregs_loaded_on_cpu;
+
+ /* Guest PV state */
+ struct {
+ u64 steal;
+ u64 last_steal;
+ gpa_t base;
+ } steal;
};
/* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */
@@ -478,6 +494,27 @@ void handle_exit_early(struct kvm_vcpu *vcpu, struct kvm_run *run,
int kvm_perf_init(void);
int kvm_perf_teardown(void);
+long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu);
+gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu);
+void kvm_update_stolen_time(struct kvm_vcpu *vcpu);
+
+int kvm_arm_pvtime_set_attr(struct kvm_vcpu *vcpu,
+ struct kvm_device_attr *attr);
+int kvm_arm_pvtime_get_attr(struct kvm_vcpu *vcpu,
+ struct kvm_device_attr *attr);
+int kvm_arm_pvtime_has_attr(struct kvm_vcpu *vcpu,
+ struct kvm_device_attr *attr);
+
+static inline void kvm_arm_pvtime_vcpu_init(struct kvm_vcpu_arch *vcpu_arch)
+{
+ vcpu_arch->steal.base = GPA_INVALID;
+}
+
+static inline bool kvm_arm_is_pvtime_enabled(struct kvm_vcpu_arch *vcpu_arch)
+{
+ return (vcpu_arch->steal.base != GPA_INVALID);
+}
+
void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 syndrome);
struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
@@ -600,8 +637,7 @@ static inline void kvm_arm_vhe_guest_enter(void)
* local_daif_mask() already sets GIC_PRIO_PSR_I_SET, we just need a
* dsb to ensure the redistributor forwards EL2 IRQs to the CPU.
*/
- if (system_uses_irq_prio_masking())
- dsb(sy);
+ pmr_sync();
}
static inline void kvm_arm_vhe_guest_exit(void)
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index c23c47360664..a4f9ca5479b0 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -69,12 +69,6 @@
#define KERNEL_START _text
#define KERNEL_END _end
-#ifdef CONFIG_ARM64_VA_BITS_52
-#define MAX_USER_VA_BITS 52
-#else
-#define MAX_USER_VA_BITS VA_BITS
-#endif
-
/*
* Generic and tag-based KASAN require 1/8th and 1/16th of the kernel virtual
* address space for the shadow region respectively. They can bloat the stack
diff --git a/arch/arm64/include/asm/module.h b/arch/arm64/include/asm/module.h
index f80e13cbf8ec..1e93de68c044 100644
--- a/arch/arm64/include/asm/module.h
+++ b/arch/arm64/include/asm/module.h
@@ -21,7 +21,7 @@ struct mod_arch_specific {
struct mod_plt_sec init;
/* for CONFIG_DYNAMIC_FTRACE */
- struct plt_entry *ftrace_trampoline;
+ struct plt_entry *ftrace_trampolines;
};
#endif
diff --git a/arch/arm64/include/asm/paravirt.h b/arch/arm64/include/asm/paravirt.h
index 799d9dd6f7cc..cf3a0fd7c1a7 100644
--- a/arch/arm64/include/asm/paravirt.h
+++ b/arch/arm64/include/asm/paravirt.h
@@ -21,6 +21,13 @@ static inline u64 paravirt_steal_clock(int cpu)
{
return pv_ops.time.steal_clock(cpu);
}
-#endif
+
+int __init pv_time_init(void);
+
+#else
+
+#define pv_time_init() do {} while (0)
+
+#endif // CONFIG_PARAVIRT
#endif
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index 3df60f97da1f..d9fbd433cc17 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -69,7 +69,7 @@
#define PGDIR_SHIFT ARM64_HW_PGTABLE_LEVEL_SHIFT(4 - CONFIG_PGTABLE_LEVELS)
#define PGDIR_SIZE (_AC(1, UL) << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))
-#define PTRS_PER_PGD (1 << (MAX_USER_VA_BITS - PGDIR_SHIFT))
+#define PTRS_PER_PGD (1 << (VA_BITS - PGDIR_SHIFT))
/*
* Section address mask and size definitions.
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 565aa45ef134..5d15b4735a0e 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -17,7 +17,7 @@
* VMALLOC range.
*
* VMALLOC_START: beginning of the kernel vmalloc space
- * VMALLOC_END: extends to the available space below vmmemmap, PCI I/O space
+ * VMALLOC_END: extends to the available space below vmemmap, PCI I/O space
* and fixed mappings
*/
#define VMALLOC_START (MODULES_END)
@@ -865,6 +865,20 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
#define phys_to_ttbr(addr) (addr)
#endif
+/*
+ * On arm64 without hardware Access Flag, copying from user will fail because
+ * the pte is old and cannot be marked young. So we always end up with zeroed
+ * page after fork() + CoW for pfn mappings. We don't always have a
+ * hardware-managed access flag on arm64.
+ */
+static inline bool arch_faults_on_old_pte(void)
+{
+ WARN_ON(preemptible());
+
+ return !cpu_has_hw_af();
+}
+#define arch_faults_on_old_pte arch_faults_on_old_pte
+
#endif /* !__ASSEMBLY__ */
#endif /* __ASM_PGTABLE_H */
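Note the WARN_ON(preemptible()) in arch_faults_on_old_pte(): the hook reads the local CPU's ID_AA64MMFR1_EL1, so on systems where only some cores implement hardware AF the answer is per-CPU. A hedged usage sketch:

            /* the result describes the sampled CPU only */
            preempt_disable();
            if (arch_faults_on_old_pte()) {
                    /* no HW AF here: touching an old PTE faults rather
                     * than setting the access flag in hardware */
            }
            preempt_enable();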
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 5623685c7d13..5ba63204d078 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -9,7 +9,7 @@
#define __ASM_PROCESSOR_H
#define KERNEL_DS UL(-1)
-#define USER_DS ((UL(1) << MAX_USER_VA_BITS) - 1)
+#define USER_DS ((UL(1) << VA_BITS) - 1)
/*
* On arm64 systems, unaligned accesses by the CPU are cheap, and so there is
@@ -26,10 +26,12 @@
#include <linux/init.h>
#include <linux/stddef.h>
#include <linux/string.h>
+#include <linux/thread_info.h>
#include <asm/alternative.h>
#include <asm/cpufeature.h>
#include <asm/hw_breakpoint.h>
+#include <asm/kasan.h>
#include <asm/lse.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pointer_auth.h>
@@ -214,6 +216,18 @@ static inline void start_thread(struct pt_regs *regs, unsigned long pc,
regs->sp = sp;
}
+static inline bool is_ttbr0_addr(unsigned long addr)
+{
+ /* entry assembly clears tags for TTBR0 addrs */
+ return addr < TASK_SIZE;
+}
+
+static inline bool is_ttbr1_addr(unsigned long addr)
+{
+ /* TTBR1 addresses may have a tag if KASAN_SW_TAGS is in use */
+ return arch_kasan_reset_tag(addr) >= PAGE_OFFSET;
+}
+
#ifdef CONFIG_COMPAT
static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc,
unsigned long sp)
diff --git a/arch/arm64/include/asm/pvclock-abi.h b/arch/arm64/include/asm/pvclock-abi.h
new file mode 100644
index 000000000000..c4f1c0a0789c
--- /dev/null
+++ b/arch/arm64/include/asm/pvclock-abi.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2019 Arm Ltd. */
+
+#ifndef __ASM_PVCLOCK_ABI_H
+#define __ASM_PVCLOCK_ABI_H
+
+/* The below structure is defined in ARM DEN0057A */
+
+struct pvclock_vcpu_stolen_time {
+ __le32 revision;
+ __le32 attributes;
+ __le64 stolen_time;
+ /* Structure must be 64 byte aligned, pad to that size */
+ u8 padding[48];
+} __packed;
+
+#endif
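The hypervisor publishes this structure at a guest address discovered through the spec's PV_TIME_ST call and bumps stolen_time while the vCPU is scheduled out, so a guest-side read is a single little-endian load. A sketch (helper name illustrative; the real consumer is the paravirt steal-clock code):

    #include <asm/pvclock-abi.h>
    #include <linux/compiler.h>
    #include <linux/types.h>

    /* Fetch the stolen-time counter (in nanoseconds) from the shared page. */
    static u64 stolen_time_ns(struct pvclock_vcpu_stolen_time *st)
    {
            return le64_to_cpu(READ_ONCE(st->stolen_time));
    }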
diff --git a/arch/arm64/include/asm/syscall_wrapper.h b/arch/arm64/include/asm/syscall_wrapper.h
index 06d880b3526c..b383b4802a7b 100644
--- a/arch/arm64/include/asm/syscall_wrapper.h
+++ b/arch/arm64/include/asm/syscall_wrapper.h
@@ -66,24 +66,18 @@ struct pt_regs;
} \
static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))
-#ifndef SYSCALL_DEFINE0
#define SYSCALL_DEFINE0(sname) \
SYSCALL_METADATA(_##sname, 0); \
asmlinkage long __arm64_sys_##sname(const struct pt_regs *__unused); \
ALLOW_ERROR_INJECTION(__arm64_sys_##sname, ERRNO); \
asmlinkage long __arm64_sys_##sname(const struct pt_regs *__unused)
-#endif
-#ifndef COND_SYSCALL
#define COND_SYSCALL(name) \
asmlinkage long __weak __arm64_sys_##name(const struct pt_regs *regs) \
{ \
return sys_ni_syscall(); \
}
-#endif
-#ifndef SYS_NI
#define SYS_NI(name) SYSCALL_ALIAS(__arm64_sys_##name, sys_ni_posix_timers);
-#endif
#endif /* __ASM_SYSCALL_WRAPPER_H */
diff --git a/arch/arm64/include/asm/traps.h b/arch/arm64/include/asm/traps.h
index 59690613ac31..cee5928e1b7d 100644
--- a/arch/arm64/include/asm/traps.h
+++ b/arch/arm64/include/asm/traps.h
@@ -42,16 +42,6 @@ static inline int __in_irqentry_text(unsigned long ptr)
ptr < (unsigned long)&__irqentry_text_end;
}
-static inline int in_exception_text(unsigned long ptr)
-{
- int in;
-
- in = ptr >= (unsigned long)&__exception_text_start &&
- ptr < (unsigned long)&__exception_text_end;
-
- return in ? : __in_irqentry_text(ptr);
-}
-
static inline int in_entry_text(unsigned long ptr)
{
return ptr >= (unsigned long)&__entry_text_start &&
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 097d6bfac0b7..127712b0b970 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -378,20 +378,34 @@ do { \
extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
#define raw_copy_from_user(to, from, n) \
({ \
- __arch_copy_from_user((to), __uaccess_mask_ptr(from), (n)); \
+ unsigned long __acfu_ret; \
+ uaccess_enable_not_uao(); \
+ __acfu_ret = __arch_copy_from_user((to), \
+ __uaccess_mask_ptr(from), (n)); \
+ uaccess_disable_not_uao(); \
+ __acfu_ret; \
})
extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n);
#define raw_copy_to_user(to, from, n) \
({ \
- __arch_copy_to_user(__uaccess_mask_ptr(to), (from), (n)); \
+ unsigned long __actu_ret; \
+ uaccess_enable_not_uao(); \
+ __actu_ret = __arch_copy_to_user(__uaccess_mask_ptr(to), \
+ (from), (n)); \
+ uaccess_disable_not_uao(); \
+ __actu_ret; \
})
extern unsigned long __must_check __arch_copy_in_user(void __user *to, const void __user *from, unsigned long n);
#define raw_copy_in_user(to, from, n) \
({ \
- __arch_copy_in_user(__uaccess_mask_ptr(to), \
- __uaccess_mask_ptr(from), (n)); \
+ unsigned long __aciu_ret; \
+ uaccess_enable_not_uao(); \
+ __aciu_ret = __arch_copy_in_user(__uaccess_mask_ptr(to), \
+ __uaccess_mask_ptr(from), (n)); \
+ uaccess_disable_not_uao(); \
+ __aciu_ret; \
})
#define INLINE_COPY_TO_USER
@@ -400,8 +414,11 @@ extern unsigned long __must_check __arch_copy_in_user(void __user *to, const voi
extern unsigned long __must_check __arch_clear_user(void __user *to, unsigned long n);
static inline unsigned long __must_check __clear_user(void __user *to, unsigned long n)
{
- if (access_ok(to, n))
+ if (access_ok(to, n)) {
+ uaccess_enable_not_uao();
n = __arch_clear_user(__uaccess_mask_ptr(to), n);
+ uaccess_disable_not_uao();
+ }
return n;
}
#define clear_user __clear_user
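The enable/disable bracketing now lives in these C wrappers instead of the assembly routines removed from asm-uaccess.h above, so PAN/TTBR0 protection is dropped only for the duration of the low-level copy. Callers are unaffected; the usual pattern still applies (struct and pointer names hypothetical):

            struct some_req req;

            /* expands to the bracketed __arch_copy_from_user() above and
             * returns the number of bytes that could NOT be copied */
            if (copy_from_user(&req, user_ptr, sizeof(req)))
                    return -EFAULT;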
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
index 67c21f9bdbad..820e5751ada7 100644
--- a/arch/arm64/include/uapi/asm/kvm.h
+++ b/arch/arm64/include/uapi/asm/kvm.h
@@ -164,8 +164,9 @@ struct kvm_vcpu_events {
struct {
__u8 serror_pending;
__u8 serror_has_esr;
+ __u8 ext_dabt_pending;
/* Align it to 8 bytes */
- __u8 pad[6];
+ __u8 pad[5];
__u64 serror_esr;
} exception;
__u32 reserved[12];
@@ -323,6 +324,8 @@ struct kvm_vcpu_events {
#define KVM_ARM_VCPU_TIMER_CTRL 1
#define KVM_ARM_VCPU_TIMER_IRQ_VTIMER 0
#define KVM_ARM_VCPU_TIMER_IRQ_PTIMER 1
+#define KVM_ARM_VCPU_PVTIME_CTRL 2
+#define KVM_ARM_VCPU_PVTIME_IPA 0
/* KVM_IRQ_LINE irq field index values */
#define KVM_ARM_IRQ_VCPU2_SHIFT 28
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index 478491f07b4f..fc6488660f64 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -13,9 +13,9 @@ CFLAGS_REMOVE_return_address.o = $(CC_FLAGS_FTRACE)
# Object file lists.
obj-y := debug-monitors.o entry.o irq.o fpsimd.o \
- entry-fpsimd.o process.o ptrace.o setup.o signal.o \
- sys.o stacktrace.o time.o traps.o io.o vdso.o \
- hyp-stub.o psci.o cpu_ops.o insn.o \
+ entry-common.o entry-fpsimd.o process.o ptrace.o \
+ setup.o signal.o sys.o stacktrace.o time.o traps.o \
+ io.o vdso.o hyp-stub.o psci.o cpu_ops.o insn.o \
return_address.o cpuinfo.o cpu_errata.o \
cpufeature.o alternative.o cacheinfo.o \
smp.o smp_spin_table.o topology.o smccc-call.o \
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index 214685760e1c..a5bdce8af65b 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -56,6 +56,7 @@ int main(void)
DEFINE(S_X24, offsetof(struct pt_regs, regs[24]));
DEFINE(S_X26, offsetof(struct pt_regs, regs[26]));
DEFINE(S_X28, offsetof(struct pt_regs, regs[28]));
+ DEFINE(S_FP, offsetof(struct pt_regs, regs[29]));
DEFINE(S_LR, offsetof(struct pt_regs, regs[30]));
DEFINE(S_SP, offsetof(struct pt_regs, sp));
DEFINE(S_PSTATE, offsetof(struct pt_regs, pstate));
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 93f34b4eca25..6a09ca7644ea 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -6,7 +6,6 @@
*/
#include <linux/arm-smccc.h>
-#include <linux/psci.h>
#include <linux/types.h>
#include <linux/cpu.h>
#include <asm/cpu.h>
@@ -88,13 +87,21 @@ has_mismatched_cache_type(const struct arm64_cpu_capabilities *entry,
}
static void
-cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *__unused)
+cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *cap)
{
u64 mask = arm64_ftr_reg_ctrel0.strict_mask;
+ bool enable_uct_trap = false;
/* Trap CTR_EL0 access on this CPU, only if it has a mismatch */
if ((read_cpuid_cachetype() & mask) !=
(arm64_ftr_reg_ctrel0.sys_val & mask))
+ enable_uct_trap = true;
+
+ /* ... or if the system is affected by an erratum */
+ if (cap->capability == ARM64_WORKAROUND_1542419)
+ enable_uct_trap = true;
+
+ if (enable_uct_trap)
sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0);
}
@@ -167,9 +174,7 @@ static void install_bp_hardening_cb(bp_hardening_cb_t fn,
}
#endif /* CONFIG_KVM_INDIRECT_VECTORS */
-#include <uapi/linux/psci.h>
#include <linux/arm-smccc.h>
-#include <linux/psci.h>
static void call_smc_arch_workaround_1(void)
{
@@ -213,43 +218,31 @@ static int detect_harden_bp_fw(void)
struct arm_smccc_res res;
u32 midr = read_cpuid_id();
- if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
+ arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
+ ARM_SMCCC_ARCH_WORKAROUND_1, &res);
+
+ switch ((int)res.a0) {
+ case 1:
+ /* Firmware says we're just fine */
+ return 0;
+ case 0:
+ break;
+ default:
return -1;
+ }
- switch (psci_ops.conduit) {
- case PSCI_CONDUIT_HVC:
- arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
- ARM_SMCCC_ARCH_WORKAROUND_1, &res);
- switch ((int)res.a0) {
- case 1:
- /* Firmware says we're just fine */
- return 0;
- case 0:
- cb = call_hvc_arch_workaround_1;
- /* This is a guest, no need to patch KVM vectors */
- smccc_start = NULL;
- smccc_end = NULL;
- break;
- default:
- return -1;
- }
+ switch (arm_smccc_1_1_get_conduit()) {
+ case SMCCC_CONDUIT_HVC:
+ cb = call_hvc_arch_workaround_1;
+ /* This is a guest, no need to patch KVM vectors */
+ smccc_start = NULL;
+ smccc_end = NULL;
break;
- case PSCI_CONDUIT_SMC:
- arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
- ARM_SMCCC_ARCH_WORKAROUND_1, &res);
- switch ((int)res.a0) {
- case 1:
- /* Firmware says we're just fine */
- return 0;
- case 0:
- cb = call_smc_arch_workaround_1;
- smccc_start = __smccc_workaround_1_smc_start;
- smccc_end = __smccc_workaround_1_smc_end;
- break;
- default:
- return -1;
- }
+ case SMCCC_CONDUIT_SMC:
+ cb = call_smc_arch_workaround_1;
+ smccc_start = __smccc_workaround_1_smc_start;
+ smccc_end = __smccc_workaround_1_smc_end;
break;
default:
@@ -309,11 +302,11 @@ void __init arm64_update_smccc_conduit(struct alt_instr *alt,
BUG_ON(nr_inst != 1);
- switch (psci_ops.conduit) {
- case PSCI_CONDUIT_HVC:
+ switch (arm_smccc_1_1_get_conduit()) {
+ case SMCCC_CONDUIT_HVC:
insn = aarch64_insn_get_hvc_value();
break;
- case PSCI_CONDUIT_SMC:
+ case SMCCC_CONDUIT_SMC:
insn = aarch64_insn_get_smc_value();
break;
default:
@@ -339,6 +332,8 @@ void __init arm64_enable_wa2_handling(struct alt_instr *alt,
void arm64_set_ssbd_mitigation(bool state)
{
+ int conduit;
+
if (!IS_ENABLED(CONFIG_ARM64_SSBD)) {
pr_info_once("SSBD disabled by kernel configuration\n");
return;
@@ -352,19 +347,10 @@ void arm64_set_ssbd_mitigation(bool state)
return;
}
- switch (psci_ops.conduit) {
- case PSCI_CONDUIT_HVC:
- arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
- break;
-
- case PSCI_CONDUIT_SMC:
- arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
- break;
+ conduit = arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_WORKAROUND_2, state,
+ NULL);
- default:
- WARN_ON_ONCE(1);
- break;
- }
+ WARN_ON_ONCE(conduit == SMCCC_CONDUIT_NONE);
}
static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
@@ -374,6 +360,7 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
bool required = true;
s32 val;
bool this_cpu_safe = false;
+ int conduit;
WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
@@ -391,25 +378,10 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
goto out_printmsg;
}
- if (psci_ops.smccc_version == SMCCC_VERSION_1_0) {
- ssbd_state = ARM64_SSBD_UNKNOWN;
- if (!this_cpu_safe)
- __ssb_safe = false;
- return false;
- }
-
- switch (psci_ops.conduit) {
- case PSCI_CONDUIT_HVC:
- arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
- ARM_SMCCC_ARCH_WORKAROUND_2, &res);
- break;
-
- case PSCI_CONDUIT_SMC:
- arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
- ARM_SMCCC_ARCH_WORKAROUND_2, &res);
- break;
+ conduit = arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
+ ARM_SMCCC_ARCH_WORKAROUND_2, &res);
- default:
+ if (conduit == SMCCC_CONDUIT_NONE) {
ssbd_state = ARM64_SSBD_UNKNOWN;
if (!this_cpu_safe)
__ssb_safe = false;
@@ -650,9 +622,21 @@ needs_tx2_tvm_workaround(const struct arm64_cpu_capabilities *entry,
return false;
}
-#ifdef CONFIG_HARDEN_EL2_VECTORS
+static bool __maybe_unused
+has_neoverse_n1_erratum_1542419(const struct arm64_cpu_capabilities *entry,
+ int scope)
+{
+ u32 midr = read_cpuid_id();
+ bool has_dic = read_cpuid_cachetype() & BIT(CTR_DIC_SHIFT);
+ const struct midr_range range = MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1);
+
+ WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
+ return is_midr_in_range(midr, &range) && has_dic;
+}
+
+#if defined(CONFIG_HARDEN_EL2_VECTORS) || defined(CONFIG_ARM64_ERRATUM_1319367)
-static const struct midr_range arm64_harden_el2_vectors[] = {
+static const struct midr_range ca57_a72[] = {
MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
{},
@@ -881,7 +865,7 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
{
.desc = "EL2 vector hardening",
.capability = ARM64_HARDEN_EL2_VECTORS,
- ERRATA_MIDR_RANGE_LIST(arm64_harden_el2_vectors),
+ ERRATA_MIDR_RANGE_LIST(ca57_a72),
},
#endif
{
@@ -927,6 +911,23 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
},
#endif
+#ifdef CONFIG_ARM64_ERRATUM_1542419
+ {
+ /* we depend on the firmware portion for correctness */
+ .desc = "ARM erratum 1542419 (kernel portion)",
+ .capability = ARM64_WORKAROUND_1542419,
+ .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+ .matches = has_neoverse_n1_erratum_1542419,
+ .cpu_enable = cpu_enable_trap_ctr_access,
+ },
+#endif
+#ifdef CONFIG_ARM64_ERRATUM_1319367
+ {
+ .desc = "ARM erratum 1319367",
+ .capability = ARM64_WORKAROUND_1319367,
+ ERRATA_MIDR_RANGE_LIST(ca57_a72),
+ },
+#endif
{
}
};
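The conversions above lean on arm_smccc_1_1_invoke(), which dispatches on the conduit firmware negotiated at boot and returns which one was used; SMCCC_CONDUIT_NONE signals pre-1.1 firmware, which is why the explicit SMCCC_VERSION_1_0 early-returns could be dropped. Roughly, a sketch of the helper's shape:

    #define arm_smccc_1_1_invoke(...) ({                            \
                    int method = arm_smccc_1_1_get_conduit();       \
                    switch (method) {                               \
                    case SMCCC_CONDUIT_HVC:                         \
                            arm_smccc_1_1_hvc(__VA_ARGS__);         \
                            break;                                  \
                    case SMCCC_CONDUIT_SMC:                         \
                            arm_smccc_1_1_smc(__VA_ARGS__);         \
                            break;                                  \
                    default:                                        \
                            method = SMCCC_CONDUIT_NONE;            \
                            break;                                  \
                    }                                               \
                    method;                                         \
            })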
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 80f459ad0190..04cf64e9f0c9 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -982,6 +982,7 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
+ MIDR_ALL_VERSIONS(MIDR_NVIDIA_CARMEL),
{ /* sentinel */ }
};
char const *str = "kpti command line option";
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index 05933c065732..56bba746da1c 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -329,7 +329,7 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
info->reg_cntfrq = arch_timer_get_cntfrq();
/*
* Use the effective value of the CTR_EL0 rather than the raw value
- * exposed by the CPU. CTR_E0.IDC field value must be interpreted
+ * exposed by the CPU. CTR_EL0.IDC field value must be interpreted
* with the CLIDR_EL1 fields to avoid triggering false warnings
* when there is a mismatch across the CPUs. Keep track of the
* effective value of the CTR_EL0 in our internal records for
diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
new file mode 100644
index 000000000000..5dce5e56995a
--- /dev/null
+++ b/arch/arm64/kernel/entry-common.c
@@ -0,0 +1,332 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Exception handling code
+ *
+ * Copyright (C) 2019 ARM Ltd.
+ */
+
+#include <linux/context_tracking.h>
+#include <linux/ptrace.h>
+#include <linux/thread_info.h>
+
+#include <asm/cpufeature.h>
+#include <asm/daifflags.h>
+#include <asm/esr.h>
+#include <asm/exception.h>
+#include <asm/kprobes.h>
+#include <asm/mmu.h>
+#include <asm/sysreg.h>
+
+static void notrace el1_abort(struct pt_regs *regs, unsigned long esr)
+{
+ unsigned long far = read_sysreg(far_el1);
+
+ local_daif_inherit(regs);
+ far = untagged_addr(far);
+ do_mem_abort(far, esr, regs);
+}
+NOKPROBE_SYMBOL(el1_abort);
+
+static void notrace el1_pc(struct pt_regs *regs, unsigned long esr)
+{
+ unsigned long far = read_sysreg(far_el1);
+
+ local_daif_inherit(regs);
+ do_sp_pc_abort(far, esr, regs);
+}
+NOKPROBE_SYMBOL(el1_pc);
+
+static void el1_undef(struct pt_regs *regs)
+{
+ local_daif_inherit(regs);
+ do_undefinstr(regs);
+}
+NOKPROBE_SYMBOL(el1_undef);
+
+static void el1_inv(struct pt_regs *regs, unsigned long esr)
+{
+ local_daif_inherit(regs);
+ bad_mode(regs, 0, esr);
+}
+NOKPROBE_SYMBOL(el1_inv);
+
+static void notrace el1_dbg(struct pt_regs *regs, unsigned long esr)
+{
+ unsigned long far = read_sysreg(far_el1);
+
+ /*
+ * The CPU masked interrupts, and we are leaving them masked during
+ * do_debug_exception(). Update PMR as if we had called
+ * local_daif_mask().
+ */
+ if (system_uses_irq_prio_masking())
+ gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
+
+ do_debug_exception(far, esr, regs);
+}
+NOKPROBE_SYMBOL(el1_dbg);
+
+asmlinkage void notrace el1_sync_handler(struct pt_regs *regs)
+{
+ unsigned long esr = read_sysreg(esr_el1);
+
+ switch (ESR_ELx_EC(esr)) {
+ case ESR_ELx_EC_DABT_CUR:
+ case ESR_ELx_EC_IABT_CUR:
+ el1_abort(regs, esr);
+ break;
+ /*
+ * We don't handle ESR_ELx_EC_SP_ALIGN, since we will have hit a
+ * recursive exception when trying to push the initial pt_regs.
+ */
+ case ESR_ELx_EC_PC_ALIGN:
+ el1_pc(regs, esr);
+ break;
+ case ESR_ELx_EC_SYS64:
+ case ESR_ELx_EC_UNKNOWN:
+ el1_undef(regs);
+ break;
+ case ESR_ELx_EC_BREAKPT_CUR:
+ case ESR_ELx_EC_SOFTSTP_CUR:
+ case ESR_ELx_EC_WATCHPT_CUR:
+ case ESR_ELx_EC_BRK64:
+ el1_dbg(regs, esr);
+ break;
+ default:
+ el1_inv(regs, esr);
+ }
+}
+NOKPROBE_SYMBOL(el1_sync_handler);
+
+static void notrace el0_da(struct pt_regs *regs, unsigned long esr)
+{
+ unsigned long far = read_sysreg(far_el1);
+
+ user_exit_irqoff();
+ local_daif_restore(DAIF_PROCCTX);
+ far = untagged_addr(far);
+ do_mem_abort(far, esr, regs);
+}
+NOKPROBE_SYMBOL(el0_da);
+
+static void notrace el0_ia(struct pt_regs *regs, unsigned long esr)
+{
+ unsigned long far = read_sysreg(far_el1);
+
+ /*
+ * We've taken an instruction abort from userspace and not yet
+ * re-enabled IRQs. If the address is a kernel address, apply
+ * BP hardening prior to enabling IRQs and pre-emption.
+ */
+ if (!is_ttbr0_addr(far))
+ arm64_apply_bp_hardening();
+
+ user_exit_irqoff();
+ local_daif_restore(DAIF_PROCCTX);
+ do_mem_abort(far, esr, regs);
+}
+NOKPROBE_SYMBOL(el0_ia);
+
+static void notrace el0_fpsimd_acc(struct pt_regs *regs, unsigned long esr)
+{
+ user_exit_irqoff();
+ local_daif_restore(DAIF_PROCCTX);
+ do_fpsimd_acc(esr, regs);
+}
+NOKPROBE_SYMBOL(el0_fpsimd_acc);
+
+static void notrace el0_sve_acc(struct pt_regs *regs, unsigned long esr)
+{
+ user_exit_irqoff();
+ local_daif_restore(DAIF_PROCCTX);
+ do_sve_acc(esr, regs);
+}
+NOKPROBE_SYMBOL(el0_sve_acc);
+
+static void notrace el0_fpsimd_exc(struct pt_regs *regs, unsigned long esr)
+{
+ user_exit_irqoff();
+ local_daif_restore(DAIF_PROCCTX);
+ do_fpsimd_exc(esr, regs);
+}
+NOKPROBE_SYMBOL(el0_fpsimd_exc);
+
+static void notrace el0_sys(struct pt_regs *regs, unsigned long esr)
+{
+ user_exit_irqoff();
+ local_daif_restore(DAIF_PROCCTX);
+ do_sysinstr(esr, regs);
+}
+NOKPROBE_SYMBOL(el0_sys);
+
+static void notrace el0_pc(struct pt_regs *regs, unsigned long esr)
+{
+ unsigned long far = read_sysreg(far_el1);
+
+ if (!is_ttbr0_addr(instruction_pointer(regs)))
+ arm64_apply_bp_hardening();
+
+ user_exit_irqoff();
+ local_daif_restore(DAIF_PROCCTX);
+ do_sp_pc_abort(far, esr, regs);
+}
+NOKPROBE_SYMBOL(el0_pc);
+
+static void notrace el0_sp(struct pt_regs *regs, unsigned long esr)
+{
+ user_exit_irqoff();
+ local_daif_restore(DAIF_PROCCTX_NOIRQ);
+ do_sp_pc_abort(regs->sp, esr, regs);
+}
+NOKPROBE_SYMBOL(el0_sp);
+
+static void notrace el0_undef(struct pt_regs *regs)
+{
+ user_exit_irqoff();
+ local_daif_restore(DAIF_PROCCTX);
+ do_undefinstr(regs);
+}
+NOKPROBE_SYMBOL(el0_undef);
+
+static void notrace el0_inv(struct pt_regs *regs, unsigned long esr)
+{
+ user_exit_irqoff();
+ local_daif_restore(DAIF_PROCCTX);
+ bad_el0_sync(regs, 0, esr);
+}
+NOKPROBE_SYMBOL(el0_inv);
+
+static void notrace el0_dbg(struct pt_regs *regs, unsigned long esr)
+{
+ /* Only watchpoints write FAR_EL1, otherwise it is UNKNOWN */
+ unsigned long far = read_sysreg(far_el1);
+
+ if (system_uses_irq_prio_masking())
+ gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
+
+ user_exit_irqoff();
+ do_debug_exception(far, esr, regs);
+ local_daif_restore(DAIF_PROCCTX_NOIRQ);
+}
+NOKPROBE_SYMBOL(el0_dbg);
+
+static void notrace el0_svc(struct pt_regs *regs)
+{
+ if (system_uses_irq_prio_masking())
+ gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
+
+ el0_svc_handler(regs);
+}
+NOKPROBE_SYMBOL(el0_svc);
+
+asmlinkage void notrace el0_sync_handler(struct pt_regs *regs)
+{
+ unsigned long esr = read_sysreg(esr_el1);
+
+ switch (ESR_ELx_EC(esr)) {
+ case ESR_ELx_EC_SVC64:
+ el0_svc(regs);
+ break;
+ case ESR_ELx_EC_DABT_LOW:
+ el0_da(regs, esr);
+ break;
+ case ESR_ELx_EC_IABT_LOW:
+ el0_ia(regs, esr);
+ break;
+ case ESR_ELx_EC_FP_ASIMD:
+ el0_fpsimd_acc(regs, esr);
+ break;
+ case ESR_ELx_EC_SVE:
+ el0_sve_acc(regs, esr);
+ break;
+ case ESR_ELx_EC_FP_EXC64:
+ el0_fpsimd_exc(regs, esr);
+ break;
+ case ESR_ELx_EC_SYS64:
+ case ESR_ELx_EC_WFx:
+ el0_sys(regs, esr);
+ break;
+ case ESR_ELx_EC_SP_ALIGN:
+ el0_sp(regs, esr);
+ break;
+ case ESR_ELx_EC_PC_ALIGN:
+ el0_pc(regs, esr);
+ break;
+ case ESR_ELx_EC_UNKNOWN:
+ el0_undef(regs);
+ break;
+ case ESR_ELx_EC_BREAKPT_LOW:
+ case ESR_ELx_EC_SOFTSTP_LOW:
+ case ESR_ELx_EC_WATCHPT_LOW:
+ case ESR_ELx_EC_BRK64:
+ el0_dbg(regs, esr);
+ break;
+ default:
+ el0_inv(regs, esr);
+ }
+}
+NOKPROBE_SYMBOL(el0_sync_handler);
+
+#ifdef CONFIG_COMPAT
+static void notrace el0_cp15(struct pt_regs *regs, unsigned long esr)
+{
+ user_exit_irqoff();
+ local_daif_restore(DAIF_PROCCTX);
+ do_cp15instr(esr, regs);
+}
+NOKPROBE_SYMBOL(el0_cp15);
+
+static void notrace el0_svc_compat(struct pt_regs *regs)
+{
+ if (system_uses_irq_prio_masking())
+ gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
+
+ el0_svc_compat_handler(regs);
+}
+NOKPROBE_SYMBOL(el0_svc_compat);
+
+asmlinkage void notrace el0_sync_compat_handler(struct pt_regs *regs)
+{
+ unsigned long esr = read_sysreg(esr_el1);
+
+ switch (ESR_ELx_EC(esr)) {
+ case ESR_ELx_EC_SVC32:
+ el0_svc_compat(regs);
+ break;
+ case ESR_ELx_EC_DABT_LOW:
+ el0_da(regs, esr);
+ break;
+ case ESR_ELx_EC_IABT_LOW:
+ el0_ia(regs, esr);
+ break;
+ case ESR_ELx_EC_FP_ASIMD:
+ el0_fpsimd_acc(regs, esr);
+ break;
+ case ESR_ELx_EC_FP_EXC32:
+ el0_fpsimd_exc(regs, esr);
+ break;
+ case ESR_ELx_EC_PC_ALIGN:
+ el0_pc(regs, esr);
+ break;
+ case ESR_ELx_EC_UNKNOWN:
+ case ESR_ELx_EC_CP14_MR:
+ case ESR_ELx_EC_CP14_LS:
+ case ESR_ELx_EC_CP14_64:
+ el0_undef(regs);
+ break;
+ case ESR_ELx_EC_CP15_32:
+ case ESR_ELx_EC_CP15_64:
+ el0_cp15(regs, esr);
+ break;
+ case ESR_ELx_EC_BREAKPT_LOW:
+ case ESR_ELx_EC_SOFTSTP_LOW:
+ case ESR_ELx_EC_WATCHPT_LOW:
+ case ESR_ELx_EC_BKPT32:
+ el0_dbg(regs, esr);
+ break;
+ default:
+ el0_inv(regs, esr);
+ }
+}
+NOKPROBE_SYMBOL(el0_sync_compat_handler);
+#endif /* CONFIG_COMPAT */
diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S
index 33d003d80121..4fe1514fcbfd 100644
--- a/arch/arm64/kernel/entry-ftrace.S
+++ b/arch/arm64/kernel/entry-ftrace.S
@@ -7,10 +7,137 @@
*/
#include <linux/linkage.h>
+#include <asm/asm-offsets.h>
#include <asm/assembler.h>
#include <asm/ftrace.h>
#include <asm/insn.h>
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+/*
+ * Due to -fpatchable-function-entry=2, the compiler has placed two NOPs before
+ * the regular function prologue. For an enabled callsite, ftrace_init_nop() and
+ * ftrace_make_call() have patched those NOPs to:
+ *
+ * MOV X9, LR
+ * BL <entry>
+ *
+ * ... where <entry> is either ftrace_caller or ftrace_regs_caller.
+ *
+ * Each instrumented function follows the AAPCS, so here x0-x8 and x19-x30 are
+ * live, and x9-x18 are safe to clobber.
+ *
+ * We save the callsite's context into a pt_regs before invoking any ftrace
+ * callbacks. So that we can get a sensible backtrace, we create a stack record
+ * for the callsite and the ftrace entry assembly. This is not sufficient for
+ * reliable stacktrace: until we create the callsite stack record, its caller
+ * is missing from the LR and existing chain of frame records.
+ */
+ .macro ftrace_regs_entry, allregs=0
+ /* Make room for pt_regs, plus a callee frame */
+ sub sp, sp, #(S_FRAME_SIZE + 16)
+
+ /* Save function arguments (and x9 for simplicity) */
+ stp x0, x1, [sp, #S_X0]
+ stp x2, x3, [sp, #S_X2]
+ stp x4, x5, [sp, #S_X4]
+ stp x6, x7, [sp, #S_X6]
+ stp x8, x9, [sp, #S_X8]
+
+ /* Optionally save the callee-saved registers, always save the FP */
+ .if \allregs == 1
+ stp x10, x11, [sp, #S_X10]
+ stp x12, x13, [sp, #S_X12]
+ stp x14, x15, [sp, #S_X14]
+ stp x16, x17, [sp, #S_X16]
+ stp x18, x19, [sp, #S_X18]
+ stp x20, x21, [sp, #S_X20]
+ stp x22, x23, [sp, #S_X22]
+ stp x24, x25, [sp, #S_X24]
+ stp x26, x27, [sp, #S_X26]
+ stp x28, x29, [sp, #S_X28]
+ .else
+ str x29, [sp, #S_FP]
+ .endif
+
+ /* Save the callsite's SP and LR */
+ add x10, sp, #(S_FRAME_SIZE + 16)
+ stp x9, x10, [sp, #S_LR]
+
+ /* Save the PC after the ftrace callsite */
+ str x30, [sp, #S_PC]
+
+ /* Create a frame record for the callsite above pt_regs */
+ stp x29, x9, [sp, #S_FRAME_SIZE]
+ add x29, sp, #S_FRAME_SIZE
+
+ /* Create our frame record within pt_regs. */
+ stp x29, x30, [sp, #S_STACKFRAME]
+ add x29, sp, #S_STACKFRAME
+ .endm
+
+ENTRY(ftrace_regs_caller)
+ ftrace_regs_entry 1
+ b ftrace_common
+ENDPROC(ftrace_regs_caller)
+
+ENTRY(ftrace_caller)
+ ftrace_regs_entry 0
+ b ftrace_common
+ENDPROC(ftrace_caller)
+
+ENTRY(ftrace_common)
+ sub x0, x30, #AARCH64_INSN_SIZE // ip (callsite's BL insn)
+ mov x1, x9 // parent_ip (callsite's LR)
+ ldr_l x2, function_trace_op // op
+ mov x3, sp // regs
+
+GLOBAL(ftrace_call)
+ bl ftrace_stub
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+GLOBAL(ftrace_graph_call) // ftrace_graph_caller();
+ nop // If enabled, this will be replaced
+ // with "b ftrace_graph_caller"
+#endif
+
+/*
+ * At the callsite x0-x8 and x19-x30 were live. Any C code will have preserved
+ * x19-x29 per the AAPCS, and we created frame records upon entry, so we need
+ * to restore x0-x8, x29, and x30.
+ */
+ftrace_common_return:
+ /* Restore function arguments */
+ ldp x0, x1, [sp]
+ ldp x2, x3, [sp, #S_X2]
+ ldp x4, x5, [sp, #S_X4]
+ ldp x6, x7, [sp, #S_X6]
+ ldr x8, [sp, #S_X8]
+
+ /* Restore the callsite's FP, LR, PC */
+ ldr x29, [sp, #S_FP]
+ ldr x30, [sp, #S_LR]
+ ldr x9, [sp, #S_PC]
+
+ /* Restore the callsite's SP */
+ add sp, sp, #S_FRAME_SIZE + 16
+
+ ret x9
+ENDPROC(ftrace_common)
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ENTRY(ftrace_graph_caller)
+ ldr x0, [sp, #S_PC]
+ sub x0, x0, #AARCH64_INSN_SIZE // ip (callsite's BL insn)
+ add x1, sp, #S_LR // parent_ip (callsite's LR)
+ ldr x2, [sp, #S_FRAME_SIZE] // parent fp (callsite's FP)
+ bl prepare_ftrace_return
+ b ftrace_common_return
+ENDPROC(ftrace_graph_caller)
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
+#else /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
+
/*
* Gcc with -pg will put the following code in the beginning of each function:
* mov x0, x30
@@ -160,11 +287,6 @@ GLOBAL(ftrace_graph_call) // ftrace_graph_caller();
mcount_exit
ENDPROC(ftrace_caller)
-#endif /* CONFIG_DYNAMIC_FTRACE */
-
-ENTRY(ftrace_stub)
- ret
-ENDPROC(ftrace_stub)
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
@@ -184,7 +306,15 @@ ENTRY(ftrace_graph_caller)
mcount_exit
ENDPROC(ftrace_graph_caller)
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+#endif /* CONFIG_DYNAMIC_FTRACE */
+#endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
+
+ENTRY(ftrace_stub)
+ ret
+ENDPROC(ftrace_stub)
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
* void return_to_handler(void)
*
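The x0-x3 setup in ftrace_common above matches the generic ftrace callback signature of this era (include/linux/ftrace.h); shown here for reference:

    typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
                                  struct ftrace_ops *op, struct pt_regs *regs);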
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index cf3bd2976e57..583f71abbe98 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -269,8 +269,10 @@ alternative_else_nop_endif
alternative_if ARM64_HAS_IRQ_PRIO_MASKING
ldr x20, [sp, #S_PMR_SAVE]
msr_s SYS_ICC_PMR_EL1, x20
- /* Ensure priority change is seen by redistributor */
- dsb sy
+ mrs_s x21, SYS_ICC_CTLR_EL1
+ tbz x21, #6, .L__skip_pmr_sync\@ // Check for ICC_CTLR_EL1.PMHE
+ dsb sy // Ensure priority change is seen by redistributor
+.L__skip_pmr_sync\@:
alternative_else_nop_endif
ldp x21, x22, [sp, #S_PC] // load ELR, SPSR
@@ -578,76 +580,9 @@ ENDPROC(el1_error_invalid)
.align 6
el1_sync:
kernel_entry 1
- mrs x1, esr_el1 // read the syndrome register
- lsr x24, x1, #ESR_ELx_EC_SHIFT // exception class
- cmp x24, #ESR_ELx_EC_DABT_CUR // data abort in EL1
- b.eq el1_da
- cmp x24, #ESR_ELx_EC_IABT_CUR // instruction abort in EL1
- b.eq el1_ia
- cmp x24, #ESR_ELx_EC_SYS64 // configurable trap
- b.eq el1_undef
- cmp x24, #ESR_ELx_EC_PC_ALIGN // pc alignment exception
- b.eq el1_pc
- cmp x24, #ESR_ELx_EC_UNKNOWN // unknown exception in EL1
- b.eq el1_undef
- cmp x24, #ESR_ELx_EC_BREAKPT_CUR // debug exception in EL1
- b.ge el1_dbg
- b el1_inv
-
-el1_ia:
- /*
- * Fall through to the Data abort case
- */
-el1_da:
- /*
- * Data abort handling
- */
- mrs x3, far_el1
- inherit_daif pstate=x23, tmp=x2
- untagged_addr x0, x3
- mov x2, sp // struct pt_regs
- bl do_mem_abort
-
- kernel_exit 1
-el1_pc:
- /*
- * PC alignment exception handling. We don't handle SP alignment faults,
- * since we will have hit a recursive exception when trying to push the
- * initial pt_regs.
- */
- mrs x0, far_el1
- inherit_daif pstate=x23, tmp=x2
- mov x2, sp
- bl do_sp_pc_abort
- ASM_BUG()
-el1_undef:
- /*
- * Undefined instruction
- */
- inherit_daif pstate=x23, tmp=x2
mov x0, sp
- bl do_undefinstr
+ bl el1_sync_handler
kernel_exit 1
-el1_dbg:
- /*
- * Debug exception handling
- */
- cmp x24, #ESR_ELx_EC_BRK64 // if BRK64
- cinc x24, x24, eq // set bit '0'
- tbz x24, #0, el1_inv // EL1 only
- gic_prio_kentry_setup tmp=x3
- mrs x0, far_el1
- mov x2, sp // struct pt_regs
- bl do_debug_exception
- kernel_exit 1
-el1_inv:
- // TODO: add support for undefined instructions in kernel mode
- inherit_daif pstate=x23, tmp=x2
- mov x0, sp
- mov x2, x1
- mov x1, #BAD_SYNC
- bl bad_mode
- ASM_BUG()
ENDPROC(el1_sync)
.align 6
@@ -714,71 +649,18 @@ ENDPROC(el1_irq)
.align 6
el0_sync:
kernel_entry 0
- mrs x25, esr_el1 // read the syndrome register
- lsr x24, x25, #ESR_ELx_EC_SHIFT // exception class
- cmp x24, #ESR_ELx_EC_SVC64 // SVC in 64-bit state
- b.eq el0_svc
- cmp x24, #ESR_ELx_EC_DABT_LOW // data abort in EL0
- b.eq el0_da
- cmp x24, #ESR_ELx_EC_IABT_LOW // instruction abort in EL0
- b.eq el0_ia
- cmp x24, #ESR_ELx_EC_FP_ASIMD // FP/ASIMD access
- b.eq el0_fpsimd_acc
- cmp x24, #ESR_ELx_EC_SVE // SVE access
- b.eq el0_sve_acc
- cmp x24, #ESR_ELx_EC_FP_EXC64 // FP/ASIMD exception
- b.eq el0_fpsimd_exc
- cmp x24, #ESR_ELx_EC_SYS64 // configurable trap
- ccmp x24, #ESR_ELx_EC_WFx, #4, ne
- b.eq el0_sys
- cmp x24, #ESR_ELx_EC_SP_ALIGN // stack alignment exception
- b.eq el0_sp
- cmp x24, #ESR_ELx_EC_PC_ALIGN // pc alignment exception
- b.eq el0_pc
- cmp x24, #ESR_ELx_EC_UNKNOWN // unknown exception in EL0
- b.eq el0_undef
- cmp x24, #ESR_ELx_EC_BREAKPT_LOW // debug exception in EL0
- b.ge el0_dbg
- b el0_inv
+ mov x0, sp
+ bl el0_sync_handler
+ b ret_to_user
#ifdef CONFIG_COMPAT
.align 6
el0_sync_compat:
kernel_entry 0, 32
- mrs x25, esr_el1 // read the syndrome register
- lsr x24, x25, #ESR_ELx_EC_SHIFT // exception class
- cmp x24, #ESR_ELx_EC_SVC32 // SVC in 32-bit state
- b.eq el0_svc_compat
- cmp x24, #ESR_ELx_EC_DABT_LOW // data abort in EL0
- b.eq el0_da
- cmp x24, #ESR_ELx_EC_IABT_LOW // instruction abort in EL0
- b.eq el0_ia
- cmp x24, #ESR_ELx_EC_FP_ASIMD // FP/ASIMD access
- b.eq el0_fpsimd_acc
- cmp x24, #ESR_ELx_EC_FP_EXC32 // FP/ASIMD exception
- b.eq el0_fpsimd_exc
- cmp x24, #ESR_ELx_EC_PC_ALIGN // pc alignment exception
- b.eq el0_pc
- cmp x24, #ESR_ELx_EC_UNKNOWN // unknown exception in EL0
- b.eq el0_undef
- cmp x24, #ESR_ELx_EC_CP15_32 // CP15 MRC/MCR trap
- b.eq el0_cp15
- cmp x24, #ESR_ELx_EC_CP15_64 // CP15 MRRC/MCRR trap
- b.eq el0_cp15
- cmp x24, #ESR_ELx_EC_CP14_MR // CP14 MRC/MCR trap
- b.eq el0_undef
- cmp x24, #ESR_ELx_EC_CP14_LS // CP14 LDC/STC trap
- b.eq el0_undef
- cmp x24, #ESR_ELx_EC_CP14_64 // CP14 MRRC/MCRR trap
- b.eq el0_undef
- cmp x24, #ESR_ELx_EC_BREAKPT_LOW // debug exception in EL0
- b.ge el0_dbg
- b el0_inv
-el0_svc_compat:
- gic_prio_kentry_setup tmp=x1
mov x0, sp
- bl el0_svc_compat_handler
+ bl el0_sync_compat_handler
b ret_to_user
+ENDPROC(el0_sync)
.align 6
el0_irq_compat:
@@ -788,139 +670,7 @@ el0_irq_compat:
el0_error_compat:
kernel_entry 0, 32
b el0_error_naked
-
-el0_cp15:
- /*
- * Trapped CP15 (MRC, MCR, MRRC, MCRR) instructions
- */
- ct_user_exit_irqoff
- enable_daif
- mov x0, x25
- mov x1, sp
- bl do_cp15instr
- b ret_to_user
-#endif
-
-el0_da:
- /*
- * Data abort handling
- */
- mrs x26, far_el1
- ct_user_exit_irqoff
- enable_daif
- untagged_addr x0, x26
- mov x1, x25
- mov x2, sp
- bl do_mem_abort
- b ret_to_user
-el0_ia:
- /*
- * Instruction abort handling
- */
- mrs x26, far_el1
- gic_prio_kentry_setup tmp=x0
- ct_user_exit_irqoff
- enable_da_f
-#ifdef CONFIG_TRACE_IRQFLAGS
- bl trace_hardirqs_off
-#endif
- mov x0, x26
- mov x1, x25
- mov x2, sp
- bl do_el0_ia_bp_hardening
- b ret_to_user
-el0_fpsimd_acc:
- /*
- * Floating Point or Advanced SIMD access
- */
- ct_user_exit_irqoff
- enable_daif
- mov x0, x25
- mov x1, sp
- bl do_fpsimd_acc
- b ret_to_user
-el0_sve_acc:
- /*
- * Scalable Vector Extension access
- */
- ct_user_exit_irqoff
- enable_daif
- mov x0, x25
- mov x1, sp
- bl do_sve_acc
- b ret_to_user
-el0_fpsimd_exc:
- /*
- * Floating Point, Advanced SIMD or SVE exception
- */
- ct_user_exit_irqoff
- enable_daif
- mov x0, x25
- mov x1, sp
- bl do_fpsimd_exc
- b ret_to_user
-el0_sp:
- ldr x26, [sp, #S_SP]
- b el0_sp_pc
-el0_pc:
- mrs x26, far_el1
-el0_sp_pc:
- /*
- * Stack or PC alignment exception handling
- */
- gic_prio_kentry_setup tmp=x0
- ct_user_exit_irqoff
- enable_da_f
-#ifdef CONFIG_TRACE_IRQFLAGS
- bl trace_hardirqs_off
#endif
- mov x0, x26
- mov x1, x25
- mov x2, sp
- bl do_sp_pc_abort
- b ret_to_user
-el0_undef:
- /*
- * Undefined instruction
- */
- ct_user_exit_irqoff
- enable_daif
- mov x0, sp
- bl do_undefinstr
- b ret_to_user
-el0_sys:
- /*
- * System instructions, for trapped cache maintenance instructions
- */
- ct_user_exit_irqoff
- enable_daif
- mov x0, x25
- mov x1, sp
- bl do_sysinstr
- b ret_to_user
-el0_dbg:
- /*
- * Debug exception handling
- */
- tbnz x24, #0, el0_inv // EL0 only
- mrs x24, far_el1
- gic_prio_kentry_setup tmp=x3
- ct_user_exit_irqoff
- mov x0, x24
- mov x1, x25
- mov x2, sp
- bl do_debug_exception
- enable_da_f
- b ret_to_user
-el0_inv:
- ct_user_exit_irqoff
- enable_daif
- mov x0, sp
- mov x1, #BAD_SYNC
- mov x2, x25
- bl bad_el0_sync
- b ret_to_user
-ENDPROC(el0_sync)
.align 6
el0_irq:
@@ -999,17 +749,6 @@ finish_ret_to_user:
kernel_exit 0
ENDPROC(ret_to_user)
-/*
- * SVC handler.
- */
- .align 6
-el0_svc:
- gic_prio_kentry_setup tmp=x1
- mov x0, sp
- bl el0_svc_handler
- b ret_to_user
-ENDPROC(el0_svc)
-
.popsection // .entry.text
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
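el1_sync is now a thin shim into C. A minimal sketch of the destination handler, as introduced by this series in arch/arm64/kernel/entry-common.c (cases abbreviated):

    asmlinkage void notrace el1_sync_handler(struct pt_regs *regs)
    {
    	unsigned long esr = read_sysreg(esr_el1);

    	switch (ESR_ELx_EC(esr)) {
    	case ESR_ELx_EC_DABT_CUR:
    	case ESR_ELx_EC_IABT_CUR:
    		el1_abort(regs, esr);
    		break;
    	case ESR_ELx_EC_PC_ALIGN:
    		el1_pc(regs, esr);
    		break;
    	case ESR_ELx_EC_SYS64:
    	case ESR_ELx_EC_UNKNOWN:
    		el1_undef(regs);
    		break;
    	case ESR_ELx_EC_BREAKPT_CUR:
    	case ESR_ELx_EC_SOFTSTP_CUR:
    	case ESR_ELx_EC_WATCHPT_CUR:
    	case ESR_ELx_EC_BRK64:
    		el1_dbg(regs, esr);
    		break;
    	default:
    		el1_inv(regs, esr);
    	}
    }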
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index 37d3912cfe06..3eb338f14386 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -920,7 +920,7 @@ void fpsimd_release_task(struct task_struct *dead_task)
* would have disabled the SVE access trap for userspace during
* ret_to_user, making an SVE access trap impossible in that case.
*/
-asmlinkage void do_sve_acc(unsigned int esr, struct pt_regs *regs)
+void do_sve_acc(unsigned int esr, struct pt_regs *regs)
{
/* Even if we chose not to use SVE, the hardware could still trap: */
if (unlikely(!system_supports_sve()) || WARN_ON(is_compat_task())) {
@@ -947,7 +947,7 @@ asmlinkage void do_sve_acc(unsigned int esr, struct pt_regs *regs)
/*
* Trapped FP/ASIMD access.
*/
-asmlinkage void do_fpsimd_acc(unsigned int esr, struct pt_regs *regs)
+void do_fpsimd_acc(unsigned int esr, struct pt_regs *regs)
{
/* TODO: implement lazy context saving/restoring */
WARN_ON(1);
@@ -956,7 +956,7 @@ asmlinkage void do_fpsimd_acc(unsigned int esr, struct pt_regs *regs)
/*
* Raise a SIGFPE for the current process.
*/
-asmlinkage void do_fpsimd_exc(unsigned int esr, struct pt_regs *regs)
+void do_fpsimd_exc(unsigned int esr, struct pt_regs *regs)
{
unsigned int si_code = FPE_FLTUNK;
diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c
index 06e56b470315..8618faa82e6d 100644
--- a/arch/arm64/kernel/ftrace.c
+++ b/arch/arm64/kernel/ftrace.c
@@ -62,6 +62,19 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
return ftrace_modify_code(pc, 0, new, false);
}
+static struct plt_entry *get_ftrace_plt(struct module *mod, unsigned long addr)
+{
+#ifdef CONFIG_ARM64_MODULE_PLTS
+ struct plt_entry *plt = mod->arch.ftrace_trampolines;
+
+ if (addr == FTRACE_ADDR)
+ return &plt[FTRACE_PLT_IDX];
+ if (addr == FTRACE_REGS_ADDR && IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
+ return &plt[FTRACE_REGS_PLT_IDX];
+#endif
+ return NULL;
+}
+
/*
* Turn on the call to ftrace_caller() in instrumented function
*/
@@ -72,9 +85,11 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
long offset = (long)pc - (long)addr;
if (offset < -SZ_128M || offset >= SZ_128M) {
-#ifdef CONFIG_ARM64_MODULE_PLTS
- struct plt_entry trampoline, *dst;
struct module *mod;
+ struct plt_entry *plt;
+
+ if (!IS_ENABLED(CONFIG_ARM64_MODULE_PLTS))
+ return -EINVAL;
/*
* On kernels that support module PLTs, the offset between the
@@ -93,49 +108,13 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
if (WARN_ON(!mod))
return -EINVAL;
- /*
- * There is only one ftrace trampoline per module. For now,
- * this is not a problem since on arm64, all dynamic ftrace
- * invocations are routed via ftrace_caller(). This will need
- * to be revisited if support for multiple ftrace entry points
- * is added in the future, but for now, the pr_err() below
- * deals with a theoretical issue only.
- *
- * Note that PLTs are place relative, and plt_entries_equal()
- * checks whether they point to the same target. Here, we need
- * to check if the actual opcodes are in fact identical,
- * regardless of the offset in memory so use memcmp() instead.
- */
- dst = mod->arch.ftrace_trampoline;
- trampoline = get_plt_entry(addr, dst);
- if (memcmp(dst, &trampoline, sizeof(trampoline))) {
- if (plt_entry_is_initialized(dst)) {
- pr_err("ftrace: far branches to multiple entry points unsupported inside a single module\n");
- return -EINVAL;
- }
-
- /* point the trampoline to our ftrace entry point */
- module_disable_ro(mod);
- *dst = trampoline;
- module_enable_ro(mod, true);
-
- /*
- * Ensure updated trampoline is visible to instruction
- * fetch before we patch in the branch. Although the
- * architecture doesn't require an IPI in this case,
- * Neoverse-N1 erratum #1542419 does require one
- * if the TLB maintenance in module_enable_ro() is
- * skipped due to rodata_enabled. It doesn't seem worth
- * it to make it conditional given that this is
- * certainly not a fast-path.
- */
- flush_icache_range((unsigned long)&dst[0],
- (unsigned long)&dst[1]);
+ plt = get_ftrace_plt(mod, addr);
+ if (!plt) {
+ pr_err("ftrace: no module PLT for %ps\n", (void *)addr);
+ return -EINVAL;
}
- addr = (unsigned long)dst;
-#else /* CONFIG_ARM64_MODULE_PLTS */
- return -EINVAL;
-#endif /* CONFIG_ARM64_MODULE_PLTS */
+
+ addr = (unsigned long)plt;
}
old = aarch64_insn_gen_nop();
@@ -144,6 +123,55 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
return ftrace_modify_code(pc, old, new, true);
}
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
+ unsigned long addr)
+{
+ unsigned long pc = rec->ip;
+ u32 old, new;
+
+ old = aarch64_insn_gen_branch_imm(pc, old_addr,
+ AARCH64_INSN_BRANCH_LINK);
+ new = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK);
+
+ return ftrace_modify_code(pc, old, new, true);
+}
+
+/*
+ * The compiler has inserted two NOPs before the regular function prologue.
+ * All instrumented functions follow the AAPCS, so x0-x8 and x19-x30 are live,
+ * and x9-x18 are free for our use.
+ *
+ * At runtime we want to be able to swing a single NOP <-> BL to enable or
+ * disable the ftrace call. The BL requires us to save the original LR value,
+ * so here we insert a <MOV X9, LR> over the first NOP so the instructions
+ * before the regular prologue are:
+ *
+ * | Compiled | Disabled   | Enabled    |
+ * +----------+------------+------------+
+ * | NOP      | MOV X9, LR | MOV X9, LR |
+ * | NOP      | NOP        | BL <entry> |
+ *
+ * The LR value will be recovered by ftrace_regs_entry, and restored into LR
+ * before returning to the regular function prologue. When a function is not
+ * being traced, the MOV is not harmful given x9 is not live per the AAPCS.
+ *
+ * Note: ftrace_process_locs() has pre-adjusted rec->ip to be the address of
+ * the BL.
+ */
+int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
+{
+ unsigned long pc = rec->ip - AARCH64_INSN_SIZE;
+ u32 old, new;
+
+ old = aarch64_insn_gen_nop();
+ new = aarch64_insn_gen_move_reg(AARCH64_INSN_REG_9,
+ AARCH64_INSN_REG_LR,
+ AARCH64_INSN_VARIANT_64BIT);
+ return ftrace_modify_code(pc, old, new, true);
+}
+#endif
+
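As a worked example of the table in the comment above, the raw A64 encodings being swapped are (a sketch; the BL immediate is the signed word offset from the patch site to the entry point):

    /*
     * NOP        : 0xd503201f
     * MOV X9, LR : an alias of ORR X9, XZR, X30 (see aarch64_insn_gen_move_reg)
     * BL <entry> : 0x94000000 | (((entry - pc) >> 2) & 0x03ffffff)
     */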
/*
* Turn off the call to ftrace_caller() in instrumented function
*/
@@ -156,9 +184,11 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
long offset = (long)pc - (long)addr;
if (offset < -SZ_128M || offset >= SZ_128M) {
-#ifdef CONFIG_ARM64_MODULE_PLTS
u32 replaced;
+ if (!IS_ENABLED(CONFIG_ARM64_MODULE_PLTS))
+ return -EINVAL;
+
/*
* 'mod' is only set at module load time, but if we end up
* dealing with an out-of-range condition, we can assume it
@@ -189,9 +219,6 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
return -EINVAL;
validate = false;
-#else /* CONFIG_ARM64_MODULE_PLTS */
- return -EINVAL;
-#endif /* CONFIG_ARM64_MODULE_PLTS */
} else {
old = aarch64_insn_gen_branch_imm(pc, addr,
AARCH64_INSN_BRANCH_LINK);
diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
index 38ee1514cd9c..0b727edf4104 100644
--- a/arch/arm64/kernel/hw_breakpoint.c
+++ b/arch/arm64/kernel/hw_breakpoint.c
@@ -51,7 +51,7 @@ int hw_breakpoint_slots(int type)
case TYPE_DATA:
return get_num_wrps();
default:
- pr_warning("unknown slot type: %d\n", type);
+ pr_warn("unknown slot type: %d\n", type);
return 0;
}
}
@@ -112,7 +112,7 @@ static u64 read_wb_reg(int reg, int n)
GEN_READ_WB_REG_CASES(AARCH64_DBG_REG_WVR, AARCH64_DBG_REG_NAME_WVR, val);
GEN_READ_WB_REG_CASES(AARCH64_DBG_REG_WCR, AARCH64_DBG_REG_NAME_WCR, val);
default:
- pr_warning("attempt to read from unknown breakpoint register %d\n", n);
+ pr_warn("attempt to read from unknown breakpoint register %d\n", n);
}
return val;
@@ -127,7 +127,7 @@ static void write_wb_reg(int reg, int n, u64 val)
GEN_WRITE_WB_REG_CASES(AARCH64_DBG_REG_WVR, AARCH64_DBG_REG_NAME_WVR, val);
GEN_WRITE_WB_REG_CASES(AARCH64_DBG_REG_WCR, AARCH64_DBG_REG_NAME_WCR, val);
default:
- pr_warning("attempt to write to unknown breakpoint register %d\n", n);
+ pr_warn("attempt to write to unknown breakpoint register %d\n", n);
}
isb();
}
@@ -145,7 +145,7 @@ static enum dbg_active_el debug_exception_level(int privilege)
case AARCH64_BREAKPOINT_EL1:
return DBG_ACTIVE_EL1;
default:
- pr_warning("invalid breakpoint privilege level %d\n", privilege);
+ pr_warn("invalid breakpoint privilege level %d\n", privilege);
return -EINVAL;
}
}
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
index d801a7094076..513b29c3e735 100644
--- a/arch/arm64/kernel/insn.c
+++ b/arch/arm64/kernel/insn.c
@@ -1268,6 +1268,19 @@ u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst,
return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
}
+/*
+ * MOV (register) is architecturally an alias of ORR (shifted register) where
+ * MOV <*d>, <*m> is equivalent to ORR <*d>, <*ZR>, <*m>
+ */
+u32 aarch64_insn_gen_move_reg(enum aarch64_insn_register dst,
+ enum aarch64_insn_register src,
+ enum aarch64_insn_variant variant)
+{
+ return aarch64_insn_gen_logical_shifted_reg(dst, AARCH64_INSN_REG_ZR,
+ src, 0, variant,
+ AARCH64_INSN_LOGIC_ORR);
+}
+
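A usage sketch: this helper is what ftrace_init_nop() calls to build the instruction patched over the first NOP:

    u32 insn = aarch64_insn_gen_move_reg(AARCH64_INSN_REG_9,
    				     AARCH64_INSN_REG_LR,
    				     AARCH64_INSN_VARIANT_64BIT);
    /* insn == 0xaa1e03e9, i.e. ORR X9, XZR, X30 */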
u32 aarch64_insn_gen_adr(unsigned long pc, unsigned long addr,
enum aarch64_insn_register reg,
enum aarch64_insn_adr_type type)
diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
index 416f537bf614..2a11a962e571 100644
--- a/arch/arm64/kernel/kaslr.c
+++ b/arch/arm64/kernel/kaslr.c
@@ -19,6 +19,14 @@
#include <asm/pgtable.h>
#include <asm/sections.h>
+enum kaslr_status {
+ KASLR_ENABLED,
+ KASLR_DISABLED_CMDLINE,
+ KASLR_DISABLED_NO_SEED,
+ KASLR_DISABLED_FDT_REMAP,
+};
+
+static enum kaslr_status __initdata kaslr_status;
u64 __ro_after_init module_alloc_base;
u16 __initdata memstart_offset_seed;
@@ -91,15 +99,15 @@ u64 __init kaslr_early_init(u64 dt_phys)
*/
early_fixmap_init();
fdt = fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL);
- if (!fdt)
+ if (!fdt) {
+ kaslr_status = KASLR_DISABLED_FDT_REMAP;
return 0;
+ }
/*
* Retrieve (and wipe) the seed from the FDT
*/
seed = get_kaslr_seed(fdt);
- if (!seed)
- return 0;
/*
* Check if 'nokaslr' appears on the command line, and
@@ -107,8 +115,15 @@ u64 __init kaslr_early_init(u64 dt_phys)
*/
cmdline = kaslr_get_cmdline(fdt);
str = strstr(cmdline, "nokaslr");
- if (str == cmdline || (str > cmdline && *(str - 1) == ' '))
+ if (str == cmdline || (str > cmdline && *(str - 1) == ' ')) {
+ kaslr_status = KASLR_DISABLED_CMDLINE;
+ return 0;
+ }
+
+ if (!seed) {
+ kaslr_status = KASLR_DISABLED_NO_SEED;
return 0;
+ }
/*
* OK, so we are proceeding with KASLR enabled. Calculate a suitable
@@ -170,3 +185,24 @@ u64 __init kaslr_early_init(u64 dt_phys)
return offset;
}
+
+static int __init kaslr_init(void)
+{
+ switch (kaslr_status) {
+ case KASLR_ENABLED:
+ pr_info("KASLR enabled\n");
+ break;
+ case KASLR_DISABLED_CMDLINE:
+ pr_info("KASLR disabled on command line\n");
+ break;
+ case KASLR_DISABLED_NO_SEED:
+ pr_warn("KASLR disabled due to lack of seed\n");
+ break;
+ case KASLR_DISABLED_FDT_REMAP:
+ pr_warn("KASLR disabled due to FDT remapping failure\n");
+ break;
+ }
+
+ return 0;
+}
+core_initcall(kaslr_init)
diff --git a/arch/arm64/kernel/module-plts.c b/arch/arm64/kernel/module-plts.c
index b182442b87a3..65b08a74aec6 100644
--- a/arch/arm64/kernel/module-plts.c
+++ b/arch/arm64/kernel/module-plts.c
@@ -4,6 +4,7 @@
*/
#include <linux/elf.h>
+#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sort.h>
@@ -330,7 +331,7 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
tramp->sh_type = SHT_NOBITS;
tramp->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
tramp->sh_addralign = __alignof__(struct plt_entry);
- tramp->sh_size = sizeof(struct plt_entry);
+ tramp->sh_size = NR_FTRACE_PLTS * sizeof(struct plt_entry);
}
return 0;
diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
index 03ff15bffbb6..1cd1a4d0ed30 100644
--- a/arch/arm64/kernel/module.c
+++ b/arch/arm64/kernel/module.c
@@ -9,6 +9,7 @@
#include <linux/bitops.h>
#include <linux/elf.h>
+#include <linux/ftrace.h>
#include <linux/gfp.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
@@ -470,22 +471,58 @@ overflow:
return -ENOEXEC;
}
-int module_finalize(const Elf_Ehdr *hdr,
- const Elf_Shdr *sechdrs,
- struct module *me)
+static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
+ const Elf_Shdr *sechdrs,
+ const char *name)
{
const Elf_Shdr *s, *se;
const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) {
- if (strcmp(".altinstructions", secstrs + s->sh_name) == 0)
- apply_alternatives_module((void *)s->sh_addr, s->sh_size);
-#ifdef CONFIG_ARM64_MODULE_PLTS
- if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE) &&
- !strcmp(".text.ftrace_trampoline", secstrs + s->sh_name))
- me->arch.ftrace_trampoline = (void *)s->sh_addr;
-#endif
+ if (strcmp(name, secstrs + s->sh_name) == 0)
+ return s;
}
+ return NULL;
+}
+
+static inline void __init_plt(struct plt_entry *plt, unsigned long addr)
+{
+ *plt = get_plt_entry(addr, plt);
+}
+
+static int module_init_ftrace_plt(const Elf_Ehdr *hdr,
+ const Elf_Shdr *sechdrs,
+ struct module *mod)
+{
+#if defined(CONFIG_ARM64_MODULE_PLTS) && defined(CONFIG_DYNAMIC_FTRACE)
+ const Elf_Shdr *s;
+ struct plt_entry *plts;
+
+ s = find_section(hdr, sechdrs, ".text.ftrace_trampoline");
+ if (!s)
+ return -ENOEXEC;
+
+ plts = (void *)s->sh_addr;
+
+ __init_plt(&plts[FTRACE_PLT_IDX], FTRACE_ADDR);
+
+ if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
+ __init_plt(&plts[FTRACE_REGS_PLT_IDX], FTRACE_REGS_ADDR);
+
+ mod->arch.ftrace_trampolines = plts;
+#endif
return 0;
}
+
+int module_finalize(const Elf_Ehdr *hdr,
+ const Elf_Shdr *sechdrs,
+ struct module *me)
+{
+ const Elf_Shdr *s;
+ s = find_section(hdr, sechdrs, ".altinstructions");
+ if (s)
+ apply_alternatives_module((void *)s->sh_addr, s->sh_size);
+
+ return module_init_ftrace_plt(hdr, sechdrs, me);
+}
diff --git a/arch/arm64/kernel/paravirt.c b/arch/arm64/kernel/paravirt.c
index 4cfed91fe256..1ef702b0be2d 100644
--- a/arch/arm64/kernel/paravirt.c
+++ b/arch/arm64/kernel/paravirt.c
@@ -6,13 +6,153 @@
* Author: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
*/
+#define pr_fmt(fmt) "arm-pv: " fmt
+
+#include <linux/arm-smccc.h>
+#include <linux/cpuhotplug.h>
#include <linux/export.h>
+#include <linux/io.h>
#include <linux/jump_label.h>
+#include <linux/printk.h>
+#include <linux/psci.h>
+#include <linux/reboot.h>
+#include <linux/slab.h>
#include <linux/types.h>
+
#include <asm/paravirt.h>
+#include <asm/pvclock-abi.h>
+#include <asm/smp_plat.h>
struct static_key paravirt_steal_enabled;
struct static_key paravirt_steal_rq_enabled;
struct paravirt_patch_template pv_ops;
EXPORT_SYMBOL_GPL(pv_ops);
+
+struct pv_time_stolen_time_region {
+ struct pvclock_vcpu_stolen_time *kaddr;
+};
+
+static DEFINE_PER_CPU(struct pv_time_stolen_time_region, stolen_time_region);
+
+static bool steal_acc = true;
+static int __init parse_no_stealacc(char *arg)
+{
+ steal_acc = false;
+ return 0;
+}
+
+early_param("no-steal-acc", parse_no_stealacc);
+
+/* return stolen time in ns by asking the hypervisor */
+static u64 pv_steal_clock(int cpu)
+{
+ struct pv_time_stolen_time_region *reg;
+
+ reg = per_cpu_ptr(&stolen_time_region, cpu);
+ if (!reg->kaddr) {
+ pr_warn_once("stolen time enabled but not configured for cpu %d\n",
+ cpu);
+ return 0;
+ }
+
+ return le64_to_cpu(READ_ONCE(reg->kaddr->stolen_time));
+}
+
+static int stolen_time_dying_cpu(unsigned int cpu)
+{
+ struct pv_time_stolen_time_region *reg;
+
+ reg = this_cpu_ptr(&stolen_time_region);
+ if (!reg->kaddr)
+ return 0;
+
+ memunmap(reg->kaddr);
+ memset(reg, 0, sizeof(*reg));
+
+ return 0;
+}
+
+static int init_stolen_time_cpu(unsigned int cpu)
+{
+ struct pv_time_stolen_time_region *reg;
+ struct arm_smccc_res res;
+
+ reg = this_cpu_ptr(&stolen_time_region);
+
+ arm_smccc_1_1_invoke(ARM_SMCCC_HV_PV_TIME_ST, &res);
+
+ if (res.a0 == SMCCC_RET_NOT_SUPPORTED)
+ return -EINVAL;
+
+ reg->kaddr = memremap(res.a0,
+ sizeof(struct pvclock_vcpu_stolen_time),
+ MEMREMAP_WB);
+
+ if (!reg->kaddr) {
+ pr_warn("Failed to map stolen time data structure\n");
+ return -ENOMEM;
+ }
+
+ if (le32_to_cpu(reg->kaddr->revision) != 0 ||
+ le32_to_cpu(reg->kaddr->attributes) != 0) {
+ pr_warn_once("Unexpected revision or attributes in stolen time data\n");
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+static int pv_time_init_stolen_time(void)
+{
+ int ret;
+
+ ret = cpuhp_setup_state(CPUHP_AP_ARM_KVMPV_STARTING,
+ "hypervisor/arm/pvtime:starting",
+ init_stolen_time_cpu, stolen_time_dying_cpu);
+ if (ret < 0)
+ return ret;
+ return 0;
+}
+
+static bool has_pv_steal_clock(void)
+{
+ struct arm_smccc_res res;
+
+ /* To detect the presence of PV time support we require SMCCC 1.1+ */
+ if (psci_ops.smccc_version < SMCCC_VERSION_1_1)
+ return false;
+
+ arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
+ ARM_SMCCC_HV_PV_TIME_FEATURES, &res);
+
+ if (res.a0 != SMCCC_RET_SUCCESS)
+ return false;
+
+ arm_smccc_1_1_invoke(ARM_SMCCC_HV_PV_TIME_FEATURES,
+ ARM_SMCCC_HV_PV_TIME_ST, &res);
+
+ return (res.a0 == SMCCC_RET_SUCCESS);
+}
+
+int __init pv_time_init(void)
+{
+ int ret;
+
+ if (!has_pv_steal_clock())
+ return 0;
+
+ ret = pv_time_init_stolen_time();
+ if (ret)
+ return ret;
+
+ pv_ops.time.steal_clock = pv_steal_clock;
+
+ static_key_slow_inc(&paravirt_steal_enabled);
+ if (steal_acc)
+ static_key_slow_inc(&paravirt_steal_rq_enabled);
+
+ pr_info("using stolen time PV\n");
+
+ return 0;
+}
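The structure memremap()'d above is shared with the hypervisor and follows Arm's paravirtualized time spec (DEN 0057A). The kernel's definition, added alongside this series in asm/pvclock-abi.h, is approximately:

    struct pvclock_vcpu_stolen_time {
    	__le32 revision;
    	__le32 attributes;
    	__le64 stolen_time;
    	/* Structure must be 64 byte aligned, pad to that size */
    	u8 padding[48];
    } __packed;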
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index a0b4f1bca491..e40b65645c86 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -158,133 +158,74 @@ armv8pmu_events_sysfs_show(struct device *dev,
return sprintf(page, "event=0x%03llx\n", pmu_attr->id);
}
-#define ARMV8_EVENT_ATTR(name, config) \
- PMU_EVENT_ATTR(name, armv8_event_attr_##name, \
- config, armv8pmu_events_sysfs_show)
-
-ARMV8_EVENT_ATTR(sw_incr, ARMV8_PMUV3_PERFCTR_SW_INCR);
-ARMV8_EVENT_ATTR(l1i_cache_refill, ARMV8_PMUV3_PERFCTR_L1I_CACHE_REFILL);
-ARMV8_EVENT_ATTR(l1i_tlb_refill, ARMV8_PMUV3_PERFCTR_L1I_TLB_REFILL);
-ARMV8_EVENT_ATTR(l1d_cache_refill, ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL);
-ARMV8_EVENT_ATTR(l1d_cache, ARMV8_PMUV3_PERFCTR_L1D_CACHE);
-ARMV8_EVENT_ATTR(l1d_tlb_refill, ARMV8_PMUV3_PERFCTR_L1D_TLB_REFILL);
-ARMV8_EVENT_ATTR(ld_retired, ARMV8_PMUV3_PERFCTR_LD_RETIRED);
-ARMV8_EVENT_ATTR(st_retired, ARMV8_PMUV3_PERFCTR_ST_RETIRED);
-ARMV8_EVENT_ATTR(inst_retired, ARMV8_PMUV3_PERFCTR_INST_RETIRED);
-ARMV8_EVENT_ATTR(exc_taken, ARMV8_PMUV3_PERFCTR_EXC_TAKEN);
-ARMV8_EVENT_ATTR(exc_return, ARMV8_PMUV3_PERFCTR_EXC_RETURN);
-ARMV8_EVENT_ATTR(cid_write_retired, ARMV8_PMUV3_PERFCTR_CID_WRITE_RETIRED);
-ARMV8_EVENT_ATTR(pc_write_retired, ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED);
-ARMV8_EVENT_ATTR(br_immed_retired, ARMV8_PMUV3_PERFCTR_BR_IMMED_RETIRED);
-ARMV8_EVENT_ATTR(br_return_retired, ARMV8_PMUV3_PERFCTR_BR_RETURN_RETIRED);
-ARMV8_EVENT_ATTR(unaligned_ldst_retired, ARMV8_PMUV3_PERFCTR_UNALIGNED_LDST_RETIRED);
-ARMV8_EVENT_ATTR(br_mis_pred, ARMV8_PMUV3_PERFCTR_BR_MIS_PRED);
-ARMV8_EVENT_ATTR(cpu_cycles, ARMV8_PMUV3_PERFCTR_CPU_CYCLES);
-ARMV8_EVENT_ATTR(br_pred, ARMV8_PMUV3_PERFCTR_BR_PRED);
-ARMV8_EVENT_ATTR(mem_access, ARMV8_PMUV3_PERFCTR_MEM_ACCESS);
-ARMV8_EVENT_ATTR(l1i_cache, ARMV8_PMUV3_PERFCTR_L1I_CACHE);
-ARMV8_EVENT_ATTR(l1d_cache_wb, ARMV8_PMUV3_PERFCTR_L1D_CACHE_WB);
-ARMV8_EVENT_ATTR(l2d_cache, ARMV8_PMUV3_PERFCTR_L2D_CACHE);
-ARMV8_EVENT_ATTR(l2d_cache_refill, ARMV8_PMUV3_PERFCTR_L2D_CACHE_REFILL);
-ARMV8_EVENT_ATTR(l2d_cache_wb, ARMV8_PMUV3_PERFCTR_L2D_CACHE_WB);
-ARMV8_EVENT_ATTR(bus_access, ARMV8_PMUV3_PERFCTR_BUS_ACCESS);
-ARMV8_EVENT_ATTR(memory_error, ARMV8_PMUV3_PERFCTR_MEMORY_ERROR);
-ARMV8_EVENT_ATTR(inst_spec, ARMV8_PMUV3_PERFCTR_INST_SPEC);
-ARMV8_EVENT_ATTR(ttbr_write_retired, ARMV8_PMUV3_PERFCTR_TTBR_WRITE_RETIRED);
-ARMV8_EVENT_ATTR(bus_cycles, ARMV8_PMUV3_PERFCTR_BUS_CYCLES);
-/* Don't expose the chain event in /sys, since it's useless in isolation */
-ARMV8_EVENT_ATTR(l1d_cache_allocate, ARMV8_PMUV3_PERFCTR_L1D_CACHE_ALLOCATE);
-ARMV8_EVENT_ATTR(l2d_cache_allocate, ARMV8_PMUV3_PERFCTR_L2D_CACHE_ALLOCATE);
-ARMV8_EVENT_ATTR(br_retired, ARMV8_PMUV3_PERFCTR_BR_RETIRED);
-ARMV8_EVENT_ATTR(br_mis_pred_retired, ARMV8_PMUV3_PERFCTR_BR_MIS_PRED_RETIRED);
-ARMV8_EVENT_ATTR(stall_frontend, ARMV8_PMUV3_PERFCTR_STALL_FRONTEND);
-ARMV8_EVENT_ATTR(stall_backend, ARMV8_PMUV3_PERFCTR_STALL_BACKEND);
-ARMV8_EVENT_ATTR(l1d_tlb, ARMV8_PMUV3_PERFCTR_L1D_TLB);
-ARMV8_EVENT_ATTR(l1i_tlb, ARMV8_PMUV3_PERFCTR_L1I_TLB);
-ARMV8_EVENT_ATTR(l2i_cache, ARMV8_PMUV3_PERFCTR_L2I_CACHE);
-ARMV8_EVENT_ATTR(l2i_cache_refill, ARMV8_PMUV3_PERFCTR_L2I_CACHE_REFILL);
-ARMV8_EVENT_ATTR(l3d_cache_allocate, ARMV8_PMUV3_PERFCTR_L3D_CACHE_ALLOCATE);
-ARMV8_EVENT_ATTR(l3d_cache_refill, ARMV8_PMUV3_PERFCTR_L3D_CACHE_REFILL);
-ARMV8_EVENT_ATTR(l3d_cache, ARMV8_PMUV3_PERFCTR_L3D_CACHE);
-ARMV8_EVENT_ATTR(l3d_cache_wb, ARMV8_PMUV3_PERFCTR_L3D_CACHE_WB);
-ARMV8_EVENT_ATTR(l2d_tlb_refill, ARMV8_PMUV3_PERFCTR_L2D_TLB_REFILL);
-ARMV8_EVENT_ATTR(l2i_tlb_refill, ARMV8_PMUV3_PERFCTR_L2I_TLB_REFILL);
-ARMV8_EVENT_ATTR(l2d_tlb, ARMV8_PMUV3_PERFCTR_L2D_TLB);
-ARMV8_EVENT_ATTR(l2i_tlb, ARMV8_PMUV3_PERFCTR_L2I_TLB);
-ARMV8_EVENT_ATTR(remote_access, ARMV8_PMUV3_PERFCTR_REMOTE_ACCESS);
-ARMV8_EVENT_ATTR(ll_cache, ARMV8_PMUV3_PERFCTR_LL_CACHE);
-ARMV8_EVENT_ATTR(ll_cache_miss, ARMV8_PMUV3_PERFCTR_LL_CACHE_MISS);
-ARMV8_EVENT_ATTR(dtlb_walk, ARMV8_PMUV3_PERFCTR_DTLB_WALK);
-ARMV8_EVENT_ATTR(itlb_walk, ARMV8_PMUV3_PERFCTR_ITLB_WALK);
-ARMV8_EVENT_ATTR(ll_cache_rd, ARMV8_PMUV3_PERFCTR_LL_CACHE_RD);
-ARMV8_EVENT_ATTR(ll_cache_miss_rd, ARMV8_PMUV3_PERFCTR_LL_CACHE_MISS_RD);
-ARMV8_EVENT_ATTR(remote_access_rd, ARMV8_PMUV3_PERFCTR_REMOTE_ACCESS_RD);
-ARMV8_EVENT_ATTR(sample_pop, ARMV8_SPE_PERFCTR_SAMPLE_POP);
-ARMV8_EVENT_ATTR(sample_feed, ARMV8_SPE_PERFCTR_SAMPLE_FEED);
-ARMV8_EVENT_ATTR(sample_filtrate, ARMV8_SPE_PERFCTR_SAMPLE_FILTRATE);
-ARMV8_EVENT_ATTR(sample_collision, ARMV8_SPE_PERFCTR_SAMPLE_COLLISION);
+#define ARMV8_EVENT_ATTR(name, config) \
+ (&((struct perf_pmu_events_attr) { \
+ .attr = __ATTR(name, 0444, armv8pmu_events_sysfs_show, NULL), \
+ .id = config, \
+ }).attr.attr)
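The rewritten macro relies on a C99 compound literal at file scope, which has static storage duration, so taking the address of its embedded attribute is well-defined. Expanded, a single array entry below is roughly:

    &((struct perf_pmu_events_attr) {
    	.attr = __ATTR(cpu_cycles, 0444, armv8pmu_events_sysfs_show, NULL),
    	.id   = ARMV8_PMUV3_PERFCTR_CPU_CYCLES,
    }).attr.attr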
static struct attribute *armv8_pmuv3_event_attrs[] = {
- &armv8_event_attr_sw_incr.attr.attr,
- &armv8_event_attr_l1i_cache_refill.attr.attr,
- &armv8_event_attr_l1i_tlb_refill.attr.attr,
- &armv8_event_attr_l1d_cache_refill.attr.attr,
- &armv8_event_attr_l1d_cache.attr.attr,
- &armv8_event_attr_l1d_tlb_refill.attr.attr,
- &armv8_event_attr_ld_retired.attr.attr,
- &armv8_event_attr_st_retired.attr.attr,
- &armv8_event_attr_inst_retired.attr.attr,
- &armv8_event_attr_exc_taken.attr.attr,
- &armv8_event_attr_exc_return.attr.attr,
- &armv8_event_attr_cid_write_retired.attr.attr,
- &armv8_event_attr_pc_write_retired.attr.attr,
- &armv8_event_attr_br_immed_retired.attr.attr,
- &armv8_event_attr_br_return_retired.attr.attr,
- &armv8_event_attr_unaligned_ldst_retired.attr.attr,
- &armv8_event_attr_br_mis_pred.attr.attr,
- &armv8_event_attr_cpu_cycles.attr.attr,
- &armv8_event_attr_br_pred.attr.attr,
- &armv8_event_attr_mem_access.attr.attr,
- &armv8_event_attr_l1i_cache.attr.attr,
- &armv8_event_attr_l1d_cache_wb.attr.attr,
- &armv8_event_attr_l2d_cache.attr.attr,
- &armv8_event_attr_l2d_cache_refill.attr.attr,
- &armv8_event_attr_l2d_cache_wb.attr.attr,
- &armv8_event_attr_bus_access.attr.attr,
- &armv8_event_attr_memory_error.attr.attr,
- &armv8_event_attr_inst_spec.attr.attr,
- &armv8_event_attr_ttbr_write_retired.attr.attr,
- &armv8_event_attr_bus_cycles.attr.attr,
- &armv8_event_attr_l1d_cache_allocate.attr.attr,
- &armv8_event_attr_l2d_cache_allocate.attr.attr,
- &armv8_event_attr_br_retired.attr.attr,
- &armv8_event_attr_br_mis_pred_retired.attr.attr,
- &armv8_event_attr_stall_frontend.attr.attr,
- &armv8_event_attr_stall_backend.attr.attr,
- &armv8_event_attr_l1d_tlb.attr.attr,
- &armv8_event_attr_l1i_tlb.attr.attr,
- &armv8_event_attr_l2i_cache.attr.attr,
- &armv8_event_attr_l2i_cache_refill.attr.attr,
- &armv8_event_attr_l3d_cache_allocate.attr.attr,
- &armv8_event_attr_l3d_cache_refill.attr.attr,
- &armv8_event_attr_l3d_cache.attr.attr,
- &armv8_event_attr_l3d_cache_wb.attr.attr,
- &armv8_event_attr_l2d_tlb_refill.attr.attr,
- &armv8_event_attr_l2i_tlb_refill.attr.attr,
- &armv8_event_attr_l2d_tlb.attr.attr,
- &armv8_event_attr_l2i_tlb.attr.attr,
- &armv8_event_attr_remote_access.attr.attr,
- &armv8_event_attr_ll_cache.attr.attr,
- &armv8_event_attr_ll_cache_miss.attr.attr,
- &armv8_event_attr_dtlb_walk.attr.attr,
- &armv8_event_attr_itlb_walk.attr.attr,
- &armv8_event_attr_ll_cache_rd.attr.attr,
- &armv8_event_attr_ll_cache_miss_rd.attr.attr,
- &armv8_event_attr_remote_access_rd.attr.attr,
- &armv8_event_attr_sample_pop.attr.attr,
- &armv8_event_attr_sample_feed.attr.attr,
- &armv8_event_attr_sample_filtrate.attr.attr,
- &armv8_event_attr_sample_collision.attr.attr,
+ ARMV8_EVENT_ATTR(sw_incr, ARMV8_PMUV3_PERFCTR_SW_INCR),
+ ARMV8_EVENT_ATTR(l1i_cache_refill, ARMV8_PMUV3_PERFCTR_L1I_CACHE_REFILL),
+ ARMV8_EVENT_ATTR(l1i_tlb_refill, ARMV8_PMUV3_PERFCTR_L1I_TLB_REFILL),
+ ARMV8_EVENT_ATTR(l1d_cache_refill, ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL),
+ ARMV8_EVENT_ATTR(l1d_cache, ARMV8_PMUV3_PERFCTR_L1D_CACHE),
+ ARMV8_EVENT_ATTR(l1d_tlb_refill, ARMV8_PMUV3_PERFCTR_L1D_TLB_REFILL),
+ ARMV8_EVENT_ATTR(ld_retired, ARMV8_PMUV3_PERFCTR_LD_RETIRED),
+ ARMV8_EVENT_ATTR(st_retired, ARMV8_PMUV3_PERFCTR_ST_RETIRED),
+ ARMV8_EVENT_ATTR(inst_retired, ARMV8_PMUV3_PERFCTR_INST_RETIRED),
+ ARMV8_EVENT_ATTR(exc_taken, ARMV8_PMUV3_PERFCTR_EXC_TAKEN),
+ ARMV8_EVENT_ATTR(exc_return, ARMV8_PMUV3_PERFCTR_EXC_RETURN),
+ ARMV8_EVENT_ATTR(cid_write_retired, ARMV8_PMUV3_PERFCTR_CID_WRITE_RETIRED),
+ ARMV8_EVENT_ATTR(pc_write_retired, ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED),
+ ARMV8_EVENT_ATTR(br_immed_retired, ARMV8_PMUV3_PERFCTR_BR_IMMED_RETIRED),
+ ARMV8_EVENT_ATTR(br_return_retired, ARMV8_PMUV3_PERFCTR_BR_RETURN_RETIRED),
+ ARMV8_EVENT_ATTR(unaligned_ldst_retired, ARMV8_PMUV3_PERFCTR_UNALIGNED_LDST_RETIRED),
+ ARMV8_EVENT_ATTR(br_mis_pred, ARMV8_PMUV3_PERFCTR_BR_MIS_PRED),
+ ARMV8_EVENT_ATTR(cpu_cycles, ARMV8_PMUV3_PERFCTR_CPU_CYCLES),
+ ARMV8_EVENT_ATTR(br_pred, ARMV8_PMUV3_PERFCTR_BR_PRED),
+ ARMV8_EVENT_ATTR(mem_access, ARMV8_PMUV3_PERFCTR_MEM_ACCESS),
+ ARMV8_EVENT_ATTR(l1i_cache, ARMV8_PMUV3_PERFCTR_L1I_CACHE),
+ ARMV8_EVENT_ATTR(l1d_cache_wb, ARMV8_PMUV3_PERFCTR_L1D_CACHE_WB),
+ ARMV8_EVENT_ATTR(l2d_cache, ARMV8_PMUV3_PERFCTR_L2D_CACHE),
+ ARMV8_EVENT_ATTR(l2d_cache_refill, ARMV8_PMUV3_PERFCTR_L2D_CACHE_REFILL),
+ ARMV8_EVENT_ATTR(l2d_cache_wb, ARMV8_PMUV3_PERFCTR_L2D_CACHE_WB),
+ ARMV8_EVENT_ATTR(bus_access, ARMV8_PMUV3_PERFCTR_BUS_ACCESS),
+ ARMV8_EVENT_ATTR(memory_error, ARMV8_PMUV3_PERFCTR_MEMORY_ERROR),
+ ARMV8_EVENT_ATTR(inst_spec, ARMV8_PMUV3_PERFCTR_INST_SPEC),
+ ARMV8_EVENT_ATTR(ttbr_write_retired, ARMV8_PMUV3_PERFCTR_TTBR_WRITE_RETIRED),
+ ARMV8_EVENT_ATTR(bus_cycles, ARMV8_PMUV3_PERFCTR_BUS_CYCLES),
+ /* Don't expose the chain event in /sys, since it's useless in isolation */
+ ARMV8_EVENT_ATTR(l1d_cache_allocate, ARMV8_PMUV3_PERFCTR_L1D_CACHE_ALLOCATE),
+ ARMV8_EVENT_ATTR(l2d_cache_allocate, ARMV8_PMUV3_PERFCTR_L2D_CACHE_ALLOCATE),
+ ARMV8_EVENT_ATTR(br_retired, ARMV8_PMUV3_PERFCTR_BR_RETIRED),
+ ARMV8_EVENT_ATTR(br_mis_pred_retired, ARMV8_PMUV3_PERFCTR_BR_MIS_PRED_RETIRED),
+ ARMV8_EVENT_ATTR(stall_frontend, ARMV8_PMUV3_PERFCTR_STALL_FRONTEND),
+ ARMV8_EVENT_ATTR(stall_backend, ARMV8_PMUV3_PERFCTR_STALL_BACKEND),
+ ARMV8_EVENT_ATTR(l1d_tlb, ARMV8_PMUV3_PERFCTR_L1D_TLB),
+ ARMV8_EVENT_ATTR(l1i_tlb, ARMV8_PMUV3_PERFCTR_L1I_TLB),
+ ARMV8_EVENT_ATTR(l2i_cache, ARMV8_PMUV3_PERFCTR_L2I_CACHE),
+ ARMV8_EVENT_ATTR(l2i_cache_refill, ARMV8_PMUV3_PERFCTR_L2I_CACHE_REFILL),
+ ARMV8_EVENT_ATTR(l3d_cache_allocate, ARMV8_PMUV3_PERFCTR_L3D_CACHE_ALLOCATE),
+ ARMV8_EVENT_ATTR(l3d_cache_refill, ARMV8_PMUV3_PERFCTR_L3D_CACHE_REFILL),
+ ARMV8_EVENT_ATTR(l3d_cache, ARMV8_PMUV3_PERFCTR_L3D_CACHE),
+ ARMV8_EVENT_ATTR(l3d_cache_wb, ARMV8_PMUV3_PERFCTR_L3D_CACHE_WB),
+ ARMV8_EVENT_ATTR(l2d_tlb_refill, ARMV8_PMUV3_PERFCTR_L2D_TLB_REFILL),
+ ARMV8_EVENT_ATTR(l2i_tlb_refill, ARMV8_PMUV3_PERFCTR_L2I_TLB_REFILL),
+ ARMV8_EVENT_ATTR(l2d_tlb, ARMV8_PMUV3_PERFCTR_L2D_TLB),
+ ARMV8_EVENT_ATTR(l2i_tlb, ARMV8_PMUV3_PERFCTR_L2I_TLB),
+ ARMV8_EVENT_ATTR(remote_access, ARMV8_PMUV3_PERFCTR_REMOTE_ACCESS),
+ ARMV8_EVENT_ATTR(ll_cache, ARMV8_PMUV3_PERFCTR_LL_CACHE),
+ ARMV8_EVENT_ATTR(ll_cache_miss, ARMV8_PMUV3_PERFCTR_LL_CACHE_MISS),
+ ARMV8_EVENT_ATTR(dtlb_walk, ARMV8_PMUV3_PERFCTR_DTLB_WALK),
+ ARMV8_EVENT_ATTR(itlb_walk, ARMV8_PMUV3_PERFCTR_ITLB_WALK),
+ ARMV8_EVENT_ATTR(ll_cache_rd, ARMV8_PMUV3_PERFCTR_LL_CACHE_RD),
+ ARMV8_EVENT_ATTR(ll_cache_miss_rd, ARMV8_PMUV3_PERFCTR_LL_CACHE_MISS_RD),
+ ARMV8_EVENT_ATTR(remote_access_rd, ARMV8_PMUV3_PERFCTR_REMOTE_ACCESS_RD),
+ ARMV8_EVENT_ATTR(sample_pop, ARMV8_SPE_PERFCTR_SAMPLE_POP),
+ ARMV8_EVENT_ATTR(sample_feed, ARMV8_SPE_PERFCTR_SAMPLE_FEED),
+ ARMV8_EVENT_ATTR(sample_filtrate, ARMV8_SPE_PERFCTR_SAMPLE_FILTRATE),
+ ARMV8_EVENT_ATTR(sample_collision, ARMV8_SPE_PERFCTR_SAMPLE_COLLISION),
NULL,
};
diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
index c4452827419b..d1c95dcf1d78 100644
--- a/arch/arm64/kernel/probes/kprobes.c
+++ b/arch/arm64/kernel/probes/kprobes.c
@@ -455,10 +455,6 @@ int __init arch_populate_kprobe_blacklist(void)
(unsigned long)__irqentry_text_end);
if (ret)
return ret;
- ret = kprobe_add_area_blacklist((unsigned long)__exception_text_start,
- (unsigned long)__exception_text_end);
- if (ret)
- return ret;
ret = kprobe_add_area_blacklist((unsigned long)__idmap_text_start,
(unsigned long)__idmap_text_end);
if (ret)
diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c
index c9f72b2665f1..43ae4e0c968f 100644
--- a/arch/arm64/kernel/psci.c
+++ b/arch/arm64/kernel/psci.c
@@ -81,7 +81,8 @@ static void cpu_psci_cpu_die(unsigned int cpu)
static int cpu_psci_cpu_kill(unsigned int cpu)
{
- int err, i;
+ int err;
+ unsigned long start, end;
if (!psci_ops.affinity_info)
return 0;
@@ -91,16 +92,18 @@ static int cpu_psci_cpu_kill(unsigned int cpu)
* while it is dying. So, try again a few times.
*/
- for (i = 0; i < 10; i++) {
+ start = jiffies;
+ end = start + msecs_to_jiffies(100);
+ do {
err = psci_ops.affinity_info(cpu_logical_map(cpu), 0);
if (err == PSCI_0_2_AFFINITY_LEVEL_OFF) {
- pr_info("CPU%d killed.\n", cpu);
+ pr_info("CPU%d killed (polled %d ms)\n", cpu,
+ jiffies_to_msecs(jiffies - start));
return 0;
}
- msleep(10);
- pr_info("Retrying again to check for CPU kill\n");
- }
+ usleep_range(100, 1000);
+ } while (time_before(jiffies, end));
pr_warn("CPU%d may not have shut down cleanly (AFFINITY_INFO reports %d)\n",
cpu, err);
diff --git a/arch/arm64/kernel/sdei.c b/arch/arm64/kernel/sdei.c
index ea94cf8f9dc6..d6259dac62b6 100644
--- a/arch/arm64/kernel/sdei.c
+++ b/arch/arm64/kernel/sdei.c
@@ -2,6 +2,7 @@
// Copyright (C) 2017 Arm Ltd.
#define pr_fmt(fmt) "sdei: " fmt
+#include <linux/arm-smccc.h>
#include <linux/arm_sdei.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
@@ -161,7 +162,7 @@ unsigned long sdei_arch_get_entry_point(int conduit)
return 0;
}
- sdei_exit_mode = (conduit == CONDUIT_HVC) ? SDEI_EXIT_HVC : SDEI_EXIT_SMC;
+ sdei_exit_mode = (conduit == SMCCC_CONDUIT_HVC) ? SDEI_EXIT_HVC : SDEI_EXIT_SMC;
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
if (arm64_kernel_unmapped_at_el0()) {
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index dc9fe879c279..ab149bcc3dc7 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -345,8 +345,7 @@ void __cpu_die(unsigned int cpu)
*/
err = op_cpu_kill(cpu);
if (err)
- pr_warn("CPU%d may not have shut down cleanly: %d\n",
- cpu, err);
+ pr_warn("CPU%d may not have shut down cleanly: %d\n", cpu, err);
}
/*
@@ -976,8 +975,8 @@ void smp_send_stop(void)
udelay(1);
if (num_online_cpus() > 1)
- pr_warning("SMP: failed to stop secondary CPUs %*pbl\n",
- cpumask_pr_args(cpu_online_mask));
+ pr_warn("SMP: failed to stop secondary CPUs %*pbl\n",
+ cpumask_pr_args(cpu_online_mask));
sdei_mask_local_cpu();
}
@@ -1017,8 +1016,8 @@ void crash_smp_send_stop(void)
udelay(1);
if (atomic_read(&waiting_for_crash_ipi) > 0)
- pr_warning("SMP: failed to stop secondary CPUs %*pbl\n",
- cpumask_pr_args(&mask));
+ pr_warn("SMP: failed to stop secondary CPUs %*pbl\n",
+ cpumask_pr_args(&mask));
sdei_mask_local_cpu();
}
diff --git a/arch/arm64/kernel/sys_compat.c b/arch/arm64/kernel/sys_compat.c
index f1cb64959427..3c18c2454089 100644
--- a/arch/arm64/kernel/sys_compat.c
+++ b/arch/arm64/kernel/sys_compat.c
@@ -8,6 +8,7 @@
*/
#include <linux/compat.h>
+#include <linux/cpufeature.h>
#include <linux/personality.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
@@ -17,6 +18,7 @@
#include <asm/cacheflush.h>
#include <asm/system_misc.h>
+#include <asm/tlbflush.h>
#include <asm/unistd.h>
static long
@@ -30,6 +32,15 @@ __do_compat_cache_op(unsigned long start, unsigned long end)
if (fatal_signal_pending(current))
return 0;
+ if (cpus_have_const_cap(ARM64_WORKAROUND_1542419)) {
+ /*
+ * The workaround requires an inner-shareable tlbi.
+ * We pick the reserved-ASID to minimise the impact.
+ */
+ __tlbi(aside1is, __TLBI_VADDR(0, 0));
+ dsb(ish);
+ }
+
ret = __flush_cache_user_range(start, start + chunk);
if (ret)
return ret;
diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c
index 871c739f060a..9a9d98a443fc 100644
--- a/arch/arm64/kernel/syscall.c
+++ b/arch/arm64/kernel/syscall.c
@@ -154,14 +154,14 @@ static inline void sve_user_discard(void)
sve_user_disable();
}
-asmlinkage void el0_svc_handler(struct pt_regs *regs)
+void el0_svc_handler(struct pt_regs *regs)
{
sve_user_discard();
el0_svc_common(regs, regs->regs[8], __NR_syscalls, sys_call_table);
}
#ifdef CONFIG_COMPAT
-asmlinkage void el0_svc_compat_handler(struct pt_regs *regs)
+void el0_svc_compat_handler(struct pt_regs *regs)
{
el0_svc_common(regs, regs->regs[7], __NR_compat_syscalls,
compat_sys_call_table);
diff --git a/arch/arm64/kernel/time.c b/arch/arm64/kernel/time.c
index 0b2946414dc9..73f06d4b3aae 100644
--- a/arch/arm64/kernel/time.c
+++ b/arch/arm64/kernel/time.c
@@ -30,6 +30,7 @@
#include <asm/thread_info.h>
#include <asm/stacktrace.h>
+#include <asm/paravirt.h>
unsigned long profile_pc(struct pt_regs *regs)
{
@@ -65,4 +66,6 @@ void __init time_init(void)
/* Calibrate the delay loop directly */
lpj_fine = arch_timer_rate / HZ;
+
+ pv_time_init();
}
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 34739e80211b..73caf35c2262 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -35,6 +35,7 @@
#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/insn.h>
+#include <asm/kprobes.h>
#include <asm/traps.h>
#include <asm/smp.h>
#include <asm/stack_pointer.h>
@@ -393,7 +394,7 @@ void arm64_notify_segfault(unsigned long addr)
force_signal_inject(SIGSEGV, code, addr);
}
-asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
+void do_undefinstr(struct pt_regs *regs)
{
/* check for AArch32 breakpoint instructions */
if (!aarch32_break_handler(regs))
@@ -405,6 +406,7 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
BUG_ON(!user_mode(regs));
force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc);
}
+NOKPROBE_SYMBOL(do_undefinstr);
#define __user_cache_maint(insn, address, res) \
if (address >= user_addr_max()) { \
@@ -470,6 +472,15 @@ static void ctr_read_handler(unsigned int esr, struct pt_regs *regs)
int rt = ESR_ELx_SYS64_ISS_RT(esr);
unsigned long val = arm64_ftr_reg_user_value(&arm64_ftr_reg_ctrel0);
+ if (cpus_have_const_cap(ARM64_WORKAROUND_1542419)) {
+ /* Hide DIC so that we can trap the unnecessary maintenance... */
+ val &= ~BIT(CTR_DIC_SHIFT);
+
+ /* ... and fake IminLine to reduce the number of traps. */
+ val &= ~CTR_IMINLINE_MASK;
+ val |= (PAGE_SHIFT - 2) & CTR_IMINLINE_MASK;
+ }
+
pt_regs_write_reg(regs, rt, val);
arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
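The faked IminLine is chosen so that the advertised I-cache line equals one page: CTR_EL0.IminLine holds log2 of the line size in 4-byte words, so userspace cache-maintenance loops step a page at a time and trap once per page rather than once per real cache line. For example, assuming 4K pages:

    /*
     * IminLine  = PAGE_SHIFT - 2 = 12 - 2 = 10
     * line size = 4 << 10 = 4096 bytes = one page
     */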
@@ -667,7 +678,7 @@ static const struct sys64_hook cp15_64_hooks[] = {
{},
};
-asmlinkage void __exception do_cp15instr(unsigned int esr, struct pt_regs *regs)
+void do_cp15instr(unsigned int esr, struct pt_regs *regs)
{
const struct sys64_hook *hook, *hook_base;
@@ -705,9 +716,10 @@ asmlinkage void __exception do_cp15instr(unsigned int esr, struct pt_regs *regs)
*/
do_undefinstr(regs);
}
+NOKPROBE_SYMBOL(do_cp15instr);
#endif
-asmlinkage void __exception do_sysinstr(unsigned int esr, struct pt_regs *regs)
+void do_sysinstr(unsigned int esr, struct pt_regs *regs)
{
const struct sys64_hook *hook;
@@ -724,6 +736,7 @@ asmlinkage void __exception do_sysinstr(unsigned int esr, struct pt_regs *regs)
*/
do_undefinstr(regs);
}
+NOKPROBE_SYMBOL(do_sysinstr);
static const char *esr_class_str[] = {
[0 ... ESR_ELx_EC_MAX] = "UNRECOGNIZED EC",
@@ -793,7 +806,7 @@ asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
* bad_el0_sync handles unexpected, but potentially recoverable synchronous
* exceptions taken from EL0. Unlike bad_mode, this returns.
*/
-asmlinkage void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr)
+void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr)
{
void __user *pc = (void __user *)instruction_pointer(regs);
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index aa76f7259668..841a8b4ce114 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -5,6 +5,8 @@
* Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
*/
+#define RO_EXCEPTION_TABLE_ALIGN 8
+
#include <asm-generic/vmlinux.lds.h>
#include <asm/cache.h>
#include <asm/kernel-pgtable.h>
@@ -111,9 +113,6 @@ SECTIONS
}
.text : { /* Real text segment */
_stext = .; /* Text and read-only data */
- __exception_text_start = .;
- *(.exception.text)
- __exception_text_end = .;
IRQENTRY_TEXT
SOFTIRQENTRY_TEXT
ENTRY_TEXT
@@ -135,11 +134,9 @@ SECTIONS
. = ALIGN(SEGMENT_ALIGN);
_etext = .; /* End of text section */
- RO_DATA(PAGE_SIZE) /* everything from this point to */
- EXCEPTION_TABLE(8) /* __init_begin will be marked RO NX */
- NOTES
+ /* everything from this point to __init_begin will be marked RO NX */
+ RO_DATA(PAGE_SIZE)
- . = ALIGN(PAGE_SIZE);
idmap_pg_dir = .;
. += IDMAP_DIR_SIZE;
@@ -215,7 +212,7 @@ SECTIONS
_data = .;
_sdata = .;
- RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_ALIGN)
+ RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_ALIGN)
/*
* Data written with the MMU off but read with the MMU on requires
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index a67121d419a2..a475c68cbfec 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -21,6 +21,8 @@ if VIRTUALIZATION
config KVM
bool "Kernel-based Virtual Machine (KVM) support"
depends on OF
+ # for TASKSTATS/TASK_DELAY_ACCT:
+ depends on NET && MULTIUSER
select MMU_NOTIFIER
select PREEMPT_NOTIFIERS
select HAVE_KVM_CPU_RELAX_INTERCEPT
@@ -39,6 +41,8 @@ config KVM
select IRQ_BYPASS_MANAGER
select HAVE_KVM_IRQ_BYPASS
select HAVE_KVM_VCPU_RUN_PID_CHANGE
+ select TASKSTATS
+ select TASK_DELAY_ACCT
---help---
Support hosting virtualized guest machines.
We don't support KVM with 16K page tables yet, due to the multiple
diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
index 3ac1a64d2fb9..5ffbdc39e780 100644
--- a/arch/arm64/kvm/Makefile
+++ b/arch/arm64/kvm/Makefile
@@ -13,6 +13,8 @@ obj-$(CONFIG_KVM_ARM_HOST) += hyp/
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/eventfd.o $(KVM)/vfio.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/arm.o $(KVM)/arm/mmu.o $(KVM)/arm/mmio.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/psci.o $(KVM)/arm/perf.o
+kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hypercalls.o
+kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/pvtime.o
kvm-$(CONFIG_KVM_ARM_HOST) += inject_fault.o regmap.o va_layout.o
kvm-$(CONFIG_KVM_ARM_HOST) += hyp.o hyp-init.o handle_exit.o
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index dfd626447482..2fff06114a8f 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -34,6 +34,10 @@
#define VCPU_STAT(x) { #x, offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU }
struct kvm_stats_debugfs_item debugfs_entries[] = {
+ VCPU_STAT(halt_successful_poll),
+ VCPU_STAT(halt_attempted_poll),
+ VCPU_STAT(halt_poll_invalid),
+ VCPU_STAT(halt_wakeup),
VCPU_STAT(hvc_exit_stat),
VCPU_STAT(wfe_exit_stat),
VCPU_STAT(wfi_exit_stat),
@@ -712,6 +716,12 @@ int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
if (events->exception.serror_pending && events->exception.serror_has_esr)
events->exception.serror_esr = vcpu_get_vsesr(vcpu);
+ /*
+ * We never return a pending ext_dabt here because we deliver it to
+ * the virtual CPU directly when setting the event and it's no longer
+ * 'pending' at this point.
+ */
+
return 0;
}
@@ -720,6 +730,7 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
{
bool serror_pending = events->exception.serror_pending;
bool has_esr = events->exception.serror_has_esr;
+ bool ext_dabt_pending = events->exception.ext_dabt_pending;
if (serror_pending && has_esr) {
if (!cpus_have_const_cap(ARM64_HAS_RAS_EXTN))
@@ -733,6 +744,9 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
kvm_inject_vabt(vcpu);
}
+ if (ext_dabt_pending)
+ kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
+
return 0;
}
@@ -858,6 +872,9 @@ int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
case KVM_ARM_VCPU_TIMER_CTRL:
ret = kvm_arm_timer_set_attr(vcpu, attr);
break;
+ case KVM_ARM_VCPU_PVTIME_CTRL:
+ ret = kvm_arm_pvtime_set_attr(vcpu, attr);
+ break;
default:
ret = -ENXIO;
break;
@@ -878,6 +895,9 @@ int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
case KVM_ARM_VCPU_TIMER_CTRL:
ret = kvm_arm_timer_get_attr(vcpu, attr);
break;
+ case KVM_ARM_VCPU_PVTIME_CTRL:
+ ret = kvm_arm_pvtime_get_attr(vcpu, attr);
+ break;
default:
ret = -ENXIO;
break;
@@ -898,6 +918,9 @@ int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
case KVM_ARM_VCPU_TIMER_CTRL:
ret = kvm_arm_timer_has_attr(vcpu, attr);
break;
+ case KVM_ARM_VCPU_PVTIME_CTRL:
+ ret = kvm_arm_pvtime_has_attr(vcpu, attr);
+ break;
default:
ret = -ENXIO;
break;
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index 706cca23f0d2..aacfc55de44c 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -11,8 +11,6 @@
#include <linux/kvm.h>
#include <linux/kvm_host.h>
-#include <kvm/arm_psci.h>
-
#include <asm/esr.h>
#include <asm/exception.h>
#include <asm/kvm_asm.h>
@@ -22,6 +20,8 @@
#include <asm/debug-monitors.h>
#include <asm/traps.h>
+#include <kvm/arm_hypercalls.h>
+
#define CREATE_TRACE_POINTS
#include "trace.h"
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index 799e84a40335..72fbbd86eb5e 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -12,7 +12,7 @@
#include <kvm/arm_psci.h>
-#include <asm/arch_gicv3.h>
+#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/kprobes.h>
#include <asm/kvm_asm.h>
@@ -118,6 +118,20 @@ static void __hyp_text __activate_traps_nvhe(struct kvm_vcpu *vcpu)
}
write_sysreg(val, cptr_el2);
+
+ if (cpus_have_const_cap(ARM64_WORKAROUND_1319367)) {
+ struct kvm_cpu_context *ctxt = &vcpu->arch.ctxt;
+
+ isb();
+ /*
+ * At this stage, and thanks to the above isb(), S2 is
+ * configured and enabled. We can now restore the guest's S1
+ * configuration: SCTLR, and only then TCR.
+ */
+ write_sysreg_el1(ctxt->sys_regs[SCTLR_EL1], SYS_SCTLR);
+ isb();
+ write_sysreg_el1(ctxt->sys_regs[TCR_EL1], SYS_TCR);
+ }
}
static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
@@ -159,6 +173,23 @@ static void __hyp_text __deactivate_traps_nvhe(void)
{
u64 mdcr_el2 = read_sysreg(mdcr_el2);
+ if (cpus_have_const_cap(ARM64_WORKAROUND_1319367)) {
+ u64 val;
+
+ /*
+ * Set the TCR and SCTLR registers in the exact opposite
+ * sequence as __activate_traps_nvhe (first prevent walks,
+ * then force the MMU on). A generous sprinkling of isb()
+ * ensures that things happen in this exact order.
+ */
+ val = read_sysreg_el1(SYS_TCR);
+ write_sysreg_el1(val | TCR_EPD1_MASK | TCR_EPD0_MASK, SYS_TCR);
+ isb();
+ val = read_sysreg_el1(SYS_SCTLR);
+ write_sysreg_el1(val | SCTLR_ELx_M, SYS_SCTLR);
+ isb();
+ }
+
__deactivate_traps_common();
mdcr_el2 &= MDCR_EL2_HPMN_MASK;
@@ -657,7 +688,7 @@ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
*/
if (system_uses_irq_prio_masking()) {
gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
- dsb(sy);
+ pmr_sync();
}
vcpu = kern_hyp_va(vcpu);
@@ -670,18 +701,23 @@ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
__sysreg_save_state_nvhe(host_ctxt);
- __activate_vm(kern_hyp_va(vcpu->kvm));
- __activate_traps(vcpu);
-
- __hyp_vgic_restore_state(vcpu);
- __timer_enable_traps(vcpu);
-
/*
* We must restore the 32-bit state before the sysregs, thanks
* to erratum #852523 (Cortex-A57) or #853709 (Cortex-A72).
+ *
+ * Also, in order to deal with erratum #1319537 (A57) and #1319367
+ * (A72), we must ensure that all VM-related sysregs are restored
+ * before we enable S2 translation.
*/
__sysreg32_restore_state(vcpu);
__sysreg_restore_state_nvhe(guest_ctxt);
+
+ __activate_vm(kern_hyp_va(vcpu->kvm));
+ __activate_traps(vcpu);
+
+ __hyp_vgic_restore_state(vcpu);
+ __timer_enable_traps(vcpu);
+
__debug_switch_to_guest(vcpu);
__set_guest_arch_workaround_state(vcpu);
diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c
index 7ddbc849b580..22b8128d19f6 100644
--- a/arch/arm64/kvm/hyp/sysreg-sr.c
+++ b/arch/arm64/kvm/hyp/sysreg-sr.c
@@ -117,12 +117,26 @@ static void __hyp_text __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
{
write_sysreg(ctxt->sys_regs[MPIDR_EL1], vmpidr_el2);
write_sysreg(ctxt->sys_regs[CSSELR_EL1], csselr_el1);
- write_sysreg_el1(ctxt->sys_regs[SCTLR_EL1], SYS_SCTLR);
+
+ if (!cpus_have_const_cap(ARM64_WORKAROUND_1319367)) {
+ write_sysreg_el1(ctxt->sys_regs[SCTLR_EL1], SYS_SCTLR);
+ write_sysreg_el1(ctxt->sys_regs[TCR_EL1], SYS_TCR);
+ } else if (!ctxt->__hyp_running_vcpu) {
+ /*
+ * Must only be done for guest registers, hence the context
+ * test. We're coming from the host, so SCTLR.M is already
+ * set. Pairs with __activate_traps_nvhe().
+ */
+ write_sysreg_el1((ctxt->sys_regs[TCR_EL1] |
+ TCR_EPD1_MASK | TCR_EPD0_MASK),
+ SYS_TCR);
+ isb();
+ }
+
write_sysreg(ctxt->sys_regs[ACTLR_EL1], actlr_el1);
write_sysreg_el1(ctxt->sys_regs[CPACR_EL1], SYS_CPACR);
write_sysreg_el1(ctxt->sys_regs[TTBR0_EL1], SYS_TTBR0);
write_sysreg_el1(ctxt->sys_regs[TTBR1_EL1], SYS_TTBR1);
- write_sysreg_el1(ctxt->sys_regs[TCR_EL1], SYS_TCR);
write_sysreg_el1(ctxt->sys_regs[ESR_EL1], SYS_ESR);
write_sysreg_el1(ctxt->sys_regs[AFSR0_EL1], SYS_AFSR0);
write_sysreg_el1(ctxt->sys_regs[AFSR1_EL1], SYS_AFSR1);
@@ -135,6 +149,23 @@ static void __hyp_text __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
write_sysreg(ctxt->sys_regs[PAR_EL1], par_el1);
write_sysreg(ctxt->sys_regs[TPIDR_EL1], tpidr_el1);
+ if (cpus_have_const_cap(ARM64_WORKAROUND_1319367) &&
+ ctxt->__hyp_running_vcpu) {
+ /*
+ * Must only be done for host registers, hence the context
+ * test. Pairs with __deactivate_traps_nvhe().
+ */
+ isb();
+ /*
+ * At this stage, and thanks to the above isb(), S2 is
+ * deconfigured and disabled. We can now restore the host's
+ * S1 configuration: SCTLR, and only then TCR.
+ */
+ write_sysreg_el1(ctxt->sys_regs[SCTLR_EL1], SYS_SCTLR);
+ isb();
+ write_sysreg_el1(ctxt->sys_regs[TCR_EL1], SYS_TCR);
+ }
+
write_sysreg(ctxt->gp_regs.sp_el1, sp_el1);
write_sysreg_el1(ctxt->gp_regs.elr_el1, SYS_ELR);
write_sysreg_el1(ctxt->gp_regs.spsr[KVM_SPSR_EL1],SYS_SPSR);
diff --git a/arch/arm64/kvm/hyp/tlb.c b/arch/arm64/kvm/hyp/tlb.c
index eb0efc5557f3..c2bc17ca6430 100644
--- a/arch/arm64/kvm/hyp/tlb.c
+++ b/arch/arm64/kvm/hyp/tlb.c
@@ -63,6 +63,22 @@ static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm,
static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm,
struct tlb_inv_context *cxt)
{
+ if (cpus_have_const_cap(ARM64_WORKAROUND_1319367)) {
+ u64 val;
+
+ /*
+ * For CPUs that are affected by ARM 1319367, we need to
+ * avoid a host Stage-1 walk while we have the guest's
+ * VMID set in the VTTBR in order to invalidate TLBs.
+ * We're guaranteed that the S1 MMU is enabled, so we can
+ * simply set the EPD bits to avoid any further TLB fill.
+ */
+ val = cxt->tcr = read_sysreg_el1(SYS_TCR);
+ val |= TCR_EPD1_MASK | TCR_EPD0_MASK;
+ write_sysreg_el1(val, SYS_TCR);
+ isb();
+ }
+
__load_guest_stage2(kvm);
isb();
}
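The EPD trick above, reduced to its essentials: read TCR_EL1, set both EPD bits so no new Stage-1 walks can start, and synchronize before the VMID changes. A sketch, assuming the caller keeps the saved value for the switch back to the host:

static u64 __hyp_text park_host_stage1(void)
{
        u64 tcr = read_sysreg_el1(SYS_TCR);

        /* Forbid new S1 table walks via TTBR0/TTBR1; existing TLB
         * entries keep the S1 MMU usable in the meantime. */
        write_sysreg_el1(tcr | TCR_EPD0_MASK | TCR_EPD1_MASK, SYS_TCR);
        isb();
        return tcr;     /* restored once vttbr_el2 holds the host VMID */
}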
@@ -100,6 +116,13 @@ static void __hyp_text __tlb_switch_to_host_nvhe(struct kvm *kvm,
struct tlb_inv_context *cxt)
{
write_sysreg(0, vttbr_el2);
+
+ if (cpus_have_const_cap(ARM64_WORKAROUND_1319367)) {
+ /* Ensure write of the host VMID */
+ isb();
+ /* Restore the host's TCR_EL1 */
+ write_sysreg_el1(cxt->tcr, SYS_TCR);
+ }
}
static void __hyp_text __tlb_switch_to_host(struct kvm *kvm,
diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
index a9d25a305af5..ccdb6a051ab2 100644
--- a/arch/arm64/kvm/inject_fault.c
+++ b/arch/arm64/kvm/inject_fault.c
@@ -109,7 +109,7 @@ static void inject_undef64(struct kvm_vcpu *vcpu)
/**
* kvm_inject_dabt - inject a data abort into the guest
- * @vcpu: The VCPU to receive the undefined exception
+ * @vcpu: The VCPU to receive the data abort
* @addr: The address to report in the DFAR
*
* It is assumed that this code is called from the VCPU thread and that the
@@ -125,7 +125,7 @@ void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
/**
* kvm_inject_pabt - inject a prefetch abort into the guest
- * @vcpu: The VCPU to receive the undefined exception
+ * @vcpu: The VCPU to receive the prefetch abort
* @addr: The address to report in the DFAR
*
* It is assumed that this code is called from the VCPU thread and that the
diff --git a/arch/arm64/lib/clear_user.S b/arch/arm64/lib/clear_user.S
index 10415572e82f..aeafc03e961a 100644
--- a/arch/arm64/lib/clear_user.S
+++ b/arch/arm64/lib/clear_user.S
@@ -20,7 +20,6 @@
* Alignment fixed up by hardware.
*/
ENTRY(__arch_clear_user)
- uaccess_enable_not_uao x2, x3, x4
mov x2, x1 // save the size for fixup return
subs x1, x1, #8
b.mi 2f
@@ -40,7 +39,6 @@ uao_user_alternative 9f, strh, sttrh, wzr, x0, 2
b.mi 5f
uao_user_alternative 9f, strb, sttrb, wzr, x0, 0
5: mov x0, #0
- uaccess_disable_not_uao x2, x3
ret
ENDPROC(__arch_clear_user)
EXPORT_SYMBOL(__arch_clear_user)
diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S
index 680e74409ff9..ebb3c06cbb5d 100644
--- a/arch/arm64/lib/copy_from_user.S
+++ b/arch/arm64/lib/copy_from_user.S
@@ -54,10 +54,8 @@
end .req x5
ENTRY(__arch_copy_from_user)
- uaccess_enable_not_uao x3, x4, x5
add end, x0, x2
#include "copy_template.S"
- uaccess_disable_not_uao x3, x4
mov x0, #0 // Nothing to copy
ret
ENDPROC(__arch_copy_from_user)
diff --git a/arch/arm64/lib/copy_in_user.S b/arch/arm64/lib/copy_in_user.S
index 0bedae3f3792..3d8153a1ebce 100644
--- a/arch/arm64/lib/copy_in_user.S
+++ b/arch/arm64/lib/copy_in_user.S
@@ -56,10 +56,8 @@
end .req x5
ENTRY(__arch_copy_in_user)
- uaccess_enable_not_uao x3, x4, x5
add end, x0, x2
#include "copy_template.S"
- uaccess_disable_not_uao x3, x4
mov x0, #0
ret
ENDPROC(__arch_copy_in_user)
diff --git a/arch/arm64/lib/copy_to_user.S b/arch/arm64/lib/copy_to_user.S
index 2d88c736e8f2..357eae2c18eb 100644
--- a/arch/arm64/lib/copy_to_user.S
+++ b/arch/arm64/lib/copy_to_user.S
@@ -53,10 +53,8 @@
end .req x5
ENTRY(__arch_copy_to_user)
- uaccess_enable_not_uao x3, x4, x5
add end, x0, x2
#include "copy_template.S"
- uaccess_disable_not_uao x3, x4
mov x0, #0
ret
ENDPROC(__arch_copy_to_user)
diff --git a/arch/arm64/lib/uaccess_flushcache.c b/arch/arm64/lib/uaccess_flushcache.c
index cbfcbe6470a5..bfa30b75b2b8 100644
--- a/arch/arm64/lib/uaccess_flushcache.c
+++ b/arch/arm64/lib/uaccess_flushcache.c
@@ -28,7 +28,11 @@ void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
unsigned long __copy_user_flushcache(void *to, const void __user *from,
unsigned long n)
{
- unsigned long rc = __arch_copy_from_user(to, from, n);
+ unsigned long rc;
+
+ uaccess_enable_not_uao();
+ rc = __arch_copy_from_user(to, from, n);
+ uaccess_disable_not_uao();
/* See above */
__clean_dcache_area_pop(to, n - rc);
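This hunk is the template for the new calling convention: with uaccess_enable_not_uao/uaccess_disable_not_uao gone from the assembly entry points, every C caller of a raw __arch_copy_*() routine must open and close the user-access window itself. A hypothetical helper (copy_from_user_raw is not a kernel API, just an illustration of the shape):

static inline unsigned long copy_from_user_raw(void *to,
                                               const void __user *from,
                                               unsigned long n)
{
        unsigned long rc;

        uaccess_enable_not_uao();       /* open the PAN/UAO window */
        rc = __arch_copy_from_user(to, from, n);
        uaccess_disable_not_uao();      /* close it again */

        return rc;      /* bytes NOT copied, semantics unchanged */
}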
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 9fc6db0bcbad..077b02a2d4d3 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -32,7 +32,8 @@
#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
-#include <asm/kasan.h>
+#include <asm/kprobes.h>
+#include <asm/processor.h>
#include <asm/sysreg.h>
#include <asm/system_misc.h>
#include <asm/pgtable.h>
@@ -101,18 +102,6 @@ static void mem_abort_decode(unsigned int esr)
data_abort_decode(esr);
}
-static inline bool is_ttbr0_addr(unsigned long addr)
-{
- /* entry assembly clears tags for TTBR0 addrs */
- return addr < TASK_SIZE;
-}
-
-static inline bool is_ttbr1_addr(unsigned long addr)
-{
- /* TTBR1 addresses may have a tag if KASAN_SW_TAGS is in use */
- return arch_kasan_reset_tag(addr) >= PAGE_OFFSET;
-}
-
static inline unsigned long mm_to_pgd_phys(struct mm_struct *mm)
{
/* Either init_pg_dir or swapper_pg_dir */
@@ -318,6 +307,8 @@ static void __do_kernel_fault(unsigned long addr, unsigned int esr,
if (is_el1_permission_fault(addr, esr, regs)) {
if (esr & ESR_ELx_WNR)
msg = "write to read-only memory";
+ else if (is_el1_instruction_abort(esr))
+ msg = "execute from non-executable memory";
else
msg = "read from unreadable memory";
} else if (addr < PAGE_SIZE) {
@@ -736,8 +727,7 @@ static const struct fault_info fault_info[] = {
{ do_bad, SIGKILL, SI_KERNEL, "unknown 63" },
};
-asmlinkage void __exception do_mem_abort(unsigned long addr, unsigned int esr,
- struct pt_regs *regs)
+void do_mem_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs)
{
const struct fault_info *inf = esr_to_fault_info(esr);
@@ -753,43 +743,21 @@ asmlinkage void __exception do_mem_abort(unsigned long addr, unsigned int esr,
arm64_notify_die(inf->name, regs,
inf->sig, inf->code, (void __user *)addr, esr);
}
+NOKPROBE_SYMBOL(do_mem_abort);
-asmlinkage void __exception do_el0_irq_bp_hardening(void)
+void do_el0_irq_bp_hardening(void)
{
/* PC has already been checked in entry.S */
arm64_apply_bp_hardening();
}
+NOKPROBE_SYMBOL(do_el0_irq_bp_hardening);
-asmlinkage void __exception do_el0_ia_bp_hardening(unsigned long addr,
- unsigned int esr,
- struct pt_regs *regs)
-{
- /*
- * We've taken an instruction abort from userspace and not yet
- * re-enabled IRQs. If the address is a kernel address, apply
- * BP hardening prior to enabling IRQs and pre-emption.
- */
- if (!is_ttbr0_addr(addr))
- arm64_apply_bp_hardening();
-
- local_daif_restore(DAIF_PROCCTX);
- do_mem_abort(addr, esr, regs);
-}
-
-
-asmlinkage void __exception do_sp_pc_abort(unsigned long addr,
- unsigned int esr,
- struct pt_regs *regs)
+void do_sp_pc_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs)
{
- if (user_mode(regs)) {
- if (!is_ttbr0_addr(instruction_pointer(regs)))
- arm64_apply_bp_hardening();
- local_daif_restore(DAIF_PROCCTX);
- }
-
arm64_notify_die("SP/PC alignment exception", regs,
SIGBUS, BUS_ADRALN, (void __user *)addr, esr);
}
+NOKPROBE_SYMBOL(do_sp_pc_abort);
int __init early_brk64(unsigned long addr, unsigned int esr,
struct pt_regs *regs);
@@ -872,8 +840,7 @@ NOKPROBE_SYMBOL(debug_exception_exit);
#ifdef CONFIG_ARM64_ERRATUM_1463225
DECLARE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
-static int __exception
-cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
+static int cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
{
if (user_mode(regs))
return 0;
@@ -892,16 +859,15 @@ cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
return 1;
}
#else
-static int __exception
-cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
+static int cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
{
return 0;
}
#endif /* CONFIG_ARM64_ERRATUM_1463225 */
+NOKPROBE_SYMBOL(cortex_a76_erratum_1463225_debug_handler);
-asmlinkage void __exception do_debug_exception(unsigned long addr_if_watchpoint,
- unsigned int esr,
- struct pt_regs *regs)
+void do_debug_exception(unsigned long addr_if_watchpoint, unsigned int esr,
+ struct pt_regs *regs)
{
const struct fault_info *inf = esr_to_debug_fault_info(esr);
unsigned long pc = instruction_pointer(regs);
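Every NOKPROBE_SYMBOL() added above follows the same pattern: define a fault or debug entry point, then immediately blacklist it so kprobes cannot plant a breakpoint in code that runs before exception state is safely saved. A minimal sketch with a made-up handler name:

static void demo_abort_handler(unsigned long addr, unsigned int esr,
                               struct pt_regs *regs)
{
        /* A probe firing here would re-enter the exception machinery
         * before it is re-entrant, so the symbol is blacklisted. */
}
NOKPROBE_SYMBOL(demo_abort_handler);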
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 45c00a54909c..be9481cdf3b9 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -20,6 +20,7 @@
#include <linux/sort.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
+#include <linux/dma-direct.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include <linux/efi.h>
@@ -41,6 +42,8 @@
#include <asm/tlb.h>
#include <asm/alternative.h>
+#define ARM64_ZONE_DMA_BITS 30
+
/*
* We need to be able to catch inadvertent references to memstart_addr
* that occur (potentially in generic code) before arm64_memblock_init()
@@ -56,7 +59,14 @@ EXPORT_SYMBOL(physvirt_offset);
struct page *vmemmap __ro_after_init;
EXPORT_SYMBOL(vmemmap);
+/*
+ * We create both ZONE_DMA and ZONE_DMA32. ZONE_DMA covers the first 1G of
+ * memory, as some devices (most notably the Raspberry Pi 4) have peripherals
+ * with this limited view of memory. ZONE_DMA32 covers the rest of the
+ * 32-bit addressable memory area.
+ */
phys_addr_t arm64_dma_phys_limit __ro_after_init;
+static phys_addr_t arm64_dma32_phys_limit __ro_after_init;
#ifdef CONFIG_KEXEC_CORE
/*
@@ -81,7 +91,7 @@ static void __init reserve_crashkernel(void)
if (crash_base == 0) {
/* Current arm64 boot protocol requires 2MB alignment */
- crash_base = memblock_find_in_range(0, ARCH_LOW_ADDRESS_LIMIT,
+ crash_base = memblock_find_in_range(0, arm64_dma32_phys_limit,
crash_size, SZ_2M);
if (crash_base == 0) {
pr_warn("cannot allocate crashkernel (size:0x%llx)\n",
@@ -169,15 +179,16 @@ static void __init reserve_elfcorehdr(void)
{
}
#endif /* CONFIG_CRASH_DUMP */
+
/*
- * Return the maximum physical address for ZONE_DMA32 (DMA_BIT_MASK(32)). It
- * currently assumes that for memory starting above 4G, 32-bit devices will
- * use a DMA offset.
+ * Return the maximum physical address for a zone with a given address size
+ * limit. It currently assumes that for memory starting above 4G, 32-bit
+ * devices will use a DMA offset.
*/
-static phys_addr_t __init max_zone_dma_phys(void)
+static phys_addr_t __init max_zone_phys(unsigned int zone_bits)
{
- phys_addr_t offset = memblock_start_of_DRAM() & GENMASK_ULL(63, 32);
- return min(offset + (1ULL << 32), memblock_end_of_DRAM());
+ phys_addr_t offset = memblock_start_of_DRAM() & GENMASK_ULL(63, zone_bits);
+ return min(offset + (1ULL << zone_bits), memblock_end_of_DRAM());
}
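A stand-alone re-implementation of max_zone_phys() for illustration, with explicit DRAM bounds instead of memblock queries and GENMASK_ULL(63, zone_bits) rewritten as a shift mask (the addresses below are made up):

#include <stdint.h>
#include <stdio.h>

static uint64_t max_zone_phys(unsigned int zone_bits,
                              uint64_t dram_start, uint64_t dram_end)
{
        uint64_t offset = dram_start & ~((1ULL << zone_bits) - 1);
        uint64_t limit = offset + (1ULL << zone_bits);

        return limit < dram_end ? limit : dram_end;
}

int main(void)
{
        /* DRAM at [34G, 36G): the 30-bit zone ends 1G in, at 35G. */
        printf("%#llx\n", (unsigned long long)
               max_zone_phys(30, 0x880000000ULL, 0x900000000ULL));
        return 0;
}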
#ifdef CONFIG_NUMA
@@ -186,8 +197,11 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
{
unsigned long max_zone_pfns[MAX_NR_ZONES] = {0};
+#ifdef CONFIG_ZONE_DMA
+ max_zone_pfns[ZONE_DMA] = PFN_DOWN(arm64_dma_phys_limit);
+#endif
#ifdef CONFIG_ZONE_DMA32
- max_zone_pfns[ZONE_DMA32] = PFN_DOWN(max_zone_dma_phys());
+ max_zone_pfns[ZONE_DMA32] = PFN_DOWN(arm64_dma32_phys_limit);
#endif
max_zone_pfns[ZONE_NORMAL] = max;
@@ -200,16 +214,21 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
{
struct memblock_region *reg;
unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
- unsigned long max_dma = min;
+ unsigned long max_dma32 = min;
+ unsigned long __maybe_unused max_dma = min;
memset(zone_size, 0, sizeof(zone_size));
- /* 4GB maximum for 32-bit only capable devices */
-#ifdef CONFIG_ZONE_DMA32
+#ifdef CONFIG_ZONE_DMA
max_dma = PFN_DOWN(arm64_dma_phys_limit);
- zone_size[ZONE_DMA32] = max_dma - min;
+ zone_size[ZONE_DMA] = max_dma - min;
+ max_dma32 = max_dma;
+#endif
+#ifdef CONFIG_ZONE_DMA32
+ max_dma32 = PFN_DOWN(arm64_dma32_phys_limit);
+ zone_size[ZONE_DMA32] = max_dma32 - max_dma;
#endif
- zone_size[ZONE_NORMAL] = max - max_dma;
+ zone_size[ZONE_NORMAL] = max - max_dma32;
memcpy(zhole_size, zone_size, sizeof(zhole_size));
@@ -219,16 +238,22 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
if (start >= max)
continue;
-
-#ifdef CONFIG_ZONE_DMA32
+#ifdef CONFIG_ZONE_DMA
if (start < max_dma) {
- unsigned long dma_end = min(end, max_dma);
- zhole_size[ZONE_DMA32] -= dma_end - start;
+ unsigned long dma_end = min_not_zero(end, max_dma);
+ zhole_size[ZONE_DMA] -= dma_end - start;
}
#endif
- if (end > max_dma) {
+#ifdef CONFIG_ZONE_DMA32
+ if (start < max_dma32) {
+ unsigned long dma32_end = min(end, max_dma32);
+ unsigned long dma32_start = max(start, max_dma);
+ zhole_size[ZONE_DMA32] -= dma32_end - dma32_start;
+ }
+#endif
+ if (end > max_dma32) {
unsigned long normal_end = min(end, max);
- unsigned long normal_start = max(start, max_dma);
+ unsigned long normal_start = max(start, max_dma32);
zhole_size[ZONE_NORMAL] -= normal_end - normal_start;
}
}
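The hole accounting above is one rule applied three times: clip each memblock region to a zone's [lower, upper) window and subtract the covered span from that zone's hole. A simplified sketch (kernel min()/max() helpers assumed; the min_not_zero() corner case is ignored):

static void account_region(unsigned long start, unsigned long end,
                           unsigned long max_dma, unsigned long max_dma32,
                           unsigned long max_pfn_limit, unsigned long *zhole)
{
        if (start < max_dma)                    /* span inside ZONE_DMA */
                zhole[ZONE_DMA] -= min(end, max_dma) - start;
        if (start < max_dma32 && end > max_dma) /* span inside ZONE_DMA32 */
                zhole[ZONE_DMA32] -= min(end, max_dma32) - max(start, max_dma);
        if (end > max_dma32)                    /* span inside ZONE_NORMAL */
                zhole[ZONE_NORMAL] -= min(end, max_pfn_limit) -
                                      max(start, max_dma32);
}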
@@ -418,11 +443,15 @@ void __init arm64_memblock_init(void)
early_init_fdt_scan_reserved_mem();
- /* 4GB maximum for 32-bit only capable devices */
+ if (IS_ENABLED(CONFIG_ZONE_DMA)) {
+ zone_dma_bits = ARM64_ZONE_DMA_BITS;
+ arm64_dma_phys_limit = max_zone_phys(ARM64_ZONE_DMA_BITS);
+ }
+
if (IS_ENABLED(CONFIG_ZONE_DMA32))
- arm64_dma_phys_limit = max_zone_dma_phys();
+ arm64_dma32_phys_limit = max_zone_phys(32);
else
- arm64_dma_phys_limit = PHYS_MASK + 1;
+ arm64_dma32_phys_limit = PHYS_MASK + 1;
reserve_crashkernel();
@@ -430,7 +459,7 @@ void __init arm64_memblock_init(void)
high_memory = __va(memblock_end_of_DRAM() - 1) + 1;
- dma_contiguous_reserve(arm64_dma_phys_limit);
+ dma_contiguous_reserve(arm64_dma32_phys_limit);
}
void __init bootmem_init(void)
@@ -534,7 +563,7 @@ static void __init free_unused_memmap(void)
void __init mem_init(void)
{
if (swiotlb_force == SWIOTLB_FORCE ||
- max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT))
+ max_pfn > PFN_DOWN(arm64_dma_phys_limit ? : arm64_dma32_phys_limit))
swiotlb_init(1);
else
swiotlb_force = SWIOTLB_NO_FORCE;
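The bare `?:` in the new condition is GCC's "elvis" extension: `x ? : y` yields x when x is non-zero (evaluating x only once) and y otherwise. Spelled out, the swiotlb decision reads:

phys_addr_t limit = arm64_dma_phys_limit;       /* 0 when ZONE_DMA is off */

if (!limit)
        limit = arm64_dma32_phys_limit;

if (swiotlb_force == SWIOTLB_FORCE || max_pfn > PFN_DOWN(limit))
        swiotlb_init(1);                /* bounce buffers are needed */
else
        swiotlb_force = SWIOTLB_NO_FORCE;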
@@ -571,7 +600,7 @@ void free_initmem(void)
{
free_reserved_area(lm_alias(__init_begin),
lm_alias(__init_end),
- 0, "unused kernel");
+ POISON_FREE_INITMEM, "unused kernel");
/*
* Unmap the __init region but leave the VM area in place. This
* prevents the region from being reused for kernel modules, which
@@ -580,18 +609,6 @@ void free_initmem(void)
unmap_kernel_range((u64)__init_begin, (u64)(__init_end - __init_begin));
}
-#ifdef CONFIG_BLK_DEV_INITRD
-void __init free_initrd_mem(unsigned long start, unsigned long end)
-{
- unsigned long aligned_start, aligned_end;
-
- aligned_start = __virt_to_phys(start) & PAGE_MASK;
- aligned_end = PAGE_ALIGN(__virt_to_phys(end));
- memblock_free(aligned_start, aligned_end - aligned_start);
- free_reserved_area((void *)start, (void *)end, 0, "initrd");
-}
-#endif
-
/*
* Dump out memory limit information on panic.
*/
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 60c929f3683b..5a3b15a14a7f 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -338,7 +338,7 @@ static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
phys_addr_t (*pgtable_alloc)(int),
int flags)
{
- unsigned long addr, length, end, next;
+ unsigned long addr, end, next;
pgd_t *pgdp = pgd_offset_raw(pgdir, virt);
/*
@@ -350,9 +350,8 @@ static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
phys &= PAGE_MASK;
addr = virt & PAGE_MASK;
- length = PAGE_ALIGN(size + (virt & ~PAGE_MASK));
+ end = PAGE_ALIGN(virt + size);
- end = addr + length;
do {
next = pgd_addr_end(addr, end);
alloc_init_pud(pgdp, addr, next, phys, prot, pgtable_alloc,
@@ -1061,6 +1060,8 @@ int arch_add_memory(int nid, u64 start, u64 size,
__create_pgd_mapping(swapper_pg_dir, start, __phys_to_virt(start),
size, PAGE_KERNEL, __pgd_pgtable_alloc, flags);
+ memblock_clear_nomap(start, size);
+
return __add_pages(nid, start >> PAGE_SHIFT, size >> PAGE_SHIFT,
restrictions);
}
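The __create_pgd_mapping() simplification above rests on an identity: aligning the base down and adding the page-aligned (size plus offset-in-page) gives the same bound as page-aligning virt + size directly, absent overflow. A quick stand-alone check of that equivalence with one arbitrary input:

#include <assert.h>
#include <stdint.h>

#define PAGE_SIZE       4096ULL
#define PAGE_MASK       (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)   (((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
        uint64_t virt = 0xffff000012345678ULL, size = 0x2345;
        uint64_t addr = virt & PAGE_MASK;
        uint64_t old_end = addr + PAGE_ALIGN(size + (virt & ~PAGE_MASK));
        uint64_t new_end = PAGE_ALIGN(virt + size);

        assert(old_end == new_end);     /* same loop bound, less arithmetic */
        return 0;
}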
diff --git a/arch/c6x/kernel/vmlinux.lds.S b/arch/c6x/kernel/vmlinux.lds.S
index 584bab2bace6..ac99ba0864bf 100644
--- a/arch/c6x/kernel/vmlinux.lds.S
+++ b/arch/c6x/kernel/vmlinux.lds.S
@@ -5,6 +5,9 @@
* Copyright (C) 2010, 2011 Texas Instruments Incorporated
* Mark Salter <msalter@redhat.com>
*/
+
+#define RO_EXCEPTION_TABLE_ALIGN 16
+
#include <asm-generic/vmlinux.lds.h>
#include <asm/thread_info.h>
#include <asm/page.h>
@@ -80,10 +83,7 @@ SECTIONS
*(.gnu.warning)
}
- EXCEPTION_TABLE(16)
- NOTES
-
- RO_DATA_SECTION(PAGE_SIZE)
+ RO_DATA(PAGE_SIZE)
.const :
{
*(.const .const.* .gnu.linkonce.r.*)
diff --git a/arch/csky/kernel/vmlinux.lds.S b/arch/csky/kernel/vmlinux.lds.S
index ae7961b973f2..2ff37beaf2bf 100644
--- a/arch/csky/kernel/vmlinux.lds.S
+++ b/arch/csky/kernel/vmlinux.lds.S
@@ -49,11 +49,10 @@ SECTIONS
_sdata = .;
- RO_DATA_SECTION(PAGE_SIZE)
- RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
+ RO_DATA(PAGE_SIZE)
+ RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
_edata = .;
- NOTES
EXCEPTION_TABLE(L1_CACHE_BYTES)
BSS_SECTION(L1_CACHE_BYTES, PAGE_SIZE, L1_CACHE_BYTES)
VBR_BASE
diff --git a/arch/h8300/kernel/vmlinux.lds.S b/arch/h8300/kernel/vmlinux.lds.S
index 49f716c0a1df..6b1afc2f9b68 100644
--- a/arch/h8300/kernel/vmlinux.lds.S
+++ b/arch/h8300/kernel/vmlinux.lds.S
@@ -1,4 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
+
+#define RO_EXCEPTION_TABLE_ALIGN 16
+
#include <asm-generic/vmlinux.lds.h>
#include <asm/page.h>
#include <asm/thread_info.h>
@@ -37,9 +40,7 @@ SECTIONS
#endif
_etext = . ;
}
- EXCEPTION_TABLE(16)
- NOTES
- RO_DATA_SECTION(4)
+ RO_DATA(4)
ROMEND = .;
#if defined(CONFIG_ROMKERNEL)
. = RAMTOP;
@@ -48,7 +49,7 @@ SECTIONS
#endif
_sdata = . ;
__data_start = . ;
- RW_DATA_SECTION(0, PAGE_SIZE, THREAD_SIZE)
+ RW_DATA(0, PAGE_SIZE, THREAD_SIZE)
#if defined(CONFIG_ROMKERNEL)
#undef ADDR
#endif
diff --git a/arch/hexagon/kernel/vmlinux.lds.S b/arch/hexagon/kernel/vmlinux.lds.S
index 78f2418e97c8..0ca2471ddb9f 100644
--- a/arch/hexagon/kernel/vmlinux.lds.S
+++ b/arch/hexagon/kernel/vmlinux.lds.S
@@ -49,12 +49,11 @@ SECTIONS
INIT_DATA_SECTION(PAGE_SIZE)
_sdata = .;
- RW_DATA_SECTION(32,PAGE_SIZE,_THREAD_SIZE)
- RO_DATA_SECTION(PAGE_SIZE)
+ RW_DATA(32,PAGE_SIZE,_THREAD_SIZE)
+ RO_DATA(PAGE_SIZE)
_edata = .;
EXCEPTION_TABLE(16)
- NOTES
BSS_SECTION(_PAGE_SIZE, _PAGE_SIZE, _PAGE_SIZE)
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index bb320c6d0cc9..c49fcef754de 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -289,7 +289,7 @@ static void __init setup_crashkernel(unsigned long total, int *n)
}
if (!check_crashkernel_memory(base, size)) {
- pr_warning("crashkernel: There would be kdump memory "
+ pr_warn("crashkernel: There would be kdump memory "
"at %ld GB but this is unusable because it "
"must\nbe below 4 GB. Change the memory "
"configuration of the machine.\n",
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index 1e95d32c8877..91b4024c9351 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -132,7 +132,7 @@ static __u64 vtime_delta(struct task_struct *tsk)
return delta_stime;
}
-void vtime_account_system(struct task_struct *tsk)
+void vtime_account_kernel(struct task_struct *tsk)
{
struct thread_info *ti = task_thread_info(tsk);
__u64 stime = vtime_delta(tsk);
@@ -146,7 +146,7 @@ void vtime_account_system(struct task_struct *tsk)
else
ti->stime += stime;
}
-EXPORT_SYMBOL_GPL(vtime_account_system);
+EXPORT_SYMBOL_GPL(vtime_account_kernel);
void vtime_account_idle(struct task_struct *tsk)
{
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index d9d4e21107cd..1ec6b703c5b4 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -5,6 +5,9 @@
#include <asm/pgtable.h>
#include <asm/thread_info.h>
+#define EMITS_PT_NOTE
+#define RO_EXCEPTION_TABLE_ALIGN 16
+
#include <asm-generic/vmlinux.lds.h>
OUTPUT_FORMAT("elf64-ia64-little")
@@ -13,7 +16,7 @@ ENTRY(phys_start)
jiffies = jiffies_64;
PHDRS {
- code PT_LOAD;
+ text PT_LOAD;
percpu PT_LOAD;
data PT_LOAD;
note PT_NOTE;
@@ -36,7 +39,7 @@ SECTIONS {
phys_start = _start - LOAD_OFFSET;
code : {
- } :code
+ } :text
. = KERNEL_START;
_text = .;
@@ -68,11 +71,6 @@ SECTIONS {
/*
* Read-only data
*/
- NOTES :code :note /* put .notes in text and mark in PT_NOTE */
- code_continues : {
- } : code /* switch back to regular program... */
-
- EXCEPTION_TABLE(16)
/* MCA table */
. = ALIGN(16);
@@ -102,11 +100,11 @@ SECTIONS {
__start_unwind = .;
*(.IA_64.unwind*)
__end_unwind = .;
- } :code :unwind
+ } :text :unwind
code_continues2 : {
- } : code
+ } :text
- RODATA
+ RO_DATA(4096)
.opd : AT(ADDR(.opd) - LOAD_OFFSET) {
__start_opd = .;
@@ -214,7 +212,7 @@ SECTIONS {
_end = .;
code : {
- } :code
+ } :text
STABS_DEBUG
DWARF_DEBUG
diff --git a/arch/m68k/atari/config.c b/arch/m68k/atari/config.c
index 73bf5ea9ee1b..7ec3161e8517 100644
--- a/arch/m68k/atari/config.c
+++ b/arch/m68k/atari/config.c
@@ -869,8 +869,28 @@ static const struct resource atari_scsi_tt_rsrc[] __initconst = {
};
#endif
+/*
+ * Falcon IDE interface
+ */
+
+#define FALCON_IDE_BASE 0xfff00000
+
+static const struct resource atari_falconide_rsrc[] __initconst = {
+ {
+ .flags = IORESOURCE_MEM,
+ .start = FALCON_IDE_BASE,
+ .end = FALCON_IDE_BASE + 0x39,
+ },
+ {
+ .flags = IORESOURCE_IRQ,
+ .start = IRQ_MFP_FSCSI,
+ .end = IRQ_MFP_FSCSI,
+ },
+};
+
int __init atari_platform_init(void)
{
+ struct platform_device *pdev;
int rv = 0;
if (!MACH_IS_ATARI)
@@ -912,6 +932,13 @@ int __init atari_platform_init(void)
atari_scsi_tt_rsrc, ARRAY_SIZE(atari_scsi_tt_rsrc));
#endif
+ if (ATARIHW_PRESENT(IDE)) {
+ pdev = platform_device_register_simple("atari-falcon-ide", -1,
+ atari_falconide_rsrc, ARRAY_SIZE(atari_falconide_rsrc));
+ if (IS_ERR(pdev))
+ rv = PTR_ERR(pdev);
+ }
+
return rv;
}
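The Falcon IDE hunk is a textbook static platform-device registration. The same pattern, sketched with a made-up name, register window, and IRQ (DEFINE_RES_* are the standard resource initializers):

static const struct resource demo_rsrc[] __initconst = {
        DEFINE_RES_MEM(0xfff00000, 0x40),       /* register window */
        DEFINE_RES_IRQ(15),                     /* interrupt line */
};

static int __init demo_platform_init(void)
{
        struct platform_device *pdev;

        pdev = platform_device_register_simple("demo-ide", -1, demo_rsrc,
                                               ARRAY_SIZE(demo_rsrc));
        return PTR_ERR_OR_ZERO(pdev);   /* IS_ERR() mapped to errno */
}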
diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig
index 9a33c1c006a1..619d30d663a2 100644
--- a/arch/m68k/configs/amiga_defconfig
+++ b/arch/m68k/configs/amiga_defconfig
@@ -355,6 +355,7 @@ CONFIG_DM_SNAPSHOT=m
CONFIG_DM_THIN_PROVISIONING=m
CONFIG_DM_WRITECACHE=m
CONFIG_DM_ERA=m
+CONFIG_DM_CLONE=m
CONFIG_DM_MIRROR=m
CONFIG_DM_RAID=m
CONFIG_DM_ZERO=m
@@ -420,11 +421,15 @@ CONFIG_INPUT_M68K_BEEP=m
# CONFIG_LEGACY_PTYS is not set
CONFIG_PRINTER=m
# CONFIG_HW_RANDOM is not set
+CONFIG_I2C=m
+CONFIG_I2C_CHARDEV=m
+CONFIG_I2C_ICY=m
CONFIG_NTP_PPS=y
CONFIG_PPS_CLIENT_LDISC=m
CONFIG_PPS_CLIENT_PARPORT=m
CONFIG_PTP_1588_CLOCK=m
-# CONFIG_HWMON is not set
+CONFIG_HWMON=m
+CONFIG_SENSORS_LTC2990=m
CONFIG_FB=y
CONFIG_FB_CIRRUS=y
CONFIG_FB_AMIGA=y
@@ -432,8 +437,6 @@ CONFIG_FB_AMIGA_OCS=y
CONFIG_FB_AMIGA_ECS=y
CONFIG_FB_AMIGA_AGA=y
CONFIG_FB_FM2=y
-# CONFIG_LCD_CLASS_DEVICE is not set
-# CONFIG_BACKLIGHT_CLASS_DEVICE is not set
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_LOGO=y
CONFIG_SOUND=m
@@ -490,6 +493,7 @@ CONFIG_QNX4FS_FS=m
CONFIG_QNX6FS_FS=m
CONFIG_SYSV_FS=m
CONFIG_UFS_FS=m
+CONFIG_EROFS_FS=m
CONFIG_NFS_FS=y
CONFIG_NFS_V4=m
CONFIG_NFS_SWAP=y
@@ -560,10 +564,6 @@ CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_ECRDSA=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_AEGIS128=m
-CONFIG_CRYPTO_AEGIS128L=m
-CONFIG_CRYPTO_AEGIS256=m
-CONFIG_CRYPTO_MORUS640=m
-CONFIG_CRYPTO_MORUS1280=m
CONFIG_CRYPTO_CFB=m
CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_LRW=m
diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig
index 7fdbc797a05d..caa0558abcdb 100644
--- a/arch/m68k/configs/apollo_defconfig
+++ b/arch/m68k/configs/apollo_defconfig
@@ -334,6 +334,7 @@ CONFIG_DM_SNAPSHOT=m
CONFIG_DM_THIN_PROVISIONING=m
CONFIG_DM_WRITECACHE=m
CONFIG_DM_ERA=m
+CONFIG_DM_CLONE=m
CONFIG_DM_MIRROR=m
CONFIG_DM_RAID=m
CONFIG_DM_ZERO=m
@@ -393,8 +394,6 @@ CONFIG_PPS_CLIENT_LDISC=m
CONFIG_PTP_1588_CLOCK=m
# CONFIG_HWMON is not set
CONFIG_FB=y
-# CONFIG_LCD_CLASS_DEVICE is not set
-# CONFIG_BACKLIGHT_CLASS_DEVICE is not set
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_LOGO=y
# CONFIG_LOGO_LINUX_VGA16 is not set
@@ -450,6 +449,7 @@ CONFIG_QNX4FS_FS=m
CONFIG_QNX6FS_FS=m
CONFIG_SYSV_FS=m
CONFIG_UFS_FS=m
+CONFIG_EROFS_FS=m
CONFIG_NFS_FS=y
CONFIG_NFS_V4=m
CONFIG_NFS_SWAP=y
@@ -520,10 +520,6 @@ CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_ECRDSA=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_AEGIS128=m
-CONFIG_CRYPTO_AEGIS128L=m
-CONFIG_CRYPTO_AEGIS256=m
-CONFIG_CRYPTO_MORUS640=m
-CONFIG_CRYPTO_MORUS1280=m
CONFIG_CRYPTO_CFB=m
CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_LRW=m
diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig
index f1763405a539..2551c7e9ac54 100644
--- a/arch/m68k/configs/atari_defconfig
+++ b/arch/m68k/configs/atari_defconfig
@@ -350,6 +350,7 @@ CONFIG_DM_SNAPSHOT=m
CONFIG_DM_THIN_PROVISIONING=m
CONFIG_DM_WRITECACHE=m
CONFIG_DM_ERA=m
+CONFIG_DM_CLONE=m
CONFIG_DM_MIRROR=m
CONFIG_DM_RAID=m
CONFIG_DM_ZERO=m
@@ -417,8 +418,6 @@ CONFIG_PTP_1588_CLOCK=m
# CONFIG_HWMON is not set
CONFIG_FB=y
CONFIG_FB_ATARI=y
-# CONFIG_LCD_CLASS_DEVICE is not set
-# CONFIG_BACKLIGHT_CLASS_DEVICE is not set
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_LOGO=y
CONFIG_SOUND=m
@@ -472,6 +471,7 @@ CONFIG_QNX4FS_FS=m
CONFIG_QNX6FS_FS=m
CONFIG_SYSV_FS=m
CONFIG_UFS_FS=m
+CONFIG_EROFS_FS=m
CONFIG_NFS_FS=y
CONFIG_NFS_V4=m
CONFIG_NFS_SWAP=y
@@ -542,10 +542,6 @@ CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_ECRDSA=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_AEGIS128=m
-CONFIG_CRYPTO_AEGIS128L=m
-CONFIG_CRYPTO_AEGIS256=m
-CONFIG_CRYPTO_MORUS640=m
-CONFIG_CRYPTO_MORUS1280=m
CONFIG_CRYPTO_CFB=m
CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_LRW=m
diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig
index 91154d6acb31..4ffc1e5646d5 100644
--- a/arch/m68k/configs/bvme6000_defconfig
+++ b/arch/m68k/configs/bvme6000_defconfig
@@ -332,6 +332,7 @@ CONFIG_DM_SNAPSHOT=m
CONFIG_DM_THIN_PROVISIONING=m
CONFIG_DM_WRITECACHE=m
CONFIG_DM_ERA=m
+CONFIG_DM_CLONE=m
CONFIG_DM_MIRROR=m
CONFIG_DM_RAID=m
CONFIG_DM_ZERO=m
@@ -390,8 +391,6 @@ CONFIG_NTP_PPS=y
CONFIG_PPS_CLIENT_LDISC=m
CONFIG_PTP_1588_CLOCK=m
# CONFIG_HWMON is not set
-# CONFIG_LCD_CLASS_DEVICE is not set
-# CONFIG_BACKLIGHT_CLASS_DEVICE is not set
CONFIG_HID=m
CONFIG_HIDRAW=y
CONFIG_UHID=m
@@ -443,6 +442,7 @@ CONFIG_QNX4FS_FS=m
CONFIG_QNX6FS_FS=m
CONFIG_SYSV_FS=m
CONFIG_UFS_FS=m
+CONFIG_EROFS_FS=m
CONFIG_NFS_FS=y
CONFIG_NFS_V4=m
CONFIG_NFS_SWAP=y
@@ -513,10 +513,6 @@ CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_ECRDSA=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_AEGIS128=m
-CONFIG_CRYPTO_AEGIS128L=m
-CONFIG_CRYPTO_AEGIS256=m
-CONFIG_CRYPTO_MORUS640=m
-CONFIG_CRYPTO_MORUS1280=m
CONFIG_CRYPTO_CFB=m
CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_LRW=m
diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig
index c398c4a94d95..806da3d97ca4 100644
--- a/arch/m68k/configs/hp300_defconfig
+++ b/arch/m68k/configs/hp300_defconfig
@@ -333,6 +333,7 @@ CONFIG_DM_SNAPSHOT=m
CONFIG_DM_THIN_PROVISIONING=m
CONFIG_DM_WRITECACHE=m
CONFIG_DM_ERA=m
+CONFIG_DM_CLONE=m
CONFIG_DM_MIRROR=m
CONFIG_DM_RAID=m
CONFIG_DM_ZERO=m
@@ -395,8 +396,6 @@ CONFIG_PPS_CLIENT_LDISC=m
CONFIG_PTP_1588_CLOCK=m
# CONFIG_HWMON is not set
CONFIG_FB=y
-# CONFIG_LCD_CLASS_DEVICE is not set
-# CONFIG_BACKLIGHT_CLASS_DEVICE is not set
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_LOGO=y
# CONFIG_LOGO_LINUX_MONO is not set
@@ -452,6 +451,7 @@ CONFIG_QNX4FS_FS=m
CONFIG_QNX6FS_FS=m
CONFIG_SYSV_FS=m
CONFIG_UFS_FS=m
+CONFIG_EROFS_FS=m
CONFIG_NFS_FS=y
CONFIG_NFS_V4=m
CONFIG_NFS_SWAP=y
@@ -522,10 +522,6 @@ CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_ECRDSA=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_AEGIS128=m
-CONFIG_CRYPTO_AEGIS128L=m
-CONFIG_CRYPTO_AEGIS256=m
-CONFIG_CRYPTO_MORUS640=m
-CONFIG_CRYPTO_MORUS1280=m
CONFIG_CRYPTO_CFB=m
CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_LRW=m
diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig
index 350d004559be..250da20e291c 100644
--- a/arch/m68k/configs/mac_defconfig
+++ b/arch/m68k/configs/mac_defconfig
@@ -342,6 +342,7 @@ CONFIG_DM_SNAPSHOT=m
CONFIG_DM_THIN_PROVISIONING=m
CONFIG_DM_WRITECACHE=m
CONFIG_DM_ERA=m
+CONFIG_DM_CLONE=m
CONFIG_DM_MIRROR=m
CONFIG_DM_RAID=m
CONFIG_DM_ZERO=m
@@ -419,8 +420,6 @@ CONFIG_PTP_1588_CLOCK=m
CONFIG_FB=y
CONFIG_FB_VALKYRIE=y
CONFIG_FB_MAC=y
-# CONFIG_LCD_CLASS_DEVICE is not set
-# CONFIG_BACKLIGHT_CLASS_DEVICE is not set
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_LOGO=y
CONFIG_HID=m
@@ -474,6 +473,7 @@ CONFIG_QNX4FS_FS=m
CONFIG_QNX6FS_FS=m
CONFIG_SYSV_FS=m
CONFIG_UFS_FS=m
+CONFIG_EROFS_FS=m
CONFIG_NFS_FS=y
CONFIG_NFS_V4=m
CONFIG_NFS_SWAP=y
@@ -544,10 +544,6 @@ CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_ECRDSA=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_AEGIS128=m
-CONFIG_CRYPTO_AEGIS128L=m
-CONFIG_CRYPTO_AEGIS256=m
-CONFIG_CRYPTO_MORUS640=m
-CONFIG_CRYPTO_MORUS1280=m
CONFIG_CRYPTO_CFB=m
CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_LRW=m
diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig
index b838dd820348..b764a0368a56 100644
--- a/arch/m68k/configs/multi_defconfig
+++ b/arch/m68k/configs/multi_defconfig
@@ -386,6 +386,7 @@ CONFIG_DM_SNAPSHOT=m
CONFIG_DM_THIN_PROVISIONING=m
CONFIG_DM_WRITECACHE=m
CONFIG_DM_ERA=m
+CONFIG_DM_CLONE=m
CONFIG_DM_MIRROR=m
CONFIG_DM_RAID=m
CONFIG_DM_ZERO=m
@@ -480,11 +481,15 @@ CONFIG_SERIAL_PMACZILOG_TTYS=y
CONFIG_SERIAL_PMACZILOG_CONSOLE=y
CONFIG_PRINTER=m
# CONFIG_HW_RANDOM is not set
+CONFIG_I2C=m
+CONFIG_I2C_CHARDEV=m
+CONFIG_I2C_ICY=m
CONFIG_NTP_PPS=y
CONFIG_PPS_CLIENT_LDISC=m
CONFIG_PPS_CLIENT_PARPORT=m
CONFIG_PTP_1588_CLOCK=m
-# CONFIG_HWMON is not set
+CONFIG_HWMON=m
+CONFIG_SENSORS_LTC2990=m
CONFIG_FB=y
CONFIG_FB_CIRRUS=y
CONFIG_FB_AMIGA=y
@@ -495,8 +500,6 @@ CONFIG_FB_FM2=y
CONFIG_FB_ATARI=y
CONFIG_FB_VALKYRIE=y
CONFIG_FB_MAC=y
-# CONFIG_LCD_CLASS_DEVICE is not set
-# CONFIG_BACKLIGHT_CLASS_DEVICE is not set
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_LOGO=y
CONFIG_SOUND=m
@@ -556,6 +559,7 @@ CONFIG_QNX4FS_FS=m
CONFIG_QNX6FS_FS=m
CONFIG_SYSV_FS=m
CONFIG_UFS_FS=m
+CONFIG_EROFS_FS=m
CONFIG_NFS_FS=y
CONFIG_NFS_V4=m
CONFIG_NFS_SWAP=y
@@ -626,10 +630,6 @@ CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_ECRDSA=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_AEGIS128=m
-CONFIG_CRYPTO_AEGIS128L=m
-CONFIG_CRYPTO_AEGIS256=m
-CONFIG_CRYPTO_MORUS640=m
-CONFIG_CRYPTO_MORUS1280=m
CONFIG_CRYPTO_CFB=m
CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_LRW=m
diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig
index 3f8dd61559cf..7800d3a8d46e 100644
--- a/arch/m68k/configs/mvme147_defconfig
+++ b/arch/m68k/configs/mvme147_defconfig
@@ -331,6 +331,7 @@ CONFIG_DM_SNAPSHOT=m
CONFIG_DM_THIN_PROVISIONING=m
CONFIG_DM_WRITECACHE=m
CONFIG_DM_ERA=m
+CONFIG_DM_CLONE=m
CONFIG_DM_MIRROR=m
CONFIG_DM_RAID=m
CONFIG_DM_ZERO=m
@@ -389,8 +390,6 @@ CONFIG_NTP_PPS=y
CONFIG_PPS_CLIENT_LDISC=m
CONFIG_PTP_1588_CLOCK=m
# CONFIG_HWMON is not set
-# CONFIG_LCD_CLASS_DEVICE is not set
-# CONFIG_BACKLIGHT_CLASS_DEVICE is not set
CONFIG_HID=m
CONFIG_HIDRAW=y
CONFIG_UHID=m
@@ -442,6 +441,7 @@ CONFIG_QNX4FS_FS=m
CONFIG_QNX6FS_FS=m
CONFIG_SYSV_FS=m
CONFIG_UFS_FS=m
+CONFIG_EROFS_FS=m
CONFIG_NFS_FS=y
CONFIG_NFS_V4=m
CONFIG_NFS_SWAP=y
@@ -512,10 +512,6 @@ CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_ECRDSA=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_AEGIS128=m
-CONFIG_CRYPTO_AEGIS128L=m
-CONFIG_CRYPTO_AEGIS256=m
-CONFIG_CRYPTO_MORUS640=m
-CONFIG_CRYPTO_MORUS1280=m
CONFIG_CRYPTO_CFB=m
CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_LRW=m
diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig
index ae3b2d4f636c..c32dc2d2058d 100644
--- a/arch/m68k/configs/mvme16x_defconfig
+++ b/arch/m68k/configs/mvme16x_defconfig
@@ -332,6 +332,7 @@ CONFIG_DM_SNAPSHOT=m
CONFIG_DM_THIN_PROVISIONING=m
CONFIG_DM_WRITECACHE=m
CONFIG_DM_ERA=m
+CONFIG_DM_CLONE=m
CONFIG_DM_MIRROR=m
CONFIG_DM_RAID=m
CONFIG_DM_ZERO=m
@@ -390,8 +391,6 @@ CONFIG_NTP_PPS=y
CONFIG_PPS_CLIENT_LDISC=m
CONFIG_PTP_1588_CLOCK=m
# CONFIG_HWMON is not set
-# CONFIG_LCD_CLASS_DEVICE is not set
-# CONFIG_BACKLIGHT_CLASS_DEVICE is not set
CONFIG_HID=m
CONFIG_HIDRAW=y
CONFIG_UHID=m
@@ -443,6 +442,7 @@ CONFIG_QNX4FS_FS=m
CONFIG_QNX6FS_FS=m
CONFIG_SYSV_FS=m
CONFIG_UFS_FS=m
+CONFIG_EROFS_FS=m
CONFIG_NFS_FS=y
CONFIG_NFS_V4=m
CONFIG_NFS_SWAP=y
@@ -513,10 +513,6 @@ CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_ECRDSA=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_AEGIS128=m
-CONFIG_CRYPTO_AEGIS128L=m
-CONFIG_CRYPTO_AEGIS256=m
-CONFIG_CRYPTO_MORUS640=m
-CONFIG_CRYPTO_MORUS1280=m
CONFIG_CRYPTO_CFB=m
CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_LRW=m
diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig
index cd61ef14b582..bf0a65ce57e0 100644
--- a/arch/m68k/configs/q40_defconfig
+++ b/arch/m68k/configs/q40_defconfig
@@ -339,6 +339,7 @@ CONFIG_DM_SNAPSHOT=m
CONFIG_DM_THIN_PROVISIONING=m
CONFIG_DM_WRITECACHE=m
CONFIG_DM_ERA=m
+CONFIG_DM_CLONE=m
CONFIG_DM_MIRROR=m
CONFIG_DM_RAID=m
CONFIG_DM_ZERO=m
@@ -404,8 +405,6 @@ CONFIG_PPS_CLIENT_PARPORT=m
CONFIG_PTP_1588_CLOCK=m
# CONFIG_HWMON is not set
CONFIG_FB=y
-# CONFIG_LCD_CLASS_DEVICE is not set
-# CONFIG_BACKLIGHT_CLASS_DEVICE is not set
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_LOGO=y
CONFIG_SOUND=m
@@ -461,6 +460,7 @@ CONFIG_QNX4FS_FS=m
CONFIG_QNX6FS_FS=m
CONFIG_SYSV_FS=m
CONFIG_UFS_FS=m
+CONFIG_EROFS_FS=m
CONFIG_NFS_FS=y
CONFIG_NFS_V4=m
CONFIG_NFS_SWAP=y
@@ -531,10 +531,6 @@ CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_ECRDSA=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_AEGIS128=m
-CONFIG_CRYPTO_AEGIS128L=m
-CONFIG_CRYPTO_AEGIS256=m
-CONFIG_CRYPTO_MORUS640=m
-CONFIG_CRYPTO_MORUS1280=m
CONFIG_CRYPTO_CFB=m
CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_LRW=m
diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig
index 151f5371cd3d..5f3cfa2926d2 100644
--- a/arch/m68k/configs/sun3_defconfig
+++ b/arch/m68k/configs/sun3_defconfig
@@ -329,6 +329,7 @@ CONFIG_DM_SNAPSHOT=m
CONFIG_DM_THIN_PROVISIONING=m
CONFIG_DM_WRITECACHE=m
CONFIG_DM_ERA=m
+CONFIG_DM_CLONE=m
CONFIG_DM_MIRROR=m
CONFIG_DM_RAID=m
CONFIG_DM_ZERO=m
@@ -390,8 +391,6 @@ CONFIG_PPS_CLIENT_LDISC=m
CONFIG_PTP_1588_CLOCK=m
# CONFIG_HWMON is not set
CONFIG_FB=y
-# CONFIG_LCD_CLASS_DEVICE is not set
-# CONFIG_BACKLIGHT_CLASS_DEVICE is not set
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_LOGO=y
CONFIG_HID=m
@@ -445,6 +444,7 @@ CONFIG_QNX4FS_FS=m
CONFIG_QNX6FS_FS=m
CONFIG_SYSV_FS=m
CONFIG_UFS_FS=m
+CONFIG_EROFS_FS=m
CONFIG_NFS_FS=y
CONFIG_NFS_V4=m
CONFIG_NFS_SWAP=y
@@ -515,10 +515,6 @@ CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_ECRDSA=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_AEGIS128=m
-CONFIG_CRYPTO_AEGIS128L=m
-CONFIG_CRYPTO_AEGIS256=m
-CONFIG_CRYPTO_MORUS640=m
-CONFIG_CRYPTO_MORUS1280=m
CONFIG_CRYPTO_CFB=m
CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_LRW=m
diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig
index 1dcb0ee1fe98..58354d2018d5 100644
--- a/arch/m68k/configs/sun3x_defconfig
+++ b/arch/m68k/configs/sun3x_defconfig
@@ -329,6 +329,7 @@ CONFIG_DM_SNAPSHOT=m
CONFIG_DM_THIN_PROVISIONING=m
CONFIG_DM_WRITECACHE=m
CONFIG_DM_ERA=m
+CONFIG_DM_CLONE=m
CONFIG_DM_MIRROR=m
CONFIG_DM_RAID=m
CONFIG_DM_ZERO=m
@@ -389,8 +390,6 @@ CONFIG_PPS_CLIENT_LDISC=m
CONFIG_PTP_1588_CLOCK=m
# CONFIG_HWMON is not set
CONFIG_FB=y
-# CONFIG_LCD_CLASS_DEVICE is not set
-# CONFIG_BACKLIGHT_CLASS_DEVICE is not set
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_LOGO=y
CONFIG_HID=m
@@ -444,6 +443,7 @@ CONFIG_QNX4FS_FS=m
CONFIG_QNX6FS_FS=m
CONFIG_SYSV_FS=m
CONFIG_UFS_FS=m
+CONFIG_EROFS_FS=m
CONFIG_NFS_FS=y
CONFIG_NFS_V4=m
CONFIG_NFS_SWAP=y
@@ -514,10 +514,6 @@ CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_ECRDSA=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_AEGIS128=m
-CONFIG_CRYPTO_AEGIS128L=m
-CONFIG_CRYPTO_AEGIS256=m
-CONFIG_CRYPTO_MORUS640=m
-CONFIG_CRYPTO_MORUS1280=m
CONFIG_CRYPTO_CFB=m
CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_LRW=m
diff --git a/arch/m68k/kernel/vmlinux-nommu.lds b/arch/m68k/kernel/vmlinux-nommu.lds
index cf6edda38971..7b975420c3d9 100644
--- a/arch/m68k/kernel/vmlinux-nommu.lds
+++ b/arch/m68k/kernel/vmlinux-nommu.lds
@@ -60,8 +60,8 @@ SECTIONS {
#endif
_sdata = .;
- RO_DATA_SECTION(PAGE_SIZE)
- RW_DATA_SECTION(16, PAGE_SIZE, THREAD_SIZE)
+ RO_DATA(PAGE_SIZE)
+ RW_DATA(16, PAGE_SIZE, THREAD_SIZE)
_edata = .;
EXCEPTION_TABLE(16)
diff --git a/arch/m68k/kernel/vmlinux-std.lds b/arch/m68k/kernel/vmlinux-std.lds
index 625a5785804f..4d33da4e7106 100644
--- a/arch/m68k/kernel/vmlinux-std.lds
+++ b/arch/m68k/kernel/vmlinux-std.lds
@@ -31,9 +31,9 @@ SECTIONS
_sdata = .; /* Start of data section */
- RODATA
+ RO_DATA(4096)
- RW_DATA_SECTION(16, PAGE_SIZE, THREAD_SIZE)
+ RW_DATA(16, PAGE_SIZE, THREAD_SIZE)
BSS_SECTION(0, 0, 0)
diff --git a/arch/m68k/kernel/vmlinux-sun3.lds b/arch/m68k/kernel/vmlinux-sun3.lds
index 9868270b0984..87d9f4d08f65 100644
--- a/arch/m68k/kernel/vmlinux-sun3.lds
+++ b/arch/m68k/kernel/vmlinux-sun3.lds
@@ -24,13 +24,13 @@ SECTIONS
*(.fixup)
*(.gnu.warning)
} :text = 0x4e75
- RODATA
+ RO_DATA(4096)
_etext = .; /* End of text section */
EXCEPTION_TABLE(16) :data
_sdata = .; /* Start of rw data section */
- RW_DATA_SECTION(16, PAGE_SIZE, THREAD_SIZE) :data
+ RW_DATA(16, PAGE_SIZE, THREAD_SIZE) :data
/* End of data goes *here* so that freeing init code works properly. */
_edata = .;
NOTES
diff --git a/arch/m68k/q40/config.c b/arch/m68k/q40/config.c
index e63eb5f06999..f31890078197 100644
--- a/arch/m68k/q40/config.c
+++ b/arch/m68k/q40/config.c
@@ -264,6 +264,7 @@ static int q40_get_rtc_pll(struct rtc_pll_info *pll)
{
int tmp = Q40_RTC_CTRL;
+ pll->pll_ctrl = 0;
pll->pll_value = tmp & Q40_RTC_PLL_MASK;
if (tmp & Q40_RTC_PLL_SIGN)
pll->pll_value = -pll->pll_value;
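The added pll->pll_ctrl = 0 matters because the structure is later copied wholesale to userspace by the RTC ioctl path, so any field the handler never writes would leak kernel stack contents. The defensive shape, sketched around a hypothetical getter and user pointer:

struct rtc_pll_info pll;

memset(&pll, 0, sizeof(pll));   /* no uninitialized fields or padding */
if (get_pll(&pll))              /* hypothetical getter, e.g. q40's */
        return -EIO;
if (copy_to_user(uarg, &pll, sizeof(pll)))      /* uarg: user pointer */
        return -EFAULT;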
diff --git a/arch/microblaze/kernel/vmlinux.lds.S b/arch/microblaze/kernel/vmlinux.lds.S
index e1f3e8741292..760cac41cbfe 100644
--- a/arch/microblaze/kernel/vmlinux.lds.S
+++ b/arch/microblaze/kernel/vmlinux.lds.S
@@ -11,6 +11,8 @@
OUTPUT_ARCH(microblaze)
ENTRY(microblaze_start)
+#define RO_EXCEPTION_TABLE_ALIGN 16
+
#include <asm/page.h>
#include <asm-generic/vmlinux.lds.h>
#include <asm/thread_info.h>
@@ -51,9 +53,7 @@ SECTIONS {
}
. = ALIGN(16);
- RODATA
- EXCEPTION_TABLE(16)
- NOTES
+ RO_DATA(4096)
/*
* sdata2 section can go anywhere, but must be word aligned
@@ -70,7 +70,7 @@ SECTIONS {
}
_sdata = . ;
- RW_DATA_SECTION(32, PAGE_SIZE, THREAD_SIZE)
+ RW_DATA(32, PAGE_SIZE, THREAD_SIZE)
_edata = . ;
/* Under the microblaze ABI, .sdata and .sbss must be contiguous */
diff --git a/arch/mips/Kbuild.platforms b/arch/mips/Kbuild.platforms
index 0de839882106..a69b272a3ab0 100644
--- a/arch/mips/Kbuild.platforms
+++ b/arch/mips/Kbuild.platforms
@@ -17,6 +17,7 @@ platforms += jazz
platforms += jz4740
platforms += lantiq
platforms += lasat
+platforms += loongson2ef
platforms += loongson32
platforms += loongson64
platforms += mti-malta
@@ -30,6 +31,7 @@ platforms += ralink
platforms += rb532
platforms += sgi-ip22
platforms += sgi-ip27
+platforms += sgi-ip30
platforms += sgi-ip32
platforms += sibyte
platforms += sni
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index a0bd9bdb5f83..c86be02b6d89 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -7,6 +7,7 @@ config MIPS
select ARCH_CLOCKSOURCE_DATA
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_HAS_UBSAN_SANITIZE_ALL
+ select ARCH_HAS_FORTIFY_SOURCE
select ARCH_SUPPORTS_UPROBES
select ARCH_USE_BUILTIN_BSWAP
select ARCH_USE_CMPXCHG_LOCKREF if 64BIT
@@ -86,6 +87,8 @@ config MIPS
select SYSCTL_EXCEPTION_TRACE
select VIRT_TO_BUS
select ARCH_HAS_PTE_SPECIAL if !(32BIT && CPU_HAS_RIXI)
+ select ARCH_HAS_KCOV
+ select HAVE_GCC_PLUGINS
menu "Machine selection"
@@ -359,6 +362,8 @@ config MACH_DECSTATION
config MACH_JAZZ
bool "Jazz family of machines"
+ select ARC_MEMORY
+ select ARC_PROMLIB
select ARCH_MIGHT_HAVE_PC_PARPORT
select ARCH_MIGHT_HAVE_PC_SERIO
select FW_ARC
@@ -441,7 +446,7 @@ config LASAT
select SYS_SUPPORTS_LITTLE_ENDIAN
config MACH_LOONGSON32
- bool "Loongson-1 family of machines"
+ bool "Loongson 32-bit family of machines"
select SYS_SUPPORTS_ZBOOT
help
This enables support for the Loongson-1 family of machines.
@@ -450,18 +455,48 @@ config MACH_LOONGSON32
the Institute of Computing Technology (ICT), Chinese Academy of
Sciences (CAS).
+config MACH_LOONGSON2EF
+ bool "Loongson-2E/F family of machines"
+ select SYS_SUPPORTS_ZBOOT
+ help
+ This enables support for the early Loongson-2E/F family of machines.
+
config MACH_LOONGSON64
- bool "Loongson-2/3 family of machines"
+ bool "Loongson 64-bit family of machines"
+ select ARCH_SPARSEMEM_ENABLE
+ select ARCH_MIGHT_HAVE_PC_PARPORT
+ select ARCH_MIGHT_HAVE_PC_SERIO
+ select GENERIC_ISA_DMA_SUPPORT_BROKEN
+ select BOOT_ELF32
+ select BOARD_SCACHE
+ select CSRC_R4K
+ select CEVT_R4K
+ select CPU_HAS_WB
+ select FORCE_PCI
+ select ISA
+ select I8259
+ select IRQ_MIPS_CPU
+ select NR_CPUS_DEFAULT_4
+ select USE_GENERIC_EARLY_PRINTK_8250
+ select SYS_HAS_CPU_LOONGSON64
+ select SYS_HAS_EARLY_PRINTK
+ select SYS_SUPPORTS_SMP
+ select SYS_SUPPORTS_HOTPLUG_CPU
+ select SYS_SUPPORTS_NUMA
+ select SYS_SUPPORTS_64BIT_KERNEL
+ select SYS_SUPPORTS_HIGHMEM
+ select SYS_SUPPORTS_LITTLE_ENDIAN
select SYS_SUPPORTS_ZBOOT
+ select LOONGSON_MC146818
+ select ZONE_DMA32
+ select NUMA
help
This enables support for the Loongson-2/3 family of machines.
- Loongson-2 is a family of single-core CPUs and Loongson-3 is a
- family of multi-core CPUs. They are both 64-bit general-purpose
- MIPS-compatible CPUs. Loongson-2/3 are developed by the Institute
- of Computing Technology (ICT), Chinese Academy of Sciences (CAS)
- in the People's Republic of China. The chief architect is Professor
- Weiwu Hu.
+ Loongson-2 and Loongson-3 are 64-bit general-purpose processors built
+ on the GS264/GS464/GS464E/GS464V microarchitectures (excluding the old
+ Loongson-2E and Loongson-2F, which will be removed), developed by the
+ Institute of Computing Technology (ICT), Chinese Academy of Sciences
+ (CAS).
config MACH_PISTACHIO
bool "IMG Pistachio SoC based boards"
@@ -631,6 +666,8 @@ config RALINK
config SGI_IP22
bool "SGI IP22 (Indy/Indigo2)"
+ select ARC_MEMORY
+ select ARC_PROMLIB
select FW_ARC
select FW_ARC32
select ARCH_MIGHT_HAVE_PC_SERIO
@@ -654,14 +691,7 @@ config SGI_IP22
select SWAP_IO_SPACE
select SYS_HAS_CPU_R4X00
select SYS_HAS_CPU_R5000
- #
- # Disable EARLY_PRINTK for now since it leads to overwritten prom
- # memory during early boot on some machines.
- #
- # See http://www.linux-mips.org/cgi-bin/mesg.cgi?a=linux-mips&i=20091119164009.GA15038%40deprecation.cyrius.com
- # for a more details discussion
- #
- # select SYS_HAS_EARLY_PRINTK
+ select SYS_HAS_EARLY_PRINTK
select SYS_SUPPORTS_32BIT_KERNEL
select SYS_SUPPORTS_64BIT_KERNEL
select SYS_SUPPORTS_BIG_ENDIAN
@@ -674,8 +704,10 @@ config SGI_IP22
config SGI_IP27
bool "SGI IP27 (Origin200/2000)"
select ARCH_HAS_PHYS_TO_DMA
+ select ARCH_SPARSEMEM_ENABLE
select FW_ARC
select FW_ARC64
+ select ARC_CMDLINE_ONLY
select BOOT_ELF64
select DEFAULT_SGI_PARTITION
select SYS_HAS_EARLY_PRINTK
@@ -698,6 +730,8 @@ config SGI_IP27
config SGI_IP28
bool "SGI IP28 (Indigo2 R10k)"
+ select ARC_MEMORY
+ select ARC_PROMLIB
select FW_ARC
select FW_ARC64
select ARCH_MIGHT_HAVE_PC_SERIO
@@ -719,14 +753,7 @@ config SGI_IP28
select SGI_HAS_ZILOG
select SWAP_IO_SPACE
select SYS_HAS_CPU_R10000
- #
- # Disable EARLY_PRINTK for now since it leads to overwritten prom
- # memory during early boot on some machines.
- #
- # See http://www.linux-mips.org/cgi-bin/mesg.cgi?a=linux-mips&i=20091119164009.GA15038%40deprecation.cyrius.com
- # for a more details discussion
- #
- # select SYS_HAS_EARLY_PRINTK
+ select SYS_HAS_EARLY_PRINTK
select SYS_SUPPORTS_64BIT_KERNEL
select SYS_SUPPORTS_BIG_ENDIAN
select MIPS_L1_CACHE_SHIFT_7
@@ -734,8 +761,37 @@ config SGI_IP28
This is the SGI Indigo2 with R10000 processor. To compile a Linux
kernel that runs on these, say Y here.
+config SGI_IP30
+ bool "SGI IP30 (Octane/Octane2)"
+ select ARCH_HAS_PHYS_TO_DMA
+ select FW_ARC
+ select FW_ARC64
+ select BOOT_ELF64
+ select CEVT_R4K
+ select CSRC_R4K
+ select SYNC_R4K if SMP
+ select ZONE_DMA32
+ select HAVE_PCI
+ select IRQ_MIPS_CPU
+ select IRQ_DOMAIN_HIERARCHY
+ select NR_CPUS_DEFAULT_2
+ select PCI_DRIVERS_GENERIC
+ select PCI_XTALK_BRIDGE
+ select SYS_HAS_EARLY_PRINTK
+ select SYS_HAS_CPU_R10000
+ select SYS_SUPPORTS_64BIT_KERNEL
+ select SYS_SUPPORTS_BIG_ENDIAN
+ select SYS_SUPPORTS_SMP
+ select MIPS_L1_CACHE_SHIFT_7
+ select ARC_MEMORY
+ help
+ These are the SGI Octane and Octane2 graphics workstations. To
+ compile a Linux kernel that runs on these, say Y here.
+
config SGI_IP32
bool "SGI IP32 (O2)"
+ select ARC_MEMORY
+ select ARC_PROMLIB
select ARCH_HAS_PHYS_TO_DMA
select FW_ARC
select FW_ARC32
@@ -843,6 +899,8 @@ config SIBYTE_BIGSUR
config SNI_RM
bool "SNI RM200/300/400"
+ select ARC_MEMORY
+ select ARC_PROMLIB
select FW_ARC if CPU_LITTLE_ENDIAN
select FW_ARC32 if CPU_LITTLE_ENDIAN
select FW_SNIPROM if CPU_BIG_ENDIAN
@@ -1038,6 +1096,7 @@ source "arch/mips/sibyte/Kconfig"
source "arch/mips/txx9/Kconfig"
source "arch/mips/vr41xx/Kconfig"
source "arch/mips/cavium-octeon/Kconfig"
+source "arch/mips/loongson2ef/Kconfig"
source "arch/mips/loongson32/Kconfig"
source "arch/mips/loongson64/Kconfig"
source "arch/mips/netlogic/Kconfig"
@@ -1353,19 +1412,18 @@ config MIPS_L1_CACHE_SHIFT
config HAVE_STD_PC_SERIAL_PORT
bool
+config ARC_CMDLINE_ONLY
+ bool
+
config ARC_CONSOLE
bool "ARC console support"
depends on SGI_IP22 || SGI_IP28 || (SNI_RM && CPU_LITTLE_ENDIAN)
config ARC_MEMORY
bool
- depends on MACH_JAZZ || SNI_RM || SGI_IP32
- default y
config ARC_PROMLIB
bool
- depends on MACH_JAZZ || SNI_RM || SGI_IP22 || SGI_IP28 || SGI_IP32
- default y
config FW_ARC64
bool
@@ -1379,51 +1437,56 @@ choice
prompt "CPU type"
default CPU_R4X00
-config CPU_LOONGSON3
- bool "Loongson 3 CPU"
- depends on SYS_HAS_CPU_LOONGSON3
+config CPU_LOONGSON64
+ bool "Loongson 64-bit CPU"
+ depends on SYS_HAS_CPU_LOONGSON64
select ARCH_HAS_PHYS_TO_DMA
select CPU_SUPPORTS_64BIT_KERNEL
select CPU_SUPPORTS_HIGHMEM
select CPU_SUPPORTS_HUGEPAGES
+ select CPU_SUPPORTS_MSA
select CPU_HAS_LOAD_STORE_LR
select WEAK_ORDERING
select WEAK_REORDERING_BEYOND_LLSC
+ select MIPS_ASID_BITS_VARIABLE
select MIPS_PGD_C0_CONTEXT
select MIPS_L1_CACHE_SHIFT_6
select GPIOLIB
select SWIOTLB
help
- The Loongson 3 processor implements the MIPS64R2 instruction
- set with many extensions.
+ The Loongson GSx64 (GS264/GS464/GS464E/GS464V) series of processor
+ cores implements the MIPS64R2 instruction set with many extensions.
+ This covers most 64-bit Loongson-2 (2H, 2K) and Loongson-3 (3A1000,
+ 3B1000, 3B1500, 3A2000, 3A3000 and 3A4000) processors. The old
+ Loongson-2E/2F cores are not covered here and will be removed in the
+ future.
config LOONGSON3_ENHANCEMENT
- bool "New Loongson 3 CPU Enhancements"
+ bool "New Loongson-3 CPU Enhancements"
default n
select CPU_MIPSR2
select CPU_HAS_PREFETCH
- depends on CPU_LOONGSON3
+ depends on CPU_LOONGSON64
help
- New Loongson 3 CPU (since Loongson-3A R2, as opposed to Loongson-3A
+ New Loongson-3 cores (since Loongson-3A R2, as opposed to Loongson-3A
R1, Loongson-3B R1 and Loongson-3B R2) have many enhancements, such as
- FTLB, L1-VCache, EI/DI/Wait/Prefetch instruction, DSP/DSPv2 ASE, User
+ FTLB, L1-VCache, EI/DI/Wait/Prefetch instruction, DSP/DSPr2 ASE, User
Local register, Read-Inhibit/Execute-Inhibit, SFB (Store Fill Buffer),
Fast TLB refill support, etc.
This option enables those enhancements that are not probed at run
time. If you want a generic kernel to run on all Loongson 3 machines,
please say 'N' here. If you want a high-performance kernel to run on
- new Loongson 3 machines only, please say 'Y' here.
+ new Loongson-3 machines only, please say 'Y' here.
config CPU_LOONGSON3_WORKAROUNDS
- bool "Old Loongson 3 LLSC Workarounds"
+ bool "Old Loongson-3 LLSC Workarounds"
default y if SMP
- depends on CPU_LOONGSON3
+ depends on CPU_LOONGSON64
help
- Loongson 3 processors have the llsc issues which require workarounds.
+ Loongson-3 processors have LL/SC issues that require workarounds.
Without workarounds the system may hang unexpectedly.
- Newer Loongson 3 will fix these issues and no workarounds are needed.
+ Newer Loongson-3 will fix these issues and no workarounds are needed.
The workarounds have no significant side effect on these systems, but
they may decrease performance, so this option should be disabled unless
the kernel is intended to be run on old systems.
@@ -1433,7 +1496,7 @@ config CPU_LOONGSON3_WORKAROUNDS
config CPU_LOONGSON2E
bool "Loongson 2E"
depends on SYS_HAS_CPU_LOONGSON2E
- select CPU_LOONGSON2
+ select CPU_LOONGSON2EF
help
The Loongson 2E processor implements the MIPS III instruction set
with many extensions.
@@ -1444,7 +1507,7 @@ config CPU_LOONGSON2E
config CPU_LOONGSON2F
bool "Loongson 2F"
depends on SYS_HAS_CPU_LOONGSON2F
- select CPU_LOONGSON2
+ select CPU_LOONGSON2EF
select GPIOLIB
help
The Loongson 2F processor implements the MIPS III instruction set
@@ -1457,7 +1520,7 @@ config CPU_LOONGSON2F
config CPU_LOONGSON1B
bool "Loongson 1B"
depends on SYS_HAS_CPU_LOONGSON1B
- select CPU_LOONGSON1
+ select CPU_LOONGSON32
select LEDS_GPIO_REGISTER
help
The Loongson 1B is a 32-bit SoC, which implements the MIPS32
@@ -1467,7 +1530,7 @@ config CPU_LOONGSON1B
config CPU_LOONGSON1C
bool "Loongson 1C"
depends on SYS_HAS_CPU_LOONGSON1C
- select CPU_LOONGSON1
+ select CPU_LOONGSON32
select LEDS_GPIO_REGISTER
help
The Loongson 1C is a 32-bit SoC, which implements the MIPS32
@@ -1857,7 +1920,7 @@ config SYS_SUPPORTS_ZBOOT_UART_PROM
bool
select SYS_SUPPORTS_ZBOOT
-config CPU_LOONGSON2
+config CPU_LOONGSON2EF
bool
select CPU_SUPPORTS_32BIT_KERNEL
select CPU_SUPPORTS_64BIT_KERNEL
@@ -1866,7 +1929,7 @@ config CPU_LOONGSON2
select ARCH_HAS_PHYS_TO_DMA
select CPU_HAS_LOAD_STORE_LR
-config CPU_LOONGSON1
+config CPU_LOONGSON32
bool
select CPU_MIPS32
select CPU_MIPSR2
@@ -1900,7 +1963,7 @@ config CPU_BMIPS5000
select SYS_SUPPORTS_HOTPLUG_CPU
select CPU_HAS_RIXI
-config SYS_HAS_CPU_LOONGSON3
+config SYS_HAS_CPU_LOONGSON64
bool
select CPU_SUPPORTS_CPUFREQ
select CPU_HAS_RIXI
@@ -1912,7 +1975,6 @@ config SYS_HAS_CPU_LOONGSON2F
bool
select CPU_SUPPORTS_CPUFREQ
select CPU_SUPPORTS_ADDRWINCFG if 64BIT
- select CPU_SUPPORTS_UNCACHED_ACCELERATED
config SYS_HAS_CPU_LOONGSON1B
bool
@@ -2089,8 +2151,6 @@ config CPU_SUPPORTS_ADDRWINCFG
config CPU_SUPPORTS_HUGEPAGES
bool
depends on !(32BIT && (ARCH_PHYS_ADDR_T_64BIT || EVA))
-config CPU_SUPPORTS_UNCACHED_ACCELERATED
- bool
config MIPS_PGD_C0_CONTEXT
bool
default y if 64BIT && (CPU_MIPSR2 || CPU_MIPSR6) && !CPU_XLP
@@ -2162,7 +2222,7 @@ choice
config PAGE_SIZE_4KB
bool "4kB"
- depends on !CPU_LOONGSON2 && !CPU_LOONGSON3
+ depends on !CPU_LOONGSON2EF && !CPU_LOONGSON64
help
This option selects the standard 4kB Linux page size. On some
R3000-family processors this is the only available page size. Using
@@ -2554,6 +2614,10 @@ config CPU_R4000_WORKAROUNDS
config CPU_R4400_WORKAROUNDS
bool
+config CPU_R4X00_BUGS64
+ bool
+ default y if SYS_HAS_CPU_R4X00 && 64BIT && (TARGET_ISA_REV < 1)
+
config MIPS_ASID_SHIFT
int
default 6 if CPU_R3000 || CPU_TX39XX
@@ -2612,20 +2676,11 @@ config CPU_SUPPORTS_MSA
config ARCH_FLATMEM_ENABLE
def_bool y
- depends on !NUMA && !CPU_LOONGSON2
-
-config ARCH_DISCONTIGMEM_ENABLE
- bool
- default y if SGI_IP27
- help
- Say Y to support efficient handling of discontiguous physical memory,
- for architectures which are either NUMA (Non-Uniform Memory Access)
- or have huge holes in the physical address space for other reasons.
- See <file:Documentation/vm/numa.rst> for more.
+ depends on !NUMA && !CPU_LOONGSON2EF
config ARCH_SPARSEMEM_ENABLE
bool
- select SPARSEMEM_STATIC
+ select SPARSEMEM_STATIC if !SGI_IP27
config NUMA
bool "NUMA Support"
@@ -2702,7 +2757,7 @@ config NODES_SHIFT
config HW_PERF_EVENTS
bool "Enable hardware performance counter support for perf events"
- depends on PERF_EVENTS && !OPROFILE && (CPU_MIPS32 || CPU_MIPS64 || CPU_R10000 || CPU_SB1 || CPU_CAVIUM_OCTEON || CPU_XLP || CPU_LOONGSON3)
+ depends on PERF_EVENTS && !OPROFILE && (CPU_MIPS32 || CPU_MIPS64 || CPU_R10000 || CPU_SB1 || CPU_CAVIUM_OCTEON || CPU_XLP || CPU_LOONGSON64)
default y
help
Enable hardware performance counter support for perf events. If
diff --git a/arch/mips/Kconfig.debug b/arch/mips/Kconfig.debug
index 0c86b2a2adfc..93a2974d2ab7 100644
--- a/arch/mips/Kconfig.debug
+++ b/arch/mips/Kconfig.debug
@@ -32,7 +32,6 @@ config USE_GENERIC_EARLY_PRINTK_8250
config CMDLINE_BOOL
bool "Built-in kernel command line"
- default n
help
For most systems, it is firmware or second stage bootloader that
by default specifies the kernel command line options. However,
@@ -53,7 +52,6 @@ config CMDLINE_BOOL
config CMDLINE
string "Default kernel command string"
depends on CMDLINE_BOOL
- default ""
help
On some platforms, there is currently no way for the boot loader to
pass arguments to the kernel. For these platforms, and for the cases
@@ -68,7 +66,6 @@ config CMDLINE
config CMDLINE_OVERRIDE
bool "Built-in command line overrides firmware arguments"
- default n
depends on CMDLINE_BOOL
help
By setting this option to 'Y' you will have your kernel ignore
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index cdc09b71febe..e1c44aed8156 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -14,6 +14,9 @@
archscripts: scripts_basic
$(Q)$(MAKE) $(build)=arch/mips/tools elf-entry
+ifeq ($(CONFIG_CPU_LOONGSON3_WORKAROUNDS),y)
+ $(Q)$(MAKE) $(build)=arch/mips/tools loongson3-llsc-check
+endif
$(Q)$(MAKE) $(build)=arch/mips/boot/tools relocs
KBUILD_DEFCONFIG := 32r2el_defconfig
@@ -323,7 +326,7 @@ libs-$(CONFIG_MIPS_FP_SUPPORT) += arch/mips/math-emu/
# See arch/mips/Kbuild for content of core part of the kernel
core-y += arch/mips/
-drivers-$(CONFIG_MIPS_CRC_SUPPORT) += arch/mips/crypto/
+drivers-y += arch/mips/crypto/
drivers-$(CONFIG_OPROFILE) += arch/mips/oprofile/
# suspend and hibernation support
diff --git a/arch/mips/Makefile.postlink b/arch/mips/Makefile.postlink
index 4eea4188cb20..f03fdc95143e 100644
--- a/arch/mips/Makefile.postlink
+++ b/arch/mips/Makefile.postlink
@@ -3,7 +3,8 @@
# Post-link MIPS pass
# ===========================================================================
#
-# 1. Insert relocations into vmlinux
+# 1. Check that Loongson3 LL/SC workarounds are applied correctly
+# 2. Insert relocations into vmlinux
PHONY := __archpost
__archpost:
@@ -11,6 +12,10 @@ __archpost:
-include include/config/auto.conf
include scripts/Kbuild.include
+CMD_LS3_LLSC = arch/mips/tools/loongson3-llsc-check
+quiet_cmd_ls3_llsc = LLSCCHK $@
+ cmd_ls3_llsc = $(CMD_LS3_LLSC) $@
+
CMD_RELOCS = arch/mips/boot/tools/relocs
quiet_cmd_relocs = RELOCS $@
cmd_relocs = $(CMD_RELOCS) $@
@@ -19,6 +24,9 @@ quiet_cmd_relocs = RELOCS $@
vmlinux: FORCE
@true
+ifeq ($(CONFIG_CPU_LOONGSON3_WORKAROUNDS),y)
+ $(call if_changed,ls3_llsc)
+endif
ifeq ($(CONFIG_RELOCATABLE),y)
$(call if_changed,relocs)
endif
diff --git a/arch/mips/boot/dts/ingenic/ci20.dts b/arch/mips/boot/dts/ingenic/ci20.dts
index 2e9952311ecd..37b93166bf22 100644
--- a/arch/mips/boot/dts/ingenic/ci20.dts
+++ b/arch/mips/boot/dts/ingenic/ci20.dts
@@ -25,12 +25,47 @@
0x30000000 0x30000000>;
};
+ leds {
+ compatible = "gpio-leds";
+
+ led0 {
+ label = "ci20:red:led0";
+ gpios = <&gpc 3 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "none";
+ };
+
+ led1 {
+ label = "ci20:red:led1";
+ gpios = <&gpc 2 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "nand-disk";
+ };
+
+ led2 {
+ label = "ci20:red:led2";
+ gpios = <&gpc 1 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "cpu1";
+ };
+
+ led3 {
+ label = "ci20:red:led3";
+ gpios = <&gpc 0 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "cpu0";
+ };
+ };
+
eth0_power: fixedregulator@0 {
compatible = "regulator-fixed";
regulator-name = "eth0_power";
gpio = <&gpb 25 GPIO_ACTIVE_LOW>;
enable-active-high;
};
+
+ wlan0_power: fixedregulator@1 {
+ compatible = "regulator-fixed";
+ regulator-name = "wlan0_power";
+ gpio = <&gpb 19 GPIO_ACTIVE_LOW>;
+ enable-active-high;
+ };
};
&ext {
@@ -54,9 +89,18 @@
bus-width = <4>;
max-frequency = <50000000>;
+ non-removable;
pinctrl-names = "default";
pinctrl-0 = <&pins_mmc1>;
+
+ brcmf: wifi@1 {
+/* reg = <4>;*/
+ compatible = "brcm,bcm4330-fmac";
+ vcc-supply = <&wlan0_power>;
+ device-wakeup-gpios = <&gpd 9 GPIO_ACTIVE_HIGH>;
+ shutdown-gpios = <&gpf 7 GPIO_ACTIVE_LOW>;
+ };
};
&uart0 {
@@ -73,6 +117,23 @@
pinctrl-0 = <&pins_uart1>;
};
+&uart2 {
+ status = "okay";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pins_uart2>;
+ uart-has-rtscts;
+
+ bluetooth {
+ compatible = "brcm,bcm4330-bt";
+ reset-gpios = <&gpf 8 GPIO_ACTIVE_HIGH>;
+ vcc-supply = <&wlan0_power>;
+ device-wakeup-gpios = <&gpf 5 GPIO_ACTIVE_HIGH>;
+ host-wakeup-gpios = <&gpf 6 GPIO_ACTIVE_HIGH>;
+ shutdown-gpios = <&gpf 4 GPIO_ACTIVE_LOW>;
+ };
+};
+
&uart3 {
status = "okay";
@@ -87,6 +148,123 @@
pinctrl-0 = <&pins_uart4>;
};
+&i2c0 {
+ status = "okay";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pins_i2c0>;
+
+ clock-frequency = <400000>;
+
+ act8600: act8600@5a {
+ compatible = "active-semi,act8600";
+ reg = <0x5a>;
+ status = "okay";
+
+ regulators {
+ vddcore: SUDCDC1 {
+ regulator-name = "VDDCORE";
+ regulator-min-microvolt = <1100000>;
+ regulator-max-microvolt = <1100000>;
+ regulator-always-on;
+ };
+ vddmem: SUDCDC2 {
+ regulator-name = "VDDMEM";
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <1500000>;
+ regulator-always-on;
+ };
+ vcc_33: SUDCDC3 {
+ regulator-name = "VCC33";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+ vcc_50: SUDCDC4 {
+ regulator-name = "VCC50";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-always-on;
+ };
+ vcc_25: LDO_REG5 {
+ regulator-name = "VCC25";
+ regulator-min-microvolt = <2500000>;
+ regulator-max-microvolt = <2500000>;
+ regulator-always-on;
+ };
+ wifi_io: LDO_REG6 {
+ regulator-name = "WIFIIO";
+ regulator-min-microvolt = <2500000>;
+ regulator-max-microvolt = <2500000>;
+ regulator-always-on;
+ };
+ vcc_28: LDO_REG7 {
+ regulator-name = "VCC28";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-always-on;
+ };
+ vcc_15: LDO_REG8 {
+ regulator-name = "VCC15";
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <1500000>;
+ regulator-always-on;
+ };
+ vcc_18: LDO_REG9 {
+ regulator-name = "VCC18";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ };
+ vcc_11: LDO_REG10 {
+ regulator-name = "VCC11";
+ regulator-min-microvolt = <1100000>;
+ regulator-max-microvolt = <1100000>;
+ regulator-always-on;
+ };
+ };
+ };
+};
+
+&i2c1 {
+ status = "okay";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pins_i2c1>;
+
+};
+
+&i2c2 {
+ status = "okay";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pins_i2c2>;
+
+};
+
+&i2c3 {
+ status = "okay";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pins_i2c3>;
+
+};
+
+&i2c4 {
+ status = "okay";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pins_i2c4>;
+
+ clock-frequency = <400000>;
+
+ rtc@51 {
+ compatible = "nxp,pcf8563";
+ reg = <0x51>;
+ interrupts = <110>;
+ };
+};
+
&nemc {
status = "okay";
@@ -197,6 +375,12 @@
bias-disable;
};
+ pins_uart2: uart2 {
+ function = "uart2";
+ groups = "uart2-data", "uart2-hwflow";
+ bias-disable;
+ };
+
pins_uart3: uart3 {
function = "uart3";
groups = "uart3-data", "uart3-hwflow";
@@ -209,6 +393,36 @@
bias-disable;
};
+ pins_i2c0: i2c0 {
+ function = "i2c0";
+ groups = "i2c0-data";
+ bias-disable;
+ };
+
+ pins_i2c1: i2c1 {
+ function = "i2c1";
+ groups = "i2c1-data";
+ bias-disable;
+ };
+
+ pins_i2c2: i2c2 {
+ function = "i2c2";
+ groups = "i2c2-data";
+ bias-disable;
+ };
+
+ pins_i2c3: i2c3 {
+ function = "i2c3";
+ groups = "i2c3-data";
+ bias-disable;
+ };
+
+ pins_i2c4: i2c4 {
+ function = "i2c4";
+ groups = "i2c4-data-e";
+ bias-disable;
+ };
+
pins_nemc: nemc {
function = "nemc";
groups = "nemc-data", "nemc-cle-ale", "nemc-rd-we", "nemc-frd-fwe";
diff --git a/arch/mips/boot/dts/ingenic/jz4780.dtsi b/arch/mips/boot/dts/ingenic/jz4780.dtsi
index c54bd7cfec55..f928329b034b 100644
--- a/arch/mips/boot/dts/ingenic/jz4780.dtsi
+++ b/arch/mips/boot/dts/ingenic/jz4780.dtsi
@@ -262,6 +262,92 @@
status = "disabled";
};
+ i2c0: i2c@10050000 {
+ compatible = "ingenic,jz4780-i2c";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ reg = <0x10050000 0x1000>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <60>;
+
+ clocks = <&cgu JZ4780_CLK_SMB0>;
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pins_i2c0_data>;
+
+ status = "disabled";
+ };
+
+ i2c1: i2c@10051000 {
+ compatible = "ingenic,jz4780-i2c";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x10051000 0x1000>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <59>;
+
+ clocks = <&cgu JZ4780_CLK_SMB1>;
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pins_i2c1_data>;
+
+ status = "disabled";
+ };
+
+ i2c2: i2c@10052000 {
+ compatible = "ingenic,jz4780-i2c";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x10052000 0x1000>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <58>;
+
+ clocks = <&cgu JZ4780_CLK_SMB2>;
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pins_i2c2_data>;
+
+ status = "disabled";
+ };
+
+ i2c3: i2c@10053000 {
+ compatible = "ingenic,jz4780-i2c";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x10053000 0x1000>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <57>;
+
+ clocks = <&cgu JZ4780_CLK_SMB3>;
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pins_i2c3_data>;
+
+ status = "disabled";
+ };
+
+ i2c4: i2c@10054000 {
+ compatible = "ingenic,jz4780-i2c";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x10054000 0x1000>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <56>;
+
+ clocks = <&cgu JZ4780_CLK_SMB4>;
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pins_i2c4_data>;
+
+ status = "disabled";
+ };
+
watchdog: watchdog@10002000 {
compatible = "ingenic,jz4780-watchdog";
reg = <0x10002000 0x10>;
diff --git a/arch/mips/boot/dts/ralink/gardena_smart_gateway_mt7688.dts b/arch/mips/boot/dts/ralink/gardena_smart_gateway_mt7688.dts
new file mode 100644
index 000000000000..aa5caaa31104
--- /dev/null
+++ b/arch/mips/boot/dts/ralink/gardena_smart_gateway_mt7688.dts
@@ -0,0 +1,197 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 Stefan Roese <sr@denx.de>
+ */
+
+/dts-v1/;
+
+/include/ "mt7628a.dtsi"
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+
+/ {
+ compatible = "gardena,smart-gateway-mt7688", "ralink,mt7688a-soc",
+ "ralink,mt7628a-soc";
+ model = "GARDENA smart Gateway (MT7688)";
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x0 0x8000000>;
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinmux_gpio_gpio>; /* GPIO11 */
+
+ user_btn1 {
+ label = "USER_BTN1";
+ gpios = <&gpio 11 GPIO_ACTIVE_LOW>;
+			linux,code = <KEY_PROG1>;
+ };
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinmux_pwm0_gpio>, /* GPIO18 */
+ <&pinmux_pwm1_gpio>, /* GPIO19 */
+ <&pinmux_sdmode_gpio>, /* GPIO22..29 */
+ <&pinmux_p0led_an_gpio>; /* GPIO43 */
+ /*
+ * <&pinmux_i2s_gpio> (covers GPIO0..3) is needed here as
+ * well for GPIO3. But this is already claimed for uart1
+ * (see below). So we can't include it in this LED node.
+ */
+
+ power_blue {
+ label = "smartgw:power:blue";
+ gpios = <&gpio 18 GPIO_ACTIVE_HIGH>;
+ default-state = "off";
+ };
+
+ power_green {
+ label = "smartgw:power:green";
+ gpios = <&gpio 19 GPIO_ACTIVE_HIGH>;
+ default-state = "off";
+ };
+
+ power_red {
+ label = "smartgw:power:red";
+ gpios = <&gpio 22 GPIO_ACTIVE_HIGH>;
+ default-state = "off";
+ };
+
+ radio_blue {
+ label = "smartgw:radio:blue";
+ gpios = <&gpio 23 GPIO_ACTIVE_HIGH>;
+ default-state = "off";
+ };
+
+ radio_green {
+ label = "smartgw:radio:green";
+ gpios = <&gpio 24 GPIO_ACTIVE_HIGH>;
+ default-state = "off";
+ };
+
+ radio_red {
+ label = "smartgw:radio:red";
+ gpios = <&gpio 25 GPIO_ACTIVE_HIGH>;
+ default-state = "off";
+ };
+
+ internet_blue {
+ label = "smartgw:internet:blue";
+ gpios = <&gpio 26 GPIO_ACTIVE_HIGH>;
+ default-state = "off";
+ };
+
+ internet_green {
+ label = "smartgw:internet:green";
+ gpios = <&gpio 27 GPIO_ACTIVE_HIGH>;
+ default-state = "off";
+ };
+
+ internet_red {
+ label = "smartgw:internet:red";
+ gpios = <&gpio 28 GPIO_ACTIVE_HIGH>;
+ default-state = "off";
+ };
+
+ ethernet_link {
+ label = "smartgw:eth:link";
+ gpios = <&gpio 3 GPIO_ACTIVE_LOW>;
+ linux,default-trigger = "netdev";
+ };
+
+ ethernet_activity {
+ label = "smartgw:eth:act";
+ gpios = <&gpio 43 GPIO_ACTIVE_LOW>;
+ linux,default-trigger = "netdev";
+ };
+ };
+
+ aliases {
+ serial0 = &uart0;
+ };
+};
+
+&i2c {
+ status = "okay";
+};
+
+&spi {
+ status = "okay";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinmux_spi_spi>, <&pinmux_spi_cs1_cs>;
+
+ m25p80@0 {
+ compatible = "jedec,spi-nor";
+ reg = <0>;
+ spi-max-frequency = <40000000>;
+
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ partition@0 {
+ label = "uboot";
+ reg = <0x0 0xa0000>;
+ read-only;
+ };
+
+ partition@a0000 {
+ label = "uboot_env0";
+ reg = <0xa0000 0x10000>;
+ };
+
+ partition@b0000 {
+ label = "uboot_env1";
+ reg = <0xb0000 0x10000>;
+ };
+
+ factory: partition@c0000 {
+ label = "factory";
+ reg = <0xc0000 0x10000>;
+ read-only;
+ };
+ };
+ };
+
+ nand_flash@1 {
+ compatible = "spi-nand";
+ linux,mtd-name = "gd5f";
+ reg = <1>;
+ spi-max-frequency = <40000000>;
+ };
+};
+
+&uart1 {
+ status = "okay";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinmux_i2s_gpio>; /* GPIO0..3 */
+
+ rts-gpios = <&gpio 1 GPIO_ACTIVE_LOW>;
+ cts-gpios = <&gpio 2 GPIO_ACTIVE_LOW>;
+};
+
+&uart2 {
+ status = "okay";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinmux_p2led_an_gpio>, /* GPIO41 */
+ <&pinmux_p3led_an_gpio>; /* GPIO40 */
+
+ rts-gpios = <&gpio 40 GPIO_ACTIVE_LOW>;
+ cts-gpios = <&gpio 41 GPIO_ACTIVE_LOW>;
+};
+
+&watchdog {
+ status = "okay";
+};
diff --git a/arch/mips/boot/dts/ralink/mt7628a.dtsi b/arch/mips/boot/dts/ralink/mt7628a.dtsi
index 61f8621e88b3..742bcc1dc2e0 100644
--- a/arch/mips/boot/dts/ralink/mt7628a.dtsi
+++ b/arch/mips/boot/dts/ralink/mt7628a.dtsi
@@ -199,6 +199,22 @@
status = "disabled";
};
+ i2c: i2c@900 {
+ compatible = "mediatek,mt7621-i2c";
+ reg = <0x900 0x100>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinmux_i2c_i2c>;
+
+ resets = <&resetc 16>;
+ reset-names = "i2c";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ status = "disabled";
+ };
+
uart0: uartlite@c00 {
compatible = "ns16550a";
reg = <0xc00 0x100>;
diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c
index 95034bf5ca83..1f742c32a883 100644
--- a/arch/mips/cavium-octeon/setup.c
+++ b/arch/mips/cavium-octeon/setup.c
@@ -844,7 +844,7 @@ void __init prom_init(void)
* BIST should always be enabled when doing a soft reset. L2
* Cache locking for instance is not cleared unless BIST is
* enabled. Unfortunately due to a chip errata G-200 for
- * Cn38XX and CN31XX, BIST msut be disabled on these parts.
+ * Cn38XX and CN31XX, BIST must be disabled on these parts.
*/
if (OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2) ||
OCTEON_IS_MODEL(OCTEON_CN31XX))
diff --git a/arch/mips/configs/fuloong2e_defconfig b/arch/mips/configs/fuloong2e_defconfig
index 7a7af706e898..1788ae23bff9 100644
--- a/arch/mips/configs/fuloong2e_defconfig
+++ b/arch/mips/configs/fuloong2e_defconfig
@@ -15,7 +15,7 @@ CONFIG_EXPERT=y
# CONFIG_COMPAT_BRK is not set
CONFIG_SLAB=y
CONFIG_PROFILING=y
-CONFIG_MACH_LOONGSON64=y
+CONFIG_MACH_LOONGSON2EF=y
CONFIG_PCI=y
CONFIG_MIPS32_O32=y
CONFIG_MIPS32_N32=y
diff --git a/arch/mips/configs/lemote2f_defconfig b/arch/mips/configs/lemote2f_defconfig
index d44f1469cf64..f9f93427c9bd 100644
--- a/arch/mips/configs/lemote2f_defconfig
+++ b/arch/mips/configs/lemote2f_defconfig
@@ -12,7 +12,7 @@ CONFIG_LOG_BUF_SHIFT=15
CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
CONFIG_PROFILING=y
-CONFIG_MACH_LOONGSON64=y
+CONFIG_MACH_LOONGSON2EF=y
CONFIG_LEMOTE_MACH2F=y
CONFIG_KEXEC=y
# CONFIG_SECCOMP is not set
diff --git a/arch/mips/configs/loongson3_defconfig b/arch/mips/configs/loongson3_defconfig
index 90ee0084d786..c16a2330e84d 100644
--- a/arch/mips/configs/loongson3_defconfig
+++ b/arch/mips/configs/loongson3_defconfig
@@ -12,7 +12,6 @@ CONFIG_TASKSTATS=y
CONFIG_TASK_DELAY_ACCT=y
CONFIG_TASK_XACCT=y
CONFIG_TASK_IO_ACCOUNTING=y
-CONFIG_LOG_BUF_SHIFT=14
CONFIG_MEMCG=y
CONFIG_MEMCG_SWAP=y
CONFIG_BLK_CGROUP=y
@@ -24,7 +23,6 @@ CONFIG_BLK_DEV_INITRD=y
CONFIG_SYSCTL_SYSCALL=y
CONFIG_EMBEDDED=y
CONFIG_MACH_LOONGSON64=y
-CONFIG_LOONGSON_MACH3X=y
CONFIG_SMP=y
CONFIG_HZ_256=y
CONFIG_KEXEC=y
diff --git a/arch/mips/crypto/Makefile b/arch/mips/crypto/Makefile
index e07aca572c2e..8e1deaf00e0c 100644
--- a/arch/mips/crypto/Makefile
+++ b/arch/mips/crypto/Makefile
@@ -4,3 +4,21 @@
#
obj-$(CONFIG_CRYPTO_CRC32_MIPS) += crc32-mips.o
+
+obj-$(CONFIG_CRYPTO_CHACHA_MIPS) += chacha-mips.o
+chacha-mips-y := chacha-core.o chacha-glue.o
+AFLAGS_chacha-core.o += -O2 # needed to fill branch delay slots
+
+obj-$(CONFIG_CRYPTO_POLY1305_MIPS) += poly1305-mips.o
+poly1305-mips-y := poly1305-core.o poly1305-glue.o
+
+perlasm-flavour-$(CONFIG_CPU_MIPS32) := o32
+perlasm-flavour-$(CONFIG_CPU_MIPS64) := 64
+
+quiet_cmd_perlasm = PERLASM $@
+ cmd_perlasm = $(PERL) $(<) $(perlasm-flavour-y) $(@)
+
+$(obj)/poly1305-core.S: $(src)/poly1305-mips.pl FORCE
+ $(call if_changed,perlasm)
+
+targets += poly1305-core.S
diff --git a/arch/mips/crypto/chacha-core.S b/arch/mips/crypto/chacha-core.S
new file mode 100644
index 000000000000..5755f69cfe00
--- /dev/null
+++ b/arch/mips/crypto/chacha-core.S
@@ -0,0 +1,497 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
+ * Copyright (C) 2016-2018 René van Dorst <opensource@vdorst.com>. All Rights Reserved.
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#define MASK_U32 0x3c
+#define CHACHA20_BLOCK_SIZE 64
+#define STACK_SIZE 32
+
+#define X0 $t0
+#define X1 $t1
+#define X2 $t2
+#define X3 $t3
+#define X4 $t4
+#define X5 $t5
+#define X6 $t6
+#define X7 $t7
+#define X8 $t8
+#define X9 $t9
+#define X10 $v1
+#define X11 $s6
+#define X12 $s5
+#define X13 $s4
+#define X14 $s3
+#define X15 $s2
+/* Use regs which are overwritten on exit for Tx so we don't leak clear data. */
+#define T0 $s1
+#define T1 $s0
+#define T(n) T ## n
+#define X(n) X ## n
+
+/* Input arguments */
+#define STATE $a0
+#define OUT $a1
+#define IN $a2
+#define BYTES $a3
+
+/* Output argument */
+/* NONCE[0] is kept in a register and not in memory.
+ * We don't want to touch original value in memory.
+ * Must be incremented every loop iteration.
+ */
+#define NONCE_0 $v0
+
+/* SAVED_X and SAVED_CA are set in the jump table.
+ * Use regs which are overwritten on exit so we don't leak clear data.
+ * They are used to handle the last bytes, which are not a multiple of 4.
+ */
+#define SAVED_X X15
+#define SAVED_CA $s7
+
+#define IS_UNALIGNED $s7
+
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#define MSB 0
+#define LSB 3
+#define ROTx rotl
+#define ROTR(n) rotr n, 24
+#define CPU_TO_LE32(n) \
+ wsbh n; \
+ rotr n, 16;
+#else
+#define MSB 3
+#define LSB 0
+#define ROTx rotr
+#define CPU_TO_LE32(n)
+#define ROTR(n)
+#endif
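On big-endian builds, CPU_TO_LE32 is wsbh followed by rotr 16, which together byte-swap a 32-bit word. An equivalent C sketch (helper name hypothetical, u32 as in kernel code):

	static inline u32 cpu_to_le32_sketch(u32 x)
	{
		/* wsbh: swap bytes within each halfword, AABBCCDD -> BBAADDCC */
		x = ((x & 0x00ff00ffu) << 8) | ((x >> 8) & 0x00ff00ffu);
		/* rotr 16: swap the halfwords, BBAADDCC -> DDCCBBAA */
		return (x >> 16) | (x << 16);
	}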
+
+#define FOR_EACH_WORD(x) \
+ x( 0); \
+ x( 1); \
+ x( 2); \
+ x( 3); \
+ x( 4); \
+ x( 5); \
+ x( 6); \
+ x( 7); \
+ x( 8); \
+ x( 9); \
+ x(10); \
+ x(11); \
+ x(12); \
+ x(13); \
+ x(14); \
+ x(15);
+
+#define FOR_EACH_WORD_REV(x) \
+ x(15); \
+ x(14); \
+ x(13); \
+ x(12); \
+ x(11); \
+ x(10); \
+ x( 9); \
+ x( 8); \
+ x( 7); \
+ x( 6); \
+ x( 5); \
+ x( 4); \
+ x( 3); \
+ x( 2); \
+ x( 1); \
+ x( 0);
+
+#define PLUS_ONE_0 1
+#define PLUS_ONE_1 2
+#define PLUS_ONE_2 3
+#define PLUS_ONE_3 4
+#define PLUS_ONE_4 5
+#define PLUS_ONE_5 6
+#define PLUS_ONE_6 7
+#define PLUS_ONE_7 8
+#define PLUS_ONE_8 9
+#define PLUS_ONE_9 10
+#define PLUS_ONE_10 11
+#define PLUS_ONE_11 12
+#define PLUS_ONE_12 13
+#define PLUS_ONE_13 14
+#define PLUS_ONE_14 15
+#define PLUS_ONE_15 16
+#define PLUS_ONE(x) PLUS_ONE_ ## x
+#define _CONCAT3(a,b,c) a ## b ## c
+#define CONCAT3(a,b,c) _CONCAT3(a,b,c)
+
+#define STORE_UNALIGNED(x) \
+CONCAT3(.Lchacha_mips_xor_unaligned_, PLUS_ONE(x), _b: ;) \
+ .if (x != 12); \
+ lw T0, (x*4)(STATE); \
+ .endif; \
+ lwl T1, (x*4)+MSB ## (IN); \
+ lwr T1, (x*4)+LSB ## (IN); \
+ .if (x == 12); \
+ addu X ## x, NONCE_0; \
+ .else; \
+ addu X ## x, T0; \
+ .endif; \
+ CPU_TO_LE32(X ## x); \
+ xor X ## x, T1; \
+ swl X ## x, (x*4)+MSB ## (OUT); \
+ swr X ## x, (x*4)+LSB ## (OUT);
+
+#define STORE_ALIGNED(x) \
+CONCAT3(.Lchacha_mips_xor_aligned_, PLUS_ONE(x), _b: ;) \
+ .if (x != 12); \
+ lw T0, (x*4)(STATE); \
+ .endif; \
+ lw T1, (x*4) ## (IN); \
+ .if (x == 12); \
+ addu X ## x, NONCE_0; \
+ .else; \
+ addu X ## x, T0; \
+ .endif; \
+ CPU_TO_LE32(X ## x); \
+ xor X ## x, T1; \
+ sw X ## x, (x*4) ## (OUT);
+
+/* Jump table macro.
+ * Used for setup and for handling the last bytes, which are not a multiple of 4.
+ * X15 is free to store Xn.
+ * Every jump table entry must be the same size.
+ */
+#define JMPTBL_ALIGNED(x) \
+.Lchacha_mips_jmptbl_aligned_ ## x: ; \
+ .set noreorder; \
+ b .Lchacha_mips_xor_aligned_ ## x ## _b; \
+ .if (x == 12); \
+ addu SAVED_X, X ## x, NONCE_0; \
+ .else; \
+ addu SAVED_X, X ## x, SAVED_CA; \
+ .endif; \
+ .set reorder
+
+#define JMPTBL_UNALIGNED(x) \
+.Lchacha_mips_jmptbl_unaligned_ ## x: ; \
+ .set noreorder; \
+ b .Lchacha_mips_xor_unaligned_ ## x ## _b; \
+ .if (x == 12); \
+ addu SAVED_X, X ## x, NONCE_0; \
+ .else; \
+ addu SAVED_X, X ## x, SAVED_CA; \
+ .endif; \
+ .set reorder
+
+#define AXR(A, B, C, D, K, L, M, N, V, W, Y, Z, S) \
+ addu X(A), X(K); \
+ addu X(B), X(L); \
+ addu X(C), X(M); \
+ addu X(D), X(N); \
+ xor X(V), X(A); \
+ xor X(W), X(B); \
+ xor X(Y), X(C); \
+ xor X(Z), X(D); \
+ rotl X(V), S; \
+ rotl X(W), S; \
+ rotl X(Y), S; \
+ rotl X(Z), S;
+
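For orientation: the AXR macro performs the add/xor/rotate step of the standard ChaCha quarter round across four columns (or diagonals) at once, and the rotation counts 16/12/8/7 used in the round loop below are the reference ones. A plain C sketch of a single quarter round (not kernel code; names hypothetical):

	static inline u32 rotl32(u32 v, unsigned int n)
	{
		/* n must be 1..31; only 16, 12, 8, 7 are used here */
		return (v << n) | (v >> (32 - n));
	}

	/* one ChaCha quarter round on words a, b, c, d of the state */
	static void chacha_qr(u32 x[16], int a, int b, int c, int d)
	{
		x[a] += x[b]; x[d] ^= x[a]; x[d] = rotl32(x[d], 16);
		x[c] += x[d]; x[b] ^= x[c]; x[b] = rotl32(x[b], 12);
		x[a] += x[b]; x[d] ^= x[a]; x[d] = rotl32(x[d],  8);
		x[c] += x[d]; x[b] ^= x[c]; x[b] = rotl32(x[b],  7);
	}

Two groups of four AXR invocations below make up one column round plus one diagonal round, which is why the round counter is decremented by 2 per pass.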
+.text
+.set reorder
+.set noat
+.globl chacha_crypt_arch
+.ent chacha_crypt_arch
+chacha_crypt_arch:
+ .frame $sp, STACK_SIZE, $ra
+
+ /* Load number of rounds */
+ lw $at, 16($sp)
+
+ addiu $sp, -STACK_SIZE
+
+	/* Return if bytes == 0. */
+ beqz BYTES, .Lchacha_mips_end
+
+ lw NONCE_0, 48(STATE)
+
+ /* Save s0-s7 */
+ sw $s0, 0($sp)
+ sw $s1, 4($sp)
+ sw $s2, 8($sp)
+ sw $s3, 12($sp)
+ sw $s4, 16($sp)
+ sw $s5, 20($sp)
+ sw $s6, 24($sp)
+ sw $s7, 28($sp)
+
+	/* Test whether IN or OUT is unaligned.
+ * IS_UNALIGNED = ( IN | OUT ) & 0x00000003
+ */
+ or IS_UNALIGNED, IN, OUT
+ andi IS_UNALIGNED, 0x3
+
+ b .Lchacha_rounds_start
+
+.align 4
+.Loop_chacha_rounds:
+ addiu IN, CHACHA20_BLOCK_SIZE
+ addiu OUT, CHACHA20_BLOCK_SIZE
+ addiu NONCE_0, 1
+
+.Lchacha_rounds_start:
+ lw X0, 0(STATE)
+ lw X1, 4(STATE)
+ lw X2, 8(STATE)
+ lw X3, 12(STATE)
+
+ lw X4, 16(STATE)
+ lw X5, 20(STATE)
+ lw X6, 24(STATE)
+ lw X7, 28(STATE)
+ lw X8, 32(STATE)
+ lw X9, 36(STATE)
+ lw X10, 40(STATE)
+ lw X11, 44(STATE)
+
+ move X12, NONCE_0
+ lw X13, 52(STATE)
+ lw X14, 56(STATE)
+ lw X15, 60(STATE)
+
+.Loop_chacha_xor_rounds:
+ addiu $at, -2
+ AXR( 0, 1, 2, 3, 4, 5, 6, 7, 12,13,14,15, 16);
+ AXR( 8, 9,10,11, 12,13,14,15, 4, 5, 6, 7, 12);
+ AXR( 0, 1, 2, 3, 4, 5, 6, 7, 12,13,14,15, 8);
+ AXR( 8, 9,10,11, 12,13,14,15, 4, 5, 6, 7, 7);
+ AXR( 0, 1, 2, 3, 5, 6, 7, 4, 15,12,13,14, 16);
+ AXR(10,11, 8, 9, 15,12,13,14, 5, 6, 7, 4, 12);
+ AXR( 0, 1, 2, 3, 5, 6, 7, 4, 15,12,13,14, 8);
+ AXR(10,11, 8, 9, 15,12,13,14, 5, 6, 7, 4, 7);
+ bnez $at, .Loop_chacha_xor_rounds
+
+ addiu BYTES, -(CHACHA20_BLOCK_SIZE)
+
+ /* Is data src/dst unaligned? Jump */
+ bnez IS_UNALIGNED, .Loop_chacha_unaligned
+
+	/* Set number of rounds here to fill the delay slot. */
+ lw $at, (STACK_SIZE+16)($sp)
+
+	/* BYTES < 0, so there is no full block. */
+ bltz BYTES, .Lchacha_mips_no_full_block_aligned
+
+ FOR_EACH_WORD_REV(STORE_ALIGNED)
+
+ /* BYTES > 0? Loop again. */
+ bgtz BYTES, .Loop_chacha_rounds
+
+ /* Place this here to fill delay slot */
+ addiu NONCE_0, 1
+
+ /* BYTES < 0? Handle last bytes */
+ bltz BYTES, .Lchacha_mips_xor_bytes
+
+.Lchacha_mips_xor_done:
+ /* Restore used registers */
+ lw $s0, 0($sp)
+ lw $s1, 4($sp)
+ lw $s2, 8($sp)
+ lw $s3, 12($sp)
+ lw $s4, 16($sp)
+ lw $s5, 20($sp)
+ lw $s6, 24($sp)
+ lw $s7, 28($sp)
+
+ /* Write NONCE_0 back to right location in state */
+ sw NONCE_0, 48(STATE)
+
+.Lchacha_mips_end:
+ addiu $sp, STACK_SIZE
+ jr $ra
+
+.Lchacha_mips_no_full_block_aligned:
+ /* Restore the offset on BYTES */
+ addiu BYTES, CHACHA20_BLOCK_SIZE
+
+ /* Get number of full WORDS */
+ andi $at, BYTES, MASK_U32
+
+ /* Load upper half of jump table addr */
+ lui T0, %hi(.Lchacha_mips_jmptbl_aligned_0)
+
+ /* Calculate lower half jump table offset */
+ ins T0, $at, 1, 6
+
+ /* Add offset to STATE */
+ addu T1, STATE, $at
+
+ /* Add lower half jump table addr */
+ addiu T0, %lo(.Lchacha_mips_jmptbl_aligned_0)
+
+ /* Read value from STATE */
+ lw SAVED_CA, 0(T1)
+
+	/* Store the remaining byte counter as a negative value */
+ subu BYTES, $at, BYTES
+
+ jr T0
+
+ /* Jump table */
+ FOR_EACH_WORD(JMPTBL_ALIGNED)
+
+
+.Loop_chacha_unaligned:
+	/* Set number of rounds here to fill the delay slot. */
+ lw $at, (STACK_SIZE+16)($sp)
+
+	/* BYTES < 0, so there is no full block. */
+ bltz BYTES, .Lchacha_mips_no_full_block_unaligned
+
+ FOR_EACH_WORD_REV(STORE_UNALIGNED)
+
+ /* BYTES > 0? Loop again. */
+ bgtz BYTES, .Loop_chacha_rounds
+
+ /* Write NONCE_0 back to right location in state */
+ sw NONCE_0, 48(STATE)
+
+ .set noreorder
+ /* Fall through to byte handling */
+ bgez BYTES, .Lchacha_mips_xor_done
+.Lchacha_mips_xor_unaligned_0_b:
+.Lchacha_mips_xor_aligned_0_b:
+ /* Place this here to fill delay slot */
+ addiu NONCE_0, 1
+ .set reorder
+
+.Lchacha_mips_xor_bytes:
+ addu IN, $at
+ addu OUT, $at
+ /* First byte */
+ lbu T1, 0(IN)
+ addiu $at, BYTES, 1
+ CPU_TO_LE32(SAVED_X)
+ ROTR(SAVED_X)
+ xor T1, SAVED_X
+ sb T1, 0(OUT)
+ beqz $at, .Lchacha_mips_xor_done
+ /* Second byte */
+ lbu T1, 1(IN)
+ addiu $at, BYTES, 2
+ ROTx SAVED_X, 8
+ xor T1, SAVED_X
+ sb T1, 1(OUT)
+ beqz $at, .Lchacha_mips_xor_done
+ /* Third byte */
+ lbu T1, 2(IN)
+ ROTx SAVED_X, 8
+ xor T1, SAVED_X
+ sb T1, 2(OUT)
+ b .Lchacha_mips_xor_done
+
+.Lchacha_mips_no_full_block_unaligned:
+ /* Restore the offset on BYTES */
+ addiu BYTES, CHACHA20_BLOCK_SIZE
+
+ /* Get number of full WORDS */
+ andi $at, BYTES, MASK_U32
+
+ /* Load upper half of jump table addr */
+ lui T0, %hi(.Lchacha_mips_jmptbl_unaligned_0)
+
+ /* Calculate lower half jump table offset */
+ ins T0, $at, 1, 6
+
+ /* Add offset to STATE */
+ addu T1, STATE, $at
+
+ /* Add lower half jump table addr */
+ addiu T0, %lo(.Lchacha_mips_jmptbl_unaligned_0)
+
+ /* Read value from STATE */
+ lw SAVED_CA, 0(T1)
+
+	/* Store the remaining byte counter as a negative value */
+ subu BYTES, $at, BYTES
+
+ jr T0
+
+ /* Jump table */
+ FOR_EACH_WORD(JMPTBL_UNALIGNED)
+.end chacha_crypt_arch
+.set at
+
+/* Input arguments
+ * STATE $a0
+ * OUT $a1
+ * NROUND $a2
+ */
+
+#undef X12
+#undef X13
+#undef X14
+#undef X15
+
+#define X12 $a3
+#define X13 $at
+#define X14 $v0
+#define X15 STATE
+
+.set noat
+.globl hchacha_block_arch
+.ent hchacha_block_arch
+hchacha_block_arch:
+ .frame $sp, STACK_SIZE, $ra
+
+ addiu $sp, -STACK_SIZE
+
+ /* Save X11(s6) */
+ sw X11, 0($sp)
+
+ lw X0, 0(STATE)
+ lw X1, 4(STATE)
+ lw X2, 8(STATE)
+ lw X3, 12(STATE)
+ lw X4, 16(STATE)
+ lw X5, 20(STATE)
+ lw X6, 24(STATE)
+ lw X7, 28(STATE)
+ lw X8, 32(STATE)
+ lw X9, 36(STATE)
+ lw X10, 40(STATE)
+ lw X11, 44(STATE)
+ lw X12, 48(STATE)
+ lw X13, 52(STATE)
+ lw X14, 56(STATE)
+ lw X15, 60(STATE)
+
+.Loop_hchacha_xor_rounds:
+ addiu $a2, -2
+ AXR( 0, 1, 2, 3, 4, 5, 6, 7, 12,13,14,15, 16);
+ AXR( 8, 9,10,11, 12,13,14,15, 4, 5, 6, 7, 12);
+ AXR( 0, 1, 2, 3, 4, 5, 6, 7, 12,13,14,15, 8);
+ AXR( 8, 9,10,11, 12,13,14,15, 4, 5, 6, 7, 7);
+ AXR( 0, 1, 2, 3, 5, 6, 7, 4, 15,12,13,14, 16);
+ AXR(10,11, 8, 9, 15,12,13,14, 5, 6, 7, 4, 12);
+ AXR( 0, 1, 2, 3, 5, 6, 7, 4, 15,12,13,14, 8);
+ AXR(10,11, 8, 9, 15,12,13,14, 5, 6, 7, 4, 7);
+ bnez $a2, .Loop_hchacha_xor_rounds
+
+ /* Restore used register */
+ lw X11, 0($sp)
+
+ sw X0, 0(OUT)
+ sw X1, 4(OUT)
+ sw X2, 8(OUT)
+ sw X3, 12(OUT)
+ sw X12, 16(OUT)
+ sw X13, 20(OUT)
+ sw X14, 24(OUT)
+ sw X15, 28(OUT)
+
+ addiu $sp, STACK_SIZE
+ jr $ra
+.end hchacha_block_arch
+.set at
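Similarly, the IS_UNALIGNED test near the top of chacha_crypt_arch reduces to the following C sketch (helper name hypothetical):

	static inline int chacha_bufs_unaligned(const void *in, const void *out)
	{
		/* OR the two addresses: any set low bit means at least one
		 * of them is not 32-bit aligned */
		return (((unsigned long)in | (unsigned long)out) & 0x3) != 0;
	}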
diff --git a/arch/mips/crypto/chacha-glue.c b/arch/mips/crypto/chacha-glue.c
new file mode 100644
index 000000000000..779e399c9bef
--- /dev/null
+++ b/arch/mips/crypto/chacha-glue.c
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * MIPS accelerated ChaCha and XChaCha stream ciphers,
+ * including ChaCha20 (RFC7539)
+ *
+ * Copyright (C) 2019 Linaro, Ltd. <ard.biesheuvel@linaro.org>
+ */
+
+#include <asm/byteorder.h>
+#include <crypto/algapi.h>
+#include <crypto/internal/chacha.h>
+#include <crypto/internal/skcipher.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+asmlinkage void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src,
+ unsigned int bytes, int nrounds);
+EXPORT_SYMBOL(chacha_crypt_arch);
+
+asmlinkage void hchacha_block_arch(const u32 *state, u32 *stream, int nrounds);
+EXPORT_SYMBOL(hchacha_block_arch);
+
+void chacha_init_arch(u32 *state, const u32 *key, const u8 *iv)
+{
+ chacha_init_generic(state, key, iv);
+}
+EXPORT_SYMBOL(chacha_init_arch);
+
+static int chacha_mips_stream_xor(struct skcipher_request *req,
+ const struct chacha_ctx *ctx, const u8 *iv)
+{
+ struct skcipher_walk walk;
+ u32 state[16];
+ int err;
+
+ err = skcipher_walk_virt(&walk, req, false);
+
+ chacha_init_generic(state, ctx->key, iv);
+
+ while (walk.nbytes > 0) {
+ unsigned int nbytes = walk.nbytes;
+
+ if (nbytes < walk.total)
+ nbytes = round_down(nbytes, walk.stride);
+
+ chacha_crypt(state, walk.dst.virt.addr, walk.src.virt.addr,
+ nbytes, ctx->nrounds);
+ err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
+ }
+
+ return err;
+}
+
+static int chacha_mips(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ return chacha_mips_stream_xor(req, ctx, req->iv);
+}
+
+static int xchacha_mips(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct chacha_ctx subctx;
+ u32 state[16];
+ u8 real_iv[16];
+
+ chacha_init_generic(state, ctx->key, req->iv);
+
+ hchacha_block(state, subctx.key, ctx->nrounds);
+ subctx.nrounds = ctx->nrounds;
+
+ memcpy(&real_iv[0], req->iv + 24, 8);
+ memcpy(&real_iv[8], req->iv + 16, 8);
+ return chacha_mips_stream_xor(req, &subctx, real_iv);
+}
+
+static struct skcipher_alg algs[] = {
+ {
+ .base.cra_name = "chacha20",
+ .base.cra_driver_name = "chacha20-mips",
+ .base.cra_priority = 200,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct chacha_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = CHACHA_KEY_SIZE,
+ .max_keysize = CHACHA_KEY_SIZE,
+ .ivsize = CHACHA_IV_SIZE,
+ .chunksize = CHACHA_BLOCK_SIZE,
+ .setkey = chacha20_setkey,
+ .encrypt = chacha_mips,
+ .decrypt = chacha_mips,
+ }, {
+ .base.cra_name = "xchacha20",
+ .base.cra_driver_name = "xchacha20-mips",
+ .base.cra_priority = 200,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct chacha_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = CHACHA_KEY_SIZE,
+ .max_keysize = CHACHA_KEY_SIZE,
+ .ivsize = XCHACHA_IV_SIZE,
+ .chunksize = CHACHA_BLOCK_SIZE,
+ .setkey = chacha20_setkey,
+ .encrypt = xchacha_mips,
+ .decrypt = xchacha_mips,
+ }, {
+ .base.cra_name = "xchacha12",
+ .base.cra_driver_name = "xchacha12-mips",
+ .base.cra_priority = 200,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct chacha_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = CHACHA_KEY_SIZE,
+ .max_keysize = CHACHA_KEY_SIZE,
+ .ivsize = XCHACHA_IV_SIZE,
+ .chunksize = CHACHA_BLOCK_SIZE,
+ .setkey = chacha12_setkey,
+ .encrypt = xchacha_mips,
+ .decrypt = xchacha_mips,
+ }
+};
+
+static int __init chacha_simd_mod_init(void)
+{
+ return crypto_register_skciphers(algs, ARRAY_SIZE(algs));
+}
+
+static void __exit chacha_simd_mod_fini(void)
+{
+ crypto_unregister_skciphers(algs, ARRAY_SIZE(algs));
+}
+
+module_init(chacha_simd_mod_init);
+module_exit(chacha_simd_mod_fini);
+
+MODULE_DESCRIPTION("ChaCha and XChaCha stream ciphers (MIPS accelerated)");
+MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS_CRYPTO("chacha20");
+MODULE_ALIAS_CRYPTO("chacha20-mips");
+MODULE_ALIAS_CRYPTO("xchacha20");
+MODULE_ALIAS_CRYPTO("xchacha20-mips");
+MODULE_ALIAS_CRYPTO("xchacha12");
+MODULE_ALIAS_CRYPTO("xchacha12-mips");
diff --git a/arch/mips/crypto/poly1305-glue.c b/arch/mips/crypto/poly1305-glue.c
new file mode 100644
index 000000000000..b759b6ccc361
--- /dev/null
+++ b/arch/mips/crypto/poly1305-glue.c
@@ -0,0 +1,203 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * OpenSSL/Cryptogams accelerated Poly1305 transform for MIPS
+ *
+ * Copyright (C) 2019 Linaro Ltd. <ard.biesheuvel@linaro.org>
+ */
+
+#include <asm/unaligned.h>
+#include <crypto/algapi.h>
+#include <crypto/internal/hash.h>
+#include <crypto/internal/poly1305.h>
+#include <linux/cpufeature.h>
+#include <linux/crypto.h>
+#include <linux/module.h>
+
+asmlinkage void poly1305_init_mips(void *state, const u8 *key);
+asmlinkage void poly1305_blocks_mips(void *state, const u8 *src, u32 len, u32 hibit);
+asmlinkage void poly1305_emit_mips(void *state, __le32 *digest, const u32 *nonce);
+
+void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 *key)
+{
+ poly1305_init_mips(&dctx->h, key);
+ dctx->s[0] = get_unaligned_le32(key + 16);
+ dctx->s[1] = get_unaligned_le32(key + 20);
+ dctx->s[2] = get_unaligned_le32(key + 24);
+ dctx->s[3] = get_unaligned_le32(key + 28);
+ dctx->buflen = 0;
+}
+EXPORT_SYMBOL(poly1305_init_arch);
+
+static int mips_poly1305_init(struct shash_desc *desc)
+{
+ struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
+
+ dctx->buflen = 0;
+ dctx->rset = 0;
+ dctx->sset = false;
+
+ return 0;
+}
+
+static void mips_poly1305_blocks(struct poly1305_desc_ctx *dctx, const u8 *src,
+ u32 len, u32 hibit)
+{
+ if (unlikely(!dctx->sset)) {
+ if (!dctx->rset) {
+ poly1305_init_mips(&dctx->h, src);
+ src += POLY1305_BLOCK_SIZE;
+ len -= POLY1305_BLOCK_SIZE;
+ dctx->rset = 1;
+ }
+ if (len >= POLY1305_BLOCK_SIZE) {
+ dctx->s[0] = get_unaligned_le32(src + 0);
+ dctx->s[1] = get_unaligned_le32(src + 4);
+ dctx->s[2] = get_unaligned_le32(src + 8);
+ dctx->s[3] = get_unaligned_le32(src + 12);
+ src += POLY1305_BLOCK_SIZE;
+ len -= POLY1305_BLOCK_SIZE;
+ dctx->sset = true;
+ }
+ if (len < POLY1305_BLOCK_SIZE)
+ return;
+ }
+
+ len &= ~(POLY1305_BLOCK_SIZE - 1);
+
+ poly1305_blocks_mips(&dctx->h, src, len, hibit);
+}
+
+static int mips_poly1305_update(struct shash_desc *desc, const u8 *src,
+ unsigned int len)
+{
+ struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
+
+ if (unlikely(dctx->buflen)) {
+ u32 bytes = min(len, POLY1305_BLOCK_SIZE - dctx->buflen);
+
+ memcpy(dctx->buf + dctx->buflen, src, bytes);
+ src += bytes;
+ len -= bytes;
+ dctx->buflen += bytes;
+
+ if (dctx->buflen == POLY1305_BLOCK_SIZE) {
+ mips_poly1305_blocks(dctx, dctx->buf, POLY1305_BLOCK_SIZE, 1);
+ dctx->buflen = 0;
+ }
+ }
+
+ if (likely(len >= POLY1305_BLOCK_SIZE)) {
+ mips_poly1305_blocks(dctx, src, len, 1);
+ src += round_down(len, POLY1305_BLOCK_SIZE);
+ len %= POLY1305_BLOCK_SIZE;
+ }
+
+ if (unlikely(len)) {
+ dctx->buflen = len;
+ memcpy(dctx->buf, src, len);
+ }
+ return 0;
+}
+
+void poly1305_update_arch(struct poly1305_desc_ctx *dctx, const u8 *src,
+ unsigned int nbytes)
+{
+ if (unlikely(dctx->buflen)) {
+ u32 bytes = min(nbytes, POLY1305_BLOCK_SIZE - dctx->buflen);
+
+ memcpy(dctx->buf + dctx->buflen, src, bytes);
+ src += bytes;
+ nbytes -= bytes;
+ dctx->buflen += bytes;
+
+ if (dctx->buflen == POLY1305_BLOCK_SIZE) {
+ poly1305_blocks_mips(&dctx->h, dctx->buf,
+ POLY1305_BLOCK_SIZE, 1);
+ dctx->buflen = 0;
+ }
+ }
+
+ if (likely(nbytes >= POLY1305_BLOCK_SIZE)) {
+ unsigned int len = round_down(nbytes, POLY1305_BLOCK_SIZE);
+
+ poly1305_blocks_mips(&dctx->h, src, len, 1);
+ src += len;
+ nbytes %= POLY1305_BLOCK_SIZE;
+ }
+
+ if (unlikely(nbytes)) {
+ dctx->buflen = nbytes;
+ memcpy(dctx->buf, src, nbytes);
+ }
+}
+EXPORT_SYMBOL(poly1305_update_arch);
+
+void poly1305_final_arch(struct poly1305_desc_ctx *dctx, u8 *dst)
+{
+ __le32 digest[4];
+ u64 f = 0;
+
+ if (unlikely(dctx->buflen)) {
+ dctx->buf[dctx->buflen++] = 1;
+ memset(dctx->buf + dctx->buflen, 0,
+ POLY1305_BLOCK_SIZE - dctx->buflen);
+ poly1305_blocks_mips(&dctx->h, dctx->buf, POLY1305_BLOCK_SIZE, 0);
+ }
+
+ poly1305_emit_mips(&dctx->h, digest, dctx->s);
+
+ /* mac = (h + s) % (2^128) */
+ f = (f >> 32) + le32_to_cpu(digest[0]);
+ put_unaligned_le32(f, dst);
+ f = (f >> 32) + le32_to_cpu(digest[1]);
+ put_unaligned_le32(f, dst + 4);
+ f = (f >> 32) + le32_to_cpu(digest[2]);
+ put_unaligned_le32(f, dst + 8);
+ f = (f >> 32) + le32_to_cpu(digest[3]);
+ put_unaligned_le32(f, dst + 12);
+
+ *dctx = (struct poly1305_desc_ctx){};
+}
+EXPORT_SYMBOL(poly1305_final_arch);
+
+static int mips_poly1305_final(struct shash_desc *desc, u8 *dst)
+{
+ struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
+
+ if (unlikely(!dctx->sset))
+ return -ENOKEY;
+
+ poly1305_final_arch(dctx, dst);
+ return 0;
+}
+
+static struct shash_alg mips_poly1305_alg = {
+ .init = mips_poly1305_init,
+ .update = mips_poly1305_update,
+ .final = mips_poly1305_final,
+ .digestsize = POLY1305_DIGEST_SIZE,
+ .descsize = sizeof(struct poly1305_desc_ctx),
+
+ .base.cra_name = "poly1305",
+ .base.cra_driver_name = "poly1305-mips",
+ .base.cra_priority = 200,
+ .base.cra_blocksize = POLY1305_BLOCK_SIZE,
+ .base.cra_module = THIS_MODULE,
+};
+
+static int __init mips_poly1305_mod_init(void)
+{
+ return crypto_register_shash(&mips_poly1305_alg);
+}
+
+static void __exit mips_poly1305_mod_exit(void)
+{
+ crypto_unregister_shash(&mips_poly1305_alg);
+}
+
+module_init(mips_poly1305_mod_init);
+module_exit(mips_poly1305_mod_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS_CRYPTO("poly1305");
+MODULE_ALIAS_CRYPTO("poly1305-mips");
diff --git a/arch/mips/crypto/poly1305-mips.pl b/arch/mips/crypto/poly1305-mips.pl
new file mode 100644
index 000000000000..b05bab884ed2
--- /dev/null
+++ b/arch/mips/crypto/poly1305-mips.pl
@@ -0,0 +1,1273 @@
+#!/usr/bin/env perl
+# SPDX-License-Identifier: GPL-1.0+ OR BSD-3-Clause
+#
+# ====================================================================
+# Written by Andy Polyakov, @dot-asm, originally for the OpenSSL
+# project.
+# ====================================================================
+
+# Poly1305 hash for MIPS.
+#
+# May 2016
+#
+# Numbers are cycles per processed byte with poly1305_blocks alone.
+#
+# IALU/gcc
+# R1x000 ~5.5/+130% (big-endian)
+# Octeon II 2.50/+70% (little-endian)
+#
+# March 2019
+#
+# Add 32-bit code path.
+#
+# October 2019
+#
+# Modulo-scheduled reduction allows omitting the dependency chain at the
+# end of the inner loop, improving performance. Also optimize the MIPS32R2
+# code path for the MIPS 1004K core. Per René van Dorst's suggestions.
+#
+# IALU/gcc
+# R1x000 ~9.8/? (big-endian)
+# Octeon II 3.65/+140% (little-endian)
+# MT7621/1004K 4.75/? (little-endian)
+#
+######################################################################
+# There are a number of MIPS ABIs in use; O32 and N32/64 are the most
+# widely used. Then there is a new contender: NUBI. It appears that if
+# one picks the latter, it's possible to arrange code in an ABI-neutral
+# manner. Therefore let's stick to the NUBI register layout:
+#
+($zero,$at,$t0,$t1,$t2)=map("\$$_",(0..2,24,25));
+($a0,$a1,$a2,$a3,$a4,$a5,$a6,$a7)=map("\$$_",(4..11));
+($s0,$s1,$s2,$s3,$s4,$s5,$s6,$s7,$s8,$s9,$s10,$s11)=map("\$$_",(12..23));
+($gp,$tp,$sp,$fp,$ra)=map("\$$_",(3,28..31));
+#
+# The return value is placed in $a0. The following coding rules facilitate
+# interoperability:
+#
+# - never ever touch $tp, "thread pointer", former $gp [o32 can be
+# excluded from the rule, because it's specified volatile];
+# - copy return value to $t0, former $v0 [or to $a0 if you're adapting
+# old code];
+# - on O32 populate $a4-$a7 with 'lw $aN,4*N($sp)' if necessary;
+#
+# For reference here is register layout for N32/64 MIPS ABIs:
+#
+# ($zero,$at,$v0,$v1)=map("\$$_",(0..3));
+# ($a0,$a1,$a2,$a3,$a4,$a5,$a6,$a7)=map("\$$_",(4..11));
+# ($t0,$t1,$t2,$t3,$t8,$t9)=map("\$$_",(12..15,24,25));
+# ($s0,$s1,$s2,$s3,$s4,$s5,$s6,$s7)=map("\$$_",(16..23));
+# ($gp,$sp,$fp,$ra)=map("\$$_",(28..31));
+#
+# <appro@openssl.org>
+#
+######################################################################
+
+$flavour = shift || "64"; # supported flavours are o32,n32,64,nubi32,nubi64
+
+$v0 = ($flavour =~ /nubi/i) ? $a0 : $t0;
+
+if ($flavour =~ /64|n32/i) {{{
+######################################################################
+# 64-bit code path
+#
+
+my ($ctx,$inp,$len,$padbit) = ($a0,$a1,$a2,$a3);
+my ($in0,$in1,$tmp0,$tmp1,$tmp2,$tmp3,$tmp4) = ($a4,$a5,$a6,$a7,$at,$t0,$t1);
+
+$code.=<<___;
+#if (defined(_MIPS_ARCH_MIPS64R3) || defined(_MIPS_ARCH_MIPS64R5) || \\
+ defined(_MIPS_ARCH_MIPS64R6)) \\
+ && !defined(_MIPS_ARCH_MIPS64R2)
+# define _MIPS_ARCH_MIPS64R2
+#endif
+
+#if defined(_MIPS_ARCH_MIPS64R6)
+# define dmultu(rs,rt)
+# define mflo(rd,rs,rt) dmulu rd,rs,rt
+# define mfhi(rd,rs,rt) dmuhu rd,rs,rt
+#else
+# define dmultu(rs,rt) dmultu rs,rt
+# define mflo(rd,rs,rt) mflo rd
+# define mfhi(rd,rs,rt) mfhi rd
+#endif
+
+#ifdef __KERNEL__
+# define poly1305_init poly1305_init_mips
+# define poly1305_blocks poly1305_blocks_mips
+# define poly1305_emit poly1305_emit_mips
+#endif
+
+#if defined(__MIPSEB__) && !defined(MIPSEB)
+# define MIPSEB
+#endif
+
+#ifdef MIPSEB
+# define MSB 0
+# define LSB 7
+#else
+# define MSB 7
+# define LSB 0
+#endif
+
+.text
+.set noat
+.set noreorder
+
+.align 5
+.globl poly1305_init
+.ent poly1305_init
+poly1305_init:
+ .frame $sp,0,$ra
+ .set reorder
+
+ sd $zero,0($ctx)
+ sd $zero,8($ctx)
+ sd $zero,16($ctx)
+
+ beqz $inp,.Lno_key
+
+#if defined(_MIPS_ARCH_MIPS64R6)
+ andi $tmp0,$inp,7 # $inp % 8
+ dsubu $inp,$inp,$tmp0 # align $inp
+ sll $tmp0,$tmp0,3 # byte to bit offset
+ ld $in0,0($inp)
+ ld $in1,8($inp)
+ beqz $tmp0,.Laligned_key
+ ld $tmp2,16($inp)
+
+ subu $tmp1,$zero,$tmp0
+# ifdef MIPSEB
+ dsllv $in0,$in0,$tmp0
+ dsrlv $tmp3,$in1,$tmp1
+ dsllv $in1,$in1,$tmp0
+ dsrlv $tmp2,$tmp2,$tmp1
+# else
+ dsrlv $in0,$in0,$tmp0
+ dsllv $tmp3,$in1,$tmp1
+ dsrlv $in1,$in1,$tmp0
+ dsllv $tmp2,$tmp2,$tmp1
+# endif
+ or $in0,$in0,$tmp3
+ or $in1,$in1,$tmp2
+.Laligned_key:
+#else
+ ldl $in0,0+MSB($inp)
+ ldl $in1,8+MSB($inp)
+ ldr $in0,0+LSB($inp)
+ ldr $in1,8+LSB($inp)
+#endif
+#ifdef MIPSEB
+# if defined(_MIPS_ARCH_MIPS64R2)
+ dsbh $in0,$in0 # byte swap
+ dsbh $in1,$in1
+ dshd $in0,$in0
+ dshd $in1,$in1
+# else
+ ori $tmp0,$zero,0xFF
+ dsll $tmp2,$tmp0,32
+ or $tmp0,$tmp2 # 0x000000FF000000FF
+
+ and $tmp1,$in0,$tmp0 # byte swap
+ and $tmp3,$in1,$tmp0
+ dsrl $tmp2,$in0,24
+ dsrl $tmp4,$in1,24
+ dsll $tmp1,24
+ dsll $tmp3,24
+ and $tmp2,$tmp0
+ and $tmp4,$tmp0
+ dsll $tmp0,8 # 0x0000FF000000FF00
+ or $tmp1,$tmp2
+ or $tmp3,$tmp4
+ and $tmp2,$in0,$tmp0
+ and $tmp4,$in1,$tmp0
+ dsrl $in0,8
+ dsrl $in1,8
+ dsll $tmp2,8
+ dsll $tmp4,8
+ and $in0,$tmp0
+ and $in1,$tmp0
+ or $tmp1,$tmp2
+ or $tmp3,$tmp4
+ or $in0,$tmp1
+ or $in1,$tmp3
+ dsrl $tmp1,$in0,32
+ dsrl $tmp3,$in1,32
+ dsll $in0,32
+ dsll $in1,32
+ or $in0,$tmp1
+ or $in1,$tmp3
+# endif
+#endif
+ li $tmp0,1
+ dsll $tmp0,32 # 0x0000000100000000
+ daddiu $tmp0,-63 # 0x00000000ffffffc1
+ dsll $tmp0,28 # 0x0ffffffc10000000
+ daddiu $tmp0,-1 # 0x0ffffffc0fffffff
+
+ and $in0,$tmp0
+ daddiu $tmp0,-3 # 0x0ffffffc0ffffffc
+ and $in1,$tmp0
+
+ sd $in0,24($ctx)
+ dsrl $tmp0,$in1,2
+ sd $in1,32($ctx)
+ daddu $tmp0,$in1 # s1 = r1 + (r1 >> 2)
+ sd $tmp0,40($ctx)
+
+.Lno_key:
+ li $v0,0 # return 0
+ jr $ra
+.end poly1305_init
+___
+{
+my $SAVED_REGS_MASK = ($flavour =~ /nubi/i) ? "0x0003f000" : "0x00030000";
+
+my ($h0,$h1,$h2,$r0,$r1,$rs1,$d0,$d1,$d2) =
+ ($s0,$s1,$s2,$s3,$s4,$s5,$in0,$in1,$t2);
+my ($shr,$shl) = ($s6,$s7); # used on R6
+
+$code.=<<___;
+.align 5
+.globl poly1305_blocks
+.ent poly1305_blocks
+poly1305_blocks:
+ .set noreorder
+ dsrl $len,4 # number of complete blocks
+ bnez $len,poly1305_blocks_internal
+ nop
+ jr $ra
+ nop
+.end poly1305_blocks
+
+.align 5
+.ent poly1305_blocks_internal
+poly1305_blocks_internal:
+ .set noreorder
+#if defined(_MIPS_ARCH_MIPS64R6)
+ .frame $sp,8*8,$ra
+ .mask $SAVED_REGS_MASK|0x000c0000,-8
+ dsubu $sp,8*8
+ sd $s7,56($sp)
+ sd $s6,48($sp)
+#else
+ .frame $sp,6*8,$ra
+ .mask $SAVED_REGS_MASK,-8
+ dsubu $sp,6*8
+#endif
+ sd $s5,40($sp)
+ sd $s4,32($sp)
+___
+$code.=<<___ if ($flavour =~ /nubi/i); # optimize non-nubi prologue
+ sd $s3,24($sp)
+ sd $s2,16($sp)
+ sd $s1,8($sp)
+ sd $s0,0($sp)
+___
+$code.=<<___;
+ .set reorder
+
+#if defined(_MIPS_ARCH_MIPS64R6)
+ andi $shr,$inp,7
+ dsubu $inp,$inp,$shr # align $inp
+ sll $shr,$shr,3 # byte to bit offset
+ subu $shl,$zero,$shr
+#endif
+
+ ld $h0,0($ctx) # load hash value
+ ld $h1,8($ctx)
+ ld $h2,16($ctx)
+
+ ld $r0,24($ctx) # load key
+ ld $r1,32($ctx)
+ ld $rs1,40($ctx)
+
+ dsll $len,4
+ daddu $len,$inp # end of buffer
+ b .Loop
+
+.align 4
+.Loop:
+#if defined(_MIPS_ARCH_MIPS64R6)
+ ld $in0,0($inp) # load input
+ ld $in1,8($inp)
+ beqz $shr,.Laligned_inp
+
+ ld $tmp2,16($inp)
+# ifdef MIPSEB
+ dsllv $in0,$in0,$shr
+ dsrlv $tmp3,$in1,$shl
+ dsllv $in1,$in1,$shr
+ dsrlv $tmp2,$tmp2,$shl
+# else
+ dsrlv $in0,$in0,$shr
+ dsllv $tmp3,$in1,$shl
+ dsrlv $in1,$in1,$shr
+ dsllv $tmp2,$tmp2,$shl
+# endif
+ or $in0,$in0,$tmp3
+ or $in1,$in1,$tmp2
+.Laligned_inp:
+#else
+ ldl $in0,0+MSB($inp) # load input
+ ldl $in1,8+MSB($inp)
+ ldr $in0,0+LSB($inp)
+ ldr $in1,8+LSB($inp)
+#endif
+ daddiu $inp,16
+#ifdef MIPSEB
+# if defined(_MIPS_ARCH_MIPS64R2)
+ dsbh $in0,$in0 # byte swap
+ dsbh $in1,$in1
+ dshd $in0,$in0
+ dshd $in1,$in1
+# else
+ ori $tmp0,$zero,0xFF
+ dsll $tmp2,$tmp0,32
+ or $tmp0,$tmp2 # 0x000000FF000000FF
+
+ and $tmp1,$in0,$tmp0 # byte swap
+ and $tmp3,$in1,$tmp0
+ dsrl $tmp2,$in0,24
+ dsrl $tmp4,$in1,24
+ dsll $tmp1,24
+ dsll $tmp3,24
+ and $tmp2,$tmp0
+ and $tmp4,$tmp0
+ dsll $tmp0,8 # 0x0000FF000000FF00
+ or $tmp1,$tmp2
+ or $tmp3,$tmp4
+ and $tmp2,$in0,$tmp0
+ and $tmp4,$in1,$tmp0
+ dsrl $in0,8
+ dsrl $in1,8
+ dsll $tmp2,8
+ dsll $tmp4,8
+ and $in0,$tmp0
+ and $in1,$tmp0
+ or $tmp1,$tmp2
+ or $tmp3,$tmp4
+ or $in0,$tmp1
+ or $in1,$tmp3
+ dsrl $tmp1,$in0,32
+ dsrl $tmp3,$in1,32
+ dsll $in0,32
+ dsll $in1,32
+ or $in0,$tmp1
+ or $in1,$tmp3
+# endif
+#endif
+ dsrl $tmp1,$h2,2 # modulo-scheduled reduction
+ andi $h2,$h2,3
+ dsll $tmp0,$tmp1,2
+
+ daddu $d0,$h0,$in0 # accumulate input
+ daddu $tmp1,$tmp0
+ sltu $tmp0,$d0,$h0
+ daddu $d0,$d0,$tmp1 # ... and residue
+ sltu $tmp1,$d0,$tmp1
+ daddu $d1,$h1,$in1
+ daddu $tmp0,$tmp1
+ sltu $tmp1,$d1,$h1
+ daddu $d1,$tmp0
+
+ dmultu ($r0,$d0) # h0*r0
+ daddu $d2,$h2,$padbit
+ sltu $tmp0,$d1,$tmp0
+ mflo ($h0,$r0,$d0)
+ mfhi ($h1,$r0,$d0)
+
+ dmultu ($rs1,$d1) # h1*5*r1
+ daddu $d2,$tmp1
+ daddu $d2,$tmp0
+ mflo ($tmp0,$rs1,$d1)
+ mfhi ($tmp1,$rs1,$d1)
+
+ dmultu ($r1,$d0) # h0*r1
+ mflo ($tmp2,$r1,$d0)
+ mfhi ($h2,$r1,$d0)
+ daddu $h0,$tmp0
+ daddu $h1,$tmp1
+ sltu $tmp0,$h0,$tmp0
+
+ dmultu ($r0,$d1) # h1*r0
+ daddu $h1,$tmp0
+ daddu $h1,$tmp2
+ mflo ($tmp0,$r0,$d1)
+ mfhi ($tmp1,$r0,$d1)
+
+ dmultu ($rs1,$d2) # h2*5*r1
+ sltu $tmp2,$h1,$tmp2
+ daddu $h2,$tmp2
+ mflo ($tmp2,$rs1,$d2)
+
+ dmultu ($r0,$d2) # h2*r0
+ daddu $h1,$tmp0
+ daddu $h2,$tmp1
+ mflo ($tmp3,$r0,$d2)
+ sltu $tmp0,$h1,$tmp0
+ daddu $h2,$tmp0
+
+ daddu $h1,$tmp2
+ sltu $tmp2,$h1,$tmp2
+ daddu $h2,$tmp2
+ daddu $h2,$tmp3
+
+ bne $inp,$len,.Loop
+
+ sd $h0,0($ctx) # store hash value
+ sd $h1,8($ctx)
+ sd $h2,16($ctx)
+
+ .set noreorder
+#if defined(_MIPS_ARCH_MIPS64R6)
+ ld $s7,56($sp)
+ ld $s6,48($sp)
+#endif
+ ld $s5,40($sp) # epilogue
+ ld $s4,32($sp)
+___
+$code.=<<___ if ($flavour =~ /nubi/i); # optimize non-nubi epilogue
+ ld $s3,24($sp)
+ ld $s2,16($sp)
+ ld $s1,8($sp)
+ ld $s0,0($sp)
+___
+$code.=<<___;
+ jr $ra
+#if defined(_MIPS_ARCH_MIPS64R6)
+ daddu $sp,8*8
+#else
+ daddu $sp,6*8
+#endif
+.end poly1305_blocks_internal
+___
+}
+{
+my ($ctx,$mac,$nonce) = ($a0,$a1,$a2);
+
+$code.=<<___;
+.align 5
+.globl poly1305_emit
+.ent poly1305_emit
+poly1305_emit:
+ .frame $sp,0,$ra
+ .set reorder
+
+ ld $tmp2,16($ctx)
+ ld $tmp0,0($ctx)
+ ld $tmp1,8($ctx)
+
+ li $in0,-4 # final reduction
+ dsrl $in1,$tmp2,2
+ and $in0,$tmp2
+ andi $tmp2,$tmp2,3
+ daddu $in0,$in1
+
+ daddu $tmp0,$tmp0,$in0
+ sltu $in1,$tmp0,$in0
+ daddiu $in0,$tmp0,5 # compare to modulus
+ daddu $tmp1,$tmp1,$in1
+ sltiu $tmp3,$in0,5
+ sltu $tmp4,$tmp1,$in1
+ daddu $in1,$tmp1,$tmp3
+ daddu $tmp2,$tmp2,$tmp4
+ sltu $tmp3,$in1,$tmp3
+ daddu $tmp2,$tmp2,$tmp3
+
+ dsrl $tmp2,2 # see if it carried/borrowed
+ dsubu $tmp2,$zero,$tmp2
+
+ xor $in0,$tmp0
+ xor $in1,$tmp1
+ and $in0,$tmp2
+ and $in1,$tmp2
+ xor $in0,$tmp0
+ xor $in1,$tmp1
+
+ lwu $tmp0,0($nonce) # load nonce
+ lwu $tmp1,4($nonce)
+ lwu $tmp2,8($nonce)
+ lwu $tmp3,12($nonce)
+ dsll $tmp1,32
+ dsll $tmp3,32
+ or $tmp0,$tmp1
+ or $tmp2,$tmp3
+
+ daddu $in0,$tmp0 # accumulate nonce
+ daddu $in1,$tmp2
+ sltu $tmp0,$in0,$tmp0
+ daddu $in1,$tmp0
+
+ dsrl $tmp0,$in0,8 # write mac value
+ dsrl $tmp1,$in0,16
+ dsrl $tmp2,$in0,24
+ sb $in0,0($mac)
+ dsrl $tmp3,$in0,32
+ sb $tmp0,1($mac)
+ dsrl $tmp0,$in0,40
+ sb $tmp1,2($mac)
+ dsrl $tmp1,$in0,48
+ sb $tmp2,3($mac)
+ dsrl $tmp2,$in0,56
+ sb $tmp3,4($mac)
+ dsrl $tmp3,$in1,8
+ sb $tmp0,5($mac)
+ dsrl $tmp0,$in1,16
+ sb $tmp1,6($mac)
+ dsrl $tmp1,$in1,24
+ sb $tmp2,7($mac)
+
+ sb $in1,8($mac)
+ dsrl $tmp2,$in1,32
+ sb $tmp3,9($mac)
+ dsrl $tmp3,$in1,40
+ sb $tmp0,10($mac)
+ dsrl $tmp0,$in1,48
+ sb $tmp1,11($mac)
+ dsrl $tmp1,$in1,56
+ sb $tmp2,12($mac)
+ sb $tmp3,13($mac)
+ sb $tmp0,14($mac)
+ sb $tmp1,15($mac)
+
+ jr $ra
+.end poly1305_emit
+.rdata
+.asciiz "Poly1305 for MIPS64, CRYPTOGAMS by \@dot-asm"
+.align 2
+___
+}
+}}} else {{{
+######################################################################
+# 32-bit code path
+#
+
+my ($ctx,$inp,$len,$padbit) = ($a0,$a1,$a2,$a3);
+my ($in0,$in1,$in2,$in3,$tmp0,$tmp1,$tmp2,$tmp3) =
+ ($a4,$a5,$a6,$a7,$at,$t0,$t1,$t2);
+
+$code.=<<___;
+#if (defined(_MIPS_ARCH_MIPS32R3) || defined(_MIPS_ARCH_MIPS32R5) || \\
+ defined(_MIPS_ARCH_MIPS32R6)) \\
+ && !defined(_MIPS_ARCH_MIPS32R2)
+# define _MIPS_ARCH_MIPS32R2
+#endif
+
+#if defined(_MIPS_ARCH_MIPS32R6)
+# define multu(rs,rt)
+# define mflo(rd,rs,rt) mulu rd,rs,rt
+# define mfhi(rd,rs,rt) muhu rd,rs,rt
+#else
+# define multu(rs,rt) multu rs,rt
+# define mflo(rd,rs,rt) mflo rd
+# define mfhi(rd,rs,rt) mfhi rd
+#endif
+
+#ifdef __KERNEL__
+# define poly1305_init poly1305_init_mips
+# define poly1305_blocks poly1305_blocks_mips
+# define poly1305_emit poly1305_emit_mips
+#endif
+
+#if defined(__MIPSEB__) && !defined(MIPSEB)
+# define MIPSEB
+#endif
+
+#ifdef MIPSEB
+# define MSB 0
+# define LSB 3
+#else
+# define MSB 3
+# define LSB 0
+#endif
+
+.text
+.set noat
+.set noreorder
+
+.align 5
+.globl poly1305_init
+.ent poly1305_init
+poly1305_init:
+ .frame $sp,0,$ra
+ .set reorder
+
+ sw $zero,0($ctx)
+ sw $zero,4($ctx)
+ sw $zero,8($ctx)
+ sw $zero,12($ctx)
+ sw $zero,16($ctx)
+
+ beqz $inp,.Lno_key
+
+#if defined(_MIPS_ARCH_MIPS32R6)
+ andi $tmp0,$inp,3 # $inp % 4
+ subu $inp,$inp,$tmp0 # align $inp
+ sll $tmp0,$tmp0,3 # byte to bit offset
+ lw $in0,0($inp)
+ lw $in1,4($inp)
+ lw $in2,8($inp)
+ lw $in3,12($inp)
+ beqz $tmp0,.Laligned_key
+
+ lw $tmp2,16($inp)
+ subu $tmp1,$zero,$tmp0
+# ifdef MIPSEB
+ sllv $in0,$in0,$tmp0
+ srlv $tmp3,$in1,$tmp1
+ sllv $in1,$in1,$tmp0
+ or $in0,$in0,$tmp3
+ srlv $tmp3,$in2,$tmp1
+ sllv $in2,$in2,$tmp0
+ or $in1,$in1,$tmp3
+ srlv $tmp3,$in3,$tmp1
+ sllv $in3,$in3,$tmp0
+ or $in2,$in2,$tmp3
+ srlv $tmp2,$tmp2,$tmp1
+ or $in3,$in3,$tmp2
+# else
+ srlv $in0,$in0,$tmp0
+ sllv $tmp3,$in1,$tmp1
+ srlv $in1,$in1,$tmp0
+ or $in0,$in0,$tmp3
+ sllv $tmp3,$in2,$tmp1
+ srlv $in2,$in2,$tmp0
+ or $in1,$in1,$tmp3
+ sllv $tmp3,$in3,$tmp1
+ srlv $in3,$in3,$tmp0
+ or $in2,$in2,$tmp3
+ sllv $tmp2,$tmp2,$tmp1
+ or $in3,$in3,$tmp2
+# endif
+.Laligned_key:
+#else
+ lwl $in0,0+MSB($inp)
+ lwl $in1,4+MSB($inp)
+ lwl $in2,8+MSB($inp)
+ lwl $in3,12+MSB($inp)
+ lwr $in0,0+LSB($inp)
+ lwr $in1,4+LSB($inp)
+ lwr $in2,8+LSB($inp)
+ lwr $in3,12+LSB($inp)
+#endif
+#ifdef MIPSEB
+# if defined(_MIPS_ARCH_MIPS32R2)
+ wsbh $in0,$in0 # byte swap
+ wsbh $in1,$in1
+ wsbh $in2,$in2
+ wsbh $in3,$in3
+ rotr $in0,$in0,16
+ rotr $in1,$in1,16
+ rotr $in2,$in2,16
+ rotr $in3,$in3,16
+# else
+ srl $tmp0,$in0,24 # byte swap
+ srl $tmp1,$in0,8
+ andi $tmp2,$in0,0xFF00
+ sll $in0,$in0,24
+ andi $tmp1,0xFF00
+ sll $tmp2,$tmp2,8
+ or $in0,$tmp0
+ srl $tmp0,$in1,24
+ or $tmp1,$tmp2
+ srl $tmp2,$in1,8
+ or $in0,$tmp1
+ andi $tmp1,$in1,0xFF00
+ sll $in1,$in1,24
+ andi $tmp2,0xFF00
+ sll $tmp1,$tmp1,8
+ or $in1,$tmp0
+ srl $tmp0,$in2,24
+ or $tmp2,$tmp1
+ srl $tmp1,$in2,8
+ or $in1,$tmp2
+ andi $tmp2,$in2,0xFF00
+ sll $in2,$in2,24
+ andi $tmp1,0xFF00
+ sll $tmp2,$tmp2,8
+ or $in2,$tmp0
+ srl $tmp0,$in3,24
+ or $tmp1,$tmp2
+ srl $tmp2,$in3,8
+ or $in2,$tmp1
+ andi $tmp1,$in3,0xFF00
+ sll $in3,$in3,24
+ andi $tmp2,0xFF00
+ sll $tmp1,$tmp1,8
+ or $in3,$tmp0
+ or $tmp2,$tmp1
+ or $in3,$tmp2
+# endif
+#endif
+ lui $tmp0,0x0fff
+ ori $tmp0,0xffff # 0x0fffffff
+ and $in0,$in0,$tmp0
+ subu $tmp0,3 # 0x0ffffffc
+ and $in1,$in1,$tmp0
+ and $in2,$in2,$tmp0
+ and $in3,$in3,$tmp0
+
+ sw $in0,20($ctx)
+ sw $in1,24($ctx)
+ sw $in2,28($ctx)
+ sw $in3,32($ctx)
+
+ srl $tmp1,$in1,2
+ srl $tmp2,$in2,2
+ srl $tmp3,$in3,2
+ addu $in1,$in1,$tmp1 # s1 = r1 + (r1 >> 2)
+ addu $in2,$in2,$tmp2
+ addu $in3,$in3,$tmp3
+ sw $in1,36($ctx)
+ sw $in2,40($ctx)
+ sw $in3,44($ctx)
+.Lno_key:
+ li $v0,0
+ jr $ra
+.end poly1305_init
+___
+{
+my $SAVED_REGS_MASK = ($flavour =~ /nubi/i) ? "0x00fff000" : "0x00ff0000";
+
+my ($h0,$h1,$h2,$h3,$h4, $r0,$r1,$r2,$r3, $rs1,$rs2,$rs3) =
+ ($s0,$s1,$s2,$s3,$s4, $s5,$s6,$s7,$s8, $s9,$s10,$s11);
+my ($d0,$d1,$d2,$d3) =
+ ($a4,$a5,$a6,$a7);
+my $shr = $t2; # used on R6
+my $one = $t2; # used on R2
+
+$code.=<<___;
+.globl poly1305_blocks
+.align 5
+.ent poly1305_blocks
+poly1305_blocks:
+ .frame $sp,16*4,$ra
+ .mask $SAVED_REGS_MASK,-4
+ .set noreorder
+ subu $sp, $sp,4*12
+ sw $s11,4*11($sp)
+ sw $s10,4*10($sp)
+ sw $s9, 4*9($sp)
+ sw $s8, 4*8($sp)
+ sw $s7, 4*7($sp)
+ sw $s6, 4*6($sp)
+ sw $s5, 4*5($sp)
+ sw $s4, 4*4($sp)
+___
+$code.=<<___ if ($flavour =~ /nubi/i); # optimize non-nubi prologue
+ sw $s3, 4*3($sp)
+ sw $s2, 4*2($sp)
+ sw $s1, 4*1($sp)
+ sw $s0, 4*0($sp)
+___
+$code.=<<___;
+ .set reorder
+
+ srl $len,4 # number of complete blocks
+ li $one,1
+ beqz $len,.Labort
+
+#if defined(_MIPS_ARCH_MIPS32R6)
+ andi $shr,$inp,3
+ subu $inp,$inp,$shr # align $inp
+ sll $shr,$shr,3 # byte to bit offset
+#endif
+
+ lw $h0,0($ctx) # load hash value
+ lw $h1,4($ctx)
+ lw $h2,8($ctx)
+ lw $h3,12($ctx)
+ lw $h4,16($ctx)
+
+ lw $r0,20($ctx) # load key
+ lw $r1,24($ctx)
+ lw $r2,28($ctx)
+ lw $r3,32($ctx)
+ lw $rs1,36($ctx)
+ lw $rs2,40($ctx)
+ lw $rs3,44($ctx)
+
+ sll $len,4
+ addu $len,$len,$inp # end of buffer
+ b .Loop
+
+.align 4
+.Loop:
+#if defined(_MIPS_ARCH_MIPS32R6)
+ lw $d0,0($inp) # load input
+ lw $d1,4($inp)
+ lw $d2,8($inp)
+ lw $d3,12($inp)
+ beqz $shr,.Laligned_inp
+
+ lw $t0,16($inp)
+ subu $t1,$zero,$shr
+# ifdef MIPSEB
+ sllv $d0,$d0,$shr
+ srlv $at,$d1,$t1
+ sllv $d1,$d1,$shr
+ or $d0,$d0,$at
+ srlv $at,$d2,$t1
+ sllv $d2,$d2,$shr
+ or $d1,$d1,$at
+ srlv $at,$d3,$t1
+ sllv $d3,$d3,$shr
+ or $d2,$d2,$at
+ srlv $t0,$t0,$t1
+ or $d3,$d3,$t0
+# else
+ srlv $d0,$d0,$shr
+ sllv $at,$d1,$t1
+ srlv $d1,$d1,$shr
+ or $d0,$d0,$at
+ sllv $at,$d2,$t1
+ srlv $d2,$d2,$shr
+ or $d1,$d1,$at
+ sllv $at,$d3,$t1
+ srlv $d3,$d3,$shr
+ or $d2,$d2,$at
+ sllv $t0,$t0,$t1
+ or $d3,$d3,$t0
+# endif
+.Laligned_inp:
+#else
+ lwl $d0,0+MSB($inp) # load input
+ lwl $d1,4+MSB($inp)
+ lwl $d2,8+MSB($inp)
+ lwl $d3,12+MSB($inp)
+ lwr $d0,0+LSB($inp)
+ lwr $d1,4+LSB($inp)
+ lwr $d2,8+LSB($inp)
+ lwr $d3,12+LSB($inp)
+#endif
+#ifdef MIPSEB
+# if defined(_MIPS_ARCH_MIPS32R2)
+ wsbh $d0,$d0 # byte swap
+ wsbh $d1,$d1
+ wsbh $d2,$d2
+ wsbh $d3,$d3
+ rotr $d0,$d0,16
+ rotr $d1,$d1,16
+ rotr $d2,$d2,16
+ rotr $d3,$d3,16
+# else
+ srl $at,$d0,24 # byte swap
+ srl $t0,$d0,8
+ andi $t1,$d0,0xFF00
+ sll $d0,$d0,24
+ andi $t0,0xFF00
+ sll $t1,$t1,8
+ or $d0,$at
+ srl $at,$d1,24
+ or $t0,$t1
+ srl $t1,$d1,8
+ or $d0,$t0
+ andi $t0,$d1,0xFF00
+ sll $d1,$d1,24
+ andi $t1,0xFF00
+ sll $t0,$t0,8
+ or $d1,$at
+ srl $at,$d2,24
+ or $t1,$t0
+ srl $t0,$d2,8
+ or $d1,$t1
+ andi $t1,$d2,0xFF00
+ sll $d2,$d2,24
+ andi $t0,0xFF00
+ sll $t1,$t1,8
+ or $d2,$at
+ srl $at,$d3,24
+ or $t0,$t1
+ srl $t1,$d3,8
+ or $d2,$t0
+ andi $t0,$d3,0xFF00
+ sll $d3,$d3,24
+ andi $t1,0xFF00
+ sll $t0,$t0,8
+ or $d3,$at
+ or $t1,$t0
+ or $d3,$t1
+# endif
+#endif
+ srl $t0,$h4,2 # modulo-scheduled reduction
+ andi $h4,$h4,3
+ sll $at,$t0,2
+
+ addu $d0,$d0,$h0 # accumulate input
+ addu $t0,$t0,$at
+ sltu $h0,$d0,$h0
+ addu $d0,$d0,$t0 # ... and residue
+ sltu $at,$d0,$t0
+
+ addu $d1,$d1,$h1
+ addu $h0,$h0,$at # carry
+ sltu $h1,$d1,$h1
+ addu $d1,$d1,$h0
+ sltu $h0,$d1,$h0
+
+ addu $d2,$d2,$h2
+ addu $h1,$h1,$h0 # carry
+ sltu $h2,$d2,$h2
+ addu $d2,$d2,$h1
+ sltu $h1,$d2,$h1
+
+ addu $d3,$d3,$h3
+ addu $h2,$h2,$h1 # carry
+ sltu $h3,$d3,$h3
+ addu $d3,$d3,$h2
+
+#if defined(_MIPS_ARCH_MIPS32R2) && !defined(_MIPS_ARCH_MIPS32R6)
+ multu $r0,$d0 # d0*r0
+ sltu $h2,$d3,$h2
+ maddu $rs3,$d1 # d1*s3
+ addu $h3,$h3,$h2 # carry
+ maddu $rs2,$d2 # d2*s2
+ addu $h4,$h4,$padbit
+ maddu $rs1,$d3 # d3*s1
+ addu $h4,$h4,$h3
+ mfhi $at
+ mflo $h0
+
+ multu $r1,$d0 # d0*r1
+ maddu $r0,$d1 # d1*r0
+ maddu $rs3,$d2 # d2*s3
+ maddu $rs2,$d3 # d3*s2
+ maddu $rs1,$h4 # h4*s1
+ maddu $at,$one # hi*1
+ mfhi $at
+ mflo $h1
+
+ multu $r2,$d0 # d0*r2
+ maddu $r1,$d1 # d1*r1
+ maddu $r0,$d2 # d2*r0
+ maddu $rs3,$d3 # d3*s3
+ maddu $rs2,$h4 # h4*s2
+ maddu $at,$one # hi*1
+ mfhi $at
+ mflo $h2
+
+ mul $t0,$r0,$h4 # h4*r0
+
+ multu $r3,$d0 # d0*r3
+ maddu $r2,$d1 # d1*r2
+ maddu $r1,$d2 # d2*r1
+ maddu $r0,$d3 # d3*r0
+ maddu $rs3,$h4 # h4*s3
+ maddu $at,$one # hi*1
+ mfhi $at
+ mflo $h3
+
+ addiu $inp,$inp,16
+
+ addu $h4,$t0,$at
+#else
+ multu ($r0,$d0) # d0*r0
+ mflo ($h0,$r0,$d0)
+ mfhi ($h1,$r0,$d0)
+
+ sltu $h2,$d3,$h2
+ addu $h3,$h3,$h2 # carry
+
+ multu ($rs3,$d1) # d1*s3
+ mflo ($at,$rs3,$d1)
+ mfhi ($t0,$rs3,$d1)
+
+ addu $h4,$h4,$padbit
+ addiu $inp,$inp,16
+ addu $h4,$h4,$h3
+
+ multu ($rs2,$d2) # d2*s2
+ mflo ($a3,$rs2,$d2)
+ mfhi ($t1,$rs2,$d2)
+ addu $h0,$h0,$at
+ addu $h1,$h1,$t0
+ multu ($rs1,$d3) # d3*s1
+ sltu $at,$h0,$at
+ addu $h1,$h1,$at
+
+ mflo ($at,$rs1,$d3)
+ mfhi ($t0,$rs1,$d3)
+ addu $h0,$h0,$a3
+ addu $h1,$h1,$t1
+ multu ($r1,$d0) # d0*r1
+ sltu $a3,$h0,$a3
+ addu $h1,$h1,$a3
+
+
+ mflo ($a3,$r1,$d0)
+ mfhi ($h2,$r1,$d0)
+ addu $h0,$h0,$at
+ addu $h1,$h1,$t0
+ multu ($r0,$d1) # d1*r0
+ sltu $at,$h0,$at
+ addu $h1,$h1,$at
+
+ mflo ($at,$r0,$d1)
+ mfhi ($t0,$r0,$d1)
+ addu $h1,$h1,$a3
+ sltu $a3,$h1,$a3
+ multu ($rs3,$d2) # d2*s3
+ addu $h2,$h2,$a3
+
+ mflo ($a3,$rs3,$d2)
+ mfhi ($t1,$rs3,$d2)
+ addu $h1,$h1,$at
+ addu $h2,$h2,$t0
+ multu ($rs2,$d3) # d3*s2
+ sltu $at,$h1,$at
+ addu $h2,$h2,$at
+
+ mflo ($at,$rs2,$d3)
+ mfhi ($t0,$rs2,$d3)
+ addu $h1,$h1,$a3
+ addu $h2,$h2,$t1
+ multu ($rs1,$h4) # h4*s1
+ sltu $a3,$h1,$a3
+ addu $h2,$h2,$a3
+
+ mflo ($a3,$rs1,$h4)
+ addu $h1,$h1,$at
+ addu $h2,$h2,$t0
+ multu ($r2,$d0) # d0*r2
+ sltu $at,$h1,$at
+ addu $h2,$h2,$at
+
+
+ mflo ($at,$r2,$d0)
+ mfhi ($h3,$r2,$d0)
+ addu $h1,$h1,$a3
+ sltu $a3,$h1,$a3
+ multu ($r1,$d1) # d1*r1
+ addu $h2,$h2,$a3
+
+ mflo ($a3,$r1,$d1)
+ mfhi ($t1,$r1,$d1)
+ addu $h2,$h2,$at
+ sltu $at,$h2,$at
+ multu ($r0,$d2) # d2*r0
+ addu $h3,$h3,$at
+
+ mflo ($at,$r0,$d2)
+ mfhi ($t0,$r0,$d2)
+ addu $h2,$h2,$a3
+ addu $h3,$h3,$t1
+ multu ($rs3,$d3) # d3*s3
+ sltu $a3,$h2,$a3
+ addu $h3,$h3,$a3
+
+ mflo ($a3,$rs3,$d3)
+ mfhi ($t1,$rs3,$d3)
+ addu $h2,$h2,$at
+ addu $h3,$h3,$t0
+ multu ($rs2,$h4) # h4*s2
+ sltu $at,$h2,$at
+ addu $h3,$h3,$at
+
+ mflo ($at,$rs2,$h4)
+ addu $h2,$h2,$a3
+ addu $h3,$h3,$t1
+ multu ($r3,$d0) # d0*r3
+ sltu $a3,$h2,$a3
+ addu $h3,$h3,$a3
+
+
+ mflo ($a3,$r3,$d0)
+ mfhi ($t1,$r3,$d0)
+ addu $h2,$h2,$at
+ sltu $at,$h2,$at
+ multu ($r2,$d1) # d1*r2
+ addu $h3,$h3,$at
+
+ mflo ($at,$r2,$d1)
+ mfhi ($t0,$r2,$d1)
+ addu $h3,$h3,$a3
+ sltu $a3,$h3,$a3
+ multu ($r0,$d3) # d3*r0
+ addu $t1,$t1,$a3
+
+ mflo ($a3,$r0,$d3)
+ mfhi ($d3,$r0,$d3)
+ addu $h3,$h3,$at
+ addu $t1,$t1,$t0
+ multu ($r1,$d2) # d2*r1
+ sltu $at,$h3,$at
+ addu $t1,$t1,$at
+
+ mflo ($at,$r1,$d2)
+ mfhi ($t0,$r1,$d2)
+ addu $h3,$h3,$a3
+ addu $t1,$t1,$d3
+ multu ($rs3,$h4) # h4*s3
+ sltu $a3,$h3,$a3
+ addu $t1,$t1,$a3
+
+ mflo ($a3,$rs3,$h4)
+ addu $h3,$h3,$at
+ addu $t1,$t1,$t0
+ multu ($r0,$h4) # h4*r0
+ sltu $at,$h3,$at
+ addu $t1,$t1,$at
+
+
+ mflo ($h4,$r0,$h4)
+ addu $h3,$h3,$a3
+ sltu $a3,$h3,$a3
+ addu $t1,$t1,$a3
+ addu $h4,$h4,$t1
+
+ li $padbit,1 # if we loop, padbit is 1
+#endif
+ bne $inp,$len,.Loop
+
+ sw $h0,0($ctx) # store hash value
+ sw $h1,4($ctx)
+ sw $h2,8($ctx)
+ sw $h3,12($ctx)
+ sw $h4,16($ctx)
+
+ .set noreorder
+.Labort:
+ lw $s11,4*11($sp)
+ lw $s10,4*10($sp)
+ lw $s9, 4*9($sp)
+ lw $s8, 4*8($sp)
+ lw $s7, 4*7($sp)
+ lw $s6, 4*6($sp)
+ lw $s5, 4*5($sp)
+ lw $s4, 4*4($sp)
+___
+$code.=<<___ if ($flavour =~ /nubi/i); # optimize non-nubi prologue
+ lw $s3, 4*3($sp)
+ lw $s2, 4*2($sp)
+ lw $s1, 4*1($sp)
+ lw $s0, 4*0($sp)
+___
+$code.=<<___;
+ jr $ra
+ addu $sp,$sp,4*12
+.end poly1305_blocks
+___
+}
+{
+my ($ctx,$mac,$nonce,$tmp4) = ($a0,$a1,$a2,$a3);
+
+$code.=<<___;
+.align 5
+.globl poly1305_emit
+.ent poly1305_emit
+poly1305_emit:
+ .frame $sp,0,$ra
+ .set reorder
+
+ lw $tmp4,16($ctx)
+ lw $tmp0,0($ctx)
+ lw $tmp1,4($ctx)
+ lw $tmp2,8($ctx)
+ lw $tmp3,12($ctx)
+
+ li $in0,-4 # final reduction
+ srl $ctx,$tmp4,2
+ and $in0,$in0,$tmp4
+ andi $tmp4,$tmp4,3
+ addu $ctx,$ctx,$in0
+
+ addu $tmp0,$tmp0,$ctx
+ sltu $ctx,$tmp0,$ctx
+ addiu $in0,$tmp0,5 # compare to modulus
+ addu $tmp1,$tmp1,$ctx
+ sltiu $in1,$in0,5
+ sltu $ctx,$tmp1,$ctx
+ addu $in1,$in1,$tmp1
+ addu $tmp2,$tmp2,$ctx
+ sltu $in2,$in1,$tmp1
+ sltu $ctx,$tmp2,$ctx
+ addu $in2,$in2,$tmp2
+ addu $tmp3,$tmp3,$ctx
+ sltu $in3,$in2,$tmp2
+ sltu $ctx,$tmp3,$ctx
+ addu $in3,$in3,$tmp3
+ addu $tmp4,$tmp4,$ctx
+ sltu $ctx,$in3,$tmp3
+ addu $ctx,$tmp4
+
+ srl $ctx,2 # see if it carried/borrowed
+ subu $ctx,$zero,$ctx
+
+ xor $in0,$tmp0
+ xor $in1,$tmp1
+ xor $in2,$tmp2
+ xor $in3,$tmp3
+ and $in0,$ctx
+ and $in1,$ctx
+ and $in2,$ctx
+ and $in3,$ctx
+ xor $in0,$tmp0
+ xor $in1,$tmp1
+ xor $in2,$tmp2
+ xor $in3,$tmp3
+
+ lw $tmp0,0($nonce) # load nonce
+ lw $tmp1,4($nonce)
+ lw $tmp2,8($nonce)
+ lw $tmp3,12($nonce)
+
+ addu $in0,$tmp0 # accumulate nonce
+ sltu $ctx,$in0,$tmp0
+
+ addu $in1,$tmp1
+ sltu $tmp1,$in1,$tmp1
+ addu $in1,$ctx
+ sltu $ctx,$in1,$ctx
+ addu $ctx,$tmp1
+
+ addu $in2,$tmp2
+ sltu $tmp2,$in2,$tmp2
+ addu $in2,$ctx
+ sltu $ctx,$in2,$ctx
+ addu $ctx,$tmp2
+
+ addu $in3,$tmp3
+ addu $in3,$ctx
+
+ srl $tmp0,$in0,8 # write mac value
+ srl $tmp1,$in0,16
+ srl $tmp2,$in0,24
+ sb $in0, 0($mac)
+ sb $tmp0,1($mac)
+ srl $tmp0,$in1,8
+ sb $tmp1,2($mac)
+ srl $tmp1,$in1,16
+ sb $tmp2,3($mac)
+ srl $tmp2,$in1,24
+ sb $in1, 4($mac)
+ sb $tmp0,5($mac)
+ srl $tmp0,$in2,8
+ sb $tmp1,6($mac)
+ srl $tmp1,$in2,16
+ sb $tmp2,7($mac)
+ srl $tmp2,$in2,24
+ sb $in2, 8($mac)
+ sb $tmp0,9($mac)
+ srl $tmp0,$in3,8
+ sb $tmp1,10($mac)
+ srl $tmp1,$in3,16
+ sb $tmp2,11($mac)
+ srl $tmp2,$in3,24
+ sb $in3, 12($mac)
+ sb $tmp0,13($mac)
+ sb $tmp1,14($mac)
+ sb $tmp2,15($mac)
+
+ jr $ra
+.end poly1305_emit
+.rdata
+.asciiz "Poly1305 for MIPS32, CRYPTOGAMS by \@dot-asm"
+.align 2
+___
+}
+}}}
+
+$output=pop and open STDOUT,">$output";
+print $code;
+close STDOUT;
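
For readers following the poly1305_emit listing above: the `li $in0,-4` / `srl` / `and` sequence folds the bits of h4 at or above 2^130 back into the accumulator (2^130 == 5 mod 2^130-5), the `addiu $in0,$tmp0,5` carry chain computes h+5 to detect h >= p, and the xor/and/xor block selects the reduced value without branching. A minimal C sketch of the same steps, using 32-bit limbs; the function and variable names are illustrative, not the kernel's:

```c
#include <stdint.h>

static void poly1305_emit_sketch(uint32_t h[4], uint32_t h4,
				 const uint32_t nonce[4], uint8_t mac[16])
{
	uint32_t g[4], g4, mask;
	uint64_t c;
	int i;

	/* fold bits >= 2^130 back in: 2^130 == 5 (mod 2^130 - 5) */
	c = (h4 >> 2) + (h4 & ~3u);
	h4 &= 3;
	for (i = 0; i < 4; i++) {
		c += h[i];
		h[i] = (uint32_t)c;
		c >>= 32;
	}
	h4 += (uint32_t)c;

	/* g = h + 5; bit 2 of g4 says whether h >= 2^130 - 5 */
	c = 5;
	for (i = 0; i < 4; i++) {
		c += h[i];
		g[i] = (uint32_t)c;
		c >>= 32;
	}
	g4 = h4 + (uint32_t)c;

	/* branchless select: take g (i.e. h - p) iff g reached 2^130 */
	mask = 0 - (g4 >> 2);
	for (i = 0; i < 4; i++)
		h[i] ^= (h[i] ^ g[i]) & mask;

	/* accumulate the nonce mod 2^128 and store little-endian */
	c = 0;
	for (i = 0; i < 4; i++) {
		c += (uint64_t)h[i] + nonce[i];
		mac[4 * i + 0] = (uint8_t)c;
		mac[4 * i + 1] = (uint8_t)(c >> 8);
		mac[4 * i + 2] = (uint8_t)(c >> 16);
		mac[4 * i + 3] = (uint8_t)(c >> 24);
		c >>= 32;
	}
}
```

The xor/and/xor form mirrors the assembly's constant-time selection; an equivalent and/or formulation would work but would not match the instruction sequence above.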
diff --git a/arch/mips/fw/arc/Makefile b/arch/mips/fw/arc/Makefile
index 31dd7305d643..64d685efcc77 100644
--- a/arch/mips/fw/arc/Makefile
+++ b/arch/mips/fw/arc/Makefile
@@ -3,8 +3,12 @@
# Makefile for the ARC prom monitor library routines under Linux.
#
+ifdef CONFIG_ARC_CMDLINE_ONLY
+lib-y += cmdline.o
+else
lib-y += cmdline.o env.o file.o identify.o init.o \
- misc.o salone.o time.o tree.o
+ misc.o
+endif
lib-$(CONFIG_ARC_MEMORY) += memory.o
lib-$(CONFIG_ARC_CONSOLE) += arc_con.o
diff --git a/arch/mips/fw/arc/cmdline.c b/arch/mips/fw/arc/cmdline.c
index c0122a1dc587..155c5e911723 100644
--- a/arch/mips/fw/arc/cmdline.c
+++ b/arch/mips/fw/arc/cmdline.c
@@ -17,6 +17,12 @@
#undef DEBUG_CMDLINE
+/*
+ * A 32-bit ARC PROM passes arguments and environment as 32-bit pointers.
+ * This macro takes care of the sign extension.
+ */
+#define prom_argv(index) ((char *) (long)argv[(index)])
+
static char *ignored[] = {
"ConsoleIn=",
"ConsoleOut=",
@@ -32,14 +38,14 @@ static char *used_arc[][2] = {
{ "OSLoadOptions=", "" }
};
-static char * __init move_firmware_args(char* cp)
+static char __init *move_firmware_args(int argc, LONG *argv, char *cp)
{
char *s;
int actr, i;
actr = 1; /* Always ignore argv[0] */
- while (actr < prom_argc) {
+ while (actr < argc) {
for(i = 0; i < ARRAY_SIZE(used_arc); i++) {
int len = strlen(used_arc[i][0]);
@@ -64,7 +70,7 @@ static char * __init move_firmware_args(char* cp)
return cp;
}
-void __init prom_init_cmdline(void)
+void __init prom_init_cmdline(int argc, LONG *argv)
{
char *cp;
int actr, i;
@@ -76,9 +82,9 @@ void __init prom_init_cmdline(void)
* Move ARC variables to the beginning to make sure they can be
* overridden by later arguments.
*/
- cp = move_firmware_args(cp);
+ cp = move_firmware_args(argc, argv, cp);
- while (actr < prom_argc) {
+ while (actr < argc) {
for (i = 0; i < ARRAY_SIZE(ignored); i++) {
int len = strlen(ignored[i]);
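
The prom_argv() macro introduced above boils down to the following: a 32-bit ARC PROM hands a 64-bit kernel 32-bit pointer values, which are CKSEG0/1 addresses and must be sign-extended to their canonical 64-bit form before being dereferenced. A standalone sketch of the conversion:

```c
#include <stdint.h>

/* illustrative helper, not kernel code */
static inline char *prom_ptr(uint32_t prom_word)
{
	/* int32_t -> long sign-extends, e.g. 0x80001234 -> 0xffffffff80001234 */
	return (char *)(long)(int32_t)prom_word;
}
```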
diff --git a/arch/mips/fw/arc/env.c b/arch/mips/fw/arc/env.c
index 1118a26b32ee..02407a7bb38e 100644
--- a/arch/mips/fw/arc/env.c
+++ b/arch/mips/fw/arc/env.c
@@ -19,9 +19,3 @@ ArcGetEnvironmentVariable(CHAR *name)
{
return (CHAR *) ARC_CALL1(get_evar, name);
}
-
-LONG __init
-ArcSetEnvironmentVariable(PCHAR name, PCHAR value)
-{
- return ARC_CALL2(set_evar, name, value);
-}
diff --git a/arch/mips/fw/arc/file.c b/arch/mips/fw/arc/file.c
index 49fd3ff13fe5..b0d8535c80cc 100644
--- a/arch/mips/fw/arc/file.c
+++ b/arch/mips/fw/arc/file.c
@@ -13,62 +13,13 @@
#include <asm/sgialib.h>
LONG
-ArcGetDirectoryEntry(ULONG FileID, struct linux_vdirent *Buffer,
- ULONG N, ULONG *Count)
-{
- return ARC_CALL4(get_vdirent, FileID, Buffer, N, Count);
-}
-
-LONG
-ArcOpen(CHAR *Path, enum linux_omode OpenMode, ULONG *FileID)
-{
- return ARC_CALL3(open, Path, OpenMode, FileID);
-}
-
-LONG
-ArcClose(ULONG FileID)
-{
- return ARC_CALL1(close, FileID);
-}
-
-LONG
ArcRead(ULONG FileID, VOID *Buffer, ULONG N, ULONG *Count)
{
return ARC_CALL4(read, FileID, Buffer, N, Count);
}
LONG
-ArcGetReadStatus(ULONG FileID)
-{
- return ARC_CALL1(get_rstatus, FileID);
-}
-
-LONG
ArcWrite(ULONG FileID, PVOID Buffer, ULONG N, PULONG Count)
{
return ARC_CALL4(write, FileID, Buffer, N, Count);
}
-
-LONG
-ArcSeek(ULONG FileID, struct linux_bigint *Position, enum linux_seekmode SeekMode)
-{
- return ARC_CALL3(seek, FileID, Position, SeekMode);
-}
-
-LONG
-ArcMount(char *name, enum linux_mountops op)
-{
- return ARC_CALL2(mount, name, op);
-}
-
-LONG
-ArcGetFileInformation(ULONG FileID, struct linux_finfo *Information)
-{
- return ARC_CALL2(get_finfo, FileID, Information);
-}
-
-LONG ArcSetFileInformation(ULONG FileID, ULONG AttributeFlags,
- ULONG AttributeMask)
-{
- return ARC_CALL3(set_finfo, FileID, AttributeFlags, AttributeMask);
-}
diff --git a/arch/mips/fw/arc/identify.c b/arch/mips/fw/arc/identify.c
index f90266c02c9d..5527e0f54079 100644
--- a/arch/mips/fw/arc/identify.c
+++ b/arch/mips/fw/arc/identify.c
@@ -32,10 +32,6 @@ static struct smatch mach_table[] = {
.liname = "SGI Indy",
.flags = PROM_FLAG_ARCS,
}, {
- .arcname = "SGI-IP27",
- .liname = "SGI Origin",
- .flags = PROM_FLAG_ARCS,
- }, {
.arcname = "SGI-IP28",
.liname = "SGI IP28",
.flags = PROM_FLAG_ARCS,
@@ -87,6 +83,11 @@ const char *get_system_type(void)
return system_type;
}
+static pcomponent * __init ArcGetChild(pcomponent *Current)
+{
+ return (pcomponent *) ARC_CALL1(child_component, Current);
+}
+
void __init prom_identify_arch(void)
{
pcomponent *p;
@@ -98,13 +99,7 @@ void __init prom_identify_arch(void)
*/
p = ArcGetChild(PROM_NULL_COMPONENT);
if (p == NULL) {
-#ifdef CONFIG_SGI_IP27
- /* IP27 PROM misbehaves, seems to not implement ARC
- GetChild(). So we just assume it's an IP27. */
- iname = "SGI-IP27";
-#else
iname = "Unknown";
-#endif
} else
iname = (char *) (long) p->iname;
diff --git a/arch/mips/fw/arc/init.c b/arch/mips/fw/arc/init.c
index 008555969534..f9d1dea9b2ca 100644
--- a/arch/mips/fw/arc/init.c
+++ b/arch/mips/fw/arc/init.c
@@ -18,8 +18,11 @@
/* Master romvec interface. */
struct linux_romvec *romvec;
-int prom_argc;
-LONG *_prom_argv, *_prom_envp;
+
+#if defined(CONFIG_64BIT) && defined(CONFIG_FW_ARC32)
+/* stack for calling 32bit ARC prom */
+u64 o32_stk[4096];
+#endif
void __init prom_init(void)
{
@@ -27,10 +30,6 @@ void __init prom_init(void)
romvec = ROMVECTOR;
- prom_argc = fw_arg0;
- _prom_argv = (LONG *) fw_arg1;
- _prom_envp = (LONG *) fw_arg2;
-
if (pb->magic != 0x53435241) {
printk(KERN_CRIT "Aieee, bad prom vector magic %08lx\n",
(unsigned long) pb->magic);
@@ -38,7 +37,7 @@ void __init prom_init(void)
;
}
- prom_init_cmdline();
+ prom_init_cmdline(fw_arg0, (LONG *)fw_arg1);
prom_identify_arch();
printk(KERN_INFO "PROMLIB: ARC firmware Version %d Revision %d\n",
pb->ver, pb->rev);
@@ -49,11 +48,4 @@ void __init prom_init(void)
ArcRead(0, &c, 1, &cnt);
ArcEnterInteractiveMode();
#endif
-#ifdef CONFIG_SGI_IP27
- {
- extern const struct plat_smp_ops ip27_smp_ops;
-
- register_smp_ops(&ip27_smp_ops);
- }
-#endif
}
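
A note on the init.c change above: the mutable globals prom_argc/_prom_argv are gone, and the firmware registers flow straight into the consumer. On MIPS, fw_arg0..fw_arg3 hold the $a0..$a3 values the firmware passed at kernel entry, so the resulting flow is simply (sketch, details elided):

```c
void __init prom_init_sketch(void)
{
	/* $a0 = argument count, $a1 = argument vector from the PROM */
	prom_init_cmdline(fw_arg0, (LONG *)fw_arg1);
}
```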
diff --git a/arch/mips/fw/arc/memory.c b/arch/mips/fw/arc/memory.c
index b4328b3b5288..dbbcddc82823 100644
--- a/arch/mips/fw/arc/memory.c
+++ b/arch/mips/fw/arc/memory.c
@@ -158,6 +158,10 @@ void __init prom_meminit(void)
}
}
+void __weak __init prom_cleanup(void)
+{
+}
+
void __init prom_free_prom_memory(void)
{
int i;
@@ -169,4 +173,9 @@ void __init prom_free_prom_memory(void)
free_init_pages("prom memory",
prom_mem_base[i], prom_mem_base[i] + prom_mem_size[i]);
}
+ /*
+	 * At this point it isn't safe to call PROM functions anymore;
+	 * give platforms a way to do their PROM cleanups.
+ */
+ prom_cleanup();
}
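
The empty __weak prom_cleanup() added above is the usual hook pattern: the weak definition is the default, and a platform that needs last-minute PROM teardown supplies a strong definition that the linker prefers. A hypothetical platform override might look like this (the helper it calls is illustrative only):

```c
/* hypothetical platform code */
void __init prom_cleanup(void)
{
	/* last chance to talk to the PROM before its memory is freed */
	my_platform_unhook_prom_vectors();	/* hypothetical helper */
}
```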
diff --git a/arch/mips/fw/arc/misc.c b/arch/mips/fw/arc/misc.c
index 19f710117d97..d5b2d5901324 100644
--- a/arch/mips/fw/arc/misc.c
+++ b/arch/mips/fw/arc/misc.c
@@ -21,47 +21,6 @@
#include <asm/bootinfo.h>
VOID __noreturn
-ArcHalt(VOID)
-{
- bc_disable();
- local_irq_disable();
- ARC_CALL0(halt);
-
- unreachable();
-}
-
-VOID __noreturn
-ArcPowerDown(VOID)
-{
- bc_disable();
- local_irq_disable();
- ARC_CALL0(pdown);
-
- unreachable();
-}
-
-/* XXX is this a soft reset basically? XXX */
-VOID __noreturn
-ArcRestart(VOID)
-{
- bc_disable();
- local_irq_disable();
- ARC_CALL0(restart);
-
- unreachable();
-}
-
-VOID __noreturn
-ArcReboot(VOID)
-{
- bc_disable();
- local_irq_disable();
- ARC_CALL0(reboot);
-
- unreachable();
-}
-
-VOID __noreturn
ArcEnterInteractiveMode(VOID)
{
bc_disable();
@@ -71,24 +30,6 @@ ArcEnterInteractiveMode(VOID)
unreachable();
}
-LONG
-ArcSaveConfiguration(VOID)
-{
- return ARC_CALL0(cfg_save);
-}
-
-struct linux_sysid *
-ArcGetSystemId(VOID)
-{
- return (struct linux_sysid *) ARC_CALL0(get_sysid);
-}
-
-VOID __init
-ArcFlushAllCaches(VOID)
-{
- ARC_CALL0(cache_flush);
-}
-
DISPLAY_STATUS * __init ArcGetDisplayStatus(ULONG FileID)
{
return (DISPLAY_STATUS *) ARC_CALL1(GetDisplayStatus, FileID);
diff --git a/arch/mips/fw/arc/promlib.c b/arch/mips/fw/arc/promlib.c
index be381307fbb0..5e9e840a9314 100644
--- a/arch/mips/fw/arc/promlib.c
+++ b/arch/mips/fw/arc/promlib.c
@@ -11,6 +11,21 @@
#include <asm/bcache.h>
#include <asm/setup.h>
+#if defined(CONFIG_64BIT) && defined(CONFIG_FW_ARC32)
+/*
+ * For 64-bit kernels working with a 32-bit ARC PROM, pointer arguments
+ * for ARC calls need to reside in CKSEG0/1. But as soon as the kernel
+ * switches to its first kernel thread, the stack is set to an address in
+ * XKPHYS, so anything on the stack can't be used anymore. This is solved
+ * by using a static declaration: such variables are put into BSS, which
+ * is linked to a CKSEG0 address. Since this is only used on UP platforms,
+ * no spinlock is needed.
+ */
+#define O32_STATIC static
+#else
+#define O32_STATIC
+#endif
+
/*
* IP22 boardcache is not compatible with board caches. Thus we disable it
* during romvec action. Since r4xx0.c is always compiled and linked with your
@@ -23,8 +38,10 @@
void prom_putchar(char c)
{
- ULONG cnt;
- CHAR it = c;
+ O32_STATIC ULONG cnt;
+ O32_STATIC CHAR it;
+
+ it = c;
bc_disable();
ArcWrite(1, &it, 1, &cnt);
@@ -33,8 +50,8 @@ void prom_putchar(char c)
char prom_getchar(void)
{
- ULONG cnt;
- CHAR c;
+ O32_STATIC ULONG cnt;
+ O32_STATIC CHAR c;
bc_disable();
ArcRead(0, &c, 1, &cnt);
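
To make the O32_STATIC change concrete: on CONFIG_64BIT + CONFIG_FW_ARC32 the macro expands to `static`, so prom_getchar() effectively becomes the following (a sketch of the expansion, not additional code); on all other configurations the variables remain ordinary automatics:

```c
char prom_getchar(void)		/* sketch of the 64-bit/FW_ARC32 expansion */
{
	static ULONG cnt;	/* BSS -> CKSEG0, addressable by the 32-bit PROM */
	static CHAR c;		/* not on the XKPHYS stack */

	bc_disable();
	ArcRead(0, &c, 1, &cnt);
	bc_enable();
	return c;
}
```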
diff --git a/arch/mips/fw/arc/salone.c b/arch/mips/fw/arc/salone.c
deleted file mode 100644
index 2d99f44d5576..000000000000
--- a/arch/mips/fw/arc/salone.c
+++ /dev/null
@@ -1,25 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Routines to load into memory and execute stand-along program images using
- * ARCS PROM firmware.
- *
- * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
- */
-#include <linux/init.h>
-#include <asm/sgialib.h>
-
-LONG __init ArcLoad(CHAR *Path, ULONG TopAddr, ULONG *ExecAddr, ULONG *LowAddr)
-{
- return ARC_CALL4(load, Path, TopAddr, ExecAddr, LowAddr);
-}
-
-LONG __init ArcInvoke(ULONG ExecAddr, ULONG StackAddr, ULONG Argc, CHAR *Argv[],
- CHAR *Envp[])
-{
- return ARC_CALL5(invoke, ExecAddr, StackAddr, Argc, Argv, Envp);
-}
-
-LONG __init ArcExecute(CHAR *Path, LONG Argc, CHAR *Argv[], CHAR *Envp[])
-{
- return ARC_CALL4(exec, Path, Argc, Argv, Envp);
-}
diff --git a/arch/mips/fw/arc/time.c b/arch/mips/fw/arc/time.c
deleted file mode 100644
index 190cdb50b895..000000000000
--- a/arch/mips/fw/arc/time.c
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Extracting time information from ARCS prom.
- *
- * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
- */
-#include <linux/init.h>
-
-#include <asm/fw/arc/types.h>
-#include <asm/sgialib.h>
-
-struct linux_tinfo * __init
-ArcGetTime(VOID)
-{
- return (struct linux_tinfo *) ARC_CALL0(get_tinfo);
-}
-
-ULONG __init
-ArcGetRelativeTime(VOID)
-{
- return ARC_CALL0(get_rtime);
-}
diff --git a/arch/mips/fw/arc/tree.c b/arch/mips/fw/arc/tree.c
deleted file mode 100644
index 924a37dc2569..000000000000
--- a/arch/mips/fw/arc/tree.c
+++ /dev/null
@@ -1,127 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * PROM component device tree code.
- *
- * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
- * Copyright (C) 1999 Ralf Baechle (ralf@gnu.org)
- * Copyright (C) 1999 Silicon Graphics, Inc.
- */
-#include <linux/init.h>
-#include <asm/fw/arc/types.h>
-#include <asm/sgialib.h>
-
-#undef DEBUG_PROM_TREE
-
-pcomponent * __init
-ArcGetPeer(pcomponent *Current)
-{
- if (Current == PROM_NULL_COMPONENT)
- return PROM_NULL_COMPONENT;
-
- return (pcomponent *) ARC_CALL1(next_component, Current);
-}
-
-pcomponent * __init
-ArcGetChild(pcomponent *Current)
-{
- return (pcomponent *) ARC_CALL1(child_component, Current);
-}
-
-pcomponent * __init
-ArcGetParent(pcomponent *Current)
-{
- if (Current == PROM_NULL_COMPONENT)
- return PROM_NULL_COMPONENT;
-
- return (pcomponent *) ARC_CALL1(parent_component, Current);
-}
-
-LONG __init
-ArcGetConfigurationData(VOID *Buffer, pcomponent *Current)
-{
- return ARC_CALL2(component_data, Buffer, Current);
-}
-
-pcomponent * __init
-ArcAddChild(pcomponent *Current, pcomponent *Template, VOID *ConfigurationData)
-{
- return (pcomponent *)
- ARC_CALL3(child_add, Current, Template, ConfigurationData);
-}
-
-LONG __init
-ArcDeleteComponent(pcomponent *ComponentToDelete)
-{
- return ARC_CALL1(comp_del, ComponentToDelete);
-}
-
-pcomponent * __init
-ArcGetComponent(CHAR *Path)
-{
- return (pcomponent *)ARC_CALL1(component_by_path, Path);
-}
-
-#ifdef DEBUG_PROM_TREE
-
-static char *classes[] = {
- "system", "processor", "cache", "adapter", "controller", "peripheral",
- "memory"
-};
-
-static char *types[] = {
- "arc", "cpu", "fpu", "picache", "pdcache", "sicache", "sdcache",
- "sccache", "memdev", "eisa adapter", "tc adapter", "scsi adapter",
- "dti adapter", "multi-func adapter", "disk controller",
- "tp controller", "cdrom controller", "worm controller",
- "serial controller", "net controller", "display controller",
- "parallel controller", "pointer controller", "keyboard controller",
- "audio controller", "misc controller", "disk peripheral",
- "floppy peripheral", "tp peripheral", "modem peripheral",
- "monitor peripheral", "printer peripheral", "pointer peripheral",
- "keyboard peripheral", "terminal peripheral", "line peripheral",
- "net peripheral", "misc peripheral", "anonymous"
-};
-
-static char *iflags[] = {
- "bogus", "read only", "removable", "console in", "console out",
- "input", "output"
-};
-
-static void __init
-dump_component(pcomponent *p)
-{
- printk("[%p]:class<%s>type<%s>flags<%s>ver<%d>rev<%d>",
- p, classes[p->class], types[p->type],
- iflags[p->iflags], p->vers, p->rev);
- printk("key<%08lx>\n\tamask<%08lx>cdsize<%d>ilen<%d>iname<%s>\n",
- p->key, p->amask, (int)p->cdsize, (int)p->ilen, p->iname);
-}
-
-static void __init
-traverse(pcomponent *p, int op)
-{
- dump_component(p);
- if(ArcGetChild(p))
- traverse(ArcGetChild(p), 1);
- if(ArcGetPeer(p) && op)
- traverse(ArcGetPeer(p), 1);
-}
-
-void __init
-prom_testtree(void)
-{
- pcomponent *p;
-
- p = ArcGetChild(PROM_NULL_COMPONENT);
- dump_component(p);
- p = ArcGetChild(p);
- while(p) {
- dump_component(p);
- p = ArcGetPeer(p);
- }
-}
-
-#endif /* DEBUG_PROM_TREE */
diff --git a/arch/mips/generic/init.c b/arch/mips/generic/init.c
index d5b8c4717ded..1de215b283d6 100644
--- a/arch/mips/generic/init.c
+++ b/arch/mips/generic/init.c
@@ -20,9 +20,9 @@
#include <asm/smp-ops.h>
#include <asm/time.h>
-static __initdata const void *fdt;
-static __initdata const struct mips_machine *mach;
-static __initdata const void *mach_match_data;
+static __initconst const void *fdt;
+static __initconst const struct mips_machine *mach;
+static __initconst const void *mach_match_data;
void __init prom_init(void)
{
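
On the __initdata to __initconst switch above: both annotations place objects in init sections that are discarded after boot, but __initconst maps to .init.rodata and is meant for objects that are themselves const-qualified. A two-line sketch of the distinction, with illustrative variables:

```c
static int boot_loops __initdata = 100;			/* .init.data, writable until freed */
static const char boot_msg[] __initconst = "booting";	/* .init.rodata, read-only */
```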
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index bb8658cc7f12..e5ac88392d1f 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -20,157 +20,176 @@
#include <asm/compiler.h>
#include <asm/cpu-features.h>
#include <asm/cmpxchg.h>
+#include <asm/llsc.h>
+#include <asm/sync.h>
#include <asm/war.h>
-/*
- * Using a branch-likely instruction to check the result of an sc instruction
- * works around a bug present in R10000 CPUs prior to revision 3.0 that could
- * cause ll-sc sequences to execute non-atomically.
- */
-#if R10000_LLSC_WAR
-# define __scbeqz "beqzl"
-#else
-# define __scbeqz "beqz"
-#endif
-
-#define ATOMIC_INIT(i) { (i) }
+#define ATOMIC_OPS(pfx, type) \
+static __always_inline type pfx##_read(const pfx##_t *v) \
+{ \
+ return READ_ONCE(v->counter); \
+} \
+ \
+static __always_inline void pfx##_set(pfx##_t *v, type i) \
+{ \
+ WRITE_ONCE(v->counter, i); \
+} \
+ \
+static __always_inline type pfx##_cmpxchg(pfx##_t *v, type o, type n) \
+{ \
+ return cmpxchg(&v->counter, o, n); \
+} \
+ \
+static __always_inline type pfx##_xchg(pfx##_t *v, type n) \
+{ \
+ return xchg(&v->counter, n); \
+}
-/*
- * atomic_read - read atomic variable
- * @v: pointer of type atomic_t
- *
- * Atomically reads the value of @v.
- */
-#define atomic_read(v) READ_ONCE((v)->counter)
+#define ATOMIC_INIT(i) { (i) }
+ATOMIC_OPS(atomic, int)
-/*
- * atomic_set - set atomic variable
- * @v: pointer of type atomic_t
- * @i: required value
- *
- * Atomically sets the value of @v to @i.
- */
-#define atomic_set(v, i) WRITE_ONCE((v)->counter, (i))
+#ifdef CONFIG_64BIT
+# define ATOMIC64_INIT(i) { (i) }
+ATOMIC_OPS(atomic64, s64)
+#endif
-#define ATOMIC_OP(op, c_op, asm_op) \
-static __inline__ void atomic_##op(int i, atomic_t * v) \
-{ \
- if (kernel_uses_llsc) { \
- int temp; \
- \
- loongson_llsc_mb(); \
- __asm__ __volatile__( \
- " .set push \n" \
- " .set "MIPS_ISA_LEVEL" \n" \
- "1: ll %0, %1 # atomic_" #op " \n" \
- " " #asm_op " %0, %2 \n" \
- " sc %0, %1 \n" \
- "\t" __scbeqz " %0, 1b \n" \
- " .set pop \n" \
- : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
- : "Ir" (i) : __LLSC_CLOBBER); \
- } else { \
- unsigned long flags; \
- \
- raw_local_irq_save(flags); \
- v->counter c_op i; \
- raw_local_irq_restore(flags); \
- } \
+#define ATOMIC_OP(pfx, op, type, c_op, asm_op, ll, sc) \
+static __inline__ void pfx##_##op(type i, pfx##_t * v) \
+{ \
+ type temp; \
+ \
+ if (!kernel_uses_llsc) { \
+ unsigned long flags; \
+ \
+ raw_local_irq_save(flags); \
+ v->counter c_op i; \
+ raw_local_irq_restore(flags); \
+ return; \
+ } \
+ \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set " MIPS_ISA_LEVEL " \n" \
+ " " __SYNC(full, loongson3_war) " \n" \
+ "1: " #ll " %0, %1 # " #pfx "_" #op " \n" \
+ " " #asm_op " %0, %2 \n" \
+ " " #sc " %0, %1 \n" \
+ "\t" __SC_BEQZ "%0, 1b \n" \
+ " .set pop \n" \
+ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
+ : "Ir" (i) : __LLSC_CLOBBER); \
}
-#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
-static __inline__ int atomic_##op##_return_relaxed(int i, atomic_t * v) \
-{ \
- int result; \
- \
- if (kernel_uses_llsc) { \
- int temp; \
- \
- loongson_llsc_mb(); \
- __asm__ __volatile__( \
- " .set push \n" \
- " .set "MIPS_ISA_LEVEL" \n" \
- "1: ll %1, %2 # atomic_" #op "_return \n" \
- " " #asm_op " %0, %1, %3 \n" \
- " sc %0, %2 \n" \
- "\t" __scbeqz " %0, 1b \n" \
- " " #asm_op " %0, %1, %3 \n" \
- " .set pop \n" \
- : "=&r" (result), "=&r" (temp), \
- "+" GCC_OFF_SMALL_ASM() (v->counter) \
- : "Ir" (i) : __LLSC_CLOBBER); \
- } else { \
- unsigned long flags; \
- \
- raw_local_irq_save(flags); \
- result = v->counter; \
- result c_op i; \
- v->counter = result; \
- raw_local_irq_restore(flags); \
- } \
- \
- return result; \
+#define ATOMIC_OP_RETURN(pfx, op, type, c_op, asm_op, ll, sc) \
+static __inline__ type pfx##_##op##_return_relaxed(type i, pfx##_t * v) \
+{ \
+ type temp, result; \
+ \
+ if (!kernel_uses_llsc) { \
+ unsigned long flags; \
+ \
+ raw_local_irq_save(flags); \
+ result = v->counter; \
+ result c_op i; \
+ v->counter = result; \
+ raw_local_irq_restore(flags); \
+ return result; \
+ } \
+ \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set " MIPS_ISA_LEVEL " \n" \
+ " " __SYNC(full, loongson3_war) " \n" \
+ "1: " #ll " %1, %2 # " #pfx "_" #op "_return\n" \
+ " " #asm_op " %0, %1, %3 \n" \
+ " " #sc " %0, %2 \n" \
+ "\t" __SC_BEQZ "%0, 1b \n" \
+ " " #asm_op " %0, %1, %3 \n" \
+ " .set pop \n" \
+ : "=&r" (result), "=&r" (temp), \
+ "+" GCC_OFF_SMALL_ASM() (v->counter) \
+ : "Ir" (i) : __LLSC_CLOBBER); \
+ \
+ return result; \
}
-#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
-static __inline__ int atomic_fetch_##op##_relaxed(int i, atomic_t * v) \
-{ \
- int result; \
- \
- if (kernel_uses_llsc) { \
- int temp; \
- \
- loongson_llsc_mb(); \
- __asm__ __volatile__( \
- " .set push \n" \
- " .set "MIPS_ISA_LEVEL" \n" \
- "1: ll %1, %2 # atomic_fetch_" #op " \n" \
- " " #asm_op " %0, %1, %3 \n" \
- " sc %0, %2 \n" \
- "\t" __scbeqz " %0, 1b \n" \
- " .set pop \n" \
- " move %0, %1 \n" \
- : "=&r" (result), "=&r" (temp), \
- "+" GCC_OFF_SMALL_ASM() (v->counter) \
- : "Ir" (i) : __LLSC_CLOBBER); \
- } else { \
- unsigned long flags; \
- \
- raw_local_irq_save(flags); \
- result = v->counter; \
- v->counter c_op i; \
- raw_local_irq_restore(flags); \
- } \
- \
- return result; \
+#define ATOMIC_FETCH_OP(pfx, op, type, c_op, asm_op, ll, sc) \
+static __inline__ type pfx##_fetch_##op##_relaxed(type i, pfx##_t * v) \
+{ \
+ int temp, result; \
+ \
+ if (!kernel_uses_llsc) { \
+ unsigned long flags; \
+ \
+ raw_local_irq_save(flags); \
+ result = v->counter; \
+ v->counter c_op i; \
+ raw_local_irq_restore(flags); \
+ return result; \
+ } \
+ \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set " MIPS_ISA_LEVEL " \n" \
+ " " __SYNC(full, loongson3_war) " \n" \
+ "1: " #ll " %1, %2 # " #pfx "_fetch_" #op "\n" \
+ " " #asm_op " %0, %1, %3 \n" \
+ " " #sc " %0, %2 \n" \
+ "\t" __SC_BEQZ "%0, 1b \n" \
+ " .set pop \n" \
+ " move %0, %1 \n" \
+ : "=&r" (result), "=&r" (temp), \
+ "+" GCC_OFF_SMALL_ASM() (v->counter) \
+ : "Ir" (i) : __LLSC_CLOBBER); \
+ \
+ return result; \
}
-#define ATOMIC_OPS(op, c_op, asm_op) \
- ATOMIC_OP(op, c_op, asm_op) \
- ATOMIC_OP_RETURN(op, c_op, asm_op) \
- ATOMIC_FETCH_OP(op, c_op, asm_op)
+#undef ATOMIC_OPS
+#define ATOMIC_OPS(pfx, op, type, c_op, asm_op, ll, sc) \
+ ATOMIC_OP(pfx, op, type, c_op, asm_op, ll, sc) \
+ ATOMIC_OP_RETURN(pfx, op, type, c_op, asm_op, ll, sc) \
+ ATOMIC_FETCH_OP(pfx, op, type, c_op, asm_op, ll, sc)
-ATOMIC_OPS(add, +=, addu)
-ATOMIC_OPS(sub, -=, subu)
+ATOMIC_OPS(atomic, add, int, +=, addu, ll, sc)
+ATOMIC_OPS(atomic, sub, int, -=, subu, ll, sc)
#define atomic_add_return_relaxed atomic_add_return_relaxed
#define atomic_sub_return_relaxed atomic_sub_return_relaxed
#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
+#ifdef CONFIG_64BIT
+ATOMIC_OPS(atomic64, add, s64, +=, daddu, lld, scd)
+ATOMIC_OPS(atomic64, sub, s64, -=, dsubu, lld, scd)
+# define atomic64_add_return_relaxed atomic64_add_return_relaxed
+# define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
+# define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
+# define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
+#endif /* CONFIG_64BIT */
+
#undef ATOMIC_OPS
-#define ATOMIC_OPS(op, c_op, asm_op) \
- ATOMIC_OP(op, c_op, asm_op) \
- ATOMIC_FETCH_OP(op, c_op, asm_op)
+#define ATOMIC_OPS(pfx, op, type, c_op, asm_op, ll, sc) \
+ ATOMIC_OP(pfx, op, type, c_op, asm_op, ll, sc) \
+ ATOMIC_FETCH_OP(pfx, op, type, c_op, asm_op, ll, sc)
-ATOMIC_OPS(and, &=, and)
-ATOMIC_OPS(or, |=, or)
-ATOMIC_OPS(xor, ^=, xor)
+ATOMIC_OPS(atomic, and, int, &=, and, ll, sc)
+ATOMIC_OPS(atomic, or, int, |=, or, ll, sc)
+ATOMIC_OPS(atomic, xor, int, ^=, xor, ll, sc)
#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed
+#ifdef CONFIG_64BIT
+ATOMIC_OPS(atomic64, and, s64, &=, and, lld, scd)
+ATOMIC_OPS(atomic64, or, s64, |=, or, lld, scd)
+ATOMIC_OPS(atomic64, xor, s64, ^=, xor, lld, scd)
+# define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
+# define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
+# define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed
+#endif
+
#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
@@ -184,258 +203,66 @@ ATOMIC_OPS(xor, ^=, xor)
* Atomically test @v and subtract @i if @v is greater or equal than @i.
* The function returns the old value of @v minus @i.
*/
-static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
-{
- int result;
-
- smp_mb__before_llsc();
-
- if (kernel_uses_llsc) {
- int temp;
-
- loongson_llsc_mb();
- __asm__ __volatile__(
- " .set push \n"
- " .set "MIPS_ISA_LEVEL" \n"
- "1: ll %1, %2 # atomic_sub_if_positive\n"
- " .set pop \n"
- " subu %0, %1, %3 \n"
- " move %1, %0 \n"
- " bltz %0, 2f \n"
- " .set push \n"
- " .set "MIPS_ISA_LEVEL" \n"
- " sc %1, %2 \n"
- "\t" __scbeqz " %1, 1b \n"
- "2: \n"
- " .set pop \n"
- : "=&r" (result), "=&r" (temp),
- "+" GCC_OFF_SMALL_ASM() (v->counter)
- : "Ir" (i) : __LLSC_CLOBBER);
- } else {
- unsigned long flags;
-
- raw_local_irq_save(flags);
- result = v->counter;
- result -= i;
- if (result >= 0)
- v->counter = result;
- raw_local_irq_restore(flags);
- }
-
- smp_llsc_mb();
-
- return result;
+#define ATOMIC_SIP_OP(pfx, type, op, ll, sc) \
+static __inline__ int pfx##_sub_if_positive(type i, pfx##_t * v) \
+{ \
+ type temp, result; \
+ \
+ smp_mb__before_atomic(); \
+ \
+ if (!kernel_uses_llsc) { \
+ unsigned long flags; \
+ \
+ raw_local_irq_save(flags); \
+ result = v->counter; \
+ result -= i; \
+ if (result >= 0) \
+ v->counter = result; \
+ raw_local_irq_restore(flags); \
+ smp_mb__after_atomic(); \
+ return result; \
+ } \
+ \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set " MIPS_ISA_LEVEL " \n" \
+ " " __SYNC(full, loongson3_war) " \n" \
+ "1: " #ll " %1, %2 # atomic_sub_if_positive\n" \
+ " .set pop \n" \
+ " " #op " %0, %1, %3 \n" \
+ " move %1, %0 \n" \
+ " bltz %0, 2f \n" \
+ " .set push \n" \
+ " .set " MIPS_ISA_LEVEL " \n" \
+ " " #sc " %1, %2 \n" \
+ " " __SC_BEQZ "%1, 1b \n" \
+ "2: " __SYNC(full, loongson3_war) " \n" \
+ " .set pop \n" \
+ : "=&r" (result), "=&r" (temp), \
+ "+" GCC_OFF_SMALL_ASM() (v->counter) \
+ : "Ir" (i) \
+ : __LLSC_CLOBBER); \
+ \
+ /* \
+ * In the Loongson3 workaround case we already have a \
+ * completion barrier at 2: above, which is needed due to the \
+ * bltz that can branch to code outside of the LL/SC loop. As \
+ * such, we don't need to emit another barrier here. \
+ */ \
+ if (!__SYNC_loongson3_war) \
+ smp_mb__after_atomic(); \
+ \
+ return result; \
}
-#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
-#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
-
-/*
- * atomic_dec_if_positive - decrement by 1 if old value positive
- * @v: pointer of type atomic_t
- */
+ATOMIC_SIP_OP(atomic, int, subu, ll, sc)
#define atomic_dec_if_positive(v) atomic_sub_if_positive(1, v)
#ifdef CONFIG_64BIT
-
-#define ATOMIC64_INIT(i) { (i) }
-
-/*
- * atomic64_read - read atomic variable
- * @v: pointer of type atomic64_t
- *
- */
-#define atomic64_read(v) READ_ONCE((v)->counter)
-
-/*
- * atomic64_set - set atomic variable
- * @v: pointer of type atomic64_t
- * @i: required value
- */
-#define atomic64_set(v, i) WRITE_ONCE((v)->counter, (i))
-
-#define ATOMIC64_OP(op, c_op, asm_op) \
-static __inline__ void atomic64_##op(s64 i, atomic64_t * v) \
-{ \
- if (kernel_uses_llsc) { \
- s64 temp; \
- \
- loongson_llsc_mb(); \
- __asm__ __volatile__( \
- " .set push \n" \
- " .set "MIPS_ISA_LEVEL" \n" \
- "1: lld %0, %1 # atomic64_" #op " \n" \
- " " #asm_op " %0, %2 \n" \
- " scd %0, %1 \n" \
- "\t" __scbeqz " %0, 1b \n" \
- " .set pop \n" \
- : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
- : "Ir" (i) : __LLSC_CLOBBER); \
- } else { \
- unsigned long flags; \
- \
- raw_local_irq_save(flags); \
- v->counter c_op i; \
- raw_local_irq_restore(flags); \
- } \
-}
-
-#define ATOMIC64_OP_RETURN(op, c_op, asm_op) \
-static __inline__ s64 atomic64_##op##_return_relaxed(s64 i, atomic64_t * v) \
-{ \
- s64 result; \
- \
- if (kernel_uses_llsc) { \
- s64 temp; \
- \
- loongson_llsc_mb(); \
- __asm__ __volatile__( \
- " .set push \n" \
- " .set "MIPS_ISA_LEVEL" \n" \
- "1: lld %1, %2 # atomic64_" #op "_return\n" \
- " " #asm_op " %0, %1, %3 \n" \
- " scd %0, %2 \n" \
- "\t" __scbeqz " %0, 1b \n" \
- " " #asm_op " %0, %1, %3 \n" \
- " .set pop \n" \
- : "=&r" (result), "=&r" (temp), \
- "+" GCC_OFF_SMALL_ASM() (v->counter) \
- : "Ir" (i) : __LLSC_CLOBBER); \
- } else { \
- unsigned long flags; \
- \
- raw_local_irq_save(flags); \
- result = v->counter; \
- result c_op i; \
- v->counter = result; \
- raw_local_irq_restore(flags); \
- } \
- \
- return result; \
-}
-
-#define ATOMIC64_FETCH_OP(op, c_op, asm_op) \
-static __inline__ s64 atomic64_fetch_##op##_relaxed(s64 i, atomic64_t * v) \
-{ \
- s64 result; \
- \
- if (kernel_uses_llsc) { \
- s64 temp; \
- \
- loongson_llsc_mb(); \
- __asm__ __volatile__( \
- " .set push \n" \
- " .set "MIPS_ISA_LEVEL" \n" \
- "1: lld %1, %2 # atomic64_fetch_" #op "\n" \
- " " #asm_op " %0, %1, %3 \n" \
- " scd %0, %2 \n" \
- "\t" __scbeqz " %0, 1b \n" \
- " move %0, %1 \n" \
- " .set pop \n" \
- : "=&r" (result), "=&r" (temp), \
- "+" GCC_OFF_SMALL_ASM() (v->counter) \
- : "Ir" (i) : __LLSC_CLOBBER); \
- } else { \
- unsigned long flags; \
- \
- raw_local_irq_save(flags); \
- result = v->counter; \
- v->counter c_op i; \
- raw_local_irq_restore(flags); \
- } \
- \
- return result; \
-}
-
-#define ATOMIC64_OPS(op, c_op, asm_op) \
- ATOMIC64_OP(op, c_op, asm_op) \
- ATOMIC64_OP_RETURN(op, c_op, asm_op) \
- ATOMIC64_FETCH_OP(op, c_op, asm_op)
-
-ATOMIC64_OPS(add, +=, daddu)
-ATOMIC64_OPS(sub, -=, dsubu)
-
-#define atomic64_add_return_relaxed atomic64_add_return_relaxed
-#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
-#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
-#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
-
-#undef ATOMIC64_OPS
-#define ATOMIC64_OPS(op, c_op, asm_op) \
- ATOMIC64_OP(op, c_op, asm_op) \
- ATOMIC64_FETCH_OP(op, c_op, asm_op)
-
-ATOMIC64_OPS(and, &=, and)
-ATOMIC64_OPS(or, |=, or)
-ATOMIC64_OPS(xor, ^=, xor)
-
-#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
-#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
-#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed
-
-#undef ATOMIC64_OPS
-#undef ATOMIC64_FETCH_OP
-#undef ATOMIC64_OP_RETURN
-#undef ATOMIC64_OP
-
-/*
- * atomic64_sub_if_positive - conditionally subtract integer from atomic
- * variable
- * @i: integer value to subtract
- * @v: pointer of type atomic64_t
- *
- * Atomically test @v and subtract @i if @v is greater or equal than @i.
- * The function returns the old value of @v minus @i.
- */
-static __inline__ s64 atomic64_sub_if_positive(s64 i, atomic64_t * v)
-{
- s64 result;
-
- smp_mb__before_llsc();
-
- if (kernel_uses_llsc) {
- s64 temp;
-
- __asm__ __volatile__(
- " .set push \n"
- " .set "MIPS_ISA_LEVEL" \n"
- "1: lld %1, %2 # atomic64_sub_if_positive\n"
- " dsubu %0, %1, %3 \n"
- " move %1, %0 \n"
- " bltz %0, 1f \n"
- " scd %1, %2 \n"
- "\t" __scbeqz " %1, 1b \n"
- "1: \n"
- " .set pop \n"
- : "=&r" (result), "=&r" (temp),
- "+" GCC_OFF_SMALL_ASM() (v->counter)
- : "Ir" (i));
- } else {
- unsigned long flags;
-
- raw_local_irq_save(flags);
- result = v->counter;
- result -= i;
- if (result >= 0)
- v->counter = result;
- raw_local_irq_restore(flags);
- }
-
- smp_llsc_mb();
-
- return result;
-}
-
-#define atomic64_cmpxchg(v, o, n) \
- ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
-#define atomic64_xchg(v, new) (xchg(&((v)->counter), (new)))
-
-/*
- * atomic64_dec_if_positive - decrement by 1 if old value positive
- * @v: pointer of type atomic64_t
- */
+ATOMIC_SIP_OP(atomic64, s64, dsubu, lld, scd)
#define atomic64_dec_if_positive(v) atomic64_sub_if_positive(1, v)
+#endif
-#endif /* CONFIG_64BIT */
+#undef ATOMIC_SIP_OP
#endif /* _ASM_ATOMIC_H */
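
To see what the consolidated macros above generate, here is roughly the preprocessor output of ATOMIC_OP(atomic, add, int, +=, addu, ll, sc), a sketch with the !kernel_uses_llsc fallback and the Loongson3 barrier elided; __SC_BEQZ becomes plain `beqz` on CPUs without the R10000 erratum, and the output constraint is simplified to "+m":

```c
static inline void atomic_add(int i, atomic_t *v)	/* sketch */
{
	int temp;

	__asm__ __volatile__(
	"	.set	push				\n"
	"	.set	mips3				\n"	/* MIPS_ISA_LEVEL */
	"1:	ll	%0, %1		# atomic_add	\n"	/* load-linked */
	"	addu	%0, %2				\n"
	"	sc	%0, %1				\n"	/* store-conditional */
	"	beqz	%0, 1b				\n"	/* retry if sc failed */
	"	.set	pop				\n"
	: "=&r" (temp), "+m" (v->counter)
	: "Ir" (i)
	: "memory");
}
```

The same macro invoked with (atomic64, ..., daddu, lld, scd) produces the 64-bit variant, which is how the old duplicated ATOMIC64_* block disappears.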
diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h
index 9228f7386220..49ff172a72b9 100644
--- a/arch/mips/include/asm/barrier.h
+++ b/arch/mips/include/asm/barrier.h
@@ -9,131 +9,26 @@
#define __ASM_BARRIER_H
#include <asm/addrspace.h>
+#include <asm/sync.h>
-/*
- * Sync types defined by the MIPS architecture (document MD00087 table 6.5)
- * These values are used with the sync instruction to perform memory barriers.
- * Types of ordering guarantees available through the SYNC instruction:
- * - Completion Barriers
- * - Ordering Barriers
- * As compared to the completion barrier, the ordering barrier is a
- * lighter-weight operation as it does not require the specified instructions
- * before the SYNC to be already completed. Instead it only requires that those
- * specified instructions which are subsequent to the SYNC in the instruction
- * stream are never re-ordered for processing ahead of the specified
- * instructions which are before the SYNC in the instruction stream.
- * This potentially reduces how many cycles the barrier instruction must stall
- * before it completes.
- * Implementations that do not use any of the non-zero values of stype to define
- * different barriers, such as ordering barriers, must make those stype values
- * act the same as stype zero.
- */
-
-/*
- * Completion barriers:
- * - Every synchronizable specified memory instruction (loads or stores or both)
- * that occurs in the instruction stream before the SYNC instruction must be
- * already globally performed before any synchronizable specified memory
- * instructions that occur after the SYNC are allowed to be performed, with
- * respect to any other processor or coherent I/O module.
- *
- * - The barrier does not guarantee the order in which instruction fetches are
- * performed.
- *
- * - A stype value of zero will always be defined such that it performs the most
- * complete set of synchronization operations that are defined.This means
- * stype zero always does a completion barrier that affects both loads and
- * stores preceding the SYNC instruction and both loads and stores that are
- * subsequent to the SYNC instruction. Non-zero values of stype may be defined
- * by the architecture or specific implementations to perform synchronization
- * behaviors that are less complete than that of stype zero. If an
- * implementation does not use one of these non-zero values to define a
- * different synchronization behavior, then that non-zero value of stype must
- * act the same as stype zero completion barrier. This allows software written
- * for an implementation with a lighter-weight barrier to work on another
- * implementation which only implements the stype zero completion barrier.
- *
- * - A completion barrier is required, potentially in conjunction with SSNOP (in
- * Release 1 of the Architecture) or EHB (in Release 2 of the Architecture),
- * to guarantee that memory reference results are visible across operating
- * mode changes. For example, a completion barrier is required on some
- * implementations on entry to and exit from Debug Mode to guarantee that
- * memory effects are handled correctly.
- */
-
-/*
- * stype 0 - A completion barrier that affects preceding loads and stores and
- * subsequent loads and stores.
- * Older instructions which must reach the load/store ordering point before the
- * SYNC instruction completes: Loads, Stores
- * Younger instructions which must reach the load/store ordering point only
- * after the SYNC instruction completes: Loads, Stores
- * Older instructions which must be globally performed when the SYNC instruction
- * completes: Loads, Stores
- */
-#define STYPE_SYNC 0x0
-
-/*
- * Ordering barriers:
- * - Every synchronizable specified memory instruction (loads or stores or both)
- * that occurs in the instruction stream before the SYNC instruction must
- * reach a stage in the load/store datapath after which no instruction
- * re-ordering is possible before any synchronizable specified memory
- * instruction which occurs after the SYNC instruction in the instruction
- * stream reaches the same stage in the load/store datapath.
- *
- * - If any memory instruction before the SYNC instruction in program order,
- * generates a memory request to the external memory and any memory
- * instruction after the SYNC instruction in program order also generates a
- * memory request to external memory, the memory request belonging to the
- * older instruction must be globally performed before the time the memory
- * request belonging to the younger instruction is globally performed.
- *
- * - The barrier does not guarantee the order in which instruction fetches are
- * performed.
- */
+static inline void __sync(void)
+{
+ asm volatile(__SYNC(full, always) ::: "memory");
+}
-/*
- * stype 0x10 - An ordering barrier that affects preceding loads and stores and
- * subsequent loads and stores.
- * Older instructions which must reach the load/store ordering point before the
- * SYNC instruction completes: Loads, Stores
- * Younger instructions which must reach the load/store ordering point only
- * after the SYNC instruction completes: Loads, Stores
- * Older instructions which must be globally performed when the SYNC instruction
- * completes: N/A
- */
-#define STYPE_SYNC_MB 0x10
+static inline void rmb(void)
+{
+ asm volatile(__SYNC(rmb, always) ::: "memory");
+}
+#define rmb rmb
-/*
- * stype 0x14 - A completion barrier specific to global invalidations
- *
- * When a sync instruction of this type completes any preceding GINVI or GINVT
- * operation has been globalized & completed on all coherent CPUs. Anything
- * that the GINV* instruction should invalidate will have been invalidated on
- * all coherent CPUs when this instruction completes. It is implementation
- * specific whether the GINV* instructions themselves will ensure completion,
- * or this sync type will.
- *
- * In systems implementing global invalidates (ie. with Config5.GI == 2 or 3)
- * this sync type also requires that previous SYNCI operations have completed.
- */
-#define STYPE_GINV 0x14
+static inline void wmb(void)
+{
+ asm volatile(__SYNC(wmb, always) ::: "memory");
+}
+#define wmb wmb
-#ifdef CONFIG_CPU_HAS_SYNC
-#define __sync() \
- __asm__ __volatile__( \
- ".set push\n\t" \
- ".set noreorder\n\t" \
- ".set mips2\n\t" \
- "sync\n\t" \
- ".set pop" \
- : /* no output */ \
- : /* no input */ \
- : "memory")
-#else
-#define __sync() do { } while(0)
-#endif
+#define fast_mb() __sync()
#define __fast_iob() \
__asm__ __volatile__( \
@@ -146,17 +41,8 @@
: "m" (*(int *)CKSEG1) \
: "memory")
#ifdef CONFIG_CPU_CAVIUM_OCTEON
-# define OCTEON_SYNCW_STR ".set push\n.set arch=octeon\nsyncw\nsyncw\n.set pop\n"
-# define __syncw() __asm__ __volatile__(OCTEON_SYNCW_STR : : : "memory")
-
-# define fast_wmb() __syncw()
-# define fast_rmb() barrier()
-# define fast_mb() __sync()
# define fast_iob() do { } while (0)
#else /* ! CONFIG_CPU_CAVIUM_OCTEON */
-# define fast_wmb() __sync()
-# define fast_rmb() __sync()
-# define fast_mb() __sync()
# ifdef CONFIG_SGI_IP28
# define fast_iob() \
__asm__ __volatile__( \
@@ -192,23 +78,14 @@
#endif /* !CONFIG_CPU_HAS_WB */
-#define wmb() fast_wmb()
-#define rmb() fast_rmb()
-
#if defined(CONFIG_WEAK_ORDERING)
-# ifdef CONFIG_CPU_CAVIUM_OCTEON
-# define __smp_mb() __sync()
-# define __smp_rmb() barrier()
-# define __smp_wmb() __syncw()
-# else
-# define __smp_mb() __asm__ __volatile__("sync" : : :"memory")
-# define __smp_rmb() __asm__ __volatile__("sync" : : :"memory")
-# define __smp_wmb() __asm__ __volatile__("sync" : : :"memory")
-# endif
+# define __smp_mb() __sync()
+# define __smp_rmb() rmb()
+# define __smp_wmb() wmb()
#else
-#define __smp_mb() barrier()
-#define __smp_rmb() barrier()
-#define __smp_wmb() barrier()
+# define __smp_mb() barrier()
+# define __smp_rmb() barrier()
+# define __smp_wmb() barrier()
#endif
/*
@@ -218,13 +95,14 @@
* ordering will be done by smp_llsc_mb() and friends.
*/
#if defined(CONFIG_WEAK_REORDERING_BEYOND_LLSC) && defined(CONFIG_SMP)
-#define __WEAK_LLSC_MB " sync \n"
-#define smp_llsc_mb() __asm__ __volatile__(__WEAK_LLSC_MB : : :"memory")
-#define __LLSC_CLOBBER
+# define __WEAK_LLSC_MB sync
+# define smp_llsc_mb() \
+ __asm__ __volatile__(__stringify(__WEAK_LLSC_MB) : : :"memory")
+# define __LLSC_CLOBBER
#else
-#define __WEAK_LLSC_MB " \n"
-#define smp_llsc_mb() do { } while (0)
-#define __LLSC_CLOBBER "memory"
+# define __WEAK_LLSC_MB
+# define smp_llsc_mb() do { } while (0)
+# define __LLSC_CLOBBER "memory"
#endif
#ifdef CONFIG_CPU_CAVIUM_OCTEON
@@ -241,52 +119,22 @@
#define nudge_writes() mb()
#endif
-#define __smp_mb__before_atomic() __smp_mb__before_llsc()
-#define __smp_mb__after_atomic() smp_llsc_mb()
-
/*
- * Some Loongson 3 CPUs have a bug wherein execution of a memory access (load,
- * store or prefetch) in between an LL & SC can cause the SC instruction to
- * erroneously succeed, breaking atomicity. Whilst it's unusual to write code
- * containing such sequences, this bug bites harder than we might otherwise
- * expect due to reordering & speculation:
- *
- * 1) A memory access appearing prior to the LL in program order may actually
- * be executed after the LL - this is the reordering case.
- *
- * In order to avoid this we need to place a memory barrier (ie. a SYNC
- * instruction) prior to every LL instruction, in between it and any earlier
- * memory access instructions.
- *
- * This reordering case is fixed by 3A R2 CPUs, ie. 3A2000 models and later.
- *
- * 2) If a conditional branch exists between an LL & SC with a target outside
- * of the LL-SC loop, for example an exit upon value mismatch in cmpxchg()
- * or similar, then misprediction of the branch may allow speculative
- * execution of memory accesses from outside of the LL-SC loop.
- *
- * In order to avoid this we need a memory barrier (ie. a SYNC instruction)
- * at each affected branch target, for which we also use loongson_llsc_mb()
- * defined below.
- *
- * This case affects all current Loongson 3 CPUs.
- *
- * The above described cases cause an error in the cache coherence protocol;
- * such that the Invalidate of a competing LL-SC goes 'missing' and SC
- * erroneously observes its core still has Exclusive state and lets the SC
- * proceed.
- *
- * Therefore the error only occurs on SMP systems.
+ * In the Loongson3 LL/SC workaround case, all of our LL/SC loops already have
+ * a completion barrier immediately preceding the LL instruction. Therefore we
+ * can skip emitting a barrier from __smp_mb__before_atomic().
*/
-#ifdef CONFIG_CPU_LOONGSON3_WORKAROUNDS /* Loongson-3's LLSC workaround */
-#define loongson_llsc_mb() __asm__ __volatile__("sync" : : :"memory")
+#ifdef CONFIG_CPU_LOONGSON3_WORKAROUNDS
+# define __smp_mb__before_atomic()
#else
-#define loongson_llsc_mb() do { } while (0)
+# define __smp_mb__before_atomic() __smp_mb__before_llsc()
#endif
+#define __smp_mb__after_atomic() smp_llsc_mb()
+
static inline void sync_ginv(void)
{
- asm volatile("sync\t%0" :: "i"(STYPE_GINV));
+ asm volatile(__SYNC(ginv, always));
}
#include <asm-generic/barrier.h>
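
The rmb()/wmb() definitions above serve the classic publish/consume pairing; a minimal sketch of the pattern they order, with illustrative globals (SMP variants shown):

```c
#include <linux/errno.h>

static int payload;
static int ready;

static void producer(void)
{
	WRITE_ONCE(payload, 42);
	smp_wmb();		/* order payload store before ready store */
	WRITE_ONCE(ready, 1);
}

static int consumer(void)
{
	if (!READ_ONCE(ready))
		return -EAGAIN;
	smp_rmb();		/* order ready load before payload load */
	return READ_ONCE(payload);
}
```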
diff --git a/arch/mips/include/asm/bitops.h b/arch/mips/include/asm/bitops.h
index 985d6a02f9ea..a74769940fbd 100644
--- a/arch/mips/include/asm/bitops.h
+++ b/arch/mips/include/asm/bitops.h
@@ -13,16 +13,55 @@
#error only <linux/bitops.h> can be included directly
#endif
+#include <linux/bits.h>
#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/byteorder.h> /* sigh ... */
#include <asm/compiler.h>
#include <asm/cpu-features.h>
+#include <asm/isa-rev.h>
#include <asm/llsc.h>
#include <asm/sgidefs.h>
#include <asm/war.h>
+#define __bit_op(mem, insn, inputs...) do { \
+ unsigned long temp; \
+ \
+ asm volatile( \
+ " .set push \n" \
+ " .set " MIPS_ISA_LEVEL " \n" \
+ " " __SYNC(full, loongson3_war) " \n" \
+ "1: " __LL "%0, %1 \n" \
+ " " insn " \n" \
+ " " __SC "%0, %1 \n" \
+ " " __SC_BEQZ "%0, 1b \n" \
+ " .set pop \n" \
+ : "=&r"(temp), "+" GCC_OFF_SMALL_ASM()(mem) \
+ : inputs \
+ : __LLSC_CLOBBER); \
+} while (0)
+
+#define __test_bit_op(mem, ll_dst, insn, inputs...) ({ \
+ unsigned long orig, temp; \
+ \
+ asm volatile( \
+ " .set push \n" \
+ " .set " MIPS_ISA_LEVEL " \n" \
+ " " __SYNC(full, loongson3_war) " \n" \
+ "1: " __LL ll_dst ", %2 \n" \
+ " " insn " \n" \
+ " " __SC "%1, %2 \n" \
+ " " __SC_BEQZ "%1, 1b \n" \
+ " .set pop \n" \
+ : "=&r"(orig), "=&r"(temp), \
+ "+" GCC_OFF_SMALL_ASM()(mem) \
+ : inputs \
+ : __LLSC_CLOBBER); \
+ \
+ orig; \
+})
+
/*
* These are the "slower" versions of the functions and are in bitops.c.
* These functions call raw_local_irq_{save,restore}().
@@ -30,8 +69,6 @@
void __mips_set_bit(unsigned long nr, volatile unsigned long *addr);
void __mips_clear_bit(unsigned long nr, volatile unsigned long *addr);
void __mips_change_bit(unsigned long nr, volatile unsigned long *addr);
-int __mips_test_and_set_bit(unsigned long nr,
- volatile unsigned long *addr);
int __mips_test_and_set_bit_lock(unsigned long nr,
volatile unsigned long *addr);
int __mips_test_and_clear_bit(unsigned long nr,
@@ -52,51 +89,20 @@ int __mips_test_and_change_bit(unsigned long nr,
*/
static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
{
- unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
- int bit = nr & SZLONG_MASK;
- unsigned long temp;
+ volatile unsigned long *m = &addr[BIT_WORD(nr)];
+ int bit = nr % BITS_PER_LONG;
- if (kernel_uses_llsc && R10000_LLSC_WAR) {
- __asm__ __volatile__(
- " .set push \n"
- " .set arch=r4000 \n"
- "1: " __LL "%0, %1 # set_bit \n"
- " or %0, %2 \n"
- " " __SC "%0, %1 \n"
- " beqzl %0, 1b \n"
- " .set pop \n"
- : "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*m)
- : "ir" (1UL << bit), GCC_OFF_SMALL_ASM() (*m)
- : __LLSC_CLOBBER);
-#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
- } else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
- loongson_llsc_mb();
- do {
- __asm__ __volatile__(
- " " __LL "%0, %1 # set_bit \n"
- " " __INS "%0, %3, %2, 1 \n"
- " " __SC "%0, %1 \n"
- : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
- : "ir" (bit), "r" (~0)
- : __LLSC_CLOBBER);
- } while (unlikely(!temp));
-#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
- } else if (kernel_uses_llsc) {
- loongson_llsc_mb();
- do {
- __asm__ __volatile__(
- " .set push \n"
- " .set "MIPS_ISA_ARCH_LEVEL" \n"
- " " __LL "%0, %1 # set_bit \n"
- " or %0, %2 \n"
- " " __SC "%0, %1 \n"
- " .set pop \n"
- : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
- : "ir" (1UL << bit)
- : __LLSC_CLOBBER);
- } while (unlikely(!temp));
- } else
+ if (!kernel_uses_llsc) {
__mips_set_bit(nr, addr);
+ return;
+ }
+
+ if ((MIPS_ISA_REV >= 2) && __builtin_constant_p(bit) && (bit >= 16)) {
+ __bit_op(*m, __INS "%0, %3, %2, 1", "i"(bit), "r"(~0));
+ return;
+ }
+
+ __bit_op(*m, "or\t%0, %2", "ir"(BIT(bit)));
}
/*
@@ -111,51 +117,20 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
*/
static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
{
- unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
- int bit = nr & SZLONG_MASK;
- unsigned long temp;
+ volatile unsigned long *m = &addr[BIT_WORD(nr)];
+ int bit = nr % BITS_PER_LONG;
- if (kernel_uses_llsc && R10000_LLSC_WAR) {
- __asm__ __volatile__(
- " .set push \n"
- " .set arch=r4000 \n"
- "1: " __LL "%0, %1 # clear_bit \n"
- " and %0, %2 \n"
- " " __SC "%0, %1 \n"
- " beqzl %0, 1b \n"
- " .set pop \n"
- : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
- : "ir" (~(1UL << bit))
- : __LLSC_CLOBBER);
-#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
- } else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
- loongson_llsc_mb();
- do {
- __asm__ __volatile__(
- " " __LL "%0, %1 # clear_bit \n"
- " " __INS "%0, $0, %2, 1 \n"
- " " __SC "%0, %1 \n"
- : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
- : "ir" (bit)
- : __LLSC_CLOBBER);
- } while (unlikely(!temp));
-#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
- } else if (kernel_uses_llsc) {
- loongson_llsc_mb();
- do {
- __asm__ __volatile__(
- " .set push \n"
- " .set "MIPS_ISA_ARCH_LEVEL" \n"
- " " __LL "%0, %1 # clear_bit \n"
- " and %0, %2 \n"
- " " __SC "%0, %1 \n"
- " .set pop \n"
- : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
- : "ir" (~(1UL << bit))
- : __LLSC_CLOBBER);
- } while (unlikely(!temp));
- } else
+ if (!kernel_uses_llsc) {
__mips_clear_bit(nr, addr);
+ return;
+ }
+
+ if ((MIPS_ISA_REV >= 2) && __builtin_constant_p(bit)) {
+ __bit_op(*m, __INS "%0, $0, %2, 1", "i"(bit));
+ return;
+ }
+
+ __bit_op(*m, "and\t%0, %2", "ir"(~BIT(bit)));
}
/*
@@ -183,159 +158,61 @@ static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *ad
*/
static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
{
- int bit = nr & SZLONG_MASK;
-
- if (kernel_uses_llsc && R10000_LLSC_WAR) {
- unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
- unsigned long temp;
-
- __asm__ __volatile__(
- " .set push \n"
- " .set arch=r4000 \n"
- "1: " __LL "%0, %1 # change_bit \n"
- " xor %0, %2 \n"
- " " __SC "%0, %1 \n"
- " beqzl %0, 1b \n"
- " .set pop \n"
- : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
- : "ir" (1UL << bit)
- : __LLSC_CLOBBER);
- } else if (kernel_uses_llsc) {
- unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
- unsigned long temp;
-
- loongson_llsc_mb();
- do {
- __asm__ __volatile__(
- " .set push \n"
- " .set "MIPS_ISA_ARCH_LEVEL" \n"
- " " __LL "%0, %1 # change_bit \n"
- " xor %0, %2 \n"
- " " __SC "%0, %1 \n"
- " .set pop \n"
- : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
- : "ir" (1UL << bit)
- : __LLSC_CLOBBER);
- } while (unlikely(!temp));
- } else
+ volatile unsigned long *m = &addr[BIT_WORD(nr)];
+ int bit = nr % BITS_PER_LONG;
+
+ if (!kernel_uses_llsc) {
__mips_change_bit(nr, addr);
+ return;
+ }
+
+ __bit_op(*m, "xor\t%0, %2", "ir"(BIT(bit)));
}
/*
- * test_and_set_bit - Set a bit and return its old value
+ * test_and_set_bit_lock - Set a bit and return its old value
* @nr: Bit to set
* @addr: Address to count from
*
- * This operation is atomic and cannot be reordered.
- * It also implies a memory barrier.
+ * This operation is atomic and implies acquire ordering semantics
+ * after the memory operation.
*/
-static inline int test_and_set_bit(unsigned long nr,
+static inline int test_and_set_bit_lock(unsigned long nr,
volatile unsigned long *addr)
{
- int bit = nr & SZLONG_MASK;
- unsigned long res;
+ volatile unsigned long *m = &addr[BIT_WORD(nr)];
+ int bit = nr % BITS_PER_LONG;
+ unsigned long res, orig;
- smp_mb__before_llsc();
-
- if (kernel_uses_llsc && R10000_LLSC_WAR) {
- unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
- unsigned long temp;
-
- __asm__ __volatile__(
- " .set push \n"
- " .set arch=r4000 \n"
- "1: " __LL "%0, %1 # test_and_set_bit \n"
- " or %2, %0, %3 \n"
- " " __SC "%2, %1 \n"
- " beqzl %2, 1b \n"
- " and %2, %0, %3 \n"
- " .set pop \n"
- : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
- : "r" (1UL << bit)
- : __LLSC_CLOBBER);
- } else if (kernel_uses_llsc) {
- unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
- unsigned long temp;
-
- loongson_llsc_mb();
- do {
- __asm__ __volatile__(
- " .set push \n"
- " .set "MIPS_ISA_ARCH_LEVEL" \n"
- " " __LL "%0, %1 # test_and_set_bit \n"
- " or %2, %0, %3 \n"
- " " __SC "%2, %1 \n"
- " .set pop \n"
- : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
- : "r" (1UL << bit)
- : __LLSC_CLOBBER);
- } while (unlikely(!res));
-
- res = temp & (1UL << bit);
- } else
- res = __mips_test_and_set_bit(nr, addr);
+ if (!kernel_uses_llsc) {
+ res = __mips_test_and_set_bit_lock(nr, addr);
+ } else {
+ orig = __test_bit_op(*m, "%0",
+ "or\t%1, %0, %3",
+ "ir"(BIT(bit)));
+ res = (orig & BIT(bit)) != 0;
+ }
smp_llsc_mb();
- return res != 0;
+ return res;
}
/*
- * test_and_set_bit_lock - Set a bit and return its old value
+ * test_and_set_bit - Set a bit and return its old value
* @nr: Bit to set
* @addr: Address to count from
*
- * This operation is atomic and implies acquire ordering semantics
- * after the memory operation.
+ * This operation is atomic and cannot be reordered.
+ * It also implies a memory barrier.
*/
-static inline int test_and_set_bit_lock(unsigned long nr,
+static inline int test_and_set_bit(unsigned long nr,
volatile unsigned long *addr)
{
- int bit = nr & SZLONG_MASK;
- unsigned long res;
-
- if (kernel_uses_llsc && R10000_LLSC_WAR) {
- unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
- unsigned long temp;
-
- __asm__ __volatile__(
- " .set push \n"
- " .set arch=r4000 \n"
- "1: " __LL "%0, %1 # test_and_set_bit \n"
- " or %2, %0, %3 \n"
- " " __SC "%2, %1 \n"
- " beqzl %2, 1b \n"
- " and %2, %0, %3 \n"
- " .set pop \n"
- : "=&r" (temp), "+m" (*m), "=&r" (res)
- : "r" (1UL << bit)
- : __LLSC_CLOBBER);
- } else if (kernel_uses_llsc) {
- unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
- unsigned long temp;
-
- loongson_llsc_mb();
- do {
- __asm__ __volatile__(
- " .set push \n"
- " .set "MIPS_ISA_ARCH_LEVEL" \n"
- " " __LL "%0, %1 # test_and_set_bit \n"
- " or %2, %0, %3 \n"
- " " __SC "%2, %1 \n"
- " .set pop \n"
- : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
- : "r" (1UL << bit)
- : __LLSC_CLOBBER);
- } while (unlikely(!res));
-
- res = temp & (1UL << bit);
- } else
- res = __mips_test_and_set_bit_lock(nr, addr);
-
- smp_llsc_mb();
-
- return res != 0;
+ smp_mb__before_atomic();
+ return test_and_set_bit_lock(nr, addr);
}
+
/*
* test_and_clear_bit - Clear a bit and return its old value
* @nr: Bit to clear
@@ -347,71 +224,30 @@ static inline int test_and_set_bit_lock(unsigned long nr,
static inline int test_and_clear_bit(unsigned long nr,
volatile unsigned long *addr)
{
- int bit = nr & SZLONG_MASK;
- unsigned long res;
-
- smp_mb__before_llsc();
+ volatile unsigned long *m = &addr[BIT_WORD(nr)];
+ int bit = nr % BITS_PER_LONG;
+ unsigned long res, orig;
- if (kernel_uses_llsc && R10000_LLSC_WAR) {
- unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
- unsigned long temp;
+ smp_mb__before_atomic();
- __asm__ __volatile__(
- " .set push \n"
- " .set arch=r4000 \n"
- "1: " __LL "%0, %1 # test_and_clear_bit \n"
- " or %2, %0, %3 \n"
- " xor %2, %3 \n"
- " " __SC "%2, %1 \n"
- " beqzl %2, 1b \n"
- " and %2, %0, %3 \n"
- " .set pop \n"
- : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
- : "r" (1UL << bit)
- : __LLSC_CLOBBER);
-#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
- } else if (kernel_uses_llsc && __builtin_constant_p(nr)) {
- unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
- unsigned long temp;
-
- loongson_llsc_mb();
- do {
- __asm__ __volatile__(
- " " __LL "%0, %1 # test_and_clear_bit \n"
- " " __EXT "%2, %0, %3, 1 \n"
- " " __INS "%0, $0, %3, 1 \n"
- " " __SC "%0, %1 \n"
- : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
- : "ir" (bit)
- : __LLSC_CLOBBER);
- } while (unlikely(!temp));
-#endif
- } else if (kernel_uses_llsc) {
- unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
- unsigned long temp;
-
- loongson_llsc_mb();
- do {
- __asm__ __volatile__(
- " .set push \n"
- " .set "MIPS_ISA_ARCH_LEVEL" \n"
- " " __LL "%0, %1 # test_and_clear_bit \n"
- " or %2, %0, %3 \n"
- " xor %2, %3 \n"
- " " __SC "%2, %1 \n"
- " .set pop \n"
- : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
- : "r" (1UL << bit)
- : __LLSC_CLOBBER);
- } while (unlikely(!res));
-
- res = temp & (1UL << bit);
- } else
+ if (!kernel_uses_llsc) {
res = __mips_test_and_clear_bit(nr, addr);
+ } else if ((MIPS_ISA_REV >= 2) && __builtin_constant_p(nr)) {
+ res = __test_bit_op(*m, "%1",
+ __EXT "%0, %1, %3, 1;"
+ __INS "%1, $0, %3, 1",
+ "i"(bit));
+ } else {
+ orig = __test_bit_op(*m, "%0",
+ "or\t%1, %0, %3;"
+ "xor\t%1, %1, %3",
+ "ir"(BIT(bit)));
+ res = (orig & BIT(bit)) != 0;
+ }
smp_llsc_mb();
- return res != 0;
+ return res;
}
/*
@@ -425,54 +261,29 @@ static inline int test_and_clear_bit(unsigned long nr,
static inline int test_and_change_bit(unsigned long nr,
volatile unsigned long *addr)
{
- int bit = nr & SZLONG_MASK;
- unsigned long res;
+ volatile unsigned long *m = &addr[BIT_WORD(nr)];
+ int bit = nr % BITS_PER_LONG;
+ unsigned long res, orig;
- smp_mb__before_llsc();
-
- if (kernel_uses_llsc && R10000_LLSC_WAR) {
- unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
- unsigned long temp;
+ smp_mb__before_atomic();
- __asm__ __volatile__(
- " .set push \n"
- " .set arch=r4000 \n"
- "1: " __LL "%0, %1 # test_and_change_bit \n"
- " xor %2, %0, %3 \n"
- " " __SC "%2, %1 \n"
- " beqzl %2, 1b \n"
- " and %2, %0, %3 \n"
- " .set pop \n"
- : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
- : "r" (1UL << bit)
- : __LLSC_CLOBBER);
- } else if (kernel_uses_llsc) {
- unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
- unsigned long temp;
-
- loongson_llsc_mb();
- do {
- __asm__ __volatile__(
- " .set push \n"
- " .set "MIPS_ISA_ARCH_LEVEL" \n"
- " " __LL "%0, %1 # test_and_change_bit \n"
- " xor %2, %0, %3 \n"
- " " __SC "\t%2, %1 \n"
- " .set pop \n"
- : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
- : "r" (1UL << bit)
- : __LLSC_CLOBBER);
- } while (unlikely(!res));
-
- res = temp & (1UL << bit);
- } else
+ if (!kernel_uses_llsc) {
res = __mips_test_and_change_bit(nr, addr);
+ } else {
+ orig = __test_bit_op(*m, "%0",
+ "xor\t%1, %0, %3",
+ "ir"(BIT(bit)));
+ res = (orig & BIT(bit)) != 0;
+ }
smp_llsc_mb();
- return res != 0;
+ return res;
}
+#undef __bit_op
+#undef __test_bit_op
+
#include <asm-generic/bitops/non-atomic.h>
/*
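For reference, the __test_bit_op() helper invoked in the new hunks above expands to an LL/SC retry loop. A minimal portable sketch of the same load/modify/store-conditional shape, using a GCC builtin in place of real LL/SC (the function name and the builtin stand-in are illustrative, not the kernel's actual macro expansion):

	static inline unsigned long test_bit_op_sketch(unsigned long *m,
						       unsigned long mask)
	{
		unsigned long orig = __atomic_load_n(m, __ATOMIC_RELAXED);

		/* Retry until the word updates atomically, as LL/SC would. */
		while (!__atomic_compare_exchange_n(m, &orig, orig ^ mask, 0,
						    __ATOMIC_RELAXED,
						    __ATOMIC_RELAXED))
			;	/* orig is reloaded on each failure */

		return orig;	/* caller evaluates (orig & mask) != 0 */
	}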
diff --git a/arch/mips/include/asm/bootinfo.h b/arch/mips/include/asm/bootinfo.h
index 34d62229dea5..d41a5057bc69 100644
--- a/arch/mips/include/asm/bootinfo.h
+++ b/arch/mips/include/asm/bootinfo.h
@@ -61,7 +61,7 @@
/*
* Valid machtype for Loongson family
*/
-enum loongson_machine_type {
+enum loongson2ef_machine_type {
MACH_LOONGSON_UNKNOWN,
MACH_LEMOTE_FL2E,
MACH_LEMOTE_FL2F,
@@ -70,7 +70,6 @@ enum loongson_machine_type {
MACH_DEXXON_GDIUM2F10,
MACH_LEMOTE_NAS,
MACH_LEMOTE_LL2F,
- MACH_LOONGSON_GENERIC,
MACH_LOONGSON_END
};
@@ -99,6 +98,7 @@ extern void detect_memory_region(phys_addr_t start, phys_addr_t sz_min, phys_ad
extern void prom_init(void);
extern void prom_free_prom_memory(void);
+extern void prom_cleanup(void);
extern void free_init_pages(const char *what,
unsigned long begin, unsigned long end);
diff --git a/arch/mips/include/asm/bugs.h b/arch/mips/include/asm/bugs.h
index d8ab8b7129b5..d72dc6e1cf3c 100644
--- a/arch/mips/include/asm/bugs.h
+++ b/arch/mips/include/asm/bugs.h
@@ -26,9 +26,8 @@ extern void check_bugs64(void);
static inline void check_bugs_early(void)
{
-#ifdef CONFIG_64BIT
- check_bugs64_early();
-#endif
+ if (IS_ENABLED(CONFIG_CPU_R4X00_BUGS64))
+ check_bugs64_early();
}
static inline void check_bugs(void)
@@ -37,19 +36,18 @@ static inline void check_bugs(void)
cpu_data[cpu].udelay_val = loops_per_jiffy;
check_bugs32();
-#ifdef CONFIG_64BIT
- check_bugs64();
-#endif
+
+ if (IS_ENABLED(CONFIG_CPU_R4X00_BUGS64))
+ check_bugs64();
}
static inline int r4k_daddiu_bug(void)
{
-#ifdef CONFIG_64BIT
+ if (!IS_ENABLED(CONFIG_CPU_R4X00_BUGS64))
+ return 0;
+
WARN_ON(daddiu_bug < 0);
return daddiu_bug != 0;
-#else
- return 0;
-#endif
}
#endif /* _ASM_BUGS_H */
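A note on the conversion above: IS_ENABLED(CONFIG_FOO) evaluates to 1 when the option is built-in or modular and to 0 otherwise, so both branches are always parsed and type-checked, and the dead one is constant-folded away. The preprocessor form it replaces hides the call entirely on configs where the option is off (illustrative):

#ifdef CONFIG_CPU_R4X00_BUGS64
	check_bugs64();	/* not even compiled when the option is off */
#endif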
diff --git a/arch/mips/include/asm/cmpxchg.h b/arch/mips/include/asm/cmpxchg.h
index f6136871561d..5b0b3a6777ea 100644
--- a/arch/mips/include/asm/cmpxchg.h
+++ b/arch/mips/include/asm/cmpxchg.h
@@ -11,20 +11,11 @@
#include <linux/bug.h>
#include <linux/irqflags.h>
#include <asm/compiler.h>
+#include <asm/llsc.h>
+#include <asm/sync.h>
#include <asm/war.h>
/*
- * Using a branch-likely instruction to check the result of an sc instruction
- * works around a bug present in R10000 CPUs prior to revision 3.0 that could
- * cause ll-sc sequences to execute non-atomically.
- */
-#if R10000_LLSC_WAR
-# define __scbeqz "beqzl"
-#else
-# define __scbeqz "beqz"
-#endif
-
-/*
 * These functions don't exist, so if they are called you'll either:
*
* - Get an error at compile-time due to __compiletime_error, if supported by
@@ -46,18 +37,18 @@ extern unsigned long __xchg_called_with_bad_pointer(void)
__typeof(*(m)) __ret; \
\
if (kernel_uses_llsc) { \
- loongson_llsc_mb(); \
__asm__ __volatile__( \
" .set push \n" \
" .set noat \n" \
" .set push \n" \
" .set " MIPS_ISA_ARCH_LEVEL " \n" \
+ " " __SYNC(full, loongson3_war) " \n" \
"1: " ld " %0, %2 # __xchg_asm \n" \
" .set pop \n" \
" move $1, %z3 \n" \
" .set " MIPS_ISA_ARCH_LEVEL " \n" \
" " st " $1, %1 \n" \
- "\t" __scbeqz " $1, 1b \n" \
+ "\t" __SC_BEQZ "$1, 1b \n" \
" .set pop \n" \
: "=&r" (__ret), "=" GCC_OFF_SMALL_ASM() (*m) \
: GCC_OFF_SMALL_ASM() (*m), "Jr" (val) \
@@ -103,7 +94,13 @@ unsigned long __xchg(volatile void *ptr, unsigned long x, int size)
({ \
__typeof__(*(ptr)) __res; \
\
- smp_mb__before_llsc(); \
+ /* \
+ * In the Loongson3 workaround case __xchg_asm() already \
+ * contains a completion barrier prior to the LL, so we don't \
+ * need to emit an extra one here. \
+ */ \
+ if (!__SYNC_loongson3_war) \
+ smp_mb__before_llsc(); \
\
__res = (__typeof__(*(ptr))) \
__xchg((ptr), (unsigned long)(x), sizeof(*(ptr))); \
@@ -118,25 +115,24 @@ unsigned long __xchg(volatile void *ptr, unsigned long x, int size)
__typeof(*(m)) __ret; \
\
if (kernel_uses_llsc) { \
- loongson_llsc_mb(); \
__asm__ __volatile__( \
" .set push \n" \
" .set noat \n" \
" .set push \n" \
" .set "MIPS_ISA_ARCH_LEVEL" \n" \
+ " " __SYNC(full, loongson3_war) " \n" \
"1: " ld " %0, %2 # __cmpxchg_asm \n" \
" bne %0, %z3, 2f \n" \
" .set pop \n" \
" move $1, %z4 \n" \
" .set "MIPS_ISA_ARCH_LEVEL" \n" \
" " st " $1, %1 \n" \
- "\t" __scbeqz " $1, 1b \n" \
+ "\t" __SC_BEQZ "$1, 1b \n" \
" .set pop \n" \
- "2: \n" \
+ "2: " __SYNC(full, loongson3_war) " \n" \
: "=&r" (__ret), "=" GCC_OFF_SMALL_ASM() (*m) \
: GCC_OFF_SMALL_ASM() (*m), "Jr" (old), "Jr" (new) \
: __LLSC_CLOBBER); \
- loongson_llsc_mb(); \
} else { \
unsigned long __flags; \
\
@@ -190,9 +186,23 @@ unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
({ \
__typeof__(*(ptr)) __res; \
\
- smp_mb__before_llsc(); \
+ /* \
+ * In the Loongson3 workaround case __cmpxchg_asm() already \
+ * contains a completion barrier prior to the LL, so we don't \
+ * need to emit an extra one here. \
+ */ \
+ if (!__SYNC_loongson3_war) \
+ smp_mb__before_llsc(); \
+ \
__res = cmpxchg_local((ptr), (old), (new)); \
- smp_llsc_mb(); \
+ \
+ /* \
+ * In the Loongson3 workaround case __cmpxchg_asm() already \
+ * contains a completion barrier after the SC, so we don't \
+ * need to emit an extra one here. \
+ */ \
+ if (!__SYNC_loongson3_war) \
+ smp_llsc_mb(); \
\
__res; \
})
@@ -233,11 +243,11 @@ static inline unsigned long __cmpxchg64(volatile void *ptr,
*/
local_irq_save(flags);
- loongson_llsc_mb();
asm volatile(
" .set push \n"
" .set " MIPS_ISA_ARCH_LEVEL " \n"
/* Load 64 bits from ptr */
+ " " __SYNC(full, loongson3_war) " \n"
"1: lld %L0, %3 # __cmpxchg64 \n"
/*
* Split the 64 bit value we loaded into the 2 registers that hold the
@@ -269,9 +279,9 @@ static inline unsigned long __cmpxchg64(volatile void *ptr,
/* Attempt to store new at ptr */
" scd %L1, %2 \n"
/* If we failed, loop! */
- "\t" __scbeqz " %L1, 1b \n"
+ "\t" __SC_BEQZ "%L1, 1b \n"
" .set pop \n"
- "2: \n"
+ "2: " __SYNC(full, loongson3_war) " \n"
: "=&r"(ret),
"=&r"(tmp),
"=" GCC_OFF_SMALL_ASM() (*(unsigned long long *)ptr)
@@ -279,7 +289,6 @@ static inline unsigned long __cmpxchg64(volatile void *ptr,
"r" (old),
"r" (new)
: "memory");
- loongson_llsc_mb();
local_irq_restore(flags);
return ret;
@@ -312,6 +321,4 @@ static inline unsigned long __cmpxchg64(volatile void *ptr,
# endif /* !CONFIG_SMP */
#endif /* !CONFIG_64BIT */
-#undef __scbeqz
-
#endif /* __ASM_CMPXCHG_H */
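As context for the barrier changes above, __cmpxchg_asm() implements ordinary compare-and-exchange semantics; a hedged portable restatement via a GCC builtin (illustrative only -- the real implementation is the LL/SC loop shown in the hunks, with the __SYNC() barriers placed around it):

	static inline unsigned long cmpxchg_sketch(unsigned long *p,
						   unsigned long old,
						   unsigned long new)
	{
		/*
		 * On success *p becomes new; either way the value that was
		 * found at *p is returned, which is what callers compare
		 * against old.
		 */
		__atomic_compare_exchange_n(p, &old, new, 0,
					    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
		return old;
	}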
diff --git a/arch/mips/include/asm/cop2.h b/arch/mips/include/asm/cop2.h
index 63b3468ede4c..6b7396a6a115 100644
--- a/arch/mips/include/asm/cop2.h
+++ b/arch/mips/include/asm/cop2.h
@@ -33,7 +33,7 @@ extern void nlm_cop2_restore(struct nlm_cop2_state *);
#define cop2_present 1
#define cop2_lazy_restore 0
-#elif defined(CONFIG_CPU_LOONGSON3)
+#elif defined(CONFIG_CPU_LOONGSON64)
#define cop2_present 1
#define cop2_lazy_restore 1
diff --git a/arch/mips/include/asm/cpu-type.h b/arch/mips/include/asm/cpu-type.h
index 7bbb66760a07..c46c59b0f1b4 100644
--- a/arch/mips/include/asm/cpu-type.h
+++ b/arch/mips/include/asm/cpu-type.h
@@ -15,18 +15,17 @@
static inline int __pure __get_cpu_type(const int cpu_type)
{
switch (cpu_type) {
-#if defined(CONFIG_SYS_HAS_CPU_LOONGSON2E) || \
- defined(CONFIG_SYS_HAS_CPU_LOONGSON2F)
- case CPU_LOONGSON2:
+#if defined(CONFIG_SYS_HAS_CPU_LOONGSON2EF)
+ case CPU_LOONGSON2EF:
#endif
-#ifdef CONFIG_SYS_HAS_CPU_LOONGSON3
- case CPU_LOONGSON3:
+#ifdef CONFIG_SYS_HAS_CPU_LOONGSON64
+ case CPU_LOONGSON64:
#endif
#if defined(CONFIG_SYS_HAS_CPU_LOONGSON1B) || \
defined(CONFIG_SYS_HAS_CPU_LOONGSON1C)
- case CPU_LOONGSON1:
+ case CPU_LOONGSON32:
#endif
#ifdef CONFIG_SYS_HAS_CPU_MIPS32_R1
diff --git a/arch/mips/include/asm/cpu.h b/arch/mips/include/asm/cpu.h
index 7fddcb8350c6..ea830783d663 100644
--- a/arch/mips/include/asm/cpu.h
+++ b/arch/mips/include/asm/cpu.h
@@ -91,7 +91,9 @@
#define PRID_IMP_LOONGSON_32 0x4200 /* Loongson-1 */
#define PRID_IMP_R5432 0x5400
#define PRID_IMP_R5500 0x5500
-#define PRID_IMP_LOONGSON_64 0x6300 /* Loongson-2/3 */
+#define PRID_IMP_LOONGSON_64R 0x6100 /* Reduced Loongson-2 */
+#define PRID_IMP_LOONGSON_64C 0x6300 /* Classic Loongson-2 and Loongson-3 */
+#define PRID_IMP_LOONGSON_64G 0xc000 /* Generic Loongson-2 and Loongson-3 */
#define PRID_IMP_UNKNOWN 0xff00
@@ -310,15 +312,15 @@ enum cpu_type_enum {
*/
CPU_4KC, CPU_4KEC, CPU_4KSC, CPU_24K, CPU_34K, CPU_1004K, CPU_74K,
CPU_ALCHEMY, CPU_PR4450, CPU_BMIPS32, CPU_BMIPS3300, CPU_BMIPS4350,
- CPU_BMIPS4380, CPU_BMIPS5000, CPU_XBURST, CPU_LOONGSON1, CPU_M14KC,
+ CPU_BMIPS4380, CPU_BMIPS5000, CPU_XBURST, CPU_LOONGSON32, CPU_M14KC,
CPU_M14KEC, CPU_INTERAPTIV, CPU_P5600, CPU_PROAPTIV, CPU_1074K,
CPU_M5150, CPU_I6400, CPU_P6600, CPU_M6250,
/*
* MIPS64 class processors
*/
- CPU_5KC, CPU_5KE, CPU_20KC, CPU_25KF, CPU_SB1, CPU_SB1A, CPU_LOONGSON2,
- CPU_LOONGSON3, CPU_CAVIUM_OCTEON, CPU_CAVIUM_OCTEON_PLUS,
+ CPU_5KC, CPU_5KE, CPU_20KC, CPU_25KF, CPU_SB1, CPU_SB1A, CPU_LOONGSON2EF,
+ CPU_LOONGSON64, CPU_CAVIUM_OCTEON, CPU_CAVIUM_OCTEON_PLUS,
CPU_CAVIUM_OCTEON2, CPU_CAVIUM_OCTEON3, CPU_XLR, CPU_XLP, CPU_I6500,
CPU_QEMU_GENERIC,
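A short hedged example of decoding the three new implementor values (the helper is illustrative; PRID_IMP_MASK is the existing 0xff00 implementor mask from this header):

	static const char *loongson64_family(unsigned int prid)
	{
		switch (prid & PRID_IMP_MASK) {
		case PRID_IMP_LOONGSON_64R:	/* 0x6100 */
			return "Reduced Loongson-2";
		case PRID_IMP_LOONGSON_64C:	/* 0x6300 */
			return "Classic Loongson-2/3";
		case PRID_IMP_LOONGSON_64G:	/* 0xc000 */
			return "Generic Loongson-2/3";
		default:
			return "not a Loongson-2/3";
		}
	}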
diff --git a/arch/mips/include/asm/fixmap.h b/arch/mips/include/asm/fixmap.h
index 6842ffafd1e7..1784d4348c36 100644
--- a/arch/mips/include/asm/fixmap.h
+++ b/arch/mips/include/asm/fixmap.h
@@ -70,7 +70,7 @@ enum fixed_addresses {
#include <asm-generic/fixmap.h>
#define kmap_get_fixmap_pte(vaddr) \
- pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), (vaddr))
+ pte_offset_kernel(pmd_offset(pud_offset(p4d_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), (vaddr)), (vaddr))
/*
* Called from pgtable_init()
diff --git a/arch/mips/include/asm/futex.h b/arch/mips/include/asm/futex.h
index b83b0397462d..110220705e97 100644
--- a/arch/mips/include/asm/futex.h
+++ b/arch/mips/include/asm/futex.h
@@ -16,6 +16,7 @@
#include <asm/barrier.h>
#include <asm/compiler.h>
#include <asm/errno.h>
+#include <asm/sync.h>
#include <asm/war.h>
#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
@@ -32,7 +33,7 @@
" .set arch=r4000 \n" \
"2: sc $1, %2 \n" \
" beqzl $1, 1b \n" \
- __WEAK_LLSC_MB \
+ __stringify(__WEAK_LLSC_MB) " \n" \
"3: \n" \
" .insn \n" \
" .set pop \n" \
@@ -50,19 +51,19 @@
"i" (-EFAULT) \
: "memory"); \
} else if (cpu_has_llsc) { \
- loongson_llsc_mb(); \
__asm__ __volatile__( \
" .set push \n" \
" .set noat \n" \
" .set push \n" \
" .set "MIPS_ISA_ARCH_LEVEL" \n" \
+ " " __SYNC(full, loongson3_war) " \n" \
"1: "user_ll("%1", "%4")" # __futex_atomic_op\n" \
" .set pop \n" \
" " insn " \n" \
" .set "MIPS_ISA_ARCH_LEVEL" \n" \
"2: "user_sc("$1", "%2")" \n" \
" beqz $1, 1b \n" \
- __WEAK_LLSC_MB \
+ __stringify(__WEAK_LLSC_MB) " \n" \
"3: \n" \
" .insn \n" \
" .set pop \n" \
@@ -147,7 +148,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
" .set arch=r4000 \n"
"2: sc $1, %2 \n"
" beqzl $1, 1b \n"
- __WEAK_LLSC_MB
+ __stringify(__WEAK_LLSC_MB) " \n"
"3: \n"
" .insn \n"
" .set pop \n"
@@ -164,13 +165,13 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
"i" (-EFAULT)
: "memory");
} else if (cpu_has_llsc) {
- loongson_llsc_mb();
__asm__ __volatile__(
"# futex_atomic_cmpxchg_inatomic \n"
" .set push \n"
" .set noat \n"
" .set push \n"
" .set "MIPS_ISA_ARCH_LEVEL" \n"
+ " " __SYNC(full, loongson3_war) " \n"
"1: "user_ll("%1", "%3")" \n"
" bne %1, %z4, 3f \n"
" .set pop \n"
@@ -178,8 +179,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
" .set "MIPS_ISA_ARCH_LEVEL" \n"
"2: "user_sc("$1", "%2")" \n"
" beqz $1, 1b \n"
- __WEAK_LLSC_MB
- "3: \n"
+ "3: " __SYNC_ELSE(full, loongson3_war, __WEAK_LLSC_MB) "\n"
" .insn \n"
" .set pop \n"
" .section .fixup,\"ax\" \n"
@@ -194,7 +194,6 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
: GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval),
"i" (-EFAULT)
: "memory");
- loongson_llsc_mb();
} else
return -ENOSYS;
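The __SYNC_ELSE(type, reason, else) form used at label 3: above selects, at build time, between the workaround barrier and a fallback string. A sketch of the idea, assuming __SYNC_loongson3_war expands to a nonzero constant only when the Loongson3 workarounds are configured (the real string-pasting macros live in asm/sync.h; this is not their actual definition):

#if __SYNC_loongson3_war
# define BARRIER_AT_3	"\tsync\n"				/* full completion barrier */
#else
# define BARRIER_AT_3	__stringify(__WEAK_LLSC_MB) "\n"	/* weaker default */
#endif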
diff --git a/arch/mips/include/asm/hazards.h b/arch/mips/include/asm/hazards.h
index 0fa27446869a..a4f48b0f5541 100644
--- a/arch/mips/include/asm/hazards.h
+++ b/arch/mips/include/asm/hazards.h
@@ -158,7 +158,7 @@ do { \
} while (0)
#elif defined(CONFIG_MIPS_ALCHEMY) || defined(CONFIG_CPU_CAVIUM_OCTEON) || \
- defined(CONFIG_CPU_LOONGSON2) || defined(CONFIG_LOONGSON3_ENHANCEMENT) || \
+ defined(CONFIG_CPU_LOONGSON2EF) || defined(CONFIG_LOONGSON3_ENHANCEMENT) || \
defined(CONFIG_CPU_R10000) || defined(CONFIG_CPU_R5500) || defined(CONFIG_CPU_XLR)
/*
diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h
index 2b7b56736372..3f6ce74335b4 100644
--- a/arch/mips/include/asm/io.h
+++ b/arch/mips/include/asm/io.h
@@ -306,7 +306,7 @@ static inline void iounmap(const volatile void __iomem *addr)
#undef __IS_KSEG1
}
-#if defined(CONFIG_CPU_CAVIUM_OCTEON) || defined(CONFIG_CPU_LOONGSON3)
+#if defined(CONFIG_CPU_CAVIUM_OCTEON) || defined(CONFIG_CPU_LOONGSON64)
#define war_io_reorder_wmb() wmb()
#else
#define war_io_reorder_wmb() barrier()
diff --git a/arch/mips/include/asm/irqflags.h b/arch/mips/include/asm/irqflags.h
index f0b862a83816..c4728bbdf15b 100644
--- a/arch/mips/include/asm/irqflags.h
+++ b/arch/mips/include/asm/irqflags.h
@@ -41,7 +41,7 @@ static inline unsigned long arch_local_irq_save(void)
" .set push \n"
" .set reorder \n"
" .set noat \n"
-#if defined(CONFIG_CPU_LOONGSON3) || defined (CONFIG_CPU_LOONGSON1)
+#if defined(CONFIG_CPU_LOONGSON64) || defined(CONFIG_CPU_LOONGSON32)
" mfc0 %[flags], $12 \n"
" di \n"
#else
diff --git a/arch/mips/include/asm/llsc.h b/arch/mips/include/asm/llsc.h
index c6d17d171147..c49738bc3bda 100644
--- a/arch/mips/include/asm/llsc.h
+++ b/arch/mips/include/asm/llsc.h
@@ -9,20 +9,31 @@
#ifndef __ASM_LLSC_H
#define __ASM_LLSC_H
+#include <asm/isa-rev.h>
+
#if _MIPS_SZLONG == 32
-#define SZLONG_LOG 5
-#define SZLONG_MASK 31UL
#define __LL "ll "
#define __SC "sc "
#define __INS "ins "
#define __EXT "ext "
#elif _MIPS_SZLONG == 64
-#define SZLONG_LOG 6
-#define SZLONG_MASK 63UL
#define __LL "lld "
#define __SC "scd "
#define __INS "dins "
#define __EXT "dext "
#endif
+/*
+ * Using a branch-likely instruction to check the result of an sc instruction
+ * works around a bug present in R10000 CPUs prior to revision 3.0 that could
+ * cause ll-sc sequences to execute non-atomically.
+ */
+#if R10000_LLSC_WAR
+# define __SC_BEQZ "beqzl "
+#elif MIPS_ISA_REV >= 6
+# define __SC_BEQZ "beqzc "
+#else
+# define __SC_BEQZ "beqz "
+#endif
+
#endif /* __ASM_LLSC_H */
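For illustration, this is the shape of the retry loop __SC_BEQZ terminates, simplified from the __xchg_asm() hunk earlier in this diff (the .set ISA switches, barriers and the small-offset constraint are trimmed; a sketch, not a drop-in replacement):

	__asm__ __volatile__(
	"	.set	push			\n"
	"	.set	noat			\n"
	"1:	" __LL "%0, %1			\n"	/* load-linked current value */
	"	move	$1, %z2			\n"	/* value to store */
	"	" __SC "$1, %1			\n"	/* store-conditional */
	"\t" __SC_BEQZ "$1, 1b			\n"	/* retry until the SC succeeds */
	"	.set	pop			\n"
	: "=&r" (old), "+m" (*m)
	: "Jr" (val)
	: "memory");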
diff --git a/arch/mips/include/asm/mach-ip22/spaces.h b/arch/mips/include/asm/mach-ip22/spaces.h
index 7f9fa6f66059..24fe92cb5313 100644
--- a/arch/mips/include/asm/mach-ip22/spaces.h
+++ b/arch/mips/include/asm/mach-ip22/spaces.h
@@ -10,17 +10,7 @@
#ifndef _ASM_MACH_IP22_SPACES_H
#define _ASM_MACH_IP22_SPACES_H
-
-#ifdef CONFIG_64BIT
-
-#define PAGE_OFFSET 0xffffffff80000000UL
-
-#define CAC_BASE 0xffffffff80000000
-#define IO_BASE 0xffffffffa0000000
-#define UNCAC_BASE 0xffffffffa0000000
-#define MAP_BASE 0xc000000000000000
-
-#endif /* CONFIG_64BIT */
+#define PHYS_OFFSET _AC(0x08000000, UL)
#include <asm/mach-generic/spaces.h>
diff --git a/arch/mips/include/asm/mach-ip27/mmzone.h b/arch/mips/include/asm/mach-ip27/mmzone.h
index 1cd6a23a84f2..f463826515df 100644
--- a/arch/mips/include/asm/mach-ip27/mmzone.h
+++ b/arch/mips/include/asm/mach-ip27/mmzone.h
@@ -6,7 +6,7 @@
#include <asm/sn/arch.h>
#include <asm/sn/hub.h>
-#define pa_to_nid(addr) NASID_TO_COMPACT_NODEID(NASID_GET(addr))
+#define pa_to_nid(addr) NASID_GET(addr)
struct hub_data {
kern_vars_t kern_vars;
diff --git a/arch/mips/include/asm/mach-ip27/topology.h b/arch/mips/include/asm/mach-ip27/topology.h
index 965f0793a5f9..be61ddcdacab 100644
--- a/arch/mips/include/asm/mach-ip27/topology.h
+++ b/arch/mips/include/asm/mach-ip27/topology.h
@@ -7,14 +7,13 @@
#include <asm/mmzone.h>
struct cpuinfo_ip27 {
- cnodeid_t p_nodeid; /* my node ID in compact-id-space */
nasid_t p_nasid; /* my node ID in numa-as-id-space */
unsigned char p_slice; /* Physical position on node board */
};
extern struct cpuinfo_ip27 sn_cpu_info[NR_CPUS];
-#define cpu_to_node(cpu) (sn_cpu_info[(cpu)].p_nodeid)
+#define cpu_to_node(cpu) (cputonasid(cpu))
#define cpumask_of_node(node) ((node) == -1 ? \
cpu_all_mask : \
&hub_data(node)->h_cpus)
@@ -23,7 +22,7 @@ extern int pcibus_to_node(struct pci_bus *);
#define cpumask_of_pcibus(bus) (cpumask_of_node(pcibus_to_node(bus)))
-extern unsigned char __node_distances[MAX_COMPACT_NODES][MAX_COMPACT_NODES];
+extern unsigned char __node_distances[MAX_NUMNODES][MAX_NUMNODES];
#define node_distance(from, to) (__node_distances[(from)][(to)])
diff --git a/arch/mips/include/asm/mach-ip30/cpu-feature-overrides.h b/arch/mips/include/asm/mach-ip30/cpu-feature-overrides.h
new file mode 100644
index 000000000000..cfa02f3d25df
--- /dev/null
+++ b/arch/mips/include/asm/mach-ip30/cpu-feature-overrides.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * IP30/Octane cpu-features overrides.
+ *
+ * Copyright (C) 2003 Ralf Baechle <ralf@linux-mips.org>
+ * 2004-2007 Stanislaw Skowronek <skylark@unaligned.org>
+ * 2009 Johannes Dickgreber <tanzy@gmx.de>
+ * 2015 Joshua Kinard <kumba@gentoo.org>
+ *
+ */
+#ifndef __ASM_MACH_IP30_CPU_FEATURE_OVERRIDES_H
+#define __ASM_MACH_IP30_CPU_FEATURE_OVERRIDES_H
+
+#include <asm/cpu.h>
+
+/*
+ * IP30 only supports R1[024]000 processors, all using the same config
+ */
+#define cpu_has_tlb 1
+#define cpu_has_tlbinv 0
+#define cpu_has_segments 0
+#define cpu_has_eva 0
+#define cpu_has_htw 0
+#define cpu_has_rixiex 0
+#define cpu_has_maar 0
+#define cpu_has_rw_llb 0
+#define cpu_has_3kex 0
+#define cpu_has_4kex 1
+#define cpu_has_3k_cache 0
+#define cpu_has_4k_cache 1
+#define cpu_has_6k_cache 0
+#define cpu_has_8k_cache 0
+#define cpu_has_tx39_cache 0
+#define cpu_has_fpu 1
+#define cpu_has_nofpuex 0
+#define cpu_has_32fpr 1
+#define cpu_has_counter 1
+#define cpu_has_watch 1
+#define cpu_has_64bits 1
+#define cpu_has_divec 0
+#define cpu_has_vce 0
+#define cpu_has_cache_cdex_p 0
+#define cpu_has_cache_cdex_s 0
+#define cpu_has_prefetch 1
+#define cpu_has_mcheck 0
+#define cpu_has_ejtag 0
+#define cpu_has_llsc 1
+#define cpu_has_mips16 0
+#define cpu_has_mdmx 0
+#define cpu_has_mips3d 0
+#define cpu_has_smartmips 0
+#define cpu_has_rixi 0
+#define cpu_has_xpa 0
+#define cpu_has_vtag_icache 0
+#define cpu_has_dc_aliases 0
+#define cpu_has_ic_fills_f_dc 0
+
+#define cpu_icache_snoops_remote_store 1
+
+#define cpu_has_mips32r1 0
+#define cpu_has_mips32r2 0
+#define cpu_has_mips64r1 0
+#define cpu_has_mips64r2 0
+#define cpu_has_mips32r6 0
+#define cpu_has_mips64r6 0
+
+#define cpu_has_dsp 0
+#define cpu_has_dsp2 0
+#define cpu_has_mipsmt 0
+#define cpu_has_userlocal 0
+#define cpu_has_inclusive_pcaches 1
+#define cpu_hwrena_impl_bits 0
+#define cpu_has_perf_cntr_intr_bit 0
+#define cpu_has_vz 0
+#define cpu_has_fre 0
+#define cpu_has_cdmm 0
+
+#define cpu_dcache_line_size() 32
+#define cpu_icache_line_size() 64
+#define cpu_scache_line_size() 128
+
+#endif /* __ASM_MACH_IP30_CPU_FEATURE_OVERRIDES_H */
+
diff --git a/arch/mips/include/asm/mach-ip30/irq.h b/arch/mips/include/asm/mach-ip30/irq.h
new file mode 100644
index 000000000000..e5c3dd965266
--- /dev/null
+++ b/arch/mips/include/asm/mach-ip30/irq.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * HEART IRQ defines
+ *
+ * Copyright (C) 2009 Johannes Dickgreber <tanzy@gmx.de>
+ * 2014-2016 Joshua Kinard <kumba@gentoo.org>
+ *
+ */
+
+#ifndef __ASM_MACH_IP30_IRQ_H
+#define __ASM_MACH_IP30_IRQ_H
+
+/*
+ * HEART has 64 hardware interrupts, but use 128 to leave room for a few
+ * software interrupts as well (such as the CPU timer interrupt.
+ */
+#define NR_IRQS 128
+
+extern void __init ip30_install_ipi(void);
+
+/*
+ * HEART has 64 interrupt vectors available to it, subdivided into five
+ * priority levels. They are numbered 0 to 63.
+ */
+#define HEART_NUM_IRQS 64
+
+/*
+ * These are the five interrupt priority levels and their corresponding
+ * CPU IPx interrupt pins.
+ *
+ * Level 4 - Error Interrupts.
+ * Level 3 - HEART timer interrupt.
+ * Level 2 - CPU IPI, CPU debug, power button, general device interrupts.
+ * Level 1 - General device interrupts.
+ * Level 0 - General device GFX flow control interrupts.
+ */
+#define HEART_L4_INT_MASK 0xfff8000000000000ULL /* IP6 */
+#define HEART_L3_INT_MASK 0x0004000000000000ULL /* IP5 */
+#define HEART_L2_INT_MASK 0x0003ffff00000000ULL /* IP4 */
+#define HEART_L1_INT_MASK 0x00000000ffff0000ULL /* IP3 */
+#define HEART_L0_INT_MASK 0x000000000000ffffULL /* IP2 */
+
+/* HEART L0 Interrupts (Low Priority) */
+#define HEART_L0_INT_GENERIC 0
+#define HEART_L0_INT_FLOW_CTRL_HWTR_0 1
+#define HEART_L0_INT_FLOW_CTRL_HWTR_1 2
+
+/* HEART L2 Interrupts (High Priority) */
+#define HEART_L2_INT_RESCHED_CPU_0 46
+#define HEART_L2_INT_RESCHED_CPU_1 47
+#define HEART_L2_INT_CALL_CPU_0 48
+#define HEART_L2_INT_CALL_CPU_1 49
+
+/* HEART L3 Interrupts (Compare/Counter Timer) */
+#define HEART_L3_INT_TIMER 50
+
+/* HEART L4 Interrupts (Errors) */
+#define HEART_L4_INT_XWID_ERR_9 51
+#define HEART_L4_INT_XWID_ERR_A 52
+#define HEART_L4_INT_XWID_ERR_B 53
+#define HEART_L4_INT_XWID_ERR_C 54
+#define HEART_L4_INT_XWID_ERR_D 55
+#define HEART_L4_INT_XWID_ERR_E 56
+#define HEART_L4_INT_XWID_ERR_F 57
+#define HEART_L4_INT_XWID_ERR_XBOW 58
+#define HEART_L4_INT_CPU_BUS_ERR_0 59
+#define HEART_L4_INT_CPU_BUS_ERR_1 60
+#define HEART_L4_INT_CPU_BUS_ERR_2 61
+#define HEART_L4_INT_CPU_BUS_ERR_3 62
+#define HEART_L4_INT_HEART_EXCP 63
+
+/*
+ * Power Switch is wired via BaseIO BRIDGE slot #6.
+ *
+ * ACFail is wired via BaseIO BRIDGE slot #7.
+ */
+#define IP30_POWER_IRQ HEART_L2_INT_POWER_BTN
+
+#include_next <irq.h>
+
+#define IP30_HEART_L0_IRQ (MIPS_CPU_IRQ_BASE + 2)
+#define IP30_HEART_L1_IRQ (MIPS_CPU_IRQ_BASE + 3)
+#define IP30_HEART_L2_IRQ (MIPS_CPU_IRQ_BASE + 4)
+#define IP30_HEART_TIMER_IRQ (MIPS_CPU_IRQ_BASE + 5)
+#define IP30_HEART_ERR_IRQ (MIPS_CPU_IRQ_BASE + 6)
+
+#endif /* __ASM_MACH_IP30_IRQ_H */
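A hedged sketch of how the per-level masks above could classify a pending HEART interrupt vector (an illustrative helper, not part of this patch):

	static inline int heart_irq_level(unsigned long long pending)
	{
		if (pending & HEART_L4_INT_MASK)	/* IP6: errors */
			return 4;
		if (pending & HEART_L3_INT_MASK)	/* IP5: timer */
			return 3;
		if (pending & HEART_L2_INT_MASK)	/* IP4: IPI, debug, devices */
			return 2;
		if (pending & HEART_L1_INT_MASK)	/* IP3: devices */
			return 1;
		return 0;				/* IP2: GFX flow control */
	}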
diff --git a/arch/mips/include/asm/mach-ip30/kernel-entry-init.h b/arch/mips/include/asm/mach-ip30/kernel-entry-init.h
new file mode 100644
index 000000000000..be0472c977d8
--- /dev/null
+++ b/arch/mips/include/asm/mach-ip30/kernel-entry-init.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_MACH_IP30_KERNEL_ENTRY_H
+#define __ASM_MACH_IP30_KERNEL_ENTRY_H
+
+ .macro kernel_entry_setup
+ .endm
+
+ .macro smp_slave_setup
+ move gp, a0
+ .endm
+
+#endif /* __ASM_MACH_IP30_KERNEL_ENTRY_H */
diff --git a/arch/mips/include/asm/mach-ip30/mangle-port.h b/arch/mips/include/asm/mach-ip30/mangle-port.h
new file mode 100644
index 000000000000..f3e1262a2d5e
--- /dev/null
+++ b/arch/mips/include/asm/mach-ip30/mangle-port.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2003, 2004 Ralf Baechle
+ */
+#ifndef __ASM_MACH_IP30_MANGLE_PORT_H
+#define __ASM_MACH_IP30_MANGLE_PORT_H
+
+#define __swizzle_addr_b(port) ((port)^3)
+#define __swizzle_addr_w(port) ((port)^2)
+#define __swizzle_addr_l(port) (port)
+#define __swizzle_addr_q(port) (port)
+
+#define ioswabb(a, x) (x)
+#define __mem_ioswabb(a, x) (x)
+#define ioswabw(a, x) (x)
+#define __mem_ioswabw(a, x) cpu_to_le16(x)
+#define ioswabl(a, x) (x)
+#define __mem_ioswabl(a, x) cpu_to_le32(x)
+#define ioswabq(a, x) (x)
+#define __mem_ioswabq(a, x) cpu_to_le64(x)
+
+#endif /* __ASM_MACH_IP30_MANGLE_PORT_H */
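Worked examples of the swizzle macros above; the values follow directly from the ^3/^2 arithmetic, and only sub-word accesses are adjusted:

	unsigned long b = __swizzle_addr_b(0x10);	/* 0x10 ^ 3 == 0x13 */
	unsigned long w = __swizzle_addr_w(0x10);	/* 0x10 ^ 2 == 0x12 */
	unsigned long l = __swizzle_addr_l(0x10);	/* unchanged:  0x10 */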
diff --git a/arch/mips/include/asm/mach-ip30/spaces.h b/arch/mips/include/asm/mach-ip30/spaces.h
new file mode 100644
index 000000000000..c8a302dfbe05
--- /dev/null
+++ b/arch/mips/include/asm/mach-ip30/spaces.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2016 Joshua Kinard <kumba@gentoo.org>
+ *
+ */
+#ifndef _ASM_MACH_IP30_SPACES_H
+#define _ASM_MACH_IP30_SPACES_H
+
+/*
+ * Memory in IP30/Octane is offset 512MB in the physical address space.
+ */
+#define PHYS_OFFSET _AC(0x20000000, UL)
+
+#ifdef CONFIG_64BIT
+#define CAC_BASE _AC(0xA800000000000000, UL)
+#endif
+
+#include <asm/mach-generic/spaces.h>
+
+#endif /* _ASM_MACH_IP30_SPACES_H */
diff --git a/arch/mips/include/asm/mach-ip30/war.h b/arch/mips/include/asm/mach-ip30/war.h
new file mode 100644
index 000000000000..a98ba204f183
--- /dev/null
+++ b/arch/mips/include/asm/mach-ip30/war.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2002, 2004, 2007 by Ralf Baechle <ralf@linux-mips.org>
+ */
+#ifndef __ASM_MIPS_MACH_IP30_WAR_H
+#define __ASM_MIPS_MACH_IP30_WAR_H
+
+#define R4600_V1_INDEX_ICACHEOP_WAR 0
+#define R4600_V1_HIT_CACHEOP_WAR 0
+#define R4600_V2_HIT_CACHEOP_WAR 0
+#define MIPS_CACHE_SYNC_WAR 0
+#define BCM1250_M3_WAR 0
+#define SIBYTE_1956_WAR 0
+#define MIPS4K_ICACHE_REFILL_WAR 0
+#define MIPS34K_MISSED_ITLB_WAR 0
+#define R5432_CP0_INTERRUPT_WAR 0
+#define TX49XX_ICACHE_INDEX_INV_WAR 0
+#define ICACHE_REFILLS_WORKAROUND_WAR 0
+
+#ifdef CONFIG_CPU_R10000
+#define R10000_LLSC_WAR 1
+#else
+#define R10000_LLSC_WAR 0
+#endif
+
+#endif /* __ASM_MIPS_MACH_IP30_WAR_H */
diff --git a/arch/mips/include/asm/mach-loongson2ef/cpu-feature-overrides.h b/arch/mips/include/asm/mach-loongson2ef/cpu-feature-overrides.h
new file mode 100644
index 000000000000..b2ee859ca0b7
--- /dev/null
+++ b/arch/mips/include/asm/mach-loongson2ef/cpu-feature-overrides.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2009 Wu Zhangjin <wuzhangjin@gmail.com>
+ * Copyright (C) 2009 Philippe Vachon <philippe@cowpig.ca>
+ * Copyright (C) 2009 Zhang Le <r0bertz@gentoo.org>
+ *
+ * reference: /proc/cpuinfo,
+ * arch/mips/kernel/cpu-probe.c(cpu_probe_legacy),
+ * arch/mips/kernel/proc.c(show_cpuinfo),
+ * loongson2f user manual.
+ */
+
+#ifndef __ASM_MACH_LOONGSON2EF_CPU_FEATURE_OVERRIDES_H
+#define __ASM_MACH_LOONGSON2EF_CPU_FEATURE_OVERRIDES_H
+
+#define cpu_has_32fpr 1
+#define cpu_has_3k_cache 0
+#define cpu_has_4k_cache 1
+#define cpu_has_4kex 1
+#define cpu_has_64bits 1
+#define cpu_has_cache_cdex_p 0
+#define cpu_has_cache_cdex_s 0
+#define cpu_has_counter 1
+#define cpu_has_dc_aliases (PAGE_SIZE < 0x4000)
+#define cpu_has_divec 0
+#define cpu_has_ejtag 0
+#define cpu_has_inclusive_pcaches 1
+#define cpu_has_llsc 1
+#define cpu_has_mcheck 0
+#define cpu_has_mdmx 0
+#define cpu_has_mips16 0
+#define cpu_has_mips16e2 0
+#define cpu_has_mips3d 0
+#define cpu_has_mipsmt 0
+#define cpu_has_smartmips 0
+#define cpu_has_tlb 1
+#define cpu_has_tx39_cache 0
+#define cpu_has_vce 0
+#define cpu_has_veic 0
+#define cpu_has_vint 0
+#define cpu_has_vtag_icache 0
+#define cpu_has_watch 1
+
+#endif /* __ASM_MACH_LOONGSON2EF_CPU_FEATURE_OVERRIDES_H */
diff --git a/arch/mips/include/asm/mach-loongson64/cs5536/cs5536.h b/arch/mips/include/asm/mach-loongson2ef/cs5536/cs5536.h
index 9795b3361532..9795b3361532 100644
--- a/arch/mips/include/asm/mach-loongson64/cs5536/cs5536.h
+++ b/arch/mips/include/asm/mach-loongson2ef/cs5536/cs5536.h
diff --git a/arch/mips/include/asm/mach-loongson64/cs5536/cs5536_mfgpt.h b/arch/mips/include/asm/mach-loongson2ef/cs5536/cs5536_mfgpt.h
index 52e8bb0fc04d..52e8bb0fc04d 100644
--- a/arch/mips/include/asm/mach-loongson64/cs5536/cs5536_mfgpt.h
+++ b/arch/mips/include/asm/mach-loongson2ef/cs5536/cs5536_mfgpt.h
diff --git a/arch/mips/include/asm/mach-loongson64/cs5536/cs5536_pci.h b/arch/mips/include/asm/mach-loongson2ef/cs5536/cs5536_pci.h
index a0d4b752899e..a0d4b752899e 100644
--- a/arch/mips/include/asm/mach-loongson64/cs5536/cs5536_pci.h
+++ b/arch/mips/include/asm/mach-loongson2ef/cs5536/cs5536_pci.h
diff --git a/arch/mips/include/asm/mach-loongson64/cs5536/cs5536_vsm.h b/arch/mips/include/asm/mach-loongson2ef/cs5536/cs5536_vsm.h
index 70d0153cccc3..70d0153cccc3 100644
--- a/arch/mips/include/asm/mach-loongson64/cs5536/cs5536_vsm.h
+++ b/arch/mips/include/asm/mach-loongson2ef/cs5536/cs5536_vsm.h
diff --git a/arch/mips/include/asm/mach-loongson2ef/loongson.h b/arch/mips/include/asm/mach-loongson2ef/loongson.h
new file mode 100644
index 000000000000..5008af0a1a19
--- /dev/null
+++ b/arch/mips/include/asm/mach-loongson2ef/loongson.h
@@ -0,0 +1,326 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2009 Lemote, Inc.
+ * Author: Wu Zhangjin <wuzhangjin@gmail.com>
+ */
+
+#ifndef __ASM_MACH_LOONGSON2EF_LOONGSON_H
+#define __ASM_MACH_LOONGSON2EF_LOONGSON_H
+
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+
+/* loongson internal northbridge initialization */
+extern void bonito_irq_init(void);
+
+/* machine-specific reboot/halt operation */
+extern void mach_prepare_reboot(void);
+extern void mach_prepare_shutdown(void);
+
+/* environment arguments from bootloader */
+extern u32 cpu_clock_freq;
+extern u32 memsize, highmemsize;
+
+/* loongson-specific command line, env and memory initialization */
+extern void __init prom_init_memory(void);
+extern void __init prom_init_machtype(void);
+extern void __init prom_init_env(void);
+#ifdef CONFIG_LOONGSON_UART_BASE
+extern unsigned long _loongson_uart_base, loongson_uart_base;
+extern void prom_init_loongson_uart_base(void);
+#endif
+
+static inline void prom_init_uart_base(void)
+{
+#ifdef CONFIG_LOONGSON_UART_BASE
+ prom_init_loongson_uart_base();
+#endif
+}
+
+/* irq operation functions */
+extern void bonito_irqdispatch(void);
+extern void __init bonito_irq_init(void);
+extern void __init mach_init_irq(void);
+extern void mach_irq_dispatch(unsigned int pending);
+extern int mach_i8259_irq(void);
+
+/* We need this in some places... */
+#define delay() ({ \
+ int x; \
+ for (x = 0; x < 100000; x++) \
+ __asm__ __volatile__(""); \
+})
+
+#define LOONGSON_REG(x) \
+ (*(volatile u32 *)((char *)CKSEG1ADDR(LOONGSON_REG_BASE) + (x)))
+
+#define LOONGSON_IRQ_BASE 32
+#define LOONGSON2_PERFCNT_IRQ (MIPS_CPU_IRQ_BASE + 6) /* cpu perf counter */
+
+#include <linux/interrupt.h>
+static inline void do_perfcnt_IRQ(void)
+{
+#if IS_ENABLED(CONFIG_OPROFILE)
+ do_IRQ(LOONGSON2_PERFCNT_IRQ);
+#endif
+}
+
+#define LOONGSON_FLASH_BASE 0x1c000000
+#define LOONGSON_FLASH_SIZE 0x02000000 /* 32M */
+#define LOONGSON_FLASH_TOP (LOONGSON_FLASH_BASE+LOONGSON_FLASH_SIZE-1)
+
+#define LOONGSON_LIO0_BASE 0x1e000000
+#define LOONGSON_LIO0_SIZE 0x01C00000 /* 28M */
+#define LOONGSON_LIO0_TOP (LOONGSON_LIO0_BASE+LOONGSON_LIO0_SIZE-1)
+
+#define LOONGSON_BOOT_BASE 0x1fc00000
+#define LOONGSON_BOOT_SIZE 0x00100000 /* 1M */
+#define LOONGSON_BOOT_TOP (LOONGSON_BOOT_BASE+LOONGSON_BOOT_SIZE-1)
+#define LOONGSON_REG_BASE 0x1fe00000
+#define LOONGSON_REG_SIZE 0x00100000 /* 256Bytes + 256Bytes + ??? */
+#define LOONGSON_REG_TOP (LOONGSON_REG_BASE+LOONGSON_REG_SIZE-1)
+
+#define LOONGSON_LIO1_BASE 0x1ff00000
+#define LOONGSON_LIO1_SIZE 0x00100000 /* 1M */
+#define LOONGSON_LIO1_TOP (LOONGSON_LIO1_BASE+LOONGSON_LIO1_SIZE-1)
+
+#define LOONGSON_PCILO0_BASE 0x10000000
+#define LOONGSON_PCILO1_BASE 0x14000000
+#define LOONGSON_PCILO2_BASE 0x18000000
+#define LOONGSON_PCILO_BASE LOONGSON_PCILO0_BASE
+#define LOONGSON_PCILO_SIZE 0x0c000000 /* 64M * 3 */
+#define LOONGSON_PCILO_TOP (LOONGSON_PCILO0_BASE+LOONGSON_PCILO_SIZE-1)
+
+#define LOONGSON_PCICFG_BASE 0x1fe80000
+#define LOONGSON_PCICFG_SIZE 0x00000800 /* 2K */
+#define LOONGSON_PCICFG_TOP (LOONGSON_PCICFG_BASE+LOONGSON_PCICFG_SIZE-1)
+#define LOONGSON_PCIIO_BASE 0x1fd00000
+
+#define LOONGSON_PCIIO_SIZE 0x00100000 /* 1M */
+#define LOONGSON_PCIIO_TOP (LOONGSON_PCIIO_BASE+LOONGSON_PCIIO_SIZE-1)
+
+/* Loongson Register Bases */
+
+#define LOONGSON_PCICONFIGBASE 0x00
+#define LOONGSON_REGBASE 0x100
+
+/* PCI Configuration Registers */
+
+#define LOONGSON_PCI_REG(x) LOONGSON_REG(LOONGSON_PCICONFIGBASE + (x))
+#define LOONGSON_PCIDID LOONGSON_PCI_REG(0x00)
+#define LOONGSON_PCICMD LOONGSON_PCI_REG(0x04)
+#define LOONGSON_PCICLASS LOONGSON_PCI_REG(0x08)
+#define LOONGSON_PCILTIMER LOONGSON_PCI_REG(0x0c)
+#define LOONGSON_PCIBASE0 LOONGSON_PCI_REG(0x10)
+#define LOONGSON_PCIBASE1 LOONGSON_PCI_REG(0x14)
+#define LOONGSON_PCIBASE2 LOONGSON_PCI_REG(0x18)
+#define LOONGSON_PCIBASE3 LOONGSON_PCI_REG(0x1c)
+#define LOONGSON_PCIBASE4 LOONGSON_PCI_REG(0x20)
+#define LOONGSON_PCIEXPRBASE LOONGSON_PCI_REG(0x30)
+#define LOONGSON_PCIINT LOONGSON_PCI_REG(0x3c)
+
+#define LOONGSON_PCI_ISR4C LOONGSON_PCI_REG(0x4c)
+
+#define LOONGSON_PCICMD_PERR_CLR 0x80000000
+#define LOONGSON_PCICMD_SERR_CLR 0x40000000
+#define LOONGSON_PCICMD_MABORT_CLR 0x20000000
+#define LOONGSON_PCICMD_MTABORT_CLR 0x10000000
+#define LOONGSON_PCICMD_TABORT_CLR 0x08000000
+#define LOONGSON_PCICMD_MPERR_CLR 0x01000000
+#define LOONGSON_PCICMD_PERRRESPEN 0x00000040
+#define LOONGSON_PCICMD_ASTEPEN 0x00000080
+#define LOONGSON_PCICMD_SERREN 0x00000100
+#define LOONGSON_PCILTIMER_BUSLATENCY 0x0000ff00
+#define LOONGSON_PCILTIMER_BUSLATENCY_SHIFT 8
+
+/* Loongson h/w Configuration */
+
+#define LOONGSON_GENCFG_OFFSET 0x4
+#define LOONGSON_GENCFG LOONGSON_REG(LOONGSON_REGBASE + LOONGSON_GENCFG_OFFSET)
+
+#define LOONGSON_GENCFG_DEBUGMODE 0x00000001
+#define LOONGSON_GENCFG_SNOOPEN 0x00000002
+#define LOONGSON_GENCFG_CPUSELFRESET 0x00000004
+
+#define LOONGSON_GENCFG_FORCE_IRQA 0x00000008
+#define LOONGSON_GENCFG_IRQA_ISOUT 0x00000010
+#define LOONGSON_GENCFG_IRQA_FROM_INT1 0x00000020
+#define LOONGSON_GENCFG_BYTESWAP 0x00000040
+
+#define LOONGSON_GENCFG_UNCACHED 0x00000080
+#define LOONGSON_GENCFG_PREFETCHEN 0x00000100
+#define LOONGSON_GENCFG_WBEHINDEN 0x00000200
+#define LOONGSON_GENCFG_CACHEALG 0x00000c00
+#define LOONGSON_GENCFG_CACHEALG_SHIFT 10
+#define LOONGSON_GENCFG_PCIQUEUE 0x00001000
+#define LOONGSON_GENCFG_CACHESTOP 0x00002000
+#define LOONGSON_GENCFG_MSTRBYTESWAP 0x00004000
+#define LOONGSON_GENCFG_BUSERREN 0x00008000
+#define LOONGSON_GENCFG_NORETRYTIMEOUT 0x00010000
+#define LOONGSON_GENCFG_SHORTCOPYTIMEOUT 0x00020000
+
+/* PCI address map control */
+
+#define LOONGSON_PCIMAP LOONGSON_REG(LOONGSON_REGBASE + 0x10)
+#define LOONGSON_PCIMEMBASECFG LOONGSON_REG(LOONGSON_REGBASE + 0x14)
+#define LOONGSON_PCIMAP_CFG LOONGSON_REG(LOONGSON_REGBASE + 0x18)
+
+/* GPIO Regs - r/w */
+
+#define LOONGSON_GPIODATA LOONGSON_REG(LOONGSON_REGBASE + 0x1c)
+#define LOONGSON_GPIOIE LOONGSON_REG(LOONGSON_REGBASE + 0x20)
+
+/* ICU Configuration Regs - r/w */
+
+#define LOONGSON_INTEDGE LOONGSON_REG(LOONGSON_REGBASE + 0x24)
+#define LOONGSON_INTSTEER LOONGSON_REG(LOONGSON_REGBASE + 0x28)
+#define LOONGSON_INTPOL LOONGSON_REG(LOONGSON_REGBASE + 0x2c)
+
+/* ICU Enable Regs - IntEn & IntISR are r/o. */
+
+#define LOONGSON_INTENSET LOONGSON_REG(LOONGSON_REGBASE + 0x30)
+#define LOONGSON_INTENCLR LOONGSON_REG(LOONGSON_REGBASE + 0x34)
+#define LOONGSON_INTEN LOONGSON_REG(LOONGSON_REGBASE + 0x38)
+#define LOONGSON_INTISR LOONGSON_REG(LOONGSON_REGBASE + 0x3c)
+
+/* ICU */
+#define LOONGSON_ICU_MBOXES 0x0000000f
+#define LOONGSON_ICU_MBOXES_SHIFT 0
+#define LOONGSON_ICU_DMARDY 0x00000010
+#define LOONGSON_ICU_DMAEMPTY 0x00000020
+#define LOONGSON_ICU_COPYRDY 0x00000040
+#define LOONGSON_ICU_COPYEMPTY 0x00000080
+#define LOONGSON_ICU_COPYERR 0x00000100
+#define LOONGSON_ICU_PCIIRQ 0x00000200
+#define LOONGSON_ICU_MASTERERR 0x00000400
+#define LOONGSON_ICU_SYSTEMERR 0x00000800
+#define LOONGSON_ICU_DRAMPERR 0x00001000
+#define LOONGSON_ICU_RETRYERR 0x00002000
+#define LOONGSON_ICU_GPIOS 0x01ff0000
+#define LOONGSON_ICU_GPIOS_SHIFT 16
+#define LOONGSON_ICU_GPINS 0x7e000000
+#define LOONGSON_ICU_GPINS_SHIFT 25
+#define LOONGSON_ICU_MBOX(N) (1<<(LOONGSON_ICU_MBOXES_SHIFT+(N)))
+#define LOONGSON_ICU_GPIO(N) (1<<(LOONGSON_ICU_GPIOS_SHIFT+(N)))
+#define LOONGSON_ICU_GPIN(N) (1<<(LOONGSON_ICU_GPINS_SHIFT+(N)))
+
+/* PCI prefetch window base & mask */
+
+#define LOONGSON_MEM_WIN_BASE_L LOONGSON_REG(LOONGSON_REGBASE + 0x40)
+#define LOONGSON_MEM_WIN_BASE_H LOONGSON_REG(LOONGSON_REGBASE + 0x44)
+#define LOONGSON_MEM_WIN_MASK_L LOONGSON_REG(LOONGSON_REGBASE + 0x48)
+#define LOONGSON_MEM_WIN_MASK_H LOONGSON_REG(LOONGSON_REGBASE + 0x4c)
+
+/* PCI_Hit*_Sel_* */
+
+#define LOONGSON_PCI_HIT0_SEL_L LOONGSON_REG(LOONGSON_REGBASE + 0x50)
+#define LOONGSON_PCI_HIT0_SEL_H LOONGSON_REG(LOONGSON_REGBASE + 0x54)
+#define LOONGSON_PCI_HIT1_SEL_L LOONGSON_REG(LOONGSON_REGBASE + 0x58)
+#define LOONGSON_PCI_HIT1_SEL_H LOONGSON_REG(LOONGSON_REGBASE + 0x5c)
+#define LOONGSON_PCI_HIT2_SEL_L LOONGSON_REG(LOONGSON_REGBASE + 0x60)
+#define LOONGSON_PCI_HIT2_SEL_H LOONGSON_REG(LOONGSON_REGBASE + 0x64)
+
+/* PXArb Config & Status */
+
+#define LOONGSON_PXARB_CFG LOONGSON_REG(LOONGSON_REGBASE + 0x68)
+#define LOONGSON_PXARB_STATUS LOONGSON_REG(LOONGSON_REGBASE + 0x6c)
+
+/* Chip Config register of each physical cpu package, PRID >= Loongson-2F */
+#define LOONGSON_CHIPCFG (void __iomem *)TO_UNCAC(0x1fc00180)
+
+/* pcimap */
+
+#define LOONGSON_PCIMAP_PCIMAP_LO0 0x0000003f
+#define LOONGSON_PCIMAP_PCIMAP_LO0_SHIFT 0
+#define LOONGSON_PCIMAP_PCIMAP_LO1 0x00000fc0
+#define LOONGSON_PCIMAP_PCIMAP_LO1_SHIFT 6
+#define LOONGSON_PCIMAP_PCIMAP_LO2 0x0003f000
+#define LOONGSON_PCIMAP_PCIMAP_LO2_SHIFT 12
+#define LOONGSON_PCIMAP_PCIMAP_2 0x00040000
+#define LOONGSON_PCIMAP_WIN(WIN, ADDR) \
+ ((((ADDR)>>26) & LOONGSON_PCIMAP_PCIMAP_LO0) << ((WIN)*6))
+
+#ifdef CONFIG_CPU_SUPPORTS_CPUFREQ
+#include <linux/cpufreq.h>
+extern struct cpufreq_frequency_table loongson2_clockmod_table[];
+#endif
+
+/*
+ * address windows configuration module
+ *
+ * loongson2e does not have this module
+ */
+#ifdef CONFIG_CPU_SUPPORTS_ADDRWINCFG
+
+/* address window config module base address */
+#define LOONGSON_ADDRWINCFG_BASE 0x3ff00000ul
+#define LOONGSON_ADDRWINCFG_SIZE 0x180
+
+extern unsigned long _loongson_addrwincfg_base;
+#define LOONGSON_ADDRWINCFG(offset) \
+ (*(volatile u64 *)(_loongson_addrwincfg_base + (offset)))
+
+#define CPU_WIN0_BASE LOONGSON_ADDRWINCFG(0x00)
+#define CPU_WIN1_BASE LOONGSON_ADDRWINCFG(0x08)
+#define CPU_WIN2_BASE LOONGSON_ADDRWINCFG(0x10)
+#define CPU_WIN3_BASE LOONGSON_ADDRWINCFG(0x18)
+
+#define CPU_WIN0_MASK LOONGSON_ADDRWINCFG(0x20)
+#define CPU_WIN1_MASK LOONGSON_ADDRWINCFG(0x28)
+#define CPU_WIN2_MASK LOONGSON_ADDRWINCFG(0x30)
+#define CPU_WIN3_MASK LOONGSON_ADDRWINCFG(0x38)
+
+#define CPU_WIN0_MMAP LOONGSON_ADDRWINCFG(0x40)
+#define CPU_WIN1_MMAP LOONGSON_ADDRWINCFG(0x48)
+#define CPU_WIN2_MMAP LOONGSON_ADDRWINCFG(0x50)
+#define CPU_WIN3_MMAP LOONGSON_ADDRWINCFG(0x58)
+
+#define PCIDMA_WIN0_BASE LOONGSON_ADDRWINCFG(0x60)
+#define PCIDMA_WIN1_BASE LOONGSON_ADDRWINCFG(0x68)
+#define PCIDMA_WIN2_BASE LOONGSON_ADDRWINCFG(0x70)
+#define PCIDMA_WIN3_BASE LOONGSON_ADDRWINCFG(0x78)
+
+#define PCIDMA_WIN0_MASK LOONGSON_ADDRWINCFG(0x80)
+#define PCIDMA_WIN1_MASK LOONGSON_ADDRWINCFG(0x88)
+#define PCIDMA_WIN2_MASK LOONGSON_ADDRWINCFG(0x90)
+#define PCIDMA_WIN3_MASK LOONGSON_ADDRWINCFG(0x98)
+
+#define PCIDMA_WIN0_MMAP LOONGSON_ADDRWINCFG(0xa0)
+#define PCIDMA_WIN1_MMAP LOONGSON_ADDRWINCFG(0xa8)
+#define PCIDMA_WIN2_MMAP LOONGSON_ADDRWINCFG(0xb0)
+#define PCIDMA_WIN3_MMAP LOONGSON_ADDRWINCFG(0xb8)
+
+#define ADDRWIN_WIN0 0
+#define ADDRWIN_WIN1 1
+#define ADDRWIN_WIN2 2
+#define ADDRWIN_WIN3 3
+
+#define ADDRWIN_MAP_DST_DDR 0
+#define ADDRWIN_MAP_DST_PCI 1
+#define ADDRWIN_MAP_DST_LIO 1
+
+/*
+ * s: CPU, PCIDMA
+ * d: DDR, PCI, LIO
+ * win: 0, 1, 2, 3
+ * src: map source
+ * dst: map destination
+ * size: ~mask + 1
+ */
+#define LOONGSON_ADDRWIN_CFG(s, d, w, src, dst, size) do {\
+ s##_WIN##w##_BASE = (src); \
+ s##_WIN##w##_MMAP = (dst) | ADDRWIN_MAP_DST_##d; \
+ s##_WIN##w##_MASK = ~(size-1); \
+} while (0)
+
+#define LOONGSON_ADDRWIN_CPUTOPCI(win, src, dst, size) \
+ LOONGSON_ADDRWIN_CFG(CPU, PCI, win, src, dst, size)
+#define LOONGSON_ADDRWIN_CPUTODDR(win, src, dst, size) \
+ LOONGSON_ADDRWIN_CFG(CPU, DDR, win, src, dst, size)
+#define LOONGSON_ADDRWIN_PCITODDR(win, src, dst, size) \
+ LOONGSON_ADDRWIN_CFG(PCIDMA, DDR, win, src, dst, size)
+
+#endif /* ! CONFIG_CPU_SUPPORTS_ADDRWINCFG */
+
+#endif /* __ASM_MACH_LOONGSON2EF_LOONGSON_H */
diff --git a/arch/mips/include/asm/mach-loongson64/machine.h b/arch/mips/include/asm/mach-loongson2ef/machine.h
index 8ef7ea94a26d..4097267ef186 100644
--- a/arch/mips/include/asm/mach-loongson64/machine.h
+++ b/arch/mips/include/asm/mach-loongson2ef/machine.h
@@ -4,8 +4,8 @@
* Author: Wu Zhangjin <wuzhangjin@gmail.com>
*/
-#ifndef __ASM_MACH_LOONGSON64_MACHINE_H
-#define __ASM_MACH_LOONGSON64_MACHINE_H
+#ifndef __ASM_MACH_LOONGSON2EF_MACHINE_H
+#define __ASM_MACH_LOONGSON2EF_MACHINE_H
#ifdef CONFIG_LEMOTE_FULOONG2E
@@ -20,10 +20,4 @@
#endif
-#ifdef CONFIG_LOONGSON_MACH3X
-
-#define LOONGSON_MACHTYPE MACH_LOONGSON_GENERIC
-
-#endif /* CONFIG_LOONGSON_MACH3X */
-
-#endif /* __ASM_MACH_LOONGSON64_MACHINE_H */
+#endif /* __ASM_MACH_LOONGSON2EF_MACHINE_H */
diff --git a/arch/mips/include/asm/mach-loongson2ef/mc146818rtc.h b/arch/mips/include/asm/mach-loongson2ef/mc146818rtc.h
new file mode 100644
index 000000000000..00d602629a55
--- /dev/null
+++ b/arch/mips/include/asm/mach-loongson2ef/mc146818rtc.h
@@ -0,0 +1,36 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1998, 2001, 03, 07 by Ralf Baechle (ralf@linux-mips.org)
+ *
+ * RTC routines for PC style attached Dallas chip.
+ */
+#ifndef __ASM_MACH_LOONGSON2EF_MC146818RTC_H
+#define __ASM_MACH_LOONGSON2EF_MC146818RTC_H
+
+#include <linux/io.h>
+
+#define RTC_PORT(x) (0x70 + (x))
+#define RTC_IRQ 8
+
+static inline unsigned char CMOS_READ(unsigned long addr)
+{
+ outb_p(addr, RTC_PORT(0));
+ return inb_p(RTC_PORT(1));
+}
+
+static inline void CMOS_WRITE(unsigned char data, unsigned long addr)
+{
+ outb_p(addr, RTC_PORT(0));
+ outb_p(data, RTC_PORT(1));
+}
+
+#define RTC_ALWAYS_BCD 0
+
+#ifndef mc146818_decode_year
+#define mc146818_decode_year(year) ((year) < 70 ? (year) + 2000 : (year) + 1970)
+#endif
+
+#endif /* __ASM_MACH_LOONGSON2EF_MC146818RTC_H */
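Usage sketch for the accessors above, following the standard MC146818 index/data protocol (index 0x00 is the seconds register on this RTC family):

	unsigned char sec;

	sec = CMOS_READ(0x00);	/* select index 0x00, read seconds */
	CMOS_WRITE(sec, 0x00);	/* same protocol in the write direction */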
diff --git a/arch/mips/include/asm/mach-loongson64/mem.h b/arch/mips/include/asm/mach-loongson2ef/mem.h
index ce33c174c04d..d1d759b8974e 100644
--- a/arch/mips/include/asm/mach-loongson64/mem.h
+++ b/arch/mips/include/asm/mach-loongson2ef/mem.h
@@ -4,8 +4,8 @@
* Author: Wu Zhangjin <wuzhangjin@gmail.com>
*/
-#ifndef __ASM_MACH_LOONGSON64_MEM_H
-#define __ASM_MACH_LOONGSON64_MEM_H
+#ifndef __ASM_MACH_LOONGSON2EF_MEM_H
+#define __ASM_MACH_LOONGSON2EF_MEM_H
/*
* high memory space
@@ -34,4 +34,4 @@
#define LOONGSON_MMIO_MEM_END 0x80000000
#endif
-#endif /* __ASM_MACH_LOONGSON64_MEM_H */
+#endif /* __ASM_MACH_LOONGSON2EF_MEM_H */
diff --git a/arch/mips/include/asm/mach-loongson2ef/pci.h b/arch/mips/include/asm/mach-loongson2ef/pci.h
new file mode 100644
index 000000000000..5588c5bc5395
--- /dev/null
+++ b/arch/mips/include/asm/mach-loongson2ef/pci.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2008 Zhang Le <r0bertz@gentoo.org>
+ * Copyright (c) 2009 Wu Zhangjin <wuzhangjin@gmail.com>
+ */
+
+#ifndef __ASM_MACH_LOONGSON2EF_PCI_H_
+#define __ASM_MACH_LOONGSON2EF_PCI_H_
+
+extern struct pci_ops loongson_pci_ops;
+
+/* this is an offset from mips_io_port_base */
+#define LOONGSON_PCI_IO_START 0x00004000UL
+
+#ifdef CONFIG_CPU_SUPPORTS_ADDRWINCFG
+
+/*
+ * We use address window 2 to map the CPU address space to PCI space:
+ * window2: cpu [1G, 2G] -> pci [1G, 2G]
+ * Windows 0 & 1 are not used because the CPU uses them while booting:
+ * window0: cpu [0, 256M] -> ddr [0, 256M]
+ * window1: cpu [256M, 512M] -> pci [256M, 512M]
+ */
+
+/* LOONGSON_CPU_MEM_SRC can be no smaller than 512M */
+#define LOONGSON_CPU_MEM_SRC 0x40000000ul /* 1G */
+#define LOONGSON_PCI_MEM_DST LOONGSON_CPU_MEM_SRC
+
+#define LOONGSON_PCI_MEM_START LOONGSON_PCI_MEM_DST
+#define LOONGSON_PCI_MEM_END (0x80000000ul-1) /* 2G */
+
+#define MMAP_CPUTOPCI_SIZE (LOONGSON_PCI_MEM_END - \
+ LOONGSON_PCI_MEM_START + 1)
+
+#else /* loongson2f/32bit & loongson2e */
+
+/* this pci memory space is mapped by pcimap in pci.c */
+#define LOONGSON_PCI_MEM_START LOONGSON_PCILO1_BASE
+#define LOONGSON_PCI_MEM_END (LOONGSON_PCILO1_BASE + 0x04000000 * 2)
+
+/* this is an offset from mips_io_port_base */
+#define LOONGSON_PCI_IO_START 0x00004000UL
+
+#endif /* !CONFIG_CPU_SUPPORTS_ADDRWINCFG */
+
+#endif /* !__ASM_MACH_LOONGSON2EF_PCI_H_ */
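Tying the window comment above to the LOONGSON_ADDRWIN_* helpers from loongson.h in this same diff, programming window 2 for the cpu [1G, 2G] -> pci [1G, 2G] mapping would look like this sketch (it mirrors how the platform code drives these macros; illustrative, not new functionality):

	/* CPU window 2: source 1G, destination 1G in PCI space, 1G in size. */
	LOONGSON_ADDRWIN_CPUTOPCI(ADDRWIN_WIN2, LOONGSON_CPU_MEM_SRC,
				  LOONGSON_PCI_MEM_DST, MMAP_CPUTOPCI_SIZE);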
diff --git a/arch/mips/include/asm/mach-loongson2ef/spaces.h b/arch/mips/include/asm/mach-loongson2ef/spaces.h
new file mode 100644
index 000000000000..ba4e8e9b618e
--- /dev/null
+++ b/arch/mips/include/asm/mach-loongson2ef/spaces.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_MACH_LOONGSON2EF_SPACES_H_
+#define __ASM_MACH_LOONGSON2EF_SPACES_H_
+
+#if defined(CONFIG_64BIT)
+#define CAC_BASE _AC(0x9800000000000000, UL)
+#endif /* CONFIG_64BIT */
+
+#include <asm/mach-generic/spaces.h>
+#endif
diff --git a/arch/mips/include/asm/mach-loongson32/prom.h b/arch/mips/include/asm/mach-loongson32/prom.h
deleted file mode 100644
index cb789f18d790..000000000000
--- a/arch/mips/include/asm/mach-loongson32/prom.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (c) 2011 Zhang, Keguang <keguang.zhang@gmail.com>
- */
-
-#ifndef __ASM_MACH_LOONGSON32_PROM_H
-#define __ASM_MACH_LOONGSON32_PROM_H
-
-#include <linux/io.h>
-#include <linux/init.h>
-#include <linux/irq.h>
-
-/* environment arguments from bootloader */
-extern unsigned long memsize, highmemsize;
-
-/* loongson-specific command line, env and memory initialization */
-extern char *prom_getenv(char *name);
-extern void __init prom_init_cmdline(void);
-
-#endif /* __ASM_MACH_LOONGSON32_PROM_H */
diff --git a/arch/mips/include/asm/mach-loongson64/cpu-feature-overrides.h b/arch/mips/include/asm/mach-loongson64/cpu-feature-overrides.h
index 4aca25f2ff06..7dc8d75445a9 100644
--- a/arch/mips/include/asm/mach-loongson64/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-loongson64/cpu-feature-overrides.h
@@ -43,11 +43,8 @@
#define cpu_has_vint 0
#define cpu_has_vtag_icache 0
#define cpu_has_watch 1
-
-#ifdef CONFIG_CPU_LOONGSON3
#define cpu_has_wsbh 1
#define cpu_has_ic_fills_f_dc 1
#define cpu_hwrena_impl_bits 0xc0000000
-#endif
#endif /* __ASM_MACH_LOONGSON64_CPU_FEATURE_OVERRIDES_H */
diff --git a/arch/mips/include/asm/mach-loongson64/irq.h b/arch/mips/include/asm/mach-loongson64/irq.h
index be9f727a9328..73a89913dc38 100644
--- a/arch/mips/include/asm/mach-loongson64/irq.h
+++ b/arch/mips/include/asm/mach-loongson64/irq.h
@@ -4,8 +4,6 @@
#include <boot_param.h>
-#ifdef CONFIG_CPU_LOONGSON3
-
/* cpu core interrupt numbers */
#define MIPS_CPU_IRQ_BASE 56
@@ -35,8 +33,6 @@
#define LOONGSON_INT_COREx_INTy(x, y) (1<<(x) | 1<<(y+4)) /* route to int y of core x */
-#endif
-
extern void fixup_irqs(void);
extern void loongson3_ipi_interrupt(struct pt_regs *regs);
diff --git a/arch/mips/include/asm/mach-loongson64/kernel-entry-init.h b/arch/mips/include/asm/mach-loongson64/kernel-entry-init.h
index b5e288a12dfe..87a5bfbf8cfe 100644
--- a/arch/mips/include/asm/mach-loongson64/kernel-entry-init.h
+++ b/arch/mips/include/asm/mach-loongson64/kernel-entry-init.h
@@ -17,7 +17,6 @@
* Override macros used in arch/mips/kernel/head.S.
*/
.macro kernel_entry_setup
-#ifdef CONFIG_CPU_LOONGSON3
.set push
.set mips64
/* Set LPA on LOONGSON3 config3 */
@@ -30,23 +29,29 @@
mtc0 t0, CP0_PAGEGRAIN
/* Enable STFill Buffer */
mfc0 t0, CP0_PRID
+ /* Loongson-3A R4+ */
+ andi t1, t0, PRID_IMP_MASK
+ li t2, PRID_IMP_LOONGSON_64G
+ beq t1, t2, 1f
+ nop
+ /* Loongson-3A R2/R3 */
andi t0, (PRID_IMP_MASK | PRID_REV_MASK)
- slti t0, (PRID_IMP_LOONGSON_64 | PRID_REV_LOONGSON3A_R2_0)
- bnez t0, 1f
+ slti t0, (PRID_IMP_LOONGSON_64C | PRID_REV_LOONGSON3A_R2_0)
+ bnez t0, 2f
+ nop
+1:
mfc0 t0, CP0_CONFIG6
or t0, 0x100
mtc0 t0, CP0_CONFIG6
-1:
+2:
_ehb
.set pop
-#endif
.endm
/*
* Do SMP slave processor setup.
*/
.macro smp_slave_setup
-#ifdef CONFIG_CPU_LOONGSON3
.set push
.set mips64
/* Set LPA on LOONGSON3 config3 */
@@ -59,16 +64,23 @@
mtc0 t0, CP0_PAGEGRAIN
/* Enable STFill Buffer */
mfc0 t0, CP0_PRID
+ /* Loongson-3A R4+ */
+ andi t1, t0, PRID_IMP_MASK
+ li t2, PRID_IMP_LOONGSON_64G
+ beq t1, t2, 1f
+ nop
+ /* Loongson-3A R2/R3 */
andi t0, (PRID_IMP_MASK | PRID_REV_MASK)
- slti t0, (PRID_IMP_LOONGSON_64 | PRID_REV_LOONGSON3A_R2_0)
- bnez t0, 1f
+ slti t0, (PRID_IMP_LOONGSON_64C | PRID_REV_LOONGSON3A_R2_0)
+ bnez t0, 2f
+ nop
+1:
mfc0 t0, CP0_CONFIG6
or t0, 0x100
mtc0 t0, CP0_CONFIG6
-1:
+2:
_ehb
.set pop
-#endif
.endm
#endif /* __ASM_MACH_LOONGSON64_KERNEL_ENTRY_H */
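A hedged C restatement of the PRID test both assembly macros above perform (illustrative only; the real check runs in early assembly, before C code is available):

	static inline int should_enable_stfill(unsigned int prid)
	{
		/* Loongson-3A R4+ parts report the new generic implementor ID. */
		if ((prid & PRID_IMP_MASK) == PRID_IMP_LOONGSON_64G)
			return 1;

		/* Classic parts: the STFill buffer exists from 3A R2.0 onwards. */
		return (prid & (PRID_IMP_MASK | PRID_REV_MASK)) >=
		       (PRID_IMP_LOONGSON_64C | PRID_REV_LOONGSON3A_R2_0);
	}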
diff --git a/arch/mips/include/asm/mach-loongson64/loongson.h b/arch/mips/include/asm/mach-loongson64/loongson.h
index 694a58574ec0..a8fce112a9b0 100644
--- a/arch/mips/include/asm/mach-loongson64/loongson.h
+++ b/arch/mips/include/asm/mach-loongson64/loongson.h
@@ -12,8 +12,6 @@
#include <linux/irq.h>
#include <boot_param.h>
-/* loongson internal northbridge initialization */
-extern void bonito_irq_init(void);
/* machine-specific reboot/halt operation */
extern void mach_prepare_reboot(void);
@@ -26,25 +24,9 @@ extern const struct plat_smp_ops loongson3_smp_ops;
/* loongson-specific command line, env and memory initialization */
extern void __init prom_init_memory(void);
-extern void __init prom_init_cmdline(void);
-extern void __init prom_init_machtype(void);
extern void __init prom_init_env(void);
-#ifdef CONFIG_LOONGSON_UART_BASE
-extern unsigned long _loongson_uart_base[], loongson_uart_base[];
-extern void prom_init_loongson_uart_base(void);
-#endif
-
-static inline void prom_init_uart_base(void)
-{
-#ifdef CONFIG_LOONGSON_UART_BASE
- prom_init_loongson_uart_base();
-#endif
-}
/* irq operation functions */
-extern void bonito_irqdispatch(void);
-extern void __init bonito_irq_init(void);
-extern void __init mach_init_irq(void);
extern void mach_irq_dispatch(unsigned int pending);
extern int mach_i8259_irq(void);
@@ -64,17 +46,6 @@ extern int mach_i8259_irq(void);
#define LOONGSON3_REG32(base, x) \
(*(volatile u32 *)((char *)TO_UNCAC(base) + (x)))
-#define LOONGSON_IRQ_BASE 32
-#define LOONGSON2_PERFCNT_IRQ (MIPS_CPU_IRQ_BASE + 6) /* cpu perf counter */
-
-#include <linux/interrupt.h>
-static inline void do_perfcnt_IRQ(void)
-{
-#if IS_ENABLED(CONFIG_OPROFILE)
- do_IRQ(LOONGSON2_PERFCNT_IRQ);
-#endif
-}
-
#define LOONGSON_FLASH_BASE 0x1c000000
#define LOONGSON_FLASH_SIZE 0x02000000 /* 32M */
#define LOONGSON_FLASH_TOP (LOONGSON_FLASH_BASE+LOONGSON_FLASH_SIZE-1)
@@ -109,11 +80,7 @@ static inline void do_perfcnt_IRQ(void)
#define LOONGSON_PCICFG_SIZE 0x00000800 /* 2K */
#define LOONGSON_PCICFG_TOP (LOONGSON_PCICFG_BASE+LOONGSON_PCICFG_SIZE-1)
-#ifdef CONFIG_CPU_LOONGSON3
#define LOONGSON_PCIIO_BASE loongson_sysconf.pci_io_base
-#else
-#define LOONGSON_PCIIO_BASE 0x1fd00000
-#endif
#define LOONGSON_PCIIO_SIZE 0x00100000 /* 1M */
#define LOONGSON_PCIIO_TOP (LOONGSON_PCIIO_BASE+LOONGSON_PCIIO_SIZE-1)
@@ -270,86 +237,4 @@ extern u64 loongson_freqctrl[MAX_PACKAGES];
#define LOONGSON_PCIMAP_WIN(WIN, ADDR) \
((((ADDR)>>26) & LOONGSON_PCIMAP_PCIMAP_LO0) << ((WIN)*6))
-#ifdef CONFIG_CPU_SUPPORTS_CPUFREQ
-#include <linux/cpufreq.h>
-extern struct cpufreq_frequency_table loongson2_clockmod_table[];
-#endif
-
-/*
- * address windows configuration module
- *
- * loongson2e do not have this module
- */
-#ifdef CONFIG_CPU_SUPPORTS_ADDRWINCFG
-
-/* address window config module base address */
-#define LOONGSON_ADDRWINCFG_BASE 0x3ff00000ul
-#define LOONGSON_ADDRWINCFG_SIZE 0x180
-
-extern unsigned long _loongson_addrwincfg_base;
-#define LOONGSON_ADDRWINCFG(offset) \
- (*(volatile u64 *)(_loongson_addrwincfg_base + (offset)))
-
-#define CPU_WIN0_BASE LOONGSON_ADDRWINCFG(0x00)
-#define CPU_WIN1_BASE LOONGSON_ADDRWINCFG(0x08)
-#define CPU_WIN2_BASE LOONGSON_ADDRWINCFG(0x10)
-#define CPU_WIN3_BASE LOONGSON_ADDRWINCFG(0x18)
-
-#define CPU_WIN0_MASK LOONGSON_ADDRWINCFG(0x20)
-#define CPU_WIN1_MASK LOONGSON_ADDRWINCFG(0x28)
-#define CPU_WIN2_MASK LOONGSON_ADDRWINCFG(0x30)
-#define CPU_WIN3_MASK LOONGSON_ADDRWINCFG(0x38)
-
-#define CPU_WIN0_MMAP LOONGSON_ADDRWINCFG(0x40)
-#define CPU_WIN1_MMAP LOONGSON_ADDRWINCFG(0x48)
-#define CPU_WIN2_MMAP LOONGSON_ADDRWINCFG(0x50)
-#define CPU_WIN3_MMAP LOONGSON_ADDRWINCFG(0x58)
-
-#define PCIDMA_WIN0_BASE LOONGSON_ADDRWINCFG(0x60)
-#define PCIDMA_WIN1_BASE LOONGSON_ADDRWINCFG(0x68)
-#define PCIDMA_WIN2_BASE LOONGSON_ADDRWINCFG(0x70)
-#define PCIDMA_WIN3_BASE LOONGSON_ADDRWINCFG(0x78)
-
-#define PCIDMA_WIN0_MASK LOONGSON_ADDRWINCFG(0x80)
-#define PCIDMA_WIN1_MASK LOONGSON_ADDRWINCFG(0x88)
-#define PCIDMA_WIN2_MASK LOONGSON_ADDRWINCFG(0x90)
-#define PCIDMA_WIN3_MASK LOONGSON_ADDRWINCFG(0x98)
-
-#define PCIDMA_WIN0_MMAP LOONGSON_ADDRWINCFG(0xa0)
-#define PCIDMA_WIN1_MMAP LOONGSON_ADDRWINCFG(0xa8)
-#define PCIDMA_WIN2_MMAP LOONGSON_ADDRWINCFG(0xb0)
-#define PCIDMA_WIN3_MMAP LOONGSON_ADDRWINCFG(0xb8)
-
-#define ADDRWIN_WIN0 0
-#define ADDRWIN_WIN1 1
-#define ADDRWIN_WIN2 2
-#define ADDRWIN_WIN3 3
-
-#define ADDRWIN_MAP_DST_DDR 0
-#define ADDRWIN_MAP_DST_PCI 1
-#define ADDRWIN_MAP_DST_LIO 1
-
-/*
- * s: CPU, PCIDMA
- * d: DDR, PCI, LIO
- * win: 0, 1, 2, 3
- * src: map source
- * dst: map destination
- * size: ~mask + 1
- */
-#define LOONGSON_ADDRWIN_CFG(s, d, w, src, dst, size) do {\
- s##_WIN##w##_BASE = (src); \
- s##_WIN##w##_MMAP = (dst) | ADDRWIN_MAP_DST_##d; \
- s##_WIN##w##_MASK = ~(size-1); \
-} while (0)
-
-#define LOONGSON_ADDRWIN_CPUTOPCI(win, src, dst, size) \
- LOONGSON_ADDRWIN_CFG(CPU, PCI, win, src, dst, size)
-#define LOONGSON_ADDRWIN_CPUTODDR(win, src, dst, size) \
- LOONGSON_ADDRWIN_CFG(CPU, DDR, win, src, dst, size)
-#define LOONGSON_ADDRWIN_PCITODDR(win, src, dst, size) \
- LOONGSON_ADDRWIN_CFG(PCIDMA, DDR, win, src, dst, size)
-
-#endif /* ! CONFIG_CPU_SUPPORTS_ADDRWINCFG */
-
#endif /* __ASM_MACH_LOONGSON64_LOONGSON_H */
diff --git a/arch/mips/include/asm/mach-loongson64/loongson_regs.h b/arch/mips/include/asm/mach-loongson64/loongson_regs.h
new file mode 100644
index 000000000000..363a47a5d26e
--- /dev/null
+++ b/arch/mips/include/asm/mach-loongson64/loongson_regs.h
@@ -0,0 +1,227 @@
+/*
+ * Read/Write Loongson Extension Registers
+ */
+
+#ifndef _LOONGSON_REGS_H_
+#define _LOONGSON_REGS_H_
+
+#include <linux/types.h>
+#include <linux/bits.h>
+
+#include <asm/mipsregs.h>
+#include <asm/cpu.h>
+
+static inline bool cpu_has_cfg(void)
+{
+ return ((read_c0_prid() & PRID_IMP_MASK) == PRID_IMP_LOONGSON_64G);
+}
+
+static inline u32 read_cpucfg(u32 reg)
+{
+ u32 __res;
+
+ __asm__ __volatile__(
+ "parse_r __res,%0\n\t"
+ "parse_r reg,%1\n\t"
+ ".insn \n\t"
+ ".word (0xc8080118 | (reg << 21) | (__res << 11))\n\t"
+ :"=r"(__res)
+ :"r"(reg)
+ :
+ );
+ return __res;
+}
+
+/* Bit Domains for CFG registers */
+#define LOONGSON_CFG0 0x0
+#define LOONGSON_CFG0_PRID GENMASK(31, 0)
+
+#define LOONGSON_CFG1 0x1
+#define LOONGSON_CFG1_FP BIT(0)
+#define LOONGSON_CFG1_FPREV GENMASK(3, 1)
+#define LOONGSON_CFG1_MMI BIT(4)
+#define LOONGSON_CFG1_MSA1 BIT(5)
+#define LOONGSON_CFG1_MSA2 BIT(6)
+#define LOONGSON_CFG1_CGP BIT(7)
+#define LOONGSON_CFG1_WRP BIT(8)
+#define LOONGSON_CFG1_LSX1 BIT(9)
+#define LOONGSON_CFG1_LSX2 BIT(10)
+#define LOONGSON_CFG1_LASX BIT(11)
+#define LOONGSON_CFG1_R6FXP BIT(12)
+#define LOONGSON_CFG1_R6CRCP BIT(13)
+#define LOONGSON_CFG1_R6FPP BIT(14)
+#define LOONGSON_CFG1_CNT64 BIT(15)
+#define LOONGSON_CFG1_LSLDR0 BIT(16)
+#define LOONGSON_CFG1_LSPREF BIT(17)
+#define LOONGSON_CFG1_LSPREFX BIT(18)
+#define LOONGSON_CFG1_LSSYNCI BIT(19)
+#define LOONGSON_CFG1_LSUCA BIT(20)
+#define LOONGSON_CFG1_LLSYNC BIT(21)
+#define LOONGSON_CFG1_TGTSYNC BIT(22)
+#define LOONGSON_CFG1_LLEXC BIT(23)
+#define LOONGSON_CFG1_SCRAND BIT(24)
+#define LOONGSON_CFG1_MUALP BIT(25)
+#define LOONGSON_CFG1_KMUALEN BIT(26)
+#define LOONGSON_CFG1_ITLBT BIT(27)
+#define LOONGSON_CFG1_LSUPERF BIT(28)
+#define LOONGSON_CFG1_SFBP BIT(29)
+#define LOONGSON_CFG1_CDMAP BIT(30)
+
+#define LOONGSON_CFG2 0x2
+#define LOONGSON_CFG2_LEXT1 BIT(0)
+#define LOONGSON_CFG2_LEXT2 BIT(1)
+#define LOONGSON_CFG2_LEXT3 BIT(2)
+#define LOONGSON_CFG2_LSPW BIT(3)
+#define LOONGSON_CFG2_LBT1 BIT(4)
+#define LOONGSON_CFG2_LBT2 BIT(5)
+#define LOONGSON_CFG2_LBT3 BIT(6)
+#define LOONGSON_CFG2_LBTMMU BIT(7)
+#define LOONGSON_CFG2_LPMP BIT(8)
+#define LOONGSON_CFG2_LPMPREV GENMASK(11, 9)
+#define LOONGSON_CFG2_LAMO BIT(12)
+#define LOONGSON_CFG2_LPIXU BIT(13)
+#define LOONGSON_CFG2_LPIXUN BIT(14)
+#define LOONGSON_CFG2_LZVP BIT(15)
+#define LOONGSON_CFG2_LZVREV GENMASK(18, 16)
+#define LOONGSON_CFG2_LGFTP BIT(19)
+#define LOONGSON_CFG2_LGFTPREV GENMASK(22, 20)
+#define LOONGSON_CFG2_LLFTP BIT(23)
+#define LOONGSON_CFG2_LLFTPREV GENMASK(26, 24)
+#define LOONGSON_CFG2_LCSRP BIT(27)
+#define LOONGSON_CFG2_LDISBLIKELY BIT(28)
+
+#define LOONGSON_CFG3 0x3
+#define LOONGSON_CFG3_LCAMP BIT(0)
+#define LOONGSON_CFG3_LCAMREV GENMASK(3, 1)
+#define LOONGSON_CFG3_LCAMNUM GENMASK(11, 4)
+#define LOONGSON_CFG3_LCAMKW GENMASK(19, 12)
+#define LOONGSON_CFG3_LCAMVW GENMASK(27, 20)
+
+#define LOONGSON_CFG4 0x4
+#define LOONGSON_CFG4_CCFREQ GENMASK(31, 0)
+
+#define LOONGSON_CFG5 0x5
+#define LOONGSON_CFG5_CFM GENMASK(15, 0)
+#define LOONGSON_CFG5_CFD GENMASK(31, 16)
+
+#define LOONGSON_CFG6 0x6
+
+#define LOONGSON_CFG7 0x7
+#define LOONGSON_CFG7_GCCAEQRP BIT(0)
+#define LOONGSON_CFG7_UCAWINP BIT(1)
+
+static inline bool cpu_has_csr(void)
+{
+ if (cpu_has_cfg())
+ return (read_cpucfg(LOONGSON_CFG2) & LOONGSON_CFG2_LCSRP);
+
+ return false;
+}
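
The cpu_has_csr() helper above is the template for probing any CPUCFG bit: confirm the CPUCFG instruction exists, then mask the relevant CFG word. A minimal sketch for the LASX bit (the helper name is hypothetical, not part of this patch):

	/* Hypothetical helper, not in this patch: probe LASX via CPUCFG. */
	static inline bool cpu_has_lasx_cfg(void)
	{
		if (!cpu_has_cfg())
			return false;

		return read_cpucfg(LOONGSON_CFG1) & LOONGSON_CFG1_LASX;
	}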
+
+static inline u32 csr_readl(u32 reg)
+{
+ u32 __res;
+
+ /* RDCSR reg, val */
+ __asm__ __volatile__(
+ "parse_r __res,%0\n\t"
+ "parse_r reg,%1\n\t"
+ ".insn \n\t"
+ ".word (0xc8000118 | (reg << 21) | (__res << 11))\n\t"
+ :"=r"(__res)
+ :"r"(reg)
+ :
+ );
+ return __res;
+}
+
+static inline u64 csr_readq(u32 reg)
+{
+ u64 __res;
+
+	/* DRDCSR reg, val */
+ __asm__ __volatile__(
+ "parse_r __res,%0\n\t"
+ "parse_r reg,%1\n\t"
+ ".insn \n\t"
+ ".word (0xc8020118 | (reg << 21) | (__res << 11))\n\t"
+ :"=r"(__res)
+ :"r"(reg)
+ :
+ );
+ return __res;
+}
+
+static inline void csr_writel(u32 val, u32 reg)
+{
+ /* WRCSR reg, val */
+ __asm__ __volatile__(
+ "parse_r reg,%0\n\t"
+ "parse_r val,%1\n\t"
+ ".insn \n\t"
+ ".word (0xc8010118 | (reg << 21) | (val << 11))\n\t"
+ :
+ :"r"(reg),"r"(val)
+ :
+ );
+}
+
+static inline void csr_writeq(u64 val, u32 reg)
+{
+ /* DWRCSR reg, val */
+ __asm__ __volatile__(
+ "parse_r reg,%0\n\t"
+ "parse_r val,%1\n\t"
+ ".insn \n\t"
+ ".word (0xc8030118 | (reg << 21) | (val << 11))\n\t"
+ :
+ :"r"(reg),"r"(val)
+ :
+ );
+}
+
+/* Public CSR Register can also be accessed with regular addresses */
+#define CSR_PUBLIC_MMIO_BASE 0x1fe00000
+
+#define MMIO_CSR(x)		((void *)TO_UNCAC(CSR_PUBLIC_MMIO_BASE + (x)))
+
+#define LOONGSON_CSR_FEATURES 0x8
+#define LOONGSON_CSRF_TEMP BIT(0)
+#define LOONGSON_CSRF_NODECNT BIT(1)
+#define LOONGSON_CSRF_MSI BIT(2)
+#define LOONGSON_CSRF_EXTIOI BIT(3)
+#define LOONGSON_CSRF_IPI BIT(4)
+#define LOONGSON_CSRF_FREQ BIT(5)
+
+#define LOONGSON_CSR_VENDOR 0x10 /* Vendor name string, should be "Loongson" */
+#define LOONGSON_CSR_CPUNAME 0x20 /* Processor name string */
+#define LOONGSON_CSR_NODECNT 0x408
+#define LOONGSON_CSR_CPUTEMP 0x428
+
+/* Per-core CSRs, only accessible by the local core */
+#define LOONGSON_CSR_IPI_STATUS 0x1000
+#define LOONGSON_CSR_IPI_EN 0x1004
+#define LOONGSON_CSR_IPI_SET 0x1008
+#define LOONGSON_CSR_IPI_CLEAR 0x100c
+#define LOONGSON_CSR_IPI_SEND 0x1040
+#define CSR_IPI_SEND_IP_SHIFT 0
+#define CSR_IPI_SEND_CPU_SHIFT 16
+#define CSR_IPI_SEND_BLOCK BIT(31)
+
+static inline u64 drdtime(void)
+{
+ int rID = 0;
+ u64 val = 0;
+
+ __asm__ __volatile__(
+ "parse_r rID,%0\n\t"
+ "parse_r val,%1\n\t"
+ ".insn \n\t"
+ ".word (0xc8090118 | (rID << 21) | (val << 11))\n\t"
+ :"=r"(rID),"=r"(val)
+ :
+ );
+ return val;
+}
+
+#endif
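
Taken together, these accessors let platform code discover CSR features and deliver IPIs without memory-mapped registers once LOONGSON_CFG2_LCSRP is confirmed. A hedged sketch of IPI delivery (the wrapper is illustrative, not part of this patch; the interpretation of the IP field as an interrupt index and of CSR_IPI_SEND_BLOCK as a wait flag is an assumption):

	/* Illustrative only: raise interrupt "ip" on a target CPU. */
	static void csr_ipi_send(int cpu, unsigned int ip)
	{
		if (!cpu_has_csr())
			return;	/* real code would fall back to MMIO access */

		csr_writel(CSR_IPI_SEND_BLOCK |
			   (ip << CSR_IPI_SEND_IP_SHIFT) |
			   (cpu << CSR_IPI_SEND_CPU_SHIFT),
			   LOONGSON_CSR_IPI_SEND);
	}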
diff --git a/arch/mips/include/asm/mach-loongson64/mmzone.h b/arch/mips/include/asm/mach-loongson64/mmzone.h
index 62073d60739f..3a25dbd3b3e9 100644
--- a/arch/mips/include/asm/mach-loongson64/mmzone.h
+++ b/arch/mips/include/asm/mach-loongson64/mmzone.h
@@ -6,8 +6,8 @@
* Huacai Chen, chenhc@lemote.com
* Xiaofu Meng, Shuangshuang Zhang
*/
-#ifndef _ASM_MACH_MMZONE_H
-#define _ASM_MACH_MMZONE_H
+#ifndef _ASM_MACH_LOONGSON64_MMZONE_H
+#define _ASM_MACH_LOONGSON64_MMZONE_H
#include <boot_param.h>
#define NODE_ADDRSPACE_SHIFT 44
@@ -19,30 +19,9 @@
#define pa_to_nid(addr) (((addr) & 0xf00000000000) >> NODE_ADDRSPACE_SHIFT)
#define nid_to_addrbase(nid) ((nid) << NODE_ADDRSPACE_SHIFT)
-#define LEVELS_PER_SLICE 128
+extern struct pglist_data *__node_data[];
-struct slice_data {
- unsigned long irq_enable_mask[2];
- int level_to_irq[LEVELS_PER_SLICE];
-};
-
-struct hub_data {
- cpumask_t h_cpus;
- unsigned long slice_map;
- unsigned long irq_alloc_mask[2];
- struct slice_data slice[2];
-};
-
-struct node_data {
- struct pglist_data pglist;
- struct hub_data hub;
- cpumask_t cpumask;
-};
-
-extern struct node_data *__node_data[];
-
-#define NODE_DATA(n) (&__node_data[(n)]->pglist)
-#define hub_data(n) (&__node_data[(n)]->hub)
+#define NODE_DATA(n) (__node_data[n])
extern void setup_zero_pages(void);
extern void __init prom_init_numa_memory(void);
diff --git a/arch/mips/include/asm/mach-loongson64/pci.h b/arch/mips/include/asm/mach-loongson64/pci.h
index 97f807fb2117..8b59d64a23e8 100644
--- a/arch/mips/include/asm/mach-loongson64/pci.h
+++ b/arch/mips/include/asm/mach-loongson64/pci.h
@@ -12,39 +12,8 @@ extern struct pci_ops loongson_pci_ops;
/* this is an offset from mips_io_port_base */
#define LOONGSON_PCI_IO_START 0x00004000UL
-#ifdef CONFIG_CPU_SUPPORTS_ADDRWINCFG
-
-/*
- * we use address window2 to map cpu address space to pci space
- * window2: cpu [1G, 2G] -> pci [1G, 2G]
- * why not use window 0 & 1? because they are used by cpu when booting.
- * window0: cpu [0, 256M] -> ddr [0, 256M]
- * window1: cpu [256M, 512M] -> pci [256M, 512M]
- */
-
-/* the smallest LOONGSON_CPU_MEM_SRC can be 512M */
-#define LOONGSON_CPU_MEM_SRC 0x40000000ul /* 1G */
-#define LOONGSON_PCI_MEM_DST LOONGSON_CPU_MEM_SRC
-
-#define LOONGSON_PCI_MEM_START LOONGSON_PCI_MEM_DST
-#define LOONGSON_PCI_MEM_END (0x80000000ul-1) /* 2G */
-
-#define MMAP_CPUTOPCI_SIZE (LOONGSON_PCI_MEM_END - \
- LOONGSON_PCI_MEM_START + 1)
-
-#else /* loongson2f/32bit & loongson2e */
-
-/* this pci memory space is mapped by pcimap in pci.c */
-#ifdef CONFIG_CPU_LOONGSON3
#define LOONGSON_PCI_MEM_START 0x40000000UL
#define LOONGSON_PCI_MEM_END 0x7effffffUL
-#else
-#define LOONGSON_PCI_MEM_START LOONGSON_PCILO1_BASE
-#define LOONGSON_PCI_MEM_END (LOONGSON_PCILO1_BASE + 0x04000000 * 2)
-#endif
-/* this is an offset from mips_io_port_base */
-#define LOONGSON_PCI_IO_START 0x00004000UL
-#endif /* !CONFIG_CPU_SUPPORTS_ADDRWINCFG */
#endif /* !__ASM_MACH_LOONGSON64_PCI_H_ */
diff --git a/arch/mips/include/asm/mach-loongson64/topology.h b/arch/mips/include/asm/mach-loongson64/topology.h
index 7ff819ab308a..3414a1fd1783 100644
--- a/arch/mips/include/asm/mach-loongson64/topology.h
+++ b/arch/mips/include/asm/mach-loongson64/topology.h
@@ -5,7 +5,9 @@
#ifdef CONFIG_NUMA
#define cpu_to_node(cpu) (cpu_logical_map(cpu) >> 2)
-#define cpumask_of_node(node) (&__node_data[(node)]->cpumask)
+
+extern cpumask_t __node_cpumask[];
+#define cpumask_of_node(node) (&__node_cpumask[node])
struct pci_bus;
extern int pcibus_to_node(struct pci_bus *);
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index bdbdc19a2b8f..0d5a30988697 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -689,6 +689,9 @@
#define MIPS_CONF7_IAR (_ULCAST_(1) << 10)
#define MIPS_CONF7_AR (_ULCAST_(1) << 16)
+/* Ingenic HPTLB off bits */
+#define XBURST_PAGECTRL_HPTLB_DIS 0xa9000000
+
/* Ingenic Config7 bits */
#define MIPS_CONF7_BTB_LOOP_EN (_ULCAST_(1) << 4)
@@ -1971,6 +1974,9 @@ do { \
#define read_c0_brcm_sleepcount() __read_32bit_c0_register($22, 7)
#define write_c0_brcm_sleepcount(val) __write_32bit_c0_register($22, 7, val)
+/* Ingenic page ctrl register */
+#define write_c0_page_ctrl(val) __write_32bit_c0_register($5, 4, val)
+
/*
* Macros to access the guest system control coprocessor
*/
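
With both additions in place, disabling the XBurst huge-page TLB from CPU-probe code becomes a single call; a sketch of the expected call site, assuming it only runs on an affected Ingenic core:

	/* Sketch: force the Ingenic XBurst huge-page TLB off. */
	write_c0_page_ctrl(XBURST_PAGECTRL_HPTLB_DIS);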
diff --git a/arch/mips/include/asm/module.h b/arch/mips/include/asm/module.h
index ed70994fbbec..9846047b3d3d 100644
--- a/arch/mips/include/asm/module.h
+++ b/arch/mips/include/asm/module.h
@@ -119,12 +119,12 @@ search_module_dbetables(unsigned long addr)
#define MODULE_PROC_FAMILY "RM7000 "
#elif defined CONFIG_CPU_SB1
#define MODULE_PROC_FAMILY "SB1 "
-#elif defined CONFIG_CPU_LOONGSON1
-#define MODULE_PROC_FAMILY "LOONGSON1 "
-#elif defined CONFIG_CPU_LOONGSON2
-#define MODULE_PROC_FAMILY "LOONGSON2 "
-#elif defined CONFIG_CPU_LOONGSON3
-#define MODULE_PROC_FAMILY "LOONGSON3 "
+#elif defined CONFIG_CPU_LOONGSON32
+#define MODULE_PROC_FAMILY "LOONGSON32 "
+#elif defined CONFIG_CPU_LOONGSON2EF
+#define MODULE_PROC_FAMILY "LOONGSON2EF "
+#elif defined CONFIG_CPU_LOONGSON64
+#define MODULE_PROC_FAMILY "LOONGSON64 "
#elif defined CONFIG_CPU_CAVIUM_OCTEON
#define MODULE_PROC_FAMILY "OCTEON "
#elif defined CONFIG_CPU_XLR
diff --git a/arch/mips/include/asm/pci/bridge.h b/arch/mips/include/asm/pci/bridge.h
index a92cd30b48c9..3bc630ff9ad4 100644
--- a/arch/mips/include/asm/pci/bridge.h
+++ b/arch/mips/include/asm/pci/bridge.h
@@ -807,6 +807,7 @@ struct bridge_controller {
unsigned long intr_addr;
struct irq_domain *domain;
unsigned int pci_int[8];
+ u32 ioc3_sid[8];
nasid_t nasid;
};
diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
index 166842337eb2..fa77cb71f303 100644
--- a/arch/mips/include/asm/pgalloc.h
+++ b/arch/mips/include/asm/pgalloc.h
@@ -96,9 +96,9 @@ static inline void pud_free(struct mm_struct *mm, pud_t *pud)
free_pages((unsigned long)pud, PUD_ORDER);
}
-static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
+static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4d, pud_t *pud)
{
- set_pgd(pgd, __pgd((unsigned long)pud));
+ set_p4d(p4d, __p4d((unsigned long)pud));
}
#define __pud_free_tlb(tlb, x, addr) pud_free((tlb)->mm, x)
diff --git a/arch/mips/include/asm/pgtable-32.h b/arch/mips/include/asm/pgtable-32.h
index ba967148b016..1945c8970141 100644
--- a/arch/mips/include/asm/pgtable-32.h
+++ b/arch/mips/include/asm/pgtable-32.h
@@ -16,7 +16,6 @@
#include <asm/cachectl.h>
#include <asm/fixmap.h>
-#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>
#ifdef CONFIG_HIGHMEM
@@ -196,14 +195,11 @@ static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
#define pte_page(x) pfn_to_page(pte_pfn(x))
-#define __pgd_offset(address) pgd_index(address)
-#define __pud_offset(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
-#define __pmd_offset(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
-
/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
+#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
/* to find an entry in a page-table-directory */
diff --git a/arch/mips/include/asm/pgtable-64.h b/arch/mips/include/asm/pgtable-64.h
index 93a9dce31f25..f92716cfa4f4 100644
--- a/arch/mips/include/asm/pgtable-64.h
+++ b/arch/mips/include/asm/pgtable-64.h
@@ -17,11 +17,12 @@
#include <asm/cachectl.h>
#include <asm/fixmap.h>
-#define __ARCH_USE_5LEVEL_HACK
-#if defined(CONFIG_PAGE_SIZE_64KB) && !defined(CONFIG_MIPS_VA_BITS_48)
+#if CONFIG_PGTABLE_LEVELS == 2
#include <asm-generic/pgtable-nopmd.h>
-#elif !(defined(CONFIG_PAGE_SIZE_4KB) && defined(CONFIG_MIPS_VA_BITS_48))
+#elif CONFIG_PGTABLE_LEVELS == 3
#include <asm-generic/pgtable-nopud.h>
+#else
+#include <asm-generic/pgtable-nop4d.h>
#endif
/*
@@ -186,44 +187,49 @@ extern pud_t invalid_pud_table[PTRS_PER_PUD];
/*
* Empty pgd entries point to the invalid_pud_table.
*/
-static inline int pgd_none(pgd_t pgd)
+static inline int p4d_none(p4d_t p4d)
{
- return pgd_val(pgd) == (unsigned long)invalid_pud_table;
+ return p4d_val(p4d) == (unsigned long)invalid_pud_table;
}
-static inline int pgd_bad(pgd_t pgd)
+static inline int p4d_bad(p4d_t p4d)
{
- if (unlikely(pgd_val(pgd) & ~PAGE_MASK))
+ if (unlikely(p4d_val(p4d) & ~PAGE_MASK))
return 1;
return 0;
}
-static inline int pgd_present(pgd_t pgd)
+static inline int p4d_present(p4d_t p4d)
{
- return pgd_val(pgd) != (unsigned long)invalid_pud_table;
+ return p4d_val(p4d) != (unsigned long)invalid_pud_table;
}
-static inline void pgd_clear(pgd_t *pgdp)
+static inline void p4d_clear(p4d_t *p4dp)
{
- pgd_val(*pgdp) = (unsigned long)invalid_pud_table;
+ p4d_val(*p4dp) = (unsigned long)invalid_pud_table;
}
#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))
-static inline unsigned long pgd_page_vaddr(pgd_t pgd)
+static inline unsigned long p4d_page_vaddr(p4d_t p4d)
{
- return pgd_val(pgd);
+ return p4d_val(p4d);
}
-static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
+#define p4d_phys(p4d) virt_to_phys((void *)p4d_val(p4d))
+#define p4d_page(p4d) (pfn_to_page(p4d_phys(p4d) >> PAGE_SHIFT))
+
+#define p4d_index(address) (((address) >> P4D_SHIFT) & (PTRS_PER_P4D - 1))
+
+static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
{
- return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(address);
+ return (pud_t *)p4d_page_vaddr(*p4d) + pud_index(address);
}
-static inline void set_pgd(pgd_t *pgd, pgd_t pgdval)
+static inline void set_p4d(p4d_t *p4d, p4d_t p4dval)
{
- *pgd = pgdval;
+ *p4d = p4dval;
}
#endif
@@ -314,10 +320,6 @@ static inline void pud_clear(pud_t *pudp)
#define pfn_pmd(pfn, prot) __pmd(((pfn) << _PFN_SHIFT) | pgprot_val(prot))
#endif
-#define __pgd_offset(address) pgd_index(address)
-#define __pud_offset(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
-#define __pmd_offset(address) pmd_index(address)
-
/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
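
With __ARCH_USE_5LEVEL_HACK gone, table walks pass through the (possibly folded) p4d level explicitly instead of treating pgd entries as puds. A hedged sketch of the canonical walk these helpers now support (function name illustrative; error checks such as pgd_none()/p4d_bad() omitted for brevity):

	static pte_t *walk_to_pte(struct mm_struct *mm, unsigned long addr)
	{
		pgd_t *pgd = pgd_offset(mm, addr);
		p4d_t *p4d = p4d_offset(pgd, addr);
		pud_t *pud = pud_offset(p4d, addr);
		pmd_t *pmd = pmd_offset(pud, addr);

		return pte_offset_kernel(pmd, addr);
	}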
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index f85bd5b15f51..91b89aab1787 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -644,17 +644,6 @@ static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
#include <asm-generic/pgtable.h>
/*
- * uncached accelerated TLB map for video memory access
- */
-#ifdef CONFIG_CPU_SUPPORTS_UNCACHED_ACCELERATED
-#define __HAVE_PHYS_MEM_ACCESS_PROT
-
-struct file;
-pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
- unsigned long size, pgprot_t vma_prot);
-#endif
-
-/*
* We provide our own get_unmapped area to cope with the virtual aliasing
* constraints placed on us by the cache architecture.
*/
diff --git a/arch/mips/include/asm/pmon.h b/arch/mips/include/asm/pmon.h
deleted file mode 100644
index 6ad519189ce2..000000000000
--- a/arch/mips/include/asm/pmon.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2004 by Ralf Baechle
- *
- * The cpustart method is a PMC-Sierra's function to start the secondary CPU.
- * Stock PMON 2000 has the smpfork, semlock and semunlock methods instead.
- */
-#ifndef _ASM_PMON_H
-#define _ASM_PMON_H
-
-struct callvectors {
- int (*open) (char*, int, int);
- int (*close) (int);
- int (*read) (int, void*, int);
- int (*write) (int, void*, int);
- off_t (*lseek) (int, off_t, int);
- int (*printf) (const char*, ...);
- void (*cacheflush) (void);
- char* (*gets) (char*);
- union {
- int (*smpfork) (unsigned long cp, char *sp);
- int (*cpustart) (long, void (*)(void), void *, long);
- } _s;
- int (*semlock) (int sem);
- void (*semunlock) (int sem);
-};
-
-extern struct callvectors *debug_vectors;
-
-#define pmon_open(name, flags, mode) debug_vectors->open(name, flage, mode)
-#define pmon_close(fd) debug_vectors->close(fd)
-#define pmon_read(fd, buf, count) debug_vectors->read(fd, buf, count)
-#define pmon_write(fd, buf, count) debug_vectors->write(fd, buf, count)
-#define pmon_lseek(fd, off, whence) debug_vectors->lseek(fd, off, whence)
-#define pmon_printf(fmt...) debug_vectors->printf(fmt)
-#define pmon_cacheflush() debug_vectors->cacheflush()
-#define pmon_gets(s) debug_vectors->gets(s)
-#define pmon_cpustart(n, f, sp, gp) debug_vectors->_s.cpustart(n, f, sp, gp)
-#define pmon_smpfork(cp, sp) debug_vectors->_s.smpfork(cp, sp)
-#define pmon_semlock(sem) debug_vectors->semlock(sem)
-#define pmon_semunlock(sem) debug_vectors->semunlock(sem)
-
-#endif /* _ASM_PMON_H */
diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
index fba18d4a9190..7619ad319400 100644
--- a/arch/mips/include/asm/processor.h
+++ b/arch/mips/include/asm/processor.h
@@ -385,7 +385,7 @@ unsigned long get_wchan(struct task_struct *p);
#define KSTK_ESP(tsk) (task_pt_regs(tsk)->regs[29])
#define KSTK_STATUS(tsk) (task_pt_regs(tsk)->cp0_status)
-#ifdef CONFIG_CPU_LOONGSON3
+#ifdef CONFIG_CPU_LOONGSON64
/*
* Loongson-3's SFB (Store-Fill-Buffer) may buffer writes indefinitely when a
* tight read loop is executed, because reads take priority over writes & the
diff --git a/arch/mips/include/asm/r4kcache.h b/arch/mips/include/asm/r4kcache.h
index 7f4a32d3345a..15ab16f99f28 100644
--- a/arch/mips/include/asm/r4kcache.h
+++ b/arch/mips/include/asm/r4kcache.h
@@ -15,12 +15,14 @@
#include <linux/stringify.h>
#include <asm/asm.h>
+#include <asm/asm-eva.h>
#include <asm/cacheops.h>
#include <asm/compiler.h>
#include <asm/cpu-features.h>
#include <asm/cpu-type.h>
#include <asm/mipsmtregs.h>
#include <asm/mmzone.h>
+#include <asm/unroll.h>
#include <linux/uaccess.h> /* for uaccess_kernel() */
extern void (*r4k_blast_dcache)(void);
@@ -39,16 +41,19 @@ extern void (*r4k_blast_icache)(void);
*/
#define INDEX_BASE CKSEG0
-#define cache_op(op,addr) \
+#define _cache_op(insn, op, addr) \
__asm__ __volatile__( \
" .set push \n" \
" .set noreorder \n" \
" .set "MIPS_ISA_ARCH_LEVEL" \n" \
- " cache %0, %1 \n" \
+ " " insn("%0", "%1") " \n" \
" .set pop \n" \
: \
: "i" (op), "R" (*(unsigned char *)(addr)))
+#define cache_op(op, addr) \
+ _cache_op(kernel_cache, op, addr)
+
static inline void flush_icache_line_indexed(unsigned long addr)
{
cache_op(Index_Invalidate_I, addr);
@@ -67,7 +72,7 @@ static inline void flush_scache_line_indexed(unsigned long addr)
static inline void flush_icache_line(unsigned long addr)
{
switch (boot_cpu_type()) {
- case CPU_LOONGSON2:
+ case CPU_LOONGSON2EF:
cache_op(Hit_Invalidate_I_Loongson2, addr);
break;
@@ -149,7 +154,7 @@ static inline void flush_scache_line(unsigned long addr)
static inline int protected_flush_icache_line(unsigned long addr)
{
switch (boot_cpu_type()) {
- case CPU_LOONGSON2:
+ case CPU_LOONGSON2EF:
return protected_cache_op(Hit_Invalidate_I_Loongson2, addr);
default:
@@ -193,338 +198,10 @@ static inline void invalidate_tcache_page(unsigned long addr)
cache_op(Page_Invalidate_T, addr);
}
-#ifndef CONFIG_CPU_MIPSR6
-#define cache16_unroll32(base,op) \
- __asm__ __volatile__( \
- " .set push \n" \
- " .set noreorder \n" \
- " .set mips3 \n" \
- " cache %1, 0x000(%0); cache %1, 0x010(%0) \n" \
- " cache %1, 0x020(%0); cache %1, 0x030(%0) \n" \
- " cache %1, 0x040(%0); cache %1, 0x050(%0) \n" \
- " cache %1, 0x060(%0); cache %1, 0x070(%0) \n" \
- " cache %1, 0x080(%0); cache %1, 0x090(%0) \n" \
- " cache %1, 0x0a0(%0); cache %1, 0x0b0(%0) \n" \
- " cache %1, 0x0c0(%0); cache %1, 0x0d0(%0) \n" \
- " cache %1, 0x0e0(%0); cache %1, 0x0f0(%0) \n" \
- " cache %1, 0x100(%0); cache %1, 0x110(%0) \n" \
- " cache %1, 0x120(%0); cache %1, 0x130(%0) \n" \
- " cache %1, 0x140(%0); cache %1, 0x150(%0) \n" \
- " cache %1, 0x160(%0); cache %1, 0x170(%0) \n" \
- " cache %1, 0x180(%0); cache %1, 0x190(%0) \n" \
- " cache %1, 0x1a0(%0); cache %1, 0x1b0(%0) \n" \
- " cache %1, 0x1c0(%0); cache %1, 0x1d0(%0) \n" \
- " cache %1, 0x1e0(%0); cache %1, 0x1f0(%0) \n" \
- " .set pop \n" \
- : \
- : "r" (base), \
- "i" (op));
-
-#define cache32_unroll32(base,op) \
- __asm__ __volatile__( \
- " .set push \n" \
- " .set noreorder \n" \
- " .set mips3 \n" \
- " cache %1, 0x000(%0); cache %1, 0x020(%0) \n" \
- " cache %1, 0x040(%0); cache %1, 0x060(%0) \n" \
- " cache %1, 0x080(%0); cache %1, 0x0a0(%0) \n" \
- " cache %1, 0x0c0(%0); cache %1, 0x0e0(%0) \n" \
- " cache %1, 0x100(%0); cache %1, 0x120(%0) \n" \
- " cache %1, 0x140(%0); cache %1, 0x160(%0) \n" \
- " cache %1, 0x180(%0); cache %1, 0x1a0(%0) \n" \
- " cache %1, 0x1c0(%0); cache %1, 0x1e0(%0) \n" \
- " cache %1, 0x200(%0); cache %1, 0x220(%0) \n" \
- " cache %1, 0x240(%0); cache %1, 0x260(%0) \n" \
- " cache %1, 0x280(%0); cache %1, 0x2a0(%0) \n" \
- " cache %1, 0x2c0(%0); cache %1, 0x2e0(%0) \n" \
- " cache %1, 0x300(%0); cache %1, 0x320(%0) \n" \
- " cache %1, 0x340(%0); cache %1, 0x360(%0) \n" \
- " cache %1, 0x380(%0); cache %1, 0x3a0(%0) \n" \
- " cache %1, 0x3c0(%0); cache %1, 0x3e0(%0) \n" \
- " .set pop \n" \
- : \
- : "r" (base), \
- "i" (op));
-
-#define cache64_unroll32(base,op) \
- __asm__ __volatile__( \
- " .set push \n" \
- " .set noreorder \n" \
- " .set mips3 \n" \
- " cache %1, 0x000(%0); cache %1, 0x040(%0) \n" \
- " cache %1, 0x080(%0); cache %1, 0x0c0(%0) \n" \
- " cache %1, 0x100(%0); cache %1, 0x140(%0) \n" \
- " cache %1, 0x180(%0); cache %1, 0x1c0(%0) \n" \
- " cache %1, 0x200(%0); cache %1, 0x240(%0) \n" \
- " cache %1, 0x280(%0); cache %1, 0x2c0(%0) \n" \
- " cache %1, 0x300(%0); cache %1, 0x340(%0) \n" \
- " cache %1, 0x380(%0); cache %1, 0x3c0(%0) \n" \
- " cache %1, 0x400(%0); cache %1, 0x440(%0) \n" \
- " cache %1, 0x480(%0); cache %1, 0x4c0(%0) \n" \
- " cache %1, 0x500(%0); cache %1, 0x540(%0) \n" \
- " cache %1, 0x580(%0); cache %1, 0x5c0(%0) \n" \
- " cache %1, 0x600(%0); cache %1, 0x640(%0) \n" \
- " cache %1, 0x680(%0); cache %1, 0x6c0(%0) \n" \
- " cache %1, 0x700(%0); cache %1, 0x740(%0) \n" \
- " cache %1, 0x780(%0); cache %1, 0x7c0(%0) \n" \
- " .set pop \n" \
- : \
- : "r" (base), \
- "i" (op));
-
-#define cache128_unroll32(base,op) \
- __asm__ __volatile__( \
- " .set push \n" \
- " .set noreorder \n" \
- " .set mips3 \n" \
- " cache %1, 0x000(%0); cache %1, 0x080(%0) \n" \
- " cache %1, 0x100(%0); cache %1, 0x180(%0) \n" \
- " cache %1, 0x200(%0); cache %1, 0x280(%0) \n" \
- " cache %1, 0x300(%0); cache %1, 0x380(%0) \n" \
- " cache %1, 0x400(%0); cache %1, 0x480(%0) \n" \
- " cache %1, 0x500(%0); cache %1, 0x580(%0) \n" \
- " cache %1, 0x600(%0); cache %1, 0x680(%0) \n" \
- " cache %1, 0x700(%0); cache %1, 0x780(%0) \n" \
- " cache %1, 0x800(%0); cache %1, 0x880(%0) \n" \
- " cache %1, 0x900(%0); cache %1, 0x980(%0) \n" \
- " cache %1, 0xa00(%0); cache %1, 0xa80(%0) \n" \
- " cache %1, 0xb00(%0); cache %1, 0xb80(%0) \n" \
- " cache %1, 0xc00(%0); cache %1, 0xc80(%0) \n" \
- " cache %1, 0xd00(%0); cache %1, 0xd80(%0) \n" \
- " cache %1, 0xe00(%0); cache %1, 0xe80(%0) \n" \
- " cache %1, 0xf00(%0); cache %1, 0xf80(%0) \n" \
- " .set pop \n" \
- : \
- : "r" (base), \
- "i" (op));
-
-#else
-/*
- * MIPS R6 changed the cache opcode and moved to a 8-bit offset field.
- * This means we now need to increment the base register before we flush
- * more cache lines
- */
-#define cache16_unroll32(base,op) \
- __asm__ __volatile__( \
- " .set push\n" \
- " .set noreorder\n" \
- " .set mips64r6\n" \
- " .set noat\n" \
- " cache %1, 0x000(%0); cache %1, 0x010(%0)\n" \
- " cache %1, 0x020(%0); cache %1, 0x030(%0)\n" \
- " cache %1, 0x040(%0); cache %1, 0x050(%0)\n" \
- " cache %1, 0x060(%0); cache %1, 0x070(%0)\n" \
- " cache %1, 0x080(%0); cache %1, 0x090(%0)\n" \
- " cache %1, 0x0a0(%0); cache %1, 0x0b0(%0)\n" \
- " cache %1, 0x0c0(%0); cache %1, 0x0d0(%0)\n" \
- " cache %1, 0x0e0(%0); cache %1, 0x0f0(%0)\n" \
- " "__stringify(LONG_ADDIU)" $1, %0, 0x100 \n" \
- " cache %1, 0x000($1); cache %1, 0x010($1)\n" \
- " cache %1, 0x020($1); cache %1, 0x030($1)\n" \
- " cache %1, 0x040($1); cache %1, 0x050($1)\n" \
- " cache %1, 0x060($1); cache %1, 0x070($1)\n" \
- " cache %1, 0x080($1); cache %1, 0x090($1)\n" \
- " cache %1, 0x0a0($1); cache %1, 0x0b0($1)\n" \
- " cache %1, 0x0c0($1); cache %1, 0x0d0($1)\n" \
- " cache %1, 0x0e0($1); cache %1, 0x0f0($1)\n" \
- " .set pop\n" \
- : \
- : "r" (base), \
- "i" (op));
-
-#define cache32_unroll32(base,op) \
- __asm__ __volatile__( \
- " .set push\n" \
- " .set noreorder\n" \
- " .set mips64r6\n" \
- " .set noat\n" \
- " cache %1, 0x000(%0); cache %1, 0x020(%0)\n" \
- " cache %1, 0x040(%0); cache %1, 0x060(%0)\n" \
- " cache %1, 0x080(%0); cache %1, 0x0a0(%0)\n" \
- " cache %1, 0x0c0(%0); cache %1, 0x0e0(%0)\n" \
- " "__stringify(LONG_ADDIU)" $1, %0, 0x100 \n" \
- " cache %1, 0x000($1); cache %1, 0x020($1)\n" \
- " cache %1, 0x040($1); cache %1, 0x060($1)\n" \
- " cache %1, 0x080($1); cache %1, 0x0a0($1)\n" \
- " cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n" \
- " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
- " cache %1, 0x000($1); cache %1, 0x020($1)\n" \
- " cache %1, 0x040($1); cache %1, 0x060($1)\n" \
- " cache %1, 0x080($1); cache %1, 0x0a0($1)\n" \
- " cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n" \
- " "__stringify(LONG_ADDIU)" $1, $1, 0x100\n" \
- " cache %1, 0x000($1); cache %1, 0x020($1)\n" \
- " cache %1, 0x040($1); cache %1, 0x060($1)\n" \
- " cache %1, 0x080($1); cache %1, 0x0a0($1)\n" \
- " cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n" \
- " .set pop\n" \
- : \
- : "r" (base), \
- "i" (op));
-
-#define cache64_unroll32(base,op) \
- __asm__ __volatile__( \
- " .set push\n" \
- " .set noreorder\n" \
- " .set mips64r6\n" \
- " .set noat\n" \
- " cache %1, 0x000(%0); cache %1, 0x040(%0)\n" \
- " cache %1, 0x080(%0); cache %1, 0x0c0(%0)\n" \
- " "__stringify(LONG_ADDIU)" $1, %0, 0x100 \n" \
- " cache %1, 0x000($1); cache %1, 0x040($1)\n" \
- " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
- " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
- " cache %1, 0x000($1); cache %1, 0x040($1)\n" \
- " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
- " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
- " cache %1, 0x000($1); cache %1, 0x040($1)\n" \
- " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
- " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
- " cache %1, 0x000($1); cache %1, 0x040($1)\n" \
- " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
- " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
- " cache %1, 0x000($1); cache %1, 0x040($1)\n" \
- " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
- " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
- " cache %1, 0x000($1); cache %1, 0x040($1)\n" \
- " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
- " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
- " cache %1, 0x000($1); cache %1, 0x040($1)\n" \
- " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
- " .set pop\n" \
- : \
- : "r" (base), \
- "i" (op));
-
-#define cache128_unroll32(base,op) \
- __asm__ __volatile__( \
- " .set push\n" \
- " .set noreorder\n" \
- " .set mips64r6\n" \
- " .set noat\n" \
- " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
- " "__stringify(LONG_ADDIU)" $1, %0, 0x100 \n" \
- " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
- " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
- " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
- " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
- " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
- " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
- " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
- " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
- " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
- " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
- " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
- " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
- " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
- " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
- " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
- " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
- " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
- " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
- " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
- " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
- " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
- " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
- " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
- " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
- " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
- " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
- " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
- " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
- " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
- " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
- " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
- " .set pop\n" \
- : \
- : "r" (base), \
- "i" (op));
-#endif /* CONFIG_CPU_MIPSR6 */
-
-/*
- * Perform the cache operation specified by op using a user mode virtual
- * address while in kernel mode.
- */
-#define cache16_unroll32_user(base,op) \
- __asm__ __volatile__( \
- " .set push \n" \
- " .set noreorder \n" \
- " .set mips0 \n" \
- " .set eva \n" \
- " cachee %1, 0x000(%0); cachee %1, 0x010(%0) \n" \
- " cachee %1, 0x020(%0); cachee %1, 0x030(%0) \n" \
- " cachee %1, 0x040(%0); cachee %1, 0x050(%0) \n" \
- " cachee %1, 0x060(%0); cachee %1, 0x070(%0) \n" \
- " cachee %1, 0x080(%0); cachee %1, 0x090(%0) \n" \
- " cachee %1, 0x0a0(%0); cachee %1, 0x0b0(%0) \n" \
- " cachee %1, 0x0c0(%0); cachee %1, 0x0d0(%0) \n" \
- " cachee %1, 0x0e0(%0); cachee %1, 0x0f0(%0) \n" \
- " cachee %1, 0x100(%0); cachee %1, 0x110(%0) \n" \
- " cachee %1, 0x120(%0); cachee %1, 0x130(%0) \n" \
- " cachee %1, 0x140(%0); cachee %1, 0x150(%0) \n" \
- " cachee %1, 0x160(%0); cachee %1, 0x170(%0) \n" \
- " cachee %1, 0x180(%0); cachee %1, 0x190(%0) \n" \
- " cachee %1, 0x1a0(%0); cachee %1, 0x1b0(%0) \n" \
- " cachee %1, 0x1c0(%0); cachee %1, 0x1d0(%0) \n" \
- " cachee %1, 0x1e0(%0); cachee %1, 0x1f0(%0) \n" \
- " .set pop \n" \
- : \
- : "r" (base), \
- "i" (op));
-
-#define cache32_unroll32_user(base, op) \
- __asm__ __volatile__( \
- " .set push \n" \
- " .set noreorder \n" \
- " .set mips0 \n" \
- " .set eva \n" \
- " cachee %1, 0x000(%0); cachee %1, 0x020(%0) \n" \
- " cachee %1, 0x040(%0); cachee %1, 0x060(%0) \n" \
- " cachee %1, 0x080(%0); cachee %1, 0x0a0(%0) \n" \
- " cachee %1, 0x0c0(%0); cachee %1, 0x0e0(%0) \n" \
- " cachee %1, 0x100(%0); cachee %1, 0x120(%0) \n" \
- " cachee %1, 0x140(%0); cachee %1, 0x160(%0) \n" \
- " cachee %1, 0x180(%0); cachee %1, 0x1a0(%0) \n" \
- " cachee %1, 0x1c0(%0); cachee %1, 0x1e0(%0) \n" \
- " cachee %1, 0x200(%0); cachee %1, 0x220(%0) \n" \
- " cachee %1, 0x240(%0); cachee %1, 0x260(%0) \n" \
- " cachee %1, 0x280(%0); cachee %1, 0x2a0(%0) \n" \
- " cachee %1, 0x2c0(%0); cachee %1, 0x2e0(%0) \n" \
- " cachee %1, 0x300(%0); cachee %1, 0x320(%0) \n" \
- " cachee %1, 0x340(%0); cachee %1, 0x360(%0) \n" \
- " cachee %1, 0x380(%0); cachee %1, 0x3a0(%0) \n" \
- " cachee %1, 0x3c0(%0); cachee %1, 0x3e0(%0) \n" \
- " .set pop \n" \
- : \
- : "r" (base), \
- "i" (op));
-
-#define cache64_unroll32_user(base, op) \
- __asm__ __volatile__( \
- " .set push \n" \
- " .set noreorder \n" \
- " .set mips0 \n" \
- " .set eva \n" \
- " cachee %1, 0x000(%0); cachee %1, 0x040(%0) \n" \
- " cachee %1, 0x080(%0); cachee %1, 0x0c0(%0) \n" \
- " cachee %1, 0x100(%0); cachee %1, 0x140(%0) \n" \
- " cachee %1, 0x180(%0); cachee %1, 0x1c0(%0) \n" \
- " cachee %1, 0x200(%0); cachee %1, 0x240(%0) \n" \
- " cachee %1, 0x280(%0); cachee %1, 0x2c0(%0) \n" \
- " cachee %1, 0x300(%0); cachee %1, 0x340(%0) \n" \
- " cachee %1, 0x380(%0); cachee %1, 0x3c0(%0) \n" \
- " cachee %1, 0x400(%0); cachee %1, 0x440(%0) \n" \
- " cachee %1, 0x480(%0); cachee %1, 0x4c0(%0) \n" \
- " cachee %1, 0x500(%0); cachee %1, 0x540(%0) \n" \
- " cachee %1, 0x580(%0); cachee %1, 0x5c0(%0) \n" \
- " cachee %1, 0x600(%0); cachee %1, 0x640(%0) \n" \
- " cachee %1, 0x680(%0); cachee %1, 0x6c0(%0) \n" \
- " cachee %1, 0x700(%0); cachee %1, 0x740(%0) \n" \
- " cachee %1, 0x780(%0); cachee %1, 0x7c0(%0) \n" \
- " .set pop \n" \
- : \
- : "r" (base), \
- "i" (op));
+#define cache_unroll(times, insn, op, addr, lsize) do { \
+ int i = 0; \
+ unroll(times, _cache_op, insn, op, (addr) + (i++ * (lsize))); \
+} while (0)
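
cache_unroll() replaces the several hundred lines of hand-written cacheXX_unroll32 macros above: asm/unroll.h expands the loop at compile time, so each invocation still emits "times" back-to-back cache instructions at successive line offsets. Conceptually it behaves like this loop (illustrative only; no loop variable survives to run time, since the cache op must stay an immediate operand):

	/* What cache_unroll(32, kernel_cache, op, start, lsize) emits: */
	for (i = 0; i < 32; i++)
		cache_op(op, start + i * lsize);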
/* build blast_xxx, blast_xxx_page, blast_xxx_page_indexed */
#define __BUILD_BLAST_CACHE(pfx, desc, indexop, hitop, lsize, extra) \
@@ -539,7 +216,8 @@ static inline void extra##blast_##pfx##cache##lsize(void) \
\
for (ws = 0; ws < ws_end; ws += ws_inc) \
for (addr = start; addr < end; addr += lsize * 32) \
- cache##lsize##_unroll32(addr|ws, indexop); \
+ cache_unroll(32, kernel_cache, indexop, \
+ addr | ws, lsize); \
} \
\
static inline void extra##blast_##pfx##cache##lsize##_page(unsigned long page) \
@@ -548,7 +226,7 @@ static inline void extra##blast_##pfx##cache##lsize##_page(unsigned long page) \
unsigned long end = page + PAGE_SIZE; \
\
do { \
- cache##lsize##_unroll32(start, hitop); \
+ cache_unroll(32, kernel_cache, hitop, start, lsize); \
start += lsize * 32; \
} while (start < end); \
} \
@@ -565,7 +243,8 @@ static inline void extra##blast_##pfx##cache##lsize##_page_indexed(unsigned long
\
for (ws = 0; ws < ws_end; ws += ws_inc) \
for (addr = start; addr < end; addr += lsize * 32) \
- cache##lsize##_unroll32(addr|ws, indexop); \
+ cache_unroll(32, kernel_cache, indexop, \
+ addr | ws, lsize); \
}
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16, )
@@ -596,7 +275,7 @@ static inline void blast_##pfx##cache##lsize##_user_page(unsigned long page) \
unsigned long end = page + PAGE_SIZE; \
\
do { \
- cache##lsize##_unroll32_user(start, hitop); \
+ cache_unroll(32, user_cache, hitop, start, lsize); \
start += lsize * 32; \
} while (start < end); \
}
@@ -688,7 +367,8 @@ static inline void blast_##pfx##cache##lsize##_node(long node) \
\
for (ws = 0; ws < ws_end; ws += ws_inc) \
for (addr = start; addr < end; addr += lsize * 32) \
- cache##lsize##_unroll32(addr|ws, indexop); \
+ cache_unroll(32, kernel_cache, indexop, \
+ addr | ws, lsize); \
}
__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16)
diff --git a/arch/mips/include/asm/sgi/heart.h b/arch/mips/include/asm/sgi/heart.h
new file mode 100644
index 000000000000..c423221b4792
--- /dev/null
+++ b/arch/mips/include/asm/sgi/heart.h
@@ -0,0 +1,272 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * HEART chip definitions
+ *
+ * Copyright (C) 2004-2007 Stanislaw Skowronek <skylark@unaligned.org>
+ * 2009 Johannes Dickgreber <tanzy@gmx.de>
+ * 2007-2015 Joshua Kinard <kumba@gentoo.org>
+ */
+#ifndef __ASM_SGI_HEART_H
+#define __ASM_SGI_HEART_H
+
+#include <linux/types.h>
+#include <linux/time.h>
+
+/*
+ * There are 8 DIMM slots on an IP30 system
+ * board, which are grouped into four banks
+ */
+#define HEART_MEMORY_BANKS 4
+
+/* HEART can support up to four CPUs */
+#define HEART_MAX_CPUS 4
+
+#define HEART_XKPHYS_BASE ((void *)(IO_BASE | 0x000000000ff00000ULL))
+
+/**
+ * struct ip30_heart_regs - struct that maps IP30 HEART registers.
+ * @mode: HEART_MODE - purpose unknown, machine reset called from here.
+ * @sdram_mode: HEART_SDRAM_MODE - purpose unknown.
+ * @mem_refresh: HEART_MEM_REF - purpose unknown.
+ * @mem_req_arb: HEART_MEM_REQ_ARB - purpose unknown.
+ * @mem_cfg.q: union for 64bit access to HEART_MEMCFG - 4x 64bit registers.
+ * @mem_cfg.l: union for 32bit access to HEART_MEMCFG - 8x 32bit registers.
+ * @fc_mode: HEART_FC_MODE - purpose unknown, possibly for GFX flow control.
+ * @fc_timer_limit: HEART_FC_TIMER_LIMIT - purpose unknown.
+ * @fc_addr: HEART_FC0_ADDR, HEART_FC1_ADDR - purpose unknown.
+ * @fc_credit_cnt: HEART_FC0_CR_CNT, HEART_FC1_CR_CNT - purpose unknown.
+ * @fc_timer: HEART_FC0_TIMER, HEART_FC1_TIMER - purpose unknown.
+ * @status: HEART_STATUS - HEART status information.
+ * @bus_err_addr: HEART_BERR_ADDR - likely contains addr of recent SIGBUS.
+ * @bus_err_misc: HEART_BERR_MISC - purpose unknown.
+ * @mem_err_addr: HEART_MEMERR_ADDR - likely contains addr of recent mem err.
+ * @mem_err_data: HEART_MEMERR_DATA - purpose unknown.
+ * @piur_acc_err: HEART_PIUR_ACC_ERR - likely for access err to HEART regs.
+ * @mlan_clock_div: HEART_MLAN_CLK_DIV - MicroLAN clock divider.
+ * @mlan_ctrl: HEART_MLAN_CTL - MicroLAN control.
+ * @__pad0: 0x0f40 bytes of padding -> next HEART register 0x01000.
+ * @undefined: Undefined/diag register, write to it triggers PIUR_ACC_ERR.
+ * @__pad1: 0xeff8 bytes of padding -> next HEART register 0x10000.
+ * @imr: HEART_IMR0 to HEART_IMR3 - per-cpu interrupt mask register.
+ * @set_isr: HEART_SET_ISR - set interrupt status register.
+ * @clear_isr: HEART_CLR_ISR - clear interrupt status register.
+ * @isr: HEART_ISR - interrupt status register (read-only).
+ * @imsr: HEART_IMSR - purpose unknown.
+ * @cause: HEART_CAUSE - HEART cause information.
+ * @__pad2: 0xffb8 bytes of padding -> next HEART register 0x20000.
+ * @count: HEART_COUNT - 52-bit counter.
+ * @__pad3: 0xfff8 bytes of padding -> next HEART register 0x30000.
+ * @compare: HEART_COMPARE - 24-bit compare.
+ * @__pad4: 0xfff8 bytes of padding -> next HEART register 0x40000.
+ * @trigger: HEART_TRIGGER - purpose unknown.
+ * @__pad5: 0xfff8 bytes of padding -> next HEART register 0x50000.
+ * @cpuid: HEART_PRID - contains CPU ID of CPU currently accessing HEART.
+ * @__pad6: 0xfff8 bytes of padding -> next HEART register 0x60000.
+ * @sync: HEART_SYNC - purpose unknown.
+ *
+ * HEART is the main system controller ASIC for the IP30 system. It incorporates
+ * a memory controller, interrupt status/cause/set/clear management, basic
+ * timer with count/compare, and other functionality. For Linux, not all of
+ * HEART's functions are fully understood.
+ *
+ * Implementation note: All HEART registers are 64 bits wide, but the mem_cfg
+ * register only reports correct values if queried in 32 bits. Hence the need
+ * for a union. Even though mem_cfg.l has 8 array slots, we only ever query
+ * up to 4 of those. IP30 has 8 DIMM slots arranged into 4 banks, w/ 2 DIMMs
+ * per bank. Each 32bit read accesses one of these banks. Perhaps HEART was
+ * designed to address up to 8 banks (16 DIMMs)? We may never know.
+ */
+struct ip30_heart_regs { /* 0x0ff00000 */
+ u64 mode; /* + 0x00000 */
+ /* Memory */
+ u64 sdram_mode; /* + 0x00008 */
+ u64 mem_refresh; /* + 0x00010 */
+ u64 mem_req_arb; /* + 0x00018 */
+ union {
+ u64 q[HEART_MEMORY_BANKS]; /* readq() */
+ u32 l[HEART_MEMORY_BANKS * 2]; /* readl() */
+ } mem_cfg; /* + 0x00020 */
+ /* Flow control (gfx?) */
+ u64 fc_mode; /* + 0x00040 */
+ u64 fc_timer_limit; /* + 0x00048 */
+ u64 fc_addr[2]; /* + 0x00050 */
+ u64 fc_credit_cnt[2]; /* + 0x00060 */
+ u64 fc_timer[2]; /* + 0x00070 */
+ /* Status */
+ u64 status; /* + 0x00080 */
+ /* Bus error */
+ u64 bus_err_addr; /* + 0x00088 */
+ u64 bus_err_misc; /* + 0x00090 */
+ /* Memory error */
+ u64 mem_err_addr; /* + 0x00098 */
+ u64 mem_err_data; /* + 0x000a0 */
+ /* Misc */
+ u64 piur_acc_err; /* + 0x000a8 */
+ u64 mlan_clock_div; /* + 0x000b0 */
+ u64 mlan_ctrl; /* + 0x000b8 */
+ u64 __pad0[0x01e8]; /* + 0x000c0 + 0x0f40 */
+ /* Undefined */
+ u64 undefined; /* + 0x01000 */
+ u64 __pad1[0x1dff]; /* + 0x01008 + 0xeff8 */
+ /* Interrupts */
+ u64 imr[HEART_MAX_CPUS]; /* + 0x10000 */
+ u64 set_isr; /* + 0x10020 */
+ u64 clear_isr; /* + 0x10028 */
+ u64 isr; /* + 0x10030 */
+ u64 imsr; /* + 0x10038 */
+ u64 cause; /* + 0x10040 */
+ u64 __pad2[0x1ff7]; /* + 0x10048 + 0xffb8 */
+ /* Timer */
+ u64 count; /* + 0x20000 */
+ u64 __pad3[0x1fff]; /* + 0x20008 + 0xfff8 */
+ u64 compare; /* + 0x30000 */
+ u64 __pad4[0x1fff]; /* + 0x30008 + 0xfff8 */
+ u64 trigger; /* + 0x40000 */
+ u64 __pad5[0x1fff]; /* + 0x40008 + 0xfff8 */
+ /* Misc */
+ u64 cpuid; /* + 0x50000 */
+ u64 __pad6[0x1fff]; /* + 0x50008 + 0xfff8 */
+ u64 sync; /* + 0x60000 */
+};
+
+
+/* For timer-related bits. */
+#define HEART_NS_PER_CYCLE 80
+#define HEART_CYCLES_PER_SEC (NSEC_PER_SEC / HEART_NS_PER_CYCLE)
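
Each HEART_COUNT tick is 80 ns (a 12.5 MHz clock), so reading the time is a single uncached 64-bit load of the count register. A minimal sketch, assuming platform code has already set up the heart_regs mapping declared at the end of this header:

	/* Minimal sketch: read the free-running 52-bit HEART counter. */
	static u64 heart_count_read(void)
	{
		return heart_read(&heart_regs->count);
	}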
+
+
+/*
+ * XXX: Everything below this comment will either go away or be cleaned
+ * up to fit in better with Linux. A lot of the bit definitions for
+ * HEART were derived from IRIX's sys/RACER/heart.h header file.
+ */
+
+/* HEART Masks */
+#define HEART_ATK_MASK 0x0007ffffffffffff /* HEART attack mask */
+#define HEART_ACK_ALL_MASK 0xffffffffffffffff /* Ack everything */
+#define HEART_CLR_ALL_MASK 0x0000000000000000 /* Clear all */
+#define HEART_BR_ERR_MASK 0x7ff8000000000000 /* BRIDGE error mask */
+#define HEART_CPU0_ERR_MASK 0x8ff8000000000000 /* CPU0 error mask */
+#define HEART_CPU1_ERR_MASK 0x97f8000000000000 /* CPU1 error mask */
+#define HEART_CPU2_ERR_MASK 0xa7f8000000000000 /* CPU2 error mask */
+#define HEART_CPU3_ERR_MASK 0xc7f8000000000000 /* CPU3 error mask */
+#define HEART_ERR_MASK 0x1ff /* HEART error mask */
+#define HEART_ERR_MASK_START 51 /* HEART error start */
+#define HEART_ERR_MASK_END 63 /* HEART error end */
+
+/* Bits in the HEART_MODE register. */
+#define HM_PROC_DISABLE_SHFT 60
+#define HM_PROC_DISABLE_MSK (0xfUL << HM_PROC_DISABLE_SHFT)
+#define HM_PROC_DISABLE(x)	(0x1UL << ((x) + HM_PROC_DISABLE_SHFT))
+#define HM_MAX_PSR (0x7UL << 57)
+#define HM_MAX_IOSR (0x7UL << 54)
+#define HM_MAX_PEND_IOSR (0x7UL << 51)
+#define HM_TRIG_SRC_SEL_MSK (0x7UL << 48)
+#define HM_TRIG_HEART_EXC (0x0UL << 48)
+#define HM_TRIG_REG_BIT (0x1UL << 48)
+#define HM_TRIG_SYSCLK (0x2UL << 48)
+#define HM_TRIG_MEMCLK_2X (0x3UL << 48)
+#define HM_TRIG_MEMCLK (0x4UL << 48)
+#define HM_TRIG_IOCLK (0x5UL << 48)
+#define HM_PIU_TEST_MODE (0xfUL << 40)
+#define HM_GP_FLAG_MSK (0xfUL << 36)
+#define HM_GP_FLAG(x) BIT((x) + 36)
+#define HM_MAX_PROC_HYST (0xfUL << 32)
+#define HM_LLP_WRST_AFTER_RST BIT(28)
+#define HM_LLP_LINK_RST BIT(27)
+#define HM_LLP_WARM_RST BIT(26)
+#define HM_COR_ECC_LCK BIT(25)
+#define HM_REDUCED_PWR BIT(24)
+#define HM_COLD_RST BIT(23)
+#define HM_SW_RST BIT(22)
+#define HM_MEM_FORCE_WR BIT(21)
+#define HM_DB_ERR_GEN BIT(20)
+#define HM_SB_ERR_GEN BIT(19)
+#define HM_CACHED_PIO_EN BIT(18)
+#define HM_CACHED_PROM_EN BIT(17)
+#define HM_PE_SYS_COR_ERE BIT(16)
+#define HM_GLOBAL_ECC_EN BIT(15)
+#define HM_IO_COH_EN BIT(14)
+#define HM_INT_EN BIT(13)
+#define HM_DATA_CHK_EN BIT(12)
+#define HM_REF_EN BIT(11)
+#define HM_BAD_SYSWR_ERE BIT(10)
+#define HM_BAD_SYSRD_ERE BIT(9)
+#define HM_SYSSTATE_ERE BIT(8)
+#define HM_SYSCMD_ERE BIT(7)
+#define HM_NCOR_SYS_ERE BIT(6)
+#define HM_COR_SYS_ERE BIT(5)
+#define HM_DATA_ELMNT_ERE BIT(4)
+#define HM_MEM_ADDR_PROC_ERE BIT(3)
+#define HM_MEM_ADDR_IO_ERE BIT(2)
+#define HM_NCOR_MEM_ERE BIT(1)
+#define HM_COR_MEM_ERE BIT(0)
+
+/* Bits in the HEART_MEM_REF register. */
+#define HEART_MEMREF_REFS(x) ((0xfUL & (x)) << 16)
+#define HEART_MEMREF_PERIOD(x) ((0xffffUL & (x)))
+#define HEART_MEMREF_REFS_VAL HEART_MEMREF_REFS(8)
+#define HEART_MEMREF_PERIOD_VAL HEART_MEMREF_PERIOD(0x4000)
+#define HEART_MEMREF_VAL (HEART_MEMREF_REFS_VAL | \
+ HEART_MEMREF_PERIOD_VAL)
+
+/* Bits in the HEART_MEM_REQ_ARB register. */
+#define HEART_MEMARB_IODIS (1 << 20)
+#define HEART_MEMARB_MAXPMWRQS (15 << 16)
+#define HEART_MEMARB_MAXPMRRQS (15 << 12)
+#define HEART_MEMARB_MAXPMRQS (15 << 8)
+#define HEART_MEMARB_MAXRRRQS (15 << 4)
+#define HEART_MEMARB_MAXGBRRQS (15)
+
+/* Bits in the HEART_MEMCFG<x> registers. */
+#define HEART_MEMCFG_VALID 0x80000000 /* Bank is valid */
+#define HEART_MEMCFG_DENSITY 0x01c00000 /* Mem density */
+#define HEART_MEMCFG_SIZE_MASK 0x003f0000 /* Mem size mask */
+#define HEART_MEMCFG_ADDR_MASK 0x000001ff /* Base addr mask */
+#define HEART_MEMCFG_SIZE_SHIFT 16 /* Mem size shift */
+#define HEART_MEMCFG_DENSITY_SHIFT 22 /* Density Shift */
+#define HEART_MEMCFG_UNIT_SHIFT 25 /* Unit Shift, 32MB */
+
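
As the struct documentation above notes, mem_cfg reports correct values only through 32-bit accesses; a hedged sketch of probing the four banks with these masks (illustrative function, assuming the size field counts 32 MB units per HEART_MEMCFG_UNIT_SHIFT):

	static void heart_probe_banks(void)
	{
		int bank;

		for (bank = 0; bank < HEART_MEMORY_BANKS; bank++) {
			u32 cfg = __raw_readl(&heart_regs->mem_cfg.l[bank]);
			unsigned long size;

			if (!(cfg & HEART_MEMCFG_VALID))
				continue;

			size = ((cfg & HEART_MEMCFG_SIZE_MASK) >>
				HEART_MEMCFG_SIZE_SHIFT) + 1;
			pr_info("bank %d: %lu MB\n", bank,
				(size << HEART_MEMCFG_UNIT_SHIFT) >> 20);
		}
	}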
+/* Bits in the HEART_STATUS register */
+#define HEART_STAT_HSTL_SDRV BIT(14)
+#define HEART_STAT_FC_CR_OUT(x) BIT((x) + 12)
+#define HEART_STAT_DIR_CNNCT BIT(11)
+#define HEART_STAT_TRITON BIT(10)
+#define HEART_STAT_R4K BIT(9)
+#define HEART_STAT_BIG_ENDIAN BIT(8)
+#define HEART_STAT_PROC_SHFT 4
+#define HEART_STAT_PROC_MSK (0xfUL << HEART_STAT_PROC_SHFT)
+#define HEART_STAT_PROC_ACTIVE(x) (0x1UL << ((x) + HEART_STAT_PROC_SHFT))
+#define HEART_STAT_WIDGET_ID 0xf
+
+/* Bits in the HEART_CAUSE register */
+#define HC_PE_SYS_COR_ERR_MSK (0xfUL << 60)
+#define HC_PE_SYS_COR_ERR(x) BIT((x) + 60)
+#define HC_PIOWDB_OFLOW BIT(44)
+#define HC_PIORWRB_OFLOW BIT(43)
+#define HC_PIUR_ACC_ERR BIT(42)
+#define HC_BAD_SYSWR_ERR BIT(41)
+#define HC_BAD_SYSRD_ERR BIT(40)
+#define HC_SYSSTATE_ERR_MSK (0xfUL << 36)
+#define HC_SYSSTATE_ERR(x) BIT((x) + 36)
+#define HC_SYSCMD_ERR_MSK (0xfUL << 32)
+#define HC_SYSCMD_ERR(x) BIT((x) + 32)
+#define HC_NCOR_SYSAD_ERR_MSK (0xfUL << 28)
+#define HC_NCOR_SYSAD_ERR(x) BIT((x) + 28)
+#define HC_COR_SYSAD_ERR_MSK (0xfUL << 24)
+#define HC_COR_SYSAD_ERR(x) BIT((x) + 24)
+#define HC_DATA_ELMNT_ERR_MSK (0xfUL << 20)
+#define HC_DATA_ELMNT_ERR(x) BIT((x) + 20)
+#define HC_WIDGET_ERR BIT(16)
+#define HC_MEM_ADDR_ERR_PROC_MSK (0xfUL << 4)
+#define HC_MEM_ADDR_ERR_PROC(x) BIT((x) + 4)
+#define HC_MEM_ADDR_ERR_IO BIT(2)
+#define HC_NCOR_MEM_ERR BIT(1)
+#define HC_COR_MEM_ERR BIT(0)
+
+extern struct ip30_heart_regs __iomem *heart_regs;
+
+#define heart_read ____raw_readq
+#define heart_write ____raw_writeq
+
+#endif /* __ASM_SGI_HEART_H */
diff --git a/arch/mips/include/asm/sgi/sgi.h b/arch/mips/include/asm/sgi/sgi.h
deleted file mode 100644
index b61557151e3f..000000000000
--- a/arch/mips/include/asm/sgi/sgi.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * sgi.h: Definitions specific to SGI machines.
- *
- * Copyright (C) 1996 David S. Miller (dm@sgi.com)
- */
-#ifndef _ASM_SGI_SGI_H
-#define _ASM_SGI_SGI_H
-
-/* UP=UniProcessor MP=MultiProcessor(capable) */
-enum sgi_mach {
- ip4, /* R2k UP */
- ip5, /* R2k MP */
- ip6, /* R3k UP */
- ip7, /* R3k MP */
- ip9, /* R3k UP */
- ip12, /* R3kA UP, Indigo */
- ip15, /* R3kA MP */
- ip17, /* R4K UP */
- ip19, /* R4K MP */
- ip20, /* R4K UP, Indigo */
- ip21, /* R8k/TFP MP */
- ip22, /* R4x00 UP, Indy, Indigo2 */
- ip25, /* R10k MP */
- ip26, /* R8k/TFP UP, Indigo2 */
- ip27, /* R10k MP, R12k MP, R14k MP, Origin 200/2k, Onyx2 */
- ip28, /* R10k UP, Indigo2 Impact R10k */
- ip30, /* R10k MP, R12k MP, R14k MP, Octane */
- ip32, /* R5k UP, RM5200 UP, RM7k UP, R10k UP, R12k UP, O2 */
- ip35, /* R14k MP, R16k MP, Origin 300/3k, Onyx3, Fuel, Tezro */
-};
-
-extern enum sgi_mach sgimach;
-extern void sgi_sysinit(void);
-
-/* Many I/O space registers are byte sized and are contained within
- * one byte per word, specifically the MSB, this macro helps out.
- */
-#ifdef __MIPSEL__
-#define SGI_MSB(regaddr) (regaddr)
-#else
-#define SGI_MSB(regaddr) ((regaddr) | 0x3)
-#endif
-
-#endif /* _ASM_SGI_SGI_H */
diff --git a/arch/mips/include/asm/sgialib.h b/arch/mips/include/asm/sgialib.h
index 0d9fad5915fe..80f900417f7e 100644
--- a/arch/mips/include/asm/sgialib.h
+++ b/arch/mips/include/asm/sgialib.h
@@ -15,14 +15,6 @@
#include <asm/sgiarcs.h>
extern struct linux_romvec *romvec;
-extern int prom_argc;
-
-extern LONG *_prom_argv, *_prom_envp;
-
-/* A 32-bit ARC PROM pass arguments and environment as 32-bit pointer.
- These macros take care of sign extension. */
-#define prom_argv(index) ((char *) (long) _prom_argv[(index)])
-#define prom_argc(index) ((char *) (long) _prom_argc[(index)])
extern int prom_flags;
@@ -47,12 +39,6 @@ extern void prom_meminit(void);
/* PROM device tree library routines. */
#define PROM_NULL_COMPONENT ((pcomponent *) 0)
-/* Get sibling component of THIS. */
-extern pcomponent *ArcGetPeer(pcomponent *this);
-
-/* Get child component of THIS. */
-extern pcomponent *ArcGetChild(pcomponent *this);
-
/* This is called at prom_init time to identify the
* ARC architecture we are running on
*/
@@ -60,22 +46,16 @@ extern void prom_identify_arch(void);
/* Environment variable routines. */
extern PCHAR ArcGetEnvironmentVariable(PCHAR name);
-extern LONG ArcSetEnvironmentVariable(PCHAR name, PCHAR value);
/* ARCS command line parsing. */
-extern void prom_init_cmdline(void);
+extern void prom_init_cmdline(int argc, LONG *argv);
/* File operations. */
extern LONG ArcRead(ULONG fd, PVOID buf, ULONG num, PULONG cnt);
extern LONG ArcWrite(ULONG fd, PVOID buf, ULONG num, PULONG cnt);
/* Misc. routines. */
-extern VOID ArcHalt(VOID) __noreturn;
-extern VOID ArcPowerDown(VOID) __noreturn;
-extern VOID ArcRestart(VOID) __noreturn;
-extern VOID ArcReboot(VOID) __noreturn;
extern VOID ArcEnterInteractiveMode(VOID) __noreturn;
-extern VOID ArcFlushAllCaches(VOID);
extern DISPLAY_STATUS *ArcGetDisplayStatus(ULONG FileID);
#endif /* _ASM_SGIALIB_H */
diff --git a/arch/mips/include/asm/sgiarcs.h b/arch/mips/include/asm/sgiarcs.h
index 105a9479ac5f..e1512cab180b 100644
--- a/arch/mips/include/asm/sgiarcs.h
+++ b/arch/mips/include/asm/sgiarcs.h
@@ -12,6 +12,8 @@
#ifndef _ASM_SGIARCS_H
#define _ASM_SGIARCS_H
+#include <linux/kernel.h>
+
#include <asm/types.h>
#include <asm/fw/arc/types.h>
@@ -368,110 +370,65 @@ struct linux_smonblock {
#if defined(CONFIG_64BIT) && defined(CONFIG_FW_ARC32)
-#define __arc_clobbers \
- "$2", "$3" /* ... */, "$8", "$9", "$10", "$11", \
- "$12", "$13", "$14", "$15", "$16", "$24", "$25", "$31"
+extern long call_o32(long vec, void *stack, ...);
+
+extern u64 o32_stk[4096];
+#define O32_STK (&o32_stk[ARRAY_SIZE(o32_stk)])
#define ARC_CALL0(dest) \
({ long __res; \
long __vec = (long) romvec->dest; \
- __asm__ __volatile__( \
- "dsubu\t$29, 32\n\t" \
- "jalr\t%1\n\t" \
- "daddu\t$29, 32\n\t" \
- "move\t%0, $2" \
- : "=r" (__res), "=r" (__vec) \
- : "1" (__vec) \
- : __arc_clobbers, "$4", "$5", "$6", "$7"); \
- (unsigned long) __res; \
+ __res = call_o32(__vec, O32_STK); \
+ __res; \
})
#define ARC_CALL1(dest, a1) \
({ long __res; \
- register signed int __a1 __asm__("$4") = (int) (long) (a1); \
+ int __a1 = (int) (long) (a1); \
long __vec = (long) romvec->dest; \
- __asm__ __volatile__( \
- "dsubu\t$29, 32\n\t" \
- "jalr\t%1\n\t" \
- "daddu\t$29, 32\n\t" \
- "move\t%0, $2" \
- : "=r" (__res), "=r" (__vec) \
- : "1" (__vec), "r" (__a1) \
- : __arc_clobbers, "$5", "$6", "$7"); \
- (unsigned long) __res; \
+ __res = call_o32(__vec, O32_STK, __a1); \
+ __res; \
})
#define ARC_CALL2(dest, a1, a2) \
({ long __res; \
- register signed int __a1 __asm__("$4") = (int) (long) (a1); \
- register signed int __a2 __asm__("$5") = (int) (long) (a2); \
+ int __a1 = (int) (long) (a1); \
+ int __a2 = (int) (long) (a2); \
long __vec = (long) romvec->dest; \
- __asm__ __volatile__( \
- "dsubu\t$29, 32\n\t" \
- "jalr\t%1\n\t" \
- "daddu\t$29, 32\n\t" \
- "move\t%0, $2" \
- : "=r" (__res), "=r" (__vec) \
- : "1" (__vec), "r" (__a1), "r" (__a2) \
- : __arc_clobbers, "$6", "$7"); \
+ __res = call_o32(__vec, O32_STK, __a1, __a2); \
__res; \
})
#define ARC_CALL3(dest, a1, a2, a3) \
({ long __res; \
- register signed int __a1 __asm__("$4") = (int) (long) (a1); \
- register signed int __a2 __asm__("$5") = (int) (long) (a2); \
- register signed int __a3 __asm__("$6") = (int) (long) (a3); \
+ int __a1 = (int) (long) (a1); \
+ int __a2 = (int) (long) (a2); \
+ int __a3 = (int) (long) (a3); \
long __vec = (long) romvec->dest; \
- __asm__ __volatile__( \
- "dsubu\t$29, 32\n\t" \
- "jalr\t%1\n\t" \
- "daddu\t$29, 32\n\t" \
- "move\t%0, $2" \
- : "=r" (__res), "=r" (__vec) \
- : "1" (__vec), "r" (__a1), "r" (__a2), "r" (__a3) \
- : __arc_clobbers, "$7"); \
+ __res = call_o32(__vec, O32_STK, __a1, __a2, __a3); \
__res; \
})
#define ARC_CALL4(dest, a1, a2, a3, a4) \
({ long __res; \
- register signed int __a1 __asm__("$4") = (int) (long) (a1); \
- register signed int __a2 __asm__("$5") = (int) (long) (a2); \
- register signed int __a3 __asm__("$6") = (int) (long) (a3); \
- register signed int __a4 __asm__("$7") = (int) (long) (a4); \
+ int __a1 = (int) (long) (a1); \
+ int __a2 = (int) (long) (a2); \
+ int __a3 = (int) (long) (a3); \
+ int __a4 = (int) (long) (a4); \
long __vec = (long) romvec->dest; \
- __asm__ __volatile__( \
- "dsubu\t$29, 32\n\t" \
- "jalr\t%1\n\t" \
- "daddu\t$29, 32\n\t" \
- "move\t%0, $2" \
- : "=r" (__res), "=r" (__vec) \
- : "1" (__vec), "r" (__a1), "r" (__a2), "r" (__a3), \
- "r" (__a4) \
- : __arc_clobbers); \
+ __res = call_o32(__vec, O32_STK, __a1, __a2, __a3, __a4); \
__res; \
})
-#define ARC_CALL5(dest, a1, a2, a3, a4, a5) \
+#define ARC_CALL5(dest, a1, a2, a3, a4, a5) \
({ long __res; \
- register signed int __a1 __asm__("$4") = (int) (long) (a1); \
- register signed int __a2 __asm__("$5") = (int) (long) (a2); \
- register signed int __a3 __asm__("$6") = (int) (long) (a3); \
- register signed int __a4 __asm__("$7") = (int) (long) (a4); \
- register signed int __a5 = (int) (long) (a5); \
+ int __a1 = (int) (long) (a1); \
+ int __a2 = (int) (long) (a2); \
+ int __a3 = (int) (long) (a3); \
+ int __a4 = (int) (long) (a4); \
+ int __a5 = (int) (long) (a5); \
long __vec = (long) romvec->dest; \
- __asm__ __volatile__( \
- "dsubu\t$29, 32\n\t" \
- "sw\t%7, 16($29)\n\t" \
- "jalr\t%1\n\t" \
- "daddu\t$29, 32\n\t" \
- "move\t%0, $2" \
- : "=r" (__res), "=r" (__vec) \
- : "1" (__vec), \
- "r" (__a1), "r" (__a2), "r" (__a3), "r" (__a4), \
- "r" (__a5) \
- : __arc_clobbers); \
+ __res = call_o32(__vec, O32_STK, __a1, __a2, __a3, __a4, __a5); \
__res; \
})
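
Every ARC PROM entry point now funnels through call_o32(), which switches onto the dedicated o32_stk stack and performs the o32 calling-convention setup that the deleted inline asm open-coded once per arity. Existing wrappers are untouched by the conversion; a sketch of a typical caller:

	LONG ArcWrite(ULONG fd, PVOID buf, ULONG num, PULONG cnt)
	{
		return ARC_CALL4(write, fd, buf, num, cnt);
	}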
diff --git a/arch/mips/include/asm/sn/agent.h b/arch/mips/include/asm/sn/agent.h
index e33d09293019..7e9b3271737a 100644
--- a/arch/mips/include/asm/sn/agent.h
+++ b/arch/mips/include/asm/sn/agent.h
@@ -26,7 +26,7 @@
#if defined(CONFIG_SGI_IP27)
#define HUB_NIC_ADDR(_cpuid) \
- REMOTE_HUB_ADDR(COMPACT_TO_NASID_NODEID(cpu_to_node(_cpuid)), \
+ REMOTE_HUB_ADDR(cpu_to_node(_cpuid), \
MD_MLAN_CTL)
#endif
diff --git a/arch/mips/include/asm/sn/arch.h b/arch/mips/include/asm/sn/arch.h
index 3f1fb1454749..f7d3273d9599 100644
--- a/arch/mips/include/asm/sn/arch.h
+++ b/arch/mips/include/asm/sn/arch.h
@@ -19,44 +19,13 @@
#define cputonasid(cpu) (sn_cpu_info[(cpu)].p_nasid)
#define cputoslice(cpu) (sn_cpu_info[(cpu)].p_slice)
-#define makespnum(_nasid, _slice) \
- (((_nasid) << CPUS_PER_NODE_SHFT) | (_slice))
#define INVALID_NASID (nasid_t)-1
-#define INVALID_CNODEID (cnodeid_t)-1
#define INVALID_PNODEID (pnodeid_t)-1
#define INVALID_MODULE (moduleid_t)-1
#define INVALID_PARTID (partid_t)-1
extern nasid_t get_nasid(void);
-extern cnodeid_t get_cpu_cnode(cpuid_t);
extern int get_cpu_slice(cpuid_t);
-/*
- * NO ONE should access these arrays directly. The only reason we refer to
- * them here is to avoid the procedure call that would be required in the
- * macros below. (Really want private data members here :-)
- */
-extern cnodeid_t nasid_to_compact_node[MAX_NASIDS];
-extern nasid_t compact_to_nasid_node[MAX_COMPACT_NODES];
-
-/*
- * These macros are used by various parts of the kernel to convert
- * between the three different kinds of node numbering. At least some
- * of them may change to procedure calls in the future, but the macros
- * will continue to work. Don't use the arrays above directly.
- */
-
-#define NASID_TO_REGION(nnode) \
- ((nnode) >> \
- (is_fine_dirmode() ? NASID_TO_FINEREG_SHFT : NASID_TO_COARSEREG_SHFT))
-
-extern cnodeid_t nasid_to_compact_node[MAX_NASIDS];
-extern nasid_t compact_to_nasid_node[MAX_COMPACT_NODES];
-extern cnodeid_t cpuid_to_compact_node[MAXCPUS];
-
-#define NASID_TO_COMPACT_NODEID(nnode) (nasid_to_compact_node[nnode])
-#define COMPACT_TO_NASID_NODEID(cnode) (compact_to_nasid_node[cnode])
-#define CPUID_TO_COMPACT_NODEID(cpu) (cpuid_to_compact_node[(cpu)])
-
#endif /* _ASM_SN_ARCH_H */
diff --git a/arch/mips/include/asm/sn/gda.h b/arch/mips/include/asm/sn/gda.h
index 85fa1b5f639d..d52f81620661 100644
--- a/arch/mips/include/asm/sn/gda.h
+++ b/arch/mips/include/asm/sn/gda.h
@@ -60,9 +60,7 @@ typedef struct gda {
/* Pointer to a mask of nodes with copies
* of the kernel. */
char g_padding[56]; /* pad out to 128 bytes */
- nasid_t g_nasidtable[MAX_COMPACT_NODES]; /* NASID of each node,
- * indexed by cnodeid.
- */
+ nasid_t g_nasidtable[MAX_NUMNODES]; /* NASID of each node */
} gda_t;
#define GDA ((gda_t*) GDA_ADDR(get_nasid()))
diff --git a/arch/mips/include/asm/sn/hub.h b/arch/mips/include/asm/sn/hub.h
index 338f7eed74f1..45878fbefbae 100644
--- a/arch/mips/include/asm/sn/hub.h
+++ b/arch/mips/include/asm/sn/hub.h
@@ -10,8 +10,8 @@
#include <asm/xtalk/xtalk.h>
/* ip27-hubio.c */
-extern unsigned long hub_pio_map(cnodeid_t cnode, xwidgetnum_t widget,
+extern unsigned long hub_pio_map(nasid_t nasid, xwidgetnum_t widget,
unsigned long xtalk_addr, size_t size);
-extern void hub_pio_init(cnodeid_t cnode);
+extern void hub_pio_init(nasid_t nasid);
#endif /* __ASM_SN_HUB_H */
diff --git a/arch/mips/include/asm/sn/ioc3.h b/arch/mips/include/asm/sn/ioc3.h
index a947eed48fee..78ef760ddde4 100644
--- a/arch/mips/include/asm/sn/ioc3.h
+++ b/arch/mips/include/asm/sn/ioc3.h
@@ -590,4 +590,13 @@ struct ioc3_etxd {
#define MIDR_DATA_MASK 0x0000ffff
+/* subsystem IDs supplied by card detection in pci-xtalk-bridge */
+#define IOC3_SUBSYS_IP27_BASEIO6G 0xc300
+#define IOC3_SUBSYS_IP27_MIO 0xc301
+#define IOC3_SUBSYS_IP27_BASEIO 0xc302
+#define IOC3_SUBSYS_IP29_SYSBOARD 0xc303
+#define IOC3_SUBSYS_IP30_SYSBOARD 0xc304
+#define IOC3_SUBSYS_MENET 0xc305
+#define IOC3_SUBSYS_MENET4 0xc306
+
#endif /* MIPS_SN_IOC3_H */
diff --git a/arch/mips/include/asm/sn/mapped_kernel.h b/arch/mips/include/asm/sn/mapped_kernel.h
index 2f3efa91c16e..3f1049807018 100644
--- a/arch/mips/include/asm/sn/mapped_kernel.h
+++ b/arch/mips/include/asm/sn/mapped_kernel.h
@@ -37,10 +37,10 @@
#define MAPPED_KERN_RO_TO_PHYS(x) \
((unsigned long)MAPPED_ADDR_RO_TO_PHYS(x) | \
- MAPPED_KERN_RO_PHYSBASE(get_compact_nodeid()))
+ MAPPED_KERN_RO_PHYSBASE(get_nasid()))
#define MAPPED_KERN_RW_TO_PHYS(x) \
((unsigned long)MAPPED_ADDR_RW_TO_PHYS(x) | \
- MAPPED_KERN_RW_PHYSBASE(get_compact_nodeid()))
+ MAPPED_KERN_RW_PHYSBASE(get_nasid()))
#else /* CONFIG_MAPPED_KERNEL */
diff --git a/arch/mips/include/asm/sn/sn0/arch.h b/arch/mips/include/asm/sn/sn0/arch.h
index 425a67e6a947..12f4c4649ff0 100644
--- a/arch/mips/include/asm/sn/sn0/arch.h
+++ b/arch/mips/include/asm/sn/sn0/arch.h
@@ -12,25 +12,11 @@
#define _ASM_SN_SN0_ARCH_H
-#ifndef SN0XXL /* 128 cpu SMP max */
-/*
- * This is the maximum number of nodes that can be part of a kernel.
- * Effectively, it's the maximum number of compact node ids (cnodeid_t).
- */
-#define MAX_COMPACT_NODES 64
-
/*
* MAXCPUS refers to the maximum number of CPUs in a single kernel.
* This is not necessarily the same as MAXNODES * CPUS_PER_NODE
*/
-#define MAXCPUS 128
-
-#else /* SN0XXL system */
-
-#define MAX_COMPACT_NODES 128
-#define MAXCPUS 256
-
-#endif /* SN0XXL */
+#define MAXCPUS (MAX_NUMNODES * CPUS_PER_NODE)
/*
* This is the maximum number of NASIDS that can be present in a system.
@@ -66,7 +52,5 @@
#define SLOT_MIN_MEM_SIZE (32*1024*1024)
#define CPUS_PER_NODE 2 /* CPUs on a single hub */
-#define CPUS_PER_NODE_SHFT 1 /* Bits to shift in the node number */
-#define CPUS_PER_SUBNODE 2 /* CPUs on a single hub PI */
#endif /* _ASM_SN_SN0_ARCH_H */
diff --git a/arch/mips/include/asm/sn/sn_private.h b/arch/mips/include/asm/sn/sn_private.h
index f09ba846c644..63a2c30d81c6 100644
--- a/arch/mips/include/asm/sn/sn_private.h
+++ b/arch/mips/include/asm/sn/sn_private.h
@@ -7,14 +7,13 @@
extern nasid_t master_nasid;
extern void cpu_node_probe(void);
-extern cnodeid_t get_compact_nodeid(void);
-extern void hub_rtc_init(cnodeid_t);
+extern void hub_rtc_init(nasid_t nasid);
extern void cpu_time_init(void);
extern void per_cpu_init(void);
extern void install_cpu_nmi_handler(int slice);
extern void install_ipi(void);
extern void setup_replication_mask(void);
extern void replicate_kernel_text(void);
-extern unsigned long node_getfirstfree(cnodeid_t);
+extern unsigned long node_getfirstfree(nasid_t nasid);
#endif /* __ASM_SN_SN_PRIVATE_H */
diff --git a/arch/mips/include/asm/sn/types.h b/arch/mips/include/asm/sn/types.h
index 6d24d4e8b9ed..203c927e84d1 100644
--- a/arch/mips/include/asm/sn/types.h
+++ b/arch/mips/include/asm/sn/types.h
@@ -12,13 +12,9 @@
#include <linux/types.h>
typedef unsigned long cpuid_t;
-typedef unsigned long cnodemask_t;
typedef signed short nasid_t; /* node id in numa-as-id space */
-typedef signed short cnodeid_t; /* node id in compact-id space */
typedef signed char partid_t; /* partition ID type */
typedef signed short moduleid_t; /* user-visible module number type */
-typedef signed short cmoduleid_t; /* kernel compact module id type */
-typedef unsigned char clusterid_t; /* Clusterid of the cell */
typedef dev_t vertex_hdl_t; /* hardware graph vertex handle */
diff --git a/arch/mips/include/asm/string.h b/arch/mips/include/asm/string.h
index 29030cb398ee..1de3bbce8e88 100644
--- a/arch/mips/include/asm/string.h
+++ b/arch/mips/include/asm/string.h
@@ -10,127 +10,6 @@
#ifndef _ASM_STRING_H
#define _ASM_STRING_H
-
-/*
- * Most of the inline functions are rather naive implementations so I just
- * didn't bother updating them for 64-bit ...
- */
-#ifdef CONFIG_32BIT
-
-#ifndef IN_STRING_C
-
-#define __HAVE_ARCH_STRCPY
-static __inline__ char *strcpy(char *__dest, __const__ char *__src)
-{
- char *__xdest = __dest;
-
- __asm__ __volatile__(
- ".set\tnoreorder\n\t"
- ".set\tnoat\n"
- "1:\tlbu\t$1,(%1)\n\t"
- "addiu\t%1,1\n\t"
- "sb\t$1,(%0)\n\t"
- "bnez\t$1,1b\n\t"
- "addiu\t%0,1\n\t"
- ".set\tat\n\t"
- ".set\treorder"
- : "=r" (__dest), "=r" (__src)
- : "0" (__dest), "1" (__src)
- : "memory");
-
- return __xdest;
-}
-
-#define __HAVE_ARCH_STRNCPY
-static __inline__ char *strncpy(char *__dest, __const__ char *__src, size_t __n)
-{
- char *__xdest = __dest;
-
- if (__n == 0)
- return __xdest;
-
- __asm__ __volatile__(
- ".set\tnoreorder\n\t"
- ".set\tnoat\n"
- "1:\tlbu\t$1,(%1)\n\t"
- "subu\t%2,1\n\t"
- "sb\t$1,(%0)\n\t"
- "beqz\t$1,2f\n\t"
- "addiu\t%0,1\n\t"
- "bnez\t%2,1b\n\t"
- "addiu\t%1,1\n"
- "2:\n\t"
- ".set\tat\n\t"
- ".set\treorder"
- : "=r" (__dest), "=r" (__src), "=r" (__n)
- : "0" (__dest), "1" (__src), "2" (__n)
- : "memory");
-
- return __xdest;
-}
-
-#define __HAVE_ARCH_STRCMP
-static __inline__ int strcmp(__const__ char *__cs, __const__ char *__ct)
-{
- int __res;
-
- __asm__ __volatile__(
- ".set\tnoreorder\n\t"
- ".set\tnoat\n\t"
- "lbu\t%2,(%0)\n"
- "1:\tlbu\t$1,(%1)\n\t"
- "addiu\t%0,1\n\t"
- "bne\t$1,%2,2f\n\t"
- "addiu\t%1,1\n\t"
- "bnez\t%2,1b\n\t"
- "lbu\t%2,(%0)\n\t"
-#if defined(CONFIG_CPU_R3000)
- "nop\n\t"
-#endif
- "move\t%2,$1\n"
- "2:\tsubu\t%2,$1\n"
- "3:\t.set\tat\n\t"
- ".set\treorder"
- : "=r" (__cs), "=r" (__ct), "=r" (__res)
- : "0" (__cs), "1" (__ct));
-
- return __res;
-}
-
-#endif /* !defined(IN_STRING_C) */
-
-#define __HAVE_ARCH_STRNCMP
-static __inline__ int
-strncmp(__const__ char *__cs, __const__ char *__ct, size_t __count)
-{
- int __res;
-
- __asm__ __volatile__(
- ".set\tnoreorder\n\t"
- ".set\tnoat\n"
- "1:\tlbu\t%3,(%0)\n\t"
- "beqz\t%2,2f\n\t"
- "lbu\t$1,(%1)\n\t"
- "subu\t%2,1\n\t"
- "bne\t$1,%3,3f\n\t"
- "addiu\t%0,1\n\t"
- "bnez\t%3,1b\n\t"
- "addiu\t%1,1\n"
- "2:\n\t"
-#if defined(CONFIG_CPU_R3000)
- "nop\n\t"
-#endif
- "move\t%3,$1\n"
- "3:\tsubu\t%3,$1\n\t"
- ".set\tat\n\t"
- ".set\treorder"
- : "=r" (__cs), "=r" (__ct), "=r" (__count), "=r" (__res)
- : "0" (__cs), "1" (__ct), "2" (__count));
-
- return __res;
-}
-#endif /* CONFIG_32BIT */
-
#define __HAVE_ARCH_MEMSET
extern void *memset(void *__s, int __c, size_t __count);
diff --git a/arch/mips/include/asm/sync.h b/arch/mips/include/asm/sync.h
new file mode 100644
index 000000000000..7c6a1095f556
--- /dev/null
+++ b/arch/mips/include/asm/sync.h
@@ -0,0 +1,207 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __MIPS_ASM_SYNC_H__
+#define __MIPS_ASM_SYNC_H__
+
+/*
+ * sync types are defined by the MIPS64 Instruction Set documentation in Volume
+ * II-A of the MIPS Architecture Reference Manual, which can be found here:
+ *
+ * https://www.mips.com/?do-download=the-mips64-instruction-set-v6-06
+ *
+ * Two types of barrier are provided:
+ *
+ * 1) Completion barriers, which ensure that a memory operation has actually
+ * completed & often involve stalling the CPU pipeline to do so.
+ *
+ * 2) Ordering barriers, which only ensure that affected memory operations
+ * won't be reordered in the CPU pipeline in a manner that violates the
+ * restrictions imposed by the barrier.
+ *
+ * Ordering barriers can be more efficient than completion barriers, since:
+ *
+ * a) Ordering barriers only require memory access instructions which precede
+ * them in program order (older instructions) to reach a point in the
+ * load/store datapath beyond which reordering is not possible before
+ * allowing memory access instructions which follow them (younger
+ * instructions) to be performed. That is, older instructions don't
+ * actually need to complete - they just need to get far enough that all
+ * other coherent CPUs will observe their completion before they observe
+ * the effects of younger instructions.
+ *
+ * b) Multiple variants of ordering barrier are provided which allow the
+ * effects to be restricted to different combinations of older or younger
+ * loads or stores. By way of example, if we only care that stores older
+ * than a barrier are observed prior to stores that are younger than a
+ * barrier & don't care about the ordering of loads then the 'wmb'
+ * ordering barrier can be used. Limiting the barrier's effects to stores
+ * allows loads to continue unaffected & potentially allows the CPU to
+ * make progress faster than if younger loads had to wait for older stores
+ * to complete.
+ */
+
+/*
+ * No sync instruction at all; used to allow code to nullify the effect of the
+ * __SYNC() macro without needing lots of #ifdefery.
+ */
+#define __SYNC_none -1
+
+/*
+ * A full completion barrier; all memory accesses appearing prior to this sync
+ * instruction in program order must complete before any memory accesses
+ * appearing after this sync instruction in program order.
+ */
+#define __SYNC_full 0x00
+
+/*
+ * For now we use a full completion barrier to implement all sync types, until
+ * we're satisfied that lightweight ordering barriers defined by MIPSr6 are
+ * sufficient to uphold our desired memory model.
+ */
+#define __SYNC_aq __SYNC_full
+#define __SYNC_rl __SYNC_full
+#define __SYNC_mb __SYNC_full
+
+/*
+ * ...except on Cavium Octeon CPUs, which have been using the 'wmb' ordering
+ * barrier since 2010 & omit 'rmb' barriers because the CPUs don't perform
+ * speculative reads.
+ */
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+# define __SYNC_rmb __SYNC_none
+# define __SYNC_wmb 0x04
+#else
+# define __SYNC_rmb __SYNC_full
+# define __SYNC_wmb __SYNC_full
+#endif
+
+/*
+ * A GINV sync is a little different; it doesn't relate directly to loads or
+ * stores, but instead causes synchronization of an icache or TLB global
+ * invalidation operation triggered by the ginvi or ginvt instructions
+ * respectively. In cases where we need to know that a ginvi or ginvt operation
+ * has been performed by all coherent CPUs, we must issue a sync instruction of
+ * this type. Once this instruction graduates all coherent CPUs will have
+ * observed the invalidation.
+ */
+#define __SYNC_ginv 0x14
+
+/* Trivial; indicate that we always need this sync instruction. */
+#define __SYNC_always (1 << 0)
+
+/*
+ * Indicate that we need this sync instruction only on systems with weakly
+ * ordered memory access. In general this is most MIPS systems, but there are
+ * exceptions which provide strongly ordered memory.
+ */
+#ifdef CONFIG_WEAK_ORDERING
+# define __SYNC_weak_ordering (1 << 1)
+#else
+# define __SYNC_weak_ordering 0
+#endif
+
+/*
+ * Indicate that we need this sync instruction only on systems where LL/SC
+ * don't implicitly provide a memory barrier. In general this is most MIPS
+ * systems.
+ */
+#ifdef CONFIG_WEAK_REORDERING_BEYOND_LLSC
+# define __SYNC_weak_llsc (1 << 2)
+#else
+# define __SYNC_weak_llsc 0
+#endif
+
+/*
+ * Some Loongson 3 CPUs have a bug wherein execution of a memory access (load,
+ * store or prefetch) in between an LL & SC can cause the SC instruction to
+ * erroneously succeed, breaking atomicity. Whilst it's unusual to write code
+ * containing such sequences, this bug bites harder than we might otherwise
+ * expect due to reordering & speculation:
+ *
+ * 1) A memory access appearing prior to the LL in program order may actually
+ * be executed after the LL - this is the reordering case.
+ *
+ * In order to avoid this we need to place a memory barrier (ie. a SYNC
+ * instruction) prior to every LL instruction, in between it and any earlier
+ * memory access instructions.
+ *
+ * This reordering case is fixed by 3A R2 CPUs, ie. 3A2000 models and later.
+ *
+ * 2) If a conditional branch exists between an LL & SC with a target outside
+ * of the LL-SC loop, for example an exit upon value mismatch in cmpxchg()
+ * or similar, then misprediction of the branch may allow speculative
+ * execution of memory accesses from outside of the LL-SC loop.
+ *
+ * In order to avoid this we need a memory barrier (ie. a SYNC instruction)
+ * at each affected branch target.
+ *
+ * This case affects all current Loongson 3 CPUs.
+ *
+ * The cases described above cause an error in the cache coherence
+ * protocol: the Invalidate of a competing LL-SC goes 'missing', so the
+ * SC erroneously observes that its core still holds the line in the
+ * Exclusive state and lets the SC proceed.
+ *
+ * Therefore the error only occurs on SMP systems.
+ */
+#ifdef CONFIG_CPU_LOONGSON3_WORKAROUNDS
+# define __SYNC_loongson3_war (1 << 31)
+#else
+# define __SYNC_loongson3_war 0
+#endif
+
+/*
+ * Some Cavium Octeon CPUs suffer from a bug that causes a single wmb ordering
+ * barrier to be ineffective, requiring the use of 2 in sequence to provide an
+ * effective barrier as noted by commit 6b07d38aaa52 ("MIPS: Octeon: Use
+ * optimized memory barrier primitives."). Here we specify that the affected
+ * sync instructions should be emitted twice.
+ */
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+# define __SYNC_rpt(type) (1 + (type == __SYNC_wmb))
+#else
+# define __SYNC_rpt(type) 1
+#endif
+
+/*
+ * The main event. Here we actually emit a sync instruction of a given type, if
+ * reason is non-zero.
+ *
+ * In future we have the option of emitting entries in a fixups-style table
+ * here that would allow us to opportunistically remove some sync instructions
+ * when we detect at runtime that we're running on a CPU that doesn't need
+ * them.
+ */
+#ifdef CONFIG_CPU_HAS_SYNC
+# define ____SYNC(_type, _reason, _else) \
+ .if (( _type ) != -1) && ( _reason ); \
+ .set push; \
+ .set MIPS_ISA_LEVEL_RAW; \
+ .rept __SYNC_rpt(_type); \
+ sync _type; \
+ .endr; \
+ .set pop; \
+ .else; \
+ _else; \
+ .endif
+#else
+# define ____SYNC(_type, _reason, _else)
+#endif
+
+/*
+ * Preprocessor magic to expand macros used as arguments before we insert them
+ * into assembly code.
+ */
+#ifdef __ASSEMBLY__
+# define ___SYNC(type, reason, else) \
+ ____SYNC(type, reason, else)
+#else
+# define ___SYNC(type, reason, else) \
+ __stringify(____SYNC(type, reason, else))
+#endif
+
+#define __SYNC(type, reason) \
+ ___SYNC(__SYNC_##type, __SYNC_##reason, )
+#define __SYNC_ELSE(type, reason, else) \
+ ___SYNC(__SYNC_##type, __SYNC_##reason, else)
+
+#endif /* __MIPS_ASM_SYNC_H__ */
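As a quick illustration of how this header is meant to be consumed from C
code - a sketch only, with hypothetical wrapper names; asm/barrier.h gains
very similar definitions later in this series:

#include <asm/sync.h>

/* Unconditional full completion barrier; in C code __SYNC() expands to a
 * string, so it can be pasted straight into an inline asm body. */
#define example_sync()		\
	asm volatile(__SYNC(full, always) ::: "memory")

/* Emitted only when CONFIG_WEAK_ORDERING is set; on strongly ordered
 * configurations the .if inside ____SYNC() elides the instruction. */
#define example_smp_mb()	\
	asm volatile(__SYNC(mb, weak_ordering) ::: "memory")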
diff --git a/arch/mips/include/asm/unroll.h b/arch/mips/include/asm/unroll.h
new file mode 100644
index 000000000000..c628747d4ecd
--- /dev/null
+++ b/arch/mips/include/asm/unroll.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __ASM_UNROLL_H__
+#define __ASM_UNROLL_H__
+
+/*
+ * Explicitly unroll a loop, for use in cases where doing so is performance
+ * critical.
+ *
+ * Ideally we'd rely upon the compiler to provide this but there's no commonly
+ * available means to do so. For example GCC's "#pragma GCC unroll"
+ * functionality would be ideal but is only available from GCC 8 onwards. Using
+ * -funroll-loops is an option but GCC tends to make poor choices when
+ * compiling our string functions. -funroll-all-loops leads to massive code
+ * bloat, even if only applied to the string functions.
+ */
+#define unroll(times, fn, ...) do { \
+ extern void bad_unroll(void) \
+ __compiletime_error("Unsupported unroll"); \
+ \
+ /* \
+ * We can't unroll if the number of iterations isn't \
+ * compile-time constant. Unfortunately GCC versions \
+ * up until 4.6 tend to miss obvious constants & cause \
+ * this check to fail, even though they go on to \
+ * generate reasonable code for the switch statement, \
+ * so we skip the sanity check for those compilers. \
+ */ \
+ BUILD_BUG_ON((CONFIG_GCC_VERSION >= 40700 || \
+ CONFIG_CLANG_VERSION >= 80000) && \
+ !__builtin_constant_p(times)); \
+ \
+ switch (times) { \
+ case 32: fn(__VA_ARGS__); /* fall through */ \
+ case 31: fn(__VA_ARGS__); /* fall through */ \
+ case 30: fn(__VA_ARGS__); /* fall through */ \
+ case 29: fn(__VA_ARGS__); /* fall through */ \
+ case 28: fn(__VA_ARGS__); /* fall through */ \
+ case 27: fn(__VA_ARGS__); /* fall through */ \
+ case 26: fn(__VA_ARGS__); /* fall through */ \
+ case 25: fn(__VA_ARGS__); /* fall through */ \
+ case 24: fn(__VA_ARGS__); /* fall through */ \
+ case 23: fn(__VA_ARGS__); /* fall through */ \
+ case 22: fn(__VA_ARGS__); /* fall through */ \
+ case 21: fn(__VA_ARGS__); /* fall through */ \
+ case 20: fn(__VA_ARGS__); /* fall through */ \
+ case 19: fn(__VA_ARGS__); /* fall through */ \
+ case 18: fn(__VA_ARGS__); /* fall through */ \
+ case 17: fn(__VA_ARGS__); /* fall through */ \
+ case 16: fn(__VA_ARGS__); /* fall through */ \
+ case 15: fn(__VA_ARGS__); /* fall through */ \
+ case 14: fn(__VA_ARGS__); /* fall through */ \
+ case 13: fn(__VA_ARGS__); /* fall through */ \
+ case 12: fn(__VA_ARGS__); /* fall through */ \
+ case 11: fn(__VA_ARGS__); /* fall through */ \
+ case 10: fn(__VA_ARGS__); /* fall through */ \
+ case 9: fn(__VA_ARGS__); /* fall through */ \
+ case 8: fn(__VA_ARGS__); /* fall through */ \
+ case 7: fn(__VA_ARGS__); /* fall through */ \
+ case 6: fn(__VA_ARGS__); /* fall through */ \
+ case 5: fn(__VA_ARGS__); /* fall through */ \
+ case 4: fn(__VA_ARGS__); /* fall through */ \
+ case 3: fn(__VA_ARGS__); /* fall through */ \
+ case 2: fn(__VA_ARGS__); /* fall through */ \
+ case 1: fn(__VA_ARGS__); /* fall through */ \
+ case 0: break; \
+ \
+ default: \
+ /* \
+ * Either the iteration count is unreasonable \
+ * or we need to add more cases above. \
+ */ \
+ bad_unroll(); \
+ break; \
+ } \
+} while (0)
+
+#endif /* __ASM_UNROLL_H__ */
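A minimal usage sketch for unroll(); the zero_8_words() and write_zero()
names are made up for this example (the series itself applies unroll() to
cache-op loops), and the iteration count must be a compile-time constant
or BUILD_BUG_ON() fires:

#include <asm/unroll.h>

#define write_zero(p)	(*(p)++ = 0)

/* Store eight zero words with no loop overhead: the switch inside
 * unroll() falls through exactly 'times' cases, one call per case. */
static inline void zero_8_words(unsigned long *p)
{
	unroll(8, write_zero, p);
}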
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 89b07ea8d249..d6e97df51cfb 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -80,7 +80,7 @@ obj-$(CONFIG_KGDB) += kgdb.o
obj-$(CONFIG_PROC_FS) += proc.o
obj-$(CONFIG_MAGIC_SYSRQ) += sysrq.o
-obj-$(CONFIG_64BIT) += cpu-bugs64.o
+obj-$(CONFIG_CPU_R4X00_BUGS64) += r4k-bugs64.o
obj-$(CONFIG_I8253) += i8253.o
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index f521cbf934e7..c54332697673 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -608,7 +608,7 @@ static int set_ftlb_enable(struct cpuinfo_mips *c, enum ftlb_flags flags)
if (!(flags & FTLB_EN))
return 1;
return 0;
- case CPU_LOONGSON3:
+ case CPU_LOONGSON64:
/* Flush ITLB, DTLB, VTLB and FTLB */
write_c0_diag(LOONGSON_DIAG_ITLB | LOONGSON_DIAG_DTLB |
LOONGSON_DIAG_VTLB | LOONGSON_DIAG_FTLB);
@@ -1526,24 +1526,24 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
MIPS_CPU_LLSC | MIPS_CPU_BP_GHIST;
c->tlbsize = 64;
break;
- case PRID_IMP_LOONGSON_64: /* Loongson-2/3 */
+ case PRID_IMP_LOONGSON_64C: /* Loongson-2/3 */
switch (c->processor_id & PRID_REV_MASK) {
case PRID_REV_LOONGSON2E:
- c->cputype = CPU_LOONGSON2;
+ c->cputype = CPU_LOONGSON2EF;
__cpu_name[cpu] = "ICT Loongson-2";
set_elf_platform(cpu, "loongson2e");
set_isa(c, MIPS_CPU_ISA_III);
c->fpu_msk31 |= FPU_CSR_CONDX;
break;
case PRID_REV_LOONGSON2F:
- c->cputype = CPU_LOONGSON2;
+ c->cputype = CPU_LOONGSON2EF;
__cpu_name[cpu] = "ICT Loongson-2";
set_elf_platform(cpu, "loongson2f");
set_isa(c, MIPS_CPU_ISA_III);
c->fpu_msk31 |= FPU_CSR_CONDX;
break;
case PRID_REV_LOONGSON3A_R1:
- c->cputype = CPU_LOONGSON3;
+ c->cputype = CPU_LOONGSON64;
__cpu_name[cpu] = "ICT Loongson-3";
set_elf_platform(cpu, "loongson3a");
set_isa(c, MIPS_CPU_ISA_M64R1);
@@ -1552,7 +1552,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
break;
case PRID_REV_LOONGSON3B_R1:
case PRID_REV_LOONGSON3B_R2:
- c->cputype = CPU_LOONGSON3;
+ c->cputype = CPU_LOONGSON64;
__cpu_name[cpu] = "ICT Loongson-3";
set_elf_platform(cpu, "loongson3b");
set_isa(c, MIPS_CPU_ISA_M64R1);
@@ -1565,12 +1565,13 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
MIPS_CPU_FPU | MIPS_CPU_LLSC |
MIPS_CPU_32FPR;
c->tlbsize = 64;
+ set_cpu_asid_mask(c, MIPS_ENTRYHI_ASID);
c->writecombine = _CACHE_UNCACHED_ACCELERATED;
break;
case PRID_IMP_LOONGSON_32: /* Loongson-1 */
decode_configs(c);
- c->cputype = CPU_LOONGSON1;
+ c->cputype = CPU_LOONGSON32;
switch (c->processor_id & PRID_REV_MASK) {
case PRID_REV_LOONGSON1B:
@@ -1903,18 +1904,18 @@ platform:
static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
{
switch (c->processor_id & PRID_IMP_MASK) {
- case PRID_IMP_LOONGSON_64: /* Loongson-2/3 */
+ case PRID_IMP_LOONGSON_64C: /* Loongson-2/3 */
switch (c->processor_id & PRID_REV_MASK) {
case PRID_REV_LOONGSON3A_R2_0:
case PRID_REV_LOONGSON3A_R2_1:
- c->cputype = CPU_LOONGSON3;
+ c->cputype = CPU_LOONGSON64;
__cpu_name[cpu] = "ICT Loongson-3";
set_elf_platform(cpu, "loongson3a");
set_isa(c, MIPS_CPU_ISA_M64R2);
break;
case PRID_REV_LOONGSON3A_R3_0:
case PRID_REV_LOONGSON3A_R3_1:
- c->cputype = CPU_LOONGSON3;
+ c->cputype = CPU_LOONGSON64;
__cpu_name[cpu] = "ICT Loongson-3";
set_elf_platform(cpu, "loongson3a");
set_isa(c, MIPS_CPU_ISA_M64R2);
@@ -1927,6 +1928,17 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
c->ases |= (MIPS_ASE_LOONGSON_MMI | MIPS_ASE_LOONGSON_CAM |
MIPS_ASE_LOONGSON_EXT | MIPS_ASE_LOONGSON_EXT2);
break;
+ case PRID_IMP_LOONGSON_64G:
+ c->cputype = CPU_LOONGSON64;
+ __cpu_name[cpu] = "ICT Loongson-3";
+ set_elf_platform(cpu, "loongson3a");
+ set_isa(c, MIPS_CPU_ISA_M64R2);
+ decode_configs(c);
+ c->options |= MIPS_CPU_FTLB | MIPS_CPU_TLBINV | MIPS_CPU_LDPTE;
+ c->writecombine = _CACHE_UNCACHED_ACCELERATED;
+ c->ases |= (MIPS_ASE_LOONGSON_MMI | MIPS_ASE_LOONGSON_CAM |
+ MIPS_ASE_LOONGSON_EXT | MIPS_ASE_LOONGSON_EXT2);
+ break;
default:
panic("Unknown Loongson Processor ID!");
break;
@@ -1965,13 +1977,30 @@ static inline void cpu_probe_ingenic(struct cpuinfo_mips *c, unsigned int cpu)
break;
}
+ switch (c->processor_id & PRID_COMP_MASK) {
/*
- * The config0 register in the Xburst CPUs with a processor ID of
+ * XBurst CPUs with a processor ID of PRID_COMP_INGENIC_D1 have an
+ * abandoned huge-page TLB mode. This mode is not compatible with the
+ * MIPS standard; it causes a TLB miss and an infinite loop (line 21
+ * in tlb-funcs.S) when starting the init process. After chip reset
+ * the default is HPTLB mode, so write 0xa9000000 to CP0 register 5
+ * sel 4 to switch back to VTLB mode and avoid getting stuck.
+ */
+ case PRID_COMP_INGENIC_D1:
+ write_c0_page_ctrl(XBURST_PAGECTRL_HPTLB_DIS);
+ break;
+ /*
+ * XBurst CPUs with a processor ID of PRID_COMP_INGENIC_D0 report
* themselves as MIPS32r2 compatible in the config0 register, but they
* don't actually support this ISA.
*/
- if ((c->processor_id & PRID_COMP_MASK) == PRID_COMP_INGENIC_D0)
+ case PRID_COMP_INGENIC_D0:
c->isa_level &= ~MIPS_CPU_ISA_M32R2;
+ break;
+ default:
+ break;
+ }
}
static inline void cpu_probe_netlogic(struct cpuinfo_mips *c, int cpu)
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index efde27c99414..0a43c9125267 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -18,6 +18,7 @@
#include <asm/fpregdef.h>
#include <asm/mipsregs.h>
#include <asm/stackframe.h>
+#include <asm/sync.h>
#include <asm/war.h>
#include <asm/thread_info.h>
@@ -353,8 +354,9 @@ NESTED(ejtag_debug_handler, PT_SIZE, sp)
#ifdef CONFIG_SMP
1: PTR_LA k0, ejtag_debug_buffer_spinlock
- ll k0, 0(k0)
- bnez k0, 1b
+ __SYNC(full, loongson3_war)
+2: ll k0, 0(k0)
+ bnez k0, 2b
PTR_LA k0, ejtag_debug_buffer_spinlock
sc k0, 0(k0)
beqz k0, 1b
@@ -657,7 +659,7 @@ isrdhwr:
.set pop
END(handle_ri_rdhwr)
-#ifdef CONFIG_64BIT
+#ifdef CONFIG_CPU_R4X00_BUGS64
/* A temporary overflow handler used by check_daddi(). */
__INIT
diff --git a/arch/mips/kernel/idle.c b/arch/mips/kernel/idle.c
index eb2afc0b8db1..37f8e78e2869 100644
--- a/arch/mips/kernel/idle.c
+++ b/arch/mips/kernel/idle.c
@@ -173,13 +173,14 @@ void __init check_wait(void)
case CPU_CAVIUM_OCTEON2:
case CPU_CAVIUM_OCTEON3:
case CPU_XBURST:
- case CPU_LOONGSON1:
+ case CPU_LOONGSON32:
case CPU_XLR:
case CPU_XLP:
cpu_wait = r4k_wait;
break;
- case CPU_LOONGSON3:
- if ((c->processor_id & PRID_REV_MASK) >= PRID_REV_LOONGSON3A_R2_0)
+ case CPU_LOONGSON64:
+ if ((c->processor_id & (PRID_IMP_MASK | PRID_REV_MASK)) >=
+ (PRID_IMP_LOONGSON_64C | PRID_REV_LOONGSON3A_R2_0))
cpu_wait = r4k_wait;
break;
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
index a3e2da8391ea..128fc9999c56 100644
--- a/arch/mips/kernel/perf_event_mipsxx.c
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -1623,7 +1623,7 @@ static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config)
raw_event.cntr_mask =
raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
break;
- case CPU_LOONGSON3:
+ case CPU_LOONGSON64:
raw_event.cntr_mask = raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
break;
}
@@ -1764,12 +1764,12 @@ init_hw_perf_events(void)
mipspmu.general_event_map = &mipsxxcore_event_map;
mipspmu.cache_event_map = &mipsxxcore_cache_map;
break;
- case CPU_LOONGSON1:
+ case CPU_LOONGSON32:
mipspmu.name = "mips/loongson1";
mipspmu.general_event_map = &mipsxxcore_event_map;
mipspmu.cache_event_map = &mipsxxcore_cache_map;
break;
- case CPU_LOONGSON3:
+ case CPU_LOONGSON64:
mipspmu.name = "mips/loongson3";
mipspmu.general_event_map = &loongson3_event_map;
mipspmu.cache_event_map = &loongson3_cache_map;
diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c
index a26f40db15d0..9bf60d7d44d3 100644
--- a/arch/mips/kernel/pm-cps.c
+++ b/arch/mips/kernel/pm-cps.c
@@ -307,7 +307,7 @@ static int cps_gen_flush_fsb(u32 **pp, struct uasm_label **pl,
}
/* Barrier ensuring previous cache invalidates are complete */
- uasm_i_sync(pp, STYPE_SYNC);
+ uasm_i_sync(pp, __SYNC_full);
uasm_i_ehb(pp);
/* Check whether the pipeline stalled due to the FSB being full */
@@ -397,7 +397,7 @@ static void *cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
if (coupled_coherence) {
/* Increment ready_count */
- uasm_i_sync(&p, STYPE_SYNC_MB);
+ uasm_i_sync(&p, __SYNC_mb);
uasm_build_label(&l, p, lbl_incready);
uasm_i_ll(&p, t1, 0, r_nc_count);
uasm_i_addiu(&p, t2, t1, 1);
@@ -406,7 +406,7 @@ static void *cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
uasm_i_addiu(&p, t1, t1, 1);
/* Barrier ensuring all CPUs see the updated r_nc_count value */
- uasm_i_sync(&p, STYPE_SYNC_MB);
+ uasm_i_sync(&p, __SYNC_mb);
/*
* If this is the last VPE to become ready for non-coherence
@@ -473,7 +473,7 @@ static void *cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
Index_Writeback_Inv_D, lbl_flushdcache);
/* Barrier ensuring previous cache invalidates are complete */
- uasm_i_sync(&p, STYPE_SYNC);
+ uasm_i_sync(&p, __SYNC_full);
uasm_i_ehb(&p);
if (mips_cm_revision() < CM_REV_CM3) {
@@ -487,7 +487,7 @@ static void *cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
uasm_i_lw(&p, t0, 0, r_pcohctl);
/* Barrier to ensure write to coherence control is complete */
- uasm_i_sync(&p, STYPE_SYNC);
+ uasm_i_sync(&p, __SYNC_full);
uasm_i_ehb(&p);
}
@@ -534,7 +534,7 @@ static void *cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
}
/* Barrier to ensure write to CPC command is complete */
- uasm_i_sync(&p, STYPE_SYNC);
+ uasm_i_sync(&p, __SYNC_full);
uasm_i_ehb(&p);
}
@@ -572,13 +572,13 @@ static void *cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
uasm_i_lw(&p, t0, 0, r_pcohctl);
/* Barrier to ensure write to coherence control is complete */
- uasm_i_sync(&p, STYPE_SYNC);
+ uasm_i_sync(&p, __SYNC_full);
uasm_i_ehb(&p);
if (coupled_coherence && (state == CPS_PM_NC_WAIT)) {
/* Decrement ready_count */
uasm_build_label(&l, p, lbl_decready);
- uasm_i_sync(&p, STYPE_SYNC_MB);
+ uasm_i_sync(&p, __SYNC_mb);
uasm_i_ll(&p, t1, 0, r_nc_count);
uasm_i_addiu(&p, t2, t1, -1);
uasm_i_sc(&p, t2, 0, r_nc_count);
@@ -586,7 +586,7 @@ static void *cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
uasm_i_andi(&p, v0, t1, (1 << fls(smp_num_siblings)) - 1);
/* Barrier ensuring all CPUs see the updated r_nc_count value */
- uasm_i_sync(&p, STYPE_SYNC_MB);
+ uasm_i_sync(&p, __SYNC_mb);
}
if (coupled_coherence && (state == CPS_PM_CLOCK_GATED)) {
@@ -608,7 +608,7 @@ static void *cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
uasm_build_label(&l, p, lbl_secondary_cont);
/* Barrier ensuring all CPUs see the updated r_nc_count value */
- uasm_i_sync(&p, STYPE_SYNC_MB);
+ uasm_i_sync(&p, __SYNC_mb);
}
/* The core is coherent, time to return to C code */
diff --git a/arch/mips/kernel/cpu-bugs64.c b/arch/mips/kernel/r4k-bugs64.c
index 6a7afe7ef4d3..1ff19f1ea5ca 100644
--- a/arch/mips/kernel/cpu-bugs64.c
+++ b/arch/mips/kernel/r4k-bugs64.c
@@ -242,7 +242,7 @@ static __init void check_daddi(void)
panic(bug64hit, !DADDI_WAR ? daddiwar : nowar);
}
-int daddiu_bug = IS_ENABLED(CONFIG_CPU_MIPSR6) ? 0 : -1;
+int daddiu_bug = -1;
static __init void check_daddiu(void)
{
@@ -312,14 +312,11 @@ static __init void check_daddiu(void)
void __init check_bugs64_early(void)
{
- if (!IS_ENABLED(CONFIG_CPU_MIPSR6)) {
- check_mult_sh();
- check_daddiu();
- }
+ check_mult_sh();
+ check_daddiu();
}
void __init check_bugs64(void)
{
- if (!IS_ENABLED(CONFIG_CPU_MIPSR6))
- check_daddi();
+ check_daddi();
}
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 5eec13b8d222..c3d4212b5f1d 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -67,7 +67,9 @@ static char __initdata command_line[COMMAND_LINE_SIZE];
char __initdata arcs_cmdline[COMMAND_LINE_SIZE];
#ifdef CONFIG_CMDLINE_BOOL
-static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
+static const char builtin_cmdline[] __initconst = CONFIG_CMDLINE;
+#else
+static const char builtin_cmdline[] __initconst = "";
#endif
/*
@@ -285,7 +287,7 @@ static unsigned long __init init_initrd(void)
* Initialize the bootmem allocator. It also setup initrd related data
* if needed.
*/
-#if defined(CONFIG_SGI_IP27) || (defined(CONFIG_CPU_LOONGSON3) && defined(CONFIG_NUMA))
+#if defined(CONFIG_SGI_IP27) || (defined(CONFIG_CPU_LOONGSON64) && defined(CONFIG_NUMA))
static void __init bootmem_init(void)
{
@@ -538,11 +540,94 @@ static void __init check_kernel_sections_mem(void)
}
}
-#define USE_PROM_CMDLINE IS_ENABLED(CONFIG_MIPS_CMDLINE_FROM_BOOTLOADER)
-#define USE_DTB_CMDLINE IS_ENABLED(CONFIG_MIPS_CMDLINE_FROM_DTB)
-#define EXTEND_WITH_PROM IS_ENABLED(CONFIG_MIPS_CMDLINE_DTB_EXTEND)
-#define BUILTIN_EXTEND_WITH_PROM \
- IS_ENABLED(CONFIG_MIPS_CMDLINE_BUILTIN_EXTEND)
+static void __init bootcmdline_append(const char *s, size_t max)
+{
+ if (!s[0] || !max)
+ return;
+
+ if (boot_command_line[0])
+ strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
+
+ strlcat(boot_command_line, s, max);
+}
+
+#ifdef CONFIG_OF_EARLY_FLATTREE
+
+static int __init bootcmdline_scan_chosen(unsigned long node, const char *uname,
+ int depth, void *data)
+{
+ bool *dt_bootargs = data;
+ const char *p;
+ int l;
+
+ if (depth != 1 || !data ||
+ (strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0))
+ return 0;
+
+ p = of_get_flat_dt_prop(node, "bootargs", &l);
+ if (p != NULL && l > 0) {
+ bootcmdline_append(p, min(l, COMMAND_LINE_SIZE));
+ *dt_bootargs = true;
+ }
+
+ return 1;
+}
+
+#endif /* CONFIG_OF_EARLY_FLATTREE */
+
+static void __init bootcmdline_init(char **cmdline_p)
+{
+ bool dt_bootargs = false;
+
+ /*
+ * If CMDLINE_OVERRIDE is enabled then initializing the command line is
+ * trivial - we simply use the built-in command line unconditionally &
+ * unmodified.
+ */
+ if (IS_ENABLED(CONFIG_CMDLINE_OVERRIDE)) {
+ strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
+ return;
+ }
+
+ /*
+ * If the user specified a built-in command line &
+ * MIPS_CMDLINE_BUILTIN_EXTEND, then the built-in command line is
+ * prepended to arguments from the bootloader or DT, so we copy it to
+ * the start of boot_command_line here. Otherwise, empty
+ * boot_command_line to undo anything early_init_dt_scan_chosen() did.
+ */
+ if (IS_ENABLED(CONFIG_MIPS_CMDLINE_BUILTIN_EXTEND))
+ strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
+ else
+ boot_command_line[0] = 0;
+
+#ifdef CONFIG_OF_EARLY_FLATTREE
+ /*
+ * If we're configured to take boot arguments from DT, look for those
+ * now.
+ */
+ if (IS_ENABLED(CONFIG_MIPS_CMDLINE_FROM_DTB))
+ of_scan_flat_dt(bootcmdline_scan_chosen, &dt_bootargs);
+#endif
+
+ /*
+ * If we didn't get any arguments from DT (regardless of whether that's
+ * because we weren't configured to look for them, or because we looked
+ * & found none) then we'll take arguments from the bootloader.
+ * plat_mem_setup() should have filled arcs_cmdline with arguments from
+ * the bootloader.
+ */
+ if (IS_ENABLED(CONFIG_MIPS_CMDLINE_DTB_EXTEND) || !dt_bootargs)
+ bootcmdline_append(arcs_cmdline, COMMAND_LINE_SIZE);
+
+ /*
+ * If the user specified a built-in command line & we didn't already
+ * prepend it, we append it to boot_command_line here.
+ */
+ if (IS_ENABLED(CONFIG_CMDLINE_BOOL) &&
+ !IS_ENABLED(CONFIG_MIPS_CMDLINE_BUILTIN_EXTEND))
+ bootcmdline_append(builtin_cmdline, COMMAND_LINE_SIZE);
+}
/*
* arch_mem_init - initialize memory management subsystem
@@ -570,48 +655,12 @@ static void __init arch_mem_init(char **cmdline_p)
{
extern void plat_mem_setup(void);
- /*
- * Initialize boot_command_line to an innocuous but non-empty string in
- * order to prevent early_init_dt_scan_chosen() from copying
- * CONFIG_CMDLINE into it without our knowledge. We handle
- * CONFIG_CMDLINE ourselves below & don't want to duplicate its
- * content because repeating arguments can be problematic.
- */
- strlcpy(boot_command_line, " ", COMMAND_LINE_SIZE);
-
/* call board setup routine */
plat_mem_setup();
memblock_set_bottom_up(true);
-#if defined(CONFIG_CMDLINE_BOOL) && defined(CONFIG_CMDLINE_OVERRIDE)
- strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
-#else
- if ((USE_PROM_CMDLINE && arcs_cmdline[0]) ||
- (USE_DTB_CMDLINE && !boot_command_line[0]))
- strlcpy(boot_command_line, arcs_cmdline, COMMAND_LINE_SIZE);
-
- if (EXTEND_WITH_PROM && arcs_cmdline[0]) {
- if (boot_command_line[0])
- strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
- strlcat(boot_command_line, arcs_cmdline, COMMAND_LINE_SIZE);
- }
-
-#if defined(CONFIG_CMDLINE_BOOL)
- if (builtin_cmdline[0]) {
- if (boot_command_line[0])
- strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
- strlcat(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
- }
-
- if (BUILTIN_EXTEND_WITH_PROM && arcs_cmdline[0]) {
- if (boot_command_line[0])
- strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
- strlcat(boot_command_line, arcs_cmdline, COMMAND_LINE_SIZE);
- }
-#endif
-#endif
+ bootcmdline_init(cmdline_p);
strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
-
*cmdline_p = command_line;
parse_early_param();
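The net effect of bootcmdline_init() above is a fixed precedence:
CMDLINE_OVERRIDE wins outright; otherwise a BUILTIN_EXTEND built-in
command line is prepended, DT bootargs are taken if configured and
present, bootloader (arcs_cmdline) arguments fill in or extend, and a
non-extending built-in command line is appended last. A stand-alone model
of the append helper - a user-space sketch where append() and the strncat
arithmetic are illustrative stand-ins for the kernel's strlcat():

#include <stdio.h>
#include <string.h>

#define COMMAND_LINE_SIZE 128

static char boot_command_line[COMMAND_LINE_SIZE];

/* Mirror bootcmdline_append(): skip empty input, separate fragments
 * with a single space, never overflow the buffer. */
static void append(const char *s)
{
	size_t len = strlen(boot_command_line);

	if (!s[0] || len >= COMMAND_LINE_SIZE - 1)
		return;
	if (len > 0) {
		strncat(boot_command_line, " ", COMMAND_LINE_SIZE - len - 1);
		len = strlen(boot_command_line);
	}
	strncat(boot_command_line, s, COMMAND_LINE_SIZE - len - 1);
}

int main(void)
{
	append("console=ttyS0 earlycon");	/* e.g. DT bootargs */
	append("root=/dev/sda1");		/* e.g. appended built-in */
	printf("%s\n", boot_command_line);	/* both, space-separated */
	return 0;
}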
diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c
index 712c15de6ab9..9058e9dcf080 100644
--- a/arch/mips/kernel/smp-bmips.c
+++ b/arch/mips/kernel/smp-bmips.c
@@ -31,7 +31,6 @@
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/bootinfo.h>
-#include <asm/pmon.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/mipsregs.h>
diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
index 3f16f3823031..c333e5788664 100644
--- a/arch/mips/kernel/syscall.c
+++ b/arch/mips/kernel/syscall.c
@@ -37,6 +37,7 @@
#include <asm/signal.h>
#include <asm/sim.h>
#include <asm/shmparam.h>
+#include <asm/sync.h>
#include <asm/sysmips.h>
#include <asm/switch_to.h>
@@ -133,12 +134,12 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new)
[efault] "i" (-EFAULT)
: "memory");
} else if (cpu_has_llsc) {
- loongson_llsc_mb();
__asm__ __volatile__ (
" .set push \n"
" .set "MIPS_ISA_ARCH_LEVEL" \n"
" li %[err], 0 \n"
"1: \n"
+ " " __SYNC(full, loongson3_war) " \n"
user_ll("%[old]", "(%[addr])")
" move %[tmp], %[new] \n"
"2: \n"
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 342e41de9d64..83f2a437d9e2 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -1761,7 +1761,7 @@ static inline void parity_protection_init(void)
case CPU_5KC:
case CPU_5KE:
- case CPU_LOONGSON1:
+ case CPU_LOONGSON32:
write_c0_ecc(0x80000000);
back_to_back_c0_hazard();
/* Set the PE bit (bit 31) in the c0_errctl register. */
@@ -2394,7 +2394,7 @@ void __init trap_init(void)
else {
if (cpu_has_vtag_icache)
set_except_vector(EXCCODE_RI, handle_ri_rdhwr_tlbp);
- else if (current_cpu_type() == CPU_LOONGSON3)
+ else if (current_cpu_type() == CPU_LOONGSON64)
set_except_vector(EXCCODE_RI, handle_ri_rdhwr_tlbp);
else
set_except_vector(EXCCODE_RI, handle_ri_rdhwr);
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
index 33ee0d18fb0a..a5f00ec73ea6 100644
--- a/arch/mips/kernel/vmlinux.lds.S
+++ b/arch/mips/kernel/vmlinux.lds.S
@@ -10,6 +10,11 @@
*/
#define BSS_FIRST_SECTIONS *(.bss..swapper_pg_dir)
+/* Cavium Octeon should not have a separate PT_NOTE Program Header. */
+#ifndef CONFIG_CAVIUM_OCTEON_SOC
+#define EMITS_PT_NOTE
+#endif
+
#include <asm-generic/vmlinux.lds.h>
#undef mips
@@ -76,16 +81,8 @@ SECTIONS
__stop___dbe_table = .;
}
-#ifdef CONFIG_CAVIUM_OCTEON_SOC
-#define NOTES_HEADER
-#else /* CONFIG_CAVIUM_OCTEON_SOC */
-#define NOTES_HEADER :note
-#endif /* CONFIG_CAVIUM_OCTEON_SOC */
- NOTES :text NOTES_HEADER
- .dummy : { *(.dummy) } :text
-
_sdata = .; /* Start of data section */
- RODATA
+ RO_DATA(4096)
/* writeable */
.data : { /* Data */
diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c
index 97e538a8c1be..7dad7a293eae 100644
--- a/arch/mips/kvm/mmu.c
+++ b/arch/mips/kvm/mmu.c
@@ -136,6 +136,7 @@ pgd_t *kvm_pgd_alloc(void)
static pte_t *kvm_mips_walk_pgd(pgd_t *pgd, struct kvm_mmu_memory_cache *cache,
unsigned long addr)
{
+ p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
@@ -145,7 +146,8 @@ static pte_t *kvm_mips_walk_pgd(pgd_t *pgd, struct kvm_mmu_memory_cache *cache,
BUG();
return NULL;
}
- pud = pud_offset(pgd, addr);
+ p4d = p4d_offset(pgd, addr);
+ pud = pud_offset(p4d, addr);
if (pud_none(*pud)) {
pmd_t *new_pmd;
@@ -204,8 +206,8 @@ static bool kvm_mips_flush_gpa_pmd(pmd_t *pmd, unsigned long start_gpa,
{
pte_t *pte;
unsigned long end = ~0ul;
- int i_min = __pmd_offset(start_gpa);
- int i_max = __pmd_offset(end_gpa);
+ int i_min = pmd_index(start_gpa);
+ int i_max = pmd_index(end_gpa);
bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PMD - 1);
int i;
@@ -232,8 +234,8 @@ static bool kvm_mips_flush_gpa_pud(pud_t *pud, unsigned long start_gpa,
{
pmd_t *pmd;
unsigned long end = ~0ul;
- int i_min = __pud_offset(start_gpa);
- int i_max = __pud_offset(end_gpa);
+ int i_min = pud_index(start_gpa);
+ int i_max = pud_index(end_gpa);
bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PUD - 1);
int i;
@@ -258,6 +260,7 @@ static bool kvm_mips_flush_gpa_pud(pud_t *pud, unsigned long start_gpa,
static bool kvm_mips_flush_gpa_pgd(pgd_t *pgd, unsigned long start_gpa,
unsigned long end_gpa)
{
+ p4d_t *p4d;
pud_t *pud;
unsigned long end = ~0ul;
int i_min = pgd_index(start_gpa);
@@ -269,7 +272,8 @@ static bool kvm_mips_flush_gpa_pgd(pgd_t *pgd, unsigned long start_gpa,
if (!pgd_present(pgd[i]))
continue;
- pud = pud_offset(pgd + i, 0);
+ p4d = p4d_offset(pgd, 0);
+ pud = pud_offset(p4d + i, 0);
if (i == i_max)
end = end_gpa;
@@ -334,8 +338,8 @@ static int kvm_mips_##name##_pmd(pmd_t *pmd, unsigned long start, \
int ret = 0; \
pte_t *pte; \
unsigned long cur_end = ~0ul; \
- int i_min = __pmd_offset(start); \
- int i_max = __pmd_offset(end); \
+ int i_min = pmd_index(start); \
+ int i_max = pmd_index(end); \
int i; \
\
for (i = i_min; i <= i_max; ++i, start = 0) { \
@@ -357,8 +361,8 @@ static int kvm_mips_##name##_pud(pud_t *pud, unsigned long start, \
int ret = 0; \
pmd_t *pmd; \
unsigned long cur_end = ~0ul; \
- int i_min = __pud_offset(start); \
- int i_max = __pud_offset(end); \
+ int i_min = pud_index(start); \
+ int i_max = pud_index(end); \
int i; \
\
for (i = i_min; i <= i_max; ++i, start = 0) { \
@@ -378,6 +382,7 @@ static int kvm_mips_##name##_pgd(pgd_t *pgd, unsigned long start, \
unsigned long end) \
{ \
int ret = 0; \
+ p4d_t *p4d; \
pud_t *pud; \
unsigned long cur_end = ~0ul; \
int i_min = pgd_index(start); \
@@ -388,7 +393,8 @@ static int kvm_mips_##name##_pgd(pgd_t *pgd, unsigned long start, \
if (!pgd_present(pgd[i])) \
continue; \
\
- pud = pud_offset(pgd + i, 0); \
+ p4d = p4d_offset(pgd, 0); \
+ pud = pud_offset(p4d + i, 0); \
if (i == i_max) \
cur_end = end; \
\
@@ -862,8 +868,8 @@ static bool kvm_mips_flush_gva_pmd(pmd_t *pmd, unsigned long start_gva,
{
pte_t *pte;
unsigned long end = ~0ul;
- int i_min = __pmd_offset(start_gva);
- int i_max = __pmd_offset(end_gva);
+ int i_min = pmd_index(start_gva);
+ int i_max = pmd_index(end_gva);
bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PMD - 1);
int i;
@@ -890,8 +896,8 @@ static bool kvm_mips_flush_gva_pud(pud_t *pud, unsigned long start_gva,
{
pmd_t *pmd;
unsigned long end = ~0ul;
- int i_min = __pud_offset(start_gva);
- int i_max = __pud_offset(end_gva);
+ int i_min = pud_index(start_gva);
+ int i_max = pud_index(end_gva);
bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PUD - 1);
int i;
@@ -916,6 +922,7 @@ static bool kvm_mips_flush_gva_pud(pud_t *pud, unsigned long start_gva,
static bool kvm_mips_flush_gva_pgd(pgd_t *pgd, unsigned long start_gva,
unsigned long end_gva)
{
+ p4d_t *p4d;
pud_t *pud;
unsigned long end = ~0ul;
int i_min = pgd_index(start_gva);
@@ -927,7 +934,8 @@ static bool kvm_mips_flush_gva_pgd(pgd_t *pgd, unsigned long start_gva,
if (!pgd_present(pgd[i]))
continue;
- pud = pud_offset(pgd + i, 0);
+ p4d = p4d_offset(pgd, 0);
+ pud = pud_offset(p4d + i, 0);
if (i == i_max)
end = end_gva;
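For reference, the five-level walk these hunks converge on is the generic
pattern below - a sketch using the standard page-table helpers, with the
pXd_none()/pXd_bad() checks omitted for brevity; on MIPS the p4d level is
folded, so p4d_offset() is effectively a no-op:

static pte_t *walk_to_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	p4d_t *p4d = p4d_offset(pgd, addr);	/* folded level on MIPS */
	pud_t *pud = pud_offset(p4d, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pte_offset_kernel(pmd, addr);
}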
diff --git a/arch/mips/kvm/trap_emul.c b/arch/mips/kvm/trap_emul.c
index 73daa6ad33af..5a11e83dffe6 100644
--- a/arch/mips/kvm/trap_emul.c
+++ b/arch/mips/kvm/trap_emul.c
@@ -564,6 +564,7 @@ static void kvm_mips_emul_free_gva_pt(pgd_t *pgd)
/* Don't free host kernel page tables copied from init_mm.pgd */
const unsigned long end = 0x80000000;
unsigned long pgd_va, pud_va, pmd_va;
+ p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
@@ -576,7 +577,8 @@ static void kvm_mips_emul_free_gva_pt(pgd_t *pgd)
pgd_va = (unsigned long)i << PGDIR_SHIFT;
if (pgd_va >= end)
break;
- pud = pud_offset(pgd + i, 0);
+ p4d = p4d_offset(pgd, 0);
+ pud = pud_offset(p4d + i, 0);
for (j = 0; j < PTRS_PER_PUD; j++) {
if (pud_none(pud[j]))
continue;
diff --git a/arch/mips/lib/bitops.c b/arch/mips/lib/bitops.c
index 3b2a1e78a543..116d0bd8b2ae 100644
--- a/arch/mips/lib/bitops.c
+++ b/arch/mips/lib/bitops.c
@@ -7,6 +7,7 @@
* Copyright (c) 1999, 2000 Silicon Graphics, Inc.
*/
#include <linux/bitops.h>
+#include <linux/bits.h>
#include <linux/irqflags.h>
#include <linux/export.h>
@@ -19,12 +20,11 @@
*/
void __mips_set_bit(unsigned long nr, volatile unsigned long *addr)
{
- unsigned long *a = (unsigned long *)addr;
- unsigned bit = nr & SZLONG_MASK;
+ volatile unsigned long *a = &addr[BIT_WORD(nr)];
+ unsigned int bit = nr % BITS_PER_LONG;
unsigned long mask;
unsigned long flags;
- a += nr >> SZLONG_LOG;
mask = 1UL << bit;
raw_local_irq_save(flags);
*a |= mask;
@@ -41,12 +41,11 @@ EXPORT_SYMBOL(__mips_set_bit);
*/
void __mips_clear_bit(unsigned long nr, volatile unsigned long *addr)
{
- unsigned long *a = (unsigned long *)addr;
- unsigned bit = nr & SZLONG_MASK;
+ volatile unsigned long *a = &addr[BIT_WORD(nr)];
+ unsigned int bit = nr % BITS_PER_LONG;
unsigned long mask;
unsigned long flags;
- a += nr >> SZLONG_LOG;
mask = 1UL << bit;
raw_local_irq_save(flags);
*a &= ~mask;
@@ -63,12 +62,11 @@ EXPORT_SYMBOL(__mips_clear_bit);
*/
void __mips_change_bit(unsigned long nr, volatile unsigned long *addr)
{
- unsigned long *a = (unsigned long *)addr;
- unsigned bit = nr & SZLONG_MASK;
+ volatile unsigned long *a = &addr[BIT_WORD(nr)];
+ unsigned int bit = nr % BITS_PER_LONG;
unsigned long mask;
unsigned long flags;
- a += nr >> SZLONG_LOG;
mask = 1UL << bit;
raw_local_irq_save(flags);
*a ^= mask;
@@ -78,32 +76,6 @@ EXPORT_SYMBOL(__mips_change_bit);
/**
- * __mips_test_and_set_bit - Set a bit and return its old value. This is
- * called by test_and_set_bit() if it cannot find a faster solution.
- * @nr: Bit to set
- * @addr: Address to count from
- */
-int __mips_test_and_set_bit(unsigned long nr,
- volatile unsigned long *addr)
-{
- unsigned long *a = (unsigned long *)addr;
- unsigned bit = nr & SZLONG_MASK;
- unsigned long mask;
- unsigned long flags;
- int res;
-
- a += nr >> SZLONG_LOG;
- mask = 1UL << bit;
- raw_local_irq_save(flags);
- res = (mask & *a) != 0;
- *a |= mask;
- raw_local_irq_restore(flags);
- return res;
-}
-EXPORT_SYMBOL(__mips_test_and_set_bit);
-
-
-/**
* __mips_test_and_set_bit_lock - Set a bit and return its old value. This is
* called by test_and_set_bit_lock() if it cannot find a faster solution.
* @nr: Bit to set
@@ -112,13 +84,12 @@ EXPORT_SYMBOL(__mips_test_and_set_bit);
int __mips_test_and_set_bit_lock(unsigned long nr,
volatile unsigned long *addr)
{
- unsigned long *a = (unsigned long *)addr;
- unsigned bit = nr & SZLONG_MASK;
+ volatile unsigned long *a = &addr[BIT_WORD(nr)];
+ unsigned int bit = nr % BITS_PER_LONG;
unsigned long mask;
unsigned long flags;
int res;
- a += nr >> SZLONG_LOG;
mask = 1UL << bit;
raw_local_irq_save(flags);
res = (mask & *a) != 0;
@@ -137,13 +108,12 @@ EXPORT_SYMBOL(__mips_test_and_set_bit_lock);
*/
int __mips_test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
{
- unsigned long *a = (unsigned long *)addr;
- unsigned bit = nr & SZLONG_MASK;
+ volatile unsigned long *a = &addr[BIT_WORD(nr)];
+ unsigned int bit = nr % BITS_PER_LONG;
unsigned long mask;
unsigned long flags;
int res;
- a += nr >> SZLONG_LOG;
mask = 1UL << bit;
raw_local_irq_save(flags);
res = (mask & *a) != 0;
@@ -162,13 +132,12 @@ EXPORT_SYMBOL(__mips_test_and_clear_bit);
*/
int __mips_test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
{
- unsigned long *a = (unsigned long *)addr;
- unsigned bit = nr & SZLONG_MASK;
+ volatile unsigned long *a = &addr[BIT_WORD(nr)];
+ unsigned int bit = nr % BITS_PER_LONG;
unsigned long mask;
unsigned long flags;
int res;
- a += nr >> SZLONG_LOG;
mask = 1UL << bit;
raw_local_irq_save(flags);
res = (mask & *a) != 0;
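The rewritten helpers address the bitmap with BIT_WORD() and
nr % BITS_PER_LONG instead of the MIPS-private SZLONG_LOG/SZLONG_MASK
pair. A stand-alone model of that addressing - no atomicity or irq
masking, illustration only:

#include <stdio.h>

#define BITS_PER_LONG	(8 * sizeof(unsigned long))
#define BIT_WORD(nr)	((nr) / BITS_PER_LONG)

static void set_bit_model(unsigned long nr, unsigned long *addr)
{
	unsigned long *a = &addr[BIT_WORD(nr)];	/* word holding bit nr */
	unsigned long mask = 1UL << (nr % BITS_PER_LONG);

	*a |= mask;
}

int main(void)
{
	unsigned long map[2] = { 0, 0 };

	set_bit_model(70, map);		/* word 1, bit 6 on 64-bit */
	printf("%lx %lx\n", map[0], map[1]);
	return 0;
}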
diff --git a/arch/mips/lib/csum_partial.S b/arch/mips/lib/csum_partial.S
index 2ff84f4b1717..fda7b57b826e 100644
--- a/arch/mips/lib/csum_partial.S
+++ b/arch/mips/lib/csum_partial.S
@@ -279,7 +279,7 @@ EXPORT_SYMBOL(csum_partial)
#endif
/* odd buffer alignment? */
-#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_LOONGSON3)
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_LOONGSON64)
.set push
.set arch=mips32r2
wsbh v1, sum
@@ -732,7 +732,7 @@ EXPORT_SYMBOL(csum_partial)
addu sum, v1
#endif
-#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_LOONGSON3)
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_LOONGSON64)
.set push
.set arch=mips32r2
wsbh v1, sum
diff --git a/arch/mips/loongson2ef/Kconfig b/arch/mips/loongson2ef/Kconfig
new file mode 100644
index 000000000000..595dd48e1e4d
--- /dev/null
+++ b/arch/mips/loongson2ef/Kconfig
@@ -0,0 +1,95 @@
+# SPDX-License-Identifier: GPL-2.0
+if MACH_LOONGSON2EF
+
+choice
+ prompt "Machine Type"
+
+config LEMOTE_FULOONG2E
+ bool "Lemote Fuloong(2e) mini-PC"
+ select ARCH_SPARSEMEM_ENABLE
+ select ARCH_MIGHT_HAVE_PC_PARPORT
+ select ARCH_MIGHT_HAVE_PC_SERIO
+ select CEVT_R4K
+ select CSRC_R4K
+ select SYS_HAS_CPU_LOONGSON2E
+ select DMA_NONCOHERENT
+ select BOOT_ELF32
+ select BOARD_SCACHE
+ select FORCE_PCI
+ select I8259
+ select ISA
+ select IRQ_MIPS_CPU
+ select SYS_SUPPORTS_64BIT_KERNEL
+ select SYS_SUPPORTS_LITTLE_ENDIAN
+ select SYS_SUPPORTS_HIGHMEM
+ select SYS_HAS_EARLY_PRINTK
+ select USE_GENERIC_EARLY_PRINTK_8250
+ select GENERIC_ISA_DMA_SUPPORT_BROKEN
+ select CPU_HAS_WB
+ select LOONGSON_MC146818
+ help
+ Lemote Fuloong(2e) mini-PC board based on the Chinese Loongson-2E CPU
+ and an FPGA northbridge.
+
+ The Lemote Fuloong(2e) mini-PC has a VIA686B south bridge.
+
+config LEMOTE_MACH2F
+ bool "Lemote Loongson 2F family machines"
+ select ARCH_SPARSEMEM_ENABLE
+ select ARCH_MIGHT_HAVE_PC_PARPORT
+ select ARCH_MIGHT_HAVE_PC_SERIO
+ select BOARD_SCACHE
+ select BOOT_ELF32
+ select CEVT_R4K if ! MIPS_EXTERNAL_TIMER
+ select CPU_HAS_WB
+ select CS5536
+ select CSRC_R4K if ! MIPS_EXTERNAL_TIMER
+ select DMA_NONCOHERENT
+ select GENERIC_ISA_DMA_SUPPORT_BROKEN
+ select HAVE_CLK
+ select FORCE_PCI
+ select I8259
+ select IRQ_MIPS_CPU
+ select ISA
+ select SYS_HAS_CPU_LOONGSON2F
+ select SYS_HAS_EARLY_PRINTK
+ select USE_GENERIC_EARLY_PRINTK_8250
+ select SYS_SUPPORTS_64BIT_KERNEL
+ select SYS_SUPPORTS_HIGHMEM
+ select SYS_SUPPORTS_LITTLE_ENDIAN
+ select LOONGSON_MC146818
+ help
+ Lemote Loongson 2F family machines use the 2F revision of the
+ Loongson processor and the AMD CS5536 south bridge.
+
+ This family includes the fuloong2f mini PC, the yeeloong2f notebook,
+ the LingLoong all-in-one PC and so forth.
+
+endchoice
+
+config CS5536
+ bool
+
+config CS5536_MFGPT
+ bool "CS5536 MFGPT Timer"
+ depends on CS5536 && !HIGH_RES_TIMERS
+ select MIPS_EXTERNAL_TIMER
+ help
+ This option enables the mfgpt0 timer of the AMD CS5536. With this
+ timer switched on you cannot use high resolution timers.
+
+ If you want to enable the Loongson2 CPUFreq driver, enable this
+ option first; otherwise you will get a wrong system time.
+
+ If unsure, say Yes.
+
+config LOONGSON_UART_BASE
+ bool
+ default y
+ depends on EARLY_PRINTK || SERIAL_8250
+
+config LOONGSON_MC146818
+ bool
+ default n
+
+endif # MACH_LOONGSON2EF
diff --git a/arch/mips/loongson2ef/Makefile b/arch/mips/loongson2ef/Makefile
new file mode 100644
index 000000000000..d4af1605cc9b
--- /dev/null
+++ b/arch/mips/loongson2ef/Makefile
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Common code for all Loongson based systems
+#
+
+obj-$(CONFIG_MACH_LOONGSON2EF) += common/
+
+#
+# Lemote Fuloong mini-PC (Loongson 2E-based)
+#
+
+obj-$(CONFIG_LEMOTE_FULOONG2E) += fuloong-2e/
+
+#
+# Lemote loongson2f family machines
+#
+
+obj-$(CONFIG_LEMOTE_MACH2F) += lemote-2f/
diff --git a/arch/mips/loongson2ef/Platform b/arch/mips/loongson2ef/Platform
new file mode 100644
index 000000000000..3aca42963f35
--- /dev/null
+++ b/arch/mips/loongson2ef/Platform
@@ -0,0 +1,32 @@
+#
+# Loongson Processors' Support
+#
+
+# Only gcc >= 4.4 has Loongson-specific support
+cflags-$(CONFIG_CPU_LOONGSON2EF) += -Wa,--trap
+cflags-$(CONFIG_CPU_LOONGSON2E) += \
+ $(call cc-option,-march=loongson2e,-march=r4600)
+cflags-$(CONFIG_CPU_LOONGSON2F) += \
+ $(call cc-option,-march=loongson2f,-march=r4600)
+# Enable the workarounds for Loongson2f
+ifdef CONFIG_CPU_LOONGSON2F_WORKAROUNDS
+ ifeq ($(call as-option,-Wa$(comma)-mfix-loongson2f-nop,),)
+ $(error only binutils >= 2.20.2 has the needed option -mfix-loongson2f-nop)
+ else
+ cflags-$(CONFIG_CPU_NOP_WORKAROUNDS) += -Wa$(comma)-mfix-loongson2f-nop
+ endif
+ ifeq ($(call as-option,-Wa$(comma)-mfix-loongson2f-jump,),)
+ $(error only binutils >= 2.20.2 has the needed option -mfix-loongson2f-jump)
+ else
+ cflags-$(CONFIG_CPU_JUMP_WORKAROUNDS) += -Wa$(comma)-mfix-loongson2f-jump
+ endif
+endif
+
+#
+# Loongson Machines' Support
+#
+
+platform-$(CONFIG_MACH_LOONGSON2EF) += loongson2ef/
+cflags-$(CONFIG_MACH_LOONGSON2EF) += -I$(srctree)/arch/mips/include/asm/mach-loongson2ef -mno-branch-likely
+load-$(CONFIG_LEMOTE_FULOONG2E) += 0xffffffff80100000
+load-$(CONFIG_LEMOTE_MACH2F) += 0xffffffff80200000
diff --git a/arch/mips/loongson64/common/Makefile b/arch/mips/loongson2ef/common/Makefile
index 684624f61f5a..d5ab3e543ea3 100644
--- a/arch/mips/loongson64/common/Makefile
+++ b/arch/mips/loongson2ef/common/Makefile
@@ -3,14 +3,13 @@
# Makefile for loongson based machines.
#
-obj-y += setup.o init.o cmdline.o env.o time.o reset.o irq.o \
+obj-y += setup.o init.o env.o time.o reset.o irq.o \
bonito-irq.o mem.o machtype.o platform.o serial.o
obj-$(CONFIG_PCI) += pci.o
#
# Serial port support
#
-obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
obj-$(CONFIG_LOONGSON_UART_BASE) += uart_base.o
obj-$(CONFIG_LOONGSON_MC146818) += rtc.o
diff --git a/arch/mips/loongson64/common/bonito-irq.c b/arch/mips/loongson2ef/common/bonito-irq.c
index 82352cc25e4c..82352cc25e4c 100644
--- a/arch/mips/loongson64/common/bonito-irq.c
+++ b/arch/mips/loongson2ef/common/bonito-irq.c
diff --git a/arch/mips/loongson64/common/cs5536/Makefile b/arch/mips/loongson2ef/common/cs5536/Makefile
index b32b29661245..b32b29661245 100644
--- a/arch/mips/loongson64/common/cs5536/Makefile
+++ b/arch/mips/loongson2ef/common/cs5536/Makefile
diff --git a/arch/mips/loongson64/common/cs5536/cs5536_acc.c b/arch/mips/loongson2ef/common/cs5536/cs5536_acc.c
index ff50aae72916..ff50aae72916 100644
--- a/arch/mips/loongson64/common/cs5536/cs5536_acc.c
+++ b/arch/mips/loongson2ef/common/cs5536/cs5536_acc.c
diff --git a/arch/mips/loongson64/common/cs5536/cs5536_ehci.c b/arch/mips/loongson2ef/common/cs5536/cs5536_ehci.c
index bd4c39fe6109..bd4c39fe6109 100644
--- a/arch/mips/loongson64/common/cs5536/cs5536_ehci.c
+++ b/arch/mips/loongson2ef/common/cs5536/cs5536_ehci.c
diff --git a/arch/mips/loongson64/common/cs5536/cs5536_ide.c b/arch/mips/loongson2ef/common/cs5536/cs5536_ide.c
index bb933294b092..bb933294b092 100644
--- a/arch/mips/loongson64/common/cs5536/cs5536_ide.c
+++ b/arch/mips/loongson2ef/common/cs5536/cs5536_ide.c
diff --git a/arch/mips/loongson64/common/cs5536/cs5536_isa.c b/arch/mips/loongson2ef/common/cs5536/cs5536_isa.c
index 5ad38f86ee62..5ad38f86ee62 100644
--- a/arch/mips/loongson64/common/cs5536/cs5536_isa.c
+++ b/arch/mips/loongson2ef/common/cs5536/cs5536_isa.c
diff --git a/arch/mips/loongson64/common/cs5536/cs5536_mfgpt.c b/arch/mips/loongson2ef/common/cs5536/cs5536_mfgpt.c
index 30af1b7c7529..30af1b7c7529 100644
--- a/arch/mips/loongson64/common/cs5536/cs5536_mfgpt.c
+++ b/arch/mips/loongson2ef/common/cs5536/cs5536_mfgpt.c
diff --git a/arch/mips/loongson64/common/cs5536/cs5536_ohci.c b/arch/mips/loongson2ef/common/cs5536/cs5536_ohci.c
index 71a52b120317..71a52b120317 100644
--- a/arch/mips/loongson64/common/cs5536/cs5536_ohci.c
+++ b/arch/mips/loongson2ef/common/cs5536/cs5536_ohci.c
diff --git a/arch/mips/loongson64/common/cs5536/cs5536_pci.c b/arch/mips/loongson2ef/common/cs5536/cs5536_pci.c
index 202c89b568ba..202c89b568ba 100644
--- a/arch/mips/loongson64/common/cs5536/cs5536_pci.c
+++ b/arch/mips/loongson2ef/common/cs5536/cs5536_pci.c
diff --git a/arch/mips/loongson2ef/common/env.c b/arch/mips/loongson2ef/common/env.c
new file mode 100644
index 000000000000..6f20bdf9b242
--- /dev/null
+++ b/arch/mips/loongson2ef/common/env.c
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Based on Ocelot Linux port, which is
+ * Copyright 2001 MontaVista Software Inc.
+ * Author: jsun@mvista.com or jsun@junsun.net
+ *
+ * Copyright 2003 ICT CAS
+ * Author: Michael Guo <guoyi@ict.ac.cn>
+ *
+ * Copyright (C) 2007 Lemote Inc. & Institute of Computing Technology
+ * Author: Fuxin Zhang, zhangfx@lemote.com
+ *
+ * Copyright (C) 2009 Lemote Inc.
+ * Author: Wu Zhangjin, wuzhangjin@gmail.com
+ */
+#include <linux/export.h>
+#include <asm/bootinfo.h>
+#include <asm/fw/fw.h>
+#include <loongson.h>
+
+u32 cpu_clock_freq;
+EXPORT_SYMBOL(cpu_clock_freq);
+
+void __init prom_init_env(void)
+{
+ /* pmon passes arguments in 32bit pointers */
+ unsigned int processor_id;
+
+ cpu_clock_freq = fw_getenvl("cpuclock");
+ memsize = fw_getenvl("memsize");
+ highmemsize = fw_getenvl("highmemsize");
+
+ if (memsize == 0)
+ memsize = 256;
+
+ pr_info("memsize=%u, highmemsize=%u\n", memsize, highmemsize);
+
+ if (cpu_clock_freq == 0) {
+ processor_id = (&current_cpu_data)->processor_id;
+ switch (processor_id & PRID_REV_MASK) {
+ case PRID_REV_LOONGSON2E:
+ cpu_clock_freq = 533080000;
+ break;
+ case PRID_REV_LOONGSON2F:
+ cpu_clock_freq = 797000000;
+ break;
+ default:
+ cpu_clock_freq = 100000000;
+ break;
+ }
+ }
+ pr_info("CpuClock = %u\n", cpu_clock_freq);
+}
diff --git a/arch/mips/loongson64/common/init.c b/arch/mips/loongson2ef/common/init.c
index 912fe61c4fc7..45512178be77 100644
--- a/arch/mips/loongson64/common/init.c
+++ b/arch/mips/loongson2ef/common/init.c
@@ -9,6 +9,7 @@
#include <asm/traps.h>
#include <asm/smp-ops.h>
#include <asm/cacheflush.h>
+#include <asm/fw/fw.h>
#include <loongson.h>
@@ -32,22 +33,17 @@ void __init prom_init(void)
ioremap(LOONGSON_ADDRWINCFG_BASE, LOONGSON_ADDRWINCFG_SIZE);
#endif
- prom_init_cmdline();
+ fw_init_cmdline();
+ prom_init_machtype();
prom_init_env();
/* init base address of io space */
set_io_port_base((unsigned long)
ioremap(LOONGSON_PCIIO_BASE, LOONGSON_PCIIO_SIZE));
-
-#ifdef CONFIG_NUMA
- prom_init_numa_memory();
-#else
prom_init_memory();
-#endif
/*init the uart base address */
prom_init_uart_base();
- register_smp_ops(&loongson3_smp_ops);
board_nmi_handler_setup = mips_nmi_setup;
}
diff --git a/arch/mips/loongson64/common/irq.c b/arch/mips/loongson2ef/common/irq.c
index 0ea93c1c0a97..0ea93c1c0a97 100644
--- a/arch/mips/loongson64/common/irq.c
+++ b/arch/mips/loongson2ef/common/irq.c
diff --git a/arch/mips/loongson64/common/machtype.c b/arch/mips/loongson2ef/common/machtype.c
index 4e42d929f1c7..82f6de49f20f 100644
--- a/arch/mips/loongson64/common/machtype.c
+++ b/arch/mips/loongson2ef/common/machtype.c
@@ -23,7 +23,6 @@ static const char *system_types[] = {
[MACH_DEXXON_GDIUM2F10] = "dexxon-gdium-2f",
[MACH_LEMOTE_NAS] = "lemote-nas-2f",
[MACH_LEMOTE_LL2F] = "lemote-lynloong-2f",
- [MACH_LOONGSON_GENERIC] = "generic-loongson-machine",
[MACH_LOONGSON_END] = NULL,
};
diff --git a/arch/mips/loongson2ef/common/mem.c b/arch/mips/loongson2ef/common/mem.c
new file mode 100644
index 000000000000..ae21f1c62baa
--- /dev/null
+++ b/arch/mips/loongson2ef/common/mem.c
@@ -0,0 +1,62 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ */
+#include <linux/fs.h>
+#include <linux/fcntl.h>
+#include <linux/memblock.h>
+#include <linux/mm.h>
+
+#include <asm/bootinfo.h>
+
+#include <loongson.h>
+#include <mem.h>
+#include <pci.h>
+
+
+u32 memsize, highmemsize;
+
+void __init prom_init_memory(void)
+{
+ add_memory_region(0x0, (memsize << 20), BOOT_MEM_RAM);
+
+ add_memory_region(memsize << 20, LOONGSON_PCI_MEM_START - (memsize <<
+ 20), BOOT_MEM_RESERVED);
+
+#ifdef CONFIG_CPU_SUPPORTS_ADDRWINCFG
+ {
+ int bit;
+
+ bit = fls(memsize + highmemsize);
+ if (bit != ffs(memsize + highmemsize))
+ bit += 20;
+ else
+ bit = bit + 20 - 1;
+
+ /* set cpu window3 to map CPU to DDR: 2G -> 2G */
+ LOONGSON_ADDRWIN_CPUTODDR(ADDRWIN_WIN3, 0x80000000ul,
+ 0x80000000ul, (1 << bit));
+ mmiowb();
+ }
+#endif /* CONFIG_CPU_SUPPORTS_ADDRWINCFG */
+
+#ifdef CONFIG_64BIT
+ if (highmemsize > 0)
+ add_memory_region(LOONGSON_HIGHMEM_START,
+ highmemsize << 20, BOOT_MEM_RAM);
+
+ add_memory_region(LOONGSON_PCI_MEM_END + 1, LOONGSON_HIGHMEM_START -
+ LOONGSON_PCI_MEM_END - 1, BOOT_MEM_RESERVED);
+
+#endif /* CONFIG_64BIT */
+}
+
+/* override of arch/mips/mm/cache.c: __uncached_access */
+int __uncached_access(struct file *file, unsigned long addr)
+{
+ if (file->f_flags & O_DSYNC)
+ return 1;
+
+ return addr >= __pa(high_memory) ||
+ ((addr >= LOONGSON_MMIO_MEM_START) &&
+ (addr < LOONGSON_MMIO_MEM_END));
+}
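
The fls()/ffs() arithmetic sizes CPU window 3 as the smallest power-of-two region covering memsize + highmemsize MiB: an exact power of two maps to itself, everything else rounds up. A stand-alone check of that rounding (fls32() reimplements the kernel's fls() so the sketch compiles anywhere):

    #include <stdio.h>
    #include <strings.h>	/* ffs() */

    /* the kernel's fls(): 1-based index of the highest set bit */
    static int fls32(unsigned int x)
    {
        int r = 0;

        while (x) {
            x >>= 1;
            r++;
        }
        return r;
    }

    /* window size in bytes for a memory total given in MiB */
    static unsigned long window_bytes(unsigned int mbytes)
    {
        int bit = fls32(mbytes);

        if (bit != ffs(mbytes))	/* not a power of two: round up */
            bit += 20;
        else			/* exact power of two: keep it */
            bit = bit + 20 - 1;
        return 1UL << bit;
    }

    int main(void)
    {
        printf("256 MiB -> %lu MiB window\n", window_bytes(256) >> 20);
        printf("768 MiB -> %lu MiB window\n", window_bytes(768) >> 20);
        return 0;	/* prints 256 and 1024 */
    }
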
diff --git a/arch/mips/loongson64/common/pci.c b/arch/mips/loongson2ef/common/pci.c
index c47bb7bf3aa4..200916925e95 100644
--- a/arch/mips/loongson64/common/pci.c
+++ b/arch/mips/loongson2ef/common/pci.c
@@ -7,7 +7,6 @@
#include <pci.h>
#include <loongson.h>
-#include <boot_param.h>
static struct resource loongson_pci_mem_resource = {
.name = "pci memory space",
@@ -81,15 +80,8 @@ static int __init pcibios_init(void)
setup_pcimap();
loongson_pci_controller.io_map_base = mips_io_port_base;
-#ifdef CONFIG_LEFI_FIRMWARE_INTERFACE
- loongson_pci_mem_resource.start = loongson_sysconf.pci_mem_start_addr;
- loongson_pci_mem_resource.end = loongson_sysconf.pci_mem_end_addr;
-#endif
register_pci_controller(&loongson_pci_controller);
-#ifdef CONFIG_CPU_LOONGSON3
- sbx00_acpi_init();
-#endif
return 0;
}
diff --git a/arch/mips/loongson64/common/platform.c b/arch/mips/loongson2ef/common/platform.c
index 0084820cffaa..0084820cffaa 100644
--- a/arch/mips/loongson64/common/platform.c
+++ b/arch/mips/loongson2ef/common/platform.c
diff --git a/arch/mips/loongson64/common/pm.c b/arch/mips/loongson2ef/common/pm.c
index b8aed878d912..11f4cfd581fb 100644
--- a/arch/mips/loongson64/common/pm.c
+++ b/arch/mips/loongson2ef/common/pm.c
@@ -75,7 +75,7 @@ int __weak wakeup_loongson(void)
static void wait_for_wakeup_events(void)
{
while (!wakeup_loongson())
- LOONGSON_CHIPCFG(0) &= ~0x7;
+ writel(readl(LOONGSON_CHIPCFG) & ~0x7, LOONGSON_CHIPCFG);
}
/*
@@ -98,15 +98,16 @@ static void loongson_suspend_enter(void)
stop_perf_counters();
- cached_cpu_freq = LOONGSON_CHIPCFG(0);
+ cached_cpu_freq = readl(LOONGSON_CHIPCFG);
/* Put CPU into wait mode */
- LOONGSON_CHIPCFG(0) &= ~0x7;
+ writel(readl(LOONGSON_CHIPCFG) & ~0x7, LOONGSON_CHIPCFG);
/* wait for the given events to wakeup cpu from wait mode */
wait_for_wakeup_events();
- LOONGSON_CHIPCFG(0) = cached_cpu_freq;
+ writel(cached_cpu_freq, LOONGSON_CHIPCFG);
+
mmiowb();
}
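
LOONGSON_CHIPCFG(0) used to expand to a dereferenced pointer, so plain C assignments worked on it; the new definition is an address, and every access goes through readl()/writel(). The recurring read-modify-write could be captured in a helper like this (mmio_clear_bits() is a hypothetical sketch, not something this patch adds):

    #include <linux/io.h>

    /* clear @bits in a memory-mapped register, preserving the rest */
    static inline void mmio_clear_bits(void __iomem *reg, u32 bits)
    {
        writel(readl(reg) & ~bits, reg);
    }

    /* dropping into wait mode would then read:
     *	mmio_clear_bits(LOONGSON_CHIPCFG, 0x7);
     */
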
diff --git a/arch/mips/loongson64/common/reset.c b/arch/mips/loongson2ef/common/reset.c
index ce39e918e4d5..e7c87161ce00 100644
--- a/arch/mips/loongson64/common/reset.c
+++ b/arch/mips/loongson2ef/common/reset.c
@@ -13,7 +13,6 @@
#include <asm/reboot.h>
#include <loongson.h>
-#include <boot_param.h>
static inline void loongson_reboot(void)
{
@@ -35,26 +34,15 @@ static inline void loongson_reboot(void)
static void loongson_restart(char *command)
{
-#ifndef CONFIG_LEFI_FIRMWARE_INTERFACE
/* do preparation for reboot */
mach_prepare_reboot();
/* reboot via jumping to boot base address */
loongson_reboot();
-#else
- void (*fw_restart)(void) = (void *)loongson_sysconf.restart_addr;
-
- fw_restart();
- while (1) {
- if (cpu_wait)
- cpu_wait();
- }
-#endif
}
static void loongson_poweroff(void)
{
-#ifndef CONFIG_LEFI_FIRMWARE_INTERFACE
mach_prepare_shutdown();
/*
@@ -62,15 +50,6 @@ static void loongson_poweroff(void)
* a generic delay loop, machine_hang(), so simply return.
*/
return;
-#else
- void (*fw_poweroff)(void) = (void *)loongson_sysconf.poweroff_addr;
-
- fw_poweroff();
- while (1) {
- if (cpu_wait)
- cpu_wait();
- }
-#endif
}
static void loongson_halt(void)
diff --git a/arch/mips/loongson64/common/rtc.c b/arch/mips/loongson2ef/common/rtc.c
index 8d7628c0f513..8d7628c0f513 100644
--- a/arch/mips/loongson64/common/rtc.c
+++ b/arch/mips/loongson2ef/common/rtc.c
diff --git a/arch/mips/loongson2ef/common/serial.c b/arch/mips/loongson2ef/common/serial.c
new file mode 100644
index 000000000000..ac4f6e3ebc3e
--- /dev/null
+++ b/arch/mips/loongson2ef/common/serial.c
@@ -0,0 +1,85 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2007 Ralf Baechle (ralf@linux-mips.org)
+ *
+ * Copyright (C) 2009 Lemote, Inc.
+ * Author: Yan hua (yanhua@lemote.com)
+ * Author: Wu Zhangjin (wuzhangjin@gmail.com)
+ */
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/serial_8250.h>
+
+#include <asm/bootinfo.h>
+
+#include <loongson.h>
+#include <machine.h>
+
+#define PORT(int, clk) \
+{ \
+ .irq = int, \
+ .uartclk = clk, \
+ .iotype = UPIO_PORT, \
+ .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST, \
+ .regshift = 0, \
+}
+
+#define PORT_M(int, clk) \
+{ \
+ .irq = MIPS_CPU_IRQ_BASE + (int), \
+ .uartclk = clk, \
+ .iotype = UPIO_MEM, \
+ .membase = (void __iomem *)NULL, \
+ .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST, \
+ .regshift = 0, \
+}
+
+static struct plat_serial8250_port uart8250_data[MACH_LOONGSON_END + 1] = {
+ [MACH_LOONGSON_UNKNOWN] = {},
+ [MACH_LEMOTE_FL2E] = PORT(4, 1843200),
+ [MACH_LEMOTE_FL2F] = PORT(3, 1843200),
+ [MACH_LEMOTE_ML2F7] = PORT_M(3, 3686400),
+ [MACH_LEMOTE_YL2F89] = PORT_M(3, 3686400),
+ [MACH_DEXXON_GDIUM2F10] = PORT_M(3, 3686400),
+ [MACH_LEMOTE_NAS] = PORT_M(3, 3686400),
+ [MACH_LEMOTE_LL2F] = PORT(3, 1843200),
+ [MACH_LOONGSON_END] = {},
+};
+
+static struct platform_device uart8250_device = {
+ .name = "serial8250",
+ .id = PLAT8250_DEV_PLATFORM,
+};
+
+static int __init serial_init(void)
+{
+ unsigned char iotype;
+
+ iotype = uart8250_data[mips_machtype].iotype;
+
+ if (UPIO_MEM == iotype) {
+ uart8250_data[mips_machtype].mapbase =
+ loongson_uart_base;
+ uart8250_data[mips_machtype].membase =
+ (void __iomem *)_loongson_uart_base;
+	} else if (UPIO_PORT == iotype)
+ uart8250_data[mips_machtype].iobase =
+ loongson_uart_base - LOONGSON_PCIIO_BASE;
+
+ memset(&uart8250_data[mips_machtype + 1], 0,
+ sizeof(struct plat_serial8250_port));
+ uart8250_device.dev.platform_data = &uart8250_data[mips_machtype];
+
+ return platform_device_register(&uart8250_device);
+}
+module_init(serial_init);
+
+static void __exit serial_exit(void)
+{
+ platform_device_unregister(&uart8250_device);
+}
+module_exit(serial_exit);
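
With MACH_LOONGSON_GENERIC gone and at most one UART per 2E/2F board, the table shrinks to one plat_serial8250_port per machtype, and the entry after the selected one is zeroed so the 8250 core sees a terminator. A minimal sketch of the same registration shape (iobase, IRQ and clock are made-up values):

    #include <linux/platform_device.h>
    #include <linux/serial_8250.h>

    /* one live entry plus a zeroed terminator, as in the table above */
    static struct plat_serial8250_port demo_port[] = {
        {
            .iobase		= 0x3f8,	/* made-up address */
            .irq		= 4,		/* made-up IRQ */
            .uartclk	= 1843200,
            .iotype		= UPIO_PORT,
            .flags		= UPF_BOOT_AUTOCONF | UPF_SKIP_TEST,
        },
        { },
    };

    static struct platform_device demo_uart = {
        .name			= "serial8250",
        .id			= PLAT8250_DEV_PLATFORM,
        .dev.platform_data	= demo_port,
    };

    /* platform_device_register(&demo_uart) then binds the 8250 core */
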
diff --git a/arch/mips/loongson64/common/setup.c b/arch/mips/loongson2ef/common/setup.c
index bc2da4c140c4..4fd27f4f90ed 100644
--- a/arch/mips/loongson64/common/setup.c
+++ b/arch/mips/loongson2ef/common/setup.c
@@ -11,11 +11,6 @@
#include <loongson.h>
-#ifdef CONFIG_VT
-#include <linux/console.h>
-#include <linux/screen_info.h>
-#endif
-
static void wbflush_loongson(void)
{
asm(".set\tpush\n\t"
@@ -32,20 +27,4 @@ EXPORT_SYMBOL(__wbflush);
void __init plat_mem_setup(void)
{
-#ifdef CONFIG_VT
-#if defined(CONFIG_VGA_CONSOLE)
- conswitchp = &vga_con;
-
- screen_info = (struct screen_info) {
- .orig_x = 0,
- .orig_y = 25,
- .orig_video_cols = 80,
- .orig_video_lines = 25,
- .orig_video_isVGA = VIDEO_TYPE_VGAC,
- .orig_video_points = 16,
- };
-#elif defined(CONFIG_DUMMY_CONSOLE)
- conswitchp = &dummy_con;
-#endif
-#endif
}
diff --git a/arch/mips/loongson64/common/time.c b/arch/mips/loongson2ef/common/time.c
index e78760ce475b..585741af42a9 100644
--- a/arch/mips/loongson64/common/time.c
+++ b/arch/mips/loongson2ef/common/time.c
@@ -18,11 +18,7 @@ void __init plat_time_init(void)
/* setup mips r4k timer */
mips_hpt_frequency = cpu_clock_freq / 2;
-#ifdef CONFIG_RS780_HPET
- setup_hpet_timer();
-#else
setup_mfgpt0_timer();
-#endif
}
void read_persistent_clock64(struct timespec64 *ts)
diff --git a/arch/mips/loongson64/common/uart_base.c b/arch/mips/loongson2ef/common/uart_base.c
index e88d937f10fe..522bea6ad7b0 100644
--- a/arch/mips/loongson64/common/uart_base.c
+++ b/arch/mips/loongson2ef/common/uart_base.c
@@ -6,13 +6,14 @@
#include <linux/export.h>
#include <asm/bootinfo.h>
+#include <asm/setup.h>
#include <loongson.h>
/* raw */
-unsigned long loongson_uart_base[MAX_UARTS] = {};
+unsigned long loongson_uart_base;
/* ioremapped */
-unsigned long _loongson_uart_base[MAX_UARTS] = {};
+unsigned long _loongson_uart_base;
EXPORT_SYMBOL(loongson_uart_base);
EXPORT_SYMBOL(_loongson_uart_base);
@@ -20,16 +21,12 @@ EXPORT_SYMBOL(_loongson_uart_base);
void prom_init_loongson_uart_base(void)
{
switch (mips_machtype) {
- case MACH_LOONGSON_GENERIC:
- /* The CPU provided serial port (CPU) */
- loongson_uart_base[0] = LOONGSON_REG_BASE + 0x1e0;
- break;
case MACH_LEMOTE_FL2E:
- loongson_uart_base[0] = LOONGSON_PCIIO_BASE + 0x3f8;
+ loongson_uart_base = LOONGSON_PCIIO_BASE + 0x3f8;
break;
case MACH_LEMOTE_FL2F:
case MACH_LEMOTE_LL2F:
- loongson_uart_base[0] = LOONGSON_PCIIO_BASE + 0x2f8;
+ loongson_uart_base = LOONGSON_PCIIO_BASE + 0x2f8;
break;
case MACH_LEMOTE_ML2F7:
case MACH_LEMOTE_YL2F89:
@@ -37,10 +34,10 @@ void prom_init_loongson_uart_base(void)
case MACH_LEMOTE_NAS:
default:
/* The CPU provided serial port (LPC) */
- loongson_uart_base[0] = LOONGSON_LIO1_BASE + 0x3f8;
+ loongson_uart_base = LOONGSON_LIO1_BASE + 0x3f8;
break;
}
- _loongson_uart_base[0] =
- (unsigned long)ioremap_nocache(loongson_uart_base[0], 8);
+ _loongson_uart_base = TO_UNCAC(loongson_uart_base);
+ setup_8250_early_printk_port(_loongson_uart_base, 0, 1024);
}
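
Mapping now goes through TO_UNCAC() instead of ioremap_nocache(): the UART sits at a fixed physical address, so its uncached alias can be computed directly, early enough for setup_8250_early_printk_port() to replace the deleted hand-rolled early_printk code. A model of that address arithmetic (the XKPHYS uncached base constant is an assumption of this sketch, matching the usual MIPS64 layout; no TLB entry is involved):

    #include <stdio.h>

    int main(void)
    {
        unsigned long phys = 0x1fd002f8;	/* example UART address */
        unsigned long virt = 0x9000000000000000UL | phys; /* ~ TO_UNCAC(phys) */

        printf("phys %#lx -> uncached %#lx\n", phys, virt);
        return 0;
    }
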
diff --git a/arch/mips/loongson64/fuloong-2e/Makefile b/arch/mips/loongson2ef/fuloong-2e/Makefile
index bb58edb3bea7..bb58edb3bea7 100644
--- a/arch/mips/loongson64/fuloong-2e/Makefile
+++ b/arch/mips/loongson2ef/fuloong-2e/Makefile
diff --git a/arch/mips/loongson64/fuloong-2e/dma.c b/arch/mips/loongson2ef/fuloong-2e/dma.c
index e122292bf666..e122292bf666 100644
--- a/arch/mips/loongson64/fuloong-2e/dma.c
+++ b/arch/mips/loongson2ef/fuloong-2e/dma.c
diff --git a/arch/mips/loongson64/fuloong-2e/irq.c b/arch/mips/loongson2ef/fuloong-2e/irq.c
index 32278e7bf85c..32278e7bf85c 100644
--- a/arch/mips/loongson64/fuloong-2e/irq.c
+++ b/arch/mips/loongson2ef/fuloong-2e/irq.c
diff --git a/arch/mips/loongson64/fuloong-2e/reset.c b/arch/mips/loongson2ef/fuloong-2e/reset.c
index 8273de1cf4bb..8273de1cf4bb 100644
--- a/arch/mips/loongson64/fuloong-2e/reset.c
+++ b/arch/mips/loongson2ef/fuloong-2e/reset.c
diff --git a/arch/mips/loongson64/lemote-2f/Makefile b/arch/mips/loongson2ef/lemote-2f/Makefile
index 881a0ec06d1f..881a0ec06d1f 100644
--- a/arch/mips/loongson64/lemote-2f/Makefile
+++ b/arch/mips/loongson2ef/lemote-2f/Makefile
diff --git a/arch/mips/loongson64/lemote-2f/clock.c b/arch/mips/loongson2ef/lemote-2f/clock.c
index 8281334df9c8..414f282c8ab5 100644
--- a/arch/mips/loongson64/lemote-2f/clock.c
+++ b/arch/mips/loongson2ef/lemote-2f/clock.c
@@ -15,7 +15,7 @@
#include <linux/spinlock.h>
#include <asm/clock.h>
-#include <asm/mach-loongson64/loongson.h>
+#include <asm/mach-loongson2ef/loongson.h>
static LIST_HEAD(clock_list);
static DEFINE_SPINLOCK(clock_lock);
@@ -118,9 +118,9 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
clk->rate = rate;
- regval = LOONGSON_CHIPCFG(0);
+ regval = readl(LOONGSON_CHIPCFG);
regval = (regval & ~0x7) | (pos->driver_data - 1);
- LOONGSON_CHIPCFG(0) = regval;
+ writel(regval, LOONGSON_CHIPCFG);
return ret;
}
diff --git a/arch/mips/loongson64/lemote-2f/dma.c b/arch/mips/loongson2ef/lemote-2f/dma.c
index abf0e39d7e46..abf0e39d7e46 100644
--- a/arch/mips/loongson64/lemote-2f/dma.c
+++ b/arch/mips/loongson2ef/lemote-2f/dma.c
diff --git a/arch/mips/loongson64/lemote-2f/ec_kb3310b.c b/arch/mips/loongson2ef/lemote-2f/ec_kb3310b.c
index d138220e96a2..d138220e96a2 100644
--- a/arch/mips/loongson64/lemote-2f/ec_kb3310b.c
+++ b/arch/mips/loongson2ef/lemote-2f/ec_kb3310b.c
diff --git a/arch/mips/loongson64/lemote-2f/ec_kb3310b.h b/arch/mips/loongson2ef/lemote-2f/ec_kb3310b.h
index aecdbc9c875a..aecdbc9c875a 100644
--- a/arch/mips/loongson64/lemote-2f/ec_kb3310b.h
+++ b/arch/mips/loongson2ef/lemote-2f/ec_kb3310b.h
diff --git a/arch/mips/loongson64/lemote-2f/irq.c b/arch/mips/loongson2ef/lemote-2f/irq.c
index c58a044c6c07..c58a044c6c07 100644
--- a/arch/mips/loongson64/lemote-2f/irq.c
+++ b/arch/mips/loongson2ef/lemote-2f/irq.c
diff --git a/arch/mips/loongson64/lemote-2f/machtype.c b/arch/mips/loongson2ef/lemote-2f/machtype.c
index 9462a3ab57be..9462a3ab57be 100644
--- a/arch/mips/loongson64/lemote-2f/machtype.c
+++ b/arch/mips/loongson2ef/lemote-2f/machtype.c
diff --git a/arch/mips/loongson64/lemote-2f/pm.c b/arch/mips/loongson2ef/lemote-2f/pm.c
index 3d0027229e3c..3d0027229e3c 100644
--- a/arch/mips/loongson64/lemote-2f/pm.c
+++ b/arch/mips/loongson2ef/lemote-2f/pm.c
diff --git a/arch/mips/loongson64/lemote-2f/reset.c b/arch/mips/loongson2ef/lemote-2f/reset.c
index 0db0934302ea..197dae4ffd23 100644
--- a/arch/mips/loongson64/lemote-2f/reset.c
+++ b/arch/mips/loongson2ef/lemote-2f/reset.c
@@ -24,7 +24,7 @@ static void reset_cpu(void)
* reset cpu to full speed, this is needed when enabling cpu frequency
* scalling
*/
- LOONGSON_CHIPCFG(0) |= 0x7;
+ writel(readl(LOONGSON_CHIPCFG) | 0x7, LOONGSON_CHIPCFG);
}
/* reset support for fuloong2f */
diff --git a/arch/mips/loongson32/Kconfig b/arch/mips/loongson32/Kconfig
index 6dacc1438906..e27879b4813b 100644
--- a/arch/mips/loongson32/Kconfig
+++ b/arch/mips/loongson32/Kconfig
@@ -38,7 +38,7 @@ endchoice
menuconfig CEVT_CSRC_LS1X
bool "Use PWM Timer for clockevent/clocksource"
select MIPS_EXTERNAL_TIMER
- depends on CPU_LOONGSON1
+ depends on CPU_LOONGSON32
help
This option changes the default clockevent/clocksource to PWM Timer,
and is required by Loongson1 CPUFreq support.
diff --git a/arch/mips/loongson32/Platform b/arch/mips/loongson32/Platform
index 333215593092..7f8e342f1ef5 100644
--- a/arch/mips/loongson32/Platform
+++ b/arch/mips/loongson32/Platform
@@ -1,4 +1,4 @@
-cflags-$(CONFIG_CPU_LOONGSON1) += -march=mips32r2 -Wa,--trap
+cflags-$(CONFIG_CPU_LOONGSON32) += -march=mips32r2 -Wa,--trap
platform-$(CONFIG_MACH_LOONGSON32) += loongson32/
cflags-$(CONFIG_MACH_LOONGSON32) += -I$(srctree)/arch/mips/include/asm/mach-loongson32
-load-$(CONFIG_CPU_LOONGSON1) += 0xffffffff80200000
+load-$(CONFIG_CPU_LOONGSON32) += 0xffffffff80200000
diff --git a/arch/mips/loongson32/common/prom.c b/arch/mips/loongson32/common/prom.c
index c4e043ee53ff..73dd25142484 100644
--- a/arch/mips/loongson32/common/prom.c
+++ b/arch/mips/loongson32/common/prom.c
@@ -5,63 +5,25 @@
* Modified from arch/mips/pnx833x/common/prom.c.
*/
+#include <linux/io.h>
+#include <linux/init.h>
#include <linux/serial_reg.h>
#include <asm/bootinfo.h>
+#include <asm/fw/fw.h>
#include <loongson1.h>
-#include <prom.h>
-int prom_argc;
-char **prom_argv, **prom_envp;
-unsigned long memsize, highmemsize;
-
-char *prom_getenv(char *envname)
-{
- char **env = prom_envp;
- int i;
-
- i = strlen(envname);
-
- while (*env) {
- if (strncmp(envname, *env, i) == 0 && *(*env + i) == '=')
- return *env + i + 1;
- env++;
- }
-
- return 0;
-}
-
-static inline unsigned long env_or_default(char *env, unsigned long dfl)
-{
- char *str = prom_getenv(env);
- return str ? simple_strtol(str, 0, 0) : dfl;
-}
-
-void __init prom_init_cmdline(void)
-{
- char *c = &(arcs_cmdline[0]);
- int i;
-
- for (i = 1; i < prom_argc; i++) {
- strcpy(c, prom_argv[i]);
- c += strlen(prom_argv[i]);
- if (i < prom_argc - 1)
- *c++ = ' ';
- }
- *c = 0;
-}
+unsigned long memsize;
void __init prom_init(void)
{
void __iomem *uart_base;
- prom_argc = fw_arg0;
- prom_argv = (char **)fw_arg1;
- prom_envp = (char **)fw_arg2;
- prom_init_cmdline();
+ fw_init_cmdline();
- memsize = env_or_default("memsize", DEFAULT_MEMSIZE);
- highmemsize = env_or_default("highmemsize", 0x0);
+ memsize = fw_getenvl("memsize");
+	if (!memsize)
+ memsize = DEFAULT_MEMSIZE;
if (strstr(arcs_cmdline, "console=ttyS3"))
uart_base = ioremap_nocache(LS1X_UART3_BASE, 0x0f);
@@ -77,3 +39,8 @@ void __init prom_init(void)
void __init prom_free_prom_memory(void)
{
}
+
+void __init plat_mem_setup(void)
+{
+ add_memory_region(0x0, (memsize << 20), BOOT_MEM_RAM);
+}
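
fw_getenvl() does what the removed prom_getenv()/env_or_default() pair did: scan the firmware's "name=value" environment strings and parse the value, returning 0 when the variable is absent, hence the explicit DEFAULT_MEMSIZE fallback above. A stand-alone model of the lookup (not the kernel's implementation):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* model of a fw_getenvl()-style environment lookup */
    static long getenvl(char **envp, const char *name)
    {
        size_t n = strlen(name);

        for (; *envp; envp++)
            if (!strncmp(*envp, name, n) && (*envp)[n] == '=')
                return strtol(*envp + n + 1, NULL, 0);
        return 0;
    }

    int main(void)
    {
        char *env[] = { "memsize=256", "cpuclock=797000000", NULL };

        printf("memsize=%ld\n", getenvl(env, "memsize"));
        printf("foo=%ld\n", getenvl(env, "foo"));	/* absent -> 0 */
        return 0;
    }
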
diff --git a/arch/mips/loongson32/common/setup.c b/arch/mips/loongson32/common/setup.c
index 8b03e18fc4d8..4733fe037176 100644
--- a/arch/mips/loongson32/common/setup.c
+++ b/arch/mips/loongson32/common/setup.c
@@ -3,15 +3,12 @@
* Copyright (c) 2011 Zhang, Keguang <keguang.zhang@gmail.com>
*/
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/smp.h>
+#include <asm/cpu-info.h>
#include <asm/bootinfo.h>
-#include <prom.h>
-
-void __init plat_mem_setup(void)
-{
- add_memory_region(0x0, (memsize << 20), BOOT_MEM_RAM);
-}
-
const char *get_system_type(void)
{
unsigned int processor_id = (&current_cpu_data)->processor_id;
diff --git a/arch/mips/loongson64/Kconfig b/arch/mips/loongson64/Kconfig
index 4c14a11525f4..48b29c198acf 100644
--- a/arch/mips/loongson64/Kconfig
+++ b/arch/mips/loongson64/Kconfig
@@ -1,119 +1,9 @@
# SPDX-License-Identifier: GPL-2.0
if MACH_LOONGSON64
-choice
- prompt "Machine Type"
-
-config LEMOTE_FULOONG2E
- bool "Lemote Fuloong(2e) mini-PC"
- select ARCH_SPARSEMEM_ENABLE
- select ARCH_MIGHT_HAVE_PC_PARPORT
- select ARCH_MIGHT_HAVE_PC_SERIO
- select CEVT_R4K
- select CSRC_R4K
- select SYS_HAS_CPU_LOONGSON2E
- select DMA_NONCOHERENT
- select BOOT_ELF32
- select BOARD_SCACHE
- select HAVE_PCI
- select I8259
- select ISA
- select IRQ_MIPS_CPU
- select SYS_SUPPORTS_64BIT_KERNEL
- select SYS_SUPPORTS_LITTLE_ENDIAN
- select SYS_SUPPORTS_HIGHMEM
- select SYS_HAS_EARLY_PRINTK
- select GENERIC_ISA_DMA_SUPPORT_BROKEN
- select CPU_HAS_WB
- select LOONGSON_MC146818
- help
- Lemote Fuloong(2e) mini-PC board based on the Chinese Loongson-2E CPU and
- an FPGA northbridge
-
- Lemote Fuloong(2e) mini PC have a VIA686B south bridge.
-
-config LEMOTE_MACH2F
- bool "Lemote Loongson 2F family machines"
- select ARCH_SPARSEMEM_ENABLE
- select ARCH_MIGHT_HAVE_PC_PARPORT
- select ARCH_MIGHT_HAVE_PC_SERIO
- select BOARD_SCACHE
- select BOOT_ELF32
- select CEVT_R4K if ! MIPS_EXTERNAL_TIMER
- select CPU_HAS_WB
- select CS5536
- select CSRC_R4K if ! MIPS_EXTERNAL_TIMER
- select DMA_NONCOHERENT
- select GENERIC_ISA_DMA_SUPPORT_BROKEN
- select HAVE_CLK
- select HAVE_PCI
- select I8259
- select IRQ_MIPS_CPU
- select ISA
- select SYS_HAS_CPU_LOONGSON2F
- select SYS_HAS_EARLY_PRINTK
- select SYS_SUPPORTS_64BIT_KERNEL
- select SYS_SUPPORTS_HIGHMEM
- select SYS_SUPPORTS_LITTLE_ENDIAN
- select LOONGSON_MC146818
- help
- Lemote Loongson 2F family machines utilize the 2F revision of
- Loongson processor and the AMD CS5536 south bridge.
-
- These family machines include fuloong2f mini PC, yeeloong2f notebook,
- LingLoong allinone PC and so forth.
-
-config LOONGSON_MACH3X
- bool "Generic Loongson 3 family machines"
- select ARCH_SPARSEMEM_ENABLE
- select ARCH_MIGHT_HAVE_PC_PARPORT
- select ARCH_MIGHT_HAVE_PC_SERIO
- select GENERIC_ISA_DMA_SUPPORT_BROKEN
- select BOOT_ELF32
- select BOARD_SCACHE
- select CSRC_R4K
- select CEVT_R4K
- select CPU_HAS_WB
- select FORCE_PCI
- select ISA
- select I8259
- select IRQ_MIPS_CPU
- select NR_CPUS_DEFAULT_4
- select SYS_HAS_CPU_LOONGSON3
- select SYS_HAS_EARLY_PRINTK
- select SYS_SUPPORTS_SMP
- select SYS_SUPPORTS_HOTPLUG_CPU
- select SYS_SUPPORTS_NUMA
- select SYS_SUPPORTS_64BIT_KERNEL
- select SYS_SUPPORTS_HIGHMEM
- select SYS_SUPPORTS_LITTLE_ENDIAN
- select LOONGSON_MC146818
- select ZONE_DMA32
- select LEFI_FIRMWARE_INTERFACE
- help
- Generic Loongson 3 family machines utilize the 3A/3B revision
- of Loongson processor and RS780/SBX00 chipset.
-endchoice
-
-config CS5536
- bool
-
-config CS5536_MFGPT
- bool "CS5536 MFGPT Timer"
- depends on CS5536 && !HIGH_RES_TIMERS
- select MIPS_EXTERNAL_TIMER
- help
- This option enables the mfgpt0 timer of AMD CS5536. With this timer
- switched on you can not use high resolution timers.
-
- If you want to enable the Loongson2 CPUFreq Driver, Please enable
- this option at first, otherwise, You will get wrong system time.
-
- If unsure, say Yes.
-
config RS780_HPET
bool "RS780/SBX00 HPET Timer"
- depends on LOONGSON_MACH3X
+ depends on MACH_LOONGSON64
select MIPS_EXTERNAL_TIMER
help
This option enables the hpet timer of AMD RS780/SBX00.
@@ -123,16 +13,9 @@ config RS780_HPET
If unsure, say Yes.
-config LOONGSON_UART_BASE
- bool
- default y
- depends on EARLY_PRINTK || SERIAL_8250
config LOONGSON_MC146818
bool
default n
-config LEFI_FIRMWARE_INTERFACE
- bool
-
endif # MACH_LOONGSON64
diff --git a/arch/mips/loongson64/Makefile b/arch/mips/loongson64/Makefile
index 1a5df773707d..7821891bc5d0 100644
--- a/arch/mips/loongson64/Makefile
+++ b/arch/mips/loongson64/Makefile
@@ -1,24 +1,13 @@
# SPDX-License-Identifier: GPL-2.0-only
#
-# Common code for all Loongson based systems
+# Makefile for Loongson-3 family machines
#
+obj-$(CONFIG_MACH_LOONGSON64) += irq.o cop2-ex.o platform.o acpi_init.o dma.o \
+				setup.o init.o env.o time.o reset.o
+
-obj-$(CONFIG_MACH_LOONGSON64) += common/
-
-#
-# Lemote Fuloong mini-PC (Loongson 2E-based)
-#
-
-obj-$(CONFIG_LEMOTE_FULOONG2E) += fuloong-2e/
-
-#
-# Lemote loongson2f family machines
-#
-
-obj-$(CONFIG_LEMOTE_MACH2F) += lemote-2f/
-
-#
-# All Loongson-3 family machines
-#
-
-obj-$(CONFIG_CPU_LOONGSON3) += loongson-3/
+obj-$(CONFIG_SMP) += smp.o
+obj-$(CONFIG_NUMA) += numa.o
+obj-$(CONFIG_RS780_HPET) += hpet.o
+obj-$(CONFIG_PCI) += pci.o
+obj-$(CONFIG_LOONGSON_MC146818) += rtc.o
+obj-$(CONFIG_SUSPEND) += pm.o
diff --git a/arch/mips/loongson64/Platform b/arch/mips/loongson64/Platform
index 9f79908f5063..d5eb94c9edb4 100644
--- a/arch/mips/loongson64/Platform
+++ b/arch/mips/loongson64/Platform
@@ -2,32 +2,13 @@
# Loongson Processors' Support
#
-# Only gcc >= 4.4 have Loongson specific support
-cflags-$(CONFIG_CPU_LOONGSON2) += -Wa,--trap
-cflags-$(CONFIG_CPU_LOONGSON2E) += \
- $(call cc-option,-march=loongson2e,-march=r4600)
-cflags-$(CONFIG_CPU_LOONGSON2F) += \
- $(call cc-option,-march=loongson2f,-march=r4600)
-# Enable the workarounds for Loongson2f
-ifdef CONFIG_CPU_LOONGSON2F_WORKAROUNDS
- ifeq ($(call as-option,-Wa$(comma)-mfix-loongson2f-nop,),)
- $(error only binutils >= 2.20.2 have needed option -mfix-loongson2f-nop)
- else
- cflags-$(CONFIG_CPU_NOP_WORKAROUNDS) += -Wa$(comma)-mfix-loongson2f-nop
- endif
- ifeq ($(call as-option,-Wa$(comma)-mfix-loongson2f-jump,),)
- $(error only binutils >= 2.20.2 have needed option -mfix-loongson2f-jump)
- else
- cflags-$(CONFIG_CPU_JUMP_WORKAROUNDS) += -Wa$(comma)-mfix-loongson2f-jump
- endif
-endif
-cflags-$(CONFIG_CPU_LOONGSON3) += -Wa,--trap
+cflags-$(CONFIG_CPU_LOONGSON64) += -Wa,--trap
#
# Some versions of binutils, not currently mainline as of 2019/02/04, support
# an -mfix-loongson3-llsc flag which emits a sync prior to each ll instruction
-# to work around a CPU bug (see loongson_llsc_mb() in asm/barrier.h for a
+# to work around a CPU bug (see __SYNC_loongson3_war in asm/sync.h for a
# description).
#
# We disable this in order to prevent the assembler meddling with the
@@ -44,7 +25,7 @@ cflags-$(CONFIG_CPU_LOONGSON3) += -Wa,--trap
# binutils does not merge support for the flag then we can revisit & remove
# this later - for now it ensures vendor toolchains don't cause problems.
#
-cflags-$(CONFIG_CPU_LOONGSON3) += $(call as-option,-Wa$(comma)-mno-fix-loongson3-llsc,)
+cflags-$(CONFIG_CPU_LOONGSON64) += $(call as-option,-Wa$(comma)-mno-fix-loongson3-llsc,)
#
# binutils from v2.25 on and gcc starting from v4.9.0 treat -march=loongson3a
@@ -55,14 +36,14 @@ cflags-$(CONFIG_CPU_LOONGSON3) += $(call as-option,-Wa$(comma)-mno-fix-loongson3
#
ifeq ($(call cc-ifversion, -ge, 0409, y), y)
ifeq ($(call ld-ifversion, -ge, 225000000, y), y)
- cflags-$(CONFIG_CPU_LOONGSON3) += \
+ cflags-$(CONFIG_CPU_LOONGSON64) += \
$(call cc-option,-march=loongson3a -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64)
else
- cflags-$(CONFIG_CPU_LOONGSON3) += \
+ cflags-$(CONFIG_CPU_LOONGSON64) += \
$(call cc-option,-march=mips64r2,-mips64r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64)
endif
else
- cflags-$(CONFIG_CPU_LOONGSON3) += \
+ cflags-$(CONFIG_CPU_LOONGSON64) += \
$(call cc-option,-march=mips64r2,-mips64r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64)
endif
@@ -76,6 +57,4 @@ cflags-y += $(call cc-option,-mno-loongson-mmi)
platform-$(CONFIG_MACH_LOONGSON64) += loongson64/
cflags-$(CONFIG_MACH_LOONGSON64) += -I$(srctree)/arch/mips/include/asm/mach-loongson64 -mno-branch-likely
-load-$(CONFIG_LEMOTE_FULOONG2E) += 0xffffffff80100000
-load-$(CONFIG_LEMOTE_MACH2F) += 0xffffffff80200000
-load-$(CONFIG_LOONGSON_MACH3X) += 0xffffffff80200000
+load-$(CONFIG_CPU_LOONGSON64) += 0xffffffff80200000
diff --git a/arch/mips/loongson64/loongson-3/acpi_init.c b/arch/mips/loongson64/acpi_init.c
index 8d7c119ddf91..8d7c119ddf91 100644
--- a/arch/mips/loongson64/loongson-3/acpi_init.c
+++ b/arch/mips/loongson64/acpi_init.c
diff --git a/arch/mips/loongson64/common/cmdline.c b/arch/mips/loongson64/common/cmdline.c
deleted file mode 100644
index a735460682cf..000000000000
--- a/arch/mips/loongson64/common/cmdline.c
+++ /dev/null
@@ -1,44 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Based on Ocelot Linux port, which is
- * Copyright 2001 MontaVista Software Inc.
- * Author: jsun@mvista.com or jsun@junsun.net
- *
- * Copyright 2003 ICT CAS
- * Author: Michael Guo <guoyi@ict.ac.cn>
- *
- * Copyright (C) 2007 Lemote Inc. & Institute of Computing Technology
- * Author: Fuxin Zhang, zhangfx@lemote.com
- *
- * Copyright (C) 2009 Lemote Inc.
- * Author: Wu Zhangjin, wuzhangjin@gmail.com
- */
-#include <asm/bootinfo.h>
-
-#include <loongson.h>
-
-void __init prom_init_cmdline(void)
-{
- int prom_argc;
- /* pmon passes arguments in 32bit pointers */
- int *_prom_argv;
- int i;
- long l;
-
- /* firmware arguments are initialized in head.S */
- prom_argc = fw_arg0;
- _prom_argv = (int *)fw_arg1;
-
- /* arg[0] is "g", the rest is boot parameters */
- arcs_cmdline[0] = '\0';
- for (i = 1; i < prom_argc; i++) {
- l = (long)_prom_argv[i];
- if (strlen(arcs_cmdline) + strlen(((char *)l) + 1)
- >= sizeof(arcs_cmdline))
- break;
- strcat(arcs_cmdline, ((char *)l));
- strcat(arcs_cmdline, " ");
- }
-
- prom_init_machtype();
-}
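
The generic replacement, fw_init_cmdline() in arch/mips/fw/lib/cmdline.c, performs the same join of the firmware argv into arcs_cmdline. Modeled stand-alone with ordinary pointers instead of PMON's 32-bit ones:

    #include <stdio.h>
    #include <string.h>

    static char cmdline[4096];

    /* argv[0] is the boot command itself; the rest is joined with spaces */
    static void init_cmdline(int argc, char **argv)
    {
        int i;

        cmdline[0] = '\0';
        for (i = 1; i < argc; i++) {
            if (strlen(cmdline) + strlen(argv[i]) + 2 > sizeof(cmdline))
                break;
            strcat(cmdline, argv[i]);
            if (i < argc - 1)
                strcat(cmdline, " ");
        }
    }

    int main(int argc, char **argv)
    {
        init_cmdline(argc, argv);
        puts(cmdline);
        return 0;
    }
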
diff --git a/arch/mips/loongson64/common/early_printk.c b/arch/mips/loongson64/common/early_printk.c
deleted file mode 100644
index 5e2a151aa30c..000000000000
--- a/arch/mips/loongson64/common/early_printk.c
+++ /dev/null
@@ -1,38 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/* early printk support
- *
- * Copyright (c) 2009 Philippe Vachon <philippe@cowpig.ca>
- * Copyright (c) 2009 Lemote Inc.
- * Author: Wu Zhangjin, wuzhangjin@gmail.com
- */
-#include <linux/serial_reg.h>
-#include <asm/setup.h>
-
-#include <loongson.h>
-
-#define PORT(base, offset) (u8 *)(base + offset)
-
-static inline unsigned int serial_in(unsigned char *base, int offset)
-{
- return readb(PORT(base, offset));
-}
-
-static inline void serial_out(unsigned char *base, int offset, int value)
-{
- writeb(value, PORT(base, offset));
-}
-
-void prom_putchar(char c)
-{
- int timeout;
- unsigned char *uart_base;
-
- uart_base = (unsigned char *)_loongson_uart_base[0];
- timeout = 1024;
-
- while (((serial_in(uart_base, UART_LSR) & UART_LSR_THRE) == 0) &&
- (timeout-- > 0))
- ;
-
- serial_out(uart_base, UART_TX, c);
-}
diff --git a/arch/mips/loongson64/common/mem.c b/arch/mips/loongson64/common/mem.c
deleted file mode 100644
index 4254ac4ec616..000000000000
--- a/arch/mips/loongson64/common/mem.c
+++ /dev/null
@@ -1,157 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- */
-#include <linux/fs.h>
-#include <linux/fcntl.h>
-#include <linux/memblock.h>
-#include <linux/mm.h>
-
-#include <asm/bootinfo.h>
-
-#include <loongson.h>
-#include <boot_param.h>
-#include <mem.h>
-#include <pci.h>
-
-#ifndef CONFIG_LEFI_FIRMWARE_INTERFACE
-
-u32 memsize, highmemsize;
-
-void __init prom_init_memory(void)
-{
- add_memory_region(0x0, (memsize << 20), BOOT_MEM_RAM);
-
- add_memory_region(memsize << 20, LOONGSON_PCI_MEM_START - (memsize <<
- 20), BOOT_MEM_RESERVED);
-
-#ifdef CONFIG_CPU_SUPPORTS_ADDRWINCFG
- {
- int bit;
-
- bit = fls(memsize + highmemsize);
- if (bit != ffs(memsize + highmemsize))
- bit += 20;
- else
- bit = bit + 20 - 1;
-
- /* set cpu window3 to map CPU to DDR: 2G -> 2G */
- LOONGSON_ADDRWIN_CPUTODDR(ADDRWIN_WIN3, 0x80000000ul,
- 0x80000000ul, (1 << bit));
- mmiowb();
- }
-#endif /* !CONFIG_CPU_SUPPORTS_ADDRWINCFG */
-
-#ifdef CONFIG_64BIT
- if (highmemsize > 0)
- add_memory_region(LOONGSON_HIGHMEM_START,
- highmemsize << 20, BOOT_MEM_RAM);
-
- add_memory_region(LOONGSON_PCI_MEM_END + 1, LOONGSON_HIGHMEM_START -
- LOONGSON_PCI_MEM_END - 1, BOOT_MEM_RESERVED);
-
-#endif /* !CONFIG_64BIT */
-}
-
-#else /* CONFIG_LEFI_FIRMWARE_INTERFACE */
-
-void __init prom_init_memory(void)
-{
- int i;
- u32 node_id;
- u32 mem_type;
-
- /* parse memory information */
- for (i = 0; i < loongson_memmap->nr_map; i++) {
- node_id = loongson_memmap->map[i].node_id;
- mem_type = loongson_memmap->map[i].mem_type;
-
- if (node_id != 0)
- continue;
-
- switch (mem_type) {
- case SYSTEM_RAM_LOW:
- memblock_add(loongson_memmap->map[i].mem_start,
- (u64)loongson_memmap->map[i].mem_size << 20);
- break;
- case SYSTEM_RAM_HIGH:
- memblock_add(loongson_memmap->map[i].mem_start,
- (u64)loongson_memmap->map[i].mem_size << 20);
- break;
- case SYSTEM_RAM_RESERVED:
- memblock_reserve(loongson_memmap->map[i].mem_start,
- (u64)loongson_memmap->map[i].mem_size << 20);
- break;
- }
- }
-}
-
-#endif /* CONFIG_LEFI_FIRMWARE_INTERFACE */
-
-/* override of arch/mips/mm/cache.c: __uncached_access */
-int __uncached_access(struct file *file, unsigned long addr)
-{
- if (file->f_flags & O_DSYNC)
- return 1;
-
- return addr >= __pa(high_memory) ||
- ((addr >= LOONGSON_MMIO_MEM_START) &&
- (addr < LOONGSON_MMIO_MEM_END));
-}
-
-#ifdef CONFIG_CPU_SUPPORTS_UNCACHED_ACCELERATED
-
-#include <linux/pci.h>
-#include <linux/sched.h>
-#include <asm/current.h>
-
-static unsigned long uca_start, uca_end;
-
-pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
- unsigned long size, pgprot_t vma_prot)
-{
- unsigned long offset = pfn << PAGE_SHIFT;
- unsigned long end = offset + size;
-
- if (__uncached_access(file, offset)) {
- if (uca_start && (offset >= uca_start) &&
- (end <= uca_end))
- return __pgprot((pgprot_val(vma_prot) &
- ~_CACHE_MASK) |
- _CACHE_UNCACHED_ACCELERATED);
- else
- return pgprot_noncached(vma_prot);
- }
- return vma_prot;
-}
-
-static int __init find_vga_mem_init(void)
-{
- struct pci_dev *dev = 0;
- struct resource *r;
- int idx;
-
- if (uca_start)
- return 0;
-
- for_each_pci_dev(dev) {
- if ((dev->class >> 16) == PCI_BASE_CLASS_DISPLAY) {
- for (idx = 0; idx < PCI_NUM_RESOURCES; idx++) {
- r = &dev->resource[idx];
- if (!r->start && r->end)
- continue;
- if (r->flags & IORESOURCE_IO)
- continue;
- if (r->flags & IORESOURCE_MEM) {
- uca_start = r->start;
- uca_end = r->end;
- return 0;
- }
- }
- }
- }
-
- return 0;
-}
-
-late_initcall(find_vga_mem_init);
-#endif /* !CONFIG_CPU_SUPPORTS_UNCACHED_ACCELERATED */
diff --git a/arch/mips/loongson64/common/serial.c b/arch/mips/loongson64/common/serial.c
deleted file mode 100644
index 98c3a7feb10f..000000000000
--- a/arch/mips/loongson64/common/serial.c
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2007 Ralf Baechle (ralf@linux-mips.org)
- *
- * Copyright (C) 2009 Lemote, Inc.
- * Author: Yan hua (yanhua@lemote.com)
- * Author: Wu Zhangjin (wuzhangjin@gmail.com)
- */
-
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/serial_8250.h>
-
-#include <asm/bootinfo.h>
-
-#include <loongson.h>
-#include <machine.h>
-
-#define PORT(int, clk) \
-{ \
- .irq = int, \
- .uartclk = clk, \
- .iotype = UPIO_PORT, \
- .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST, \
- .regshift = 0, \
-}
-
-#define PORT_M(int, clk) \
-{ \
- .irq = MIPS_CPU_IRQ_BASE + (int), \
- .uartclk = clk, \
- .iotype = UPIO_MEM, \
- .membase = (void __iomem *)NULL, \
- .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST, \
- .regshift = 0, \
-}
-
-static struct plat_serial8250_port uart8250_data[][MAX_UARTS + 1] = {
- [MACH_LOONGSON_UNKNOWN] = {},
- [MACH_LEMOTE_FL2E] = {PORT(4, 1843200), {} },
- [MACH_LEMOTE_FL2F] = {PORT(3, 1843200), {} },
- [MACH_LEMOTE_ML2F7] = {PORT_M(3, 3686400), {} },
- [MACH_LEMOTE_YL2F89] = {PORT_M(3, 3686400), {} },
- [MACH_DEXXON_GDIUM2F10] = {PORT_M(3, 3686400), {} },
- [MACH_LEMOTE_NAS] = {PORT_M(3, 3686400), {} },
- [MACH_LEMOTE_LL2F] = {PORT(3, 1843200), {} },
- [MACH_LOONGSON_GENERIC] = {PORT_M(2, 25000000), {} },
- [MACH_LOONGSON_END] = {},
-};
-
-static struct platform_device uart8250_device = {
- .name = "serial8250",
- .id = PLAT8250_DEV_PLATFORM,
-};
-
-static int __init serial_init(void)
-{
- int i;
- unsigned char iotype;
-
- iotype = uart8250_data[mips_machtype][0].iotype;
-
- if (UPIO_MEM == iotype) {
- uart8250_data[mips_machtype][0].mapbase =
- loongson_uart_base[0];
- uart8250_data[mips_machtype][0].membase =
- (void __iomem *)_loongson_uart_base[0];
- }
- else if (UPIO_PORT == iotype)
- uart8250_data[mips_machtype][0].iobase =
- loongson_uart_base[0] - LOONGSON_PCIIO_BASE;
-
- if (loongson_sysconf.uarts[0].uartclk)
- uart8250_data[mips_machtype][0].uartclk =
- loongson_sysconf.uarts[0].uartclk;
-
- for (i = 1; i < loongson_sysconf.nr_uarts; i++) {
- iotype = loongson_sysconf.uarts[i].iotype;
- uart8250_data[mips_machtype][i].iotype = iotype;
- loongson_uart_base[i] = loongson_sysconf.uarts[i].uart_base;
-
- if (UPIO_MEM == iotype) {
- uart8250_data[mips_machtype][i].irq =
- MIPS_CPU_IRQ_BASE + loongson_sysconf.uarts[i].int_offset;
- uart8250_data[mips_machtype][i].mapbase =
- loongson_uart_base[i];
- uart8250_data[mips_machtype][i].membase =
- ioremap_nocache(loongson_uart_base[i], 8);
- } else if (UPIO_PORT == iotype) {
- uart8250_data[mips_machtype][i].irq =
- loongson_sysconf.uarts[i].int_offset;
- uart8250_data[mips_machtype][i].iobase =
- loongson_uart_base[i] - LOONGSON_PCIIO_BASE;
- }
-
- uart8250_data[mips_machtype][i].uartclk =
- loongson_sysconf.uarts[i].uartclk;
- uart8250_data[mips_machtype][i].flags =
- UPF_BOOT_AUTOCONF | UPF_SKIP_TEST;
- }
-
- memset(&uart8250_data[mips_machtype][loongson_sysconf.nr_uarts],
- 0, sizeof(struct plat_serial8250_port));
- uart8250_device.dev.platform_data = uart8250_data[mips_machtype];
-
- return platform_device_register(&uart8250_device);
-}
-module_init(serial_init);
-
-static void __exit serial_exit(void)
-{
- platform_device_unregister(&uart8250_device);
-}
-module_exit(serial_exit);
diff --git a/arch/mips/loongson64/loongson-3/cop2-ex.c b/arch/mips/loongson64/cop2-ex.c
index 9efdfe430ff0..9efdfe430ff0 100644
--- a/arch/mips/loongson64/loongson-3/cop2-ex.c
+++ b/arch/mips/loongson64/cop2-ex.c
diff --git a/arch/mips/loongson64/loongson-3/dma.c b/arch/mips/loongson64/dma.c
index 5e86635f71db..5e86635f71db 100644
--- a/arch/mips/loongson64/loongson-3/dma.c
+++ b/arch/mips/loongson64/dma.c
diff --git a/arch/mips/loongson64/common/env.c b/arch/mips/loongson64/env.c
index 09d5cf4676ca..0daeb7bcf023 100644
--- a/arch/mips/loongson64/common/env.c
+++ b/arch/mips/loongson64/env.c
@@ -30,41 +30,13 @@ u64 loongson_freqctrl[MAX_PACKAGES];
unsigned long long smp_group[4];
-#define parse_even_earlier(res, option, p) \
-do { \
- unsigned int tmp __maybe_unused; \
- \
- if (strncmp(option, (char *)p, strlen(option)) == 0) \
- tmp = kstrtou32((char *)p + strlen(option"="), 10, &res); \
-} while (0)
+const char *get_system_type(void)
+{
+ return "Generic Loongson64 System";
+}
void __init prom_init_env(void)
{
- /* pmon passes arguments in 32bit pointers */
- unsigned int processor_id;
-
-#ifndef CONFIG_LEFI_FIRMWARE_INTERFACE
- int *_prom_envp;
- long l;
-
- /* firmware arguments are initialized in head.S */
- _prom_envp = (int *)fw_arg2;
-
- l = (long)*_prom_envp;
- while (l != 0) {
- parse_even_earlier(cpu_clock_freq, "cpuclock", l);
- parse_even_earlier(memsize, "memsize", l);
- parse_even_earlier(highmemsize, "highmemsize", l);
- _prom_envp++;
- l = (long)*_prom_envp;
- }
- if (memsize == 0)
- memsize = 256;
-
- loongson_sysconf.nr_uarts = 1;
-
- pr_info("memsize=%u, highmemsize=%u\n", memsize, highmemsize);
-#else
struct boot_params *boot_p;
struct loongson_params *loongson_p;
struct system_loongson *esys;
@@ -182,31 +154,5 @@ void __init prom_init_env(void)
if (loongson_sysconf.nr_sensors)
memcpy(loongson_sysconf.sensors, esys->sensors,
sizeof(struct sensor_device) * loongson_sysconf.nr_sensors);
-#endif
- if (cpu_clock_freq == 0) {
- processor_id = (&current_cpu_data)->processor_id;
- switch (processor_id & PRID_REV_MASK) {
- case PRID_REV_LOONGSON2E:
- cpu_clock_freq = 533080000;
- break;
- case PRID_REV_LOONGSON2F:
- cpu_clock_freq = 797000000;
- break;
- case PRID_REV_LOONGSON3A_R1:
- case PRID_REV_LOONGSON3A_R2_0:
- case PRID_REV_LOONGSON3A_R2_1:
- case PRID_REV_LOONGSON3A_R3_0:
- case PRID_REV_LOONGSON3A_R3_1:
- cpu_clock_freq = 900000000;
- break;
- case PRID_REV_LOONGSON3B_R1:
- case PRID_REV_LOONGSON3B_R2:
- cpu_clock_freq = 1000000000;
- break;
- default:
- cpu_clock_freq = 100000000;
- break;
- }
- }
pr_info("CpuClock = %u\n", cpu_clock_freq);
}
diff --git a/arch/mips/loongson64/loongson-3/hpet.c b/arch/mips/loongson64/hpet.c
index ed15430ad64f..ed15430ad64f 100644
--- a/arch/mips/loongson64/loongson-3/hpet.c
+++ b/arch/mips/loongson64/hpet.c
diff --git a/arch/mips/loongson64/init.c b/arch/mips/loongson64/init.c
new file mode 100644
index 000000000000..5ac1a0f35ca4
--- /dev/null
+++ b/arch/mips/loongson64/init.c
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2009 Lemote Inc.
+ * Author: Wu Zhangjin, wuzhangjin@gmail.com
+ */
+
+#include <linux/memblock.h>
+#include <asm/bootinfo.h>
+#include <asm/traps.h>
+#include <asm/smp-ops.h>
+#include <asm/cacheflush.h>
+#include <asm/fw/fw.h>
+
+#include <loongson.h>
+
+static void __init mips_nmi_setup(void)
+{
+ void *base;
+ extern char except_vec_nmi;
+
+ base = (void *)(CAC_BASE + 0x380);
+ memcpy(base, &except_vec_nmi, 0x80);
+ flush_icache_range((unsigned long)base, (unsigned long)base + 0x80);
+}
+
+void __init prom_init(void)
+{
+ fw_init_cmdline();
+ prom_init_env();
+
+ /* init base address of io space */
+ set_io_port_base((unsigned long)
+ ioremap(LOONGSON_PCIIO_BASE, LOONGSON_PCIIO_SIZE));
+
+ prom_init_numa_memory();
+
+ /* Hardcode to CPU UART 0 */
+ setup_8250_early_printk_port(TO_UNCAC(LOONGSON_REG_BASE + 0x1e0), 0, 1024);
+
+ register_smp_ops(&loongson3_smp_ops);
+ board_nmi_handler_setup = mips_nmi_setup;
+}
+
+void __init prom_free_prom_memory(void)
+{
+}
diff --git a/arch/mips/loongson64/loongson-3/irq.c b/arch/mips/loongson64/irq.c
index 5605061f5f98..79ad797497e4 100644
--- a/arch/mips/loongson64/loongson-3/irq.c
+++ b/arch/mips/loongson64/irq.c
@@ -78,8 +78,12 @@ static void ht_irqdispatch(void)
#define UNUSED_IPS (CAUSEF_IP5 | CAUSEF_IP4 | CAUSEF_IP1 | CAUSEF_IP0)
-void mach_irq_dispatch(unsigned int pending)
+asmlinkage void plat_irq_dispatch(void)
{
+ unsigned int pending;
+
+ pending = read_c0_cause() & read_c0_status() & ST0_IM;
+
if (pending & CAUSEF_IP7)
do_IRQ(LOONGSON_TIMER_IRQ);
#if defined(CONFIG_SMP)
@@ -127,7 +131,7 @@ void irq_router_init(void)
LOONGSON_INT_ROUTER_INTEN | (0xffff << 16) | 0x1 << 10;
}
-void __init mach_init_irq(void)
+void __init arch_init_irq(void)
{
struct irq_chip *chip;
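
plat_irq_dispatch() follows the standard MIPS pattern: a line is serviced only when it is both raised in CP0 Cause (IP bits) and unmasked in CP0 Status (IM bits), which is what the cause & status & ST0_IM intersection computes. A stand-alone demonstration with made-up register values:

    #include <stdio.h>

    #define ST0_IM		0x0000ff00	/* Status.IM: lines IP0..IP7 */
    #define CAUSEF_IP7	0x00008000	/* Cause.IP7: the CPU timer  */

    int main(void)
    {
        unsigned int cause   = 0x0000c000;	/* IP7 and IP6 raised */
        unsigned int status  = 0x00008001;	/* only IP7 unmasked  */
        unsigned int pending = cause & status & ST0_IM;

        /* prints pending=0x8000 timer=1: the masked IP6 drops out */
        printf("pending=%#x timer=%d\n", pending, !!(pending & CAUSEF_IP7));
        return 0;
    }
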
diff --git a/arch/mips/loongson64/loongson-3/Makefile b/arch/mips/loongson64/loongson-3/Makefile
deleted file mode 100644
index df39598742b2..000000000000
--- a/arch/mips/loongson64/loongson-3/Makefile
+++ /dev/null
@@ -1,11 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# Makefile for Loongson-3 family machines
-#
-obj-y += irq.o cop2-ex.o platform.o acpi_init.o dma.o
-
-obj-$(CONFIG_SMP) += smp.o
-
-obj-$(CONFIG_NUMA) += numa.o
-
-obj-$(CONFIG_RS780_HPET) += hpet.o
diff --git a/arch/mips/loongson64/loongson-3/numa.c b/arch/mips/loongson64/numa.c
index 8f20d2cb3767..ef94a2278561 100644
--- a/arch/mips/loongson64/loongson-3/numa.c
+++ b/arch/mips/loongson64/numa.c
@@ -26,12 +26,15 @@
#include <asm/wbflush.h>
#include <boot_param.h>
-static struct node_data prealloc__node_data[MAX_NUMNODES];
+static struct pglist_data prealloc__node_data[MAX_NUMNODES];
unsigned char __node_distances[MAX_NUMNODES][MAX_NUMNODES];
EXPORT_SYMBOL(__node_distances);
-struct node_data *__node_data[MAX_NUMNODES];
+struct pglist_data *__node_data[MAX_NUMNODES];
EXPORT_SYMBOL(__node_data);
+cpumask_t __node_cpumask[MAX_NUMNODES];
+EXPORT_SYMBOL(__node_cpumask);
+
static void enable_lpa(void)
{
unsigned long value;
@@ -214,7 +217,7 @@ static __init void prom_meminit(void)
if (node_online(node)) {
szmem(node);
node_mem_init(node);
- cpumask_clear(&__node_data[(node)]->cpumask);
+ cpumask_clear(&__node_cpumask[node]);
}
}
memblocks_present();
@@ -228,7 +231,7 @@ static __init void prom_meminit(void)
if (loongson_sysconf.reserved_cpus_mask & (1<<cpu))
continue;
- cpumask_set_cpu(active_cpu, &__node_data[(node)]->cpumask);
+ cpumask_set_cpu(active_cpu, &__node_cpumask[node]);
pr_info("NUMA: set cpumask cpu %d on node %d\n", active_cpu, node);
active_cpu++;
diff --git a/arch/mips/loongson64/pci.c b/arch/mips/loongson64/pci.c
new file mode 100644
index 000000000000..e84ae20c3290
--- /dev/null
+++ b/arch/mips/loongson64/pci.c
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2007 Lemote, Inc. & Institute of Computing Technology
+ * Author: Fuxin Zhang, zhangfx@lemote.com
+ */
+#include <linux/pci.h>
+
+#include <pci.h>
+#include <loongson.h>
+#include <boot_param.h>
+
+static struct resource loongson_pci_mem_resource = {
+ .name = "pci memory space",
+ .start = LOONGSON_PCI_MEM_START,
+ .end = LOONGSON_PCI_MEM_END,
+ .flags = IORESOURCE_MEM,
+};
+
+static struct resource loongson_pci_io_resource = {
+ .name = "pci io space",
+ .start = LOONGSON_PCI_IO_START,
+ .end = IO_SPACE_LIMIT,
+ .flags = IORESOURCE_IO,
+};
+
+static struct pci_controller loongson_pci_controller = {
+ .pci_ops = &loongson_pci_ops,
+ .io_resource = &loongson_pci_io_resource,
+ .mem_resource = &loongson_pci_mem_resource,
+ .mem_offset = 0x00000000UL,
+ .io_offset = 0x00000000UL,
+};
+
+
+extern int sbx00_acpi_init(void);
+
+static int __init pcibios_init(void)
+{
+
+ loongson_pci_controller.io_map_base = mips_io_port_base;
+ loongson_pci_mem_resource.start = loongson_sysconf.pci_mem_start_addr;
+ loongson_pci_mem_resource.end = loongson_sysconf.pci_mem_end_addr;
+
+ register_pci_controller(&loongson_pci_controller);
+
+ sbx00_acpi_init();
+
+ return 0;
+}
+
+arch_initcall(pcibios_init);
diff --git a/arch/mips/loongson64/loongson-3/platform.c b/arch/mips/loongson64/platform.c
index 13f3404f0030..13f3404f0030 100644
--- a/arch/mips/loongson64/loongson-3/platform.c
+++ b/arch/mips/loongson64/platform.c
diff --git a/arch/mips/loongson64/pm.c b/arch/mips/loongson64/pm.c
new file mode 100644
index 000000000000..7c8556f09781
--- /dev/null
+++ b/arch/mips/loongson64/pm.c
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * loongson-specific suspend support
+ *
+ * Copyright (C) 2009 Lemote Inc.
+ * Author: Wu Zhangjin <wuzhangjin@gmail.com>
+ */
+#include <linux/suspend.h>
+#include <linux/interrupt.h>
+#include <linux/pm.h>
+
+#include <asm/i8259.h>
+#include <asm/mipsregs.h>
+
+#include <loongson.h>
+
+static unsigned int __maybe_unused cached_master_mask; /* i8259A */
+static unsigned int __maybe_unused cached_slave_mask;
+static unsigned int __maybe_unused cached_bonito_irq_mask; /* bonito */
+
+void arch_suspend_disable_irqs(void)
+{
+ /* disable all mips events */
+ local_irq_disable();
+
+#ifdef CONFIG_I8259
+ /* disable all events of i8259A */
+ cached_slave_mask = inb(PIC_SLAVE_IMR);
+ cached_master_mask = inb(PIC_MASTER_IMR);
+
+ outb(0xff, PIC_SLAVE_IMR);
+ inb(PIC_SLAVE_IMR);
+ outb(0xff, PIC_MASTER_IMR);
+ inb(PIC_MASTER_IMR);
+#endif
+ /* disable all events of bonito */
+ cached_bonito_irq_mask = LOONGSON_INTEN;
+ LOONGSON_INTENCLR = 0xffff;
+ (void)LOONGSON_INTENCLR;
+}
+
+void arch_suspend_enable_irqs(void)
+{
+ /* enable all mips events */
+ local_irq_enable();
+#ifdef CONFIG_I8259
+ /* only enable the cached events of i8259A */
+ outb(cached_slave_mask, PIC_SLAVE_IMR);
+ outb(cached_master_mask, PIC_MASTER_IMR);
+#endif
+ /* enable all cached events of bonito */
+ LOONGSON_INTENSET = cached_bonito_irq_mask;
+ (void)LOONGSON_INTENSET;
+}
+
+/*
+ * Setup the board-specific events for waking up loongson from wait mode
+ */
+void __weak setup_wakeup_events(void)
+{
+}
+
+void __weak mach_suspend(void)
+{
+}
+
+void __weak mach_resume(void)
+{
+}
+
+static int loongson_pm_enter(suspend_state_t state)
+{
+ mach_suspend();
+
+ mach_resume();
+
+ return 0;
+}
+
+static int loongson_pm_valid_state(suspend_state_t state)
+{
+ switch (state) {
+ case PM_SUSPEND_ON:
+ case PM_SUSPEND_STANDBY:
+ case PM_SUSPEND_MEM:
+ return 1;
+
+ default:
+ return 0;
+ }
+}
+
+static const struct platform_suspend_ops loongson_pm_ops = {
+ .valid = loongson_pm_valid_state,
+ .enter = loongson_pm_enter,
+};
+
+static int __init loongson_pm_init(void)
+{
+ suspend_set_ops(&loongson_pm_ops);
+
+ return 0;
+}
+arch_initcall(loongson_pm_init);
diff --git a/arch/mips/loongson64/reset.c b/arch/mips/loongson64/reset.c
new file mode 100644
index 000000000000..88b3bd5fed25
--- /dev/null
+++ b/arch/mips/loongson64/reset.c
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ *
+ * Copyright (C) 2007 Lemote, Inc. & Institute of Computing Technology
+ * Author: Fuxin Zhang, zhangfx@lemote.com
+ * Copyright (C) 2009 Lemote, Inc.
+ * Author: Zhangjin Wu, wuzhangjin@gmail.com
+ */
+#include <linux/init.h>
+#include <linux/pm.h>
+
+#include <asm/idle.h>
+#include <asm/reboot.h>
+
+#include <loongson.h>
+#include <boot_param.h>
+
+static inline void loongson_reboot(void)
+{
+	((void (*)(void))ioremap_nocache(LOONGSON_BOOT_BASE, 4))();
+}
+
+static void loongson_restart(char *command)
+{
+
+ void (*fw_restart)(void) = (void *)loongson_sysconf.restart_addr;
+
+ fw_restart();
+ while (1) {
+ if (cpu_wait)
+ cpu_wait();
+ }
+}
+
+static void loongson_poweroff(void)
+{
+ void (*fw_poweroff)(void) = (void *)loongson_sysconf.poweroff_addr;
+
+ fw_poweroff();
+ while (1) {
+ if (cpu_wait)
+ cpu_wait();
+ }
+}
+
+static void loongson_halt(void)
+{
+ pr_notice("\n\n** You can safely turn off the power now **\n\n");
+ while (1) {
+ if (cpu_wait)
+ cpu_wait();
+ }
+}
+
+static int __init mips_reboot_setup(void)
+{
+ _machine_restart = loongson_restart;
+ _machine_halt = loongson_halt;
+ pm_power_off = loongson_poweroff;
+
+ return 0;
+}
+
+arch_initcall(mips_reboot_setup);
diff --git a/arch/mips/loongson64/rtc.c b/arch/mips/loongson64/rtc.c
new file mode 100644
index 000000000000..8d7628c0f513
--- /dev/null
+++ b/arch/mips/loongson64/rtc.c
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Lemote Fuloong platform support
+ *
+ * Copyright(c) 2010 Arnaud Patard <apatard@mandriva.com>
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/mc146818rtc.h>
+
+static struct resource loongson_rtc_resources[] = {
+ {
+ .start = RTC_PORT(0),
+ .end = RTC_PORT(1),
+ .flags = IORESOURCE_IO,
+ }, {
+ .start = RTC_IRQ,
+ .end = RTC_IRQ,
+ .flags = IORESOURCE_IRQ,
+ }
+};
+
+static struct platform_device loongson_rtc_device = {
+ .name = "rtc_cmos",
+ .id = -1,
+ .resource = loongson_rtc_resources,
+ .num_resources = ARRAY_SIZE(loongson_rtc_resources),
+};
+
+
+static int __init loongson_rtc_platform_init(void)
+{
+ platform_device_register(&loongson_rtc_device);
+ return 0;
+}
+
+device_initcall(loongson_rtc_platform_init);
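
RTC_PORT() comes from the generic mc146818 header; on these boards it resolves to the classic CMOS index/data pair, so the resource spans exactly two ports. Assuming the usual 0x70 base:

    #include <stdio.h>

    #define RTC_PORT(x)	(0x70 + (x))	/* assumed, per the generic header */

    int main(void)
    {
        printf("index=%#x data=%#x\n", RTC_PORT(0), RTC_PORT(1));
        return 0;	/* index=0x70 data=0x71 */
    }
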
diff --git a/arch/mips/loongson64/setup.c b/arch/mips/loongson64/setup.c
new file mode 100644
index 000000000000..4fd27f4f90ed
--- /dev/null
+++ b/arch/mips/loongson64/setup.c
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2007 Lemote Inc. & Institute of Computing Technology
+ * Author: Fuxin Zhang, zhangfx@lemote.com
+ */
+#include <linux/export.h>
+#include <linux/init.h>
+
+#include <asm/wbflush.h>
+#include <asm/bootinfo.h>
+
+#include <loongson.h>
+
+static void wbflush_loongson(void)
+{
+ asm(".set\tpush\n\t"
+ ".set\tnoreorder\n\t"
+ ".set mips3\n\t"
+ "sync\n\t"
+ "nop\n\t"
+ ".set\tpop\n\t"
+ ".set mips0\n\t");
+}
+
+void (*__wbflush)(void) = wbflush_loongson;
+EXPORT_SYMBOL(__wbflush);
+
+void __init plat_mem_setup(void)
+{
+}
diff --git a/arch/mips/loongson64/loongson-3/smp.c b/arch/mips/loongson64/smp.c
index ce68cdaaf33c..de8e0741ce2d 100644
--- a/arch/mips/loongson64/loongson-3/smp.c
+++ b/arch/mips/loongson64/smp.c
@@ -18,6 +18,7 @@
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <loongson.h>
+#include <loongson_regs.h>
#include <workarounds.h>
#include "smp.h"
@@ -48,6 +49,62 @@ static uint32_t core0_c0count[NR_CPUS];
__wbflush(); \
} while (0)
+u32 (*ipi_read_clear)(int cpu);
+void (*ipi_write_action)(int cpu, u32 action);
+
+static u32 csr_ipi_read_clear(int cpu)
+{
+ u32 action;
+
+ /* Load the ipi register to figure out what we're supposed to do */
+ action = csr_readl(LOONGSON_CSR_IPI_STATUS);
+ /* Clear the ipi register to clear the interrupt */
+ csr_writel(action, LOONGSON_CSR_IPI_CLEAR);
+
+ return action;
+}
+
+static void csr_ipi_write_action(int cpu, u32 action)
+{
+ unsigned int irq = 0;
+
+ while ((irq = ffs(action))) {
+ uint32_t val = CSR_IPI_SEND_BLOCK;
+ val |= (irq - 1);
+ val |= (cpu << CSR_IPI_SEND_CPU_SHIFT);
+ csr_writel(val, LOONGSON_CSR_IPI_SEND);
+ action &= ~BIT(irq - 1);
+ }
+}
+
+static u32 legacy_ipi_read_clear(int cpu)
+{
+ u32 action;
+
+ /* Load the ipi register to figure out what we're supposed to do */
+ action = loongson3_ipi_read32(ipi_status0_regs[cpu_logical_map(cpu)]);
+ /* Clear the ipi register to clear the interrupt */
+ loongson3_ipi_write32(action, ipi_clear0_regs[cpu_logical_map(cpu)]);
+
+ return action;
+}
+
+static void legacy_ipi_write_action(int cpu, u32 action)
+{
+ loongson3_ipi_write32((u32)action, ipi_set0_regs[cpu]);
+}
+
+static void csr_ipi_probe(void)
+{
+ if (cpu_has_csr() && csr_readl(LOONGSON_CSR_FEATURES) & LOONGSON_CSRF_IPI) {
+ ipi_read_clear = csr_ipi_read_clear;
+ ipi_write_action = csr_ipi_write_action;
+ } else {
+ ipi_read_clear = legacy_ipi_read_clear;
+ ipi_write_action = legacy_ipi_write_action;
+ }
+}
+
static void ipi_set0_regs_init(void)
{
ipi_set0_regs[0] = (void *)
@@ -233,7 +290,7 @@ static void ipi_mailbox_buf_init(void)
*/
static void loongson3_send_ipi_single(int cpu, unsigned int action)
{
- loongson3_ipi_write32((u32)action, ipi_set0_regs[cpu_logical_map(cpu)]);
+ ipi_write_action(cpu_logical_map(cpu), (u32)action);
}
static void
@@ -242,14 +299,14 @@ loongson3_send_ipi_mask(const struct cpumask *mask, unsigned int action)
unsigned int i;
for_each_cpu(i, mask)
- loongson3_ipi_write32((u32)action, ipi_set0_regs[cpu_logical_map(i)]);
+ ipi_write_action(cpu_logical_map(i), (u32)action);
}
#define IPI_IRQ_OFFSET 6
void loongson3_send_irq_by_ipi(int cpu, int irqs)
{
- loongson3_ipi_write32(irqs << IPI_IRQ_OFFSET, ipi_set0_regs[cpu_logical_map(cpu)]);
+ ipi_write_action(cpu_logical_map(cpu), irqs << IPI_IRQ_OFFSET);
}
void loongson3_ipi_interrupt(struct pt_regs *regs)
@@ -257,13 +314,9 @@ void loongson3_ipi_interrupt(struct pt_regs *regs)
int i, cpu = smp_processor_id();
unsigned int action, c0count, irqs;
- /* Load the ipi register to figure out what we're supposed to do */
- action = loongson3_ipi_read32(ipi_status0_regs[cpu_logical_map(cpu)]);
+ action = ipi_read_clear(cpu);
irqs = action >> IPI_IRQ_OFFSET;
- /* Clear the ipi register to clear the interrupt */
- loongson3_ipi_write32((u32)action, ipi_clear0_regs[cpu_logical_map(cpu)]);
-
if (action & SMP_RESCHEDULE_YOURSELF)
scheduler_ipi();
@@ -372,6 +425,7 @@ static void __init loongson3_smp_setup(void)
num++;
}
+ csr_ipi_probe();
ipi_set0_regs_init();
ipi_clear0_regs_init();
ipi_status0_regs_init();
@@ -450,7 +504,7 @@ static void loongson3_cpu_die(unsigned int cpu)
* flush all L1 entries at first. Then, another core (usually Core 0) can
* safely disable the clock of the target core. loongson3_play_dead() is
* called via CKSEG1 (uncached and unmmaped) */
-static void loongson3a_r1_play_dead(int *state_addr)
+static void loongson3_type1_play_dead(int *state_addr)
{
register int val;
register long cpuid, core, node, count;
@@ -512,7 +566,7 @@ static void loongson3a_r1_play_dead(int *state_addr)
: "a1");
}
-static void loongson3a_r2r3_play_dead(int *state_addr)
+static void loongson3_type2_play_dead(int *state_addr)
{
register int val;
register long cpuid, core, node, count;
@@ -532,27 +586,7 @@ static void loongson3a_r2r3_play_dead(int *state_addr)
" cache 1, 3(%[addr]) \n"
" addiu %[sets], %[sets], -1 \n"
" bnez %[sets], 1b \n"
- " addiu %[addr], %[addr], 0x40 \n"
- " li %[addr], 0x80000000 \n" /* KSEG0 */
- "2: cache 2, 0(%[addr]) \n" /* flush L1 VCache */
- " cache 2, 1(%[addr]) \n"
- " cache 2, 2(%[addr]) \n"
- " cache 2, 3(%[addr]) \n"
- " cache 2, 4(%[addr]) \n"
- " cache 2, 5(%[addr]) \n"
- " cache 2, 6(%[addr]) \n"
- " cache 2, 7(%[addr]) \n"
- " cache 2, 8(%[addr]) \n"
- " cache 2, 9(%[addr]) \n"
- " cache 2, 10(%[addr]) \n"
- " cache 2, 11(%[addr]) \n"
- " cache 2, 12(%[addr]) \n"
- " cache 2, 13(%[addr]) \n"
- " cache 2, 14(%[addr]) \n"
- " cache 2, 15(%[addr]) \n"
- " addiu %[vsets], %[vsets], -1 \n"
- " bnez %[vsets], 2b \n"
- " addiu %[addr], %[addr], 0x40 \n"
+ " addiu %[addr], %[addr], 0x20 \n"
" li %[val], 0x7 \n" /* *state_addr = CPU_DEAD; */
" sw %[val], (%[state_addr]) \n"
" sync \n"
@@ -560,8 +594,7 @@ static void loongson3a_r2r3_play_dead(int *state_addr)
" .set pop \n"
: [addr] "=&r" (addr), [val] "=&r" (val)
: [state_addr] "r" (state_addr),
- [sets] "r" (cpu_data[smp_processor_id()].dcache.sets),
- [vsets] "r" (cpu_data[smp_processor_id()].vcache.sets));
+ [sets] "r" (cpu_data[smp_processor_id()].dcache.sets));
__asm__ __volatile__(
" .set push \n"
@@ -576,6 +609,8 @@ static void loongson3a_r2r3_play_dead(int *state_addr)
" andi %[node], %[cpuid], 0xc \n"
" dsll %[node], 42 \n" /* get node id */
" or %[base], %[base], %[node] \n"
+ " dsrl %[node], 30 \n" /* 15:14 */
+ " or %[base], %[base], %[node] \n"
"1: li %[count], 0x100 \n" /* wait for init loop */
"2: bnez %[count], 2b \n" /* limit mailbox access */
" addiu %[count], -1 \n"
@@ -595,7 +630,7 @@ static void loongson3a_r2r3_play_dead(int *state_addr)
: "a1");
}
-static void loongson3b_play_dead(int *state_addr)
+static void loongson3_type3_play_dead(int *state_addr)
{
register int val;
register long cpuid, core, node, count;
@@ -615,7 +650,27 @@ static void loongson3b_play_dead(int *state_addr)
" cache 1, 3(%[addr]) \n"
" addiu %[sets], %[sets], -1 \n"
" bnez %[sets], 1b \n"
- " addiu %[addr], %[addr], 0x20 \n"
+ " addiu %[addr], %[addr], 0x40 \n"
+ " li %[addr], 0x80000000 \n" /* KSEG0 */
+ "2: cache 2, 0(%[addr]) \n" /* flush L1 VCache */
+ " cache 2, 1(%[addr]) \n"
+ " cache 2, 2(%[addr]) \n"
+ " cache 2, 3(%[addr]) \n"
+ " cache 2, 4(%[addr]) \n"
+ " cache 2, 5(%[addr]) \n"
+ " cache 2, 6(%[addr]) \n"
+ " cache 2, 7(%[addr]) \n"
+ " cache 2, 8(%[addr]) \n"
+ " cache 2, 9(%[addr]) \n"
+ " cache 2, 10(%[addr]) \n"
+ " cache 2, 11(%[addr]) \n"
+ " cache 2, 12(%[addr]) \n"
+ " cache 2, 13(%[addr]) \n"
+ " cache 2, 14(%[addr]) \n"
+ " cache 2, 15(%[addr]) \n"
+ " addiu %[vsets], %[vsets], -1 \n"
+ " bnez %[vsets], 2b \n"
+ " addiu %[addr], %[addr], 0x40 \n"
" li %[val], 0x7 \n" /* *state_addr = CPU_DEAD; */
" sw %[val], (%[state_addr]) \n"
" sync \n"
@@ -623,7 +678,8 @@ static void loongson3b_play_dead(int *state_addr)
" .set pop \n"
: [addr] "=&r" (addr), [val] "=&r" (val)
: [state_addr] "r" (state_addr),
- [sets] "r" (cpu_data[smp_processor_id()].dcache.sets));
+ [sets] "r" (cpu_data[smp_processor_id()].dcache.sets),
+ [vsets] "r" (cpu_data[smp_processor_id()].vcache.sets));
__asm__ __volatile__(
" .set push \n"
@@ -638,8 +694,6 @@ static void loongson3b_play_dead(int *state_addr)
" andi %[node], %[cpuid], 0xc \n"
" dsll %[node], 42 \n" /* get node id */
" or %[base], %[base], %[node] \n"
- " dsrl %[node], 30 \n" /* 15:14 */
- " or %[base], %[base], %[node] \n"
"1: li %[count], 0x100 \n" /* wait for init loop */
"2: bnez %[count], 2b \n" /* limit mailbox access */
" addiu %[count], -1 \n"
@@ -661,30 +715,42 @@ static void loongson3b_play_dead(int *state_addr)
void play_dead(void)
{
- int *state_addr;
+ int prid_imp, prid_rev, *state_addr;
unsigned int cpu = smp_processor_id();
void (*play_dead_at_ckseg1)(int *);
idle_task_exit();
- switch (read_c0_prid() & PRID_REV_MASK) {
+
+ prid_imp = read_c0_prid() & PRID_IMP_MASK;
+ prid_rev = read_c0_prid() & PRID_REV_MASK;
+
+ if (prid_imp == PRID_IMP_LOONGSON_64G) {
+ play_dead_at_ckseg1 =
+ (void *)CKSEG1ADDR((unsigned long)loongson3_type3_play_dead);
+ goto out;
+ }
+
+ switch (prid_rev) {
case PRID_REV_LOONGSON3A_R1:
default:
play_dead_at_ckseg1 =
- (void *)CKSEG1ADDR((unsigned long)loongson3a_r1_play_dead);
+ (void *)CKSEG1ADDR((unsigned long)loongson3_type1_play_dead);
+ break;
+ case PRID_REV_LOONGSON3B_R1:
+ case PRID_REV_LOONGSON3B_R2:
+ play_dead_at_ckseg1 =
+ (void *)CKSEG1ADDR((unsigned long)loongson3_type2_play_dead);
break;
case PRID_REV_LOONGSON3A_R2_0:
case PRID_REV_LOONGSON3A_R2_1:
case PRID_REV_LOONGSON3A_R3_0:
case PRID_REV_LOONGSON3A_R3_1:
play_dead_at_ckseg1 =
- (void *)CKSEG1ADDR((unsigned long)loongson3a_r2r3_play_dead);
- break;
- case PRID_REV_LOONGSON3B_R1:
- case PRID_REV_LOONGSON3B_R2:
- play_dead_at_ckseg1 =
- (void *)CKSEG1ADDR((unsigned long)loongson3b_play_dead);
+ (void *)CKSEG1ADDR((unsigned long)loongson3_type3_play_dead);
break;
}
+
+out:
state_addr = &per_cpu(cpu_state, cpu);
mb();
play_dead_at_ckseg1(state_addr);
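
Two PRID idioms recur in this series: play_dead() above splits the PRID into implementation and revision fields and tests them separately, while probe_pcache() further down ORs the two masks into one ordered comparison, which works because the implementation field occupies the higher bits. A minimal user-space sketch of that combined test, with illustrative values rather than the kernel's PRID_* constants:

    #include <assert.h>

    #define PRID_IMP_MASK 0xff00   /* implementation: bits 15:8 */
    #define PRID_REV_MASK 0x00ff   /* revision: bits 7:0 */

    /* One ordered test across both fields: valid because the
     * implementation sits in the higher bits, so (imp | rev)
     * compares like a two-digit key. */
    static int at_least(unsigned int prid, unsigned int imp, unsigned int rev)
    {
        return (prid & (PRID_IMP_MASK | PRID_REV_MASK)) >= (imp | rev);
    }

    int main(void)
    {
        assert(at_least(0x6307, 0x6300, 0x05));  /* same imp, newer rev */
        assert(!at_least(0x6205, 0x6300, 0x05)); /* older implementation */
        assert(at_least(0x6400, 0x6300, 0x05));  /* newer implementation */
        return 0;
    }
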
diff --git a/arch/mips/loongson64/loongson-3/smp.h b/arch/mips/loongson64/smp.h
index 957bde81e0e4..957bde81e0e4 100644
--- a/arch/mips/loongson64/loongson-3/smp.h
+++ b/arch/mips/loongson64/smp.h
diff --git a/arch/mips/loongson64/time.c b/arch/mips/loongson64/time.c
new file mode 100644
index 000000000000..1245f22cec84
--- /dev/null
+++ b/arch/mips/loongson64/time.c
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2007 Lemote, Inc. & Institute of Computing Technology
+ * Author: Fuxin Zhang, zhangfx@lemote.com
+ *
+ * Copyright (C) 2009 Lemote Inc.
+ * Author: Wu Zhangjin, wuzhangjin@gmail.com
+ */
+#include <asm/mc146818-time.h>
+#include <asm/time.h>
+#include <asm/hpet.h>
+
+#include <loongson.h>
+
+void __init plat_time_init(void)
+{
+ /* setup mips r4k timer */
+ mips_hpt_frequency = cpu_clock_freq / 2;
+
+#ifdef CONFIG_RS780_HPET
+ setup_hpet_timer();
+#endif
+}
+
+void read_persistent_clock64(struct timespec64 *ts)
+{
+ ts->tv_sec = mc146818_get_cmos_time();
+ ts->tv_nsec = 0;
+}
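
On these cores the R4K Count register advances once every two pipeline cycles, which is why plat_time_init() above sets mips_hpt_frequency to half of cpu_clock_freq. A standalone sketch (not kernel code) of the conversion this implies:

    #include <stdint.h>

    /* Count ticks for a delay in microseconds; the divide-by-two
     * mirrors the mips_hpt_frequency assignment above.
     * e.g. 1000 us at 800 MHz -> 400000 ticks. */
    static uint64_t usecs_to_count(uint64_t usecs, uint64_t cpu_clock_freq)
    {
        uint64_t hpt_freq = cpu_clock_freq / 2;

        return usecs * hpt_freq / 1000000;
    }
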
diff --git a/arch/mips/math-emu/me-debugfs.c b/arch/mips/math-emu/me-debugfs.c
index 387724860fa6..d5ad76b2bb67 100644
--- a/arch/mips/math-emu/me-debugfs.c
+++ b/arch/mips/math-emu/me-debugfs.c
@@ -189,6 +189,7 @@ static int __init debugfs_fpuemu(void)
{
struct dentry *fpuemu_debugfs_base_dir;
struct dentry *fpuemu_debugfs_inst_dir;
+ char name[32];
fpuemu_debugfs_base_dir = debugfs_create_dir("fpuemustats",
mips_debugfs_dir);
@@ -225,8 +226,6 @@ do { \
#define FPU_STAT_CREATE_EX(m) \
do { \
- char name[32]; \
- \
adjust_instruction_counter_name(name, #m); \
\
debugfs_create_file(name, 0444, fpuemu_debugfs_inst_dir, \
diff --git a/arch/mips/mm/c-r3k.c b/arch/mips/mm/c-r3k.c
index 0ca401ddf3b7..15bb8cf59828 100644
--- a/arch/mips/mm/c-r3k.c
+++ b/arch/mips/mm/c-r3k.c
@@ -241,6 +241,7 @@ static void r3k_flush_cache_page(struct vm_area_struct *vma,
int exec = vma->vm_flags & VM_EXEC;
struct mm_struct *mm = vma->vm_mm;
pgd_t *pgdp;
+ p4d_t *p4dp;
pud_t *pudp;
pmd_t *pmdp;
pte_t *ptep;
@@ -253,7 +254,8 @@ static void r3k_flush_cache_page(struct vm_area_struct *vma,
return;
pgdp = pgd_offset(mm, addr);
- pudp = pud_offset(pgdp, addr);
+ p4dp = p4d_offset(pgdp, addr);
+ pudp = pud_offset(p4dp, addr);
pmdp = pmd_offset(pudp, addr);
ptep = pte_offset(pmdp, addr);
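
The same mechanical change repeats through c-r4k.c, c-tx39.c, fault.c, hugetlbpage.c, ioremap.c, pgtable-32.c and tlb-r4k.c below: a p4d step is spliced between pgd and pud for the kernel's five-level page-table API. On MIPS the p4d level is folded away, so p4d_offset() costs nothing at runtime, but every walk must still name it. The complete walk, as an illustrative sketch with presence checks omitted:

    #include <linux/mm.h>

    /* Illustrative helper, not a kernel API: descend all five levels
     * to the PTE for addr. Folded levels collapse to no-ops. */
    static pte_t *walk_to_pte(struct mm_struct *mm, unsigned long addr)
    {
        pgd_t *pgd = pgd_offset(mm, addr);
        p4d_t *p4d = p4d_offset(pgd, addr);
        pud_t *pud = pud_offset(p4d, addr);
        pmd_t *pmd = pmd_offset(pud, addr);

        return pte_offset_kernel(pmd, addr);
    }
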
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 89b9c851d822..5f3d0103b95d 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -271,12 +271,14 @@ static inline void tx49_blast_icache32(void)
/* I'm in even chunk. blast odd chunks */
for (ws = 0; ws < ws_end; ws += ws_inc)
for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
- cache32_unroll32(addr|ws, Index_Invalidate_I);
+ cache_unroll(32, kernel_cache, Index_Invalidate_I,
+ addr | ws, 32);
CACHE32_UNROLL32_ALIGN;
/* I'm in odd chunk. blast even chunks */
for (ws = 0; ws < ws_end; ws += ws_inc)
for (addr = start; addr < end; addr += 0x400 * 2)
- cache32_unroll32(addr|ws, Index_Invalidate_I);
+ cache_unroll(32, kernel_cache, Index_Invalidate_I,
+ addr | ws, 32);
}
static inline void blast_icache32_r4600_v1_page_indexed(unsigned long page)
@@ -302,12 +304,14 @@ static inline void tx49_blast_icache32_page_indexed(unsigned long page)
/* I'm in even chunk. blast odd chunks */
for (ws = 0; ws < ws_end; ws += ws_inc)
for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
- cache32_unroll32(addr|ws, Index_Invalidate_I);
+ cache_unroll(32, kernel_cache, Index_Invalidate_I,
+ addr | ws, 32);
CACHE32_UNROLL32_ALIGN;
/* I'm in odd chunk. blast even chunks */
for (ws = 0; ws < ws_end; ws += ws_inc)
for (addr = start; addr < end; addr += 0x400 * 2)
- cache32_unroll32(addr|ws, Index_Invalidate_I);
+ cache_unroll(32, kernel_cache, Index_Invalidate_I,
+ addr | ws, 32);
}
static void (* r4k_blast_icache_page)(unsigned long addr);
@@ -320,7 +324,7 @@ static void r4k_blast_icache_page_setup(void)
r4k_blast_icache_page = (void *)cache_noop;
else if (ic_lsize == 16)
r4k_blast_icache_page = blast_icache16_page;
- else if (ic_lsize == 32 && current_cpu_type() == CPU_LOONGSON2)
+ else if (ic_lsize == 32 && current_cpu_type() == CPU_LOONGSON2EF)
r4k_blast_icache_page = loongson2_blast_icache32_page;
else if (ic_lsize == 32)
r4k_blast_icache_page = blast_icache32_page;
@@ -369,7 +373,7 @@ static void r4k_blast_icache_page_indexed_setup(void)
else if (TX49XX_ICACHE_INDEX_INV_WAR)
r4k_blast_icache_page_indexed =
tx49_blast_icache32_page_indexed;
- else if (current_cpu_type() == CPU_LOONGSON2)
+ else if (current_cpu_type() == CPU_LOONGSON2EF)
r4k_blast_icache_page_indexed =
loongson2_blast_icache32_page_indexed;
else
@@ -395,7 +399,7 @@ static void r4k_blast_icache_setup(void)
r4k_blast_icache = blast_r4600_v1_icache32;
else if (TX49XX_ICACHE_INDEX_INV_WAR)
r4k_blast_icache = tx49_blast_icache32;
- else if (current_cpu_type() == CPU_LOONGSON2)
+ else if (current_cpu_type() == CPU_LOONGSON2EF)
r4k_blast_icache = loongson2_blast_icache32;
else
r4k_blast_icache = blast_icache32;
@@ -465,7 +469,7 @@ static void r4k_blast_scache_node_setup(void)
{
unsigned long sc_lsize = cpu_scache_line_size();
- if (current_cpu_type() != CPU_LOONGSON3)
+ if (current_cpu_type() != CPU_LOONGSON64)
r4k_blast_scache_node = (void *)cache_noop;
else if (sc_lsize == 16)
r4k_blast_scache_node = blast_scache16_node;
@@ -480,7 +484,7 @@ static void r4k_blast_scache_node_setup(void)
static inline void local_r4k___flush_cache_all(void * args)
{
switch (current_cpu_type()) {
- case CPU_LOONGSON2:
+ case CPU_LOONGSON2EF:
case CPU_R4000SC:
case CPU_R4000MC:
case CPU_R4400SC:
@@ -497,7 +501,7 @@ static inline void local_r4k___flush_cache_all(void * args)
r4k_blast_scache();
break;
- case CPU_LOONGSON3:
+ case CPU_LOONGSON64:
/* Use get_ebase_cpunum() for both NUMA=y/n */
r4k_blast_scache_node(get_ebase_cpunum() >> 2);
break;
@@ -650,6 +654,7 @@ static inline void local_r4k_flush_cache_page(void *args)
struct mm_struct *mm = vma->vm_mm;
int map_coherent = 0;
pgd_t *pgdp;
+ p4d_t *p4dp;
pud_t *pudp;
pmd_t *pmdp;
pte_t *ptep;
@@ -664,7 +669,8 @@ static inline void local_r4k_flush_cache_page(void *args)
addr &= PAGE_MASK;
pgdp = pgd_offset(mm, addr);
- pudp = pud_offset(pgdp, addr);
+ p4dp = p4d_offset(pgdp, addr);
+ pudp = pud_offset(p4dp, addr);
pmdp = pmd_offset(pudp, addr);
ptep = pte_offset(pmdp, addr);
@@ -770,7 +776,7 @@ static inline void __local_r4k_flush_icache_range(unsigned long start,
r4k_blast_icache();
else {
switch (boot_cpu_type()) {
- case CPU_LOONGSON2:
+ case CPU_LOONGSON2EF:
protected_loongson2_blast_icache_range(start, end);
break;
@@ -863,7 +869,7 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
preempt_disable();
if (cpu_has_inclusive_pcaches) {
if (size >= scache_size) {
- if (current_cpu_type() != CPU_LOONGSON3)
+ if (current_cpu_type() != CPU_LOONGSON64)
r4k_blast_scache();
else
r4k_blast_scache_node(pa_to_nid(addr));
@@ -904,7 +910,7 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
preempt_disable();
if (cpu_has_inclusive_pcaches) {
if (size >= scache_size) {
- if (current_cpu_type() != CPU_LOONGSON3)
+ if (current_cpu_type() != CPU_LOONGSON64)
r4k_blast_scache();
else
r4k_blast_scache_node(pa_to_nid(addr));
@@ -1224,7 +1230,7 @@ static void probe_pcache(void)
c->options |= MIPS_CPU_PREFETCH;
break;
- case CPU_LOONGSON2:
+ case CPU_LOONGSON2EF:
icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
if (prid & 0x3)
@@ -1242,7 +1248,7 @@ static void probe_pcache(void)
c->dcache.waybit = 0;
break;
- case CPU_LOONGSON3:
+ case CPU_LOONGSON64:
config1 = read_c0_config1();
lsize = (config1 >> 19) & 7;
if (lsize)
@@ -1267,7 +1273,8 @@ static void probe_pcache(void)
c->dcache.ways *
c->dcache.linesz;
c->dcache.waybit = 0;
- if ((prid & PRID_REV_MASK) >= PRID_REV_LOONGSON3A_R2_0)
+ if ((c->processor_id & (PRID_IMP_MASK | PRID_REV_MASK)) >=
+ (PRID_IMP_LOONGSON_64C | PRID_REV_LOONGSON3A_R2_0))
c->options |= MIPS_CPU_PREFETCH;
break;
@@ -1452,7 +1459,7 @@ static void probe_pcache(void)
c->dcache.flags &= ~MIPS_CACHE_ALIASES;
break;
- case CPU_LOONGSON2:
+ case CPU_LOONGSON2EF:
/*
 * LOONGSON2 has a 4-way icache, but when using an indexed cache op,
* one op will act on all 4 ways
@@ -1478,7 +1485,7 @@ static void probe_vcache(void)
struct cpuinfo_mips *c = &current_cpu_data;
unsigned int config2, lsize;
- if (current_cpu_type() != CPU_LOONGSON3)
+ if (current_cpu_type() != CPU_LOONGSON64)
return;
config2 = read_c0_config2();
@@ -1653,11 +1660,11 @@ static void setup_scache(void)
#endif
return;
- case CPU_LOONGSON2:
+ case CPU_LOONGSON2EF:
loongson2_sc_init();
return;
- case CPU_LOONGSON3:
+ case CPU_LOONGSON64:
loongson3_sc_init();
return;
@@ -1926,7 +1933,7 @@ void r4k_cache_init(void)
/* Optimization: an L2 flush implicitly flushes the L1 */
current_cpu_data.options |= MIPS_CPU_INCLUSIVE_CACHES;
break;
- case CPU_LOONGSON3:
+ case CPU_LOONGSON64:
/* Loongson-3 maintains cache coherency by hardware */
__flush_cache_all = cache_noop;
__flush_cache_vmap = cache_noop;
diff --git a/arch/mips/mm/c-tx39.c b/arch/mips/mm/c-tx39.c
index b7c8a9d79c35..686867270627 100644
--- a/arch/mips/mm/c-tx39.c
+++ b/arch/mips/mm/c-tx39.c
@@ -170,6 +170,7 @@ static void tx39_flush_cache_page(struct vm_area_struct *vma, unsigned long page
int exec = vma->vm_flags & VM_EXEC;
struct mm_struct *mm = vma->vm_mm;
pgd_t *pgdp;
+ p4d_t *p4dp;
pud_t *pudp;
pmd_t *pmdp;
pte_t *ptep;
@@ -183,7 +184,8 @@ static void tx39_flush_cache_page(struct vm_area_struct *vma, unsigned long page
page &= PAGE_MASK;
pgdp = pgd_offset(mm, page);
- pudp = pud_offset(pgdp, page);
+ p4dp = p4d_offset(pgdp, page);
+ pudp = pud_offset(p4dp, page);
pmdp = pmd_offset(pudp, page);
ptep = pte_offset(pmdp, page);
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index f589aa8f47d9..1e8d00793784 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -292,8 +292,9 @@ vmalloc_fault:
* Do _not_ use "tsk" here. We might be inside
* an interrupt in the middle of a task switch..
*/
- int offset = __pgd_offset(address);
+ int offset = pgd_index(address);
pgd_t *pgd, *pgd_k;
+ p4d_t *p4d, *p4d_k;
pud_t *pud, *pud_k;
pmd_t *pmd, *pmd_k;
pte_t *pte_k;
@@ -305,8 +306,13 @@ vmalloc_fault:
goto no_context;
set_pgd(pgd, *pgd_k);
- pud = pud_offset(pgd, address);
- pud_k = pud_offset(pgd_k, address);
+ p4d = p4d_offset(pgd, address);
+ p4d_k = p4d_offset(pgd_k, address);
+ if (!p4d_present(*p4d_k))
+ goto no_context;
+
+ pud = pud_offset(p4d, address);
+ pud_k = pud_offset(p4d_k, address);
if (!pud_present(*pud_k))
goto no_context;
diff --git a/arch/mips/mm/hugetlbpage.c b/arch/mips/mm/hugetlbpage.c
index cef152234312..77ffece9c270 100644
--- a/arch/mips/mm/hugetlbpage.c
+++ b/arch/mips/mm/hugetlbpage.c
@@ -25,11 +25,13 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr,
unsigned long sz)
{
pgd_t *pgd;
+ p4d_t *p4d;
pud_t *pud;
pte_t *pte = NULL;
pgd = pgd_offset(mm, addr);
- pud = pud_alloc(mm, pgd, addr);
+ p4d = p4d_alloc(mm, pgd, addr);
+ pud = pud_alloc(mm, p4d, addr);
if (pud)
pte = (pte_t *)pmd_alloc(mm, pud, addr);
@@ -40,14 +42,18 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
unsigned long sz)
{
pgd_t *pgd;
+ p4d_t *p4d;
pud_t *pud;
pmd_t *pmd = NULL;
pgd = pgd_offset(mm, addr);
if (pgd_present(*pgd)) {
- pud = pud_offset(pgd, addr);
- if (pud_present(*pud))
- pmd = pmd_offset(pud, addr);
+ p4d = p4d_offset(pgd, addr);
+ if (p4d_present(*p4d)) {
+ pud = pud_offset(p4d, addr);
+ if (pud_present(*pud))
+ pmd = pmd_offset(pud, addr);
+ }
}
return (pte_t *) pmd;
}
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 090fa653dfa9..50f9ed8c6c1b 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -239,9 +239,9 @@ void __init fixrange_init(unsigned long start, unsigned long end,
unsigned long vaddr;
vaddr = start;
- i = __pgd_offset(vaddr);
- j = __pud_offset(vaddr);
- k = __pmd_offset(vaddr);
+ i = pgd_index(vaddr);
+ j = pud_index(vaddr);
+ k = pmd_index(vaddr);
pgd = pgd_base + i;
for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) {
diff --git a/arch/mips/mm/ioremap.c b/arch/mips/mm/ioremap.c
index 1601d90b087b..8317f337a86e 100644
--- a/arch/mips/mm/ioremap.c
+++ b/arch/mips/mm/ioremap.c
@@ -78,11 +78,15 @@ static int remap_area_pages(unsigned long address, phys_addr_t phys_addr,
flush_cache_all();
BUG_ON(address >= end);
do {
+ p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
error = -ENOMEM;
- pud = pud_alloc(&init_mm, dir, address);
+ p4d = p4d_alloc(&init_mm, dir, address);
+ if (!p4d)
+ break;
+ pud = pud_alloc(&init_mm, p4d, address);
if (!pud)
break;
pmd = pmd_alloc(&init_mm, pud, address);
diff --git a/arch/mips/mm/page.c b/arch/mips/mm/page.c
index 56e4f8bffd4c..c5578897a4fa 100644
--- a/arch/mips/mm/page.c
+++ b/arch/mips/mm/page.c
@@ -187,7 +187,7 @@ static void set_prefetch_parameters(void)
}
break;
- case CPU_LOONGSON3:
+ case CPU_LOONGSON64:
/* Loongson-3 only supports Pref_Load/Pref_Store. */
pref_bias_clear_store = 128;
pref_bias_copy_load = 128;
diff --git a/arch/mips/mm/pgtable-32.c b/arch/mips/mm/pgtable-32.c
index 6416a531a4c3..37c7a01427d2 100644
--- a/arch/mips/mm/pgtable-32.c
+++ b/arch/mips/mm/pgtable-32.c
@@ -56,6 +56,7 @@ void __init pagetable_init(void)
pgd_t *pgd_base;
#ifdef CONFIG_HIGHMEM
pgd_t *pgd;
+ p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
@@ -81,8 +82,9 @@ void __init pagetable_init(void)
vaddr = PKMAP_BASE;
fixrange_init(vaddr & PMD_MASK, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);
- pgd = swapper_pg_dir + __pgd_offset(vaddr);
- pud = pud_offset(pgd, vaddr);
+ pgd = swapper_pg_dir + pgd_index(vaddr);
+ p4d = p4d_offset(pgd, vaddr);
+ pud = pud_offset(p4d, vaddr);
pmd = pmd_offset(pud, vaddr);
pte = pte_offset_kernel(pmd, vaddr);
pkmap_page_table = pte;
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index c13e46ced425..d7a9d5f211f0 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -35,10 +35,10 @@ extern void build_tlb_refill_handler(void);
static inline void flush_micro_tlb(void)
{
switch (current_cpu_type()) {
- case CPU_LOONGSON2:
+ case CPU_LOONGSON2EF:
write_c0_diag(LOONGSON_DIAG_ITLB);
break;
- case CPU_LOONGSON3:
+ case CPU_LOONGSON64:
write_c0_diag(LOONGSON_DIAG_ITLB | LOONGSON_DIAG_DTLB);
break;
default:
@@ -295,6 +295,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
{
unsigned long flags;
pgd_t *pgdp;
+ p4d_t *p4dp;
pud_t *pudp;
pmd_t *pmdp;
pte_t *ptep;
@@ -320,7 +321,8 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
mtc0_tlbw_hazard();
tlb_probe();
tlb_probe_hazard();
- pudp = pud_offset(pgdp, address);
+ p4dp = p4d_offset(pgdp, address);
+ pudp = pud_offset(p4dp, address);
pmdp = pmd_offset(pudp, address);
idx = read_c0_index();
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 41bb91f05688..344e6e9ea43b 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -571,8 +571,8 @@ void build_tlb_write_entry(u32 **p, struct uasm_label **l,
case CPU_BMIPS4350:
case CPU_BMIPS4380:
case CPU_BMIPS5000:
- case CPU_LOONGSON2:
- case CPU_LOONGSON3:
+ case CPU_LOONGSON2EF:
+ case CPU_LOONGSON64:
case CPU_R5500:
if (m4kc_tlbp_war())
uasm_i_nop(p);
@@ -1377,7 +1377,7 @@ static void build_r4000_tlb_refill_handler(void)
switch (boot_cpu_type()) {
default:
if (sizeof(long) == 4) {
- case CPU_LOONGSON2:
+ case CPU_LOONGSON2EF:
/* Loongson2 ebase is different from r4k, so we have more space */
if ((p - tlb_handler) > 64)
panic("TLB refill handler space exceeded");
diff --git a/arch/mips/oprofile/Makefile b/arch/mips/oprofile/Makefile
index 011cf9f891e7..e10f216d0422 100644
--- a/arch/mips/oprofile/Makefile
+++ b/arch/mips/oprofile/Makefile
@@ -14,5 +14,5 @@ oprofile-$(CONFIG_CPU_MIPS64) += op_model_mipsxx.o
oprofile-$(CONFIG_CPU_R10000) += op_model_mipsxx.o
oprofile-$(CONFIG_CPU_SB1) += op_model_mipsxx.o
oprofile-$(CONFIG_CPU_XLR) += op_model_mipsxx.o
-oprofile-$(CONFIG_CPU_LOONGSON2) += op_model_loongson2.o
-oprofile-$(CONFIG_CPU_LOONGSON3) += op_model_loongson3.o
+oprofile-$(CONFIG_CPU_LOONGSON2EF) += op_model_loongson2.o
+oprofile-$(CONFIG_CPU_LOONGSON64) += op_model_loongson3.o
diff --git a/arch/mips/oprofile/common.c b/arch/mips/oprofile/common.c
index 2f33992f6dff..03db268cba5c 100644
--- a/arch/mips/oprofile/common.c
+++ b/arch/mips/oprofile/common.c
@@ -93,7 +93,7 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
case CPU_P5600:
case CPU_I6400:
case CPU_M5150:
- case CPU_LOONGSON1:
+ case CPU_LOONGSON32:
case CPU_SB1:
case CPU_SB1A:
case CPU_R10000:
@@ -104,10 +104,10 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
lmodel = &op_model_mipsxx_ops;
break;
- case CPU_LOONGSON2:
+ case CPU_LOONGSON2EF:
lmodel = &op_model_loongson2_ops;
break;
- case CPU_LOONGSON3:
+ case CPU_LOONGSON64:
lmodel = &op_model_loongson3_ops;
break;
};
diff --git a/arch/mips/oprofile/op_model_mipsxx.c b/arch/mips/oprofile/op_model_mipsxx.c
index 96c13a0ab078..a537bf98912c 100644
--- a/arch/mips/oprofile/op_model_mipsxx.c
+++ b/arch/mips/oprofile/op_model_mipsxx.c
@@ -420,7 +420,7 @@ static int __init mipsxx_init(void)
op_model_mipsxx_ops.cpu_type = "mips/sb1";
break;
- case CPU_LOONGSON1:
+ case CPU_LOONGSON32:
op_model_mipsxx_ops.cpu_type = "mips/loongson1";
break;
diff --git a/arch/mips/pci/Makefile b/arch/mips/pci/Makefile
index d6de4cb2e31c..342ce10ef593 100644
--- a/arch/mips/pci/Makefile
+++ b/arch/mips/pci/Makefile
@@ -35,7 +35,7 @@ obj-$(CONFIG_LASAT) += pci-lasat.o
obj-$(CONFIG_MIPS_COBALT) += fixup-cobalt.o
obj-$(CONFIG_LEMOTE_FULOONG2E) += fixup-fuloong2e.o ops-loongson2.o
obj-$(CONFIG_LEMOTE_MACH2F) += fixup-lemote2f.o ops-loongson2.o
-obj-$(CONFIG_LOONGSON_MACH3X) += fixup-loongson3.o ops-loongson3.o
+obj-$(CONFIG_MACH_LOONGSON64) += fixup-loongson3.o ops-loongson3.o
obj-$(CONFIG_MIPS_MALTA) += fixup-malta.o pci-malta.o
obj-$(CONFIG_PMC_MSP7120_GW) += fixup-pmcmsp.o ops-pmcmsp.o
obj-$(CONFIG_PMC_MSP7120_EVAL) += fixup-pmcmsp.o ops-pmcmsp.o
diff --git a/arch/mips/pci/pci-ip27.c b/arch/mips/pci/pci-ip27.c
index 441eb9383b20..8e26b120f994 100644
--- a/arch/mips/pci/pci-ip27.c
+++ b/arch/mips/pci/pci-ip27.c
@@ -7,21 +7,13 @@
* Copyright (C) 1999, 2000, 04 Ralf Baechle (ralf@linux-mips.org)
* Copyright (C) 1999, 2000 Silicon Graphics, Inc.
*/
+#include <asm/sn/addrs.h>
+#include <asm/sn/types.h>
+#include <asm/sn/klconfig.h>
+#include <asm/sn/hub.h>
+#include <asm/sn/ioc3.h>
#include <asm/pci/bridge.h>
-dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct bridge_controller *bc = BRIDGE_CONTROLLER(pdev->bus);
-
- return bc->baddr + paddr;
-}
-
-phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dma_addr)
-{
- return dma_addr & ~(0xffUL << 56);
-}
-
#ifdef CONFIG_NUMA
int pcibus_to_node(struct pci_bus *bus)
{
@@ -31,3 +23,20 @@ int pcibus_to_node(struct pci_bus *bus)
}
EXPORT_SYMBOL(pcibus_to_node);
#endif /* CONFIG_NUMA */
+
+static void ip29_fixup_phy(struct pci_dev *dev)
+{
+ int nasid = pcibus_to_node(dev->bus);
+ u32 sid;
+
+ if (nasid != 1)
+ return; /* only needed on second module */
+
+ /* enable ethernet PHY on IP29 systemboard */
+ pci_read_config_dword(dev, PCI_SUBSYSTEM_VENDOR_ID, &sid);
+ if (sid == (PCI_VENDOR_ID_SGI | (IOC3_SUBSYS_IP29_SYSBOARD) << 16))
+ REMOTE_HUB_S(nasid, MD_LED0, 0x09);
+}
+
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SGI, PCI_DEVICE_ID_SGI_IOC3,
+ ip29_fixup_phy);
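
DECLARE_PCI_FIXUP_FINAL, used above, registers a quirk that the PCI core runs late in enumeration for every device matching the (vendor, device) pair. A minimal sketch of the pattern (the callback body is illustrative):

    #include <linux/pci.h>

    /* Runs once per matching device at the FINAL fixup stage. */
    static void quirk_example(struct pci_dev *dev)
    {
        pci_info(dev, "board-specific quirk applied\n");
    }
    DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SGI, PCI_DEVICE_ID_SGI_IOC3,
                            quirk_example);
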
diff --git a/arch/mips/pci/pci-xtalk-bridge.c b/arch/mips/pci/pci-xtalk-bridge.c
index 7b4d40354ee7..5c1a196be0c5 100644
--- a/arch/mips/pci/pci-xtalk-bridge.c
+++ b/arch/mips/pci/pci-xtalk-bridge.c
@@ -11,16 +11,38 @@
#include <linux/dma-direct.h>
#include <linux/platform_device.h>
#include <linux/platform_data/xtalk-bridge.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/crc16.h>
#include <asm/pci/bridge.h>
#include <asm/paccess.h>
#include <asm/sn/irq_alloc.h>
+#include <asm/sn/ioc3.h>
+
+#define CRC16_INIT 0
+#define CRC16_VALID 0xb001
+
+/*
+ * Common phys<->dma mapping for platforms using pci xtalk bridge
+ */
+dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct bridge_controller *bc = BRIDGE_CONTROLLER(pdev->bus);
+
+ return bc->baddr + paddr;
+}
+
+phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dma_addr)
+{
+ return dma_addr & ~(0xffUL << 56);
+}
/*
 * Most of the IOC3 PCI config registers aren't present, so
 * we emulate what is needed for a normal PCI enumeration
*/
-static int ioc3_cfg_rd(void *addr, int where, int size, u32 *value)
+static int ioc3_cfg_rd(void *addr, int where, int size, u32 *value, u32 sid)
{
u32 cf, shift, mask;
@@ -30,6 +52,9 @@ static int ioc3_cfg_rd(void *addr, int where, int size, u32 *value)
if (get_dbe(cf, (u32 *)addr))
return PCIBIOS_DEVICE_NOT_FOUND;
break;
+ case 0x2c:
+ cf = sid;
+ break;
case 0x3c:
/* emulate sane interrupt pin value */
cf = 0x00000100;
@@ -111,7 +136,8 @@ static int pci_conf0_read_config(struct pci_bus *bus, unsigned int devfn,
*/
if (cf == (PCI_VENDOR_ID_SGI | (PCI_DEVICE_ID_SGI_IOC3 << 16))) {
addr = &bridge->b_type0_cfg_dev[slot].f[fn].l[where >> 2];
- return ioc3_cfg_rd(addr, where, size, value);
+ return ioc3_cfg_rd(addr, where, size, value,
+ bc->ioc3_sid[slot]);
}
addr = &bridge->b_type0_cfg_dev[slot].f[fn].c[where ^ (4 - size)];
@@ -149,7 +175,8 @@ static int pci_conf1_read_config(struct pci_bus *bus, unsigned int devfn,
*/
if (cf == (PCI_VENDOR_ID_SGI | (PCI_DEVICE_ID_SGI_IOC3 << 16))) {
addr = &bridge->b_type1_cfg.c[(fn << 8) | (where & ~3)];
- return ioc3_cfg_rd(addr, where, size, value);
+ return ioc3_cfg_rd(addr, where, size, value,
+ bc->ioc3_sid[slot]);
}
addr = &bridge->b_type1_cfg.c[(fn << 8) | (where ^ (4 - size))];
@@ -279,16 +306,15 @@ static int bridge_set_affinity(struct irq_data *d, const struct cpumask *mask,
struct bridge_irq_chip_data *data = d->chip_data;
int bit = d->parent_data->hwirq;
int pin = d->hwirq;
- nasid_t nasid;
int ret, cpu;
ret = irq_chip_set_affinity_parent(d, mask, force);
if (ret >= 0) {
cpu = cpumask_first_and(mask, cpu_online_mask);
- nasid = COMPACT_TO_NASID_NODEID(cpu_to_node(cpu));
+ data->nasid = cpu_to_node(cpu);
bridge_write(data->bc, b_int_addr[pin].addr,
(((data->bc->intr_addr >> 30) & 0x30000) |
- bit | (nasid << 8)));
+ bit | (data->nasid << 8)));
bridge_read(data->bc, b_wid_tflush);
}
return ret;
@@ -426,6 +452,117 @@ static int bridge_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
return irq;
}
+#define IOC3_SID(sid) (PCI_VENDOR_ID_SGI | ((sid) << 16))
+
+static void bridge_setup_ip27_baseio6g(struct bridge_controller *bc)
+{
+ bc->ioc3_sid[2] = IOC3_SID(IOC3_SUBSYS_IP27_BASEIO6G);
+ bc->ioc3_sid[6] = IOC3_SID(IOC3_SUBSYS_IP27_MIO);
+}
+
+static void bridge_setup_ip27_baseio(struct bridge_controller *bc)
+{
+ bc->ioc3_sid[2] = IOC3_SID(IOC3_SUBSYS_IP27_BASEIO);
+}
+
+static void bridge_setup_ip29_baseio(struct bridge_controller *bc)
+{
+ bc->ioc3_sid[2] = IOC3_SID(IOC3_SUBSYS_IP29_SYSBOARD);
+}
+
+static void bridge_setup_ip30_sysboard(struct bridge_controller *bc)
+{
+ bc->ioc3_sid[2] = IOC3_SID(IOC3_SUBSYS_IP30_SYSBOARD);
+}
+
+static void bridge_setup_menet(struct bridge_controller *bc)
+{
+ bc->ioc3_sid[0] = IOC3_SID(IOC3_SUBSYS_MENET);
+ bc->ioc3_sid[1] = IOC3_SID(IOC3_SUBSYS_MENET);
+ bc->ioc3_sid[2] = IOC3_SID(IOC3_SUBSYS_MENET);
+ bc->ioc3_sid[3] = IOC3_SID(IOC3_SUBSYS_MENET4);
+}
+
+#define BRIDGE_BOARD_SETUP(_partno, _setup) \
+ { .match = _partno, .setup = _setup }
+
+static const struct {
+ char *match;
+ void (*setup)(struct bridge_controller *bc);
+} bridge_ioc3_devid[] = {
+ BRIDGE_BOARD_SETUP("030-0734-", bridge_setup_ip27_baseio6g),
+ BRIDGE_BOARD_SETUP("030-0880-", bridge_setup_ip27_baseio6g),
+ BRIDGE_BOARD_SETUP("030-1023-", bridge_setup_ip27_baseio),
+ BRIDGE_BOARD_SETUP("030-1124-", bridge_setup_ip27_baseio),
+ BRIDGE_BOARD_SETUP("030-1025-", bridge_setup_ip29_baseio),
+ BRIDGE_BOARD_SETUP("030-1244-", bridge_setup_ip29_baseio),
+ BRIDGE_BOARD_SETUP("030-1389-", bridge_setup_ip29_baseio),
+ BRIDGE_BOARD_SETUP("030-0887-", bridge_setup_ip30_sysboard),
+ BRIDGE_BOARD_SETUP("030-1467-", bridge_setup_ip30_sysboard),
+ BRIDGE_BOARD_SETUP("030-0873-", bridge_setup_menet),
+};
+
+static void bridge_setup_board(struct bridge_controller *bc, char *partnum)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(bridge_ioc3_devid); i++)
+ if (!strncmp(partnum, bridge_ioc3_devid[i].match,
+ strlen(bridge_ioc3_devid[i].match))) {
+ bridge_ioc3_devid[i].setup(bc);
+ }
+}
+
+static int bridge_nvmem_match(struct device *dev, const void *data)
+{
+ const char *name = dev_name(dev);
+ const char *prefix = data;
+
+ if (strlen(name) < strlen(prefix))
+ return 0;
+
+ return memcmp(prefix, dev_name(dev), strlen(prefix)) == 0;
+}
+
+static int bridge_get_partnum(u64 baddr, char *partnum)
+{
+ struct nvmem_device *nvmem;
+ char prefix[24];
+ u8 prom[64];
+ int i, j;
+ int ret;
+
+ snprintf(prefix, sizeof(prefix), "bridge-%012llx-0b-", baddr);
+
+ nvmem = nvmem_device_find(prefix, bridge_nvmem_match);
+ if (IS_ERR(nvmem))
+ return PTR_ERR(nvmem);
+
+ ret = nvmem_device_read(nvmem, 0, 64, prom);
+ nvmem_device_put(nvmem);
+
+ if (ret != 64)
+ return ret;
+
+ if (crc16(CRC16_INIT, prom, 32) != CRC16_VALID ||
+ crc16(CRC16_INIT, prom + 32, 32) != CRC16_VALID)
+ return -EINVAL;
+
+ /* Assemble part number */
+ j = 0;
+ for (i = 0; i < 19; i++)
+ if (prom[i + 11] != ' ')
+ partnum[j++] = prom[i + 11];
+
+ for (i = 0; i < 6; i++)
+ if (prom[i + 32] != ' ')
+ partnum[j++] = prom[i + 32];
+
+ partnum[j] = 0;
+
+ return 0;
+}
+
static int bridge_probe(struct platform_device *pdev)
{
struct xtalk_bridge_platform_data *bd = dev_get_platdata(&pdev->dev);
@@ -434,9 +571,14 @@ static int bridge_probe(struct platform_device *pdev)
struct pci_host_bridge *host;
struct irq_domain *domain, *parent;
struct fwnode_handle *fn;
+ char partnum[26];
int slot;
int err;
+ /* get part number from one wire prom */
+ if (bridge_get_partnum(virt_to_phys((void *)bd->bridge_addr), partnum))
+ return -EPROBE_DEFER; /* not available yet */
+
parent = irq_get_default_host();
if (!parent)
return -ENODEV;
@@ -517,6 +659,8 @@ static int bridge_probe(struct platform_device *pdev)
}
bridge_read(bc, b_wid_tflush); /* wait until Bridge PIO complete */
+ bridge_setup_board(bc, partnum);
+
host->dev.parent = dev;
host->sysdata = bc;
host->busnr = 0;
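
bridge_get_partnum() above relies on the Dallas one-wire convention that each 32-byte PROM page ends with the bitwise-inverted CRC16 of its contents, so recomputing the CRC across the whole page leaves the fixed residue 0xb001 (CRC16_VALID) when the page is intact. A self-contained sketch of that residue property, using the same reflected 0x8005 polynomial as the kernel's crc16():

    #include <assert.h>
    #include <stdint.h>
    #include <stddef.h>

    /* Reflected CRC16, polynomial 0x8005 (table form 0xa001), init 0. */
    static uint16_t crc16(uint16_t crc, const uint8_t *p, size_t len)
    {
        while (len--) {
            crc ^= *p++;
            for (int i = 0; i < 8; i++)
                crc = (crc >> 1) ^ ((crc & 1) ? 0xa001 : 0);
        }
        return crc;
    }

    int main(void)
    {
        uint8_t page[32] = { 0x01, '0', '3', '0', '-' };  /* sample data */
        uint16_t crc = crc16(0, page, 30);

        page[30] = ~crc & 0xff;            /* inverted CRC, low byte first */
        page[31] = (~crc >> 8) & 0xff;

        assert(crc16(0, page, 32) == 0xb001);   /* CRC16_VALID */
        return 0;
    }
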
diff --git a/arch/mips/power/cpu.c b/arch/mips/power/cpu.c
index 3340a5530de3..a15e29dfc7b3 100644
--- a/arch/mips/power/cpu.c
+++ b/arch/mips/power/cpu.c
@@ -19,8 +19,8 @@ void save_processor_state(void)
if (is_fpu_owner())
save_fp(current);
- if (cpu_has_dsp)
- save_dsp(current);
+
+ save_dsp(current);
}
void restore_processor_state(void)
@@ -29,8 +29,8 @@ void restore_processor_state(void)
if (is_fpu_owner())
restore_fp(current);
- if (cpu_has_dsp)
- restore_dsp(current);
+
+ restore_dsp(current);
}
int pfn_is_nosave(unsigned long pfn)
diff --git a/arch/mips/sgi-ip22/ip22-mc.c b/arch/mips/sgi-ip22/ip22-mc.c
index 1944d41507ef..74e5b9e27d6c 100644
--- a/arch/mips/sgi-ip22/ip22-mc.c
+++ b/arch/mips/sgi-ip22/ip22-mc.c
@@ -11,6 +11,7 @@
#include <linux/init.h>
#include <linux/export.h>
#include <linux/kernel.h>
+#include <linux/memblock.h>
#include <linux/spinlock.h>
#include <asm/io.h>
@@ -40,70 +41,36 @@ static inline unsigned int get_bank_config(int bank)
return bank % 2 ? res & 0xffff : res >> 16;
}
-struct mem {
- unsigned long addr;
- unsigned long size;
-};
-
+#if defined(CONFIG_SGI_IP28) || defined(CONFIG_32BIT)
+static void __init probe_memory(void)
+{
+ /* prom detects all usable memory */
+}
+#else
/*
- * Detect installed memory, do some sanity checks and notify kernel about it
+ * Detect installed memory, which the PROM misses
*/
static void __init probe_memory(void)
{
- int i, j, found, cnt = 0;
- struct mem bank[4];
- struct mem space[2] = {{SGIMC_SEG0_BADDR, 0}, {SGIMC_SEG1_BADDR, 0}};
+ unsigned long addr, size;
+ int i;
printk(KERN_INFO "MC: Probing memory configuration:\n");
- for (i = 0; i < ARRAY_SIZE(bank); i++) {
+ for (i = 0; i < 4; i++) {
unsigned int tmp = get_bank_config(i);
if (!(tmp & SGIMC_MCONFIG_BVALID))
continue;
- bank[cnt].size = get_bank_size(tmp);
- bank[cnt].addr = get_bank_addr(tmp);
+ size = get_bank_size(tmp);
+ addr = get_bank_addr(tmp);
printk(KERN_INFO " bank%d: %3ldM @ %08lx\n",
- i, bank[cnt].size / 1024 / 1024, bank[cnt].addr);
- cnt++;
- }
+ i, size / 1024 / 1024, addr);
- /* And you thought bubble sort is dead algorithm... */
- do {
- unsigned long addr, size;
-
- found = 0;
- for (i = 1; i < cnt; i++)
- if (bank[i-1].addr > bank[i].addr) {
- addr = bank[i].addr;
- size = bank[i].size;
- bank[i].addr = bank[i-1].addr;
- bank[i].size = bank[i-1].size;
- bank[i-1].addr = addr;
- bank[i-1].size = size;
- found = 1;
- }
- } while (found);
-
- /* Figure out how are memory banks mapped into spaces */
- for (i = 0; i < cnt; i++) {
- found = 0;
- for (j = 0; j < ARRAY_SIZE(space) && !found; j++)
- if (space[j].addr + space[j].size == bank[i].addr) {
- space[j].size += bank[i].size;
- found = 1;
- }
- /* There is either hole or overlapping memory */
- if (!found)
- printk(KERN_CRIT "MC: Memory configuration mismatch "
- "(%08lx), expect Bus Error soon\n",
- bank[i].addr);
+ if (addr >= SGIMC_SEG1_BADDR)
+ memblock_add(addr, size);
}
-
- for (i = 0; i < ARRAY_SIZE(space); i++)
- if (space[i].size)
- add_memory_region(space[i].addr, space[i].size,
- BOOT_MEM_RAM);
}
+#endif
void __init sgimc_init(void)
{
@@ -205,10 +172,9 @@ void __init sgimc_init(void)
probe_memory();
}
-void __init prom_meminit(void) {}
-void __init prom_free_prom_memory(void)
-{
#ifdef CONFIG_SGI_IP28
+void __init prom_cleanup(void)
+{
u32 mconfig1;
unsigned long flags;
spinlock_t lock;
@@ -233,5 +199,5 @@ void __init prom_free_prom_memory(void)
sgimc->mconfig1 = mconfig1;
iob();
spin_unlock_irqrestore(&lock, flags);
-#endif
}
+#endif
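
The rewritten probe_memory() drops the bank sort and hole detection: every valid bank above the PROM-visible segment is handed straight to memblock, which keeps regions sorted and merged itself. The registration pattern, as a sketch with illustrative addresses:

    #include <linux/init.h>
    #include <linux/memblock.h>
    #include <linux/sizes.h>

    static void __init register_banks_sketch(void)
    {
        /* a detected 256 MB bank (addresses illustrative) */
        memblock_add(0x20000000, SZ_256M);

        /* keep a firmware-owned slice away from the allocator */
        memblock_reserve(0x20000000, SZ_1M);
    }
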
diff --git a/arch/mips/sgi-ip27/Kconfig b/arch/mips/sgi-ip27/Kconfig
index ef3847e7aee0..e5b6cadbec85 100644
--- a/arch/mips/sgi-ip27/Kconfig
+++ b/arch/mips/sgi-ip27/Kconfig
@@ -38,10 +38,3 @@ config REPLICATE_KTEXT
Say Y here to enable replicating the kernel text across multiple
nodes in a NUMA cluster. This trades memory for speed.
-config REPLICATE_EXHANDLERS
- bool "Exception handler replication support"
- depends on SGI_IP27
- help
- Say Y here to enable replicating the kernel exception handlers
- across multiple nodes in a NUMA cluster. This trades memory for
- speed.
diff --git a/arch/mips/sgi-ip27/ip27-common.h b/arch/mips/sgi-ip27/ip27-common.h
new file mode 100644
index 000000000000..3ffbcf9bfd41
--- /dev/null
+++ b/arch/mips/sgi-ip27/ip27-common.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __IP27_COMMON_H
+#define __IP27_COMMON_H
+
+extern void ip27_reboot_setup(void);
+extern void hub_rt_clock_event_init(void);
+extern const struct plat_smp_ops ip27_smp_ops;
+
+#endif /* __IP27_COMMON_H */
diff --git a/arch/mips/sgi-ip27/ip27-hubio.c b/arch/mips/sgi-ip27/ip27-hubio.c
index 6ebb8845a77c..a538d0ceb61d 100644
--- a/arch/mips/sgi-ip27/ip27-hubio.c
+++ b/arch/mips/sgi-ip27/ip27-hubio.c
@@ -25,10 +25,9 @@ static int force_fire_and_forget = 1;
* @size: size of the PIO mapping
*
**/
-unsigned long hub_pio_map(cnodeid_t cnode, xwidgetnum_t widget,
+unsigned long hub_pio_map(nasid_t nasid, xwidgetnum_t widget,
unsigned long xtalk_addr, size_t size)
{
- nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode);
unsigned i;
/* use small-window mapping if possible */
@@ -44,7 +43,7 @@ unsigned long hub_pio_map(cnodeid_t cnode, xwidgetnum_t widget,
xtalk_addr &= ~(BWIN_SIZE-1);
for (i = 0; i < HUB_NUM_BIG_WINDOW; i++) {
- if (test_and_set_bit(i, hub_data(cnode)->h_bigwin_used))
+ if (test_and_set_bit(i, hub_data(nasid)->h_bigwin_used))
continue;
/*
@@ -171,13 +170,12 @@ static void hub_set_piomode(nasid_t nasid)
*
* @hub: hubinfo structure for our hub
*/
-void hub_pio_init(cnodeid_t cnode)
+void hub_pio_init(nasid_t nasid)
{
- nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode);
unsigned i;
/* initialize big window piomaps for this hub */
- bitmap_zero(hub_data(cnode)->h_bigwin_used, HUB_NUM_BIG_WINDOW);
+ bitmap_zero(hub_data(nasid)->h_bigwin_used, HUB_NUM_BIG_WINDOW);
for (i = 0; i < HUB_NUM_BIG_WINDOW; i++)
IIO_ITTE_DISABLE(nasid, i);
diff --git a/arch/mips/sgi-ip27/ip27-init.c b/arch/mips/sgi-ip27/ip27-init.c
index 59d5375c9021..f597e1ee2df7 100644
--- a/arch/mips/sgi-ip27/ip27-init.c
+++ b/arch/mips/sgi-ip27/ip27-init.c
@@ -13,9 +13,11 @@
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/cpumask.h>
+#include <asm/bootinfo.h>
#include <asm/cpu.h>
#include <asm/io.h>
#include <asm/pgtable.h>
+#include <asm/sgialib.h>
#include <asm/time.h>
#include <asm/sn/types.h>
#include <asm/sn/sn0/addrs.h>
@@ -36,30 +38,23 @@
#include <asm/sn/sn0/ip27.h>
#include <asm/sn/mapped_kernel.h>
+#include "ip27-common.h"
+
#define CPU_NONE (cpuid_t)-1
-static DECLARE_BITMAP(hub_init_mask, MAX_COMPACT_NODES);
+static DECLARE_BITMAP(hub_init_mask, MAX_NUMNODES);
nasid_t master_nasid = INVALID_NASID;
-cnodeid_t nasid_to_compact_node[MAX_NASIDS];
-nasid_t compact_to_nasid_node[MAX_COMPACT_NODES];
-cnodeid_t cpuid_to_compact_node[MAXCPUS];
-
-EXPORT_SYMBOL(nasid_to_compact_node);
-
struct cpuinfo_ip27 sn_cpu_info[NR_CPUS];
EXPORT_SYMBOL_GPL(sn_cpu_info);
-extern void pcibr_setup(cnodeid_t);
-
-static void per_hub_init(cnodeid_t cnode)
+static void per_hub_init(nasid_t nasid)
{
- struct hub_data *hub = hub_data(cnode);
- nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode);
+ struct hub_data *hub = hub_data(nasid);
cpumask_set_cpu(smp_processor_id(), &hub->h_cpus);
- if (test_and_set_bit(cnode, hub_init_mask))
+ if (test_and_set_bit(nasid, hub_init_mask))
return;
/*
* Set CRB timeout at 5ms, (< PI timeout of 10ms)
@@ -67,40 +62,31 @@ static void per_hub_init(cnodeid_t cnode)
REMOTE_HUB_S(nasid, IIO_ICTP, 0x800);
REMOTE_HUB_S(nasid, IIO_ICTO, 0xff);
- hub_rtc_init(cnode);
+ hub_rtc_init(nasid);
-#ifdef CONFIG_REPLICATE_EXHANDLERS
- /*
- * If this is not a headless node initialization,
- * copy over the caliased exception handlers.
- */
- if (get_compact_nodeid() == cnode) {
- extern char except_vec2_generic, except_vec3_generic;
- extern void build_tlb_refill_handler(void);
-
- memcpy((void *)(CKSEG0 + 0x100), &except_vec2_generic, 0x80);
- memcpy((void *)(CKSEG0 + 0x180), &except_vec3_generic, 0x80);
- build_tlb_refill_handler();
- memcpy((void *)(CKSEG0 + 0x100), (void *) CKSEG0, 0x80);
- memcpy((void *)(CKSEG0 + 0x180), &except_vec3_generic, 0x100);
+ if (nasid) {
+ /* copy exception handlers from first node to current node */
+ memcpy((void *)NODE_OFFSET_TO_K0(nasid, 0),
+ (void *)CKSEG0, 0x200);
__flush_cache_all();
+ /* switch to node local exception handlers */
+ REMOTE_HUB_S(nasid, PI_CALIAS_SIZE, PI_CALIAS_SIZE_8K);
}
-#endif
}
void per_cpu_init(void)
{
int cpu = smp_processor_id();
int slice = LOCAL_HUB_L(PI_CPU_NUM);
- cnodeid_t cnode = get_compact_nodeid();
- struct hub_data *hub = hub_data(cnode);
+ nasid_t nasid = get_nasid();
+ struct hub_data *hub = hub_data(nasid);
if (test_and_set_bit(slice, &hub->slice_map))
return;
clear_c0_status(ST0_IM);
- per_hub_init(cnode);
+ per_hub_init(nasid);
cpu_time_init();
install_ipi();
@@ -122,21 +108,13 @@ get_nasid(void)
>> NSRI_NODEID_SHFT);
}
-/*
- * Map the physical node id to a virtual node id (virtual node ids are contiguous).
- */
-cnodeid_t get_compact_nodeid(void)
-{
- return NASID_TO_COMPACT_NODEID(get_nasid());
-}
-
-extern void ip27_reboot_setup(void);
-
void __init plat_mem_setup(void)
{
u64 p, e, n_mode;
nasid_t nid;
+ register_smp_ops(&ip27_smp_ops);
+
ip27_reboot_setup();
/*
@@ -175,3 +153,15 @@ void __init plat_mem_setup(void)
ioport_resource.end = ~0UL;
set_io_port_base(IO_BASE);
}
+
+const char *get_system_type(void)
+{
+ return "SGI Origin";
+}
+
+void __init prom_init(void)
+{
+ prom_init_cmdline(fw_arg0, (LONG *)fw_arg1);
+ prom_meminit();
+}
+
diff --git a/arch/mips/sgi-ip27/ip27-irq.c b/arch/mips/sgi-ip27/ip27-irq.c
index 37be04975831..c72ae330ea93 100644
--- a/arch/mips/sgi-ip27/ip27-irq.c
+++ b/arch/mips/sgi-ip27/ip27-irq.c
@@ -73,7 +73,10 @@ static void setup_hub_mask(struct hub_irq_data *hd, const struct cpumask *mask)
int cpu;
cpu = cpumask_first_and(mask, cpu_online_mask);
- nasid = COMPACT_TO_NASID_NODEID(cpu_to_node(cpu));
+ if (cpu >= nr_cpu_ids)
+ cpu = cpumask_any(cpu_online_mask);
+
+ nasid = cpu_to_node(cpu);
hd->cpu = cpu;
if (!cputoslice(cpu)) {
hd->irq_mask[0] = REMOTE_HUB_PTR(nasid, PI_INT_MASK0_A);
@@ -137,8 +140,9 @@ static int hub_domain_alloc(struct irq_domain *domain, unsigned int virq,
handle_level_irq, NULL, NULL);
/* use CPU connected to nearest hub */
- hub = hub_data(NASID_TO_COMPACT_NODEID(info->nasid));
+ hub = hub_data(info->nasid);
setup_hub_mask(hd, &hub->h_cpus);
+ info->nasid = cpu_to_node(hd->cpu);
/* Make sure it's not already pending when we connect it. */
REMOTE_HUB_CLR_INTR(info->nasid, swlevel);
diff --git a/arch/mips/sgi-ip27/ip27-klconfig.c b/arch/mips/sgi-ip27/ip27-klconfig.c
index 41171ff0c75e..6cb2160e7689 100644
--- a/arch/mips/sgi-ip27/ip27-klconfig.c
+++ b/arch/mips/sgi-ip27/ip27-klconfig.c
@@ -73,11 +73,6 @@ lboard_t *find_lboard_class(lboard_t *start, unsigned char brd_type)
return (lboard_t *)NULL;
}
-cnodeid_t get_cpu_cnode(cpuid_t cpu)
-{
- return CPUID_TO_COMPACT_NODEID(cpu);
-}
-
klcpu_t *nasid_slice_to_cpuinfo(nasid_t nasid, int slice)
{
lboard_t *brd;
@@ -102,19 +97,14 @@ klcpu_t *sn_get_cpuinfo(cpuid_t cpu)
nasid_t nasid;
int slice;
klcpu_t *acpu;
- gda_t *gdap = GDA;
- cnodeid_t cnode;
if (!(cpu < MAXCPUS)) {
printk("sn_get_cpuinfo: illegal cpuid 0x%lx\n", cpu);
return NULL;
}
- cnode = get_cpu_cnode(cpu);
- if (cnode == INVALID_CNODEID)
- return NULL;
-
- if ((nasid = gdap->g_nasidtable[cnode]) == INVALID_NASID)
+ nasid = cputonasid(cpu);
+ if (nasid == INVALID_NASID)
return NULL;
for (slice = 0; slice < CPUS_PER_NODE; slice++) {
diff --git a/arch/mips/sgi-ip27/ip27-klnuma.c b/arch/mips/sgi-ip27/ip27-klnuma.c
index a4f01328de62..ee1c6ff4aa00 100644
--- a/arch/mips/sgi-ip27/ip27-klnuma.c
+++ b/arch/mips/sgi-ip27/ip27-klnuma.c
@@ -38,13 +38,13 @@ void __init setup_replication_mask(void)
#error Kernel replication works with mapped kernel support. No calias support.
#endif
{
- cnodeid_t cnode;
+ nasid_t nasid;
- for_each_online_node(cnode) {
- if (cnode == 0)
+ for_each_online_node(nasid) {
+ if (nasid == 0)
continue;
/* Advertise that we have a copy of the kernel */
- cpumask_set_cpu(cnode, &ktext_repmask);
+ cpumask_set_cpu(nasid, &ktext_repmask);
}
}
#endif
@@ -85,7 +85,6 @@ static __init void copy_kernel(nasid_t dest_nasid)
void __init replicate_kernel_text(void)
{
- cnodeid_t cnode;
nasid_t client_nasid;
nasid_t server_nasid;
@@ -94,13 +93,12 @@ void __init replicate_kernel_text(void)
/* Record where the master node should get its kernel text */
set_ktext_source(master_nasid, master_nasid);
- for_each_online_node(cnode) {
- if (cnode == 0)
+ for_each_online_node(client_nasid) {
+ if (client_nasid == 0)
continue;
- client_nasid = COMPACT_TO_NASID_NODEID(cnode);
/* Check if this node should get a copy of the kernel */
- if (cpumask_test_cpu(cnode, &ktext_repmask)) {
+ if (cpumask_test_cpu(client_nasid, &ktext_repmask)) {
server_nasid = client_nasid;
copy_kernel(server_nasid);
}
@@ -115,17 +113,16 @@ void __init replicate_kernel_text(void)
* data structures on the first couple of pages of the first slot of each
* node. If this is the case, getfirstfree(node) > getslotstart(node, 0).
*/
-unsigned long node_getfirstfree(cnodeid_t cnode)
+unsigned long node_getfirstfree(nasid_t nasid)
{
unsigned long loadbase = REP_BASE;
- nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode);
unsigned long offset;
#ifdef CONFIG_MAPPED_KERNEL
loadbase += 16777216;
#endif
offset = PAGE_ALIGN((unsigned long)(&_end)) - loadbase;
- if ((cnode == 0) || (cpumask_test_cpu(cnode, &ktext_repmask)))
+ if ((nasid == 0) || (cpumask_test_cpu(nasid, &ktext_repmask)))
return TO_NODE(nasid, offset) >> PAGE_SHIFT;
else
return KDM_TO_PHYS(PAGE_ALIGN(SYMMON_STK_ADDR(nasid, 0))) >> PAGE_SHIFT;
diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c
index fb077a947575..563aad5e6398 100644
--- a/arch/mips/sgi-ip27/ip27-memory.c
+++ b/arch/mips/sgi-ip27/ip27-memory.c
@@ -33,7 +33,7 @@
#define SLOT_PFNSHIFT (SLOT_SHIFT - PAGE_SHIFT)
#define PFN_NASIDSHFT (NASID_SHFT - PAGE_SHIFT)
-struct node_data *__node_data[MAX_COMPACT_NODES];
+struct node_data *__node_data[MAX_NUMNODES];
EXPORT_SYMBOL(__node_data);
@@ -44,23 +44,23 @@ static int is_fine_dirmode(void)
return ((LOCAL_HUB_L(NI_STATUS_REV_ID) & NSRI_REGIONSIZE_MASK) >> NSRI_REGIONSIZE_SHFT) & REGIONSIZE_FINE;
}
-static u64 get_region(cnodeid_t cnode)
+static u64 get_region(nasid_t nasid)
{
if (fine_mode)
- return COMPACT_TO_NASID_NODEID(cnode) >> NASID_TO_FINEREG_SHFT;
+ return nasid >> NASID_TO_FINEREG_SHFT;
else
- return COMPACT_TO_NASID_NODEID(cnode) >> NASID_TO_COARSEREG_SHFT;
+ return nasid >> NASID_TO_COARSEREG_SHFT;
}
static u64 region_mask;
static void gen_region_mask(u64 *region_mask)
{
- cnodeid_t cnode;
+ nasid_t nasid;
(*region_mask) = 0;
- for_each_online_node(cnode) {
- (*region_mask) |= 1ULL << get_region(cnode);
+ for_each_online_node(nasid) {
+ (*region_mask) |= 1ULL << get_region(nasid);
}
}
@@ -104,23 +104,18 @@ static void router_recurse(klrou_t *router_a, klrou_t *router_b, int depth)
router_a->rou_rflag = 0;
}
-unsigned char __node_distances[MAX_COMPACT_NODES][MAX_COMPACT_NODES];
+unsigned char __node_distances[MAX_NUMNODES][MAX_NUMNODES];
EXPORT_SYMBOL(__node_distances);
static int __init compute_node_distance(nasid_t nasid_a, nasid_t nasid_b)
{
klrou_t *router, *router_a = NULL, *router_b = NULL;
lboard_t *brd, *dest_brd;
- cnodeid_t cnode;
nasid_t nasid;
int port;
/* Figure out which routers nodes in question are connected to */
- for_each_online_node(cnode) {
- nasid = COMPACT_TO_NASID_NODEID(cnode);
-
- if (nasid == -1) continue;
-
+ for_each_online_node(nasid) {
brd = find_lboard_class((lboard_t *)KL_CONFIG_INFO(nasid),
KLTYPE_ROUTER);
@@ -176,19 +171,16 @@ static int __init compute_node_distance(nasid_t nasid_a, nasid_t nasid_b)
static void __init init_topology_matrix(void)
{
- nasid_t nasid, nasid2;
- cnodeid_t row, col;
+ nasid_t row, col;
- for (row = 0; row < MAX_COMPACT_NODES; row++)
- for (col = 0; col < MAX_COMPACT_NODES; col++)
+ for (row = 0; row < MAX_NUMNODES; row++)
+ for (col = 0; col < MAX_NUMNODES; col++)
__node_distances[row][col] = -1;
for_each_online_node(row) {
- nasid = COMPACT_TO_NASID_NODEID(row);
for_each_online_node(col) {
- nasid2 = COMPACT_TO_NASID_NODEID(col);
__node_distances[row][col] =
- compute_node_distance(nasid, nasid2);
+ compute_node_distance(row, col);
}
}
}
@@ -196,12 +188,11 @@ static void __init init_topology_matrix(void)
static void __init dump_topology(void)
{
nasid_t nasid;
- cnodeid_t cnode;
lboard_t *brd, *dest_brd;
int port;
int router_num = 0;
klrou_t *router;
- cnodeid_t row, col;
+ nasid_t row, col;
pr_info("************** Topology ********************\n");
@@ -216,11 +207,7 @@ static void __init dump_topology(void)
pr_cont("\n");
}
- for_each_online_node(cnode) {
- nasid = COMPACT_TO_NASID_NODEID(cnode);
-
- if (nasid == -1) continue;
-
+ for_each_online_node(nasid) {
brd = find_lboard_class((lboard_t *)KL_CONFIG_INFO(nasid),
KLTYPE_ROUTER);
@@ -254,21 +241,17 @@ static void __init dump_topology(void)
}
}
-static unsigned long __init slot_getbasepfn(cnodeid_t cnode, int slot)
+static unsigned long __init slot_getbasepfn(nasid_t nasid, int slot)
{
- nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode);
-
return ((unsigned long)nasid << PFN_NASIDSHFT) | (slot << SLOT_PFNSHIFT);
}
-static unsigned long __init slot_psize_compute(cnodeid_t node, int slot)
+static unsigned long __init slot_psize_compute(nasid_t nasid, int slot)
{
- nasid_t nasid;
lboard_t *brd;
klmembnk_t *banks;
unsigned long size;
- nasid = COMPACT_TO_NASID_NODEID(node);
/* Find the node board */
brd = find_lboard((lboard_t *)KL_CONFIG_INFO(nasid), KLTYPE_IP27);
if (!brd)
@@ -298,7 +281,7 @@ static unsigned long __init slot_psize_compute(cnodeid_t node, int slot)
static void __init mlreset(void)
{
- int i;
+ nasid_t nasid;
master_nasid = get_nasid();
fine_mode = is_fine_dirmode();
@@ -321,22 +304,14 @@ static void __init mlreset(void)
/*
* Set all nodes' calias sizes to 8k
*/
- for_each_online_node(i) {
- nasid_t nasid;
-
- nasid = COMPACT_TO_NASID_NODEID(i);
-
+ for_each_online_node(nasid) {
/*
* Always have node 0 in the region mask, otherwise
* CALIAS accesses get exceptions since the hub
* thinks it is a node 0 address.
*/
REMOTE_HUB_S(nasid, PI_REGION_PRESENT, (region_mask | 1));
-#ifdef CONFIG_REPLICATE_EXHANDLERS
- REMOTE_HUB_S(nasid, PI_CALIAS_SIZE, PI_CALIAS_SIZE_8K);
-#else
REMOTE_HUB_S(nasid, PI_CALIAS_SIZE, PI_CALIAS_SIZE_0);
-#endif
#ifdef LATER
/*
@@ -354,7 +329,7 @@ static void __init szmem(void)
{
unsigned long slot_psize, slot0sz = 0, nodebytes; /* Hack to detect problem configs */
int slot;
- cnodeid_t node;
+ nasid_t node;
for_each_online_node(node) {
nodebytes = 0;
@@ -384,7 +359,7 @@ static void __init szmem(void)
}
}
-static void __init node_mem_init(cnodeid_t node)
+static void __init node_mem_init(nasid_t node)
{
unsigned long slot_firstpfn = slot_getbasepfn(node, 0);
unsigned long slot_freepfn = node_getfirstfree(node);
@@ -406,12 +381,8 @@ static void __init node_mem_init(cnodeid_t node)
slot_freepfn += PFN_UP(sizeof(struct pglist_data) +
sizeof(struct hub_data));
- free_bootmem_with_active_regions(node, end_pfn);
-
memblock_reserve(slot_firstpfn << PAGE_SHIFT,
((slot_freepfn - slot_firstpfn) << PAGE_SHIFT));
-
- sparse_memory_present_with_active_regions(node);
}
/*
@@ -431,19 +402,21 @@ static struct node_data null_node = {
*/
void __init prom_meminit(void)
{
- cnodeid_t node;
+ nasid_t node;
mlreset();
szmem();
max_low_pfn = PHYS_PFN(memblock_end_of_DRAM());
- for (node = 0; node < MAX_COMPACT_NODES; node++) {
+ for (node = 0; node < MAX_NUMNODES; node++) {
if (node_online(node)) {
node_mem_init(node);
continue;
}
__node_data[node] = &null_node;
}
+
+ memblocks_present();
}
void __init prom_free_prom_memory(void)
diff --git a/arch/mips/sgi-ip27/ip27-nmi.c b/arch/mips/sgi-ip27/ip27-nmi.c
index 3aae388561d9..daf3670d94e7 100644
--- a/arch/mips/sgi-ip27/ip27-nmi.c
+++ b/arch/mips/sgi-ip27/ip27-nmi.c
@@ -17,8 +17,6 @@
#define NODE_NUM_CPUS(n) CPUS_PER_NODE
#endif
-#define CNODEID_NONE (cnodeid_t)-1
-
typedef unsigned long machreg_t;
static arch_spinlock_t nmi_lock = __ARCH_SPIN_LOCK_UNLOCKED;
@@ -152,16 +150,10 @@ void nmi_dump_hub_irq(nasid_t nasid, int slice)
* Copy the cpu registers which have been saved in the IP27prom format
* into the eframe format for the node under consideration.
*/
-void nmi_node_eframe_save(cnodeid_t cnode)
+void nmi_node_eframe_save(nasid_t nasid)
{
- nasid_t nasid;
int slice;
- /* Make sure that we have a valid node */
- if (cnode == CNODEID_NONE)
- return;
-
- nasid = COMPACT_TO_NASID_NODEID(cnode);
if (nasid == INVALID_NASID)
return;
@@ -178,10 +170,10 @@ void nmi_node_eframe_save(cnodeid_t cnode)
void
nmi_eframes_save(void)
{
- cnodeid_t cnode;
+ nasid_t nasid;
- for_each_online_node(cnode)
- nmi_node_eframe_save(cnode);
+ for_each_online_node(nasid)
+ nmi_node_eframe_save(nasid);
}
void
diff --git a/arch/mips/sgi-ip27/ip27-reset.c b/arch/mips/sgi-ip27/ip27-reset.c
index e44a15d4f573..74d078247e49 100644
--- a/arch/mips/sgi-ip27/ip27-reset.c
+++ b/arch/mips/sgi-ip27/ip27-reset.c
@@ -26,6 +26,8 @@
#include <asm/sn/gda.h>
#include <asm/sn/sn0/hub.h>
+#include "ip27-common.h"
+
void machine_restart(char *command) __noreturn;
void machine_halt(void) __noreturn;
void machine_power_off(void) __noreturn;
@@ -45,8 +47,7 @@ static void ip27_machine_restart(char *command)
#endif
#if 0
for_each_online_node(i)
- REMOTE_HUB_S(COMPACT_TO_NASID_NODEID(i), PROMOP_REG,
- PROMOP_REBOOT);
+ REMOTE_HUB_S(i, PROMOP_REG, PROMOP_REBOOT);
#else
LOCAL_HUB_S(NI_PORT_RESET, NPR_PORTRESET | NPR_LOCALRESET);
#endif
@@ -61,8 +62,7 @@ static void ip27_machine_halt(void)
smp_send_stop();
#endif
for_each_online_node(i)
- REMOTE_HUB_S(COMPACT_TO_NASID_NODEID(i), PROMOP_REG,
- PROMOP_RESTART);
+ REMOTE_HUB_S(i, PROMOP_REG, PROMOP_RESTART);
LOCAL_HUB_S(NI_PORT_RESET, NPR_PORTRESET | NPR_LOCALRESET);
noreturn;
}
diff --git a/arch/mips/sgi-ip27/ip27-smp.c b/arch/mips/sgi-ip27/ip27-smp.c
index 20b81209c6b8..faa0244c8b0c 100644
--- a/arch/mips/sgi-ip27/ip27-smp.c
+++ b/arch/mips/sgi-ip27/ip27-smp.c
@@ -27,38 +27,19 @@
#include <asm/sn/sn0/hubio.h>
#include <asm/sn/sn0/ip27.h>
+#include "ip27-common.h"
+
/*
* Takes as first input the PROM assigned cpu id, and the kernel
* assigned cpu id as the second.
*/
-static void alloc_cpupda(cpuid_t cpu, int cpunum)
+static void alloc_cpupda(nasid_t nasid, cpuid_t cpu, int cpunum)
{
- cnodeid_t node = get_cpu_cnode(cpu);
- nasid_t nasid = COMPACT_TO_NASID_NODEID(node);
-
cputonasid(cpunum) = nasid;
- sn_cpu_info[cpunum].p_nodeid = node;
cputoslice(cpunum) = get_cpu_slice(cpu);
}
-static nasid_t get_actual_nasid(lboard_t *brd)
-{
- klhub_t *hub;
-
- if (!brd)
- return INVALID_NASID;
-
- /* find out if we are a completely disabled brd. */
- hub = (klhub_t *)find_first_component(brd, KLSTRUCT_HUB);
- if (!hub)
- return INVALID_NASID;
- if (!(hub->hub_info.flags & KLINFO_ENABLE)) /* disabled node brd */
- return hub->hub_info.physid;
- else
- return brd->brd_nasid;
-}
-
-static int do_cpumask(cnodeid_t cnode, nasid_t nasid, int highest)
+static int do_cpumask(nasid_t nasid, int highest)
{
static int tot_cpus_found = 0;
lboard_t *brd;
@@ -72,16 +53,13 @@ static int do_cpumask(cnodeid_t cnode, nasid_t nasid, int highest)
acpu = (klcpu_t *)find_first_component(brd, KLSTRUCT_CPU);
while (acpu) {
cpuid = acpu->cpu_info.virtid;
- /* cnode is not valid for completely disabled brds */
- if (get_actual_nasid(brd) == brd->brd_nasid)
- cpuid_to_compact_node[cpuid] = cnode;
- if (cpuid > highest)
- highest = cpuid;
/* Only let it join in if it's marked enabled */
if ((acpu->cpu_info.flags & KLINFO_ENABLE) &&
(tot_cpus_found != NR_CPUS)) {
+ if (cpuid > highest)
+ highest = cpuid;
set_cpu_possible(cpuid, true);
- alloc_cpupda(cpuid, tot_cpus_found);
+ alloc_cpupda(nasid, cpuid, tot_cpus_found);
cpus_found++;
tot_cpus_found++;
}
@@ -103,29 +81,13 @@ void cpu_node_probe(void)
int i, highest = 0;
gda_t *gdap = GDA;
- /*
- * Initialize the arrays to invalid nodeid (-1)
- */
- for (i = 0; i < MAX_COMPACT_NODES; i++)
- compact_to_nasid_node[i] = INVALID_NASID;
- for (i = 0; i < MAX_NASIDS; i++)
- nasid_to_compact_node[i] = INVALID_CNODEID;
- for (i = 0; i < MAXCPUS; i++)
- cpuid_to_compact_node[i] = INVALID_CNODEID;
-
- /*
- * MCD - this whole "compact node" stuff can probably be dropped,
- * as we can handle sparse numbering now
- */
nodes_clear(node_online_map);
- for (i = 0; i < MAX_COMPACT_NODES; i++) {
+ for (i = 0; i < MAX_NUMNODES; i++) {
nasid_t nasid = gdap->g_nasidtable[i];
if (nasid == INVALID_NASID)
break;
- compact_to_nasid_node[i] = nasid;
- nasid_to_compact_node[nasid] = i;
- node_set_online(num_online_nodes());
- highest = do_cpumask(i, nasid, highest);
+ node_set_online(nasid);
+ highest = do_cpumask(nasid, highest);
}
printk("Discovered %d cpus on %d nodes\n", highest + 1, num_online_nodes());
@@ -162,11 +124,10 @@ static void ip27_send_ipi_single(int destid, unsigned int action)
irq += cputoslice(destid);
/*
- * Convert the compact hub number to the NASID to get the correct
- * part of the address space. Then set the interrupt bit associated
- * with the CPU we want to send the interrupt to.
+ * Set the interrupt bit associated with the CPU we want to
+ * send the interrupt to.
*/
- REMOTE_HUB_SEND_INTR(COMPACT_TO_NASID_NODEID(cpu_to_node(destid)), irq);
+ REMOTE_HUB_SEND_INTR(cpu_to_node(destid), irq);
}
static void ip27_send_ipi_mask(const struct cpumask *mask, unsigned int action)
@@ -184,8 +145,6 @@ static void ip27_init_cpu(void)
static void ip27_smp_finish(void)
{
- extern void hub_rt_clock_event_init(void);
-
hub_rt_clock_event_init();
local_irq_enable();
}
@@ -208,23 +167,20 @@ static int ip27_boot_secondary(int cpu, struct task_struct *idle)
static void __init ip27_smp_setup(void)
{
- cnodeid_t cnode;
+ nasid_t nasid;
- for_each_online_node(cnode) {
- if (cnode == 0)
+ for_each_online_node(nasid) {
+ if (nasid == 0)
continue;
- intr_clear_all(COMPACT_TO_NASID_NODEID(cnode));
+ intr_clear_all(nasid);
}
replicate_kernel_text();
/*
- * Assumption to be fixed: we're always booted on logical / physical
- * processor 0. While we're always running on logical processor 0
- * this still means this is physical processor zero; it might for
- * example be disabled in the firmware.
+ * The PROM sets up the system so that the boot CPU is always the first CPU on nasid 0
*/
- alloc_cpupda(0, 0);
+ alloc_cpupda(0, 0, 0);
}
static void __init ip27_prepare_cpus(unsigned int max_cpus)
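
With the compact-node layer gone, the nasid itself is the NUMA node id, so node_set_online(nasid) in cpu_node_probe() may leave gaps in node_online_map. That is fine because for_each_online_node() walks only the set bits. An illustrative sketch:

    #include <linux/init.h>
    #include <linux/nodemask.h>

    static int __init count_online_nodes_sketch(void)
    {
        int nid, cnt = 0;

        node_set_online(0);        /* nasid 0 */
        node_set_online(4);        /* nasid 4; 1..3 stay offline */

        for_each_online_node(nid)  /* visits 0 and 4 only */
            cnt++;

        return cnt;                /* 2 */
    }
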
diff --git a/arch/mips/sgi-ip27/ip27-timer.c b/arch/mips/sgi-ip27/ip27-timer.c
index 9b4b9ac621a3..17302bbfa7a6 100644
--- a/arch/mips/sgi-ip27/ip27-timer.c
+++ b/arch/mips/sgi-ip27/ip27-timer.c
@@ -38,6 +38,8 @@
#include <asm/sn/sn0/hubio.h>
#include <asm/pci/bridge.h>
+#include "ip27-common.h"
+
static int rt_next_event(unsigned long delta, struct clock_event_device *evt)
{
unsigned int cpu = smp_processor_id();
@@ -170,7 +172,7 @@ void cpu_time_init(void)
printk("CPU %d clock is %dMHz.\n", smp_processor_id(), cpu->cpu_speed);
}
-void hub_rtc_init(cnodeid_t cnode)
+void hub_rtc_init(nasid_t nasid)
{
/*
@@ -178,7 +180,7 @@ void hub_rtc_init(cnodeid_t cnode)
* If this is not the current node then it is a cpuless
* node and timeouts will not happen there.
*/
- if (get_compact_nodeid() == cnode) {
+ if (get_nasid() == nasid) {
LOCAL_HUB_S(PI_RT_EN_A, 1);
LOCAL_HUB_S(PI_RT_EN_B, 1);
LOCAL_HUB_S(PI_PROF_EN_A, 0);
diff --git a/arch/mips/sgi-ip27/ip27-xtalk.c b/arch/mips/sgi-ip27/ip27-xtalk.c
index 4a1f0b0c29e2..5218b900f855 100644
--- a/arch/mips/sgi-ip27/ip27-xtalk.c
+++ b/arch/mips/sgi-ip27/ip27-xtalk.c
@@ -10,6 +10,7 @@
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/platform_device.h>
+#include <linux/platform_data/sgi-w1.h>
#include <linux/platform_data/xtalk-bridge.h>
#include <asm/sn/addrs.h>
#include <asm/sn/types.h>
@@ -26,9 +27,35 @@
static void bridge_platform_create(nasid_t nasid, int widget, int masterwid)
{
struct xtalk_bridge_platform_data *bd;
+ struct sgi_w1_platform_data *wd;
struct platform_device *pdev;
+ struct resource w1_res;
unsigned long offset;
+ offset = NODE_OFFSET(nasid);
+
+ wd = kzalloc(sizeof(*wd), GFP_KERNEL);
+ if (!wd)
+ goto no_mem;
+
+ snprintf(wd->dev_id, sizeof(wd->dev_id), "bridge-%012lx",
+ offset + (widget << SWIN_SIZE_BITS));
+
+ memset(&w1_res, 0, sizeof(w1_res));
+ w1_res.start = offset + (widget << SWIN_SIZE_BITS) +
+ offsetof(struct bridge_regs, b_nic);
+ w1_res.end = w1_res.start + 3;
+ w1_res.flags = IORESOURCE_MEM;
+
+ pdev = platform_device_alloc("sgi_w1", PLATFORM_DEVID_AUTO);
+ if (!pdev) {
+ kfree(wd);
+ goto no_mem;
+ }
+ platform_device_add_resources(pdev, &w1_res, 1);
+ platform_device_add_data(pdev, wd, sizeof(*wd));
+ platform_device_add(pdev);
+
bd = kzalloc(sizeof(*bd), GFP_KERNEL);
if (!bd)
goto no_mem;
@@ -38,7 +65,6 @@ static void bridge_platform_create(nasid_t nasid, int widget, int masterwid)
goto no_mem;
}
- offset = NODE_OFFSET(nasid);
bd->bridge_addr = RAW_NODE_SWIN_BASE(nasid, widget);
bd->intr_addr = BIT_ULL(47) + 0x01800000 + PI_INT_PEND_MOD;
@@ -46,14 +72,14 @@ static void bridge_platform_create(nasid_t nasid, int widget, int masterwid)
bd->masterwid = masterwid;
bd->mem.name = "Bridge PCI MEM";
- bd->mem.start = offset + (widget << SWIN_SIZE_BITS);
- bd->mem.end = bd->mem.start + SWIN_SIZE - 1;
+ bd->mem.start = offset + (widget << SWIN_SIZE_BITS) + BRIDGE_DEVIO0;
+ bd->mem.end = offset + (widget << SWIN_SIZE_BITS) + SWIN_SIZE - 1;
bd->mem.flags = IORESOURCE_MEM;
bd->mem_offset = offset;
bd->io.name = "Bridge PCI IO";
- bd->io.start = offset + (widget << SWIN_SIZE_BITS);
- bd->io.end = bd->io.start + SWIN_SIZE - 1;
+ bd->io.start = offset + (widget << SWIN_SIZE_BITS) + BRIDGE_DEVIO0;
+ bd->io.end = offset + (widget << SWIN_SIZE_BITS) + SWIN_SIZE - 1;
bd->io.flags = IORESOURCE_IO;
bd->io_offset = offset;
@@ -81,6 +107,8 @@ static int probe_one_port(nasid_t nasid, int widget, int masterwid)
bridge_platform_create(nasid, widget, masterwid);
break;
default:
+ pr_info("xtalk:n%d/%d unknown widget (0x%x)\n",
+ nasid, widget, partnum);
break;
}
@@ -138,14 +166,12 @@ static int xbow_probe(nasid_t nasid)
return 0;
}
-static void xtalk_probe_node(cnodeid_t nid)
+static void xtalk_probe_node(nasid_t nasid)
{
volatile u64 hubreg;
- nasid_t nasid;
xwidget_part_num_t partnum;
widgetreg_t widget_id;
- nasid = COMPACT_TO_NASID_NODEID(nid);
hubreg = REMOTE_HUB_L(nasid, IIO_LLP_CSR);
/* check whether the link is up */
@@ -173,10 +199,10 @@ static void xtalk_probe_node(cnodeid_t nid)
static int __init xtalk_init(void)
{
- cnodeid_t cnode;
+ nasid_t nasid;
- for_each_online_node(cnode)
- xtalk_probe_node(cnode);
+ for_each_online_node(nasid)
+ xtalk_probe_node(nasid);
return 0;
}
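The bridge_platform_create() hunks above follow the standard two-step platform-device pattern: allocate the device, attach resources and platform data, then add it. A minimal sketch of that pattern (hypothetical "my-dev" device; platform_device_add_data() works the same way, and the error handling shown is what the hunk above elides for brevity):

#include <linux/ioport.h>
#include <linux/platform_device.h>

static int register_my_dev(resource_size_t base)
{
	struct resource res = DEFINE_RES_MEM(base, 4);	/* 4-byte register window */
	struct platform_device *pdev;
	int err;

	pdev = platform_device_alloc("my-dev", PLATFORM_DEVID_AUTO);
	if (!pdev)
		return -ENOMEM;

	err = platform_device_add_resources(pdev, &res, 1);
	if (!err)
		err = platform_device_add(pdev);
	if (err)
		platform_device_put(pdev);	/* drops the allocation on failure */
	return err;
}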
diff --git a/arch/mips/sgi-ip30/Makefile b/arch/mips/sgi-ip30/Makefile
new file mode 100644
index 000000000000..18cf561b3d61
--- /dev/null
+++ b/arch/mips/sgi-ip30/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the IP30 specific kernel interface routines under Linux.
+#
+
+obj-y := ip30-irq.o ip30-power.o ip30-setup.o ip30-timer.o ip30-xtalk.o
+
+obj-$(CONFIG_EARLY_PRINTK) += ip30-console.o
+obj-$(CONFIG_SMP) += ip30-smp.o
diff --git a/arch/mips/sgi-ip30/Platform b/arch/mips/sgi-ip30/Platform
new file mode 100644
index 000000000000..2b5695c2049a
--- /dev/null
+++ b/arch/mips/sgi-ip30/Platform
@@ -0,0 +1,8 @@
+#
+# SGI-IP30 (Octane/Octane2)
+#
+ifdef CONFIG_SGI_IP30
+platform-$(CONFIG_SGI_IP30) += sgi-ip30/
+cflags-$(CONFIG_SGI_IP30) += -I$(srctree)/arch/mips/include/asm/mach-ip30
+load-$(CONFIG_SGI_IP30) += 0xa800000020004000
+endif
diff --git a/arch/mips/sgi-ip30/ip30-common.h b/arch/mips/sgi-ip30/ip30-common.h
new file mode 100644
index 000000000000..d2bcaee712f3
--- /dev/null
+++ b/arch/mips/sgi-ip30/ip30-common.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __IP30_COMMON_H
+#define __IP30_COMMON_H
+
+extern struct plat_smp_ops ip30_smp_ops;
+extern void __init ip30_per_cpu_init(void);
+
+#endif /* __IP30_COMMON_H */
diff --git a/arch/mips/sgi-ip30/ip30-console.c b/arch/mips/sgi-ip30/ip30-console.c
new file mode 100644
index 000000000000..b91f8c4fdc78
--- /dev/null
+++ b/arch/mips/sgi-ip30/ip30-console.c
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/io.h>
+
+#include <asm/sn/ioc3.h>
+
+static inline struct ioc3_uartregs *console_uart(void)
+{
+ struct ioc3 *ioc3;
+
+ ioc3 = (struct ioc3 *)((void *)(0x900000001f600000));
+ return &ioc3->sregs.uarta;
+}
+
+void prom_putchar(char c)
+{
+ struct ioc3_uartregs *uart = console_uart();
+
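+ /*
+ * 0x20 is the 16550 LSR THRE (transmit holding register empty) bit;
+ * poll until the IOC3's UART can accept another byte.
+ */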
+ while ((readb(&uart->iu_lsr) & 0x20) == 0)
+ cpu_relax();
+
+ writeb(c, &uart->iu_thr);
+}
diff --git a/arch/mips/sgi-ip30/ip30-irq.c b/arch/mips/sgi-ip30/ip30-irq.c
new file mode 100644
index 000000000000..d46655b914f1
--- /dev/null
+++ b/arch/mips/sgi-ip30/ip30-irq.c
@@ -0,0 +1,328 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ip30-irq.c: High-level interrupt handling for IP30 architecture.
+ */
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/percpu.h>
+#include <linux/spinlock.h>
+#include <linux/tick.h>
+#include <linux/types.h>
+
+#include <asm/irq_cpu.h>
+#include <asm/sgi/heart.h>
+
+struct heart_irq_data {
+ u64 *irq_mask;
+ int cpu;
+};
+
+static DECLARE_BITMAP(heart_irq_map, HEART_NUM_IRQS);
+
+static DEFINE_PER_CPU(unsigned long, irq_enable_mask);
+
+static inline int heart_alloc_int(void)
+{
+ int bit;
+
+again:
+ bit = find_first_zero_bit(heart_irq_map, HEART_NUM_IRQS);
+ if (bit >= HEART_NUM_IRQS)
+ return -ENOSPC;
+
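+ /*
+ * test_and_set_bit() is atomic and returns the old bit value: if
+ * another CPU claimed this bit between the scan above and here,
+ * rescan instead of handing out a duplicate.
+ */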
+ if (test_and_set_bit(bit, heart_irq_map))
+ goto again;
+
+ return bit;
+}
+
+static void ip30_error_irq(struct irq_desc *desc)
+{
+ u64 pending, mask, cause, error_irqs, err_reg;
+ int cpu = smp_processor_id();
+ int i;
+
+ pending = heart_read(&heart_regs->isr);
+ mask = heart_read(&heart_regs->imr[cpu]);
+ cause = heart_read(&heart_regs->cause);
+ error_irqs = (pending & HEART_L4_INT_MASK & mask);
+
+ /* Bail if there's nothing to process (how did we get here, then?) */
+ if (unlikely(!error_irqs))
+ return;
+
+ /* Prevent any of the error IRQs from firing again. */
+ heart_write(mask & ~(pending), &heart_regs->imr[cpu]);
+
+ /* Ack all error IRQs. */
+ heart_write(HEART_L4_INT_MASK, &heart_regs->clear_isr);
+
+ /*
+ * If we also have a cause value, then something happened, so loop
+ * through the error IRQs and report a "heart attack" for each one
+ * and print the value of the HEART cause register. This is really
+ * primitive right now, but it should hopefully work until a more
+ * robust error handling routine can be put together.
+ *
+ * Refer to heart.h for the HC_* macros to work out the cause
+ * that got us here.
+ */
+ if (cause) {
+ pr_alert("IP30: CPU%d: HEART ATTACK! ISR = 0x%.16llx, IMR = 0x%.16llx, CAUSE = 0x%.16llx\n",
+ cpu, pending, mask, cause);
+
+ if (cause & HC_COR_MEM_ERR) {
+ err_reg = heart_read(&heart_regs->mem_err_addr);
+ pr_alert(" HEART_MEMERR_ADDR = 0x%.16llx\n", err_reg);
+ }
+
+ /* i = 63; i >= 51; i-- */
+ for (i = HEART_ERR_MASK_END; i >= HEART_ERR_MASK_START; i--)
+ if ((pending >> i) & 1)
+ pr_alert(" HEART Error IRQ #%d\n", i);
+
+ /* XXX: Seems possible to loop forever here, so panic(). */
+ panic("IP30: Fatal Error !\n");
+ }
+
+ /* Unmask the error IRQs. */
+ heart_write(mask, &heart_regs->imr[cpu]);
+}
+
+static void ip30_normal_irq(struct irq_desc *desc)
+{
+ int cpu = smp_processor_id();
+ struct irq_domain *domain;
+ u64 pend, mask;
+ int irq;
+
+ pend = heart_read(&heart_regs->isr);
+ mask = (heart_read(&heart_regs->imr[cpu]) &
+ (HEART_L0_INT_MASK | HEART_L1_INT_MASK | HEART_L2_INT_MASK));
+
+ pend &= mask;
+ if (unlikely(!pend))
+ return;
+
+#ifdef CONFIG_SMP
+ if (pend & BIT_ULL(HEART_L2_INT_RESCHED_CPU_0)) {
+ heart_write(BIT_ULL(HEART_L2_INT_RESCHED_CPU_0),
+ &heart_regs->clear_isr);
+ scheduler_ipi();
+ } else if (pend & BIT_ULL(HEART_L2_INT_RESCHED_CPU_1)) {
+ heart_write(BIT_ULL(HEART_L2_INT_RESCHED_CPU_1),
+ &heart_regs->clear_isr);
+ scheduler_ipi();
+ } else if (pend & BIT_ULL(HEART_L2_INT_CALL_CPU_0)) {
+ heart_write(BIT_ULL(HEART_L2_INT_CALL_CPU_0),
+ &heart_regs->clear_isr);
+ generic_smp_call_function_interrupt();
+ } else if (pend & BIT_ULL(HEART_L2_INT_CALL_CPU_1)) {
+ heart_write(BIT_ULL(HEART_L2_INT_CALL_CPU_1),
+ &heart_regs->clear_isr);
+ generic_smp_call_function_interrupt();
+ } else
+#endif
+ {
+ domain = irq_desc_get_handler_data(desc);
+ irq = irq_linear_revmap(domain, __ffs(pend));
+ if (irq)
+ generic_handle_irq(irq);
+ else
+ spurious_interrupt();
+ }
+}
+
+static void ip30_ack_heart_irq(struct irq_data *d)
+{
+ heart_write(BIT_ULL(d->hwirq), &heart_regs->clear_isr);
+}
+
+static void ip30_mask_heart_irq(struct irq_data *d)
+{
+ struct heart_irq_data *hd = irq_data_get_irq_chip_data(d);
+ unsigned long *mask = &per_cpu(irq_enable_mask, hd->cpu);
+
+ clear_bit(d->hwirq, mask);
+ heart_write(*mask, &heart_regs->imr[hd->cpu]);
+}
+
+static void ip30_mask_and_ack_heart_irq(struct irq_data *d)
+{
+ struct heart_irq_data *hd = irq_data_get_irq_chip_data(d);
+ unsigned long *mask = &per_cpu(irq_enable_mask, hd->cpu);
+
+ clear_bit(d->hwirq, mask);
+ heart_write(*mask, &heart_regs->imr[hd->cpu]);
+ heart_write(BIT_ULL(d->hwirq), &heart_regs->clear_isr);
+}
+
+static void ip30_unmask_heart_irq(struct irq_data *d)
+{
+ struct heart_irq_data *hd = irq_data_get_irq_chip_data(d);
+ unsigned long *mask = &per_cpu(irq_enable_mask, hd->cpu);
+
+ set_bit(d->hwirq, mask);
+ heart_write(*mask, &heart_regs->imr[hd->cpu]);
+}
+
+static int ip30_set_heart_irq_affinity(struct irq_data *d,
+ const struct cpumask *mask, bool force)
+{
+ struct heart_irq_data *hd = irq_data_get_irq_chip_data(d);
+
+ if (!hd)
+ return -EINVAL;
+
+ if (irqd_is_started(d))
+ ip30_mask_and_ack_heart_irq(d);
+
+ hd->cpu = cpumask_first_and(mask, cpu_online_mask);
+
+ if (irqd_is_started(d))
+ ip30_unmask_heart_irq(d);
+
+ irq_data_update_effective_affinity(d, cpumask_of(hd->cpu));
+
+ return 0;
+}
+
+static struct irq_chip heart_irq_chip = {
+ .name = "HEART",
+ .irq_ack = ip30_ack_heart_irq,
+ .irq_mask = ip30_mask_heart_irq,
+ .irq_mask_ack = ip30_mask_and_ack_heart_irq,
+ .irq_unmask = ip30_unmask_heart_irq,
+ .irq_set_affinity = ip30_set_heart_irq_affinity,
+};
+
+static int heart_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
+{
+ struct irq_alloc_info *info = arg;
+ struct heart_irq_data *hd;
+ int hwirq;
+
+ if (nr_irqs > 1 || !info)
+ return -EINVAL;
+
+ hd = kzalloc(sizeof(*hd), GFP_KERNEL);
+ if (!hd)
+ return -ENOMEM;
+
+ hwirq = heart_alloc_int();
+ if (hwirq < 0) {
+ kfree(hd);
+ return -EAGAIN;
+ }
+ irq_domain_set_info(domain, virq, hwirq, &heart_irq_chip, hd,
+ handle_level_irq, NULL, NULL);
+
+ return 0;
+}
+
+static void heart_domain_free(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs)
+{
+ struct irq_data *irqd;
+
+ if (nr_irqs > 1)
+ return;
+
+ irqd = irq_domain_get_irq_data(domain, virq);
+ if (irqd) {
+ clear_bit(irqd->hwirq, heart_irq_map);
+ kfree(irqd->chip_data); /* kfree(NULL) is a no-op */
+ }
+}
+
+static const struct irq_domain_ops heart_domain_ops = {
+ .alloc = heart_domain_alloc,
+ .free = heart_domain_free,
+};
+
+void __init ip30_install_ipi(void)
+{
+ int cpu = smp_processor_id();
+ unsigned long *mask = &per_cpu(irq_enable_mask, cpu);
+
+ set_bit(HEART_L2_INT_RESCHED_CPU_0 + cpu, mask);
+ heart_write(BIT_ULL(HEART_L2_INT_RESCHED_CPU_0 + cpu),
+ &heart_regs->clear_isr);
+ set_bit(HEART_L2_INT_CALL_CPU_0 + cpu, mask);
+ heart_write(BIT_ULL(HEART_L2_INT_CALL_CPU_0 + cpu),
+ &heart_regs->clear_isr);
+
+ heart_write(*mask, &heart_regs->imr[cpu]);
+}
+
+void __init arch_init_irq(void)
+{
+ struct irq_domain *domain;
+ struct fwnode_handle *fn;
+ unsigned long *mask;
+ int i;
+
+ mips_cpu_irq_init();
+
+ /* Mask all IRQs. */
+ heart_write(HEART_CLR_ALL_MASK, &heart_regs->imr[0]);
+ heart_write(HEART_CLR_ALL_MASK, &heart_regs->imr[1]);
+ heart_write(HEART_CLR_ALL_MASK, &heart_regs->imr[2]);
+ heart_write(HEART_CLR_ALL_MASK, &heart_regs->imr[3]);
+
+ /* Ack everything. */
+ heart_write(HEART_ACK_ALL_MASK, &heart_regs->clear_isr);
+
+ /* Enable specific HEART error IRQs for each CPU. */
+ mask = &per_cpu(irq_enable_mask, 0);
+ *mask |= HEART_CPU0_ERR_MASK;
+ heart_write(*mask, &heart_regs->imr[0]);
+ mask = &per_cpu(irq_enable_mask, 1);
+ *mask |= HEART_CPU1_ERR_MASK;
+ heart_write(*mask, &heart_regs->imr[1]);
+
+ /*
+ * Some HEART bits are reserved by hardware or by software convention.
+ * Mark these as reserved right away so they won't be accidentally
+ * used later.
+ */
+ set_bit(HEART_L0_INT_GENERIC, heart_irq_map);
+ set_bit(HEART_L0_INT_FLOW_CTRL_HWTR_0, heart_irq_map);
+ set_bit(HEART_L0_INT_FLOW_CTRL_HWTR_1, heart_irq_map);
+ set_bit(HEART_L2_INT_RESCHED_CPU_0, heart_irq_map);
+ set_bit(HEART_L2_INT_RESCHED_CPU_1, heart_irq_map);
+ set_bit(HEART_L2_INT_CALL_CPU_0, heart_irq_map);
+ set_bit(HEART_L2_INT_CALL_CPU_1, heart_irq_map);
+ set_bit(HEART_L3_INT_TIMER, heart_irq_map);
+
+ /* Reserve the error interrupts (#51 to #63). */
+ for (i = HEART_L4_INT_XWID_ERR_9; i <= HEART_L4_INT_HEART_EXCP; i++)
+ set_bit(i, heart_irq_map);
+
+ fn = irq_domain_alloc_named_fwnode("HEART");
+ WARN_ON(fn == NULL);
+ if (!fn)
+ return;
+ domain = irq_domain_create_linear(fn, HEART_NUM_IRQS,
+ &heart_domain_ops, NULL);
+ WARN_ON(domain == NULL);
+ if (!domain)
+ return;
+
+ irq_set_default_host(domain);
+
+ irq_set_percpu_devid(IP30_HEART_L0_IRQ);
+ irq_set_chained_handler_and_data(IP30_HEART_L0_IRQ, ip30_normal_irq,
+ domain);
+ irq_set_percpu_devid(IP30_HEART_L1_IRQ);
+ irq_set_chained_handler_and_data(IP30_HEART_L1_IRQ, ip30_normal_irq,
+ domain);
+ irq_set_percpu_devid(IP30_HEART_L2_IRQ);
+ irq_set_chained_handler_and_data(IP30_HEART_L2_IRQ, ip30_normal_irq,
+ domain);
+ irq_set_percpu_devid(IP30_HEART_ERR_IRQ);
+ irq_set_chained_handler_and_data(IP30_HEART_ERR_IRQ, ip30_error_irq,
+ domain);
+}
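heart_alloc_int() above is the classic lock-free bitmap allocator: scan for a clear bit, claim it atomically, rescan on a lost race. A stand-alone user-space analog of the same pattern (illustration only; GCC atomic builtins stand in for the kernel bitops, and a 64-bit unsigned long is assumed):

#include <stdio.h>

#define NBITS 64

static unsigned long map;	/* bit i set => interrupt i allocated */

static int alloc_bit(void)
{
	for (;;) {
		unsigned long snap = __atomic_load_n(&map, __ATOMIC_RELAXED);
		int bit = __builtin_ffsl(~snap);	/* first zero bit, 1-based; 0 if full */

		if (!bit || bit > NBITS)
			return -1;
		bit--;

		/* test_and_set_bit() analog: claim the bit, retry if we raced */
		if (!(__atomic_fetch_or(&map, 1UL << bit, __ATOMIC_ACQ_REL) & (1UL << bit)))
			return bit;
	}
}

int main(void)
{
	printf("%d %d %d\n", alloc_bit(), alloc_bit(), alloc_bit());	/* 0 1 2 */
	return 0;
}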
diff --git a/arch/mips/sgi-ip30/ip30-power.c b/arch/mips/sgi-ip30/ip30-power.c
new file mode 100644
index 000000000000..120b3f3d5108
--- /dev/null
+++ b/arch/mips/sgi-ip30/ip30-power.c
@@ -0,0 +1,41 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ip30-power.c: Software powerdown and reset handling for IP30 architecture.
+ *
+ * Copyright (C) 2004-2007 Stanislaw Skowronek <skylark@unaligned.org>
+ * 2014 Joshua Kinard <kumba@gentoo.org>
+ * 2009 Johannes Dickgreber <tanzy@gmx.de>
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/notifier.h>
+#include <linux/delay.h>
+#include <linux/rtc/ds1685.h>
+#include <linux/interrupt.h>
+#include <linux/pm.h>
+
+#include <asm/reboot.h>
+#include <asm/sgi/heart.h>
+
+static void __noreturn ip30_machine_restart(char *cmd)
+{
+ /*
+ * Execute HEART cold reset
+ * Yes, it's cold-HEARTed!
+ */
+ heart_write((heart_read(&heart_regs->mode) | HM_COLD_RST),
+ &heart_regs->mode);
+ unreachable();
+}
+
+static int __init ip30_reboot_setup(void)
+{
+ _machine_restart = ip30_machine_restart;
+
+ return 0;
+}
+
+subsys_initcall(ip30_reboot_setup);
diff --git a/arch/mips/sgi-ip30/ip30-setup.c b/arch/mips/sgi-ip30/ip30-setup.c
new file mode 100644
index 000000000000..44b1607e964d
--- /dev/null
+++ b/arch/mips/sgi-ip30/ip30-setup.c
@@ -0,0 +1,138 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SGI IP30 miscellaneous setup bits.
+ *
+ * Copyright (C) 2004-2007 Stanislaw Skowronek <skylark@unaligned.org>
+ * 2007 Joshua Kinard <kumba@gentoo.org>
+ * 2009 Johannes Dickgreber <tanzy@gmx.de>
+ */
+
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/percpu.h>
+#include <linux/memblock.h>
+
+#include <asm/smp-ops.h>
+#include <asm/sgialib.h>
+#include <asm/time.h>
+#include <asm/sgi/heart.h>
+
+#include "ip30-common.h"
+
+/* Structure of accessible HEART registers located in XKPHYS space. */
+struct ip30_heart_regs __iomem *heart_regs = HEART_XKPHYS_BASE;
+
+/*
+ * ARCS will report up to the first 1GB of
+ * memory if queried. Anything beyond that
+ * is marked as reserved.
+ */
+#define IP30_MAX_PROM_MEMORY _AC(0x40000000, UL)
+
+/*
+ * Memory in the Octane starts at 512MB
+ */
+#define IP30_MEMORY_BASE _AC(0x20000000, UL)
+
+/*
+ * If using ARCS to probe for memory, then
+ * remaining memory will start at this offset.
+ */
+#define IP30_REAL_MEMORY_START (IP30_MEMORY_BASE + IP30_MAX_PROM_MEMORY)
+
+#define MEM_SHIFT(x) ((x) >> 20)
+
+static void __init ip30_mem_init(void)
+{
+ unsigned long total_mem;
+ phys_addr_t addr;
+ phys_addr_t size;
+ u32 memcfg;
+ int i;
+
+ total_mem = 0;
+ for (i = 0; i < HEART_MEMORY_BANKS; i++) {
+ memcfg = __raw_readl(&heart_regs->mem_cfg.l[i]);
+ if (!(memcfg & HEART_MEMCFG_VALID))
+ continue;
+
+ addr = memcfg & HEART_MEMCFG_ADDR_MASK;
+ addr <<= HEART_MEMCFG_UNIT_SHIFT;
+ addr += IP30_MEMORY_BASE;
+ size = memcfg & HEART_MEMCFG_SIZE_MASK;
+ size >>= HEART_MEMCFG_SIZE_SHIFT;
+ size += 1;
+ size <<= HEART_MEMCFG_UNIT_SHIFT;
+
+ total_mem += size;
+
+ if (addr >= IP30_REAL_MEMORY_START)
+ memblock_free(addr, size);
+ else if ((addr + size) > IP30_REAL_MEMORY_START)
+ memblock_free(IP30_REAL_MEMORY_START,
+ size - IP30_MAX_PROM_MEMORY);
+ }
+ pr_info("Detected %luMB of physical memory.\n", MEM_SHIFT(total_mem));
+}
+
+/**
+ * ip30_cpu_time_init - platform time initialization.
+ */
+static void __init ip30_cpu_time_init(void)
+{
+ int cpu = smp_processor_id();
+ u64 heart_compare;
+ unsigned int start, end;
+ int time_diff;
+
+ heart_compare = (heart_read(&heart_regs->count) +
+ (HEART_CYCLES_PER_SEC / 10));
+ start = read_c0_count();
+ while ((heart_read(&heart_regs->count) - heart_compare) & 0x800000)
+ cpu_relax();
+
+ end = read_c0_count();
+ time_diff = (int)end - (int)start;
+ mips_hpt_frequency = time_diff * 10;
+ pr_info("IP30: CPU%d: %d MHz CPU detected.\n", cpu,
+ (mips_hpt_frequency * 2) / 1000000);
+}
+
+void __init ip30_per_cpu_init(void)
+{
+ /* Disable all interrupts. */
+ clear_c0_status(ST0_IM);
+
+ ip30_cpu_time_init();
+#ifdef CONFIG_SMP
+ ip30_install_ipi();
+#endif
+
+ enable_percpu_irq(IP30_HEART_L0_IRQ, IRQ_TYPE_NONE);
+ enable_percpu_irq(IP30_HEART_L1_IRQ, IRQ_TYPE_NONE);
+ enable_percpu_irq(IP30_HEART_L2_IRQ, IRQ_TYPE_NONE);
+ enable_percpu_irq(IP30_HEART_ERR_IRQ, IRQ_TYPE_NONE);
+}
+
+/**
+ * plat_mem_setup - despite the name, misc setup happens here.
+ */
+void __init plat_mem_setup(void)
+{
+ ip30_mem_init();
+
+ /* XXX: Hard lock on /sbin/init if this flag isn't specified. */
+ prom_flags |= PROM_FLAG_DONT_FREE_TEMP;
+
+#ifdef CONFIG_SMP
+ register_smp_ops(&ip30_smp_ops);
+#else
+ ip30_per_cpu_init();
+#endif
+
+ ioport_resource.start = 0;
+ ioport_resource.end = ~0UL;
+ set_io_port_base(IO_BASE);
+}
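ip30_cpu_time_init() above times the CP0 Count register against a fixed tenth of a second of HEART time, so the measured delta only needs a factor-of-ten scale; the *2 when printing the CPU clock reflects that Count ticks at half the processor clock on these parts. A worked example of the arithmetic (numbers invented for illustration):

#include <stdio.h>

int main(void)
{
	unsigned int start = 0, end = 20000000;	/* Count ticks observed in 100 ms */
	int diff = (int)end - (int)start;
	unsigned int hpt_hz = diff * 10;	/* 200 MHz timer */

	/* Count runs at half the pipeline clock, hence *2: a 400 MHz CPU */
	printf("hpt = %u Hz, cpu = %u MHz\n", hpt_hz, (hpt_hz * 2) / 1000000);
	return 0;
}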
diff --git a/arch/mips/sgi-ip30/ip30-smp.c b/arch/mips/sgi-ip30/ip30-smp.c
new file mode 100644
index 000000000000..4bfe654602b1
--- /dev/null
+++ b/arch/mips/sgi-ip30/ip30-smp.c
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ip30-smp.c: SMP on IP30 architecture.
+ * Based on the original IP30 SMP code, with inspiration from ip27-smp.c
+ * and smp-bmips.c.
+ *
+ * Copyright (C) 2005-2007 Stanislaw Skowronek <skylark@unaligned.org>
+ * 2006-2007, 2014-2015 Joshua Kinard <kumba@gentoo.org>
+ * 2009 Johannes Dickgreber <tanzy@gmx.de>
+ */
+
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/sched/task_stack.h>
+
+#include <asm/time.h>
+#include <asm/sgi/heart.h>
+
+#include "ip30-common.h"
+
+#define MPCONF_MAGIC 0xbaddeed2
+#define MPCONF_ADDR 0xa800000000000600L
+#define MPCONF_SIZE 0x80
+#define MPCONF(x) (MPCONF_ADDR + (x) * MPCONF_SIZE)
+
+/* HEART can theoretically do 4 CPUs, but only 2 are physically possible */
+#define MP_NCPU 2
+
+struct mpconf {
+ u32 magic;
+ u32 prid;
+ u32 physid;
+ u32 virtid;
+ u32 scachesz;
+ u16 fanloads;
+ u16 res;
+ void *launch;
+ void *rendezvous;
+ u64 res2[3];
+ void *stackaddr;
+ void *lnch_parm;
+ void *rndv_parm;
+ u32 idleflag;
+};
+
+static void ip30_smp_send_ipi_single(int cpu, u32 action)
+{
+ int irq;
+
+ switch (action) {
+ case SMP_RESCHEDULE_YOURSELF:
+ irq = HEART_L2_INT_RESCHED_CPU_0;
+ break;
+ case SMP_CALL_FUNCTION:
+ irq = HEART_L2_INT_CALL_CPU_0;
+ break;
+ default:
+ panic("IP30: Unknown action value in %s!\n", __func__);
+ }
+
+ irq += cpu;
+
+ /* Poke the other CPU -- it's got mail! */
+ heart_write(BIT_ULL(irq), &heart_regs->set_isr);
+}
+
+static void ip30_smp_send_ipi_mask(const struct cpumask *mask, u32 action)
+{
+ u32 i;
+
+ for_each_cpu(i, mask)
+ ip30_smp_send_ipi_single(i, action);
+}
+
+static void __init ip30_smp_setup(void)
+{
+ int i;
+ int ncpu = 0;
+ struct mpconf *mpc;
+
+ init_cpu_possible(cpumask_of(0));
+
+ /* Scan the MPCONF structure and enumerate available CPUs. */
+ for (i = 0; i < MP_NCPU; i++) {
+ mpc = (struct mpconf *)MPCONF(i);
+ if (mpc->magic == MPCONF_MAGIC) {
+ set_cpu_possible(i, true);
+ __cpu_number_map[i] = ++ncpu;
+ __cpu_logical_map[ncpu] = i;
+ pr_info("IP30: Slot: %d, PrID: %.8x, PhyID: %d, VirtID: %d\n",
+ i, mpc->prid, mpc->physid, mpc->virtid);
+ }
+ }
+ pr_info("IP30: Detected %d CPU(s) present.\n", ncpu);
+
+ /*
+ * Set the coherency algorithm to '5' (cacheable coherent
+ * exclusive on write). This is needed on IP30 SMP, especially
+ * for R14000 CPUs, otherwise, instruction bus errors will
+ * occur upon reaching userland.
+ */
+ change_c0_config(CONF_CM_CMASK, CONF_CM_CACHABLE_COW);
+}
+
+static void __init ip30_smp_prepare_cpus(unsigned int max_cpus)
+{
+ /* nothing to do here */
+}
+
+static int __init ip30_smp_boot_secondary(int cpu, struct task_struct *idle)
+{
+ struct mpconf *mpc = (struct mpconf *)MPCONF(cpu);
+
+ /* Stack pointer (sp). */
+ mpc->stackaddr = (void *)__KSTK_TOS(idle);
+
+ /* Global pointer (gp). */
+ mpc->lnch_parm = task_thread_info(idle);
+
+ mb(); /* make sure stack and lparm are written */
+
+ /* Boot CPUx. */
+ mpc->launch = smp_bootstrap;
+
+ /* CPUx now executes smp_bootstrap, then ip30_smp_finish */
+ return 0;
+}
+
+static void __init ip30_smp_init_cpu(void)
+{
+ ip30_per_cpu_init();
+}
+
+static void __init ip30_smp_finish(void)
+{
+ enable_percpu_irq(get_c0_compare_int(), IRQ_TYPE_NONE);
+ local_irq_enable();
+}
+
+struct plat_smp_ops __read_mostly ip30_smp_ops = {
+ .send_ipi_single = ip30_smp_send_ipi_single,
+ .send_ipi_mask = ip30_smp_send_ipi_mask,
+ .smp_setup = ip30_smp_setup,
+ .prepare_cpus = ip30_smp_prepare_cpus,
+ .boot_secondary = ip30_smp_boot_secondary,
+ .init_secondary = ip30_smp_init_cpu,
+ .smp_finish = ip30_smp_finish,
+ .prepare_boot_cpu = ip30_smp_init_cpu,
+};
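ip30_smp_boot_secondary() above publishes the idle task's stack and thread_info through the MPCONF slot first and stores the launch address only after the mb(), so the parked CPU can never observe a launch pointer without its parameters. A user-space analog of that release/acquire handshake (illustration only; pthreads and C11 atomics stand in for the PROM slave loop and the MPCONF fields):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static void *param;			/* stackaddr/lnch_parm analog */
static atomic_uintptr_t launch;		/* 0 until "boot_secondary" stores it */

static void hello(void *p) { printf("launched with %s\n", (char *)p); }

static void *slave(void *unused)
{
	uintptr_t fn;

	(void)unused;
	while (!(fn = atomic_load_explicit(&launch, memory_order_acquire)))
		;	/* park, as a PROM slave loop would */
	((void (*)(void *))fn)(param);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, slave, NULL);
	param = "idle-task stack";
	atomic_store_explicit(&launch, (uintptr_t)hello,
			      memory_order_release);	/* the mb() + launch store */
	pthread_join(t, NULL);
	return 0;
}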
diff --git a/arch/mips/sgi-ip30/ip30-timer.c b/arch/mips/sgi-ip30/ip30-timer.c
new file mode 100644
index 000000000000..d13e105478ae
--- /dev/null
+++ b/arch/mips/sgi-ip30/ip30-timer.c
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ip30-timer.c: Clocksource/clockevent support for the
+ * HEART chip in SGI Octane (IP30) systems.
+ *
+ * Copyright (C) 2004-2007 Stanislaw Skowronek <skylark@unaligned.org>
+ * Copyright (C) 2009 Johannes Dickgreber <tanzy@gmx.de>
+ * Copyright (C) 2011 Joshua Kinard <kumba@gentoo.org>
+ */
+
+#include <linux/clocksource.h>
+#include <linux/cpumask.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/percpu.h>
+#include <linux/sched_clock.h>
+
+#include <asm/time.h>
+#include <asm/cevt-r4k.h>
+#include <asm/sgi/heart.h>
+
+static u64 ip30_heart_counter_read(struct clocksource *cs)
+{
+ return heart_read(&heart_regs->count);
+}
+
+struct clocksource ip30_heart_clocksource = {
+ .name = "HEART",
+ .rating = 400,
+ .read = ip30_heart_counter_read,
+ .mask = CLOCKSOURCE_MASK(52),
+ .flags = (CLOCK_SOURCE_IS_CONTINUOUS | CLOCK_SOURCE_VALID_FOR_HRES),
+};
+
+static u64 notrace ip30_heart_read_sched_clock(void)
+{
+ return heart_read(&heart_regs->count);
+}
+
+static void __init ip30_heart_clocksource_init(void)
+{
+ struct clocksource *cs = &ip30_heart_clocksource;
+
+ clocksource_register_hz(cs, HEART_CYCLES_PER_SEC);
+
+ sched_clock_register(ip30_heart_read_sched_clock, 52,
+ HEART_CYCLES_PER_SEC);
+}
+
+void __init plat_time_init(void)
+{
+ int irq = get_c0_compare_int();
+
+ cp0_timer_irq_installed = 1;
+ c0_compare_irqaction.percpu_dev_id = &mips_clockevent_device;
+ c0_compare_irqaction.flags &= ~IRQF_SHARED;
+ irq_set_handler(irq, handle_percpu_devid_irq);
+ irq_set_percpu_devid(irq);
+ setup_percpu_irq(irq, &c0_compare_irqaction);
+ enable_percpu_irq(irq, IRQ_TYPE_NONE);
+
+ ip30_heart_clocksource_init();
+}
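The clocksource above advertises 52 valid counter bits; a quick check of what that buys (12.5 MHz is assumed here for HEART_CYCLES_PER_SEC -- the real value lives in heart.h):

#include <stdio.h>

int main(void)
{
	unsigned long long hz = 12500000ULL;	/* assumed HEART counter rate */
	unsigned long long wrap_s = (1ULL << 52) / hz;

	/* ~360 million seconds: the counter wraps only every ~11 years */
	printf("wraps after ~%llu days\n", wrap_s / 86400);
	return 0;
}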
diff --git a/arch/mips/sgi-ip30/ip30-xtalk.c b/arch/mips/sgi-ip30/ip30-xtalk.c
new file mode 100644
index 000000000000..8a2894645529
--- /dev/null
+++ b/arch/mips/sgi-ip30/ip30-xtalk.c
@@ -0,0 +1,152 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ip30-xtalk.c - Very basic Crosstalk (XIO) detection support.
+ * Copyright (C) 2004-2007 Stanislaw Skowronek <skylark@unaligned.org>
+ * Copyright (C) 2009 Johannes Dickgreber <tanzy@gmx.de>
+ * Copyright (C) 2007, 2014-2016 Joshua Kinard <kumba@gentoo.org>
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/sgi-w1.h>
+#include <linux/platform_data/xtalk-bridge.h>
+
+#include <asm/xtalk/xwidget.h>
+#include <asm/pci/bridge.h>
+
+#define IP30_SWIN_BASE(widget) \
+ (0x0000000010000000 | (((unsigned long)(widget)) << 24))
+
+#define IP30_RAW_SWIN_BASE(widget) (IO_BASE + IP30_SWIN_BASE(widget))
+
+#define IP30_SWIN_SIZE (1 << 24)
+
+#define IP30_WIDGET_XBOW _AC(0x0, UL) /* XBow is always 0 */
+#define IP30_WIDGET_HEART _AC(0x8, UL) /* HEART is always 8 */
+#define IP30_WIDGET_PCI_BASE _AC(0xf, UL) /* BaseIO PCI is always 15 */
+
+#define XTALK_NODEV 0xffffffff
+
+#define XBOW_REG_LINK_STAT_0 0x114
+#define XBOW_REG_LINK_BLK_SIZE 0x40
+#define XBOW_REG_LINK_ALIVE 0x80000000
+
+#define HEART_INTR_ADDR 0x00000080
+
+#define xtalk_read __raw_readl
+
+static void bridge_platform_create(int widget, int masterwid)
+{
+ struct xtalk_bridge_platform_data *bd;
+ struct sgi_w1_platform_data *wd;
+ struct platform_device *pdev;
+ struct resource w1_res;
+
+ wd = kzalloc(sizeof(*wd), GFP_KERNEL);
+ if (!wd)
+ goto no_mem;
+
+ snprintf(wd->dev_id, sizeof(wd->dev_id), "bridge-%012lx",
+ IP30_SWIN_BASE(widget));
+
+ memset(&w1_res, 0, sizeof(w1_res));
+ w1_res.start = IP30_SWIN_BASE(widget) +
+ offsetof(struct bridge_regs, b_nic);
+ w1_res.end = w1_res.start + 3;
+ w1_res.flags = IORESOURCE_MEM;
+
+ pdev = platform_device_alloc("sgi_w1", PLATFORM_DEVID_AUTO);
+ if (!pdev) {
+ kfree(wd);
+ goto no_mem;
+ }
+ platform_device_add_resources(pdev, &w1_res, 1);
+ platform_device_add_data(pdev, wd, sizeof(*wd));
+ platform_device_add(pdev);
+
+ bd = kzalloc(sizeof(*bd), GFP_KERNEL);
+ if (!bd)
+ goto no_mem;
+ pdev = platform_device_alloc("xtalk-bridge", PLATFORM_DEVID_AUTO);
+ if (!pdev) {
+ kfree(bd);
+ goto no_mem;
+ }
+
+ bd->bridge_addr = IP30_RAW_SWIN_BASE(widget);
+ bd->intr_addr = HEART_INTR_ADDR;
+ bd->nasid = 0;
+ bd->masterwid = masterwid;
+
+ bd->mem.name = "Bridge PCI MEM";
+ bd->mem.start = IP30_SWIN_BASE(widget) + BRIDGE_DEVIO0;
+ bd->mem.end = IP30_SWIN_BASE(widget) + IP30_SWIN_SIZE - 1;
+ bd->mem.flags = IORESOURCE_MEM;
+ bd->mem_offset = IP30_SWIN_BASE(widget);
+
+ bd->io.name = "Bridge PCI IO";
+ bd->io.start = IP30_SWIN_BASE(widget) + BRIDGE_DEVIO0;
+ bd->io.end = IP30_SWIN_BASE(widget) + IP30_SWIN_SIZE - 1;
+ bd->io.flags = IORESOURCE_IO;
+ bd->io_offset = IP30_SWIN_BASE(widget);
+
+ platform_device_add_data(pdev, bd, sizeof(*bd));
+ platform_device_add(pdev);
+ pr_info("xtalk:%x bridge widget\n", widget);
+ return;
+
+no_mem:
+ pr_warn("xtalk:%x bridge create out of memory\n", widget);
+}
+
+static unsigned int __init xbow_widget_active(s8 wid)
+{
+ unsigned int link_stat;
+
+ link_stat = xtalk_read((void *)(IP30_RAW_SWIN_BASE(IP30_WIDGET_XBOW) +
+ XBOW_REG_LINK_STAT_0 +
+ XBOW_REG_LINK_BLK_SIZE *
+ (wid - 8)));
+
+ return (link_stat & XBOW_REG_LINK_ALIVE) ? 1 : 0;
+}
+
+static void __init xtalk_init_widget(s8 wid, s8 masterwid)
+{
+ xwidget_part_num_t partnum;
+ widgetreg_t widget_id;
+
+ if (!xbow_widget_active(wid))
+ return;
+
+ widget_id = xtalk_read((void *)(IP30_RAW_SWIN_BASE(wid) + WIDGET_ID));
+
+ partnum = XWIDGET_PART_NUM(widget_id);
+
+ switch (partnum) {
+ case BRIDGE_WIDGET_PART_NUM:
+ case XBRIDGE_WIDGET_PART_NUM:
+ bridge_platform_create(wid, masterwid);
+ break;
+ default:
+ pr_info("xtalk:%x unknown widget (0x%x)\n", wid, partnum);
+ break;
+ }
+}
+
+static int __init ip30_xtalk_init(void)
+{
+ int i;
+
+ /*
+ * Walk widget IDs backwards so that BaseIO is probed first. This
+ * ensures that the BaseIO IOC3 is always detected as eth0.
+ */
+ for (i = IP30_WIDGET_PCI_BASE; i > IP30_WIDGET_HEART; i--)
+ xtalk_init_widget(i, IP30_WIDGET_HEART);
+
+ return 0;
+}
+
+arch_initcall(ip30_xtalk_init);
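The IP30_SWIN_BASE() macro above carves a 16 MB short window per widget out of XIO space. A worked example for the BaseIO widget, which also accounts for the IOC3 console address used in ip30-console.c:

#include <stdio.h>

int main(void)
{
	unsigned long widget = 0xf;	/* IP30_WIDGET_PCI_BASE (BaseIO) */
	unsigned long base = 0x0000000010000000UL | (widget << 24);

	/* 0x1f000000..0x1fffffff -- the IOC3 at 0x1f600000 sits inside it */
	printf("widget %#lx window: %#lx-%#lx\n", widget, base,
	       base + (1UL << 24) - 1);
	return 0;
}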
diff --git a/arch/mips/tools/.gitignore b/arch/mips/tools/.gitignore
index 56d34ccccce4..b0209450d9ff 100644
--- a/arch/mips/tools/.gitignore
+++ b/arch/mips/tools/.gitignore
@@ -1 +1,2 @@
elf-entry
+loongson3-llsc-check
diff --git a/arch/mips/tools/Makefile b/arch/mips/tools/Makefile
index 3baee4bc6775..aaef688749f5 100644
--- a/arch/mips/tools/Makefile
+++ b/arch/mips/tools/Makefile
@@ -3,3 +3,8 @@ hostprogs-y := elf-entry
PHONY += elf-entry
elf-entry: $(obj)/elf-entry
@:
+
+hostprogs-$(CONFIG_CPU_LOONGSON3_WORKAROUNDS) += loongson3-llsc-check
+PHONY += loongson3-llsc-check
+loongson3-llsc-check: $(obj)/loongson3-llsc-check
+ @:
diff --git a/arch/mips/tools/loongson3-llsc-check.c b/arch/mips/tools/loongson3-llsc-check.c
new file mode 100644
index 000000000000..0ebddd0ae46f
--- /dev/null
+++ b/arch/mips/tools/loongson3-llsc-check.c
@@ -0,0 +1,307 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <byteswap.h>
+#include <elf.h>
+#include <endian.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <inttypes.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+#ifdef be32toh
+/* If libc provides le{16,32,64}toh() then we'll use them */
+#elif BYTE_ORDER == LITTLE_ENDIAN
+# define le16toh(x) (x)
+# define le32toh(x) (x)
+# define le64toh(x) (x)
+#elif BYTE_ORDER == BIG_ENDIAN
+# define le16toh(x) bswap_16(x)
+# define le32toh(x) bswap_32(x)
+# define le64toh(x) bswap_64(x)
+#endif
+
+/* MIPS opcodes, in bits 31:26 of an instruction */
+#define OP_SPECIAL 0x00
+#define OP_REGIMM 0x01
+#define OP_BEQ 0x04
+#define OP_BNE 0x05
+#define OP_BLEZ 0x06
+#define OP_BGTZ 0x07
+#define OP_BEQL 0x14
+#define OP_BNEL 0x15
+#define OP_BLEZL 0x16
+#define OP_BGTZL 0x17
+#define OP_LL 0x30
+#define OP_LLD 0x34
+#define OP_SC 0x38
+#define OP_SCD 0x3c
+
+/* Bits 20:16 of OP_REGIMM instructions */
+#define REGIMM_BLTZ 0x00
+#define REGIMM_BGEZ 0x01
+#define REGIMM_BLTZL 0x02
+#define REGIMM_BGEZL 0x03
+#define REGIMM_BLTZAL 0x10
+#define REGIMM_BGEZAL 0x11
+#define REGIMM_BLTZALL 0x12
+#define REGIMM_BGEZALL 0x13
+
+/* Bits 5:0 of OP_SPECIAL instructions */
+#define SPECIAL_SYNC 0x0f
+
+static void usage(FILE *f)
+{
+ fprintf(f, "Usage: loongson3-llsc-check /path/to/vmlinux\n");
+}
+
+static int se16(uint16_t x)
+{
+ return (int16_t)x;
+}
+
+static bool is_ll(uint32_t insn)
+{
+ switch (insn >> 26) {
+ case OP_LL:
+ case OP_LLD:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+static bool is_sc(uint32_t insn)
+{
+ switch (insn >> 26) {
+ case OP_SC:
+ case OP_SCD:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+static bool is_sync(uint32_t insn)
+{
+ /* Bits 31:11 should all be zeroes */
+ if (insn >> 11)
+ return false;
+
+ /* Bits 5:0 specify the SYNC special encoding */
+ if ((insn & 0x3f) != SPECIAL_SYNC)
+ return false;
+
+ return true;
+}
+
+static bool is_branch(uint32_t insn, int *off)
+{
+ switch (insn >> 26) {
+ case OP_BEQ:
+ case OP_BEQL:
+ case OP_BNE:
+ case OP_BNEL:
+ case OP_BGTZ:
+ case OP_BGTZL:
+ case OP_BLEZ:
+ case OP_BLEZL:
+ *off = se16(insn) + 1;
+ return true;
+
+ case OP_REGIMM:
+ switch ((insn >> 16) & 0x1f) {
+ case REGIMM_BGEZ:
+ case REGIMM_BGEZL:
+ case REGIMM_BGEZAL:
+ case REGIMM_BGEZALL:
+ case REGIMM_BLTZ:
+ case REGIMM_BLTZL:
+ case REGIMM_BLTZAL:
+ case REGIMM_BLTZALL:
+ *off = se16(insn) + 1;
+ return true;
+
+ default:
+ return false;
+ }
+
+ default:
+ return false;
+ }
+}
+
+static int check_ll(uint64_t pc, uint32_t *code, size_t sz)
+{
+ ssize_t i, max, sc_pos;
+ int off;
+
+ /*
+ * Every LL must be preceded by a sync instruction in order to ensure
+ * that instruction reordering doesn't allow a prior memory access to
+ * execute after the LL & cause erroneous results.
+ */
+ if (!is_sync(le32toh(code[-1]))) {
+ fprintf(stderr, "%" PRIx64 ": LL not preceded by sync\n", pc);
+ return -EINVAL;
+ }
+
+ /* Find the matching SC instruction */
+ max = sz / 4;
+ for (sc_pos = 0; sc_pos < max; sc_pos++) {
+ if (is_sc(le32toh(code[sc_pos])))
+ break;
+ }
+ if (sc_pos >= max) {
+ fprintf(stderr, "%" PRIx64 ": LL has no matching SC\n", pc);
+ return -EINVAL;
+ }
+
+ /*
+ * Check that branches within the LL/SC loop target sync instructions,
+ * ensuring that speculative execution can't generate memory accesses
+ * due to instructions outside of the loop.
+ */
+ for (i = 0; i < sc_pos; i++) {
+ if (!is_branch(le32toh(code[i]), &off))
+ continue;
+
+ /*
+ * If the branch target is within the LL/SC loop then we don't
+ * need to worry about it.
+ */
+ if ((off >= -i) && (off <= sc_pos))
+ continue;
+
+ /* If the branch targets a sync instruction we're all good... */
+ if (is_sync(le32toh(code[i + off])))
+ continue;
+
+ /* ...but if not, we have a problem */
+ fprintf(stderr, "%" PRIx64 ": Branch target not a sync\n",
+ pc + (i * 4));
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int check_code(uint64_t pc, uint32_t *code, size_t sz)
+{
+ int err = 0;
+
+ if (sz % 4) {
+ fprintf(stderr, "%" PRIx64 ": Section size not a multiple of 4\n",
+ pc);
+ err = -EINVAL;
+ sz -= (sz % 4);
+ }
+
+ if (is_ll(le32toh(code[0]))) {
+ fprintf(stderr, "%" PRIx64 ": First instruction in section is an LL\n",
+ pc);
+ err = -EINVAL;
+ }
+
+#define advance() ( \
+ code++, \
+ pc += 4, \
+ sz -= 4 \
+)
+
+ /*
+ * Skip the first instruction, allowing check_ll to look backwards
+ * unconditionally.
+ */
+ advance();
+
+ /* Now scan through the code looking for LL instructions */
+ for (; sz; advance()) {
+ if (is_ll(le32toh(code[0])))
+ err |= check_ll(pc, code, sz);
+ }
+
+ return err;
+}
+
+int main(int argc, char *argv[])
+{
+ int vmlinux_fd, status, err, i;
+ const char *vmlinux_path;
+ struct stat st;
+ Elf64_Ehdr *eh;
+ Elf64_Shdr *sh;
+ void *vmlinux;
+
+ status = EXIT_FAILURE;
+
+ if (argc < 2) {
+ usage(stderr);
+ goto out_ret;
+ }
+
+ vmlinux_path = argv[1];
+ vmlinux_fd = open(vmlinux_path, O_RDONLY);
+ if (vmlinux_fd == -1) {
+ perror("Unable to open vmlinux");
+ goto out_ret;
+ }
+
+ err = fstat(vmlinux_fd, &st);
+ if (err) {
+ perror("Unable to stat vmlinux");
+ goto out_close;
+ }
+
+ vmlinux = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, vmlinux_fd, 0);
+ if (vmlinux == MAP_FAILED) {
+ perror("Unable to mmap vmlinux");
+ goto out_close;
+ }
+
+ eh = vmlinux;
+ if (memcmp(eh->e_ident, ELFMAG, SELFMAG)) {
+ fprintf(stderr, "vmlinux is not an ELF?\n");
+ goto out_munmap;
+ }
+
+ if (eh->e_ident[EI_CLASS] != ELFCLASS64) {
+ fprintf(stderr, "vmlinux is not 64b?\n");
+ goto out_munmap;
+ }
+
+ if (eh->e_ident[EI_DATA] != ELFDATA2LSB) {
+ fprintf(stderr, "vmlinux is not little endian?\n");
+ goto out_munmap;
+ }
+
+ for (i = 0; i < le16toh(eh->e_shnum); i++) {
+ sh = vmlinux + le64toh(eh->e_shoff) + (i * le16toh(eh->e_shentsize));
+
+ if (sh->sh_type != SHT_PROGBITS)
+ continue;
+ if (!(sh->sh_flags & SHF_EXECINSTR))
+ continue;
+
+ err = check_code(le64toh(sh->sh_addr),
+ vmlinux + le64toh(sh->sh_offset),
+ le64toh(sh->sh_size));
+ if (err)
+ goto out_munmap;
+ }
+
+ status = EXIT_SUCCESS;
+out_munmap:
+ munmap(vmlinux, st.st_size);
+out_close:
+ close(vmlinux_fd);
+out_ret:
+ return status;
+}
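The checker's decoding rules are easy to exercise in isolation. A minimal stand-alone demo (illustration only; the predicates are simplified to the 32-bit LL/SC forms, dropping OP_LLD/OP_SCD) that encodes a sync; ll; addiu; sc sequence and confirms it satisfies the "LL preceded by sync" rule:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool is_ll(uint32_t insn)   { return (insn >> 26) == 0x30; }	/* OP_LL */
static bool is_sc(uint32_t insn)   { return (insn >> 26) == 0x38; }	/* OP_SC */
static bool is_sync(uint32_t insn) { return !(insn >> 11) && (insn & 0x3f) == 0x0f; }

int main(void)
{
	/* sync; ll t0,0(a0); addiu t0,t0,1; sc t0,0(a0) */
	uint32_t code[] = { 0x0000000f, 0xc0880000, 0x25080001, 0xe0880000 };

	if (is_sync(code[0]) && is_ll(code[1]) && is_sc(code[3]))
		printf("LL preceded by sync, matching SC found\n");
	return 0;
}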
diff --git a/arch/mips/vdso/Makefile b/arch/mips/vdso/Makefile
index 996a934ece7d..e05938997e69 100644
--- a/arch/mips/vdso/Makefile
+++ b/arch/mips/vdso/Makefile
@@ -75,6 +75,7 @@ CFLAGS_REMOVE_vdso.o = -pg
GCOV_PROFILE := n
UBSAN_SANITIZE := n
+KCOV_INSTRUMENT := n
#
# Shared build commands.
diff --git a/arch/nds32/kernel/vmlinux.lds.S b/arch/nds32/kernel/vmlinux.lds.S
index 9e90f30a181d..f679d3397436 100644
--- a/arch/nds32/kernel/vmlinux.lds.S
+++ b/arch/nds32/kernel/vmlinux.lds.S
@@ -53,12 +53,11 @@ SECTIONS
_etext = .; /* End of text and rodata section */
_sdata = .;
- RO_DATA_SECTION(PAGE_SIZE)
- RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
+ RO_DATA(PAGE_SIZE)
+ RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
_edata = .;
EXCEPTION_TABLE(16)
- NOTES
BSS_SECTION(4, 4, 4)
_end = .;
diff --git a/arch/nios2/kernel/vmlinux.lds.S b/arch/nios2/kernel/vmlinux.lds.S
index 6ad64f14617d..c55a7cfa1075 100644
--- a/arch/nios2/kernel/vmlinux.lds.S
+++ b/arch/nios2/kernel/vmlinux.lds.S
@@ -49,8 +49,8 @@ SECTIONS
__init_end = .;
_sdata = .;
- RO_DATA_SECTION(PAGE_SIZE)
- RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
+ RO_DATA(PAGE_SIZE)
+ RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
_edata = .;
BSS_SECTION(0, 0, 0)
@@ -58,7 +58,6 @@ SECTIONS
STABS_DEBUG
DWARF_DEBUG
- NOTES
DISCARDS
}
diff --git a/arch/openrisc/kernel/vmlinux.lds.S b/arch/openrisc/kernel/vmlinux.lds.S
index 2e2c72c157f3..60449fd7f16f 100644
--- a/arch/openrisc/kernel/vmlinux.lds.S
+++ b/arch/openrisc/kernel/vmlinux.lds.S
@@ -67,19 +67,18 @@ SECTIONS
_sdata = .;
- /* Page alignment required for RO_DATA_SECTION */
- RO_DATA_SECTION(PAGE_SIZE)
+ /* Page alignment required for RO_DATA */
+ RO_DATA(PAGE_SIZE)
_e_kernel_ro = .;
/* Whatever comes after _e_kernel_ro had better be page-aligned, too */
/* 32 here is cacheline size... recheck this */
- RW_DATA_SECTION(32, PAGE_SIZE, PAGE_SIZE)
+ RW_DATA(32, PAGE_SIZE, PAGE_SIZE)
_edata = .;
EXCEPTION_TABLE(4)
- NOTES
/* Init code and data */
. = ALIGN(PAGE_SIZE);
diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile
index 36b834f1c933..dca8f2de8cf5 100644
--- a/arch/parisc/Makefile
+++ b/arch/parisc/Makefile
@@ -60,7 +60,6 @@ KBUILD_CFLAGS += -DCC_USING_PATCHABLE_FUNCTION_ENTRY=1 \
-DFTRACE_PATCHABLE_FUNCTION_SIZE=$(NOP_COUNT)
CC_FLAGS_FTRACE := -fpatchable-function-entry=$(NOP_COUNT),$(shell echo $$(($(NOP_COUNT)-1)))
-KBUILD_LDS_MODULE += $(srctree)/arch/parisc/kernel/module.lds
endif
OBJCOPY_FLAGS =-O binary -R .note -R .comment -S
diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
index ac5f34993b53..1c50093e2ebe 100644
--- a/arch/parisc/kernel/module.c
+++ b/arch/parisc/kernel/module.c
@@ -43,6 +43,7 @@
#include <linux/elf.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
+#include <linux/ftrace.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/bug.h>
@@ -862,7 +863,7 @@ int module_finalize(const Elf_Ehdr *hdr,
const char *strtab = NULL;
const Elf_Shdr *s;
char *secstrings;
- int err, symindex = -1;
+ int symindex = -1;
Elf_Sym *newptr, *oldptr;
Elf_Shdr *symhdr = NULL;
#ifdef DEBUG
@@ -946,11 +947,13 @@ int module_finalize(const Elf_Ehdr *hdr,
/* patch .altinstructions */
apply_alternatives(aseg, aseg + s->sh_size, me->name);
+#ifdef CONFIG_DYNAMIC_FTRACE
/* For 32 bit kernels we're compiling modules with
* -ffunction-sections so we must relocate the addresses in the
- *__mcount_loc section.
+ * ftrace callsite section.
*/
- if (symindex != -1 && !strcmp(secname, "__mcount_loc")) {
+ if (symindex != -1 && !strcmp(secname, FTRACE_CALLSITE_SECTION)) {
+ int err;
if (s->sh_type == SHT_REL)
err = apply_relocate((Elf_Shdr *)sechdrs,
strtab, symindex,
@@ -962,6 +965,7 @@ int module_finalize(const Elf_Ehdr *hdr,
if (err)
return err;
}
+#endif
}
return 0;
}
diff --git a/arch/parisc/kernel/module.lds b/arch/parisc/kernel/module.lds
deleted file mode 100644
index 1a9a92aca5c8..000000000000
--- a/arch/parisc/kernel/module.lds
+++ /dev/null
@@ -1,7 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-
-SECTIONS {
- __mcount_loc : {
- *(__patchable_function_entries)
- }
-}
diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S
index 99cd24f2ea01..53e29d88f99c 100644
--- a/arch/parisc/kernel/vmlinux.lds.S
+++ b/arch/parisc/kernel/vmlinux.lds.S
@@ -19,6 +19,7 @@
*(.data..vm0.pte)
#define CC_USING_PATCHABLE_FUNCTION_ENTRY
+#define RO_EXCEPTION_TABLE_ALIGN 8
#include <asm-generic/vmlinux.lds.h>
@@ -109,7 +110,7 @@ SECTIONS
_sdata = .;
/* Architecturally we need to keep __gp below 0x1000000 and thus
- * in front of RO_DATA_SECTION() which stores lots of tracepoint
+ * in front of RO_DATA() which stores lots of tracepoint
* and ftrace symbols. */
#ifdef CONFIG_64BIT
. = ALIGN(16);
@@ -127,11 +128,7 @@ SECTIONS
}
#endif
- RO_DATA_SECTION(8)
-
- /* RO because of BUILDTIME_EXTABLE_SORT */
- EXCEPTION_TABLE(8)
- NOTES
+ RO_DATA(8)
/* unwind info */
.PARISC.unwind : {
@@ -149,7 +146,7 @@ SECTIONS
data_start = .;
/* Data */
- RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, PAGE_SIZE)
+ RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, PAGE_SIZE)
/* PA-RISC locks require 16-byte alignment */
. = ALIGN(16);
diff --git a/arch/powerpc/crypto/aes-spe-glue.c b/arch/powerpc/crypto/aes-spe-glue.c
index 3a4ca7d32477..1fad5d4c658d 100644
--- a/arch/powerpc/crypto/aes-spe-glue.c
+++ b/arch/powerpc/crypto/aes-spe-glue.c
@@ -17,7 +17,10 @@
#include <asm/byteorder.h>
#include <asm/switch_to.h>
#include <crypto/algapi.h>
+#include <crypto/internal/skcipher.h>
#include <crypto/xts.h>
+#include <crypto/gf128mul.h>
+#include <crypto/scatterwalk.h>
/*
* MAX_BYTES defines the number of bytes that are allowed to be processed
@@ -118,13 +121,19 @@ static int ppc_aes_setkey(struct crypto_tfm *tfm, const u8 *in_key,
return 0;
}
-static int ppc_xts_setkey(struct crypto_tfm *tfm, const u8 *in_key,
+static int ppc_aes_setkey_skcipher(struct crypto_skcipher *tfm,
+ const u8 *in_key, unsigned int key_len)
+{
+ return ppc_aes_setkey(crypto_skcipher_tfm(tfm), in_key, key_len);
+}
+
+static int ppc_xts_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
unsigned int key_len)
{
- struct ppc_xts_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct ppc_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
int err;
- err = xts_check_key(tfm, in_key, key_len);
+ err = xts_verify_key(tfm, in_key, key_len);
if (err)
return err;
@@ -133,7 +142,7 @@ static int ppc_xts_setkey(struct crypto_tfm *tfm, const u8 *in_key,
if (key_len != AES_KEYSIZE_128 &&
key_len != AES_KEYSIZE_192 &&
key_len != AES_KEYSIZE_256) {
- tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
@@ -178,208 +187,229 @@ static void ppc_aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
spe_end();
}
-static int ppc_ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int ppc_ecb_crypt(struct skcipher_request *req, bool enc)
{
- struct ppc_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- struct blkcipher_walk walk;
- unsigned int ubytes;
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct ppc_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
+ unsigned int nbytes;
int err;
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
+ err = skcipher_walk_virt(&walk, req, false);
- while ((nbytes = walk.nbytes)) {
- ubytes = nbytes > MAX_BYTES ?
- nbytes - MAX_BYTES : nbytes & (AES_BLOCK_SIZE - 1);
- nbytes -= ubytes;
+ while ((nbytes = walk.nbytes) != 0) {
+ nbytes = min_t(unsigned int, nbytes, MAX_BYTES);
+ nbytes = round_down(nbytes, AES_BLOCK_SIZE);
spe_begin();
- ppc_encrypt_ecb(walk.dst.virt.addr, walk.src.virt.addr,
- ctx->key_enc, ctx->rounds, nbytes);
+ if (enc)
+ ppc_encrypt_ecb(walk.dst.virt.addr, walk.src.virt.addr,
+ ctx->key_enc, ctx->rounds, nbytes);
+ else
+ ppc_decrypt_ecb(walk.dst.virt.addr, walk.src.virt.addr,
+ ctx->key_dec, ctx->rounds, nbytes);
spe_end();
- err = blkcipher_walk_done(desc, &walk, ubytes);
+ err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
}
return err;
}
-static int ppc_ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int ppc_ecb_encrypt(struct skcipher_request *req)
{
- struct ppc_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- struct blkcipher_walk walk;
- unsigned int ubytes;
- int err;
-
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
-
- while ((nbytes = walk.nbytes)) {
- ubytes = nbytes > MAX_BYTES ?
- nbytes - MAX_BYTES : nbytes & (AES_BLOCK_SIZE - 1);
- nbytes -= ubytes;
-
- spe_begin();
- ppc_decrypt_ecb(walk.dst.virt.addr, walk.src.virt.addr,
- ctx->key_dec, ctx->rounds, nbytes);
- spe_end();
-
- err = blkcipher_walk_done(desc, &walk, ubytes);
- }
+ return ppc_ecb_crypt(req, true);
+}
- return err;
+static int ppc_ecb_decrypt(struct skcipher_request *req)
+{
+ return ppc_ecb_crypt(req, false);
}
-static int ppc_cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int ppc_cbc_crypt(struct skcipher_request *req, bool enc)
{
- struct ppc_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- struct blkcipher_walk walk;
- unsigned int ubytes;
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct ppc_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
+ unsigned int nbytes;
int err;
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
+ err = skcipher_walk_virt(&walk, req, false);
- while ((nbytes = walk.nbytes)) {
- ubytes = nbytes > MAX_BYTES ?
- nbytes - MAX_BYTES : nbytes & (AES_BLOCK_SIZE - 1);
- nbytes -= ubytes;
+ while ((nbytes = walk.nbytes) != 0) {
+ nbytes = min_t(unsigned int, nbytes, MAX_BYTES);
+ nbytes = round_down(nbytes, AES_BLOCK_SIZE);
spe_begin();
- ppc_encrypt_cbc(walk.dst.virt.addr, walk.src.virt.addr,
- ctx->key_enc, ctx->rounds, nbytes, walk.iv);
+ if (enc)
+ ppc_encrypt_cbc(walk.dst.virt.addr, walk.src.virt.addr,
+ ctx->key_enc, ctx->rounds, nbytes,
+ walk.iv);
+ else
+ ppc_decrypt_cbc(walk.dst.virt.addr, walk.src.virt.addr,
+ ctx->key_dec, ctx->rounds, nbytes,
+ walk.iv);
spe_end();
- err = blkcipher_walk_done(desc, &walk, ubytes);
+ err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
}
return err;
}
-static int ppc_cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int ppc_cbc_encrypt(struct skcipher_request *req)
{
- struct ppc_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- struct blkcipher_walk walk;
- unsigned int ubytes;
- int err;
-
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
-
- while ((nbytes = walk.nbytes)) {
- ubytes = nbytes > MAX_BYTES ?
- nbytes - MAX_BYTES : nbytes & (AES_BLOCK_SIZE - 1);
- nbytes -= ubytes;
-
- spe_begin();
- ppc_decrypt_cbc(walk.dst.virt.addr, walk.src.virt.addr,
- ctx->key_dec, ctx->rounds, nbytes, walk.iv);
- spe_end();
-
- err = blkcipher_walk_done(desc, &walk, ubytes);
- }
+ return ppc_cbc_crypt(req, true);
+}
- return err;
+static int ppc_cbc_decrypt(struct skcipher_request *req)
+{
+ return ppc_cbc_crypt(req, false);
}
-static int ppc_ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int ppc_ctr_crypt(struct skcipher_request *req)
{
- struct ppc_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- struct blkcipher_walk walk;
- unsigned int pbytes, ubytes;
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct ppc_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
+ unsigned int nbytes;
int err;
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
+ err = skcipher_walk_virt(&walk, req, false);
- while ((pbytes = walk.nbytes)) {
- pbytes = pbytes > MAX_BYTES ? MAX_BYTES : pbytes;
- pbytes = pbytes == nbytes ?
- nbytes : pbytes & ~(AES_BLOCK_SIZE - 1);
- ubytes = walk.nbytes - pbytes;
+ while ((nbytes = walk.nbytes) != 0) {
+ nbytes = min_t(unsigned int, nbytes, MAX_BYTES);
+ if (nbytes < walk.total)
+ nbytes = round_down(nbytes, AES_BLOCK_SIZE);
spe_begin();
ppc_crypt_ctr(walk.dst.virt.addr, walk.src.virt.addr,
- ctx->key_enc, ctx->rounds, pbytes , walk.iv);
+ ctx->key_enc, ctx->rounds, nbytes, walk.iv);
spe_end();
- nbytes -= pbytes;
- err = blkcipher_walk_done(desc, &walk, ubytes);
+ err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
}
return err;
}
-static int ppc_xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int ppc_xts_crypt(struct skcipher_request *req, bool enc)
{
- struct ppc_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- struct blkcipher_walk walk;
- unsigned int ubytes;
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct ppc_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
+ unsigned int nbytes;
int err;
u32 *twk;
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
+ err = skcipher_walk_virt(&walk, req, false);
twk = ctx->key_twk;
- while ((nbytes = walk.nbytes)) {
- ubytes = nbytes > MAX_BYTES ?
- nbytes - MAX_BYTES : nbytes & (AES_BLOCK_SIZE - 1);
- nbytes -= ubytes;
+ while ((nbytes = walk.nbytes) != 0) {
+ nbytes = min_t(unsigned int, nbytes, MAX_BYTES);
+ nbytes = round_down(nbytes, AES_BLOCK_SIZE);
spe_begin();
- ppc_encrypt_xts(walk.dst.virt.addr, walk.src.virt.addr,
- ctx->key_enc, ctx->rounds, nbytes, walk.iv, twk);
+ if (enc)
+ ppc_encrypt_xts(walk.dst.virt.addr, walk.src.virt.addr,
+ ctx->key_enc, ctx->rounds, nbytes,
+ walk.iv, twk);
+ else
+ ppc_decrypt_xts(walk.dst.virt.addr, walk.src.virt.addr,
+ ctx->key_dec, ctx->rounds, nbytes,
+ walk.iv, twk);
spe_end();
twk = NULL;
- err = blkcipher_walk_done(desc, &walk, ubytes);
+ err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
}
return err;
}
-static int ppc_xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int ppc_xts_encrypt(struct skcipher_request *req)
{
- struct ppc_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- struct blkcipher_walk walk;
- unsigned int ubytes;
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct ppc_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
+ int tail = req->cryptlen % AES_BLOCK_SIZE;
+ int offset = req->cryptlen - tail - AES_BLOCK_SIZE;
+ struct skcipher_request subreq;
+ u8 b[2][AES_BLOCK_SIZE];
int err;
- u32 *twk;
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
- twk = ctx->key_twk;
+ if (req->cryptlen < AES_BLOCK_SIZE)
+ return -EINVAL;
+
+ if (tail) {
+ subreq = *req;
+ skcipher_request_set_crypt(&subreq, req->src, req->dst,
+ req->cryptlen - tail, req->iv);
+ req = &subreq;
+ }
- while ((nbytes = walk.nbytes)) {
- ubytes = nbytes > MAX_BYTES ?
- nbytes - MAX_BYTES : nbytes & (AES_BLOCK_SIZE - 1);
- nbytes -= ubytes;
+ err = ppc_xts_crypt(req, true);
+ if (err || !tail)
+ return err;
- spe_begin();
- ppc_decrypt_xts(walk.dst.virt.addr, walk.src.virt.addr,
- ctx->key_dec, ctx->rounds, nbytes, walk.iv, twk);
- spe_end();
+ scatterwalk_map_and_copy(b[0], req->dst, offset, AES_BLOCK_SIZE, 0);
+ memcpy(b[1], b[0], tail);
+ scatterwalk_map_and_copy(b[0], req->src, offset + AES_BLOCK_SIZE, tail, 0);
- twk = NULL;
- err = blkcipher_walk_done(desc, &walk, ubytes);
+ spe_begin();
+ ppc_encrypt_xts(b[0], b[0], ctx->key_enc, ctx->rounds, AES_BLOCK_SIZE,
+ req->iv, NULL);
+ spe_end();
+
+ scatterwalk_map_and_copy(b[0], req->dst, offset, AES_BLOCK_SIZE + tail, 1);
+
+ return 0;
+}
+
+static int ppc_xts_decrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct ppc_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
+ int tail = req->cryptlen % AES_BLOCK_SIZE;
+ int offset = req->cryptlen - tail - AES_BLOCK_SIZE;
+ struct skcipher_request subreq;
+ u8 b[3][AES_BLOCK_SIZE];
+ le128 twk;
+ int err;
+
+ if (req->cryptlen < AES_BLOCK_SIZE)
+ return -EINVAL;
+
+ if (tail) {
+ subreq = *req;
+ skcipher_request_set_crypt(&subreq, req->src, req->dst,
+ offset, req->iv);
+ req = &subreq;
}
- return err;
+ err = ppc_xts_crypt(req, false);
+ if (err || !tail)
+ return err;
+
+ scatterwalk_map_and_copy(b[1], req->src, offset, AES_BLOCK_SIZE + tail, 0);
+
+ spe_begin();
+ if (!offset)
+ ppc_encrypt_ecb(req->iv, req->iv, ctx->key_twk, ctx->rounds,
+ AES_BLOCK_SIZE);
+
+ gf128mul_x_ble(&twk, (le128 *)req->iv);
+
+ ppc_decrypt_xts(b[1], b[1], ctx->key_dec, ctx->rounds, AES_BLOCK_SIZE,
+ (u8 *)&twk, NULL);
+ memcpy(b[0], b[2], tail);
+ memcpy(b[0] + tail, b[1] + tail, AES_BLOCK_SIZE - tail);
+ ppc_decrypt_xts(b[0], b[0], ctx->key_dec, ctx->rounds, AES_BLOCK_SIZE,
+ req->iv, NULL);
+ spe_end();
+
+ scatterwalk_map_and_copy(b[0], req->dst, offset, AES_BLOCK_SIZE + tail, 1);
+
+ return 0;
}
/*
@@ -388,9 +418,9 @@ static int ppc_xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
* This improves IPsec thoughput by another few percent. Additionally we assume
* that AES context is always aligned to at least 8 bytes because it is created
* with kmalloc() in the crypto infrastructure
- *
*/
-static struct crypto_alg aes_algs[] = { {
+
+static struct crypto_alg aes_cipher_alg = {
.cra_name = "aes",
.cra_driver_name = "aes-ppc-spe",
.cra_priority = 300,
@@ -408,96 +438,84 @@ static struct crypto_alg aes_algs[] = { {
.cia_decrypt = ppc_aes_decrypt
}
}
-}, {
- .cra_name = "ecb(aes)",
- .cra_driver_name = "ecb-ppc-spe",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct ppc_aes_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = ppc_aes_setkey,
- .encrypt = ppc_ecb_encrypt,
- .decrypt = ppc_ecb_decrypt,
- }
- }
-}, {
- .cra_name = "cbc(aes)",
- .cra_driver_name = "cbc-ppc-spe",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct ppc_aes_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = ppc_aes_setkey,
- .encrypt = ppc_cbc_encrypt,
- .decrypt = ppc_cbc_decrypt,
- }
- }
-}, {
- .cra_name = "ctr(aes)",
- .cra_driver_name = "ctr-ppc-spe",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct ppc_aes_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = ppc_aes_setkey,
- .encrypt = ppc_ctr_crypt,
- .decrypt = ppc_ctr_crypt,
- }
- }
-}, {
- .cra_name = "xts(aes)",
- .cra_driver_name = "xts-ppc-spe",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct ppc_xts_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE * 2,
- .max_keysize = AES_MAX_KEY_SIZE * 2,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = ppc_xts_setkey,
- .encrypt = ppc_xts_encrypt,
- .decrypt = ppc_xts_decrypt,
- }
+};
+
+static struct skcipher_alg aes_skcipher_algs[] = {
+ {
+ .base.cra_name = "ecb(aes)",
+ .base.cra_driver_name = "ecb-ppc-spe",
+ .base.cra_priority = 300,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct ppc_aes_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = ppc_aes_setkey_skcipher,
+ .encrypt = ppc_ecb_encrypt,
+ .decrypt = ppc_ecb_decrypt,
+ }, {
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "cbc-ppc-spe",
+ .base.cra_priority = 300,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct ppc_aes_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = ppc_aes_setkey_skcipher,
+ .encrypt = ppc_cbc_encrypt,
+ .decrypt = ppc_cbc_decrypt,
+ }, {
+ .base.cra_name = "ctr(aes)",
+ .base.cra_driver_name = "ctr-ppc-spe",
+ .base.cra_priority = 300,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct ppc_aes_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = ppc_aes_setkey_skcipher,
+ .encrypt = ppc_ctr_crypt,
+ .decrypt = ppc_ctr_crypt,
+ .chunksize = AES_BLOCK_SIZE,
+ }, {
+ .base.cra_name = "xts(aes)",
+ .base.cra_driver_name = "xts-ppc-spe",
+ .base.cra_priority = 300,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct ppc_xts_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = AES_MIN_KEY_SIZE * 2,
+ .max_keysize = AES_MAX_KEY_SIZE * 2,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = ppc_xts_setkey,
+ .encrypt = ppc_xts_encrypt,
+ .decrypt = ppc_xts_decrypt,
}
-} };
+};
static int __init ppc_aes_mod_init(void)
{
- return crypto_register_algs(aes_algs, ARRAY_SIZE(aes_algs));
+ int err;
+
+ err = crypto_register_alg(&aes_cipher_alg);
+ if (err)
+ return err;
+
+ err = crypto_register_skciphers(aes_skcipher_algs,
+ ARRAY_SIZE(aes_skcipher_algs));
+ if (err)
+ crypto_unregister_alg(&aes_cipher_alg);
+ return err;
}
static void __exit ppc_aes_mod_fini(void)
{
- crypto_unregister_algs(aes_algs, ARRAY_SIZE(aes_algs));
+ crypto_unregister_alg(&aes_cipher_alg);
+ crypto_unregister_skciphers(aes_skcipher_algs,
+ ARRAY_SIZE(aes_skcipher_algs));
}
module_init(ppc_aes_mod_init);
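For reference, a minimal sketch of how other kernel code would drive one of the skcipher algorithms registered above through the generic crypto API. This is not part of the patch; the function and buffer names are hypothetical, while crypto_alloc_skcipher() and the skcipher_request_*() helpers are the standard symmetric-cipher interface this conversion targets.

/* Hedged sketch: synchronous one-shot CBC-AES encryption of a linear
 * buffer. "cbc(aes)" may resolve to cbc-ppc-spe when that driver has
 * the highest priority on the running system.
 */
#include <crypto/skcipher.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>

static int example_cbc_encrypt(u8 *buf, unsigned int len,
			       const u8 *key, unsigned int keylen, u8 *iv)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, keylen);
	if (err)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	sg_init_one(&sg, buf, len);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, len, iv);
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return err;
}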
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 6fe6ad64cba5..4273e799203d 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -401,7 +401,6 @@ struct kvmppc_mmu {
u32 (*mfsrin)(struct kvm_vcpu *vcpu, u32 srnum);
int (*xlate)(struct kvm_vcpu *vcpu, gva_t eaddr,
struct kvmppc_pte *pte, bool data, bool iswrite);
- void (*reset_msr)(struct kvm_vcpu *vcpu);
void (*tlbie)(struct kvm_vcpu *vcpu, ulong addr, bool large);
int (*esid_to_vsid)(struct kvm_vcpu *vcpu, ulong esid, u64 *vsid);
u64 (*ea_to_vp)(struct kvm_vcpu *vcpu, gva_t eaddr, bool data);
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index ee62776e5433..d63f649fe713 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -271,6 +271,7 @@ struct kvmppc_ops {
union kvmppc_one_reg *val);
void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
void (*vcpu_put)(struct kvm_vcpu *vcpu);
+ void (*inject_interrupt)(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags);
void (*set_msr)(struct kvm_vcpu *vcpu, u64 msr);
int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned int id);
diff --git a/arch/powerpc/include/asm/local.h b/arch/powerpc/include/asm/local.h
index fdd00939270b..bc4bd19b7fc2 100644
--- a/arch/powerpc/include/asm/local.h
+++ b/arch/powerpc/include/asm/local.h
@@ -17,7 +17,7 @@ typedef struct
#define LOCAL_INIT(i) { (i) }
-static __inline__ long local_read(local_t *l)
+static __inline__ long local_read(const local_t *l)
{
return READ_ONCE(l->v);
}
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index c8bb14ff4713..f6c562acc3f8 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -329,13 +329,4 @@ struct vm_area_struct;
#endif /* __ASSEMBLY__ */
#include <asm/slice.h>
-/*
- * Allow 30-bit DMA for very limited Broadcom wifi chips on many powerbooks.
- */
-#ifdef CONFIG_PPC32
-#define ARCH_ZONE_DMA_BITS 30
-#else
-#define ARCH_ZONE_DMA_BITS 31
-#endif
-
#endif /* _ASM_POWERPC_PAGE_H */
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index b3cbb1136bce..75c7e95a321b 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -748,6 +748,18 @@
#define SPRN_USPRG7 0x107 /* SPRG7 userspace read */
#define SPRN_SRR0 0x01A /* Save/Restore Register 0 */
#define SPRN_SRR1 0x01B /* Save/Restore Register 1 */
+
+#ifdef CONFIG_PPC_BOOK3S
+/*
+ * Bits loaded from MSR upon interrupt.
+ * PPC (64-bit) bits 33-36,42-47 are interrupt dependent, the others are
+ * loaded from MSR. The exception is that SRESET and MCE do not always load
+ * bit 62 (RI) from MSR. Don't use PPC_BITMASK for this, because the
+ * definition is also used by 32-bit code.
+ */
+#define SRR1_MSR_BITS (~0x783f0000UL)
+#endif
+
#define SRR1_ISI_NOPT 0x40000000 /* ISI: Not found in hash */
#define SRR1_ISI_N_OR_G 0x10000000 /* ISI: Access is no-exec or G */
#define SRR1_ISI_PROT 0x08000000 /* ISI: Other protection fault */
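As a worked check of the new mask (illustrative, not from the patch): 0x783f0000 covers little-endian bits 16-21 and 27-30, which in IBM numbering (bit 0 = MSB of the 64-bit register) are exactly the interrupt-dependent SRR1 bits 42-47 and 33-36 named in the comment above. Interrupt delivery can then be written as:

/* Illustrative helper mirroring what the inject_interrupt paths later
 * in this series do with the mask: keep the MSR-derived SRR1 bits and
 * let the caller supply the interrupt-dependent ones.
 */
static inline unsigned long compose_srr1(unsigned long msr,
					 unsigned long srr1_flags)
{
	return (msr & SRR1_MSR_BITS) | srr1_flags;
}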
diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h
index b0f72dea8b11..264e266a85bf 100644
--- a/arch/powerpc/include/uapi/asm/kvm.h
+++ b/arch/powerpc/include/uapi/asm/kvm.h
@@ -667,6 +667,8 @@ struct kvm_ppc_cpu_char {
/* PPC64 eXternal Interrupt Controller Specification */
#define KVM_DEV_XICS_GRP_SOURCES 1 /* 64-bit source attributes */
+#define KVM_DEV_XICS_GRP_CTRL 2
+#define KVM_DEV_XICS_NR_SERVERS 1
/* Layout of 64-bit source attribute values */
#define KVM_XICS_DESTINATION_SHIFT 0
@@ -683,6 +685,7 @@ struct kvm_ppc_cpu_char {
#define KVM_DEV_XIVE_GRP_CTRL 1
#define KVM_DEV_XIVE_RESET 1
#define KVM_DEV_XIVE_EQ_SYNC 2
+#define KVM_DEV_XIVE_NR_SERVERS 3
#define KVM_DEV_XIVE_GRP_SOURCE 2 /* 64-bit source identifier */
#define KVM_DEV_XIVE_GRP_SOURCE_CONFIG 3 /* 64-bit source identifier */
#define KVM_DEV_XIVE_GRP_EQ_CONFIG 4 /* 64-bit EQ identifier */
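A hedged userspace sketch of the new control: the descriptor name is hypothetical, but KVM_SET_DEVICE_ATTR and struct kvm_device_attr are the standard KVM device-attribute interface. The same pattern works for the XICS-on-XIVE device with KVM_DEV_XICS_GRP_CTRL / KVM_DEV_XICS_NR_SERVERS.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Tell the in-kernel XIVE device how many interrupt servers (vCPU ids)
 * the guest will use, so the kernel can size the VP block accordingly.
 * xive_dev_fd is assumed to come from a prior KVM_CREATE_DEVICE call.
 */
static int set_xive_nr_servers(int xive_dev_fd, uint32_t nr_servers)
{
	struct kvm_device_attr attr = {
		.group = KVM_DEV_XIVE_GRP_CTRL,
		.attr  = KVM_DEV_XIVE_NR_SERVERS,
		.addr  = (uint64_t)(uintptr_t)&nr_servers,
	};

	return ioctl(xive_dev_fd, KVM_SET_DEVICE_ATTR, &attr);
}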
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 694522308cd5..84827da01d45 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -338,7 +338,7 @@ static unsigned long vtime_delta(struct task_struct *tsk,
return stime;
}
-void vtime_account_system(struct task_struct *tsk)
+void vtime_account_kernel(struct task_struct *tsk)
{
unsigned long stime, stime_scaled, steal_time;
struct cpu_accounting_data *acct = get_accounting(tsk);
@@ -366,7 +366,7 @@ void vtime_account_system(struct task_struct *tsk)
#endif
}
}
-EXPORT_SYMBOL_GPL(vtime_account_system);
+EXPORT_SYMBOL_GPL(vtime_account_kernel);
void vtime_account_idle(struct task_struct *tsk)
{
@@ -395,7 +395,7 @@ static void vtime_flush_scaled(struct task_struct *tsk,
/*
* Account the whole cputime accumulated in the paca
* Must be called with interrupts disabled.
- * Assumes that vtime_account_system/idle() has been called
+ * Assumes that vtime_account_kernel/idle() has been called
* recently (i.e. since the last entry from usermode) so that
* get_paca()->user_time_scaled is up to date.
*/
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index 060a1acd7c6d..8834220036a5 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -6,6 +6,8 @@
#endif
#define BSS_FIRST_SECTIONS *(.bss.prominit)
+#define EMITS_PT_NOTE
+#define RO_EXCEPTION_TABLE_ALIGN 0
#include <asm/page.h>
#include <asm-generic/vmlinux.lds.h>
@@ -18,22 +20,8 @@
ENTRY(_stext)
PHDRS {
- kernel PT_LOAD FLAGS(7); /* RWX */
- notes PT_NOTE FLAGS(0);
- dummy PT_NOTE FLAGS(0);
-
- /* binutils < 2.18 has a bug that makes it misbehave when taking an
- ELF file with all segments at load address 0 as input. This
- happens when running "strip" on vmlinux, because of the AT() magic
- in this linker script. People using GCC >= 4.2 won't run into
- this problem, because the "build-id" support will put some data
- into the "notes" segment (at a non-zero load address).
-
- To work around this, we force some data into both the "dummy"
- segment and the kernel segment, so the dummy segment will get a
- non-zero load address. It's not enough to always create the
- "notes" segment, since if nothing gets assigned to it, its load
- address will be zero. */
+ text PT_LOAD FLAGS(7); /* RWX */
+ note PT_NOTE FLAGS(0);
}
#ifdef CONFIG_PPC64
@@ -77,7 +65,7 @@ SECTIONS
#else /* !CONFIG_PPC64 */
HEAD_TEXT
#endif
- } :kernel
+ } :text
__head_end = .;
@@ -126,7 +114,7 @@ SECTIONS
__got2_end = .;
#endif /* CONFIG_PPC32 */
- } :kernel
+ } :text
. = ALIGN(ETEXT_ALIGN_SIZE);
_etext = .;
@@ -175,17 +163,6 @@ SECTIONS
__stop__btb_flush_fixup = .;
}
#endif
- EXCEPTION_TABLE(0)
-
- NOTES :kernel :notes
-
- /* The dummy segment contents for the bug workaround mentioned above
- near PHDRS. */
- .dummy : AT(ADDR(.dummy) - LOAD_OFFSET) {
- LONG(0)
- LONG(0)
- LONG(0)
- } :kernel :dummy
/*
* Init sections discarded at runtime
@@ -200,7 +177,7 @@ SECTIONS
#ifdef CONFIG_PPC64
*(.tramp.ftrace.init);
#endif
- } :kernel
+ } :text
/* .exit.text is discarded at runtime, not link time,
* to deal with references from __bug_table
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index ec2547cc5ecb..58a59ee998e2 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -74,27 +74,6 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
{ NULL }
};
-void kvmppc_unfixup_split_real(struct kvm_vcpu *vcpu)
-{
- if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) {
- ulong pc = kvmppc_get_pc(vcpu);
- ulong lr = kvmppc_get_lr(vcpu);
- if ((pc & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS)
- kvmppc_set_pc(vcpu, pc & ~SPLIT_HACK_MASK);
- if ((lr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS)
- kvmppc_set_lr(vcpu, lr & ~SPLIT_HACK_MASK);
- vcpu->arch.hflags &= ~BOOK3S_HFLAG_SPLIT_HACK;
- }
-}
-EXPORT_SYMBOL_GPL(kvmppc_unfixup_split_real);
-
-static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
-{
- if (!is_kvmppc_hv_enabled(vcpu->kvm))
- return to_book3s(vcpu)->hior;
- return 0;
-}
-
static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
unsigned long pending_now, unsigned long old_pending)
{
@@ -134,11 +113,7 @@ static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
{
- kvmppc_unfixup_split_real(vcpu);
- kvmppc_set_srr0(vcpu, kvmppc_get_pc(vcpu));
- kvmppc_set_srr1(vcpu, (kvmppc_get_msr(vcpu) & ~0x783f0000ul) | flags);
- kvmppc_set_pc(vcpu, kvmppc_interrupt_offset(vcpu) + vec);
- vcpu->arch.mmu.reset_msr(vcpu);
+ vcpu->kvm->arch.kvm_ops->inject_interrupt(vcpu, vec, flags);
}
static int kvmppc_book3s_vec2irqprio(unsigned int vec)
diff --git a/arch/powerpc/kvm/book3s.h b/arch/powerpc/kvm/book3s.h
index 2ef1311a2a13..3a4613985949 100644
--- a/arch/powerpc/kvm/book3s.h
+++ b/arch/powerpc/kvm/book3s.h
@@ -32,4 +32,7 @@ extern void kvmppc_emulate_tabort(struct kvm_vcpu *vcpu, int ra_val);
static inline void kvmppc_emulate_tabort(struct kvm_vcpu *vcpu, int ra_val) {}
#endif
+extern void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr);
+extern void kvmppc_inject_interrupt_hv(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags);
+
#endif
diff --git a/arch/powerpc/kvm/book3s_32_mmu.c b/arch/powerpc/kvm/book3s_32_mmu.c
index 18f244aad7aa..f21e73492ce3 100644
--- a/arch/powerpc/kvm/book3s_32_mmu.c
+++ b/arch/powerpc/kvm/book3s_32_mmu.c
@@ -90,11 +90,6 @@ static u64 kvmppc_mmu_book3s_32_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr,
return (((u64)eaddr >> 12) & 0xffff) | (vsid << 16);
}
-static void kvmppc_mmu_book3s_32_reset_msr(struct kvm_vcpu *vcpu)
-{
- kvmppc_set_msr(vcpu, 0);
-}
-
static hva_t kvmppc_mmu_book3s_32_get_pteg(struct kvm_vcpu *vcpu,
u32 sre, gva_t eaddr,
bool primary)
@@ -406,7 +401,6 @@ void kvmppc_mmu_book3s_32_init(struct kvm_vcpu *vcpu)
mmu->mtsrin = kvmppc_mmu_book3s_32_mtsrin;
mmu->mfsrin = kvmppc_mmu_book3s_32_mfsrin;
mmu->xlate = kvmppc_mmu_book3s_32_xlate;
- mmu->reset_msr = kvmppc_mmu_book3s_32_reset_msr;
mmu->tlbie = kvmppc_mmu_book3s_32_tlbie;
mmu->esid_to_vsid = kvmppc_mmu_book3s_32_esid_to_vsid;
mmu->ea_to_vp = kvmppc_mmu_book3s_32_ea_to_vp;
diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c
index 5f63a5f7f24f..599133256a95 100644
--- a/arch/powerpc/kvm/book3s_64_mmu.c
+++ b/arch/powerpc/kvm/book3s_64_mmu.c
@@ -24,20 +24,6 @@
#define dprintk(X...) do { } while(0)
#endif
-static void kvmppc_mmu_book3s_64_reset_msr(struct kvm_vcpu *vcpu)
-{
- unsigned long msr = vcpu->arch.intr_msr;
- unsigned long cur_msr = kvmppc_get_msr(vcpu);
-
- /* If transactional, change to suspend mode on IRQ delivery */
- if (MSR_TM_TRANSACTIONAL(cur_msr))
- msr |= MSR_TS_S;
- else
- msr |= cur_msr & MSR_TS_MASK;
-
- kvmppc_set_msr(vcpu, msr);
-}
-
static struct kvmppc_slb *kvmppc_mmu_book3s_64_find_slbe(
struct kvm_vcpu *vcpu,
gva_t eaddr)
@@ -676,7 +662,6 @@ void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu)
mmu->slbie = kvmppc_mmu_book3s_64_slbie;
mmu->slbia = kvmppc_mmu_book3s_64_slbia;
mmu->xlate = kvmppc_mmu_book3s_64_xlate;
- mmu->reset_msr = kvmppc_mmu_book3s_64_reset_msr;
mmu->tlbie = kvmppc_mmu_book3s_64_tlbie;
mmu->esid_to_vsid = kvmppc_mmu_book3s_64_esid_to_vsid;
mmu->ea_to_vp = kvmppc_mmu_book3s_64_ea_to_vp;
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 9a75f0e1933b..d381526c5c9b 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -275,18 +275,6 @@ int kvmppc_mmu_hv_init(void)
return 0;
}
-static void kvmppc_mmu_book3s_64_hv_reset_msr(struct kvm_vcpu *vcpu)
-{
- unsigned long msr = vcpu->arch.intr_msr;
-
- /* If transactional, change to suspend mode on IRQ delivery */
- if (MSR_TM_TRANSACTIONAL(vcpu->arch.shregs.msr))
- msr |= MSR_TS_S;
- else
- msr |= vcpu->arch.shregs.msr & MSR_TS_MASK;
- kvmppc_set_msr(vcpu, msr);
-}
-
static long kvmppc_virtmode_do_h_enter(struct kvm *kvm, unsigned long flags,
long pte_index, unsigned long pteh,
unsigned long ptel, unsigned long *pte_idx_ret)
@@ -508,6 +496,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
struct vm_area_struct *vma;
unsigned long rcbits;
long mmio_update;
+ struct mm_struct *mm;
if (kvm_is_radix(kvm))
return kvmppc_book3s_radix_page_fault(run, vcpu, ea, dsisr);
@@ -584,6 +573,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
is_ci = false;
pfn = 0;
page = NULL;
+ mm = current->mm;
pte_size = PAGE_SIZE;
writing = (dsisr & DSISR_ISSTORE) != 0;
/* If writing != 0, then the HPTE must allow writing, if we get here */
@@ -592,8 +582,8 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
npages = get_user_pages_fast(hva, 1, writing ? FOLL_WRITE : 0, pages);
if (npages < 1) {
/* Check if it's an I/O mapping */
- down_read(&current->mm->mmap_sem);
- vma = find_vma(current->mm, hva);
+ down_read(&mm->mmap_sem);
+ vma = find_vma(mm, hva);
if (vma && vma->vm_start <= hva && hva + psize <= vma->vm_end &&
(vma->vm_flags & VM_PFNMAP)) {
pfn = vma->vm_pgoff +
@@ -602,7 +592,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
is_ci = pte_ci(__pte((pgprot_val(vma->vm_page_prot))));
write_ok = vma->vm_flags & VM_WRITE;
}
- up_read(&current->mm->mmap_sem);
+ up_read(&mm->mmap_sem);
if (!pfn)
goto out_put;
} else {
@@ -621,8 +611,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
* hugepage split and collapse.
*/
local_irq_save(flags);
- ptep = find_current_mm_pte(current->mm->pgd,
- hva, NULL, NULL);
+ ptep = find_current_mm_pte(mm->pgd, hva, NULL, NULL);
if (ptep) {
pte = kvmppc_read_update_linux_pte(ptep, 1);
if (__pte_write(pte))
@@ -2000,7 +1989,7 @@ int kvm_vm_ioctl_get_htab_fd(struct kvm *kvm, struct kvm_get_htab_fd *ghf)
ret = anon_inode_getfd("kvm-htab", &kvm_htab_fops, ctx, rwflag | O_CLOEXEC);
if (ret < 0) {
kfree(ctx);
- kvm_put_kvm(kvm);
+ kvm_put_kvm_no_destroy(kvm);
return ret;
}
@@ -2161,7 +2150,6 @@ void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu)
vcpu->arch.slb_nr = 32; /* POWER7/POWER8 */
mmu->xlate = kvmppc_mmu_book3s_64_hv_xlate;
- mmu->reset_msr = kvmppc_mmu_book3s_64_hv_reset_msr;
vcpu->arch.hflags |= BOOK3S_HFLAG_SLB;
}
diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
index 5834db0a54c6..883a66e76638 100644
--- a/arch/powerpc/kvm/book3s_64_vio.c
+++ b/arch/powerpc/kvm/book3s_64_vio.c
@@ -317,7 +317,7 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
if (ret >= 0)
list_add_rcu(&stt->list, &kvm->arch.spapr_tce_tables);
else
- kvm_put_kvm(kvm);
+ kvm_put_kvm_no_destroy(kvm);
mutex_unlock(&kvm->lock);
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 709cf1fd4cf4..ec5c0379296a 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -133,7 +133,6 @@ static inline bool nesting_enabled(struct kvm *kvm)
/* If set, the threads on each CPU core have to be in the same MMU mode */
static bool no_mixing_hpt_and_radix;
-static void kvmppc_end_cede(struct kvm_vcpu *vcpu);
static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);
/*
@@ -338,18 +337,6 @@ static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu)
spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
}
-static void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr)
-{
- /*
- * Check for illegal transactional state bit combination
- * and if we find it, force the TS field to a safe state.
- */
- if ((msr & MSR_TS_MASK) == MSR_TS_MASK)
- msr &= ~MSR_TS_MASK;
- vcpu->arch.shregs.msr = msr;
- kvmppc_end_cede(vcpu);
-}
-
static void kvmppc_set_pvr_hv(struct kvm_vcpu *vcpu, u32 pvr)
{
vcpu->arch.pvr = pvr;
@@ -792,6 +779,11 @@ static int kvmppc_h_set_mode(struct kvm_vcpu *vcpu, unsigned long mflags,
vcpu->arch.dawr = value1;
vcpu->arch.dawrx = value2;
return H_SUCCESS;
+ case H_SET_MODE_RESOURCE_ADDR_TRANS_MODE:
+ /* KVM does not support mflags=2 (AIL=2) */
+ if (mflags != 0 && mflags != 3)
+ return H_UNSUPPORTED_FLAG_START;
+ return H_TOO_HARD;
default:
return H_TOO_HARD;
}
@@ -2454,15 +2446,6 @@ static void kvmppc_set_timer(struct kvm_vcpu *vcpu)
vcpu->arch.timer_running = 1;
}
-static void kvmppc_end_cede(struct kvm_vcpu *vcpu)
-{
- vcpu->arch.ceded = 0;
- if (vcpu->arch.timer_running) {
- hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
- vcpu->arch.timer_running = 0;
- }
-}
-
extern int __kvmppc_vcore_entry(void);
static void kvmppc_remove_runnable(struct kvmppc_vcore *vc,
@@ -5401,6 +5384,7 @@ static struct kvmppc_ops kvm_ops_hv = {
.set_one_reg = kvmppc_set_one_reg_hv,
.vcpu_load = kvmppc_core_vcpu_load_hv,
.vcpu_put = kvmppc_core_vcpu_put_hv,
+ .inject_interrupt = kvmppc_inject_interrupt_hv,
.set_msr = kvmppc_set_msr_hv,
.vcpu_run = kvmppc_vcpu_run_hv,
.vcpu_create = kvmppc_core_vcpu_create_hv,
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index 7c1909657b55..7cd3cf3d366b 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -755,6 +755,71 @@ void kvmhv_p9_restore_lpcr(struct kvm_split_mode *sip)
local_paca->kvm_hstate.kvm_split_mode = NULL;
}
+static void kvmppc_end_cede(struct kvm_vcpu *vcpu)
+{
+ vcpu->arch.ceded = 0;
+ if (vcpu->arch.timer_running) {
+ hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
+ vcpu->arch.timer_running = 0;
+ }
+}
+
+void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr)
+{
+ /*
+ * Check for illegal transactional state bit combination
+ * and if we find it, force the TS field to a safe state.
+ */
+ if ((msr & MSR_TS_MASK) == MSR_TS_MASK)
+ msr &= ~MSR_TS_MASK;
+ vcpu->arch.shregs.msr = msr;
+ kvmppc_end_cede(vcpu);
+}
+EXPORT_SYMBOL_GPL(kvmppc_set_msr_hv);
+
+static void inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags)
+{
+ unsigned long msr, pc, new_msr, new_pc;
+
+ msr = kvmppc_get_msr(vcpu);
+ pc = kvmppc_get_pc(vcpu);
+ new_msr = vcpu->arch.intr_msr;
+ new_pc = vec;
+
+ /* If transactional, change to suspend mode on IRQ delivery */
+ if (MSR_TM_TRANSACTIONAL(msr))
+ new_msr |= MSR_TS_S;
+ else
+ new_msr |= msr & MSR_TS_MASK;
+
+ /*
+ * Perform MSR and PC adjustment for LPCR[AIL]=3 if it is set and
+ * applicable. AIL=2 is not supported.
+ *
+ * AIL does not apply to SRESET, MCE, or HMI (which is never
+ * delivered to the guest), and does not apply if IR=0 or DR=0.
+ */
+ if (vec != BOOK3S_INTERRUPT_SYSTEM_RESET &&
+ vec != BOOK3S_INTERRUPT_MACHINE_CHECK &&
+ (vcpu->arch.vcore->lpcr & LPCR_AIL) == LPCR_AIL_3 &&
+ (msr & (MSR_IR|MSR_DR)) == (MSR_IR|MSR_DR)) {
+ new_msr |= MSR_IR | MSR_DR;
+ new_pc += 0xC000000000004000ULL;
+ }
+
+ kvmppc_set_srr0(vcpu, pc);
+ kvmppc_set_srr1(vcpu, (msr & SRR1_MSR_BITS) | srr1_flags);
+ kvmppc_set_pc(vcpu, new_pc);
+ vcpu->arch.shregs.msr = new_msr;
+}
+
+void kvmppc_inject_interrupt_hv(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags)
+{
+ inject_interrupt(vcpu, vec, srr1_flags);
+ kvmppc_end_cede(vcpu);
+}
+EXPORT_SYMBOL_GPL(kvmppc_inject_interrupt_hv);
+
/*
* Is there a PRIV_DOORBELL pending for the guest (on POWER9)?
* Can we inject a Decrementer or an External interrupt?
@@ -762,7 +827,6 @@ void kvmhv_p9_restore_lpcr(struct kvm_split_mode *sip)
void kvmppc_guest_entry_inject_int(struct kvm_vcpu *vcpu)
{
int ext;
- unsigned long vec = 0;
unsigned long lpcr;
/* Insert EXTERNAL bit into LPCR at the MER bit position */
@@ -774,26 +838,16 @@ void kvmppc_guest_entry_inject_int(struct kvm_vcpu *vcpu)
if (vcpu->arch.shregs.msr & MSR_EE) {
if (ext) {
- vec = BOOK3S_INTERRUPT_EXTERNAL;
+ inject_interrupt(vcpu, BOOK3S_INTERRUPT_EXTERNAL, 0);
} else {
long int dec = mfspr(SPRN_DEC);
if (!(lpcr & LPCR_LD))
dec = (int) dec;
if (dec < 0)
- vec = BOOK3S_INTERRUPT_DECREMENTER;
+ inject_interrupt(vcpu,
+ BOOK3S_INTERRUPT_DECREMENTER, 0);
}
}
- if (vec) {
- unsigned long msr, old_msr = vcpu->arch.shregs.msr;
-
- kvmppc_set_srr0(vcpu, kvmppc_get_pc(vcpu));
- kvmppc_set_srr1(vcpu, old_msr);
- kvmppc_set_pc(vcpu, vec);
- msr = vcpu->arch.intr_msr;
- if (MSR_TM_ACTIVE(old_msr))
- msr |= MSR_TS_S;
- vcpu->arch.shregs.msr = msr;
- }
if (vcpu->arch.doorbell_request) {
mtspr(SPRN_DPDES, 1);
diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c
index cdf30c6eaf54..dc97e5be76f6 100644
--- a/arch/powerpc/kvm/book3s_hv_nested.c
+++ b/arch/powerpc/kvm/book3s_hv_nested.c
@@ -1186,7 +1186,7 @@ static int kvmhv_translate_addr_nested(struct kvm_vcpu *vcpu,
forward_to_l1:
vcpu->arch.fault_dsisr = flags;
if (vcpu->arch.trap == BOOK3S_INTERRUPT_H_INST_STORAGE) {
- vcpu->arch.shregs.msr &= ~0x783f0000ul;
+ vcpu->arch.shregs.msr &= SRR1_MSR_BITS;
vcpu->arch.shregs.msr |= flags;
}
return RESUME_HOST;
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index cc65af8fe6f7..ce4fcf76e53e 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -90,7 +90,43 @@ static void kvmppc_fixup_split_real(struct kvm_vcpu *vcpu)
kvmppc_set_pc(vcpu, pc | SPLIT_HACK_OFFS);
}
-void kvmppc_unfixup_split_real(struct kvm_vcpu *vcpu);
+static void kvmppc_unfixup_split_real(struct kvm_vcpu *vcpu)
+{
+ if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) {
+ ulong pc = kvmppc_get_pc(vcpu);
+ ulong lr = kvmppc_get_lr(vcpu);
+ if ((pc & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS)
+ kvmppc_set_pc(vcpu, pc & ~SPLIT_HACK_MASK);
+ if ((lr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS)
+ kvmppc_set_lr(vcpu, lr & ~SPLIT_HACK_MASK);
+ vcpu->arch.hflags &= ~BOOK3S_HFLAG_SPLIT_HACK;
+ }
+}
+
+static void kvmppc_inject_interrupt_pr(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags)
+{
+ unsigned long msr, pc, new_msr, new_pc;
+
+ kvmppc_unfixup_split_real(vcpu);
+
+ msr = kvmppc_get_msr(vcpu);
+ pc = kvmppc_get_pc(vcpu);
+ new_msr = vcpu->arch.intr_msr;
+ new_pc = to_book3s(vcpu)->hior + vec;
+
+#ifdef CONFIG_PPC_BOOK3S_64
+ /* If transactional, change to suspend mode on IRQ delivery */
+ if (MSR_TM_TRANSACTIONAL(msr))
+ new_msr |= MSR_TS_S;
+ else
+ new_msr |= msr & MSR_TS_MASK;
+#endif
+
+ kvmppc_set_srr0(vcpu, pc);
+ kvmppc_set_srr1(vcpu, (msr & SRR1_MSR_BITS) | srr1_flags);
+ kvmppc_set_pc(vcpu, new_pc);
+ kvmppc_set_msr(vcpu, new_msr);
+}
static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu)
{
@@ -1761,6 +1797,7 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_pr(struct kvm *kvm,
#else
/* default to book3s_32 (750) */
vcpu->arch.pvr = 0x84202;
+ vcpu->arch.intr_msr = 0;
#endif
kvmppc_set_pvr_pr(vcpu, vcpu->arch.pvr);
vcpu->arch.slb_nr = 64;
@@ -2058,6 +2095,7 @@ static struct kvmppc_ops kvm_ops_pr = {
.set_one_reg = kvmppc_set_one_reg_pr,
.vcpu_load = kvmppc_core_vcpu_load_pr,
.vcpu_put = kvmppc_core_vcpu_put_pr,
+ .inject_interrupt = kvmppc_inject_interrupt_pr,
.set_msr = kvmppc_set_msr_pr,
.vcpu_run = kvmppc_vcpu_run_pr,
.vcpu_create = kvmppc_core_vcpu_create_pr,
diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c
index a3f9c665bb5b..66858b7d3c6b 100644
--- a/arch/powerpc/kvm/book3s_xive.c
+++ b/arch/powerpc/kvm/book3s_xive.c
@@ -1211,6 +1211,45 @@ void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu)
vcpu->arch.xive_vcpu = NULL;
}
+static bool kvmppc_xive_vcpu_id_valid(struct kvmppc_xive *xive, u32 cpu)
+{
+ /* We have a block of xive->nr_servers VPs. We just need to check
+ * that raw vCPU ids are below the expected limit for this guest's
+ * core stride; kvmppc_pack_vcpu_id() will pack them down to an
+ * index that can be safely used to compute a VP id that belongs
+ * to the VP block.
+ */
+ return cpu < xive->nr_servers * xive->kvm->arch.emul_smt_mode;
+}
+
+int kvmppc_xive_compute_vp_id(struct kvmppc_xive *xive, u32 cpu, u32 *vp)
+{
+ u32 vp_id;
+
+ if (!kvmppc_xive_vcpu_id_valid(xive, cpu)) {
+ pr_devel("Out of bounds !\n");
+ return -EINVAL;
+ }
+
+ if (xive->vp_base == XIVE_INVALID_VP) {
+ xive->vp_base = xive_native_alloc_vp_block(xive->nr_servers);
+ pr_devel("VP_Base=%x nr_servers=%d\n", xive->vp_base, xive->nr_servers);
+
+ if (xive->vp_base == XIVE_INVALID_VP)
+ return -ENOSPC;
+ }
+
+ vp_id = kvmppc_xive_vp(xive, cpu);
+ if (kvmppc_xive_vp_in_use(xive->kvm, vp_id)) {
+ pr_devel("Duplicate !\n");
+ return -EEXIST;
+ }
+
+ *vp = vp_id;
+
+ return 0;
+}
+
int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
struct kvm_vcpu *vcpu, u32 cpu)
{
@@ -1229,20 +1268,13 @@ int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
return -EPERM;
if (vcpu->arch.irq_type != KVMPPC_IRQ_DEFAULT)
return -EBUSY;
- if (cpu >= (KVM_MAX_VCPUS * vcpu->kvm->arch.emul_smt_mode)) {
- pr_devel("Out of bounds !\n");
- return -EINVAL;
- }
/* We need to synchronize with queue provisioning */
mutex_lock(&xive->lock);
- vp_id = kvmppc_xive_vp(xive, cpu);
- if (kvmppc_xive_vp_in_use(xive->kvm, vp_id)) {
- pr_devel("Duplicate !\n");
- r = -EEXIST;
+ r = kvmppc_xive_compute_vp_id(xive, cpu, &vp_id);
+ if (r)
goto bail;
- }
xc = kzalloc(sizeof(*xc), GFP_KERNEL);
if (!xc) {
@@ -1834,6 +1866,43 @@ int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
return 0;
}
+int kvmppc_xive_set_nr_servers(struct kvmppc_xive *xive, u64 addr)
+{
+ u32 __user *ubufp = (u32 __user *) addr;
+ u32 nr_servers;
+ int rc = 0;
+
+ if (get_user(nr_servers, ubufp))
+ return -EFAULT;
+
+ pr_devel("%s nr_servers=%u\n", __func__, nr_servers);
+
+ if (!nr_servers || nr_servers > KVM_MAX_VCPU_ID)
+ return -EINVAL;
+
+ mutex_lock(&xive->lock);
+ if (xive->vp_base != XIVE_INVALID_VP)
+ /* The VP block is allocated once and freed when the device
+ * is released. Better not to allow changing its size, since it
+ * is used by connect_vcpu to validate vCPU ids (e.g., setting
+ * it back to a higher value could allow connect_vcpu to come
+ * up with a VP id that goes beyond the VP block, which is
+ * likely to cause a crash in OPAL).
+ */
+ rc = -EBUSY;
+ else if (nr_servers > KVM_MAX_VCPUS)
+ /* We don't need more servers. Higher vCPU ids get packed
+ * down below KVM_MAX_VCPUS by kvmppc_pack_vcpu_id().
+ */
+ xive->nr_servers = KVM_MAX_VCPUS;
+ else
+ xive->nr_servers = nr_servers;
+
+ mutex_unlock(&xive->lock);
+
+ return rc;
+}
+
static int xive_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
struct kvmppc_xive *xive = dev->private;
@@ -1842,6 +1911,11 @@ static int xive_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
switch (attr->group) {
case KVM_DEV_XICS_GRP_SOURCES:
return xive_set_source(xive, attr->attr, attr->addr);
+ case KVM_DEV_XICS_GRP_CTRL:
+ switch (attr->attr) {
+ case KVM_DEV_XICS_NR_SERVERS:
+ return kvmppc_xive_set_nr_servers(xive, attr->addr);
+ }
}
return -ENXIO;
}
@@ -1867,6 +1941,11 @@ static int xive_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
attr->attr < KVMPPC_XICS_NR_IRQS)
return 0;
break;
+ case KVM_DEV_XICS_GRP_CTRL:
+ switch (attr->attr) {
+ case KVM_DEV_XICS_NR_SERVERS:
+ return 0;
+ }
}
return -ENXIO;
}
@@ -2001,10 +2080,13 @@ static int kvmppc_xive_create(struct kvm_device *dev, u32 type)
{
struct kvmppc_xive *xive;
struct kvm *kvm = dev->kvm;
- int ret = 0;
pr_devel("Creating xive for partition\n");
+ /* Already there ? */
+ if (kvm->arch.xive)
+ return -EEXIST;
+
xive = kvmppc_xive_get_device(kvm, type);
if (!xive)
return -ENOMEM;
@@ -2014,12 +2096,6 @@ static int kvmppc_xive_create(struct kvm_device *dev, u32 type)
xive->kvm = kvm;
mutex_init(&xive->lock);
- /* Already there ? */
- if (kvm->arch.xive)
- ret = -EEXIST;
- else
- kvm->arch.xive = xive;
-
/* We use the default queue size set by the host */
xive->q_order = xive_native_default_eq_shift();
if (xive->q_order < PAGE_SHIFT)
@@ -2027,18 +2103,16 @@ static int kvmppc_xive_create(struct kvm_device *dev, u32 type)
else
xive->q_page_order = xive->q_order - PAGE_SHIFT;
- /* Allocate a bunch of VPs */
- xive->vp_base = xive_native_alloc_vp_block(KVM_MAX_VCPUS);
- pr_devel("VP_Base=%x\n", xive->vp_base);
-
- if (xive->vp_base == XIVE_INVALID_VP)
- ret = -ENOMEM;
+ /* VP allocation is delayed to the first call to connect_vcpu */
+ xive->vp_base = XIVE_INVALID_VP;
+ /* KVM_MAX_VCPUS limits the number of VMs to roughly 64 per socket
+ * on a POWER9 system.
+ */
+ xive->nr_servers = KVM_MAX_VCPUS;
xive->single_escalation = xive_native_has_single_escalation();
- if (ret)
- return ret;
-
+ kvm->arch.xive = xive;
return 0;
}
@@ -2108,9 +2182,9 @@ static int xive_debug_show(struct seq_file *m, void *private)
if (!xc)
continue;
- seq_printf(m, "cpu server %#x CPPR:%#x HWCPPR:%#x"
+ seq_printf(m, "cpu server %#x VP:%#x CPPR:%#x HWCPPR:%#x"
" MFRR:%#x PEND:%#x h_xirr: R=%lld V=%lld\n",
- xc->server_num, xc->cppr, xc->hw_cppr,
+ xc->server_num, xc->vp_id, xc->cppr, xc->hw_cppr,
xc->mfrr, xc->pending,
xc->stat_rm_h_xirr, xc->stat_vm_h_xirr);
diff --git a/arch/powerpc/kvm/book3s_xive.h b/arch/powerpc/kvm/book3s_xive.h
index fe3ed50e0818..382e3a56e789 100644
--- a/arch/powerpc/kvm/book3s_xive.h
+++ b/arch/powerpc/kvm/book3s_xive.h
@@ -135,6 +135,9 @@ struct kvmppc_xive {
/* Flags */
u8 single_escalation;
+ /* Number of entries in the VP block */
+ u32 nr_servers;
+
struct kvmppc_xive_ops *ops;
struct address_space *mapping;
struct mutex mapping_lock;
@@ -296,6 +299,8 @@ int kvmppc_xive_attach_escalation(struct kvm_vcpu *vcpu, u8 prio,
struct kvmppc_xive *kvmppc_xive_get_device(struct kvm *kvm, u32 type);
void xive_cleanup_single_escalation(struct kvm_vcpu *vcpu,
struct kvmppc_xive_vcpu *xc, int irq);
+int kvmppc_xive_compute_vp_id(struct kvmppc_xive *xive, u32 cpu, u32 *vp);
+int kvmppc_xive_set_nr_servers(struct kvmppc_xive *xive, u64 addr);
#endif /* CONFIG_KVM_XICS */
#endif /* _KVM_PPC_BOOK3S_XICS_H */
diff --git a/arch/powerpc/kvm/book3s_xive_native.c b/arch/powerpc/kvm/book3s_xive_native.c
index 78b906ffa0d2..d83adb1e1490 100644
--- a/arch/powerpc/kvm/book3s_xive_native.c
+++ b/arch/powerpc/kvm/book3s_xive_native.c
@@ -50,6 +50,24 @@ static void kvmppc_xive_native_cleanup_queue(struct kvm_vcpu *vcpu, int prio)
}
}
+static int kvmppc_xive_native_configure_queue(u32 vp_id, struct xive_q *q,
+ u8 prio, __be32 *qpage,
+ u32 order, bool can_escalate)
+{
+ int rc;
+ __be32 *qpage_prev = q->qpage;
+
+ rc = xive_native_configure_queue(vp_id, q, prio, qpage, order,
+ can_escalate);
+ if (rc)
+ return rc;
+
+ if (qpage_prev)
+ put_page(virt_to_page(qpage_prev));
+
+ return rc;
+}
+
void kvmppc_xive_native_cleanup_vcpu(struct kvm_vcpu *vcpu)
{
struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
@@ -118,19 +136,12 @@ int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev,
return -EPERM;
if (vcpu->arch.irq_type != KVMPPC_IRQ_DEFAULT)
return -EBUSY;
- if (server_num >= (KVM_MAX_VCPUS * vcpu->kvm->arch.emul_smt_mode)) {
- pr_devel("Out of bounds !\n");
- return -EINVAL;
- }
mutex_lock(&xive->lock);
- vp_id = kvmppc_xive_vp(xive, server_num);
- if (kvmppc_xive_vp_in_use(xive->kvm, vp_id)) {
- pr_devel("Duplicate !\n");
- rc = -EEXIST;
+ rc = kvmppc_xive_compute_vp_id(xive, server_num, &vp_id);
+ if (rc)
goto bail;
- }
xc = kzalloc(sizeof(*xc), GFP_KERNEL);
if (!xc) {
@@ -582,19 +593,14 @@ static int kvmppc_xive_native_set_queue_config(struct kvmppc_xive *xive,
q->guest_qaddr = 0;
q->guest_qshift = 0;
- rc = xive_native_configure_queue(xc->vp_id, q, priority,
- NULL, 0, true);
+ rc = kvmppc_xive_native_configure_queue(xc->vp_id, q, priority,
+ NULL, 0, true);
if (rc) {
pr_err("Failed to reset queue %d for VCPU %d: %d\n",
priority, xc->server_num, rc);
return rc;
}
- if (q->qpage) {
- put_page(virt_to_page(q->qpage));
- q->qpage = NULL;
- }
-
return 0;
}
@@ -624,12 +630,6 @@ static int kvmppc_xive_native_set_queue_config(struct kvmppc_xive *xive,
srcu_idx = srcu_read_lock(&kvm->srcu);
gfn = gpa_to_gfn(kvm_eq.qaddr);
- page = gfn_to_page(kvm, gfn);
- if (is_error_page(page)) {
- srcu_read_unlock(&kvm->srcu, srcu_idx);
- pr_err("Couldn't get queue page %llx!\n", kvm_eq.qaddr);
- return -EINVAL;
- }
page_size = kvm_host_page_size(kvm, gfn);
if (1ull << kvm_eq.qshift > page_size) {
@@ -638,6 +638,13 @@ static int kvmppc_xive_native_set_queue_config(struct kvmppc_xive *xive,
return -EINVAL;
}
+ page = gfn_to_page(kvm, gfn);
+ if (is_error_page(page)) {
+ srcu_read_unlock(&kvm->srcu, srcu_idx);
+ pr_err("Couldn't get queue page %llx!\n", kvm_eq.qaddr);
+ return -EINVAL;
+ }
+
qaddr = page_to_virt(page) + (kvm_eq.qaddr & ~PAGE_MASK);
srcu_read_unlock(&kvm->srcu, srcu_idx);
@@ -653,8 +660,8 @@ static int kvmppc_xive_native_set_queue_config(struct kvmppc_xive *xive,
* OPAL level because the use of END ESBs is not supported by
* Linux.
*/
- rc = xive_native_configure_queue(xc->vp_id, q, priority,
- (__be32 *) qaddr, kvm_eq.qshift, true);
+ rc = kvmppc_xive_native_configure_queue(xc->vp_id, q, priority,
+ (__be32 *) qaddr, kvm_eq.qshift, true);
if (rc) {
pr_err("Failed to configure queue %d for VCPU %d: %d\n",
priority, xc->server_num, rc);
@@ -928,6 +935,8 @@ static int kvmppc_xive_native_set_attr(struct kvm_device *dev,
return kvmppc_xive_reset(xive);
case KVM_DEV_XIVE_EQ_SYNC:
return kvmppc_xive_native_eq_sync(xive);
+ case KVM_DEV_XIVE_NR_SERVERS:
+ return kvmppc_xive_set_nr_servers(xive, attr->addr);
}
break;
case KVM_DEV_XIVE_GRP_SOURCE:
@@ -967,6 +976,7 @@ static int kvmppc_xive_native_has_attr(struct kvm_device *dev,
switch (attr->attr) {
case KVM_DEV_XIVE_RESET:
case KVM_DEV_XIVE_EQ_SYNC:
+ case KVM_DEV_XIVE_NR_SERVERS:
return 0;
}
break;
@@ -1067,7 +1077,6 @@ static int kvmppc_xive_native_create(struct kvm_device *dev, u32 type)
{
struct kvmppc_xive *xive;
struct kvm *kvm = dev->kvm;
- int ret = 0;
pr_devel("Creating xive native device\n");
@@ -1081,27 +1090,20 @@ static int kvmppc_xive_native_create(struct kvm_device *dev, u32 type)
dev->private = xive;
xive->dev = dev;
xive->kvm = kvm;
- kvm->arch.xive = xive;
mutex_init(&xive->mapping_lock);
mutex_init(&xive->lock);
- /*
- * Allocate a bunch of VPs. KVM_MAX_VCPUS is a large value for
- * a default. Getting the max number of CPUs the VM was
- * configured with would improve our usage of the XIVE VP space.
+ /* VP allocation is delayed to the first call to connect_vcpu */
+ xive->vp_base = XIVE_INVALID_VP;
+ /* KVM_MAX_VCPUS limits the number of VMs to roughly 64 per socket
+ * on a POWER9 system.
*/
- xive->vp_base = xive_native_alloc_vp_block(KVM_MAX_VCPUS);
- pr_devel("VP_Base=%x\n", xive->vp_base);
-
- if (xive->vp_base == XIVE_INVALID_VP)
- ret = -ENXIO;
+ xive->nr_servers = KVM_MAX_VCPUS;
xive->single_escalation = xive_native_has_single_escalation();
xive->ops = &kvmppc_xive_native_ops;
- if (ret)
- return ret;
-
+ kvm->arch.xive = xive;
return 0;
}
@@ -1204,8 +1206,8 @@ static int xive_native_debug_show(struct seq_file *m, void *private)
if (!xc)
continue;
- seq_printf(m, "cpu server %#x NSR=%02x CPPR=%02x IBP=%02x PIPR=%02x w01=%016llx w2=%08x\n",
- xc->server_num,
+ seq_printf(m, "cpu server %#x VP=%#x NSR=%02x CPPR=%02x IBP=%02x PIPR=%02x w01=%016llx w2=%08x\n",
+ xc->server_num, xc->vp_id,
vcpu->arch.xive_saved_state.nsr,
vcpu->arch.xive_saved_state.cppr,
vcpu->arch.xive_saved_state.ipb,
diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
index 321db0fdb9db..425d13806645 100644
--- a/arch/powerpc/kvm/e500_mmu_host.c
+++ b/arch/powerpc/kvm/e500_mmu_host.c
@@ -355,9 +355,9 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
if (tlbsel == 1) {
struct vm_area_struct *vma;
- down_read(&current->mm->mmap_sem);
+ down_read(&kvm->mm->mmap_sem);
- vma = find_vma(current->mm, hva);
+ vma = find_vma(kvm->mm, hva);
if (vma && hva >= vma->vm_start &&
(vma->vm_flags & VM_PFNMAP)) {
/*
@@ -441,7 +441,7 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1);
}
- up_read(&current->mm->mmap_sem);
+ up_read(&kvm->mm->mmap_sem);
}
if (likely(!pfnmap)) {
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 3a77bb643452..9e085e931d74 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -522,6 +522,8 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_IMMEDIATE_EXIT:
r = 1;
break;
+ case KVM_CAP_PPC_GUEST_DEBUG_SSTEP:
+ /* fall through */
case KVM_CAP_PPC_PAIRED_SINGLES:
case KVM_CAP_PPC_OSI:
case KVM_CAP_PPC_GET_PVINFO:
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index be941d382c8d..c95b7fe9f298 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -31,6 +31,7 @@
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/memremap.h>
+#include <linux/dma-direct.h>
#include <asm/pgalloc.h>
#include <asm/prom.h>
@@ -201,10 +202,10 @@ static int __init mark_nonram_nosave(void)
* everything else. GFP_DMA32 page allocations automatically fall back to
* ZONE_DMA.
*
- * By using 31-bit unconditionally, we can exploit ARCH_ZONE_DMA_BITS to
- * inform the generic DMA mapping code. 32-bit only devices (if not handled
- * by an IOMMU anyway) will take a first dip into ZONE_NORMAL and get
- * otherwise served by ZONE_DMA.
+ * By using 31-bit unconditionally, we can exploit zone_dma_bits to inform the
+ * generic DMA mapping code. 32-bit only devices (if not handled by an IOMMU
+ * anyway) will take a first dip into ZONE_NORMAL and get otherwise served by
+ * ZONE_DMA.
*/
static unsigned long max_zone_pfns[MAX_NR_ZONES];
@@ -237,9 +238,18 @@ void __init paging_init(void)
printk(KERN_DEBUG "Memory hole size: %ldMB\n",
(long int)((top_of_ram - total_ram) >> 20));
+ /*
+ * Allow 30-bit DMA for very limited Broadcom wifi chips on many
+ * powerbooks.
+ */
+ if (IS_ENABLED(CONFIG_PPC32))
+ zone_dma_bits = 30;
+ else
+ zone_dma_bits = 31;
+
#ifdef CONFIG_ZONE_DMA
max_zone_pfns[ZONE_DMA] = min(max_low_pfn,
- 1UL << (ARCH_ZONE_DMA_BITS - PAGE_SHIFT));
+ 1UL << (zone_dma_bits - PAGE_SHIFT));
#endif
max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
#ifdef CONFIG_HIGHMEM
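As a worked check of the boundary this computes (illustrative arithmetic, not part of the patch):

/*
 * With 64K pages on ppc64 (PAGE_SHIFT = 16) and zone_dma_bits = 31:
 *	1UL << (31 - 16) = 0x8000 pfns = 0x8000 * 64 KiB = 2 GiB
 * With 4K pages on ppc32 (PAGE_SHIFT = 12) and zone_dma_bits = 30:
 *	1UL << (30 - 12) = 0x40000 pfns = 0x40000 * 4 KiB = 1 GiB
 */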
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index ca92e01d0bd1..48604625ab31 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -96,7 +96,7 @@ static inline unsigned long perf_ip_adjust(struct pt_regs *regs)
{
return 0;
}
-static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp) { }
+static inline void perf_get_data_addr(struct perf_event *event, struct pt_regs *regs, u64 *addrp) { }
static inline u32 perf_get_misc_flags(struct pt_regs *regs)
{
return 0;
@@ -127,7 +127,7 @@ static unsigned long ebb_switch_in(bool ebb, struct cpu_hw_events *cpuhw)
static inline void power_pmu_bhrb_enable(struct perf_event *event) {}
static inline void power_pmu_bhrb_disable(struct perf_event *event) {}
static void power_pmu_sched_task(struct perf_event_context *ctx, bool sched_in) {}
-static inline void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw) {}
+static inline void power_pmu_bhrb_read(struct perf_event *event, struct cpu_hw_events *cpuhw) {}
static void pmao_restore_workaround(bool ebb) { }
#endif /* CONFIG_PPC32 */
@@ -179,7 +179,7 @@ static inline unsigned long perf_ip_adjust(struct pt_regs *regs)
* pointed to by SIAR; this is indicated by the [POWER6_]MMCRA_SDSYNC, the
* [POWER7P_]MMCRA_SDAR_VALID bit in MMCRA, or the SDAR_VALID bit in SIER.
*/
-static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp)
+static inline void perf_get_data_addr(struct perf_event *event, struct pt_regs *regs, u64 *addrp)
{
unsigned long mmcra = regs->dsisr;
bool sdar_valid;
@@ -204,8 +204,7 @@ static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp)
if (!(mmcra & MMCRA_SAMPLE_ENABLE) || sdar_valid)
*addrp = mfspr(SPRN_SDAR);
- if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN) &&
- is_kernel_addr(mfspr(SPRN_SDAR)))
+ if (is_kernel_addr(mfspr(SPRN_SDAR)) && perf_allow_kernel(&event->attr) != 0)
*addrp = 0;
}
@@ -444,7 +443,7 @@ static __u64 power_pmu_bhrb_to(u64 addr)
}
/* Processing BHRB entries */
-static void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw)
+static void power_pmu_bhrb_read(struct perf_event *event, struct cpu_hw_events *cpuhw)
{
u64 val;
u64 addr;
@@ -472,8 +471,7 @@ static void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw)
* exporting it to userspace (avoid exposure of regions
* where we could have speculative execution)
*/
- if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN) &&
- is_kernel_addr(addr))
+ if (is_kernel_addr(addr) && perf_allow_kernel(&event->attr) != 0)
continue;
/* Branches are read most recent first (ie. mfbhrb 0 is
@@ -2087,12 +2085,12 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
if (event->attr.sample_type &
(PERF_SAMPLE_ADDR | PERF_SAMPLE_PHYS_ADDR))
- perf_get_data_addr(regs, &data.addr);
+ perf_get_data_addr(event, regs, &data.addr);
if (event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK) {
struct cpu_hw_events *cpuhw;
cpuhw = this_cpu_ptr(&cpu_hw_events);
- power_pmu_bhrb_read(cpuhw);
+ power_pmu_bhrb_read(event, cpuhw);
data.br_stack = &cpuhw->bhrb_stack;
}
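For context, a sketch of what the helper used above checks. This is an assumption based on the generic perf_allow_kernel() of this kernel generation; the LSM hook comes from the common perf code, not from this hunk:

/* Hedged sketch of perf_allow_kernel(): the old open-coded
 * perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN) test, plus an LSM
 * hook so security modules can veto exposure of kernel addresses.
 */
static inline int perf_allow_kernel_sketch(struct perf_event_attr *attr)
{
	if (sysctl_perf_event_paranoid > 1 && !capable(CAP_SYS_ADMIN))
		return -EACCES;

	return security_perf_event_open(attr, PERF_SECURITY_KERNEL);
}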
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index 8eebbc8860bb..75a6c9117622 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -164,7 +164,7 @@ config ARCH_RV32I
config ARCH_RV64I
bool "RV64I"
select 64BIT
- select ARCH_SUPPORTS_INT128 if GCC_VERSION >= 50000
+ select ARCH_SUPPORTS_INT128 if CC_HAS_INT128 && GCC_VERSION >= 50000
select HAVE_FUNCTION_TRACER
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_FTRACE_MCOUNT_RECORD
diff --git a/arch/riscv/kernel/module.c b/arch/riscv/kernel/module.c
index 70bb94ae61c5..b7401858d872 100644
--- a/arch/riscv/kernel/module.c
+++ b/arch/riscv/kernel/module.c
@@ -315,8 +315,8 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
/* Ignore unresolved weak symbol */
if (ELF_ST_BIND(sym->st_info) == STB_WEAK)
continue;
- pr_warning("%s: Unknown symbol %s\n",
- me->name, strtab + sym->st_name);
+ pr_warn("%s: Unknown symbol %s\n",
+ me->name, strtab + sym->st_name);
return -ENOENT;
}
diff --git a/arch/riscv/kernel/vmlinux.lds.S b/arch/riscv/kernel/vmlinux.lds.S
index 23cd1a9e52a1..12f42f96d46e 100644
--- a/arch/riscv/kernel/vmlinux.lds.S
+++ b/arch/riscv/kernel/vmlinux.lds.S
@@ -52,12 +52,12 @@ SECTIONS
/* Start of data section */
_sdata = .;
- RO_DATA_SECTION(L1_CACHE_BYTES)
+ RO_DATA(L1_CACHE_BYTES)
.srodata : {
*(.srodata*)
}
- RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
+ RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
.sdata : {
__global_pointer$ = . + 0x800;
*(.sdata*)
@@ -69,7 +69,6 @@ SECTIONS
BSS_SECTION(PAGE_SIZE, PAGE_SIZE, 0)
EXCEPTION_TABLE(0x10)
- NOTES
.rel.dyn : {
*(.rel.dyn*)
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 43a81d0ad507..f0df9e48e651 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -246,8 +246,8 @@ choice
config MARCH_Z900
bool "IBM zSeries model z800 and z900"
- depends on !CC_IS_CLANG
select HAVE_MARCH_Z900_FEATURES
+ depends on $(cc-option,-march=z900)
help
Select this to enable optimizations for model z800/z900 (2064 and
2066 series). This will enable some optimizations that are not
@@ -255,8 +255,8 @@ config MARCH_Z900
config MARCH_Z990
bool "IBM zSeries model z890 and z990"
- depends on !CC_IS_CLANG
select HAVE_MARCH_Z990_FEATURES
+ depends on $(cc-option,-march=z990)
help
Select this to enable optimizations for model z890/z990 (2084 and
2086 series). The kernel will be slightly faster but will not work
@@ -264,8 +264,8 @@ config MARCH_Z990
config MARCH_Z9_109
bool "IBM System z9"
- depends on !CC_IS_CLANG
select HAVE_MARCH_Z9_109_FEATURES
+ depends on $(cc-option,-march=z9-109)
help
Select this to enable optimizations for IBM System z9 (2094 and
2096 series). The kernel will be slightly faster but will not work
@@ -274,6 +274,7 @@ config MARCH_Z9_109
config MARCH_Z10
bool "IBM System z10"
select HAVE_MARCH_Z10_FEATURES
+ depends on $(cc-option,-march=z10)
help
Select this to enable optimizations for IBM System z10 (2097 and
2098 series). The kernel will be slightly faster but will not work
@@ -282,6 +283,7 @@ config MARCH_Z10
config MARCH_Z196
bool "IBM zEnterprise 114 and 196"
select HAVE_MARCH_Z196_FEATURES
+ depends on $(cc-option,-march=z196)
help
Select this to enable optimizations for IBM zEnterprise 114 and 196
(2818 and 2817 series). The kernel will be slightly faster but will
@@ -290,6 +292,7 @@ config MARCH_Z196
config MARCH_ZEC12
bool "IBM zBC12 and zEC12"
select HAVE_MARCH_ZEC12_FEATURES
+ depends on $(cc-option,-march=zEC12)
help
Select this to enable optimizations for IBM zBC12 and zEC12 (2828 and
2827 series). The kernel will be slightly faster but will not work on
@@ -298,6 +301,7 @@ config MARCH_ZEC12
config MARCH_Z13
bool "IBM z13s and z13"
select HAVE_MARCH_Z13_FEATURES
+ depends on $(cc-option,-march=z13)
help
Select this to enable optimizations for IBM z13s and z13 (2965 and
2964 series). The kernel will be slightly faster but will not work on
@@ -306,6 +310,7 @@ config MARCH_Z13
config MARCH_Z14
bool "IBM z14 ZR1 and z14"
select HAVE_MARCH_Z14_FEATURES
+ depends on $(cc-option,-march=z14)
help
Select this to enable optimizations for IBM z14 ZR1 and z14 (3907
and 3906 series). The kernel will be slightly faster but will not
@@ -314,6 +319,7 @@ config MARCH_Z14
config MARCH_Z15
bool "IBM z15"
select HAVE_MARCH_Z15_FEATURES
+ depends on $(cc-option,-march=z15)
help
Select this to enable optimizations for IBM z15 (8562
and 8561 series). The kernel will be slightly faster but will not
@@ -367,33 +373,39 @@ config TUNE_DEFAULT
config TUNE_Z900
bool "IBM zSeries model z800 and z900"
- depends on !CC_IS_CLANG
+ depends on $(cc-option,-mtune=z900)
config TUNE_Z990
bool "IBM zSeries model z890 and z990"
- depends on !CC_IS_CLANG
+ depends on $(cc-option,-mtune=z990)
config TUNE_Z9_109
bool "IBM System z9"
- depends on !CC_IS_CLANG
+ depends on $(cc-option,-mtune=z9-109)
config TUNE_Z10
bool "IBM System z10"
+ depends on $(cc-option,-mtune=z10)
config TUNE_Z196
bool "IBM zEnterprise 114 and 196"
+ depends on $(cc-option,-mtune=z196)
config TUNE_ZEC12
bool "IBM zBC12 and zEC12"
+ depends on $(cc-option,-mtune=zEC12)
config TUNE_Z13
- bool "IBM z13"
+ bool "IBM z13s and z13"
+ depends on $(cc-option,-mtune=z13)
config TUNE_Z14
- bool "IBM z14"
+ bool "IBM z14 ZR1 and z14"
+ depends on $(cc-option,-mtune=z14)
config TUNE_Z15
bool "IBM z15"
+ depends on $(cc-option,-mtune=z15)
endchoice
diff --git a/arch/s390/boot/startup.c b/arch/s390/boot/startup.c
index 5367950510f6..fbd341ea03b8 100644
--- a/arch/s390/boot/startup.c
+++ b/arch/s390/boot/startup.c
@@ -46,7 +46,7 @@ struct diag_ops __bootdata_preserved(diag_dma_ops) = {
.diag0c = _diag0c_dma,
.diag308_reset = _diag308_reset_dma
};
-static struct diag210 _diag210_tmp_dma __section(".dma.data");
+static struct diag210 _diag210_tmp_dma __section(.dma.data);
struct diag210 *__bootdata_preserved(__diag210_tmp_dma) = &_diag210_tmp_dma;
void _swsusp_reset_dma(void);
unsigned long __bootdata_preserved(__swsusp_reset_dma) = __pa(_swsusp_reset_dma);
diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig
index 38d64030aacf..2e60c80395ab 100644
--- a/arch/s390/configs/debug_defconfig
+++ b/arch/s390/configs/debug_defconfig
@@ -62,7 +62,6 @@ CONFIG_OPROFILE=m
CONFIG_KPROBES=y
CONFIG_JUMP_LABEL=y
CONFIG_STATIC_KEYS_SELFTEST=y
-CONFIG_REFCOUNT_FULL=y
CONFIG_LOCK_EVENT_COUNTS=y
CONFIG_MODULES=y
CONFIG_MODULE_FORCE_LOAD=y
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index 9803e96d2924..ead0b2c9881d 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -44,7 +44,7 @@ struct s390_aes_ctx {
int key_len;
unsigned long fc;
union {
- struct crypto_sync_skcipher *blk;
+ struct crypto_skcipher *skcipher;
struct crypto_cipher *cip;
} fallback;
};
@@ -54,7 +54,7 @@ struct s390_xts_ctx {
u8 pcc_key[32];
int key_len;
unsigned long fc;
- struct crypto_sync_skcipher *fallback;
+ struct crypto_skcipher *fallback;
};
struct gcm_sg_walk {
@@ -178,66 +178,41 @@ static struct crypto_alg aes_alg = {
}
};
-static int setkey_fallback_blk(struct crypto_tfm *tfm, const u8 *key,
- unsigned int len)
+static int setkey_fallback_skcipher(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int len)
{
- struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
- unsigned int ret;
-
- crypto_sync_skcipher_clear_flags(sctx->fallback.blk,
- CRYPTO_TFM_REQ_MASK);
- crypto_sync_skcipher_set_flags(sctx->fallback.blk, tfm->crt_flags &
- CRYPTO_TFM_REQ_MASK);
-
- ret = crypto_sync_skcipher_setkey(sctx->fallback.blk, key, len);
-
- tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
- tfm->crt_flags |= crypto_sync_skcipher_get_flags(sctx->fallback.blk) &
- CRYPTO_TFM_RES_MASK;
-
- return ret;
-}
-
-static int fallback_blk_dec(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
-{
- unsigned int ret;
- struct crypto_blkcipher *tfm = desc->tfm;
- struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(tfm);
- SYNC_SKCIPHER_REQUEST_ON_STACK(req, sctx->fallback.blk);
-
- skcipher_request_set_sync_tfm(req, sctx->fallback.blk);
- skcipher_request_set_callback(req, desc->flags, NULL, NULL);
- skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
-
- ret = crypto_skcipher_decrypt(req);
+ struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
+ int ret;
- skcipher_request_zero(req);
+ crypto_skcipher_clear_flags(sctx->fallback.skcipher,
+ CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_set_flags(sctx->fallback.skcipher,
+ crypto_skcipher_get_flags(tfm) &
+ CRYPTO_TFM_REQ_MASK);
+ ret = crypto_skcipher_setkey(sctx->fallback.skcipher, key, len);
+ crypto_skcipher_set_flags(tfm,
+ crypto_skcipher_get_flags(sctx->fallback.skcipher) &
+ CRYPTO_TFM_RES_MASK);
return ret;
}
-static int fallback_blk_enc(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int fallback_skcipher_crypt(struct s390_aes_ctx *sctx,
+ struct skcipher_request *req,
+ unsigned long modifier)
{
- unsigned int ret;
- struct crypto_blkcipher *tfm = desc->tfm;
- struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(tfm);
- SYNC_SKCIPHER_REQUEST_ON_STACK(req, sctx->fallback.blk);
-
- skcipher_request_set_sync_tfm(req, sctx->fallback.blk);
- skcipher_request_set_callback(req, desc->flags, NULL, NULL);
- skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
+ struct skcipher_request *subreq = skcipher_request_ctx(req);
- ret = crypto_skcipher_encrypt(req);
- return ret;
+ *subreq = *req;
+ skcipher_request_set_tfm(subreq, sctx->fallback.skcipher);
+ return (modifier & CPACF_DECRYPT) ?
+ crypto_skcipher_decrypt(subreq) :
+ crypto_skcipher_encrypt(subreq);
}
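/*
 * Editorial note on the pattern above (restating the patch, names as in
 * this file): fallback_init_skcipher() below reserves the fallback's
 * request directly behind this driver's request via
 *
 *	crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) +
 *			crypto_skcipher_reqsize(sctx->fallback.skcipher));
 *
 * so "*subreq = *req" copies the crypt parameters and completion
 * callback in one go, and only the tfm pointer has to be redirected at
 * the fallback before calling crypto_skcipher_{en,de}crypt().
 */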
-static int ecb_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+static int ecb_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
unsigned int key_len)
{
- struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
+ struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
unsigned long fc;
/* Pick the correct function code based on the key length */
@@ -248,111 +223,92 @@ static int ecb_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
/* Check if the function code is available */
sctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
if (!sctx->fc)
- return setkey_fallback_blk(tfm, in_key, key_len);
+ return setkey_fallback_skcipher(tfm, in_key, key_len);
sctx->key_len = key_len;
memcpy(sctx->key, in_key, key_len);
return 0;
}
-static int ecb_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
- struct blkcipher_walk *walk)
+static int ecb_aes_crypt(struct skcipher_request *req, unsigned long modifier)
{
- struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
unsigned int nbytes, n;
int ret;
- ret = blkcipher_walk_virt(desc, walk);
- while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
+ if (unlikely(!sctx->fc))
+ return fallback_skcipher_crypt(sctx, req, modifier);
+
+ ret = skcipher_walk_virt(&walk, req, false);
+ while ((nbytes = walk.nbytes) != 0) {
/* only use complete blocks */
n = nbytes & ~(AES_BLOCK_SIZE - 1);
cpacf_km(sctx->fc | modifier, sctx->key,
- walk->dst.virt.addr, walk->src.virt.addr, n);
- ret = blkcipher_walk_done(desc, walk, nbytes - n);
+ walk.dst.virt.addr, walk.src.virt.addr, n);
+ ret = skcipher_walk_done(&walk, nbytes - n);
}
-
return ret;
}
-static int ecb_aes_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int ecb_aes_encrypt(struct skcipher_request *req)
{
- struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
- struct blkcipher_walk walk;
-
- if (unlikely(!sctx->fc))
- return fallback_blk_enc(desc, dst, src, nbytes);
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- return ecb_aes_crypt(desc, 0, &walk);
+ return ecb_aes_crypt(req, 0);
}
-static int ecb_aes_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int ecb_aes_decrypt(struct skcipher_request *req)
{
- struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
- struct blkcipher_walk walk;
-
- if (unlikely(!sctx->fc))
- return fallback_blk_dec(desc, dst, src, nbytes);
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- return ecb_aes_crypt(desc, CPACF_DECRYPT, &walk);
+ return ecb_aes_crypt(req, CPACF_DECRYPT);
}
-static int fallback_init_blk(struct crypto_tfm *tfm)
+static int fallback_init_skcipher(struct crypto_skcipher *tfm)
{
- const char *name = tfm->__crt_alg->cra_name;
- struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
+ const char *name = crypto_tfm_alg_name(&tfm->base);
+ struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
- sctx->fallback.blk = crypto_alloc_sync_skcipher(name, 0,
- CRYPTO_ALG_NEED_FALLBACK);
+ sctx->fallback.skcipher = crypto_alloc_skcipher(name, 0,
+ CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC);
- if (IS_ERR(sctx->fallback.blk)) {
+ if (IS_ERR(sctx->fallback.skcipher)) {
pr_err("Allocating AES fallback algorithm %s failed\n",
name);
- return PTR_ERR(sctx->fallback.blk);
+ return PTR_ERR(sctx->fallback.skcipher);
}
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) +
+ crypto_skcipher_reqsize(sctx->fallback.skcipher));
return 0;
}
-static void fallback_exit_blk(struct crypto_tfm *tfm)
+static void fallback_exit_skcipher(struct crypto_skcipher *tfm)
{
- struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
+ struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
- crypto_free_sync_skcipher(sctx->fallback.blk);
+ crypto_free_skcipher(sctx->fallback.skcipher);
}
-static struct crypto_alg ecb_aes_alg = {
- .cra_name = "ecb(aes)",
- .cra_driver_name = "ecb-aes-s390",
- .cra_priority = 401, /* combo: aes + ecb + 1 */
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_NEED_FALLBACK,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct s390_aes_ctx),
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = fallback_init_blk,
- .cra_exit = fallback_exit_blk,
- .cra_u = {
- .blkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = ecb_aes_set_key,
- .encrypt = ecb_aes_encrypt,
- .decrypt = ecb_aes_decrypt,
- }
- }
+static struct skcipher_alg ecb_aes_alg = {
+ .base.cra_name = "ecb(aes)",
+ .base.cra_driver_name = "ecb-aes-s390",
+ .base.cra_priority = 401, /* combo: aes + ecb + 1 */
+ .base.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct s390_aes_ctx),
+ .base.cra_module = THIS_MODULE,
+ .init = fallback_init_skcipher,
+ .exit = fallback_exit_skcipher,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = ecb_aes_set_key,
+ .encrypt = ecb_aes_encrypt,
+ .decrypt = ecb_aes_decrypt,
};
-static int cbc_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+static int cbc_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
unsigned int key_len)
{
- struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
+ struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
unsigned long fc;
/* Pick the correct function code based on the key length */
@@ -363,17 +319,18 @@ static int cbc_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
/* Check if the function code is available */
sctx->fc = (fc && cpacf_test_func(&kmc_functions, fc)) ? fc : 0;
if (!sctx->fc)
- return setkey_fallback_blk(tfm, in_key, key_len);
+ return setkey_fallback_skcipher(tfm, in_key, key_len);
sctx->key_len = key_len;
memcpy(sctx->key, in_key, key_len);
return 0;
}
-static int cbc_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
- struct blkcipher_walk *walk)
+static int cbc_aes_crypt(struct skcipher_request *req, unsigned long modifier)
{
- struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
unsigned int nbytes, n;
int ret;
struct {
@@ -381,134 +338,74 @@ static int cbc_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
u8 key[AES_MAX_KEY_SIZE];
} param;
- ret = blkcipher_walk_virt(desc, walk);
- memcpy(param.iv, walk->iv, AES_BLOCK_SIZE);
+ if (unlikely(!sctx->fc))
+ return fallback_skcipher_crypt(sctx, req, modifier);
+
+ ret = skcipher_walk_virt(&walk, req, false);
+ if (ret)
+ return ret;
+ memcpy(param.iv, walk.iv, AES_BLOCK_SIZE);
memcpy(param.key, sctx->key, sctx->key_len);
- while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
+ while ((nbytes = walk.nbytes) != 0) {
/* only use complete blocks */
n = nbytes & ~(AES_BLOCK_SIZE - 1);
cpacf_kmc(sctx->fc | modifier, &param,
- walk->dst.virt.addr, walk->src.virt.addr, n);
- ret = blkcipher_walk_done(desc, walk, nbytes - n);
+ walk.dst.virt.addr, walk.src.virt.addr, n);
+ memcpy(walk.iv, param.iv, AES_BLOCK_SIZE);
+ ret = skcipher_walk_done(&walk, nbytes - n);
}
- memcpy(walk->iv, param.iv, AES_BLOCK_SIZE);
return ret;
}
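/*
 * Note: fallback_skcipher_crypt(), called above when no CPACF function
 * code is available, is defined earlier in the file and not shown in
 * this hunk.  A minimal sketch of its likely shape, assuming it uses
 * the same subrequest pattern this patch introduces for XTS (the
 * request context reserved by fallback_init_skcipher() holds the
 * fallback's own skcipher_request):
 */
static int fallback_skcipher_crypt(struct s390_aes_ctx *sctx,
				   struct skcipher_request *req,
				   unsigned long modifier)
{
	struct skcipher_request *subreq = skcipher_request_ctx(req);

	*subreq = *req;
	skcipher_request_set_tfm(subreq, sctx->fallback.skcipher);
	return (modifier & CPACF_DECRYPT) ?
	       crypto_skcipher_decrypt(subreq) :
	       crypto_skcipher_encrypt(subreq);
}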
-static int cbc_aes_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int cbc_aes_encrypt(struct skcipher_request *req)
{
- struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
- struct blkcipher_walk walk;
-
- if (unlikely(!sctx->fc))
- return fallback_blk_enc(desc, dst, src, nbytes);
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- return cbc_aes_crypt(desc, 0, &walk);
+ return cbc_aes_crypt(req, 0);
}
-static int cbc_aes_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int cbc_aes_decrypt(struct skcipher_request *req)
{
- struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
- struct blkcipher_walk walk;
-
- if (unlikely(!sctx->fc))
- return fallback_blk_dec(desc, dst, src, nbytes);
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- return cbc_aes_crypt(desc, CPACF_DECRYPT, &walk);
+ return cbc_aes_crypt(req, CPACF_DECRYPT);
}
-static struct crypto_alg cbc_aes_alg = {
- .cra_name = "cbc(aes)",
- .cra_driver_name = "cbc-aes-s390",
- .cra_priority = 402, /* ecb-aes-s390 + 1 */
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_NEED_FALLBACK,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct s390_aes_ctx),
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = fallback_init_blk,
- .cra_exit = fallback_exit_blk,
- .cra_u = {
- .blkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = cbc_aes_set_key,
- .encrypt = cbc_aes_encrypt,
- .decrypt = cbc_aes_decrypt,
- }
- }
+static struct skcipher_alg cbc_aes_alg = {
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "cbc-aes-s390",
+ .base.cra_priority = 402, /* ecb-aes-s390 + 1 */
+ .base.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct s390_aes_ctx),
+ .base.cra_module = THIS_MODULE,
+ .init = fallback_init_skcipher,
+ .exit = fallback_exit_skcipher,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = cbc_aes_set_key,
+ .encrypt = cbc_aes_encrypt,
+ .decrypt = cbc_aes_decrypt,
};
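/*
 * For reference, a caller-side sketch (illustrative only, not part of
 * this patch; the helper name and the one-segment scatterlist are
 * assumptions) of driving a converted algorithm such as "cbc(aes)"
 * synchronously through the skcipher API:
 */
#include <crypto/aes.h>
#include <crypto/skcipher.h>
#include <linux/scatterlist.h>

static int example_cbc_aes_encrypt(u8 *buf, unsigned int len,
				   const u8 *key, unsigned int keylen,
				   u8 iv[AES_BLOCK_SIZE])
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int ret;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);
	ret = crypto_skcipher_setkey(tfm, key, keylen);
	if (ret)
		goto out_free_tfm;
	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}
	sg_init_one(&sg, buf, len);	/* len must be a block multiple */
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, len, iv);
	ret = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return ret;
}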
-static int xts_fallback_setkey(struct crypto_tfm *tfm, const u8 *key,
- unsigned int len)
-{
- struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
- unsigned int ret;
-
- crypto_sync_skcipher_clear_flags(xts_ctx->fallback,
- CRYPTO_TFM_REQ_MASK);
- crypto_sync_skcipher_set_flags(xts_ctx->fallback, tfm->crt_flags &
- CRYPTO_TFM_REQ_MASK);
-
- ret = crypto_sync_skcipher_setkey(xts_ctx->fallback, key, len);
-
- tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
- tfm->crt_flags |= crypto_sync_skcipher_get_flags(xts_ctx->fallback) &
- CRYPTO_TFM_RES_MASK;
-
- return ret;
-}
-
-static int xts_fallback_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
-{
- struct crypto_blkcipher *tfm = desc->tfm;
- struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(tfm);
- SYNC_SKCIPHER_REQUEST_ON_STACK(req, xts_ctx->fallback);
- unsigned int ret;
-
- skcipher_request_set_sync_tfm(req, xts_ctx->fallback);
- skcipher_request_set_callback(req, desc->flags, NULL, NULL);
- skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
-
- ret = crypto_skcipher_decrypt(req);
-
- skcipher_request_zero(req);
- return ret;
-}
-
-static int xts_fallback_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int xts_fallback_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int len)
{
- struct crypto_blkcipher *tfm = desc->tfm;
- struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(tfm);
- SYNC_SKCIPHER_REQUEST_ON_STACK(req, xts_ctx->fallback);
- unsigned int ret;
-
- skcipher_request_set_sync_tfm(req, xts_ctx->fallback);
- skcipher_request_set_callback(req, desc->flags, NULL, NULL);
- skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
-
- ret = crypto_skcipher_encrypt(req);
+ struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
+ int ret;
- skcipher_request_zero(req);
+ crypto_skcipher_clear_flags(xts_ctx->fallback, CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_set_flags(xts_ctx->fallback,
+ crypto_skcipher_get_flags(tfm) &
+ CRYPTO_TFM_REQ_MASK);
+ ret = crypto_skcipher_setkey(xts_ctx->fallback, key, len);
+ crypto_skcipher_set_flags(tfm,
+ crypto_skcipher_get_flags(xts_ctx->fallback) &
+ CRYPTO_TFM_RES_MASK);
return ret;
}
-static int xts_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+static int xts_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
unsigned int key_len)
{
- struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
+ struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
unsigned long fc;
int err;
@@ -518,7 +415,7 @@ static int xts_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
/* In fips mode only 128 bit or 256 bit keys are valid */
if (fips_enabled && key_len != 32 && key_len != 64) {
- tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
@@ -539,10 +436,11 @@ static int xts_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
return 0;
}
-static int xts_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
- struct blkcipher_walk *walk)
+static int xts_aes_crypt(struct skcipher_request *req, unsigned long modifier)
{
- struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
unsigned int offset, nbytes, n;
int ret;
struct {
@@ -557,113 +455,100 @@ static int xts_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
u8 init[16];
} xts_param;
- ret = blkcipher_walk_virt(desc, walk);
+ if (req->cryptlen < AES_BLOCK_SIZE)
+ return -EINVAL;
+
+ if (unlikely(!xts_ctx->fc || (req->cryptlen % AES_BLOCK_SIZE) != 0)) {
+ struct skcipher_request *subreq = skcipher_request_ctx(req);
+
+ *subreq = *req;
+ skcipher_request_set_tfm(subreq, xts_ctx->fallback);
+ return (modifier & CPACF_DECRYPT) ?
+ crypto_skcipher_decrypt(subreq) :
+ crypto_skcipher_encrypt(subreq);
+ }
+
+ ret = skcipher_walk_virt(&walk, req, false);
+ if (ret)
+ return ret;
offset = xts_ctx->key_len & 0x10;
memset(pcc_param.block, 0, sizeof(pcc_param.block));
memset(pcc_param.bit, 0, sizeof(pcc_param.bit));
memset(pcc_param.xts, 0, sizeof(pcc_param.xts));
- memcpy(pcc_param.tweak, walk->iv, sizeof(pcc_param.tweak));
+ memcpy(pcc_param.tweak, walk.iv, sizeof(pcc_param.tweak));
memcpy(pcc_param.key + offset, xts_ctx->pcc_key, xts_ctx->key_len);
cpacf_pcc(xts_ctx->fc, pcc_param.key + offset);
memcpy(xts_param.key + offset, xts_ctx->key, xts_ctx->key_len);
memcpy(xts_param.init, pcc_param.xts, 16);
- while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
+ while ((nbytes = walk.nbytes) != 0) {
/* only use complete blocks */
n = nbytes & ~(AES_BLOCK_SIZE - 1);
cpacf_km(xts_ctx->fc | modifier, xts_param.key + offset,
- walk->dst.virt.addr, walk->src.virt.addr, n);
- ret = blkcipher_walk_done(desc, walk, nbytes - n);
+ walk.dst.virt.addr, walk.src.virt.addr, n);
+ ret = skcipher_walk_done(&walk, nbytes - n);
}
return ret;
}
-static int xts_aes_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int xts_aes_encrypt(struct skcipher_request *req)
{
- struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
- struct blkcipher_walk walk;
-
- if (!nbytes)
- return -EINVAL;
-
- if (unlikely(!xts_ctx->fc || (nbytes % XTS_BLOCK_SIZE) != 0))
- return xts_fallback_encrypt(desc, dst, src, nbytes);
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- return xts_aes_crypt(desc, 0, &walk);
+ return xts_aes_crypt(req, 0);
}
-static int xts_aes_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int xts_aes_decrypt(struct skcipher_request *req)
{
- struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
- struct blkcipher_walk walk;
-
- if (!nbytes)
- return -EINVAL;
-
- if (unlikely(!xts_ctx->fc || (nbytes % XTS_BLOCK_SIZE) != 0))
- return xts_fallback_decrypt(desc, dst, src, nbytes);
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- return xts_aes_crypt(desc, CPACF_DECRYPT, &walk);
+ return xts_aes_crypt(req, CPACF_DECRYPT);
}
-static int xts_fallback_init(struct crypto_tfm *tfm)
+static int xts_fallback_init(struct crypto_skcipher *tfm)
{
- const char *name = tfm->__crt_alg->cra_name;
- struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
+ const char *name = crypto_tfm_alg_name(&tfm->base);
+ struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
- xts_ctx->fallback = crypto_alloc_sync_skcipher(name, 0,
- CRYPTO_ALG_NEED_FALLBACK);
+ xts_ctx->fallback = crypto_alloc_skcipher(name, 0,
+ CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC);
if (IS_ERR(xts_ctx->fallback)) {
pr_err("Allocating XTS fallback algorithm %s failed\n",
name);
return PTR_ERR(xts_ctx->fallback);
}
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) +
+ crypto_skcipher_reqsize(xts_ctx->fallback));
return 0;
}
-static void xts_fallback_exit(struct crypto_tfm *tfm)
+static void xts_fallback_exit(struct crypto_skcipher *tfm)
{
- struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
+ struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
- crypto_free_sync_skcipher(xts_ctx->fallback);
+ crypto_free_skcipher(xts_ctx->fallback);
}
-static struct crypto_alg xts_aes_alg = {
- .cra_name = "xts(aes)",
- .cra_driver_name = "xts-aes-s390",
- .cra_priority = 402, /* ecb-aes-s390 + 1 */
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_NEED_FALLBACK,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct s390_xts_ctx),
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = xts_fallback_init,
- .cra_exit = xts_fallback_exit,
- .cra_u = {
- .blkcipher = {
- .min_keysize = 2 * AES_MIN_KEY_SIZE,
- .max_keysize = 2 * AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = xts_aes_set_key,
- .encrypt = xts_aes_encrypt,
- .decrypt = xts_aes_decrypt,
- }
- }
+static struct skcipher_alg xts_aes_alg = {
+ .base.cra_name = "xts(aes)",
+ .base.cra_driver_name = "xts-aes-s390",
+ .base.cra_priority = 402, /* ecb-aes-s390 + 1 */
+ .base.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct s390_xts_ctx),
+ .base.cra_module = THIS_MODULE,
+ .init = xts_fallback_init,
+ .exit = xts_fallback_exit,
+ .min_keysize = 2 * AES_MIN_KEY_SIZE,
+ .max_keysize = 2 * AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = xts_aes_set_key,
+ .encrypt = xts_aes_encrypt,
+ .decrypt = xts_aes_decrypt,
};
-static int ctr_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+static int ctr_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
unsigned int key_len)
{
- struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
+ struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
unsigned long fc;
/* Pick the correct function code based on the key length */
@@ -674,7 +559,7 @@ static int ctr_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
/* Check if the function code is available */
sctx->fc = (fc && cpacf_test_func(&kmctr_functions, fc)) ? fc : 0;
if (!sctx->fc)
- return setkey_fallback_blk(tfm, in_key, key_len);
+ return setkey_fallback_skcipher(tfm, in_key, key_len);
sctx->key_len = key_len;
memcpy(sctx->key, in_key, key_len);
@@ -696,30 +581,34 @@ static unsigned int __ctrblk_init(u8 *ctrptr, u8 *iv, unsigned int nbytes)
return n;
}
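/*
 * __ctrblk_init() is untouched by this patch; for context, a sketch
 * (reconstruction, assuming the usual s390 pattern): it replicates the
 * IV into the shared ctrblk page as consecutive counter values so that
 * a single CPACF KMCTR call can process up to a page of blocks.
 */
static unsigned int __ctrblk_init(u8 *ctrptr, u8 *iv, unsigned int nbytes)
{
	unsigned int i, n;

	/* only use complete blocks, at most PAGE_SIZE */
	memcpy(ctrptr, iv, AES_BLOCK_SIZE);
	n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(AES_BLOCK_SIZE - 1);
	for (i = (n / AES_BLOCK_SIZE) - 1; i > 0; i--) {
		memcpy(ctrptr + AES_BLOCK_SIZE, ctrptr, AES_BLOCK_SIZE);
		crypto_inc(ctrptr + AES_BLOCK_SIZE, AES_BLOCK_SIZE);
		ctrptr += AES_BLOCK_SIZE;
	}
	return n;
}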
-static int ctr_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
- struct blkcipher_walk *walk)
+static int ctr_aes_crypt(struct skcipher_request *req)
{
- struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
u8 buf[AES_BLOCK_SIZE], *ctrptr;
+ struct skcipher_walk walk;
unsigned int n, nbytes;
int ret, locked;
+ if (unlikely(!sctx->fc))
+ return fallback_skcipher_crypt(sctx, req, 0);
+
locked = mutex_trylock(&ctrblk_lock);
- ret = blkcipher_walk_virt_block(desc, walk, AES_BLOCK_SIZE);
- while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
+ ret = skcipher_walk_virt(&walk, req, false);
+ while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
n = AES_BLOCK_SIZE;
+
if (nbytes >= 2*AES_BLOCK_SIZE && locked)
- n = __ctrblk_init(ctrblk, walk->iv, nbytes);
- ctrptr = (n > AES_BLOCK_SIZE) ? ctrblk : walk->iv;
- cpacf_kmctr(sctx->fc | modifier, sctx->key,
- walk->dst.virt.addr, walk->src.virt.addr,
- n, ctrptr);
+ n = __ctrblk_init(ctrblk, walk.iv, nbytes);
+ ctrptr = (n > AES_BLOCK_SIZE) ? ctrblk : walk.iv;
+ cpacf_kmctr(sctx->fc, sctx->key, walk.dst.virt.addr,
+ walk.src.virt.addr, n, ctrptr);
if (ctrptr == ctrblk)
- memcpy(walk->iv, ctrptr + n - AES_BLOCK_SIZE,
+ memcpy(walk.iv, ctrptr + n - AES_BLOCK_SIZE,
AES_BLOCK_SIZE);
- crypto_inc(walk->iv, AES_BLOCK_SIZE);
- ret = blkcipher_walk_done(desc, walk, nbytes - n);
+ crypto_inc(walk.iv, AES_BLOCK_SIZE);
+ ret = skcipher_walk_done(&walk, nbytes - n);
}
if (locked)
mutex_unlock(&ctrblk_lock);
@@ -727,67 +616,33 @@ static int ctr_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
* final block may be < AES_BLOCK_SIZE, copy only nbytes
*/
if (nbytes) {
- cpacf_kmctr(sctx->fc | modifier, sctx->key,
- buf, walk->src.virt.addr,
- AES_BLOCK_SIZE, walk->iv);
- memcpy(walk->dst.virt.addr, buf, nbytes);
- crypto_inc(walk->iv, AES_BLOCK_SIZE);
- ret = blkcipher_walk_done(desc, walk, 0);
+ cpacf_kmctr(sctx->fc, sctx->key, buf, walk.src.virt.addr,
+ AES_BLOCK_SIZE, walk.iv);
+ memcpy(walk.dst.virt.addr, buf, nbytes);
+ crypto_inc(walk.iv, AES_BLOCK_SIZE);
+ ret = skcipher_walk_done(&walk, 0);
}
return ret;
}
-static int ctr_aes_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
-{
- struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
- struct blkcipher_walk walk;
-
- if (unlikely(!sctx->fc))
- return fallback_blk_enc(desc, dst, src, nbytes);
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- return ctr_aes_crypt(desc, 0, &walk);
-}
-
-static int ctr_aes_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
-{
- struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
- struct blkcipher_walk walk;
-
- if (unlikely(!sctx->fc))
- return fallback_blk_dec(desc, dst, src, nbytes);
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- return ctr_aes_crypt(desc, CPACF_DECRYPT, &walk);
-}
-
-static struct crypto_alg ctr_aes_alg = {
- .cra_name = "ctr(aes)",
- .cra_driver_name = "ctr-aes-s390",
- .cra_priority = 402, /* ecb-aes-s390 + 1 */
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_NEED_FALLBACK,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct s390_aes_ctx),
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = fallback_init_blk,
- .cra_exit = fallback_exit_blk,
- .cra_u = {
- .blkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = ctr_aes_set_key,
- .encrypt = ctr_aes_encrypt,
- .decrypt = ctr_aes_decrypt,
- }
- }
+static struct skcipher_alg ctr_aes_alg = {
+ .base.cra_name = "ctr(aes)",
+ .base.cra_driver_name = "ctr-aes-s390",
+ .base.cra_priority = 402, /* ecb-aes-s390 + 1 */
+ .base.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct s390_aes_ctx),
+ .base.cra_module = THIS_MODULE,
+ .init = fallback_init_skcipher,
+ .exit = fallback_exit_skcipher,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = ctr_aes_set_key,
+ .encrypt = ctr_aes_crypt,
+ .decrypt = ctr_aes_crypt,
+ .chunksize = AES_BLOCK_SIZE,
};
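/*
 * Design note on the registration above: CTR is a stream mode, so
 * base.cra_blocksize is 1 and .chunksize advertises the underlying
 * AES block size to the walk code; .encrypt and .decrypt share a
 * single handler because both directions are the same XOR with
 * E_k(counter).  Schematically (illustration only, not driver code):
 */
static void ctr_xor_keystream(u8 *dst, const u8 *src,
			      const u8 *keystream, unsigned int len)
{
	/* ct = pt ^ ks and pt = ct ^ ks - one transform serves both */
	while (len--)
		*dst++ = *src++ ^ *keystream++;
}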
static int gcm_aes_setkey(struct crypto_aead *tfm, const u8 *key,
@@ -1116,24 +971,27 @@ static struct aead_alg gcm_aes_aead = {
},
};
-static struct crypto_alg *aes_s390_algs_ptr[5];
-static int aes_s390_algs_num;
+static struct crypto_alg *aes_s390_alg;
+static struct skcipher_alg *aes_s390_skcipher_algs[4];
+static int aes_s390_skciphers_num;
static struct aead_alg *aes_s390_aead_alg;
-static int aes_s390_register_alg(struct crypto_alg *alg)
+static int aes_s390_register_skcipher(struct skcipher_alg *alg)
{
int ret;
- ret = crypto_register_alg(alg);
+ ret = crypto_register_skcipher(alg);
if (!ret)
- aes_s390_algs_ptr[aes_s390_algs_num++] = alg;
+ aes_s390_skcipher_algs[aes_s390_skciphers_num++] = alg;
return ret;
}
static void aes_s390_fini(void)
{
- while (aes_s390_algs_num--)
- crypto_unregister_alg(aes_s390_algs_ptr[aes_s390_algs_num]);
+ if (aes_s390_alg)
+ crypto_unregister_alg(aes_s390_alg);
+ while (aes_s390_skciphers_num--)
+ crypto_unregister_skcipher(aes_s390_skcipher_algs[aes_s390_skciphers_num]);
if (ctrblk)
free_page((unsigned long) ctrblk);
@@ -1154,10 +1012,11 @@ static int __init aes_s390_init(void)
if (cpacf_test_func(&km_functions, CPACF_KM_AES_128) ||
cpacf_test_func(&km_functions, CPACF_KM_AES_192) ||
cpacf_test_func(&km_functions, CPACF_KM_AES_256)) {
- ret = aes_s390_register_alg(&aes_alg);
+ ret = crypto_register_alg(&aes_alg);
if (ret)
goto out_err;
- ret = aes_s390_register_alg(&ecb_aes_alg);
+ aes_s390_alg = &aes_alg;
+ ret = aes_s390_register_skcipher(&ecb_aes_alg);
if (ret)
goto out_err;
}
@@ -1165,14 +1024,14 @@ static int __init aes_s390_init(void)
if (cpacf_test_func(&kmc_functions, CPACF_KMC_AES_128) ||
cpacf_test_func(&kmc_functions, CPACF_KMC_AES_192) ||
cpacf_test_func(&kmc_functions, CPACF_KMC_AES_256)) {
- ret = aes_s390_register_alg(&cbc_aes_alg);
+ ret = aes_s390_register_skcipher(&cbc_aes_alg);
if (ret)
goto out_err;
}
if (cpacf_test_func(&km_functions, CPACF_KM_XTS_128) ||
cpacf_test_func(&km_functions, CPACF_KM_XTS_256)) {
- ret = aes_s390_register_alg(&xts_aes_alg);
+ ret = aes_s390_register_skcipher(&xts_aes_alg);
if (ret)
goto out_err;
}
@@ -1185,7 +1044,7 @@ static int __init aes_s390_init(void)
ret = -ENOMEM;
goto out_err;
}
- ret = aes_s390_register_alg(&ctr_aes_alg);
+ ret = aes_s390_register_skcipher(&ctr_aes_alg);
if (ret)
goto out_err;
}
diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c
index 439b100c6f2e..bfbafd35bcbd 100644
--- a/arch/s390/crypto/des_s390.c
+++ b/arch/s390/crypto/des_s390.c
@@ -17,6 +17,7 @@
#include <linux/mutex.h>
#include <crypto/algapi.h>
#include <crypto/internal/des.h>
+#include <crypto/internal/skcipher.h>
#include <asm/cpacf.h>
#define DES3_KEY_SIZE (3 * DES_KEY_SIZE)
@@ -45,6 +46,12 @@ static int des_setkey(struct crypto_tfm *tfm, const u8 *key,
return 0;
}
+static int des_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int key_len)
+{
+ return des_setkey(crypto_skcipher_tfm(tfm), key, key_len);
+}
+
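/*
 * des_setkey() itself (above this hunk) keeps its crypto_tfm
 * signature so the plain cipher alg can continue to use it; the
 * wrapper merely bridges the skcipher tfm type.  A sketch of the
 * wrapped function, assuming it is built on the shared helper from
 * <crypto/internal/des.h>:
 */
static int des_setkey(struct crypto_tfm *tfm, const u8 *key,
		      unsigned int key_len)
{
	struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm);
	int err;

	/* reject weak keys via the common DES library check */
	err = crypto_des_verify_key(tfm, key);
	if (err)
		return err;

	memcpy(ctx->key, key, DES_KEY_SIZE);
	return 0;
}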
static void s390_des_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm);
@@ -79,28 +86,30 @@ static struct crypto_alg des_alg = {
}
};
-static int ecb_desall_crypt(struct blkcipher_desc *desc, unsigned long fc,
- struct blkcipher_walk *walk)
+static int ecb_desall_crypt(struct skcipher_request *req, unsigned long fc)
{
- struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct s390_des_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
unsigned int nbytes, n;
int ret;
- ret = blkcipher_walk_virt(desc, walk);
- while ((nbytes = walk->nbytes) >= DES_BLOCK_SIZE) {
+ ret = skcipher_walk_virt(&walk, req, false);
+ while ((nbytes = walk.nbytes) != 0) {
/* only use complete blocks */
n = nbytes & ~(DES_BLOCK_SIZE - 1);
- cpacf_km(fc, ctx->key, walk->dst.virt.addr,
- walk->src.virt.addr, n);
- ret = blkcipher_walk_done(desc, walk, nbytes - n);
+ cpacf_km(fc, ctx->key, walk.dst.virt.addr,
+ walk.src.virt.addr, n);
+ ret = skcipher_walk_done(&walk, nbytes - n);
}
return ret;
}
-static int cbc_desall_crypt(struct blkcipher_desc *desc, unsigned long fc,
- struct blkcipher_walk *walk)
+static int cbc_desall_crypt(struct skcipher_request *req, unsigned long fc)
{
- struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct s390_des_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
unsigned int nbytes, n;
int ret;
struct {
@@ -108,99 +117,69 @@ static int cbc_desall_crypt(struct blkcipher_desc *desc, unsigned long fc,
u8 key[DES3_KEY_SIZE];
} param;
- ret = blkcipher_walk_virt(desc, walk);
- memcpy(param.iv, walk->iv, DES_BLOCK_SIZE);
+ ret = skcipher_walk_virt(&walk, req, false);
+ if (ret)
+ return ret;
+ memcpy(param.iv, walk.iv, DES_BLOCK_SIZE);
memcpy(param.key, ctx->key, DES3_KEY_SIZE);
- while ((nbytes = walk->nbytes) >= DES_BLOCK_SIZE) {
+ while ((nbytes = walk.nbytes) != 0) {
/* only use complete blocks */
n = nbytes & ~(DES_BLOCK_SIZE - 1);
- cpacf_kmc(fc, &param, walk->dst.virt.addr,
- walk->src.virt.addr, n);
- ret = blkcipher_walk_done(desc, walk, nbytes - n);
+ cpacf_kmc(fc, &param, walk.dst.virt.addr,
+ walk.src.virt.addr, n);
+ memcpy(walk.iv, param.iv, DES_BLOCK_SIZE);
+ ret = skcipher_walk_done(&walk, nbytes - n);
}
- memcpy(walk->iv, param.iv, DES_BLOCK_SIZE);
return ret;
}
-static int ecb_des_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int ecb_des_encrypt(struct skcipher_request *req)
{
- struct blkcipher_walk walk;
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- return ecb_desall_crypt(desc, CPACF_KM_DEA, &walk);
+ return ecb_desall_crypt(req, CPACF_KM_DEA);
}
-static int ecb_des_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int ecb_des_decrypt(struct skcipher_request *req)
{
- struct blkcipher_walk walk;
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- return ecb_desall_crypt(desc, CPACF_KM_DEA | CPACF_DECRYPT, &walk);
+ return ecb_desall_crypt(req, CPACF_KM_DEA | CPACF_DECRYPT);
}
-static struct crypto_alg ecb_des_alg = {
- .cra_name = "ecb(des)",
- .cra_driver_name = "ecb-des-s390",
- .cra_priority = 400, /* combo: des + ecb */
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct s390_des_ctx),
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .setkey = des_setkey,
- .encrypt = ecb_des_encrypt,
- .decrypt = ecb_des_decrypt,
- }
- }
+static struct skcipher_alg ecb_des_alg = {
+ .base.cra_name = "ecb(des)",
+ .base.cra_driver_name = "ecb-des-s390",
+ .base.cra_priority = 400, /* combo: des + ecb */
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct s390_des_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .setkey = des_setkey_skcipher,
+ .encrypt = ecb_des_encrypt,
+ .decrypt = ecb_des_decrypt,
};
-static int cbc_des_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int cbc_des_encrypt(struct skcipher_request *req)
{
- struct blkcipher_walk walk;
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- return cbc_desall_crypt(desc, CPACF_KMC_DEA, &walk);
+ return cbc_desall_crypt(req, CPACF_KMC_DEA);
}
-static int cbc_des_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int cbc_des_decrypt(struct skcipher_request *req)
{
- struct blkcipher_walk walk;
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- return cbc_desall_crypt(desc, CPACF_KMC_DEA | CPACF_DECRYPT, &walk);
+ return cbc_desall_crypt(req, CPACF_KMC_DEA | CPACF_DECRYPT);
}
-static struct crypto_alg cbc_des_alg = {
- .cra_name = "cbc(des)",
- .cra_driver_name = "cbc-des-s390",
- .cra_priority = 400, /* combo: des + cbc */
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct s390_des_ctx),
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = des_setkey,
- .encrypt = cbc_des_encrypt,
- .decrypt = cbc_des_decrypt,
- }
- }
+static struct skcipher_alg cbc_des_alg = {
+ .base.cra_name = "cbc(des)",
+ .base.cra_driver_name = "cbc-des-s390",
+ .base.cra_priority = 400, /* combo: des + cbc */
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct s390_des_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = des_setkey_skcipher,
+ .encrypt = cbc_des_encrypt,
+ .decrypt = cbc_des_decrypt,
};
/*
@@ -232,6 +211,12 @@ static int des3_setkey(struct crypto_tfm *tfm, const u8 *key,
return 0;
}
+static int des3_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int key_len)
+{
+ return des3_setkey(crypto_skcipher_tfm(tfm), key, key_len);
+}
+
static void des3_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm);
@@ -266,87 +251,53 @@ static struct crypto_alg des3_alg = {
}
};
-static int ecb_des3_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int ecb_des3_encrypt(struct skcipher_request *req)
{
- struct blkcipher_walk walk;
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- return ecb_desall_crypt(desc, CPACF_KM_TDEA_192, &walk);
+ return ecb_desall_crypt(req, CPACF_KM_TDEA_192);
}
-static int ecb_des3_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int ecb_des3_decrypt(struct skcipher_request *req)
{
- struct blkcipher_walk walk;
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- return ecb_desall_crypt(desc, CPACF_KM_TDEA_192 | CPACF_DECRYPT,
- &walk);
+ return ecb_desall_crypt(req, CPACF_KM_TDEA_192 | CPACF_DECRYPT);
}
-static struct crypto_alg ecb_des3_alg = {
- .cra_name = "ecb(des3_ede)",
- .cra_driver_name = "ecb-des3_ede-s390",
- .cra_priority = 400, /* combo: des3 + ecb */
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct s390_des_ctx),
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = DES3_KEY_SIZE,
- .max_keysize = DES3_KEY_SIZE,
- .setkey = des3_setkey,
- .encrypt = ecb_des3_encrypt,
- .decrypt = ecb_des3_decrypt,
- }
- }
+static struct skcipher_alg ecb_des3_alg = {
+ .base.cra_name = "ecb(des3_ede)",
+ .base.cra_driver_name = "ecb-des3_ede-s390",
+ .base.cra_priority = 400, /* combo: des3 + ecb */
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct s390_des_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = DES3_KEY_SIZE,
+ .max_keysize = DES3_KEY_SIZE,
+ .setkey = des3_setkey_skcipher,
+ .encrypt = ecb_des3_encrypt,
+ .decrypt = ecb_des3_decrypt,
};
-static int cbc_des3_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int cbc_des3_encrypt(struct skcipher_request *req)
{
- struct blkcipher_walk walk;
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- return cbc_desall_crypt(desc, CPACF_KMC_TDEA_192, &walk);
+ return cbc_desall_crypt(req, CPACF_KMC_TDEA_192);
}
-static int cbc_des3_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int cbc_des3_decrypt(struct skcipher_request *req)
{
- struct blkcipher_walk walk;
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- return cbc_desall_crypt(desc, CPACF_KMC_TDEA_192 | CPACF_DECRYPT,
- &walk);
+ return cbc_desall_crypt(req, CPACF_KMC_TDEA_192 | CPACF_DECRYPT);
}
-static struct crypto_alg cbc_des3_alg = {
- .cra_name = "cbc(des3_ede)",
- .cra_driver_name = "cbc-des3_ede-s390",
- .cra_priority = 400, /* combo: des3 + cbc */
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct s390_des_ctx),
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = DES3_KEY_SIZE,
- .max_keysize = DES3_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = des3_setkey,
- .encrypt = cbc_des3_encrypt,
- .decrypt = cbc_des3_decrypt,
- }
- }
+static struct skcipher_alg cbc_des3_alg = {
+ .base.cra_name = "cbc(des3_ede)",
+ .base.cra_driver_name = "cbc-des3_ede-s390",
+ .base.cra_priority = 400, /* combo: des3 + cbc */
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct s390_des_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = DES3_KEY_SIZE,
+ .max_keysize = DES3_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = des3_setkey_skcipher,
+ .encrypt = cbc_des3_encrypt,
+ .decrypt = cbc_des3_decrypt,
};
static unsigned int __ctrblk_init(u8 *ctrptr, u8 *iv, unsigned int nbytes)
@@ -364,128 +315,90 @@ static unsigned int __ctrblk_init(u8 *ctrptr, u8 *iv, unsigned int nbytes)
return n;
}
-static int ctr_desall_crypt(struct blkcipher_desc *desc, unsigned long fc,
- struct blkcipher_walk *walk)
+static int ctr_desall_crypt(struct skcipher_request *req, unsigned long fc)
{
- struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct s390_des_ctx *ctx = crypto_skcipher_ctx(tfm);
u8 buf[DES_BLOCK_SIZE], *ctrptr;
+ struct skcipher_walk walk;
unsigned int n, nbytes;
int ret, locked;
locked = mutex_trylock(&ctrblk_lock);
- ret = blkcipher_walk_virt_block(desc, walk, DES_BLOCK_SIZE);
- while ((nbytes = walk->nbytes) >= DES_BLOCK_SIZE) {
+ ret = skcipher_walk_virt(&walk, req, false);
+ while ((nbytes = walk.nbytes) >= DES_BLOCK_SIZE) {
n = DES_BLOCK_SIZE;
if (nbytes >= 2*DES_BLOCK_SIZE && locked)
- n = __ctrblk_init(ctrblk, walk->iv, nbytes);
- ctrptr = (n > DES_BLOCK_SIZE) ? ctrblk : walk->iv;
- cpacf_kmctr(fc, ctx->key, walk->dst.virt.addr,
- walk->src.virt.addr, n, ctrptr);
+ n = __ctrblk_init(ctrblk, walk.iv, nbytes);
+ ctrptr = (n > DES_BLOCK_SIZE) ? ctrblk : walk.iv;
+ cpacf_kmctr(fc, ctx->key, walk.dst.virt.addr,
+ walk.src.virt.addr, n, ctrptr);
if (ctrptr == ctrblk)
- memcpy(walk->iv, ctrptr + n - DES_BLOCK_SIZE,
+ memcpy(walk.iv, ctrptr + n - DES_BLOCK_SIZE,
DES_BLOCK_SIZE);
- crypto_inc(walk->iv, DES_BLOCK_SIZE);
- ret = blkcipher_walk_done(desc, walk, nbytes - n);
+ crypto_inc(walk.iv, DES_BLOCK_SIZE);
+ ret = skcipher_walk_done(&walk, nbytes - n);
}
if (locked)
mutex_unlock(&ctrblk_lock);
/* final block may be < DES_BLOCK_SIZE, copy only nbytes */
if (nbytes) {
- cpacf_kmctr(fc, ctx->key, buf, walk->src.virt.addr,
- DES_BLOCK_SIZE, walk->iv);
- memcpy(walk->dst.virt.addr, buf, nbytes);
- crypto_inc(walk->iv, DES_BLOCK_SIZE);
- ret = blkcipher_walk_done(desc, walk, 0);
+ cpacf_kmctr(fc, ctx->key, buf, walk.src.virt.addr,
+ DES_BLOCK_SIZE, walk.iv);
+ memcpy(walk.dst.virt.addr, buf, nbytes);
+ crypto_inc(walk.iv, DES_BLOCK_SIZE);
+ ret = skcipher_walk_done(&walk, 0);
}
return ret;
}
-static int ctr_des_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
-{
- struct blkcipher_walk walk;
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- return ctr_desall_crypt(desc, CPACF_KMCTR_DEA, &walk);
-}
-
-static int ctr_des_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int ctr_des_crypt(struct skcipher_request *req)
{
- struct blkcipher_walk walk;
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- return ctr_desall_crypt(desc, CPACF_KMCTR_DEA | CPACF_DECRYPT, &walk);
+ return ctr_desall_crypt(req, CPACF_KMCTR_DEA);
}
-static struct crypto_alg ctr_des_alg = {
- .cra_name = "ctr(des)",
- .cra_driver_name = "ctr-des-s390",
- .cra_priority = 400, /* combo: des + ctr */
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct s390_des_ctx),
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = des_setkey,
- .encrypt = ctr_des_encrypt,
- .decrypt = ctr_des_decrypt,
- }
- }
+static struct skcipher_alg ctr_des_alg = {
+ .base.cra_name = "ctr(des)",
+ .base.cra_driver_name = "ctr-des-s390",
+ .base.cra_priority = 400, /* combo: des + ctr */
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct s390_des_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = des_setkey_skcipher,
+ .encrypt = ctr_des_crypt,
+ .decrypt = ctr_des_crypt,
+ .chunksize = DES_BLOCK_SIZE,
};
-static int ctr_des3_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
-{
- struct blkcipher_walk walk;
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- return ctr_desall_crypt(desc, CPACF_KMCTR_TDEA_192, &walk);
-}
-
-static int ctr_des3_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int ctr_des3_crypt(struct skcipher_request *req)
{
- struct blkcipher_walk walk;
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- return ctr_desall_crypt(desc, CPACF_KMCTR_TDEA_192 | CPACF_DECRYPT,
- &walk);
+ return ctr_desall_crypt(req, CPACF_KMCTR_TDEA_192);
}
-static struct crypto_alg ctr_des3_alg = {
- .cra_name = "ctr(des3_ede)",
- .cra_driver_name = "ctr-des3_ede-s390",
- .cra_priority = 400, /* combo: des3 + ede */
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct s390_des_ctx),
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = DES3_KEY_SIZE,
- .max_keysize = DES3_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = des3_setkey,
- .encrypt = ctr_des3_encrypt,
- .decrypt = ctr_des3_decrypt,
- }
- }
+static struct skcipher_alg ctr_des3_alg = {
+ .base.cra_name = "ctr(des3_ede)",
+ .base.cra_driver_name = "ctr-des3_ede-s390",
+ .base.cra_priority = 400, /* combo: des3 + ctr */
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct s390_des_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = DES3_KEY_SIZE,
+ .max_keysize = DES3_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = des3_setkey_skcipher,
+ .encrypt = ctr_des3_crypt,
+ .decrypt = ctr_des3_crypt,
+ .chunksize = DES_BLOCK_SIZE,
};
-static struct crypto_alg *des_s390_algs_ptr[8];
+static struct crypto_alg *des_s390_algs_ptr[2];
static int des_s390_algs_num;
+static struct skcipher_alg *des_s390_skciphers_ptr[6];
+static int des_s390_skciphers_num;
static int des_s390_register_alg(struct crypto_alg *alg)
{
@@ -497,10 +410,22 @@ static int des_s390_register_alg(struct crypto_alg *alg)
return ret;
}
+static int des_s390_register_skcipher(struct skcipher_alg *alg)
+{
+ int ret;
+
+ ret = crypto_register_skcipher(alg);
+ if (!ret)
+ des_s390_skciphers_ptr[des_s390_skciphers_num++] = alg;
+ return ret;
+}
+
static void des_s390_exit(void)
{
while (des_s390_algs_num--)
crypto_unregister_alg(des_s390_algs_ptr[des_s390_algs_num]);
+ while (des_s390_skciphers_num--)
+ crypto_unregister_skcipher(des_s390_skciphers_ptr[des_s390_skciphers_num]);
if (ctrblk)
free_page((unsigned long) ctrblk);
}
@@ -518,12 +443,12 @@ static int __init des_s390_init(void)
ret = des_s390_register_alg(&des_alg);
if (ret)
goto out_err;
- ret = des_s390_register_alg(&ecb_des_alg);
+ ret = des_s390_register_skcipher(&ecb_des_alg);
if (ret)
goto out_err;
}
if (cpacf_test_func(&kmc_functions, CPACF_KMC_DEA)) {
- ret = des_s390_register_alg(&cbc_des_alg);
+ ret = des_s390_register_skcipher(&cbc_des_alg);
if (ret)
goto out_err;
}
@@ -531,12 +456,12 @@ static int __init des_s390_init(void)
ret = des_s390_register_alg(&des3_alg);
if (ret)
goto out_err;
- ret = des_s390_register_alg(&ecb_des3_alg);
+ ret = des_s390_register_skcipher(&ecb_des3_alg);
if (ret)
goto out_err;
}
if (cpacf_test_func(&kmc_functions, CPACF_KMC_TDEA_192)) {
- ret = des_s390_register_alg(&cbc_des3_alg);
+ ret = des_s390_register_skcipher(&cbc_des3_alg);
if (ret)
goto out_err;
}
@@ -551,12 +476,12 @@ static int __init des_s390_init(void)
}
if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_DEA)) {
- ret = des_s390_register_alg(&ctr_des_alg);
+ ret = des_s390_register_skcipher(&ctr_des_alg);
if (ret)
goto out_err;
}
if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_TDEA_192)) {
- ret = des_s390_register_alg(&ctr_des3_alg);
+ ret = des_s390_register_skcipher(&ctr_des3_alg);
if (ret)
goto out_err;
}
diff --git a/arch/s390/crypto/paes_s390.c b/arch/s390/crypto/paes_s390.c
index 6184dceed340..c7119c617b6e 100644
--- a/arch/s390/crypto/paes_s390.c
+++ b/arch/s390/crypto/paes_s390.c
@@ -21,6 +21,7 @@
#include <linux/cpufeature.h>
#include <linux/init.h>
#include <linux/spinlock.h>
+#include <crypto/internal/skcipher.h>
#include <crypto/xts.h>
#include <asm/cpacf.h>
#include <asm/pkey.h>
@@ -123,27 +124,27 @@ static int __paes_set_key(struct s390_paes_ctx *ctx)
return ctx->fc ? 0 : -EINVAL;
}
-static int ecb_paes_init(struct crypto_tfm *tfm)
+static int ecb_paes_init(struct crypto_skcipher *tfm)
{
- struct s390_paes_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
ctx->kb.key = NULL;
return 0;
}
-static void ecb_paes_exit(struct crypto_tfm *tfm)
+static void ecb_paes_exit(struct crypto_skcipher *tfm)
{
- struct s390_paes_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
_free_kb_keybuf(&ctx->kb);
}
-static int ecb_paes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+static int ecb_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
unsigned int key_len)
{
int rc;
- struct s390_paes_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
_free_kb_keybuf(&ctx->kb);
rc = _copy_key_to_kb(&ctx->kb, in_key, key_len);
@@ -151,91 +152,75 @@ static int ecb_paes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
return rc;
if (__paes_set_key(ctx)) {
- tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
return 0;
}
-static int ecb_paes_crypt(struct blkcipher_desc *desc,
- unsigned long modifier,
- struct blkcipher_walk *walk)
+static int ecb_paes_crypt(struct skcipher_request *req, unsigned long modifier)
{
- struct s390_paes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
unsigned int nbytes, n, k;
int ret;
- ret = blkcipher_walk_virt(desc, walk);
- while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
+ ret = skcipher_walk_virt(&walk, req, false);
+ while ((nbytes = walk.nbytes) != 0) {
/* only use complete blocks */
n = nbytes & ~(AES_BLOCK_SIZE - 1);
k = cpacf_km(ctx->fc | modifier, ctx->pk.protkey,
- walk->dst.virt.addr, walk->src.virt.addr, n);
+ walk.dst.virt.addr, walk.src.virt.addr, n);
if (k)
- ret = blkcipher_walk_done(desc, walk, nbytes - k);
+ ret = skcipher_walk_done(&walk, nbytes - k);
if (k < n) {
if (__paes_set_key(ctx) != 0)
- return blkcipher_walk_done(desc, walk, -EIO);
+ return skcipher_walk_done(&walk, -EIO);
}
}
return ret;
}
-static int ecb_paes_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int ecb_paes_encrypt(struct skcipher_request *req)
{
- struct blkcipher_walk walk;
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- return ecb_paes_crypt(desc, CPACF_ENCRYPT, &walk);
+ return ecb_paes_crypt(req, 0);
}
-static int ecb_paes_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int ecb_paes_decrypt(struct skcipher_request *req)
{
- struct blkcipher_walk walk;
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- return ecb_paes_crypt(desc, CPACF_DECRYPT, &walk);
+ return ecb_paes_crypt(req, CPACF_DECRYPT);
}
-static struct crypto_alg ecb_paes_alg = {
- .cra_name = "ecb(paes)",
- .cra_driver_name = "ecb-paes-s390",
- .cra_priority = 401, /* combo: aes + ecb + 1 */
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct s390_paes_ctx),
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT(ecb_paes_alg.cra_list),
- .cra_init = ecb_paes_init,
- .cra_exit = ecb_paes_exit,
- .cra_u = {
- .blkcipher = {
- .min_keysize = PAES_MIN_KEYSIZE,
- .max_keysize = PAES_MAX_KEYSIZE,
- .setkey = ecb_paes_set_key,
- .encrypt = ecb_paes_encrypt,
- .decrypt = ecb_paes_decrypt,
- }
- }
+static struct skcipher_alg ecb_paes_alg = {
+ .base.cra_name = "ecb(paes)",
+ .base.cra_driver_name = "ecb-paes-s390",
+ .base.cra_priority = 401, /* combo: aes + ecb + 1 */
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct s390_paes_ctx),
+ .base.cra_module = THIS_MODULE,
+ .base.cra_list = LIST_HEAD_INIT(ecb_paes_alg.base.cra_list),
+ .init = ecb_paes_init,
+ .exit = ecb_paes_exit,
+ .min_keysize = PAES_MIN_KEYSIZE,
+ .max_keysize = PAES_MAX_KEYSIZE,
+ .setkey = ecb_paes_set_key,
+ .encrypt = ecb_paes_encrypt,
+ .decrypt = ecb_paes_decrypt,
};
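/*
 * The walk loop in ecb_paes_crypt() above adds one twist over plain
 * AES: cpacf_km() may return short (k < n) when the protected key is
 * no longer usable (an assumption about the paes design, e.g. after
 * the wrapping key changed); the driver then re-derives it from the
 * stored secure-key blob via __paes_set_key() and carries on.
 * Condensed, the pattern every paes mode in this file follows is
 * (schematic; identifiers as in this file):
 */
	while ((nbytes = walk.nbytes) != 0) {
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		k = cpacf_km(ctx->fc | modifier, ctx->pk.protkey,
			     walk.dst.virt.addr, walk.src.virt.addr, n);
		if (k)		/* k bytes were processed */
			ret = skcipher_walk_done(&walk, nbytes - k);
		if (k < n && __paes_set_key(ctx) != 0)
			return skcipher_walk_done(&walk, -EIO);
	}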
-static int cbc_paes_init(struct crypto_tfm *tfm)
+static int cbc_paes_init(struct crypto_skcipher *tfm)
{
- struct s390_paes_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
ctx->kb.key = NULL;
return 0;
}
-static void cbc_paes_exit(struct crypto_tfm *tfm)
+static void cbc_paes_exit(struct crypto_skcipher *tfm)
{
- struct s390_paes_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
_free_kb_keybuf(&ctx->kb);
}
@@ -258,11 +243,11 @@ static int __cbc_paes_set_key(struct s390_paes_ctx *ctx)
return ctx->fc ? 0 : -EINVAL;
}
-static int cbc_paes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+static int cbc_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
unsigned int key_len)
{
int rc;
- struct s390_paes_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
_free_kb_keybuf(&ctx->kb);
rc = _copy_key_to_kb(&ctx->kb, in_key, key_len);
@@ -270,16 +255,17 @@ static int cbc_paes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
return rc;
if (__cbc_paes_set_key(ctx)) {
- tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
return 0;
}
-static int cbc_paes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
- struct blkcipher_walk *walk)
+static int cbc_paes_crypt(struct skcipher_request *req, unsigned long modifier)
{
- struct s390_paes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
unsigned int nbytes, n, k;
int ret;
struct {
@@ -287,73 +273,60 @@ static int cbc_paes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
u8 key[MAXPROTKEYSIZE];
} param;
- ret = blkcipher_walk_virt(desc, walk);
- memcpy(param.iv, walk->iv, AES_BLOCK_SIZE);
+ ret = skcipher_walk_virt(&walk, req, false);
+ if (ret)
+ return ret;
+ memcpy(param.iv, walk.iv, AES_BLOCK_SIZE);
memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
- while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
+ while ((nbytes = walk.nbytes) != 0) {
/* only use complete blocks */
n = nbytes & ~(AES_BLOCK_SIZE - 1);
k = cpacf_kmc(ctx->fc | modifier, &param,
- walk->dst.virt.addr, walk->src.virt.addr, n);
- if (k)
- ret = blkcipher_walk_done(desc, walk, nbytes - k);
+ walk.dst.virt.addr, walk.src.virt.addr, n);
+ if (k) {
+ memcpy(walk.iv, param.iv, AES_BLOCK_SIZE);
+ ret = skcipher_walk_done(&walk, nbytes - k);
+ }
if (k < n) {
if (__cbc_paes_set_key(ctx) != 0)
- return blkcipher_walk_done(desc, walk, -EIO);
+ return skcipher_walk_done(&walk, -EIO);
memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
}
}
- memcpy(walk->iv, param.iv, AES_BLOCK_SIZE);
return ret;
}
-static int cbc_paes_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int cbc_paes_encrypt(struct skcipher_request *req)
{
- struct blkcipher_walk walk;
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- return cbc_paes_crypt(desc, 0, &walk);
+ return cbc_paes_crypt(req, 0);
}
-static int cbc_paes_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int cbc_paes_decrypt(struct skcipher_request *req)
{
- struct blkcipher_walk walk;
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- return cbc_paes_crypt(desc, CPACF_DECRYPT, &walk);
+ return cbc_paes_crypt(req, CPACF_DECRYPT);
}
-static struct crypto_alg cbc_paes_alg = {
- .cra_name = "cbc(paes)",
- .cra_driver_name = "cbc-paes-s390",
- .cra_priority = 402, /* ecb-paes-s390 + 1 */
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct s390_paes_ctx),
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT(cbc_paes_alg.cra_list),
- .cra_init = cbc_paes_init,
- .cra_exit = cbc_paes_exit,
- .cra_u = {
- .blkcipher = {
- .min_keysize = PAES_MIN_KEYSIZE,
- .max_keysize = PAES_MAX_KEYSIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = cbc_paes_set_key,
- .encrypt = cbc_paes_encrypt,
- .decrypt = cbc_paes_decrypt,
- }
- }
+static struct skcipher_alg cbc_paes_alg = {
+ .base.cra_name = "cbc(paes)",
+ .base.cra_driver_name = "cbc-paes-s390",
+ .base.cra_priority = 402, /* ecb-paes-s390 + 1 */
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct s390_paes_ctx),
+ .base.cra_module = THIS_MODULE,
+ .base.cra_list = LIST_HEAD_INIT(cbc_paes_alg.base.cra_list),
+ .init = cbc_paes_init,
+ .exit = cbc_paes_exit,
+ .min_keysize = PAES_MIN_KEYSIZE,
+ .max_keysize = PAES_MAX_KEYSIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = cbc_paes_set_key,
+ .encrypt = cbc_paes_encrypt,
+ .decrypt = cbc_paes_decrypt,
};
-static int xts_paes_init(struct crypto_tfm *tfm)
+static int xts_paes_init(struct crypto_skcipher *tfm)
{
- struct s390_pxts_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm);
ctx->kb[0].key = NULL;
ctx->kb[1].key = NULL;
@@ -361,9 +334,9 @@ static int xts_paes_init(struct crypto_tfm *tfm)
return 0;
}
-static void xts_paes_exit(struct crypto_tfm *tfm)
+static void xts_paes_exit(struct crypto_skcipher *tfm)
{
- struct s390_pxts_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm);
_free_kb_keybuf(&ctx->kb[0]);
_free_kb_keybuf(&ctx->kb[1]);
@@ -391,11 +364,11 @@ static int __xts_paes_set_key(struct s390_pxts_ctx *ctx)
return ctx->fc ? 0 : -EINVAL;
}
-static int xts_paes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+static int xts_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
unsigned int xts_key_len)
{
int rc;
- struct s390_pxts_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm);
u8 ckey[2 * AES_MAX_KEY_SIZE];
unsigned int ckey_len, key_len;
@@ -414,7 +387,7 @@ static int xts_paes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
return rc;
if (__xts_paes_set_key(ctx)) {
- tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
@@ -427,13 +400,14 @@ static int xts_paes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
AES_KEYSIZE_128 : AES_KEYSIZE_256;
memcpy(ckey, ctx->pk[0].protkey, ckey_len);
memcpy(ckey + ckey_len, ctx->pk[1].protkey, ckey_len);
- return xts_check_key(tfm, ckey, 2*ckey_len);
+ return xts_verify_key(tfm, ckey, 2*ckey_len);
}
-static int xts_paes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
- struct blkcipher_walk *walk)
+static int xts_paes_crypt(struct skcipher_request *req, unsigned long modifier)
{
- struct s390_pxts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
unsigned int keylen, offset, nbytes, n, k;
int ret;
struct {
@@ -448,90 +422,76 @@ static int xts_paes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
u8 init[16];
} xts_param;
- ret = blkcipher_walk_virt(desc, walk);
+ ret = skcipher_walk_virt(&walk, req, false);
+ if (ret)
+ return ret;
keylen = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ? 48 : 64;
offset = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ? 16 : 0;
retry:
memset(&pcc_param, 0, sizeof(pcc_param));
- memcpy(pcc_param.tweak, walk->iv, sizeof(pcc_param.tweak));
+ memcpy(pcc_param.tweak, walk.iv, sizeof(pcc_param.tweak));
memcpy(pcc_param.key + offset, ctx->pk[1].protkey, keylen);
cpacf_pcc(ctx->fc, pcc_param.key + offset);
memcpy(xts_param.key + offset, ctx->pk[0].protkey, keylen);
memcpy(xts_param.init, pcc_param.xts, 16);
- while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
+ while ((nbytes = walk.nbytes) != 0) {
/* only use complete blocks */
n = nbytes & ~(AES_BLOCK_SIZE - 1);
k = cpacf_km(ctx->fc | modifier, xts_param.key + offset,
- walk->dst.virt.addr, walk->src.virt.addr, n);
+ walk.dst.virt.addr, walk.src.virt.addr, n);
if (k)
- ret = blkcipher_walk_done(desc, walk, nbytes - k);
+ ret = skcipher_walk_done(&walk, nbytes - k);
if (k < n) {
if (__xts_paes_set_key(ctx) != 0)
- return blkcipher_walk_done(desc, walk, -EIO);
+ return skcipher_walk_done(&walk, -EIO);
goto retry;
}
}
return ret;
}
-static int xts_paes_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int xts_paes_encrypt(struct skcipher_request *req)
{
- struct blkcipher_walk walk;
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- return xts_paes_crypt(desc, 0, &walk);
+ return xts_paes_crypt(req, 0);
}
-static int xts_paes_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int xts_paes_decrypt(struct skcipher_request *req)
{
- struct blkcipher_walk walk;
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- return xts_paes_crypt(desc, CPACF_DECRYPT, &walk);
+ return xts_paes_crypt(req, CPACF_DECRYPT);
}
-static struct crypto_alg xts_paes_alg = {
- .cra_name = "xts(paes)",
- .cra_driver_name = "xts-paes-s390",
- .cra_priority = 402, /* ecb-paes-s390 + 1 */
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct s390_pxts_ctx),
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT(xts_paes_alg.cra_list),
- .cra_init = xts_paes_init,
- .cra_exit = xts_paes_exit,
- .cra_u = {
- .blkcipher = {
- .min_keysize = 2 * PAES_MIN_KEYSIZE,
- .max_keysize = 2 * PAES_MAX_KEYSIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = xts_paes_set_key,
- .encrypt = xts_paes_encrypt,
- .decrypt = xts_paes_decrypt,
- }
- }
+static struct skcipher_alg xts_paes_alg = {
+ .base.cra_name = "xts(paes)",
+ .base.cra_driver_name = "xts-paes-s390",
+ .base.cra_priority = 402, /* ecb-paes-s390 + 1 */
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct s390_pxts_ctx),
+ .base.cra_module = THIS_MODULE,
+ .base.cra_list = LIST_HEAD_INIT(xts_paes_alg.base.cra_list),
+ .init = xts_paes_init,
+ .exit = xts_paes_exit,
+ .min_keysize = 2 * PAES_MIN_KEYSIZE,
+ .max_keysize = 2 * PAES_MAX_KEYSIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = xts_paes_set_key,
+ .encrypt = xts_paes_encrypt,
+ .decrypt = xts_paes_decrypt,
};
-static int ctr_paes_init(struct crypto_tfm *tfm)
+static int ctr_paes_init(struct crypto_skcipher *tfm)
{
- struct s390_paes_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
ctx->kb.key = NULL;
return 0;
}
-static void ctr_paes_exit(struct crypto_tfm *tfm)
+static void ctr_paes_exit(struct crypto_skcipher *tfm)
{
- struct s390_paes_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
_free_kb_keybuf(&ctx->kb);
}
@@ -555,11 +515,11 @@ static int __ctr_paes_set_key(struct s390_paes_ctx *ctx)
return ctx->fc ? 0 : -EINVAL;
}
-static int ctr_paes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+static int ctr_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
unsigned int key_len)
{
int rc;
- struct s390_paes_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
_free_kb_keybuf(&ctx->kb);
rc = _copy_key_to_kb(&ctx->kb, in_key, key_len);
@@ -567,7 +527,7 @@ static int ctr_paes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
return rc;
if (__ctr_paes_set_key(ctx)) {
- tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
return 0;
@@ -588,37 +548,37 @@ static unsigned int __ctrblk_init(u8 *ctrptr, u8 *iv, unsigned int nbytes)
return n;
}
-static int ctr_paes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
- struct blkcipher_walk *walk)
+static int ctr_paes_crypt(struct skcipher_request *req)
{
- struct s390_paes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
u8 buf[AES_BLOCK_SIZE], *ctrptr;
+ struct skcipher_walk walk;
unsigned int nbytes, n, k;
int ret, locked;
locked = spin_trylock(&ctrblk_lock);
- ret = blkcipher_walk_virt_block(desc, walk, AES_BLOCK_SIZE);
- while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
+ ret = skcipher_walk_virt(&walk, req, false);
+ while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
n = AES_BLOCK_SIZE;
if (nbytes >= 2*AES_BLOCK_SIZE && locked)
- n = __ctrblk_init(ctrblk, walk->iv, nbytes);
- ctrptr = (n > AES_BLOCK_SIZE) ? ctrblk : walk->iv;
- k = cpacf_kmctr(ctx->fc | modifier, ctx->pk.protkey,
- walk->dst.virt.addr, walk->src.virt.addr,
- n, ctrptr);
+ n = __ctrblk_init(ctrblk, walk.iv, nbytes);
+ ctrptr = (n > AES_BLOCK_SIZE) ? ctrblk : walk.iv;
+ k = cpacf_kmctr(ctx->fc, ctx->pk.protkey, walk.dst.virt.addr,
+ walk.src.virt.addr, n, ctrptr);
if (k) {
if (ctrptr == ctrblk)
- memcpy(walk->iv, ctrptr + k - AES_BLOCK_SIZE,
+ memcpy(walk.iv, ctrptr + k - AES_BLOCK_SIZE,
AES_BLOCK_SIZE);
- crypto_inc(walk->iv, AES_BLOCK_SIZE);
- ret = blkcipher_walk_done(desc, walk, nbytes - n);
+ crypto_inc(walk.iv, AES_BLOCK_SIZE);
+ ret = skcipher_walk_done(&walk, nbytes - n);
}
if (k < n) {
if (__ctr_paes_set_key(ctx) != 0) {
if (locked)
spin_unlock(&ctrblk_lock);
- return blkcipher_walk_done(desc, walk, -EIO);
+ return skcipher_walk_done(&walk, -EIO);
}
}
}
@@ -629,80 +589,54 @@ static int ctr_paes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
*/
if (nbytes) {
while (1) {
- if (cpacf_kmctr(ctx->fc | modifier,
- ctx->pk.protkey, buf,
- walk->src.virt.addr, AES_BLOCK_SIZE,
- walk->iv) == AES_BLOCK_SIZE)
+ if (cpacf_kmctr(ctx->fc, ctx->pk.protkey, buf,
+ walk.src.virt.addr, AES_BLOCK_SIZE,
+ walk.iv) == AES_BLOCK_SIZE)
break;
if (__ctr_paes_set_key(ctx) != 0)
- return blkcipher_walk_done(desc, walk, -EIO);
+ return skcipher_walk_done(&walk, -EIO);
}
- memcpy(walk->dst.virt.addr, buf, nbytes);
- crypto_inc(walk->iv, AES_BLOCK_SIZE);
- ret = blkcipher_walk_done(desc, walk, 0);
+ memcpy(walk.dst.virt.addr, buf, nbytes);
+ crypto_inc(walk.iv, AES_BLOCK_SIZE);
+ ret = skcipher_walk_done(&walk, 0);
}
return ret;
}
-static int ctr_paes_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
-{
- struct blkcipher_walk walk;
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- return ctr_paes_crypt(desc, 0, &walk);
-}
-
-static int ctr_paes_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
-{
- struct blkcipher_walk walk;
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- return ctr_paes_crypt(desc, CPACF_DECRYPT, &walk);
-}
-
-static struct crypto_alg ctr_paes_alg = {
- .cra_name = "ctr(paes)",
- .cra_driver_name = "ctr-paes-s390",
- .cra_priority = 402, /* ecb-paes-s390 + 1 */
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct s390_paes_ctx),
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT(ctr_paes_alg.cra_list),
- .cra_init = ctr_paes_init,
- .cra_exit = ctr_paes_exit,
- .cra_u = {
- .blkcipher = {
- .min_keysize = PAES_MIN_KEYSIZE,
- .max_keysize = PAES_MAX_KEYSIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = ctr_paes_set_key,
- .encrypt = ctr_paes_encrypt,
- .decrypt = ctr_paes_decrypt,
- }
- }
+static struct skcipher_alg ctr_paes_alg = {
+ .base.cra_name = "ctr(paes)",
+ .base.cra_driver_name = "ctr-paes-s390",
+ .base.cra_priority = 402, /* ecb-paes-s390 + 1 */
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct s390_paes_ctx),
+ .base.cra_module = THIS_MODULE,
+ .base.cra_list = LIST_HEAD_INIT(ctr_paes_alg.base.cra_list),
+ .init = ctr_paes_init,
+ .exit = ctr_paes_exit,
+ .min_keysize = PAES_MIN_KEYSIZE,
+ .max_keysize = PAES_MAX_KEYSIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = ctr_paes_set_key,
+ .encrypt = ctr_paes_crypt,
+ .decrypt = ctr_paes_crypt,
+ .chunksize = AES_BLOCK_SIZE,
};
-static inline void __crypto_unregister_alg(struct crypto_alg *alg)
+static inline void __crypto_unregister_skcipher(struct skcipher_alg *alg)
{
- if (!list_empty(&alg->cra_list))
- crypto_unregister_alg(alg);
+ if (!list_empty(&alg->base.cra_list))
+ crypto_unregister_skcipher(alg);
}
static void paes_s390_fini(void)
{
if (ctrblk)
free_page((unsigned long) ctrblk);
- __crypto_unregister_alg(&ctr_paes_alg);
- __crypto_unregister_alg(&xts_paes_alg);
- __crypto_unregister_alg(&cbc_paes_alg);
- __crypto_unregister_alg(&ecb_paes_alg);
+ __crypto_unregister_skcipher(&ctr_paes_alg);
+ __crypto_unregister_skcipher(&xts_paes_alg);
+ __crypto_unregister_skcipher(&cbc_paes_alg);
+ __crypto_unregister_skcipher(&ecb_paes_alg);
}
static int __init paes_s390_init(void)
@@ -717,7 +651,7 @@ static int __init paes_s390_init(void)
if (cpacf_test_func(&km_functions, CPACF_KM_PAES_128) ||
cpacf_test_func(&km_functions, CPACF_KM_PAES_192) ||
cpacf_test_func(&km_functions, CPACF_KM_PAES_256)) {
- ret = crypto_register_alg(&ecb_paes_alg);
+ ret = crypto_register_skcipher(&ecb_paes_alg);
if (ret)
goto out_err;
}
@@ -725,14 +659,14 @@ static int __init paes_s390_init(void)
if (cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_128) ||
cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_192) ||
cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_256)) {
- ret = crypto_register_alg(&cbc_paes_alg);
+ ret = crypto_register_skcipher(&cbc_paes_alg);
if (ret)
goto out_err;
}
if (cpacf_test_func(&km_functions, CPACF_KM_PXTS_128) ||
cpacf_test_func(&km_functions, CPACF_KM_PXTS_256)) {
- ret = crypto_register_alg(&xts_paes_alg);
+ ret = crypto_register_skcipher(&xts_paes_alg);
if (ret)
goto out_err;
}
@@ -740,7 +674,7 @@ static int __init paes_s390_init(void)
if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_PAES_128) ||
cpacf_test_func(&kmctr_functions, CPACF_KMCTR_PAES_192) ||
cpacf_test_func(&kmctr_functions, CPACF_KMCTR_PAES_256)) {
- ret = crypto_register_alg(&ctr_paes_alg);
+ ret = crypto_register_skcipher(&ctr_paes_alg);
if (ret)
goto out_err;
ctrblk = (u8 *) __get_free_page(GFP_KERNEL);
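The paes hunks above complete the conversion from the removed blkcipher interface to the skcipher API: init/exit/setkey take a crypto_skcipher, per-request state lives in an skcipher_walk on the stack, the separate encrypt/decrypt wrappers collapse into a single ctr_paes_crypt() (CTR is its own inverse), and .chunksize advertises the 16-byte counter granularity. The counter handling itself is unchanged; as a loose userspace illustration (not kernel API), crypto_inc() on the IV amounts to a big-endian increment with carry:

#include <stdio.h>
#include <string.h>

/* Mirror of what crypto_inc() does to the 16-byte CTR IV. */
static void ctr_inc(unsigned char *ctr, size_t len)
{
        while (len-- > 0)
                if (++ctr[len] != 0)    /* stop once a byte doesn't wrap */
                        break;
}

int main(void)
{
        unsigned char iv[16];
        size_t i;

        memset(iv, 0xff, sizeof(iv));   /* worst case: carry ripples through */
        ctr_inc(iv, sizeof(iv));
        for (i = 0; i < sizeof(iv); i++)
                printf("%02x", iv[i]);
        printf("\n");                   /* prints 32 zeros */
        return 0;
}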
diff --git a/arch/s390/crypto/sha_common.c b/arch/s390/crypto/sha_common.c
index d39e0f079217..686fe7aa192f 100644
--- a/arch/s390/crypto/sha_common.c
+++ b/arch/s390/crypto/sha_common.c
@@ -74,14 +74,17 @@ int s390_sha_final(struct shash_desc *desc, u8 *out)
struct s390_sha_ctx *ctx = shash_desc_ctx(desc);
unsigned int bsize = crypto_shash_blocksize(desc->tfm);
u64 bits;
- unsigned int n, mbl_offset;
+ unsigned int n;
+ int mbl_offset;
n = ctx->count % bsize;
bits = ctx->count * 8;
- mbl_offset = s390_crypto_shash_parmsize(ctx->func) / sizeof(u32);
+ mbl_offset = s390_crypto_shash_parmsize(ctx->func);
if (mbl_offset < 0)
return -EINVAL;
+ mbl_offset = mbl_offset / sizeof(u32);
+
/* set total msg bit length (mbl) in CPACF parmblock */
switch (ctx->func) {
case CPACF_KLMD_SHA_1:
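The sha_common fix is a classic signedness bug: s390_crypto_shash_parmsize() can return a negative error code, but the old code divided it by sizeof(u32) and stored the result in an unsigned int before checking for < 0, so the check could never fire. Moving the value into a signed int first, and dividing only after the check, restores the error path. A minimal illustration of the dead check (a compiler with warnings enabled flags the unsigned comparison as always false):

#include <stdio.h>

static int parmsize(void)
{
        return -22;                     /* stand-in for an -EINVAL return */
}

int main(void)
{
        unsigned int u = parmsize() / sizeof(unsigned int);
        int s = parmsize();

        printf("unsigned check fires: %d\n", u < 0);    /* 0 - never */
        printf("signed check fires:   %d\n", s < 0);    /* 1 */
        return 0;
}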
diff --git a/arch/s390/include/asm/alternative.h b/arch/s390/include/asm/alternative.h
index c2cf7bcdef9b..1c8a38f762a3 100644
--- a/arch/s390/include/asm/alternative.h
+++ b/arch/s390/include/asm/alternative.h
@@ -139,10 +139,10 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
* without volatile and memory clobber.
*/
#define alternative(oldinstr, altinstr, facility) \
- asm volatile(ALTERNATIVE(oldinstr, altinstr, facility) : : : "memory")
+ asm_inline volatile(ALTERNATIVE(oldinstr, altinstr, facility) : : : "memory")
#define alternative_2(oldinstr, altinstr1, facility1, altinstr2, facility2) \
- asm volatile(ALTERNATIVE_2(oldinstr, altinstr1, facility1, \
+ asm_inline volatile(ALTERNATIVE_2(oldinstr, altinstr1, facility1, \
altinstr2, facility2) ::: "memory")
#endif /* __ASSEMBLY__ */
diff --git a/arch/s390/include/asm/bug.h b/arch/s390/include/asm/bug.h
index 713fc9735ffb..a2b11ac00f60 100644
--- a/arch/s390/include/asm/bug.h
+++ b/arch/s390/include/asm/bug.h
@@ -9,7 +9,7 @@
#ifdef CONFIG_DEBUG_BUGVERBOSE
#define __EMIT_BUG(x) do { \
- asm volatile( \
+ asm_inline volatile( \
"0: j 0b+2\n" \
"1:\n" \
".section .rodata.str,\"aMS\",@progbits,1\n" \
@@ -28,7 +28,7 @@
#else /* CONFIG_DEBUG_BUGVERBOSE */
#define __EMIT_BUG(x) do { \
- asm volatile( \
+ asm_inline volatile( \
"0: j 0b+2\n" \
"1:\n" \
".section __bug_table,\"awM\",@progbits,%1\n" \
diff --git a/arch/s390/include/asm/ctl_reg.h b/arch/s390/include/asm/ctl_reg.h
index 60f907516335..ed5efbb531c4 100644
--- a/arch/s390/include/asm/ctl_reg.h
+++ b/arch/s390/include/asm/ctl_reg.h
@@ -11,6 +11,7 @@
#include <linux/bits.h>
#define CR0_CLOCK_COMPARATOR_SIGN BIT(63 - 10)
+#define CR0_LOW_ADDRESS_PROTECTION BIT(63 - 35)
#define CR0_EMERGENCY_SIGNAL_SUBMASK BIT(63 - 49)
#define CR0_EXTERNAL_CALL_SUBMASK BIT(63 - 50)
#define CR0_CLOCK_COMPARATOR_SUBMASK BIT(63 - 52)
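z/Architecture documentation numbers control-register bits from the most significant bit, so architected bit n of the 64-bit register is BIT(63 - n) in kernel terms; the new CR0_LOW_ADDRESS_PROTECTION name (architected bit 35) is consumed by setup_control_registers() in the early.c hunk below. A quick check of the mapping:

#include <stdio.h>

#define BIT(n) (1UL << (n))

int main(void)
{
        /* architected bit 35 -> machine bit 28 -> 0x0000000010000000 */
        printf("%#018lx\n", BIT(63 - 35));
        return 0;
}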
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index abe60268335d..02f4c21c57f6 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -392,6 +392,7 @@ struct kvm_vcpu_stat {
u64 diagnose_10;
u64 diagnose_44;
u64 diagnose_9c;
+ u64 diagnose_9c_ignored;
u64 diagnose_258;
u64 diagnose_308;
u64 diagnose_500;
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index 823578c6b9e2..a4d38092530a 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -177,8 +177,6 @@ static inline int devmem_is_allowed(unsigned long pfn)
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-#define ARCH_ZONE_DMA_BITS 31
-
#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index bccb8f4a63e2..77606c4acd58 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -56,7 +56,12 @@ static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long address)
crst_table_init(table, _REGION2_ENTRY_EMPTY);
return (p4d_t *) table;
}
-#define p4d_free(mm, p4d) crst_table_free(mm, (unsigned long *) p4d)
+
+static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d)
+{
+ if (!mm_p4d_folded(mm))
+ crst_table_free(mm, (unsigned long *) p4d);
+}
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
{
@@ -65,7 +70,12 @@ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
crst_table_init(table, _REGION3_ENTRY_EMPTY);
return (pud_t *) table;
}
-#define pud_free(mm, pud) crst_table_free(mm, (unsigned long *) pud)
+
+static inline void pud_free(struct mm_struct *mm, pud_t *pud)
+{
+ if (!mm_pud_folded(mm))
+ crst_table_free(mm, (unsigned long *) pud);
+}
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
{
@@ -83,6 +93,8 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
+ if (mm_pmd_folded(mm))
+ return;
pgtable_pmd_page_dtor(virt_to_page(pmd));
crst_table_free(mm, (unsigned long *) pmd);
}
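With dynamically folded page-table levels, a p4d/pud/pmd "table" for a folded level is not a separate allocation; it aliases the level above, so handing it to crst_table_free() or the pmd destructor would free memory still in use. The new mm_*_folded() guards make the free a no-op in that case. A toy model of the idea (pure illustration, not the s390 layout):

#include <stdlib.h>
#include <stdbool.h>

struct mm { int levels; };

static bool p4d_folded(struct mm *mm)
{
        return mm->levels < 5;          /* folded: p4d aliases the pgd */
}

static void *p4d_alloc(struct mm *mm, void *pgd)
{
        return p4d_folded(mm) ? pgd : malloc(4096);
}

static void p4d_free(struct mm *mm, void *p4d)
{
        if (!p4d_folded(mm))            /* folded entries own no memory */
                free(p4d);
}

int main(void)
{
        struct mm mm = { .levels = 4 };
        void *pgd = malloc(4096);
        void *p4d = p4d_alloc(&mm, pgd);

        p4d_free(&mm, p4d);             /* safe no-op: p4d == pgd */
        free(pgd);
        return 0;
}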
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 5ff98d76a66c..7b03037a8475 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -266,11 +266,9 @@ static inline int is_module_addr(void *addr)
#endif
#define _REGION_ENTRY_BITS 0xfffffffffffff22fUL
-#define _REGION_ENTRY_BITS_LARGE 0xffffffff8000fe2fUL
/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_BITS 0xfffffffffffffe33UL
-#define _SEGMENT_ENTRY_BITS_LARGE 0xfffffffffff0ff33UL
#define _SEGMENT_ENTRY_HARDWARE_BITS 0xfffffffffffffe30UL
#define _SEGMENT_ENTRY_HARDWARE_BITS_LARGE 0xfffffffffff00730UL
#define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address */
@@ -699,10 +697,8 @@ static inline int pmd_large(pmd_t pmd)
static inline int pmd_bad(pmd_t pmd)
{
- if ((pmd_val(pmd) & _SEGMENT_ENTRY_TYPE_MASK) > 0)
+ if ((pmd_val(pmd) & _SEGMENT_ENTRY_TYPE_MASK) > 0 || pmd_large(pmd))
return 1;
- if (pmd_large(pmd))
- return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS_LARGE) != 0;
return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0;
}
@@ -710,12 +706,10 @@ static inline int pud_bad(pud_t pud)
{
unsigned long type = pud_val(pud) & _REGION_ENTRY_TYPE_MASK;
- if (type > _REGION_ENTRY_TYPE_R3)
+ if (type > _REGION_ENTRY_TYPE_R3 || pud_large(pud))
return 1;
if (type < _REGION_ENTRY_TYPE_R3)
return 0;
- if (pud_large(pud))
- return (pud_val(pud) & ~_REGION_ENTRY_BITS_LARGE) != 0;
return (pud_val(pud) & ~_REGION_ENTRY_BITS) != 0;
}
@@ -758,18 +752,12 @@ static inline int pmd_write(pmd_t pmd)
static inline int pmd_dirty(pmd_t pmd)
{
- int dirty = 1;
- if (pmd_large(pmd))
- dirty = (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) != 0;
- return dirty;
+ return (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) != 0;
}
static inline int pmd_young(pmd_t pmd)
{
- int young = 1;
- if (pmd_large(pmd))
- young = (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0;
- return young;
+ return (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0;
}
static inline int pte_present(pte_t pte)
@@ -1173,8 +1161,6 @@ void gmap_pmdp_idte_global(struct mm_struct *mm, unsigned long vmaddr);
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t entry)
{
- if (!MACHINE_HAS_NX)
- pte_val(entry) &= ~_PAGE_NOEXEC;
if (pte_present(entry))
pte_val(entry) &= ~_PAGE_UNUSED;
if (mm_has_pgste(mm))
@@ -1191,6 +1177,8 @@ static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
pte_t __pte;
pte_val(__pte) = physpage + pgprot_val(pgprot);
+ if (!MACHINE_HAS_NX)
+ pte_val(__pte) &= ~_PAGE_NOEXEC;
return pte_mkyoung(__pte);
}
@@ -1297,29 +1285,23 @@ static inline pmd_t pmd_wrprotect(pmd_t pmd)
static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
pmd_val(pmd) |= _SEGMENT_ENTRY_WRITE;
- if (pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
- return pmd;
- pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
+ if (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY)
+ pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
return pmd;
}
static inline pmd_t pmd_mkclean(pmd_t pmd)
{
- if (pmd_large(pmd)) {
- pmd_val(pmd) &= ~_SEGMENT_ENTRY_DIRTY;
- pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
- }
+ pmd_val(pmd) &= ~_SEGMENT_ENTRY_DIRTY;
+ pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
return pmd;
}
static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
- if (pmd_large(pmd)) {
- pmd_val(pmd) |= _SEGMENT_ENTRY_DIRTY |
- _SEGMENT_ENTRY_SOFT_DIRTY;
- if (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE)
- pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
- }
+ pmd_val(pmd) |= _SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_SOFT_DIRTY;
+ if (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE)
+ pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
return pmd;
}
@@ -1333,29 +1315,23 @@ static inline pud_t pud_wrprotect(pud_t pud)
static inline pud_t pud_mkwrite(pud_t pud)
{
pud_val(pud) |= _REGION3_ENTRY_WRITE;
- if (pud_large(pud) && !(pud_val(pud) & _REGION3_ENTRY_DIRTY))
- return pud;
- pud_val(pud) &= ~_REGION_ENTRY_PROTECT;
+ if (pud_val(pud) & _REGION3_ENTRY_DIRTY)
+ pud_val(pud) &= ~_REGION_ENTRY_PROTECT;
return pud;
}
static inline pud_t pud_mkclean(pud_t pud)
{
- if (pud_large(pud)) {
- pud_val(pud) &= ~_REGION3_ENTRY_DIRTY;
- pud_val(pud) |= _REGION_ENTRY_PROTECT;
- }
+ pud_val(pud) &= ~_REGION3_ENTRY_DIRTY;
+ pud_val(pud) |= _REGION_ENTRY_PROTECT;
return pud;
}
static inline pud_t pud_mkdirty(pud_t pud)
{
- if (pud_large(pud)) {
- pud_val(pud) |= _REGION3_ENTRY_DIRTY |
- _REGION3_ENTRY_SOFT_DIRTY;
- if (pud_val(pud) & _REGION3_ENTRY_WRITE)
- pud_val(pud) &= ~_REGION_ENTRY_PROTECT;
- }
+ pud_val(pud) |= _REGION3_ENTRY_DIRTY | _REGION3_ENTRY_SOFT_DIRTY;
+ if (pud_val(pud) & _REGION3_ENTRY_WRITE)
+ pud_val(pud) &= ~_REGION_ENTRY_PROTECT;
return pud;
}
@@ -1379,38 +1355,29 @@ static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
- if (pmd_large(pmd)) {
- pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
- if (pmd_val(pmd) & _SEGMENT_ENTRY_READ)
- pmd_val(pmd) &= ~_SEGMENT_ENTRY_INVALID;
- }
+ pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
+ if (pmd_val(pmd) & _SEGMENT_ENTRY_READ)
+ pmd_val(pmd) &= ~_SEGMENT_ENTRY_INVALID;
return pmd;
}
static inline pmd_t pmd_mkold(pmd_t pmd)
{
- if (pmd_large(pmd)) {
- pmd_val(pmd) &= ~_SEGMENT_ENTRY_YOUNG;
- pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
- }
+ pmd_val(pmd) &= ~_SEGMENT_ENTRY_YOUNG;
+ pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
return pmd;
}
static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
- if (pmd_large(pmd)) {
- pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN_LARGE |
- _SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_YOUNG |
- _SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_SOFT_DIRTY;
- pmd_val(pmd) |= massage_pgprot_pmd(newprot);
- if (!(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
- pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
- if (!(pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG))
- pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
- return pmd;
- }
- pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN;
+ pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN_LARGE |
+ _SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_YOUNG |
+ _SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_SOFT_DIRTY;
pmd_val(pmd) |= massage_pgprot_pmd(newprot);
+ if (!(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
+ pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
+ if (!(pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG))
+ pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
return pmd;
}
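The pgtable.h changes drop the pmd_large()/pud_large() special-casing: the dirty/young/protect bookkeeping is applied unconditionally, the separate _BITS_LARGE validity masks go away, and pmd_bad()/pud_bad() simply report large entries as bad. The invariant being maintained throughout is that the hardware protect bit is clear only when an entry is both writable and dirty, so a clean but writable page still faults and can be marked dirty. A compilable toy of that interplay:

#include <stdio.h>

#define WRITE 0x1                       /* software: may be written */
#define DIRTY 0x2                       /* software: has been written */
#define PROT  0x4                       /* hardware: write-protected */

static unsigned int mkwrite(unsigned int e)
{
        e |= WRITE;
        if (e & DIRTY)
                e &= ~PROT;             /* only dirty pages unprotect */
        return e;
}

static unsigned int mkdirty(unsigned int e)
{
        e |= DIRTY;
        if (e & WRITE)
                e &= ~PROT;             /* only writable pages unprotect */
        return e;
}

int main(void)
{
        unsigned int e = PROT;          /* clean, protected entry */

        e = mkwrite(e);
        printf("writable but clean, prot=%d\n", !!(e & PROT));  /* 1 */
        e = mkdirty(e);
        printf("writable and dirty, prot=%d\n", !!(e & PROT));  /* 0 */
        return 0;
}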
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 51a0e4a2dc96..881fc37c11c6 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -206,7 +206,7 @@ unsigned long get_wchan(struct task_struct *p);
/* Has task runtime instrumentation enabled ? */
#define is_ri_task(tsk) (!!(tsk)->thread.ri_cb)
-static inline unsigned long current_stack_pointer(void)
+static __always_inline unsigned long current_stack_pointer(void)
{
unsigned long sp;
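current_stack_pointer() must never end up out of line: it would then report the stack pointer of its own freshly built frame rather than the caller's, and __always_inline guarantees inlining even in configurations where plain inline is only a hint. A userspace sketch of the difference, using a GCC builtin as a stand-in:

#include <stdio.h>

#define __always_inline inline __attribute__((always_inline))

static __always_inline void *frame_inlined(void)
{
        return __builtin_frame_address(0);      /* caller's frame */
}

static __attribute__((noinline)) void *frame_called(void)
{
        return __builtin_frame_address(0);      /* this function's own frame */
}

int main(void)
{
        printf("inlined: %p\n", frame_inlined());
        printf("called:  %p\n", frame_called());        /* usually differs */
        return 0;
}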
diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h
index e3f238e8c611..71e3f0146cda 100644
--- a/arch/s390/include/asm/qdio.h
+++ b/arch/s390/include/asm/qdio.h
@@ -276,6 +276,7 @@ struct qdio_outbuf_state {
#define CHSC_AC2_MULTI_BUFFER_AVAILABLE 0x0080
#define CHSC_AC2_MULTI_BUFFER_ENABLED 0x0040
#define CHSC_AC2_DATA_DIV_AVAILABLE 0x0010
+#define CHSC_AC2_SNIFFER_AVAILABLE 0x0008
#define CHSC_AC2_DATA_DIV_ENABLED 0x0002
#define CHSC_AC3_FORMAT2_CQ_AVAILABLE 0x8000
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index c02bff33f6c7..3a37172d5398 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -85,7 +85,7 @@ static inline int arch_spin_trylock(arch_spinlock_t *lp)
static inline void arch_spin_unlock(arch_spinlock_t *lp)
{
typecheck(int, lp->lock);
- asm volatile(
+ asm_inline volatile(
ALTERNATIVE("", ".long 0xb2fa0070", 49) /* NIAI 7 */
" sth %1,%0\n"
: "=Q" (((unsigned short *) &lp->lock)[1])
diff --git a/arch/s390/include/asm/stacktrace.h b/arch/s390/include/asm/stacktrace.h
index 0ae4bbf7779c..fee40212af11 100644
--- a/arch/s390/include/asm/stacktrace.h
+++ b/arch/s390/include/asm/stacktrace.h
@@ -38,7 +38,7 @@ static inline unsigned long get_stack_pointer(struct task_struct *task,
{
if (regs)
return (unsigned long) kernel_stack_pointer(regs);
- if (task == current)
+ if (!task || task == current)
return current_stack_pointer();
return (unsigned long) task->thread.ksp;
}
diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
index 64539c221672..6da8885251d6 100644
--- a/arch/s390/include/asm/timex.h
+++ b/arch/s390/include/asm/timex.h
@@ -10,8 +10,9 @@
#ifndef _ASM_S390_TIMEX_H
#define _ASM_S390_TIMEX_H
-#include <asm/lowcore.h>
+#include <linux/preempt.h>
#include <linux/time64.h>
+#include <asm/lowcore.h>
/* The value of the TOD clock for 1.1.1970. */
#define TOD_UNIX_EPOCH 0x7d91048bca000000ULL
@@ -179,22 +180,24 @@ static inline cycles_t get_cycles(void)
int get_phys_clock(unsigned long *clock);
void init_cpu_timer(void);
-unsigned long long monotonic_clock(void);
extern unsigned char tod_clock_base[16] __aligned(8);
/**
* get_clock_monotonic - returns current time in clock rate units
*
- * The caller must ensure that preemption is disabled.
* The clock and tod_clock_base get changed via stop_machine.
- * Therefore preemption must be disabled when calling this
- * function, otherwise the returned value is not guaranteed to
- * be monotonic.
+ * Therefore preemption must be disabled, otherwise the returned
+ * value is not guaranteed to be monotonic.
*/
static inline unsigned long long get_tod_clock_monotonic(void)
{
- return get_tod_clock() - *(unsigned long long *) &tod_clock_base[1];
+ unsigned long long tod;
+
+ preempt_disable();
+ tod = get_tod_clock() - *(unsigned long long *) &tod_clock_base[1];
+ preempt_enable();
+ return tod;
}
/**
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
index 7abe6ae261b4..f304802ecf7b 100644
--- a/arch/s390/kernel/dis.c
+++ b/arch/s390/kernel/dis.c
@@ -461,10 +461,11 @@ static int print_insn(char *buffer, unsigned char *code, unsigned long addr)
ptr += sprintf(ptr, "%%c%i", value);
else if (operand->flags & OPERAND_VR)
ptr += sprintf(ptr, "%%v%i", value);
- else if (operand->flags & OPERAND_PCREL)
- ptr += sprintf(ptr, "%lx", (signed int) value
- + addr);
- else if (operand->flags & OPERAND_SIGNED)
+ else if (operand->flags & OPERAND_PCREL) {
+ void *pcrel = (void *)((int)value + addr);
+
+ ptr += sprintf(ptr, "%px", pcrel);
+ } else if (operand->flags & OPERAND_SIGNED)
ptr += sprintf(ptr, "%i", value);
else
ptr += sprintf(ptr, "%u", value);
@@ -536,7 +537,7 @@ void show_code(struct pt_regs *regs)
else
*ptr++ = ' ';
addr = regs->psw.addr + start - 32;
- ptr += sprintf(ptr, "%016lx: ", addr);
+ ptr += sprintf(ptr, "%px: ", (void *)addr);
if (start + opsize >= end)
break;
for (i = 0; i < opsize; i++)
@@ -564,7 +565,7 @@ void print_fn_code(unsigned char *code, unsigned long len)
opsize = insn_length(*code);
if (opsize > len)
break;
- ptr += sprintf(ptr, "%p: ", code);
+ ptr += sprintf(ptr, "%px: ", code);
for (i = 0; i < opsize; i++)
ptr += sprintf(ptr, "%02x", code[i]);
*ptr++ = '\t';
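Since kernel pointers printed with %p are hashed to avoid leaking the memory layout, disassembler output built with %p and %lx had become useless for matching real addresses. %px deliberately prints the raw value (see Documentation/core-api/printk-formats.rst). A one-line sketch of the distinction, not taken from this patch:

/* %px: raw pointer, leaks layout - use only where that is the point */
pr_info("insn at %px (hashed form: %p)\n", code, code);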
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index b432d63d0b37..db32a55daaec 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -30,6 +30,7 @@
#include <asm/sclp.h>
#include <asm/facility.h>
#include <asm/boot_data.h>
+#include <asm/switch_to.h>
#include "entry.h"
static void __init reset_tod_clock(void)
@@ -238,7 +239,7 @@ static __init void detect_machine_facilities(void)
S390_lowcore.machine_flags |= MACHINE_FLAG_VX;
__ctl_set_bit(0, 17);
}
- if (test_facility(130)) {
+ if (test_facility(130) && !noexec_disabled) {
S390_lowcore.machine_flags |= MACHINE_FLAG_NX;
__ctl_set_bit(0, 20);
}
@@ -260,6 +261,24 @@ static inline void save_vector_registers(void)
#endif
}
+static inline void setup_control_registers(void)
+{
+ unsigned long reg;
+
+ __ctl_store(reg, 0, 0);
+ reg |= CR0_LOW_ADDRESS_PROTECTION;
+ reg |= CR0_EMERGENCY_SIGNAL_SUBMASK;
+ reg |= CR0_EXTERNAL_CALL_SUBMASK;
+ __ctl_load(reg, 0, 0);
+}
+
+static inline void setup_access_registers(void)
+{
+ unsigned int acrs[NUM_ACRS] = { 0 };
+
+ restore_access_regs(acrs);
+}
+
static int __init disable_vector_extension(char *str)
{
S390_lowcore.machine_flags &= ~MACHINE_FLAG_VX;
@@ -268,21 +287,6 @@ static int __init disable_vector_extension(char *str)
}
early_param("novx", disable_vector_extension);
-static int __init noexec_setup(char *str)
-{
- bool enabled;
- int rc;
-
- rc = kstrtobool(str, &enabled);
- if (!rc && !enabled) {
- /* Disable no-execute support */
- S390_lowcore.machine_flags &= ~MACHINE_FLAG_NX;
- __ctl_clear_bit(0, 20);
- }
- return rc;
-}
-early_param("noexec", noexec_setup);
-
static int __init cad_setup(char *str)
{
bool enabled;
@@ -332,5 +336,7 @@ void __init startup_init(void)
save_vector_registers();
setup_topology();
sclp_early_detect();
+ setup_control_registers();
+ setup_access_registers();
lockdep_on();
}
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
index 0d9ee198f4eb..b9e585f528a6 100644
--- a/arch/s390/kernel/head64.S
+++ b/arch/s390/kernel/head64.S
@@ -26,8 +26,6 @@ ENTRY(startup_continue)
0: larl %r1,tod_clock_base
mvc 0(16,%r1),__LC_BOOT_CLOCK
larl %r13,.LPG1 # get base
- larl %r0,boot_vdso_data
- stg %r0,__LC_VDSO_PER_CPU
#
# Setup stack
#
@@ -37,19 +35,8 @@ ENTRY(startup_continue)
#ifdef CONFIG_KASAN
brasl %r14,kasan_early_init
#endif
-#
-# Early machine initialization and detection functions.
-#
- brasl %r14,startup_init
-
-# check control registers
- stctg %c0,%c15,0(%r15)
- oi 6(%r15),0x60 # enable sigp emergency & external call
- oi 4(%r15),0x10 # switch on low address proctection
- lctlg %c0,%c15,0(%r15)
-
- lam 0,15,.Laregs-.LPG1(%r13) # load acrs needed by uaccess
- brasl %r14,start_kernel # go to C code
+ brasl %r14,startup_init # s390 specific early init
+ brasl %r14,start_kernel # common init code
#
# We returned from start_kernel ?!? PANIK
#
@@ -59,4 +46,3 @@ ENTRY(startup_continue)
.align 16
.LPG1:
.Ldw: .quad 0x0002000180000000,0x0000000000000000
-.Laregs:.long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
index 48d48b6187c0..0eb1d1cc53a8 100644
--- a/arch/s390/kernel/perf_cpum_cf.c
+++ b/arch/s390/kernel/perf_cpum_cf.c
@@ -199,7 +199,7 @@ static const int cpumf_generic_events_user[] = {
[PERF_COUNT_HW_BUS_CYCLES] = -1,
};
-static int __hw_perf_event_init(struct perf_event *event)
+static int __hw_perf_event_init(struct perf_event *event, unsigned int type)
{
struct perf_event_attr *attr = &event->attr;
struct hw_perf_event *hwc = &event->hw;
@@ -207,7 +207,7 @@ static int __hw_perf_event_init(struct perf_event *event)
int err = 0;
u64 ev;
- switch (attr->type) {
+ switch (type) {
case PERF_TYPE_RAW:
/* Raw events are used to access counters directly,
* hence do not permit excludes */
@@ -294,17 +294,16 @@ static int __hw_perf_event_init(struct perf_event *event)
static int cpumf_pmu_event_init(struct perf_event *event)
{
+ unsigned int type = event->attr.type;
int err;
- switch (event->attr.type) {
- case PERF_TYPE_HARDWARE:
- case PERF_TYPE_HW_CACHE:
- case PERF_TYPE_RAW:
- err = __hw_perf_event_init(event);
- break;
- default:
+ if (type == PERF_TYPE_HARDWARE || type == PERF_TYPE_RAW)
+ err = __hw_perf_event_init(event, type);
+ else if (event->pmu->type == type)
+ /* Registered as unknown PMU */
+ err = __hw_perf_event_init(event, PERF_TYPE_RAW);
+ else
return -ENOENT;
- }
if (unlikely(err) && event->destroy)
event->destroy(event);
@@ -553,7 +552,7 @@ static int __init cpumf_pmu_init(void)
return -ENODEV;
cpumf_pmu.attr_groups = cpumf_cf_event_group();
- rc = perf_pmu_register(&cpumf_pmu, "cpum_cf", PERF_TYPE_RAW);
+ rc = perf_pmu_register(&cpumf_pmu, "cpum_cf", -1);
if (rc)
pr_err("Registering the cpum_cf PMU failed with rc=%i\n", rc);
return rc;
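Registering with type -1 instead of PERF_TYPE_RAW asks perf_pmu_register() for a dynamically allocated PMU type id, so cpum_cf no longer shadows the generic raw type; event_init correspondingly also accepts events whose attr.type equals event->pmu->type and treats them as raw. The cf_diag PMU in the next hunk gets the same treatment. Userspace discovers the dynamic id via sysfs; a hedged sketch (error handling abbreviated):

#include <stdio.h>
#include <linux/perf_event.h>

/* Read the dynamically assigned type id of a named PMU from sysfs. */
static int pmu_type(const char *name)
{
        char path[128];
        int type = -1;
        FILE *f;

        snprintf(path, sizeof(path),
                 "/sys/bus/event_source/devices/%s/type", name);
        f = fopen(path, "r");
        if (f) {
                if (fscanf(f, "%d", &type) != 1)
                        type = -1;
                fclose(f);
        }
        return type;
}

int main(void)
{
        struct perf_event_attr attr = { .size = sizeof(attr) };

        attr.type = pmu_type("cpum_cf");        /* no longer PERF_TYPE_RAW */
        attr.config = 0;                        /* raw counter number */
        printf("cpum_cf type id: %u\n", attr.type);
        return 0;
}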
diff --git a/arch/s390/kernel/perf_cpum_cf_diag.c b/arch/s390/kernel/perf_cpum_cf_diag.c
index 2654e348801a..e949ab832ed7 100644
--- a/arch/s390/kernel/perf_cpum_cf_diag.c
+++ b/arch/s390/kernel/perf_cpum_cf_diag.c
@@ -243,13 +243,13 @@ static int cf_diag_event_init(struct perf_event *event)
int err = -ENOENT;
debug_sprintf_event(cf_diag_dbg, 5,
- "%s event %p cpu %d config %#llx "
+ "%s event %p cpu %d config %#llx type:%u "
"sample_type %#llx cf_diag_events %d\n", __func__,
- event, event->cpu, attr->config, attr->sample_type,
- atomic_read(&cf_diag_events));
+ event, event->cpu, attr->config, event->pmu->type,
+ attr->sample_type, atomic_read(&cf_diag_events));
if (event->attr.config != PERF_EVENT_CPUM_CF_DIAG ||
- event->attr.type != PERF_TYPE_RAW)
+ event->attr.type != event->pmu->type)
goto out;
/* Raw events are used to access counters directly,
@@ -693,7 +693,7 @@ static int __init cf_diag_init(void)
}
debug_register_view(cf_diag_dbg, &debug_sprintf_view);
- rc = perf_pmu_register(&cf_diag, "cpum_cf_diag", PERF_TYPE_RAW);
+ rc = perf_pmu_register(&cf_diag, "cpum_cf_diag", -1);
if (rc) {
debug_unregister_view(cf_diag_dbg, &debug_sprintf_view);
debug_unregister(cf_diag_dbg);
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
index 3d8b12a9a6ff..69506fdbd9a1 100644
--- a/arch/s390/kernel/perf_cpum_sf.c
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -156,8 +156,8 @@ static void free_sampling_buffer(struct sf_buffer *sfb)
}
}
- debug_sprintf_event(sfdbg, 5,
- "free_sampling_buffer: freed sdbt=%p\n", sfb->sdbt);
+ debug_sprintf_event(sfdbg, 5, "%s freed sdbt %p\n", __func__,
+ sfb->sdbt);
memset(sfb, 0, sizeof(*sfb));
}
@@ -212,10 +212,10 @@ static int realloc_sampling_buffer(struct sf_buffer *sfb,
* the sampling buffer origin.
*/
if (sfb->sdbt != get_next_sdbt(tail)) {
- debug_sprintf_event(sfdbg, 3, "realloc_sampling_buffer: "
- "sampling buffer is not linked: origin=%p"
- "tail=%p\n",
- (void *) sfb->sdbt, (void *) tail);
+ debug_sprintf_event(sfdbg, 3, "%s: "
+ "sampling buffer is not linked: origin %p"
+ " tail %p\n", __func__,
+ (void *)sfb->sdbt, (void *)tail);
return -EINVAL;
}
@@ -252,7 +252,7 @@ static int realloc_sampling_buffer(struct sf_buffer *sfb,
sfb->tail = tail;
debug_sprintf_event(sfdbg, 4, "realloc_sampling_buffer: new buffer"
- " settings: sdbt=%lu sdb=%lu\n",
+ " settings: sdbt %lu sdb %lu\n",
sfb->num_sdbt, sfb->num_sdb);
return rc;
}
@@ -293,11 +293,11 @@ static int alloc_sampling_buffer(struct sf_buffer *sfb, unsigned long num_sdb)
if (rc) {
free_sampling_buffer(sfb);
debug_sprintf_event(sfdbg, 4, "alloc_sampling_buffer: "
- "realloc_sampling_buffer failed with rc=%i\n", rc);
+ "realloc_sampling_buffer failed with rc %i\n", rc);
} else
debug_sprintf_event(sfdbg, 4,
- "alloc_sampling_buffer: tear=%p dear=%p\n",
- sfb->sdbt, (void *) *sfb->sdbt);
+ "alloc_sampling_buffer: tear %p dear %p\n",
+ sfb->sdbt, (void *)*sfb->sdbt);
return rc;
}
@@ -404,8 +404,8 @@ static int allocate_buffers(struct cpu_hw_sf *cpuhw, struct hw_perf_event *hwc)
return 0;
debug_sprintf_event(sfdbg, 3,
- "allocate_buffers: rate=%lu f=%lu sdb=%lu/%lu"
- " sample_size=%lu cpuhw=%p\n",
+ "%s: rate %lu f %lu sdb %lu/%lu"
+ " sample_size %lu cpuhw %p\n", __func__,
SAMPL_RATE(hwc), freq, n_sdb, sfb_max_limit(hwc),
sample_size, cpuhw);
@@ -465,8 +465,8 @@ static void sfb_account_overflows(struct cpu_hw_sf *cpuhw,
if (num)
sfb_account_allocs(num, hwc);
- debug_sprintf_event(sfdbg, 5, "sfb: overflow: overflow=%llu ratio=%lu"
- " num=%lu\n", OVERFLOW_REG(hwc), ratio, num);
+ debug_sprintf_event(sfdbg, 5, "sfb: overflow: overflow %llu ratio %lu"
+ " num %lu\n", OVERFLOW_REG(hwc), ratio, num);
OVERFLOW_REG(hwc) = 0;
}
@@ -505,11 +505,11 @@ static void extend_sampling_buffer(struct sf_buffer *sfb,
rc = realloc_sampling_buffer(sfb, num, GFP_ATOMIC);
if (rc)
debug_sprintf_event(sfdbg, 5, "sfb: extend: realloc "
- "failed with rc=%i\n", rc);
+ "failed with rc %i\n", rc);
if (sfb_has_pending_allocs(sfb, hwc))
debug_sprintf_event(sfdbg, 5, "sfb: extend: "
- "req=%lu alloc=%lu remaining=%lu\n",
+ "req %lu alloc %lu remaining %lu\n",
num, sfb->num_sdb - num_old,
sfb_pending_allocs(sfb, hwc));
}
@@ -538,20 +538,22 @@ static void setup_pmc_cpu(void *flags)
err = sf_disable();
if (err)
pr_err("Switching off the sampling facility failed "
- "with rc=%i\n", err);
+ "with rc %i\n", err);
debug_sprintf_event(sfdbg, 5,
- "setup_pmc_cpu: initialized: cpuhw=%p\n", cpusf);
+ "%s: initialized: cpuhw %p\n", __func__,
+ cpusf);
break;
case PMC_RELEASE:
cpusf->flags &= ~PMU_F_RESERVED;
err = sf_disable();
if (err) {
pr_err("Switching off the sampling facility failed "
- "with rc=%i\n", err);
+ "with rc %i\n", err);
} else
deallocate_buffers(cpusf);
debug_sprintf_event(sfdbg, 5,
- "setup_pmc_cpu: released: cpuhw=%p\n", cpusf);
+ "%s: released: cpuhw %p\n", __func__,
+ cpusf);
break;
}
if (err)
@@ -744,7 +746,7 @@ static int __hw_perf_event_init_rate(struct perf_event *event,
SAMPL_RATE(hwc) = rate;
hw_init_period(hwc, SAMPL_RATE(hwc));
debug_sprintf_event(sfdbg, 4, "__hw_perf_event_init_rate:"
- "cpu:%d period:%llx freq:%d,%#lx\n", event->cpu,
+ "cpu:%d period:%#llx freq:%d,%#lx\n", event->cpu,
event->attr.sample_period, event->attr.freq,
SAMPLE_FREQ_MODE(hwc));
return 0;
@@ -963,7 +965,7 @@ static void cpumsf_pmu_enable(struct pmu *pmu)
err = lsctl(&cpuhw->lsctl);
if (err) {
cpuhw->flags &= ~PMU_F_ENABLED;
- pr_err("Loading sampling controls failed: op=%i err=%i\n",
+ pr_err("Loading sampling controls failed: op %i err %i\n",
1, err);
return;
}
@@ -971,8 +973,8 @@ static void cpumsf_pmu_enable(struct pmu *pmu)
/* Load current program parameter */
lpp(&S390_lowcore.lpp);
- debug_sprintf_event(sfdbg, 6, "pmu_enable: es=%i cs=%i ed=%i cd=%i "
- "interval:%lx tear=%p dear=%p\n",
+ debug_sprintf_event(sfdbg, 6, "pmu_enable: es %i cs %i ed %i cd %i "
+ "interval %#lx tear %p dear %p\n",
cpuhw->lsctl.es, cpuhw->lsctl.cs, cpuhw->lsctl.ed,
cpuhw->lsctl.cd, cpuhw->lsctl.interval,
(void *) cpuhw->lsctl.tear,
@@ -999,13 +1001,14 @@ static void cpumsf_pmu_disable(struct pmu *pmu)
err = lsctl(&inactive);
if (err) {
- pr_err("Loading sampling controls failed: op=%i err=%i\n",
+ pr_err("Loading sampling controls failed: op %i err %i\n",
2, err);
return;
}
/* Save state of TEAR and DEAR register contents */
- if (!qsi(&si)) {
+ err = qsi(&si);
+ if (!err) {
/* TEAR/DEAR values are valid only if the sampling facility is
* enabled. Note that cpumsf_pmu_disable() might be called even
* for a disabled sampling facility because cpumsf_pmu_enable()
@@ -1017,7 +1020,7 @@ static void cpumsf_pmu_disable(struct pmu *pmu)
}
} else
debug_sprintf_event(sfdbg, 3, "cpumsf_pmu_disable: "
- "qsi() failed with err=%i\n", err);
+ "qsi() failed with err %i\n", err);
cpuhw->flags &= ~PMU_F_ENABLED;
}
@@ -1130,15 +1133,6 @@ static void perf_event_count_update(struct perf_event *event, u64 count)
local64_add(count, &event->count);
}
-static void debug_sample_entry(struct hws_basic_entry *sample,
- struct hws_trailer_entry *te)
-{
- debug_sprintf_event(sfdbg, 4, "hw_collect_samples: Found unknown "
- "sampling data entry: te->f=%i basic.def=%04x "
- "(%p)\n",
- te->f, sample->def, sample);
-}
-
/* hw_collect_samples() - Walk through a sample-data-block and collect samples
* @event: The perf event
* @sdbt: Sample-data-block table
@@ -1192,7 +1186,11 @@ static void hw_collect_samples(struct perf_event *event, unsigned long *sdbt,
/* Count discarded samples */
*overflow += 1;
} else {
- debug_sample_entry(sample, te);
+ debug_sprintf_event(sfdbg, 4,
+ "%s: Found unknown"
+ " sampling data entry: te->f %i"
+ " basic.def %#4x (%p)\n", __func__,
+ te->f, sample->def, sample);
/* Sample slot is not yet written or other record.
*
* This condition can occur if the buffer was reused
@@ -1267,9 +1265,9 @@ static void hw_perf_event_update(struct perf_event *event, int flush_all)
sampl_overflow += te->overflow;
/* Timestamps are valid for full sample-data-blocks only */
- debug_sprintf_event(sfdbg, 6, "hw_perf_event_update: sdbt=%p "
- "overflow=%llu timestamp=%#llx\n",
- sdbt, te->overflow,
+ debug_sprintf_event(sfdbg, 6, "%s: sdbt %p "
+ "overflow %llu timestamp %#llx\n",
+ __func__, sdbt, te->overflow,
(te->f) ? trailer_timestamp(te) : 0ULL);
/* Collect all samples from a single sample-data-block and
@@ -1313,9 +1311,9 @@ static void hw_perf_event_update(struct perf_event *event, int flush_all)
OVERFLOW_REG(hwc) = DIV_ROUND_UP(OVERFLOW_REG(hwc) +
sampl_overflow, 1 + num_sdb);
if (sampl_overflow || event_overflow)
- debug_sprintf_event(sfdbg, 4, "hw_perf_event_update: "
- "overflow stats: sample=%llu event=%llu\n",
- sampl_overflow, event_overflow);
+ debug_sprintf_event(sfdbg, 4, "%s: "
+ "overflow stats: sample %llu event %llu\n",
+ __func__, sampl_overflow, event_overflow);
}
#define AUX_SDB_INDEX(aux, i) ((i) % aux->sfb.num_sdb)
@@ -1368,7 +1366,7 @@ static void aux_output_end(struct perf_output_handle *handle)
te = aux_sdb_trailer(aux, aux->alert_mark);
te->flags &= ~SDB_TE_ALERT_REQ_MASK;
- debug_sprintf_event(sfdbg, 6, "aux_output_end: collect %lx SDBs\n", i);
+ debug_sprintf_event(sfdbg, 6, "%s: collect %#lx SDBs\n", __func__, i);
}
/*
@@ -1428,8 +1426,8 @@ static int aux_output_begin(struct perf_output_handle *handle,
debug_sprintf_event(sfdbg, 6, "aux_output_begin: "
"head->alert_mark->empty_mark (num_alert, range)"
- "[%lx -> %lx -> %lx] (%lx, %lx) "
- "tear index %lx, tear %lx dear %lx\n",
+ "[%#lx -> %#lx -> %#lx] (%#lx, %#lx) "
+ "tear index %#lx, tear %#lx dear %#lx\n",
aux->head, aux->alert_mark, aux->empty_mark,
AUX_SDB_NUM_ALERT(aux), range,
head / CPUM_SF_SDB_PER_TABLE,
@@ -1596,13 +1594,13 @@ static void hw_collect_aux(struct cpu_hw_sf *cpuhw)
perf_aux_output_end(&cpuhw->handle, size);
pr_err("Sample data caused the AUX buffer with %lu "
"pages to overflow\n", num_sdb);
- debug_sprintf_event(sfdbg, 1, "head %lx range %lx "
- "overflow %llx\n",
+ debug_sprintf_event(sfdbg, 1, "head %#lx range %#lx "
+ "overflow %#llx\n",
aux->head, range, overflow);
} else {
size = AUX_SDB_NUM_ALERT(aux) << PAGE_SHIFT;
perf_aux_output_end(&cpuhw->handle, size);
- debug_sprintf_event(sfdbg, 6, "head %lx alert %lx "
+ debug_sprintf_event(sfdbg, 6, "head %#lx alert %#lx "
"already full, try another\n",
aux->head, aux->alert_mark);
}
@@ -1610,7 +1608,7 @@ static void hw_collect_aux(struct cpu_hw_sf *cpuhw)
if (done)
debug_sprintf_event(sfdbg, 6, "aux_reset_buffer: "
- "[%lx -> %lx -> %lx] (%lx, %lx)\n",
+ "[%#lx -> %#lx -> %#lx] (%#lx, %#lx)\n",
aux->head, aux->alert_mark, aux->empty_mark,
AUX_SDB_NUM_ALERT(aux), range);
}
@@ -1800,7 +1798,7 @@ static int cpumsf_pmu_check_period(struct perf_event *event, u64 value)
SAMPL_RATE(&event->hw) = rate;
hw_init_period(&event->hw, SAMPL_RATE(&event->hw));
debug_sprintf_event(sfdbg, 4, "cpumsf_pmu_check_period:"
- "cpu:%d value:%llx period:%llx freq:%d\n",
+ "cpu:%d value:%#llx period:%#llx freq:%d\n",
event->cpu, value,
event->attr.sample_period, do_freq);
return 0;
@@ -2111,7 +2109,7 @@ static int param_set_sfb_size(const char *val, const struct kernel_param *kp)
sfb_set_limits(min, max);
pr_info("The sampling buffer limits have changed to: "
- "min=%lu max=%lu (diag=x%lu)\n",
+ "min %lu max %lu (diag %lu)\n",
CPUM_SF_MIN_SDB, CPUM_SF_MAX_SDB, CPUM_SF_SDB_DIAG_FACTOR);
return 0;
}
@@ -2129,7 +2127,7 @@ static const struct kernel_param_ops param_ops_sfb_size = {
static void __init pr_cpumsf_err(unsigned int reason)
{
pr_err("Sampling facility support for perf is not available: "
- "reason=%04x\n", reason);
+ "reason %#x\n", reason);
}
static int __init init_cpum_sampling_pmu(void)
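Most of the perf_cpum_sf changes above are mechanical: messages gain the function name via __func__ instead of hard-coding it, and hexadecimal values switch from bare %lx/%llx to the alternate form %#lx/%#llx so the 0x prefix marks them unambiguously. For reference:

#include <stdio.h>

int main(void)
{
        unsigned long v = 0x2aa;

        printf("%lx vs %#lx\n", v, v);  /* prints "2aa vs 0x2aa" */
        return 0;
}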
diff --git a/arch/s390/kernel/perf_event.c b/arch/s390/kernel/perf_event.c
index fcb6c2e92b07..1e75cc983546 100644
--- a/arch/s390/kernel/perf_event.c
+++ b/arch/s390/kernel/perf_event.c
@@ -224,9 +224,13 @@ void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
struct pt_regs *regs)
{
struct unwind_state state;
+ unsigned long addr;
- unwind_for_each_frame(&state, current, regs, 0)
- perf_callchain_store(entry, state.ip);
+ unwind_for_each_frame(&state, current, regs, 0) {
+ addr = unwind_get_return_address(&state);
+ if (!addr || perf_callchain_store(entry, addr))
+ return;
+ }
}
/* Perf definitions for PMU event attributes in sysfs */
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index b0afec673f77..6ccef5f29761 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -40,6 +40,7 @@
#include <asm/stacktrace.h>
#include <asm/switch_to.h>
#include <asm/runtime_instr.h>
+#include <asm/unwind.h>
#include "entry.h"
asmlinkage void ret_from_fork(void) asm ("ret_from_fork");
@@ -178,9 +179,8 @@ EXPORT_SYMBOL(dump_fpu);
unsigned long get_wchan(struct task_struct *p)
{
- struct stack_frame *sf, *low, *high;
- unsigned long return_address;
- int count;
+ struct unwind_state state;
+ unsigned long ip = 0;
if (!p || p == current || p->state == TASK_RUNNING || !task_stack_page(p))
return 0;
@@ -188,26 +188,22 @@ unsigned long get_wchan(struct task_struct *p)
if (!try_get_task_stack(p))
return 0;
- low = task_stack_page(p);
- high = (struct stack_frame *) task_pt_regs(p);
- sf = (struct stack_frame *) p->thread.ksp;
- if (sf <= low || sf > high) {
- return_address = 0;
- goto out;
- }
- for (count = 0; count < 16; count++) {
- sf = (struct stack_frame *)READ_ONCE_NOCHECK(sf->back_chain);
- if (sf <= low || sf > high) {
- return_address = 0;
- goto out;
+ unwind_for_each_frame(&state, p, NULL, 0) {
+ if (state.stack_info.type != STACK_TYPE_TASK) {
+ ip = 0;
+ break;
}
- return_address = READ_ONCE_NOCHECK(sf->gprs[8]);
- if (!in_sched_functions(return_address))
- goto out;
+
+ ip = unwind_get_return_address(&state);
+ if (!ip)
+ break;
+
+ if (!in_sched_functions(ip))
+ break;
}
-out:
+
put_task_stack(p);
- return return_address;
+ return ip;
}
unsigned long arch_align_stack(unsigned long sp)
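get_wchan() previously walked the back-chain by hand with open-coded bounds checks and a fixed 16-frame limit; it now leans on the common unwinder, stopping at the first return address outside scheduler code, or bailing out on a zero address or when the unwinder leaves the task stack. A very loose userspace analogue of "walk the frames, inspect return addresses", using glibc's backtrace(3):

#include <execinfo.h>
#include <stdio.h>

static void walk_frames(void)
{
        void *ips[16];
        int i, n;

        n = backtrace(ips, 16);         /* fills ips[] with return addresses */
        for (i = 0; i < n; i++)
                printf("frame %d: %p\n", i, ips[i]);
}

int main(void)
{
        walk_frames();                  /* frame 0 is walk_frames itself */
        return 0;
}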
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 44974654cbd0..6acdcf1d4074 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -724,39 +724,67 @@ static void __ref smp_get_core_info(struct sclp_core_info *info, int early)
static int smp_add_present_cpu(int cpu);
-static int __smp_rescan_cpus(struct sclp_core_info *info, int sysfs_add)
+static int smp_add_core(struct sclp_core_entry *core, cpumask_t *avail,
+ bool configured, bool early)
{
struct pcpu *pcpu;
- cpumask_t avail;
- int cpu, nr, i, j;
+ int cpu, nr, i;
u16 address;
nr = 0;
- cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
- cpu = cpumask_first(&avail);
- for (i = 0; (i < info->combined) && (cpu < nr_cpu_ids); i++) {
- if (sclp.has_core_type && info->core[i].type != boot_core_type)
+ if (sclp.has_core_type && core->type != boot_core_type)
+ return nr;
+ cpu = cpumask_first(avail);
+ address = core->core_id << smp_cpu_mt_shift;
+ for (i = 0; (i <= smp_cpu_mtid) && (cpu < nr_cpu_ids); i++) {
+ if (pcpu_find_address(cpu_present_mask, address + i))
continue;
- address = info->core[i].core_id << smp_cpu_mt_shift;
- for (j = 0; j <= smp_cpu_mtid; j++) {
- if (pcpu_find_address(cpu_present_mask, address + j))
- continue;
- pcpu = pcpu_devices + cpu;
- pcpu->address = address + j;
- pcpu->state =
- (cpu >= info->configured*(smp_cpu_mtid + 1)) ?
- CPU_STATE_STANDBY : CPU_STATE_CONFIGURED;
- smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
- set_cpu_present(cpu, true);
- if (sysfs_add && smp_add_present_cpu(cpu) != 0)
- set_cpu_present(cpu, false);
- else
- nr++;
- cpu = cpumask_next(cpu, &avail);
- if (cpu >= nr_cpu_ids)
+ pcpu = pcpu_devices + cpu;
+ pcpu->address = address + i;
+ if (configured)
+ pcpu->state = CPU_STATE_CONFIGURED;
+ else
+ pcpu->state = CPU_STATE_STANDBY;
+ smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
+ set_cpu_present(cpu, true);
+ if (!early && smp_add_present_cpu(cpu) != 0)
+ set_cpu_present(cpu, false);
+ else
+ nr++;
+ cpumask_clear_cpu(cpu, avail);
+ cpu = cpumask_next(cpu, avail);
+ }
+ return nr;
+}
+
+static int __smp_rescan_cpus(struct sclp_core_info *info, bool early)
+{
+ struct sclp_core_entry *core;
+ cpumask_t avail;
+ bool configured;
+ u16 core_id;
+ int nr, i;
+
+ nr = 0;
+ cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
+ /*
+ * Add IPL core first (which got logical CPU number 0) to make sure
+ * that all SMT threads get subsequent logical CPU numbers.
+ */
+ if (early) {
+ core_id = pcpu_devices[0].address >> smp_cpu_mt_shift;
+ for (i = 0; i < info->configured; i++) {
+ core = &info->core[i];
+ if (core->core_id == core_id) {
+ nr += smp_add_core(core, &avail, true, early);
break;
+ }
}
}
+ for (i = 0; i < info->combined; i++) {
+ configured = i < info->configured;
+ nr += smp_add_core(&info->core[i], &avail, configured, early);
+ }
return nr;
}
@@ -805,7 +833,7 @@ void __init smp_detect_cpus(void)
/* Add CPUs present at boot */
get_online_cpus();
- __smp_rescan_cpus(info, 0);
+ __smp_rescan_cpus(info, true);
put_online_cpus();
memblock_free_early((unsigned long)info, sizeof(*info));
}
@@ -1148,7 +1176,7 @@ int __ref smp_rescan_cpus(void)
smp_get_core_info(info, 0);
get_online_cpus();
mutex_lock(&smp_cpu_state_mutex);
- nr = __smp_rescan_cpus(info, 1);
+ nr = __smp_rescan_cpus(info, false);
mutex_unlock(&smp_cpu_state_mutex);
put_online_cpus();
kfree(info);
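The rescan rework pulls the per-core loop out into smp_add_core() and, on the early boot pass, adds the IPL core's entry before everything else. Logical CPU numbers are handed out first-fit from the mask of still-available ids, so this ordering is what guarantees the boot CPU's SMT siblings end up as the low-numbered CPUs. A toy of the first-fit numbering:

#include <stdio.h>

static int grab_next(unsigned int *avail)
{
        int i;

        for (i = 0; i < 32; i++)
                if (*avail & (1u << i)) {
                        *avail &= ~(1u << i);
                        return i;
                }
        return -1;
}

int main(void)
{
        unsigned int avail = ~0u;
        int t, mtid = 1;                /* two threads per core */

        for (t = 0; t <= mtid; t++)     /* IPL core added first ... */
                printf("ipl thread %d -> cpu %d\n", t, grab_next(&avail));
        for (t = 0; t <= mtid; t++)     /* ... others follow behind */
                printf("core 7 thread %d -> cpu %d\n", t, grab_next(&avail));
        return 0;
}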
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index e8766beee5ad..f9d070d016e3 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -110,15 +110,6 @@ unsigned long long notrace sched_clock(void)
}
NOKPROBE_SYMBOL(sched_clock);
-/*
- * Monotonic_clock - returns # of nanoseconds passed since time_init()
- */
-unsigned long long monotonic_clock(void)
-{
- return sched_clock();
-}
-EXPORT_SYMBOL(monotonic_clock);
-
static void ext_to_timespec64(unsigned char *clk, struct timespec64 *xt)
{
unsigned long long high, low, rem, sec, nsec;
diff --git a/arch/s390/kernel/unwind_bc.c b/arch/s390/kernel/unwind_bc.c
index a8204f952315..fa111d3d378f 100644
--- a/arch/s390/kernel/unwind_bc.c
+++ b/arch/s390/kernel/unwind_bc.c
@@ -85,12 +85,7 @@ bool unwind_next_frame(struct unwind_state *state)
}
}
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- /* Decode any ftrace redirection */
- if (ip == (unsigned long) return_to_handler)
- ip = ftrace_graph_ret_addr(state->task, &state->graph_idx,
- ip, (void *) sp);
-#endif
+ ip = ftrace_graph_ret_addr(state->task, &state->graph_idx, ip, (void *) sp);
/* Update unwind state */
state->sp = sp;
@@ -147,12 +142,7 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task,
reuse_sp = false;
}
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- /* Decode any ftrace redirection */
- if (ip == (unsigned long) return_to_handler)
- ip = ftrace_graph_ret_addr(state->task, &state->graph_idx,
- ip, NULL);
-#endif
+ ip = ftrace_graph_ret_addr(state->task, &state->graph_idx, ip, NULL);
/* Update unwind state */
state->sp = sp;
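Both #ifdef blocks could go because ftrace_graph_ret_addr() already performs the return_to_handler comparison internally, and has an inline stub that simply hands the address back when the graph tracer is compiled out. The header pattern that makes unconditional call sites possible looks roughly like this (shape of include/linux/ftrace.h, abbreviated):

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
                                    unsigned long ret, unsigned long *retp);
#else
static inline unsigned long ftrace_graph_ret_addr(struct task_struct *task,
                                                  int *idx, unsigned long ret,
                                                  unsigned long *retp)
{
        return ret;     /* no redirection to undo */
}
#endif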
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index 7e0eb4020917..37695499717d 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -15,6 +15,8 @@
/* Handle ro_after_init data on our own. */
#define RO_AFTER_INIT_DATA
+#define EMITS_PT_NOTE
+
#include <asm-generic/vmlinux.lds.h>
#include <asm/vmlinux.lds.h>
@@ -50,11 +52,7 @@ SECTIONS
_etext = .; /* End of text section */
} :text = 0x0700
- NOTES :text :note
-
- .dummy : { *(.dummy) } :data
-
- RO_DATA_SECTION(PAGE_SIZE)
+ RO_DATA(PAGE_SIZE)
. = ALIGN(PAGE_SIZE);
_sdata = .; /* Start of data section */
@@ -64,12 +62,12 @@ SECTIONS
.data..ro_after_init : {
*(.data..ro_after_init)
JUMP_TABLE_DATA
- }
+ } :data
EXCEPTION_TABLE(16)
. = ALIGN(PAGE_SIZE);
__end_ro_after_init = .;
- RW_DATA_SECTION(0x100, PAGE_SIZE, THREAD_SIZE)
+ RW_DATA(0x100, PAGE_SIZE, THREAD_SIZE)
BOOT_DATA_PRESERVED
_edata = .; /* End of data section */
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index c475ca49cfc6..8df10d3c8f6c 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -247,9 +247,9 @@ void vtime_account_irq_enter(struct task_struct *tsk)
}
EXPORT_SYMBOL_GPL(vtime_account_irq_enter);
-void vtime_account_system(struct task_struct *tsk)
+void vtime_account_kernel(struct task_struct *tsk)
__attribute__((alias("vtime_account_irq_enter")));
-EXPORT_SYMBOL_GPL(vtime_account_system);
+EXPORT_SYMBOL_GPL(vtime_account_kernel);
/*
* Sorted add to a list. List is linear searched until first bigger
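The rename keeps vtime_account_irq_enter() as the real implementation and makes the new vtime_account_kernel symbol an alias for it, so no wrapper call is emitted. The alias attribute works the same way in a standalone program:

#include <stdio.h>

void real_entry(void)
{
        puts("accounted");
}

/* second name, same code: resolved at link time, no extra call */
void aliased_entry(void) __attribute__((alias("real_entry")));

int main(void)
{
        aliased_entry();
        return 0;
}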
diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c
index 45634b3d2e0a..3fb54ec2cf3e 100644
--- a/arch/s390/kvm/diag.c
+++ b/arch/s390/kvm/diag.c
@@ -158,14 +158,28 @@ static int __diag_time_slice_end_directed(struct kvm_vcpu *vcpu)
tid = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4];
vcpu->stat.diagnose_9c++;
- VCPU_EVENT(vcpu, 5, "diag time slice end directed to %d", tid);
+ /* yield to self */
if (tid == vcpu->vcpu_id)
- return 0;
+ goto no_yield;
+ /* yield to invalid */
tcpu = kvm_get_vcpu_by_id(vcpu->kvm, tid);
- if (tcpu)
- kvm_vcpu_yield_to(tcpu);
+ if (!tcpu)
+ goto no_yield;
+
+ /* target already running */
+ if (READ_ONCE(tcpu->cpu) >= 0)
+ goto no_yield;
+
+ if (kvm_vcpu_yield_to(tcpu) <= 0)
+ goto no_yield;
+
+ VCPU_EVENT(vcpu, 5, "diag time slice end directed to %d: done", tid);
+ return 0;
+no_yield:
+ VCPU_EVENT(vcpu, 5, "diag time slice end directed to %d: ignored", tid);
+ vcpu->stat.diagnose_9c_ignored++;
return 0;
}
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index d1ccc168c071..165dea4c7f19 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -1477,8 +1477,7 @@ static int __inject_sigp_stop(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
return 0;
}
-static int __inject_sigp_restart(struct kvm_vcpu *vcpu,
- struct kvm_s390_irq *irq)
+static int __inject_sigp_restart(struct kvm_vcpu *vcpu)
{
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
@@ -2007,7 +2006,7 @@ static int do_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
rc = __inject_sigp_stop(vcpu, irq);
break;
case KVM_S390_RESTART:
- rc = __inject_sigp_restart(vcpu, irq);
+ rc = __inject_sigp_restart(vcpu);
break;
case KVM_S390_INT_CLOCK_COMP:
rc = __inject_ckc(vcpu);
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index d047e846e1b9..d9e6bf3d54f0 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -155,6 +155,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
{ "instruction_diag_10", VCPU_STAT(diagnose_10) },
{ "instruction_diag_44", VCPU_STAT(diagnose_44) },
{ "instruction_diag_9c", VCPU_STAT(diagnose_9c) },
+ { "diag_9c_ignored", VCPU_STAT(diagnose_9c_ignored) },
{ "instruction_diag_258", VCPU_STAT(diagnose_258) },
{ "instruction_diag_308", VCPU_STAT(diagnose_308) },
{ "instruction_diag_500", VCPU_STAT(diagnose_500) },
@@ -453,16 +454,14 @@ static void kvm_s390_cpu_feat_init(void)
int kvm_arch_init(void *opaque)
{
- int rc;
+ int rc = -ENOMEM;
kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
if (!kvm_s390_dbf)
return -ENOMEM;
- if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view)) {
- rc = -ENOMEM;
- goto out_debug_unreg;
- }
+ if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view))
+ goto out;
kvm_s390_cpu_feat_init();
@@ -470,19 +469,17 @@ int kvm_arch_init(void *opaque)
rc = kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
if (rc) {
pr_err("A FLIC registration call failed with rc=%d\n", rc);
- goto out_debug_unreg;
+ goto out;
}
rc = kvm_s390_gib_init(GAL_ISC);
if (rc)
- goto out_gib_destroy;
+ goto out;
return 0;
-out_gib_destroy:
- kvm_s390_gib_destroy();
-out_debug_unreg:
- debug_unregister(kvm_s390_dbf);
+out:
+ kvm_arch_exit();
return rc;
}
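kvm_arch_init() now unwinds every failure through kvm_arch_exit() instead of a ladder of goto labels; the price is that kvm_arch_exit() must be safe to call with only part of the state initialized. The same shape in miniature (illustration only):

#include <stdlib.h>

struct state { void *dbf; void *gib; };

static void teardown(struct state *s)
{
        free(s->gib);                   /* free(NULL) is a no-op, so the */
        free(s->dbf);                   /* teardown tolerates partial init */
}

static int init(struct state *s)
{
        s->dbf = malloc(32);
        if (!s->dbf)
                goto out;
        s->gib = malloc(32);
        if (!s->gib)
                goto out;
        return 0;
out:
        teardown(s);
        return -1;
}

int main(void)
{
        struct state s = { 0 };

        if (init(&s))
                return 1;
        teardown(&s);
        return 0;
}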
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index 30a7c8c29964..ce1e4bbe53aa 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -74,7 +74,7 @@ static inline int arch_load_niai4(int *lock)
{
int owner;
- asm volatile(
+ asm_inline volatile(
ALTERNATIVE("", ".long 0xb2fa0040", 49) /* NIAI 4 */
" l %0,%1\n"
: "=d" (owner) : "Q" (*lock) : "memory");
@@ -85,7 +85,7 @@ static inline int arch_cmpxchg_niai8(int *lock, int old, int new)
{
int expected = old;
- asm volatile(
+ asm_inline volatile(
ALTERNATIVE("", ".long 0xb2fa0080", 49) /* NIAI 8 */
" cs %0,%3,%1\n"
: "=d" (old), "=Q" (*lock)
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index a124f19f7b3c..f0ce22220565 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -118,6 +118,7 @@ void __init paging_init(void)
sparse_memory_present_with_active_regions(MAX_NUMNODES);
sparse_init();
+ zone_dma_bits = 31;
memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
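This is the replacement for the ARCH_ZONE_DMA_BITS define removed from page.h earlier in the patch: the generic DMA code now reads the runtime variable zone_dma_bits, and s390 sets it to 31 because its DMA zone covers the low 2 GiB. The implied limit:

#include <stdio.h>

int main(void)
{
        unsigned int zone_dma_bits = 31;

        /* 1 << 31 bytes = 2 GiB addressable by 31-bit DMA */
        printf("%llu MiB\n", (1ULL << zone_dma_bits) >> 20);    /* 2048 */
        return 0;
}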
diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c
index 1864a8bb9622..59ad7997fed1 100644
--- a/arch/s390/mm/maccess.c
+++ b/arch/s390/mm/maccess.c
@@ -70,7 +70,7 @@ void notrace s390_kernel_write(void *dst, const void *src, size_t size)
spin_unlock_irqrestore(&s390_kernel_write_lock, flags);
}
-static int __memcpy_real(void *dest, void *src, size_t count)
+static int __no_sanitize_address __memcpy_real(void *dest, void *src, size_t count)
{
register unsigned long _dest asm("2") = (unsigned long) dest;
register unsigned long _len1 asm("3") = (unsigned long) count;
@@ -91,19 +91,23 @@ static int __memcpy_real(void *dest, void *src, size_t count)
return rc;
}
-static unsigned long _memcpy_real(unsigned long dest, unsigned long src,
- unsigned long count)
+static unsigned long __no_sanitize_address _memcpy_real(unsigned long dest,
+ unsigned long src,
+ unsigned long count)
{
int irqs_disabled, rc;
unsigned long flags;
if (!count)
return 0;
- flags = __arch_local_irq_stnsm(0xf8UL);
+ flags = arch_local_irq_save();
irqs_disabled = arch_irqs_disabled_flags(flags);
if (!irqs_disabled)
trace_hardirqs_off();
+ __arch_local_irq_stnsm(0xf8); // disable DAT
rc = __memcpy_real((void *) dest, (void *) src, (size_t) count);
+ if (flags & PSW_MASK_DAT)
+ __arch_local_irq_stosm(0x04); // enable DAT
if (!irqs_disabled)
trace_hardirqs_on();
__arch_local_irq_ssm(flags);
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index ce88211b9c6c..8d2134136290 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -23,6 +23,8 @@
#include <linux/filter.h>
#include <linux/init.h>
#include <linux/bpf.h>
+#include <linux/mm.h>
+#include <linux/kernel.h>
#include <asm/cacheflush.h>
#include <asm/dis.h>
#include <asm/facility.h>
@@ -38,10 +40,11 @@ struct bpf_jit {
int size; /* Size of program and literal pool */
int size_prg; /* Size of program */
int prg; /* Current position in program */
- int lit_start; /* Start of literal pool */
- int lit; /* Current position in literal pool */
+ int lit32_start; /* Start of 32-bit literal pool */
+ int lit32; /* Current position in 32-bit literal pool */
+ int lit64_start; /* Start of 64-bit literal pool */
+ int lit64; /* Current position in 64-bit literal pool */
int base_ip; /* Base address for literal pool */
- int ret0_ip; /* Address of return 0 */
int exit_ip; /* Address of exit */
int r1_thunk_ip; /* Address of expoline thunk for 'br %r1' */
int r14_thunk_ip; /* Address of expoline thunk for 'br %r14' */
@@ -49,14 +52,10 @@ struct bpf_jit {
int labels[1]; /* Labels for local jumps */
};
-#define BPF_SIZE_MAX 0xffff /* Max size for program (16 bit branches) */
-
-#define SEEN_MEM (1 << 0) /* use mem[] for temporary storage */
-#define SEEN_RET0 (1 << 1) /* ret0_ip points to a valid return 0 */
-#define SEEN_LITERAL (1 << 2) /* code uses literals */
-#define SEEN_FUNC (1 << 3) /* calls C functions */
-#define SEEN_TAIL_CALL (1 << 4) /* code uses tail calls */
-#define SEEN_REG_AX (1 << 5) /* code uses constant blinding */
+#define SEEN_MEM BIT(0) /* use mem[] for temporary storage */
+#define SEEN_LITERAL BIT(1) /* code uses literals */
+#define SEEN_FUNC BIT(2) /* calls C functions */
+#define SEEN_TAIL_CALL BIT(3) /* code uses tail calls */
#define SEEN_STACK (SEEN_FUNC | SEEN_MEM)
/*
@@ -131,13 +130,13 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1)
#define _EMIT2(op) \
({ \
if (jit->prg_buf) \
- *(u16 *) (jit->prg_buf + jit->prg) = op; \
+ *(u16 *) (jit->prg_buf + jit->prg) = (op); \
jit->prg += 2; \
})
#define EMIT2(op, b1, b2) \
({ \
- _EMIT2(op | reg(b1, b2)); \
+ _EMIT2((op) | reg(b1, b2)); \
REG_SET_SEEN(b1); \
REG_SET_SEEN(b2); \
})
@@ -145,20 +144,20 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1)
#define _EMIT4(op) \
({ \
if (jit->prg_buf) \
- *(u32 *) (jit->prg_buf + jit->prg) = op; \
+ *(u32 *) (jit->prg_buf + jit->prg) = (op); \
jit->prg += 4; \
})
#define EMIT4(op, b1, b2) \
({ \
- _EMIT4(op | reg(b1, b2)); \
+ _EMIT4((op) | reg(b1, b2)); \
REG_SET_SEEN(b1); \
REG_SET_SEEN(b2); \
})
#define EMIT4_RRF(op, b1, b2, b3) \
({ \
- _EMIT4(op | reg_high(b3) << 8 | reg(b1, b2)); \
+ _EMIT4((op) | reg_high(b3) << 8 | reg(b1, b2)); \
REG_SET_SEEN(b1); \
REG_SET_SEEN(b2); \
REG_SET_SEEN(b3); \
@@ -167,13 +166,13 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1)
#define _EMIT4_DISP(op, disp) \
({ \
unsigned int __disp = (disp) & 0xfff; \
- _EMIT4(op | __disp); \
+ _EMIT4((op) | __disp); \
})
#define EMIT4_DISP(op, b1, b2, disp) \
({ \
- _EMIT4_DISP(op | reg_high(b1) << 16 | \
- reg_high(b2) << 8, disp); \
+ _EMIT4_DISP((op) | reg_high(b1) << 16 | \
+ reg_high(b2) << 8, (disp)); \
REG_SET_SEEN(b1); \
REG_SET_SEEN(b2); \
})
@@ -181,21 +180,27 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1)
#define EMIT4_IMM(op, b1, imm) \
({ \
unsigned int __imm = (imm) & 0xffff; \
- _EMIT4(op | reg_high(b1) << 16 | __imm); \
+ _EMIT4((op) | reg_high(b1) << 16 | __imm); \
REG_SET_SEEN(b1); \
})
#define EMIT4_PCREL(op, pcrel) \
({ \
long __pcrel = ((pcrel) >> 1) & 0xffff; \
- _EMIT4(op | __pcrel); \
+ _EMIT4((op) | __pcrel); \
+})
+
+#define EMIT4_PCREL_RIC(op, mask, target) \
+({ \
+ int __rel = ((target) - jit->prg) / 2; \
+ _EMIT4((op) | (mask) << 20 | (__rel & 0xffff)); \
})
#define _EMIT6(op1, op2) \
({ \
if (jit->prg_buf) { \
- *(u32 *) (jit->prg_buf + jit->prg) = op1; \
- *(u16 *) (jit->prg_buf + jit->prg + 4) = op2; \
+ *(u32 *) (jit->prg_buf + jit->prg) = (op1); \
+ *(u16 *) (jit->prg_buf + jit->prg + 4) = (op2); \
} \
jit->prg += 6; \
})
@@ -203,20 +208,20 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1)
#define _EMIT6_DISP(op1, op2, disp) \
({ \
unsigned int __disp = (disp) & 0xfff; \
- _EMIT6(op1 | __disp, op2); \
+ _EMIT6((op1) | __disp, op2); \
})
#define _EMIT6_DISP_LH(op1, op2, disp) \
({ \
- u32 _disp = (u32) disp; \
+ u32 _disp = (u32) (disp); \
unsigned int __disp_h = _disp & 0xff000; \
unsigned int __disp_l = _disp & 0x00fff; \
- _EMIT6(op1 | __disp_l, op2 | __disp_h >> 4); \
+ _EMIT6((op1) | __disp_l, (op2) | __disp_h >> 4); \
})
#define EMIT6_DISP_LH(op1, op2, b1, b2, b3, disp) \
({ \
- _EMIT6_DISP_LH(op1 | reg(b1, b2) << 16 | \
+ _EMIT6_DISP_LH((op1) | reg(b1, b2) << 16 | \
reg_high(b3) << 8, op2, disp); \
REG_SET_SEEN(b1); \
REG_SET_SEEN(b2); \
@@ -226,8 +231,8 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1)
#define EMIT6_PCREL_LABEL(op1, op2, b1, b2, label, mask) \
({ \
int rel = (jit->labels[label] - jit->prg) >> 1; \
- _EMIT6(op1 | reg(b1, b2) << 16 | (rel & 0xffff), \
- op2 | mask << 12); \
+ _EMIT6((op1) | reg(b1, b2) << 16 | (rel & 0xffff), \
+ (op2) | (mask) << 12); \
REG_SET_SEEN(b1); \
REG_SET_SEEN(b2); \
})
@@ -235,68 +240,83 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1)
#define EMIT6_PCREL_IMM_LABEL(op1, op2, b1, imm, label, mask) \
({ \
int rel = (jit->labels[label] - jit->prg) >> 1; \
- _EMIT6(op1 | (reg_high(b1) | mask) << 16 | \
- (rel & 0xffff), op2 | (imm & 0xff) << 8); \
+ _EMIT6((op1) | (reg_high(b1) | (mask)) << 16 | \
+ (rel & 0xffff), (op2) | ((imm) & 0xff) << 8); \
REG_SET_SEEN(b1); \
- BUILD_BUG_ON(((unsigned long) imm) > 0xff); \
+ BUILD_BUG_ON(((unsigned long) (imm)) > 0xff); \
})
#define EMIT6_PCREL(op1, op2, b1, b2, i, off, mask) \
({ \
/* Branch instruction needs 6 bytes */ \
- int rel = (addrs[i + off + 1] - (addrs[i + 1] - 6)) / 2;\
- _EMIT6(op1 | reg(b1, b2) << 16 | (rel & 0xffff), op2 | mask); \
+ int rel = (addrs[(i) + (off) + 1] - (addrs[(i) + 1] - 6)) / 2;\
+ _EMIT6((op1) | reg(b1, b2) << 16 | (rel & 0xffff), (op2) | (mask));\
REG_SET_SEEN(b1); \
REG_SET_SEEN(b2); \
})
#define EMIT6_PCREL_RILB(op, b, target) \
({ \
- int rel = (target - jit->prg) / 2; \
- _EMIT6(op | reg_high(b) << 16 | rel >> 16, rel & 0xffff); \
+ unsigned int rel = (int)((target) - jit->prg) / 2; \
+ _EMIT6((op) | reg_high(b) << 16 | rel >> 16, rel & 0xffff);\
REG_SET_SEEN(b); \
})
#define EMIT6_PCREL_RIL(op, target) \
({ \
- int rel = (target - jit->prg) / 2; \
- _EMIT6(op | rel >> 16, rel & 0xffff); \
+ unsigned int rel = (int)((target) - jit->prg) / 2; \
+ _EMIT6((op) | rel >> 16, rel & 0xffff); \
+})
+
+#define EMIT6_PCREL_RILC(op, mask, target) \
+({ \
+ EMIT6_PCREL_RIL((op) | (mask) << 20, (target)); \
})
#define _EMIT6_IMM(op, imm) \
({ \
unsigned int __imm = (imm); \
- _EMIT6(op | (__imm >> 16), __imm & 0xffff); \
+ _EMIT6((op) | (__imm >> 16), __imm & 0xffff); \
})
#define EMIT6_IMM(op, b1, imm) \
({ \
- _EMIT6_IMM(op | reg_high(b1) << 16, imm); \
+ _EMIT6_IMM((op) | reg_high(b1) << 16, imm); \
REG_SET_SEEN(b1); \
})
-#define EMIT_CONST_U32(val) \
+#define _EMIT_CONST_U32(val) \
({ \
unsigned int ret; \
- ret = jit->lit - jit->base_ip; \
- jit->seen |= SEEN_LITERAL; \
+ ret = jit->lit32; \
if (jit->prg_buf) \
- *(u32 *) (jit->prg_buf + jit->lit) = (u32) val; \
- jit->lit += 4; \
+ *(u32 *)(jit->prg_buf + jit->lit32) = (u32)(val);\
+ jit->lit32 += 4; \
ret; \
})
-#define EMIT_CONST_U64(val) \
+#define EMIT_CONST_U32(val) \
({ \
- unsigned int ret; \
- ret = jit->lit - jit->base_ip; \
jit->seen |= SEEN_LITERAL; \
+ _EMIT_CONST_U32(val) - jit->base_ip; \
+})
+
+#define _EMIT_CONST_U64(val) \
+({ \
+ unsigned int ret; \
+ ret = jit->lit64; \
if (jit->prg_buf) \
- *(u64 *) (jit->prg_buf + jit->lit) = (u64) val; \
- jit->lit += 8; \
+ *(u64 *)(jit->prg_buf + jit->lit64) = (u64)(val);\
+ jit->lit64 += 8; \
ret; \
})
+#define EMIT_CONST_U64(val) \
+({ \
+ jit->seen |= SEEN_LITERAL; \
+ _EMIT_CONST_U64(val) - jit->base_ip; \
+})
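
The split above separates two return conventions: _EMIT_CONST_U64() yields the literal's absolute offset within the JIT image (what a PC-relative lgrl wants), while the EMIT_CONST_U64() wrapper keeps the old contract of a displacement relative to the literal-pool base register %l. A hedged user-space model of the two conventions (the struct and its values are invented for illustration):

#include <stdio.h>

struct jit_model {
	unsigned int base_ip;   /* what %l points at */
	unsigned int lit64;     /* next free 64-bit literal slot */
};

/* Absolute image offset: what a PC-relative LGRL would target. */
static unsigned int emit_const_abs(struct jit_model *jit)
{
	unsigned int ret = jit->lit64;

	jit->lit64 += 8;
	return ret;
}

/* Base-relative displacement: what LG d(%l) would encode. */
static unsigned int emit_const_disp(struct jit_model *jit)
{
	return emit_const_abs(jit) - jit->base_ip;
}

int main(void)
{
	struct jit_model jit = { .base_ip = 0x100, .lit64 = 0x180 };

	printf("abs=%#x\n", emit_const_abs(&jit));   /* 0x180 */
	printf("disp=%#x\n", emit_const_disp(&jit)); /* 0x188 - 0x100 = 0x88 */
	return 0;
}
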
+
#define EMIT_ZERO(b1) \
({ \
if (!fp->aux->verifier_zext) { \
@@ -307,6 +327,67 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1)
})
/*
+ * Return whether this is the first pass. The first pass is special, since we
+ * don't know any sizes yet, and thus must be conservative.
+ */
+static bool is_first_pass(struct bpf_jit *jit)
+{
+ return jit->size == 0;
+}
+
+/*
+ * Return whether this is the code generation pass. The code generation pass is
+ * special, since we should change as little as possible.
+ */
+static bool is_codegen_pass(struct bpf_jit *jit)
+{
+ return jit->prg_buf;
+}
+
+/*
+ * Return whether "rel" can be encoded as a short PC-relative offset
+ */
+static bool is_valid_rel(int rel)
+{
+ return rel >= -65536 && rel <= 65534;
+}
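
The bounds come from the 16-bit signed halfword offset carried by the short relative-branch formats: rel = 2 * h with h in [-32768, 32767], so valid byte offsets are the even values in [-65536, 65534]. A quick check of that derivation (a stand-alone copy of the predicate, for illustration only):

#include <assert.h>

static int is_valid_rel(int rel)
{
	return rel >= -65536 && rel <= 65534;
}

int main(void)
{
	/* Signed 16-bit halfword counts, scaled to bytes. */
	assert(is_valid_rel(2 * -32768));   /* -65536: still encodable */
	assert(is_valid_rel(2 * 32767));    /*  65534: still encodable */
	assert(!is_valid_rel(2 * 32768));   /*  65536: needs brcl      */
	return 0;
}
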
+
+/*
+ * Return whether "off" can be reached using a short PC-relative offset
+ */
+static bool can_use_rel(struct bpf_jit *jit, int off)
+{
+ return is_valid_rel(off - jit->prg);
+}
+
+/*
+ * Return whether the given displacement can be encoded using the
+ * Long-Displacement Facility
+ */
+static bool is_valid_ldisp(int disp)
+{
+ return disp >= -524288 && disp <= 524287;
+}
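
Similarly, this mirrors the 20-bit signed displacement field of the long-displacement instruction formats, i.e. [-2^19, 2^19 - 1]. A matching sanity check (again a stand-alone copy, assuming that field width):

#include <assert.h>

static int is_valid_ldisp(int disp)
{
	return disp >= -524288 && disp <= 524287;
}

int main(void)
{
	assert(is_valid_ldisp(-(1 << 19)));     /* -524288 */
	assert(is_valid_ldisp((1 << 19) - 1));  /*  524287 */
	assert(!is_valid_ldisp(1 << 19));       /* one past the field */
	return 0;
}
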
+
+/*
+ * Return whether the next 32-bit literal pool entry can be referenced using
+ * the Long-Displacement Facility
+ */
+static bool can_use_ldisp_for_lit32(struct bpf_jit *jit)
+{
+ return is_valid_ldisp(jit->lit32 - jit->base_ip);
+}
+
+/*
+ * Return whether the next 64-bit literal pool entry can be referenced using
+ * the Long-Displacement Facility
+ */
+static bool can_use_ldisp_for_lit64(struct bpf_jit *jit)
+{
+ return is_valid_ldisp(jit->lit64 - jit->base_ip);
+}
+
+/*
* Fill whole space with illegal instructions
*/
static void jit_fill_hole(void *area, unsigned int size)
@@ -383,9 +464,18 @@ static int get_end(struct bpf_jit *jit, int start)
*/
static void save_restore_regs(struct bpf_jit *jit, int op, u32 stack_depth)
{
-
+ const int last = 15, save_restore_size = 6;
int re = 6, rs;
+ if (is_first_pass(jit)) {
+ /*
+ * We don't know yet which registers are used. Reserve space
+ * conservatively.
+ */
+ jit->prg += (last - re + 1) * save_restore_size;
+ return;
+ }
+
do {
rs = get_start(jit, re);
if (!rs)
@@ -396,7 +486,7 @@ static void save_restore_regs(struct bpf_jit *jit, int op, u32 stack_depth)
else
restore_regs(jit, rs, re, stack_depth);
re++;
- } while (re <= 15);
+ } while (re <= last);
}
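
The conservative reservation is simple worst-case arithmetic: with re starting at 6, space is set aside for one 6-byte save/restore instruction per register in %r6..%r15, i.e. (15 - 6 + 1) * 6 = 60 bytes, enough even if no two used registers are adjacent and nothing can be batched into a range. Spelled out:

#include <assert.h>

int main(void)
{
	const int first = 6, last = 15, save_restore_size = 6;

	/*
	 * Worst case: no two used registers are adjacent, so every
	 * register in %r6..%r15 needs its own 6-byte STMG/LMG.
	 */
	assert((last - first + 1) * save_restore_size == 60);
	return 0;
}
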
/*
@@ -420,21 +510,28 @@ static void bpf_jit_prologue(struct bpf_jit *jit, u32 stack_depth)
/* Save registers */
save_restore_regs(jit, REGS_SAVE, stack_depth);
/* Setup literal pool */
- if (jit->seen & SEEN_LITERAL) {
- /* basr %r13,0 */
- EMIT2(0x0d00, REG_L, REG_0);
- jit->base_ip = jit->prg;
+ if (is_first_pass(jit) || (jit->seen & SEEN_LITERAL)) {
+ if (!is_first_pass(jit) &&
+ is_valid_ldisp(jit->size - (jit->prg + 2))) {
+ /* basr %l,0 */
+ EMIT2(0x0d00, REG_L, REG_0);
+ jit->base_ip = jit->prg;
+ } else {
+ /* larl %l,lit32_start */
+ EMIT6_PCREL_RILB(0xc0000000, REG_L, jit->lit32_start);
+ jit->base_ip = jit->lit32_start;
+ }
}
/* Setup stack and backchain */
- if (jit->seen & SEEN_STACK) {
- if (jit->seen & SEEN_FUNC)
+ if (is_first_pass(jit) || (jit->seen & SEEN_STACK)) {
+ if (is_first_pass(jit) || (jit->seen & SEEN_FUNC))
/* lgr %w1,%r15 (backchain) */
EMIT4(0xb9040000, REG_W1, REG_15);
/* la %bfp,STK_160_UNUSED(%r15) (BPF frame pointer) */
EMIT4_DISP(0x41000000, BPF_REG_FP, REG_15, STK_160_UNUSED);
/* aghi %r15,-STK_OFF */
EMIT4_IMM(0xa70b0000, REG_15, -(STK_OFF + stack_depth));
- if (jit->seen & SEEN_FUNC)
+ if (is_first_pass(jit) || (jit->seen & SEEN_FUNC))
/* stg %w1,152(%r15) (backchain) */
EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0,
REG_15, 152);
@@ -446,12 +543,6 @@ static void bpf_jit_prologue(struct bpf_jit *jit, u32 stack_depth)
*/
static void bpf_jit_epilogue(struct bpf_jit *jit, u32 stack_depth)
{
- /* Return 0 */
- if (jit->seen & SEEN_RET0) {
- jit->ret0_ip = jit->prg;
- /* lghi %b0,0 */
- EMIT4_IMM(0xa7090000, BPF_REG_0, 0);
- }
jit->exit_ip = jit->prg;
/* Load exit code: lgr %r2,%b0 */
EMIT4(0xb9040000, REG_2, BPF_REG_0);
@@ -476,7 +567,7 @@ static void bpf_jit_epilogue(struct bpf_jit *jit, u32 stack_depth)
_EMIT2(0x07fe);
if (__is_defined(CC_USING_EXPOLINE) && !nospec_disable &&
- (jit->seen & SEEN_FUNC)) {
+ (is_first_pass(jit) || (jit->seen & SEEN_FUNC))) {
jit->r1_thunk_ip = jit->prg;
/* Generate __s390_indirect_jump_r1 thunk */
if (test_facility(35)) {
@@ -506,16 +597,14 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
int i, bool extra_pass)
{
struct bpf_insn *insn = &fp->insnsi[i];
- int jmp_off, last, insn_count = 1;
u32 dst_reg = insn->dst_reg;
u32 src_reg = insn->src_reg;
+ int last, insn_count = 1;
u32 *addrs = jit->addrs;
s32 imm = insn->imm;
s16 off = insn->off;
unsigned int mask;
- if (dst_reg == BPF_REG_AX || src_reg == BPF_REG_AX)
- jit->seen |= SEEN_REG_AX;
switch (insn->code) {
/*
* BPF_MOV
@@ -549,9 +638,8 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
u64 imm64;
imm64 = (u64)(u32) insn[0].imm | ((u64)(u32) insn[1].imm) << 32;
- /* lg %dst,<d(imm)>(%l) */
- EMIT6_DISP_LH(0xe3000000, 0x0004, dst_reg, REG_0, REG_L,
- EMIT_CONST_U64(imm64));
+ /* lgrl %dst,imm */
+ EMIT6_PCREL_RILB(0xc4080000, dst_reg, _EMIT_CONST_U64(imm64));
insn_count = 2;
break;
}
@@ -680,9 +768,18 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
EMIT4_IMM(0xa7080000, REG_W0, 0);
/* lr %w1,%dst */
EMIT2(0x1800, REG_W1, dst_reg);
- /* dl %w0,<d(imm)>(%l) */
- EMIT6_DISP_LH(0xe3000000, 0x0097, REG_W0, REG_0, REG_L,
- EMIT_CONST_U32(imm));
+ if (!is_first_pass(jit) && can_use_ldisp_for_lit32(jit)) {
+ /* dl %w0,<d(imm)>(%l) */
+ EMIT6_DISP_LH(0xe3000000, 0x0097, REG_W0, REG_0, REG_L,
+ EMIT_CONST_U32(imm));
+ } else {
+ /* lgfrl %dst,imm */
+ EMIT6_PCREL_RILB(0xc40c0000, dst_reg,
+ _EMIT_CONST_U32(imm));
+ jit->seen |= SEEN_LITERAL;
+ /* dlr %w0,%dst */
+ EMIT4(0xb9970000, REG_W0, dst_reg);
+ }
/* llgfr %dst,%rc */
EMIT4(0xb9160000, dst_reg, rc_reg);
if (insn_is_zext(&insn[1]))
@@ -704,9 +801,18 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
EMIT4_IMM(0xa7090000, REG_W0, 0);
/* lgr %w1,%dst */
EMIT4(0xb9040000, REG_W1, dst_reg);
- /* dlg %w0,<d(imm)>(%l) */
- EMIT6_DISP_LH(0xe3000000, 0x0087, REG_W0, REG_0, REG_L,
- EMIT_CONST_U64(imm));
+ if (!is_first_pass(jit) && can_use_ldisp_for_lit64(jit)) {
+ /* dlg %w0,<d(imm)>(%l) */
+ EMIT6_DISP_LH(0xe3000000, 0x0087, REG_W0, REG_0, REG_L,
+ EMIT_CONST_U64(imm));
+ } else {
+ /* lgrl %dst,imm */
+ EMIT6_PCREL_RILB(0xc4080000, dst_reg,
+ _EMIT_CONST_U64(imm));
+ jit->seen |= SEEN_LITERAL;
+ /* dlgr %w0,%dst */
+ EMIT4(0xb9870000, REG_W0, dst_reg);
+ }
/* lgr %dst,%rc */
EMIT4(0xb9040000, dst_reg, rc_reg);
break;
@@ -729,9 +835,19 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
EMIT_ZERO(dst_reg);
break;
case BPF_ALU64 | BPF_AND | BPF_K: /* dst = dst & imm */
- /* ng %dst,<d(imm)>(%l) */
- EMIT6_DISP_LH(0xe3000000, 0x0080, dst_reg, REG_0, REG_L,
- EMIT_CONST_U64(imm));
+ if (!is_first_pass(jit) && can_use_ldisp_for_lit64(jit)) {
+ /* ng %dst,<d(imm)>(%l) */
+ EMIT6_DISP_LH(0xe3000000, 0x0080,
+ dst_reg, REG_0, REG_L,
+ EMIT_CONST_U64(imm));
+ } else {
+ /* lgrl %w0,imm */
+ EMIT6_PCREL_RILB(0xc4080000, REG_W0,
+ _EMIT_CONST_U64(imm));
+ jit->seen |= SEEN_LITERAL;
+ /* ngr %dst,%w0 */
+ EMIT4(0xb9800000, dst_reg, REG_W0);
+ }
break;
/*
* BPF_OR
@@ -751,9 +867,19 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
EMIT_ZERO(dst_reg);
break;
case BPF_ALU64 | BPF_OR | BPF_K: /* dst = dst | imm */
- /* og %dst,<d(imm)>(%l) */
- EMIT6_DISP_LH(0xe3000000, 0x0081, dst_reg, REG_0, REG_L,
- EMIT_CONST_U64(imm));
+ if (!is_first_pass(jit) && can_use_ldisp_for_lit64(jit)) {
+ /* og %dst,<d(imm)>(%l) */
+ EMIT6_DISP_LH(0xe3000000, 0x0081,
+ dst_reg, REG_0, REG_L,
+ EMIT_CONST_U64(imm));
+ } else {
+ /* lgrl %w0,imm */
+ EMIT6_PCREL_RILB(0xc4080000, REG_W0,
+ _EMIT_CONST_U64(imm));
+ jit->seen |= SEEN_LITERAL;
+ /* ogr %dst,%w0 */
+ EMIT4(0xb9810000, dst_reg, REG_W0);
+ }
break;
/*
* BPF_XOR
@@ -775,9 +901,19 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
EMIT_ZERO(dst_reg);
break;
case BPF_ALU64 | BPF_XOR | BPF_K: /* dst = dst ^ imm */
- /* xg %dst,<d(imm)>(%l) */
- EMIT6_DISP_LH(0xe3000000, 0x0082, dst_reg, REG_0, REG_L,
- EMIT_CONST_U64(imm));
+ if (!is_first_pass(jit) && can_use_ldisp_for_lit64(jit)) {
+ /* xg %dst,<d(imm)>(%l) */
+ EMIT6_DISP_LH(0xe3000000, 0x0082,
+ dst_reg, REG_0, REG_L,
+ EMIT_CONST_U64(imm));
+ } else {
+ /* lgrl %w0,imm */
+ EMIT6_PCREL_RILB(0xc4080000, REG_W0,
+ _EMIT_CONST_U64(imm));
+ jit->seen |= SEEN_LITERAL;
+ /* xgr %dst,%w0 */
+ EMIT4(0xb9820000, dst_reg, REG_W0);
+ }
break;
/*
* BPF_LSH
@@ -1023,9 +1159,8 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
REG_SET_SEEN(BPF_REG_5);
jit->seen |= SEEN_FUNC;
- /* lg %w1,<d(imm)>(%l) */
- EMIT6_DISP_LH(0xe3000000, 0x0004, REG_W1, REG_0, REG_L,
- EMIT_CONST_U64(func));
+ /* lgrl %w1,func */
+ EMIT6_PCREL_RILB(0xc4080000, REG_W1, _EMIT_CONST_U64(func));
if (__is_defined(CC_USING_EXPOLINE) && !nospec_disable) {
/* brasl %r14,__s390_indirect_jump_r1 */
EMIT6_PCREL_RILB(0xc0050000, REG_14, jit->r1_thunk_ip);
@@ -1054,9 +1189,17 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
/* llgf %w1,map.max_entries(%b2) */
EMIT6_DISP_LH(0xe3000000, 0x0016, REG_W1, REG_0, BPF_REG_2,
offsetof(struct bpf_array, map.max_entries));
- /* clrj %b3,%w1,0xa,label0: if (u32)%b3 >= (u32)%w1 goto out */
- EMIT6_PCREL_LABEL(0xec000000, 0x0077, BPF_REG_3,
- REG_W1, 0, 0xa);
+ /* if ((u32)%b3 >= (u32)%w1) goto out; */
+ if (!is_first_pass(jit) && can_use_rel(jit, jit->labels[0])) {
+ /* clrj %b3,%w1,0xa,label0 */
+ EMIT6_PCREL_LABEL(0xec000000, 0x0077, BPF_REG_3,
+ REG_W1, 0, 0xa);
+ } else {
+ /* clr %b3,%w1 */
+ EMIT2(0x1500, BPF_REG_3, REG_W1);
+ /* brcl 0xa,label0 */
+ EMIT6_PCREL_RILC(0xc0040000, 0xa, jit->labels[0]);
+ }
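
This is the branch-relaxation shape repeated for every conditional branch in this patch: the first pass, knowing no sizes, assumes the long form (separate compare plus brcl with a 32-bit halfword offset); later passes may only shrink code, so once a target provably fits a 16-bit offset the fused compare-and-branch is safe. A user-space model of just the size decision (the instruction lengths are the real ones, everything else is illustrative):

#include <stdbool.h>
#include <stdio.h>

struct jit_model { int size, prg; };

static bool is_first_pass(const struct jit_model *jit)
{
	return jit->size == 0;
}

static bool can_use_rel(const struct jit_model *jit, int off)
{
	int rel = off - jit->prg;

	return rel >= -65536 && rel <= 65534;
}

/* Returns the bytes emitted for one conditional branch to "target". */
static int emit_cond_branch(struct jit_model *jit, int target)
{
	if (!is_first_pass(jit) && can_use_rel(jit, target))
		return 6;       /* clrj: fused compare and short branch  */
	return 2 + 6;           /* clr + brcl: compare, then long branch */
}

int main(void)
{
	struct jit_model first = { .size = 0, .prg = 0 };
	struct jit_model later = { .size = 4096, .prg = 100 };

	printf("first pass: %d bytes\n", emit_cond_branch(&first, 200)); /* 8 */
	printf("later pass: %d bytes\n", emit_cond_branch(&later, 200)); /* 6 */
	return 0;
}
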
/*
* if (tail_call_cnt++ > MAX_TAIL_CALL_CNT)
@@ -1071,9 +1214,16 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
EMIT4_IMM(0xa7080000, REG_W0, 1);
/* laal %w1,%w0,off(%r15) */
EMIT6_DISP_LH(0xeb000000, 0x00fa, REG_W1, REG_W0, REG_15, off);
- /* clij %w1,MAX_TAIL_CALL_CNT,0x2,label0 */
- EMIT6_PCREL_IMM_LABEL(0xec000000, 0x007f, REG_W1,
- MAX_TAIL_CALL_CNT, 0, 0x2);
+ if (!is_first_pass(jit) && can_use_rel(jit, jit->labels[0])) {
+ /* clij %w1,MAX_TAIL_CALL_CNT,0x2,label0 */
+ EMIT6_PCREL_IMM_LABEL(0xec000000, 0x007f, REG_W1,
+ MAX_TAIL_CALL_CNT, 0, 0x2);
+ } else {
+ /* clfi %w1,MAX_TAIL_CALL_CNT */
+ EMIT6_IMM(0xc20f0000, REG_W1, MAX_TAIL_CALL_CNT);
+ /* brcl 0x2,label0 */
+ EMIT6_PCREL_RILC(0xc0040000, 0x2, jit->labels[0]);
+ }
/*
* prog = array->ptrs[index];
@@ -1085,11 +1235,16 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
EMIT4(0xb9160000, REG_1, BPF_REG_3);
/* sllg %r1,%r1,3: %r1 *= 8 */
EMIT6_DISP_LH(0xeb000000, 0x000d, REG_1, REG_1, REG_0, 3);
- /* lg %r1,prog(%b2,%r1) */
- EMIT6_DISP_LH(0xe3000000, 0x0004, REG_1, BPF_REG_2,
+ /* ltg %r1,prog(%b2,%r1) */
+ EMIT6_DISP_LH(0xe3000000, 0x0002, REG_1, BPF_REG_2,
REG_1, offsetof(struct bpf_array, ptrs));
- /* clgij %r1,0,0x8,label0 */
- EMIT6_PCREL_IMM_LABEL(0xec000000, 0x007d, REG_1, 0, 0, 0x8);
+ if (!is_first_pass(jit) && can_use_rel(jit, jit->labels[0])) {
+ /* brc 0x8,label0 */
+ EMIT4_PCREL_RIC(0xa7040000, 0x8, jit->labels[0]);
+ } else {
+ /* brcl 0x8,label0 */
+ EMIT6_PCREL_RILC(0xc0040000, 0x8, jit->labels[0]);
+ }
/*
* Restore registers before calling function
@@ -1110,7 +1265,7 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
break;
case BPF_JMP | BPF_EXIT: /* return b0 */
last = (i == fp->len - 1) ? 1 : 0;
- if (last && !(jit->seen & SEEN_RET0))
+ if (last)
break;
/* j <exit> */
EMIT4_PCREL(0xa7f40000, jit->exit_ip - jit->prg);
@@ -1246,36 +1401,83 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
goto branch_oc;
branch_ks:
is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
- /* lgfi %w1,imm (load sign extend imm) */
- EMIT6_IMM(0xc0010000, REG_W1, imm);
- /* crj or cgrj %dst,%w1,mask,off */
- EMIT6_PCREL(0xec000000, (is_jmp32 ? 0x0076 : 0x0064),
- dst_reg, REG_W1, i, off, mask);
+ /* cfi or cgfi %dst,imm */
+ EMIT6_IMM(is_jmp32 ? 0xc20d0000 : 0xc20c0000,
+ dst_reg, imm);
+ if (!is_first_pass(jit) &&
+ can_use_rel(jit, addrs[i + off + 1])) {
+ /* brc mask,off */
+ EMIT4_PCREL_RIC(0xa7040000,
+ mask >> 12, addrs[i + off + 1]);
+ } else {
+ /* brcl mask,off */
+ EMIT6_PCREL_RILC(0xc0040000,
+ mask >> 12, addrs[i + off + 1]);
+ }
break;
branch_ku:
is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
- /* lgfi %w1,imm (load sign extend imm) */
- EMIT6_IMM(0xc0010000, REG_W1, imm);
- /* clrj or clgrj %dst,%w1,mask,off */
- EMIT6_PCREL(0xec000000, (is_jmp32 ? 0x0077 : 0x0065),
- dst_reg, REG_W1, i, off, mask);
+ /* clfi or clgfi %dst,imm */
+ EMIT6_IMM(is_jmp32 ? 0xc20f0000 : 0xc20e0000,
+ dst_reg, imm);
+ if (!is_first_pass(jit) &&
+ can_use_rel(jit, addrs[i + off + 1])) {
+ /* brc mask,off */
+ EMIT4_PCREL_RIC(0xa7040000,
+ mask >> 12, addrs[i + off + 1]);
+ } else {
+ /* brcl mask,off */
+ EMIT6_PCREL_RILC(0xc0040000,
+ mask >> 12, addrs[i + off + 1]);
+ }
break;
branch_xs:
is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
- /* crj or cgrj %dst,%src,mask,off */
- EMIT6_PCREL(0xec000000, (is_jmp32 ? 0x0076 : 0x0064),
- dst_reg, src_reg, i, off, mask);
+ if (!is_first_pass(jit) &&
+ can_use_rel(jit, addrs[i + off + 1])) {
+ /* crj or cgrj %dst,%src,mask,off */
+ EMIT6_PCREL(0xec000000, (is_jmp32 ? 0x0076 : 0x0064),
+ dst_reg, src_reg, i, off, mask);
+ } else {
+ /* cr or cgr %dst,%src */
+ if (is_jmp32)
+ EMIT2(0x1900, dst_reg, src_reg);
+ else
+ EMIT4(0xb9200000, dst_reg, src_reg);
+ /* brcl mask,off */
+ EMIT6_PCREL_RILC(0xc0040000,
+ mask >> 12, addrs[i + off + 1]);
+ }
break;
branch_xu:
is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
- /* clrj or clgrj %dst,%src,mask,off */
- EMIT6_PCREL(0xec000000, (is_jmp32 ? 0x0077 : 0x0065),
- dst_reg, src_reg, i, off, mask);
+ if (!is_first_pass(jit) &&
+ can_use_rel(jit, addrs[i + off + 1])) {
+ /* clrj or clgrj %dst,%src,mask,off */
+ EMIT6_PCREL(0xec000000, (is_jmp32 ? 0x0077 : 0x0065),
+ dst_reg, src_reg, i, off, mask);
+ } else {
+ /* clr or clgr %dst,%src */
+ if (is_jmp32)
+ EMIT2(0x1500, dst_reg, src_reg);
+ else
+ EMIT4(0xb9210000, dst_reg, src_reg);
+ /* brcl mask,off */
+ EMIT6_PCREL_RILC(0xc0040000,
+ mask >> 12, addrs[i + off + 1]);
+ }
break;
branch_oc:
- /* brc mask,jmp_off (branch instruction needs 4 bytes) */
- jmp_off = addrs[i + off + 1] - (addrs[i + 1] - 4);
- EMIT4_PCREL(0xa7040000 | mask << 8, jmp_off);
+ if (!is_first_pass(jit) &&
+ can_use_rel(jit, addrs[i + off + 1])) {
+ /* brc mask,off */
+ EMIT4_PCREL_RIC(0xa7040000,
+ mask >> 12, addrs[i + off + 1]);
+ } else {
+ /* brcl mask,off */
+ EMIT6_PCREL_RILC(0xc0040000,
+ mask >> 12, addrs[i + off + 1]);
+ }
break;
}
default: /* too complex, give up */
@@ -1286,28 +1488,67 @@ branch_oc:
}
/*
+ * Return whether the new address of the i-th instruction violates no invariant
+ */
+static bool bpf_is_new_addr_sane(struct bpf_jit *jit, int i)
+{
+ /* On the first pass anything goes */
+ if (is_first_pass(jit))
+ return true;
+
+ /* The codegen pass must not change anything */
+ if (is_codegen_pass(jit))
+ return jit->addrs[i] == jit->prg;
+
+ /* Passes in between must not increase code size */
+ return jit->addrs[i] >= jit->prg;
+}
+
+/*
+ * Update the address of the i-th instruction
+ */
+static int bpf_set_addr(struct bpf_jit *jit, int i)
+{
+ if (!bpf_is_new_addr_sane(jit, i))
+ return -1;
+ jit->addrs[i] = jit->prg;
+ return 0;
+}
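
This invariant is what makes the multi-pass scheme converge: the first pass over-reserves, intermediate passes may only move addresses down, and the final code-generation pass must reproduce them exactly, so any growth is treated as a bug and aborts the JIT. A hedged sketch of the three regimes (the struct layout is a toy, not the kernel's struct bpf_jit):

#include <stdbool.h>
#include <stdio.h>

struct jit_model {
	unsigned int addrs[16];
	unsigned int prg;
	unsigned int size;      /* 0 until the first pass finishes */
	bool codegen;
};

static bool addr_is_sane(const struct jit_model *jit, int i)
{
	if (jit->size == 0)
		return true;                      /* first pass: anything goes */
	if (jit->codegen)
		return jit->addrs[i] == jit->prg; /* must not move at all */
	return jit->addrs[i] >= jit->prg;         /* may only shrink */
}

static int set_addr(struct jit_model *jit, int i)
{
	if (!addr_is_sane(jit, i))
		return -1;
	jit->addrs[i] = jit->prg;
	return 0;
}

int main(void)
{
	struct jit_model jit = { .size = 128, .codegen = false };

	jit.addrs[1] = 40;
	jit.prg = 36;                   /* a branch was relaxed: shrink ok */
	printf("shrink: %d\n", set_addr(&jit, 1));   /* 0 */

	jit.prg = 44;                   /* growth could loop forever */
	printf("grow:   %d\n", set_addr(&jit, 1));   /* -1 */
	return 0;
}
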
+
+/*
* Compile eBPF program into s390x code
*/
static int bpf_jit_prog(struct bpf_jit *jit, struct bpf_prog *fp,
bool extra_pass)
{
- int i, insn_count;
+ int i, insn_count, lit32_size, lit64_size;
- jit->lit = jit->lit_start;
+ jit->lit32 = jit->lit32_start;
+ jit->lit64 = jit->lit64_start;
jit->prg = 0;
bpf_jit_prologue(jit, fp->aux->stack_depth);
+ if (bpf_set_addr(jit, 0) < 0)
+ return -1;
for (i = 0; i < fp->len; i += insn_count) {
insn_count = bpf_jit_insn(jit, fp, i, extra_pass);
if (insn_count < 0)
return -1;
/* Next instruction address */
- jit->addrs[i + insn_count] = jit->prg;
+ if (bpf_set_addr(jit, i + insn_count) < 0)
+ return -1;
}
bpf_jit_epilogue(jit, fp->aux->stack_depth);
- jit->lit_start = jit->prg;
- jit->size = jit->lit;
+ lit32_size = jit->lit32 - jit->lit32_start;
+ lit64_size = jit->lit64 - jit->lit64_start;
+ jit->lit32_start = jit->prg;
+ if (lit32_size)
+ jit->lit32_start = ALIGN(jit->lit32_start, 4);
+ jit->lit64_start = jit->lit32_start + lit32_size;
+ if (lit64_size)
+ jit->lit64_start = ALIGN(jit->lit64_start, 8);
+ jit->size = jit->lit64_start + lit64_size;
jit->size_prg = jit->prg;
return 0;
}
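
The bookkeeping at the end lays the image out as code, then the 4-byte-aligned 32-bit literal pool, then the 8-byte-aligned 64-bit pool (alignment is only applied when a pool is non-empty). A worked example of the arithmetic with invented sizes:

#include <stdio.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned int prg = 106;          /* bytes of generated code */
	unsigned int lit32_size = 12;    /* three u32 literals      */
	unsigned int lit64_size = 16;    /* two u64 literals        */

	unsigned int lit32_start = ALIGN(prg, 4);                      /* 108 */
	unsigned int lit64_start = ALIGN(lit32_start + lit32_size, 8); /* 120 */
	unsigned int size = lit64_start + lit64_size;                  /* 136 */

	printf("lit32_start=%u lit64_start=%u size=%u\n",
	       lit32_start, lit64_start, size);
	return 0;
}
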
@@ -1369,7 +1610,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
}
memset(&jit, 0, sizeof(jit));
- jit.addrs = kcalloc(fp->len + 1, sizeof(*jit.addrs), GFP_KERNEL);
+ jit.addrs = kvcalloc(fp->len + 1, sizeof(*jit.addrs), GFP_KERNEL);
if (jit.addrs == NULL) {
fp = orig_fp;
goto out;
@@ -1388,12 +1629,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
/*
* Final pass: Allocate and generate program
*/
- if (jit.size >= BPF_SIZE_MAX) {
- fp = orig_fp;
- goto free_addrs;
- }
-
- header = bpf_jit_binary_alloc(jit.size, &jit.prg_buf, 2, jit_fill_hole);
+ header = bpf_jit_binary_alloc(jit.size, &jit.prg_buf, 8, jit_fill_hole);
if (!header) {
fp = orig_fp;
goto free_addrs;
@@ -1422,7 +1658,7 @@ skip_init_ctx:
if (!fp->is_func || extra_pass) {
bpf_prog_fill_jited_linfo(fp, jit.addrs + 1);
free_addrs:
- kfree(jit.addrs);
+ kvfree(jit.addrs);
kfree(jit_data);
fp->aux->jit_data = NULL;
}
diff --git a/arch/sh/boards/mach-sdk7786/nmi.c b/arch/sh/boards/mach-sdk7786/nmi.c
index c2e09d798537..afba49679a12 100644
--- a/arch/sh/boards/mach-sdk7786/nmi.c
+++ b/arch/sh/boards/mach-sdk7786/nmi.c
@@ -37,7 +37,7 @@ static int __init nmi_mode_setup(char *str)
nmi_mode = NMI_MODE_ANY;
else {
nmi_mode = NMI_MODE_UNKNOWN;
- pr_warning("Unknown NMI mode %s\n", str);
+ pr_warn("Unknown NMI mode %s\n", str);
}
printk("Set NMI mode to %d\n", nmi_mode);
diff --git a/arch/sh/drivers/pci/fixups-sdk7786.c b/arch/sh/drivers/pci/fixups-sdk7786.c
index 8cbfa5310a4b..6972af7b4e93 100644
--- a/arch/sh/drivers/pci/fixups-sdk7786.c
+++ b/arch/sh/drivers/pci/fixups-sdk7786.c
@@ -53,7 +53,7 @@ static int __init sdk7786_pci_init(void)
/* Warn about forced rerouting if slot#3 is occupied */
if ((data & PCIECR_PRST3) == 0) {
- pr_warning("Unreachable card detected in slot#3\n");
+ pr_warn("Unreachable card detected in slot#3\n");
return -EBUSY;
}
} else
diff --git a/arch/sh/include/cpu-sh4/cpu/sh7734.h b/arch/sh/include/cpu-sh4/cpu/sh7734.h
index 96f0246ad2f2..82b63208135a 100644
--- a/arch/sh/include/cpu-sh4/cpu/sh7734.h
+++ b/arch/sh/include/cpu-sh4/cpu/sh7734.h
@@ -134,7 +134,7 @@ enum {
GPIO_FN_EX_WAIT1, GPIO_FN_SD1_DAT0_A, GPIO_FN_DREQ2, GPIO_FN_CAN1_TX_C,
GPIO_FN_ET0_LINK_C, GPIO_FN_ET0_ETXD5_A,
GPIO_FN_EX_WAIT0, GPIO_FN_TCLK1_B,
- GPIO_FN_RD_WR, GPIO_FN_TCLK0,
+ GPIO_FN_RD_WR, GPIO_FN_TCLK0, GPIO_FN_CAN_CLK_B, GPIO_FN_ET0_ETXD4,
GPIO_FN_EX_CS5, GPIO_FN_SD1_CMD_A, GPIO_FN_ATADIR, GPIO_FN_QSSL_B,
GPIO_FN_ET0_ETXD3_A,
GPIO_FN_EX_CS4, GPIO_FN_SD1_WP_A, GPIO_FN_ATAWR, GPIO_FN_QMI_QIO1_B,
diff --git a/arch/sh/kernel/io_trapped.c b/arch/sh/kernel/io_trapped.c
index bacad6da4fe4..60c828a2b8a2 100644
--- a/arch/sh/kernel/io_trapped.c
+++ b/arch/sh/kernel/io_trapped.c
@@ -99,7 +99,7 @@ int register_trapped_io(struct trapped_io *tiop)
return 0;
bad:
- pr_warning("unable to install trapped io filter\n");
+ pr_warn("unable to install trapped io filter\n");
return -1;
}
EXPORT_SYMBOL_GPL(register_trapped_io);
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index 2c0e0f37a318..6ef341f6cfee 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -354,7 +354,7 @@ void __init setup_arch(char **cmdline_p)
/* processor boot mode configuration */
int generic_mode_pins(void)
{
- pr_warning("generic_mode_pins(): missing mode pin configuration\n");
+ pr_warn("generic_mode_pins(): missing mode pin configuration\n");
return 0;
}
diff --git a/arch/sh/kernel/vmlinux.lds.S b/arch/sh/kernel/vmlinux.lds.S
index 77a59d8c6b4d..c60b19958c35 100644
--- a/arch/sh/kernel/vmlinux.lds.S
+++ b/arch/sh/kernel/vmlinux.lds.S
@@ -48,11 +48,10 @@ SECTIONS
} = 0x0009
EXCEPTION_TABLE(16)
- NOTES
_sdata = .;
RO_DATA(PAGE_SIZE)
- RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
+ RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
_edata = .;
DWARF_EH_FRAME
diff --git a/arch/sh/mm/consistent.c b/arch/sh/mm/consistent.c
index 792f36129062..3169a343a5ab 100644
--- a/arch/sh/mm/consistent.c
+++ b/arch/sh/mm/consistent.c
@@ -43,8 +43,7 @@ int __init platform_resource_setup_memory(struct platform_device *pdev,
r = pdev->resource + pdev->num_resources - 1;
if (r->flags) {
- pr_warning("%s: unable to find empty space for resource\n",
- name);
+ pr_warn("%s: unable to find empty space for resource\n", name);
return -EINVAL;
}
@@ -54,7 +53,7 @@ int __init platform_resource_setup_memory(struct platform_device *pdev,
buf = dma_alloc_coherent(&pdev->dev, memsize, &dma_handle, GFP_KERNEL);
if (!buf) {
- pr_warning("%s: unable to allocate memory\n", name);
+ pr_warn("%s: unable to allocate memory\n", name);
return -ENOMEM;
}
diff --git a/arch/sparc/crypto/aes_glue.c b/arch/sparc/crypto/aes_glue.c
index 7b946b3dee9d..0f5a501c95a9 100644
--- a/arch/sparc/crypto/aes_glue.c
+++ b/arch/sparc/crypto/aes_glue.c
@@ -24,6 +24,7 @@
#include <linux/types.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
+#include <crypto/internal/skcipher.h>
#include <asm/fpumacro.h>
#include <asm/pstate.h>
@@ -197,6 +198,12 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
return 0;
}
+static int aes_set_key_skcipher(struct crypto_skcipher *tfm, const u8 *in_key,
+ unsigned int key_len)
+{
+ return aes_set_key(crypto_skcipher_tfm(tfm), in_key, key_len);
+}
+
static void crypto_aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
struct crypto_sparc64_aes_ctx *ctx = crypto_tfm_ctx(tfm);
@@ -211,131 +218,108 @@ static void crypto_aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
ctx->ops->decrypt(&ctx->key[0], (const u32 *) src, (u32 *) dst);
}
-#define AES_BLOCK_MASK (~(AES_BLOCK_SIZE-1))
-
-static int ecb_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int ecb_encrypt(struct skcipher_request *req)
{
- struct crypto_sparc64_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- struct blkcipher_walk walk;
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ const struct crypto_sparc64_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
+ unsigned int nbytes;
int err;
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+ err = skcipher_walk_virt(&walk, req, true);
+ if (err)
+ return err;
ctx->ops->load_encrypt_keys(&ctx->key[0]);
- while ((nbytes = walk.nbytes)) {
- unsigned int block_len = nbytes & AES_BLOCK_MASK;
-
- if (likely(block_len)) {
- ctx->ops->ecb_encrypt(&ctx->key[0],
- (const u64 *)walk.src.virt.addr,
- (u64 *) walk.dst.virt.addr,
- block_len);
- }
- nbytes &= AES_BLOCK_SIZE - 1;
- err = blkcipher_walk_done(desc, &walk, nbytes);
+ while ((nbytes = walk.nbytes) != 0) {
+ ctx->ops->ecb_encrypt(&ctx->key[0], walk.src.virt.addr,
+ walk.dst.virt.addr,
+ round_down(nbytes, AES_BLOCK_SIZE));
+ err = skcipher_walk_done(&walk, nbytes % AES_BLOCK_SIZE);
}
fprs_write(0);
return err;
}
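
All of the sparc64 glue conversions in this patch follow the same shape: skcipher_walk_virt() replaces the blkcipher walk, each chunk is processed as whole blocks via round_down(), and the sub-block remainder is returned through skcipher_walk_done(). A stripped-down model of that loop (the xor "cipher" and manual chunking stand in for the real FPU routines and scatterlist walk):

#include <stdio.h>
#include <string.h>

#define BLOCK_SIZE 16

static unsigned int round_down_to(unsigned int n, unsigned int b)
{
	return n - n % b;
}

/* Stand-in for ctx->ops->ecb_encrypt(): xor every byte with a key byte. */
static void toy_ecb(unsigned char *dst, const unsigned char *src,
		    unsigned int len)
{
	for (unsigned int i = 0; i < len; i++)
		dst[i] = src[i] ^ 0x5a;
}

int main(void)
{
	unsigned char src[40], dst[40];
	unsigned int done = 0, total = sizeof(src);

	memset(src, 0xab, sizeof(src));

	/*
	 * Walk in chunks, as skcipher_walk hands them out; only whole
	 * blocks are processed, and a sub-block tail ends the walk.
	 */
	while (done < total) {
		unsigned int nbytes = total - done;       /* walk.nbytes */
		unsigned int blocks = round_down_to(nbytes, BLOCK_SIZE);

		toy_ecb(dst + done, src + done, blocks);
		done += blocks;
		if (blocks == 0)
			break;          /* nbytes % BLOCK_SIZE left over */
	}
	printf("processed %u of %u bytes\n", done, total);  /* 32 of 40 */
	return 0;
}
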
-static int ecb_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int ecb_decrypt(struct skcipher_request *req)
{
- struct crypto_sparc64_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- struct blkcipher_walk walk;
- u64 *key_end;
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ const struct crypto_sparc64_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
+ const u64 *key_end;
+ struct skcipher_walk walk;
+ unsigned int nbytes;
int err;
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+ err = skcipher_walk_virt(&walk, req, true);
+ if (err)
+ return err;
ctx->ops->load_decrypt_keys(&ctx->key[0]);
key_end = &ctx->key[ctx->expanded_key_length / sizeof(u64)];
- while ((nbytes = walk.nbytes)) {
- unsigned int block_len = nbytes & AES_BLOCK_MASK;
-
- if (likely(block_len)) {
- ctx->ops->ecb_decrypt(key_end,
- (const u64 *) walk.src.virt.addr,
- (u64 *) walk.dst.virt.addr, block_len);
- }
- nbytes &= AES_BLOCK_SIZE - 1;
- err = blkcipher_walk_done(desc, &walk, nbytes);
+ while ((nbytes = walk.nbytes) != 0) {
+ ctx->ops->ecb_decrypt(key_end, walk.src.virt.addr,
+ walk.dst.virt.addr,
+ round_down(nbytes, AES_BLOCK_SIZE));
+ err = skcipher_walk_done(&walk, nbytes % AES_BLOCK_SIZE);
}
fprs_write(0);
return err;
}
-static int cbc_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int cbc_encrypt(struct skcipher_request *req)
{
- struct crypto_sparc64_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- struct blkcipher_walk walk;
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ const struct crypto_sparc64_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
+ unsigned int nbytes;
int err;
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+ err = skcipher_walk_virt(&walk, req, true);
+ if (err)
+ return err;
ctx->ops->load_encrypt_keys(&ctx->key[0]);
- while ((nbytes = walk.nbytes)) {
- unsigned int block_len = nbytes & AES_BLOCK_MASK;
-
- if (likely(block_len)) {
- ctx->ops->cbc_encrypt(&ctx->key[0],
- (const u64 *)walk.src.virt.addr,
- (u64 *) walk.dst.virt.addr,
- block_len, (u64 *) walk.iv);
- }
- nbytes &= AES_BLOCK_SIZE - 1;
- err = blkcipher_walk_done(desc, &walk, nbytes);
+ while ((nbytes = walk.nbytes) != 0) {
+ ctx->ops->cbc_encrypt(&ctx->key[0], walk.src.virt.addr,
+ walk.dst.virt.addr,
+ round_down(nbytes, AES_BLOCK_SIZE),
+ walk.iv);
+ err = skcipher_walk_done(&walk, nbytes % AES_BLOCK_SIZE);
}
fprs_write(0);
return err;
}
-static int cbc_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int cbc_decrypt(struct skcipher_request *req)
{
- struct crypto_sparc64_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- struct blkcipher_walk walk;
- u64 *key_end;
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ const struct crypto_sparc64_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
+ const u64 *key_end;
+ struct skcipher_walk walk;
+ unsigned int nbytes;
int err;
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+ err = skcipher_walk_virt(&walk, req, true);
+ if (err)
+ return err;
ctx->ops->load_decrypt_keys(&ctx->key[0]);
key_end = &ctx->key[ctx->expanded_key_length / sizeof(u64)];
- while ((nbytes = walk.nbytes)) {
- unsigned int block_len = nbytes & AES_BLOCK_MASK;
-
- if (likely(block_len)) {
- ctx->ops->cbc_decrypt(key_end,
- (const u64 *) walk.src.virt.addr,
- (u64 *) walk.dst.virt.addr,
- block_len, (u64 *) walk.iv);
- }
- nbytes &= AES_BLOCK_SIZE - 1;
- err = blkcipher_walk_done(desc, &walk, nbytes);
+ while ((nbytes = walk.nbytes) != 0) {
+ ctx->ops->cbc_decrypt(key_end, walk.src.virt.addr,
+ walk.dst.virt.addr,
+ round_down(nbytes, AES_BLOCK_SIZE),
+ walk.iv);
+ err = skcipher_walk_done(&walk, nbytes % AES_BLOCK_SIZE);
}
fprs_write(0);
return err;
}
-static void ctr_crypt_final(struct crypto_sparc64_aes_ctx *ctx,
- struct blkcipher_walk *walk)
+static void ctr_crypt_final(const struct crypto_sparc64_aes_ctx *ctx,
+ struct skcipher_walk *walk)
{
u8 *ctrblk = walk->iv;
u64 keystream[AES_BLOCK_SIZE / sizeof(u64)];
@@ -349,40 +333,35 @@ static void ctr_crypt_final(struct crypto_sparc64_aes_ctx *ctx,
crypto_inc(ctrblk, AES_BLOCK_SIZE);
}
-static int ctr_crypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int ctr_crypt(struct skcipher_request *req)
{
- struct crypto_sparc64_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- struct blkcipher_walk walk;
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ const struct crypto_sparc64_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
+ unsigned int nbytes;
int err;
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+ err = skcipher_walk_virt(&walk, req, true);
+ if (err)
+ return err;
ctx->ops->load_encrypt_keys(&ctx->key[0]);
while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
- unsigned int block_len = nbytes & AES_BLOCK_MASK;
-
- if (likely(block_len)) {
- ctx->ops->ctr_crypt(&ctx->key[0],
- (const u64 *)walk.src.virt.addr,
- (u64 *) walk.dst.virt.addr,
- block_len, (u64 *) walk.iv);
- }
- nbytes &= AES_BLOCK_SIZE - 1;
- err = blkcipher_walk_done(desc, &walk, nbytes);
+ ctx->ops->ctr_crypt(&ctx->key[0], walk.src.virt.addr,
+ walk.dst.virt.addr,
+ round_down(nbytes, AES_BLOCK_SIZE),
+ walk.iv);
+ err = skcipher_walk_done(&walk, nbytes % AES_BLOCK_SIZE);
}
if (walk.nbytes) {
ctr_crypt_final(ctx, &walk);
- err = blkcipher_walk_done(desc, &walk, 0);
+ err = skcipher_walk_done(&walk, 0);
}
fprs_write(0);
return err;
}
-static struct crypto_alg algs[] = { {
+static struct crypto_alg cipher_alg = {
.cra_name = "aes",
.cra_driver_name = "aes-sparc64",
.cra_priority = SPARC_CR_OPCODE_PRIORITY,
@@ -400,66 +379,53 @@ static struct crypto_alg algs[] = { {
.cia_decrypt = crypto_aes_decrypt
}
}
-}, {
- .cra_name = "ecb(aes)",
- .cra_driver_name = "ecb-aes-sparc64",
- .cra_priority = SPARC_CR_OPCODE_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct crypto_sparc64_aes_ctx),
- .cra_alignmask = 7,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = aes_set_key,
- .encrypt = ecb_encrypt,
- .decrypt = ecb_decrypt,
- },
- },
-}, {
- .cra_name = "cbc(aes)",
- .cra_driver_name = "cbc-aes-sparc64",
- .cra_priority = SPARC_CR_OPCODE_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct crypto_sparc64_aes_ctx),
- .cra_alignmask = 7,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = aes_set_key,
- .encrypt = cbc_encrypt,
- .decrypt = cbc_decrypt,
- },
- },
-}, {
- .cra_name = "ctr(aes)",
- .cra_driver_name = "ctr-aes-sparc64",
- .cra_priority = SPARC_CR_OPCODE_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct crypto_sparc64_aes_ctx),
- .cra_alignmask = 7,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = aes_set_key,
- .encrypt = ctr_crypt,
- .decrypt = ctr_crypt,
- },
- },
-} };
+};
+
+static struct skcipher_alg skcipher_algs[] = {
+ {
+ .base.cra_name = "ecb(aes)",
+ .base.cra_driver_name = "ecb-aes-sparc64",
+ .base.cra_priority = SPARC_CR_OPCODE_PRIORITY,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct crypto_sparc64_aes_ctx),
+ .base.cra_alignmask = 7,
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = aes_set_key_skcipher,
+ .encrypt = ecb_encrypt,
+ .decrypt = ecb_decrypt,
+ }, {
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "cbc-aes-sparc64",
+ .base.cra_priority = SPARC_CR_OPCODE_PRIORITY,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct crypto_sparc64_aes_ctx),
+ .base.cra_alignmask = 7,
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = aes_set_key_skcipher,
+ .encrypt = cbc_encrypt,
+ .decrypt = cbc_decrypt,
+ }, {
+ .base.cra_name = "ctr(aes)",
+ .base.cra_driver_name = "ctr-aes-sparc64",
+ .base.cra_priority = SPARC_CR_OPCODE_PRIORITY,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct crypto_sparc64_aes_ctx),
+ .base.cra_alignmask = 7,
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = aes_set_key_skcipher,
+ .encrypt = ctr_crypt,
+ .decrypt = ctr_crypt,
+ .chunksize = AES_BLOCK_SIZE,
+ }
+};
static bool __init sparc64_has_aes_opcode(void)
{
@@ -477,17 +443,27 @@ static bool __init sparc64_has_aes_opcode(void)
static int __init aes_sparc64_mod_init(void)
{
- if (sparc64_has_aes_opcode()) {
- pr_info("Using sparc64 aes opcodes optimized AES implementation\n");
- return crypto_register_algs(algs, ARRAY_SIZE(algs));
+ int err;
+
+ if (!sparc64_has_aes_opcode()) {
+ pr_info("sparc64 aes opcodes not available.\n");
+ return -ENODEV;
}
- pr_info("sparc64 aes opcodes not available.\n");
- return -ENODEV;
+ pr_info("Using sparc64 aes opcodes optimized AES implementation\n");
+ err = crypto_register_alg(&cipher_alg);
+ if (err)
+ return err;
+ err = crypto_register_skciphers(skcipher_algs,
+ ARRAY_SIZE(skcipher_algs));
+ if (err)
+ crypto_unregister_alg(&cipher_alg);
+ return err;
}
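
The registration rework in each of these modules uses the same rollback idiom: register the bare cipher first, then the skcipher set, and unregister the cipher again if the second step fails. A toy model of that error path (the function names are invented stand-ins, not crypto API calls):

#include <stdio.h>

static int register_a(void) { return 0; }
static int register_b(void) { return -12; }   /* pretend -ENOMEM */
static void unregister_a(void) { puts("rolled back a"); }

/* Register two things; on failure of the second, undo the first. */
static int mod_init_model(void)
{
	int err = register_a();

	if (err)
		return err;
	err = register_b();
	if (err)
		unregister_a();
	return err;
}

int main(void)
{
	printf("init: %d\n", mod_init_model());   /* rolls back, returns -12 */
	return 0;
}
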
static void __exit aes_sparc64_mod_fini(void)
{
- crypto_unregister_algs(algs, ARRAY_SIZE(algs));
+ crypto_unregister_alg(&cipher_alg);
+ crypto_unregister_skciphers(skcipher_algs, ARRAY_SIZE(skcipher_algs));
}
module_init(aes_sparc64_mod_init);
diff --git a/arch/sparc/crypto/camellia_glue.c b/arch/sparc/crypto/camellia_glue.c
index 3823f9491a72..1700f863748c 100644
--- a/arch/sparc/crypto/camellia_glue.c
+++ b/arch/sparc/crypto/camellia_glue.c
@@ -12,6 +12,7 @@
#include <linux/mm.h>
#include <linux/types.h>
#include <crypto/algapi.h>
+#include <crypto/internal/skcipher.h>
#include <asm/fpumacro.h>
#include <asm/pstate.h>
@@ -52,6 +53,12 @@ static int camellia_set_key(struct crypto_tfm *tfm, const u8 *_in_key,
return 0;
}
+static int camellia_set_key_skcipher(struct crypto_skcipher *tfm,
+ const u8 *in_key, unsigned int key_len)
+{
+ return camellia_set_key(crypto_skcipher_tfm(tfm), in_key, key_len);
+}
+
extern void camellia_sparc64_crypt(const u64 *key, const u32 *input,
u32 *output, unsigned int key_len);
@@ -81,61 +88,46 @@ typedef void ecb_crypt_op(const u64 *input, u64 *output, unsigned int len,
extern ecb_crypt_op camellia_sparc64_ecb_crypt_3_grand_rounds;
extern ecb_crypt_op camellia_sparc64_ecb_crypt_4_grand_rounds;
-#define CAMELLIA_BLOCK_MASK (~(CAMELLIA_BLOCK_SIZE - 1))
-
-static int __ecb_crypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes, bool encrypt)
+static int __ecb_crypt(struct skcipher_request *req, bool encrypt)
{
- struct camellia_sparc64_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- struct blkcipher_walk walk;
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ const struct camellia_sparc64_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
ecb_crypt_op *op;
const u64 *key;
+ unsigned int nbytes;
int err;
op = camellia_sparc64_ecb_crypt_3_grand_rounds;
if (ctx->key_len != 16)
op = camellia_sparc64_ecb_crypt_4_grand_rounds;
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+ err = skcipher_walk_virt(&walk, req, true);
+ if (err)
+ return err;
if (encrypt)
key = &ctx->encrypt_key[0];
else
key = &ctx->decrypt_key[0];
camellia_sparc64_load_keys(key, ctx->key_len);
- while ((nbytes = walk.nbytes)) {
- unsigned int block_len = nbytes & CAMELLIA_BLOCK_MASK;
-
- if (likely(block_len)) {
- const u64 *src64;
- u64 *dst64;
-
- src64 = (const u64 *)walk.src.virt.addr;
- dst64 = (u64 *) walk.dst.virt.addr;
- op(src64, dst64, block_len, key);
- }
- nbytes &= CAMELLIA_BLOCK_SIZE - 1;
- err = blkcipher_walk_done(desc, &walk, nbytes);
+ while ((nbytes = walk.nbytes) != 0) {
+ op(walk.src.virt.addr, walk.dst.virt.addr,
+ round_down(nbytes, CAMELLIA_BLOCK_SIZE), key);
+ err = skcipher_walk_done(&walk, nbytes % CAMELLIA_BLOCK_SIZE);
}
fprs_write(0);
return err;
}
-static int ecb_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int ecb_encrypt(struct skcipher_request *req)
{
- return __ecb_crypt(desc, dst, src, nbytes, true);
+ return __ecb_crypt(req, true);
}
-static int ecb_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int ecb_decrypt(struct skcipher_request *req)
{
- return __ecb_crypt(desc, dst, src, nbytes, false);
+ return __ecb_crypt(req, false);
}
typedef void cbc_crypt_op(const u64 *input, u64 *output, unsigned int len,
@@ -146,85 +138,65 @@ extern cbc_crypt_op camellia_sparc64_cbc_encrypt_4_grand_rounds;
extern cbc_crypt_op camellia_sparc64_cbc_decrypt_3_grand_rounds;
extern cbc_crypt_op camellia_sparc64_cbc_decrypt_4_grand_rounds;
-static int cbc_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int cbc_encrypt(struct skcipher_request *req)
{
- struct camellia_sparc64_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- struct blkcipher_walk walk;
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ const struct camellia_sparc64_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
cbc_crypt_op *op;
const u64 *key;
+ unsigned int nbytes;
int err;
op = camellia_sparc64_cbc_encrypt_3_grand_rounds;
if (ctx->key_len != 16)
op = camellia_sparc64_cbc_encrypt_4_grand_rounds;
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+ err = skcipher_walk_virt(&walk, req, true);
+ if (err)
+ return err;
key = &ctx->encrypt_key[0];
camellia_sparc64_load_keys(key, ctx->key_len);
- while ((nbytes = walk.nbytes)) {
- unsigned int block_len = nbytes & CAMELLIA_BLOCK_MASK;
-
- if (likely(block_len)) {
- const u64 *src64;
- u64 *dst64;
-
- src64 = (const u64 *)walk.src.virt.addr;
- dst64 = (u64 *) walk.dst.virt.addr;
- op(src64, dst64, block_len, key,
- (u64 *) walk.iv);
- }
- nbytes &= CAMELLIA_BLOCK_SIZE - 1;
- err = blkcipher_walk_done(desc, &walk, nbytes);
+ while ((nbytes = walk.nbytes) != 0) {
+ op(walk.src.virt.addr, walk.dst.virt.addr,
+ round_down(nbytes, CAMELLIA_BLOCK_SIZE), key, walk.iv);
+ err = skcipher_walk_done(&walk, nbytes % CAMELLIA_BLOCK_SIZE);
}
fprs_write(0);
return err;
}
-static int cbc_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int cbc_decrypt(struct skcipher_request *req)
{
- struct camellia_sparc64_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- struct blkcipher_walk walk;
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ const struct camellia_sparc64_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
cbc_crypt_op *op;
const u64 *key;
+ unsigned int nbytes;
int err;
op = camellia_sparc64_cbc_decrypt_3_grand_rounds;
if (ctx->key_len != 16)
op = camellia_sparc64_cbc_decrypt_4_grand_rounds;
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+ err = skcipher_walk_virt(&walk, req, true);
+ if (err)
+ return err;
key = &ctx->decrypt_key[0];
camellia_sparc64_load_keys(key, ctx->key_len);
- while ((nbytes = walk.nbytes)) {
- unsigned int block_len = nbytes & CAMELLIA_BLOCK_MASK;
-
- if (likely(block_len)) {
- const u64 *src64;
- u64 *dst64;
-
- src64 = (const u64 *)walk.src.virt.addr;
- dst64 = (u64 *) walk.dst.virt.addr;
- op(src64, dst64, block_len, key,
- (u64 *) walk.iv);
- }
- nbytes &= CAMELLIA_BLOCK_SIZE - 1;
- err = blkcipher_walk_done(desc, &walk, nbytes);
+ while ((nbytes = walk.nbytes) != 0) {
+ op(walk.src.virt.addr, walk.dst.virt.addr,
+ round_down(nbytes, CAMELLIA_BLOCK_SIZE), key, walk.iv);
+ err = skcipher_walk_done(&walk, nbytes % CAMELLIA_BLOCK_SIZE);
}
fprs_write(0);
return err;
}
-static struct crypto_alg algs[] = { {
+static struct crypto_alg cipher_alg = {
.cra_name = "camellia",
.cra_driver_name = "camellia-sparc64",
.cra_priority = SPARC_CR_OPCODE_PRIORITY,
@@ -242,46 +214,37 @@ static struct crypto_alg algs[] = { {
.cia_decrypt = camellia_decrypt
}
}
-}, {
- .cra_name = "ecb(camellia)",
- .cra_driver_name = "ecb-camellia-sparc64",
- .cra_priority = SPARC_CR_OPCODE_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = CAMELLIA_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct camellia_sparc64_ctx),
- .cra_alignmask = 7,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = CAMELLIA_MIN_KEY_SIZE,
- .max_keysize = CAMELLIA_MAX_KEY_SIZE,
- .setkey = camellia_set_key,
- .encrypt = ecb_encrypt,
- .decrypt = ecb_decrypt,
- },
- },
-}, {
- .cra_name = "cbc(camellia)",
- .cra_driver_name = "cbc-camellia-sparc64",
- .cra_priority = SPARC_CR_OPCODE_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = CAMELLIA_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct camellia_sparc64_ctx),
- .cra_alignmask = 7,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = CAMELLIA_MIN_KEY_SIZE,
- .max_keysize = CAMELLIA_MAX_KEY_SIZE,
- .ivsize = CAMELLIA_BLOCK_SIZE,
- .setkey = camellia_set_key,
- .encrypt = cbc_encrypt,
- .decrypt = cbc_decrypt,
- },
- },
-}
+};
+
+static struct skcipher_alg skcipher_algs[] = {
+ {
+ .base.cra_name = "ecb(camellia)",
+ .base.cra_driver_name = "ecb-camellia-sparc64",
+ .base.cra_priority = SPARC_CR_OPCODE_PRIORITY,
+ .base.cra_blocksize = CAMELLIA_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct camellia_sparc64_ctx),
+ .base.cra_alignmask = 7,
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = CAMELLIA_MIN_KEY_SIZE,
+ .max_keysize = CAMELLIA_MAX_KEY_SIZE,
+ .setkey = camellia_set_key_skcipher,
+ .encrypt = ecb_encrypt,
+ .decrypt = ecb_decrypt,
+ }, {
+ .base.cra_name = "cbc(camellia)",
+ .base.cra_driver_name = "cbc-camellia-sparc64",
+ .base.cra_priority = SPARC_CR_OPCODE_PRIORITY,
+ .base.cra_blocksize = CAMELLIA_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct camellia_sparc64_ctx),
+ .base.cra_alignmask = 7,
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = CAMELLIA_MIN_KEY_SIZE,
+ .max_keysize = CAMELLIA_MAX_KEY_SIZE,
+ .ivsize = CAMELLIA_BLOCK_SIZE,
+ .setkey = camellia_set_key_skcipher,
+ .encrypt = cbc_encrypt,
+ .decrypt = cbc_decrypt,
+ }
};
static bool __init sparc64_has_camellia_opcode(void)
@@ -300,17 +263,27 @@ static bool __init sparc64_has_camellia_opcode(void)
static int __init camellia_sparc64_mod_init(void)
{
- if (sparc64_has_camellia_opcode()) {
- pr_info("Using sparc64 camellia opcodes optimized CAMELLIA implementation\n");
- return crypto_register_algs(algs, ARRAY_SIZE(algs));
+ int err;
+
+ if (!sparc64_has_camellia_opcode()) {
+ pr_info("sparc64 camellia opcodes not available.\n");
+ return -ENODEV;
}
- pr_info("sparc64 camellia opcodes not available.\n");
- return -ENODEV;
+ pr_info("Using sparc64 camellia opcodes optimized CAMELLIA implementation\n");
+ err = crypto_register_alg(&cipher_alg);
+ if (err)
+ return err;
+ err = crypto_register_skciphers(skcipher_algs,
+ ARRAY_SIZE(skcipher_algs));
+ if (err)
+ crypto_unregister_alg(&cipher_alg);
+ return err;
}
static void __exit camellia_sparc64_mod_fini(void)
{
- crypto_unregister_algs(algs, ARRAY_SIZE(algs));
+ crypto_unregister_alg(&cipher_alg);
+ crypto_unregister_skciphers(skcipher_algs, ARRAY_SIZE(skcipher_algs));
}
module_init(camellia_sparc64_mod_init);
diff --git a/arch/sparc/crypto/des_glue.c b/arch/sparc/crypto/des_glue.c
index db6010b4e52e..a499102bf706 100644
--- a/arch/sparc/crypto/des_glue.c
+++ b/arch/sparc/crypto/des_glue.c
@@ -13,6 +13,7 @@
#include <linux/types.h>
#include <crypto/algapi.h>
#include <crypto/internal/des.h>
+#include <crypto/internal/skcipher.h>
#include <asm/fpumacro.h>
#include <asm/pstate.h>
@@ -61,6 +62,12 @@ static int des_set_key(struct crypto_tfm *tfm, const u8 *key,
return 0;
}
+static int des_set_key_skcipher(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ return des_set_key(crypto_skcipher_tfm(tfm), key, keylen);
+}
+
extern void des_sparc64_crypt(const u64 *key, const u64 *input,
u64 *output);
@@ -85,113 +92,90 @@ extern void des_sparc64_load_keys(const u64 *key);
extern void des_sparc64_ecb_crypt(const u64 *input, u64 *output,
unsigned int len);
-#define DES_BLOCK_MASK (~(DES_BLOCK_SIZE - 1))
-
-static int __ecb_crypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes, bool encrypt)
+static int __ecb_crypt(struct skcipher_request *req, bool encrypt)
{
- struct des_sparc64_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- struct blkcipher_walk walk;
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ const struct des_sparc64_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
+ unsigned int nbytes;
int err;
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+ err = skcipher_walk_virt(&walk, req, true);
+ if (err)
+ return err;
if (encrypt)
des_sparc64_load_keys(&ctx->encrypt_expkey[0]);
else
des_sparc64_load_keys(&ctx->decrypt_expkey[0]);
- while ((nbytes = walk.nbytes)) {
- unsigned int block_len = nbytes & DES_BLOCK_MASK;
-
- if (likely(block_len)) {
- des_sparc64_ecb_crypt((const u64 *)walk.src.virt.addr,
- (u64 *) walk.dst.virt.addr,
- block_len);
- }
- nbytes &= DES_BLOCK_SIZE - 1;
- err = blkcipher_walk_done(desc, &walk, nbytes);
+ while ((nbytes = walk.nbytes) != 0) {
+ des_sparc64_ecb_crypt(walk.src.virt.addr, walk.dst.virt.addr,
+ round_down(nbytes, DES_BLOCK_SIZE));
+ err = skcipher_walk_done(&walk, nbytes % DES_BLOCK_SIZE);
}
fprs_write(0);
return err;
}
-static int ecb_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int ecb_encrypt(struct skcipher_request *req)
{
- return __ecb_crypt(desc, dst, src, nbytes, true);
+ return __ecb_crypt(req, true);
}
-static int ecb_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int ecb_decrypt(struct skcipher_request *req)
{
- return __ecb_crypt(desc, dst, src, nbytes, false);
+ return __ecb_crypt(req, false);
}
extern void des_sparc64_cbc_encrypt(const u64 *input, u64 *output,
unsigned int len, u64 *iv);
-static int cbc_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+extern void des_sparc64_cbc_decrypt(const u64 *input, u64 *output,
+ unsigned int len, u64 *iv);
+
+static int __cbc_crypt(struct skcipher_request *req, bool encrypt)
{
- struct des_sparc64_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- struct blkcipher_walk walk;
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ const struct des_sparc64_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
+ unsigned int nbytes;
int err;
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
-
- des_sparc64_load_keys(&ctx->encrypt_expkey[0]);
- while ((nbytes = walk.nbytes)) {
- unsigned int block_len = nbytes & DES_BLOCK_MASK;
+ err = skcipher_walk_virt(&walk, req, true);
+ if (err)
+ return err;
- if (likely(block_len)) {
- des_sparc64_cbc_encrypt((const u64 *)walk.src.virt.addr,
- (u64 *) walk.dst.virt.addr,
- block_len, (u64 *) walk.iv);
- }
- nbytes &= DES_BLOCK_SIZE - 1;
- err = blkcipher_walk_done(desc, &walk, nbytes);
+ if (encrypt)
+ des_sparc64_load_keys(&ctx->encrypt_expkey[0]);
+ else
+ des_sparc64_load_keys(&ctx->decrypt_expkey[0]);
+ while ((nbytes = walk.nbytes) != 0) {
+ if (encrypt)
+ des_sparc64_cbc_encrypt(walk.src.virt.addr,
+ walk.dst.virt.addr,
+ round_down(nbytes,
+ DES_BLOCK_SIZE),
+ walk.iv);
+ else
+ des_sparc64_cbc_decrypt(walk.src.virt.addr,
+ walk.dst.virt.addr,
+ round_down(nbytes,
+ DES_BLOCK_SIZE),
+ walk.iv);
+ err = skcipher_walk_done(&walk, nbytes % DES_BLOCK_SIZE);
}
fprs_write(0);
return err;
}
-extern void des_sparc64_cbc_decrypt(const u64 *input, u64 *output,
- unsigned int len, u64 *iv);
-
-static int cbc_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int cbc_encrypt(struct skcipher_request *req)
{
- struct des_sparc64_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- struct blkcipher_walk walk;
- int err;
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
-
- des_sparc64_load_keys(&ctx->decrypt_expkey[0]);
- while ((nbytes = walk.nbytes)) {
- unsigned int block_len = nbytes & DES_BLOCK_MASK;
+ return __cbc_crypt(req, true);
+}
- if (likely(block_len)) {
- des_sparc64_cbc_decrypt((const u64 *)walk.src.virt.addr,
- (u64 *) walk.dst.virt.addr,
- block_len, (u64 *) walk.iv);
- }
- nbytes &= DES_BLOCK_SIZE - 1;
- err = blkcipher_walk_done(desc, &walk, nbytes);
- }
- fprs_write(0);
- return err;
+static int cbc_decrypt(struct skcipher_request *req)
+{
+ return __cbc_crypt(req, false);
}
static int des3_ede_set_key(struct crypto_tfm *tfm, const u8 *key,
@@ -227,6 +211,12 @@ static int des3_ede_set_key(struct crypto_tfm *tfm, const u8 *key,
return 0;
}
+static int des3_ede_set_key_skcipher(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ return des3_ede_set_key(crypto_skcipher_tfm(tfm), key, keylen);
+}
+
extern void des3_ede_sparc64_crypt(const u64 *key, const u64 *input,
u64 *output);
@@ -251,241 +241,196 @@ extern void des3_ede_sparc64_load_keys(const u64 *key);
extern void des3_ede_sparc64_ecb_crypt(const u64 *expkey, const u64 *input,
u64 *output, unsigned int len);
-static int __ecb3_crypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes, bool encrypt)
+static int __ecb3_crypt(struct skcipher_request *req, bool encrypt)
{
- struct des3_ede_sparc64_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- struct blkcipher_walk walk;
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ const struct des3_ede_sparc64_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
const u64 *K;
+ unsigned int nbytes;
int err;
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+ err = skcipher_walk_virt(&walk, req, true);
+ if (err)
+ return err;
if (encrypt)
K = &ctx->encrypt_expkey[0];
else
K = &ctx->decrypt_expkey[0];
des3_ede_sparc64_load_keys(K);
- while ((nbytes = walk.nbytes)) {
- unsigned int block_len = nbytes & DES_BLOCK_MASK;
-
- if (likely(block_len)) {
- const u64 *src64 = (const u64 *)walk.src.virt.addr;
- des3_ede_sparc64_ecb_crypt(K, src64,
- (u64 *) walk.dst.virt.addr,
- block_len);
- }
- nbytes &= DES_BLOCK_SIZE - 1;
- err = blkcipher_walk_done(desc, &walk, nbytes);
+ while ((nbytes = walk.nbytes) != 0) {
+ des3_ede_sparc64_ecb_crypt(K, walk.src.virt.addr,
+ walk.dst.virt.addr,
+ round_down(nbytes, DES_BLOCK_SIZE));
+ err = skcipher_walk_done(&walk, nbytes % DES_BLOCK_SIZE);
}
fprs_write(0);
return err;
}
-static int ecb3_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int ecb3_encrypt(struct skcipher_request *req)
{
- return __ecb3_crypt(desc, dst, src, nbytes, true);
+ return __ecb3_crypt(req, true);
}
-static int ecb3_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int ecb3_decrypt(struct skcipher_request *req)
{
- return __ecb3_crypt(desc, dst, src, nbytes, false);
+ return __ecb3_crypt(req, false);
}
extern void des3_ede_sparc64_cbc_encrypt(const u64 *expkey, const u64 *input,
u64 *output, unsigned int len,
u64 *iv);
-static int cbc3_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
-{
- struct des3_ede_sparc64_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- struct blkcipher_walk walk;
- const u64 *K;
- int err;
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
-
- K = &ctx->encrypt_expkey[0];
- des3_ede_sparc64_load_keys(K);
- while ((nbytes = walk.nbytes)) {
- unsigned int block_len = nbytes & DES_BLOCK_MASK;
-
- if (likely(block_len)) {
- const u64 *src64 = (const u64 *)walk.src.virt.addr;
- des3_ede_sparc64_cbc_encrypt(K, src64,
- (u64 *) walk.dst.virt.addr,
- block_len,
- (u64 *) walk.iv);
- }
- nbytes &= DES_BLOCK_SIZE - 1;
- err = blkcipher_walk_done(desc, &walk, nbytes);
- }
- fprs_write(0);
- return err;
-}
-
extern void des3_ede_sparc64_cbc_decrypt(const u64 *expkey, const u64 *input,
u64 *output, unsigned int len,
u64 *iv);
-static int cbc3_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int __cbc3_crypt(struct skcipher_request *req, bool encrypt)
{
- struct des3_ede_sparc64_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- struct blkcipher_walk walk;
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ const struct des3_ede_sparc64_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
const u64 *K;
+ unsigned int nbytes;
int err;
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+ err = skcipher_walk_virt(&walk, req, true);
+ if (err)
+ return err;
- K = &ctx->decrypt_expkey[0];
+ if (encrypt)
+ K = &ctx->encrypt_expkey[0];
+ else
+ K = &ctx->decrypt_expkey[0];
des3_ede_sparc64_load_keys(K);
- while ((nbytes = walk.nbytes)) {
- unsigned int block_len = nbytes & DES_BLOCK_MASK;
-
- if (likely(block_len)) {
- const u64 *src64 = (const u64 *)walk.src.virt.addr;
- des3_ede_sparc64_cbc_decrypt(K, src64,
- (u64 *) walk.dst.virt.addr,
- block_len,
- (u64 *) walk.iv);
- }
- nbytes &= DES_BLOCK_SIZE - 1;
- err = blkcipher_walk_done(desc, &walk, nbytes);
+ while ((nbytes = walk.nbytes) != 0) {
+ if (encrypt)
+ des3_ede_sparc64_cbc_encrypt(K, walk.src.virt.addr,
+ walk.dst.virt.addr,
+ round_down(nbytes,
+ DES_BLOCK_SIZE),
+ walk.iv);
+ else
+ des3_ede_sparc64_cbc_decrypt(K, walk.src.virt.addr,
+ walk.dst.virt.addr,
+ round_down(nbytes,
+ DES_BLOCK_SIZE),
+ walk.iv);
+ err = skcipher_walk_done(&walk, nbytes % DES_BLOCK_SIZE);
}
fprs_write(0);
return err;
}
-static struct crypto_alg algs[] = { {
- .cra_name = "des",
- .cra_driver_name = "des-sparc64",
- .cra_priority = SPARC_CR_OPCODE_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct des_sparc64_ctx),
- .cra_alignmask = 7,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .cipher = {
- .cia_min_keysize = DES_KEY_SIZE,
- .cia_max_keysize = DES_KEY_SIZE,
- .cia_setkey = des_set_key,
- .cia_encrypt = sparc_des_encrypt,
- .cia_decrypt = sparc_des_decrypt
+static int cbc3_encrypt(struct skcipher_request *req)
+{
+ return __cbc3_crypt(req, true);
+}
+
+static int cbc3_decrypt(struct skcipher_request *req)
+{
+ return __cbc3_crypt(req, false);
+}
+
+static struct crypto_alg cipher_algs[] = {
+ {
+ .cra_name = "des",
+ .cra_driver_name = "des-sparc64",
+ .cra_priority = SPARC_CR_OPCODE_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct des_sparc64_ctx),
+ .cra_alignmask = 7,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .cipher = {
+ .cia_min_keysize = DES_KEY_SIZE,
+ .cia_max_keysize = DES_KEY_SIZE,
+ .cia_setkey = des_set_key,
+ .cia_encrypt = sparc_des_encrypt,
+ .cia_decrypt = sparc_des_decrypt
+ }
}
- }
-}, {
- .cra_name = "ecb(des)",
- .cra_driver_name = "ecb-des-sparc64",
- .cra_priority = SPARC_CR_OPCODE_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct des_sparc64_ctx),
- .cra_alignmask = 7,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .setkey = des_set_key,
- .encrypt = ecb_encrypt,
- .decrypt = ecb_decrypt,
- },
- },
-}, {
- .cra_name = "cbc(des)",
- .cra_driver_name = "cbc-des-sparc64",
- .cra_priority = SPARC_CR_OPCODE_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct des_sparc64_ctx),
- .cra_alignmask = 7,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = des_set_key,
- .encrypt = cbc_encrypt,
- .decrypt = cbc_decrypt,
- },
- },
-}, {
- .cra_name = "des3_ede",
- .cra_driver_name = "des3_ede-sparc64",
- .cra_priority = SPARC_CR_OPCODE_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct des3_ede_sparc64_ctx),
- .cra_alignmask = 7,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .cipher = {
- .cia_min_keysize = DES3_EDE_KEY_SIZE,
- .cia_max_keysize = DES3_EDE_KEY_SIZE,
- .cia_setkey = des3_ede_set_key,
- .cia_encrypt = sparc_des3_ede_encrypt,
- .cia_decrypt = sparc_des3_ede_decrypt
+ }, {
+ .cra_name = "des3_ede",
+ .cra_driver_name = "des3_ede-sparc64",
+ .cra_priority = SPARC_CR_OPCODE_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct des3_ede_sparc64_ctx),
+ .cra_alignmask = 7,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .cipher = {
+ .cia_min_keysize = DES3_EDE_KEY_SIZE,
+ .cia_max_keysize = DES3_EDE_KEY_SIZE,
+ .cia_setkey = des3_ede_set_key,
+ .cia_encrypt = sparc_des3_ede_encrypt,
+ .cia_decrypt = sparc_des3_ede_decrypt
+ }
}
}
-}, {
- .cra_name = "ecb(des3_ede)",
- .cra_driver_name = "ecb-des3_ede-sparc64",
- .cra_priority = SPARC_CR_OPCODE_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct des3_ede_sparc64_ctx),
- .cra_alignmask = 7,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .setkey = des3_ede_set_key,
- .encrypt = ecb3_encrypt,
- .decrypt = ecb3_decrypt,
- },
- },
-}, {
- .cra_name = "cbc(des3_ede)",
- .cra_driver_name = "cbc-des3_ede-sparc64",
- .cra_priority = SPARC_CR_OPCODE_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct des3_ede_sparc64_ctx),
- .cra_alignmask = 7,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .ivsize = DES3_EDE_BLOCK_SIZE,
- .setkey = des3_ede_set_key,
- .encrypt = cbc3_encrypt,
- .decrypt = cbc3_decrypt,
- },
- },
-} };
+};
+
+static struct skcipher_alg skcipher_algs[] = {
+ {
+ .base.cra_name = "ecb(des)",
+ .base.cra_driver_name = "ecb-des-sparc64",
+ .base.cra_priority = SPARC_CR_OPCODE_PRIORITY,
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct des_sparc64_ctx),
+ .base.cra_alignmask = 7,
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .setkey = des_set_key_skcipher,
+ .encrypt = ecb_encrypt,
+ .decrypt = ecb_decrypt,
+ }, {
+ .base.cra_name = "cbc(des)",
+ .base.cra_driver_name = "cbc-des-sparc64",
+ .base.cra_priority = SPARC_CR_OPCODE_PRIORITY,
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct des_sparc64_ctx),
+ .base.cra_alignmask = 7,
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = des_set_key_skcipher,
+ .encrypt = cbc_encrypt,
+ .decrypt = cbc_decrypt,
+ }, {
+ .base.cra_name = "ecb(des3_ede)",
+ .base.cra_driver_name = "ecb-des3_ede-sparc64",
+ .base.cra_priority = SPARC_CR_OPCODE_PRIORITY,
+ .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct des3_ede_sparc64_ctx),
+ .base.cra_alignmask = 7,
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .setkey = des3_ede_set_key_skcipher,
+ .encrypt = ecb3_encrypt,
+ .decrypt = ecb3_decrypt,
+ }, {
+ .base.cra_name = "cbc(des3_ede)",
+ .base.cra_driver_name = "cbc-des3_ede-sparc64",
+ .base.cra_priority = SPARC_CR_OPCODE_PRIORITY,
+ .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct des3_ede_sparc64_ctx),
+ .base.cra_alignmask = 7,
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .setkey = des3_ede_set_key_skcipher,
+ .encrypt = cbc3_encrypt,
+ .decrypt = cbc3_decrypt,
+ }
+};
static bool __init sparc64_has_des_opcode(void)
{
@@ -503,17 +448,27 @@ static bool __init sparc64_has_des_opcode(void)
static int __init des_sparc64_mod_init(void)
{
- if (sparc64_has_des_opcode()) {
- pr_info("Using sparc64 des opcodes optimized DES implementation\n");
- return crypto_register_algs(algs, ARRAY_SIZE(algs));
+ int err;
+
+ if (!sparc64_has_des_opcode()) {
+ pr_info("sparc64 des opcodes not available.\n");
+ return -ENODEV;
}
- pr_info("sparc64 des opcodes not available.\n");
- return -ENODEV;
+ pr_info("Using sparc64 des opcodes optimized DES implementation\n");
+ err = crypto_register_algs(cipher_algs, ARRAY_SIZE(cipher_algs));
+ if (err)
+ return err;
+ err = crypto_register_skciphers(skcipher_algs,
+ ARRAY_SIZE(skcipher_algs));
+ if (err)
+ crypto_unregister_algs(cipher_algs, ARRAY_SIZE(cipher_algs));
+ return err;
}
static void __exit des_sparc64_mod_fini(void)
{
- crypto_unregister_algs(algs, ARRAY_SIZE(algs));
+ crypto_unregister_algs(cipher_algs, ARRAY_SIZE(cipher_algs));
+ crypto_unregister_skciphers(skcipher_algs, ARRAY_SIZE(skcipher_algs));
}
module_init(des_sparc64_mod_init);
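The conversion above reduces to one recurring skcipher walk loop: map each chunk, crypt the largest whole-block run, and return the sub-block remainder to skcipher_walk_done(). A minimal sketch of that loop follows; my_ctx, my_cipher_blocks() and MY_BLOCK_SIZE are hypothetical stand-ins, not part of this patch.

/*
 * Sketch of the skcipher walk pattern used by the conversion above.
 * my_ctx, my_cipher_blocks() and MY_BLOCK_SIZE are hypothetical.
 */
#include <crypto/internal/skcipher.h>
#include <linux/kernel.h>		/* round_down() */

#define MY_BLOCK_SIZE 8			/* assumption: 64-bit block cipher */

struct my_ctx;				/* hypothetical per-tfm context */
void my_cipher_blocks(const struct my_ctx *ctx, const void *src, void *dst,
		      unsigned int len, bool encrypt);

static int my_ecb_crypt(struct skcipher_request *req, bool encrypt)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	const struct my_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	/* 'true' == atomic; matches the non-sleeping FPU section above */
	err = skcipher_walk_virt(&walk, req, true);
	if (err)
		return err;

	while ((nbytes = walk.nbytes) != 0) {
		/* crypt whole blocks only; hand the tail back to the walk */
		my_cipher_blocks(ctx, walk.src.virt.addr, walk.dst.virt.addr,
				 round_down(nbytes, MY_BLOCK_SIZE), encrypt);
		err = skcipher_walk_done(&walk, nbytes % MY_BLOCK_SIZE);
	}
	return err;
}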
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index a8275fea4b70..9b4506373353 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -1673,9 +1673,9 @@ void __init setup_per_cpu_areas(void)
pcpu_alloc_bootmem,
pcpu_free_bootmem);
if (rc)
- pr_warning("PERCPU: %s allocator failed (%d), "
- "falling back to page size\n",
- pcpu_fc_names[pcpu_chosen_fc], rc);
+ pr_warn("PERCPU: %s allocator failed (%d), "
+ "falling back to page size\n",
+ pcpu_fc_names[pcpu_chosen_fc], rc);
}
if (rc < 0)
rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE,
diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S
index 61afd787bd0c..7ec79918b566 100644
--- a/arch/sparc/kernel/vmlinux.lds.S
+++ b/arch/sparc/kernel/vmlinux.lds.S
@@ -67,7 +67,7 @@ SECTIONS
.data1 : {
*(.data1)
}
- RW_DATA_SECTION(SMP_CACHE_BYTES, 0, THREAD_SIZE)
+ RW_DATA(SMP_CACHE_BYTES, 0, THREAD_SIZE)
/* End of data section */
_edata = .;
@@ -78,7 +78,6 @@ SECTIONS
__stop___fixup = .;
}
EXCEPTION_TABLE(16)
- NOTES
. = ALIGN(PAGE_SIZE);
__init_begin = ALIGN(PAGE_SIZE);
diff --git a/arch/sparc/vdso/Makefile b/arch/sparc/vdso/Makefile
index 324a23947585..997ffe46e953 100644
--- a/arch/sparc/vdso/Makefile
+++ b/arch/sparc/vdso/Makefile
@@ -65,14 +65,14 @@ $(vobjs): KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS) $(SPARC_REG_CFLAGS
#
# vDSO code runs in userspace and -pg doesn't help with profiling anyway.
#
-CFLAGS_REMOVE_vdso-note.o = -pg
CFLAGS_REMOVE_vclock_gettime.o = -pg
+CFLAGS_REMOVE_vdso32/vclock_gettime.o = -pg
$(obj)/%.so: OBJCOPYFLAGS := -S
$(obj)/%.so: $(obj)/%.so.dbg FORCE
$(call if_changed,objcopy)
-CPPFLAGS_vdso32.lds = $(CPPFLAGS_vdso.lds)
+CPPFLAGS_vdso32/vdso32.lds = $(CPPFLAGS_vdso.lds)
VDSO_LDFLAGS_vdso32.lds = -m elf32_sparc -soname linux-gate.so.1
#This makes sure the $(obj) subdirectory exists even though vdso32/
diff --git a/arch/um/configs/kunit_defconfig b/arch/um/configs/kunit_defconfig
new file mode 100644
index 000000000000..9235b7d42d38
--- /dev/null
+++ b/arch/um/configs/kunit_defconfig
@@ -0,0 +1,3 @@
+CONFIG_KUNIT=y
+CONFIG_KUNIT_TEST=y
+CONFIG_KUNIT_EXAMPLE_TEST=y
diff --git a/arch/um/include/asm/common.lds.S b/arch/um/include/asm/common.lds.S
index d7086b985f27..7145ce699982 100644
--- a/arch/um/include/asm/common.lds.S
+++ b/arch/um/include/asm/common.lds.S
@@ -9,14 +9,13 @@
_sdata = .;
PROVIDE (sdata = .);
- RODATA
+ RO_DATA(4096)
.unprotected : { *(.unprotected) }
. = ALIGN(4096);
PROVIDE (_unprotected_end = .);
. = ALIGN(4096);
- NOTES
EXCEPTION_TABLE(0)
BUG_TABLE
diff --git a/arch/unicore32/kernel/vmlinux.lds.S b/arch/unicore32/kernel/vmlinux.lds.S
index 7abf90537cd5..6fb320b337ef 100644
--- a/arch/unicore32/kernel/vmlinux.lds.S
+++ b/arch/unicore32/kernel/vmlinux.lds.S
@@ -43,12 +43,11 @@ SECTIONS
_etext = .;
_sdata = .;
- RO_DATA_SECTION(PAGE_SIZE)
- RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
+ RO_DATA(PAGE_SIZE)
+ RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
_edata = .;
EXCEPTION_TABLE(L1_CACHE_BYTES)
- NOTES
BSS_SECTION(0, 0, 0)
_end = .;
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index d6e1faa28c58..9c9bc348c412 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -24,7 +24,7 @@ config X86_64
depends on 64BIT
# Options that are inherently 64-bit kernel only:
select ARCH_HAS_GIGANTIC_PAGE
- select ARCH_SUPPORTS_INT128
+ select ARCH_SUPPORTS_INT128 if CC_HAS_INT128
select ARCH_USE_CMPXCHG_LOCKREF
select HAVE_ARCH_SOFT_DIRTY
select MODULES_USE_ELF_RELA
@@ -73,7 +73,6 @@ config X86
select ARCH_HAS_PMEM_API if X86_64
select ARCH_HAS_PTE_DEVMAP if X86_64
select ARCH_HAS_PTE_SPECIAL
- select ARCH_HAS_REFCOUNT
select ARCH_HAS_UACCESS_FLUSHCACHE if X86_64
select ARCH_HAS_UACCESS_MCSAFE if X86_64 && X86_MCE
select ARCH_HAS_SET_MEMORY
@@ -932,36 +931,6 @@ config GART_IOMMU
If unsure, say Y.
-config CALGARY_IOMMU
- bool "IBM Calgary IOMMU support"
- select IOMMU_HELPER
- select SWIOTLB
- depends on X86_64 && PCI
- ---help---
- Support for hardware IOMMUs in IBM's xSeries x366 and x460
- systems. Needed to run systems with more than 3GB of memory
- properly with 32-bit PCI devices that do not support DAC
- (Double Address Cycle). Calgary also supports bus level
- isolation, where all DMAs pass through the IOMMU. This
- prevents them from going anywhere except their intended
- destination. This catches hard-to-find kernel bugs and
- mis-behaving drivers and devices that do not use the DMA-API
- properly to set up their DMA buffers. The IOMMU can be
- turned off at boot time with the iommu=off parameter.
- Normally the kernel will make the right choice by itself.
- If unsure, say Y.
-
-config CALGARY_IOMMU_ENABLED_BY_DEFAULT
- def_bool y
- prompt "Should Calgary be enabled by default?"
- depends on CALGARY_IOMMU
- ---help---
- Should Calgary be enabled by default? if you choose 'y', Calgary
- will be used (if it exists). If you choose 'n', Calgary will not be
- used even if it exists. If you choose 'n' and would like to use
- Calgary anyway, pass 'iommu=calgary' on the kernel command line.
- If unsure, say Y.
-
config MAXSMP
bool "Enable Maximum number of SMP Processors and NUMA Nodes"
depends on X86_64 && SMP && DEBUG_KERNEL
@@ -1000,8 +969,8 @@ config NR_CPUS_RANGE_END
config NR_CPUS_RANGE_END
int
depends on X86_64
- default 8192 if SMP && ( MAXSMP || CPUMASK_OFFSTACK)
- default 512 if SMP && (!MAXSMP && !CPUMASK_OFFSTACK)
+ default 8192 if SMP && CPUMASK_OFFSTACK
+ default 512 if SMP && !CPUMASK_OFFSTACK
default 1 if !SMP
config NR_CPUS_DEFAULT
@@ -1254,6 +1223,24 @@ config X86_VSYSCALL_EMULATION
Disabling this option saves about 7K of kernel size and
possibly 4K of additional runtime pagetable memory.
+config X86_IOPL_IOPERM
+ bool "IOPERM and IOPL Emulation"
+ default y
+ ---help---
+ This enables the ioperm() and iopl() syscalls which are necessary
+ for legacy applications.
+
+ Legacy IOPL support is an overly broad mechanism which, besides
+ granting access to all 65536 I/O ports, also allows user space to
+ disable interrupts. To gain this access the caller needs the
+ CAP_SYS_RAWIO capability and permission from any active security
+ modules.
+
+ The emulation restricts the syscall to granting only the full-range
+ I/O port access, but prevents user space from disabling interrupts,
+ which the hardware IOPL mechanism would otherwise allow.
+
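For illustration only: user space normally reaches these syscalls through glibc's <sys/io.h>. A minimal sketch of a legacy port-I/O user (requires CAP_SYS_RAWIO; port 0x80 is assumed here as the conventional diagnostic port):

/* sketch: legacy user-space port I/O via ioperm(); root/CAP_SYS_RAWIO only */
#include <stdio.h>
#include <stdlib.h>
#include <sys/io.h>

int main(void)
{
	/* request access to I/O port 0x80 (assumed harmless POST port) */
	if (ioperm(0x80, 1, 1) < 0) {
		perror("ioperm");
		return EXIT_FAILURE;
	}
	outb(0x42, 0x80);	/* one byte out of the debug port */
	ioperm(0x80, 1, 0);	/* drop access again */
	return 0;
}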
config TOSHIBA
tristate "Toshiba Laptop support"
depends on X86_32
@@ -1492,6 +1479,7 @@ config X86_PAE
config X86_5LEVEL
bool "Enable 5-level page tables support"
+ default y
select DYNAMIC_MEMORY_LAYOUT
select SPARSEMEM_VMEMMAP
depends on X86_64
@@ -1751,7 +1739,7 @@ config X86_RESERVE_LOW
config MATH_EMULATION
bool
depends on MODIFY_LDT_SYSCALL
- prompt "Math emulation" if X86_32
+ prompt "Math emulation" if X86_32 && (M486SX || MELAN)
---help---
Linux can emulate a math coprocessor (used for floating point
operations) if you don't have one. 486DX and Pentium processors have
@@ -1880,16 +1868,16 @@ config X86_SMAP
If unsure, say Y.
-config X86_INTEL_UMIP
+config X86_UMIP
def_bool y
- depends on CPU_SUP_INTEL
- prompt "Intel User Mode Instruction Prevention" if EXPERT
+ depends on CPU_SUP_INTEL || CPU_SUP_AMD
+ prompt "User Mode Instruction Prevention" if EXPERT
---help---
- The User Mode Instruction Prevention (UMIP) is a security
- feature in newer Intel processors. If enabled, a general
- protection fault is issued if the SGDT, SLDT, SIDT, SMSW
- or STR instructions are executed in user mode. These instructions
- unnecessarily expose information about the hardware state.
+ User Mode Instruction Prevention (UMIP) is a security feature in
+ some x86 processors. If enabled, a general protection fault is
+ issued if the SGDT, SLDT, SIDT, SMSW or STR instructions are
+ executed in user mode. These instructions unnecessarily expose
+ information about the hardware state.
The vast majority of applications do not use these instructions.
For the very few that do, software emulation is provided in
@@ -1940,6 +1928,51 @@ config X86_INTEL_MEMORY_PROTECTION_KEYS
If unsure, say y.
+choice
+ prompt "TSX enable mode"
+ depends on CPU_SUP_INTEL
+ default X86_INTEL_TSX_MODE_OFF
+ help
+ Intel's TSX (Transactional Synchronization Extensions) feature
+ allows locking protocols to be optimized through lock elision,
+ which can lead to a noticeable performance boost.
+
+ On the other hand, TSX has been shown to be exploitable for side
+ channel attacks (e.g. TAA), and more such attacks are likely to
+ be discovered in the future.
+
+ Therefore TSX is not enabled by default (aka tsx=off). An admin
+ can override this decision with the tsx=on command line parameter.
+ Even with TSX enabled, the kernel will attempt to enable the best
+ possible TAA mitigation setting depending on the microcode available
+ for the particular machine.
+
+ This option sets the default TSX mode to one of tsx=on, =off or
+ =auto. See Documentation/admin-guide/kernel-parameters.txt for more
+ details.
+
+ Say off if unsure, auto if TSX is in use but should only be enabled
+ on safe platforms, or on if TSX is in use and the security aspect of
+ TSX is not relevant.
+
+config X86_INTEL_TSX_MODE_OFF
+ bool "off"
+ help
+ TSX is disabled if possible - equivalent to the tsx=off command line parameter.
+
+config X86_INTEL_TSX_MODE_ON
+ bool "on"
+ help
+ TSX is always enabled on TSX-capable hardware - equivalent to the
+ tsx=on command line parameter.
+
+config X86_INTEL_TSX_MODE_AUTO
+ bool "auto"
+ help
+ TSX is enabled on TSX-capable hardware that is believed to be safe
+ against side channel attacks - equivalent to the tsx=auto command
+ line parameter.
+endchoice
+
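As a hedged aside, the TAA mitigation state chosen at boot can be read back through sysfs (the path below follows the tsx_async_abort admin-guide documentation; treat it as an assumption for any given kernel):

/* sketch: print the current TAA mitigation status (assumed sysfs path) */
#include <stdio.h>

int main(void)
{
	const char *p = "/sys/devices/system/cpu/vulnerabilities/tsx_async_abort";
	char buf[128];
	FILE *f = fopen(p, "r");

	if (!f) {
		perror(p);
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("%s", buf);
	fclose(f);
	return 0;
}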
config EFI
bool "EFI runtime service support"
depends on ACPI
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
index 8e29c991ba3e..af9c967782f6 100644
--- a/arch/x86/Kconfig.cpu
+++ b/arch/x86/Kconfig.cpu
@@ -50,12 +50,19 @@ choice
See each option's help text for additional details. If you don't know
what to do, choose "486".
+config M486SX
+ bool "486SX"
+ depends on X86_32
+ ---help---
+ Select this for a 486-class CPU without an FPU, such as the
+ AMD/Cyrix/IBM/Intel SL/SLC/SLC2/SLC3/SX/SX2 and UMC U5S.
+
config M486
- bool "486"
+ bool "486DX"
depends on X86_32
---help---
Select this for an 486-class CPU such as AMD/Cyrix/IBM/Intel
- 486DX/DX2/DX4 or SL/SLC/SLC2/SLC3/SX/SX2 and UMC U5D or U5S.
+ 486DX/DX2/DX4 and UMC U5D.
config M586
bool "586/K5/5x86/6x86/6x86MX"
@@ -312,20 +319,20 @@ config X86_L1_CACHE_SHIFT
int
default "7" if MPENTIUM4 || MPSC
default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MATOM || MVIAC7 || X86_GENERIC || GENERIC_CPU
- default "4" if MELAN || M486 || MGEODEGX1
+ default "4" if MELAN || M486SX || M486 || MGEODEGX1
default "5" if MWINCHIP3D || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX
config X86_F00F_BUG
def_bool y
- depends on M586MMX || M586TSC || M586 || M486
+ depends on M586MMX || M586TSC || M586 || M486SX || M486
config X86_INVD_BUG
def_bool y
- depends on M486
+ depends on M486SX || M486
config X86_ALIGNMENT_16
def_bool y
- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486SX || M486 || MVIAC3_2 || MGEODEGX1
config X86_INTEL_USERCOPY
def_bool y
@@ -378,7 +385,7 @@ config X86_MINIMUM_CPU_FAMILY
config X86_DEBUGCTLMSR
def_bool y
- depends on !(MK6 || MWINCHIPC6 || MWINCHIP3D || MCYRIXIII || M586MMX || M586TSC || M586 || M486) && !UML
+ depends on !(MK6 || MWINCHIPC6 || MWINCHIP3D || MCYRIXIII || M586MMX || M586TSC || M586 || M486SX || M486) && !UML
menuconfig PROCESSOR_SELECT
bool "Supported processor vendors" if EXPERT
@@ -402,7 +409,7 @@ config CPU_SUP_INTEL
config CPU_SUP_CYRIX_32
default y
bool "Support Cyrix processors" if PROCESSOR_SELECT
- depends on M486 || M586 || M586TSC || M586MMX || (EXPERT && !64BIT)
+ depends on M486SX || M486 || M586 || M586TSC || M586MMX || (EXPERT && !64BIT)
---help---
This enables detection, tunings and quirks for Cyrix processors
@@ -470,7 +477,7 @@ config CPU_SUP_TRANSMETA_32
config CPU_SUP_UMC_32
default y
bool "Support UMC processors" if PROCESSOR_SELECT
- depends on M486 || (EXPERT && !64BIT)
+ depends on M486SX || M486 || (EXPERT && !64BIT)
---help---
This enables detection, tunings and quirks for UMC processors
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index bf9cd83de777..409c00f74e60 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -316,10 +316,6 @@ config UNWINDER_FRAME_POINTER
unwinder, but the kernel text size will grow by ~3% and the kernel's
overall performance will degrade by roughly 5-10%.
- This option is recommended if you want to use the livepatch
- consistency model, as this is currently the only way to get a
- reliable stack trace (CONFIG_HAVE_RELIABLE_STACKTRACE).
-
config UNWINDER_GUESS
bool "Guess unwinder"
depends on EXPERT
diff --git a/arch/x86/Makefile_32.cpu b/arch/x86/Makefile_32.cpu
index 1f5faf8606b4..cd3056759880 100644
--- a/arch/x86/Makefile_32.cpu
+++ b/arch/x86/Makefile_32.cpu
@@ -10,6 +10,7 @@ else
tune = $(call cc-option,-mcpu=$(1),$(2))
endif
+cflags-$(CONFIG_M486SX) += -march=i486
cflags-$(CONFIG_M486) += -march=i486
cflags-$(CONFIG_M586) += -march=i586
cflags-$(CONFIG_M586TSC) += -march=i586
diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
index e2839b5c246c..95410d6ee2ff 100644
--- a/arch/x86/boot/Makefile
+++ b/arch/x86/boot/Makefile
@@ -67,6 +67,7 @@ clean-files += cpustr.h
KBUILD_CFLAGS := $(REALMODE_CFLAGS) -D_SETUP
KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
+KBUILD_CFLAGS += $(call cc-option,-fmacro-prefix-map=$(srctree)/=)
GCOV_PROFILE := n
UBSAN_SANITIZE := n
@@ -87,7 +88,7 @@ $(obj)/vmlinux.bin: $(obj)/compressed/vmlinux FORCE
SETUP_OBJS = $(addprefix $(obj)/,$(setup-y))
-sed-zoffset := -e 's/^\([0-9a-fA-F]*\) [ABCDGRSTVW] \(startup_32\|startup_64\|efi32_stub_entry\|efi64_stub_entry\|efi_pe_entry\|input_data\|_end\|_ehead\|_text\|z_.*\)$$/\#define ZO_\2 0x\1/p'
+sed-zoffset := -e 's/^\([0-9a-fA-F]*\) [ABCDGRSTVW] \(startup_32\|startup_64\|efi32_stub_entry\|efi64_stub_entry\|efi_pe_entry\|input_data\|kernel_info\|_end\|_ehead\|_text\|z_.*\)$$/\#define ZO_\2 0x\1/p'
quiet_cmd_zoffset = ZOFFSET $@
cmd_zoffset = $(NM) $< | sed -n $(sed-zoffset) > $@
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index 6b84afdd7538..aa976adb7094 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -38,6 +38,7 @@ KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member)
KBUILD_CFLAGS += $(call cc-disable-warning, gnu)
KBUILD_CFLAGS += -Wno-pointer-sign
+KBUILD_CFLAGS += $(call cc-option,-fmacro-prefix-map=$(srctree)/=)
KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
GCOV_PROFILE := n
@@ -72,8 +73,8 @@ $(obj)/../voffset.h: vmlinux FORCE
$(obj)/misc.o: $(obj)/../voffset.h
-vmlinux-objs-y := $(obj)/vmlinux.lds $(obj)/head_$(BITS).o $(obj)/misc.o \
- $(obj)/string.o $(obj)/cmdline.o $(obj)/error.o \
+vmlinux-objs-y := $(obj)/vmlinux.lds $(obj)/kernel_info.o $(obj)/head_$(BITS).o \
+ $(obj)/misc.o $(obj)/string.o $(obj)/cmdline.o $(obj)/error.o \
$(obj)/piggy.o $(obj)/cpuflags.o
vmlinux-objs-$(CONFIG_EARLY_PRINTK) += $(obj)/early_serial_console.o
diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
index 82bc60c8acb2..72b08fde6de6 100644
--- a/arch/x86/boot/compressed/eboot.c
+++ b/arch/x86/boot/compressed/eboot.c
@@ -554,7 +554,11 @@ setup_e820(struct boot_params *params, struct setup_data *e820ext, u32 e820ext_s
case EFI_BOOT_SERVICES_CODE:
case EFI_BOOT_SERVICES_DATA:
case EFI_CONVENTIONAL_MEMORY:
- e820_type = E820_TYPE_RAM;
+ if (efi_soft_reserve_enabled() &&
+ (d->attribute & EFI_MEMORY_SP))
+ e820_type = E820_TYPE_SOFT_RESERVED;
+ else
+ e820_type = E820_TYPE_RAM;
break;
case EFI_ACPI_MEMORY_NVS:
@@ -782,6 +786,9 @@ efi_main(struct efi_config *c, struct boot_params *boot_params)
/* Ask the firmware to clear memory on unclean shutdown */
efi_enable_reset_attack_mitigation(sys_table);
+
+ efi_random_get_seed(sys_table);
+
efi_retrieve_tpm2_eventlog(sys_table);
setup_graphics(boot_params);
diff --git a/arch/x86/boot/compressed/efi_stub_32.S b/arch/x86/boot/compressed/efi_stub_32.S
index 257e341fd2c8..ed6c351d34ed 100644
--- a/arch/x86/boot/compressed/efi_stub_32.S
+++ b/arch/x86/boot/compressed/efi_stub_32.S
@@ -24,7 +24,7 @@
*/
.text
-ENTRY(efi_call_phys)
+SYM_FUNC_START(efi_call_phys)
/*
* 0. The function can only be called in Linux kernel. So CS has been
* set to 0x0010, DS and SS have been set to 0x0018. In EFI, I found
@@ -77,7 +77,7 @@ ENTRY(efi_call_phys)
movl saved_return_addr(%edx), %ecx
pushl %ecx
ret
-ENDPROC(efi_call_phys)
+SYM_FUNC_END(efi_call_phys)
.previous
.data
diff --git a/arch/x86/boot/compressed/efi_thunk_64.S b/arch/x86/boot/compressed/efi_thunk_64.S
index bff9ab7c6317..593913692d16 100644
--- a/arch/x86/boot/compressed/efi_thunk_64.S
+++ b/arch/x86/boot/compressed/efi_thunk_64.S
@@ -23,7 +23,7 @@
.code64
.text
-ENTRY(efi64_thunk)
+SYM_FUNC_START(efi64_thunk)
push %rbp
push %rbx
@@ -97,14 +97,14 @@ ENTRY(efi64_thunk)
pop %rbx
pop %rbp
ret
-ENDPROC(efi64_thunk)
+SYM_FUNC_END(efi64_thunk)
-ENTRY(efi_exit32)
+SYM_FUNC_START_LOCAL(efi_exit32)
movq func_rt_ptr(%rip), %rax
push %rax
mov %rdi, %rax
ret
-ENDPROC(efi_exit32)
+SYM_FUNC_END(efi_exit32)
.code32
/*
@@ -112,7 +112,7 @@ ENDPROC(efi_exit32)
*
* The stack should represent the 32-bit calling convention.
*/
-ENTRY(efi_enter32)
+SYM_FUNC_START_LOCAL(efi_enter32)
movl $__KERNEL_DS, %eax
movl %eax, %ds
movl %eax, %es
@@ -172,20 +172,23 @@ ENTRY(efi_enter32)
btsl $X86_CR0_PG_BIT, %eax
movl %eax, %cr0
lret
-ENDPROC(efi_enter32)
+SYM_FUNC_END(efi_enter32)
.data
.balign 8
- .global efi32_boot_gdt
-efi32_boot_gdt: .word 0
- .quad 0
+SYM_DATA_START(efi32_boot_gdt)
+ .word 0
+ .quad 0
+SYM_DATA_END(efi32_boot_gdt)
+
+SYM_DATA_START_LOCAL(save_gdt)
+ .word 0
+ .quad 0
+SYM_DATA_END(save_gdt)
-save_gdt: .word 0
- .quad 0
-func_rt_ptr: .quad 0
+SYM_DATA_LOCAL(func_rt_ptr, .quad 0)
- .global efi_gdt64
-efi_gdt64:
+SYM_DATA_START(efi_gdt64)
.word efi_gdt64_end - efi_gdt64
.long 0 /* Filled out by user */
.word 0
@@ -194,4 +197,4 @@ efi_gdt64:
.quad 0x00cf92000000ffff /* __KERNEL_DS */
.quad 0x0080890000000000 /* TS descriptor */
.quad 0x0000000000000000 /* TS continued */
-efi_gdt64_end:
+SYM_DATA_END_LABEL(efi_gdt64, SYM_L_LOCAL, efi_gdt64_end)
diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
index 5e30eaaf8576..f2dfd6d083ef 100644
--- a/arch/x86/boot/compressed/head_32.S
+++ b/arch/x86/boot/compressed/head_32.S
@@ -61,7 +61,7 @@
.hidden _egot
__HEAD
-ENTRY(startup_32)
+SYM_FUNC_START(startup_32)
cld
/*
* Test KEEP_SEGMENTS flag to see if the bootloader is asking
@@ -142,14 +142,14 @@ ENTRY(startup_32)
*/
leal .Lrelocated(%ebx), %eax
jmp *%eax
-ENDPROC(startup_32)
+SYM_FUNC_END(startup_32)
#ifdef CONFIG_EFI_STUB
/*
* We don't need the return address, so set up the stack so efi_main() can find
* its arguments.
*/
-ENTRY(efi_pe_entry)
+SYM_FUNC_START(efi_pe_entry)
add $0x4, %esp
call 1f
@@ -174,9 +174,9 @@ ENTRY(efi_pe_entry)
pushl %eax
pushl %ecx
jmp 2f /* Skip efi_config initialization */
-ENDPROC(efi_pe_entry)
+SYM_FUNC_END(efi_pe_entry)
-ENTRY(efi32_stub_entry)
+SYM_FUNC_START(efi32_stub_entry)
add $0x4, %esp
popl %ecx
popl %edx
@@ -205,11 +205,11 @@ fail:
movl BP_code32_start(%esi), %eax
leal startup_32(%eax), %eax
jmp *%eax
-ENDPROC(efi32_stub_entry)
+SYM_FUNC_END(efi32_stub_entry)
#endif
.text
-.Lrelocated:
+SYM_FUNC_START_LOCAL_NOALIGN(.Lrelocated)
/*
* Clear BSS (stack is currently empty)
@@ -260,6 +260,7 @@ ENDPROC(efi32_stub_entry)
*/
xorl %ebx, %ebx
jmp *%eax
+SYM_FUNC_END(.Lrelocated)
#ifdef CONFIG_EFI_STUB
.data
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index d98cd483377e..58a512e33d8d 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -45,7 +45,7 @@
__HEAD
.code32
-ENTRY(startup_32)
+SYM_FUNC_START(startup_32)
/*
* 32bit entry is 0 and it is ABI so immutable!
* If we come here directly from a bootloader,
@@ -222,11 +222,11 @@ ENTRY(startup_32)
/* Jump from 32bit compatibility mode into 64bit mode. */
lret
-ENDPROC(startup_32)
+SYM_FUNC_END(startup_32)
#ifdef CONFIG_EFI_MIXED
.org 0x190
-ENTRY(efi32_stub_entry)
+SYM_FUNC_START(efi32_stub_entry)
add $0x4, %esp /* Discard return address */
popl %ecx
popl %edx
@@ -245,12 +245,12 @@ ENTRY(efi32_stub_entry)
movl %eax, efi_config(%ebp)
jmp startup_32
-ENDPROC(efi32_stub_entry)
+SYM_FUNC_END(efi32_stub_entry)
#endif
.code64
.org 0x200
-ENTRY(startup_64)
+SYM_CODE_START(startup_64)
/*
* 64bit entry is 0x200 and it is ABI so immutable!
* We come here either from startup_32 or directly from a
@@ -442,11 +442,12 @@ trampoline_return:
*/
leaq .Lrelocated(%rbx), %rax
jmp *%rax
+SYM_CODE_END(startup_64)
#ifdef CONFIG_EFI_STUB
/* The entry point for the PE/COFF executable is efi_pe_entry. */
-ENTRY(efi_pe_entry)
+SYM_FUNC_START(efi_pe_entry)
movq %rcx, efi64_config(%rip) /* Handle */
movq %rdx, efi64_config+8(%rip) /* EFI System table pointer */
@@ -495,10 +496,10 @@ fail:
movl BP_code32_start(%esi), %eax
leaq startup_64(%rax), %rax
jmp *%rax
-ENDPROC(efi_pe_entry)
+SYM_FUNC_END(efi_pe_entry)
.org 0x390
-ENTRY(efi64_stub_entry)
+SYM_FUNC_START(efi64_stub_entry)
movq %rdi, efi64_config(%rip) /* Handle */
movq %rsi, efi64_config+8(%rip) /* EFI System table pointer */
@@ -507,11 +508,11 @@ ENTRY(efi64_stub_entry)
movq %rdx, %rsi
jmp handover_entry
-ENDPROC(efi64_stub_entry)
+SYM_FUNC_END(efi64_stub_entry)
#endif
.text
-.Lrelocated:
+SYM_FUNC_START_LOCAL_NOALIGN(.Lrelocated)
/*
* Clear BSS (stack is currently empty)
@@ -540,6 +541,7 @@ ENDPROC(efi64_stub_entry)
* Jump to the decompressed kernel.
*/
jmp *%rax
+SYM_FUNC_END(.Lrelocated)
/*
* Adjust the global offset table
@@ -570,7 +572,7 @@ ENDPROC(efi64_stub_entry)
* ECX contains the base address of the trampoline memory.
* Non zero RDX means trampoline needs to enable 5-level paging.
*/
-ENTRY(trampoline_32bit_src)
+SYM_CODE_START(trampoline_32bit_src)
/* Set up data and stack segments */
movl $__KERNEL_DS, %eax
movl %eax, %ds
@@ -633,11 +635,13 @@ ENTRY(trampoline_32bit_src)
movl %eax, %cr0
lret
+SYM_CODE_END(trampoline_32bit_src)
.code64
-.Lpaging_enabled:
+SYM_FUNC_START_LOCAL_NOALIGN(.Lpaging_enabled)
/* Return from the trampoline */
jmp *%rdi
+SYM_FUNC_END(.Lpaging_enabled)
/*
* The trampoline code has a size limit.
@@ -647,20 +651,22 @@ ENTRY(trampoline_32bit_src)
.org trampoline_32bit_src + TRAMPOLINE_32BIT_CODE_SIZE
.code32
-.Lno_longmode:
+SYM_FUNC_START_LOCAL_NOALIGN(.Lno_longmode)
/* This isn't an x86-64 CPU, so hang intentionally, we cannot continue */
1:
hlt
jmp 1b
+SYM_FUNC_END(.Lno_longmode)
#include "../../kernel/verify_cpu.S"
.data
-gdt64:
+SYM_DATA_START_LOCAL(gdt64)
.word gdt_end - gdt
.quad 0
+SYM_DATA_END(gdt64)
.balign 8
-gdt:
+SYM_DATA_START_LOCAL(gdt)
.word gdt_end - gdt
.long gdt
.word 0
@@ -669,25 +675,24 @@ gdt:
.quad 0x00cf92000000ffff /* __KERNEL_DS */
.quad 0x0080890000000000 /* TS descriptor */
.quad 0x0000000000000000 /* TS continued */
-gdt_end:
+SYM_DATA_END_LABEL(gdt, SYM_L_LOCAL, gdt_end)
#ifdef CONFIG_EFI_STUB
-efi_config:
- .quad 0
+SYM_DATA_LOCAL(efi_config, .quad 0)
#ifdef CONFIG_EFI_MIXED
- .global efi32_config
-efi32_config:
+SYM_DATA_START(efi32_config)
.fill 5,8,0
.quad efi64_thunk
.byte 0
+SYM_DATA_END(efi32_config)
#endif
- .global efi64_config
-efi64_config:
+SYM_DATA_START(efi64_config)
.fill 5,8,0
.quad efi_call
.byte 1
+SYM_DATA_END(efi64_config)
#endif /* CONFIG_EFI_STUB */
/*
@@ -695,23 +700,21 @@ efi64_config:
*/
.bss
.balign 4
-boot_heap:
- .fill BOOT_HEAP_SIZE, 1, 0
-boot_stack:
+SYM_DATA_LOCAL(boot_heap, .fill BOOT_HEAP_SIZE, 1, 0)
+
+SYM_DATA_START_LOCAL(boot_stack)
.fill BOOT_STACK_SIZE, 1, 0
-boot_stack_end:
+SYM_DATA_END_LABEL(boot_stack, SYM_L_LOCAL, boot_stack_end)
/*
* Space for page tables (not in .bss so not zeroed)
*/
.section ".pgtable","a",@nobits
.balign 4096
-pgtable:
- .fill BOOT_PGT_SIZE, 1, 0
+SYM_DATA_LOCAL(pgtable, .fill BOOT_PGT_SIZE, 1, 0)
/*
* The page table is going to be used instead of page table in the trampoline
* memory.
*/
-top_pgtable:
- .fill PAGE_SIZE, 1, 0
+SYM_DATA_LOCAL(top_pgtable, .fill PAGE_SIZE, 1, 0)
diff --git a/arch/x86/boot/compressed/kaslr.c b/arch/x86/boot/compressed/kaslr.c
index 2e53c056ba20..d7408af55738 100644
--- a/arch/x86/boot/compressed/kaslr.c
+++ b/arch/x86/boot/compressed/kaslr.c
@@ -132,8 +132,14 @@ char *skip_spaces(const char *str)
#include "../../../../lib/ctype.c"
#include "../../../../lib/cmdline.c"
+enum parse_mode {
+ PARSE_MEMMAP,
+ PARSE_EFI,
+};
+
static int
-parse_memmap(char *p, unsigned long long *start, unsigned long long *size)
+parse_memmap(char *p, unsigned long long *start, unsigned long long *size,
+ enum parse_mode mode)
{
char *oldp;
@@ -156,8 +162,29 @@ parse_memmap(char *p, unsigned long long *start, unsigned long long *size)
*start = memparse(p + 1, &p);
return 0;
case '@':
- /* memmap=nn@ss specifies usable region, should be skipped */
- *size = 0;
+ if (mode == PARSE_MEMMAP) {
+ /*
+ * memmap=nn@ss specifies usable region, should
+ * be skipped
+ */
+ *size = 0;
+ } else {
+ unsigned long long flags;
+
+ /*
+ * efi_fake_mem=nn@ss:attr - the attr specifies
+ * flags that might imply a soft-reservation.
+ */
+ *start = memparse(p + 1, &p);
+ if (p && *p == ':') {
+ p++;
+ if (kstrtoull(p, 0, &flags) < 0)
+ *size = 0;
+ else if (flags & EFI_MEMORY_SP)
+ return 0;
+ }
+ *size = 0;
+ }
/* Fall through */
default:
/*
@@ -172,7 +199,7 @@ parse_memmap(char *p, unsigned long long *start, unsigned long long *size)
return -EINVAL;
}
-static void mem_avoid_memmap(char *str)
+static void mem_avoid_memmap(enum parse_mode mode, char *str)
{
static int i;
@@ -187,7 +214,7 @@ static void mem_avoid_memmap(char *str)
if (k)
*k++ = 0;
- rc = parse_memmap(str, &start, &size);
+ rc = parse_memmap(str, &start, &size, mode);
if (rc < 0)
break;
str = k;
@@ -238,7 +265,6 @@ static void parse_gb_huge_pages(char *param, char *val)
}
}
-
static void handle_mem_options(void)
{
char *args = (char *)get_cmd_line_ptr();
@@ -271,7 +297,7 @@ static void handle_mem_options(void)
}
if (!strcmp(param, "memmap")) {
- mem_avoid_memmap(val);
+ mem_avoid_memmap(PARSE_MEMMAP, val);
} else if (strstr(param, "hugepages")) {
parse_gb_huge_pages(param, val);
} else if (!strcmp(param, "mem")) {
@@ -284,6 +310,8 @@ static void handle_mem_options(void)
goto out;
mem_limit = mem_size;
+ } else if (!strcmp(param, "efi_fake_mem")) {
+ mem_avoid_memmap(PARSE_EFI, val);
}
}
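The efi_fake_mem syntax handled here is nn@ss:attr. A stand-alone sketch of that classification, with MY_EFI_MEMORY_SP as an assumed stand-in for the EFI_MEMORY_SP attribute bit and memparse()'s K/M/G suffix handling omitted:

/* sketch: classify one efi_fake_mem=nn@ss:attr entry as the code above does */
#include <stdbool.h>
#include <stdlib.h>

#define MY_EFI_MEMORY_SP 0x40000ULL	/* assumption: "specific purpose" bit */

static bool fake_mem_is_soft_reserved(const char *arg)
{
	char *p;
	unsigned long long attr;

	strtoull(arg, &p, 0);		/* nn (size); suffixes not handled */
	if (*p++ != '@')
		return false;
	strtoull(p, &p, 0);		/* ss (start) */
	if (*p++ != ':')
		return false;
	attr = strtoull(p, NULL, 0);	/* attr */
	return attr & MY_EFI_MEMORY_SP;
}

Under these assumptions, fake_mem_is_soft_reserved("0x100000000@0x240000000:0x40000") would return true, and such a region is then treated as soft-reserved rather than ordinary RAM.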
@@ -459,6 +487,18 @@ static bool mem_avoid_overlap(struct mem_vector *img,
is_overlapping = true;
}
+ if (ptr->type == SETUP_INDIRECT &&
+ ((struct setup_indirect *)ptr->data)->type != SETUP_INDIRECT) {
+ avoid.start = ((struct setup_indirect *)ptr->data)->addr;
+ avoid.size = ((struct setup_indirect *)ptr->data)->len;
+
+ if (mem_overlaps(img, &avoid) && (avoid.start < earliest)) {
+ *overlap = avoid;
+ earliest = overlap->start;
+ is_overlapping = true;
+ }
+ }
+
ptr = (struct setup_data *)(unsigned long)ptr->next;
}
@@ -760,6 +800,10 @@ process_efi_entries(unsigned long minimum, unsigned long image_size)
if (md->type != EFI_CONVENTIONAL_MEMORY)
continue;
+ if (efi_soft_reserve_enabled() &&
+ (md->attribute & EFI_MEMORY_SP))
+ continue;
+
if (efi_mirror_found &&
!(md->attribute & EFI_MEMORY_MORE_RELIABLE))
continue;
diff --git a/arch/x86/boot/compressed/kernel_info.S b/arch/x86/boot/compressed/kernel_info.S
new file mode 100644
index 000000000000..f818ee8fba38
--- /dev/null
+++ b/arch/x86/boot/compressed/kernel_info.S
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#include <asm/bootparam.h>
+
+ .section ".rodata.kernel_info", "a"
+
+ .global kernel_info
+
+kernel_info:
+ /* Header, Linux top (structure). */
+ .ascii "LToP"
+ /* Size. */
+ .long kernel_info_var_len_data - kernel_info
+ /* Size total. */
+ .long kernel_info_end - kernel_info
+
+ /* Maximal allowed type for setup_data and setup_indirect structs. */
+ .long SETUP_TYPE_MAX
+
+kernel_info_var_len_data:
+ /* Empty for time being... */
+kernel_info_end:
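kernel_info is a fixed "LToP"-tagged header (fixed-part size, total size including variable-length data, and the maximum accepted setup_data/setup_indirect type), which boot loaders locate via the new kernel_info_offset field. A hedged C view of the fixed part as a loader might validate it; the struct is an assumption mirroring the assembly above:

/* sketch: the fixed part of kernel_info as a bootloader might read it */
#include <stdint.h>
#include <string.h>

struct my_kernel_info {			/* assumed to mirror kernel_info.S */
	char     header[4];		/* "LToP" */
	uint32_t size;			/* offset of variable-length data */
	uint32_t size_total;		/* total size incl. variable data */
	uint32_t setup_type_max;	/* max setup_data/setup_indirect type */
};

static int kernel_info_valid(const void *blob)
{
	struct my_kernel_info ki;

	memcpy(&ki, blob, sizeof(ki));	/* blob may be unaligned */
	return memcmp(ki.header, "LToP", 4) == 0 &&
	       ki.size >= sizeof(ki) && ki.size_total >= ki.size;
}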
diff --git a/arch/x86/boot/compressed/mem_encrypt.S b/arch/x86/boot/compressed/mem_encrypt.S
index 6afb7130a387..dd07e7b41b11 100644
--- a/arch/x86/boot/compressed/mem_encrypt.S
+++ b/arch/x86/boot/compressed/mem_encrypt.S
@@ -15,7 +15,7 @@
.text
.code32
-ENTRY(get_sev_encryption_bit)
+SYM_FUNC_START(get_sev_encryption_bit)
xor %eax, %eax
#ifdef CONFIG_AMD_MEM_ENCRYPT
@@ -65,10 +65,10 @@ ENTRY(get_sev_encryption_bit)
#endif /* CONFIG_AMD_MEM_ENCRYPT */
ret
-ENDPROC(get_sev_encryption_bit)
+SYM_FUNC_END(get_sev_encryption_bit)
.code64
-ENTRY(set_sev_encryption_mask)
+SYM_FUNC_START(set_sev_encryption_mask)
#ifdef CONFIG_AMD_MEM_ENCRYPT
push %rbp
push %rdx
@@ -90,12 +90,11 @@ ENTRY(set_sev_encryption_mask)
xor %rax, %rax
ret
-ENDPROC(set_sev_encryption_mask)
+SYM_FUNC_END(set_sev_encryption_mask)
.data
#ifdef CONFIG_AMD_MEM_ENCRYPT
.balign 8
-GLOBAL(sme_me_mask)
- .quad 0
+SYM_DATA(sme_me_mask, .quad 0)
#endif
diff --git a/arch/x86/boot/copy.S b/arch/x86/boot/copy.S
index 4c5f4f4ad035..6afd05e819d2 100644
--- a/arch/x86/boot/copy.S
+++ b/arch/x86/boot/copy.S
@@ -15,7 +15,7 @@
.code16
.text
-GLOBAL(memcpy)
+SYM_FUNC_START_NOALIGN(memcpy)
pushw %si
pushw %di
movw %ax, %di
@@ -29,9 +29,9 @@ GLOBAL(memcpy)
popw %di
popw %si
retl
-ENDPROC(memcpy)
+SYM_FUNC_END(memcpy)
-GLOBAL(memset)
+SYM_FUNC_START_NOALIGN(memset)
pushw %di
movw %ax, %di
movzbl %dl, %eax
@@ -44,22 +44,22 @@ GLOBAL(memset)
rep; stosb
popw %di
retl
-ENDPROC(memset)
+SYM_FUNC_END(memset)
-GLOBAL(copy_from_fs)
+SYM_FUNC_START_NOALIGN(copy_from_fs)
pushw %ds
pushw %fs
popw %ds
calll memcpy
popw %ds
retl
-ENDPROC(copy_from_fs)
+SYM_FUNC_END(copy_from_fs)
-GLOBAL(copy_to_fs)
+SYM_FUNC_START_NOALIGN(copy_to_fs)
pushw %es
pushw %fs
popw %es
calll memcpy
popw %es
retl
-ENDPROC(copy_to_fs)
+SYM_FUNC_END(copy_to_fs)
diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
index 2c11c0f45d49..97d9b6d6c1af 100644
--- a/arch/x86/boot/header.S
+++ b/arch/x86/boot/header.S
@@ -300,7 +300,7 @@ _start:
# Part 2 of the header, from the old setup.S
.ascii "HdrS" # header signature
- .word 0x020d # header version number (>= 0x0105)
+ .word 0x020f # header version number (>= 0x0105)
# or else old loadlin-1.5 will fail)
.globl realmode_swtch
realmode_swtch: .word 0, 0 # default_switch, SETUPSEG
@@ -567,6 +567,7 @@ pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
init_size: .long INIT_SIZE # kernel initialization size
handover_offset: .long 0 # Filled in by build.c
+kernel_info_offset: .long 0 # Filled in by build.c
# End of setup header #####################################################
diff --git a/arch/x86/boot/pmjump.S b/arch/x86/boot/pmjump.S
index c22f9a7d1aeb..cbec8bd0841f 100644
--- a/arch/x86/boot/pmjump.S
+++ b/arch/x86/boot/pmjump.S
@@ -21,7 +21,7 @@
/*
* void protected_mode_jump(u32 entrypoint, u32 bootparams);
*/
-GLOBAL(protected_mode_jump)
+SYM_FUNC_START_NOALIGN(protected_mode_jump)
movl %edx, %esi # Pointer to boot_params table
xorl %ebx, %ebx
@@ -40,13 +40,13 @@ GLOBAL(protected_mode_jump)
# Transition to 32-bit mode
.byte 0x66, 0xea # ljmpl opcode
-2: .long in_pm32 # offset
+2: .long .Lin_pm32 # offset
.word __BOOT_CS # segment
-ENDPROC(protected_mode_jump)
+SYM_FUNC_END(protected_mode_jump)
.code32
.section ".text32","ax"
-GLOBAL(in_pm32)
+SYM_FUNC_START_LOCAL_NOALIGN(.Lin_pm32)
# Set up data segments for flat 32-bit mode
movl %ecx, %ds
movl %ecx, %es
@@ -72,4 +72,4 @@ GLOBAL(in_pm32)
lldt %cx
jmpl *%eax # Jump to the 32-bit entrypoint
-ENDPROC(in_pm32)
+SYM_FUNC_END(.Lin_pm32)
diff --git a/arch/x86/boot/tools/build.c b/arch/x86/boot/tools/build.c
index a93d44e58f9c..55e669d29e54 100644
--- a/arch/x86/boot/tools/build.c
+++ b/arch/x86/boot/tools/build.c
@@ -56,6 +56,7 @@ u8 buf[SETUP_SECT_MAX*512];
unsigned long efi32_stub_entry;
unsigned long efi64_stub_entry;
unsigned long efi_pe_entry;
+unsigned long kernel_info;
unsigned long startup_64;
/*----------------------------------------------------------------------*/
@@ -321,6 +322,7 @@ static void parse_zoffset(char *fname)
PARSE_ZOFS(p, efi32_stub_entry);
PARSE_ZOFS(p, efi64_stub_entry);
PARSE_ZOFS(p, efi_pe_entry);
+ PARSE_ZOFS(p, kernel_info);
PARSE_ZOFS(p, startup_64);
p = strchr(p, '\n');
@@ -410,6 +412,9 @@ int main(int argc, char ** argv)
efi_stub_entry_update();
+ /* Update kernel_info offset. */
+ put_unaligned_le32(kernel_info, &buf[0x268]);
+
crc = partial_crc32(buf, i, crc);
if (fwrite(buf, 1, i, dest) != i)
die("Writing setup failed");
diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig
index d0a5ffeae8df..0b9654c7a05c 100644
--- a/arch/x86/configs/x86_64_defconfig
+++ b/arch/x86/configs/x86_64_defconfig
@@ -25,7 +25,6 @@ CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_SMP=y
-CONFIG_CALGARY_IOMMU=y
CONFIG_NR_CPUS=64
CONFIG_SCHED_SMT=y
CONFIG_PREEMPT_VOLUNTARY=y
diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile
index 759b1a927826..958440eae27e 100644
--- a/arch/x86/crypto/Makefile
+++ b/arch/x86/crypto/Makefile
@@ -39,6 +39,7 @@ obj-$(CONFIG_CRYPTO_AEGIS128_AESNI_SSE2) += aegis128-aesni.o
obj-$(CONFIG_CRYPTO_NHPOLY1305_SSE2) += nhpoly1305-sse2.o
obj-$(CONFIG_CRYPTO_NHPOLY1305_AVX2) += nhpoly1305-avx2.o
+obj-$(CONFIG_CRYPTO_CURVE25519_X86) += curve25519-x86_64.o
# These modules require assembler to support AVX.
ifeq ($(avx_supported),yes)
@@ -48,6 +49,7 @@ ifeq ($(avx_supported),yes)
obj-$(CONFIG_CRYPTO_CAST6_AVX_X86_64) += cast6-avx-x86_64.o
obj-$(CONFIG_CRYPTO_TWOFISH_AVX_X86_64) += twofish-avx-x86_64.o
obj-$(CONFIG_CRYPTO_SERPENT_AVX_X86_64) += serpent-avx-x86_64.o
+ obj-$(CONFIG_CRYPTO_BLAKE2S_X86) += blake2s-x86_64.o
endif
# These modules require assembler to support AVX2.
@@ -70,6 +72,7 @@ serpent-sse2-x86_64-y := serpent-sse2-x86_64-asm_64.o serpent_sse2_glue.o
aegis128-aesni-y := aegis128-aesni-asm.o aegis128-aesni-glue.o
nhpoly1305-sse2-y := nh-sse2-x86_64.o nhpoly1305-sse2-glue.o
+blake2s-x86_64-y := blake2s-core.o blake2s-glue.o
ifeq ($(avx_supported),yes)
camellia-aesni-avx-x86_64-y := camellia-aesni-avx-asm_64.o \
diff --git a/arch/x86/crypto/aegis128-aesni-asm.S b/arch/x86/crypto/aegis128-aesni-asm.S
index 4434607e366d..51d46d93efbc 100644
--- a/arch/x86/crypto/aegis128-aesni-asm.S
+++ b/arch/x86/crypto/aegis128-aesni-asm.S
@@ -71,7 +71,7 @@
* %r8
* %r9
*/
-__load_partial:
+SYM_FUNC_START_LOCAL(__load_partial)
xor %r9d, %r9d
pxor MSG, MSG
@@ -123,7 +123,7 @@ __load_partial:
.Lld_partial_8:
ret
-ENDPROC(__load_partial)
+SYM_FUNC_END(__load_partial)
/*
* __store_partial: internal ABI
@@ -137,7 +137,7 @@ ENDPROC(__load_partial)
* %r9
* %r10
*/
-__store_partial:
+SYM_FUNC_START_LOCAL(__store_partial)
mov LEN, %r8
mov DST, %r9
@@ -181,12 +181,12 @@ __store_partial:
.Lst_partial_1:
ret
-ENDPROC(__store_partial)
+SYM_FUNC_END(__store_partial)
/*
* void crypto_aegis128_aesni_init(void *state, const void *key, const void *iv);
*/
-ENTRY(crypto_aegis128_aesni_init)
+SYM_FUNC_START(crypto_aegis128_aesni_init)
FRAME_BEGIN
/* load IV: */
@@ -226,13 +226,13 @@ ENTRY(crypto_aegis128_aesni_init)
FRAME_END
ret
-ENDPROC(crypto_aegis128_aesni_init)
+SYM_FUNC_END(crypto_aegis128_aesni_init)
/*
* void crypto_aegis128_aesni_ad(void *state, unsigned int length,
* const void *data);
*/
-ENTRY(crypto_aegis128_aesni_ad)
+SYM_FUNC_START(crypto_aegis128_aesni_ad)
FRAME_BEGIN
cmp $0x10, LEN
@@ -378,7 +378,7 @@ ENTRY(crypto_aegis128_aesni_ad)
.Lad_out:
FRAME_END
ret
-ENDPROC(crypto_aegis128_aesni_ad)
+SYM_FUNC_END(crypto_aegis128_aesni_ad)
.macro encrypt_block a s0 s1 s2 s3 s4 i
movdq\a (\i * 0x10)(SRC), MSG
@@ -402,7 +402,7 @@ ENDPROC(crypto_aegis128_aesni_ad)
* void crypto_aegis128_aesni_enc(void *state, unsigned int length,
* const void *src, void *dst);
*/
-ENTRY(crypto_aegis128_aesni_enc)
+SYM_FUNC_START(crypto_aegis128_aesni_enc)
FRAME_BEGIN
cmp $0x10, LEN
@@ -493,13 +493,13 @@ ENTRY(crypto_aegis128_aesni_enc)
.Lenc_out:
FRAME_END
ret
-ENDPROC(crypto_aegis128_aesni_enc)
+SYM_FUNC_END(crypto_aegis128_aesni_enc)
/*
* void crypto_aegis128_aesni_enc_tail(void *state, unsigned int length,
* const void *src, void *dst);
*/
-ENTRY(crypto_aegis128_aesni_enc_tail)
+SYM_FUNC_START(crypto_aegis128_aesni_enc_tail)
FRAME_BEGIN
/* load the state: */
@@ -533,7 +533,7 @@ ENTRY(crypto_aegis128_aesni_enc_tail)
FRAME_END
ret
-ENDPROC(crypto_aegis128_aesni_enc_tail)
+SYM_FUNC_END(crypto_aegis128_aesni_enc_tail)
.macro decrypt_block a s0 s1 s2 s3 s4 i
movdq\a (\i * 0x10)(SRC), MSG
@@ -556,7 +556,7 @@ ENDPROC(crypto_aegis128_aesni_enc_tail)
* void crypto_aegis128_aesni_dec(void *state, unsigned int length,
* const void *src, void *dst);
*/
-ENTRY(crypto_aegis128_aesni_dec)
+SYM_FUNC_START(crypto_aegis128_aesni_dec)
FRAME_BEGIN
cmp $0x10, LEN
@@ -647,13 +647,13 @@ ENTRY(crypto_aegis128_aesni_dec)
.Ldec_out:
FRAME_END
ret
-ENDPROC(crypto_aegis128_aesni_dec)
+SYM_FUNC_END(crypto_aegis128_aesni_dec)
/*
* void crypto_aegis128_aesni_dec_tail(void *state, unsigned int length,
* const void *src, void *dst);
*/
-ENTRY(crypto_aegis128_aesni_dec_tail)
+SYM_FUNC_START(crypto_aegis128_aesni_dec_tail)
FRAME_BEGIN
/* load the state: */
@@ -697,13 +697,13 @@ ENTRY(crypto_aegis128_aesni_dec_tail)
FRAME_END
ret
-ENDPROC(crypto_aegis128_aesni_dec_tail)
+SYM_FUNC_END(crypto_aegis128_aesni_dec_tail)
/*
* void crypto_aegis128_aesni_final(void *state, void *tag_xor,
* u64 assoclen, u64 cryptlen);
*/
-ENTRY(crypto_aegis128_aesni_final)
+SYM_FUNC_START(crypto_aegis128_aesni_final)
FRAME_BEGIN
/* load the state: */
@@ -744,4 +744,4 @@ ENTRY(crypto_aegis128_aesni_final)
FRAME_END
ret
-ENDPROC(crypto_aegis128_aesni_final)
+SYM_FUNC_END(crypto_aegis128_aesni_final)
diff --git a/arch/x86/crypto/aes_ctrby8_avx-x86_64.S b/arch/x86/crypto/aes_ctrby8_avx-x86_64.S
index 5f6a5af9c489..ec437db1fa54 100644
--- a/arch/x86/crypto/aes_ctrby8_avx-x86_64.S
+++ b/arch/x86/crypto/aes_ctrby8_avx-x86_64.S
@@ -544,11 +544,11 @@ ddq_add_8:
* aes_ctr_enc_128_avx_by8(void *in, void *iv, void *keys, void *out,
* unsigned int num_bytes)
*/
-ENTRY(aes_ctr_enc_128_avx_by8)
+SYM_FUNC_START(aes_ctr_enc_128_avx_by8)
/* call the aes main loop */
do_aes_ctrmain KEY_128
-ENDPROC(aes_ctr_enc_128_avx_by8)
+SYM_FUNC_END(aes_ctr_enc_128_avx_by8)
/*
* routine to do AES192 CTR enc/decrypt "by8"
@@ -557,11 +557,11 @@ ENDPROC(aes_ctr_enc_128_avx_by8)
* aes_ctr_enc_192_avx_by8(void *in, void *iv, void *keys, void *out,
* unsigned int num_bytes)
*/
-ENTRY(aes_ctr_enc_192_avx_by8)
+SYM_FUNC_START(aes_ctr_enc_192_avx_by8)
/* call the aes main loop */
do_aes_ctrmain KEY_192
-ENDPROC(aes_ctr_enc_192_avx_by8)
+SYM_FUNC_END(aes_ctr_enc_192_avx_by8)
/*
* routine to do AES256 CTR enc/decrypt "by8"
@@ -570,8 +570,8 @@ ENDPROC(aes_ctr_enc_192_avx_by8)
* aes_ctr_enc_256_avx_by8(void *in, void *iv, void *keys, void *out,
* unsigned int num_bytes)
*/
-ENTRY(aes_ctr_enc_256_avx_by8)
+SYM_FUNC_START(aes_ctr_enc_256_avx_by8)
/* call the aes main loop */
do_aes_ctrmain KEY_256
-ENDPROC(aes_ctr_enc_256_avx_by8)
+SYM_FUNC_END(aes_ctr_enc_256_avx_by8)
diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
index e40bdf024ba7..d28503f99f58 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -1592,7 +1592,7 @@ _esb_loop_\@:
* poly = x^128 + x^127 + x^126 + x^121 + 1
*
*****************************************************************************/
-ENTRY(aesni_gcm_dec)
+SYM_FUNC_START(aesni_gcm_dec)
FUNC_SAVE
GCM_INIT %arg6, arg7, arg8, arg9
@@ -1600,7 +1600,7 @@ ENTRY(aesni_gcm_dec)
GCM_COMPLETE arg10, arg11
FUNC_RESTORE
ret
-ENDPROC(aesni_gcm_dec)
+SYM_FUNC_END(aesni_gcm_dec)
/*****************************************************************************
@@ -1680,7 +1680,7 @@ ENDPROC(aesni_gcm_dec)
*
* poly = x^128 + x^127 + x^126 + x^121 + 1
***************************************************************************/
-ENTRY(aesni_gcm_enc)
+SYM_FUNC_START(aesni_gcm_enc)
FUNC_SAVE
GCM_INIT %arg6, arg7, arg8, arg9
@@ -1689,7 +1689,7 @@ ENTRY(aesni_gcm_enc)
GCM_COMPLETE arg10, arg11
FUNC_RESTORE
ret
-ENDPROC(aesni_gcm_enc)
+SYM_FUNC_END(aesni_gcm_enc)
/*****************************************************************************
* void aesni_gcm_init(void *aes_ctx, // AES Key schedule. Starts on a 16 byte boundary.
@@ -1702,12 +1702,12 @@ ENDPROC(aesni_gcm_enc)
* const u8 *aad, // Additional Authentication Data (AAD)
* u64 aad_len) // Length of AAD in bytes.
*/
-ENTRY(aesni_gcm_init)
+SYM_FUNC_START(aesni_gcm_init)
FUNC_SAVE
GCM_INIT %arg3, %arg4,%arg5, %arg6
FUNC_RESTORE
ret
-ENDPROC(aesni_gcm_init)
+SYM_FUNC_END(aesni_gcm_init)
/*****************************************************************************
* void aesni_gcm_enc_update(void *aes_ctx, // AES Key schedule. Starts on a 16 byte boundary.
@@ -1717,12 +1717,12 @@ ENDPROC(aesni_gcm_init)
* const u8 *in, // Plaintext input
* u64 plaintext_len, // Length of data in bytes for encryption.
*/
-ENTRY(aesni_gcm_enc_update)
+SYM_FUNC_START(aesni_gcm_enc_update)
FUNC_SAVE
GCM_ENC_DEC enc
FUNC_RESTORE
ret
-ENDPROC(aesni_gcm_enc_update)
+SYM_FUNC_END(aesni_gcm_enc_update)
/*****************************************************************************
* void aesni_gcm_dec_update(void *aes_ctx, // AES Key schedule. Starts on a 16 byte boundary.
@@ -1732,12 +1732,12 @@ ENDPROC(aesni_gcm_enc_update)
* const u8 *in, // Plaintext input
* u64 plaintext_len, // Length of data in bytes for encryption.
*/
-ENTRY(aesni_gcm_dec_update)
+SYM_FUNC_START(aesni_gcm_dec_update)
FUNC_SAVE
GCM_ENC_DEC dec
FUNC_RESTORE
ret
-ENDPROC(aesni_gcm_dec_update)
+SYM_FUNC_END(aesni_gcm_dec_update)
/*****************************************************************************
* void aesni_gcm_finalize(void *aes_ctx, // AES Key schedule. Starts on a 16 byte boundary.
@@ -1747,19 +1747,18 @@ ENDPROC(aesni_gcm_dec_update)
* u64 auth_tag_len); // Authenticated Tag Length in bytes. Valid values are 16 (most likely),
* // 12 or 8.
*/
-ENTRY(aesni_gcm_finalize)
+SYM_FUNC_START(aesni_gcm_finalize)
FUNC_SAVE
GCM_COMPLETE %arg3 %arg4
FUNC_RESTORE
ret
-ENDPROC(aesni_gcm_finalize)
+SYM_FUNC_END(aesni_gcm_finalize)
#endif
-.align 4
-_key_expansion_128:
-_key_expansion_256a:
+SYM_FUNC_START_LOCAL_ALIAS(_key_expansion_128)
+SYM_FUNC_START_LOCAL(_key_expansion_256a)
pshufd $0b11111111, %xmm1, %xmm1
shufps $0b00010000, %xmm0, %xmm4
pxor %xmm4, %xmm0
@@ -1769,11 +1768,10 @@ _key_expansion_256a:
movaps %xmm0, (TKEYP)
add $0x10, TKEYP
ret
-ENDPROC(_key_expansion_128)
-ENDPROC(_key_expansion_256a)
+SYM_FUNC_END(_key_expansion_256a)
+SYM_FUNC_END_ALIAS(_key_expansion_128)
-.align 4
-_key_expansion_192a:
+SYM_FUNC_START_LOCAL(_key_expansion_192a)
pshufd $0b01010101, %xmm1, %xmm1
shufps $0b00010000, %xmm0, %xmm4
pxor %xmm4, %xmm0
@@ -1795,10 +1793,9 @@ _key_expansion_192a:
movaps %xmm1, 0x10(TKEYP)
add $0x20, TKEYP
ret
-ENDPROC(_key_expansion_192a)
+SYM_FUNC_END(_key_expansion_192a)
-.align 4
-_key_expansion_192b:
+SYM_FUNC_START_LOCAL(_key_expansion_192b)
pshufd $0b01010101, %xmm1, %xmm1
shufps $0b00010000, %xmm0, %xmm4
pxor %xmm4, %xmm0
@@ -1815,10 +1812,9 @@ _key_expansion_192b:
movaps %xmm0, (TKEYP)
add $0x10, TKEYP
ret
-ENDPROC(_key_expansion_192b)
+SYM_FUNC_END(_key_expansion_192b)
-.align 4
-_key_expansion_256b:
+SYM_FUNC_START_LOCAL(_key_expansion_256b)
pshufd $0b10101010, %xmm1, %xmm1
shufps $0b00010000, %xmm2, %xmm4
pxor %xmm4, %xmm2
@@ -1828,13 +1824,13 @@ _key_expansion_256b:
movaps %xmm2, (TKEYP)
add $0x10, TKEYP
ret
-ENDPROC(_key_expansion_256b)
+SYM_FUNC_END(_key_expansion_256b)
/*
* int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
* unsigned int key_len)
*/
-ENTRY(aesni_set_key)
+SYM_FUNC_START(aesni_set_key)
FRAME_BEGIN
#ifndef __x86_64__
pushl KEYP
@@ -1943,12 +1939,12 @@ ENTRY(aesni_set_key)
#endif
FRAME_END
ret
-ENDPROC(aesni_set_key)
+SYM_FUNC_END(aesni_set_key)
/*
* void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
*/
-ENTRY(aesni_enc)
+SYM_FUNC_START(aesni_enc)
FRAME_BEGIN
#ifndef __x86_64__
pushl KEYP
@@ -1967,7 +1963,7 @@ ENTRY(aesni_enc)
#endif
FRAME_END
ret
-ENDPROC(aesni_enc)
+SYM_FUNC_END(aesni_enc)
/*
* _aesni_enc1: internal ABI
@@ -1981,8 +1977,7 @@ ENDPROC(aesni_enc)
* KEY
* TKEYP (T1)
*/
-.align 4
-_aesni_enc1:
+SYM_FUNC_START_LOCAL(_aesni_enc1)
movaps (KEYP), KEY # key
mov KEYP, TKEYP
pxor KEY, STATE # round 0
@@ -2025,7 +2020,7 @@ _aesni_enc1:
movaps 0x70(TKEYP), KEY
AESENCLAST KEY STATE
ret
-ENDPROC(_aesni_enc1)
+SYM_FUNC_END(_aesni_enc1)
/*
* _aesni_enc4: internal ABI
@@ -2045,8 +2040,7 @@ ENDPROC(_aesni_enc1)
* KEY
* TKEYP (T1)
*/
-.align 4
-_aesni_enc4:
+SYM_FUNC_START_LOCAL(_aesni_enc4)
movaps (KEYP), KEY # key
mov KEYP, TKEYP
pxor KEY, STATE1 # round 0
@@ -2134,12 +2128,12 @@ _aesni_enc4:
AESENCLAST KEY STATE3
AESENCLAST KEY STATE4
ret
-ENDPROC(_aesni_enc4)
+SYM_FUNC_END(_aesni_enc4)
/*
* void aesni_dec (struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
*/
-ENTRY(aesni_dec)
+SYM_FUNC_START(aesni_dec)
FRAME_BEGIN
#ifndef __x86_64__
pushl KEYP
@@ -2159,7 +2153,7 @@ ENTRY(aesni_dec)
#endif
FRAME_END
ret
-ENDPROC(aesni_dec)
+SYM_FUNC_END(aesni_dec)
/*
* _aesni_dec1: internal ABI
@@ -2173,8 +2167,7 @@ ENDPROC(aesni_dec)
* KEY
* TKEYP (T1)
*/
-.align 4
-_aesni_dec1:
+SYM_FUNC_START_LOCAL(_aesni_dec1)
movaps (KEYP), KEY # key
mov KEYP, TKEYP
pxor KEY, STATE # round 0
@@ -2217,7 +2210,7 @@ _aesni_dec1:
movaps 0x70(TKEYP), KEY
AESDECLAST KEY STATE
ret
-ENDPROC(_aesni_dec1)
+SYM_FUNC_END(_aesni_dec1)
/*
* _aesni_dec4: internal ABI
@@ -2237,8 +2230,7 @@ ENDPROC(_aesni_dec1)
* KEY
* TKEYP (T1)
*/
-.align 4
-_aesni_dec4:
+SYM_FUNC_START_LOCAL(_aesni_dec4)
movaps (KEYP), KEY # key
mov KEYP, TKEYP
pxor KEY, STATE1 # round 0
@@ -2326,13 +2318,13 @@ _aesni_dec4:
AESDECLAST KEY STATE3
AESDECLAST KEY STATE4
ret
-ENDPROC(_aesni_dec4)
+SYM_FUNC_END(_aesni_dec4)
/*
* void aesni_ecb_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
* size_t len)
*/
-ENTRY(aesni_ecb_enc)
+SYM_FUNC_START(aesni_ecb_enc)
FRAME_BEGIN
#ifndef __x86_64__
pushl LEN
@@ -2386,13 +2378,13 @@ ENTRY(aesni_ecb_enc)
#endif
FRAME_END
ret
-ENDPROC(aesni_ecb_enc)
+SYM_FUNC_END(aesni_ecb_enc)
/*
* void aesni_ecb_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
* size_t len);
*/
-ENTRY(aesni_ecb_dec)
+SYM_FUNC_START(aesni_ecb_dec)
FRAME_BEGIN
#ifndef __x86_64__
pushl LEN
@@ -2447,13 +2439,13 @@ ENTRY(aesni_ecb_dec)
#endif
FRAME_END
ret
-ENDPROC(aesni_ecb_dec)
+SYM_FUNC_END(aesni_ecb_dec)
/*
* void aesni_cbc_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
* size_t len, u8 *iv)
*/
-ENTRY(aesni_cbc_enc)
+SYM_FUNC_START(aesni_cbc_enc)
FRAME_BEGIN
#ifndef __x86_64__
pushl IVP
@@ -2491,13 +2483,13 @@ ENTRY(aesni_cbc_enc)
#endif
FRAME_END
ret
-ENDPROC(aesni_cbc_enc)
+SYM_FUNC_END(aesni_cbc_enc)
/*
* void aesni_cbc_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
* size_t len, u8 *iv)
*/
-ENTRY(aesni_cbc_dec)
+SYM_FUNC_START(aesni_cbc_dec)
FRAME_BEGIN
#ifndef __x86_64__
pushl IVP
@@ -2584,7 +2576,7 @@ ENTRY(aesni_cbc_dec)
#endif
FRAME_END
ret
-ENDPROC(aesni_cbc_dec)
+SYM_FUNC_END(aesni_cbc_dec)
#ifdef __x86_64__
.pushsection .rodata
@@ -2604,8 +2596,7 @@ ENDPROC(aesni_cbc_dec)
* INC: == 1, in little endian
* BSWAP_MASK == endian swapping mask
*/
-.align 4
-_aesni_inc_init:
+SYM_FUNC_START_LOCAL(_aesni_inc_init)
movaps .Lbswap_mask, BSWAP_MASK
movaps IV, CTR
PSHUFB_XMM BSWAP_MASK CTR
@@ -2613,7 +2604,7 @@ _aesni_inc_init:
MOVQ_R64_XMM TCTR_LOW INC
MOVQ_R64_XMM CTR TCTR_LOW
ret
-ENDPROC(_aesni_inc_init)
+SYM_FUNC_END(_aesni_inc_init)
/*
* _aesni_inc: internal ABI
@@ -2630,8 +2621,7 @@ ENDPROC(_aesni_inc_init)
* CTR: == output IV, in little endian
* TCTR_LOW: == lower qword of CTR
*/
-.align 4
-_aesni_inc:
+SYM_FUNC_START_LOCAL(_aesni_inc)
paddq INC, CTR
add $1, TCTR_LOW
jnc .Linc_low
@@ -2642,13 +2632,13 @@ _aesni_inc:
movaps CTR, IV
PSHUFB_XMM BSWAP_MASK IV
ret
-ENDPROC(_aesni_inc)
+SYM_FUNC_END(_aesni_inc)
/*
* void aesni_ctr_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
* size_t len, u8 *iv)
*/
-ENTRY(aesni_ctr_enc)
+SYM_FUNC_START(aesni_ctr_enc)
FRAME_BEGIN
cmp $16, LEN
jb .Lctr_enc_just_ret
@@ -2705,7 +2695,7 @@ ENTRY(aesni_ctr_enc)
.Lctr_enc_just_ret:
FRAME_END
ret
-ENDPROC(aesni_ctr_enc)
+SYM_FUNC_END(aesni_ctr_enc)
/*
* _aesni_gf128mul_x_ble: internal ABI
@@ -2729,7 +2719,7 @@ ENDPROC(aesni_ctr_enc)
* void aesni_xts_crypt8(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
* bool enc, u8 *iv)
*/
-ENTRY(aesni_xts_crypt8)
+SYM_FUNC_START(aesni_xts_crypt8)
FRAME_BEGIN
cmpb $0, %cl
movl $0, %ecx
@@ -2833,6 +2823,6 @@ ENTRY(aesni_xts_crypt8)
FRAME_END
ret
-ENDPROC(aesni_xts_crypt8)
+SYM_FUNC_END(aesni_xts_crypt8)
#endif
diff --git a/arch/x86/crypto/aesni-intel_avx-x86_64.S b/arch/x86/crypto/aesni-intel_avx-x86_64.S
index 91c039ab5699..bfa1c0b3e5b4 100644
--- a/arch/x86/crypto/aesni-intel_avx-x86_64.S
+++ b/arch/x86/crypto/aesni-intel_avx-x86_64.S
@@ -1775,12 +1775,12 @@ _initial_blocks_done\@:
# const u8 *aad, /* Additional Authentication Data (AAD) */
# u64 aad_len) /* Length of AAD in bytes. With RFC4106 this is going to be 8 or 12 Bytes */
#############################################################
-ENTRY(aesni_gcm_init_avx_gen2)
+SYM_FUNC_START(aesni_gcm_init_avx_gen2)
FUNC_SAVE
INIT GHASH_MUL_AVX, PRECOMPUTE_AVX
FUNC_RESTORE
ret
-ENDPROC(aesni_gcm_init_avx_gen2)
+SYM_FUNC_END(aesni_gcm_init_avx_gen2)
###############################################################################
#void aesni_gcm_enc_update_avx_gen2(
@@ -1790,7 +1790,7 @@ ENDPROC(aesni_gcm_init_avx_gen2)
# const u8 *in, /* Plaintext input */
# u64 plaintext_len) /* Length of data in Bytes for encryption. */
###############################################################################
-ENTRY(aesni_gcm_enc_update_avx_gen2)
+SYM_FUNC_START(aesni_gcm_enc_update_avx_gen2)
FUNC_SAVE
mov keysize, %eax
cmp $32, %eax
@@ -1809,7 +1809,7 @@ key_256_enc_update:
GCM_ENC_DEC INITIAL_BLOCKS_AVX, GHASH_8_ENCRYPT_8_PARALLEL_AVX, GHASH_LAST_8_AVX, GHASH_MUL_AVX, ENC, 13
FUNC_RESTORE
ret
-ENDPROC(aesni_gcm_enc_update_avx_gen2)
+SYM_FUNC_END(aesni_gcm_enc_update_avx_gen2)
###############################################################################
#void aesni_gcm_dec_update_avx_gen2(
@@ -1819,7 +1819,7 @@ ENDPROC(aesni_gcm_enc_update_avx_gen2)
# const u8 *in, /* Ciphertext input */
# u64 plaintext_len) /* Length of data in Bytes for encryption. */
###############################################################################
-ENTRY(aesni_gcm_dec_update_avx_gen2)
+SYM_FUNC_START(aesni_gcm_dec_update_avx_gen2)
FUNC_SAVE
mov keysize,%eax
cmp $32, %eax
@@ -1838,7 +1838,7 @@ key_256_dec_update:
GCM_ENC_DEC INITIAL_BLOCKS_AVX, GHASH_8_ENCRYPT_8_PARALLEL_AVX, GHASH_LAST_8_AVX, GHASH_MUL_AVX, DEC, 13
FUNC_RESTORE
ret
-ENDPROC(aesni_gcm_dec_update_avx_gen2)
+SYM_FUNC_END(aesni_gcm_dec_update_avx_gen2)
###############################################################################
#void aesni_gcm_finalize_avx_gen2(
@@ -1848,7 +1848,7 @@ ENDPROC(aesni_gcm_dec_update_avx_gen2)
# u64 auth_tag_len) /* Authenticated Tag Length in bytes.
# Valid values are 16 (most likely), 12 or 8. */
###############################################################################
-ENTRY(aesni_gcm_finalize_avx_gen2)
+SYM_FUNC_START(aesni_gcm_finalize_avx_gen2)
FUNC_SAVE
mov keysize,%eax
cmp $32, %eax
@@ -1867,7 +1867,7 @@ key_256_finalize:
GCM_COMPLETE GHASH_MUL_AVX, 13, arg3, arg4
FUNC_RESTORE
ret
-ENDPROC(aesni_gcm_finalize_avx_gen2)
+SYM_FUNC_END(aesni_gcm_finalize_avx_gen2)
#endif /* CONFIG_AS_AVX */
@@ -2746,12 +2746,12 @@ _initial_blocks_done\@:
# const u8 *aad, /* Additional Authentication Data (AAD) */
# u64 aad_len) /* Length of AAD in bytes. With RFC4106 this is going to be 8 or 12 Bytes */
#############################################################
-ENTRY(aesni_gcm_init_avx_gen4)
+SYM_FUNC_START(aesni_gcm_init_avx_gen4)
FUNC_SAVE
INIT GHASH_MUL_AVX2, PRECOMPUTE_AVX2
FUNC_RESTORE
ret
-ENDPROC(aesni_gcm_init_avx_gen4)
+SYM_FUNC_END(aesni_gcm_init_avx_gen4)
###############################################################################
#void aesni_gcm_enc_update_avx_gen4(
@@ -2761,7 +2761,7 @@ ENDPROC(aesni_gcm_init_avx_gen4)
# const u8 *in, /* Plaintext input */
# u64 plaintext_len) /* Length of data in Bytes for encryption. */
###############################################################################
-ENTRY(aesni_gcm_enc_update_avx_gen4)
+SYM_FUNC_START(aesni_gcm_enc_update_avx_gen4)
FUNC_SAVE
mov keysize,%eax
cmp $32, %eax
@@ -2780,7 +2780,7 @@ key_256_enc_update4:
GCM_ENC_DEC INITIAL_BLOCKS_AVX2, GHASH_8_ENCRYPT_8_PARALLEL_AVX2, GHASH_LAST_8_AVX2, GHASH_MUL_AVX2, ENC, 13
FUNC_RESTORE
ret
-ENDPROC(aesni_gcm_enc_update_avx_gen4)
+SYM_FUNC_END(aesni_gcm_enc_update_avx_gen4)
###############################################################################
#void aesni_gcm_dec_update_avx_gen4(
@@ -2790,7 +2790,7 @@ ENDPROC(aesni_gcm_enc_update_avx_gen4)
# const u8 *in, /* Ciphertext input */
# u64 plaintext_len) /* Length of data in Bytes for encryption. */
###############################################################################
-ENTRY(aesni_gcm_dec_update_avx_gen4)
+SYM_FUNC_START(aesni_gcm_dec_update_avx_gen4)
FUNC_SAVE
mov keysize,%eax
cmp $32, %eax
@@ -2809,7 +2809,7 @@ key_256_dec_update4:
GCM_ENC_DEC INITIAL_BLOCKS_AVX2, GHASH_8_ENCRYPT_8_PARALLEL_AVX2, GHASH_LAST_8_AVX2, GHASH_MUL_AVX2, DEC, 13
FUNC_RESTORE
ret
-ENDPROC(aesni_gcm_dec_update_avx_gen4)
+SYM_FUNC_END(aesni_gcm_dec_update_avx_gen4)
###############################################################################
#void aesni_gcm_finalize_avx_gen4(
@@ -2819,7 +2819,7 @@ ENDPROC(aesni_gcm_dec_update_avx_gen4)
# u64 auth_tag_len) /* Authenticated Tag Length in bytes.
# Valid values are 16 (most likely), 12 or 8. */
###############################################################################
-ENTRY(aesni_gcm_finalize_avx_gen4)
+SYM_FUNC_START(aesni_gcm_finalize_avx_gen4)
FUNC_SAVE
mov keysize,%eax
cmp $32, %eax
@@ -2838,6 +2838,6 @@ key_256_finalize4:
GCM_COMPLETE GHASH_MUL_AVX2, 13, arg3, arg4
FUNC_RESTORE
ret
-ENDPROC(aesni_gcm_finalize_avx_gen4)
+SYM_FUNC_END(aesni_gcm_finalize_avx_gen4)
#endif /* CONFIG_AS_AVX2 */
diff --git a/arch/x86/crypto/blake2s-core.S b/arch/x86/crypto/blake2s-core.S
new file mode 100644
index 000000000000..24910b766bdd
--- /dev/null
+++ b/arch/x86/crypto/blake2s-core.S
@@ -0,0 +1,258 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ * Copyright (C) 2017-2019 Samuel Neves <sneves@dei.uc.pt>. All Rights Reserved.
+ */
+
+#include <linux/linkage.h>
+
+.section .rodata.cst32.BLAKE2S_IV, "aM", @progbits, 32
+.align 32
+IV: .octa 0xA54FF53A3C6EF372BB67AE856A09E667
+ .octa 0x5BE0CD191F83D9AB9B05688C510E527F
+.section .rodata.cst16.ROT16, "aM", @progbits, 16
+.align 16
+ROT16: .octa 0x0D0C0F0E09080B0A0504070601000302
+.section .rodata.cst16.ROR328, "aM", @progbits, 16
+.align 16
+ROR328: .octa 0x0C0F0E0D080B0A090407060500030201
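+# ROT16 and ROR328 are pshufb masks: permuting the bytes of each 32-bit
+# lane performs the BLAKE2s rotations by 16 and by 8 in one instruction.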
+.section .rodata.cst64.BLAKE2S_SIGMA, "aM", @progbits, 160
+.align 64
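+# Message schedule: the ten BLAKE2s sigma permutations, reordered for the
+# vectorized G evaluation; the round loop gathers message words one byte
+# index at a time.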
+SIGMA:
+.byte 0, 2, 4, 6, 1, 3, 5, 7, 14, 8, 10, 12, 15, 9, 11, 13
+.byte 14, 4, 9, 13, 10, 8, 15, 6, 5, 1, 0, 11, 3, 12, 2, 7
+.byte 11, 12, 5, 15, 8, 0, 2, 13, 9, 10, 3, 7, 4, 14, 6, 1
+.byte 7, 3, 13, 11, 9, 1, 12, 14, 15, 2, 5, 4, 8, 6, 10, 0
+.byte 9, 5, 2, 10, 0, 7, 4, 15, 3, 14, 11, 6, 13, 1, 12, 8
+.byte 2, 6, 0, 8, 12, 10, 11, 3, 1, 4, 7, 15, 9, 13, 5, 14
+.byte 12, 1, 14, 4, 5, 15, 13, 10, 8, 0, 6, 9, 11, 7, 3, 2
+.byte 13, 7, 12, 3, 11, 14, 1, 9, 2, 5, 15, 8, 10, 0, 4, 6
+.byte 6, 14, 11, 0, 15, 9, 3, 8, 10, 12, 13, 1, 5, 2, 7, 4
+.byte 10, 8, 7, 1, 2, 4, 6, 5, 13, 15, 9, 3, 0, 11, 14, 12
+#ifdef CONFIG_AS_AVX512
+.section .rodata.cst64.BLAKE2S_SIGMA2, "aM", @progbits, 640
+.align 64
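+# Message schedule for the AVX-512 path as dword indices for vpermi2d;
+# the rows are pre-composed because each round permutes the previously
+# permuted message vectors instead of re-gathering words from memory.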
+SIGMA2:
+.long 0, 2, 4, 6, 1, 3, 5, 7, 14, 8, 10, 12, 15, 9, 11, 13
+.long 8, 2, 13, 15, 10, 9, 12, 3, 6, 4, 0, 14, 5, 11, 1, 7
+.long 11, 13, 8, 6, 5, 10, 14, 3, 2, 4, 12, 15, 1, 0, 7, 9
+.long 11, 10, 7, 0, 8, 15, 1, 13, 3, 6, 2, 12, 4, 14, 9, 5
+.long 4, 10, 9, 14, 15, 0, 11, 8, 1, 7, 3, 13, 2, 5, 6, 12
+.long 2, 11, 4, 15, 14, 3, 10, 8, 13, 6, 5, 7, 0, 12, 1, 9
+.long 4, 8, 15, 9, 14, 11, 13, 5, 3, 2, 1, 12, 6, 10, 7, 0
+.long 6, 13, 0, 14, 12, 2, 1, 11, 15, 4, 5, 8, 7, 9, 3, 10
+.long 15, 5, 4, 13, 10, 7, 3, 11, 12, 2, 0, 6, 9, 8, 1, 14
+.long 8, 7, 14, 11, 13, 15, 0, 12, 10, 4, 5, 6, 3, 2, 1, 9
+#endif /* CONFIG_AS_AVX512 */
+
+.text
+#ifdef CONFIG_AS_SSSE3
+SYM_FUNC_START(blake2s_compress_ssse3)
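+ # Arguments (SysV AMD64 ABI, matching the prototype in blake2s-glue.c):
+ #   %rdi: struct blake2s_state *state
+ #   %rsi: const u8 *block
+ #   %rdx: size_t nblocks
+ #   %rcx: u32 inc (counter increment applied per block)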
+ testq %rdx,%rdx
+ je .Lendofloop
+ movdqu (%rdi),%xmm0
+ movdqu 0x10(%rdi),%xmm1
+ movdqa ROT16(%rip),%xmm12
+ movdqa ROR328(%rip),%xmm13
+ movdqu 0x20(%rdi),%xmm14
+ movq %rcx,%xmm15
+ leaq SIGMA+0xa0(%rip),%r8
+ jmp .Lbeginofloop
+ .align 32
+.Lbeginofloop:
+ movdqa %xmm0,%xmm10
+ movdqa %xmm1,%xmm11
+ paddq %xmm15,%xmm14
+ movdqa IV(%rip),%xmm2
+ movdqa %xmm14,%xmm3
+ pxor IV+0x10(%rip),%xmm3
+ leaq SIGMA(%rip),%rcx
+.Lroundloop:
+ movzbl (%rcx),%eax
+ movd (%rsi,%rax,4),%xmm4
+ movzbl 0x1(%rcx),%eax
+ movd (%rsi,%rax,4),%xmm5
+ movzbl 0x2(%rcx),%eax
+ movd (%rsi,%rax,4),%xmm6
+ movzbl 0x3(%rcx),%eax
+ movd (%rsi,%rax,4),%xmm7
+ punpckldq %xmm5,%xmm4
+ punpckldq %xmm7,%xmm6
+ punpcklqdq %xmm6,%xmm4
+ paddd %xmm4,%xmm0
+ paddd %xmm1,%xmm0
+ pxor %xmm0,%xmm3
+ pshufb %xmm12,%xmm3
+ paddd %xmm3,%xmm2
+ pxor %xmm2,%xmm1
+ movdqa %xmm1,%xmm8
+ psrld $0xc,%xmm1
+ pslld $0x14,%xmm8
+ por %xmm8,%xmm1
+ movzbl 0x4(%rcx),%eax
+ movd (%rsi,%rax,4),%xmm5
+ movzbl 0x5(%rcx),%eax
+ movd (%rsi,%rax,4),%xmm6
+ movzbl 0x6(%rcx),%eax
+ movd (%rsi,%rax,4),%xmm7
+ movzbl 0x7(%rcx),%eax
+ movd (%rsi,%rax,4),%xmm4
+ punpckldq %xmm6,%xmm5
+ punpckldq %xmm4,%xmm7
+ punpcklqdq %xmm7,%xmm5
+ paddd %xmm5,%xmm0
+ paddd %xmm1,%xmm0
+ pxor %xmm0,%xmm3
+ pshufb %xmm13,%xmm3
+ paddd %xmm3,%xmm2
+ pxor %xmm2,%xmm1
+ movdqa %xmm1,%xmm8
+ psrld $0x7,%xmm1
+ pslld $0x19,%xmm8
+ por %xmm8,%xmm1
+ pshufd $0x93,%xmm0,%xmm0
+ pshufd $0x4e,%xmm3,%xmm3
+ pshufd $0x39,%xmm2,%xmm2
+ movzbl 0x8(%rcx),%eax
+ movd (%rsi,%rax,4),%xmm6
+ movzbl 0x9(%rcx),%eax
+ movd (%rsi,%rax,4),%xmm7
+ movzbl 0xa(%rcx),%eax
+ movd (%rsi,%rax,4),%xmm4
+ movzbl 0xb(%rcx),%eax
+ movd (%rsi,%rax,4),%xmm5
+ punpckldq %xmm7,%xmm6
+ punpckldq %xmm5,%xmm4
+ punpcklqdq %xmm4,%xmm6
+ paddd %xmm6,%xmm0
+ paddd %xmm1,%xmm0
+ pxor %xmm0,%xmm3
+ pshufb %xmm12,%xmm3
+ paddd %xmm3,%xmm2
+ pxor %xmm2,%xmm1
+ movdqa %xmm1,%xmm8
+ psrld $0xc,%xmm1
+ pslld $0x14,%xmm8
+ por %xmm8,%xmm1
+ movzbl 0xc(%rcx),%eax
+ movd (%rsi,%rax,4),%xmm7
+ movzbl 0xd(%rcx),%eax
+ movd (%rsi,%rax,4),%xmm4
+ movzbl 0xe(%rcx),%eax
+ movd (%rsi,%rax,4),%xmm5
+ movzbl 0xf(%rcx),%eax
+ movd (%rsi,%rax,4),%xmm6
+ punpckldq %xmm4,%xmm7
+ punpckldq %xmm6,%xmm5
+ punpcklqdq %xmm5,%xmm7
+ paddd %xmm7,%xmm0
+ paddd %xmm1,%xmm0
+ pxor %xmm0,%xmm3
+ pshufb %xmm13,%xmm3
+ paddd %xmm3,%xmm2
+ pxor %xmm2,%xmm1
+ movdqa %xmm1,%xmm8
+ psrld $0x7,%xmm1
+ pslld $0x19,%xmm8
+ por %xmm8,%xmm1
+ pshufd $0x39,%xmm0,%xmm0
+ pshufd $0x4e,%xmm3,%xmm3
+ pshufd $0x93,%xmm2,%xmm2
+ addq $0x10,%rcx
+ cmpq %r8,%rcx
+ jnz .Lroundloop
+ pxor %xmm2,%xmm0
+ pxor %xmm3,%xmm1
+ pxor %xmm10,%xmm0
+ pxor %xmm11,%xmm1
+ addq $0x40,%rsi
+ decq %rdx
+ jnz .Lbeginofloop
+ movdqu %xmm0,(%rdi)
+ movdqu %xmm1,0x10(%rdi)
+ movdqu %xmm14,0x20(%rdi)
+.Lendofloop:
+ ret
+SYM_FUNC_END(blake2s_compress_ssse3)
+#endif /* CONFIG_AS_SSSE3 */
+
+#ifdef CONFIG_AS_AVX512
+SYM_FUNC_START(blake2s_compress_avx512)
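+ # Same SysV AMD64 argument registers as blake2s_compress_ssse3 above.
+ # The EVEX forms used on 128/256-bit registers (vprord, vpermi2d) are
+ # what makes AVX512VL a hard requirement for this path.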
+ vmovdqu (%rdi),%xmm0
+ vmovdqu 0x10(%rdi),%xmm1
+ vmovdqu 0x20(%rdi),%xmm4
+ vmovq %rcx,%xmm5
+ vmovdqa IV(%rip),%xmm14
+ vmovdqa IV+16(%rip),%xmm15
+ jmp .Lblake2s_compress_avx512_mainloop
+.align 32
+.Lblake2s_compress_avx512_mainloop:
+ vmovdqa %xmm0,%xmm10
+ vmovdqa %xmm1,%xmm11
+ vpaddq %xmm5,%xmm4,%xmm4
+ vmovdqa %xmm14,%xmm2
+ vpxor %xmm15,%xmm4,%xmm3
+ vmovdqu (%rsi),%ymm6
+ vmovdqu 0x20(%rsi),%ymm7
+ addq $0x40,%rsi
+ leaq SIGMA2(%rip),%rax
+ movb $0xa,%cl
+.Lblake2s_compress_avx512_roundloop:
+ addq $0x40,%rax
+ vmovdqa -0x40(%rax),%ymm8
+ vmovdqa -0x20(%rax),%ymm9
+ vpermi2d %ymm7,%ymm6,%ymm8
+ vpermi2d %ymm7,%ymm6,%ymm9
+ vmovdqa %ymm8,%ymm6
+ vmovdqa %ymm9,%ymm7
+ vpaddd %xmm8,%xmm0,%xmm0
+ vpaddd %xmm1,%xmm0,%xmm0
+ vpxor %xmm0,%xmm3,%xmm3
+ vprord $0x10,%xmm3,%xmm3
+ vpaddd %xmm3,%xmm2,%xmm2
+ vpxor %xmm2,%xmm1,%xmm1
+ vprord $0xc,%xmm1,%xmm1
+ vextracti128 $0x1,%ymm8,%xmm8
+ vpaddd %xmm8,%xmm0,%xmm0
+ vpaddd %xmm1,%xmm0,%xmm0
+ vpxor %xmm0,%xmm3,%xmm3
+ vprord $0x8,%xmm3,%xmm3
+ vpaddd %xmm3,%xmm2,%xmm2
+ vpxor %xmm2,%xmm1,%xmm1
+ vprord $0x7,%xmm1,%xmm1
+ vpshufd $0x93,%xmm0,%xmm0
+ vpshufd $0x4e,%xmm3,%xmm3
+ vpshufd $0x39,%xmm2,%xmm2
+ vpaddd %xmm9,%xmm0,%xmm0
+ vpaddd %xmm1,%xmm0,%xmm0
+ vpxor %xmm0,%xmm3,%xmm3
+ vprord $0x10,%xmm3,%xmm3
+ vpaddd %xmm3,%xmm2,%xmm2
+ vpxor %xmm2,%xmm1,%xmm1
+ vprord $0xc,%xmm1,%xmm1
+ vextracti128 $0x1,%ymm9,%xmm9
+ vpaddd %xmm9,%xmm0,%xmm0
+ vpaddd %xmm1,%xmm0,%xmm0
+ vpxor %xmm0,%xmm3,%xmm3
+ vprord $0x8,%xmm3,%xmm3
+ vpaddd %xmm3,%xmm2,%xmm2
+ vpxor %xmm2,%xmm1,%xmm1
+ vprord $0x7,%xmm1,%xmm1
+ vpshufd $0x39,%xmm0,%xmm0
+ vpshufd $0x4e,%xmm3,%xmm3
+ vpshufd $0x93,%xmm2,%xmm2
+ decb %cl
+ jne .Lblake2s_compress_avx512_roundloop
+ vpxor %xmm10,%xmm0,%xmm0
+ vpxor %xmm11,%xmm1,%xmm1
+ vpxor %xmm2,%xmm0,%xmm0
+ vpxor %xmm3,%xmm1,%xmm1
+ decq %rdx
+ jne .Lblake2s_compress_avx512_mainloop
+ vmovdqu %xmm0,(%rdi)
+ vmovdqu %xmm1,0x10(%rdi)
+ vmovdqu %xmm4,0x20(%rdi)
+ vzeroupper
+ retq
+SYM_FUNC_END(blake2s_compress_avx512)
+#endif /* CONFIG_AS_AVX512 */
diff --git a/arch/x86/crypto/blake2s-glue.c b/arch/x86/crypto/blake2s-glue.c
new file mode 100644
index 000000000000..4a37ba7cdbe5
--- /dev/null
+++ b/arch/x86/crypto/blake2s-glue.c
@@ -0,0 +1,233 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#include <crypto/internal/blake2s.h>
+#include <crypto/internal/simd.h>
+#include <crypto/internal/hash.h>
+
+#include <linux/types.h>
+#include <linux/jump_label.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include <asm/cpufeature.h>
+#include <asm/fpu/api.h>
+#include <asm/processor.h>
+#include <asm/simd.h>
+
+asmlinkage void blake2s_compress_ssse3(struct blake2s_state *state,
+ const u8 *block, const size_t nblocks,
+ const u32 inc);
+asmlinkage void blake2s_compress_avx512(struct blake2s_state *state,
+ const u8 *block, const size_t nblocks,
+ const u32 inc);
+
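+/* Chosen once in blake2s_mod_init() from CPU feature bits and never
+ * toggled again, hence __ro_after_init. */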
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(blake2s_use_ssse3);
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(blake2s_use_avx512);
+
+void blake2s_compress_arch(struct blake2s_state *state,
+ const u8 *block, size_t nblocks,
+ const u32 inc)
+{
+ /* SIMD disables preemption, so relax after processing each page;
+  * the BUILD_BUG_ON below guarantees a page covers at least eight
+  * full blocks, so each FPU section still gets a useful chunk. */
+ BUILD_BUG_ON(PAGE_SIZE / BLAKE2S_BLOCK_SIZE < 8);
+
+ if (!static_branch_likely(&blake2s_use_ssse3) || !crypto_simd_usable()) {
+ blake2s_compress_generic(state, block, nblocks, inc);
+ return;
+ }
+
+ for (;;) {
+ const size_t blocks = min_t(size_t, nblocks,
+ PAGE_SIZE / BLAKE2S_BLOCK_SIZE);
+
+ kernel_fpu_begin();
+ if (IS_ENABLED(CONFIG_AS_AVX512) &&
+ static_branch_likely(&blake2s_use_avx512))
+ blake2s_compress_avx512(state, block, blocks, inc);
+ else
+ blake2s_compress_ssse3(state, block, blocks, inc);
+ kernel_fpu_end();
+
+ nblocks -= blocks;
+ if (!nblocks)
+ break;
+ block += blocks * BLAKE2S_BLOCK_SIZE;
+ }
+}
+EXPORT_SYMBOL(blake2s_compress_arch);
+
+static int crypto_blake2s_setkey(struct crypto_shash *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct blake2s_tfm_ctx *tctx = crypto_shash_ctx(tfm);
+
+ if (keylen == 0 || keylen > BLAKE2S_KEY_SIZE) {
+ crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ memcpy(tctx->key, key, keylen);
+ tctx->keylen = keylen;
+
+ return 0;
+}
+
+static int crypto_blake2s_init(struct shash_desc *desc)
+{
+ struct blake2s_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
+ struct blake2s_state *state = shash_desc_ctx(desc);
+ const int outlen = crypto_shash_digestsize(desc->tfm);
+
+ if (tctx->keylen)
+ blake2s_init_key(state, outlen, tctx->key, tctx->keylen);
+ else
+ blake2s_init(state, outlen);
+
+ return 0;
+}
+
+static int crypto_blake2s_update(struct shash_desc *desc, const u8 *in,
+ unsigned int inlen)
+{
+ struct blake2s_state *state = shash_desc_ctx(desc);
+ const size_t fill = BLAKE2S_BLOCK_SIZE - state->buflen;
+
+ if (unlikely(!inlen))
+ return 0;
+ if (inlen > fill) {
+ memcpy(state->buf + state->buflen, in, fill);
+ blake2s_compress_arch(state, state->buf, 1, BLAKE2S_BLOCK_SIZE);
+ state->buflen = 0;
+ in += fill;
+ inlen -= fill;
+ }
+ if (inlen > BLAKE2S_BLOCK_SIZE) {
+ const size_t nblocks = DIV_ROUND_UP(inlen, BLAKE2S_BLOCK_SIZE);
+ /* Hash one less (full) block than strictly possible */
+ blake2s_compress_arch(state, in, nblocks - 1, BLAKE2S_BLOCK_SIZE);
+ in += BLAKE2S_BLOCK_SIZE * (nblocks - 1);
+ inlen -= BLAKE2S_BLOCK_SIZE * (nblocks - 1);
+ }
+ memcpy(state->buf + state->buflen, in, inlen);
+ state->buflen += inlen;
+
+ return 0;
+}
+
+static int crypto_blake2s_final(struct shash_desc *desc, u8 *out)
+{
+ struct blake2s_state *state = shash_desc_ctx(desc);
+
+ blake2s_set_lastblock(state);
+ memset(state->buf + state->buflen, 0,
+ BLAKE2S_BLOCK_SIZE - state->buflen); /* Padding */
+ blake2s_compress_arch(state, state->buf, 1, state->buflen);
+ cpu_to_le32_array(state->h, ARRAY_SIZE(state->h));
+ memcpy(out, state->h, state->outlen);
+ memzero_explicit(state, sizeof(*state));
+
+ return 0;
+}
+
+static struct shash_alg blake2s_algs[] = {{
+ .base.cra_name = "blake2s-128",
+ .base.cra_driver_name = "blake2s-128-x86",
+ .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
+ .base.cra_ctxsize = sizeof(struct blake2s_tfm_ctx),
+ .base.cra_priority = 200,
+ .base.cra_blocksize = BLAKE2S_BLOCK_SIZE,
+ .base.cra_module = THIS_MODULE,
+
+ .digestsize = BLAKE2S_128_HASH_SIZE,
+ .setkey = crypto_blake2s_setkey,
+ .init = crypto_blake2s_init,
+ .update = crypto_blake2s_update,
+ .final = crypto_blake2s_final,
+ .descsize = sizeof(struct blake2s_state),
+}, {
+ .base.cra_name = "blake2s-160",
+ .base.cra_driver_name = "blake2s-160-x86",
+ .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
+ .base.cra_ctxsize = sizeof(struct blake2s_tfm_ctx),
+ .base.cra_priority = 200,
+ .base.cra_blocksize = BLAKE2S_BLOCK_SIZE,
+ .base.cra_module = THIS_MODULE,
+
+ .digestsize = BLAKE2S_160_HASH_SIZE,
+ .setkey = crypto_blake2s_setkey,
+ .init = crypto_blake2s_init,
+ .update = crypto_blake2s_update,
+ .final = crypto_blake2s_final,
+ .descsize = sizeof(struct blake2s_state),
+}, {
+ .base.cra_name = "blake2s-224",
+ .base.cra_driver_name = "blake2s-224-x86",
+ .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
+ .base.cra_ctxsize = sizeof(struct blake2s_tfm_ctx),
+ .base.cra_priority = 200,
+ .base.cra_blocksize = BLAKE2S_BLOCK_SIZE,
+ .base.cra_module = THIS_MODULE,
+
+ .digestsize = BLAKE2S_224_HASH_SIZE,
+ .setkey = crypto_blake2s_setkey,
+ .init = crypto_blake2s_init,
+ .update = crypto_blake2s_update,
+ .final = crypto_blake2s_final,
+ .descsize = sizeof(struct blake2s_state),
+}, {
+ .base.cra_name = "blake2s-256",
+ .base.cra_driver_name = "blake2s-256-x86",
+ .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
+ .base.cra_ctxsize = sizeof(struct blake2s_tfm_ctx),
+ .base.cra_priority = 200,
+ .base.cra_blocksize = BLAKE2S_BLOCK_SIZE,
+ .base.cra_module = THIS_MODULE,
+
+ .digestsize = BLAKE2S_256_HASH_SIZE,
+ .setkey = crypto_blake2s_setkey,
+ .init = crypto_blake2s_init,
+ .update = crypto_blake2s_update,
+ .final = crypto_blake2s_final,
+ .descsize = sizeof(struct blake2s_state),
+}};
+
+static int __init blake2s_mod_init(void)
+{
+ if (!boot_cpu_has(X86_FEATURE_SSSE3))
+ return 0;
+
+ static_branch_enable(&blake2s_use_ssse3);
+
+ if (IS_ENABLED(CONFIG_AS_AVX512) &&
+ boot_cpu_has(X86_FEATURE_AVX) &&
+ boot_cpu_has(X86_FEATURE_AVX2) &&
+ boot_cpu_has(X86_FEATURE_AVX512F) &&
+ boot_cpu_has(X86_FEATURE_AVX512VL) &&
+ cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM |
+ XFEATURE_MASK_AVX512, NULL))
+ static_branch_enable(&blake2s_use_avx512);
+
+ return crypto_register_shashes(blake2s_algs, ARRAY_SIZE(blake2s_algs));
+}
+
+static void __exit blake2s_mod_exit(void)
+{
+ if (boot_cpu_has(X86_FEATURE_SSSE3))
+ crypto_unregister_shashes(blake2s_algs, ARRAY_SIZE(blake2s_algs));
+}
+
+module_init(blake2s_mod_init);
+module_exit(blake2s_mod_exit);
+
+MODULE_ALIAS_CRYPTO("blake2s-128");
+MODULE_ALIAS_CRYPTO("blake2s-128-x86");
+MODULE_ALIAS_CRYPTO("blake2s-160");
+MODULE_ALIAS_CRYPTO("blake2s-160-x86");
+MODULE_ALIAS_CRYPTO("blake2s-224");
+MODULE_ALIAS_CRYPTO("blake2s-224-x86");
+MODULE_ALIAS_CRYPTO("blake2s-256");
+MODULE_ALIAS_CRYPTO("blake2s-256-x86");
+MODULE_LICENSE("GPL v2");
diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
index 330db7a48af8..4222ac6d6584 100644
--- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
+++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
@@ -103,7 +103,7 @@
bswapq RX0; \
xorq RX0, (RIO);
-ENTRY(__blowfish_enc_blk)
+SYM_FUNC_START(__blowfish_enc_blk)
/* input:
* %rdi: ctx
* %rsi: dst
@@ -139,9 +139,9 @@ ENTRY(__blowfish_enc_blk)
.L__enc_xor:
xor_block();
ret;
-ENDPROC(__blowfish_enc_blk)
+SYM_FUNC_END(__blowfish_enc_blk)
-ENTRY(blowfish_dec_blk)
+SYM_FUNC_START(blowfish_dec_blk)
/* input:
* %rdi: ctx
* %rsi: dst
@@ -171,7 +171,7 @@ ENTRY(blowfish_dec_blk)
movq %r11, %r12;
ret;
-ENDPROC(blowfish_dec_blk)
+SYM_FUNC_END(blowfish_dec_blk)
/**********************************************************************
4-way blowfish, four blocks parallel
@@ -283,7 +283,7 @@ ENDPROC(blowfish_dec_blk)
bswapq RX3; \
xorq RX3, 24(RIO);
-ENTRY(__blowfish_enc_blk_4way)
+SYM_FUNC_START(__blowfish_enc_blk_4way)
/* input:
* %rdi: ctx
* %rsi: dst
@@ -330,9 +330,9 @@ ENTRY(__blowfish_enc_blk_4way)
popq %rbx;
popq %r12;
ret;
-ENDPROC(__blowfish_enc_blk_4way)
+SYM_FUNC_END(__blowfish_enc_blk_4way)
-ENTRY(blowfish_dec_blk_4way)
+SYM_FUNC_START(blowfish_dec_blk_4way)
/* input:
* %rdi: ctx
* %rsi: dst
@@ -365,4 +365,4 @@ ENTRY(blowfish_dec_blk_4way)
popq %r12;
ret;
-ENDPROC(blowfish_dec_blk_4way)
+SYM_FUNC_END(blowfish_dec_blk_4way)
diff --git a/arch/x86/crypto/camellia-aesni-avx-asm_64.S b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
index a14af6eb09cb..d01ddd73de65 100644
--- a/arch/x86/crypto/camellia-aesni-avx-asm_64.S
+++ b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
@@ -189,20 +189,20 @@
* larger and would only be 0.5% faster (on sandy-bridge).
*/
.align 8
-roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
+SYM_FUNC_START_LOCAL(roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
roundsm16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
%xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15,
%rcx, (%r9));
ret;
-ENDPROC(roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
+SYM_FUNC_END(roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
.align 8
-roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
+SYM_FUNC_START_LOCAL(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
roundsm16(%xmm4, %xmm5, %xmm6, %xmm7, %xmm0, %xmm1, %xmm2, %xmm3,
%xmm12, %xmm13, %xmm14, %xmm15, %xmm8, %xmm9, %xmm10, %xmm11,
%rax, (%r9));
ret;
-ENDPROC(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
+SYM_FUNC_END(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
/*
* IN/OUT:
@@ -722,7 +722,7 @@ ENDPROC(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
.text
.align 8
-__camellia_enc_blk16:
+SYM_FUNC_START_LOCAL(__camellia_enc_blk16)
/* input:
* %rdi: ctx, CTX
* %rax: temporary storage, 256 bytes
@@ -806,10 +806,10 @@ __camellia_enc_blk16:
%xmm15, %rax, %rcx, 24);
jmp .Lenc_done;
-ENDPROC(__camellia_enc_blk16)
+SYM_FUNC_END(__camellia_enc_blk16)
.align 8
-__camellia_dec_blk16:
+SYM_FUNC_START_LOCAL(__camellia_dec_blk16)
/* input:
* %rdi: ctx, CTX
* %rax: temporary storage, 256 bytes
@@ -891,9 +891,9 @@ __camellia_dec_blk16:
((key_table + (24) * 8) + 4)(CTX));
jmp .Ldec_max24;
-ENDPROC(__camellia_dec_blk16)
+SYM_FUNC_END(__camellia_dec_blk16)
-ENTRY(camellia_ecb_enc_16way)
+SYM_FUNC_START(camellia_ecb_enc_16way)
/* input:
* %rdi: ctx, CTX
* %rsi: dst (16 blocks)
@@ -916,9 +916,9 @@ ENTRY(camellia_ecb_enc_16way)
FRAME_END
ret;
-ENDPROC(camellia_ecb_enc_16way)
+SYM_FUNC_END(camellia_ecb_enc_16way)
-ENTRY(camellia_ecb_dec_16way)
+SYM_FUNC_START(camellia_ecb_dec_16way)
/* input:
* %rdi: ctx, CTX
* %rsi: dst (16 blocks)
@@ -946,9 +946,9 @@ ENTRY(camellia_ecb_dec_16way)
FRAME_END
ret;
-ENDPROC(camellia_ecb_dec_16way)
+SYM_FUNC_END(camellia_ecb_dec_16way)
-ENTRY(camellia_cbc_dec_16way)
+SYM_FUNC_START(camellia_cbc_dec_16way)
/* input:
* %rdi: ctx, CTX
* %rsi: dst (16 blocks)
@@ -997,7 +997,7 @@ ENTRY(camellia_cbc_dec_16way)
FRAME_END
ret;
-ENDPROC(camellia_cbc_dec_16way)
+SYM_FUNC_END(camellia_cbc_dec_16way)
#define inc_le128(x, minus_one, tmp) \
vpcmpeqq minus_one, x, tmp; \
@@ -1005,7 +1005,7 @@ ENDPROC(camellia_cbc_dec_16way)
vpslldq $8, tmp, tmp; \
vpsubq tmp, x, x;
-ENTRY(camellia_ctr_16way)
+SYM_FUNC_START(camellia_ctr_16way)
/* input:
* %rdi: ctx, CTX
* %rsi: dst (16 blocks)
@@ -1110,7 +1110,7 @@ ENTRY(camellia_ctr_16way)
FRAME_END
ret;
-ENDPROC(camellia_ctr_16way)
+SYM_FUNC_END(camellia_ctr_16way)
#define gf128mul_x_ble(iv, mask, tmp) \
vpsrad $31, iv, tmp; \
@@ -1120,7 +1120,7 @@ ENDPROC(camellia_ctr_16way)
vpxor tmp, iv, iv;
.align 8
-camellia_xts_crypt_16way:
+SYM_FUNC_START_LOCAL(camellia_xts_crypt_16way)
/* input:
* %rdi: ctx, CTX
* %rsi: dst (16 blocks)
@@ -1254,9 +1254,9 @@ camellia_xts_crypt_16way:
FRAME_END
ret;
-ENDPROC(camellia_xts_crypt_16way)
+SYM_FUNC_END(camellia_xts_crypt_16way)
-ENTRY(camellia_xts_enc_16way)
+SYM_FUNC_START(camellia_xts_enc_16way)
/* input:
* %rdi: ctx, CTX
* %rsi: dst (16 blocks)
@@ -1268,9 +1268,9 @@ ENTRY(camellia_xts_enc_16way)
leaq __camellia_enc_blk16, %r9;
jmp camellia_xts_crypt_16way;
-ENDPROC(camellia_xts_enc_16way)
+SYM_FUNC_END(camellia_xts_enc_16way)
-ENTRY(camellia_xts_dec_16way)
+SYM_FUNC_START(camellia_xts_dec_16way)
/* input:
* %rdi: ctx, CTX
* %rsi: dst (16 blocks)
@@ -1286,4 +1286,4 @@ ENTRY(camellia_xts_dec_16way)
leaq __camellia_dec_blk16, %r9;
jmp camellia_xts_crypt_16way;
-ENDPROC(camellia_xts_dec_16way)
+SYM_FUNC_END(camellia_xts_dec_16way)
diff --git a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
index 4be4c7c3ba27..563ef6e83cdd 100644
--- a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
+++ b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
@@ -223,20 +223,20 @@
* larger and would only be marginally faster.
*/
.align 8
-roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
+SYM_FUNC_START_LOCAL(roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
roundsm32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
%ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15,
%rcx, (%r9));
ret;
-ENDPROC(roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
+SYM_FUNC_END(roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
.align 8
-roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
+SYM_FUNC_START_LOCAL(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
roundsm32(%ymm4, %ymm5, %ymm6, %ymm7, %ymm0, %ymm1, %ymm2, %ymm3,
%ymm12, %ymm13, %ymm14, %ymm15, %ymm8, %ymm9, %ymm10, %ymm11,
%rax, (%r9));
ret;
-ENDPROC(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
+SYM_FUNC_END(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
/*
* IN/OUT:
@@ -760,7 +760,7 @@ ENDPROC(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
.text
.align 8
-__camellia_enc_blk32:
+SYM_FUNC_START_LOCAL(__camellia_enc_blk32)
/* input:
* %rdi: ctx, CTX
* %rax: temporary storage, 512 bytes
@@ -844,10 +844,10 @@ __camellia_enc_blk32:
%ymm15, %rax, %rcx, 24);
jmp .Lenc_done;
-ENDPROC(__camellia_enc_blk32)
+SYM_FUNC_END(__camellia_enc_blk32)
.align 8
-__camellia_dec_blk32:
+SYM_FUNC_START_LOCAL(__camellia_dec_blk32)
/* input:
* %rdi: ctx, CTX
* %rax: temporary storage, 512 bytes
@@ -929,9 +929,9 @@ __camellia_dec_blk32:
((key_table + (24) * 8) + 4)(CTX));
jmp .Ldec_max24;
-ENDPROC(__camellia_dec_blk32)
+SYM_FUNC_END(__camellia_dec_blk32)
-ENTRY(camellia_ecb_enc_32way)
+SYM_FUNC_START(camellia_ecb_enc_32way)
/* input:
* %rdi: ctx, CTX
* %rsi: dst (32 blocks)
@@ -958,9 +958,9 @@ ENTRY(camellia_ecb_enc_32way)
FRAME_END
ret;
-ENDPROC(camellia_ecb_enc_32way)
+SYM_FUNC_END(camellia_ecb_enc_32way)
-ENTRY(camellia_ecb_dec_32way)
+SYM_FUNC_START(camellia_ecb_dec_32way)
/* input:
* %rdi: ctx, CTX
* %rsi: dst (32 blocks)
@@ -992,9 +992,9 @@ ENTRY(camellia_ecb_dec_32way)
FRAME_END
ret;
-ENDPROC(camellia_ecb_dec_32way)
+SYM_FUNC_END(camellia_ecb_dec_32way)
-ENTRY(camellia_cbc_dec_32way)
+SYM_FUNC_START(camellia_cbc_dec_32way)
/* input:
* %rdi: ctx, CTX
* %rsi: dst (32 blocks)
@@ -1060,7 +1060,7 @@ ENTRY(camellia_cbc_dec_32way)
FRAME_END
ret;
-ENDPROC(camellia_cbc_dec_32way)
+SYM_FUNC_END(camellia_cbc_dec_32way)
#define inc_le128(x, minus_one, tmp) \
vpcmpeqq minus_one, x, tmp; \
@@ -1076,7 +1076,7 @@ ENDPROC(camellia_cbc_dec_32way)
vpslldq $8, tmp1, tmp1; \
vpsubq tmp1, x, x;
-ENTRY(camellia_ctr_32way)
+SYM_FUNC_START(camellia_ctr_32way)
/* input:
* %rdi: ctx, CTX
* %rsi: dst (32 blocks)
@@ -1200,7 +1200,7 @@ ENTRY(camellia_ctr_32way)
FRAME_END
ret;
-ENDPROC(camellia_ctr_32way)
+SYM_FUNC_END(camellia_ctr_32way)
#define gf128mul_x_ble(iv, mask, tmp) \
vpsrad $31, iv, tmp; \
@@ -1222,7 +1222,7 @@ ENDPROC(camellia_ctr_32way)
vpxor tmp1, iv, iv;
.align 8
-camellia_xts_crypt_32way:
+SYM_FUNC_START_LOCAL(camellia_xts_crypt_32way)
/* input:
* %rdi: ctx, CTX
* %rsi: dst (32 blocks)
@@ -1367,9 +1367,9 @@ camellia_xts_crypt_32way:
FRAME_END
ret;
-ENDPROC(camellia_xts_crypt_32way)
+SYM_FUNC_END(camellia_xts_crypt_32way)
-ENTRY(camellia_xts_enc_32way)
+SYM_FUNC_START(camellia_xts_enc_32way)
/* input:
* %rdi: ctx, CTX
* %rsi: dst (32 blocks)
@@ -1382,9 +1382,9 @@ ENTRY(camellia_xts_enc_32way)
leaq __camellia_enc_blk32, %r9;
jmp camellia_xts_crypt_32way;
-ENDPROC(camellia_xts_enc_32way)
+SYM_FUNC_END(camellia_xts_enc_32way)
-ENTRY(camellia_xts_dec_32way)
+SYM_FUNC_START(camellia_xts_dec_32way)
/* input:
* %rdi: ctx, CTX
* %rsi: dst (32 blocks)
@@ -1400,4 +1400,4 @@ ENTRY(camellia_xts_dec_32way)
leaq __camellia_dec_blk32, %r9;
jmp camellia_xts_crypt_32way;
-ENDPROC(camellia_xts_dec_32way)
+SYM_FUNC_END(camellia_xts_dec_32way)
diff --git a/arch/x86/crypto/camellia-x86_64-asm_64.S b/arch/x86/crypto/camellia-x86_64-asm_64.S
index 23528bc18fc6..1372e6408850 100644
--- a/arch/x86/crypto/camellia-x86_64-asm_64.S
+++ b/arch/x86/crypto/camellia-x86_64-asm_64.S
@@ -175,7 +175,7 @@
bswapq RAB0; \
movq RAB0, 4*2(RIO);
-ENTRY(__camellia_enc_blk)
+SYM_FUNC_START(__camellia_enc_blk)
/* input:
* %rdi: ctx, CTX
* %rsi: dst
@@ -220,9 +220,9 @@ ENTRY(__camellia_enc_blk)
movq RR12, %r12;
ret;
-ENDPROC(__camellia_enc_blk)
+SYM_FUNC_END(__camellia_enc_blk)
-ENTRY(camellia_dec_blk)
+SYM_FUNC_START(camellia_dec_blk)
/* input:
* %rdi: ctx, CTX
* %rsi: dst
@@ -258,7 +258,7 @@ ENTRY(camellia_dec_blk)
movq RR12, %r12;
ret;
-ENDPROC(camellia_dec_blk)
+SYM_FUNC_END(camellia_dec_blk)
/**********************************************************************
2-way camellia
@@ -409,7 +409,7 @@ ENDPROC(camellia_dec_blk)
bswapq RAB1; \
movq RAB1, 12*2(RIO);
-ENTRY(__camellia_enc_blk_2way)
+SYM_FUNC_START(__camellia_enc_blk_2way)
/* input:
* %rdi: ctx, CTX
* %rsi: dst
@@ -456,9 +456,9 @@ ENTRY(__camellia_enc_blk_2way)
movq RR12, %r12;
popq %rbx;
ret;
-ENDPROC(__camellia_enc_blk_2way)
+SYM_FUNC_END(__camellia_enc_blk_2way)
-ENTRY(camellia_dec_blk_2way)
+SYM_FUNC_START(camellia_dec_blk_2way)
/* input:
* %rdi: ctx, CTX
* %rsi: dst
@@ -496,4 +496,4 @@ ENTRY(camellia_dec_blk_2way)
movq RR12, %r12;
movq RXOR, %rbx;
ret;
-ENDPROC(camellia_dec_blk_2way)
+SYM_FUNC_END(camellia_dec_blk_2way)
diff --git a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
index dc55c3332fcc..8a6181b08b59 100644
--- a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
+++ b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
@@ -209,7 +209,7 @@
.text
.align 16
-__cast5_enc_blk16:
+SYM_FUNC_START_LOCAL(__cast5_enc_blk16)
/* input:
* %rdi: ctx
* RL1: blocks 1 and 2
@@ -280,10 +280,10 @@ __cast5_enc_blk16:
outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
ret;
-ENDPROC(__cast5_enc_blk16)
+SYM_FUNC_END(__cast5_enc_blk16)
.align 16
-__cast5_dec_blk16:
+SYM_FUNC_START_LOCAL(__cast5_dec_blk16)
/* input:
* %rdi: ctx
* RL1: encrypted blocks 1 and 2
@@ -357,9 +357,9 @@ __cast5_dec_blk16:
.L__skip_dec:
vpsrldq $4, RKR, RKR;
jmp .L__dec_tail;
-ENDPROC(__cast5_dec_blk16)
+SYM_FUNC_END(__cast5_dec_blk16)
-ENTRY(cast5_ecb_enc_16way)
+SYM_FUNC_START(cast5_ecb_enc_16way)
/* input:
* %rdi: ctx
* %rsi: dst
@@ -394,9 +394,9 @@ ENTRY(cast5_ecb_enc_16way)
popq %r15;
FRAME_END
ret;
-ENDPROC(cast5_ecb_enc_16way)
+SYM_FUNC_END(cast5_ecb_enc_16way)
-ENTRY(cast5_ecb_dec_16way)
+SYM_FUNC_START(cast5_ecb_dec_16way)
/* input:
* %rdi: ctx
* %rsi: dst
@@ -432,9 +432,9 @@ ENTRY(cast5_ecb_dec_16way)
popq %r15;
FRAME_END
ret;
-ENDPROC(cast5_ecb_dec_16way)
+SYM_FUNC_END(cast5_ecb_dec_16way)
-ENTRY(cast5_cbc_dec_16way)
+SYM_FUNC_START(cast5_cbc_dec_16way)
/* input:
* %rdi: ctx
* %rsi: dst
@@ -484,9 +484,9 @@ ENTRY(cast5_cbc_dec_16way)
popq %r12;
FRAME_END
ret;
-ENDPROC(cast5_cbc_dec_16way)
+SYM_FUNC_END(cast5_cbc_dec_16way)
-ENTRY(cast5_ctr_16way)
+SYM_FUNC_START(cast5_ctr_16way)
/* input:
* %rdi: ctx
* %rsi: dst
@@ -560,4 +560,4 @@ ENTRY(cast5_ctr_16way)
popq %r12;
FRAME_END
ret;
-ENDPROC(cast5_ctr_16way)
+SYM_FUNC_END(cast5_ctr_16way)
diff --git a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
index 4f0a7cdb94d9..932a3ce32a88 100644
--- a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
+++ b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
@@ -247,7 +247,7 @@
.text
.align 8
-__cast6_enc_blk8:
+SYM_FUNC_START_LOCAL(__cast6_enc_blk8)
/* input:
* %rdi: ctx
* RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: blocks
@@ -292,10 +292,10 @@ __cast6_enc_blk8:
outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
ret;
-ENDPROC(__cast6_enc_blk8)
+SYM_FUNC_END(__cast6_enc_blk8)
.align 8
-__cast6_dec_blk8:
+SYM_FUNC_START_LOCAL(__cast6_dec_blk8)
/* input:
* %rdi: ctx
* RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: encrypted blocks
@@ -339,9 +339,9 @@ __cast6_dec_blk8:
outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
ret;
-ENDPROC(__cast6_dec_blk8)
+SYM_FUNC_END(__cast6_dec_blk8)
-ENTRY(cast6_ecb_enc_8way)
+SYM_FUNC_START(cast6_ecb_enc_8way)
/* input:
* %rdi: ctx
* %rsi: dst
@@ -362,9 +362,9 @@ ENTRY(cast6_ecb_enc_8way)
popq %r15;
FRAME_END
ret;
-ENDPROC(cast6_ecb_enc_8way)
+SYM_FUNC_END(cast6_ecb_enc_8way)
-ENTRY(cast6_ecb_dec_8way)
+SYM_FUNC_START(cast6_ecb_dec_8way)
/* input:
* %rdi: ctx
* %rsi: dst
@@ -385,9 +385,9 @@ ENTRY(cast6_ecb_dec_8way)
popq %r15;
FRAME_END
ret;
-ENDPROC(cast6_ecb_dec_8way)
+SYM_FUNC_END(cast6_ecb_dec_8way)
-ENTRY(cast6_cbc_dec_8way)
+SYM_FUNC_START(cast6_cbc_dec_8way)
/* input:
* %rdi: ctx
* %rsi: dst
@@ -411,9 +411,9 @@ ENTRY(cast6_cbc_dec_8way)
popq %r12;
FRAME_END
ret;
-ENDPROC(cast6_cbc_dec_8way)
+SYM_FUNC_END(cast6_cbc_dec_8way)
-ENTRY(cast6_ctr_8way)
+SYM_FUNC_START(cast6_ctr_8way)
/* input:
* %rdi: ctx, CTX
* %rsi: dst
@@ -439,9 +439,9 @@ ENTRY(cast6_ctr_8way)
popq %r12;
FRAME_END
ret;
-ENDPROC(cast6_ctr_8way)
+SYM_FUNC_END(cast6_ctr_8way)
-ENTRY(cast6_xts_enc_8way)
+SYM_FUNC_START(cast6_xts_enc_8way)
/* input:
* %rdi: ctx, CTX
* %rsi: dst
@@ -466,9 +466,9 @@ ENTRY(cast6_xts_enc_8way)
popq %r15;
FRAME_END
ret;
-ENDPROC(cast6_xts_enc_8way)
+SYM_FUNC_END(cast6_xts_enc_8way)
-ENTRY(cast6_xts_dec_8way)
+SYM_FUNC_START(cast6_xts_dec_8way)
/* input:
* %rdi: ctx, CTX
* %rsi: dst
@@ -493,4 +493,4 @@ ENTRY(cast6_xts_dec_8way)
popq %r15;
FRAME_END
ret;
-ENDPROC(cast6_xts_dec_8way)
+SYM_FUNC_END(cast6_xts_dec_8way)
diff --git a/arch/x86/crypto/chacha-avx2-x86_64.S b/arch/x86/crypto/chacha-avx2-x86_64.S
index 831e4434fc20..ee9a40ab4109 100644
--- a/arch/x86/crypto/chacha-avx2-x86_64.S
+++ b/arch/x86/crypto/chacha-avx2-x86_64.S
@@ -34,7 +34,7 @@ CTR4BL: .octa 0x00000000000000000000000000000002
.text
-ENTRY(chacha_2block_xor_avx2)
+SYM_FUNC_START(chacha_2block_xor_avx2)
# %rdi: Input state matrix, s
# %rsi: up to 2 data blocks output, o
# %rdx: up to 2 data blocks input, i
@@ -224,9 +224,9 @@ ENTRY(chacha_2block_xor_avx2)
lea -8(%r10),%rsp
jmp .Ldone2
-ENDPROC(chacha_2block_xor_avx2)
+SYM_FUNC_END(chacha_2block_xor_avx2)
-ENTRY(chacha_4block_xor_avx2)
+SYM_FUNC_START(chacha_4block_xor_avx2)
# %rdi: Input state matrix, s
# %rsi: up to 4 data blocks output, o
# %rdx: up to 4 data blocks input, i
@@ -529,9 +529,9 @@ ENTRY(chacha_4block_xor_avx2)
lea -8(%r10),%rsp
jmp .Ldone4
-ENDPROC(chacha_4block_xor_avx2)
+SYM_FUNC_END(chacha_4block_xor_avx2)
-ENTRY(chacha_8block_xor_avx2)
+SYM_FUNC_START(chacha_8block_xor_avx2)
# %rdi: Input state matrix, s
# %rsi: up to 8 data blocks output, o
# %rdx: up to 8 data blocks input, i
@@ -1018,4 +1018,4 @@ ENTRY(chacha_8block_xor_avx2)
jmp .Ldone8
-ENDPROC(chacha_8block_xor_avx2)
+SYM_FUNC_END(chacha_8block_xor_avx2)
diff --git a/arch/x86/crypto/chacha-avx512vl-x86_64.S b/arch/x86/crypto/chacha-avx512vl-x86_64.S
index 848f9c75fd4f..bb193fde123a 100644
--- a/arch/x86/crypto/chacha-avx512vl-x86_64.S
+++ b/arch/x86/crypto/chacha-avx512vl-x86_64.S
@@ -24,7 +24,7 @@ CTR8BL: .octa 0x00000003000000020000000100000000
.text
-ENTRY(chacha_2block_xor_avx512vl)
+SYM_FUNC_START(chacha_2block_xor_avx512vl)
# %rdi: Input state matrix, s
# %rsi: up to 2 data blocks output, o
# %rdx: up to 2 data blocks input, i
@@ -187,9 +187,9 @@ ENTRY(chacha_2block_xor_avx512vl)
jmp .Ldone2
-ENDPROC(chacha_2block_xor_avx512vl)
+SYM_FUNC_END(chacha_2block_xor_avx512vl)
-ENTRY(chacha_4block_xor_avx512vl)
+SYM_FUNC_START(chacha_4block_xor_avx512vl)
# %rdi: Input state matrix, s
# %rsi: up to 4 data blocks output, o
# %rdx: up to 4 data blocks input, i
@@ -453,9 +453,9 @@ ENTRY(chacha_4block_xor_avx512vl)
jmp .Ldone4
-ENDPROC(chacha_4block_xor_avx512vl)
+SYM_FUNC_END(chacha_4block_xor_avx512vl)
-ENTRY(chacha_8block_xor_avx512vl)
+SYM_FUNC_START(chacha_8block_xor_avx512vl)
# %rdi: Input state matrix, s
# %rsi: up to 8 data blocks output, o
# %rdx: up to 8 data blocks input, i
@@ -833,4 +833,4 @@ ENTRY(chacha_8block_xor_avx512vl)
jmp .Ldone8
-ENDPROC(chacha_8block_xor_avx512vl)
+SYM_FUNC_END(chacha_8block_xor_avx512vl)
diff --git a/arch/x86/crypto/chacha-ssse3-x86_64.S b/arch/x86/crypto/chacha-ssse3-x86_64.S
index 2d86c7d6dc88..a38ab2512a6f 100644
--- a/arch/x86/crypto/chacha-ssse3-x86_64.S
+++ b/arch/x86/crypto/chacha-ssse3-x86_64.S
@@ -33,7 +33,7 @@ CTRINC: .octa 0x00000003000000020000000100000000
*
* Clobbers: %r8d, %xmm4-%xmm7
*/
-chacha_permute:
+SYM_FUNC_START_LOCAL(chacha_permute)
movdqa ROT8(%rip),%xmm4
movdqa ROT16(%rip),%xmm5
@@ -109,9 +109,9 @@ chacha_permute:
jnz .Ldoubleround
ret
-ENDPROC(chacha_permute)
+SYM_FUNC_END(chacha_permute)
-ENTRY(chacha_block_xor_ssse3)
+SYM_FUNC_START(chacha_block_xor_ssse3)
# %rdi: Input state matrix, s
# %rsi: up to 1 data block output, o
# %rdx: up to 1 data block input, i
@@ -197,9 +197,9 @@ ENTRY(chacha_block_xor_ssse3)
lea -8(%r10),%rsp
jmp .Ldone
-ENDPROC(chacha_block_xor_ssse3)
+SYM_FUNC_END(chacha_block_xor_ssse3)
-ENTRY(hchacha_block_ssse3)
+SYM_FUNC_START(hchacha_block_ssse3)
# %rdi: Input state matrix, s
# %rsi: output (8 32-bit words)
# %edx: nrounds
@@ -218,9 +218,9 @@ ENTRY(hchacha_block_ssse3)
FRAME_END
ret
-ENDPROC(hchacha_block_ssse3)
+SYM_FUNC_END(hchacha_block_ssse3)
-ENTRY(chacha_4block_xor_ssse3)
+SYM_FUNC_START(chacha_4block_xor_ssse3)
# %rdi: Input state matrix, s
# %rsi: up to 4 data blocks output, o
# %rdx: up to 4 data blocks input, i
@@ -788,4 +788,4 @@ ENTRY(chacha_4block_xor_ssse3)
jmp .Ldone4
-ENDPROC(chacha_4block_xor_ssse3)
+SYM_FUNC_END(chacha_4block_xor_ssse3)
diff --git a/arch/x86/crypto/chacha_glue.c b/arch/x86/crypto/chacha_glue.c
index 388f95a4ec24..a94e30b6f941 100644
--- a/arch/x86/crypto/chacha_glue.c
+++ b/arch/x86/crypto/chacha_glue.c
@@ -7,7 +7,7 @@
*/
#include <crypto/algapi.h>
-#include <crypto/chacha.h>
+#include <crypto/internal/chacha.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <linux/kernel.h>
@@ -21,24 +21,24 @@ asmlinkage void chacha_block_xor_ssse3(u32 *state, u8 *dst, const u8 *src,
asmlinkage void chacha_4block_xor_ssse3(u32 *state, u8 *dst, const u8 *src,
unsigned int len, int nrounds);
asmlinkage void hchacha_block_ssse3(const u32 *state, u32 *out, int nrounds);
-#ifdef CONFIG_AS_AVX2
+
asmlinkage void chacha_2block_xor_avx2(u32 *state, u8 *dst, const u8 *src,
unsigned int len, int nrounds);
asmlinkage void chacha_4block_xor_avx2(u32 *state, u8 *dst, const u8 *src,
unsigned int len, int nrounds);
asmlinkage void chacha_8block_xor_avx2(u32 *state, u8 *dst, const u8 *src,
unsigned int len, int nrounds);
-static bool chacha_use_avx2;
-#ifdef CONFIG_AS_AVX512
+
asmlinkage void chacha_2block_xor_avx512vl(u32 *state, u8 *dst, const u8 *src,
unsigned int len, int nrounds);
asmlinkage void chacha_4block_xor_avx512vl(u32 *state, u8 *dst, const u8 *src,
unsigned int len, int nrounds);
asmlinkage void chacha_8block_xor_avx512vl(u32 *state, u8 *dst, const u8 *src,
unsigned int len, int nrounds);
-static bool chacha_use_avx512vl;
-#endif
-#endif
+
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(chacha_use_simd);
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(chacha_use_avx2);
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(chacha_use_avx512vl);
static unsigned int chacha_advance(unsigned int len, unsigned int maxblocks)
{
@@ -49,9 +49,8 @@ static unsigned int chacha_advance(unsigned int len, unsigned int maxblocks)
static void chacha_dosimd(u32 *state, u8 *dst, const u8 *src,
unsigned int bytes, int nrounds)
{
-#ifdef CONFIG_AS_AVX2
-#ifdef CONFIG_AS_AVX512
- if (chacha_use_avx512vl) {
+ if (IS_ENABLED(CONFIG_AS_AVX512) &&
+ static_branch_likely(&chacha_use_avx512vl)) {
while (bytes >= CHACHA_BLOCK_SIZE * 8) {
chacha_8block_xor_avx512vl(state, dst, src, bytes,
nrounds);
@@ -79,8 +78,9 @@ static void chacha_dosimd(u32 *state, u8 *dst, const u8 *src,
return;
}
}
-#endif
- if (chacha_use_avx2) {
+
+ if (IS_ENABLED(CONFIG_AS_AVX2) &&
+ static_branch_likely(&chacha_use_avx2)) {
while (bytes >= CHACHA_BLOCK_SIZE * 8) {
chacha_8block_xor_avx2(state, dst, src, bytes, nrounds);
bytes -= CHACHA_BLOCK_SIZE * 8;
@@ -104,7 +104,7 @@ static void chacha_dosimd(u32 *state, u8 *dst, const u8 *src,
return;
}
}
-#endif
+
while (bytes >= CHACHA_BLOCK_SIZE * 4) {
chacha_4block_xor_ssse3(state, dst, src, bytes, nrounds);
bytes -= CHACHA_BLOCK_SIZE * 4;
@@ -123,37 +123,76 @@ static void chacha_dosimd(u32 *state, u8 *dst, const u8 *src,
}
}
-static int chacha_simd_stream_xor(struct skcipher_walk *walk,
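+/*
+ * Library interface: exported for callers that use the ChaCha library
+ * API directly rather than the crypto_skcipher API.
+ * hchacha_block_arch() and chacha_crypt_arch() fall back to the generic
+ * code whenever SIMD is not usable.
+ */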
+void hchacha_block_arch(const u32 *state, u32 *stream, int nrounds)
+{
+ state = PTR_ALIGN(state, CHACHA_STATE_ALIGN);
+
+ if (!static_branch_likely(&chacha_use_simd) || !crypto_simd_usable()) {
+ hchacha_block_generic(state, stream, nrounds);
+ } else {
+ kernel_fpu_begin();
+ hchacha_block_ssse3(state, stream, nrounds);
+ kernel_fpu_end();
+ }
+}
+EXPORT_SYMBOL(hchacha_block_arch);
+
+void chacha_init_arch(u32 *state, const u32 *key, const u8 *iv)
+{
+ state = PTR_ALIGN(state, CHACHA_STATE_ALIGN);
+
+ chacha_init_generic(state, key, iv);
+}
+EXPORT_SYMBOL(chacha_init_arch);
+
+void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src, unsigned int bytes,
+ int nrounds)
+{
+ state = PTR_ALIGN(state, CHACHA_STATE_ALIGN);
+
+ if (!static_branch_likely(&chacha_use_simd) || !crypto_simd_usable() ||
+ bytes <= CHACHA_BLOCK_SIZE)
+ return chacha_crypt_generic(state, dst, src, bytes, nrounds);
+
+ kernel_fpu_begin();
+ chacha_dosimd(state, dst, src, bytes, nrounds);
+ kernel_fpu_end();
+}
+EXPORT_SYMBOL(chacha_crypt_arch);
+
+static int chacha_simd_stream_xor(struct skcipher_request *req,
const struct chacha_ctx *ctx, const u8 *iv)
{
u32 *state, state_buf[16 + 2] __aligned(8);
- int next_yield = 4096; /* bytes until next FPU yield */
- int err = 0;
+ struct skcipher_walk walk;
+ int err;
+
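+ /* A non-atomic walk: kernel_fpu_begin/end bracket each walk step
+  * individually below, so the walker is free to sleep between steps. */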
+ err = skcipher_walk_virt(&walk, req, false);
BUILD_BUG_ON(CHACHA_STATE_ALIGN != 16);
state = PTR_ALIGN(state_buf + 0, CHACHA_STATE_ALIGN);
- crypto_chacha_init(state, ctx, iv);
-
- while (walk->nbytes > 0) {
- unsigned int nbytes = walk->nbytes;
+ chacha_init_generic(state, ctx->key, iv);
- if (nbytes < walk->total) {
- nbytes = round_down(nbytes, walk->stride);
- next_yield -= nbytes;
- }
+ while (walk.nbytes > 0) {
+ unsigned int nbytes = walk.nbytes;
- chacha_dosimd(state, walk->dst.virt.addr, walk->src.virt.addr,
- nbytes, ctx->nrounds);
+ if (nbytes < walk.total)
+ nbytes = round_down(nbytes, walk.stride);
- if (next_yield <= 0) {
- /* temporarily allow preemption */
- kernel_fpu_end();
+ if (!static_branch_likely(&chacha_use_simd) ||
+ !crypto_simd_usable()) {
+ chacha_crypt_generic(state, walk.dst.virt.addr,
+ walk.src.virt.addr, nbytes,
+ ctx->nrounds);
+ } else {
kernel_fpu_begin();
- next_yield = 4096;
+ chacha_dosimd(state, walk.dst.virt.addr,
+ walk.src.virt.addr, nbytes,
+ ctx->nrounds);
+ kernel_fpu_end();
}
-
- err = skcipher_walk_done(walk, walk->nbytes - nbytes);
+ err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
}
return err;
@@ -163,55 +202,34 @@ static int chacha_simd(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct skcipher_walk walk;
- int err;
-
- if (req->cryptlen <= CHACHA_BLOCK_SIZE || !crypto_simd_usable())
- return crypto_chacha_crypt(req);
- err = skcipher_walk_virt(&walk, req, true);
- if (err)
- return err;
-
- kernel_fpu_begin();
- err = chacha_simd_stream_xor(&walk, ctx, req->iv);
- kernel_fpu_end();
- return err;
+ return chacha_simd_stream_xor(req, ctx, req->iv);
}
static int xchacha_simd(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct skcipher_walk walk;
- struct chacha_ctx subctx;
u32 *state, state_buf[16 + 2] __aligned(8);
+ struct chacha_ctx subctx;
u8 real_iv[16];
- int err;
-
- if (req->cryptlen <= CHACHA_BLOCK_SIZE || !crypto_simd_usable())
- return crypto_xchacha_crypt(req);
-
- err = skcipher_walk_virt(&walk, req, true);
- if (err)
- return err;
BUILD_BUG_ON(CHACHA_STATE_ALIGN != 16);
state = PTR_ALIGN(state_buf + 0, CHACHA_STATE_ALIGN);
- crypto_chacha_init(state, ctx, req->iv);
-
- kernel_fpu_begin();
-
- hchacha_block_ssse3(state, subctx.key, ctx->nrounds);
+ chacha_init_generic(state, ctx->key, req->iv);
+
+ if (req->cryptlen > CHACHA_BLOCK_SIZE && crypto_simd_usable()) {
+ kernel_fpu_begin();
+ hchacha_block_ssse3(state, subctx.key, ctx->nrounds);
+ kernel_fpu_end();
+ } else {
+ hchacha_block_generic(state, subctx.key, ctx->nrounds);
+ }
subctx.nrounds = ctx->nrounds;
memcpy(&real_iv[0], req->iv + 24, 8);
memcpy(&real_iv[8], req->iv + 16, 8);
- err = chacha_simd_stream_xor(&walk, &subctx, real_iv);
-
- kernel_fpu_end();
-
- return err;
+ return chacha_simd_stream_xor(req, &subctx, real_iv);
}
static struct skcipher_alg algs[] = {
@@ -227,7 +245,7 @@ static struct skcipher_alg algs[] = {
.max_keysize = CHACHA_KEY_SIZE,
.ivsize = CHACHA_IV_SIZE,
.chunksize = CHACHA_BLOCK_SIZE,
- .setkey = crypto_chacha20_setkey,
+ .setkey = chacha20_setkey,
.encrypt = chacha_simd,
.decrypt = chacha_simd,
}, {
@@ -242,7 +260,7 @@ static struct skcipher_alg algs[] = {
.max_keysize = CHACHA_KEY_SIZE,
.ivsize = XCHACHA_IV_SIZE,
.chunksize = CHACHA_BLOCK_SIZE,
- .setkey = crypto_chacha20_setkey,
+ .setkey = chacha20_setkey,
.encrypt = xchacha_simd,
.decrypt = xchacha_simd,
}, {
@@ -257,7 +275,7 @@ static struct skcipher_alg algs[] = {
.max_keysize = CHACHA_KEY_SIZE,
.ivsize = XCHACHA_IV_SIZE,
.chunksize = CHACHA_BLOCK_SIZE,
- .setkey = crypto_chacha12_setkey,
+ .setkey = chacha12_setkey,
.encrypt = xchacha_simd,
.decrypt = xchacha_simd,
},
@@ -266,24 +284,28 @@ static struct skcipher_alg algs[] = {
static int __init chacha_simd_mod_init(void)
{
if (!boot_cpu_has(X86_FEATURE_SSSE3))
- return -ENODEV;
-
-#ifdef CONFIG_AS_AVX2
- chacha_use_avx2 = boot_cpu_has(X86_FEATURE_AVX) &&
- boot_cpu_has(X86_FEATURE_AVX2) &&
- cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL);
-#ifdef CONFIG_AS_AVX512
- chacha_use_avx512vl = chacha_use_avx2 &&
- boot_cpu_has(X86_FEATURE_AVX512VL) &&
- boot_cpu_has(X86_FEATURE_AVX512BW); /* kmovq */
-#endif
-#endif
+ return 0;
+
+ static_branch_enable(&chacha_use_simd);
+
+ if (IS_ENABLED(CONFIG_AS_AVX2) &&
+ boot_cpu_has(X86_FEATURE_AVX) &&
+ boot_cpu_has(X86_FEATURE_AVX2) &&
+ cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL)) {
+ static_branch_enable(&chacha_use_avx2);
+
+ if (IS_ENABLED(CONFIG_AS_AVX512) &&
+ boot_cpu_has(X86_FEATURE_AVX512VL) &&
+ boot_cpu_has(X86_FEATURE_AVX512BW)) /* kmovq */
+ static_branch_enable(&chacha_use_avx512vl);
+ }
return crypto_register_skciphers(algs, ARRAY_SIZE(algs));
}
static void __exit chacha_simd_mod_fini(void)
{
- crypto_unregister_skciphers(algs, ARRAY_SIZE(algs));
+ if (boot_cpu_has(X86_FEATURE_SSSE3))
+ crypto_unregister_skciphers(algs, ARRAY_SIZE(algs));
}
module_init(chacha_simd_mod_init);
diff --git a/arch/x86/crypto/crc32-pclmul_asm.S b/arch/x86/crypto/crc32-pclmul_asm.S
index 1c099dc08cc3..9fd28ff65bc2 100644
--- a/arch/x86/crypto/crc32-pclmul_asm.S
+++ b/arch/x86/crypto/crc32-pclmul_asm.S
@@ -103,7 +103,7 @@
* size_t len, uint crc32)
*/
-ENTRY(crc32_pclmul_le_16) /* buffer and buffer size are 16 bytes aligned */
+SYM_FUNC_START(crc32_pclmul_le_16) /* buffer and buffer size are 16 bytes aligned */
movdqa (BUF), %xmm1
movdqa 0x10(BUF), %xmm2
movdqa 0x20(BUF), %xmm3
@@ -238,4 +238,4 @@ fold_64:
PEXTRD 0x01, %xmm1, %eax
ret
-ENDPROC(crc32_pclmul_le_16)
+SYM_FUNC_END(crc32_pclmul_le_16)
diff --git a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
index d9b734d0c8cc..0e6690e3618c 100644
--- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
+++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
@@ -74,7 +74,7 @@
# unsigned int crc_pcl(u8 *buffer, int len, unsigned int crc_init);
.text
-ENTRY(crc_pcl)
+SYM_FUNC_START(crc_pcl)
#define bufp %rdi
#define bufp_dw %edi
#define bufp_w %di
@@ -311,7 +311,7 @@ do_return:
popq %rdi
popq %rbx
ret
-ENDPROC(crc_pcl)
+SYM_FUNC_END(crc_pcl)
.section .rodata, "a", @progbits
################################################################
diff --git a/arch/x86/crypto/crct10dif-pcl-asm_64.S b/arch/x86/crypto/crct10dif-pcl-asm_64.S
index 3d873e67749d..b2533d63030e 100644
--- a/arch/x86/crypto/crct10dif-pcl-asm_64.S
+++ b/arch/x86/crypto/crct10dif-pcl-asm_64.S
@@ -95,7 +95,7 @@
# Assumes len >= 16.
#
.align 16
-ENTRY(crc_t10dif_pcl)
+SYM_FUNC_START(crc_t10dif_pcl)
movdqa .Lbswap_mask(%rip), BSWAP_MASK
@@ -280,7 +280,7 @@ ENTRY(crc_t10dif_pcl)
jge .Lfold_16_bytes_loop # 32 <= len <= 255
add $16, len
jmp .Lhandle_partial_segment # 17 <= len <= 31
-ENDPROC(crc_t10dif_pcl)
+SYM_FUNC_END(crc_t10dif_pcl)
.section .rodata, "a", @progbits
.align 16
diff --git a/arch/x86/crypto/curve25519-x86_64.c b/arch/x86/crypto/curve25519-x86_64.c
new file mode 100644
index 000000000000..a52a3fb15727
--- /dev/null
+++ b/arch/x86/crypto/curve25519-x86_64.c
@@ -0,0 +1,2475 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (c) 2017 Armando Faz <armfazh@ic.unicamp.br>. All Rights Reserved.
+ * Copyright (C) 2018-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ * Copyright (C) 2018 Samuel Neves <sneves@dei.uc.pt>. All Rights Reserved.
+ */
+
+#include <crypto/curve25519.h>
+#include <crypto/internal/kpp.h>
+
+#include <linux/types.h>
+#include <linux/jump_label.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include <asm/cpufeature.h>
+#include <asm/processor.h>
+
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(curve25519_use_bmi2);
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(curve25519_use_adx);
+
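+/* Field elements of GF(2^255 - 19) are stored as four 64-bit limbs; the
+ * _buffer variant holds a 512-bit multiplication result before it is
+ * reduced back to four limbs. */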
+enum { NUM_WORDS_ELTFP25519 = 4 };
+typedef __aligned(32) u64 eltfp25519_1w[NUM_WORDS_ELTFP25519];
+typedef __aligned(32) u64 eltfp25519_1w_buffer[2 * NUM_WORDS_ELTFP25519];
+
+#define mul_eltfp25519_1w_adx(c, a, b) do { \
+ mul_256x256_integer_adx(m.buffer, a, b); \
+ red_eltfp25519_1w_adx(c, m.buffer); \
+} while (0)
+
+#define mul_eltfp25519_1w_bmi2(c, a, b) do { \
+ mul_256x256_integer_bmi2(m.buffer, a, b); \
+ red_eltfp25519_1w_bmi2(c, m.buffer); \
+} while (0)
+
+#define sqr_eltfp25519_1w_adx(a) do { \
+ sqr_256x256_integer_adx(m.buffer, a); \
+ red_eltfp25519_1w_adx(a, m.buffer); \
+} while (0)
+
+#define sqr_eltfp25519_1w_bmi2(a) do { \
+ sqr_256x256_integer_bmi2(m.buffer, a); \
+ red_eltfp25519_1w_bmi2(a, m.buffer); \
+} while (0)
+
+#define mul_eltfp25519_2w_adx(c, a, b) do { \
+ mul2_256x256_integer_adx(m.buffer, a, b); \
+ red_eltfp25519_2w_adx(c, m.buffer); \
+} while (0)
+
+#define mul_eltfp25519_2w_bmi2(c, a, b) do { \
+ mul2_256x256_integer_bmi2(m.buffer, a, b); \
+ red_eltfp25519_2w_bmi2(c, m.buffer); \
+} while (0)
+
+#define sqr_eltfp25519_2w_adx(a) do { \
+ sqr2_256x256_integer_adx(m.buffer, a); \
+ red_eltfp25519_2w_adx(a, m.buffer); \
+} while (0)
+
+#define sqr_eltfp25519_2w_bmi2(a) do { \
+ sqr2_256x256_integer_bmi2(m.buffer, a); \
+ red_eltfp25519_2w_bmi2(a, m.buffer); \
+} while (0)
+
+#define sqrn_eltfp25519_1w_adx(a, times) do { \
+ int ____counter = (times); \
+ while (____counter-- > 0) \
+ sqr_eltfp25519_1w_adx(a); \
+} while (0)
+
+#define sqrn_eltfp25519_1w_bmi2(a, times) do { \
+ int ____counter = (times); \
+ while (____counter-- > 0) \
+ sqr_eltfp25519_1w_bmi2(a); \
+} while (0)
+
+#define copy_eltfp25519_1w(C, A) do { \
+ (C)[0] = (A)[0]; \
+ (C)[1] = (A)[1]; \
+ (C)[2] = (A)[2]; \
+ (C)[3] = (A)[3]; \
+} while (0)
+
+#define setzero_eltfp25519_1w(C) do { \
+ (C)[0] = 0; \
+ (C)[1] = 0; \
+ (C)[2] = 0; \
+ (C)[3] = 0; \
+} while (0)
+
+__aligned(32) static const u64 table_ladder_8k[252 * NUM_WORDS_ELTFP25519] = {
+ /* 1 */ 0xfffffffffffffff3UL, 0xffffffffffffffffUL,
+ 0xffffffffffffffffUL, 0x5fffffffffffffffUL,
+ /* 2 */ 0x6b8220f416aafe96UL, 0x82ebeb2b4f566a34UL,
+ 0xd5a9a5b075a5950fUL, 0x5142b2cf4b2488f4UL,
+ /* 3 */ 0x6aaebc750069680cUL, 0x89cf7820a0f99c41UL,
+ 0x2a58d9183b56d0f4UL, 0x4b5aca80e36011a4UL,
+ /* 4 */ 0x329132348c29745dUL, 0xf4a2e616e1642fd7UL,
+ 0x1e45bb03ff67bc34UL, 0x306912d0f42a9b4aUL,
+ /* 5 */ 0xff886507e6af7154UL, 0x04f50e13dfeec82fUL,
+ 0xaa512fe82abab5ceUL, 0x174e251a68d5f222UL,
+ /* 6 */ 0xcf96700d82028898UL, 0x1743e3370a2c02c5UL,
+ 0x379eec98b4e86eaaUL, 0x0c59888a51e0482eUL,
+ /* 7 */ 0xfbcbf1d699b5d189UL, 0xacaef0d58e9fdc84UL,
+ 0xc1c20d06231f7614UL, 0x2938218da274f972UL,
+ /* 8 */ 0xf6af49beff1d7f18UL, 0xcc541c22387ac9c2UL,
+ 0x96fcc9ef4015c56bUL, 0x69c1627c690913a9UL,
+ /* 9 */ 0x7a86fd2f4733db0eUL, 0xfdb8c4f29e087de9UL,
+ 0x095e4b1a8ea2a229UL, 0x1ad7a7c829b37a79UL,
+ /* 10 */ 0x342d89cad17ea0c0UL, 0x67bedda6cced2051UL,
+ 0x19ca31bf2bb42f74UL, 0x3df7b4c84980acbbUL,
+ /* 11 */ 0xa8c6444dc80ad883UL, 0xb91e440366e3ab85UL,
+ 0xc215cda00164f6d8UL, 0x3d867c6ef247e668UL,
+ /* 12 */ 0xc7dd582bcc3e658cUL, 0xfd2c4748ee0e5528UL,
+ 0xa0fd9b95cc9f4f71UL, 0x7529d871b0675ddfUL,
+ /* 13 */ 0xb8f568b42d3cbd78UL, 0x1233011b91f3da82UL,
+ 0x2dce6ccd4a7c3b62UL, 0x75e7fc8e9e498603UL,
+ /* 14 */ 0x2f4f13f1fcd0b6ecUL, 0xf1a8ca1f29ff7a45UL,
+ 0xc249c1a72981e29bUL, 0x6ebe0dbb8c83b56aUL,
+ /* 15 */ 0x7114fa8d170bb222UL, 0x65a2dcd5bf93935fUL,
+ 0xbdc41f68b59c979aUL, 0x2f0eef79a2ce9289UL,
+ /* 16 */ 0x42ecbf0c083c37ceUL, 0x2930bc09ec496322UL,
+ 0xf294b0c19cfeac0dUL, 0x3780aa4bedfabb80UL,
+ /* 17 */ 0x56c17d3e7cead929UL, 0xe7cb4beb2e5722c5UL,
+ 0x0ce931732dbfe15aUL, 0x41b883c7621052f8UL,
+ /* 18 */ 0xdbf75ca0c3d25350UL, 0x2936be086eb1e351UL,
+ 0xc936e03cb4a9b212UL, 0x1d45bf82322225aaUL,
+ /* 19 */ 0xe81ab1036a024cc5UL, 0xe212201c304c9a72UL,
+ 0xc5d73fba6832b1fcUL, 0x20ffdb5a4d839581UL,
+ /* 20 */ 0xa283d367be5d0fadUL, 0x6c2b25ca8b164475UL,
+ 0x9d4935467caaf22eUL, 0x5166408eee85ff49UL,
+ /* 21 */ 0x3c67baa2fab4e361UL, 0xb3e433c67ef35cefUL,
+ 0x5259729241159b1cUL, 0x6a621892d5b0ab33UL,
+ /* 22 */ 0x20b74a387555cdcbUL, 0x532aa10e1208923fUL,
+ 0xeaa17b7762281dd1UL, 0x61ab3443f05c44bfUL,
+ /* 23 */ 0x257a6c422324def8UL, 0x131c6c1017e3cf7fUL,
+ 0x23758739f630a257UL, 0x295a407a01a78580UL,
+ /* 24 */ 0xf8c443246d5da8d9UL, 0x19d775450c52fa5dUL,
+ 0x2afcfc92731bf83dUL, 0x7d10c8e81b2b4700UL,
+ /* 25 */ 0xc8e0271f70baa20bUL, 0x993748867ca63957UL,
+ 0x5412efb3cb7ed4bbUL, 0x3196d36173e62975UL,
+ /* 26 */ 0xde5bcad141c7dffcUL, 0x47cc8cd2b395c848UL,
+ 0xa34cd942e11af3cbUL, 0x0256dbf2d04ecec2UL,
+ /* 27 */ 0x875ab7e94b0e667fUL, 0xcad4dd83c0850d10UL,
+ 0x47f12e8f4e72c79fUL, 0x5f1a87bb8c85b19bUL,
+ /* 28 */ 0x7ae9d0b6437f51b8UL, 0x12c7ce5518879065UL,
+ 0x2ade09fe5cf77aeeUL, 0x23a05a2f7d2c5627UL,
+ /* 29 */ 0x5908e128f17c169aUL, 0xf77498dd8ad0852dUL,
+ 0x74b4c4ceab102f64UL, 0x183abadd10139845UL,
+ /* 30 */ 0xb165ba8daa92aaacUL, 0xd5c5ef9599386705UL,
+ 0xbe2f8f0cf8fc40d1UL, 0x2701e635ee204514UL,
+ /* 31 */ 0x629fa80020156514UL, 0xf223868764a8c1ceUL,
+ 0x5b894fff0b3f060eUL, 0x60d9944cf708a3faUL,
+ /* 32 */ 0xaeea001a1c7a201fUL, 0xebf16a633ee2ce63UL,
+ 0x6f7709594c7a07e1UL, 0x79b958150d0208cbUL,
+ /* 33 */ 0x24b55e5301d410e7UL, 0xe3a34edff3fdc84dUL,
+ 0xd88768e4904032d8UL, 0x131384427b3aaeecUL,
+ /* 34 */ 0x8405e51286234f14UL, 0x14dc4739adb4c529UL,
+ 0xb8a2b5b250634ffdUL, 0x2fe2a94ad8a7ff93UL,
+ /* 35 */ 0xec5c57efe843faddUL, 0x2843ce40f0bb9918UL,
+ 0xa4b561d6cf3d6305UL, 0x743629bde8fb777eUL,
+ /* 36 */ 0x343edd46bbaf738fUL, 0xed981828b101a651UL,
+ 0xa401760b882c797aUL, 0x1fc223e28dc88730UL,
+ /* 37 */ 0x48604e91fc0fba0eUL, 0xb637f78f052c6fa4UL,
+ 0x91ccac3d09e9239cUL, 0x23f7eed4437a687cUL,
+ /* 38 */ 0x5173b1118d9bd800UL, 0x29d641b63189d4a7UL,
+ 0xfdbf177988bbc586UL, 0x2959894fcad81df5UL,
+ /* 39 */ 0xaebc8ef3b4bbc899UL, 0x4148995ab26992b9UL,
+ 0x24e20b0134f92cfbUL, 0x40d158894a05dee8UL,
+ /* 40 */ 0x46b00b1185af76f6UL, 0x26bac77873187a79UL,
+ 0x3dc0bf95ab8fff5fUL, 0x2a608bd8945524d7UL,
+ /* 41 */ 0x26449588bd446302UL, 0x7c4bc21c0388439cUL,
+ 0x8e98a4f383bd11b2UL, 0x26218d7bc9d876b9UL,
+ /* 42 */ 0xe3081542997c178aUL, 0x3c2d29a86fb6606fUL,
+ 0x5c217736fa279374UL, 0x7dde05734afeb1faUL,
+ /* 43 */ 0x3bf10e3906d42babUL, 0xe4f7803e1980649cUL,
+ 0xe6053bf89595bf7aUL, 0x394faf38da245530UL,
+ /* 44 */ 0x7a8efb58896928f4UL, 0xfbc778e9cc6a113cUL,
+ 0x72670ce330af596fUL, 0x48f222a81d3d6cf7UL,
+ /* 45 */ 0xf01fce410d72caa7UL, 0x5a20ecc7213b5595UL,
+ 0x7bc21165c1fa1483UL, 0x07f89ae31da8a741UL,
+ /* 46 */ 0x05d2c2b4c6830ff9UL, 0xd43e330fc6316293UL,
+ 0xa5a5590a96d3a904UL, 0x705edb91a65333b6UL,
+ /* 47 */ 0x048ee15e0bb9a5f7UL, 0x3240cfca9e0aaf5dUL,
+ 0x8f4b71ceedc4a40bUL, 0x621c0da3de544a6dUL,
+ /* 48 */ 0x92872836a08c4091UL, 0xce8375b010c91445UL,
+ 0x8a72eb524f276394UL, 0x2667fcfa7ec83635UL,
+ /* 49 */ 0x7f4c173345e8752aUL, 0x061b47feee7079a5UL,
+ 0x25dd9afa9f86ff34UL, 0x3780cef5425dc89cUL,
+ /* 50 */ 0x1a46035a513bb4e9UL, 0x3e1ef379ac575adaUL,
+ 0xc78c5f1c5fa24b50UL, 0x321a967634fd9f22UL,
+ /* 51 */ 0x946707b8826e27faUL, 0x3dca84d64c506fd0UL,
+ 0xc189218075e91436UL, 0x6d9284169b3b8484UL,
+ /* 52 */ 0x3a67e840383f2ddfUL, 0x33eec9a30c4f9b75UL,
+ 0x3ec7c86fa783ef47UL, 0x26ec449fbac9fbc4UL,
+ /* 53 */ 0x5c0f38cba09b9e7dUL, 0x81168cc762a3478cUL,
+ 0x3e23b0d306fc121cUL, 0x5a238aa0a5efdcddUL,
+ /* 54 */ 0x1ba26121c4ea43ffUL, 0x36f8c77f7c8832b5UL,
+ 0x88fbea0b0adcf99aUL, 0x5ca9938ec25bebf9UL,
+ /* 55 */ 0xd5436a5e51fccda0UL, 0x1dbc4797c2cd893bUL,
+ 0x19346a65d3224a08UL, 0x0f5034e49b9af466UL,
+ /* 56 */ 0xf23c3967a1e0b96eUL, 0xe58b08fa867a4d88UL,
+ 0xfb2fabc6a7341679UL, 0x2a75381eb6026946UL,
+ /* 57 */ 0xc80a3be4c19420acUL, 0x66b1f6c681f2b6dcUL,
+ 0x7cf7036761e93388UL, 0x25abbbd8a660a4c4UL,
+ /* 58 */ 0x91ea12ba14fd5198UL, 0x684950fc4a3cffa9UL,
+ 0xf826842130f5ad28UL, 0x3ea988f75301a441UL,
+ /* 59 */ 0xc978109a695f8c6fUL, 0x1746eb4a0530c3f3UL,
+ 0x444d6d77b4459995UL, 0x75952b8c054e5cc7UL,
+ /* 60 */ 0xa3703f7915f4d6aaUL, 0x66c346202f2647d8UL,
+ 0xd01469df811d644bUL, 0x77fea47d81a5d71fUL,
+ /* 61 */ 0xc5e9529ef57ca381UL, 0x6eeeb4b9ce2f881aUL,
+ 0xb6e91a28e8009bd6UL, 0x4b80be3e9afc3fecUL,
+ /* 62 */ 0x7e3773c526aed2c5UL, 0x1b4afcb453c9a49dUL,
+ 0xa920bdd7baffb24dUL, 0x7c54699f122d400eUL,
+ /* 63 */ 0xef46c8e14fa94bc8UL, 0xe0b074ce2952ed5eUL,
+ 0xbea450e1dbd885d5UL, 0x61b68649320f712cUL,
+ /* 64 */ 0x8a485f7309ccbdd1UL, 0xbd06320d7d4d1a2dUL,
+ 0x25232973322dbef4UL, 0x445dc4758c17f770UL,
+ /* 65 */ 0xdb0434177cc8933cUL, 0xed6fe82175ea059fUL,
+ 0x1efebefdc053db34UL, 0x4adbe867c65daf99UL,
+ /* 66 */ 0x3acd71a2a90609dfUL, 0xe5e991856dd04050UL,
+ 0x1ec69b688157c23cUL, 0x697427f6885cfe4dUL,
+ /* 67 */ 0xd7be7b9b65e1a851UL, 0xa03d28d522c536ddUL,
+ 0x28399d658fd2b645UL, 0x49e5b7e17c2641e1UL,
+ /* 68 */ 0x6f8c3a98700457a4UL, 0x5078f0a25ebb6778UL,
+ 0xd13c3ccbc382960fUL, 0x2e003258a7df84b1UL,
+ /* 69 */ 0x8ad1f39be6296a1cUL, 0xc1eeaa652a5fbfb2UL,
+ 0x33ee0673fd26f3cbUL, 0x59256173a69d2cccUL,
+ /* 70 */ 0x41ea07aa4e18fc41UL, 0xd9fc19527c87a51eUL,
+ 0xbdaacb805831ca6fUL, 0x445b652dc916694fUL,
+ /* 71 */ 0xce92a3a7f2172315UL, 0x1edc282de11b9964UL,
+ 0xa1823aafe04c314aUL, 0x790a2d94437cf586UL,
+ /* 72 */ 0x71c447fb93f6e009UL, 0x8922a56722845276UL,
+ 0xbf70903b204f5169UL, 0x2f7a89891ba319feUL,
+ /* 73 */ 0x02a08eb577e2140cUL, 0xed9a4ed4427bdcf4UL,
+ 0x5253ec44e4323cd1UL, 0x3e88363c14e9355bUL,
+ /* 74 */ 0xaa66c14277110b8cUL, 0x1ae0391610a23390UL,
+ 0x2030bd12c93fc2a2UL, 0x3ee141579555c7abUL,
+ /* 75 */ 0x9214de3a6d6e7d41UL, 0x3ccdd88607f17efeUL,
+ 0x674f1288f8e11217UL, 0x5682250f329f93d0UL,
+ /* 76 */ 0x6cf00b136d2e396eUL, 0x6e4cf86f1014debfUL,
+ 0x5930b1b5bfcc4e83UL, 0x047069b48aba16b6UL,
+ /* 77 */ 0x0d4ce4ab69b20793UL, 0xb24db91a97d0fb9eUL,
+ 0xcdfa50f54e00d01dUL, 0x221b1085368bddb5UL,
+ /* 78 */ 0xe7e59468b1e3d8d2UL, 0x53c56563bd122f93UL,
+ 0xeee8a903e0663f09UL, 0x61efa662cbbe3d42UL,
+ /* 79 */ 0x2cf8ddddde6eab2aUL, 0x9bf80ad51435f231UL,
+ 0x5deadacec9f04973UL, 0x29275b5d41d29b27UL,
+ /* 80 */ 0xcfde0f0895ebf14fUL, 0xb9aab96b054905a7UL,
+ 0xcae80dd9a1c420fdUL, 0x0a63bf2f1673bbc7UL,
+ /* 81 */ 0x092f6e11958fbc8cUL, 0x672a81e804822fadUL,
+ 0xcac8351560d52517UL, 0x6f3f7722c8f192f8UL,
+ /* 82 */ 0xf8ba90ccc2e894b7UL, 0x2c7557a438ff9f0dUL,
+ 0x894d1d855ae52359UL, 0x68e122157b743d69UL,
+ /* 83 */ 0xd87e5570cfb919f3UL, 0x3f2cdecd95798db9UL,
+ 0x2121154710c0a2ceUL, 0x3c66a115246dc5b2UL,
+ /* 84 */ 0xcbedc562294ecb72UL, 0xba7143c36a280b16UL,
+ 0x9610c2efd4078b67UL, 0x6144735d946a4b1eUL,
+ /* 85 */ 0x536f111ed75b3350UL, 0x0211db8c2041d81bUL,
+ 0xf93cb1000e10413cUL, 0x149dfd3c039e8876UL,
+ /* 86 */ 0xd479dde46b63155bUL, 0xb66e15e93c837976UL,
+ 0xdafde43b1f13e038UL, 0x5fafda1a2e4b0b35UL,
+ /* 87 */ 0x3600bbdf17197581UL, 0x3972050bbe3cd2c2UL,
+ 0x5938906dbdd5be86UL, 0x34fce5e43f9b860fUL,
+ /* 88 */ 0x75a8a4cd42d14d02UL, 0x828dabc53441df65UL,
+ 0x33dcabedd2e131d3UL, 0x3ebad76fb814d25fUL,
+ /* 89 */ 0xd4906f566f70e10fUL, 0x5d12f7aa51690f5aUL,
+ 0x45adb16e76cefcf2UL, 0x01f768aead232999UL,
+ /* 90 */ 0x2b6cc77b6248febdUL, 0x3cd30628ec3aaffdUL,
+ 0xce1c0b80d4ef486aUL, 0x4c3bff2ea6f66c23UL,
+ /* 91 */ 0x3f2ec4094aeaeb5fUL, 0x61b19b286e372ca7UL,
+ 0x5eefa966de2a701dUL, 0x23b20565de55e3efUL,
+ /* 92 */ 0xe301ca5279d58557UL, 0x07b2d4ce27c2874fUL,
+ 0xa532cd8a9dcf1d67UL, 0x2a52fee23f2bff56UL,
+ /* 93 */ 0x8624efb37cd8663dUL, 0xbbc7ac20ffbd7594UL,
+ 0x57b85e9c82d37445UL, 0x7b3052cb86a6ec66UL,
+ /* 94 */ 0x3482f0ad2525e91eUL, 0x2cb68043d28edca0UL,
+ 0xaf4f6d052e1b003aUL, 0x185f8c2529781b0aUL,
+ /* 95 */ 0xaa41de5bd80ce0d6UL, 0x9407b2416853e9d6UL,
+ 0x563ec36e357f4c3aUL, 0x4cc4b8dd0e297bceUL,
+ /* 96 */ 0xa2fc1a52ffb8730eUL, 0x1811f16e67058e37UL,
+ 0x10f9a366cddf4ee1UL, 0x72f4a0c4a0b9f099UL,
+ /* 97 */ 0x8c16c06f663f4ea7UL, 0x693b3af74e970fbaUL,
+ 0x2102e7f1d69ec345UL, 0x0ba53cbc968a8089UL,
+ /* 98 */ 0xca3d9dc7fea15537UL, 0x4c6824bb51536493UL,
+ 0xb9886314844006b1UL, 0x40d2a72ab454cc60UL,
+ /* 99 */ 0x5936a1b712570975UL, 0x91b9d648debda657UL,
+ 0x3344094bb64330eaUL, 0x006ba10d12ee51d0UL,
+ /* 100 */ 0x19228468f5de5d58UL, 0x0eb12f4c38cc05b0UL,
+ 0xa1039f9dd5601990UL, 0x4502d4ce4fff0e0bUL,
+ /* 101 */ 0xeb2054106837c189UL, 0xd0f6544c6dd3b93cUL,
+ 0x40727064c416d74fUL, 0x6e15c6114b502ef0UL,
+ /* 102 */ 0x4df2a398cfb1a76bUL, 0x11256c7419f2f6b1UL,
+ 0x4a497962066e6043UL, 0x705b3aab41355b44UL,
+ /* 103 */ 0x365ef536d797b1d8UL, 0x00076bd622ddf0dbUL,
+ 0x3bbf33b0e0575a88UL, 0x3777aa05c8e4ca4dUL,
+ /* 104 */ 0x392745c85578db5fUL, 0x6fda4149dbae5ae2UL,
+ 0xb1f0b00b8adc9867UL, 0x09963437d36f1da3UL,
+ /* 105 */ 0x7e824e90a5dc3853UL, 0xccb5f6641f135cbdUL,
+ 0x6736d86c87ce8fccUL, 0x625f3ce26604249fUL,
+ /* 106 */ 0xaf8ac8059502f63fUL, 0x0c05e70a2e351469UL,
+ 0x35292e9c764b6305UL, 0x1a394360c7e23ac3UL,
+ /* 107 */ 0xd5c6d53251183264UL, 0x62065abd43c2b74fUL,
+ 0xb5fbf5d03b973f9bUL, 0x13a3da3661206e5eUL,
+ /* 108 */ 0xc6bd5837725d94e5UL, 0x18e30912205016c5UL,
+ 0x2088ce1570033c68UL, 0x7fba1f495c837987UL,
+ /* 109 */ 0x5a8c7423f2f9079dUL, 0x1735157b34023fc5UL,
+ 0xe4f9b49ad2fab351UL, 0x6691ff72c878e33cUL,
+ /* 110 */ 0x122c2adedc5eff3eUL, 0xf8dd4bf1d8956cf4UL,
+ 0xeb86205d9e9e5bdaUL, 0x049b92b9d975c743UL,
+ /* 111 */ 0xa5379730b0f6c05aUL, 0x72a0ffacc6f3a553UL,
+ 0xb0032c34b20dcd6dUL, 0x470e9dbc88d5164aUL,
+ /* 112 */ 0xb19cf10ca237c047UL, 0xb65466711f6c81a2UL,
+ 0xb3321bd16dd80b43UL, 0x48c14f600c5fbe8eUL,
+ /* 113 */ 0x66451c264aa6c803UL, 0xb66e3904a4fa7da6UL,
+ 0xd45f19b0b3128395UL, 0x31602627c3c9bc10UL,
+ /* 114 */ 0x3120dc4832e4e10dUL, 0xeb20c46756c717f7UL,
+ 0x00f52e3f67280294UL, 0x566d4fc14730c509UL,
+ /* 115 */ 0x7e3a5d40fd837206UL, 0xc1e926dc7159547aUL,
+ 0x216730fba68d6095UL, 0x22e8c3843f69cea7UL,
+ /* 116 */ 0x33d074e8930e4b2bUL, 0xb6e4350e84d15816UL,
+ 0x5534c26ad6ba2365UL, 0x7773c12f89f1f3f3UL,
+ /* 117 */ 0x8cba404da57962aaUL, 0x5b9897a81999ce56UL,
+ 0x508e862f121692fcUL, 0x3a81907fa093c291UL,
+ /* 118 */ 0x0dded0ff4725a510UL, 0x10d8cc10673fc503UL,
+ 0x5b9d151c9f1f4e89UL, 0x32a5c1d5cb09a44cUL,
+ /* 119 */ 0x1e0aa442b90541fbUL, 0x5f85eb7cc1b485dbUL,
+ 0xbee595ce8a9df2e5UL, 0x25e496c722422236UL,
+ /* 120 */ 0x5edf3c46cd0fe5b9UL, 0x34e75a7ed2a43388UL,
+ 0xe488de11d761e352UL, 0x0e878a01a085545cUL,
+ /* 121 */ 0xba493c77e021bb04UL, 0x2b4d1843c7df899aUL,
+ 0x9ea37a487ae80d67UL, 0x67a9958011e41794UL,
+ /* 122 */ 0x4b58051a6697b065UL, 0x47e33f7d8d6ba6d4UL,
+ 0xbb4da8d483ca46c1UL, 0x68becaa181c2db0dUL,
+ /* 123 */ 0x8d8980e90b989aa5UL, 0xf95eb14a2c93c99bUL,
+ 0x51c6c7c4796e73a2UL, 0x6e228363b5efb569UL,
+ /* 124 */ 0xc6bbc0b02dd624c8UL, 0x777eb47dec8170eeUL,
+ 0x3cde15a004cfafa9UL, 0x1dc6bc087160bf9bUL,
+ /* 125 */ 0x2e07e043eec34002UL, 0x18e9fc677a68dc7fUL,
+ 0xd8da03188bd15b9aUL, 0x48fbc3bb00568253UL,
+ /* 126 */ 0x57547d4cfb654ce1UL, 0xd3565b82a058e2adUL,
+ 0xf63eaf0bbf154478UL, 0x47531ef114dfbb18UL,
+ /* 127 */ 0xe1ec630a4278c587UL, 0x5507d546ca8e83f3UL,
+ 0x85e135c63adc0c2bUL, 0x0aa7efa85682844eUL,
+ /* 128 */ 0x72691ba8b3e1f615UL, 0x32b4e9701fbe3ffaUL,
+ 0x97b6d92e39bb7868UL, 0x2cfe53dea02e39e8UL,
+ /* 129 */ 0x687392cd85cd52b0UL, 0x27ff66c910e29831UL,
+ 0x97134556a9832d06UL, 0x269bb0360a84f8a0UL,
+ /* 130 */ 0x706e55457643f85cUL, 0x3734a48c9b597d1bUL,
+ 0x7aee91e8c6efa472UL, 0x5cd6abc198a9d9e0UL,
+ /* 131 */ 0x0e04de06cb3ce41aUL, 0xd8c6eb893402e138UL,
+ 0x904659bb686e3772UL, 0x7215c371746ba8c8UL,
+ /* 132 */ 0xfd12a97eeae4a2d9UL, 0x9514b7516394f2c5UL,
+ 0x266fd5809208f294UL, 0x5c847085619a26b9UL,
+ /* 133 */ 0x52985410fed694eaUL, 0x3c905b934a2ed254UL,
+ 0x10bb47692d3be467UL, 0x063b3d2d69e5e9e1UL,
+ /* 134 */ 0x472726eedda57debUL, 0xefb6c4ae10f41891UL,
+ 0x2b1641917b307614UL, 0x117c554fc4f45b7cUL,
+ /* 135 */ 0xc07cf3118f9d8812UL, 0x01dbd82050017939UL,
+ 0xd7e803f4171b2827UL, 0x1015e87487d225eaUL,
+ /* 136 */ 0xc58de3fed23acc4dUL, 0x50db91c294a7be2dUL,
+ 0x0b94d43d1c9cf457UL, 0x6b1640fa6e37524aUL,
+ /* 137 */ 0x692f346c5fda0d09UL, 0x200b1c59fa4d3151UL,
+ 0xb8c46f760777a296UL, 0x4b38395f3ffdfbcfUL,
+ /* 138 */ 0x18d25e00be54d671UL, 0x60d50582bec8aba6UL,
+ 0x87ad8f263b78b982UL, 0x50fdf64e9cda0432UL,
+ /* 139 */ 0x90f567aac578dcf0UL, 0xef1e9b0ef2a3133bUL,
+ 0x0eebba9242d9de71UL, 0x15473c9bf03101c7UL,
+ /* 140 */ 0x7c77e8ae56b78095UL, 0xb678e7666e6f078eUL,
+ 0x2da0b9615348ba1fUL, 0x7cf931c1ff733f0bUL,
+ /* 141 */ 0x26b357f50a0a366cUL, 0xe9708cf42b87d732UL,
+ 0xc13aeea5f91cb2c0UL, 0x35d90c991143bb4cUL,
+ /* 142 */ 0x47c1c404a9a0d9dcUL, 0x659e58451972d251UL,
+ 0x3875a8c473b38c31UL, 0x1fbd9ed379561f24UL,
+ /* 143 */ 0x11fabc6fd41ec28dUL, 0x7ef8dfe3cd2a2dcaUL,
+ 0x72e73b5d8c404595UL, 0x6135fa4954b72f27UL,
+ /* 144 */ 0xccfc32a2de24b69cUL, 0x3f55698c1f095d88UL,
+ 0xbe3350ed5ac3f929UL, 0x5e9bf806ca477eebUL,
+ /* 145 */ 0xe9ce8fb63c309f68UL, 0x5376f63565e1f9f4UL,
+ 0xd1afcfb35a6393f1UL, 0x6632a1ede5623506UL,
+ /* 146 */ 0x0b7d6c390c2ded4cUL, 0x56cb3281df04cb1fUL,
+ 0x66305a1249ecc3c7UL, 0x5d588b60a38ca72aUL,
+ /* 147 */ 0xa6ecbf78e8e5f42dUL, 0x86eeb44b3c8a3eecUL,
+ 0xec219c48fbd21604UL, 0x1aaf1af517c36731UL,
+ /* 148 */ 0xc306a2836769bde7UL, 0x208280622b1e2adbUL,
+ 0x8027f51ffbff94a6UL, 0x76cfa1ce1124f26bUL,
+ /* 149 */ 0x18eb00562422abb6UL, 0xf377c4d58f8c29c3UL,
+ 0x4dbbc207f531561aUL, 0x0253b7f082128a27UL,
+ /* 150 */ 0x3d1f091cb62c17e0UL, 0x4860e1abd64628a9UL,
+ 0x52d17436309d4253UL, 0x356f97e13efae576UL,
+ /* 151 */ 0xd351e11aa150535bUL, 0x3e6b45bb1dd878ccUL,
+ 0x0c776128bed92c98UL, 0x1d34ae93032885b8UL,
+ /* 152 */ 0x4ba0488ca85ba4c3UL, 0x985348c33c9ce6ceUL,
+ 0x66124c6f97bda770UL, 0x0f81a0290654124aUL,
+ /* 153 */ 0x9ed09ca6569b86fdUL, 0x811009fd18af9a2dUL,
+ 0xff08d03f93d8c20aUL, 0x52a148199faef26bUL,
+ /* 154 */ 0x3e03f9dc2d8d1b73UL, 0x4205801873961a70UL,
+ 0xc0d987f041a35970UL, 0x07aa1f15a1c0d549UL,
+ /* 155 */ 0xdfd46ce08cd27224UL, 0x6d0a024f934e4239UL,
+ 0x808a7a6399897b59UL, 0x0a4556e9e13d95a2UL,
+ /* 156 */ 0xd21a991fe9c13045UL, 0x9b0e8548fe7751b8UL,
+ 0x5da643cb4bf30035UL, 0x77db28d63940f721UL,
+ /* 157 */ 0xfc5eeb614adc9011UL, 0x5229419ae8c411ebUL,
+ 0x9ec3e7787d1dcf74UL, 0x340d053e216e4cb5UL,
+ /* 158 */ 0xcac7af39b48df2b4UL, 0xc0faec2871a10a94UL,
+ 0x140a69245ca575edUL, 0x0cf1c37134273a4cUL,
+ /* 159 */ 0xc8ee306ac224b8a5UL, 0x57eaee7ccb4930b0UL,
+ 0xa1e806bdaacbe74fUL, 0x7d9a62742eeb657dUL,
+ /* 160 */ 0x9eb6b6ef546c4830UL, 0x885cca1fddb36e2eUL,
+ 0xe6b9f383ef0d7105UL, 0x58654fef9d2e0412UL,
+ /* 161 */ 0xa905c4ffbe0e8e26UL, 0x942de5df9b31816eUL,
+ 0x497d723f802e88e1UL, 0x30684dea602f408dUL,
+ /* 162 */ 0x21e5a278a3e6cb34UL, 0xaefb6e6f5b151dc4UL,
+ 0xb30b8e049d77ca15UL, 0x28c3c9cf53b98981UL,
+ /* 163 */ 0x287fb721556cdd2aUL, 0x0d317ca897022274UL,
+ 0x7468c7423a543258UL, 0x4a7f11464eb5642fUL,
+ /* 164 */ 0xa237a4774d193aa6UL, 0xd865986ea92129a1UL,
+ 0x24c515ecf87c1a88UL, 0x604003575f39f5ebUL,
+ /* 165 */ 0x47b9f189570a9b27UL, 0x2b98cede465e4b78UL,
+ 0x026df551dbb85c20UL, 0x74fcd91047e21901UL,
+ /* 166 */ 0x13e2a90a23c1bfa3UL, 0x0cb0074e478519f6UL,
+ 0x5ff1cbbe3af6cf44UL, 0x67fe5438be812dbeUL,
+ /* 167 */ 0xd13cf64fa40f05b0UL, 0x054dfb2f32283787UL,
+ 0x4173915b7f0d2aeaUL, 0x482f144f1f610d4eUL,
+ /* 168 */ 0xf6210201b47f8234UL, 0x5d0ae1929e70b990UL,
+ 0xdcd7f455b049567cUL, 0x7e93d0f1f0916f01UL,
+ /* 169 */ 0xdd79cbf18a7db4faUL, 0xbe8391bf6f74c62fUL,
+ 0x027145d14b8291bdUL, 0x585a73ea2cbf1705UL,
+ /* 170 */ 0x485ca03e928a0db2UL, 0x10fc01a5742857e7UL,
+ 0x2f482edbd6d551a7UL, 0x0f0433b5048fdb8aUL,
+ /* 171 */ 0x60da2e8dd7dc6247UL, 0x88b4c9d38cd4819aUL,
+ 0x13033ac001f66697UL, 0x273b24fe3b367d75UL,
+ /* 172 */ 0xc6e8f66a31b3b9d4UL, 0x281514a494df49d5UL,
+ 0xd1726fdfc8b23da7UL, 0x4b3ae7d103dee548UL,
+ /* 173 */ 0xc6256e19ce4b9d7eUL, 0xff5c5cf186e3c61cUL,
+ 0xacc63ca34b8ec145UL, 0x74621888fee66574UL,
+ /* 174 */ 0x956f409645290a1eUL, 0xef0bf8e3263a962eUL,
+ 0xed6a50eb5ec2647bUL, 0x0694283a9dca7502UL,
+ /* 175 */ 0x769b963643a2dcd1UL, 0x42b7c8ea09fc5353UL,
+ 0x4f002aee13397eabUL, 0x63005e2c19b7d63aUL,
+ /* 176 */ 0xca6736da63023beaUL, 0x966c7f6db12a99b7UL,
+ 0xace09390c537c5e1UL, 0x0b696063a1aa89eeUL,
+ /* 177 */ 0xebb03e97288c56e5UL, 0x432a9f9f938c8be8UL,
+ 0xa6a5a93d5b717f71UL, 0x1a5fb4c3e18f9d97UL,
+ /* 178 */ 0x1c94e7ad1c60cdceUL, 0xee202a43fc02c4a0UL,
+ 0x8dafe4d867c46a20UL, 0x0a10263c8ac27b58UL,
+ /* 179 */ 0xd0dea9dfe4432a4aUL, 0x856af87bbe9277c5UL,
+ 0xce8472acc212c71aUL, 0x6f151b6d9bbb1e91UL,
+ /* 180 */ 0x26776c527ceed56aUL, 0x7d211cb7fbf8faecUL,
+ 0x37ae66a6fd4609ccUL, 0x1f81b702d2770c42UL,
+ /* 181 */ 0x2fb0b057eac58392UL, 0xe1dd89fe29744e9dUL,
+ 0xc964f8eb17beb4f8UL, 0x29571073c9a2d41eUL,
+ /* 182 */ 0xa948a18981c0e254UL, 0x2df6369b65b22830UL,
+ 0xa33eb2d75fcfd3c6UL, 0x078cd6ec4199a01fUL,
+ /* 183 */ 0x4a584a41ad900d2fUL, 0x32142b78e2c74c52UL,
+ 0x68c4e8338431c978UL, 0x7f69ea9008689fc2UL,
+ /* 184 */ 0x52f2c81e46a38265UL, 0xfd78072d04a832fdUL,
+ 0x8cd7d5fa25359e94UL, 0x4de71b7454cc29d2UL,
+ /* 185 */ 0x42eb60ad1eda6ac9UL, 0x0aad37dfdbc09c3aUL,
+ 0x81004b71e33cc191UL, 0x44e6be345122803cUL,
+ /* 186 */ 0x03fe8388ba1920dbUL, 0xf5d57c32150db008UL,
+ 0x49c8c4281af60c29UL, 0x21edb518de701aeeUL,
+ /* 187 */ 0x7fb63e418f06dc99UL, 0xa4460d99c166d7b8UL,
+ 0x24dd5248ce520a83UL, 0x5ec3ad712b928358UL,
+ /* 188 */ 0x15022a5fbd17930fUL, 0xa4f64a77d82570e3UL,
+ 0x12bc8d6915783712UL, 0x498194c0fc620abbUL,
+ /* 189 */ 0x38a2d9d255686c82UL, 0x785c6bd9193e21f0UL,
+ 0xe4d5c81ab24a5484UL, 0x56307860b2e20989UL,
+ /* 190 */ 0x429d55f78b4d74c4UL, 0x22f1834643350131UL,
+ 0x1e60c24598c71fffUL, 0x59f2f014979983efUL,
+ /* 191 */ 0x46a47d56eb494a44UL, 0x3e22a854d636a18eUL,
+ 0xb346e15274491c3bUL, 0x2ceafd4e5390cde7UL,
+ /* 192 */ 0xba8a8538be0d6675UL, 0x4b9074bb50818e23UL,
+ 0xcbdab89085d304c3UL, 0x61a24fe0e56192c4UL,
+ /* 193 */ 0xcb7615e6db525bcbUL, 0xdd7d8c35a567e4caUL,
+ 0xe6b4153acafcdd69UL, 0x2d668e097f3c9766UL,
+ /* 194 */ 0xa57e7e265ce55ef0UL, 0x5d9f4e527cd4b967UL,
+ 0xfbc83606492fd1e5UL, 0x090d52beb7c3f7aeUL,
+ /* 195 */ 0x09b9515a1e7b4d7cUL, 0x1f266a2599da44c0UL,
+ 0xa1c49548e2c55504UL, 0x7ef04287126f15ccUL,
+ /* 196 */ 0xfed1659dbd30ef15UL, 0x8b4ab9eec4e0277bUL,
+ 0x884d6236a5df3291UL, 0x1fd96ea6bf5cf788UL,
+ /* 197 */ 0x42a161981f190d9aUL, 0x61d849507e6052c1UL,
+ 0x9fe113bf285a2cd5UL, 0x7c22d676dbad85d8UL,
+ /* 198 */ 0x82e770ed2bfbd27dUL, 0x4c05b2ece996f5a5UL,
+ 0xcd40a9c2b0900150UL, 0x5895319213d9bf64UL,
+ /* 199 */ 0xe7cc5d703fea2e08UL, 0xb50c491258e2188cUL,
+ 0xcce30baa48205bf0UL, 0x537c659ccfa32d62UL,
+ /* 200 */ 0x37b6623a98cfc088UL, 0xfe9bed1fa4d6aca4UL,
+ 0x04d29b8e56a8d1b0UL, 0x725f71c40b519575UL,
+ /* 201 */ 0x28c7f89cd0339ce6UL, 0x8367b14469ddc18bUL,
+ 0x883ada83a6a1652cUL, 0x585f1974034d6c17UL,
+ /* 202 */ 0x89cfb266f1b19188UL, 0xe63b4863e7c35217UL,
+ 0xd88c9da6b4c0526aUL, 0x3e035c9df0954635UL,
+ /* 203 */ 0xdd9d5412fb45de9dUL, 0xdd684532e4cff40dUL,
+ 0x4b5c999b151d671cUL, 0x2d8c2cc811e7f690UL,
+ /* 204 */ 0x7f54be1d90055d40UL, 0xa464c5df464aaf40UL,
+ 0x33979624f0e917beUL, 0x2c018dc527356b30UL,
+ /* 205 */ 0xa5415024e330b3d4UL, 0x73ff3d96691652d3UL,
+ 0x94ec42c4ef9b59f1UL, 0x0747201618d08e5aUL,
+ /* 206 */ 0x4d6ca48aca411c53UL, 0x66415f2fcfa66119UL,
+ 0x9c4dd40051e227ffUL, 0x59810bc09a02f7ebUL,
+ /* 207 */ 0x2a7eb171b3dc101dUL, 0x441c5ab99ffef68eUL,
+ 0x32025c9b93b359eaUL, 0x5e8ce0a71e9d112fUL,
+ /* 208 */ 0xbfcccb92429503fdUL, 0xd271ba752f095d55UL,
+ 0x345ead5e972d091eUL, 0x18c8df11a83103baUL,
+ /* 209 */ 0x90cd949a9aed0f4cUL, 0xc5d1f4cb6660e37eUL,
+ 0xb8cac52d56c52e0bUL, 0x6e42e400c5808e0dUL,
+ /* 210 */ 0xa3b46966eeaefd23UL, 0x0c4f1f0be39ecdcaUL,
+ 0x189dc8c9d683a51dUL, 0x51f27f054c09351bUL,
+ /* 211 */ 0x4c487ccd2a320682UL, 0x587ea95bb3df1c96UL,
+ 0xc8ccf79e555cb8e8UL, 0x547dc829a206d73dUL,
+ /* 212 */ 0xb822a6cd80c39b06UL, 0xe96d54732000d4c6UL,
+ 0x28535b6f91463b4dUL, 0x228f4660e2486e1dUL,
+ /* 213 */ 0x98799538de8d3abfUL, 0x8cd8330045ebca6eUL,
+ 0x79952a008221e738UL, 0x4322e1a7535cd2bbUL,
+ /* 214 */ 0xb114c11819d1801cUL, 0x2016e4d84f3f5ec7UL,
+ 0xdd0e2df409260f4cUL, 0x5ec362c0ae5f7266UL,
+ /* 215 */ 0xc0462b18b8b2b4eeUL, 0x7cc8d950274d1afbUL,
+ 0xf25f7105436b02d2UL, 0x43bbf8dcbff9ccd3UL,
+ /* 216 */ 0xb6ad1767a039e9dfUL, 0xb0714da8f69d3583UL,
+ 0x5e55fa18b42931f5UL, 0x4ed5558f33c60961UL,
+ /* 217 */ 0x1fe37901c647a5ddUL, 0x593ddf1f8081d357UL,
+ 0x0249a4fd813fd7a6UL, 0x69acca274e9caf61UL,
+ /* 218 */ 0x047ba3ea330721c9UL, 0x83423fc20e7e1ea0UL,
+ 0x1df4c0af01314a60UL, 0x09a62dab89289527UL,
+ /* 219 */ 0xa5b325a49cc6cb00UL, 0xe94b5dc654b56cb6UL,
+ 0x3be28779adc994a0UL, 0x4296e8f8ba3a4aadUL,
+ /* 220 */ 0x328689761e451eabUL, 0x2e4d598bff59594aUL,
+ 0x49b96853d7a7084aUL, 0x4980a319601420a8UL,
+ /* 221 */ 0x9565b9e12f552c42UL, 0x8a5318db7100fe96UL,
+ 0x05c90b4d43add0d7UL, 0x538b4cd66a5d4edaUL,
+ /* 222 */ 0xf4e94fc3e89f039fUL, 0x592c9af26f618045UL,
+ 0x08a36eb5fd4b9550UL, 0x25fffaf6c2ed1419UL,
+ /* 223 */ 0x34434459cc79d354UL, 0xeeecbfb4b1d5476bUL,
+ 0xddeb34a061615d99UL, 0x5129cecceb64b773UL,
+ /* 224 */ 0xee43215894993520UL, 0x772f9c7cf14c0b3bUL,
+ 0xd2e2fce306bedad5UL, 0x715f42b546f06a97UL,
+ /* 225 */ 0x434ecdceda5b5f1aUL, 0x0da17115a49741a9UL,
+ 0x680bd77c73edad2eUL, 0x487c02354edd9041UL,
+ /* 226 */ 0xb8efeff3a70ed9c4UL, 0x56a32aa3e857e302UL,
+ 0xdf3a68bd48a2a5a0UL, 0x07f650b73176c444UL,
+ /* 227 */ 0xe38b9b1626e0ccb1UL, 0x79e053c18b09fb36UL,
+ 0x56d90319c9f94964UL, 0x1ca941e7ac9ff5c4UL,
+ /* 228 */ 0x49c4df29162fa0bbUL, 0x8488cf3282b33305UL,
+ 0x95dfda14cabb437dUL, 0x3391f78264d5ad86UL,
+ /* 229 */ 0x729ae06ae2b5095dUL, 0xd58a58d73259a946UL,
+ 0xe9834262d13921edUL, 0x27fedafaa54bb592UL,
+ /* 230 */ 0xa99dc5b829ad48bbUL, 0x5f025742499ee260UL,
+ 0x802c8ecd5d7513fdUL, 0x78ceb3ef3f6dd938UL,
+ /* 231 */ 0xc342f44f8a135d94UL, 0x7b9edb44828cdda3UL,
+ 0x9436d11a0537cfe7UL, 0x5064b164ec1ab4c8UL,
+ /* 232 */ 0x7020eccfd37eb2fcUL, 0x1f31ea3ed90d25fcUL,
+ 0x1b930d7bdfa1bb34UL, 0x5344467a48113044UL,
+ /* 233 */ 0x70073170f25e6dfbUL, 0xe385dc1a50114cc8UL,
+ 0x2348698ac8fc4f00UL, 0x2a77a55284dd40d8UL,
+ /* 234 */ 0xfe06afe0c98c6ce4UL, 0xc235df96dddfd6e4UL,
+ 0x1428d01e33bf1ed3UL, 0x785768ec9300bdafUL,
+ /* 235 */ 0x9702e57a91deb63bUL, 0x61bdb8bfe5ce8b80UL,
+ 0x645b426f3d1d58acUL, 0x4804a82227a557bcUL,
+ /* 236 */ 0x8e57048ab44d2601UL, 0x68d6501a4b3a6935UL,
+ 0xc39c9ec3f9e1c293UL, 0x4172f257d4de63e2UL,
+ /* 237 */ 0xd368b450330c6401UL, 0x040d3017418f2391UL,
+ 0x2c34bb6090b7d90dUL, 0x16f649228fdfd51fUL,
+ /* 238 */ 0xbea6818e2b928ef5UL, 0xe28ccf91cdc11e72UL,
+ 0x594aaa68e77a36cdUL, 0x313034806c7ffd0fUL,
+ /* 239 */ 0x8a9d27ac2249bd65UL, 0x19a3b464018e9512UL,
+ 0xc26ccff352b37ec7UL, 0x056f68341d797b21UL,
+ /* 240 */ 0x5e79d6757efd2327UL, 0xfabdbcb6553afe15UL,
+ 0xd3e7222c6eaf5a60UL, 0x7046c76d4dae743bUL,
+ /* 241 */ 0x660be872b18d4a55UL, 0x19992518574e1496UL,
+ 0xc103053a302bdcbbUL, 0x3ed8e9800b218e8eUL,
+ /* 242 */ 0x7b0b9239fa75e03eUL, 0xefe9fb684633c083UL,
+ 0x98a35fbe391a7793UL, 0x6065510fe2d0fe34UL,
+ /* 243 */ 0x55cb668548abad0cUL, 0xb4584548da87e527UL,
+ 0x2c43ecea0107c1ddUL, 0x526028809372de35UL,
+ /* 244 */ 0x3415c56af9213b1fUL, 0x5bee1a4d017e98dbUL,
+ 0x13f6b105b5cf709bUL, 0x5ff20e3482b29ab6UL,
+ /* 245 */ 0x0aa29c75cc2e6c90UL, 0xfc7d73ca3a70e206UL,
+ 0x899fc38fc4b5c515UL, 0x250386b124ffc207UL,
+ /* 246 */ 0x54ea28d5ae3d2b56UL, 0x9913149dd6de60ceUL,
+ 0x16694fc58f06d6c1UL, 0x46b23975eb018fc7UL,
+ /* 247 */ 0x470a6a0fb4b7b4e2UL, 0x5d92475a8f7253deUL,
+ 0xabeee5b52fbd3adbUL, 0x7fa20801a0806968UL,
+ /* 248 */ 0x76f3faf19f7714d2UL, 0xb3e840c12f4660c3UL,
+ 0x0fb4cd8df212744eUL, 0x4b065a251d3a2dd2UL,
+ /* 249 */ 0x5cebde383d77cd4aUL, 0x6adf39df882c9cb1UL,
+ 0xa2dd242eb09af759UL, 0x3147c0e50e5f6422UL,
+ /* 250 */ 0x164ca5101d1350dbUL, 0xf8d13479c33fc962UL,
+ 0xe640ce4d13e5da08UL, 0x4bdee0c45061f8baUL,
+ /* 251 */ 0xd7c46dc1a4edb1c9UL, 0x5514d7b6437fd98aUL,
+ 0x58942f6bb2a1c00bUL, 0x2dffb2ab1d70710eUL,
+ /* 252 */ 0xccdfcf2fc18b6d68UL, 0xa8ebcba8b7806167UL,
+ 0x980697f95e2937e3UL, 0x02fbba1cd0126e8cUL
+};
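+
+/* The 252 x 4 words above are precomputed ladder constants, consumed by the
+ * fixed-base (base-point) scalar-multiplication paths elsewhere in this
+ * file.
+ */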
+
+/* c is two 512-bit products: c0[0:7] = a0[0:3]*b0[0:3] and c1[8:15] = a1[4:7]*b1[4:7],
+ * a is two 256-bit integers: a0[0:3] and a1[4:7],
+ * b is two 256-bit integers: b0[0:3] and b1[4:7].
+ * (The asm comments below write C and D for the upper halves a1 and b1.)
+ */
+static void mul2_256x256_integer_adx(u64 *const c, const u64 *const a,
+ const u64 *const b)
+{
+ asm volatile(
+ "xorl %%r14d, %%r14d ;"
+ "movq (%1), %%rdx; " /* A[0] */
+ "mulx (%2), %%r8, %%r15; " /* A[0]*B[0] */
+ "xorl %%r10d, %%r10d ;"
+ "movq %%r8, (%0) ;"
+ "mulx 8(%2), %%r10, %%rax; " /* A[0]*B[1] */
+ "adox %%r10, %%r15 ;"
+ "mulx 16(%2), %%r8, %%rbx; " /* A[0]*B[2] */
+ "adox %%r8, %%rax ;"
+ "mulx 24(%2), %%r10, %%rcx; " /* A[0]*B[3] */
+ "adox %%r10, %%rbx ;"
+ /******************************************/
+ "adox %%r14, %%rcx ;"
+
+ "movq 8(%1), %%rdx; " /* A[1] */
+ "mulx (%2), %%r8, %%r9; " /* A[1]*B[0] */
+ "adox %%r15, %%r8 ;"
+ "movq %%r8, 8(%0) ;"
+ "mulx 8(%2), %%r10, %%r11; " /* A[1]*B[1] */
+ "adox %%r10, %%r9 ;"
+ "adcx %%r9, %%rax ;"
+ "mulx 16(%2), %%r8, %%r13; " /* A[1]*B[2] */
+ "adox %%r8, %%r11 ;"
+ "adcx %%r11, %%rbx ;"
+ "mulx 24(%2), %%r10, %%r15; " /* A[1]*B[3] */
+ "adox %%r10, %%r13 ;"
+ "adcx %%r13, %%rcx ;"
+ /******************************************/
+ "adox %%r14, %%r15 ;"
+ "adcx %%r14, %%r15 ;"
+
+ "movq 16(%1), %%rdx; " /* A[2] */
+ "xorl %%r10d, %%r10d ;"
+ "mulx (%2), %%r8, %%r9; " /* A[2]*B[0] */
+ "adox %%rax, %%r8 ;"
+ "movq %%r8, 16(%0) ;"
+ "mulx 8(%2), %%r10, %%r11; " /* A[2]*B[1] */
+ "adox %%r10, %%r9 ;"
+ "adcx %%r9, %%rbx ;"
+ "mulx 16(%2), %%r8, %%r13; " /* A[2]*B[2] */
+ "adox %%r8, %%r11 ;"
+ "adcx %%r11, %%rcx ;"
+ "mulx 24(%2), %%r10, %%rax; " /* A[2]*B[3] */
+ "adox %%r10, %%r13 ;"
+ "adcx %%r13, %%r15 ;"
+ /******************************************/
+ "adox %%r14, %%rax ;"
+ "adcx %%r14, %%rax ;"
+
+ "movq 24(%1), %%rdx; " /* A[3] */
+ "xorl %%r10d, %%r10d ;"
+ "mulx (%2), %%r8, %%r9; " /* A[3]*B[0] */
+ "adox %%rbx, %%r8 ;"
+ "movq %%r8, 24(%0) ;"
+ "mulx 8(%2), %%r10, %%r11; " /* A[3]*B[1] */
+ "adox %%r10, %%r9 ;"
+ "adcx %%r9, %%rcx ;"
+ "movq %%rcx, 32(%0) ;"
+ "mulx 16(%2), %%r8, %%r13; " /* A[3]*B[2] */
+ "adox %%r8, %%r11 ;"
+ "adcx %%r11, %%r15 ;"
+ "movq %%r15, 40(%0) ;"
+ "mulx 24(%2), %%r10, %%rbx; " /* A[3]*B[3] */
+ "adox %%r10, %%r13 ;"
+ "adcx %%r13, %%rax ;"
+ "movq %%rax, 48(%0) ;"
+ /******************************************/
+ "adox %%r14, %%rbx ;"
+ "adcx %%r14, %%rbx ;"
+ "movq %%rbx, 56(%0) ;"
+
+ "movq 32(%1), %%rdx; " /* C[0] */
+ "mulx 32(%2), %%r8, %%r15; " /* C[0]*D[0] */
+ "xorl %%r10d, %%r10d ;"
+ "movq %%r8, 64(%0);"
+ "mulx 40(%2), %%r10, %%rax; " /* C[0]*D[1] */
+ "adox %%r10, %%r15 ;"
+ "mulx 48(%2), %%r8, %%rbx; " /* C[0]*D[2] */
+ "adox %%r8, %%rax ;"
+ "mulx 56(%2), %%r10, %%rcx; " /* C[0]*D[3] */
+ "adox %%r10, %%rbx ;"
+ /******************************************/
+ "adox %%r14, %%rcx ;"
+
+ "movq 40(%1), %%rdx; " /* C[1] */
+ "xorl %%r10d, %%r10d ;"
+ "mulx 32(%2), %%r8, %%r9; " /* C[1]*D[0] */
+ "adox %%r15, %%r8 ;"
+ "movq %%r8, 72(%0);"
+ "mulx 40(%2), %%r10, %%r11; " /* C[1]*D[1] */
+ "adox %%r10, %%r9 ;"
+ "adcx %%r9, %%rax ;"
+ "mulx 48(%2), %%r8, %%r13; " /* C[1]*D[2] */
+ "adox %%r8, %%r11 ;"
+ "adcx %%r11, %%rbx ;"
+ "mulx 56(%2), %%r10, %%r15; " /* C[1]*D[3] */
+ "adox %%r10, %%r13 ;"
+ "adcx %%r13, %%rcx ;"
+ /******************************************/
+ "adox %%r14, %%r15 ;"
+ "adcx %%r14, %%r15 ;"
+
+ "movq 48(%1), %%rdx; " /* C[2] */
+ "xorl %%r10d, %%r10d ;"
+ "mulx 32(%2), %%r8, %%r9; " /* C[2]*D[0] */
+ "adox %%rax, %%r8 ;"
+ "movq %%r8, 80(%0);"
+ "mulx 40(%2), %%r10, %%r11; " /* C[2]*D[1] */
+ "adox %%r10, %%r9 ;"
+ "adcx %%r9, %%rbx ;"
+ "mulx 48(%2), %%r8, %%r13; " /* C[2]*D[2] */
+ "adox %%r8, %%r11 ;"
+ "adcx %%r11, %%rcx ;"
+ "mulx 56(%2), %%r10, %%rax; " /* C[2]*D[3] */
+ "adox %%r10, %%r13 ;"
+ "adcx %%r13, %%r15 ;"
+ /******************************************/
+ "adox %%r14, %%rax ;"
+ "adcx %%r14, %%rax ;"
+
+ "movq 56(%1), %%rdx; " /* C[3] */
+ "xorl %%r10d, %%r10d ;"
+ "mulx 32(%2), %%r8, %%r9; " /* C[3]*D[0] */
+ "adox %%rbx, %%r8 ;"
+ "movq %%r8, 88(%0);"
+ "mulx 40(%2), %%r10, %%r11; " /* C[3]*D[1] */
+ "adox %%r10, %%r9 ;"
+ "adcx %%r9, %%rcx ;"
+ "movq %%rcx, 96(%0) ;"
+ "mulx 48(%2), %%r8, %%r13; " /* C[3]*D[2] */
+ "adox %%r8, %%r11 ;"
+ "adcx %%r11, %%r15 ;"
+ "movq %%r15, 104(%0) ;"
+ "mulx 56(%2), %%r10, %%rbx; " /* C[3]*D[3] */
+ "adox %%r10, %%r13 ;"
+ "adcx %%r13, %%rax ;"
+ "movq %%rax, 112(%0) ;"
+ /******************************************/
+ "adox %%r14, %%rbx ;"
+ "adcx %%r14, %%rbx ;"
+ "movq %%rbx, 120(%0) ;"
+ :
+ : "r"(c), "r"(a), "r"(b)
+ : "memory", "cc", "%rax", "%rbx", "%rcx", "%rdx", "%r8", "%r9",
+ "%r10", "%r11", "%r13", "%r14", "%r15");
+}
+
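+/* Same schoolbook product as mul2_256x256_integer_adx above, but serialized
+ * on a single ADD/ADC carry chain for CPUs that provide MULX (BMI2) without
+ * the ADCX/ADOX (ADX) extensions.
+ */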
+static void mul2_256x256_integer_bmi2(u64 *const c, const u64 *const a,
+ const u64 *const b)
+{
+ asm volatile(
+ "movq (%1), %%rdx; " /* A[0] */
+ "mulx (%2), %%r8, %%r15; " /* A[0]*B[0] */
+ "movq %%r8, (%0) ;"
+ "mulx 8(%2), %%r10, %%rax; " /* A[0]*B[1] */
+ "addq %%r10, %%r15 ;"
+ "mulx 16(%2), %%r8, %%rbx; " /* A[0]*B[2] */
+ "adcq %%r8, %%rax ;"
+ "mulx 24(%2), %%r10, %%rcx; " /* A[0]*B[3] */
+ "adcq %%r10, %%rbx ;"
+ /******************************************/
+ "adcq $0, %%rcx ;"
+
+ "movq 8(%1), %%rdx; " /* A[1] */
+ "mulx (%2), %%r8, %%r9; " /* A[1]*B[0] */
+ "addq %%r15, %%r8 ;"
+ "movq %%r8, 8(%0) ;"
+ "mulx 8(%2), %%r10, %%r11; " /* A[1]*B[1] */
+ "adcq %%r10, %%r9 ;"
+ "mulx 16(%2), %%r8, %%r13; " /* A[1]*B[2] */
+ "adcq %%r8, %%r11 ;"
+ "mulx 24(%2), %%r10, %%r15; " /* A[1]*B[3] */
+ "adcq %%r10, %%r13 ;"
+ /******************************************/
+ "adcq $0, %%r15 ;"
+
+ "addq %%r9, %%rax ;"
+ "adcq %%r11, %%rbx ;"
+ "adcq %%r13, %%rcx ;"
+ "adcq $0, %%r15 ;"
+
+ "movq 16(%1), %%rdx; " /* A[2] */
+ "mulx (%2), %%r8, %%r9; " /* A[2]*B[0] */
+ "addq %%rax, %%r8 ;"
+ "movq %%r8, 16(%0) ;"
+ "mulx 8(%2), %%r10, %%r11; " /* A[2]*B[1] */
+ "adcq %%r10, %%r9 ;"
+ "mulx 16(%2), %%r8, %%r13; " /* A[2]*B[2] */
+ "adcq %%r8, %%r11 ;"
+ "mulx 24(%2), %%r10, %%rax; " /* A[2]*B[3] */
+ "adcq %%r10, %%r13 ;"
+ /******************************************/
+ "adcq $0, %%rax ;"
+
+ "addq %%r9, %%rbx ;"
+ "adcq %%r11, %%rcx ;"
+ "adcq %%r13, %%r15 ;"
+ "adcq $0, %%rax ;"
+
+ "movq 24(%1), %%rdx; " /* A[3] */
+ "mulx (%2), %%r8, %%r9; " /* A[3]*B[0] */
+ "addq %%rbx, %%r8 ;"
+ "movq %%r8, 24(%0) ;"
+ "mulx 8(%2), %%r10, %%r11; " /* A[3]*B[1] */
+ "adcq %%r10, %%r9 ;"
+ "mulx 16(%2), %%r8, %%r13; " /* A[3]*B[2] */
+ "adcq %%r8, %%r11 ;"
+ "mulx 24(%2), %%r10, %%rbx; " /* A[3]*B[3] */
+ "adcq %%r10, %%r13 ;"
+ /******************************************/
+ "adcq $0, %%rbx ;"
+
+ "addq %%r9, %%rcx ;"
+ "movq %%rcx, 32(%0) ;"
+ "adcq %%r11, %%r15 ;"
+ "movq %%r15, 40(%0) ;"
+ "adcq %%r13, %%rax ;"
+ "movq %%rax, 48(%0) ;"
+ "adcq $0, %%rbx ;"
+ "movq %%rbx, 56(%0) ;"
+
+ "movq 32(%1), %%rdx; " /* C[0] */
+ "mulx 32(%2), %%r8, %%r15; " /* C[0]*D[0] */
+ "movq %%r8, 64(%0) ;"
+ "mulx 40(%2), %%r10, %%rax; " /* C[0]*D[1] */
+ "addq %%r10, %%r15 ;"
+ "mulx 48(%2), %%r8, %%rbx; " /* C[0]*D[2] */
+ "adcq %%r8, %%rax ;"
+ "mulx 56(%2), %%r10, %%rcx; " /* C[0]*D[3] */
+ "adcq %%r10, %%rbx ;"
+ /******************************************/
+ "adcq $0, %%rcx ;"
+
+ "movq 40(%1), %%rdx; " /* C[1] */
+ "mulx 32(%2), %%r8, %%r9; " /* C[1]*D[0] */
+ "addq %%r15, %%r8 ;"
+ "movq %%r8, 72(%0) ;"
+ "mulx 40(%2), %%r10, %%r11; " /* C[1]*D[1] */
+ "adcq %%r10, %%r9 ;"
+ "mulx 48(%2), %%r8, %%r13; " /* C[1]*D[2] */
+ "adcq %%r8, %%r11 ;"
+ "mulx 56(%2), %%r10, %%r15; " /* C[1]*D[3] */
+ "adcq %%r10, %%r13 ;"
+ /******************************************/
+ "adcq $0, %%r15 ;"
+
+ "addq %%r9, %%rax ;"
+ "adcq %%r11, %%rbx ;"
+ "adcq %%r13, %%rcx ;"
+ "adcq $0, %%r15 ;"
+
+ "movq 48(%1), %%rdx; " /* C[2] */
+ "mulx 32(%2), %%r8, %%r9; " /* C[2]*D[0] */
+ "addq %%rax, %%r8 ;"
+ "movq %%r8, 80(%0) ;"
+ "mulx 40(%2), %%r10, %%r11; " /* C[2]*D[1] */
+ "adcq %%r10, %%r9 ;"
+ "mulx 48(%2), %%r8, %%r13; " /* C[2]*D[2] */
+ "adcq %%r8, %%r11 ;"
+ "mulx 56(%2), %%r10, %%rax; " /* C[2]*D[3] */
+ "adcq %%r10, %%r13 ;"
+ /******************************************/
+ "adcq $0, %%rax ;"
+
+ "addq %%r9, %%rbx ;"
+ "adcq %%r11, %%rcx ;"
+ "adcq %%r13, %%r15 ;"
+ "adcq $0, %%rax ;"
+
+ "movq 56(%1), %%rdx; " /* C[3] */
+ "mulx 32(%2), %%r8, %%r9; " /* C[3]*D[0] */
+ "addq %%rbx, %%r8 ;"
+ "movq %%r8, 88(%0) ;"
+ "mulx 40(%2), %%r10, %%r11; " /* C[3]*D[1] */
+ "adcq %%r10, %%r9 ;"
+ "mulx 48(%2), %%r8, %%r13; " /* C[3]*D[2] */
+ "adcq %%r8, %%r11 ;"
+ "mulx 56(%2), %%r10, %%rbx; " /* C[3]*D[3] */
+ "adcq %%r10, %%r13 ;"
+ /******************************************/
+ "adcq $0, %%rbx ;"
+
+ "addq %%r9, %%rcx ;"
+ "movq %%rcx, 96(%0) ;"
+ "adcq %%r11, %%r15 ;"
+ "movq %%r15, 104(%0) ;"
+ "adcq %%r13, %%rax ;"
+ "movq %%rax, 112(%0) ;"
+ "adcq $0, %%rbx ;"
+ "movq %%rbx, 120(%0) ;"
+ :
+ : "r"(c), "r"(a), "r"(b)
+ : "memory", "cc", "%rax", "%rbx", "%rcx", "%rdx", "%r8", "%r9",
+ "%r10", "%r11", "%r13", "%r15");
+}
+
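+/* For cross-checking the carry chains above, a hypothetical reference helper
+ * (illustrative only) performing the same schoolbook 256x256->512
+ * multiplication that both variants apply to each half, assuming u128
+ * stands for unsigned __int128:
+ *
+ *	// hypothetical reference, one 256x256->512 product
+ *	static void mul_256x256_ref(u64 c[8], const u64 a[4], const u64 b[4])
+ *	{
+ *		int i, j;
+ *
+ *		for (i = 0; i < 8; i++)
+ *			c[i] = 0;
+ *		for (i = 0; i < 4; i++) {
+ *			u64 carry = 0;
+ *
+ *			for (j = 0; j < 4; j++) {
+ *				u128 t = (u128)a[i] * b[j] + c[i + j] + carry;
+ *
+ *				c[i + j] = (u64)t;
+ *				carry = (u64)(t >> 64);
+ *			}
+ *			c[i + 4] = carry;
+ *		}
+ *	}
+ */
+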
+static void sqr2_256x256_integer_adx(u64 *const c, const u64 *const a)
+{
+ asm volatile(
+ "movq (%1), %%rdx ;" /* A[0] */
+ "mulx 8(%1), %%r8, %%r14 ;" /* A[1]*A[0] */
+ "xorl %%r15d, %%r15d;"
+ "mulx 16(%1), %%r9, %%r10 ;" /* A[2]*A[0] */
+ "adcx %%r14, %%r9 ;"
+ "mulx 24(%1), %%rax, %%rcx ;" /* A[3]*A[0] */
+ "adcx %%rax, %%r10 ;"
+ "movq 24(%1), %%rdx ;" /* A[3] */
+ "mulx 8(%1), %%r11, %%rbx ;" /* A[1]*A[3] */
+ "adcx %%rcx, %%r11 ;"
+ "mulx 16(%1), %%rax, %%r13 ;" /* A[2]*A[3] */
+ "adcx %%rax, %%rbx ;"
+ "movq 8(%1), %%rdx ;" /* A[1] */
+ "adcx %%r15, %%r13 ;"
+ "mulx 16(%1), %%rax, %%rcx ;" /* A[2]*A[1] */
+ "movq $0, %%r14 ;"
+ /******************************************/
+ "adcx %%r15, %%r14 ;"
+
+ "xorl %%r15d, %%r15d;"
+ "adox %%rax, %%r10 ;"
+ "adcx %%r8, %%r8 ;"
+ "adox %%rcx, %%r11 ;"
+ "adcx %%r9, %%r9 ;"
+ "adox %%r15, %%rbx ;"
+ "adcx %%r10, %%r10 ;"
+ "adox %%r15, %%r13 ;"
+ "adcx %%r11, %%r11 ;"
+ "adox %%r15, %%r14 ;"
+ "adcx %%rbx, %%rbx ;"
+ "adcx %%r13, %%r13 ;"
+ "adcx %%r14, %%r14 ;"
+
+ "movq (%1), %%rdx ;"
+ "mulx %%rdx, %%rax, %%rcx ;" /* A[0]^2 */
+ /*******************/
+ "movq %%rax, 0(%0) ;"
+ "addq %%rcx, %%r8 ;"
+ "movq %%r8, 8(%0) ;"
+ "movq 8(%1), %%rdx ;"
+ "mulx %%rdx, %%rax, %%rcx ;" /* A[1]^2 */
+ "adcq %%rax, %%r9 ;"
+ "movq %%r9, 16(%0) ;"
+ "adcq %%rcx, %%r10 ;"
+ "movq %%r10, 24(%0) ;"
+ "movq 16(%1), %%rdx ;"
+ "mulx %%rdx, %%rax, %%rcx ;" /* A[2]^2 */
+ "adcq %%rax, %%r11 ;"
+ "movq %%r11, 32(%0) ;"
+ "adcq %%rcx, %%rbx ;"
+ "movq %%rbx, 40(%0) ;"
+ "movq 24(%1), %%rdx ;"
+ "mulx %%rdx, %%rax, %%rcx ;" /* A[3]^2 */
+ "adcq %%rax, %%r13 ;"
+ "movq %%r13, 48(%0) ;"
+ "adcq %%rcx, %%r14 ;"
+ "movq %%r14, 56(%0) ;"
+
+
+ "movq 32(%1), %%rdx ;" /* B[0] */
+ "mulx 40(%1), %%r8, %%r14 ;" /* B[1]*B[0] */
+ "xorl %%r15d, %%r15d;"
+ "mulx 48(%1), %%r9, %%r10 ;" /* B[2]*B[0] */
+ "adcx %%r14, %%r9 ;"
+ "mulx 56(%1), %%rax, %%rcx ;" /* B[3]*B[0] */
+ "adcx %%rax, %%r10 ;"
+ "movq 56(%1), %%rdx ;" /* B[3] */
+ "mulx 40(%1), %%r11, %%rbx ;" /* B[1]*B[3] */
+ "adcx %%rcx, %%r11 ;"
+ "mulx 48(%1), %%rax, %%r13 ;" /* B[2]*B[3] */
+ "adcx %%rax, %%rbx ;"
+ "movq 40(%1), %%rdx ;" /* B[1] */
+ "adcx %%r15, %%r13 ;"
+ "mulx 48(%1), %%rax, %%rcx ;" /* B[2]*B[1] */
+ "movq $0, %%r14 ;"
+ /******************************************/
+ "adcx %%r15, %%r14 ;"
+
+ "xorl %%r15d, %%r15d;"
+ "adox %%rax, %%r10 ;"
+ "adcx %%r8, %%r8 ;"
+ "adox %%rcx, %%r11 ;"
+ "adcx %%r9, %%r9 ;"
+ "adox %%r15, %%rbx ;"
+ "adcx %%r10, %%r10 ;"
+ "adox %%r15, %%r13 ;"
+ "adcx %%r11, %%r11 ;"
+ "adox %%r15, %%r14 ;"
+ "adcx %%rbx, %%rbx ;"
+ "adcx %%r13, %%r13 ;"
+ "adcx %%r14, %%r14 ;"
+
+ "movq 32(%1), %%rdx ;"
+ "mulx %%rdx, %%rax, %%rcx ;" /* B[0]^2 */
+ /*******************/
+ "movq %%rax, 64(%0) ;"
+ "addq %%rcx, %%r8 ;"
+ "movq %%r8, 72(%0) ;"
+ "movq 40(%1), %%rdx ;"
+ "mulx %%rdx, %%rax, %%rcx ;" /* B[1]^2 */
+ "adcq %%rax, %%r9 ;"
+ "movq %%r9, 80(%0) ;"
+ "adcq %%rcx, %%r10 ;"
+ "movq %%r10, 88(%0) ;"
+ "movq 48(%1), %%rdx ;"
+ "mulx %%rdx, %%rax, %%rcx ;" /* B[2]^2 */
+ "adcq %%rax, %%r11 ;"
+ "movq %%r11, 96(%0) ;"
+ "adcq %%rcx, %%rbx ;"
+ "movq %%rbx, 104(%0) ;"
+ "movq 56(%1), %%rdx ;"
+ "mulx %%rdx, %%rax, %%rcx ;" /* B[3]^2 */
+ "adcq %%rax, %%r13 ;"
+ "movq %%r13, 112(%0) ;"
+ "adcq %%rcx, %%r14 ;"
+ "movq %%r14, 120(%0) ;"
+ :
+ : "r"(c), "r"(a)
+ : "memory", "cc", "%rax", "%rbx", "%rcx", "%rdx", "%r8", "%r9",
+ "%r10", "%r11", "%r13", "%r14", "%r15");
+}
+
+static void sqr2_256x256_integer_bmi2(u64 *const c, const u64 *const a)
+{
+ asm volatile(
+ "movq 8(%1), %%rdx ;" /* A[1] */
+ "mulx (%1), %%r8, %%r9 ;" /* A[0]*A[1] */
+ "mulx 16(%1), %%r10, %%r11 ;" /* A[2]*A[1] */
+ "mulx 24(%1), %%rcx, %%r14 ;" /* A[3]*A[1] */
+
+ "movq 16(%1), %%rdx ;" /* A[2] */
+ "mulx 24(%1), %%r15, %%r13 ;" /* A[3]*A[2] */
+ "mulx (%1), %%rax, %%rdx ;" /* A[0]*A[2] */
+
+ "addq %%rax, %%r9 ;"
+ "adcq %%rdx, %%r10 ;"
+ "adcq %%rcx, %%r11 ;"
+ "adcq %%r14, %%r15 ;"
+ "adcq $0, %%r13 ;"
+ "movq $0, %%r14 ;"
+ "adcq $0, %%r14 ;"
+
+ "movq (%1), %%rdx ;" /* A[0] */
+ "mulx 24(%1), %%rax, %%rcx ;" /* A[0]*A[3] */
+
+ "addq %%rax, %%r10 ;"
+ "adcq %%rcx, %%r11 ;"
+ "adcq $0, %%r15 ;"
+ "adcq $0, %%r13 ;"
+ "adcq $0, %%r14 ;"
+
+ "shldq $1, %%r13, %%r14 ;"
+ "shldq $1, %%r15, %%r13 ;"
+ "shldq $1, %%r11, %%r15 ;"
+ "shldq $1, %%r10, %%r11 ;"
+ "shldq $1, %%r9, %%r10 ;"
+ "shldq $1, %%r8, %%r9 ;"
+ "shlq $1, %%r8 ;"
+
+ /*******************/
+ "mulx %%rdx, %%rax, %%rcx ; " /* A[0]^2 */
+ /*******************/
+ "movq %%rax, 0(%0) ;"
+ "addq %%rcx, %%r8 ;"
+ "movq %%r8, 8(%0) ;"
+ "movq 8(%1), %%rdx ;"
+ "mulx %%rdx, %%rax, %%rcx ; " /* A[1]^2 */
+ "adcq %%rax, %%r9 ;"
+ "movq %%r9, 16(%0) ;"
+ "adcq %%rcx, %%r10 ;"
+ "movq %%r10, 24(%0) ;"
+ "movq 16(%1), %%rdx ;"
+ "mulx %%rdx, %%rax, %%rcx ; " /* A[2]^2 */
+ "adcq %%rax, %%r11 ;"
+ "movq %%r11, 32(%0) ;"
+ "adcq %%rcx, %%r15 ;"
+ "movq %%r15, 40(%0) ;"
+ "movq 24(%1), %%rdx ;"
+ "mulx %%rdx, %%rax, %%rcx ; " /* A[3]^2 */
+ "adcq %%rax, %%r13 ;"
+ "movq %%r13, 48(%0) ;"
+ "adcq %%rcx, %%r14 ;"
+ "movq %%r14, 56(%0) ;"
+
+ "movq 40(%1), %%rdx ;" /* B[1] */
+ "mulx 32(%1), %%r8, %%r9 ;" /* B[0]*B[1] */
+ "mulx 48(%1), %%r10, %%r11 ;" /* B[2]*B[1] */
+ "mulx 56(%1), %%rcx, %%r14 ;" /* B[3]*B[1] */
+
+ "movq 48(%1), %%rdx ;" /* B[2] */
+ "mulx 56(%1), %%r15, %%r13 ;" /* B[3]*B[2] */
+ "mulx 32(%1), %%rax, %%rdx ;" /* B[0]*B[2] */
+
+ "addq %%rax, %%r9 ;"
+ "adcq %%rdx, %%r10 ;"
+ "adcq %%rcx, %%r11 ;"
+ "adcq %%r14, %%r15 ;"
+ "adcq $0, %%r13 ;"
+ "movq $0, %%r14 ;"
+ "adcq $0, %%r14 ;"
+
+ "movq 32(%1), %%rdx ;" /* B[0] */
+ "mulx 56(%1), %%rax, %%rcx ;" /* B[0]*B[3] */
+
+ "addq %%rax, %%r10 ;"
+ "adcq %%rcx, %%r11 ;"
+ "adcq $0, %%r15 ;"
+ "adcq $0, %%r13 ;"
+ "adcq $0, %%r14 ;"
+
+ "shldq $1, %%r13, %%r14 ;"
+ "shldq $1, %%r15, %%r13 ;"
+ "shldq $1, %%r11, %%r15 ;"
+ "shldq $1, %%r10, %%r11 ;"
+ "shldq $1, %%r9, %%r10 ;"
+ "shldq $1, %%r8, %%r9 ;"
+ "shlq $1, %%r8 ;"
+
+ /*******************/
+ "mulx %%rdx, %%rax, %%rcx ; " /* B[0]^2 */
+ /*******************/
+ "movq %%rax, 64(%0) ;"
+ "addq %%rcx, %%r8 ;"
+ "movq %%r8, 72(%0) ;"
+ "movq 40(%1), %%rdx ;"
+ "mulx %%rdx, %%rax, %%rcx ; " /* B[1]^2 */
+ "adcq %%rax, %%r9 ;"
+ "movq %%r9, 80(%0) ;"
+ "adcq %%rcx, %%r10 ;"
+ "movq %%r10, 88(%0) ;"
+ "movq 48(%1), %%rdx ;"
+ "mulx %%rdx, %%rax, %%rcx ; " /* B[2]^2 */
+ "adcq %%rax, %%r11 ;"
+ "movq %%r11, 96(%0) ;"
+ "adcq %%rcx, %%r15 ;"
+ "movq %%r15, 104(%0) ;"
+ "movq 56(%1), %%rdx ;"
+ "mulx %%rdx, %%rax, %%rcx ; " /* B[3]^2 */
+ "adcq %%rax, %%r13 ;"
+ "movq %%r13, 112(%0) ;"
+ "adcq %%rcx, %%r14 ;"
+ "movq %%r14, 120(%0) ;"
+ :
+ : "r"(c), "r"(a)
+ : "memory", "cc", "%rax", "%rcx", "%rdx", "%r8", "%r9", "%r10",
+ "%r11", "%r13", "%r14", "%r15");
+}
+
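+/* The squarings above compute only the cross products A[i]*A[j] for i < j,
+ * double them (an ADCX self-add pass in the ADX variant, the SHLD cascade
+ * in the BMI2 variant), then add the diagonal squares A[i]^2, once per
+ * 256-bit half. A hypothetical reference for one half (illustrative only,
+ * u128 as above):
+ *
+ *	static void sqr_256x256_ref(u64 c[8], const u64 a[4])
+ *	{
+ *		u64 cross[8] = { 0 }, cy = 0;
+ *		int i, j;
+ *
+ *		for (i = 0; i < 4; i++) {	// cross products, i < j
+ *			u64 carry = 0;
+ *
+ *			for (j = i + 1; j < 4; j++) {
+ *				u128 t = (u128)a[i] * a[j] + cross[i + j] + carry;
+ *
+ *				cross[i + j] = (u64)t;
+ *				carry = (u64)(t >> 64);
+ *			}
+ *			cross[i + 4] = carry;
+ *		}
+ *		for (i = 0; i < 8; i++) {	// double: cross <<= 1
+ *			u64 top = cross[i] >> 63;
+ *
+ *			cross[i] = (cross[i] << 1) | cy;
+ *			cy = top;
+ *		}
+ *		for (cy = 0, i = 0; i < 4; i++) {	// add diagonals A[i]^2
+ *			u128 sq = (u128)a[i] * a[i];
+ *			u128 lo = (u128)cross[2 * i] + (u64)sq + cy;
+ *			u128 hi = (u128)cross[2 * i + 1] + (u64)(sq >> 64)
+ *				  + (u64)(lo >> 64);
+ *
+ *			c[2 * i] = (u64)lo;
+ *			c[2 * i + 1] = (u64)hi;
+ *			cy = (u64)(hi >> 64);
+ *		}
+ *	}
+ */
+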
+static void red_eltfp25519_2w_adx(u64 *const c, const u64 *const a)
+{
+ asm volatile(
+ "movl $38, %%edx; " /* 2*c = 38 = 2^256 */
+ "mulx 32(%1), %%r8, %%r10; " /* c*C[4] */
+ "xorl %%ebx, %%ebx ;"
+ "adox (%1), %%r8 ;"
+ "mulx 40(%1), %%r9, %%r11; " /* c*C[5] */
+ "adcx %%r10, %%r9 ;"
+ "adox 8(%1), %%r9 ;"
+ "mulx 48(%1), %%r10, %%rax; " /* c*C[6] */
+ "adcx %%r11, %%r10 ;"
+ "adox 16(%1), %%r10 ;"
+ "mulx 56(%1), %%r11, %%rcx; " /* c*C[7] */
+ "adcx %%rax, %%r11 ;"
+ "adox 24(%1), %%r11 ;"
+ /***************************************/
+ "adcx %%rbx, %%rcx ;"
+ "adox %%rbx, %%rcx ;"
+ "imul %%rdx, %%rcx ;" /* c*C[4], cf=0, of=0 */
+ "adcx %%rcx, %%r8 ;"
+ "adcx %%rbx, %%r9 ;"
+ "movq %%r9, 8(%0) ;"
+ "adcx %%rbx, %%r10 ;"
+ "movq %%r10, 16(%0) ;"
+ "adcx %%rbx, %%r11 ;"
+ "movq %%r11, 24(%0) ;"
+ "mov $0, %%ecx ;"
+ "cmovc %%edx, %%ecx ;"
+ "addq %%rcx, %%r8 ;"
+ "movq %%r8, (%0) ;"
+
+ "mulx 96(%1), %%r8, %%r10; " /* c*C[4] */
+ "xorl %%ebx, %%ebx ;"
+ "adox 64(%1), %%r8 ;"
+ "mulx 104(%1), %%r9, %%r11; " /* c*C[5] */
+ "adcx %%r10, %%r9 ;"
+ "adox 72(%1), %%r9 ;"
+ "mulx 112(%1), %%r10, %%rax; " /* c*C[6] */
+ "adcx %%r11, %%r10 ;"
+ "adox 80(%1), %%r10 ;"
+ "mulx 120(%1), %%r11, %%rcx; " /* c*C[7] */
+ "adcx %%rax, %%r11 ;"
+ "adox 88(%1), %%r11 ;"
+ /****************************************/
+ "adcx %%rbx, %%rcx ;"
+ "adox %%rbx, %%rcx ;"
+ "imul %%rdx, %%rcx ;" /* c*C[4], cf=0, of=0 */
+ "adcx %%rcx, %%r8 ;"
+ "adcx %%rbx, %%r9 ;"
+ "movq %%r9, 40(%0) ;"
+ "adcx %%rbx, %%r10 ;"
+ "movq %%r10, 48(%0) ;"
+ "adcx %%rbx, %%r11 ;"
+ "movq %%r11, 56(%0) ;"
+ "mov $0, %%ecx ;"
+ "cmovc %%edx, %%ecx ;"
+ "addq %%rcx, %%r8 ;"
+ "movq %%r8, 32(%0) ;"
+ :
+ : "r"(c), "r"(a)
+ : "memory", "cc", "%rax", "%rbx", "%rcx", "%rdx", "%r8", "%r9",
+ "%r10", "%r11");
+}
+
+static void red_eltfp25519_2w_bmi2(u64 *const c, const u64 *const a)
+{
+ asm volatile(
+ "movl $38, %%edx ; " /* 2*c = 38 = 2^256 */
+ "mulx 32(%1), %%r8, %%r10 ;" /* c*C[4] */
+ "mulx 40(%1), %%r9, %%r11 ;" /* c*C[5] */
+ "addq %%r10, %%r9 ;"
+ "mulx 48(%1), %%r10, %%rax ;" /* c*C[6] */
+ "adcq %%r11, %%r10 ;"
+ "mulx 56(%1), %%r11, %%rcx ;" /* c*C[7] */
+ "adcq %%rax, %%r11 ;"
+ /***************************************/
+ "adcq $0, %%rcx ;"
+ "addq (%1), %%r8 ;"
+ "adcq 8(%1), %%r9 ;"
+ "adcq 16(%1), %%r10 ;"
+ "adcq 24(%1), %%r11 ;"
+ "adcq $0, %%rcx ;"
+ "imul %%rdx, %%rcx ;" /* c*C[4], cf=0 */
+ "addq %%rcx, %%r8 ;"
+ "adcq $0, %%r9 ;"
+ "movq %%r9, 8(%0) ;"
+ "adcq $0, %%r10 ;"
+ "movq %%r10, 16(%0) ;"
+ "adcq $0, %%r11 ;"
+ "movq %%r11, 24(%0) ;"
+ "mov $0, %%ecx ;"
+ "cmovc %%edx, %%ecx ;"
+ "addq %%rcx, %%r8 ;"
+ "movq %%r8, (%0) ;"
+
+ "mulx 96(%1), %%r8, %%r10 ;" /* c*C[4] */
+ "mulx 104(%1), %%r9, %%r11 ;" /* c*C[5] */
+ "addq %%r10, %%r9 ;"
+ "mulx 112(%1), %%r10, %%rax ;" /* c*C[6] */
+ "adcq %%r11, %%r10 ;"
+ "mulx 120(%1), %%r11, %%rcx ;" /* c*C[7] */
+ "adcq %%rax, %%r11 ;"
+ /****************************************/
+ "adcq $0, %%rcx ;"
+ "addq 64(%1), %%r8 ;"
+ "adcq 72(%1), %%r9 ;"
+ "adcq 80(%1), %%r10 ;"
+ "adcq 88(%1), %%r11 ;"
+ "adcq $0, %%rcx ;"
+ "imul %%rdx, %%rcx ;" /* c*C[4], cf=0 */
+ "addq %%rcx, %%r8 ;"
+ "adcq $0, %%r9 ;"
+ "movq %%r9, 40(%0) ;"
+ "adcq $0, %%r10 ;"
+ "movq %%r10, 48(%0) ;"
+ "adcq $0, %%r11 ;"
+ "movq %%r11, 56(%0) ;"
+ "mov $0, %%ecx ;"
+ "cmovc %%edx, %%ecx ;"
+ "addq %%rcx, %%r8 ;"
+ "movq %%r8, 32(%0) ;"
+ :
+ : "r"(c), "r"(a)
+ : "memory", "cc", "%rax", "%rcx", "%rdx", "%r8", "%r9", "%r10",
+ "%r11");
+}
+
+static void mul_256x256_integer_adx(u64 *const c, const u64 *const a,
+ const u64 *const b)
+{
+ asm volatile(
+ "movq (%1), %%rdx; " /* A[0] */
+ "mulx (%2), %%r8, %%r9; " /* A[0]*B[0] */
+ "xorl %%r10d, %%r10d ;"
+ "movq %%r8, (%0) ;"
+ "mulx 8(%2), %%r10, %%r11; " /* A[0]*B[1] */
+ "adox %%r9, %%r10 ;"
+ "movq %%r10, 8(%0) ;"
+ "mulx 16(%2), %%r15, %%r13; " /* A[0]*B[2] */
+ "adox %%r11, %%r15 ;"
+ "mulx 24(%2), %%r14, %%rdx; " /* A[0]*B[3] */
+ "adox %%r13, %%r14 ;"
+ "movq $0, %%rax ;"
+ /******************************************/
+ "adox %%rdx, %%rax ;"
+
+ "movq 8(%1), %%rdx; " /* A[1] */
+ "mulx (%2), %%r8, %%r9; " /* A[1]*B[0] */
+ "xorl %%r10d, %%r10d ;"
+ "adcx 8(%0), %%r8 ;"
+ "movq %%r8, 8(%0) ;"
+ "mulx 8(%2), %%r10, %%r11; " /* A[1]*B[1] */
+ "adox %%r9, %%r10 ;"
+ "adcx %%r15, %%r10 ;"
+ "movq %%r10, 16(%0) ;"
+ "mulx 16(%2), %%r15, %%r13; " /* A[1]*B[2] */
+ "adox %%r11, %%r15 ;"
+ "adcx %%r14, %%r15 ;"
+ "movq $0, %%r8 ;"
+ "mulx 24(%2), %%r14, %%rdx; " /* A[1]*B[3] */
+ "adox %%r13, %%r14 ;"
+ "adcx %%rax, %%r14 ;"
+ "movq $0, %%rax ;"
+ /******************************************/
+ "adox %%rdx, %%rax ;"
+ "adcx %%r8, %%rax ;"
+
+ "movq 16(%1), %%rdx; " /* A[2] */
+ "mulx (%2), %%r8, %%r9; " /* A[2]*B[0] */
+ "xorl %%r10d, %%r10d ;"
+ "adcx 16(%0), %%r8 ;"
+ "movq %%r8, 16(%0) ;"
+ "mulx 8(%2), %%r10, %%r11; " /* A[2]*B[1] */
+ "adox %%r9, %%r10 ;"
+ "adcx %%r15, %%r10 ;"
+ "movq %%r10, 24(%0) ;"
+ "mulx 16(%2), %%r15, %%r13; " /* A[2]*B[2] */
+ "adox %%r11, %%r15 ;"
+ "adcx %%r14, %%r15 ;"
+ "movq $0, %%r8 ;"
+ "mulx 24(%2), %%r14, %%rdx; " /* A[2]*B[3] */
+ "adox %%r13, %%r14 ;"
+ "adcx %%rax, %%r14 ;"
+ "movq $0, %%rax ;"
+ /******************************************/
+ "adox %%rdx, %%rax ;"
+ "adcx %%r8, %%rax ;"
+
+ "movq 24(%1), %%rdx; " /* A[3] */
+ "mulx (%2), %%r8, %%r9; " /* A[3]*B[0] */
+ "xorl %%r10d, %%r10d ;"
+ "adcx 24(%0), %%r8 ;"
+ "movq %%r8, 24(%0) ;"
+ "mulx 8(%2), %%r10, %%r11; " /* A[3]*B[1] */
+ "adox %%r9, %%r10 ;"
+ "adcx %%r15, %%r10 ;"
+ "movq %%r10, 32(%0) ;"
+ "mulx 16(%2), %%r15, %%r13; " /* A[3]*B[2] */
+ "adox %%r11, %%r15 ;"
+ "adcx %%r14, %%r15 ;"
+ "movq %%r15, 40(%0) ;"
+ "movq $0, %%r8 ;"
+ "mulx 24(%2), %%r14, %%rdx; " /* A[3]*B[3] */
+ "adox %%r13, %%r14 ;"
+ "adcx %%rax, %%r14 ;"
+ "movq %%r14, 48(%0) ;"
+ "movq $0, %%rax ;"
+ /******************************************/
+ "adox %%rdx, %%rax ;"
+ "adcx %%r8, %%rax ;"
+ "movq %%rax, 56(%0) ;"
+ :
+ : "r"(c), "r"(a), "r"(b)
+ : "memory", "cc", "%rax", "%rdx", "%r8", "%r9", "%r10", "%r11",
+ "%r13", "%r14", "%r15");
+}
+
+static void mul_256x256_integer_bmi2(u64 *const c, const u64 *const a,
+ const u64 *const b)
+{
+ asm volatile(
+ "movq (%1), %%rdx; " /* A[0] */
+ "mulx (%2), %%r8, %%r15; " /* A[0]*B[0] */
+ "movq %%r8, (%0) ;"
+ "mulx 8(%2), %%r10, %%rax; " /* A[0]*B[1] */
+ "addq %%r10, %%r15 ;"
+ "mulx 16(%2), %%r8, %%rbx; " /* A[0]*B[2] */
+ "adcq %%r8, %%rax ;"
+ "mulx 24(%2), %%r10, %%rcx; " /* A[0]*B[3] */
+ "adcq %%r10, %%rbx ;"
+ /******************************************/
+ "adcq $0, %%rcx ;"
+
+ "movq 8(%1), %%rdx; " /* A[1] */
+ "mulx (%2), %%r8, %%r9; " /* A[1]*B[0] */
+ "addq %%r15, %%r8 ;"
+ "movq %%r8, 8(%0) ;"
+ "mulx 8(%2), %%r10, %%r11; " /* A[1]*B[1] */
+ "adcq %%r10, %%r9 ;"
+ "mulx 16(%2), %%r8, %%r13; " /* A[1]*B[2] */
+ "adcq %%r8, %%r11 ;"
+ "mulx 24(%2), %%r10, %%r15; " /* A[1]*B[3] */
+ "adcq %%r10, %%r13 ;"
+ /******************************************/
+ "adcq $0, %%r15 ;"
+
+ "addq %%r9, %%rax ;"
+ "adcq %%r11, %%rbx ;"
+ "adcq %%r13, %%rcx ;"
+ "adcq $0, %%r15 ;"
+
+ "movq 16(%1), %%rdx; " /* A[2] */
+ "mulx (%2), %%r8, %%r9; " /* A[2]*B[0] */
+ "addq %%rax, %%r8 ;"
+ "movq %%r8, 16(%0) ;"
+ "mulx 8(%2), %%r10, %%r11; " /* A[2]*B[1] */
+ "adcq %%r10, %%r9 ;"
+ "mulx 16(%2), %%r8, %%r13; " /* A[2]*B[2] */
+ "adcq %%r8, %%r11 ;"
+ "mulx 24(%2), %%r10, %%rax; " /* A[2]*B[3] */
+ "adcq %%r10, %%r13 ;"
+ /******************************************/
+ "adcq $0, %%rax ;"
+
+ "addq %%r9, %%rbx ;"
+ "adcq %%r11, %%rcx ;"
+ "adcq %%r13, %%r15 ;"
+ "adcq $0, %%rax ;"
+
+ "movq 24(%1), %%rdx; " /* A[3] */
+ "mulx (%2), %%r8, %%r9; " /* A[3]*B[0] */
+ "addq %%rbx, %%r8 ;"
+ "movq %%r8, 24(%0) ;"
+ "mulx 8(%2), %%r10, %%r11; " /* A[3]*B[1] */
+ "adcq %%r10, %%r9 ;"
+ "mulx 16(%2), %%r8, %%r13; " /* A[3]*B[2] */
+ "adcq %%r8, %%r11 ;"
+ "mulx 24(%2), %%r10, %%rbx; " /* A[3]*B[3] */
+ "adcq %%r10, %%r13 ;"
+ /******************************************/
+ "adcq $0, %%rbx ;"
+
+ "addq %%r9, %%rcx ;"
+ "movq %%rcx, 32(%0) ;"
+ "adcq %%r11, %%r15 ;"
+ "movq %%r15, 40(%0) ;"
+ "adcq %%r13, %%rax ;"
+ "movq %%rax, 48(%0) ;"
+ "adcq $0, %%rbx ;"
+ "movq %%rbx, 56(%0) ;"
+ :
+ : "r"(c), "r"(a), "r"(b)
+ : "memory", "cc", "%rax", "%rbx", "%rcx", "%rdx", "%r8", "%r9",
+ "%r10", "%r11", "%r13", "%r15");
+}
+
+static void sqr_256x256_integer_adx(u64 *const c, const u64 *const a)
+{
+ asm volatile(
+ "movq (%1), %%rdx ;" /* A[0] */
+ "mulx 8(%1), %%r8, %%r14 ;" /* A[1]*A[0] */
+ "xorl %%r15d, %%r15d;"
+ "mulx 16(%1), %%r9, %%r10 ;" /* A[2]*A[0] */
+ "adcx %%r14, %%r9 ;"
+ "mulx 24(%1), %%rax, %%rcx ;" /* A[3]*A[0] */
+ "adcx %%rax, %%r10 ;"
+ "movq 24(%1), %%rdx ;" /* A[3] */
+ "mulx 8(%1), %%r11, %%rbx ;" /* A[1]*A[3] */
+ "adcx %%rcx, %%r11 ;"
+ "mulx 16(%1), %%rax, %%r13 ;" /* A[2]*A[3] */
+ "adcx %%rax, %%rbx ;"
+ "movq 8(%1), %%rdx ;" /* A[1] */
+ "adcx %%r15, %%r13 ;"
+ "mulx 16(%1), %%rax, %%rcx ;" /* A[2]*A[1] */
+ "movq $0, %%r14 ;"
+ /******************************************/
+ "adcx %%r15, %%r14 ;"
+
+ "xorl %%r15d, %%r15d;"
+ "adox %%rax, %%r10 ;"
+ "adcx %%r8, %%r8 ;"
+ "adox %%rcx, %%r11 ;"
+ "adcx %%r9, %%r9 ;"
+ "adox %%r15, %%rbx ;"
+ "adcx %%r10, %%r10 ;"
+ "adox %%r15, %%r13 ;"
+ "adcx %%r11, %%r11 ;"
+ "adox %%r15, %%r14 ;"
+ "adcx %%rbx, %%rbx ;"
+ "adcx %%r13, %%r13 ;"
+ "adcx %%r14, %%r14 ;"
+
+ "movq (%1), %%rdx ;"
+ "mulx %%rdx, %%rax, %%rcx ;" /* A[0]^2 */
+ /*******************/
+ "movq %%rax, 0(%0) ;"
+ "addq %%rcx, %%r8 ;"
+ "movq %%r8, 8(%0) ;"
+ "movq 8(%1), %%rdx ;"
+ "mulx %%rdx, %%rax, %%rcx ;" /* A[1]^2 */
+ "adcq %%rax, %%r9 ;"
+ "movq %%r9, 16(%0) ;"
+ "adcq %%rcx, %%r10 ;"
+ "movq %%r10, 24(%0) ;"
+ "movq 16(%1), %%rdx ;"
+ "mulx %%rdx, %%rax, %%rcx ;" /* A[2]^2 */
+ "adcq %%rax, %%r11 ;"
+ "movq %%r11, 32(%0) ;"
+ "adcq %%rcx, %%rbx ;"
+ "movq %%rbx, 40(%0) ;"
+ "movq 24(%1), %%rdx ;"
+ "mulx %%rdx, %%rax, %%rcx ;" /* A[3]^2 */
+ "adcq %%rax, %%r13 ;"
+ "movq %%r13, 48(%0) ;"
+ "adcq %%rcx, %%r14 ;"
+ "movq %%r14, 56(%0) ;"
+ :
+ : "r"(c), "r"(a)
+ : "memory", "cc", "%rax", "%rbx", "%rcx", "%rdx", "%r8", "%r9",
+ "%r10", "%r11", "%r13", "%r14", "%r15");
+}
+
+static void sqr_256x256_integer_bmi2(u64 *const c, const u64 *const a)
+{
+ asm volatile(
+ "movq 8(%1), %%rdx ;" /* A[1] */
+ "mulx (%1), %%r8, %%r9 ;" /* A[0]*A[1] */
+ "mulx 16(%1), %%r10, %%r11 ;" /* A[2]*A[1] */
+ "mulx 24(%1), %%rcx, %%r14 ;" /* A[3]*A[1] */
+
+ "movq 16(%1), %%rdx ;" /* A[2] */
+ "mulx 24(%1), %%r15, %%r13 ;" /* A[3]*A[2] */
+ "mulx (%1), %%rax, %%rdx ;" /* A[0]*A[2] */
+
+ "addq %%rax, %%r9 ;"
+ "adcq %%rdx, %%r10 ;"
+ "adcq %%rcx, %%r11 ;"
+ "adcq %%r14, %%r15 ;"
+ "adcq $0, %%r13 ;"
+ "movq $0, %%r14 ;"
+ "adcq $0, %%r14 ;"
+
+ "movq (%1), %%rdx ;" /* A[0] */
+ "mulx 24(%1), %%rax, %%rcx ;" /* A[0]*A[3] */
+
+ "addq %%rax, %%r10 ;"
+ "adcq %%rcx, %%r11 ;"
+ "adcq $0, %%r15 ;"
+ "adcq $0, %%r13 ;"
+ "adcq $0, %%r14 ;"
+
+ "shldq $1, %%r13, %%r14 ;"
+ "shldq $1, %%r15, %%r13 ;"
+ "shldq $1, %%r11, %%r15 ;"
+ "shldq $1, %%r10, %%r11 ;"
+ "shldq $1, %%r9, %%r10 ;"
+ "shldq $1, %%r8, %%r9 ;"
+ "shlq $1, %%r8 ;"
+
+ /*******************/
+ "mulx %%rdx, %%rax, %%rcx ;" /* A[0]^2 */
+ /*******************/
+ "movq %%rax, 0(%0) ;"
+ "addq %%rcx, %%r8 ;"
+ "movq %%r8, 8(%0) ;"
+ "movq 8(%1), %%rdx ;"
+ "mulx %%rdx, %%rax, %%rcx ;" /* A[1]^2 */
+ "adcq %%rax, %%r9 ;"
+ "movq %%r9, 16(%0) ;"
+ "adcq %%rcx, %%r10 ;"
+ "movq %%r10, 24(%0) ;"
+ "movq 16(%1), %%rdx ;"
+ "mulx %%rdx, %%rax, %%rcx ;" /* A[2]^2 */
+ "adcq %%rax, %%r11 ;"
+ "movq %%r11, 32(%0) ;"
+ "adcq %%rcx, %%r15 ;"
+ "movq %%r15, 40(%0) ;"
+ "movq 24(%1), %%rdx ;"
+ "mulx %%rdx, %%rax, %%rcx ;" /* A[3]^2 */
+ "adcq %%rax, %%r13 ;"
+ "movq %%r13, 48(%0) ;"
+ "adcq %%rcx, %%r14 ;"
+ "movq %%r14, 56(%0) ;"
+ :
+ : "r"(c), "r"(a)
+ : "memory", "cc", "%rax", "%rcx", "%rdx", "%r8", "%r9", "%r10",
+ "%r11", "%r13", "%r14", "%r15");
+}
+
+static void red_eltfp25519_1w_adx(u64 *const c, const u64 *const a)
+{
+ asm volatile(
+ "movl $38, %%edx ;" /* 2*c = 38 = 2^256 */
+ "mulx 32(%1), %%r8, %%r10 ;" /* c*C[4] */
+ "xorl %%ebx, %%ebx ;"
+ "adox (%1), %%r8 ;"
+ "mulx 40(%1), %%r9, %%r11 ;" /* c*C[5] */
+ "adcx %%r10, %%r9 ;"
+ "adox 8(%1), %%r9 ;"
+ "mulx 48(%1), %%r10, %%rax ;" /* c*C[6] */
+ "adcx %%r11, %%r10 ;"
+ "adox 16(%1), %%r10 ;"
+ "mulx 56(%1), %%r11, %%rcx ;" /* c*C[7] */
+ "adcx %%rax, %%r11 ;"
+ "adox 24(%1), %%r11 ;"
+ /***************************************/
+ "adcx %%rbx, %%rcx ;"
+ "adox %%rbx, %%rcx ;"
+ "imul %%rdx, %%rcx ;" /* c*C[4], cf=0, of=0 */
+ "adcx %%rcx, %%r8 ;"
+ "adcx %%rbx, %%r9 ;"
+ "movq %%r9, 8(%0) ;"
+ "adcx %%rbx, %%r10 ;"
+ "movq %%r10, 16(%0) ;"
+ "adcx %%rbx, %%r11 ;"
+ "movq %%r11, 24(%0) ;"
+ "mov $0, %%ecx ;"
+ "cmovc %%edx, %%ecx ;"
+ "addq %%rcx, %%r8 ;"
+ "movq %%r8, (%0) ;"
+ :
+ : "r"(c), "r"(a)
+ : "memory", "cc", "%rax", "%rbx", "%rcx", "%rdx", "%r8", "%r9",
+ "%r10", "%r11");
+}
+
+static void red_eltfp25519_1w_bmi2(u64 *const c, const u64 *const a)
+{
+ asm volatile(
+ "movl $38, %%edx ;" /* 2*c = 38 = 2^256 */
+ "mulx 32(%1), %%r8, %%r10 ;" /* c*C[4] */
+ "mulx 40(%1), %%r9, %%r11 ;" /* c*C[5] */
+ "addq %%r10, %%r9 ;"
+ "mulx 48(%1), %%r10, %%rax ;" /* c*C[6] */
+ "adcq %%r11, %%r10 ;"
+ "mulx 56(%1), %%r11, %%rcx ;" /* c*C[7] */
+ "adcq %%rax, %%r11 ;"
+ /***************************************/
+ "adcq $0, %%rcx ;"
+ "addq (%1), %%r8 ;"
+ "adcq 8(%1), %%r9 ;"
+ "adcq 16(%1), %%r10 ;"
+ "adcq 24(%1), %%r11 ;"
+ "adcq $0, %%rcx ;"
+ "imul %%rdx, %%rcx ;" /* c*C[4], cf=0 */
+ "addq %%rcx, %%r8 ;"
+ "adcq $0, %%r9 ;"
+ "movq %%r9, 8(%0) ;"
+ "adcq $0, %%r10 ;"
+ "movq %%r10, 16(%0) ;"
+ "adcq $0, %%r11 ;"
+ "movq %%r11, 24(%0) ;"
+ "mov $0, %%ecx ;"
+ "cmovc %%edx, %%ecx ;"
+ "addq %%rcx, %%r8 ;"
+ "movq %%r8, (%0) ;"
+ :
+ : "r"(c), "r"(a)
+ : "memory", "cc", "%rax", "%rcx", "%rdx", "%r8", "%r9", "%r10",
+ "%r11");
+}
+
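+/* The reduction helpers (red_eltfp25519_2w_* earlier and the 1w pair above)
+ * all exploit 2^256 == 38 (mod 2^255 - 19): the upper four limbs are folded
+ * into the lower four with a multiply by 38, then the leftover word is
+ * folded once more. A hypothetical single-width reference (illustrative
+ * only; like the asm, it yields a 256-bit residue, fully reduced later by
+ * fred_eltfp25519_1w):
+ *
+ *	static void red_eltfp25519_1w_ref(u64 c[4], const u64 a[8])
+ *	{
+ *		u128 t = 0;
+ *		u64 carry;
+ *		int i;
+ *
+ *		for (i = 0; i < 4; i++) {	// fold a[4..7]*38 into a[0..3]
+ *			t = (u128)a[i + 4] * 38 + a[i] + (u64)(t >> 64);
+ *			c[i] = (u64)t;
+ *		}
+ *		carry = (u64)(t >> 64);		// at most 38
+ *		t = (u128)c[0] + carry * 38;
+ *		c[0] = (u64)t;
+ *		for (i = 1; i < 4; i++) {
+ *			t = (u128)c[i] + (u64)(t >> 64);
+ *			c[i] = (u64)t;
+ *		}
+ *		c[0] += 38 * (u64)(t >> 64);	// final one-bit carry, as +38
+ *	}
+ */
+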
+static __always_inline void
+add_eltfp25519_1w_adx(u64 *const c, const u64 *const a, const u64 *const b)
+{
+ asm volatile(
+ "mov $38, %%eax ;"
+ "xorl %%ecx, %%ecx ;"
+ "movq (%2), %%r8 ;"
+ "adcx (%1), %%r8 ;"
+ "movq 8(%2), %%r9 ;"
+ "adcx 8(%1), %%r9 ;"
+ "movq 16(%2), %%r10 ;"
+ "adcx 16(%1), %%r10 ;"
+ "movq 24(%2), %%r11 ;"
+ "adcx 24(%1), %%r11 ;"
+ "cmovc %%eax, %%ecx ;"
+ "xorl %%eax, %%eax ;"
+ "adcx %%rcx, %%r8 ;"
+ "adcx %%rax, %%r9 ;"
+ "movq %%r9, 8(%0) ;"
+ "adcx %%rax, %%r10 ;"
+ "movq %%r10, 16(%0) ;"
+ "adcx %%rax, %%r11 ;"
+ "movq %%r11, 24(%0) ;"
+ "mov $38, %%ecx ;"
+ "cmovc %%ecx, %%eax ;"
+ "addq %%rax, %%r8 ;"
+ "movq %%r8, (%0) ;"
+ :
+ : "r"(c), "r"(a), "r"(b)
+ : "memory", "cc", "%rax", "%rcx", "%r8", "%r9", "%r10", "%r11");
+}
+
+static __always_inline void
+add_eltfp25519_1w_bmi2(u64 *const c, const u64 *const a, const u64 *const b)
+{
+ asm volatile(
+ "mov $38, %%eax ;"
+ "movq (%2), %%r8 ;"
+ "addq (%1), %%r8 ;"
+ "movq 8(%2), %%r9 ;"
+ "adcq 8(%1), %%r9 ;"
+ "movq 16(%2), %%r10 ;"
+ "adcq 16(%1), %%r10 ;"
+ "movq 24(%2), %%r11 ;"
+ "adcq 24(%1), %%r11 ;"
+ "mov $0, %%ecx ;"
+ "cmovc %%eax, %%ecx ;"
+ "addq %%rcx, %%r8 ;"
+ "adcq $0, %%r9 ;"
+ "movq %%r9, 8(%0) ;"
+ "adcq $0, %%r10 ;"
+ "movq %%r10, 16(%0) ;"
+ "adcq $0, %%r11 ;"
+ "movq %%r11, 24(%0) ;"
+ "mov $0, %%ecx ;"
+ "cmovc %%eax, %%ecx ;"
+ "addq %%rcx, %%r8 ;"
+ "movq %%r8, (%0) ;"
+ :
+ : "r"(c), "r"(a), "r"(b)
+ : "memory", "cc", "%rax", "%rcx", "%r8", "%r9", "%r10", "%r11");
+}
+
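+/* Subtraction mirrors the additions above: a borrow out of bit 256 is
+ * compensated by subtracting 38, since 2^256 == 38 (mod 2^255 - 19), and a
+ * second conditional subtraction absorbs the rare double borrow.
+ */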
+static __always_inline void
+sub_eltfp25519_1w(u64 *const c, const u64 *const a, const u64 *const b)
+{
+ asm volatile(
+ "mov $38, %%eax ;"
+ "movq (%1), %%r8 ;"
+ "subq (%2), %%r8 ;"
+ "movq 8(%1), %%r9 ;"
+ "sbbq 8(%2), %%r9 ;"
+ "movq 16(%1), %%r10 ;"
+ "sbbq 16(%2), %%r10 ;"
+ "movq 24(%1), %%r11 ;"
+ "sbbq 24(%2), %%r11 ;"
+ "mov $0, %%ecx ;"
+ "cmovc %%eax, %%ecx ;"
+ "subq %%rcx, %%r8 ;"
+ "sbbq $0, %%r9 ;"
+ "movq %%r9, 8(%0) ;"
+ "sbbq $0, %%r10 ;"
+ "movq %%r10, 16(%0) ;"
+ "sbbq $0, %%r11 ;"
+ "movq %%r11, 24(%0) ;"
+ "mov $0, %%ecx ;"
+ "cmovc %%eax, %%ecx ;"
+ "subq %%rcx, %%r8 ;"
+ "movq %%r8, (%0) ;"
+ :
+ : "r"(c), "r"(a), "r"(b)
+ : "memory", "cc", "%rax", "%rcx", "%r8", "%r9", "%r10", "%r11");
+}
+
+/* Multiplication by a24 = (A+2)/4 = (486662+2)/4 = 121666 */
+static __always_inline void
+mul_a24_eltfp25519_1w(u64 *const c, const u64 *const a)
+{
+ const u64 a24 = 121666;
+ asm volatile(
+ "movq %2, %%rdx ;"
+ "mulx (%1), %%r8, %%r10 ;"
+ "mulx 8(%1), %%r9, %%r11 ;"
+ "addq %%r10, %%r9 ;"
+ "mulx 16(%1), %%r10, %%rax ;"
+ "adcq %%r11, %%r10 ;"
+ "mulx 24(%1), %%r11, %%rcx ;"
+ "adcq %%rax, %%r11 ;"
+ /**************************/
+ "adcq $0, %%rcx ;"
+ "movl $38, %%edx ;" /* 2*c = 38 = 2^256 mod 2^255-19*/
+ "imul %%rdx, %%rcx ;"
+ "addq %%rcx, %%r8 ;"
+ "adcq $0, %%r9 ;"
+ "movq %%r9, 8(%0) ;"
+ "adcq $0, %%r10 ;"
+ "movq %%r10, 16(%0) ;"
+ "adcq $0, %%r11 ;"
+ "movq %%r11, 24(%0) ;"
+ "mov $0, %%ecx ;"
+ "cmovc %%edx, %%ecx ;"
+ "addq %%rcx, %%r8 ;"
+ "movq %%r8, (%0) ;"
+ :
+ : "r"(c), "r"(a), "r"(a24)
+ : "memory", "cc", "%rax", "%rcx", "%rdx", "%r8", "%r9", "%r10",
+ "%r11");
+}
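+
+/* The constant can be checked at compile time (illustrative only):
+ *
+ *	_Static_assert((486662 + 2) / 4 == 121666, "a24 == (A + 2)/4");
+ */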
+
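+/* Field inversion via Fermat's little theorem: both variants below compute
+ * c = a^(p-2) = a^(2^255 - 21) mod p, using an addition chain of 254
+ * squarings and 11 multiplications.
+ */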
+static void inv_eltfp25519_1w_adx(u64 *const c, const u64 *const a)
+{
+ struct {
+ eltfp25519_1w_buffer buffer;
+ eltfp25519_1w x0, x1, x2;
+ } __aligned(32) m;
+ u64 *T[4];
+
+ T[0] = m.x0;
+ T[1] = c; /* x^(-1) */
+ T[2] = m.x1;
+ T[3] = m.x2;
+
+ copy_eltfp25519_1w(T[1], a);
+ sqrn_eltfp25519_1w_adx(T[1], 1);
+ copy_eltfp25519_1w(T[2], T[1]);
+ sqrn_eltfp25519_1w_adx(T[2], 2);
+ mul_eltfp25519_1w_adx(T[0], a, T[2]);
+ mul_eltfp25519_1w_adx(T[1], T[1], T[0]);
+ copy_eltfp25519_1w(T[2], T[1]);
+ sqrn_eltfp25519_1w_adx(T[2], 1);
+ mul_eltfp25519_1w_adx(T[0], T[0], T[2]);
+ copy_eltfp25519_1w(T[2], T[0]);
+ sqrn_eltfp25519_1w_adx(T[2], 5);
+ mul_eltfp25519_1w_adx(T[0], T[0], T[2]);
+ copy_eltfp25519_1w(T[2], T[0]);
+ sqrn_eltfp25519_1w_adx(T[2], 10);
+ mul_eltfp25519_1w_adx(T[2], T[2], T[0]);
+ copy_eltfp25519_1w(T[3], T[2]);
+ sqrn_eltfp25519_1w_adx(T[3], 20);
+ mul_eltfp25519_1w_adx(T[3], T[3], T[2]);
+ sqrn_eltfp25519_1w_adx(T[3], 10);
+ mul_eltfp25519_1w_adx(T[3], T[3], T[0]);
+ copy_eltfp25519_1w(T[0], T[3]);
+ sqrn_eltfp25519_1w_adx(T[0], 50);
+ mul_eltfp25519_1w_adx(T[0], T[0], T[3]);
+ copy_eltfp25519_1w(T[2], T[0]);
+ sqrn_eltfp25519_1w_adx(T[2], 100);
+ mul_eltfp25519_1w_adx(T[2], T[2], T[0]);
+ sqrn_eltfp25519_1w_adx(T[2], 50);
+ mul_eltfp25519_1w_adx(T[2], T[2], T[3]);
+ sqrn_eltfp25519_1w_adx(T[2], 5);
+ mul_eltfp25519_1w_adx(T[1], T[1], T[2]);
+
+ memzero_explicit(&m, sizeof(m));
+}
+
+static void inv_eltfp25519_1w_bmi2(u64 *const c, const u64 *const a)
+{
+ struct {
+ eltfp25519_1w_buffer buffer;
+ eltfp25519_1w x0, x1, x2;
+ } __aligned(32) m;
+ u64 *T[4];
+
+ T[0] = m.x0;
+ T[1] = c; /* x^(-1) */
+ T[2] = m.x1;
+ T[3] = m.x2;
+
+ copy_eltfp25519_1w(T[1], a);
+ sqrn_eltfp25519_1w_bmi2(T[1], 1);
+ copy_eltfp25519_1w(T[2], T[1]);
+ sqrn_eltfp25519_1w_bmi2(T[2], 2);
+ mul_eltfp25519_1w_bmi2(T[0], a, T[2]);
+ mul_eltfp25519_1w_bmi2(T[1], T[1], T[0]);
+ copy_eltfp25519_1w(T[2], T[1]);
+ sqrn_eltfp25519_1w_bmi2(T[2], 1);
+ mul_eltfp25519_1w_bmi2(T[0], T[0], T[2]);
+ copy_eltfp25519_1w(T[2], T[0]);
+ sqrn_eltfp25519_1w_bmi2(T[2], 5);
+ mul_eltfp25519_1w_bmi2(T[0], T[0], T[2]);
+ copy_eltfp25519_1w(T[2], T[0]);
+ sqrn_eltfp25519_1w_bmi2(T[2], 10);
+ mul_eltfp25519_1w_bmi2(T[2], T[2], T[0]);
+ copy_eltfp25519_1w(T[3], T[2]);
+ sqrn_eltfp25519_1w_bmi2(T[3], 20);
+ mul_eltfp25519_1w_bmi2(T[3], T[3], T[2]);
+ sqrn_eltfp25519_1w_bmi2(T[3], 10);
+ mul_eltfp25519_1w_bmi2(T[3], T[3], T[0]);
+ copy_eltfp25519_1w(T[0], T[3]);
+ sqrn_eltfp25519_1w_bmi2(T[0], 50);
+ mul_eltfp25519_1w_bmi2(T[0], T[0], T[3]);
+ copy_eltfp25519_1w(T[2], T[0]);
+ sqrn_eltfp25519_1w_bmi2(T[2], 100);
+ mul_eltfp25519_1w_bmi2(T[2], T[2], T[0]);
+ sqrn_eltfp25519_1w_bmi2(T[2], 50);
+ mul_eltfp25519_1w_bmi2(T[2], T[2], T[3]);
+ sqrn_eltfp25519_1w_bmi2(T[2], 5);
+ mul_eltfp25519_1w_bmi2(T[1], T[1], T[2]);
+
+ memzero_explicit(&m, sizeof(m));
+}
+
+/* Given c, a 256-bit number, fred_eltfp25519_1w updates c
+ * in place with an equivalent value such that 0 <= c < 2**255-19.
+ */
+static __always_inline void fred_eltfp25519_1w(u64 *const c)
+{
+ u64 tmp0 = 38, tmp1 = 19;
+ asm volatile(
+ "btrq $63, %3 ;" /* Put bit 255 in carry flag and clear */
+ "cmovncl %k5, %k4 ;" /* c[255] ? 38 : 19 */
+
+ /* Add either 19 or 38 to c */
+ "addq %4, %0 ;"
+ "adcq $0, %1 ;"
+ "adcq $0, %2 ;"
+ "adcq $0, %3 ;"
+
+ /* Test for bit 255 again; only triggered on overflow modulo 2^255-19 */
+ "movl $0, %k4 ;"
+ "cmovnsl %k5, %k4 ;" /* c[255] ? 0 : 19 */
+ "btrq $63, %3 ;" /* Clear bit 255 */
+
+ /* Subtract 19 if necessary */
+ "subq %4, %0 ;"
+ "sbbq $0, %1 ;"
+ "sbbq $0, %2 ;"
+ "sbbq $0, %3 ;"
+
+ : "+r"(c[0]), "+r"(c[1]), "+r"(c[2]), "+r"(c[3]), "+r"(tmp0),
+ "+r"(tmp1)
+ :
+ : "memory", "cc");
+}
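
The trick relies on 2^255 = 19 (mod p): clear bit 255 and add 19 for it (38 in
total when the bit was set), then add a probe 19; if the sum still fits below
2^255, the value was already < p and the probe is subtracted back out. A
branch-free portable sketch under the same 4x64 limb assumption (fred_sketch
is a made-up name):

	static void fred_sketch(u64 c[4])
	{
		u64 add = 19 + 19 * (c[3] >> 63);	/* 38 if bit 255 set */
		unsigned __int128 t;
		u64 sub, borrow, v;
		int i;

		c[3] &= ~(1ULL << 63);
		t = (unsigned __int128)c[0] + add;
		c[0] = (u64)t;
		for (i = 1; i < 4; i++) {
			t = (t >> 64) + c[i];
			c[i] = (u64)t;
		}

		/* c + 19 stayed below 2^255: c was < p, undo the probe. */
		sub = 19 * (1 ^ (c[3] >> 63));
		c[3] &= ~(1ULL << 63);
		v = c[0] - sub;
		borrow = c[0] < sub;
		c[0] = v;
		for (i = 1; i < 4; i++) {
			v = c[i] - borrow;
			borrow = c[i] < borrow;
			c[i] = v;
		}
	}
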
+
+static __always_inline void cswap(u8 bit, u64 *const px, u64 *const py)
+{
+ u64 temp;
+ asm volatile(
+ "test %9, %9 ;"
+ "movq %0, %8 ;"
+ "cmovnzq %4, %0 ;"
+ "cmovnzq %8, %4 ;"
+ "movq %1, %8 ;"
+ "cmovnzq %5, %1 ;"
+ "cmovnzq %8, %5 ;"
+ "movq %2, %8 ;"
+ "cmovnzq %6, %2 ;"
+ "cmovnzq %8, %6 ;"
+ "movq %3, %8 ;"
+ "cmovnzq %7, %3 ;"
+ "cmovnzq %8, %7 ;"
+ : "+r"(px[0]), "+r"(px[1]), "+r"(px[2]), "+r"(px[3]),
+ "+r"(py[0]), "+r"(py[1]), "+r"(py[2]), "+r"(py[3]),
+ "=r"(temp)
+ : "r"(bit)
+ : "cc"
+ );
+}
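
cswap swaps the two operands exactly when bit is set, using cmov so the
instruction and memory trace is identical either way. The equivalent
mask-and-xor formulation in portable C (a sketch; cswap_sketch is a made-up
name):

	static void cswap_sketch(u8 bit, u64 *const px, u64 *const py)
	{
		u64 mask = 0 - (u64)(bit & 1);	/* all-ones iff bit == 1 */
		int i;

		for (i = 0; i < 4; i++) {
			u64 d = mask & (px[i] ^ py[i]);

			px[i] ^= d;
			py[i] ^= d;
		}
	}
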
+
+static __always_inline void cselect(u8 bit, u64 *const px, const u64 *const py)
+{
+ asm volatile(
+ "test %4, %4 ;"
+ "cmovnzq %5, %0 ;"
+ "cmovnzq %6, %1 ;"
+ "cmovnzq %7, %2 ;"
+ "cmovnzq %8, %3 ;"
+ : "+r"(px[0]), "+r"(px[1]), "+r"(px[2]), "+r"(px[3])
+ : "r"(bit), "rm"(py[0]), "rm"(py[1]), "rm"(py[2]), "rm"(py[3])
+ : "cc"
+ );
+}
+
+static void curve25519_adx(u8 shared[CURVE25519_KEY_SIZE],
+ const u8 private_key[CURVE25519_KEY_SIZE],
+ const u8 session_key[CURVE25519_KEY_SIZE])
+{
+ struct {
+ u64 buffer[4 * NUM_WORDS_ELTFP25519];
+ u64 coordinates[4 * NUM_WORDS_ELTFP25519];
+ u64 workspace[6 * NUM_WORDS_ELTFP25519];
+ u8 session[CURVE25519_KEY_SIZE];
+ u8 private[CURVE25519_KEY_SIZE];
+ } __aligned(32) m;
+
+ int i = 0, j = 0;
+ u64 prev = 0;
+ u64 *const X1 = (u64 *)m.session;
+ u64 *const key = (u64 *)m.private;
+ u64 *const Px = m.coordinates + 0;
+ u64 *const Pz = m.coordinates + 4;
+ u64 *const Qx = m.coordinates + 8;
+ u64 *const Qz = m.coordinates + 12;
+ u64 *const X2 = Qx;
+ u64 *const Z2 = Qz;
+ u64 *const X3 = Px;
+ u64 *const Z3 = Pz;
+ u64 *const X2Z2 = Qx;
+ u64 *const X3Z3 = Px;
+
+ u64 *const A = m.workspace + 0;
+ u64 *const B = m.workspace + 4;
+ u64 *const D = m.workspace + 8;
+ u64 *const C = m.workspace + 12;
+ u64 *const DA = m.workspace + 16;
+ u64 *const CB = m.workspace + 20;
+ u64 *const AB = A;
+ u64 *const DC = D;
+ u64 *const DACB = DA;
+
+ memcpy(m.private, private_key, sizeof(m.private));
+ memcpy(m.session, session_key, sizeof(m.session));
+
+ curve25519_clamp_secret(m.private);
+
+ /* As in the draft:
+ * When receiving such an array, implementations of curve25519
+ * MUST mask the most-significant bit in the final byte. This
+ * is done to preserve compatibility with point formats which
+ * reserve the sign bit for use in other protocols and to
+ * increase resistance to implementation fingerprinting.
+ */
+ m.session[CURVE25519_KEY_SIZE - 1] &= (1 << (255 % 8)) - 1;
+
+ copy_eltfp25519_1w(Px, X1);
+ setzero_eltfp25519_1w(Pz);
+ setzero_eltfp25519_1w(Qx);
+ setzero_eltfp25519_1w(Qz);
+
+ Pz[0] = 1;
+ Qx[0] = 1;
+
+ /* main-loop */
+ prev = 0;
+ j = 62;
+ for (i = 3; i >= 0; --i) {
+ while (j >= 0) {
+ u64 bit = (key[i] >> j) & 0x1;
+ u64 swap = bit ^ prev;
+ prev = bit;
+
+ add_eltfp25519_1w_adx(A, X2, Z2); /* A = (X2+Z2) */
+ sub_eltfp25519_1w(B, X2, Z2); /* B = (X2-Z2) */
+ add_eltfp25519_1w_adx(C, X3, Z3); /* C = (X3+Z3) */
+ sub_eltfp25519_1w(D, X3, Z3); /* D = (X3-Z3) */
+ mul_eltfp25519_2w_adx(DACB, AB, DC); /* [DA|CB] = [A|B]*[D|C] */
+
+ cselect(swap, A, C);
+ cselect(swap, B, D);
+
+ sqr_eltfp25519_2w_adx(AB); /* [AA|BB] = [A^2|B^2] */
+ add_eltfp25519_1w_adx(X3, DA, CB); /* X3 = (DA+CB) */
+ sub_eltfp25519_1w(Z3, DA, CB); /* Z3 = (DA-CB) */
+ sqr_eltfp25519_2w_adx(X3Z3); /* [X3|Z3] = [(DA+CB)|(DA+CB)]^2 */
+
+ copy_eltfp25519_1w(X2, B); /* X2 = B^2 */
+ sub_eltfp25519_1w(Z2, A, B); /* Z2 = E = AA-BB */
+
+ mul_a24_eltfp25519_1w(B, Z2); /* B = a24*E */
+ add_eltfp25519_1w_adx(B, B, X2); /* B = a24*E+B */
+ mul_eltfp25519_2w_adx(X2Z2, X2Z2, AB); /* [X2|Z2] = [B|E]*[A|a24*E+B] */
+ mul_eltfp25519_1w_adx(Z3, Z3, X1); /* Z3 = Z3*X1 */
+ --j;
+ }
+ j = 63;
+ }
+
+ inv_eltfp25519_1w_adx(A, Qz);
+ mul_eltfp25519_1w_adx((u64 *)shared, Qx, A);
+ fred_eltfp25519_1w((u64 *)shared);
+
+ memzero_explicit(&m, sizeof(m));
+}
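
The ladder consumes the clamped scalar MSB-first from bit 254 (clamping
guarantees bit 255 is clear), and the swap is lazy: swap = bit ^ prev is
nonzero only when consecutive key bits differ, so a full cswap per step
collapses into the two cselect calls inside the step. The scan order in
isolation, as a runnable sketch (the step callback stands in for one ladder
iteration):

	static void ladder_scan_sketch(const u64 key[4],
				       void (*step)(u64 swap, void *ctx),
				       void *ctx)
	{
		u64 prev = 0;
		int i, j = 62;

		for (i = 3; i >= 0; --i) {
			for (; j >= 0; --j) {
				u64 bit = (key[i] >> j) & 1;

				step(bit ^ prev, ctx);	/* select on a flip */
				prev = bit;
			}
			j = 63;
		}
	}
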
+
+static void curve25519_adx_base(u8 session_key[CURVE25519_KEY_SIZE],
+ const u8 private_key[CURVE25519_KEY_SIZE])
+{
+ struct {
+ u64 buffer[4 * NUM_WORDS_ELTFP25519];
+ u64 coordinates[4 * NUM_WORDS_ELTFP25519];
+ u64 workspace[4 * NUM_WORDS_ELTFP25519];
+ u8 private[CURVE25519_KEY_SIZE];
+ } __aligned(32) m;
+
+ const int ite[4] = { 64, 64, 64, 63 };
+ const int q = 3;
+ u64 swap = 1;
+
+ int i = 0, j = 0, k = 0;
+ u64 *const key = (u64 *)m.private;
+ u64 *const Ur1 = m.coordinates + 0;
+ u64 *const Zr1 = m.coordinates + 4;
+ u64 *const Ur2 = m.coordinates + 8;
+ u64 *const Zr2 = m.coordinates + 12;
+
+ u64 *const UZr1 = m.coordinates + 0;
+ u64 *const ZUr2 = m.coordinates + 8;
+
+ u64 *const A = m.workspace + 0;
+ u64 *const B = m.workspace + 4;
+ u64 *const C = m.workspace + 8;
+ u64 *const D = m.workspace + 12;
+
+ u64 *const AB = m.workspace + 0;
+ u64 *const CD = m.workspace + 8;
+
+ const u64 *const P = table_ladder_8k;
+
+ memcpy(m.private, private_key, sizeof(m.private));
+
+ curve25519_clamp_secret(m.private);
+
+ setzero_eltfp25519_1w(Ur1);
+ setzero_eltfp25519_1w(Zr1);
+ setzero_eltfp25519_1w(Zr2);
+ Ur1[0] = 1;
+ Zr1[0] = 1;
+ Zr2[0] = 1;
+
+ /* G-S */
+ Ur2[3] = 0x1eaecdeee27cab34UL;
+ Ur2[2] = 0xadc7a0b9235d48e2UL;
+ Ur2[1] = 0xbbf095ae14b2edf8UL;
+ Ur2[0] = 0x7e94e1fec82faabdUL;
+
+ /* main-loop */
+ j = q;
+ for (i = 0; i < NUM_WORDS_ELTFP25519; ++i) {
+ while (j < ite[i]) {
+ u64 bit = (key[i] >> j) & 0x1;
+ k = (64 * i + j - q);
+ swap = swap ^ bit;
+ cswap(swap, Ur1, Ur2);
+ cswap(swap, Zr1, Zr2);
+ swap = bit;
+ /* Addition */
+ sub_eltfp25519_1w(B, Ur1, Zr1); /* B = Ur1-Zr1 */
+ add_eltfp25519_1w_adx(A, Ur1, Zr1); /* A = Ur1+Zr1 */
+ mul_eltfp25519_1w_adx(C, &P[4 * k], B); /* C = M0*B */
+ sub_eltfp25519_1w(B, A, C); /* B = (Ur1+Zr1) - M*(Ur1-Zr1) */
+ add_eltfp25519_1w_adx(A, A, C); /* A = (Ur1+Zr1) + M*(Ur1-Zr1) */
+ sqr_eltfp25519_2w_adx(AB); /* A = A^2 | B = B^2 */
+ mul_eltfp25519_2w_adx(UZr1, ZUr2, AB); /* Ur1 = Zr2*A | Zr1 = Ur2*B */
+ ++j;
+ }
+ j = 0;
+ }
+
+ /* Doubling */
+ for (i = 0; i < q; ++i) {
+ add_eltfp25519_1w_adx(A, Ur1, Zr1); /* A = Ur1+Zr1 */
+ sub_eltfp25519_1w(B, Ur1, Zr1); /* B = Ur1-Zr1 */
+ sqr_eltfp25519_2w_adx(AB); /* A = A**2 B = B**2 */
+ copy_eltfp25519_1w(C, B); /* C = B */
+ sub_eltfp25519_1w(B, A, B); /* B = A-B */
+ mul_a24_eltfp25519_1w(D, B); /* D = a24*B */
+ add_eltfp25519_1w_adx(D, D, C); /* D = D+C */
+ mul_eltfp25519_2w_adx(UZr1, AB, CD); /* Ur1 = A*C | Zr1 = B*D */
+ }
+
+ /* Convert to affine coordinates */
+ inv_eltfp25519_1w_adx(A, Zr1);
+ mul_eltfp25519_1w_adx((u64 *)session_key, Ur1, A);
+ fred_eltfp25519_1w((u64 *)session_key);
+
+ memzero_explicit(&m, sizeof(m));
+}
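
curve25519_clamp_secret (from the shared curve25519 header) is what makes the
q = 3 start offset above valid: with the low three bits cleared the scalar is
a multiple of 8, so the main loop walks bits 3..254 against the
table_ladder_8k precomputation and the three trailing doublings restore the
2^3 factor. For reference, the RFC 7748 clamp (clamp_sketch is a made-up name
for what the real helper does):

	static void clamp_sketch(u8 secret[CURVE25519_KEY_SIZE])
	{
		secret[0] &= 248;			/* clear bits 0..2 */
		secret[CURVE25519_KEY_SIZE - 1] &= 127;	/* clear bit 255 */
		secret[CURVE25519_KEY_SIZE - 1] |= 64;	/* set bit 254 */
	}
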
+
+static void curve25519_bmi2(u8 shared[CURVE25519_KEY_SIZE],
+ const u8 private_key[CURVE25519_KEY_SIZE],
+ const u8 session_key[CURVE25519_KEY_SIZE])
+{
+ struct {
+ u64 buffer[4 * NUM_WORDS_ELTFP25519];
+ u64 coordinates[4 * NUM_WORDS_ELTFP25519];
+ u64 workspace[6 * NUM_WORDS_ELTFP25519];
+ u8 session[CURVE25519_KEY_SIZE];
+ u8 private[CURVE25519_KEY_SIZE];
+ } __aligned(32) m;
+
+ int i = 0, j = 0;
+ u64 prev = 0;
+ u64 *const X1 = (u64 *)m.session;
+ u64 *const key = (u64 *)m.private;
+ u64 *const Px = m.coordinates + 0;
+ u64 *const Pz = m.coordinates + 4;
+ u64 *const Qx = m.coordinates + 8;
+ u64 *const Qz = m.coordinates + 12;
+ u64 *const X2 = Qx;
+ u64 *const Z2 = Qz;
+ u64 *const X3 = Px;
+ u64 *const Z3 = Pz;
+ u64 *const X2Z2 = Qx;
+ u64 *const X3Z3 = Px;
+
+ u64 *const A = m.workspace + 0;
+ u64 *const B = m.workspace + 4;
+ u64 *const D = m.workspace + 8;
+ u64 *const C = m.workspace + 12;
+ u64 *const DA = m.workspace + 16;
+ u64 *const CB = m.workspace + 20;
+ u64 *const AB = A;
+ u64 *const DC = D;
+ u64 *const DACB = DA;
+
+ memcpy(m.private, private_key, sizeof(m.private));
+ memcpy(m.session, session_key, sizeof(m.session));
+
+ curve25519_clamp_secret(m.private);
+
+ /* As in the draft:
+ * When receiving such an array, implementations of curve25519
+ * MUST mask the most-significant bit in the final byte. This
+ * is done to preserve compatibility with point formats which
+ * reserve the sign bit for use in other protocols and to
+ * increase resistance to implementation fingerprinting.
+ */
+ m.session[CURVE25519_KEY_SIZE - 1] &= (1 << (255 % 8)) - 1;
+
+ copy_eltfp25519_1w(Px, X1);
+ setzero_eltfp25519_1w(Pz);
+ setzero_eltfp25519_1w(Qx);
+ setzero_eltfp25519_1w(Qz);
+
+ Pz[0] = 1;
+ Qx[0] = 1;
+
+ /* main-loop */
+ prev = 0;
+ j = 62;
+ for (i = 3; i >= 0; --i) {
+ while (j >= 0) {
+ u64 bit = (key[i] >> j) & 0x1;
+ u64 swap = bit ^ prev;
+ prev = bit;
+
+ add_eltfp25519_1w_bmi2(A, X2, Z2); /* A = (X2+Z2) */
+ sub_eltfp25519_1w(B, X2, Z2); /* B = (X2-Z2) */
+ add_eltfp25519_1w_bmi2(C, X3, Z3); /* C = (X3+Z3) */
+ sub_eltfp25519_1w(D, X3, Z3); /* D = (X3-Z3) */
+ mul_eltfp25519_2w_bmi2(DACB, AB, DC); /* [DA|CB] = [A|B]*[D|C] */
+
+ cselect(swap, A, C);
+ cselect(swap, B, D);
+
+ sqr_eltfp25519_2w_bmi2(AB); /* [AA|BB] = [A^2|B^2] */
+ add_eltfp25519_1w_bmi2(X3, DA, CB); /* X3 = (DA+CB) */
+ sub_eltfp25519_1w(Z3, DA, CB); /* Z3 = (DA-CB) */
+ sqr_eltfp25519_2w_bmi2(X3Z3); /* [X3|Z3] = [(DA+CB)|(DA+CB)]^2 */
+
+ copy_eltfp25519_1w(X2, B); /* X2 = B^2 */
+ sub_eltfp25519_1w(Z2, A, B); /* Z2 = E = AA-BB */
+
+ mul_a24_eltfp25519_1w(B, Z2); /* B = a24*E */
+ add_eltfp25519_1w_bmi2(B, B, X2); /* B = a24*E+B */
+ mul_eltfp25519_2w_bmi2(X2Z2, X2Z2, AB); /* [X2|Z2] = [B|E]*[A|a24*E+B] */
+ mul_eltfp25519_1w_bmi2(Z3, Z3, X1); /* Z3 = Z3*X1 */
+ --j;
+ }
+ j = 63;
+ }
+
+ inv_eltfp25519_1w_bmi2(A, Qz);
+ mul_eltfp25519_1w_bmi2((u64 *)shared, Qx, A);
+ fred_eltfp25519_1w((u64 *)shared);
+
+ memzero_explicit(&m, sizeof(m));
+}
+
+static void curve25519_bmi2_base(u8 session_key[CURVE25519_KEY_SIZE],
+ const u8 private_key[CURVE25519_KEY_SIZE])
+{
+ struct {
+ u64 buffer[4 * NUM_WORDS_ELTFP25519];
+ u64 coordinates[4 * NUM_WORDS_ELTFP25519];
+ u64 workspace[4 * NUM_WORDS_ELTFP25519];
+ u8 private[CURVE25519_KEY_SIZE];
+ } __aligned(32) m;
+
+ const int ite[4] = { 64, 64, 64, 63 };
+ const int q = 3;
+ u64 swap = 1;
+
+ int i = 0, j = 0, k = 0;
+ u64 *const key = (u64 *)m.private;
+ u64 *const Ur1 = m.coordinates + 0;
+ u64 *const Zr1 = m.coordinates + 4;
+ u64 *const Ur2 = m.coordinates + 8;
+ u64 *const Zr2 = m.coordinates + 12;
+
+ u64 *const UZr1 = m.coordinates + 0;
+ u64 *const ZUr2 = m.coordinates + 8;
+
+ u64 *const A = m.workspace + 0;
+ u64 *const B = m.workspace + 4;
+ u64 *const C = m.workspace + 8;
+ u64 *const D = m.workspace + 12;
+
+ u64 *const AB = m.workspace + 0;
+ u64 *const CD = m.workspace + 8;
+
+ const u64 *const P = table_ladder_8k;
+
+ memcpy(m.private, private_key, sizeof(m.private));
+
+ curve25519_clamp_secret(m.private);
+
+ setzero_eltfp25519_1w(Ur1);
+ setzero_eltfp25519_1w(Zr1);
+ setzero_eltfp25519_1w(Zr2);
+ Ur1[0] = 1;
+ Zr1[0] = 1;
+ Zr2[0] = 1;
+
+ /* G-S */
+ Ur2[3] = 0x1eaecdeee27cab34UL;
+ Ur2[2] = 0xadc7a0b9235d48e2UL;
+ Ur2[1] = 0xbbf095ae14b2edf8UL;
+ Ur2[0] = 0x7e94e1fec82faabdUL;
+
+ /* main-loop */
+ j = q;
+ for (i = 0; i < NUM_WORDS_ELTFP25519; ++i) {
+ while (j < ite[i]) {
+ u64 bit = (key[i] >> j) & 0x1;
+ k = (64 * i + j - q);
+ swap = swap ^ bit;
+ cswap(swap, Ur1, Ur2);
+ cswap(swap, Zr1, Zr2);
+ swap = bit;
+ /* Addition */
+ sub_eltfp25519_1w(B, Ur1, Zr1); /* B = Ur1-Zr1 */
+ add_eltfp25519_1w_bmi2(A, Ur1, Zr1); /* A = Ur1+Zr1 */
+ mul_eltfp25519_1w_bmi2(C, &P[4 * k], B); /* C = M0*B */
+ sub_eltfp25519_1w(B, A, C); /* B = (Ur1+Zr1) - M*(Ur1-Zr1) */
+ add_eltfp25519_1w_bmi2(A, A, C); /* A = (Ur1+Zr1) + M*(Ur1-Zr1) */
+ sqr_eltfp25519_2w_bmi2(AB); /* A = A^2 | B = B^2 */
+ mul_eltfp25519_2w_bmi2(UZr1, ZUr2, AB); /* Ur1 = Zr2*A | Zr1 = Ur2*B */
+ ++j;
+ }
+ j = 0;
+ }
+
+ /* Doubling */
+ for (i = 0; i < q; ++i) {
+ add_eltfp25519_1w_bmi2(A, Ur1, Zr1); /* A = Ur1+Zr1 */
+ sub_eltfp25519_1w(B, Ur1, Zr1); /* B = Ur1-Zr1 */
+ sqr_eltfp25519_2w_bmi2(AB); /* A = A**2 B = B**2 */
+ copy_eltfp25519_1w(C, B); /* C = B */
+ sub_eltfp25519_1w(B, A, B); /* B = A-B */
+ mul_a24_eltfp25519_1w(D, B); /* D = a24*B */
+ add_eltfp25519_1w_bmi2(D, D, C); /* D = D+C */
+ mul_eltfp25519_2w_bmi2(UZr1, AB, CD); /* Ur1 = A*C | Zr1 = B*D */
+ }
+
+ /* Convert to affine coordinates */
+ inv_eltfp25519_1w_bmi2(A, Zr1);
+ mul_eltfp25519_1w_bmi2((u64 *)session_key, Ur1, A);
+ fred_eltfp25519_1w((u64 *)session_key);
+
+ memzero_explicit(&m, sizeof(m));
+}
+
+void curve25519_arch(u8 mypublic[CURVE25519_KEY_SIZE],
+ const u8 secret[CURVE25519_KEY_SIZE],
+ const u8 basepoint[CURVE25519_KEY_SIZE])
+{
+ if (static_branch_likely(&curve25519_use_adx))
+ curve25519_adx(mypublic, secret, basepoint);
+ else if (static_branch_likely(&curve25519_use_bmi2))
+ curve25519_bmi2(mypublic, secret, basepoint);
+ else
+ curve25519_generic(mypublic, secret, basepoint);
+}
+EXPORT_SYMBOL(curve25519_arch);
+
+void curve25519_base_arch(u8 pub[CURVE25519_KEY_SIZE],
+ const u8 secret[CURVE25519_KEY_SIZE])
+{
+ if (static_branch_likely(&curve25519_use_adx))
+ curve25519_adx_base(pub, secret);
+ else if (static_branch_likely(&curve25519_use_bmi2))
+ curve25519_bmi2_base(pub, secret);
+ else
+ curve25519_generic(pub, secret, curve25519_base_point);
+}
+EXPORT_SYMBOL(curve25519_base_arch);
+
+static int curve25519_set_secret(struct crypto_kpp *tfm, const void *buf,
+ unsigned int len)
+{
+ u8 *secret = kpp_tfm_ctx(tfm);
+
+ if (!len)
+ curve25519_generate_secret(secret);
+ else if (len == CURVE25519_KEY_SIZE &&
+ crypto_memneq(buf, curve25519_null_point, CURVE25519_KEY_SIZE))
+ memcpy(secret, buf, CURVE25519_KEY_SIZE);
+ else
+ return -EINVAL;
+ return 0;
+}
+
+static int curve25519_generate_public_key(struct kpp_request *req)
+{
+ struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
+ const u8 *secret = kpp_tfm_ctx(tfm);
+ u8 buf[CURVE25519_KEY_SIZE];
+ int copied, nbytes;
+
+ if (req->src)
+ return -EINVAL;
+
+ curve25519_base_arch(buf, secret);
+
+ /* might want less than we've got */
+ nbytes = min_t(size_t, CURVE25519_KEY_SIZE, req->dst_len);
+ copied = sg_copy_from_buffer(req->dst, sg_nents_for_len(req->dst,
+ nbytes),
+ buf, nbytes);
+ if (copied != nbytes)
+ return -EINVAL;
+ return 0;
+}
+
+static int curve25519_compute_shared_secret(struct kpp_request *req)
+{
+ struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
+ const u8 *secret = kpp_tfm_ctx(tfm);
+ u8 public_key[CURVE25519_KEY_SIZE];
+ u8 buf[CURVE25519_KEY_SIZE];
+ int copied, nbytes;
+
+ if (!req->src)
+ return -EINVAL;
+
+ copied = sg_copy_to_buffer(req->src,
+ sg_nents_for_len(req->src,
+ CURVE25519_KEY_SIZE),
+ public_key, CURVE25519_KEY_SIZE);
+ if (copied != CURVE25519_KEY_SIZE)
+ return -EINVAL;
+
+ curve25519_arch(buf, secret, public_key);
+
+ /* might want less than we've got */
+ nbytes = min_t(size_t, CURVE25519_KEY_SIZE, req->dst_len);
+ copied = sg_copy_from_buffer(req->dst, sg_nents_for_len(req->dst,
+ nbytes),
+ buf, nbytes);
+ if (copied != nbytes)
+ return -EINVAL;
+ return 0;
+}
+
+static unsigned int curve25519_max_size(struct crypto_kpp *tfm)
+{
+ return CURVE25519_KEY_SIZE;
+}
+
+static struct kpp_alg curve25519_alg = {
+ .base.cra_name = "curve25519",
+ .base.cra_driver_name = "curve25519-x86",
+ .base.cra_priority = 200,
+ .base.cra_module = THIS_MODULE,
+ .base.cra_ctxsize = CURVE25519_KEY_SIZE,
+
+ .set_secret = curve25519_set_secret,
+ .generate_public_key = curve25519_generate_public_key,
+ .compute_shared_secret = curve25519_compute_shared_secret,
+ .max_size = curve25519_max_size,
+};
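
For context, a consumer drives this transform through the generic
<crypto/kpp.h> interface; a minimal sketch of generating a public key (error
handling trimmed, the demo function name made up, all calls the standard kpp
API):

	static int curve25519_kpp_demo(void)
	{
		u8 pub[CURVE25519_KEY_SIZE];
		struct scatterlist dst;
		struct crypto_kpp *tfm;
		struct kpp_request *req;
		int err;

		tfm = crypto_alloc_kpp("curve25519", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		/* Zero length asks the driver to generate a secret. */
		err = crypto_kpp_set_secret(tfm, NULL, 0);
		if (err)
			goto out;

		req = kpp_request_alloc(tfm, GFP_KERNEL);
		if (!req) {
			err = -ENOMEM;
			goto out;
		}
		sg_init_one(&dst, pub, sizeof(pub));
		kpp_request_set_input(req, NULL, 0);	/* base-point mult */
		kpp_request_set_output(req, &dst, sizeof(pub));
		err = crypto_kpp_generate_public_key(req);
		kpp_request_free(req);
	out:
		crypto_free_kpp(tfm);
		return err;
	}
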
+
+static int __init curve25519_mod_init(void)
+{
+ if (boot_cpu_has(X86_FEATURE_ADX))
+ static_branch_enable(&curve25519_use_adx);
+ else if (boot_cpu_has(X86_FEATURE_BMI2))
+ static_branch_enable(&curve25519_use_bmi2);
+ else
+ return 0;
+ return crypto_register_kpp(&curve25519_alg);
+}
+
+static void __exit curve25519_mod_exit(void)
+{
+ if (boot_cpu_has(X86_FEATURE_BMI2) ||
+ boot_cpu_has(X86_FEATURE_ADX))
+ crypto_unregister_kpp(&curve25519_alg);
+}
+
+module_init(curve25519_mod_init);
+module_exit(curve25519_mod_exit);
+
+MODULE_ALIAS_CRYPTO("curve25519");
+MODULE_ALIAS_CRYPTO("curve25519-x86");
+MODULE_LICENSE("GPL v2");
diff --git a/arch/x86/crypto/des3_ede-asm_64.S b/arch/x86/crypto/des3_ede-asm_64.S
index 7fca43099a5f..fac0fdc3f25d 100644
--- a/arch/x86/crypto/des3_ede-asm_64.S
+++ b/arch/x86/crypto/des3_ede-asm_64.S
@@ -162,7 +162,7 @@
movl left##d, (io); \
movl right##d, 4(io);
-ENTRY(des3_ede_x86_64_crypt_blk)
+SYM_FUNC_START(des3_ede_x86_64_crypt_blk)
/* input:
* %rdi: round keys, CTX
* %rsi: dst
@@ -244,7 +244,7 @@ ENTRY(des3_ede_x86_64_crypt_blk)
popq %rbx;
ret;
-ENDPROC(des3_ede_x86_64_crypt_blk)
+SYM_FUNC_END(des3_ede_x86_64_crypt_blk)
/***********************************************************************
* 3-way 3DES
@@ -418,7 +418,7 @@ ENDPROC(des3_ede_x86_64_crypt_blk)
#define __movq(src, dst) \
movq src, dst;
-ENTRY(des3_ede_x86_64_crypt_blk_3way)
+SYM_FUNC_START(des3_ede_x86_64_crypt_blk_3way)
/* input:
* %rdi: ctx, round keys
* %rsi: dst (3 blocks)
@@ -529,7 +529,7 @@ ENTRY(des3_ede_x86_64_crypt_blk_3way)
popq %rbx;
ret;
-ENDPROC(des3_ede_x86_64_crypt_blk_3way)
+SYM_FUNC_END(des3_ede_x86_64_crypt_blk_3way)
.section .rodata, "a", @progbits
.align 16
diff --git a/arch/x86/crypto/ghash-clmulni-intel_asm.S b/arch/x86/crypto/ghash-clmulni-intel_asm.S
index 5d53effe8abe..bb9735fbb865 100644
--- a/arch/x86/crypto/ghash-clmulni-intel_asm.S
+++ b/arch/x86/crypto/ghash-clmulni-intel_asm.S
@@ -44,7 +44,7 @@
* T2
* T3
*/
-__clmul_gf128mul_ble:
+SYM_FUNC_START_LOCAL(__clmul_gf128mul_ble)
movaps DATA, T1
pshufd $0b01001110, DATA, T2
pshufd $0b01001110, SHASH, T3
@@ -87,10 +87,10 @@ __clmul_gf128mul_ble:
pxor T2, T1
pxor T1, DATA
ret
-ENDPROC(__clmul_gf128mul_ble)
+SYM_FUNC_END(__clmul_gf128mul_ble)
/* void clmul_ghash_mul(char *dst, const u128 *shash) */
-ENTRY(clmul_ghash_mul)
+SYM_FUNC_START(clmul_ghash_mul)
FRAME_BEGIN
movups (%rdi), DATA
movups (%rsi), SHASH
@@ -101,13 +101,13 @@ ENTRY(clmul_ghash_mul)
movups DATA, (%rdi)
FRAME_END
ret
-ENDPROC(clmul_ghash_mul)
+SYM_FUNC_END(clmul_ghash_mul)
/*
* void clmul_ghash_update(char *dst, const char *src, unsigned int srclen,
* const u128 *shash);
*/
-ENTRY(clmul_ghash_update)
+SYM_FUNC_START(clmul_ghash_update)
FRAME_BEGIN
cmp $16, %rdx
jb .Lupdate_just_ret # check length
@@ -130,4 +130,4 @@ ENTRY(clmul_ghash_update)
.Lupdate_just_ret:
FRAME_END
ret
-ENDPROC(clmul_ghash_update)
+SYM_FUNC_END(clmul_ghash_update)
diff --git a/arch/x86/crypto/nh-avx2-x86_64.S b/arch/x86/crypto/nh-avx2-x86_64.S
index f7946ea1b704..b22c7b936272 100644
--- a/arch/x86/crypto/nh-avx2-x86_64.S
+++ b/arch/x86/crypto/nh-avx2-x86_64.S
@@ -69,7 +69,7 @@
*
* It's guaranteed that message_len % 16 == 0.
*/
-ENTRY(nh_avx2)
+SYM_FUNC_START(nh_avx2)
vmovdqu 0x00(KEY), K0
vmovdqu 0x10(KEY), K1
@@ -154,4 +154,4 @@ ENTRY(nh_avx2)
vpaddq T4, T0, T0
vmovdqu T0, (HASH)
ret
-ENDPROC(nh_avx2)
+SYM_FUNC_END(nh_avx2)
diff --git a/arch/x86/crypto/nh-sse2-x86_64.S b/arch/x86/crypto/nh-sse2-x86_64.S
index 51f52d4ab4bb..d7ae22dd6683 100644
--- a/arch/x86/crypto/nh-sse2-x86_64.S
+++ b/arch/x86/crypto/nh-sse2-x86_64.S
@@ -71,7 +71,7 @@
*
* It's guaranteed that message_len % 16 == 0.
*/
-ENTRY(nh_sse2)
+SYM_FUNC_START(nh_sse2)
movdqu 0x00(KEY), K0
movdqu 0x10(KEY), K1
@@ -120,4 +120,4 @@ ENTRY(nh_sse2)
movdqu T0, 0x00(HASH)
movdqu T1, 0x10(HASH)
ret
-ENDPROC(nh_sse2)
+SYM_FUNC_END(nh_sse2)
diff --git a/arch/x86/crypto/poly1305-avx2-x86_64.S b/arch/x86/crypto/poly1305-avx2-x86_64.S
index 8b341bc29d41..d6063feda9da 100644
--- a/arch/x86/crypto/poly1305-avx2-x86_64.S
+++ b/arch/x86/crypto/poly1305-avx2-x86_64.S
@@ -79,7 +79,7 @@ ORMASK: .octa 0x00000000010000000000000001000000
#define d3 %r12
#define d4 %r13
-ENTRY(poly1305_4block_avx2)
+SYM_FUNC_START(poly1305_4block_avx2)
# %rdi: Accumulator h[5]
# %rsi: 64 byte input block m
# %rdx: Poly1305 key r[5]
@@ -387,4 +387,4 @@ ENTRY(poly1305_4block_avx2)
pop %r12
pop %rbx
ret
-ENDPROC(poly1305_4block_avx2)
+SYM_FUNC_END(poly1305_4block_avx2)
diff --git a/arch/x86/crypto/poly1305-sse2-x86_64.S b/arch/x86/crypto/poly1305-sse2-x86_64.S
index 5578f846e622..d8ea29b96640 100644
--- a/arch/x86/crypto/poly1305-sse2-x86_64.S
+++ b/arch/x86/crypto/poly1305-sse2-x86_64.S
@@ -46,7 +46,7 @@ ORMASK: .octa 0x00000000010000000000000001000000
#define d3 %r11
#define d4 %r12
-ENTRY(poly1305_block_sse2)
+SYM_FUNC_START(poly1305_block_sse2)
# %rdi: Accumulator h[5]
# %rsi: 16 byte input block m
# %rdx: Poly1305 key r[5]
@@ -276,7 +276,7 @@ ENTRY(poly1305_block_sse2)
pop %r12
pop %rbx
ret
-ENDPROC(poly1305_block_sse2)
+SYM_FUNC_END(poly1305_block_sse2)
#define u0 0x00(%r8)
@@ -301,7 +301,7 @@ ENDPROC(poly1305_block_sse2)
#undef d0
#define d0 %r13
-ENTRY(poly1305_2block_sse2)
+SYM_FUNC_START(poly1305_2block_sse2)
# %rdi: Accumulator h[5]
# %rsi: 16 byte input block m
# %rdx: Poly1305 key r[5]
@@ -587,4 +587,4 @@ ENTRY(poly1305_2block_sse2)
pop %r12
pop %rbx
ret
-ENDPROC(poly1305_2block_sse2)
+SYM_FUNC_END(poly1305_2block_sse2)
diff --git a/arch/x86/crypto/poly1305_glue.c b/arch/x86/crypto/poly1305_glue.c
index 4a1c05dce950..370cd88068ec 100644
--- a/arch/x86/crypto/poly1305_glue.c
+++ b/arch/x86/crypto/poly1305_glue.c
@@ -7,47 +7,23 @@
#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
+#include <crypto/internal/poly1305.h>
#include <crypto/internal/simd.h>
-#include <crypto/poly1305.h>
#include <linux/crypto.h>
+#include <linux/jump_label.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <asm/simd.h>
-struct poly1305_simd_desc_ctx {
- struct poly1305_desc_ctx base;
- /* derived key u set? */
- bool uset;
-#ifdef CONFIG_AS_AVX2
- /* derived keys r^3, r^4 set? */
- bool wset;
-#endif
- /* derived Poly1305 key r^2 */
- u32 u[5];
- /* ... silently appended r^3 and r^4 when using AVX2 */
-};
-
asmlinkage void poly1305_block_sse2(u32 *h, const u8 *src,
const u32 *r, unsigned int blocks);
asmlinkage void poly1305_2block_sse2(u32 *h, const u8 *src, const u32 *r,
unsigned int blocks, const u32 *u);
-#ifdef CONFIG_AS_AVX2
asmlinkage void poly1305_4block_avx2(u32 *h, const u8 *src, const u32 *r,
unsigned int blocks, const u32 *u);
-static bool poly1305_use_avx2;
-#endif
-
-static int poly1305_simd_init(struct shash_desc *desc)
-{
- struct poly1305_simd_desc_ctx *sctx = shash_desc_ctx(desc);
- sctx->uset = false;
-#ifdef CONFIG_AS_AVX2
- sctx->wset = false;
-#endif
-
- return crypto_poly1305_init(desc);
-}
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(poly1305_use_simd);
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(poly1305_use_avx2);
static void poly1305_simd_mult(u32 *a, const u32 *b)
{
@@ -60,72 +36,85 @@ static void poly1305_simd_mult(u32 *a, const u32 *b)
poly1305_block_sse2(a, m, b, 1);
}
+static unsigned int poly1305_scalar_blocks(struct poly1305_desc_ctx *dctx,
+ const u8 *src, unsigned int srclen)
+{
+ unsigned int datalen;
+
+ if (unlikely(!dctx->sset)) {
+ datalen = crypto_poly1305_setdesckey(dctx, src, srclen);
+ src += srclen - datalen;
+ srclen = datalen;
+ }
+ if (srclen >= POLY1305_BLOCK_SIZE) {
+ poly1305_core_blocks(&dctx->h, dctx->r, src,
+ srclen / POLY1305_BLOCK_SIZE, 1);
+ srclen %= POLY1305_BLOCK_SIZE;
+ }
+ return srclen;
+}
+
static unsigned int poly1305_simd_blocks(struct poly1305_desc_ctx *dctx,
const u8 *src, unsigned int srclen)
{
- struct poly1305_simd_desc_ctx *sctx;
unsigned int blocks, datalen;
- BUILD_BUG_ON(offsetof(struct poly1305_simd_desc_ctx, base));
- sctx = container_of(dctx, struct poly1305_simd_desc_ctx, base);
-
if (unlikely(!dctx->sset)) {
datalen = crypto_poly1305_setdesckey(dctx, src, srclen);
src += srclen - datalen;
srclen = datalen;
}
-#ifdef CONFIG_AS_AVX2
- if (poly1305_use_avx2 && srclen >= POLY1305_BLOCK_SIZE * 4) {
- if (unlikely(!sctx->wset)) {
- if (!sctx->uset) {
- memcpy(sctx->u, dctx->r.r, sizeof(sctx->u));
- poly1305_simd_mult(sctx->u, dctx->r.r);
- sctx->uset = true;
+ if (IS_ENABLED(CONFIG_AS_AVX2) &&
+ static_branch_likely(&poly1305_use_avx2) &&
+ srclen >= POLY1305_BLOCK_SIZE * 4) {
+ if (unlikely(dctx->rset < 4)) {
+ if (dctx->rset < 2) {
+ dctx->r[1] = dctx->r[0];
+ poly1305_simd_mult(dctx->r[1].r, dctx->r[0].r);
}
- memcpy(sctx->u + 5, sctx->u, sizeof(sctx->u));
- poly1305_simd_mult(sctx->u + 5, dctx->r.r);
- memcpy(sctx->u + 10, sctx->u + 5, sizeof(sctx->u));
- poly1305_simd_mult(sctx->u + 10, dctx->r.r);
- sctx->wset = true;
+ dctx->r[2] = dctx->r[1];
+ poly1305_simd_mult(dctx->r[2].r, dctx->r[0].r);
+ dctx->r[3] = dctx->r[2];
+ poly1305_simd_mult(dctx->r[3].r, dctx->r[0].r);
+ dctx->rset = 4;
}
blocks = srclen / (POLY1305_BLOCK_SIZE * 4);
- poly1305_4block_avx2(dctx->h.h, src, dctx->r.r, blocks,
- sctx->u);
+ poly1305_4block_avx2(dctx->h.h, src, dctx->r[0].r, blocks,
+ dctx->r[1].r);
src += POLY1305_BLOCK_SIZE * 4 * blocks;
srclen -= POLY1305_BLOCK_SIZE * 4 * blocks;
}
-#endif
+
if (likely(srclen >= POLY1305_BLOCK_SIZE * 2)) {
- if (unlikely(!sctx->uset)) {
- memcpy(sctx->u, dctx->r.r, sizeof(sctx->u));
- poly1305_simd_mult(sctx->u, dctx->r.r);
- sctx->uset = true;
+ if (unlikely(dctx->rset < 2)) {
+ dctx->r[1] = dctx->r[0];
+ poly1305_simd_mult(dctx->r[1].r, dctx->r[0].r);
+ dctx->rset = 2;
}
blocks = srclen / (POLY1305_BLOCK_SIZE * 2);
- poly1305_2block_sse2(dctx->h.h, src, dctx->r.r, blocks,
- sctx->u);
+ poly1305_2block_sse2(dctx->h.h, src, dctx->r[0].r,
+ blocks, dctx->r[1].r);
src += POLY1305_BLOCK_SIZE * 2 * blocks;
srclen -= POLY1305_BLOCK_SIZE * 2 * blocks;
}
if (srclen >= POLY1305_BLOCK_SIZE) {
- poly1305_block_sse2(dctx->h.h, src, dctx->r.r, 1);
+ poly1305_block_sse2(dctx->h.h, src, dctx->r[0].r, 1);
srclen -= POLY1305_BLOCK_SIZE;
}
return srclen;
}
-static int poly1305_simd_update(struct shash_desc *desc,
- const u8 *src, unsigned int srclen)
+void poly1305_init_arch(struct poly1305_desc_ctx *desc, const u8 *key)
{
- struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
- unsigned int bytes;
-
- /* kernel_fpu_begin/end is costly, use fallback for small updates */
- if (srclen <= 288 || !crypto_simd_usable())
- return crypto_poly1305_update(desc, src, srclen);
+ poly1305_init_generic(desc, key);
+}
+EXPORT_SYMBOL(poly1305_init_arch);
- kernel_fpu_begin();
+void poly1305_update_arch(struct poly1305_desc_ctx *dctx, const u8 *src,
+ unsigned int srclen)
+{
+ unsigned int bytes;
if (unlikely(dctx->buflen)) {
bytes = min(srclen, POLY1305_BLOCK_SIZE - dctx->buflen);
@@ -135,34 +124,84 @@ static int poly1305_simd_update(struct shash_desc *desc,
dctx->buflen += bytes;
if (dctx->buflen == POLY1305_BLOCK_SIZE) {
- poly1305_simd_blocks(dctx, dctx->buf,
- POLY1305_BLOCK_SIZE);
+ if (static_branch_likely(&poly1305_use_simd) &&
+ likely(crypto_simd_usable())) {
+ kernel_fpu_begin();
+ poly1305_simd_blocks(dctx, dctx->buf,
+ POLY1305_BLOCK_SIZE);
+ kernel_fpu_end();
+ } else {
+ poly1305_scalar_blocks(dctx, dctx->buf,
+ POLY1305_BLOCK_SIZE);
+ }
dctx->buflen = 0;
}
}
if (likely(srclen >= POLY1305_BLOCK_SIZE)) {
- bytes = poly1305_simd_blocks(dctx, src, srclen);
+ if (static_branch_likely(&poly1305_use_simd) &&
+ likely(crypto_simd_usable())) {
+ kernel_fpu_begin();
+ bytes = poly1305_simd_blocks(dctx, src, srclen);
+ kernel_fpu_end();
+ } else {
+ bytes = poly1305_scalar_blocks(dctx, src, srclen);
+ }
src += srclen - bytes;
srclen = bytes;
}
- kernel_fpu_end();
-
if (unlikely(srclen)) {
dctx->buflen = srclen;
memcpy(dctx->buf, src, srclen);
}
+}
+EXPORT_SYMBOL(poly1305_update_arch);
+
+void poly1305_final_arch(struct poly1305_desc_ctx *desc, u8 *digest)
+{
+ poly1305_final_generic(desc, digest);
+}
+EXPORT_SYMBOL(poly1305_final_arch);
+
+static int crypto_poly1305_init(struct shash_desc *desc)
+{
+ struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
+
+ poly1305_core_init(&dctx->h);
+ dctx->buflen = 0;
+ dctx->rset = 0;
+ dctx->sset = false;
+
+ return 0;
+}
+
+static int crypto_poly1305_final(struct shash_desc *desc, u8 *dst)
+{
+ struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
+
+ if (unlikely(!dctx->sset))
+ return -ENOKEY;
+
+ poly1305_final_generic(dctx, dst);
+ return 0;
+}
+
+static int poly1305_simd_update(struct shash_desc *desc,
+ const u8 *src, unsigned int srclen)
+{
+ struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
+ poly1305_update_arch(dctx, src, srclen);
return 0;
}
static struct shash_alg alg = {
.digestsize = POLY1305_DIGEST_SIZE,
- .init = poly1305_simd_init,
+ .init = crypto_poly1305_init,
.update = poly1305_simd_update,
.final = crypto_poly1305_final,
- .descsize = sizeof(struct poly1305_simd_desc_ctx),
+ .descsize = sizeof(struct poly1305_desc_ctx),
.base = {
.cra_name = "poly1305",
.cra_driver_name = "poly1305-simd",
@@ -175,16 +214,16 @@ static struct shash_alg alg = {
static int __init poly1305_simd_mod_init(void)
{
if (!boot_cpu_has(X86_FEATURE_XMM2))
- return -ENODEV;
-
-#ifdef CONFIG_AS_AVX2
- poly1305_use_avx2 = boot_cpu_has(X86_FEATURE_AVX) &&
- boot_cpu_has(X86_FEATURE_AVX2) &&
- cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL);
- alg.descsize = sizeof(struct poly1305_simd_desc_ctx);
- if (poly1305_use_avx2)
- alg.descsize += 10 * sizeof(u32);
-#endif
+ return 0;
+
+ static_branch_enable(&poly1305_use_simd);
+
+ if (IS_ENABLED(CONFIG_AS_AVX2) &&
+ boot_cpu_has(X86_FEATURE_AVX) &&
+ boot_cpu_has(X86_FEATURE_AVX2) &&
+ cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL))
+ static_branch_enable(&poly1305_use_avx2);
+
return crypto_register_shash(&alg);
}
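
The net effect of this conversion: the CONFIG_AS_AVX2 #ifdef blocks and
runtime bools become IS_ENABLED() checks plus static keys, so unsupported
code is discarded at compile time and the surviving capability test is a
patched jump rather than a load-and-branch. The pattern in isolation (a
sketch; have_feature and the demo functions are made up):

	#include <linux/jump_label.h>

	static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_feature);

	static void fast_path(void) { /* e.g. AVX2 block function */ }
	static void slow_path(void) { /* scalar fallback */ }

	static void dispatch(void)
	{
		if (static_branch_likely(&have_feature))
			fast_path();	/* a patched nop/jmp at runtime */
		else
			slow_path();
	}

	static int __init demo_init(void)
	{
		if (boot_cpu_has(X86_FEATURE_AVX2))
			static_branch_enable(&have_feature);
		return 0;
	}
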
diff --git a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
index ddc51dbba3af..ba9e4c1e7f5c 100644
--- a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
+++ b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
@@ -555,7 +555,7 @@
transpose_4x4(x0, x1, x2, x3, t0, t1, t2)
.align 8
-__serpent_enc_blk8_avx:
+SYM_FUNC_START_LOCAL(__serpent_enc_blk8_avx)
/* input:
* %rdi: ctx, CTX
* RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: blocks
@@ -606,10 +606,10 @@ __serpent_enc_blk8_avx:
write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
ret;
-ENDPROC(__serpent_enc_blk8_avx)
+SYM_FUNC_END(__serpent_enc_blk8_avx)
.align 8
-__serpent_dec_blk8_avx:
+SYM_FUNC_START_LOCAL(__serpent_dec_blk8_avx)
/* input:
* %rdi: ctx, CTX
* RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: encrypted blocks
@@ -660,9 +660,9 @@ __serpent_dec_blk8_avx:
write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
ret;
-ENDPROC(__serpent_dec_blk8_avx)
+SYM_FUNC_END(__serpent_dec_blk8_avx)
-ENTRY(serpent_ecb_enc_8way_avx)
+SYM_FUNC_START(serpent_ecb_enc_8way_avx)
/* input:
* %rdi: ctx, CTX
* %rsi: dst
@@ -678,9 +678,9 @@ ENTRY(serpent_ecb_enc_8way_avx)
FRAME_END
ret;
-ENDPROC(serpent_ecb_enc_8way_avx)
+SYM_FUNC_END(serpent_ecb_enc_8way_avx)
-ENTRY(serpent_ecb_dec_8way_avx)
+SYM_FUNC_START(serpent_ecb_dec_8way_avx)
/* input:
* %rdi: ctx, CTX
* %rsi: dst
@@ -696,9 +696,9 @@ ENTRY(serpent_ecb_dec_8way_avx)
FRAME_END
ret;
-ENDPROC(serpent_ecb_dec_8way_avx)
+SYM_FUNC_END(serpent_ecb_dec_8way_avx)
-ENTRY(serpent_cbc_dec_8way_avx)
+SYM_FUNC_START(serpent_cbc_dec_8way_avx)
/* input:
* %rdi: ctx, CTX
* %rsi: dst
@@ -714,9 +714,9 @@ ENTRY(serpent_cbc_dec_8way_avx)
FRAME_END
ret;
-ENDPROC(serpent_cbc_dec_8way_avx)
+SYM_FUNC_END(serpent_cbc_dec_8way_avx)
-ENTRY(serpent_ctr_8way_avx)
+SYM_FUNC_START(serpent_ctr_8way_avx)
/* input:
* %rdi: ctx, CTX
* %rsi: dst
@@ -734,9 +734,9 @@ ENTRY(serpent_ctr_8way_avx)
FRAME_END
ret;
-ENDPROC(serpent_ctr_8way_avx)
+SYM_FUNC_END(serpent_ctr_8way_avx)
-ENTRY(serpent_xts_enc_8way_avx)
+SYM_FUNC_START(serpent_xts_enc_8way_avx)
/* input:
* %rdi: ctx, CTX
* %rsi: dst
@@ -756,9 +756,9 @@ ENTRY(serpent_xts_enc_8way_avx)
FRAME_END
ret;
-ENDPROC(serpent_xts_enc_8way_avx)
+SYM_FUNC_END(serpent_xts_enc_8way_avx)
-ENTRY(serpent_xts_dec_8way_avx)
+SYM_FUNC_START(serpent_xts_dec_8way_avx)
/* input:
* %rdi: ctx, CTX
* %rsi: dst
@@ -778,4 +778,4 @@ ENTRY(serpent_xts_dec_8way_avx)
FRAME_END
ret;
-ENDPROC(serpent_xts_dec_8way_avx)
+SYM_FUNC_END(serpent_xts_dec_8way_avx)
diff --git a/arch/x86/crypto/serpent-avx2-asm_64.S b/arch/x86/crypto/serpent-avx2-asm_64.S
index 37bc1d48106c..c9648aeae705 100644
--- a/arch/x86/crypto/serpent-avx2-asm_64.S
+++ b/arch/x86/crypto/serpent-avx2-asm_64.S
@@ -561,7 +561,7 @@
transpose_4x4(x0, x1, x2, x3, t0, t1, t2)
.align 8
-__serpent_enc_blk16:
+SYM_FUNC_START_LOCAL(__serpent_enc_blk16)
/* input:
* %rdi: ctx, CTX
* RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: plaintext
@@ -612,10 +612,10 @@ __serpent_enc_blk16:
write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
ret;
-ENDPROC(__serpent_enc_blk16)
+SYM_FUNC_END(__serpent_enc_blk16)
.align 8
-__serpent_dec_blk16:
+SYM_FUNC_START_LOCAL(__serpent_dec_blk16)
/* input:
* %rdi: ctx, CTX
* RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: ciphertext
@@ -666,9 +666,9 @@ __serpent_dec_blk16:
write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
ret;
-ENDPROC(__serpent_dec_blk16)
+SYM_FUNC_END(__serpent_dec_blk16)
-ENTRY(serpent_ecb_enc_16way)
+SYM_FUNC_START(serpent_ecb_enc_16way)
/* input:
* %rdi: ctx, CTX
* %rsi: dst
@@ -688,9 +688,9 @@ ENTRY(serpent_ecb_enc_16way)
FRAME_END
ret;
-ENDPROC(serpent_ecb_enc_16way)
+SYM_FUNC_END(serpent_ecb_enc_16way)
-ENTRY(serpent_ecb_dec_16way)
+SYM_FUNC_START(serpent_ecb_dec_16way)
/* input:
* %rdi: ctx, CTX
* %rsi: dst
@@ -710,9 +710,9 @@ ENTRY(serpent_ecb_dec_16way)
FRAME_END
ret;
-ENDPROC(serpent_ecb_dec_16way)
+SYM_FUNC_END(serpent_ecb_dec_16way)
-ENTRY(serpent_cbc_dec_16way)
+SYM_FUNC_START(serpent_cbc_dec_16way)
/* input:
* %rdi: ctx, CTX
* %rsi: dst
@@ -733,9 +733,9 @@ ENTRY(serpent_cbc_dec_16way)
FRAME_END
ret;
-ENDPROC(serpent_cbc_dec_16way)
+SYM_FUNC_END(serpent_cbc_dec_16way)
-ENTRY(serpent_ctr_16way)
+SYM_FUNC_START(serpent_ctr_16way)
/* input:
* %rdi: ctx, CTX
* %rsi: dst (16 blocks)
@@ -758,9 +758,9 @@ ENTRY(serpent_ctr_16way)
FRAME_END
ret;
-ENDPROC(serpent_ctr_16way)
+SYM_FUNC_END(serpent_ctr_16way)
-ENTRY(serpent_xts_enc_16way)
+SYM_FUNC_START(serpent_xts_enc_16way)
/* input:
* %rdi: ctx, CTX
* %rsi: dst (16 blocks)
@@ -784,9 +784,9 @@ ENTRY(serpent_xts_enc_16way)
FRAME_END
ret;
-ENDPROC(serpent_xts_enc_16way)
+SYM_FUNC_END(serpent_xts_enc_16way)
-ENTRY(serpent_xts_dec_16way)
+SYM_FUNC_START(serpent_xts_dec_16way)
/* input:
* %rdi: ctx, CTX
* %rsi: dst (16 blocks)
@@ -810,4 +810,4 @@ ENTRY(serpent_xts_dec_16way)
FRAME_END
ret;
-ENDPROC(serpent_xts_dec_16way)
+SYM_FUNC_END(serpent_xts_dec_16way)
diff --git a/arch/x86/crypto/serpent-sse2-i586-asm_32.S b/arch/x86/crypto/serpent-sse2-i586-asm_32.S
index e5c4a4690ca9..6379b99cb722 100644
--- a/arch/x86/crypto/serpent-sse2-i586-asm_32.S
+++ b/arch/x86/crypto/serpent-sse2-i586-asm_32.S
@@ -497,7 +497,7 @@
pxor t0, x3; \
movdqu x3, (3*4*4)(out);
-ENTRY(__serpent_enc_blk_4way)
+SYM_FUNC_START(__serpent_enc_blk_4way)
/* input:
* arg_ctx(%esp): ctx, CTX
* arg_dst(%esp): dst
@@ -559,9 +559,9 @@ ENTRY(__serpent_enc_blk_4way)
xor_blocks(%eax, RA, RB, RC, RD, RT0, RT1, RE);
ret;
-ENDPROC(__serpent_enc_blk_4way)
+SYM_FUNC_END(__serpent_enc_blk_4way)
-ENTRY(serpent_dec_blk_4way)
+SYM_FUNC_START(serpent_dec_blk_4way)
/* input:
* arg_ctx(%esp): ctx, CTX
* arg_dst(%esp): dst
@@ -613,4 +613,4 @@ ENTRY(serpent_dec_blk_4way)
write_blocks(%eax, RC, RD, RB, RE, RT0, RT1, RA);
ret;
-ENDPROC(serpent_dec_blk_4way)
+SYM_FUNC_END(serpent_dec_blk_4way)
diff --git a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
index 5e0b3a3e97af..efb6dc17dc90 100644
--- a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
+++ b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
@@ -619,7 +619,7 @@
pxor t0, x3; \
movdqu x3, (3*4*4)(out);
-ENTRY(__serpent_enc_blk_8way)
+SYM_FUNC_START(__serpent_enc_blk_8way)
/* input:
* %rdi: ctx, CTX
* %rsi: dst
@@ -682,9 +682,9 @@ ENTRY(__serpent_enc_blk_8way)
xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
ret;
-ENDPROC(__serpent_enc_blk_8way)
+SYM_FUNC_END(__serpent_enc_blk_8way)
-ENTRY(serpent_dec_blk_8way)
+SYM_FUNC_START(serpent_dec_blk_8way)
/* input:
* %rdi: ctx, CTX
* %rsi: dst
@@ -736,4 +736,4 @@ ENTRY(serpent_dec_blk_8way)
write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
ret;
-ENDPROC(serpent_dec_blk_8way)
+SYM_FUNC_END(serpent_dec_blk_8way)
diff --git a/arch/x86/crypto/sha1_avx2_x86_64_asm.S b/arch/x86/crypto/sha1_avx2_x86_64_asm.S
index 9f712a7dfd79..6decc85ef7b7 100644
--- a/arch/x86/crypto/sha1_avx2_x86_64_asm.S
+++ b/arch/x86/crypto/sha1_avx2_x86_64_asm.S
@@ -634,7 +634,7 @@ _loop3:
* param: function's name
*/
.macro SHA1_VECTOR_ASM name
- ENTRY(\name)
+ SYM_FUNC_START(\name)
push %rbx
push %r12
@@ -676,7 +676,7 @@ _loop3:
ret
- ENDPROC(\name)
+ SYM_FUNC_END(\name)
.endm
.section .rodata
diff --git a/arch/x86/crypto/sha1_ni_asm.S b/arch/x86/crypto/sha1_ni_asm.S
index ebbdba72ae07..11efe3a45a1f 100644
--- a/arch/x86/crypto/sha1_ni_asm.S
+++ b/arch/x86/crypto/sha1_ni_asm.S
@@ -95,7 +95,7 @@
*/
.text
.align 32
-ENTRY(sha1_ni_transform)
+SYM_FUNC_START(sha1_ni_transform)
mov %rsp, RSPSAVE
sub $FRAME_SIZE, %rsp
and $~0xF, %rsp
@@ -291,7 +291,7 @@ ENTRY(sha1_ni_transform)
mov RSPSAVE, %rsp
ret
-ENDPROC(sha1_ni_transform)
+SYM_FUNC_END(sha1_ni_transform)
.section .rodata.cst16.PSHUFFLE_BYTE_FLIP_MASK, "aM", @progbits, 16
.align 16
diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
index 99c5b8c4dc38..5d03c1173690 100644
--- a/arch/x86/crypto/sha1_ssse3_asm.S
+++ b/arch/x86/crypto/sha1_ssse3_asm.S
@@ -67,7 +67,7 @@
* param: function's name
*/
.macro SHA1_VECTOR_ASM name
- ENTRY(\name)
+ SYM_FUNC_START(\name)
push %rbx
push %r12
@@ -101,7 +101,7 @@
pop %rbx
ret
- ENDPROC(\name)
+ SYM_FUNC_END(\name)
.endm
/*
diff --git a/arch/x86/crypto/sha256-avx-asm.S b/arch/x86/crypto/sha256-avx-asm.S
index 001bbcf93c79..22e14c8dd2e4 100644
--- a/arch/x86/crypto/sha256-avx-asm.S
+++ b/arch/x86/crypto/sha256-avx-asm.S
@@ -347,7 +347,7 @@ a = TMP_
## arg 3 : Num blocks
########################################################################
.text
-ENTRY(sha256_transform_avx)
+SYM_FUNC_START(sha256_transform_avx)
.align 32
pushq %rbx
pushq %r12
@@ -460,7 +460,7 @@ done_hash:
popq %r12
popq %rbx
ret
-ENDPROC(sha256_transform_avx)
+SYM_FUNC_END(sha256_transform_avx)
.section .rodata.cst256.K256, "aM", @progbits, 256
.align 64
diff --git a/arch/x86/crypto/sha256-avx2-asm.S b/arch/x86/crypto/sha256-avx2-asm.S
index 1420db15dcdd..519b551ad576 100644
--- a/arch/x86/crypto/sha256-avx2-asm.S
+++ b/arch/x86/crypto/sha256-avx2-asm.S
@@ -526,7 +526,7 @@ STACK_SIZE = _RSP + _RSP_SIZE
## arg 3 : Num blocks
########################################################################
.text
-ENTRY(sha256_transform_rorx)
+SYM_FUNC_START(sha256_transform_rorx)
.align 32
pushq %rbx
pushq %r12
@@ -713,7 +713,7 @@ done_hash:
popq %r12
popq %rbx
ret
-ENDPROC(sha256_transform_rorx)
+SYM_FUNC_END(sha256_transform_rorx)
.section .rodata.cst512.K256, "aM", @progbits, 512
.align 64
diff --git a/arch/x86/crypto/sha256-ssse3-asm.S b/arch/x86/crypto/sha256-ssse3-asm.S
index c6c05ed2c16a..69cc2f91dc4c 100644
--- a/arch/x86/crypto/sha256-ssse3-asm.S
+++ b/arch/x86/crypto/sha256-ssse3-asm.S
@@ -353,7 +353,7 @@ a = TMP_
## arg 3 : Num blocks
########################################################################
.text
-ENTRY(sha256_transform_ssse3)
+SYM_FUNC_START(sha256_transform_ssse3)
.align 32
pushq %rbx
pushq %r12
@@ -471,7 +471,7 @@ done_hash:
popq %rbx
ret
-ENDPROC(sha256_transform_ssse3)
+SYM_FUNC_END(sha256_transform_ssse3)
.section .rodata.cst256.K256, "aM", @progbits, 256
.align 64
diff --git a/arch/x86/crypto/sha256_ni_asm.S b/arch/x86/crypto/sha256_ni_asm.S
index fb58f58ecfbc..7abade04a3a3 100644
--- a/arch/x86/crypto/sha256_ni_asm.S
+++ b/arch/x86/crypto/sha256_ni_asm.S
@@ -97,7 +97,7 @@
.text
.align 32
-ENTRY(sha256_ni_transform)
+SYM_FUNC_START(sha256_ni_transform)
shl $6, NUM_BLKS /* convert to bytes */
jz .Ldone_hash
@@ -327,7 +327,7 @@ ENTRY(sha256_ni_transform)
.Ldone_hash:
ret
-ENDPROC(sha256_ni_transform)
+SYM_FUNC_END(sha256_ni_transform)
.section .rodata.cst256.K256, "aM", @progbits, 256
.align 64
diff --git a/arch/x86/crypto/sha512-avx-asm.S b/arch/x86/crypto/sha512-avx-asm.S
index 39235fefe6f7..3704ddd7e5d5 100644
--- a/arch/x86/crypto/sha512-avx-asm.S
+++ b/arch/x86/crypto/sha512-avx-asm.S
@@ -277,7 +277,7 @@ frame_size = frame_GPRSAVE + GPRSAVE_SIZE
# message blocks.
# L is the message length in SHA512 blocks
########################################################################
-ENTRY(sha512_transform_avx)
+SYM_FUNC_START(sha512_transform_avx)
cmp $0, msglen
je nowork
@@ -365,7 +365,7 @@ updateblock:
nowork:
ret
-ENDPROC(sha512_transform_avx)
+SYM_FUNC_END(sha512_transform_avx)
########################################################################
### Binary Data
diff --git a/arch/x86/crypto/sha512-avx2-asm.S b/arch/x86/crypto/sha512-avx2-asm.S
index b16d56005162..80d830e7ee09 100644
--- a/arch/x86/crypto/sha512-avx2-asm.S
+++ b/arch/x86/crypto/sha512-avx2-asm.S
@@ -569,7 +569,7 @@ frame_size = frame_GPRSAVE + GPRSAVE_SIZE
# message blocks.
# L is the message length in SHA512 blocks
########################################################################
-ENTRY(sha512_transform_rorx)
+SYM_FUNC_START(sha512_transform_rorx)
# Allocate Stack Space
mov %rsp, %rax
sub $frame_size, %rsp
@@ -682,7 +682,7 @@ done_hash:
# Restore Stack Pointer
mov frame_RSPSAVE(%rsp), %rsp
ret
-ENDPROC(sha512_transform_rorx)
+SYM_FUNC_END(sha512_transform_rorx)
########################################################################
### Binary Data
diff --git a/arch/x86/crypto/sha512-ssse3-asm.S b/arch/x86/crypto/sha512-ssse3-asm.S
index 66bbd9058a90..838f984e95d9 100644
--- a/arch/x86/crypto/sha512-ssse3-asm.S
+++ b/arch/x86/crypto/sha512-ssse3-asm.S
@@ -275,7 +275,7 @@ frame_size = frame_GPRSAVE + GPRSAVE_SIZE
# message blocks.
# L is the message length in SHA512 blocks.
########################################################################
-ENTRY(sha512_transform_ssse3)
+SYM_FUNC_START(sha512_transform_ssse3)
cmp $0, msglen
je nowork
@@ -364,7 +364,7 @@ updateblock:
nowork:
ret
-ENDPROC(sha512_transform_ssse3)
+SYM_FUNC_END(sha512_transform_ssse3)
########################################################################
### Binary Data
diff --git a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
index 698b8f2a56e2..a5151393bb2f 100644
--- a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
+++ b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
@@ -234,7 +234,7 @@
vpxor x3, wkey, x3;
.align 8
-__twofish_enc_blk8:
+SYM_FUNC_START_LOCAL(__twofish_enc_blk8)
/* input:
* %rdi: ctx, CTX
* RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: blocks
@@ -273,10 +273,10 @@ __twofish_enc_blk8:
outunpack_blocks(RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);
ret;
-ENDPROC(__twofish_enc_blk8)
+SYM_FUNC_END(__twofish_enc_blk8)
.align 8
-__twofish_dec_blk8:
+SYM_FUNC_START_LOCAL(__twofish_dec_blk8)
/* input:
* %rdi: ctx, CTX
* RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2: encrypted blocks
@@ -313,9 +313,9 @@ __twofish_dec_blk8:
outunpack_blocks(RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2);
ret;
-ENDPROC(__twofish_dec_blk8)
+SYM_FUNC_END(__twofish_dec_blk8)
-ENTRY(twofish_ecb_enc_8way)
+SYM_FUNC_START(twofish_ecb_enc_8way)
/* input:
* %rdi: ctx, CTX
* %rsi: dst
@@ -333,9 +333,9 @@ ENTRY(twofish_ecb_enc_8way)
FRAME_END
ret;
-ENDPROC(twofish_ecb_enc_8way)
+SYM_FUNC_END(twofish_ecb_enc_8way)
-ENTRY(twofish_ecb_dec_8way)
+SYM_FUNC_START(twofish_ecb_dec_8way)
/* input:
* %rdi: ctx, CTX
* %rsi: dst
@@ -353,9 +353,9 @@ ENTRY(twofish_ecb_dec_8way)
FRAME_END
ret;
-ENDPROC(twofish_ecb_dec_8way)
+SYM_FUNC_END(twofish_ecb_dec_8way)
-ENTRY(twofish_cbc_dec_8way)
+SYM_FUNC_START(twofish_cbc_dec_8way)
/* input:
* %rdi: ctx, CTX
* %rsi: dst
@@ -378,9 +378,9 @@ ENTRY(twofish_cbc_dec_8way)
FRAME_END
ret;
-ENDPROC(twofish_cbc_dec_8way)
+SYM_FUNC_END(twofish_cbc_dec_8way)
-ENTRY(twofish_ctr_8way)
+SYM_FUNC_START(twofish_ctr_8way)
/* input:
* %rdi: ctx, CTX
* %rsi: dst
@@ -405,9 +405,9 @@ ENTRY(twofish_ctr_8way)
FRAME_END
ret;
-ENDPROC(twofish_ctr_8way)
+SYM_FUNC_END(twofish_ctr_8way)
-ENTRY(twofish_xts_enc_8way)
+SYM_FUNC_START(twofish_xts_enc_8way)
/* input:
* %rdi: ctx, CTX
* %rsi: dst
@@ -429,9 +429,9 @@ ENTRY(twofish_xts_enc_8way)
FRAME_END
ret;
-ENDPROC(twofish_xts_enc_8way)
+SYM_FUNC_END(twofish_xts_enc_8way)
-ENTRY(twofish_xts_dec_8way)
+SYM_FUNC_START(twofish_xts_dec_8way)
/* input:
* %rdi: ctx, CTX
* %rsi: dst
@@ -453,4 +453,4 @@ ENTRY(twofish_xts_dec_8way)
FRAME_END
ret;
-ENDPROC(twofish_xts_dec_8way)
+SYM_FUNC_END(twofish_xts_dec_8way)
diff --git a/arch/x86/crypto/twofish-i586-asm_32.S b/arch/x86/crypto/twofish-i586-asm_32.S
index 290cc4e9a6fe..a6f09e4f2e46 100644
--- a/arch/x86/crypto/twofish-i586-asm_32.S
+++ b/arch/x86/crypto/twofish-i586-asm_32.S
@@ -207,7 +207,7 @@
xor %esi, d ## D;\
ror $1, d ## D;
-ENTRY(twofish_enc_blk)
+SYM_FUNC_START(twofish_enc_blk)
push %ebp /* save registers according to calling convention*/
push %ebx
push %esi
@@ -261,9 +261,9 @@ ENTRY(twofish_enc_blk)
pop %ebp
mov $1, %eax
ret
-ENDPROC(twofish_enc_blk)
+SYM_FUNC_END(twofish_enc_blk)
-ENTRY(twofish_dec_blk)
+SYM_FUNC_START(twofish_dec_blk)
push %ebp /* save registers according to calling convention*/
push %ebx
push %esi
@@ -318,4 +318,4 @@ ENTRY(twofish_dec_blk)
pop %ebp
mov $1, %eax
ret
-ENDPROC(twofish_dec_blk)
+SYM_FUNC_END(twofish_dec_blk)
diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
index e495e07c7f1b..fc23552afe37 100644
--- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
+++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
@@ -220,7 +220,7 @@
rorq $32, RAB2; \
outunpack3(mov, RIO, 2, RAB, 2);
-ENTRY(__twofish_enc_blk_3way)
+SYM_FUNC_START(__twofish_enc_blk_3way)
/* input:
* %rdi: ctx, CTX
* %rsi: dst
@@ -267,9 +267,9 @@ ENTRY(__twofish_enc_blk_3way)
popq %r12;
popq %r13;
ret;
-ENDPROC(__twofish_enc_blk_3way)
+SYM_FUNC_END(__twofish_enc_blk_3way)
-ENTRY(twofish_dec_blk_3way)
+SYM_FUNC_START(twofish_dec_blk_3way)
/* input:
* %rdi: ctx, CTX
* %rsi: dst
@@ -302,4 +302,4 @@ ENTRY(twofish_dec_blk_3way)
popq %r12;
popq %r13;
ret;
-ENDPROC(twofish_dec_blk_3way)
+SYM_FUNC_END(twofish_dec_blk_3way)
diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
index ecef2cb9f43f..d2e56232494a 100644
--- a/arch/x86/crypto/twofish-x86_64-asm_64.S
+++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
@@ -202,7 +202,7 @@
xor %r8d, d ## D;\
ror $1, d ## D;
-ENTRY(twofish_enc_blk)
+SYM_FUNC_START(twofish_enc_blk)
pushq R1
/* %rdi contains the ctx address */
@@ -253,9 +253,9 @@ ENTRY(twofish_enc_blk)
popq R1
movl $1,%eax
ret
-ENDPROC(twofish_enc_blk)
+SYM_FUNC_END(twofish_enc_blk)
-ENTRY(twofish_dec_blk)
+SYM_FUNC_START(twofish_dec_blk)
pushq R1
/* %rdi contains the ctx address */
@@ -305,4 +305,4 @@ ENTRY(twofish_dec_blk)
popq R1
movl $1,%eax
ret
-ENDPROC(twofish_dec_blk)
+SYM_FUNC_END(twofish_dec_blk)
diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
index 515c0ceeb4a3..0789e13ece90 100644
--- a/arch/x86/entry/calling.h
+++ b/arch/x86/entry/calling.h
@@ -354,7 +354,7 @@ For 32-bit we have the following conventions - kernel is built with
.macro CALL_enter_from_user_mode
#ifdef CONFIG_CONTEXT_TRACKING
#ifdef CONFIG_JUMP_LABEL
- STATIC_JUMP_IF_FALSE .Lafter_call_\@, context_tracking_enabled, def=0
+ STATIC_JUMP_IF_FALSE .Lafter_call_\@, context_tracking_key, def=0
#endif
call enter_from_user_mode
.Lafter_call_\@:
diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
index 3f8e22615812..9747876980b5 100644
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
@@ -33,6 +33,7 @@
#include <asm/cpufeature.h>
#include <asm/fpu/api.h>
#include <asm/nospec-branch.h>
+#include <asm/io_bitmap.h>
#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>
@@ -196,6 +197,9 @@ __visible inline void prepare_exit_to_usermode(struct pt_regs *regs)
/* Reload ti->flags; we may have rescheduled above. */
cached_flags = READ_ONCE(ti->flags);
+ if (unlikely(cached_flags & _TIF_IO_BITMAP))
+ tss_update_io_bitmap();
+
fpregs_assert_state_consistent();
if (unlikely(cached_flags & _TIF_NEED_FPU_LOAD))
switch_fpu_return();
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index f83ca5aa8b77..5832b11f01bb 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -172,7 +172,7 @@
ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
.if \no_user_check == 0
/* coming from usermode? */
- testl $SEGMENT_RPL_MASK, PT_CS(%esp)
+ testl $USER_SEGMENT_RPL_MASK, PT_CS(%esp)
jz .Lend_\@
.endif
/* On user-cr3? */
@@ -205,64 +205,76 @@
#define CS_FROM_ENTRY_STACK (1 << 31)
#define CS_FROM_USER_CR3 (1 << 30)
#define CS_FROM_KERNEL (1 << 29)
+#define CS_FROM_ESPFIX (1 << 28)
.macro FIXUP_FRAME
/*
* The high bits of the CS dword (__csh) are used for CS_FROM_*.
* Clear them in case hardware didn't do this for us.
*/
- andl $0x0000ffff, 3*4(%esp)
+ andl $0x0000ffff, 4*4(%esp)
#ifdef CONFIG_VM86
- testl $X86_EFLAGS_VM, 4*4(%esp)
+ testl $X86_EFLAGS_VM, 5*4(%esp)
jnz .Lfrom_usermode_no_fixup_\@
#endif
- testl $SEGMENT_RPL_MASK, 3*4(%esp)
+ testl $USER_SEGMENT_RPL_MASK, 4*4(%esp)
jnz .Lfrom_usermode_no_fixup_\@
- orl $CS_FROM_KERNEL, 3*4(%esp)
+ orl $CS_FROM_KERNEL, 4*4(%esp)
/*
* When we're here from kernel mode; the (exception) stack looks like:
*
- * 5*4(%esp) - <previous context>
- * 4*4(%esp) - flags
- * 3*4(%esp) - cs
- * 2*4(%esp) - ip
- * 1*4(%esp) - orig_eax
- * 0*4(%esp) - gs / function
+ * 6*4(%esp) - <previous context>
+ * 5*4(%esp) - flags
+ * 4*4(%esp) - cs
+ * 3*4(%esp) - ip
+ * 2*4(%esp) - orig_eax
+ * 1*4(%esp) - gs / function
+ * 0*4(%esp) - fs
*
* Let's build a 5 entry IRET frame after that, such that struct pt_regs
* is complete and in particular regs->sp is correct. This gives us
- * the original 5 enties as gap:
+ * the original 6 entries as gap:
*
- * 12*4(%esp) - <previous context>
- * 11*4(%esp) - gap / flags
- * 10*4(%esp) - gap / cs
- * 9*4(%esp) - gap / ip
- * 8*4(%esp) - gap / orig_eax
- * 7*4(%esp) - gap / gs / function
- * 6*4(%esp) - ss
- * 5*4(%esp) - sp
- * 4*4(%esp) - flags
- * 3*4(%esp) - cs
- * 2*4(%esp) - ip
- * 1*4(%esp) - orig_eax
- * 0*4(%esp) - gs / function
+ * 14*4(%esp) - <previous context>
+ * 13*4(%esp) - gap / flags
+ * 12*4(%esp) - gap / cs
+ * 11*4(%esp) - gap / ip
+ * 10*4(%esp) - gap / orig_eax
+ * 9*4(%esp) - gap / gs / function
+ * 8*4(%esp) - gap / fs
+ * 7*4(%esp) - ss
+ * 6*4(%esp) - sp
+ * 5*4(%esp) - flags
+ * 4*4(%esp) - cs
+ * 3*4(%esp) - ip
+ * 2*4(%esp) - orig_eax
+ * 1*4(%esp) - gs / function
+ * 0*4(%esp) - fs
*/
pushl %ss # ss
pushl %esp # sp (points at ss)
- addl $6*4, (%esp) # point sp back at the previous context
- pushl 6*4(%esp) # flags
- pushl 6*4(%esp) # cs
- pushl 6*4(%esp) # ip
- pushl 6*4(%esp) # orig_eax
- pushl 6*4(%esp) # gs / function
+ addl $7*4, (%esp) # point sp back at the previous context
+ pushl 7*4(%esp) # flags
+ pushl 7*4(%esp) # cs
+ pushl 7*4(%esp) # ip
+ pushl 7*4(%esp) # orig_eax
+ pushl 7*4(%esp) # gs / function
+ pushl 7*4(%esp) # fs
.Lfrom_usermode_no_fixup_\@:
.endm
.macro IRET_FRAME
+ /*
+ * We're called with %ds, %es, %fs, and %gs from the interrupted
+ * frame, so we shouldn't use them. Also, we may be in ESPFIX
+ * mode and therefore have a nonzero SS base and an offset ESP,
+ * so any attempt to access the stack needs to use SS (except for
+ * accesses through %esp, which automatically use SS).
+ */
testl $CS_FROM_KERNEL, 1*4(%esp)
jz .Lfinished_frame_\@
@@ -276,31 +288,40 @@
movl 5*4(%esp), %eax # (modified) regs->sp
movl 4*4(%esp), %ecx # flags
- movl %ecx, -4(%eax)
+ movl %ecx, %ss:-1*4(%eax)
movl 3*4(%esp), %ecx # cs
andl $0x0000ffff, %ecx
- movl %ecx, -8(%eax)
+ movl %ecx, %ss:-2*4(%eax)
movl 2*4(%esp), %ecx # ip
- movl %ecx, -12(%eax)
+ movl %ecx, %ss:-3*4(%eax)
movl 1*4(%esp), %ecx # eax
- movl %ecx, -16(%eax)
+ movl %ecx, %ss:-4*4(%eax)
popl %ecx
- lea -16(%eax), %esp
+ lea -4*4(%eax), %esp
popl %eax
.Lfinished_frame_\@:
.endm
-.macro SAVE_ALL pt_regs_ax=%eax switch_stacks=0 skip_gs=0
+.macro SAVE_ALL pt_regs_ax=%eax switch_stacks=0 skip_gs=0 unwind_espfix=0
cld
.if \skip_gs == 0
PUSH_GS
.endif
- FIXUP_FRAME
pushl %fs
+
+ pushl %eax
+ movl $(__KERNEL_PERCPU), %eax
+ movl %eax, %fs
+.if \unwind_espfix > 0
+ UNWIND_ESPFIX_STACK
+.endif
+ popl %eax
+
+ FIXUP_FRAME
pushl %es
pushl %ds
pushl \pt_regs_ax
@@ -313,8 +334,6 @@
movl $(__USER_DS), %edx
movl %edx, %ds
movl %edx, %es
- movl $(__KERNEL_PERCPU), %edx
- movl %edx, %fs
.if \skip_gs == 0
SET_KERNEL_GS %edx
.endif
@@ -324,8 +343,8 @@
.endif
.endm
-.macro SAVE_ALL_NMI cr3_reg:req
- SAVE_ALL
+.macro SAVE_ALL_NMI cr3_reg:req unwind_espfix=0
+ SAVE_ALL unwind_espfix=\unwind_espfix
BUG_IF_WRONG_CR3
@@ -357,6 +376,7 @@
2: popl %es
3: popl %fs
POP_GS \pop
+ IRET_FRAME
.pushsection .fixup, "ax"
4: movl $0, (%esp)
jmp 1b
@@ -395,7 +415,8 @@
.macro CHECK_AND_APPLY_ESPFIX
#ifdef CONFIG_X86_ESPFIX32
-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
+#define GDT_ESPFIX_OFFSET (GDT_ENTRY_ESPFIX_SS * 8)
+#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + GDT_ESPFIX_OFFSET
ALTERNATIVE "jmp .Lend_\@", "", X86_BUG_ESPFIX
@@ -709,7 +730,7 @@
* %eax: prev task
* %edx: next task
*/
-ENTRY(__switch_to_asm)
+SYM_CODE_START(__switch_to_asm)
/*
* Save callee-saved registers
* This must match the order in struct inactive_task_frame
@@ -718,6 +739,11 @@ ENTRY(__switch_to_asm)
pushl %ebx
pushl %edi
pushl %esi
+ /*
+ * Flags are saved to prevent AC leakage. This could go
+ * away if objtool would have 32bit support to verify
+ * the STAC/CLAC correctness.
+ */
pushfl
/* switch stack */
@@ -740,15 +766,16 @@ ENTRY(__switch_to_asm)
FILL_RETURN_BUFFER %ebx, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
#endif
- /* restore callee-saved registers */
+ /* Restore flags or the incoming task to restore AC state. */
popfl
+ /* restore callee-saved registers */
popl %esi
popl %edi
popl %ebx
popl %ebp
jmp __switch_to
-END(__switch_to_asm)
+SYM_CODE_END(__switch_to_asm)
/*
* The unwinder expects the last frame on the stack to always be at the same
@@ -757,7 +784,7 @@ END(__switch_to_asm)
* asmlinkage function so its argument has to be pushed on the stack. This
* wrapper creates a proper "end of stack" frame header before the call.
*/
-ENTRY(schedule_tail_wrapper)
+SYM_FUNC_START(schedule_tail_wrapper)
FRAME_BEGIN
pushl %eax
@@ -766,7 +793,7 @@ ENTRY(schedule_tail_wrapper)
FRAME_END
ret
-ENDPROC(schedule_tail_wrapper)
+SYM_FUNC_END(schedule_tail_wrapper)
/*
* A newly forked process directly context switches into this address.
*
@@ -774,7 +801,7 @@ ENDPROC(schedule_tail_wrapper)
* ebx: kernel thread func (NULL for user thread)
* edi: kernel thread arg
*/
-ENTRY(ret_from_fork)
+SYM_CODE_START(ret_from_fork)
call schedule_tail_wrapper
testl %ebx, %ebx
@@ -797,7 +824,7 @@ ENTRY(ret_from_fork)
*/
movl $0, PT_EAX(%esp)
jmp 2b
-END(ret_from_fork)
+SYM_CODE_END(ret_from_fork)
/*
* Return to user mode is not as complex as all this looks,
@@ -807,8 +834,7 @@ END(ret_from_fork)
*/
# userspace resumption stub bypassing syscall exit tracing
- ALIGN
-ret_from_exception:
+SYM_CODE_START_LOCAL(ret_from_exception)
preempt_stop(CLBR_ANY)
ret_from_intr:
#ifdef CONFIG_VM86
@@ -825,15 +851,14 @@ ret_from_intr:
cmpl $USER_RPL, %eax
jb restore_all_kernel # not returning to v8086 or userspace
-ENTRY(resume_userspace)
DISABLE_INTERRUPTS(CLBR_ANY)
TRACE_IRQS_OFF
movl %esp, %eax
call prepare_exit_to_usermode
jmp restore_all
-END(ret_from_exception)
+SYM_CODE_END(ret_from_exception)
-GLOBAL(__begin_SYSENTER_singlestep_region)
+SYM_ENTRY(__begin_SYSENTER_singlestep_region, SYM_L_GLOBAL, SYM_A_NONE)
/*
* All code from here through __end_SYSENTER_singlestep_region is subject
* to being single-stepped if a user program sets TF and executes SYSENTER.
@@ -848,9 +873,10 @@ GLOBAL(__begin_SYSENTER_singlestep_region)
* Xen doesn't set %esp to be precisely what the normal SYSENTER
* entry point expects, so fix it up before using the normal path.
*/
-ENTRY(xen_sysenter_target)
+SYM_CODE_START(xen_sysenter_target)
addl $5*4, %esp /* remove xen-provided frame */
jmp .Lsysenter_past_esp
+SYM_CODE_END(xen_sysenter_target)
#endif
/*
@@ -885,7 +911,7 @@ ENTRY(xen_sysenter_target)
* ebp user stack
* 0(%ebp) arg6
*/
-ENTRY(entry_SYSENTER_32)
+SYM_FUNC_START(entry_SYSENTER_32)
/*
* On entry-stack with all userspace-regs live - save and
* restore eflags and %eax to use it as scratch-reg for the cr3
@@ -1012,8 +1038,8 @@ ENTRY(entry_SYSENTER_32)
pushl $X86_EFLAGS_FIXED
popfl
jmp .Lsysenter_flags_fixed
-GLOBAL(__end_SYSENTER_singlestep_region)
-ENDPROC(entry_SYSENTER_32)
+SYM_ENTRY(__end_SYSENTER_singlestep_region, SYM_L_GLOBAL, SYM_A_NONE)
+SYM_FUNC_END(entry_SYSENTER_32)
/*
* 32-bit legacy system call entry.
@@ -1043,7 +1069,7 @@ ENDPROC(entry_SYSENTER_32)
* edi arg5
* ebp arg6
*/
-ENTRY(entry_INT80_32)
+SYM_FUNC_START(entry_INT80_32)
ASM_CLAC
pushl %eax /* pt_regs->orig_ax */
@@ -1075,7 +1101,6 @@ restore_all:
/* Restore user state */
RESTORE_REGS pop=4 # skip orig_eax/error_code
.Lirq_return:
- IRET_FRAME
/*
 * ARCH_HAS_MEMBARRIER_SYNC_CORE relies on IRET core serialization
* when returning from IPI handler and when returning from
@@ -1100,7 +1125,7 @@ restore_all_kernel:
jmp .Lirq_return
.section .fixup, "ax"
-ENTRY(iret_exc )
+SYM_CODE_START(iret_exc)
pushl $0 # no error code
pushl $do_iret_error
@@ -1117,9 +1142,10 @@ ENTRY(iret_exc )
#endif
jmp common_exception
+SYM_CODE_END(iret_exc)
.previous
_ASM_EXTABLE(.Lirq_return, iret_exc)
-ENDPROC(entry_INT80_32)
+SYM_FUNC_END(entry_INT80_32)
.macro FIXUP_ESPFIX_STACK
/*
@@ -1128,30 +1154,43 @@ ENDPROC(entry_INT80_32)
* We can't call C functions using the ESPFIX stack. This code reads
 * the high word of the segment base from the GDT and switches to the
* normal stack and adjusts ESP with the matching offset.
+ *
+ * We might be on user CR3 here, so percpu data is not mapped and we can't
+ * access the GDT through the percpu segment. Instead, use SGDT to find
+ * the cpu_entry_area alias of the GDT.
*/
#ifdef CONFIG_X86_ESPFIX32
/* fixup the stack */
- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
+ pushl %ecx
+ subl $2*4, %esp
+ sgdt (%esp)
+ movl 2(%esp), %ecx /* GDT address */
+ /*
+ * Careful: ECX is a linear pointer, so we need to force base
+ * zero. %cs is the only known-linear segment we have right now.
+ */
+ mov %cs:GDT_ESPFIX_OFFSET + 4(%ecx), %al /* bits 16..23 */
+ mov %cs:GDT_ESPFIX_OFFSET + 7(%ecx), %ah /* bits 24..31 */
shl $16, %eax
+ addl $2*4, %esp
+ popl %ecx
addl %esp, %eax /* the adjusted stack pointer */
pushl $__KERNEL_DS
pushl %eax
lss (%esp), %esp /* switch to the normal stack segment */
#endif
.endm
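
The SGDT-based lookup above boils down to simple descriptor arithmetic; a rough C rendition follows (the helper name is illustrative, not kernel API — the macro itself only consumes base bits 16..31 and adds the current %esp):

        /*
         * Sketch of FIXUP_ESPFIX_STACK's address math. A GDT descriptor
         * stores base bits 16..23 in byte 4 and bits 24..31 in byte 7;
         * the low base bits are treated as zero here, since the asm
         * never reads them.
         */
        static unsigned long espfix_linear_sp(const unsigned char *gdt_entry,
                                              unsigned long esp)
        {
                unsigned long base = ((unsigned long)gdt_entry[4] << 16) |
                                     ((unsigned long)gdt_entry[7] << 24);

                return base + esp;      /* the adjusted stack pointer */
        }
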
+
.macro UNWIND_ESPFIX_STACK
+	/* It's safe to clobber %eax; all other regs need to be preserved */
#ifdef CONFIG_X86_ESPFIX32
movl %ss, %eax
/* see if on espfix stack */
cmpw $__ESPFIX_SS, %ax
- jne 27f
- movl $__KERNEL_DS, %eax
- movl %eax, %ds
- movl %eax, %es
+ jne .Lno_fixup_\@
/* switch to normal stack */
FIXUP_ESPFIX_STACK
-27:
+.Lno_fixup_\@:
#endif
.endm
@@ -1160,7 +1199,7 @@ ENDPROC(entry_INT80_32)
* We pack 1 stub into every 8-byte block.
*/
.align 8
-ENTRY(irq_entries_start)
+SYM_CODE_START(irq_entries_start)
vector=FIRST_EXTERNAL_VECTOR
.rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
pushl $(~vector+0x80) /* Note: always in signed byte range */
@@ -1168,11 +1207,11 @@ ENTRY(irq_entries_start)
jmp common_interrupt
.align 8
.endr
-END(irq_entries_start)
+SYM_CODE_END(irq_entries_start)
#ifdef CONFIG_X86_LOCAL_APIC
.align 8
-ENTRY(spurious_entries_start)
+SYM_CODE_START(spurious_entries_start)
vector=FIRST_SYSTEM_VECTOR
.rept (NR_VECTORS - FIRST_SYSTEM_VECTOR)
pushl $(~vector+0x80) /* Note: always in signed byte range */
@@ -1180,9 +1219,9 @@ ENTRY(spurious_entries_start)
jmp common_spurious
.align 8
.endr
-END(spurious_entries_start)
+SYM_CODE_END(spurious_entries_start)
-common_spurious:
+SYM_CODE_START_LOCAL(common_spurious)
ASM_CLAC
addl $-0x80, (%esp) /* Adjust vector into the [-256, -1] range */
SAVE_ALL switch_stacks=1
@@ -1191,7 +1230,7 @@ common_spurious:
movl %esp, %eax
call smp_spurious_interrupt
jmp ret_from_intr
-ENDPROC(common_spurious)
+SYM_CODE_END(common_spurious)
#endif
/*
@@ -1199,7 +1238,7 @@ ENDPROC(common_spurious)
* so IRQ-flags tracing has to follow that:
*/
.p2align CONFIG_X86_L1_CACHE_SHIFT
-common_interrupt:
+SYM_CODE_START_LOCAL(common_interrupt)
ASM_CLAC
addl $-0x80, (%esp) /* Adjust vector into the [-256, -1] range */
@@ -1209,10 +1248,10 @@ common_interrupt:
movl %esp, %eax
call do_IRQ
jmp ret_from_intr
-ENDPROC(common_interrupt)
+SYM_CODE_END(common_interrupt)
#define BUILD_INTERRUPT3(name, nr, fn) \
-ENTRY(name) \
+SYM_FUNC_START(name) \
ASM_CLAC; \
pushl $~(nr); \
SAVE_ALL switch_stacks=1; \
@@ -1221,7 +1260,7 @@ ENTRY(name) \
movl %esp, %eax; \
call fn; \
jmp ret_from_intr; \
-ENDPROC(name)
+SYM_FUNC_END(name)
#define BUILD_INTERRUPT(name, nr) \
BUILD_INTERRUPT3(name, nr, smp_##name); \
@@ -1229,14 +1268,14 @@ ENDPROC(name)
/* The include is where all of the SMP etc. interrupts come from */
#include <asm/entry_arch.h>
-ENTRY(coprocessor_error)
+SYM_CODE_START(coprocessor_error)
ASM_CLAC
pushl $0
pushl $do_coprocessor_error
jmp common_exception
-END(coprocessor_error)
+SYM_CODE_END(coprocessor_error)
-ENTRY(simd_coprocessor_error)
+SYM_CODE_START(simd_coprocessor_error)
ASM_CLAC
pushl $0
#ifdef CONFIG_X86_INVD_BUG
@@ -1248,104 +1287,99 @@ ENTRY(simd_coprocessor_error)
pushl $do_simd_coprocessor_error
#endif
jmp common_exception
-END(simd_coprocessor_error)
+SYM_CODE_END(simd_coprocessor_error)
-ENTRY(device_not_available)
+SYM_CODE_START(device_not_available)
ASM_CLAC
pushl $-1 # mark this as an int
pushl $do_device_not_available
jmp common_exception
-END(device_not_available)
+SYM_CODE_END(device_not_available)
#ifdef CONFIG_PARAVIRT
-ENTRY(native_iret)
+SYM_CODE_START(native_iret)
iret
_ASM_EXTABLE(native_iret, iret_exc)
-END(native_iret)
+SYM_CODE_END(native_iret)
#endif
-ENTRY(overflow)
+SYM_CODE_START(overflow)
ASM_CLAC
pushl $0
pushl $do_overflow
jmp common_exception
-END(overflow)
+SYM_CODE_END(overflow)
-ENTRY(bounds)
+SYM_CODE_START(bounds)
ASM_CLAC
pushl $0
pushl $do_bounds
jmp common_exception
-END(bounds)
+SYM_CODE_END(bounds)
-ENTRY(invalid_op)
+SYM_CODE_START(invalid_op)
ASM_CLAC
pushl $0
pushl $do_invalid_op
jmp common_exception
-END(invalid_op)
+SYM_CODE_END(invalid_op)
-ENTRY(coprocessor_segment_overrun)
+SYM_CODE_START(coprocessor_segment_overrun)
ASM_CLAC
pushl $0
pushl $do_coprocessor_segment_overrun
jmp common_exception
-END(coprocessor_segment_overrun)
+SYM_CODE_END(coprocessor_segment_overrun)
-ENTRY(invalid_TSS)
+SYM_CODE_START(invalid_TSS)
ASM_CLAC
pushl $do_invalid_TSS
jmp common_exception
-END(invalid_TSS)
+SYM_CODE_END(invalid_TSS)
-ENTRY(segment_not_present)
+SYM_CODE_START(segment_not_present)
ASM_CLAC
pushl $do_segment_not_present
jmp common_exception
-END(segment_not_present)
+SYM_CODE_END(segment_not_present)
-ENTRY(stack_segment)
+SYM_CODE_START(stack_segment)
ASM_CLAC
pushl $do_stack_segment
jmp common_exception
-END(stack_segment)
+SYM_CODE_END(stack_segment)
-ENTRY(alignment_check)
+SYM_CODE_START(alignment_check)
ASM_CLAC
pushl $do_alignment_check
jmp common_exception
-END(alignment_check)
+SYM_CODE_END(alignment_check)
-ENTRY(divide_error)
+SYM_CODE_START(divide_error)
ASM_CLAC
pushl $0 # no error code
pushl $do_divide_error
jmp common_exception
-END(divide_error)
+SYM_CODE_END(divide_error)
#ifdef CONFIG_X86_MCE
-ENTRY(machine_check)
+SYM_CODE_START(machine_check)
ASM_CLAC
pushl $0
pushl machine_check_vector
jmp common_exception
-END(machine_check)
+SYM_CODE_END(machine_check)
#endif
-ENTRY(spurious_interrupt_bug)
+SYM_CODE_START(spurious_interrupt_bug)
ASM_CLAC
pushl $0
pushl $do_spurious_interrupt_bug
jmp common_exception
-END(spurious_interrupt_bug)
+SYM_CODE_END(spurious_interrupt_bug)
#ifdef CONFIG_XEN_PV
-ENTRY(xen_hypervisor_callback)
- pushl $-1 /* orig_ax = -1 => not a system call */
- SAVE_ALL
- ENCODE_FRAME_POINTER
- TRACE_IRQS_OFF
-
+SYM_FUNC_START(xen_hypervisor_callback)
/*
* Check to see if we got the event in the critical
* region in xen_iret_direct, after we've reenabled
@@ -1353,22 +1387,23 @@ ENTRY(xen_hypervisor_callback)
* iret instruction's behaviour where it delivers a
* pending interrupt when enabling interrupts:
*/
- movl PT_EIP(%esp), %eax
- cmpl $xen_iret_start_crit, %eax
+ cmpl $xen_iret_start_crit, (%esp)
jb 1f
- cmpl $xen_iret_end_crit, %eax
+ cmpl $xen_iret_end_crit, (%esp)
jae 1f
-
- jmp xen_iret_crit_fixup
-
-ENTRY(xen_do_upcall)
-1: mov %esp, %eax
+ call xen_iret_crit_fixup
+1:
+ pushl $-1 /* orig_ax = -1 => not a system call */
+ SAVE_ALL
+ ENCODE_FRAME_POINTER
+ TRACE_IRQS_OFF
+ mov %esp, %eax
call xen_evtchn_do_upcall
#ifndef CONFIG_PREEMPTION
call xen_maybe_preempt_hcall
#endif
jmp ret_from_intr
-ENDPROC(xen_hypervisor_callback)
+SYM_FUNC_END(xen_hypervisor_callback)
/*
* Hypervisor uses this for application faults while it executes.
@@ -1382,7 +1417,7 @@ ENDPROC(xen_hypervisor_callback)
* to pop the stack frame we end up in an infinite loop of failsafe callbacks.
* We distinguish between categories by maintaining a status value in EAX.
*/
-ENTRY(xen_failsafe_callback)
+SYM_FUNC_START(xen_failsafe_callback)
pushl %eax
movl $1, %eax
1: mov 4(%esp), %ds
@@ -1419,7 +1454,7 @@ ENTRY(xen_failsafe_callback)
_ASM_EXTABLE(2b, 7b)
_ASM_EXTABLE(3b, 8b)
_ASM_EXTABLE(4b, 9b)
-ENDPROC(xen_failsafe_callback)
+SYM_FUNC_END(xen_failsafe_callback)
#endif /* CONFIG_XEN_PV */
#ifdef CONFIG_XEN_PVHVM
@@ -1441,18 +1476,17 @@ BUILD_INTERRUPT3(hv_stimer0_callback_vector, HYPERV_STIMER0_VECTOR,
#endif /* CONFIG_HYPERV */
-ENTRY(page_fault)
+SYM_CODE_START(page_fault)
ASM_CLAC
pushl $do_page_fault
jmp common_exception_read_cr2
-END(page_fault)
+SYM_CODE_END(page_fault)
-common_exception_read_cr2:
+SYM_CODE_START_LOCAL_NOALIGN(common_exception_read_cr2)
/* the function address is in %gs's slot on the stack */
- SAVE_ALL switch_stacks=1 skip_gs=1
+ SAVE_ALL switch_stacks=1 skip_gs=1 unwind_espfix=1
ENCODE_FRAME_POINTER
- UNWIND_ESPFIX_STACK
/* fixup %gs */
GS_TO_REG %ecx
@@ -1470,13 +1504,12 @@ common_exception_read_cr2:
movl %esp, %eax # pt_regs pointer
CALL_NOSPEC %edi
jmp ret_from_exception
-END(common_exception_read_cr2)
+SYM_CODE_END(common_exception_read_cr2)
-common_exception:
+SYM_CODE_START_LOCAL_NOALIGN(common_exception)
/* the function address is in %gs's slot on the stack */
- SAVE_ALL switch_stacks=1 skip_gs=1
+ SAVE_ALL switch_stacks=1 skip_gs=1 unwind_espfix=1
ENCODE_FRAME_POINTER
- UNWIND_ESPFIX_STACK
/* fixup %gs */
GS_TO_REG %ecx
@@ -1492,9 +1525,9 @@ common_exception:
movl %esp, %eax # pt_regs pointer
CALL_NOSPEC %edi
jmp ret_from_exception
-END(common_exception)
+SYM_CODE_END(common_exception)
-ENTRY(debug)
+SYM_CODE_START(debug)
/*
* Entry from sysenter is now handled in common_exception
*/
@@ -1502,7 +1535,7 @@ ENTRY(debug)
pushl $-1 # mark this as an int
pushl $do_debug
jmp common_exception
-END(debug)
+SYM_CODE_END(debug)
/*
* NMI is doubly nasty. It can happen on the first instruction of
@@ -1511,10 +1544,14 @@ END(debug)
* switched stacks. We handle both conditions by simply checking whether we
* interrupted kernel code running on the SYSENTER stack.
*/
-ENTRY(nmi)
+SYM_CODE_START(nmi)
ASM_CLAC
#ifdef CONFIG_X86_ESPFIX32
+ /*
+ * ESPFIX_SS is only ever set on the return to user path
+ * after we've switched to the entry stack.
+ */
pushl %eax
movl %ss, %eax
cmpw $__ESPFIX_SS, %ax
@@ -1550,6 +1587,11 @@ ENTRY(nmi)
movl %ebx, %esp
.Lnmi_return:
+#ifdef CONFIG_X86_ESPFIX32
+ testl $CS_FROM_ESPFIX, PT_CS(%esp)
+ jnz .Lnmi_from_espfix
+#endif
+
CHECK_AND_APPLY_ESPFIX
RESTORE_ALL_NMI cr3_reg=%edi pop=4
jmp .Lirq_return
@@ -1557,28 +1599,47 @@ ENTRY(nmi)
#ifdef CONFIG_X86_ESPFIX32
.Lnmi_espfix_stack:
/*
- * create the pointer to lss back
+	 * Create the far pointer that lss uses to get back
*/
pushl %ss
pushl %esp
addl $4, (%esp)
- /* copy the iret frame of 12 bytes */
- .rept 3
- pushl 16(%esp)
- .endr
- pushl %eax
- SAVE_ALL_NMI cr3_reg=%edi
+
+ /* Copy the (short) IRET frame */
+ pushl 4*4(%esp) # flags
+ pushl 4*4(%esp) # cs
+ pushl 4*4(%esp) # ip
+
+ pushl %eax # orig_ax
+
+ SAVE_ALL_NMI cr3_reg=%edi unwind_espfix=1
ENCODE_FRAME_POINTER
- FIXUP_ESPFIX_STACK # %eax == %esp
+
+ /* clear CS_FROM_KERNEL, set CS_FROM_ESPFIX */
+ xorl $(CS_FROM_ESPFIX | CS_FROM_KERNEL), PT_CS(%esp)
+
xorl %edx, %edx # zero error code
- call do_nmi
+ movl %esp, %eax # pt_regs pointer
+ jmp .Lnmi_from_sysenter_stack
+
+.Lnmi_from_espfix:
RESTORE_ALL_NMI cr3_reg=%edi
- lss 12+4(%esp), %esp # back to espfix stack
+ /*
+ * Because we cleared CS_FROM_KERNEL, IRET_FRAME 'forgot' to
+ * fix up the gap and long frame:
+ *
+ * 3 - original frame (exception)
+ * 2 - ESPFIX block (above)
+ * 6 - gap (FIXUP_FRAME)
+ * 5 - long frame (FIXUP_FRAME)
+ * 1 - orig_ax
+ */
+ lss (1+5+6)*4(%esp), %esp # back to espfix stack
jmp .Lirq_return
#endif
-END(nmi)
+SYM_CODE_END(nmi)
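
As a worked check of the lss offset above: orig_ax (1 slot) + the long frame (5 slots) + the FIXUP_FRAME gap (6 slots) is 12 slots of 4 bytes each, i.e. (1+5+6)*4 = 48, which lands exactly on the two-slot ESPFIX block holding the saved %esp:%ss far pointer.
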
-ENTRY(int3)
+SYM_CODE_START(int3)
ASM_CLAC
pushl $-1 # mark this as an int
@@ -1589,22 +1650,22 @@ ENTRY(int3)
movl %esp, %eax # pt_regs pointer
call do_int3
jmp ret_from_exception
-END(int3)
+SYM_CODE_END(int3)
-ENTRY(general_protection)
+SYM_CODE_START(general_protection)
pushl $do_general_protection
jmp common_exception
-END(general_protection)
+SYM_CODE_END(general_protection)
#ifdef CONFIG_KVM_GUEST
-ENTRY(async_page_fault)
+SYM_CODE_START(async_page_fault)
ASM_CLAC
pushl $do_async_page_fault
jmp common_exception_read_cr2
-END(async_page_fault)
+SYM_CODE_END(async_page_fault)
#endif
-ENTRY(rewind_stack_do_exit)
+SYM_CODE_START(rewind_stack_do_exit)
/* Prevent any naive code from trying to unwind to our caller. */
xorl %ebp, %ebp
@@ -1613,4 +1674,4 @@ ENTRY(rewind_stack_do_exit)
call do_exit
1: jmp 1b
-END(rewind_stack_do_exit)
+SYM_CODE_END(rewind_stack_do_exit)
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index b7c3ea4cb19d..76942cbd95a1 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -15,7 +15,7 @@
* at the top of the kernel process stack.
*
* Some macro usage:
- * - ENTRY/END: Define functions in the symbol table.
+ * - SYM_FUNC_START/END: Define functions in the symbol table.
* - TRACE_IRQ_*: Trace hardirq state for lock debugging.
* - idtentry: Define exception entry points.
*/
@@ -46,11 +46,11 @@
.section .entry.text, "ax"
#ifdef CONFIG_PARAVIRT
-ENTRY(native_usergs_sysret64)
+SYM_CODE_START(native_usergs_sysret64)
UNWIND_HINT_EMPTY
swapgs
sysretq
-END(native_usergs_sysret64)
+SYM_CODE_END(native_usergs_sysret64)
#endif /* CONFIG_PARAVIRT */
.macro TRACE_IRQS_FLAGS flags:req
@@ -142,7 +142,7 @@ END(native_usergs_sysret64)
* with them due to bugs in both AMD and Intel CPUs.
*/
-ENTRY(entry_SYSCALL_64)
+SYM_CODE_START(entry_SYSCALL_64)
UNWIND_HINT_EMPTY
/*
* Interrupts are off on entry.
@@ -162,7 +162,7 @@ ENTRY(entry_SYSCALL_64)
pushq %r11 /* pt_regs->flags */
pushq $__USER_CS /* pt_regs->cs */
pushq %rcx /* pt_regs->ip */
-GLOBAL(entry_SYSCALL_64_after_hwframe)
+SYM_INNER_LABEL(entry_SYSCALL_64_after_hwframe, SYM_L_GLOBAL)
pushq %rax /* pt_regs->orig_ax */
PUSH_AND_CLEAR_REGS rax=$-ENOSYS
@@ -273,13 +273,13 @@ syscall_return_via_sysret:
popq %rdi
popq %rsp
USERGS_SYSRET64
-END(entry_SYSCALL_64)
+SYM_CODE_END(entry_SYSCALL_64)
/*
* %rdi: prev task
* %rsi: next task
*/
-ENTRY(__switch_to_asm)
+SYM_CODE_START(__switch_to_asm)
UNWIND_HINT_FUNC
/*
* Save callee-saved registers
@@ -321,7 +321,7 @@ ENTRY(__switch_to_asm)
popq %rbp
jmp __switch_to
-END(__switch_to_asm)
+SYM_CODE_END(__switch_to_asm)
/*
* A newly forked process directly context switches into this address.
@@ -330,7 +330,7 @@ END(__switch_to_asm)
* rbx: kernel thread func (NULL for user thread)
* r12: kernel thread arg
*/
-ENTRY(ret_from_fork)
+SYM_CODE_START(ret_from_fork)
UNWIND_HINT_EMPTY
movq %rax, %rdi
call schedule_tail /* rdi: 'prev' task parameter */
@@ -357,14 +357,14 @@ ENTRY(ret_from_fork)
*/
movq $0, RAX(%rsp)
jmp 2b
-END(ret_from_fork)
+SYM_CODE_END(ret_from_fork)
/*
* Build the entry stubs with some assembler magic.
* We pack 1 stub into every 8-byte block.
*/
.align 8
-ENTRY(irq_entries_start)
+SYM_CODE_START(irq_entries_start)
vector=FIRST_EXTERNAL_VECTOR
.rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
UNWIND_HINT_IRET_REGS
@@ -373,10 +373,10 @@ ENTRY(irq_entries_start)
.align 8
vector=vector+1
.endr
-END(irq_entries_start)
+SYM_CODE_END(irq_entries_start)
.align 8
-ENTRY(spurious_entries_start)
+SYM_CODE_START(spurious_entries_start)
vector=FIRST_SYSTEM_VECTOR
.rept (NR_VECTORS - FIRST_SYSTEM_VECTOR)
UNWIND_HINT_IRET_REGS
@@ -385,7 +385,7 @@ ENTRY(spurious_entries_start)
.align 8
vector=vector+1
.endr
-END(spurious_entries_start)
+SYM_CODE_END(spurious_entries_start)
.macro DEBUG_ENTRY_ASSERT_IRQS_OFF
#ifdef CONFIG_DEBUG_ENTRY
@@ -511,7 +511,7 @@ END(spurious_entries_start)
* | return address |
* +----------------------------------------------------+
*/
-ENTRY(interrupt_entry)
+SYM_CODE_START(interrupt_entry)
UNWIND_HINT_FUNC
ASM_CLAC
cld
@@ -579,7 +579,7 @@ ENTRY(interrupt_entry)
TRACE_IRQS_OFF
ret
-END(interrupt_entry)
+SYM_CODE_END(interrupt_entry)
_ASM_NOKPROBE(interrupt_entry)
@@ -589,18 +589,18 @@ _ASM_NOKPROBE(interrupt_entry)
* The interrupt stubs push (~vector+0x80) onto the stack and
* then jump to common_spurious/interrupt.
*/
-common_spurious:
+SYM_CODE_START_LOCAL(common_spurious)
addq $-0x80, (%rsp) /* Adjust vector to [-256, -1] range */
call interrupt_entry
UNWIND_HINT_REGS indirect=1
call smp_spurious_interrupt /* rdi points to pt_regs */
jmp ret_from_intr
-END(common_spurious)
+SYM_CODE_END(common_spurious)
_ASM_NOKPROBE(common_spurious)
/* common_interrupt is a hotpath. Align it */
.p2align CONFIG_X86_L1_CACHE_SHIFT
-common_interrupt:
+SYM_CODE_START_LOCAL(common_interrupt)
addq $-0x80, (%rsp) /* Adjust vector to [-256, -1] range */
call interrupt_entry
UNWIND_HINT_REGS indirect=1
@@ -616,12 +616,12 @@ ret_from_intr:
jz retint_kernel
/* Interrupt came from user space */
-GLOBAL(retint_user)
+.Lretint_user:
mov %rsp,%rdi
call prepare_exit_to_usermode
TRACE_IRQS_IRETQ
-GLOBAL(swapgs_restore_regs_and_return_to_usermode)
+SYM_INNER_LABEL(swapgs_restore_regs_and_return_to_usermode, SYM_L_GLOBAL)
#ifdef CONFIG_DEBUG_ENTRY
/* Assert that pt_regs indicates user mode. */
testb $3, CS(%rsp)
@@ -679,7 +679,7 @@ retint_kernel:
*/
TRACE_IRQS_IRETQ
-GLOBAL(restore_regs_and_return_to_kernel)
+SYM_INNER_LABEL(restore_regs_and_return_to_kernel, SYM_L_GLOBAL)
#ifdef CONFIG_DEBUG_ENTRY
/* Assert that pt_regs indicates kernel mode. */
testb $3, CS(%rsp)
@@ -695,7 +695,7 @@ GLOBAL(restore_regs_and_return_to_kernel)
*/
INTERRUPT_RETURN
-ENTRY(native_iret)
+SYM_INNER_LABEL_ALIGN(native_iret, SYM_L_GLOBAL)
UNWIND_HINT_IRET_REGS
/*
* Are we returning to a stack segment from the LDT? Note: in
@@ -706,8 +706,7 @@ ENTRY(native_iret)
jnz native_irq_return_ldt
#endif
-.global native_irq_return_iret
-native_irq_return_iret:
+SYM_INNER_LABEL(native_irq_return_iret, SYM_L_GLOBAL)
/*
* This may fault. Non-paranoid faults on return to userspace are
* handled by fixup_bad_iret. These include #SS, #GP, and #NP.
@@ -789,14 +788,14 @@ native_irq_return_ldt:
*/
jmp native_irq_return_iret
#endif
-END(common_interrupt)
+SYM_CODE_END(common_interrupt)
_ASM_NOKPROBE(common_interrupt)
/*
* APIC interrupts.
*/
.macro apicinterrupt3 num sym do_sym
-ENTRY(\sym)
+SYM_CODE_START(\sym)
UNWIND_HINT_IRET_REGS
pushq $~(\num)
.Lcommon_\sym:
@@ -804,7 +803,7 @@ ENTRY(\sym)
UNWIND_HINT_REGS indirect=1
call \do_sym /* rdi points to pt_regs */
jmp ret_from_intr
-END(\sym)
+SYM_CODE_END(\sym)
_ASM_NOKPROBE(\sym)
.endm
@@ -969,7 +968,7 @@ apicinterrupt IRQ_WORK_VECTOR irq_work_interrupt smp_irq_work_interrupt
* #DF: if the thread stack is somehow unusable, we'll still get a useful OOPS.
*/
.macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1 ist_offset=0 create_gap=0 read_cr2=0
-ENTRY(\sym)
+SYM_CODE_START(\sym)
UNWIND_HINT_IRET_REGS offset=\has_error_code*8
/* Sanity check */
@@ -1019,7 +1018,7 @@ ENTRY(\sym)
.endif
_ASM_NOKPROBE(\sym)
-END(\sym)
+SYM_CODE_END(\sym)
.endm
idtentry divide_error do_divide_error has_error_code=0
@@ -1041,7 +1040,7 @@ idtentry simd_coprocessor_error do_simd_coprocessor_error has_error_code=0
* Reload gs selector with exception handling
* edi: new selector
*/
-ENTRY(native_load_gs_index)
+SYM_FUNC_START(native_load_gs_index)
FRAME_BEGIN
pushfq
DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI)
@@ -1055,13 +1054,13 @@ ENTRY(native_load_gs_index)
popfq
FRAME_END
ret
-ENDPROC(native_load_gs_index)
+SYM_FUNC_END(native_load_gs_index)
EXPORT_SYMBOL(native_load_gs_index)
_ASM_EXTABLE(.Lgs_change, .Lbad_gs)
.section .fixup, "ax"
/* running with kernelgs */
-.Lbad_gs:
+SYM_CODE_START_LOCAL_NOALIGN(.Lbad_gs)
SWAPGS /* switch back to user gs */
.macro ZAP_GS
/* This can't be a string because the preprocessor needs to see it. */
@@ -1072,10 +1071,11 @@ EXPORT_SYMBOL(native_load_gs_index)
xorl %eax, %eax
movl %eax, %gs
jmp 2b
+SYM_CODE_END(.Lbad_gs)
.previous
/* Call softirq on interrupt stack. Interrupts are off. */
-ENTRY(do_softirq_own_stack)
+SYM_FUNC_START(do_softirq_own_stack)
pushq %rbp
mov %rsp, %rbp
ENTER_IRQ_STACK regs=0 old_rsp=%r11
@@ -1083,7 +1083,7 @@ ENTRY(do_softirq_own_stack)
LEAVE_IRQ_STACK regs=0
leaveq
ret
-ENDPROC(do_softirq_own_stack)
+SYM_FUNC_END(do_softirq_own_stack)
#ifdef CONFIG_XEN_PV
idtentry hypervisor_callback xen_do_hypervisor_callback has_error_code=0
@@ -1101,7 +1101,8 @@ idtentry hypervisor_callback xen_do_hypervisor_callback has_error_code=0
* existing activation in its critical region -- if so, we pop the current
* activation and restart the handler using the previous one.
*/
-ENTRY(xen_do_hypervisor_callback) /* do_hypervisor_callback(struct *pt_regs) */
+/* do_hypervisor_callback(struct pt_regs *) */
+SYM_CODE_START_LOCAL(xen_do_hypervisor_callback)
/*
 * Since we don't modify %rdi, xen_evtchn_do_upcall(struct pt_regs *) will
@@ -1119,7 +1120,7 @@ ENTRY(xen_do_hypervisor_callback) /* do_hypervisor_callback(struct *pt_regs) */
call xen_maybe_preempt_hcall
#endif
jmp error_exit
-END(xen_do_hypervisor_callback)
+SYM_CODE_END(xen_do_hypervisor_callback)
/*
* Hypervisor uses this for application faults while it executes.
@@ -1134,7 +1135,7 @@ END(xen_do_hypervisor_callback)
* We distinguish between categories by comparing each saved segment register
 * with its current contents: any discrepancy means we are in category 1.
*/
-ENTRY(xen_failsafe_callback)
+SYM_CODE_START(xen_failsafe_callback)
UNWIND_HINT_EMPTY
movl %ds, %ecx
cmpw %cx, 0x10(%rsp)
@@ -1164,7 +1165,7 @@ ENTRY(xen_failsafe_callback)
PUSH_AND_CLEAR_REGS
ENCODE_FRAME_POINTER
jmp error_exit
-END(xen_failsafe_callback)
+SYM_CODE_END(xen_failsafe_callback)
#endif /* CONFIG_XEN_PV */
#ifdef CONFIG_XEN_PVHVM
@@ -1214,7 +1215,7 @@ idtentry machine_check do_mce has_error_code=0 paranoid=1
* Use slow, but surefire "are we in kernel?" check.
* Return: ebx=0: need swapgs on exit, ebx=1: otherwise
*/
-ENTRY(paranoid_entry)
+SYM_CODE_START_LOCAL(paranoid_entry)
UNWIND_HINT_FUNC
cld
PUSH_AND_CLEAR_REGS save_ret=1
@@ -1248,7 +1249,7 @@ ENTRY(paranoid_entry)
FENCE_SWAPGS_KERNEL_ENTRY
ret
-END(paranoid_entry)
+SYM_CODE_END(paranoid_entry)
/*
* "Paranoid" exit path from exception stack. This is invoked
@@ -1262,7 +1263,7 @@ END(paranoid_entry)
*
* On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it)
*/
-ENTRY(paranoid_exit)
+SYM_CODE_START_LOCAL(paranoid_exit)
UNWIND_HINT_REGS
DISABLE_INTERRUPTS(CLBR_ANY)
TRACE_IRQS_OFF_DEBUG
@@ -1272,19 +1273,18 @@ ENTRY(paranoid_exit)
/* Always restore stashed CR3 value (see paranoid_entry) */
RESTORE_CR3 scratch_reg=%rbx save_reg=%r14
SWAPGS_UNSAFE_STACK
- jmp .Lparanoid_exit_restore
+ jmp restore_regs_and_return_to_kernel
.Lparanoid_exit_no_swapgs:
TRACE_IRQS_IRETQ_DEBUG
/* Always restore stashed CR3 value (see paranoid_entry) */
RESTORE_CR3 scratch_reg=%rbx save_reg=%r14
-.Lparanoid_exit_restore:
jmp restore_regs_and_return_to_kernel
-END(paranoid_exit)
+SYM_CODE_END(paranoid_exit)
/*
* Save all registers in pt_regs, and switch GS if needed.
*/
-ENTRY(error_entry)
+SYM_CODE_START_LOCAL(error_entry)
UNWIND_HINT_FUNC
cld
PUSH_AND_CLEAR_REGS save_ret=1
@@ -1364,16 +1364,16 @@ ENTRY(error_entry)
call fixup_bad_iret
mov %rax, %rsp
jmp .Lerror_entry_from_usermode_after_swapgs
-END(error_entry)
+SYM_CODE_END(error_entry)
-ENTRY(error_exit)
+SYM_CODE_START_LOCAL(error_exit)
UNWIND_HINT_REGS
DISABLE_INTERRUPTS(CLBR_ANY)
TRACE_IRQS_OFF
testb $3, CS(%rsp)
jz retint_kernel
- jmp retint_user
-END(error_exit)
+ jmp .Lretint_user
+SYM_CODE_END(error_exit)
/*
* Runs on exception stack. Xen PV does not go through this path at all,
@@ -1383,7 +1383,7 @@ END(error_exit)
* %r14: Used to save/restore the CR3 of the interrupted context
* when PAGE_TABLE_ISOLATION is in use. Do not clobber.
*/
-ENTRY(nmi)
+SYM_CODE_START(nmi)
UNWIND_HINT_IRET_REGS
/*
@@ -1718,21 +1718,21 @@ nmi_restore:
* about espfix64 on the way back to kernel mode.
*/
iretq
-END(nmi)
+SYM_CODE_END(nmi)
#ifndef CONFIG_IA32_EMULATION
/*
* This handles SYSCALL from 32-bit code. There is no way to program
* MSRs to fully disable 32-bit SYSCALL.
*/
-ENTRY(ignore_sysret)
+SYM_CODE_START(ignore_sysret)
UNWIND_HINT_EMPTY
mov $-ENOSYS, %eax
sysret
-END(ignore_sysret)
+SYM_CODE_END(ignore_sysret)
#endif
-ENTRY(rewind_stack_do_exit)
+SYM_CODE_START(rewind_stack_do_exit)
UNWIND_HINT_FUNC
/* Prevent any naive code from trying to unwind to our caller. */
xorl %ebp, %ebp
@@ -1742,4 +1742,4 @@ ENTRY(rewind_stack_do_exit)
UNWIND_HINT_FUNC sp_offset=PTREGS_SIZE
call do_exit
-END(rewind_stack_do_exit)
+SYM_CODE_END(rewind_stack_do_exit)
diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
index 39913770a44d..f1d3ccae5dd5 100644
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -46,7 +46,7 @@
* ebp user stack
* 0(%ebp) arg6
*/
-ENTRY(entry_SYSENTER_compat)
+SYM_FUNC_START(entry_SYSENTER_compat)
/* Interrupts are off on entry. */
SWAPGS
@@ -146,8 +146,8 @@ ENTRY(entry_SYSENTER_compat)
pushq $X86_EFLAGS_FIXED
popfq
jmp .Lsysenter_flags_fixed
-GLOBAL(__end_entry_SYSENTER_compat)
-ENDPROC(entry_SYSENTER_compat)
+SYM_INNER_LABEL(__end_entry_SYSENTER_compat, SYM_L_GLOBAL)
+SYM_FUNC_END(entry_SYSENTER_compat)
/*
* 32-bit SYSCALL entry.
@@ -196,7 +196,7 @@ ENDPROC(entry_SYSENTER_compat)
* esp user stack
* 0(%esp) arg6
*/
-ENTRY(entry_SYSCALL_compat)
+SYM_CODE_START(entry_SYSCALL_compat)
/* Interrupts are off on entry. */
swapgs
@@ -215,7 +215,7 @@ ENTRY(entry_SYSCALL_compat)
pushq %r11 /* pt_regs->flags */
pushq $__USER32_CS /* pt_regs->cs */
pushq %rcx /* pt_regs->ip */
-GLOBAL(entry_SYSCALL_compat_after_hwframe)
+SYM_INNER_LABEL(entry_SYSCALL_compat_after_hwframe, SYM_L_GLOBAL)
movl %eax, %eax /* discard orig_ax high bits */
pushq %rax /* pt_regs->orig_ax */
pushq %rdi /* pt_regs->di */
@@ -311,7 +311,7 @@ sysret32_from_system_call:
xorl %r10d, %r10d
swapgs
sysretl
-END(entry_SYSCALL_compat)
+SYM_CODE_END(entry_SYSCALL_compat)
/*
* 32-bit legacy system call entry.
@@ -339,7 +339,7 @@ END(entry_SYSCALL_compat)
* edi arg5
* ebp arg6
*/
-ENTRY(entry_INT80_compat)
+SYM_CODE_START(entry_INT80_compat)
/*
* Interrupts are off on entry.
*/
@@ -416,4 +416,4 @@ ENTRY(entry_INT80_compat)
/* Go back to user mode. */
TRACE_IRQS_ON
jmp swapgs_restore_regs_and_return_to_usermode
-END(entry_INT80_compat)
+SYM_CODE_END(entry_INT80_compat)
diff --git a/arch/x86/entry/syscall_32.c b/arch/x86/entry/syscall_32.c
index aa3336a7cb15..7d17b3addbbb 100644
--- a/arch/x86/entry/syscall_32.c
+++ b/arch/x86/entry/syscall_32.c
@@ -10,13 +10,11 @@
#ifdef CONFIG_IA32_EMULATION
/* On X86_64, we use struct pt_regs * to pass parameters to syscalls */
#define __SYSCALL_I386(nr, sym, qual) extern asmlinkage long sym(const struct pt_regs *);
-
-/* this is a lie, but it does not hurt as sys_ni_syscall just returns -EINVAL */
-extern asmlinkage long sys_ni_syscall(const struct pt_regs *);
-
+#define __sys_ni_syscall __ia32_sys_ni_syscall
#else /* CONFIG_IA32_EMULATION */
#define __SYSCALL_I386(nr, sym, qual) extern asmlinkage long sym(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
extern asmlinkage long sys_ni_syscall(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
+#define __sys_ni_syscall sys_ni_syscall
#endif /* CONFIG_IA32_EMULATION */
#include <asm/syscalls_32.h>
@@ -29,6 +27,6 @@ __visible const sys_call_ptr_t ia32_sys_call_table[__NR_syscall_compat_max+1] =
* Smells like a compiler bug -- it doesn't work
* when the & below is removed.
*/
- [0 ... __NR_syscall_compat_max] = &sys_ni_syscall,
+ [0 ... __NR_syscall_compat_max] = &__sys_ni_syscall,
#include <asm/syscalls_32.h>
};
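
The table relies on GCC's designated range initializers: the [0 ... N] entry fills every slot with the not-implemented stub first, and the per-syscall initializers pulled in by the #include override individual slots. A minimal standalone sketch of the pattern (function names hypothetical):

        typedef long (*syscall_fn_t)(void);

        static long ni(void)   { return -38; } /* -ENOSYS */
        static long real(void) { return 0; }

        static const syscall_fn_t demo_table[4] = {
                [0 ... 3] = &ni,        /* default everything first */
                [2]       = &real,      /* a later initializer overrides */
        };
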
diff --git a/arch/x86/entry/syscall_64.c b/arch/x86/entry/syscall_64.c
index b1bf31713374..adf619a856e8 100644
--- a/arch/x86/entry/syscall_64.c
+++ b/arch/x86/entry/syscall_64.c
@@ -4,11 +4,17 @@
#include <linux/linkage.h>
#include <linux/sys.h>
#include <linux/cache.h>
+#include <linux/syscalls.h>
#include <asm/asm-offsets.h>
#include <asm/syscall.h>
-/* this is a lie, but it does not hurt as sys_ni_syscall just returns -EINVAL */
-extern asmlinkage long sys_ni_syscall(const struct pt_regs *);
+extern asmlinkage long sys_ni_syscall(void);
+
+SYSCALL_DEFINE0(ni_syscall)
+{
+ return sys_ni_syscall();
+}
+
#define __SYSCALL_64(nr, sym, qual) extern asmlinkage long sym(const struct pt_regs *);
#define __SYSCALL_X32(nr, sym, qual) __SYSCALL_64(nr, sym, qual)
#include <asm/syscalls_64.h>
@@ -23,7 +29,7 @@ asmlinkage const sys_call_ptr_t sys_call_table[__NR_syscall_max+1] = {
* Smells like a compiler bug -- it doesn't work
* when the & below is removed.
*/
- [0 ... __NR_syscall_max] = &sys_ni_syscall,
+ [0 ... __NR_syscall_max] = &__x64_sys_ni_syscall,
#include <asm/syscalls_64.h>
};
@@ -40,7 +46,7 @@ asmlinkage const sys_call_ptr_t x32_sys_call_table[__NR_syscall_x32_max+1] = {
* Smells like a compiler bug -- it doesn't work
* when the & below is removed.
*/
- [0 ... __NR_syscall_x32_max] = &sys_ni_syscall,
+ [0 ... __NR_syscall_x32_max] = &__x64_sys_ni_syscall,
#include <asm/syscalls_64.h>
};
diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl
index 3fe02546aed3..15908eb9b17e 100644
--- a/arch/x86/entry/syscalls/syscall_32.tbl
+++ b/arch/x86/entry/syscalls/syscall_32.tbl
@@ -124,13 +124,13 @@
110 i386 iopl sys_iopl __ia32_sys_iopl
111 i386 vhangup sys_vhangup __ia32_sys_vhangup
112 i386 idle
-113 i386 vm86old sys_vm86old sys_ni_syscall
+113 i386 vm86old sys_vm86old __ia32_sys_ni_syscall
114 i386 wait4 sys_wait4 __ia32_compat_sys_wait4
115 i386 swapoff sys_swapoff __ia32_sys_swapoff
116 i386 sysinfo sys_sysinfo __ia32_compat_sys_sysinfo
117 i386 ipc sys_ipc __ia32_compat_sys_ipc
118 i386 fsync sys_fsync __ia32_sys_fsync
-119 i386 sigreturn sys_sigreturn sys32_sigreturn
+119 i386 sigreturn sys_sigreturn __ia32_compat_sys_sigreturn
120 i386 clone sys_clone __ia32_compat_sys_x86_clone
121 i386 setdomainname sys_setdomainname __ia32_sys_setdomainname
122 i386 uname sys_newuname __ia32_sys_newuname
@@ -177,14 +177,14 @@
163 i386 mremap sys_mremap __ia32_sys_mremap
164 i386 setresuid sys_setresuid16 __ia32_sys_setresuid16
165 i386 getresuid sys_getresuid16 __ia32_sys_getresuid16
-166 i386 vm86 sys_vm86 sys_ni_syscall
+166 i386 vm86 sys_vm86 __ia32_sys_ni_syscall
167 i386 query_module
168 i386 poll sys_poll __ia32_sys_poll
169 i386 nfsservctl
170 i386 setresgid sys_setresgid16 __ia32_sys_setresgid16
171 i386 getresgid sys_getresgid16 __ia32_sys_getresgid16
172 i386 prctl sys_prctl __ia32_sys_prctl
-173 i386 rt_sigreturn sys_rt_sigreturn sys32_rt_sigreturn
+173 i386 rt_sigreturn sys_rt_sigreturn __ia32_compat_sys_rt_sigreturn
174 i386 rt_sigaction sys_rt_sigaction __ia32_compat_sys_rt_sigaction
175 i386 rt_sigprocmask sys_rt_sigprocmask __ia32_compat_sys_rt_sigprocmask
176 i386 rt_sigpending sys_rt_sigpending __ia32_compat_sys_rt_sigpending
diff --git a/arch/x86/entry/thunk_32.S b/arch/x86/entry/thunk_32.S
index 2713490611a3..e010d4ae11f1 100644
--- a/arch/x86/entry/thunk_32.S
+++ b/arch/x86/entry/thunk_32.S
@@ -10,8 +10,7 @@
/* put return address in eax (arg1) */
.macro THUNK name, func, put_ret_addr_in_eax=0
- .globl \name
-\name:
+SYM_CODE_START_NOALIGN(\name)
pushl %eax
pushl %ecx
pushl %edx
@@ -27,6 +26,7 @@
popl %eax
ret
_ASM_NOKPROBE(\name)
+SYM_CODE_END(\name)
.endm
#ifdef CONFIG_TRACE_IRQFLAGS
diff --git a/arch/x86/entry/thunk_64.S b/arch/x86/entry/thunk_64.S
index ea5c4167086c..c5c3b6e86e62 100644
--- a/arch/x86/entry/thunk_64.S
+++ b/arch/x86/entry/thunk_64.S
@@ -12,7 +12,7 @@
/* rdi: arg1 ... normal C conventions. rax is saved/restored. */
.macro THUNK name, func, put_ret_addr_in_rdi=0
- ENTRY(\name)
+SYM_FUNC_START_NOALIGN(\name)
pushq %rbp
movq %rsp, %rbp
@@ -33,7 +33,7 @@
call \func
jmp .L_restore
- ENDPROC(\name)
+SYM_FUNC_END(\name)
_ASM_NOKPROBE(\name)
.endm
@@ -56,7 +56,7 @@
#if defined(CONFIG_TRACE_IRQFLAGS) \
|| defined(CONFIG_DEBUG_LOCK_ALLOC) \
|| defined(CONFIG_PREEMPTION)
-.L_restore:
+SYM_CODE_START_LOCAL_NOALIGN(.L_restore)
popq %r11
popq %r10
popq %r9
@@ -69,4 +69,5 @@
popq %rbp
ret
_ASM_NOKPROBE(.L_restore)
+SYM_CODE_END(.L_restore)
#endif
diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile
index 0f2154106d01..2b75e80f6b41 100644
--- a/arch/x86/entry/vdso/Makefile
+++ b/arch/x86/entry/vdso/Makefile
@@ -87,11 +87,9 @@ $(vobjs): KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS) $(RETPOLINE_CFLAGS
#
# vDSO code runs in userspace and -pg doesn't help with profiling anyway.
#
-CFLAGS_REMOVE_vdso-note.o = -pg
CFLAGS_REMOVE_vclock_gettime.o = -pg
CFLAGS_REMOVE_vdso32/vclock_gettime.o = -pg
CFLAGS_REMOVE_vgetcpu.o = -pg
-CFLAGS_REMOVE_vvar.o = -pg
#
# X32 processes use x32 vDSO to access 64bit kernel data.
diff --git a/arch/x86/entry/vdso/vdso32/system_call.S b/arch/x86/entry/vdso/vdso32/system_call.S
index 263d7433dea8..de1fff7188aa 100644
--- a/arch/x86/entry/vdso/vdso32/system_call.S
+++ b/arch/x86/entry/vdso/vdso32/system_call.S
@@ -62,7 +62,7 @@ __kernel_vsyscall:
/* Enter using int $0x80 */
int $0x80
-GLOBAL(int80_landing_pad)
+SYM_INNER_LABEL(int80_landing_pad, SYM_L_GLOBAL)
/*
* Restore EDX and ECX in case they were clobbered. EBP is not
diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
index 64c3e70b0556..a7752cd78b89 100644
--- a/arch/x86/events/amd/core.c
+++ b/arch/x86/events/amd/core.c
@@ -652,15 +652,7 @@ static void amd_pmu_disable_event(struct perf_event *event)
*/
static int amd_pmu_handle_irq(struct pt_regs *regs)
{
- struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
- int active, handled;
-
- /*
- * Obtain the active count before calling x86_pmu_handle_irq() since
- * it is possible that x86_pmu_handle_irq() may make a counter
- * inactive (through x86_pmu_stop).
- */
- active = __bitmap_weight(cpuc->active_mask, X86_PMC_IDX_MAX);
+ int handled;
/* Process any counter overflows */
handled = x86_pmu_handle_irq(regs);
@@ -670,8 +662,7 @@ static int amd_pmu_handle_irq(struct pt_regs *regs)
* NMIs will be claimed if arriving within that window.
*/
if (handled) {
- this_cpu_write(perf_nmi_tstamp,
- jiffies + perf_nmi_window);
+ this_cpu_write(perf_nmi_tstamp, jiffies + perf_nmi_window);
return handled;
}
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 7b21455d7504..6e3f0c18908e 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -2243,6 +2243,13 @@ static void x86_pmu_sched_task(struct perf_event_context *ctx, bool sched_in)
x86_pmu.sched_task(ctx, sched_in);
}
+static void x86_pmu_swap_task_ctx(struct perf_event_context *prev,
+ struct perf_event_context *next)
+{
+ if (x86_pmu.swap_task_ctx)
+ x86_pmu.swap_task_ctx(prev, next);
+}
+
void perf_check_microcode(void)
{
if (x86_pmu.check_microcode)
@@ -2297,6 +2304,7 @@ static struct pmu pmu = {
.event_idx = x86_pmu_event_idx,
.sched_task = x86_pmu_sched_task,
.task_ctx_size = sizeof(struct x86_perf_task_context),
+ .swap_task_ctx = x86_pmu_swap_task_ctx,
.check_period = x86_pmu_check_period,
.aux_output_match = x86_pmu_aux_output_match,
diff --git a/arch/x86/events/intel/bts.c b/arch/x86/events/intel/bts.c
index 5ee3fed881d3..38de4a7f6752 100644
--- a/arch/x86/events/intel/bts.c
+++ b/arch/x86/events/intel/bts.c
@@ -549,9 +549,11 @@ static int bts_event_init(struct perf_event *event)
* Note that the default paranoia setting permits unprivileged
* users to profile the kernel.
*/
- if (event->attr.exclude_kernel && perf_paranoid_kernel() &&
- !capable(CAP_SYS_ADMIN))
- return -EACCES;
+ if (event->attr.exclude_kernel) {
+ ret = perf_allow_kernel(&event->attr);
+ if (ret)
+ return ret;
+ }
if (x86_add_exclusive(x86_lbr_exclusive_bts))
return -EBUSY;
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index fcef678c3423..3be51aa06e67 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -3315,16 +3315,28 @@ static int intel_pmu_hw_config(struct perf_event *event)
if (x86_pmu.version < 3)
return -EINVAL;
- if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
- return -EACCES;
+ ret = perf_allow_cpu(&event->attr);
+ if (ret)
+ return ret;
event->hw.config |= ARCH_PERFMON_EVENTSEL_ANY;
return 0;
}
+#ifdef CONFIG_RETPOLINE
+static struct perf_guest_switch_msr *core_guest_get_msrs(int *nr);
+static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr);
+#endif
+
struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
{
+#ifdef CONFIG_RETPOLINE
+ if (x86_pmu.guest_get_msrs == intel_guest_get_msrs)
+ return intel_guest_get_msrs(nr);
+ else if (x86_pmu.guest_get_msrs == core_guest_get_msrs)
+ return core_guest_get_msrs(nr);
+#endif
if (x86_pmu.guest_get_msrs)
return x86_pmu.guest_get_msrs(nr);
*nr = 0;
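
The CONFIG_RETPOLINE block above devirtualizes the hot call: comparing x86_pmu.guest_get_msrs against its two known targets lets the compiler emit direct calls, so the common path avoids a retpolined indirect branch. A generic sketch of the pattern (functions hypothetical):

        static int fast_impl(int x) { return x + 1; }
        static int slow_impl(int x) { return x + 2; }

        static int (*op)(int) = fast_impl;

        static int dispatch(int x)
        {
                if (op == fast_impl)
                        return fast_impl(x);    /* direct call */
                if (op == slow_impl)
                        return slow_impl(x);    /* direct call */
                return op ? op(x) : 0;          /* rare fallback: indirect */
        }
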
@@ -3819,6 +3831,12 @@ static void intel_pmu_sched_task(struct perf_event_context *ctx,
intel_pmu_lbr_sched_task(ctx, sched_in);
}
+static void intel_pmu_swap_task_ctx(struct perf_event_context *prev,
+ struct perf_event_context *next)
+{
+ intel_pmu_lbr_swap_task_ctx(prev, next);
+}
+
static int intel_pmu_check_period(struct perf_event *event, u64 value)
{
return intel_pmu_has_bts_period(event, value) ? -EINVAL : 0;
@@ -3954,6 +3972,7 @@ static __initconst const struct x86_pmu intel_pmu = {
.guest_get_msrs = intel_guest_get_msrs,
.sched_task = intel_pmu_sched_task,
+ .swap_task_ctx = intel_pmu_swap_task_ctx,
.check_period = intel_pmu_check_period,
diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
index ea54634eabf3..534c76606049 100644
--- a/arch/x86/events/intel/lbr.c
+++ b/arch/x86/events/intel/lbr.c
@@ -417,6 +417,29 @@ static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx)
cpuc->last_log_id = ++task_ctx->log_id;
}
+void intel_pmu_lbr_swap_task_ctx(struct perf_event_context *prev,
+ struct perf_event_context *next)
+{
+ struct x86_perf_task_context *prev_ctx_data, *next_ctx_data;
+
+ swap(prev->task_ctx_data, next->task_ctx_data);
+
+ /*
+ * Architecture specific synchronization makes sense in
+	 * Architecture-specific synchronization only makes sense
+	 * when both the prev->task_ctx_data and next->task_ctx_data
+	 * pointers are allocated.
+
+ prev_ctx_data = next->task_ctx_data;
+ next_ctx_data = prev->task_ctx_data;
+
+ if (!prev_ctx_data || !next_ctx_data)
+ return;
+
+ swap(prev_ctx_data->lbr_callstack_users,
+ next_ctx_data->lbr_callstack_users);
+}
+
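
The double swap has a subtle net effect: the task_ctx_data buffers change owners while each lbr_callstack_users count stays with its original context. A condensed model with simplified types (illustrative only, not the kernel structures):

        struct data { int users; };
        struct ctx  { struct data *d; };

        static void swap_ctx_data(struct ctx *prev, struct ctx *next)
        {
                struct data *tmp = prev->d;

                prev->d = next->d;              /* swap the buffers ... */
                next->d = tmp;

                if (prev->d && next->d) {       /* ... but not the counts */
                        int u = prev->d->users;

                        prev->d->users = next->d->users;
                        next->d->users = u;
                }
        }
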
void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
diff --git a/arch/x86/events/intel/p4.c b/arch/x86/events/intel/p4.c
index dee579efb2b2..a4cc66005ce8 100644
--- a/arch/x86/events/intel/p4.c
+++ b/arch/x86/events/intel/p4.c
@@ -776,8 +776,9 @@ static int p4_validate_raw_event(struct perf_event *event)
* the user needs special permissions to be able to use it
*/
if (p4_ht_active() && p4_event_bind_map[v].shared) {
- if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
- return -EACCES;
+ v = perf_allow_cpu(&event->attr);
+ if (v)
+ return v;
}
/* ESCR EventMask bits may be invalid */
diff --git a/arch/x86/events/intel/pt.c b/arch/x86/events/intel/pt.c
index 05e43d0f430b..1db7a51d9792 100644
--- a/arch/x86/events/intel/pt.c
+++ b/arch/x86/events/intel/pt.c
@@ -397,6 +397,20 @@ static bool pt_event_valid(struct perf_event *event)
* These all are cpu affine and operate on a local PT
*/
+static void pt_config_start(struct perf_event *event)
+{
+ struct pt *pt = this_cpu_ptr(&pt_ctx);
+ u64 ctl = event->hw.config;
+
+ ctl |= RTIT_CTL_TRACEEN;
+ if (READ_ONCE(pt->vmx_on))
+ perf_aux_output_flag(&pt->handle, PERF_AUX_FLAG_PARTIAL);
+ else
+ wrmsrl(MSR_IA32_RTIT_CTL, ctl);
+
+ WRITE_ONCE(event->hw.config, ctl);
+}
+
/* Address ranges and their corresponding msr configuration registers */
static const struct pt_address_range {
unsigned long msr_a;
@@ -469,6 +483,7 @@ static u64 pt_config_filters(struct perf_event *event)
static void pt_config(struct perf_event *event)
{
struct pt *pt = this_cpu_ptr(&pt_ctx);
+ struct pt_buffer *buf = perf_get_aux(&pt->handle);
u64 reg;
/* First round: clear STATUS, in particular the PSB byte counter. */
@@ -478,7 +493,9 @@ static void pt_config(struct perf_event *event)
}
reg = pt_config_filters(event);
- reg |= RTIT_CTL_TOPA | RTIT_CTL_TRACEEN;
+ reg |= RTIT_CTL_TRACEEN;
+ if (!buf->single)
+ reg |= RTIT_CTL_TOPA;
/*
* Previously, we had BRANCH_EN on by default, but now that PT has
@@ -501,10 +518,7 @@ static void pt_config(struct perf_event *event)
reg |= (event->attr.config & PT_CONFIG_MASK);
event->hw.config = reg;
- if (READ_ONCE(pt->vmx_on))
- perf_aux_output_flag(&pt->handle, PERF_AUX_FLAG_PARTIAL);
- else
- wrmsrl(MSR_IA32_RTIT_CTL, reg);
+ pt_config_start(event);
}
static void pt_config_stop(struct perf_event *event)
@@ -533,18 +547,6 @@ static void pt_config_stop(struct perf_event *event)
wmb();
}
-static void pt_config_buffer(void *buf, unsigned int topa_idx,
- unsigned int output_off)
-{
- u64 reg;
-
- wrmsrl(MSR_IA32_RTIT_OUTPUT_BASE, virt_to_phys(buf));
-
- reg = 0x7f | ((u64)topa_idx << 7) | ((u64)output_off << 32);
-
- wrmsrl(MSR_IA32_RTIT_OUTPUT_MASK, reg);
-}
-
/**
* struct topa - ToPA metadata
* @list: linkage to struct pt_buffer's list of tables
@@ -602,6 +604,33 @@ static inline phys_addr_t topa_pfn(struct topa *topa)
#define TOPA_ENTRY_SIZE(t, i) (sizes(TOPA_ENTRY((t), (i))->size))
#define TOPA_ENTRY_PAGES(t, i) (1 << TOPA_ENTRY((t), (i))->size)
+static void pt_config_buffer(struct pt_buffer *buf)
+{
+ struct pt *pt = this_cpu_ptr(&pt_ctx);
+ u64 reg, mask;
+ void *base;
+
+ if (buf->single) {
+ base = buf->data_pages[0];
+ mask = (buf->nr_pages * PAGE_SIZE - 1) >> 7;
+ } else {
+ base = topa_to_page(buf->cur)->table;
+ mask = (u64)buf->cur_idx;
+ }
+
+ reg = virt_to_phys(base);
+ if (pt->output_base != reg) {
+ pt->output_base = reg;
+ wrmsrl(MSR_IA32_RTIT_OUTPUT_BASE, reg);
+ }
+
+ reg = 0x7f | (mask << 7) | ((u64)buf->output_off << 32);
+ if (pt->output_mask != reg) {
+ pt->output_mask = reg;
+ wrmsrl(MSR_IA32_RTIT_OUTPUT_MASK, reg);
+ }
+}
+
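
The cached output_base/output_mask copies exist so the hot path can skip redundant MSR writes. The write-if-changed idiom in isolation (helper name hypothetical; assumes wrmsrl() from <asm/msr.h>):

        static void wrmsrl_cached(unsigned int msr, u64 *cached, u64 val)
        {
                if (*cached != val) {
                        *cached = val;
                        wrmsrl(msr, val);       /* only on actual change */
                }
        }
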
/**
* topa_alloc() - allocate page-sized ToPA table
* @cpu: CPU on which to allocate.
@@ -802,6 +831,11 @@ static void pt_update_head(struct pt *pt)
struct pt_buffer *buf = perf_get_aux(&pt->handle);
u64 topa_idx, base, old;
+ if (buf->single) {
+ local_set(&buf->data_size, buf->output_off);
+ return;
+ }
+
/* offset of the first region in this table from the beginning of buf */
base = buf->cur->offset + buf->output_off;
@@ -903,18 +937,21 @@ static void pt_handle_status(struct pt *pt)
*/
static void pt_read_offset(struct pt_buffer *buf)
{
- u64 offset, base_topa;
+ struct pt *pt = this_cpu_ptr(&pt_ctx);
struct topa_page *tp;
- rdmsrl(MSR_IA32_RTIT_OUTPUT_BASE, base_topa);
- tp = phys_to_virt(base_topa);
- buf->cur = &tp->topa;
+ if (!buf->single) {
+ rdmsrl(MSR_IA32_RTIT_OUTPUT_BASE, pt->output_base);
+ tp = phys_to_virt(pt->output_base);
+ buf->cur = &tp->topa;
+ }
- rdmsrl(MSR_IA32_RTIT_OUTPUT_MASK, offset);
+ rdmsrl(MSR_IA32_RTIT_OUTPUT_MASK, pt->output_mask);
/* offset within current output region */
- buf->output_off = offset >> 32;
+ buf->output_off = pt->output_mask >> 32;
/* index of current output region within this table */
- buf->cur_idx = (offset & 0xffffff80) >> 7;
+ if (!buf->single)
+ buf->cur_idx = (pt->output_mask & 0xffffff80) >> 7;
}
static struct topa_entry *
@@ -1030,6 +1067,9 @@ static int pt_buffer_reset_markers(struct pt_buffer *buf,
unsigned long head = local64_read(&buf->head);
unsigned long idx, npages, wakeup;
+ if (buf->single)
+ return 0;
+
/* can't stop in the middle of an output region */
if (buf->output_off + handle->size + 1 < pt_buffer_region_size(buf)) {
perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
@@ -1111,13 +1151,17 @@ static void pt_buffer_reset_offsets(struct pt_buffer *buf, unsigned long head)
if (buf->snapshot)
head &= (buf->nr_pages << PAGE_SHIFT) - 1;
- pg = (head >> PAGE_SHIFT) & (buf->nr_pages - 1);
- te = pt_topa_entry_for_page(buf, pg);
+ if (!buf->single) {
+ pg = (head >> PAGE_SHIFT) & (buf->nr_pages - 1);
+ te = pt_topa_entry_for_page(buf, pg);
- cur_tp = topa_entry_to_page(te);
- buf->cur = &cur_tp->topa;
- buf->cur_idx = te - TOPA_ENTRY(buf->cur, 0);
- buf->output_off = head & (pt_buffer_region_size(buf) - 1);
+ cur_tp = topa_entry_to_page(te);
+ buf->cur = &cur_tp->topa;
+ buf->cur_idx = te - TOPA_ENTRY(buf->cur, 0);
+ buf->output_off = head & (pt_buffer_region_size(buf) - 1);
+ } else {
+ buf->output_off = head;
+ }
local64_set(&buf->head, head);
local_set(&buf->data_size, 0);
@@ -1131,6 +1175,9 @@ static void pt_buffer_fini_topa(struct pt_buffer *buf)
{
struct topa *topa, *iter;
+ if (buf->single)
+ return;
+
list_for_each_entry_safe(topa, iter, &buf->tables, list) {
/*
* right now, this is in free_aux() path only, so
@@ -1176,6 +1223,36 @@ static int pt_buffer_init_topa(struct pt_buffer *buf, int cpu,
return 0;
}
+static int pt_buffer_try_single(struct pt_buffer *buf, int nr_pages)
+{
+ struct page *p = virt_to_page(buf->data_pages[0]);
+ int ret = -ENOTSUPP, order = 0;
+
+ /*
+ * We can use single range output mode
+ * + in snapshot mode, where we don't need interrupts;
+ * + if the hardware supports it;
+ * + if the entire buffer is one contiguous allocation.
+ */
+ if (!buf->snapshot)
+ goto out;
+
+ if (!intel_pt_validate_hw_cap(PT_CAP_single_range_output))
+ goto out;
+
+ if (PagePrivate(p))
+ order = page_private(p);
+
+ if (1 << order != nr_pages)
+ goto out;
+
+ buf->single = true;
+ buf->nr_pages = nr_pages;
+ ret = 0;
+out:
+ return ret;
+}
+
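
As a worked example of the contiguity check: a 16-page AUX buffer qualifies only if it came from a single order-4 allocation, where page_private() reports 4 and 1 << 4 == 16; sixteen separate order-0 pages fail the 1 << order != nr_pages test and fall back to ToPA.
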
/**
* pt_buffer_setup_aux() - set up topa tables for a PT buffer
* @cpu: Cpu on which to allocate, -1 means current.
@@ -1198,6 +1275,13 @@ pt_buffer_setup_aux(struct perf_event *event, void **pages,
if (!nr_pages)
return NULL;
+ /*
+ * Only support AUX sampling in snapshot mode, where we don't
+ * generate NMIs.
+ */
+ if (event->attr.aux_sample_size && !snapshot)
+ return NULL;
+
if (cpu == -1)
cpu = raw_smp_processor_id();
node = cpu_to_node(cpu);
@@ -1213,6 +1297,10 @@ pt_buffer_setup_aux(struct perf_event *event, void **pages,
INIT_LIST_HEAD(&buf->tables);
+ ret = pt_buffer_try_single(buf, nr_pages);
+ if (!ret)
+ return buf;
+
ret = pt_buffer_init_topa(buf, cpu, nr_pages, GFP_KERNEL);
if (ret) {
kfree(buf);
@@ -1379,9 +1467,8 @@ void intel_pt_interrupt(void)
return;
}
- pt_config_buffer(topa_to_page(buf->cur)->table, buf->cur_idx,
- buf->output_off);
- pt_config(event);
+ pt_config_buffer(buf);
+ pt_config_start(event);
}
}
@@ -1444,8 +1531,7 @@ static void pt_event_start(struct perf_event *event, int mode)
WRITE_ONCE(pt->handle_nmi, 1);
hwc->state = 0;
- pt_config_buffer(topa_to_page(buf->cur)->table, buf->cur_idx,
- buf->output_off);
+ pt_config_buffer(buf);
pt_config(event);
return;
@@ -1496,6 +1582,52 @@ static void pt_event_stop(struct perf_event *event, int mode)
}
}
+static long pt_event_snapshot_aux(struct perf_event *event,
+ struct perf_output_handle *handle,
+ unsigned long size)
+{
+ struct pt *pt = this_cpu_ptr(&pt_ctx);
+ struct pt_buffer *buf = perf_get_aux(&pt->handle);
+ unsigned long from = 0, to;
+ long ret;
+
+ if (WARN_ON_ONCE(!buf))
+ return 0;
+
+ /*
+ * Sampling is only allowed on snapshot events;
+ * see pt_buffer_setup_aux().
+ */
+ if (WARN_ON_ONCE(!buf->snapshot))
+ return 0;
+
+ /*
+	 * Here, handle_nmi tells us whether the tracing is on.
+ */
+ if (READ_ONCE(pt->handle_nmi))
+ pt_config_stop(event);
+
+ pt_read_offset(buf);
+ pt_update_head(pt);
+
+ to = local_read(&buf->data_size);
+ if (to < size)
+ from = buf->nr_pages << PAGE_SHIFT;
+ from += to - size;
+
+ ret = perf_output_copy_aux(&pt->handle, handle, from, to);
+
+ /*
+ * If the tracing was on when we turned up, restart it.
+ * Compiler barrier not needed as we couldn't have been
+ * preempted by anything that touches pt->handle_nmi.
+ */
+ if (pt->handle_nmi)
+ pt_config_start(event);
+
+ return ret;
+}
+
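
Worked example of the from/to arithmetic above: with a 16-page (64 KiB) snapshot buffer, to = 1024 and a requested size = 4096 give from = 65536 + 1024 - 4096 = 62464, so perf_output_copy_aux() picks up the 3 KiB that wrapped to the end of the buffer plus the newest 1 KiB at its start.
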
static void pt_event_del(struct perf_event *event, int mode)
{
pt_event_stop(event, PERF_EF_UPDATE);
@@ -1615,6 +1747,7 @@ static __init int pt_init(void)
pt_pmu.pmu.del = pt_event_del;
pt_pmu.pmu.start = pt_event_start;
pt_pmu.pmu.stop = pt_event_stop;
+ pt_pmu.pmu.snapshot_aux = pt_event_snapshot_aux;
pt_pmu.pmu.read = pt_event_read;
pt_pmu.pmu.setup_aux = pt_buffer_setup_aux;
pt_pmu.pmu.free_aux = pt_buffer_free_aux;
diff --git a/arch/x86/events/intel/pt.h b/arch/x86/events/intel/pt.h
index 1d2bb7572374..96906a62aacd 100644
--- a/arch/x86/events/intel/pt.h
+++ b/arch/x86/events/intel/pt.h
@@ -64,6 +64,7 @@ struct pt_pmu {
* @lost: if data was lost/truncated
* @head: logical write offset inside the buffer
* @snapshot: if this is for a snapshot/overwrite counter
+ * @single: use Single Range Output instead of ToPA
* @stop_pos: STOP topa entry index
* @intr_pos: INT topa entry index
* @stop_te: STOP topa entry pointer
@@ -80,6 +81,7 @@ struct pt_buffer {
local_t data_size;
local64_t head;
bool snapshot;
+ bool single;
long stop_pos, intr_pos;
struct topa_entry *stop_te, *intr_te;
void **data_pages;
@@ -111,16 +113,20 @@ struct pt_filters {
/**
* struct pt - per-cpu pt context
- * @handle: perf output handle
+ * @handle: perf output handle
* @filters: last configured filters
- * @handle_nmi: do handle PT PMI on this cpu, there's an active event
- * @vmx_on: 1 if VMX is ON on this cpu
+ * @handle_nmi: do handle PT PMI on this cpu, there's an active event
+ * @vmx_on: 1 if VMX is ON on this cpu
+ * @output_base: cached RTIT_OUTPUT_BASE MSR value
+ * @output_mask: cached RTIT_OUTPUT_MASK MSR value
*/
struct pt {
struct perf_output_handle handle;
struct pt_filters filters;
int handle_nmi;
int vmx_on;
+ u64 output_base;
+ u64 output_mask;
};
#endif /* __INTEL_PT_H__ */
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index ecacfbf4ebc1..930611db8f9a 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -683,6 +683,14 @@ struct x86_pmu {
atomic_t lbr_exclusive[x86_lbr_exclusive_max];
/*
+ * perf task context (i.e. struct perf_event_context::task_ctx_data)
+ * switch helper to bridge calls from perf/core to perf/x86.
+ * See struct pmu::swap_task_ctx() usage for examples;
+	 * See struct pmu::swap_task_ctx() usage for examples.
+ void (*swap_task_ctx)(struct perf_event_context *prev,
+ struct perf_event_context *next);
+
+ /*
* AMD bits
*/
unsigned int amd_nb_constraints : 1;
@@ -1016,6 +1024,9 @@ void intel_pmu_store_pebs_lbrs(struct pebs_lbr *lbr);
void intel_ds_init(void);
+void intel_pmu_lbr_swap_task_ctx(struct perf_event_context *prev,
+ struct perf_event_context *next);
+
void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in);
u64 lbr_from_signext_quirk_wr(u64 val);
diff --git a/arch/x86/hyperv/hv_apic.c b/arch/x86/hyperv/hv_apic.c
index e01078e93dd3..40e0e322161d 100644
--- a/arch/x86/hyperv/hv_apic.c
+++ b/arch/x86/hyperv/hv_apic.c
@@ -194,10 +194,20 @@ do_ex_hypercall:
static bool __send_ipi_one(int cpu, int vector)
{
- struct cpumask mask = CPU_MASK_NONE;
+ int vp = hv_cpu_number_to_vp_number(cpu);
- cpumask_set_cpu(cpu, &mask);
- return __send_ipi_mask(&mask, vector);
+ trace_hyperv_send_ipi_one(cpu, vector);
+
+ if (!hv_hypercall_pg || (vp == VP_INVAL))
+ return false;
+
+ if ((vector < HV_IPI_LOW_VECTOR) || (vector > HV_IPI_HIGH_VECTOR))
+ return false;
+
+ if (vp >= 64)
+ return __send_ipi_mask_ex(cpumask_of(cpu), vector);
+
+ return !hv_do_fast_hypercall16(HVCALL_SEND_IPI, vector, BIT_ULL(vp));
}
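
Worked example of the fast path: a CPU whose VP number is 5 yields BIT_ULL(5) == 0x20, which hv_do_fast_hypercall16() can pass entirely in registers; only VP numbers below 64 fit in that 64-bit mask, hence the __send_ipi_mask_ex() fallback for vp >= 64.
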
static void hv_send_ipi(int cpu, int vector)
diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c
index 2db3972c0e0f..50ff030d9224 100644
--- a/arch/x86/hyperv/hv_init.c
+++ b/arch/x86/hyperv/hv_init.c
@@ -311,6 +311,12 @@ void __init hyperv_init(void)
hypercall_msr.guest_physical_address = vmalloc_to_pfn(hv_hypercall_pg);
wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
+ /*
+ * Ignore any errors in setting up stimer clockevents
+ * as we can run with the LAPIC timer as a fallback.
+ */
+ (void)hv_stimer_alloc();
+
hv_apic_init();
x86_init.pci.arch_init = hv_pci_init;
diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
index 1cee10091b9f..30416d7f19d4 100644
--- a/arch/x86/ia32/ia32_signal.c
+++ b/arch/x86/ia32/ia32_signal.c
@@ -21,6 +21,7 @@
#include <linux/personality.h>
#include <linux/compat.h>
#include <linux/binfmts.h>
+#include <linux/syscalls.h>
#include <asm/ucontext.h>
#include <linux/uaccess.h>
#include <asm/fpu/internal.h>
@@ -118,7 +119,7 @@ static int ia32_restore_sigcontext(struct pt_regs *regs,
return err;
}
-asmlinkage long sys32_sigreturn(void)
+COMPAT_SYSCALL_DEFINE0(sigreturn)
{
struct pt_regs *regs = current_pt_regs();
struct sigframe_ia32 __user *frame = (struct sigframe_ia32 __user *)(regs->sp-8);
@@ -144,7 +145,7 @@ badframe:
return 0;
}
-asmlinkage long sys32_rt_sigreturn(void)
+COMPAT_SYSCALL_DEFINE0(rt_sigreturn)
{
struct pt_regs *regs = current_pt_regs();
struct rt_sigframe_ia32 __user *frame;
diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h
index 3ff577c0b102..cd339b88d5d4 100644
--- a/arch/x86/include/asm/asm.h
+++ b/arch/x86/include/asm/asm.h
@@ -7,9 +7,11 @@
# define __ASM_FORM_RAW(x) x
# define __ASM_FORM_COMMA(x) x,
#else
-# define __ASM_FORM(x) " " #x " "
-# define __ASM_FORM_RAW(x) #x
-# define __ASM_FORM_COMMA(x) " " #x ","
+#include <linux/stringify.h>
+
+# define __ASM_FORM(x) " " __stringify(x) " "
+# define __ASM_FORM_RAW(x) __stringify(x)
+# define __ASM_FORM_COMMA(x) " " __stringify(x) ","
#endif
#ifndef __x86_64__
@@ -139,9 +141,6 @@
# define _ASM_EXTABLE_EX(from, to) \
_ASM_EXTABLE_HANDLE(from, to, ex_handler_ext)
-# define _ASM_EXTABLE_REFCOUNT(from, to) \
- _ASM_EXTABLE_HANDLE(from, to, ex_handler_refcount)
-
# define _ASM_NOKPROBE(entry) \
.pushsection "_kprobe_blacklist","aw" ; \
_ASM_ALIGN ; \
@@ -170,9 +169,6 @@
# define _ASM_EXTABLE_EX(from, to) \
_ASM_EXTABLE_HANDLE(from, to, ex_handler_ext)
-# define _ASM_EXTABLE_REFCOUNT(from, to) \
- _ASM_EXTABLE_HANDLE(from, to, ex_handler_refcount)
-
/* For C file, we already have NOKPROBE_SYMBOL macro */
#endif
diff --git a/arch/x86/include/asm/calgary.h b/arch/x86/include/asm/calgary.h
deleted file mode 100644
index facd374a1bf7..000000000000
--- a/arch/x86/include/asm/calgary.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Derived from include/asm-powerpc/iommu.h
- *
- * Copyright IBM Corporation, 2006-2007
- *
- * Author: Jon Mason <jdmason@us.ibm.com>
- * Author: Muli Ben-Yehuda <muli@il.ibm.com>
- */
-
-#ifndef _ASM_X86_CALGARY_H
-#define _ASM_X86_CALGARY_H
-
-#include <linux/spinlock.h>
-#include <linux/device.h>
-#include <linux/dma-mapping.h>
-#include <linux/timer.h>
-#include <asm/types.h>
-
-struct iommu_table {
- const struct cal_chipset_ops *chip_ops; /* chipset specific funcs */
- unsigned long it_base; /* mapped address of tce table */
- unsigned long it_hint; /* Hint for next alloc */
- unsigned long *it_map; /* A simple allocation bitmap for now */
- void __iomem *bbar; /* Bridge BAR */
- u64 tar_val; /* Table Address Register */
- struct timer_list watchdog_timer;
- spinlock_t it_lock; /* Protects it_map */
- unsigned int it_size; /* Size of iommu table in entries */
- unsigned char it_busno; /* Bus number this table belongs to */
-};
-
-struct cal_chipset_ops {
- void (*handle_quirks)(struct iommu_table *tbl, struct pci_dev *dev);
- void (*tce_cache_blast)(struct iommu_table *tbl);
- void (*dump_error_regs)(struct iommu_table *tbl);
-};
-
-#define TCE_TABLE_SIZE_UNSPECIFIED ~0
-#define TCE_TABLE_SIZE_64K 0
-#define TCE_TABLE_SIZE_128K 1
-#define TCE_TABLE_SIZE_256K 2
-#define TCE_TABLE_SIZE_512K 3
-#define TCE_TABLE_SIZE_1M 4
-#define TCE_TABLE_SIZE_2M 5
-#define TCE_TABLE_SIZE_4M 6
-#define TCE_TABLE_SIZE_8M 7
-
-extern int use_calgary;
-
-#ifdef CONFIG_CALGARY_IOMMU
-extern int detect_calgary(void);
-#else
-static inline int detect_calgary(void) { return -ENODEV; }
-#endif
-
-#endif /* _ASM_X86_CALGARY_H */
diff --git a/arch/x86/include/asm/cpu_entry_area.h b/arch/x86/include/asm/cpu_entry_area.h
index 8348f7d69fd5..ea866c7bf31d 100644
--- a/arch/x86/include/asm/cpu_entry_area.h
+++ b/arch/x86/include/asm/cpu_entry_area.h
@@ -78,8 +78,12 @@ struct cpu_entry_area {
/*
* The GDT is just below entry_stack and thus serves (on x86_64) as
- * a a read-only guard page.
+ * a read-only guard page. On 32-bit the GDT must be writeable, so
+ * it needs an extra guard page.
*/
+#ifdef CONFIG_X86_32
+ char guard_entry_stack[PAGE_SIZE];
+#endif
struct entry_stack_page entry_stack_page;
/*
@@ -94,7 +98,6 @@ struct cpu_entry_area {
*/
struct cea_exception_stacks estacks;
#endif
-#ifdef CONFIG_CPU_SUP_INTEL
/*
* Per CPU debug store for Intel performance monitoring. Wastes a
* full page at the moment.
@@ -105,11 +108,13 @@ struct cpu_entry_area {
* Reserve enough fixmap PTEs.
*/
struct debug_store_buffers cpu_debug_buffers;
-#endif
};
-#define CPU_ENTRY_AREA_SIZE (sizeof(struct cpu_entry_area))
-#define CPU_ENTRY_AREA_TOT_SIZE (CPU_ENTRY_AREA_SIZE * NR_CPUS)
+#define CPU_ENTRY_AREA_SIZE (sizeof(struct cpu_entry_area))
+#define CPU_ENTRY_AREA_ARRAY_SIZE (CPU_ENTRY_AREA_SIZE * NR_CPUS)
+
+/* Total size includes the readonly IDT mapping page as well: */
+#define CPU_ENTRY_AREA_TOTAL_SIZE (CPU_ENTRY_AREA_ARRAY_SIZE + PAGE_SIZE)
DECLARE_PER_CPU(struct cpu_entry_area *, cpu_entry_area);
DECLARE_PER_CPU(struct cea_exception_stacks *, cea_exception_stacks);
@@ -117,13 +122,14 @@ DECLARE_PER_CPU(struct cea_exception_stacks *, cea_exception_stacks);
extern void setup_cpu_entry_areas(void);
extern void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags);
+/* Single page reserved for the readonly IDT mapping: */
#define CPU_ENTRY_AREA_RO_IDT CPU_ENTRY_AREA_BASE
#define CPU_ENTRY_AREA_PER_CPU (CPU_ENTRY_AREA_RO_IDT + PAGE_SIZE)
#define CPU_ENTRY_AREA_RO_IDT_VADDR ((void *)CPU_ENTRY_AREA_RO_IDT)
#define CPU_ENTRY_AREA_MAP_SIZE \
- (CPU_ENTRY_AREA_PER_CPU + CPU_ENTRY_AREA_TOT_SIZE - CPU_ENTRY_AREA_BASE)
+ (CPU_ENTRY_AREA_PER_CPU + CPU_ENTRY_AREA_ARRAY_SIZE - CPU_ENTRY_AREA_BASE)
extern struct cpu_entry_area *get_cpu_entry_area(int cpu);
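
A minimal sketch of the renamed size macros with stand-in values (the
PAGE_SIZE, NR_CPUS and per-CPU area size below are placeholders, not the
kernel's real configuration): the ARRAY size covers NR_CPUS copies of the
entry area, while the TOTAL size additionally covers the one read-only
IDT page in front of them.

    #include <stdio.h>

    #define PAGE_SIZE                 4096UL  /* placeholder */
    #define NR_CPUS                   8       /* placeholder */
    #define CPU_ENTRY_AREA_SIZE       (41UL * PAGE_SIZE)  /* assumed per-CPU size */
    #define CPU_ENTRY_AREA_ARRAY_SIZE (CPU_ENTRY_AREA_SIZE * NR_CPUS)
    /* Total size includes the readonly IDT mapping page as well: */
    #define CPU_ENTRY_AREA_TOTAL_SIZE (CPU_ENTRY_AREA_ARRAY_SIZE + PAGE_SIZE)

    int main(void)
    {
            printf("array: %lu bytes, total: %lu bytes\n",
                   CPU_ENTRY_AREA_ARRAY_SIZE, CPU_ENTRY_AREA_TOTAL_SIZE);
            return 0;
    }
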
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 0652d3eed9bd..e9b62498fe75 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -292,6 +292,7 @@
#define X86_FEATURE_CLZERO (13*32+ 0) /* CLZERO instruction */
#define X86_FEATURE_IRPERF (13*32+ 1) /* Instructions Retired Count */
#define X86_FEATURE_XSAVEERPTR (13*32+ 2) /* Always save/restore FP error pointers */
+#define X86_FEATURE_RDPRU (13*32+ 4) /* Read processor register at user level */
#define X86_FEATURE_WBNOINVD (13*32+ 9) /* WBNOINVD instruction */
#define X86_FEATURE_AMD_IBPB (13*32+12) /* "" Indirect Branch Prediction Barrier */
#define X86_FEATURE_AMD_IBRS (13*32+14) /* "" Indirect Branch Restricted Speculation */
@@ -399,5 +400,7 @@
#define X86_BUG_MDS X86_BUG(19) /* CPU is affected by Microarchitectural data sampling */
#define X86_BUG_MSBDS_ONLY X86_BUG(20) /* CPU is only affected by the MSDBS variant of BUG_MDS */
#define X86_BUG_SWAPGS X86_BUG(21) /* CPU is affected by speculation through SWAPGS */
+#define X86_BUG_TAA X86_BUG(22) /* CPU is affected by TSX Async Abort (TAA) */
+#define X86_BUG_ITLB_MULTIHIT X86_BUG(23) /* CPU may incur MCE during certain page attribute changes */
#endif /* _ASM_X86_CPUFEATURES_H */
diff --git a/arch/x86/include/asm/crash.h b/arch/x86/include/asm/crash.h
index 0acf5ee45a21..f58de66091e5 100644
--- a/arch/x86/include/asm/crash.h
+++ b/arch/x86/include/asm/crash.h
@@ -2,10 +2,17 @@
#ifndef _ASM_X86_CRASH_H
#define _ASM_X86_CRASH_H
+struct kimage;
+
int crash_load_segments(struct kimage *image);
-int crash_copy_backup_region(struct kimage *image);
int crash_setup_memmap_entries(struct kimage *image,
struct boot_params *params);
void crash_smp_send_stop(void);
+#ifdef CONFIG_KEXEC_CORE
+void __init crash_reserve_low_1M(void);
+#else
+static inline void __init crash_reserve_low_1M(void) { }
+#endif
+
#endif /* _ASM_X86_CRASH_H */
diff --git a/arch/x86/include/asm/disabled-features.h b/arch/x86/include/asm/disabled-features.h
index a5ea841cc6d2..8e1d0bb46361 100644
--- a/arch/x86/include/asm/disabled-features.h
+++ b/arch/x86/include/asm/disabled-features.h
@@ -22,7 +22,7 @@
# define DISABLE_SMAP (1<<(X86_FEATURE_SMAP & 31))
#endif
-#ifdef CONFIG_X86_INTEL_UMIP
+#ifdef CONFIG_X86_UMIP
# define DISABLE_UMIP 0
#else
# define DISABLE_UMIP (1<<(X86_FEATURE_UMIP & 31))
diff --git a/arch/x86/include/asm/e820/types.h b/arch/x86/include/asm/e820/types.h
index c3aa4b5e49e2..314f75d886d0 100644
--- a/arch/x86/include/asm/e820/types.h
+++ b/arch/x86/include/asm/e820/types.h
@@ -29,6 +29,14 @@ enum e820_type {
E820_TYPE_PRAM = 12,
/*
+ * Special-purpose memory is indicated to the system via the
+ * EFI_MEMORY_SP attribute. Define an e820 translation of this
+ * memory type for the purpose of reserving this range and
+ * marking it with the IORES_DESC_SOFT_RESERVED designation.
+ */
+ E820_TYPE_SOFT_RESERVED = 0xefffffff,
+
+ /*
* Reserved RAM used by the kernel itself if
* CONFIG_INTEL_TXT=y is enabled, memory of this type
* will be included in the S3 integrity calculation
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index 43a82e59c59d..d028e9acdf1c 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -140,7 +140,6 @@ extern void efi_delete_dummy_variable(void);
extern void efi_switch_mm(struct mm_struct *mm);
extern void efi_recover_from_page_fault(unsigned long phys_addr);
extern void efi_free_boot_services(void);
-extern void efi_reserve_boot_services(void);
struct efi_setup_data {
u64 fw_vendor;
@@ -244,6 +243,8 @@ static inline bool efi_is_64bit(void)
extern bool efi_reboot_required(void);
extern bool efi_is_table_address(unsigned long phys_addr);
+extern void efi_find_mirror(void);
+extern void efi_reserve_boot_services(void);
#else
static inline void parse_efi_setup(u64 phys_addr, u32 data_len) {}
static inline bool efi_reboot_required(void)
@@ -254,6 +255,20 @@ static inline bool efi_is_table_address(unsigned long phys_addr)
{
return false;
}
+static inline void efi_find_mirror(void)
+{
+}
+static inline void efi_reserve_boot_services(void)
+{
+}
#endif /* CONFIG_EFI */
+#ifdef CONFIG_EFI_FAKE_MEMMAP
+extern void __init efi_fake_memmap_early(void);
+#else
+static inline void efi_fake_memmap_early(void)
+{
+}
+#endif
+
#endif /* _ASM_X86_EFI_H */
diff --git a/arch/x86/include/asm/emulate_prefix.h b/arch/x86/include/asm/emulate_prefix.h
new file mode 100644
index 000000000000..70f5b98a5286
--- /dev/null
+++ b/arch/x86/include/asm/emulate_prefix.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_X86_EMULATE_PREFIX_H
+#define _ASM_X86_EMULATE_PREFIX_H
+
+/*
+ * Virt escape sequences to trigger instruction emulation;
+ * ideally these would decode to a 'whole' instruction and not destroy
+ * the instruction stream; sadly this is not true for the 'kvm' one :/
+ */
+
+#define __XEN_EMULATE_PREFIX 0x0f,0x0b,0x78,0x65,0x6e /* ud2 ; .ascii "xen" */
+#define __KVM_EMULATE_PREFIX 0x0f,0x0b,0x6b,0x76,0x6d /* ud2 ; .ascii "kvm" */
+
+#endif
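
A user-space sketch of what those byte sequences are: ud2 (0x0f 0x0b)
followed by the ASCII bytes of "xen" or "kvm". A decoder that matches
either pattern knows the instruction that follows should be emulated.
has_emulate_prefix() below is a hypothetical helper for the demo, not
the kernel's decoder.

    #include <stdio.h>
    #include <string.h>

    static const unsigned char xen_prefix[] = { 0x0f, 0x0b, 'x', 'e', 'n' };
    static const unsigned char kvm_prefix[] = { 0x0f, 0x0b, 'k', 'v', 'm' };

    static int has_emulate_prefix(const unsigned char *insn, size_t len)
    {
            if (len < sizeof(xen_prefix))
                    return 0;
            return !memcmp(insn, xen_prefix, sizeof(xen_prefix)) ||
                   !memcmp(insn, kvm_prefix, sizeof(kvm_prefix));
    }

    int main(void)
    {
            /* prefix followed by cpuid (0x0f 0xa2) */
            unsigned char stream[] = { 0x0f, 0x0b, 'k', 'v', 'm', 0x0f, 0xa2 };

            printf("emulate prefix: %s\n",
                   has_emulate_prefix(stream, sizeof(stream)) ? "yes" : "no");
            return 0;
    }
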
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index 0c47aa82e2e2..28183ee3cc42 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -156,7 +156,7 @@ extern pte_t *kmap_pte;
extern pte_t *pkmap_page_table;
void __native_set_fixmap(enum fixed_addresses idx, pte_t pte);
-void native_set_fixmap(enum fixed_addresses idx,
+void native_set_fixmap(unsigned /* enum fixed_addresses */ idx,
phys_addr_t phys, pgprot_t flags);
#ifndef CONFIG_PARAVIRT_XXL
diff --git a/arch/x86/include/asm/hyperv-tlfs.h b/arch/x86/include/asm/hyperv-tlfs.h
index 7741e211f7f5..5f10f7f2098d 100644
--- a/arch/x86/include/asm/hyperv-tlfs.h
+++ b/arch/x86/include/asm/hyperv-tlfs.h
@@ -86,6 +86,8 @@
#define HV_X64_ACCESS_FREQUENCY_MSRS BIT(11)
/* AccessReenlightenmentControls privilege */
#define HV_X64_ACCESS_REENLIGHTENMENT BIT(13)
+/* AccessTscInvariantControls privilege */
+#define HV_X64_ACCESS_TSC_INVARIANT BIT(15)
/*
* Feature identification: indicates which flags were specified at partition
@@ -278,6 +280,9 @@
#define HV_X64_MSR_TSC_EMULATION_CONTROL 0x40000107
#define HV_X64_MSR_TSC_EMULATION_STATUS 0x40000108
+/* TSC invariant control */
+#define HV_X64_MSR_TSC_INVARIANT_CONTROL 0x40000118
+
/*
* Declare the MSR used to setup pages used to communicate with the hypervisor.
*/
diff --git a/arch/x86/include/asm/insn.h b/arch/x86/include/asm/insn.h
index 154f27be8bfc..5c1ae3eff9d4 100644
--- a/arch/x86/include/asm/insn.h
+++ b/arch/x86/include/asm/insn.h
@@ -45,6 +45,7 @@ struct insn {
struct insn_field immediate2; /* for 64bit imm or seg16 */
};
+ int emulate_prefix_size;
insn_attr_t attr;
unsigned char opnd_bytes;
unsigned char addr_bytes;
@@ -128,6 +129,11 @@ static inline int insn_is_evex(struct insn *insn)
return (insn->vex_prefix.nbytes == 4);
}
+static inline int insn_has_emulate_prefix(struct insn *insn)
+{
+ return !!insn->emulate_prefix_size;
+}
+
/* Ensure this instruction is decoded completely */
static inline int insn_complete(struct insn *insn)
{
diff --git a/arch/x86/include/asm/io_bitmap.h b/arch/x86/include/asm/io_bitmap.h
new file mode 100644
index 000000000000..02c6ef8f7667
--- /dev/null
+++ b/arch/x86/include/asm/io_bitmap.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_X86_IOBITMAP_H
+#define _ASM_X86_IOBITMAP_H
+
+#include <linux/refcount.h>
+#include <asm/processor.h>
+
+struct io_bitmap {
+ u64 sequence;
+ refcount_t refcnt;
+ /* The maximum number of bytes to copy so all zero bits are covered */
+ unsigned int max;
+ unsigned long bitmap[IO_BITMAP_LONGS];
+};
+
+struct task_struct;
+
+#ifdef CONFIG_X86_IOPL_IOPERM
+void io_bitmap_share(struct task_struct *tsk);
+void io_bitmap_exit(void);
+
+void tss_update_io_bitmap(void);
+#else
+static inline void io_bitmap_share(struct task_struct *tsk) { }
+static inline void io_bitmap_exit(void) { }
+static inline void tss_update_io_bitmap(void) { }
+#endif
+
+#endif
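
A rough user-space sketch of the life cycle this header sets up, with
refcount_t mimicked by a plain int: the bitmap is refcounted so a forked
child can share it, max bounds how many bytes must be copied so all zero
bits are covered, and sequence lets the context-switch code recognize a
stale copy in the TSS. Everything below is a simplified stand-in, not
the kernel implementation.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define IO_BITMAP_LONGS 1024            /* 65536 bits / 64 (64-bit longs) */

    struct io_bitmap {
            unsigned long long sequence;    /* bumped on every update */
            int refcnt;                     /* stands in for refcount_t */
            unsigned int max;               /* bytes covering all zero bits */
            unsigned long bitmap[IO_BITMAP_LONGS];
    };

    static void io_bitmap_share(struct io_bitmap *iob)
    {
            iob->refcnt++;                  /* parent and child now share it */
    }

    int main(void)
    {
            struct io_bitmap *iob = calloc(1, sizeof(*iob));

            memset(iob->bitmap, 0xff, sizeof(iob->bitmap)); /* all ports denied */
            iob->refcnt = 1;
            io_bitmap_share(iob);
            printf("refcnt after share: %d\n", iob->refcnt);
            free(iob);
            return 0;
    }
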
diff --git a/arch/x86/include/asm/kexec.h b/arch/x86/include/asm/kexec.h
index 5e7d6b46de97..6802c59e8252 100644
--- a/arch/x86/include/asm/kexec.h
+++ b/arch/x86/include/asm/kexec.h
@@ -66,10 +66,6 @@ struct kimage;
# define KEXEC_ARCH KEXEC_ARCH_X86_64
#endif
-/* Memory to backup during crash kdump */
-#define KEXEC_BACKUP_SRC_START (0UL)
-#define KEXEC_BACKUP_SRC_END (640 * 1024UL - 1) /* 640K */
-
/*
* This function is responsible for capturing register states if coming
* via panic otherwise just fix up the ss and sp if coming via kernel
@@ -154,12 +150,6 @@ struct kimage_arch {
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
- /* Details of backup region */
- unsigned long backup_src_start;
- unsigned long backup_src_sz;
-
- /* Physical address of backup segment */
- unsigned long backup_load_addr;
/* Core ELF header buffer */
void *elf_headers;
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 24d6598dea29..b79cd6aa4075 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -156,10 +156,8 @@ enum kvm_reg {
VCPU_REGS_R15 = __VCPU_REGS_R15,
#endif
VCPU_REGS_RIP,
- NR_VCPU_REGS
-};
+ NR_VCPU_REGS,
-enum kvm_reg_ex {
VCPU_EXREG_PDPTR = NR_VCPU_REGS,
VCPU_EXREG_CR3,
VCPU_EXREG_RFLAGS,
@@ -312,9 +310,12 @@ struct kvm_rmap_head {
struct kvm_mmu_page {
struct list_head link;
struct hlist_node hash_link;
+ struct list_head lpage_disallowed_link;
+
bool unsync;
u8 mmu_valid_gen;
bool mmio_cached;
+ bool lpage_disallowed; /* Can't be replaced by an equiv large page */
/*
* The following two entries are used to key the shadow page in the
@@ -451,6 +452,11 @@ struct kvm_pmc {
u64 eventsel;
struct perf_event *perf_event;
struct kvm_vcpu *vcpu;
+ /*
+ * eventsel value for general purpose counters,
+ * ctrl value for fixed counters.
+ */
+ u64 current_config;
};
struct kvm_pmu {
@@ -469,7 +475,21 @@ struct kvm_pmu {
struct kvm_pmc gp_counters[INTEL_PMC_MAX_GENERIC];
struct kvm_pmc fixed_counters[INTEL_PMC_MAX_FIXED];
struct irq_work irq_work;
- u64 reprogram_pmi;
+ DECLARE_BITMAP(reprogram_pmi, X86_PMC_IDX_MAX);
+ DECLARE_BITMAP(all_valid_pmc_idx, X86_PMC_IDX_MAX);
+ DECLARE_BITMAP(pmc_in_use, X86_PMC_IDX_MAX);
+
+ /*
+ * The gate to release perf_events not marked in
+ * pmc_in_use only once in a vcpu time slice.
+ */
+ bool need_cleanup;
+
+ /*
+ * The total number of programmed perf_events; it helps to avoid a
+ * redundant check before cleanup if the guest doesn't use the vPMU at all.
+ */
+ u8 event_count;
};
struct kvm_pmu_ops;
@@ -562,6 +582,7 @@ struct kvm_vcpu_arch {
u64 smbase;
u64 smi_count;
bool tpr_access_reporting;
+ bool xsaves_enabled;
u64 ia32_xss;
u64 microcode_version;
u64 arch_capabilities;
@@ -859,6 +880,7 @@ struct kvm_arch {
*/
struct list_head active_mmu_pages;
struct list_head zapped_obsolete_pages;
+ struct list_head lpage_disallowed_mmu_pages;
struct kvm_page_track_notifier_node mmu_sp_tracker;
struct kvm_page_track_notifier_head track_notifier_head;
@@ -933,6 +955,7 @@ struct kvm_arch {
bool exception_payload_enabled;
struct kvm_pmu_event_filter *pmu_event_filter;
+ struct task_struct *nx_lpage_recovery_thread;
};
struct kvm_vm_stat {
@@ -946,6 +969,7 @@ struct kvm_vm_stat {
ulong mmu_unsync;
ulong remote_tlb_flush;
ulong lpages;
+ ulong nx_lpage_splits;
ulong max_mmu_page_hash_collisions;
};
@@ -1035,7 +1059,6 @@ struct kvm_x86_ops {
struct kvm_segment *var, int seg);
void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
void (*decache_cr0_guest_bits)(struct kvm_vcpu *vcpu);
- void (*decache_cr3)(struct kvm_vcpu *vcpu);
void (*decache_cr4_guest_bits)(struct kvm_vcpu *vcpu);
void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
@@ -1084,7 +1107,7 @@ struct kvm_x86_ops {
void (*enable_nmi_window)(struct kvm_vcpu *vcpu);
void (*enable_irq_window)(struct kvm_vcpu *vcpu);
void (*update_cr8_intercept)(struct kvm_vcpu *vcpu, int tpr, int irr);
- bool (*get_enable_apicv)(struct kvm_vcpu *vcpu);
+ bool (*get_enable_apicv)(struct kvm *kvm);
void (*refresh_apicv_exec_ctrl)(struct kvm_vcpu *vcpu);
void (*hwapic_irr_update)(struct kvm_vcpu *vcpu, int max_irr);
void (*hwapic_isr_update)(struct kvm_vcpu *vcpu, int isr);
@@ -1351,6 +1374,7 @@ int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu,
void kvm_enable_efer_bits(u64);
bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer);
+int __kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data, bool host_initiated);
int kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data);
int kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data);
int kvm_emulate_rdmsr(struct kvm_vcpu *vcpu);
@@ -1571,6 +1595,8 @@ bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip);
void kvm_make_mclock_inprogress_request(struct kvm *kvm);
void kvm_make_scan_ioapic_request(struct kvm *kvm);
+void kvm_make_scan_ioapic_request_mask(struct kvm *kvm,
+ unsigned long *vcpu_bitmap);
void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
struct kvm_async_pf *work);
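
A sketch of why reprogram_pmi moved from a u64 to DECLARE_BITMAP(): a raw
u64 silently caps the counter index at 64, while a bitmap scales with
X86_PMC_IDX_MAX. The bitops below are simplified user-space stand-ins for
the kernel's set_bit()/test_bit().

    #include <stdio.h>
    #include <limits.h>

    #define X86_PMC_IDX_MAX 64
    #define BITS_PER_LONG   (sizeof(long) * CHAR_BIT)
    #define DECLARE_BITMAP(name, bits) \
            unsigned long name[((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG]

    static void set_bit_(int nr, unsigned long *map)
    {
            map[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
    }

    static int test_bit_(int nr, const unsigned long *map)
    {
            return (map[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG)) & 1;
    }

    int main(void)
    {
            DECLARE_BITMAP(reprogram_pmi, X86_PMC_IDX_MAX) = { 0 };

            set_bit_(35, reprogram_pmi);    /* mark PMC 35 for reprogramming */
            printf("pmc 35 pending: %d\n", test_bit_(35, reprogram_pmi));
            return 0;
    }
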
diff --git a/arch/x86/include/asm/linkage.h b/arch/x86/include/asm/linkage.h
index 14caa9d9fb7f..365111789cc6 100644
--- a/arch/x86/include/asm/linkage.h
+++ b/arch/x86/include/asm/linkage.h
@@ -13,10 +13,6 @@
#ifdef __ASSEMBLY__
-#define GLOBAL(name) \
- .globl name; \
- name:
-
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_ALIGNMENT_16)
#define __ALIGN .p2align 4, 0x90
#define __ALIGN_STR __stringify(__ALIGN)
diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
index 7948a17febb4..c215d2762488 100644
--- a/arch/x86/include/asm/module.h
+++ b/arch/x86/include/asm/module.h
@@ -15,6 +15,8 @@ struct mod_arch_specific {
#ifdef CONFIG_X86_64
/* X86_64 does not define MODULE_PROC_FAMILY */
+#elif defined CONFIG_M486SX
+#define MODULE_PROC_FAMILY "486SX "
#elif defined CONFIG_M486
#define MODULE_PROC_FAMILY "486 "
#elif defined CONFIG_M586
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 20ce682a2540..084e98da04a7 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -93,6 +93,18 @@
* Microarchitectural Data
* Sampling (MDS) vulnerabilities.
*/
+#define ARCH_CAP_PSCHANGE_MC_NO BIT(6) /*
+ * The processor is not susceptible to a
+ * machine check error due to modifying the
+ * code page size along with either the
+ * physical address or cache type
+ * without TLB invalidation.
+ */
+#define ARCH_CAP_TSX_CTRL_MSR BIT(7) /* MSR for TSX control is available. */
+#define ARCH_CAP_TAA_NO BIT(8) /*
+ * Not susceptible to
+ * TSX Async Abort (TAA) vulnerabilities.
+ */
#define MSR_IA32_FLUSH_CMD 0x0000010b
#define L1D_FLUSH BIT(0) /*
@@ -103,6 +115,10 @@
#define MSR_IA32_BBL_CR_CTL 0x00000119
#define MSR_IA32_BBL_CR_CTL3 0x0000011e
+#define MSR_IA32_TSX_CTRL 0x00000122
+#define TSX_CTRL_RTM_DISABLE BIT(0) /* Disable RTM feature */
+#define TSX_CTRL_CPUID_CLEAR BIT(1) /* Disable TSX enumeration */
+
#define MSR_IA32_SYSENTER_CS 0x00000174
#define MSR_IA32_SYSENTER_ESP 0x00000175
#define MSR_IA32_SYSENTER_EIP 0x00000176
@@ -393,6 +409,8 @@
#define MSR_AMD_PSTATE_DEF_BASE 0xc0010064
#define MSR_AMD64_OSVW_ID_LENGTH 0xc0010140
#define MSR_AMD64_OSVW_STATUS 0xc0010141
+#define MSR_AMD_PPIN_CTL 0xc00102f0
+#define MSR_AMD_PPIN 0xc00102f1
#define MSR_AMD64_LS_CFG 0xc0011020
#define MSR_AMD64_DC_CFG 0xc0011022
#define MSR_AMD64_BU_CFG2 0xc001102a
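
A sketch of how the new IA32_ARCH_CAPABILITIES bits are meant to be
consumed: TAA_NO means the CPU is not affected at all, and TSX_CTRL_MSR
means TSX can be switched off via MSR_IA32_TSX_CTRL. The MSR value below
is fabricated; real code would read it with rdmsr().

    #include <stdio.h>
    #include <stdint.h>

    #define BIT(n)                (1ULL << (n))
    #define ARCH_CAP_TSX_CTRL_MSR BIT(7)   /* MSR for TSX control available */
    #define ARCH_CAP_TAA_NO       BIT(8)   /* not susceptible to TAA */

    int main(void)
    {
            uint64_t ia32_arch_cap = ARCH_CAP_TSX_CTRL_MSR; /* example value */

            if (ia32_arch_cap & ARCH_CAP_TAA_NO)
                    printf("not affected by TAA\n");
            else if (ia32_arch_cap & ARCH_CAP_TSX_CTRL_MSR)
                    printf("TAA mitigable by disabling TSX via MSR_IA32_TSX_CTRL\n");
            else
                    printf("fall back to VERW-based clearing / updated microcode\n");
            return 0;
    }
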
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index 80bc209c0708..5c24a7b35166 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -314,7 +314,7 @@ DECLARE_STATIC_KEY_FALSE(mds_idle_clear);
#include <asm/segment.h>
/**
- * mds_clear_cpu_buffers - Mitigation for MDS vulnerability
+ * mds_clear_cpu_buffers - Mitigation for MDS and TAA vulnerabilities
*
* This uses the otherwise unused and obsolete VERW instruction in
* combination with microcode which triggers a CPU buffer flush when the
@@ -337,7 +337,7 @@ static inline void mds_clear_cpu_buffers(void)
}
/**
- * mds_user_clear_cpu_buffers - Mitigation for MDS vulnerability
+ * mds_user_clear_cpu_buffers - Mitigation for MDS and TAA vulnerabilities
*
* Clear CPU buffers if the corresponding static key is enabled
*/
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 69089d46f128..86e7317eb31f 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -294,10 +294,6 @@ static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
{
PVOP_VCALL3(cpu.write_idt_entry, dt, entry, g);
}
-static inline void set_iopl_mask(unsigned mask)
-{
- PVOP_VCALL1(cpu.set_iopl_mask, mask);
-}
static inline void paravirt_activate_mm(struct mm_struct *prev,
struct mm_struct *next)
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 70b654f3ffe5..84812964d3dd 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -140,8 +140,6 @@ struct pv_cpu_ops {
void (*load_sp0)(unsigned long sp0);
- void (*set_iopl_mask)(unsigned mask);
-
void (*wbinvd)(void);
/* cpuid emulation, mostly so that caps bits can be disabled */
diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h
index e662f987dfa2..90d0731fdcb6 100644
--- a/arch/x86/include/asm/pci.h
+++ b/arch/x86/include/asm/pci.h
@@ -12,8 +12,6 @@
#include <asm/pat.h>
#include <asm/x86_init.h>
-#ifdef __KERNEL__
-
struct pci_sysdata {
int domain; /* PCI domain */
int node; /* NUMA node */
@@ -118,11 +116,6 @@ void native_restore_msi_irqs(struct pci_dev *dev);
#define native_setup_msi_irqs NULL
#define native_teardown_msi_irq NULL
#endif
-#endif /* __KERNEL__ */
-
-#ifdef CONFIG_X86_64
-#include <asm/pci_64.h>
-#endif
/* generic pci stuff */
#include <asm-generic/pci.h>
diff --git a/arch/x86/include/asm/pci_64.h b/arch/x86/include/asm/pci_64.h
deleted file mode 100644
index f5411de0ae11..000000000000
--- a/arch/x86/include/asm/pci_64.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_X86_PCI_64_H
-#define _ASM_X86_PCI_64_H
-
-#ifdef __KERNEL__
-
-#ifdef CONFIG_CALGARY_IOMMU
-static inline void *pci_iommu(struct pci_bus *bus)
-{
- struct pci_sysdata *sd = bus->sysdata;
- return sd->iommu;
-}
-
-static inline void set_pci_iommu(struct pci_bus *bus, void *val)
-{
- struct pci_sysdata *sd = bus->sysdata;
- sd->iommu = val;
-}
-#endif /* CONFIG_CALGARY_IOMMU */
-
-extern int (*pci_config_read)(int seg, int bus, int dev, int fn,
- int reg, int len, u32 *value);
-extern int (*pci_config_write)(int seg, int bus, int dev, int fn,
- int reg, int len, u32 value);
-
-#endif /* __KERNEL__ */
-
-#endif /* _ASM_X86_PCI_64_H */
diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
index e3633795fb22..5afb5e0fe903 100644
--- a/arch/x86/include/asm/pgtable-3level.h
+++ b/arch/x86/include/asm/pgtable-3level.h
@@ -36,39 +36,41 @@ static inline void native_set_pte(pte_t *ptep, pte_t pte)
#define pmd_read_atomic pmd_read_atomic
/*
- * pte_offset_map_lock on 32bit PAE kernels was reading the pmd_t with
- * a "*pmdp" dereference done by gcc. Problem is, in certain places
- * where pte_offset_map_lock is called, concurrent page faults are
+ * pte_offset_map_lock() on 32-bit PAE kernels was reading the pmd_t with
+ * a "*pmdp" dereference done by GCC. Problem is, in certain places
+ * where pte_offset_map_lock() is called, concurrent page faults are
* allowed, if the mmap_sem is held for reading. An example is mincore
* vs page faults vs MADV_DONTNEED. On the page fault side
- * pmd_populate rightfully does a set_64bit, but if we're reading the
+ * pmd_populate() rightfully does a set_64bit(), but if we're reading the
* pmd_t with a "*pmdp" on the mincore side, a SMP race can happen
- * because gcc will not read the 64bit of the pmd atomically. To fix
- * this all places running pmd_offset_map_lock() while holding the
+ * because GCC will not read the 64-bit value of the pmd atomically.
+ *
+ * To fix this all places running pte_offset_map_lock() while holding the
* mmap_sem in read mode, shall read the pmdp pointer using this
- * function to know if the pmd is null nor not, and in turn to know if
- * they can run pmd_offset_map_lock or pmd_trans_huge or other pmd
+ * function to know if the pmd is null or not, and in turn to know if
+ * they can run pte_offset_map_lock() or pmd_trans_huge() or other pmd
* operations.
*
- * Without THP if the mmap_sem is hold for reading, the pmd can only
- * transition from null to not null while pmd_read_atomic runs. So
+ * Without THP if the mmap_sem is held for reading, the pmd can only
+ * transition from null to not null while pmd_read_atomic() runs. So
* we can always return atomic pmd values with this function.
*
- * With THP if the mmap_sem is hold for reading, the pmd can become
+ * With THP if the mmap_sem is held for reading, the pmd can become
* trans_huge or none or point to a pte (and in turn become "stable")
- * at any time under pmd_read_atomic. We could read it really
- * atomically here with a atomic64_read for the THP enabled case (and
+ * at any time under pmd_read_atomic(). We could read it truly
+ * atomically here with an atomic64_read() for the THP enabled case (and
* it would be a whole lot simpler), but to avoid using cmpxchg8b we
* only return an atomic pmdval if the low part of the pmdval is later
- * found stable (i.e. pointing to a pte). And we're returning a none
- * pmdval if the low part of the pmd is none. In some cases the high
- * and low part of the pmdval returned may not be consistent if THP is
- * enabled (the low part may point to previously mapped hugepage,
- * while the high part may point to a more recently mapped hugepage),
- * but pmd_none_or_trans_huge_or_clear_bad() only needs the low part
- * of the pmd to be read atomically to decide if the pmd is unstable
- * or not, with the only exception of when the low part of the pmd is
- * zero in which case we return a none pmd.
+ * found to be stable (i.e. pointing to a pte). We are also returning a
+ * 'none' (zero) pmdval if the low part of the pmd is zero.
+ *
+ * In some cases the high and low part of the pmdval returned may not be
+ * consistent if THP is enabled (the low part may point to previously
+ * mapped hugepage, while the high part may point to a more recently
+ * mapped hugepage), but pmd_none_or_trans_huge_or_clear_bad() only
+ * needs the low part of the pmd to be read atomically to decide if the
+ * pmd is unstable or not, with the only exception when the low part
+ * of the pmd is zero, in which case we return a 'none' pmd.
*/
static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
{
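
A user-space sketch of the torn-read problem the rewritten comment
describes (little-endian halves, types simplified, no THP): a 64-bit pmd
is two 32-bit words, so the safe pattern reads the low word first and
treats a zero low word as a 'none' pmd without consulting the high word.
This mirrors the shape of the kernel function, not its exact code.

    #include <stdio.h>
    #include <stdint.h>

    typedef struct { uint64_t pmd; } pmd_t;

    static pmd_t pmd_read_atomic_sketch(pmd_t *pmdp)
    {
            uint32_t *tmp = (uint32_t *)pmdp;
            uint32_t low = tmp[0];          /* read the low half first */
            pmd_t ret;

            if (low == 0)                   /* 'none' pmd: high half irrelevant */
                    return (pmd_t){ 0 };

            ret.pmd = (uint64_t)tmp[1] << 32 | low;
            return ret;
    }

    int main(void)
    {
            pmd_t pmd = { 0x123400005000ULL };

            printf("pmd: %#llx\n",
                   (unsigned long long)pmd_read_atomic_sketch(&pmd).pmd);
            return 0;
    }
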
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 0bc530c4eb13..ad97dc155195 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -1463,6 +1463,12 @@ static inline bool arch_has_pfn_modify_check(void)
return boot_cpu_has_bug(X86_BUG_L1TF);
}
+#define arch_faults_on_old_pte arch_faults_on_old_pte
+static inline bool arch_faults_on_old_pte(void)
+{
+ return false;
+}
+
#include <asm-generic/pgtable.h>
#endif /* __ASSEMBLY__ */
diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
index b0bc0fff5f1f..19f5807260c3 100644
--- a/arch/x86/include/asm/pgtable_32_types.h
+++ b/arch/x86/include/asm/pgtable_32_types.h
@@ -44,11 +44,11 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
* Define this here and validate with BUILD_BUG_ON() in pgtable_32.c
* to avoid include recursion hell
*/
-#define CPU_ENTRY_AREA_PAGES (NR_CPUS * 40)
+#define CPU_ENTRY_AREA_PAGES (NR_CPUS * 41)
-#define CPU_ENTRY_AREA_BASE \
- ((FIXADDR_TOT_START - PAGE_SIZE * (CPU_ENTRY_AREA_PAGES + 1)) \
- & PMD_MASK)
+/* The +1 is for the readonly IDT page: */
+#define CPU_ENTRY_AREA_BASE \
+ ((FIXADDR_TOT_START - PAGE_SIZE*(CPU_ENTRY_AREA_PAGES+1)) & PMD_MASK)
#define LDT_BASE_ADDR \
((CPU_ENTRY_AREA_BASE - PAGE_SIZE) & PMD_MASK)
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 6e0a3b43d027..e51afbb0cbfb 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -7,6 +7,7 @@
/* Forward declaration, a strange C thing */
struct task_struct;
struct mm_struct;
+struct io_bitmap;
struct vm86;
#include <asm/math_emu.h>
@@ -93,7 +94,15 @@ struct cpuinfo_x86 {
__u32 extended_cpuid_level;
/* Maximum supported CPUID level, -1=no CPUID: */
int cpuid_level;
- __u32 x86_capability[NCAPINTS + NBUGINTS];
+ /*
+ * Align to the size of unsigned long because the x86_capability array
+ * is passed to bitops, which require the alignment. Use an unnamed
+ * union to enforce that the array is aligned to the size of unsigned long.
+ */
+ union {
+ __u32 x86_capability[NCAPINTS + NBUGINTS];
+ unsigned long x86_capability_alignment;
+ };
char x86_vendor_id[16];
char x86_model_id[64];
/* in KB - valid for CPUS which support this call: */
@@ -328,10 +337,32 @@ struct x86_hw_tss {
* IO-bitmap sizes:
*/
#define IO_BITMAP_BITS 65536
-#define IO_BITMAP_BYTES (IO_BITMAP_BITS/8)
-#define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long))
-#define IO_BITMAP_OFFSET (offsetof(struct tss_struct, io_bitmap) - offsetof(struct tss_struct, x86_tss))
-#define INVALID_IO_BITMAP_OFFSET 0x8000
+#define IO_BITMAP_BYTES (IO_BITMAP_BITS / BITS_PER_BYTE)
+#define IO_BITMAP_LONGS (IO_BITMAP_BYTES / sizeof(long))
+
+#define IO_BITMAP_OFFSET_VALID_MAP \
+ (offsetof(struct tss_struct, io_bitmap.bitmap) - \
+ offsetof(struct tss_struct, x86_tss))
+
+#define IO_BITMAP_OFFSET_VALID_ALL \
+ (offsetof(struct tss_struct, io_bitmap.mapall) - \
+ offsetof(struct tss_struct, x86_tss))
+
+#ifdef CONFIG_X86_IOPL_IOPERM
+/*
+ * sizeof(unsigned long) coming from an extra "long" at the end of the
+ * iobitmap. The limit is inclusive, i.e. the last valid byte.
+ */
+# define __KERNEL_TSS_LIMIT \
+ (IO_BITMAP_OFFSET_VALID_ALL + IO_BITMAP_BYTES + \
+ sizeof(unsigned long) - 1)
+#else
+# define __KERNEL_TSS_LIMIT \
+ (offsetof(struct tss_struct, x86_tss) + sizeof(struct x86_hw_tss) - 1)
+#endif
+
+/* Base offset outside of TSS_LIMIT so unprivileged I/O causes #GP */
+#define IO_BITMAP_OFFSET_INVALID (__KERNEL_TSS_LIMIT + 1)
struct entry_stack {
unsigned long words[64];
@@ -341,13 +372,21 @@ struct entry_stack_page {
struct entry_stack stack;
} __aligned(PAGE_SIZE);
-struct tss_struct {
+/*
+ * All IO bitmap related data stored in the TSS:
+ */
+struct x86_io_bitmap {
+ /* The sequence number of the last active bitmap. */
+ u64 prev_sequence;
+
/*
- * The fixed hardware portion. This must not cross a page boundary
- * at risk of violating the SDM's advice and potentially triggering
- * errata.
+ * Store the dirty size of the last io bitmap offender. The next
+ * one will have to do the cleanup, because switching out to a task
+ * without an io bitmap just sets x86_tss.io_bitmap_base to a value
+ * outside of the TSS limit. So for sane tasks there is no need to
+ * actually touch the io_bitmap at all.
*/
- struct x86_hw_tss x86_tss;
+ unsigned int prev_max;
/*
* The extra 1 is there because the CPU will access an
@@ -355,21 +394,28 @@ struct tss_struct {
* bitmap. The extra byte must be all 1 bits, and must
* be within the limit.
*/
- unsigned long io_bitmap[IO_BITMAP_LONGS + 1];
+ unsigned long bitmap[IO_BITMAP_LONGS + 1];
+
+ /*
+ * Special I/O bitmap to emulate IOPL(3). All bytes zero,
+ * except the additional byte at the end.
+ */
+ unsigned long mapall[IO_BITMAP_LONGS + 1];
+};
+
+struct tss_struct {
+ /*
+ * The fixed hardware portion. This must not cross a page boundary
+ * at risk of violating the SDM's advice and potentially triggering
+ * errata.
+ */
+ struct x86_hw_tss x86_tss;
+
+ struct x86_io_bitmap io_bitmap;
} __aligned(PAGE_SIZE);
DECLARE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss_rw);
-/*
- * sizeof(unsigned long) coming from an extra "long" at the end
- * of the iobitmap.
- *
- * -1? seg base+limit should be pointing to the address of the
- * last valid byte
- */
-#define __KERNEL_TSS_LIMIT \
- (IO_BITMAP_OFFSET + IO_BITMAP_BYTES + sizeof(unsigned long) - 1)
-
/* Per CPU interrupt stacks */
struct irq_stack {
char stack[IRQ_STACK_SIZE];
@@ -480,10 +526,14 @@ struct thread_struct {
struct vm86 *vm86;
#endif
/* IO permissions: */
- unsigned long *io_bitmap_ptr;
- unsigned long iopl;
- /* Max allowed port in the bitmap, in bytes: */
- unsigned io_bitmap_max;
+ struct io_bitmap *io_bitmap;
+
+ /*
+ * IOPL. Privilege level dependent I/O permission which is
+ * emulated via the I/O bitmap to prevent user space from disabling
+ * interrupts.
+ */
+ unsigned long iopl_emul;
mm_segment_t addr_limit;
@@ -515,25 +565,6 @@ static inline void arch_thread_struct_whitelist(unsigned long *offset,
*/
#define TS_COMPAT 0x0002 /* 32bit syscall active (64BIT)*/
-/*
- * Set IOPL bits in EFLAGS from given mask
- */
-static inline void native_set_iopl_mask(unsigned mask)
-{
-#ifdef CONFIG_X86_32
- unsigned int reg;
-
- asm volatile ("pushfl;"
- "popl %0;"
- "andl %1, %0;"
- "orl %2, %0;"
- "pushl %0;"
- "popfl"
- : "=&r" (reg)
- : "i" (~X86_EFLAGS_IOPL), "r" (mask));
-#endif
-}
-
static inline void
native_load_sp0(unsigned long sp0)
{
@@ -573,7 +604,6 @@ static inline void load_sp0(unsigned long sp0)
native_load_sp0(sp0);
}
-#define set_iopl_mask native_set_iopl_mask
#endif /* CONFIG_PARAVIRT_XXL */
/* Free all resources held by a thread. */
@@ -841,7 +871,6 @@ static inline void spin_lock_prefetch(const void *x)
#define INIT_THREAD { \
.sp0 = TOP_OF_INIT_STACK, \
.sysenter_cs = __KERNEL_CS, \
- .io_bitmap_ptr = NULL, \
.addr_limit = KERNEL_DS, \
}
@@ -958,7 +987,7 @@ static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
extern unsigned long arch_align_stack(unsigned long sp);
void free_init_pages(const char *what, unsigned long begin, unsigned long end);
-extern void free_kernel_image_pages(void *begin, void *end);
+extern void free_kernel_image_pages(const char *what, void *begin, void *end);
void default_idle(void);
#ifdef CONFIG_XEN
@@ -988,4 +1017,11 @@ enum mds_mitigations {
MDS_MITIGATION_VMWERV,
};
+enum taa_mitigations {
+ TAA_MITIGATION_OFF,
+ TAA_MITIGATION_UCODE_NEEDED,
+ TAA_MITIGATION_VERW,
+ TAA_MITIGATION_TSX_DISABLED,
+};
+
#endif /* _ASM_X86_PROCESSOR_H */
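
A sketch of the new TSS limit arithmetic using stand-in layouts (the
hardware TSS size and 64-bit longs are assumed here): with
CONFIG_X86_IOPL_IOPERM the inclusive limit must cover both bitmaps plus
the extra trailing long, and IO_BITMAP_OFFSET_INVALID is simply one byte
past it, so any I/O access through an invalid base raises #GP.

    #include <stdio.h>
    #include <stddef.h>

    #define IO_BITMAP_LONGS 1024                     /* 65536 bits / 64 */

    struct x86_hw_tss { char fixed[104]; };          /* stand-in hw part */

    struct x86_io_bitmap {
            unsigned long long prev_sequence;
            unsigned int prev_max;
            unsigned long bitmap[IO_BITMAP_LONGS + 1];
            unsigned long mapall[IO_BITMAP_LONGS + 1];
    };

    struct tss_struct {
            struct x86_hw_tss x86_tss;
            struct x86_io_bitmap io_bitmap;
    };

    int main(void)
    {
            size_t valid_all = offsetof(struct tss_struct, io_bitmap.mapall) -
                               offsetof(struct tss_struct, x86_tss);
            size_t limit = valid_all + IO_BITMAP_LONGS * sizeof(long) +
                           sizeof(unsigned long) - 1;  /* inclusive limit */

            printf("tss limit %zu, invalid offset %zu\n", limit, limit + 1);
            return 0;
    }
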
diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
index 332eb3525867..5057a8ed100b 100644
--- a/arch/x86/include/asm/ptrace.h
+++ b/arch/x86/include/asm/ptrace.h
@@ -361,5 +361,11 @@ extern int do_get_thread_area(struct task_struct *p, int idx,
extern int do_set_thread_area(struct task_struct *p, int idx,
struct user_desc __user *info, int can_allocate);
+#ifdef CONFIG_X86_64
+# define do_set_thread_area_64(p, s, t) do_arch_prctl_64(p, s, t)
+#else
+# define do_set_thread_area_64(p, s, t) (0)
+#endif
+
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_X86_PTRACE_H */
diff --git a/arch/x86/include/asm/purgatory.h b/arch/x86/include/asm/purgatory.h
index 92c34e517da1..5528e9325049 100644
--- a/arch/x86/include/asm/purgatory.h
+++ b/arch/x86/include/asm/purgatory.h
@@ -6,16 +6,6 @@
#include <linux/purgatory.h>
extern void purgatory(void);
-/*
- * These forward declarations serve two purposes:
- *
- * 1) Make sparse happy when checking arch/purgatory
- * 2) Document that these are required to be global so the symbol
- * lookup in kexec works
- */
-extern unsigned long purgatory_backup_dest;
-extern unsigned long purgatory_backup_src;
-extern unsigned long purgatory_backup_sz;
#endif /* __ASSEMBLY__ */
#endif /* _ASM_PURGATORY_H */
diff --git a/arch/x86/include/asm/refcount.h b/arch/x86/include/asm/refcount.h
deleted file mode 100644
index 232f856e0db0..000000000000
--- a/arch/x86/include/asm/refcount.h
+++ /dev/null
@@ -1,126 +0,0 @@
-#ifndef __ASM_X86_REFCOUNT_H
-#define __ASM_X86_REFCOUNT_H
-/*
- * x86-specific implementation of refcount_t. Based on PAX_REFCOUNT from
- * PaX/grsecurity.
- */
-#include <linux/refcount.h>
-#include <asm/bug.h>
-
-/*
- * This is the first portion of the refcount error handling, which lives in
- * .text.unlikely, and is jumped to from the CPU flag check (in the
- * following macros). This saves the refcount value location into CX for
- * the exception handler to use (in mm/extable.c), and then triggers the
- * central refcount exception. The fixup address for the exception points
- * back to the regular execution flow in .text.
- */
-#define _REFCOUNT_EXCEPTION \
- ".pushsection .text..refcount\n" \
- "111:\tlea %[var], %%" _ASM_CX "\n" \
- "112:\t" ASM_UD2 "\n" \
- ASM_UNREACHABLE \
- ".popsection\n" \
- "113:\n" \
- _ASM_EXTABLE_REFCOUNT(112b, 113b)
-
-/* Trigger refcount exception if refcount result is negative. */
-#define REFCOUNT_CHECK_LT_ZERO \
- "js 111f\n\t" \
- _REFCOUNT_EXCEPTION
-
-/* Trigger refcount exception if refcount result is zero or negative. */
-#define REFCOUNT_CHECK_LE_ZERO \
- "jz 111f\n\t" \
- REFCOUNT_CHECK_LT_ZERO
-
-/* Trigger refcount exception unconditionally. */
-#define REFCOUNT_ERROR \
- "jmp 111f\n\t" \
- _REFCOUNT_EXCEPTION
-
-static __always_inline void refcount_add(unsigned int i, refcount_t *r)
-{
- asm volatile(LOCK_PREFIX "addl %1,%0\n\t"
- REFCOUNT_CHECK_LT_ZERO
- : [var] "+m" (r->refs.counter)
- : "ir" (i)
- : "cc", "cx");
-}
-
-static __always_inline void refcount_inc(refcount_t *r)
-{
- asm volatile(LOCK_PREFIX "incl %0\n\t"
- REFCOUNT_CHECK_LT_ZERO
- : [var] "+m" (r->refs.counter)
- : : "cc", "cx");
-}
-
-static __always_inline void refcount_dec(refcount_t *r)
-{
- asm volatile(LOCK_PREFIX "decl %0\n\t"
- REFCOUNT_CHECK_LE_ZERO
- : [var] "+m" (r->refs.counter)
- : : "cc", "cx");
-}
-
-static __always_inline __must_check
-bool refcount_sub_and_test(unsigned int i, refcount_t *r)
-{
- bool ret = GEN_BINARY_SUFFIXED_RMWcc(LOCK_PREFIX "subl",
- REFCOUNT_CHECK_LT_ZERO,
- r->refs.counter, e, "er", i, "cx");
-
- if (ret) {
- smp_acquire__after_ctrl_dep();
- return true;
- }
-
- return false;
-}
-
-static __always_inline __must_check bool refcount_dec_and_test(refcount_t *r)
-{
- bool ret = GEN_UNARY_SUFFIXED_RMWcc(LOCK_PREFIX "decl",
- REFCOUNT_CHECK_LT_ZERO,
- r->refs.counter, e, "cx");
-
- if (ret) {
- smp_acquire__after_ctrl_dep();
- return true;
- }
-
- return false;
-}
-
-static __always_inline __must_check
-bool refcount_add_not_zero(unsigned int i, refcount_t *r)
-{
- int c, result;
-
- c = atomic_read(&(r->refs));
- do {
- if (unlikely(c == 0))
- return false;
-
- result = c + i;
-
- /* Did we try to increment from/to an undesirable state? */
- if (unlikely(c < 0 || c == INT_MAX || result < c)) {
- asm volatile(REFCOUNT_ERROR
- : : [var] "m" (r->refs.counter)
- : "cc", "cx");
- break;
- }
-
- } while (!atomic_try_cmpxchg(&(r->refs), &c, result));
-
- return c != 0;
-}
-
-static __always_inline __must_check bool refcount_inc_not_zero(refcount_t *r)
-{
- return refcount_add_not_zero(1, r);
-}
-
-#endif
diff --git a/arch/x86/include/asm/rio.h b/arch/x86/include/asm/rio.h
deleted file mode 100644
index 0a21986d2238..000000000000
--- a/arch/x86/include/asm/rio.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Derived from include/asm-x86/mach-summit/mach_mpparse.h
- * and include/asm-x86/mach-default/bios_ebda.h
- *
- * Author: Laurent Vivier <Laurent.Vivier@bull.net>
- */
-
-#ifndef _ASM_X86_RIO_H
-#define _ASM_X86_RIO_H
-
-#define RIO_TABLE_VERSION 3
-
-struct rio_table_hdr {
- u8 version; /* Version number of this data structure */
- u8 num_scal_dev; /* # of Scalability devices */
- u8 num_rio_dev; /* # of RIO I/O devices */
-} __attribute__((packed));
-
-struct scal_detail {
- u8 node_id; /* Scalability Node ID */
- u32 CBAR; /* Address of 1MB register space */
- u8 port0node; /* Node ID port connected to: 0xFF=None */
- u8 port0port; /* Port num port connected to: 0,1,2, or */
- /* 0xFF=None */
- u8 port1node; /* Node ID port connected to: 0xFF = None */
- u8 port1port; /* Port num port connected to: 0,1,2, or */
- /* 0xFF=None */
- u8 port2node; /* Node ID port connected to: 0xFF = None */
- u8 port2port; /* Port num port connected to: 0,1,2, or */
- /* 0xFF=None */
- u8 chassis_num; /* 1 based Chassis number (1 = boot node) */
-} __attribute__((packed));
-
-struct rio_detail {
- u8 node_id; /* RIO Node ID */
- u32 BBAR; /* Address of 1MB register space */
- u8 type; /* Type of device */
- u8 owner_id; /* Node ID of Hurricane that owns this */
- /* node */
- u8 port0node; /* Node ID port connected to: 0xFF=None */
- u8 port0port; /* Port num port connected to: 0,1,2, or */
- /* 0xFF=None */
- u8 port1node; /* Node ID port connected to: 0xFF=None */
- u8 port1port; /* Port num port connected to: 0,1,2, or */
- /* 0xFF=None */
- u8 first_slot; /* Lowest slot number below this Calgary */
- u8 status; /* Bit 0 = 1 : the XAPIC is used */
- /* = 0 : the XAPIC is not used, ie: */
- /* ints fwded to another XAPIC */
- /* Bits1:7 Reserved */
- u8 WP_index; /* instance index - lower ones have */
- /* lower slot numbers/PCI bus numbers */
- u8 chassis_num; /* 1 based Chassis number */
-} __attribute__((packed));
-
-enum {
- HURR_SCALABILTY = 0, /* Hurricane Scalability info */
- HURR_RIOIB = 2, /* Hurricane RIOIB info */
- COMPAT_CALGARY = 4, /* Compatibility Calgary */
- ALT_CALGARY = 5, /* Second Planar Calgary */
-};
-
-#endif /* _ASM_X86_RIO_H */
diff --git a/arch/x86/include/asm/sections.h b/arch/x86/include/asm/sections.h
index 71b32f2570ab..036c360910c5 100644
--- a/arch/x86/include/asm/sections.h
+++ b/arch/x86/include/asm/sections.h
@@ -6,7 +6,6 @@
#include <asm/extable.h>
extern char __brk_base[], __brk_limit[];
-extern struct exception_table_entry __stop___ex_table[];
extern char __end_rodata_aligned[];
#if defined(CONFIG_X86_64)
diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
index ac3892920419..6669164abadc 100644
--- a/arch/x86/include/asm/segment.h
+++ b/arch/x86/include/asm/segment.h
@@ -31,6 +31,18 @@
*/
#define SEGMENT_RPL_MASK 0x3
+/*
+ * When running on Xen PV, the actual privilege level of the kernel is 1,
+ * not 0. Testing the Requested Privilege Level in a segment selector to
+ * determine whether the context is user mode or kernel mode with
+ * SEGMENT_RPL_MASK is wrong because the PV kernel's privilege level
+ * matches the 0x3 mask.
+ *
+ * Testing with USER_SEGMENT_RPL_MASK is valid for both native and Xen PV
+ * kernels because privilege level 2 is never used.
+ */
+#define USER_SEGMENT_RPL_MASK 0x2
+
/* User mode is privilege level 3: */
#define USER_RPL 0x3
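
A sketch of the selector test this comment motivates: on Xen PV the
kernel runs at ring 1, so sel & SEGMENT_RPL_MASK would misclassify the
kernel as user mode, while testing bit 1 alone still identifies ring-3
selectors because ring 2 is never used. The selector values below are
illustrative only.

    #include <stdio.h>

    #define SEGMENT_RPL_MASK      0x3
    #define USER_SEGMENT_RPL_MASK 0x2

    static int selector_is_user(unsigned short sel)
    {
            return !!(sel & USER_SEGMENT_RPL_MASK);
    }

    int main(void)
    {
            printf("ring-0 cs: %d\n", selector_is_user(0x10)); /* native kernel */
            printf("ring-1 cs: %d\n", selector_is_user(0x11)); /* Xen PV kernel */
            printf("ring-3 cs: %d\n", selector_is_user(0x33)); /* user mode     */
            return 0;
    }
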
diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
index 18a4b6890fa8..0e059b73437b 100644
--- a/arch/x86/include/asm/switch_to.h
+++ b/arch/x86/include/asm/switch_to.h
@@ -103,7 +103,17 @@ static inline void update_task_stack(struct task_struct *task)
if (static_cpu_has(X86_FEATURE_XENPV))
load_sp0(task_top_of_stack(task));
#endif
+}
+static inline void kthread_frame_init(struct inactive_task_frame *frame,
+ unsigned long fun, unsigned long arg)
+{
+ frame->bx = fun;
+#ifdef CONFIG_X86_32
+ frame->di = arg;
+#else
+ frame->r12 = arg;
+#endif
}
#endif /* _ASM_X86_SWITCH_TO_H */
diff --git a/arch/x86/include/asm/syscall_wrapper.h b/arch/x86/include/asm/syscall_wrapper.h
index e046a405743d..e2389ce9bf58 100644
--- a/arch/x86/include/asm/syscall_wrapper.h
+++ b/arch/x86/include/asm/syscall_wrapper.h
@@ -6,6 +6,8 @@
#ifndef _ASM_X86_SYSCALL_WRAPPER_H
#define _ASM_X86_SYSCALL_WRAPPER_H
+struct pt_regs;
+
/* Mapping of registers to parameters for syscalls on x86-64 and x32 */
#define SC_X86_64_REGS_TO_ARGS(x, ...) \
__MAP(x,__SC_ARGS \
@@ -28,13 +30,21 @@
* kernel/sys_ni.c and SYS_NI in kernel/time/posix-stubs.c to cover this
* case as well.
*/
+#define __IA32_COMPAT_SYS_STUB0(x, name) \
+ asmlinkage long __ia32_compat_sys_##name(const struct pt_regs *regs);\
+ ALLOW_ERROR_INJECTION(__ia32_compat_sys_##name, ERRNO); \
+ asmlinkage long __ia32_compat_sys_##name(const struct pt_regs *regs)\
+ { \
+ return __se_compat_sys_##name(); \
+ }
+
#define __IA32_COMPAT_SYS_STUBx(x, name, ...) \
asmlinkage long __ia32_compat_sys##name(const struct pt_regs *regs);\
ALLOW_ERROR_INJECTION(__ia32_compat_sys##name, ERRNO); \
asmlinkage long __ia32_compat_sys##name(const struct pt_regs *regs)\
{ \
return __se_compat_sys##name(SC_IA32_REGS_TO_ARGS(x,__VA_ARGS__));\
- } \
+ }
#define __IA32_SYS_STUBx(x, name, ...) \
asmlinkage long __ia32_sys##name(const struct pt_regs *regs); \
@@ -48,16 +58,23 @@
* To keep the naming coherent, re-define SYSCALL_DEFINE0 to create an alias
* named __ia32_sys_*()
*/
-#define SYSCALL_DEFINE0(sname) \
- SYSCALL_METADATA(_##sname, 0); \
- asmlinkage long __x64_sys_##sname(void); \
- ALLOW_ERROR_INJECTION(__x64_sys_##sname, ERRNO); \
- SYSCALL_ALIAS(__ia32_sys_##sname, __x64_sys_##sname); \
- asmlinkage long __x64_sys_##sname(void)
-#define COND_SYSCALL(name) \
- cond_syscall(__x64_sys_##name); \
- cond_syscall(__ia32_sys_##name)
+#define SYSCALL_DEFINE0(sname) \
+ SYSCALL_METADATA(_##sname, 0); \
+ asmlinkage long __x64_sys_##sname(const struct pt_regs *__unused);\
+ ALLOW_ERROR_INJECTION(__x64_sys_##sname, ERRNO); \
+ SYSCALL_ALIAS(__ia32_sys_##sname, __x64_sys_##sname); \
+ asmlinkage long __x64_sys_##sname(const struct pt_regs *__unused)
+
+#define COND_SYSCALL(name) \
+ asmlinkage __weak long __x64_sys_##name(const struct pt_regs *__unused) \
+ { \
+ return sys_ni_syscall(); \
+ } \
+ asmlinkage __weak long __ia32_sys_##name(const struct pt_regs *__unused)\
+ { \
+ return sys_ni_syscall(); \
+ }
#define SYS_NI(name) \
SYSCALL_ALIAS(__x64_sys_##name, sys_ni_posix_timers); \
@@ -75,15 +92,24 @@
* of the x86-64-style parameter ordering of x32 syscalls. The syscalls common
* with x86_64 obviously do not need such care.
*/
+#define __X32_COMPAT_SYS_STUB0(x, name, ...) \
+ asmlinkage long __x32_compat_sys_##name(const struct pt_regs *regs);\
+ ALLOW_ERROR_INJECTION(__x32_compat_sys_##name, ERRNO); \
+ asmlinkage long __x32_compat_sys_##name(const struct pt_regs *regs)\
+ { \
+ return __se_compat_sys_##name();\
+ }
+
#define __X32_COMPAT_SYS_STUBx(x, name, ...) \
asmlinkage long __x32_compat_sys##name(const struct pt_regs *regs);\
ALLOW_ERROR_INJECTION(__x32_compat_sys##name, ERRNO); \
asmlinkage long __x32_compat_sys##name(const struct pt_regs *regs)\
{ \
return __se_compat_sys##name(SC_X86_64_REGS_TO_ARGS(x,__VA_ARGS__));\
- } \
+ }
#else /* CONFIG_X86_X32 */
+#define __X32_COMPAT_SYS_STUB0(x, name)
#define __X32_COMPAT_SYS_STUBx(x, name, ...)
#endif /* CONFIG_X86_X32 */
@@ -94,6 +120,17 @@
* mapping of registers to parameters, we need to generate stubs for each
* of them.
*/
+#define COMPAT_SYSCALL_DEFINE0(name) \
+ static long __se_compat_sys_##name(void); \
+ static inline long __do_compat_sys_##name(void); \
+ __IA32_COMPAT_SYS_STUB0(x, name) \
+ __X32_COMPAT_SYS_STUB0(x, name) \
+ static long __se_compat_sys_##name(void) \
+ { \
+ return __do_compat_sys_##name(); \
+ } \
+ static inline long __do_compat_sys_##name(void)
+
#define COMPAT_SYSCALL_DEFINEx(x, name, ...) \
static long __se_compat_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \
static inline long __do_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__));\
@@ -181,15 +218,19 @@
* macros to work correctly.
*/
#ifndef SYSCALL_DEFINE0
-#define SYSCALL_DEFINE0(sname) \
- SYSCALL_METADATA(_##sname, 0); \
- asmlinkage long __x64_sys_##sname(void); \
- ALLOW_ERROR_INJECTION(__x64_sys_##sname, ERRNO); \
- asmlinkage long __x64_sys_##sname(void)
+#define SYSCALL_DEFINE0(sname) \
+ SYSCALL_METADATA(_##sname, 0); \
+ asmlinkage long __x64_sys_##sname(const struct pt_regs *__unused);\
+ ALLOW_ERROR_INJECTION(__x64_sys_##sname, ERRNO); \
+ asmlinkage long __x64_sys_##sname(const struct pt_regs *__unused)
#endif
#ifndef COND_SYSCALL
-#define COND_SYSCALL(name) cond_syscall(__x64_sys_##name)
+#define COND_SYSCALL(name) \
+ asmlinkage __weak long __x64_sys_##name(const struct pt_regs *__unused) \
+ { \
+ return sys_ni_syscall(); \
+ }
#endif
#ifndef SYS_NI
@@ -201,7 +242,6 @@
* For VSYSCALLS, we need to declare these three syscalls with the new
* pt_regs-based calling convention for in-kernel use.
*/
-struct pt_regs;
asmlinkage long __x64_sys_getcpu(const struct pt_regs *regs);
asmlinkage long __x64_sys_gettimeofday(const struct pt_regs *regs);
asmlinkage long __x64_sys_time(const struct pt_regs *regs);
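
A rough user-space sketch of the wrapper shape these macros expand to:
the externally visible entry point takes a pt_regs pointer (unused for
zero-argument syscalls) and forwards to an inner body, which is the
pattern the new COMPAT_SYSCALL_DEFINE0/SYSCALL_DEFINE0 follow. All names
and types below are simplified stand-ins, not the kernel's.

    #include <stdio.h>

    struct pt_regs { long unused; };

    #define MY_SYSCALL_DEFINE0(name)                              \
            static long __do_sys_##name(void);                    \
            long __x64_sys_##name(const struct pt_regs *__unused) \
            {                                                     \
                    return __do_sys_##name();                     \
            }                                                     \
            static long __do_sys_##name(void)

    MY_SYSCALL_DEFINE0(getpid_demo)
    {
            return 4242;    /* the actual syscall body */
    }

    int main(void)
    {
            struct pt_regs regs = { 0 };

            printf("syscall returned %ld\n", __x64_sys_getpid_demo(&regs));
            return 0;
    }
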
diff --git a/arch/x86/include/asm/tce.h b/arch/x86/include/asm/tce.h
deleted file mode 100644
index 6ed2deacf1d0..000000000000
--- a/arch/x86/include/asm/tce.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * This file is derived from asm-powerpc/tce.h.
- *
- * Copyright (C) IBM Corporation, 2006
- *
- * Author: Muli Ben-Yehuda <muli@il.ibm.com>
- * Author: Jon Mason <jdmason@us.ibm.com>
- */
-
-#ifndef _ASM_X86_TCE_H
-#define _ASM_X86_TCE_H
-
-extern unsigned int specified_table_size;
-struct iommu_table;
-
-#define TCE_ENTRY_SIZE 8 /* in bytes */
-
-#define TCE_READ_SHIFT 0
-#define TCE_WRITE_SHIFT 1
-#define TCE_HUBID_SHIFT 2 /* unused */
-#define TCE_RSVD_SHIFT 8 /* unused */
-#define TCE_RPN_SHIFT 12
-#define TCE_UNUSED_SHIFT 48 /* unused */
-
-#define TCE_RPN_MASK 0x0000fffffffff000ULL
-
-extern void tce_build(struct iommu_table *tbl, unsigned long index,
- unsigned int npages, unsigned long uaddr, int direction);
-extern void tce_free(struct iommu_table *tbl, long index, unsigned int npages);
-extern void * __init alloc_tce_table(void);
-extern void __init free_tce_table(void *tbl);
-extern int __init build_tce_table(struct pci_dev *dev, void __iomem *bbar);
-
-#endif /* _ASM_X86_TCE_H */
diff --git a/arch/x86/include/asm/text-patching.h b/arch/x86/include/asm/text-patching.h
index 5e8319bb207a..23c626a742e8 100644
--- a/arch/x86/include/asm/text-patching.h
+++ b/arch/x86/include/asm/text-patching.h
@@ -26,10 +26,11 @@ static inline void apply_paravirt(struct paravirt_patch_site *start,
#define POKE_MAX_OPCODE_SIZE 5
struct text_poke_loc {
- void *detour;
void *addr;
- size_t len;
- const char opcode[POKE_MAX_OPCODE_SIZE];
+ int len;
+ s32 rel32;
+ u8 opcode;
+ const u8 text[POKE_MAX_OPCODE_SIZE];
};
extern void text_poke_early(void *addr, const void *opcode, size_t len);
@@ -51,8 +52,10 @@ extern void text_poke_early(void *addr, const void *opcode, size_t len);
extern void *text_poke(void *addr, const void *opcode, size_t len);
extern void *text_poke_kgdb(void *addr, const void *opcode, size_t len);
extern int poke_int3_handler(struct pt_regs *regs);
-extern void text_poke_bp(void *addr, const void *opcode, size_t len, void *handler);
+extern void text_poke_bp(void *addr, const void *opcode, size_t len, const void *emulate);
extern void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries);
+extern void text_poke_loc_init(struct text_poke_loc *tp, void *addr,
+ const void *opcode, size_t len, const void *emulate);
extern int after_bootmem;
extern __ro_after_init struct mm_struct *poking_mm;
extern __ro_after_init unsigned long poking_addr;
@@ -63,8 +66,17 @@ static inline void int3_emulate_jmp(struct pt_regs *regs, unsigned long ip)
regs->ip = ip;
}
-#define INT3_INSN_SIZE 1
-#define CALL_INSN_SIZE 5
+#define INT3_INSN_SIZE 1
+#define INT3_INSN_OPCODE 0xCC
+
+#define CALL_INSN_SIZE 5
+#define CALL_INSN_OPCODE 0xE8
+
+#define JMP32_INSN_SIZE 5
+#define JMP32_INSN_OPCODE 0xE9
+
+#define JMP8_INSN_SIZE 2
+#define JMP8_INSN_OPCODE 0xEB
static inline void int3_emulate_push(struct pt_regs *regs, unsigned long val)
{
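
A sketch of the rel32 arithmetic a text_poke_loc now carries for
JMP32/CALL patching: the 5-byte instruction encodes its target relative
to the address of the *next* instruction. The addresses below are
invented for illustration.

    #include <stdio.h>
    #include <stdint.h>

    #define JMP32_INSN_OPCODE 0xE9
    #define JMP32_INSN_SIZE   5

    static int32_t rel32(uint64_t addr, uint64_t dest)
    {
            return (int32_t)(dest - (addr + JMP32_INSN_SIZE));
    }

    int main(void)
    {
            uint64_t addr = 0xffffffff81000000ULL;  /* patch site (example) */
            uint64_t dest = 0xffffffff81000040ULL;  /* jump target (example) */
            unsigned char insn[JMP32_INSN_SIZE];
            int32_t r = rel32(addr, dest);
            int i;

            insn[0] = JMP32_INSN_OPCODE;
            for (i = 0; i < 4; i++)                 /* little-endian imm32 */
                    insn[i + 1] = (r >> (8 * i)) & 0xff;

            printf("jmp rel32 = %d (%02x %02x %02x %02x %02x)\n", r,
                   insn[0], insn[1], insn[2], insn[3], insn[4]);
            return 0;
    }
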
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index f9453536f9bb..d779366ce3f8 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -143,8 +143,8 @@ struct thread_info {
_TIF_NOHZ)
/* flags to check in __switch_to() */
-#define _TIF_WORK_CTXSW_BASE \
- (_TIF_IO_BITMAP|_TIF_NOCPUID|_TIF_NOTSC|_TIF_BLOCKSTEP| \
+#define _TIF_WORK_CTXSW_BASE \
+ (_TIF_NOCPUID | _TIF_NOTSC | _TIF_BLOCKSTEP | \
_TIF_SSBD | _TIF_SPEC_FORCE_UPDATE)
/*
@@ -156,8 +156,14 @@ struct thread_info {
# define _TIF_WORK_CTXSW (_TIF_WORK_CTXSW_BASE)
#endif
-#define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
-#define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
+#ifdef CONFIG_X86_IOPL_IOPERM
+# define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW | _TIF_USER_RETURN_NOTIFY | \
+ _TIF_IO_BITMAP)
+#else
+# define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW | _TIF_USER_RETURN_NOTIFY)
+#endif
+
+#define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
#define STACK_WARN (THREAD_SIZE/8)
diff --git a/arch/x86/include/asm/trace/hyperv.h b/arch/x86/include/asm/trace/hyperv.h
index ace464f09681..4d705cb4d63b 100644
--- a/arch/x86/include/asm/trace/hyperv.h
+++ b/arch/x86/include/asm/trace/hyperv.h
@@ -71,6 +71,21 @@ TRACE_EVENT(hyperv_send_ipi_mask,
__entry->ncpus, __entry->vector)
);
+TRACE_EVENT(hyperv_send_ipi_one,
+ TP_PROTO(int cpu,
+ int vector),
+ TP_ARGS(cpu, vector),
+ TP_STRUCT__entry(
+ __field(int, cpu)
+ __field(int, vector)
+ ),
+ TP_fast_assign(__entry->cpu = cpu;
+ __entry->vector = vector;
+ ),
+ TP_printk("cpu %d vector %x",
+ __entry->cpu, __entry->vector)
+ );
+
#endif /* CONFIG_HYPERV */
#undef TRACE_INCLUDE_PATH
diff --git a/arch/x86/include/asm/umip.h b/arch/x86/include/asm/umip.h
index db43f2a0d92c..aeed98c3c9e1 100644
--- a/arch/x86/include/asm/umip.h
+++ b/arch/x86/include/asm/umip.h
@@ -4,9 +4,9 @@
#include <linux/types.h>
#include <asm/ptrace.h>
-#ifdef CONFIG_X86_INTEL_UMIP
+#ifdef CONFIG_X86_UMIP
bool fixup_umip_exception(struct pt_regs *regs);
#else
static inline bool fixup_umip_exception(struct pt_regs *regs) { return false; }
-#endif /* CONFIG_X86_INTEL_UMIP */
+#endif /* CONFIG_X86_UMIP */
#endif /* _ASM_X86_UMIP_H */
diff --git a/arch/x86/include/asm/uv/bios.h b/arch/x86/include/asm/uv/bios.h
index 6e7caf65fa40..389174eaec79 100644
--- a/arch/x86/include/asm/uv/bios.h
+++ b/arch/x86/include/asm/uv/bios.h
@@ -138,7 +138,7 @@ extern s64 uv_bios_change_memprotect(u64, u64, enum uv_memprotect);
extern s64 uv_bios_reserved_page_pa(u64, u64 *, u64 *, u64 *);
extern int uv_bios_set_legacy_vga_target(bool decode, int domain, int bus);
-extern void uv_bios_init(void);
+extern int uv_bios_init(void);
extern unsigned long sn_rtc_cycles_per_second;
extern int uv_type;
diff --git a/arch/x86/include/asm/uv/uv.h b/arch/x86/include/asm/uv/uv.h
index 6bc6d89d8e2a..45ea95ce79b4 100644
--- a/arch/x86/include/asm/uv/uv.h
+++ b/arch/x86/include/asm/uv/uv.h
@@ -12,6 +12,16 @@ struct mm_struct;
#ifdef CONFIG_X86_UV
#include <linux/efi.h>
+#define UV_PROC_NODE "sgi_uv"
+
+static inline int uv(int uvtype)
+{
+ /* uv(0) is "any" */
+ if (uvtype >= 0 && uvtype <= 30)
+ return 1 << uvtype;
+ return 1;
+}
+
extern unsigned long uv_systab_phys;
extern enum uv_system_type get_uv_system_type(void);
@@ -20,7 +30,8 @@ static inline bool is_early_uv_system(void)
return uv_systab_phys && uv_systab_phys != EFI_INVALID_TABLE_ADDR;
}
extern int is_uv_system(void);
-extern int is_uv_hubless(void);
+extern int is_uv_hubbed(int uvtype);
+extern int is_uv_hubless(int uvtype);
extern void uv_cpu_init(void);
extern void uv_nmi_init(void);
extern void uv_system_init(void);
@@ -32,7 +43,8 @@ extern const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
static inline enum uv_system_type get_uv_system_type(void) { return UV_NONE; }
static inline bool is_early_uv_system(void) { return 0; }
static inline int is_uv_system(void) { return 0; }
-static inline int is_uv_hubless(void) { return 0; }
+static inline int is_uv_hubbed(int uv) { return 0; }
+static inline int is_uv_hubless(int uv) { return 0; }
static inline void uv_cpu_init(void) { }
static inline void uv_system_init(void) { }
static inline const struct cpumask *
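
A sketch of the uv() encoding introduced above: each UV generation maps
to a single bit, so is_uv_hubbed()/is_uv_hubless() can be queried with a
mask of acceptable types rather than one revision at a time. The mask
value below is an invented example.

    #include <stdio.h>

    static int uv(int uvtype)
    {
            /* uv(0) is "any" */
            if (uvtype >= 0 && uvtype <= 30)
                    return 1 << uvtype;
            return 1;
    }

    int main(void)
    {
            int hubbed_mask = uv(2) | uv(3);        /* pretend UV2+UV3 system */

            printf("uv(3) present: %d\n", !!(hubbed_mask & uv(3)));
            printf("uv(4) present: %d\n", !!(hubbed_mask & uv(4)));
            return 0;
    }
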
diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h
index 44cf6d6deb7a..950cd1395d5d 100644
--- a/arch/x86/include/asm/uv/uv_hub.h
+++ b/arch/x86/include/asm/uv/uv_hub.h
@@ -19,6 +19,7 @@
#include <linux/topology.h>
#include <asm/types.h>
#include <asm/percpu.h>
+#include <asm/uv/uv.h>
#include <asm/uv/uv_mmrs.h>
#include <asm/uv/bios.h>
#include <asm/irq_vectors.h>
@@ -243,83 +244,61 @@ static inline int uv_hub_info_check(int version)
#define UV4_HUB_REVISION_BASE 7
#define UV4A_HUB_REVISION_BASE 8 /* UV4 (fixed) rev 2 */
-#ifdef UV1_HUB_IS_SUPPORTED
+/* WARNING: UVx_HUB_IS_SUPPORTED defines are deprecated and will be removed */
static inline int is_uv1_hub(void)
{
- return uv_hub_info->hub_revision < UV2_HUB_REVISION_BASE;
-}
+#ifdef UV1_HUB_IS_SUPPORTED
+ return is_uv_hubbed(uv(1));
#else
-static inline int is_uv1_hub(void)
-{
return 0;
-}
#endif
+}
-#ifdef UV2_HUB_IS_SUPPORTED
static inline int is_uv2_hub(void)
{
- return ((uv_hub_info->hub_revision >= UV2_HUB_REVISION_BASE) &&
- (uv_hub_info->hub_revision < UV3_HUB_REVISION_BASE));
-}
+#ifdef UV2_HUB_IS_SUPPORTED
+ return is_uv_hubbed(uv(2));
#else
-static inline int is_uv2_hub(void)
-{
return 0;
-}
#endif
+}
-#ifdef UV3_HUB_IS_SUPPORTED
static inline int is_uv3_hub(void)
{
- return ((uv_hub_info->hub_revision >= UV3_HUB_REVISION_BASE) &&
- (uv_hub_info->hub_revision < UV4_HUB_REVISION_BASE));
-}
+#ifdef UV3_HUB_IS_SUPPORTED
+ return is_uv_hubbed(uv(3));
#else
-static inline int is_uv3_hub(void)
-{
return 0;
-}
#endif
+}
/* First test "is UV4A", then "is UV4" */
-#ifdef UV4A_HUB_IS_SUPPORTED
-static inline int is_uv4a_hub(void)
-{
- return (uv_hub_info->hub_revision >= UV4A_HUB_REVISION_BASE);
-}
-#else
static inline int is_uv4a_hub(void)
{
+#ifdef UV4A_HUB_IS_SUPPORTED
+ if (is_uv_hubbed(uv(4)))
+ return (uv_hub_info->hub_revision == UV4A_HUB_REVISION_BASE);
+#endif
return 0;
}
-#endif
-#ifdef UV4_HUB_IS_SUPPORTED
static inline int is_uv4_hub(void)
{
- return uv_hub_info->hub_revision >= UV4_HUB_REVISION_BASE;
-}
+#ifdef UV4_HUB_IS_SUPPORTED
+ return is_uv_hubbed(uv(4));
#else
-static inline int is_uv4_hub(void)
-{
return 0;
-}
#endif
+}
static inline int is_uvx_hub(void)
{
- if (uv_hub_info->hub_revision >= UV2_HUB_REVISION_BASE)
- return uv_hub_info->hub_revision;
-
- return 0;
+ return (is_uv_hubbed(-2) >= uv(2));
}
static inline int is_uv_hub(void)
{
-#ifdef UV1_HUB_IS_SUPPORTED
- return uv_hub_info->hub_revision;
-#endif
- return is_uvx_hub();
+ return is_uv1_hub() || is_uvx_hub();
}
union uvh_apicid {
diff --git a/arch/x86/include/asm/xen/hypervisor.h b/arch/x86/include/asm/xen/hypervisor.h
index 42e1245af0d8..ff4b52e37e60 100644
--- a/arch/x86/include/asm/xen/hypervisor.h
+++ b/arch/x86/include/asm/xen/hypervisor.h
@@ -62,6 +62,4 @@ void xen_arch_register_cpu(int num);
void xen_arch_unregister_cpu(int num);
#endif
-extern void xen_set_iopl_mask(unsigned mask);
-
#endif /* _ASM_X86_XEN_HYPERVISOR_H */
diff --git a/arch/x86/include/asm/xen/interface.h b/arch/x86/include/asm/xen/interface.h
index 62ca03ef5c65..9139b3e86316 100644
--- a/arch/x86/include/asm/xen/interface.h
+++ b/arch/x86/include/asm/xen/interface.h
@@ -379,12 +379,9 @@ struct xen_pmu_arch {
* Prefix forces emulation of some non-trapping instructions.
* Currently only CPUID.
*/
-#ifdef __ASSEMBLY__
-#define XEN_EMULATE_PREFIX .byte 0x0f,0x0b,0x78,0x65,0x6e ;
-#define XEN_CPUID XEN_EMULATE_PREFIX cpuid
-#else
-#define XEN_EMULATE_PREFIX ".byte 0x0f,0x0b,0x78,0x65,0x6e ; "
-#define XEN_CPUID XEN_EMULATE_PREFIX "cpuid"
-#endif
+#include <asm/emulate_prefix.h>
+
+#define XEN_EMULATE_PREFIX __ASM_FORM(.byte __XEN_EMULATE_PREFIX ;)
+#define XEN_CPUID XEN_EMULATE_PREFIX __ASM_FORM(cpuid)
#endif /* _ASM_X86_XEN_INTERFACE_H */
diff --git a/arch/x86/include/uapi/asm/bootparam.h b/arch/x86/include/uapi/asm/bootparam.h
index c895df5482c5..8669c6bdbb84 100644
--- a/arch/x86/include/uapi/asm/bootparam.h
+++ b/arch/x86/include/uapi/asm/bootparam.h
@@ -2,7 +2,7 @@
#ifndef _ASM_X86_BOOTPARAM_H
#define _ASM_X86_BOOTPARAM_H
-/* setup_data types */
+/* setup_data/setup_indirect types */
#define SETUP_NONE 0
#define SETUP_E820_EXT 1
#define SETUP_DTB 2
@@ -11,6 +11,11 @@
#define SETUP_APPLE_PROPERTIES 5
#define SETUP_JAILHOUSE 6
+#define SETUP_INDIRECT (1<<31)
+
+/* SETUP_INDIRECT | max(SETUP_*) */
+#define SETUP_TYPE_MAX (SETUP_INDIRECT | SETUP_JAILHOUSE)
+
/* ram_size flags */
#define RAMDISK_IMAGE_START_MASK 0x07FF
#define RAMDISK_PROMPT_FLAG 0x8000
@@ -49,6 +54,14 @@ struct setup_data {
__u8 data[0];
};
+/* extensible setup indirect data node */
+struct setup_indirect {
+ __u32 type;
+ __u32 reserved; /* Reserved, must be set to zero. */
+ __u64 len;
+ __u64 addr;
+};
+
struct setup_header {
__u8 setup_sects;
__u16 root_flags;
@@ -88,6 +101,7 @@ struct setup_header {
__u64 pref_address;
__u32 init_size;
__u32 handover_offset;
+ __u32 kernel_info_offset;
} __attribute__((packed));
struct sys_desc_table {
@@ -139,15 +153,22 @@ struct boot_e820_entry {
* setup data structure.
*/
struct jailhouse_setup_data {
- __u16 version;
- __u16 compatible_version;
- __u16 pm_timer_address;
- __u16 num_cpus;
- __u64 pci_mmconfig_base;
- __u32 tsc_khz;
- __u32 apic_khz;
- __u8 standard_ioapic;
- __u8 cpu_ids[255];
+ struct {
+ __u16 version;
+ __u16 compatible_version;
+ } __attribute__((packed)) hdr;
+ struct {
+ __u16 pm_timer_address;
+ __u16 num_cpus;
+ __u64 pci_mmconfig_base;
+ __u32 tsc_khz;
+ __u32 apic_khz;
+ __u8 standard_ioapic;
+ __u8 cpu_ids[255];
+ } __attribute__((packed)) v1;
+ struct {
+ __u32 flags;
+ } __attribute__((packed)) v2;
} __attribute__((packed));
/* The so-called "zeropage" */
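
SETUP_INDIRECT turns a setup_data node into a pointer: the node's payload is a struct setup_indirect whose type/addr/len describe where the real data lives. A consumer-side sketch of walking the list; map_phys() and consume() are hypothetical stand-ins for the caller's mapping primitive (the kernel itself would use early_memremap()) and payload handler, and a real implementation would map the header first and then remap with data->len included:

static void walk_setup_data(__u64 pa_data)
{
	while (pa_data) {
		struct setup_data *data = map_phys(pa_data, sizeof(*data));

		if (data->type == SETUP_INDIRECT) {
			struct setup_indirect *ind =
				(struct setup_indirect *)data->data;

			/* ind->type holds the real payload type; the
			 * payload lives at ind->addr, ind->len bytes long */
			consume(ind->type, map_phys(ind->addr, ind->len),
				ind->len);
		} else {
			consume(data->type, data->data, data->len);
		}
		pa_data = data->next;
	}
}
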
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 3578ad248bc9..32acb970f416 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -134,7 +134,7 @@ obj-$(CONFIG_EFI) += sysfb_efi.o
obj-$(CONFIG_PERF_EVENTS) += perf_regs.o
obj-$(CONFIG_TRACING) += tracepoint.o
obj-$(CONFIG_SCHED_MC_PRIO) += itmt.o
-obj-$(CONFIG_X86_INTEL_UMIP) += umip.o
+obj-$(CONFIG_X86_UMIP) += umip.o
obj-$(CONFIG_UNWINDER_ORC) += unwind_orc.o
obj-$(CONFIG_UNWINDER_FRAME_POINTER) += unwind_frame.o
@@ -146,7 +146,6 @@ ifeq ($(CONFIG_X86_64),y)
obj-$(CONFIG_AUDIT) += audit_64.o
obj-$(CONFIG_GART_IOMMU) += amd_gart_64.o aperture_64.o
- obj-$(CONFIG_CALGARY_IOMMU) += pci-calgary_64.o tce_64.o
obj-$(CONFIG_MMCONF_FAM10H) += mmconf-fam10h_64.o
obj-y += vsmp_64.o
diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
index e95e95960156..daf88f8143c5 100644
--- a/arch/x86/kernel/acpi/wakeup_32.S
+++ b/arch/x86/kernel/acpi/wakeup_32.S
@@ -9,8 +9,7 @@
.code32
ALIGN
-ENTRY(wakeup_pmode_return)
-wakeup_pmode_return:
+SYM_CODE_START(wakeup_pmode_return)
movw $__KERNEL_DS, %ax
movw %ax, %ss
movw %ax, %fs
@@ -39,6 +38,7 @@ wakeup_pmode_return:
# jump to place where we left off
movl saved_eip, %eax
jmp *%eax
+SYM_CODE_END(wakeup_pmode_return)
bogus_magic:
jmp bogus_magic
@@ -72,7 +72,7 @@ restore_registers:
popfl
ret
-ENTRY(do_suspend_lowlevel)
+SYM_CODE_START(do_suspend_lowlevel)
call save_processor_state
call save_registers
pushl $3
@@ -87,10 +87,11 @@ ret_point:
call restore_registers
call restore_processor_state
ret
+SYM_CODE_END(do_suspend_lowlevel)
.data
ALIGN
-ENTRY(saved_magic) .long 0
+SYM_DATA(saved_magic, .long 0)
saved_eip: .long 0
# saved registers
diff --git a/arch/x86/kernel/acpi/wakeup_64.S b/arch/x86/kernel/acpi/wakeup_64.S
index 7f9ade13bbcf..c8daa92f38dc 100644
--- a/arch/x86/kernel/acpi/wakeup_64.S
+++ b/arch/x86/kernel/acpi/wakeup_64.S
@@ -14,7 +14,7 @@
/*
* Hooray, we are in Long 64-bit mode (but still running in low memory)
*/
-ENTRY(wakeup_long64)
+SYM_FUNC_START(wakeup_long64)
movq saved_magic, %rax
movq $0x123456789abcdef0, %rdx
cmpq %rdx, %rax
@@ -40,9 +40,9 @@ ENTRY(wakeup_long64)
movq saved_rip, %rax
jmp *%rax
-ENDPROC(wakeup_long64)
+SYM_FUNC_END(wakeup_long64)
-ENTRY(do_suspend_lowlevel)
+SYM_FUNC_START(do_suspend_lowlevel)
FRAME_BEGIN
subq $8, %rsp
xorl %eax, %eax
@@ -125,7 +125,7 @@ ENTRY(do_suspend_lowlevel)
addq $8, %rsp
FRAME_END
jmp restore_processor_state
-ENDPROC(do_suspend_lowlevel)
+SYM_FUNC_END(do_suspend_lowlevel)
.data
saved_rbp: .quad 0
@@ -136,4 +136,4 @@ saved_rbx: .quad 0
saved_rip: .quad 0
saved_rsp: .quad 0
-ENTRY(saved_magic) .quad 0
+SYM_DATA(saved_magic, .quad 0)
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 9d3a971ea364..9ec463fe96f2 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -956,16 +956,15 @@ NOKPROBE_SYMBOL(patch_cmp);
int poke_int3_handler(struct pt_regs *regs)
{
struct text_poke_loc *tp;
- unsigned char int3 = 0xcc;
void *ip;
/*
* Having observed our INT3 instruction, we now must observe
* bp_patching.nr_entries.
*
- * nr_entries != 0 INT3
- * WMB RMB
- * write INT3 if (nr_entries)
+ * nr_entries != 0 INT3
+ * WMB RMB
+ * write INT3 if (nr_entries)
*
* Idem for other elements in bp_patching.
*/
@@ -978,9 +977,9 @@ int poke_int3_handler(struct pt_regs *regs)
return 0;
/*
- * Discount the sizeof(int3). See text_poke_bp_batch().
+ * Discount the INT3. See text_poke_bp_batch().
*/
- ip = (void *) regs->ip - sizeof(int3);
+ ip = (void *) regs->ip - INT3_INSN_SIZE;
/*
* Skip the binary search if there is a single member in the vector.
@@ -997,8 +996,28 @@ int poke_int3_handler(struct pt_regs *regs)
return 0;
}
- /* set up the specified breakpoint detour */
- regs->ip = (unsigned long) tp->detour;
+ ip += tp->len;
+
+ switch (tp->opcode) {
+ case INT3_INSN_OPCODE:
+ /*
+ * Someone poked an explicit INT3, they'll want to handle it,
+ * do not consume.
+ */
+ return 0;
+
+ case CALL_INSN_OPCODE:
+ int3_emulate_call(regs, (long)ip + tp->rel32);
+ break;
+
+ case JMP32_INSN_OPCODE:
+ case JMP8_INSN_OPCODE:
+ int3_emulate_jmp(regs, (long)ip + tp->rel32);
+ break;
+
+ default:
+ BUG();
+ }
return 1;
}
@@ -1014,7 +1033,7 @@ NOKPROBE_SYMBOL(poke_int3_handler);
* synchronization using int3 breakpoint.
*
* The way it is done:
- * - For each entry in the vector:
+ * - For each entry in the vector:
* - add a int3 trap to the address that will be patched
* - sync cores
* - For each entry in the vector:
@@ -1027,9 +1046,9 @@ NOKPROBE_SYMBOL(poke_int3_handler);
*/
void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries)
{
- int patched_all_but_first = 0;
- unsigned char int3 = 0xcc;
+ unsigned char int3 = INT3_INSN_OPCODE;
unsigned int i;
+ int do_sync;
lockdep_assert_held(&text_mutex);
@@ -1053,16 +1072,16 @@ void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries)
/*
* Second step: update all but the first byte of the patched range.
*/
- for (i = 0; i < nr_entries; i++) {
+ for (do_sync = 0, i = 0; i < nr_entries; i++) {
if (tp[i].len - sizeof(int3) > 0) {
text_poke((char *)tp[i].addr + sizeof(int3),
- (const char *)tp[i].opcode + sizeof(int3),
+ (const char *)tp[i].text + sizeof(int3),
tp[i].len - sizeof(int3));
- patched_all_but_first++;
+ do_sync++;
}
}
- if (patched_all_but_first) {
+ if (do_sync) {
/*
* According to Intel, this core syncing is very likely
* not necessary and we'd be safe even without it. But
@@ -1075,10 +1094,17 @@ void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries)
* Third step: replace the first byte (int3) by the first byte of
* replacing opcode.
*/
- for (i = 0; i < nr_entries; i++)
- text_poke(tp[i].addr, tp[i].opcode, sizeof(int3));
+ for (do_sync = 0, i = 0; i < nr_entries; i++) {
+ if (tp[i].text[0] == INT3_INSN_OPCODE)
+ continue;
+
+ text_poke(tp[i].addr, tp[i].text, sizeof(int3));
+ do_sync++;
+ }
+
+ if (do_sync)
+ on_each_cpu(do_sync_core, NULL, 1);
- on_each_cpu(do_sync_core, NULL, 1);
/*
* sync_core() implies an smp_mb() and orders this store against
* the writing of the new instruction.
@@ -1087,6 +1113,60 @@ void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries)
bp_patching.nr_entries = 0;
}
+void text_poke_loc_init(struct text_poke_loc *tp, void *addr,
+ const void *opcode, size_t len, const void *emulate)
+{
+ struct insn insn;
+
+ if (!opcode)
+ opcode = (void *)tp->text;
+ else
+ memcpy((void *)tp->text, opcode, len);
+
+ if (!emulate)
+ emulate = opcode;
+
+ kernel_insn_init(&insn, emulate, MAX_INSN_SIZE);
+ insn_get_length(&insn);
+
+ BUG_ON(!insn_complete(&insn));
+ BUG_ON(len != insn.length);
+
+ tp->addr = addr;
+ tp->len = len;
+ tp->opcode = insn.opcode.bytes[0];
+
+ switch (tp->opcode) {
+ case INT3_INSN_OPCODE:
+ break;
+
+ case CALL_INSN_OPCODE:
+ case JMP32_INSN_OPCODE:
+ case JMP8_INSN_OPCODE:
+ tp->rel32 = insn.immediate.value;
+ break;
+
+ default: /* assume NOP */
+ switch (len) {
+ case 2: /* NOP2 -- emulate as JMP8+0 */
+ BUG_ON(memcmp(emulate, ideal_nops[len], len));
+ tp->opcode = JMP8_INSN_OPCODE;
+ tp->rel32 = 0;
+ break;
+
+ case 5: /* NOP5 -- emulate as JMP32+0 */
+ BUG_ON(memcmp(emulate, ideal_nops[NOP_ATOMIC5], len));
+ tp->opcode = JMP32_INSN_OPCODE;
+ tp->rel32 = 0;
+ break;
+
+ default: /* unknown instruction */
+ BUG();
+ }
+ break;
+ }
+}
+
/**
* text_poke_bp() -- update instructions on live kernel on SMP
* @addr: address to patch
@@ -1098,20 +1178,10 @@ void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries)
* dynamically allocated memory. This function should be used when it is
* not possible to allocate memory.
*/
-void text_poke_bp(void *addr, const void *opcode, size_t len, void *handler)
+void text_poke_bp(void *addr, const void *opcode, size_t len, const void *emulate)
{
- struct text_poke_loc tp = {
- .detour = handler,
- .addr = addr,
- .len = len,
- };
-
- if (len > POKE_MAX_OPCODE_SIZE) {
- WARN_ONCE(1, "len is larger than %d\n", POKE_MAX_OPCODE_SIZE);
- return;
- }
-
- memcpy((void *)tp.opcode, opcode, len);
+ struct text_poke_loc tp;
+ text_poke_loc_init(&tp, addr, opcode, len, emulate);
text_poke_bp_batch(&tp, 1);
}
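
For reference, the int3_emulate_*() helpers called from the new poke_int3_handler() switch live in <asm/text-patching.h>. They rewrite the trapping pt_regs so that returning from the #BP trap behaves as if the target instruction had already executed. A sketch of their x86-64 shape (an illustration, not the header itself):

static inline void int3_emulate_jmp(struct pt_regs *regs, unsigned long ip)
{
	regs->ip = ip;		/* resume execution at the jump target */
}

static inline void int3_emulate_push(struct pt_regs *regs, unsigned long val)
{
	regs->sp -= sizeof(unsigned long);
	*(unsigned long *)regs->sp = val;
}

static inline void int3_emulate_call(struct pt_regs *regs, unsigned long func)
{
	/* Return address: the byte after the CALL being poked, computed
	 * from the INT3 trap address. */
	int3_emulate_push(regs, regs->ip - INT3_INSN_SIZE + CALL_INSN_SIZE);
	int3_emulate_jmp(regs, func);
}
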
diff --git a/arch/x86/kernel/amd_gart_64.c b/arch/x86/kernel/amd_gart_64.c
index a6ac3712db8b..4bbccb9d16dc 100644
--- a/arch/x86/kernel/amd_gart_64.c
+++ b/arch/x86/kernel/amd_gart_64.c
@@ -510,10 +510,9 @@ static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
iommu_size -= round_up(a, PMD_PAGE_SIZE) - a;
if (iommu_size < 64*1024*1024) {
- pr_warning(
- "PCI-DMA: Warning: Small IOMMU %luMB."
+ pr_warn("PCI-DMA: Warning: Small IOMMU %luMB."
" Consider increasing the AGP aperture in BIOS\n",
- iommu_size >> 20);
+ iommu_size >> 20);
}
return iommu_size;
@@ -665,8 +664,7 @@ static __init int init_amd_gatt(struct agp_kern_info *info)
nommu:
/* Should not happen anymore */
- pr_warning("PCI-DMA: More than 4GB of RAM and no IOMMU\n"
- "falling back to iommu=soft.\n");
+ pr_warn("PCI-DMA: More than 4GB of RAM and no IOMMU - falling back to iommu=soft.\n");
return -1;
}
@@ -733,8 +731,8 @@ int __init gart_iommu_init(void)
!gart_iommu_aperture ||
(no_agp && init_amd_gatt(&info) < 0)) {
if (max_pfn > MAX_DMA32_PFN) {
- pr_warning("More than 4GB of memory but GART IOMMU not available.\n");
- pr_warning("falling back to iommu=soft.\n");
+ pr_warn("More than 4GB of memory but GART IOMMU not available.\n");
+ pr_warn("falling back to iommu=soft.\n");
}
return 0;
}
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 2b0faf86da1b..28446fa6bf18 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -780,8 +780,8 @@ calibrate_by_pmtimer(long deltapm, long *delta, long *deltatsc)
res = (((u64)deltapm) * mult) >> 22;
do_div(res, 1000000);
- pr_warning("APIC calibration not consistent "
- "with PM-Timer: %ldms instead of 100ms\n",(long)res);
+ pr_warn("APIC calibration not consistent "
+ "with PM-Timer: %ldms instead of 100ms\n", (long)res);
/* Correct the lapic counter value */
res = (((u64)(*delta)) * pm_100ms);
@@ -977,7 +977,7 @@ static int __init calibrate_APIC_clock(void)
*/
if (lapic_timer_period < (1000000 / HZ)) {
local_irq_enable();
- pr_warning("APIC frequency too slow, disabling apic timer\n");
+ pr_warn("APIC frequency too slow, disabling apic timer\n");
return -1;
}
@@ -1021,7 +1021,7 @@ static int __init calibrate_APIC_clock(void)
local_irq_enable();
if (levt->features & CLOCK_EVT_FEAT_DUMMY) {
- pr_warning("APIC timer disabled due to verification failure\n");
+ pr_warn("APIC timer disabled due to verification failure\n");
return -1;
}
@@ -1095,8 +1095,8 @@ static void local_apic_timer_interrupt(void)
* spurious.
*/
if (!evt->event_handler) {
- pr_warning("Spurious LAPIC timer interrupt on cpu %d\n",
- smp_processor_id());
+ pr_warn("Spurious LAPIC timer interrupt on cpu %d\n",
+ smp_processor_id());
/* Switch it off */
lapic_timer_shutdown(evt);
return;
@@ -1811,11 +1811,11 @@ static int __init setup_nox2apic(char *str)
int apicid = native_apic_msr_read(APIC_ID);
if (apicid >= 255) {
- pr_warning("Apicid: %08x, cannot enforce nox2apic\n",
- apicid);
+ pr_warn("Apicid: %08x, cannot enforce nox2apic\n",
+ apicid);
return 0;
}
- pr_warning("x2apic already enabled.\n");
+ pr_warn("x2apic already enabled.\n");
__x2apic_disable();
}
setup_clear_cpu_cap(X86_FEATURE_X2APIC);
@@ -1983,7 +1983,7 @@ static int __init apic_verify(void)
*/
features = cpuid_edx(1);
if (!(features & (1 << X86_FEATURE_APIC))) {
- pr_warning("Could not enable APIC!\n");
+ pr_warn("Could not enable APIC!\n");
return -1;
}
set_cpu_cap(&boot_cpu_data, X86_FEATURE_APIC);
@@ -2337,7 +2337,7 @@ static int cpuid_to_apicid[] = {
#ifdef CONFIG_SMP
/**
* apic_id_is_primary_thread - Check whether APIC ID belongs to a primary thread
- * @id: APIC ID to check
+ * @apicid: APIC ID to check
*/
bool apic_id_is_primary_thread(unsigned int apicid)
{
@@ -2410,9 +2410,8 @@ int generic_processor_info(int apicid, int version)
disabled_cpu_apicid == apicid) {
int thiscpu = num_processors + disabled_cpus;
- pr_warning("APIC: Disabling requested cpu."
- " Processor %d/0x%x ignored.\n",
- thiscpu, apicid);
+ pr_warn("APIC: Disabling requested cpu."
+ " Processor %d/0x%x ignored.\n", thiscpu, apicid);
disabled_cpus++;
return -ENODEV;
@@ -2426,8 +2425,7 @@ int generic_processor_info(int apicid, int version)
apicid != boot_cpu_physical_apicid) {
int thiscpu = max + disabled_cpus - 1;
- pr_warning(
- "APIC: NR_CPUS/possible_cpus limit of %i almost"
+ pr_warn("APIC: NR_CPUS/possible_cpus limit of %i almost"
" reached. Keeping one slot for boot cpu."
" Processor %d/0x%x ignored.\n", max, thiscpu, apicid);
@@ -2438,9 +2436,8 @@ int generic_processor_info(int apicid, int version)
if (num_processors >= nr_cpu_ids) {
int thiscpu = max + disabled_cpus;
- pr_warning("APIC: NR_CPUS/possible_cpus limit of %i "
- "reached. Processor %d/0x%x ignored.\n",
- max, thiscpu, apicid);
+ pr_warn("APIC: NR_CPUS/possible_cpus limit of %i reached. "
+ "Processor %d/0x%x ignored.\n", max, thiscpu, apicid);
disabled_cpus++;
return -EINVAL;
@@ -2470,13 +2467,13 @@ int generic_processor_info(int apicid, int version)
* Validate version
*/
if (version == 0x0) {
- pr_warning("BIOS bug: APIC version is 0 for CPU %d/0x%x, fixing up to 0x10\n",
- cpu, apicid);
+ pr_warn("BIOS bug: APIC version is 0 for CPU %d/0x%x, fixing up to 0x10\n",
+ cpu, apicid);
version = 0x10;
}
if (version != boot_cpu_apic_version) {
- pr_warning("BIOS bug: APIC version mismatch, boot CPU: %x, CPU %d: version %x\n",
+ pr_warn("BIOS bug: APIC version mismatch, boot CPU: %x, CPU %d: version %x\n",
boot_cpu_apic_version, cpu, version);
}
@@ -2845,7 +2842,7 @@ static int __init apic_set_verbosity(char *arg)
apic_verbosity = APIC_VERBOSE;
#ifdef CONFIG_X86_64
else {
- pr_warning("APIC Verbosity level %s not recognised"
+ pr_warn("APIC Verbosity level %s not recognised"
" use apic=verbose or apic=debug\n", arg);
return -EINVAL;
}
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index d6af97fd170a..913c88617848 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -1725,19 +1725,20 @@ static bool io_apic_level_ack_pending(struct mp_chip_data *data)
return false;
}
-static inline bool ioapic_irqd_mask(struct irq_data *data)
+static inline bool ioapic_prepare_move(struct irq_data *data)
{
- /* If we are moving the irq we need to mask it */
+ /* If we are moving the IRQ we need to mask it */
if (unlikely(irqd_is_setaffinity_pending(data))) {
- mask_ioapic_irq(data);
+ if (!irqd_irq_masked(data))
+ mask_ioapic_irq(data);
return true;
}
return false;
}
-static inline void ioapic_irqd_unmask(struct irq_data *data, bool masked)
+static inline void ioapic_finish_move(struct irq_data *data, bool moveit)
{
- if (unlikely(masked)) {
+ if (unlikely(moveit)) {
/* Only migrate the irq if the ack has been received.
*
* On rare occasions the broadcast level triggered ack gets
@@ -1766,15 +1767,17 @@ static inline void ioapic_irqd_unmask(struct irq_data *data, bool masked)
*/
if (!io_apic_level_ack_pending(data->chip_data))
irq_move_masked_irq(data);
- unmask_ioapic_irq(data);
+ /* If the IRQ is masked in the core, leave it: */
+ if (!irqd_irq_masked(data))
+ unmask_ioapic_irq(data);
}
}
#else
-static inline bool ioapic_irqd_mask(struct irq_data *data)
+static inline bool ioapic_prepare_move(struct irq_data *data)
{
return false;
}
-static inline void ioapic_irqd_unmask(struct irq_data *data, bool masked)
+static inline void ioapic_finish_move(struct irq_data *data, bool moveit)
{
}
#endif
@@ -1783,11 +1786,11 @@ static void ioapic_ack_level(struct irq_data *irq_data)
{
struct irq_cfg *cfg = irqd_cfg(irq_data);
unsigned long v;
- bool masked;
+ bool moveit;
int i;
irq_complete_move(cfg);
- masked = ioapic_irqd_mask(irq_data);
+ moveit = ioapic_prepare_move(irq_data);
/*
* It appears there is an erratum which affects at least version 0x11
@@ -1842,7 +1845,7 @@ static void ioapic_ack_level(struct irq_data *irq_data)
eoi_ioapic_pin(cfg->vector, irq_data->chip_data);
}
- ioapic_irqd_unmask(irq_data, masked);
+ ioapic_finish_move(irq_data, moveit);
}
static void ioapic_ir_ack_level(struct irq_data *irq_data)
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index e6230af19864..d5b51a740524 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -14,6 +14,8 @@
#include <linux/memory.h>
#include <linux/export.h>
#include <linux/pci.h>
+#include <linux/acpi.h>
+#include <linux/efi.h>
#include <asm/e820/api.h>
#include <asm/uv/uv_mmrs.h>
@@ -25,12 +27,17 @@
static DEFINE_PER_CPU(int, x2apic_extra_bits);
static enum uv_system_type uv_system_type;
-static bool uv_hubless_system;
+static int uv_hubbed_system;
+static int uv_hubless_system;
static u64 gru_start_paddr, gru_end_paddr;
static u64 gru_dist_base, gru_first_node_paddr = -1LL, gru_last_node_paddr;
static u64 gru_dist_lmask, gru_dist_umask;
static union uvh_apicid uvh_apicid;
+/* Unpack OEM/TABLE IDs into NUL-terminated strings */
+static u8 oem_id[ACPI_OEM_ID_SIZE + 1];
+static u8 oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1];
+
/* Information derived from CPUID: */
static struct {
unsigned int apicid_shift;
@@ -248,17 +255,35 @@ static void __init uv_set_apicid_hibit(void)
}
}
-static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
+static void __init uv_stringify(int len, char *to, char *from)
+{
+	/* Relies on 'to' being zero-filled so the result is NUL-terminated */
+ strncpy(to, from, len-1);
+}
+
+static int __init uv_acpi_madt_oem_check(char *_oem_id, char *_oem_table_id)
{
int pnodeid;
int uv_apic;
+ uv_stringify(sizeof(oem_id), oem_id, _oem_id);
+ uv_stringify(sizeof(oem_table_id), oem_table_id, _oem_table_id);
+
if (strncmp(oem_id, "SGI", 3) != 0) {
- if (strncmp(oem_id, "NSGI", 4) == 0) {
- uv_hubless_system = true;
- pr_info("UV: OEM IDs %s/%s, HUBLESS\n",
- oem_id, oem_table_id);
- }
+ if (strncmp(oem_id, "NSGI", 4) != 0)
+ return 0;
+
+ /* UV4 Hubless, CH, (0x11:UV4+Any) */
+ if (strncmp(oem_id, "NSGI4", 5) == 0)
+ uv_hubless_system = 0x11;
+
+ /* UV3 Hubless, UV300/MC990X w/o hub (0x9:UV3+Any) */
+ else
+ uv_hubless_system = 0x9;
+
+ pr_info("UV: OEM IDs %s/%s, HUBLESS(0x%x)\n",
+ oem_id, oem_table_id, uv_hubless_system);
+
return 0;
}
@@ -286,6 +311,24 @@ static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
if (uv_hub_info->hub_revision == 0)
goto badbios;
+ switch (uv_hub_info->hub_revision) {
+ case UV4_HUB_REVISION_BASE:
+ uv_hubbed_system = 0x11;
+ break;
+
+ case UV3_HUB_REVISION_BASE:
+ uv_hubbed_system = 0x9;
+ break;
+
+ case UV2_HUB_REVISION_BASE:
+ uv_hubbed_system = 0x5;
+ break;
+
+ case UV1_HUB_REVISION_BASE:
+ uv_hubbed_system = 0x3;
+ break;
+ }
+
pnodeid = early_get_pnodeid();
early_get_apic_socketid_shift();
@@ -336,9 +379,15 @@ int is_uv_system(void)
}
EXPORT_SYMBOL_GPL(is_uv_system);
-int is_uv_hubless(void)
+int is_uv_hubbed(int uvtype)
+{
+ return (uv_hubbed_system & uvtype);
+}
+EXPORT_SYMBOL_GPL(is_uv_hubbed);
+
+int is_uv_hubless(int uvtype)
{
- return uv_hubless_system;
+ return (uv_hubless_system & uvtype);
}
EXPORT_SYMBOL_GPL(is_uv_hubless);
@@ -1255,7 +1304,8 @@ static int __init decode_uv_systab(void)
struct uv_systab *st;
int i;
- if (uv_hub_info->hub_revision < UV4_HUB_REVISION_BASE)
+	/* If the system is UV3 or lower, there is no extended UVsystab */
+ if (is_uv_hubbed(0xfffffe) < uv(4) && is_uv_hubless(0xfffffe) < uv(4))
return 0; /* No extended UVsystab required */
st = uv_systab;
@@ -1434,6 +1484,103 @@ static void __init build_socket_tables(void)
}
}
+/* Check which reboot to use */
+static void check_efi_reboot(void)
+{
+ /* If EFI reboot not available, use ACPI reboot */
+ if (!efi_enabled(EFI_BOOT))
+ reboot_type = BOOT_ACPI;
+}
+
+/* Set up user procfs files */
+static int proc_hubbed_show(struct seq_file *file, void *data)
+{
+ seq_printf(file, "0x%x\n", uv_hubbed_system);
+ return 0;
+}
+
+static int proc_hubless_show(struct seq_file *file, void *data)
+{
+ seq_printf(file, "0x%x\n", uv_hubless_system);
+ return 0;
+}
+
+static int proc_oemid_show(struct seq_file *file, void *data)
+{
+ seq_printf(file, "%s/%s\n", oem_id, oem_table_id);
+ return 0;
+}
+
+static int proc_hubbed_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, proc_hubbed_show, (void *)NULL);
+}
+
+static int proc_hubless_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, proc_hubless_show, (void *)NULL);
+}
+
+static int proc_oemid_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, proc_oemid_show, (void *)NULL);
+}
+
+/* (struct is non-const because the open function is set at runtime) */
+static struct file_operations proc_version_fops = {
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static const struct file_operations proc_oemid_fops = {
+ .open = proc_oemid_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static __init void uv_setup_proc_files(int hubless)
+{
+ struct proc_dir_entry *pde;
+ char *name = hubless ? "hubless" : "hubbed";
+
+ pde = proc_mkdir(UV_PROC_NODE, NULL);
+ proc_create("oemid", 0, pde, &proc_oemid_fops);
+ proc_create(name, 0, pde, &proc_version_fops);
+ if (hubless)
+ proc_version_fops.open = proc_hubless_open;
+ else
+ proc_version_fops.open = proc_hubbed_open;
+}
+
+/* Initialize UV hubless systems */
+static __init int uv_system_init_hubless(void)
+{
+ int rc;
+
+ /* Setup PCH NMI handler */
+ uv_nmi_setup_hubless();
+
+ /* Init kernel/BIOS interface */
+ rc = uv_bios_init();
+ if (rc < 0)
+ return rc;
+
+ /* Process UVsystab */
+ rc = decode_uv_systab();
+ if (rc < 0)
+ return rc;
+
+ /* Create user access node */
+ if (rc >= 0)
+ uv_setup_proc_files(1);
+
+ check_efi_reboot();
+
+ return rc;
+}
+
static void __init uv_system_init_hub(void)
{
struct uv_hub_info_s hub_info = {0};
@@ -1559,32 +1706,27 @@ static void __init uv_system_init_hub(void)
uv_nmi_setup();
uv_cpu_init();
uv_scir_register_cpu_notifier();
- proc_mkdir("sgi_uv", NULL);
+ uv_setup_proc_files(0);
/* Register Legacy VGA I/O redirection handler: */
pci_register_set_vga_state(uv_set_vga_state);
- /*
- * For a kdump kernel the reset must be BOOT_ACPI, not BOOT_EFI, as
- * EFI is not enabled in the kdump kernel:
- */
- if (is_kdump_kernel())
- reboot_type = BOOT_ACPI;
+ check_efi_reboot();
}
/*
- * There is a small amount of UV specific code needed to initialize a
- * UV system that does not have a "UV HUB" (referred to as "hubless").
+ * There is a different code path needed to initialize a UV system that does
+ * not have a "UV HUB" (referred to as "hubless").
*/
void __init uv_system_init(void)
{
- if (likely(!is_uv_system() && !is_uv_hubless()))
+ if (likely(!is_uv_system() && !is_uv_hubless(1)))
return;
if (is_uv_system())
uv_system_init_hub();
else
- uv_nmi_setup_hubless();
+ uv_system_init_hubless();
}
apic_driver(apic_x2apic_uv_x);
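
A side note on the mutable proc_version_fops above: patching .open at runtime works because only one of the "hubbed"/"hubless" files ever exists per boot, but the conventional shape would be one const file_operations per file. A functionally equivalent sketch:

static const struct file_operations proc_hubbed_fops = {
	.open		= proc_hubbed_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static const struct file_operations proc_hubless_fops = {
	.open		= proc_hubless_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* then: proc_create(name, 0, pde,
 *		     hubless ? &proc_hubless_fops : &proc_hubbed_fops); */

Reading the resulting file, e.g. /proc/sgi_uv/hubbed, prints the same 0x%x bitmask that is_uv_hubbed() tests against (0x11 on a UV4 hub).
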
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index d7a1e5a9331c..890f60083eca 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -30,7 +30,7 @@ obj-$(CONFIG_PROC_FS) += proc.o
obj-$(CONFIG_X86_FEATURE_NAMES) += capflags.o powerflags.o
ifdef CONFIG_CPU_SUP_INTEL
-obj-y += intel.o intel_pconfig.o
+obj-y += intel.o intel_pconfig.o tsx.o
obj-$(CONFIG_PM) += intel_epb.o
endif
obj-$(CONFIG_CPU_SUP_AMD) += amd.o
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 91c2561b905f..8bf64899f56a 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -39,6 +39,8 @@ static void __init spectre_v2_select_mitigation(void);
static void __init ssb_select_mitigation(void);
static void __init l1tf_select_mitigation(void);
static void __init mds_select_mitigation(void);
+static void __init mds_print_mitigation(void);
+static void __init taa_select_mitigation(void);
/* The base value of the SPEC_CTRL MSR that always has to be preserved. */
u64 x86_spec_ctrl_base;
@@ -105,6 +107,13 @@ void __init check_bugs(void)
ssb_select_mitigation();
l1tf_select_mitigation();
mds_select_mitigation();
+ taa_select_mitigation();
+
+ /*
+	 * As the MDS and TAA mitigations are inter-related, defer printing
+	 * the MDS mitigation until after TAA mitigation selection is done.
+ */
+ mds_print_mitigation();
arch_smt_update();
@@ -243,6 +252,12 @@ static void __init mds_select_mitigation(void)
(mds_nosmt || cpu_mitigations_auto_nosmt()))
cpu_smt_disable(false);
}
+}
+
+static void __init mds_print_mitigation(void)
+{
+ if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off())
+ return;
pr_info("%s\n", mds_strings[mds_mitigation]);
}
@@ -269,6 +284,113 @@ static int __init mds_cmdline(char *str)
early_param("mds", mds_cmdline);
#undef pr_fmt
+#define pr_fmt(fmt) "TAA: " fmt
+
+/* Default mitigation for TAA-affected CPUs */
+static enum taa_mitigations taa_mitigation __ro_after_init = TAA_MITIGATION_VERW;
+static bool taa_nosmt __ro_after_init;
+
+static const char * const taa_strings[] = {
+ [TAA_MITIGATION_OFF] = "Vulnerable",
+ [TAA_MITIGATION_UCODE_NEEDED] = "Vulnerable: Clear CPU buffers attempted, no microcode",
+ [TAA_MITIGATION_VERW] = "Mitigation: Clear CPU buffers",
+ [TAA_MITIGATION_TSX_DISABLED] = "Mitigation: TSX disabled",
+};
+
+static void __init taa_select_mitigation(void)
+{
+ u64 ia32_cap;
+
+ if (!boot_cpu_has_bug(X86_BUG_TAA)) {
+ taa_mitigation = TAA_MITIGATION_OFF;
+ return;
+ }
+
+ /* TSX previously disabled by tsx=off */
+ if (!boot_cpu_has(X86_FEATURE_RTM)) {
+ taa_mitigation = TAA_MITIGATION_TSX_DISABLED;
+ goto out;
+ }
+
+ if (cpu_mitigations_off()) {
+ taa_mitigation = TAA_MITIGATION_OFF;
+ return;
+ }
+
+ /*
+ * TAA mitigation via VERW is turned off if both
+ * tsx_async_abort=off and mds=off are specified.
+ */
+ if (taa_mitigation == TAA_MITIGATION_OFF &&
+ mds_mitigation == MDS_MITIGATION_OFF)
+ goto out;
+
+ if (boot_cpu_has(X86_FEATURE_MD_CLEAR))
+ taa_mitigation = TAA_MITIGATION_VERW;
+ else
+ taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
+
+ /*
+ * VERW doesn't clear the CPU buffers when MD_CLEAR=1 and MDS_NO=1.
+ * A microcode update fixes this behavior to clear CPU buffers. It also
+ * adds support for MSR_IA32_TSX_CTRL which is enumerated by the
+ * ARCH_CAP_TSX_CTRL_MSR bit.
+ *
+ * On MDS_NO=1 CPUs if ARCH_CAP_TSX_CTRL_MSR is not set, microcode
+ * update is required.
+ */
+ ia32_cap = x86_read_arch_cap_msr();
+ if ( (ia32_cap & ARCH_CAP_MDS_NO) &&
+ !(ia32_cap & ARCH_CAP_TSX_CTRL_MSR))
+ taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
+
+ /*
+	 * TSX is enabled, so select the alternate mitigation for TAA, which
+	 * is the same as for MDS. Enable the MDS static branch to clear
+	 * CPU buffers.
+	 *
+	 * For guests that can't determine whether the correct microcode is
+	 * present on the host, enable the mitigation for UCODE_NEEDED as well.
+ */
+ static_branch_enable(&mds_user_clear);
+
+ if (taa_nosmt || cpu_mitigations_auto_nosmt())
+ cpu_smt_disable(false);
+
+ /*
+ * Update MDS mitigation, if necessary, as the mds_user_clear is
+ * now enabled for TAA mitigation.
+ */
+ if (mds_mitigation == MDS_MITIGATION_OFF &&
+ boot_cpu_has_bug(X86_BUG_MDS)) {
+ mds_mitigation = MDS_MITIGATION_FULL;
+ mds_select_mitigation();
+ }
+out:
+ pr_info("%s\n", taa_strings[taa_mitigation]);
+}
+
+static int __init tsx_async_abort_parse_cmdline(char *str)
+{
+ if (!boot_cpu_has_bug(X86_BUG_TAA))
+ return 0;
+
+ if (!str)
+ return -EINVAL;
+
+ if (!strcmp(str, "off")) {
+ taa_mitigation = TAA_MITIGATION_OFF;
+ } else if (!strcmp(str, "full")) {
+ taa_mitigation = TAA_MITIGATION_VERW;
+ } else if (!strcmp(str, "full,nosmt")) {
+ taa_mitigation = TAA_MITIGATION_VERW;
+ taa_nosmt = true;
+ }
+
+ return 0;
+}
+early_param("tsx_async_abort", tsx_async_abort_parse_cmdline);
+
+#undef pr_fmt
#define pr_fmt(fmt) "Spectre V1 : " fmt
enum spectre_v1_mitigation {
@@ -786,13 +908,10 @@ static void update_mds_branch_idle(void)
}
#define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n"
+#define TAA_MSG_SMT "TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.\n"
void cpu_bugs_smt_update(void)
{
- /* Enhanced IBRS implies STIBP. No update required. */
- if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
- return;
-
mutex_lock(&spec_ctrl_mutex);
switch (spectre_v2_user) {
@@ -819,6 +938,17 @@ void cpu_bugs_smt_update(void)
break;
}
+ switch (taa_mitigation) {
+ case TAA_MITIGATION_VERW:
+ case TAA_MITIGATION_UCODE_NEEDED:
+ if (sched_smt_active())
+ pr_warn_once(TAA_MSG_SMT);
+ break;
+ case TAA_MITIGATION_TSX_DISABLED:
+ case TAA_MITIGATION_OFF:
+ break;
+ }
+
mutex_unlock(&spec_ctrl_mutex);
}
@@ -1149,6 +1279,9 @@ void x86_spec_ctrl_setup_ap(void)
x86_amd_ssb_disable();
}
+bool itlb_multihit_kvm_mitigation;
+EXPORT_SYMBOL_GPL(itlb_multihit_kvm_mitigation);
+
#undef pr_fmt
#define pr_fmt(fmt) "L1TF: " fmt
@@ -1304,11 +1437,24 @@ static ssize_t l1tf_show_state(char *buf)
l1tf_vmx_states[l1tf_vmx_mitigation],
sched_smt_active() ? "vulnerable" : "disabled");
}
+
+static ssize_t itlb_multihit_show_state(char *buf)
+{
+ if (itlb_multihit_kvm_mitigation)
+ return sprintf(buf, "KVM: Mitigation: Split huge pages\n");
+ else
+ return sprintf(buf, "KVM: Vulnerable\n");
+}
#else
static ssize_t l1tf_show_state(char *buf)
{
return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);
}
+
+static ssize_t itlb_multihit_show_state(char *buf)
+{
+ return sprintf(buf, "Processor vulnerable\n");
+}
#endif
static ssize_t mds_show_state(char *buf)
@@ -1328,6 +1474,21 @@ static ssize_t mds_show_state(char *buf)
sched_smt_active() ? "vulnerable" : "disabled");
}
+static ssize_t tsx_async_abort_show_state(char *buf)
+{
+ if ((taa_mitigation == TAA_MITIGATION_TSX_DISABLED) ||
+ (taa_mitigation == TAA_MITIGATION_OFF))
+ return sprintf(buf, "%s\n", taa_strings[taa_mitigation]);
+
+ if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
+ return sprintf(buf, "%s; SMT Host state unknown\n",
+ taa_strings[taa_mitigation]);
+ }
+
+ return sprintf(buf, "%s; SMT %s\n", taa_strings[taa_mitigation],
+ sched_smt_active() ? "vulnerable" : "disabled");
+}
+
static char *stibp_state(void)
{
if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
@@ -1398,6 +1559,12 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
case X86_BUG_MDS:
return mds_show_state(buf);
+ case X86_BUG_TAA:
+ return tsx_async_abort_show_state(buf);
+
+ case X86_BUG_ITLB_MULTIHIT:
+ return itlb_multihit_show_state(buf);
+
default:
break;
}
@@ -1434,4 +1601,14 @@ ssize_t cpu_show_mds(struct device *dev, struct device_attribute *attr, char *bu
{
return cpu_show_common(dev, attr, buf, X86_BUG_MDS);
}
+
+ssize_t cpu_show_tsx_async_abort(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return cpu_show_common(dev, attr, buf, X86_BUG_TAA);
+}
+
+ssize_t cpu_show_itlb_multihit(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return cpu_show_common(dev, attr, buf, X86_BUG_ITLB_MULTIHIT);
+}
#endif
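
The new cpu_show_tsx_async_abort() and cpu_show_itlb_multihit() handlers back sysfs vulnerability files; the hookup lives in drivers/base/cpu.c rather than in this hunk, and presumably follows the existing vulnerabilities pattern, roughly:

static DEVICE_ATTR(tsx_async_abort, 0444, cpu_show_tsx_async_abort, NULL);
static DEVICE_ATTR(itlb_multihit, 0444, cpu_show_itlb_multihit, NULL);

static struct attribute *cpu_root_vulnerabilities_attrs[] = {
	/* ...existing entries (meltdown, mds, ...)... */
	&dev_attr_tsx_async_abort.attr,
	&dev_attr_itlb_multihit.attr,
	NULL
};

Userspace then reads /sys/devices/system/cpu/vulnerabilities/tsx_async_abort and gets one of the taa_strings above, suffixed with the SMT state.
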
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 9ae7d1bcd4f4..baa2fed8deb6 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -53,10 +53,7 @@
#include <asm/microcode_intel.h>
#include <asm/intel-family.h>
#include <asm/cpu_device_id.h>
-
-#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/uv/uv.h>
-#endif
#include "cpu.h"
@@ -565,8 +562,9 @@ static const char *table_lookup_model(struct cpuinfo_x86 *c)
return NULL; /* Not found */
}
-__u32 cpu_caps_cleared[NCAPINTS + NBUGINTS];
-__u32 cpu_caps_set[NCAPINTS + NBUGINTS];
+/* Aligned to unsigned long to avoid split lock in atomic bitmap ops */
+__u32 cpu_caps_cleared[NCAPINTS + NBUGINTS] __aligned(sizeof(unsigned long));
+__u32 cpu_caps_set[NCAPINTS + NBUGINTS] __aligned(sizeof(unsigned long));
void load_percpu_segment(int cpu)
{
@@ -1016,13 +1014,14 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
#endif
}
-#define NO_SPECULATION BIT(0)
-#define NO_MELTDOWN BIT(1)
-#define NO_SSB BIT(2)
-#define NO_L1TF BIT(3)
-#define NO_MDS BIT(4)
-#define MSBDS_ONLY BIT(5)
-#define NO_SWAPGS BIT(6)
+#define NO_SPECULATION BIT(0)
+#define NO_MELTDOWN BIT(1)
+#define NO_SSB BIT(2)
+#define NO_L1TF BIT(3)
+#define NO_MDS BIT(4)
+#define MSBDS_ONLY BIT(5)
+#define NO_SWAPGS BIT(6)
+#define NO_ITLB_MULTIHIT BIT(7)
#define VULNWL(_vendor, _family, _model, _whitelist) \
{ X86_VENDOR_##_vendor, _family, _model, X86_FEATURE_ANY, _whitelist }
@@ -1043,27 +1042,27 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
VULNWL(NSC, 5, X86_MODEL_ANY, NO_SPECULATION),
/* Intel Family 6 */
- VULNWL_INTEL(ATOM_SALTWELL, NO_SPECULATION),
- VULNWL_INTEL(ATOM_SALTWELL_TABLET, NO_SPECULATION),
- VULNWL_INTEL(ATOM_SALTWELL_MID, NO_SPECULATION),
- VULNWL_INTEL(ATOM_BONNELL, NO_SPECULATION),
- VULNWL_INTEL(ATOM_BONNELL_MID, NO_SPECULATION),
-
- VULNWL_INTEL(ATOM_SILVERMONT, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
- VULNWL_INTEL(ATOM_SILVERMONT_D, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
- VULNWL_INTEL(ATOM_SILVERMONT_MID, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
- VULNWL_INTEL(ATOM_AIRMONT, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
- VULNWL_INTEL(XEON_PHI_KNL, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
- VULNWL_INTEL(XEON_PHI_KNM, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
+ VULNWL_INTEL(ATOM_SALTWELL, NO_SPECULATION | NO_ITLB_MULTIHIT),
+ VULNWL_INTEL(ATOM_SALTWELL_TABLET, NO_SPECULATION | NO_ITLB_MULTIHIT),
+ VULNWL_INTEL(ATOM_SALTWELL_MID, NO_SPECULATION | NO_ITLB_MULTIHIT),
+ VULNWL_INTEL(ATOM_BONNELL, NO_SPECULATION | NO_ITLB_MULTIHIT),
+ VULNWL_INTEL(ATOM_BONNELL_MID, NO_SPECULATION | NO_ITLB_MULTIHIT),
+
+ VULNWL_INTEL(ATOM_SILVERMONT, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
+ VULNWL_INTEL(ATOM_SILVERMONT_D, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
+ VULNWL_INTEL(ATOM_SILVERMONT_MID, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
+ VULNWL_INTEL(ATOM_AIRMONT, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
+ VULNWL_INTEL(XEON_PHI_KNL, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
+ VULNWL_INTEL(XEON_PHI_KNM, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
VULNWL_INTEL(CORE_YONAH, NO_SSB),
- VULNWL_INTEL(ATOM_AIRMONT_MID, NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
- VULNWL_INTEL(ATOM_AIRMONT_NP, NO_L1TF | NO_SWAPGS),
+ VULNWL_INTEL(ATOM_AIRMONT_MID, NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
+ VULNWL_INTEL(ATOM_AIRMONT_NP, NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
- VULNWL_INTEL(ATOM_GOLDMONT, NO_MDS | NO_L1TF | NO_SWAPGS),
- VULNWL_INTEL(ATOM_GOLDMONT_D, NO_MDS | NO_L1TF | NO_SWAPGS),
- VULNWL_INTEL(ATOM_GOLDMONT_PLUS, NO_MDS | NO_L1TF | NO_SWAPGS),
+ VULNWL_INTEL(ATOM_GOLDMONT, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
+ VULNWL_INTEL(ATOM_GOLDMONT_D, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
+ VULNWL_INTEL(ATOM_GOLDMONT_PLUS, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
/*
* Technically, swapgs isn't serializing on AMD (despite it previously
@@ -1073,15 +1072,17 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
* good enough for our purposes.
*/
+ VULNWL_INTEL(ATOM_TREMONT_D, NO_ITLB_MULTIHIT),
+
/* AMD Family 0xf - 0x12 */
- VULNWL_AMD(0x0f, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS),
- VULNWL_AMD(0x10, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS),
- VULNWL_AMD(0x11, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS),
- VULNWL_AMD(0x12, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS),
+ VULNWL_AMD(0x0f, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
+ VULNWL_AMD(0x10, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
+ VULNWL_AMD(0x11, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
+ VULNWL_AMD(0x12, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
/* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */
- VULNWL_AMD(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS),
- VULNWL_HYGON(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS),
+ VULNWL_AMD(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
+ VULNWL_HYGON(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
{}
};
@@ -1092,19 +1093,30 @@ static bool __init cpu_matches(unsigned long which)
return m && !!(m->driver_data & which);
}
-static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
+u64 x86_read_arch_cap_msr(void)
{
u64 ia32_cap = 0;
+ if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES))
+ rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
+
+ return ia32_cap;
+}
+
+static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
+{
+ u64 ia32_cap = x86_read_arch_cap_msr();
+
+	/* Set ITLB_MULTIHIT bug if the CPU is not in the whitelist and not mitigated */
+ if (!cpu_matches(NO_ITLB_MULTIHIT) && !(ia32_cap & ARCH_CAP_PSCHANGE_MC_NO))
+ setup_force_cpu_bug(X86_BUG_ITLB_MULTIHIT);
+
if (cpu_matches(NO_SPECULATION))
return;
setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
- if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES))
- rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
-
if (!cpu_matches(NO_SSB) && !(ia32_cap & ARCH_CAP_SSB_NO) &&
!cpu_has(c, X86_FEATURE_AMD_SSB_NO))
setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
@@ -1121,6 +1133,21 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
if (!cpu_matches(NO_SWAPGS))
setup_force_cpu_bug(X86_BUG_SWAPGS);
+ /*
+	 * When the CPU is not mitigated for TAA (TAA_NO=0), set the TAA bug
+	 * when:
+	 * - TSX is supported, or
+	 * - TSX_CTRL is present
+	 *
+	 * The TSX_CTRL check is needed for cases where TSX could be disabled
+	 * before the kernel boots, e.g. by kexec.
+	 * The TSX_CTRL check alone is not sufficient when the microcode
+	 * update is not present, or when running as a guest that doesn't
+	 * get TSX_CTRL.
+ */
+ if (!(ia32_cap & ARCH_CAP_TAA_NO) &&
+ (cpu_has(c, X86_FEATURE_RTM) ||
+ (ia32_cap & ARCH_CAP_TSX_CTRL_MSR)))
+ setup_force_cpu_bug(X86_BUG_TAA);
+
if (cpu_matches(NO_MELTDOWN))
return;
@@ -1554,6 +1581,8 @@ void __init identify_boot_cpu(void)
#endif
cpu_detect_tlb(&boot_cpu_data);
setup_cr_pinning();
+
+ tsx_init();
}
void identify_secondary_cpu(struct cpuinfo_x86 *c)
@@ -1749,7 +1778,7 @@ static void wait_for_master_cpu(int cpu)
}
#ifdef CONFIG_X86_64
-static void setup_getcpu(int cpu)
+static inline void setup_getcpu(int cpu)
{
unsigned long cpudata = vdso_encode_cpunode(cpu, early_cpu_to_node(cpu));
struct desc_struct d = { };
@@ -1769,7 +1798,59 @@ static void setup_getcpu(int cpu)
write_gdt_entry(get_cpu_gdt_rw(cpu), GDT_ENTRY_CPUNODE, &d, DESCTYPE_S);
}
+
+static inline void ucode_cpu_init(int cpu)
+{
+ if (cpu)
+ load_ucode_ap();
+}
+
+static inline void tss_setup_ist(struct tss_struct *tss)
+{
+ /* Set up the per-CPU TSS IST stacks */
+ tss->x86_tss.ist[IST_INDEX_DF] = __this_cpu_ist_top_va(DF);
+ tss->x86_tss.ist[IST_INDEX_NMI] = __this_cpu_ist_top_va(NMI);
+ tss->x86_tss.ist[IST_INDEX_DB] = __this_cpu_ist_top_va(DB);
+ tss->x86_tss.ist[IST_INDEX_MCE] = __this_cpu_ist_top_va(MCE);
+}
+
+static inline void gdt_setup_doublefault_tss(int cpu) { }
+
+#else /* CONFIG_X86_64 */
+
+static inline void setup_getcpu(int cpu) { }
+
+static inline void ucode_cpu_init(int cpu)
+{
+ show_ucode_info_early();
+}
+
+static inline void tss_setup_ist(struct tss_struct *tss) { }
+
+static inline void gdt_setup_doublefault_tss(int cpu)
+{
+#ifdef CONFIG_DOUBLEFAULT
+ /* Set up the doublefault TSS pointer in the GDT */
+ __set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
+#endif
+}
+#endif /* !CONFIG_X86_64 */
+
+static inline void tss_setup_io_bitmap(struct tss_struct *tss)
+{
+ tss->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET_INVALID;
+
+#ifdef CONFIG_X86_IOPL_IOPERM
+ tss->io_bitmap.prev_max = 0;
+ tss->io_bitmap.prev_sequence = 0;
+ memset(tss->io_bitmap.bitmap, 0xff, sizeof(tss->io_bitmap.bitmap));
+ /*
+	 * Invalidate the extra array entry past the end of the
+	 * all-permission bitmap, as required by the hardware.
+ */
+ tss->io_bitmap.mapall[IO_BITMAP_LONGS] = ~0UL;
#endif
+}
/*
* cpu_init() initializes state that is per-CPU. Some data is already
@@ -1777,21 +1858,15 @@ static void setup_getcpu(int cpu)
* and IDT. We reload them nevertheless, this function acts as a
* 'CPU state barrier', nothing should get across.
*/
-#ifdef CONFIG_X86_64
-
void cpu_init(void)
{
+ struct tss_struct *tss = this_cpu_ptr(&cpu_tss_rw);
+ struct task_struct *cur = current;
int cpu = raw_smp_processor_id();
- struct task_struct *me;
- struct tss_struct *t;
- int i;
wait_for_master_cpu(cpu);
- if (cpu)
- load_ucode_ap();
-
- t = &per_cpu(cpu_tss_rw, cpu);
+ ucode_cpu_init(cpu);
#ifdef CONFIG_NUMA
if (this_cpu_read(numa_node) == 0 &&
@@ -1800,63 +1875,47 @@ void cpu_init(void)
#endif
setup_getcpu(cpu);
- me = current;
-
pr_debug("Initializing CPU#%d\n", cpu);
- cr4_clear_bits(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
+ if (IS_ENABLED(CONFIG_X86_64) || cpu_feature_enabled(X86_FEATURE_VME) ||
+ boot_cpu_has(X86_FEATURE_TSC) || boot_cpu_has(X86_FEATURE_DE))
+ cr4_clear_bits(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
/*
* Initialize the per-CPU GDT with the boot GDT,
* and set up the GDT descriptor:
*/
-
switch_to_new_gdt(cpu);
- loadsegment(fs, 0);
-
load_current_idt();
- memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
- syscall_init();
-
- wrmsrl(MSR_FS_BASE, 0);
- wrmsrl(MSR_KERNEL_GS_BASE, 0);
- barrier();
+ if (IS_ENABLED(CONFIG_X86_64)) {
+ loadsegment(fs, 0);
+ memset(cur->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
+ syscall_init();
- x86_configure_nx();
- x2apic_setup();
+ wrmsrl(MSR_FS_BASE, 0);
+ wrmsrl(MSR_KERNEL_GS_BASE, 0);
+ barrier();
- /*
- * set up and load the per-CPU TSS
- */
- if (!t->x86_tss.ist[0]) {
- t->x86_tss.ist[IST_INDEX_DF] = __this_cpu_ist_top_va(DF);
- t->x86_tss.ist[IST_INDEX_NMI] = __this_cpu_ist_top_va(NMI);
- t->x86_tss.ist[IST_INDEX_DB] = __this_cpu_ist_top_va(DB);
- t->x86_tss.ist[IST_INDEX_MCE] = __this_cpu_ist_top_va(MCE);
+ x2apic_setup();
}
- t->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET;
-
- /*
- * <= is required because the CPU will access up to
- * 8 bits beyond the end of the IO permission bitmap.
- */
- for (i = 0; i <= IO_BITMAP_LONGS; i++)
- t->io_bitmap[i] = ~0UL;
-
mmgrab(&init_mm);
- me->active_mm = &init_mm;
- BUG_ON(me->mm);
+ cur->active_mm = &init_mm;
+ BUG_ON(cur->mm);
initialize_tlbstate_and_flush();
- enter_lazy_tlb(&init_mm, me);
+ enter_lazy_tlb(&init_mm, cur);
- /*
- * Initialize the TSS. sp0 points to the entry trampoline stack
- * regardless of what task is running.
- */
+ /* Initialize the TSS. */
+ tss_setup_ist(tss);
+ tss_setup_io_bitmap(tss);
set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);
+
load_TR_desc();
+ /*
+ * sp0 points to the entry trampoline stack regardless of what task
+ * is running.
+ */
load_sp0((unsigned long)(cpu_entry_stack(cpu) + 1));
load_mm_ldt(&init_mm);
@@ -1864,6 +1923,8 @@ void cpu_init(void)
clear_all_debug_regs();
dbg_restore_debug_regs();
+ gdt_setup_doublefault_tss(cpu);
+
fpu__init_cpu();
if (is_uv_system())
@@ -1872,63 +1933,6 @@ void cpu_init(void)
load_fixmap_gdt(cpu);
}
-#else
-
-void cpu_init(void)
-{
- int cpu = smp_processor_id();
- struct task_struct *curr = current;
- struct tss_struct *t = &per_cpu(cpu_tss_rw, cpu);
-
- wait_for_master_cpu(cpu);
-
- show_ucode_info_early();
-
- pr_info("Initializing CPU#%d\n", cpu);
-
- if (cpu_feature_enabled(X86_FEATURE_VME) ||
- boot_cpu_has(X86_FEATURE_TSC) ||
- boot_cpu_has(X86_FEATURE_DE))
- cr4_clear_bits(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
-
- load_current_idt();
- switch_to_new_gdt(cpu);
-
- /*
- * Set up and load the per-CPU TSS and LDT
- */
- mmgrab(&init_mm);
- curr->active_mm = &init_mm;
- BUG_ON(curr->mm);
- initialize_tlbstate_and_flush();
- enter_lazy_tlb(&init_mm, curr);
-
- /*
- * Initialize the TSS. sp0 points to the entry trampoline stack
- * regardless of what task is running.
- */
- set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);
- load_TR_desc();
- load_sp0((unsigned long)(cpu_entry_stack(cpu) + 1));
-
- load_mm_ldt(&init_mm);
-
- t->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET;
-
-#ifdef CONFIG_DOUBLEFAULT
- /* Set up doublefault TSS pointer in the GDT */
- __set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
-#endif
-
- clear_all_debug_regs();
- dbg_restore_debug_regs();
-
- fpu__init_cpu();
-
- load_fixmap_gdt(cpu);
-}
-#endif
-
/*
* The microcode loader calls this upon late microcode load to recheck features,
* only when microcode has been updated. Caller holds microcode_mutex and CPU
diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h
index c0e2407abdd6..38ab6e115eac 100644
--- a/arch/x86/kernel/cpu/cpu.h
+++ b/arch/x86/kernel/cpu/cpu.h
@@ -44,6 +44,22 @@ struct _tlb_table {
extern const struct cpu_dev *const __x86_cpu_dev_start[],
*const __x86_cpu_dev_end[];
+#ifdef CONFIG_CPU_SUP_INTEL
+enum tsx_ctrl_states {
+ TSX_CTRL_ENABLE,
+ TSX_CTRL_DISABLE,
+ TSX_CTRL_NOT_SUPPORTED,
+};
+
+extern __ro_after_init enum tsx_ctrl_states tsx_ctrl_state;
+
+extern void __init tsx_init(void);
+extern void tsx_enable(void);
+extern void tsx_disable(void);
+#else
+static inline void tsx_init(void) { }
+#endif /* CONFIG_CPU_SUP_INTEL */
+
extern void get_cpu_cap(struct cpuinfo_x86 *c);
extern void get_cpu_address_sizes(struct cpuinfo_x86 *c);
extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c);
@@ -62,4 +78,6 @@ unsigned int aperfmperf_get_khz(int cpu);
extern void x86_spec_ctrl_setup_ap(void);
+extern u64 x86_read_arch_cap_msr(void);
+
#endif /* ARCH_X86_CPU_H */
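
tsx_init(), tsx_enable() and tsx_disable() are implemented in the new arch/x86/kernel/cpu/tsx.c added to the Makefile earlier in this patch. The disable path is, in rough sketch, a read-modify-write of the new IA32_TSX_CTRL MSR (bit names as used by this series; an illustration, not the file itself):

void tsx_disable(void)
{
	u64 tsx;

	rdmsrl(MSR_IA32_TSX_CTRL, tsx);
	/* Force all RTM transactions to abort immediately */
	tsx |= TSX_CTRL_RTM_DISABLE;
	/* Also hide RTM/HLE from CPUID so applications stop using them */
	tsx |= TSX_CTRL_CPUID_CLEAR;
	wrmsrl(MSR_IA32_TSX_CTRL, tsx);
}
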
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index c2fdc00df163..4a900804a023 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -762,6 +762,11 @@ static void init_intel(struct cpuinfo_x86 *c)
detect_tme(c);
init_intel_misc_features(c);
+
+ if (tsx_ctrl_state == TSX_CTRL_ENABLE)
+ tsx_enable();
+ if (tsx_ctrl_state == TSX_CTRL_DISABLE)
+ tsx_disable();
}
#ifdef CONFIG_X86_32
@@ -814,7 +819,7 @@ static const struct _tlb_table intel_tlb_table[] = {
{ 0x04, TLB_DATA_4M, 8, " TLB_DATA 4 MByte pages, 4-way set associative" },
{ 0x05, TLB_DATA_4M, 32, " TLB_DATA 4 MByte pages, 4-way set associative" },
{ 0x0b, TLB_INST_4M, 4, " TLB_INST 4 MByte pages, 4-way set associative" },
- { 0x4f, TLB_INST_4K, 32, " TLB_INST 4 KByte pages */" },
+ { 0x4f, TLB_INST_4K, 32, " TLB_INST 4 KByte pages" },
{ 0x50, TLB_INST_ALL, 64, " TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
{ 0x51, TLB_INST_ALL, 128, " TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
{ 0x52, TLB_INST_ALL, 256, " TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
@@ -842,7 +847,7 @@ static const struct _tlb_table intel_tlb_table[] = {
{ 0xba, TLB_DATA_4K, 64, " TLB_DATA 4 KByte pages, 4-way associative" },
{ 0xc0, TLB_DATA_4K_4M, 8, " TLB_DATA 4 KByte and 4 MByte pages, 4-way associative" },
{ 0xc1, STLB_4K_2M, 1024, " STLB 4 KByte and 2 MByte pages, 8-way associative" },
- { 0xc2, TLB_DATA_2M_4M, 16, " DTLB 2 MByte/4MByte pages, 4-way associative" },
+ { 0xc2, TLB_DATA_2M_4M, 16, " TLB_DATA 2 MByte/4MByte pages, 4-way associative" },
{ 0xca, STLB_4K, 512, " STLB 4 KByte pages, 4-way associative" },
{ 0x00, 0, 0 }
};
@@ -854,8 +859,8 @@ static void intel_tlb_lookup(const unsigned char desc)
return;
/* look up this descriptor in the table */
- for (k = 0; intel_tlb_table[k].descriptor != desc && \
- intel_tlb_table[k].descriptor != 0; k++)
+ for (k = 0; intel_tlb_table[k].descriptor != desc &&
+ intel_tlb_table[k].descriptor != 0; k++)
;
if (intel_tlb_table[k].tlb_type == 0)
diff --git a/arch/x86/kernel/cpu/mce/amd.c b/arch/x86/kernel/cpu/mce/amd.c
index 6ea7fdc82f3c..5167bd2bb6b1 100644
--- a/arch/x86/kernel/cpu/mce/amd.c
+++ b/arch/x86/kernel/cpu/mce/amd.c
@@ -583,7 +583,7 @@ bool amd_filter_mce(struct mce *m)
* - Prevent possible spurious interrupts from the IF bank on Family 0x17
* Models 0x10-0x2F due to Erratum #1114.
*/
-void disable_err_thresholding(struct cpuinfo_x86 *c, unsigned int bank)
+static void disable_err_thresholding(struct cpuinfo_x86 *c, unsigned int bank)
{
int i, num_msrs;
u64 hwcr;
diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c
index 743370ee4983..5f42f25bac8f 100644
--- a/arch/x86/kernel/cpu/mce/core.c
+++ b/arch/x86/kernel/cpu/mce/core.c
@@ -488,8 +488,9 @@ int mce_usable_address(struct mce *m)
if (!(m->status & MCI_STATUS_ADDRV))
return 0;
- /* Checks after this one are Intel-specific: */
- if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+ /* Checks after this one are Intel/Zhaoxin-specific: */
+ if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL &&
+ boot_cpu_data.x86_vendor != X86_VENDOR_ZHAOXIN)
return 1;
if (!(m->status & MCI_STATUS_MISCV))
@@ -507,10 +508,13 @@ EXPORT_SYMBOL_GPL(mce_usable_address);
bool mce_is_memory_error(struct mce *m)
{
- if (m->cpuvendor == X86_VENDOR_AMD ||
- m->cpuvendor == X86_VENDOR_HYGON) {
+ switch (m->cpuvendor) {
+ case X86_VENDOR_AMD:
+ case X86_VENDOR_HYGON:
return amd_mce_is_memory_error(m);
- } else if (m->cpuvendor == X86_VENDOR_INTEL) {
+
+ case X86_VENDOR_INTEL:
+ case X86_VENDOR_ZHAOXIN:
/*
* Intel SDM Volume 3B - 15.9.2 Compound Error Codes
*
@@ -527,9 +531,10 @@ bool mce_is_memory_error(struct mce *m)
return (m->status & 0xef80) == BIT(7) ||
(m->status & 0xef00) == BIT(8) ||
(m->status & 0xeffc) == 0xc;
- }
- return false;
+ default:
+ return false;
+ }
}
EXPORT_SYMBOL_GPL(mce_is_memory_error);
@@ -1127,6 +1132,12 @@ static bool __mc_check_crashing_cpu(int cpu)
u64 mcgstatus;
mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS);
+
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN) {
+ if (mcgstatus & MCG_STATUS_LMCES)
+ return false;
+ }
+
if (mcgstatus & MCG_STATUS_RIPV) {
mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
return true;
@@ -1277,9 +1288,10 @@ void do_machine_check(struct pt_regs *regs, long error_code)
/*
* Check if this MCE is signaled to only this logical processor,
- * on Intel only.
+ * on Intel, Zhaoxin only.
*/
- if (m.cpuvendor == X86_VENDOR_INTEL)
+ if (m.cpuvendor == X86_VENDOR_INTEL ||
+ m.cpuvendor == X86_VENDOR_ZHAOXIN)
lmce = m.mcgstatus & MCG_STATUS_LMCES;
/*
@@ -1697,6 +1709,18 @@ static int __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
if (c->x86 == 6 && c->x86_model == 45)
quirk_no_way_out = quirk_sandybridge_ifu;
}
+
+ if (c->x86_vendor == X86_VENDOR_ZHAOXIN) {
+ /*
+ * All newer Zhaoxin CPUs support MCE broadcasting. Enable
+ * synchronization with a one second timeout.
+ */
+ if (c->x86 > 6 || (c->x86_model == 0x19 || c->x86_model == 0x1f)) {
+ if (cfg->monarch_timeout < 0)
+ cfg->monarch_timeout = USEC_PER_SEC;
+ }
+ }
+
if (cfg->monarch_timeout < 0)
cfg->monarch_timeout = 0;
if (cfg->bootlog != 0)
@@ -1760,6 +1784,35 @@ static void mce_centaur_feature_init(struct cpuinfo_x86 *c)
}
}
+static void mce_zhaoxin_feature_init(struct cpuinfo_x86 *c)
+{
+ struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array);
+
+ /*
+ * These CPUs have MCA bank 8 which reports only one error type called
+ * SVAD (System View Address Decoder). The reporting of that error is
+ * controlled by IA32_MC8.CTL.0.
+ *
+	 * If enabled, prefetching on these CPUs will cause an SVAD MCE when
+	 * virtual machines start, resulting in a system panic. Always disable
+	 * bank 8 SVAD error reporting by default.
+ */
+ if ((c->x86 == 7 && c->x86_model == 0x1b) ||
+ (c->x86_model == 0x19 || c->x86_model == 0x1f)) {
+ if (this_cpu_read(mce_num_banks) > 8)
+ mce_banks[8].ctl = 0;
+ }
+
+ intel_init_cmci();
+ intel_init_lmce();
+ mce_adjust_timer = cmci_intel_adjust_timer;
+}
+
+static void mce_zhaoxin_feature_clear(struct cpuinfo_x86 *c)
+{
+ intel_clear_lmce();
+}
+
static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c)
{
switch (c->x86_vendor) {
@@ -1781,6 +1834,10 @@ static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c)
mce_centaur_feature_init(c);
break;
+ case X86_VENDOR_ZHAOXIN:
+ mce_zhaoxin_feature_init(c);
+ break;
+
default:
break;
}
@@ -1792,6 +1849,11 @@ static void __mcheck_cpu_clear_vendor(struct cpuinfo_x86 *c)
case X86_VENDOR_INTEL:
mce_intel_feature_clear(c);
break;
+
+ case X86_VENDOR_ZHAOXIN:
+ mce_zhaoxin_feature_clear(c);
+ break;
+
default:
break;
}
@@ -2014,15 +2076,16 @@ static void mce_disable_error_reporting(void)
static void vendor_disable_error_reporting(void)
{
/*
- * Don't clear on Intel or AMD or Hygon CPUs. Some of these MSRs
- * are socket-wide.
- * Disabling them for just a single offlined CPU is bad, since it will
- * inhibit reporting for all shared resources on the socket like the
- * last level cache (LLC), the integrated memory controller (iMC), etc.
+ * Don't clear on Intel or AMD or Hygon or Zhaoxin CPUs. Some of these
+ * MSRs are socket-wide. Disabling them for just a single offlined CPU
+ * is bad, since it will inhibit reporting for all shared resources on
+ * the socket like the last level cache (LLC), the integrated memory
+ * controller (iMC), etc.
*/
if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL ||
boot_cpu_data.x86_vendor == X86_VENDOR_HYGON ||
- boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+ boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
+ boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN)
return;
mce_disable_error_reporting();
diff --git a/arch/x86/kernel/cpu/mce/intel.c b/arch/x86/kernel/cpu/mce/intel.c
index 88cd9598fa57..e270d0770134 100644
--- a/arch/x86/kernel/cpu/mce/intel.c
+++ b/arch/x86/kernel/cpu/mce/intel.c
@@ -85,8 +85,10 @@ static int cmci_supported(int *banks)
* initialization is vendor keyed and this
* makes sure none of the backdoors are entered otherwise.
*/
- if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+ if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL &&
+ boot_cpu_data.x86_vendor != X86_VENDOR_ZHAOXIN)
return 0;
+
if (!boot_cpu_has(X86_FEATURE_APIC) || lapic_get_maxlvt() < 6)
return 0;
rdmsrl(MSR_IA32_MCG_CAP, cap);
@@ -423,7 +425,7 @@ void cmci_disable_bank(int bank)
raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
}
-static void intel_init_cmci(void)
+void intel_init_cmci(void)
{
int banks;
@@ -442,7 +444,7 @@ static void intel_init_cmci(void)
cmci_recheck();
}
-static void intel_init_lmce(void)
+void intel_init_lmce(void)
{
u64 val;
@@ -455,7 +457,7 @@ static void intel_init_lmce(void)
wrmsrl(MSR_IA32_MCG_EXT_CTL, val | MCG_EXT_CTL_LMCE_EN);
}
-static void intel_clear_lmce(void)
+void intel_clear_lmce(void)
{
u64 val;
@@ -482,6 +484,7 @@ static void intel_ppin_init(struct cpuinfo_x86 *c)
case INTEL_FAM6_BROADWELL_D:
case INTEL_FAM6_BROADWELL_X:
case INTEL_FAM6_SKYLAKE_X:
+ case INTEL_FAM6_ICELAKE_X:
case INTEL_FAM6_XEON_PHI_KNL:
case INTEL_FAM6_XEON_PHI_KNM:
diff --git a/arch/x86/kernel/cpu/mce/internal.h b/arch/x86/kernel/cpu/mce/internal.h
index 43031db429d2..842b273bce31 100644
--- a/arch/x86/kernel/cpu/mce/internal.h
+++ b/arch/x86/kernel/cpu/mce/internal.h
@@ -45,11 +45,17 @@ unsigned long cmci_intel_adjust_timer(unsigned long interval);
bool mce_intel_cmci_poll(void);
void mce_intel_hcpu_update(unsigned long cpu);
void cmci_disable_bank(int bank);
+void intel_init_cmci(void);
+void intel_init_lmce(void);
+void intel_clear_lmce(void);
#else
# define cmci_intel_adjust_timer mce_adjust_timer_default
static inline bool mce_intel_cmci_poll(void) { return false; }
static inline void mce_intel_hcpu_update(unsigned long cpu) { }
static inline void cmci_disable_bank(int bank) { }
+static inline void intel_init_cmci(void) { }
+static inline void intel_init_lmce(void) { }
+static inline void intel_clear_lmce(void) { }
#endif
void mce_timer_kick(unsigned long interval);
diff --git a/arch/x86/kernel/cpu/mce/therm_throt.c b/arch/x86/kernel/cpu/mce/therm_throt.c
index 6e2becf547c5..d01e0da0163a 100644
--- a/arch/x86/kernel/cpu/mce/therm_throt.c
+++ b/arch/x86/kernel/cpu/mce/therm_throt.c
@@ -40,15 +40,58 @@
#define THERMAL_THROTTLING_EVENT 0
#define POWER_LIMIT_EVENT 1
-/*
- * Current thermal event state:
+/**
+ * struct _thermal_state - Represent the current thermal event state
+ * @next_check: Stores the timestamp at which it is next
+ * permitted to log a warning message.
+ * @last_interrupt_time: Stores the timestamp of the last threshold
+ * high event.
+ * @therm_work: Delayed work used to poll the throttle state.
+ * @count: Stores the current running count of thermal
+ * or power threshold interrupts.
+ * @last_count: Stores the previous running count of thermal
+ * or power threshold interrupts.
+ * @max_time_ms: The maximum time the CPU spent in the throttled
+ * state for a single thermal threshold high-to-low
+ * transition.
+ * @total_time_ms: The cumulative time the CPU has spent in the
+ * throttled state.
+ * @rate_control_active: Set when a throttling message is logged;
+ * used for rate control.
+ * @new_event: Stores the last high/low status of
+ * THERM_STATUS_PROCHOT or
+ * THERM_STATUS_POWER_LIMIT.
+ * @level: Stores whether this _thermal_state instance is
+ * for CORE level or PACKAGE level.
+ * @sample_index: Index at which the next sample is stored in the
+ * buffer temp_samples[].
+ * @sample_count: Total number of samples collected in the buffer
+ * temp_samples[].
+ * @average: The last moving average of the temperature samples.
+ * @baseline_temp: Temperature at which the thermal threshold high
+ * interrupt was generated.
+ * @temp_samples: Storage for the temperature samples used to
+ * compute the moving average.
+ *
+ * This structure represents the thermal event state of a CPU. There is
+ * separate storage for the core and package levels of each CPU.
*/
struct _thermal_state {
- bool new_event;
- int event;
u64 next_check;
+ u64 last_interrupt_time;
+ struct delayed_work therm_work;
unsigned long count;
unsigned long last_count;
+ unsigned long max_time_ms;
+ unsigned long total_time_ms;
+ bool rate_control_active;
+ bool new_event;
+ u8 level;
+ u8 sample_index;
+ u8 sample_count;
+ u8 average;
+ u8 baseline_temp;
+ u8 temp_samples[3];
};
struct thermal_state {
@@ -121,8 +164,22 @@ define_therm_throt_device_one_ro(package_throttle_count);
define_therm_throt_device_show_func(package_power_limit, count);
define_therm_throt_device_one_ro(package_power_limit_count);
+define_therm_throt_device_show_func(core_throttle, max_time_ms);
+define_therm_throt_device_one_ro(core_throttle_max_time_ms);
+
+define_therm_throt_device_show_func(package_throttle, max_time_ms);
+define_therm_throt_device_one_ro(package_throttle_max_time_ms);
+
+define_therm_throt_device_show_func(core_throttle, total_time_ms);
+define_therm_throt_device_one_ro(core_throttle_total_time_ms);
+
+define_therm_throt_device_show_func(package_throttle, total_time_ms);
+define_therm_throt_device_one_ro(package_throttle_total_time_ms);
+
static struct attribute *thermal_throttle_attrs[] = {
&dev_attr_core_throttle_count.attr,
+ &dev_attr_core_throttle_max_time_ms.attr,
+ &dev_attr_core_throttle_total_time_ms.attr,
NULL
};
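
The new max/total throttle-time counters surface next to the existing attributes in each CPU's thermal_throttle sysfs directory. A minimal sketch reading one of them, assuming the conventional per-CPU path (illustration only):

#include <stdio.h>

int main(void)
{
	const char *path = "/sys/devices/system/cpu/cpu0/"
			   "thermal_throttle/core_throttle_total_time_ms";
	char buf[64];
	FILE *f = fopen(path, "r");

	if (!f)
		return 1;	/* attribute not present on this kernel */
	if (fgets(buf, sizeof(buf), f))
		printf("cpu0 core throttle total: %s", buf);
	fclose(f);
	return 0;
}
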
@@ -135,6 +192,105 @@ static const struct attribute_group thermal_attr_group = {
#define CORE_LEVEL 0
#define PACKAGE_LEVEL 1
+#define THERM_THROT_POLL_INTERVAL HZ
+#define THERM_STATUS_PROCHOT_LOG BIT(1)
+
+static void clear_therm_status_log(int level)
+{
+ int msr;
+ u64 msr_val;
+
+ if (level == CORE_LEVEL)
+ msr = MSR_IA32_THERM_STATUS;
+ else
+ msr = MSR_IA32_PACKAGE_THERM_STATUS;
+
+ rdmsrl(msr, msr_val);
+ wrmsrl(msr, msr_val & ~THERM_STATUS_PROCHOT_LOG);
+}
+
+static void get_therm_status(int level, bool *proc_hot, u8 *temp)
+{
+ int msr;
+ u64 msr_val;
+
+ if (level == CORE_LEVEL)
+ msr = MSR_IA32_THERM_STATUS;
+ else
+ msr = MSR_IA32_PACKAGE_THERM_STATUS;
+
+ rdmsrl(msr, msr_val);
+ if (msr_val & THERM_STATUS_PROCHOT_LOG)
+ *proc_hot = true;
+ else
+ *proc_hot = false;
+
+ *temp = (msr_val >> 16) & 0x7F;
+}
+
+static void throttle_active_work(struct work_struct *work)
+{
+ struct _thermal_state *state = container_of(to_delayed_work(work),
+ struct _thermal_state, therm_work);
+ unsigned int i, avg, this_cpu = smp_processor_id();
+ u64 now = get_jiffies_64();
+ bool hot;
+ u8 temp;
+
+ get_therm_status(state->level, &hot, &temp);
+ /* The temperature value is an offset from the max, so lower means hotter */
+ if (!hot && temp > state->baseline_temp) {
+ if (state->rate_control_active)
+ pr_info("CPU%d: %s temperature/speed normal (total events = %lu)\n",
+ this_cpu,
+ state->level == CORE_LEVEL ? "Core" : "Package",
+ state->count);
+
+ state->rate_control_active = false;
+ return;
+ }
+
+ if (time_before64(now, state->next_check) &&
+ state->rate_control_active)
+ goto re_arm;
+
+ state->next_check = now + CHECK_INTERVAL;
+
+ if (state->count != state->last_count) {
+ /* There was one new thermal interrupt */
+ state->last_count = state->count;
+ state->average = 0;
+ state->sample_count = 0;
+ state->sample_index = 0;
+ }
+
+ state->temp_samples[state->sample_index] = temp;
+ state->sample_count++;
+ state->sample_index = (state->sample_index + 1) % ARRAY_SIZE(state->temp_samples);
+ if (state->sample_count < ARRAY_SIZE(state->temp_samples))
+ goto re_arm;
+
+ avg = 0;
+ for (i = 0; i < ARRAY_SIZE(state->temp_samples); ++i)
+ avg += state->temp_samples[i];
+
+ avg /= ARRAY_SIZE(state->temp_samples);
+
+ if (state->average > avg) {
+ pr_warn("CPU%d: %s temperature is above threshold, cpu clock is throttled (total events = %lu)\n",
+ this_cpu,
+ state->level == CORE_LEVEL ? "Core" : "Package",
+ state->count);
+ state->rate_control_active = true;
+ }
+
+ state->average = avg;
+
+re_arm:
+ clear_therm_status_log(state->level);
+ schedule_delayed_work_on(this_cpu, &state->therm_work, THERM_THROT_POLL_INTERVAL);
+}
+
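
The logic above reduces to a three-sample circular buffer whose fresh mean is compared with the previous one. A standalone sketch of the same bookkeeping, ordinary user-space C with invented names:

#include <stdio.h>

#define NR_SAMPLES 3	/* mirrors temp_samples[3] above */

struct avg_state {
	unsigned char samples[NR_SAMPLES];
	unsigned char sample_index;
	unsigned char sample_count;
	unsigned char average;
};

/* Feed one sample; returns 1 once a full window yields a new average. */
static int feed_sample(struct avg_state *s, unsigned char temp)
{
	unsigned int i, avg = 0;

	s->samples[s->sample_index] = temp;
	s->sample_count++;
	s->sample_index = (s->sample_index + 1) % NR_SAMPLES;
	if (s->sample_count < NR_SAMPLES)
		return 0;

	for (i = 0; i < NR_SAMPLES; i++)
		avg += s->samples[i];
	s->average = avg / NR_SAMPLES;
	return 1;
}

int main(void)
{
	struct avg_state s = { { 0 } };
	unsigned char readings[] = { 40, 35, 30, 28, 26 };
	unsigned int i;

	for (i = 0; i < sizeof(readings); i++)
		if (feed_sample(&s, readings[i]))
			printf("average after sample %u: %u\n", i, s.average);
	return 0;
}
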
/***
* therm_throt_process - Process thermal throttling event from interrupt
* @curr: Whether the condition is current or not (boolean), since the
@@ -178,27 +334,33 @@ static void therm_throt_process(bool new_event, int event, int level)
if (new_event)
state->count++;
- if (time_before64(now, state->next_check) &&
- state->count != state->last_count)
+ if (event != THERMAL_THROTTLING_EVENT)
return;
- state->next_check = now + CHECK_INTERVAL;
- state->last_count = state->count;
+ if (new_event && !state->last_interrupt_time) {
+ bool hot;
+ u8 temp;
+
+ get_therm_status(state->level, &hot, &temp);
+ /*
+ * Ignore a short temperature spike while the system is not close
+ * to PROCHOT. A 10C offset is large enough to ignore; the value
+ * read is already an offset from the high threshold temperature.
+ */
+ if (temp > 10)
+ return;
- /* if we just entered the thermal event */
- if (new_event) {
- if (event == THERMAL_THROTTLING_EVENT)
- pr_crit("CPU%d: %s temperature above threshold, cpu clock throttled (total events = %lu)\n",
- this_cpu,
- level == CORE_LEVEL ? "Core" : "Package",
- state->count);
- return;
- }
- if (old_event) {
- if (event == THERMAL_THROTTLING_EVENT)
- pr_info("CPU%d: %s temperature/speed normal\n", this_cpu,
- level == CORE_LEVEL ? "Core" : "Package");
- return;
+ state->baseline_temp = temp;
+ state->last_interrupt_time = now;
+ schedule_delayed_work_on(this_cpu, &state->therm_work, THERM_THROT_POLL_INTERVAL);
+ } else if (old_event && state->last_interrupt_time) {
+ unsigned long throttle_time;
+
+ throttle_time = jiffies_delta_to_msecs(now - state->last_interrupt_time);
+ if (throttle_time > state->max_time_ms)
+ state->max_time_ms = throttle_time;
+ state->total_time_ms += throttle_time;
+ state->last_interrupt_time = 0;
}
}
@@ -244,20 +406,47 @@ static int thermal_throttle_add_dev(struct device *dev, unsigned int cpu)
if (err)
return err;
- if (cpu_has(c, X86_FEATURE_PLN) && int_pln_enable)
+ if (cpu_has(c, X86_FEATURE_PLN) && int_pln_enable) {
err = sysfs_add_file_to_group(&dev->kobj,
&dev_attr_core_power_limit_count.attr,
thermal_attr_group.name);
+ if (err)
+ goto del_group;
+ }
+
if (cpu_has(c, X86_FEATURE_PTS)) {
err = sysfs_add_file_to_group(&dev->kobj,
&dev_attr_package_throttle_count.attr,
thermal_attr_group.name);
- if (cpu_has(c, X86_FEATURE_PLN) && int_pln_enable)
+ if (err)
+ goto del_group;
+
+ err = sysfs_add_file_to_group(&dev->kobj,
+ &dev_attr_package_throttle_max_time_ms.attr,
+ thermal_attr_group.name);
+ if (err)
+ goto del_group;
+
+ err = sysfs_add_file_to_group(&dev->kobj,
+ &dev_attr_package_throttle_total_time_ms.attr,
+ thermal_attr_group.name);
+ if (err)
+ goto del_group;
+
+ if (cpu_has(c, X86_FEATURE_PLN) && int_pln_enable) {
err = sysfs_add_file_to_group(&dev->kobj,
&dev_attr_package_power_limit_count.attr,
thermal_attr_group.name);
+ if (err)
+ goto del_group;
+ }
}
+ return 0;
+
+del_group:
+ sysfs_remove_group(&dev->kobj, &thermal_attr_group);
+
return err;
}
@@ -269,15 +458,29 @@ static void thermal_throttle_remove_dev(struct device *dev)
/* Get notified when a cpu comes on/off. Be hotplug friendly. */
static int thermal_throttle_online(unsigned int cpu)
{
+ struct thermal_state *state = &per_cpu(thermal_state, cpu);
struct device *dev = get_cpu_device(cpu);
+ state->package_throttle.level = PACKAGE_LEVEL;
+ state->core_throttle.level = CORE_LEVEL;
+
+ INIT_DELAYED_WORK(&state->package_throttle.therm_work, throttle_active_work);
+ INIT_DELAYED_WORK(&state->core_throttle.therm_work, throttle_active_work);
+
return thermal_throttle_add_dev(dev, cpu);
}
static int thermal_throttle_offline(unsigned int cpu)
{
+ struct thermal_state *state = &per_cpu(thermal_state, cpu);
struct device *dev = get_cpu_device(cpu);
+ cancel_delayed_work(&state->package_throttle.therm_work);
+ cancel_delayed_work(&state->core_throttle.therm_work);
+
+ state->package_throttle.rate_control_active = false;
+ state->core_throttle.rate_control_active = false;
+
thermal_throttle_remove_dev(dev);
return 0;
}
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
index a0e52bd00ecc..3f6b137ef4e6 100644
--- a/arch/x86/kernel/cpu/microcode/amd.c
+++ b/arch/x86/kernel/cpu/microcode/amd.c
@@ -567,7 +567,7 @@ int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax)
void reload_ucode_amd(void)
{
struct microcode_amd *mc;
- u32 rev, dummy;
+ u32 rev, dummy __always_unused;
mc = (struct microcode_amd *)amd_ucode_patch;
@@ -673,7 +673,7 @@ static enum ucode_state apply_microcode_amd(int cpu)
struct ucode_cpu_info *uci;
struct ucode_patch *p;
enum ucode_state ret;
- u32 rev, dummy;
+ u32 rev, dummy __always_unused;
BUG_ON(raw_smp_processor_id() != cpu);
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index cb0fdcaf1415..7019d4b2df0c 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -63,11 +63,6 @@ LIST_HEAD(microcode_cache);
*/
static DEFINE_MUTEX(microcode_mutex);
-/*
- * Serialize late loading so that CPUs get updated one-by-one.
- */
-static DEFINE_RAW_SPINLOCK(update_lock);
-
struct ucode_cpu_info ucode_cpu_info[NR_CPUS];
struct cpu_info_ctx {
@@ -566,11 +561,18 @@ static int __reload_late(void *info)
if (__wait_for_cpus(&late_cpus_in, NSEC_PER_SEC))
return -1;
- raw_spin_lock(&update_lock);
- apply_microcode_local(&err);
- raw_spin_unlock(&update_lock);
+ /*
+ * On an SMT system, it suffices to load the microcode on one sibling of
+ * the core because the microcode engine is shared between the threads.
+ * Synchronization still needs to take place so that no concurrent
+ * loading attempts happen on multiple threads of an SMT core. See
+ * below.
+ */
+ if (cpumask_first(topology_sibling_cpumask(cpu)) == cpu)
+ apply_microcode_local(&err);
+ else
+ goto wait_for_siblings;
- /* siblings return UCODE_OK because their engine got updated already */
if (err > UCODE_NFOUND) {
pr_warn("Error reloading microcode on CPU %d\n", cpu);
ret = -1;
@@ -578,14 +580,18 @@ static int __reload_late(void *info)
ret = 1;
}
+wait_for_siblings:
+ if (__wait_for_cpus(&late_cpus_out, NSEC_PER_SEC))
+ panic("Timeout during microcode update!\n");
+
/*
- * Increase the wait timeout to a safe value here since we're
- * serializing the microcode update and that could take a while on a
- * large number of CPUs. And that is fine as the *actual* timeout will
- * be determined by the last CPU finished updating and thus cut short.
+ * At least one thread has completed the update on each core.
+ * For the others, simply call the update to make sure the
+ * per-cpu cpuinfo is updated with the right microcode
+ * revision.
*/
- if (__wait_for_cpus(&late_cpus_out, NSEC_PER_SEC * num_online_cpus()))
- panic("Timeout during microcode update!\n");
+ if (cpumask_first(topology_sibling_cpumask(cpu)) != cpu)
+ apply_microcode_local(&err);
return ret;
}
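
The sibling filter amounts to "the lowest-numbered CPU in each SMT mask performs the update; the rest wait, then refresh their cpuinfo". A toy stand-in with an invented two-core, four-thread topology (illustration only):

#include <stdio.h>

/* Invented topology: cpus 0/2 share a core, as do cpus 1/3. */
static const int sibling_mask[4][2] = {
	{ 0, 2 }, { 1, 3 }, { 0, 2 }, { 1, 3 },
};

static int first_sibling(int cpu)
{
	const int *m = sibling_mask[cpu];

	return m[0] < m[1] ? m[0] : m[1];
}

int main(void)
{
	for (int cpu = 0; cpu < 4; cpu++) {
		if (first_sibling(cpu) == cpu)
			printf("cpu%d: applies the microcode\n", cpu);
		else
			printf("cpu%d: waits, then refreshes cpuinfo\n", cpu);
	}
	return 0;
}
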
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index ce799cfe9434..6a99535d7f37 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -791,6 +791,7 @@ static enum ucode_state apply_microcode_intel(int cpu)
{
struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
struct cpuinfo_x86 *c = &cpu_data(cpu);
+ bool bsp = c->cpu_index == boot_cpu_data.cpu_index;
struct microcode_intel *mc;
enum ucode_state ret;
static int prev_rev;
@@ -836,7 +837,7 @@ static enum ucode_state apply_microcode_intel(int cpu)
return UCODE_ERROR;
}
- if (rev != prev_rev) {
+ if (bsp && rev != prev_rev) {
pr_info("updated to revision 0x%x, date = %04x-%02x-%02x\n",
rev,
mc->hdr.date & 0xffff,
@@ -852,7 +853,7 @@ out:
c->microcode = rev;
/* Update boot_cpu_data's revision too, if we're on the BSP: */
- if (c->cpu_index == boot_cpu_data.cpu_index)
+ if (bsp)
boot_cpu_data.microcode = rev;
return ret;
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
index c656d92cd708..caa032ce3fe3 100644
--- a/arch/x86/kernel/cpu/mshyperv.c
+++ b/arch/x86/kernel/cpu/mshyperv.c
@@ -290,7 +290,12 @@ static void __init ms_hyperv_init_platform(void)
machine_ops.shutdown = hv_machine_shutdown;
machine_ops.crash_shutdown = hv_machine_crash_shutdown;
#endif
- mark_tsc_unstable("running on Hyper-V");
+ if (ms_hyperv.features & HV_X64_ACCESS_TSC_INVARIANT) {
+ wrmsrl(HV_X64_MSR_TSC_INVARIANT_CONTROL, 0x1);
+ setup_force_cpu_cap(X86_FEATURE_TSC_RELIABLE);
+ } else {
+ mark_tsc_unstable("running on Hyper-V");
+ }
/*
* Generation 2 instances don't support reading the NMI status from
diff --git a/arch/x86/kernel/cpu/rdrand.c b/arch/x86/kernel/cpu/rdrand.c
index 5c900f9527ff..c4be62058dd9 100644
--- a/arch/x86/kernel/cpu/rdrand.c
+++ b/arch/x86/kernel/cpu/rdrand.c
@@ -29,7 +29,8 @@ __setup("nordrand", x86_rdrand_setup);
#ifdef CONFIG_ARCH_RANDOM
void x86_init_rdrand(struct cpuinfo_x86 *c)
{
- unsigned long tmp;
+ unsigned int changed = 0;
+ unsigned long tmp, prev;
int i;
if (!cpu_has(c, X86_FEATURE_RDRAND))
@@ -42,5 +43,24 @@ void x86_init_rdrand(struct cpuinfo_x86 *c)
return;
}
}
+
+ /*
+ * Stupid sanity check: verify that RDRAND *actually* generates
+ * at least some random-looking data.
+ */
+ prev = tmp;
+ for (i = 0; i < SANITY_CHECK_LOOPS; i++) {
+ if (rdrand_long(&tmp)) {
+ if (prev != tmp)
+ changed++;
+
+ prev = tmp;
+ }
+ }
+
+ if (WARN_ON_ONCE(!changed))
+ pr_emerg(
+"RDRAND gives funky smelling output, might consider not using it by booting with \"nordrand\"");
+
}
#endif
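
The same sanity check can be reproduced from user space with the RDRAND intrinsic. A sketch assuming an x86-64 compiler invoked with -mrdrnd; the loop count is an assumption, chosen in the same spirit as the kernel's constant:

#include <stdio.h>
#include <immintrin.h>

#define SANITY_CHECK_LOOPS 8	/* assumed value, for illustration */

int main(void)
{
	unsigned long long tmp = 0, prev = 0;
	unsigned int changed = 0;

	if (!_rdrand64_step(&prev))
		return 1;	/* RDRAND not ready */
	for (int i = 0; i < SANITY_CHECK_LOOPS; i++) {
		if (_rdrand64_step(&tmp)) {
			if (tmp != prev)
				changed++;
			prev = tmp;
		}
	}
	puts(changed ? "RDRAND output varies" : "RDRAND looks stuck");
	return 0;
}
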
diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
index a46dee8e78db..2e3b06d6bbc6 100644
--- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
@@ -461,10 +461,8 @@ static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,
}
rdtgrp = rdtgroup_kn_lock_live(of->kn);
- rdt_last_cmd_clear();
if (!rdtgrp) {
ret = -ENOENT;
- rdt_last_cmd_puts("Directory was removed\n");
goto unlock;
}
@@ -2648,10 +2646,8 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
int ret;
prdtgrp = rdtgroup_kn_lock_live(prgrp_kn);
- rdt_last_cmd_clear();
if (!prdtgrp) {
ret = -ENODEV;
- rdt_last_cmd_puts("Directory was removed\n");
goto out_unlock;
}
diff --git a/arch/x86/kernel/cpu/tsx.c b/arch/x86/kernel/cpu/tsx.c
new file mode 100644
index 000000000000..3e20d322bc98
--- /dev/null
+++ b/arch/x86/kernel/cpu/tsx.c
@@ -0,0 +1,140 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Transactional Synchronization Extensions (TSX) control.
+ *
+ * Copyright (C) 2019 Intel Corporation
+ *
+ * Author:
+ * Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+ */
+
+#include <linux/cpufeature.h>
+
+#include <asm/cmdline.h>
+
+#include "cpu.h"
+
+enum tsx_ctrl_states tsx_ctrl_state __ro_after_init = TSX_CTRL_NOT_SUPPORTED;
+
+void tsx_disable(void)
+{
+ u64 tsx;
+
+ rdmsrl(MSR_IA32_TSX_CTRL, tsx);
+
+ /* Force all transactions to immediately abort */
+ tsx |= TSX_CTRL_RTM_DISABLE;
+
+ /*
+ * Ensure TSX support is not enumerated in CPUID.
+ * This is visible to userspace and ensures that
+ * applications do not waste resources trying TSX
+ * transactions that will always abort.
+ */
+ tsx |= TSX_CTRL_CPUID_CLEAR;
+
+ wrmsrl(MSR_IA32_TSX_CTRL, tsx);
+}
+
+void tsx_enable(void)
+{
+ u64 tsx;
+
+ rdmsrl(MSR_IA32_TSX_CTRL, tsx);
+
+ /* Enable the RTM feature in the cpu */
+ tsx &= ~TSX_CTRL_RTM_DISABLE;
+
+ /*
+ * Ensure TSX support is enumerated in CPUID.
+ * This is visible to userspace and ensures that
+ * applications can enumerate and use the TSX feature.
+ */
+ tsx &= ~TSX_CTRL_CPUID_CLEAR;
+
+ wrmsrl(MSR_IA32_TSX_CTRL, tsx);
+}
+
+static bool __init tsx_ctrl_is_supported(void)
+{
+ u64 ia32_cap = x86_read_arch_cap_msr();
+
+ /*
+ * TSX is controlled via MSR_IA32_TSX_CTRL. However, support for this
+ * MSR is enumerated by ARCH_CAP_TSX_MSR bit in MSR_IA32_ARCH_CAPABILITIES.
+ *
+ * TSX control (aka MSR_IA32_TSX_CTRL) is only available after a
+ * microcode update on CPUs that have their MSR_IA32_ARCH_CAPABILITIES
+ * bit MDS_NO=1. CPUs with MDS_NO=0 are not planned to get
+ * MSR_IA32_TSX_CTRL support even after a microcode update. Thus,
+ * tsx= cmdline requests will do nothing on CPUs without
+ * MSR_IA32_TSX_CTRL support.
+ */
+ return !!(ia32_cap & ARCH_CAP_TSX_CTRL_MSR);
+}
+
+static enum tsx_ctrl_states x86_get_tsx_auto_mode(void)
+{
+ if (boot_cpu_has_bug(X86_BUG_TAA))
+ return TSX_CTRL_DISABLE;
+
+ return TSX_CTRL_ENABLE;
+}
+
+void __init tsx_init(void)
+{
+ char arg[5] = {};
+ int ret;
+
+ if (!tsx_ctrl_is_supported())
+ return;
+
+ ret = cmdline_find_option(boot_command_line, "tsx", arg, sizeof(arg));
+ if (ret >= 0) {
+ if (!strcmp(arg, "on")) {
+ tsx_ctrl_state = TSX_CTRL_ENABLE;
+ } else if (!strcmp(arg, "off")) {
+ tsx_ctrl_state = TSX_CTRL_DISABLE;
+ } else if (!strcmp(arg, "auto")) {
+ tsx_ctrl_state = x86_get_tsx_auto_mode();
+ } else {
+ tsx_ctrl_state = TSX_CTRL_DISABLE;
+ pr_err("tsx: invalid option, defaulting to off\n");
+ }
+ } else {
+ /* tsx= not provided */
+ if (IS_ENABLED(CONFIG_X86_INTEL_TSX_MODE_AUTO))
+ tsx_ctrl_state = x86_get_tsx_auto_mode();
+ else if (IS_ENABLED(CONFIG_X86_INTEL_TSX_MODE_OFF))
+ tsx_ctrl_state = TSX_CTRL_DISABLE;
+ else
+ tsx_ctrl_state = TSX_CTRL_ENABLE;
+ }
+
+ if (tsx_ctrl_state == TSX_CTRL_DISABLE) {
+ tsx_disable();
+
+ /*
+ * tsx_disable() will change the state of the
+ * RTM CPUID bit. Clear it here since it is now
+ * expected not to be set.
+ */
+ setup_clear_cpu_cap(X86_FEATURE_RTM);
+ } else if (tsx_ctrl_state == TSX_CTRL_ENABLE) {
+
+ /*
+ * Hardware defaults TSX to enabled at bootup.
+ * We may still need the TSX enable support
+ * during init for special cases like
+ * kexec after TSX has been disabled.
+ */
+ tsx_enable();
+
+ /*
+ * tsx_enable() will change the state of the
+ * RTM CPUID bit. Force it here since it is now
+ * expected to be set.
+ */
+ setup_force_cpu_cap(X86_FEATURE_RTM);
+ }
+}
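
For verification after boot, the TSX control MSR can be read back through the msr driver. A sketch using the MSR number and bit layout this file relies on (MSR_IA32_TSX_CTRL is 0x122); the read fails cleanly on CPUs that do not enumerate ARCH_CAP_TSX_CTRL_MSR:

#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>

#define MSR_IA32_TSX_CTRL	0x122
#define TSX_CTRL_RTM_DISABLE	(1ULL << 0)
#define TSX_CTRL_CPUID_CLEAR	(1ULL << 1)

int main(void)
{
	uint64_t val;
	int fd = open("/dev/cpu/0/msr", O_RDONLY);

	if (fd < 0 || pread(fd, &val, sizeof(val), MSR_IA32_TSX_CTRL) != sizeof(val))
		return 1;	/* MSR not present on this CPU */
	printf("RTM %s, CPUID enumeration %s\n",
	       (val & TSX_CTRL_RTM_DISABLE) ? "disabled" : "enabled",
	       (val & TSX_CTRL_CPUID_CLEAR) ? "hidden" : "visible");
	close(fd);
	return 0;
}
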
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
index eb651fbde92a..00fc55ac7ffa 100644
--- a/arch/x86/kernel/crash.c
+++ b/arch/x86/kernel/crash.c
@@ -24,6 +24,7 @@
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
+#include <linux/memblock.h>
#include <asm/processor.h>
#include <asm/hardirq.h>
@@ -39,6 +40,7 @@
#include <asm/virtext.h>
#include <asm/intel_pt.h>
#include <asm/crash.h>
+#include <asm/cmdline.h>
/* Used while preparing memory map entries for second kernel */
struct crash_memmap_data {
@@ -68,6 +70,19 @@ static inline void cpu_crash_vmclear_loaded_vmcss(void)
rcu_read_unlock();
}
+/*
+ * When the crashkernel option is specified, only use the low
+ * 1M for the real mode trampoline.
+ */
+void __init crash_reserve_low_1M(void)
+{
+ if (cmdline_find_option(boot_command_line, "crashkernel", NULL, 0) < 0)
+ return;
+
+ memblock_reserve(0, 1<<20);
+ pr_info("Reserving the low 1M of memory for crashkernel\n");
+}
+
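
Whether the reservation kicks in can be checked against the same token the kernel scans for. A trivial user-space sketch over /proc/cmdline, illustration only:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char cmdline[4096];
	FILE *f = fopen("/proc/cmdline", "r");

	if (!f)
		return 1;
	if (!fgets(cmdline, sizeof(cmdline), f))
		cmdline[0] = '\0';
	fclose(f);
	puts(strstr(cmdline, "crashkernel") ?
	     "crashkernel set: low 1M is reserved" :
	     "crashkernel not set");
	return 0;
}
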
#if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)
static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
@@ -173,8 +188,6 @@ void native_machine_crash_shutdown(struct pt_regs *regs)
#ifdef CONFIG_KEXEC_FILE
-static unsigned long crash_zero_bytes;
-
static int get_nr_ram_ranges_callback(struct resource *res, void *arg)
{
unsigned int *nr_ranges = arg;
@@ -189,8 +202,7 @@ static struct crash_mem *fill_up_crash_elf_data(void)
unsigned int nr_ranges = 0;
struct crash_mem *cmem;
- walk_system_ram_res(0, -1, &nr_ranges,
- get_nr_ram_ranges_callback);
+ walk_system_ram_res(0, -1, &nr_ranges, get_nr_ram_ranges_callback);
if (!nr_ranges)
return NULL;
@@ -217,15 +229,19 @@ static int elf_header_exclude_ranges(struct crash_mem *cmem)
{
int ret = 0;
+ /* Exclude the low 1M because it is always reserved */
+ ret = crash_exclude_mem_range(cmem, 0, 1<<20);
+ if (ret)
+ return ret;
+
/* Exclude crashkernel region */
ret = crash_exclude_mem_range(cmem, crashk_res.start, crashk_res.end);
if (ret)
return ret;
- if (crashk_low_res.end) {
+ if (crashk_low_res.end)
ret = crash_exclude_mem_range(cmem, crashk_low_res.start,
- crashk_low_res.end);
- }
+ crashk_low_res.end);
return ret;
}
@@ -246,16 +262,13 @@ static int prepare_elf_headers(struct kimage *image, void **addr,
unsigned long *sz)
{
struct crash_mem *cmem;
- Elf64_Ehdr *ehdr;
- Elf64_Phdr *phdr;
- int ret, i;
+ int ret;
cmem = fill_up_crash_elf_data();
if (!cmem)
return -ENOMEM;
- ret = walk_system_ram_res(0, -1, cmem,
- prepare_elf64_ram_headers_callback);
+ ret = walk_system_ram_res(0, -1, cmem, prepare_elf64_ram_headers_callback);
if (ret)
goto out;
@@ -265,24 +278,8 @@ static int prepare_elf_headers(struct kimage *image, void **addr,
goto out;
/* By default prepare 64bit headers */
- ret = crash_prepare_elf64_headers(cmem,
- IS_ENABLED(CONFIG_X86_64), addr, sz);
- if (ret)
- goto out;
+ ret = crash_prepare_elf64_headers(cmem, IS_ENABLED(CONFIG_X86_64), addr, sz);
- /*
- * If a range matches backup region, adjust offset to backup
- * segment.
- */
- ehdr = (Elf64_Ehdr *)*addr;
- phdr = (Elf64_Phdr *)(ehdr + 1);
- for (i = 0; i < ehdr->e_phnum; phdr++, i++)
- if (phdr->p_type == PT_LOAD &&
- phdr->p_paddr == image->arch.backup_src_start &&
- phdr->p_memsz == image->arch.backup_src_sz) {
- phdr->p_offset = image->arch.backup_load_addr;
- break;
- }
out:
vfree(cmem);
return ret;
@@ -296,8 +293,7 @@ static int add_e820_entry(struct boot_params *params, struct e820_entry *entry)
if (nr_e820_entries >= E820_MAX_ENTRIES_ZEROPAGE)
return 1;
- memcpy(&params->e820_table[nr_e820_entries], entry,
- sizeof(struct e820_entry));
+ memcpy(&params->e820_table[nr_e820_entries], entry, sizeof(struct e820_entry));
params->e820_entries++;
return 0;
}
@@ -321,19 +317,11 @@ static int memmap_exclude_ranges(struct kimage *image, struct crash_mem *cmem,
unsigned long long mend)
{
unsigned long start, end;
- int ret = 0;
cmem->ranges[0].start = mstart;
cmem->ranges[0].end = mend;
cmem->nr_ranges = 1;
- /* Exclude Backup region */
- start = image->arch.backup_load_addr;
- end = start + image->arch.backup_src_sz - 1;
- ret = crash_exclude_mem_range(cmem, start, end);
- if (ret)
- return ret;
-
/* Exclude elf header region */
start = image->arch.elf_load_addr;
end = start + image->arch.elf_headers_sz - 1;
@@ -356,28 +344,28 @@ int crash_setup_memmap_entries(struct kimage *image, struct boot_params *params)
memset(&cmd, 0, sizeof(struct crash_memmap_data));
cmd.params = params;
- /* Add first 640K segment */
- ei.addr = image->arch.backup_src_start;
- ei.size = image->arch.backup_src_sz;
- ei.type = E820_TYPE_RAM;
- add_e820_entry(params, &ei);
+ /* Add the low 1M */
+ cmd.type = E820_TYPE_RAM;
+ flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
+ walk_iomem_res_desc(IORES_DESC_NONE, flags, 0, (1<<20)-1, &cmd,
+ memmap_entry_callback);
/* Add ACPI tables */
cmd.type = E820_TYPE_ACPI;
flags = IORESOURCE_MEM | IORESOURCE_BUSY;
walk_iomem_res_desc(IORES_DESC_ACPI_TABLES, flags, 0, -1, &cmd,
- memmap_entry_callback);
+ memmap_entry_callback);
/* Add ACPI Non-volatile Storage */
cmd.type = E820_TYPE_NVS;
walk_iomem_res_desc(IORES_DESC_ACPI_NV_STORAGE, flags, 0, -1, &cmd,
- memmap_entry_callback);
+ memmap_entry_callback);
/* Add e820 reserved ranges */
cmd.type = E820_TYPE_RESERVED;
flags = IORESOURCE_MEM;
walk_iomem_res_desc(IORES_DESC_RESERVED, flags, 0, -1, &cmd,
- memmap_entry_callback);
+ memmap_entry_callback);
/* Add crashk_low_res region */
if (crashk_low_res.end) {
@@ -388,8 +376,7 @@ int crash_setup_memmap_entries(struct kimage *image, struct boot_params *params)
}
/* Exclude some ranges from crashk_res and add rest to memmap */
- ret = memmap_exclude_ranges(image, cmem, crashk_res.start,
- crashk_res.end);
+ ret = memmap_exclude_ranges(image, cmem, crashk_res.start, crashk_res.end);
if (ret)
goto out;
@@ -409,55 +396,12 @@ out:
return ret;
}
-static int determine_backup_region(struct resource *res, void *arg)
-{
- struct kimage *image = arg;
-
- image->arch.backup_src_start = res->start;
- image->arch.backup_src_sz = resource_size(res);
-
- /* Expecting only one range for backup region */
- return 1;
-}
-
int crash_load_segments(struct kimage *image)
{
int ret;
struct kexec_buf kbuf = { .image = image, .buf_min = 0,
.buf_max = ULONG_MAX, .top_down = false };
- /*
- * Determine and load a segment for backup area. First 640K RAM
- * region is backup source
- */
-
- ret = walk_system_ram_res(KEXEC_BACKUP_SRC_START, KEXEC_BACKUP_SRC_END,
- image, determine_backup_region);
-
- /* Zero or positive return values are ok */
- if (ret < 0)
- return ret;
-
- /* Add backup segment. */
- if (image->arch.backup_src_sz) {
- kbuf.buffer = &crash_zero_bytes;
- kbuf.bufsz = sizeof(crash_zero_bytes);
- kbuf.memsz = image->arch.backup_src_sz;
- kbuf.buf_align = PAGE_SIZE;
- /*
- * Ideally there is no source for backup segment. This is
- * copied in purgatory after crash. Just add a zero filled
- * segment for now to make sure checksum logic works fine.
- */
- ret = kexec_add_buffer(&kbuf);
- if (ret)
- return ret;
- image->arch.backup_load_addr = kbuf.mem;
- pr_debug("Loaded backup region at 0x%lx backup_start=0x%lx memsz=0x%lx\n",
- image->arch.backup_load_addr,
- image->arch.backup_src_start, kbuf.memsz);
- }
-
/* Prepare elf headers and add a segment */
ret = prepare_elf_headers(image, &kbuf.buffer, &kbuf.bufsz);
if (ret)
diff --git a/arch/x86/kernel/doublefault.c b/arch/x86/kernel/doublefault.c
index 0b8cedb20d6d..0d6c657593f8 100644
--- a/arch/x86/kernel/doublefault.c
+++ b/arch/x86/kernel/doublefault.c
@@ -54,7 +54,7 @@ struct x86_hw_tss doublefault_tss __cacheline_aligned = {
.sp0 = STACK_START,
.ss0 = __KERNEL_DS,
.ldt = 0,
- .io_bitmap_base = INVALID_IO_BITMAP_OFFSET,
+ .io_bitmap_base = IO_BITMAP_OFFSET_INVALID,
.ip = (unsigned long) doublefault_fn,
/* 0x2 bit is always set */
@@ -65,6 +65,9 @@ struct x86_hw_tss doublefault_tss __cacheline_aligned = {
.ss = __KERNEL_DS,
.ds = __USER_DS,
.fs = __KERNEL_PERCPU,
+#ifndef CONFIG_X86_32_LAZY_GS
+ .gs = __KERNEL_STACK_CANARY,
+#endif
.__cr3 = __pa_nodebug(swapper_pg_dir),
};
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index 7da2bcd2b8eb..c5399e80c59c 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -190,6 +190,7 @@ static void __init e820_print_type(enum e820_type type)
case E820_TYPE_RAM: /* Fall through: */
case E820_TYPE_RESERVED_KERN: pr_cont("usable"); break;
case E820_TYPE_RESERVED: pr_cont("reserved"); break;
+ case E820_TYPE_SOFT_RESERVED: pr_cont("soft reserved"); break;
case E820_TYPE_ACPI: pr_cont("ACPI data"); break;
case E820_TYPE_NVS: pr_cont("ACPI NVS"); break;
case E820_TYPE_UNUSABLE: pr_cont("unusable"); break;
@@ -999,6 +1000,17 @@ void __init e820__reserve_setup_data(void)
data = early_memremap(pa_data, sizeof(*data));
e820__range_update(pa_data, sizeof(*data)+data->len, E820_TYPE_RAM, E820_TYPE_RESERVED_KERN);
e820__range_update_kexec(pa_data, sizeof(*data)+data->len, E820_TYPE_RAM, E820_TYPE_RESERVED_KERN);
+
+ if (data->type == SETUP_INDIRECT &&
+ ((struct setup_indirect *)data->data)->type != SETUP_INDIRECT) {
+ e820__range_update(((struct setup_indirect *)data->data)->addr,
+ ((struct setup_indirect *)data->data)->len,
+ E820_TYPE_RAM, E820_TYPE_RESERVED_KERN);
+ e820__range_update_kexec(((struct setup_indirect *)data->data)->addr,
+ ((struct setup_indirect *)data->data)->len,
+ E820_TYPE_RAM, E820_TYPE_RESERVED_KERN);
+ }
+
pa_data = data->next;
early_memunmap(data, sizeof(*data));
}
@@ -1037,6 +1049,7 @@ static const char *__init e820_type_to_string(struct e820_entry *entry)
case E820_TYPE_PRAM: return "Persistent Memory (legacy)";
case E820_TYPE_PMEM: return "Persistent Memory";
case E820_TYPE_RESERVED: return "Reserved";
+ case E820_TYPE_SOFT_RESERVED: return "Soft Reserved";
default: return "Unknown E820 type";
}
}
@@ -1052,6 +1065,7 @@ static unsigned long __init e820_type_to_iomem_type(struct e820_entry *entry)
case E820_TYPE_PRAM: /* Fall-through: */
case E820_TYPE_PMEM: /* Fall-through: */
case E820_TYPE_RESERVED: /* Fall-through: */
+ case E820_TYPE_SOFT_RESERVED: /* Fall-through: */
default: return IORESOURCE_MEM;
}
}
@@ -1064,6 +1078,7 @@ static unsigned long __init e820_type_to_iores_desc(struct e820_entry *entry)
case E820_TYPE_PMEM: return IORES_DESC_PERSISTENT_MEMORY;
case E820_TYPE_PRAM: return IORES_DESC_PERSISTENT_MEMORY_LEGACY;
case E820_TYPE_RESERVED: return IORES_DESC_RESERVED;
+ case E820_TYPE_SOFT_RESERVED: return IORES_DESC_SOFT_RESERVED;
case E820_TYPE_RESERVED_KERN: /* Fall-through: */
case E820_TYPE_RAM: /* Fall-through: */
case E820_TYPE_UNUSABLE: /* Fall-through: */
@@ -1078,11 +1093,12 @@ static bool __init do_mark_busy(enum e820_type type, struct resource *res)
return true;
/*
- * Treat persistent memory like device memory, i.e. reserve it
- * for exclusive use of a driver
+ * Treat persistent memory and other special memory ranges like
+ * device memory, i.e. reserve it for exclusive use of a driver
*/
switch (type) {
case E820_TYPE_RESERVED:
+ case E820_TYPE_SOFT_RESERVED:
case E820_TYPE_PRAM:
case E820_TYPE_PMEM:
return false;
@@ -1285,6 +1301,9 @@ void __init e820__memblock_setup(void)
if (end != (resource_size_t)end)
continue;
+ if (entry->type == E820_TYPE_SOFT_RESERVED)
+ memblock_reserve(entry->addr, entry->size);
+
if (entry->type != E820_TYPE_RAM && entry->type != E820_TYPE_RESERVED_KERN)
continue;
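
Once these entries are wired into the resource tree, the new type shows up as "Soft Reserved" ranges in /proc/iomem. A trivial scan, for illustration:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/iomem", "r");	/* run as root for real addresses */

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		if (strstr(line, "Soft Reserved"))
			fputs(line, stdout);
	fclose(f);
	return 0;
}
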
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
index 6f6b1d04dadf..4cba91ec8049 100644
--- a/arch/x86/kernel/early-quirks.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -710,6 +710,8 @@ static struct chipset early_qrk[] __initdata = {
*/
{ PCI_VENDOR_ID_INTEL, 0x0f00,
PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet},
+ { PCI_VENDOR_ID_INTEL, 0x3ec4,
+ PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet},
{ PCI_VENDOR_ID_BROADCOM, 0x4331,
PCI_CLASS_NETWORK_OTHER, PCI_ANY_ID, 0, apple_airport_reset},
{}
diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
index e5cb67d67c03..319be936c348 100644
--- a/arch/x86/kernel/fpu/xstate.c
+++ b/arch/x86/kernel/fpu/xstate.c
@@ -60,7 +60,7 @@ u64 xfeatures_mask __read_mostly;
static unsigned int xstate_offsets[XFEATURE_MAX] = { [ 0 ... XFEATURE_MAX - 1] = -1};
static unsigned int xstate_sizes[XFEATURE_MAX] = { [ 0 ... XFEATURE_MAX - 1] = -1};
-static unsigned int xstate_comp_offsets[sizeof(xfeatures_mask)*8];
+static unsigned int xstate_comp_offsets[XFEATURE_MAX] = { [ 0 ... XFEATURE_MAX - 1] = -1};
/*
* The XSAVE area of kernel can be in standard or compacted format;
@@ -254,10 +254,13 @@ static void __init setup_xstate_features(void)
* in the fixed offsets in the xsave area in either compacted form
* or standard form.
*/
- xstate_offsets[0] = 0;
- xstate_sizes[0] = offsetof(struct fxregs_state, xmm_space);
- xstate_offsets[1] = xstate_sizes[0];
- xstate_sizes[1] = FIELD_SIZEOF(struct fxregs_state, xmm_space);
+ xstate_offsets[XFEATURE_FP] = 0;
+ xstate_sizes[XFEATURE_FP] = offsetof(struct fxregs_state,
+ xmm_space);
+
+ xstate_offsets[XFEATURE_SSE] = xstate_sizes[XFEATURE_FP];
+ xstate_sizes[XFEATURE_SSE] = FIELD_SIZEOF(struct fxregs_state,
+ xmm_space);
for (i = FIRST_EXTENDED_XFEATURE; i < XFEATURE_MAX; i++) {
if (!xfeature_enabled(i))
@@ -342,7 +345,7 @@ static int xfeature_is_aligned(int xfeature_nr)
*/
static void __init setup_xstate_comp(void)
{
- unsigned int xstate_comp_sizes[sizeof(xfeatures_mask)*8];
+ unsigned int xstate_comp_sizes[XFEATURE_MAX];
int i;
/*
@@ -350,8 +353,9 @@ static void __init setup_xstate_comp(void)
* in the fixed offsets in the xsave area in either compacted form
* or standard form.
*/
- xstate_comp_offsets[0] = 0;
- xstate_comp_offsets[1] = offsetof(struct fxregs_state, xmm_space);
+ xstate_comp_offsets[XFEATURE_FP] = 0;
+ xstate_comp_offsets[XFEATURE_SSE] = offsetof(struct fxregs_state,
+ xmm_space);
if (!boot_cpu_has(X86_FEATURE_XSAVES)) {
for (i = FIRST_EXTENDED_XFEATURE; i < XFEATURE_MAX; i++) {
@@ -840,7 +844,7 @@ void *get_xsave_addr(struct xregs_state *xsave, int xfeature_nr)
/*
* We should not ever be requesting features that we
- * have not enabled. Remember that pcntxt_mask is
+ * have not enabled. Remember that xfeatures_mask is
* what we write to the XCR0 register.
*/
WARN_ONCE(!(xfeatures_mask & BIT_ULL(xfeature_nr)),
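
The FP/SSE offsets hard-coded above come from the fixed legacy layout; every extended feature instead reports its standard-format offset and size via CPUID leaf 0xD. A user-space sketch enumerating them (the subleaf range is an arbitrary illustration):

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* Subleaf nr describes xfeature nr; extended features start at 2. */
	for (unsigned int nr = 2; nr <= 9; nr++) {
		if (!__get_cpuid_count(0xD, nr, &eax, &ebx, &ecx, &edx))
			break;
		if (eax)	/* size 0 means the feature is absent */
			printf("xfeature %u: offset %u, size %u\n",
			       nr, ebx, eax);
	}
	return 0;
}
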
diff --git a/arch/x86/kernel/ftrace_32.S b/arch/x86/kernel/ftrace_32.S
index 073aab525d80..e8a9f8370112 100644
--- a/arch/x86/kernel/ftrace_32.S
+++ b/arch/x86/kernel/ftrace_32.S
@@ -12,20 +12,18 @@
#include <asm/frame.h>
#include <asm/asm-offsets.h>
-# define function_hook __fentry__
-EXPORT_SYMBOL(__fentry__)
-
#ifdef CONFIG_FRAME_POINTER
# define MCOUNT_FRAME 1 /* using frame = true */
#else
# define MCOUNT_FRAME 0 /* using frame = false */
#endif
-ENTRY(function_hook)
+SYM_FUNC_START(__fentry__)
ret
-END(function_hook)
+SYM_FUNC_END(__fentry__)
+EXPORT_SYMBOL(__fentry__)
-ENTRY(ftrace_caller)
+SYM_CODE_START(ftrace_caller)
#ifdef CONFIG_FRAME_POINTER
/*
@@ -85,11 +83,11 @@ ftrace_graph_call:
#endif
/* This is weak to keep gas from relaxing the jumps */
-WEAK(ftrace_stub)
+SYM_INNER_LABEL_ALIGN(ftrace_stub, SYM_L_WEAK)
ret
-END(ftrace_caller)
+SYM_CODE_END(ftrace_caller)
-ENTRY(ftrace_regs_caller)
+SYM_CODE_START(ftrace_regs_caller)
/*
* We're here from an mcount/fentry CALL, and the stack frame looks like:
*
@@ -138,7 +136,7 @@ ENTRY(ftrace_regs_caller)
movl function_trace_op, %ecx # 3rd argument: ftrace_pos
pushl %esp # 4th argument: pt_regs
-GLOBAL(ftrace_regs_call)
+SYM_INNER_LABEL(ftrace_regs_call, SYM_L_GLOBAL)
call ftrace_stub
addl $4, %esp # skip 4th argument
@@ -163,9 +161,10 @@ GLOBAL(ftrace_regs_call)
popl %eax
jmp .Lftrace_ret
+SYM_CODE_END(ftrace_regs_caller)
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-ENTRY(ftrace_graph_caller)
+SYM_CODE_START(ftrace_graph_caller)
pushl %eax
pushl %ecx
pushl %edx
@@ -179,7 +178,7 @@ ENTRY(ftrace_graph_caller)
popl %ecx
popl %eax
ret
-END(ftrace_graph_caller)
+SYM_CODE_END(ftrace_graph_caller)
.globl return_to_handler
return_to_handler:
diff --git a/arch/x86/kernel/ftrace_64.S b/arch/x86/kernel/ftrace_64.S
index 809d54397dba..6e8961ca3605 100644
--- a/arch/x86/kernel/ftrace_64.S
+++ b/arch/x86/kernel/ftrace_64.S
@@ -14,9 +14,6 @@
.code64
.section .entry.text, "ax"
-# define function_hook __fentry__
-EXPORT_SYMBOL(__fentry__)
-
#ifdef CONFIG_FRAME_POINTER
/* Save parent and function stack frames (rip and rbp) */
# define MCOUNT_FRAME_SIZE (8+16*2)
@@ -132,22 +129,23 @@ EXPORT_SYMBOL(__fentry__)
#ifdef CONFIG_DYNAMIC_FTRACE
-ENTRY(function_hook)
+SYM_FUNC_START(__fentry__)
retq
-ENDPROC(function_hook)
+SYM_FUNC_END(__fentry__)
+EXPORT_SYMBOL(__fentry__)
-ENTRY(ftrace_caller)
+SYM_FUNC_START(ftrace_caller)
/* save_mcount_regs fills in first two parameters */
save_mcount_regs
-GLOBAL(ftrace_caller_op_ptr)
+SYM_INNER_LABEL(ftrace_caller_op_ptr, SYM_L_GLOBAL)
/* Load the ftrace_ops into the 3rd parameter */
movq function_trace_op(%rip), %rdx
/* regs go into 4th parameter (but make it NULL) */
movq $0, %rcx
-GLOBAL(ftrace_call)
+SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL)
call ftrace_stub
restore_mcount_regs
@@ -157,10 +155,10 @@ GLOBAL(ftrace_call)
* think twice before adding any new code or changing the
* layout here.
*/
-GLOBAL(ftrace_epilogue)
+SYM_INNER_LABEL(ftrace_epilogue, SYM_L_GLOBAL)
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-GLOBAL(ftrace_graph_call)
+SYM_INNER_LABEL(ftrace_graph_call, SYM_L_GLOBAL)
jmp ftrace_stub
#endif
@@ -168,11 +166,11 @@ GLOBAL(ftrace_graph_call)
* This is weak to keep gas from relaxing the jumps.
* It is also used to copy the retq for trampolines.
*/
-WEAK(ftrace_stub)
+SYM_INNER_LABEL_ALIGN(ftrace_stub, SYM_L_WEAK)
retq
-ENDPROC(ftrace_caller)
+SYM_FUNC_END(ftrace_caller)
-ENTRY(ftrace_regs_caller)
+SYM_FUNC_START(ftrace_regs_caller)
/* Save the current flags before any operations that can change them */
pushfq
@@ -180,7 +178,7 @@ ENTRY(ftrace_regs_caller)
save_mcount_regs 8
/* save_mcount_regs fills in first two parameters */
-GLOBAL(ftrace_regs_caller_op_ptr)
+SYM_INNER_LABEL(ftrace_regs_caller_op_ptr, SYM_L_GLOBAL)
/* Load the ftrace_ops into the 3rd parameter */
movq function_trace_op(%rip), %rdx
@@ -209,7 +207,7 @@ GLOBAL(ftrace_regs_caller_op_ptr)
/* regs go into 4th parameter */
leaq (%rsp), %rcx
-GLOBAL(ftrace_regs_call)
+SYM_INNER_LABEL(ftrace_regs_call, SYM_L_GLOBAL)
call ftrace_stub
/* Copy flags back to SS, to restore them */
@@ -239,16 +237,16 @@ GLOBAL(ftrace_regs_call)
* The trampoline will add the code to jump
* to the return.
*/
-GLOBAL(ftrace_regs_caller_end)
+SYM_INNER_LABEL(ftrace_regs_caller_end, SYM_L_GLOBAL)
jmp ftrace_epilogue
-ENDPROC(ftrace_regs_caller)
+SYM_FUNC_END(ftrace_regs_caller)
#else /* ! CONFIG_DYNAMIC_FTRACE */
-ENTRY(function_hook)
+SYM_FUNC_START(__fentry__)
cmpq $ftrace_stub, ftrace_trace_function
jnz trace
@@ -261,7 +259,7 @@ fgraph_trace:
jnz ftrace_graph_caller
#endif
-GLOBAL(ftrace_stub)
+SYM_INNER_LABEL(ftrace_stub, SYM_L_GLOBAL)
retq
trace:
@@ -279,11 +277,12 @@ trace:
restore_mcount_regs
jmp fgraph_trace
-ENDPROC(function_hook)
+SYM_FUNC_END(__fentry__)
+EXPORT_SYMBOL(__fentry__)
#endif /* CONFIG_DYNAMIC_FTRACE */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-ENTRY(ftrace_graph_caller)
+SYM_FUNC_START(ftrace_graph_caller)
/* Saves rbp into %rdx and fills first parameter */
save_mcount_regs
@@ -294,9 +293,9 @@ ENTRY(ftrace_graph_caller)
restore_mcount_regs
retq
-ENDPROC(ftrace_graph_caller)
+SYM_FUNC_END(ftrace_graph_caller)
-ENTRY(return_to_handler)
+SYM_CODE_START(return_to_handler)
UNWIND_HINT_EMPTY
subq $24, %rsp
@@ -312,5 +311,5 @@ ENTRY(return_to_handler)
movq (%rsp), %rax
addq $24, %rsp
JMP_NOSPEC %rdi
-END(return_to_handler)
+SYM_CODE_END(return_to_handler)
#endif
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index 30f9cb2c0b55..3923ab4630d7 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -64,7 +64,7 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
* can.
*/
__HEAD
-ENTRY(startup_32)
+SYM_CODE_START(startup_32)
movl pa(initial_stack),%ecx
/* test KEEP_SEGMENTS flag to see if the bootloader is asking
@@ -156,7 +156,7 @@ ENTRY(startup_32)
jmp *%eax
.Lbad_subarch:
-WEAK(xen_entry)
+SYM_INNER_LABEL_ALIGN(xen_entry, SYM_L_WEAK)
/* Unknown implementation; there's really
nothing we can do at this point. */
ud2a
@@ -172,6 +172,7 @@ num_subarch_entries = (. - subarch_entries) / 4
#else
jmp .Ldefault_entry
#endif /* CONFIG_PARAVIRT */
+SYM_CODE_END(startup_32)
#ifdef CONFIG_HOTPLUG_CPU
/*
@@ -179,12 +180,12 @@ num_subarch_entries = (. - subarch_entries) / 4
* up already except stack. We just set up stack here. Then call
* start_secondary().
*/
-ENTRY(start_cpu0)
+SYM_FUNC_START(start_cpu0)
movl initial_stack, %ecx
movl %ecx, %esp
call *(initial_code)
1: jmp 1b
-ENDPROC(start_cpu0)
+SYM_FUNC_END(start_cpu0)
#endif
/*
@@ -195,7 +196,7 @@ ENDPROC(start_cpu0)
* If cpu hotplug is not supported then this code can go in init section
* which will be freed later
*/
-ENTRY(startup_32_smp)
+SYM_FUNC_START(startup_32_smp)
cld
movl $(__BOOT_DS),%eax
movl %eax,%ds
@@ -362,7 +363,7 @@ ENTRY(startup_32_smp)
call *(initial_code)
1: jmp 1b
-ENDPROC(startup_32_smp)
+SYM_FUNC_END(startup_32_smp)
#include "verify_cpu.S"
@@ -392,7 +393,7 @@ setup_once:
andl $0,setup_once_ref /* Once is enough, thanks */
ret
-ENTRY(early_idt_handler_array)
+SYM_FUNC_START(early_idt_handler_array)
# 36(%esp) %eflags
# 32(%esp) %cs
# 28(%esp) %eip
@@ -407,9 +408,9 @@ ENTRY(early_idt_handler_array)
i = i + 1
.fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
.endr
-ENDPROC(early_idt_handler_array)
+SYM_FUNC_END(early_idt_handler_array)
-early_idt_handler_common:
+SYM_CODE_START_LOCAL(early_idt_handler_common)
/*
* The stack is the hardware frame, an error code or zero, and the
* vector number.
@@ -460,10 +461,10 @@ early_idt_handler_common:
decl %ss:early_recursion_flag
addl $4, %esp /* pop pt_regs->orig_ax */
iret
-ENDPROC(early_idt_handler_common)
+SYM_CODE_END(early_idt_handler_common)
/* This is the default interrupt "handler" :-) */
-ENTRY(early_ignore_irq)
+SYM_FUNC_START(early_ignore_irq)
cld
#ifdef CONFIG_PRINTK
pushl %eax
@@ -498,19 +499,16 @@ ENTRY(early_ignore_irq)
hlt_loop:
hlt
jmp hlt_loop
-ENDPROC(early_ignore_irq)
+SYM_FUNC_END(early_ignore_irq)
__INITDATA
.align 4
-GLOBAL(early_recursion_flag)
- .long 0
+SYM_DATA(early_recursion_flag, .long 0)
__REFDATA
.align 4
-ENTRY(initial_code)
- .long i386_start_kernel
-ENTRY(setup_once_ref)
- .long setup_once
+SYM_DATA(initial_code, .long i386_start_kernel)
+SYM_DATA(setup_once_ref, .long setup_once)
#ifdef CONFIG_PAGE_TABLE_ISOLATION
#define PGD_ALIGN (2 * PAGE_SIZE)
@@ -553,7 +551,7 @@ EXPORT_SYMBOL(empty_zero_page)
__PAGE_ALIGNED_DATA
/* Page-aligned for the benefit of paravirt? */
.align PGD_ALIGN
-ENTRY(initial_page_table)
+SYM_DATA_START(initial_page_table)
.long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
# if KPMDS == 3
.long pa(initial_pg_pmd+PGD_IDENT_ATTR),0
@@ -571,17 +569,28 @@ ENTRY(initial_page_table)
# error "Kernel PMDs should be 1, 2 or 3"
# endif
.align PAGE_SIZE /* needs to be page-sized too */
+
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+ /*
+ * PTI needs another page so sync_initial_pagetable() works correctly
+ * and does not scribble over the data which is placed behind the
+ * actual initial_page_table. See clone_pgd_range().
+ */
+ .fill 1024, 4, 0
+#endif
+
+SYM_DATA_END(initial_page_table)
#endif
.data
.balign 4
-ENTRY(initial_stack)
- /*
- * The SIZEOF_PTREGS gap is a convention which helps the in-kernel
- * unwinder reliably detect the end of the stack.
- */
- .long init_thread_union + THREAD_SIZE - SIZEOF_PTREGS - \
- TOP_OF_KERNEL_STACK_PADDING;
+/*
+ * The SIZEOF_PTREGS gap is a convention which helps the in-kernel unwinder
+ * reliably detect the end of the stack.
+ */
+SYM_DATA(initial_stack,
+ .long init_thread_union + THREAD_SIZE -
+ SIZEOF_PTREGS - TOP_OF_KERNEL_STACK_PADDING)
__INITRODATA
int_msg:
@@ -597,27 +606,28 @@ int_msg:
*/
.data
-.globl boot_gdt_descr
-
ALIGN
# early boot GDT descriptor (must use 1:1 address mapping)
.word 0 # 32 bit align gdt_desc.address
-boot_gdt_descr:
+SYM_DATA_START_LOCAL(boot_gdt_descr)
.word __BOOT_DS+7
.long boot_gdt - __PAGE_OFFSET
+SYM_DATA_END(boot_gdt_descr)
# boot GDT descriptor (later on used by CPU#0):
.word 0 # 32 bit align gdt_desc.address
-ENTRY(early_gdt_descr)
+SYM_DATA_START(early_gdt_descr)
.word GDT_ENTRIES*8-1
.long gdt_page /* Overwritten for secondary CPUs */
+SYM_DATA_END(early_gdt_descr)
/*
* The boot_gdt must mirror the equivalent in setup.S and is
* used only for booting.
*/
.align L1_CACHE_BYTES
-ENTRY(boot_gdt)
+SYM_DATA_START(boot_gdt)
.fill GDT_ENTRY_BOOT_CS,8,0
.quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
.quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
+SYM_DATA_END(boot_gdt)
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index f3d3e9646a99..4bbc770af632 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -49,8 +49,7 @@ L3_START_KERNEL = pud_index(__START_KERNEL_map)
.text
__HEAD
.code64
- .globl startup_64
-startup_64:
+SYM_CODE_START_NOALIGN(startup_64)
UNWIND_HINT_EMPTY
/*
* At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0,
@@ -90,7 +89,9 @@ startup_64:
/* Form the CR3 value being sure to include the CR3 modifier */
addq $(early_top_pgt - __START_KERNEL_map), %rax
jmp 1f
-ENTRY(secondary_startup_64)
+SYM_CODE_END(startup_64)
+
+SYM_CODE_START(secondary_startup_64)
UNWIND_HINT_EMPTY
/*
* At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0,
@@ -240,7 +241,7 @@ ENTRY(secondary_startup_64)
pushq %rax # target address in negative space
lretq
.Lafter_lret:
-END(secondary_startup_64)
+SYM_CODE_END(secondary_startup_64)
#include "verify_cpu.S"
@@ -250,30 +251,28 @@ END(secondary_startup_64)
* up already except stack. We just set up stack here. Then call
* start_secondary() via .Ljump_to_C_code.
*/
-ENTRY(start_cpu0)
+SYM_CODE_START(start_cpu0)
UNWIND_HINT_EMPTY
movq initial_stack(%rip), %rsp
jmp .Ljump_to_C_code
-END(start_cpu0)
+SYM_CODE_END(start_cpu0)
#endif
/* Both SMP bootup and ACPI suspend change these variables */
__REFDATA
.balign 8
- GLOBAL(initial_code)
- .quad x86_64_start_kernel
- GLOBAL(initial_gs)
- .quad INIT_PER_CPU_VAR(fixed_percpu_data)
- GLOBAL(initial_stack)
- /*
- * The SIZEOF_PTREGS gap is a convention which helps the in-kernel
- * unwinder reliably detect the end of the stack.
- */
- .quad init_thread_union + THREAD_SIZE - SIZEOF_PTREGS
+SYM_DATA(initial_code, .quad x86_64_start_kernel)
+SYM_DATA(initial_gs, .quad INIT_PER_CPU_VAR(fixed_percpu_data))
+
+/*
+ * The SIZEOF_PTREGS gap is a convention which helps the in-kernel unwinder
+ * reliably detect the end of the stack.
+ */
+SYM_DATA(initial_stack, .quad init_thread_union + THREAD_SIZE - SIZEOF_PTREGS)
__FINITDATA
__INIT
-ENTRY(early_idt_handler_array)
+SYM_CODE_START(early_idt_handler_array)
i = 0
.rept NUM_EXCEPTION_VECTORS
.if ((EXCEPTION_ERRCODE_MASK >> i) & 1) == 0
@@ -289,9 +288,9 @@ ENTRY(early_idt_handler_array)
.fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
.endr
UNWIND_HINT_IRET_REGS offset=16
-END(early_idt_handler_array)
+SYM_CODE_END(early_idt_handler_array)
-early_idt_handler_common:
+SYM_CODE_START_LOCAL(early_idt_handler_common)
/*
* The stack is the hardware frame, an error code or zero, and the
* vector number.
@@ -333,17 +332,11 @@ early_idt_handler_common:
20:
decl early_recursion_flag(%rip)
jmp restore_regs_and_return_to_kernel
-END(early_idt_handler_common)
+SYM_CODE_END(early_idt_handler_common)
- __INITDATA
- .balign 4
-GLOBAL(early_recursion_flag)
- .long 0
-
-#define NEXT_PAGE(name) \
- .balign PAGE_SIZE; \
-GLOBAL(name)
+#define SYM_DATA_START_PAGE_ALIGNED(name) \
+ SYM_START(name, SYM_L_GLOBAL, .balign PAGE_SIZE)
#ifdef CONFIG_PAGE_TABLE_ISOLATION
/*
@@ -358,11 +351,11 @@ GLOBAL(name)
*/
#define PTI_USER_PGD_FILL 512
/* This ensures they are 8k-aligned: */
-#define NEXT_PGD_PAGE(name) \
- .balign 2 * PAGE_SIZE; \
-GLOBAL(name)
+#define SYM_DATA_START_PTI_ALIGNED(name) \
+ SYM_START(name, SYM_L_GLOBAL, .balign 2 * PAGE_SIZE)
#else
-#define NEXT_PGD_PAGE(name) NEXT_PAGE(name)
+#define SYM_DATA_START_PTI_ALIGNED(name) \
+ SYM_DATA_START_PAGE_ALIGNED(name)
#define PTI_USER_PGD_FILL 0
#endif
@@ -375,17 +368,23 @@ GLOBAL(name)
.endr
__INITDATA
-NEXT_PGD_PAGE(early_top_pgt)
+ .balign 4
+
+SYM_DATA_START_PTI_ALIGNED(early_top_pgt)
.fill 512,8,0
.fill PTI_USER_PGD_FILL,8,0
+SYM_DATA_END(early_top_pgt)
-NEXT_PAGE(early_dynamic_pgts)
+SYM_DATA_START_PAGE_ALIGNED(early_dynamic_pgts)
.fill 512*EARLY_DYNAMIC_PAGE_TABLES,8,0
+SYM_DATA_END(early_dynamic_pgts)
+
+SYM_DATA(early_recursion_flag, .long 0)
.data
#if defined(CONFIG_XEN_PV) || defined(CONFIG_PVH)
-NEXT_PGD_PAGE(init_top_pgt)
+SYM_DATA_START_PTI_ALIGNED(init_top_pgt)
.quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
.org init_top_pgt + L4_PAGE_OFFSET*8, 0
.quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
@@ -393,11 +392,13 @@ NEXT_PGD_PAGE(init_top_pgt)
/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
.quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
.fill PTI_USER_PGD_FILL,8,0
+SYM_DATA_END(init_top_pgt)
-NEXT_PAGE(level3_ident_pgt)
+SYM_DATA_START_PAGE_ALIGNED(level3_ident_pgt)
.quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
.fill 511, 8, 0
-NEXT_PAGE(level2_ident_pgt)
+SYM_DATA_END(level3_ident_pgt)
+SYM_DATA_START_PAGE_ALIGNED(level2_ident_pgt)
/*
* Since I easily can, map the first 1G.
* Don't set NX because code runs from these pages.
@@ -407,25 +408,29 @@ NEXT_PAGE(level2_ident_pgt)
* the CPU should ignore the bit.
*/
PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
+SYM_DATA_END(level2_ident_pgt)
#else
-NEXT_PGD_PAGE(init_top_pgt)
+SYM_DATA_START_PTI_ALIGNED(init_top_pgt)
.fill 512,8,0
.fill PTI_USER_PGD_FILL,8,0
+SYM_DATA_END(init_top_pgt)
#endif
#ifdef CONFIG_X86_5LEVEL
-NEXT_PAGE(level4_kernel_pgt)
+SYM_DATA_START_PAGE_ALIGNED(level4_kernel_pgt)
.fill 511,8,0
.quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
+SYM_DATA_END(level4_kernel_pgt)
#endif
-NEXT_PAGE(level3_kernel_pgt)
+SYM_DATA_START_PAGE_ALIGNED(level3_kernel_pgt)
.fill L3_START_KERNEL,8,0
/* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */
.quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
.quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
+SYM_DATA_END(level3_kernel_pgt)
-NEXT_PAGE(level2_kernel_pgt)
+SYM_DATA_START_PAGE_ALIGNED(level2_kernel_pgt)
/*
* 512 MB kernel mapping. We spend a full page on this pagetable
* anyway.
@@ -442,8 +447,9 @@ NEXT_PAGE(level2_kernel_pgt)
*/
PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
KERNEL_IMAGE_SIZE/PMD_SIZE)
+SYM_DATA_END(level2_kernel_pgt)
-NEXT_PAGE(level2_fixmap_pgt)
+SYM_DATA_START_PAGE_ALIGNED(level2_fixmap_pgt)
.fill (512 - 4 - FIXMAP_PMD_NUM),8,0
pgtno = 0
.rept (FIXMAP_PMD_NUM)
@@ -453,31 +459,32 @@ NEXT_PAGE(level2_fixmap_pgt)
.endr
/* 6 MB reserved space + a 2MB hole */
.fill 4,8,0
+SYM_DATA_END(level2_fixmap_pgt)
-NEXT_PAGE(level1_fixmap_pgt)
+SYM_DATA_START_PAGE_ALIGNED(level1_fixmap_pgt)
.rept (FIXMAP_PMD_NUM)
.fill 512,8,0
.endr
+SYM_DATA_END(level1_fixmap_pgt)
#undef PMDS
.data
.align 16
- .globl early_gdt_descr
-early_gdt_descr:
- .word GDT_ENTRIES*8-1
-early_gdt_descr_base:
- .quad INIT_PER_CPU_VAR(gdt_page)
-
-ENTRY(phys_base)
- /* This must match the first entry in level2_kernel_pgt */
- .quad 0x0000000000000000
+
+SYM_DATA(early_gdt_descr, .word GDT_ENTRIES*8-1)
+SYM_DATA_LOCAL(early_gdt_descr_base, .quad INIT_PER_CPU_VAR(gdt_page))
+
+ .align 16
+/* This must match the first entry in level2_kernel_pgt */
+SYM_DATA(phys_base, .quad 0x0)
EXPORT_SYMBOL(phys_base)
#include "../../x86/xen/xen-head.S"
__PAGE_ALIGNED_BSS
-NEXT_PAGE(empty_zero_page)
+SYM_DATA_START_PAGE_ALIGNED(empty_zero_page)
.skip PAGE_SIZE
+SYM_DATA_END(empty_zero_page)
EXPORT_SYMBOL(empty_zero_page)
diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
index 61a89d3c0382..8abeee0dd7bf 100644
--- a/arch/x86/kernel/ioport.c
+++ b/arch/x86/kernel/ioport.c
@@ -3,32 +3,69 @@
* This contains the io-permission bitmap code - written by obz, with changes
* by Linus. 32/64 bits code unification by Miguel Botón.
*/
-
-#include <linux/sched.h>
-#include <linux/sched/task_stack.h>
-#include <linux/kernel.h>
#include <linux/capability.h>
-#include <linux/errno.h>
-#include <linux/types.h>
-#include <linux/ioport.h>
#include <linux/security.h>
-#include <linux/smp.h>
-#include <linux/stddef.h>
-#include <linux/slab.h>
-#include <linux/thread_info.h>
#include <linux/syscalls.h>
#include <linux/bitmap.h>
-#include <asm/syscalls.h>
+#include <linux/ioport.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+#include <asm/io_bitmap.h>
#include <asm/desc.h>
+#ifdef CONFIG_X86_IOPL_IOPERM
+
+static atomic64_t io_bitmap_sequence;
+
+void io_bitmap_share(struct task_struct *tsk)
+{
+ /* Can be NULL when current->thread.iopl_emul == 3 */
+ if (current->thread.io_bitmap) {
+ /*
+ * Take a refcount on current's bitmap. It can be used by
+ * both tasks as long as none of them changes the bitmap.
+ */
+ refcount_inc(&current->thread.io_bitmap->refcnt);
+ tsk->thread.io_bitmap = current->thread.io_bitmap;
+ }
+ set_tsk_thread_flag(tsk, TIF_IO_BITMAP);
+}
+
+static void task_update_io_bitmap(void)
+{
+ struct thread_struct *t = &current->thread;
+
+ if (t->iopl_emul == 3 || t->io_bitmap) {
+ /* TSS update is handled on exit to user space */
+ set_thread_flag(TIF_IO_BITMAP);
+ } else {
+ clear_thread_flag(TIF_IO_BITMAP);
+ /* Invalidate TSS */
+ preempt_disable();
+ tss_update_io_bitmap();
+ preempt_enable();
+ }
+}
+
+void io_bitmap_exit(void)
+{
+ struct io_bitmap *iobm = current->thread.io_bitmap;
+
+ current->thread.io_bitmap = NULL;
+ task_update_io_bitmap();
+ if (iobm && refcount_dec_and_test(&iobm->refcnt))
+ kfree(iobm);
+}
+
/*
- * this changes the io permissions bitmap in the current task.
+ * This changes the io permissions bitmap in the current task.
*/
long ksys_ioperm(unsigned long from, unsigned long num, int turn_on)
{
struct thread_struct *t = &current->thread;
- struct tss_struct *tss;
- unsigned int i, max_long, bytes, bytes_updated;
+ unsigned int i, max_long;
+ struct io_bitmap *iobm;
if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
return -EINVAL;
@@ -41,59 +78,72 @@ long ksys_ioperm(unsigned long from, unsigned long num, int turn_on)
* IO bitmap up. ioperm() is much less timing critical than clone(),
* this is why we delay this operation until now:
*/
- if (!t->io_bitmap_ptr) {
- unsigned long *bitmap = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
-
- if (!bitmap)
+ iobm = t->io_bitmap;
+ if (!iobm) {
+ /* No point in allocating a bitmap just to clear permissions */
+ if (!turn_on)
+ return 0;
+ iobm = kmalloc(sizeof(*iobm), GFP_KERNEL);
+ if (!iobm)
return -ENOMEM;
- memset(bitmap, 0xff, IO_BITMAP_BYTES);
- t->io_bitmap_ptr = bitmap;
- set_thread_flag(TIF_IO_BITMAP);
+ memset(iobm->bitmap, 0xff, sizeof(iobm->bitmap));
+ refcount_set(&iobm->refcnt, 1);
+ }
- /*
- * Now that we have an IO bitmap, we need our TSS limit to be
- * correct. It's fine if we are preempted after doing this:
- * with TIF_IO_BITMAP set, context switches will keep our TSS
- * limit correct.
- */
- preempt_disable();
- refresh_tss_limit();
- preempt_enable();
+ /*
+ * If the bitmap is not shared, then nothing can take a refcount as
+ * current can obviously not fork at the same time. If it is shared,
+ * duplicate it and drop the refcount on the original one.
+ */
+ if (refcount_read(&iobm->refcnt) > 1) {
+ iobm = kmemdup(iobm, sizeof(*iobm), GFP_KERNEL);
+ if (!iobm)
+ return -ENOMEM;
+ refcount_set(&iobm->refcnt, 1);
+ io_bitmap_exit();
}
/*
- * do it in the per-thread copy and in the TSS ...
- *
- * Disable preemption via get_cpu() - we must not switch away
- * because the ->io_bitmap_max value must match the bitmap
- * contents:
+ * Store the bitmap pointer (might be the same if the task already
+ * had one). Must be done here so that freeing the bitmap when all
+ * permissions are dropped has the pointer set up.
*/
- tss = &per_cpu(cpu_tss_rw, get_cpu());
+ t->io_bitmap = iobm;
+ /* Mark it active for context switching and exit to user mode */
+ set_thread_flag(TIF_IO_BITMAP);
+ /*
+ * Update the task's bitmap. The update of the TSS bitmap happens on
+ * exit to user mode. So this needs no protection.
+ */
if (turn_on)
- bitmap_clear(t->io_bitmap_ptr, from, num);
+ bitmap_clear(iobm->bitmap, from, num);
else
- bitmap_set(t->io_bitmap_ptr, from, num);
+ bitmap_set(iobm->bitmap, from, num);
/*
* Search for a (possibly new) maximum. This is simple and stupid,
* to keep it obviously correct:
*/
- max_long = 0;
- for (i = 0; i < IO_BITMAP_LONGS; i++)
- if (t->io_bitmap_ptr[i] != ~0UL)
+ max_long = UINT_MAX;
+ for (i = 0; i < IO_BITMAP_LONGS; i++) {
+ if (iobm->bitmap[i] != ~0UL)
max_long = i;
+ }
+ /* All permissions dropped? */
+ if (max_long == UINT_MAX) {
+ io_bitmap_exit();
+ return 0;
+ }
- bytes = (max_long + 1) * sizeof(unsigned long);
- bytes_updated = max(bytes, t->io_bitmap_max);
-
- t->io_bitmap_max = bytes;
-
- /* Update the TSS: */
- memcpy(tss->io_bitmap, t->io_bitmap_ptr, bytes_updated);
+ iobm->max = (max_long + 1) * sizeof(unsigned long);
- put_cpu();
+ /*
+ * Update the sequence number to force a TSS update on return to
+ * user mode.
+ */
+ iobm->sequence = atomic64_add_return(1, &io_bitmap_sequence);
return 0;
}
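The "simple and stupid" maximum search above treats the bitmap as inverted (a 0 bit grants access, a 1 bit denies it), so a word of all ones carries no permissions and UINT_MAX doubles as the "nothing left" marker that triggers freeing the bitmap. A standalone sketch of just that computation, with IO_BITMAP_LONGS derived from the 65536-port space as in the kernel:

#include <limits.h>
#include <stddef.h>

#define IO_BITMAP_LONGS (65536 / (8 * sizeof(unsigned long)))

/* Returns the number of bytes that still carry permissions, 0 if none. */
static size_t io_bitmap_active_bytes(const unsigned long *bitmap)
{
	unsigned int i, max_long = UINT_MAX;

	for (i = 0; i < IO_BITMAP_LONGS; i++) {
		if (bitmap[i] != ~0UL)
			max_long = i;
	}
	if (max_long == UINT_MAX)
		return 0;	/* all permissions dropped; bitmap can go */

	return (max_long + 1) * sizeof(unsigned long);
}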
@@ -104,38 +154,61 @@ SYSCALL_DEFINE3(ioperm, unsigned long, from, unsigned long, num, int, turn_on)
}
/*
- * sys_iopl has to be used when you want to access the IO ports
- * beyond the 0x3ff range: to get the full 65536 ports bitmapped
- * you'd need 8kB of bitmaps/process, which is a bit excessive.
+ * The sys_iopl functionality depends on the level argument, which, if
+ * granted for the task, enables access to all 65536 I/O ports.
+ *
+ * This does not use the IOPL mechanism provided by the CPU as that would
+ * also allow the user space task to use the CLI/STI instructions.
*
- * Here we just change the flags value on the stack: we allow
- * only the super-user to do it. This depends on the stack-layout
- * on system-call entry - see also fork() and the signal handling
- * code.
+ * Disabling interrupts in a user space task is dangerous as it might lock
+ * up the machine, and the semantics vs. syscalls and exceptions are
+ * undefined.
+ *
+ * Setting IOPL to level 0-2 disables I/O permissions. Level 3
+ * enables them.
+ *
+ * IOPL is strictly per thread and inherited on fork.
*/
SYSCALL_DEFINE1(iopl, unsigned int, level)
{
- struct pt_regs *regs = current_pt_regs();
struct thread_struct *t = &current->thread;
-
- /*
- * Careful: the IOPL bits in regs->flags are undefined under Xen PV
- * and changing them has no effect.
- */
- unsigned int old = t->iopl >> X86_EFLAGS_IOPL_BIT;
+ unsigned int old;
if (level > 3)
return -EINVAL;
+
+ old = t->iopl_emul;
+
+ /* No point in going further if nothing changes */
+ if (level == old)
+ return 0;
+
/* Trying to gain more privileges? */
if (level > old) {
if (!capable(CAP_SYS_RAWIO) ||
security_locked_down(LOCKDOWN_IOPORT))
return -EPERM;
}
- regs->flags = (regs->flags & ~X86_EFLAGS_IOPL) |
- (level << X86_EFLAGS_IOPL_BIT);
- t->iopl = level << X86_EFLAGS_IOPL_BIT;
- set_iopl_mask(t->iopl);
+
+ t->iopl_emul = level;
+ task_update_io_bitmap();
return 0;
}
+
+#else /* CONFIG_X86_IOPL_IOPERM */
+
+long ksys_ioperm(unsigned long from, unsigned long num, int turn_on)
+{
+ return -ENOSYS;
+}
+SYSCALL_DEFINE3(ioperm, unsigned long, from, unsigned long, num, int, turn_on)
+{
+ return -ENOSYS;
+}
+
+SYSCALL_DEFINE1(iopl, unsigned int, level)
+{
+ return -ENOSYS;
+}
+#endif
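With the emulation in place, iopl(3) is satisfied by giving the task a fully-permissive I/O bitmap rather than by raising the hardware IOPL, so port I/O keeps working while cli/sti from user space no longer does. A hedged user-space sketch of the observable difference (assumes x86-64, glibc, and root; the commented-out cli is what would now fault):

/*
 * Minimal sketch: under IOPL emulation, iopl(3) still grants access to
 * all 65536 ports, but cli - legal under a real hardware IOPL of 3 -
 * is expected to fault.
 */
#include <stdio.h>
#include <sys/io.h>

int main(void)
{
	if (iopl(3)) {
		perror("iopl");
		return 1;
	}
	outb(0x42, 0x80);	/* works: the bitmap grants every port */

	/* Under emulation this raises SIGSEGV instead of clearing IF: */
	/* __asm__ volatile("cli"); */
	return 0;
}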
diff --git a/arch/x86/kernel/irqflags.S b/arch/x86/kernel/irqflags.S
index ddeeaac8adda..0db0375235b4 100644
--- a/arch/x86/kernel/irqflags.S
+++ b/arch/x86/kernel/irqflags.S
@@ -7,20 +7,20 @@
/*
* unsigned long native_save_fl(void)
*/
-ENTRY(native_save_fl)
+SYM_FUNC_START(native_save_fl)
pushf
pop %_ASM_AX
ret
-ENDPROC(native_save_fl)
+SYM_FUNC_END(native_save_fl)
EXPORT_SYMBOL(native_save_fl)
/*
* void native_restore_fl(unsigned long flags)
* %eax/%rdi: flags
*/
-ENTRY(native_restore_fl)
+SYM_FUNC_START(native_restore_fl)
push %_ASM_ARG1
popf
ret
-ENDPROC(native_restore_fl)
+SYM_FUNC_END(native_restore_fl)
EXPORT_SYMBOL(native_restore_fl)
diff --git a/arch/x86/kernel/jailhouse.c b/arch/x86/kernel/jailhouse.c
index 3ad34f01de2a..6eb8b50ea07e 100644
--- a/arch/x86/kernel/jailhouse.c
+++ b/arch/x86/kernel/jailhouse.c
@@ -11,6 +11,7 @@
#include <linux/acpi_pmtmr.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
+#include <linux/serial_8250.h>
#include <asm/apic.h>
#include <asm/cpu.h>
#include <asm/hypervisor.h>
@@ -21,9 +22,24 @@
#include <asm/setup.h>
#include <asm/jailhouse_para.h>
-static __initdata struct jailhouse_setup_data setup_data;
+static struct jailhouse_setup_data setup_data;
+#define SETUP_DATA_V1_LEN (sizeof(setup_data.hdr) + sizeof(setup_data.v1))
+#define SETUP_DATA_V2_LEN (SETUP_DATA_V1_LEN + sizeof(setup_data.v2))
+
static unsigned int precalibrated_tsc_khz;
+static void jailhouse_setup_irq(unsigned int irq)
+{
+ struct mpc_intsrc mp_irq = {
+ .type = MP_INTSRC,
+ .irqtype = mp_INT,
+ .irqflag = MP_IRQPOL_ACTIVE_HIGH | MP_IRQTRIG_EDGE,
+ .srcbusirq = irq,
+ .dstirq = irq,
+ };
+ mp_save_irq(&mp_irq);
+}
+
static uint32_t jailhouse_cpuid_base(void)
{
if (boot_cpu_data.cpuid_level < 0 ||
@@ -45,7 +61,7 @@ static void jailhouse_get_wallclock(struct timespec64 *now)
static void __init jailhouse_timer_init(void)
{
- lapic_timer_period = setup_data.apic_khz * (1000 / HZ);
+ lapic_timer_period = setup_data.v1.apic_khz * (1000 / HZ);
}
static unsigned long jailhouse_get_tsc(void)
@@ -77,33 +93,28 @@ static void __init jailhouse_get_smp_config(unsigned int early)
.type = IOAPIC_DOMAIN_STRICT,
.ops = &mp_ioapic_irqdomain_ops,
};
- struct mpc_intsrc mp_irq = {
- .type = MP_INTSRC,
- .irqtype = mp_INT,
- .irqflag = MP_IRQPOL_ACTIVE_HIGH | MP_IRQTRIG_EDGE,
- };
unsigned int cpu;
jailhouse_x2apic_init();
register_lapic_address(0xfee00000);
- for (cpu = 0; cpu < setup_data.num_cpus; cpu++) {
- generic_processor_info(setup_data.cpu_ids[cpu],
+ for (cpu = 0; cpu < setup_data.v1.num_cpus; cpu++) {
+ generic_processor_info(setup_data.v1.cpu_ids[cpu],
boot_cpu_apic_version);
}
smp_found_config = 1;
- if (setup_data.standard_ioapic) {
+ if (setup_data.v1.standard_ioapic) {
mp_register_ioapic(0, 0xfec00000, gsi_top, &ioapic_cfg);
- /* Register 1:1 mapping for legacy UART IRQs 3 and 4 */
- mp_irq.srcbusirq = mp_irq.dstirq = 3;
- mp_save_irq(&mp_irq);
-
- mp_irq.srcbusirq = mp_irq.dstirq = 4;
- mp_save_irq(&mp_irq);
+ if (IS_ENABLED(CONFIG_SERIAL_8250) &&
+ setup_data.hdr.version < 2) {
+ /* Register 1:1 mapping for legacy UART IRQs 3 and 4 */
+ jailhouse_setup_irq(3);
+ jailhouse_setup_irq(4);
+ }
}
}
@@ -126,9 +137,9 @@ static int __init jailhouse_pci_arch_init(void)
pcibios_last_bus = 0xff;
#ifdef CONFIG_PCI_MMCONFIG
- if (setup_data.pci_mmconfig_base) {
+ if (setup_data.v1.pci_mmconfig_base) {
pci_mmconfig_add(0, 0, pcibios_last_bus,
- setup_data.pci_mmconfig_base);
+ setup_data.v1.pci_mmconfig_base);
pci_mmcfg_arch_init();
}
#endif
@@ -136,9 +147,57 @@ static int __init jailhouse_pci_arch_init(void)
return 0;
}
+#ifdef CONFIG_SERIAL_8250
+static inline bool jailhouse_uart_enabled(unsigned int uart_nr)
+{
+ return setup_data.v2.flags & BIT(uart_nr);
+}
+
+static void jailhouse_serial_fixup(int port, struct uart_port *up,
+ u32 *capabilities)
+{
+ static const u16 pcuart_base[] = {0x3f8, 0x2f8, 0x3e8, 0x2e8};
+ unsigned int n;
+
+ for (n = 0; n < ARRAY_SIZE(pcuart_base); n++) {
+ if (pcuart_base[n] != up->iobase)
+ continue;
+
+ if (jailhouse_uart_enabled(n)) {
+ pr_info("Enabling UART%u (port 0x%lx)\n", n,
+ up->iobase);
+ jailhouse_setup_irq(up->irq);
+ } else {
+ /* Deactivate UART if access isn't allowed */
+ up->iobase = 0;
+ }
+ break;
+ }
+}
+
+static void __init jailhouse_serial_workaround(void)
+{
+ /*
+ * Since setup data version 2, there are flags inside setup_data that
+ * indicate the availability of platform UARTs.
+ *
+ * In case of version 1, we don't know which UARTs belong to Linux. In
+ * this case, unconditionally register 1:1 mapping for legacy UART IRQs
+ * 3 and 4.
+ */
+ if (setup_data.hdr.version > 1)
+ serial8250_set_isa_configurator(jailhouse_serial_fixup);
+}
+#else /* !CONFIG_SERIAL_8250 */
+static inline void jailhouse_serial_workaround(void)
+{
+}
+#endif /* CONFIG_SERIAL_8250 */
+
static void __init jailhouse_init_platform(void)
{
u64 pa_data = boot_params.hdr.setup_data;
+ unsigned long setup_data_len;
struct setup_data header;
void *mapping;
@@ -163,16 +222,8 @@ static void __init jailhouse_init_platform(void)
memcpy(&header, mapping, sizeof(header));
early_memunmap(mapping, sizeof(header));
- if (header.type == SETUP_JAILHOUSE &&
- header.len >= sizeof(setup_data)) {
- pa_data += offsetof(struct setup_data, data);
-
- mapping = early_memremap(pa_data, sizeof(setup_data));
- memcpy(&setup_data, mapping, sizeof(setup_data));
- early_memunmap(mapping, sizeof(setup_data));
-
+ if (header.type == SETUP_JAILHOUSE)
break;
- }
pa_data = header.next;
}
@@ -180,13 +231,28 @@ static void __init jailhouse_init_platform(void)
if (!pa_data)
panic("Jailhouse: No valid setup data found");
- if (setup_data.compatible_version > JAILHOUSE_SETUP_REQUIRED_VERSION)
- panic("Jailhouse: Unsupported setup data structure");
-
- pmtmr_ioport = setup_data.pm_timer_address;
+ /* setup data must at least contain the header */
+ if (header.len < sizeof(setup_data.hdr))
+ goto unsupported;
+
+ pa_data += offsetof(struct setup_data, data);
+ setup_data_len = min_t(unsigned long, sizeof(setup_data),
+ (unsigned long)header.len);
+ mapping = early_memremap(pa_data, setup_data_len);
+ memcpy(&setup_data, mapping, setup_data_len);
+ early_memunmap(mapping, setup_data_len);
+
+ if (setup_data.hdr.version == 0 ||
+ setup_data.hdr.compatible_version !=
+ JAILHOUSE_SETUP_REQUIRED_VERSION ||
+ (setup_data.hdr.version == 1 && header.len < SETUP_DATA_V1_LEN) ||
+ (setup_data.hdr.version >= 2 && header.len < SETUP_DATA_V2_LEN))
+ goto unsupported;
+
+ pmtmr_ioport = setup_data.v1.pm_timer_address;
pr_debug("Jailhouse: PM-Timer IO Port: %#x\n", pmtmr_ioport);
- precalibrated_tsc_khz = setup_data.tsc_khz;
+ precalibrated_tsc_khz = setup_data.v1.tsc_khz;
setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ);
pci_probe = 0;
@@ -196,6 +262,12 @@ static void __init jailhouse_init_platform(void)
* are none in a non-root cell.
*/
disable_acpi();
+
+ jailhouse_serial_workaround();
+ return;
+
+unsupported:
+ panic("Jailhouse: Unsupported setup data structure");
}
bool jailhouse_paravirt(void)
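The validation above follows the usual pattern for versioned boot blobs: reject version 0, require an exact compatible_version match, and require the blob to be at least as long as every section its advertised version implies. A self-contained sketch with illustrative types (not the actual Jailhouse ABI):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct blob_hdr { uint16_t version; uint16_t compatible_version; };
struct blob_v1  { uint32_t a; };
struct blob_v2  { uint32_t flags; };
struct blob     { struct blob_hdr hdr; struct blob_v1 v1; struct blob_v2 v2; };

#define BLOB_V1_LEN (sizeof(struct blob_hdr) + sizeof(struct blob_v1))
#define BLOB_V2_LEN (BLOB_V1_LEN + sizeof(struct blob_v2))

static bool blob_supported(const struct blob *b, size_t len, uint16_t required)
{
	if (len < sizeof(b->hdr) || b->hdr.version == 0)
		return false;
	if (b->hdr.compatible_version != required)
		return false;
	/* each version must cover all sections it promises */
	if (b->hdr.version == 1 && len < BLOB_V1_LEN)
		return false;
	if (b->hdr.version >= 2 && len < BLOB_V2_LEN)
		return false;
	return true;
}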
diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c
index 044053235302..c1a8b9e71408 100644
--- a/arch/x86/kernel/jump_label.c
+++ b/arch/x86/kernel/jump_label.c
@@ -89,8 +89,7 @@ static void __ref __jump_label_transform(struct jump_entry *entry,
return;
}
- text_poke_bp((void *)jump_entry_code(entry), &code, JUMP_LABEL_NOP_SIZE,
- (void *)jump_entry_code(entry) + JUMP_LABEL_NOP_SIZE);
+ text_poke_bp((void *)jump_entry_code(entry), &code, JUMP_LABEL_NOP_SIZE, NULL);
}
void arch_jump_label_transform(struct jump_entry *entry,
@@ -147,11 +146,9 @@ bool arch_jump_label_transform_queue(struct jump_entry *entry,
}
__jump_label_set_jump_code(entry, type,
- (union jump_code_union *) &tp->opcode, 0);
+ (union jump_code_union *)&tp->text, 0);
- tp->addr = entry_code;
- tp->detour = entry_code + JUMP_LABEL_NOP_SIZE;
- tp->len = JUMP_LABEL_NOP_SIZE;
+ text_poke_loc_init(tp, entry_code, NULL, JUMP_LABEL_NOP_SIZE, NULL);
tp_vec_nr++;
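Both call sites above drop the old explicit detour address and pass NULL as the final text_poke_bp()/text_poke_loc_init() argument. The convention, sketched below with hypothetical names and a stubbed struct (the real struct text_poke_loc is internal to alternative.c), is that a NULL emulate pointer means "emulate the very bytes being installed":

#include <stddef.h>

struct poke_loc_sketch {
	void *addr;
	const void *emulate;
	size_t len;
};

static void poke_loc_init_sketch(struct poke_loc_sketch *tp, void *addr,
				 const void *opcode, size_t len,
				 const void *emulate)
{
	tp->addr = addr;
	tp->len = len;
	/* NULL emulate: the int3 handler emulates the new instruction */
	tp->emulate = emulate ? emulate : opcode;
}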
diff --git a/arch/x86/kernel/kdebugfs.c b/arch/x86/kernel/kdebugfs.c
index edaa30b20841..64b6da95af98 100644
--- a/arch/x86/kernel/kdebugfs.c
+++ b/arch/x86/kernel/kdebugfs.c
@@ -44,7 +44,12 @@ static ssize_t setup_data_read(struct file *file, char __user *user_buf,
if (count > node->len - pos)
count = node->len - pos;
- pa = node->paddr + sizeof(struct setup_data) + pos;
+ pa = node->paddr + pos;
+
+ /* Is this direct data or an invalid indirect entry? */
+ if (!(node->type & SETUP_INDIRECT) || node->type == SETUP_INDIRECT)
+ pa += sizeof(struct setup_data);
+
p = memremap(pa, count, MEMREMAP_WB);
if (!p)
return -ENOMEM;
@@ -108,9 +113,17 @@ static int __init create_setup_data_nodes(struct dentry *parent)
goto err_dir;
}
- node->paddr = pa_data;
- node->type = data->type;
- node->len = data->len;
+ if (data->type == SETUP_INDIRECT &&
+ ((struct setup_indirect *)data->data)->type != SETUP_INDIRECT) {
+ node->paddr = ((struct setup_indirect *)data->data)->addr;
+ node->type = ((struct setup_indirect *)data->data)->type;
+ node->len = ((struct setup_indirect *)data->data)->len;
+ } else {
+ node->paddr = pa_data;
+ node->type = data->type;
+ node->len = data->len;
+ }
+
create_setup_data_node(d, no, node);
pa_data = data->next;
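SETUP_INDIRECT is a flag bit in the entry type; a flagged entry's payload is a small descriptor pointing at the real data elsewhere in physical memory, and an inner type of exactly SETUP_INDIRECT is treated as invalid rather than followed. A hedged sketch of the resolution rule (field layout follows the x86 boot protocol; the function name is hypothetical):

#include <stdint.h>

#define SETUP_INDIRECT (1U << 31)

struct indirect_payload {
	uint32_t type;
	uint32_t reserved;
	uint64_t len;
	uint64_t addr;	/* physical address of the real payload */
};

/* Returns the physical address of the data to expose to the reader. */
static uint64_t resolve_entry(uint32_t type, uint64_t entry_pa,
			      uint64_t header_size,
			      const struct indirect_payload *p)
{
	if ((type & SETUP_INDIRECT) && p->type != SETUP_INDIRECT)
		return p->addr;		/* follow one level of indirection */

	return entry_pa + header_size;	/* direct (or invalid indirect) data */
}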
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 43fc13c831af..4f13af7cbcdb 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -351,6 +351,10 @@ int __copy_instruction(u8 *dest, u8 *src, u8 *real, struct insn *insn)
kernel_insn_init(insn, dest, MAX_INSN_SIZE);
insn_get_length(insn);
+ /* We cannot probe an instruction carrying a forced-emulation prefix */
+ if (insn_has_emulate_prefix(insn))
+ return 0;
+
/* Another subsystem puts a breakpoint, failed to recover */
if (insn->opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
return 0;
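The emulation prefix being tested for is the magic sequence that Xen and KVM place in front of forced-emulation sites: a ud2 (0x0f 0x0b) followed by the ASCII tag "xen" or "kvm". A hedged standalone sketch of the byte comparison (the kernel performs this check via insn_has_emulate_prefix() on the decoded instruction):

#include <stdbool.h>
#include <string.h>

static const unsigned char xen_prefix[] = { 0x0f, 0x0b, 'x', 'e', 'n' };
static const unsigned char kvm_prefix[] = { 0x0f, 0x0b, 'k', 'v', 'm' };

/* True if the instruction bytes start with a forced-emulation prefix. */
static bool has_emulate_prefix_sketch(const unsigned char *insn)
{
	return !memcmp(insn, xen_prefix, sizeof(xen_prefix)) ||
	       !memcmp(insn, kvm_prefix, sizeof(kvm_prefix));
}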
diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
index b348dd506d58..8900329c28a7 100644
--- a/arch/x86/kernel/kprobes/opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -437,8 +437,7 @@ void arch_optimize_kprobes(struct list_head *oplist)
insn_buff[0] = RELATIVEJUMP_OPCODE;
*(s32 *)(&insn_buff[1]) = rel;
- text_poke_bp(op->kp.addr, insn_buff, RELATIVEJUMP_SIZE,
- op->optinsn.insn);
+ text_poke_bp(op->kp.addr, insn_buff, RELATIVEJUMP_SIZE, NULL);
list_del_init(&op->list);
}
@@ -448,12 +447,18 @@ void arch_optimize_kprobes(struct list_head *oplist)
void arch_unoptimize_kprobe(struct optimized_kprobe *op)
{
u8 insn_buff[RELATIVEJUMP_SIZE];
+ u8 emulate_buff[RELATIVEJUMP_SIZE];
/* Set int3 to first byte for kprobes */
insn_buff[0] = BREAKPOINT_INSTRUCTION;
memcpy(insn_buff + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE);
+
+ emulate_buff[0] = RELATIVEJUMP_OPCODE;
+ *(s32 *)(&emulate_buff[1]) = (s32)((long)op->optinsn.insn -
+ ((long)op->kp.addr + RELATIVEJUMP_SIZE));
+
text_poke_bp(op->kp.addr, insn_buff, RELATIVEJUMP_SIZE,
- op->optinsn.insn);
+ emulate_buff);
}
/*
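The displacement stored into emulate_buff above follows the x86 encoding of a 5-byte near jmp (opcode 0xe9): the signed 32-bit offset is relative to the address of the instruction after the jmp, hence target - (addr + RELATIVEJUMP_SIZE). A minimal sketch of that encoding:

#include <stdint.h>
#include <string.h>

#define RELJMP_SIZE 5

static void encode_reljmp(unsigned char buf[RELJMP_SIZE],
			  unsigned long addr, unsigned long target)
{
	/* offset is measured from the byte following the jmp */
	int32_t rel = (int32_t)(target - (addr + RELJMP_SIZE));

	buf[0] = 0xe9;			/* near jmp rel32 */
	memcpy(&buf[1], &rel, sizeof(rel));
}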
diff --git a/arch/x86/kernel/ksysfs.c b/arch/x86/kernel/ksysfs.c
index 7969da939213..d0a19121c6a4 100644
--- a/arch/x86/kernel/ksysfs.c
+++ b/arch/x86/kernel/ksysfs.c
@@ -100,7 +100,12 @@ static int __init get_setup_data_size(int nr, size_t *size)
if (!data)
return -ENOMEM;
if (nr == i) {
- *size = data->len;
+ if (data->type == SETUP_INDIRECT &&
+ ((struct setup_indirect *)data->data)->type != SETUP_INDIRECT)
+ *size = ((struct setup_indirect *)data->data)->len;
+ else
+ *size = data->len;
+
memunmap(data);
return 0;
}
@@ -130,7 +135,10 @@ static ssize_t type_show(struct kobject *kobj,
if (!data)
return -ENOMEM;
- ret = sprintf(buf, "0x%x\n", data->type);
+ if (data->type == SETUP_INDIRECT)
+ ret = sprintf(buf, "0x%x\n", ((struct setup_indirect *)data->data)->type);
+ else
+ ret = sprintf(buf, "0x%x\n", data->type);
memunmap(data);
return ret;
}
@@ -142,7 +150,7 @@ static ssize_t setup_data_data_read(struct file *fp,
loff_t off, size_t count)
{
int nr, ret = 0;
- u64 paddr;
+ u64 paddr, len;
struct setup_data *data;
void *p;
@@ -157,19 +165,28 @@ static ssize_t setup_data_data_read(struct file *fp,
if (!data)
return -ENOMEM;
- if (off > data->len) {
+ if (data->type == SETUP_INDIRECT &&
+ ((struct setup_indirect *)data->data)->type != SETUP_INDIRECT) {
+ paddr = ((struct setup_indirect *)data->data)->addr;
+ len = ((struct setup_indirect *)data->data)->len;
+ } else {
+ paddr += sizeof(*data);
+ len = data->len;
+ }
+
+ if (off > len) {
ret = -EINVAL;
goto out;
}
- if (count > data->len - off)
- count = data->len - off;
+ if (count > len - off)
+ count = len - off;
if (!count)
goto out;
ret = count;
- p = memremap(paddr + sizeof(*data), data->len, MEMREMAP_WB);
+ p = memremap(paddr, len, MEMREMAP_WB);
if (!p) {
ret = -ENOMEM;
goto out;
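The off/count handling above is the standard clamp for a partial read against a payload whose length may now come from an indirect descriptor. A hedged sketch of just the bounds logic (where this returns -1, the kernel returns -EINVAL):

#include <stdint.h>

/* Clamp a read of "count" bytes at "off" against a payload of "len". */
static int64_t clamp_read(int64_t off, uint64_t count, uint64_t len)
{
	if (off < 0 || (uint64_t)off > len)
		return -1;
	if (count > len - (uint64_t)off)
		count = len - (uint64_t)off;
	return (int64_t)count;
}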
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index e820568ed4d5..32ef1ee733b7 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -33,6 +33,7 @@
#include <asm/apicdef.h>
#include <asm/hypervisor.h>
#include <asm/tlb.h>
+#include <asm/cpuidle_haltpoll.h>
static int kvmapf = 1;
diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
index 5dcd438ad8f2..16e125a50b33 100644
--- a/arch/x86/kernel/machine_kexec_64.c
+++ b/arch/x86/kernel/machine_kexec_64.c
@@ -298,48 +298,6 @@ static void load_segments(void)
);
}
-#ifdef CONFIG_KEXEC_FILE
-/* Update purgatory as needed after various image segments have been prepared */
-static int arch_update_purgatory(struct kimage *image)
-{
- int ret = 0;
-
- if (!image->file_mode)
- return 0;
-
- /* Setup copying of backup region */
- if (image->type == KEXEC_TYPE_CRASH) {
- ret = kexec_purgatory_get_set_symbol(image,
- "purgatory_backup_dest",
- &image->arch.backup_load_addr,
- sizeof(image->arch.backup_load_addr), 0);
- if (ret)
- return ret;
-
- ret = kexec_purgatory_get_set_symbol(image,
- "purgatory_backup_src",
- &image->arch.backup_src_start,
- sizeof(image->arch.backup_src_start), 0);
- if (ret)
- return ret;
-
- ret = kexec_purgatory_get_set_symbol(image,
- "purgatory_backup_sz",
- &image->arch.backup_src_sz,
- sizeof(image->arch.backup_src_sz), 0);
- if (ret)
- return ret;
- }
-
- return ret;
-}
-#else /* !CONFIG_KEXEC_FILE */
-static inline int arch_update_purgatory(struct kimage *image)
-{
- return 0;
-}
-#endif /* CONFIG_KEXEC_FILE */
-
int machine_kexec_prepare(struct kimage *image)
{
unsigned long start_pgtable;
@@ -353,11 +311,6 @@ int machine_kexec_prepare(struct kimage *image)
if (result)
return result;
- /* update purgatory as needed */
- result = arch_update_purgatory(image);
- if (result)
- return result;
-
return 0;
}
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 59d3d2763a9e..789f5e4f89de 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -341,8 +341,6 @@ struct paravirt_patch_template pv_ops = {
.cpu.iret = native_iret,
.cpu.swapgs = native_swapgs,
- .cpu.set_iopl_mask = native_set_iopl_mask,
-
.cpu.start_context_switch = paravirt_nop,
.cpu.end_context_switch = paravirt_nop,
diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
deleted file mode 100644
index 23fdec030c37..000000000000
--- a/arch/x86/kernel/pci-calgary_64.c
+++ /dev/null
@@ -1,1586 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Derived from arch/powerpc/kernel/iommu.c
- *
- * Copyright IBM Corporation, 2006-2007
- * Copyright (C) 2006 Jon Mason <jdmason@kudzu.us>
- *
- * Author: Jon Mason <jdmason@kudzu.us>
- * Author: Muli Ben-Yehuda <muli@il.ibm.com>
-
- */
-
-#define pr_fmt(fmt) "Calgary: " fmt
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/slab.h>
-#include <linux/mm.h>
-#include <linux/spinlock.h>
-#include <linux/string.h>
-#include <linux/crash_dump.h>
-#include <linux/dma-mapping.h>
-#include <linux/dma-direct.h>
-#include <linux/bitmap.h>
-#include <linux/pci_ids.h>
-#include <linux/pci.h>
-#include <linux/delay.h>
-#include <linux/scatterlist.h>
-#include <linux/iommu-helper.h>
-
-#include <asm/iommu.h>
-#include <asm/calgary.h>
-#include <asm/tce.h>
-#include <asm/pci-direct.h>
-#include <asm/dma.h>
-#include <asm/rio.h>
-#include <asm/bios_ebda.h>
-#include <asm/x86_init.h>
-#include <asm/iommu_table.h>
-
-#ifdef CONFIG_CALGARY_IOMMU_ENABLED_BY_DEFAULT
-int use_calgary __read_mostly = 1;
-#else
-int use_calgary __read_mostly = 0;
-#endif /* CONFIG_CALGARY_DEFAULT_ENABLED */
-
-#define PCI_DEVICE_ID_IBM_CALGARY 0x02a1
-#define PCI_DEVICE_ID_IBM_CALIOC2 0x0308
-
-/* register offsets inside the host bridge space */
-#define CALGARY_CONFIG_REG 0x0108
-#define PHB_CSR_OFFSET 0x0110 /* Channel Status */
-#define PHB_PLSSR_OFFSET 0x0120
-#define PHB_CONFIG_RW_OFFSET 0x0160
-#define PHB_IOBASE_BAR_LOW 0x0170
-#define PHB_IOBASE_BAR_HIGH 0x0180
-#define PHB_MEM_1_LOW 0x0190
-#define PHB_MEM_1_HIGH 0x01A0
-#define PHB_IO_ADDR_SIZE 0x01B0
-#define PHB_MEM_1_SIZE 0x01C0
-#define PHB_MEM_ST_OFFSET 0x01D0
-#define PHB_AER_OFFSET 0x0200
-#define PHB_CONFIG_0_HIGH 0x0220
-#define PHB_CONFIG_0_LOW 0x0230
-#define PHB_CONFIG_0_END 0x0240
-#define PHB_MEM_2_LOW 0x02B0
-#define PHB_MEM_2_HIGH 0x02C0
-#define PHB_MEM_2_SIZE_HIGH 0x02D0
-#define PHB_MEM_2_SIZE_LOW 0x02E0
-#define PHB_DOSHOLE_OFFSET 0x08E0
-
-/* CalIOC2 specific */
-#define PHB_SAVIOR_L2 0x0DB0
-#define PHB_PAGE_MIG_CTRL 0x0DA8
-#define PHB_PAGE_MIG_DEBUG 0x0DA0
-#define PHB_ROOT_COMPLEX_STATUS 0x0CB0
-
-/* PHB_CONFIG_RW */
-#define PHB_TCE_ENABLE 0x20000000
-#define PHB_SLOT_DISABLE 0x1C000000
-#define PHB_DAC_DISABLE 0x01000000
-#define PHB_MEM2_ENABLE 0x00400000
-#define PHB_MCSR_ENABLE 0x00100000
-/* TAR (Table Address Register) */
-#define TAR_SW_BITS 0x0000ffffffff800fUL
-#define TAR_VALID 0x0000000000000008UL
-/* CSR (Channel/DMA Status Register) */
-#define CSR_AGENT_MASK 0xffe0ffff
-/* CCR (Calgary Configuration Register) */
-#define CCR_2SEC_TIMEOUT 0x000000000000000EUL
-/* PMCR/PMDR (Page Migration Control/Debug Registers */
-#define PMR_SOFTSTOP 0x80000000
-#define PMR_SOFTSTOPFAULT 0x40000000
-#define PMR_HARDSTOP 0x20000000
-
-/*
- * The maximum PHB bus number.
- * x3950M2 (rare): 8 chassis, 48 PHBs per chassis = 384
- * x3950M2: 4 chassis, 48 PHBs per chassis = 192
- * x3950 (PCIE): 8 chassis, 32 PHBs per chassis = 256
- * x3950 (PCIX): 8 chassis, 16 PHBs per chassis = 128
- */
-#define MAX_PHB_BUS_NUM 256
-
-#define PHBS_PER_CALGARY 4
-
-/* register offsets in Calgary's internal register space */
-static const unsigned long tar_offsets[] = {
- 0x0580 /* TAR0 */,
- 0x0588 /* TAR1 */,
- 0x0590 /* TAR2 */,
- 0x0598 /* TAR3 */
-};
-
-static const unsigned long split_queue_offsets[] = {
- 0x4870 /* SPLIT QUEUE 0 */,
- 0x5870 /* SPLIT QUEUE 1 */,
- 0x6870 /* SPLIT QUEUE 2 */,
- 0x7870 /* SPLIT QUEUE 3 */
-};
-
-static const unsigned long phb_offsets[] = {
- 0x8000 /* PHB0 */,
- 0x9000 /* PHB1 */,
- 0xA000 /* PHB2 */,
- 0xB000 /* PHB3 */
-};
-
-/* PHB debug registers */
-
-static const unsigned long phb_debug_offsets[] = {
- 0x4000 /* PHB 0 DEBUG */,
- 0x5000 /* PHB 1 DEBUG */,
- 0x6000 /* PHB 2 DEBUG */,
- 0x7000 /* PHB 3 DEBUG */
-};
-
-/*
- * STUFF register for each debug PHB,
- * byte 1 = start bus number, byte 2 = end bus number
- */
-
-#define PHB_DEBUG_STUFF_OFFSET 0x0020
-
-unsigned int specified_table_size = TCE_TABLE_SIZE_UNSPECIFIED;
-static int translate_empty_slots __read_mostly = 0;
-static int calgary_detected __read_mostly = 0;
-
-static struct rio_table_hdr *rio_table_hdr __initdata;
-static struct scal_detail *scal_devs[MAX_NUMNODES] __initdata;
-static struct rio_detail *rio_devs[MAX_NUMNODES * 4] __initdata;
-
-struct calgary_bus_info {
- void *tce_space;
- unsigned char translation_disabled;
- signed char phbid;
- void __iomem *bbar;
-};
-
-static void calgary_handle_quirks(struct iommu_table *tbl, struct pci_dev *dev);
-static void calgary_tce_cache_blast(struct iommu_table *tbl);
-static void calgary_dump_error_regs(struct iommu_table *tbl);
-static void calioc2_handle_quirks(struct iommu_table *tbl, struct pci_dev *dev);
-static void calioc2_tce_cache_blast(struct iommu_table *tbl);
-static void calioc2_dump_error_regs(struct iommu_table *tbl);
-static void calgary_init_bitmap_from_tce_table(struct iommu_table *tbl);
-static void get_tce_space_from_tar(void);
-
-static const struct cal_chipset_ops calgary_chip_ops = {
- .handle_quirks = calgary_handle_quirks,
- .tce_cache_blast = calgary_tce_cache_blast,
- .dump_error_regs = calgary_dump_error_regs
-};
-
-static const struct cal_chipset_ops calioc2_chip_ops = {
- .handle_quirks = calioc2_handle_quirks,
- .tce_cache_blast = calioc2_tce_cache_blast,
- .dump_error_regs = calioc2_dump_error_regs
-};
-
-static struct calgary_bus_info bus_info[MAX_PHB_BUS_NUM] = { { NULL, 0, 0 }, };
-
-static inline int translation_enabled(struct iommu_table *tbl)
-{
- /* only PHBs with translation enabled have an IOMMU table */
- return (tbl != NULL);
-}
-
-static void iommu_range_reserve(struct iommu_table *tbl,
- unsigned long start_addr, unsigned int npages)
-{
- unsigned long index;
- unsigned long end;
- unsigned long flags;
-
- index = start_addr >> PAGE_SHIFT;
-
- /* bail out if we're asked to reserve a region we don't cover */
- if (index >= tbl->it_size)
- return;
-
- end = index + npages;
- if (end > tbl->it_size) /* don't go off the table */
- end = tbl->it_size;
-
- spin_lock_irqsave(&tbl->it_lock, flags);
-
- bitmap_set(tbl->it_map, index, npages);
-
- spin_unlock_irqrestore(&tbl->it_lock, flags);
-}
-
-static unsigned long iommu_range_alloc(struct device *dev,
- struct iommu_table *tbl,
- unsigned int npages)
-{
- unsigned long flags;
- unsigned long offset;
- unsigned long boundary_size;
-
- boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
- PAGE_SIZE) >> PAGE_SHIFT;
-
- BUG_ON(npages == 0);
-
- spin_lock_irqsave(&tbl->it_lock, flags);
-
- offset = iommu_area_alloc(tbl->it_map, tbl->it_size, tbl->it_hint,
- npages, 0, boundary_size, 0);
- if (offset == ~0UL) {
- tbl->chip_ops->tce_cache_blast(tbl);
-
- offset = iommu_area_alloc(tbl->it_map, tbl->it_size, 0,
- npages, 0, boundary_size, 0);
- if (offset == ~0UL) {
- pr_warn("IOMMU full\n");
- spin_unlock_irqrestore(&tbl->it_lock, flags);
- if (panic_on_overflow)
- panic("Calgary: fix the allocator.\n");
- else
- return DMA_MAPPING_ERROR;
- }
- }
-
- tbl->it_hint = offset + npages;
- BUG_ON(tbl->it_hint > tbl->it_size);
-
- spin_unlock_irqrestore(&tbl->it_lock, flags);
-
- return offset;
-}
-
-static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
- void *vaddr, unsigned int npages, int direction)
-{
- unsigned long entry;
- dma_addr_t ret;
-
- entry = iommu_range_alloc(dev, tbl, npages);
- if (unlikely(entry == DMA_MAPPING_ERROR)) {
- pr_warn("failed to allocate %u pages in iommu %p\n",
- npages, tbl);
- return DMA_MAPPING_ERROR;
- }
-
- /* set the return dma address */
- ret = (entry << PAGE_SHIFT) | ((unsigned long)vaddr & ~PAGE_MASK);
-
- /* put the TCEs in the HW table */
- tce_build(tbl, entry, npages, (unsigned long)vaddr & PAGE_MASK,
- direction);
- return ret;
-}
-
-static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
- unsigned int npages)
-{
- unsigned long entry;
- unsigned long flags;
-
- /* were we called with bad_dma_address? */
- if (unlikely(dma_addr == DMA_MAPPING_ERROR)) {
- WARN(1, KERN_ERR "Calgary: driver tried unmapping bad DMA "
- "address 0x%Lx\n", dma_addr);
- return;
- }
-
- entry = dma_addr >> PAGE_SHIFT;
-
- BUG_ON(entry + npages > tbl->it_size);
-
- tce_free(tbl, entry, npages);
-
- spin_lock_irqsave(&tbl->it_lock, flags);
-
- bitmap_clear(tbl->it_map, entry, npages);
-
- spin_unlock_irqrestore(&tbl->it_lock, flags);
-}
-
-static inline struct iommu_table *find_iommu_table(struct device *dev)
-{
- struct pci_dev *pdev;
- struct pci_bus *pbus;
- struct iommu_table *tbl;
-
- pdev = to_pci_dev(dev);
-
- /* search up the device tree for an iommu */
- pbus = pdev->bus;
- do {
- tbl = pci_iommu(pbus);
- if (tbl && tbl->it_busno == pbus->number)
- break;
- tbl = NULL;
- pbus = pbus->parent;
- } while (pbus);
-
- BUG_ON(tbl && (tbl->it_busno != pbus->number));
-
- return tbl;
-}
-
-static void calgary_unmap_sg(struct device *dev, struct scatterlist *sglist,
- int nelems,enum dma_data_direction dir,
- unsigned long attrs)
-{
- struct iommu_table *tbl = find_iommu_table(dev);
- struct scatterlist *s;
- int i;
-
- if (!translation_enabled(tbl))
- return;
-
- for_each_sg(sglist, s, nelems, i) {
- unsigned int npages;
- dma_addr_t dma = s->dma_address;
- unsigned int dmalen = s->dma_length;
-
- if (dmalen == 0)
- break;
-
- npages = iommu_num_pages(dma, dmalen, PAGE_SIZE);
- iommu_free(tbl, dma, npages);
- }
-}
-
-static int calgary_map_sg(struct device *dev, struct scatterlist *sg,
- int nelems, enum dma_data_direction dir,
- unsigned long attrs)
-{
- struct iommu_table *tbl = find_iommu_table(dev);
- struct scatterlist *s;
- unsigned long vaddr;
- unsigned int npages;
- unsigned long entry;
- int i;
-
- for_each_sg(sg, s, nelems, i) {
- BUG_ON(!sg_page(s));
-
- vaddr = (unsigned long) sg_virt(s);
- npages = iommu_num_pages(vaddr, s->length, PAGE_SIZE);
-
- entry = iommu_range_alloc(dev, tbl, npages);
- if (entry == DMA_MAPPING_ERROR) {
- /* makes sure unmap knows to stop */
- s->dma_length = 0;
- goto error;
- }
-
- s->dma_address = (entry << PAGE_SHIFT) | s->offset;
-
- /* insert into HW table */
- tce_build(tbl, entry, npages, vaddr & PAGE_MASK, dir);
-
- s->dma_length = s->length;
- }
-
- return nelems;
-error:
- calgary_unmap_sg(dev, sg, nelems, dir, 0);
- for_each_sg(sg, s, nelems, i) {
- sg->dma_address = DMA_MAPPING_ERROR;
- sg->dma_length = 0;
- }
- return 0;
-}
-
-static dma_addr_t calgary_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction dir,
- unsigned long attrs)
-{
- void *vaddr = page_address(page) + offset;
- unsigned long uaddr;
- unsigned int npages;
- struct iommu_table *tbl = find_iommu_table(dev);
-
- uaddr = (unsigned long)vaddr;
- npages = iommu_num_pages(uaddr, size, PAGE_SIZE);
-
- return iommu_alloc(dev, tbl, vaddr, npages, dir);
-}
-
-static void calgary_unmap_page(struct device *dev, dma_addr_t dma_addr,
- size_t size, enum dma_data_direction dir,
- unsigned long attrs)
-{
- struct iommu_table *tbl = find_iommu_table(dev);
- unsigned int npages;
-
- npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
- iommu_free(tbl, dma_addr, npages);
-}
-
-static void* calgary_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
-{
- void *ret = NULL;
- dma_addr_t mapping;
- unsigned int npages, order;
- struct iommu_table *tbl = find_iommu_table(dev);
-
- size = PAGE_ALIGN(size); /* size rounded up to full pages */
- npages = size >> PAGE_SHIFT;
- order = get_order(size);
-
- /* alloc enough pages (and possibly more) */
- ret = (void *)__get_free_pages(flag, order);
- if (!ret)
- goto error;
- memset(ret, 0, size);
-
- /* set up tces to cover the allocated range */
- mapping = iommu_alloc(dev, tbl, ret, npages, DMA_BIDIRECTIONAL);
- if (mapping == DMA_MAPPING_ERROR)
- goto free;
- *dma_handle = mapping;
- return ret;
-free:
- free_pages((unsigned long)ret, get_order(size));
- ret = NULL;
-error:
- return ret;
-}
-
-static void calgary_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle,
- unsigned long attrs)
-{
- unsigned int npages;
- struct iommu_table *tbl = find_iommu_table(dev);
-
- size = PAGE_ALIGN(size);
- npages = size >> PAGE_SHIFT;
-
- iommu_free(tbl, dma_handle, npages);
- free_pages((unsigned long)vaddr, get_order(size));
-}
-
-static const struct dma_map_ops calgary_dma_ops = {
- .alloc = calgary_alloc_coherent,
- .free = calgary_free_coherent,
- .map_sg = calgary_map_sg,
- .unmap_sg = calgary_unmap_sg,
- .map_page = calgary_map_page,
- .unmap_page = calgary_unmap_page,
- .dma_supported = dma_direct_supported,
- .mmap = dma_common_mmap,
- .get_sgtable = dma_common_get_sgtable,
-};
-
-static inline void __iomem * busno_to_bbar(unsigned char num)
-{
- return bus_info[num].bbar;
-}
-
-static inline int busno_to_phbid(unsigned char num)
-{
- return bus_info[num].phbid;
-}
-
-static inline unsigned long split_queue_offset(unsigned char num)
-{
- size_t idx = busno_to_phbid(num);
-
- return split_queue_offsets[idx];
-}
-
-static inline unsigned long tar_offset(unsigned char num)
-{
- size_t idx = busno_to_phbid(num);
-
- return tar_offsets[idx];
-}
-
-static inline unsigned long phb_offset(unsigned char num)
-{
- size_t idx = busno_to_phbid(num);
-
- return phb_offsets[idx];
-}
-
-static inline void __iomem* calgary_reg(void __iomem *bar, unsigned long offset)
-{
- unsigned long target = ((unsigned long)bar) | offset;
- return (void __iomem*)target;
-}
-
-static inline int is_calioc2(unsigned short device)
-{
- return (device == PCI_DEVICE_ID_IBM_CALIOC2);
-}
-
-static inline int is_calgary(unsigned short device)
-{
- return (device == PCI_DEVICE_ID_IBM_CALGARY);
-}
-
-static inline int is_cal_pci_dev(unsigned short device)
-{
- return (is_calgary(device) || is_calioc2(device));
-}
-
-static void calgary_tce_cache_blast(struct iommu_table *tbl)
-{
- u64 val;
- u32 aer;
- int i = 0;
- void __iomem *bbar = tbl->bbar;
- void __iomem *target;
-
- /* disable arbitration on the bus */
- target = calgary_reg(bbar, phb_offset(tbl->it_busno) | PHB_AER_OFFSET);
- aer = readl(target);
- writel(0, target);
-
- /* read plssr to ensure it got there */
- target = calgary_reg(bbar, phb_offset(tbl->it_busno) | PHB_PLSSR_OFFSET);
- val = readl(target);
-
- /* poll split queues until all DMA activity is done */
- target = calgary_reg(bbar, split_queue_offset(tbl->it_busno));
- do {
- val = readq(target);
- i++;
- } while ((val & 0xff) != 0xff && i < 100);
- if (i == 100)
- pr_warn("PCI bus not quiesced, continuing anyway\n");
-
- /* invalidate TCE cache */
- target = calgary_reg(bbar, tar_offset(tbl->it_busno));
- writeq(tbl->tar_val, target);
-
- /* enable arbitration */
- target = calgary_reg(bbar, phb_offset(tbl->it_busno) | PHB_AER_OFFSET);
- writel(aer, target);
- (void)readl(target); /* flush */
-}
-
-static void calioc2_tce_cache_blast(struct iommu_table *tbl)
-{
- void __iomem *bbar = tbl->bbar;
- void __iomem *target;
- u64 val64;
- u32 val;
- int i = 0;
- int count = 1;
- unsigned char bus = tbl->it_busno;
-
-begin:
- printk(KERN_DEBUG "Calgary: CalIOC2 bus 0x%x entering tce cache blast "
- "sequence - count %d\n", bus, count);
-
- /* 1. using the Page Migration Control reg set SoftStop */
- target = calgary_reg(bbar, phb_offset(bus) | PHB_PAGE_MIG_CTRL);
- val = be32_to_cpu(readl(target));
- printk(KERN_DEBUG "1a. read 0x%x [LE] from %p\n", val, target);
- val |= PMR_SOFTSTOP;
- printk(KERN_DEBUG "1b. writing 0x%x [LE] to %p\n", val, target);
- writel(cpu_to_be32(val), target);
-
- /* 2. poll split queues until all DMA activity is done */
- printk(KERN_DEBUG "2a. starting to poll split queues\n");
- target = calgary_reg(bbar, split_queue_offset(bus));
- do {
- val64 = readq(target);
- i++;
- } while ((val64 & 0xff) != 0xff && i < 100);
- if (i == 100)
- pr_warn("CalIOC2: PCI bus not quiesced, continuing anyway\n");
-
- /* 3. poll Page Migration DEBUG for SoftStopFault */
- target = calgary_reg(bbar, phb_offset(bus) | PHB_PAGE_MIG_DEBUG);
- val = be32_to_cpu(readl(target));
- printk(KERN_DEBUG "3. read 0x%x [LE] from %p\n", val, target);
-
- /* 4. if SoftStopFault - goto (1) */
- if (val & PMR_SOFTSTOPFAULT) {
- if (++count < 100)
- goto begin;
- else {
- pr_warn("CalIOC2: too many SoftStopFaults, aborting TCE cache flush sequence!\n");
- return; /* pray for the best */
- }
- }
-
- /* 5. Slam into HardStop by reading PHB_PAGE_MIG_CTRL */
- target = calgary_reg(bbar, phb_offset(bus) | PHB_PAGE_MIG_CTRL);
- printk(KERN_DEBUG "5a. slamming into HardStop by reading %p\n", target);
- val = be32_to_cpu(readl(target));
- printk(KERN_DEBUG "5b. read 0x%x [LE] from %p\n", val, target);
- target = calgary_reg(bbar, phb_offset(bus) | PHB_PAGE_MIG_DEBUG);
- val = be32_to_cpu(readl(target));
- printk(KERN_DEBUG "5c. read 0x%x [LE] from %p (debug)\n", val, target);
-
- /* 6. invalidate TCE cache */
- printk(KERN_DEBUG "6. invalidating TCE cache\n");
- target = calgary_reg(bbar, tar_offset(bus));
- writeq(tbl->tar_val, target);
-
- /* 7. Re-read PMCR */
- printk(KERN_DEBUG "7a. Re-reading PMCR\n");
- target = calgary_reg(bbar, phb_offset(bus) | PHB_PAGE_MIG_CTRL);
- val = be32_to_cpu(readl(target));
- printk(KERN_DEBUG "7b. read 0x%x [LE] from %p\n", val, target);
-
- /* 8. Remove HardStop */
- printk(KERN_DEBUG "8a. removing HardStop from PMCR\n");
- target = calgary_reg(bbar, phb_offset(bus) | PHB_PAGE_MIG_CTRL);
- val = 0;
- printk(KERN_DEBUG "8b. writing 0x%x [LE] to %p\n", val, target);
- writel(cpu_to_be32(val), target);
- val = be32_to_cpu(readl(target));
- printk(KERN_DEBUG "8c. read 0x%x [LE] from %p\n", val, target);
-}
-
-static void __init calgary_reserve_mem_region(struct pci_dev *dev, u64 start,
- u64 limit)
-{
- unsigned int numpages;
-
- limit = limit | 0xfffff;
- limit++;
-
- numpages = ((limit - start) >> PAGE_SHIFT);
- iommu_range_reserve(pci_iommu(dev->bus), start, numpages);
-}
-
-static void __init calgary_reserve_peripheral_mem_1(struct pci_dev *dev)
-{
- void __iomem *target;
- u64 low, high, sizelow;
- u64 start, limit;
- struct iommu_table *tbl = pci_iommu(dev->bus);
- unsigned char busnum = dev->bus->number;
- void __iomem *bbar = tbl->bbar;
-
- /* peripheral MEM_1 region */
- target = calgary_reg(bbar, phb_offset(busnum) | PHB_MEM_1_LOW);
- low = be32_to_cpu(readl(target));
- target = calgary_reg(bbar, phb_offset(busnum) | PHB_MEM_1_HIGH);
- high = be32_to_cpu(readl(target));
- target = calgary_reg(bbar, phb_offset(busnum) | PHB_MEM_1_SIZE);
- sizelow = be32_to_cpu(readl(target));
-
- start = (high << 32) | low;
- limit = sizelow;
-
- calgary_reserve_mem_region(dev, start, limit);
-}
-
-static void __init calgary_reserve_peripheral_mem_2(struct pci_dev *dev)
-{
- void __iomem *target;
- u32 val32;
- u64 low, high, sizelow, sizehigh;
- u64 start, limit;
- struct iommu_table *tbl = pci_iommu(dev->bus);
- unsigned char busnum = dev->bus->number;
- void __iomem *bbar = tbl->bbar;
-
- /* is it enabled? */
- target = calgary_reg(bbar, phb_offset(busnum) | PHB_CONFIG_RW_OFFSET);
- val32 = be32_to_cpu(readl(target));
- if (!(val32 & PHB_MEM2_ENABLE))
- return;
-
- target = calgary_reg(bbar, phb_offset(busnum) | PHB_MEM_2_LOW);
- low = be32_to_cpu(readl(target));
- target = calgary_reg(bbar, phb_offset(busnum) | PHB_MEM_2_HIGH);
- high = be32_to_cpu(readl(target));
- target = calgary_reg(bbar, phb_offset(busnum) | PHB_MEM_2_SIZE_LOW);
- sizelow = be32_to_cpu(readl(target));
- target = calgary_reg(bbar, phb_offset(busnum) | PHB_MEM_2_SIZE_HIGH);
- sizehigh = be32_to_cpu(readl(target));
-
- start = (high << 32) | low;
- limit = (sizehigh << 32) | sizelow;
-
- calgary_reserve_mem_region(dev, start, limit);
-}
-
-/*
- * some regions of the IO address space do not get translated, so we
- * must not give devices IO addresses in those regions. The regions
- * are the 640KB-1MB region and the two PCI peripheral memory holes.
- * Reserve all of them in the IOMMU bitmap to avoid giving them out
- * later.
- */
-static void __init calgary_reserve_regions(struct pci_dev *dev)
-{
- unsigned int npages;
- u64 start;
- struct iommu_table *tbl = pci_iommu(dev->bus);
-
- /* avoid the BIOS/VGA first 640KB-1MB region */
- /* for CalIOC2 - avoid the entire first MB */
- if (is_calgary(dev->device)) {
- start = (640 * 1024);
- npages = ((1024 - 640) * 1024) >> PAGE_SHIFT;
- } else { /* calioc2 */
- start = 0;
- npages = (1 * 1024 * 1024) >> PAGE_SHIFT;
- }
- iommu_range_reserve(tbl, start, npages);
-
- /* reserve the two PCI peripheral memory regions in IO space */
- calgary_reserve_peripheral_mem_1(dev);
- calgary_reserve_peripheral_mem_2(dev);
-}
-
-static int __init calgary_setup_tar(struct pci_dev *dev, void __iomem *bbar)
-{
- u64 val64;
- u64 table_phys;
- void __iomem *target;
- int ret;
- struct iommu_table *tbl;
-
- /* build TCE tables for each PHB */
- ret = build_tce_table(dev, bbar);
- if (ret)
- return ret;
-
- tbl = pci_iommu(dev->bus);
- tbl->it_base = (unsigned long)bus_info[dev->bus->number].tce_space;
-
- if (is_kdump_kernel())
- calgary_init_bitmap_from_tce_table(tbl);
- else
- tce_free(tbl, 0, tbl->it_size);
-
- if (is_calgary(dev->device))
- tbl->chip_ops = &calgary_chip_ops;
- else if (is_calioc2(dev->device))
- tbl->chip_ops = &calioc2_chip_ops;
- else
- BUG();
-
- calgary_reserve_regions(dev);
-
- /* set TARs for each PHB */
- target = calgary_reg(bbar, tar_offset(dev->bus->number));
- val64 = be64_to_cpu(readq(target));
-
- /* zero out all TAR bits under sw control */
- val64 &= ~TAR_SW_BITS;
- table_phys = (u64)__pa(tbl->it_base);
-
- val64 |= table_phys;
-
- BUG_ON(specified_table_size > TCE_TABLE_SIZE_8M);
- val64 |= (u64) specified_table_size;
-
- tbl->tar_val = cpu_to_be64(val64);
-
- writeq(tbl->tar_val, target);
- readq(target); /* flush */
-
- return 0;
-}
-
-static void __init calgary_free_bus(struct pci_dev *dev)
-{
- u64 val64;
- struct iommu_table *tbl = pci_iommu(dev->bus);
- void __iomem *target;
- unsigned int bitmapsz;
-
- target = calgary_reg(tbl->bbar, tar_offset(dev->bus->number));
- val64 = be64_to_cpu(readq(target));
- val64 &= ~TAR_SW_BITS;
- writeq(cpu_to_be64(val64), target);
- readq(target); /* flush */
-
- bitmapsz = tbl->it_size / BITS_PER_BYTE;
- free_pages((unsigned long)tbl->it_map, get_order(bitmapsz));
- tbl->it_map = NULL;
-
- kfree(tbl);
-
- set_pci_iommu(dev->bus, NULL);
-
- /* Can't free bootmem allocated memory after system is up :-( */
- bus_info[dev->bus->number].tce_space = NULL;
-}
-
-static void calgary_dump_error_regs(struct iommu_table *tbl)
-{
- void __iomem *bbar = tbl->bbar;
- void __iomem *target;
- u32 csr, plssr;
-
- target = calgary_reg(bbar, phb_offset(tbl->it_busno) | PHB_CSR_OFFSET);
- csr = be32_to_cpu(readl(target));
-
- target = calgary_reg(bbar, phb_offset(tbl->it_busno) | PHB_PLSSR_OFFSET);
- plssr = be32_to_cpu(readl(target));
-
- /* If no error, the agent ID in the CSR is not valid */
- pr_emerg("DMA error on Calgary PHB 0x%x, 0x%08x@CSR 0x%08x@PLSSR\n",
- tbl->it_busno, csr, plssr);
-}
-
-static void calioc2_dump_error_regs(struct iommu_table *tbl)
-{
- void __iomem *bbar = tbl->bbar;
- u32 csr, csmr, plssr, mck, rcstat;
- void __iomem *target;
- unsigned long phboff = phb_offset(tbl->it_busno);
- unsigned long erroff;
- u32 errregs[7];
- int i;
-
- /* dump CSR */
- target = calgary_reg(bbar, phboff | PHB_CSR_OFFSET);
- csr = be32_to_cpu(readl(target));
- /* dump PLSSR */
- target = calgary_reg(bbar, phboff | PHB_PLSSR_OFFSET);
- plssr = be32_to_cpu(readl(target));
- /* dump CSMR */
- target = calgary_reg(bbar, phboff | 0x290);
- csmr = be32_to_cpu(readl(target));
- /* dump mck */
- target = calgary_reg(bbar, phboff | 0x800);
- mck = be32_to_cpu(readl(target));
-
- pr_emerg("DMA error on CalIOC2 PHB 0x%x\n", tbl->it_busno);
-
- pr_emerg("0x%08x@CSR 0x%08x@PLSSR 0x%08x@CSMR 0x%08x@MCK\n",
- csr, plssr, csmr, mck);
-
- /* dump rest of error regs */
- pr_emerg("");
- for (i = 0; i < ARRAY_SIZE(errregs); i++) {
- /* err regs are at 0x810 - 0x870 */
- erroff = (0x810 + (i * 0x10));
- target = calgary_reg(bbar, phboff | erroff);
- errregs[i] = be32_to_cpu(readl(target));
- pr_cont("0x%08x@0x%lx ", errregs[i], erroff);
- }
- pr_cont("\n");
-
- /* root complex status */
- target = calgary_reg(bbar, phboff | PHB_ROOT_COMPLEX_STATUS);
- rcstat = be32_to_cpu(readl(target));
- printk(KERN_EMERG "Calgary: 0x%08x@0x%x\n", rcstat,
- PHB_ROOT_COMPLEX_STATUS);
-}
-
-static void calgary_watchdog(struct timer_list *t)
-{
- struct iommu_table *tbl = from_timer(tbl, t, watchdog_timer);
- void __iomem *bbar = tbl->bbar;
- u32 val32;
- void __iomem *target;
-
- target = calgary_reg(bbar, phb_offset(tbl->it_busno) | PHB_CSR_OFFSET);
- val32 = be32_to_cpu(readl(target));
-
- /* If no error, the agent ID in the CSR is not valid */
- if (val32 & CSR_AGENT_MASK) {
- tbl->chip_ops->dump_error_regs(tbl);
-
- /* reset error */
- writel(0, target);
-
- /* Disable bus that caused the error */
- target = calgary_reg(bbar, phb_offset(tbl->it_busno) |
- PHB_CONFIG_RW_OFFSET);
- val32 = be32_to_cpu(readl(target));
- val32 |= PHB_SLOT_DISABLE;
- writel(cpu_to_be32(val32), target);
- readl(target); /* flush */
- } else {
- /* Reset the timer */
- mod_timer(&tbl->watchdog_timer, jiffies + 2 * HZ);
- }
-}
-
-static void __init calgary_set_split_completion_timeout(void __iomem *bbar,
- unsigned char busnum, unsigned long timeout)
-{
- u64 val64;
- void __iomem *target;
- unsigned int phb_shift = ~0; /* silence gcc */
- u64 mask;
-
- switch (busno_to_phbid(busnum)) {
- case 0: phb_shift = (63 - 19);
- break;
- case 1: phb_shift = (63 - 23);
- break;
- case 2: phb_shift = (63 - 27);
- break;
- case 3: phb_shift = (63 - 35);
- break;
- default:
- BUG_ON(busno_to_phbid(busnum));
- }
-
- target = calgary_reg(bbar, CALGARY_CONFIG_REG);
- val64 = be64_to_cpu(readq(target));
-
- /* zero out this PHB's timer bits */
- mask = ~(0xFUL << phb_shift);
- val64 &= mask;
- val64 |= (timeout << phb_shift);
- writeq(cpu_to_be64(val64), target);
- readq(target); /* flush */
-}
-
-static void __init calioc2_handle_quirks(struct iommu_table *tbl, struct pci_dev *dev)
-{
- unsigned char busnum = dev->bus->number;
- void __iomem *bbar = tbl->bbar;
- void __iomem *target;
- u32 val;
-
- /*
- * CalIOC2 designers recommend setting bit 8 in 0xnDB0 to 1
- */
- target = calgary_reg(bbar, phb_offset(busnum) | PHB_SAVIOR_L2);
- val = cpu_to_be32(readl(target));
- val |= 0x00800000;
- writel(cpu_to_be32(val), target);
-}
-
-static void __init calgary_handle_quirks(struct iommu_table *tbl, struct pci_dev *dev)
-{
- unsigned char busnum = dev->bus->number;
-
- /*
- * Give split completion a longer timeout on bus 1 for aic94xx
- * http://bugzilla.kernel.org/show_bug.cgi?id=7180
- */
- if (is_calgary(dev->device) && (busnum == 1))
- calgary_set_split_completion_timeout(tbl->bbar, busnum,
- CCR_2SEC_TIMEOUT);
-}
-
-static void __init calgary_enable_translation(struct pci_dev *dev)
-{
- u32 val32;
- unsigned char busnum;
- void __iomem *target;
- void __iomem *bbar;
- struct iommu_table *tbl;
-
- busnum = dev->bus->number;
- tbl = pci_iommu(dev->bus);
- bbar = tbl->bbar;
-
- /* enable TCE in PHB Config Register */
- target = calgary_reg(bbar, phb_offset(busnum) | PHB_CONFIG_RW_OFFSET);
- val32 = be32_to_cpu(readl(target));
- val32 |= PHB_TCE_ENABLE | PHB_DAC_DISABLE | PHB_MCSR_ENABLE;
-
- printk(KERN_INFO "Calgary: enabling translation on %s PHB %#x\n",
- (dev->device == PCI_DEVICE_ID_IBM_CALGARY) ?
- "Calgary" : "CalIOC2", busnum);
- printk(KERN_INFO "Calgary: errant DMAs will now be prevented on this "
- "bus.\n");
-
- writel(cpu_to_be32(val32), target);
- readl(target); /* flush */
-
- timer_setup(&tbl->watchdog_timer, calgary_watchdog, 0);
- mod_timer(&tbl->watchdog_timer, jiffies);
-}
-
-static void __init calgary_disable_translation(struct pci_dev *dev)
-{
- u32 val32;
- unsigned char busnum;
- void __iomem *target;
- void __iomem *bbar;
- struct iommu_table *tbl;
-
- busnum = dev->bus->number;
- tbl = pci_iommu(dev->bus);
- bbar = tbl->bbar;
-
- /* disable TCE in PHB Config Register */
- target = calgary_reg(bbar, phb_offset(busnum) | PHB_CONFIG_RW_OFFSET);
- val32 = be32_to_cpu(readl(target));
- val32 &= ~(PHB_TCE_ENABLE | PHB_DAC_DISABLE | PHB_MCSR_ENABLE);
-
- printk(KERN_INFO "Calgary: disabling translation on PHB %#x!\n", busnum);
- writel(cpu_to_be32(val32), target);
- readl(target); /* flush */
-
- del_timer_sync(&tbl->watchdog_timer);
-}
-
-static void __init calgary_init_one_nontraslated(struct pci_dev *dev)
-{
- pci_dev_get(dev);
- set_pci_iommu(dev->bus, NULL);
-
- /* is the device behind a bridge? */
- if (dev->bus->parent)
- dev->bus->parent->self = dev;
- else
- dev->bus->self = dev;
-}
-
-static int __init calgary_init_one(struct pci_dev *dev)
-{
- void __iomem *bbar;
- struct iommu_table *tbl;
- int ret;
-
- bbar = busno_to_bbar(dev->bus->number);
- ret = calgary_setup_tar(dev, bbar);
- if (ret)
- goto done;
-
- pci_dev_get(dev);
-
- if (dev->bus->parent) {
- if (dev->bus->parent->self)
- printk(KERN_WARNING "Calgary: IEEEE, dev %p has "
- "bus->parent->self!\n", dev);
- dev->bus->parent->self = dev;
- } else
- dev->bus->self = dev;
-
- tbl = pci_iommu(dev->bus);
- tbl->chip_ops->handle_quirks(tbl, dev);
-
- calgary_enable_translation(dev);
-
- return 0;
-
-done:
- return ret;
-}
-
-static int __init calgary_locate_bbars(void)
-{
- int ret;
- int rioidx, phb, bus;
- void __iomem *bbar;
- void __iomem *target;
- unsigned long offset;
- u8 start_bus, end_bus;
- u32 val;
-
- ret = -ENODATA;
- for (rioidx = 0; rioidx < rio_table_hdr->num_rio_dev; rioidx++) {
- struct rio_detail *rio = rio_devs[rioidx];
-
- if ((rio->type != COMPAT_CALGARY) && (rio->type != ALT_CALGARY))
- continue;
-
- /* map entire 1MB of Calgary config space */
- bbar = ioremap_nocache(rio->BBAR, 1024 * 1024);
- if (!bbar)
- goto error;
-
- for (phb = 0; phb < PHBS_PER_CALGARY; phb++) {
- offset = phb_debug_offsets[phb] | PHB_DEBUG_STUFF_OFFSET;
- target = calgary_reg(bbar, offset);
-
- val = be32_to_cpu(readl(target));
-
- start_bus = (u8)((val & 0x00FF0000) >> 16);
- end_bus = (u8)((val & 0x0000FF00) >> 8);
-
- if (end_bus) {
- for (bus = start_bus; bus <= end_bus; bus++) {
- bus_info[bus].bbar = bbar;
- bus_info[bus].phbid = phb;
- }
- } else {
- bus_info[start_bus].bbar = bbar;
- bus_info[start_bus].phbid = phb;
- }
- }
- }
-
- return 0;
-
-error:
- /* scan bus_info and iounmap any bbars we previously ioremap'd */
- for (bus = 0; bus < ARRAY_SIZE(bus_info); bus++)
- if (bus_info[bus].bbar)
- iounmap(bus_info[bus].bbar);
-
- return ret;
-}
-
-static int __init calgary_init(void)
-{
- int ret;
- struct pci_dev *dev = NULL;
- struct calgary_bus_info *info;
-
- ret = calgary_locate_bbars();
- if (ret)
- return ret;
-
- /* Purely for kdump kernel case */
- if (is_kdump_kernel())
- get_tce_space_from_tar();
-
- do {
- dev = pci_get_device(PCI_VENDOR_ID_IBM, PCI_ANY_ID, dev);
- if (!dev)
- break;
- if (!is_cal_pci_dev(dev->device))
- continue;
-
- info = &bus_info[dev->bus->number];
- if (info->translation_disabled) {
- calgary_init_one_nontraslated(dev);
- continue;
- }
-
- if (!info->tce_space && !translate_empty_slots)
- continue;
-
- ret = calgary_init_one(dev);
- if (ret)
- goto error;
- } while (1);
-
- dev = NULL;
- for_each_pci_dev(dev) {
- struct iommu_table *tbl;
-
- tbl = find_iommu_table(&dev->dev);
-
- if (translation_enabled(tbl))
- dev->dev.dma_ops = &calgary_dma_ops;
- }
-
- return ret;
-
-error:
- do {
- dev = pci_get_device(PCI_VENDOR_ID_IBM, PCI_ANY_ID, dev);
- if (!dev)
- break;
- if (!is_cal_pci_dev(dev->device))
- continue;
-
- info = &bus_info[dev->bus->number];
- if (info->translation_disabled) {
- pci_dev_put(dev);
- continue;
- }
- if (!info->tce_space && !translate_empty_slots)
- continue;
-
- calgary_disable_translation(dev);
- calgary_free_bus(dev);
- pci_dev_put(dev); /* Undo calgary_init_one()'s pci_dev_get() */
- dev->dev.dma_ops = NULL;
- } while (1);
-
- return ret;
-}
-
-static inline int __init determine_tce_table_size(void)
-{
- int ret;
-
- if (specified_table_size != TCE_TABLE_SIZE_UNSPECIFIED)
- return specified_table_size;
-
- if (is_kdump_kernel() && saved_max_pfn) {
- /*
- * Table sizes are from 0 to 7 (TCE_TABLE_SIZE_64K to
- * TCE_TABLE_SIZE_8M). Table size 0 has 8K entries and each
- * larger table size has twice as many entries, so shift the
- * max ram address by 13 to divide by 8K and then look at the
- * order of the result to choose between 0-7.
- */
- ret = get_order((saved_max_pfn * PAGE_SIZE) >> 13);
- if (ret > TCE_TABLE_SIZE_8M)
- ret = TCE_TABLE_SIZE_8M;
- } else {
- /*
- * Use 8M by default (suggested by Muli) if it's not
- * kdump kernel and saved_max_pfn isn't set.
- */
- ret = TCE_TABLE_SIZE_8M;
- }
-
- return ret;
-}
-
-static int __init build_detail_arrays(void)
-{
- unsigned long ptr;
- unsigned numnodes, i;
- int scal_detail_size, rio_detail_size;
-
- numnodes = rio_table_hdr->num_scal_dev;
- if (numnodes > MAX_NUMNODES){
- printk(KERN_WARNING
- "Calgary: MAX_NUMNODES too low! Defined as %d, "
- "but system has %d nodes.\n",
- MAX_NUMNODES, numnodes);
- return -ENODEV;
- }
-
- switch (rio_table_hdr->version){
- case 2:
- scal_detail_size = 11;
- rio_detail_size = 13;
- break;
- case 3:
- scal_detail_size = 12;
- rio_detail_size = 15;
- break;
- default:
- printk(KERN_WARNING
- "Calgary: Invalid Rio Grande Table Version: %d\n",
- rio_table_hdr->version);
- return -EPROTO;
- }
-
- ptr = ((unsigned long)rio_table_hdr) + 3;
- for (i = 0; i < numnodes; i++, ptr += scal_detail_size)
- scal_devs[i] = (struct scal_detail *)ptr;
-
- for (i = 0; i < rio_table_hdr->num_rio_dev;
- i++, ptr += rio_detail_size)
- rio_devs[i] = (struct rio_detail *)ptr;
-
- return 0;
-}
-
-static int __init calgary_bus_has_devices(int bus, unsigned short pci_dev)
-{
- int dev;
- u32 val;
-
- if (pci_dev == PCI_DEVICE_ID_IBM_CALIOC2) {
- /*
- * FIXME: properly scan for devices across the
- * PCI-to-PCI bridge on every CalIOC2 port.
- */
- return 1;
- }
-
- for (dev = 1; dev < 8; dev++) {
- val = read_pci_config(bus, dev, 0, 0);
- if (val != 0xffffffff)
- break;
- }
- return (val != 0xffffffff);
-}
-
-/*
- * calgary_init_bitmap_from_tce_table():
- * Function for kdump case. In the second/kdump kernel initialize
- * the bitmap based on the tce table entries obtained from first kernel
- */
-static void calgary_init_bitmap_from_tce_table(struct iommu_table *tbl)
-{
- u64 *tp;
- unsigned int index;
- tp = ((u64 *)tbl->it_base);
- for (index = 0 ; index < tbl->it_size; index++) {
- if (*tp != 0x0)
- set_bit(index, tbl->it_map);
- tp++;
- }
-}
-
-/*
- * get_tce_space_from_tar():
- * Function for kdump case. Get the tce tables from first kernel
- * by reading the contents of the base address register of calgary iommu
- */
-static void __init get_tce_space_from_tar(void)
-{
- int bus;
- void __iomem *target;
- unsigned long tce_space;
-
- for (bus = 0; bus < MAX_PHB_BUS_NUM; bus++) {
- struct calgary_bus_info *info = &bus_info[bus];
- unsigned short pci_device;
- u32 val;
-
- val = read_pci_config(bus, 0, 0, 0);
- pci_device = (val & 0xFFFF0000) >> 16;
-
- if (!is_cal_pci_dev(pci_device))
- continue;
- if (info->translation_disabled)
- continue;
-
- if (calgary_bus_has_devices(bus, pci_device) ||
- translate_empty_slots) {
- target = calgary_reg(bus_info[bus].bbar,
- tar_offset(bus));
- tce_space = be64_to_cpu(readq(target));
- tce_space = tce_space & TAR_SW_BITS;
-
- tce_space = tce_space & (~specified_table_size);
- info->tce_space = (u64 *)__va(tce_space);
- }
- }
- return;
-}
-
-static int __init calgary_iommu_init(void)
-{
- int ret;
-
- /* ok, we're trying to use Calgary - let's roll */
- printk(KERN_INFO "PCI-DMA: Using Calgary IOMMU\n");
-
- ret = calgary_init();
- if (ret) {
- printk(KERN_ERR "PCI-DMA: Calgary init failed %d, "
- "falling back to no_iommu\n", ret);
- return ret;
- }
-
- return 0;
-}
-
-int __init detect_calgary(void)
-{
- int bus;
- void *tbl;
- int calgary_found = 0;
- unsigned long ptr;
- unsigned int offset, prev_offset;
- int ret;
-
- /*
- * if the user specified iommu=off or iommu=soft or we found
- * another HW IOMMU already, bail out.
- */
- if (no_iommu || iommu_detected)
- return -ENODEV;
-
- if (!use_calgary)
- return -ENODEV;
-
- if (!early_pci_allowed())
- return -ENODEV;
-
- printk(KERN_DEBUG "Calgary: detecting Calgary via BIOS EBDA area\n");
-
- ptr = (unsigned long)phys_to_virt(get_bios_ebda());
-
- rio_table_hdr = NULL;
- prev_offset = 0;
- offset = 0x180;
- /*
- * The next offset is stored in the 1st word.
- * Only parse up until the offset increases:
- */
- while (offset > prev_offset) {
- /* The block id is stored in the 2nd word */
- if (*((unsigned short *)(ptr + offset + 2)) == 0x4752){
- /* set the pointer past the offset & block id */
- rio_table_hdr = (struct rio_table_hdr *)(ptr + offset + 4);
- break;
- }
- prev_offset = offset;
- offset = *((unsigned short *)(ptr + offset));
- }
- if (!rio_table_hdr) {
- printk(KERN_DEBUG "Calgary: Unable to locate Rio Grande table "
- "in EBDA - bailing!\n");
- return -ENODEV;
- }
-
- ret = build_detail_arrays();
- if (ret) {
- printk(KERN_DEBUG "Calgary: build_detail_arrays ret %d\n", ret);
- return -ENOMEM;
- }
-
- specified_table_size = determine_tce_table_size();
-
- for (bus = 0; bus < MAX_PHB_BUS_NUM; bus++) {
- struct calgary_bus_info *info = &bus_info[bus];
- unsigned short pci_device;
- u32 val;
-
- val = read_pci_config(bus, 0, 0, 0);
- pci_device = (val & 0xFFFF0000) >> 16;
-
- if (!is_cal_pci_dev(pci_device))
- continue;
-
- if (info->translation_disabled)
- continue;
-
- if (calgary_bus_has_devices(bus, pci_device) ||
- translate_empty_slots) {
- /*
- * If it is kdump kernel, find and use tce tables
- * from first kernel, else allocate tce tables here
- */
- if (!is_kdump_kernel()) {
- tbl = alloc_tce_table();
- if (!tbl)
- goto cleanup;
- info->tce_space = tbl;
- }
- calgary_found = 1;
- }
- }
-
- printk(KERN_DEBUG "Calgary: finished detection, Calgary %s\n",
- calgary_found ? "found" : "not found");
-
- if (calgary_found) {
- iommu_detected = 1;
- calgary_detected = 1;
- printk(KERN_INFO "PCI-DMA: Calgary IOMMU detected.\n");
- printk(KERN_INFO "PCI-DMA: Calgary TCE table spec is %d\n",
- specified_table_size);
-
- x86_init.iommu.iommu_init = calgary_iommu_init;
- }
- return calgary_found;
-
-cleanup:
- for (--bus; bus >= 0; --bus) {
- struct calgary_bus_info *info = &bus_info[bus];
-
- if (info->tce_space)
- free_tce_table(info->tce_space);
- }
- return -ENOMEM;
-}
-
-static int __init calgary_parse_options(char *p)
-{
- unsigned int bridge;
- unsigned long val;
- size_t len;
- ssize_t ret;
-
- while (*p) {
- if (!strncmp(p, "64k", 3))
- specified_table_size = TCE_TABLE_SIZE_64K;
- else if (!strncmp(p, "128k", 4))
- specified_table_size = TCE_TABLE_SIZE_128K;
- else if (!strncmp(p, "256k", 4))
- specified_table_size = TCE_TABLE_SIZE_256K;
- else if (!strncmp(p, "512k", 4))
- specified_table_size = TCE_TABLE_SIZE_512K;
- else if (!strncmp(p, "1M", 2))
- specified_table_size = TCE_TABLE_SIZE_1M;
- else if (!strncmp(p, "2M", 2))
- specified_table_size = TCE_TABLE_SIZE_2M;
- else if (!strncmp(p, "4M", 2))
- specified_table_size = TCE_TABLE_SIZE_4M;
- else if (!strncmp(p, "8M", 2))
- specified_table_size = TCE_TABLE_SIZE_8M;
-
- len = strlen("translate_empty_slots");
- if (!strncmp(p, "translate_empty_slots", len))
- translate_empty_slots = 1;
-
- len = strlen("disable");
- if (!strncmp(p, "disable", len)) {
- p += len;
- if (*p == '=')
- ++p;
- if (*p == '\0')
- break;
- ret = kstrtoul(p, 0, &val);
- if (ret)
- break;
-
- bridge = val;
- if (bridge < MAX_PHB_BUS_NUM) {
- printk(KERN_INFO "Calgary: disabling "
- "translation for PHB %#x\n", bridge);
- bus_info[bridge].translation_disabled = 1;
- }
- }
-
- p = strpbrk(p, ",");
- if (!p)
- break;
-
- p++; /* skip ',' */
- }
- return 1;
-}
-__setup("calgary=", calgary_parse_options);
-
-static void __init calgary_fixup_one_tce_space(struct pci_dev *dev)
-{
- struct iommu_table *tbl;
- unsigned int npages;
- int i;
-
- tbl = pci_iommu(dev->bus);
-
- for (i = 0; i < 4; i++) {
- struct resource *r = &dev->resource[PCI_BRIDGE_RESOURCES + i];
-
- /* Don't give out TCEs that map MEM resources */
- if (!(r->flags & IORESOURCE_MEM))
- continue;
-
- /* 0-based? we reserve the whole 1st MB anyway */
- if (!r->start)
- continue;
-
- /* cover the whole region */
- npages = resource_size(r) >> PAGE_SHIFT;
- npages++;
-
- iommu_range_reserve(tbl, r->start, npages);
- }
-}
-
-static int __init calgary_fixup_tce_spaces(void)
-{
- struct pci_dev *dev = NULL;
- struct calgary_bus_info *info;
-
- if (no_iommu || swiotlb || !calgary_detected)
- return -ENODEV;
-
- printk(KERN_DEBUG "Calgary: fixing up tce spaces\n");
-
- do {
- dev = pci_get_device(PCI_VENDOR_ID_IBM, PCI_ANY_ID, dev);
- if (!dev)
- break;
- if (!is_cal_pci_dev(dev->device))
- continue;
-
- info = &bus_info[dev->bus->number];
- if (info->translation_disabled)
- continue;
-
- if (!info->tce_space)
- continue;
-
- calgary_fixup_one_tce_space(dev);
-
- } while (1);
-
- return 0;
-}
-
-/*
- * We need to be call after pcibios_assign_resources (fs_initcall level)
- * and before device_initcall.
- */
-rootfs_initcall(calgary_fixup_tce_spaces);
-
-IOMMU_INIT_POST(detect_calgary);
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index fa4352dce491..57de2ebff7e2 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -12,7 +12,6 @@
#include <asm/dma.h>
#include <asm/iommu.h>
#include <asm/gart.h>
-#include <asm/calgary.h>
#include <asm/x86_init.h>
#include <asm/iommu_table.h>
@@ -112,11 +111,6 @@ static __init int iommu_setup(char *p)
gart_parse_options(p);
-#ifdef CONFIG_CALGARY_IOMMU
- if (!strncmp(p, "calgary", 7))
- use_calgary = 1;
-#endif /* CONFIG_CALGARY_IOMMU */
-
p += strcspn(p, ",");
if (*p == ',')
++p;
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 5e94c4354d4e..bd2a11ca5dd6 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -41,6 +41,7 @@
#include <asm/desc.h>
#include <asm/prctl.h>
#include <asm/spec-ctrl.h>
+#include <asm/io_bitmap.h>
#include <asm/proto.h>
#include "process.h"
@@ -72,18 +73,9 @@ __visible DEFINE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss_rw) = {
#ifdef CONFIG_X86_32
.ss0 = __KERNEL_DS,
.ss1 = __KERNEL_CS,
- .io_bitmap_base = INVALID_IO_BITMAP_OFFSET,
#endif
+ .io_bitmap_base = IO_BITMAP_OFFSET_INVALID,
},
-#ifdef CONFIG_X86_32
- /*
- * Note that the .io_bitmap member must be extra-big. This is because
- * the CPU will access an additional byte beyond the end of the IO
- * permission bitmap. The extra byte must be all 1 bits, and must
- * be within the limit.
- */
- .io_bitmap = { [0 ... IO_BITMAP_LONGS] = ~0 },
-#endif
};
EXPORT_PER_CPU_SYMBOL(cpu_tss_rw);
@@ -110,28 +102,89 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
void exit_thread(struct task_struct *tsk)
{
struct thread_struct *t = &tsk->thread;
- unsigned long *bp = t->io_bitmap_ptr;
struct fpu *fpu = &t->fpu;
- if (bp) {
- struct tss_struct *tss = &per_cpu(cpu_tss_rw, get_cpu());
-
- t->io_bitmap_ptr = NULL;
- clear_thread_flag(TIF_IO_BITMAP);
- /*
- * Careful, clear this in the TSS too:
- */
- memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
- t->io_bitmap_max = 0;
- put_cpu();
- kfree(bp);
- }
+ if (test_thread_flag(TIF_IO_BITMAP))
+ io_bitmap_exit();
free_vm86(t);
fpu__drop(fpu);
}
+static int set_new_tls(struct task_struct *p, unsigned long tls)
+{
+ struct user_desc __user *utls = (struct user_desc __user *)tls;
+
+ if (in_ia32_syscall())
+ return do_set_thread_area(p, -1, utls, 0);
+ else
+ return do_set_thread_area_64(p, ARCH_SET_FS, tls);
+}
+
+int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
+ unsigned long arg, struct task_struct *p, unsigned long tls)
+{
+ struct inactive_task_frame *frame;
+ struct fork_frame *fork_frame;
+ struct pt_regs *childregs;
+ int ret = 0;
+
+ childregs = task_pt_regs(p);
+ fork_frame = container_of(childregs, struct fork_frame, regs);
+ frame = &fork_frame->frame;
+
+ frame->bp = 0;
+ frame->ret_addr = (unsigned long) ret_from_fork;
+ p->thread.sp = (unsigned long) fork_frame;
+ p->thread.io_bitmap = NULL;
+ memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
+
+#ifdef CONFIG_X86_64
+ savesegment(gs, p->thread.gsindex);
+ p->thread.gsbase = p->thread.gsindex ? 0 : current->thread.gsbase;
+ savesegment(fs, p->thread.fsindex);
+ p->thread.fsbase = p->thread.fsindex ? 0 : current->thread.fsbase;
+ savesegment(es, p->thread.es);
+ savesegment(ds, p->thread.ds);
+#else
+ p->thread.sp0 = (unsigned long) (childregs + 1);
+ /*
+ * Clear all status flags including IF and set fixed bit. 64bit
+ * does not have this initialization as the frame does not contain
+ * flags. The flags consistency (especially vs. AC) is there
+ * ensured via objtool, which lacks 32bit support.
+ */
+ frame->flags = X86_EFLAGS_FIXED;
+#endif
+
+ /* Kernel thread ? */
+ if (unlikely(p->flags & PF_KTHREAD)) {
+ memset(childregs, 0, sizeof(struct pt_regs));
+ kthread_frame_init(frame, sp, arg);
+ return 0;
+ }
+
+ frame->bx = 0;
+ *childregs = *current_pt_regs();
+ childregs->ax = 0;
+ if (sp)
+ childregs->sp = sp;
+
+#ifdef CONFIG_X86_32
+ task_user_gs(p) = get_user_gs(current_pt_regs());
+#endif
+
+ /* Set a new TLS for the child thread? */
+ if (clone_flags & CLONE_SETTLS)
+ ret = set_new_tls(p, tls);
+
+ if (!ret && unlikely(test_tsk_thread_flag(current, TIF_IO_BITMAP)))
+ io_bitmap_share(p);
+
+ return ret;
+}
+
void flush_thread(void)
{
struct task_struct *tsk = current;
@@ -269,31 +322,96 @@ void arch_setup_new_exec(void)
}
}
-static inline void switch_to_bitmap(struct thread_struct *prev,
- struct thread_struct *next,
- unsigned long tifp, unsigned long tifn)
+#ifdef CONFIG_X86_IOPL_IOPERM
+static inline void tss_invalidate_io_bitmap(struct tss_struct *tss)
{
- struct tss_struct *tss = this_cpu_ptr(&cpu_tss_rw);
+ /*
+ * Invalidate the I/O bitmap by moving io_bitmap_base outside the
+ * TSS limit so any subsequent I/O access from user space will
+ * trigger a #GP.
+ *
+ * This is correct even when VMEXIT rewrites the TSS limit
+ * to 0x67 as the only requirement is that the base points
+ * outside the limit.
+ */
+ tss->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET_INVALID;
+}
- if (tifn & _TIF_IO_BITMAP) {
- /*
- * Copy the relevant range of the IO bitmap.
- * Normally this is 128 bytes or less:
- */
- memcpy(tss->io_bitmap, next->io_bitmap_ptr,
- max(prev->io_bitmap_max, next->io_bitmap_max));
+static inline void switch_to_bitmap(unsigned long tifp)
+{
+ /*
+ * Invalidate I/O bitmap if the previous task used it. This prevents
+ * any possible leakage of an active I/O bitmap.
+ *
+ * If the next task has an I/O bitmap it will handle it on exit to
+ * user mode.
+ */
+ if (tifp & _TIF_IO_BITMAP)
+ tss_invalidate_io_bitmap(this_cpu_ptr(&cpu_tss_rw));
+}
+
+static void tss_copy_io_bitmap(struct tss_struct *tss, struct io_bitmap *iobm)
+{
+ /*
+ * Copy at least the byte range of the incoming task's bitmap which
+ * covers the permitted I/O ports.
+ *
+ * If the previous task which used an I/O bitmap had more bits
+ * permitted, then the copy needs to cover those as well so they
+ * get turned off.
+ */
+ memcpy(tss->io_bitmap.bitmap, iobm->bitmap,
+ max(tss->io_bitmap.prev_max, iobm->max));
+
+ /*
+ * Store the new max and the sequence number of this bitmap
+ * and a pointer to the bitmap itself.
+ */
+ tss->io_bitmap.prev_max = iobm->max;
+ tss->io_bitmap.prev_sequence = iobm->sequence;
+}
+
+/**
+ * tss_update_io_bitmap - Update I/O bitmap before exiting to usermode
+ */
+void tss_update_io_bitmap(void)
+{
+ struct tss_struct *tss = this_cpu_ptr(&cpu_tss_rw);
+ u16 *base = &tss->x86_tss.io_bitmap_base;
+
+ if (test_thread_flag(TIF_IO_BITMAP)) {
+ struct thread_struct *t = &current->thread;
+
+ if (IS_ENABLED(CONFIG_X86_IOPL_IOPERM) && t->iopl_emul == 3) {
+ *base = IO_BITMAP_OFFSET_VALID_ALL;
+ } else {
+ struct io_bitmap *iobm = t->io_bitmap;
+ /*
+ * Only copy bitmap data when the sequence number
+ * differs. The update time is accounted to the
+ * incoming task.
+ */
+ if (tss->io_bitmap.prev_sequence != iobm->sequence)
+ tss_copy_io_bitmap(tss, iobm);
+
+ /* Enable the bitmap */
+ *base = IO_BITMAP_OFFSET_VALID_MAP;
+ }
/*
- * Make sure that the TSS limit is correct for the CPU
- * to notice the IO bitmap.
+ * Make sure that the TSS limit covers the I/O bitmap. It might
+ * have been cut down by a VMEXIT to 0x67, which would cause a
+ * subsequent I/O access from user space to trigger a #GP because
+ * the bitmap is outside the TSS limit.
*/
refresh_tss_limit();
- } else if (tifp & _TIF_IO_BITMAP) {
- /*
- * Clear any possible leftover bits:
- */
- memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
+ } else {
+ tss_invalidate_io_bitmap(tss);
}
}
+#else /* CONFIG_X86_IOPL_IOPERM */
+static inline void switch_to_bitmap(unsigned long tifp) { }
+#endif
#ifdef CONFIG_SMP
@@ -505,7 +623,8 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p)
tifn = READ_ONCE(task_thread_info(next_p)->flags);
tifp = READ_ONCE(task_thread_info(prev_p)->flags);
- switch_to_bitmap(prev, next, tifp, tifn);
+
+ switch_to_bitmap(tifp);
propagate_user_return_notify(prev_p, next_p);
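A minimal C sketch of the sequence-number scheme used by tss_copy_io_bitmap()/tss_update_io_bitmap() above (all names below are hypothetical stand-ins, not kernel APIs): the per-CPU copy is refreshed only when the task's bitmap carries a new sequence number, and the copy length covers the previous owner's range so stale permission bits get cleared.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

struct io_bitmap  { uint64_t sequence; unsigned int max; uint8_t bitmap[128]; };
struct tss_bitmap { uint64_t prev_sequence; unsigned int prev_max; uint8_t bitmap[128]; };

static void tss_sync(struct tss_bitmap *tss, const struct io_bitmap *iobm)
{
	if (tss->prev_sequence == iobm->sequence)
		return;		/* unchanged since the last copy - skip it */

	/* copy enough bytes to also overwrite the old owner's stale bits */
	unsigned int len = tss->prev_max > iobm->max ? tss->prev_max : iobm->max;

	memcpy(tss->bitmap, iobm->bitmap, len);
	tss->prev_max = iobm->max;
	tss->prev_sequence = iobm->sequence;
}

int main(void)
{
	struct io_bitmap task = { .sequence = 1, .max = 16 };
	struct tss_bitmap tss = { 0 };

	tss_sync(&tss, &task);	/* sequence differs: copies 16 bytes */
	tss_sync(&tss, &task);	/* same sequence: copy is skipped */
	printf("prev_max=%u\n", tss.prev_max);
	return 0;
}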
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index b8ceec4974fe..323499f48858 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -112,74 +112,6 @@ void release_thread(struct task_struct *dead_task)
release_vm86_irqs(dead_task);
}
-int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
- unsigned long arg, struct task_struct *p, unsigned long tls)
-{
- struct pt_regs *childregs = task_pt_regs(p);
- struct fork_frame *fork_frame = container_of(childregs, struct fork_frame, regs);
- struct inactive_task_frame *frame = &fork_frame->frame;
- struct task_struct *tsk;
- int err;
-
- /*
- * For a new task use the RESET flags value since there is no before.
- * All the status flags are zero; DF and all the system flags must also
- * be 0, specifically IF must be 0 because we context switch to the new
- * task with interrupts disabled.
- */
- frame->flags = X86_EFLAGS_FIXED;
- frame->bp = 0;
- frame->ret_addr = (unsigned long) ret_from_fork;
- p->thread.sp = (unsigned long) fork_frame;
- p->thread.sp0 = (unsigned long) (childregs+1);
- memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
-
- if (unlikely(p->flags & PF_KTHREAD)) {
- /* kernel thread */
- memset(childregs, 0, sizeof(struct pt_regs));
- frame->bx = sp; /* function */
- frame->di = arg;
- p->thread.io_bitmap_ptr = NULL;
- return 0;
- }
- frame->bx = 0;
- *childregs = *current_pt_regs();
- childregs->ax = 0;
- if (sp)
- childregs->sp = sp;
-
- task_user_gs(p) = get_user_gs(current_pt_regs());
-
- p->thread.io_bitmap_ptr = NULL;
- tsk = current;
- err = -ENOMEM;
-
- if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) {
- p->thread.io_bitmap_ptr = kmemdup(tsk->thread.io_bitmap_ptr,
- IO_BITMAP_BYTES, GFP_KERNEL);
- if (!p->thread.io_bitmap_ptr) {
- p->thread.io_bitmap_max = 0;
- return -ENOMEM;
- }
- set_tsk_thread_flag(p, TIF_IO_BITMAP);
- }
-
- err = 0;
-
- /*
- * Set a new TLS for the child thread?
- */
- if (clone_flags & CLONE_SETTLS)
- err = do_set_thread_area(p, -1,
- (struct user_desc __user *)tls, 0);
-
- if (err && p->thread.io_bitmap_ptr) {
- kfree(p->thread.io_bitmap_ptr);
- p->thread.io_bitmap_max = 0;
- }
- return err;
-}
-
void
start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
@@ -255,15 +187,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
*/
load_TLS(next, cpu);
- /*
- * Restore IOPL if needed. In normal use, the flags restore
- * in the switch assembly will handle this. But if the kernel
- * is running virtualized at a non-zero CPL, the popf will
- * not restore flags, so it must be done in a separate step.
- */
- if (get_kernel_rpl() && unlikely(prev->iopl != next->iopl))
- set_iopl_mask(next->iopl);
-
switch_to_extra(prev_p, next_p);
/*
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index af64519b2695..506d66830d4d 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -371,81 +371,6 @@ void x86_gsbase_write_task(struct task_struct *task, unsigned long gsbase)
task->thread.gsbase = gsbase;
}
-int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
- unsigned long arg, struct task_struct *p, unsigned long tls)
-{
- int err;
- struct pt_regs *childregs;
- struct fork_frame *fork_frame;
- struct inactive_task_frame *frame;
- struct task_struct *me = current;
-
- childregs = task_pt_regs(p);
- fork_frame = container_of(childregs, struct fork_frame, regs);
- frame = &fork_frame->frame;
-
- frame->bp = 0;
- frame->ret_addr = (unsigned long) ret_from_fork;
- p->thread.sp = (unsigned long) fork_frame;
- p->thread.io_bitmap_ptr = NULL;
-
- savesegment(gs, p->thread.gsindex);
- p->thread.gsbase = p->thread.gsindex ? 0 : me->thread.gsbase;
- savesegment(fs, p->thread.fsindex);
- p->thread.fsbase = p->thread.fsindex ? 0 : me->thread.fsbase;
- savesegment(es, p->thread.es);
- savesegment(ds, p->thread.ds);
- memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
-
- if (unlikely(p->flags & PF_KTHREAD)) {
- /* kernel thread */
- memset(childregs, 0, sizeof(struct pt_regs));
- frame->bx = sp; /* function */
- frame->r12 = arg;
- return 0;
- }
- frame->bx = 0;
- *childregs = *current_pt_regs();
-
- childregs->ax = 0;
- if (sp)
- childregs->sp = sp;
-
- err = -ENOMEM;
- if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
- p->thread.io_bitmap_ptr = kmemdup(me->thread.io_bitmap_ptr,
- IO_BITMAP_BYTES, GFP_KERNEL);
- if (!p->thread.io_bitmap_ptr) {
- p->thread.io_bitmap_max = 0;
- return -ENOMEM;
- }
- set_tsk_thread_flag(p, TIF_IO_BITMAP);
- }
-
- /*
- * Set a new TLS for the child thread?
- */
- if (clone_flags & CLONE_SETTLS) {
-#ifdef CONFIG_IA32_EMULATION
- if (in_ia32_syscall())
- err = do_set_thread_area(p, -1,
- (struct user_desc __user *)tls, 0);
- else
-#endif
- err = do_arch_prctl_64(p, ARCH_SET_FS, tls);
- if (err)
- goto out;
- }
- err = 0;
-out:
- if (err && p->thread.io_bitmap_ptr) {
- kfree(p->thread.io_bitmap_ptr);
- p->thread.io_bitmap_max = 0;
- }
-
- return err;
-}
-
static void
start_thread_common(struct pt_regs *regs, unsigned long new_ip,
unsigned long new_sp,
@@ -572,17 +497,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
switch_to_extra(prev_p, next_p);
-#ifdef CONFIG_XEN_PV
- /*
- * On Xen PV, IOPL bits in pt_regs->flags have no effect, and
- * current_pt_regs()->flags may not match the current task's
- * intended IOPL. We need to switch it manually.
- */
- if (unlikely(static_cpu_has(X86_FEATURE_XENPV) &&
- prev->iopl != next->iopl))
- xen_set_iopl_mask(next->iopl);
-#endif
-
if (static_cpu_has_bug(X86_BUG_SYSRET_SS_ATTRS)) {
/*
* AMD CPUs have a misfeature: SYSRET sets the SS selector but
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index 3c5bbe8e4120..066e5b01a7e0 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -42,6 +42,7 @@
#include <asm/traps.h>
#include <asm/syscall.h>
#include <asm/fsgsbase.h>
+#include <asm/io_bitmap.h>
#include "tls.h"
@@ -697,7 +698,9 @@ static int ptrace_set_debugreg(struct task_struct *tsk, int n,
static int ioperm_active(struct task_struct *target,
const struct user_regset *regset)
{
- return target->thread.io_bitmap_max / regset->size;
+ struct io_bitmap *iobm = target->thread.io_bitmap;
+
+ return iobm ? DIV_ROUND_UP(iobm->max, regset->size) : 0;
}
static int ioperm_get(struct task_struct *target,
@@ -705,12 +708,13 @@ static int ioperm_get(struct task_struct *target,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
- if (!target->thread.io_bitmap_ptr)
+ struct io_bitmap *iobm = target->thread.io_bitmap;
+
+ if (!iobm)
return -ENXIO;
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- target->thread.io_bitmap_ptr,
- 0, IO_BITMAP_BYTES);
+ iobm->bitmap, 0, IO_BITMAP_BYTES);
}
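The DIV_ROUND_UP() switch in ioperm_active() above also fixes rounding, not just the NULL-pointer handling: the old division truncated. A small worked example (standalone C, using the kernel's DIV_ROUND_UP definition; the max/size values are illustrative):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* e.g. one permitted port in the first byte: max = 3, member size = 4 */
	unsigned int max = 3, size = 4;

	printf("truncated: %u, rounded up: %u\n",
	       max / size, DIV_ROUND_UP(max, size));
	/* prints "truncated: 0, rounded up: 1" - the truncating form
	 * claimed the regset was empty even though one member was in use */
	return 0;
}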
/*
diff --git a/arch/x86/kernel/relocate_kernel_32.S b/arch/x86/kernel/relocate_kernel_32.S
index ee26df08002e..94b33885f8d2 100644
--- a/arch/x86/kernel/relocate_kernel_32.S
+++ b/arch/x86/kernel/relocate_kernel_32.S
@@ -35,8 +35,7 @@
#define CP_PA_BACKUP_PAGES_MAP DATA(0x1c)
.text
- .globl relocate_kernel
-relocate_kernel:
+SYM_CODE_START_NOALIGN(relocate_kernel)
/* Save the CPU context, used for jumping back */
pushl %ebx
@@ -93,8 +92,9 @@ relocate_kernel:
addl $(identity_mapped - relocate_kernel), %eax
pushl %eax
ret
+SYM_CODE_END(relocate_kernel)
-identity_mapped:
+SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
/* set return address to 0 if not preserving context */
pushl $0
/* store the start address on the stack */
@@ -191,8 +191,9 @@ identity_mapped:
addl $(virtual_mapped - relocate_kernel), %eax
pushl %eax
ret
+SYM_CODE_END(identity_mapped)
-virtual_mapped:
+SYM_CODE_START_LOCAL_NOALIGN(virtual_mapped)
movl CR4(%edi), %eax
movl %eax, %cr4
movl CR3(%edi), %eax
@@ -208,9 +209,10 @@ virtual_mapped:
popl %esi
popl %ebx
ret
+SYM_CODE_END(virtual_mapped)
/* Do the copies */
-swap_pages:
+SYM_CODE_START_LOCAL_NOALIGN(swap_pages)
movl 8(%esp), %edx
movl 4(%esp), %ecx
pushl %ebp
@@ -270,6 +272,7 @@ swap_pages:
popl %ebx
popl %ebp
ret
+SYM_CODE_END(swap_pages)
.globl kexec_control_code_size
.set kexec_control_code_size, . - relocate_kernel
diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
index c51ccff5cd01..ef3ba99068d3 100644
--- a/arch/x86/kernel/relocate_kernel_64.S
+++ b/arch/x86/kernel/relocate_kernel_64.S
@@ -38,8 +38,7 @@
.text
.align PAGE_SIZE
.code64
- .globl relocate_kernel
-relocate_kernel:
+SYM_CODE_START_NOALIGN(relocate_kernel)
/*
* %rdi indirection_page
* %rsi page_list
@@ -103,8 +102,9 @@ relocate_kernel:
addq $(identity_mapped - relocate_kernel), %r8
pushq %r8
ret
+SYM_CODE_END(relocate_kernel)
-identity_mapped:
+SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
/* set return address to 0 if not preserving context */
pushq $0
/* store the start address on the stack */
@@ -209,8 +209,9 @@ identity_mapped:
movq $virtual_mapped, %rax
pushq %rax
ret
+SYM_CODE_END(identity_mapped)
-virtual_mapped:
+SYM_CODE_START_LOCAL_NOALIGN(virtual_mapped)
movq RSP(%r8), %rsp
movq CR4(%r8), %rax
movq %rax, %cr4
@@ -228,9 +229,10 @@ virtual_mapped:
popq %rbp
popq %rbx
ret
+SYM_CODE_END(virtual_mapped)
/* Do the copies */
-swap_pages:
+SYM_CODE_START_LOCAL_NOALIGN(swap_pages)
movq %rdi, %rcx /* Put the page_list in %rcx */
xorl %edi, %edi
xorl %esi, %esi
@@ -283,6 +285,7 @@ swap_pages:
jmp 0b
3:
ret
+SYM_CODE_END(swap_pages)
.globl kexec_control_code_size
.set kexec_control_code_size, . - relocate_kernel
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 77ea96b794bd..cedfe2077a69 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -143,6 +143,13 @@ struct boot_params boot_params;
/*
* Machine setup..
*/
+static struct resource rodata_resource = {
+ .name = "Kernel rodata",
+ .start = 0,
+ .end = 0,
+ .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
+};
+
static struct resource data_resource = {
.name = "Kernel data",
.start = 0,
@@ -438,6 +445,12 @@ static void __init memblock_x86_reserve_range_setup_data(void)
while (pa_data) {
data = early_memremap(pa_data, sizeof(*data));
memblock_reserve(pa_data, sizeof(*data) + data->len);
+
+ if (data->type == SETUP_INDIRECT &&
+ ((struct setup_indirect *)data->data)->type != SETUP_INDIRECT)
+ memblock_reserve(((struct setup_indirect *)data->data)->addr,
+ ((struct setup_indirect *)data->data)->len);
+
pa_data = data->next;
early_memunmap(data, sizeof(*data));
}
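The setup_data walk above follows a singly linked list of physical addresses, now also reserving the indirect payload a SETUP_INDIRECT node points at. A sketch of the basic chain walk in plain C (hypothetical types and helpers standing in for early_memremap()/memblock_reserve()):

#include <stdint.h>
#include <stdio.h>

struct setup_node { uint64_t next; uint32_t type, len; };

/* stand-ins: "map" a physical address, record a reservation */
static struct setup_node *map(uint64_t pa) { return (struct setup_node *)(uintptr_t)pa; }
static void reserve(uint64_t pa, uint64_t len)
{
	printf("reserve %#llx len %llu\n",
	       (unsigned long long)pa, (unsigned long long)len);
}

static void walk_setup_data(uint64_t pa_data)
{
	while (pa_data) {
		struct setup_node *d = map(pa_data);

		reserve(pa_data, sizeof(*d) + d->len);	/* header + payload */
		pa_data = d->next;			/* follow the chain */
	}
}

int main(void)
{
	static struct setup_node nodes[2] = {
		{ .type = 1, .len = 16 },
		{ .type = 2, .len = 32 },
	};

	nodes[0].next = (uint64_t)(uintptr_t)&nodes[1];
	walk_setup_data((uint64_t)(uintptr_t)&nodes[0]);
	return 0;
}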
@@ -459,7 +472,7 @@ static void __init memblock_x86_reserve_range_setup_data(void)
* due to mapping restrictions.
*
* On 64bit, kdump kernel need be restricted to be under 64TB, which is
- * the upper limit of system RAM in 4-level paing mode. Since the kdump
+ * the upper limit of system RAM in 4-level paging mode. Since the kdump
* jumping could be from 5-level to 4-level, the jumping will fail if
* kernel is put above 64TB, and there's no way to detect the paging mode
* of the kernel which will be loaded for dumping during the 1st kernel
@@ -743,8 +756,8 @@ static void __init trim_bios_range(void)
e820__range_update(0, PAGE_SIZE, E820_TYPE_RAM, E820_TYPE_RESERVED);
/*
- * special case: Some BIOSen report the PC BIOS
- * area (640->1Mb) as ram even though it is not.
+ * special case: Some BIOSes report the PC BIOS
+ * area (640Kb -> 1Mb) as RAM even though it is not.
* take them out.
*/
e820__range_remove(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_TYPE_RAM, 1);
@@ -951,7 +964,9 @@ void __init setup_arch(char **cmdline_p)
code_resource.start = __pa_symbol(_text);
code_resource.end = __pa_symbol(_etext)-1;
- data_resource.start = __pa_symbol(_etext);
+ rodata_resource.start = __pa_symbol(__start_rodata);
+ rodata_resource.end = __pa_symbol(__end_rodata)-1;
+ data_resource.start = __pa_symbol(_sdata);
data_resource.end = __pa_symbol(_edata)-1;
bss_resource.start = __pa_symbol(__bss_start);
bss_resource.end = __pa_symbol(__bss_stop)-1;
@@ -1040,6 +1055,7 @@ void __init setup_arch(char **cmdline_p)
/* after parse_early_param, so could debug it */
insert_resource(&iomem_resource, &code_resource);
+ insert_resource(&iomem_resource, &rodata_resource);
insert_resource(&iomem_resource, &data_resource);
insert_resource(&iomem_resource, &bss_resource);
@@ -1122,17 +1138,15 @@ void __init setup_arch(char **cmdline_p)
reserve_bios_regions();
- if (efi_enabled(EFI_MEMMAP)) {
- efi_fake_memmap();
- efi_find_mirror();
- efi_esrt_init();
+ efi_fake_memmap();
+ efi_find_mirror();
+ efi_esrt_init();
- /*
- * The EFI specification says that boot service code won't be
- * called after ExitBootServices(). This is, in fact, a lie.
- */
- efi_reserve_boot_services();
- }
+ /*
+ * The EFI specification says that boot service code won't be
+ * called after ExitBootServices(). This is, in fact, a lie.
+ */
+ efi_reserve_boot_services();
/* preallocate 4k for mptable mpc */
e820__memblock_alloc_reserved_mpc_new();
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index 86663874ef04..e6d7894ad127 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -207,8 +207,8 @@ void __init setup_per_cpu_areas(void)
pcpu_cpu_distance,
pcpu_fc_alloc, pcpu_fc_free);
if (rc < 0)
- pr_warning("%s allocator failed (%d), falling back to page size\n",
- pcpu_fc_names[pcpu_chosen_fc], rc);
+ pr_warn("%s allocator failed (%d), falling back to page size\n",
+ pcpu_fc_names[pcpu_chosen_fc], rc);
}
if (rc < 0)
rc = pcpu_page_first_chunk(PERCPU_FIRST_CHUNK_RESERVE,
diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
index a49fe1dcb47e..4c61f0713832 100644
--- a/arch/x86/kernel/tboot.c
+++ b/arch/x86/kernel/tboot.c
@@ -57,7 +57,7 @@ void __init tboot_probe(void)
*/
if (!e820__mapped_any(boot_params.tboot_addr,
boot_params.tboot_addr, E820_TYPE_RESERVED)) {
- pr_warning("non-0 tboot_addr but it is not of type E820_TYPE_RESERVED\n");
+ pr_warn("non-0 tboot_addr but it is not of type E820_TYPE_RESERVED\n");
return;
}
@@ -65,13 +65,12 @@ void __init tboot_probe(void)
set_fixmap(FIX_TBOOT_BASE, boot_params.tboot_addr);
tboot = (struct tboot *)fix_to_virt(FIX_TBOOT_BASE);
if (memcmp(&tboot_uuid, &tboot->uuid, sizeof(tboot->uuid))) {
- pr_warning("tboot at 0x%llx is invalid\n",
- boot_params.tboot_addr);
+ pr_warn("tboot at 0x%llx is invalid\n", boot_params.tboot_addr);
tboot = NULL;
return;
}
if (tboot->version < 5) {
- pr_warning("tboot version is invalid: %u\n", tboot->version);
+ pr_warn("tboot version is invalid: %u\n", tboot->version);
tboot = NULL;
return;
}
@@ -289,7 +288,7 @@ static int tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
if (sleep_state >= ACPI_S_STATE_COUNT ||
acpi_shutdown_map[sleep_state] == -1) {
- pr_warning("unsupported sleep state 0x%x\n", sleep_state);
+ pr_warn("unsupported sleep state 0x%x\n", sleep_state);
return -1;
}
@@ -302,7 +301,7 @@ static int tboot_extended_sleep(u8 sleep_state, u32 val_a, u32 val_b)
if (!tboot_enabled())
return 0;
- pr_warning("tboot is not able to suspend on platforms with reduced hardware sleep (ACPIv5)");
+ pr_warn("tboot is not able to suspend on platforms with reduced hardware sleep (ACPIv5)");
return -ENODEV;
}
@@ -320,7 +319,7 @@ static int tboot_wait_for_aps(int num_aps)
}
if (timeout)
- pr_warning("tboot wait for APs timeout\n");
+ pr_warn("tboot wait for APs timeout\n");
return !(atomic_read((atomic_t *)&tboot->num_in_wfs) == num_aps);
}
@@ -516,7 +515,7 @@ int tboot_force_iommu(void)
return 1;
if (no_iommu || swiotlb || dmar_disabled)
- pr_warning("Forcing Intel-IOMMU to enabled\n");
+ pr_warn("Forcing Intel-IOMMU to enabled\n");
dmar_disabled = 0;
#ifdef CONFIG_SWIOTLB
diff --git a/arch/x86/kernel/tce_64.c b/arch/x86/kernel/tce_64.c
deleted file mode 100644
index 6384be751eff..000000000000
--- a/arch/x86/kernel/tce_64.c
+++ /dev/null
@@ -1,177 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * This file manages the translation entries for the IBM Calgary IOMMU.
- *
- * Derived from arch/powerpc/platforms/pseries/iommu.c
- *
- * Copyright (C) IBM Corporation, 2006
- *
- * Author: Jon Mason <jdmason@us.ibm.com>
- * Author: Muli Ben-Yehuda <muli@il.ibm.com>
- */
-
-#include <linux/types.h>
-#include <linux/slab.h>
-#include <linux/mm.h>
-#include <linux/spinlock.h>
-#include <linux/string.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include <linux/memblock.h>
-#include <asm/tce.h>
-#include <asm/calgary.h>
-#include <asm/proto.h>
-#include <asm/cacheflush.h>
-
-/* flush a tce at 'tceaddr' to main memory */
-static inline void flush_tce(void* tceaddr)
-{
- /* a single tce can't cross a cache line */
- if (boot_cpu_has(X86_FEATURE_CLFLUSH))
- clflush(tceaddr);
- else
- wbinvd();
-}
-
-void tce_build(struct iommu_table *tbl, unsigned long index,
- unsigned int npages, unsigned long uaddr, int direction)
-{
- u64* tp;
- u64 t;
- u64 rpn;
-
- t = (1 << TCE_READ_SHIFT);
- if (direction != DMA_TO_DEVICE)
- t |= (1 << TCE_WRITE_SHIFT);
-
- tp = ((u64*)tbl->it_base) + index;
-
- while (npages--) {
- rpn = (virt_to_bus((void*)uaddr)) >> PAGE_SHIFT;
- t &= ~TCE_RPN_MASK;
- t |= (rpn << TCE_RPN_SHIFT);
-
- *tp = cpu_to_be64(t);
- flush_tce(tp);
-
- uaddr += PAGE_SIZE;
- tp++;
- }
-}
-
-void tce_free(struct iommu_table *tbl, long index, unsigned int npages)
-{
- u64* tp;
-
- tp = ((u64*)tbl->it_base) + index;
-
- while (npages--) {
- *tp = cpu_to_be64(0);
- flush_tce(tp);
- tp++;
- }
-}
-
-static inline unsigned int table_size_to_number_of_entries(unsigned char size)
-{
- /*
- * size is the order of the table, 0-7
- * smallest table is 8K entries, so shift result by 13 to
- * multiply by 8K
- */
- return (1 << size) << 13;
-}
-
-static int tce_table_setparms(struct pci_dev *dev, struct iommu_table *tbl)
-{
- unsigned int bitmapsz;
- unsigned long bmppages;
- int ret;
-
- tbl->it_busno = dev->bus->number;
-
- /* set the tce table size - measured in entries */
- tbl->it_size = table_size_to_number_of_entries(specified_table_size);
-
- /*
- * number of bytes needed for the bitmap size in number of
- * entries; we need one bit per entry
- */
- bitmapsz = tbl->it_size / BITS_PER_BYTE;
- bmppages = __get_free_pages(GFP_KERNEL, get_order(bitmapsz));
- if (!bmppages) {
- printk(KERN_ERR "Calgary: cannot allocate bitmap\n");
- ret = -ENOMEM;
- goto done;
- }
-
- tbl->it_map = (unsigned long*)bmppages;
-
- memset(tbl->it_map, 0, bitmapsz);
-
- tbl->it_hint = 0;
-
- spin_lock_init(&tbl->it_lock);
-
- return 0;
-
-done:
- return ret;
-}
-
-int __init build_tce_table(struct pci_dev *dev, void __iomem *bbar)
-{
- struct iommu_table *tbl;
- int ret;
-
- if (pci_iommu(dev->bus)) {
- printk(KERN_ERR "Calgary: dev %p has sysdata->iommu %p\n",
- dev, pci_iommu(dev->bus));
- BUG();
- }
-
- tbl = kzalloc(sizeof(struct iommu_table), GFP_KERNEL);
- if (!tbl) {
- printk(KERN_ERR "Calgary: error allocating iommu_table\n");
- ret = -ENOMEM;
- goto done;
- }
-
- ret = tce_table_setparms(dev, tbl);
- if (ret)
- goto free_tbl;
-
- tbl->bbar = bbar;
-
- set_pci_iommu(dev->bus, tbl);
-
- return 0;
-
-free_tbl:
- kfree(tbl);
-done:
- return ret;
-}
-
-void * __init alloc_tce_table(void)
-{
- unsigned int size;
-
- size = table_size_to_number_of_entries(specified_table_size);
- size *= TCE_ENTRY_SIZE;
-
- return memblock_alloc_low(size, size);
-}
-
-void __init free_tce_table(void *tbl)
-{
- unsigned int size;
-
- if (!tbl)
- return;
-
- size = table_size_to_number_of_entries(specified_table_size);
- size *= TCE_ENTRY_SIZE;
-
- memblock_free(__pa(tbl), size);
-}
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 4bb0f8447112..c90312146da0 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -37,11 +37,6 @@
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/io.h>
-
-#if defined(CONFIG_EDAC)
-#include <linux/edac.h>
-#endif
-
#include <asm/stacktrace.h>
#include <asm/processor.h>
#include <asm/debugreg.h>
diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c
index ec534f978867..b8acf639abd1 100644
--- a/arch/x86/kernel/tsc_sync.c
+++ b/arch/x86/kernel/tsc_sync.c
@@ -364,12 +364,12 @@ retry:
/* Force it to 0 if random warps brought us here */
atomic_set(&test_runs, 0);
- pr_warning("TSC synchronization [CPU#%d -> CPU#%d]:\n",
+ pr_warn("TSC synchronization [CPU#%d -> CPU#%d]:\n",
smp_processor_id(), cpu);
- pr_warning("Measured %Ld cycles TSC warp between CPUs, "
- "turning off TSC clock.\n", max_warp);
+ pr_warn("Measured %Ld cycles TSC warp between CPUs, "
+ "turning off TSC clock.\n", max_warp);
if (random_warps)
- pr_warning("TSC warped randomly between CPUs\n");
+ pr_warn("TSC warped randomly between CPUs\n");
mark_tsc_unstable("check_tsc_sync_source failed");
}
diff --git a/arch/x86/kernel/umip.c b/arch/x86/kernel/umip.c
index 548fefed71ee..4d732a444711 100644
--- a/arch/x86/kernel/umip.c
+++ b/arch/x86/kernel/umip.c
@@ -1,6 +1,6 @@
/*
- * umip.c Emulation for instruction protected by the Intel User-Mode
- * Instruction Prevention feature
+ * umip.c Emulation for instructions protected by the User-Mode
+ * Instruction Prevention feature
*
* Copyright (c) 2017, Intel Corporation.
* Ricardo Neri <ricardo.neri-calderon@linux.intel.com>
@@ -18,10 +18,10 @@
/** DOC: Emulation for User-Mode Instruction Prevention (UMIP)
*
- * The feature User-Mode Instruction Prevention present in recent Intel
- * processor prevents a group of instructions (SGDT, SIDT, SLDT, SMSW and STR)
- * from being executed with CPL > 0. Otherwise, a general protection fault is
- * issued.
+ * User-Mode Instruction Prevention is a security feature present in recent
+ * x86 processors that, when enabled, prevents a group of instructions (SGDT,
+ * SIDT, SLDT, SMSW and STR) from being run in user mode by issuing a general
+ * protection fault if the instruction is executed with CPL > 0.
*
* Rather than relaying to the user space the general protection fault caused by
* the UMIP-protected instructions (in the form of a SIGSEGV signal), it can be
@@ -91,7 +91,7 @@ const char * const umip_insns[5] = {
#define umip_pr_err(regs, fmt, ...) \
umip_printk(regs, KERN_ERR, fmt, ##__VA_ARGS__)
-#define umip_pr_warning(regs, fmt, ...) \
+#define umip_pr_warn(regs, fmt, ...) \
umip_printk(regs, KERN_WARNING, fmt, ##__VA_ARGS__)
/**
@@ -380,14 +380,14 @@ bool fixup_umip_exception(struct pt_regs *regs)
if (umip_inst < 0)
return false;
- umip_pr_warning(regs, "%s instruction cannot be used by applications.\n",
+ umip_pr_warn(regs, "%s instruction cannot be used by applications.\n",
umip_insns[umip_inst]);
/* Do not emulate (spoof) SLDT or STR. */
if (umip_inst == UMIP_INST_STR || umip_inst == UMIP_INST_SLDT)
return false;
- umip_pr_warning(regs, "For now, expensive software emulation returns the result.\n");
+ umip_pr_warn(regs, "For now, expensive software emulation returns the result.\n");
if (emulate_umip_insn(&insn, umip_inst, dummy_data, &dummy_data_size,
user_64bit_mode(regs)))
diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
index 8cd745ef8c7b..15e5aad8ac2c 100644
--- a/arch/x86/kernel/uprobes.c
+++ b/arch/x86/kernel/uprobes.c
@@ -842,8 +842,8 @@ static int push_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn)
/**
* arch_uprobe_analyze_insn - instruction analysis including validity and fixups.
+ * @auprobe: the probepoint information.
* @mm: the probed address space.
- * @arch_uprobe: the probepoint information.
* @addr: virtual address at which to install the probepoint
* Return 0 on success or a -ve number on error.
*/
diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
index a024c4f7ba56..641f0fe1e5b4 100644
--- a/arch/x86/kernel/verify_cpu.S
+++ b/arch/x86/kernel/verify_cpu.S
@@ -31,7 +31,7 @@
#include <asm/cpufeatures.h>
#include <asm/msr-index.h>
-ENTRY(verify_cpu)
+SYM_FUNC_START_LOCAL(verify_cpu)
pushf # Save caller passed flags
push $0 # Kill any dangerous flags
popf
@@ -137,4 +137,4 @@ ENTRY(verify_cpu)
popf # Restore caller passed flags
xorl %eax, %eax
ret
-ENDPROC(verify_cpu)
+SYM_FUNC_END(verify_cpu)
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index e2feacf921a0..3a1a819da137 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -21,6 +21,9 @@
#define LOAD_OFFSET __START_KERNEL_map
#endif
+#define EMITS_PT_NOTE
+#define RO_EXCEPTION_TABLE_ALIGN 16
+
#include <asm-generic/vmlinux.lds.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
@@ -141,17 +144,12 @@ SECTIONS
*(.text.__x86.indirect_thunk)
__indirect_thunk_end = .;
#endif
+ } :text =0xcccc
- /* End of text section */
- _etext = .;
- } :text = 0x9090
-
- NOTES :text :note
-
- EXCEPTION_TABLE(16) :text = 0x9090
-
- /* .text should occupy whole number of pages */
+ /* End of text section, which should occupy a whole number of pages */
+ _etext = .;
. = ALIGN(PAGE_SIZE);
+
X86_ALIGN_RODATA_BEGIN
RO_DATA(PAGE_SIZE)
X86_ALIGN_RODATA_END
diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
index 18a799c8fa28..ce89430a7f80 100644
--- a/arch/x86/kernel/x86_init.c
+++ b/arch/x86/kernel/x86_init.c
@@ -31,6 +31,28 @@ static int __init iommu_init_noop(void) { return 0; }
static void iommu_shutdown_noop(void) { }
bool __init bool_x86_init_noop(void) { return false; }
void x86_op_int_noop(int cpu) { }
+static __init int set_rtc_noop(const struct timespec64 *now) { return -EINVAL; }
+static __init void get_rtc_noop(struct timespec64 *now) { }
+
+static __initconst const struct of_device_id of_cmos_match[] = {
+ { .compatible = "motorola,mc146818" },
+ {}
+};
+
+/*
+ * Allow devicetree configured systems to disable the RTC by setting the
+ * corresponding DT node's status property to disabled. Code is optimized
+ * out for CONFIG_OF=n builds.
+ */
+static __init void x86_wallclock_init(void)
+{
+ struct device_node *node = of_find_matching_node(NULL, of_cmos_match);
+
+ if (node && !of_device_is_available(node)) {
+ x86_platform.get_wallclock = get_rtc_noop;
+ x86_platform.set_wallclock = set_rtc_noop;
+ }
+}
/*
* The platform setup functions are preset with the default functions
@@ -73,7 +95,7 @@ struct x86_init_ops x86_init __initdata = {
.timers = {
.setup_percpu_clockev = setup_boot_APIC_clock,
.timer_init = hpet_time_init,
- .wallclock_init = x86_init_noop,
+ .wallclock_init = x86_wallclock_init,
},
.iommu = {
diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile
index 31ecf7a76d5a..b19ef421084d 100644
--- a/arch/x86/kvm/Makefile
+++ b/arch/x86/kvm/Makefile
@@ -8,9 +8,9 @@ kvm-y += $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o \
$(KVM)/eventfd.o $(KVM)/irqchip.o $(KVM)/vfio.o
kvm-$(CONFIG_KVM_ASYNC_PF) += $(KVM)/async_pf.o
-kvm-y += x86.o mmu.o emulate.o i8259.o irq.o lapic.o \
+kvm-y += x86.o emulate.o i8259.o irq.o lapic.o \
i8254.o ioapic.o irq_comm.o cpuid.o pmu.o mtrr.o \
- hyperv.o page_track.o debugfs.o
+ hyperv.o debugfs.o mmu/mmu.o mmu/page_track.o
kvm-intel-y += vmx/vmx.o vmx/vmenter.o vmx/pmu_intel.o vmx/vmcs12.o vmx/evmcs.o vmx/nested.o
kvm-amd-y += svm.o pmu_amd.o
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index f68c0c753c38..c0aa07487eb8 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -816,8 +816,6 @@ static int do_cpuid_func(struct kvm_cpuid_entry2 *entry, u32 func,
return __do_cpuid_func(entry, func, nent, maxnent);
}
-#undef F
-
struct kvm_cpuid_param {
u32 func;
bool (*qualifier)(const struct kvm_cpuid_param *param);
@@ -1015,6 +1013,12 @@ bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx,
*ebx = entry->ebx;
*ecx = entry->ecx;
*edx = entry->edx;
+ if (function == 7 && index == 0) {
+ u64 data;
+ if (!__kvm_get_msr(vcpu, MSR_IA32_TSX_CTRL, &data, true) &&
+ (data & TSX_CTRL_CPUID_CLEAR))
+ *ebx &= ~(F(RTM) | F(HLE));
+ }
} else {
*eax = *ebx = *ecx = *edx = 0;
/*
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 698efb8c3897..952d1a4f4d7e 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -2770,11 +2770,10 @@ static int em_syscall(struct x86_emulate_ctxt *ctxt)
return emulate_ud(ctxt);
ops->get_msr(ctxt, MSR_EFER, &efer);
- setup_syscalls_segments(ctxt, &cs, &ss);
-
if (!(efer & EFER_SCE))
return emulate_ud(ctxt);
+ setup_syscalls_segments(ctxt, &cs, &ss);
ops->get_msr(ctxt, MSR_STAR, &msr_data);
msr_data >>= 32;
cs_sel = (u16)(msr_data & 0xfffc);
@@ -2838,12 +2837,11 @@ static int em_sysenter(struct x86_emulate_ctxt *ctxt)
if (ctxt->mode == X86EMUL_MODE_PROT64)
return X86EMUL_UNHANDLEABLE;
- setup_syscalls_segments(ctxt, &cs, &ss);
-
ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
if ((msr_data & 0xfffc) == 0x0)
return emulate_gp(ctxt, 0);
+ setup_syscalls_segments(ctxt, &cs, &ss);
ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
cs_sel = (u16)msr_data & ~SEGMENT_RPL_MASK;
ss_sel = cs_sel + 8;
diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c
index d859ae8890d0..9fd2dd89a1c5 100644
--- a/arch/x86/kvm/ioapic.c
+++ b/arch/x86/kvm/ioapic.c
@@ -271,8 +271,9 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
{
unsigned index;
bool mask_before, mask_after;
- int old_remote_irr, old_delivery_status;
union kvm_ioapic_redirect_entry *e;
+ unsigned long vcpu_bitmap;
+ int old_remote_irr, old_delivery_status, old_dest_id, old_dest_mode;
switch (ioapic->ioregsel) {
case IOAPIC_REG_VERSION:
@@ -296,6 +297,8 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
/* Preserve read-only fields */
old_remote_irr = e->fields.remote_irr;
old_delivery_status = e->fields.delivery_status;
+ old_dest_id = e->fields.dest_id;
+ old_dest_mode = e->fields.dest_mode;
if (ioapic->ioregsel & 1) {
e->bits &= 0xffffffff;
e->bits |= (u64) val << 32;
@@ -321,7 +324,34 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG
&& ioapic->irr & (1 << index))
ioapic_service(ioapic, index, false);
- kvm_make_scan_ioapic_request(ioapic->kvm);
+ if (e->fields.delivery_mode == APIC_DM_FIXED) {
+ struct kvm_lapic_irq irq;
+
+ irq.shorthand = 0;
+ irq.vector = e->fields.vector;
+ irq.delivery_mode = e->fields.delivery_mode << 8;
+ irq.dest_id = e->fields.dest_id;
+ irq.dest_mode = e->fields.dest_mode;
+ bitmap_zero(&vcpu_bitmap, 16);
+ kvm_bitmap_or_dest_vcpus(ioapic->kvm, &irq,
+ &vcpu_bitmap);
+ if (old_dest_mode != e->fields.dest_mode ||
+ old_dest_id != e->fields.dest_id) {
+ /*
+ * Update vcpu_bitmap with vcpus specified in
+ * the previous request as well. This is done to
+ * keep ioapic_handled_vectors synchronized.
+ */
+ irq.dest_id = old_dest_id;
+ irq.dest_mode = old_dest_mode;
+ kvm_bitmap_or_dest_vcpus(ioapic->kvm, &irq,
+ &vcpu_bitmap);
+ }
+ kvm_make_scan_ioapic_request_mask(ioapic->kvm,
+ &vcpu_bitmap);
+ } else {
+ kvm_make_scan_ioapic_request(ioapic->kvm);
+ }
break;
}
}
diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h
index 1cc6c47dc77e..58767020de41 100644
--- a/arch/x86/kvm/kvm_cache_regs.h
+++ b/arch/x86/kvm/kvm_cache_regs.h
@@ -37,22 +37,50 @@ BUILD_KVM_GPR_ACCESSORS(r14, R14)
BUILD_KVM_GPR_ACCESSORS(r15, R15)
#endif
-static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu,
- enum kvm_reg reg)
+static inline bool kvm_register_is_available(struct kvm_vcpu *vcpu,
+ enum kvm_reg reg)
{
- if (!test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail))
+ return test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
+}
+
+static inline bool kvm_register_is_dirty(struct kvm_vcpu *vcpu,
+ enum kvm_reg reg)
+{
+ return test_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
+}
+
+static inline void kvm_register_mark_available(struct kvm_vcpu *vcpu,
+ enum kvm_reg reg)
+{
+ __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
+}
+
+static inline void kvm_register_mark_dirty(struct kvm_vcpu *vcpu,
+ enum kvm_reg reg)
+{
+ __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
+ __set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
+}
+
+static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu, int reg)
+{
+ if (WARN_ON_ONCE((unsigned int)reg >= NR_VCPU_REGS))
+ return 0;
+
+ if (!kvm_register_is_available(vcpu, reg))
kvm_x86_ops->cache_reg(vcpu, reg);
return vcpu->arch.regs[reg];
}
-static inline void kvm_register_write(struct kvm_vcpu *vcpu,
- enum kvm_reg reg,
+static inline void kvm_register_write(struct kvm_vcpu *vcpu, int reg,
unsigned long val)
{
+ if (WARN_ON_ONCE((unsigned int)reg >= NR_VCPU_REGS))
+ return;
+
vcpu->arch.regs[reg] = val;
- __set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
- __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
+ kvm_register_mark_dirty(vcpu, reg);
}
static inline unsigned long kvm_rip_read(struct kvm_vcpu *vcpu)
@@ -79,9 +107,8 @@ static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
{
might_sleep(); /* on svm */
- if (!test_bit(VCPU_EXREG_PDPTR,
- (unsigned long *)&vcpu->arch.regs_avail))
- kvm_x86_ops->cache_reg(vcpu, (enum kvm_reg)VCPU_EXREG_PDPTR);
+ if (!kvm_register_is_available(vcpu, VCPU_EXREG_PDPTR))
+ kvm_x86_ops->cache_reg(vcpu, VCPU_EXREG_PDPTR);
return vcpu->arch.walk_mmu->pdptrs[index];
}
@@ -109,8 +136,8 @@ static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask)
static inline ulong kvm_read_cr3(struct kvm_vcpu *vcpu)
{
- if (!test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail))
- kvm_x86_ops->decache_cr3(vcpu);
+ if (!kvm_register_is_available(vcpu, VCPU_EXREG_CR3))
+ kvm_x86_ops->cache_reg(vcpu, VCPU_EXREG_CR3);
return vcpu->arch.cr3;
}
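The new kvm_register_is_available()/kvm_register_mark_dirty() helpers implement a lazy read/write-back cache keyed by per-register bits. A self-contained C sketch of the pattern (hypothetical names; slow_load() stands in for the expensive VMCS/hardware read):

#include <stdio.h>

enum { REG_RAX, REG_RIP, NR_REGS };

struct vcpu_regs {
	unsigned long regs[NR_REGS];
	unsigned long avail, dirty;	/* one bit per register */
};

/* stand-in for the expensive hardware/VMCS read */
static unsigned long slow_load(int reg) { return 0x1000 + reg; }

static unsigned long reg_read(struct vcpu_regs *v, int reg)
{
	if (!(v->avail & (1UL << reg))) {	/* first read: fault it in */
		v->regs[reg] = slow_load(reg);
		v->avail |= 1UL << reg;
	}
	return v->regs[reg];
}

static void reg_write(struct vcpu_regs *v, int reg, unsigned long val)
{
	v->regs[reg] = val;
	v->avail |= 1UL << reg;		/* value is now valid ... */
	v->dirty |= 1UL << reg;		/* ... and must be written back */
}

int main(void)
{
	struct vcpu_regs v = { { 0 }, 0, 0 };

	printf("rip=%#lx\n", reg_read(&v, REG_RIP));	/* loads once */
	reg_write(&v, REG_RAX, 42);
	printf("dirty=%#lx\n", v.dirty);		/* bit REG_RAX set */
	return 0;
}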
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index b29d00b661ff..cf9177b4a07f 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -557,60 +557,53 @@ int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq,
irq->level, irq->trig_mode, dest_map);
}
+static int __pv_send_ipi(unsigned long *ipi_bitmap, struct kvm_apic_map *map,
+ struct kvm_lapic_irq *irq, u32 min)
+{
+ int i, count = 0;
+ struct kvm_vcpu *vcpu;
+
+ if (min > map->max_apic_id)
+ return 0;
+
+ for_each_set_bit(i, ipi_bitmap,
+ min((u32)BITS_PER_LONG, (map->max_apic_id - min + 1))) {
+ if (map->phys_map[min + i]) {
+ vcpu = map->phys_map[min + i]->vcpu;
+ count += kvm_apic_set_irq(vcpu, irq, NULL);
+ }
+ }
+
+ return count;
+}
+
int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
unsigned long ipi_bitmap_high, u32 min,
unsigned long icr, int op_64_bit)
{
- int i;
struct kvm_apic_map *map;
- struct kvm_vcpu *vcpu;
struct kvm_lapic_irq irq = {0};
int cluster_size = op_64_bit ? 64 : 32;
- int count = 0;
+ int count;
+
+ if (icr & (APIC_DEST_MASK | APIC_SHORT_MASK))
+ return -KVM_EINVAL;
irq.vector = icr & APIC_VECTOR_MASK;
irq.delivery_mode = icr & APIC_MODE_MASK;
irq.level = (icr & APIC_INT_ASSERT) != 0;
irq.trig_mode = icr & APIC_INT_LEVELTRIG;
- if (icr & APIC_DEST_MASK)
- return -KVM_EINVAL;
- if (icr & APIC_SHORT_MASK)
- return -KVM_EINVAL;
-
rcu_read_lock();
map = rcu_dereference(kvm->arch.apic_map);
- if (unlikely(!map)) {
- count = -EOPNOTSUPP;
- goto out;
- }
-
- if (min > map->max_apic_id)
- goto out;
- /* Bits above cluster_size are masked in the caller. */
- for_each_set_bit(i, &ipi_bitmap_low,
- min((u32)BITS_PER_LONG, (map->max_apic_id - min + 1))) {
- if (map->phys_map[min + i]) {
- vcpu = map->phys_map[min + i]->vcpu;
- count += kvm_apic_set_irq(vcpu, &irq, NULL);
- }
+ count = -EOPNOTSUPP;
+ if (likely(map)) {
+ count = __pv_send_ipi(&ipi_bitmap_low, map, &irq, min);
+ min += cluster_size;
+ count += __pv_send_ipi(&ipi_bitmap_high, map, &irq, min);
}
- min += cluster_size;
-
- if (min > map->max_apic_id)
- goto out;
-
- for_each_set_bit(i, &ipi_bitmap_high,
- min((u32)BITS_PER_LONG, (map->max_apic_id - min + 1))) {
- if (map->phys_map[min + i]) {
- vcpu = map->phys_map[min + i]->vcpu;
- count += kvm_apic_set_irq(vcpu, &irq, NULL);
- }
- }
-
-out:
rcu_read_unlock();
return count;
}
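The __pv_send_ipi() helper factored out above delivers to APIC ID (min + bit) for every set bit, clamped to both the word size and the top of the APIC map. A standalone sketch of that fan-out (send_one() is a hypothetical stand-in for kvm_apic_set_irq()):

#include <stdio.h>

#define BITS_PER_LONG (8 * (int)sizeof(long))

static int send_one(unsigned int apic_id)
{
	printf("IPI -> APIC %u\n", apic_id);
	return 1;
}

static int pv_send_ipi(unsigned long bitmap, unsigned int min,
		       unsigned int max_apic_id)
{
	unsigned int i, span;
	int count = 0;

	if (min > max_apic_id)
		return 0;

	span = max_apic_id - min + 1;
	if (span > (unsigned int)BITS_PER_LONG)
		span = BITS_PER_LONG;

	for (i = 0; i < span; i++)
		if (bitmap & (1UL << i))
			count += send_one(min + i);
	return count;
}

int main(void)
{
	/* bits 0 and 3 set, min = 64: targets APIC IDs 64 and 67 */
	printf("delivered: %d\n", pv_send_ipi(0x9, 64, 255));
	return 0;
}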
@@ -1124,6 +1117,50 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
return result;
}
+/*
+ * This routine identifies the mask of destination vcpus meant to receive
+ * IOAPIC interrupts. It either uses kvm_apic_map_get_dest_lapic() to find
+ * the destination vcpus array and set the bitmap, or it walks each
+ * available vcpu and matches it against the destination itself.
+ */
+void kvm_bitmap_or_dest_vcpus(struct kvm *kvm, struct kvm_lapic_irq *irq,
+ unsigned long *vcpu_bitmap)
+{
+ struct kvm_lapic **dest_vcpu = NULL;
+ struct kvm_lapic *src = NULL;
+ struct kvm_apic_map *map;
+ struct kvm_vcpu *vcpu;
+ unsigned long bitmap;
+ int i, vcpu_idx;
+ bool ret;
+
+ rcu_read_lock();
+ map = rcu_dereference(kvm->arch.apic_map);
+
+ ret = kvm_apic_map_get_dest_lapic(kvm, &src, irq, map, &dest_vcpu,
+ &bitmap);
+ if (ret) {
+ for_each_set_bit(i, &bitmap, 16) {
+ if (!dest_vcpu[i])
+ continue;
+ vcpu_idx = dest_vcpu[i]->vcpu->vcpu_idx;
+ __set_bit(vcpu_idx, vcpu_bitmap);
+ }
+ } else {
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ if (!kvm_apic_present(vcpu))
+ continue;
+ if (!kvm_apic_match_dest(vcpu, NULL,
+ irq->delivery_mode,
+ irq->dest_id,
+ irq->dest_mode))
+ continue;
+ __set_bit(i, vcpu_bitmap);
+ }
+ }
+ rcu_read_unlock();
+}
+
int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2)
{
return vcpu1->arch.apic_arb_prio - vcpu2->arch.apic_arb_prio;
@@ -2709,7 +2746,7 @@ void kvm_apic_accept_events(struct kvm_vcpu *vcpu)
* KVM_MP_STATE_INIT_RECEIVED state), just eat SIPIs
* and leave the INIT pending.
*/
- if (is_smm(vcpu) || kvm_x86_ops->apic_init_signal_blocked(vcpu)) {
+ if (kvm_vcpu_latch_init(vcpu)) {
WARN_ON_ONCE(vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED);
if (test_bit(KVM_APIC_SIPI, &apic->pending_events))
clear_bit(KVM_APIC_SIPI, &apic->pending_events);
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index 1f5014852e20..39925afdfcdc 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -226,6 +226,9 @@ bool kvm_apic_pending_eoi(struct kvm_vcpu *vcpu, int vector);
void kvm_wait_lapic_expire(struct kvm_vcpu *vcpu);
+void kvm_bitmap_or_dest_vcpus(struct kvm *kvm, struct kvm_lapic_irq *irq,
+ unsigned long *vcpu_bitmap);
+
bool kvm_intr_is_single_vcpu_fast(struct kvm *kvm, struct kvm_lapic_irq *irq,
struct kvm_vcpu **dest_vcpu);
int kvm_vector_to_index(u32 vector, u32 dest_vcpus,
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 11f8ec89433b..d55674f44a18 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -210,4 +210,8 @@ void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
struct kvm_memory_slot *slot, u64 gfn);
int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu);
+
+int kvm_mmu_post_init_vm(struct kvm *kvm);
+void kvm_mmu_pre_destroy_vm(struct kvm *kvm);
+
#endif
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 24c23c66b226..6f92b40d798c 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -37,6 +37,7 @@
#include <linux/uaccess.h>
#include <linux/hash.h>
#include <linux/kern_levels.h>
+#include <linux/kthread.h>
#include <asm/page.h>
#include <asm/pat.h>
@@ -47,6 +48,35 @@
#include <asm/kvm_page_track.h>
#include "trace.h"
+extern bool itlb_multihit_kvm_mitigation;
+
+static int __read_mostly nx_huge_pages = -1;
+#ifdef CONFIG_PREEMPT_RT
+/* Recovery can cause latency spikes; disable it for PREEMPT_RT. */
+static uint __read_mostly nx_huge_pages_recovery_ratio = 0;
+#else
+static uint __read_mostly nx_huge_pages_recovery_ratio = 60;
+#endif
+
+static int set_nx_huge_pages(const char *val, const struct kernel_param *kp);
+static int set_nx_huge_pages_recovery_ratio(const char *val, const struct kernel_param *kp);
+
+static struct kernel_param_ops nx_huge_pages_ops = {
+ .set = set_nx_huge_pages,
+ .get = param_get_bool,
+};
+
+static struct kernel_param_ops nx_huge_pages_recovery_ratio_ops = {
+ .set = set_nx_huge_pages_recovery_ratio,
+ .get = param_get_uint,
+};
+
+module_param_cb(nx_huge_pages, &nx_huge_pages_ops, &nx_huge_pages, 0644);
+__MODULE_PARM_TYPE(nx_huge_pages, "bool");
+module_param_cb(nx_huge_pages_recovery_ratio, &nx_huge_pages_recovery_ratio_ops,
+ &nx_huge_pages_recovery_ratio, 0644);
+__MODULE_PARM_TYPE(nx_huge_pages_recovery_ratio, "uint");
+
/*
* When setting this variable to true it enables Two-Dimensional-Paging
* where the hardware walks 2 page tables:
@@ -352,6 +382,11 @@ static inline bool spte_ad_need_write_protect(u64 spte)
return (spte & SPTE_SPECIAL_MASK) != SPTE_AD_ENABLED_MASK;
}
+static bool is_nx_huge_page_enabled(void)
+{
+ return READ_ONCE(nx_huge_pages);
+}
+
static inline u64 spte_shadow_accessed_mask(u64 spte)
{
MMU_WARN_ON(is_mmio_spte(spte));
@@ -1190,6 +1225,17 @@ static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
kvm_mmu_gfn_disallow_lpage(slot, gfn);
}
+static void account_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp)
+{
+ if (sp->lpage_disallowed)
+ return;
+
+ ++kvm->stat.nx_lpage_splits;
+ list_add_tail(&sp->lpage_disallowed_link,
+ &kvm->arch.lpage_disallowed_mmu_pages);
+ sp->lpage_disallowed = true;
+}
+
static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
{
struct kvm_memslots *slots;
@@ -1207,6 +1253,13 @@ static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
kvm_mmu_gfn_allow_lpage(slot, gfn);
}
+static void unaccount_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp)
+{
+ --kvm->stat.nx_lpage_splits;
+ sp->lpage_disallowed = false;
+ list_del(&sp->lpage_disallowed_link);
+}
+
static bool __mmu_gfn_lpage_is_disallowed(gfn_t gfn, int level,
struct kvm_memory_slot *slot)
{
@@ -2792,6 +2845,9 @@ static bool __kvm_mmu_prepare_zap_page(struct kvm *kvm,
kvm_reload_remote_mmus(kvm);
}
+ if (sp->lpage_disallowed)
+ unaccount_huge_nx_page(kvm, sp);
+
sp->role.invalid = 1;
return list_unstable;
}
@@ -3013,6 +3069,11 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
if (!speculative)
spte |= spte_shadow_accessed_mask(spte);
+ if (level > PT_PAGE_TABLE_LEVEL && (pte_access & ACC_EXEC_MASK) &&
+ is_nx_huge_page_enabled()) {
+ pte_access &= ~ACC_EXEC_MASK;
+ }
+
if (pte_access & ACC_EXEC_MASK)
spte |= shadow_x_mask;
else
@@ -3233,9 +3294,32 @@ static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep)
__direct_pte_prefetch(vcpu, sp, sptep);
}
+static void disallowed_hugepage_adjust(struct kvm_shadow_walk_iterator it,
+ gfn_t gfn, kvm_pfn_t *pfnp, int *levelp)
+{
+ int level = *levelp;
+ u64 spte = *it.sptep;
+
+ if (it.level == level && level > PT_PAGE_TABLE_LEVEL &&
+ is_nx_huge_page_enabled() &&
+ is_shadow_present_pte(spte) &&
+ !is_large_pte(spte)) {
+ /*
+ * A small SPTE exists for this pfn, but FNAME(fetch)
+ * and __direct_map would like to create a large PTE
+ * instead: just force them to go down another level,
+ * patching the next 9 bits of the address back into
+ * pfn on their behalf.
+ */
+ u64 page_mask = KVM_PAGES_PER_HPAGE(level) - KVM_PAGES_PER_HPAGE(level - 1);
+ *pfnp |= gfn & page_mask;
+ (*levelp)--;
+ }
+}
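The page_mask arithmetic above is worth spelling out: at level 2 (2M pages) there are 512 4K pages per huge page and 1 per 4K page, so page_mask = 512 - 1 = 511 (0x1ff), exactly the nine gfn bits selecting a 4K page inside the 2M region. A sketch with a simplified PAGES_PER_HPAGE (the real KVM_PAGES_PER_HPAGE is defined differently but yields the same values at these levels):

#include <stdio.h>

#define PAGES_PER_HPAGE(level) (1UL << (((level) - 1) * 9))

int main(void)
{
	unsigned long level = 2;	/* a 2M mapping being demoted to 4K */
	unsigned long page_mask =
		PAGES_PER_HPAGE(level) - PAGES_PER_HPAGE(level - 1);
	unsigned long gfn = 0x12345, pfn = 0x80000;	/* pfn 2M-aligned */

	pfn |= gfn & page_mask;	/* select the right 4K page in the region */
	printf("page_mask=%#lx pfn=%#lx\n", page_mask, pfn);
	/* prints page_mask=0x1ff pfn=0x80145 */
	return 0;
}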
+
static int __direct_map(struct kvm_vcpu *vcpu, gpa_t gpa, int write,
int map_writable, int level, kvm_pfn_t pfn,
- bool prefault)
+ bool prefault, bool lpage_disallowed)
{
struct kvm_shadow_walk_iterator it;
struct kvm_mmu_page *sp;
@@ -3248,6 +3332,12 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t gpa, int write,
trace_kvm_mmu_spte_requested(gpa, level, pfn);
for_each_shadow_entry(vcpu, gpa, it) {
+ /*
+ * We cannot overwrite existing page tables with an NX
+ * large page, as the leaf could be executable.
+ */
+ disallowed_hugepage_adjust(it, gfn, &pfn, &level);
+
base_gfn = gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);
if (it.level == level)
break;
@@ -3258,6 +3348,8 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t gpa, int write,
it.level - 1, true, ACC_ALL);
link_shadow_page(vcpu, it.sptep, sp);
+ if (lpage_disallowed)
+ account_huge_nx_page(vcpu->kvm, sp);
}
}
@@ -3306,7 +3398,7 @@ static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
* here.
*/
if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn) &&
- level == PT_PAGE_TABLE_LEVEL &&
+ !kvm_is_zone_device_pfn(pfn) && level == PT_PAGE_TABLE_LEVEL &&
PageTransCompoundMap(pfn_to_page(pfn)) &&
!mmu_gfn_lpage_is_disallowed(vcpu, gfn, PT_DIRECTORY_LEVEL)) {
unsigned long mask;
@@ -3550,11 +3642,14 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code,
{
int r;
int level;
- bool force_pt_level = false;
+ bool force_pt_level;
kvm_pfn_t pfn;
unsigned long mmu_seq;
bool map_writable, write = error_code & PFERR_WRITE_MASK;
+ bool lpage_disallowed = (error_code & PFERR_FETCH_MASK) &&
+ is_nx_huge_page_enabled();
+ force_pt_level = lpage_disallowed;
level = mapping_level(vcpu, gfn, &force_pt_level);
if (likely(!force_pt_level)) {
/*
@@ -3588,7 +3683,8 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code,
goto out_unlock;
if (likely(!force_pt_level))
transparent_hugepage_adjust(vcpu, gfn, &pfn, &level);
- r = __direct_map(vcpu, v, write, map_writable, level, pfn, prefault);
+ r = __direct_map(vcpu, v, write, map_writable, level, pfn,
+ prefault, false);
out_unlock:
spin_unlock(&vcpu->kvm->mmu_lock);
kvm_release_pfn_clean(pfn);
@@ -4174,6 +4270,8 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
unsigned long mmu_seq;
int write = error_code & PFERR_WRITE_MASK;
bool map_writable;
+ bool lpage_disallowed = (error_code & PFERR_FETCH_MASK) &&
+ is_nx_huge_page_enabled();
MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa));
@@ -4184,8 +4282,9 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
if (r)
return r;
- force_pt_level = !check_hugepage_cache_consistency(vcpu, gfn,
- PT_DIRECTORY_LEVEL);
+ force_pt_level =
+ lpage_disallowed ||
+ !check_hugepage_cache_consistency(vcpu, gfn, PT_DIRECTORY_LEVEL);
level = mapping_level(vcpu, gfn, &force_pt_level);
if (likely(!force_pt_level)) {
if (level > PT_DIRECTORY_LEVEL &&
@@ -4214,7 +4313,8 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
goto out_unlock;
if (likely(!force_pt_level))
transparent_hugepage_adjust(vcpu, gfn, &pfn, &level);
- r = __direct_map(vcpu, gpa, write, map_writable, level, pfn, prefault);
+ r = __direct_map(vcpu, gpa, write, map_writable, level, pfn,
+ prefault, lpage_disallowed);
out_unlock:
spin_unlock(&vcpu->kvm->mmu_lock);
kvm_release_pfn_clean(pfn);
@@ -4295,7 +4395,7 @@ static bool fast_cr3_switch(struct kvm_vcpu *vcpu, gpa_t new_cr3,
kvm_make_request(KVM_REQ_LOAD_CR3, vcpu);
if (!skip_tlb_flush) {
kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
- kvm_x86_ops->tlb_flush(vcpu, true);
+ kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
}
/*
@@ -5914,9 +6014,9 @@ restart:
* the guest, and the guest page table is using 4K page size
* mapping if the indirect sp has level = 1.
*/
- if (sp->role.direct &&
- !kvm_is_reserved_pfn(pfn) &&
- PageTransCompoundMap(pfn_to_page(pfn))) {
+ if (sp->role.direct && !kvm_is_reserved_pfn(pfn) &&
+ !kvm_is_zone_device_pfn(pfn) &&
+ PageTransCompoundMap(pfn_to_page(pfn))) {
pte_list_remove(rmap_head, sptep);
if (kvm_available_flush_tlb_with_range())
@@ -6155,10 +6255,59 @@ static void kvm_set_mmio_spte_mask(void)
kvm_mmu_set_mmio_spte_mask(mask, mask, ACC_WRITE_MASK | ACC_USER_MASK);
}
+static bool get_nx_auto_mode(void)
+{
+ /* Return true when CPU has the bug, and mitigations are ON */
+ return boot_cpu_has_bug(X86_BUG_ITLB_MULTIHIT) && !cpu_mitigations_off();
+}
+
+static void __set_nx_huge_pages(bool val)
+{
+ nx_huge_pages = itlb_multihit_kvm_mitigation = val;
+}
+
+static int set_nx_huge_pages(const char *val, const struct kernel_param *kp)
+{
+ bool old_val = nx_huge_pages;
+ bool new_val;
+
+ /* In "auto" mode deploy workaround only if CPU has the bug. */
+ if (sysfs_streq(val, "off"))
+ new_val = 0;
+ else if (sysfs_streq(val, "force"))
+ new_val = 1;
+ else if (sysfs_streq(val, "auto"))
+ new_val = get_nx_auto_mode();
+ else if (strtobool(val, &new_val) < 0)
+ return -EINVAL;
+
+ __set_nx_huge_pages(new_val);
+
+ if (new_val != old_val) {
+ struct kvm *kvm;
+
+ mutex_lock(&kvm_lock);
+
+ list_for_each_entry(kvm, &vm_list, vm_list) {
+ mutex_lock(&kvm->slots_lock);
+ kvm_mmu_zap_all_fast(kvm);
+ mutex_unlock(&kvm->slots_lock);
+
+ wake_up_process(kvm->arch.nx_lpage_recovery_thread);
+ }
+ mutex_unlock(&kvm_lock);
+ }
+
+ return 0;
+}
+
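set_nx_huge_pages() is the .set hook of a custom kernel_param_ops, which is how the parameter can accept "off"/"force"/"auto" on top of the usual boolean spellings. The wiring itself is not visible in this hunk; a sketch of what it presumably looks like near the top of mmu.c (ops name, permissions, and description text assumed):

#include <linux/moduleparam.h>

static const struct kernel_param_ops nx_huge_pages_ops = {
        .set = set_nx_huge_pages,       /* parses off/force/auto/bool */
        .get = param_get_bool,          /* reports the effective value */
};

module_param_cb(nx_huge_pages, &nx_huge_pages_ops, &nx_huge_pages, 0644);
MODULE_PARM_DESC(nx_huge_pages,
                 "Disable executable huge pages to mitigate iTLB multihit");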
int kvm_mmu_module_init(void)
{
int ret = -ENOMEM;
+ if (nx_huge_pages == -1)
+ __set_nx_huge_pages(get_nx_auto_mode());
+
/*
* MMU roles use union aliasing which is, generally speaking, an
* undefined behavior. However, we supposedly know how compilers behave
@@ -6238,3 +6387,116 @@ void kvm_mmu_module_exit(void)
unregister_shrinker(&mmu_shrinker);
mmu_audit_disable();
}
+
+static int set_nx_huge_pages_recovery_ratio(const char *val, const struct kernel_param *kp)
+{
+ unsigned int old_val;
+ int err;
+
+ old_val = nx_huge_pages_recovery_ratio;
+ err = param_set_uint(val, kp);
+ if (err)
+ return err;
+
+ if (READ_ONCE(nx_huge_pages) &&
+ !old_val && nx_huge_pages_recovery_ratio) {
+ struct kvm *kvm;
+
+ mutex_lock(&kvm_lock);
+
+ list_for_each_entry(kvm, &vm_list, vm_list)
+ wake_up_process(kvm->arch.nx_lpage_recovery_thread);
+
+ mutex_unlock(&kvm_lock);
+ }
+
+ return err;
+}
+
+static void kvm_recover_nx_lpages(struct kvm *kvm)
+{
+ int rcu_idx;
+ struct kvm_mmu_page *sp;
+ unsigned int ratio;
+ LIST_HEAD(invalid_list);
+ ulong to_zap;
+
+ rcu_idx = srcu_read_lock(&kvm->srcu);
+ spin_lock(&kvm->mmu_lock);
+
+ ratio = READ_ONCE(nx_huge_pages_recovery_ratio);
+ to_zap = ratio ? DIV_ROUND_UP(kvm->stat.nx_lpage_splits, ratio) : 0;
+ while (to_zap && !list_empty(&kvm->arch.lpage_disallowed_mmu_pages)) {
+ /*
+ * We use a separate list instead of just using active_mmu_pages
+ * because the number of lpage_disallowed pages is expected to
+ * be relatively small compared to the total.
+ */
+ sp = list_first_entry(&kvm->arch.lpage_disallowed_mmu_pages,
+ struct kvm_mmu_page,
+ lpage_disallowed_link);
+ WARN_ON_ONCE(!sp->lpage_disallowed);
+ kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
+ WARN_ON_ONCE(sp->lpage_disallowed);
+
+ if (!--to_zap || need_resched() || spin_needbreak(&kvm->mmu_lock)) {
+ kvm_mmu_commit_zap_page(kvm, &invalid_list);
+ if (to_zap)
+ cond_resched_lock(&kvm->mmu_lock);
+ }
+ }
+
+ spin_unlock(&kvm->mmu_lock);
+ srcu_read_unlock(&kvm->srcu, rcu_idx);
+}
+
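The quota is a ceiling division of the current split count. Taking the upstream default ratio of 60 together with the one-minute wake period used below, a VM with 10,000 NX-split huge pages zaps DIV_ROUND_UP(10000, 60) = 167 pages per pass, so the whole backlog is recycled in roughly an hour. A self-contained model of the quota (macro copied from the kernel's definition):

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Per-pass zap quota computed by kvm_recover_nx_lpages(). */
static unsigned long nx_recovery_quota(unsigned long splits, unsigned int ratio)
{
        return ratio ? DIV_ROUND_UP(splits, ratio) : 0; /* 0 disables it */
}
/* nx_recovery_quota(10000, 60) == 167 */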
+static long get_nx_lpage_recovery_timeout(u64 start_time)
+{
+ return READ_ONCE(nx_huge_pages) && READ_ONCE(nx_huge_pages_recovery_ratio)
+ ? start_time + 60 * HZ - get_jiffies_64()
+ : MAX_SCHEDULE_TIMEOUT;
+}
+
+static int kvm_nx_lpage_recovery_worker(struct kvm *kvm, uintptr_t data)
+{
+ u64 start_time;
+ long remaining_time;
+
+ while (true) {
+ start_time = get_jiffies_64();
+ remaining_time = get_nx_lpage_recovery_timeout(start_time);
+
+ set_current_state(TASK_INTERRUPTIBLE);
+ while (!kthread_should_stop() && remaining_time > 0) {
+ schedule_timeout(remaining_time);
+ remaining_time = get_nx_lpage_recovery_timeout(start_time);
+ set_current_state(TASK_INTERRUPTIBLE);
+ }
+
+ set_current_state(TASK_RUNNING);
+
+ if (kthread_should_stop())
+ return 0;
+
+ kvm_recover_nx_lpages(kvm);
+ }
+}
+
+int kvm_mmu_post_init_vm(struct kvm *kvm)
+{
+ int err;
+
+ err = kvm_vm_create_worker_thread(kvm, kvm_nx_lpage_recovery_worker, 0,
+ "kvm-nx-lpage-recovery",
+ &kvm->arch.nx_lpage_recovery_thread);
+ if (!err)
+ kthread_unpark(kvm->arch.nx_lpage_recovery_thread);
+
+ return err;
+}
+
+void kvm_mmu_pre_destroy_vm(struct kvm *kvm)
+{
+ if (kvm->arch.nx_lpage_recovery_thread)
+ kthread_stop(kvm->arch.nx_lpage_recovery_thread);
+}
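kvm_mmu_post_init_vm() and kvm_mmu_pre_destroy_vm() bracket the worker's lifetime: kvm_vm_create_worker_thread() creates the kthread parked, kthread_unpark() lets it run, and kthread_stop() both wakes and joins it on VM teardown. A rough generic-kthread analogue of the same lifecycle (the KVM helper additionally keeps the VM pinned while the thread lives):

#include <linux/jiffies.h>
#include <linux/kthread.h>
#include <linux/sched.h>

/* Sketch: generic analogue of the per-VM recovery thread lifecycle. */
static int worker_fn(void *data)
{
        while (!kthread_should_stop()) {
                set_current_state(TASK_INTERRUPTIBLE);
                schedule_timeout(60 * HZ);      /* or until woken/stopped */
                __set_current_state(TASK_RUNNING);
                /* ... one recovery pass here ... */
        }
        return 0;
}

static struct task_struct *start_worker(void *vm)
{
        struct task_struct *t;

        t = kthread_create(worker_fn, vm, "kvm-nx-lpage-recovery");
        if (!IS_ERR(t))
                wake_up_process(t);     /* parked variant: kthread_unpark() */
        return t;
}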
diff --git a/arch/x86/kvm/page_track.c b/arch/x86/kvm/mmu/page_track.c
index 3521e2d176f2..3521e2d176f2 100644
--- a/arch/x86/kvm/page_track.c
+++ b/arch/x86/kvm/mmu/page_track.c
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
index 7d5cdb3af594..97b21e7fd013 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -614,13 +614,14 @@ static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
struct guest_walker *gw,
int write_fault, int hlevel,
- kvm_pfn_t pfn, bool map_writable, bool prefault)
+ kvm_pfn_t pfn, bool map_writable, bool prefault,
+ bool lpage_disallowed)
{
struct kvm_mmu_page *sp = NULL;
struct kvm_shadow_walk_iterator it;
unsigned direct_access, access = gw->pt_access;
int top_level, ret;
- gfn_t base_gfn;
+ gfn_t gfn, base_gfn;
direct_access = gw->pte_access;
@@ -665,13 +666,25 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
link_shadow_page(vcpu, it.sptep, sp);
}
- base_gfn = gw->gfn;
+ /*
+ * FNAME(page_fault) might have clobbered the bottom bits of
+ * gw->gfn, restore them from the virtual address.
+ */
+ gfn = gw->gfn | ((addr & PT_LVL_OFFSET_MASK(gw->level)) >> PAGE_SHIFT);
+ base_gfn = gfn;
trace_kvm_mmu_spte_requested(addr, gw->level, pfn);
for (; shadow_walk_okay(&it); shadow_walk_next(&it)) {
clear_sp_write_flooding_count(it.sptep);
- base_gfn = gw->gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);
+
+ /*
+ * We cannot overwrite existing page tables with an NX
+ * large page, as the leaf could be executable.
+ */
+ disallowed_hugepage_adjust(it, gfn, &pfn, &hlevel);
+
+ base_gfn = gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);
if (it.level == hlevel)
break;
@@ -683,6 +696,8 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
sp = kvm_mmu_get_page(vcpu, base_gfn, addr,
it.level - 1, true, direct_access);
link_shadow_page(vcpu, it.sptep, sp);
+ if (lpage_disallowed)
+ account_huge_nx_page(vcpu->kvm, sp);
}
}
@@ -759,9 +774,11 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
int r;
kvm_pfn_t pfn;
int level = PT_PAGE_TABLE_LEVEL;
- bool force_pt_level = false;
unsigned long mmu_seq;
bool map_writable, is_self_change_mapping;
+ bool lpage_disallowed = (error_code & PFERR_FETCH_MASK) &&
+ is_nx_huge_page_enabled();
+ bool force_pt_level = lpage_disallowed;
pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
@@ -851,7 +868,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
if (!force_pt_level)
transparent_hugepage_adjust(vcpu, walker.gfn, &pfn, &level);
r = FNAME(fetch)(vcpu, addr, &walker, write_fault,
- level, pfn, map_writable, prefault);
+ level, pfn, map_writable, prefault, lpage_disallowed);
kvm_mmu_audit(vcpu, AUDIT_POST_PAGE_FAULT);
out_unlock:
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index 46875bbd0419..bcc6a73d6628 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -62,8 +62,7 @@ static void kvm_perf_overflow(struct perf_event *perf_event,
struct kvm_pmc *pmc = perf_event->overflow_handler_context;
struct kvm_pmu *pmu = pmc_to_pmu(pmc);
- if (!test_and_set_bit(pmc->idx,
- (unsigned long *)&pmu->reprogram_pmi)) {
+ if (!test_and_set_bit(pmc->idx, pmu->reprogram_pmi)) {
__set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
}
@@ -76,8 +75,7 @@ static void kvm_perf_overflow_intr(struct perf_event *perf_event,
struct kvm_pmc *pmc = perf_event->overflow_handler_context;
struct kvm_pmu *pmu = pmc_to_pmu(pmc);
- if (!test_and_set_bit(pmc->idx,
- (unsigned long *)&pmu->reprogram_pmi)) {
+ if (!test_and_set_bit(pmc->idx, pmu->reprogram_pmi)) {
__set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
@@ -137,7 +135,37 @@ static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
}
pmc->perf_event = event;
- clear_bit(pmc->idx, (unsigned long*)&pmc_to_pmu(pmc)->reprogram_pmi);
+ pmc_to_pmu(pmc)->event_count++;
+ clear_bit(pmc->idx, pmc_to_pmu(pmc)->reprogram_pmi);
+}
+
+static void pmc_pause_counter(struct kvm_pmc *pmc)
+{
+ u64 counter = pmc->counter;
+
+ if (!pmc->perf_event)
+ return;
+
+ /* update counter, reset event value to avoid redundant accumulation */
+ counter += perf_event_pause(pmc->perf_event, true);
+ pmc->counter = counter & pmc_bitmask(pmc);
+}
+
+static bool pmc_resume_counter(struct kvm_pmc *pmc)
+{
+ if (!pmc->perf_event)
+ return false;
+
+ /* recalibrate sample period and check if it's accepted by perf core */
+ if (perf_event_period(pmc->perf_event,
+ (-pmc->counter) & pmc_bitmask(pmc)))
+ return false;
+
+ /* reuse the perf_event, as pmc_reprogram_counter() does */
+ perf_event_enable(pmc->perf_event);
+
+ clear_bit(pmc->idx, pmc_to_pmu(pmc)->reprogram_pmi);
+ return true;
}
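pmc_resume_counter() recomputes the sample period as the distance to counter overflow: for an n-bit counter, (-counter) & mask is exactly the number of increments left before the wrap that raises the PMI. A standalone check of that identity (48 bits chosen as a typical counter width):

#include <assert.h>
#include <stdint.h>

/* Increments until an n-bit counter wraps: mirrors (-counter) & bitmask. */
static uint64_t period_to_overflow(uint64_t counter, unsigned int bits)
{
        uint64_t mask = (bits == 64) ? ~0ULL : (1ULL << bits) - 1;

        return (0 - counter) & mask;
}

int main(void)
{
        assert(period_to_overflow(0xFFFFFFFFF000ULL, 48) == 0x1000);
        assert(period_to_overflow(1, 48) == 0xFFFFFFFFFFFFULL);
        return 0;
}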
void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
@@ -154,7 +182,7 @@ void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
pmc->eventsel = eventsel;
- pmc_stop_counter(pmc);
+ pmc_pause_counter(pmc);
if (!(eventsel & ARCH_PERFMON_EVENTSEL_ENABLE) || !pmc_is_enabled(pmc))
return;
@@ -193,6 +221,12 @@ void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
if (type == PERF_TYPE_RAW)
config = eventsel & X86_RAW_EVENT_MASK;
+ if (pmc->current_config == eventsel && pmc_resume_counter(pmc))
+ return;
+
+ pmc_release_perf_event(pmc);
+
+ pmc->current_config = eventsel;
pmc_reprogram_counter(pmc, type, config,
!(eventsel & ARCH_PERFMON_EVENTSEL_USR),
!(eventsel & ARCH_PERFMON_EVENTSEL_OS),
@@ -209,7 +243,7 @@ void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int idx)
struct kvm_pmu_event_filter *filter;
struct kvm *kvm = pmc->vcpu->kvm;
- pmc_stop_counter(pmc);
+ pmc_pause_counter(pmc);
if (!en_field || !pmc_is_enabled(pmc))
return;
@@ -224,6 +258,12 @@ void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int idx)
return;
}
+ if (pmc->current_config == (u64)ctrl && pmc_resume_counter(pmc))
+ return;
+
+ pmc_release_perf_event(pmc);
+
+ pmc->current_config = (u64)ctrl;
pmc_reprogram_counter(pmc, PERF_TYPE_HARDWARE,
kvm_x86_ops->pmu_ops->find_fixed_event(idx),
!(en_field & 0x2), /* exclude user */
@@ -253,27 +293,32 @@ EXPORT_SYMBOL_GPL(reprogram_counter);
void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)
{
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
- u64 bitmask;
int bit;
- bitmask = pmu->reprogram_pmi;
-
- for_each_set_bit(bit, (unsigned long *)&bitmask, X86_PMC_IDX_MAX) {
+ for_each_set_bit(bit, pmu->reprogram_pmi, X86_PMC_IDX_MAX) {
struct kvm_pmc *pmc = kvm_x86_ops->pmu_ops->pmc_idx_to_pmc(pmu, bit);
if (unlikely(!pmc || !pmc->perf_event)) {
- clear_bit(bit, (unsigned long *)&pmu->reprogram_pmi);
+ clear_bit(bit, pmu->reprogram_pmi);
continue;
}
reprogram_counter(pmu, bit);
}
+
+ /*
+ * Unused perf_events are only released if the corresponding MSRs
+ * weren't accessed during the last vCPU time slice. kvm_arch_sched_in
+ * triggers KVM_REQ_PMU if cleanup is needed.
+ */
+ if (unlikely(pmu->need_cleanup))
+ kvm_pmu_cleanup(vcpu);
}
/* check if idx is a valid index to access PMU */
-int kvm_pmu_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx)
+int kvm_pmu_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
{
- return kvm_x86_ops->pmu_ops->is_valid_msr_idx(vcpu, idx);
+ return kvm_x86_ops->pmu_ops->is_valid_rdpmc_ecx(vcpu, idx);
}
bool is_vmware_backdoor_pmc(u32 pmc_idx)
@@ -323,7 +368,7 @@ int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
if (is_vmware_backdoor_pmc(idx))
return kvm_pmu_rdpmc_vmware(vcpu, idx, data);
- pmc = kvm_x86_ops->pmu_ops->msr_idx_to_pmc(vcpu, idx, &mask);
+ pmc = kvm_x86_ops->pmu_ops->rdpmc_ecx_to_pmc(vcpu, idx, &mask);
if (!pmc)
return 1;
@@ -339,7 +384,17 @@ void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
- return kvm_x86_ops->pmu_ops->is_valid_msr(vcpu, msr);
+ return kvm_x86_ops->pmu_ops->msr_idx_to_pmc(vcpu, msr) ||
+ kvm_x86_ops->pmu_ops->is_valid_msr(vcpu, msr);
+}
+
+static void kvm_pmu_mark_pmc_in_use(struct kvm_vcpu *vcpu, u32 msr)
+{
+ struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
+ struct kvm_pmc *pmc = kvm_x86_ops->pmu_ops->msr_idx_to_pmc(vcpu, msr);
+
+ if (pmc)
+ __set_bit(pmc->idx, pmu->pmc_in_use);
}
int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
@@ -349,6 +404,7 @@ int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
+ kvm_pmu_mark_pmc_in_use(vcpu, msr_info->index);
return kvm_x86_ops->pmu_ops->set_msr(vcpu, msr_info);
}
@@ -376,9 +432,45 @@ void kvm_pmu_init(struct kvm_vcpu *vcpu)
memset(pmu, 0, sizeof(*pmu));
kvm_x86_ops->pmu_ops->init(vcpu);
init_irq_work(&pmu->irq_work, kvm_pmi_trigger_fn);
+ pmu->event_count = 0;
+ pmu->need_cleanup = false;
kvm_pmu_refresh(vcpu);
}
+static inline bool pmc_speculative_in_use(struct kvm_pmc *pmc)
+{
+ struct kvm_pmu *pmu = pmc_to_pmu(pmc);
+
+ if (pmc_is_fixed(pmc))
+ return fixed_ctrl_field(pmu->fixed_ctr_ctrl,
+ pmc->idx - INTEL_PMC_IDX_FIXED) & 0x3;
+
+ return pmc->eventsel & ARCH_PERFMON_EVENTSEL_ENABLE;
+}
+
+/* Release perf_events for vPMCs that have been unused for a full time slice. */
+void kvm_pmu_cleanup(struct kvm_vcpu *vcpu)
+{
+ struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
+ struct kvm_pmc *pmc = NULL;
+ DECLARE_BITMAP(bitmask, X86_PMC_IDX_MAX);
+ int i;
+
+ pmu->need_cleanup = false;
+
+ bitmap_andnot(bitmask, pmu->all_valid_pmc_idx,
+ pmu->pmc_in_use, X86_PMC_IDX_MAX);
+
+ for_each_set_bit(i, bitmask, X86_PMC_IDX_MAX) {
+ pmc = kvm_x86_ops->pmu_ops->pmc_idx_to_pmc(pmu, i);
+
+ if (pmc && pmc->perf_event && !pmc_speculative_in_use(pmc))
+ pmc_stop_counter(pmc);
+ }
+
+ bitmap_zero(pmu->pmc_in_use, X86_PMC_IDX_MAX);
+}
+
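The selection step in kvm_pmu_cleanup() is a single bitmap identity: the release candidates are all_valid & ~in_use, i.e. counters the guest could use but did not touch during the slice. A compact model with flat u64 bitmaps in place of the kernel's DECLARE_BITMAP() arrays:

#include <assert.h>
#include <stdint.h>

/* Model of kvm_pmu_cleanup()'s candidate selection. */
static uint64_t pmcs_to_release(uint64_t all_valid, uint64_t in_use)
{
        return all_valid & ~in_use;     /* the bitmap_andnot() step */
}

int main(void)
{
        /* 6 GP counters valid, counters 0 and 2 touched this slice. */
        assert(pmcs_to_release(0x3f, 0x05) == 0x3a);
        return 0;
}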
void kvm_pmu_destroy(struct kvm_vcpu *vcpu)
{
kvm_pmu_reset(vcpu);
@@ -416,8 +508,8 @@ int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp)
*filter = tmp;
mutex_lock(&kvm->lock);
- rcu_swap_protected(kvm->arch.pmu_event_filter, filter,
- mutex_is_locked(&kvm->lock));
+ filter = rcu_replace_pointer(kvm->arch.pmu_event_filter, filter,
+ mutex_is_locked(&kvm->lock));
mutex_unlock(&kvm->lock);
synchronize_srcu_expedited(&kvm->srcu);
diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
index 58265f761c3b..7ebb62326c14 100644
--- a/arch/x86/kvm/pmu.h
+++ b/arch/x86/kvm/pmu.h
@@ -25,9 +25,10 @@ struct kvm_pmu_ops {
unsigned (*find_fixed_event)(int idx);
bool (*pmc_is_enabled)(struct kvm_pmc *pmc);
struct kvm_pmc *(*pmc_idx_to_pmc)(struct kvm_pmu *pmu, int pmc_idx);
- struct kvm_pmc *(*msr_idx_to_pmc)(struct kvm_vcpu *vcpu, unsigned idx,
- u64 *mask);
- int (*is_valid_msr_idx)(struct kvm_vcpu *vcpu, unsigned idx);
+ struct kvm_pmc *(*rdpmc_ecx_to_pmc)(struct kvm_vcpu *vcpu,
+ unsigned int idx, u64 *mask);
+ struct kvm_pmc *(*msr_idx_to_pmc)(struct kvm_vcpu *vcpu, u32 msr);
+ int (*is_valid_rdpmc_ecx)(struct kvm_vcpu *vcpu, unsigned int idx);
bool (*is_valid_msr)(struct kvm_vcpu *vcpu, u32 msr);
int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr, u64 *data);
int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
@@ -55,12 +56,21 @@ static inline u64 pmc_read_counter(struct kvm_pmc *pmc)
return counter & pmc_bitmask(pmc);
}
-static inline void pmc_stop_counter(struct kvm_pmc *pmc)
+static inline void pmc_release_perf_event(struct kvm_pmc *pmc)
{
if (pmc->perf_event) {
- pmc->counter = pmc_read_counter(pmc);
perf_event_release_kernel(pmc->perf_event);
pmc->perf_event = NULL;
+ pmc->current_config = 0;
+ pmc_to_pmu(pmc)->event_count--;
+ }
+}
+
+static inline void pmc_stop_counter(struct kvm_pmc *pmc)
+{
+ if (pmc->perf_event) {
+ pmc->counter = pmc_read_counter(pmc);
+ pmc_release_perf_event(pmc);
}
}
@@ -79,6 +89,12 @@ static inline bool pmc_is_enabled(struct kvm_pmc *pmc)
return kvm_x86_ops->pmu_ops->pmc_is_enabled(pmc);
}
+static inline bool kvm_valid_perf_global_ctrl(struct kvm_pmu *pmu,
+ u64 data)
+{
+ return !(pmu->global_ctrl_mask & data);
+}
+
/* returns general purpose PMC with the specified MSR. Note that it can be
* used for both PERFCTRn and EVNTSELn; that is why it accepts base as a
* parameter to tell them apart.
@@ -110,13 +126,14 @@ void reprogram_counter(struct kvm_pmu *pmu, int pmc_idx);
void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu);
void kvm_pmu_handle_event(struct kvm_vcpu *vcpu);
int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data);
-int kvm_pmu_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx);
+int kvm_pmu_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx);
bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr);
int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data);
int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
void kvm_pmu_refresh(struct kvm_vcpu *vcpu);
void kvm_pmu_reset(struct kvm_vcpu *vcpu);
void kvm_pmu_init(struct kvm_vcpu *vcpu);
+void kvm_pmu_cleanup(struct kvm_vcpu *vcpu);
void kvm_pmu_destroy(struct kvm_vcpu *vcpu);
int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp);
diff --git a/arch/x86/kvm/pmu_amd.c b/arch/x86/kvm/pmu_amd.c
index c8388389a3b0..ce0b10fe5e2b 100644
--- a/arch/x86/kvm/pmu_amd.c
+++ b/arch/x86/kvm/pmu_amd.c
@@ -174,7 +174,7 @@ static struct kvm_pmc *amd_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
}
/* returns 0 if idx's corresponding MSR exists; otherwise returns 1. */
-static int amd_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx)
+static int amd_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
{
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
@@ -184,7 +184,8 @@ static int amd_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx)
}
/* idx is the ECX register of RDPMC instruction */
-static struct kvm_pmc *amd_msr_idx_to_pmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *mask)
+static struct kvm_pmc *amd_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu,
+ unsigned int idx, u64 *mask)
{
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
struct kvm_pmc *counters;
@@ -199,13 +200,19 @@ static struct kvm_pmc *amd_msr_idx_to_pmc(struct kvm_vcpu *vcpu, unsigned idx, u
static bool amd_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
+ /* All MSRs refer to exactly one PMC, so msr_idx_to_pmc is enough. */
+ return false;
+}
+
+static struct kvm_pmc *amd_msr_idx_to_pmc(struct kvm_vcpu *vcpu, u32 msr)
+{
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
- int ret = false;
+ struct kvm_pmc *pmc;
- ret = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER) ||
- get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
+ pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
+ pmc = pmc ? pmc : get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
- return ret;
+ return pmc;
}
static int amd_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
@@ -272,6 +279,7 @@ static void amd_pmu_refresh(struct kvm_vcpu *vcpu)
pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
pmu->nr_arch_fixed_counters = 0;
pmu->global_status = 0;
+ bitmap_set(pmu->all_valid_pmc_idx, 0, pmu->nr_arch_gp_counters);
}
static void amd_pmu_init(struct kvm_vcpu *vcpu)
@@ -285,6 +293,7 @@ static void amd_pmu_init(struct kvm_vcpu *vcpu)
pmu->gp_counters[i].type = KVM_PMC_GP;
pmu->gp_counters[i].vcpu = vcpu;
pmu->gp_counters[i].idx = i;
+ pmu->gp_counters[i].current_config = 0;
}
}
@@ -306,8 +315,9 @@ struct kvm_pmu_ops amd_pmu_ops = {
.find_fixed_event = amd_find_fixed_event,
.pmc_is_enabled = amd_pmc_is_enabled,
.pmc_idx_to_pmc = amd_pmc_idx_to_pmc,
+ .rdpmc_ecx_to_pmc = amd_rdpmc_ecx_to_pmc,
.msr_idx_to_pmc = amd_msr_idx_to_pmc,
- .is_valid_msr_idx = amd_is_valid_msr_idx,
+ .is_valid_rdpmc_ecx = amd_is_valid_rdpmc_ecx,
.is_valid_msr = amd_is_valid_msr,
.get_msr = amd_pmu_get_msr,
.set_msr = amd_pmu_set_msr,
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index c5673bda4b66..362e874297e4 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -38,6 +38,7 @@
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
+#include <linux/rwsem.h>
#include <asm/apic.h>
#include <asm/perf_event.h>
@@ -418,9 +419,13 @@ enum {
#define VMCB_AVIC_APIC_BAR_MASK 0xFFFFFFFFFF000ULL
+static int sev_flush_asids(void);
+static DECLARE_RWSEM(sev_deactivate_lock);
+static DEFINE_MUTEX(sev_bitmap_lock);
static unsigned int max_sev_asid;
static unsigned int min_sev_asid;
static unsigned long *sev_asid_bitmap;
+static unsigned long *sev_reclaim_asid_bitmap;
#define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT)
struct enc_region {
@@ -1235,11 +1240,15 @@ static __init int sev_hardware_setup(void)
/* Minimum ASID value that should be used for SEV guest */
min_sev_asid = cpuid_edx(0x8000001F);
- /* Initialize SEV ASID bitmap */
+ /* Initialize SEV ASID bitmaps */
sev_asid_bitmap = bitmap_zalloc(max_sev_asid, GFP_KERNEL);
if (!sev_asid_bitmap)
return 1;
+ sev_reclaim_asid_bitmap = bitmap_zalloc(max_sev_asid, GFP_KERNEL);
+ if (!sev_reclaim_asid_bitmap)
+ return 1;
+
status = kmalloc(sizeof(*status), GFP_KERNEL);
if (!status)
return 1;
@@ -1418,8 +1427,12 @@ static __exit void svm_hardware_unsetup(void)
{
int cpu;
- if (svm_sev_enabled())
+ if (svm_sev_enabled()) {
bitmap_free(sev_asid_bitmap);
+ bitmap_free(sev_reclaim_asid_bitmap);
+
+ sev_flush_asids();
+ }
for_each_possible_cpu(cpu)
svm_cpu_uninit(cpu);
@@ -1729,25 +1742,22 @@ static int avic_init_backing_page(struct kvm_vcpu *vcpu)
return 0;
}
-static void __sev_asid_free(int asid)
+static void sev_asid_free(int asid)
{
struct svm_cpu_data *sd;
int cpu, pos;
+ mutex_lock(&sev_bitmap_lock);
+
pos = asid - 1;
- clear_bit(pos, sev_asid_bitmap);
+ __set_bit(pos, sev_reclaim_asid_bitmap);
for_each_possible_cpu(cpu) {
sd = per_cpu(svm_data, cpu);
sd->sev_vmcbs[pos] = NULL;
}
-}
-static void sev_asid_free(struct kvm *kvm)
-{
- struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
-
- __sev_asid_free(sev->asid);
+ mutex_unlock(&sev_bitmap_lock);
}
static void sev_unbind_asid(struct kvm *kvm, unsigned int handle)
@@ -1764,10 +1774,12 @@ static void sev_unbind_asid(struct kvm *kvm, unsigned int handle)
/* deactivate handle */
data->handle = handle;
+
+ /* Guard DEACTIVATE against WBINVD/DF_FLUSH used in ASID recycling */
+ down_read(&sev_deactivate_lock);
sev_guest_deactivate(data, NULL);
+ up_read(&sev_deactivate_lock);
- wbinvd_on_all_cpus();
- sev_guest_df_flush(NULL);
kfree(data);
decommission = kzalloc(sizeof(*decommission), GFP_KERNEL);
@@ -1916,7 +1928,7 @@ static void sev_vm_destroy(struct kvm *kvm)
mutex_unlock(&kvm->lock);
sev_unbind_asid(kvm, sev->handle);
- sev_asid_free(kvm);
+ sev_asid_free(sev->asid);
}
static void avic_vm_destroy(struct kvm *kvm)
@@ -2370,7 +2382,7 @@ static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
break;
default:
- BUG();
+ WARN_ON_ONCE(1);
}
}
@@ -2523,10 +2535,6 @@ static void svm_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
{
}
-static void svm_decache_cr3(struct kvm_vcpu *vcpu)
-{
-}
-
static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
{
}
@@ -4997,6 +5005,18 @@ static int handle_exit(struct kvm_vcpu *vcpu)
return 0;
}
+#ifdef CONFIG_RETPOLINE
+ if (exit_code == SVM_EXIT_MSR)
+ return msr_interception(svm);
+ else if (exit_code == SVM_EXIT_VINTR)
+ return interrupt_window_interception(svm);
+ else if (exit_code == SVM_EXIT_INTR)
+ return intr_interception(svm);
+ else if (exit_code == SVM_EXIT_HLT)
+ return halt_interception(svm);
+ else if (exit_code == SVM_EXIT_NPF)
+ return npf_interception(svm);
+#endif
return svm_exit_handlers[exit_code](svm);
}
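The CONFIG_RETPOLINE ladder trades a table lookup for direct calls on the hottest exit reasons: with retpolines enabled, the indirect call through svm_exit_handlers[] goes via a speculation-safe thunk that is markedly slower than a predicted direct call. A standalone model of the shape of this dispatch (exit codes and handlers invented for illustration):

#include <stdio.h>

enum { EXIT_MSR, EXIT_NPF, EXIT_OTHER, EXIT_MAX };

static int handle_msr(void)   { return 1; }
static int handle_npf(void)   { return 2; }
static int handle_other(void) { return 3; }

static int (*const handlers[EXIT_MAX])(void) = {
        [EXIT_MSR]   = handle_msr,
        [EXIT_NPF]   = handle_npf,
        [EXIT_OTHER] = handle_other,
};

static int dispatch(int exit_code)
{
        /* Hot exits take direct calls, which stay cheap under retpolines. */
        if (exit_code == EXIT_MSR)
                return handle_msr();
        if (exit_code == EXIT_NPF)
                return handle_npf();

        /* Cold exits fall back to the indirect table (retpoline thunk). */
        return handlers[exit_code]();
}

int main(void)
{
        printf("%d %d %d\n", dispatch(EXIT_MSR), dispatch(EXIT_NPF),
               dispatch(EXIT_OTHER));
        return 0;
}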
@@ -5092,8 +5112,7 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
{
struct vcpu_svm *svm = to_svm(vcpu);
- if (svm_nested_virtualize_tpr(vcpu) ||
- kvm_vcpu_apicv_active(vcpu))
+ if (svm_nested_virtualize_tpr(vcpu))
return;
clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
@@ -5110,9 +5129,9 @@ static void svm_set_virtual_apic_mode(struct kvm_vcpu *vcpu)
return;
}
-static bool svm_get_enable_apicv(struct kvm_vcpu *vcpu)
+static bool svm_get_enable_apicv(struct kvm *kvm)
{
- return avic && irqchip_split(vcpu->kvm);
+ return avic && irqchip_split(kvm);
}
static void svm_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr)
@@ -5634,7 +5653,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
svm->vmcb->save.cr2 = vcpu->arch.cr2;
clgi();
- kvm_load_guest_xcr0(vcpu);
+ kvm_load_guest_xsave_state(vcpu);
if (lapic_in_kernel(vcpu) &&
vcpu->arch.apic->lapic_timer.timer_advance_ns)
@@ -5784,7 +5803,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
kvm_before_interrupt(&svm->vcpu);
- kvm_put_guest_xcr0(vcpu);
+ kvm_load_host_xsave_state(vcpu);
stgi();
/* Any pending NMI will happen here */
@@ -5893,6 +5912,9 @@ static void svm_cpuid_update(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
+ vcpu->arch.xsaves_enabled = guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
+ boot_cpu_has(X86_FEATURE_XSAVES);
+
/* Update nrips enabled cache */
svm->nrips_enabled = !!guest_cpuid_has(&svm->vcpu, X86_FEATURE_NRIPS);
@@ -5968,7 +5990,7 @@ static bool svm_mpx_supported(void)
static bool svm_xsaves_supported(void)
{
- return false;
+ return boot_cpu_has(X86_FEATURE_XSAVES);
}
static bool svm_umip_emulated(void)
@@ -6270,18 +6292,73 @@ static int enable_smi_window(struct kvm_vcpu *vcpu)
return 0;
}
+static int sev_flush_asids(void)
+{
+ int ret, error;
+
+ /*
+ * DEACTIVATE will clear the WBINVD indicator causing DF_FLUSH to fail,
+ * so it must be guarded.
+ */
+ down_write(&sev_deactivate_lock);
+
+ wbinvd_on_all_cpus();
+ ret = sev_guest_df_flush(&error);
+
+ up_write(&sev_deactivate_lock);
+
+ if (ret)
+ pr_err("SEV: DF_FLUSH failed, ret=%d, error=%#x\n", ret, error);
+
+ return ret;
+}
+
+/* Must be called with the sev_bitmap_lock held */
+static bool __sev_recycle_asids(void)
+{
+ int pos;
+
+ /* Check if there are any ASIDs to reclaim before performing a flush */
+ pos = find_next_bit(sev_reclaim_asid_bitmap,
+ max_sev_asid, min_sev_asid - 1);
+ if (pos >= max_sev_asid)
+ return false;
+
+ if (sev_flush_asids())
+ return false;
+
+ bitmap_xor(sev_asid_bitmap, sev_asid_bitmap, sev_reclaim_asid_bitmap,
+ max_sev_asid);
+ bitmap_zero(sev_reclaim_asid_bitmap, max_sev_asid);
+
+ return true;
+}
+
static int sev_asid_new(void)
{
+ bool retry = true;
int pos;
+ mutex_lock(&sev_bitmap_lock);
+
/*
* SEV-enabled guest must use asid from min_sev_asid to max_sev_asid.
*/
+again:
pos = find_next_zero_bit(sev_asid_bitmap, max_sev_asid, min_sev_asid - 1);
- if (pos >= max_sev_asid)
+ if (pos >= max_sev_asid) {
+ if (retry && __sev_recycle_asids()) {
+ retry = false;
+ goto again;
+ }
+ mutex_unlock(&sev_bitmap_lock);
return -EBUSY;
+ }
+
+ __set_bit(pos, sev_asid_bitmap);
+
+ mutex_unlock(&sev_bitmap_lock);
- set_bit(pos, sev_asid_bitmap);
return pos + 1;
}
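A set bit in sev_asid_bitmap means "not allocatable"; sev_asid_free() leaves that bit set and additionally marks the ASID in sev_reclaim_asid_bitmap, so the reclaim bitmap is always a subset of the allocation bitmap. That subset invariant is what makes the bitmap_xor() in __sev_recycle_asids() correct: XOR with a subset clears exactly those bits, returning the recycled ASIDs to the free pool after one batched flush. A tiny demonstration (sample bit patterns made up):

#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint64_t asid_bitmap = 0x0fc;       /* in use or awaiting flush */
        uint64_t reclaim_bitmap = 0x00c;    /* freed, awaiting flush    */

        /* Invariant: reclaimable ASIDs are still marked unallocatable. */
        assert((reclaim_bitmap & ~asid_bitmap) == 0);

        /* XOR with a subset clears exactly those bits. */
        asid_bitmap ^= reclaim_bitmap;      /* 0x0f0 */
        reclaim_bitmap = 0;

        assert(asid_bitmap == 0x0f0);
        return 0;
}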
@@ -6309,7 +6386,7 @@ static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
return 0;
e_free:
- __sev_asid_free(asid);
+ sev_asid_free(asid);
return ret;
}
@@ -6319,12 +6396,6 @@ static int sev_bind_asid(struct kvm *kvm, unsigned int handle, int *error)
int asid = sev_get_asid(kvm);
int ret;
- wbinvd_on_all_cpus();
-
- ret = sev_guest_df_flush(error);
- if (ret)
- return ret;
-
data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
if (!data)
return -ENOMEM;
@@ -7214,7 +7285,6 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
.get_cpl = svm_get_cpl,
.get_cs_db_l_bits = kvm_get_cs_db_l_bits,
.decache_cr0_guest_bits = svm_decache_cr0_guest_bits,
- .decache_cr3 = svm_decache_cr3,
.decache_cr4_guest_bits = svm_decache_cr4_guest_bits,
.set_cr0 = svm_set_cr0,
.set_cr3 = svm_set_cr3,
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 0e7c9301fe86..4aea7d304beb 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -10,6 +10,7 @@
#include "hyperv.h"
#include "mmu.h"
#include "nested.h"
+#include "pmu.h"
#include "trace.h"
#include "x86.h"
@@ -27,6 +28,16 @@ module_param(nested_early_check, bool, S_IRUGO);
failed; \
})
+#define SET_MSR_OR_WARN(vcpu, idx, data) \
+({ \
+ bool failed = kvm_set_msr(vcpu, idx, data); \
+ if (failed) \
+ pr_warn_ratelimited( \
+ "%s cannot write MSR (0x%x, 0x%llx)\n", \
+ __func__, idx, data); \
+ failed; \
+})
+
/*
* Hyper-V requires all of these, so mark them as supported even though
* they are just treated the same as all-context.
@@ -257,7 +268,7 @@ static void free_nested(struct kvm_vcpu *vcpu)
vmx->nested.cached_shadow_vmcs12 = NULL;
/* Unpin physical memory we referred to in the vmcs02 */
if (vmx->nested.apic_access_page) {
- kvm_release_page_dirty(vmx->nested.apic_access_page);
+ kvm_release_page_clean(vmx->nested.apic_access_page);
vmx->nested.apic_access_page = NULL;
}
kvm_vcpu_unmap(vcpu, &vmx->nested.virtual_apic_map, true);
@@ -929,6 +940,57 @@ fail:
return i + 1;
}
+static bool nested_vmx_get_vmexit_msr_value(struct kvm_vcpu *vcpu,
+ u32 msr_index,
+ u64 *data)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+ /*
+ * If the L0 hypervisor stored a more accurate value for the TSC that
+ * does not include the time taken for emulation of the L2->L1
+ * VM-exit in L0, use the more accurate value.
+ */
+ if (msr_index == MSR_IA32_TSC) {
+ int index = vmx_find_msr_index(&vmx->msr_autostore.guest,
+ MSR_IA32_TSC);
+
+ if (index >= 0) {
+ u64 val = vmx->msr_autostore.guest.val[index].value;
+
+ *data = kvm_read_l1_tsc(vcpu, val);
+ return true;
+ }
+ }
+
+ if (kvm_get_msr(vcpu, msr_index, data)) {
+ pr_debug_ratelimited("%s cannot read MSR (0x%x)\n", __func__,
+ msr_index);
+ return false;
+ }
+ return true;
+}
+
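kvm_read_l1_tsc() converts a host-observed TSC value onto L1's timeline, conceptually l1_tsc = scale(host_tsc) + tsc_offset, where the scale is a fixed-point multiplier. A simplified model of that conversion (the kernel's kvm_scale_tsc() uses a hardware-defined fraction width; frac_bits here is a generic stand-in):

#include <stdint.h>

/* Model: map a host TSC reading onto the L1 guest's TSC timeline. */
static uint64_t read_l1_tsc(uint64_t host_tsc, uint64_t ratio_fp,
                            unsigned int frac_bits, uint64_t tsc_offset)
{
        uint64_t scaled = (uint64_t)(((__uint128_t)host_tsc * ratio_fp)
                                     >> frac_bits);

        return scaled + tsc_offset;
}
/* With ratio 1.0 (ratio_fp == 1ULL << frac_bits) this reduces to
 * host_tsc + tsc_offset.
 */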
+static bool read_and_check_msr_entry(struct kvm_vcpu *vcpu, u64 gpa, int i,
+ struct vmx_msr_entry *e)
+{
+ if (kvm_vcpu_read_guest(vcpu,
+ gpa + i * sizeof(*e),
+ e, 2 * sizeof(u32))) {
+ pr_debug_ratelimited(
+ "%s cannot read MSR entry (%u, 0x%08llx)\n",
+ __func__, i, gpa + i * sizeof(*e));
+ return false;
+ }
+ if (nested_vmx_store_msr_check(vcpu, e)) {
+ pr_debug_ratelimited(
+ "%s check failed (%u, 0x%x, 0x%x)\n",
+ __func__, i, e->index, e->reserved);
+ return false;
+ }
+ return true;
+}
+
static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
{
u64 data;
@@ -940,26 +1002,12 @@ static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
if (unlikely(i >= max_msr_list_size))
return -EINVAL;
- if (kvm_vcpu_read_guest(vcpu,
- gpa + i * sizeof(e),
- &e, 2 * sizeof(u32))) {
- pr_debug_ratelimited(
- "%s cannot read MSR entry (%u, 0x%08llx)\n",
- __func__, i, gpa + i * sizeof(e));
+ if (!read_and_check_msr_entry(vcpu, gpa, i, &e))
return -EINVAL;
- }
- if (nested_vmx_store_msr_check(vcpu, &e)) {
- pr_debug_ratelimited(
- "%s check failed (%u, 0x%x, 0x%x)\n",
- __func__, i, e.index, e.reserved);
- return -EINVAL;
- }
- if (kvm_get_msr(vcpu, e.index, &data)) {
- pr_debug_ratelimited(
- "%s cannot read MSR (%u, 0x%x)\n",
- __func__, i, e.index);
+
+ if (!nested_vmx_get_vmexit_msr_value(vcpu, e.index, &data))
return -EINVAL;
- }
+
if (kvm_vcpu_write_guest(vcpu,
gpa + i * sizeof(e) +
offsetof(struct vmx_msr_entry, value),
@@ -973,6 +1021,60 @@ static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
return 0;
}
+static bool nested_msr_store_list_has_msr(struct kvm_vcpu *vcpu, u32 msr_index)
+{
+ struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+ u32 count = vmcs12->vm_exit_msr_store_count;
+ u64 gpa = vmcs12->vm_exit_msr_store_addr;
+ struct vmx_msr_entry e;
+ u32 i;
+
+ for (i = 0; i < count; i++) {
+ if (!read_and_check_msr_entry(vcpu, gpa, i, &e))
+ return false;
+
+ if (e.index == msr_index)
+ return true;
+ }
+ return false;
+}
+
+static void prepare_vmx_msr_autostore_list(struct kvm_vcpu *vcpu,
+ u32 msr_index)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ struct vmx_msrs *autostore = &vmx->msr_autostore.guest;
+ bool in_vmcs12_store_list;
+ int msr_autostore_index;
+ bool in_autostore_list;
+ int last;
+
+ msr_autostore_index = vmx_find_msr_index(autostore, msr_index);
+ in_autostore_list = msr_autostore_index >= 0;
+ in_vmcs12_store_list = nested_msr_store_list_has_msr(vcpu, msr_index);
+
+ if (in_vmcs12_store_list && !in_autostore_list) {
+ if (autostore->nr == NR_LOADSTORE_MSRS) {
+ /*
+ * Emulated VMEntry does not fail here. Instead a less
+ * accurate value will be returned by
+ * nested_vmx_get_vmexit_msr_value() using kvm_get_msr()
+ * instead of reading the value from the vmcs02 VMExit
+ * MSR-store area.
+ */
+ pr_warn_ratelimited(
+ "Not enough msr entries in msr_autostore. Can't add msr %x\n",
+ msr_index);
+ return;
+ }
+ last = autostore->nr++;
+ autostore->val[last].index = msr_index;
+ } else if (!in_vmcs12_store_list && in_autostore_list) {
+ last = --autostore->nr;
+ autostore->val[msr_autostore_index] = autostore->val[last];
+ }
+}
+
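Removal from the autostore list below uses the classic swap-with-last trick: because neither KVM nor the hardware cares about entry order in the MSR-store area, the victim slot is simply overwritten by the final entry, keeping the array dense in O(1). In isolation:

#include <stdint.h>

struct msr_entry { uint32_t index; uint64_t value; };

struct msr_list {
        unsigned int nr;
        struct msr_entry val[8];
};

/* O(1) unordered removal: overwrite the victim with the last entry. */
static void msr_list_remove(struct msr_list *l, unsigned int i)
{
        unsigned int last = --l->nr;

        l->val[i] = l->val[last];
}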
static bool nested_cr3_valid(struct kvm_vcpu *vcpu, unsigned long val)
{
unsigned long invalid_mask;
@@ -1012,7 +1114,7 @@ static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool ne
kvm_mmu_new_cr3(vcpu, cr3, false);
vcpu->arch.cr3 = cr3;
- __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
+ kvm_register_mark_available(vcpu, VCPU_EXREG_CR3);
kvm_init_mmu(vcpu, false);
@@ -1024,7 +1126,9 @@ static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool ne
* populated by L2 differently than TLB entries populated
* by L1.
*
- * If L1 uses EPT, then TLB entries are tagged with different EPTP.
+ * If L0 uses EPT, L1 and L2 run with different EPTP because
+ * guest_mode is part of kvm_mmu_page_role. Thus, TLB entries
+ * are tagged with different EPTP.
*
* If L1 uses VPID and we allocated a vpid02, TLB entries are tagged
* with different VPID (L1 entries are tagged with vmx->vpid
@@ -1034,7 +1138,7 @@ static bool nested_has_guest_tlb_tag(struct kvm_vcpu *vcpu)
{
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
- return nested_cpu_has_ept(vmcs12) ||
+ return enable_ept ||
(nested_cpu_has_vpid(vmcs12) && to_vmx(vcpu)->nested.vpid02);
}
@@ -2018,7 +2122,7 @@ static void prepare_vmcs02_constant_state(struct vcpu_vmx *vmx)
* addresses are constant (for vmcs02), the counts can change based
* on L2's behavior, e.g. switching to/from long mode.
*/
- vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
+ vmcs_write64(VM_EXIT_MSR_STORE_ADDR, __pa(vmx->msr_autostore.guest.val));
vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val));
vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val));
@@ -2073,6 +2177,7 @@ static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
exec_control &= ~CPU_BASED_TPR_SHADOW;
exec_control |= vmcs12->cpu_based_vm_exec_control;
+ vmx->nested.l1_tpr_threshold = -1;
if (exec_control & CPU_BASED_TPR_SHADOW)
vmcs_write32(TPR_THRESHOLD, vmcs12->tpr_threshold);
#ifdef CONFIG_X86_64
@@ -2285,6 +2390,13 @@ static void prepare_vmcs02_rare(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
vmcs_write64(EOI_EXIT_BITMAP3, vmcs12->eoi_exit_bitmap3);
}
+ /*
+ * Make sure the msr_autostore list is up to date before we set the
+ * count in the vmcs02.
+ */
+ prepare_vmx_msr_autostore_list(&vmx->vcpu, MSR_IA32_TSC);
+
+ vmcs_write32(VM_EXIT_MSR_STORE_COUNT, vmx->msr_autostore.guest.nr);
vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
@@ -2381,9 +2493,6 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
if (nested_cpu_has_ept(vmcs12))
nested_ept_init_mmu_context(vcpu);
- else if (nested_cpu_has2(vmcs12,
- SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
- vmx_flush_tlb(vcpu, true);
/*
* This sets GUEST_CR0 to vmcs12->guest_cr0, possibly modifying those
@@ -2418,6 +2527,16 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
entry_failure_code))
return -EINVAL;
+ /*
+ * Immediately write vmcs02.GUEST_CR3. It will be propagated to vmcs12
+ * on nested VM-Exit, which can occur without actually running L2 and
+ * thus without hitting vmx_set_cr3(), e.g. if L1 is entering L2 with
+ * vmcs12.GUEST_ACTIVITY_STATE=HLT, in which case KVM will intercept the
+ * transition to HLT instead of running L2.
+ */
+ if (enable_ept)
+ vmcs_writel(GUEST_CR3, vmcs12->guest_cr3);
+
/* Late preparation of GUEST_PDPTRs now that EFER and CRs are set. */
if (load_guest_pdptrs_vmcs12 && nested_cpu_has_ept(vmcs12) &&
is_pae_paging(vcpu)) {
@@ -2430,6 +2549,11 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
if (!enable_ept)
vcpu->arch.walk_mmu->inject_page_fault = vmx_inject_page_fault_nested;
+ if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) &&
+ SET_MSR_OR_WARN(vcpu, MSR_CORE_PERF_GLOBAL_CTRL,
+ vmcs12->guest_ia32_perf_global_ctrl))
+ return -EINVAL;
+
kvm_rsp_write(vcpu, vmcs12->guest_rsp);
kvm_rip_write(vcpu, vmcs12->guest_rip);
return 0;
@@ -2664,6 +2788,11 @@ static int nested_vmx_check_host_state(struct kvm_vcpu *vcpu,
CC(!kvm_pat_valid(vmcs12->host_ia32_pat)))
return -EINVAL;
+ if ((vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL) &&
+ CC(!kvm_valid_perf_global_ctrl(vcpu_to_pmu(vcpu),
+ vmcs12->host_ia32_perf_global_ctrl)))
+ return -EINVAL;
+
#ifdef CONFIG_X86_64
ia32e = !!(vcpu->arch.efer & EFER_LMA);
#else
@@ -2779,6 +2908,11 @@ static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu,
return -EINVAL;
}
+ if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) &&
+ CC(!kvm_valid_perf_global_ctrl(vcpu_to_pmu(vcpu),
+ vmcs12->guest_ia32_perf_global_ctrl)))
+ return -EINVAL;
+
/*
* If the load IA32_EFER VM-entry control is 1, the following checks
* are performed on the field for the IA32_EFER MSR:
@@ -2933,7 +3067,7 @@ static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
* to it so we can release it later.
*/
if (vmx->nested.apic_access_page) { /* shouldn't happen */
- kvm_release_page_dirty(vmx->nested.apic_access_page);
+ kvm_release_page_clean(vmx->nested.apic_access_page);
vmx->nested.apic_access_page = NULL;
}
page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->apic_access_addr);
@@ -3461,6 +3595,7 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr)
test_bit(KVM_APIC_INIT, &apic->pending_events)) {
if (block_nested_events)
return -EBUSY;
+ clear_bit(KVM_APIC_INIT, &apic->pending_events);
nested_vmx_vmexit(vcpu, EXIT_REASON_INIT_SIGNAL, 0, 0);
return 0;
}
@@ -3864,8 +3999,8 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
vcpu->arch.pat = vmcs12->host_ia32_pat;
}
if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL)
- vmcs_write64(GUEST_IA32_PERF_GLOBAL_CTRL,
- vmcs12->host_ia32_perf_global_ctrl);
+ SET_MSR_OR_WARN(vcpu, MSR_CORE_PERF_GLOBAL_CTRL,
+ vmcs12->host_ia32_perf_global_ctrl);
/* Set L1 segment info according to Intel SDM
27.5.2 Loading Host Segment and Descriptor-Table Registers */
@@ -3984,7 +4119,7 @@ static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu)
nested_ept_uninit_mmu_context(vcpu);
vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
- __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
+ kvm_register_mark_available(vcpu, VCPU_EXREG_CR3);
/*
* Use ept_save_pdptrs(vcpu) to load the MMU's cached PDPTRs
@@ -4112,6 +4247,8 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
+ if (vmx->nested.l1_tpr_threshold != -1)
+ vmcs_write32(TPR_THRESHOLD, vmx->nested.l1_tpr_threshold);
if (kvm_has_tsc_control)
decache_tsc_multiplier(vmx);
@@ -4119,15 +4256,11 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
if (vmx->nested.change_vmcs01_virtual_apic_mode) {
vmx->nested.change_vmcs01_virtual_apic_mode = false;
vmx_set_virtual_apic_mode(vcpu);
- } else if (!nested_cpu_has_ept(vmcs12) &&
- nested_cpu_has2(vmcs12,
- SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
- vmx_flush_tlb(vcpu, true);
}
/* Unpin physical memory we referred to in vmcs02 */
if (vmx->nested.apic_access_page) {
- kvm_release_page_dirty(vmx->nested.apic_access_page);
+ kvm_release_page_clean(vmx->nested.apic_access_page);
vmx->nested.apic_access_page = NULL;
}
kvm_vcpu_unmap(vcpu, &vmx->nested.virtual_apic_map, true);
@@ -4327,6 +4460,27 @@ int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
return 0;
}
+void nested_vmx_pmu_entry_exit_ctls_update(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_vmx *vmx;
+
+ if (!nested_vmx_allowed(vcpu))
+ return;
+
+ vmx = to_vmx(vcpu);
+ if (kvm_x86_ops->pmu_ops->is_valid_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL)) {
+ vmx->nested.msrs.entry_ctls_high |=
+ VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
+ vmx->nested.msrs.exit_ctls_high |=
+ VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
+ } else {
+ vmx->nested.msrs.entry_ctls_high &=
+ ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
+ vmx->nested.msrs.exit_ctls_high &=
+ ~VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
+ }
+}
+
static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer)
{
gva_t gva;
@@ -5766,7 +5920,7 @@ error_guest_mode:
return ret;
}
-void nested_vmx_vcpu_setup(void)
+void nested_vmx_set_vmcs_shadowing_bitmap(void)
{
if (enable_shadow_vmcs) {
vmcs_write64(VMREAD_BITMAP, __pa(vmx_vmread_bitmap));
@@ -6047,23 +6201,23 @@ __init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *))
init_vmcs_shadow_fields();
}
- exit_handlers[EXIT_REASON_VMCLEAR] = handle_vmclear,
- exit_handlers[EXIT_REASON_VMLAUNCH] = handle_vmlaunch,
- exit_handlers[EXIT_REASON_VMPTRLD] = handle_vmptrld,
- exit_handlers[EXIT_REASON_VMPTRST] = handle_vmptrst,
- exit_handlers[EXIT_REASON_VMREAD] = handle_vmread,
- exit_handlers[EXIT_REASON_VMRESUME] = handle_vmresume,
- exit_handlers[EXIT_REASON_VMWRITE] = handle_vmwrite,
- exit_handlers[EXIT_REASON_VMOFF] = handle_vmoff,
- exit_handlers[EXIT_REASON_VMON] = handle_vmon,
- exit_handlers[EXIT_REASON_INVEPT] = handle_invept,
- exit_handlers[EXIT_REASON_INVVPID] = handle_invvpid,
- exit_handlers[EXIT_REASON_VMFUNC] = handle_vmfunc,
+ exit_handlers[EXIT_REASON_VMCLEAR] = handle_vmclear;
+ exit_handlers[EXIT_REASON_VMLAUNCH] = handle_vmlaunch;
+ exit_handlers[EXIT_REASON_VMPTRLD] = handle_vmptrld;
+ exit_handlers[EXIT_REASON_VMPTRST] = handle_vmptrst;
+ exit_handlers[EXIT_REASON_VMREAD] = handle_vmread;
+ exit_handlers[EXIT_REASON_VMRESUME] = handle_vmresume;
+ exit_handlers[EXIT_REASON_VMWRITE] = handle_vmwrite;
+ exit_handlers[EXIT_REASON_VMOFF] = handle_vmoff;
+ exit_handlers[EXIT_REASON_VMON] = handle_vmon;
+ exit_handlers[EXIT_REASON_INVEPT] = handle_invept;
+ exit_handlers[EXIT_REASON_INVVPID] = handle_invvpid;
+ exit_handlers[EXIT_REASON_VMFUNC] = handle_vmfunc;
kvm_x86_ops->check_nested_events = vmx_check_nested_events;
kvm_x86_ops->get_nested_state = vmx_get_nested_state;
kvm_x86_ops->set_nested_state = vmx_set_nested_state;
- kvm_x86_ops->get_vmcs12_pages = nested_get_vmcs12_pages,
+ kvm_x86_ops->get_vmcs12_pages = nested_get_vmcs12_pages;
kvm_x86_ops->nested_enable_evmcs = nested_enable_evmcs;
kvm_x86_ops->nested_get_evmcs_version = nested_get_evmcs_version;
diff --git a/arch/x86/kvm/vmx/nested.h b/arch/x86/kvm/vmx/nested.h
index 6280f33e5fa6..fc874d4ead0f 100644
--- a/arch/x86/kvm/vmx/nested.h
+++ b/arch/x86/kvm/vmx/nested.h
@@ -21,7 +21,7 @@ void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps,
bool apicv);
void nested_vmx_hardware_unsetup(void);
__init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *));
-void nested_vmx_vcpu_setup(void);
+void nested_vmx_set_vmcs_shadowing_bitmap(void);
void nested_vmx_free_vcpu(struct kvm_vcpu *vcpu);
enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
bool from_vmentry);
@@ -33,6 +33,7 @@ int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
int vmx_get_vmx_msr(struct nested_vmx_msrs *msrs, u32 msr_index, u64 *pdata);
int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
u32 vmx_instruction_info, bool wr, int len, gva_t *ret);
+void nested_vmx_pmu_entry_exit_ctls_update(struct kvm_vcpu *vcpu);
static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu)
{
@@ -256,7 +257,7 @@ static inline bool fixed_bits_valid(u64 val, u64 fixed0, u64 fixed1)
return ((val & fixed1) | fixed0) == val;
}
-static bool nested_guest_cr0_valid(struct kvm_vcpu *vcpu, unsigned long val)
+static inline bool nested_guest_cr0_valid(struct kvm_vcpu *vcpu, unsigned long val)
{
u64 fixed0 = to_vmx(vcpu)->nested.msrs.cr0_fixed0;
u64 fixed1 = to_vmx(vcpu)->nested.msrs.cr0_fixed1;
@@ -270,7 +271,7 @@ static bool nested_guest_cr0_valid(struct kvm_vcpu *vcpu, unsigned long val)
return fixed_bits_valid(val, fixed0, fixed1);
}
-static bool nested_host_cr0_valid(struct kvm_vcpu *vcpu, unsigned long val)
+static inline bool nested_host_cr0_valid(struct kvm_vcpu *vcpu, unsigned long val)
{
u64 fixed0 = to_vmx(vcpu)->nested.msrs.cr0_fixed0;
u64 fixed1 = to_vmx(vcpu)->nested.msrs.cr0_fixed1;
@@ -278,7 +279,7 @@ static bool nested_host_cr0_valid(struct kvm_vcpu *vcpu, unsigned long val)
return fixed_bits_valid(val, fixed0, fixed1);
}
-static bool nested_cr4_valid(struct kvm_vcpu *vcpu, unsigned long val)
+static inline bool nested_cr4_valid(struct kvm_vcpu *vcpu, unsigned long val)
{
u64 fixed0 = to_vmx(vcpu)->nested.msrs.cr4_fixed0;
u64 fixed1 = to_vmx(vcpu)->nested.msrs.cr4_fixed1;
diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index 3e9c059099e9..7023138b1cb0 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -15,6 +15,7 @@
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
+#include "nested.h"
#include "pmu.h"
static struct kvm_event_hw_type_mapping intel_arch_events[] = {
@@ -46,6 +47,7 @@ static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
if (old_ctrl == new_ctrl)
continue;
+ __set_bit(INTEL_PMC_IDX_FIXED + i, pmu->pmc_in_use);
reprogram_fixed_counter(pmc, new_ctrl, i);
}
@@ -111,7 +113,7 @@ static struct kvm_pmc *intel_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
}
/* returns 0 if idx's corresponding MSR exists; otherwise returns 1. */
-static int intel_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx)
+static int intel_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
{
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
bool fixed = idx & (1u << 30);
@@ -122,8 +124,8 @@ static int intel_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx)
(fixed && idx >= pmu->nr_arch_fixed_counters);
}
-static struct kvm_pmc *intel_msr_idx_to_pmc(struct kvm_vcpu *vcpu,
- unsigned idx, u64 *mask)
+static struct kvm_pmc *intel_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu,
+ unsigned int idx, u64 *mask)
{
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
bool fixed = idx & (1u << 30);
@@ -162,6 +164,18 @@ static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
return ret;
}
+static struct kvm_pmc *intel_msr_idx_to_pmc(struct kvm_vcpu *vcpu, u32 msr)
+{
+ struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
+ struct kvm_pmc *pmc;
+
+ pmc = get_fixed_pmc(pmu, msr);
+ pmc = pmc ? pmc : get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0);
+ pmc = pmc ? pmc : get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0);
+
+ return pmc;
+}
+
static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
{
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
@@ -223,7 +237,7 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
case MSR_CORE_PERF_GLOBAL_CTRL:
if (pmu->global_ctrl == data)
return 0;
- if (!(data & pmu->global_ctrl_mask)) {
+ if (kvm_valid_perf_global_ctrl(pmu, data)) {
global_ctrl_changed(pmu, data);
return 0;
}
@@ -317,6 +331,13 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
(boot_cpu_has(X86_FEATURE_HLE) || boot_cpu_has(X86_FEATURE_RTM)) &&
(entry->ebx & (X86_FEATURE_HLE|X86_FEATURE_RTM)))
pmu->reserved_bits ^= HSW_IN_TX|HSW_IN_TX_CHECKPOINTED;
+
+ bitmap_set(pmu->all_valid_pmc_idx,
+ 0, pmu->nr_arch_gp_counters);
+ bitmap_set(pmu->all_valid_pmc_idx,
+ INTEL_PMC_MAX_GENERIC, pmu->nr_arch_fixed_counters);
+
+ nested_vmx_pmu_entry_exit_ctls_update(vcpu);
}
static void intel_pmu_init(struct kvm_vcpu *vcpu)
@@ -328,12 +349,14 @@ static void intel_pmu_init(struct kvm_vcpu *vcpu)
pmu->gp_counters[i].type = KVM_PMC_GP;
pmu->gp_counters[i].vcpu = vcpu;
pmu->gp_counters[i].idx = i;
+ pmu->gp_counters[i].current_config = 0;
}
for (i = 0; i < INTEL_PMC_MAX_FIXED; i++) {
pmu->fixed_counters[i].type = KVM_PMC_FIXED;
pmu->fixed_counters[i].vcpu = vcpu;
pmu->fixed_counters[i].idx = i + INTEL_PMC_IDX_FIXED;
+ pmu->fixed_counters[i].current_config = 0;
}
}
@@ -366,8 +389,9 @@ struct kvm_pmu_ops intel_pmu_ops = {
.find_fixed_event = intel_find_fixed_event,
.pmc_is_enabled = intel_pmc_is_enabled,
.pmc_idx_to_pmc = intel_pmc_idx_to_pmc,
+ .rdpmc_ecx_to_pmc = intel_rdpmc_ecx_to_pmc,
.msr_idx_to_pmc = intel_msr_idx_to_pmc,
- .is_valid_msr_idx = intel_is_valid_msr_idx,
+ .is_valid_rdpmc_ecx = intel_is_valid_rdpmc_ecx,
.is_valid_msr = intel_is_valid_msr,
.get_msr = intel_pmu_get_msr,
.set_msr = intel_pmu_set_msr,
diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S
index 751a384c2eb0..81ada2ce99e7 100644
--- a/arch/x86/kvm/vmx/vmenter.S
+++ b/arch/x86/kvm/vmx/vmenter.S
@@ -43,7 +43,7 @@
* they VM-Fail, whereas a successful VM-Enter + VM-Exit will jump
* to vmx_vmexit.
*/
-ENTRY(vmx_vmenter)
+SYM_FUNC_START(vmx_vmenter)
/* EFLAGS.ZF is set if VMCS.LAUNCHED == 0 */
je 2f
@@ -65,7 +65,7 @@ ENTRY(vmx_vmenter)
_ASM_EXTABLE(1b, 5b)
_ASM_EXTABLE(2b, 5b)
-ENDPROC(vmx_vmenter)
+SYM_FUNC_END(vmx_vmenter)
/**
* vmx_vmexit - Handle a VMX VM-Exit
@@ -77,7 +77,7 @@ ENDPROC(vmx_vmenter)
* here after hardware loads the host's state, i.e. this is the destination
* referred to by VMCS.HOST_RIP.
*/
-ENTRY(vmx_vmexit)
+SYM_FUNC_START(vmx_vmexit)
#ifdef CONFIG_RETPOLINE
ALTERNATIVE "jmp .Lvmexit_skip_rsb", "", X86_FEATURE_RETPOLINE
/* Preserve guest's RAX, it's used to stuff the RSB. */
@@ -90,7 +90,7 @@ ENTRY(vmx_vmexit)
.Lvmexit_skip_rsb:
#endif
ret
-ENDPROC(vmx_vmexit)
+SYM_FUNC_END(vmx_vmexit)
/**
* __vmx_vcpu_run - Run a vCPU via a transition to VMX guest mode
@@ -101,7 +101,7 @@ ENDPROC(vmx_vmexit)
* Returns:
* 0 on VM-Exit, 1 on VM-Fail
*/
-ENTRY(__vmx_vcpu_run)
+SYM_FUNC_START(__vmx_vcpu_run)
push %_ASM_BP
mov %_ASM_SP, %_ASM_BP
#ifdef CONFIG_X86_64
@@ -233,4 +233,4 @@ ENTRY(__vmx_vcpu_run)
/* VM-Fail. Out-of-line to avoid a taken Jcc after VM-Exit. */
2: mov $1, %eax
jmp 1b
-ENDPROC(__vmx_vcpu_run)
+SYM_FUNC_END(__vmx_vcpu_run)
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 5d21a4ab28cf..1b9ab4166397 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -106,8 +106,6 @@ module_param(enable_apicv, bool, S_IRUGO);
static bool __read_mostly nested = 1;
module_param(nested, bool, S_IRUGO);
-static u64 __read_mostly host_xss;
-
bool __read_mostly enable_pml = 1;
module_param_named(pml, enable_pml, bool, S_IRUGO);
@@ -450,6 +448,7 @@ const u32 vmx_msr_index[] = {
MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR,
#endif
MSR_EFER, MSR_TSC_AUX, MSR_STAR,
+ MSR_IA32_TSX_CTRL,
};
#if IS_ENABLED(CONFIG_HYPERV)
@@ -638,6 +637,23 @@ struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr)
return NULL;
}
+static int vmx_set_guest_msr(struct vcpu_vmx *vmx, struct shared_msr_entry *msr, u64 data)
+{
+ int ret = 0;
+
+ u64 old_msr_data = msr->data;
+ msr->data = data;
+ if (msr - vmx->guest_msrs < vmx->save_nmsrs) {
+ preempt_disable();
+ ret = kvm_set_shared_msr(msr->index, msr->data,
+ msr->mask);
+ preempt_enable();
+ if (ret)
+ msr->data = old_msr_data;
+ }
+ return ret;
+}
+
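vmx_set_guest_msr() packages a pattern the set_msr paths previously open-coded: record the old value, attempt the shared-MSR update with preemption disabled (the update may write the live hardware MSR of the current CPU), and restore the cached value if the write fails. Detached from the VMX types, the pattern is:

#include <stdint.h>

/* Sketch of update-with-rollback; commit stands in for kvm_set_shared_msr(). */
static int set_with_rollback(uint64_t *slot, uint64_t new_val,
                             int (*commit)(uint64_t))
{
        uint64_t old_val = *slot;
        int ret;

        *slot = new_val;
        ret = commit(new_val);
        if (ret)
                *slot = old_val;        /* keep cached state consistent */
        return ret;
}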
void loaded_vmcs_init(struct loaded_vmcs *loaded_vmcs)
{
vmcs_clear(loaded_vmcs->vmcs);
@@ -726,8 +742,8 @@ static bool vmx_segment_cache_test_set(struct vcpu_vmx *vmx, unsigned seg,
bool ret;
u32 mask = 1 << (seg * SEG_FIELD_NR + field);
- if (!(vmx->vcpu.arch.regs_avail & (1 << VCPU_EXREG_SEGMENTS))) {
- vmx->vcpu.arch.regs_avail |= (1 << VCPU_EXREG_SEGMENTS);
+ if (!kvm_register_is_available(&vmx->vcpu, VCPU_EXREG_SEGMENTS)) {
+ kvm_register_mark_available(&vmx->vcpu, VCPU_EXREG_SEGMENTS);
vmx->segment_cache.bitmask = 0;
}
ret = vmx->segment_cache.bitmask & mask;
@@ -835,7 +851,7 @@ static void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx,
vm_exit_controls_clearbit(vmx, exit);
}
-static int find_msr(struct vmx_msrs *m, unsigned int msr)
+int vmx_find_msr_index(struct vmx_msrs *m, u32 msr)
{
unsigned int i;
@@ -869,7 +885,7 @@ static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
}
break;
}
- i = find_msr(&m->guest, msr);
+ i = vmx_find_msr_index(&m->guest, msr);
if (i < 0)
goto skip_guest;
--m->guest.nr;
@@ -877,7 +893,7 @@ static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr);
skip_guest:
- i = find_msr(&m->host, msr);
+ i = vmx_find_msr_index(&m->host, msr);
if (i < 0)
return;
@@ -936,12 +952,12 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
}
- i = find_msr(&m->guest, msr);
+ i = vmx_find_msr_index(&m->guest, msr);
if (!entry_only)
- j = find_msr(&m->host, msr);
+ j = vmx_find_msr_index(&m->host, msr);
- if ((i < 0 && m->guest.nr == NR_AUTOLOAD_MSRS) ||
- (j < 0 && m->host.nr == NR_AUTOLOAD_MSRS)) {
+ if ((i < 0 && m->guest.nr == NR_LOADSTORE_MSRS) ||
+ (j < 0 && m->host.nr == NR_LOADSTORE_MSRS)) {
printk_once(KERN_WARNING "Not enough msr switch entries. "
"Can't add msr %x\n", msr);
return;
@@ -1268,6 +1284,18 @@ static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
if (!pi_test_sn(pi_desc) && vcpu->cpu == cpu)
return;
+ /*
+ * If the 'nv' field is POSTED_INTR_WAKEUP_VECTOR, do not change
+ * PI.NDST: pi_post_block is the one expected to change PI.NDST and the
+ * wakeup handler expects the vCPU to be on the blocked_vcpu_list that
+ * matches PI.NDST. Otherwise, a vCPU may not be woken up
+ * correctly.
+ */
+ if (pi_desc->nv == POSTED_INTR_WAKEUP_VECTOR || vcpu->cpu == cpu) {
+ pi_clear_sn(pi_desc);
+ goto after_clear_sn;
+ }
+
/* The full case. */
do {
old.control = new.control = pi_desc->control;
@@ -1283,6 +1311,8 @@ static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
} while (cmpxchg64(&pi_desc->control, old.control,
new.control) != old.control);
+after_clear_sn:
+
/*
* Clear SN before reading the bitmap. The VT-d firmware
* writes the bitmap and reads SN atomically (5.2.3 in the
@@ -1291,7 +1321,7 @@ static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
*/
smp_mb__after_atomic();
- if (!bitmap_empty((unsigned long *)pi_desc->pir, NR_VECTORS))
+ if (!pi_is_pir_empty(pi_desc))
pi_set_on(pi_desc);
}
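pi_is_pir_empty() is not defined in this hunk; presumably it wraps the same bitmap_empty() test over the 256-bit posted-interrupt request register that the open-coded version performed. Checking four 64-bit words suffices, as this model shows:

#include <stdbool.h>
#include <stdint.h>

/* Model: the PIR holds one bit per vector (256 bits = four u64 words). */
static bool pir_empty(const uint64_t pir[4])
{
        return !(pir[0] | pir[1] | pir[2] | pir[3]);
}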
@@ -1338,14 +1368,6 @@ void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu)
(unsigned long)&get_cpu_entry_area(cpu)->tss.x86_tss);
vmcs_writel(HOST_GDTR_BASE, (unsigned long)gdt); /* 22.2.4 */
- /*
- * VM exits change the host TR limit to 0x67 after a VM
- * exit. This is okay, since 0x67 covers everything except
- * the IO bitmap and have have code to handle the IO bitmap
- * being lost after a VM exit.
- */
- BUILD_BUG_ON(IO_BITMAP_OFFSET - 1 != 0x67);
-
rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
@@ -1404,35 +1426,44 @@ static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu);
unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
unsigned long rflags, save_rflags;
- if (!test_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail)) {
- __set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail);
+ if (!kvm_register_is_available(vcpu, VCPU_EXREG_RFLAGS)) {
+ kvm_register_mark_available(vcpu, VCPU_EXREG_RFLAGS);
rflags = vmcs_readl(GUEST_RFLAGS);
- if (to_vmx(vcpu)->rmode.vm86_active) {
+ if (vmx->rmode.vm86_active) {
rflags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
- save_rflags = to_vmx(vcpu)->rmode.save_rflags;
+ save_rflags = vmx->rmode.save_rflags;
rflags |= save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
}
- to_vmx(vcpu)->rflags = rflags;
+ vmx->rflags = rflags;
}
- return to_vmx(vcpu)->rflags;
+ return vmx->rflags;
}
void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
- unsigned long old_rflags = vmx_get_rflags(vcpu);
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ unsigned long old_rflags;
- __set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail);
- to_vmx(vcpu)->rflags = rflags;
- if (to_vmx(vcpu)->rmode.vm86_active) {
- to_vmx(vcpu)->rmode.save_rflags = rflags;
+ if (enable_unrestricted_guest) {
+ kvm_register_mark_available(vcpu, VCPU_EXREG_RFLAGS);
+ vmx->rflags = rflags;
+ vmcs_writel(GUEST_RFLAGS, rflags);
+ return;
+ }
+
+ old_rflags = vmx_get_rflags(vcpu);
+ vmx->rflags = rflags;
+ if (vmx->rmode.vm86_active) {
+ vmx->rmode.save_rflags = rflags;
rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
}
vmcs_writel(GUEST_RFLAGS, rflags);
- if ((old_rflags ^ to_vmx(vcpu)->rflags) & X86_EFLAGS_VM)
- to_vmx(vcpu)->emulation_required = emulation_required(vcpu);
+ if ((old_rflags ^ vmx->rflags) & X86_EFLAGS_VM)
+ vmx->emulation_required = emulation_required(vcpu);
}
u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu)
@@ -1669,6 +1700,9 @@ static void setup_msrs(struct vcpu_vmx *vmx)
index = __find_msr_index(vmx, MSR_TSC_AUX);
if (index >= 0 && guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDTSCP))
move_msr_up(vmx, index, save_nmsrs++);
+ index = __find_msr_index(vmx, MSR_IA32_TSX_CTRL);
+ if (index >= 0)
+ move_msr_up(vmx, index, save_nmsrs++);
vmx->save_nmsrs = save_nmsrs;
vmx->guest_msrs_ready = false;
@@ -1768,6 +1802,11 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
#endif
case MSR_EFER:
return kvm_get_msr_common(vcpu, msr_info);
+ case MSR_IA32_TSX_CTRL:
+ if (!msr_info->host_initiated &&
+ !(vcpu->arch.arch_capabilities & ARCH_CAP_TSX_CTRL_MSR))
+ return 1;
+ goto find_shared_msr;
case MSR_IA32_UMWAIT_CONTROL:
if (!msr_info->host_initiated && !vmx_has_waitpkg(vmx))
return 1;
@@ -1812,14 +1851,6 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
return 1;
return vmx_get_vmx_msr(&vmx->nested.msrs, msr_info->index,
&msr_info->data);
- case MSR_IA32_XSS:
- if (!vmx_xsaves_supported() ||
- (!msr_info->host_initiated &&
- !(guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
- guest_cpuid_has(vcpu, X86_FEATURE_XSAVES))))
- return 1;
- msr_info->data = vcpu->arch.ia32_xss;
- break;
case MSR_IA32_RTIT_CTL:
if (pt_mode != PT_MODE_HOST_GUEST)
return 1;
@@ -1870,8 +1901,9 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
if (!msr_info->host_initiated &&
!guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
return 1;
- /* Else, falls through */
+ goto find_shared_msr;
default:
+ find_shared_msr:
msr = find_msr_entry(vmx, msr_info->index);
if (msr) {
msr_info->data = msr->data;
@@ -1987,6 +2019,13 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
MSR_IA32_SPEC_CTRL,
MSR_TYPE_RW);
break;
+ case MSR_IA32_TSX_CTRL:
+ if (!msr_info->host_initiated &&
+ !(vcpu->arch.arch_capabilities & ARCH_CAP_TSX_CTRL_MSR))
+ return 1;
+ if (data & ~(TSX_CTRL_RTM_DISABLE | TSX_CTRL_CPUID_CLEAR))
+ return 1;
+ goto find_shared_msr;
case MSR_IA32_PRED_CMD:
if (!msr_info->host_initiated &&
!guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
@@ -2055,25 +2094,6 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
if (!nested_vmx_allowed(vcpu))
return 1;
return vmx_set_vmx_msr(vcpu, msr_index, data);
- case MSR_IA32_XSS:
- if (!vmx_xsaves_supported() ||
- (!msr_info->host_initiated &&
- !(guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
- guest_cpuid_has(vcpu, X86_FEATURE_XSAVES))))
- return 1;
- /*
- * The only supported bit as of Skylake is bit 8, but
- * it is not supported on KVM.
- */
- if (data != 0)
- return 1;
- vcpu->arch.ia32_xss = data;
- if (vcpu->arch.ia32_xss != host_xss)
- add_atomic_switch_msr(vmx, MSR_IA32_XSS,
- vcpu->arch.ia32_xss, host_xss, false);
- else
- clear_atomic_switch_msr(vmx, MSR_IA32_XSS);
- break;
case MSR_IA32_RTIT_CTL:
if ((pt_mode != PT_MODE_HOST_GUEST) ||
vmx_rtit_ctl_check(vcpu, data) ||
@@ -2138,23 +2158,15 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
/* Check reserved bit, higher 32 bits should be zero */
if ((data >> 32) != 0)
return 1;
- /* Else, falls through */
+ goto find_shared_msr;
+
default:
+ find_shared_msr:
msr = find_msr_entry(vmx, msr_index);
- if (msr) {
- u64 old_msr_data = msr->data;
- msr->data = data;
- if (msr - vmx->guest_msrs < vmx->save_nmsrs) {
- preempt_disable();
- ret = kvm_set_shared_msr(msr->index, msr->data,
- msr->mask);
- preempt_enable();
- if (ret)
- msr->data = old_msr_data;
- }
- break;
- }
- ret = kvm_set_msr_common(vcpu, msr_info);
+ if (msr)
+ ret = vmx_set_guest_msr(vmx, msr, data);
+ else
+ ret = kvm_set_msr_common(vcpu, msr_info);
}
return ret;
@@ -2162,7 +2174,8 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
{
- __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
+ kvm_register_mark_available(vcpu, reg);
+
switch (reg) {
case VCPU_REGS_RSP:
vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP);
@@ -2174,7 +2187,12 @@ static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
if (enable_ept)
ept_save_pdptrs(vcpu);
break;
+ case VCPU_EXREG_CR3:
+ if (enable_unrestricted_guest || (enable_ept && is_paging(vcpu)))
+ vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
+ break;
default:
+ WARN_ON_ONCE(1);
break;
}
}
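
vmx_cache_reg() now also handles CR3, replacing the dedicated vmx_decache_cr3() removed below; it is part of a lazy register cache keyed by avail/dirty bits: a register is read from the VMCS at most once per exit and written back only if dirtied. A simplified user-space sketch of that pattern (the names and the vmcs_read_stub() helper are illustrative, not KVM's API):

    #include <stdbool.h>
    #include <stdio.h>

    enum reg { REG_RSP, REG_RIP, REG_CR3, NR_REGS };

    struct vcpu {
            unsigned long regs[NR_REGS];
            unsigned long avail; /* bit set: regs[] holds a valid copy */
            unsigned long dirty; /* bit set: regs[] is newer than the VMCS */
    };

    static bool is_available(struct vcpu *v, enum reg r) { return v->avail & (1UL << r); }
    static void mark_available(struct vcpu *v, enum reg r) { v->avail |= 1UL << r; }
    static void mark_dirty(struct vcpu *v, enum reg r) { v->avail |= 1UL << r; v->dirty |= 1UL << r; }

    /* Stand-in for vmcs_readl(): pretend the hardware value is 0x1000. */
    static unsigned long vmcs_read_stub(enum reg r) { (void)r; return 0x1000; }

    /* Read a register, faulting it in from the "VMCS" on first use. */
    static unsigned long read_reg(struct vcpu *v, enum reg r)
    {
            if (!is_available(v, r)) {
                    v->regs[r] = vmcs_read_stub(r);
                    mark_available(v, r);
            }
            return v->regs[r];
    }

    int main(void)
    {
            struct vcpu v = { { 0 }, 0, 0 };

            printf("%lx\n", read_reg(&v, REG_CR3)); /* 1000, fetched once */
            v.regs[REG_RIP] = 0x2000;
            mark_dirty(&v, REG_RIP); /* flushed back before VM-entry */
            printf("dirty=%lx\n", v.dirty);
            return 0;
    }
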
@@ -2845,13 +2863,6 @@ static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
vcpu->arch.cr0 |= vmcs_readl(GUEST_CR0) & cr0_guest_owned_bits;
}
-static void vmx_decache_cr3(struct kvm_vcpu *vcpu)
-{
- if (enable_unrestricted_guest || (enable_ept && is_paging(vcpu)))
- vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
- __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
-}
-
static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
{
ulong cr4_guest_owned_bits = vcpu->arch.cr4_guest_owned_bits;
@@ -2864,8 +2875,7 @@ static void ept_load_pdptrs(struct kvm_vcpu *vcpu)
{
struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
- if (!test_bit(VCPU_EXREG_PDPTR,
- (unsigned long *)&vcpu->arch.regs_dirty))
+ if (!kvm_register_is_dirty(vcpu, VCPU_EXREG_PDPTR))
return;
if (is_pae_paging(vcpu)) {
@@ -2887,10 +2897,7 @@ void ept_save_pdptrs(struct kvm_vcpu *vcpu)
mmu->pdptrs[3] = vmcs_read64(GUEST_PDPTR3);
}
- __set_bit(VCPU_EXREG_PDPTR,
- (unsigned long *)&vcpu->arch.regs_avail);
- __set_bit(VCPU_EXREG_PDPTR,
- (unsigned long *)&vcpu->arch.regs_dirty);
+ kvm_register_mark_dirty(vcpu, VCPU_EXREG_PDPTR);
}
static void ept_update_paging_mode_cr0(unsigned long *hw_cr0,
@@ -2899,8 +2906,8 @@ static void ept_update_paging_mode_cr0(unsigned long *hw_cr0,
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
- if (!test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail))
- vmx_decache_cr3(vcpu);
+ if (!kvm_register_is_available(vcpu, VCPU_EXREG_CR3))
+ vmx_cache_reg(vcpu, VCPU_EXREG_CR3);
if (!(cr0 & X86_CR0_PG)) {
/* From paging/starting to nonpaging */
exec_controls_setbit(vmx, CPU_BASED_CR3_LOAD_EXITING |
@@ -2981,6 +2988,7 @@ u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa)
void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
struct kvm *kvm = vcpu->kvm;
+ bool update_guest_cr3 = true;
unsigned long guest_cr3;
u64 eptp;
@@ -2997,15 +3005,20 @@ void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
spin_unlock(&to_kvm_vmx(kvm)->ept_pointer_lock);
}
- if (enable_unrestricted_guest || is_paging(vcpu) ||
- is_guest_mode(vcpu))
- guest_cr3 = kvm_read_cr3(vcpu);
- else
+ /* Loading vmcs02.GUEST_CR3 is handled by nested VM-Enter. */
+ if (is_guest_mode(vcpu))
+ update_guest_cr3 = false;
+ else if (!enable_unrestricted_guest && !is_paging(vcpu))
guest_cr3 = to_kvm_vmx(kvm)->ept_identity_map_addr;
+ else if (test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail))
+ guest_cr3 = vcpu->arch.cr3;
+ else /* vmcs01.GUEST_CR3 is already up-to-date. */
+ update_guest_cr3 = false;
ept_load_pdptrs(vcpu);
}
- vmcs_writel(GUEST_CR3, guest_cr3);
+ if (update_guest_cr3)
+ vmcs_writel(GUEST_CR3, guest_cr3);
}
int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
@@ -3739,7 +3752,7 @@ void pt_update_intercept_for_msr(struct vcpu_vmx *vmx)
}
}
-static bool vmx_get_enable_apicv(struct kvm_vcpu *vcpu)
+static bool vmx_get_enable_apicv(struct kvm *kvm)
{
return enable_apicv;
}
@@ -4032,6 +4045,8 @@ static void vmx_compute_secondary_exec_control(struct vcpu_vmx *vmx)
guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
guest_cpuid_has(vcpu, X86_FEATURE_XSAVES);
+ vcpu->arch.xsaves_enabled = xsaves_enabled;
+
if (!xsaves_enabled)
exec_control &= ~SECONDARY_EXEC_XSAVES;
@@ -4144,14 +4159,13 @@ static void ept_set_mmio_spte_mask(void)
#define VMX_XSS_EXIT_BITMAP 0
/*
- * Sets up the vmcs for emulated real mode.
+ * Note that initialization of the VMCS Guest-state Area is done in
+ * vmx_vcpu_reset().
*/
-static void vmx_vcpu_setup(struct vcpu_vmx *vmx)
+static void init_vmcs(struct vcpu_vmx *vmx)
{
- int i;
-
if (nested)
- nested_vmx_vcpu_setup();
+ nested_vmx_set_vmcs_shadowing_bitmap();
if (cpu_has_vmx_msr_bitmap())
vmcs_write64(MSR_BITMAP, __pa(vmx->vmcs01.msr_bitmap));
@@ -4160,7 +4174,6 @@ static void vmx_vcpu_setup(struct vcpu_vmx *vmx)
/* Control */
pin_controls_set(vmx, vmx_pin_based_exec_ctrl(vmx));
- vmx->hv_deadline_tsc = -1;
exec_controls_set(vmx, vmx_exec_control(vmx));
@@ -4209,21 +4222,6 @@ static void vmx_vcpu_setup(struct vcpu_vmx *vmx)
if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT)
vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
- for (i = 0; i < ARRAY_SIZE(vmx_msr_index); ++i) {
- u32 index = vmx_msr_index[i];
- u32 data_low, data_high;
- int j = vmx->nmsrs;
-
- if (rdmsr_safe(index, &data_low, &data_high) < 0)
- continue;
- if (wrmsr_safe(index, data_low, data_high) < 0)
- continue;
- vmx->guest_msrs[j].index = i;
- vmx->guest_msrs[j].data = 0;
- vmx->guest_msrs[j].mask = -1ull;
- ++vmx->nmsrs;
- }
-
vm_exit_controls_set(vmx, vmx_vmexit_ctrl());
/* 22.2.1, 20.8.1 */
@@ -4234,6 +4232,9 @@ static void vmx_vcpu_setup(struct vcpu_vmx *vmx)
set_cr4_guest_host_mask(vmx);
+ if (vmx->vpid != 0)
+ vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
+
if (vmx_xsaves_supported())
vmcs_write64(XSS_EXIT_BITMAP, VMX_XSS_EXIT_BITMAP);
@@ -4336,9 +4337,6 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
- if (vmx->vpid != 0)
- vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
-
cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET;
vmx->vcpu.arch.cr0 = cr0;
vmx_set_cr0(vcpu, cr0); /* enter rmode */
@@ -4693,7 +4691,7 @@ static int handle_exception_nmi(struct kvm_vcpu *vcpu)
return 0;
}
-static int handle_external_interrupt(struct kvm_vcpu *vcpu)
+static __always_inline int handle_external_interrupt(struct kvm_vcpu *vcpu)
{
++vcpu->stat.irq_exits;
return 1;
@@ -4965,21 +4963,6 @@ static void vmx_set_dr7(struct kvm_vcpu *vcpu, unsigned long val)
vmcs_writel(GUEST_DR7, val);
}
-static int handle_cpuid(struct kvm_vcpu *vcpu)
-{
- return kvm_emulate_cpuid(vcpu);
-}
-
-static int handle_rdmsr(struct kvm_vcpu *vcpu)
-{
- return kvm_emulate_rdmsr(vcpu);
-}
-
-static int handle_wrmsr(struct kvm_vcpu *vcpu)
-{
- return kvm_emulate_wrmsr(vcpu);
-}
-
static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu)
{
kvm_apic_update_ppr(vcpu);
@@ -4996,11 +4979,6 @@ static int handle_interrupt_window(struct kvm_vcpu *vcpu)
return 1;
}
-static int handle_halt(struct kvm_vcpu *vcpu)
-{
- return kvm_emulate_halt(vcpu);
-}
-
static int handle_vmcall(struct kvm_vcpu *vcpu)
{
return kvm_emulate_hypercall(vcpu);
@@ -5548,11 +5526,11 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
[EXIT_REASON_IO_INSTRUCTION] = handle_io,
[EXIT_REASON_CR_ACCESS] = handle_cr,
[EXIT_REASON_DR_ACCESS] = handle_dr,
- [EXIT_REASON_CPUID] = handle_cpuid,
- [EXIT_REASON_MSR_READ] = handle_rdmsr,
- [EXIT_REASON_MSR_WRITE] = handle_wrmsr,
+ [EXIT_REASON_CPUID] = kvm_emulate_cpuid,
+ [EXIT_REASON_MSR_READ] = kvm_emulate_rdmsr,
+ [EXIT_REASON_MSR_WRITE] = kvm_emulate_wrmsr,
[EXIT_REASON_PENDING_INTERRUPT] = handle_interrupt_window,
- [EXIT_REASON_HLT] = handle_halt,
+ [EXIT_REASON_HLT] = kvm_emulate_halt,
[EXIT_REASON_INVD] = handle_invd,
[EXIT_REASON_INVLPG] = handle_invlpg,
[EXIT_REASON_RDPMC] = handle_rdpmc,
@@ -5925,9 +5903,23 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
}
if (exit_reason < kvm_vmx_max_exit_handlers
- && kvm_vmx_exit_handlers[exit_reason])
+ && kvm_vmx_exit_handlers[exit_reason]) {
+#ifdef CONFIG_RETPOLINE
+ if (exit_reason == EXIT_REASON_MSR_WRITE)
+ return kvm_emulate_wrmsr(vcpu);
+ else if (exit_reason == EXIT_REASON_PREEMPTION_TIMER)
+ return handle_preemption_timer(vcpu);
+ else if (exit_reason == EXIT_REASON_PENDING_INTERRUPT)
+ return handle_interrupt_window(vcpu);
+ else if (exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT)
+ return handle_external_interrupt(vcpu);
+ else if (exit_reason == EXIT_REASON_HLT)
+ return kvm_emulate_halt(vcpu);
+ else if (exit_reason == EXIT_REASON_EPT_MISCONFIG)
+ return handle_ept_misconfig(vcpu);
+#endif
return kvm_vmx_exit_handlers[exit_reason](vcpu);
- else {
+ } else {
vcpu_unimpl(vcpu, "vmx: unexpected exit reason 0x%x\n",
exit_reason);
dump_vmcs();
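
Under retpoline every indirect branch goes through a thunk, so the table dispatch above becomes comparatively expensive; the added block short-circuits the hottest exit reasons with direct calls. A compact sketch of the idea (exit numbers and handlers are made up):

    #include <stdio.h>

    /* Hot handlers, called directly so no indirect branch is taken. */
    static int handle_msr_write(void) { return 1; }
    static int handle_hlt(void)       { return 1; }
    /* Everything else goes through the table. */
    static int handle_cpuid(void)     { return 1; }

    enum { EXIT_CPUID, EXIT_HLT, EXIT_MSR_WRITE, NR_EXITS };

    static int (*const handlers[NR_EXITS])(void) = {
            [EXIT_CPUID]     = handle_cpuid,
            [EXIT_HLT]       = handle_hlt,
            [EXIT_MSR_WRITE] = handle_msr_write,
    };

    static int dispatch(int reason)
    {
    #ifdef CONFIG_RETPOLINE
            /* Direct calls for the common exits; cheap under retpoline. */
            if (reason == EXIT_MSR_WRITE)
                    return handle_msr_write();
            if (reason == EXIT_HLT)
                    return handle_hlt();
    #endif
            return handlers[reason](); /* indirect call: retpoline-thunked */
    }

    int main(void)
    {
            printf("%d\n", dispatch(EXIT_HLT));
            return 0;
    }
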
@@ -6013,17 +6005,17 @@ static void vmx_l1d_flush(struct kvm_vcpu *vcpu)
static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
{
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+ int tpr_threshold;
if (is_guest_mode(vcpu) &&
nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW))
return;
- if (irr == -1 || tpr < irr) {
- vmcs_write32(TPR_THRESHOLD, 0);
- return;
- }
-
- vmcs_write32(TPR_THRESHOLD, irr);
+ tpr_threshold = (irr == -1 || tpr < irr) ? 0 : irr;
+ if (is_guest_mode(vcpu))
+ to_vmx(vcpu)->nested.l1_tpr_threshold = tpr_threshold;
+ else
+ vmcs_write32(TPR_THRESHOLD, tpr_threshold);
}
void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu)
@@ -6137,7 +6129,7 @@ static int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
if (pi_test_on(&vmx->pi_desc)) {
pi_clear_on(&vmx->pi_desc);
/*
- * IOMMU can write to PIR.ON, so the barrier matters even on UP.
+ * IOMMU can write to PID.ON, so the barrier matters even on UP.
* But on x86 this is just a compiler barrier anyway.
*/
smp_mb__after_atomic();
@@ -6167,7 +6159,10 @@ static int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
static bool vmx_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu)
{
- return pi_test_on(vcpu_to_pi_desc(vcpu));
+ struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
+
+ return pi_test_on(pi_desc) ||
+ (pi_test_sn(pi_desc) && !pi_is_pir_empty(pi_desc));
}
static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
@@ -6497,9 +6492,9 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
if (vmx->nested.need_vmcs12_to_shadow_sync)
nested_sync_vmcs12_to_shadow(vcpu);
- if (test_bit(VCPU_REGS_RSP, (unsigned long *)&vcpu->arch.regs_dirty))
+ if (kvm_register_is_dirty(vcpu, VCPU_REGS_RSP))
vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
- if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty))
+ if (kvm_register_is_dirty(vcpu, VCPU_REGS_RIP))
vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
cr3 = __get_current_cr3_fast();
@@ -6522,7 +6517,7 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
vmx_set_interrupt_shadow(vcpu, 0);
- kvm_load_guest_xcr0(vcpu);
+ kvm_load_guest_xsave_state(vcpu);
if (static_cpu_has(X86_FEATURE_PKU) &&
kvm_read_cr4_bits(vcpu, X86_CR4_PKE) &&
@@ -6629,7 +6624,7 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
__write_pkru(vmx->host_pkru);
}
- kvm_put_guest_xcr0(vcpu);
+ kvm_load_host_xsave_state(vcpu);
vmx->nested.nested_run_pending = 0;
vmx->idt_vectoring_info = 0;
@@ -6683,7 +6678,7 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
int err;
struct vcpu_vmx *vmx;
unsigned long *msr_bitmap;
- int cpu;
+ int i, cpu;
BUILD_BUG_ON_MSG(offsetof(struct vcpu_vmx, vcpu) != 0,
"struct kvm_vcpu must be at offset 0 for arch usercopy region");
@@ -6735,6 +6730,34 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
if (!vmx->guest_msrs)
goto free_pml;
+ for (i = 0; i < ARRAY_SIZE(vmx_msr_index); ++i) {
+ u32 index = vmx_msr_index[i];
+ u32 data_low, data_high;
+ int j = vmx->nmsrs;
+
+ if (rdmsr_safe(index, &data_low, &data_high) < 0)
+ continue;
+ if (wrmsr_safe(index, data_low, data_high) < 0)
+ continue;
+
+ vmx->guest_msrs[j].index = i;
+ vmx->guest_msrs[j].data = 0;
+ switch (index) {
+ case MSR_IA32_TSX_CTRL:
+ /*
+ * No need to pass TSX_CTRL_CPUID_CLEAR through, so
+ * let's avoid changing CPUID bits under the host
+ * kernel's feet.
+ */
+ vmx->guest_msrs[j].mask = ~(u64)TSX_CTRL_CPUID_CLEAR;
+ break;
+ default:
+ vmx->guest_msrs[j].mask = -1ull;
+ break;
+ }
+ ++vmx->nmsrs;
+ }
+
err = alloc_loaded_vmcs(&vmx->vmcs01);
if (err < 0)
goto free_msrs;
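
The per-entry mask initialized here is what kvm_set_shared_msr() later applies (see the x86.c hunk further down): masked-off bits always keep the host's value, so a guest-written TSX_CTRL_CPUID_CLEAR never reaches the hardware MSR. A small sketch of the mask semantics; the two bit positions are believed to match the architectural TSX_CTRL layout but are illustrative here:

    #include <stdio.h>

    #define TSX_CTRL_RTM_DISABLE (1ULL << 0)
    #define TSX_CTRL_CPUID_CLEAR (1ULL << 1)

    /* What actually reaches the hardware MSR: guest bits only where the
     * mask allows them, host bits everywhere else. */
    static unsigned long long
    effective_msr(unsigned long long guest, unsigned long long host,
                  unsigned long long mask)
    {
            return (guest & mask) | (host & ~mask);
    }

    int main(void)
    {
            unsigned long long mask = ~(unsigned long long)TSX_CTRL_CPUID_CLEAR;
            unsigned long long guest = TSX_CTRL_RTM_DISABLE | TSX_CTRL_CPUID_CLEAR;

            /* CPUID_CLEAR from the guest is dropped; RTM_DISABLE goes through. */
            printf("%llx\n", effective_msr(guest, 0, mask)); /* prints 1 */
            return 0;
    }
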
@@ -6759,7 +6782,7 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
cpu = get_cpu();
vmx_vcpu_load(&vmx->vcpu, cpu);
vmx->vcpu.cpu = cpu;
- vmx_vcpu_setup(vmx);
+ init_vmcs(vmx);
vmx_vcpu_put(&vmx->vcpu);
put_cpu();
if (cpu_need_virtualize_apic_accesses(&vmx->vcpu)) {
@@ -6979,6 +7002,7 @@ static void nested_vmx_cr_fixed1_bits_update(struct kvm_vcpu *vcpu)
cr4_fixed1_update(X86_CR4_SMAP, ebx, bit(X86_FEATURE_SMAP));
cr4_fixed1_update(X86_CR4_PKE, ecx, bit(X86_FEATURE_PKU));
cr4_fixed1_update(X86_CR4_UMIP, ecx, bit(X86_FEATURE_UMIP));
+ cr4_fixed1_update(X86_CR4_LA57, ecx, bit(X86_FEATURE_LA57));
#undef cr4_fixed1_update
}
@@ -7073,6 +7097,9 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
+ /* xsaves_enabled is recomputed in vmx_compute_secondary_exec_control(). */
+ vcpu->arch.xsaves_enabled = false;
+
if (cpu_has_secondary_exec_ctrls()) {
vmx_compute_secondary_exec_control(vmx);
vmcs_set_secondary_exec_control(vmx);
@@ -7080,10 +7107,12 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
if (nested_vmx_allowed(vcpu))
to_vmx(vcpu)->msr_ia32_feature_control_valid_bits |=
+ FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX |
FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
else
to_vmx(vcpu)->msr_ia32_feature_control_valid_bits &=
- ~FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
+ ~(FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX |
+ FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX);
if (nested_vmx_allowed(vcpu)) {
nested_vmx_cr_fixed1_bits_update(vcpu);
@@ -7093,6 +7122,15 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
if (boot_cpu_has(X86_FEATURE_INTEL_PT) &&
guest_cpuid_has(vcpu, X86_FEATURE_INTEL_PT))
update_intel_pt_cfg(vcpu);
+
+ if (boot_cpu_has(X86_FEATURE_RTM)) {
+ struct shared_msr_entry *msr;
+ msr = find_msr_entry(vmx, MSR_IA32_TSX_CTRL);
+ if (msr) {
+ bool enabled = guest_cpuid_has(vcpu, X86_FEATURE_RTM);
+ vmx_set_guest_msr(vmx, msr, enabled ? 0 : TSX_CTRL_RTM_DISABLE);
+ }
+ }
}
static void vmx_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
@@ -7581,9 +7619,6 @@ static __init int hardware_setup(void)
WARN_ONCE(host_bndcfgs, "KVM: BNDCFGS in host will be lost");
}
- if (boot_cpu_has(X86_FEATURE_XSAVES))
- rdmsrl(MSR_IA32_XSS, host_xss);
-
if (!cpu_has_vmx_vpid() || !cpu_has_vmx_invvpid() ||
!(cpu_has_vmx_invvpid_single() || cpu_has_vmx_invvpid_global()))
enable_vpid = 0;
@@ -7764,7 +7799,6 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
.get_cpl = vmx_get_cpl,
.get_cs_db_l_bits = vmx_get_cs_db_l_bits,
.decache_cr0_guest_bits = vmx_decache_cr0_guest_bits,
- .decache_cr3 = vmx_decache_cr3,
.decache_cr4_guest_bits = vmx_decache_cr4_guest_bits,
.set_cr0 = vmx_set_cr0,
.set_cr3 = vmx_set_cr3,
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index bee16687dc0b..7c1b978b2df4 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -22,11 +22,11 @@ extern u32 get_umwait_control_msr(void);
#define X2APIC_MSR(r) (APIC_BASE_MSR + ((r) >> 4))
-#define NR_AUTOLOAD_MSRS 8
+#define NR_LOADSTORE_MSRS 8
struct vmx_msrs {
unsigned int nr;
- struct vmx_msr_entry val[NR_AUTOLOAD_MSRS];
+ struct vmx_msr_entry val[NR_LOADSTORE_MSRS];
};
struct shared_msr_entry {
@@ -167,6 +167,9 @@ struct nested_vmx {
u64 vmcs01_debugctl;
u64 vmcs01_guest_bndcfgs;
+ /* L1's TPR threshold, migrated back to vmcs01 if L2 writes to L1's CR8 directly */
+ int l1_tpr_threshold;
+
u16 vpid02;
u16 last_vpid;
@@ -230,6 +233,10 @@ struct vcpu_vmx {
struct vmx_msrs host;
} msr_autoload;
+ struct msr_autostore {
+ struct vmx_msrs guest;
+ } msr_autostore;
+
struct {
int vm86_active;
ulong save_rflags;
@@ -334,6 +341,7 @@ void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr);
void pt_update_intercept_for_msr(struct vcpu_vmx *vmx);
void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp);
+int vmx_find_msr_index(struct vmx_msrs *m, u32 msr);
#define POSTED_INTR_ON 0
#define POSTED_INTR_SN 1
@@ -355,6 +363,11 @@ static inline int pi_test_and_set_pir(int vector, struct pi_desc *pi_desc)
return test_and_set_bit(vector, (unsigned long *)pi_desc->pir);
}
+static inline bool pi_is_pir_empty(struct pi_desc *pi_desc)
+{
+ return bitmap_empty((unsigned long *)pi_desc->pir, NR_VECTORS);
+}
+
static inline void pi_set_sn(struct pi_desc *pi_desc)
{
set_bit(POSTED_INTR_SN,
@@ -373,6 +386,12 @@ static inline void pi_clear_on(struct pi_desc *pi_desc)
(unsigned long *)&pi_desc->control);
}
+static inline void pi_clear_sn(struct pi_desc *pi_desc)
+{
+ clear_bit(POSTED_INTR_SN,
+ (unsigned long *)&pi_desc->control);
+}
+
static inline int pi_test_on(struct pi_desc *pi_desc)
{
return test_bit(POSTED_INTR_ON,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index ff395f812719..cf917139de6b 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -68,6 +68,7 @@
#include <asm/mshyperv.h>
#include <asm/hypervisor.h>
#include <asm/intel_pt.h>
+#include <asm/emulate_prefix.h>
#include <clocksource/hyperv_timer.h>
#define CREATE_TRACE_POINTS
@@ -176,6 +177,8 @@ struct kvm_shared_msrs {
static struct kvm_shared_msrs_global __read_mostly shared_msrs_global;
static struct kvm_shared_msrs __percpu *shared_msrs;
+static u64 __read_mostly host_xss;
+
struct kvm_stats_debugfs_item debugfs_entries[] = {
{ "pf_fixed", VCPU_STAT(pf_fixed) },
{ "pf_guest", VCPU_STAT(pf_guest) },
@@ -213,6 +216,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
{ "mmu_unsync", VM_STAT(mmu_unsync) },
{ "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
{ "largepages", VM_STAT(lpages, .mode = 0444) },
+ { "nx_largepages_splitted", VM_STAT(nx_lpage_splits, .mode = 0444) },
{ "max_mmu_page_hash_collisions",
VM_STAT(max_mmu_page_hash_collisions) },
{ NULL }
@@ -259,23 +263,6 @@ static void kvm_on_user_return(struct user_return_notifier *urn)
}
}
-static void shared_msr_update(unsigned slot, u32 msr)
-{
- u64 value;
- unsigned int cpu = smp_processor_id();
- struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
-
- /* only read, and nobody should modify it at this time,
- * so don't need lock */
- if (slot >= shared_msrs_global.nr) {
- printk(KERN_ERR "kvm: invalid MSR slot!");
- return;
- }
- rdmsrl_safe(msr, &value);
- smsr->values[slot].host = value;
- smsr->values[slot].curr = value;
-}
-
void kvm_define_shared_msr(unsigned slot, u32 msr)
{
BUG_ON(slot >= KVM_NR_SHARED_MSRS);
@@ -287,10 +274,16 @@ EXPORT_SYMBOL_GPL(kvm_define_shared_msr);
static void kvm_shared_msr_cpu_online(void)
{
- unsigned i;
+ unsigned int cpu = smp_processor_id();
+ struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
+ u64 value;
+ int i;
- for (i = 0; i < shared_msrs_global.nr; ++i)
- shared_msr_update(i, shared_msrs_global.msrs[i]);
+ for (i = 0; i < shared_msrs_global.nr; ++i) {
+ rdmsrl_safe(shared_msrs_global.msrs[i], &value);
+ smsr->values[i].host = value;
+ smsr->values[i].curr = value;
+ }
}
int kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
@@ -299,13 +292,14 @@ int kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
int err;
- if (((value ^ smsr->values[slot].curr) & mask) == 0)
+ value = (value & mask) | (smsr->values[slot].host & ~mask);
+ if (value == smsr->values[slot].curr)
return 0;
- smsr->values[slot].curr = value;
err = wrmsrl_safe(shared_msrs_global.msrs[slot], value);
if (err)
return 1;
+ smsr->values[slot].curr = value;
if (!smsr->registered) {
smsr->urn.on_user_return = kvm_on_user_return;
user_return_notifier_register(&smsr->urn);
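
Two behaviors change in kvm_set_shared_msr(): the guest value is merged with the host value under the mask before the compare, and ->curr is updated only after the wrmsr succeeds, so a faulting write no longer desynchronizes the cached value. A user-space sketch of that flow:

    #include <stdio.h>

    struct slot { unsigned long long host, curr; };

    static int wrmsr_count; /* counts simulated hardware writes */

    static int set_shared_msr(struct slot *s, unsigned long long value,
                              unsigned long long mask)
    {
            /* Merge: guest bits where the mask allows, host bits elsewhere. */
            value = (value & mask) | (s->host & ~mask);
            if (value == s->curr)
                    return 0;  /* nothing changed, skip the expensive wrmsr */
            wrmsr_count++;     /* stands in for wrmsrl_safe() */
            s->curr = value;   /* updated only after the write succeeds */
            return 0;
    }

    int main(void)
    {
            struct slot s = { .host = 0xff00, .curr = 0xff00 };

            set_shared_msr(&s, 0x0001, 0x00ff); /* writes 0xff01 */
            set_shared_msr(&s, 0x0001, 0x00ff); /* no-op, value unchanged */
            printf("writes=%d curr=%llx\n", wrmsr_count, s.curr);
            return 0;
    }
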
@@ -708,10 +702,8 @@ int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3)
ret = 1;
memcpy(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs));
- __set_bit(VCPU_EXREG_PDPTR,
- (unsigned long *)&vcpu->arch.regs_avail);
- __set_bit(VCPU_EXREG_PDPTR,
- (unsigned long *)&vcpu->arch.regs_dirty);
+ kvm_register_mark_dirty(vcpu, VCPU_EXREG_PDPTR);
+
out:
return ret;
@@ -721,7 +713,6 @@ EXPORT_SYMBOL_GPL(load_pdptrs);
bool pdptrs_changed(struct kvm_vcpu *vcpu)
{
u64 pdpte[ARRAY_SIZE(vcpu->arch.walk_mmu->pdptrs)];
- bool changed = true;
int offset;
gfn_t gfn;
int r;
@@ -729,8 +720,7 @@ bool pdptrs_changed(struct kvm_vcpu *vcpu)
if (!is_pae_paging(vcpu))
return false;
- if (!test_bit(VCPU_EXREG_PDPTR,
- (unsigned long *)&vcpu->arch.regs_avail))
+ if (!kvm_register_is_available(vcpu, VCPU_EXREG_PDPTR))
return true;
gfn = (kvm_read_cr3(vcpu) & 0xffffffe0ul) >> PAGE_SHIFT;
@@ -738,11 +728,9 @@ bool pdptrs_changed(struct kvm_vcpu *vcpu)
r = kvm_read_nested_guest_page(vcpu, gfn, pdpte, offset, sizeof(pdpte),
PFERR_USER_MASK | PFERR_WRITE_MASK);
if (r < 0)
- goto out;
- changed = memcmp(pdpte, vcpu->arch.walk_mmu->pdptrs, sizeof(pdpte)) != 0;
-out:
+ return true;
- return changed;
+ return memcmp(pdpte, vcpu->arch.walk_mmu->pdptrs, sizeof(pdpte)) != 0;
}
EXPORT_SYMBOL_GPL(pdptrs_changed);
@@ -811,27 +799,34 @@ void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
}
EXPORT_SYMBOL_GPL(kvm_lmsw);
-void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
+void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu)
{
- if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) &&
- !vcpu->guest_xcr0_loaded) {
- /* kvm_set_xcr() also depends on this */
+ if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE)) {
+
if (vcpu->arch.xcr0 != host_xcr0)
xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
- vcpu->guest_xcr0_loaded = 1;
+
+ if (vcpu->arch.xsaves_enabled &&
+ vcpu->arch.ia32_xss != host_xss)
+ wrmsrl(MSR_IA32_XSS, vcpu->arch.ia32_xss);
}
}
-EXPORT_SYMBOL_GPL(kvm_load_guest_xcr0);
+EXPORT_SYMBOL_GPL(kvm_load_guest_xsave_state);
-void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
+void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu)
{
- if (vcpu->guest_xcr0_loaded) {
+ if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE)) {
+
if (vcpu->arch.xcr0 != host_xcr0)
xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0);
- vcpu->guest_xcr0_loaded = 0;
+
+ if (vcpu->arch.xsaves_enabled &&
+ vcpu->arch.ia32_xss != host_xss)
+ wrmsrl(MSR_IA32_XSS, host_xss);
}
+
}
-EXPORT_SYMBOL_GPL(kvm_put_guest_xcr0);
+EXPORT_SYMBOL_GPL(kvm_load_host_xsave_state);
static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
{
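
The renamed helpers swap the whole guest/host xsave state rather than just XCR0; both sides skip the MSR write entirely when the guest's IA32_XSS equals the host's. A trimmed sketch of the XSS half, with the MSR access stubbed out:

    #include <stdio.h>

    static unsigned long long hw_xss; /* stands in for the real MSR */
    static int msr_writes;

    static void wrmsr_xss(unsigned long long v) { hw_xss = v; msr_writes++; }

    struct vcpu { int xsaves_enabled; unsigned long long ia32_xss; };

    static const unsigned long long host_xss = 0;

    static void load_guest_xsave_state(struct vcpu *v)
    {
            if (v->xsaves_enabled && v->ia32_xss != host_xss)
                    wrmsr_xss(v->ia32_xss); /* swap in the guest value */
    }

    static void load_host_xsave_state(struct vcpu *v)
    {
            if (v->xsaves_enabled && v->ia32_xss != host_xss)
                    wrmsr_xss(host_xss);    /* restore the host value */
    }

    int main(void)
    {
            struct vcpu v = { .xsaves_enabled = 1, .ia32_xss = 0 };

            load_guest_xsave_state(&v); /* equal values: no MSR write */
            load_host_xsave_state(&v);
            printf("writes=%d xss=%llu\n", msr_writes, hw_xss); /* 0 0 */
            return 0;
    }
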
@@ -983,7 +978,7 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
kvm_mmu_new_cr3(vcpu, cr3, skip_tlb_flush);
vcpu->arch.cr3 = cr3;
- __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
+ kvm_register_mark_available(vcpu, VCPU_EXREG_CR3);
return 0;
}
@@ -1132,13 +1127,15 @@ EXPORT_SYMBOL_GPL(kvm_rdpmc);
* List of msr numbers which we expose to userspace through KVM_GET_MSRS
* and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
*
- * This list is modified at module load time to reflect the
+ * The three MSR lists (msrs_to_save, emulated_msrs, msr_based_features)
+ * are populated with the supported MSRs from the corresponding const lists.
+ * msrs_to_save is selected from msrs_to_save_all so as to reflect the
* capabilities of the host cpu. This capabilities test skips MSRs that are
- * kvm-specific. Those are put in emulated_msrs; filtering of emulated_msrs
+ * kvm-specific. Those are put in emulated_msrs_all; filtering of emulated_msrs
* may depend on host virtualization features rather than host cpu features.
*/
-static u32 msrs_to_save[] = {
+static const u32 msrs_to_save_all[] = {
MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
MSR_STAR,
#ifdef CONFIG_X86_64
@@ -1179,9 +1176,10 @@ static u32 msrs_to_save[] = {
MSR_ARCH_PERFMON_EVENTSEL0 + 16, MSR_ARCH_PERFMON_EVENTSEL0 + 17,
};
+static u32 msrs_to_save[ARRAY_SIZE(msrs_to_save_all)];
static unsigned num_msrs_to_save;
-static u32 emulated_msrs[] = {
+static const u32 emulated_msrs_all[] = {
MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
@@ -1220,7 +1218,7 @@ static u32 emulated_msrs[] = {
* by arch/x86/kvm/vmx/nested.c based on CPUID or other MSRs.
* We always support the "true" VMX control MSRs, even if the host
* processor does not, so I am putting these registers here rather
- * than in msrs_to_save.
+ * than in msrs_to_save_all.
*/
MSR_IA32_VMX_BASIC,
MSR_IA32_VMX_TRUE_PINBASED_CTLS,
@@ -1239,13 +1237,14 @@ static u32 emulated_msrs[] = {
MSR_KVM_POLL_CONTROL,
};
+static u32 emulated_msrs[ARRAY_SIZE(emulated_msrs_all)];
static unsigned num_emulated_msrs;
/*
* List of msr numbers which are used to expose MSR-based features that
* can be used by a hypervisor to validate requested CPU features.
*/
-static u32 msr_based_features[] = {
+static const u32 msr_based_features_all[] = {
MSR_IA32_VMX_BASIC,
MSR_IA32_VMX_TRUE_PINBASED_CTLS,
MSR_IA32_VMX_PINBASED_CTLS,
@@ -1270,6 +1269,7 @@ static u32 msr_based_features[] = {
MSR_IA32_ARCH_CAPABILITIES,
};
+static u32 msr_based_features[ARRAY_SIZE(msr_based_features_all)];
static unsigned int num_msr_based_features;
static u64 kvm_get_arch_capabilities(void)
@@ -1280,6 +1280,14 @@ static u64 kvm_get_arch_capabilities(void)
rdmsrl(MSR_IA32_ARCH_CAPABILITIES, data);
/*
+ * If nx_huge_pages is enabled, KVM's shadow paging will ensure that
+ * the nested hypervisor runs with NX huge pages. If it is not
+ * enabled, L1 is anyway vulnerable to ITLB_MULTIHIT exploits from
+ * other L1 guests, so it need not worry about its own (L2) guests.
+ */
+ data |= ARCH_CAP_PSCHANGE_MC_NO;
+
+ /*
* If we're doing cache flushes (either "always" or "cond")
* we will do one whenever the guest does a vmlaunch/vmresume.
* If an outer hypervisor is doing the cache flush for us
@@ -1298,6 +1306,17 @@ static u64 kvm_get_arch_capabilities(void)
if (!boot_cpu_has_bug(X86_BUG_MDS))
data |= ARCH_CAP_MDS_NO;
+ /*
+ * On TAA affected systems:
+ * - nothing to do if TSX is disabled on the host.
+ * - we emulate TSX_CTRL if present on the host.
+ * This lets the guest use VERW to clear CPU buffers.
+ */
+ if (!boot_cpu_has(X86_FEATURE_RTM))
+ data &= ~(ARCH_CAP_TAA_NO | ARCH_CAP_TSX_CTRL_MSR);
+ else if (!boot_cpu_has_bug(X86_BUG_TAA))
+ data |= ARCH_CAP_TAA_NO;
+
return data;
}
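
kvm_get_arch_capabilities() thus synthesizes PSCHANGE_MC_NO unconditionally and derives the TAA bits from host TSX state. A condensed sketch of just that derivation; the bit positions here are placeholders, not the architectural values:

    #include <stdbool.h>
    #include <stdio.h>

    /* Bit positions are illustrative, not the architectural ones. */
    #define ARCH_CAP_PSCHANGE_MC_NO (1ULL << 0)
    #define ARCH_CAP_TAA_NO         (1ULL << 1)
    #define ARCH_CAP_TSX_CTRL_MSR   (1ULL << 2)

    static unsigned long long
    arch_capabilities(unsigned long long host_caps, bool host_has_rtm,
                      bool host_bug_taa)
    {
            unsigned long long data = host_caps;

            data |= ARCH_CAP_PSCHANGE_MC_NO; /* KVM mitigates ITLB multihit */

            if (!host_has_rtm)      /* TSX off: neither bit is meaningful */
                    data &= ~(ARCH_CAP_TAA_NO | ARCH_CAP_TSX_CTRL_MSR);
            else if (!host_bug_taa) /* TSX on but host not affected */
                    data |= ARCH_CAP_TAA_NO;

            return data;
    }

    int main(void)
    {
            printf("%llx\n", arch_capabilities(ARCH_CAP_TSX_CTRL_MSR, true, true));
            return 0;
    }
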
@@ -1444,8 +1463,8 @@ static int __kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data,
* Returns 0 on success, non-0 otherwise.
* Assumes vcpu_load() was already called.
*/
-static int __kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data,
- bool host_initiated)
+int __kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data,
+ bool host_initiated)
{
struct msr_data msr;
int ret;
@@ -1520,20 +1539,25 @@ static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
}
#ifdef CONFIG_X86_64
+struct pvclock_clock {
+ int vclock_mode;
+ u64 cycle_last;
+ u64 mask;
+ u32 mult;
+ u32 shift;
+};
+
struct pvclock_gtod_data {
seqcount_t seq;
- struct { /* extract of a clocksource struct */
- int vclock_mode;
- u64 cycle_last;
- u64 mask;
- u32 mult;
- u32 shift;
- } clock;
+ struct pvclock_clock clock; /* extract of a clocksource struct */
+ struct pvclock_clock raw_clock; /* extract of a clocksource struct */
+ u64 boot_ns_raw;
u64 boot_ns;
u64 nsec_base;
u64 wall_time_sec;
+ u64 monotonic_raw_nsec;
};
static struct pvclock_gtod_data pvclock_gtod_data;
@@ -1541,9 +1565,10 @@ static struct pvclock_gtod_data pvclock_gtod_data;
static void update_pvclock_gtod(struct timekeeper *tk)
{
struct pvclock_gtod_data *vdata = &pvclock_gtod_data;
- u64 boot_ns;
+ u64 boot_ns, boot_ns_raw;
boot_ns = ktime_to_ns(ktime_add(tk->tkr_mono.base, tk->offs_boot));
+ boot_ns_raw = ktime_to_ns(ktime_add(tk->tkr_raw.base, tk->offs_boot));
write_seqcount_begin(&vdata->seq);
@@ -1554,11 +1579,20 @@ static void update_pvclock_gtod(struct timekeeper *tk)
vdata->clock.mult = tk->tkr_mono.mult;
vdata->clock.shift = tk->tkr_mono.shift;
+ vdata->raw_clock.vclock_mode = tk->tkr_raw.clock->archdata.vclock_mode;
+ vdata->raw_clock.cycle_last = tk->tkr_raw.cycle_last;
+ vdata->raw_clock.mask = tk->tkr_raw.mask;
+ vdata->raw_clock.mult = tk->tkr_raw.mult;
+ vdata->raw_clock.shift = tk->tkr_raw.shift;
+
vdata->boot_ns = boot_ns;
vdata->nsec_base = tk->tkr_mono.xtime_nsec;
vdata->wall_time_sec = tk->xtime_sec;
+ vdata->boot_ns_raw = boot_ns_raw;
+ vdata->monotonic_raw_nsec = tk->tkr_raw.xtime_nsec;
+
write_seqcount_end(&vdata->seq);
}
#endif
@@ -1982,21 +2016,21 @@ static u64 read_tsc(void)
return last;
}
-static inline u64 vgettsc(u64 *tsc_timestamp, int *mode)
+static inline u64 vgettsc(struct pvclock_clock *clock, u64 *tsc_timestamp,
+ int *mode)
{
long v;
- struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
u64 tsc_pg_val;
- switch (gtod->clock.vclock_mode) {
+ switch (clock->vclock_mode) {
case VCLOCK_HVCLOCK:
tsc_pg_val = hv_read_tsc_page_tsc(hv_get_tsc_page(),
tsc_timestamp);
if (tsc_pg_val != U64_MAX) {
/* TSC page valid */
*mode = VCLOCK_HVCLOCK;
- v = (tsc_pg_val - gtod->clock.cycle_last) &
- gtod->clock.mask;
+ v = (tsc_pg_val - clock->cycle_last) &
+ clock->mask;
} else {
/* TSC page invalid */
*mode = VCLOCK_NONE;
@@ -2005,8 +2039,8 @@ static inline u64 vgettsc(u64 *tsc_timestamp, int *mode)
case VCLOCK_TSC:
*mode = VCLOCK_TSC;
*tsc_timestamp = read_tsc();
- v = (*tsc_timestamp - gtod->clock.cycle_last) &
- gtod->clock.mask;
+ v = (*tsc_timestamp - clock->cycle_last) &
+ clock->mask;
break;
default:
*mode = VCLOCK_NONE;
@@ -2015,10 +2049,10 @@ static inline u64 vgettsc(u64 *tsc_timestamp, int *mode)
if (*mode == VCLOCK_NONE)
*tsc_timestamp = v = 0;
- return v * gtod->clock.mult;
+ return v * clock->mult;
}
-static int do_monotonic_boot(s64 *t, u64 *tsc_timestamp)
+static int do_monotonic_raw(s64 *t, u64 *tsc_timestamp)
{
struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
unsigned long seq;
@@ -2027,10 +2061,10 @@ static int do_monotonic_boot(s64 *t, u64 *tsc_timestamp)
do {
seq = read_seqcount_begin(&gtod->seq);
- ns = gtod->nsec_base;
- ns += vgettsc(tsc_timestamp, &mode);
+ ns = gtod->monotonic_raw_nsec;
+ ns += vgettsc(&gtod->raw_clock, tsc_timestamp, &mode);
ns >>= gtod->clock.shift;
- ns += gtod->boot_ns;
+ ns += gtod->boot_ns_raw;
} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
*t = ns;
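
do_monotonic_raw() follows the standard clocksource read pattern: sample the counter under a seqcount, mask the delta against cycle_last, scale by mult, shift down. The kernel accumulates in shifted units and shifts once at the end; the sketch below folds that into a single helper, with illustrative mult/shift values:

    #include <stdio.h>

    struct pvclock_clock {
            unsigned long long cycle_last, mask;
            unsigned int mult, shift;
    };

    /* Convert a raw cycle count to nanoseconds, as vgettsc() does:
     * the delta is masked, scaled by mult, then shifted down. */
    static unsigned long long
    cycles_to_ns(const struct pvclock_clock *c, unsigned long long cycles,
                 unsigned long long base_ns)
    {
            unsigned long long delta = (cycles - c->cycle_last) & c->mask;

            return base_ns + ((delta * c->mult) >> c->shift);
    }

    int main(void)
    {
            /* mult/shift chosen so one cycle == 1 ns (1 * 2^20 >> 20). */
            struct pvclock_clock c = {
                    .cycle_last = 1000, .mask = ~0ULL,
                    .mult = 1 << 20, .shift = 20,
            };

            printf("%llu\n", cycles_to_ns(&c, 1500, 0)); /* 500 */
            return 0;
    }
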
@@ -2048,7 +2082,7 @@ static int do_realtime(struct timespec64 *ts, u64 *tsc_timestamp)
seq = read_seqcount_begin(&gtod->seq);
ts->tv_sec = gtod->wall_time_sec;
ns = gtod->nsec_base;
- ns += vgettsc(tsc_timestamp, &mode);
+ ns += vgettsc(&gtod->clock, tsc_timestamp, &mode);
ns >>= gtod->clock.shift;
} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
@@ -2065,7 +2099,7 @@ static bool kvm_get_time_and_clockread(s64 *kernel_ns, u64 *tsc_timestamp)
if (!gtod_is_based_on_tsc(pvclock_gtod_data.clock.vclock_mode))
return false;
- return gtod_is_based_on_tsc(do_monotonic_boot(kernel_ns,
+ return gtod_is_based_on_tsc(do_monotonic_raw(kernel_ns,
tsc_timestamp));
}
@@ -2688,6 +2722,20 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
case MSR_IA32_TSC:
kvm_write_tsc(vcpu, msr_info);
break;
+ case MSR_IA32_XSS:
+ if (!msr_info->host_initiated &&
+ !guest_cpuid_has(vcpu, X86_FEATURE_XSAVES))
+ return 1;
+ /*
+ * We do support PT if kvm_x86_ops->pt_supported(), but we do
+ * not support IA32_XSS[bit 8]. Guests will have to use
+ * RDMSR/WRMSR rather than XSAVES/XRSTORS to save/restore PT
+ * MSRs.
+ */
+ if (data != 0)
+ return 1;
+ vcpu->arch.ia32_xss = data;
+ break;
case MSR_SMI_COUNT:
if (!msr_info->host_initiated)
return 1;
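
With IA32_XSS handling moved here from vmx.c, the setter has the usual shape: permission check, then reserved-bit check, then store. A minimal sketch, where returning 1 stands in for the #GP injection path:

    #include <stdio.h>

    struct vcpu { int has_xsaves; unsigned long long ia32_xss; };

    /* Returns 0 on success, 1 to inject #GP, mirroring the patch's shape. */
    static int set_xss(struct vcpu *v, unsigned long long data, int host_init)
    {
            if (!host_init && !v->has_xsaves)
                    return 1; /* guest lacks XSAVES: reject */
            if (data != 0)
                    return 1; /* no IA32_XSS bits are supported yet */
            v->ia32_xss = data;
            return 0;
    }

    int main(void)
    {
            struct vcpu v = { .has_xsaves = 1 };
            int a = set_xss(&v, 0, 0);         /* 0: accepted */
            int b = set_xss(&v, 1ULL << 8, 0); /* 1: reserved bit set */

            printf("%d %d\n", a, b);
            return 0;
    }
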
@@ -3015,6 +3063,12 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1:
return get_msr_mce(vcpu, msr_info->index, &msr_info->data,
msr_info->host_initiated);
+ case MSR_IA32_XSS:
+ if (!msr_info->host_initiated &&
+ !guest_cpuid_has(vcpu, X86_FEATURE_XSAVES))
+ return 1;
+ msr_info->data = vcpu->arch.ia32_xss;
+ break;
case MSR_K7_CLK_CTL:
/*
* Provide expected ramp-up count for K7. All other
@@ -3792,12 +3846,13 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK;
else
vcpu->arch.hflags &= ~HF_SMM_INSIDE_NMI_MASK;
- if (lapic_in_kernel(vcpu)) {
- if (events->smi.latched_init)
- set_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events);
- else
- clear_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events);
- }
+ }
+
+ if (lapic_in_kernel(vcpu)) {
+ if (events->smi.latched_init)
+ set_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events);
+ else
+ clear_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events);
}
}
@@ -4388,6 +4443,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
case KVM_SET_NESTED_STATE: {
struct kvm_nested_state __user *user_kvm_nested_state = argp;
struct kvm_nested_state kvm_state;
+ int idx;
r = -EINVAL;
if (!kvm_x86_ops->set_nested_state)
@@ -4411,7 +4467,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
&& !(kvm_state.flags & KVM_STATE_NESTED_GUEST_MODE))
break;
+ idx = srcu_read_lock(&vcpu->kvm->srcu);
r = kvm_x86_ops->set_nested_state(vcpu, user_kvm_nested_state, &kvm_state);
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
break;
}
case KVM_GET_SUPPORTED_HV_CPUID: {
@@ -4913,9 +4971,6 @@ set_identity_unlock:
if (!irqchip_kernel(kvm))
goto set_irqchip_out;
r = kvm_vm_ioctl_set_irqchip(kvm, chip);
- if (r)
- goto set_irqchip_out;
- r = 0;
set_irqchip_out:
kfree(chip);
break;
@@ -5090,22 +5145,26 @@ static void kvm_init_msr_list(void)
{
struct x86_pmu_capability x86_pmu;
u32 dummy[2];
- unsigned i, j;
+ unsigned i;
BUILD_BUG_ON_MSG(INTEL_PMC_MAX_FIXED != 4,
- "Please update the fixed PMCs in msrs_to_save[]");
+ "Please update the fixed PMCs in msrs_to_save_all[]");
perf_get_x86_pmu_capability(&x86_pmu);
- for (i = j = 0; i < ARRAY_SIZE(msrs_to_save); i++) {
- if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0)
+ num_msrs_to_save = 0;
+ num_emulated_msrs = 0;
+ num_msr_based_features = 0;
+
+ for (i = 0; i < ARRAY_SIZE(msrs_to_save_all); i++) {
+ if (rdmsr_safe(msrs_to_save_all[i], &dummy[0], &dummy[1]) < 0)
continue;
/*
* Even MSRs that are valid in the host may not be exposed
* to the guests in some cases.
*/
- switch (msrs_to_save[i]) {
+ switch (msrs_to_save_all[i]) {
case MSR_IA32_BNDCFGS:
if (!kvm_mpx_supported())
continue;
@@ -5133,17 +5192,17 @@ static void kvm_init_msr_list(void)
break;
case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B: {
if (!kvm_x86_ops->pt_supported() ||
- msrs_to_save[i] - MSR_IA32_RTIT_ADDR0_A >=
+ msrs_to_save_all[i] - MSR_IA32_RTIT_ADDR0_A >=
intel_pt_validate_hw_cap(PT_CAP_num_address_ranges) * 2)
continue;
break;
case MSR_ARCH_PERFMON_PERFCTR0 ... MSR_ARCH_PERFMON_PERFCTR0 + 17:
- if (msrs_to_save[i] - MSR_ARCH_PERFMON_PERFCTR0 >=
+ if (msrs_to_save_all[i] - MSR_ARCH_PERFMON_PERFCTR0 >=
min(INTEL_PMC_MAX_GENERIC, x86_pmu.num_counters_gp))
continue;
break;
case MSR_ARCH_PERFMON_EVENTSEL0 ... MSR_ARCH_PERFMON_EVENTSEL0 + 17:
- if (msrs_to_save[i] - MSR_ARCH_PERFMON_EVENTSEL0 >=
+ if (msrs_to_save_all[i] - MSR_ARCH_PERFMON_EVENTSEL0 >=
min(INTEL_PMC_MAX_GENERIC, x86_pmu.num_counters_gp))
continue;
}
@@ -5151,34 +5210,25 @@ static void kvm_init_msr_list(void)
break;
}
- if (j < i)
- msrs_to_save[j] = msrs_to_save[i];
- j++;
+ msrs_to_save[num_msrs_to_save++] = msrs_to_save_all[i];
}
- num_msrs_to_save = j;
- for (i = j = 0; i < ARRAY_SIZE(emulated_msrs); i++) {
- if (!kvm_x86_ops->has_emulated_msr(emulated_msrs[i]))
+ for (i = 0; i < ARRAY_SIZE(emulated_msrs_all); i++) {
+ if (!kvm_x86_ops->has_emulated_msr(emulated_msrs_all[i]))
continue;
- if (j < i)
- emulated_msrs[j] = emulated_msrs[i];
- j++;
+ emulated_msrs[num_emulated_msrs++] = emulated_msrs_all[i];
}
- num_emulated_msrs = j;
- for (i = j = 0; i < ARRAY_SIZE(msr_based_features); i++) {
+ for (i = 0; i < ARRAY_SIZE(msr_based_features_all); i++) {
struct kvm_msr_entry msr;
- msr.index = msr_based_features[i];
+ msr.index = msr_based_features_all[i];
if (kvm_get_msr_feature(&msr))
continue;
- if (j < i)
- msr_based_features[j] = msr_based_features[i];
- j++;
+ msr_based_features[num_msr_based_features++] = msr_based_features_all[i];
}
- num_msr_based_features = j;
}
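
The rework keeps the complete tables const and copies the supported entries into separate runtime arrays, resetting the counts first so the function can safely run more than once. A stripped-down sketch of the filtering, with msr_present() standing in for rdmsr_safe():

    #include <stdio.h>

    static const unsigned int msrs_all[] = { 0x10, 0x49, 0x1a0, 0xd91 };

    #define NR_ALL (sizeof(msrs_all) / sizeof(msrs_all[0]))

    static unsigned int msrs[NR_ALL];
    static unsigned int num_msrs;

    /* Stand-in for rdmsr_safe(): pretend odd-numbered MSRs don't exist. */
    static int msr_present(unsigned int msr) { return (msr & 1) == 0; }

    static void init_msr_list(void)
    {
            num_msrs = 0; /* reset first, so reruns are safe */
            for (unsigned int i = 0; i < NR_ALL; i++) {
                    if (!msr_present(msrs_all[i]))
                            continue;
                    msrs[num_msrs++] = msrs_all[i];
            }
    }

    int main(void)
    {
            init_msr_list();
            printf("%u supported\n", num_msrs); /* 2 supported */
            return 0;
    }
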
static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len,
@@ -5443,6 +5493,7 @@ EXPORT_SYMBOL_GPL(kvm_write_guest_virt_system);
int handle_ud(struct kvm_vcpu *vcpu)
{
+ static const char kvm_emulate_prefix[] = { __KVM_EMULATE_PREFIX };
int emul_type = EMULTYPE_TRAP_UD;
char sig[5]; /* ud2; .ascii "kvm" */
struct x86_exception e;
@@ -5450,7 +5501,7 @@ int handle_ud(struct kvm_vcpu *vcpu)
if (force_emulation_prefix &&
kvm_read_guest_virt(vcpu, kvm_get_linear_rip(vcpu),
sig, sizeof(sig), &e) == 0 &&
- memcmp(sig, "\xf\xbkvm", sizeof(sig)) == 0) {
+ memcmp(sig, kvm_emulate_prefix, sizeof(sig)) == 0) {
kvm_rip_write(vcpu, kvm_rip_read(vcpu) + sizeof(sig));
emul_type = EMULTYPE_TRAP_UD_FORCED;
}
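
The old literal "\xf\xbkvm" encoded ud2 (0x0f 0x0b) followed by "kvm"; naming the bytes as __KVM_EMULATE_PREFIX makes them explicit and lets the instruction decoder share them (see the lib/insn.c hunk at the end). A tiny sketch of the signature match:

    #include <stdio.h>
    #include <string.h>

    /* ud2 (0x0f 0x0b) followed by "kvm", per the sig[] comment above. */
    static const char kvm_emulate_prefix[] = { 0x0f, 0x0b, 'k', 'v', 'm' };

    int main(void)
    {
            char sig[5] = { 0x0f, 0x0b, 'k', 'v', 'm' }; /* bytes read at RIP */

            if (memcmp(sig, kvm_emulate_prefix, sizeof(sig)) == 0)
                    printf("forced emulation prefix found\n");
            return 0;
    }
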
@@ -6108,7 +6159,7 @@ static void emulator_set_smbase(struct x86_emulate_ctxt *ctxt, u64 smbase)
static int emulator_check_pmc(struct x86_emulate_ctxt *ctxt,
u32 pmc)
{
- return kvm_pmu_is_valid_msr_idx(emul_to_vcpu(ctxt), pmc);
+ return kvm_pmu_is_valid_rdpmc_ecx(emul_to_vcpu(ctxt), pmc);
}
static int emulator_read_pmc(struct x86_emulate_ctxt *ctxt,
@@ -7838,6 +7889,19 @@ static void process_smi(struct kvm_vcpu *vcpu)
kvm_make_request(KVM_REQ_EVENT, vcpu);
}
+void kvm_make_scan_ioapic_request_mask(struct kvm *kvm,
+ unsigned long *vcpu_bitmap)
+{
+ cpumask_var_t cpus;
+
+ zalloc_cpumask_var(&cpus, GFP_ATOMIC);
+
+ kvm_make_vcpus_request_mask(kvm, KVM_REQ_SCAN_IOAPIC,
+ vcpu_bitmap, cpus);
+
+ free_cpumask_var(cpus);
+}
+
void kvm_make_scan_ioapic_request(struct kvm *kvm)
{
kvm_make_all_cpus_request(kvm, KVM_REQ_SCAN_IOAPIC);
@@ -7915,7 +7979,6 @@ void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
*/
put_page(page);
}
-EXPORT_SYMBOL_GPL(kvm_vcpu_reload_apic_access_page);
void __kvm_request_immediate_exit(struct kvm_vcpu *vcpu)
{
@@ -8674,8 +8737,12 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
mp_state->mp_state != KVM_MP_STATE_RUNNABLE)
goto out;
- /* INITs are latched while in SMM */
- if ((is_smm(vcpu) || vcpu->arch.smi_pending) &&
+ /*
+ * KVM_MP_STATE_INIT_RECEIVED means the processor is in
+ * INIT state; latched init should be reported using
+ * KVM_SET_VCPU_EVENTS, so reject it here.
+ */
+ if ((kvm_vcpu_latch_init(vcpu) || vcpu->arch.smi_pending) &&
(mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED ||
mp_state->mp_state == KVM_MP_STATE_INIT_RECEIVED))
goto out;
@@ -8767,7 +8834,7 @@ static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
vcpu->arch.cr2 = sregs->cr2;
mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3;
vcpu->arch.cr3 = sregs->cr3;
- __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
+ kvm_register_mark_available(vcpu, VCPU_EXREG_CR3);
kvm_set_cr8(vcpu, sregs->cr8);
@@ -9294,6 +9361,9 @@ int kvm_arch_hardware_setup(void)
kvm_default_tsc_scaling_ratio = 1ULL << kvm_tsc_scaling_ratio_frac_bits;
}
+ if (boot_cpu_has(X86_FEATURE_XSAVES))
+ rdmsrl(MSR_IA32_XSS, host_xss);
+
kvm_init_msr_list();
return 0;
}
@@ -9347,7 +9417,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
goto fail_free_pio_data;
if (irqchip_in_kernel(vcpu->kvm)) {
- vcpu->arch.apicv_active = kvm_x86_ops->get_enable_apicv(vcpu);
+ vcpu->arch.apicv_active = kvm_x86_ops->get_enable_apicv(vcpu->kvm);
r = kvm_create_lapic(vcpu, lapic_timer_advance_ns);
if (r < 0)
goto fail_mmu_destroy;
@@ -9416,7 +9486,13 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu)
{
+ struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
+
vcpu->arch.l1tf_flush_l1d = true;
+ if (pmu->version && unlikely(pmu->event_count)) {
+ pmu->need_cleanup = true;
+ kvm_make_request(KVM_REQ_PMU, vcpu);
+ }
kvm_x86_ops->sched_in(vcpu, cpu);
}
@@ -9428,6 +9504,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
INIT_HLIST_HEAD(&kvm->arch.mask_notifier_list);
INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages);
+ INIT_LIST_HEAD(&kvm->arch.lpage_disallowed_mmu_pages);
INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
atomic_set(&kvm->arch.noncoherent_dma_count, 0);
@@ -9456,6 +9533,11 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
return kvm_x86_ops->vm_init(kvm);
}
+int kvm_arch_post_init_vm(struct kvm *kvm)
+{
+ return kvm_mmu_post_init_vm(kvm);
+}
+
static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
{
vcpu_load(vcpu);
@@ -9557,6 +9639,11 @@ int x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size)
}
EXPORT_SYMBOL_GPL(x86_set_memory_region);
+void kvm_arch_pre_destroy_vm(struct kvm *kvm)
+{
+ kvm_mmu_pre_destroy_vm(kvm);
+}
+
void kvm_arch_destroy_vm(struct kvm *kvm)
{
if (current->mm == kvm->mm) {
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index dbf7442a822b..29391af8871d 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -238,8 +238,7 @@ static inline bool vcpu_match_mmio_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
return false;
}
-static inline unsigned long kvm_register_readl(struct kvm_vcpu *vcpu,
- enum kvm_reg reg)
+static inline unsigned long kvm_register_readl(struct kvm_vcpu *vcpu, int reg)
{
unsigned long val = kvm_register_read(vcpu, reg);
@@ -247,8 +246,7 @@ static inline unsigned long kvm_register_readl(struct kvm_vcpu *vcpu,
}
static inline void kvm_register_writel(struct kvm_vcpu *vcpu,
- enum kvm_reg reg,
- unsigned long val)
+ int reg, unsigned long val)
{
if (!is_64_bit_mode(vcpu))
val = (u32)val;
@@ -260,6 +258,11 @@ static inline bool kvm_check_has_quirk(struct kvm *kvm, u64 quirk)
return !(kvm->arch.disabled_quirks & quirk);
}
+static inline bool kvm_vcpu_latch_init(struct kvm_vcpu *vcpu)
+{
+ return is_smm(vcpu) || kvm_x86_ops->apic_init_signal_blocked(vcpu);
+}
+
void kvm_set_pending_timer(struct kvm_vcpu *vcpu);
void kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);
@@ -366,7 +369,7 @@ static inline bool kvm_pat_valid(u64 data)
return (data | ((data & 0x0202020202020202ull) << 1)) == data;
}
-void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu);
-void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu);
+void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu);
+void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu);
#endif
diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
index e0788bade5ab..3b6544111ac9 100644
--- a/arch/x86/lib/atomic64_386_32.S
+++ b/arch/x86/lib/atomic64_386_32.S
@@ -20,10 +20,10 @@
#define BEGIN(op) \
.macro endp; \
-ENDPROC(atomic64_##op##_386); \
+SYM_FUNC_END(atomic64_##op##_386); \
.purgem endp; \
.endm; \
-ENTRY(atomic64_##op##_386); \
+SYM_FUNC_START(atomic64_##op##_386); \
LOCK v;
#define ENDP endp
diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
index 843d978ee341..1c5c81c16b06 100644
--- a/arch/x86/lib/atomic64_cx8_32.S
+++ b/arch/x86/lib/atomic64_cx8_32.S
@@ -16,12 +16,12 @@
cmpxchg8b (\reg)
.endm
-ENTRY(atomic64_read_cx8)
+SYM_FUNC_START(atomic64_read_cx8)
read64 %ecx
ret
-ENDPROC(atomic64_read_cx8)
+SYM_FUNC_END(atomic64_read_cx8)
-ENTRY(atomic64_set_cx8)
+SYM_FUNC_START(atomic64_set_cx8)
1:
/* we don't need LOCK_PREFIX since aligned 64-bit writes
* are atomic on 586 and newer */
@@ -29,19 +29,19 @@ ENTRY(atomic64_set_cx8)
jne 1b
ret
-ENDPROC(atomic64_set_cx8)
+SYM_FUNC_END(atomic64_set_cx8)
-ENTRY(atomic64_xchg_cx8)
+SYM_FUNC_START(atomic64_xchg_cx8)
1:
LOCK_PREFIX
cmpxchg8b (%esi)
jne 1b
ret
-ENDPROC(atomic64_xchg_cx8)
+SYM_FUNC_END(atomic64_xchg_cx8)
.macro addsub_return func ins insc
-ENTRY(atomic64_\func\()_return_cx8)
+SYM_FUNC_START(atomic64_\func\()_return_cx8)
pushl %ebp
pushl %ebx
pushl %esi
@@ -69,14 +69,14 @@ ENTRY(atomic64_\func\()_return_cx8)
popl %ebx
popl %ebp
ret
-ENDPROC(atomic64_\func\()_return_cx8)
+SYM_FUNC_END(atomic64_\func\()_return_cx8)
.endm
addsub_return add add adc
addsub_return sub sub sbb
.macro incdec_return func ins insc
-ENTRY(atomic64_\func\()_return_cx8)
+SYM_FUNC_START(atomic64_\func\()_return_cx8)
pushl %ebx
read64 %esi
@@ -94,13 +94,13 @@ ENTRY(atomic64_\func\()_return_cx8)
movl %ecx, %edx
popl %ebx
ret
-ENDPROC(atomic64_\func\()_return_cx8)
+SYM_FUNC_END(atomic64_\func\()_return_cx8)
.endm
incdec_return inc add adc
incdec_return dec sub sbb
-ENTRY(atomic64_dec_if_positive_cx8)
+SYM_FUNC_START(atomic64_dec_if_positive_cx8)
pushl %ebx
read64 %esi
@@ -119,9 +119,9 @@ ENTRY(atomic64_dec_if_positive_cx8)
movl %ecx, %edx
popl %ebx
ret
-ENDPROC(atomic64_dec_if_positive_cx8)
+SYM_FUNC_END(atomic64_dec_if_positive_cx8)
-ENTRY(atomic64_add_unless_cx8)
+SYM_FUNC_START(atomic64_add_unless_cx8)
pushl %ebp
pushl %ebx
/* these just push these two parameters on the stack */
@@ -155,9 +155,9 @@ ENTRY(atomic64_add_unless_cx8)
jne 2b
xorl %eax, %eax
jmp 3b
-ENDPROC(atomic64_add_unless_cx8)
+SYM_FUNC_END(atomic64_add_unless_cx8)
-ENTRY(atomic64_inc_not_zero_cx8)
+SYM_FUNC_START(atomic64_inc_not_zero_cx8)
pushl %ebx
read64 %esi
@@ -177,4 +177,4 @@ ENTRY(atomic64_inc_not_zero_cx8)
3:
popl %ebx
ret
-ENDPROC(atomic64_inc_not_zero_cx8)
+SYM_FUNC_END(atomic64_inc_not_zero_cx8)
diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
index 4df90c9ea383..4742e8fa7ee7 100644
--- a/arch/x86/lib/checksum_32.S
+++ b/arch/x86/lib/checksum_32.S
@@ -46,7 +46,7 @@ unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum)
* Fortunately, it is easy to convert 2-byte alignment to 4-byte
* alignment for the unrolled loop.
*/
-ENTRY(csum_partial)
+SYM_FUNC_START(csum_partial)
pushl %esi
pushl %ebx
movl 20(%esp),%eax # Function arg: unsigned int sum
@@ -128,13 +128,13 @@ ENTRY(csum_partial)
popl %ebx
popl %esi
ret
-ENDPROC(csum_partial)
+SYM_FUNC_END(csum_partial)
#else
/* Version for PentiumII/PPro */
-ENTRY(csum_partial)
+SYM_FUNC_START(csum_partial)
pushl %esi
pushl %ebx
movl 20(%esp),%eax # Function arg: unsigned int sum
@@ -246,7 +246,7 @@ ENTRY(csum_partial)
popl %ebx
popl %esi
ret
-ENDPROC(csum_partial)
+SYM_FUNC_END(csum_partial)
#endif
EXPORT_SYMBOL(csum_partial)
@@ -280,7 +280,7 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
#define ARGBASE 16
#define FP 12
-ENTRY(csum_partial_copy_generic)
+SYM_FUNC_START(csum_partial_copy_generic)
subl $4,%esp
pushl %edi
pushl %esi
@@ -398,7 +398,7 @@ DST( movb %cl, (%edi) )
popl %edi
popl %ecx # equivalent to addl $4,%esp
ret
-ENDPROC(csum_partial_copy_generic)
+SYM_FUNC_END(csum_partial_copy_generic)
#else
@@ -416,7 +416,7 @@ ENDPROC(csum_partial_copy_generic)
#define ARGBASE 12
-ENTRY(csum_partial_copy_generic)
+SYM_FUNC_START(csum_partial_copy_generic)
pushl %ebx
pushl %edi
pushl %esi
@@ -483,7 +483,7 @@ DST( movb %dl, (%edi) )
popl %edi
popl %ebx
ret
-ENDPROC(csum_partial_copy_generic)
+SYM_FUNC_END(csum_partial_copy_generic)
#undef ROUND
#undef ROUND1
diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
index 75a5a4515fa7..c4c7dd115953 100644
--- a/arch/x86/lib/clear_page_64.S
+++ b/arch/x86/lib/clear_page_64.S
@@ -13,15 +13,15 @@
* Zero a page.
* %rdi - page
*/
-ENTRY(clear_page_rep)
+SYM_FUNC_START(clear_page_rep)
movl $4096/8,%ecx
xorl %eax,%eax
rep stosq
ret
-ENDPROC(clear_page_rep)
+SYM_FUNC_END(clear_page_rep)
EXPORT_SYMBOL_GPL(clear_page_rep)
-ENTRY(clear_page_orig)
+SYM_FUNC_START(clear_page_orig)
xorl %eax,%eax
movl $4096/64,%ecx
.p2align 4
@@ -40,13 +40,13 @@ ENTRY(clear_page_orig)
jnz .Lloop
nop
ret
-ENDPROC(clear_page_orig)
+SYM_FUNC_END(clear_page_orig)
EXPORT_SYMBOL_GPL(clear_page_orig)
-ENTRY(clear_page_erms)
+SYM_FUNC_START(clear_page_erms)
movl $4096,%ecx
xorl %eax,%eax
rep stosb
ret
-ENDPROC(clear_page_erms)
+SYM_FUNC_END(clear_page_erms)
EXPORT_SYMBOL_GPL(clear_page_erms)
diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
index d63185698a23..3542502faa3b 100644
--- a/arch/x86/lib/cmpxchg16b_emu.S
+++ b/arch/x86/lib/cmpxchg16b_emu.S
@@ -13,7 +13,7 @@
* %rcx : high 64 bits of new value
* %al : Operation successful
*/
-ENTRY(this_cpu_cmpxchg16b_emu)
+SYM_FUNC_START(this_cpu_cmpxchg16b_emu)
#
# Emulate 'cmpxchg16b %gs:(%rsi)' except we return the result in %al not
@@ -44,4 +44,4 @@ ENTRY(this_cpu_cmpxchg16b_emu)
xor %al,%al
ret
-ENDPROC(this_cpu_cmpxchg16b_emu)
+SYM_FUNC_END(this_cpu_cmpxchg16b_emu)
diff --git a/arch/x86/lib/cmpxchg8b_emu.S b/arch/x86/lib/cmpxchg8b_emu.S
index 691d80e97488..ca01ed6029f4 100644
--- a/arch/x86/lib/cmpxchg8b_emu.S
+++ b/arch/x86/lib/cmpxchg8b_emu.S
@@ -13,7 +13,7 @@
* %ebx : low 32 bits of new value
* %ecx : high 32 bits of new value
*/
-ENTRY(cmpxchg8b_emu)
+SYM_FUNC_START(cmpxchg8b_emu)
#
# Emulate 'cmpxchg8b (%esi)' on UP except we don't
@@ -42,5 +42,5 @@ ENTRY(cmpxchg8b_emu)
popfl
ret
-ENDPROC(cmpxchg8b_emu)
+SYM_FUNC_END(cmpxchg8b_emu)
EXPORT_SYMBOL(cmpxchg8b_emu)
diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
index fd2d09afa097..2402d4c489d2 100644
--- a/arch/x86/lib/copy_page_64.S
+++ b/arch/x86/lib/copy_page_64.S
@@ -13,15 +13,15 @@
* prefetch distance based on SMP/UP.
*/
ALIGN
-ENTRY(copy_page)
+SYM_FUNC_START(copy_page)
ALTERNATIVE "jmp copy_page_regs", "", X86_FEATURE_REP_GOOD
movl $4096/8, %ecx
rep movsq
ret
-ENDPROC(copy_page)
+SYM_FUNC_END(copy_page)
EXPORT_SYMBOL(copy_page)
-ENTRY(copy_page_regs)
+SYM_FUNC_START_LOCAL(copy_page_regs)
subq $2*8, %rsp
movq %rbx, (%rsp)
movq %r12, 1*8(%rsp)
@@ -86,4 +86,4 @@ ENTRY(copy_page_regs)
movq 1*8(%rsp), %r12
addq $2*8, %rsp
ret
-ENDPROC(copy_page_regs)
+SYM_FUNC_END(copy_page_regs)
diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
index 86976b55ae74..816f128a6d52 100644
--- a/arch/x86/lib/copy_user_64.S
+++ b/arch/x86/lib/copy_user_64.S
@@ -53,7 +53,7 @@
* Output:
* eax uncopied bytes or 0 if successful.
*/
-ENTRY(copy_user_generic_unrolled)
+SYM_FUNC_START(copy_user_generic_unrolled)
ASM_STAC
cmpl $8,%edx
jb 20f /* less then 8 bytes, go to byte copy loop */
@@ -136,7 +136,7 @@ ENTRY(copy_user_generic_unrolled)
_ASM_EXTABLE_UA(19b, 40b)
_ASM_EXTABLE_UA(21b, 50b)
_ASM_EXTABLE_UA(22b, 50b)
-ENDPROC(copy_user_generic_unrolled)
+SYM_FUNC_END(copy_user_generic_unrolled)
EXPORT_SYMBOL(copy_user_generic_unrolled)
/* Some CPUs run faster using the string copy instructions.
@@ -157,7 +157,7 @@ EXPORT_SYMBOL(copy_user_generic_unrolled)
* Output:
* eax uncopied bytes or 0 if successful.
*/
-ENTRY(copy_user_generic_string)
+SYM_FUNC_START(copy_user_generic_string)
ASM_STAC
cmpl $8,%edx
jb 2f /* less than 8 bytes, go to byte copy loop */
@@ -182,7 +182,7 @@ ENTRY(copy_user_generic_string)
_ASM_EXTABLE_UA(1b, 11b)
_ASM_EXTABLE_UA(3b, 12b)
-ENDPROC(copy_user_generic_string)
+SYM_FUNC_END(copy_user_generic_string)
EXPORT_SYMBOL(copy_user_generic_string)
/*
@@ -197,7 +197,7 @@ EXPORT_SYMBOL(copy_user_generic_string)
* Output:
* eax uncopied bytes or 0 if successful.
*/
-ENTRY(copy_user_enhanced_fast_string)
+SYM_FUNC_START(copy_user_enhanced_fast_string)
ASM_STAC
cmpl $64,%edx
jb .L_copy_short_string /* less then 64 bytes, avoid the costly 'rep' */
@@ -214,7 +214,7 @@ ENTRY(copy_user_enhanced_fast_string)
.previous
_ASM_EXTABLE_UA(1b, 12b)
-ENDPROC(copy_user_enhanced_fast_string)
+SYM_FUNC_END(copy_user_enhanced_fast_string)
EXPORT_SYMBOL(copy_user_enhanced_fast_string)
/*
@@ -230,8 +230,7 @@ EXPORT_SYMBOL(copy_user_enhanced_fast_string)
* Output:
* eax uncopied bytes or 0 if successful.
*/
-ALIGN;
-.Lcopy_user_handle_tail:
+SYM_CODE_START_LOCAL(.Lcopy_user_handle_tail)
movl %edx,%ecx
1: rep movsb
2: mov %ecx,%eax
@@ -239,7 +238,7 @@ ALIGN;
ret
_ASM_EXTABLE_UA(1b, 2b)
-END(.Lcopy_user_handle_tail)
+SYM_CODE_END(.Lcopy_user_handle_tail)
/*
* copy_user_nocache - Uncached memory copy with exception handling
@@ -250,7 +249,7 @@ END(.Lcopy_user_handle_tail)
* - Require 8-byte alignment when size is 8 bytes or larger.
* - Require 4-byte alignment when size is 4 bytes.
*/
-ENTRY(__copy_user_nocache)
+SYM_FUNC_START(__copy_user_nocache)
ASM_STAC
/* If size is less than 8 bytes, go to 4-byte copy */
@@ -389,5 +388,5 @@ ENTRY(__copy_user_nocache)
_ASM_EXTABLE_UA(31b, .L_fixup_4b_copy)
_ASM_EXTABLE_UA(40b, .L_fixup_1b_copy)
_ASM_EXTABLE_UA(41b, .L_fixup_1b_copy)
-ENDPROC(__copy_user_nocache)
+SYM_FUNC_END(__copy_user_nocache)
EXPORT_SYMBOL(__copy_user_nocache)
diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
index a4a379e79259..3394a8ff7fd0 100644
--- a/arch/x86/lib/csum-copy_64.S
+++ b/arch/x86/lib/csum-copy_64.S
@@ -49,7 +49,7 @@
.endm
-ENTRY(csum_partial_copy_generic)
+SYM_FUNC_START(csum_partial_copy_generic)
cmpl $3*64, %edx
jle .Lignore
@@ -225,4 +225,4 @@ ENTRY(csum_partial_copy_generic)
jz .Lende
movl $-EFAULT, (%rax)
jmp .Lende
-ENDPROC(csum_partial_copy_generic)
+SYM_FUNC_END(csum_partial_copy_generic)
diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
index 9578eb88fc87..c8a85b512796 100644
--- a/arch/x86/lib/getuser.S
+++ b/arch/x86/lib/getuser.S
@@ -36,7 +36,7 @@
#include <asm/export.h>
.text
-ENTRY(__get_user_1)
+SYM_FUNC_START(__get_user_1)
mov PER_CPU_VAR(current_task), %_ASM_DX
cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX
jae bad_get_user
@@ -47,10 +47,10 @@ ENTRY(__get_user_1)
xor %eax,%eax
ASM_CLAC
ret
-ENDPROC(__get_user_1)
+SYM_FUNC_END(__get_user_1)
EXPORT_SYMBOL(__get_user_1)
-ENTRY(__get_user_2)
+SYM_FUNC_START(__get_user_2)
add $1,%_ASM_AX
jc bad_get_user
mov PER_CPU_VAR(current_task), %_ASM_DX
@@ -63,10 +63,10 @@ ENTRY(__get_user_2)
xor %eax,%eax
ASM_CLAC
ret
-ENDPROC(__get_user_2)
+SYM_FUNC_END(__get_user_2)
EXPORT_SYMBOL(__get_user_2)
-ENTRY(__get_user_4)
+SYM_FUNC_START(__get_user_4)
add $3,%_ASM_AX
jc bad_get_user
mov PER_CPU_VAR(current_task), %_ASM_DX
@@ -79,10 +79,10 @@ ENTRY(__get_user_4)
xor %eax,%eax
ASM_CLAC
ret
-ENDPROC(__get_user_4)
+SYM_FUNC_END(__get_user_4)
EXPORT_SYMBOL(__get_user_4)
-ENTRY(__get_user_8)
+SYM_FUNC_START(__get_user_8)
#ifdef CONFIG_X86_64
add $7,%_ASM_AX
jc bad_get_user
@@ -111,25 +111,27 @@ ENTRY(__get_user_8)
ASM_CLAC
ret
#endif
-ENDPROC(__get_user_8)
+SYM_FUNC_END(__get_user_8)
EXPORT_SYMBOL(__get_user_8)
-.Lbad_get_user_clac:
+SYM_CODE_START_LOCAL(.Lbad_get_user_clac)
ASM_CLAC
bad_get_user:
xor %edx,%edx
mov $(-EFAULT),%_ASM_AX
ret
+SYM_CODE_END(.Lbad_get_user_clac)
#ifdef CONFIG_X86_32
-.Lbad_get_user_8_clac:
+SYM_CODE_START_LOCAL(.Lbad_get_user_8_clac)
ASM_CLAC
bad_get_user_8:
xor %edx,%edx
xor %ecx,%ecx
mov $(-EFAULT),%_ASM_AX
ret
+SYM_CODE_END(.Lbad_get_user_8_clac)
#endif
_ASM_EXTABLE_UA(1b, .Lbad_get_user_clac)
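
Each __get_user_N body above first range-checks the user pointer: the add/jc pair rejects pointers whose last byte would wrap around the address space before the limit comparison is made. A C sketch of that check, using a hypothetical user_access_ok() helper rather than any real kernel API (and assuming n >= 1, as the fixed-size accessors guarantee):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool user_access_ok(uintptr_t addr, size_t n, uintptr_t limit)
{
	uintptr_t last = addr + (n - 1);

	if (last < addr)	/* wrapped past the top of the address space */
		return false;
	return last <= limit;	/* whole access stays within the task limit */
}
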
diff --git a/arch/x86/lib/hweight.S b/arch/x86/lib/hweight.S
index a14f9939c365..dbf8cc97b7f5 100644
--- a/arch/x86/lib/hweight.S
+++ b/arch/x86/lib/hweight.S
@@ -8,7 +8,7 @@
* unsigned int __sw_hweight32(unsigned int w)
* %rdi: w
*/
-ENTRY(__sw_hweight32)
+SYM_FUNC_START(__sw_hweight32)
#ifdef CONFIG_X86_64
movl %edi, %eax # w
@@ -33,10 +33,10 @@ ENTRY(__sw_hweight32)
shrl $24, %eax # w = w_tmp >> 24
__ASM_SIZE(pop,) %__ASM_REG(dx)
ret
-ENDPROC(__sw_hweight32)
+SYM_FUNC_END(__sw_hweight32)
EXPORT_SYMBOL(__sw_hweight32)
-ENTRY(__sw_hweight64)
+SYM_FUNC_START(__sw_hweight64)
#ifdef CONFIG_X86_64
pushq %rdi
pushq %rdx
@@ -79,5 +79,5 @@ ENTRY(__sw_hweight64)
popl %ecx
ret
#endif
-ENDPROC(__sw_hweight64)
+SYM_FUNC_END(__sw_hweight64)
EXPORT_SYMBOL(__sw_hweight64)
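
Both routines above compute a population count without POPCNT, using the usual SWAR divide-and-conquer on bit fields. The 32-bit algorithm in C, as a reference for what the assembly encodes (this is the textbook form, not a transcription of the asm):

#include <stdint.h>

static uint32_t sw_hweight32(uint32_t w)
{
	w -= (w >> 1) & 0x55555555;			/* count bit pairs */
	w  = (w & 0x33333333) + ((w >> 2) & 0x33333333); /* 4-bit sums */
	w  = (w + (w >> 4)) & 0x0f0f0f0f;		/* 8-bit sums */
	return (w * 0x01010101) >> 24;			/* add up the bytes */
}
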
diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
index 0b5862ba6a75..404279563891 100644
--- a/arch/x86/lib/insn.c
+++ b/arch/x86/lib/insn.c
@@ -13,6 +13,8 @@
#include <asm/inat.h>
#include <asm/insn.h>
+#include <asm/emulate_prefix.h>
+
/* Verify next sizeof(t) bytes can be on the same instruction */
#define validate_next(t, insn, n) \
((insn)->next_byte + sizeof(t) + n <= (insn)->end_kaddr)
@@ -58,6 +60,36 @@ void insn_init(struct insn *insn, const void *kaddr, int buf_len, int x86_64)
insn->addr_bytes = 4;
}
+static const insn_byte_t xen_prefix[] = { __XEN_EMULATE_PREFIX };
+static const insn_byte_t kvm_prefix[] = { __KVM_EMULATE_PREFIX };
+
+static int __insn_get_emulate_prefix(struct insn *insn,
+ const insn_byte_t *prefix, size_t len)
+{
+ size_t i;
+
+ for (i = 0; i < len; i++) {
+ if (peek_nbyte_next(insn_byte_t, insn, i) != prefix[i])
+ goto err_out;
+ }
+
+ insn->emulate_prefix_size = len;
+ insn->next_byte += len;
+
+ return 1;
+
+err_out:
+ return 0;
+}
+
+static void insn_get_emulate_prefix(struct insn *insn)
+{
+ if (__insn_get_emulate_prefix(insn, xen_prefix, sizeof(xen_prefix)))
+ return;
+
+ __insn_get_emulate_prefix(insn, kvm_prefix, sizeof(kvm_prefix));
+}
+
/**
* insn_get_prefixes - scan x86 instruction prefix bytes
* @insn: &struct insn containing instruction
@@ -76,6 +108,8 @@ void insn_get_prefixes(struct insn *insn)
if (prefixes->got)
return;
+ insn_get_emulate_prefix(insn);
+
nb = 0;
lb = 0;
b = peek_next(insn_byte_t, insn);
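
insn_get_emulate_prefix() runs before ordinary prefix decoding: if the bytes at the cursor match the Xen or KVM magic sequence, the cursor advances past them so the rest of the decoder never sees them. A standalone sketch of that peek-and-skip step (the prefix contents are placeholders, not the real __XEN_EMULATE_PREFIX/__KVM_EMULATE_PREFIX encodings):

#include <stddef.h>
#include <string.h>

static int eat_emulate_prefix(const unsigned char **cur,
			      const unsigned char *end,
			      const unsigned char *prefix, size_t len)
{
	if ((size_t)(end - *cur) < len || memcmp(*cur, prefix, len) != 0)
		return 0;		/* no match: cursor unchanged */
	*cur += len;			/* match: hide it from the decoder */
	return 1;
}
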
diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
index a9bdf0805be0..cb5a1964506b 100644
--- a/arch/x86/lib/iomap_copy_64.S
+++ b/arch/x86/lib/iomap_copy_64.S
@@ -8,8 +8,8 @@
/*
* override generic version in lib/iomap_copy.c
*/
-ENTRY(__iowrite32_copy)
+SYM_FUNC_START(__iowrite32_copy)
movl %edx,%ecx
rep movsd
ret
-ENDPROC(__iowrite32_copy)
+SYM_FUNC_END(__iowrite32_copy)
diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
index 92748660ba51..56b243b14c3a 100644
--- a/arch/x86/lib/memcpy_64.S
+++ b/arch/x86/lib/memcpy_64.S
@@ -28,8 +28,8 @@
* Output:
* rax original destination
*/
-ENTRY(__memcpy)
-ENTRY(memcpy)
+SYM_FUNC_START_ALIAS(__memcpy)
+SYM_FUNC_START_LOCAL(memcpy)
ALTERNATIVE_2 "jmp memcpy_orig", "", X86_FEATURE_REP_GOOD, \
"jmp memcpy_erms", X86_FEATURE_ERMS
@@ -41,8 +41,8 @@ ENTRY(memcpy)
movl %edx, %ecx
rep movsb
ret
-ENDPROC(memcpy)
-ENDPROC(__memcpy)
+SYM_FUNC_END(memcpy)
+SYM_FUNC_END_ALIAS(__memcpy)
EXPORT_SYMBOL(memcpy)
EXPORT_SYMBOL(__memcpy)
@@ -50,14 +50,14 @@ EXPORT_SYMBOL(__memcpy)
* memcpy_erms() - enhanced fast string memcpy. This is faster and
* simpler than memcpy. Use memcpy_erms when possible.
*/
-ENTRY(memcpy_erms)
+SYM_FUNC_START_LOCAL(memcpy_erms)
movq %rdi, %rax
movq %rdx, %rcx
rep movsb
ret
-ENDPROC(memcpy_erms)
+SYM_FUNC_END(memcpy_erms)
-ENTRY(memcpy_orig)
+SYM_FUNC_START_LOCAL(memcpy_orig)
movq %rdi, %rax
cmpq $0x20, %rdx
@@ -182,7 +182,7 @@ ENTRY(memcpy_orig)
.Lend:
retq
-ENDPROC(memcpy_orig)
+SYM_FUNC_END(memcpy_orig)
#ifndef CONFIG_UML
@@ -193,7 +193,7 @@ MCSAFE_TEST_CTL
* Note that we only catch machine checks when reading the source addresses.
* Writes to target are posted and don't generate machine checks.
*/
-ENTRY(__memcpy_mcsafe)
+SYM_FUNC_START(__memcpy_mcsafe)
cmpl $8, %edx
/* Less than 8 bytes? Go to byte copy loop */
jb .L_no_whole_words
@@ -260,7 +260,7 @@ ENTRY(__memcpy_mcsafe)
xorl %eax, %eax
.L_done:
ret
-ENDPROC(__memcpy_mcsafe)
+SYM_FUNC_END(__memcpy_mcsafe)
EXPORT_SYMBOL_GPL(__memcpy_mcsafe)
.section .fixup, "ax"
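
The ALTERNATIVE_2 line above patches the jump at the head of memcpy at boot, choosing between memcpy_orig, the plain rep-movsq body, and memcpy_erms based on CPU feature bits. A rough C analogue of that one-time variant selection, with all names hypothetical and the feature probe stubbed out (the real mechanism rewrites the code in place instead of going through a function pointer):

#include <stddef.h>
#include <stdint.h>

static void *copy_bytes(void *d, const void *s, size_t n)
{
	unsigned char *dp = d;
	const unsigned char *sp = s;

	while (n--)			/* the "rep movsb" analogue */
		*dp++ = *sp++;
	return d;
}

static void *copy_words(void *d, const void *s, size_t n)
{
	unsigned char *dp = d;
	const unsigned char *sp = s;

	while (n >= 8) {		/* 8-byte chunks, memcpy_orig style */
		uint64_t w;

		__builtin_memcpy(&w, sp, 8);
		__builtin_memcpy(dp, &w, 8);
		dp += 8; sp += 8; n -= 8;
	}
	while (n--)			/* byte tail */
		*dp++ = *sp++;
	return d;
}

typedef void *(*memcpy_fn)(void *, const void *, size_t);

static int cpu_has_erms(void)
{
	return 1;	/* placeholder for a real CPUID feature probe */
}

static memcpy_fn resolve_memcpy(void)
{
	return cpu_has_erms() ? copy_bytes : copy_words;
}
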
diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
index bbec69d8223b..337830d7a59c 100644
--- a/arch/x86/lib/memmove_64.S
+++ b/arch/x86/lib/memmove_64.S
@@ -26,8 +26,8 @@
*/
.weak memmove
-ENTRY(memmove)
-ENTRY(__memmove)
+SYM_FUNC_START_ALIAS(memmove)
+SYM_FUNC_START(__memmove)
/* Handle blocks of 32 bytes or more in a loop */
mov %rdi, %rax
@@ -207,7 +207,7 @@ ENTRY(__memmove)
movb %r11b, (%rdi)
13:
retq
-ENDPROC(__memmove)
-ENDPROC(memmove)
+SYM_FUNC_END(__memmove)
+SYM_FUNC_END_ALIAS(memmove)
EXPORT_SYMBOL(__memmove)
EXPORT_SYMBOL(memmove)
diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
index 9bc861c71e75..9ff15ee404a4 100644
--- a/arch/x86/lib/memset_64.S
+++ b/arch/x86/lib/memset_64.S
@@ -19,8 +19,8 @@
*
* rax original destination
*/
-ENTRY(memset)
-ENTRY(__memset)
+SYM_FUNC_START_ALIAS(memset)
+SYM_FUNC_START(__memset)
/*
* Some CPUs support the enhanced REP MOVSB/STOSB feature. It is recommended
* to use it when possible. If not available, use fast string instructions.
@@ -43,8 +43,8 @@ ENTRY(__memset)
rep stosb
movq %r9,%rax
ret
-ENDPROC(memset)
-ENDPROC(__memset)
+SYM_FUNC_END(__memset)
+SYM_FUNC_END_ALIAS(memset)
EXPORT_SYMBOL(memset)
EXPORT_SYMBOL(__memset)
@@ -59,16 +59,16 @@ EXPORT_SYMBOL(__memset)
*
* rax original destination
*/
-ENTRY(memset_erms)
+SYM_FUNC_START_LOCAL(memset_erms)
movq %rdi,%r9
movb %sil,%al
movq %rdx,%rcx
rep stosb
movq %r9,%rax
ret
-ENDPROC(memset_erms)
+SYM_FUNC_END(memset_erms)
-ENTRY(memset_orig)
+SYM_FUNC_START_LOCAL(memset_orig)
movq %rdi,%r10
/* expand byte value */
@@ -139,4 +139,4 @@ ENTRY(memset_orig)
subq %r8,%rdx
jmp .Lafter_bad_alignment
.Lfinal:
-ENDPROC(memset_orig)
+SYM_FUNC_END(memset_orig)
diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
index ed33cbab3958..a2b9caa5274c 100644
--- a/arch/x86/lib/msr-reg.S
+++ b/arch/x86/lib/msr-reg.S
@@ -12,7 +12,7 @@
*
*/
.macro op_safe_regs op
-ENTRY(\op\()_safe_regs)
+SYM_FUNC_START(\op\()_safe_regs)
pushq %rbx
pushq %r12
movq %rdi, %r10 /* Save pointer */
@@ -41,13 +41,13 @@ ENTRY(\op\()_safe_regs)
jmp 2b
_ASM_EXTABLE(1b, 3b)
-ENDPROC(\op\()_safe_regs)
+SYM_FUNC_END(\op\()_safe_regs)
.endm
#else /* X86_32 */
.macro op_safe_regs op
-ENTRY(\op\()_safe_regs)
+SYM_FUNC_START(\op\()_safe_regs)
pushl %ebx
pushl %ebp
pushl %esi
@@ -83,7 +83,7 @@ ENTRY(\op\()_safe_regs)
jmp 2b
_ASM_EXTABLE(1b, 3b)
-ENDPROC(\op\()_safe_regs)
+SYM_FUNC_END(\op\()_safe_regs)
.endm
#endif
diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
index 126dd6a9ec9b..7c7c92db8497 100644
--- a/arch/x86/lib/putuser.S
+++ b/arch/x86/lib/putuser.S
@@ -34,7 +34,7 @@
#define ENTER mov PER_CPU_VAR(current_task), %_ASM_BX
.text
-ENTRY(__put_user_1)
+SYM_FUNC_START(__put_user_1)
ENTER
cmp TASK_addr_limit(%_ASM_BX),%_ASM_CX
jae .Lbad_put_user
@@ -43,10 +43,10 @@ ENTRY(__put_user_1)
xor %eax,%eax
ASM_CLAC
ret
-ENDPROC(__put_user_1)
+SYM_FUNC_END(__put_user_1)
EXPORT_SYMBOL(__put_user_1)
-ENTRY(__put_user_2)
+SYM_FUNC_START(__put_user_2)
ENTER
mov TASK_addr_limit(%_ASM_BX),%_ASM_BX
sub $1,%_ASM_BX
@@ -57,10 +57,10 @@ ENTRY(__put_user_2)
xor %eax,%eax
ASM_CLAC
ret
-ENDPROC(__put_user_2)
+SYM_FUNC_END(__put_user_2)
EXPORT_SYMBOL(__put_user_2)
-ENTRY(__put_user_4)
+SYM_FUNC_START(__put_user_4)
ENTER
mov TASK_addr_limit(%_ASM_BX),%_ASM_BX
sub $3,%_ASM_BX
@@ -71,10 +71,10 @@ ENTRY(__put_user_4)
xor %eax,%eax
ASM_CLAC
ret
-ENDPROC(__put_user_4)
+SYM_FUNC_END(__put_user_4)
EXPORT_SYMBOL(__put_user_4)
-ENTRY(__put_user_8)
+SYM_FUNC_START(__put_user_8)
ENTER
mov TASK_addr_limit(%_ASM_BX),%_ASM_BX
sub $7,%_ASM_BX
@@ -88,14 +88,15 @@ ENTRY(__put_user_8)
xor %eax,%eax
ASM_CLAC
ret
-ENDPROC(__put_user_8)
+SYM_FUNC_END(__put_user_8)
EXPORT_SYMBOL(__put_user_8)
-.Lbad_put_user_clac:
+SYM_CODE_START_LOCAL(.Lbad_put_user_clac)
ASM_CLAC
.Lbad_put_user:
movl $-EFAULT,%eax
ret
+SYM_CODE_END(.Lbad_put_user_clac)
_ASM_EXTABLE_UA(1b, .Lbad_put_user_clac)
_ASM_EXTABLE_UA(2b, .Lbad_put_user_clac)
diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S
index c909961e678a..363ec132df7e 100644
--- a/arch/x86/lib/retpoline.S
+++ b/arch/x86/lib/retpoline.S
@@ -11,11 +11,11 @@
.macro THUNK reg
.section .text.__x86.indirect_thunk
-ENTRY(__x86_indirect_thunk_\reg)
+SYM_FUNC_START(__x86_indirect_thunk_\reg)
CFI_STARTPROC
JMP_NOSPEC %\reg
CFI_ENDPROC
-ENDPROC(__x86_indirect_thunk_\reg)
+SYM_FUNC_END(__x86_indirect_thunk_\reg)
.endm
/*
diff --git a/arch/x86/lib/x86-opcode-map.txt b/arch/x86/lib/x86-opcode-map.txt
index e0b85930dd77..0a0e9112f284 100644
--- a/arch/x86/lib/x86-opcode-map.txt
+++ b/arch/x86/lib/x86-opcode-map.txt
@@ -333,7 +333,7 @@ AVXcode: 1
06: CLTS
07: SYSRET (o64)
08: INVD
-09: WBINVD
+09: WBINVD | WBNOINVD (F3)
0a:
0b: UD2 (1B)
0c:
@@ -364,7 +364,7 @@ AVXcode: 1
# a ModR/M byte.
1a: BNDCL Gv,Ev (F3) | BNDCU Gv,Ev (F2) | BNDMOV Gv,Ev (66) | BNDLDX Gv,Ev
1b: BNDCN Gv,Ev (F2) | BNDMOV Ev,Gv (66) | BNDMK Gv,Ev (F3) | BNDSTX Ev,Gv
-1c:
+1c: Grp20 (1A),(1C)
1d:
1e:
1f: NOP Ev
@@ -792,6 +792,8 @@ f3: Grp17 (1A)
f5: BZHI Gy,Ey,By (v) | PEXT Gy,By,Ey (F3),(v) | PDEP Gy,By,Ey (F2),(v)
f6: ADCX Gy,Ey (66) | ADOX Gy,Ey (F3) | MULX By,Gy,rDX,Ey (F2),(v)
f7: BEXTR Gy,Ey,By (v) | SHLX Gy,Ey,By (66),(v) | SARX Gy,Ey,By (F3),(v) | SHRX Gy,Ey,By (F2),(v)
+f8: MOVDIR64B Gv,Mdqq (66) | ENQCMD Gv,Mdqq (F2) | ENQCMDS Gv,Mdqq (F3)
+f9: MOVDIRI My,Gy
EndTable
Table: 3-byte opcode 2 (0x0f 0x3a)
@@ -943,9 +945,9 @@ GrpTable: Grp6
EndTable
GrpTable: Grp7
-0: SGDT Ms | VMCALL (001),(11B) | VMLAUNCH (010),(11B) | VMRESUME (011),(11B) | VMXOFF (100),(11B)
-1: SIDT Ms | MONITOR (000),(11B) | MWAIT (001),(11B) | CLAC (010),(11B) | STAC (011),(11B)
-2: LGDT Ms | XGETBV (000),(11B) | XSETBV (001),(11B) | VMFUNC (100),(11B) | XEND (101)(11B) | XTEST (110)(11B)
+0: SGDT Ms | VMCALL (001),(11B) | VMLAUNCH (010),(11B) | VMRESUME (011),(11B) | VMXOFF (100),(11B) | PCONFIG (101),(11B) | ENCLV (000),(11B)
+1: SIDT Ms | MONITOR (000),(11B) | MWAIT (001),(11B) | CLAC (010),(11B) | STAC (011),(11B) | ENCLS (111),(11B)
+2: LGDT Ms | XGETBV (000),(11B) | XSETBV (001),(11B) | VMFUNC (100),(11B) | XEND (101)(11B) | XTEST (110)(11B) | ENCLU (111),(11B)
3: LIDT Ms
4: SMSW Mw/Rv
5: rdpkru (110),(11B) | wrpkru (111),(11B)
@@ -1020,7 +1022,7 @@ GrpTable: Grp15
3: vstmxcsr Md (v1) | WRGSBASE Ry (F3),(11B)
4: XSAVE | ptwrite Ey (F3),(11B)
5: XRSTOR | lfence (11B)
-6: XSAVEOPT | clwb (66) | mfence (11B)
+6: XSAVEOPT | clwb (66) | mfence (11B) | TPAUSE Rd (66),(11B) | UMONITOR Rv (F3),(11B) | UMWAIT Rd (F2),(11B)
7: clflush | clflushopt (66) | sfence (11B)
EndTable
@@ -1051,6 +1053,10 @@ GrpTable: Grp19
6: vscatterpf1qps/d Wx (66),(ev)
EndTable
+GrpTable: Grp20
+0: cldemote Mb
+EndTable
+
# AMD's Prefetch Group
GrpTable: GrpP
0: PREFETCH
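
In the map's notation, an "(F3)" annotation on a shared opcode means the F3 mandatory prefix selects the alternate form, so plain 0F 09 decodes as WBINVD while F3 0F 09 decodes as WBNOINVD. A toy decoder for just that distinction:

#include <stdio.h>

static const char *decode_0f09(int have_f3)
{
	return have_f3 ? "wbnoinvd" : "wbinvd";
}

int main(void)
{
	printf("0F 09    -> %s\n", decode_0f09(0));
	printf("F3 0F 09 -> %s\n", decode_0f09(1));
	return 0;
}
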
diff --git a/arch/x86/math-emu/div_Xsig.S b/arch/x86/math-emu/div_Xsig.S
index ee08449d20fd..951da2ad54bb 100644
--- a/arch/x86/math-emu/div_Xsig.S
+++ b/arch/x86/math-emu/div_Xsig.S
@@ -75,7 +75,7 @@ FPU_result_1:
.text
-ENTRY(div_Xsig)
+SYM_FUNC_START(div_Xsig)
pushl %ebp
movl %esp,%ebp
#ifndef NON_REENTRANT_FPU
@@ -364,4 +364,4 @@ L_bugged_2:
pop %ebx
jmp L_exit
#endif /* PARANOID */
-ENDPROC(div_Xsig)
+SYM_FUNC_END(div_Xsig)
diff --git a/arch/x86/math-emu/div_small.S b/arch/x86/math-emu/div_small.S
index 8f5025c80ee0..d047d1816abe 100644
--- a/arch/x86/math-emu/div_small.S
+++ b/arch/x86/math-emu/div_small.S
@@ -19,7 +19,7 @@
#include "fpu_emu.h"
.text
-ENTRY(FPU_div_small)
+SYM_FUNC_START(FPU_div_small)
pushl %ebp
movl %esp,%ebp
@@ -45,4 +45,4 @@ ENTRY(FPU_div_small)
leave
ret
-ENDPROC(FPU_div_small)
+SYM_FUNC_END(FPU_div_small)
diff --git a/arch/x86/math-emu/fpu_system.h b/arch/x86/math-emu/fpu_system.h
index f98a0c956764..9b41391867dc 100644
--- a/arch/x86/math-emu/fpu_system.h
+++ b/arch/x86/math-emu/fpu_system.h
@@ -107,6 +107,8 @@ static inline bool seg_writable(struct desc_struct *d)
#define FPU_access_ok(y,z) if ( !access_ok(y,z) ) \
math_abort(FPU_info,SIGSEGV)
#define FPU_abort math_abort(FPU_info, SIGSEGV)
+#define FPU_copy_from_user(to, from, n) \
+ do { if (copy_from_user(to, from, n)) FPU_abort; } while (0)
#undef FPU_IGNORE_CODE_SEGV
#ifdef FPU_IGNORE_CODE_SEGV
@@ -122,7 +124,7 @@ static inline bool seg_writable(struct desc_struct *d)
#define FPU_code_access_ok(z) FPU_access_ok((void __user *)FPU_EIP,z)
#endif
-#define FPU_get_user(x,y) get_user((x),(y))
-#define FPU_put_user(x,y) put_user((x),(y))
+#define FPU_get_user(x,y) do { if (get_user((x),(y))) FPU_abort; } while (0)
+#define FPU_put_user(x,y) do { if (put_user((x),(y))) FPU_abort; } while (0)
#endif
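
The new wrappers fold the error check into a do { ... } while (0) body so each macro expands to exactly one statement and FPU_abort fires on any failed user access. The sketch below shows why that wrapper shape matters in an unbraced if/else; the names are invented, only the idiom matches the FPU wrappers:

#include <stdio.h>

#define ABORT_ON_NEG(x)			\
	do {				\
		if ((x) < 0)		\
			puts("abort");	\
	} while (0)

int main(void)
{
	int err = -1;

	if (err)
		ABORT_ON_NEG(err);	/* expands safely as one statement */
	else
		puts("ok");
	return 0;
}
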
diff --git a/arch/x86/math-emu/mul_Xsig.S b/arch/x86/math-emu/mul_Xsig.S
index 3e489122a2b0..4afc7b1fa6e9 100644
--- a/arch/x86/math-emu/mul_Xsig.S
+++ b/arch/x86/math-emu/mul_Xsig.S
@@ -25,7 +25,7 @@
#include "fpu_emu.h"
.text
-ENTRY(mul32_Xsig)
+SYM_FUNC_START(mul32_Xsig)
pushl %ebp
movl %esp,%ebp
subl $16,%esp
@@ -63,10 +63,10 @@ ENTRY(mul32_Xsig)
popl %esi
leave
ret
-ENDPROC(mul32_Xsig)
+SYM_FUNC_END(mul32_Xsig)
-ENTRY(mul64_Xsig)
+SYM_FUNC_START(mul64_Xsig)
pushl %ebp
movl %esp,%ebp
subl $16,%esp
@@ -116,11 +116,11 @@ ENTRY(mul64_Xsig)
popl %esi
leave
ret
-ENDPROC(mul64_Xsig)
+SYM_FUNC_END(mul64_Xsig)
-ENTRY(mul_Xsig_Xsig)
+SYM_FUNC_START(mul_Xsig_Xsig)
pushl %ebp
movl %esp,%ebp
subl $16,%esp
@@ -176,4 +176,4 @@ ENTRY(mul_Xsig_Xsig)
popl %esi
leave
ret
-ENDPROC(mul_Xsig_Xsig)
+SYM_FUNC_END(mul_Xsig_Xsig)
diff --git a/arch/x86/math-emu/polynom_Xsig.S b/arch/x86/math-emu/polynom_Xsig.S
index 604f0b2d17e8..702315eecb86 100644
--- a/arch/x86/math-emu/polynom_Xsig.S
+++ b/arch/x86/math-emu/polynom_Xsig.S
@@ -37,7 +37,7 @@
#define OVERFLOWED -16(%ebp) /* addition overflow flag */
.text
-ENTRY(polynomial_Xsig)
+SYM_FUNC_START(polynomial_Xsig)
pushl %ebp
movl %esp,%ebp
subl $32,%esp
@@ -134,4 +134,4 @@ L_accum_done:
popl %esi
leave
ret
-ENDPROC(polynomial_Xsig)
+SYM_FUNC_END(polynomial_Xsig)
diff --git a/arch/x86/math-emu/reg_ld_str.c b/arch/x86/math-emu/reg_ld_str.c
index f3779743d15e..fe6246ff9887 100644
--- a/arch/x86/math-emu/reg_ld_str.c
+++ b/arch/x86/math-emu/reg_ld_str.c
@@ -85,7 +85,7 @@ int FPU_load_extended(long double __user *s, int stnr)
RE_ENTRANT_CHECK_OFF;
FPU_access_ok(s, 10);
- __copy_from_user(sti_ptr, s, 10);
+ FPU_copy_from_user(sti_ptr, s, 10);
RE_ENTRANT_CHECK_ON;
return FPU_tagof(sti_ptr);
@@ -1126,9 +1126,9 @@ void frstor(fpu_addr_modes addr_modes, u_char __user *data_address)
/* Copy all registers in stack order. */
RE_ENTRANT_CHECK_OFF;
FPU_access_ok(s, 80);
- __copy_from_user(register_base + offset, s, other);
+ FPU_copy_from_user(register_base + offset, s, other);
if (offset)
- __copy_from_user(register_base, s + other, offset);
+ FPU_copy_from_user(register_base, s + other, offset);
RE_ENTRANT_CHECK_ON;
for (i = 0; i < 8; i++) {
diff --git a/arch/x86/math-emu/reg_norm.S b/arch/x86/math-emu/reg_norm.S
index 7f6b4392a15d..cad1d60b1e84 100644
--- a/arch/x86/math-emu/reg_norm.S
+++ b/arch/x86/math-emu/reg_norm.S
@@ -22,7 +22,7 @@
.text
-ENTRY(FPU_normalize)
+SYM_FUNC_START(FPU_normalize)
pushl %ebp
movl %esp,%ebp
pushl %ebx
@@ -95,12 +95,12 @@ L_overflow:
call arith_overflow
pop %ebx
jmp L_exit
-ENDPROC(FPU_normalize)
+SYM_FUNC_END(FPU_normalize)
/* Normalise without reporting underflow or overflow */
-ENTRY(FPU_normalize_nuo)
+SYM_FUNC_START(FPU_normalize_nuo)
pushl %ebp
movl %esp,%ebp
pushl %ebx
@@ -147,4 +147,4 @@ L_exit_nuo_zero:
popl %ebx
leave
ret
-ENDPROC(FPU_normalize_nuo)
+SYM_FUNC_END(FPU_normalize_nuo)
diff --git a/arch/x86/math-emu/reg_round.S b/arch/x86/math-emu/reg_round.S
index 04563421ee7d..11a1f798451b 100644
--- a/arch/x86/math-emu/reg_round.S
+++ b/arch/x86/math-emu/reg_round.S
@@ -109,7 +109,7 @@ FPU_denormal:
.globl fpu_Arith_exit
/* Entry point when called from C */
-ENTRY(FPU_round)
+SYM_FUNC_START(FPU_round)
pushl %ebp
movl %esp,%ebp
pushl %esi
@@ -708,4 +708,4 @@ L_exception_exit:
jmp fpu_reg_round_special_exit
#endif /* PARANOID */
-ENDPROC(FPU_round)
+SYM_FUNC_END(FPU_round)
diff --git a/arch/x86/math-emu/reg_u_add.S b/arch/x86/math-emu/reg_u_add.S
index 50fe9f8c893c..9c9e2c810afe 100644
--- a/arch/x86/math-emu/reg_u_add.S
+++ b/arch/x86/math-emu/reg_u_add.S
@@ -32,7 +32,7 @@
#include "control_w.h"
.text
-ENTRY(FPU_u_add)
+SYM_FUNC_START(FPU_u_add)
pushl %ebp
movl %esp,%ebp
pushl %esi
@@ -166,4 +166,4 @@ L_exit:
leave
ret
#endif /* PARANOID */
-ENDPROC(FPU_u_add)
+SYM_FUNC_END(FPU_u_add)
diff --git a/arch/x86/math-emu/reg_u_div.S b/arch/x86/math-emu/reg_u_div.S
index 94d545e118e4..e2fb5c2644c5 100644
--- a/arch/x86/math-emu/reg_u_div.S
+++ b/arch/x86/math-emu/reg_u_div.S
@@ -75,7 +75,7 @@ FPU_ovfl_flag:
#define DEST PARAM3
.text
-ENTRY(FPU_u_div)
+SYM_FUNC_START(FPU_u_div)
pushl %ebp
movl %esp,%ebp
#ifndef NON_REENTRANT_FPU
@@ -471,4 +471,4 @@ L_exit:
ret
#endif /* PARANOID */
-ENDPROC(FPU_u_div)
+SYM_FUNC_END(FPU_u_div)
diff --git a/arch/x86/math-emu/reg_u_mul.S b/arch/x86/math-emu/reg_u_mul.S
index 21cde47fb3e5..0c779c87ac5b 100644
--- a/arch/x86/math-emu/reg_u_mul.S
+++ b/arch/x86/math-emu/reg_u_mul.S
@@ -45,7 +45,7 @@ FPU_accum_1:
.text
-ENTRY(FPU_u_mul)
+SYM_FUNC_START(FPU_u_mul)
pushl %ebp
movl %esp,%ebp
#ifndef NON_REENTRANT_FPU
@@ -147,4 +147,4 @@ L_exit:
ret
#endif /* PARANOID */
-ENDPROC(FPU_u_mul)
+SYM_FUNC_END(FPU_u_mul)
diff --git a/arch/x86/math-emu/reg_u_sub.S b/arch/x86/math-emu/reg_u_sub.S
index f05dea7dec38..e9bb7c248649 100644
--- a/arch/x86/math-emu/reg_u_sub.S
+++ b/arch/x86/math-emu/reg_u_sub.S
@@ -33,7 +33,7 @@
#include "control_w.h"
.text
-ENTRY(FPU_u_sub)
+SYM_FUNC_START(FPU_u_sub)
pushl %ebp
movl %esp,%ebp
pushl %esi
@@ -271,4 +271,4 @@ L_exit:
popl %esi
leave
ret
-ENDPROC(FPU_u_sub)
+SYM_FUNC_END(FPU_u_sub)
diff --git a/arch/x86/math-emu/round_Xsig.S b/arch/x86/math-emu/round_Xsig.S
index 226a51e991f1..d9d7de8dbd7b 100644
--- a/arch/x86/math-emu/round_Xsig.S
+++ b/arch/x86/math-emu/round_Xsig.S
@@ -23,7 +23,7 @@
.text
-ENTRY(round_Xsig)
+SYM_FUNC_START(round_Xsig)
pushl %ebp
movl %esp,%ebp
pushl %ebx /* Reserve some space */
@@ -79,11 +79,11 @@ L_exit:
popl %ebx
leave
ret
-ENDPROC(round_Xsig)
+SYM_FUNC_END(round_Xsig)
-ENTRY(norm_Xsig)
+SYM_FUNC_START(norm_Xsig)
pushl %ebp
movl %esp,%ebp
pushl %ebx /* Reserve some space */
@@ -139,4 +139,4 @@ L_n_exit:
popl %ebx
leave
ret
-ENDPROC(norm_Xsig)
+SYM_FUNC_END(norm_Xsig)
diff --git a/arch/x86/math-emu/shr_Xsig.S b/arch/x86/math-emu/shr_Xsig.S
index 96f4779aa9c1..726af985f758 100644
--- a/arch/x86/math-emu/shr_Xsig.S
+++ b/arch/x86/math-emu/shr_Xsig.S
@@ -22,7 +22,7 @@
#include "fpu_emu.h"
.text
-ENTRY(shr_Xsig)
+SYM_FUNC_START(shr_Xsig)
push %ebp
movl %esp,%ebp
pushl %esi
@@ -86,4 +86,4 @@ L_more_than_95:
popl %esi
leave
ret
-ENDPROC(shr_Xsig)
+SYM_FUNC_END(shr_Xsig)
diff --git a/arch/x86/math-emu/wm_shrx.S b/arch/x86/math-emu/wm_shrx.S
index d588874eb6fb..4fc89174caf0 100644
--- a/arch/x86/math-emu/wm_shrx.S
+++ b/arch/x86/math-emu/wm_shrx.S
@@ -33,7 +33,7 @@
| Results returned in the 64 bit arg and eax. |
+---------------------------------------------------------------------------*/
-ENTRY(FPU_shrx)
+SYM_FUNC_START(FPU_shrx)
push %ebp
movl %esp,%ebp
pushl %esi
@@ -93,7 +93,7 @@ L_more_than_95:
popl %esi
leave
ret
-ENDPROC(FPU_shrx)
+SYM_FUNC_END(FPU_shrx)
/*---------------------------------------------------------------------------+
@@ -112,7 +112,7 @@ ENDPROC(FPU_shrx)
| part which has been shifted out of the arg. |
| Results returned in the 64 bit arg and eax. |
+---------------------------------------------------------------------------*/
-ENTRY(FPU_shrxs)
+SYM_FUNC_START(FPU_shrxs)
push %ebp
movl %esp,%ebp
pushl %esi
@@ -204,4 +204,4 @@ Ls_more_than_95:
popl %esi
leave
ret
-ENDPROC(FPU_shrxs)
+SYM_FUNC_END(FPU_shrxs)
diff --git a/arch/x86/math-emu/wm_sqrt.S b/arch/x86/math-emu/wm_sqrt.S
index f031c0e19356..3b2b58164ec1 100644
--- a/arch/x86/math-emu/wm_sqrt.S
+++ b/arch/x86/math-emu/wm_sqrt.S
@@ -75,7 +75,7 @@ FPU_fsqrt_arg_0:
.text
-ENTRY(wm_sqrt)
+SYM_FUNC_START(wm_sqrt)
pushl %ebp
movl %esp,%ebp
#ifndef NON_REENTRANT_FPU
@@ -469,4 +469,4 @@ sqrt_more_prec_large:
/* Our estimate is too large */
movl $0x7fffff00,%eax
jmp sqrt_round_result
-ENDPROC(wm_sqrt)
+SYM_FUNC_END(wm_sqrt)
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
index 84373dc9b341..3b89c201ac26 100644
--- a/arch/x86/mm/Makefile
+++ b/arch/x86/mm/Makefile
@@ -13,7 +13,7 @@ CFLAGS_REMOVE_mem_encrypt_identity.o = -pg
endif
obj-y := init.o init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \
- pat.o pgtable.o physaddr.o setup_nx.o tlb.o cpu_entry_area.o
+ pat.o pgtable.o physaddr.o setup_nx.o tlb.o cpu_entry_area.o maccess.o
# Make sure __phys_addr has no stackprotector
nostackp := $(call cc-option, -fno-stack-protector)
@@ -23,7 +23,7 @@ CFLAGS_mem_encrypt_identity.o := $(nostackp)
CFLAGS_fault.o := -I $(srctree)/$(src)/../include/asm/trace
-obj-$(CONFIG_X86_PAT) += pat_rbtree.o
+obj-$(CONFIG_X86_PAT) += pat_interval.o
obj-$(CONFIG_X86_32) += pgtable_32.o iomap_32.o
diff --git a/arch/x86/mm/cpu_entry_area.c b/arch/x86/mm/cpu_entry_area.c
index 752ad11d6868..82ead8e27888 100644
--- a/arch/x86/mm/cpu_entry_area.c
+++ b/arch/x86/mm/cpu_entry_area.c
@@ -161,6 +161,14 @@ static void __init setup_cpu_entry_area(unsigned int cpu)
BUILD_BUG_ON((offsetof(struct tss_struct, x86_tss) ^
offsetofend(struct tss_struct, x86_tss)) & PAGE_MASK);
BUILD_BUG_ON(sizeof(struct tss_struct) % PAGE_SIZE != 0);
+ /*
+ * VMX changes the host TR limit to 0x67 after a VM exit. This is
+ * okay, since 0x67 covers the size of struct x86_hw_tss. Make sure
+ * that this is correct.
+ */
+ BUILD_BUG_ON(offsetof(struct tss_struct, x86_tss) != 0);
+ BUILD_BUG_ON(sizeof(struct x86_hw_tss) != 0x68);
+
cea_map_percpu_pages(&cea->tss, &per_cpu(cpu_tss_rw, cpu),
sizeof(struct tss_struct) / PAGE_SIZE, tss_prot);
@@ -178,7 +186,9 @@ static __init void setup_cpu_entry_area_ptes(void)
#ifdef CONFIG_X86_32
unsigned long start, end;
- BUILD_BUG_ON(CPU_ENTRY_AREA_PAGES * PAGE_SIZE < CPU_ENTRY_AREA_MAP_SIZE);
+ /* The +1 is for the readonly IDT: */
+ BUILD_BUG_ON((CPU_ENTRY_AREA_PAGES+1)*PAGE_SIZE != CPU_ENTRY_AREA_MAP_SIZE);
+ BUILD_BUG_ON(CPU_ENTRY_AREA_TOTAL_SIZE != CPU_ENTRY_AREA_MAP_SIZE);
BUG_ON(CPU_ENTRY_AREA_BASE & ~PMD_MASK);
start = CPU_ENTRY_AREA_BASE;
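
The two new BUILD_BUG_ON() lines turn the VMX assumption, x86_tss at offset 0 and a 0x68-byte struct x86_hw_tss, into compile-time failures. A minimal sketch of the negative-array-size idiom behind such asserts (the kernel's actual macro is more elaborate, and the struct here is a stand-in):

#define STATIC_ASSERT(cond) ((void)sizeof(char[1 - 2 * !(cond)]))

struct hw_tss_like { char bytes[0x68]; };	/* stand-in, not the real struct */

int main(void)
{
	STATIC_ASSERT(sizeof(struct hw_tss_like) == 0x68);	/* compiles */
	/* STATIC_ASSERT(sizeof(struct hw_tss_like) == 0x67); would not */
	return 0;
}
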
diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
index 4d75bc656f97..30bb0bd3b1b8 100644
--- a/arch/x86/mm/extable.c
+++ b/arch/x86/mm/extable.c
@@ -45,55 +45,6 @@ __visible bool ex_handler_fault(const struct exception_table_entry *fixup,
EXPORT_SYMBOL_GPL(ex_handler_fault);
/*
- * Handler for UD0 exception following a failed test against the
- * result of a refcount inc/dec/add/sub.
- */
-__visible bool ex_handler_refcount(const struct exception_table_entry *fixup,
- struct pt_regs *regs, int trapnr,
- unsigned long error_code,
- unsigned long fault_addr)
-{
- /* First unconditionally saturate the refcount. */
- *(int *)regs->cx = INT_MIN / 2;
-
- /*
- * Strictly speaking, this reports the fixup destination, not
- * the fault location, and not the actually overflowing
- * instruction, which is the instruction before the "js", but
- * since that instruction could be a variety of lengths, just
- * report the location after the overflow, which should be close
- * enough for finding the overflow, as it's at least back in
- * the function, having returned from .text.unlikely.
- */
- regs->ip = ex_fixup_addr(fixup);
-
- /*
- * This function has been called because either a negative refcount
- * value was seen by any of the refcount functions, or a zero
- * refcount value was seen by refcount_dec().
- *
- * If we crossed from INT_MAX to INT_MIN, OF (Overflow Flag: result
- * wrapped around) will be set. Additionally, seeing the refcount
- * reach 0 will set ZF (Zero Flag: result was zero). In each of
- * these cases we want a report, since it's a boundary condition.
- * The SF case is not reported since it indicates post-boundary
- * manipulations below zero or above INT_MAX. And if none of the
- * flags are set, something has gone very wrong, so report it.
- */
- if (regs->flags & (X86_EFLAGS_OF | X86_EFLAGS_ZF)) {
- bool zero = regs->flags & X86_EFLAGS_ZF;
-
- refcount_error_report(regs, zero ? "hit zero" : "overflow");
- } else if ((regs->flags & X86_EFLAGS_SF) == 0) {
- /* Report if none of OF, ZF, nor SF are set. */
- refcount_error_report(regs, "unexpected saturation");
- }
-
- return true;
-}
-EXPORT_SYMBOL(ex_handler_refcount);
-
-/*
* Handler for when we fail to restore a task's FPU state. We should never get
* here because the FPU state of a task using the FPU (task->thread.fpu.state)
* should always be valid. However, past bugs have allowed userspace to set
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index fd10d91a6115..e7bb483557c9 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -829,14 +829,13 @@ void free_init_pages(const char *what, unsigned long begin, unsigned long end)
* used for the kernel image only. free_init_pages() will do the
* right thing for either kind of address.
*/
-void free_kernel_image_pages(void *begin, void *end)
+void free_kernel_image_pages(const char *what, void *begin, void *end)
{
unsigned long begin_ul = (unsigned long)begin;
unsigned long end_ul = (unsigned long)end;
unsigned long len_pages = (end_ul - begin_ul) >> PAGE_SHIFT;
-
- free_init_pages("unused kernel image", begin_ul, end_ul);
+ free_init_pages(what, begin_ul, end_ul);
/*
* PTI maps some of the kernel into userspace. For performance,
@@ -865,7 +864,8 @@ void __ref free_initmem(void)
mem_encrypt_free_decrypted_mem();
- free_kernel_image_pages(&__init_begin, &__init_end);
+ free_kernel_image_pages("unused kernel image (initmem)",
+ &__init_begin, &__init_end);
}
#ifdef CONFIG_BLK_DEV_INITRD
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index a6b5c653727b..dcb9bc961b39 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -1263,7 +1263,7 @@ int kernel_set_to_readonly;
void set_kernel_text_rw(void)
{
unsigned long start = PFN_ALIGN(_text);
- unsigned long end = PFN_ALIGN(__stop___ex_table);
+ unsigned long end = PFN_ALIGN(_etext);
if (!kernel_set_to_readonly)
return;
@@ -1282,7 +1282,7 @@ void set_kernel_text_rw(void)
void set_kernel_text_ro(void)
{
unsigned long start = PFN_ALIGN(_text);
- unsigned long end = PFN_ALIGN(__stop___ex_table);
+ unsigned long end = PFN_ALIGN(_etext);
if (!kernel_set_to_readonly)
return;
@@ -1300,9 +1300,9 @@ void mark_rodata_ro(void)
{
unsigned long start = PFN_ALIGN(_text);
unsigned long rodata_start = PFN_ALIGN(__start_rodata);
- unsigned long end = (unsigned long) &__end_rodata_hpage_align;
- unsigned long text_end = PFN_ALIGN(&__stop___ex_table);
- unsigned long rodata_end = PFN_ALIGN(&__end_rodata);
+ unsigned long end = (unsigned long)__end_rodata_hpage_align;
+ unsigned long text_end = PFN_ALIGN(_etext);
+ unsigned long rodata_end = PFN_ALIGN(__end_rodata);
unsigned long all_end;
printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
@@ -1334,8 +1334,10 @@ void mark_rodata_ro(void)
set_memory_ro(start, (end-start) >> PAGE_SHIFT);
#endif
- free_kernel_image_pages((void *)text_end, (void *)rodata_start);
- free_kernel_image_pages((void *)rodata_end, (void *)_sdata);
+ free_kernel_image_pages("unused kernel image (text/rodata gap)",
+ (void *)text_end, (void *)rodata_start);
+ free_kernel_image_pages("unused kernel image (rodata/data gap)",
+ (void *)rodata_end, (void *)_sdata);
debug_checkwx();
}
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index a39dcdb5ae34..1ff9c2030b4f 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -626,6 +626,17 @@ static bool memremap_is_setup_data(resource_size_t phys_addr,
paddr_next = data->next;
len = data->len;
+ if ((phys_addr > paddr) && (phys_addr < (paddr + len))) {
+ memunmap(data);
+ return true;
+ }
+
+ if (data->type == SETUP_INDIRECT &&
+ ((struct setup_indirect *)data->data)->type != SETUP_INDIRECT) {
+ paddr = ((struct setup_indirect *)data->data)->addr;
+ len = ((struct setup_indirect *)data->data)->len;
+ }
+
memunmap(data);
if ((phys_addr > paddr) && (phys_addr < (paddr + len)))
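
The new SETUP_INDIRECT handling means a setup_data entry may describe its payload indirectly: when the entry's type is SETUP_INDIRECT, the address and length to test live inside the payload, so the range check must run a second time against the redirected range. A simplified sketch of that two-step test, with a made-up struct in place of the real setup_data layout and bounds kept strict on both ends, mirroring the checks in the hunk above:

#include <stdbool.h>
#include <stdint.h>

struct sd_entry {
	bool indirect;		/* stands in for type == SETUP_INDIRECT */
	uint64_t addr, len;	/* the entry's own payload range */
	uint64_t iaddr, ilen;	/* redirected range when indirect */
};

static bool phys_hits_entry(const struct sd_entry *e, uint64_t phys)
{
	if (phys > e->addr && phys < e->addr + e->len)
		return true;	/* inside the entry itself */
	if (e->indirect)	/* follow one level of indirection */
		return phys > e->iaddr && phys < e->iaddr + e->ilen;
	return false;
}
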
diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c
index 79eb55ce69a9..49d7814b59a9 100644
--- a/arch/x86/mm/kmmio.c
+++ b/arch/x86/mm/kmmio.c
@@ -193,8 +193,8 @@ static int arm_kmmio_fault_page(struct kmmio_fault_page *f)
int ret;
WARN_ONCE(f->armed, KERN_ERR pr_fmt("kmmio page already armed.\n"));
if (f->armed) {
- pr_warning("double-arm: addr 0x%08lx, ref %d, old %d\n",
- f->addr, f->count, !!f->old_presence);
+ pr_warn("double-arm: addr 0x%08lx, ref %d, old %d\n",
+ f->addr, f->count, !!f->old_presence);
}
ret = clear_page_presence(f, true);
WARN_ONCE(ret < 0, KERN_ERR pr_fmt("arming at 0x%08lx failed.\n"),
@@ -341,8 +341,7 @@ static int post_kmmio_handler(unsigned long condition, struct pt_regs *regs)
* something external causing them (f.e. using a debugger while
* mmio tracing enabled), or erroneous behaviour
*/
- pr_warning("unexpected debug trap on CPU %d.\n",
- smp_processor_id());
+ pr_warn("unexpected debug trap on CPU %d.\n", smp_processor_id());
goto out;
}
diff --git a/arch/x86/mm/maccess.c b/arch/x86/mm/maccess.c
new file mode 100644
index 000000000000..f5b85bdc0535
--- /dev/null
+++ b/arch/x86/mm/maccess.c
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/uaccess.h>
+#include <linux/kernel.h>
+
+#ifdef CONFIG_X86_64
+static __always_inline u64 canonical_address(u64 vaddr, u8 vaddr_bits)
+{
+ return ((s64)vaddr << (64 - vaddr_bits)) >> (64 - vaddr_bits);
+}
+
+static __always_inline bool invalid_probe_range(u64 vaddr)
+{
+ /*
+ * Range covering the highest possible canonical userspace address
+ * as well as non-canonical address range. For the canonical range
+ * we also need to include the userspace guard page.
+ */
+ return vaddr < TASK_SIZE_MAX + PAGE_SIZE ||
+ canonical_address(vaddr, boot_cpu_data.x86_virt_bits) != vaddr;
+}
+#else
+static __always_inline bool invalid_probe_range(u64 vaddr)
+{
+ return vaddr < TASK_SIZE_MAX;
+}
+#endif
+
+long probe_kernel_read_strict(void *dst, const void *src, size_t size)
+{
+ if (unlikely(invalid_probe_range((unsigned long)src)))
+ return -EFAULT;
+
+ return __probe_kernel_read(dst, src, size);
+}
+
+long strncpy_from_unsafe_strict(char *dst, const void *unsafe_addr, long count)
+{
+ if (unlikely(invalid_probe_range((unsigned long)unsafe_addr)))
+ return -EFAULT;
+
+ return __strncpy_from_unsafe(dst, unsafe_addr, count);
+}
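
canonical_address() sign-extends from bit (vaddr_bits - 1), so an address is canonical exactly when the round trip leaves it unchanged; invalid_probe_range() combines that with the userspace guard-page bound. The same round-trip test in a small host-side program:

#include <stdint.h>
#include <stdio.h>

static uint64_t canonical(uint64_t vaddr, unsigned int bits)
{
	/* shift the tag bits out the top, arithmetic-shift them back */
	return (uint64_t)(((int64_t)vaddr << (64 - bits)) >> (64 - bits));
}

int main(void)
{
	uint64_t ok  = 0xffff888000000000ULL;	/* canonical for 48 bits */
	uint64_t bad = 0x1234888000000000ULL;	/* stray bits 48..63 set */

	printf("%d\n", canonical(ok, 48)  == ok);	/* prints 1 */
	printf("%d\n", canonical(bad, 48) == bad);	/* prints 0 */
	return 0;
}
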
diff --git a/arch/x86/mm/mem_encrypt_boot.S b/arch/x86/mm/mem_encrypt_boot.S
index 6d71481a1e70..106ead05bbe3 100644
--- a/arch/x86/mm/mem_encrypt_boot.S
+++ b/arch/x86/mm/mem_encrypt_boot.S
@@ -16,7 +16,7 @@
.text
.code64
-ENTRY(sme_encrypt_execute)
+SYM_FUNC_START(sme_encrypt_execute)
/*
* Entry parameters:
@@ -66,9 +66,9 @@ ENTRY(sme_encrypt_execute)
pop %rbp
ret
-ENDPROC(sme_encrypt_execute)
+SYM_FUNC_END(sme_encrypt_execute)
-ENTRY(__enc_copy)
+SYM_FUNC_START(__enc_copy)
/*
* Routine used to encrypt memory in place.
* This routine must be run outside of the kernel proper since
@@ -153,4 +153,4 @@ ENTRY(__enc_copy)
ret
.L__enc_copy_end:
-ENDPROC(__enc_copy)
+SYM_FUNC_END(__enc_copy)
diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
index b8ef8557d4b3..673de6063345 100644
--- a/arch/x86/mm/mmio-mod.c
+++ b/arch/x86/mm/mmio-mod.c
@@ -394,7 +394,7 @@ static void enter_uniprocessor(void)
}
out:
if (num_online_cpus() > 1)
- pr_warning("multiple CPUs still online, may miss events.\n");
+ pr_warn("multiple CPUs still online, may miss events.\n");
}
static void leave_uniprocessor(void)
@@ -418,8 +418,8 @@ static void leave_uniprocessor(void)
static void enter_uniprocessor(void)
{
if (num_online_cpus() > 1)
- pr_warning("multiple CPUs are online, may miss events. "
- "Suggest booting with maxcpus=1 kernel argument.\n");
+ pr_warn("multiple CPUs are online, may miss events. "
+ "Suggest booting with maxcpus=1 kernel argument.\n");
}
static void leave_uniprocessor(void)
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index 4123100e0eaf..99f7a68738f0 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -699,7 +699,7 @@ static int __init dummy_numa_init(void)
* x86_numa_init - Initialize NUMA
*
* Try each configured NUMA initialization method until one succeeds. The
- * last fallback is dummy single node config encomapssing whole memory and
+ * last fallback is dummy single node config encompassing whole memory and
* never fails.
*/
void __init x86_numa_init(void)
diff --git a/arch/x86/mm/numa_emulation.c b/arch/x86/mm/numa_emulation.c
index abffa0be80da..7f1d2034df1e 100644
--- a/arch/x86/mm/numa_emulation.c
+++ b/arch/x86/mm/numa_emulation.c
@@ -438,7 +438,7 @@ void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt)
goto no_emu;
if (numa_cleanup_meminfo(&ei) < 0) {
- pr_warning("NUMA: Warning: constructed meminfo invalid, disabling emulation\n");
+ pr_warn("NUMA: Warning: constructed meminfo invalid, disabling emulation\n");
goto no_emu;
}
@@ -449,7 +449,7 @@ void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt)
phys = memblock_find_in_range(0, PFN_PHYS(max_pfn_mapped),
phys_size, PAGE_SIZE);
if (!phys) {
- pr_warning("NUMA: Warning: can't allocate copy of distance table, disabling emulation\n");
+ pr_warn("NUMA: Warning: can't allocate copy of distance table, disabling emulation\n");
goto no_emu;
}
memblock_reserve(phys, phys_size);
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index d9fbd4f69920..2d758e19ef22 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -603,7 +603,7 @@ int reserve_memtype(u64 start, u64 end, enum page_cache_mode req_type,
spin_lock(&memtype_lock);
- err = rbt_memtype_check_insert(new, new_type);
+ err = memtype_check_insert(new, new_type);
if (err) {
pr_info("x86/PAT: reserve_memtype failed [mem %#010Lx-%#010Lx], track %s, req %s\n",
start, end - 1,
@@ -650,7 +650,7 @@ int free_memtype(u64 start, u64 end)
}
spin_lock(&memtype_lock);
- entry = rbt_memtype_erase(start, end);
+ entry = memtype_erase(start, end);
spin_unlock(&memtype_lock);
if (IS_ERR(entry)) {
@@ -693,7 +693,7 @@ static enum page_cache_mode lookup_memtype(u64 paddr)
spin_lock(&memtype_lock);
- entry = rbt_memtype_lookup(paddr);
+ entry = memtype_lookup(paddr);
if (entry != NULL)
rettype = entry->type;
else
@@ -1109,7 +1109,7 @@ static struct memtype *memtype_get_idx(loff_t pos)
return NULL;
spin_lock(&memtype_lock);
- ret = rbt_memtype_copy_nth_element(print_entry, pos);
+ ret = memtype_copy_nth_element(print_entry, pos);
spin_unlock(&memtype_lock);
if (!ret) {
diff --git a/arch/x86/mm/pat_internal.h b/arch/x86/mm/pat_internal.h
index eeb5caeb089b..79a06684349e 100644
--- a/arch/x86/mm/pat_internal.h
+++ b/arch/x86/mm/pat_internal.h
@@ -29,20 +29,20 @@ static inline char *cattr_name(enum page_cache_mode pcm)
}
#ifdef CONFIG_X86_PAT
-extern int rbt_memtype_check_insert(struct memtype *new,
- enum page_cache_mode *new_type);
-extern struct memtype *rbt_memtype_erase(u64 start, u64 end);
-extern struct memtype *rbt_memtype_lookup(u64 addr);
-extern int rbt_memtype_copy_nth_element(struct memtype *out, loff_t pos);
+extern int memtype_check_insert(struct memtype *new,
+ enum page_cache_mode *new_type);
+extern struct memtype *memtype_erase(u64 start, u64 end);
+extern struct memtype *memtype_lookup(u64 addr);
+extern int memtype_copy_nth_element(struct memtype *out, loff_t pos);
#else
-static inline int rbt_memtype_check_insert(struct memtype *new,
- enum page_cache_mode *new_type)
+static inline int memtype_check_insert(struct memtype *new,
+ enum page_cache_mode *new_type)
{ return 0; }
-static inline struct memtype *rbt_memtype_erase(u64 start, u64 end)
+static inline struct memtype *memtype_erase(u64 start, u64 end)
{ return NULL; }
-static inline struct memtype *rbt_memtype_lookup(u64 addr)
+static inline struct memtype *memtype_lookup(u64 addr)
{ return NULL; }
-static inline int rbt_memtype_copy_nth_element(struct memtype *out, loff_t pos)
+static inline int memtype_copy_nth_element(struct memtype *out, loff_t pos)
{ return 0; }
#endif
diff --git a/arch/x86/mm/pat_interval.c b/arch/x86/mm/pat_interval.c
new file mode 100644
index 000000000000..47a1bf30748f
--- /dev/null
+++ b/arch/x86/mm/pat_interval.c
@@ -0,0 +1,185 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Handle caching attributes in page tables (PAT)
+ *
+ * Authors: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
+ * Suresh B Siddha <suresh.b.siddha@intel.com>
+ *
+ * Interval tree used to store the PAT memory type reservations.
+ */
+
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+#include <linux/kernel.h>
+#include <linux/interval_tree_generic.h>
+#include <linux/sched.h>
+#include <linux/gfp.h>
+
+#include <asm/pgtable.h>
+#include <asm/pat.h>
+
+#include "pat_internal.h"
+
+/*
+ * The memtype tree keeps track of memory type for specific
+ * physical memory areas. Without proper tracking, conflicting memory
+ * types in different mappings can cause CPU cache corruption.
+ *
+ * The tree is an interval tree (augmented rbtree), ordered by starting
+ * address. It can contain multiple entries for different regions that
+ * overlap; all aliases of the same physical range carry the same cache
+ * attributes.
+ *
+ * memtype_lock protects the rbtree.
+ */
+static inline u64 memtype_interval_start(struct memtype *memtype)
+{
+ return memtype->start;
+}
+
+static inline u64 memtype_interval_end(struct memtype *memtype)
+{
+ return memtype->end - 1;
+}
+INTERVAL_TREE_DEFINE(struct memtype, rb, u64, subtree_max_end,
+ memtype_interval_start, memtype_interval_end,
+ static, memtype_interval)
+
+static struct rb_root_cached memtype_rbroot = RB_ROOT_CACHED;
+
+enum {
+ MEMTYPE_EXACT_MATCH = 0,
+ MEMTYPE_END_MATCH = 1
+};
+
+static struct memtype *memtype_match(u64 start, u64 end, int match_type)
+{
+ struct memtype *match;
+
+ match = memtype_interval_iter_first(&memtype_rbroot, start, end);
+ while (match != NULL && match->start < end) {
+ if ((match_type == MEMTYPE_EXACT_MATCH) &&
+ (match->start == start) && (match->end == end))
+ return match;
+
+ if ((match_type == MEMTYPE_END_MATCH) &&
+ (match->start < start) && (match->end == end))
+ return match;
+
+ match = memtype_interval_iter_next(match, start, end);
+ }
+
+ return NULL; /* Returns NULL if there is no match */
+}
+
+static int memtype_check_conflict(u64 start, u64 end,
+ enum page_cache_mode reqtype,
+ enum page_cache_mode *newtype)
+{
+ struct memtype *match;
+ enum page_cache_mode found_type = reqtype;
+
+ match = memtype_interval_iter_first(&memtype_rbroot, start, end);
+ if (match == NULL)
+ goto success;
+
+ if (match->type != found_type && newtype == NULL)
+ goto failure;
+
+ dprintk("Overlap at 0x%Lx-0x%Lx\n", match->start, match->end);
+ found_type = match->type;
+
+ match = memtype_interval_iter_next(match, start, end);
+ while (match) {
+ if (match->type != found_type)
+ goto failure;
+
+ match = memtype_interval_iter_next(match, start, end);
+ }
+success:
+ if (newtype)
+ *newtype = found_type;
+
+ return 0;
+
+failure:
+ pr_info("x86/PAT: %s:%d conflicting memory types %Lx-%Lx %s<->%s\n",
+ current->comm, current->pid, start, end,
+ cattr_name(found_type), cattr_name(match->type));
+ return -EBUSY;
+}
+
+int memtype_check_insert(struct memtype *new,
+ enum page_cache_mode *ret_type)
+{
+ int err = 0;
+
+ err = memtype_check_conflict(new->start, new->end, new->type, ret_type);
+ if (err)
+ return err;
+
+ if (ret_type)
+ new->type = *ret_type;
+
+ memtype_interval_insert(new, &memtype_rbroot);
+ return 0;
+}
+
+struct memtype *memtype_erase(u64 start, u64 end)
+{
+ struct memtype *data;
+
+ /*
+ * Since the memtype_rbroot tree allows overlapping ranges,
+ * memtype_erase() checks with EXACT_MATCH first, i.e. free
+ * a whole node for the munmap case. If no such entry is found,
+ * it then checks with END_MATCH, i.e. shrink the size of a node
+ * from the end for the mremap case.
+ */
+ data = memtype_match(start, end, MEMTYPE_EXACT_MATCH);
+ if (!data) {
+ data = memtype_match(start, end, MEMTYPE_END_MATCH);
+ if (!data)
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (data->start == start) {
+ /* munmap: erase this node */
+ memtype_interval_remove(data, &memtype_rbroot);
+ } else {
+ /* mremap: update the end value of this node */
+ memtype_interval_remove(data, &memtype_rbroot);
+ data->end = start;
+ memtype_interval_insert(data, &memtype_rbroot);
+ return NULL;
+ }
+
+ return data;
+}
+
+struct memtype *memtype_lookup(u64 addr)
+{
+ return memtype_interval_iter_first(&memtype_rbroot, addr,
+ addr + PAGE_SIZE);
+}
+
+#if defined(CONFIG_DEBUG_FS)
+int memtype_copy_nth_element(struct memtype *out, loff_t pos)
+{
+ struct memtype *match;
+ int i = 1;
+
+ match = memtype_interval_iter_first(&memtype_rbroot, 0, ULONG_MAX);
+ while (match && pos != i) {
+ match = memtype_interval_iter_next(match, 0, ULONG_MAX);
+ i++;
+ }
+
+ if (match) { /* pos == i */
+ *out = *match;
+ return 0;
+ } else {
+ return 1;
+ }
+}
+#endif
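
memtype_interval_end() returns end - 1 because INTERVAL_TREE_DEFINE() works with closed intervals [start, last], while memtype entries store half-open [start, end). A sketch of the equivalent overlap test under both conventions, showing why the conversion is lossless:

#include <stdbool.h>
#include <stdint.h>

/* half-open [start, end) overlap, the form memtype entries use */
static bool overlap_half_open(uint64_t s1, uint64_t e1,
			      uint64_t s2, uint64_t e2)
{
	return s1 < e2 && s2 < e1;
}

/* closed [start, last] overlap, the form the interval tree sees
 * after the end - 1 conversion in memtype_interval_end()
 */
static bool overlap_closed(uint64_t s1, uint64_t l1,
			   uint64_t s2, uint64_t l2)
{
	return s1 <= l2 && s2 <= l1;
}

/* For non-empty ranges, overlap_half_open(s1, e1, s2, e2) equals
 * overlap_closed(s1, e1 - 1, s2, e2 - 1).
 */
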
diff --git a/arch/x86/mm/pat_rbtree.c b/arch/x86/mm/pat_rbtree.c
deleted file mode 100644
index 65ebe4b88f7c..000000000000
--- a/arch/x86/mm/pat_rbtree.c
+++ /dev/null
@@ -1,268 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Handle caching attributes in page tables (PAT)
- *
- * Authors: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
- * Suresh B Siddha <suresh.b.siddha@intel.com>
- *
- * Interval tree (augmented rbtree) used to store the PAT memory type
- * reservations.
- */
-
-#include <linux/seq_file.h>
-#include <linux/debugfs.h>
-#include <linux/kernel.h>
-#include <linux/rbtree_augmented.h>
-#include <linux/sched.h>
-#include <linux/gfp.h>
-
-#include <asm/pgtable.h>
-#include <asm/pat.h>
-
-#include "pat_internal.h"
-
-/*
- * The memtype tree keeps track of memory type for specific
- * physical memory areas. Without proper tracking, conflicting memory
- * types in different mappings can cause CPU cache corruption.
- *
- * The tree is an interval tree (augmented rbtree) with tree ordered
- * on starting address. Tree can contain multiple entries for
- * different regions which overlap. All the aliases have the same
- * cache attributes of course.
- *
- * memtype_lock protects the rbtree.
- */
-
-static struct rb_root memtype_rbroot = RB_ROOT;
-
-static int is_node_overlap(struct memtype *node, u64 start, u64 end)
-{
- if (node->start >= end || node->end <= start)
- return 0;
-
- return 1;
-}
-
-static u64 get_subtree_max_end(struct rb_node *node)
-{
- u64 ret = 0;
- if (node) {
- struct memtype *data = rb_entry(node, struct memtype, rb);
- ret = data->subtree_max_end;
- }
- return ret;
-}
-
-#define NODE_END(node) ((node)->end)
-
-RB_DECLARE_CALLBACKS_MAX(static, memtype_rb_augment_cb,
- struct memtype, rb, u64, subtree_max_end, NODE_END)
-
-/* Find the first (lowest start addr) overlapping range from rb tree */
-static struct memtype *memtype_rb_lowest_match(struct rb_root *root,
- u64 start, u64 end)
-{
- struct rb_node *node = root->rb_node;
- struct memtype *last_lower = NULL;
-
- while (node) {
- struct memtype *data = rb_entry(node, struct memtype, rb);
-
- if (get_subtree_max_end(node->rb_left) > start) {
- /* Lowest overlap if any must be on left side */
- node = node->rb_left;
- } else if (is_node_overlap(data, start, end)) {
- last_lower = data;
- break;
- } else if (start >= data->start) {
- /* Lowest overlap if any must be on right side */
- node = node->rb_right;
- } else {
- break;
- }
- }
- return last_lower; /* Returns NULL if there is no overlap */
-}
-
-enum {
- MEMTYPE_EXACT_MATCH = 0,
- MEMTYPE_END_MATCH = 1
-};
-
-static struct memtype *memtype_rb_match(struct rb_root *root,
- u64 start, u64 end, int match_type)
-{
- struct memtype *match;
-
- match = memtype_rb_lowest_match(root, start, end);
- while (match != NULL && match->start < end) {
- struct rb_node *node;
-
- if ((match_type == MEMTYPE_EXACT_MATCH) &&
- (match->start == start) && (match->end == end))
- return match;
-
- if ((match_type == MEMTYPE_END_MATCH) &&
- (match->start < start) && (match->end == end))
- return match;
-
- node = rb_next(&match->rb);
- if (node)
- match = rb_entry(node, struct memtype, rb);
- else
- match = NULL;
- }
-
- return NULL; /* Returns NULL if there is no match */
-}
-
-static int memtype_rb_check_conflict(struct rb_root *root,
- u64 start, u64 end,
- enum page_cache_mode reqtype,
- enum page_cache_mode *newtype)
-{
- struct rb_node *node;
- struct memtype *match;
- enum page_cache_mode found_type = reqtype;
-
- match = memtype_rb_lowest_match(&memtype_rbroot, start, end);
- if (match == NULL)
- goto success;
-
- if (match->type != found_type && newtype == NULL)
- goto failure;
-
- dprintk("Overlap at 0x%Lx-0x%Lx\n", match->start, match->end);
- found_type = match->type;
-
- node = rb_next(&match->rb);
- while (node) {
- match = rb_entry(node, struct memtype, rb);
-
- if (match->start >= end) /* Checked all possible matches */
- goto success;
-
- if (is_node_overlap(match, start, end) &&
- match->type != found_type) {
- goto failure;
- }
-
- node = rb_next(&match->rb);
- }
-success:
- if (newtype)
- *newtype = found_type;
-
- return 0;
-
-failure:
- pr_info("x86/PAT: %s:%d conflicting memory types %Lx-%Lx %s<->%s\n",
- current->comm, current->pid, start, end,
- cattr_name(found_type), cattr_name(match->type));
- return -EBUSY;
-}
-
-static void memtype_rb_insert(struct rb_root *root, struct memtype *newdata)
-{
- struct rb_node **node = &(root->rb_node);
- struct rb_node *parent = NULL;
-
- while (*node) {
- struct memtype *data = rb_entry(*node, struct memtype, rb);
-
- parent = *node;
- if (data->subtree_max_end < newdata->end)
- data->subtree_max_end = newdata->end;
- if (newdata->start <= data->start)
- node = &((*node)->rb_left);
- else if (newdata->start > data->start)
- node = &((*node)->rb_right);
- }
-
- newdata->subtree_max_end = newdata->end;
- rb_link_node(&newdata->rb, parent, node);
- rb_insert_augmented(&newdata->rb, root, &memtype_rb_augment_cb);
-}
-
-int rbt_memtype_check_insert(struct memtype *new,
- enum page_cache_mode *ret_type)
-{
- int err = 0;
-
- err = memtype_rb_check_conflict(&memtype_rbroot, new->start, new->end,
- new->type, ret_type);
-
- if (!err) {
- if (ret_type)
- new->type = *ret_type;
-
- new->subtree_max_end = new->end;
- memtype_rb_insert(&memtype_rbroot, new);
- }
- return err;
-}
-
-struct memtype *rbt_memtype_erase(u64 start, u64 end)
-{
- struct memtype *data;
-
- /*
- * Since the memtype_rbroot tree allows overlapping ranges,
- * rbt_memtype_erase() checks with EXACT_MATCH first, i.e. free
- * a whole node for the munmap case. If no such entry is found,
- * it then checks with END_MATCH, i.e. shrink the size of a node
- * from the end for the mremap case.
- */
- data = memtype_rb_match(&memtype_rbroot, start, end,
- MEMTYPE_EXACT_MATCH);
- if (!data) {
- data = memtype_rb_match(&memtype_rbroot, start, end,
- MEMTYPE_END_MATCH);
- if (!data)
- return ERR_PTR(-EINVAL);
- }
-
- if (data->start == start) {
- /* munmap: erase this node */
- rb_erase_augmented(&data->rb, &memtype_rbroot,
- &memtype_rb_augment_cb);
- } else {
- /* mremap: update the end value of this node */
- rb_erase_augmented(&data->rb, &memtype_rbroot,
- &memtype_rb_augment_cb);
- data->end = start;
- data->subtree_max_end = data->end;
- memtype_rb_insert(&memtype_rbroot, data);
- return NULL;
- }
-
- return data;
-}
-
-struct memtype *rbt_memtype_lookup(u64 addr)
-{
- return memtype_rb_lowest_match(&memtype_rbroot, addr, addr + PAGE_SIZE);
-}
-
-#if defined(CONFIG_DEBUG_FS)
-int rbt_memtype_copy_nth_element(struct memtype *out, loff_t pos)
-{
- struct rb_node *node;
- int i = 1;
-
- node = rb_first(&memtype_rbroot);
- while (node && pos != i) {
- node = rb_next(node);
- i++;
- }
-
- if (node) { /* pos == i */
- struct memtype *this = rb_entry(node, struct memtype, rb);
- *out = *this;
- return 0;
- } else {
- return 1;
- }
-}
-#endif
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 3e4b9035bb9a..7bd2c3a52297 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -643,8 +643,8 @@ void __native_set_fixmap(enum fixed_addresses idx, pte_t pte)
fixmaps_set++;
}
-void native_set_fixmap(enum fixed_addresses idx, phys_addr_t phys,
- pgprot_t flags)
+void native_set_fixmap(unsigned /* enum fixed_addresses */ idx,
+ phys_addr_t phys, pgprot_t flags)
{
/* Sanitize 'prot' against any unsupported bits: */
pgprot_val(flags) &= __default_kernel_pte_mask;
diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c
index 7f2140414440..44a9f068eee0 100644
--- a/arch/x86/mm/pti.c
+++ b/arch/x86/mm/pti.c
@@ -574,7 +574,7 @@ static void pti_clone_kernel_text(void)
*/
unsigned long start = PFN_ALIGN(_text);
unsigned long end_clone = (unsigned long)__end_rodata_aligned;
- unsigned long end_global = PFN_ALIGN((unsigned long)__stop___ex_table);
+ unsigned long end_global = PFN_ALIGN((unsigned long)_etext);
if (!pti_kernel_image_global_ok())
return;
diff --git a/arch/x86/mm/testmmiotrace.c b/arch/x86/mm/testmmiotrace.c
index a8bd952e136d..92153d054d6c 100644
--- a/arch/x86/mm/testmmiotrace.c
+++ b/arch/x86/mm/testmmiotrace.c
@@ -127,9 +127,9 @@ static int __init init(void)
return -ENXIO;
}
- pr_warning("WARNING: mapping %lu kB @ 0x%08lx in PCI address space, "
- "and writing 16 kB of rubbish in there.\n",
- size >> 10, mmio_address);
+ pr_warn("WARNING: mapping %lu kB @ 0x%08lx in PCI address space, "
+ "and writing 16 kB of rubbish in there.\n",
+ size >> 10, mmio_address);
do_test(size);
do_test_bulk_ioremapping();
pr_info("All done.\n");
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 991549a1c5f3..b8be18427277 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -9,9 +9,11 @@
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <linux/bpf.h>
-
+#include <linux/memory.h>
+#include <asm/extable.h>
#include <asm/set_memory.h>
#include <asm/nospec-branch.h>
+#include <asm/text-patching.h>
static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
{
@@ -96,6 +98,7 @@ static int bpf_size_to_x86_bytes(int bpf_size)
/* Pick a register outside of BPF range for JIT internal work */
#define AUX_REG (MAX_BPF_JIT_REG + 1)
+#define X86_REG_R9 (MAX_BPF_JIT_REG + 2)
/*
* The following table maps BPF registers to x86-64 registers.
@@ -104,8 +107,8 @@ static int bpf_size_to_x86_bytes(int bpf_size)
* register in load/store instructions, it always needs an
* extra byte of encoding and is callee saved.
*
- * Also x86-64 register R9 is unused. x86-64 register R10 is
- * used for blinding (if enabled).
+ * x86-64 register R9 is not used by BPF programs, but can be used by BPF
+ * trampoline. x86-64 register R10 is used for blinding (if enabled).
*/
static const int reg2hex[] = {
[BPF_REG_0] = 0, /* RAX */
@@ -121,6 +124,20 @@ static const int reg2hex[] = {
[BPF_REG_FP] = 5, /* RBP readonly */
[BPF_REG_AX] = 2, /* R10 temp register */
[AUX_REG] = 3, /* R11 temp register */
+ [X86_REG_R9] = 1, /* R9 register, 6th function argument */
+};
+
+static const int reg2pt_regs[] = {
+ [BPF_REG_0] = offsetof(struct pt_regs, ax),
+ [BPF_REG_1] = offsetof(struct pt_regs, di),
+ [BPF_REG_2] = offsetof(struct pt_regs, si),
+ [BPF_REG_3] = offsetof(struct pt_regs, dx),
+ [BPF_REG_4] = offsetof(struct pt_regs, cx),
+ [BPF_REG_5] = offsetof(struct pt_regs, r8),
+ [BPF_REG_6] = offsetof(struct pt_regs, bx),
+ [BPF_REG_7] = offsetof(struct pt_regs, r13),
+ [BPF_REG_8] = offsetof(struct pt_regs, r14),
+ [BPF_REG_9] = offsetof(struct pt_regs, r15),
};
/*
@@ -135,6 +152,7 @@ static bool is_ereg(u32 reg)
BIT(BPF_REG_7) |
BIT(BPF_REG_8) |
BIT(BPF_REG_9) |
+ BIT(X86_REG_R9) |
BIT(BPF_REG_AX));
}
@@ -186,7 +204,10 @@ struct jit_context {
#define BPF_MAX_INSN_SIZE 128
#define BPF_INSN_SAFETY 64
-#define PROLOGUE_SIZE 20
+/* Number of bytes emit_patch() needs to generate instructions */
+#define X86_PATCH_SIZE 5
+
+#define PROLOGUE_SIZE 25
/*
* Emit x86-64 prologue code for BPF program and check its size.
@@ -195,8 +216,13 @@ struct jit_context {
static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf)
{
u8 *prog = *pprog;
- int cnt = 0;
+ int cnt = X86_PATCH_SIZE;
+ /* BPF trampoline can be made to work without these nops,
+ * but let's waste 5 bytes for now and optimize later
+ */
+ memcpy(prog, ideal_nops[NOP_ATOMIC5], cnt);
+ prog += cnt;
EMIT1(0x55); /* push rbp */
EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
/* sub rsp, rounded_stack_depth */
@@ -213,6 +239,89 @@ static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf)
*pprog = prog;
}
+static int emit_patch(u8 **pprog, void *func, void *ip, u8 opcode)
+{
+ u8 *prog = *pprog;
+ int cnt = 0;
+ s64 offset;
+
+ offset = func - (ip + X86_PATCH_SIZE);
+ if (!is_simm32(offset)) {
+ pr_err("Target call %p is out of range\n", func);
+ return -ERANGE;
+ }
+ EMIT1_off32(opcode, offset);
+ *pprog = prog;
+ return 0;
+}
+
+static int emit_call(u8 **pprog, void *func, void *ip)
+{
+ return emit_patch(pprog, func, ip, 0xE8);
+}
+
+static int emit_jump(u8 **pprog, void *func, void *ip)
+{
+ return emit_patch(pprog, func, ip, 0xE9);
+}
+
+static int __bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
+ void *old_addr, void *new_addr,
+ const bool text_live)
+{
+ const u8 *nop_insn = ideal_nops[NOP_ATOMIC5];
+ u8 old_insn[X86_PATCH_SIZE];
+ u8 new_insn[X86_PATCH_SIZE];
+ u8 *prog;
+ int ret;
+
+ memcpy(old_insn, nop_insn, X86_PATCH_SIZE);
+ if (old_addr) {
+ prog = old_insn;
+ ret = t == BPF_MOD_CALL ?
+ emit_call(&prog, old_addr, ip) :
+ emit_jump(&prog, old_addr, ip);
+ if (ret)
+ return ret;
+ }
+
+ memcpy(new_insn, nop_insn, X86_PATCH_SIZE);
+ if (new_addr) {
+ prog = new_insn;
+ ret = t == BPF_MOD_CALL ?
+ emit_call(&prog, new_addr, ip) :
+ emit_jump(&prog, new_addr, ip);
+ if (ret)
+ return ret;
+ }
+
+ ret = -EBUSY;
+ mutex_lock(&text_mutex);
+ if (memcmp(ip, old_insn, X86_PATCH_SIZE))
+ goto out;
+ if (memcmp(ip, new_insn, X86_PATCH_SIZE)) {
+ if (text_live)
+ text_poke_bp(ip, new_insn, X86_PATCH_SIZE, NULL);
+ else
+ memcpy(ip, new_insn, X86_PATCH_SIZE);
+ }
+ ret = 0;
+out:
+ mutex_unlock(&text_mutex);
+ return ret;
+}
+
+int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
+ void *old_addr, void *new_addr)
+{
+ if (!is_kernel_text((long)ip) &&
+ !is_bpf_text_address((long)ip))
+ /* BPF poking in modules is not supported */
+ return -EINVAL;
+
+ return __bpf_arch_text_poke(ip, t, old_addr, new_addr, true);
+}
+
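
emit_patch() emits a 5-byte call or jmp whose rel32 displacement is measured from the end of the instruction, hence offset = func - (ip + X86_PATCH_SIZE) plus the is_simm32() range check. The same arithmetic in a standalone sketch (the little-endian store assumes an x86-like host, as the JIT itself does):

#include <stdint.h>
#include <string.h>

#define PATCH_SIZE 5	/* opcode byte + rel32, as X86_PATCH_SIZE above */

/* encode "call rel32" (0xE8) or "jmp rel32" (0xE9) at 'ip' */
static int encode_rel32(uint8_t buf[PATCH_SIZE], uint8_t opcode,
			uint64_t ip, uint64_t target)
{
	int64_t off = (int64_t)(target - (ip + PATCH_SIZE));
	int32_t rel = (int32_t)off;

	if ((int64_t)rel != off)	/* beyond +/-2 GiB: cannot encode */
		return -1;
	buf[0] = opcode;
	memcpy(&buf[1], &rel, sizeof(rel));
	return 0;
}
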
/*
* Generate the following code:
*
@@ -227,7 +336,7 @@ static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf)
* goto *(prog->bpf_func + prologue_size);
* out:
*/
-static void emit_bpf_tail_call(u8 **pprog)
+static void emit_bpf_tail_call_indirect(u8 **pprog)
{
u8 *prog = *pprog;
int label1, label2, label3;
@@ -294,6 +403,68 @@ static void emit_bpf_tail_call(u8 **pprog)
*pprog = prog;
}
+static void emit_bpf_tail_call_direct(struct bpf_jit_poke_descriptor *poke,
+ u8 **pprog, int addr, u8 *image)
+{
+ u8 *prog = *pprog;
+ int cnt = 0;
+
+ /*
+ * if (tail_call_cnt > MAX_TAIL_CALL_CNT)
+ * goto out;
+ */
+ EMIT2_off32(0x8B, 0x85, -36 - MAX_BPF_STACK); /* mov eax, dword ptr [rbp - 548] */
+ EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT); /* cmp eax, MAX_TAIL_CALL_CNT */
+ EMIT2(X86_JA, 14); /* ja out */
+ EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */
+ EMIT2_off32(0x89, 0x85, -36 - MAX_BPF_STACK); /* mov dword ptr [rbp - 548], eax */
+
+ poke->ip = image + (addr - X86_PATCH_SIZE);
+ poke->adj_off = PROLOGUE_SIZE;
+
+ memcpy(prog, ideal_nops[NOP_ATOMIC5], X86_PATCH_SIZE);
+ prog += X86_PATCH_SIZE;
+ /* out: */
+
+ *pprog = prog;
+}
+
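The direct tail call above plants a 5-byte NOP and records where it lives: the 'addr' argument (addrs[i]) is the offset just past the emitted bytes, so the NOP starts at addr - X86_PATCH_SIZE, and adj_off = PROLOGUE_SIZE lets the eventual jump skip the target program's prologue. A small sketch of that bookkeeping with hypothetical offsets:

#include <stdio.h>

#define X86_PATCH_SIZE	5
#define PROLOGUE_SIZE	25

int main(void)
{
	int addr = 100;		/* addrs[i]: offset just past the NOP5 */
	int target_off = 2048;	/* hypothetical start of the target prog */

	int poke_ip_off = addr - X86_PATCH_SIZE;	/* NOP5 begins here */
	int jump_dest_off = target_off + PROLOGUE_SIZE;	/* skip prologue */

	printf("patch at +%d, jump to +%d\n", poke_ip_off, jump_dest_off);
	/* prints: patch at +95, jump to +2073 */
	return 0;
}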
+static void bpf_tail_call_direct_fixup(struct bpf_prog *prog)
+{
+ struct bpf_jit_poke_descriptor *poke;
+ struct bpf_array *array;
+ struct bpf_prog *target;
+ int i, ret;
+
+ for (i = 0; i < prog->aux->size_poke_tab; i++) {
+ poke = &prog->aux->poke_tab[i];
+ WARN_ON_ONCE(READ_ONCE(poke->ip_stable));
+
+ if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
+ continue;
+
+ array = container_of(poke->tail_call.map, struct bpf_array, map);
+ mutex_lock(&array->aux->poke_mutex);
+ target = array->ptrs[poke->tail_call.key];
+ if (target) {
+ /* Plain memcpy is used when the image is not yet live
+ * and not yet locked as read-only. Once the poke
+ * location is active (poke->ip_stable), any parallel
+ * bpf_arch_text_poke() might still occur on the
+ * read-write image until we finally lock it as
+ * read-only. Both modifications of the given image
+ * are done under text_mutex to avoid interference.
+ */
+ ret = __bpf_arch_text_poke(poke->ip, BPF_MOD_JUMP, NULL,
+ (u8 *)target->bpf_func +
+ poke->adj_off, false);
+ BUG_ON(ret < 0);
+ }
+ WRITE_ONCE(poke->ip_stable, true);
+ mutex_unlock(&array->aux->poke_mutex);
+ }
+}
+
static void emit_mov_imm32(u8 **pprog, bool sign_propagate,
u32 dst_reg, const u32 imm32)
{
@@ -377,6 +548,96 @@ static void emit_mov_reg(u8 **pprog, bool is64, u32 dst_reg, u32 src_reg)
*pprog = prog;
}
+/* LDX: dst_reg = *(u8*)(src_reg + off) */
+static void emit_ldx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
+{
+ u8 *prog = *pprog;
+ int cnt = 0;
+
+ switch (size) {
+ case BPF_B:
+ /* Emit 'movzx rax, byte ptr [rax + off]' */
+ EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6);
+ break;
+ case BPF_H:
+ /* Emit 'movzx rax, word ptr [rax + off]' */
+ EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7);
+ break;
+ case BPF_W:
+ /* Emit 'mov eax, dword ptr [rax+0x14]' */
+ if (is_ereg(dst_reg) || is_ereg(src_reg))
+ EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B);
+ else
+ EMIT1(0x8B);
+ break;
+ case BPF_DW:
+ /* Emit 'mov rax, qword ptr [rax+0x14]' */
+ EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B);
+ break;
+ }
+ /*
+ * If insn->off == 0 we can save one extra byte, but
+ * the special case of x86 R13, which always needs an
+ * offset, is not worth the hassle
+ */
+ if (is_imm8(off))
+ EMIT2(add_2reg(0x40, src_reg, dst_reg), off);
+ else
+ EMIT1_off32(add_2reg(0x80, src_reg, dst_reg), off);
+ *pprog = prog;
+}
+
+/* STX: *(u8*)(dst_reg + off) = src_reg */
+static void emit_stx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
+{
+ u8 *prog = *pprog;
+ int cnt = 0;
+
+ switch (size) {
+ case BPF_B:
+ /* Emit 'mov byte ptr [rax + off], al' */
+ if (is_ereg(dst_reg) || is_ereg(src_reg) ||
+ /* We have to add extra byte for x86 SIL, DIL regs */
+ src_reg == BPF_REG_1 || src_reg == BPF_REG_2)
+ EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88);
+ else
+ EMIT1(0x88);
+ break;
+ case BPF_H:
+ if (is_ereg(dst_reg) || is_ereg(src_reg))
+ EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89);
+ else
+ EMIT2(0x66, 0x89);
+ break;
+ case BPF_W:
+ if (is_ereg(dst_reg) || is_ereg(src_reg))
+ EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89);
+ else
+ EMIT1(0x89);
+ break;
+ case BPF_DW:
+ EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89);
+ break;
+ }
+ if (is_imm8(off))
+ EMIT2(add_2reg(0x40, dst_reg, src_reg), off);
+ else
+ EMIT1_off32(add_2reg(0x80, dst_reg, src_reg), off);
+ *pprog = prog;
+}
+
+static bool ex_handler_bpf(const struct exception_table_entry *x,
+ struct pt_regs *regs, int trapnr,
+ unsigned long error_code, unsigned long fault_addr)
+{
+ u32 reg = x->fixup >> 8;
+
+ /* jump over faulting load and clear dest register */
+ *(unsigned long *)((void *)regs + reg) = 0;
+ regs->ip += x->fixup & 0xff;
+ return true;
+}
+
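ex_handler_bpf() above decodes a fixup word packing two facts: the low 8 bits hold the faulting instruction's length (added to regs->ip to skip it) and the upper bits hold the byte offset of the destination register inside pt_regs (zeroed on a fault). A sketch of packing and unpacking that word against a toy regs struct; the layout is illustrative, not the kernel's pt_regs:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

struct toy_regs {		/* stand-in for pt_regs */
	unsigned long bx, ip;
};

int main(void)
{
	/* Pack: a 4-byte insn whose destination register sits at bx */
	uint32_t fixup = 4 | ((uint32_t)offsetof(struct toy_regs, bx) << 8);

	/* Unpack, the same way ex_handler_bpf() does */
	struct toy_regs regs = { .bx = 0xdead, .ip = 0x1000 };
	uint32_t reg_off = fixup >> 8;

	*(unsigned long *)((char *)&regs + reg_off) = 0; /* zero dest reg */
	regs.ip += fixup & 0xff;	/* jump over the faulting insn */

	printf("bx=%lu ip=0x%lx\n", regs.bx, regs.ip);	/* bx=0 ip=0x1004 */
	return 0;
}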
static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
int oldproglen, struct jit_context *ctx)
{
@@ -384,7 +645,7 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
int insn_cnt = bpf_prog->len;
bool seen_exit = false;
u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
- int i, cnt = 0;
+ int i, cnt = 0, excnt = 0;
int proglen = 0;
u8 *prog = temp;
@@ -747,64 +1008,64 @@ st: if (is_imm8(insn->off))
/* STX: *(u8*)(dst_reg + off) = src_reg */
case BPF_STX | BPF_MEM | BPF_B:
- /* Emit 'mov byte ptr [rax + off], al' */
- if (is_ereg(dst_reg) || is_ereg(src_reg) ||
- /* We have to add extra byte for x86 SIL, DIL regs */
- src_reg == BPF_REG_1 || src_reg == BPF_REG_2)
- EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88);
- else
- EMIT1(0x88);
- goto stx;
case BPF_STX | BPF_MEM | BPF_H:
- if (is_ereg(dst_reg) || is_ereg(src_reg))
- EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89);
- else
- EMIT2(0x66, 0x89);
- goto stx;
case BPF_STX | BPF_MEM | BPF_W:
- if (is_ereg(dst_reg) || is_ereg(src_reg))
- EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89);
- else
- EMIT1(0x89);
- goto stx;
case BPF_STX | BPF_MEM | BPF_DW:
- EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89);
-stx: if (is_imm8(insn->off))
- EMIT2(add_2reg(0x40, dst_reg, src_reg), insn->off);
- else
- EMIT1_off32(add_2reg(0x80, dst_reg, src_reg),
- insn->off);
+ emit_stx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
break;
/* LDX: dst_reg = *(u8*)(src_reg + off) */
case BPF_LDX | BPF_MEM | BPF_B:
- /* Emit 'movzx rax, byte ptr [rax + off]' */
- EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6);
- goto ldx;
+ case BPF_LDX | BPF_PROBE_MEM | BPF_B:
case BPF_LDX | BPF_MEM | BPF_H:
- /* Emit 'movzx rax, word ptr [rax + off]' */
- EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7);
- goto ldx;
+ case BPF_LDX | BPF_PROBE_MEM | BPF_H:
case BPF_LDX | BPF_MEM | BPF_W:
- /* Emit 'mov eax, dword ptr [rax+0x14]' */
- if (is_ereg(dst_reg) || is_ereg(src_reg))
- EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B);
- else
- EMIT1(0x8B);
- goto ldx;
+ case BPF_LDX | BPF_PROBE_MEM | BPF_W:
case BPF_LDX | BPF_MEM | BPF_DW:
- /* Emit 'mov rax, qword ptr [rax+0x14]' */
- EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B);
-ldx: /*
- * If insn->off == 0 we can save one extra byte, but
- * special case of x86 R13 which always needs an offset
- * is not worth the hassle
- */
- if (is_imm8(insn->off))
- EMIT2(add_2reg(0x40, src_reg, dst_reg), insn->off);
- else
- EMIT1_off32(add_2reg(0x80, src_reg, dst_reg),
- insn->off);
+ case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
+ emit_ldx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
+ if (BPF_MODE(insn->code) == BPF_PROBE_MEM) {
+ struct exception_table_entry *ex;
+ u8 *_insn = image + proglen;
+ s64 delta;
+
+ if (!bpf_prog->aux->extable)
+ break;
+
+ if (excnt >= bpf_prog->aux->num_exentries) {
+ pr_err("ex gen bug\n");
+ return -EFAULT;
+ }
+ ex = &bpf_prog->aux->extable[excnt++];
+
+ delta = _insn - (u8 *)&ex->insn;
+ if (!is_simm32(delta)) {
+ pr_err("extable->insn doesn't fit into 32-bit\n");
+ return -EFAULT;
+ }
+ ex->insn = delta;
+
+ delta = (u8 *)ex_handler_bpf - (u8 *)&ex->handler;
+ if (!is_simm32(delta)) {
+ pr_err("extable->handler doesn't fit into 32-bit\n");
+ return -EFAULT;
+ }
+ ex->handler = delta;
+
+ if (dst_reg > BPF_REG_9) {
+ pr_err("verifier error\n");
+ return -EFAULT;
+ }
+ /*
+ * Compute the size of the x86 insn and its destination x86 register.
+ * ex_handler_bpf() will use the lower 8 bits to adjust
+ * pt_regs->ip to jump over this x86 instruction
+ * and the upper bits to figure out which pt_regs field to zero out.
+ * End result: the 4-byte x86 insn "mov rbx, qword ptr [rax+0x14]"
+ * will be skipped and rbx will be zero-initialized.
+ */
+ ex->fixup = (prog - temp) | (reg2pt_regs[dst_reg] << 8);
+ }
break;
/* STX XADD: lock *(u32*)(dst_reg + off) += src_reg */
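Both extable fields written in the hunk above are 32-bit self-relative offsets: each stores its target minus the address of the field itself, which is why every delta is checked with is_simm32(). A minimal sketch of encoding and resolving such an entry, assuming the usual two-s32 layout (the function-pointer casts are a common compiler extension):

#include <stdint.h>
#include <stdio.h>

struct extable_entry {		/* same shape as the relative extable */
	int32_t insn;		/* faulting insn, relative to &insn */
	int32_t handler;	/* handler, relative to &handler */
};

static void *resolve(const int32_t *field)
{
	return (char *)field + *field;	/* self-relative decode */
}

static void fault_handler(void) { }

int main(void)
{
	static uint8_t text[64];	/* stand-in for JITed code */
	static struct extable_entry ex;

	ex.insn = (int32_t)((char *)&text[10] - (char *)&ex.insn);
	ex.handler = (int32_t)((char *)fault_handler - (char *)&ex.handler);

	printf("insn ok: %d, handler ok: %d\n",
	       resolve(&ex.insn) == (void *)&text[10],
	       resolve(&ex.handler) == (void *)fault_handler);	/* 1, 1 */
	return 0;
}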
@@ -827,17 +1088,16 @@ xadd: if (is_imm8(insn->off))
/* call */
case BPF_JMP | BPF_CALL:
func = (u8 *) __bpf_call_base + imm32;
- jmp_offset = func - (image + addrs[i]);
- if (!imm32 || !is_simm32(jmp_offset)) {
- pr_err("unsupported BPF func %d addr %p image %p\n",
- imm32, func, image);
+ if (!imm32 || emit_call(&prog, func, image + addrs[i - 1]))
return -EINVAL;
- }
- EMIT1_off32(0xE8, jmp_offset);
break;
case BPF_JMP | BPF_TAIL_CALL:
- emit_bpf_tail_call(&prog);
+ if (imm32)
+ emit_bpf_tail_call_direct(&bpf_prog->aux->poke_tab[imm32 - 1],
+ &prog, addrs[i], image);
+ else
+ emit_bpf_tail_call_indirect(&prog);
break;
/* cond jump */
@@ -909,6 +1169,16 @@ xadd: if (is_imm8(insn->off))
case BPF_JMP32 | BPF_JSLT | BPF_K:
case BPF_JMP32 | BPF_JSGE | BPF_K:
case BPF_JMP32 | BPF_JSLE | BPF_K:
+ /* test dst_reg, dst_reg to save one extra byte */
+ if (imm32 == 0) {
+ if (BPF_CLASS(insn->code) == BPF_JMP)
+ EMIT1(add_2mod(0x48, dst_reg, dst_reg));
+ else if (is_ereg(dst_reg))
+ EMIT1(add_2mod(0x40, dst_reg, dst_reg));
+ EMIT2(0x85, add_2reg(0xC0, dst_reg, dst_reg));
+ goto emit_cond_jmp;
+ }
+
/* cmp dst_reg, imm8/32 */
if (BPF_CLASS(insn->code) == BPF_JMP)
EMIT1(add_1mod(0x48, dst_reg));
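The imm32 == 0 fast path above works because 'test dst, dst' sets the same relevant flags as comparing dst against zero while dropping the immediate byte. A rough illustration with hand-assembled encodings for rax (byte values per the x86-64 manual):

#include <stdio.h>

int main(void)
{
	/* cmp rax, 0x0: REX.W 83 /7 ib -> 48 83 F8 00 (4 bytes) */
	unsigned char cmp_insn[]  = { 0x48, 0x83, 0xF8, 0x00 };
	/* test rax, rax: REX.W 85 /r  -> 48 85 C0    (3 bytes) */
	unsigned char test_insn[] = { 0x48, 0x85, 0xC0 };

	printf("cmp: %zu bytes, test: %zu bytes\n",
	       sizeof(cmp_insn), sizeof(test_insn));	/* 4 vs 3 */
	return 0;
}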
@@ -1048,9 +1318,218 @@ emit_jmp:
addrs[i] = proglen;
prog = temp;
}
+
+ if (image && excnt != bpf_prog->aux->num_exentries) {
+ pr_err("extable is not populated\n");
+ return -EFAULT;
+ }
return proglen;
}
+static void save_regs(struct btf_func_model *m, u8 **prog, int nr_args,
+ int stack_size)
+{
+ int i;
+ /* Store function arguments to stack.
+ * For a function that accepts two pointers the sequence will be:
+ * mov QWORD PTR [rbp-0x10],rdi
+ * mov QWORD PTR [rbp-0x8],rsi
+ */
+ for (i = 0; i < min(nr_args, 6); i++)
+ emit_stx(prog, bytes_to_bpf_size(m->arg_size[i]),
+ BPF_REG_FP,
+ i == 5 ? X86_REG_R9 : BPF_REG_1 + i,
+ -(stack_size - i * 8));
+}
+
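save_regs() and restore_regs() lean on the System V x86-64 calling convention: the first six integer arguments arrive in rdi, rsi, rdx, rcx, r8 and r9. BPF_REG_1..BPF_REG_5 already map to the first five, which is why only the sixth needs the synthetic X86_REG_R9 index. A small sketch of the spill-slot layout they produce, with a hypothetical argument count:

#include <stdio.h>

int main(void)
{
	const char *arg_regs[] = { "rdi", "rsi", "rdx", "rcx", "r8", "r9" };
	int nr_args = 2, stack_size = nr_args * 8;

	/* Mirrors save_regs(): arg i spills to [rbp - (stack_size - i*8)] */
	for (int i = 0; i < nr_args; i++)
		printf("mov [rbp-0x%x], %s\n",
		       stack_size - i * 8, arg_regs[i]);
	/* prints: mov [rbp-0x10], rdi / mov [rbp-0x8], rsi */
	return 0;
}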
+static void restore_regs(struct btf_func_model *m, u8 **prog, int nr_args,
+ int stack_size)
+{
+ int i;
+
+ /* Restore function arguments from stack.
+ * For a function that accepts two pointers the sequence will be:
+ * EMIT4(0x48, 0x8B, 0x7D, 0xF0); mov rdi,QWORD PTR [rbp-0x10]
+ * EMIT4(0x48, 0x8B, 0x75, 0xF8); mov rsi,QWORD PTR [rbp-0x8]
+ */
+ for (i = 0; i < min(nr_args, 6); i++)
+ emit_ldx(prog, bytes_to_bpf_size(m->arg_size[i]),
+ i == 5 ? X86_REG_R9 : BPF_REG_1 + i,
+ BPF_REG_FP,
+ -(stack_size - i * 8));
+}
+
+static int invoke_bpf(struct btf_func_model *m, u8 **pprog,
+ struct bpf_prog **progs, int prog_cnt, int stack_size)
+{
+ u8 *prog = *pprog;
+ int cnt = 0, i;
+
+ for (i = 0; i < prog_cnt; i++) {
+ if (emit_call(&prog, __bpf_prog_enter, prog))
+ return -EINVAL;
+ /* remember prog start time returned by __bpf_prog_enter */
+ emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0);
+
+ /* arg1: lea rdi, [rbp - stack_size] */
+ EMIT4(0x48, 0x8D, 0x7D, -stack_size);
+ /* arg2: progs[i]->insnsi for interpreter */
+ if (!progs[i]->jited)
+ emit_mov_imm64(&prog, BPF_REG_2,
+ (long) progs[i]->insnsi >> 32,
+ (u32) (long) progs[i]->insnsi);
+ /* call JITed bpf program or interpreter */
+ if (emit_call(&prog, progs[i]->bpf_func, prog))
+ return -EINVAL;
+
+ /* arg1: mov rdi, progs[i] */
+ emit_mov_imm64(&prog, BPF_REG_1, (long) progs[i] >> 32,
+ (u32) (long) progs[i]);
+ /* arg2: mov rsi, rbx <- start time in nsec */
+ emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6);
+ if (emit_call(&prog, __bpf_prog_exit, prog))
+ return -EINVAL;
+ }
+ *pprog = prog;
+ return 0;
+}
+
+/* Example:
+ * __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
+ * its 'struct btf_func_model' will be nr_args=2
+ * The assembly code when eth_type_trans is executing after trampoline:
+ *
+ * push rbp
+ * mov rbp, rsp
+ * sub rsp, 16 // space for skb and dev
+ * push rbx // temp regs to pass start time
+ * mov qword ptr [rbp - 16], rdi // save skb pointer to stack
+ * mov qword ptr [rbp - 8], rsi // save dev pointer to stack
+ * call __bpf_prog_enter // rcu_read_lock and preempt_disable
+ * mov rbx, rax // remember start time if bpf stats are enabled
+ * lea rdi, [rbp - 16] // R1==ctx of bpf prog
+ * call addr_of_jited_FENTRY_prog
+ * movabsq rdi, 64bit_addr_of_struct_bpf_prog // unused if bpf stats are off
+ * mov rsi, rbx // prog start time
+ * call __bpf_prog_exit // rcu_read_unlock, preempt_enable and stats math
+ * mov rdi, qword ptr [rbp - 16] // restore skb pointer from stack
+ * mov rsi, qword ptr [rbp - 8] // restore dev pointer from stack
+ * pop rbx
+ * leave
+ * ret
+ *
+ * eth_type_trans has a 5-byte nop at the beginning. These 5 bytes will be
+ * replaced with 'call generated_bpf_trampoline'. When it returns,
+ * eth_type_trans will continue executing with the original skb and dev pointers.
+ *
+ * The assembly code when eth_type_trans is called from trampoline:
+ *
+ * push rbp
+ * mov rbp, rsp
+ * sub rsp, 24 // space for skb, dev, return value
+ * push rbx // temp regs to pass start time
+ * mov qword ptr [rbp - 24], rdi // save skb pointer to stack
+ * mov qword ptr [rbp - 16], rsi // save dev pointer to stack
+ * call __bpf_prog_enter // rcu_read_lock and preempt_disable
+ * mov rbx, rax // remember start time if bpf stats are enabled
+ * lea rdi, [rbp - 24] // R1==ctx of bpf prog
+ * call addr_of_jited_FENTRY_prog // bpf prog can access skb and dev
+ * movabsq rdi, 64bit_addr_of_struct_bpf_prog // unused if bpf stats are off
+ * mov rsi, rbx // prog start time
+ * call __bpf_prog_exit // rcu_read_unlock, preempt_enable and stats math
+ * mov rdi, qword ptr [rbp - 24] // restore skb pointer from stack
+ * mov rsi, qword ptr [rbp - 16] // restore dev pointer from stack
+ * call eth_type_trans+5 // execute body of eth_type_trans
+ * mov qword ptr [rbp - 8], rax // save return value
+ * call __bpf_prog_enter // rcu_read_lock and preempt_disable
+ * mov rbx, rax // remember start time if bpf stats are enabled
+ * lea rdi, [rbp - 24] // R1==ctx of bpf prog
+ * call addr_of_jited_FEXIT_prog // bpf prog can access skb, dev, return value
+ * movabsq rdi, 64bit_addr_of_struct_bpf_prog // unused if bpf stats are off
+ * mov rsi, rbx // prog start time
+ * call __bpf_prog_exit // rcu_read_unlock, preempt_enable and stats math
+ * mov rax, qword ptr [rbp - 8] // restore eth_type_trans's return value
+ * pop rbx
+ * leave
+ * add rsp, 8 // skip eth_type_trans's frame
+ * ret // return to its caller
+ */
+int arch_prepare_bpf_trampoline(void *image, struct btf_func_model *m, u32 flags,
+ struct bpf_prog **fentry_progs, int fentry_cnt,
+ struct bpf_prog **fexit_progs, int fexit_cnt,
+ void *orig_call)
+{
+ int cnt = 0, nr_args = m->nr_args;
+ int stack_size = nr_args * 8;
+ u8 *prog;
+
+ /* x86-64 supports up to 6 arguments. 7+ can be added in the future */
+ if (nr_args > 6)
+ return -ENOTSUPP;
+
+ if ((flags & BPF_TRAMP_F_RESTORE_REGS) &&
+ (flags & BPF_TRAMP_F_SKIP_FRAME))
+ return -EINVAL;
+
+ if (flags & BPF_TRAMP_F_CALL_ORIG)
+ stack_size += 8; /* room for return value of orig_call */
+
+ if (flags & BPF_TRAMP_F_SKIP_FRAME)
+ /* skip the patched call instruction and point orig_call to the
+ * actual body of the kernel function.
+ */
+ orig_call += X86_PATCH_SIZE;
+
+ prog = image;
+
+ EMIT1(0x55); /* push rbp */
+ EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
+ EMIT4(0x48, 0x83, 0xEC, stack_size); /* sub rsp, stack_size */
+ EMIT1(0x53); /* push rbx */
+
+ save_regs(m, &prog, nr_args, stack_size);
+
+ if (fentry_cnt)
+ if (invoke_bpf(m, &prog, fentry_progs, fentry_cnt, stack_size))
+ return -EINVAL;
+
+ if (flags & BPF_TRAMP_F_CALL_ORIG) {
+ if (fentry_cnt)
+ restore_regs(m, &prog, nr_args, stack_size);
+
+ /* call original function */
+ if (emit_call(&prog, orig_call, prog))
+ return -EINVAL;
+ /* remember return value on the stack for the bpf prog to access */
+ emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
+ }
+
+ if (fexit_cnt)
+ if (invoke_bpf(m, &prog, fexit_progs, fexit_cnt, stack_size))
+ return -EINVAL;
+
+ if (flags & BPF_TRAMP_F_RESTORE_REGS)
+ restore_regs(m, &prog, nr_args, stack_size);
+
+ if (flags & BPF_TRAMP_F_CALL_ORIG)
+ /* restore original return value back into RAX */
+ emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);
+
+ EMIT1(0x5B); /* pop rbx */
+ EMIT1(0xC9); /* leave */
+ if (flags & BPF_TRAMP_F_SKIP_FRAME)
+ /* skip our return address and return to parent */
+ EMIT4(0x48, 0x83, 0xC4, 8); /* add rsp, 8 */
+ EMIT1(0xC3); /* ret */
+ /* One half of the page holds the currently active trampoline.
+ * The other half is the staging area for the next trampoline.
+ * Make sure the trampoline generation logic doesn't overflow.
+ */
+ if (WARN_ON_ONCE(prog - (u8 *)image > PAGE_SIZE / 2 - BPF_INSN_SAFETY))
+ return -EFAULT;
+ return 0;
+}
+
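To make the frame arithmetic in arch_prepare_bpf_trampoline() concrete: with nr_args = 2 and BPF_TRAMP_F_CALL_ORIG set, stack_size becomes 24, putting the arguments at rbp-24 and rbp-16 and the saved return value at rbp-8, exactly as the comment block above shows. A sketch of that computation; the flag value used here is hypothetical:

#include <stdio.h>

#define TRAMP_F_CALL_ORIG 0x2	/* illustrative flag value only */

int main(void)
{
	int nr_args = 2, flags = TRAMP_F_CALL_ORIG;
	int stack_size = nr_args * 8;

	if (flags & TRAMP_F_CALL_ORIG)
		stack_size += 8;	/* room for orig_call's return value */

	for (int i = 0; i < nr_args; i++)
		printf("arg%d at [rbp-%d]\n", i, stack_size - i * 8);
	if (flags & TRAMP_F_CALL_ORIG)
		printf("retval at [rbp-8]\n");
	return 0;
}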
struct x64_jit_data {
struct bpf_binary_header *header;
int *addrs;
@@ -1148,12 +1627,24 @@ out_image:
break;
}
if (proglen == oldproglen) {
- header = bpf_jit_binary_alloc(proglen, &image,
- 1, jit_fill_hole);
+ /*
+ * The number of entries in extable is the number of BPF_LDX
+ * insns that access kernel memory via "pointer to BTF type".
+ * The verifier changed their opcode from LDX|MEM|size
+ * to LDX|PROBE_MEM|size to make JITing easier.
+ */
+ u32 align = __alignof__(struct exception_table_entry);
+ u32 extable_size = prog->aux->num_exentries *
+ sizeof(struct exception_table_entry);
+
+ /* allocate module memory for x86 insns and extable */
+ header = bpf_jit_binary_alloc(roundup(proglen, align) + extable_size,
+ &image, align, jit_fill_hole);
if (!header) {
prog = orig_prog;
goto out_addrs;
}
+ prog->aux->extable = (void *) image + roundup(proglen, align);
}
oldproglen = proglen;
cond_resched();
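The allocation change above co-locates the exception table with the JITed code: proglen is rounded up to the extable entry alignment and the table begins immediately after. A sketch of the size and offset math with hypothetical numbers:

#include <stdio.h>

#define ROUNDUP(x, a)	(((x) + (a) - 1) / (a) * (a))

int main(void)
{
	int proglen = 1001;	/* hypothetical JITed code size */
	int num_exentries = 3;	/* one per PROBE_MEM load */
	int entry_size = 8;	/* two s32 fields per entry */
	int align = 4;		/* __alignof__ of an entry */

	int extable_off = ROUNDUP(proglen, align);
	int total = extable_off + num_exentries * entry_size;

	printf("alloc %d bytes, extable at +%d\n", total, extable_off);
	/* prints: alloc 1028 bytes, extable at +1004 */
	return 0;
}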
@@ -1164,6 +1655,7 @@ out_image:
if (image) {
if (!prog->is_func || extra_pass) {
+ bpf_tail_call_direct_fixup(prog);
bpf_jit_binary_lock_ro(header);
} else {
jit_data->addrs = addrs;
diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h
index 71e8a67337e2..276cf79b5d24 100644
--- a/arch/x86/oprofile/op_x86_model.h
+++ b/arch/x86/oprofile/op_x86_model.h
@@ -67,13 +67,13 @@ static inline void op_x86_warn_in_use(int counter)
* cannot be monitored by any other counter, contact your
* hardware or BIOS vendor.
*/
- pr_warning("oprofile: counter #%d on cpu #%d may already be used\n",
- counter, smp_processor_id());
+ pr_warn("oprofile: counter #%d on cpu #%d may already be used\n",
+ counter, smp_processor_id());
}
static inline void op_x86_warn_reserved(int counter)
{
- pr_warning("oprofile: counter #%d is already reserved\n", counter);
+ pr_warn("oprofile: counter #%d is already reserved\n", counter);
}
extern u64 op_x86_get_ctrl(struct op_x86_model_spec const *model,
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index 425e025341db..38d44f36d5ed 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -128,6 +128,9 @@ void __init efi_find_mirror(void)
efi_memory_desc_t *md;
u64 mirror_size = 0, total_size = 0;
+ if (!efi_enabled(EFI_MEMMAP))
+ return;
+
for_each_efi_memory_desc(md) {
unsigned long long start = md->phys_addr;
unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;
@@ -145,14 +148,18 @@ void __init efi_find_mirror(void)
/*
* Tell the kernel about the EFI memory map. This might include
- * more than the max 128 entries that can fit in the e820 legacy
- * (zeropage) memory map.
+ * more than the max 128 entries that can fit in the passed-in e820
+ * legacy (zeropage) memory map, but the kernel's e820 table can hold
+ * E820_MAX_ENTRIES.
*/
static void __init do_add_efi_memmap(void)
{
efi_memory_desc_t *md;
+ if (!efi_enabled(EFI_MEMMAP))
+ return;
+
for_each_efi_memory_desc(md) {
unsigned long long start = md->phys_addr;
unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;
@@ -164,7 +171,10 @@ static void __init do_add_efi_memmap(void)
case EFI_BOOT_SERVICES_CODE:
case EFI_BOOT_SERVICES_DATA:
case EFI_CONVENTIONAL_MEMORY:
- if (md->attribute & EFI_MEMORY_WB)
+ if (efi_soft_reserve_enabled()
+ && (md->attribute & EFI_MEMORY_SP))
+ e820_type = E820_TYPE_SOFT_RESERVED;
+ else if (md->attribute & EFI_MEMORY_WB)
e820_type = E820_TYPE_RAM;
else
e820_type = E820_TYPE_RESERVED;
@@ -190,11 +200,36 @@ static void __init do_add_efi_memmap(void)
e820_type = E820_TYPE_RESERVED;
break;
}
+
e820__range_add(start, size, e820_type);
}
e820__update_table(e820_table);
}
+/*
+ * Given that add_efi_memmap defaults to 0 and there is no alternative
+ * e820 mechanism for soft-reserved memory, import the full EFI memory
+ * map if soft reservations are present and enabled. Otherwise, the
+ * mechanism to disable the kernel's consideration of EFI_MEMORY_SP is
+ * the efi=nosoftreserve option.
+ */
+static bool do_efi_soft_reserve(void)
+{
+ efi_memory_desc_t *md;
+
+ if (!efi_enabled(EFI_MEMMAP))
+ return false;
+
+ if (!efi_soft_reserve_enabled())
+ return false;
+
+ for_each_efi_memory_desc(md)
+ if (md->type == EFI_CONVENTIONAL_MEMORY &&
+ (md->attribute & EFI_MEMORY_SP))
+ return true;
+ return false;
+}
+
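do_efi_soft_reserve() answers one question: while soft reservations are enabled, does any EFI_CONVENTIONAL_MEMORY descriptor carry the EFI_MEMORY_SP (specific purpose) attribute? A hedged user-space sketch of the same scan over a toy descriptor array; the struct is illustrative rather than the EFI ABI, though the type number and attribute bit follow the UEFI spec:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EFI_CONVENTIONAL_MEMORY	7		/* UEFI memory type */
#define EFI_MEMORY_SP		(1ULL << 18)	/* specific-purpose bit */

struct toy_md {
	uint32_t type;
	uint64_t attribute;
};

static bool want_soft_reserve(const struct toy_md *mds, int n,
			      bool soft_reserve_enabled)
{
	if (!soft_reserve_enabled)
		return false;
	for (int i = 0; i < n; i++)
		if (mds[i].type == EFI_CONVENTIONAL_MEMORY &&
		    (mds[i].attribute & EFI_MEMORY_SP))
			return true;
	return false;
}

int main(void)
{
	struct toy_md map[] = {
		{ EFI_CONVENTIONAL_MEMORY, 0 },
		{ EFI_CONVENTIONAL_MEMORY, EFI_MEMORY_SP },
	};

	printf("%d\n", want_soft_reserve(map, 2, true));	/* 1 */
	return 0;
}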
int __init efi_memblock_x86_reserve_range(void)
{
struct efi_info *e = &boot_params.efi_info;
@@ -224,9 +259,11 @@ int __init efi_memblock_x86_reserve_range(void)
if (rv)
return rv;
- if (add_efi_memmap)
+ if (add_efi_memmap || do_efi_soft_reserve())
do_add_efi_memmap();
+ efi_fake_memmap_early();
+
WARN(efi.memmap.desc_version != 1,
"Unexpected EFI_MEMORY_DESCRIPTOR version %ld",
efi.memmap.desc_version);
@@ -779,6 +816,15 @@ static bool should_map_region(efi_memory_desc_t *md)
return false;
/*
+ * EFI specific purpose memory may be reserved by default
+ * depending on kernel config and boot options.
+ */
+ if (md->type == EFI_CONVENTIONAL_MEMORY &&
+ efi_soft_reserve_enabled() &&
+ (md->attribute & EFI_MEMORY_SP))
+ return false;
+
+ /*
* Map all of RAM so that we can access arguments in the 1:1
* mapping when making EFI runtime calls.
*/
diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
index ab2e91e76894..eed8b5b441f8 100644
--- a/arch/x86/platform/efi/efi_stub_32.S
+++ b/arch/x86/platform/efi/efi_stub_32.S
@@ -22,7 +22,7 @@
*/
.text
-ENTRY(efi_call_phys)
+SYM_FUNC_START(efi_call_phys)
/*
* 0. The function can only be called in Linux kernel. So CS has been
* set to 0x0010, DS and SS have been set to 0x0018. In EFI, I found
@@ -114,7 +114,7 @@ ENTRY(efi_call_phys)
movl (%edx), %ecx
pushl %ecx
ret
-ENDPROC(efi_call_phys)
+SYM_FUNC_END(efi_call_phys)
.previous
.data
diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
index 74628ec78f29..b1d2313fe3bf 100644
--- a/arch/x86/platform/efi/efi_stub_64.S
+++ b/arch/x86/platform/efi/efi_stub_64.S
@@ -39,7 +39,7 @@
mov %rsi, %cr0; \
mov (%rsp), %rsp
-ENTRY(efi_call)
+SYM_FUNC_START(efi_call)
pushq %rbp
movq %rsp, %rbp
SAVE_XMM
@@ -55,4 +55,4 @@ ENTRY(efi_call)
RESTORE_XMM
popq %rbp
ret
-ENDPROC(efi_call)
+SYM_FUNC_END(efi_call)
diff --git a/arch/x86/platform/efi/efi_thunk_64.S b/arch/x86/platform/efi/efi_thunk_64.S
index 46c58b08739c..3189f1394701 100644
--- a/arch/x86/platform/efi/efi_thunk_64.S
+++ b/arch/x86/platform/efi/efi_thunk_64.S
@@ -25,7 +25,7 @@
.text
.code64
-ENTRY(efi64_thunk)
+SYM_FUNC_START(efi64_thunk)
push %rbp
push %rbx
@@ -60,14 +60,14 @@ ENTRY(efi64_thunk)
pop %rbx
pop %rbp
retq
-ENDPROC(efi64_thunk)
+SYM_FUNC_END(efi64_thunk)
/*
* We run this function from the 1:1 mapping.
*
* This function must be invoked with a 1:1 mapped stack.
*/
-ENTRY(__efi64_thunk)
+SYM_FUNC_START_LOCAL(__efi64_thunk)
movl %ds, %eax
push %rax
movl %es, %eax
@@ -114,14 +114,14 @@ ENTRY(__efi64_thunk)
or %rcx, %rax
1:
ret
-ENDPROC(__efi64_thunk)
+SYM_FUNC_END(__efi64_thunk)
-ENTRY(efi_exit32)
+SYM_FUNC_START_LOCAL(efi_exit32)
movq func_rt_ptr(%rip), %rax
push %rax
mov %rdi, %rax
ret
-ENDPROC(efi_exit32)
+SYM_FUNC_END(efi_exit32)
.code32
/*
@@ -129,7 +129,7 @@ ENDPROC(efi_exit32)
*
* The stack should represent the 32-bit calling convention.
*/
-ENTRY(efi_enter32)
+SYM_FUNC_START_LOCAL(efi_enter32)
movl $__KERNEL_DS, %eax
movl %eax, %ds
movl %eax, %es
@@ -145,7 +145,7 @@ ENTRY(efi_enter32)
pushl %eax
lret
-ENDPROC(efi_enter32)
+SYM_FUNC_END(efi_enter32)
.data
.balign 8
diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c
index 3b9fd679cea9..7675cf754d90 100644
--- a/arch/x86/platform/efi/quirks.c
+++ b/arch/x86/platform/efi/quirks.c
@@ -320,6 +320,9 @@ void __init efi_reserve_boot_services(void)
{
efi_memory_desc_t *md;
+ if (!efi_enabled(EFI_MEMMAP))
+ return;
+
for_each_efi_memory_desc(md) {
u64 start = md->phys_addr;
u64 size = md->num_pages << EFI_PAGE_SHIFT;
diff --git a/arch/x86/platform/olpc/olpc-xo15-sci.c b/arch/x86/platform/olpc/olpc-xo15-sci.c
index 6d193bb36021..089413cd944e 100644
--- a/arch/x86/platform/olpc/olpc-xo15-sci.c
+++ b/arch/x86/platform/olpc/olpc-xo15-sci.c
@@ -39,7 +39,7 @@ static int set_lid_wake_behavior(bool wake_on_close)
status = acpi_execute_simple_method(NULL, "\\_SB.PCI0.LID.LIDW", wake_on_close);
if (ACPI_FAILURE(status)) {
- pr_warning(PFX "failed to set lid behavior\n");
+ pr_warn(PFX "failed to set lid behavior\n");
return 1;
}
diff --git a/arch/x86/platform/olpc/xo1-wakeup.S b/arch/x86/platform/olpc/xo1-wakeup.S
index 5fee3a2c2fd4..75f4faff8468 100644
--- a/arch/x86/platform/olpc/xo1-wakeup.S
+++ b/arch/x86/platform/olpc/xo1-wakeup.S
@@ -90,7 +90,7 @@ restore_registers:
ret
-ENTRY(do_olpc_suspend_lowlevel)
+SYM_CODE_START(do_olpc_suspend_lowlevel)
call save_processor_state
call save_registers
@@ -110,6 +110,7 @@ ret_point:
call restore_registers
call restore_processor_state
ret
+SYM_CODE_END(do_olpc_suspend_lowlevel)
.data
saved_gdt: .long 0,0
diff --git a/arch/x86/platform/pvh/head.S b/arch/x86/platform/pvh/head.S
index 1f8825bbaffb..43b4d864817e 100644
--- a/arch/x86/platform/pvh/head.S
+++ b/arch/x86/platform/pvh/head.S
@@ -50,7 +50,7 @@
#define PVH_DS_SEL (PVH_GDT_ENTRY_DS * 8)
#define PVH_CANARY_SEL (PVH_GDT_ENTRY_CANARY * 8)
-ENTRY(pvh_start_xen)
+SYM_CODE_START_LOCAL(pvh_start_xen)
cld
lgdt (_pa(gdt))
@@ -146,15 +146,16 @@ ENTRY(pvh_start_xen)
ljmp $PVH_CS_SEL, $_pa(startup_32)
#endif
-END(pvh_start_xen)
+SYM_CODE_END(pvh_start_xen)
.section ".init.data","aw"
.balign 8
-gdt:
+SYM_DATA_START_LOCAL(gdt)
.word gdt_end - gdt_start
.long _pa(gdt_start)
.word 0
-gdt_start:
+SYM_DATA_END(gdt)
+SYM_DATA_START_LOCAL(gdt_start)
.quad 0x0000000000000000 /* NULL descriptor */
#ifdef CONFIG_X86_64
.quad GDT_ENTRY(0xa09a, 0, 0xfffff) /* PVH_CS_SEL */
@@ -163,15 +164,14 @@ gdt_start:
#endif
.quad GDT_ENTRY(0xc092, 0, 0xfffff) /* PVH_DS_SEL */
.quad GDT_ENTRY(0x4090, 0, 0x18) /* PVH_CANARY_SEL */
-gdt_end:
+SYM_DATA_END_LABEL(gdt_start, SYM_L_LOCAL, gdt_end)
.balign 16
-canary:
- .fill 48, 1, 0
+SYM_DATA_LOCAL(canary, .fill 48, 1, 0)
-early_stack:
+SYM_DATA_START_LOCAL(early_stack)
.fill BOOT_STACK_SIZE, 1, 0
-early_stack_end:
+SYM_DATA_END_LABEL(early_stack, SYM_L_LOCAL, early_stack_end)
ELFNOTE(Xen, XEN_ELFNOTE_PHYS32_ENTRY,
_ASM_PTR (pvh_start_xen - __START_KERNEL_map))
diff --git a/arch/x86/platform/sfi/sfi.c b/arch/x86/platform/sfi/sfi.c
index bf6016f8db4e..6259563760f9 100644
--- a/arch/x86/platform/sfi/sfi.c
+++ b/arch/x86/platform/sfi/sfi.c
@@ -26,8 +26,7 @@ static unsigned long sfi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
static void __init mp_sfi_register_lapic(u8 id)
{
if (MAX_LOCAL_APIC - id <= 0) {
- pr_warning("Processor #%d invalid (max %d)\n",
- id, MAX_LOCAL_APIC);
+ pr_warn("Processor #%d invalid (max %d)\n", id, MAX_LOCAL_APIC);
return;
}
diff --git a/arch/x86/platform/uv/bios_uv.c b/arch/x86/platform/uv/bios_uv.c
index c2ee31953372..ece9cb9c1189 100644
--- a/arch/x86/platform/uv/bios_uv.c
+++ b/arch/x86/platform/uv/bios_uv.c
@@ -184,20 +184,20 @@ int uv_bios_set_legacy_vga_target(bool decode, int domain, int bus)
}
EXPORT_SYMBOL_GPL(uv_bios_set_legacy_vga_target);
-void uv_bios_init(void)
+int uv_bios_init(void)
{
uv_systab = NULL;
if ((uv_systab_phys == EFI_INVALID_TABLE_ADDR) ||
!uv_systab_phys || efi_runtime_disabled()) {
pr_crit("UV: UVsystab: missing\n");
- return;
+ return -EEXIST;
}
uv_systab = ioremap(uv_systab_phys, sizeof(struct uv_systab));
if (!uv_systab || strncmp(uv_systab->signature, UV_SYSTAB_SIG, 4)) {
pr_err("UV: UVsystab: bad signature!\n");
iounmap(uv_systab);
- return;
+ return -EINVAL;
}
/* Starting with UV4 the UV systab size is variable */
@@ -208,8 +208,9 @@ void uv_bios_init(void)
uv_systab = ioremap(uv_systab_phys, size);
if (!uv_systab) {
pr_err("UV: UVsystab: ioremap(%d) failed!\n", size);
- return;
+ return -EFAULT;
}
}
pr_info("UV: UVsystab: Revision:%x\n", uv_systab->revision);
+ return 0;
}
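uv_bios_init() now reports -EEXIST, -EINVAL or -EFAULT instead of returning silently, so callers can bail out early. A hypothetical, self-contained caller sketch; the real call sites are not part of this hunk:

#include <stdio.h>

/* Stub standing in for the kernel's uv_bios_init(), which now returns
 * 0 on success or a negative errno on failure.
 */
static int uv_bios_init_stub(void)
{
	return -22;	/* pretend -EINVAL: bad UVsystab signature */
}

int main(void)
{
	int ret = uv_bios_init_stub();

	if (ret) {	/* callers can now propagate the error */
		fprintf(stderr, "UV: UVsystab init failed: %d\n", ret);
		return 1;
	}
	return 0;
}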
diff --git a/arch/x86/power/hibernate_asm_32.S b/arch/x86/power/hibernate_asm_32.S
index 6fe383002125..8786653ad3c0 100644
--- a/arch/x86/power/hibernate_asm_32.S
+++ b/arch/x86/power/hibernate_asm_32.S
@@ -16,7 +16,7 @@
.text
-ENTRY(swsusp_arch_suspend)
+SYM_FUNC_START(swsusp_arch_suspend)
movl %esp, saved_context_esp
movl %ebx, saved_context_ebx
movl %ebp, saved_context_ebp
@@ -33,9 +33,9 @@ ENTRY(swsusp_arch_suspend)
call swsusp_save
FRAME_END
ret
-ENDPROC(swsusp_arch_suspend)
+SYM_FUNC_END(swsusp_arch_suspend)
-ENTRY(restore_image)
+SYM_CODE_START(restore_image)
/* prepare to jump to the image kernel */
movl restore_jump_address, %ebx
movl restore_cr3, %ebp
@@ -45,9 +45,10 @@ ENTRY(restore_image)
/* jump to relocated restore code */
movl relocated_restore_code, %eax
jmpl *%eax
+SYM_CODE_END(restore_image)
/* code below has been relocated to a safe page */
-ENTRY(core_restore_code)
+SYM_CODE_START(core_restore_code)
movl temp_pgt, %eax
movl %eax, %cr3
@@ -77,10 +78,11 @@ copy_loop:
done:
jmpl *%ebx
+SYM_CODE_END(core_restore_code)
/* code below belongs to the image kernel */
.align PAGE_SIZE
-ENTRY(restore_registers)
+SYM_FUNC_START(restore_registers)
/* go back to the original page tables */
movl %ebp, %cr3
movl mmu_cr4_features, %ecx
@@ -107,4 +109,4 @@ ENTRY(restore_registers)
movl %eax, in_suspend
ret
-ENDPROC(restore_registers)
+SYM_FUNC_END(restore_registers)
diff --git a/arch/x86/power/hibernate_asm_64.S b/arch/x86/power/hibernate_asm_64.S
index a4d5eb0a7ece..7918b8415f13 100644
--- a/arch/x86/power/hibernate_asm_64.S
+++ b/arch/x86/power/hibernate_asm_64.S
@@ -22,7 +22,7 @@
#include <asm/processor-flags.h>
#include <asm/frame.h>
-ENTRY(swsusp_arch_suspend)
+SYM_FUNC_START(swsusp_arch_suspend)
movq $saved_context, %rax
movq %rsp, pt_regs_sp(%rax)
movq %rbp, pt_regs_bp(%rax)
@@ -50,9 +50,9 @@ ENTRY(swsusp_arch_suspend)
call swsusp_save
FRAME_END
ret
-ENDPROC(swsusp_arch_suspend)
+SYM_FUNC_END(swsusp_arch_suspend)
-ENTRY(restore_image)
+SYM_CODE_START(restore_image)
/* prepare to jump to the image kernel */
movq restore_jump_address(%rip), %r8
movq restore_cr3(%rip), %r9
@@ -67,9 +67,10 @@ ENTRY(restore_image)
/* jump to relocated restore code */
movq relocated_restore_code(%rip), %rcx
jmpq *%rcx
+SYM_CODE_END(restore_image)
/* code below has been relocated to a safe page */
-ENTRY(core_restore_code)
+SYM_CODE_START(core_restore_code)
/* switch to temporary page tables */
movq %rax, %cr3
/* flush TLB */
@@ -97,10 +98,11 @@ ENTRY(core_restore_code)
.Ldone:
/* jump to the restore_registers address from the image header */
jmpq *%r8
+SYM_CODE_END(core_restore_code)
/* code below belongs to the image kernel */
.align PAGE_SIZE
-ENTRY(restore_registers)
+SYM_FUNC_START(restore_registers)
/* go back to the original page tables */
movq %r9, %cr3
@@ -142,4 +144,4 @@ ENTRY(restore_registers)
movq %rax, in_suspend(%rip)
ret
-ENDPROC(restore_registers)
+SYM_FUNC_END(restore_registers)
diff --git a/arch/x86/purgatory/entry64.S b/arch/x86/purgatory/entry64.S
index 275a646d1048..0b4390ce586b 100644
--- a/arch/x86/purgatory/entry64.S
+++ b/arch/x86/purgatory/entry64.S
@@ -8,13 +8,13 @@
* This code has been taken from kexec-tools.
*/
+#include <linux/linkage.h>
+
.text
.balign 16
.code64
- .globl entry64, entry64_regs
-
-entry64:
+SYM_CODE_START(entry64)
/* Setup a gdt that should be preserved */
lgdt gdt(%rip)
@@ -54,10 +54,11 @@ new_cs_exit:
/* Jump to the new code... */
jmpq *rip(%rip)
+SYM_CODE_END(entry64)
.section ".rodata"
.balign 4
-entry64_regs:
+SYM_DATA_START(entry64_regs)
rax: .quad 0x0
rcx: .quad 0x0
rdx: .quad 0x0
@@ -75,13 +76,14 @@ r13: .quad 0x0
r14: .quad 0x0
r15: .quad 0x0
rip: .quad 0x0
- .size entry64_regs, . - entry64_regs
+SYM_DATA_END(entry64_regs)
/* GDT */
.section ".rodata"
.balign 16
-gdt:
- /* 0x00 unusable segment
+SYM_DATA_START_LOCAL(gdt)
+ /*
+ * 0x00 unusable segment
* 0x08 unused
* so use them as gdt ptr
*/
@@ -94,6 +96,8 @@ gdt:
/* 0x18 4GB flat data segment */
.word 0xFFFF, 0x0000, 0x9200, 0x00CF
-gdt_end:
-stack: .quad 0, 0
-stack_init:
+SYM_DATA_END_LABEL(gdt, SYM_L_LOCAL, gdt_end)
+
+SYM_DATA_START_LOCAL(stack)
+ .quad 0, 0
+SYM_DATA_END_LABEL(stack, SYM_L_LOCAL, stack_init)
diff --git a/arch/x86/purgatory/purgatory.c b/arch/x86/purgatory/purgatory.c
index 3b95410ff0f8..2961234d0795 100644
--- a/arch/x86/purgatory/purgatory.c
+++ b/arch/x86/purgatory/purgatory.c
@@ -14,28 +14,10 @@
#include "../boot/string.h"
-unsigned long purgatory_backup_dest __section(.kexec-purgatory);
-unsigned long purgatory_backup_src __section(.kexec-purgatory);
-unsigned long purgatory_backup_sz __section(.kexec-purgatory);
-
u8 purgatory_sha256_digest[SHA256_DIGEST_SIZE] __section(.kexec-purgatory);
struct kexec_sha_region purgatory_sha_regions[KEXEC_SEGMENT_MAX] __section(.kexec-purgatory);
-/*
- * On x86, second kernel requries first 640K of memory to boot. Copy
- * first 640K to a backup region in reserved memory range so that second
- * kernel can use first 640K.
- */
-static int copy_backup_region(void)
-{
- if (purgatory_backup_dest) {
- memcpy((void *)purgatory_backup_dest,
- (void *)purgatory_backup_src, purgatory_backup_sz);
- }
- return 0;
-}
-
static int verify_sha256_digest(void)
{
struct kexec_sha_region *ptr, *end;
@@ -66,7 +48,6 @@ void purgatory(void)
for (;;)
;
}
- copy_backup_region();
}
/*
diff --git a/arch/x86/purgatory/setup-x86_64.S b/arch/x86/purgatory/setup-x86_64.S
index 321146be741d..89d9e9e53fcd 100644
--- a/arch/x86/purgatory/setup-x86_64.S
+++ b/arch/x86/purgatory/setup-x86_64.S
@@ -7,14 +7,14 @@
*
* This code has been taken from kexec-tools.
*/
+#include <linux/linkage.h>
#include <asm/purgatory.h>
.text
- .globl purgatory_start
.balign 16
-purgatory_start:
.code64
+SYM_CODE_START(purgatory_start)
/* Load a gdt so I know what the segment registers are */
lgdt gdt(%rip)
@@ -32,10 +32,12 @@ purgatory_start:
/* Call the C code */
call purgatory
jmp entry64
+SYM_CODE_END(purgatory_start)
.section ".rodata"
.balign 16
-gdt: /* 0x00 unusable segment
+SYM_DATA_START_LOCAL(gdt)
+ /* 0x00 unusable segment
* 0x08 unused
* so use them as the gdt ptr
*/
@@ -48,10 +50,10 @@ gdt: /* 0x00 unusable segment
/* 0x18 4GB flat data segment */
.word 0xFFFF, 0x0000, 0x9200, 0x00CF
-gdt_end:
+SYM_DATA_END_LABEL(gdt, SYM_L_LOCAL, gdt_end)
.bss
.balign 4096
-lstack:
+SYM_DATA_START_LOCAL(lstack)
.skip 4096
-lstack_end:
+SYM_DATA_END_LABEL(lstack, SYM_L_LOCAL, lstack_end)
diff --git a/arch/x86/purgatory/stack.S b/arch/x86/purgatory/stack.S
index 8b1427422dfc..1ef507ca50a5 100644
--- a/arch/x86/purgatory/stack.S
+++ b/arch/x86/purgatory/stack.S
@@ -5,13 +5,14 @@
* Copyright (C) 2014 Red Hat Inc.
*/
+#include <linux/linkage.h>
+
/* A stack for the loaded kernel.
* Separate and in the data section so it can be prepopulated.
*/
.data
.balign 4096
- .globl stack, stack_end
-stack:
+SYM_DATA_START(stack)
.skip 4096
-stack_end:
+SYM_DATA_END_LABEL(stack, SYM_L_GLOBAL, stack_end)
diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
index 7dce39c8c034..262f83cad355 100644
--- a/arch/x86/realmode/init.c
+++ b/arch/x86/realmode/init.c
@@ -8,6 +8,7 @@
#include <asm/pgtable.h>
#include <asm/realmode.h>
#include <asm/tlbflush.h>
+#include <asm/crash.h>
struct real_mode_header *real_mode_header;
u32 *trampoline_cr4_features;
@@ -34,6 +35,7 @@ void __init reserve_real_mode(void)
memblock_reserve(mem, size);
set_real_mode_mem(mem);
+ crash_reserve_low_1M();
}
static void __init setup_real_mode(void)
diff --git a/arch/x86/realmode/rm/header.S b/arch/x86/realmode/rm/header.S
index 6363761cc74c..af04512c02d9 100644
--- a/arch/x86/realmode/rm/header.S
+++ b/arch/x86/realmode/rm/header.S
@@ -14,7 +14,7 @@
.section ".header", "a"
.balign 16
-GLOBAL(real_mode_header)
+SYM_DATA_START(real_mode_header)
.long pa_text_start
.long pa_ro_end
/* SMP trampoline */
@@ -33,11 +33,9 @@ GLOBAL(real_mode_header)
#ifdef CONFIG_X86_64
.long __KERNEL32_CS
#endif
-END(real_mode_header)
+SYM_DATA_END(real_mode_header)
/* End signature, used to verify integrity */
.section ".signature","a"
.balign 4
-GLOBAL(end_signature)
- .long REALMODE_END_SIGNATURE
-END(end_signature)
+SYM_DATA(end_signature, .long REALMODE_END_SIGNATURE)
diff --git a/arch/x86/realmode/rm/realmode.lds.S b/arch/x86/realmode/rm/realmode.lds.S
index 3bb980800c58..64d135d1ee63 100644
--- a/arch/x86/realmode/rm/realmode.lds.S
+++ b/arch/x86/realmode/rm/realmode.lds.S
@@ -11,6 +11,7 @@
OUTPUT_FORMAT("elf32-i386")
OUTPUT_ARCH(i386)
+ENTRY(pa_text_start)
SECTIONS
{
diff --git a/arch/x86/realmode/rm/reboot.S b/arch/x86/realmode/rm/reboot.S
index cd2f97b9623b..f10515b10e0a 100644
--- a/arch/x86/realmode/rm/reboot.S
+++ b/arch/x86/realmode/rm/reboot.S
@@ -19,7 +19,7 @@
*/
.section ".text32", "ax"
.code32
-ENTRY(machine_real_restart_asm)
+SYM_CODE_START(machine_real_restart_asm)
#ifdef CONFIG_X86_64
/* Switch to trampoline GDT as it is guaranteed < 4 GiB */
@@ -33,7 +33,7 @@ ENTRY(machine_real_restart_asm)
movl %eax, %cr0
ljmpl $__KERNEL32_CS, $pa_machine_real_restart_paging_off
-GLOBAL(machine_real_restart_paging_off)
+SYM_INNER_LABEL(machine_real_restart_paging_off, SYM_L_GLOBAL)
xorl %eax, %eax
xorl %edx, %edx
movl $MSR_EFER, %ecx
@@ -63,6 +63,7 @@ GLOBAL(machine_real_restart_paging_off)
movl %ecx, %gs
movl %ecx, %ss
ljmpw $8, $1f
+SYM_CODE_END(machine_real_restart_asm)
/*
* This is 16-bit protected mode code to disable paging and the cache,
@@ -127,13 +128,13 @@ bios:
.section ".rodata", "a"
.balign 16
-GLOBAL(machine_real_restart_idt)
+SYM_DATA_START(machine_real_restart_idt)
.word 0xffff /* Length - real mode default value */
.long 0 /* Base - real mode default value */
-END(machine_real_restart_idt)
+SYM_DATA_END(machine_real_restart_idt)
.balign 16
-GLOBAL(machine_real_restart_gdt)
+SYM_DATA_START(machine_real_restart_gdt)
/* Self-pointer */
.word 0xffff /* Length - real mode default value */
.long pa_machine_real_restart_gdt
@@ -153,4 +154,4 @@ GLOBAL(machine_real_restart_gdt)
* semantics we don't have to reload the segments once CR0.PE = 0.
*/
.quad GDT_ENTRY(0x0093, 0x100, 0xffff)
-END(machine_real_restart_gdt)
+SYM_DATA_END(machine_real_restart_gdt)
diff --git a/arch/x86/realmode/rm/stack.S b/arch/x86/realmode/rm/stack.S
index 8d4cb64799ea..0fca64061ad2 100644
--- a/arch/x86/realmode/rm/stack.S
+++ b/arch/x86/realmode/rm/stack.S
@@ -6,15 +6,13 @@
#include <linux/linkage.h>
.data
-GLOBAL(HEAP)
- .long rm_heap
-GLOBAL(heap_end)
- .long rm_stack
+SYM_DATA(HEAP, .long rm_heap)
+SYM_DATA(heap_end, .long rm_stack)
.bss
.balign 16
-GLOBAL(rm_heap)
- .space 2048
-GLOBAL(rm_stack)
+SYM_DATA(rm_heap, .space 2048)
+
+SYM_DATA_START(rm_stack)
.space 2048
-GLOBAL(rm_stack_end)
+SYM_DATA_END_LABEL(rm_stack, SYM_L_GLOBAL, rm_stack_end)
diff --git a/arch/x86/realmode/rm/trampoline_32.S b/arch/x86/realmode/rm/trampoline_32.S
index 1868b158480d..3fad907a179f 100644
--- a/arch/x86/realmode/rm/trampoline_32.S
+++ b/arch/x86/realmode/rm/trampoline_32.S
@@ -29,7 +29,7 @@
.code16
.balign PAGE_SIZE
-ENTRY(trampoline_start)
+SYM_CODE_START(trampoline_start)
wbinvd # Needed for NUMA-Q should be harmless for others
LJMPW_RM(1f)
@@ -54,18 +54,20 @@ ENTRY(trampoline_start)
lmsw %dx # into protected mode
ljmpl $__BOOT_CS, $pa_startup_32
+SYM_CODE_END(trampoline_start)
.section ".text32","ax"
.code32
-ENTRY(startup_32) # note: also used from wakeup_asm.S
+SYM_CODE_START(startup_32) # note: also used from wakeup_asm.S
jmp *%eax
+SYM_CODE_END(startup_32)
.bss
.balign 8
-GLOBAL(trampoline_header)
- tr_start: .space 4
- tr_gdt_pad: .space 2
- tr_gdt: .space 6
-END(trampoline_header)
+SYM_DATA_START(trampoline_header)
+ SYM_DATA_LOCAL(tr_start, .space 4)
+ SYM_DATA_LOCAL(tr_gdt_pad, .space 2)
+ SYM_DATA_LOCAL(tr_gdt, .space 6)
+SYM_DATA_END(trampoline_header)
#include "trampoline_common.S"
diff --git a/arch/x86/realmode/rm/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S
index aee2b45d83b8..251758ed7443 100644
--- a/arch/x86/realmode/rm/trampoline_64.S
+++ b/arch/x86/realmode/rm/trampoline_64.S
@@ -38,7 +38,7 @@
.code16
.balign PAGE_SIZE
-ENTRY(trampoline_start)
+SYM_CODE_START(trampoline_start)
cli # We should be safe anyway
wbinvd
@@ -78,12 +78,14 @@ ENTRY(trampoline_start)
no_longmode:
hlt
jmp no_longmode
+SYM_CODE_END(trampoline_start)
+
#include "../kernel/verify_cpu.S"
.section ".text32","ax"
.code32
.balign 4
-ENTRY(startup_32)
+SYM_CODE_START(startup_32)
movl %edx, %ss
addl $pa_real_mode_base, %esp
movl %edx, %ds
@@ -137,38 +139,39 @@ ENTRY(startup_32)
* the new gdt/idt that has __KERNEL_CS with CS.L = 1.
*/
ljmpl $__KERNEL_CS, $pa_startup_64
+SYM_CODE_END(startup_32)
.section ".text64","ax"
.code64
.balign 4
-ENTRY(startup_64)
+SYM_CODE_START(startup_64)
# Now jump into the kernel using virtual addresses
jmpq *tr_start(%rip)
+SYM_CODE_END(startup_64)
.section ".rodata","a"
# Duplicate the global descriptor table
# so the kernel can live anywhere
.balign 16
- .globl tr_gdt
-tr_gdt:
+SYM_DATA_START(tr_gdt)
.short tr_gdt_end - tr_gdt - 1 # gdt limit
.long pa_tr_gdt
.short 0
.quad 0x00cf9b000000ffff # __KERNEL32_CS
.quad 0x00af9b000000ffff # __KERNEL_CS
.quad 0x00cf93000000ffff # __KERNEL_DS
-tr_gdt_end:
+SYM_DATA_END_LABEL(tr_gdt, SYM_L_LOCAL, tr_gdt_end)
.bss
.balign PAGE_SIZE
-GLOBAL(trampoline_pgd) .space PAGE_SIZE
+SYM_DATA(trampoline_pgd, .space PAGE_SIZE)
.balign 8
-GLOBAL(trampoline_header)
- tr_start: .space 8
- GLOBAL(tr_efer) .space 8
- GLOBAL(tr_cr4) .space 4
- GLOBAL(tr_flags) .space 4
-END(trampoline_header)
+SYM_DATA_START(trampoline_header)
+ SYM_DATA_LOCAL(tr_start, .space 8)
+ SYM_DATA(tr_efer, .space 8)
+ SYM_DATA(tr_cr4, .space 4)
+ SYM_DATA(tr_flags, .space 4)
+SYM_DATA_END(trampoline_header)
#include "trampoline_common.S"
diff --git a/arch/x86/realmode/rm/trampoline_common.S b/arch/x86/realmode/rm/trampoline_common.S
index 8d8208dcca24..5033e640f957 100644
--- a/arch/x86/realmode/rm/trampoline_common.S
+++ b/arch/x86/realmode/rm/trampoline_common.S
@@ -1,4 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 */
.section ".rodata","a"
.balign 16
-tr_idt: .fill 1, 6, 0
+SYM_DATA_LOCAL(tr_idt, .fill 1, 6, 0)
diff --git a/arch/x86/realmode/rm/wakeup_asm.S b/arch/x86/realmode/rm/wakeup_asm.S
index 05ac9c17c811..02d0ba16ae33 100644
--- a/arch/x86/realmode/rm/wakeup_asm.S
+++ b/arch/x86/realmode/rm/wakeup_asm.S
@@ -17,7 +17,7 @@
.section ".data", "aw"
.balign 16
-GLOBAL(wakeup_header)
+SYM_DATA_START(wakeup_header)
video_mode: .short 0 /* Video mode number */
pmode_entry: .long 0
pmode_cs: .short __KERNEL_CS
@@ -31,13 +31,13 @@ GLOBAL(wakeup_header)
realmode_flags: .long 0
real_magic: .long 0
signature: .long WAKEUP_HEADER_SIGNATURE
-END(wakeup_header)
+SYM_DATA_END(wakeup_header)
.text
.code16
.balign 16
-ENTRY(wakeup_start)
+SYM_CODE_START(wakeup_start)
cli
cld
@@ -73,7 +73,7 @@ ENTRY(wakeup_start)
movw %ax, %fs
movw %ax, %gs
- lidtl wakeup_idt
+ lidtl .Lwakeup_idt
/* Clear the EFLAGS */
pushl $0
@@ -135,6 +135,7 @@ ENTRY(wakeup_start)
#else
jmp trampoline_start
#endif
+SYM_CODE_END(wakeup_start)
bogus_real_magic:
1:
@@ -152,7 +153,7 @@ bogus_real_magic:
*/
.balign 16
-GLOBAL(wakeup_gdt)
+SYM_DATA_START(wakeup_gdt)
.word 3*8-1 /* Self-descriptor */
.long pa_wakeup_gdt
.word 0
@@ -164,15 +165,15 @@ GLOBAL(wakeup_gdt)
.word 0xffff /* 16-bit data segment @ real_mode_base */
.long 0x93000000 + pa_real_mode_base
.word 0x008f /* big real mode */
-END(wakeup_gdt)
+SYM_DATA_END(wakeup_gdt)
.section ".rodata","a"
.balign 8
/* This is the standard real-mode IDT */
.balign 16
-GLOBAL(wakeup_idt)
+SYM_DATA_START_LOCAL(.Lwakeup_idt)
.word 0xffff /* limit */
.long 0 /* address */
.word 0
-END(wakeup_idt)
+SYM_DATA_END(.Lwakeup_idt)
diff --git a/arch/x86/realmode/rmpiggy.S b/arch/x86/realmode/rmpiggy.S
index c078dba40cef..c8fef76743f6 100644
--- a/arch/x86/realmode/rmpiggy.S
+++ b/arch/x86/realmode/rmpiggy.S
@@ -10,12 +10,10 @@
.balign PAGE_SIZE
-GLOBAL(real_mode_blob)
+SYM_DATA_START(real_mode_blob)
.incbin "arch/x86/realmode/rm/realmode.bin"
-END(real_mode_blob)
+SYM_DATA_END_LABEL(real_mode_blob, SYM_L_GLOBAL, real_mode_blob_end)
-GLOBAL(real_mode_blob_end);
-
-GLOBAL(real_mode_relocs)
+SYM_DATA_START(real_mode_relocs)
.incbin "arch/x86/realmode/rm/realmode.relocs"
-END(real_mode_relocs)
+SYM_DATA_END(real_mode_relocs)
diff --git a/arch/x86/tools/gen-insn-attr-x86.awk b/arch/x86/tools/gen-insn-attr-x86.awk
index b02a36b2c14f..a42015b305f4 100644
--- a/arch/x86/tools/gen-insn-attr-x86.awk
+++ b/arch/x86/tools/gen-insn-attr-x86.awk
@@ -69,7 +69,7 @@ BEGIN {
lprefix1_expr = "\\((66|!F3)\\)"
lprefix2_expr = "\\(F3\\)"
- lprefix3_expr = "\\((F2|!F3|66\\&F2)\\)"
+ lprefix3_expr = "\\((F2|!F3|66&F2)\\)"
lprefix_expr = "\\((66|F2|F3)\\)"
max_lprefix = 4
@@ -257,7 +257,7 @@ function convert_operands(count,opnd, i,j,imm,mod)
return add_flags(imm, mod)
}
-/^[0-9a-f]+\:/ {
+/^[0-9a-f]+:/ {
if (NR == 1)
next
# get index
diff --git a/arch/x86/um/vdso/vdso.S b/arch/x86/um/vdso/vdso.S
index a4a3870dc059..a6eaf293a73b 100644
--- a/arch/x86/um/vdso/vdso.S
+++ b/arch/x86/um/vdso/vdso.S
@@ -1,11 +1,11 @@
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/init.h>
+#include <linux/linkage.h>
__INITDATA
- .globl vdso_start, vdso_end
-vdso_start:
+SYM_DATA_START(vdso_start)
.incbin "arch/x86/um/vdso/vdso.so"
-vdso_end:
+SYM_DATA_END_LABEL(vdso_start, SYM_L_GLOBAL, vdso_end)
__FINIT
diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
index 5bfea374a160..ae4a41ca19f6 100644
--- a/arch/x86/xen/enlighten_pv.c
+++ b/arch/x86/xen/enlighten_pv.c
@@ -837,15 +837,6 @@ static void xen_load_sp0(unsigned long sp0)
this_cpu_write(cpu_tss_rw.x86_tss.sp0, sp0);
}
-void xen_set_iopl_mask(unsigned mask)
-{
- struct physdev_set_iopl set_iopl;
-
- /* Force the change at ring 0. */
- set_iopl.iopl = (mask == 0) ? 1 : (mask >> 12) & 3;
- HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
-}
-
static void xen_io_delay(void)
{
}
@@ -1055,7 +1046,6 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = {
.write_idt_entry = xen_write_idt_entry,
.load_sp0 = xen_load_sp0,
- .set_iopl_mask = xen_set_iopl_mask,
.io_delay = xen_io_delay,
/* Xen takes care of %gs when switching to usermode for us */
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 548d1e0a5ba1..33b0e20df7fc 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -412,7 +412,7 @@ static unsigned long __init xen_set_identity_and_remap_chunk(
remap_range_size = xen_find_pfn_range(&remap_pfn);
if (!remap_range_size) {
- pr_warning("Unable to find available pfn range, not remapping identity pages\n");
+ pr_warn("Unable to find available pfn range, not remapping identity pages\n");
xen_set_identity_and_release_chunk(cur_pfn,
cur_pfn + left, nr_pages);
break;
diff --git a/arch/x86/xen/xen-asm.S b/arch/x86/xen/xen-asm.S
index be104eef80be..508fe204520b 100644
--- a/arch/x86/xen/xen-asm.S
+++ b/arch/x86/xen/xen-asm.S
@@ -19,7 +19,7 @@
* event status with one and operation. If there are pending events,
* then enter the hypervisor to get them handled.
*/
-ENTRY(xen_irq_enable_direct)
+SYM_FUNC_START(xen_irq_enable_direct)
FRAME_BEGIN
/* Unmask events */
movb $0, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
@@ -38,17 +38,17 @@ ENTRY(xen_irq_enable_direct)
1:
FRAME_END
ret
- ENDPROC(xen_irq_enable_direct)
+SYM_FUNC_END(xen_irq_enable_direct)
/*
* Disabling events is simply a matter of making the event mask
* non-zero.
*/
-ENTRY(xen_irq_disable_direct)
+SYM_FUNC_START(xen_irq_disable_direct)
movb $1, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
ret
-ENDPROC(xen_irq_disable_direct)
+SYM_FUNC_END(xen_irq_disable_direct)
/*
* (xen_)save_fl is used to get the current interrupt enable status.
@@ -59,12 +59,12 @@ ENDPROC(xen_irq_disable_direct)
* undefined. We need to toggle the state of the bit, because Xen and
* x86 use opposite senses (mask vs enable).
*/
-ENTRY(xen_save_fl_direct)
+SYM_FUNC_START(xen_save_fl_direct)
testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
setz %ah
addb %ah, %ah
ret
- ENDPROC(xen_save_fl_direct)
+SYM_FUNC_END(xen_save_fl_direct)
/*
@@ -74,7 +74,7 @@ ENTRY(xen_save_fl_direct)
* interrupt mask state, it checks for unmasked pending events and
* enters the hypervisor to get them delivered if so.
*/
-ENTRY(xen_restore_fl_direct)
+SYM_FUNC_START(xen_restore_fl_direct)
FRAME_BEGIN
#ifdef CONFIG_X86_64
testw $X86_EFLAGS_IF, %di
@@ -95,14 +95,14 @@ ENTRY(xen_restore_fl_direct)
1:
FRAME_END
ret
- ENDPROC(xen_restore_fl_direct)
+SYM_FUNC_END(xen_restore_fl_direct)
/*
* Force an event check by making a hypercall, but preserve regs
* before making the call.
*/
-ENTRY(check_events)
+SYM_FUNC_START(check_events)
FRAME_BEGIN
#ifdef CONFIG_X86_32
push %eax
@@ -135,19 +135,19 @@ ENTRY(check_events)
#endif
FRAME_END
ret
-ENDPROC(check_events)
+SYM_FUNC_END(check_events)
-ENTRY(xen_read_cr2)
+SYM_FUNC_START(xen_read_cr2)
FRAME_BEGIN
_ASM_MOV PER_CPU_VAR(xen_vcpu), %_ASM_AX
_ASM_MOV XEN_vcpu_info_arch_cr2(%_ASM_AX), %_ASM_AX
FRAME_END
ret
- ENDPROC(xen_read_cr2);
+SYM_FUNC_END(xen_read_cr2);
-ENTRY(xen_read_cr2_direct)
+SYM_FUNC_START(xen_read_cr2_direct)
FRAME_BEGIN
_ASM_MOV PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_arch_cr2, %_ASM_AX
FRAME_END
ret
- ENDPROC(xen_read_cr2_direct);
+SYM_FUNC_END(xen_read_cr2_direct);
diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
index c15db060a242..2712e9155306 100644
--- a/arch/x86/xen/xen-asm_32.S
+++ b/arch/x86/xen/xen-asm_32.S
@@ -56,7 +56,7 @@
_ASM_EXTABLE(1b,2b)
.endm
-ENTRY(xen_iret)
+SYM_CODE_START(xen_iret)
/* test eflags for special cases */
testl $(X86_EFLAGS_VM | XEN_EFLAGS_NMI), 8(%esp)
jnz hyper_iret
@@ -122,14 +122,14 @@ xen_iret_end_crit:
hyper_iret:
/* put this out of line since its very rarely used */
jmp hypercall_page + __HYPERVISOR_iret * 32
+SYM_CODE_END(xen_iret)
.globl xen_iret_start_crit, xen_iret_end_crit
/*
- * This is called by xen_hypervisor_callback in entry.S when it sees
+ * This is called by xen_hypervisor_callback in entry_32.S when it sees
* that the EIP at the time of interrupt was between
- * xen_iret_start_crit and xen_iret_end_crit. We're passed the EIP in
- * %eax so we can do a more refined determination of what to do.
+ * xen_iret_start_crit and xen_iret_end_crit.
*
* The stack format at this point is:
* ----------------
@@ -138,70 +138,46 @@ hyper_iret:
* eflags } outer exception info
* cs }
* eip }
- * ---------------- <- edi (copy dest)
- * eax : outer eax if it hasn't been restored
* ----------------
- * eflags } nested exception info
- * cs } (no ss/esp because we're nested
- * eip } from the same ring)
- * orig_eax }<- esi (copy src)
- * - - - - - - - -
- * fs }
- * es }
- * ds } SAVE_ALL state
- * eax }
- * : :
- * ebx }<- esp
+ * eax : outer eax if it hasn't been restored
* ----------------
+ * eflags }
+ * cs } nested exception info
+ * eip }
+ * return address : (into xen_hypervisor_callback)
*
- * In order to deliver the nested exception properly, we need to shift
- * everything from the return addr up to the error code so it sits
- * just under the outer exception info. This means that when we
- * handle the exception, we do it in the context of the outer
- * exception rather than starting a new one.
+ * In order to deliver the nested exception properly, we need to discard the
+ * nested exception frame such that when we handle the exception, we do it
+ * in the context of the outer exception rather than starting a new one.
*
- * The only caveat is that if the outer eax hasn't been restored yet
- * (ie, it's still on stack), we need to insert its value into the
- * SAVE_ALL state before going on, since it's usermode state which we
- * eventually need to restore.
+ * The only caveat is that if the outer eax hasn't been restored yet (i.e.
+ * it's still on stack), we need to restore its value here.
*/
-ENTRY(xen_iret_crit_fixup)
+SYM_CODE_START(xen_iret_crit_fixup)
/*
* Paranoia: Make sure we're really coming from kernel space.
* One could imagine a case where userspace jumps into the
* critical range address, but just before the CPU delivers a
- * GP, it decides to deliver an interrupt instead. Unlikely?
- * Definitely. Easy to avoid? Yes. The Intel documents
- * explicitly say that the reported EIP for a bad jump is the
- * jump instruction itself, not the destination, but some
- * virtual environments get this wrong.
+ * PF, it decides to deliver an interrupt instead. Unlikely?
+ * Definitely. Easy to avoid? Yes.
*/
- movl PT_CS(%esp), %ecx
- andl $SEGMENT_RPL_MASK, %ecx
- cmpl $USER_RPL, %ecx
- je 2f
-
- lea PT_ORIG_EAX(%esp), %esi
- lea PT_EFLAGS(%esp), %edi
+ testb $2, 2*4(%esp) /* nested CS */
+ jnz 2f
/*
* If eip is before iret_restore_end then stack
* hasn't been restored yet.
*/
- cmp $iret_restore_end, %eax
+ cmpl $iret_restore_end, 1*4(%esp)
jae 1f
- movl 0+4(%edi), %eax /* copy EAX (just above top of frame) */
- movl %eax, PT_EAX(%esp)
+ movl 4*4(%esp), %eax /* load outer EAX */
+ ret $4*4 /* discard nested EIP, CS, and EFLAGS as
+ * well as the just restored EAX */
- lea ESP_OFFSET(%edi), %edi /* move dest up over saved regs */
-
- /* set up the copy */
-1: std
- mov $PT_EIP / 4, %ecx /* saved regs up to orig_eax */
- rep movsl
- cld
-
- lea 4(%edi), %esp /* point esp to new frame */
-2: jmp xen_do_upcall
+1:
+ ret $3*4 /* discard nested EIP, CS, and EFLAGS */
+2:
+ ret
+SYM_CODE_END(xen_iret_crit_fixup)
diff --git a/arch/x86/xen/xen-asm_64.S b/arch/x86/xen/xen-asm_64.S
index ebf610b49c06..0a0fd168683a 100644
--- a/arch/x86/xen/xen-asm_64.S
+++ b/arch/x86/xen/xen-asm_64.S
@@ -20,11 +20,11 @@
#include <linux/linkage.h>
.macro xen_pv_trap name
-ENTRY(xen_\name)
+SYM_CODE_START(xen_\name)
pop %rcx
pop %r11
jmp \name
-END(xen_\name)
+SYM_CODE_END(xen_\name)
_ASM_NOKPROBE(xen_\name)
.endm
@@ -57,7 +57,7 @@ xen_pv_trap entry_INT80_compat
xen_pv_trap hypervisor_callback
__INIT
-ENTRY(xen_early_idt_handler_array)
+SYM_CODE_START(xen_early_idt_handler_array)
i = 0
.rept NUM_EXCEPTION_VECTORS
pop %rcx
@@ -66,7 +66,7 @@ ENTRY(xen_early_idt_handler_array)
i = i + 1
.fill xen_early_idt_handler_array + i*XEN_EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
.endr
-END(xen_early_idt_handler_array)
+SYM_CODE_END(xen_early_idt_handler_array)
__FINIT
hypercall_iret = hypercall_page + __HYPERVISOR_iret * 32
@@ -85,11 +85,12 @@ hypercall_iret = hypercall_page + __HYPERVISOR_iret * 32
* r11 }<-- pushed by hypercall page
* rsp->rax }
*/
-ENTRY(xen_iret)
+SYM_CODE_START(xen_iret)
pushq $0
jmp hypercall_iret
+SYM_CODE_END(xen_iret)
-ENTRY(xen_sysret64)
+SYM_CODE_START(xen_sysret64)
/*
* We're already on the usermode stack at this point, but
* still with the kernel gs, so we can easily switch back.
@@ -107,6 +108,7 @@ ENTRY(xen_sysret64)
pushq $VGCF_in_syscall
jmp hypercall_iret
+SYM_CODE_END(xen_sysret64)
/*
* Xen handles syscall callbacks much like ordinary exceptions, which
@@ -124,7 +126,7 @@ ENTRY(xen_sysret64)
*/
/* Normal 64-bit system call target */
-ENTRY(xen_syscall_target)
+SYM_FUNC_START(xen_syscall_target)
popq %rcx
popq %r11
@@ -137,12 +139,12 @@ ENTRY(xen_syscall_target)
movq $__USER_CS, 1*8(%rsp)
jmp entry_SYSCALL_64_after_hwframe
-ENDPROC(xen_syscall_target)
+SYM_FUNC_END(xen_syscall_target)
#ifdef CONFIG_IA32_EMULATION
/* 32-bit compat syscall target */
-ENTRY(xen_syscall32_target)
+SYM_FUNC_START(xen_syscall32_target)
popq %rcx
popq %r11
@@ -155,25 +157,25 @@ ENTRY(xen_syscall32_target)
movq $__USER32_CS, 1*8(%rsp)
jmp entry_SYSCALL_compat_after_hwframe
-ENDPROC(xen_syscall32_target)
+SYM_FUNC_END(xen_syscall32_target)
/* 32-bit compat sysenter target */
-ENTRY(xen_sysenter_target)
+SYM_FUNC_START(xen_sysenter_target)
mov 0*8(%rsp), %rcx
mov 1*8(%rsp), %r11
mov 5*8(%rsp), %rsp
jmp entry_SYSENTER_compat
-ENDPROC(xen_sysenter_target)
+SYM_FUNC_END(xen_sysenter_target)
#else /* !CONFIG_IA32_EMULATION */
-ENTRY(xen_syscall32_target)
-ENTRY(xen_sysenter_target)
+SYM_FUNC_START_ALIAS(xen_syscall32_target)
+SYM_FUNC_START(xen_sysenter_target)
lea 16(%rsp), %rsp /* strip %rcx, %r11 */
mov $-ENOSYS, %rax
pushq $0
jmp hypercall_iret
-ENDPROC(xen_syscall32_target)
-ENDPROC(xen_sysenter_target)
+SYM_FUNC_END(xen_sysenter_target)
+SYM_FUNC_END_ALIAS(xen_syscall32_target)
#endif /* CONFIG_IA32_EMULATION */
diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
index c1d8b90aa4e2..1d0cee3163e4 100644
--- a/arch/x86/xen/xen-head.S
+++ b/arch/x86/xen/xen-head.S
@@ -22,7 +22,7 @@
#ifdef CONFIG_XEN_PV
__INIT
-ENTRY(startup_xen)
+SYM_CODE_START(startup_xen)
UNWIND_HINT_EMPTY
cld
@@ -52,13 +52,13 @@ ENTRY(startup_xen)
#endif
jmp xen_start_kernel
-END(startup_xen)
+SYM_CODE_END(startup_xen)
__FINIT
#endif
.pushsection .text
.balign PAGE_SIZE
-ENTRY(hypercall_page)
+SYM_CODE_START(hypercall_page)
.rept (PAGE_SIZE / 32)
UNWIND_HINT_EMPTY
.skip 32
@@ -69,7 +69,7 @@ ENTRY(hypercall_page)
.type xen_hypercall_##n, @function; .size xen_hypercall_##n, 32
#include <asm/xen-hypercalls.h>
#undef HYPERCALL
-END(hypercall_page)
+SYM_CODE_END(hypercall_page)
.popsection
ELFNOTE(Xen, XEN_ELFNOTE_GUEST_OS, .asciz "linux")
diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S
index 943f10639a93..0043d5858f14 100644
--- a/arch/xtensa/kernel/vmlinux.lds.S
+++ b/arch/xtensa/kernel/vmlinux.lds.S
@@ -14,6 +14,8 @@
* Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
*/
+#define RO_EXCEPTION_TABLE_ALIGN 16
+
#include <asm-generic/vmlinux.lds.h>
#include <asm/page.h>
#include <asm/thread_info.h>
@@ -124,18 +126,16 @@ SECTIONS
. = ALIGN(16);
- RODATA
+ RO_DATA(4096)
/* Relocation table */
.fixup : { *(.fixup) }
- EXCEPTION_TABLE(16)
- NOTES
/* Data section */
_sdata = .;
- RW_DATA_SECTION(XCHAL_ICACHE_LINESIZE, PAGE_SIZE, THREAD_SIZE)
+ RW_DATA(XCHAL_ICACHE_LINESIZE, PAGE_SIZE, THREAD_SIZE)
_edata = .;
/* Initialization code and data: */
diff --git a/block/Kconfig b/block/Kconfig
index 41c0917ce622..c23094a14a2b 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -32,6 +32,9 @@ config BLK_RQ_ALLOC_TIME
config BLK_SCSI_REQUEST
bool
+config BLK_CGROUP_RWSTAT
+ bool
+
config BLK_DEV_BSG
bool "Block layer SG support v4"
default y
@@ -86,6 +89,7 @@ config BLK_DEV_ZONED
config BLK_DEV_THROTTLING
bool "Block layer bio throttling support"
depends on BLK_CGROUP=y
+ select BLK_CGROUP_RWSTAT
---help---
Block layer bio throttling support. It can be used to limit
the IO rate to a device. IO rate policies are per cgroup and
diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched
index b89310a022ad..7df14133adc8 100644
--- a/block/Kconfig.iosched
+++ b/block/Kconfig.iosched
@@ -31,6 +31,7 @@ config IOSCHED_BFQ
config BFQ_GROUP_IOSCHED
bool "BFQ hierarchical scheduling support"
depends on IOSCHED_BFQ && BLK_CGROUP
+ select BLK_CGROUP_RWSTAT
---help---
Enable hierarchical scheduling in BFQ, using the blkio
diff --git a/block/Makefile b/block/Makefile
index 9ef57ace90d4..205a5f2fef17 100644
--- a/block/Makefile
+++ b/block/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_BLK_SCSI_REQUEST) += scsi_ioctl.o
obj-$(CONFIG_BLK_DEV_BSG) += bsg.o
obj-$(CONFIG_BLK_DEV_BSGLIB) += bsg-lib.o
obj-$(CONFIG_BLK_CGROUP) += blk-cgroup.o
+obj-$(CONFIG_BLK_CGROUP_RWSTAT) += blk-cgroup-rwstat.o
obj-$(CONFIG_BLK_DEV_THROTTLING) += blk-throttle.o
obj-$(CONFIG_BLK_CGROUP_IOLATENCY) += blk-iolatency.o
obj-$(CONFIG_BLK_CGROUP_IOCOST) += blk-iocost.o
diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
index 86a607cf19a1..cea0ae12f937 100644
--- a/block/bfq-cgroup.c
+++ b/block/bfq-cgroup.c
@@ -347,6 +347,14 @@ void bfqg_and_blkg_put(struct bfq_group *bfqg)
bfqg_put(bfqg);
}
+void bfqg_stats_update_legacy_io(struct request_queue *q, struct request *rq)
+{
+ struct bfq_group *bfqg = blkg_to_bfqg(rq->bio->bi_blkg);
+
+ blkg_rwstat_add(&bfqg->stats.bytes, rq->cmd_flags, blk_rq_bytes(rq));
+ blkg_rwstat_add(&bfqg->stats.ios, rq->cmd_flags, 1);
+}
+
/* @stats = 0 */
static void bfqg_stats_reset(struct bfqg_stats *stats)
{
@@ -431,6 +439,8 @@ void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg)
static void bfqg_stats_exit(struct bfqg_stats *stats)
{
+ blkg_rwstat_exit(&stats->bytes);
+ blkg_rwstat_exit(&stats->ios);
#ifdef CONFIG_BFQ_CGROUP_DEBUG
blkg_rwstat_exit(&stats->merged);
blkg_rwstat_exit(&stats->service_time);
@@ -448,6 +458,10 @@ static void bfqg_stats_exit(struct bfqg_stats *stats)
static int bfqg_stats_init(struct bfqg_stats *stats, gfp_t gfp)
{
+ if (blkg_rwstat_init(&stats->bytes, gfp) ||
+ blkg_rwstat_init(&stats->ios, gfp))
+ return -ENOMEM;
+
#ifdef CONFIG_BFQ_CGROUP_DEBUG
if (blkg_rwstat_init(&stats->merged, gfp) ||
blkg_rwstat_init(&stats->service_time, gfp) ||
@@ -1057,18 +1071,35 @@ static ssize_t bfq_io_set_weight(struct kernfs_open_file *of,
return bfq_io_set_device_weight(of, buf, nbytes, off);
}
-#ifdef CONFIG_BFQ_CGROUP_DEBUG
-static int bfqg_print_stat(struct seq_file *sf, void *v)
+static int bfqg_print_rwstat(struct seq_file *sf, void *v)
{
- blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_stat,
- &blkcg_policy_bfq, seq_cft(sf)->private, false);
+ blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_rwstat,
+ &blkcg_policy_bfq, seq_cft(sf)->private, true);
return 0;
}
-static int bfqg_print_rwstat(struct seq_file *sf, void *v)
+static u64 bfqg_prfill_rwstat_recursive(struct seq_file *sf,
+ struct blkg_policy_data *pd, int off)
{
- blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_rwstat,
- &blkcg_policy_bfq, seq_cft(sf)->private, true);
+ struct blkg_rwstat_sample sum;
+
+ blkg_rwstat_recursive_sum(pd_to_blkg(pd), &blkcg_policy_bfq, off, &sum);
+ return __blkg_prfill_rwstat(sf, pd, &sum);
+}
+
+static int bfqg_print_rwstat_recursive(struct seq_file *sf, void *v)
+{
+ blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
+ bfqg_prfill_rwstat_recursive, &blkcg_policy_bfq,
+ seq_cft(sf)->private, true);
+ return 0;
+}
+
+#ifdef CONFIG_BFQ_CGROUP_DEBUG
+static int bfqg_print_stat(struct seq_file *sf, void *v)
+{
+ blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_stat,
+ &blkcg_policy_bfq, seq_cft(sf)->private, false);
return 0;
}
@@ -1097,15 +1128,6 @@ static u64 bfqg_prfill_stat_recursive(struct seq_file *sf,
return __blkg_prfill_u64(sf, pd, sum);
}
-static u64 bfqg_prfill_rwstat_recursive(struct seq_file *sf,
- struct blkg_policy_data *pd, int off)
-{
- struct blkg_rwstat_sample sum;
-
- blkg_rwstat_recursive_sum(pd_to_blkg(pd), &blkcg_policy_bfq, off, &sum);
- return __blkg_prfill_rwstat(sf, pd, &sum);
-}
-
static int bfqg_print_stat_recursive(struct seq_file *sf, void *v)
{
blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
@@ -1114,18 +1136,11 @@ static int bfqg_print_stat_recursive(struct seq_file *sf, void *v)
return 0;
}
-static int bfqg_print_rwstat_recursive(struct seq_file *sf, void *v)
-{
- blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
- bfqg_prfill_rwstat_recursive, &blkcg_policy_bfq,
- seq_cft(sf)->private, true);
- return 0;
-}
-
static u64 bfqg_prfill_sectors(struct seq_file *sf, struct blkg_policy_data *pd,
int off)
{
- u64 sum = blkg_rwstat_total(&pd->blkg->stat_bytes);
+ struct bfq_group *bfqg = blkg_to_bfqg(pd->blkg);
+ u64 sum = blkg_rwstat_total(&bfqg->stats.bytes);
return __blkg_prfill_u64(sf, pd, sum >> 9);
}
@@ -1142,8 +1157,8 @@ static u64 bfqg_prfill_sectors_recursive(struct seq_file *sf,
{
struct blkg_rwstat_sample tmp;
- blkg_rwstat_recursive_sum(pd->blkg, NULL,
- offsetof(struct blkcg_gq, stat_bytes), &tmp);
+ blkg_rwstat_recursive_sum(pd->blkg, &blkcg_policy_bfq,
+ offsetof(struct bfq_group, stats.bytes), &tmp);
return __blkg_prfill_u64(sf, pd,
(tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE]) >> 9);
@@ -1226,13 +1241,13 @@ struct cftype bfq_blkcg_legacy_files[] = {
/* statistics, covers only the tasks in the bfqg */
{
.name = "bfq.io_service_bytes",
- .private = (unsigned long)&blkcg_policy_bfq,
- .seq_show = blkg_print_stat_bytes,
+ .private = offsetof(struct bfq_group, stats.bytes),
+ .seq_show = bfqg_print_rwstat,
},
{
.name = "bfq.io_serviced",
- .private = (unsigned long)&blkcg_policy_bfq,
- .seq_show = blkg_print_stat_ios,
+ .private = offsetof(struct bfq_group, stats.ios),
+ .seq_show = bfqg_print_rwstat,
},
#ifdef CONFIG_BFQ_CGROUP_DEBUG
{
@@ -1269,13 +1284,13 @@ struct cftype bfq_blkcg_legacy_files[] = {
/* the same statistics which cover the bfqg and its descendants */
{
.name = "bfq.io_service_bytes_recursive",
- .private = (unsigned long)&blkcg_policy_bfq,
- .seq_show = blkg_print_stat_bytes_recursive,
+ .private = offsetof(struct bfq_group, stats.bytes),
+ .seq_show = bfqg_print_rwstat_recursive,
},
{
.name = "bfq.io_serviced_recursive",
- .private = (unsigned long)&blkcg_policy_bfq,
- .seq_show = blkg_print_stat_ios_recursive,
+ .private = offsetof(struct bfq_group, stats.ios),
+ .seq_show = bfqg_print_rwstat_recursive,
},
#ifdef CONFIG_BFQ_CGROUP_DEBUG
{
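
In the cftype tables above, ->private changes from a pointer to &blkcg_policy_bfq into an offsetof() into struct bfq_group, and the new prfill callbacks recover the rwstat by adding that offset to the policy data. A minimal sketch of that round trip, with a hypothetical my_grp policy structure (it works because the blkg_policy_data is the first member, so group-relative offsets apply to the pd pointer as well):

struct my_grp {
	struct blkg_policy_data pd;	/* must stay first */
	struct blkg_rwstat bytes;
};

/* cftype->private = offsetof(struct my_grp, bytes) arrives here as @off */
static u64 my_prfill(struct seq_file *sf, struct blkg_policy_data *pd, int off)
{
	struct blkg_rwstat_sample sample = { };

	blkg_rwstat_read((void *)pd + off, &sample);
	return __blkg_prfill_rwstat(sf, pd, &sample);
}
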
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index 0319d6339822..ad4af4aaf2ce 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -2713,6 +2713,28 @@ static void bfq_bfqq_save_state(struct bfq_queue *bfqq)
}
}
+
+static
+void bfq_release_process_ref(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+{
+ /*
+ * To prevent bfqq's service guarantees from being violated,
+ * bfqq may be left busy, i.e., queued for service, even if
+ * empty (see comments in __bfq_bfqq_expire() for
+ * details). But, if no process will send requests to bfqq any
+ * longer, then there is no point in keeping bfqq queued for
+ * service. In addition, keeping bfqq queued for service, but
+ * with no process ref any longer, may have caused bfqq to be
+ * freed when dequeued from service. But this is assumed to
+ * never happen.
+ */
+ if (bfq_bfqq_busy(bfqq) && RB_EMPTY_ROOT(&bfqq->sort_list) &&
+ bfqq != bfqd->in_service_queue)
+ bfq_del_bfqq_busy(bfqd, bfqq, false);
+
+ bfq_put_queue(bfqq);
+}
+
static void
bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
@@ -2783,8 +2805,7 @@ bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
*/
new_bfqq->pid = -1;
bfqq->bic = NULL;
- /* release process reference to bfqq */
- bfq_put_queue(bfqq);
+ bfq_release_process_ref(bfqd, bfqq);
}
static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq,
@@ -4899,7 +4920,7 @@ static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
bfq_put_cooperator(bfqq);
- bfq_put_queue(bfqq); /* release process reference */
+ bfq_release_process_ref(bfqd, bfqq);
}
static void bfq_exit_icq_bfqq(struct bfq_io_cq *bic, bool is_sync)
@@ -5001,8 +5022,7 @@ static void bfq_check_ioprio_change(struct bfq_io_cq *bic, struct bio *bio)
bfqq = bic_to_bfqq(bic, false);
if (bfqq) {
- /* release process reference on this queue */
- bfq_put_queue(bfqq);
+ bfq_release_process_ref(bfqd, bfqq);
bfqq = bfq_get_queue(bfqd, bio, BLK_RW_ASYNC, bic);
bic_set_bfqq(bic, bfqq, false);
}
@@ -5464,6 +5484,10 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
bool idle_timer_disabled = false;
unsigned int cmd_flags;
+#ifdef CONFIG_BFQ_GROUP_IOSCHED
+ if (!cgroup_subsys_on_dfl(io_cgrp_subsys) && rq->bio)
+ bfqg_stats_update_legacy_io(q, rq);
+#endif
spin_lock_irq(&bfqd->lock);
if (blk_mq_sched_try_insert_merge(q, rq)) {
spin_unlock_irq(&bfqd->lock);
@@ -5963,7 +5987,7 @@ bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq)
bfq_put_cooperator(bfqq);
- bfq_put_queue(bfqq);
+ bfq_release_process_ref(bfqq->bfqd, bfqq);
return NULL;
}
diff --git a/block/bfq-iosched.h b/block/bfq-iosched.h
index 5d1a519640f6..8526f20c53bc 100644
--- a/block/bfq-iosched.h
+++ b/block/bfq-iosched.h
@@ -10,6 +10,8 @@
#include <linux/hrtimer.h>
#include <linux/blk-cgroup.h>
+#include "blk-cgroup-rwstat.h"
+
#define BFQ_IOPRIO_CLASSES 3
#define BFQ_CL_IDLE_TIMEOUT (HZ/5)
@@ -809,6 +811,9 @@ struct bfq_stat {
};
struct bfqg_stats {
+ /* basic stats */
+ struct blkg_rwstat bytes;
+ struct blkg_rwstat ios;
#ifdef CONFIG_BFQ_CGROUP_DEBUG
/* number of ios merged */
struct blkg_rwstat merged;
@@ -956,6 +961,7 @@ void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg);
/* ---------------- cgroups-support interface ---------------- */
+void bfqg_stats_update_legacy_io(struct request_queue *q, struct request *rq);
void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
unsigned int op);
void bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op);
@@ -1062,6 +1068,8 @@ struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
#define bfq_log_bfqq(bfqd, bfqq, fmt, args...) do { \
char pid_str[MAX_PID_STR_LENGTH]; \
+ if (likely(!blk_trace_note_message_enabled((bfqd)->queue))) \
+ break; \
bfq_pid_to_str((bfqq)->pid, pid_str, MAX_PID_STR_LENGTH); \
blk_add_cgroup_trace_msg((bfqd)->queue, \
bfqg_to_blkg(bfqq_group(bfqq))->blkcg, \
@@ -1078,6 +1086,8 @@ struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
#define bfq_log_bfqq(bfqd, bfqq, fmt, args...) do { \
char pid_str[MAX_PID_STR_LENGTH]; \
+ if (likely(!blk_trace_note_message_enabled((bfqd)->queue))) \
+ break; \
bfq_pid_to_str((bfqq)->pid, pid_str, MAX_PID_STR_LENGTH); \
blk_add_trace_msg((bfqd)->queue, "bfq%s%c " fmt, pid_str, \
bfq_bfqq_sync((bfqq)) ? 'S' : 'A', \
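
Both bfq_log_bfqq() variants now bail out before formatting pid_str when blktrace note messages are off; the early exit works because break is legal inside a do { } while (0) statement macro. A generic sketch of the idiom, with hypothetical expensive_setup()/emit() helpers:

#define log_event(enabled, arg) do {				\
	char buf[32];						\
	if (likely(!(enabled)))					\
		break;	/* leave the macro body early */	\
	expensive_setup(buf, sizeof(buf), (arg));		\
	emit(buf);						\
} while (0)
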
diff --git a/block/bio.c b/block/bio.c
index 8f0ed6228fc5..b1170ec18464 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -751,7 +751,7 @@ bool __bio_try_merge_page(struct bio *bio, struct page *page,
if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
return false;
- if (bio->bi_vcnt > 0) {
+ if (bio->bi_vcnt > 0 && !bio_full(bio, len)) {
struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
if (page_is_mergeable(bv, page, len, off, same_page)) {
diff --git a/block/blk-cgroup-rwstat.c b/block/blk-cgroup-rwstat.c
new file mode 100644
index 000000000000..85d5790ac49b
--- /dev/null
+++ b/block/blk-cgroup-rwstat.c
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Legacy blkg rwstat helpers enabled by CONFIG_BLK_CGROUP_RWSTAT.
+ * Do not use in new code.
+ */
+#include "blk-cgroup-rwstat.h"
+
+int blkg_rwstat_init(struct blkg_rwstat *rwstat, gfp_t gfp)
+{
+ int i, ret;
+
+ for (i = 0; i < BLKG_RWSTAT_NR; i++) {
+ ret = percpu_counter_init(&rwstat->cpu_cnt[i], 0, gfp);
+ if (ret) {
+ while (--i >= 0)
+ percpu_counter_destroy(&rwstat->cpu_cnt[i]);
+ return ret;
+ }
+ atomic64_set(&rwstat->aux_cnt[i], 0);
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(blkg_rwstat_init);
+
+void blkg_rwstat_exit(struct blkg_rwstat *rwstat)
+{
+ int i;
+
+ for (i = 0; i < BLKG_RWSTAT_NR; i++)
+ percpu_counter_destroy(&rwstat->cpu_cnt[i]);
+}
+EXPORT_SYMBOL_GPL(blkg_rwstat_exit);
+
+/**
+ * __blkg_prfill_rwstat - prfill helper for a blkg_rwstat
+ * @sf: seq_file to print to
+ * @pd: policy private data of interest
+ * @rwstat: rwstat to print
+ *
+ * Print @rwstat to @sf for the device associated with @pd.
+ */
+u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
+ const struct blkg_rwstat_sample *rwstat)
+{
+ static const char *rwstr[] = {
+ [BLKG_RWSTAT_READ] = "Read",
+ [BLKG_RWSTAT_WRITE] = "Write",
+ [BLKG_RWSTAT_SYNC] = "Sync",
+ [BLKG_RWSTAT_ASYNC] = "Async",
+ [BLKG_RWSTAT_DISCARD] = "Discard",
+ };
+ const char *dname = blkg_dev_name(pd->blkg);
+ u64 v;
+ int i;
+
+ if (!dname)
+ return 0;
+
+ for (i = 0; i < BLKG_RWSTAT_NR; i++)
+ seq_printf(sf, "%s %s %llu\n", dname, rwstr[i],
+ rwstat->cnt[i]);
+
+ v = rwstat->cnt[BLKG_RWSTAT_READ] +
+ rwstat->cnt[BLKG_RWSTAT_WRITE] +
+ rwstat->cnt[BLKG_RWSTAT_DISCARD];
+ seq_printf(sf, "%s Total %llu\n", dname, v);
+ return v;
+}
+EXPORT_SYMBOL_GPL(__blkg_prfill_rwstat);
+
+/**
+ * blkg_prfill_rwstat - prfill callback for blkg_rwstat
+ * @sf: seq_file to print to
+ * @pd: policy private data of interest
+ * @off: offset to the blkg_rwstat in @pd
+ *
+ * prfill callback for printing a blkg_rwstat.
+ */
+u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
+ int off)
+{
+ struct blkg_rwstat_sample rwstat = { };
+
+ blkg_rwstat_read((void *)pd + off, &rwstat);
+ return __blkg_prfill_rwstat(sf, pd, &rwstat);
+}
+EXPORT_SYMBOL_GPL(blkg_prfill_rwstat);
+
+/**
+ * blkg_rwstat_recursive_sum - collect hierarchical blkg_rwstat
+ * @blkg: blkg of interest
+ * @pol: blkcg_policy which contains the blkg_rwstat
+ * @off: offset to the blkg_rwstat in blkg_policy_data or @blkg
+ * @sum: blkg_rwstat_sample structure containing the results
+ *
+ * Collect the blkg_rwstat specified by @blkg, @pol and @off and all its
+ * online descendants and their aux counts. The caller must be holding the
+ * queue lock for online tests.
+ *
+ * If @pol is NULL, blkg_rwstat is at @off bytes into @blkg; otherwise, it
+ * is at @off bytes into @blkg's blkg_policy_data of the policy.
+ */
+void blkg_rwstat_recursive_sum(struct blkcg_gq *blkg, struct blkcg_policy *pol,
+ int off, struct blkg_rwstat_sample *sum)
+{
+ struct blkcg_gq *pos_blkg;
+ struct cgroup_subsys_state *pos_css;
+ unsigned int i;
+
+ lockdep_assert_held(&blkg->q->queue_lock);
+
+ rcu_read_lock();
+ blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
+ struct blkg_rwstat *rwstat;
+
+ if (!pos_blkg->online)
+ continue;
+
+ if (pol)
+ rwstat = (void *)blkg_to_pd(pos_blkg, pol) + off;
+ else
+ rwstat = (void *)pos_blkg + off;
+
+ for (i = 0; i < BLKG_RWSTAT_NR; i++)
+ sum->cnt[i] = blkg_rwstat_read_counter(rwstat, i);
+ }
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(blkg_rwstat_recursive_sum);
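
The split-out file keeps the full legacy rwstat lifecycle: percpu counters are initialized once, charged per bio, and printed through the prfill helpers. A sketch of how a policy might consume it (my_stats and the helpers around it are hypothetical):

struct my_stats {
	struct blkg_rwstat bytes;
};

static int my_stats_init(struct my_stats *stats, gfp_t gfp)
{
	return blkg_rwstat_init(&stats->bytes, gfp);
}

static void my_account_bio(struct my_stats *stats, struct bio *bio)
{
	/* buckets (READ/WRITE/DISCARD plus SYNC/ASYNC) derive from bi_opf */
	blkg_rwstat_add(&stats->bytes, bio->bi_opf, bio->bi_iter.bi_size);
}

static void my_stats_exit(struct my_stats *stats)
{
	blkg_rwstat_exit(&stats->bytes);
}
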
diff --git a/block/blk-cgroup-rwstat.h b/block/blk-cgroup-rwstat.h
new file mode 100644
index 000000000000..ee746919c41f
--- /dev/null
+++ b/block/blk-cgroup-rwstat.h
@@ -0,0 +1,149 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Legacy blkg rwstat helpers enabled by CONFIG_BLK_CGROUP_RWSTAT.
+ * Do not use in new code.
+ */
+#ifndef _BLK_CGROUP_RWSTAT_H
+#define _BLK_CGROUP_RWSTAT_H
+
+#include <linux/blk-cgroup.h>
+
+enum blkg_rwstat_type {
+ BLKG_RWSTAT_READ,
+ BLKG_RWSTAT_WRITE,
+ BLKG_RWSTAT_SYNC,
+ BLKG_RWSTAT_ASYNC,
+ BLKG_RWSTAT_DISCARD,
+
+ BLKG_RWSTAT_NR,
+ BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
+};
+
+/*
+ * blkg_[rw]stat->aux_cnt is excluded for local stats but included for
+ * recursive. Used to carry stats of dead children.
+ */
+struct blkg_rwstat {
+ struct percpu_counter cpu_cnt[BLKG_RWSTAT_NR];
+ atomic64_t aux_cnt[BLKG_RWSTAT_NR];
+};
+
+struct blkg_rwstat_sample {
+ u64 cnt[BLKG_RWSTAT_NR];
+};
+
+static inline u64 blkg_rwstat_read_counter(struct blkg_rwstat *rwstat,
+ unsigned int idx)
+{
+ return atomic64_read(&rwstat->aux_cnt[idx]) +
+ percpu_counter_sum_positive(&rwstat->cpu_cnt[idx]);
+}
+
+int blkg_rwstat_init(struct blkg_rwstat *rwstat, gfp_t gfp);
+void blkg_rwstat_exit(struct blkg_rwstat *rwstat);
+u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
+ const struct blkg_rwstat_sample *rwstat);
+u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
+ int off);
+void blkg_rwstat_recursive_sum(struct blkcg_gq *blkg, struct blkcg_policy *pol,
+ int off, struct blkg_rwstat_sample *sum);
+
+
+/**
+ * blkg_rwstat_add - add a value to a blkg_rwstat
+ * @rwstat: target blkg_rwstat
+ * @op: REQ_OP and flags
+ * @val: value to add
+ *
+ * Add @val to @rwstat. The counters are chosen according to @op. The
+ * caller is responsible for synchronizing calls to this function.
+ */
+static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
+ unsigned int op, uint64_t val)
+{
+ struct percpu_counter *cnt;
+
+ if (op_is_discard(op))
+ cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_DISCARD];
+ else if (op_is_write(op))
+ cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_WRITE];
+ else
+ cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_READ];
+
+ percpu_counter_add_batch(cnt, val, BLKG_STAT_CPU_BATCH);
+
+ if (op_is_sync(op))
+ cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_SYNC];
+ else
+ cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_ASYNC];
+
+ percpu_counter_add_batch(cnt, val, BLKG_STAT_CPU_BATCH);
+}
+
+/**
+ * blkg_rwstat_read - read the current values of a blkg_rwstat
+ * @rwstat: blkg_rwstat to read
+ * @result: where to store the snapshot
+ *
+ * Read the current per-cpu snapshot of @rwstat into @result. The aux
+ * counts are not included.
+ */
+static inline void blkg_rwstat_read(struct blkg_rwstat *rwstat,
+ struct blkg_rwstat_sample *result)
+{
+ int i;
+
+ for (i = 0; i < BLKG_RWSTAT_NR; i++)
+ result->cnt[i] =
+ percpu_counter_sum_positive(&rwstat->cpu_cnt[i]);
+}
+
+/**
+ * blkg_rwstat_total - read the total count of a blkg_rwstat
+ * @rwstat: blkg_rwstat to read
+ *
+ * Return the total count of @rwstat regardless of the IO direction. This
+ * function can be called without synchronization and takes care of u64
+ * atomicity.
+ */
+static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat)
+{
+ struct blkg_rwstat_sample tmp = { };
+
+ blkg_rwstat_read(rwstat, &tmp);
+ return tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE];
+}
+
+/**
+ * blkg_rwstat_reset - reset a blkg_rwstat
+ * @rwstat: blkg_rwstat to reset
+ */
+static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
+{
+ int i;
+
+ for (i = 0; i < BLKG_RWSTAT_NR; i++) {
+ percpu_counter_set(&rwstat->cpu_cnt[i], 0);
+ atomic64_set(&rwstat->aux_cnt[i], 0);
+ }
+}
+
+/**
+ * blkg_rwstat_add_aux - add a blkg_rwstat into another's aux count
+ * @to: the destination blkg_rwstat
+ * @from: the source
+ *
+ * Add @from's count including the aux one to @to's aux count.
+ */
+static inline void blkg_rwstat_add_aux(struct blkg_rwstat *to,
+ struct blkg_rwstat *from)
+{
+ u64 sum[BLKG_RWSTAT_NR];
+ int i;
+
+ for (i = 0; i < BLKG_RWSTAT_NR; i++)
+ sum[i] = percpu_counter_sum_positive(&from->cpu_cnt[i]);
+
+ for (i = 0; i < BLKG_RWSTAT_NR; i++)
+ atomic64_add(sum[i] + atomic64_read(&from->aux_cnt[i]),
+ &to->aux_cnt[i]);
+}
+#endif /* _BLK_CGROUP_RWSTAT_H */
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 1eb8895be4c6..708dea92dac8 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -80,8 +80,7 @@ static void blkg_free(struct blkcg_gq *blkg)
if (blkg->pd[i])
blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
- blkg_rwstat_exit(&blkg->stat_ios);
- blkg_rwstat_exit(&blkg->stat_bytes);
+ free_percpu(blkg->iostat_cpu);
percpu_ref_exit(&blkg->refcnt);
kfree(blkg);
}
@@ -146,7 +145,7 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
gfp_t gfp_mask)
{
struct blkcg_gq *blkg;
- int i;
+ int i, cpu;
/* alloc and init base part */
blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node);
@@ -156,8 +155,8 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
if (percpu_ref_init(&blkg->refcnt, blkg_release, 0, gfp_mask))
goto err_free;
- if (blkg_rwstat_init(&blkg->stat_bytes, gfp_mask) ||
- blkg_rwstat_init(&blkg->stat_ios, gfp_mask))
+ blkg->iostat_cpu = alloc_percpu_gfp(struct blkg_iostat_set, gfp_mask);
+ if (!blkg->iostat_cpu)
goto err_free;
blkg->q = q;
@@ -167,6 +166,10 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
INIT_WORK(&blkg->async_bio_work, blkg_async_bio_workfn);
blkg->blkcg = blkcg;
+ u64_stats_init(&blkg->iostat.sync);
+ for_each_possible_cpu(cpu)
+ u64_stats_init(&per_cpu_ptr(blkg->iostat_cpu, cpu)->sync);
+
for (i = 0; i < BLKCG_MAX_POLS; i++) {
struct blkcg_policy *pol = blkcg_policy[i];
struct blkg_policy_data *pd;
@@ -393,7 +396,6 @@ struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
static void blkg_destroy(struct blkcg_gq *blkg)
{
struct blkcg *blkcg = blkg->blkcg;
- struct blkcg_gq *parent = blkg->parent;
int i;
lockdep_assert_held(&blkg->q->queue_lock);
@@ -410,11 +412,6 @@ static void blkg_destroy(struct blkcg_gq *blkg)
pol->pd_offline_fn(blkg->pd[i]);
}
- if (parent) {
- blkg_rwstat_add_aux(&parent->stat_bytes, &blkg->stat_bytes);
- blkg_rwstat_add_aux(&parent->stat_ios, &blkg->stat_ios);
- }
-
blkg->online = false;
radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
@@ -464,7 +461,7 @@ static int blkcg_reset_stats(struct cgroup_subsys_state *css,
{
struct blkcg *blkcg = css_to_blkcg(css);
struct blkcg_gq *blkg;
- int i;
+ int i, cpu;
mutex_lock(&blkcg_pol_mutex);
spin_lock_irq(&blkcg->lock);
@@ -475,8 +472,12 @@ static int blkcg_reset_stats(struct cgroup_subsys_state *css,
* anyway. If you get hit by a race, retry.
*/
hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
- blkg_rwstat_reset(&blkg->stat_bytes);
- blkg_rwstat_reset(&blkg->stat_ios);
+ for_each_possible_cpu(cpu) {
+ struct blkg_iostat_set *bis =
+ per_cpu_ptr(blkg->iostat_cpu, cpu);
+ memset(bis, 0, sizeof(*bis));
+ }
+ memset(&blkg->iostat, 0, sizeof(blkg->iostat));
for (i = 0; i < BLKCG_MAX_POLS; i++) {
struct blkcg_policy *pol = blkcg_policy[i];
@@ -560,186 +561,6 @@ u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v)
}
EXPORT_SYMBOL_GPL(__blkg_prfill_u64);
-/**
- * __blkg_prfill_rwstat - prfill helper for a blkg_rwstat
- * @sf: seq_file to print to
- * @pd: policy private data of interest
- * @rwstat: rwstat to print
- *
- * Print @rwstat to @sf for the device assocaited with @pd.
- */
-u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
- const struct blkg_rwstat_sample *rwstat)
-{
- static const char *rwstr[] = {
- [BLKG_RWSTAT_READ] = "Read",
- [BLKG_RWSTAT_WRITE] = "Write",
- [BLKG_RWSTAT_SYNC] = "Sync",
- [BLKG_RWSTAT_ASYNC] = "Async",
- [BLKG_RWSTAT_DISCARD] = "Discard",
- };
- const char *dname = blkg_dev_name(pd->blkg);
- u64 v;
- int i;
-
- if (!dname)
- return 0;
-
- for (i = 0; i < BLKG_RWSTAT_NR; i++)
- seq_printf(sf, "%s %s %llu\n", dname, rwstr[i],
- rwstat->cnt[i]);
-
- v = rwstat->cnt[BLKG_RWSTAT_READ] +
- rwstat->cnt[BLKG_RWSTAT_WRITE] +
- rwstat->cnt[BLKG_RWSTAT_DISCARD];
- seq_printf(sf, "%s Total %llu\n", dname, v);
- return v;
-}
-EXPORT_SYMBOL_GPL(__blkg_prfill_rwstat);
-
-/**
- * blkg_prfill_rwstat - prfill callback for blkg_rwstat
- * @sf: seq_file to print to
- * @pd: policy private data of interest
- * @off: offset to the blkg_rwstat in @pd
- *
- * prfill callback for printing a blkg_rwstat.
- */
-u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
- int off)
-{
- struct blkg_rwstat_sample rwstat = { };
-
- blkg_rwstat_read((void *)pd + off, &rwstat);
- return __blkg_prfill_rwstat(sf, pd, &rwstat);
-}
-EXPORT_SYMBOL_GPL(blkg_prfill_rwstat);
-
-static u64 blkg_prfill_rwstat_field(struct seq_file *sf,
- struct blkg_policy_data *pd, int off)
-{
- struct blkg_rwstat_sample rwstat = { };
-
- blkg_rwstat_read((void *)pd->blkg + off, &rwstat);
- return __blkg_prfill_rwstat(sf, pd, &rwstat);
-}
-
-/**
- * blkg_print_stat_bytes - seq_show callback for blkg->stat_bytes
- * @sf: seq_file to print to
- * @v: unused
- *
- * To be used as cftype->seq_show to print blkg->stat_bytes.
- * cftype->private must be set to the blkcg_policy.
- */
-int blkg_print_stat_bytes(struct seq_file *sf, void *v)
-{
- blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
- blkg_prfill_rwstat_field, (void *)seq_cft(sf)->private,
- offsetof(struct blkcg_gq, stat_bytes), true);
- return 0;
-}
-EXPORT_SYMBOL_GPL(blkg_print_stat_bytes);
-
-/**
- * blkg_print_stat_bytes - seq_show callback for blkg->stat_ios
- * @sf: seq_file to print to
- * @v: unused
- *
- * To be used as cftype->seq_show to print blkg->stat_ios. cftype->private
- * must be set to the blkcg_policy.
- */
-int blkg_print_stat_ios(struct seq_file *sf, void *v)
-{
- blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
- blkg_prfill_rwstat_field, (void *)seq_cft(sf)->private,
- offsetof(struct blkcg_gq, stat_ios), true);
- return 0;
-}
-EXPORT_SYMBOL_GPL(blkg_print_stat_ios);
-
-static u64 blkg_prfill_rwstat_field_recursive(struct seq_file *sf,
- struct blkg_policy_data *pd,
- int off)
-{
- struct blkg_rwstat_sample rwstat;
-
- blkg_rwstat_recursive_sum(pd->blkg, NULL, off, &rwstat);
- return __blkg_prfill_rwstat(sf, pd, &rwstat);
-}
-
-/**
- * blkg_print_stat_bytes_recursive - recursive version of blkg_print_stat_bytes
- * @sf: seq_file to print to
- * @v: unused
- */
-int blkg_print_stat_bytes_recursive(struct seq_file *sf, void *v)
-{
- blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
- blkg_prfill_rwstat_field_recursive,
- (void *)seq_cft(sf)->private,
- offsetof(struct blkcg_gq, stat_bytes), true);
- return 0;
-}
-EXPORT_SYMBOL_GPL(blkg_print_stat_bytes_recursive);
-
-/**
- * blkg_print_stat_ios_recursive - recursive version of blkg_print_stat_ios
- * @sf: seq_file to print to
- * @v: unused
- */
-int blkg_print_stat_ios_recursive(struct seq_file *sf, void *v)
-{
- blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
- blkg_prfill_rwstat_field_recursive,
- (void *)seq_cft(sf)->private,
- offsetof(struct blkcg_gq, stat_ios), true);
- return 0;
-}
-EXPORT_SYMBOL_GPL(blkg_print_stat_ios_recursive);
-
-/**
- * blkg_rwstat_recursive_sum - collect hierarchical blkg_rwstat
- * @blkg: blkg of interest
- * @pol: blkcg_policy which contains the blkg_rwstat
- * @off: offset to the blkg_rwstat in blkg_policy_data or @blkg
- * @sum: blkg_rwstat_sample structure containing the results
- *
- * Collect the blkg_rwstat specified by @blkg, @pol and @off and all its
- * online descendants and their aux counts. The caller must be holding the
- * queue lock for online tests.
- *
- * If @pol is NULL, blkg_rwstat is at @off bytes into @blkg; otherwise, it
- * is at @off bytes into @blkg's blkg_policy_data of the policy.
- */
-void blkg_rwstat_recursive_sum(struct blkcg_gq *blkg, struct blkcg_policy *pol,
- int off, struct blkg_rwstat_sample *sum)
-{
- struct blkcg_gq *pos_blkg;
- struct cgroup_subsys_state *pos_css;
- unsigned int i;
-
- lockdep_assert_held(&blkg->q->queue_lock);
-
- rcu_read_lock();
- blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
- struct blkg_rwstat *rwstat;
-
- if (!pos_blkg->online)
- continue;
-
- if (pol)
- rwstat = (void *)blkg_to_pd(pos_blkg, pol) + off;
- else
- rwstat = (void *)pos_blkg + off;
-
- for (i = 0; i < BLKG_RWSTAT_NR; i++)
- sum->cnt[i] = blkg_rwstat_read_counter(rwstat, i);
- }
- rcu_read_unlock();
-}
-EXPORT_SYMBOL_GPL(blkg_rwstat_recursive_sum);
-
/* Performs queue bypass and policy enabled checks then looks up blkg. */
static struct blkcg_gq *blkg_lookup_check(struct blkcg *blkcg,
const struct blkcg_policy *pol,
@@ -923,16 +744,18 @@ static int blkcg_print_stat(struct seq_file *sf, void *v)
struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
struct blkcg_gq *blkg;
+ cgroup_rstat_flush(blkcg->css.cgroup);
rcu_read_lock();
hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
+ struct blkg_iostat_set *bis = &blkg->iostat;
const char *dname;
char *buf;
- struct blkg_rwstat_sample rwstat;
u64 rbytes, wbytes, rios, wios, dbytes, dios;
size_t size = seq_get_buf(sf, &buf), off = 0;
int i;
bool has_stats = false;
+ unsigned seq;
spin_lock_irq(&blkg->q->queue_lock);
@@ -951,17 +774,16 @@ static int blkcg_print_stat(struct seq_file *sf, void *v)
*/
off += scnprintf(buf+off, size-off, "%s ", dname);
- blkg_rwstat_recursive_sum(blkg, NULL,
- offsetof(struct blkcg_gq, stat_bytes), &rwstat);
- rbytes = rwstat.cnt[BLKG_RWSTAT_READ];
- wbytes = rwstat.cnt[BLKG_RWSTAT_WRITE];
- dbytes = rwstat.cnt[BLKG_RWSTAT_DISCARD];
+ do {
+ seq = u64_stats_fetch_begin(&bis->sync);
- blkg_rwstat_recursive_sum(blkg, NULL,
- offsetof(struct blkcg_gq, stat_ios), &rwstat);
- rios = rwstat.cnt[BLKG_RWSTAT_READ];
- wios = rwstat.cnt[BLKG_RWSTAT_WRITE];
- dios = rwstat.cnt[BLKG_RWSTAT_DISCARD];
+ rbytes = bis->cur.bytes[BLKG_IOSTAT_READ];
+ wbytes = bis->cur.bytes[BLKG_IOSTAT_WRITE];
+ dbytes = bis->cur.bytes[BLKG_IOSTAT_DISCARD];
+ rios = bis->cur.ios[BLKG_IOSTAT_READ];
+ wios = bis->cur.ios[BLKG_IOSTAT_WRITE];
+ dios = bis->cur.ios[BLKG_IOSTAT_DISCARD];
+ } while (u64_stats_fetch_retry(&bis->sync, seq));
if (rbytes || wbytes || rios || wios) {
has_stats = true;
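
The do/while above is the standard u64_stats reader: on 32-bit kernels a concurrent writer could tear a 64-bit counter, so the reader retries until the sequence count is stable. Reduced to a single hypothetical counter:

struct my_counter {
	struct u64_stats_sync sync;
	u64 value;
};

static u64 my_counter_read(struct my_counter *c)
{
	unsigned int seq;
	u64 val;

	do {
		seq = u64_stats_fetch_begin(&c->sync);
		val = c->value;	/* could be torn on 32-bit without this */
	} while (u64_stats_fetch_retry(&c->sync, seq));

	return val;
}
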
@@ -1297,6 +1119,77 @@ static int blkcg_can_attach(struct cgroup_taskset *tset)
return ret;
}
+static void blkg_iostat_set(struct blkg_iostat *dst, struct blkg_iostat *src)
+{
+ int i;
+
+ for (i = 0; i < BLKG_IOSTAT_NR; i++) {
+ dst->bytes[i] = src->bytes[i];
+ dst->ios[i] = src->ios[i];
+ }
+}
+
+static void blkg_iostat_add(struct blkg_iostat *dst, struct blkg_iostat *src)
+{
+ int i;
+
+ for (i = 0; i < BLKG_IOSTAT_NR; i++) {
+ dst->bytes[i] += src->bytes[i];
+ dst->ios[i] += src->ios[i];
+ }
+}
+
+static void blkg_iostat_sub(struct blkg_iostat *dst, struct blkg_iostat *src)
+{
+ int i;
+
+ for (i = 0; i < BLKG_IOSTAT_NR; i++) {
+ dst->bytes[i] -= src->bytes[i];
+ dst->ios[i] -= src->ios[i];
+ }
+}
+
+static void blkcg_rstat_flush(struct cgroup_subsys_state *css, int cpu)
+{
+ struct blkcg *blkcg = css_to_blkcg(css);
+ struct blkcg_gq *blkg;
+
+ rcu_read_lock();
+
+ hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
+ struct blkcg_gq *parent = blkg->parent;
+ struct blkg_iostat_set *bisc = per_cpu_ptr(blkg->iostat_cpu, cpu);
+ struct blkg_iostat cur, delta;
+ unsigned seq;
+
+ /* fetch the current per-cpu values */
+ do {
+ seq = u64_stats_fetch_begin(&bisc->sync);
+ blkg_iostat_set(&cur, &bisc->cur);
+ } while (u64_stats_fetch_retry(&bisc->sync, seq));
+
+ /* propagate percpu delta to global */
+ u64_stats_update_begin(&blkg->iostat.sync);
+ blkg_iostat_set(&delta, &cur);
+ blkg_iostat_sub(&delta, &bisc->last);
+ blkg_iostat_add(&blkg->iostat.cur, &delta);
+ blkg_iostat_add(&bisc->last, &delta);
+ u64_stats_update_end(&blkg->iostat.sync);
+
+ /* propagate global delta to parent */
+ if (parent) {
+ u64_stats_update_begin(&parent->iostat.sync);
+ blkg_iostat_set(&delta, &blkg->iostat.cur);
+ blkg_iostat_sub(&delta, &blkg->iostat.last);
+ blkg_iostat_add(&parent->iostat.cur, &delta);
+ blkg_iostat_add(&blkg->iostat.last, &delta);
+ u64_stats_update_end(&parent->iostat.sync);
+ }
+ }
+
+ rcu_read_unlock();
+}
+
static void blkcg_bind(struct cgroup_subsys_state *root_css)
{
int i;
@@ -1329,6 +1222,7 @@ struct cgroup_subsys io_cgrp_subsys = {
.css_offline = blkcg_css_offline,
.css_free = blkcg_css_free,
.can_attach = blkcg_can_attach,
+ .css_rstat_flush = blkcg_rstat_flush,
.bind = blkcg_bind,
.dfl_cftypes = blkcg_files,
.legacy_cftypes = blkcg_legacy_files,
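
blkcg_rstat_flush() above moves counters one level per flush using cur/last snapshot pairs, so every event is counted exactly once no matter how often or in what order flushes run. The bookkeeping, reduced to one hypothetical counter per level:

struct lvl {
	u64 cur;	/* running total at this level */
	u64 last;	/* portion already pushed to the parent */
};

static void propagate(struct lvl *child, struct lvl *parent)
{
	u64 delta = child->cur - child->last;	/* new since last flush */

	parent->cur += delta;
	child->last += delta;	/* never push the same events twice */
}
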
diff --git a/block/blk-core.c b/block/blk-core.c
index d5e668ec751b..a1e228752083 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -132,6 +132,9 @@ static const char *const blk_op_name[] = {
REQ_OP_NAME(SECURE_ERASE),
REQ_OP_NAME(ZONE_RESET),
REQ_OP_NAME(ZONE_RESET_ALL),
+ REQ_OP_NAME(ZONE_OPEN),
+ REQ_OP_NAME(ZONE_CLOSE),
+ REQ_OP_NAME(ZONE_FINISH),
REQ_OP_NAME(WRITE_SAME),
REQ_OP_NAME(WRITE_ZEROES),
REQ_OP_NAME(SCSI_IN),
@@ -336,14 +339,14 @@ EXPORT_SYMBOL_GPL(blk_set_queue_dying);
*/
void blk_cleanup_queue(struct request_queue *q)
{
+ WARN_ON_ONCE(blk_queue_registered(q));
+
/* mark @q DYING, no new request or merges will be allowed afterwards */
- mutex_lock(&q->sysfs_lock);
blk_set_queue_dying(q);
blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
blk_queue_flag_set(QUEUE_FLAG_DYING, q);
- mutex_unlock(&q->sysfs_lock);
/*
* Drain all requests queued before DYING marking. Set DEAD flag to
@@ -848,11 +851,7 @@ static inline int blk_partition_remap(struct bio *bio)
if (unlikely(bio_check_ro(bio, p)))
goto out;
- /*
- * Zone reset does not include bi_size so bio_sectors() is always 0.
- * Include a test for the reset op code and perform the remap if needed.
- */
- if (bio_sectors(bio) || bio_op(bio) == REQ_OP_ZONE_RESET) {
+ if (bio_sectors(bio)) {
if (bio_check_eod(bio, part_nr_sects_read(p)))
goto out;
bio->bi_iter.bi_sector += p->start_sect;
@@ -936,6 +935,9 @@ generic_make_request_checks(struct bio *bio)
goto not_supported;
break;
case REQ_OP_ZONE_RESET:
+ case REQ_OP_ZONE_OPEN:
+ case REQ_OP_ZONE_CLOSE:
+ case REQ_OP_ZONE_FINISH:
if (!blk_queue_is_zoned(q))
goto not_supported;
break;
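
With REQ_OP_ZONE_OPEN/CLOSE/FINISH now passing generic_make_request_checks() on zoned queues, callers can request explicit zone state transitions. A hedged example, assuming the blkdev_zone_mgmt() helper introduced alongside these opcodes in the same series:

/* Open the zone starting at zone_start before writing to it;
 * zone_start must be aligned to the zone size. */
static int my_open_zone(struct block_device *bdev, sector_t zone_start,
			sector_t zone_sectors)
{
	return blkdev_zone_mgmt(bdev, REQ_OP_ZONE_OPEN, zone_start,
				zone_sectors, GFP_KERNEL);
}
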
diff --git a/block/blk-exec.c b/block/blk-exec.c
index 1db44ca0f4a6..e20a852ae432 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -55,6 +55,8 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
rq->rq_disk = bd_disk;
rq->end_io = done;
+ blk_account_io_start(rq, true);
+
/*
* don't check dying flag for MQ because the request won't
* be reused after dying flag is set
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 1eec9cbe5a0a..1777346baf06 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -136,6 +136,17 @@ static void blk_flush_queue_rq(struct request *rq, bool add_front)
blk_mq_add_to_requeue_list(rq, add_front, true);
}
+static void blk_account_io_flush(struct request *rq)
+{
+ struct hd_struct *part = &rq->rq_disk->part0;
+
+ part_stat_lock();
+ part_stat_inc(part, ios[STAT_FLUSH]);
+ part_stat_add(part, nsecs[STAT_FLUSH],
+ ktime_get_ns() - rq->start_time_ns);
+ part_stat_unlock();
+}
+
/**
* blk_flush_complete_seq - complete flush sequence
* @rq: PREFLUSH/FUA request being sequenced
@@ -185,7 +196,7 @@ static void blk_flush_complete_seq(struct request *rq,
case REQ_FSEQ_DONE:
/*
- * @rq was previously adjusted by blk_flush_issue() for
+ * @rq was previously adjusted by blk_insert_flush() for
* flush sequencing and may already have gone through the
* flush data request completion path. Restore @rq for
* normal completion and end it.
@@ -212,6 +223,8 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error)
struct blk_flush_queue *fq = blk_get_flush_queue(q, flush_rq->mq_ctx);
struct blk_mq_hw_ctx *hctx;
+ blk_account_io_flush(flush_rq);
+
/* release the tag's ownership to the req cloned from */
spin_lock_irqsave(&fq->mq_flush_lock, flags);
diff --git a/block/blk-iocost.c b/block/blk-iocost.c
index a7ed434eae03..e01267f99183 100644
--- a/block/blk-iocost.c
+++ b/block/blk-iocost.c
@@ -1057,9 +1057,12 @@ static bool iocg_activate(struct ioc_gq *iocg, struct ioc_now *now)
atomic64_set(&iocg->active_period, cur_period);
/* already activated or breaking leaf-only constraint? */
- for (i = iocg->level; i > 0; i--)
- if (!list_empty(&iocg->active_list))
+ if (!list_empty(&iocg->active_list))
+ goto succeed_unlock;
+ for (i = iocg->level - 1; i > 0; i--)
+ if (!list_empty(&iocg->ancestors[i]->active_list))
goto fail_unlock;
+
if (iocg->child_active_sum)
goto fail_unlock;
@@ -1101,6 +1104,7 @@ static bool iocg_activate(struct ioc_gq *iocg, struct ioc_now *now)
ioc_start_period(ioc, now);
}
+succeed_unlock:
spin_unlock_irq(&ioc->lock);
return true;
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 48e6725b32ee..d783bdc4559b 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -293,7 +293,7 @@ split:
void __blk_queue_split(struct request_queue *q, struct bio **bio,
unsigned int *nr_segs)
{
- struct bio *split;
+ struct bio *split = NULL;
switch (bio_op(*bio)) {
case REQ_OP_DISCARD:
@@ -309,6 +309,21 @@ void __blk_queue_split(struct request_queue *q, struct bio **bio,
nr_segs);
break;
default:
+ /*
+ * All drivers must accept single-segment bios that are <=
+ * PAGE_SIZE. This is a quick and dirty check that relies on
+ * the fact that bi_io_vec[0] is always valid if a bio has data.
+ * The check might lead to occasional false negatives when bios
+ * are cloned, but compared to the performance impact of cloned
+ * bios themselves the loop below doesn't matter anyway.
+ */
+ if (!q->limits.chunk_sectors &&
+ (*bio)->bi_vcnt == 1 &&
+ ((*bio)->bi_io_vec[0].bv_len +
+ (*bio)->bi_io_vec[0].bv_offset) <= PAGE_SIZE) {
+ *nr_segs = 1;
+ break;
+ }
split = blk_bio_segment_split(q, *bio, &q->bio_split, nr_segs);
break;
}
diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
index a0d3ce30fa08..062229395a50 100644
--- a/block/blk-mq-sysfs.c
+++ b/block/blk-mq-sysfs.c
@@ -74,10 +74,8 @@ static ssize_t blk_mq_sysfs_show(struct kobject *kobj, struct attribute *attr,
if (!entry->show)
return -EIO;
- res = -ENOENT;
mutex_lock(&q->sysfs_lock);
- if (!blk_queue_dying(q))
- res = entry->show(ctx, page);
+ res = entry->show(ctx, page);
mutex_unlock(&q->sysfs_lock);
return res;
}
@@ -97,10 +95,8 @@ static ssize_t blk_mq_sysfs_store(struct kobject *kobj, struct attribute *attr,
if (!entry->store)
return -EIO;
- res = -ENOENT;
mutex_lock(&q->sysfs_lock);
- if (!blk_queue_dying(q))
- res = entry->store(ctx, page, length);
+ res = entry->store(ctx, page, length);
mutex_unlock(&q->sysfs_lock);
return res;
}
@@ -120,10 +116,8 @@ static ssize_t blk_mq_hw_sysfs_show(struct kobject *kobj,
if (!entry->show)
return -EIO;
- res = -ENOENT;
mutex_lock(&q->sysfs_lock);
- if (!blk_queue_dying(q))
- res = entry->show(hctx, page);
+ res = entry->show(hctx, page);
mutex_unlock(&q->sysfs_lock);
return res;
}
@@ -144,10 +138,8 @@ static ssize_t blk_mq_hw_sysfs_store(struct kobject *kobj,
if (!entry->store)
return -EIO;
- res = -ENOENT;
mutex_lock(&q->sysfs_lock);
- if (!blk_queue_dying(q))
- res = entry->store(hctx, page, length);
+ res = entry->store(hctx, page, length);
mutex_unlock(&q->sysfs_lock);
return res;
}
@@ -166,20 +158,25 @@ static ssize_t blk_mq_hw_sysfs_nr_reserved_tags_show(struct blk_mq_hw_ctx *hctx,
static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
{
+ const size_t size = PAGE_SIZE - 1;
unsigned int i, first = 1;
- ssize_t ret = 0;
+ int ret = 0, pos = 0;
for_each_cpu(i, hctx->cpumask) {
if (first)
- ret += sprintf(ret + page, "%u", i);
+ ret = snprintf(pos + page, size - pos, "%u", i);
else
- ret += sprintf(ret + page, ", %u", i);
+ ret = snprintf(pos + page, size - pos, ", %u", i);
+
+ if (ret >= size - pos)
+ break;
first = 0;
+ pos += ret;
}
- ret += sprintf(ret + page, "\n");
- return ret;
+ ret = snprintf(pos + page, size + 1 - pos, "\n");
+ return pos + ret;
}
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_nr_tags = {
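
The rewritten blk_mq_hw_sysfs_cpus_show() above swaps unbounded sprintf() for position-tracked snprintf() so a machine with enough CPUs can no longer overrun the one-page sysfs buffer. The same pattern in isolation (show_id_list and its arguments are hypothetical):

static ssize_t show_id_list(char *page, const unsigned int *ids, int n)
{
	const size_t size = PAGE_SIZE - 1;	/* keep room for '\n' */
	int i, ret, pos = 0;

	for (i = 0; i < n; i++) {
		ret = snprintf(page + pos, size - pos,
			       i ? ", %u" : "%u", ids[i]);
		if (ret >= size - pos)
			break;			/* truncated: stop early */
		pos += ret;
	}
	return pos + snprintf(page + pos, size + 1 - pos, "\n");
}
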
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 008388e82b5c..fbacde454718 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -15,14 +15,6 @@
#include "blk-mq.h"
#include "blk-mq-tag.h"
-bool blk_mq_has_free_tags(struct blk_mq_tags *tags)
-{
- if (!tags)
- return true;
-
- return sbitmap_any_bit_clear(&tags->bitmap_tags.sb);
-}
-
/*
* If a previously inactive queue goes active, bump the active user count.
* We need to do this before try to allocate driver tag, then even if fail
diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h
index 61deab0b5a5a..15bc74acb57e 100644
--- a/block/blk-mq-tag.h
+++ b/block/blk-mq-tag.h
@@ -28,7 +28,6 @@ extern void blk_mq_free_tags(struct blk_mq_tags *tags);
extern unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data);
extern void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, struct blk_mq_tags *tags,
struct blk_mq_ctx *ctx, unsigned int tag);
-extern bool blk_mq_has_free_tags(struct blk_mq_tags *tags);
extern int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
struct blk_mq_tags **tags,
unsigned int depth, bool can_grow);
diff --git a/block/blk-mq.c b/block/blk-mq.c
index ec791156e9cc..323c9cb28066 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -93,7 +93,7 @@ static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
struct mq_inflight {
struct hd_struct *part;
- unsigned int *inflight;
+ unsigned int inflight[2];
};
static bool blk_mq_check_inflight(struct blk_mq_hw_ctx *hctx,
@@ -102,45 +102,29 @@ static bool blk_mq_check_inflight(struct blk_mq_hw_ctx *hctx,
{
struct mq_inflight *mi = priv;
- /*
- * index[0] counts the specific partition that was asked for.
- */
if (rq->part == mi->part)
- mi->inflight[0]++;
+ mi->inflight[rq_data_dir(rq)]++;
return true;
}
unsigned int blk_mq_in_flight(struct request_queue *q, struct hd_struct *part)
{
- unsigned inflight[2];
- struct mq_inflight mi = { .part = part, .inflight = inflight, };
+ struct mq_inflight mi = { .part = part };
- inflight[0] = inflight[1] = 0;
blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);
- return inflight[0];
-}
-
-static bool blk_mq_check_inflight_rw(struct blk_mq_hw_ctx *hctx,
- struct request *rq, void *priv,
- bool reserved)
-{
- struct mq_inflight *mi = priv;
-
- if (rq->part == mi->part)
- mi->inflight[rq_data_dir(rq)]++;
-
- return true;
+ return mi.inflight[0] + mi.inflight[1];
}
void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part,
unsigned int inflight[2])
{
- struct mq_inflight mi = { .part = part, .inflight = inflight, };
+ struct mq_inflight mi = { .part = part };
- inflight[0] = inflight[1] = 0;
- blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight_rw, &mi);
+ blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);
+ inflight[0] = mi.inflight[0];
+ inflight[1] = mi.inflight[1];
}
void blk_freeze_queue_start(struct request_queue *q)
@@ -276,12 +260,6 @@ void blk_mq_wake_waiters(struct request_queue *q)
blk_mq_tag_wakeup_all(hctx->tags, true);
}
-bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
-{
- return blk_mq_has_free_tags(hctx->tags);
-}
-EXPORT_SYMBOL(blk_mq_can_queue);
-
/*
* Only need start/end time stamping if we have iostat or
* blk stats enabled, or using an IO scheduler.
@@ -663,18 +641,6 @@ bool blk_mq_complete_request(struct request *rq)
}
EXPORT_SYMBOL(blk_mq_complete_request);
-int blk_mq_request_started(struct request *rq)
-{
- return blk_mq_rq_state(rq) != MQ_RQ_IDLE;
-}
-EXPORT_SYMBOL_GPL(blk_mq_request_started);
-
-int blk_mq_request_completed(struct request *rq)
-{
- return blk_mq_rq_state(rq) == MQ_RQ_COMPLETE;
-}
-EXPORT_SYMBOL_GPL(blk_mq_request_completed);
-
void blk_mq_start_request(struct request *rq)
{
struct request_queue *q = rq->q;
@@ -1064,7 +1030,7 @@ bool blk_mq_get_driver_tag(struct request *rq)
bool shared;
if (rq->tag != -1)
- goto done;
+ return true;
if (blk_mq_tag_is_reserved(data.hctx->sched_tags, rq->internal_tag))
data.flags |= BLK_MQ_REQ_RESERVED;
@@ -1079,7 +1045,6 @@ bool blk_mq_get_driver_tag(struct request *rq)
data.hctx->tags->rqs[rq->tag] = rq;
}
-done:
return rq->tag != -1;
}
@@ -1486,7 +1451,7 @@ void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
}
EXPORT_SYMBOL(blk_mq_delay_run_hw_queue);
-bool blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
+void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
{
int srcu_idx;
bool need_run;
@@ -1504,12 +1469,8 @@ bool blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
blk_mq_hctx_has_pending(hctx);
hctx_unlock(hctx, srcu_idx);
- if (need_run) {
+ if (need_run)
__blk_mq_delay_run_hw_queue(hctx, async, 0);
- return true;
- }
-
- return false;
}
EXPORT_SYMBOL(blk_mq_run_hw_queue);
@@ -2789,6 +2750,23 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
int i, j, end;
struct blk_mq_hw_ctx **hctxs = q->queue_hw_ctx;
+ if (q->nr_hw_queues < set->nr_hw_queues) {
+ struct blk_mq_hw_ctx **new_hctxs;
+
+ new_hctxs = kcalloc_node(set->nr_hw_queues,
+ sizeof(*new_hctxs), GFP_KERNEL,
+ set->numa_node);
+ if (!new_hctxs)
+ return;
+ if (hctxs)
+ memcpy(new_hctxs, hctxs, q->nr_hw_queues *
+ sizeof(*hctxs));
+ q->queue_hw_ctx = new_hctxs;
+ q->nr_hw_queues = set->nr_hw_queues;
+ kfree(hctxs);
+ hctxs = new_hctxs;
+ }
+
/* protect against switching io scheduler */
mutex_lock(&q->sysfs_lock);
for (i = 0; i < set->nr_hw_queues; i++) {
@@ -2844,19 +2822,6 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
mutex_unlock(&q->sysfs_lock);
}
-/*
- * Maximum number of hardware queues we support. For single sets, we'll never
- * have more than the CPUs (software queues). For multiple sets, the tag_set
- * user may have set ->nr_hw_queues larger.
- */
-static unsigned int nr_hw_queues(struct blk_mq_tag_set *set)
-{
- if (set->nr_maps == 1)
- return nr_cpu_ids;
-
- return max(set->nr_hw_queues, nr_cpu_ids);
-}
-
struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
struct request_queue *q,
bool elevator_init)
@@ -2876,12 +2841,6 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
/* init q->mq_kobj and sw queues' kobjects */
blk_mq_sysfs_init(q);
- q->nr_queues = nr_hw_queues(set);
- q->queue_hw_ctx = kcalloc_node(q->nr_queues, sizeof(*(q->queue_hw_ctx)),
- GFP_KERNEL, set->numa_node);
- if (!q->queue_hw_ctx)
- goto err_sys_init;
-
INIT_LIST_HEAD(&q->unused_hctx_list);
spin_lock_init(&q->unused_hctx_lock);
@@ -2929,7 +2888,6 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
err_hctxs:
kfree(q->queue_hw_ctx);
q->nr_hw_queues = 0;
-err_sys_init:
blk_mq_sysfs_deinit(q);
err_poll:
blk_stat_free_callback(q->poll_cb);
@@ -3030,6 +2988,29 @@ static int blk_mq_update_queue_map(struct blk_mq_tag_set *set)
}
}
+static int blk_mq_realloc_tag_set_tags(struct blk_mq_tag_set *set,
+ int cur_nr_hw_queues, int new_nr_hw_queues)
+{
+ struct blk_mq_tags **new_tags;
+
+ if (cur_nr_hw_queues >= new_nr_hw_queues)
+ return 0;
+
+ new_tags = kcalloc_node(new_nr_hw_queues, sizeof(struct blk_mq_tags *),
+ GFP_KERNEL, set->numa_node);
+ if (!new_tags)
+ return -ENOMEM;
+
+ if (set->tags)
+ memcpy(new_tags, set->tags, cur_nr_hw_queues *
+ sizeof(*set->tags));
+ kfree(set->tags);
+ set->tags = new_tags;
+ set->nr_hw_queues = new_nr_hw_queues;
+
+ return 0;
+}
+
/*
* Alloc a tag set to be associated with one or more request queues.
* May fail with EINVAL for various error conditions. May adjust the
@@ -3083,9 +3064,7 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
if (set->nr_maps == 1 && set->nr_hw_queues > nr_cpu_ids)
set->nr_hw_queues = nr_cpu_ids;
- set->tags = kcalloc_node(nr_hw_queues(set), sizeof(struct blk_mq_tags *),
- GFP_KERNEL, set->numa_node);
- if (!set->tags)
+ if (blk_mq_realloc_tag_set_tags(set, 0, set->nr_hw_queues) < 0)
return -ENOMEM;
ret = -ENOMEM;
@@ -3126,7 +3105,7 @@ void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
{
int i, j;
- for (i = 0; i < nr_hw_queues(set); i++)
+ for (i = 0; i < set->nr_hw_queues; i++)
blk_mq_free_map_and_requests(set, i);
for (j = 0; j < set->nr_maps; j++) {
@@ -3271,10 +3250,6 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
list_for_each_entry(q, &set->tag_list, tag_set_list)
blk_mq_freeze_queue(q);
/*
- * Sync with blk_mq_queue_tag_busy_iter.
- */
- synchronize_rcu();
- /*
* Switch IO scheduler to 'none', cleaning up the data associated
* with the previous scheduler. We will switch back once we are done
* updating the new sw to hw queue mappings.
@@ -3288,6 +3263,10 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
blk_mq_sysfs_unregister(q);
}
+ if (blk_mq_realloc_tag_set_tags(set, set->nr_hw_queues, nr_hw_queues) <
+ 0)
+ goto reregister;
+
prev_nr_hw_queues = set->nr_hw_queues;
set->nr_hw_queues = nr_hw_queues;
blk_mq_update_queue_map(set);
@@ -3304,6 +3283,7 @@ fallback:
blk_mq_map_swqueue(q);
}
+reregister:
list_for_each_entry(q, &set->tag_list, tag_set_list) {
blk_mq_sysfs_register(q);
blk_mq_debugfs_register_hctxs(q);
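
blk_mq_realloc_hw_ctxs() and the new blk_mq_realloc_tag_set_tags() share one grow-only reallocation shape, replacing the old nr_cpu_ids-sized preallocation: allocate the larger array, copy the populated prefix, publish it, then free the old one. A generic sketch (grow_ptr_array is hypothetical):

static int grow_ptr_array(void ***arrp, int cur_nr, int new_nr)
{
	void **new_arr;

	if (cur_nr >= new_nr)
		return 0;			/* grow-only */

	new_arr = kcalloc(new_nr, sizeof(*new_arr), GFP_KERNEL);
	if (!new_arr)
		return -ENOMEM;

	if (*arrp)
		memcpy(new_arr, *arrp, cur_nr * sizeof(*new_arr));
	kfree(*arrp);
	*arrp = new_arr;
	return 0;
}
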
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 32c62c64e6c2..eaaca8fc1c28 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -128,15 +128,6 @@ extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);
void blk_mq_release(struct request_queue *q);
-/**
- * blk_mq_rq_state() - read the current MQ_RQ_* state of a request
- * @rq: target request.
- */
-static inline enum mq_rq_state blk_mq_rq_state(struct request *rq)
-{
- return READ_ONCE(rq->state);
-}
-
static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
unsigned int cpu)
{
diff --git a/block/blk-softirq.c b/block/blk-softirq.c
index 457d9ba3eb20..6e7ec87d49fa 100644
--- a/block/blk-softirq.c
+++ b/block/blk-softirq.c
@@ -42,17 +42,13 @@ static __latent_entropy void blk_done_softirq(struct softirq_action *h)
static void trigger_softirq(void *data)
{
struct request *rq = data;
- unsigned long flags;
struct list_head *list;
- local_irq_save(flags);
list = this_cpu_ptr(&blk_cpu_done);
list_add_tail(&rq->ipi_list, list);
if (list->next == &rq->ipi_list)
raise_softirq_irqoff(BLOCK_SOFTIRQ);
-
- local_irq_restore(flags);
}
/*
diff --git a/block/blk-stat.c b/block/blk-stat.c
index 940f15d600f8..7da302ff88d0 100644
--- a/block/blk-stat.c
+++ b/block/blk-stat.c
@@ -53,7 +53,7 @@ void blk_stat_add(struct request *rq, u64 now)
struct request_queue *q = rq->q;
struct blk_stat_callback *cb;
struct blk_rq_stat *stat;
- int bucket;
+ int bucket, cpu;
u64 value;
value = (now >= rq->io_start_time_ns) ? now - rq->io_start_time_ns : 0;
@@ -61,6 +61,7 @@ void blk_stat_add(struct request *rq, u64 now)
blk_throtl_stat_add(rq, value);
rcu_read_lock();
+ cpu = get_cpu();
list_for_each_entry_rcu(cb, &q->stats->callbacks, list) {
if (!blk_stat_is_active(cb))
continue;
@@ -69,10 +70,10 @@ void blk_stat_add(struct request *rq, u64 now)
if (bucket < 0)
continue;
- stat = &get_cpu_ptr(cb->cpu_stat)[bucket];
+ stat = &per_cpu_ptr(cb->cpu_stat, cpu)[bucket];
blk_rq_stat_add(stat, value);
- put_cpu_ptr(cb->cpu_stat);
}
+ put_cpu();
rcu_read_unlock();
}
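
blk_stat_add() now pins the CPU once with get_cpu() around the whole callback walk and addresses each callback's buckets through per_cpu_ptr(), rather than paying a get_cpu_ptr()/put_cpu_ptr() pair per callback. The pattern in miniature (my_add_samples is hypothetical):

static void my_add_samples(struct blk_rq_stat __percpu *a,
			   struct blk_rq_stat __percpu *b, u64 value)
{
	int cpu = get_cpu();	/* disables preemption once */

	blk_rq_stat_add(per_cpu_ptr(a, cpu), value);
	blk_rq_stat_add(per_cpu_ptr(b, cpu), value);

	put_cpu();
}
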
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 46f5198be017..fca9b158f4a0 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -801,10 +801,6 @@ queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
if (!entry->show)
return -EIO;
mutex_lock(&q->sysfs_lock);
- if (blk_queue_dying(q)) {
- mutex_unlock(&q->sysfs_lock);
- return -ENOENT;
- }
res = entry->show(q, page);
mutex_unlock(&q->sysfs_lock);
return res;
@@ -823,10 +819,6 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr,
q = container_of(kobj, struct request_queue, kobj);
mutex_lock(&q->sysfs_lock);
- if (blk_queue_dying(q)) {
- mutex_unlock(&q->sysfs_lock);
- return -ENOENT;
- }
res = entry->store(q, page, length);
mutex_unlock(&q->sysfs_lock);
return res;
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 18f773e52dfb..98233c9c65a8 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -12,6 +12,7 @@
#include <linux/blktrace_api.h>
#include <linux/blk-cgroup.h>
#include "blk.h"
+#include "blk-cgroup-rwstat.h"
/* Max dispatch from a group in 1 round */
static int throtl_grp_quantum = 8;
@@ -176,6 +177,9 @@ struct throtl_grp {
unsigned int bio_cnt; /* total bios */
unsigned int bad_bio_cnt; /* bios exceeding latency threshold */
unsigned long bio_cnt_reset_time;
+
+ struct blkg_rwstat stat_bytes;
+ struct blkg_rwstat stat_ios;
};
/* We measure latency for request size from <= 4k to >= 1M */
@@ -489,6 +493,12 @@ static struct blkg_policy_data *throtl_pd_alloc(gfp_t gfp,
if (!tg)
return NULL;
+ if (blkg_rwstat_init(&tg->stat_bytes, gfp))
+ goto err_free_tg;
+
+ if (blkg_rwstat_init(&tg->stat_ios, gfp))
+ goto err_exit_stat_bytes;
+
throtl_service_queue_init(&tg->service_queue);
for (rw = READ; rw <= WRITE; rw++) {
@@ -513,6 +523,12 @@ static struct blkg_policy_data *throtl_pd_alloc(gfp_t gfp,
tg->idletime_threshold_conf = DFL_IDLE_THRESHOLD;
return &tg->pd;
+
+err_exit_stat_bytes:
+ blkg_rwstat_exit(&tg->stat_bytes);
+err_free_tg:
+ kfree(tg);
+ return NULL;
}
static void throtl_pd_init(struct blkg_policy_data *pd)
@@ -611,6 +627,8 @@ static void throtl_pd_free(struct blkg_policy_data *pd)
struct throtl_grp *tg = pd_to_tg(pd);
del_timer_sync(&tg->service_queue.pending_timer);
+ blkg_rwstat_exit(&tg->stat_bytes);
+ blkg_rwstat_exit(&tg->stat_ios);
kfree(tg);
}
@@ -1464,6 +1482,32 @@ static ssize_t tg_set_conf_uint(struct kernfs_open_file *of,
return tg_set_conf(of, buf, nbytes, off, false);
}
+static int tg_print_rwstat(struct seq_file *sf, void *v)
+{
+ blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
+ blkg_prfill_rwstat, &blkcg_policy_throtl,
+ seq_cft(sf)->private, true);
+ return 0;
+}
+
+static u64 tg_prfill_rwstat_recursive(struct seq_file *sf,
+ struct blkg_policy_data *pd, int off)
+{
+ struct blkg_rwstat_sample sum;
+
+ blkg_rwstat_recursive_sum(pd_to_blkg(pd), &blkcg_policy_throtl, off,
+ &sum);
+ return __blkg_prfill_rwstat(sf, pd, &sum);
+}
+
+static int tg_print_rwstat_recursive(struct seq_file *sf, void *v)
+{
+ blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
+ tg_prfill_rwstat_recursive, &blkcg_policy_throtl,
+ seq_cft(sf)->private, true);
+ return 0;
+}
+
static struct cftype throtl_legacy_files[] = {
{
.name = "throttle.read_bps_device",
@@ -1491,23 +1535,23 @@ static struct cftype throtl_legacy_files[] = {
},
{
.name = "throttle.io_service_bytes",
- .private = (unsigned long)&blkcg_policy_throtl,
- .seq_show = blkg_print_stat_bytes,
+ .private = offsetof(struct throtl_grp, stat_bytes),
+ .seq_show = tg_print_rwstat,
},
{
.name = "throttle.io_service_bytes_recursive",
- .private = (unsigned long)&blkcg_policy_throtl,
- .seq_show = blkg_print_stat_bytes_recursive,
+ .private = offsetof(struct throtl_grp, stat_bytes),
+ .seq_show = tg_print_rwstat_recursive,
},
{
.name = "throttle.io_serviced",
- .private = (unsigned long)&blkcg_policy_throtl,
- .seq_show = blkg_print_stat_ios,
+ .private = offsetof(struct throtl_grp, stat_ios),
+ .seq_show = tg_print_rwstat,
},
{
.name = "throttle.io_serviced_recursive",
- .private = (unsigned long)&blkcg_policy_throtl,
- .seq_show = blkg_print_stat_ios_recursive,
+ .private = offsetof(struct throtl_grp, stat_ios),
+ .seq_show = tg_print_rwstat_recursive,
},
{ } /* terminate */
};
@@ -2127,7 +2171,16 @@ bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
WARN_ON_ONCE(!rcu_read_lock_held());
/* see throtl_charge_bio() */
- if (bio_flagged(bio, BIO_THROTTLED) || !tg->has_rules[rw])
+ if (bio_flagged(bio, BIO_THROTTLED))
+ goto out;
+
+ if (!cgroup_subsys_on_dfl(io_cgrp_subsys)) {
+ blkg_rwstat_add(&tg->stat_bytes, bio->bi_opf,
+ bio->bi_iter.bi_size);
+ blkg_rwstat_add(&tg->stat_ios, bio->bi_opf, 1);
+ }
+
+ if (!tg->has_rules[rw])
goto out;
spin_lock_irq(&q->queue_lock);
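
The blk_throtl_bio() hunk above charges tg->stat_bytes/stat_ios only on the legacy (v1) hierarchy; on cgroup v2 the generic per-blkg iostat added in blk-cgroup.c supplies these numbers. The gating in isolation (my_throtl_account is hypothetical):

static void my_throtl_account(struct throtl_grp *tg, struct bio *bio)
{
	if (cgroup_subsys_on_dfl(io_cgrp_subsys))
		return;		/* v2: covered by blkg->iostat_cpu */

	blkg_rwstat_add(&tg->stat_bytes, bio->bi_opf, bio->bi_iter.bi_size);
	blkg_rwstat_add(&tg->stat_ios, bio->bi_opf, 1);
}
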
diff --git a/block/blk-zoned.c b/block/blk-zoned.c
index 4bc5f260248a..6fad6f3f6980 100644
--- a/block/blk-zoned.c
+++ b/block/blk-zoned.c
@@ -93,172 +93,85 @@ unsigned int blkdev_nr_zones(struct block_device *bdev)
if (!blk_queue_is_zoned(q))
return 0;
- return __blkdev_nr_zones(q, bdev->bd_part->nr_sects);
+ return __blkdev_nr_zones(q, get_capacity(bdev->bd_disk));
}
EXPORT_SYMBOL_GPL(blkdev_nr_zones);
-/*
- * Check that a zone report belongs to this partition, and if yes, fix its start
- * sector and write pointer and return true. Return false otherwise.
- */
-static bool blkdev_report_zone(struct block_device *bdev, struct blk_zone *rep)
-{
- sector_t offset = get_start_sect(bdev);
-
- if (rep->start < offset)
- return false;
-
- rep->start -= offset;
- if (rep->start + rep->len > bdev->bd_part->nr_sects)
- return false;
-
- if (rep->type == BLK_ZONE_TYPE_CONVENTIONAL)
- rep->wp = rep->start + rep->len;
- else
- rep->wp -= offset;
- return true;
-}
-
-static int blk_report_zones(struct gendisk *disk, sector_t sector,
- struct blk_zone *zones, unsigned int *nr_zones)
-{
- struct request_queue *q = disk->queue;
- unsigned int z = 0, n, nrz = *nr_zones;
- sector_t capacity = get_capacity(disk);
- int ret;
-
- while (z < nrz && sector < capacity) {
- n = nrz - z;
- ret = disk->fops->report_zones(disk, sector, &zones[z], &n);
- if (ret)
- return ret;
- if (!n)
- break;
- sector += blk_queue_zone_sectors(q) * n;
- z += n;
- }
-
- WARN_ON(z > *nr_zones);
- *nr_zones = z;
-
- return 0;
-}
-
/**
* blkdev_report_zones - Get zones information
* @bdev: Target block device
* @sector: Sector from which to report zones
- * @zones: Array of zone structures where to return the zones information
- * @nr_zones: Number of zone structures in the zone array
+ * @nr_zones: Maximum number of zones to report
+ * @cb: Callback function called for each reported zone
+ * @data: Private data for the callback
*
* Description:
- * Get zone information starting from the zone containing @sector.
- * The number of zone information reported may be less than the number
- * requested by @nr_zones. The number of zones actually reported is
- * returned in @nr_zones.
- * The caller must use memalloc_noXX_save/restore() calls to control
- * memory allocations done within this function (zone array and command
- * buffer allocation by the device driver).
+ * Get zone information starting from the zone containing @sector for at most
+ * @nr_zones, and call @cb for each zone reported by the device.
+ * To report all zones in a device starting from @sector, the BLK_ALL_ZONES
+ * constant can be passed to @nr_zones.
+ * Returns the number of zones reported by the device, or a negative errno
+ * value in case of failure.
+ *
+ * Note: The caller must use memalloc_noXX_save/restore() calls to control
+ * memory allocations done within this function.
*/
int blkdev_report_zones(struct block_device *bdev, sector_t sector,
- struct blk_zone *zones, unsigned int *nr_zones)
+ unsigned int nr_zones, report_zones_cb cb, void *data)
{
- struct request_queue *q = bdev_get_queue(bdev);
- unsigned int i, nrz;
- int ret;
-
- if (!blk_queue_is_zoned(q))
- return -EOPNOTSUPP;
+ struct gendisk *disk = bdev->bd_disk;
+ sector_t capacity = get_capacity(disk);
- /*
- * A block device that advertized itself as zoned must have a
- * report_zones method. If it does not have one defined, the device
- * driver has a bug. So warn about that.
- */
- if (WARN_ON_ONCE(!bdev->bd_disk->fops->report_zones))
+ if (!blk_queue_is_zoned(bdev_get_queue(bdev)) ||
+ WARN_ON_ONCE(!disk->fops->report_zones))
return -EOPNOTSUPP;
- if (!*nr_zones || sector >= bdev->bd_part->nr_sects) {
- *nr_zones = 0;
+ if (!nr_zones || sector >= capacity)
return 0;
- }
-
- nrz = min(*nr_zones,
- __blkdev_nr_zones(q, bdev->bd_part->nr_sects - sector));
- ret = blk_report_zones(bdev->bd_disk, get_start_sect(bdev) + sector,
- zones, &nrz);
- if (ret)
- return ret;
-
- for (i = 0; i < nrz; i++) {
- if (!blkdev_report_zone(bdev, zones))
- break;
- zones++;
- }
- *nr_zones = i;
-
- return 0;
+ return disk->fops->report_zones(disk, sector, nr_zones, cb, data);
}
EXPORT_SYMBOL_GPL(blkdev_report_zones);
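
The reworked interface feeds each zone to a caller-supplied callback instead of filling a caller-allocated array, so callers no longer have to size, allocate and translate a struct blk_zone buffer. A caller-side sketch (the callback signature matches blkdev_copy_zone_to_user() below; the helpers here are hypothetical):

static int count_seq_zone_cb(struct blk_zone *zone, unsigned int idx,
			     void *data)
{
	unsigned int *nr_seq = data;

	if (zone->type == BLK_ZONE_TYPE_SEQWRITE_REQ)
		(*nr_seq)++;
	return 0;	/* 0 continues the report, a negative errno aborts it */
}

static int count_seq_zones(struct block_device *bdev, unsigned int *nr_seq)
{
	int ret;

	*nr_seq = 0;
	ret = blkdev_report_zones(bdev, 0, BLK_ALL_ZONES,
				  count_seq_zone_cb, nr_seq);
	return ret < 0 ? ret : 0;	/* >= 0 is the number of zones seen */
}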
-/*
- * Special case of zone reset operation to reset all zones in one command,
- * useful for applications like mkfs.
- */
-static int __blkdev_reset_all_zones(struct block_device *bdev, gfp_t gfp_mask)
-{
- struct bio *bio = bio_alloc(gfp_mask, 0);
- int ret;
-
- /* across the zones operations, don't need any sectors */
- bio_set_dev(bio, bdev);
- bio_set_op_attrs(bio, REQ_OP_ZONE_RESET_ALL, 0);
-
- ret = submit_bio_wait(bio);
- bio_put(bio);
-
- return ret;
-}
-
static inline bool blkdev_allow_reset_all_zones(struct block_device *bdev,
+ sector_t sector,
sector_t nr_sectors)
{
if (!blk_queue_zone_resetall(bdev_get_queue(bdev)))
return false;
- if (nr_sectors != part_nr_sects_read(bdev->bd_part))
- return false;
/*
- * REQ_OP_ZONE_RESET_ALL can be executed only if the block device is
- * the entire disk, that is, if the blocks device start offset is 0 and
- * its capacity is the same as the entire disk.
+ * REQ_OP_ZONE_RESET_ALL can be executed only if the range of zones to
+ * operate on covers the entire disk.
*/
- return get_start_sect(bdev) == 0 &&
- part_nr_sects_read(bdev->bd_part) == get_capacity(bdev->bd_disk);
+ return !sector && nr_sectors == get_capacity(bdev->bd_disk);
}
/**
- * blkdev_reset_zones - Reset zones write pointer
+ * blkdev_zone_mgmt - Execute a zone management operation on a range of zones
* @bdev: Target block device
- * @sector: Start sector of the first zone to reset
- * @nr_sectors: Number of sectors, at least the length of one zone
+ * @op: Operation to be performed on the zones
+ * @sector: Start sector of the first zone to operate on
+ * @nr_sectors: Number of sectors; must be at least the length of one zone
+ * and must be zone size aligned.
* @gfp_mask: Memory allocation flags (for bio_alloc)
*
* Description:
- * Reset the write pointer of the zones contained in the range
+ * Perform the specified operation on the range of zones specified by
* @sector..@sector+@nr_sectors. Specifying the entire disk sector range
* is valid, but the specified range should not contain conventional zones.
+ * The operation to execute on each zone can be a zone reset, open, close
+ * or finish request.
*/
-int blkdev_reset_zones(struct block_device *bdev,
- sector_t sector, sector_t nr_sectors,
- gfp_t gfp_mask)
+int blkdev_zone_mgmt(struct block_device *bdev, enum req_opf op,
+ sector_t sector, sector_t nr_sectors,
+ gfp_t gfp_mask)
{
struct request_queue *q = bdev_get_queue(bdev);
- sector_t zone_sectors;
+ sector_t zone_sectors = blk_queue_zone_sectors(q);
+ sector_t capacity = get_capacity(bdev->bd_disk);
sector_t end_sector = sector + nr_sectors;
struct bio *bio = NULL;
- struct blk_plug plug;
int ret;
if (!blk_queue_is_zoned(q))
@@ -267,45 +180,62 @@ int blkdev_reset_zones(struct block_device *bdev,
if (bdev_read_only(bdev))
return -EPERM;
- if (!nr_sectors || end_sector > bdev->bd_part->nr_sects)
+ if (!op_is_zone_mgmt(op))
+ return -EOPNOTSUPP;
+
+ if (!nr_sectors || end_sector > capacity)
/* Out of range */
return -EINVAL;
- if (blkdev_allow_reset_all_zones(bdev, nr_sectors))
- return __blkdev_reset_all_zones(bdev, gfp_mask);
-
/* Check alignment (handle eventual smaller last zone) */
- zone_sectors = blk_queue_zone_sectors(q);
if (sector & (zone_sectors - 1))
return -EINVAL;
- if ((nr_sectors & (zone_sectors - 1)) &&
- end_sector != bdev->bd_part->nr_sects)
+ if ((nr_sectors & (zone_sectors - 1)) && end_sector != capacity)
return -EINVAL;
- blk_start_plug(&plug);
while (sector < end_sector) {
-
bio = blk_next_bio(bio, 0, gfp_mask);
- bio->bi_iter.bi_sector = sector;
bio_set_dev(bio, bdev);
- bio_set_op_attrs(bio, REQ_OP_ZONE_RESET, 0);
+ /*
+ * Special case for the zone reset operation that resets all
+ * zones. This is useful for applications like mkfs.
+ */
+ if (op == REQ_OP_ZONE_RESET &&
+ blkdev_allow_reset_all_zones(bdev, sector, nr_sectors)) {
+ bio->bi_opf = REQ_OP_ZONE_RESET_ALL;
+ break;
+ }
+
+ bio->bi_opf = op;
+ bio->bi_iter.bi_sector = sector;
sector += zone_sectors;
/* This may take a while, so be nice to others */
cond_resched();
-
}
ret = submit_bio_wait(bio);
bio_put(bio);
- blk_finish_plug(&plug);
-
return ret;
}
-EXPORT_SYMBOL_GPL(blkdev_reset_zones);
+EXPORT_SYMBOL_GPL(blkdev_zone_mgmt);
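
All zone management operations now funnel through this single helper, and the reset-all fast path above is selected automatically. A sketch under those assumptions:

/* Reset every zone of a zoned block device. With sector == 0 and
 * nr_sectors == the disk capacity, the loop above switches to a single
 * REQ_OP_ZONE_RESET_ALL bio when the queue supports it.
 */
static int sketch_reset_all_zones(struct block_device *bdev)
{
	return blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET, 0,
				get_capacity(bdev->bd_disk), GFP_KERNEL);
}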
+
+struct zone_report_args {
+ struct blk_zone __user *zones;
+};
+
+static int blkdev_copy_zone_to_user(struct blk_zone *zone, unsigned int idx,
+ void *data)
+{
+ struct zone_report_args *args = data;
+
+ if (copy_to_user(&args->zones[idx], zone, sizeof(struct blk_zone)))
+ return -EFAULT;
+ return 0;
+}
/*
* BLKREPORTZONE ioctl processing.
@@ -315,9 +245,9 @@ int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
void __user *argp = (void __user *)arg;
+ struct zone_report_args args;
struct request_queue *q;
struct blk_zone_report rep;
- struct blk_zone *zones;
int ret;
if (!argp)
@@ -339,44 +269,29 @@ int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
if (!rep.nr_zones)
return -EINVAL;
- rep.nr_zones = min(blkdev_nr_zones(bdev), rep.nr_zones);
-
- zones = kvmalloc_array(rep.nr_zones, sizeof(struct blk_zone),
- GFP_KERNEL | __GFP_ZERO);
- if (!zones)
- return -ENOMEM;
-
- ret = blkdev_report_zones(bdev, rep.sector, zones, &rep.nr_zones);
- if (ret)
- goto out;
-
- if (copy_to_user(argp, &rep, sizeof(struct blk_zone_report))) {
- ret = -EFAULT;
- goto out;
- }
-
- if (rep.nr_zones) {
- if (copy_to_user(argp + sizeof(struct blk_zone_report), zones,
- sizeof(struct blk_zone) * rep.nr_zones))
- ret = -EFAULT;
- }
-
- out:
- kvfree(zones);
+ args.zones = argp + sizeof(struct blk_zone_report);
+ ret = blkdev_report_zones(bdev, rep.sector, rep.nr_zones,
+ blkdev_copy_zone_to_user, &args);
+ if (ret < 0)
+ return ret;
- return ret;
+ rep.nr_zones = ret;
+ if (copy_to_user(argp, &rep, sizeof(struct blk_zone_report)))
+ return -EFAULT;
+ return 0;
}
/*
- * BLKRESETZONE ioctl processing.
+ * BLKRESETZONE, BLKOPENZONE, BLKCLOSEZONE and BLKFINISHZONE ioctl processing.
* Called from blkdev_ioctl.
*/
-int blkdev_reset_zones_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned int cmd, unsigned long arg)
+int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long arg)
{
void __user *argp = (void __user *)arg;
struct request_queue *q;
struct blk_zone_range zrange;
+ enum req_opf op;
if (!argp)
return -EINVAL;
@@ -397,8 +312,25 @@ int blkdev_reset_zones_ioctl(struct block_device *bdev, fmode_t mode,
if (copy_from_user(&zrange, argp, sizeof(struct blk_zone_range)))
return -EFAULT;
- return blkdev_reset_zones(bdev, zrange.sector, zrange.nr_sectors,
- GFP_KERNEL);
+ switch (cmd) {
+ case BLKRESETZONE:
+ op = REQ_OP_ZONE_RESET;
+ break;
+ case BLKOPENZONE:
+ op = REQ_OP_ZONE_OPEN;
+ break;
+ case BLKCLOSEZONE:
+ op = REQ_OP_ZONE_CLOSE;
+ break;
+ case BLKFINISHZONE:
+ op = REQ_OP_ZONE_FINISH;
+ break;
+ default:
+ return -ENOTTY;
+ }
+
+ return blkdev_zone_mgmt(bdev, op, zrange.sector, zrange.nr_sectors,
+ GFP_KERNEL);
}
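
From user space, the three new ioctls reuse the BLKRESETZONE calling convention. A hypothetical sketch (the device path and zone size are placeholders; BLKOPENZONE and struct blk_zone_range are expected to come from <linux/blkzoned.h>):

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/blkzoned.h>

/* Explicitly open the first zone of a zoned block device. zone_sectors
 * must be the device zone size (cf. BLKGETZONESZ below).
 */
static int open_first_zone(const char *dev_path,
			   unsigned long long zone_sectors)
{
	struct blk_zone_range zrange = {
		.sector = 0,			/* must be zone aligned */
		.nr_sectors = zone_sectors,
	};
	int fd = open(dev_path, O_RDWR);

	if (fd < 0)
		return -1;
	if (ioctl(fd, BLKOPENZONE, &zrange) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}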
static inline unsigned long *blk_alloc_zone_bitmap(int node,
@@ -408,37 +340,99 @@ static inline unsigned long *blk_alloc_zone_bitmap(int node,
GFP_NOIO, node);
}
+void blk_queue_free_zone_bitmaps(struct request_queue *q)
+{
+ kfree(q->seq_zones_bitmap);
+ q->seq_zones_bitmap = NULL;
+ kfree(q->seq_zones_wlock);
+ q->seq_zones_wlock = NULL;
+}
+
+struct blk_revalidate_zone_args {
+ struct gendisk *disk;
+ unsigned long *seq_zones_bitmap;
+ unsigned long *seq_zones_wlock;
+ sector_t sector;
+};
+
/*
- * Allocate an array of struct blk_zone to get nr_zones zone information.
- * The allocated array may be smaller than nr_zones.
+ * Helper function to check the validity of the zones reported by a device.
*/
-static struct blk_zone *blk_alloc_zones(unsigned int *nr_zones)
+static int blk_revalidate_zone_cb(struct blk_zone *zone, unsigned int idx,
+ void *data)
{
- struct blk_zone *zones;
- size_t nrz = min(*nr_zones, BLK_ZONED_REPORT_MAX_ZONES);
+ struct blk_revalidate_zone_args *args = data;
+ struct gendisk *disk = args->disk;
+ struct request_queue *q = disk->queue;
+ sector_t zone_sectors = blk_queue_zone_sectors(q);
+ sector_t capacity = get_capacity(disk);
/*
- * GFP_KERNEL here is meaningless as the caller task context has
- * the PF_MEMALLOC_NOIO flag set in blk_revalidate_disk_zones()
- * with memalloc_noio_save().
+ * All zones must have the same size, with the possible exception of a
+ * smaller last zone.
*/
- zones = kvcalloc(nrz, sizeof(struct blk_zone), GFP_KERNEL);
- if (!zones) {
- *nr_zones = 0;
- return NULL;
+ if (zone->start + zone_sectors < capacity &&
+ zone->len != zone_sectors) {
+ pr_warn("%s: Invalid zoned device with non constant zone size\n",
+ disk->disk_name);
+ return false;
}
- *nr_zones = nrz;
+ if (zone->start + zone->len >= capacity &&
+ zone->len > zone_sectors) {
+ pr_warn("%s: Invalid zoned device with larger last zone size\n",
+ disk->disk_name);
+ return -ENODEV;
+ }
+
+ /* Check for holes in the zone report */
+ if (zone->start != args->sector) {
+ pr_warn("%s: Zone gap at sectors %llu..%llu\n",
+ disk->disk_name, args->sector, zone->start);
+ return -ENODEV;
+ }
- return zones;
+ /* Check zone type */
+ switch (zone->type) {
+ case BLK_ZONE_TYPE_CONVENTIONAL:
+ case BLK_ZONE_TYPE_SEQWRITE_REQ:
+ case BLK_ZONE_TYPE_SEQWRITE_PREF:
+ break;
+ default:
+ pr_warn("%s: Invalid zone type 0x%x at sectors %llu\n",
+ disk->disk_name, (int)zone->type, zone->start);
+ return -ENODEV;
+ }
+
+ if (zone->type != BLK_ZONE_TYPE_CONVENTIONAL)
+ set_bit(idx, args->seq_zones_bitmap);
+
+ args->sector += zone->len;
+ return 0;
}
-void blk_queue_free_zone_bitmaps(struct request_queue *q)
+static int blk_update_zone_info(struct gendisk *disk, unsigned int nr_zones,
+ struct blk_revalidate_zone_args *args)
{
- kfree(q->seq_zones_bitmap);
- q->seq_zones_bitmap = NULL;
- kfree(q->seq_zones_wlock);
- q->seq_zones_wlock = NULL;
+ /*
+ * Ensure that all memory allocations in this context are done as
+ * if GFP_NOIO was specified.
+ */
+ unsigned int noio_flag = memalloc_noio_save();
+ struct request_queue *q = disk->queue;
+ int ret;
+
+ args->seq_zones_wlock = blk_alloc_zone_bitmap(q->node, nr_zones);
+ if (!args->seq_zones_wlock)
+ return -ENOMEM;
+ args->seq_zones_bitmap = blk_alloc_zone_bitmap(q->node, nr_zones);
+ if (!args->seq_zones_bitmap)
+ return -ENOMEM;
+
+ ret = disk->fops->report_zones(disk, 0, nr_zones,
+ blk_revalidate_zone_cb, args);
+ memalloc_noio_restore(noio_flag);
+ return ret;
}
/**
@@ -454,13 +448,12 @@ int blk_revalidate_disk_zones(struct gendisk *disk)
{
struct request_queue *q = disk->queue;
unsigned int nr_zones = __blkdev_nr_zones(q, get_capacity(disk));
- unsigned long *seq_zones_wlock = NULL, *seq_zones_bitmap = NULL;
- unsigned int i, rep_nr_zones = 0, z = 0, nrz;
- struct blk_zone *zones = NULL;
- unsigned int noio_flag;
- sector_t sector = 0;
+ struct blk_revalidate_zone_args args = { .disk = disk };
int ret = 0;
+ if (WARN_ON_ONCE(!blk_queue_is_zoned(q)))
+ return -EIO;
+
/*
* BIO based queues do not use a scheduler so only q->nr_zones
* needs to be updated so that the sysfs exposed value is correct.
@@ -470,78 +463,28 @@ int blk_revalidate_disk_zones(struct gendisk *disk)
return 0;
}
- /*
- * Ensure that all memory allocations in this context are done as
- * if GFP_NOIO was specified.
- */
- noio_flag = memalloc_noio_save();
-
- if (!blk_queue_is_zoned(q) || !nr_zones) {
- nr_zones = 0;
- goto update;
- }
-
- /* Allocate bitmaps */
- ret = -ENOMEM;
- seq_zones_wlock = blk_alloc_zone_bitmap(q->node, nr_zones);
- if (!seq_zones_wlock)
- goto out;
- seq_zones_bitmap = blk_alloc_zone_bitmap(q->node, nr_zones);
- if (!seq_zones_bitmap)
- goto out;
-
- /* Get zone information and initialize seq_zones_bitmap */
- rep_nr_zones = nr_zones;
- zones = blk_alloc_zones(&rep_nr_zones);
- if (!zones)
- goto out;
-
- while (z < nr_zones) {
- nrz = min(nr_zones - z, rep_nr_zones);
- ret = blk_report_zones(disk, sector, zones, &nrz);
- if (ret)
- goto out;
- if (!nrz)
- break;
- for (i = 0; i < nrz; i++) {
- if (zones[i].type != BLK_ZONE_TYPE_CONVENTIONAL)
- set_bit(z, seq_zones_bitmap);
- z++;
- }
- sector += nrz * blk_queue_zone_sectors(q);
- }
-
- if (WARN_ON(z != nr_zones)) {
- ret = -EIO;
- goto out;
- }
+ if (nr_zones)
+ ret = blk_update_zone_info(disk, nr_zones, &args);
-update:
/*
* Install the new bitmaps, making sure the queue is stopped and
* all I/Os are completed (i.e. a scheduler is not referencing the
* bitmaps).
*/
blk_mq_freeze_queue(q);
- q->nr_zones = nr_zones;
- swap(q->seq_zones_wlock, seq_zones_wlock);
- swap(q->seq_zones_bitmap, seq_zones_bitmap);
- blk_mq_unfreeze_queue(q);
-
-out:
- memalloc_noio_restore(noio_flag);
-
- kvfree(zones);
- kfree(seq_zones_wlock);
- kfree(seq_zones_bitmap);
-
- if (ret) {
+ if (ret >= 0) {
+ q->nr_zones = nr_zones;
+ swap(q->seq_zones_wlock, args.seq_zones_wlock);
+ swap(q->seq_zones_bitmap, args.seq_zones_bitmap);
+ ret = 0;
+ } else {
pr_warn("%s: failed to revalidate zones\n", disk->disk_name);
- blk_mq_freeze_queue(q);
blk_queue_free_zone_bitmaps(q);
- blk_mq_unfreeze_queue(q);
}
+ blk_mq_unfreeze_queue(q);
+ kfree(args.seq_zones_wlock);
+ kfree(args.seq_zones_bitmap);
return ret;
}
EXPORT_SYMBOL_GPL(blk_revalidate_disk_zones);
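
On the driver side, ->report_zones now has the matching callback-based contract: report up to nr_zones zones starting at sector, invoke cb for each, and return the count or a negative errno. A hypothetical sketch for a driver with fixed-size, initially empty zones:

/* Sketch of a ->report_zones implementation (hypothetical driver; a real
 * driver fills struct blk_zone from hardware state).
 */
static int sketch_report_zones(struct gendisk *disk, sector_t sector,
			       unsigned int nr_zones, report_zones_cb cb,
			       void *data)
{
	sector_t zone_sectors = blk_queue_zone_sectors(disk->queue);
	sector_t capacity = get_capacity(disk);
	unsigned int idx = 0;
	int ret;

	sector = rounddown(sector, zone_sectors);	/* zone-align the start */
	while (idx < nr_zones && sector < capacity) {
		struct blk_zone zone = {
			.start = sector,
			.len = min_t(sector_t, zone_sectors,
				     capacity - sector),
			.wp = sector,	/* hypothetical: every zone empty */
			.type = BLK_ZONE_TYPE_SEQWRITE_REQ,
		};

		ret = cb(&zone, idx, data);
		if (ret)
			return ret;
		sector += zone_sectors;
		idx++;
	}
	return idx;	/* number of zones reported */
}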
diff --git a/block/blk.h b/block/blk.h
index 47fba9362e60..2bea40180b6f 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -242,14 +242,11 @@ int blk_dev_init(void);
* Contribute to IO statistics IFF:
*
* a) it's attached to a gendisk, and
- * b) the queue had IO stats enabled when this request was started, and
- * c) it's a file system request
+ * b) the queue had IO stats enabled when this request was started
*/
static inline bool blk_do_io_stat(struct request *rq)
{
- return rq->rq_disk &&
- (rq->rq_flags & RQF_IO_STAT) &&
- !blk_rq_is_passthrough(rq);
+ return rq->rq_disk && (rq->rq_flags & RQF_IO_STAT);
}
static inline void req_set_nomerge(struct request_queue *q, struct request *req)
diff --git a/block/elevator.c b/block/elevator.c
index 076ba7308e65..4eab3d70e880 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -832,3 +832,12 @@ struct request *elv_rb_latter_request(struct request_queue *q,
return NULL;
}
EXPORT_SYMBOL(elv_rb_latter_request);
+
+static int __init elevator_setup(char *str)
+{
+ pr_warn("Kernel parameter elevator= does not have any effect anymore.\n"
+ "Please use sysfs to set IO scheduler for individual devices.\n");
+ return 1;
+}
+
+__setup("elevator=", elevator_setup);
diff --git a/block/genhd.c b/block/genhd.c
index 26b31fcae217..ff6268970ddc 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -1385,7 +1385,9 @@ static int diskstats_show(struct seq_file *seqf, void *v)
"%lu %lu %lu %u "
"%lu %lu %lu %u "
"%u %u %u "
- "%lu %lu %lu %u\n",
+ "%lu %lu %lu %u "
+ "%lu %u"
+ "\n",
MAJOR(part_devt(hd)), MINOR(part_devt(hd)),
disk_name(gp, hd->partno, buf),
part_stat_read(hd, ios[STAT_READ]),
@@ -1402,7 +1404,9 @@ static int diskstats_show(struct seq_file *seqf, void *v)
part_stat_read(hd, ios[STAT_DISCARD]),
part_stat_read(hd, merges[STAT_DISCARD]),
part_stat_read(hd, sectors[STAT_DISCARD]),
- (unsigned int)part_stat_read_msecs(hd, STAT_DISCARD)
+ (unsigned int)part_stat_read_msecs(hd, STAT_DISCARD),
+ part_stat_read(hd, ios[STAT_FLUSH]),
+ (unsigned int)part_stat_read_msecs(hd, STAT_FLUSH)
);
}
disk_part_iter_exit(&piter);
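
Two fields are appended to each /proc/diskstats line (and, in the partition-generic.c hunk further below, to the sysfs stat file): the number of flush requests completed and the time spent flushing, in milliseconds. Schematically, the tail of a line now reads:

    ... <discard ios> <discard merges> <discard sectors> <discard ms> <flush ios> <flush ms>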
diff --git a/block/ioctl.c b/block/ioctl.c
index 15a0eb80ada9..7ac8a66c9787 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -155,48 +155,21 @@ static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user
}
}
-/*
- * This is an exported API for the block driver, and will not
- * acquire bd_mutex. This API should be used in case that
- * caller has held bd_mutex already.
- */
-int __blkdev_reread_part(struct block_device *bdev)
+static int blkdev_reread_part(struct block_device *bdev)
{
- struct gendisk *disk = bdev->bd_disk;
+ int ret;
- if (!disk_part_scan_enabled(disk) || bdev != bdev->bd_contains)
+ if (!disk_part_scan_enabled(bdev->bd_disk) || bdev != bdev->bd_contains)
return -EINVAL;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
- lockdep_assert_held(&bdev->bd_mutex);
-
- return rescan_partitions(disk, bdev);
-}
-EXPORT_SYMBOL(__blkdev_reread_part);
-
-/*
- * This is an exported API for the block driver, and will
- * try to acquire bd_mutex. If bd_mutex has been held already
- * in current context, please call __blkdev_reread_part().
- *
- * Make sure the held locks in current context aren't required
- * in open()/close() handler and I/O path for avoiding ABBA deadlock:
- * - bd_mutex is held before calling block driver's open/close
- * handler
- * - reading partition table may submit I/O to the block device
- */
-int blkdev_reread_part(struct block_device *bdev)
-{
- int res;
-
mutex_lock(&bdev->bd_mutex);
- res = __blkdev_reread_part(bdev);
+ ret = bdev_disk_changed(bdev, false);
mutex_unlock(&bdev->bd_mutex);
- return res;
+ return ret;
}
-EXPORT_SYMBOL(blkdev_reread_part);
static int blk_ioctl_discard(struct block_device *bdev, fmode_t mode,
unsigned long arg, unsigned long flags)
@@ -532,7 +505,10 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
case BLKREPORTZONE:
return blkdev_report_zones_ioctl(bdev, mode, cmd, arg);
case BLKRESETZONE:
- return blkdev_reset_zones_ioctl(bdev, mode, cmd, arg);
+ case BLKOPENZONE:
+ case BLKCLOSEZONE:
+ case BLKFINISHZONE:
+ return blkdev_zone_mgmt_ioctl(bdev, mode, cmd, arg);
case BLKGETZONESZ:
return put_uint(arg, bdev_zone_sectors(bdev));
case BLKGETNRZONES:
diff --git a/block/opal_proto.h b/block/opal_proto.h
index 5532412d567c..325cbba2465f 100644
--- a/block/opal_proto.h
+++ b/block/opal_proto.h
@@ -76,7 +76,6 @@ enum opal_response_token {
* Derived from: TCG_Storage_Architecture_Core_Spec_v2.01_r1.00
* Section: 6.3 Assigned UIDs
*/
-#define OPAL_UID_LENGTH 8
#define OPAL_METHOD_LENGTH 8
#define OPAL_MSID_KEYLEN 15
#define OPAL_UID_LENGTH_HALF 4
@@ -108,6 +107,7 @@ enum opal_uid {
OPAL_C_PIN_TABLE,
OPAL_LOCKING_INFO_TABLE,
OPAL_ENTERPRISE_LOCKING_INFO_TABLE,
+ OPAL_DATASTORE,
/* C_PIN_TABLE object ID's */
OPAL_C_PIN_MSID,
OPAL_C_PIN_SID,
@@ -205,6 +205,10 @@ enum opal_lockingstate {
OPAL_LOCKING_LOCKED = 0x03,
};
+enum opal_parameter {
+ OPAL_SUM_SET_LIST = 0x060000,
+};
+
/* Packets derived from:
* TCG_Storage_Architecture_Core_Spec_v2.01_r1.00
* Secion: 3.2.3 ComPackets, Packets & Subpackets
diff --git a/block/partition-generic.c b/block/partition-generic.c
index aee643ce13d1..1d20c9cf213f 100644
--- a/block/partition-generic.c
+++ b/block/partition-generic.c
@@ -127,7 +127,8 @@ ssize_t part_stat_show(struct device *dev,
"%8lu %8lu %8llu %8u "
"%8lu %8lu %8llu %8u "
"%8u %8u %8u "
- "%8lu %8lu %8llu %8u"
+ "%8lu %8lu %8llu %8u "
+ "%8lu %8u"
"\n",
part_stat_read(p, ios[STAT_READ]),
part_stat_read(p, merges[STAT_READ]),
@@ -143,7 +144,9 @@ ssize_t part_stat_show(struct device *dev,
part_stat_read(p, ios[STAT_DISCARD]),
part_stat_read(p, merges[STAT_DISCARD]),
(unsigned long long)part_stat_read(p, sectors[STAT_DISCARD]),
- (unsigned int)part_stat_read_msecs(p, STAT_DISCARD));
+ (unsigned int)part_stat_read_msecs(p, STAT_DISCARD),
+ part_stat_read(p, ios[STAT_FLUSH]),
+ (unsigned int)part_stat_read_msecs(p, STAT_FLUSH));
}
ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr,
@@ -439,12 +442,14 @@ static bool disk_unlock_native_capacity(struct gendisk *disk)
}
}
-static int drop_partitions(struct gendisk *disk, struct block_device *bdev)
+int blk_drop_partitions(struct gendisk *disk, struct block_device *bdev)
{
struct disk_part_iter piter;
struct hd_struct *part;
int res;
+ if (!disk_part_scan_enabled(disk))
+ return 0;
if (bdev->bd_part_count || bdev->bd_super)
return -EBUSY;
res = invalidate_partition(disk, 0);
@@ -459,204 +464,124 @@ static int drop_partitions(struct gendisk *disk, struct block_device *bdev)
return 0;
}
-static bool part_zone_aligned(struct gendisk *disk,
- struct block_device *bdev,
- sector_t from, sector_t size)
+static bool blk_add_partition(struct gendisk *disk, struct block_device *bdev,
+ struct parsed_partitions *state, int p)
{
- unsigned int zone_sectors = bdev_zone_sectors(bdev);
+ sector_t size = state->parts[p].size;
+ sector_t from = state->parts[p].from;
+ struct hd_struct *part;
- /*
- * If this function is called, then the disk is a zoned block device
- * (host-aware or host-managed). This can be detected even if the
- * zoned block device support is disabled (CONFIG_BLK_DEV_ZONED not
- * set). In this case, however, only host-aware devices will be seen
- * as a block device is not created for host-managed devices. Without
- * zoned block device support, host-aware drives can still be used as
- * regular block devices (no zone operation) and their zone size will
- * be reported as 0. Allow this case.
- */
- if (!zone_sectors)
+ if (!size)
return true;
- /*
- * Check partition start and size alignement. If the drive has a
- * smaller last runt zone, ignore it and allow the partition to
- * use it. Check the zone size too: it should be a power of 2 number
- * of sectors.
- */
- if (WARN_ON_ONCE(!is_power_of_2(zone_sectors))) {
- u32 rem;
-
- div_u64_rem(from, zone_sectors, &rem);
- if (rem)
+ if (from >= get_capacity(disk)) {
+ printk(KERN_WARNING
+ "%s: p%d start %llu is beyond EOD, ",
+ disk->disk_name, p, (unsigned long long) from);
+ if (disk_unlock_native_capacity(disk))
return false;
- if ((from + size) < get_capacity(disk)) {
- div_u64_rem(size, zone_sectors, &rem);
- if (rem)
- return false;
- }
+ return true;
+ }
- } else {
+ if (from + size > get_capacity(disk)) {
+ printk(KERN_WARNING
+ "%s: p%d size %llu extends beyond EOD, ",
+ disk->disk_name, p, (unsigned long long) size);
- if (from & (zone_sectors - 1))
- return false;
- if ((from + size) < get_capacity(disk) &&
- (size & (zone_sectors - 1)))
+ if (disk_unlock_native_capacity(disk))
return false;
+ /*
+ * We cannot ignore partitions of broken tables created by, for
+ * example, camera firmware, but we limit them to the end of the
+ * disk to avoid creating invalid block devices.
+ */
+ size = get_capacity(disk) - from;
+ }
+
+ part = add_partition(disk, p, from, size, state->parts[p].flags,
+ &state->parts[p].info);
+ if (IS_ERR(part)) {
+ printk(KERN_ERR " %s: p%d could not be added: %ld\n",
+ disk->disk_name, p, -PTR_ERR(part));
+ return true;
}
+#ifdef CONFIG_BLK_DEV_MD
+ if (state->parts[p].flags & ADDPART_FLAG_RAID)
+ md_autodetect_dev(part_to_dev(part)->devt);
+#endif
return true;
}
-int rescan_partitions(struct gendisk *disk, struct block_device *bdev)
+int blk_add_partitions(struct gendisk *disk, struct block_device *bdev)
{
- struct parsed_partitions *state = NULL;
- struct hd_struct *part;
- int p, highest, res;
-rescan:
- if (state && !IS_ERR(state)) {
- free_partitions(state);
- state = NULL;
- }
+ struct parsed_partitions *state;
+ int ret = -EAGAIN, p, highest;
- res = drop_partitions(disk, bdev);
- if (res)
- return res;
+ if (!disk_part_scan_enabled(disk))
+ return 0;
- if (disk->fops->revalidate_disk)
- disk->fops->revalidate_disk(disk);
- check_disk_size_change(disk, bdev, true);
- bdev->bd_invalidated = 0;
- if (!get_capacity(disk) || !(state = check_partition(disk, bdev)))
+ state = check_partition(disk, bdev);
+ if (!state)
return 0;
if (IS_ERR(state)) {
/*
- * I/O error reading the partition table. If any
- * partition code tried to read beyond EOD, retry
- * after unlocking native capacity.
+ * I/O error reading the partition table. If we tried to read
+ * beyond EOD, retry after unlocking the native capacity.
*/
if (PTR_ERR(state) == -ENOSPC) {
printk(KERN_WARNING "%s: partition table beyond EOD, ",
disk->disk_name);
if (disk_unlock_native_capacity(disk))
- goto rescan;
+ return -EAGAIN;
}
return -EIO;
}
+
/*
- * If any partition code tried to read beyond EOD, try
- * unlocking native capacity even if partition table is
- * successfully read as we could be missing some partitions.
+ * Partitions are not supported on zoned block devices.
+ */
+ if (bdev_is_zoned(bdev)) {
+ pr_warn("%s: ignoring partition table on zoned block device\n",
+ disk->disk_name);
+ ret = 0;
+ goto out_free_state;
+ }
+
+ /*
+ * If we read beyond EOD, try unlocking native capacity even if the
+ * partition table was successfully read as we could be missing some
+ * partitions.
*/
if (state->access_beyond_eod) {
printk(KERN_WARNING
"%s: partition table partially beyond EOD, ",
disk->disk_name);
if (disk_unlock_native_capacity(disk))
- goto rescan;
+ goto out_free_state;
}
/* tell userspace that the media / partition table may have changed */
kobject_uevent(&disk_to_dev(disk)->kobj, KOBJ_CHANGE);
- /* Detect the highest partition number and preallocate
- * disk->part_tbl. This is an optimization and not strictly
- * necessary.
+ /*
+ * Detect the highest partition number and preallocate disk->part_tbl.
+ * This is an optimization and not strictly necessary.
*/
for (p = 1, highest = 0; p < state->limit; p++)
if (state->parts[p].size)
highest = p;
-
disk_expand_part_tbl(disk, highest);
- /* add partitions */
- for (p = 1; p < state->limit; p++) {
- sector_t size, from;
-
- size = state->parts[p].size;
- if (!size)
- continue;
-
- from = state->parts[p].from;
- if (from >= get_capacity(disk)) {
- printk(KERN_WARNING
- "%s: p%d start %llu is beyond EOD, ",
- disk->disk_name, p, (unsigned long long) from);
- if (disk_unlock_native_capacity(disk))
- goto rescan;
- continue;
- }
-
- if (from + size > get_capacity(disk)) {
- printk(KERN_WARNING
- "%s: p%d size %llu extends beyond EOD, ",
- disk->disk_name, p, (unsigned long long) size);
-
- if (disk_unlock_native_capacity(disk)) {
- /* free state and restart */
- goto rescan;
- } else {
- /*
- * we can not ignore partitions of broken tables
- * created by for example camera firmware, but
- * we limit them to the end of the disk to avoid
- * creating invalid block devices
- */
- size = get_capacity(disk) - from;
- }
- }
-
- /*
- * On a zoned block device, partitions should be aligned on the
- * device zone size (i.e. zone boundary crossing not allowed).
- * Otherwise, resetting the write pointer of the last zone of
- * one partition may impact the following partition.
- */
- if (bdev_is_zoned(bdev) &&
- !part_zone_aligned(disk, bdev, from, size)) {
- printk(KERN_WARNING
- "%s: p%d start %llu+%llu is not zone aligned\n",
- disk->disk_name, p, (unsigned long long) from,
- (unsigned long long) size);
- continue;
- }
+ for (p = 1; p < state->limit; p++)
+ if (!blk_add_partition(disk, bdev, state, p))
+ goto out_free_state;
- part = add_partition(disk, p, from, size,
- state->parts[p].flags,
- &state->parts[p].info);
- if (IS_ERR(part)) {
- printk(KERN_ERR " %s: p%d could not be added: %ld\n",
- disk->disk_name, p, -PTR_ERR(part));
- continue;
- }
-#ifdef CONFIG_BLK_DEV_MD
- if (state->parts[p].flags & ADDPART_FLAG_RAID)
- md_autodetect_dev(part_to_dev(part)->devt);
-#endif
- }
+ ret = 0;
+out_free_state:
free_partitions(state);
- return 0;
-}
-
-int invalidate_partitions(struct gendisk *disk, struct block_device *bdev)
-{
- int res;
-
- if (!bdev->bd_invalidated)
- return 0;
-
- res = drop_partitions(disk, bdev);
- if (res)
- return res;
-
- set_capacity(disk, 0);
- check_disk_size_change(disk, bdev, false);
- bdev->bd_invalidated = 0;
- /* tell userspace that the media / partition table may have changed */
- kobject_uevent(&disk_to_dev(disk)->kobj, KOBJ_CHANGE);
-
- return 0;
+ return ret;
}
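
blk_add_partitions() now returns -EAGAIN instead of looping internally with "goto rescan"; the retry moved to the caller. A sketch of the caller-side loop, assuming it mirrors bdev_disk_changed() (called from the ioctl.c hunk above but not shown here):

static int sketch_disk_changed(struct gendisk *disk, struct block_device *bdev)
{
	int ret;

rescan:
	ret = blk_drop_partitions(disk, bdev);
	if (ret)
		return ret;

	if (disk->fops->revalidate_disk)
		disk->fops->revalidate_disk(disk);

	if (get_capacity(disk)) {
		ret = blk_add_partitions(disk, bdev);
		if (ret == -EAGAIN)
			goto rescan;	/* native capacity was unlocked */
	}
	return ret;
}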
unsigned char *read_dev_sector(struct block_device *bdev, sector_t n, Sector *p)
diff --git a/block/sed-opal.c b/block/sed-opal.c
index b4c761973ac1..880cc57a5f6b 100644
--- a/block/sed-opal.c
+++ b/block/sed-opal.c
@@ -149,6 +149,8 @@ static const u8 opaluid[][OPAL_UID_LENGTH] = {
{ 0x00, 0x00, 0x08, 0x01, 0x00, 0x00, 0x00, 0x01 },
[OPAL_ENTERPRISE_LOCKING_INFO_TABLE] =
{ 0x00, 0x00, 0x08, 0x01, 0x00, 0x00, 0x00, 0x00 },
+ [OPAL_DATASTORE] =
+ { 0x00, 0x00, 0x10, 0x01, 0x00, 0x00, 0x00, 0x00 },
/* C_PIN_TABLE object ID's */
[OPAL_C_PIN_MSID] =
@@ -1139,11 +1141,11 @@ static int generic_get_column(struct opal_dev *dev, const u8 *table,
*
* the result is provided in dev->resp->tok[4]
*/
-static int generic_get_table_info(struct opal_dev *dev, enum opal_uid table,
+static int generic_get_table_info(struct opal_dev *dev, const u8 *table_uid,
u64 column)
{
u8 uid[OPAL_UID_LENGTH];
- const unsigned int half = OPAL_UID_LENGTH/2;
+ const unsigned int half = OPAL_UID_LENGTH_HALF;
/* sed-opal UIDs can be split in two halves:
* first: actual table index
@@ -1152,7 +1154,7 @@ static int generic_get_table_info(struct opal_dev *dev, enum opal_uid table,
* first part of the target table as relative index into that table
*/
memcpy(uid, opaluid[OPAL_TABLE_TABLE], half);
- memcpy(uid+half, opaluid[table], half);
+ memcpy(uid + half, table_uid, half);
return generic_get_column(dev, uid, column);
}
@@ -1221,6 +1223,75 @@ static int get_active_key(struct opal_dev *dev, void *data)
return get_active_key_cont(dev);
}
+static int generic_table_write_data(struct opal_dev *dev, const u64 data,
+ u64 offset, u64 size, const u8 *uid)
+{
+ const u8 __user *src = (u8 __user *)(uintptr_t)data;
+ u8 *dst;
+ u64 len;
+ size_t off = 0;
+ int err;
+
+ /* do we fit in the available space? */
+ err = generic_get_table_info(dev, uid, OPAL_TABLE_ROWS);
+ if (err) {
+ pr_debug("Couldn't get the table size\n");
+ return err;
+ }
+
+ len = response_get_u64(&dev->parsed, 4);
+ if (size > len || offset > len - size) {
+ pr_debug("Does not fit in the table (%llu vs. %llu)\n",
+ offset + size, len);
+ return -ENOSPC;
+ }
+
+ /* do the actual transmission(s) */
+ while (off < size) {
+ err = cmd_start(dev, uid, opalmethod[OPAL_SET]);
+ add_token_u8(&err, dev, OPAL_STARTNAME);
+ add_token_u8(&err, dev, OPAL_WHERE);
+ add_token_u64(&err, dev, offset + off);
+ add_token_u8(&err, dev, OPAL_ENDNAME);
+
+ add_token_u8(&err, dev, OPAL_STARTNAME);
+ add_token_u8(&err, dev, OPAL_VALUES);
+
+ /*
+ * The bytestring header is either 1 or 2 bytes, so assume 2.
+ * There also needs to be enough space to accommodate the
+ * trailing OPAL_ENDNAME (1 byte) and tokens added by
+ * cmd_finalize.
+ */
+ len = min(remaining_size(dev) - (2+1+CMD_FINALIZE_BYTES_NEEDED),
+ (size_t)(size - off));
+ pr_debug("Write bytes %zu+%llu/%llu\n", off, len, size);
+
+ dst = add_bytestring_header(&err, dev, len);
+ if (!dst)
+ break;
+
+ if (copy_from_user(dst, src + off, len)) {
+ err = -EFAULT;
+ break;
+ }
+
+ dev->pos += len;
+
+ add_token_u8(&err, dev, OPAL_ENDNAME);
+ if (err)
+ break;
+
+ err = finalize_and_send(dev, parse_and_check_status);
+ if (err)
+ break;
+
+ off += len;
+ }
+
+ return err;
+}
+
static int generic_lr_enable_disable(struct opal_dev *dev,
u8 *uid, bool rle, bool wle,
bool rl, bool wl)
@@ -1583,68 +1654,9 @@ static int set_mbr_enable_disable(struct opal_dev *dev, void *data)
static int write_shadow_mbr(struct opal_dev *dev, void *data)
{
struct opal_shadow_mbr *shadow = data;
- const u8 __user *src;
- u8 *dst;
- size_t off = 0;
- u64 len;
- int err = 0;
-
- /* do we fit in the available shadow mbr space? */
- err = generic_get_table_info(dev, OPAL_MBR, OPAL_TABLE_ROWS);
- if (err) {
- pr_debug("MBR: could not get shadow size\n");
- return err;
- }
-
- len = response_get_u64(&dev->parsed, 4);
- if (shadow->size > len || shadow->offset > len - shadow->size) {
- pr_debug("MBR: does not fit in shadow (%llu vs. %llu)\n",
- shadow->offset + shadow->size, len);
- return -ENOSPC;
- }
-
- /* do the actual transmission(s) */
- src = (u8 __user *)(uintptr_t)shadow->data;
- while (off < shadow->size) {
- err = cmd_start(dev, opaluid[OPAL_MBR], opalmethod[OPAL_SET]);
- add_token_u8(&err, dev, OPAL_STARTNAME);
- add_token_u8(&err, dev, OPAL_WHERE);
- add_token_u64(&err, dev, shadow->offset + off);
- add_token_u8(&err, dev, OPAL_ENDNAME);
-
- add_token_u8(&err, dev, OPAL_STARTNAME);
- add_token_u8(&err, dev, OPAL_VALUES);
-
- /*
- * The bytestring header is either 1 or 2 bytes, so assume 2.
- * There also needs to be enough space to accommodate the
- * trailing OPAL_ENDNAME (1 byte) and tokens added by
- * cmd_finalize.
- */
- len = min(remaining_size(dev) - (2+1+CMD_FINALIZE_BYTES_NEEDED),
- (size_t)(shadow->size - off));
- pr_debug("MBR: write bytes %zu+%llu/%llu\n",
- off, len, shadow->size);
-
- dst = add_bytestring_header(&err, dev, len);
- if (!dst)
- break;
- if (copy_from_user(dst, src + off, len))
- err = -EFAULT;
- dev->pos += len;
-
- add_token_u8(&err, dev, OPAL_ENDNAME);
- if (err)
- break;
-
- err = finalize_and_send(dev, parse_and_check_status);
- if (err)
- break;
-
- off += len;
- }
- return err;
+ return generic_table_write_data(dev, shadow->data, shadow->offset,
+ shadow->size, opaluid[OPAL_MBR]);
}
static int generic_pw_cmd(u8 *key, size_t key_len, u8 *cpin_uid,
@@ -1874,7 +1886,6 @@ static int activate_lsp(struct opal_dev *dev, void *data)
{
struct opal_lr_act *opal_act = data;
u8 user_lr[OPAL_UID_LENGTH];
- u8 uint_3 = 0x83;
int err, i;
err = cmd_start(dev, opaluid[OPAL_LOCKINGSP_UID],
@@ -1887,10 +1898,7 @@ static int activate_lsp(struct opal_dev *dev, void *data)
return err;
add_token_u8(&err, dev, OPAL_STARTNAME);
- add_token_u8(&err, dev, uint_3);
- add_token_u8(&err, dev, 6);
- add_token_u8(&err, dev, 0);
- add_token_u8(&err, dev, 0);
+ add_token_u64(&err, dev, OPAL_SUM_SET_LIST);
add_token_u8(&err, dev, OPAL_STARTLIST);
add_token_bytestring(&err, dev, user_lr, OPAL_UID_LENGTH);
@@ -1957,6 +1965,113 @@ static int get_msid_cpin_pin(struct opal_dev *dev, void *data)
return 0;
}
+static int write_table_data(struct opal_dev *dev, void *data)
+{
+ struct opal_read_write_table *write_tbl = data;
+
+ return generic_table_write_data(dev, write_tbl->data, write_tbl->offset,
+ write_tbl->size, write_tbl->table_uid);
+}
+
+static int read_table_data_cont(struct opal_dev *dev)
+{
+ int err;
+ const char *data_read;
+
+ err = parse_and_check_status(dev);
+ if (err)
+ return err;
+
+ dev->prev_d_len = response_get_string(&dev->parsed, 1, &data_read);
+ dev->prev_data = (void *)data_read;
+ if (!dev->prev_data) {
+ pr_debug("%s: Couldn't read data from the table.\n", __func__);
+ return OPAL_INVAL_PARAM;
+ }
+
+ return 0;
+}
+
+/*
+ * IO_BUFFER_LENGTH = 2048
+ * sizeof(header) = 56
+ * No. of Token Bytes in the Response = 11
+ * The maximum size of data that can be carried in the response
+ * buffer at a time is: 2048 - (56 + 11) = 1981 = 0x7BD.
+ */
+#define OPAL_MAX_READ_TABLE (0x7BD)
+
+static int read_table_data(struct opal_dev *dev, void *data)
+{
+ struct opal_read_write_table *read_tbl = data;
+ int err;
+ size_t off = 0, max_read_size = OPAL_MAX_READ_TABLE;
+ u64 table_len, len;
+ u64 offset = read_tbl->offset, read_size = read_tbl->size - 1;
+ u8 __user *dst;
+
+ err = generic_get_table_info(dev, read_tbl->table_uid, OPAL_TABLE_ROWS);
+ if (err) {
+ pr_debug("Couldn't get the table size\n");
+ return err;
+ }
+
+ table_len = response_get_u64(&dev->parsed, 4);
+
+ /* Check if the user is trying to read beyond the table limits */
+ if (read_size > table_len || offset > table_len - read_size) {
+ pr_debug("Read size exceeds the Table size limits (%llu vs. %llu)\n",
+ offset + read_size, table_len);
+ return -EINVAL;
+ }
+
+ while (off < read_size) {
+ err = cmd_start(dev, read_tbl->table_uid, opalmethod[OPAL_GET]);
+
+ add_token_u8(&err, dev, OPAL_STARTLIST);
+ add_token_u8(&err, dev, OPAL_STARTNAME);
+ add_token_u8(&err, dev, OPAL_STARTROW);
+ add_token_u64(&err, dev, offset + off); /* start row value */
+ add_token_u8(&err, dev, OPAL_ENDNAME);
+
+ add_token_u8(&err, dev, OPAL_STARTNAME);
+ add_token_u8(&err, dev, OPAL_ENDROW);
+
+ len = min(max_read_size, (size_t)(read_size - off));
+ add_token_u64(&err, dev, offset + off + len); /* end row value */
+ add_token_u8(&err, dev, OPAL_ENDNAME);
+ add_token_u8(&err, dev, OPAL_ENDLIST);
+
+ if (err) {
+ pr_debug("Error building read table data command.\n");
+ break;
+ }
+
+ err = finalize_and_send(dev, read_table_data_cont);
+ if (err)
+ break;
+
+ /* len + 1: this includes the NULL terminator at the end */
+ if (dev->prev_d_len > len + 1) {
+ err = -EOVERFLOW;
+ break;
+ }
+
+ dst = (u8 __user *)(uintptr_t)read_tbl->data;
+ if (copy_to_user(dst + off, dev->prev_data, dev->prev_d_len)) {
+ pr_debug("Error copying data to userspace\n");
+ err = -EFAULT;
+ break;
+ }
+ dev->prev_data = NULL;
+
+ off += len;
+ }
+
+ return err;
+}
+
static int end_opal_session(struct opal_dev *dev, void *data)
{
int err = 0;
@@ -2443,6 +2558,68 @@ bool opal_unlock_from_suspend(struct opal_dev *dev)
}
EXPORT_SYMBOL(opal_unlock_from_suspend);
+static int opal_read_table(struct opal_dev *dev,
+ struct opal_read_write_table *rw_tbl)
+{
+ const struct opal_step read_table_steps[] = {
+ { start_admin1LSP_opal_session, &rw_tbl->key },
+ { read_table_data, rw_tbl },
+ { end_opal_session, }
+ };
+ int ret = 0;
+
+ if (!rw_tbl->size)
+ return ret;
+
+ return execute_steps(dev, read_table_steps,
+ ARRAY_SIZE(read_table_steps));
+}
+
+static int opal_write_table(struct opal_dev *dev,
+ struct opal_read_write_table *rw_tbl)
+{
+ const struct opal_step write_table_steps[] = {
+ { start_admin1LSP_opal_session, &rw_tbl->key },
+ { write_table_data, rw_tbl },
+ { end_opal_session, }
+ };
+ int ret = 0;
+
+ if (!rw_tbl->size)
+ return ret;
+
+ return execute_steps(dev, write_table_steps,
+ ARRAY_SIZE(write_table_steps));
+}
+
+static int opal_generic_read_write_table(struct opal_dev *dev,
+ struct opal_read_write_table *rw_tbl)
+{
+ int ret, bit_set;
+
+ mutex_lock(&dev->dev_lock);
+ setup_opal_dev(dev);
+
+ bit_set = fls64(rw_tbl->flags) - 1;
+ switch (bit_set) {
+ case OPAL_READ_TABLE:
+ ret = opal_read_table(dev, rw_tbl);
+ break;
+ case OPAL_WRITE_TABLE:
+ ret = opal_write_table(dev, rw_tbl);
+ break;
+ default:
+ pr_debug("Invalid bit set in the flag (%016llx).\n",
+ rw_tbl->flags);
+ ret = -EINVAL;
+ break;
+ }
+
+ mutex_unlock(&dev->dev_lock);
+
+ return ret;
+}
+
int sed_ioctl(struct opal_dev *dev, unsigned int cmd, void __user *arg)
{
void *p;
@@ -2505,6 +2682,9 @@ int sed_ioctl(struct opal_dev *dev, unsigned int cmd, void __user *arg)
case IOC_OPAL_PSID_REVERT_TPR:
ret = opal_reverttper(dev, p, true);
break;
+ case IOC_OPAL_GENERIC_TABLE_RW:
+ ret = opal_generic_read_write_table(dev, p);
+ break;
default:
break;
}
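
User space reaches the new generic table read/write path through IOC_OPAL_GENERIC_TABLE_RW. A hypothetical sketch (field names follow the kernel code above; the exact uapi layout of struct opal_read_write_table and the OPAL_READ_TABLE flag bit are assumptions):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/sed-opal.h>

/* Read 512 bytes from the start of an OPAL table identified by its
 * 8-byte UID. The key field must carry the Admin1 credentials.
 */
static int read_opal_table(int fd, const unsigned char uid[8], void *buf)
{
	struct opal_read_write_table rw_tbl;

	memset(&rw_tbl, 0, sizeof(rw_tbl));
	memcpy(rw_tbl.table_uid, uid, 8);
	rw_tbl.data = (__u64)(uintptr_t)buf;	/* user buffer, see read_table_data() */
	rw_tbl.offset = 0;
	rw_tbl.size = 512;
	rw_tbl.flags = 1 << OPAL_READ_TABLE;	/* bit recovered via fls64() above */
	/* rw_tbl.key setup (Admin1 password) elided */
	return ioctl(fd, IOC_OPAL_GENERIC_TABLE_RW, &rw_tbl);
}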
diff --git a/block/t10-pi.c b/block/t10-pi.c
index 9803c7e0376e..f4907d941f03 100644
--- a/block/t10-pi.c
+++ b/block/t10-pi.c
@@ -235,16 +235,12 @@ static blk_status_t t10_pi_type3_verify_ip(struct blk_integrity_iter *iter)
return t10_pi_verify(iter, t10_pi_ip_fn, T10_PI_TYPE3_PROTECTION);
}
-/**
- * Type 3 does not have a reference tag so no remapping is required.
- */
+/* Type 3 does not have a reference tag so no remapping is required. */
static void t10_pi_type3_prepare(struct request *rq)
{
}
-/**
- * Type 3 does not have a reference tag so no remapping is required.
- */
+/* Type 3 does not have a reference tag so no remapping is required. */
static void t10_pi_type3_complete(struct request *rq, unsigned int nr_bytes)
{
}
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 9e524044d312..5575d48473bd 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -52,12 +52,12 @@ config CRYPTO_AEAD2
select CRYPTO_NULL2
select CRYPTO_RNG2
-config CRYPTO_BLKCIPHER
+config CRYPTO_SKCIPHER
tristate
- select CRYPTO_BLKCIPHER2
+ select CRYPTO_SKCIPHER2
select CRYPTO_ALGAPI
-config CRYPTO_BLKCIPHER2
+config CRYPTO_SKCIPHER2
tristate
select CRYPTO_ALGAPI2
select CRYPTO_RNG2
@@ -123,7 +123,7 @@ config CRYPTO_MANAGER2
def_tristate CRYPTO_MANAGER || (CRYPTO_MANAGER!=n && CRYPTO_ALGAPI=y)
select CRYPTO_AEAD2
select CRYPTO_HASH2
- select CRYPTO_BLKCIPHER2
+ select CRYPTO_SKCIPHER2
select CRYPTO_AKCIPHER2
select CRYPTO_KPP2
select CRYPTO_ACOMP2
@@ -169,7 +169,7 @@ config CRYPTO_NULL
config CRYPTO_NULL2
tristate
select CRYPTO_ALGAPI2
- select CRYPTO_BLKCIPHER2
+ select CRYPTO_SKCIPHER2
select CRYPTO_HASH2
config CRYPTO_PCRYPT
@@ -184,7 +184,7 @@ config CRYPTO_PCRYPT
config CRYPTO_CRYPTD
tristate "Software async crypto daemon"
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_HASH
select CRYPTO_MANAGER
help
@@ -195,7 +195,7 @@ config CRYPTO_CRYPTD
config CRYPTO_AUTHENC
tristate "Authenc support"
select CRYPTO_AEAD
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_MANAGER
select CRYPTO_HASH
select CRYPTO_NULL
@@ -217,7 +217,7 @@ config CRYPTO_SIMD
config CRYPTO_GLUE_HELPER_X86
tristate
depends on X86
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
config CRYPTO_ENGINE
tristate
@@ -264,6 +264,17 @@ config CRYPTO_ECRDSA
standard algorithms (called GOST algorithms). Only signature verification
is implemented.
+config CRYPTO_CURVE25519
+ tristate "Curve25519 algorithm"
+ select CRYPTO_KPP
+ select CRYPTO_LIB_CURVE25519_GENERIC
+
+config CRYPTO_CURVE25519_X86
+ tristate "x86_64 accelerated Curve25519 scalar multiplication library"
+ depends on X86 && 64BIT
+ select CRYPTO_LIB_CURVE25519_GENERIC
+ select CRYPTO_ARCH_HAVE_LIB_CURVE25519
+
comment "Authenticated Encryption with Associated Data"
config CRYPTO_CCM
@@ -309,6 +320,7 @@ config CRYPTO_AEGIS128
config CRYPTO_AEGIS128_SIMD
bool "Support SIMD acceleration for AEGIS-128"
depends on CRYPTO_AEGIS128 && ((ARM || ARM64) && KERNEL_MODE_NEON)
+ depends on !ARM || CC_IS_CLANG || GCC_VERSION >= 40800
default y
config CRYPTO_AEGIS128_AESNI_SSE2
@@ -322,7 +334,7 @@ config CRYPTO_AEGIS128_AESNI_SSE2
config CRYPTO_SEQIV
tristate "Sequence Number IV Generator"
select CRYPTO_AEAD
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_NULL
select CRYPTO_RNG_DEFAULT
select CRYPTO_MANAGER
@@ -345,7 +357,7 @@ comment "Block modes"
config CRYPTO_CBC
tristate "CBC support"
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_MANAGER
help
CBC: Cipher Block Chaining mode
@@ -353,7 +365,7 @@ config CRYPTO_CBC
config CRYPTO_CFB
tristate "CFB support"
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_MANAGER
help
CFB: Cipher FeedBack mode
@@ -361,7 +373,7 @@ config CRYPTO_CFB
config CRYPTO_CTR
tristate "CTR support"
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_SEQIV
select CRYPTO_MANAGER
help
@@ -370,7 +382,7 @@ config CRYPTO_CTR
config CRYPTO_CTS
tristate "CTS support"
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_MANAGER
help
CTS: Cipher Text Stealing
@@ -385,7 +397,7 @@ config CRYPTO_CTS
config CRYPTO_ECB
tristate "ECB support"
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_MANAGER
help
ECB: Electronic CodeBook mode
@@ -394,7 +406,7 @@ config CRYPTO_ECB
config CRYPTO_LRW
tristate "LRW support"
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_MANAGER
select CRYPTO_GF128MUL
help
@@ -406,7 +418,7 @@ config CRYPTO_LRW
config CRYPTO_OFB
tristate "OFB support"
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_MANAGER
help
OFB: the Output Feedback mode makes a block cipher into a synchronous
@@ -418,7 +430,7 @@ config CRYPTO_OFB
config CRYPTO_PCBC
tristate "PCBC support"
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_MANAGER
help
PCBC: Propagating Cipher Block Chaining mode
@@ -426,7 +438,7 @@ config CRYPTO_PCBC
config CRYPTO_XTS
tristate "XTS support"
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_MANAGER
select CRYPTO_ECB
help
@@ -436,7 +448,7 @@ config CRYPTO_XTS
config CRYPTO_KEYWRAP
tristate "Key wrapping support"
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_MANAGER
help
Support for key wrapping (NIST SP800-38F / RFC3394) without
@@ -445,7 +457,7 @@ config CRYPTO_KEYWRAP
config CRYPTO_NHPOLY1305
tristate
select CRYPTO_HASH
- select CRYPTO_POLY1305
+ select CRYPTO_LIB_POLY1305_GENERIC
config CRYPTO_NHPOLY1305_SSE2
tristate "NHPoly1305 hash function (x86_64 SSE2 implementation)"
@@ -466,7 +478,7 @@ config CRYPTO_NHPOLY1305_AVX2
config CRYPTO_ADIANTUM
tristate "Adiantum support"
select CRYPTO_CHACHA20
- select CRYPTO_POLY1305
+ select CRYPTO_LIB_POLY1305_GENERIC
select CRYPTO_NHPOLY1305
select CRYPTO_MANAGER
help
@@ -638,6 +650,47 @@ config CRYPTO_XXHASH
xxHash non-cryptographic hash algorithm. Extremely fast, working at
speeds close to RAM limits.
+config CRYPTO_BLAKE2B
+ tristate "BLAKE2b digest algorithm"
+ select CRYPTO_HASH
+ help
+ Implementation of the BLAKE2b cryptographic hash function (or just
+ BLAKE2). It is optimized for 64-bit platforms and can produce digests
+ of any size between 1 and 64 bytes. The keyed hash is also implemented.
+
+ This module provides the following algorithms:
+
+ - blake2b-160
+ - blake2b-256
+ - blake2b-384
+ - blake2b-512
+
+ See https://blake2.net for further information.
+
+config CRYPTO_BLAKE2S
+ tristate "BLAKE2s digest algorithm"
+ select CRYPTO_LIB_BLAKE2S_GENERIC
+ select CRYPTO_HASH
+ help
+ Implementation of the BLAKE2s cryptographic hash function. It is
+ optimized for 8- to 32-bit platforms and can produce digests of any
+ size between 1 and 32 bytes. The keyed hash is also implemented.
+
+ This module provides the following algorithms:
+
+ - blake2s-128
+ - blake2s-160
+ - blake2s-224
+ - blake2s-256
+
+ See https://blake2.net for further information.
+
+config CRYPTO_BLAKE2S_X86
+ tristate "BLAKE2s digest algorithm (x86 accelerated version)"
+ depends on X86 && 64BIT
+ select CRYPTO_LIB_BLAKE2S_GENERIC
+ select CRYPTO_ARCH_HAVE_LIB_BLAKE2S
+
config CRYPTO_CRCT10DIF
tristate "CRCT10DIF algorithm"
select CRYPTO_HASH
@@ -685,6 +738,7 @@ config CRYPTO_GHASH
config CRYPTO_POLY1305
tristate "Poly1305 authenticator algorithm"
select CRYPTO_HASH
+ select CRYPTO_LIB_POLY1305_GENERIC
help
Poly1305 authenticator algorithm, RFC7539.
@@ -695,7 +749,8 @@ config CRYPTO_POLY1305
config CRYPTO_POLY1305_X86_64
tristate "Poly1305 authenticator algorithm (x86_64/SSE2/AVX2)"
depends on X86 && 64BIT
- select CRYPTO_POLY1305
+ select CRYPTO_LIB_POLY1305_GENERIC
+ select CRYPTO_ARCH_HAVE_LIB_POLY1305
help
Poly1305 authenticator algorithm, RFC7539.
@@ -704,6 +759,11 @@ config CRYPTO_POLY1305_X86_64
in IETF protocols. This is the x86_64 assembler implementation using SIMD
instructions.
+config CRYPTO_POLY1305_MIPS
+ tristate "Poly1305 authenticator algorithm (MIPS optimized)"
+ depends on CPU_MIPS32 || (CPU_MIPS64 && 64BIT)
+ select CRYPTO_ARCH_HAVE_LIB_POLY1305
+
config CRYPTO_MD4
tristate "MD4 digest algorithm"
select CRYPTO_HASH
@@ -877,9 +937,6 @@ config CRYPTO_SHA1_PPC_SPE
SHA-1 secure hash standard (DFIPS 180-4) implemented
using powerpc SPE SIMD instruction set.
-config CRYPTO_LIB_SHA256
- tristate
-
config CRYPTO_SHA256
tristate "SHA224 and SHA256 digest algorithm"
select CRYPTO_HASH
@@ -1018,9 +1075,6 @@ config CRYPTO_GHASH_CLMUL_NI_INTEL
comment "Ciphers"
-config CRYPTO_LIB_AES
- tristate
-
config CRYPTO_AES
tristate "AES cipher algorithms"
select CRYPTO_ALGAPI
@@ -1067,7 +1121,7 @@ config CRYPTO_AES_NI_INTEL
select CRYPTO_AEAD
select CRYPTO_LIB_AES
select CRYPTO_ALGAPI
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_GLUE_HELPER_X86 if 64BIT
select CRYPTO_SIMD
help
@@ -1097,8 +1151,7 @@ config CRYPTO_AES_NI_INTEL
config CRYPTO_AES_SPARC64
tristate "AES cipher algorithms (SPARC64)"
depends on SPARC64
- select CRYPTO_CRYPTD
- select CRYPTO_ALGAPI
+ select CRYPTO_SKCIPHER
help
Use SPARC64 crypto opcodes for AES algorithm.
@@ -1125,6 +1178,7 @@ config CRYPTO_AES_SPARC64
config CRYPTO_AES_PPC_SPE
tristate "AES cipher algorithms (PPC SPE)"
depends on PPC && SPE
+ select CRYPTO_SKCIPHER
help
AES cipher algorithms (FIPS-197). Additionally the acceleration
for popular block cipher modes ECB, CBC, CTR and XTS is supported.
@@ -1149,12 +1203,9 @@ config CRYPTO_ANUBIS
<https://www.cosic.esat.kuleuven.be/nessie/reports/>
<http://www.larc.usp.br/~pbarreto/AnubisPage.html>
-config CRYPTO_LIB_ARC4
- tristate
-
config CRYPTO_ARC4
tristate "ARC4 cipher algorithm"
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_LIB_ARC4
help
ARC4 cipher algorithm.
@@ -1190,7 +1241,7 @@ config CRYPTO_BLOWFISH_COMMON
config CRYPTO_BLOWFISH_X86_64
tristate "Blowfish cipher algorithm (x86_64)"
depends on X86 && 64BIT
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_BLOWFISH_COMMON
help
Blowfish cipher algorithm (x86_64), by Bruce Schneier.
@@ -1221,7 +1272,7 @@ config CRYPTO_CAMELLIA_X86_64
tristate "Camellia cipher algorithm (x86_64)"
depends on X86 && 64BIT
depends on CRYPTO
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_GLUE_HELPER_X86
help
Camellia cipher algorithm module (x86_64).
@@ -1238,7 +1289,7 @@ config CRYPTO_CAMELLIA_AESNI_AVX_X86_64
tristate "Camellia cipher algorithm (x86_64/AES-NI/AVX)"
depends on X86 && 64BIT
depends on CRYPTO
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_CAMELLIA_X86_64
select CRYPTO_GLUE_HELPER_X86
select CRYPTO_SIMD
@@ -1275,6 +1326,7 @@ config CRYPTO_CAMELLIA_SPARC64
depends on SPARC64
depends on CRYPTO
select CRYPTO_ALGAPI
+ select CRYPTO_SKCIPHER
help
Camellia cipher algorithm module (SPARC64).
@@ -1303,7 +1355,7 @@ config CRYPTO_CAST5
config CRYPTO_CAST5_AVX_X86_64
tristate "CAST5 (CAST-128) cipher algorithm (x86_64/AVX)"
depends on X86 && 64BIT
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_CAST5
select CRYPTO_CAST_COMMON
select CRYPTO_SIMD
@@ -1325,7 +1377,7 @@ config CRYPTO_CAST6
config CRYPTO_CAST6_AVX_X86_64
tristate "CAST6 (CAST-256) cipher algorithm (x86_64/AVX)"
depends on X86 && 64BIT
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_CAST6
select CRYPTO_CAST_COMMON
select CRYPTO_GLUE_HELPER_X86
@@ -1338,9 +1390,6 @@ config CRYPTO_CAST6_AVX_X86_64
This module provides the Cast6 cipher algorithm that processes
eight blocks parallel using the AVX instruction set.
-config CRYPTO_LIB_DES
- tristate
-
config CRYPTO_DES
tristate "DES and Triple DES EDE cipher algorithms"
select CRYPTO_ALGAPI
@@ -1353,6 +1402,7 @@ config CRYPTO_DES_SPARC64
depends on SPARC64
select CRYPTO_ALGAPI
select CRYPTO_LIB_DES
+ select CRYPTO_SKCIPHER
help
DES cipher algorithm (FIPS 46-2), and Triple DES EDE (FIPS 46-3),
optimized using SPARC64 crypto opcodes.
@@ -1360,7 +1410,7 @@ config CRYPTO_DES_SPARC64
config CRYPTO_DES3_EDE_X86_64
tristate "Triple DES EDE cipher algorithm (x86-64)"
depends on X86 && 64BIT
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_LIB_DES
help
Triple DES EDE (FIPS 46-3) algorithm.
@@ -1373,7 +1423,7 @@ config CRYPTO_DES3_EDE_X86_64
config CRYPTO_FCRYPT
tristate "FCrypt cipher algorithm"
select CRYPTO_ALGAPI
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
help
FCrypt algorithm used by RxRPC.
@@ -1392,7 +1442,7 @@ config CRYPTO_KHAZAD
config CRYPTO_SALSA20
tristate "Salsa20 stream cipher algorithm"
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
help
Salsa20 stream cipher algorithm.
@@ -1404,7 +1454,8 @@ config CRYPTO_SALSA20
config CRYPTO_CHACHA20
tristate "ChaCha stream cipher algorithms"
- select CRYPTO_BLKCIPHER
+ select CRYPTO_LIB_CHACHA_GENERIC
+ select CRYPTO_SKCIPHER
help
The ChaCha20, XChaCha20, and XChaCha12 stream cipher algorithms.
@@ -1426,12 +1477,19 @@ config CRYPTO_CHACHA20
config CRYPTO_CHACHA20_X86_64
tristate "ChaCha stream cipher algorithms (x86_64/SSSE3/AVX2/AVX-512VL)"
depends on X86 && 64BIT
- select CRYPTO_BLKCIPHER
- select CRYPTO_CHACHA20
+ select CRYPTO_SKCIPHER
+ select CRYPTO_LIB_CHACHA_GENERIC
+ select CRYPTO_ARCH_HAVE_LIB_CHACHA
help
SSSE3, AVX2, and AVX-512VL optimized implementations of the ChaCha20,
XChaCha20, and XChaCha12 stream ciphers.
+config CRYPTO_CHACHA_MIPS
+ tristate "ChaCha stream cipher algorithms (MIPS 32r2 optimized)"
+ depends on CPU_MIPS32_R2
+ select CRYPTO_SKCIPHER
+ select CRYPTO_ARCH_HAVE_LIB_CHACHA
+
config CRYPTO_SEED
tristate "SEED cipher algorithm"
select CRYPTO_ALGAPI
@@ -1462,7 +1520,7 @@ config CRYPTO_SERPENT
config CRYPTO_SERPENT_SSE2_X86_64
tristate "Serpent cipher algorithm (x86_64/SSE2)"
depends on X86 && 64BIT
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_GLUE_HELPER_X86
select CRYPTO_SERPENT
select CRYPTO_SIMD
@@ -1481,7 +1539,7 @@ config CRYPTO_SERPENT_SSE2_X86_64
config CRYPTO_SERPENT_SSE2_586
tristate "Serpent cipher algorithm (i586/SSE2)"
depends on X86 && !64BIT
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_GLUE_HELPER_X86
select CRYPTO_SERPENT
select CRYPTO_SIMD
@@ -1500,7 +1558,7 @@ config CRYPTO_SERPENT_SSE2_586
config CRYPTO_SERPENT_AVX_X86_64
tristate "Serpent cipher algorithm (x86_64/AVX)"
depends on X86 && 64BIT
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_GLUE_HELPER_X86
select CRYPTO_SERPENT
select CRYPTO_SIMD
@@ -1631,7 +1689,7 @@ config CRYPTO_TWOFISH_X86_64
config CRYPTO_TWOFISH_X86_64_3WAY
tristate "Twofish cipher algorithm (x86_64, 3-way parallel)"
depends on X86 && 64BIT
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_TWOFISH_COMMON
select CRYPTO_TWOFISH_X86_64
select CRYPTO_GLUE_HELPER_X86
@@ -1652,7 +1710,7 @@ config CRYPTO_TWOFISH_X86_64_3WAY
config CRYPTO_TWOFISH_AVX_X86_64
tristate "Twofish cipher algorithm (x86_64/AVX)"
depends on X86 && 64BIT
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_GLUE_HELPER_X86
select CRYPTO_SIMD
select CRYPTO_TWOFISH_COMMON
@@ -1803,7 +1861,7 @@ config CRYPTO_USER_API_HASH
config CRYPTO_USER_API_SKCIPHER
tristate "User-space interface for symmetric key cipher algorithms"
depends on NET
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_USER_API
help
This option enables the user-space interface for symmetric
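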
@@ -1822,7 +1880,7 @@ config CRYPTO_USER_API_AEAD
tristate "User-space interface for AEAD cipher algorithms"
depends on NET
select CRYPTO_AEAD
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_NULL
select CRYPTO_USER_API
help
@@ -1844,6 +1902,7 @@ config CRYPTO_STATS
config CRYPTO_HASH_INFO
bool
+source "lib/crypto/Kconfig"
source "drivers/crypto/Kconfig"
source "crypto/asymmetric_keys/Kconfig"
source "certs/Kconfig"
diff --git a/crypto/Makefile b/crypto/Makefile
index fcb1ee679782..4ca12b6044f7 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -14,11 +14,9 @@ crypto_algapi-y := algapi.o scatterwalk.o $(crypto_algapi-y)
obj-$(CONFIG_CRYPTO_ALGAPI2) += crypto_algapi.o
obj-$(CONFIG_CRYPTO_AEAD2) += aead.o
+obj-$(CONFIG_CRYPTO_AEAD2) += geniv.o
-crypto_blkcipher-y := ablkcipher.o
-crypto_blkcipher-y += blkcipher.o
-crypto_blkcipher-y += skcipher.o
-obj-$(CONFIG_CRYPTO_BLKCIPHER2) += crypto_blkcipher.o
+obj-$(CONFIG_CRYPTO_SKCIPHER2) += skcipher.o
obj-$(CONFIG_CRYPTO_SEQIV) += seqiv.o
obj-$(CONFIG_CRYPTO_ECHAINIV) += echainiv.o
@@ -74,6 +72,8 @@ obj-$(CONFIG_CRYPTO_STREEBOG) += streebog_generic.o
obj-$(CONFIG_CRYPTO_WP512) += wp512.o
CFLAGS_wp512.o := $(call cc-option,-fno-schedule-insns) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79149
obj-$(CONFIG_CRYPTO_TGR192) += tgr192.o
+obj-$(CONFIG_CRYPTO_BLAKE2B) += blake2b_generic.o
+obj-$(CONFIG_CRYPTO_BLAKE2S) += blake2s_generic.o
obj-$(CONFIG_CRYPTO_GF128MUL) += gf128mul.o
obj-$(CONFIG_CRYPTO_ECB) += ecb.o
obj-$(CONFIG_CRYPTO_CBC) += cbc.o
@@ -93,7 +93,7 @@ obj-$(CONFIG_CRYPTO_AEGIS128) += aegis128.o
aegis128-y := aegis128-core.o
ifeq ($(ARCH),arm)
-CFLAGS_aegis128-neon-inner.o += -ffreestanding -march=armv7-a -mfloat-abi=softfp
+CFLAGS_aegis128-neon-inner.o += -ffreestanding -march=armv8-a -mfloat-abi=softfp
CFLAGS_aegis128-neon-inner.o += -mfpu=crypto-neon-fp-armv8
aegis128-$(CONFIG_CRYPTO_AEGIS128_SIMD) += aegis128-neon.o aegis128-neon-inner.o
endif
@@ -166,6 +166,7 @@ obj-$(CONFIG_CRYPTO_ZSTD) += zstd.o
obj-$(CONFIG_CRYPTO_OFB) += ofb.o
obj-$(CONFIG_CRYPTO_ECC) += ecc.o
obj-$(CONFIG_CRYPTO_ESSIV) += essiv.o
+obj-$(CONFIG_CRYPTO_CURVE25519) += curve25519-generic.o
ecdh_generic-y += ecdh.o
ecdh_generic-y += ecdh_helper.o
diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
deleted file mode 100644
index 072b5646a0a3..000000000000
--- a/crypto/ablkcipher.c
+++ /dev/null
@@ -1,407 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Asynchronous block chaining cipher operations.
- *
- * This is the asynchronous version of blkcipher.c indicating completion
- * via a callback.
- *
- * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
- */
-
-#include <crypto/internal/skcipher.h>
-#include <linux/err.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/seq_file.h>
-#include <linux/cryptouser.h>
-#include <linux/compiler.h>
-#include <net/netlink.h>
-
-#include <crypto/scatterwalk.h>
-
-#include "internal.h"
-
-struct ablkcipher_buffer {
- struct list_head entry;
- struct scatter_walk dst;
- unsigned int len;
- void *data;
-};
-
-enum {
- ABLKCIPHER_WALK_SLOW = 1 << 0,
-};
-
-static inline void ablkcipher_buffer_write(struct ablkcipher_buffer *p)
-{
- scatterwalk_copychunks(p->data, &p->dst, p->len, 1);
-}
-
-void __ablkcipher_walk_complete(struct ablkcipher_walk *walk)
-{
- struct ablkcipher_buffer *p, *tmp;
-
- list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
- ablkcipher_buffer_write(p);
- list_del(&p->entry);
- kfree(p);
- }
-}
-EXPORT_SYMBOL_GPL(__ablkcipher_walk_complete);
-
-static inline void ablkcipher_queue_write(struct ablkcipher_walk *walk,
- struct ablkcipher_buffer *p)
-{
- p->dst = walk->out;
- list_add_tail(&p->entry, &walk->buffers);
-}
-
-/* Get a spot of the specified length that does not straddle a page.
- * The caller needs to ensure that there is enough space for this operation.
- */
-static inline u8 *ablkcipher_get_spot(u8 *start, unsigned int len)
-{
- u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);
-
- return max(start, end_page);
-}
-
-static inline void ablkcipher_done_slow(struct ablkcipher_walk *walk,
- unsigned int n)
-{
- for (;;) {
- unsigned int len_this_page = scatterwalk_pagelen(&walk->out);
-
- if (len_this_page > n)
- len_this_page = n;
- scatterwalk_advance(&walk->out, n);
- if (n == len_this_page)
- break;
- n -= len_this_page;
- scatterwalk_start(&walk->out, sg_next(walk->out.sg));
- }
-}
-
-static inline void ablkcipher_done_fast(struct ablkcipher_walk *walk,
- unsigned int n)
-{
- scatterwalk_advance(&walk->in, n);
- scatterwalk_advance(&walk->out, n);
-}
-
-static int ablkcipher_walk_next(struct ablkcipher_request *req,
- struct ablkcipher_walk *walk);
-
-int ablkcipher_walk_done(struct ablkcipher_request *req,
- struct ablkcipher_walk *walk, int err)
-{
- struct crypto_tfm *tfm = req->base.tfm;
- unsigned int n; /* bytes processed */
- bool more;
-
- if (unlikely(err < 0))
- goto finish;
-
- n = walk->nbytes - err;
- walk->total -= n;
- more = (walk->total != 0);
-
- if (likely(!(walk->flags & ABLKCIPHER_WALK_SLOW))) {
- ablkcipher_done_fast(walk, n);
- } else {
- if (WARN_ON(err)) {
- /* unexpected case; didn't process all bytes */
- err = -EINVAL;
- goto finish;
- }
- ablkcipher_done_slow(walk, n);
- }
-
- scatterwalk_done(&walk->in, 0, more);
- scatterwalk_done(&walk->out, 1, more);
-
- if (more) {
- crypto_yield(req->base.flags);
- return ablkcipher_walk_next(req, walk);
- }
- err = 0;
-finish:
- walk->nbytes = 0;
- if (walk->iv != req->info)
- memcpy(req->info, walk->iv, tfm->crt_ablkcipher.ivsize);
- kfree(walk->iv_buffer);
- return err;
-}
-EXPORT_SYMBOL_GPL(ablkcipher_walk_done);
-
-static inline int ablkcipher_next_slow(struct ablkcipher_request *req,
- struct ablkcipher_walk *walk,
- unsigned int bsize,
- unsigned int alignmask,
- void **src_p, void **dst_p)
-{
- unsigned aligned_bsize = ALIGN(bsize, alignmask + 1);
- struct ablkcipher_buffer *p;
- void *src, *dst, *base;
- unsigned int n;
-
- n = ALIGN(sizeof(struct ablkcipher_buffer), alignmask + 1);
- n += (aligned_bsize * 3 - (alignmask + 1) +
- (alignmask & ~(crypto_tfm_ctx_alignment() - 1)));
-
- p = kmalloc(n, GFP_ATOMIC);
- if (!p)
- return ablkcipher_walk_done(req, walk, -ENOMEM);
-
- base = p + 1;
-
- dst = (u8 *)ALIGN((unsigned long)base, alignmask + 1);
- src = dst = ablkcipher_get_spot(dst, bsize);
-
- p->len = bsize;
- p->data = dst;
-
- scatterwalk_copychunks(src, &walk->in, bsize, 0);
-
- ablkcipher_queue_write(walk, p);
-
- walk->nbytes = bsize;
- walk->flags |= ABLKCIPHER_WALK_SLOW;
-
- *src_p = src;
- *dst_p = dst;
-
- return 0;
-}
-
-static inline int ablkcipher_copy_iv(struct ablkcipher_walk *walk,
- struct crypto_tfm *tfm,
- unsigned int alignmask)
-{
- unsigned bs = walk->blocksize;
- unsigned int ivsize = tfm->crt_ablkcipher.ivsize;
- unsigned aligned_bs = ALIGN(bs, alignmask + 1);
- unsigned int size = aligned_bs * 2 + ivsize + max(aligned_bs, ivsize) -
- (alignmask + 1);
- u8 *iv;
-
- size += alignmask & ~(crypto_tfm_ctx_alignment() - 1);
- walk->iv_buffer = kmalloc(size, GFP_ATOMIC);
- if (!walk->iv_buffer)
- return -ENOMEM;
-
- iv = (u8 *)ALIGN((unsigned long)walk->iv_buffer, alignmask + 1);
- iv = ablkcipher_get_spot(iv, bs) + aligned_bs;
- iv = ablkcipher_get_spot(iv, bs) + aligned_bs;
- iv = ablkcipher_get_spot(iv, ivsize);
-
- walk->iv = memcpy(iv, walk->iv, ivsize);
- return 0;
-}
-
-static inline int ablkcipher_next_fast(struct ablkcipher_request *req,
- struct ablkcipher_walk *walk)
-{
- walk->src.page = scatterwalk_page(&walk->in);
- walk->src.offset = offset_in_page(walk->in.offset);
- walk->dst.page = scatterwalk_page(&walk->out);
- walk->dst.offset = offset_in_page(walk->out.offset);
-
- return 0;
-}
-
-static int ablkcipher_walk_next(struct ablkcipher_request *req,
- struct ablkcipher_walk *walk)
-{
- struct crypto_tfm *tfm = req->base.tfm;
- unsigned int alignmask, bsize, n;
- void *src, *dst;
- int err;
-
- alignmask = crypto_tfm_alg_alignmask(tfm);
- n = walk->total;
- if (unlikely(n < crypto_tfm_alg_blocksize(tfm))) {
- req->base.flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
- return ablkcipher_walk_done(req, walk, -EINVAL);
- }
-
- walk->flags &= ~ABLKCIPHER_WALK_SLOW;
- src = dst = NULL;
-
- bsize = min(walk->blocksize, n);
- n = scatterwalk_clamp(&walk->in, n);
- n = scatterwalk_clamp(&walk->out, n);
-
- if (n < bsize ||
- !scatterwalk_aligned(&walk->in, alignmask) ||
- !scatterwalk_aligned(&walk->out, alignmask)) {
- err = ablkcipher_next_slow(req, walk, bsize, alignmask,
- &src, &dst);
- goto set_phys_lowmem;
- }
-
- walk->nbytes = n;
-
- return ablkcipher_next_fast(req, walk);
-
-set_phys_lowmem:
- if (err >= 0) {
- walk->src.page = virt_to_page(src);
- walk->dst.page = virt_to_page(dst);
- walk->src.offset = ((unsigned long)src & (PAGE_SIZE - 1));
- walk->dst.offset = ((unsigned long)dst & (PAGE_SIZE - 1));
- }
-
- return err;
-}
-
-static int ablkcipher_walk_first(struct ablkcipher_request *req,
- struct ablkcipher_walk *walk)
-{
- struct crypto_tfm *tfm = req->base.tfm;
- unsigned int alignmask;
-
- alignmask = crypto_tfm_alg_alignmask(tfm);
- if (WARN_ON_ONCE(in_irq()))
- return -EDEADLK;
-
- walk->iv = req->info;
- walk->nbytes = walk->total;
- if (unlikely(!walk->total))
- return 0;
-
- walk->iv_buffer = NULL;
- if (unlikely(((unsigned long)walk->iv & alignmask))) {
- int err = ablkcipher_copy_iv(walk, tfm, alignmask);
-
- if (err)
- return err;
- }
-
- scatterwalk_start(&walk->in, walk->in.sg);
- scatterwalk_start(&walk->out, walk->out.sg);
-
- return ablkcipher_walk_next(req, walk);
-}
-
-int ablkcipher_walk_phys(struct ablkcipher_request *req,
- struct ablkcipher_walk *walk)
-{
- walk->blocksize = crypto_tfm_alg_blocksize(req->base.tfm);
- return ablkcipher_walk_first(req, walk);
-}
-EXPORT_SYMBOL_GPL(ablkcipher_walk_phys);
-
-static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key,
- unsigned int keylen)
-{
- struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm);
- unsigned long alignmask = crypto_ablkcipher_alignmask(tfm);
- int ret;
- u8 *buffer, *alignbuffer;
- unsigned long absize;
-
- absize = keylen + alignmask;
- buffer = kmalloc(absize, GFP_ATOMIC);
- if (!buffer)
- return -ENOMEM;
-
- alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
- memcpy(alignbuffer, key, keylen);
- ret = cipher->setkey(tfm, alignbuffer, keylen);
- memset(alignbuffer, 0, keylen);
- kfree(buffer);
- return ret;
-}
-
-static int setkey(struct crypto_ablkcipher *tfm, const u8 *key,
- unsigned int keylen)
-{
- struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm);
- unsigned long alignmask = crypto_ablkcipher_alignmask(tfm);
-
- if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
- crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
- }
-
- if ((unsigned long)key & alignmask)
- return setkey_unaligned(tfm, key, keylen);
-
- return cipher->setkey(tfm, key, keylen);
-}
-
-static unsigned int crypto_ablkcipher_ctxsize(struct crypto_alg *alg, u32 type,
- u32 mask)
-{
- return alg->cra_ctxsize;
-}
-
-static int crypto_init_ablkcipher_ops(struct crypto_tfm *tfm, u32 type,
- u32 mask)
-{
- struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;
- struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;
-
- if (alg->ivsize > PAGE_SIZE / 8)
- return -EINVAL;
-
- crt->setkey = setkey;
- crt->encrypt = alg->encrypt;
- crt->decrypt = alg->decrypt;
- crt->base = __crypto_ablkcipher_cast(tfm);
- crt->ivsize = alg->ivsize;
-
- return 0;
-}
-
-#ifdef CONFIG_NET
-static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
-{
- struct crypto_report_blkcipher rblkcipher;
-
- memset(&rblkcipher, 0, sizeof(rblkcipher));
-
- strscpy(rblkcipher.type, "ablkcipher", sizeof(rblkcipher.type));
- strscpy(rblkcipher.geniv, "<default>", sizeof(rblkcipher.geniv));
-
- rblkcipher.blocksize = alg->cra_blocksize;
- rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
- rblkcipher.max_keysize = alg->cra_ablkcipher.max_keysize;
- rblkcipher.ivsize = alg->cra_ablkcipher.ivsize;
-
- return nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
- sizeof(rblkcipher), &rblkcipher);
-}
-#else
-static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
-{
- return -ENOSYS;
-}
-#endif
-
-static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg)
- __maybe_unused;
-static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg)
-{
- struct ablkcipher_alg *ablkcipher = &alg->cra_ablkcipher;
-
- seq_printf(m, "type : ablkcipher\n");
- seq_printf(m, "async : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
- "yes" : "no");
- seq_printf(m, "blocksize : %u\n", alg->cra_blocksize);
- seq_printf(m, "min keysize : %u\n", ablkcipher->min_keysize);
- seq_printf(m, "max keysize : %u\n", ablkcipher->max_keysize);
- seq_printf(m, "ivsize : %u\n", ablkcipher->ivsize);
- seq_printf(m, "geniv : <default>\n");
-}
-
-const struct crypto_type crypto_ablkcipher_type = {
- .ctxsize = crypto_ablkcipher_ctxsize,
- .init = crypto_init_ablkcipher_ops,
-#ifdef CONFIG_PROC_FS
- .show = crypto_ablkcipher_show,
-#endif
- .report = crypto_ablkcipher_report,
-};
-EXPORT_SYMBOL_GPL(crypto_ablkcipher_type);
diff --git a/crypto/adiantum.c b/crypto/adiantum.c
index 395a3ddd3707..aded26092268 100644
--- a/crypto/adiantum.c
+++ b/crypto/adiantum.c
@@ -33,6 +33,7 @@
#include <crypto/b128ops.h>
#include <crypto/chacha.h>
#include <crypto/internal/hash.h>
+#include <crypto/internal/poly1305.h>
#include <crypto/internal/skcipher.h>
#include <crypto/nhpoly1305.h>
#include <crypto/scatterwalk.h>
@@ -242,11 +243,11 @@ static void adiantum_hash_header(struct skcipher_request *req)
BUILD_BUG_ON(sizeof(header) % POLY1305_BLOCK_SIZE != 0);
poly1305_core_blocks(&state, &tctx->header_hash_key,
- &header, sizeof(header) / POLY1305_BLOCK_SIZE);
+ &header, sizeof(header) / POLY1305_BLOCK_SIZE, 1);
BUILD_BUG_ON(TWEAK_SIZE % POLY1305_BLOCK_SIZE != 0);
poly1305_core_blocks(&state, &tctx->header_hash_key, req->iv,
- TWEAK_SIZE / POLY1305_BLOCK_SIZE);
+ TWEAK_SIZE / POLY1305_BLOCK_SIZE, 1);
poly1305_core_emit(&state, &rctx->header_hash);
}
diff --git a/crypto/aead.c b/crypto/aead.c
index ce035589cf57..47f16d139e8e 100644
--- a/crypto/aead.c
+++ b/crypto/aead.c
@@ -7,19 +7,14 @@
* Copyright (c) 2007-2015 Herbert Xu <herbert@gondor.apana.org.au>
*/
-#include <crypto/internal/geniv.h>
-#include <crypto/internal/rng.h>
-#include <crypto/null.h>
-#include <crypto/scatterwalk.h>
-#include <linux/err.h>
+#include <crypto/internal/aead.h>
+#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
-#include <linux/compiler.h>
#include <net/netlink.h>
#include "internal.h"
@@ -212,162 +207,6 @@ static const struct crypto_type crypto_aead_type = {
.tfmsize = offsetof(struct crypto_aead, base),
};
-static int aead_geniv_setkey(struct crypto_aead *tfm,
- const u8 *key, unsigned int keylen)
-{
- struct aead_geniv_ctx *ctx = crypto_aead_ctx(tfm);
-
- return crypto_aead_setkey(ctx->child, key, keylen);
-}
-
-static int aead_geniv_setauthsize(struct crypto_aead *tfm,
- unsigned int authsize)
-{
- struct aead_geniv_ctx *ctx = crypto_aead_ctx(tfm);
-
- return crypto_aead_setauthsize(ctx->child, authsize);
-}
-
-struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl,
- struct rtattr **tb, u32 type, u32 mask)
-{
- const char *name;
- struct crypto_aead_spawn *spawn;
- struct crypto_attr_type *algt;
- struct aead_instance *inst;
- struct aead_alg *alg;
- unsigned int ivsize;
- unsigned int maxauthsize;
- int err;
-
- algt = crypto_get_attr_type(tb);
- if (IS_ERR(algt))
- return ERR_CAST(algt);
-
- if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
- return ERR_PTR(-EINVAL);
-
- name = crypto_attr_alg_name(tb[1]);
- if (IS_ERR(name))
- return ERR_CAST(name);
-
- inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
- if (!inst)
- return ERR_PTR(-ENOMEM);
-
- spawn = aead_instance_ctx(inst);
-
- /* Ignore async algorithms if necessary. */
- mask |= crypto_requires_sync(algt->type, algt->mask);
-
- crypto_set_aead_spawn(spawn, aead_crypto_instance(inst));
- err = crypto_grab_aead(spawn, name, type, mask);
- if (err)
- goto err_free_inst;
-
- alg = crypto_spawn_aead_alg(spawn);
-
- ivsize = crypto_aead_alg_ivsize(alg);
- maxauthsize = crypto_aead_alg_maxauthsize(alg);
-
- err = -EINVAL;
- if (ivsize < sizeof(u64))
- goto err_drop_alg;
-
- err = -ENAMETOOLONG;
- if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
- "%s(%s)", tmpl->name, alg->base.cra_name) >=
- CRYPTO_MAX_ALG_NAME)
- goto err_drop_alg;
- if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
- "%s(%s)", tmpl->name, alg->base.cra_driver_name) >=
- CRYPTO_MAX_ALG_NAME)
- goto err_drop_alg;
-
- inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
- inst->alg.base.cra_priority = alg->base.cra_priority;
- inst->alg.base.cra_blocksize = alg->base.cra_blocksize;
- inst->alg.base.cra_alignmask = alg->base.cra_alignmask;
- inst->alg.base.cra_ctxsize = sizeof(struct aead_geniv_ctx);
-
- inst->alg.setkey = aead_geniv_setkey;
- inst->alg.setauthsize = aead_geniv_setauthsize;
-
- inst->alg.ivsize = ivsize;
- inst->alg.maxauthsize = maxauthsize;
-
-out:
- return inst;
-
-err_drop_alg:
- crypto_drop_aead(spawn);
-err_free_inst:
- kfree(inst);
- inst = ERR_PTR(err);
- goto out;
-}
-EXPORT_SYMBOL_GPL(aead_geniv_alloc);
-
-void aead_geniv_free(struct aead_instance *inst)
-{
- crypto_drop_aead(aead_instance_ctx(inst));
- kfree(inst);
-}
-EXPORT_SYMBOL_GPL(aead_geniv_free);
-
-int aead_init_geniv(struct crypto_aead *aead)
-{
- struct aead_geniv_ctx *ctx = crypto_aead_ctx(aead);
- struct aead_instance *inst = aead_alg_instance(aead);
- struct crypto_aead *child;
- int err;
-
- spin_lock_init(&ctx->lock);
-
- err = crypto_get_default_rng();
- if (err)
- goto out;
-
- err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt,
- crypto_aead_ivsize(aead));
- crypto_put_default_rng();
- if (err)
- goto out;
-
- ctx->sknull = crypto_get_default_null_skcipher();
- err = PTR_ERR(ctx->sknull);
- if (IS_ERR(ctx->sknull))
- goto out;
-
- child = crypto_spawn_aead(aead_instance_ctx(inst));
- err = PTR_ERR(child);
- if (IS_ERR(child))
- goto drop_null;
-
- ctx->child = child;
- crypto_aead_set_reqsize(aead, crypto_aead_reqsize(child) +
- sizeof(struct aead_request));
-
- err = 0;
-
-out:
- return err;
-
-drop_null:
- crypto_put_default_null_skcipher();
- goto out;
-}
-EXPORT_SYMBOL_GPL(aead_init_geniv);
-
-void aead_exit_geniv(struct crypto_aead *tfm)
-{
- struct aead_geniv_ctx *ctx = crypto_aead_ctx(tfm);
-
- crypto_free_aead(ctx->child);
- crypto_put_default_null_skcipher();
-}
-EXPORT_SYMBOL_GPL(aead_exit_geniv);
-
int crypto_grab_aead(struct crypto_aead_spawn *spawn, const char *name,
u32 type, u32 mask)
{
diff --git a/crypto/aegis128-core.c b/crypto/aegis128-core.c
index 80e73611bd5c..71c11cb5bad1 100644
--- a/crypto/aegis128-core.c
+++ b/crypto/aegis128-core.c
@@ -13,6 +13,7 @@
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/init.h>
+#include <linux/jump_label.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
@@ -35,15 +36,7 @@ struct aegis_ctx {
union aegis_block key;
};
-struct aegis128_ops {
- int (*skcipher_walk_init)(struct skcipher_walk *walk,
- struct aead_request *req, bool atomic);
-
- void (*crypt_chunk)(struct aegis_state *state, u8 *dst,
- const u8 *src, unsigned int size);
-};
-
-static bool have_simd;
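+/* Enabled once at init; the static key avoids a memory load in the fast path */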
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_simd);
static const union aegis_block crypto_aegis_const[2] = {
{ .words64 = {
@@ -59,7 +52,7 @@ static const union aegis_block crypto_aegis_const[2] = {
static bool aegis128_do_simd(void)
{
#ifdef CONFIG_CRYPTO_AEGIS128_SIMD
- if (have_simd)
+ if (static_branch_likely(&have_simd))
return crypto_simd_usable();
#endif
return false;
@@ -67,10 +60,16 @@ static bool aegis128_do_simd(void)
bool crypto_aegis128_have_simd(void);
void crypto_aegis128_update_simd(struct aegis_state *state, const void *msg);
+void crypto_aegis128_init_simd(struct aegis_state *state,
+ const union aegis_block *key,
+ const u8 *iv);
void crypto_aegis128_encrypt_chunk_simd(struct aegis_state *state, u8 *dst,
const u8 *src, unsigned int size);
void crypto_aegis128_decrypt_chunk_simd(struct aegis_state *state, u8 *dst,
const u8 *src, unsigned int size);
+void crypto_aegis128_final_simd(struct aegis_state *state,
+ union aegis_block *tag_xor,
+ u64 assoclen, u64 cryptlen);
static void crypto_aegis128_update(struct aegis_state *state)
{
@@ -323,25 +322,27 @@ static void crypto_aegis128_process_ad(struct aegis_state *state,
}
}
-static void crypto_aegis128_process_crypt(struct aegis_state *state,
- struct aead_request *req,
- const struct aegis128_ops *ops)
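+/*
+ * Always inlined so that each call site passes a compile-time-constant
+ * 'crypt' routine, turning the old indirect aegis128_ops calls into
+ * direct calls.
+ */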
+static __always_inline
+int crypto_aegis128_process_crypt(struct aegis_state *state,
+ struct aead_request *req,
+ struct skcipher_walk *walk,
+ void (*crypt)(struct aegis_state *state,
+ u8 *dst, const u8 *src,
+ unsigned int size))
{
- struct skcipher_walk walk;
+ int err = 0;
- ops->skcipher_walk_init(&walk, req, false);
+ while (walk->nbytes) {
+ unsigned int nbytes = walk->nbytes;
- while (walk.nbytes) {
- unsigned int nbytes = walk.nbytes;
+ if (nbytes < walk->total)
+ nbytes = round_down(nbytes, walk->stride);
- if (nbytes < walk.total)
- nbytes = round_down(nbytes, walk.stride);
+ crypt(state, walk->dst.virt.addr, walk->src.virt.addr, nbytes);
- ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr,
- nbytes);
-
- skcipher_walk_done(&walk, walk.nbytes - nbytes);
+ err = skcipher_walk_done(walk, walk->nbytes - nbytes);
}
+ return err;
}
static void crypto_aegis128_final(struct aegis_state *state,
@@ -390,39 +391,31 @@ static int crypto_aegis128_setauthsize(struct crypto_aead *tfm,
return 0;
}
-static void crypto_aegis128_crypt(struct aead_request *req,
- union aegis_block *tag_xor,
- unsigned int cryptlen,
- const struct aegis128_ops *ops)
-{
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct aegis_ctx *ctx = crypto_aead_ctx(tfm);
- struct aegis_state state;
-
- crypto_aegis128_init(&state, &ctx->key, req->iv);
- crypto_aegis128_process_ad(&state, req->src, req->assoclen);
- crypto_aegis128_process_crypt(&state, req, ops);
- crypto_aegis128_final(&state, tag_xor, req->assoclen, cryptlen);
-}
-
static int crypto_aegis128_encrypt(struct aead_request *req)
{
- const struct aegis128_ops *ops = &(struct aegis128_ops){
- .skcipher_walk_init = skcipher_walk_aead_encrypt,
- .crypt_chunk = crypto_aegis128_encrypt_chunk,
- };
-
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
union aegis_block tag = {};
unsigned int authsize = crypto_aead_authsize(tfm);
+ struct aegis_ctx *ctx = crypto_aead_ctx(tfm);
unsigned int cryptlen = req->cryptlen;
+ struct skcipher_walk walk;
+ struct aegis_state state;
- if (aegis128_do_simd())
- ops = &(struct aegis128_ops){
- .skcipher_walk_init = skcipher_walk_aead_encrypt,
- .crypt_chunk = crypto_aegis128_encrypt_chunk_simd };
-
- crypto_aegis128_crypt(req, &tag, cryptlen, ops);
+ skcipher_walk_aead_encrypt(&walk, req, false);
+ if (aegis128_do_simd()) {
+ crypto_aegis128_init_simd(&state, &ctx->key, req->iv);
+ crypto_aegis128_process_ad(&state, req->src, req->assoclen);
+ crypto_aegis128_process_crypt(&state, req, &walk,
+ crypto_aegis128_encrypt_chunk_simd);
+ crypto_aegis128_final_simd(&state, &tag, req->assoclen,
+ cryptlen);
+ } else {
+ crypto_aegis128_init(&state, &ctx->key, req->iv);
+ crypto_aegis128_process_ad(&state, req->src, req->assoclen);
+ crypto_aegis128_process_crypt(&state, req, &walk,
+ crypto_aegis128_encrypt_chunk);
+ crypto_aegis128_final(&state, &tag, req->assoclen, cryptlen);
+ }
scatterwalk_map_and_copy(tag.bytes, req->dst, req->assoclen + cryptlen,
authsize, 1);
@@ -431,26 +424,33 @@ static int crypto_aegis128_encrypt(struct aead_request *req)
static int crypto_aegis128_decrypt(struct aead_request *req)
{
- const struct aegis128_ops *ops = &(struct aegis128_ops){
- .skcipher_walk_init = skcipher_walk_aead_decrypt,
- .crypt_chunk = crypto_aegis128_decrypt_chunk,
- };
static const u8 zeros[AEGIS128_MAX_AUTH_SIZE] = {};
-
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
union aegis_block tag;
unsigned int authsize = crypto_aead_authsize(tfm);
unsigned int cryptlen = req->cryptlen - authsize;
+ struct aegis_ctx *ctx = crypto_aead_ctx(tfm);
+ struct skcipher_walk walk;
+ struct aegis_state state;
scatterwalk_map_and_copy(tag.bytes, req->src, req->assoclen + cryptlen,
authsize, 0);
- if (aegis128_do_simd())
- ops = &(struct aegis128_ops){
- .skcipher_walk_init = skcipher_walk_aead_decrypt,
- .crypt_chunk = crypto_aegis128_decrypt_chunk_simd };
-
- crypto_aegis128_crypt(req, &tag, cryptlen, ops);
+ skcipher_walk_aead_decrypt(&walk, req, false);
+ if (aegis128_do_simd()) {
+ crypto_aegis128_init_simd(&state, &ctx->key, req->iv);
+ crypto_aegis128_process_ad(&state, req->src, req->assoclen);
+ crypto_aegis128_process_crypt(&state, req, &walk,
+ crypto_aegis128_decrypt_chunk_simd);
+ crypto_aegis128_final_simd(&state, &tag, req->assoclen,
+ cryptlen);
+ } else {
+ crypto_aegis128_init(&state, &ctx->key, req->iv);
+ crypto_aegis128_process_ad(&state, req->src, req->assoclen);
+ crypto_aegis128_process_crypt(&state, req, &walk,
+ crypto_aegis128_decrypt_chunk);
+ crypto_aegis128_final(&state, &tag, req->assoclen, cryptlen);
+ }
return crypto_memneq(tag.bytes, zeros, authsize) ? -EBADMSG : 0;
}
@@ -481,8 +481,9 @@ static struct aead_alg crypto_aegis128_alg = {
static int __init crypto_aegis128_module_init(void)
{
- if (IS_ENABLED(CONFIG_CRYPTO_AEGIS128_SIMD))
- have_simd = crypto_aegis128_have_simd();
+ if (IS_ENABLED(CONFIG_CRYPTO_AEGIS128_SIMD) &&
+ crypto_aegis128_have_simd())
+ static_branch_enable(&have_simd);
return crypto_register_aead(&crypto_aegis128_alg);
}
diff --git a/crypto/aegis128-neon-inner.c b/crypto/aegis128-neon-inner.c
index f05310ca22aa..2a660ac1bc3a 100644
--- a/crypto/aegis128-neon-inner.c
+++ b/crypto/aegis128-neon-inner.c
@@ -132,6 +132,36 @@ void preload_sbox(void)
:: "r"(crypto_aes_sbox));
}
+void crypto_aegis128_init_neon(void *state, const void *key, const void *iv)
+{
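+	/*
+	 * const0/const1 are the fixed AEGIS initialization constants:
+	 * the Fibonacci sequence reduced modulo 256.
+	 */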
+ static const uint8_t const0[] = {
+ 0x00, 0x01, 0x01, 0x02, 0x03, 0x05, 0x08, 0x0d,
+ 0x15, 0x22, 0x37, 0x59, 0x90, 0xe9, 0x79, 0x62,
+ };
+ static const uint8_t const1[] = {
+ 0xdb, 0x3d, 0x18, 0x55, 0x6d, 0xc2, 0x2f, 0xf1,
+ 0x20, 0x11, 0x31, 0x42, 0x73, 0xb5, 0x28, 0xdd,
+ };
+ uint8x16_t k = vld1q_u8(key);
+ uint8x16_t kiv = k ^ vld1q_u8(iv);
+ struct aegis128_state st = {{
+ kiv,
+ vld1q_u8(const1),
+ vld1q_u8(const0),
+ k ^ vld1q_u8(const0),
+ k ^ vld1q_u8(const1),
+ }};
+ int i;
+
+ preload_sbox();
+
+ for (i = 0; i < 5; i++) {
+ st = aegis128_update_neon(st, k);
+ st = aegis128_update_neon(st, kiv);
+ }
+ aegis128_save_state_neon(st, state);
+}
+
void crypto_aegis128_update_neon(void *state, const void *msg)
{
struct aegis128_state st = aegis128_load_state_neon(state);
@@ -210,3 +240,23 @@ void crypto_aegis128_decrypt_chunk_neon(void *state, void *dst, const void *src,
aegis128_save_state_neon(st, state);
}
+
+void crypto_aegis128_final_neon(void *state, void *tag_xor, uint64_t assoclen,
+ uint64_t cryptlen)
+{
+ struct aegis128_state st = aegis128_load_state_neon(state);
+ uint8x16_t v;
+ int i;
+
+ preload_sbox();
+
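+	/* fold the AD and message lengths, in bits, into state word 3 */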
+ v = st.v[3] ^ (uint8x16_t)vcombine_u64(vmov_n_u64(8 * assoclen),
+ vmov_n_u64(8 * cryptlen));
+
+ for (i = 0; i < 7; i++)
+ st = aegis128_update_neon(st, v);
+
+ v = vld1q_u8(tag_xor);
+ v ^= st.v[0] ^ st.v[1] ^ st.v[2] ^ st.v[3] ^ st.v[4];
+ vst1q_u8(tag_xor, v);
+}
diff --git a/crypto/aegis128-neon.c b/crypto/aegis128-neon.c
index 751f9c195aa4..8271b1fa0fbc 100644
--- a/crypto/aegis128-neon.c
+++ b/crypto/aegis128-neon.c
@@ -8,11 +8,14 @@
#include "aegis.h"
+void crypto_aegis128_init_neon(void *state, const void *key, const void *iv);
void crypto_aegis128_update_neon(void *state, const void *msg);
void crypto_aegis128_encrypt_chunk_neon(void *state, void *dst, const void *src,
unsigned int size);
void crypto_aegis128_decrypt_chunk_neon(void *state, void *dst, const void *src,
unsigned int size);
+void crypto_aegis128_final_neon(void *state, void *tag_xor, uint64_t assoclen,
+ uint64_t cryptlen);
int aegis128_have_aes_insn __ro_after_init;
@@ -25,6 +28,15 @@ bool crypto_aegis128_have_simd(void)
return IS_ENABLED(CONFIG_ARM64);
}
+void crypto_aegis128_init_simd(union aegis_block *state,
+ const union aegis_block *key,
+ const u8 *iv)
+{
+ kernel_neon_begin();
+ crypto_aegis128_init_neon(state, key, iv);
+ kernel_neon_end();
+}
+
void crypto_aegis128_update_simd(union aegis_block *state, const void *msg)
{
kernel_neon_begin();
@@ -47,3 +59,12 @@ void crypto_aegis128_decrypt_chunk_simd(union aegis_block *state, u8 *dst,
crypto_aegis128_decrypt_chunk_neon(state, dst, src, size);
kernel_neon_end();
}
+
+void crypto_aegis128_final_simd(union aegis_block *state,
+ union aegis_block *tag_xor,
+ u64 assoclen, u64 cryptlen)
+{
+ kernel_neon_begin();
+ crypto_aegis128_final_neon(state, tag_xor, assoclen, cryptlen);
+ kernel_neon_end();
+}
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index 879cf23f7489..0dceaabc6321 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -1043,7 +1043,7 @@ void af_alg_async_cb(struct crypto_async_request *_req, int err)
af_alg_free_resources(areq);
sock_put(sk);
- iocb->ki_complete(iocb, err ? err : resultlen, 0);
+ iocb->ki_complete(iocb, err ? err : (int)resultlen, 0);
}
EXPORT_SYMBOL_GPL(af_alg_async_cb);
diff --git a/crypto/algapi.c b/crypto/algapi.c
index de30ddc952d8..b052f38edba6 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -1052,32 +1052,6 @@ void crypto_stats_get(struct crypto_alg *alg)
}
EXPORT_SYMBOL_GPL(crypto_stats_get);
-void crypto_stats_ablkcipher_encrypt(unsigned int nbytes, int ret,
- struct crypto_alg *alg)
-{
- if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
- atomic64_inc(&alg->stats.cipher.err_cnt);
- } else {
- atomic64_inc(&alg->stats.cipher.encrypt_cnt);
- atomic64_add(nbytes, &alg->stats.cipher.encrypt_tlen);
- }
- crypto_alg_put(alg);
-}
-EXPORT_SYMBOL_GPL(crypto_stats_ablkcipher_encrypt);
-
-void crypto_stats_ablkcipher_decrypt(unsigned int nbytes, int ret,
- struct crypto_alg *alg)
-{
- if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
- atomic64_inc(&alg->stats.cipher.err_cnt);
- } else {
- atomic64_inc(&alg->stats.cipher.decrypt_cnt);
- atomic64_add(nbytes, &alg->stats.cipher.decrypt_tlen);
- }
- crypto_alg_put(alg);
-}
-EXPORT_SYMBOL_GPL(crypto_stats_ablkcipher_decrypt);
-
void crypto_stats_aead_encrypt(unsigned int cryptlen, struct crypto_alg *alg,
int ret)
{
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
index c1601edd70e3..e2c8ab408bed 100644
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -56,7 +56,7 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
struct alg_sock *pask = alg_sk(psk);
struct af_alg_ctx *ctx = ask->private;
struct crypto_skcipher *tfm = pask->private;
- unsigned int bs = crypto_skcipher_blocksize(tfm);
+ unsigned int bs = crypto_skcipher_chunksize(tfm);
struct af_alg_async_req *areq;
int err = 0;
size_t len = 0;
diff --git a/crypto/api.c b/crypto/api.c
index d8ba54142620..55bca28df92d 100644
--- a/crypto/api.c
+++ b/crypto/api.c
@@ -406,7 +406,7 @@ EXPORT_SYMBOL_GPL(__crypto_alloc_tfm);
*
* The returned transform is of a non-determinate type. Most people
* should use one of the more specific allocation functions such as
- * crypto_alloc_blkcipher.
+ * crypto_alloc_skcipher().
*
* In case of error the return value is an error pointer.
*/
@@ -608,3 +608,4 @@ EXPORT_SYMBOL_GPL(crypto_req_done);
MODULE_DESCRIPTION("Cryptographic core API");
MODULE_LICENSE("GPL");
+MODULE_SOFTDEP("pre: cryptomgr");
diff --git a/crypto/asymmetric_keys/asym_tpm.c b/crypto/asymmetric_keys/asym_tpm.c
index 76d2ce3a1b5b..d16d893bd195 100644
--- a/crypto/asymmetric_keys/asym_tpm.c
+++ b/crypto/asymmetric_keys/asym_tpm.c
@@ -13,7 +13,7 @@
#include <crypto/sha.h>
#include <asm/unaligned.h>
#include <keys/asymmetric-subtype.h>
-#include <keys/trusted.h>
+#include <keys/trusted_tpm.h>
#include <crypto/asym_tpm_subtype.h>
#include <crypto/public_key.h>
@@ -21,10 +21,6 @@
#define TPM_ORD_LOADKEY2 65
#define TPM_ORD_UNBIND 30
#define TPM_ORD_SIGN 60
-#define TPM_LOADKEY2_SIZE 59
-#define TPM_FLUSHSPECIFIC_SIZE 18
-#define TPM_UNBIND_SIZE 63
-#define TPM_SIGN_SIZE 63
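+/* Command sizes are tracked by struct tpm_buf, which updates the header length on append */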
#define TPM_RT_KEY 0x00000001
@@ -68,16 +64,13 @@ static int tpm_loadkey2(struct tpm_buf *tb,
return ret;
/* build the request buffer */
- INIT_BUF(tb);
- store16(tb, TPM_TAG_RQU_AUTH1_COMMAND);
- store32(tb, TPM_LOADKEY2_SIZE + keybloblen);
- store32(tb, TPM_ORD_LOADKEY2);
- store32(tb, keyhandle);
- storebytes(tb, keyblob, keybloblen);
- store32(tb, authhandle);
- storebytes(tb, nonceodd, TPM_NONCE_SIZE);
- store8(tb, cont);
- storebytes(tb, authdata, SHA1_DIGEST_SIZE);
+ tpm_buf_reset(tb, TPM_TAG_RQU_AUTH1_COMMAND, TPM_ORD_LOADKEY2);
+ tpm_buf_append_u32(tb, keyhandle);
+ tpm_buf_append(tb, keyblob, keybloblen);
+ tpm_buf_append_u32(tb, authhandle);
+ tpm_buf_append(tb, nonceodd, TPM_NONCE_SIZE);
+ tpm_buf_append_u8(tb, cont);
+ tpm_buf_append(tb, authdata, SHA1_DIGEST_SIZE);
ret = trusted_tpm_send(tb->data, MAX_BUF_SIZE);
if (ret < 0) {
@@ -101,12 +94,9 @@ static int tpm_loadkey2(struct tpm_buf *tb,
*/
static int tpm_flushspecific(struct tpm_buf *tb, uint32_t handle)
{
- INIT_BUF(tb);
- store16(tb, TPM_TAG_RQU_COMMAND);
- store32(tb, TPM_FLUSHSPECIFIC_SIZE);
- store32(tb, TPM_ORD_FLUSHSPECIFIC);
- store32(tb, handle);
- store32(tb, TPM_RT_KEY);
+ tpm_buf_reset(tb, TPM_TAG_RQU_COMMAND, TPM_ORD_FLUSHSPECIFIC);
+ tpm_buf_append_u32(tb, handle);
+ tpm_buf_append_u32(tb, TPM_RT_KEY);
return trusted_tpm_send(tb->data, MAX_BUF_SIZE);
}
@@ -155,17 +145,14 @@ static int tpm_unbind(struct tpm_buf *tb,
return ret;
/* build the request buffer */
- INIT_BUF(tb);
- store16(tb, TPM_TAG_RQU_AUTH1_COMMAND);
- store32(tb, TPM_UNBIND_SIZE + bloblen);
- store32(tb, TPM_ORD_UNBIND);
- store32(tb, keyhandle);
- store32(tb, bloblen);
- storebytes(tb, blob, bloblen);
- store32(tb, authhandle);
- storebytes(tb, nonceodd, TPM_NONCE_SIZE);
- store8(tb, cont);
- storebytes(tb, authdata, SHA1_DIGEST_SIZE);
+ tpm_buf_reset(tb, TPM_TAG_RQU_AUTH1_COMMAND, TPM_ORD_UNBIND);
+ tpm_buf_append_u32(tb, keyhandle);
+ tpm_buf_append_u32(tb, bloblen);
+ tpm_buf_append(tb, blob, bloblen);
+ tpm_buf_append_u32(tb, authhandle);
+ tpm_buf_append(tb, nonceodd, TPM_NONCE_SIZE);
+ tpm_buf_append_u8(tb, cont);
+ tpm_buf_append(tb, authdata, SHA1_DIGEST_SIZE);
ret = trusted_tpm_send(tb->data, MAX_BUF_SIZE);
if (ret < 0) {
@@ -241,17 +228,14 @@ static int tpm_sign(struct tpm_buf *tb,
return ret;
/* build the request buffer */
- INIT_BUF(tb);
- store16(tb, TPM_TAG_RQU_AUTH1_COMMAND);
- store32(tb, TPM_SIGN_SIZE + bloblen);
- store32(tb, TPM_ORD_SIGN);
- store32(tb, keyhandle);
- store32(tb, bloblen);
- storebytes(tb, blob, bloblen);
- store32(tb, authhandle);
- storebytes(tb, nonceodd, TPM_NONCE_SIZE);
- store8(tb, cont);
- storebytes(tb, authdata, SHA1_DIGEST_SIZE);
+ tpm_buf_reset(tb, TPM_TAG_RQU_AUTH1_COMMAND, TPM_ORD_SIGN);
+ tpm_buf_append_u32(tb, keyhandle);
+ tpm_buf_append_u32(tb, bloblen);
+ tpm_buf_append(tb, blob, bloblen);
+ tpm_buf_append_u32(tb, authhandle);
+ tpm_buf_append(tb, nonceodd, TPM_NONCE_SIZE);
+ tpm_buf_append_u8(tb, cont);
+ tpm_buf_append(tb, authdata, SHA1_DIGEST_SIZE);
ret = trusted_tpm_send(tb->data, MAX_BUF_SIZE);
if (ret < 0) {
@@ -519,7 +503,7 @@ static int tpm_key_decrypt(struct tpm_key *tk,
struct kernel_pkey_params *params,
const void *in, void *out)
{
- struct tpm_buf *tb;
+ struct tpm_buf tb;
uint32_t keyhandle;
uint8_t srkauth[SHA1_DIGEST_SIZE];
uint8_t keyauth[SHA1_DIGEST_SIZE];
@@ -533,14 +517,14 @@ static int tpm_key_decrypt(struct tpm_key *tk,
if (strcmp(params->encoding, "pkcs1"))
return -ENOPKG;
- tb = kzalloc(sizeof(*tb), GFP_KERNEL);
- if (!tb)
- return -ENOMEM;
+ r = tpm_buf_init(&tb, 0, 0);
+ if (r)
+ return r;
/* TODO: Handle a non-all zero SRK authorization */
memset(srkauth, 0, sizeof(srkauth));
- r = tpm_loadkey2(tb, SRKHANDLE, srkauth,
+ r = tpm_loadkey2(&tb, SRKHANDLE, srkauth,
tk->blob, tk->blob_len, &keyhandle);
if (r < 0) {
pr_devel("loadkey2 failed (%d)\n", r);
@@ -550,16 +534,16 @@ static int tpm_key_decrypt(struct tpm_key *tk,
/* TODO: Handle a non-all zero key authorization */
memset(keyauth, 0, sizeof(keyauth));
- r = tpm_unbind(tb, keyhandle, keyauth,
+ r = tpm_unbind(&tb, keyhandle, keyauth,
in, params->in_len, out, params->out_len);
if (r < 0)
pr_devel("tpm_unbind failed (%d)\n", r);
- if (tpm_flushspecific(tb, keyhandle) < 0)
+ if (tpm_flushspecific(&tb, keyhandle) < 0)
pr_devel("flushspecific failed (%d)\n", r);
error:
- kzfree(tb);
+ tpm_buf_destroy(&tb);
pr_devel("<==%s() = %d\n", __func__, r);
return r;
}
@@ -643,7 +627,7 @@ static int tpm_key_sign(struct tpm_key *tk,
struct kernel_pkey_params *params,
const void *in, void *out)
{
- struct tpm_buf *tb;
+ struct tpm_buf tb;
uint32_t keyhandle;
uint8_t srkauth[SHA1_DIGEST_SIZE];
uint8_t keyauth[SHA1_DIGEST_SIZE];
@@ -681,15 +665,14 @@ static int tpm_key_sign(struct tpm_key *tk,
goto error_free_asn1_wrapped;
}
- r = -ENOMEM;
- tb = kzalloc(sizeof(*tb), GFP_KERNEL);
- if (!tb)
+ r = tpm_buf_init(&tb, 0, 0);
+ if (r)
goto error_free_asn1_wrapped;
/* TODO: Handle a non-all zero SRK authorization */
memset(srkauth, 0, sizeof(srkauth));
- r = tpm_loadkey2(tb, SRKHANDLE, srkauth,
+ r = tpm_loadkey2(&tb, SRKHANDLE, srkauth,
tk->blob, tk->blob_len, &keyhandle);
if (r < 0) {
pr_devel("loadkey2 failed (%d)\n", r);
@@ -699,15 +682,15 @@ static int tpm_key_sign(struct tpm_key *tk,
/* TODO: Handle a non-all zero key authorization */
memset(keyauth, 0, sizeof(keyauth));
- r = tpm_sign(tb, keyhandle, keyauth, in, in_len, out, params->out_len);
+ r = tpm_sign(&tb, keyhandle, keyauth, in, in_len, out, params->out_len);
if (r < 0)
pr_devel("tpm_sign failed (%d)\n", r);
- if (tpm_flushspecific(tb, keyhandle) < 0)
+ if (tpm_flushspecific(&tb, keyhandle) < 0)
pr_devel("flushspecific failed (%d)\n", r);
error_free_tb:
- kzfree(tb);
+ tpm_buf_destroy(&tb);
error_free_asn1_wrapped:
kfree(asn1_wrapped);
pr_devel("<==%s() = %d\n", __func__, r);
diff --git a/crypto/blake2b_generic.c b/crypto/blake2b_generic.c
new file mode 100644
index 000000000000..d04b1788dc42
--- /dev/null
+++ b/crypto/blake2b_generic.c
@@ -0,0 +1,320 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR Apache-2.0)
+/*
+ * BLAKE2b reference source code package - reference C implementations
+ *
+ * Copyright 2012, Samuel Neves <sneves@dei.uc.pt>. You may use this under the
+ * terms of the CC0, the OpenSSL Licence, or the Apache Public License 2.0, at
+ * your option. The terms of these licenses can be found at:
+ *
+ * - CC0 1.0 Universal : http://creativecommons.org/publicdomain/zero/1.0
+ * - OpenSSL license : https://www.openssl.org/source/license.html
+ * - Apache 2.0 : http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * More information about the BLAKE2 hash function can be found at
+ * https://blake2.net.
+ *
+ * Note: the original sources have been modified for inclusion in the Linux
+ * kernel: coding style adjustments, use of generic helpers, and simplified
+ * error handling.
+ */
+
+#include <asm/unaligned.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <crypto/internal/hash.h>
+
+#define BLAKE2B_160_DIGEST_SIZE (160 / 8)
+#define BLAKE2B_256_DIGEST_SIZE (256 / 8)
+#define BLAKE2B_384_DIGEST_SIZE (384 / 8)
+#define BLAKE2B_512_DIGEST_SIZE (512 / 8)
+
+enum blake2b_constant {
+ BLAKE2B_BLOCKBYTES = 128,
+ BLAKE2B_KEYBYTES = 64,
+};
+
+struct blake2b_state {
+ u64 h[8];
+ u64 t[2];
+ u64 f[2];
+ u8 buf[BLAKE2B_BLOCKBYTES];
+ size_t buflen;
+};
+
+static const u64 blake2b_IV[8] = {
+ 0x6a09e667f3bcc908ULL, 0xbb67ae8584caa73bULL,
+ 0x3c6ef372fe94f82bULL, 0xa54ff53a5f1d36f1ULL,
+ 0x510e527fade682d1ULL, 0x9b05688c2b3e6c1fULL,
+ 0x1f83d9abfb41bd6bULL, 0x5be0cd19137e2179ULL
+};
+
+static const u8 blake2b_sigma[12][16] = {
+ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 },
+ { 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 },
+ { 11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4 },
+ { 7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8 },
+ { 9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13 },
+ { 2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9 },
+ { 12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11 },
+ { 13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10 },
+ { 6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5 },
+ { 10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13, 0 },
+ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 },
+ { 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 }
+};
+
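+/* 128-bit message byte counter: t[1] takes the carry out of t[0] */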
+static void blake2b_increment_counter(struct blake2b_state *S, const u64 inc)
+{
+ S->t[0] += inc;
+ S->t[1] += (S->t[0] < inc);
+}
+
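+/*
+ * G is the BLAKE2b mixing function: it mixes two message words (selected
+ * via the blake2b_sigma schedule) into four state words, using the
+ * rotation constants 32, 24, 16 and 63.
+ */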
+#define G(r,i,a,b,c,d) \
+ do { \
+ a = a + b + m[blake2b_sigma[r][2*i+0]]; \
+ d = ror64(d ^ a, 32); \
+ c = c + d; \
+ b = ror64(b ^ c, 24); \
+ a = a + b + m[blake2b_sigma[r][2*i+1]]; \
+ d = ror64(d ^ a, 16); \
+ c = c + d; \
+ b = ror64(b ^ c, 63); \
+ } while (0)
+
+#define ROUND(r) \
+ do { \
+ G(r,0,v[ 0],v[ 4],v[ 8],v[12]); \
+ G(r,1,v[ 1],v[ 5],v[ 9],v[13]); \
+ G(r,2,v[ 2],v[ 6],v[10],v[14]); \
+ G(r,3,v[ 3],v[ 7],v[11],v[15]); \
+ G(r,4,v[ 0],v[ 5],v[10],v[15]); \
+ G(r,5,v[ 1],v[ 6],v[11],v[12]); \
+ G(r,6,v[ 2],v[ 7],v[ 8],v[13]); \
+ G(r,7,v[ 3],v[ 4],v[ 9],v[14]); \
+ } while (0)
+
+static void blake2b_compress(struct blake2b_state *S,
+ const u8 block[BLAKE2B_BLOCKBYTES])
+{
+ u64 m[16];
+ u64 v[16];
+ size_t i;
+
+ for (i = 0; i < 16; ++i)
+ m[i] = get_unaligned_le64(block + i * sizeof(m[i]));
+
+ for (i = 0; i < 8; ++i)
+ v[i] = S->h[i];
+
+ v[ 8] = blake2b_IV[0];
+ v[ 9] = blake2b_IV[1];
+ v[10] = blake2b_IV[2];
+ v[11] = blake2b_IV[3];
+ v[12] = blake2b_IV[4] ^ S->t[0];
+ v[13] = blake2b_IV[5] ^ S->t[1];
+ v[14] = blake2b_IV[6] ^ S->f[0];
+ v[15] = blake2b_IV[7] ^ S->f[1];
+
+ ROUND(0);
+ ROUND(1);
+ ROUND(2);
+ ROUND(3);
+ ROUND(4);
+ ROUND(5);
+ ROUND(6);
+ ROUND(7);
+ ROUND(8);
+ ROUND(9);
+ ROUND(10);
+ ROUND(11);
+
+ for (i = 0; i < 8; ++i)
+ S->h[i] = S->h[i] ^ v[i] ^ v[i + 8];
+}
+
+#undef G
+#undef ROUND
+
+struct blake2b_tfm_ctx {
+ u8 key[BLAKE2B_KEYBYTES];
+ unsigned int keylen;
+};
+
+static int blake2b_setkey(struct crypto_shash *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct blake2b_tfm_ctx *tctx = crypto_shash_ctx(tfm);
+
+ if (keylen == 0 || keylen > BLAKE2B_KEYBYTES) {
+ crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ memcpy(tctx->key, key, keylen);
+ tctx->keylen = keylen;
+
+ return 0;
+}
+
+static int blake2b_init(struct shash_desc *desc)
+{
+ struct blake2b_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
+ struct blake2b_state *state = shash_desc_ctx(desc);
+ const int digestsize = crypto_shash_digestsize(desc->tfm);
+
+ memset(state, 0, sizeof(*state));
+ memcpy(state->h, blake2b_IV, sizeof(state->h));
+
+	/* Parameter block is all zeros except index 0; no xor for indices 1..7 */
+ state->h[0] ^= 0x01010000 | tctx->keylen << 8 | digestsize;
+
+ if (tctx->keylen) {
+ /*
+	 * Prefill the buffer with the key; the next call to _update or
+	 * _final will process it
+ */
+ memcpy(state->buf, tctx->key, tctx->keylen);
+ state->buflen = BLAKE2B_BLOCKBYTES;
+ }
+ return 0;
+}
+
+static int blake2b_update(struct shash_desc *desc, const u8 *in,
+ unsigned int inlen)
+{
+ struct blake2b_state *state = shash_desc_ctx(desc);
+ const size_t left = state->buflen;
+ const size_t fill = BLAKE2B_BLOCKBYTES - left;
+
+ if (!inlen)
+ return 0;
+
+ if (inlen > fill) {
+ state->buflen = 0;
+ /* Fill buffer */
+ memcpy(state->buf + left, in, fill);
+ blake2b_increment_counter(state, BLAKE2B_BLOCKBYTES);
+ /* Compress */
+ blake2b_compress(state, state->buf);
+ in += fill;
+ inlen -= fill;
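+		/* compress full blocks, keeping the last block buffered for _final */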
+ while (inlen > BLAKE2B_BLOCKBYTES) {
+ blake2b_increment_counter(state, BLAKE2B_BLOCKBYTES);
+ blake2b_compress(state, in);
+ in += BLAKE2B_BLOCKBYTES;
+ inlen -= BLAKE2B_BLOCKBYTES;
+ }
+ }
+ memcpy(state->buf + state->buflen, in, inlen);
+ state->buflen += inlen;
+
+ return 0;
+}
+
+static int blake2b_final(struct shash_desc *desc, u8 *out)
+{
+ struct blake2b_state *state = shash_desc_ctx(desc);
+ const int digestsize = crypto_shash_digestsize(desc->tfm);
+ size_t i;
+
+ blake2b_increment_counter(state, state->buflen);
+ /* Set last block */
+ state->f[0] = (u64)-1;
+ /* Padding */
+ memset(state->buf + state->buflen, 0, BLAKE2B_BLOCKBYTES - state->buflen);
+ blake2b_compress(state, state->buf);
+
+	/* Convert the state words to LE order in place, avoiding a temporary buffer */
+ for (i = 0; i < ARRAY_SIZE(state->h); i++)
+ __cpu_to_le64s(&state->h[i]);
+
+ memcpy(out, state->h, digestsize);
+ return 0;
+}
+
+static struct shash_alg blake2b_algs[] = {
+ {
+ .base.cra_name = "blake2b-160",
+ .base.cra_driver_name = "blake2b-160-generic",
+ .base.cra_priority = 100,
+ .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
+ .base.cra_blocksize = BLAKE2B_BLOCKBYTES,
+ .base.cra_ctxsize = sizeof(struct blake2b_tfm_ctx),
+ .base.cra_module = THIS_MODULE,
+ .digestsize = BLAKE2B_160_DIGEST_SIZE,
+ .setkey = blake2b_setkey,
+ .init = blake2b_init,
+ .update = blake2b_update,
+ .final = blake2b_final,
+ .descsize = sizeof(struct blake2b_state),
+ }, {
+ .base.cra_name = "blake2b-256",
+ .base.cra_driver_name = "blake2b-256-generic",
+ .base.cra_priority = 100,
+ .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
+ .base.cra_blocksize = BLAKE2B_BLOCKBYTES,
+ .base.cra_ctxsize = sizeof(struct blake2b_tfm_ctx),
+ .base.cra_module = THIS_MODULE,
+ .digestsize = BLAKE2B_256_DIGEST_SIZE,
+ .setkey = blake2b_setkey,
+ .init = blake2b_init,
+ .update = blake2b_update,
+ .final = blake2b_final,
+ .descsize = sizeof(struct blake2b_state),
+ }, {
+ .base.cra_name = "blake2b-384",
+ .base.cra_driver_name = "blake2b-384-generic",
+ .base.cra_priority = 100,
+ .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
+ .base.cra_blocksize = BLAKE2B_BLOCKBYTES,
+ .base.cra_ctxsize = sizeof(struct blake2b_tfm_ctx),
+ .base.cra_module = THIS_MODULE,
+ .digestsize = BLAKE2B_384_DIGEST_SIZE,
+ .setkey = blake2b_setkey,
+ .init = blake2b_init,
+ .update = blake2b_update,
+ .final = blake2b_final,
+ .descsize = sizeof(struct blake2b_state),
+ }, {
+ .base.cra_name = "blake2b-512",
+ .base.cra_driver_name = "blake2b-512-generic",
+ .base.cra_priority = 100,
+ .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
+ .base.cra_blocksize = BLAKE2B_BLOCKBYTES,
+ .base.cra_ctxsize = sizeof(struct blake2b_tfm_ctx),
+ .base.cra_module = THIS_MODULE,
+ .digestsize = BLAKE2B_512_DIGEST_SIZE,
+ .setkey = blake2b_setkey,
+ .init = blake2b_init,
+ .update = blake2b_update,
+ .final = blake2b_final,
+ .descsize = sizeof(struct blake2b_state),
+ }
+};
+
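+/*
+ * Minimal usage sketch via the shash API (error handling omitted;
+ * data/len are a caller-provided buffer):
+ *
+ *	struct crypto_shash *tfm = crypto_alloc_shash("blake2b-256", 0, 0);
+ *	u8 digest[BLAKE2B_256_DIGEST_SIZE];
+ *	SHASH_DESC_ON_STACK(desc, tfm);
+ *
+ *	desc->tfm = tfm;
+ *	crypto_shash_digest(desc, data, len, digest);
+ *	crypto_free_shash(tfm);
+ */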
+static int __init blake2b_mod_init(void)
+{
+ return crypto_register_shashes(blake2b_algs, ARRAY_SIZE(blake2b_algs));
+}
+
+static void __exit blake2b_mod_fini(void)
+{
+ crypto_unregister_shashes(blake2b_algs, ARRAY_SIZE(blake2b_algs));
+}
+
+subsys_initcall(blake2b_mod_init);
+module_exit(blake2b_mod_fini);
+
+MODULE_AUTHOR("David Sterba <kdave@kernel.org>");
+MODULE_DESCRIPTION("BLAKE2b generic implementation");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_CRYPTO("blake2b-160");
+MODULE_ALIAS_CRYPTO("blake2b-160-generic");
+MODULE_ALIAS_CRYPTO("blake2b-256");
+MODULE_ALIAS_CRYPTO("blake2b-256-generic");
+MODULE_ALIAS_CRYPTO("blake2b-384");
+MODULE_ALIAS_CRYPTO("blake2b-384-generic");
+MODULE_ALIAS_CRYPTO("blake2b-512");
+MODULE_ALIAS_CRYPTO("blake2b-512-generic");
diff --git a/crypto/blake2s_generic.c b/crypto/blake2s_generic.c
new file mode 100644
index 000000000000..ed0c74640470
--- /dev/null
+++ b/crypto/blake2s_generic.c
@@ -0,0 +1,171 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#include <crypto/internal/blake2s.h>
+#include <crypto/internal/simd.h>
+#include <crypto/internal/hash.h>
+
+#include <linux/types.h>
+#include <linux/jump_label.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+static int crypto_blake2s_setkey(struct crypto_shash *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct blake2s_tfm_ctx *tctx = crypto_shash_ctx(tfm);
+
+ if (keylen == 0 || keylen > BLAKE2S_KEY_SIZE) {
+ crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ memcpy(tctx->key, key, keylen);
+ tctx->keylen = keylen;
+
+ return 0;
+}
+
+static int crypto_blake2s_init(struct shash_desc *desc)
+{
+ struct blake2s_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
+ struct blake2s_state *state = shash_desc_ctx(desc);
+ const int outlen = crypto_shash_digestsize(desc->tfm);
+
+ if (tctx->keylen)
+ blake2s_init_key(state, outlen, tctx->key, tctx->keylen);
+ else
+ blake2s_init(state, outlen);
+
+ return 0;
+}
+
+static int crypto_blake2s_update(struct shash_desc *desc, const u8 *in,
+ unsigned int inlen)
+{
+ struct blake2s_state *state = shash_desc_ctx(desc);
+ const size_t fill = BLAKE2S_BLOCK_SIZE - state->buflen;
+
+ if (unlikely(!inlen))
+ return 0;
+ if (inlen > fill) {
+ memcpy(state->buf + state->buflen, in, fill);
+ blake2s_compress_generic(state, state->buf, 1, BLAKE2S_BLOCK_SIZE);
+ state->buflen = 0;
+ in += fill;
+ inlen -= fill;
+ }
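+	/*
+	 * Leave the trailing block in the buffer, even when it is full,
+	 * so that _final can compress it with the last-block flag set.
+	 */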
+ if (inlen > BLAKE2S_BLOCK_SIZE) {
+ const size_t nblocks = DIV_ROUND_UP(inlen, BLAKE2S_BLOCK_SIZE);
+ /* Hash one less (full) block than strictly possible */
+ blake2s_compress_generic(state, in, nblocks - 1, BLAKE2S_BLOCK_SIZE);
+ in += BLAKE2S_BLOCK_SIZE * (nblocks - 1);
+ inlen -= BLAKE2S_BLOCK_SIZE * (nblocks - 1);
+ }
+ memcpy(state->buf + state->buflen, in, inlen);
+ state->buflen += inlen;
+
+ return 0;
+}
+
+static int crypto_blake2s_final(struct shash_desc *desc, u8 *out)
+{
+ struct blake2s_state *state = shash_desc_ctx(desc);
+
+ blake2s_set_lastblock(state);
+ memset(state->buf + state->buflen, 0,
+ BLAKE2S_BLOCK_SIZE - state->buflen); /* Padding */
+ blake2s_compress_generic(state, state->buf, 1, state->buflen);
+ cpu_to_le32_array(state->h, ARRAY_SIZE(state->h));
+ memcpy(out, state->h, state->outlen);
+ memzero_explicit(state, sizeof(*state));
+
+ return 0;
+}
+
+static struct shash_alg blake2s_algs[] = {{
+ .base.cra_name = "blake2s-128",
+ .base.cra_driver_name = "blake2s-128-generic",
+ .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
+ .base.cra_ctxsize = sizeof(struct blake2s_tfm_ctx),
+ .base.cra_priority = 200,
+ .base.cra_blocksize = BLAKE2S_BLOCK_SIZE,
+ .base.cra_module = THIS_MODULE,
+
+ .digestsize = BLAKE2S_128_HASH_SIZE,
+ .setkey = crypto_blake2s_setkey,
+ .init = crypto_blake2s_init,
+ .update = crypto_blake2s_update,
+ .final = crypto_blake2s_final,
+ .descsize = sizeof(struct blake2s_state),
+}, {
+ .base.cra_name = "blake2s-160",
+ .base.cra_driver_name = "blake2s-160-generic",
+ .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
+ .base.cra_ctxsize = sizeof(struct blake2s_tfm_ctx),
+ .base.cra_priority = 200,
+ .base.cra_blocksize = BLAKE2S_BLOCK_SIZE,
+ .base.cra_module = THIS_MODULE,
+
+ .digestsize = BLAKE2S_160_HASH_SIZE,
+ .setkey = crypto_blake2s_setkey,
+ .init = crypto_blake2s_init,
+ .update = crypto_blake2s_update,
+ .final = crypto_blake2s_final,
+ .descsize = sizeof(struct blake2s_state),
+}, {
+ .base.cra_name = "blake2s-224",
+ .base.cra_driver_name = "blake2s-224-generic",
+ .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
+ .base.cra_ctxsize = sizeof(struct blake2s_tfm_ctx),
+ .base.cra_priority = 200,
+ .base.cra_blocksize = BLAKE2S_BLOCK_SIZE,
+ .base.cra_module = THIS_MODULE,
+
+ .digestsize = BLAKE2S_224_HASH_SIZE,
+ .setkey = crypto_blake2s_setkey,
+ .init = crypto_blake2s_init,
+ .update = crypto_blake2s_update,
+ .final = crypto_blake2s_final,
+ .descsize = sizeof(struct blake2s_state),
+}, {
+ .base.cra_name = "blake2s-256",
+ .base.cra_driver_name = "blake2s-256-generic",
+ .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
+ .base.cra_ctxsize = sizeof(struct blake2s_tfm_ctx),
+ .base.cra_priority = 200,
+ .base.cra_blocksize = BLAKE2S_BLOCK_SIZE,
+ .base.cra_module = THIS_MODULE,
+
+ .digestsize = BLAKE2S_256_HASH_SIZE,
+ .setkey = crypto_blake2s_setkey,
+ .init = crypto_blake2s_init,
+ .update = crypto_blake2s_update,
+ .final = crypto_blake2s_final,
+ .descsize = sizeof(struct blake2s_state),
+}};
+
+static int __init blake2s_mod_init(void)
+{
+ return crypto_register_shashes(blake2s_algs, ARRAY_SIZE(blake2s_algs));
+}
+
+static void __exit blake2s_mod_exit(void)
+{
+ crypto_unregister_shashes(blake2s_algs, ARRAY_SIZE(blake2s_algs));
+}
+
+subsys_initcall(blake2s_mod_init);
+module_exit(blake2s_mod_exit);
+
+MODULE_ALIAS_CRYPTO("blake2s-128");
+MODULE_ALIAS_CRYPTO("blake2s-128-generic");
+MODULE_ALIAS_CRYPTO("blake2s-160");
+MODULE_ALIAS_CRYPTO("blake2s-160-generic");
+MODULE_ALIAS_CRYPTO("blake2s-224");
+MODULE_ALIAS_CRYPTO("blake2s-224-generic");
+MODULE_ALIAS_CRYPTO("blake2s-256");
+MODULE_ALIAS_CRYPTO("blake2s-256-generic");
+MODULE_LICENSE("GPL v2");
diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
deleted file mode 100644
index 48a33817de11..000000000000
--- a/crypto/blkcipher.c
+++ /dev/null
@@ -1,548 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Block chaining cipher operations.
- *
- * Generic encrypt/decrypt wrapper for ciphers, handles operations across
- * multiple page boundaries by using temporary blocks. In user context,
- * the kernel is given a chance to schedule us once per page.
- *
- * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
- */
-
-#include <crypto/aead.h>
-#include <crypto/internal/skcipher.h>
-#include <crypto/scatterwalk.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/seq_file.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/cryptouser.h>
-#include <linux/compiler.h>
-#include <net/netlink.h>
-
-#include "internal.h"
-
-enum {
- BLKCIPHER_WALK_PHYS = 1 << 0,
- BLKCIPHER_WALK_SLOW = 1 << 1,
- BLKCIPHER_WALK_COPY = 1 << 2,
- BLKCIPHER_WALK_DIFF = 1 << 3,
-};
-
-static int blkcipher_walk_next(struct blkcipher_desc *desc,
- struct blkcipher_walk *walk);
-static int blkcipher_walk_first(struct blkcipher_desc *desc,
- struct blkcipher_walk *walk);
-
-static inline void blkcipher_map_src(struct blkcipher_walk *walk)
-{
- walk->src.virt.addr = scatterwalk_map(&walk->in);
-}
-
-static inline void blkcipher_map_dst(struct blkcipher_walk *walk)
-{
- walk->dst.virt.addr = scatterwalk_map(&walk->out);
-}
-
-static inline void blkcipher_unmap_src(struct blkcipher_walk *walk)
-{
- scatterwalk_unmap(walk->src.virt.addr);
-}
-
-static inline void blkcipher_unmap_dst(struct blkcipher_walk *walk)
-{
- scatterwalk_unmap(walk->dst.virt.addr);
-}
-
-/* Get a spot of the specified length that does not straddle a page.
- * The caller needs to ensure that there is enough space for this operation.
- */
-static inline u8 *blkcipher_get_spot(u8 *start, unsigned int len)
-{
- u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);
- return max(start, end_page);
-}
-
-static inline void blkcipher_done_slow(struct blkcipher_walk *walk,
- unsigned int bsize)
-{
- u8 *addr;
-
- addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
- addr = blkcipher_get_spot(addr, bsize);
- scatterwalk_copychunks(addr, &walk->out, bsize, 1);
-}
-
-static inline void blkcipher_done_fast(struct blkcipher_walk *walk,
- unsigned int n)
-{
- if (walk->flags & BLKCIPHER_WALK_COPY) {
- blkcipher_map_dst(walk);
- memcpy(walk->dst.virt.addr, walk->page, n);
- blkcipher_unmap_dst(walk);
- } else if (!(walk->flags & BLKCIPHER_WALK_PHYS)) {
- if (walk->flags & BLKCIPHER_WALK_DIFF)
- blkcipher_unmap_dst(walk);
- blkcipher_unmap_src(walk);
- }
-
- scatterwalk_advance(&walk->in, n);
- scatterwalk_advance(&walk->out, n);
-}
-
-int blkcipher_walk_done(struct blkcipher_desc *desc,
- struct blkcipher_walk *walk, int err)
-{
- unsigned int n; /* bytes processed */
- bool more;
-
- if (unlikely(err < 0))
- goto finish;
-
- n = walk->nbytes - err;
- walk->total -= n;
- more = (walk->total != 0);
-
- if (likely(!(walk->flags & BLKCIPHER_WALK_SLOW))) {
- blkcipher_done_fast(walk, n);
- } else {
- if (WARN_ON(err)) {
- /* unexpected case; didn't process all bytes */
- err = -EINVAL;
- goto finish;
- }
- blkcipher_done_slow(walk, n);
- }
-
- scatterwalk_done(&walk->in, 0, more);
- scatterwalk_done(&walk->out, 1, more);
-
- if (more) {
- crypto_yield(desc->flags);
- return blkcipher_walk_next(desc, walk);
- }
- err = 0;
-finish:
- walk->nbytes = 0;
- if (walk->iv != desc->info)
- memcpy(desc->info, walk->iv, walk->ivsize);
- if (walk->buffer != walk->page)
- kfree(walk->buffer);
- if (walk->page)
- free_page((unsigned long)walk->page);
- return err;
-}
-EXPORT_SYMBOL_GPL(blkcipher_walk_done);
-
-static inline int blkcipher_next_slow(struct blkcipher_desc *desc,
- struct blkcipher_walk *walk,
- unsigned int bsize,
- unsigned int alignmask)
-{
- unsigned int n;
- unsigned aligned_bsize = ALIGN(bsize, alignmask + 1);
-
- if (walk->buffer)
- goto ok;
-
- walk->buffer = walk->page;
- if (walk->buffer)
- goto ok;
-
- n = aligned_bsize * 3 - (alignmask + 1) +
- (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
- walk->buffer = kmalloc(n, GFP_ATOMIC);
- if (!walk->buffer)
- return blkcipher_walk_done(desc, walk, -ENOMEM);
-
-ok:
- walk->dst.virt.addr = (u8 *)ALIGN((unsigned long)walk->buffer,
- alignmask + 1);
- walk->dst.virt.addr = blkcipher_get_spot(walk->dst.virt.addr, bsize);
- walk->src.virt.addr = blkcipher_get_spot(walk->dst.virt.addr +
- aligned_bsize, bsize);
-
- scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);
-
- walk->nbytes = bsize;
- walk->flags |= BLKCIPHER_WALK_SLOW;
-
- return 0;
-}
-
-static inline int blkcipher_next_copy(struct blkcipher_walk *walk)
-{
- u8 *tmp = walk->page;
-
- blkcipher_map_src(walk);
- memcpy(tmp, walk->src.virt.addr, walk->nbytes);
- blkcipher_unmap_src(walk);
-
- walk->src.virt.addr = tmp;
- walk->dst.virt.addr = tmp;
-
- return 0;
-}
-
-static inline int blkcipher_next_fast(struct blkcipher_desc *desc,
- struct blkcipher_walk *walk)
-{
- unsigned long diff;
-
- walk->src.phys.page = scatterwalk_page(&walk->in);
- walk->src.phys.offset = offset_in_page(walk->in.offset);
- walk->dst.phys.page = scatterwalk_page(&walk->out);
- walk->dst.phys.offset = offset_in_page(walk->out.offset);
-
- if (walk->flags & BLKCIPHER_WALK_PHYS)
- return 0;
-
- diff = walk->src.phys.offset - walk->dst.phys.offset;
- diff |= walk->src.virt.page - walk->dst.virt.page;
-
- blkcipher_map_src(walk);
- walk->dst.virt.addr = walk->src.virt.addr;
-
- if (diff) {
- walk->flags |= BLKCIPHER_WALK_DIFF;
- blkcipher_map_dst(walk);
- }
-
- return 0;
-}
-
-static int blkcipher_walk_next(struct blkcipher_desc *desc,
- struct blkcipher_walk *walk)
-{
- unsigned int bsize;
- unsigned int n;
- int err;
-
- n = walk->total;
- if (unlikely(n < walk->cipher_blocksize)) {
- desc->flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
- return blkcipher_walk_done(desc, walk, -EINVAL);
- }
-
- bsize = min(walk->walk_blocksize, n);
-
- walk->flags &= ~(BLKCIPHER_WALK_SLOW | BLKCIPHER_WALK_COPY |
- BLKCIPHER_WALK_DIFF);
- if (!scatterwalk_aligned(&walk->in, walk->alignmask) ||
- !scatterwalk_aligned(&walk->out, walk->alignmask)) {
- walk->flags |= BLKCIPHER_WALK_COPY;
- if (!walk->page) {
- walk->page = (void *)__get_free_page(GFP_ATOMIC);
- if (!walk->page)
- n = 0;
- }
- }
-
- n = scatterwalk_clamp(&walk->in, n);
- n = scatterwalk_clamp(&walk->out, n);
-
- if (unlikely(n < bsize)) {
- err = blkcipher_next_slow(desc, walk, bsize, walk->alignmask);
- goto set_phys_lowmem;
- }
-
- walk->nbytes = n;
- if (walk->flags & BLKCIPHER_WALK_COPY) {
- err = blkcipher_next_copy(walk);
- goto set_phys_lowmem;
- }
-
- return blkcipher_next_fast(desc, walk);
-
-set_phys_lowmem:
- if (walk->flags & BLKCIPHER_WALK_PHYS) {
- walk->src.phys.page = virt_to_page(walk->src.virt.addr);
- walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
- walk->src.phys.offset &= PAGE_SIZE - 1;
- walk->dst.phys.offset &= PAGE_SIZE - 1;
- }
- return err;
-}
-
-static inline int blkcipher_copy_iv(struct blkcipher_walk *walk)
-{
- unsigned bs = walk->walk_blocksize;
- unsigned aligned_bs = ALIGN(bs, walk->alignmask + 1);
- unsigned int size = aligned_bs * 2 +
- walk->ivsize + max(aligned_bs, walk->ivsize) -
- (walk->alignmask + 1);
- u8 *iv;
-
- size += walk->alignmask & ~(crypto_tfm_ctx_alignment() - 1);
- walk->buffer = kmalloc(size, GFP_ATOMIC);
- if (!walk->buffer)
- return -ENOMEM;
-
- iv = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
- iv = blkcipher_get_spot(iv, bs) + aligned_bs;
- iv = blkcipher_get_spot(iv, bs) + aligned_bs;
- iv = blkcipher_get_spot(iv, walk->ivsize);
-
- walk->iv = memcpy(iv, walk->iv, walk->ivsize);
- return 0;
-}
-
-int blkcipher_walk_virt(struct blkcipher_desc *desc,
- struct blkcipher_walk *walk)
-{
- walk->flags &= ~BLKCIPHER_WALK_PHYS;
- walk->walk_blocksize = crypto_blkcipher_blocksize(desc->tfm);
- walk->cipher_blocksize = walk->walk_blocksize;
- walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
- walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
- return blkcipher_walk_first(desc, walk);
-}
-EXPORT_SYMBOL_GPL(blkcipher_walk_virt);
-
-int blkcipher_walk_phys(struct blkcipher_desc *desc,
- struct blkcipher_walk *walk)
-{
- walk->flags |= BLKCIPHER_WALK_PHYS;
- walk->walk_blocksize = crypto_blkcipher_blocksize(desc->tfm);
- walk->cipher_blocksize = walk->walk_blocksize;
- walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
- walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
- return blkcipher_walk_first(desc, walk);
-}
-EXPORT_SYMBOL_GPL(blkcipher_walk_phys);
-
-static int blkcipher_walk_first(struct blkcipher_desc *desc,
- struct blkcipher_walk *walk)
-{
- if (WARN_ON_ONCE(in_irq()))
- return -EDEADLK;
-
- walk->iv = desc->info;
- walk->nbytes = walk->total;
- if (unlikely(!walk->total))
- return 0;
-
- walk->buffer = NULL;
- if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
- int err = blkcipher_copy_iv(walk);
- if (err)
- return err;
- }
-
- scatterwalk_start(&walk->in, walk->in.sg);
- scatterwalk_start(&walk->out, walk->out.sg);
- walk->page = NULL;
-
- return blkcipher_walk_next(desc, walk);
-}
-
-int blkcipher_walk_virt_block(struct blkcipher_desc *desc,
- struct blkcipher_walk *walk,
- unsigned int blocksize)
-{
- walk->flags &= ~BLKCIPHER_WALK_PHYS;
- walk->walk_blocksize = blocksize;
- walk->cipher_blocksize = crypto_blkcipher_blocksize(desc->tfm);
- walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
- walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
- return blkcipher_walk_first(desc, walk);
-}
-EXPORT_SYMBOL_GPL(blkcipher_walk_virt_block);
-
-int blkcipher_aead_walk_virt_block(struct blkcipher_desc *desc,
- struct blkcipher_walk *walk,
- struct crypto_aead *tfm,
- unsigned int blocksize)
-{
- walk->flags &= ~BLKCIPHER_WALK_PHYS;
- walk->walk_blocksize = blocksize;
- walk->cipher_blocksize = crypto_aead_blocksize(tfm);
- walk->ivsize = crypto_aead_ivsize(tfm);
- walk->alignmask = crypto_aead_alignmask(tfm);
- return blkcipher_walk_first(desc, walk);
-}
-EXPORT_SYMBOL_GPL(blkcipher_aead_walk_virt_block);
-
-static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
- unsigned int keylen)
-{
- struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
- unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
- int ret;
- u8 *buffer, *alignbuffer;
- unsigned long absize;
-
- absize = keylen + alignmask;
- buffer = kmalloc(absize, GFP_ATOMIC);
- if (!buffer)
- return -ENOMEM;
-
- alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
- memcpy(alignbuffer, key, keylen);
- ret = cipher->setkey(tfm, alignbuffer, keylen);
- memset(alignbuffer, 0, keylen);
- kfree(buffer);
- return ret;
-}
-
-static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
-{
- struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
- unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
-
- if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
- tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
- return -EINVAL;
- }
-
- if ((unsigned long)key & alignmask)
- return setkey_unaligned(tfm, key, keylen);
-
- return cipher->setkey(tfm, key, keylen);
-}
-
-static int async_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
- unsigned int keylen)
-{
- return setkey(crypto_ablkcipher_tfm(tfm), key, keylen);
-}
-
-static int async_encrypt(struct ablkcipher_request *req)
-{
- struct crypto_tfm *tfm = req->base.tfm;
- struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
- struct blkcipher_desc desc = {
- .tfm = __crypto_blkcipher_cast(tfm),
- .info = req->info,
- .flags = req->base.flags,
- };
-
-
- return alg->encrypt(&desc, req->dst, req->src, req->nbytes);
-}
-
-static int async_decrypt(struct ablkcipher_request *req)
-{
- struct crypto_tfm *tfm = req->base.tfm;
- struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
- struct blkcipher_desc desc = {
- .tfm = __crypto_blkcipher_cast(tfm),
- .info = req->info,
- .flags = req->base.flags,
- };
-
- return alg->decrypt(&desc, req->dst, req->src, req->nbytes);
-}
-
-static unsigned int crypto_blkcipher_ctxsize(struct crypto_alg *alg, u32 type,
- u32 mask)
-{
- struct blkcipher_alg *cipher = &alg->cra_blkcipher;
- unsigned int len = alg->cra_ctxsize;
-
- if ((mask & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_MASK &&
- cipher->ivsize) {
- len = ALIGN(len, (unsigned long)alg->cra_alignmask + 1);
- len += cipher->ivsize;
- }
-
- return len;
-}
-
-static int crypto_init_blkcipher_ops_async(struct crypto_tfm *tfm)
-{
- struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;
- struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
-
- crt->setkey = async_setkey;
- crt->encrypt = async_encrypt;
- crt->decrypt = async_decrypt;
- crt->base = __crypto_ablkcipher_cast(tfm);
- crt->ivsize = alg->ivsize;
-
- return 0;
-}
-
-static int crypto_init_blkcipher_ops_sync(struct crypto_tfm *tfm)
-{
- struct blkcipher_tfm *crt = &tfm->crt_blkcipher;
- struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
- unsigned long align = crypto_tfm_alg_alignmask(tfm) + 1;
- unsigned long addr;
-
- crt->setkey = setkey;
- crt->encrypt = alg->encrypt;
- crt->decrypt = alg->decrypt;
-
- addr = (unsigned long)crypto_tfm_ctx(tfm);
- addr = ALIGN(addr, align);
- addr += ALIGN(tfm->__crt_alg->cra_ctxsize, align);
- crt->iv = (void *)addr;
-
- return 0;
-}
-
-static int crypto_init_blkcipher_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
-{
- struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
-
- if (alg->ivsize > PAGE_SIZE / 8)
- return -EINVAL;
-
- if ((mask & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_MASK)
- return crypto_init_blkcipher_ops_sync(tfm);
- else
- return crypto_init_blkcipher_ops_async(tfm);
-}
-
-#ifdef CONFIG_NET
-static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
-{
- struct crypto_report_blkcipher rblkcipher;
-
- memset(&rblkcipher, 0, sizeof(rblkcipher));
-
- strscpy(rblkcipher.type, "blkcipher", sizeof(rblkcipher.type));
- strscpy(rblkcipher.geniv, "<default>", sizeof(rblkcipher.geniv));
-
- rblkcipher.blocksize = alg->cra_blocksize;
- rblkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
- rblkcipher.max_keysize = alg->cra_blkcipher.max_keysize;
- rblkcipher.ivsize = alg->cra_blkcipher.ivsize;
-
- return nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
- sizeof(rblkcipher), &rblkcipher);
-}
-#else
-static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
-{
- return -ENOSYS;
-}
-#endif
-
-static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
- __maybe_unused;
-static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
-{
- seq_printf(m, "type : blkcipher\n");
- seq_printf(m, "blocksize : %u\n", alg->cra_blocksize);
- seq_printf(m, "min keysize : %u\n", alg->cra_blkcipher.min_keysize);
- seq_printf(m, "max keysize : %u\n", alg->cra_blkcipher.max_keysize);
- seq_printf(m, "ivsize : %u\n", alg->cra_blkcipher.ivsize);
- seq_printf(m, "geniv : <default>\n");
-}
-
-const struct crypto_type crypto_blkcipher_type = {
- .ctxsize = crypto_blkcipher_ctxsize,
- .init = crypto_init_blkcipher_ops,
-#ifdef CONFIG_PROC_FS
- .show = crypto_blkcipher_show,
-#endif
- .report = crypto_blkcipher_report,
-};
-EXPORT_SYMBOL_GPL(crypto_blkcipher_type);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Generic block chaining cipher type");
diff --git a/crypto/chacha_generic.c b/crypto/chacha_generic.c
index 085d8d219987..8beea79ab117 100644
--- a/crypto/chacha_generic.c
+++ b/crypto/chacha_generic.c
@@ -8,29 +8,10 @@
#include <asm/unaligned.h>
#include <crypto/algapi.h>
-#include <crypto/chacha.h>
+#include <crypto/internal/chacha.h>
#include <crypto/internal/skcipher.h>
#include <linux/module.h>
-static void chacha_docrypt(u32 *state, u8 *dst, const u8 *src,
- unsigned int bytes, int nrounds)
-{
- /* aligned to potentially speed up crypto_xor() */
- u8 stream[CHACHA_BLOCK_SIZE] __aligned(sizeof(long));
-
- while (bytes >= CHACHA_BLOCK_SIZE) {
- chacha_block(state, stream, nrounds);
- crypto_xor_cpy(dst, src, stream, CHACHA_BLOCK_SIZE);
- bytes -= CHACHA_BLOCK_SIZE;
- dst += CHACHA_BLOCK_SIZE;
- src += CHACHA_BLOCK_SIZE;
- }
- if (bytes) {
- chacha_block(state, stream, nrounds);
- crypto_xor_cpy(dst, src, stream, bytes);
- }
-}
-
static int chacha_stream_xor(struct skcipher_request *req,
const struct chacha_ctx *ctx, const u8 *iv)
{
@@ -40,7 +21,7 @@ static int chacha_stream_xor(struct skcipher_request *req,
err = skcipher_walk_virt(&walk, req, false);
- crypto_chacha_init(state, ctx, iv);
+ chacha_init_generic(state, ctx->key, iv);
while (walk.nbytes > 0) {
unsigned int nbytes = walk.nbytes;
@@ -48,75 +29,23 @@ static int chacha_stream_xor(struct skcipher_request *req,
if (nbytes < walk.total)
nbytes = round_down(nbytes, CHACHA_BLOCK_SIZE);
- chacha_docrypt(state, walk.dst.virt.addr, walk.src.virt.addr,
- nbytes, ctx->nrounds);
+ chacha_crypt_generic(state, walk.dst.virt.addr,
+ walk.src.virt.addr, nbytes, ctx->nrounds);
err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
}
return err;
}
-void crypto_chacha_init(u32 *state, const struct chacha_ctx *ctx, const u8 *iv)
-{
- state[0] = 0x61707865; /* "expa" */
- state[1] = 0x3320646e; /* "nd 3" */
- state[2] = 0x79622d32; /* "2-by" */
- state[3] = 0x6b206574; /* "te k" */
- state[4] = ctx->key[0];
- state[5] = ctx->key[1];
- state[6] = ctx->key[2];
- state[7] = ctx->key[3];
- state[8] = ctx->key[4];
- state[9] = ctx->key[5];
- state[10] = ctx->key[6];
- state[11] = ctx->key[7];
- state[12] = get_unaligned_le32(iv + 0);
- state[13] = get_unaligned_le32(iv + 4);
- state[14] = get_unaligned_le32(iv + 8);
- state[15] = get_unaligned_le32(iv + 12);
-}
-EXPORT_SYMBOL_GPL(crypto_chacha_init);
-
-static int chacha_setkey(struct crypto_skcipher *tfm, const u8 *key,
- unsigned int keysize, int nrounds)
-{
- struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
- int i;
-
- if (keysize != CHACHA_KEY_SIZE)
- return -EINVAL;
-
- for (i = 0; i < ARRAY_SIZE(ctx->key); i++)
- ctx->key[i] = get_unaligned_le32(key + i * sizeof(u32));
-
- ctx->nrounds = nrounds;
- return 0;
-}
-
-int crypto_chacha20_setkey(struct crypto_skcipher *tfm, const u8 *key,
- unsigned int keysize)
-{
- return chacha_setkey(tfm, key, keysize, 20);
-}
-EXPORT_SYMBOL_GPL(crypto_chacha20_setkey);
-
-int crypto_chacha12_setkey(struct crypto_skcipher *tfm, const u8 *key,
- unsigned int keysize)
-{
- return chacha_setkey(tfm, key, keysize, 12);
-}
-EXPORT_SYMBOL_GPL(crypto_chacha12_setkey);
-
-int crypto_chacha_crypt(struct skcipher_request *req)
+static int crypto_chacha_crypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
return chacha_stream_xor(req, ctx, req->iv);
}
-EXPORT_SYMBOL_GPL(crypto_chacha_crypt);
-int crypto_xchacha_crypt(struct skcipher_request *req)
+static int crypto_xchacha_crypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
@@ -125,8 +54,8 @@ int crypto_xchacha_crypt(struct skcipher_request *req)
u8 real_iv[16];
/* Compute the subkey given the original key and first 128 nonce bits */
- crypto_chacha_init(state, ctx, req->iv);
- hchacha_block(state, subctx.key, ctx->nrounds);
+ chacha_init_generic(state, ctx->key, req->iv);
+ hchacha_block_generic(state, subctx.key, ctx->nrounds);
subctx.nrounds = ctx->nrounds;
/* Build the real IV */
@@ -136,7 +65,6 @@ int crypto_xchacha_crypt(struct skcipher_request *req)
/* Generate the stream and XOR it with the data */
return chacha_stream_xor(req, &subctx, real_iv);
}
-EXPORT_SYMBOL_GPL(crypto_xchacha_crypt);
static struct skcipher_alg algs[] = {
{
@@ -151,7 +79,7 @@ static struct skcipher_alg algs[] = {
.max_keysize = CHACHA_KEY_SIZE,
.ivsize = CHACHA_IV_SIZE,
.chunksize = CHACHA_BLOCK_SIZE,
- .setkey = crypto_chacha20_setkey,
+ .setkey = chacha20_setkey,
.encrypt = crypto_chacha_crypt,
.decrypt = crypto_chacha_crypt,
}, {
@@ -166,7 +94,7 @@ static struct skcipher_alg algs[] = {
.max_keysize = CHACHA_KEY_SIZE,
.ivsize = XCHACHA_IV_SIZE,
.chunksize = CHACHA_BLOCK_SIZE,
- .setkey = crypto_chacha20_setkey,
+ .setkey = chacha20_setkey,
.encrypt = crypto_xchacha_crypt,
.decrypt = crypto_xchacha_crypt,
}, {
@@ -181,7 +109,7 @@ static struct skcipher_alg algs[] = {
.max_keysize = CHACHA_KEY_SIZE,
.ivsize = XCHACHA_IV_SIZE,
.chunksize = CHACHA_BLOCK_SIZE,
- .setkey = crypto_chacha12_setkey,
+ .setkey = chacha12_setkey,
.encrypt = crypto_xchacha_crypt,
.decrypt = crypto_xchacha_crypt,
}
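With the private chacha_docrypt() and key/IV handling dropped in favor of the shared library, a caller of the generic routines reduces to a few lines. A minimal sketch, assuming the library interface declared in <crypto/chacha.h>; the function name is illustrative:

#include <crypto/chacha.h>

/* Sketch: one-shot ChaCha20 keystream XOR via the shared library.
 * The key is expected as eight little-endian 32-bit words and the IV
 * as the usual 16-byte counter||nonce block.
 */
static void example_chacha20_crypt(const u32 key[8],
				   const u8 iv[CHACHA_IV_SIZE],
				   u8 *dst, const u8 *src, unsigned int len)
{
	u32 state[16];

	chacha_init_generic(state, key, iv);
	chacha_crypt_generic(state, dst, src, len, 20);
}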
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index 927760b316a4..2c6649b10923 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -919,7 +919,7 @@ static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
return PTR_ERR(algt);
switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
- case CRYPTO_ALG_TYPE_BLKCIPHER:
+ case CRYPTO_ALG_TYPE_SKCIPHER:
return cryptd_create_skcipher(tmpl, tb, &queue);
case CRYPTO_ALG_TYPE_HASH:
return cryptd_create_hash(tmpl, tb, &queue);
diff --git a/crypto/crypto_engine.c b/crypto/crypto_engine.c
index 055d17977280..eb029ff1e05a 100644
--- a/crypto/crypto_engine.c
+++ b/crypto/crypto_engine.c
@@ -214,20 +214,6 @@ static int crypto_transfer_request_to_engine(struct crypto_engine *engine,
}
/**
- * crypto_transfer_ablkcipher_request_to_engine - transfer one ablkcipher_request
- * to list into the engine queue
- * @engine: the hardware engine
- * @req: the request need to be listed into the engine queue
- * TODO: Remove this function when skcipher conversion is finished
- */
-int crypto_transfer_ablkcipher_request_to_engine(struct crypto_engine *engine,
- struct ablkcipher_request *req)
-{
- return crypto_transfer_request_to_engine(engine, &req->base);
-}
-EXPORT_SYMBOL_GPL(crypto_transfer_ablkcipher_request_to_engine);
-
-/**
* crypto_transfer_aead_request_to_engine - transfer one aead_request
 * into the engine queue
* @engine: the hardware engine
@@ -280,21 +266,6 @@ int crypto_transfer_skcipher_request_to_engine(struct crypto_engine *engine,
EXPORT_SYMBOL_GPL(crypto_transfer_skcipher_request_to_engine);
/**
- * crypto_finalize_ablkcipher_request - finalize one ablkcipher_request if
- * the request is done
- * @engine: the hardware engine
- * @req: the request need to be finalized
- * @err: error number
- * TODO: Remove this function when skcipher conversion is finished
- */
-void crypto_finalize_ablkcipher_request(struct crypto_engine *engine,
- struct ablkcipher_request *req, int err)
-{
- return crypto_finalize_request(engine, &req->base, err);
-}
-EXPORT_SYMBOL_GPL(crypto_finalize_ablkcipher_request);
-
-/**
* crypto_finalize_aead_request - finalize one aead_request if
* the request is done
* @engine: the hardware engine
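The ablkcipher transfer/finalize helpers are removed; their skcipher counterparts survive this cleanup. A hedged sketch of the calls a driver migrates to (the example_* wrappers are illustrative):

#include <crypto/engine.h>
#include <crypto/skcipher.h>

/* Sketch: queueing and completing a request through the engine with
 * the remaining skcipher helpers.
 */
static int example_enqueue(struct crypto_engine *engine,
			   struct skcipher_request *req)
{
	return crypto_transfer_skcipher_request_to_engine(engine, req);
}

static void example_complete(struct crypto_engine *engine,
			     struct skcipher_request *req, int err)
{
	crypto_finalize_skcipher_request(engine, req, err);
}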
diff --git a/crypto/crypto_user_base.c b/crypto/crypto_user_base.c
index 910e0b46012e..b785c476de67 100644
--- a/crypto/crypto_user_base.c
+++ b/crypto/crypto_user_base.c
@@ -213,8 +213,10 @@ static int crypto_report(struct sk_buff *in_skb, struct nlmsghdr *in_nlh,
drop_alg:
crypto_mod_put(alg);
- if (err)
+ if (err) {
+ kfree_skb(skb);
return err;
+ }
return nlmsg_unicast(net->crypto_nlsk, skb, NETLINK_CB(in_skb).portid);
}
diff --git a/crypto/crypto_user_stat.c b/crypto/crypto_user_stat.c
index 8bad88413de1..154884bf9275 100644
--- a/crypto/crypto_user_stat.c
+++ b/crypto/crypto_user_stat.c
@@ -213,10 +213,6 @@ static int crypto_reportstat_one(struct crypto_alg *alg,
if (crypto_report_cipher(skb, alg))
goto nla_put_failure;
break;
- case CRYPTO_ALG_TYPE_BLKCIPHER:
- if (crypto_report_cipher(skb, alg))
- goto nla_put_failure;
- break;
case CRYPTO_ALG_TYPE_CIPHER:
if (crypto_report_cipher(skb, alg))
goto nla_put_failure;
@@ -328,8 +324,10 @@ int crypto_reportstat(struct sk_buff *in_skb, struct nlmsghdr *in_nlh,
drop_alg:
crypto_mod_put(alg);
- if (err)
+ if (err) {
+ kfree_skb(skb);
return err;
+ }
return nlmsg_unicast(net->crypto_nlsk, skb, NETLINK_CB(in_skb).portid);
}
diff --git a/crypto/curve25519-generic.c b/crypto/curve25519-generic.c
new file mode 100644
index 000000000000..bd88fd571393
--- /dev/null
+++ b/crypto/curve25519-generic.c
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <crypto/curve25519.h>
+#include <crypto/internal/kpp.h>
+#include <crypto/kpp.h>
+#include <linux/module.h>
+#include <linux/scatterlist.h>
+
+static int curve25519_set_secret(struct crypto_kpp *tfm, const void *buf,
+ unsigned int len)
+{
+ u8 *secret = kpp_tfm_ctx(tfm);
+
+ if (!len)
+ curve25519_generate_secret(secret);
+ else if (len == CURVE25519_KEY_SIZE &&
+ crypto_memneq(buf, curve25519_null_point, CURVE25519_KEY_SIZE))
+ memcpy(secret, buf, CURVE25519_KEY_SIZE);
+ else
+ return -EINVAL;
+ return 0;
+}
+
+static int curve25519_compute_value(struct kpp_request *req)
+{
+ struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
+ const u8 *secret = kpp_tfm_ctx(tfm);
+ u8 public_key[CURVE25519_KEY_SIZE];
+ u8 buf[CURVE25519_KEY_SIZE];
+ int copied, nbytes;
+ u8 const *bp;
+
+ if (req->src) {
+ copied = sg_copy_to_buffer(req->src,
+ sg_nents_for_len(req->src,
+ CURVE25519_KEY_SIZE),
+ public_key, CURVE25519_KEY_SIZE);
+ if (copied != CURVE25519_KEY_SIZE)
+ return -EINVAL;
+ bp = public_key;
+ } else {
+ bp = curve25519_base_point;
+ }
+
+ curve25519_generic(buf, secret, bp);
+
+	/* the destination may want fewer bytes than we computed */
+ nbytes = min_t(size_t, CURVE25519_KEY_SIZE, req->dst_len);
+ copied = sg_copy_from_buffer(req->dst, sg_nents_for_len(req->dst,
+ nbytes),
+ buf, nbytes);
+ if (copied != nbytes)
+ return -EINVAL;
+ return 0;
+}
+
+static unsigned int curve25519_max_size(struct crypto_kpp *tfm)
+{
+ return CURVE25519_KEY_SIZE;
+}
+
+static struct kpp_alg curve25519_alg = {
+ .base.cra_name = "curve25519",
+ .base.cra_driver_name = "curve25519-generic",
+ .base.cra_priority = 100,
+ .base.cra_module = THIS_MODULE,
+ .base.cra_ctxsize = CURVE25519_KEY_SIZE,
+
+ .set_secret = curve25519_set_secret,
+ .generate_public_key = curve25519_compute_value,
+ .compute_shared_secret = curve25519_compute_value,
+ .max_size = curve25519_max_size,
+};
+
+static int curve25519_init(void)
+{
+ return crypto_register_kpp(&curve25519_alg);
+}
+
+static void curve25519_exit(void)
+{
+ crypto_unregister_kpp(&curve25519_alg);
+}
+
+subsys_initcall(curve25519_init);
+module_exit(curve25519_exit);
+
+MODULE_ALIAS_CRYPTO("curve25519");
+MODULE_ALIAS_CRYPTO("curve25519-generic");
+MODULE_LICENSE("GPL");
diff --git a/crypto/ecc.c b/crypto/ecc.c
index dfe114bc0c4a..02d35be7702b 100644
--- a/crypto/ecc.c
+++ b/crypto/ecc.c
@@ -336,7 +336,7 @@ static u64 vli_usub(u64 *result, const u64 *left, u64 right,
static uint128_t mul_64_64(u64 left, u64 right)
{
uint128_t result;
-#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__)
+#if defined(CONFIG_ARCH_SUPPORTS_INT128)
unsigned __int128 m = (unsigned __int128)left * right;
result.m_low = m;
@@ -1284,10 +1284,11 @@ EXPORT_SYMBOL(ecc_point_mult_shamir);
static inline void ecc_swap_digits(const u64 *in, u64 *out,
unsigned int ndigits)
{
+ const __be64 *src = (__force __be64 *)in;
int i;
for (i = 0; i < ndigits; i++)
- out[i] = __swab64(in[ndigits - 1 - i]);
+ out[i] = be64_to_cpu(src[ndigits - 1 - i]);
}
static int __ecc_is_key_valid(const struct ecc_curve *curve,
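A hedged worked example of what the ecc_swap_digits() hunk does: the raw key material is a big-endian byte string, and the digits must land in little-endian order for the VLI math.

/* Illustration: for a P-256 key (ndigits == 4) the big-endian input
 * words in[0..3] (most significant digit first) become the
 * little-endian digit array out[0..3] (least significant first):
 *
 *	out[0] = be64_to_cpu(in[3]);	out[1] = be64_to_cpu(in[2]);
 *	out[2] = be64_to_cpu(in[1]);	out[3] = be64_to_cpu(in[0]);
 *
 * be64_to_cpu() on the __be64 view is correct on either host byte
 * order (and keeps sparse happy), where the old unconditional
 * __swab64() was only right on little-endian hosts.
 */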
diff --git a/crypto/essiv.c b/crypto/essiv.c
index a8befc8fb06e..808f2b362106 100644
--- a/crypto/essiv.c
+++ b/crypto/essiv.c
@@ -188,8 +188,7 @@ static void essiv_aead_done(struct crypto_async_request *areq, int err)
struct aead_request *req = areq->data;
struct essiv_aead_request_ctx *rctx = aead_request_ctx(req);
- if (rctx->assoc)
- kfree(rctx->assoc);
+ kfree(rctx->assoc);
aead_request_complete(req, err);
}
@@ -486,7 +485,7 @@ static int essiv_create(struct crypto_template *tmpl, struct rtattr **tb)
type = algt->type & algt->mask;
switch (type) {
- case CRYPTO_ALG_TYPE_BLKCIPHER:
+ case CRYPTO_ALG_TYPE_SKCIPHER:
skcipher_inst = kzalloc(sizeof(*skcipher_inst) +
sizeof(*ictx), GFP_KERNEL);
if (!skcipher_inst)
@@ -586,7 +585,7 @@ static int essiv_create(struct crypto_template *tmpl, struct rtattr **tb)
base->cra_alignmask = block_base->cra_alignmask;
base->cra_priority = block_base->cra_priority;
- if (type == CRYPTO_ALG_TYPE_BLKCIPHER) {
+ if (type == CRYPTO_ALG_TYPE_SKCIPHER) {
skcipher_inst->alg.setkey = essiv_skcipher_setkey;
skcipher_inst->alg.encrypt = essiv_skcipher_encrypt;
skcipher_inst->alg.decrypt = essiv_skcipher_decrypt;
@@ -628,7 +627,7 @@ static int essiv_create(struct crypto_template *tmpl, struct rtattr **tb)
out_free_hash:
crypto_mod_put(_hash_alg);
out_drop_skcipher:
- if (type == CRYPTO_ALG_TYPE_BLKCIPHER)
+ if (type == CRYPTO_ALG_TYPE_SKCIPHER)
crypto_drop_skcipher(&ictx->u.skcipher_spawn);
else
crypto_drop_aead(&ictx->u.aead_spawn);
diff --git a/crypto/geniv.c b/crypto/geniv.c
new file mode 100644
index 000000000000..b9e45a2a98b5
--- /dev/null
+++ b/crypto/geniv.c
@@ -0,0 +1,176 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * geniv: Shared IV generator code
+ *
+ * This file provides common code for IV generators such as seqiv.
+ *
+ * Copyright (c) 2007-2019 Herbert Xu <herbert@gondor.apana.org.au>
+ */
+
+#include <crypto/internal/geniv.h>
+#include <crypto/internal/rng.h>
+#include <crypto/null.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/rtnetlink.h>
+#include <linux/slab.h>
+
+static int aead_geniv_setkey(struct crypto_aead *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ struct aead_geniv_ctx *ctx = crypto_aead_ctx(tfm);
+
+ return crypto_aead_setkey(ctx->child, key, keylen);
+}
+
+static int aead_geniv_setauthsize(struct crypto_aead *tfm,
+ unsigned int authsize)
+{
+ struct aead_geniv_ctx *ctx = crypto_aead_ctx(tfm);
+
+ return crypto_aead_setauthsize(ctx->child, authsize);
+}
+
+struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl,
+ struct rtattr **tb, u32 type, u32 mask)
+{
+ const char *name;
+ struct crypto_aead_spawn *spawn;
+ struct crypto_attr_type *algt;
+ struct aead_instance *inst;
+ struct aead_alg *alg;
+ unsigned int ivsize;
+ unsigned int maxauthsize;
+ int err;
+
+ algt = crypto_get_attr_type(tb);
+ if (IS_ERR(algt))
+ return ERR_CAST(algt);
+
+ if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
+ return ERR_PTR(-EINVAL);
+
+ name = crypto_attr_alg_name(tb[1]);
+ if (IS_ERR(name))
+ return ERR_CAST(name);
+
+ inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
+ if (!inst)
+ return ERR_PTR(-ENOMEM);
+
+ spawn = aead_instance_ctx(inst);
+
+ /* Ignore async algorithms if necessary. */
+ mask |= crypto_requires_sync(algt->type, algt->mask);
+
+ crypto_set_aead_spawn(spawn, aead_crypto_instance(inst));
+ err = crypto_grab_aead(spawn, name, type, mask);
+ if (err)
+ goto err_free_inst;
+
+ alg = crypto_spawn_aead_alg(spawn);
+
+ ivsize = crypto_aead_alg_ivsize(alg);
+ maxauthsize = crypto_aead_alg_maxauthsize(alg);
+
+ err = -EINVAL;
+ if (ivsize < sizeof(u64))
+ goto err_drop_alg;
+
+ err = -ENAMETOOLONG;
+ if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
+ "%s(%s)", tmpl->name, alg->base.cra_name) >=
+ CRYPTO_MAX_ALG_NAME)
+ goto err_drop_alg;
+ if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
+ "%s(%s)", tmpl->name, alg->base.cra_driver_name) >=
+ CRYPTO_MAX_ALG_NAME)
+ goto err_drop_alg;
+
+ inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
+ inst->alg.base.cra_priority = alg->base.cra_priority;
+ inst->alg.base.cra_blocksize = alg->base.cra_blocksize;
+ inst->alg.base.cra_alignmask = alg->base.cra_alignmask;
+ inst->alg.base.cra_ctxsize = sizeof(struct aead_geniv_ctx);
+
+ inst->alg.setkey = aead_geniv_setkey;
+ inst->alg.setauthsize = aead_geniv_setauthsize;
+
+ inst->alg.ivsize = ivsize;
+ inst->alg.maxauthsize = maxauthsize;
+
+out:
+ return inst;
+
+err_drop_alg:
+ crypto_drop_aead(spawn);
+err_free_inst:
+ kfree(inst);
+ inst = ERR_PTR(err);
+ goto out;
+}
+EXPORT_SYMBOL_GPL(aead_geniv_alloc);
+
+void aead_geniv_free(struct aead_instance *inst)
+{
+ crypto_drop_aead(aead_instance_ctx(inst));
+ kfree(inst);
+}
+EXPORT_SYMBOL_GPL(aead_geniv_free);
+
+int aead_init_geniv(struct crypto_aead *aead)
+{
+ struct aead_geniv_ctx *ctx = crypto_aead_ctx(aead);
+ struct aead_instance *inst = aead_alg_instance(aead);
+ struct crypto_aead *child;
+ int err;
+
+ spin_lock_init(&ctx->lock);
+
+ err = crypto_get_default_rng();
+ if (err)
+ goto out;
+
+ err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt,
+ crypto_aead_ivsize(aead));
+ crypto_put_default_rng();
+ if (err)
+ goto out;
+
+ ctx->sknull = crypto_get_default_null_skcipher();
+ err = PTR_ERR(ctx->sknull);
+ if (IS_ERR(ctx->sknull))
+ goto out;
+
+ child = crypto_spawn_aead(aead_instance_ctx(inst));
+ err = PTR_ERR(child);
+ if (IS_ERR(child))
+ goto drop_null;
+
+ ctx->child = child;
+ crypto_aead_set_reqsize(aead, crypto_aead_reqsize(child) +
+ sizeof(struct aead_request));
+
+ err = 0;
+
+out:
+ return err;
+
+drop_null:
+ crypto_put_default_null_skcipher();
+ goto out;
+}
+EXPORT_SYMBOL_GPL(aead_init_geniv);
+
+void aead_exit_geniv(struct crypto_aead *tfm)
+{
+ struct aead_geniv_ctx *ctx = crypto_aead_ctx(tfm);
+
+ crypto_free_aead(ctx->child);
+ crypto_put_default_null_skcipher();
+}
+EXPORT_SYMBOL_GPL(aead_exit_geniv);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Shared IV generator code");
diff --git a/crypto/jitterentropy-kcapi.c b/crypto/jitterentropy-kcapi.c
index 701b8d86ab49..a5ce8f96790f 100644
--- a/crypto/jitterentropy-kcapi.c
+++ b/crypto/jitterentropy-kcapi.c
@@ -44,13 +44,7 @@
#include <linux/crypto.h>
#include <crypto/internal/rng.h>
-struct rand_data;
-int jent_read_entropy(struct rand_data *ec, unsigned char *data,
- unsigned int len);
-int jent_entropy_init(void);
-struct rand_data *jent_entropy_collector_alloc(unsigned int osr,
- unsigned int flags);
-void jent_entropy_collector_free(struct rand_data *entropy_collector);
+#include "jitterentropy.h"
/***************************************************************************
* Helper function
diff --git a/crypto/jitterentropy.c b/crypto/jitterentropy.c
index 77fa2120fe0c..042157f0d28b 100644
--- a/crypto/jitterentropy.c
+++ b/crypto/jitterentropy.c
@@ -103,12 +103,7 @@ struct rand_data {
* Helper functions
***************************************************************************/
-void jent_get_nstime(__u64 *out);
-void *jent_zalloc(unsigned int len);
-void jent_zfree(void *ptr);
-int jent_fips_enabled(void);
-void jent_panic(char *s);
-void jent_memcpy(void *dest, const void *src, unsigned int n);
+#include "jitterentropy.h"
/**
* Update of the loop count used for the next round of
@@ -172,7 +167,7 @@ static __u64 jent_loop_shuffle(struct rand_data *ec,
* implies that careful retesting must be done.
*
* Input:
- * @ec entropy collector struct -- may be NULL
+ * @ec entropy collector struct
* @time time stamp to be injected
* @loop_cnt if a value not equal to 0 is set, use the given value as number of
* loops to perform the folding
@@ -400,8 +395,8 @@ static void jent_gen_entropy(struct rand_data *ec)
* primes the test if needed.
*
* Return:
- * 0 if FIPS test passed
- * < 0 if FIPS test failed
+ * returns normally if FIPS test passed
+ * panics the kernel if FIPS test failed
*/
static void jent_fips_test(struct rand_data *ec)
{
diff --git a/crypto/jitterentropy.h b/crypto/jitterentropy.h
new file mode 100644
index 000000000000..c83fff32d130
--- /dev/null
+++ b/crypto/jitterentropy.h
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+extern void *jent_zalloc(unsigned int len);
+extern void jent_zfree(void *ptr);
+extern int jent_fips_enabled(void);
+extern void jent_panic(char *s);
+extern void jent_memcpy(void *dest, const void *src, unsigned int n);
+extern void jent_get_nstime(__u64 *out);
+
+struct rand_data;
+extern int jent_entropy_init(void);
+extern int jent_read_entropy(struct rand_data *ec, unsigned char *data,
+ unsigned int len);
+
+extern struct rand_data *jent_entropy_collector_alloc(unsigned int osr,
+ unsigned int flags);
+extern void jent_entropy_collector_free(struct rand_data *entropy_collector);
diff --git a/crypto/nhpoly1305.c b/crypto/nhpoly1305.c
index 9ab4e07cde4d..f6b6a52092b4 100644
--- a/crypto/nhpoly1305.c
+++ b/crypto/nhpoly1305.c
@@ -33,6 +33,7 @@
#include <asm/unaligned.h>
#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
+#include <crypto/internal/poly1305.h>
#include <crypto/nhpoly1305.h>
#include <linux/crypto.h>
#include <linux/kernel.h>
@@ -78,7 +79,7 @@ static void process_nh_hash_value(struct nhpoly1305_state *state,
BUILD_BUG_ON(NH_HASH_BYTES % POLY1305_BLOCK_SIZE != 0);
poly1305_core_blocks(&state->poly_state, &key->poly_key, state->nh_hash,
- NH_HASH_BYTES / POLY1305_BLOCK_SIZE);
+ NH_HASH_BYTES / POLY1305_BLOCK_SIZE, 1);
}
/*
diff --git a/crypto/poly1305_generic.c b/crypto/poly1305_generic.c
index adc40298c749..21edbd8c99fb 100644
--- a/crypto/poly1305_generic.c
+++ b/crypto/poly1305_generic.c
@@ -13,158 +13,26 @@
#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
-#include <crypto/poly1305.h>
+#include <crypto/internal/poly1305.h>
#include <linux/crypto.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <asm/unaligned.h>
-static inline u64 mlt(u64 a, u64 b)
-{
- return a * b;
-}
-
-static inline u32 sr(u64 v, u_char n)
-{
- return v >> n;
-}
-
-static inline u32 and(u32 v, u32 mask)
-{
- return v & mask;
-}
-
-int crypto_poly1305_init(struct shash_desc *desc)
+static int crypto_poly1305_init(struct shash_desc *desc)
{
struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
poly1305_core_init(&dctx->h);
dctx->buflen = 0;
- dctx->rset = false;
+ dctx->rset = 0;
dctx->sset = false;
return 0;
}
-EXPORT_SYMBOL_GPL(crypto_poly1305_init);
-
-void poly1305_core_setkey(struct poly1305_key *key, const u8 *raw_key)
-{
- /* r &= 0xffffffc0ffffffc0ffffffc0fffffff */
- key->r[0] = (get_unaligned_le32(raw_key + 0) >> 0) & 0x3ffffff;
- key->r[1] = (get_unaligned_le32(raw_key + 3) >> 2) & 0x3ffff03;
- key->r[2] = (get_unaligned_le32(raw_key + 6) >> 4) & 0x3ffc0ff;
- key->r[3] = (get_unaligned_le32(raw_key + 9) >> 6) & 0x3f03fff;
- key->r[4] = (get_unaligned_le32(raw_key + 12) >> 8) & 0x00fffff;
-}
-EXPORT_SYMBOL_GPL(poly1305_core_setkey);
-
-/*
- * Poly1305 requires a unique key for each tag, which implies that we can't set
- * it on the tfm that gets accessed by multiple users simultaneously. Instead we
- * expect the key as the first 32 bytes in the update() call.
- */
-unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx,
- const u8 *src, unsigned int srclen)
-{
- if (!dctx->sset) {
- if (!dctx->rset && srclen >= POLY1305_BLOCK_SIZE) {
- poly1305_core_setkey(&dctx->r, src);
- src += POLY1305_BLOCK_SIZE;
- srclen -= POLY1305_BLOCK_SIZE;
- dctx->rset = true;
- }
- if (srclen >= POLY1305_BLOCK_SIZE) {
- dctx->s[0] = get_unaligned_le32(src + 0);
- dctx->s[1] = get_unaligned_le32(src + 4);
- dctx->s[2] = get_unaligned_le32(src + 8);
- dctx->s[3] = get_unaligned_le32(src + 12);
- src += POLY1305_BLOCK_SIZE;
- srclen -= POLY1305_BLOCK_SIZE;
- dctx->sset = true;
- }
- }
- return srclen;
-}
-EXPORT_SYMBOL_GPL(crypto_poly1305_setdesckey);
-
-static void poly1305_blocks_internal(struct poly1305_state *state,
- const struct poly1305_key *key,
- const void *src, unsigned int nblocks,
- u32 hibit)
-{
- u32 r0, r1, r2, r3, r4;
- u32 s1, s2, s3, s4;
- u32 h0, h1, h2, h3, h4;
- u64 d0, d1, d2, d3, d4;
-
- if (!nblocks)
- return;
-
- r0 = key->r[0];
- r1 = key->r[1];
- r2 = key->r[2];
- r3 = key->r[3];
- r4 = key->r[4];
-
- s1 = r1 * 5;
- s2 = r2 * 5;
- s3 = r3 * 5;
- s4 = r4 * 5;
-
- h0 = state->h[0];
- h1 = state->h[1];
- h2 = state->h[2];
- h3 = state->h[3];
- h4 = state->h[4];
-
- do {
- /* h += m[i] */
- h0 += (get_unaligned_le32(src + 0) >> 0) & 0x3ffffff;
- h1 += (get_unaligned_le32(src + 3) >> 2) & 0x3ffffff;
- h2 += (get_unaligned_le32(src + 6) >> 4) & 0x3ffffff;
- h3 += (get_unaligned_le32(src + 9) >> 6) & 0x3ffffff;
- h4 += (get_unaligned_le32(src + 12) >> 8) | hibit;
-
- /* h *= r */
- d0 = mlt(h0, r0) + mlt(h1, s4) + mlt(h2, s3) +
- mlt(h3, s2) + mlt(h4, s1);
- d1 = mlt(h0, r1) + mlt(h1, r0) + mlt(h2, s4) +
- mlt(h3, s3) + mlt(h4, s2);
- d2 = mlt(h0, r2) + mlt(h1, r1) + mlt(h2, r0) +
- mlt(h3, s4) + mlt(h4, s3);
- d3 = mlt(h0, r3) + mlt(h1, r2) + mlt(h2, r1) +
- mlt(h3, r0) + mlt(h4, s4);
- d4 = mlt(h0, r4) + mlt(h1, r3) + mlt(h2, r2) +
- mlt(h3, r1) + mlt(h4, r0);
-
- /* (partial) h %= p */
- d1 += sr(d0, 26); h0 = and(d0, 0x3ffffff);
- d2 += sr(d1, 26); h1 = and(d1, 0x3ffffff);
- d3 += sr(d2, 26); h2 = and(d2, 0x3ffffff);
- d4 += sr(d3, 26); h3 = and(d3, 0x3ffffff);
- h0 += sr(d4, 26) * 5; h4 = and(d4, 0x3ffffff);
- h1 += h0 >> 26; h0 = h0 & 0x3ffffff;
-
- src += POLY1305_BLOCK_SIZE;
- } while (--nblocks);
-
- state->h[0] = h0;
- state->h[1] = h1;
- state->h[2] = h2;
- state->h[3] = h3;
- state->h[4] = h4;
-}
-
-void poly1305_core_blocks(struct poly1305_state *state,
- const struct poly1305_key *key,
- const void *src, unsigned int nblocks)
-{
- poly1305_blocks_internal(state, key, src, nblocks, 1 << 24);
-}
-EXPORT_SYMBOL_GPL(poly1305_core_blocks);
-static void poly1305_blocks(struct poly1305_desc_ctx *dctx,
- const u8 *src, unsigned int srclen, u32 hibit)
+static void poly1305_blocks(struct poly1305_desc_ctx *dctx, const u8 *src,
+ unsigned int srclen)
{
unsigned int datalen;
@@ -174,12 +42,12 @@ static void poly1305_blocks(struct poly1305_desc_ctx *dctx,
srclen = datalen;
}
- poly1305_blocks_internal(&dctx->h, &dctx->r,
- src, srclen / POLY1305_BLOCK_SIZE, hibit);
+ poly1305_core_blocks(&dctx->h, dctx->r, src,
+ srclen / POLY1305_BLOCK_SIZE, 1);
}
-int crypto_poly1305_update(struct shash_desc *desc,
- const u8 *src, unsigned int srclen)
+static int crypto_poly1305_update(struct shash_desc *desc,
+ const u8 *src, unsigned int srclen)
{
struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
unsigned int bytes;
@@ -193,13 +61,13 @@ int crypto_poly1305_update(struct shash_desc *desc,
if (dctx->buflen == POLY1305_BLOCK_SIZE) {
poly1305_blocks(dctx, dctx->buf,
- POLY1305_BLOCK_SIZE, 1 << 24);
+ POLY1305_BLOCK_SIZE);
dctx->buflen = 0;
}
}
if (likely(srclen >= POLY1305_BLOCK_SIZE)) {
- poly1305_blocks(dctx, src, srclen, 1 << 24);
+ poly1305_blocks(dctx, src, srclen);
src += srclen - (srclen % POLY1305_BLOCK_SIZE);
srclen %= POLY1305_BLOCK_SIZE;
}
@@ -211,87 +79,17 @@ int crypto_poly1305_update(struct shash_desc *desc,
return 0;
}
-EXPORT_SYMBOL_GPL(crypto_poly1305_update);
-
-void poly1305_core_emit(const struct poly1305_state *state, void *dst)
-{
- u32 h0, h1, h2, h3, h4;
- u32 g0, g1, g2, g3, g4;
- u32 mask;
-
- /* fully carry h */
- h0 = state->h[0];
- h1 = state->h[1];
- h2 = state->h[2];
- h3 = state->h[3];
- h4 = state->h[4];
-
- h2 += (h1 >> 26); h1 = h1 & 0x3ffffff;
- h3 += (h2 >> 26); h2 = h2 & 0x3ffffff;
- h4 += (h3 >> 26); h3 = h3 & 0x3ffffff;
- h0 += (h4 >> 26) * 5; h4 = h4 & 0x3ffffff;
- h1 += (h0 >> 26); h0 = h0 & 0x3ffffff;
-
- /* compute h + -p */
- g0 = h0 + 5;
- g1 = h1 + (g0 >> 26); g0 &= 0x3ffffff;
- g2 = h2 + (g1 >> 26); g1 &= 0x3ffffff;
- g3 = h3 + (g2 >> 26); g2 &= 0x3ffffff;
- g4 = h4 + (g3 >> 26) - (1 << 26); g3 &= 0x3ffffff;
-
- /* select h if h < p, or h + -p if h >= p */
- mask = (g4 >> ((sizeof(u32) * 8) - 1)) - 1;
- g0 &= mask;
- g1 &= mask;
- g2 &= mask;
- g3 &= mask;
- g4 &= mask;
- mask = ~mask;
- h0 = (h0 & mask) | g0;
- h1 = (h1 & mask) | g1;
- h2 = (h2 & mask) | g2;
- h3 = (h3 & mask) | g3;
- h4 = (h4 & mask) | g4;
-
- /* h = h % (2^128) */
- put_unaligned_le32((h0 >> 0) | (h1 << 26), dst + 0);
- put_unaligned_le32((h1 >> 6) | (h2 << 20), dst + 4);
- put_unaligned_le32((h2 >> 12) | (h3 << 14), dst + 8);
- put_unaligned_le32((h3 >> 18) | (h4 << 8), dst + 12);
-}
-EXPORT_SYMBOL_GPL(poly1305_core_emit);
-int crypto_poly1305_final(struct shash_desc *desc, u8 *dst)
+static int crypto_poly1305_final(struct shash_desc *desc, u8 *dst)
{
struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
- __le32 digest[4];
- u64 f = 0;
if (unlikely(!dctx->sset))
return -ENOKEY;
- if (unlikely(dctx->buflen)) {
- dctx->buf[dctx->buflen++] = 1;
- memset(dctx->buf + dctx->buflen, 0,
- POLY1305_BLOCK_SIZE - dctx->buflen);
- poly1305_blocks(dctx, dctx->buf, POLY1305_BLOCK_SIZE, 0);
- }
-
- poly1305_core_emit(&dctx->h, digest);
-
- /* mac = (h + s) % (2^128) */
- f = (f >> 32) + le32_to_cpu(digest[0]) + dctx->s[0];
- put_unaligned_le32(f, dst + 0);
- f = (f >> 32) + le32_to_cpu(digest[1]) + dctx->s[1];
- put_unaligned_le32(f, dst + 4);
- f = (f >> 32) + le32_to_cpu(digest[2]) + dctx->s[2];
- put_unaligned_le32(f, dst + 8);
- f = (f >> 32) + le32_to_cpu(digest[3]) + dctx->s[3];
- put_unaligned_le32(f, dst + 12);
-
+ poly1305_final_generic(dctx, dst);
return 0;
}
-EXPORT_SYMBOL_GPL(crypto_poly1305_final);
static struct shash_alg poly1305_alg = {
.digestsize = POLY1305_DIGEST_SIZE,
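The arithmetic removed above now lives in the shared Poly1305 library; this file keeps only the shash glue. A hedged sketch of a one-shot MAC on that library interface, assuming the wrappers declared in <crypto/poly1305.h>:

#include <crypto/poly1305.h>

/* Sketch: compute a 16-byte tag over msg with a 32-byte one-time key
 * (r followed by s), using the library state the shash wrapper
 * delegates to.
 */
static void example_poly1305_mac(const u8 key[POLY1305_KEY_SIZE],
				 const u8 *msg, unsigned int len,
				 u8 tag[POLY1305_DIGEST_SIZE])
{
	struct poly1305_desc_ctx desc;

	poly1305_init(&desc, key);
	poly1305_update(&desc, msg, len);
	poly1305_final(&desc, tag);
}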
diff --git a/crypto/skcipher.c b/crypto/skcipher.c
index 22753c1c7202..13da43c84b64 100644
--- a/crypto/skcipher.c
+++ b/crypto/skcipher.c
@@ -580,12 +580,6 @@ EXPORT_SYMBOL_GPL(skcipher_walk_aead_decrypt);
static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg)
{
- if (alg->cra_type == &crypto_blkcipher_type)
- return sizeof(struct crypto_blkcipher *);
-
- if (alg->cra_type == &crypto_ablkcipher_type)
- return sizeof(struct crypto_ablkcipher *);
-
return crypto_alg_extsize(alg);
}
@@ -595,205 +589,6 @@ static void skcipher_set_needkey(struct crypto_skcipher *tfm)
crypto_skcipher_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
}
-static int skcipher_setkey_blkcipher(struct crypto_skcipher *tfm,
- const u8 *key, unsigned int keylen)
-{
- struct crypto_blkcipher **ctx = crypto_skcipher_ctx(tfm);
- struct crypto_blkcipher *blkcipher = *ctx;
- int err;
-
- crypto_blkcipher_clear_flags(blkcipher, ~0);
- crypto_blkcipher_set_flags(blkcipher, crypto_skcipher_get_flags(tfm) &
- CRYPTO_TFM_REQ_MASK);
- err = crypto_blkcipher_setkey(blkcipher, key, keylen);
- crypto_skcipher_set_flags(tfm, crypto_blkcipher_get_flags(blkcipher) &
- CRYPTO_TFM_RES_MASK);
- if (unlikely(err)) {
- skcipher_set_needkey(tfm);
- return err;
- }
-
- crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
- return 0;
-}
-
-static int skcipher_crypt_blkcipher(struct skcipher_request *req,
- int (*crypt)(struct blkcipher_desc *,
- struct scatterlist *,
- struct scatterlist *,
- unsigned int))
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct crypto_blkcipher **ctx = crypto_skcipher_ctx(tfm);
- struct blkcipher_desc desc = {
- .tfm = *ctx,
- .info = req->iv,
- .flags = req->base.flags,
- };
-
-
- return crypt(&desc, req->dst, req->src, req->cryptlen);
-}
-
-static int skcipher_encrypt_blkcipher(struct skcipher_request *req)
-{
- struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
- struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
- struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
-
- return skcipher_crypt_blkcipher(req, alg->encrypt);
-}
-
-static int skcipher_decrypt_blkcipher(struct skcipher_request *req)
-{
- struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
- struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
- struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
-
- return skcipher_crypt_blkcipher(req, alg->decrypt);
-}
-
-static void crypto_exit_skcipher_ops_blkcipher(struct crypto_tfm *tfm)
-{
- struct crypto_blkcipher **ctx = crypto_tfm_ctx(tfm);
-
- crypto_free_blkcipher(*ctx);
-}
-
-static int crypto_init_skcipher_ops_blkcipher(struct crypto_tfm *tfm)
-{
- struct crypto_alg *calg = tfm->__crt_alg;
- struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
- struct crypto_blkcipher **ctx = crypto_tfm_ctx(tfm);
- struct crypto_blkcipher *blkcipher;
- struct crypto_tfm *btfm;
-
- if (!crypto_mod_get(calg))
- return -EAGAIN;
-
- btfm = __crypto_alloc_tfm(calg, CRYPTO_ALG_TYPE_BLKCIPHER,
- CRYPTO_ALG_TYPE_MASK);
- if (IS_ERR(btfm)) {
- crypto_mod_put(calg);
- return PTR_ERR(btfm);
- }
-
- blkcipher = __crypto_blkcipher_cast(btfm);
- *ctx = blkcipher;
- tfm->exit = crypto_exit_skcipher_ops_blkcipher;
-
- skcipher->setkey = skcipher_setkey_blkcipher;
- skcipher->encrypt = skcipher_encrypt_blkcipher;
- skcipher->decrypt = skcipher_decrypt_blkcipher;
-
- skcipher->ivsize = crypto_blkcipher_ivsize(blkcipher);
- skcipher->keysize = calg->cra_blkcipher.max_keysize;
-
- skcipher_set_needkey(skcipher);
-
- return 0;
-}
-
-static int skcipher_setkey_ablkcipher(struct crypto_skcipher *tfm,
- const u8 *key, unsigned int keylen)
-{
- struct crypto_ablkcipher **ctx = crypto_skcipher_ctx(tfm);
- struct crypto_ablkcipher *ablkcipher = *ctx;
- int err;
-
- crypto_ablkcipher_clear_flags(ablkcipher, ~0);
- crypto_ablkcipher_set_flags(ablkcipher,
- crypto_skcipher_get_flags(tfm) &
- CRYPTO_TFM_REQ_MASK);
- err = crypto_ablkcipher_setkey(ablkcipher, key, keylen);
- crypto_skcipher_set_flags(tfm,
- crypto_ablkcipher_get_flags(ablkcipher) &
- CRYPTO_TFM_RES_MASK);
- if (unlikely(err)) {
- skcipher_set_needkey(tfm);
- return err;
- }
-
- crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
- return 0;
-}
-
-static int skcipher_crypt_ablkcipher(struct skcipher_request *req,
- int (*crypt)(struct ablkcipher_request *))
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct crypto_ablkcipher **ctx = crypto_skcipher_ctx(tfm);
- struct ablkcipher_request *subreq = skcipher_request_ctx(req);
-
- ablkcipher_request_set_tfm(subreq, *ctx);
- ablkcipher_request_set_callback(subreq, skcipher_request_flags(req),
- req->base.complete, req->base.data);
- ablkcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
- req->iv);
-
- return crypt(subreq);
-}
-
-static int skcipher_encrypt_ablkcipher(struct skcipher_request *req)
-{
- struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
- struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
- struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;
-
- return skcipher_crypt_ablkcipher(req, alg->encrypt);
-}
-
-static int skcipher_decrypt_ablkcipher(struct skcipher_request *req)
-{
- struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
- struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
- struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;
-
- return skcipher_crypt_ablkcipher(req, alg->decrypt);
-}
-
-static void crypto_exit_skcipher_ops_ablkcipher(struct crypto_tfm *tfm)
-{
- struct crypto_ablkcipher **ctx = crypto_tfm_ctx(tfm);
-
- crypto_free_ablkcipher(*ctx);
-}
-
-static int crypto_init_skcipher_ops_ablkcipher(struct crypto_tfm *tfm)
-{
- struct crypto_alg *calg = tfm->__crt_alg;
- struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
- struct crypto_ablkcipher **ctx = crypto_tfm_ctx(tfm);
- struct crypto_ablkcipher *ablkcipher;
- struct crypto_tfm *abtfm;
-
- if (!crypto_mod_get(calg))
- return -EAGAIN;
-
- abtfm = __crypto_alloc_tfm(calg, 0, 0);
- if (IS_ERR(abtfm)) {
- crypto_mod_put(calg);
- return PTR_ERR(abtfm);
- }
-
- ablkcipher = __crypto_ablkcipher_cast(abtfm);
- *ctx = ablkcipher;
- tfm->exit = crypto_exit_skcipher_ops_ablkcipher;
-
- skcipher->setkey = skcipher_setkey_ablkcipher;
- skcipher->encrypt = skcipher_encrypt_ablkcipher;
- skcipher->decrypt = skcipher_decrypt_ablkcipher;
-
- skcipher->ivsize = crypto_ablkcipher_ivsize(ablkcipher);
- skcipher->reqsize = crypto_ablkcipher_reqsize(ablkcipher) +
- sizeof(struct ablkcipher_request);
- skcipher->keysize = calg->cra_ablkcipher.max_keysize;
-
- skcipher_set_needkey(skcipher);
-
- return 0;
-}
-
static int skcipher_setkey_unaligned(struct crypto_skcipher *tfm,
const u8 *key, unsigned int keylen)
{
@@ -888,12 +683,6 @@ static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);
- if (tfm->__crt_alg->cra_type == &crypto_blkcipher_type)
- return crypto_init_skcipher_ops_blkcipher(tfm);
-
- if (tfm->__crt_alg->cra_type == &crypto_ablkcipher_type)
- return crypto_init_skcipher_ops_ablkcipher(tfm);
-
skcipher->setkey = skcipher_setkey;
skcipher->encrypt = alg->encrypt;
skcipher->decrypt = alg->decrypt;
@@ -964,7 +753,7 @@ static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
}
#endif
-static const struct crypto_type crypto_skcipher_type2 = {
+static const struct crypto_type crypto_skcipher_type = {
.extsize = crypto_skcipher_extsize,
.init_tfm = crypto_skcipher_init_tfm,
.free = crypto_skcipher_free_instance,
@@ -973,7 +762,7 @@ static const struct crypto_type crypto_skcipher_type2 = {
#endif
.report = crypto_skcipher_report,
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
- .maskset = CRYPTO_ALG_TYPE_BLKCIPHER_MASK,
+ .maskset = CRYPTO_ALG_TYPE_MASK,
.type = CRYPTO_ALG_TYPE_SKCIPHER,
.tfmsize = offsetof(struct crypto_skcipher, base),
};
@@ -981,7 +770,7 @@ static const struct crypto_type crypto_skcipher_type2 = {
int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn,
const char *name, u32 type, u32 mask)
{
- spawn->base.frontend = &crypto_skcipher_type2;
+ spawn->base.frontend = &crypto_skcipher_type;
return crypto_grab_spawn(&spawn->base, name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_grab_skcipher);
@@ -989,7 +778,7 @@ EXPORT_SYMBOL_GPL(crypto_grab_skcipher);
struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
u32 type, u32 mask)
{
- return crypto_alloc_tfm(alg_name, &crypto_skcipher_type2, type, mask);
+ return crypto_alloc_tfm(alg_name, &crypto_skcipher_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_skcipher);
@@ -1001,7 +790,7 @@ struct crypto_sync_skcipher *crypto_alloc_sync_skcipher(
/* Only sync algorithms allowed. */
mask |= CRYPTO_ALG_ASYNC;
- tfm = crypto_alloc_tfm(alg_name, &crypto_skcipher_type2, type, mask);
+ tfm = crypto_alloc_tfm(alg_name, &crypto_skcipher_type, type, mask);
/*
* Make sure we do not allocate something that might get used with
@@ -1017,12 +806,11 @@ struct crypto_sync_skcipher *crypto_alloc_sync_skcipher(
}
EXPORT_SYMBOL_GPL(crypto_alloc_sync_skcipher);
-int crypto_has_skcipher2(const char *alg_name, u32 type, u32 mask)
+int crypto_has_skcipher(const char *alg_name, u32 type, u32 mask)
{
- return crypto_type_has_alg(alg_name, &crypto_skcipher_type2,
- type, mask);
+ return crypto_type_has_alg(alg_name, &crypto_skcipher_type, type, mask);
}
-EXPORT_SYMBOL_GPL(crypto_has_skcipher2);
+EXPORT_SYMBOL_GPL(crypto_has_skcipher);
static int skcipher_prepare_alg(struct skcipher_alg *alg)
{
@@ -1037,7 +825,7 @@ static int skcipher_prepare_alg(struct skcipher_alg *alg)
if (!alg->walksize)
alg->walksize = alg->chunksize;
- base->cra_type = &crypto_skcipher_type2;
+ base->cra_type = &crypto_skcipher_type;
base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
base->cra_flags |= CRYPTO_ALG_TYPE_SKCIPHER;
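With the blkcipher/ablkcipher glue gone, every skcipher handle is native and the compatibility names lose their "2" suffix. A small hedged sketch of the renamed probe helper; the algorithm name is illustrative:

#include <crypto/skcipher.h>

/* Sketch: ask whether a synchronous ctr(aes) implementation exists,
 * using crypto_has_skcipher() in place of crypto_has_skcipher2().
 * type == 0 with CRYPTO_ALG_ASYNC in the mask excludes async-only
 * implementations.
 */
static bool example_have_sync_ctr_aes(void)
{
	return crypto_has_skcipher("ctr(aes)", 0, CRYPTO_ALG_ASYNC);
}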
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index 83ad0b1fab30..f42f486e90e8 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -65,7 +65,7 @@ static int mode;
static u32 num_mb = 8;
static char *tvmem[TVMEMSIZE];
-static char *check[] = {
+static const char *check[] = {
"des", "md5", "des3_ede", "rot13", "sha1", "sha224", "sha256", "sm3",
"blowfish", "twofish", "serpent", "sha384", "sha512", "md4", "aes",
"cast6", "arc4", "michael_mic", "deflate", "crc32c", "tea", "xtea",
@@ -1634,7 +1634,7 @@ static void test_cipher_speed(const char *algo, int enc, unsigned int secs,
static void test_available(void)
{
- char **name = check;
+ const char **name = check;
while (*name) {
printk("alg %s ", *name);
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index c39e39e55dc2..82513b6b0abd 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -4023,6 +4023,58 @@ static const struct alg_test_desc alg_test_descs[] = {
.test = alg_test_null,
.fips_allowed = 1,
}, {
+ .alg = "blake2b-160",
+ .test = alg_test_hash,
+ .fips_allowed = 0,
+ .suite = {
+ .hash = __VECS(blake2b_160_tv_template)
+ }
+ }, {
+ .alg = "blake2b-256",
+ .test = alg_test_hash,
+ .fips_allowed = 0,
+ .suite = {
+ .hash = __VECS(blake2b_256_tv_template)
+ }
+ }, {
+ .alg = "blake2b-384",
+ .test = alg_test_hash,
+ .fips_allowed = 0,
+ .suite = {
+ .hash = __VECS(blake2b_384_tv_template)
+ }
+ }, {
+ .alg = "blake2b-512",
+ .test = alg_test_hash,
+ .fips_allowed = 0,
+ .suite = {
+ .hash = __VECS(blake2b_512_tv_template)
+ }
+ }, {
+ .alg = "blake2s-128",
+ .test = alg_test_hash,
+ .suite = {
+ .hash = __VECS(blakes2s_128_tv_template)
+ }
+ }, {
+ .alg = "blake2s-160",
+ .test = alg_test_hash,
+ .suite = {
+ .hash = __VECS(blakes2s_160_tv_template)
+ }
+ }, {
+ .alg = "blake2s-224",
+ .test = alg_test_hash,
+ .suite = {
+ .hash = __VECS(blakes2s_224_tv_template)
+ }
+ }, {
+ .alg = "blake2s-256",
+ .test = alg_test_hash,
+ .suite = {
+ .hash = __VECS(blakes2s_256_tv_template)
+ }
+ }, {
.alg = "cbc(aes)",
.test = alg_test_skcipher,
.fips_allowed = 1,
@@ -4126,6 +4178,12 @@ static const struct alg_test_desc alg_test_descs[] = {
.cipher = __VECS(aes_cfb_tv_template)
},
}, {
+ .alg = "cfb(sm4)",
+ .test = alg_test_skcipher,
+ .suite = {
+ .cipher = __VECS(sm4_cfb_tv_template)
+ }
+ }, {
.alg = "chacha20",
.test = alg_test_skcipher,
.suite = {
@@ -4260,6 +4318,12 @@ static const struct alg_test_desc alg_test_descs[] = {
.test = alg_test_null,
.fips_allowed = 1,
}, {
+ .alg = "curve25519",
+ .test = alg_test_kpp,
+ .suite = {
+ .kpp = __VECS(curve25519_tv_template)
+ }
+ }, {
.alg = "deflate",
.test = alg_test_comp,
.fips_allowed = 1,
@@ -4655,6 +4719,12 @@ static const struct alg_test_desc alg_test_descs[] = {
.hash = __VECS(hmac_sha512_tv_template)
}
}, {
+ .alg = "hmac(sm3)",
+ .test = alg_test_hash,
+ .suite = {
+ .hash = __VECS(hmac_sm3_tv_template)
+ }
+ }, {
.alg = "hmac(streebog256)",
.test = alg_test_hash,
.suite = {
@@ -4791,6 +4861,12 @@ static const struct alg_test_desc alg_test_descs[] = {
.test = alg_test_null,
.fips_allowed = 1,
}, {
+ .alg = "ofb(sm4)",
+ .test = alg_test_skcipher,
+ .suite = {
+ .cipher = __VECS(sm4_ofb_tv_template)
+ }
+ }, {
.alg = "pcbc(fcrypt)",
.test = alg_test_skcipher,
.suite = {
@@ -4829,6 +4905,12 @@ static const struct alg_test_desc alg_test_descs[] = {
.cipher = __VECS(aes_ctr_rfc3686_tv_template)
}
}, {
+ .alg = "rfc3686(ctr(sm4))",
+ .test = alg_test_skcipher,
+ .suite = {
+ .cipher = __VECS(sm4_ctr_rfc3686_tv_template)
+ }
+ }, {
.alg = "rfc4106(gcm(aes))",
.generic_driver = "rfc4106(gcm_base(ctr(aes-generic),ghash-generic))",
.test = alg_test_aead,
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index ef7d21f39d4a..48da646651cb 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -1030,6 +1030,1231 @@ static const struct kpp_testvec dh_tv_template[] = {
}
};
+static const struct kpp_testvec curve25519_tv_template[] = {
+{
+ .secret = (u8[32]){ 0x77, 0x07, 0x6d, 0x0a, 0x73, 0x18, 0xa5, 0x7d,
+ 0x3c, 0x16, 0xc1, 0x72, 0x51, 0xb2, 0x66, 0x45,
+ 0xdf, 0x4c, 0x2f, 0x87, 0xeb, 0xc0, 0x99, 0x2a,
+ 0xb1, 0x77, 0xfb, 0xa5, 0x1d, 0xb9, 0x2c, 0x2a },
+ .b_public = (u8[32]){ 0xde, 0x9e, 0xdb, 0x7d, 0x7b, 0x7d, 0xc1, 0xb4,
+ 0xd3, 0x5b, 0x61, 0xc2, 0xec, 0xe4, 0x35, 0x37,
+ 0x3f, 0x83, 0x43, 0xc8, 0x5b, 0x78, 0x67, 0x4d,
+ 0xad, 0xfc, 0x7e, 0x14, 0x6f, 0x88, 0x2b, 0x4f },
+ .expected_ss = (u8[32]){ 0x4a, 0x5d, 0x9d, 0x5b, 0xa4, 0xce, 0x2d, 0xe1,
+ 0x72, 0x8e, 0x3b, 0xf4, 0x80, 0x35, 0x0f, 0x25,
+ 0xe0, 0x7e, 0x21, 0xc9, 0x47, 0xd1, 0x9e, 0x33,
+ 0x76, 0xf0, 0x9b, 0x3c, 0x1e, 0x16, 0x17, 0x42 },
+ .secret_size = 32,
+ .b_public_size = 32,
+ .expected_ss_size = 32,
+
+},
+{
+ .secret = (u8[32]){ 0x5d, 0xab, 0x08, 0x7e, 0x62, 0x4a, 0x8a, 0x4b,
+ 0x79, 0xe1, 0x7f, 0x8b, 0x83, 0x80, 0x0e, 0xe6,
+ 0x6f, 0x3b, 0xb1, 0x29, 0x26, 0x18, 0xb6, 0xfd,
+ 0x1c, 0x2f, 0x8b, 0x27, 0xff, 0x88, 0xe0, 0xeb },
+ .b_public = (u8[32]){ 0x85, 0x20, 0xf0, 0x09, 0x89, 0x30, 0xa7, 0x54,
+ 0x74, 0x8b, 0x7d, 0xdc, 0xb4, 0x3e, 0xf7, 0x5a,
+ 0x0d, 0xbf, 0x3a, 0x0d, 0x26, 0x38, 0x1a, 0xf4,
+ 0xeb, 0xa4, 0xa9, 0x8e, 0xaa, 0x9b, 0x4e, 0x6a },
+ .expected_ss = (u8[32]){ 0x4a, 0x5d, 0x9d, 0x5b, 0xa4, 0xce, 0x2d, 0xe1,
+ 0x72, 0x8e, 0x3b, 0xf4, 0x80, 0x35, 0x0f, 0x25,
+ 0xe0, 0x7e, 0x21, 0xc9, 0x47, 0xd1, 0x9e, 0x33,
+ 0x76, 0xf0, 0x9b, 0x3c, 0x1e, 0x16, 0x17, 0x42 },
+ .secret_size = 32,
+ .b_public_size = 32,
+ .expected_ss_size = 32,
+
+},
+{
+ .secret = (u8[32]){ 1 },
+ .b_public = (u8[32]){ 0x25, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .expected_ss = (u8[32]){ 0x3c, 0x77, 0x77, 0xca, 0xf9, 0x97, 0xb2, 0x64,
+ 0x41, 0x60, 0x77, 0x66, 0x5b, 0x4e, 0x22, 0x9d,
+ 0x0b, 0x95, 0x48, 0xdc, 0x0c, 0xd8, 0x19, 0x98,
+ 0xdd, 0xcd, 0xc5, 0xc8, 0x53, 0x3c, 0x79, 0x7f },
+ .secret_size = 32,
+ .b_public_size = 32,
+ .expected_ss_size = 32,
+
+},
+{
+ .secret = (u8[32]){ 1 },
+ .b_public = (u8[32]){ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+ .expected_ss = (u8[32]){ 0xb3, 0x2d, 0x13, 0x62, 0xc2, 0x48, 0xd6, 0x2f,
+ 0xe6, 0x26, 0x19, 0xcf, 0xf0, 0x4d, 0xd4, 0x3d,
+ 0xb7, 0x3f, 0xfc, 0x1b, 0x63, 0x08, 0xed, 0xe3,
+ 0x0b, 0x78, 0xd8, 0x73, 0x80, 0xf1, 0xe8, 0x34 },
+ .secret_size = 32,
+ .b_public_size = 32,
+ .expected_ss_size = 32,
+
+},
+{
+ .secret = (u8[32]){ 0xa5, 0x46, 0xe3, 0x6b, 0xf0, 0x52, 0x7c, 0x9d,
+ 0x3b, 0x16, 0x15, 0x4b, 0x82, 0x46, 0x5e, 0xdd,
+ 0x62, 0x14, 0x4c, 0x0a, 0xc1, 0xfc, 0x5a, 0x18,
+ 0x50, 0x6a, 0x22, 0x44, 0xba, 0x44, 0x9a, 0xc4 },
+ .b_public = (u8[32]){ 0xe6, 0xdb, 0x68, 0x67, 0x58, 0x30, 0x30, 0xdb,
+ 0x35, 0x94, 0xc1, 0xa4, 0x24, 0xb1, 0x5f, 0x7c,
+ 0x72, 0x66, 0x24, 0xec, 0x26, 0xb3, 0x35, 0x3b,
+ 0x10, 0xa9, 0x03, 0xa6, 0xd0, 0xab, 0x1c, 0x4c },
+ .expected_ss = (u8[32]){ 0xc3, 0xda, 0x55, 0x37, 0x9d, 0xe9, 0xc6, 0x90,
+ 0x8e, 0x94, 0xea, 0x4d, 0xf2, 0x8d, 0x08, 0x4f,
+ 0x32, 0xec, 0xcf, 0x03, 0x49, 0x1c, 0x71, 0xf7,
+ 0x54, 0xb4, 0x07, 0x55, 0x77, 0xa2, 0x85, 0x52 },
+ .secret_size = 32,
+ .b_public_size = 32,
+ .expected_ss_size = 32,
+
+},
+{
+ .secret = (u8[32]){ 0xff, 0xff, 0xff, 0xff, 0x0a, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+ .b_public = (u8[32]){ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0x0a, 0x00, 0xfb, 0x9f },
+ .expected_ss = (u8[32]){ 0x77, 0x52, 0xb6, 0x18, 0xc1, 0x2d, 0x48, 0xd2,
+ 0xc6, 0x93, 0x46, 0x83, 0x81, 0x7c, 0xc6, 0x57,
+ 0xf3, 0x31, 0x03, 0x19, 0x49, 0x48, 0x20, 0x05,
+ 0x42, 0x2b, 0x4e, 0xae, 0x8d, 0x1d, 0x43, 0x23 },
+ .secret_size = 32,
+ .b_public_size = 32,
+ .expected_ss_size = 32,
+
+},
+{
+ .secret = (u8[32]){ 0x8e, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .b_public = (u8[32]){ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8e, 0x06 },
+ .expected_ss = (u8[32]){ 0x5a, 0xdf, 0xaa, 0x25, 0x86, 0x8e, 0x32, 0x3d,
+ 0xae, 0x49, 0x62, 0xc1, 0x01, 0x5c, 0xb3, 0x12,
+ 0xe1, 0xc5, 0xc7, 0x9e, 0x95, 0x3f, 0x03, 0x99,
+ 0xb0, 0xba, 0x16, 0x22, 0xf3, 0xb6, 0xf7, 0x0c },
+ .secret_size = 32,
+ .b_public_size = 32,
+ .expected_ss_size = 32,
+
+},
+/* wycheproof - normal case */
+{
+ .secret = (u8[32]){ 0x48, 0x52, 0x83, 0x4d, 0x9d, 0x6b, 0x77, 0xda,
+ 0xde, 0xab, 0xaa, 0xf2, 0xe1, 0x1d, 0xca, 0x66,
+ 0xd1, 0x9f, 0xe7, 0x49, 0x93, 0xa7, 0xbe, 0xc3,
+ 0x6c, 0x6e, 0x16, 0xa0, 0x98, 0x3f, 0xea, 0xba },
+ .b_public = (u8[32]){ 0x9c, 0x64, 0x7d, 0x9a, 0xe5, 0x89, 0xb9, 0xf5,
+ 0x8f, 0xdc, 0x3c, 0xa4, 0x94, 0x7e, 0xfb, 0xc9,
+ 0x15, 0xc4, 0xb2, 0xe0, 0x8e, 0x74, 0x4a, 0x0e,
+ 0xdf, 0x46, 0x9d, 0xac, 0x59, 0xc8, 0xf8, 0x5a },
+ .expected_ss = (u8[32]){ 0x87, 0xb7, 0xf2, 0x12, 0xb6, 0x27, 0xf7, 0xa5,
+ 0x4c, 0xa5, 0xe0, 0xbc, 0xda, 0xdd, 0xd5, 0x38,
+ 0x9d, 0x9d, 0xe6, 0x15, 0x6c, 0xdb, 0xcf, 0x8e,
+ 0xbe, 0x14, 0xff, 0xbc, 0xfb, 0x43, 0x65, 0x51 },
+ .secret_size = 32,
+ .b_public_size = 32,
+ .expected_ss_size = 32,
+
+},
+/* wycheproof - public key on twist */
+{
+ .secret = (u8[32]){ 0x58, 0x8c, 0x06, 0x1a, 0x50, 0x80, 0x4a, 0xc4,
+ 0x88, 0xad, 0x77, 0x4a, 0xc7, 0x16, 0xc3, 0xf5,
+ 0xba, 0x71, 0x4b, 0x27, 0x12, 0xe0, 0x48, 0x49,
+ 0x13, 0x79, 0xa5, 0x00, 0x21, 0x19, 0x98, 0xa8 },
+ .b_public = (u8[32]){ 0x63, 0xaa, 0x40, 0xc6, 0xe3, 0x83, 0x46, 0xc5,
+ 0xca, 0xf2, 0x3a, 0x6d, 0xf0, 0xa5, 0xe6, 0xc8,
+ 0x08, 0x89, 0xa0, 0x86, 0x47, 0xe5, 0x51, 0xb3,
+ 0x56, 0x34, 0x49, 0xbe, 0xfc, 0xfc, 0x97, 0x33 },
+ .expected_ss = (u8[32]){ 0xb1, 0xa7, 0x07, 0x51, 0x94, 0x95, 0xff, 0xff,
+ 0xb2, 0x98, 0xff, 0x94, 0x17, 0x16, 0xb0, 0x6d,
+ 0xfa, 0xb8, 0x7c, 0xf8, 0xd9, 0x11, 0x23, 0xfe,
+ 0x2b, 0xe9, 0xa2, 0x33, 0xdd, 0xa2, 0x22, 0x12 },
+ .secret_size = 32,
+ .b_public_size = 32,
+ .expected_ss_size = 32,
+
+},
+/* wycheproof - public key on twist */
+{
+ .secret = (u8[32]){ 0xb0, 0x5b, 0xfd, 0x32, 0xe5, 0x53, 0x25, 0xd9,
+ 0xfd, 0x64, 0x8c, 0xb3, 0x02, 0x84, 0x80, 0x39,
+ 0x00, 0x0b, 0x39, 0x0e, 0x44, 0xd5, 0x21, 0xe5,
+ 0x8a, 0xab, 0x3b, 0x29, 0xa6, 0x96, 0x0b, 0xa8 },
+ .b_public = (u8[32]){ 0x0f, 0x83, 0xc3, 0x6f, 0xde, 0xd9, 0xd3, 0x2f,
+ 0xad, 0xf4, 0xef, 0xa3, 0xae, 0x93, 0xa9, 0x0b,
+ 0xb5, 0xcf, 0xa6, 0x68, 0x93, 0xbc, 0x41, 0x2c,
+ 0x43, 0xfa, 0x72, 0x87, 0xdb, 0xb9, 0x97, 0x79 },
+ .expected_ss = (u8[32]){ 0x67, 0xdd, 0x4a, 0x6e, 0x16, 0x55, 0x33, 0x53,
+ 0x4c, 0x0e, 0x3f, 0x17, 0x2e, 0x4a, 0xb8, 0x57,
+ 0x6b, 0xca, 0x92, 0x3a, 0x5f, 0x07, 0xb2, 0xc0,
+ 0x69, 0xb4, 0xc3, 0x10, 0xff, 0x2e, 0x93, 0x5b },
+ .secret_size = 32,
+ .b_public_size = 32,
+ .expected_ss_size = 32,
+
+},
+/* wycheproof - public key on twist */
+{
+ .secret = (u8[32]){ 0x70, 0xe3, 0x4b, 0xcb, 0xe1, 0xf4, 0x7f, 0xbc,
+ 0x0f, 0xdd, 0xfd, 0x7c, 0x1e, 0x1a, 0xa5, 0x3d,
+ 0x57, 0xbf, 0xe0, 0xf6, 0x6d, 0x24, 0x30, 0x67,
+ 0xb4, 0x24, 0xbb, 0x62, 0x10, 0xbe, 0xd1, 0x9c },
+ .b_public = (u8[32]){ 0x0b, 0x82, 0x11, 0xa2, 0xb6, 0x04, 0x90, 0x97,
+ 0xf6, 0x87, 0x1c, 0x6c, 0x05, 0x2d, 0x3c, 0x5f,
+ 0xc1, 0xba, 0x17, 0xda, 0x9e, 0x32, 0xae, 0x45,
+ 0x84, 0x03, 0xb0, 0x5b, 0xb2, 0x83, 0x09, 0x2a },
+ .expected_ss = (u8[32]){ 0x4a, 0x06, 0x38, 0xcf, 0xaa, 0x9e, 0xf1, 0x93,
+ 0x3b, 0x47, 0xf8, 0x93, 0x92, 0x96, 0xa6, 0xb2,
+ 0x5b, 0xe5, 0x41, 0xef, 0x7f, 0x70, 0xe8, 0x44,
+ 0xc0, 0xbc, 0xc0, 0x0b, 0x13, 0x4d, 0xe6, 0x4a },
+ .secret_size = 32,
+ .b_public_size = 32,
+ .expected_ss_size = 32,
+
+},
+/* wycheproof - public key on twist */
+{
+ .secret = (u8[32]){ 0x68, 0xc1, 0xf3, 0xa6, 0x53, 0xa4, 0xcd, 0xb1,
+ 0xd3, 0x7b, 0xba, 0x94, 0x73, 0x8f, 0x8b, 0x95,
+ 0x7a, 0x57, 0xbe, 0xb2, 0x4d, 0x64, 0x6e, 0x99,
+ 0x4d, 0xc2, 0x9a, 0x27, 0x6a, 0xad, 0x45, 0x8d },
+ .b_public = (u8[32]){ 0x34, 0x3a, 0xc2, 0x0a, 0x3b, 0x9c, 0x6a, 0x27,
+ 0xb1, 0x00, 0x81, 0x76, 0x50, 0x9a, 0xd3, 0x07,
+ 0x35, 0x85, 0x6e, 0xc1, 0xc8, 0xd8, 0xfc, 0xae,
+ 0x13, 0x91, 0x2d, 0x08, 0xd1, 0x52, 0xf4, 0x6c },
+ .expected_ss = (u8[32]){ 0x39, 0x94, 0x91, 0xfc, 0xe8, 0xdf, 0xab, 0x73,
+ 0xb4, 0xf9, 0xf6, 0x11, 0xde, 0x8e, 0xa0, 0xb2,
+ 0x7b, 0x28, 0xf8, 0x59, 0x94, 0x25, 0x0b, 0x0f,
+ 0x47, 0x5d, 0x58, 0x5d, 0x04, 0x2a, 0xc2, 0x07 },
+ .secret_size = 32,
+ .b_public_size = 32,
+ .expected_ss_size = 32,
+
+},
+/* wycheproof - public key on twist */
+{
+ .secret = (u8[32]){ 0xd8, 0x77, 0xb2, 0x6d, 0x06, 0xdf, 0xf9, 0xd9,
+ 0xf7, 0xfd, 0x4c, 0x5b, 0x37, 0x69, 0xf8, 0xcd,
+ 0xd5, 0xb3, 0x05, 0x16, 0xa5, 0xab, 0x80, 0x6b,
+ 0xe3, 0x24, 0xff, 0x3e, 0xb6, 0x9e, 0xa0, 0xb2 },
+ .b_public = (u8[32]){ 0xfa, 0x69, 0x5f, 0xc7, 0xbe, 0x8d, 0x1b, 0xe5,
+ 0xbf, 0x70, 0x48, 0x98, 0xf3, 0x88, 0xc4, 0x52,
+ 0xba, 0xfd, 0xd3, 0xb8, 0xea, 0xe8, 0x05, 0xf8,
+ 0x68, 0x1a, 0x8d, 0x15, 0xc2, 0xd4, 0xe1, 0x42 },
+ .expected_ss = (u8[32]){ 0x2c, 0x4f, 0xe1, 0x1d, 0x49, 0x0a, 0x53, 0x86,
+ 0x17, 0x76, 0xb1, 0x3b, 0x43, 0x54, 0xab, 0xd4,
+ 0xcf, 0x5a, 0x97, 0x69, 0x9d, 0xb6, 0xe6, 0xc6,
+ 0x8c, 0x16, 0x26, 0xd0, 0x76, 0x62, 0xf7, 0x58 },
+ .secret_size = 32,
+ .b_public_size = 32,
+ .expected_ss_size = 32,
+
+},
+/* wycheproof - edge case on twist */
+{
+ .secret = (u8[32]){ 0x38, 0xdd, 0xe9, 0xf3, 0xe7, 0xb7, 0x99, 0x04,
+ 0x5f, 0x9a, 0xc3, 0x79, 0x3d, 0x4a, 0x92, 0x77,
+ 0xda, 0xde, 0xad, 0xc4, 0x1b, 0xec, 0x02, 0x90,
+ 0xf8, 0x1f, 0x74, 0x4f, 0x73, 0x77, 0x5f, 0x84 },
+ .b_public = (u8[32]){ 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .expected_ss = (u8[32]){ 0x9a, 0x2c, 0xfe, 0x84, 0xff, 0x9c, 0x4a, 0x97,
+ 0x39, 0x62, 0x5c, 0xae, 0x4a, 0x3b, 0x82, 0xa9,
+ 0x06, 0x87, 0x7a, 0x44, 0x19, 0x46, 0xf8, 0xd7,
+ 0xb3, 0xd7, 0x95, 0xfe, 0x8f, 0x5d, 0x16, 0x39 },
+ .secret_size = 32,
+ .b_public_size = 32,
+ .expected_ss_size = 32,
+
+},
+/* wycheproof - edge case on twist */
+{
+ .secret = (u8[32]){ 0x98, 0x57, 0xa9, 0x14, 0xe3, 0xc2, 0x90, 0x36,
+ 0xfd, 0x9a, 0x44, 0x2b, 0xa5, 0x26, 0xb5, 0xcd,
+ 0xcd, 0xf2, 0x82, 0x16, 0x15, 0x3e, 0x63, 0x6c,
+ 0x10, 0x67, 0x7a, 0xca, 0xb6, 0xbd, 0x6a, 0xa5 },
+ .b_public = (u8[32]){ 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .expected_ss = (u8[32]){ 0x4d, 0xa4, 0xe0, 0xaa, 0x07, 0x2c, 0x23, 0x2e,
+ 0xe2, 0xf0, 0xfa, 0x4e, 0x51, 0x9a, 0xe5, 0x0b,
+ 0x52, 0xc1, 0xed, 0xd0, 0x8a, 0x53, 0x4d, 0x4e,
+ 0xf3, 0x46, 0xc2, 0xe1, 0x06, 0xd2, 0x1d, 0x60 },
+ .secret_size = 32,
+ .b_public_size = 32,
+ .expected_ss_size = 32,
+
+},
+/* wycheproof - edge case on twist */
+{
+ .secret = (u8[32]){ 0x48, 0xe2, 0x13, 0x0d, 0x72, 0x33, 0x05, 0xed,
+ 0x05, 0xe6, 0xe5, 0x89, 0x4d, 0x39, 0x8a, 0x5e,
+ 0x33, 0x36, 0x7a, 0x8c, 0x6a, 0xac, 0x8f, 0xcd,
+ 0xf0, 0xa8, 0x8e, 0x4b, 0x42, 0x82, 0x0d, 0xb7 },
+ .b_public = (u8[32]){ 0xff, 0xff, 0xff, 0x03, 0x00, 0x00, 0xf8, 0xff,
+ 0xff, 0x1f, 0x00, 0x00, 0xc0, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0xfe, 0xff, 0xff, 0x07, 0x00,
+ 0x00, 0xf0, 0xff, 0xff, 0x3f, 0x00, 0x00, 0x00 },
+ .expected_ss = (u8[32]){ 0x9e, 0xd1, 0x0c, 0x53, 0x74, 0x7f, 0x64, 0x7f,
+ 0x82, 0xf4, 0x51, 0x25, 0xd3, 0xde, 0x15, 0xa1,
+ 0xe6, 0xb8, 0x24, 0x49, 0x6a, 0xb4, 0x04, 0x10,
+ 0xff, 0xcc, 0x3c, 0xfe, 0x95, 0x76, 0x0f, 0x3b },
+ .secret_size = 32,
+ .b_public_size = 32,
+ .expected_ss_size = 32,
+
+},
+/* wycheproof - edge case on twist */
+{
+ .secret = (u8[32]){ 0x28, 0xf4, 0x10, 0x11, 0x69, 0x18, 0x51, 0xb3,
+ 0xa6, 0x2b, 0x64, 0x15, 0x53, 0xb3, 0x0d, 0x0d,
+ 0xfd, 0xdc, 0xb8, 0xff, 0xfc, 0xf5, 0x37, 0x00,
+ 0xa7, 0xbe, 0x2f, 0x6a, 0x87, 0x2e, 0x9f, 0xb0 },
+ .b_public = (u8[32]){ 0x00, 0x00, 0x00, 0xfc, 0xff, 0xff, 0x07, 0x00,
+ 0x00, 0xe0, 0xff, 0xff, 0x3f, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0x01, 0x00, 0x00, 0xf8, 0xff,
+ 0xff, 0x0f, 0x00, 0x00, 0xc0, 0xff, 0xff, 0x7f },
+ .expected_ss = (u8[32]){ 0xcf, 0x72, 0xb4, 0xaa, 0x6a, 0xa1, 0xc9, 0xf8,
+ 0x94, 0xf4, 0x16, 0x5b, 0x86, 0x10, 0x9a, 0xa4,
+ 0x68, 0x51, 0x76, 0x48, 0xe1, 0xf0, 0xcc, 0x70,
+ 0xe1, 0xab, 0x08, 0x46, 0x01, 0x76, 0x50, 0x6b },
+ .secret_size = 32,
+ .b_public_size = 32,
+ .expected_ss_size = 32,
+
+},
+/* wycheproof - edge case on twist */
+{
+ .secret = (u8[32]){ 0x18, 0xa9, 0x3b, 0x64, 0x99, 0xb9, 0xf6, 0xb3,
+ 0x22, 0x5c, 0xa0, 0x2f, 0xef, 0x41, 0x0e, 0x0a,
+ 0xde, 0xc2, 0x35, 0x32, 0x32, 0x1d, 0x2d, 0x8e,
+ 0xf1, 0xa6, 0xd6, 0x02, 0xa8, 0xc6, 0x5b, 0x83 },
+ .b_public = (u8[32]){ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0x7f },
+ .expected_ss = (u8[32]){ 0x5d, 0x50, 0xb6, 0x28, 0x36, 0xbb, 0x69, 0x57,
+ 0x94, 0x10, 0x38, 0x6c, 0xf7, 0xbb, 0x81, 0x1c,
+ 0x14, 0xbf, 0x85, 0xb1, 0xc7, 0xb1, 0x7e, 0x59,
+ 0x24, 0xc7, 0xff, 0xea, 0x91, 0xef, 0x9e, 0x12 },
+ .secret_size = 32,
+ .b_public_size = 32,
+ .expected_ss_size = 32,
+
+},
+/* wycheproof - edge case on twist */
+{
+ .secret = (u8[32]){ 0xc0, 0x1d, 0x13, 0x05, 0xa1, 0x33, 0x8a, 0x1f,
+ 0xca, 0xc2, 0xba, 0x7e, 0x2e, 0x03, 0x2b, 0x42,
+ 0x7e, 0x0b, 0x04, 0x90, 0x31, 0x65, 0xac, 0xa9,
+ 0x57, 0xd8, 0xd0, 0x55, 0x3d, 0x87, 0x17, 0xb0 },
+ .b_public = (u8[32]){ 0xea, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f },
+ .expected_ss = (u8[32]){ 0x19, 0x23, 0x0e, 0xb1, 0x48, 0xd5, 0xd6, 0x7c,
+ 0x3c, 0x22, 0xab, 0x1d, 0xae, 0xff, 0x80, 0xa5,
+ 0x7e, 0xae, 0x42, 0x65, 0xce, 0x28, 0x72, 0x65,
+ 0x7b, 0x2c, 0x80, 0x99, 0xfc, 0x69, 0x8e, 0x50 },
+ .secret_size = 32,
+ .b_public_size = 32,
+ .expected_ss_size = 32,
+
+},
+/* wycheproof - edge case for public key */
+{
+ .secret = (u8[32]){ 0x38, 0x6f, 0x7f, 0x16, 0xc5, 0x07, 0x31, 0xd6,
+ 0x4f, 0x82, 0xe6, 0xa1, 0x70, 0xb1, 0x42, 0xa4,
+ 0xe3, 0x4f, 0x31, 0xfd, 0x77, 0x68, 0xfc, 0xb8,
+ 0x90, 0x29, 0x25, 0xe7, 0xd1, 0xe2, 0x1a, 0xbe },
+ .b_public = (u8[32]){ 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .expected_ss = (u8[32]){ 0x0f, 0xca, 0xb5, 0xd8, 0x42, 0xa0, 0x78, 0xd7,
+ 0xa7, 0x1f, 0xc5, 0x9b, 0x57, 0xbf, 0xb4, 0xca,
+ 0x0b, 0xe6, 0x87, 0x3b, 0x49, 0xdc, 0xdb, 0x9f,
+ 0x44, 0xe1, 0x4a, 0xe8, 0xfb, 0xdf, 0xa5, 0x42 },
+ .secret_size = 32,
+ .b_public_size = 32,
+ .expected_ss_size = 32,
+
+},
+/* wycheproof - edge case for public key */
+{
+ .secret = (u8[32]){ 0xe0, 0x23, 0xa2, 0x89, 0xbd, 0x5e, 0x90, 0xfa,
+ 0x28, 0x04, 0xdd, 0xc0, 0x19, 0xa0, 0x5e, 0xf3,
+ 0xe7, 0x9d, 0x43, 0x4b, 0xb6, 0xea, 0x2f, 0x52,
+ 0x2e, 0xcb, 0x64, 0x3a, 0x75, 0x29, 0x6e, 0x95 },
+ .b_public = (u8[32]){ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 },
+ .expected_ss = (u8[32]){ 0x54, 0xce, 0x8f, 0x22, 0x75, 0xc0, 0x77, 0xe3,
+ 0xb1, 0x30, 0x6a, 0x39, 0x39, 0xc5, 0xe0, 0x3e,
+ 0xef, 0x6b, 0xbb, 0x88, 0x06, 0x05, 0x44, 0x75,
+ 0x8d, 0x9f, 0xef, 0x59, 0xb0, 0xbc, 0x3e, 0x4f },
+ .secret_size = 32,
+ .b_public_size = 32,
+ .expected_ss_size = 32,
+
+},
+/* wycheproof - edge case for public key */
+{
+ .secret = (u8[32]){ 0x68, 0xf0, 0x10, 0xd6, 0x2e, 0xe8, 0xd9, 0x26,
+ 0x05, 0x3a, 0x36, 0x1c, 0x3a, 0x75, 0xc6, 0xea,
+ 0x4e, 0xbd, 0xc8, 0x60, 0x6a, 0xb2, 0x85, 0x00,
+ 0x3a, 0x6f, 0x8f, 0x40, 0x76, 0xb0, 0x1e, 0x83 },
+ .b_public = (u8[32]){ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x03 },
+ .expected_ss = (u8[32]){ 0xf1, 0x36, 0x77, 0x5c, 0x5b, 0xeb, 0x0a, 0xf8,
+ 0x11, 0x0a, 0xf1, 0x0b, 0x20, 0x37, 0x23, 0x32,
+ 0x04, 0x3c, 0xab, 0x75, 0x24, 0x19, 0x67, 0x87,
+ 0x75, 0xa2, 0x23, 0xdf, 0x57, 0xc9, 0xd3, 0x0d },
+ .secret_size = 32,
+ .b_public_size = 32,
+ .expected_ss_size = 32,
+
+},
+/* wycheproof - edge case for public key */
+{
+ .secret = (u8[32]){ 0x58, 0xeb, 0xcb, 0x35, 0xb0, 0xf8, 0x84, 0x5c,
+ 0xaf, 0x1e, 0xc6, 0x30, 0xf9, 0x65, 0x76, 0xb6,
+ 0x2c, 0x4b, 0x7b, 0x6c, 0x36, 0xb2, 0x9d, 0xeb,
+ 0x2c, 0xb0, 0x08, 0x46, 0x51, 0x75, 0x5c, 0x96 },
+ .b_public = (u8[32]){ 0xff, 0xff, 0xff, 0xfb, 0xff, 0xff, 0xfb, 0xff,
+ 0xff, 0xdf, 0xff, 0xff, 0xdf, 0xff, 0xff, 0xff,
+ 0xfe, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xf7, 0xff,
+ 0xff, 0xf7, 0xff, 0xff, 0xbf, 0xff, 0xff, 0x3f },
+ .expected_ss = (u8[32]){ 0xbf, 0x9a, 0xff, 0xd0, 0x6b, 0x84, 0x40, 0x85,
+ 0x58, 0x64, 0x60, 0x96, 0x2e, 0xf2, 0x14, 0x6f,
+ 0xf3, 0xd4, 0x53, 0x3d, 0x94, 0x44, 0xaa, 0xb0,
+ 0x06, 0xeb, 0x88, 0xcc, 0x30, 0x54, 0x40, 0x7d },
+ .secret_size = 32,
+ .b_public_size = 32,
+ .expected_ss_size = 32,
+
+},
+/* wycheproof - edge case for public key */
+{
+ .secret = (u8[32]){ 0x18, 0x8c, 0x4b, 0xc5, 0xb9, 0xc4, 0x4b, 0x38,
+ 0xbb, 0x65, 0x8b, 0x9b, 0x2a, 0xe8, 0x2d, 0x5b,
+ 0x01, 0x01, 0x5e, 0x09, 0x31, 0x84, 0xb1, 0x7c,
+ 0xb7, 0x86, 0x35, 0x03, 0xa7, 0x83, 0xe1, 0xbb },
+ .b_public = (u8[32]){ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f },
+ .expected_ss = (u8[32]){ 0xd4, 0x80, 0xde, 0x04, 0xf6, 0x99, 0xcb, 0x3b,
+ 0xe0, 0x68, 0x4a, 0x9c, 0xc2, 0xe3, 0x12, 0x81,
+ 0xea, 0x0b, 0xc5, 0xa9, 0xdc, 0xc1, 0x57, 0xd3,
+ 0xd2, 0x01, 0x58, 0xd4, 0x6c, 0xa5, 0x24, 0x6d },
+ .secret_size = 32,
+ .b_public_size = 32,
+ .expected_ss_size = 32,
+
+},
+/* wycheproof - edge case for public key */
+{
+ .secret = (u8[32]){ 0xe0, 0x6c, 0x11, 0xbb, 0x2e, 0x13, 0xce, 0x3d,
+ 0xc7, 0x67, 0x3f, 0x67, 0xf5, 0x48, 0x22, 0x42,
+ 0x90, 0x94, 0x23, 0xa9, 0xae, 0x95, 0xee, 0x98,
+ 0x6a, 0x98, 0x8d, 0x98, 0xfa, 0xee, 0x23, 0xa2 },
+ .b_public = (u8[32]){ 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0x7f },
+ .expected_ss = (u8[32]){ 0x4c, 0x44, 0x01, 0xcc, 0xe6, 0xb5, 0x1e, 0x4c,
+ 0xb1, 0x8f, 0x27, 0x90, 0x24, 0x6c, 0x9b, 0xf9,
+ 0x14, 0xdb, 0x66, 0x77, 0x50, 0xa1, 0xcb, 0x89,
+ 0x06, 0x90, 0x92, 0xaf, 0x07, 0x29, 0x22, 0x76 },
+ .secret_size = 32,
+ .b_public_size = 32,
+ .expected_ss_size = 32,
+
+},
+/* wycheproof - edge case for public key */
+{
+ .secret = (u8[32]){ 0xc0, 0x65, 0x8c, 0x46, 0xdd, 0xe1, 0x81, 0x29,
+ 0x29, 0x38, 0x77, 0x53, 0x5b, 0x11, 0x62, 0xb6,
+ 0xf9, 0xf5, 0x41, 0x4a, 0x23, 0xcf, 0x4d, 0x2c,
+ 0xbc, 0x14, 0x0a, 0x4d, 0x99, 0xda, 0x2b, 0x8f },
+ .b_public = (u8[32]){ 0xeb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f },
+ .expected_ss = (u8[32]){ 0x57, 0x8b, 0xa8, 0xcc, 0x2d, 0xbd, 0xc5, 0x75,
+ 0xaf, 0xcf, 0x9d, 0xf2, 0xb3, 0xee, 0x61, 0x89,
+ 0xf5, 0x33, 0x7d, 0x68, 0x54, 0xc7, 0x9b, 0x4c,
+ 0xe1, 0x65, 0xea, 0x12, 0x29, 0x3b, 0x3a, 0x0f },
+ .secret_size = 32,
+ .b_public_size = 32,
+ .expected_ss_size = 32,
+
+},
+/* wycheproof - public key >= p */
+{
+ .secret = (u8[32]){ 0xf0, 0x1e, 0x48, 0xda, 0xfa, 0xc9, 0xd7, 0xbc,
+ 0xf5, 0x89, 0xcb, 0xc3, 0x82, 0xc8, 0x78, 0xd1,
+ 0x8b, 0xda, 0x35, 0x50, 0x58, 0x9f, 0xfb, 0x5d,
+ 0x50, 0xb5, 0x23, 0xbe, 0xbe, 0x32, 0x9d, 0xae },
+ .b_public = (u8[32]){ 0xef, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f },
+ .expected_ss = (u8[32]){ 0xbd, 0x36, 0xa0, 0x79, 0x0e, 0xb8, 0x83, 0x09,
+ 0x8c, 0x98, 0x8b, 0x21, 0x78, 0x67, 0x73, 0xde,
+ 0x0b, 0x3a, 0x4d, 0xf1, 0x62, 0x28, 0x2c, 0xf1,
+ 0x10, 0xde, 0x18, 0xdd, 0x48, 0x4c, 0xe7, 0x4b },
+ .secret_size = 32,
+ .b_public_size = 32,
+ .expected_ss_size = 32,
+
+},
+/* wycheproof - public key >= p */
+{
+ .secret = (u8[32]){ 0x28, 0x87, 0x96, 0xbc, 0x5a, 0xff, 0x4b, 0x81,
+ 0xa3, 0x75, 0x01, 0x75, 0x7b, 0xc0, 0x75, 0x3a,
+ 0x3c, 0x21, 0x96, 0x47, 0x90, 0xd3, 0x86, 0x99,
+ 0x30, 0x8d, 0xeb, 0xc1, 0x7a, 0x6e, 0xaf, 0x8d },
+ .b_public = (u8[32]){ 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f },
+ .expected_ss = (u8[32]){ 0xb4, 0xe0, 0xdd, 0x76, 0xda, 0x7b, 0x07, 0x17,
+ 0x28, 0xb6, 0x1f, 0x85, 0x67, 0x71, 0xaa, 0x35,
+ 0x6e, 0x57, 0xed, 0xa7, 0x8a, 0x5b, 0x16, 0x55,
+ 0xcc, 0x38, 0x20, 0xfb, 0x5f, 0x85, 0x4c, 0x5c },
+ .secret_size = 32,
+ .b_public_size = 32,
+ .expected_ss_size = 32,
+
+},
+/* wycheproof - public key >= p */
+{
+ .secret = (u8[32]){ 0x98, 0xdf, 0x84, 0x5f, 0x66, 0x51, 0xbf, 0x11,
+ 0x38, 0x22, 0x1f, 0x11, 0x90, 0x41, 0xf7, 0x2b,
+ 0x6d, 0xbc, 0x3c, 0x4a, 0xce, 0x71, 0x43, 0xd9,
+ 0x9f, 0xd5, 0x5a, 0xd8, 0x67, 0x48, 0x0d, 0xa8 },
+ .b_public = (u8[32]){ 0xf1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f },
+ .expected_ss = (u8[32]){ 0x6f, 0xdf, 0x6c, 0x37, 0x61, 0x1d, 0xbd, 0x53,
+ 0x04, 0xdc, 0x0f, 0x2e, 0xb7, 0xc9, 0x51, 0x7e,
+ 0xb3, 0xc5, 0x0e, 0x12, 0xfd, 0x05, 0x0a, 0xc6,
+ 0xde, 0xc2, 0x70, 0x71, 0xd4, 0xbf, 0xc0, 0x34 },
+ .secret_size = 32,
+ .b_public_size = 32,
+ .expected_ss_size = 32,
+
+},
+/* wycheproof - public key >= p */
+{
+ .secret = (u8[32]){ 0xf0, 0x94, 0x98, 0xe4, 0x6f, 0x02, 0xf8, 0x78,
+ 0x82, 0x9e, 0x78, 0xb8, 0x03, 0xd3, 0x16, 0xa2,
+ 0xed, 0x69, 0x5d, 0x04, 0x98, 0xa0, 0x8a, 0xbd,
+ 0xf8, 0x27, 0x69, 0x30, 0xe2, 0x4e, 0xdc, 0xb0 },
+ .b_public = (u8[32]){ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f },
+ .expected_ss = (u8[32]){ 0x4c, 0x8f, 0xc4, 0xb1, 0xc6, 0xab, 0x88, 0xfb,
+ 0x21, 0xf1, 0x8f, 0x6d, 0x4c, 0x81, 0x02, 0x40,
+ 0xd4, 0xe9, 0x46, 0x51, 0xba, 0x44, 0xf7, 0xa2,
+ 0xc8, 0x63, 0xce, 0xc7, 0xdc, 0x56, 0x60, 0x2d },
+ .secret_size = 32,
+ .b_public_size = 32,
+ .expected_ss_size = 32,
+
+},
+/* wycheproof - public key >= p */
+{
+ .secret = (u8[32]){ 0x18, 0x13, 0xc1, 0x0a, 0x5c, 0x7f, 0x21, 0xf9,
+ 0x6e, 0x17, 0xf2, 0x88, 0xc0, 0xcc, 0x37, 0x60,
+ 0x7c, 0x04, 0xc5, 0xf5, 0xae, 0xa2, 0xdb, 0x13,
+ 0x4f, 0x9e, 0x2f, 0xfc, 0x66, 0xbd, 0x9d, 0xb8 },
+ .b_public = (u8[32]){ 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80 },
+ .expected_ss = (u8[32]){ 0x1c, 0xd0, 0xb2, 0x82, 0x67, 0xdc, 0x54, 0x1c,
+ 0x64, 0x2d, 0x6d, 0x7d, 0xca, 0x44, 0xa8, 0xb3,
+ 0x8a, 0x63, 0x73, 0x6e, 0xef, 0x5c, 0x4e, 0x65,
+ 0x01, 0xff, 0xbb, 0xb1, 0x78, 0x0c, 0x03, 0x3c },
+ .secret_size = 32,
+ .b_public_size = 32,
+ .expected_ss_size = 32,
+
+},
+/* wycheproof - public key >= p */
+{
+ .secret = (u8[32]){ 0x78, 0x57, 0xfb, 0x80, 0x86, 0x53, 0x64, 0x5a,
+ 0x0b, 0xeb, 0x13, 0x8a, 0x64, 0xf5, 0xf4, 0xd7,
+ 0x33, 0xa4, 0x5e, 0xa8, 0x4c, 0x3c, 0xda, 0x11,
+ 0xa9, 0xc0, 0x6f, 0x7e, 0x71, 0x39, 0x14, 0x9e },
+ .b_public = (u8[32]){ 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80 },
+ .expected_ss = (u8[32]){ 0x87, 0x55, 0xbe, 0x01, 0xc6, 0x0a, 0x7e, 0x82,
+ 0x5c, 0xff, 0x3e, 0x0e, 0x78, 0xcb, 0x3a, 0xa4,
+ 0x33, 0x38, 0x61, 0x51, 0x6a, 0xa5, 0x9b, 0x1c,
+ 0x51, 0xa8, 0xb2, 0xa5, 0x43, 0xdf, 0xa8, 0x22 },
+ .secret_size = 32,
+ .b_public_size = 32,
+ .expected_ss_size = 32,
+
+},
+/* wycheproof - public key >= p */
+{
+ .secret = (u8[32]){ 0xe0, 0x3a, 0xa8, 0x42, 0xe2, 0xab, 0xc5, 0x6e,
+ 0x81, 0xe8, 0x7b, 0x8b, 0x9f, 0x41, 0x7b, 0x2a,
+ 0x1e, 0x59, 0x13, 0xc7, 0x23, 0xee, 0xd2, 0x8d,
+ 0x75, 0x2f, 0x8d, 0x47, 0xa5, 0x9f, 0x49, 0x8f },
+ .b_public = (u8[32]){ 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80 },
+ .expected_ss = (u8[32]){ 0x54, 0xc9, 0xa1, 0xed, 0x95, 0xe5, 0x46, 0xd2,
+ 0x78, 0x22, 0xa3, 0x60, 0x93, 0x1d, 0xda, 0x60,
+ 0xa1, 0xdf, 0x04, 0x9d, 0xa6, 0xf9, 0x04, 0x25,
+ 0x3c, 0x06, 0x12, 0xbb, 0xdc, 0x08, 0x74, 0x76 },
+ .secret_size = 32,
+ .b_public_size = 32,
+ .expected_ss_size = 32,
+
+},
+/* wycheproof - public key >= p */
+{
+ .secret = (u8[32]){ 0xf8, 0xf7, 0x07, 0xb7, 0x99, 0x9b, 0x18, 0xcb,
+ 0x0d, 0x6b, 0x96, 0x12, 0x4f, 0x20, 0x45, 0x97,
+ 0x2c, 0xa2, 0x74, 0xbf, 0xc1, 0x54, 0xad, 0x0c,
+ 0x87, 0x03, 0x8c, 0x24, 0xc6, 0xd0, 0xd4, 0xb2 },
+ .b_public = (u8[32]){ 0xda, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+ .expected_ss = (u8[32]){ 0xcc, 0x1f, 0x40, 0xd7, 0x43, 0xcd, 0xc2, 0x23,
+ 0x0e, 0x10, 0x43, 0xda, 0xba, 0x8b, 0x75, 0xe8,
+ 0x10, 0xf1, 0xfb, 0xab, 0x7f, 0x25, 0x52, 0x69,
+ 0xbd, 0x9e, 0xbb, 0x29, 0xe6, 0xbf, 0x49, 0x4f },
+ .secret_size = 32,
+ .b_public_size = 32,
+ .expected_ss_size = 32,
+
+},
+/* wycheproof - public key >= p */
+{
+ .secret = (u8[32]){ 0xa0, 0x34, 0xf6, 0x84, 0xfa, 0x63, 0x1e, 0x1a,
+ 0x34, 0x81, 0x18, 0xc1, 0xce, 0x4c, 0x98, 0x23,
+ 0x1f, 0x2d, 0x9e, 0xec, 0x9b, 0xa5, 0x36, 0x5b,
+ 0x4a, 0x05, 0xd6, 0x9a, 0x78, 0x5b, 0x07, 0x96 },
+ .b_public = (u8[32]){ 0xdb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+ .expected_ss = (u8[32]){ 0x54, 0x99, 0x8e, 0xe4, 0x3a, 0x5b, 0x00, 0x7b,
+ 0xf4, 0x99, 0xf0, 0x78, 0xe7, 0x36, 0x52, 0x44,
+ 0x00, 0xa8, 0xb5, 0xc7, 0xe9, 0xb9, 0xb4, 0x37,
+ 0x71, 0x74, 0x8c, 0x7c, 0xdf, 0x88, 0x04, 0x12 },
+ .secret_size = 32,
+ .b_public_size = 32,
+ .expected_ss_size = 32,
+
+},
+/* wycheproof - public key >= p */
+{
+ .secret = (u8[32]){ 0x30, 0xb6, 0xc6, 0xa0, 0xf2, 0xff, 0xa6, 0x80,
+ 0x76, 0x8f, 0x99, 0x2b, 0xa8, 0x9e, 0x15, 0x2d,
+ 0x5b, 0xc9, 0x89, 0x3d, 0x38, 0xc9, 0x11, 0x9b,
+ 0xe4, 0xf7, 0x67, 0xbf, 0xab, 0x6e, 0x0c, 0xa5 },
+ .b_public = (u8[32]){ 0xdc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+ .expected_ss = (u8[32]){ 0xea, 0xd9, 0xb3, 0x8e, 0xfd, 0xd7, 0x23, 0x63,
+ 0x79, 0x34, 0xe5, 0x5a, 0xb7, 0x17, 0xa7, 0xae,
+ 0x09, 0xeb, 0x86, 0xa2, 0x1d, 0xc3, 0x6a, 0x3f,
+ 0xee, 0xb8, 0x8b, 0x75, 0x9e, 0x39, 0x1e, 0x09 },
+ .secret_size = 32,
+ .b_public_size = 32,
+ .expected_ss_size = 32,
+
+},
+/* wycheproof - public key >= p */
+{
+ .secret = (u8[32]){ 0x90, 0x1b, 0x9d, 0xcf, 0x88, 0x1e, 0x01, 0xe0,
+ 0x27, 0x57, 0x50, 0x35, 0xd4, 0x0b, 0x43, 0xbd,
+ 0xc1, 0xc5, 0x24, 0x2e, 0x03, 0x08, 0x47, 0x49,
+ 0x5b, 0x0c, 0x72, 0x86, 0x46, 0x9b, 0x65, 0x91 },
+ .b_public = (u8[32]){ 0xea, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+ .expected_ss = (u8[32]){ 0x60, 0x2f, 0xf4, 0x07, 0x89, 0xb5, 0x4b, 0x41,
+ 0x80, 0x59, 0x15, 0xfe, 0x2a, 0x62, 0x21, 0xf0,
+ 0x7a, 0x50, 0xff, 0xc2, 0xc3, 0xfc, 0x94, 0xcf,
+ 0x61, 0xf1, 0x3d, 0x79, 0x04, 0xe8, 0x8e, 0x0e },
+ .secret_size = 32,
+ .b_public_size = 32,
+ .expected_ss_size = 32,
+
+},
+/* wycheproof - public key >= p */
+{
+ .secret = (u8[32]){ 0x80, 0x46, 0x67, 0x7c, 0x28, 0xfd, 0x82, 0xc9,
+ 0xa1, 0xbd, 0xb7, 0x1a, 0x1a, 0x1a, 0x34, 0xfa,
+ 0xba, 0x12, 0x25, 0xe2, 0x50, 0x7f, 0xe3, 0xf5,
+ 0x4d, 0x10, 0xbd, 0x5b, 0x0d, 0x86, 0x5f, 0x8e },
+ .b_public = (u8[32]){ 0xeb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+ .expected_ss = (u8[32]){ 0xe0, 0x0a, 0xe8, 0xb1, 0x43, 0x47, 0x12, 0x47,
+ 0xba, 0x24, 0xf1, 0x2c, 0x88, 0x55, 0x36, 0xc3,
+ 0xcb, 0x98, 0x1b, 0x58, 0xe1, 0xe5, 0x6b, 0x2b,
+ 0xaf, 0x35, 0xc1, 0x2a, 0xe1, 0xf7, 0x9c, 0x26 },
+ .secret_size = 32,
+ .b_public_size = 32,
+ .expected_ss_size = 32,
+
+},
+/* wycheproof - public key >= p */
+{
+ .secret = (u8[32]){ 0x60, 0x2f, 0x7e, 0x2f, 0x68, 0xa8, 0x46, 0xb8,
+ 0x2c, 0xc2, 0x69, 0xb1, 0xd4, 0x8e, 0x93, 0x98,
+ 0x86, 0xae, 0x54, 0xfd, 0x63, 0x6c, 0x1f, 0xe0,
+ 0x74, 0xd7, 0x10, 0x12, 0x7d, 0x47, 0x24, 0x91 },
+ .b_public = (u8[32]){ 0xef, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+ .expected_ss = (u8[32]){ 0x98, 0xcb, 0x9b, 0x50, 0xdd, 0x3f, 0xc2, 0xb0,
+ 0xd4, 0xf2, 0xd2, 0xbf, 0x7c, 0x5c, 0xfd, 0xd1,
+ 0x0c, 0x8f, 0xcd, 0x31, 0xfc, 0x40, 0xaf, 0x1a,
+ 0xd4, 0x4f, 0x47, 0xc1, 0x31, 0x37, 0x63, 0x62 },
+ .secret_size = 32,
+ .b_public_size = 32,
+ .expected_ss_size = 32,
+
+},
+/* wycheproof - public key >= p */
+{
+ .secret = (u8[32]){ 0x60, 0x88, 0x7b, 0x3d, 0xc7, 0x24, 0x43, 0x02,
+ 0x6e, 0xbe, 0xdb, 0xbb, 0xb7, 0x06, 0x65, 0xf4,
+ 0x2b, 0x87, 0xad, 0xd1, 0x44, 0x0e, 0x77, 0x68,
+ 0xfb, 0xd7, 0xe8, 0xe2, 0xce, 0x5f, 0x63, 0x9d },
+ .b_public = (u8[32]){ 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+ .expected_ss = (u8[32]){ 0x38, 0xd6, 0x30, 0x4c, 0x4a, 0x7e, 0x6d, 0x9f,
+ 0x79, 0x59, 0x33, 0x4f, 0xb5, 0x24, 0x5b, 0xd2,
+ 0xc7, 0x54, 0x52, 0x5d, 0x4c, 0x91, 0xdb, 0x95,
+ 0x02, 0x06, 0x92, 0x62, 0x34, 0xc1, 0xf6, 0x33 },
+ .secret_size = 32,
+ .b_public_size = 32,
+ .expected_ss_size = 32,
+
+},
+/* wycheproof - public key >= p */
+{
+ .secret = (u8[32]){ 0x78, 0xd3, 0x1d, 0xfa, 0x85, 0x44, 0x97, 0xd7,
+ 0x2d, 0x8d, 0xef, 0x8a, 0x1b, 0x7f, 0xb0, 0x06,
+ 0xce, 0xc2, 0xd8, 0xc4, 0x92, 0x46, 0x47, 0xc9,
+ 0x38, 0x14, 0xae, 0x56, 0xfa, 0xed, 0xa4, 0x95 },
+ .b_public = (u8[32]){ 0xf1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+ .expected_ss = (u8[32]){ 0x78, 0x6c, 0xd5, 0x49, 0x96, 0xf0, 0x14, 0xa5,
+ 0xa0, 0x31, 0xec, 0x14, 0xdb, 0x81, 0x2e, 0xd0,
+ 0x83, 0x55, 0x06, 0x1f, 0xdb, 0x5d, 0xe6, 0x80,
+ 0xa8, 0x00, 0xac, 0x52, 0x1f, 0x31, 0x8e, 0x23 },
+ .secret_size = 32,
+ .b_public_size = 32,
+ .expected_ss_size = 32,
+
+},
+/* wycheproof - public key >= p */
+{
+ .secret = (u8[32]){ 0xc0, 0x4c, 0x5b, 0xae, 0xfa, 0x83, 0x02, 0xdd,
+ 0xde, 0xd6, 0xa4, 0xbb, 0x95, 0x77, 0x61, 0xb4,
+ 0xeb, 0x97, 0xae, 0xfa, 0x4f, 0xc3, 0xb8, 0x04,
+ 0x30, 0x85, 0xf9, 0x6a, 0x56, 0x59, 0xb3, 0xa5 },
+ .b_public = (u8[32]){ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+ .expected_ss = (u8[32]){ 0x29, 0xae, 0x8b, 0xc7, 0x3e, 0x9b, 0x10, 0xa0,
+ 0x8b, 0x4f, 0x68, 0x1c, 0x43, 0xc3, 0xe0, 0xac,
+ 0x1a, 0x17, 0x1d, 0x31, 0xb3, 0x8f, 0x1a, 0x48,
+ 0xef, 0xba, 0x29, 0xae, 0x63, 0x9e, 0xa1, 0x34 },
+ .secret_size = 32,
+ .b_public_size = 32,
+ .expected_ss_size = 32,
+
+},
+/* wycheproof - RFC 7748 */
+{
+ .secret = (u8[32]){ 0xa0, 0x46, 0xe3, 0x6b, 0xf0, 0x52, 0x7c, 0x9d,
+ 0x3b, 0x16, 0x15, 0x4b, 0x82, 0x46, 0x5e, 0xdd,
+ 0x62, 0x14, 0x4c, 0x0a, 0xc1, 0xfc, 0x5a, 0x18,
+ 0x50, 0x6a, 0x22, 0x44, 0xba, 0x44, 0x9a, 0x44 },
+ .b_public = (u8[32]){ 0xe6, 0xdb, 0x68, 0x67, 0x58, 0x30, 0x30, 0xdb,
+ 0x35, 0x94, 0xc1, 0xa4, 0x24, 0xb1, 0x5f, 0x7c,
+ 0x72, 0x66, 0x24, 0xec, 0x26, 0xb3, 0x35, 0x3b,
+ 0x10, 0xa9, 0x03, 0xa6, 0xd0, 0xab, 0x1c, 0x4c },
+ .expected_ss = (u8[32]){ 0xc3, 0xda, 0x55, 0x37, 0x9d, 0xe9, 0xc6, 0x90,
+ 0x8e, 0x94, 0xea, 0x4d, 0xf2, 0x8d, 0x08, 0x4f,
+ 0x32, 0xec, 0xcf, 0x03, 0x49, 0x1c, 0x71, 0xf7,
+ 0x54, 0xb4, 0x07, 0x55, 0x77, 0xa2, 0x85, 0x52 },
+ .secret_size = 32,
+ .b_public_size = 32,
+ .expected_ss_size = 32,
+
+},
+/* wycheproof - RFC 7748 */
+{
+ .secret = (u8[32]){ 0x48, 0x66, 0xe9, 0xd4, 0xd1, 0xb4, 0x67, 0x3c,
+ 0x5a, 0xd2, 0x26, 0x91, 0x95, 0x7d, 0x6a, 0xf5,
+ 0xc1, 0x1b, 0x64, 0x21, 0xe0, 0xea, 0x01, 0xd4,
+ 0x2c, 0xa4, 0x16, 0x9e, 0x79, 0x18, 0xba, 0x4d },
+ .b_public = (u8[32]){ 0xe5, 0x21, 0x0f, 0x12, 0x78, 0x68, 0x11, 0xd3,
+ 0xf4, 0xb7, 0x95, 0x9d, 0x05, 0x38, 0xae, 0x2c,
+ 0x31, 0xdb, 0xe7, 0x10, 0x6f, 0xc0, 0x3c, 0x3e,
+ 0xfc, 0x4c, 0xd5, 0x49, 0xc7, 0x15, 0xa4, 0x13 },
+ .expected_ss = (u8[32]){ 0x95, 0xcb, 0xde, 0x94, 0x76, 0xe8, 0x90, 0x7d,
+ 0x7a, 0xad, 0xe4, 0x5c, 0xb4, 0xb8, 0x73, 0xf8,
+ 0x8b, 0x59, 0x5a, 0x68, 0x79, 0x9f, 0xa1, 0x52,
+ 0xe6, 0xf8, 0xf7, 0x64, 0x7a, 0xac, 0x79, 0x57 },
+ .secret_size = 32,
+ .b_public_size = 32,
+ .expected_ss_size = 32,
+
+},
+/* wycheproof - edge case for shared secret */
+{
+ .secret = (u8[32]){ 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4,
+ 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3,
+ 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc,
+ 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 },
+ .b_public = (u8[32]){ 0x0a, 0xb4, 0xe7, 0x63, 0x80, 0xd8, 0x4d, 0xde,
+ 0x4f, 0x68, 0x33, 0xc5, 0x8f, 0x2a, 0x9f, 0xb8,
+ 0xf8, 0x3b, 0xb0, 0x16, 0x9b, 0x17, 0x2b, 0xe4,
+ 0xb6, 0xe0, 0x59, 0x28, 0x87, 0x74, 0x1a, 0x36 },
+ .expected_ss = (u8[32]){ 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .secret_size = 32,
+ .b_public_size = 32,
+ .expected_ss_size = 32,
+
+},
+/* wycheproof - edge case for shared secret */
+{
+ .secret = (u8[32]){ 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4,
+ 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3,
+ 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc,
+ 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 },
+ .b_public = (u8[32]){ 0x89, 0xe1, 0x0d, 0x57, 0x01, 0xb4, 0x33, 0x7d,
+ 0x2d, 0x03, 0x21, 0x81, 0x53, 0x8b, 0x10, 0x64,
+ 0xbd, 0x40, 0x84, 0x40, 0x1c, 0xec, 0xa1, 0xfd,
+ 0x12, 0x66, 0x3a, 0x19, 0x59, 0x38, 0x80, 0x00 },
+ .expected_ss = (u8[32]){ 0x09, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .secret_size = 32,
+ .b_public_size = 32,
+ .expected_ss_size = 32,
+
+},
+/* wycheproof - edge case for shared secret */
+{
+ .secret = (u8[32]){ 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4,
+ 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3,
+ 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc,
+ 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 },
+ .b_public = (u8[32]){ 0x2b, 0x55, 0xd3, 0xaa, 0x4a, 0x8f, 0x80, 0xc8,
+ 0xc0, 0xb2, 0xae, 0x5f, 0x93, 0x3e, 0x85, 0xaf,
+ 0x49, 0xbe, 0xac, 0x36, 0xc2, 0xfa, 0x73, 0x94,
+ 0xba, 0xb7, 0x6c, 0x89, 0x33, 0xf8, 0xf8, 0x1d },
+ .expected_ss = (u8[32]){ 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .secret_size = 32,
+ .b_public_size = 32,
+ .expected_ss_size = 32,
+
+},
+/* wycheproof - edge case for shared secret */
+{
+ .secret = (u8[32]){ 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4,
+ 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3,
+ 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc,
+ 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 },
+ .b_public = (u8[32]){ 0x63, 0xe5, 0xb1, 0xfe, 0x96, 0x01, 0xfe, 0x84,
+ 0x38, 0x5d, 0x88, 0x66, 0xb0, 0x42, 0x12, 0x62,
+ 0xf7, 0x8f, 0xbf, 0xa5, 0xaf, 0xf9, 0x58, 0x5e,
+ 0x62, 0x66, 0x79, 0xb1, 0x85, 0x47, 0xd9, 0x59 },
+ .expected_ss = (u8[32]){ 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f },
+ .secret_size = 32,
+ .b_public_size = 32,
+ .expected_ss_size = 32,
+
+},
+/* wycheproof - edge case for shared secret */
+{
+ .secret = (u8[32]){ 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4,
+ 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3,
+ 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc,
+ 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 },
+ .b_public = (u8[32]){ 0xe4, 0x28, 0xf3, 0xda, 0xc1, 0x78, 0x09, 0xf8,
+ 0x27, 0xa5, 0x22, 0xce, 0x32, 0x35, 0x50, 0x58,
+ 0xd0, 0x73, 0x69, 0x36, 0x4a, 0xa7, 0x89, 0x02,
+ 0xee, 0x10, 0x13, 0x9b, 0x9f, 0x9d, 0xd6, 0x53 },
+ .expected_ss = (u8[32]){ 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f },
+ .secret_size = 32,
+ .b_public_size = 32,
+ .expected_ss_size = 32,
+
+},
+/* wycheproof - edge case for shared secret */
+{
+ .secret = (u8[32]){ 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4,
+ 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3,
+ 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc,
+ 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 },
+ .b_public = (u8[32]){ 0xb3, 0xb5, 0x0e, 0x3e, 0xd3, 0xa4, 0x07, 0xb9,
+ 0x5d, 0xe9, 0x42, 0xef, 0x74, 0x57, 0x5b, 0x5a,
+ 0xb8, 0xa1, 0x0c, 0x09, 0xee, 0x10, 0x35, 0x44,
+ 0xd6, 0x0b, 0xdf, 0xed, 0x81, 0x38, 0xab, 0x2b },
+ .expected_ss = (u8[32]){ 0xf9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f },
+ .secret_size = 32,
+ .b_public_size = 32,
+ .expected_ss_size = 32,
+
+},
+/* wycheproof - edge case for shared secret */
+{
+ .secret = (u8[32]){ 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4,
+ 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3,
+ 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc,
+ 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 },
+ .b_public = (u8[32]){ 0x21, 0x3f, 0xff, 0xe9, 0x3d, 0x5e, 0xa8, 0xcd,
+ 0x24, 0x2e, 0x46, 0x28, 0x44, 0x02, 0x99, 0x22,
+ 0xc4, 0x3c, 0x77, 0xc9, 0xe3, 0xe4, 0x2f, 0x56,
+ 0x2f, 0x48, 0x5d, 0x24, 0xc5, 0x01, 0xa2, 0x0b },
+ .expected_ss = (u8[32]){ 0xf3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f },
+ .secret_size = 32,
+ .b_public_size = 32,
+ .expected_ss_size = 32,
+
+},
+/* wycheproof - edge case for shared secret */
+{
+ .secret = (u8[32]){ 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4,
+ 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3,
+ 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc,
+ 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 },
+ .b_public = (u8[32]){ 0x91, 0xb2, 0x32, 0xa1, 0x78, 0xb3, 0xcd, 0x53,
+ 0x09, 0x32, 0x44, 0x1e, 0x61, 0x39, 0x41, 0x8f,
+ 0x72, 0x17, 0x22, 0x92, 0xf1, 0xda, 0x4c, 0x18,
+ 0x34, 0xfc, 0x5e, 0xbf, 0xef, 0xb5, 0x1e, 0x3f },
+ .expected_ss = (u8[32]){ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x03 },
+ .secret_size = 32,
+ .b_public_size = 32,
+ .expected_ss_size = 32,
+
+},
+/* wycheproof - edge case for shared secret */
+{
+ .secret = (u8[32]){ 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4,
+ 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3,
+ 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc,
+ 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 },
+ .b_public = (u8[32]){ 0x04, 0x5c, 0x6e, 0x11, 0xc5, 0xd3, 0x32, 0x55,
+ 0x6c, 0x78, 0x22, 0xfe, 0x94, 0xeb, 0xf8, 0x9b,
+ 0x56, 0xa3, 0x87, 0x8d, 0xc2, 0x7c, 0xa0, 0x79,
+ 0x10, 0x30, 0x58, 0x84, 0x9f, 0xab, 0xcb, 0x4f },
+ .expected_ss = (u8[32]){ 0xe5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f },
+ .secret_size = 32,
+ .b_public_size = 32,
+ .expected_ss_size = 32,
+
+},
+/* wycheproof - edge case for shared secret */
+{
+ .secret = (u8[32]){ 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4,
+ 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3,
+ 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc,
+ 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 },
+ .b_public = (u8[32]){ 0x1c, 0xa2, 0x19, 0x0b, 0x71, 0x16, 0x35, 0x39,
+ 0x06, 0x3c, 0x35, 0x77, 0x3b, 0xda, 0x0c, 0x9c,
+ 0x92, 0x8e, 0x91, 0x36, 0xf0, 0x62, 0x0a, 0xeb,
+ 0x09, 0x3f, 0x09, 0x91, 0x97, 0xb7, 0xf7, 0x4e },
+ .expected_ss = (u8[32]){ 0xe3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f },
+ .secret_size = 32,
+ .b_public_size = 32,
+ .expected_ss_size = 32,
+
+},
+/* wycheproof - edge case for shared secret */
+{
+ .secret = (u8[32]){ 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4,
+ 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3,
+ 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc,
+ 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 },
+ .b_public = (u8[32]){ 0xf7, 0x6e, 0x90, 0x10, 0xac, 0x33, 0xc5, 0x04,
+ 0x3b, 0x2d, 0x3b, 0x76, 0xa8, 0x42, 0x17, 0x10,
+ 0x00, 0xc4, 0x91, 0x62, 0x22, 0xe9, 0xe8, 0x58,
+ 0x97, 0xa0, 0xae, 0xc7, 0xf6, 0x35, 0x0b, 0x3c },
+ .expected_ss = (u8[32]){ 0xdd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f },
+ .secret_size = 32,
+ .b_public_size = 32,
+ .expected_ss_size = 32,
+
+},
+/* wycheproof - edge case for shared secret */
+{
+ .secret = (u8[32]){ 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4,
+ 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3,
+ 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc,
+ 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 },
+ .b_public = (u8[32]){ 0xbb, 0x72, 0x68, 0x8d, 0x8f, 0x8a, 0xa7, 0xa3,
+ 0x9c, 0xd6, 0x06, 0x0c, 0xd5, 0xc8, 0x09, 0x3c,
+ 0xde, 0xc6, 0xfe, 0x34, 0x19, 0x37, 0xc3, 0x88,
+ 0x6a, 0x99, 0x34, 0x6c, 0xd0, 0x7f, 0xaa, 0x55 },
+ .expected_ss = (u8[32]){ 0xdb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f },
+ .secret_size = 32,
+ .b_public_size = 32,
+ .expected_ss_size = 32,
+
+},
+/* wycheproof - edge case for shared secret */
+{
+ .secret = (u8[32]){ 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4,
+ 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3,
+ 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc,
+ 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 },
+ .b_public = (u8[32]){ 0x88, 0xfd, 0xde, 0xa1, 0x93, 0x39, 0x1c, 0x6a,
+ 0x59, 0x33, 0xef, 0x9b, 0x71, 0x90, 0x15, 0x49,
+ 0x44, 0x72, 0x05, 0xaa, 0xe9, 0xda, 0x92, 0x8a,
+ 0x6b, 0x91, 0xa3, 0x52, 0xba, 0x10, 0xf4, 0x1f },
+ .expected_ss = (u8[32]){ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02 },
+ .secret_size = 32,
+ .b_public_size = 32,
+ .expected_ss_size = 32,
+
+},
+/* wycheproof - edge case for shared secret */
+{
+ .secret = (u8[32]){ 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4,
+ 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3,
+ 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc,
+ 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 },
+ .b_public = (u8[32]){ 0x30, 0x3b, 0x39, 0x2f, 0x15, 0x31, 0x16, 0xca,
+ 0xd9, 0xcc, 0x68, 0x2a, 0x00, 0xcc, 0xc4, 0x4c,
+ 0x95, 0xff, 0x0d, 0x3b, 0xbe, 0x56, 0x8b, 0xeb,
+ 0x6c, 0x4e, 0x73, 0x9b, 0xaf, 0xdc, 0x2c, 0x68 },
+ .expected_ss = (u8[32]){ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x00 },
+ .secret_size = 32,
+ .b_public_size = 32,
+ .expected_ss_size = 32,
+
+},
+/* wycheproof - checking for overflow */
+{
+ .secret = (u8[32]){ 0xc8, 0x17, 0x24, 0x70, 0x40, 0x00, 0xb2, 0x6d,
+ 0x31, 0x70, 0x3c, 0xc9, 0x7e, 0x3a, 0x37, 0x8d,
+ 0x56, 0xfa, 0xd8, 0x21, 0x93, 0x61, 0xc8, 0x8c,
+ 0xca, 0x8b, 0xd7, 0xc5, 0x71, 0x9b, 0x12, 0xb2 },
+ .b_public = (u8[32]){ 0xfd, 0x30, 0x0a, 0xeb, 0x40, 0xe1, 0xfa, 0x58,
+ 0x25, 0x18, 0x41, 0x2b, 0x49, 0xb2, 0x08, 0xa7,
+ 0x84, 0x2b, 0x1e, 0x1f, 0x05, 0x6a, 0x04, 0x01,
+ 0x78, 0xea, 0x41, 0x41, 0x53, 0x4f, 0x65, 0x2d },
+ .expected_ss = (u8[32]){ 0xb7, 0x34, 0x10, 0x5d, 0xc2, 0x57, 0x58, 0x5d,
+ 0x73, 0xb5, 0x66, 0xcc, 0xb7, 0x6f, 0x06, 0x27,
+ 0x95, 0xcc, 0xbe, 0xc8, 0x91, 0x28, 0xe5, 0x2b,
+ 0x02, 0xf3, 0xe5, 0x96, 0x39, 0xf1, 0x3c, 0x46 },
+ .secret_size = 32,
+ .b_public_size = 32,
+ .expected_ss_size = 32,
+
+},
+/* wycheproof - checking for overflow */
+{
+ .secret = (u8[32]){ 0xc8, 0x17, 0x24, 0x70, 0x40, 0x00, 0xb2, 0x6d,
+ 0x31, 0x70, 0x3c, 0xc9, 0x7e, 0x3a, 0x37, 0x8d,
+ 0x56, 0xfa, 0xd8, 0x21, 0x93, 0x61, 0xc8, 0x8c,
+ 0xca, 0x8b, 0xd7, 0xc5, 0x71, 0x9b, 0x12, 0xb2 },
+ .b_public = (u8[32]){ 0xc8, 0xef, 0x79, 0xb5, 0x14, 0xd7, 0x68, 0x26,
+ 0x77, 0xbc, 0x79, 0x31, 0xe0, 0x6e, 0xe5, 0xc2,
+ 0x7c, 0x9b, 0x39, 0x2b, 0x4a, 0xe9, 0x48, 0x44,
+ 0x73, 0xf5, 0x54, 0xe6, 0x67, 0x8e, 0xcc, 0x2e },
+ .expected_ss = (u8[32]){ 0x64, 0x7a, 0x46, 0xb6, 0xfc, 0x3f, 0x40, 0xd6,
+ 0x21, 0x41, 0xee, 0x3c, 0xee, 0x70, 0x6b, 0x4d,
+ 0x7a, 0x92, 0x71, 0x59, 0x3a, 0x7b, 0x14, 0x3e,
+ 0x8e, 0x2e, 0x22, 0x79, 0x88, 0x3e, 0x45, 0x50 },
+ .secret_size = 32,
+ .b_public_size = 32,
+ .expected_ss_size = 32,
+
+},
+/* wycheproof - checking for overflow */
+{
+ .secret = (u8[32]){ 0xc8, 0x17, 0x24, 0x70, 0x40, 0x00, 0xb2, 0x6d,
+ 0x31, 0x70, 0x3c, 0xc9, 0x7e, 0x3a, 0x37, 0x8d,
+ 0x56, 0xfa, 0xd8, 0x21, 0x93, 0x61, 0xc8, 0x8c,
+ 0xca, 0x8b, 0xd7, 0xc5, 0x71, 0x9b, 0x12, 0xb2 },
+ .b_public = (u8[32]){ 0x64, 0xae, 0xac, 0x25, 0x04, 0x14, 0x48, 0x61,
+ 0x53, 0x2b, 0x7b, 0xbc, 0xb6, 0xc8, 0x7d, 0x67,
+ 0xdd, 0x4c, 0x1f, 0x07, 0xeb, 0xc2, 0xe0, 0x6e,
+ 0xff, 0xb9, 0x5a, 0xec, 0xc6, 0x17, 0x0b, 0x2c },
+ .expected_ss = (u8[32]){ 0x4f, 0xf0, 0x3d, 0x5f, 0xb4, 0x3c, 0xd8, 0x65,
+ 0x7a, 0x3c, 0xf3, 0x7c, 0x13, 0x8c, 0xad, 0xce,
+ 0xcc, 0xe5, 0x09, 0xe4, 0xeb, 0xa0, 0x89, 0xd0,
+ 0xef, 0x40, 0xb4, 0xe4, 0xfb, 0x94, 0x61, 0x55 },
+ .secret_size = 32,
+ .b_public_size = 32,
+ .expected_ss_size = 32,
+
+},
+/* wycheproof - checking for overflow */
+{
+ .secret = (u8[32]){ 0xc8, 0x17, 0x24, 0x70, 0x40, 0x00, 0xb2, 0x6d,
+ 0x31, 0x70, 0x3c, 0xc9, 0x7e, 0x3a, 0x37, 0x8d,
+ 0x56, 0xfa, 0xd8, 0x21, 0x93, 0x61, 0xc8, 0x8c,
+ 0xca, 0x8b, 0xd7, 0xc5, 0x71, 0x9b, 0x12, 0xb2 },
+ .b_public = (u8[32]){ 0xbf, 0x68, 0xe3, 0x5e, 0x9b, 0xdb, 0x7e, 0xee,
+ 0x1b, 0x50, 0x57, 0x02, 0x21, 0x86, 0x0f, 0x5d,
+ 0xcd, 0xad, 0x8a, 0xcb, 0xab, 0x03, 0x1b, 0x14,
+ 0x97, 0x4c, 0xc4, 0x90, 0x13, 0xc4, 0x98, 0x31 },
+ .expected_ss = (u8[32]){ 0x21, 0xce, 0xe5, 0x2e, 0xfd, 0xbc, 0x81, 0x2e,
+ 0x1d, 0x02, 0x1a, 0x4a, 0xf1, 0xe1, 0xd8, 0xbc,
+ 0x4d, 0xb3, 0xc4, 0x00, 0xe4, 0xd2, 0xa2, 0xc5,
+ 0x6a, 0x39, 0x26, 0xdb, 0x4d, 0x99, 0xc6, 0x5b },
+ .secret_size = 32,
+ .b_public_size = 32,
+ .expected_ss_size = 32,
+
+},
+/* wycheproof - checking for overflow */
+{
+ .secret = (u8[32]){ 0xc8, 0x17, 0x24, 0x70, 0x40, 0x00, 0xb2, 0x6d,
+ 0x31, 0x70, 0x3c, 0xc9, 0x7e, 0x3a, 0x37, 0x8d,
+ 0x56, 0xfa, 0xd8, 0x21, 0x93, 0x61, 0xc8, 0x8c,
+ 0xca, 0x8b, 0xd7, 0xc5, 0x71, 0x9b, 0x12, 0xb2 },
+ .b_public = (u8[32]){ 0x53, 0x47, 0xc4, 0x91, 0x33, 0x1a, 0x64, 0xb4,
+ 0x3d, 0xdc, 0x68, 0x30, 0x34, 0xe6, 0x77, 0xf5,
+ 0x3d, 0xc3, 0x2b, 0x52, 0xa5, 0x2a, 0x57, 0x7c,
+ 0x15, 0xa8, 0x3b, 0xf2, 0x98, 0xe9, 0x9f, 0x19 },
+ .expected_ss = (u8[32]){ 0x18, 0xcb, 0x89, 0xe4, 0xe2, 0x0c, 0x0c, 0x2b,
+ 0xd3, 0x24, 0x30, 0x52, 0x45, 0x26, 0x6c, 0x93,
+ 0x27, 0x69, 0x0b, 0xbe, 0x79, 0xac, 0xb8, 0x8f,
+ 0x5b, 0x8f, 0xb3, 0xf7, 0x4e, 0xca, 0x3e, 0x52 },
+ .secret_size = 32,
+ .b_public_size = 32,
+ .expected_ss_size = 32,
+
+},
+/* wycheproof - private key == -1 (mod order) */
+{
+ .secret = (u8[32]){ 0xa0, 0x23, 0xcd, 0xd0, 0x83, 0xef, 0x5b, 0xb8,
+ 0x2f, 0x10, 0xd6, 0x2e, 0x59, 0xe1, 0x5a, 0x68,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50 },
+ .b_public = (u8[32]){ 0x25, 0x8e, 0x04, 0x52, 0x3b, 0x8d, 0x25, 0x3e,
+ 0xe6, 0x57, 0x19, 0xfc, 0x69, 0x06, 0xc6, 0x57,
+ 0x19, 0x2d, 0x80, 0x71, 0x7e, 0xdc, 0x82, 0x8f,
+ 0xa0, 0xaf, 0x21, 0x68, 0x6e, 0x2f, 0xaa, 0x75 },
+ .expected_ss = (u8[32]){ 0x25, 0x8e, 0x04, 0x52, 0x3b, 0x8d, 0x25, 0x3e,
+ 0xe6, 0x57, 0x19, 0xfc, 0x69, 0x06, 0xc6, 0x57,
+ 0x19, 0x2d, 0x80, 0x71, 0x7e, 0xdc, 0x82, 0x8f,
+ 0xa0, 0xaf, 0x21, 0x68, 0x6e, 0x2f, 0xaa, 0x75 },
+ .secret_size = 32,
+ .b_public_size = 32,
+ .expected_ss_size = 32,
+
+},
+/* wycheproof - private key == 1 (mod order) on twist */
+{
+ .secret = (u8[32]){ 0x58, 0x08, 0x3d, 0xd2, 0x61, 0xad, 0x91, 0xef,
+ 0xf9, 0x52, 0x32, 0x2e, 0xc8, 0x24, 0xc6, 0x82,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x5f },
+ .b_public = (u8[32]){ 0x2e, 0xae, 0x5e, 0xc3, 0xdd, 0x49, 0x4e, 0x9f,
+ 0x2d, 0x37, 0xd2, 0x58, 0xf8, 0x73, 0xa8, 0xe6,
+ 0xe9, 0xd0, 0xdb, 0xd1, 0xe3, 0x83, 0xef, 0x64,
+ 0xd9, 0x8b, 0xb9, 0x1b, 0x3e, 0x0b, 0xe0, 0x35 },
+ .expected_ss = (u8[32]){ 0x2e, 0xae, 0x5e, 0xc3, 0xdd, 0x49, 0x4e, 0x9f,
+ 0x2d, 0x37, 0xd2, 0x58, 0xf8, 0x73, 0xa8, 0xe6,
+ 0xe9, 0xd0, 0xdb, 0xd1, 0xe3, 0x83, 0xef, 0x64,
+ 0xd9, 0x8b, 0xb9, 0x1b, 0x3e, 0x0b, 0xe0, 0x35 },
+ .secret_size = 32,
+ .b_public_size = 32,
+ .expected_ss_size = 32,
+
+}
+};
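+
+/*
+ * Illustrative sketch only (not added by this patch): a curve25519
+ * kpp_testvec is exercised roughly as follows, which is the shape that
+ * alg_test_kpp() drives internally.  It assumes the generic curve25519
+ * kpp accepts the raw 32-byte scalar via crypto_kpp_set_secret().
+ *
+ *   struct crypto_kpp *tfm = crypto_alloc_kpp("curve25519", 0, 0);
+ *   struct kpp_request *req = kpp_request_alloc(tfm, GFP_KERNEL);
+ *   struct scatterlist src, dst;
+ *   DECLARE_CRYPTO_WAIT(wait);
+ *   u8 ss[32];
+ *
+ *   crypto_kpp_set_secret(tfm, vec->secret, vec->secret_size);
+ *   sg_init_one(&src, vec->b_public, vec->b_public_size);
+ *   sg_init_one(&dst, ss, sizeof(ss));
+ *   kpp_request_set_input(req, &src, vec->b_public_size);
+ *   kpp_request_set_output(req, &dst, sizeof(ss));
+ *   kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
+ *                            crypto_req_done, &wait);
+ *   crypto_wait_req(crypto_kpp_compute_shared_secret(req), &wait);
+ *
+ * On success, ss must equal expected_ss (memcmp over expected_ss_size).
+ */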
+
static const struct kpp_testvec ecdh_tv_template[] = {
{
#ifndef CONFIG_CRYPTO_FIPS
@@ -2628,6 +3853,62 @@ static const struct hash_testvec sm3_tv_template[] = {
}
};
+/* Example vectors below taken from
+ * GM/T 0042-2015 Appendix D.3
+ */
+static const struct hash_testvec hmac_sm3_tv_template[] = {
+ {
+ .key = "\x01\x02\x03\x04\x05\x06\x07\x08"
+ "\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10"
+ "\x11\x12\x13\x14\x15\x16\x17\x18"
+ "\x19\x1a\x1b\x1c\x1d\x1e\x1f\x20",
+ .ksize = 32,
+ .plaintext = "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq"
+ "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq",
+ .psize = 112,
+ .digest = "\xca\x05\xe1\x44\xed\x05\xd1\x85"
+ "\x78\x40\xd1\xf3\x18\xa4\xa8\x66"
+ "\x9e\x55\x9f\xc8\x39\x1f\x41\x44"
+ "\x85\xbf\xdf\x7b\xb4\x08\x96\x3a",
+ }, {
+ .key = "\x01\x02\x03\x04\x05\x06\x07\x08"
+ "\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10"
+ "\x11\x12\x13\x14\x15\x16\x17\x18"
+ "\x19\x1a\x1b\x1c\x1d\x1e\x1f\x20"
+ "\x21\x22\x23\x24\x25",
+ .ksize = 37,
+ .plaintext = "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd"
+ "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd"
+ "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd"
+ "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd",
+ .psize = 50,
+ .digest = "\x22\x0b\xf5\x79\xde\xd5\x55\x39"
+ "\x3f\x01\x59\xf6\x6c\x99\x87\x78"
+ "\x22\xa3\xec\xf6\x10\xd1\x55\x21"
+ "\x54\xb4\x1d\x44\xb9\x4d\xb3\xae",
+ }, {
+ .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
+ "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
+ "\x0b\x0b\x0b\x0b\x0b\x0b",
+ .ksize = 32,
+ .plaintext = "Hi There",
+ .psize = 8,
+ .digest = "\xc0\xba\x18\xc6\x8b\x90\xc8\x8b"
+ "\xc0\x7d\xe7\x94\xbf\xc7\xd2\xc8"
+ "\xd1\x9e\xc3\x1e\xd8\x77\x3b\xc2"
+ "\xb3\x90\xc9\x60\x4e\x0b\xe1\x1e",
+ }, {
+ .key = "Jefe",
+ .ksize = 4,
+ .plaintext = "what do ya want for nothing?",
+ .psize = 28,
+ .digest = "\x2e\x87\xf1\xd1\x68\x62\xe6\xd9"
+ "\x64\xb5\x0a\x52\x00\xbf\x2b\x10"
+ "\xb7\x64\xfa\xa9\x68\x0a\x29\x6a"
+ "\x24\x05\xf2\x4b\xec\x39\xf8\x82",
+ },
+};
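+
+/*
+ * Illustrative sketch only (not added by this patch): once "hmac(sm3)"
+ * resolves, a vector above can be recomputed through the shash API:
+ *
+ *   struct crypto_shash *tfm = crypto_alloc_shash("hmac(sm3)", 0, 0);
+ *   SHASH_DESC_ON_STACK(desc, tfm);
+ *   u8 out[SM3_DIGEST_SIZE];
+ *
+ *   crypto_shash_setkey(tfm, vec->key, vec->ksize);
+ *   desc->tfm = tfm;
+ *   crypto_shash_digest(desc, vec->plaintext, vec->psize, out);
+ *   crypto_free_shash(tfm);
+ *
+ * out must then match vec->digest byte for byte.
+ */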
+
/*
 * SHA1 test vectors from FIPS PUB 180-1
* Long vector from CAVS 5.0
@@ -11846,6 +13127,133 @@ static const struct cipher_testvec sm4_ctr_tv_template[] = {
}
};
+static const struct cipher_testvec sm4_ctr_rfc3686_tv_template[] = {
+ {
+ .key = "\xae\x68\x52\xf8\x12\x10\x67\xcc"
+ "\x4b\xf7\xa5\x76\x55\x77\xf3\x9e"
+ "\x00\x00\x00\x30",
+ .klen = 20,
+ .iv = "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .ptext = "Single block msg",
+ .ctext = "\x20\x9b\x77\x31\xd3\x65\xdb\xab"
+ "\x9e\x48\x74\x7e\xbd\x13\x83\xeb",
+ .len = 16,
+ }, {
+ .key = "\x7e\x24\x06\x78\x17\xfa\xe0\xd7"
+ "\x43\xd6\xce\x1f\x32\x53\x91\x63"
+ "\x00\x6c\xb6\xdb",
+ .klen = 20,
+ .iv = "\xc0\x54\x3b\x59\xda\x48\xd9\x0b",
+ .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17"
+ "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
+ .ctext = "\x33\xe0\x28\x01\x92\xed\xc9\x1e"
+ "\x97\x35\xd9\x4a\xec\xd4\xbc\x23"
+ "\x4f\x35\x9f\x1c\x55\x1f\xe0\x27"
+ "\xe0\xdf\xc5\x43\xbc\xb0\x23\x94",
+ .len = 32,
+ }
+};
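+
+/*
+ * rfc3686(ctr(sm4)) follows the RFC 3686 keying convention already used
+ * for AES above: the final four bytes of .key are a per-SA nonce, so
+ * .klen is the 16-byte SM4 key plus 4.  Each counter block is then
+ * nonce (4 bytes) || IV (8 bytes) || block counter (4 bytes, big
+ * endian, starting at 1).
+ */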
+
+static const struct cipher_testvec sm4_ofb_tv_template[] = {
+ { /* From: draft-ribose-cfrg-sm4-02, paragraph 12.2.3 */
+ .key = "\x01\x23\x45\x67\x89\xab\xcd\xef"
+ "\xfe\xdc\xba\x98\x76\x54\x32\x10",
+ .klen = 16,
+ .iv = "\x01\x23\x45\x67\x89\xab\xcd\xef"
+ "\xfe\xdc\xba\x98\x76\x54\x32\x10",
+ .ptext = "\x01\x23\x45\x67\x89\xab\xcd\xef"
+ "\xfe\xdc\xba\x98\x76\x54\x32\x10"
+ "\x01\x23\x45\x67\x89\xab\xcd\xef"
+ "\xfe\xdc\xba\x98\x76\x54\x32\x10",
+ .ctext = "\x69\x3d\x9a\x53\x5b\xad\x5b\xb1"
+ "\x78\x6f\x53\xd7\x25\x3a\x70\x56"
+ "\xf2\x07\x5d\x28\xb5\x23\x5f\x58"
+ "\xd5\x00\x27\xe4\x17\x7d\x2b\xce",
+ .len = 32,
+ }, { /* From: draft-ribose-cfrg-sm4-09, appendix A.2.3, Example 1 */
+ .key = "\x01\x23\x45\x67\x89\xab\xcd\xef"
+ "\xfe\xdc\xba\x98\x76\x54\x32\x10",
+ .klen = 16,
+ .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+ .ptext = "\xaa\xaa\xaa\xaa\xbb\xbb\xbb\xbb"
+ "\xcc\xcc\xcc\xcc\xdd\xdd\xdd\xdd"
+ "\xee\xee\xee\xee\xff\xff\xff\xff"
+ "\xaa\xaa\xaa\xaa\xbb\xbb\xbb\xbb",
+ .ctext = "\xac\x32\x36\xcb\x86\x1d\xd3\x16"
+ "\xe6\x41\x3b\x4e\x3c\x75\x24\xb7"
+ "\x1d\x01\xac\xa2\x48\x7c\xa5\x82"
+ "\xcb\xf5\x46\x3e\x66\x98\x53\x9b",
+ .len = 32,
+ }, { /* From: draft-ribose-cfrg-sm4-09, appendix A.2.3, Example 2 */
+ .key = "\xfe\xdc\xba\x98\x76\x54\x32\x10"
+ "\x01\x23\x45\x67\x89\xab\xcd\xef",
+ .klen = 16,
+ .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+ .ptext = "\xaa\xaa\xaa\xaa\xbb\xbb\xbb\xbb"
+ "\xcc\xcc\xcc\xcc\xdd\xdd\xdd\xdd"
+ "\xee\xee\xee\xee\xff\xff\xff\xff"
+ "\xaa\xaa\xaa\xaa\xbb\xbb\xbb\xbb",
+ .ctext = "\x5d\xcc\xcd\x25\xa8\x4b\xa1\x65"
+ "\x60\xd7\xf2\x65\x88\x70\x68\x49"
+ "\x33\xfa\x16\xbd\x5c\xd9\xc8\x56"
+ "\xca\xca\xa1\xe1\x01\x89\x7a\x97",
+ .len = 32,
+ }
+};
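+
+/*
+ * OFB generates its keystream by iterating the block cipher over the
+ * IV: O_0 = IV, O_i = E_K(O_{i-1}), C_i = P_i ^ O_i.  Encryption and
+ * decryption are therefore the same operation.
+ */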
+
+static const struct cipher_testvec sm4_cfb_tv_template[] = {
+ { /* From: draft-ribose-cfrg-sm4-02, paragraph 12.2.4 */
+ .key = "\x01\x23\x45\x67\x89\xab\xcd\xef"
+ "\xfe\xdc\xba\x98\x76\x54\x32\x10",
+ .klen = 16,
+ .iv = "\x01\x23\x45\x67\x89\xab\xcd\xef"
+ "\xfe\xdc\xba\x98\x76\x54\x32\x10",
+ .ptext = "\x01\x23\x45\x67\x89\xab\xcd\xef"
+ "\xfe\xdc\xba\x98\x76\x54\x32\x10"
+ "\x01\x23\x45\x67\x89\xab\xcd\xef"
+ "\xfe\xdc\xba\x98\x76\x54\x32\x10",
+ .ctext = "\x69\x3d\x9a\x53\x5b\xad\x5b\xb1"
+ "\x78\x6f\x53\xd7\x25\x3a\x70\x56"
+ "\x9e\xd2\x58\xa8\x5a\x04\x67\xcc"
+ "\x92\xaa\xb3\x93\xdd\x97\x89\x95",
+ .len = 32,
+ }, { /* From: draft-ribose-cfrg-sm4-09, appendix A.2.4, Example 1 */
+ .key = "\x01\x23\x45\x67\x89\xab\xcd\xef"
+ "\xfe\xdc\xba\x98\x76\x54\x32\x10",
+ .klen = 16,
+ .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+ .ptext = "\xaa\xaa\xaa\xaa\xbb\xbb\xbb\xbb"
+ "\xcc\xcc\xcc\xcc\xdd\xdd\xdd\xdd"
+ "\xee\xee\xee\xee\xff\xff\xff\xff"
+ "\xaa\xaa\xaa\xaa\xbb\xbb\xbb\xbb",
+ .ctext = "\xac\x32\x36\xcb\x86\x1d\xd3\x16"
+ "\xe6\x41\x3b\x4e\x3c\x75\x24\xb7"
+ "\x69\xd4\xc5\x4e\xd4\x33\xb9\xa0"
+ "\x34\x60\x09\xbe\xb3\x7b\x2b\x3f",
+ .len = 32,
+ }, { /* From: draft-ribose-cfrg-sm4-09, appendix A.2.4, Example 2 */
+ .key = "\xfe\xdc\xba\x98\x76\x54\x32\x10"
+ "\x01\x23\x45\x67\x89\xab\xcd\xef",
+ .klen = 16,
+ .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+ .ptext = "\xaa\xaa\xaa\xaa\xbb\xbb\xbb\xbb"
+ "\xcc\xcc\xcc\xcc\xdd\xdd\xdd\xdd"
+ "\xee\xee\xee\xee\xff\xff\xff\xff"
+ "\xaa\xaa\xaa\xaa\xbb\xbb\xbb\xbb",
+ .ctext = "\x5d\xcc\xcd\x25\xa8\x4b\xa1\x65"
+ "\x60\xd7\xf2\x65\x88\x70\x68\x49"
+ "\x0d\x9b\x86\xff\x20\xc3\xbf\xe1"
+ "\x15\xff\xa0\x2c\xa6\x19\x2c\xc5",
+ .len = 32,
+ }
+};
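+
+/*
+ * Full-block CFB feeds the previous ciphertext block back through the
+ * cipher: C_i = P_i ^ E_K(C_{i-1}) with C_0 = IV, so only the
+ * encryption direction of SM4 is used for both encrypt and decrypt.
+ */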
+
/* Cast6 test vectors from RFC 2612 */
static const struct cipher_testvec cast6_tv_template[] = {
{
@@ -17043,6 +18451,198 @@ static const struct aead_testvec aes_gcm_tv_template[] = {
"\x25\x19\x49\x8e\x80\xf1\x47\x8f"
"\x37\xba\x55\xbd\x6d\x27\x61\x8c",
.clen = 76,
+ }, {
+ .key = "\x62\x35\xf8\x95\xfc\xa5\xeb\xf6"
+ "\x0e\x92\x12\x04\xd3\xa1\x3f\x2e"
+ "\x8b\x32\xcf\xe7\x44\xed\x13\x59"
+ "\x04\x38\x77\xb0\xb9\xad\xb4\x38",
+ .klen = 32,
+ .iv = "\x00\xff\xff\xff\xff\x00\x00\xff"
+ "\xff\xff\x00\xff",
+ .ptext = "\x42\xc1\xcc\x08\x48\x6f\x41\x3f"
+ "\x2f\x11\x66\x8b\x2a\x16\xf0\xe0"
+ "\x58\x83\xf0\xc3\x70\x14\xc0\x5b"
+ "\x3f\xec\x1d\x25\x3c\x51\xd2\x03"
+ "\xcf\x59\x74\x1f\xb2\x85\xb4\x07"
+ "\xc6\x6a\x63\x39\x8a\x5b\xde\xcb"
+ "\xaf\x08\x44\xbd\x6f\x91\x15\xe1"
+ "\xf5\x7a\x6e\x18\xbd\xdd\x61\x50"
+ "\x59\xa9\x97\xab\xbb\x0e\x74\x5c"
+ "\x00\xa4\x43\x54\x04\x54\x9b\x3b"
+ "\x77\xec\xfd\x5c\xa6\xe8\x7b\x08"
+ "\xae\xe6\x10\x3f\x32\x65\xd1\xfc"
+ "\xa4\x1d\x2c\x31\xfb\x33\x7a\xb3"
+ "\x35\x23\xf4\x20\x41\xd4\xad\x82"
+ "\x8b\xa4\xad\x96\x1c\x20\x53\xbe"
+ "\x0e\xa6\xf4\xdc\x78\x49\x3e\x72"
+ "\xb1\xa9\xb5\x83\xcb\x08\x54\xb7"
+ "\xad\x49\x3a\xae\x98\xce\xa6\x66"
+ "\x10\x30\x90\x8c\x55\x83\xd7\x7c"
+ "\x8b\xe6\x53\xde\xd2\x6e\x18\x21"
+ "\x01\x52\xd1\x9f\x9d\xbb\x9c\x73"
+ "\x57\xcc\x89\x09\x75\x9b\x78\x70"
+ "\xed\x26\x97\x4d\xb4\xe4\x0c\xa5"
+ "\xfa\x70\x04\x70\xc6\x96\x1c\x7d"
+ "\x54\x41\x77\xa8\xe3\xb0\x7e\x96"
+ "\x82\xd9\xec\xa2\x87\x68\x55\xf9"
+ "\x8f\x9e\x73\x43\x47\x6a\x08\x36"
+ "\x93\x67\xa8\x2d\xde\xac\x41\xa9"
+ "\x5c\x4d\x73\x97\x0f\x70\x68\xfa"
+ "\x56\x4d\x00\xc2\x3b\x1f\xc8\xb9"
+ "\x78\x1f\x51\x07\xe3\x9a\x13\x4e"
+ "\xed\x2b\x2e\xa3\xf7\x44\xb2\xe7"
+ "\xab\x19\x37\xd9\xba\x76\x5e\xd2"
+ "\xf2\x53\x15\x17\x4c\x6b\x16\x9f"
+ "\x02\x66\x49\xca\x7c\x91\x05\xf2"
+ "\x45\x36\x1e\xf5\x77\xad\x1f\x46"
+ "\xa8\x13\xfb\x63\xb6\x08\x99\x63"
+ "\x82\xa2\xed\xb3\xac\xdf\x43\x19"
+ "\x45\xea\x78\x73\xd9\xb7\x39\x11"
+ "\xa3\x13\x7c\xf8\x3f\xf7\xad\x81"
+ "\x48\x2f\xa9\x5c\x5f\xa0\xf0\x79"
+ "\xa4\x47\x7d\x80\x20\x26\xfd\x63"
+ "\x0a\xc7\x7e\x6d\x75\x47\xff\x76"
+ "\x66\x2e\x8a\x6c\x81\x35\xaf\x0b"
+ "\x2e\x6a\x49\x60\xc1\x10\xe1\xe1"
+ "\x54\x03\xa4\x09\x0c\x37\x7a\x15"
+ "\x23\x27\x5b\x8b\x4b\xa5\x64\x97"
+ "\xae\x4a\x50\x73\x1f\x66\x1c\x5c"
+ "\x03\x25\x3c\x8d\x48\x58\x71\x34"
+ "\x0e\xec\x4e\x55\x1a\x03\x6a\xe5"
+ "\xb6\x19\x2b\x84\x2a\x20\xd1\xea"
+ "\x80\x6f\x96\x0e\x05\x62\xc7\x78"
+ "\x87\x79\x60\x38\x46\xb4\x25\x57"
+ "\x6e\x16\x63\xf8\xad\x6e\xd7\x42"
+ "\x69\xe1\x88\xef\x6e\xd5\xb4\x9a"
+ "\x3c\x78\x6c\x3b\xe5\xa0\x1d\x22"
+ "\x86\x5c\x74\x3a\xeb\x24\x26\xc7"
+ "\x09\xfc\x91\x96\x47\x87\x4f\x1a"
+ "\xd6\x6b\x2c\x18\x47\xc0\xb8\x24"
+ "\xa8\x5a\x4a\x9e\xcb\x03\xe7\x2a"
+ "\x09\xe6\x4d\x9c\x6d\x86\x60\xf5"
+ "\x2f\x48\x69\x37\x9f\xf2\xd2\xcb"
+ "\x0e\x5a\xdd\x6e\x8a\xfb\x6a\xfe"
+ "\x0b\x63\xde\x87\x42\x79\x8a\x68"
+ "\x51\x28\x9b\x7a\xeb\xaf\xb8\x2f"
+ "\x9d\xd1\xc7\x45\x90\x08\xc9\x83"
+ "\xe9\x83\x84\xcb\x28\x69\x09\x69"
+ "\xce\x99\x46\x00\x54\xcb\xd8\x38"
+ "\xf9\x53\x4a\xbf\x31\xce\x57\x15"
+ "\x33\xfa\x96\x04\x33\x42\xe3\xc0"
+ "\xb7\x54\x4a\x65\x7a\x7c\x02\xe6"
+ "\x19\x95\xd0\x0e\x82\x07\x63\xf9"
+ "\xe1\x2b\x2a\xfc\x55\x92\x52\xc9"
+ "\xb5\x9f\x23\x28\x60\xe7\x20\x51"
+ "\x10\xd3\xed\x6d\x9b\xab\xb8\xe2"
+ "\x5d\x9a\x34\xb3\xbe\x9c\x64\xcb"
+ "\x78\xc6\x91\x22\x40\x91\x80\xbe"
+ "\xd7\x78\x5c\x0e\x0a\xdc\x08\xe9"
+ "\x67\x10\xa4\x83\x98\x79\x23\xe7"
+ "\x92\xda\xa9\x22\x16\xb1\xe7\x78"
+ "\xa3\x1c\x6c\x8f\x35\x7c\x4d\x37"
+ "\x2f\x6e\x0b\x50\x5c\x34\xb9\xf9"
+ "\xe6\x3d\x91\x0d\x32\x95\xaa\x3d"
+ "\x48\x11\x06\xbb\x2d\xf2\x63\x88"
+ "\x3f\x73\x09\xe2\x45\x56\x31\x51"
+ "\xfa\x5e\x4e\x62\xf7\x90\xf9\xa9"
+ "\x7d\x7b\x1b\xb1\xc8\x26\x6e\x66"
+ "\xf6\x90\x9a\x7f\xf2\x57\xcc\x23"
+ "\x59\xfa\xfa\xaa\x44\x04\x01\xa7"
+ "\xa4\x78\xdb\x74\x3d\x8b\xb5",
+ .plen = 719,
+ .ctext = "\x84\x0b\xdb\xd5\xb7\xa8\xfe\x20"
+ "\xbb\xb1\x12\x7f\x41\xea\xb3\xc0"
+ "\xa2\xb4\x37\x19\x11\x58\xb6\x0b"
+ "\x4c\x1d\x38\x05\x54\xd1\x16\x73"
+ "\x8e\x1c\x20\x90\xa2\x9a\xb7\x74"
+ "\x47\xe6\xd8\xfc\x18\x3a\xb4\xea"
+ "\xd5\x16\x5a\x2c\x53\x01\x46\xb3"
+ "\x18\x33\x74\x6c\x50\xf2\xe8\xc0"
+ "\x73\xda\x60\x22\xeb\xe3\xe5\x9b"
+ "\x20\x93\x6c\x4b\x37\x99\xb8\x23"
+ "\x3b\x4e\xac\xe8\x5b\xe8\x0f\xb7"
+ "\xc3\x8f\xfb\x4a\x37\xd9\x39\x95"
+ "\x34\xf1\xdb\x8f\x71\xd9\xc7\x0b"
+ "\x02\xf1\x63\xfc\x9b\xfc\xc5\xab"
+ "\xb9\x14\x13\x21\xdf\xce\xaa\x88"
+ "\x44\x30\x1e\xce\x26\x01\x92\xf8"
+ "\x9f\x00\x4b\x0c\x4b\xf7\x5f\xe0"
+ "\x89\xca\x94\x66\x11\x21\x97\xca"
+ "\x3e\x83\x74\x2d\xdb\x4d\x11\xeb"
+ "\x97\xc2\x14\xff\x9e\x1e\xa0\x6b"
+ "\x08\xb4\x31\x2b\x85\xc6\x85\x6c"
+ "\x90\xec\x39\xc0\xec\xb3\xb5\x4e"
+ "\xf3\x9c\xe7\x83\x3a\x77\x0a\xf4"
+ "\x56\xfe\xce\x18\x33\x6d\x0b\x2d"
+ "\x33\xda\xc8\x05\x5c\xb4\x09\x2a"
+ "\xde\x6b\x52\x98\x01\xef\x36\x3d"
+ "\xbd\xf9\x8f\xa8\x3e\xaa\xcd\xd1"
+ "\x01\x2d\x42\x49\xc3\xb6\x84\xbb"
+ "\x48\x96\xe0\x90\x93\x6c\x48\x64"
+ "\xd4\xfa\x7f\x93\x2c\xa6\x21\xc8"
+ "\x7a\x23\x7b\xaa\x20\x56\x12\xae"
+ "\x16\x9d\x94\x0f\x54\xa1\xec\xca"
+ "\x51\x4e\xf2\x39\xf4\xf8\x5f\x04"
+ "\x5a\x0d\xbf\xf5\x83\xa1\x15\xe1"
+ "\xf5\x3c\xd8\x62\xa3\xed\x47\x89"
+ "\x85\x4c\xe5\xdb\xac\x9e\x17\x1d"
+ "\x0c\x09\xe3\x3e\x39\x5b\x4d\x74"
+ "\x0e\xf5\x34\xee\x70\x11\x4c\xfd"
+ "\xdb\x34\xb1\xb5\x10\x3f\x73\xb7"
+ "\xf5\xfa\xed\xb0\x1f\xa5\xcd\x3c"
+ "\x8d\x35\x83\xd4\x11\x44\x6e\x6c"
+ "\x5b\xe0\x0e\x69\xa5\x39\xe5\xbb"
+ "\xa9\x57\x24\x37\xe6\x1f\xdd\xcf"
+ "\x16\x2a\x13\xf9\x6a\x2d\x90\xa0"
+ "\x03\x60\x7a\xed\x69\xd5\x00\x8b"
+ "\x7e\x4f\xcb\xb9\xfa\x91\xb9\x37"
+ "\xc1\x26\xce\x90\x97\x22\x64\x64"
+ "\xc1\x72\x43\x1b\xf6\xac\xc1\x54"
+ "\x8a\x10\x9c\xdd\x8d\xd5\x8e\xb2"
+ "\xe4\x85\xda\xe0\x20\x5f\xf4\xb4"
+ "\x15\xb5\xa0\x8d\x12\x74\x49\x23"
+ "\x3a\xdf\x4a\xd3\xf0\x3b\x89\xeb"
+ "\xf8\xcc\x62\x7b\xfb\x93\x07\x41"
+ "\x61\x26\x94\x58\x70\xa6\x3c\xe4"
+ "\xff\x58\xc4\x13\x3d\xcb\x36\x6b"
+ "\x32\xe5\xb2\x6d\x03\x74\x6f\x76"
+ "\x93\x77\xde\x48\xc4\xfa\x30\x4a"
+ "\xda\x49\x80\x77\x0f\x1c\xbe\x11"
+ "\xc8\x48\xb1\xe5\xbb\xf2\x8a\xe1"
+ "\x96\x2f\x9f\xd1\x8e\x8a\x5c\xe2"
+ "\xf7\xd7\xd8\x54\xf3\x3f\xc4\x91"
+ "\xb8\xfb\x86\xdc\x46\x24\x91\x60"
+ "\x6c\x2f\xc9\x41\x37\x51\x49\x54"
+ "\x09\x81\x21\xf3\x03\x9f\x2b\xe3"
+ "\x1f\x39\x63\xaf\xf4\xd7\x53\x60"
+ "\xa7\xc7\x54\xf9\xee\xb1\xb1\x7d"
+ "\x75\x54\x65\x93\xfe\xb1\x68\x6b"
+ "\x57\x02\xf9\xbb\x0e\xf9\xf8\xbf"
+ "\x01\x12\x27\xb4\xfe\xe4\x79\x7a"
+ "\x40\x5b\x51\x4b\xdf\x38\xec\xb1"
+ "\x6a\x56\xff\x35\x4d\x42\x33\xaa"
+ "\x6f\x1b\xe4\xdc\xe0\xdb\x85\x35"
+ "\x62\x10\xd4\xec\xeb\xc5\x7e\x45"
+ "\x1c\x6f\x17\xca\x3b\x8e\x2d\x66"
+ "\x4f\x4b\x36\x56\xcd\x1b\x59\xaa"
+ "\xd2\x9b\x17\xb9\x58\xdf\x7b\x64"
+ "\x8a\xff\x3b\x9c\xa6\xb5\x48\x9e"
+ "\xaa\xe2\x5d\x09\x71\x32\x5f\xb6"
+ "\x29\xbe\xe7\xc7\x52\x7e\x91\x82"
+ "\x6b\x6d\x33\xe1\x34\x06\x36\x21"
+ "\x5e\xbe\x1e\x2f\x3e\xc1\xfb\xea"
+ "\x49\x2c\xb5\xca\xf7\xb0\x37\xea"
+ "\x1f\xed\x10\x04\xd9\x48\x0d\x1a"
+ "\x1c\xfb\xe7\x84\x0e\x83\x53\x74"
+ "\xc7\x65\xe2\x5c\xe5\xba\x73\x4c"
+ "\x0e\xe1\xb5\x11\x45\x61\x43\x46"
+ "\xaa\x25\x8f\xbd\x85\x08\xfa\x4c"
+ "\x15\xc1\xc0\xd8\xf5\xdc\x16\xbb"
+ "\x7b\x1d\xe3\x87\x57\xa7\x2a\x1d"
+ "\x38\x58\x9e\x8a\x43\xdc\x57"
+ "\xd1\x81\x7d\x2b\xe9\xff\x99\x3a"
+ "\x4b\x24\x52\x58\x55\xe1\x49\x14",
+ .clen = 735,
}
};
@@ -31567,4 +33167,528 @@ static const struct aead_testvec essiv_hmac_sha256_aes_cbc_tv_temp[] = {
},
};
+static const char blake2_ordered_sequence[] =
+ "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17"
+ "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
+ "\x20\x21\x22\x23\x24\x25\x26\x27"
+ "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
+ "\x30\x31\x32\x33\x34\x35\x36\x37"
+ "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
+ "\x40\x41\x42\x43\x44\x45\x46\x47"
+ "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
+ "\x50\x51\x52\x53\x54\x55\x56\x57"
+ "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
+ "\x60\x61\x62\x63\x64\x65\x66\x67"
+ "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
+ "\x70\x71\x72\x73\x74\x75\x76\x77"
+ "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
+ "\x80\x81\x82\x83\x84\x85\x86\x87"
+ "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
+ "\x90\x91\x92\x93\x94\x95\x96\x97"
+ "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
+ "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
+ "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
+ "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
+ "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
+ "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
+ "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
+ "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
+ "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
+ "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
+ "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
+ "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
+ "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff";
+
+static const struct hash_testvec blake2b_160_tv_template[] = {{
+ .digest = (u8[]){ 0x33, 0x45, 0x52, 0x4a, 0xbf, 0x6b, 0xbe, 0x18,
+ 0x09, 0x44, 0x92, 0x24, 0xb5, 0x97, 0x2c, 0x41,
+ 0x79, 0x0b, 0x6c, 0xf2, },
+}, {
+ .plaintext = blake2_ordered_sequence,
+ .psize = 64,
+ .digest = (u8[]){ 0x11, 0xcc, 0x66, 0x61, 0xe9, 0x22, 0xb0, 0xe4,
+ 0x07, 0xe0, 0xa5, 0x72, 0x49, 0xc3, 0x8d, 0x4f,
+ 0xf7, 0x6d, 0x8e, 0xc8, },
+}, {
+ .ksize = 32,
+ .key = blake2_ordered_sequence,
+ .plaintext = blake2_ordered_sequence,
+ .psize = 1,
+ .digest = (u8[]){ 0x31, 0xe3, 0xd9, 0xd5, 0x4e, 0x72, 0xd8, 0x0b,
+ 0x2b, 0x3b, 0xd7, 0x6b, 0x82, 0x7a, 0x1d, 0xfb,
+ 0x56, 0x2f, 0x79, 0x4c, },
+}, {
+ .ksize = 64,
+ .key = blake2_ordered_sequence,
+ .plaintext = blake2_ordered_sequence,
+ .psize = 7,
+ .digest = (u8[]){ 0x28, 0x20, 0xd1, 0xbe, 0x7f, 0xcc, 0xc1, 0x62,
+ 0xd9, 0x0d, 0x9a, 0x4b, 0x47, 0xd1, 0x5e, 0x04,
+ 0x74, 0x2a, 0x53, 0x17, },
+}, {
+ .ksize = 1,
+ .key = "B",
+ .plaintext = blake2_ordered_sequence,
+ .psize = 15,
+ .digest = (u8[]){ 0x45, 0xe9, 0x95, 0xb6, 0xc4, 0xe8, 0x22, 0xea,
+ 0xfe, 0xd2, 0x37, 0xdb, 0x46, 0xbf, 0xf1, 0x25,
+ 0xd5, 0x03, 0x1d, 0x81, },
+}, {
+ .ksize = 32,
+ .key = blake2_ordered_sequence,
+ .plaintext = blake2_ordered_sequence,
+ .psize = 247,
+ .digest = (u8[]){ 0x7e, 0xb9, 0xf2, 0x9b, 0x2f, 0xc2, 0x01, 0xd4,
+ 0xb0, 0x4f, 0x08, 0x2b, 0x8e, 0xbd, 0x06, 0xef,
+ 0x1c, 0xc4, 0x25, 0x95, },
+}, {
+ .ksize = 64,
+ .key = blake2_ordered_sequence,
+ .plaintext = blake2_ordered_sequence,
+ .psize = 256,
+ .digest = (u8[]){ 0x6e, 0x35, 0x01, 0x70, 0xbf, 0xb6, 0xc4, 0xba,
+ 0x33, 0x1b, 0xa6, 0xd3, 0xc2, 0x5d, 0xb4, 0x03,
+ 0x95, 0xaf, 0x29, 0x16, },
+}};
+
+static const struct hash_testvec blake2b_256_tv_template[] = {{
+ .plaintext = blake2_ordered_sequence,
+ .psize = 7,
+ .digest = (u8[]){ 0x9d, 0xf1, 0x4b, 0x72, 0x48, 0x76, 0x4a, 0x86,
+ 0x91, 0x97, 0xc3, 0x5e, 0x39, 0x2d, 0x2a, 0x6d,
+ 0x6f, 0xdc, 0x5b, 0x79, 0xd5, 0x97, 0x29, 0x79,
+ 0x20, 0xfd, 0x3f, 0x14, 0x91, 0xb4, 0x42, 0xd2, },
+}, {
+ .plaintext = blake2_ordered_sequence,
+ .psize = 256,
+ .digest = (u8[]){ 0x39, 0xa7, 0xeb, 0x9f, 0xed, 0xc1, 0x9a, 0xab,
+ 0xc8, 0x34, 0x25, 0xc6, 0x75, 0x5d, 0xd9, 0x0e,
+ 0x6f, 0x9d, 0x0c, 0x80, 0x49, 0x64, 0xa1, 0xf4,
+ 0xaa, 0xee, 0xa3, 0xb9, 0xfb, 0x59, 0x98, 0x35, },
+}, {
+ .ksize = 1,
+ .key = "B",
+ .digest = (u8[]){ 0xc3, 0x08, 0xb1, 0xbf, 0xe4, 0xf9, 0xbc, 0xb4,
+ 0x75, 0xaf, 0x3f, 0x59, 0x6e, 0xae, 0xde, 0x6a,
+ 0xa3, 0x8e, 0xb5, 0x94, 0xad, 0x30, 0xf0, 0x17,
+ 0x1c, 0xfb, 0xd8, 0x3e, 0x8a, 0xbe, 0xed, 0x9c, },
+}, {
+ .ksize = 64,
+ .key = blake2_ordered_sequence,
+ .plaintext = blake2_ordered_sequence,
+ .psize = 1,
+ .digest = (u8[]){ 0x34, 0x75, 0x8b, 0x64, 0x71, 0x35, 0x62, 0x82,
+ 0x97, 0xfb, 0x09, 0xc7, 0x93, 0x0c, 0xd0, 0x4e,
+ 0x95, 0x28, 0xe5, 0x66, 0x91, 0x12, 0xf5, 0xb1,
+ 0x31, 0x84, 0x93, 0xe1, 0x4d, 0xe7, 0x7e, 0x55, },
+}, {
+ .ksize = 32,
+ .key = blake2_ordered_sequence,
+ .plaintext = blake2_ordered_sequence,
+ .psize = 15,
+ .digest = (u8[]){ 0xce, 0x74, 0xa9, 0x2e, 0xe9, 0x40, 0x3d, 0xa2,
+ 0x11, 0x4a, 0x99, 0x25, 0x7a, 0x34, 0x5d, 0x35,
+ 0xdf, 0x6a, 0x48, 0x79, 0x2a, 0x93, 0x93, 0xff,
+ 0x1f, 0x3c, 0x39, 0xd0, 0x71, 0x1f, 0x20, 0x7b, },
+}, {
+ .ksize = 1,
+ .key = "B",
+ .plaintext = blake2_ordered_sequence,
+ .psize = 64,
+ .digest = (u8[]){ 0x2e, 0x84, 0xdb, 0xa2, 0x5f, 0x0e, 0xe9, 0x52,
+ 0x79, 0x50, 0x69, 0x9f, 0xf1, 0xfd, 0xfc, 0x9d,
+ 0x89, 0x83, 0xa9, 0xb6, 0xa4, 0xd5, 0xfa, 0xb5,
+ 0xbe, 0x35, 0x1a, 0x17, 0x8a, 0x2c, 0x7f, 0x7d, },
+}, {
+ .ksize = 64,
+ .key = blake2_ordered_sequence,
+ .plaintext = blake2_ordered_sequence,
+ .psize = 247,
+ .digest = (u8[]){ 0x2e, 0x26, 0xf0, 0x09, 0x02, 0x65, 0x90, 0x09,
+ 0xcc, 0xf5, 0x4c, 0x44, 0x74, 0x0e, 0xa0, 0xa8,
+ 0x25, 0x4a, 0xda, 0x61, 0x56, 0x95, 0x7d, 0x3f,
+ 0x6d, 0xc0, 0x43, 0x17, 0x95, 0x89, 0xcd, 0x9d, },
+}};
+
+static const struct hash_testvec blake2b_384_tv_template[] = {{
+ .plaintext = blake2_ordered_sequence,
+ .psize = 1,
+ .digest = (u8[]){ 0xcc, 0x01, 0x08, 0x85, 0x36, 0xf7, 0x84, 0xf0,
+ 0xbb, 0x76, 0x9e, 0x41, 0xc4, 0x95, 0x7b, 0x6d,
+ 0x0c, 0xde, 0x1f, 0xcc, 0x8c, 0xf1, 0xd9, 0x1f,
+ 0xc4, 0x77, 0xd4, 0xdd, 0x6e, 0x3f, 0xbf, 0xcd,
+ 0x43, 0xd1, 0x69, 0x8d, 0x14, 0x6f, 0x34, 0x8b,
+ 0x2c, 0x36, 0xa3, 0x39, 0x68, 0x2b, 0xec, 0x3f, },
+}, {
+ .plaintext = blake2_ordered_sequence,
+ .psize = 247,
+ .digest = (u8[]){ 0xc8, 0xf8, 0xf0, 0xa2, 0x69, 0xfa, 0xcc, 0x4d,
+ 0x32, 0x5f, 0x13, 0x88, 0xca, 0x71, 0x99, 0x8f,
+ 0xf7, 0x30, 0x41, 0x5d, 0x6e, 0x34, 0xb7, 0x6e,
+ 0x3e, 0xd0, 0x46, 0xb6, 0xca, 0x30, 0x66, 0xb2,
+ 0x6f, 0x0c, 0x35, 0x54, 0x17, 0xcd, 0x26, 0x1b,
+ 0xef, 0x48, 0x98, 0xe0, 0x56, 0x7c, 0x05, 0xd2, },
+}, {
+ .ksize = 32,
+ .key = blake2_ordered_sequence,
+ .digest = (u8[]){ 0x15, 0x09, 0x7a, 0x90, 0x13, 0x23, 0xab, 0x0c,
+ 0x0b, 0x43, 0x21, 0x9a, 0xb5, 0xc6, 0x0c, 0x2e,
+ 0x7c, 0x57, 0xfc, 0xcc, 0x4b, 0x0f, 0xf0, 0x57,
+ 0xb7, 0x9c, 0xe7, 0x0f, 0xe1, 0x57, 0xac, 0x37,
+ 0x77, 0xd4, 0xf4, 0x2f, 0x03, 0x3b, 0x64, 0x09,
+ 0x84, 0xa0, 0xb3, 0x24, 0xb7, 0xae, 0x47, 0x5e, },
+}, {
+ .ksize = 1,
+ .key = "B",
+ .plaintext = blake2_ordered_sequence,
+ .psize = 7,
+ .digest = (u8[]){ 0x0b, 0x82, 0x88, 0xca, 0x05, 0x2f, 0x1b, 0x15,
+ 0xdc, 0xbb, 0x22, 0x27, 0x11, 0x6b, 0xf4, 0xd1,
+ 0xe9, 0x8f, 0x1b, 0x0b, 0x58, 0x3f, 0x5e, 0x86,
+ 0x80, 0x82, 0x6f, 0x8e, 0x54, 0xc1, 0x9f, 0x12,
+ 0xcf, 0xe9, 0x56, 0xc1, 0xfc, 0x1a, 0x08, 0xb9,
+ 0x4a, 0x57, 0x0a, 0x76, 0x3c, 0x15, 0x33, 0x18, },
+}, {
+ .ksize = 64,
+ .key = blake2_ordered_sequence,
+ .plaintext = blake2_ordered_sequence,
+ .psize = 15,
+ .digest = (u8[]){ 0x4a, 0x81, 0x55, 0xb9, 0x79, 0x42, 0x8c, 0xc6,
+ 0x4f, 0xfe, 0xca, 0x82, 0x3b, 0xb2, 0xf7, 0xbc,
+ 0x5e, 0xfc, 0xab, 0x09, 0x1c, 0xd6, 0x3b, 0xe1,
+ 0x50, 0x82, 0x3b, 0xde, 0xc7, 0x06, 0xee, 0x3b,
+ 0x29, 0xce, 0xe5, 0x68, 0xe0, 0xff, 0xfa, 0xe1,
+ 0x7a, 0xf1, 0xc0, 0xfe, 0x57, 0xf4, 0x60, 0x49, },
+}, {
+ .ksize = 32,
+ .key = blake2_ordered_sequence,
+ .plaintext = blake2_ordered_sequence,
+ .psize = 64,
+ .digest = (u8[]){ 0x34, 0xbd, 0xe1, 0x99, 0x43, 0x9f, 0x82, 0x72,
+ 0xe7, 0xed, 0x94, 0x9e, 0xe1, 0x84, 0xee, 0x82,
+ 0xfd, 0x26, 0x23, 0xc4, 0x17, 0x8d, 0xf5, 0x04,
+ 0xeb, 0xb7, 0xbc, 0xb8, 0xf3, 0x68, 0xb7, 0xad,
+ 0x94, 0x8e, 0x05, 0x3f, 0x8a, 0x5d, 0x8d, 0x81,
+ 0x3e, 0x88, 0xa7, 0x8c, 0xa2, 0xd5, 0xdc, 0x76, },
+}, {
+ .ksize = 1,
+ .key = "B",
+ .plaintext = blake2_ordered_sequence,
+ .psize = 256,
+ .digest = (u8[]){ 0x22, 0x14, 0xf4, 0xb0, 0x4c, 0xa8, 0xb5, 0x7d,
+ 0xa7, 0x5c, 0x04, 0xeb, 0xd8, 0x8d, 0x04, 0x71,
+ 0xc7, 0x3c, 0xc7, 0x6e, 0x8b, 0x20, 0x36, 0x40,
+ 0x9d, 0xd0, 0x60, 0xc6, 0xe3, 0x0b, 0x6e, 0x50,
+ 0xf5, 0xaf, 0xf5, 0xc6, 0x3b, 0xe3, 0x84, 0x6a,
+ 0x93, 0x1b, 0x12, 0xd6, 0x18, 0x27, 0xba, 0x36, },
+}};
+
+static const struct hash_testvec blake2b_512_tv_template[] = {{
+ .plaintext = blake2_ordered_sequence,
+ .psize = 15,
+ .digest = (u8[]){ 0x44, 0x4b, 0x24, 0x0f, 0xe3, 0xed, 0x86, 0xd0,
+ 0xe2, 0xef, 0x4c, 0xe7, 0xd8, 0x51, 0xed, 0xde,
+ 0x22, 0x15, 0x55, 0x82, 0xaa, 0x09, 0x14, 0x79,
+ 0x7b, 0x72, 0x6c, 0xd0, 0x58, 0xb6, 0xf4, 0x59,
+ 0x32, 0xe0, 0xe1, 0x29, 0x51, 0x68, 0x76, 0x52,
+ 0x7b, 0x1d, 0xd8, 0x8f, 0xc6, 0x6d, 0x71, 0x19,
+ 0xf4, 0xab, 0x3b, 0xed, 0x93, 0xa6, 0x1a, 0x0e,
+ 0x2d, 0x2d, 0x2a, 0xea, 0xc3, 0x36, 0xd9, 0x58, },
+}, {
+ .ksize = 64,
+ .key = blake2_ordered_sequence,
+ .digest = (u8[]){ 0x10, 0xeb, 0xb6, 0x77, 0x00, 0xb1, 0x86, 0x8e,
+ 0xfb, 0x44, 0x17, 0x98, 0x7a, 0xcf, 0x46, 0x90,
+ 0xae, 0x9d, 0x97, 0x2f, 0xb7, 0xa5, 0x90, 0xc2,
+ 0xf0, 0x28, 0x71, 0x79, 0x9a, 0xaa, 0x47, 0x86,
+ 0xb5, 0xe9, 0x96, 0xe8, 0xf0, 0xf4, 0xeb, 0x98,
+ 0x1f, 0xc2, 0x14, 0xb0, 0x05, 0xf4, 0x2d, 0x2f,
+ 0xf4, 0x23, 0x34, 0x99, 0x39, 0x16, 0x53, 0xdf,
+ 0x7a, 0xef, 0xcb, 0xc1, 0x3f, 0xc5, 0x15, 0x68, },
+}, {
+ .ksize = 1,
+ .key = "B",
+ .plaintext = blake2_ordered_sequence,
+ .psize = 1,
+ .digest = (u8[]){ 0xd2, 0x11, 0x31, 0x29, 0x3f, 0xea, 0xca, 0x72,
+ 0x21, 0xe4, 0x06, 0x65, 0x05, 0x2a, 0xd1, 0x02,
+ 0xc0, 0x8d, 0x7b, 0xf1, 0x09, 0x3c, 0xef, 0x88,
+ 0xe1, 0x68, 0x0c, 0xf1, 0x3b, 0xa4, 0xe3, 0x03,
+ 0xed, 0xa0, 0xe3, 0x60, 0x58, 0xa0, 0xdb, 0x52,
+ 0x8a, 0x66, 0x43, 0x09, 0x60, 0x1a, 0xbb, 0x67,
+ 0xc5, 0x84, 0x31, 0x40, 0xfa, 0xde, 0xc1, 0xd0,
+ 0xff, 0x3f, 0x4a, 0x69, 0xd9, 0x92, 0x26, 0x86, },
+}, {
+ .ksize = 32,
+ .key = blake2_ordered_sequence,
+ .plaintext = blake2_ordered_sequence,
+ .psize = 7,
+ .digest = (u8[]){ 0xa3, 0x3e, 0x50, 0xbc, 0xfb, 0xd9, 0xf0, 0x82,
+ 0xa6, 0xd1, 0xdf, 0xaf, 0x82, 0xd0, 0xcf, 0x84,
+ 0x9a, 0x25, 0x3c, 0xae, 0x6d, 0xb5, 0xaf, 0x01,
+ 0xd7, 0xaf, 0xed, 0x50, 0xdc, 0xe2, 0xba, 0xcc,
+ 0x8c, 0x38, 0xf5, 0x16, 0x89, 0x38, 0x86, 0xce,
+ 0x68, 0x10, 0x63, 0x64, 0xa5, 0x79, 0x53, 0xb5,
+ 0x2e, 0x8e, 0xbc, 0x0a, 0xce, 0x95, 0xc0, 0x1e,
+ 0x69, 0x59, 0x1d, 0x3b, 0xd8, 0x19, 0x90, 0xd7, },
+}, {
+ .ksize = 64,
+ .key = blake2_ordered_sequence,
+ .plaintext = blake2_ordered_sequence,
+ .psize = 64,
+ .digest = (u8[]){ 0x65, 0x67, 0x6d, 0x80, 0x06, 0x17, 0x97, 0x2f,
+ 0xbd, 0x87, 0xe4, 0xb9, 0x51, 0x4e, 0x1c, 0x67,
+ 0x40, 0x2b, 0x7a, 0x33, 0x10, 0x96, 0xd3, 0xbf,
+ 0xac, 0x22, 0xf1, 0xab, 0xb9, 0x53, 0x74, 0xab,
+ 0xc9, 0x42, 0xf1, 0x6e, 0x9a, 0xb0, 0xea, 0xd3,
+ 0x3b, 0x87, 0xc9, 0x19, 0x68, 0xa6, 0xe5, 0x09,
+ 0xe1, 0x19, 0xff, 0x07, 0x78, 0x7b, 0x3e, 0xf4,
+ 0x83, 0xe1, 0xdc, 0xdc, 0xcf, 0x6e, 0x30, 0x22, },
+}, {
+ .ksize = 1,
+ .key = "B",
+ .plaintext = blake2_ordered_sequence,
+ .psize = 247,
+ .digest = (u8[]){ 0xc2, 0x96, 0x2c, 0x6b, 0x84, 0xff, 0xee, 0xea,
+ 0x9b, 0xb8, 0x55, 0x2d, 0x6b, 0xa5, 0xd5, 0xe5,
+ 0xbd, 0xb1, 0x54, 0xb6, 0x1e, 0xfb, 0x63, 0x16,
+ 0x6e, 0x22, 0x04, 0xf0, 0x82, 0x7a, 0xc6, 0x99,
+ 0xf7, 0x4c, 0xff, 0x93, 0x71, 0x57, 0x64, 0xd0,
+ 0x08, 0x60, 0x39, 0x98, 0xb8, 0xd2, 0x2b, 0x4e,
+ 0x81, 0x8d, 0xe4, 0x8f, 0xb2, 0x1e, 0x8f, 0x99,
+ 0x98, 0xf1, 0x02, 0x9b, 0x4c, 0x7c, 0x97, 0x1a, },
+}, {
+ .ksize = 32,
+ .key = blake2_ordered_sequence,
+ .plaintext = blake2_ordered_sequence,
+ .psize = 256,
+ .digest = (u8[]){ 0x0f, 0x32, 0x05, 0x09, 0xad, 0x9f, 0x25, 0xf7,
+ 0xf2, 0x00, 0x71, 0xc9, 0x9f, 0x08, 0x58, 0xd1,
+ 0x67, 0xc3, 0xa6, 0x2c, 0x0d, 0xe5, 0x7c, 0x15,
+ 0x35, 0x18, 0x5a, 0x68, 0xc1, 0xca, 0x1c, 0x6e,
+ 0x0f, 0xc4, 0xf6, 0x0c, 0x43, 0xe1, 0xb4, 0x3d,
+ 0x28, 0xe4, 0xc7, 0xa1, 0xcf, 0x6b, 0x17, 0x4e,
+ 0xf1, 0x5b, 0xb5, 0x53, 0xd4, 0xa7, 0xd0, 0x5b,
+ 0xae, 0x15, 0x81, 0x15, 0xd0, 0x88, 0xa0, 0x3c, },
+}};
+
+static const struct hash_testvec blakes2s_128_tv_template[] = {{
+ .digest = (u8[]){ 0x64, 0x55, 0x0d, 0x6f, 0xfe, 0x2c, 0x0a, 0x01,
+ 0xa1, 0x4a, 0xba, 0x1e, 0xad, 0xe0, 0x20, 0x0c, },
+}, {
+ .plaintext = blake2_ordered_sequence,
+ .psize = 64,
+ .digest = (u8[]){ 0xdc, 0x66, 0xca, 0x8f, 0x03, 0x86, 0x58, 0x01,
+ 0xb0, 0xff, 0xe0, 0x6e, 0xd8, 0xa1, 0xa9, 0x0e, },
+}, {
+ .ksize = 16,
+ .key = blake2_ordered_sequence,
+ .plaintext = blake2_ordered_sequence,
+ .psize = 1,
+ .digest = (u8[]){ 0x88, 0x1e, 0x42, 0xe7, 0xbb, 0x35, 0x80, 0x82,
+ 0x63, 0x7c, 0x0a, 0x0f, 0xd7, 0xec, 0x6c, 0x2f, },
+}, {
+ .ksize = 32,
+ .key = blake2_ordered_sequence,
+ .plaintext = blake2_ordered_sequence,
+ .psize = 7,
+ .digest = (u8[]){ 0xcf, 0x9e, 0x07, 0x2a, 0xd5, 0x22, 0xf2, 0xcd,
+ 0xa2, 0xd8, 0x25, 0x21, 0x80, 0x86, 0x73, 0x1c, },
+}, {
+ .ksize = 1,
+ .key = "B",
+ .plaintext = blake2_ordered_sequence,
+ .psize = 15,
+ .digest = (u8[]){ 0xf6, 0x33, 0x5a, 0x2c, 0x22, 0xa0, 0x64, 0xb2,
+ 0xb6, 0x3f, 0xeb, 0xbc, 0xd1, 0xc3, 0xe5, 0xb2, },
+}, {
+ .ksize = 16,
+ .key = blake2_ordered_sequence,
+ .plaintext = blake2_ordered_sequence,
+ .psize = 247,
+ .digest = (u8[]){ 0x72, 0x66, 0x49, 0x60, 0xf9, 0x4a, 0xea, 0xbe,
+ 0x1f, 0xf4, 0x60, 0xce, 0xb7, 0x81, 0xcb, 0x09, },
+}, {
+ .ksize = 32,
+ .key = blake2_ordered_sequence,
+ .plaintext = blake2_ordered_sequence,
+ .psize = 256,
+ .digest = (u8[]){ 0xd5, 0xa4, 0x0e, 0xc3, 0x16, 0xc7, 0x51, 0xa6,
+ 0x3c, 0xd0, 0xd9, 0x11, 0x57, 0xfa, 0x1e, 0xbb, },
+}};
+
+static const struct hash_testvec blakes2s_160_tv_template[] = {{
+ .plaintext = blake2_ordered_sequence,
+ .psize = 7,
+ .digest = (u8[]){ 0xb4, 0xf2, 0x03, 0x49, 0x37, 0xed, 0xb1, 0x3e,
+ 0x5b, 0x2a, 0xca, 0x64, 0x82, 0x74, 0xf6, 0x62,
+ 0xe3, 0xf2, 0x84, 0xff, },
+}, {
+ .plaintext = blake2_ordered_sequence,
+ .psize = 256,
+ .digest = (u8[]){ 0xaa, 0x56, 0x9b, 0xdc, 0x98, 0x17, 0x75, 0xf2,
+ 0xb3, 0x68, 0x83, 0xb7, 0x9b, 0x8d, 0x48, 0xb1,
+ 0x9b, 0x2d, 0x35, 0x05, },
+}, {
+ .ksize = 1,
+ .key = "B",
+ .digest = (u8[]){ 0x50, 0x16, 0xe7, 0x0c, 0x01, 0xd0, 0xd3, 0xc3,
+ 0xf4, 0x3e, 0xb1, 0x6e, 0x97, 0xa9, 0x4e, 0xd1,
+ 0x79, 0x65, 0x32, 0x93, },
+}, {
+ .ksize = 32,
+ .key = blake2_ordered_sequence,
+ .plaintext = blake2_ordered_sequence,
+ .psize = 1,
+ .digest = (u8[]){ 0x1c, 0x2b, 0xcd, 0x9a, 0x68, 0xca, 0x8c, 0x71,
+ 0x90, 0x29, 0x6c, 0x54, 0xfa, 0x56, 0x4a, 0xef,
+ 0xa2, 0x3a, 0x56, 0x9c, },
+}, {
+ .ksize = 16,
+ .key = blake2_ordered_sequence,
+ .plaintext = blake2_ordered_sequence,
+ .psize = 15,
+ .digest = (u8[]){ 0x36, 0xc3, 0x5f, 0x9a, 0xdc, 0x7e, 0xbf, 0x19,
+ 0x68, 0xaa, 0xca, 0xd8, 0x81, 0xbf, 0x09, 0x34,
+ 0x83, 0x39, 0x0f, 0x30, },
+}, {
+ .ksize = 1,
+ .key = "B",
+ .plaintext = blake2_ordered_sequence,
+ .psize = 64,
+ .digest = (u8[]){ 0x86, 0x80, 0x78, 0xa4, 0x14, 0xec, 0x03, 0xe5,
+ 0xb6, 0x9a, 0x52, 0x0e, 0x42, 0xee, 0x39, 0x9d,
+ 0xac, 0xa6, 0x81, 0x63, },
+}, {
+ .ksize = 32,
+ .key = blake2_ordered_sequence,
+ .plaintext = blake2_ordered_sequence,
+ .psize = 247,
+ .digest = (u8[]){ 0x2d, 0xd8, 0xd2, 0x53, 0x66, 0xfa, 0xa9, 0x01,
+ 0x1c, 0x9c, 0xaf, 0xa3, 0xe2, 0x9d, 0x9b, 0x10,
+ 0x0a, 0xf6, 0x73, 0xe8, },
+}};
+
+static const struct hash_testvec blakes2s_224_tv_template[] = {{
+ .plaintext = blake2_ordered_sequence,
+ .psize = 1,
+ .digest = (u8[]){ 0x61, 0xb9, 0x4e, 0xc9, 0x46, 0x22, 0xa3, 0x91,
+ 0xd2, 0xae, 0x42, 0xe6, 0x45, 0x6c, 0x90, 0x12,
+ 0xd5, 0x80, 0x07, 0x97, 0xb8, 0x86, 0x5a, 0xfc,
+ 0x48, 0x21, 0x97, 0xbb, },
+}, {
+ .plaintext = blake2_ordered_sequence,
+ .psize = 247,
+ .digest = (u8[]){ 0x9e, 0xda, 0xc7, 0x20, 0x2c, 0xd8, 0x48, 0x2e,
+ 0x31, 0x94, 0xab, 0x46, 0x6d, 0x94, 0xd8, 0xb4,
+ 0x69, 0xcd, 0xae, 0x19, 0x6d, 0x9e, 0x41, 0xcc,
+ 0x2b, 0xa4, 0xd5, 0xf6, },
+}, {
+ .ksize = 16,
+ .key = blake2_ordered_sequence,
+ .digest = (u8[]){ 0x32, 0xc0, 0xac, 0xf4, 0x3b, 0xd3, 0x07, 0x9f,
+ 0xbe, 0xfb, 0xfa, 0x4d, 0x6b, 0x4e, 0x56, 0xb3,
+ 0xaa, 0xd3, 0x27, 0xf6, 0x14, 0xbf, 0xb9, 0x32,
+ 0xa7, 0x19, 0xfc, 0xb8, },
+}, {
+ .ksize = 1,
+ .key = "B",
+ .plaintext = blake2_ordered_sequence,
+ .psize = 7,
+ .digest = (u8[]){ 0x73, 0xad, 0x5e, 0x6d, 0xb9, 0x02, 0x8e, 0x76,
+ 0xf2, 0x66, 0x42, 0x4b, 0x4c, 0xfa, 0x1f, 0xe6,
+ 0x2e, 0x56, 0x40, 0xe5, 0xa2, 0xb0, 0x3c, 0xe8,
+ 0x7b, 0x45, 0xfe, 0x05, },
+}, {
+ .ksize = 32,
+ .key = blake2_ordered_sequence,
+ .plaintext = blake2_ordered_sequence,
+ .psize = 15,
+ .digest = (u8[]){ 0x16, 0x60, 0xfb, 0x92, 0x54, 0xb3, 0x6e, 0x36,
+ 0x81, 0xf4, 0x16, 0x41, 0xc3, 0x3d, 0xd3, 0x43,
+ 0x84, 0xed, 0x10, 0x6f, 0x65, 0x80, 0x7a, 0x3e,
+ 0x25, 0xab, 0xc5, 0x02, },
+}, {
+ .ksize = 16,
+ .key = blake2_ordered_sequence,
+ .plaintext = blake2_ordered_sequence,
+ .psize = 64,
+ .digest = (u8[]){ 0xca, 0xaa, 0x39, 0x67, 0x9c, 0xf7, 0x6b, 0xc7,
+ 0xb6, 0x82, 0xca, 0x0e, 0x65, 0x36, 0x5b, 0x7c,
+ 0x24, 0x00, 0xfa, 0x5f, 0xda, 0x06, 0x91, 0x93,
+ 0x6a, 0x31, 0x83, 0xb5, },
+}, {
+ .ksize = 1,
+ .key = "B",
+ .plaintext = blake2_ordered_sequence,
+ .psize = 256,
+ .digest = (u8[]){ 0x90, 0x02, 0x26, 0xb5, 0x06, 0x9c, 0x36, 0x86,
+ 0x94, 0x91, 0x90, 0x1e, 0x7d, 0x2a, 0x71, 0xb2,
+ 0x48, 0xb5, 0xe8, 0x16, 0xfd, 0x64, 0x33, 0x45,
+ 0xb3, 0xd7, 0xec, 0xcc, },
+}};
+
+static const struct hash_testvec blakes2s_256_tv_template[] = {{
+ .plaintext = blake2_ordered_sequence,
+ .psize = 15,
+ .digest = (u8[]){ 0xd9, 0x7c, 0x82, 0x8d, 0x81, 0x82, 0xa7, 0x21,
+ 0x80, 0xa0, 0x6a, 0x78, 0x26, 0x83, 0x30, 0x67,
+ 0x3f, 0x7c, 0x4e, 0x06, 0x35, 0x94, 0x7c, 0x04,
+ 0xc0, 0x23, 0x23, 0xfd, 0x45, 0xc0, 0xa5, 0x2d, },
+}, {
+ .ksize = 32,
+ .key = blake2_ordered_sequence,
+ .digest = (u8[]){ 0x48, 0xa8, 0x99, 0x7d, 0xa4, 0x07, 0x87, 0x6b,
+ 0x3d, 0x79, 0xc0, 0xd9, 0x23, 0x25, 0xad, 0x3b,
+ 0x89, 0xcb, 0xb7, 0x54, 0xd8, 0x6a, 0xb7, 0x1a,
+ 0xee, 0x04, 0x7a, 0xd3, 0x45, 0xfd, 0x2c, 0x49, },
+}, {
+ .ksize = 1,
+ .key = "B",
+ .plaintext = blake2_ordered_sequence,
+ .psize = 1,
+ .digest = (u8[]){ 0x22, 0x27, 0xae, 0xaa, 0x6e, 0x81, 0x56, 0x03,
+ 0xa7, 0xe3, 0xa1, 0x18, 0xa5, 0x9a, 0x2c, 0x18,
+ 0xf4, 0x63, 0xbc, 0x16, 0x70, 0xf1, 0xe7, 0x4b,
+ 0x00, 0x6d, 0x66, 0x16, 0xae, 0x9e, 0x74, 0x4e, },
+}, {
+ .ksize = 16,
+ .key = blake2_ordered_sequence,
+ .plaintext = blake2_ordered_sequence,
+ .psize = 7,
+ .digest = (u8[]){ 0x58, 0x5d, 0xa8, 0x60, 0x1c, 0xa4, 0xd8, 0x03,
+ 0x86, 0x86, 0x84, 0x64, 0xd7, 0xa0, 0x8e, 0x15,
+ 0x2f, 0x05, 0xa2, 0x1b, 0xbc, 0xef, 0x7a, 0x34,
+ 0xb3, 0xc5, 0xbc, 0x4b, 0xf0, 0x32, 0xeb, 0x12, },
+}, {
+ .ksize = 32,
+ .key = blake2_ordered_sequence,
+ .plaintext = blake2_ordered_sequence,
+ .psize = 64,
+ .digest = (u8[]){ 0x89, 0x75, 0xb0, 0x57, 0x7f, 0xd3, 0x55, 0x66,
+ 0xd7, 0x50, 0xb3, 0x62, 0xb0, 0x89, 0x7a, 0x26,
+ 0xc3, 0x99, 0x13, 0x6d, 0xf0, 0x7b, 0xab, 0xab,
+ 0xbd, 0xe6, 0x20, 0x3f, 0xf2, 0x95, 0x4e, 0xd4, },
+}, {
+ .ksize = 1,
+ .key = "B",
+ .plaintext = blake2_ordered_sequence,
+ .psize = 247,
+ .digest = (u8[]){ 0x2e, 0x74, 0x1c, 0x1d, 0x03, 0xf4, 0x9d, 0x84,
+ 0x6f, 0xfc, 0x86, 0x32, 0x92, 0x49, 0x7e, 0x66,
+ 0xd7, 0xc3, 0x10, 0x88, 0xfe, 0x28, 0xb3, 0xe0,
+ 0xbf, 0x50, 0x75, 0xad, 0x8e, 0xa4, 0xe6, 0xb2, },
+}, {
+ .ksize = 16,
+ .key = blake2_ordered_sequence,
+ .plaintext = blake2_ordered_sequence,
+ .psize = 256,
+ .digest = (u8[]){ 0xb9, 0xd2, 0x81, 0x0e, 0x3a, 0xb1, 0x62, 0x9b,
+ 0xad, 0x44, 0x05, 0xf4, 0x92, 0x2e, 0x99, 0xc1,
+ 0x4a, 0x47, 0xbb, 0x5b, 0x6f, 0xb2, 0x96, 0xed,
+ 0xd5, 0x06, 0xb5, 0x3a, 0x7c, 0x7a, 0x65, 0x1d, },
+}};
+
#endif /* _CRYPTO_TESTMGR_H */
diff --git a/crypto/tgr192.c b/crypto/tgr192.c
index 052648e24909..aa29c529b44e 100644
--- a/crypto/tgr192.c
+++ b/crypto/tgr192.c
@@ -555,7 +555,7 @@ static int tgr192_final(struct shash_desc *desc, u8 * out)
__le32 *le32p;
u32 t, msb, lsb;
- tgr192_update(desc, NULL, 0); /* flush */ ;
+ tgr192_update(desc, NULL, 0); /* flush */
msb = 0;
t = tctx->nblocks;
@@ -583,7 +583,7 @@ static int tgr192_final(struct shash_desc *desc, u8 * out)
while (tctx->count < 64) {
tctx->hash[tctx->count++] = 0;
}
- tgr192_update(desc, NULL, 0); /* flush */ ;
+ tgr192_update(desc, NULL, 0); /* flush */
memset(tctx->hash, 0, 56); /* fill next block with zeroes */
}
/* append the 64 bit count */
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index ebe1e9e5fd81..4fb97511a16f 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -319,12 +319,6 @@ config ACPI_THERMAL
To compile this driver as a module, choose M here:
the module will be called thermal.
-config ACPI_NUMA
- bool "NUMA support"
- depends on NUMA
- depends on (X86 || IA64 || ARM64)
- default y if IA64 || ARM64
-
config ACPI_CUSTOM_DSDT_FILE
string "Custom DSDT Table file to include"
default ""
@@ -473,8 +467,7 @@ config ACPI_REDUCED_HARDWARE_ONLY
If you are unsure what to do, do not enable this option.
source "drivers/acpi/nfit/Kconfig"
-source "drivers/acpi/hmat/Kconfig"
-
+source "drivers/acpi/numa/Kconfig"
source "drivers/acpi/apei/Kconfig"
source "drivers/acpi/dptf/Kconfig"
@@ -513,11 +506,19 @@ menuconfig PMIC_OPREGION
PMIC chip.
if PMIC_OPREGION
-config CRC_PMIC_OPREGION
- bool "ACPI operation region support for CrystalCove PMIC"
+config BYTCRC_PMIC_OPREGION
+ bool "ACPI operation region support for Bay Trail Crystal Cove PMIC"
+ depends on INTEL_SOC_PMIC
+ help
+ This config adds ACPI operation region support for the Bay Trail
+ version of the Crystal Cove PMIC.
+
+config CHTCRC_PMIC_OPREGION
+ bool "ACPI operation region support for Cherry Trail Crystal Cove PMIC"
depends on INTEL_SOC_PMIC
help
- This config adds ACPI operation region support for CrystalCove PMIC.
+ This config adds ACPI operation region support for the Cherry Trail
+ version of the Crystal Cove PMIC.
config XPOWER_PMIC_OPREGION
bool "ACPI operation region support for XPower AXP288 PMIC"
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 5d361e4e3405..33fdaf67454e 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -48,14 +48,13 @@ acpi-y += acpi_pnp.o
acpi-$(CONFIG_ARM_AMBA) += acpi_amba.o
acpi-y += power.o
acpi-y += event.o
-acpi-$(CONFIG_ACPI_REDUCED_HARDWARE_ONLY) += evged.o
+acpi-y += evged.o
acpi-y += sysfs.o
acpi-y += property.o
acpi-$(CONFIG_X86) += acpi_cmos_rtc.o
acpi-$(CONFIG_X86) += x86/apple.o
acpi-$(CONFIG_X86) += x86/utils.o
acpi-$(CONFIG_DEBUG_FS) += debugfs.o
-acpi-$(CONFIG_ACPI_NUMA) += numa.o
acpi-$(CONFIG_ACPI_PROCFS_POWER) += cm_sbs.o
acpi-y += acpi_lpat.o
acpi-$(CONFIG_ACPI_LPIT) += acpi_lpit.o
@@ -80,7 +79,7 @@ obj-$(CONFIG_ACPI_PROCESSOR) += processor.o
obj-$(CONFIG_ACPI) += container.o
obj-$(CONFIG_ACPI_THERMAL) += thermal.o
obj-$(CONFIG_ACPI_NFIT) += nfit/
-obj-$(CONFIG_ACPI_HMAT) += hmat/
+obj-$(CONFIG_ACPI_NUMA) += numa/
obj-$(CONFIG_ACPI) += acpi_memhotplug.o
obj-$(CONFIG_ACPI_HOTPLUG_IOAPIC) += ioapic.o
obj-$(CONFIG_ACPI_BATTERY) += battery.o
@@ -109,7 +108,8 @@ obj-$(CONFIG_ACPI_APEI) += apei/
obj-$(CONFIG_ACPI_EXTLOG) += acpi_extlog.o
obj-$(CONFIG_PMIC_OPREGION) += pmic/intel_pmic.o
-obj-$(CONFIG_CRC_PMIC_OPREGION) += pmic/intel_pmic_crc.o
+obj-$(CONFIG_BYTCRC_PMIC_OPREGION) += pmic/intel_pmic_bytcrc.o
+obj-$(CONFIG_CHTCRC_PMIC_OPREGION) += pmic/intel_pmic_chtcrc.o
obj-$(CONFIG_XPOWER_PMIC_OPREGION) += pmic/intel_pmic_xpower.o
obj-$(CONFIG_BXT_WC_PMIC_OPREGION) += pmic/intel_pmic_bxtwc.o
obj-$(CONFIG_CHT_WC_PMIC_OPREGION) += pmic/intel_pmic_chtwc.o
diff --git a/drivers/acpi/acpi_configfs.c b/drivers/acpi/acpi_configfs.c
index 57d9d574d4dd..ece8c1a921cc 100644
--- a/drivers/acpi/acpi_configfs.c
+++ b/drivers/acpi/acpi_configfs.c
@@ -53,7 +53,7 @@ static ssize_t acpi_table_aml_write(struct config_item *cfg,
if (!table->header)
return -ENOMEM;
- ret = acpi_load_table(table->header);
+ ret = acpi_load_table(table->header, &table->index);
if (ret) {
kfree(table->header);
table->header = NULL;
@@ -223,7 +223,7 @@ static void acpi_table_drop_item(struct config_group *group,
struct acpi_table *table = container_of(cfg, struct acpi_table, cfg);
ACPI_INFO(("Host-directed Dynamic ACPI Table Unload"));
- acpi_tb_unload_table(table->index);
+ acpi_unload_table(table->index);
}
static struct configfs_group_operations acpi_table_group_ops = {
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index 60bbc5090abe..70f740b09684 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -10,6 +10,7 @@
#include <linux/acpi.h>
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
+#include <linux/dmi.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/mutex.h>
@@ -463,6 +464,18 @@ struct lpss_device_links {
const char *consumer_hid;
const char *consumer_uid;
u32 flags;
+ const struct dmi_system_id *dep_missing_ids;
+};
+
+/* Please keep this list sorted alphabetically by vendor and model */
+static const struct dmi_system_id i2c1_dep_missing_dmi_ids[] = {
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "T200TA"),
+ },
+ },
+ {}
};
/*
@@ -473,36 +486,29 @@ struct lpss_device_links {
* the supplier is not enumerated until after the consumer is probed.
*/
static const struct lpss_device_links lpss_device_links[] = {
+ /* CHT External sdcard slot controller depends on PMIC I2C ctrl */
{"808622C1", "7", "80860F14", "3", DL_FLAG_PM_RUNTIME},
+ /* CHT iGPU depends on PMIC I2C controller */
{"808622C1", "7", "LNXVIDEO", NULL, DL_FLAG_PM_RUNTIME},
+ /* BYT iGPU depends on the Embedded Controller I2C controller (UID 1) */
+ {"80860F41", "1", "LNXVIDEO", NULL, DL_FLAG_PM_RUNTIME,
+ i2c1_dep_missing_dmi_ids},
+ /* BYT CR iGPU depends on PMIC I2C controller (UID 5 on CR) */
{"80860F41", "5", "LNXVIDEO", NULL, DL_FLAG_PM_RUNTIME},
+ /* BYT iGPU depends on PMIC I2C controller (UID 7 on non CR) */
+ {"80860F41", "7", "LNXVIDEO", NULL, DL_FLAG_PM_RUNTIME},
};
-static bool hid_uid_match(struct acpi_device *adev,
- const char *hid2, const char *uid2)
-{
- const char *hid1 = acpi_device_hid(adev);
- const char *uid1 = acpi_device_uid(adev);
-
- if (strcmp(hid1, hid2))
- return false;
-
- if (!uid2)
- return true;
-
- return uid1 && !strcmp(uid1, uid2);
-}
-
static bool acpi_lpss_is_supplier(struct acpi_device *adev,
const struct lpss_device_links *link)
{
- return hid_uid_match(adev, link->supplier_hid, link->supplier_uid);
+ return acpi_dev_hid_uid_match(adev, link->supplier_hid, link->supplier_uid);
}
static bool acpi_lpss_is_consumer(struct acpi_device *adev,
const struct lpss_device_links *link)
{
- return hid_uid_match(adev, link->consumer_hid, link->consumer_uid);
+ return acpi_dev_hid_uid_match(adev, link->consumer_hid, link->consumer_uid);
}
struct hid_uid {
@@ -518,7 +524,7 @@ static int match_hid_uid(struct device *dev, const void *data)
if (!adev)
return 0;
- return hid_uid_match(adev, id->hid, id->uid);
+ return acpi_dev_hid_uid_match(adev, id->hid, id->uid);
}
static struct device *acpi_lpss_find_device(const char *hid, const char *uid)
@@ -570,7 +576,8 @@ static void acpi_lpss_link_consumer(struct device *dev1,
if (!dev2)
return;
- if (acpi_lpss_dep(ACPI_COMPANION(dev2), ACPI_HANDLE(dev1)))
+ if ((link->dep_missing_ids && dmi_check_system(link->dep_missing_ids))
+ || acpi_lpss_dep(ACPI_COMPANION(dev2), ACPI_HANDLE(dev1)))
device_link_add(dev2, dev1, link->flags);
put_device(dev2);
@@ -585,7 +592,8 @@ static void acpi_lpss_link_supplier(struct device *dev1,
if (!dev2)
return;
- if (acpi_lpss_dep(ACPI_COMPANION(dev1), ACPI_HANDLE(dev2)))
+ if ((link->dep_missing_ids && dmi_check_system(link->dep_missing_ids))
+ || acpi_lpss_dep(ACPI_COMPANION(dev1), ACPI_HANDLE(dev2)))
device_link_add(dev1, dev2, link->flags);
put_device(dev2);
diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c
index 00ec4f2bf015..c05050f474cd 100644
--- a/drivers/acpi/acpi_platform.c
+++ b/drivers/acpi/acpi_platform.c
@@ -31,6 +31,44 @@ static const struct acpi_device_id forbidden_id_list[] = {
{"", 0},
};
+static struct platform_device *acpi_platform_device_find_by_companion(struct acpi_device *adev)
+{
+ struct device *dev;
+
+ dev = bus_find_device_by_acpi_dev(&platform_bus_type, adev);
+ return dev ? to_platform_device(dev) : NULL;
+}
+
+static int acpi_platform_device_remove_notify(struct notifier_block *nb,
+ unsigned long value, void *arg)
+{
+ struct acpi_device *adev = arg;
+ struct platform_device *pdev;
+
+ switch (value) {
+ case ACPI_RECONFIG_DEVICE_ADD:
+ /* Nothing to do here */
+ break;
+ case ACPI_RECONFIG_DEVICE_REMOVE:
+ if (!acpi_device_enumerated(adev))
+ break;
+
+ pdev = acpi_platform_device_find_by_companion(adev);
+ if (!pdev)
+ break;
+
+ platform_device_unregister(pdev);
+ put_device(&pdev->dev);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block acpi_platform_notifier = {
+ .notifier_call = acpi_platform_device_remove_notify,
+};
+
static void acpi_platform_fill_resource(struct acpi_device *adev,
const struct resource *src, struct resource *dest)
{
@@ -130,3 +168,8 @@ struct platform_device *acpi_create_platform_device(struct acpi_device *adev,
return pdev;
}
EXPORT_SYMBOL_GPL(acpi_create_platform_device);
+
+void __init acpi_platform_init(void)
+{
+ acpi_reconfig_notifier_register(&acpi_platform_notifier);
+}
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index 4f325e47519f..2f380e7381d6 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -699,9 +699,13 @@ acpi_video_device_EDID(struct acpi_video_device *device,
* event notify code.
* lcd_flag :
* 0. The system BIOS should automatically control the brightness level
- * of the LCD when the power changes from AC to DC
+ * of the LCD when:
+ * - the power changes from AC to DC (ACPI appendix B)
+ * - a brightness hotkey gets pressed (implied by Win7/8 backlight docs)
* 1. The system BIOS should NOT automatically control the brightness
- * level of the LCD when the power changes from AC to DC.
+ * level of the LCD when:
+ * - the power changes from AC to DC (ACPI appendix B)
+ * - a brightness hotkey gets pressed (implied by Win7/8 backlight docs)
* Return Value:
* -EINVAL wrong arg.
*/
diff --git a/drivers/acpi/acpica/acdebug.h b/drivers/acpi/acpica/acdebug.h
index 32f2e38c7570..694cf206fa9a 100644
--- a/drivers/acpi/acpica/acdebug.h
+++ b/drivers/acpi/acpica/acdebug.h
@@ -148,6 +148,8 @@ void acpi_db_find_references(char *object_arg);
void acpi_db_get_bus_info(void);
+acpi_status acpi_db_display_fields(u32 address_space_id);
+
/*
* dbdisply - debug display commands
*/
diff --git a/drivers/acpi/acpica/acstruct.h b/drivers/acpi/acpica/acstruct.h
index 218ff4c8b817..2043dff370b1 100644
--- a/drivers/acpi/acpica/acstruct.h
+++ b/drivers/acpi/acpica/acstruct.h
@@ -192,6 +192,16 @@ struct acpi_device_walk_info {
u32 num_INI;
};
+/* Info used by acpi_db_display_fields */
+
+struct acpi_region_walk_info {
+ u32 debug_level;
+ u32 count;
+ acpi_owner_id owner_id;
+ u8 display_type;
+ u32 address_space_id;
+};
+
/* TBD: [Restructure] Merge with struct above */
struct acpi_walk_info {
diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h
index 601808be86d1..5fb50634e08e 100644
--- a/drivers/acpi/acpica/acutils.h
+++ b/drivers/acpi/acpica/acutils.h
@@ -142,10 +142,11 @@ struct acpi_pkg_info {
/* acpi_ut_dump_buffer */
-#define DB_BYTE_DISPLAY 1
-#define DB_WORD_DISPLAY 2
-#define DB_DWORD_DISPLAY 4
-#define DB_QWORD_DISPLAY 8
+#define DB_BYTE_DISPLAY 0x01
+#define DB_WORD_DISPLAY 0x02
+#define DB_DWORD_DISPLAY 0x04
+#define DB_QWORD_DISPLAY 0x08
+#define DB_DISPLAY_DATA_ONLY 0x10
/*
* utascii - ASCII utilities
diff --git a/drivers/acpi/acpica/dbconvert.c b/drivers/acpi/acpica/dbconvert.c
index 9fd9a98a9cbe..2b84ac093698 100644
--- a/drivers/acpi/acpica/dbconvert.c
+++ b/drivers/acpi/acpica/dbconvert.c
@@ -106,6 +106,10 @@ acpi_db_convert_to_buffer(char *string, union acpi_object *object)
u8 *buffer;
acpi_status status;
+ /* Skip all preceding white space */
+
+ acpi_ut_remove_whitespace(&string);
+
/* Generate the final buffer length */
for (i = 0, length = 0; string[i];) {
diff --git a/drivers/acpi/acpica/dbdisply.c b/drivers/acpi/acpica/dbdisply.c
index 30ab62b0fec8..f2df416d0d2d 100644
--- a/drivers/acpi/acpica/dbdisply.c
+++ b/drivers/acpi/acpica/dbdisply.c
@@ -513,7 +513,6 @@ void acpi_db_display_results(void)
return;
}
- obj_desc = walk_state->method_desc;
node = walk_state->method_node;
if (walk_state->results) {
@@ -565,7 +564,6 @@ void acpi_db_display_calling_tree(void)
return;
}
- node = walk_state->method_node;
acpi_os_printf("Current Control Method Call Tree\n");
while (walk_state) {
diff --git a/drivers/acpi/acpica/dbfileio.c b/drivers/acpi/acpica/dbfileio.c
index c6e25734dc5c..e1b6e54a96ac 100644
--- a/drivers/acpi/acpica/dbfileio.c
+++ b/drivers/acpi/acpica/dbfileio.c
@@ -93,7 +93,7 @@ acpi_status acpi_db_load_tables(struct acpi_new_table_desc *list_head)
while (table_list_head) {
table = table_list_head->table;
- status = acpi_load_table(table);
+ status = acpi_load_table(table, NULL);
if (ACPI_FAILURE(status)) {
if (status == AE_ALREADY_EXISTS) {
acpi_os_printf
diff --git a/drivers/acpi/acpica/dbinput.c b/drivers/acpi/acpica/dbinput.c
index 55a7e10998d8..e1632b340182 100644
--- a/drivers/acpi/acpica/dbinput.c
+++ b/drivers/acpi/acpica/dbinput.c
@@ -50,6 +50,7 @@ enum acpi_ex_debugger_commands {
CMD_EVALUATE,
CMD_EXECUTE,
CMD_EXIT,
+ CMD_FIELDS,
CMD_FIND,
CMD_GO,
CMD_HANDLERS,
@@ -127,6 +128,7 @@ static const struct acpi_db_command_info acpi_gbl_db_commands[] = {
{"EVALUATE", 1},
{"EXECUTE", 1},
{"EXIT", 0},
+ {"FIELDS", 1},
{"FIND", 1},
{"GO", 0},
{"HANDLERS", 0},
@@ -200,6 +202,8 @@ static const struct acpi_db_command_help acpi_gbl_db_command_help[] = {
"Find ACPI name(s) with wildcards\n"},
{1, " Integrity", "Validate namespace integrity\n"},
{1, " Methods", "Display list of loaded control methods\n"},
+ {1, " Fields <AddressSpaceId>",
+ "Display list of loaded field units by space ID\n"},
{1, " Namespace [Object] [Depth]",
"Display loaded namespace tree/subtree\n"},
{1, " Notify <Object> <Value>", "Send a notification on Object\n"},
@@ -507,6 +511,21 @@ char *acpi_db_get_next_token(char *string,
}
break;
+ case '{':
+
+ /* This is the start of a field unit, scan until closing brace */
+
+ string++;
+ start = string;
+ type = ACPI_TYPE_FIELD_UNIT;
+
+ /* Find end of buffer */
+
+ while (*string && (*string != '}')) {
+ string++;
+ }
+ break;
+
case '[':
/* This is the start of a package, scan until closing bracket */
@@ -674,6 +693,7 @@ acpi_db_command_dispatch(char *input_buffer,
union acpi_parse_object *op)
{
u32 temp;
+ u64 temp64;
u32 command_index;
u32 param_count;
char *command_line;
@@ -689,7 +709,6 @@ acpi_db_command_dispatch(char *input_buffer,
param_count = acpi_db_get_line(input_buffer);
command_index = acpi_db_match_command(acpi_gbl_db_args[0]);
- temp = 0;
/*
* We don't want to add the !! command to the history buffer. It
@@ -790,6 +809,21 @@ acpi_db_command_dispatch(char *input_buffer,
status = acpi_db_find_name_in_namespace(acpi_gbl_db_args[1]);
break;
+ case CMD_FIELDS:
+
+ status = acpi_ut_strtoul64(acpi_gbl_db_args[1], &temp64);
+
+ if (ACPI_FAILURE(status)
+ || temp64 >= ACPI_NUM_PREDEFINED_REGIONS) {
+ acpi_os_printf
+ ("Invalid adress space ID: must be between 0 and %u inclusive\n",
+ ACPI_NUM_PREDEFINED_REGIONS - 1);
+ return (AE_OK);
+ }
+
+ status = acpi_db_display_fields((u32)temp64);
+ break;
+
case CMD_GO:
acpi_gbl_cm_single_step = FALSE;
diff --git a/drivers/acpi/acpica/dbmethod.c b/drivers/acpi/acpica/dbmethod.c
index 76a15b6ffc5d..4e48a7de7413 100644
--- a/drivers/acpi/acpica/dbmethod.c
+++ b/drivers/acpi/acpica/dbmethod.c
@@ -321,6 +321,10 @@ acpi_status acpi_db_disassemble_method(char *name)
walk_state->parse_flags |= ACPI_PARSE_DISASSEMBLE;
status = acpi_ps_parse_aml(walk_state);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
(void)acpi_dm_parse_deferred_ops(op);
/* Now we can disassemble the method */
diff --git a/drivers/acpi/acpica/dbnames.c b/drivers/acpi/acpica/dbnames.c
index 63fe30e86807..3615e1a6efd8 100644
--- a/drivers/acpi/acpica/dbnames.c
+++ b/drivers/acpi/acpica/dbnames.c
@@ -10,6 +10,7 @@
#include "acnamesp.h"
#include "acdebug.h"
#include "acpredef.h"
+#include "acinterp.h"
#define _COMPONENT ACPI_CA_DEBUGGER
ACPI_MODULE_NAME("dbnames")
@@ -504,6 +505,86 @@ acpi_db_walk_for_object_counts(acpi_handle obj_handle,
/*******************************************************************************
*
+ * FUNCTION: acpi_db_walk_for_fields
+ *
+ * PARAMETERS: Callback from walk_namespace
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Display the pathname and current value of field units
+ * whose region matches the requested address space ID
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_db_walk_for_fields(acpi_handle obj_handle,
+ u32 nesting_level, void *context, void **return_value)
+{
+ union acpi_object *ret_value;
+ struct acpi_region_walk_info *info =
+ (struct acpi_region_walk_info *)context;
+ struct acpi_buffer buffer;
+ acpi_status status;
+ struct acpi_namespace_node *node = acpi_ns_validate_handle(obj_handle);
+
+ if (!node) {
+ return (AE_OK);
+ }
+ if (node->object->field.region_obj->region.space_id !=
+ info->address_space_id) {
+ return (AE_OK);
+ }
+
+ info->count++;
+
+ /* Get and display the full pathname to this object */
+
+ buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
+ status = acpi_ns_handle_to_pathname(obj_handle, &buffer, TRUE);
+ if (ACPI_FAILURE(status)) {
+ acpi_os_printf("Could Not get pathname for object %p\n",
+ obj_handle);
+ return (AE_OK);
+ }
+
+ acpi_os_printf("%s ", (char *)buffer.pointer);
+ ACPI_FREE(buffer.pointer);
+
+ buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
+ acpi_evaluate_object(obj_handle, NULL, NULL, &buffer);
+
+ /*
+ * Since this is a field unit, surround the output in braces
+ */
+ acpi_os_printf("{");
+
+ ret_value = (union acpi_object *)buffer.pointer;
+ switch (ret_value->type) {
+ case ACPI_TYPE_INTEGER:
+
+ acpi_os_printf("%8.8X%8.8X",
+ ACPI_FORMAT_UINT64(ret_value->integer.value));
+ break;
+
+ case ACPI_TYPE_BUFFER:
+
+ acpi_ut_dump_buffer(ret_value->buffer.pointer,
+ ret_value->buffer.length,
+ DB_DISPLAY_DATA_ONLY | DB_BYTE_DISPLAY, 0);
+ break;
+
+ default:
+
+ break;
+ }
+ acpi_os_printf("}\n");
+
+ ACPI_FREE(buffer.pointer);
+
+ return (AE_OK);
+}
+
+/*******************************************************************************
+ *
* FUNCTION: acpi_db_walk_for_specific_objects
*
* PARAMETERS: Callback from walk_namespace
@@ -630,6 +711,39 @@ acpi_status acpi_db_display_objects(char *obj_type_arg, char *display_count_arg)
/*******************************************************************************
*
+ * FUNCTION: acpi_db_display_fields
+ *
+ * PARAMETERS: address_space_id - Address space of the field units to display
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Display the field units that belong to the given address
+ * space, along with their current values
+ *
+ ******************************************************************************/
+
+acpi_status acpi_db_display_fields(u32 address_space_id)
+{
+ struct acpi_region_walk_info info;
+
+ info.count = 0;
+ info.owner_id = ACPI_OWNER_ID_MAX;
+ info.debug_level = ACPI_UINT32_MAX;
+ info.display_type = ACPI_DISPLAY_SUMMARY | ACPI_DISPLAY_SHORT;
+ info.address_space_id = address_space_id;
+
+ /* Walk the namespace from the root */
+
+ (void)acpi_walk_namespace(ACPI_TYPE_LOCAL_REGION_FIELD,
+ ACPI_ROOT_OBJECT, ACPI_UINT32_MAX,
+ acpi_db_walk_for_fields, NULL, (void *)&info,
+ NULL);
+
+ return (AE_OK);
+}
+
+/*******************************************************************************
+ *
* FUNCTION: acpi_db_integrity_walk
*
* PARAMETERS: Callback from walk_namespace
diff --git a/drivers/acpi/acpica/dbobject.c b/drivers/acpi/acpica/dbobject.c
index f9fc84bc3e84..4b4c530a0654 100644
--- a/drivers/acpi/acpica/dbobject.c
+++ b/drivers/acpi/acpica/dbobject.c
@@ -464,7 +464,6 @@ void acpi_db_decode_arguments(struct acpi_walk_state *walk_state)
u8 display_args = FALSE;
node = walk_state->method_node;
- obj_desc = walk_state->method_desc;
/* There are no arguments for the module-level code case */
diff --git a/drivers/acpi/acpica/dscontrol.c b/drivers/acpi/acpica/dscontrol.c
index 4847f89c678c..5034fab9cf69 100644
--- a/drivers/acpi/acpica/dscontrol.c
+++ b/drivers/acpi/acpica/dscontrol.c
@@ -85,7 +85,7 @@ acpi_ds_exec_begin_control_op(struct acpi_walk_state *walk_state,
walk_state->parser_state.pkg_end;
control_state->control.opcode = op->common.aml_opcode;
control_state->control.loop_timeout = acpi_os_get_timer() +
- (u64)(acpi_gbl_max_loop_iterations * ACPI_100NSEC_PER_SEC);
+ ((u64)acpi_gbl_max_loop_iterations * ACPI_100NSEC_PER_SEC);
/* Push the control state on this walk's control stack */
diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c
index cf4e061bb0f0..faa38a22263a 100644
--- a/drivers/acpi/acpica/dsfield.c
+++ b/drivers/acpi/acpica/dsfield.c
@@ -149,7 +149,6 @@ acpi_ds_create_buffer_field(union acpi_parse_object *op,
if (walk_state->deferred_node) {
node = walk_state->deferred_node;
- status = AE_OK;
} else {
/* Execute flag should always be set when this function is entered */
@@ -264,7 +263,6 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
union acpi_parse_object *child;
#ifdef ACPI_EXEC_APP
- u64 value = 0;
union acpi_operand_object *result_desc;
union acpi_operand_object *obj_desc;
char *name_path;
@@ -406,19 +404,17 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
name_path =
acpi_ns_get_external_pathname(info->
field_node);
- obj_desc =
- acpi_ut_create_integer_object
- (value);
if (ACPI_SUCCESS
(ae_lookup_init_file_entry
- (name_path, &value))) {
+ (name_path, &obj_desc))) {
acpi_ex_write_data_to_field
(obj_desc,
acpi_ns_get_attached_object
(info->field_node),
&result_desc);
+ acpi_ut_remove_reference
+ (obj_desc);
}
- acpi_ut_remove_reference(obj_desc);
ACPI_FREE(name_path);
#endif
}
@@ -636,8 +632,6 @@ acpi_ds_init_field_objects(union acpi_parse_object *op,
}
/* Name already exists, just ignore this error */
-
- status = AE_OK;
}
arg->common.node = node;
diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c
index fb15e9e2373b..9c7adaa7b582 100644
--- a/drivers/acpi/acpica/evgpeblk.c
+++ b/drivers/acpi/acpica/evgpeblk.c
@@ -110,6 +110,9 @@ acpi_status acpi_ev_delete_gpe_block(struct acpi_gpe_block_info *gpe_block)
status =
acpi_hw_disable_gpe_block(gpe_block->xrupt_block, gpe_block, NULL);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
if (!gpe_block->previous && !gpe_block->next) {
@@ -359,10 +362,10 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
walk_info.gpe_device = gpe_device;
walk_info.execute_by_owner_id = FALSE;
- status = acpi_ns_walk_namespace(ACPI_TYPE_METHOD, gpe_device,
- ACPI_UINT32_MAX, ACPI_NS_WALK_NO_UNLOCK,
- acpi_ev_match_gpe_method, NULL,
- &walk_info, NULL);
+ (void)acpi_ns_walk_namespace(ACPI_TYPE_METHOD, gpe_device,
+ ACPI_UINT32_MAX, ACPI_NS_WALK_NO_UNLOCK,
+ acpi_ev_match_gpe_method, NULL, &walk_info,
+ NULL);
/* Return the new block */
diff --git a/drivers/acpi/acpica/evgpeinit.c b/drivers/acpi/acpica/evgpeinit.c
index b04f982e59fa..70d21d5ec5f3 100644
--- a/drivers/acpi/acpica/evgpeinit.c
+++ b/drivers/acpi/acpica/evgpeinit.c
@@ -156,8 +156,6 @@ acpi_status acpi_ev_gpe_initialize(void)
* GPE0 and GPE1 do not have to be contiguous in the GPE number
* space. However, GPE0 always starts at GPE number zero.
*/
- gpe_number_max = acpi_gbl_FADT.gpe1_base +
- ((register_count1 * ACPI_GPE_REGISTER_WIDTH) - 1);
}
}
@@ -169,7 +167,6 @@ acpi_status acpi_ev_gpe_initialize(void)
ACPI_DEBUG_PRINT((ACPI_DB_INIT,
"There are no GPE blocks defined in the FADT\n"));
- status = AE_OK;
goto cleanup;
}
diff --git a/drivers/acpi/acpica/evmisc.c b/drivers/acpi/acpica/evmisc.c
index d45f7639f7ee..aa98fe07cd1b 100644
--- a/drivers/acpi/acpica/evmisc.c
+++ b/drivers/acpi/acpica/evmisc.c
@@ -230,11 +230,15 @@ void acpi_ev_terminate(void)
/* Disable all GPEs in all GPE blocks */
status = acpi_ev_walk_gpe_list(acpi_hw_disable_gpe_block, NULL);
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "Could not disable GPEs in GPE block"));
+ }
status = acpi_ev_remove_global_lock_handler();
if (ACPI_FAILURE(status)) {
- ACPI_ERROR((AE_INFO,
- "Could not remove Global Lock handler"));
+ ACPI_EXCEPTION((AE_INFO, status,
+ "Could not remove Global Lock handler"));
}
acpi_gbl_events_initialized = FALSE;
@@ -250,6 +254,10 @@ void acpi_ev_terminate(void)
/* Deallocate all handler objects installed within GPE info structs */
status = acpi_ev_walk_gpe_list(acpi_ev_delete_gpe_handlers, NULL);
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "Could not delete GPE handlers"));
+ }
/* Return to original mode if necessary */
diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
index 45dc797df05d..1ff126460007 100644
--- a/drivers/acpi/acpica/evregion.c
+++ b/drivers/acpi/acpica/evregion.c
@@ -836,11 +836,11 @@ acpi_ev_orphan_ec_reg_method(struct acpi_namespace_node *ec_device_node)
objects[1].type = ACPI_TYPE_INTEGER;
objects[1].integer.value = ACPI_REG_CONNECT;
- status = acpi_evaluate_object(reg_method, NULL, &args, NULL);
+ (void)acpi_evaluate_object(reg_method, NULL, &args, NULL);
exit:
/* We ignore all errors from above, don't care */
- status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+ (void)acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
return_VOID;
}
diff --git a/drivers/acpi/acpica/evrgnini.c b/drivers/acpi/acpica/evrgnini.c
index 0b47bbcd2a23..aee09640d710 100644
--- a/drivers/acpi/acpica/evrgnini.c
+++ b/drivers/acpi/acpica/evrgnini.c
@@ -198,7 +198,6 @@ acpi_ev_pci_config_region_setup(acpi_handle handle,
* root bridge. Still need to return a context object
* for the new PCI_Config operation region, however.
*/
- status = AE_OK;
} else {
ACPI_EXCEPTION((AE_INFO, status,
"Could not install PciConfig handler "
diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c
index abbf9702aa7f..2919746c9041 100644
--- a/drivers/acpi/acpica/hwxfsleep.c
+++ b/drivers/acpi/acpica/hwxfsleep.c
@@ -166,6 +166,9 @@ acpi_status acpi_enter_sleep_state_s4bios(void)
status = acpi_hw_write_port(acpi_gbl_FADT.smi_command,
(u32)acpi_gbl_FADT.s4_bios_request, 8);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
do {
acpi_os_stall(ACPI_USEC_PER_MSEC);
diff --git a/drivers/acpi/acpica/nsconvert.c b/drivers/acpi/acpica/nsconvert.c
index 14cbf63f1991..c86d0770ed6e 100644
--- a/drivers/acpi/acpica/nsconvert.c
+++ b/drivers/acpi/acpica/nsconvert.c
@@ -486,5 +486,5 @@ acpi_ns_convert_to_reference(struct acpi_namespace_node *scope,
error_exit:
ACPI_FREE(name);
*return_object = new_object;
- return (AE_OK);
+ return (status);
}
diff --git a/drivers/acpi/acpica/nsdump.c b/drivers/acpi/acpica/nsdump.c
index 9731d7cf1b83..9ad340f644a1 100644
--- a/drivers/acpi/acpica/nsdump.c
+++ b/drivers/acpi/acpica/nsdump.c
@@ -291,7 +291,7 @@ acpi_ns_dump_one_object(acpi_handle obj_handle,
for (i = 0;
(i < obj_desc->buffer.length
&& i < 12); i++) {
- acpi_os_printf(" %.2hX",
+ acpi_os_printf(" %2.2X",
obj_desc->buffer.
pointer[i]);
}
@@ -404,7 +404,7 @@ acpi_ns_dump_one_object(acpi_handle obj_handle,
case ACPI_TYPE_LOCAL_BANK_FIELD:
case ACPI_TYPE_LOCAL_INDEX_FIELD:
- acpi_os_printf(" Off %.3X Len %.2X Acc %.2hd\n",
+ acpi_os_printf(" Off %.3X Len %.2X Acc %.2X\n",
(obj_desc->common_field.
base_byte_offset * 8)
+
@@ -589,8 +589,6 @@ acpi_ns_dump_one_object(acpi_handle obj_handle,
goto cleanup;
}
-
- obj_type = ACPI_TYPE_INVALID; /* Terminate loop after next pass */
}
cleanup:
diff --git a/drivers/acpi/acpica/nsxfname.c b/drivers/acpi/acpica/nsxfname.c
index 55b4a5b3331f..161e60ddfb69 100644
--- a/drivers/acpi/acpica/nsxfname.c
+++ b/drivers/acpi/acpica/nsxfname.c
@@ -425,8 +425,8 @@ acpi_get_object_info(acpi_handle handle,
}
if (cls) {
- next_id_string = acpi_ns_copy_device_id(&info->class_code,
- cls, next_id_string);
+ (void)acpi_ns_copy_device_id(&info->class_code,
+ cls, next_id_string);
}
/* Copy the fixed-length data */
diff --git a/drivers/acpi/acpica/psobject.c b/drivers/acpi/acpica/psobject.c
index 98e5c7400e54..ded2779fc8ea 100644
--- a/drivers/acpi/acpica/psobject.c
+++ b/drivers/acpi/acpica/psobject.c
@@ -481,8 +481,7 @@ acpi_ps_complete_op(struct acpi_walk_state *walk_state,
walk_state->opcode = (*op)->common.aml_opcode;
status = walk_state->ascending_callback(walk_state);
- status =
- acpi_ps_next_parse_state(walk_state, *op, status);
+ (void)acpi_ps_next_parse_state(walk_state, *op, status);
status2 = acpi_ps_complete_this_op(walk_state, *op);
if (ACPI_FAILURE(status2)) {
@@ -490,7 +489,6 @@ acpi_ps_complete_op(struct acpi_walk_state *walk_state,
}
}
- status = AE_OK;
break;
case AE_CTRL_BREAK:
@@ -512,14 +510,13 @@ acpi_ps_complete_op(struct acpi_walk_state *walk_state,
walk_state->opcode = (*op)->common.aml_opcode;
status = walk_state->ascending_callback(walk_state);
- status = acpi_ps_next_parse_state(walk_state, *op, status);
+ (void)acpi_ps_next_parse_state(walk_state, *op, status);
status2 = acpi_ps_complete_this_op(walk_state, *op);
if (ACPI_FAILURE(status2)) {
return_ACPI_STATUS(status2);
}
- status = AE_OK;
break;
case AE_CTRL_TERMINATE:
diff --git a/drivers/acpi/acpica/rscreate.c b/drivers/acpi/acpica/rscreate.c
index 570ea0df8a1b..c659b54985a5 100644
--- a/drivers/acpi/acpica/rscreate.c
+++ b/drivers/acpi/acpica/rscreate.c
@@ -312,6 +312,9 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
path_buffer.pointer = user_prt->source;
status = acpi_ns_handle_to_pathname((acpi_handle)node, &path_buffer, FALSE);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
/* +1 to include null terminator */
diff --git a/drivers/acpi/acpica/tbdata.c b/drivers/acpi/acpica/tbdata.c
index 309440010ab2..2cf36451e46f 100644
--- a/drivers/acpi/acpica/tbdata.c
+++ b/drivers/acpi/acpica/tbdata.c
@@ -933,6 +933,9 @@ acpi_tb_load_table(u32 table_index, struct acpi_namespace_node *parent_node)
}
status = acpi_ns_load_table(table_index, parent_node);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
/*
* Update GPEs for any new _Lxx/_Exx methods. Ignore errors. The host is
diff --git a/drivers/acpi/acpica/tbxfload.c b/drivers/acpi/acpica/tbxfload.c
index 86f1693f6d29..0782acf85722 100644
--- a/drivers/acpi/acpica/tbxfload.c
+++ b/drivers/acpi/acpica/tbxfload.c
@@ -268,6 +268,8 @@ ACPI_EXPORT_SYMBOL_INIT(acpi_install_table)
*
* PARAMETERS: table - Pointer to a buffer containing the ACPI
* table to be loaded.
+ * table_idx - Pointer to a u32 for storing the table
+ * index; may be NULL
*
* RETURN: Status
*
@@ -278,7 +280,7 @@ ACPI_EXPORT_SYMBOL_INIT(acpi_install_table)
* to ensure that the table is not deleted or unmapped.
*
******************************************************************************/
-acpi_status acpi_load_table(struct acpi_table_header *table)
+acpi_status acpi_load_table(struct acpi_table_header *table, u32 *table_idx)
{
acpi_status status;
u32 table_index;
@@ -297,6 +299,10 @@ acpi_status acpi_load_table(struct acpi_table_header *table)
status = acpi_tb_install_and_load_table(ACPI_PTR_TO_PHYSADDR(table),
ACPI_TABLE_ORIGIN_EXTERNAL_VIRTUAL,
FALSE, &table_index);
+ if (table_idx) {
+ *table_idx = table_index;
+ }
+
if (ACPI_SUCCESS(status)) {
/* Complete the initialization/resolution of new objects */
@@ -390,3 +396,35 @@ acpi_status acpi_unload_parent_table(acpi_handle object)
}
ACPI_EXPORT_SYMBOL(acpi_unload_parent_table)
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_unload_table
+ *
+ * PARAMETERS: table_index - Index as returned by acpi_load_table
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Unloads the table referenced by table_index (an SSDT or
+ *              OEMx table) and deletes all namespace objects associated
+ *              with that table. Unloading of the DSDT is not allowed.
+ * Note: Mainly intended to support hotplug removal of SSDTs.
+ *
+ ******************************************************************************/
+acpi_status acpi_unload_table(u32 table_index)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(acpi_unload_table);
+
+ if (table_index == 1) {
+
+ /* table_index==1 means DSDT is the owner. DSDT cannot be unloaded */
+
+ return_ACPI_STATUS(AE_TYPE);
+ }
+
+ status = acpi_tb_unload_table(table_index);
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_unload_table)
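Taken together, the table_idx out-parameter added to acpi_load_table() and the new acpi_unload_table() entry point give the host a symmetric load/unload pair for SSDT hotplug. A minimal sketch of the intended usage, assuming table points at a valid SSDT image (error handling trimmed):

    acpi_status status;
    u32 ssdt_index;

    /* Load the SSDT and remember the index ACPICA assigned to it. */
    status = acpi_load_table(table, &ssdt_index);
    if (ACPI_FAILURE(status))
        return status;

    /* ... use the namespace objects the SSDT created ... */

    /* On hot-removal, drop the table and its namespace objects. */
    status = acpi_unload_table(ssdt_index);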
diff --git a/drivers/acpi/acpica/utbuffer.c b/drivers/acpi/acpica/utbuffer.c
index 61db9967ebe4..db897af1de05 100644
--- a/drivers/acpi/acpica/utbuffer.c
+++ b/drivers/acpi/acpica/utbuffer.c
@@ -37,7 +37,9 @@ void acpi_ut_dump_buffer(u8 *buffer, u32 count, u32 display, u32 base_offset)
u32 j;
u32 temp32;
u8 buf_char;
+ u32 display_data_only = display & DB_DISPLAY_DATA_ONLY;
+ display &= ~DB_DISPLAY_DATA_ONLY;
if (!buffer) {
acpi_os_printf("Null Buffer Pointer in DumpBuffer!\n");
return;
@@ -53,7 +55,9 @@ void acpi_ut_dump_buffer(u8 *buffer, u32 count, u32 display, u32 base_offset)
/* Print current offset */
- acpi_os_printf("%8.4X: ", (base_offset + i));
+ if (!display_data_only) {
+ acpi_os_printf("%8.4X: ", (base_offset + i));
+ }
/* Print 16 hex chars */
@@ -109,32 +113,34 @@ void acpi_ut_dump_buffer(u8 *buffer, u32 count, u32 display, u32 base_offset)
* Print the ASCII equivalent characters but watch out for the bad
* unprintable ones (printable chars are 0x20 through 0x7E)
*/
- acpi_os_printf(" ");
- for (j = 0; j < 16; j++) {
- if (i + j >= count) {
- acpi_os_printf("\n");
- return;
+ if (!display_data_only) {
+ acpi_os_printf(" ");
+ for (j = 0; j < 16; j++) {
+ if (i + j >= count) {
+ acpi_os_printf("\n");
+ return;
+ }
+
+ /*
+ * Add comment characters so rest of line is ignored when
+ * compiled
+ */
+ if (j == 0) {
+ acpi_os_printf("// ");
+ }
+
+ buf_char = buffer[(acpi_size)i + j];
+ if (isprint(buf_char)) {
+ acpi_os_printf("%c", buf_char);
+ } else {
+ acpi_os_printf(".");
+ }
}
- /*
- * Add comment characters so rest of line is ignored when
- * compiled
- */
- if (j == 0) {
- acpi_os_printf("// ");
- }
+ /* Done with that line. */
- buf_char = buffer[(acpi_size)i + j];
- if (isprint(buf_char)) {
- acpi_os_printf("%c", buf_char);
- } else {
- acpi_os_printf(".");
- }
+ acpi_os_printf("\n");
}
-
- /* Done with that line. */
-
- acpi_os_printf("\n");
i += 16;
}
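The new DB_DISPLAY_DATA_ONLY bit is peeled off the display argument up front; when set, it suppresses both the leading offset column and the trailing "// ..." ASCII column, leaving bare hex suitable for pasting into ASL. A short sketch, assuming the usual DB_BYTE_DISPLAY mode:

    /* Classic dump: offset column, hex bytes, ASCII comment column. */
    acpi_ut_dump_buffer(buffer, length, DB_BYTE_DISPLAY, 0);

    /* Data-only dump: hex bytes alone. */
    acpi_ut_dump_buffer(buffer, length,
                        DB_BYTE_DISPLAY | DB_DISPLAY_DATA_ONLY, 0);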
diff --git a/drivers/acpi/acpica/utids.c b/drivers/acpi/acpica/utids.c
index e805abdd95b8..30198c828ab6 100644
--- a/drivers/acpi/acpica/utids.c
+++ b/drivers/acpi/acpica/utids.c
@@ -289,9 +289,7 @@ acpi_ut_execute_CID(struct acpi_namespace_node *device_node,
value);
length = ACPI_EISAID_STRING_SIZE;
} else { /* ACPI_TYPE_STRING */
-
/* Copy the String CID from the returned object */
-
strcpy(next_id_string, cid_objects[i]->string.pointer);
length = cid_objects[i]->string.length + 1;
}
diff --git a/drivers/acpi/acpica/uttrack.c b/drivers/acpi/acpica/uttrack.c
index 8052f7ef5025..14de4d15e618 100644
--- a/drivers/acpi/acpica/uttrack.c
+++ b/drivers/acpi/acpica/uttrack.c
@@ -660,7 +660,7 @@ void acpi_ut_dump_allocations(u32 component, const char *module)
case ACPI_DESC_TYPE_PARSER:
acpi_os_printf
- ("AmlOpcode 0x%04hX\n",
+ ("AmlOpcode 0x%04X\n",
descriptor->op.asl.
aml_opcode);
break;
diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c
index 131c35ee9ed3..e358d0046494 100644
--- a/drivers/acpi/apei/apei-base.c
+++ b/drivers/acpi/apei/apei-base.c
@@ -170,9 +170,9 @@ rewind:
if (ip == ctx->ip) {
if (entry->instruction >= ctx->instructions ||
!ctx->ins_table[entry->instruction].run) {
- pr_warning(FW_WARN APEI_PFX
- "Invalid action table, unknown instruction type: %d\n",
- entry->instruction);
+ pr_warn(FW_WARN APEI_PFX
+ "Invalid action table, unknown instruction type: %d\n",
+ entry->instruction);
return -EINVAL;
}
run = ctx->ins_table[entry->instruction].run;
@@ -211,9 +211,9 @@ static int apei_exec_for_each_entry(struct apei_exec_context *ctx,
if (end)
*end = i;
if (ins >= ctx->instructions || !ins_table[ins].run) {
- pr_warning(FW_WARN APEI_PFX
- "Invalid action table, unknown instruction type: %d\n",
- ins);
+ pr_warn(FW_WARN APEI_PFX
+ "Invalid action table, unknown instruction type: %d\n",
+ ins);
return -EINVAL;
}
rc = func(ctx, entry, data);
@@ -579,18 +579,18 @@ static int apei_check_gar(struct acpi_generic_address *reg, u64 *paddr,
space_id = reg->space_id;
*paddr = get_unaligned(&reg->address);
if (!*paddr) {
- pr_warning(FW_BUG APEI_PFX
- "Invalid physical address in GAR [0x%llx/%u/%u/%u/%u]\n",
- *paddr, bit_width, bit_offset, access_size_code,
- space_id);
+ pr_warn(FW_BUG APEI_PFX
+ "Invalid physical address in GAR [0x%llx/%u/%u/%u/%u]\n",
+ *paddr, bit_width, bit_offset, access_size_code,
+ space_id);
return -EINVAL;
}
if (access_size_code < 1 || access_size_code > 4) {
- pr_warning(FW_BUG APEI_PFX
- "Invalid access size code in GAR [0x%llx/%u/%u/%u/%u]\n",
- *paddr, bit_width, bit_offset, access_size_code,
- space_id);
+ pr_warn(FW_BUG APEI_PFX
+ "Invalid access size code in GAR [0x%llx/%u/%u/%u/%u]\n",
+ *paddr, bit_width, bit_offset, access_size_code,
+ space_id);
return -EINVAL;
}
*access_bit_width = 1UL << (access_size_code + 2);
@@ -604,19 +604,19 @@ static int apei_check_gar(struct acpi_generic_address *reg, u64 *paddr,
*access_bit_width = 64;
if ((bit_width + bit_offset) > *access_bit_width) {
- pr_warning(FW_BUG APEI_PFX
- "Invalid bit width + offset in GAR [0x%llx/%u/%u/%u/%u]\n",
- *paddr, bit_width, bit_offset, access_size_code,
- space_id);
+ pr_warn(FW_BUG APEI_PFX
+ "Invalid bit width + offset in GAR [0x%llx/%u/%u/%u/%u]\n",
+ *paddr, bit_width, bit_offset, access_size_code,
+ space_id);
return -EINVAL;
}
if (space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY &&
space_id != ACPI_ADR_SPACE_SYSTEM_IO) {
- pr_warning(FW_BUG APEI_PFX
- "Invalid address space type in GAR [0x%llx/%u/%u/%u/%u]\n",
- *paddr, bit_width, bit_offset, access_size_code,
- space_id);
+ pr_warn(FW_BUG APEI_PFX
+ "Invalid address space type in GAR [0x%llx/%u/%u/%u/%u]\n",
+ *paddr, bit_width, bit_offset, access_size_code,
+ space_id);
return -EINVAL;
}
diff --git a/drivers/acpi/apei/einj.c b/drivers/acpi/apei/einj.c
index e430cf4caec2..086373f8ccb1 100644
--- a/drivers/acpi/apei/einj.c
+++ b/drivers/acpi/apei/einj.c
@@ -172,7 +172,7 @@ static int einj_get_available_error_type(u32 *type)
static int einj_timedout(u64 *t)
{
if ((s64)*t < SPIN_UNIT) {
- pr_warning(FW_WARN "Firmware does not respond in time\n");
+ pr_warn(FW_WARN "Firmware does not respond in time\n");
return 1;
}
*t -= SPIN_UNIT;
@@ -312,7 +312,7 @@ static int __einj_error_trigger(u64 trigger_paddr, u32 type,
}
rc = einj_check_trigger_header(trigger_tab);
if (rc) {
- pr_warning(FW_BUG "Invalid trigger error action table.\n");
+ pr_warn(FW_BUG "Invalid trigger error action table.\n");
goto out_rel_header;
}
diff --git a/drivers/acpi/apei/erst-dbg.c b/drivers/acpi/apei/erst-dbg.c
index d0f3a46716e9..c740f0faad39 100644
--- a/drivers/acpi/apei/erst-dbg.c
+++ b/drivers/acpi/apei/erst-dbg.c
@@ -118,9 +118,8 @@ retry:
if (rc < 0)
goto out;
if (len > ERST_DBG_RECORD_LEN_MAX) {
- pr_warning(ERST_DBG_PFX
- "Record (ID: 0x%llx) length is too long: %zd\n",
- id, len);
+ pr_warn(ERST_DBG_PFX
+ "Record (ID: 0x%llx) length is too long: %zd\n", id, len);
rc = -EIO;
goto out;
}
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index 777f6f7122b4..8906c80175e6 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -235,10 +235,10 @@ static struct ghes *ghes_new(struct acpi_hest_generic *generic)
goto err_unmap_read_ack_addr;
error_block_length = generic->error_block_length;
if (error_block_length > GHES_ESTATUS_MAX_SIZE) {
- pr_warning(FW_WARN GHES_PFX
- "Error status block length is too long: %u for "
- "generic hardware error source: %d.\n",
- error_block_length, generic->header.source_id);
+ pr_warn(FW_WARN GHES_PFX
+ "Error status block length is too long: %u for "
+ "generic hardware error source: %d.\n",
+ error_block_length, generic->header.source_id);
error_block_length = GHES_ESTATUS_MAX_SIZE;
}
ghes->estatus = kmalloc(error_block_length, GFP_KERNEL);
@@ -748,8 +748,8 @@ static void ghes_add_timer(struct ghes *ghes)
unsigned long expire;
if (!g->notify.poll_interval) {
- pr_warning(FW_WARN GHES_PFX "Poll interval is 0 for generic hardware error source: %d, disabled.\n",
- g->header.source_id);
+ pr_warn(FW_WARN GHES_PFX "Poll interval is 0 for generic hardware error source: %d, disabled.\n",
+ g->header.source_id);
return;
}
expire = jiffies + msecs_to_jiffies(g->notify.poll_interval);
@@ -1155,21 +1155,20 @@ static int ghes_probe(struct platform_device *ghes_dev)
}
break;
case ACPI_HEST_NOTIFY_LOCAL:
- pr_warning(GHES_PFX "Generic hardware error source: %d notified via local interrupt is not supported!\n",
- generic->header.source_id);
+ pr_warn(GHES_PFX "Generic hardware error source: %d notified via local interrupt is not supported!\n",
+ generic->header.source_id);
goto err;
default:
- pr_warning(FW_WARN GHES_PFX "Unknown notification type: %u for generic hardware error source: %d\n",
- generic->notify.type, generic->header.source_id);
+ pr_warn(FW_WARN GHES_PFX "Unknown notification type: %u for generic hardware error source: %d\n",
+ generic->notify.type, generic->header.source_id);
goto err;
}
rc = -EIO;
if (generic->error_block_length <
sizeof(struct acpi_hest_generic_status)) {
- pr_warning(FW_BUG GHES_PFX "Invalid error block length: %u for generic hardware error source: %d\n",
- generic->error_block_length,
- generic->header.source_id);
+ pr_warn(FW_BUG GHES_PFX "Invalid error block length: %u for generic hardware error source: %d\n",
+ generic->error_block_length, generic->header.source_id);
goto err;
}
ghes = ghes_new(generic);
diff --git a/drivers/acpi/apei/hest.c b/drivers/acpi/apei/hest.c
index 267bdbf6a7bf..822402480f7d 100644
--- a/drivers/acpi/apei/hest.c
+++ b/drivers/acpi/apei/hest.c
@@ -92,15 +92,15 @@ int apei_hest_parse(apei_hest_func_t func, void *data)
for (i = 0; i < hest_tab->error_source_count; i++) {
len = hest_esrc_len(hest_hdr);
if (!len) {
- pr_warning(FW_WARN HEST_PFX
- "Unknown or unused hardware error source "
- "type: %d for hardware error source: %d.\n",
- hest_hdr->type, hest_hdr->source_id);
+ pr_warn(FW_WARN HEST_PFX
+ "Unknown or unused hardware error source "
+ "type: %d for hardware error source: %d.\n",
+ hest_hdr->type, hest_hdr->source_id);
return -EINVAL;
}
if ((void *)hest_hdr + len >
(void *)hest_tab + hest_tab->header.length) {
- pr_warning(FW_BUG HEST_PFX
+ pr_warn(FW_BUG HEST_PFX
"Table contents overflow for hardware error source: %d.\n",
hest_hdr->source_id);
return -EINVAL;
@@ -164,8 +164,8 @@ static int __init hest_parse_ghes(struct acpi_hest_header *hest_hdr, void *data)
ghes_dev = ghes_arr->ghes_devs[i];
hdr = *(struct acpi_hest_header **)ghes_dev->dev.platform_data;
if (hdr->source_id == hest_hdr->source_id) {
- pr_warning(FW_WARN HEST_PFX "Duplicated hardware error source ID: %d.\n",
- hdr->source_id);
+ pr_warn(FW_WARN HEST_PFX "Duplicated hardware error source ID: %d.\n",
+ hdr->source_id);
return -EIO;
}
}
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 558fedf8a7a1..8f0e0c8d8c3d 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -1176,7 +1176,7 @@ static const struct file_operations acpi_battery_alarm_fops = {
static int acpi_battery_add_fs(struct acpi_device *device)
{
- pr_warning(PREFIX "Deprecated procfs I/F for battery is loaded, please retry with CONFIG_ACPI_PROCFS_POWER cleared\n");
+ pr_warn(PREFIX "Deprecated procfs I/F for battery is loaded, please retry with CONFIG_ACPI_PROCFS_POWER cleared\n");
if (!acpi_device_dir(device)) {
acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device),
acpi_battery_dir);
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index 4a2cde2c536a..d27b01c0323d 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -44,9 +44,19 @@
#define ACPI_BUTTON_DEVICE_NAME_LID "Lid Switch"
#define ACPI_BUTTON_TYPE_LID 0x05
-#define ACPI_BUTTON_LID_INIT_IGNORE 0x00
-#define ACPI_BUTTON_LID_INIT_OPEN 0x01
-#define ACPI_BUTTON_LID_INIT_METHOD 0x02
+enum {
+ ACPI_BUTTON_LID_INIT_IGNORE,
+ ACPI_BUTTON_LID_INIT_OPEN,
+ ACPI_BUTTON_LID_INIT_METHOD,
+ ACPI_BUTTON_LID_INIT_DISABLED,
+};
+
+static const char * const lid_init_state_str[] = {
+ [ACPI_BUTTON_LID_INIT_IGNORE] = "ignore",
+ [ACPI_BUTTON_LID_INIT_OPEN] = "open",
+ [ACPI_BUTTON_LID_INIT_METHOD] = "method",
+ [ACPI_BUTTON_LID_INIT_DISABLED] = "disabled",
+};
#define _COMPONENT ACPI_BUTTON_COMPONENT
ACPI_MODULE_NAME("button");
@@ -65,18 +75,39 @@ static const struct acpi_device_id button_device_ids[] = {
};
MODULE_DEVICE_TABLE(acpi, button_device_ids);
-/*
- * Some devices which don't even have a lid in anyway have a broken _LID
- * method (e.g. pointing to a floating gpio pin) causing spurious LID events.
- */
-static const struct dmi_system_id lid_blacklst[] = {
+/* Please keep this list sorted alphabetically by vendor and model */
+static const struct dmi_system_id dmi_lid_quirks[] = {
+ {
+ /*
+ * Asus T200TA, _LID keeps reporting closed after every second
+ * opening of the lid, causing an immediate re-suspend after
+ * every other lid open. Using LID_INIT_OPEN fixes this.
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "T200TA"),
+ },
+ .driver_data = (void *)(long)ACPI_BUTTON_LID_INIT_OPEN,
+ },
{
- /* GP-electronic T701 */
+ /* GP-electronic T701, _LID method points to a floating GPIO */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
DMI_MATCH(DMI_PRODUCT_NAME, "T701"),
DMI_MATCH(DMI_BIOS_VERSION, "BYT70A.YNCHENG.WIN.007"),
},
+ .driver_data = (void *)(long)ACPI_BUTTON_LID_INIT_DISABLED,
+ },
+ {
+ /*
+ * Medion Akoya E2215T, notification of the LID device only
+ * happens on close, not on open and _LID always returns closed.
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "MEDION"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "E2215T MD60198"),
+ },
+ .driver_data = (void *)(long)ACPI_BUTTON_LID_INIT_OPEN,
},
{}
};
@@ -116,9 +147,8 @@ struct acpi_button {
bool suspended;
};
-static BLOCKING_NOTIFIER_HEAD(acpi_lid_notifier);
static struct acpi_device *lid_device;
-static u8 lid_init_state = ACPI_BUTTON_LID_INIT_METHOD;
+static long lid_init_state = -1;
static unsigned long lid_report_interval __read_mostly = 500;
module_param(lid_report_interval, ulong, 0644);
@@ -146,7 +176,6 @@ static int acpi_lid_evaluate_state(struct acpi_device *device)
static int acpi_lid_notify_state(struct acpi_device *device, int state)
{
struct acpi_button *button = acpi_driver_data(device);
- int ret;
ktime_t next_report;
bool do_update;
@@ -223,18 +252,7 @@ static int acpi_lid_notify_state(struct acpi_device *device, int state)
button->last_time = ktime_get();
}
- ret = blocking_notifier_call_chain(&acpi_lid_notifier, state, device);
- if (ret == NOTIFY_DONE)
- ret = blocking_notifier_call_chain(&acpi_lid_notifier, state,
- device);
- if (ret == NOTIFY_DONE || ret == NOTIFY_OK) {
- /*
- * It is also regarded as success if the notifier_chain
- * returns NOTIFY_OK or NOTIFY_DONE.
- */
- ret = 0;
- }
- return ret;
+ return 0;
}
static int __maybe_unused acpi_button_state_seq_show(struct seq_file *seq,
@@ -331,18 +349,6 @@ static int acpi_button_remove_fs(struct acpi_device *device)
/* --------------------------------------------------------------------------
Driver Interface
-------------------------------------------------------------------------- */
-int acpi_lid_notifier_register(struct notifier_block *nb)
-{
- return blocking_notifier_chain_register(&acpi_lid_notifier, nb);
-}
-EXPORT_SYMBOL(acpi_lid_notifier_register);
-
-int acpi_lid_notifier_unregister(struct notifier_block *nb)
-{
- return blocking_notifier_chain_unregister(&acpi_lid_notifier, nb);
-}
-EXPORT_SYMBOL(acpi_lid_notifier_unregister);
-
int acpi_lid_open(void)
{
if (!lid_device)
@@ -472,7 +478,8 @@ static int acpi_button_add(struct acpi_device *device)
char *name, *class;
int error;
- if (!strcmp(hid, ACPI_BUTTON_HID_LID) && dmi_check_system(lid_blacklst))
+ if (!strcmp(hid, ACPI_BUTTON_HID_LID) &&
+ lid_init_state == ACPI_BUTTON_LID_INIT_DISABLED)
return -ENODEV;
button = kzalloc(sizeof(struct acpi_button), GFP_KERNEL);
@@ -578,36 +585,30 @@ static int acpi_button_remove(struct acpi_device *device)
static int param_set_lid_init_state(const char *val,
const struct kernel_param *kp)
{
- int result = 0;
-
- if (!strncmp(val, "open", sizeof("open") - 1)) {
- lid_init_state = ACPI_BUTTON_LID_INIT_OPEN;
- pr_info("Notify initial lid state as open\n");
- } else if (!strncmp(val, "method", sizeof("method") - 1)) {
- lid_init_state = ACPI_BUTTON_LID_INIT_METHOD;
- pr_info("Notify initial lid state with _LID return value\n");
- } else if (!strncmp(val, "ignore", sizeof("ignore") - 1)) {
- lid_init_state = ACPI_BUTTON_LID_INIT_IGNORE;
- pr_info("Do not notify initial lid state\n");
- } else
- result = -EINVAL;
- return result;
+ int i;
+
+ i = sysfs_match_string(lid_init_state_str, val);
+ if (i < 0)
+ return i;
+
+ lid_init_state = i;
+ pr_info("Initial lid state set to '%s'\n", lid_init_state_str[i]);
+ return 0;
}
-static int param_get_lid_init_state(char *buffer,
- const struct kernel_param *kp)
+static int param_get_lid_init_state(char *buf, const struct kernel_param *kp)
{
- switch (lid_init_state) {
- case ACPI_BUTTON_LID_INIT_OPEN:
- return sprintf(buffer, "open");
- case ACPI_BUTTON_LID_INIT_METHOD:
- return sprintf(buffer, "method");
- case ACPI_BUTTON_LID_INIT_IGNORE:
- return sprintf(buffer, "ignore");
- default:
- return sprintf(buffer, "invalid");
- }
- return 0;
+ int i, c = 0;
+
+ for (i = 0; i < ARRAY_SIZE(lid_init_state_str); i++)
+ if (i == lid_init_state)
+ c += sprintf(buf + c, "[%s] ", lid_init_state_str[i]);
+ else
+ c += sprintf(buf + c, "%s ", lid_init_state_str[i]);
+
+ buf[c - 1] = '\n'; /* Replace the final space with a newline */
+
+ return c;
}
module_param_call(lid_init_state,
@@ -617,6 +618,16 @@ MODULE_PARM_DESC(lid_init_state, "Behavior for reporting LID initial state");
static int acpi_button_register_driver(struct acpi_driver *driver)
{
+ const struct dmi_system_id *dmi_id;
+
+ if (lid_init_state == -1) {
+ dmi_id = dmi_first_match(dmi_lid_quirks);
+ if (dmi_id)
+ lid_init_state = (long)dmi_id->driver_data;
+ else
+ lid_init_state = ACPI_BUTTON_LID_INIT_METHOD;
+ }
+
/*
* Modules such as nouveau.ko and i915.ko have a link time dependency
* on acpi_lid_open(), and would therefore not be loadable on ACPI
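param_set_lid_init_state() now delegates the string matching to sysfs_match_string(), which compares with sysfs_streq() semantics (a trailing newline from a sysfs write is tolerated) and returns the matched index or -EINVAL. A small sketch of those semantics; the array and inputs here are illustrative only:

    static const char * const modes[] = { "ignore", "open", "method", "disabled" };
    int i;

    i = sysfs_match_string(modes, "open\n"); /* i == 1, newline ignored */
    i = sysfs_match_string(modes, "bogus");  /* i == -EINVAL */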
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index da1e5c5ce150..4fd84fbdac29 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -95,12 +95,12 @@ enum {
EC_FLAGS_QUERY_ENABLED, /* Query is enabled */
EC_FLAGS_QUERY_PENDING, /* Query is pending */
EC_FLAGS_QUERY_GUARDING, /* Guard for SCI_EVT check */
- EC_FLAGS_GPE_HANDLER_INSTALLED, /* GPE handler installed */
+ EC_FLAGS_EVENT_HANDLER_INSTALLED, /* Event handler installed */
EC_FLAGS_EC_HANDLER_INSTALLED, /* OpReg handler installed */
- EC_FLAGS_EVT_HANDLER_INSTALLED, /* _Qxx handlers installed */
+ EC_FLAGS_QUERY_METHODS_INSTALLED, /* _Qxx handlers installed */
EC_FLAGS_STARTED, /* Driver is started */
EC_FLAGS_STOPPED, /* Driver is stopped */
- EC_FLAGS_GPE_MASKED, /* GPE masked */
+ EC_FLAGS_EVENTS_MASKED, /* Events masked */
};
#define ACPI_EC_COMMAND_POLL 0x01 /* Available for command byte */
@@ -397,8 +397,8 @@ static inline void acpi_ec_clear_gpe(struct acpi_ec *ec)
static void acpi_ec_submit_request(struct acpi_ec *ec)
{
ec->reference_count++;
- if (test_bit(EC_FLAGS_GPE_HANDLER_INSTALLED, &ec->flags) &&
- ec->reference_count == 1)
+ if (test_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags) &&
+ ec->gpe >= 0 && ec->reference_count == 1)
acpi_ec_enable_gpe(ec, true);
}
@@ -407,28 +407,36 @@ static void acpi_ec_complete_request(struct acpi_ec *ec)
bool flushed = false;
ec->reference_count--;
- if (test_bit(EC_FLAGS_GPE_HANDLER_INSTALLED, &ec->flags) &&
- ec->reference_count == 0)
+ if (test_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags) &&
+ ec->gpe >= 0 && ec->reference_count == 0)
acpi_ec_disable_gpe(ec, true);
flushed = acpi_ec_flushed(ec);
if (flushed)
wake_up(&ec->wait);
}
-static void acpi_ec_mask_gpe(struct acpi_ec *ec)
+static void acpi_ec_mask_events(struct acpi_ec *ec)
{
- if (!test_bit(EC_FLAGS_GPE_MASKED, &ec->flags)) {
- acpi_ec_disable_gpe(ec, false);
+ if (!test_bit(EC_FLAGS_EVENTS_MASKED, &ec->flags)) {
+ if (ec->gpe >= 0)
+ acpi_ec_disable_gpe(ec, false);
+ else
+ disable_irq_nosync(ec->irq);
+
ec_dbg_drv("Polling enabled");
- set_bit(EC_FLAGS_GPE_MASKED, &ec->flags);
+ set_bit(EC_FLAGS_EVENTS_MASKED, &ec->flags);
}
}
-static void acpi_ec_unmask_gpe(struct acpi_ec *ec)
+static void acpi_ec_unmask_events(struct acpi_ec *ec)
{
- if (test_bit(EC_FLAGS_GPE_MASKED, &ec->flags)) {
- clear_bit(EC_FLAGS_GPE_MASKED, &ec->flags);
- acpi_ec_enable_gpe(ec, false);
+ if (test_bit(EC_FLAGS_EVENTS_MASKED, &ec->flags)) {
+ clear_bit(EC_FLAGS_EVENTS_MASKED, &ec->flags);
+ if (ec->gpe >= 0)
+ acpi_ec_enable_gpe(ec, false);
+ else
+ enable_irq(ec->irq);
+
ec_dbg_drv("Polling disabled");
}
}
@@ -454,7 +462,7 @@ static bool acpi_ec_submit_flushable_request(struct acpi_ec *ec)
static void acpi_ec_submit_query(struct acpi_ec *ec)
{
- acpi_ec_mask_gpe(ec);
+ acpi_ec_mask_events(ec);
if (!acpi_ec_event_enabled(ec))
return;
if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) {
@@ -470,7 +478,7 @@ static void acpi_ec_complete_query(struct acpi_ec *ec)
if (test_and_clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags))
ec_dbg_evt("Command(%s) unblocked",
acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
- acpi_ec_unmask_gpe(ec);
+ acpi_ec_unmask_events(ec);
}
static inline void __acpi_ec_enable_event(struct acpi_ec *ec)
@@ -648,7 +656,9 @@ static void advance_transaction(struct acpi_ec *ec)
* ensure a hardware STS 0->1 change after this clearing can always
* trigger a GPE interrupt.
*/
- acpi_ec_clear_gpe(ec);
+ if (ec->gpe >= 0)
+ acpi_ec_clear_gpe(ec);
+
status = acpi_ec_read_status(ec);
t = ec->curr;
/*
@@ -717,7 +727,7 @@ err:
++t->irq_count;
/* Allow triggering on 0 threshold */
if (t->irq_count == ec_storm_threshold)
- acpi_ec_mask_gpe(ec);
+ acpi_ec_mask_events(ec);
}
}
out:
@@ -815,7 +825,7 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
spin_lock_irqsave(&ec->lock, tmp);
if (t->irq_count == ec_storm_threshold)
- acpi_ec_unmask_gpe(ec);
+ acpi_ec_unmask_events(ec);
ec_dbg_req("Command(%s) stopped", acpi_ec_cmd_string(t->command));
ec->curr = NULL;
/* Disable GPE for command processing (IBF=0/OBF=1) */
@@ -1275,18 +1285,28 @@ static void acpi_ec_event_handler(struct work_struct *work)
acpi_ec_check_event(ec);
}
-static u32 acpi_ec_gpe_handler(acpi_handle gpe_device,
- u32 gpe_number, void *data)
+static void acpi_ec_handle_interrupt(struct acpi_ec *ec)
{
unsigned long flags;
- struct acpi_ec *ec = data;
spin_lock_irqsave(&ec->lock, flags);
advance_transaction(ec);
spin_unlock_irqrestore(&ec->lock, flags);
+}
+
+static u32 acpi_ec_gpe_handler(acpi_handle gpe_device,
+ u32 gpe_number, void *data)
+{
+ acpi_ec_handle_interrupt(data);
return ACPI_INTERRUPT_HANDLED;
}
+static irqreturn_t acpi_ec_irq_handler(int irq, void *data)
+{
+ acpi_ec_handle_interrupt(data);
+ return IRQ_HANDLED;
+}
+
/* --------------------------------------------------------------------------
* Address Space Management
* -------------------------------------------------------------------------- */
@@ -1359,6 +1379,8 @@ static struct acpi_ec *acpi_ec_alloc(void)
ec->timestamp = jiffies;
ec->busy_polling = true;
ec->polling_guard = 0;
+ ec->gpe = -1;
+ ec->irq = -1;
return ec;
}
@@ -1406,9 +1428,13 @@ ec_parse_device(acpi_handle handle, u32 Level, void *context, void **retval)
/* Get GPE bit assignment (EC events). */
/* TODO: Add support for _GPE returning a package */
status = acpi_evaluate_integer(handle, "_GPE", NULL, &tmp);
- if (ACPI_FAILURE(status))
- return status;
- ec->gpe = tmp;
+ if (ACPI_SUCCESS(status))
+ ec->gpe = tmp;
+
+ /*
+ * Errors are non-fatal, allowing for ACPI Reduced Hardware
+ * platforms which use GpioInt instead of GPE.
+ */
}
/* Use the global lock for all EC transactions? */
tmp = 0;
@@ -1418,12 +1444,57 @@ ec_parse_device(acpi_handle handle, u32 Level, void *context, void **retval)
return AE_CTRL_TERMINATE;
}
+static void install_gpe_event_handler(struct acpi_ec *ec)
+{
+ acpi_status status =
+ acpi_install_gpe_raw_handler(NULL, ec->gpe,
+ ACPI_GPE_EDGE_TRIGGERED,
+ &acpi_ec_gpe_handler,
+ ec);
+ if (ACPI_SUCCESS(status)) {
+ /* This is not fatal as we can poll EC events */
+ set_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags);
+ acpi_ec_leave_noirq(ec);
+ if (test_bit(EC_FLAGS_STARTED, &ec->flags) &&
+ ec->reference_count >= 1)
+ acpi_ec_enable_gpe(ec, true);
+ }
+}
+
+/* ACPI reduced hardware platforms use a GpioInt specified in _CRS. */
+static int install_gpio_irq_event_handler(struct acpi_ec *ec,
+ struct acpi_device *device)
+{
+ int irq = acpi_dev_gpio_irq_get(device, 0);
+ int ret;
+
+ if (irq < 0)
+ return irq;
+
+ ret = request_irq(irq, acpi_ec_irq_handler, IRQF_SHARED,
+ "ACPI EC", ec);
+
+ /*
+ * Unlike the GPE case, we treat errors here as fatal; we'll only
+ * implement GPIO polling if we find a case that needs it.
+ */
+ if (ret < 0)
+ return ret;
+
+ ec->irq = irq;
+ set_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags);
+ acpi_ec_leave_noirq(ec);
+
+ return 0;
+}
+
/*
* Note: This function returns an error code only when the address space
* handler is not installed, which means "not able to handle
* transactions".
*/
-static int ec_install_handlers(struct acpi_ec *ec, bool handle_events)
+static int ec_install_handlers(struct acpi_ec *ec, struct acpi_device *device,
+ bool handle_events)
{
acpi_status status;
@@ -1456,24 +1527,23 @@ static int ec_install_handlers(struct acpi_ec *ec, bool handle_events)
if (!handle_events)
return 0;
- if (!test_bit(EC_FLAGS_EVT_HANDLER_INSTALLED, &ec->flags)) {
+ if (!test_bit(EC_FLAGS_QUERY_METHODS_INSTALLED, &ec->flags)) {
/* Find and register all query methods */
acpi_walk_namespace(ACPI_TYPE_METHOD, ec->handle, 1,
acpi_ec_register_query_methods,
NULL, ec, NULL);
- set_bit(EC_FLAGS_EVT_HANDLER_INSTALLED, &ec->flags);
+ set_bit(EC_FLAGS_QUERY_METHODS_INSTALLED, &ec->flags);
}
- if (!test_bit(EC_FLAGS_GPE_HANDLER_INSTALLED, &ec->flags)) {
- status = acpi_install_gpe_raw_handler(NULL, ec->gpe,
- ACPI_GPE_EDGE_TRIGGERED,
- &acpi_ec_gpe_handler, ec);
- /* This is not fatal as we can poll EC events */
- if (ACPI_SUCCESS(status)) {
- set_bit(EC_FLAGS_GPE_HANDLER_INSTALLED, &ec->flags);
- acpi_ec_leave_noirq(ec);
- if (test_bit(EC_FLAGS_STARTED, &ec->flags) &&
- ec->reference_count >= 1)
- acpi_ec_enable_gpe(ec, true);
+ if (!test_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags)) {
+ if (ec->gpe >= 0) {
+ install_gpe_event_handler(ec);
+ } else if (device) {
+ int ret = install_gpio_irq_event_handler(ec, device);
+
+ if (ret)
+ return ret;
+ } else { /* No GPE and no GpioInt? */
+ return -ENODEV;
}
}
/* EC is fully operational, allow queries */
@@ -1504,23 +1574,29 @@ static void ec_remove_handlers(struct acpi_ec *ec)
*/
acpi_ec_stop(ec, false);
- if (test_bit(EC_FLAGS_GPE_HANDLER_INSTALLED, &ec->flags)) {
- if (ACPI_FAILURE(acpi_remove_gpe_handler(NULL, ec->gpe,
- &acpi_ec_gpe_handler)))
+ if (test_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags)) {
+ if (ec->gpe >= 0 &&
+ ACPI_FAILURE(acpi_remove_gpe_handler(NULL, ec->gpe,
+ &acpi_ec_gpe_handler)))
pr_err("failed to remove gpe handler\n");
- clear_bit(EC_FLAGS_GPE_HANDLER_INSTALLED, &ec->flags);
+
+ if (ec->irq >= 0)
+ free_irq(ec->irq, ec);
+
+ clear_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags);
}
- if (test_bit(EC_FLAGS_EVT_HANDLER_INSTALLED, &ec->flags)) {
+ if (test_bit(EC_FLAGS_QUERY_METHODS_INSTALLED, &ec->flags)) {
acpi_ec_remove_query_handlers(ec, true, 0);
- clear_bit(EC_FLAGS_EVT_HANDLER_INSTALLED, &ec->flags);
+ clear_bit(EC_FLAGS_QUERY_METHODS_INSTALLED, &ec->flags);
}
}
-static int acpi_ec_setup(struct acpi_ec *ec, bool handle_events)
+static int acpi_ec_setup(struct acpi_ec *ec, struct acpi_device *device,
+ bool handle_events)
{
int ret;
- ret = ec_install_handlers(ec, handle_events);
+ ret = ec_install_handlers(ec, device, handle_events);
if (ret)
return ret;
@@ -1531,8 +1607,8 @@ static int acpi_ec_setup(struct acpi_ec *ec, bool handle_events)
}
acpi_handle_info(ec->handle,
- "GPE=0x%x, EC_CMD/EC_SC=0x%lx, EC_DATA=0x%lx\n",
- ec->gpe, ec->command_addr, ec->data_addr);
+ "GPE=0x%x, IRQ=%d, EC_CMD/EC_SC=0x%lx, EC_DATA=0x%lx\n",
+ ec->gpe, ec->irq, ec->command_addr, ec->data_addr);
return ret;
}
@@ -1596,7 +1672,7 @@ static int acpi_ec_add(struct acpi_device *device)
}
}
- ret = acpi_ec_setup(ec, true);
+ ret = acpi_ec_setup(ec, device, true);
if (ret)
goto err_query;
@@ -1716,7 +1792,7 @@ void __init acpi_ec_dsdt_probe(void)
* At this point, the GPE is not fully initialized, so do not
* handle the events.
*/
- ret = acpi_ec_setup(ec, false);
+ ret = acpi_ec_setup(ec, NULL, false);
if (ret) {
acpi_ec_free(ec);
return;
@@ -1889,14 +1965,21 @@ void __init acpi_ec_ecdt_probe(void)
ec->command_addr = ecdt_ptr->control.address;
ec->data_addr = ecdt_ptr->data.address;
}
- ec->gpe = ecdt_ptr->gpe;
+
+ /*
+ * Ignore the GPE value on Reduced Hardware platforms.
+ * Some products have this set to an erroneous value.
+ */
+ if (!acpi_gbl_reduced_hardware)
+ ec->gpe = ecdt_ptr->gpe;
+
ec->handle = ACPI_ROOT_OBJECT;
/*
* At this point, the namespace is not initialized, so do not find
* the namespace objects, or handle the events.
*/
- ret = acpi_ec_setup(ec, false);
+ ret = acpi_ec_setup(ec, NULL, false);
if (ret) {
acpi_ec_free(ec);
return;
@@ -1928,7 +2011,7 @@ static int acpi_ec_suspend_noirq(struct device *dev)
* masked at the low level without side effects.
*/
if (ec_no_wakeup && test_bit(EC_FLAGS_STARTED, &ec->flags) &&
- ec->reference_count >= 1)
+ ec->gpe >= 0 && ec->reference_count >= 1)
acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_DISABLE);
acpi_ec_enter_noirq(ec);
@@ -1943,7 +2026,7 @@ static int acpi_ec_resume_noirq(struct device *dev)
acpi_ec_leave_noirq(ec);
if (ec_no_wakeup && test_bit(EC_FLAGS_STARTED, &ec->flags) &&
- ec->reference_count >= 1)
+ ec->gpe >= 0 && ec->reference_count >= 1)
acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_ENABLE);
return 0;
diff --git a/drivers/acpi/hmat/Makefile b/drivers/acpi/hmat/Makefile
deleted file mode 100644
index 1c20ef36a385..000000000000
--- a/drivers/acpi/hmat/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_ACPI_HMAT) := hmat.o
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index afe6636f9ad3..3616daec650b 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -165,7 +165,8 @@ static inline void acpi_early_processor_osc(void) {}
-------------------------------------------------------------------------- */
struct acpi_ec {
acpi_handle handle;
- u32 gpe;
+ int gpe;
+ int irq;
unsigned long command_addr;
unsigned long data_addr;
bool global_lock;
diff --git a/drivers/acpi/hmat/Kconfig b/drivers/acpi/numa/Kconfig
index 95a29964dbea..fcf2e556d69d 100644
--- a/drivers/acpi/hmat/Kconfig
+++ b/drivers/acpi/numa/Kconfig
@@ -1,8 +1,15 @@
# SPDX-License-Identifier: GPL-2.0
+config ACPI_NUMA
+ bool "NUMA support"
+ depends on NUMA
+ depends on (X86 || IA64 || ARM64)
+ default y if IA64 || ARM64
+
config ACPI_HMAT
bool "ACPI Heterogeneous Memory Attribute Table Support"
depends on ACPI_NUMA
select HMEM_REPORTING
+ select MEMREGION
help
If set, this option has the kernel parse and report the
platform's ACPI HMAT (Heterogeneous Memory Attributes Table),
diff --git a/drivers/acpi/numa/Makefile b/drivers/acpi/numa/Makefile
new file mode 100644
index 000000000000..517a6c689a94
--- /dev/null
+++ b/drivers/acpi/numa/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_ACPI_NUMA) += srat.o
+obj-$(CONFIG_ACPI_HMAT) += hmat.o
diff --git a/drivers/acpi/hmat/hmat.c b/drivers/acpi/numa/hmat.c
index 8b0de8a3c647..2c32cfb72370 100644
--- a/drivers/acpi/hmat/hmat.c
+++ b/drivers/acpi/numa/hmat.c
@@ -8,12 +8,18 @@
* the applicable attributes with the node's interfaces.
*/
+#define pr_fmt(fmt) "acpi/hmat: " fmt
+#define dev_fmt(fmt) "acpi/hmat: " fmt
+
#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/platform_device.h>
#include <linux/list_sort.h>
+#include <linux/memregion.h>
#include <linux/memory.h>
#include <linux/mutex.h>
#include <linux/node.h>
@@ -49,6 +55,7 @@ struct memory_target {
struct list_head node;
unsigned int memory_pxm;
unsigned int processor_pxm;
+ struct resource memregions;
struct node_hmem_attrs hmem_attrs;
struct list_head caches;
struct node_cache_attrs cache_attrs;
@@ -104,22 +111,36 @@ static __init void alloc_memory_initiator(unsigned int cpu_pxm)
list_add_tail(&initiator->node, &initiators);
}
-static __init void alloc_memory_target(unsigned int mem_pxm)
+static __init void alloc_memory_target(unsigned int mem_pxm,
+ resource_size_t start, resource_size_t len)
{
struct memory_target *target;
target = find_mem_target(mem_pxm);
- if (target)
- return;
-
- target = kzalloc(sizeof(*target), GFP_KERNEL);
- if (!target)
- return;
+ if (!target) {
+ target = kzalloc(sizeof(*target), GFP_KERNEL);
+ if (!target)
+ return;
+ target->memory_pxm = mem_pxm;
+ target->processor_pxm = PXM_INVAL;
+ target->memregions = (struct resource) {
+ .name = "ACPI mem",
+ .start = 0,
+ .end = -1,
+ .flags = IORESOURCE_MEM,
+ };
+ list_add_tail(&target->node, &targets);
+ INIT_LIST_HEAD(&target->caches);
+ }
- target->memory_pxm = mem_pxm;
- target->processor_pxm = PXM_INVAL;
- list_add_tail(&target->node, &targets);
- INIT_LIST_HEAD(&target->caches);
+ /*
+ * There are potentially multiple ranges per PXM, so record each
+ * in the per-target memregions resource tree.
+ */
+ if (!__request_region(&target->memregions, start, len, "memory target",
+ IORESOURCE_MEM))
+ pr_warn("failed to reserve %#llx - %#llx in pxm: %d\n",
+ start, start + len, mem_pxm);
}
static __init const char *hmat_data_type(u8 type)
@@ -272,7 +293,7 @@ static __init int hmat_parse_locality(union acpi_subtable_headers *header,
u8 type, mem_hier;
if (hmat_loc->header.length < sizeof(*hmat_loc)) {
- pr_notice("HMAT: Unexpected locality header length: %d\n",
+ pr_notice("HMAT: Unexpected locality header length: %u\n",
hmat_loc->header.length);
return -EINVAL;
}
@@ -284,12 +305,12 @@ static __init int hmat_parse_locality(union acpi_subtable_headers *header,
total_size = sizeof(*hmat_loc) + sizeof(*entries) * ipds * tpds +
sizeof(*inits) * ipds + sizeof(*targs) * tpds;
if (hmat_loc->header.length < total_size) {
- pr_notice("HMAT: Unexpected locality header length:%d, minimum required:%d\n",
+ pr_notice("HMAT: Unexpected locality header length:%u, minimum required:%u\n",
hmat_loc->header.length, total_size);
return -EINVAL;
}
- pr_info("HMAT: Locality: Flags:%02x Type:%s Initiator Domains:%d Target Domains:%d Base:%lld\n",
+ pr_info("HMAT: Locality: Flags:%02x Type:%s Initiator Domains:%u Target Domains:%u Base:%lld\n",
hmat_loc->flags, hmat_data_type(type), ipds, tpds,
hmat_loc->entry_base_unit);
@@ -302,7 +323,7 @@ static __init int hmat_parse_locality(union acpi_subtable_headers *header,
value = hmat_normalize(entries[init * tpds + targ],
hmat_loc->entry_base_unit,
type);
- pr_info(" Initiator-Target[%d-%d]:%d%s\n",
+ pr_info(" Initiator-Target[%u-%u]:%u%s\n",
inits[init], targs[targ], value,
hmat_data_type_suffix(type));
@@ -329,13 +350,13 @@ static __init int hmat_parse_cache(union acpi_subtable_headers *header,
u32 attrs;
if (cache->header.length < sizeof(*cache)) {
- pr_notice("HMAT: Unexpected cache header length: %d\n",
+ pr_notice("HMAT: Unexpected cache header length: %u\n",
cache->header.length);
return -EINVAL;
}
attrs = cache->cache_attributes;
- pr_info("HMAT: Cache: Domain:%d Size:%llu Attrs:%08x SMBIOS Handles:%d\n",
+ pr_info("HMAT: Cache: Domain:%u Size:%llu Attrs:%08x SMBIOS Handles:%d\n",
cache->memory_PD, cache->cache_size, attrs,
cache->number_of_SMBIOShandles);
@@ -390,17 +411,17 @@ static int __init hmat_parse_proximity_domain(union acpi_subtable_headers *heade
struct memory_target *target = NULL;
if (p->header.length != sizeof(*p)) {
- pr_notice("HMAT: Unexpected address range header length: %d\n",
+ pr_notice("HMAT: Unexpected address range header length: %u\n",
p->header.length);
return -EINVAL;
}
if (hmat_revision == 1)
- pr_info("HMAT: Memory (%#llx length %#llx) Flags:%04x Processor Domain:%d Memory Domain:%d\n",
+ pr_info("HMAT: Memory (%#llx length %#llx) Flags:%04x Processor Domain:%u Memory Domain:%u\n",
p->reserved3, p->reserved4, p->flags, p->processor_PD,
p->memory_PD);
else
- pr_info("HMAT: Memory Flags:%04x Processor Domain:%d Memory Domain:%d\n",
+ pr_info("HMAT: Memory Flags:%04x Processor Domain:%u Memory Domain:%u\n",
p->flags, p->processor_PD, p->memory_PD);
if (p->flags & ACPI_HMAT_MEMORY_PD_VALID && hmat_revision == 1) {
@@ -417,7 +438,7 @@ static int __init hmat_parse_proximity_domain(union acpi_subtable_headers *heade
pr_debug("HMAT: Invalid Processor Domain\n");
return -EINVAL;
}
- target->processor_pxm = p_node;
+ target->processor_pxm = p->processor_PD;
}
return 0;
@@ -452,7 +473,7 @@ static __init int srat_parse_mem_affinity(union acpi_subtable_headers *header,
return -EINVAL;
if (!(ma->flags & ACPI_SRAT_MEM_ENABLED))
return 0;
- alloc_memory_target(ma->proximity_domain);
+ alloc_memory_target(ma->proximity_domain, ma->base_address, ma->length);
return 0;
}
@@ -613,11 +634,92 @@ static void hmat_register_target_perf(struct memory_target *target)
node_set_perf_attrs(mem_nid, &target->hmem_attrs, 0);
}
+static void hmat_register_target_device(struct memory_target *target,
+ struct resource *r)
+{
+ /* define a clean / non-busy resource for the platform device */
+ struct resource res = {
+ .start = r->start,
+ .end = r->end,
+ .flags = IORESOURCE_MEM,
+ };
+ struct platform_device *pdev;
+ struct memregion_info info;
+ int rc, id;
+
+ rc = region_intersects(res.start, resource_size(&res), IORESOURCE_MEM,
+ IORES_DESC_SOFT_RESERVED);
+ if (rc != REGION_INTERSECTS)
+ return;
+
+ id = memregion_alloc(GFP_KERNEL);
+ if (id < 0) {
+ pr_err("memregion allocation failure for %pr\n", &res);
+ return;
+ }
+
+ pdev = platform_device_alloc("hmem", id);
+ if (!pdev) {
+ pr_err("hmem device allocation failure for %pr\n", &res);
+ goto out_pdev;
+ }
+
+ pdev->dev.numa_node = acpi_map_pxm_to_online_node(target->memory_pxm);
+ info = (struct memregion_info) {
+ .target_node = acpi_map_pxm_to_node(target->memory_pxm),
+ };
+ rc = platform_device_add_data(pdev, &info, sizeof(info));
+ if (rc < 0) {
+ pr_err("hmem memregion_info allocation failure for %pr\n", &res);
+ goto out_pdev;
+ }
+
+ rc = platform_device_add_resources(pdev, &res, 1);
+ if (rc < 0) {
+ pr_err("hmem resource allocation failure for %pr\n", &res);
+ goto out_resource;
+ }
+
+ rc = platform_device_add(pdev);
+ if (rc < 0) {
+ dev_err(&pdev->dev, "device add failed for %pr\n", &res);
+ goto out_resource;
+ }
+
+ return;
+
+out_resource:
+ put_device(&pdev->dev);
+out_pdev:
+ memregion_free(id);
+}
+
+static void hmat_register_target_devices(struct memory_target *target)
+{
+ struct resource *res;
+
+ /*
+ * Do not bother creating devices if no driver is available to
+ * consume them.
+ */
+ if (!IS_ENABLED(CONFIG_DEV_DAX_HMEM))
+ return;
+
+ for (res = target->memregions.child; res; res = res->sibling)
+ hmat_register_target_device(target, res);
+}
+
static void hmat_register_target(struct memory_target *target)
{
int nid = pxm_to_node(target->memory_pxm);
/*
+ * Devices may belong to either an offline or online
+ * node, so unconditionally add them.
+ */
+ hmat_register_target_devices(target);
+
+ /*
* Skip offline nodes. This can happen when memory
* marked EFI_MEMORY_SP, "specific purpose", is applied
* to all the memory in a proximity domain leading to
@@ -677,11 +779,21 @@ static __init void hmat_free_structures(void)
struct target_cache *tcache, *cnext;
list_for_each_entry_safe(target, tnext, &targets, node) {
+ struct resource *res, *res_next;
+
list_for_each_entry_safe(tcache, cnext, &target->caches, node) {
list_del(&tcache->node);
kfree(tcache);
}
+
list_del(&target->node);
+ res = target->memregions.child;
+ while (res) {
+ res_next = res->sibling;
+ __release_region(&target->memregions, res->start,
+ resource_size(res));
+ res = res_next;
+ }
kfree(target);
}
@@ -748,4 +860,4 @@ out_put:
acpi_put_table(tbl);
return 0;
}
-subsys_initcall(hmat_init);
+device_initcall(hmat_init);
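Note how target->memregions is used as a private, standalone resource tree root rather than a registered system resource: each SRAT range is recorded under it with __request_region(), walked through the child/sibling links in hmat_register_target_devices(), and torn down with __release_region(). A minimal sketch of the same pattern, with illustrative addresses:

    struct resource root = {
        .name  = "example root",
        .start = 0,
        .end   = -1,
        .flags = IORESOURCE_MEM,
    };
    struct resource *res;

    /* Record two disjoint ranges in the private tree. */
    __request_region(&root, 0x1000, 0x1000, "range A", IORESOURCE_MEM);
    __request_region(&root, 0x4000, 0x2000, "range B", IORESOURCE_MEM);

    /* Walk them back in address order. */
    for (res = root.child; res; res = res->sibling)
        pr_info("range %pr\n", res);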
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa/srat.c
index eadbf90e65d1..eadbf90e65d1 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa/srat.c
diff --git a/drivers/acpi/osi.c b/drivers/acpi/osi.c
index bec0bebc7f52..9f6853809138 100644
--- a/drivers/acpi/osi.c
+++ b/drivers/acpi/osi.c
@@ -473,9 +473,9 @@ static const struct dmi_system_id acpi_osi_dmi_table[] __initconst = {
*/
/*
- * Without this this EEEpc exports a non working WMI interface, with
- * this it exports a working "good old" eeepc_laptop interface, fixing
- * both brightness control, and rfkill not working.
+ * Without this quirk the EEEpc exports a non-working WMI interface;
+ * with it, it exports the working "good old" eeepc_laptop interface,
+ * fixing both brightness control and rfkill.
*/
{
.callback = dmi_enable_osi_linux,
diff --git a/drivers/acpi/pmic/intel_pmic.c b/drivers/acpi/pmic/intel_pmic.c
index 452041398b34..a371f273f99d 100644
--- a/drivers/acpi/pmic/intel_pmic.c
+++ b/drivers/acpi/pmic/intel_pmic.c
@@ -252,7 +252,7 @@ int intel_pmic_install_opregion_handler(struct device *dev, acpi_handle handle,
struct regmap *regmap,
struct intel_pmic_opregion_data *d)
{
- acpi_status status;
+ acpi_status status = AE_OK;
struct intel_pmic_opregion *opregion;
int ret;
@@ -270,7 +270,8 @@ int intel_pmic_install_opregion_handler(struct device *dev, acpi_handle handle,
opregion->regmap = regmap;
opregion->lpat_table = acpi_lpat_get_conversion_table(handle);
- status = acpi_install_address_space_handler(handle,
+ if (d->power_table_count)
+ status = acpi_install_address_space_handler(handle,
PMIC_POWER_OPREGION_ID,
intel_pmic_power_handler,
NULL, opregion);
@@ -279,7 +280,8 @@ int intel_pmic_install_opregion_handler(struct device *dev, acpi_handle handle,
goto out_error;
}
- status = acpi_install_address_space_handler(handle,
+ if (d->thermal_table_count)
+ status = acpi_install_address_space_handler(handle,
PMIC_THERMAL_OPREGION_ID,
intel_pmic_thermal_handler,
NULL, opregion);
@@ -301,12 +303,16 @@ int intel_pmic_install_opregion_handler(struct device *dev, acpi_handle handle,
return 0;
out_remove_thermal_handler:
- acpi_remove_address_space_handler(handle, PMIC_THERMAL_OPREGION_ID,
- intel_pmic_thermal_handler);
+ if (d->thermal_table_count)
+ acpi_remove_address_space_handler(handle,
+ PMIC_THERMAL_OPREGION_ID,
+ intel_pmic_thermal_handler);
out_remove_power_handler:
- acpi_remove_address_space_handler(handle, PMIC_POWER_OPREGION_ID,
- intel_pmic_power_handler);
+ if (d->power_table_count)
+ acpi_remove_address_space_handler(handle,
+ PMIC_POWER_OPREGION_ID,
+ intel_pmic_power_handler);
out_error:
acpi_lpat_free_conversion_table(opregion->lpat_table);
diff --git a/drivers/acpi/pmic/intel_pmic_crc.c b/drivers/acpi/pmic/intel_pmic_bytcrc.c
index a0f411a6e5ac..2a692cc4b7ae 100644
--- a/drivers/acpi/pmic/intel_pmic_crc.c
+++ b/drivers/acpi/pmic/intel_pmic_bytcrc.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Intel CrystalCove PMIC operation region driver
+ * Intel Bay Trail Crystal Cove PMIC operation region driver
*
* Copyright (C) 2014 Intel Corporation. All rights reserved.
*/
@@ -295,7 +295,7 @@ static int intel_crc_pmic_opregion_probe(struct platform_device *pdev)
static struct platform_driver intel_crc_pmic_opregion_driver = {
.probe = intel_crc_pmic_opregion_probe,
.driver = {
- .name = "crystal_cove_pmic",
+ .name = "byt_crystal_cove_pmic",
},
};
builtin_platform_driver(intel_crc_pmic_opregion_driver);
diff --git a/drivers/acpi/pmic/intel_pmic_chtcrc.c b/drivers/acpi/pmic/intel_pmic_chtcrc.c
new file mode 100644
index 000000000000..ebf8d3187df1
--- /dev/null
+++ b/drivers/acpi/pmic/intel_pmic_chtcrc.c
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Cherry Trail Crystal Cove PMIC operation region driver
+ *
+ * Copyright (C) 2019 Hans de Goede <hdegoede@redhat.com>
+ */
+
+#include <linux/acpi.h>
+#include <linux/init.h>
+#include <linux/mfd/intel_soc_pmic.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include "intel_pmic.h"
+
+/*
+ * We have no docs for the CHT Crystal Cove PMIC. The Asus Zenfone-2 kernel
+ * code has 2 Crystal Cove regulator drivers; one calls the PMIC a "Crystal
+ * Cove Plus" PMIC and talks about Cherry Trail, so presumably that one
+ * could be used to get register info for the regulators if we need to
+ * implement regulator support in the future.
+ *
+ * For now the sole purpose of this driver is to make
+ * intel_soc_pmic_exec_mipi_pmic_seq_element work on devices with a
+ * CHT Crystal Cove PMIC.
+ */
+static struct intel_pmic_opregion_data intel_chtcrc_pmic_opregion_data = {
+ .pmic_i2c_address = 0x6e,
+};
+
+static int intel_chtcrc_pmic_opregion_probe(struct platform_device *pdev)
+{
+ struct intel_soc_pmic *pmic = dev_get_drvdata(pdev->dev.parent);
+ return intel_pmic_install_opregion_handler(&pdev->dev,
+ ACPI_HANDLE(pdev->dev.parent), pmic->regmap,
+ &intel_chtcrc_pmic_opregion_data);
+}
+
+static struct platform_driver intel_chtcrc_pmic_opregion_driver = {
+ .probe = intel_chtcrc_pmic_opregion_probe,
+ .driver = {
+ .name = "cht_crystal_cove_pmic",
+ },
+};
+builtin_platform_driver(intel_chtcrc_pmic_opregion_driver);
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index ed56c6d20b08..2ae95df2e74f 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -642,6 +642,19 @@ static int acpi_idle_bm_check(void)
return bm_status;
}
+static void wait_for_freeze(void)
+{
+#ifdef CONFIG_X86
+ /* No delay is needed if we are in guest */
+ if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
+ return;
+#endif
+ /* Dummy wait op - must do something useless after P_LVL2 read
+ because chipsets cannot guarantee that STPCLK# signal
+ gets asserted in time to freeze execution properly. */
+ inl(acpi_gbl_FADT.xpm_timer_block.address);
+}
+
/**
* acpi_idle_do_entry - enter idle state using the appropriate method
* @cx: cstate data
@@ -658,10 +671,7 @@ static void __cpuidle acpi_idle_do_entry(struct acpi_processor_cx *cx)
} else {
/* IO port based C-state */
inb(cx->address);
- /* Dummy wait op - must do something useless after P_LVL2 read
- because chipsets cannot guarantee that STPCLK# signal
- gets asserted in time to freeze execution properly. */
- inl(acpi_gbl_FADT.xpm_timer_block.address);
+ wait_for_freeze();
}
}
@@ -682,8 +692,7 @@ static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
safe_halt();
else if (cx->entry_method == ACPI_CSTATE_SYSTEMIO) {
inb(cx->address);
- /* See comment in acpi_idle_do_entry() */
- inl(acpi_gbl_FADT.xpm_timer_block.address);
+ wait_for_freeze();
} else
return -ENODEV;
}
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index 3eacf474e1e3..e601c4511a8b 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -1317,6 +1317,52 @@ acpi_fwnode_get_reference_args(const struct fwnode_handle *fwnode,
args_count, args);
}
+static const char *acpi_fwnode_get_name(const struct fwnode_handle *fwnode)
+{
+ const struct acpi_device *adev;
+ struct fwnode_handle *parent;
+
+ /* Is this the root node? */
+ parent = fwnode_get_parent(fwnode);
+ if (!parent)
+ return "\\";
+
+ fwnode_handle_put(parent);
+
+ if (is_acpi_data_node(fwnode)) {
+ const struct acpi_data_node *dn = to_acpi_data_node(fwnode);
+
+ return dn->name;
+ }
+
+ adev = to_acpi_device_node(fwnode);
+ if (WARN_ON(!adev))
+ return NULL;
+
+ return acpi_device_bid(adev);
+}
+
+static const char *
+acpi_fwnode_get_name_prefix(const struct fwnode_handle *fwnode)
+{
+ struct fwnode_handle *parent;
+
+ /* Is this the root node? */
+ parent = fwnode_get_parent(fwnode);
+ if (!parent)
+ return "";
+
+ /* Is this the 2nd node from the root? */
+ parent = fwnode_get_next_parent(parent);
+ if (!parent)
+ return "";
+
+ fwnode_handle_put(parent);
+
+ /* ACPI device or data node. */
+ return ".";
+}
+
static struct fwnode_handle *
acpi_fwnode_get_parent(struct fwnode_handle *fwnode)
{
@@ -1357,6 +1403,8 @@ acpi_fwnode_device_get_match_data(const struct fwnode_handle *fwnode,
.get_parent = acpi_node_get_parent, \
.get_next_child_node = acpi_get_next_subnode, \
.get_named_child_node = acpi_fwnode_get_named_child_node, \
+ .get_name = acpi_fwnode_get_name, \
+ .get_name_prefix = acpi_fwnode_get_name_prefix, \
.get_reference_args = acpi_fwnode_get_reference_args, \
.graph_get_next_endpoint = \
acpi_graph_get_next_endpoint, \
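With get_name() and get_name_prefix() wired up, generic fwnode code can compose a node's full path by walking to the root and concatenating prefix plus name at each level, e.g. "\_SB.PCI0.CIO2" for device nodes or "node0.port0" for data nodes. Assuming the %pfw printk extension from the accompanying vsprintf change is available, a hedged usage sketch:

    /* Full path of a firmware node. */
    pr_info("node: %pfwf\n", fwnode);

    /* Just the node's own name. */
    pr_info("name: %pfwP\n", fwnode);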
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index 2a3e392751e0..3b4448972374 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -413,8 +413,8 @@ static void acpi_dev_get_irqresource(struct resource *res, u32 gsi,
u8 pol = p ? ACPI_ACTIVE_LOW : ACPI_ACTIVE_HIGH;
if (triggering != trig || polarity != pol) {
- pr_warning("ACPI: IRQ %d override to %s, %s\n", gsi,
- t ? "level" : "edge", p ? "low" : "high");
+ pr_warn("ACPI: IRQ %d override to %s, %s\n", gsi,
+ t ? "level" : "edge", p ? "low" : "high");
triggering = trig;
polarity = pol;
}
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index aad6be5c0af0..915650bf519f 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -2174,6 +2174,7 @@ int __init acpi_scan_init(void)
acpi_pci_root_init();
acpi_pci_link_init();
acpi_processor_init();
+ acpi_platform_init();
acpi_lpss_init();
acpi_apd_init();
acpi_cmos_rtc_init();
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index e3974a8f8fd4..804ac0df58ec 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -455,6 +455,7 @@ EXPORT_SYMBOL(acpi_evaluate_ost);
/**
* acpi_handle_path: Return the object path of handle
+ * @handle: ACPI device handle
*
* Caller must free the returned buffer
*/
@@ -473,6 +474,9 @@ static char *acpi_handle_path(acpi_handle handle)
/**
* acpi_handle_printk: Print message with ACPI prefix and object path
+ * @level: log level
+ * @handle: ACPI device handle
+ * @fmt: format string
*
* This function is called through acpi_handle_<level> macros and prints
* a message with ACPI prefix and object path. This function acquires
@@ -501,6 +505,9 @@ EXPORT_SYMBOL(acpi_handle_printk);
#if defined(CONFIG_DYNAMIC_DEBUG)
/**
* __acpi_handle_debug: pr_debug with ACPI prefix and object path
+ * @descriptor: Dynamic Debug descriptor
+ * @handle: ACPI device handle
+ * @fmt: format string
*
* This function is called through acpi_handle_debug macro and debug
* prints a message with ACPI prefix and object path. This function
@@ -695,6 +702,31 @@ bool acpi_check_dsm(acpi_handle handle, const guid_t *guid, u64 rev, u64 funcs)
EXPORT_SYMBOL(acpi_check_dsm);
/**
+ * acpi_dev_hid_uid_match - Match device by supplied HID and UID
+ * @adev: ACPI device to match.
+ * @hid2: Hardware ID of the device.
+ * @uid2: Unique ID of the device, pass NULL to not check _UID.
+ *
+ * Matches HID and UID in @adev with given @hid2 and @uid2.
+ * Returns true if both match.
+ */
+bool acpi_dev_hid_uid_match(struct acpi_device *adev,
+ const char *hid2, const char *uid2)
+{
+ const char *hid1 = acpi_device_hid(adev);
+ const char *uid1 = acpi_device_uid(adev);
+
+ if (strcmp(hid1, hid2))
+ return false;
+
+ if (!uid2)
+ return true;
+
+ return uid1 && !strcmp(uid1, uid2);
+}
+EXPORT_SYMBOL(acpi_dev_hid_uid_match);
+
+/**
* acpi_dev_found - Detect presence of a given ACPI device in the namespace.
* @hid: Hardware ID of the device.
*
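acpi_dev_hid_uid_match() centralizes a HID/UID comparison that several drivers previously open-coded. A short usage sketch; the device lookup, the "INT33FE"/"1" identifiers and the quirk helpers are illustrative only:

    struct acpi_device *adev = ACPI_COMPANION(dev);

    /* Match on HID alone ... */
    if (acpi_dev_hid_uid_match(adev, "INT33FE", NULL))
        apply_quirk(dev);

    /* ... or require a specific _UID as well. */
    if (acpi_dev_hid_uid_match(adev, "INT33FE", "1"))
        apply_other_quirk(dev);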
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 265d9dd46a5e..a606262da9d6 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -65,6 +65,7 @@
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#include <linux/task_work.h>
+#include <linux/sizes.h>
#include <uapi/linux/android/binder.h>
#include <uapi/linux/android/binderfs.h>
@@ -92,11 +93,6 @@ static atomic_t binder_last_id;
static int proc_show(struct seq_file *m, void *unused);
DEFINE_SHOW_ATTRIBUTE(proc);
-/* This is only defined in include/asm-arm/sizes.h */
-#ifndef SZ_1K
-#define SZ_1K 0x400
-#endif
-
#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)
enum {
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index eb76a823fbb2..2d8b9b91dee0 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -268,7 +268,6 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
alloc->pages_high = index + 1;
trace_binder_alloc_page_end(alloc, index);
- /* vm_insert_page does not seem to increment the refcount */
}
if (mm) {
up_read(&mm->mmap_sem);
@@ -277,8 +276,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
return 0;
free_range:
- for (page_addr = end - PAGE_SIZE; page_addr >= start;
- page_addr -= PAGE_SIZE) {
+ for (page_addr = end - PAGE_SIZE; 1; page_addr -= PAGE_SIZE) {
bool ret;
size_t index;
@@ -291,6 +289,8 @@ free_range:
WARN_ON(!ret);
trace_binder_free_lru_end(alloc, index);
+ if (page_addr == start)
+ break;
continue;
err_vm_insert_page_failed:
@@ -298,7 +298,8 @@ err_vm_insert_page_failed:
page->page_ptr = NULL;
err_alloc_page_failed:
err_page_ptr_cleared:
- ;
+ if (page_addr == start)
+ break;
}
err_no_vma:
if (mm) {
@@ -681,17 +682,17 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
struct binder_buffer *buffer;
mutex_lock(&binder_alloc_mmap_lock);
- if (alloc->buffer) {
+ if (alloc->buffer_size) {
ret = -EBUSY;
failure_string = "already mapped";
goto err_already_mapped;
}
+ alloc->buffer_size = min_t(unsigned long, vma->vm_end - vma->vm_start,
+ SZ_4M);
+ mutex_unlock(&binder_alloc_mmap_lock);
alloc->buffer = (void __user *)vma->vm_start;
- mutex_unlock(&binder_alloc_mmap_lock);
- alloc->buffer_size = min_t(unsigned long, vma->vm_end - vma->vm_start,
- SZ_4M);
alloc->pages = kcalloc(alloc->buffer_size / PAGE_SIZE,
sizeof(alloc->pages[0]),
GFP_KERNEL);
@@ -722,8 +723,9 @@ err_alloc_buf_struct_failed:
kfree(alloc->pages);
alloc->pages = NULL;
err_alloc_pages_failed:
- mutex_lock(&binder_alloc_mmap_lock);
alloc->buffer = NULL;
+ mutex_lock(&binder_alloc_mmap_lock);
+ alloc->buffer_size = 0;
err_already_mapped:
mutex_unlock(&binder_alloc_mmap_lock);
binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
@@ -841,14 +843,20 @@ void binder_alloc_print_pages(struct seq_file *m,
int free = 0;
mutex_lock(&alloc->mutex);
- for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
- page = &alloc->pages[i];
- if (!page->page_ptr)
- free++;
- else if (list_empty(&page->lru))
- active++;
- else
- lru++;
+ /*
+ * Make sure the binder_alloc is fully initialized; otherwise we might
+ * read inconsistent state.
+ */
+ if (binder_alloc_get_vma(alloc) != NULL) {
+ for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
+ page = &alloc->pages[i];
+ if (!page->page_ptr)
+ free++;
+ else if (list_empty(&page->lru))
+ active++;
+ else
+ lru++;
+ }
}
mutex_unlock(&alloc->mutex);
seq_printf(m, " pages: %d:%d:%d\n", active, lru, free);
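The free_range rework above replaces the page_addr >= start loop condition with an explicit break once start itself has been handled: page_addr is unsigned, so when start is 0 the old condition could never become false and the decrement would wrap around instead of terminating. A generic sketch of the safe descending-iteration pattern, assuming page-aligned start/end with end > start (names illustrative):

    unsigned long addr;

    for (addr = end - PAGE_SIZE; 1; addr -= PAGE_SIZE) {
        process(addr);          /* hypothetical per-page work */
        if (addr == start)      /* test before decrementing ... */
            break;              /* ... so addr never wraps past 0 */
    }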
diff --git a/drivers/ata/acard-ahci.c b/drivers/ata/acard-ahci.c
index 753985c01517..46dc54d18f0b 100644
--- a/drivers/ata/acard-ahci.c
+++ b/drivers/ata/acard-ahci.c
@@ -56,7 +56,7 @@ struct acard_sg {
__le32 size; /* bit 31 (EOT) max==0x10000 (64k) */
};
-static void acard_ahci_qc_prep(struct ata_queued_cmd *qc);
+static enum ata_completion_errors acard_ahci_qc_prep(struct ata_queued_cmd *qc);
static bool acard_ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
static int acard_ahci_port_start(struct ata_port *ap);
static int acard_ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
@@ -210,7 +210,7 @@ static unsigned int acard_ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
return si;
}
-static void acard_ahci_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors acard_ahci_qc_prep(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct ahci_port_priv *pp = ap->private_data;
@@ -248,6 +248,8 @@ static void acard_ahci_qc_prep(struct ata_queued_cmd *qc)
opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;
ahci_fill_cmd_slot(pp, qc->hw_tag, opts);
+
+ return AC_ERR_OK;
}
static bool acard_ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 05c2b32dcc4d..ec6c64fce74a 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -56,6 +56,7 @@ enum board_ids {
board_ahci_yes_fbs,
/* board IDs for specific chipsets in alphabetical order */
+ board_ahci_al,
board_ahci_avn,
board_ahci_mcp65,
board_ahci_mcp77,
@@ -167,6 +168,13 @@ static const struct ata_port_info ahci_port_info[] = {
.port_ops = &ahci_ops,
},
/* by chipsets */
+ [board_ahci_al] = {
+ AHCI_HFLAGS (AHCI_HFLAG_NO_PMP | AHCI_HFLAG_NO_MSI),
+ .flags = AHCI_FLAG_COMMON,
+ .pio_mask = ATA_PIO4,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &ahci_ops,
+ },
[board_ahci_avn] = {
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
@@ -415,6 +423,11 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(ATI, 0x4394), board_ahci_sb700 }, /* ATI SB700/800 */
{ PCI_VDEVICE(ATI, 0x4395), board_ahci_sb700 }, /* ATI SB700/800 */
+ /* Amazon's Annapurna Labs support */
+ { PCI_DEVICE(PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS, 0x0031),
+ .class = PCI_CLASS_STORAGE_SATA_AHCI,
+ .class_mask = 0xffffff,
+ board_ahci_al },
/* AMD */
{ PCI_VDEVICE(AMD, 0x7800), board_ahci }, /* AMD Hudson-2 */
{ PCI_VDEVICE(AMD, 0x7900), board_ahci }, /* AMD CZ */
diff --git a/drivers/ata/ahci_tegra.c b/drivers/ata/ahci_tegra.c
index e3163dae5e85..cb55ebc1725b 100644
--- a/drivers/ata/ahci_tegra.c
+++ b/drivers/ata/ahci_tegra.c
@@ -483,7 +483,6 @@ static int tegra_ahci_probe(struct platform_device *pdev)
struct tegra_ahci_priv *tegra;
struct resource *res;
int ret;
- unsigned int i;
hpriv = ahci_platform_get_resources(pdev, 0);
if (IS_ERR(hpriv))
@@ -543,8 +542,9 @@ static int tegra_ahci_probe(struct platform_device *pdev)
if (!tegra->supplies)
return -ENOMEM;
- for (i = 0; i < tegra->soc->num_supplies; i++)
- tegra->supplies[i].supply = tegra->soc->supply_names[i];
+ regulator_bulk_set_supply_names(tegra->supplies,
+ tegra->soc->supply_names,
+ tegra->soc->num_supplies);
ret = devm_regulator_bulk_get(&pdev->dev,
tegra->soc->num_supplies,
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index e4da725381d3..3ca7720e7d8f 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -841,6 +841,12 @@ static int piix_broken_suspend(void)
},
},
{
+ .ident = "TECRA M3",
+ .matches = {
+ DMI_MATCH(DMI_OEM_STRING, "Tecra M3,"),
+ },
+ },
+ {
.ident = "TECRA M4",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
@@ -955,18 +961,10 @@ static int piix_broken_suspend(void)
{ } /* terminate list */
};
- static const char *oemstrs[] = {
- "Tecra M3,",
- };
- int i;
if (dmi_check_system(sysids))
return 1;
- for (i = 0; i < ARRAY_SIZE(oemstrs); i++)
- if (dmi_find_device(DMI_DEV_TYPE_OEM_STRING, oemstrs[i], NULL))
- return 1;
-
/* TECRA M4 sometimes forgets its identify and reports bogus
* DMI information. As the bogus information is a bit
* generic, match as many entries as possible. This manual
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index bff369d9a1a7..ea5bf5f4cbed 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -57,7 +57,7 @@ static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
static int ahci_port_start(struct ata_port *ap);
static void ahci_port_stop(struct ata_port *ap);
-static void ahci_qc_prep(struct ata_queued_cmd *qc);
+static enum ata_completion_errors ahci_qc_prep(struct ata_queued_cmd *qc);
static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc);
static void ahci_freeze(struct ata_port *ap);
static void ahci_thaw(struct ata_port *ap);
@@ -1624,7 +1624,7 @@ static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc)
return sata_pmp_qc_defer_cmd_switch(qc);
}
-static void ahci_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors ahci_qc_prep(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct ahci_port_priv *pp = ap->private_data;
@@ -1660,6 +1660,8 @@ static void ahci_qc_prep(struct ata_queued_cmd *qc)
opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;
ahci_fill_cmd_slot(pp, qc->hw_tag, opts);
+
+ return AC_ERR_OK;
}
static void ahci_fbs_dec_intr(struct ata_port *ap)
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 28c492be0a57..e9017c570bc5 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4980,7 +4980,10 @@ int ata_std_qc_defer(struct ata_queued_cmd *qc)
return ATA_DEFER_LINK;
}
-void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
+enum ata_completion_errors ata_noop_qc_prep(struct ata_queued_cmd *qc)
+{
+ return AC_ERR_OK;
+}
/**
* ata_sg_init - Associate command with scatter-gather table.
@@ -5443,7 +5446,9 @@ void ata_qc_issue(struct ata_queued_cmd *qc)
return;
}
- ap->ops->qc_prep(qc);
+ qc->err_mask |= ap->ops->qc_prep(qc);
+ if (unlikely(qc->err_mask))
+ goto err;
trace_ata_qc_issue(qc);
qc->err_mask |= ap->ops->qc_issue(qc);
if (unlikely(qc->err_mask))
@@ -6708,6 +6713,9 @@ void ata_host_detach(struct ata_host *host)
{
int i;
+ /* Ensure ata_port probe has completed */
+ async_synchronize_full();
+
for (i = 0; i < host->n_ports; i++)
ata_port_detach(host->ports[i]);
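The many qc_prep conversions in this series all feed the change visible here in ata_qc_issue(): ->qc_prep now returns an enum ata_completion_errors, and any failure is folded into qc->err_mask before issue, replacing the old BUG_ON escape hatch (see the sata_mv hunk below). A simplified sketch of the flow, with the types pared down and the AC_ERR bit value illustrative rather than authoritative:

    enum ata_completion_errors {
        AC_ERR_OK      = 0,
        AC_ERR_INVALID = (1 << 7),      /* bit position illustrative */
    };

    struct qc;

    struct port_ops {
        enum ata_completion_errors (*qc_prep)(struct qc *qc);
        unsigned int (*qc_issue)(struct qc *qc);
    };

    struct qc {
        unsigned int err_mask;
        const struct port_ops *ops;
    };

    static void qc_fail(struct qc *qc)
    {
        (void)qc;       /* stand-in for completing the command with error */
    }

    static void issue_command(struct qc *qc)
    {
        /* prep failures now accumulate in err_mask instead of BUG_ON */
        qc->err_mask |= qc->ops->qc_prep(qc);
        if (qc->err_mask) {
            qc_fail(qc);
            return;
        }

        qc->err_mask |= qc->ops->qc_issue(qc);
        if (qc->err_mask)
            qc_fail(qc);
    }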
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 4ed682da52ae..038db94216a9 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -2679,12 +2679,14 @@ static void ata_bmdma_fill_sg_dumb(struct ata_queued_cmd *qc)
* LOCKING:
* spin_lock_irqsave(host lock)
*/
-void ata_bmdma_qc_prep(struct ata_queued_cmd *qc)
+enum ata_completion_errors ata_bmdma_qc_prep(struct ata_queued_cmd *qc)
{
if (!(qc->flags & ATA_QCFLAG_DMAMAP))
- return;
+ return AC_ERR_OK;
ata_bmdma_fill_sg(qc);
+
+ return AC_ERR_OK;
}
EXPORT_SYMBOL_GPL(ata_bmdma_qc_prep);
@@ -2697,12 +2699,14 @@ EXPORT_SYMBOL_GPL(ata_bmdma_qc_prep);
* LOCKING:
* spin_lock_irqsave(host lock)
*/
-void ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc)
+enum ata_completion_errors ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc)
{
if (!(qc->flags & ATA_QCFLAG_DMAMAP))
- return;
+ return AC_ERR_OK;
ata_bmdma_fill_sg_dumb(qc);
+
+ return AC_ERR_OK;
}
EXPORT_SYMBOL_GPL(ata_bmdma_dumb_qc_prep);
diff --git a/drivers/ata/pata_artop.c b/drivers/ata/pata_artop.c
index 3aa006c5ed0c..6bd2228bb6ff 100644
--- a/drivers/ata/pata_artop.c
+++ b/drivers/ata/pata_artop.c
@@ -100,7 +100,7 @@ static void artop6210_load_piomode(struct ata_port *ap, struct ata_device *adev,
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int dn = adev->devno + 2 * ap->port_no;
- const u16 timing[2][5] = {
+ static const u16 timing[2][5] = {
{ 0x0000, 0x000A, 0x0008, 0x0303, 0x0301 },
{ 0x0700, 0x070A, 0x0708, 0x0403, 0x0401 }
@@ -154,7 +154,7 @@ static void artop6260_load_piomode (struct ata_port *ap, struct ata_device *adev
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int dn = adev->devno + 2 * ap->port_no;
- const u8 timing[2][5] = {
+ static const u8 timing[2][5] = {
{ 0x00, 0x0A, 0x08, 0x33, 0x31 },
{ 0x70, 0x7A, 0x78, 0x43, 0x41 }
diff --git a/drivers/ata/pata_falcon.c b/drivers/ata/pata_falcon.c
index 41e0d6a6cd05..27b0952fde6b 100644
--- a/drivers/ata/pata_falcon.c
+++ b/drivers/ata/pata_falcon.c
@@ -33,7 +33,6 @@
#define DRV_NAME "pata_falcon"
#define DRV_VERSION "0.1.0"
-#define ATA_HD_BASE 0xfff00000
#define ATA_HD_CONTROL 0x39
static struct scsi_host_template pata_falcon_sht = {
@@ -120,24 +119,22 @@ static struct ata_port_operations pata_falcon_ops = {
.set_mode = pata_falcon_set_mode,
};
-static int pata_falcon_init_one(void)
+static int __init pata_falcon_init_one(struct platform_device *pdev)
{
+ struct resource *res;
struct ata_host *host;
struct ata_port *ap;
- struct platform_device *pdev;
void __iomem *base;
- if (!MACH_IS_ATARI || !ATARIHW_PRESENT(IDE))
- return -ENODEV;
-
- pr_info(DRV_NAME ": Atari Falcon PATA controller\n");
+ dev_info(&pdev->dev, "Atari Falcon PATA controller\n");
- pdev = platform_device_register_simple(DRV_NAME, 0, NULL, 0);
- if (IS_ERR(pdev))
- return PTR_ERR(pdev);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
- if (!devm_request_mem_region(&pdev->dev, ATA_HD_BASE, 0x40, DRV_NAME)) {
- pr_err(DRV_NAME ": resources busy\n");
+ if (!devm_request_mem_region(&pdev->dev, res->start,
+ resource_size(res), DRV_NAME)) {
+ dev_err(&pdev->dev, "resources busy\n");
return -EBUSY;
}
@@ -152,7 +149,7 @@ static int pata_falcon_init_one(void)
ap->flags |= ATA_FLAG_SLAVE_POSS | ATA_FLAG_NO_IORDY;
ap->flags |= ATA_FLAG_PIO_POLLING;
- base = (void __iomem *)ATA_HD_BASE;
+ base = (void __iomem *)res->start;
ap->ioaddr.data_addr = base;
ap->ioaddr.error_addr = base + 1 + 1 * 4;
ap->ioaddr.feature_addr = base + 1 + 1 * 4;
@@ -174,9 +171,26 @@ static int pata_falcon_init_one(void)
return ata_host_activate(host, 0, NULL, 0, &pata_falcon_sht);
}
-module_init(pata_falcon_init_one);
+static int __exit pata_falcon_remove_one(struct platform_device *pdev)
+{
+ struct ata_host *host = platform_get_drvdata(pdev);
+
+ ata_host_detach(host);
+
+ return 0;
+}
+
+static struct platform_driver pata_falcon_driver = {
+ .remove = __exit_p(pata_falcon_remove_one),
+ .driver = {
+ .name = "atari-falcon-ide",
+ },
+};
+
+module_platform_driver_probe(pata_falcon_driver, pata_falcon_init_one);
MODULE_AUTHOR("Bartlomiej Zolnierkiewicz");
MODULE_DESCRIPTION("low-level driver for Atari Falcon PATA");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:atari-falcon-ide");
MODULE_VERSION(DRV_VERSION);
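pata_falcon moves from a hand-rolled module_init() that registered its own platform device to the standard platform-driver probe path, so the MMIO range now comes from the device's resources instead of a hard-coded base. A hedged sketch of the module_platform_driver_probe() idiom (the "example" names are placeholders, not this driver's):

    #include <linux/module.h>
    #include <linux/platform_device.h>

    static int __init example_probe(struct platform_device *pdev)
    {
        struct resource *res;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res)
            return -ENODEV;

        /* map registers, allocate state, register with the subsystem */
        return 0;
    }

    static int __exit example_remove(struct platform_device *pdev)
    {
        /* tear down whatever probe set up */
        return 0;
    }

    static struct platform_driver example_driver = {
        .remove = __exit_p(example_remove),
        .driver = {
            .name = "example-device",
        },
    };

    /*
     * The probe function lives in __init memory and runs once at
     * registration; the device cannot be re-bound later, which fits
     * fixed board-level hardware like the Falcon IDE interface.
     */
    module_platform_driver_probe(example_driver, example_probe);

    MODULE_LICENSE("GPL v2");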
diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c
index 57f2ec71cfc3..1bfd0154dad5 100644
--- a/drivers/ata/pata_macio.c
+++ b/drivers/ata/pata_macio.c
@@ -510,7 +510,7 @@ static int pata_macio_cable_detect(struct ata_port *ap)
return ATA_CBL_PATA40;
}
-static void pata_macio_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors pata_macio_qc_prep(struct ata_queued_cmd *qc)
{
unsigned int write = (qc->tf.flags & ATA_TFLAG_WRITE);
struct ata_port *ap = qc->ap;
@@ -523,7 +523,7 @@ static void pata_macio_qc_prep(struct ata_queued_cmd *qc)
__func__, qc, qc->flags, write, qc->dev->devno);
if (!(qc->flags & ATA_QCFLAG_DMAMAP))
- return;
+ return AC_ERR_OK;
table = (struct dbdma_cmd *) priv->dma_table_cpu;
@@ -568,6 +568,8 @@ static void pata_macio_qc_prep(struct ata_queued_cmd *qc)
table->command = cpu_to_le16(DBDMA_STOP);
dev_dbgdma(priv->dev, "%s: %d DMA list entries\n", __func__, pi);
+
+ return AC_ERR_OK;
}
diff --git a/drivers/ata/pata_pxa.c b/drivers/ata/pata_pxa.c
index 4afcb8e63e21..41430f79663c 100644
--- a/drivers/ata/pata_pxa.c
+++ b/drivers/ata/pata_pxa.c
@@ -44,25 +44,27 @@ static void pxa_ata_dma_irq(void *d)
/*
* Prepare taskfile for submission.
*/
-static void pxa_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors pxa_qc_prep(struct ata_queued_cmd *qc)
{
struct pata_pxa_data *pd = qc->ap->private_data;
struct dma_async_tx_descriptor *tx;
enum dma_transfer_direction dir;
if (!(qc->flags & ATA_QCFLAG_DMAMAP))
- return;
+ return AC_ERR_OK;
dir = (qc->dma_dir == DMA_TO_DEVICE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM);
tx = dmaengine_prep_slave_sg(pd->dma_chan, qc->sg, qc->n_elem, dir,
DMA_PREP_INTERRUPT);
if (!tx) {
ata_dev_err(qc->dev, "prep_slave_sg() failed\n");
- return;
+ return AC_ERR_OK;
}
tx->callback = pxa_ata_dma_irq;
tx->callback_param = pd;
pd->dma_cookie = dmaengine_submit(tx);
+
+ return AC_ERR_OK;
}
/*
diff --git a/drivers/ata/pdc_adma.c b/drivers/ata/pdc_adma.c
index cb490531b62e..5db55e1e2a61 100644
--- a/drivers/ata/pdc_adma.c
+++ b/drivers/ata/pdc_adma.c
@@ -116,7 +116,7 @@ static int adma_ata_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent);
static int adma_port_start(struct ata_port *ap);
static void adma_port_stop(struct ata_port *ap);
-static void adma_qc_prep(struct ata_queued_cmd *qc);
+static enum ata_completion_errors adma_qc_prep(struct ata_queued_cmd *qc);
static unsigned int adma_qc_issue(struct ata_queued_cmd *qc);
static int adma_check_atapi_dma(struct ata_queued_cmd *qc);
static void adma_freeze(struct ata_port *ap);
@@ -295,7 +295,7 @@ static int adma_fill_sg(struct ata_queued_cmd *qc)
return i;
}
-static void adma_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors adma_qc_prep(struct ata_queued_cmd *qc)
{
struct adma_port_priv *pp = qc->ap->private_data;
u8 *buf = pp->pkt;
@@ -306,7 +306,7 @@ static void adma_qc_prep(struct ata_queued_cmd *qc)
adma_enter_reg_mode(qc->ap);
if (qc->tf.protocol != ATA_PROT_DMA)
- return;
+ return AC_ERR_OK;
buf[i++] = 0; /* Response flags */
buf[i++] = 0; /* reserved */
@@ -371,6 +371,7 @@ static void adma_qc_prep(struct ata_queued_cmd *qc)
printk("%s\n", obuf);
}
#endif
+ return AC_ERR_OK;
}
static inline void adma_packet_start(struct ata_queued_cmd *qc)
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index 8e9cb198fcd1..9239615d8a04 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -502,7 +502,7 @@ static unsigned int sata_fsl_fill_sg(struct ata_queued_cmd *qc, void *cmd_desc,
return num_prde;
}
-static void sata_fsl_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors sata_fsl_qc_prep(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct sata_fsl_port_priv *pp = ap->private_data;
@@ -548,6 +548,8 @@ static void sata_fsl_qc_prep(struct ata_queued_cmd *qc)
VPRINTK("SATA FSL : xx_qc_prep, di = 0x%x, ttl = %d, num_prde = %d\n",
desc_info, ttl_dwords, num_prde);
+
+ return AC_ERR_OK;
}
static unsigned int sata_fsl_qc_issue(struct ata_queued_cmd *qc)
diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
index 7f99e23bff88..a6b76cc12a66 100644
--- a/drivers/ata/sata_inic162x.c
+++ b/drivers/ata/sata_inic162x.c
@@ -478,7 +478,7 @@ static void inic_fill_sg(struct inic_prd *prd, struct ata_queued_cmd *qc)
prd[-1].flags |= PRD_END;
}
-static void inic_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors inic_qc_prep(struct ata_queued_cmd *qc)
{
struct inic_port_priv *pp = qc->ap->private_data;
struct inic_pkt *pkt = pp->pkt;
@@ -538,6 +538,8 @@ static void inic_qc_prep(struct ata_queued_cmd *qc)
inic_fill_sg(prd, qc);
pp->cpb_tbl[0] = pp->pkt_dma;
+
+ return AC_ERR_OK;
}
static unsigned int inic_qc_issue(struct ata_queued_cmd *qc)
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index ad385a113391..277f11909fc1 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -592,8 +592,8 @@ static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static int mv_qc_defer(struct ata_queued_cmd *qc);
-static void mv_qc_prep(struct ata_queued_cmd *qc);
-static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
+static enum ata_completion_errors mv_qc_prep(struct ata_queued_cmd *qc);
+static enum ata_completion_errors mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static int mv_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline);
@@ -2031,7 +2031,7 @@ static void mv_rw_multi_errata_sata24(struct ata_queued_cmd *qc)
* LOCKING:
* Inherited from caller.
*/
-static void mv_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors mv_qc_prep(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct mv_port_priv *pp = ap->private_data;
@@ -2043,15 +2043,15 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
switch (tf->protocol) {
case ATA_PROT_DMA:
if (tf->command == ATA_CMD_DSM)
- return;
+ return AC_ERR_OK;
/* fall-thru */
case ATA_PROT_NCQ:
break; /* continue below */
case ATA_PROT_PIO:
mv_rw_multi_errata_sata24(qc);
- return;
+ return AC_ERR_OK;
default:
- return;
+ return AC_ERR_OK;
}
/* Fill in command request block
@@ -2098,12 +2098,10 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
* non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
* of which are defined/used by Linux. If we get here, this
* driver needs work.
- *
- * FIXME: modify libata to give qc_prep a return value and
- * return error here.
*/
- BUG_ON(tf->command);
- break;
+ ata_port_err(ap, "%s: unsupported command: %.2x\n", __func__,
+ tf->command);
+ return AC_ERR_INVALID;
}
mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
@@ -2116,8 +2114,10 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */
if (!(qc->flags & ATA_QCFLAG_DMAMAP))
- return;
+ return AC_ERR_OK;
mv_fill_sg(qc);
+
+ return AC_ERR_OK;
}
/**
@@ -2132,7 +2132,7 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
* LOCKING:
* Inherited from caller.
*/
-static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
+static enum ata_completion_errors mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct mv_port_priv *pp = ap->private_data;
@@ -2143,9 +2143,9 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
if ((tf->protocol != ATA_PROT_DMA) &&
(tf->protocol != ATA_PROT_NCQ))
- return;
+ return AC_ERR_OK;
if (tf->command == ATA_CMD_DSM)
- return; /* use bmdma for this */
+ return AC_ERR_OK; /* use bmdma for this */
/* Fill in Gen IIE command request block */
if (!(tf->flags & ATA_TFLAG_WRITE))
@@ -2186,8 +2186,10 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
);
if (!(qc->flags & ATA_QCFLAG_DMAMAP))
- return;
+ return AC_ERR_OK;
mv_fill_sg(qc);
+
+ return AC_ERR_OK;
}
/**
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index 56946012d113..65ec8dff1c51 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -297,7 +297,7 @@ static void nv_ck804_freeze(struct ata_port *ap);
static void nv_ck804_thaw(struct ata_port *ap);
static int nv_adma_slave_config(struct scsi_device *sdev);
static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
-static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
+static enum ata_completion_errors nv_adma_qc_prep(struct ata_queued_cmd *qc);
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
static void nv_adma_irq_clear(struct ata_port *ap);
@@ -319,7 +319,7 @@ static void nv_mcp55_freeze(struct ata_port *ap);
static void nv_swncq_error_handler(struct ata_port *ap);
static int nv_swncq_slave_config(struct scsi_device *sdev);
static int nv_swncq_port_start(struct ata_port *ap);
-static void nv_swncq_qc_prep(struct ata_queued_cmd *qc);
+static enum ata_completion_errors nv_swncq_qc_prep(struct ata_queued_cmd *qc);
static void nv_swncq_fill_sg(struct ata_queued_cmd *qc);
static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc);
static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis);
@@ -1344,7 +1344,7 @@ static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
return 1;
}
-static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors nv_adma_qc_prep(struct ata_queued_cmd *qc)
{
struct nv_adma_port_priv *pp = qc->ap->private_data;
struct nv_adma_cpb *cpb = &pp->cpb[qc->hw_tag];
@@ -1356,7 +1356,7 @@ static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
(qc->flags & ATA_QCFLAG_DMAMAP));
nv_adma_register_mode(qc->ap);
ata_bmdma_qc_prep(qc);
- return;
+ return AC_ERR_OK;
}
cpb->resp_flags = NV_CPB_RESP_DONE;
@@ -1388,6 +1388,8 @@ static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
cpb->ctl_flags = ctl_flags;
wmb();
cpb->resp_flags = 0;
+
+ return AC_ERR_OK;
}
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
@@ -1950,17 +1952,19 @@ static int nv_swncq_port_start(struct ata_port *ap)
return 0;
}
-static void nv_swncq_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors nv_swncq_qc_prep(struct ata_queued_cmd *qc)
{
if (qc->tf.protocol != ATA_PROT_NCQ) {
ata_bmdma_qc_prep(qc);
- return;
+ return AC_ERR_OK;
}
if (!(qc->flags & ATA_QCFLAG_DMAMAP))
- return;
+ return AC_ERR_OK;
nv_swncq_fill_sg(qc);
+
+ return AC_ERR_OK;
}
static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
index 5fd464765ddc..c451d7d1c817 100644
--- a/drivers/ata/sata_promise.c
+++ b/drivers/ata/sata_promise.c
@@ -139,7 +139,7 @@ static int pdc_sata_scr_write(struct ata_link *link, unsigned int sc_reg, u32 va
static int pdc_ata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
static int pdc_common_port_start(struct ata_port *ap);
static int pdc_sata_port_start(struct ata_port *ap);
-static void pdc_qc_prep(struct ata_queued_cmd *qc);
+static enum ata_completion_errors pdc_qc_prep(struct ata_queued_cmd *qc);
static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
static int pdc_check_atapi_dma(struct ata_queued_cmd *qc);
@@ -633,7 +633,7 @@ static void pdc_fill_sg(struct ata_queued_cmd *qc)
prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}
-static void pdc_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors pdc_qc_prep(struct ata_queued_cmd *qc)
{
struct pdc_port_priv *pp = qc->ap->private_data;
unsigned int i;
@@ -665,6 +665,8 @@ static void pdc_qc_prep(struct ata_queued_cmd *qc)
default:
break;
}
+
+ return AC_ERR_OK;
}
static int pdc_is_sataii_tx4(unsigned long flags)
diff --git a/drivers/ata/sata_qstor.c b/drivers/ata/sata_qstor.c
index c53c5a47204d..ef00ab644afb 100644
--- a/drivers/ata/sata_qstor.c
+++ b/drivers/ata/sata_qstor.c
@@ -100,7 +100,7 @@ static int qs_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
static int qs_ata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
static int qs_port_start(struct ata_port *ap);
static void qs_host_stop(struct ata_host *host);
-static void qs_qc_prep(struct ata_queued_cmd *qc);
+static enum ata_completion_errors qs_qc_prep(struct ata_queued_cmd *qc);
static unsigned int qs_qc_issue(struct ata_queued_cmd *qc);
static int qs_check_atapi_dma(struct ata_queued_cmd *qc);
static void qs_freeze(struct ata_port *ap);
@@ -260,7 +260,7 @@ static unsigned int qs_fill_sg(struct ata_queued_cmd *qc)
return si;
}
-static void qs_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors qs_qc_prep(struct ata_queued_cmd *qc)
{
struct qs_port_priv *pp = qc->ap->private_data;
u8 dflags = QS_DF_PORD, *buf = pp->pkt;
@@ -272,7 +272,7 @@ static void qs_qc_prep(struct ata_queued_cmd *qc)
qs_enter_reg_mode(qc->ap);
if (qc->tf.protocol != ATA_PROT_DMA)
- return;
+ return AC_ERR_OK;
nelem = qs_fill_sg(qc);
@@ -295,6 +295,8 @@ static void qs_qc_prep(struct ata_queued_cmd *qc)
/* frame information structure (FIS) */
ata_tf_to_fis(&qc->tf, 0, 1, &buf[32]);
+
+ return AC_ERR_OK;
}
static inline void qs_packet_start(struct ata_queued_cmd *qc)
diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c
index 3495e1733a8e..980aacdbcf3b 100644
--- a/drivers/ata/sata_rcar.c
+++ b/drivers/ata/sata_rcar.c
@@ -550,12 +550,14 @@ static void sata_rcar_bmdma_fill_sg(struct ata_queued_cmd *qc)
prd[si - 1].addr |= cpu_to_le32(SATA_RCAR_DTEND);
}
-static void sata_rcar_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors sata_rcar_qc_prep(struct ata_queued_cmd *qc)
{
if (!(qc->flags & ATA_QCFLAG_DMAMAP))
- return;
+ return AC_ERR_OK;
sata_rcar_bmdma_fill_sg(qc);
+
+ return AC_ERR_OK;
}
static void sata_rcar_bmdma_setup(struct ata_queued_cmd *qc)
diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
index e6fbae2f645a..75321f1ceba5 100644
--- a/drivers/ata/sata_sil.c
+++ b/drivers/ata/sata_sil.c
@@ -103,7 +103,7 @@ static void sil_dev_config(struct ata_device *dev);
static int sil_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
static int sil_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
static int sil_set_mode(struct ata_link *link, struct ata_device **r_failed);
-static void sil_qc_prep(struct ata_queued_cmd *qc);
+static enum ata_completion_errors sil_qc_prep(struct ata_queued_cmd *qc);
static void sil_bmdma_setup(struct ata_queued_cmd *qc);
static void sil_bmdma_start(struct ata_queued_cmd *qc);
static void sil_bmdma_stop(struct ata_queued_cmd *qc);
@@ -317,12 +317,14 @@ static void sil_fill_sg(struct ata_queued_cmd *qc)
last_prd->flags_len |= cpu_to_le32(ATA_PRD_EOT);
}
-static void sil_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors sil_qc_prep(struct ata_queued_cmd *qc)
{
if (!(qc->flags & ATA_QCFLAG_DMAMAP))
- return;
+ return AC_ERR_OK;
sil_fill_sg(qc);
+
+ return AC_ERR_OK;
}
static unsigned char sil_get_device_cache_line(struct pci_dev *pdev)
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
index 7bef82de53ca..560070d4f1d0 100644
--- a/drivers/ata/sata_sil24.c
+++ b/drivers/ata/sata_sil24.c
@@ -326,7 +326,7 @@ static void sil24_dev_config(struct ata_device *dev);
static int sil24_scr_read(struct ata_link *link, unsigned sc_reg, u32 *val);
static int sil24_scr_write(struct ata_link *link, unsigned sc_reg, u32 val);
static int sil24_qc_defer(struct ata_queued_cmd *qc);
-static void sil24_qc_prep(struct ata_queued_cmd *qc);
+static enum ata_completion_errors sil24_qc_prep(struct ata_queued_cmd *qc);
static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc);
static bool sil24_qc_fill_rtf(struct ata_queued_cmd *qc);
static void sil24_pmp_attach(struct ata_port *ap);
@@ -830,7 +830,7 @@ static int sil24_qc_defer(struct ata_queued_cmd *qc)
return ata_std_qc_defer(qc);
}
-static void sil24_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors sil24_qc_prep(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct sil24_port_priv *pp = ap->private_data;
@@ -874,6 +874,8 @@ static void sil24_qc_prep(struct ata_queued_cmd *qc)
if (qc->flags & ATA_QCFLAG_DMAMAP)
sil24_fill_sg(qc, sge);
+
+ return AC_ERR_OK;
}
static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc)
diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
index 2277ba0c9c7f..2c7b30c5ea3d 100644
--- a/drivers/ata/sata_sx4.c
+++ b/drivers/ata/sata_sx4.c
@@ -202,7 +202,7 @@ static void pdc_error_handler(struct ata_port *ap);
static void pdc_freeze(struct ata_port *ap);
static void pdc_thaw(struct ata_port *ap);
static int pdc_port_start(struct ata_port *ap);
-static void pdc20621_qc_prep(struct ata_queued_cmd *qc);
+static enum ata_completion_errors pdc20621_qc_prep(struct ata_queued_cmd *qc);
static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
static unsigned int pdc20621_dimm_init(struct ata_host *host);
@@ -530,7 +530,7 @@ static void pdc20621_nodata_prep(struct ata_queued_cmd *qc)
VPRINTK("ata pkt buf ofs %u, mmio copied\n", i);
}
-static void pdc20621_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors pdc20621_qc_prep(struct ata_queued_cmd *qc)
{
switch (qc->tf.protocol) {
case ATA_PROT_DMA:
@@ -542,6 +542,8 @@ static void pdc20621_qc_prep(struct ata_queued_cmd *qc)
default:
break;
}
+
+ return AC_ERR_OK;
}
static void __pdc20621_push_hdma(struct ata_queued_cmd *qc,
diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
index 2bbab0230aeb..aad00d2b28f5 100644
--- a/drivers/atm/firestream.c
+++ b/drivers/atm/firestream.c
@@ -1070,7 +1070,7 @@ static int fs_open(struct atm_vcc *atm_vcc)
RC_FLAGS_BFPS_BFP * bfp |
RC_FLAGS_RXBM_PSB, 0, 0);
break;
- };
+ }
if (IS_FS50 (dev)) {
submit_command (dev, &dev->hp_txq,
QE_CMD_REG_WR | QE_CMD_IMM_INQ,
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index cc37511de866..6265871a4af2 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -554,12 +554,27 @@ ssize_t __weak cpu_show_mds(struct device *dev,
return sprintf(buf, "Not affected\n");
}
+ssize_t __weak cpu_show_tsx_async_abort(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "Not affected\n");
+}
+
+ssize_t __weak cpu_show_itlb_multihit(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "Not affected\n");
+}
+
static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
static DEVICE_ATTR(spec_store_bypass, 0444, cpu_show_spec_store_bypass, NULL);
static DEVICE_ATTR(l1tf, 0444, cpu_show_l1tf, NULL);
static DEVICE_ATTR(mds, 0444, cpu_show_mds, NULL);
+static DEVICE_ATTR(tsx_async_abort, 0444, cpu_show_tsx_async_abort, NULL);
+static DEVICE_ATTR(itlb_multihit, 0444, cpu_show_itlb_multihit, NULL);
static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_meltdown.attr,
@@ -568,6 +583,8 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_spec_store_bypass.attr,
&dev_attr_l1tf.attr,
&dev_attr_mds.attr,
+ &dev_attr_tsx_async_abort.attr,
+ &dev_attr_itlb_multihit.attr,
NULL
};
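The two new entries follow the existing vulnerabilities pattern: drivers/base provides a __weak "Not affected" default, and an architecture that is actually affected overrides the symbol with a strong definition at link time. A minimal sketch of the pattern for a hypothetical vulnerability name:

    /* generic fallback; __weak lets arch code replace this symbol with
     * a strong definition in another object file */
    ssize_t __weak cpu_show_example_vuln(struct device *dev,
                                         struct device_attribute *attr,
                                         char *buf)
    {
        return sprintf(buf, "Not affected\n");
    }
    static DEVICE_ATTR(example_vuln, 0444, cpu_show_example_vuln, NULL);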
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 55907c27075b..84c4e1f72cbd 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -872,3 +872,39 @@ int walk_memory_blocks(unsigned long start, unsigned long size,
}
return ret;
}
+
+struct for_each_memory_block_cb_data {
+ walk_memory_blocks_func_t func;
+ void *arg;
+};
+
+static int for_each_memory_block_cb(struct device *dev, void *data)
+{
+ struct memory_block *mem = to_memory_block(dev);
+ struct for_each_memory_block_cb_data *cb_data = data;
+
+ return cb_data->func(mem, cb_data->arg);
+}
+
+/**
+ * for_each_memory_block - walk through all present memory blocks
+ *
+ * @arg: argument passed to func
+ * @func: callback for each memory block walked
+ *
+ * This function walks through all present memory blocks, calling func on
+ * each memory block.
+ *
+ * In case func() returns an error, walking is aborted and the error is
+ * returned.
+ */
+int for_each_memory_block(void *arg, walk_memory_blocks_func_t func)
+{
+ struct for_each_memory_block_cb_data cb_data = {
+ .func = func,
+ .arg = arg,
+ };
+
+ return bus_for_each_dev(&memory_subsys, NULL, &cb_data,
+ for_each_memory_block_cb);
+}
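A hedged usage sketch for the new for_each_memory_block() helper; the callback and the counting wrapper are hypothetical:

    static int count_block_cb(struct memory_block *mem, void *arg)
    {
        unsigned int *count = arg;

        (*count)++;
        return 0;           /* a non-zero return would abort the walk */
    }

    static unsigned int count_memory_blocks(void)
    {
        unsigned int count = 0;

        /* note the argument order: opaque arg first, callback second */
        for_each_memory_block(&count, count_block_cb);
        return count;
    }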
diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c
index 8db98a1f83dc..bbddb267c2e6 100644
--- a/drivers/base/power/common.c
+++ b/drivers/base/power/common.c
@@ -188,6 +188,26 @@ void dev_pm_domain_detach(struct device *dev, bool power_off)
EXPORT_SYMBOL_GPL(dev_pm_domain_detach);
/**
+ * dev_pm_domain_start - Start the device through its PM domain.
+ * @dev: Device to start.
+ *
+ * This function should typically be called during probe by a subsystem/driver,
+ * when it needs to start its device from the PM domain's perspective. Note
+ * that it's assumed that the PM domain is already powered on when this
+ * function is called.
+ *
+ * Returns 0 on success and negative error values on failures.
+ */
+int dev_pm_domain_start(struct device *dev)
+{
+ if (dev->pm_domain && dev->pm_domain->start)
+ return dev->pm_domain->start(dev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dev_pm_domain_start);
+
+/**
* dev_pm_domain_set - Set PM domain of a device.
* @dev: Device whose PM domain is to be set.
* @pd: PM domain to be set, or NULL.
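A hedged sketch of how a subsystem might use the new dev_pm_domain_start() during probe (the bus callback is hypothetical; error handling for the rest of probing is elided):

    static int example_bus_probe(struct device *dev)
    {
        int ret;

        /* ask the PM domain, if any, to start the device */
        ret = dev_pm_domain_start(dev);
        if (ret)
            return ret;

        /* continue with normal probing; dev_pm_domain_start() returns
         * 0 when the device has no PM domain or no start callback */
        return 0;
    }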
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index cc85e87eaf05..8e5725b11ee8 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -634,6 +634,13 @@ static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
return ret;
}
+static int genpd_dev_pm_start(struct device *dev)
+{
+ struct generic_pm_domain *genpd = dev_to_genpd(dev);
+
+ return genpd_start_dev(genpd, dev);
+}
+
static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
unsigned long val, void *ptr)
{
@@ -922,24 +929,6 @@ static int __init genpd_power_off_unused(void)
}
late_initcall(genpd_power_off_unused);
-#if defined(CONFIG_PM_SLEEP) || defined(CONFIG_PM_GENERIC_DOMAINS_OF)
-
-static bool genpd_present(const struct generic_pm_domain *genpd)
-{
- const struct generic_pm_domain *gpd;
-
- if (IS_ERR_OR_NULL(genpd))
- return false;
-
- list_for_each_entry(gpd, &gpd_list, gpd_list_node)
- if (gpd == genpd)
- return true;
-
- return false;
-}
-
-#endif
-
#ifdef CONFIG_PM_SLEEP
/**
@@ -1354,8 +1343,8 @@ static void genpd_syscore_switch(struct device *dev, bool suspend)
{
struct generic_pm_domain *genpd;
- genpd = dev_to_genpd(dev);
- if (!genpd_present(genpd))
+ genpd = dev_to_genpd_safe(dev);
+ if (!genpd)
return;
if (suspend) {
@@ -1805,6 +1794,7 @@ int pm_genpd_init(struct generic_pm_domain *genpd,
genpd->domain.ops.poweroff_noirq = genpd_poweroff_noirq;
genpd->domain.ops.restore_noirq = genpd_restore_noirq;
genpd->domain.ops.complete = genpd_complete;
+ genpd->domain.start = genpd_dev_pm_start;
if (genpd->flags & GENPD_FLAG_PM_CLK) {
genpd->dev_ops.stop = pm_clk_suspend;
@@ -2020,6 +2010,16 @@ static int genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
return 0;
}
+static bool genpd_present(const struct generic_pm_domain *genpd)
+{
+ const struct generic_pm_domain *gpd;
+
+ list_for_each_entry(gpd, &gpd_list, gpd_list_node)
+ if (gpd == genpd)
+ return true;
+ return false;
+}
+
/**
* of_genpd_add_provider_simple() - Register a simple PM domain provider
* @np: Device node pointer associated with the PM domain provider.
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
index 39a06a0cfdaa..444f5c169a0b 100644
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -117,6 +117,13 @@ static inline bool device_pm_initialized(struct device *dev)
return dev->power.in_dpm_list;
}
+/* drivers/base/power/wakeup_stats.c */
+extern int wakeup_source_sysfs_add(struct device *parent,
+ struct wakeup_source *ws);
+extern void wakeup_source_sysfs_remove(struct wakeup_source *ws);
+
+extern int pm_wakeup_source_sysfs_add(struct device *parent);
+
#else /* !CONFIG_PM_SLEEP */
static inline void device_pm_sleep_init(struct device *dev) {}
@@ -141,6 +148,11 @@ static inline bool device_pm_initialized(struct device *dev)
return device_is_registered(dev);
}
+static inline int pm_wakeup_source_sysfs_add(struct device *parent)
+{
+ return 0;
+}
+
#endif /* !CONFIG_PM_SLEEP */
static inline void device_pm_init(struct device *dev)
@@ -149,21 +161,3 @@ static inline void device_pm_init(struct device *dev)
device_pm_sleep_init(dev);
pm_runtime_init(dev);
}
-
-#ifdef CONFIG_PM_SLEEP
-
-/* drivers/base/power/wakeup_stats.c */
-extern int wakeup_source_sysfs_add(struct device *parent,
- struct wakeup_source *ws);
-extern void wakeup_source_sysfs_remove(struct wakeup_source *ws);
-
-extern int pm_wakeup_source_sysfs_add(struct device *parent);
-
-#else /* !CONFIG_PM_SLEEP */
-
-static inline int pm_wakeup_source_sysfs_add(struct device *parent)
-{
- return 0;
-}
-
-#endif /* CONFIG_PM_SLEEP */
diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c
index 5ce77d1ef9fc..8e021082dba8 100644
--- a/drivers/base/power/wakeirq.c
+++ b/drivers/base/power/wakeirq.c
@@ -272,7 +272,7 @@ void dev_pm_enable_wake_irq_check(struct device *dev,
{
struct wake_irq *wirq = dev->power.wakeirq;
- if (!wirq || !((wirq->status & WAKE_IRQ_DEDICATED_MASK)))
+ if (!wirq || !(wirq->status & WAKE_IRQ_DEDICATED_MASK))
return;
if (likely(wirq->status & WAKE_IRQ_DEDICATED_MANAGED)) {
@@ -299,7 +299,7 @@ void dev_pm_disable_wake_irq_check(struct device *dev)
{
struct wake_irq *wirq = dev->power.wakeirq;
- if (!wirq || !((wirq->status & WAKE_IRQ_DEDICATED_MASK)))
+ if (!wirq || !(wirq->status & WAKE_IRQ_DEDICATED_MASK))
return;
if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED)
diff --git a/drivers/base/property.c b/drivers/base/property.c
index 81bd01ed4042..511f6d7acdfe 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -557,6 +557,42 @@ int device_add_properties(struct device *dev,
EXPORT_SYMBOL_GPL(device_add_properties);
/**
+ * fwnode_get_name - Return the name of a node
+ * @fwnode: The firmware node
+ *
+ * Returns a pointer to the node name.
+ */
+const char *fwnode_get_name(const struct fwnode_handle *fwnode)
+{
+ return fwnode_call_ptr_op(fwnode, get_name);
+}
+
+/**
+ * fwnode_get_name_prefix - Return the prefix of a node for printing purposes
+ * @fwnode: The firmware node
+ *
+ * Returns the prefix of a node, intended to be printed right before the node.
+ * The prefix works also as a separator between the nodes.
+ */
+const char *fwnode_get_name_prefix(const struct fwnode_handle *fwnode)
+{
+ return fwnode_call_ptr_op(fwnode, get_name_prefix);
+}
+
+/**
+ * fwnode_get_parent - Return parent firmware node
+ * @fwnode: Firmware whose parent is retrieved
+ *
+ * Return parent firmware node of the given node if possible or %NULL if no
+ * parent was available.
+ */
+struct fwnode_handle *fwnode_get_parent(const struct fwnode_handle *fwnode)
+{
+ return fwnode_call_ptr_op(fwnode, get_parent);
+}
+EXPORT_SYMBOL_GPL(fwnode_get_parent);
+
+/**
* fwnode_get_next_parent - Iterate to the node's parent
* @fwnode: Firmware whose parent is retrieved
*
@@ -578,17 +614,50 @@ struct fwnode_handle *fwnode_get_next_parent(struct fwnode_handle *fwnode)
EXPORT_SYMBOL_GPL(fwnode_get_next_parent);
/**
- * fwnode_get_parent - Return parent firwmare node
- * @fwnode: Firmware whose parent is retrieved
+ * fwnode_count_parents - Return the number of parents a node has
+ * @fwnode: The node the parents of which are to be counted
*
- * Return parent firmware node of the given node if possible or %NULL if no
- * parent was available.
+ * Returns the number of parents a node has.
*/
-struct fwnode_handle *fwnode_get_parent(const struct fwnode_handle *fwnode)
+unsigned int fwnode_count_parents(const struct fwnode_handle *fwnode)
{
- return fwnode_call_ptr_op(fwnode, get_parent);
+ struct fwnode_handle *__fwnode;
+ unsigned int count;
+
+ __fwnode = fwnode_get_parent(fwnode);
+
+ for (count = 0; __fwnode; count++)
+ __fwnode = fwnode_get_next_parent(__fwnode);
+
+ return count;
}
-EXPORT_SYMBOL_GPL(fwnode_get_parent);
+EXPORT_SYMBOL_GPL(fwnode_count_parents);
+
+/**
+ * fwnode_get_nth_parent - Return an nth parent of a node
+ * @fwnode: The node the parent of which is requested
+ * @depth: Distance of the parent from the node
+ *
+ * Returns the nth parent of a node. If there is no parent at the requested
+ * @depth, %NULL is returned. If @depth is 0, the functionality is equivalent to
+ * fwnode_handle_get(). For @depth == 1, it is fwnode_get_parent() and so on.
+ *
+ * The caller is responsible for calling fwnode_handle_put() for the returned
+ * node.
+ */
+struct fwnode_handle *fwnode_get_nth_parent(struct fwnode_handle *fwnode,
+ unsigned int depth)
+{
+ unsigned int i;
+
+ fwnode_handle_get(fwnode);
+
+ for (i = 0; i < depth && fwnode; i++)
+ fwnode = fwnode_get_next_parent(fwnode);
+
+ return fwnode;
+}
+EXPORT_SYMBOL_GPL(fwnode_get_nth_parent);
/**
* fwnode_get_next_child_node - Return the next child node handle for a node
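A hedged usage sketch for the new parent-walking helpers, fwnode_count_parents() and fwnode_get_nth_parent(); the ancestry printer is hypothetical:

    static void print_ancestry(struct fwnode_handle *node)
    {
        unsigned int i, depth = fwnode_count_parents(node);

        for (i = 1; i <= depth; i++) {
            struct fwnode_handle *parent = fwnode_get_nth_parent(node, i);

            if (!parent)
                break;

            pr_info("ancestor %u: %s\n", i, fwnode_get_name(parent));
            /* each nth parent is returned with a reference held */
            fwnode_handle_put(parent);
        }
    }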
diff --git a/drivers/base/regmap/regmap-w1.c b/drivers/base/regmap/regmap-w1.c
index 3a7d30b8c3ac..1fbaaad71ca5 100644
--- a/drivers/base/regmap/regmap-w1.c
+++ b/drivers/base/regmap/regmap-w1.c
@@ -215,8 +215,6 @@ struct regmap *__regmap_init_w1(struct device *w1_dev,
return __regmap_init(w1_dev, bus, w1_dev, config,
lock_key, lock_name);
-
- return NULL;
}
EXPORT_SYMBOL_GPL(__regmap_init_w1);
@@ -233,8 +231,6 @@ struct regmap *__devm_regmap_init_w1(struct device *w1_dev,
return __devm_regmap_init(w1_dev, bus, w1_dev, config,
lock_key, lock_name);
-
- return NULL;
}
EXPORT_SYMBOL_GPL(__devm_regmap_init_w1);
diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c
index a1f3f0994f9f..d8d0dc0ca5ac 100644
--- a/drivers/base/swnode.c
+++ b/drivers/base/swnode.c
@@ -71,9 +71,9 @@ software_node_to_swnode(const struct software_node *node)
return swnode;
}
-const struct software_node *to_software_node(struct fwnode_handle *fwnode)
+const struct software_node *to_software_node(const struct fwnode_handle *fwnode)
{
- struct swnode *swnode = to_swnode(fwnode);
+ const struct swnode *swnode = to_swnode(fwnode);
return swnode ? swnode->node : NULL;
}
@@ -103,71 +103,15 @@ property_entry_get(const struct property_entry *prop, const char *name)
return NULL;
}
-static void
-property_set_pointer(struct property_entry *prop, const void *pointer)
-{
- switch (prop->type) {
- case DEV_PROP_U8:
- if (prop->is_array)
- prop->pointer.u8_data = pointer;
- else
- prop->value.u8_data = *((u8 *)pointer);
- break;
- case DEV_PROP_U16:
- if (prop->is_array)
- prop->pointer.u16_data = pointer;
- else
- prop->value.u16_data = *((u16 *)pointer);
- break;
- case DEV_PROP_U32:
- if (prop->is_array)
- prop->pointer.u32_data = pointer;
- else
- prop->value.u32_data = *((u32 *)pointer);
- break;
- case DEV_PROP_U64:
- if (prop->is_array)
- prop->pointer.u64_data = pointer;
- else
- prop->value.u64_data = *((u64 *)pointer);
- break;
- case DEV_PROP_STRING:
- if (prop->is_array)
- prop->pointer.str = pointer;
- else
- prop->value.str = pointer;
- break;
- default:
- break;
- }
-}
-
static const void *property_get_pointer(const struct property_entry *prop)
{
- switch (prop->type) {
- case DEV_PROP_U8:
- if (prop->is_array)
- return prop->pointer.u8_data;
- return &prop->value.u8_data;
- case DEV_PROP_U16:
- if (prop->is_array)
- return prop->pointer.u16_data;
- return &prop->value.u16_data;
- case DEV_PROP_U32:
- if (prop->is_array)
- return prop->pointer.u32_data;
- return &prop->value.u32_data;
- case DEV_PROP_U64:
- if (prop->is_array)
- return prop->pointer.u64_data;
- return &prop->value.u64_data;
- case DEV_PROP_STRING:
- if (prop->is_array)
- return prop->pointer.str;
- return &prop->value.str;
- default:
+ if (!prop->length)
return NULL;
- }
+
+ if (prop->is_array)
+ return prop->pointer;
+
+ return &prop->value;
}
static const void *property_entry_find(const struct property_entry *props,
@@ -187,66 +131,6 @@ static const void *property_entry_find(const struct property_entry *props,
return pointer;
}
-static int property_entry_read_u8_array(const struct property_entry *props,
- const char *propname,
- u8 *values, size_t nval)
-{
- const void *pointer;
- size_t length = nval * sizeof(*values);
-
- pointer = property_entry_find(props, propname, length);
- if (IS_ERR(pointer))
- return PTR_ERR(pointer);
-
- memcpy(values, pointer, length);
- return 0;
-}
-
-static int property_entry_read_u16_array(const struct property_entry *props,
- const char *propname,
- u16 *values, size_t nval)
-{
- const void *pointer;
- size_t length = nval * sizeof(*values);
-
- pointer = property_entry_find(props, propname, length);
- if (IS_ERR(pointer))
- return PTR_ERR(pointer);
-
- memcpy(values, pointer, length);
- return 0;
-}
-
-static int property_entry_read_u32_array(const struct property_entry *props,
- const char *propname,
- u32 *values, size_t nval)
-{
- const void *pointer;
- size_t length = nval * sizeof(*values);
-
- pointer = property_entry_find(props, propname, length);
- if (IS_ERR(pointer))
- return PTR_ERR(pointer);
-
- memcpy(values, pointer, length);
- return 0;
-}
-
-static int property_entry_read_u64_array(const struct property_entry *props,
- const char *propname,
- u64 *values, size_t nval)
-{
- const void *pointer;
- size_t length = nval * sizeof(*values);
-
- pointer = property_entry_find(props, propname, length);
- if (IS_ERR(pointer))
- return PTR_ERR(pointer);
-
- memcpy(values, pointer, length);
- return 0;
-}
-
static int
property_entry_count_elems_of_size(const struct property_entry *props,
const char *propname, size_t length)
@@ -265,49 +149,45 @@ static int property_entry_read_int_array(const struct property_entry *props,
unsigned int elem_size, void *val,
size_t nval)
{
+ const void *pointer;
+ size_t length;
+
if (!val)
return property_entry_count_elems_of_size(props, name,
elem_size);
- switch (elem_size) {
- case sizeof(u8):
- return property_entry_read_u8_array(props, name, val, nval);
- case sizeof(u16):
- return property_entry_read_u16_array(props, name, val, nval);
- case sizeof(u32):
- return property_entry_read_u32_array(props, name, val, nval);
- case sizeof(u64):
- return property_entry_read_u64_array(props, name, val, nval);
- }
- return -ENXIO;
+ if (!is_power_of_2(elem_size) || elem_size > sizeof(u64))
+ return -ENXIO;
+
+ length = nval * elem_size;
+
+ pointer = property_entry_find(props, name, length);
+ if (IS_ERR(pointer))
+ return PTR_ERR(pointer);
+
+ memcpy(val, pointer, length);
+ return 0;
}
static int property_entry_read_string_array(const struct property_entry *props,
const char *propname,
const char **strings, size_t nval)
{
- const struct property_entry *prop;
const void *pointer;
- size_t array_len, length;
+ size_t length;
+ int array_len;
/* Find out the array length. */
- prop = property_entry_get(props, propname);
- if (!prop)
- return -EINVAL;
-
- if (prop->is_array)
- /* Find the length of an array. */
- array_len = property_entry_count_elems_of_size(props, propname,
- sizeof(const char *));
- else
- /* The array length for a non-array string property is 1. */
- array_len = 1;
+ array_len = property_entry_count_elems_of_size(props, propname,
+ sizeof(const char *));
+ if (array_len < 0)
+ return array_len;
/* Return how many there are if strings is NULL. */
if (!strings)
return array_len;
- array_len = min(nval, array_len);
+ array_len = min_t(size_t, nval, array_len);
length = array_len * sizeof(*strings);
pointer = property_entry_find(props, propname, length);
@@ -322,13 +202,15 @@ static int property_entry_read_string_array(const struct property_entry *props,
static void property_entry_free_data(const struct property_entry *p)
{
const void *pointer = property_get_pointer(p);
+ const char * const *src_str;
size_t i, nval;
if (p->is_array) {
- if (p->type == DEV_PROP_STRING && p->pointer.str) {
+ if (p->type == DEV_PROP_STRING && p->pointer) {
+ src_str = p->pointer;
nval = p->length / sizeof(const char *);
for (i = 0; i < nval; i++)
- kfree(p->pointer.str[i]);
+ kfree(src_str[i]);
}
kfree(pointer);
} else if (p->type == DEV_PROP_STRING) {
@@ -337,29 +219,29 @@ static void property_entry_free_data(const struct property_entry *p)
kfree(p->name);
}
-static int property_copy_string_array(struct property_entry *dst,
- const struct property_entry *src)
+static const char * const *
+property_copy_string_array(const struct property_entry *src)
{
const char **d;
+ const char * const *src_str = src->pointer;
size_t nval = src->length / sizeof(*d);
int i;
d = kcalloc(nval, sizeof(*d), GFP_KERNEL);
if (!d)
- return -ENOMEM;
+ return NULL;
for (i = 0; i < nval; i++) {
- d[i] = kstrdup(src->pointer.str[i], GFP_KERNEL);
- if (!d[i] && src->pointer.str[i]) {
+ d[i] = kstrdup(src_str[i], GFP_KERNEL);
+ if (!d[i] && src_str[i]) {
while (--i >= 0)
kfree(d[i]);
kfree(d);
- return -ENOMEM;
+ return NULL;
}
}
- dst->pointer.str = d;
- return 0;
+ return d;
}
static int property_entry_copy_data(struct property_entry *dst,
@@ -367,36 +249,35 @@ static int property_entry_copy_data(struct property_entry *dst,
{
const void *pointer = property_get_pointer(src);
const void *new;
- int error;
if (src->is_array) {
if (!src->length)
return -ENODATA;
if (src->type == DEV_PROP_STRING) {
- error = property_copy_string_array(dst, src);
- if (error)
- return error;
- new = dst->pointer.str;
+ new = property_copy_string_array(src);
+ if (!new)
+ return -ENOMEM;
} else {
new = kmemdup(pointer, src->length, GFP_KERNEL);
if (!new)
return -ENOMEM;
}
+
+ dst->is_array = true;
+ dst->pointer = new;
} else if (src->type == DEV_PROP_STRING) {
new = kstrdup(src->value.str, GFP_KERNEL);
if (!new && src->value.str)
return -ENOMEM;
+
+ dst->value.str = new;
} else {
- new = pointer;
+ dst->value = src->value;
}
dst->length = src->length;
- dst->is_array = src->is_array;
dst->type = src->type;
-
- property_set_pointer(dst, new);
-
dst->name = kstrdup(src->name, GFP_KERNEL);
if (!dst->name)
goto out_free_data;
@@ -515,12 +396,47 @@ static int software_node_read_string_array(const struct fwnode_handle *fwnode,
propname, val, nval);
}
+static const char *
+software_node_get_name(const struct fwnode_handle *fwnode)
+{
+ const struct swnode *swnode = to_swnode(fwnode);
+
+ if (!swnode)
+ return "(null)";
+
+ return kobject_name(&swnode->kobj);
+}
+
+static const char *
+software_node_get_name_prefix(const struct fwnode_handle *fwnode)
+{
+ struct fwnode_handle *parent;
+ const char *prefix;
+
+ parent = fwnode_get_parent(fwnode);
+ if (!parent)
+ return "";
+
+ /* Figure out the prefix from the parents. */
+ while (is_software_node(parent))
+ parent = fwnode_get_next_parent(parent);
+
+ prefix = fwnode_get_name_prefix(parent);
+ fwnode_handle_put(parent);
+
+ /* Guess something if prefix was NULL. */
+ return prefix ?: "/";
+}
+
static struct fwnode_handle *
software_node_get_parent(const struct fwnode_handle *fwnode)
{
struct swnode *swnode = to_swnode(fwnode);
- return swnode ? (swnode->parent ? &swnode->parent->fwnode : NULL) : NULL;
+ if (!swnode || !swnode->parent)
+ return NULL;
+
+ return fwnode_handle_get(&swnode->parent->fwnode);
}
static struct fwnode_handle *
@@ -612,6 +528,8 @@ static const struct fwnode_operations software_node_ops = {
.property_present = software_node_property_present,
.property_read_int_array = software_node_read_int_array,
.property_read_string_array = software_node_read_string_array,
+ .get_name = software_node_get_name,
+ .get_name_prefix = software_node_get_name_prefix,
.get_parent = software_node_get_parent,
.get_next_child_node = software_node_get_next_child,
.get_named_child_node = software_node_get_named_child_node,
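The swnode refactor above collapses the old per-type pointer union into a single generic layout: arrays keep an out-of-line pointer, scalars live inline in a value union, and one helper replaces the type switches. A simplified sketch of the resulting shape (field names approximate struct property_entry, not a verbatim copy):

    struct example_property {
        const char *name;
        size_t length;
        bool is_array;
        union {
            const void *pointer;        /* arrays: out-of-line data */
            union {
                unsigned long long u64_data;
                const char *str;
            } value;                    /* scalars: stored inline */
        };
    };

    static const void *example_get_pointer(const struct example_property *p)
    {
        if (!p->length)
            return NULL;

        return p->is_array ? p->pointer : (const void *)&p->value;
    }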
diff --git a/drivers/bcma/driver_chipcommon_pmu.c b/drivers/bcma/driver_chipcommon_pmu.c
index f4161064365c..3056f81efca4 100644
--- a/drivers/bcma/driver_chipcommon_pmu.c
+++ b/drivers/bcma/driver_chipcommon_pmu.c
@@ -233,8 +233,10 @@ static void bcma_pmu_workarounds(struct bcma_drv_cc *cc)
switch (bus->chipinfo.id) {
case BCMA_CHIP_ID_BCM4313:
- /* enable 12 mA drive strenth for 4313 and set chipControl
- register bit 1 */
+ /*
+ * enable 12 mA drive strength for 4313 and set chipControl
+ * register bit 1
+ */
bcma_chipco_chipctl_maskset(cc, 0,
~BCMA_CCTRL_4313_12MA_LED_DRIVE,
BCMA_CCTRL_4313_12MA_LED_DRIVE);
@@ -246,8 +248,10 @@ static void bcma_pmu_workarounds(struct bcma_drv_cc *cc)
break;
case BCMA_CHIP_ID_BCM43224:
case BCMA_CHIP_ID_BCM43421:
- /* enable 12 mA drive strenth for 43224 and set chipControl
- register bit 15 */
+ /*
+ * enable 12 mA drive strength for 43224 and set chipControl
+ * register bit 15
+ */
if (bus->chipinfo.rev == 0) {
bcma_cc_maskset32(cc, BCMA_CC_CHIPCTL,
~BCMA_CCTRL_43224_GPIO_TOGGLE,
@@ -500,8 +504,10 @@ void bcma_pmu_spuravoid_pllupdate(struct bcma_drv_cc *cc, int spuravoid)
case BCMA_CHIP_ID_BCM53572:
/* 5357[ab]0, 43236[ab]0, and 6362b0 */
- /* BCM5357 needs to touch PLL1_PLLCTL[02],
- so offset PLL0_PLLCTL[02] by 6 */
+ /*
+ * BCM5357 needs to touch PLL1_PLLCTL[02],
+ * so offset PLL0_PLLCTL[02] by 6
+ */
phypll_offset = (bus->chipinfo.id == BCMA_CHIP_ID_BCM5357 ||
bus->chipinfo.id == BCMA_CHIP_ID_BCM4749 ||
bus->chipinfo.id == BCMA_CHIP_ID_BCM53572) ? 6 : 0;
@@ -619,8 +625,10 @@ void bcma_pmu_spuravoid_pllupdate(struct bcma_drv_cc *cc, int spuravoid)
case BCMA_CHIP_ID_BCM43228:
case BCMA_CHIP_ID_BCM43428:
/* LCNXN */
- /* PLL Settings for spur avoidance on/off mode,
- no on2 support for 43228A0 */
+ /*
+ * PLL Settings for spur avoidance on/off mode,
+ * no on2 support for 43228A0
+ */
if (spuravoid == 1) {
bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL0,
0x01100014);
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 5d52a2d32155..de2f94d0103a 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -268,19 +268,18 @@ static int drbd_adm_prepare(struct drbd_config_context *adm_ctx,
/* some more paranoia, if the request was over-determined */
if (adm_ctx->device && adm_ctx->resource &&
adm_ctx->device->resource != adm_ctx->resource) {
- pr_warning("request: minor=%u, resource=%s; but that minor belongs to resource %s\n",
- adm_ctx->minor, adm_ctx->resource->name,
- adm_ctx->device->resource->name);
+ pr_warn("request: minor=%u, resource=%s; but that minor belongs to resource %s\n",
+ adm_ctx->minor, adm_ctx->resource->name,
+ adm_ctx->device->resource->name);
drbd_msg_put_info(adm_ctx->reply_skb, "minor exists in different resource");
return ERR_INVALID_REQUEST;
}
if (adm_ctx->device &&
adm_ctx->volume != VOLUME_UNSPECIFIED &&
adm_ctx->volume != adm_ctx->device->vnr) {
- pr_warning("request: minor=%u, volume=%u; but that minor is volume %u in %s\n",
- adm_ctx->minor, adm_ctx->volume,
- adm_ctx->device->vnr,
- adm_ctx->device->resource->name);
+ pr_warn("request: minor=%u, volume=%u; but that minor is volume %u in %s\n",
+ adm_ctx->minor, adm_ctx->volume,
+ adm_ctx->device->vnr, adm_ctx->device->resource->name);
drbd_msg_put_info(adm_ctx->reply_skb, "minor exists as different volume");
return ERR_INVALID_REQUEST;
}
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index f6f77eaa7217..739b372a5112 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -417,18 +417,20 @@ out_free_page:
return ret;
}
-static int lo_discard(struct loop_device *lo, struct request *rq, loff_t pos)
+static int lo_fallocate(struct loop_device *lo, struct request *rq, loff_t pos,
+ int mode)
{
/*
- * We use punch hole to reclaim the free space used by the
- * image a.k.a. discard. However we do not support discard if
- * encryption is enabled, because it may give an attacker
- * useful information.
+ * We use fallocate to manipulate the space mappings used by the image
+ * a.k.a. discard/zerorange. However, we do not support this if
+ * encryption is enabled, because it may give an attacker useful
+ * information.
*/
struct file *file = lo->lo_backing_file;
- int mode = FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE;
int ret;
+ mode |= FALLOC_FL_KEEP_SIZE;
+
if ((!file->f_op->fallocate) || lo->lo_encrypt_key_size) {
ret = -EOPNOTSUPP;
goto out;
@@ -596,9 +598,17 @@ static int do_req_filebacked(struct loop_device *lo, struct request *rq)
switch (req_op(rq)) {
case REQ_OP_FLUSH:
return lo_req_flush(lo, rq);
- case REQ_OP_DISCARD:
case REQ_OP_WRITE_ZEROES:
- return lo_discard(lo, rq, pos);
+ /*
+ * If the caller doesn't want deallocation, call zeroout to
+ * write zeroes to the range. Otherwise, punch them out.
+ */
+ return lo_fallocate(lo, rq, pos,
+ (rq->cmd_flags & REQ_NOUNMAP) ?
+ FALLOC_FL_ZERO_RANGE :
+ FALLOC_FL_PUNCH_HOLE);
+ case REQ_OP_DISCARD:
+ return lo_fallocate(lo, rq, pos, FALLOC_FL_PUNCH_HOLE);
case REQ_OP_WRITE:
if (lo->transfer)
return lo_write_transfer(lo, rq, pos);
@@ -630,7 +640,9 @@ static void loop_reread_partitions(struct loop_device *lo,
{
int rc;
- rc = blkdev_reread_part(bdev);
+ mutex_lock(&bdev->bd_mutex);
+ rc = bdev_disk_changed(bdev, false);
+ mutex_unlock(&bdev->bd_mutex);
if (rc)
pr_warn("%s: partition scan of loop%d (%s) failed (rc=%d)\n",
__func__, lo->lo_number, lo->lo_file_name, rc);
@@ -1154,10 +1166,11 @@ out_unlock:
* must be at least one and it can only become zero when the
* current holder is released.
*/
- if (release)
- err = __blkdev_reread_part(bdev);
- else
- err = blkdev_reread_part(bdev);
+ if (!release)
+ mutex_lock(&bdev->bd_mutex);
+ err = bdev_disk_changed(bdev, false);
+ if (!release)
+ mutex_unlock(&bdev->bd_mutex);
if (err)
pr_warn("%s: partition scan of loop%d failed (rc=%d)\n",
__func__, lo_number, err);
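The loop change routes both discard and write-zeroes through one lo_fallocate() helper; only the fallocate mode differs, and REQ_NOUNMAP selects zeroing without deallocation. A hedged sketch of the mapping (pick_falloc_mode() is a hypothetical helper; lo_fallocate() itself additionally ORs in FALLOC_FL_KEEP_SIZE):

    #include <linux/blk_types.h>
    #include <linux/falloc.h>

    /* returns the base fallocate mode for a request, or 0 when the op
     * is not backed by fallocate */
    static int pick_falloc_mode(enum req_opf op, bool nounmap)
    {
        switch (op) {
        case REQ_OP_DISCARD:
            /* discard always deallocates */
            return FALLOC_FL_PUNCH_HOLE;
        case REQ_OP_WRITE_ZEROES:
            /* REQ_NOUNMAP asks for zeroes without deallocation */
            return nounmap ? FALLOC_FL_ZERO_RANGE : FALLOC_FL_PUNCH_HOLE;
        default:
            return 0;
        }
    }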
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 964f78cfffa0..f6bafa9a68b9 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -129,7 +129,7 @@ struct mtip_compat_ide_task_request_s {
/*
* This function check_for_surprise_removal is called
* while card is removed from the system and it will
- * read the vendor id from the configration space
+ * read the vendor id from the configuration space
*
* @pdev Pointer to the pci_dev structure.
*
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index a94ee45440b3..57532465fb83 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -993,6 +993,7 @@ static struct socket *nbd_get_socket(struct nbd_device *nbd, unsigned long fd,
if (sock->ops->shutdown == sock_no_shutdown) {
dev_err(disk_to_dev(nbd->disk), "Unsupported socket: shutdown callout must be supported.\n");
*err = -EINVAL;
+ sockfd_put(sock);
return NULL;
}
@@ -1031,14 +1032,15 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
sockfd_put(sock);
return -ENOMEM;
}
+
+ config->socks = socks;
+
nsock = kzalloc(sizeof(struct nbd_sock), GFP_KERNEL);
if (!nsock) {
sockfd_put(sock);
return -ENOMEM;
}
- config->socks = socks;
-
nsock->fallback_index = -1;
nsock->dead = false;
mutex_init(&nsock->tx_lock);
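The nbd reordering publishes the krealloc()'d socks array in config->socks before the next allocation can fail, so the error path never leaves the config pointing at freed memory. A generic sketch of the publish-before-failure pattern (all names illustrative):

    struct entry;

    struct ctx {
        struct entry **entries;
        unsigned int n;
    };

    static int add_entry(struct ctx *ctx, struct entry *e)
    {
        struct entry **grown;

        grown = krealloc(ctx->entries, (ctx->n + 1) * sizeof(*grown),
                         GFP_KERNEL);
        if (!grown)
            return -ENOMEM;

        /* publish immediately: krealloc may have freed the old array,
         * and a later failure must not leave a stale pointer behind */
        ctx->entries = grown;

        ctx->entries[ctx->n++] = e;
        return 0;
    }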
diff --git a/drivers/block/null_blk.h b/drivers/block/null_blk.h
index a235c45e22a7..bc837862b767 100644
--- a/drivers/block/null_blk.h
+++ b/drivers/block/null_blk.h
@@ -91,11 +91,13 @@ struct nullb {
#ifdef CONFIG_BLK_DEV_ZONED
int null_zone_init(struct nullb_device *dev);
void null_zone_exit(struct nullb_device *dev);
-int null_zone_report(struct gendisk *disk, sector_t sector,
- struct blk_zone *zones, unsigned int *nr_zones);
+int null_report_zones(struct gendisk *disk, sector_t sector,
+ unsigned int nr_zones, report_zones_cb cb, void *data);
blk_status_t null_handle_zoned(struct nullb_cmd *cmd,
enum req_opf op, sector_t sector,
sector_t nr_sectors);
+size_t null_zone_valid_read_len(struct nullb *nullb,
+ sector_t sector, unsigned int len);
#else
static inline int null_zone_init(struct nullb_device *dev)
{
@@ -103,17 +105,18 @@ static inline int null_zone_init(struct nullb_device *dev)
return -EINVAL;
}
static inline void null_zone_exit(struct nullb_device *dev) {}
-static inline int null_zone_report(struct gendisk *disk, sector_t sector,
- struct blk_zone *zones,
- unsigned int *nr_zones)
-{
- return -EOPNOTSUPP;
-}
static inline blk_status_t null_handle_zoned(struct nullb_cmd *cmd,
enum req_opf op, sector_t sector,
sector_t nr_sectors)
{
return BLK_STS_NOTSUPP;
}
+static inline size_t null_zone_valid_read_len(struct nullb *nullb,
+ sector_t sector,
+ unsigned int len)
+{
+ return len;
+}
+#define null_report_zones NULL
#endif /* CONFIG_BLK_DEV_ZONED */
#endif /* __NULL_BLK_H */
diff --git a/drivers/block/null_blk_main.c b/drivers/block/null_blk_main.c
index 0e7da5015ccd..795fda576824 100644
--- a/drivers/block/null_blk_main.c
+++ b/drivers/block/null_blk_main.c
@@ -227,7 +227,7 @@ static ssize_t nullb_device_uint_attr_store(unsigned int *val,
int result;
result = kstrtouint(page, 0, &tmp);
- if (result)
+ if (result < 0)
return result;
*val = tmp;
@@ -241,7 +241,7 @@ static ssize_t nullb_device_ulong_attr_store(unsigned long *val,
unsigned long tmp;
result = kstrtoul(page, 0, &tmp);
- if (result)
+ if (result < 0)
return result;
*val = tmp;
@@ -255,7 +255,7 @@ static ssize_t nullb_device_bool_attr_store(bool *val, const char *page,
int result;
result = kstrtobool(page, &tmp);
- if (result)
+ if (result < 0)
return result;
*val = tmp;
@@ -263,7 +263,7 @@ static ssize_t nullb_device_bool_attr_store(bool *val, const char *page,
}
/* The following macro should only be used with TYPE = {uint, ulong, bool}. */
-#define NULLB_DEVICE_ATTR(NAME, TYPE) \
+#define NULLB_DEVICE_ATTR(NAME, TYPE, APPLY) \
static ssize_t \
nullb_device_##NAME##_show(struct config_item *item, char *page) \
{ \
@@ -274,31 +274,57 @@ static ssize_t \
nullb_device_##NAME##_store(struct config_item *item, const char *page, \
size_t count) \
{ \
- if (test_bit(NULLB_DEV_FL_CONFIGURED, &to_nullb_device(item)->flags)) \
- return -EBUSY; \
- return nullb_device_##TYPE##_attr_store( \
- &to_nullb_device(item)->NAME, page, count); \
+ int (*apply_fn)(struct nullb_device *dev, TYPE new_value) = APPLY; \
+ struct nullb_device *dev = to_nullb_device(item); \
+ TYPE new_value; \
+ int ret; \
+ \
+ ret = nullb_device_##TYPE##_attr_store(&new_value, page, count); \
+ if (ret < 0) \
+ return ret; \
+ if (apply_fn) \
+ ret = apply_fn(dev, new_value); \
+ else if (test_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags)) \
+ ret = -EBUSY; \
+ if (ret < 0) \
+ return ret; \
+ dev->NAME = new_value; \
+ return count; \
} \
CONFIGFS_ATTR(nullb_device_, NAME);
-NULLB_DEVICE_ATTR(size, ulong);
-NULLB_DEVICE_ATTR(completion_nsec, ulong);
-NULLB_DEVICE_ATTR(submit_queues, uint);
-NULLB_DEVICE_ATTR(home_node, uint);
-NULLB_DEVICE_ATTR(queue_mode, uint);
-NULLB_DEVICE_ATTR(blocksize, uint);
-NULLB_DEVICE_ATTR(irqmode, uint);
-NULLB_DEVICE_ATTR(hw_queue_depth, uint);
-NULLB_DEVICE_ATTR(index, uint);
-NULLB_DEVICE_ATTR(blocking, bool);
-NULLB_DEVICE_ATTR(use_per_node_hctx, bool);
-NULLB_DEVICE_ATTR(memory_backed, bool);
-NULLB_DEVICE_ATTR(discard, bool);
-NULLB_DEVICE_ATTR(mbps, uint);
-NULLB_DEVICE_ATTR(cache_size, ulong);
-NULLB_DEVICE_ATTR(zoned, bool);
-NULLB_DEVICE_ATTR(zone_size, ulong);
-NULLB_DEVICE_ATTR(zone_nr_conv, uint);
+static int nullb_apply_submit_queues(struct nullb_device *dev,
+ unsigned int submit_queues)
+{
+ struct nullb *nullb = dev->nullb;
+ struct blk_mq_tag_set *set;
+
+ if (!nullb)
+ return 0;
+
+ set = nullb->tag_set;
+ blk_mq_update_nr_hw_queues(set, submit_queues);
+ return set->nr_hw_queues == submit_queues ? 0 : -ENOMEM;
+}
+
+NULLB_DEVICE_ATTR(size, ulong, NULL);
+NULLB_DEVICE_ATTR(completion_nsec, ulong, NULL);
+NULLB_DEVICE_ATTR(submit_queues, uint, nullb_apply_submit_queues);
+NULLB_DEVICE_ATTR(home_node, uint, NULL);
+NULLB_DEVICE_ATTR(queue_mode, uint, NULL);
+NULLB_DEVICE_ATTR(blocksize, uint, NULL);
+NULLB_DEVICE_ATTR(irqmode, uint, NULL);
+NULLB_DEVICE_ATTR(hw_queue_depth, uint, NULL);
+NULLB_DEVICE_ATTR(index, uint, NULL);
+NULLB_DEVICE_ATTR(blocking, bool, NULL);
+NULLB_DEVICE_ATTR(use_per_node_hctx, bool, NULL);
+NULLB_DEVICE_ATTR(memory_backed, bool, NULL);
+NULLB_DEVICE_ATTR(discard, bool, NULL);
+NULLB_DEVICE_ATTR(mbps, uint, NULL);
+NULLB_DEVICE_ATTR(cache_size, ulong, NULL);
+NULLB_DEVICE_ATTR(zoned, bool, NULL);
+NULLB_DEVICE_ATTR(zone_size, ulong, NULL);
+NULLB_DEVICE_ATTR(zone_nr_conv, uint, NULL);
static ssize_t nullb_device_power_show(struct config_item *item, char *page)
{
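For reference, the store method the reworked macro generates for submit_queues behaves roughly like this hand expansion (a sketch; the non-apply -EBUSY branch and configfs glue are elided, and uint is spelled out):

	static ssize_t nullb_device_submit_queues_store(struct config_item *item,
							const char *page,
							size_t count)
	{
		struct nullb_device *dev = to_nullb_device(item);
		unsigned int new_value;
		int ret;

		ret = nullb_device_uint_attr_store(&new_value, page, count);
		if (ret < 0)
			return ret;
		/* Apply hook present, so no -EBUSY check: the value may be
		 * changed even while the device is configured.
		 */
		ret = nullb_apply_submit_queues(dev, new_value);
		if (ret < 0)
			return ret;
		dev->submit_queues = new_value;
		return count;
	}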
@@ -467,7 +493,7 @@ nullb_group_drop_item(struct config_group *group, struct config_item *item)
static ssize_t memb_group_features_show(struct config_item *item, char *page)
{
- return snprintf(page, PAGE_SIZE, "memory_backed,discard,bandwidth,cache,badblocks,zoned,zone_size\n");
+ return snprintf(page, PAGE_SIZE, "memory_backed,discard,bandwidth,cache,badblocks,zoned,zone_size,zone_nr_conv\n");
}
CONFIGFS_ATTR_RO(memb_group_, features);
@@ -996,6 +1022,16 @@ next:
return 0;
}
+static void nullb_fill_pattern(struct nullb *nullb, struct page *page,
+ unsigned int len, unsigned int off)
+{
+ void *dst;
+
+ dst = kmap_atomic(page);
+ memset(dst + off, 0xFF, len);
+ kunmap_atomic(dst);
+}
+
static void null_handle_discard(struct nullb *nullb, sector_t sector, size_t n)
{
size_t temp;
@@ -1036,10 +1072,24 @@ static int null_transfer(struct nullb *nullb, struct page *page,
unsigned int len, unsigned int off, bool is_write, sector_t sector,
bool is_fua)
{
+ struct nullb_device *dev = nullb->dev;
+ unsigned int valid_len = len;
int err = 0;
if (!is_write) {
- err = copy_from_nullb(nullb, page, off, sector, len);
+ if (dev->zoned)
+ valid_len = null_zone_valid_read_len(nullb,
+ sector, len);
+
+ if (valid_len) {
+ err = copy_from_nullb(nullb, page, off,
+ sector, valid_len);
+ off += valid_len;
+ len -= valid_len;
+ }
+
+ if (len)
+ nullb_fill_pattern(nullb, page, len, off);
flush_dcache_page(page);
} else {
flush_dcache_page(page);
@@ -1418,20 +1468,9 @@ static void null_config_discard(struct nullb *nullb)
blk_queue_flag_set(QUEUE_FLAG_DISCARD, nullb->q);
}
-static int null_open(struct block_device *bdev, fmode_t mode)
-{
- return 0;
-}
-
-static void null_release(struct gendisk *disk, fmode_t mode)
-{
-}
-
-static const struct block_device_operations null_fops = {
- .owner = THIS_MODULE,
- .open = null_open,
- .release = null_release,
- .report_zones = null_zone_report,
+static const struct block_device_operations null_ops = {
+ .owner = THIS_MODULE,
+ .report_zones = null_report_zones,
};
static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
@@ -1532,7 +1571,7 @@ static int null_gendisk_register(struct nullb *nullb)
disk->flags |= GENHD_FL_EXT_DEVT | GENHD_FL_SUPPRESS_PARTITION_INFO;
disk->major = null_major;
disk->first_minor = nullb->index;
- disk->fops = &null_fops;
+ disk->fops = &null_ops;
disk->private_data = nullb;
disk->queue = nullb->q;
strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN);
diff --git a/drivers/block/null_blk_zoned.c b/drivers/block/null_blk_zoned.c
index 3d7fdea872f8..d4d88b581822 100644
--- a/drivers/block/null_blk_zoned.c
+++ b/drivers/block/null_blk_zoned.c
@@ -66,22 +66,53 @@ void null_zone_exit(struct nullb_device *dev)
kvfree(dev->zones);
}
-int null_zone_report(struct gendisk *disk, sector_t sector,
- struct blk_zone *zones, unsigned int *nr_zones)
+int null_report_zones(struct gendisk *disk, sector_t sector,
+ unsigned int nr_zones, report_zones_cb cb, void *data)
{
struct nullb *nullb = disk->private_data;
struct nullb_device *dev = nullb->dev;
- unsigned int zno, nrz = 0;
-
- zno = null_zone_no(dev, sector);
- if (zno < dev->nr_zones) {
- nrz = min_t(unsigned int, *nr_zones, dev->nr_zones - zno);
- memcpy(zones, &dev->zones[zno], nrz * sizeof(struct blk_zone));
+ unsigned int first_zone, i;
+ struct blk_zone zone;
+ int error;
+
+ first_zone = null_zone_no(dev, sector);
+ if (first_zone >= dev->nr_zones)
+ return 0;
+
+ nr_zones = min(nr_zones, dev->nr_zones - first_zone);
+ for (i = 0; i < nr_zones; i++) {
+ /*
+ * Stacked DM target drivers may remap the zone information
+ * passed to the report callback in place, so hand the callback
+ * a local copy to avoid corrupting the device's zone array.
+ */
+ memcpy(&zone, &dev->zones[first_zone + i],
+ sizeof(struct blk_zone));
+ error = cb(&zone, i, data);
+ if (error)
+ return error;
}
- *nr_zones = nrz;
+ return nr_zones;
+}
- return 0;
+size_t null_zone_valid_read_len(struct nullb *nullb,
+ sector_t sector, unsigned int len)
+{
+ struct nullb_device *dev = nullb->dev;
+ struct blk_zone *zone = &dev->zones[null_zone_no(dev, sector)];
+ unsigned int nr_sectors = len >> SECTOR_SHIFT;
+
+ /* Read must be below the write pointer position */
+ if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL ||
+ sector + nr_sectors <= zone->wp)
+ return len;
+
+ if (sector > zone->wp)
+ return 0;
+
+ return (zone->wp - sector) << SECTOR_SHIFT;
}
static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
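A quick worked example of the clamp implemented by null_zone_valid_read_len() (values made up): for a sequential zone with its write pointer at sector 1000, an 8-sector read starting at sector 996 crosses the pointer, so only the first four sectors carry data:

	/* sector = 996, nr_sectors = 8, zone->wp = 1000 */
	/* 996 + 8 > 1000 and 996 <= 1000, so the function returns */
	(1000 - 996) << SECTOR_SHIFT;	/* 4 sectors of valid data */
	/* null_transfer() then fills the remaining 4 sectors with 0xFF */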
@@ -118,14 +149,14 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
return BLK_STS_OK;
}
-static blk_status_t null_zone_reset(struct nullb_cmd *cmd, sector_t sector)
+static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op,
+ sector_t sector)
{
struct nullb_device *dev = cmd->nq->dev;
- unsigned int zno = null_zone_no(dev, sector);
- struct blk_zone *zone = &dev->zones[zno];
+ struct blk_zone *zone = &dev->zones[null_zone_no(dev, sector)];
size_t i;
- switch (req_op(cmd->rq)) {
+ switch (op) {
case REQ_OP_ZONE_RESET_ALL:
for (i = 0; i < dev->nr_zones; i++) {
if (zone[i].type == BLK_ZONE_TYPE_CONVENTIONAL)
@@ -141,6 +172,29 @@ static blk_status_t null_zone_reset(struct nullb_cmd *cmd, sector_t sector)
zone->cond = BLK_ZONE_COND_EMPTY;
zone->wp = zone->start;
break;
+ case REQ_OP_ZONE_OPEN:
+ if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
+ return BLK_STS_IOERR;
+ if (zone->cond == BLK_ZONE_COND_FULL)
+ return BLK_STS_IOERR;
+
+ zone->cond = BLK_ZONE_COND_EXP_OPEN;
+ break;
+ case REQ_OP_ZONE_CLOSE:
+ if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
+ return BLK_STS_IOERR;
+ if (zone->cond == BLK_ZONE_COND_FULL)
+ return BLK_STS_IOERR;
+
+ zone->cond = BLK_ZONE_COND_CLOSED;
+ break;
+ case REQ_OP_ZONE_FINISH:
+ if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
+ return BLK_STS_IOERR;
+
+ zone->cond = BLK_ZONE_COND_FULL;
+ zone->wp = zone->start + zone->len;
+ break;
default:
return BLK_STS_NOTSUPP;
}
@@ -155,7 +209,10 @@ blk_status_t null_handle_zoned(struct nullb_cmd *cmd, enum req_opf op,
return null_zone_write(cmd, sector, nr_sectors);
case REQ_OP_ZONE_RESET:
case REQ_OP_ZONE_RESET_ALL:
- return null_zone_reset(cmd, sector);
+ case REQ_OP_ZONE_OPEN:
+ case REQ_OP_ZONE_CLOSE:
+ case REQ_OP_ZONE_FINISH:
+ return null_zone_mgmt(cmd, op, sector);
default:
return BLK_STS_OK;
}
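The switch from a fill-this-array interface to a callback is what lets stacked drivers process zones one at a time. A minimal consumer-side sketch (hypothetical use case) matching the report_zones_cb signature used above:

	/* Count zones whose write pointer has advanced past the start. */
	static int count_active_zones_cb(struct blk_zone *zone,
					 unsigned int idx, void *data)
	{
		unsigned int *active = data;

		if (zone->type != BLK_ZONE_TYPE_CONVENTIONAL &&
		    zone->wp > zone->start)
			(*active)++;
		return 0;	/* a non-zero return aborts the iteration */
	}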
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 39136675dae5..13527a0b4e44 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -2087,7 +2087,7 @@ static int rbd_object_map_update_finish(struct rbd_obj_request *obj_req,
struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
struct ceph_osd_data *osd_data;
u64 objno;
- u8 state, new_state, current_state;
+ u8 state, new_state, uninitialized_var(current_state);
bool has_current_state;
void *p;
diff --git a/drivers/block/rsxx/core.c b/drivers/block/rsxx/core.c
index 76b73ddf8fd7..10f6368117d8 100644
--- a/drivers/block/rsxx/core.c
+++ b/drivers/block/rsxx/core.c
@@ -1000,8 +1000,10 @@ static void rsxx_pci_remove(struct pci_dev *dev)
cancel_work_sync(&card->event_work);
+ destroy_workqueue(card->event_wq);
rsxx_destroy_dev(card);
rsxx_dma_destroy(card);
+ destroy_workqueue(card->creg_ctrl.creg_wq);
spin_lock_irqsave(&card->irq_lock, flags);
rsxx_disable_ier_and_isr(card, CR_INTR_ALL);
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index aae665a3a254..f7aa2dc1ff85 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -380,17 +380,6 @@ config BT_ATH3K
Say Y here to compile support for "Atheros firmware download driver"
into the kernel or say M to compile it as module (ath3k).
-config BT_WILINK
- tristate "Texas Instruments WiLink7 driver"
- depends on TI_ST
- help
- This enables the Bluetooth driver for Texas Instrument's BT/FM/GPS
- combo devices. This makes use of shared transport line discipline
- core driver to communicate with the BT core of the combo chip.
-
- Say Y here to compile support for Texas Instrument's WiLink7 driver
- into the kernel or say M to compile it as module (btwilink).
-
config BT_MTKSDIO
tristate "MediaTek HCI SDIO driver"
depends on MMC
diff --git a/drivers/bluetooth/Makefile b/drivers/bluetooth/Makefile
index 34887b9b3a85..1a58a3ae142c 100644
--- a/drivers/bluetooth/Makefile
+++ b/drivers/bluetooth/Makefile
@@ -19,7 +19,6 @@ obj-$(CONFIG_BT_INTEL) += btintel.o
obj-$(CONFIG_BT_ATH3K) += ath3k.o
obj-$(CONFIG_BT_MRVL) += btmrvl.o
obj-$(CONFIG_BT_MRVL_SDIO) += btmrvl_sdio.o
-obj-$(CONFIG_BT_WILINK) += btwilink.o
obj-$(CONFIG_BT_MTKSDIO) += btmtksdio.o
obj-$(CONFIG_BT_MTKUART) += btmtkuart.o
obj-$(CONFIG_BT_QCOMSMD) += btqcomsmd.o
diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c
index 2d2e6d862068..8e05706fe5d9 100644
--- a/drivers/bluetooth/btbcm.c
+++ b/drivers/bluetooth/btbcm.c
@@ -23,6 +23,7 @@
#define BDADDR_BCM43430A0 (&(bdaddr_t) {{0xac, 0x1f, 0x12, 0xa0, 0x43, 0x43}})
#define BDADDR_BCM4324B3 (&(bdaddr_t) {{0x00, 0x00, 0x00, 0xb3, 0x24, 0x43}})
#define BDADDR_BCM4330B1 (&(bdaddr_t) {{0x00, 0x00, 0x00, 0xb1, 0x30, 0x43}})
+#define BDADDR_BCM4334B0 (&(bdaddr_t) {{0x00, 0x00, 0x00, 0xb0, 0x34, 0x43}})
#define BDADDR_BCM4345C5 (&(bdaddr_t) {{0xac, 0x1f, 0x00, 0xc5, 0x45, 0x43}})
#define BDADDR_BCM43341B (&(bdaddr_t) {{0xac, 0x1f, 0x00, 0x1b, 0x34, 0x43}})
@@ -74,6 +75,7 @@ int btbcm_check_bdaddr(struct hci_dev *hdev)
!bacmp(&bda->bdaddr, BDADDR_BCM2076B1) ||
!bacmp(&bda->bdaddr, BDADDR_BCM4324B3) ||
!bacmp(&bda->bdaddr, BDADDR_BCM4330B1) ||
+ !bacmp(&bda->bdaddr, BDADDR_BCM4334B0) ||
!bacmp(&bda->bdaddr, BDADDR_BCM4345C5) ||
!bacmp(&bda->bdaddr, BDADDR_BCM43430A0) ||
!bacmp(&bda->bdaddr, BDADDR_BCM43341B)) {
@@ -326,6 +328,7 @@ struct bcm_subver_table {
static const struct bcm_subver_table bcm_uart_subver_table[] = {
{ 0x4103, "BCM4330B1" }, /* 002.001.003 */
+ { 0x410d, "BCM4334B0" }, /* 002.001.013 */
{ 0x410e, "BCM43341B0" }, /* 002.001.014 */
{ 0x4204, "BCM2076B1" }, /* 002.002.004 */
{ 0x4406, "BCM4324B3" }, /* 002.004.006 */
@@ -339,6 +342,7 @@ static const struct bcm_subver_table bcm_uart_subver_table[] = {
{ 0x220e, "BCM20702A1" }, /* 001.002.014 */
{ 0x4217, "BCM4329B1" }, /* 002.002.023 */
{ 0x6106, "BCM4359C0" }, /* 003.001.006 */
+ { 0x4106, "BCM4335A0" }, /* 002.001.006 */
{ }
};
@@ -440,6 +444,12 @@ int btbcm_finalize(struct hci_dev *hdev)
set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
+ /* Some devices ship with the controller default address.
+ * Allow the bootloader to set a valid address through the
+ * device tree.
+ */
+ set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
+
return 0;
}
EXPORT_SYMBOL_GPL(btbcm_finalize);
diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c
index bb99c8653aab..62e781a18bf0 100644
--- a/drivers/bluetooth/btintel.c
+++ b/drivers/bluetooth/btintel.c
@@ -709,6 +709,51 @@ done:
}
EXPORT_SYMBOL_GPL(btintel_download_firmware);
+void btintel_reset_to_bootloader(struct hci_dev *hdev)
+{
+ struct intel_reset params;
+ struct sk_buff *skb;
+
+ /* Send Intel Reset command. This will result in
+ * re-enumeration of the BT controller.
+ *
+ * Intel Reset parameter description:
+ * reset_type : 0x00 (Soft reset),
+ * 0x01 (Hard reset)
+ * patch_enable : 0x00 (Do not enable),
+ * 0x01 (Enable)
+ * ddc_reload : 0x00 (Do not reload),
+ * 0x01 (Reload)
+ * boot_option: 0x00 (Current image),
+ * 0x01 (Specified boot address)
+ * boot_param: Boot address
+ *
+ */
+ params.reset_type = 0x01;
+ params.patch_enable = 0x01;
+ params.ddc_reload = 0x01;
+ params.boot_option = 0x00;
+ params.boot_param = cpu_to_le32(0x00000000);
+
+ skb = __hci_cmd_sync(hdev, 0xfc01, sizeof(params),
+ &params, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ bt_dev_err(hdev, "FW download error recovery failed (%ld)",
+ PTR_ERR(skb));
+ return;
+ }
+ bt_dev_info(hdev, "Intel reset sent to retry FW download");
+ kfree_skb(skb);
+
+ /* Current Intel BT controllers (ThP/JfP) hold the USB reset
+ * lines for 2ms when they receive an Intel Reset in bootloader
+ * mode, whereas upcoming Intel BT controllers will hold USB reset
+ * for 150ms. To keep the delay generic, 150ms is chosen here.
+ */
+ msleep(150);
+}
+EXPORT_SYMBOL_GPL(btintel_reset_to_bootloader);
+
MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth support for Intel devices ver " VERSION);
MODULE_VERSION(VERSION);
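The btusb.c hunks further down wire this helper into every fatal path of the firmware download; the intended call pattern is simply:

	err = btintel_download_firmware(hdev, fw, &boot_param);
	if (err < 0) {
		/* Controller may be wedged in an inconsistent state:
		 * force re-enumeration so the download can be retried.
		 */
		btintel_reset_to_bootloader(hdev);
		goto done;
	}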
diff --git a/drivers/bluetooth/btintel.h b/drivers/bluetooth/btintel.h
index 3d846190f2bf..a69ea8a87b9b 100644
--- a/drivers/bluetooth/btintel.h
+++ b/drivers/bluetooth/btintel.h
@@ -87,6 +87,7 @@ int btintel_read_boot_params(struct hci_dev *hdev,
struct intel_boot_params *params);
int btintel_download_firmware(struct hci_dev *dev, const struct firmware *fw,
u32 *boot_param);
+void btintel_reset_to_bootloader(struct hci_dev *hdev);
#else
static inline int btintel_check_bdaddr(struct hci_dev *hdev)
@@ -181,4 +182,8 @@ static inline int btintel_download_firmware(struct hci_dev *dev,
{
return -EOPNOTSUPP;
}
+
+static inline void btintel_reset_to_bootloader(struct hci_dev *hdev)
+{
+}
#endif
diff --git a/drivers/bluetooth/btmtksdio.c b/drivers/bluetooth/btmtksdio.c
index 813338288453..519788c442ca 100644
--- a/drivers/bluetooth/btmtksdio.c
+++ b/drivers/bluetooth/btmtksdio.c
@@ -57,6 +57,7 @@ static const struct sdio_device_id btmtksdio_table[] = {
.driver_data = (kernel_ulong_t)&mt7668_data },
{ } /* Terminating entry */
};
+MODULE_DEVICE_TABLE(sdio, btmtksdio_table);
#define MTK_REG_CHLPCR 0x4 /* W1S */
#define C_INT_EN_SET BIT(0)
diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c
index 8cc21ad7cf29..ec69e5dd7bd3 100644
--- a/drivers/bluetooth/btqca.c
+++ b/drivers/bluetooth/btqca.c
@@ -14,19 +14,33 @@
#define VERSION "0.1"
-int qca_read_soc_version(struct hci_dev *hdev, u32 *soc_version)
+int qca_read_soc_version(struct hci_dev *hdev, u32 *soc_version,
+ enum qca_btsoc_type soc_type)
{
struct sk_buff *skb;
struct edl_event_hdr *edl;
- struct rome_version *ver;
+ struct qca_btsoc_version *ver;
char cmd;
int err = 0;
+ u8 event_type = HCI_EV_VENDOR;
+ u8 rlen = sizeof(*edl) + sizeof(*ver);
+ u8 rtype = EDL_APP_VER_RES_EVT;
bt_dev_dbg(hdev, "QCA Version Request");
+ /* Unlike other SoCs, which send the version command response as a
+ * payload to a VSE event, WCN3991 sends the version command
+ * response as a payload to a command complete event.
+ */
+ if (soc_type == QCA_WCN3991) {
+ event_type = 0;
+ rlen += 1;
+ rtype = EDL_PATCH_VER_REQ_CMD;
+ }
+
cmd = EDL_PATCH_VER_REQ_CMD;
skb = __hci_cmd_sync_ev(hdev, EDL_PATCH_CMD_OPCODE, EDL_PATCH_CMD_LEN,
- &cmd, HCI_EV_VENDOR, HCI_INIT_TIMEOUT);
+ &cmd, event_type, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
err = PTR_ERR(skb);
bt_dev_err(hdev, "Reading QCA version information failed (%d)",
@@ -34,7 +48,7 @@ int qca_read_soc_version(struct hci_dev *hdev, u32 *soc_version)
return err;
}
- if (skb->len != sizeof(*edl) + sizeof(*ver)) {
+ if (skb->len != rlen) {
bt_dev_err(hdev, "QCA Version size mismatch len %d", skb->len);
err = -EILSEQ;
goto out;
@@ -48,18 +62,21 @@ int qca_read_soc_version(struct hci_dev *hdev, u32 *soc_version)
}
if (edl->cresp != EDL_CMD_REQ_RES_EVT ||
- edl->rtype != EDL_APP_VER_RES_EVT) {
+ edl->rtype != rtype) {
bt_dev_err(hdev, "QCA Wrong packet received %d %d", edl->cresp,
edl->rtype);
err = -EIO;
goto out;
}
- ver = (struct rome_version *)(edl->data);
+ if (soc_type == QCA_WCN3991)
+ memmove(&edl->data, &edl->data[1], sizeof(*ver));
+
+ ver = (struct qca_btsoc_version *)(edl->data);
BT_DBG("%s: Product:0x%08x", hdev->name, le32_to_cpu(ver->product_id));
BT_DBG("%s: Patch :0x%08x", hdev->name, le16_to_cpu(ver->patch_ver));
- BT_DBG("%s: ROM :0x%08x", hdev->name, le16_to_cpu(ver->rome_ver));
+ BT_DBG("%s: ROM :0x%08x", hdev->name, le16_to_cpu(ver->rom_ver));
BT_DBG("%s: SOC :0x%08x", hdev->name, le32_to_cpu(ver->soc_id));
/* QCA chipset version can be decided by patch and SoC
@@ -67,7 +84,7 @@ int qca_read_soc_version(struct hci_dev *hdev, u32 *soc_version)
* and lower 2 bytes from patch will be used.
*/
*soc_version = (le32_to_cpu(ver->soc_id) << 16) |
- (le16_to_cpu(ver->rome_ver) & 0x0000ffff);
+ (le16_to_cpu(ver->rom_ver) & 0x0000ffff);
if (*soc_version == 0)
err = -EILSEQ;
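A worked example of the packing above (made-up register values): with soc_id = 0x00000044 and rom_ver = 0x0302,

	soc_version = (0x00000044 << 16) | (0x0302 & 0x0000ffff);
	/* = 0x00440302: SoC ID in the upper 16 bits, ROM (formerly
	 * "rome") version in the lower 16 bits
	 */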
@@ -121,7 +138,7 @@ int qca_send_pre_shutdown_cmd(struct hci_dev *hdev)
}
EXPORT_SYMBOL_GPL(qca_send_pre_shutdown_cmd);
-static void qca_tlv_check_data(struct rome_config *config,
+static void qca_tlv_check_data(struct qca_fw_config *config,
const struct firmware *fw)
{
const u8 *data;
@@ -140,8 +157,8 @@ static void qca_tlv_check_data(struct rome_config *config,
BT_DBG("TLV Type\t\t : 0x%x", type_len & 0x000000ff);
BT_DBG("Length\t\t : %d bytes", length);
- config->dnld_mode = ROME_SKIP_EVT_NONE;
- config->dnld_type = ROME_SKIP_EVT_NONE;
+ config->dnld_mode = QCA_SKIP_EVT_NONE;
+ config->dnld_type = QCA_SKIP_EVT_NONE;
switch (config->type) {
case TLV_TYPE_PATCH:
@@ -223,31 +240,45 @@ static void qca_tlv_check_data(struct rome_config *config,
}
static int qca_tlv_send_segment(struct hci_dev *hdev, int seg_size,
- const u8 *data, enum rome_tlv_dnld_mode mode)
+ const u8 *data, enum qca_tlv_dnld_mode mode,
+ enum qca_btsoc_type soc_type)
{
struct sk_buff *skb;
struct edl_event_hdr *edl;
struct tlv_seg_resp *tlv_resp;
u8 cmd[MAX_SIZE_PER_TLV_SEGMENT + 2];
int err = 0;
+ u8 event_type = HCI_EV_VENDOR;
+ u8 rlen = (sizeof(*edl) + sizeof(*tlv_resp));
+ u8 rtype = EDL_TVL_DNLD_RES_EVT;
cmd[0] = EDL_PATCH_TLV_REQ_CMD;
cmd[1] = seg_size;
memcpy(cmd + 2, data, seg_size);
- if (mode == ROME_SKIP_EVT_VSE_CC || mode == ROME_SKIP_EVT_VSE)
+ if (mode == QCA_SKIP_EVT_VSE_CC || mode == QCA_SKIP_EVT_VSE)
return __hci_cmd_send(hdev, EDL_PATCH_CMD_OPCODE, seg_size + 2,
cmd);
+ /* Unlike other SoCs, which send the TLV download response as a
+ * payload to a VSE event, WCN3991 sends it as a payload to a
+ * command complete event.
+ */
+ if (soc_type == QCA_WCN3991) {
+ event_type = 0;
+ rlen = sizeof(*edl);
+ rtype = EDL_PATCH_TLV_REQ_CMD;
+ }
+
skb = __hci_cmd_sync_ev(hdev, EDL_PATCH_CMD_OPCODE, seg_size + 2, cmd,
- HCI_EV_VENDOR, HCI_INIT_TIMEOUT);
+ event_type, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
err = PTR_ERR(skb);
bt_dev_err(hdev, "QCA Failed to send TLV segment (%d)", err);
return err;
}
- if (skb->len != sizeof(*edl) + sizeof(*tlv_resp)) {
+ if (skb->len != rlen) {
bt_dev_err(hdev, "QCA TLV response size mismatch");
err = -EILSEQ;
goto out;
@@ -260,13 +291,19 @@ static int qca_tlv_send_segment(struct hci_dev *hdev, int seg_size,
goto out;
}
- tlv_resp = (struct tlv_seg_resp *)(edl->data);
+ if (edl->cresp != EDL_CMD_REQ_RES_EVT || edl->rtype != rtype) {
+ bt_dev_err(hdev, "QCA TLV with error stat 0x%x rtype 0x%x",
+ edl->cresp, edl->rtype);
+ err = -EIO;
+ }
- if (edl->cresp != EDL_CMD_REQ_RES_EVT ||
- edl->rtype != EDL_TVL_DNLD_RES_EVT || tlv_resp->result != 0x00) {
+ if (soc_type == QCA_WCN3991)
+ goto out;
+
+ tlv_resp = (struct tlv_seg_resp *)(edl->data);
+ if (tlv_resp->result) {
bt_dev_err(hdev, "QCA TLV with error stat 0x%x rtype 0x%x (0x%x)",
edl->cresp, edl->rtype, tlv_resp->result);
- err = -EIO;
}
out:
@@ -301,7 +338,8 @@ static int qca_inject_cmd_complete_event(struct hci_dev *hdev)
}
static int qca_download_firmware(struct hci_dev *hdev,
- struct rome_config *config)
+ struct qca_fw_config *config,
+ enum qca_btsoc_type soc_type)
{
const struct firmware *fw;
const u8 *segment;
@@ -328,10 +366,10 @@ static int qca_download_firmware(struct hci_dev *hdev,
remain -= segsize;
/* The last segment is always acked regardless download mode */
if (!remain || segsize < MAX_SIZE_PER_TLV_SEGMENT)
- config->dnld_mode = ROME_SKIP_EVT_NONE;
+ config->dnld_mode = QCA_SKIP_EVT_NONE;
ret = qca_tlv_send_segment(hdev, segsize, segment,
- config->dnld_mode);
+ config->dnld_mode, soc_type);
if (ret)
goto out;
@@ -344,8 +382,8 @@ static int qca_download_firmware(struct hci_dev *hdev,
* decrease the BT initialization time. Here we will inject a command
* complete event to avoid a command timeout error message.
*/
- if (config->dnld_type == ROME_SKIP_EVT_VSE_CC ||
- config->dnld_type == ROME_SKIP_EVT_VSE)
+ if (config->dnld_type == QCA_SKIP_EVT_VSE_CC ||
+ config->dnld_type == QCA_SKIP_EVT_VSE)
ret = qca_inject_cmd_complete_event(hdev);
out:
@@ -382,7 +420,7 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
enum qca_btsoc_type soc_type, u32 soc_ver,
const char *firmware_name)
{
- struct rome_config config;
+ struct qca_fw_config config;
int err;
u8 rom_ver = 0;
@@ -405,7 +443,7 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
"qca/rampatch_%08x.bin", soc_ver);
}
- err = qca_download_firmware(hdev, &config);
+ err = qca_download_firmware(hdev, &config, soc_type);
if (err < 0) {
bt_dev_err(hdev, "QCA Failed to download patch (%d)", err);
return err;
@@ -426,7 +464,7 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
snprintf(config.fwname, sizeof(config.fwname),
"qca/nvm_%08x.bin", soc_ver);
- err = qca_download_firmware(hdev, &config);
+ err = qca_download_firmware(hdev, &config, soc_type);
if (err < 0) {
bt_dev_err(hdev, "QCA Failed to download NVM (%d)", err);
return err;
diff --git a/drivers/bluetooth/btqca.h b/drivers/bluetooth/btqca.h
index 69c5315a65fd..f5795b1a3779 100644
--- a/drivers/bluetooth/btqca.h
+++ b/drivers/bluetooth/btqca.h
@@ -56,24 +56,24 @@ enum qca_baudrate {
QCA_BAUDRATE_RESERVED
};
-enum rome_tlv_dnld_mode {
- ROME_SKIP_EVT_NONE,
- ROME_SKIP_EVT_VSE,
- ROME_SKIP_EVT_CC,
- ROME_SKIP_EVT_VSE_CC
+enum qca_tlv_dnld_mode {
+ QCA_SKIP_EVT_NONE,
+ QCA_SKIP_EVT_VSE,
+ QCA_SKIP_EVT_CC,
+ QCA_SKIP_EVT_VSE_CC
};
-enum rome_tlv_type {
+enum qca_tlv_type {
TLV_TYPE_PATCH = 1,
TLV_TYPE_NVM
};
-struct rome_config {
+struct qca_fw_config {
u8 type;
char fwname[64];
uint8_t user_baud_rate;
- enum rome_tlv_dnld_mode dnld_mode;
- enum rome_tlv_dnld_mode dnld_type;
+ enum qca_tlv_dnld_mode dnld_mode;
+ enum qca_tlv_dnld_mode dnld_type;
};
struct edl_event_hdr {
@@ -82,10 +82,10 @@ struct edl_event_hdr {
__u8 data[0];
} __packed;
-struct rome_version {
+struct qca_btsoc_version {
__le32 product_id;
__le16 patch_ver;
- __le16 rome_ver;
+ __le16 rom_ver;
__le32 soc_id;
} __packed;
@@ -125,6 +125,7 @@ enum qca_btsoc_type {
QCA_AR3002,
QCA_ROME,
QCA_WCN3990,
+ QCA_WCN3991,
QCA_WCN3998,
};
@@ -134,12 +135,14 @@ int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdaddr);
int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
enum qca_btsoc_type soc_type, u32 soc_ver,
const char *firmware_name);
-int qca_read_soc_version(struct hci_dev *hdev, u32 *soc_version);
+int qca_read_soc_version(struct hci_dev *hdev, u32 *soc_version,
+ enum qca_btsoc_type soc_type);
int qca_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr);
int qca_send_pre_shutdown_cmd(struct hci_dev *hdev);
static inline bool qca_is_wcn399x(enum qca_btsoc_type soc_type)
{
- return soc_type == QCA_WCN3990 || soc_type == QCA_WCN3998;
+ return soc_type == QCA_WCN3990 || soc_type == QCA_WCN3991 ||
+ soc_type == QCA_WCN3998;
}
#else
@@ -155,7 +158,8 @@ static inline int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
return -EOPNOTSUPP;
}
-static inline int qca_read_soc_version(struct hci_dev *hdev, u32 *soc_version)
+static inline int qca_read_soc_version(struct hci_dev *hdev, u32 *soc_version,
+ enum qca_btsoc_type)
{
return -EOPNOTSUPP;
}
diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c
index bf3c02be6930..f838537f9f89 100644
--- a/drivers/bluetooth/btrtl.c
+++ b/drivers/bluetooth/btrtl.c
@@ -418,7 +418,7 @@ static int rtl_download_firmware(struct hci_dev *hdev,
if (IS_ERR(skb)) {
rtl_dev_err(hdev, "download fw command failed (%ld)",
PTR_ERR(skb));
- ret = -PTR_ERR(skb);
+ ret = PTR_ERR(skb);
goto out;
}
@@ -778,7 +778,7 @@ int btrtl_get_uart_settings(struct hci_dev *hdev,
rtl_dev_dbg(hdev, "skipping config entry 0x%x (len %u)",
le16_to_cpu(entry->offset), entry->len);
break;
- };
+ }
i += sizeof(*entry) + entry->len;
}
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index a9c35ebb30f8..70e385987d41 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -1200,7 +1200,7 @@ static int btusb_open(struct hci_dev *hdev)
if (data->setup_on_usb) {
err = data->setup_on_usb(hdev);
if (err < 0)
- return err;
+ goto setup_fail;
}
data->intf->needs_remote_wakeup = 1;
@@ -1239,6 +1239,7 @@ done:
failed:
clear_bit(BTUSB_INTR_RUNNING, &data->flags);
+setup_fail:
usb_autopm_put_interface(data->intf);
return err;
}
@@ -2182,8 +2183,11 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
* loaded.
*/
err = btintel_read_version(hdev, &ver);
- if (err)
+ if (err) {
+ bt_dev_err(hdev, "Intel Read version failed (%d)", err);
+ btintel_reset_to_bootloader(hdev);
return err;
+ }
/* The hardware platform number has a fixed value of 0x37 and
* for now only this single value is accepted.
@@ -2326,9 +2330,13 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
/* Start firmware downloading and get boot parameter */
err = btintel_download_firmware(hdev, fw, &boot_param);
- if (err < 0)
+ if (err < 0) {
+ /* When FW download fails, send Intel Reset to retry
+ * FW download.
+ */
+ btintel_reset_to_bootloader(hdev);
goto done;
-
+ }
set_bit(BTUSB_FIRMWARE_LOADED, &data->flags);
bt_dev_info(hdev, "Waiting for firmware download to complete");
@@ -2355,6 +2363,7 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
if (err) {
bt_dev_err(hdev, "Firmware loading timeout");
err = -ETIMEDOUT;
+ btintel_reset_to_bootloader(hdev);
goto done;
}
@@ -2381,8 +2390,11 @@ done:
set_bit(BTUSB_BOOTING, &data->flags);
err = btintel_send_intel_reset(hdev, boot_param);
- if (err)
+ if (err) {
+ bt_dev_err(hdev, "Intel Soft Reset failed (%d)", err);
+ btintel_reset_to_bootloader(hdev);
return err;
+ }
/* The bootloader will not indicate when the device is ready. This
* is done by the operational firmware sending bootup notification.
@@ -2404,6 +2416,7 @@ done:
if (err) {
bt_dev_err(hdev, "Device boot timeout");
+ btintel_reset_to_bootloader(hdev);
return -ETIMEDOUT;
}
@@ -2432,6 +2445,13 @@ done:
*/
btintel_set_event_mask(hdev, false);
+ /* Read the Intel version information after loading the FW */
+ err = btintel_read_version(hdev, &ver);
+ if (err)
+ return err;
+
+ btintel_version_info(hdev, &ver);
+
return 0;
}
@@ -2489,8 +2509,6 @@ static int btusb_shutdown_intel_new(struct hci_dev *hdev)
return 0;
}
-#ifdef CONFIG_BT_HCIBTUSB_MTK
-
#define FIRMWARE_MT7663 "mediatek/mt7663pr2h.bin"
#define FIRMWARE_MT7668 "mediatek/mt7668pr2h.bin"
@@ -3051,7 +3069,6 @@ static int btusb_mtk_shutdown(struct hci_dev *hdev)
MODULE_FIRMWARE(FIRMWARE_MT7663);
MODULE_FIRMWARE(FIRMWARE_MT7668);
-#endif
#ifdef CONFIG_PM
/* Configure an out-of-band gpio as wake-up pin, if specified in device tree */
@@ -3411,7 +3428,6 @@ static int btusb_setup_qca(struct hci_dev *hdev)
return 0;
}
-#ifdef CONFIG_BT_HCIBTUSB_BCM
static inline int __set_diag_interface(struct hci_dev *hdev)
{
struct btusb_data *data = hci_get_drvdata(hdev);
@@ -3498,7 +3514,6 @@ static int btusb_bcm_set_diag(struct hci_dev *hdev, bool enable)
return submit_or_queue_tx_urb(hdev, urb);
}
-#endif
#ifdef CONFIG_PM
static irqreturn_t btusb_oob_wake_handler(int irq, void *priv)
@@ -3724,8 +3739,8 @@ static int btusb_probe(struct usb_interface *intf,
if (id->driver_info & BTUSB_BCM92035)
hdev->setup = btusb_setup_bcm92035;
-#ifdef CONFIG_BT_HCIBTUSB_BCM
- if (id->driver_info & BTUSB_BCM_PATCHRAM) {
+ if (IS_ENABLED(CONFIG_BT_HCIBTUSB_BCM) &&
+ (id->driver_info & BTUSB_BCM_PATCHRAM)) {
hdev->manufacturer = 15;
hdev->setup = btbcm_setup_patchram;
hdev->set_diag = btusb_bcm_set_diag;
@@ -3735,7 +3750,8 @@ static int btusb_probe(struct usb_interface *intf,
data->diag = usb_ifnum_to_if(data->udev, ifnum_base + 2);
}
- if (id->driver_info & BTUSB_BCM_APPLE) {
+ if (IS_ENABLED(CONFIG_BT_HCIBTUSB_BCM) &&
+ (id->driver_info & BTUSB_BCM_APPLE)) {
hdev->manufacturer = 15;
hdev->setup = btbcm_setup_apple;
hdev->set_diag = btusb_bcm_set_diag;
@@ -3743,7 +3759,6 @@ static int btusb_probe(struct usb_interface *intf,
/* Broadcom LM_DIAG Interface numbers are hardcoded */
data->diag = usb_ifnum_to_if(data->udev, ifnum_base + 2);
}
-#endif
if (id->driver_info & BTUSB_INTEL) {
hdev->manufacturer = 2;
@@ -3774,14 +3789,13 @@ static int btusb_probe(struct usb_interface *intf,
if (id->driver_info & BTUSB_MARVELL)
hdev->set_bdaddr = btusb_set_bdaddr_marvell;
-#ifdef CONFIG_BT_HCIBTUSB_MTK
- if (id->driver_info & BTUSB_MEDIATEK) {
+ if (IS_ENABLED(CONFIG_BT_HCIBTUSB_MTK) &&
+ (id->driver_info & BTUSB_MEDIATEK)) {
hdev->setup = btusb_mtk_setup;
hdev->shutdown = btusb_mtk_shutdown;
hdev->manufacturer = 70;
set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks);
}
-#endif
if (id->driver_info & BTUSB_SWAVE) {
set_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks);
@@ -3807,8 +3821,8 @@ static int btusb_probe(struct usb_interface *intf,
btusb_check_needs_reset_resume(intf);
}
-#ifdef CONFIG_BT_HCIBTUSB_RTL
- if (id->driver_info & BTUSB_REALTEK) {
+ if (IS_ENABLED(CONFIG_BT_HCIBTUSB_RTL) &&
+ (id->driver_info & BTUSB_REALTEK)) {
hdev->setup = btrtl_setup_realtek;
hdev->shutdown = btrtl_shutdown_realtek;
hdev->cmd_timeout = btusb_rtl_cmd_timeout;
@@ -3819,7 +3833,6 @@ static int btusb_probe(struct usb_interface *intf,
*/
set_bit(BTUSB_WAKEUP_DISABLE, &data->flags);
}
-#endif
if (id->driver_info & BTUSB_AMP) {
/* AMP controllers do not support SCO packets */
@@ -3887,15 +3900,13 @@ static int btusb_probe(struct usb_interface *intf,
goto out_free_dev;
}
-#ifdef CONFIG_BT_HCIBTUSB_BCM
- if (data->diag) {
+ if (IS_ENABLED(CONFIG_BT_HCIBTUSB_BCM) && data->diag) {
if (!usb_driver_claim_interface(&btusb_driver,
data->diag, data))
__set_diag_interface(hdev);
else
data->diag = NULL;
}
-#endif
if (enable_autosuspend)
usb_enable_autosuspend(data->udev);
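The #ifdef-to-IS_ENABLED() conversions in this file trade preprocessor exclusion for an ordinary compile-time-constant branch: the vendor-specific blocks now compile in every configuration (so they cannot bit-rot unnoticed) while the optimizer still discards them when the option is off. The pattern in isolation:

	/* IS_ENABLED(CONFIG_FOO) evaluates to 0 or 1 at compile time, so
	 * the dead branch is eliminated, but its body must still compile.
	 */
	if (IS_ENABLED(CONFIG_BT_HCIBTUSB_RTL) &&
	    (id->driver_info & BTUSB_REALTEK)) {
		hdev->setup = btrtl_setup_realtek;
		hdev->shutdown = btrtl_shutdown_realtek;
	}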
diff --git a/drivers/bluetooth/btwilink.c b/drivers/bluetooth/btwilink.c
deleted file mode 100644
index e55f06e4270f..000000000000
--- a/drivers/bluetooth/btwilink.c
+++ /dev/null
@@ -1,337 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Texas Instrument's Bluetooth Driver For Shared Transport.
- *
- * Bluetooth Driver acts as interface between HCI core and
- * TI Shared Transport Layer.
- *
- * Copyright (C) 2009-2010 Texas Instruments
- * Author: Raja Mani <raja_mani@ti.com>
- * Pavan Savoy <pavan_savoy@ti.com>
- */
-
-#include <linux/platform_device.h>
-#include <net/bluetooth/bluetooth.h>
-#include <net/bluetooth/hci_core.h>
-#include <net/bluetooth/hci.h>
-
-#include <linux/ti_wilink_st.h>
-#include <linux/module.h>
-
-/* Bluetooth Driver Version */
-#define VERSION "1.0"
-#define MAX_BT_CHNL_IDS 3
-
-/* Number of seconds to wait for registration completion
- * when ST returns PENDING status.
- */
-#define BT_REGISTER_TIMEOUT 6000 /* 6 sec */
-
-/**
- * struct ti_st - driver operation structure
- * @hdev: hci device pointer which binds to bt driver
- * @reg_status: ST registration callback status
- * @st_write: write function provided by the ST driver
- * to be used by the driver during send_frame.
- * @wait_reg_completion - completion sync between ti_st_open
- * and st_reg_completion_cb.
- */
-struct ti_st {
- struct hci_dev *hdev;
- int reg_status;
- long (*st_write) (struct sk_buff *);
- struct completion wait_reg_completion;
-};
-
-/* Increments HCI counters based on pocket ID (cmd,acl,sco) */
-static inline void ti_st_tx_complete(struct ti_st *hst, int pkt_type)
-{
- struct hci_dev *hdev = hst->hdev;
-
- /* Update HCI stat counters */
- switch (pkt_type) {
- case HCI_COMMAND_PKT:
- hdev->stat.cmd_tx++;
- break;
-
- case HCI_ACLDATA_PKT:
- hdev->stat.acl_tx++;
- break;
-
- case HCI_SCODATA_PKT:
- hdev->stat.sco_tx++;
- break;
- }
-}
-
-/* ------- Interfaces to Shared Transport ------ */
-
-/* Called by ST layer to indicate protocol registration completion
- * status.ti_st_open() function will wait for signal from this
- * API when st_register() function returns ST_PENDING.
- */
-static void st_reg_completion_cb(void *priv_data, int data)
-{
- struct ti_st *lhst = priv_data;
-
- /* Save registration status for use in ti_st_open() */
- lhst->reg_status = data;
- /* complete the wait in ti_st_open() */
- complete(&lhst->wait_reg_completion);
-}
-
-/* Called by Shared Transport layer when receive data is available */
-static long st_receive(void *priv_data, struct sk_buff *skb)
-{
- struct ti_st *lhst = priv_data;
- int err;
-
- if (!skb)
- return -EFAULT;
-
- if (!lhst) {
- kfree_skb(skb);
- return -EFAULT;
- }
-
- /* Forward skb to HCI core layer */
- err = hci_recv_frame(lhst->hdev, skb);
- if (err < 0) {
- BT_ERR("Unable to push skb to HCI core(%d)", err);
- return err;
- }
-
- lhst->hdev->stat.byte_rx += skb->len;
-
- return 0;
-}
-
-/* ------- Interfaces to HCI layer ------ */
-/* protocol structure registered with shared transport */
-static struct st_proto_s ti_st_proto[MAX_BT_CHNL_IDS] = {
- {
- .chnl_id = HCI_EVENT_PKT, /* HCI Events */
- .hdr_len = sizeof(struct hci_event_hdr),
- .offset_len_in_hdr = offsetof(struct hci_event_hdr, plen),
- .len_size = 1, /* sizeof(plen) in struct hci_event_hdr */
- .reserve = 8,
- },
- {
- .chnl_id = HCI_ACLDATA_PKT, /* ACL */
- .hdr_len = sizeof(struct hci_acl_hdr),
- .offset_len_in_hdr = offsetof(struct hci_acl_hdr, dlen),
- .len_size = 2, /* sizeof(dlen) in struct hci_acl_hdr */
- .reserve = 8,
- },
- {
- .chnl_id = HCI_SCODATA_PKT, /* SCO */
- .hdr_len = sizeof(struct hci_sco_hdr),
- .offset_len_in_hdr = offsetof(struct hci_sco_hdr, dlen),
- .len_size = 1, /* sizeof(dlen) in struct hci_sco_hdr */
- .reserve = 8,
- },
-};
-
-/* Called from HCI core to initialize the device */
-static int ti_st_open(struct hci_dev *hdev)
-{
- unsigned long timeleft;
- struct ti_st *hst;
- int err, i;
-
- BT_DBG("%s %p", hdev->name, hdev);
-
- /* provide contexts for callbacks from ST */
- hst = hci_get_drvdata(hdev);
-
- for (i = 0; i < MAX_BT_CHNL_IDS; i++) {
- ti_st_proto[i].priv_data = hst;
- ti_st_proto[i].max_frame_size = HCI_MAX_FRAME_SIZE;
- ti_st_proto[i].recv = st_receive;
- ti_st_proto[i].reg_complete_cb = st_reg_completion_cb;
-
- /* Prepare wait-for-completion handler */
- init_completion(&hst->wait_reg_completion);
- /* Reset ST registration callback status flag,
- * this value will be updated in
- * st_reg_completion_cb()
- * function whenever it called from ST driver.
- */
- hst->reg_status = -EINPROGRESS;
-
- err = st_register(&ti_st_proto[i]);
- if (!err)
- goto done;
-
- if (err != -EINPROGRESS) {
- BT_ERR("st_register failed %d", err);
- return err;
- }
-
- /* ST is busy with either protocol
- * registration or firmware download.
- */
- BT_DBG("waiting for registration "
- "completion signal from ST");
- timeleft = wait_for_completion_timeout
- (&hst->wait_reg_completion,
- msecs_to_jiffies(BT_REGISTER_TIMEOUT));
- if (!timeleft) {
- BT_ERR("Timeout(%d sec),didn't get reg "
- "completion signal from ST",
- BT_REGISTER_TIMEOUT / 1000);
- return -ETIMEDOUT;
- }
-
- /* Is ST registration callback
- * called with ERROR status?
- */
- if (hst->reg_status != 0) {
- BT_ERR("ST registration completed with invalid "
- "status %d", hst->reg_status);
- return -EAGAIN;
- }
-
-done:
- hst->st_write = ti_st_proto[i].write;
- if (!hst->st_write) {
- BT_ERR("undefined ST write function");
- for (i = 0; i < MAX_BT_CHNL_IDS; i++) {
- /* Undo registration with ST */
- err = st_unregister(&ti_st_proto[i]);
- if (err)
- BT_ERR("st_unregister() failed with "
- "error %d", err);
- hst->st_write = NULL;
- }
- return -EIO;
- }
- }
- return 0;
-}
-
-/* Close device */
-static int ti_st_close(struct hci_dev *hdev)
-{
- int err, i;
- struct ti_st *hst = hci_get_drvdata(hdev);
-
- for (i = MAX_BT_CHNL_IDS-1; i >= 0; i--) {
- err = st_unregister(&ti_st_proto[i]);
- if (err)
- BT_ERR("st_unregister(%d) failed with error %d",
- ti_st_proto[i].chnl_id, err);
- }
-
- hst->st_write = NULL;
-
- return err;
-}
-
-static int ti_st_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
-{
- struct ti_st *hst;
- long len;
- int pkt_type;
-
- hst = hci_get_drvdata(hdev);
-
- /* Prepend skb with frame type */
- memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1);
-
- BT_DBG("%s: type %d len %d", hdev->name, hci_skb_pkt_type(skb),
- skb->len);
-
- /* Insert skb to shared transport layer's transmit queue.
- * Freeing skb memory is taken care in shared transport layer,
- * so don't free skb memory here.
- */
- pkt_type = hci_skb_pkt_type(skb);
- len = hst->st_write(skb);
- if (len < 0) {
- BT_ERR("ST write failed (%ld)", len);
- /* Try Again, would only fail if UART has gone bad */
- return -EAGAIN;
- }
-
- /* ST accepted our skb. So, Go ahead and do rest */
- hdev->stat.byte_tx += len;
- ti_st_tx_complete(hst, pkt_type);
-
- return 0;
-}
-
-static int bt_ti_probe(struct platform_device *pdev)
-{
- struct ti_st *hst;
- struct hci_dev *hdev;
- int err;
-
- hst = devm_kzalloc(&pdev->dev, sizeof(struct ti_st), GFP_KERNEL);
- if (!hst)
- return -ENOMEM;
-
- /* Expose "hciX" device to user space */
- hdev = hci_alloc_dev();
- if (!hdev)
- return -ENOMEM;
-
- BT_DBG("hdev %p", hdev);
-
- hst->hdev = hdev;
- hdev->bus = HCI_UART;
- hci_set_drvdata(hdev, hst);
- hdev->open = ti_st_open;
- hdev->close = ti_st_close;
- hdev->flush = NULL;
- hdev->send = ti_st_send_frame;
-
- err = hci_register_dev(hdev);
- if (err < 0) {
- BT_ERR("Can't register HCI device error %d", err);
- hci_free_dev(hdev);
- return err;
- }
-
- BT_DBG("HCI device registered (hdev %p)", hdev);
-
- dev_set_drvdata(&pdev->dev, hst);
- return 0;
-}
-
-static int bt_ti_remove(struct platform_device *pdev)
-{
- struct hci_dev *hdev;
- struct ti_st *hst = dev_get_drvdata(&pdev->dev);
-
- if (!hst)
- return -EFAULT;
-
- BT_DBG("%s", hst->hdev->name);
-
- hdev = hst->hdev;
- ti_st_close(hdev);
- hci_unregister_dev(hdev);
-
- hci_free_dev(hdev);
-
- dev_set_drvdata(&pdev->dev, NULL);
- return 0;
-}
-
-static struct platform_driver btwilink_driver = {
- .probe = bt_ti_probe,
- .remove = bt_ti_remove,
- .driver = {
- .name = "btwilink",
- },
-};
-
-module_platform_driver(btwilink_driver);
-
-/* ------ Module Info ------ */
-
-MODULE_AUTHOR("Raja Mani <raja_mani@ti.com>");
-MODULE_DESCRIPTION("Bluetooth Driver for TI Shared Transport" VERSION);
-MODULE_VERSION(VERSION);
-MODULE_LICENSE("GPL");
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
index 7646636f2d18..d2a6a4afdbbb 100644
--- a/drivers/bluetooth/hci_bcm.c
+++ b/drivers/bluetooth/hci_bcm.c
@@ -445,9 +445,11 @@ static int bcm_open(struct hci_uart *hu)
out:
if (bcm->dev) {
+ hci_uart_set_flow_control(hu, true);
hu->init_speed = bcm->dev->init_speed;
hu->oper_speed = bcm->dev->oper_speed;
err = bcm_gpio_set_power(bcm->dev, true);
+ hci_uart_set_flow_control(hu, false);
if (err)
goto err_unset_hu;
}
@@ -1422,6 +1424,8 @@ static const struct of_device_id bcm_bluetooth_of_match[] = {
{ .compatible = "brcm,bcm4345c5" },
{ .compatible = "brcm,bcm4330-bt" },
{ .compatible = "brcm,bcm43438-bt" },
+ { .compatible = "brcm,bcm43540-bt" },
+ { .compatible = "brcm,bcm4335a0" },
{ },
};
MODULE_DEVICE_TABLE(of, bcm_bluetooth_of_match);
diff --git a/drivers/bluetooth/hci_bcsp.c b/drivers/bluetooth/hci_bcsp.c
index fe2e307009f4..cf4a56095817 100644
--- a/drivers/bluetooth/hci_bcsp.c
+++ b/drivers/bluetooth/hci_bcsp.c
@@ -591,6 +591,7 @@ static int bcsp_recv(struct hci_uart *hu, const void *data, int count)
if (*ptr == 0xc0) {
BT_ERR("Short BCSP packet");
kfree_skb(bcsp->rx_skb);
+ bcsp->rx_skb = NULL;
bcsp->rx_state = BCSP_W4_PKT_START;
bcsp->rx_count = 0;
} else
@@ -606,6 +607,7 @@ static int bcsp_recv(struct hci_uart *hu, const void *data, int count)
bcsp->rx_skb->data[2])) != bcsp->rx_skb->data[3]) {
BT_ERR("Error in BCSP hdr checksum");
kfree_skb(bcsp->rx_skb);
+ bcsp->rx_skb = NULL;
bcsp->rx_state = BCSP_W4_PKT_DELIMITER;
bcsp->rx_count = 0;
continue;
@@ -630,6 +632,7 @@ static int bcsp_recv(struct hci_uart *hu, const void *data, int count)
bscp_get_crc(bcsp));
kfree_skb(bcsp->rx_skb);
+ bcsp->rx_skb = NULL;
bcsp->rx_state = BCSP_W4_PKT_DELIMITER;
bcsp->rx_count = 0;
continue;
diff --git a/drivers/bluetooth/hci_ll.c b/drivers/bluetooth/hci_ll.c
index 285706618f8a..d9a4c6c691e0 100644
--- a/drivers/bluetooth/hci_ll.c
+++ b/drivers/bluetooth/hci_ll.c
@@ -621,13 +621,6 @@ static int ll_setup(struct hci_uart *hu)
serdev_device_set_flow_control(serdev, true);
- if (hu->oper_speed)
- speed = hu->oper_speed;
- else if (hu->proto->oper_speed)
- speed = hu->proto->oper_speed;
- else
- speed = 0;
-
do {
/* Reset the Bluetooth device */
gpiod_set_value_cansleep(lldev->enable_gpio, 0);
@@ -639,20 +632,6 @@ static int ll_setup(struct hci_uart *hu)
return err;
}
- if (speed) {
- __le32 speed_le = cpu_to_le32(speed);
- struct sk_buff *skb;
-
- skb = __hci_cmd_sync(hu->hdev,
- HCI_VS_UPDATE_UART_HCI_BAUDRATE,
- sizeof(speed_le), &speed_le,
- HCI_INIT_TIMEOUT);
- if (!IS_ERR(skb)) {
- kfree_skb(skb);
- serdev_device_set_baudrate(serdev, speed);
- }
- }
-
err = download_firmware(lldev);
if (!err)
break;
@@ -677,7 +656,25 @@ static int ll_setup(struct hci_uart *hu)
}
/* Operational speed if any */
+ if (hu->oper_speed)
+ speed = hu->oper_speed;
+ else if (hu->proto->oper_speed)
+ speed = hu->proto->oper_speed;
+ else
+ speed = 0;
+
+ if (speed) {
+ __le32 speed_le = cpu_to_le32(speed);
+ struct sk_buff *skb;
+
+ skb = __hci_cmd_sync(hu->hdev, HCI_VS_UPDATE_UART_HCI_BAUDRATE,
+ sizeof(speed_le), &speed_le,
+ HCI_INIT_TIMEOUT);
+ if (!IS_ERR(skb)) {
+ kfree_skb(skb);
+ serdev_device_set_baudrate(serdev, speed);
+ }
+ }
return 0;
}
diff --git a/drivers/bluetooth/hci_nokia.c b/drivers/bluetooth/hci_nokia.c
index 6463350b7977..05f7f6de6863 100644
--- a/drivers/bluetooth/hci_nokia.c
+++ b/drivers/bluetooth/hci_nokia.c
@@ -520,7 +520,7 @@ static int nokia_enqueue(struct hci_uart *hu, struct sk_buff *skb)
err = skb_pad(skb, 1);
if (err)
return err;
- skb_put_u8(skb, 0x00);
+ skb_put(skb, 1);
}
skb_queue_tail(&btdev->txq, skb);
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index e3164c200eac..f10bdf8e1fc5 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -43,7 +43,8 @@
#define HCI_MAX_IBS_SIZE 10
#define IBS_WAKE_RETRANS_TIMEOUT_MS 100
-#define IBS_TX_IDLE_TIMEOUT_MS 2000
+#define IBS_BTSOC_TX_IDLE_TIMEOUT_MS 40
+#define IBS_HOST_TX_IDLE_TIMEOUT_MS 2000
#define CMD_TRANS_TIMEOUT_MS 100
/* susclk rate */
@@ -55,6 +56,7 @@
enum qca_flags {
QCA_IBS_ENABLED,
QCA_DROP_VENDOR_EVENT,
+ QCA_SUSPENDING,
};
/* HCI_IBS transmit side sleep protocol states */
@@ -100,6 +102,7 @@ struct qca_data {
struct work_struct ws_tx_vote_off;
unsigned long flags;
struct completion drop_ev_comp;
+ wait_queue_head_t suspend_wait_q;
/* For debugging purpose */
u64 ibs_sent_wacks;
@@ -130,8 +133,6 @@ enum qca_speed_type {
*/
struct qca_vreg {
const char *name;
- unsigned int min_uV;
- unsigned int max_uV;
unsigned int load_uA;
};
@@ -146,8 +147,8 @@ struct qca_vreg_data {
*/
struct qca_power {
struct device *dev;
- const struct qca_vreg_data *vreg_data;
struct regulator_bulk_data *vreg_bulk;
+ int num_vregs;
bool vregs_on;
};
@@ -162,7 +163,8 @@ struct qca_serdev {
const char *firmware_name;
};
-static int qca_power_setup(struct hci_uart *hu, bool on);
+static int qca_regulator_enable(struct qca_serdev *qcadev);
+static void qca_regulator_disable(struct qca_serdev *qcadev);
static void qca_power_shutdown(struct hci_uart *hu);
static int qca_power_off(struct hci_dev *hdev);
@@ -438,6 +440,12 @@ static void hci_ibs_wake_retrans_timeout(struct timer_list *t)
spin_lock_irqsave_nested(&qca->hci_ibs_lock,
flags, SINGLE_DEPTH_NESTING);
+ /* Don't retransmit the HCI_IBS_WAKE_IND when suspending. */
+ if (test_bit(QCA_SUSPENDING, &qca->flags)) {
+ spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
+ return;
+ }
+
switch (qca->tx_ibs_state) {
case HCI_IBS_TX_WAKING:
/* No WAKE_ACK, retransmit WAKE */
@@ -497,6 +505,8 @@ static int qca_open(struct hci_uart *hu)
INIT_WORK(&qca->ws_rx_vote_off, qca_wq_serial_rx_clock_vote_off);
INIT_WORK(&qca->ws_tx_vote_off, qca_wq_serial_tx_clock_vote_off);
+ init_waitqueue_head(&qca->suspend_wait_q);
+
qca->hu = hu;
init_completion(&qca->drop_ev_comp);
@@ -518,7 +528,7 @@ static int qca_open(struct hci_uart *hu)
} else {
hu->init_speed = qcadev->init_speed;
hu->oper_speed = qcadev->oper_speed;
- ret = qca_power_setup(hu, true);
+ ret = qca_regulator_enable(qcadev);
if (ret) {
destroy_workqueue(qca->workqueue);
kfree_skb(qca->rx_skb);
@@ -533,7 +543,7 @@ static int qca_open(struct hci_uart *hu)
qca->wake_retrans = IBS_WAKE_RETRANS_TIMEOUT_MS;
timer_setup(&qca->tx_idle_timer, hci_ibs_tx_idle_timeout, 0);
- qca->tx_idle_delay = IBS_TX_IDLE_TIMEOUT_MS;
+ qca->tx_idle_delay = IBS_HOST_TX_IDLE_TIMEOUT_MS;
BT_DBG("HCI_UART_QCA open, tx_idle_delay=%u, wake_retrans=%u",
qca->tx_idle_delay, qca->wake_retrans);
@@ -648,6 +658,12 @@ static void device_want_to_wakeup(struct hci_uart *hu)
qca->ibs_recv_wakes++;
+ /* Don't wake the rx up when suspending. */
+ if (test_bit(QCA_SUSPENDING, &qca->flags)) {
+ spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
+ return;
+ }
+
switch (qca->rx_ibs_state) {
case HCI_IBS_RX_ASLEEP:
/* Make sure clock is on - we may have turned clock off since
@@ -712,6 +728,8 @@ static void device_want_to_sleep(struct hci_uart *hu)
break;
}
+ wake_up_interruptible(&qca->suspend_wait_q);
+
spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
}
@@ -729,6 +747,12 @@ static void device_woke_up(struct hci_uart *hu)
qca->ibs_recv_wacks++;
+ /* Don't react to the wake-up-acknowledgment when suspending. */
+ if (test_bit(QCA_SUSPENDING, &qca->flags)) {
+ spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
+ return;
+ }
+
switch (qca->tx_ibs_state) {
case HCI_IBS_TX_AWAKE:
/* Expect one if we send 2 WAKEs */
@@ -781,8 +805,10 @@ static int qca_enqueue(struct hci_uart *hu, struct sk_buff *skb)
/* Don't go to sleep in middle of patch download or
* Out-Of-Band(GPIOs control) sleep is selected.
+ * Don't wake the device up when suspending.
*/
- if (!test_bit(QCA_IBS_ENABLED, &qca->flags)) {
+ if (!test_bit(QCA_IBS_ENABLED, &qca->flags) ||
+ test_bit(QCA_SUSPENDING, &qca->flags)) {
skb_queue_tail(&qca->txq, skb);
spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
return 0;
@@ -1188,7 +1214,7 @@ static int qca_wcn3990_init(struct hci_uart *hu)
qcadev = serdev_device_get_drvdata(hu->serdev);
if (!qcadev->bt_power->vregs_on) {
serdev_device_close(hu->serdev);
- ret = qca_power_setup(hu, true);
+ ret = qca_regulator_enable(qcadev);
if (ret)
return ret;
@@ -1262,7 +1288,7 @@ static int qca_setup(struct hci_uart *hu)
if (ret)
return ret;
- ret = qca_read_soc_version(hdev, &soc_ver);
+ ret = qca_read_soc_version(hdev, &soc_ver, soc_type);
if (ret)
return ret;
} else {
@@ -1282,7 +1308,7 @@ static int qca_setup(struct hci_uart *hu)
if (!qca_is_wcn399x(soc_type)) {
/* Get QCA version information */
- ret = qca_read_soc_version(hdev, &soc_ver);
+ ret = qca_read_soc_version(hdev, &soc_ver, soc_type);
if (ret)
return ret;
}
@@ -1332,10 +1358,21 @@ static const struct hci_uart_proto qca_proto = {
static const struct qca_vreg_data qca_soc_data_wcn3990 = {
.soc_type = QCA_WCN3990,
.vregs = (struct qca_vreg []) {
- { "vddio", 1800000, 1900000, 15000 },
- { "vddxo", 1800000, 1900000, 80000 },
- { "vddrf", 1300000, 1350000, 300000 },
- { "vddch0", 3300000, 3400000, 450000 },
+ { "vddio", 15000 },
+ { "vddxo", 80000 },
+ { "vddrf", 300000 },
+ { "vddch0", 450000 },
+ },
+ .num_vregs = 4,
+};
+
+static const struct qca_vreg_data qca_soc_data_wcn3991 = {
+ .soc_type = QCA_WCN3991,
+ .vregs = (struct qca_vreg []) {
+ { "vddio", 15000 },
+ { "vddxo", 80000 },
+ { "vddrf", 300000 },
+ { "vddch0", 450000 },
},
.num_vregs = 4,
};
@@ -1343,19 +1380,22 @@ static const struct qca_vreg_data qca_soc_data_wcn3990 = {
static const struct qca_vreg_data qca_soc_data_wcn3998 = {
.soc_type = QCA_WCN3998,
.vregs = (struct qca_vreg []) {
- { "vddio", 1800000, 1900000, 10000 },
- { "vddxo", 1800000, 1900000, 80000 },
- { "vddrf", 1300000, 1352000, 300000 },
- { "vddch0", 3300000, 3300000, 450000 },
+ { "vddio", 10000 },
+ { "vddxo", 80000 },
+ { "vddrf", 300000 },
+ { "vddch0", 450000 },
},
.num_vregs = 4,
};
static void qca_power_shutdown(struct hci_uart *hu)
{
+ struct qca_serdev *qcadev;
struct qca_data *qca = hu->priv;
unsigned long flags;
+ qcadev = serdev_device_get_drvdata(hu->serdev);
+
/* From this point we go into power off state. But serial port is
* still open, stop queueing the IBS data and flush all the buffered
* data in skb's.
@@ -1367,7 +1407,7 @@ static void qca_power_shutdown(struct hci_uart *hu)
host_set_baudrate(hu, 2400);
qca_send_power_pulse(hu, false);
- qca_power_setup(hu, false);
+ qca_regulator_disable(qcadev);
}
static int qca_power_off(struct hci_dev *hdev)
@@ -1383,97 +1423,71 @@ static int qca_power_off(struct hci_dev *hdev)
return 0;
}
-static int qca_enable_regulator(struct qca_vreg vregs,
- struct regulator *regulator)
+static int qca_regulator_enable(struct qca_serdev *qcadev)
{
+ struct qca_power *power = qcadev->bt_power;
int ret;
- ret = regulator_set_voltage(regulator, vregs.min_uV,
- vregs.max_uV);
- if (ret)
- return ret;
+ /* Already enabled */
+ if (power->vregs_on)
+ return 0;
- if (vregs.load_uA)
- ret = regulator_set_load(regulator,
- vregs.load_uA);
+ BT_DBG("enabling %d regulators)", power->num_vregs);
+ ret = regulator_bulk_enable(power->num_vregs, power->vreg_bulk);
if (ret)
return ret;
- return regulator_enable(regulator);
-
-}
-
-static void qca_disable_regulator(struct qca_vreg vregs,
- struct regulator *regulator)
-{
- regulator_disable(regulator);
- regulator_set_voltage(regulator, 0, vregs.max_uV);
- if (vregs.load_uA)
- regulator_set_load(regulator, 0);
+ power->vregs_on = true;
+ return 0;
}
-static int qca_power_setup(struct hci_uart *hu, bool on)
+static void qca_regulator_disable(struct qca_serdev *qcadev)
{
- struct qca_vreg *vregs;
- struct regulator_bulk_data *vreg_bulk;
- struct qca_serdev *qcadev;
- int i, num_vregs, ret = 0;
+ struct qca_power *power;
- qcadev = serdev_device_get_drvdata(hu->serdev);
- if (!qcadev || !qcadev->bt_power || !qcadev->bt_power->vreg_data ||
- !qcadev->bt_power->vreg_bulk)
- return -EINVAL;
-
- vregs = qcadev->bt_power->vreg_data->vregs;
- vreg_bulk = qcadev->bt_power->vreg_bulk;
- num_vregs = qcadev->bt_power->vreg_data->num_vregs;
- BT_DBG("on: %d", on);
- if (on && !qcadev->bt_power->vregs_on) {
- for (i = 0; i < num_vregs; i++) {
- ret = qca_enable_regulator(vregs[i],
- vreg_bulk[i].consumer);
- if (ret)
- break;
- }
+ if (!qcadev)
+ return;
- if (ret) {
- BT_ERR("failed to enable regulator:%s", vregs[i].name);
- /* turn off regulators which are enabled */
- for (i = i - 1; i >= 0; i--)
- qca_disable_regulator(vregs[i],
- vreg_bulk[i].consumer);
- } else {
- qcadev->bt_power->vregs_on = true;
- }
- } else if (!on && qcadev->bt_power->vregs_on) {
- /* turn off regulator in reverse order */
- i = qcadev->bt_power->vreg_data->num_vregs - 1;
- for ( ; i >= 0; i--)
- qca_disable_regulator(vregs[i], vreg_bulk[i].consumer);
+ power = qcadev->bt_power;
- qcadev->bt_power->vregs_on = false;
- }
+ /* Already disabled? */
+ if (!power->vregs_on)
+ return;
- return ret;
+ regulator_bulk_disable(power->num_vregs, power->vreg_bulk);
+ power->vregs_on = false;
}
static int qca_init_regulators(struct qca_power *qca,
const struct qca_vreg *vregs, size_t num_vregs)
{
+ struct regulator_bulk_data *bulk;
+ int ret;
int i;
- qca->vreg_bulk = devm_kcalloc(qca->dev, num_vregs,
- sizeof(struct regulator_bulk_data),
- GFP_KERNEL);
- if (!qca->vreg_bulk)
+ bulk = devm_kcalloc(qca->dev, num_vregs, sizeof(*bulk), GFP_KERNEL);
+ if (!bulk)
return -ENOMEM;
for (i = 0; i < num_vregs; i++)
- qca->vreg_bulk[i].supply = vregs[i].name;
+ bulk[i].supply = vregs[i].name;
+
+ ret = devm_regulator_bulk_get(qca->dev, num_vregs, bulk);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < num_vregs; i++) {
+ ret = regulator_set_load(bulk[i].consumer, vregs[i].load_uA);
+ if (ret)
+ return ret;
+ }
- return devm_regulator_bulk_get(qca->dev, num_vregs, qca->vreg_bulk);
+ qca->vreg_bulk = bulk;
+ qca->num_vregs = num_vregs;
+
+ return 0;
}
static int qca_serdev_probe(struct serdev_device *serdev)
@@ -1500,7 +1514,6 @@ static int qca_serdev_probe(struct serdev_device *serdev)
return -ENOMEM;
qcadev->bt_power->dev = &serdev->dev;
- qcadev->bt_power->vreg_data = data;
err = qca_init_regulators(qcadev->bt_power, data->vregs,
data->num_vregs);
if (err) {
@@ -1564,9 +1577,103 @@ static void qca_serdev_remove(struct serdev_device *serdev)
hci_uart_unregister_device(&qcadev->serdev_hu);
}
+static int __maybe_unused qca_suspend(struct device *dev)
+{
+ struct hci_dev *hdev = container_of(dev, struct hci_dev, dev);
+ struct hci_uart *hu = hci_get_drvdata(hdev);
+ struct qca_data *qca = hu->priv;
+ unsigned long flags;
+ int ret = 0;
+ u8 cmd;
+
+ set_bit(QCA_SUSPENDING, &qca->flags);
+
+ /* Device is downloading patch or doesn't support in-band sleep. */
+ if (!test_bit(QCA_IBS_ENABLED, &qca->flags))
+ return 0;
+
+ cancel_work_sync(&qca->ws_awake_device);
+ cancel_work_sync(&qca->ws_awake_rx);
+
+ spin_lock_irqsave_nested(&qca->hci_ibs_lock,
+ flags, SINGLE_DEPTH_NESTING);
+
+ switch (qca->tx_ibs_state) {
+ case HCI_IBS_TX_WAKING:
+ del_timer(&qca->wake_retrans_timer);
+ /* Fall through */
+ case HCI_IBS_TX_AWAKE:
+ del_timer(&qca->tx_idle_timer);
+
+ serdev_device_write_flush(hu->serdev);
+ cmd = HCI_IBS_SLEEP_IND;
+ ret = serdev_device_write_buf(hu->serdev, &cmd, sizeof(cmd));
+
+ if (ret < 0) {
+ BT_ERR("Failed to send SLEEP to device");
+ break;
+ }
+
+ qca->tx_ibs_state = HCI_IBS_TX_ASLEEP;
+ qca->ibs_sent_slps++;
+
+ qca_wq_serial_tx_clock_vote_off(&qca->ws_tx_vote_off);
+ break;
+
+ case HCI_IBS_TX_ASLEEP:
+ break;
+
+ default:
+ BT_ERR("Spurious tx state %d", qca->tx_ibs_state);
+ ret = -EINVAL;
+ break;
+ }
+
+ spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
+
+ if (ret < 0)
+ goto error;
+
+ serdev_device_wait_until_sent(hu->serdev,
+ msecs_to_jiffies(CMD_TRANS_TIMEOUT_MS));
+
+ /* Wait for HCI_IBS_SLEEP_IND sent by device to indicate its Tx is going
+ * to sleep, so that the packet does not wake the system later.
+ */
+
+ ret = wait_event_interruptible_timeout(qca->suspend_wait_q,
+ qca->rx_ibs_state == HCI_IBS_RX_ASLEEP,
+ msecs_to_jiffies(IBS_BTSOC_TX_IDLE_TIMEOUT_MS));
+
+ if (ret > 0)
+ return 0;
+
+ if (ret == 0)
+ ret = -ETIMEDOUT;
+
+error:
+ clear_bit(QCA_SUSPENDING, &qca->flags);
+
+ return ret;
+}
+
+static int __maybe_unused qca_resume(struct device *dev)
+{
+ struct hci_dev *hdev = container_of(dev, struct hci_dev, dev);
+ struct hci_uart *hu = hci_get_drvdata(hdev);
+ struct qca_data *qca = hu->priv;
+
+ clear_bit(QCA_SUSPENDING, &qca->flags);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(qca_pm_ops, qca_suspend, qca_resume);
+
static const struct of_device_id qca_bluetooth_of_match[] = {
{ .compatible = "qcom,qca6174-bt" },
{ .compatible = "qcom,wcn3990-bt", .data = &qca_soc_data_wcn3990},
+ { .compatible = "qcom,wcn3991-bt", .data = &qca_soc_data_wcn3991},
{ .compatible = "qcom,wcn3998-bt", .data = &qca_soc_data_wcn3998},
{ /* sentinel */ }
};
@@ -1578,6 +1685,7 @@ static struct serdev_device_driver qca_serdev_driver = {
.driver = {
.name = "hci_uart_qca",
.of_match_table = qca_bluetooth_of_match,
+ .pm = &qca_pm_ops,
},
};
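The suspend path added above quiesces the IBS state machine before the system sleeps: it sets QCA_SUSPENDING, pushes an HCI_IBS_SLEEP_IND to the controller, and waits for the RX side to reach HCI_IBS_RX_ASLEEP. SIMPLE_DEV_PM_OPS() then packages qca_suspend/qca_resume as the driver's system-sleep callbacks. A minimal sketch of that wiring for a generic serdev driver follows; the my_* names are hypothetical stand-ins, not part of this patch.

#include <linux/pm.h>
#include <linux/serdev.h>

static int __maybe_unused my_suspend(struct device *dev)
{
	/* quiesce the transport; a nonzero return aborts system suspend */
	return 0;
}

static int __maybe_unused my_resume(struct device *dev)
{
	/* re-arm whatever suspend tore down */
	return 0;
}

/* expands to a struct dev_pm_ops with the system-sleep callbacks filled in */
static SIMPLE_DEV_PM_OPS(my_pm_ops, my_suspend, my_resume);

static struct serdev_device_driver my_serdev_driver = {
	.driver = {
		.name = "my-serdev",
		.pm = &my_pm_ops,
	},
};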
diff --git a/drivers/bus/fsl-mc/dprc-driver.c b/drivers/bus/fsl-mc/dprc-driver.c
index 52c7e15143d6..c8b1c3842c1a 100644
--- a/drivers/bus/fsl-mc/dprc-driver.c
+++ b/drivers/bus/fsl-mc/dprc-driver.c
@@ -104,10 +104,8 @@ static int __fsl_mc_device_match(struct device *dev, void *data)
return fsl_mc_device_match(mc_dev, obj_desc);
}
-static struct fsl_mc_device *fsl_mc_device_lookup(struct fsl_mc_obj_desc
- *obj_desc,
- struct fsl_mc_device
- *mc_bus_dev)
+struct fsl_mc_device *fsl_mc_device_lookup(struct fsl_mc_obj_desc *obj_desc,
+ struct fsl_mc_device *mc_bus_dev)
{
struct device *dev;
diff --git a/drivers/bus/fsl-mc/dprc.c b/drivers/bus/fsl-mc/dprc.c
index 0fe3f52ae0de..602f030d84eb 100644
--- a/drivers/bus/fsl-mc/dprc.c
+++ b/drivers/bus/fsl-mc/dprc.c
@@ -554,3 +554,56 @@ int dprc_get_container_id(struct fsl_mc_io *mc_io,
return 0;
}
+
+/**
+ * dprc_get_connection() - Get connected endpoint and link status if connection
+ * exists.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRC object
+ * @endpoint1: Endpoint 1 configuration parameters
+ * @endpoint2: Returned endpoint 2 configuration parameters
+ * @state: Returned link state:
+ * 1 - link is up;
+ * 0 - link is down;
+ * -1 - no connection (endpoint2 information is irrelevant)
+ *
+ * Return: '0' on success; -ENOTCONN if connection does not exist.
+ */
+int dprc_get_connection(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const struct dprc_endpoint *endpoint1,
+ struct dprc_endpoint *endpoint2,
+ int *state)
+{
+ struct dprc_cmd_get_connection *cmd_params;
+ struct dprc_rsp_get_connection *rsp_params;
+ struct fsl_mc_command cmd = { 0 };
+ int err, i;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_CONNECTION,
+ cmd_flags,
+ token);
+ cmd_params = (struct dprc_cmd_get_connection *)cmd.params;
+ cmd_params->ep1_id = cpu_to_le32(endpoint1->id);
+ cmd_params->ep1_interface_id = cpu_to_le16(endpoint1->if_id);
+ for (i = 0; i < 16; i++)
+ cmd_params->ep1_type[i] = endpoint1->type[i];
+
+ /* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return -ENOTCONN;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dprc_rsp_get_connection *)cmd.params;
+ endpoint2->id = le32_to_cpu(rsp_params->ep2_id);
+ endpoint2->if_id = le16_to_cpu(rsp_params->ep2_interface_id);
+ *state = le32_to_cpu(rsp_params->state);
+ for (i = 0; i < 16; i++)
+ endpoint2->type[i] = rsp_params->ep2_type[i];
+
+ return 0;
+}
diff --git a/drivers/bus/fsl-mc/fsl-mc-bus.c b/drivers/bus/fsl-mc/fsl-mc-bus.c
index 5c9bf2e06552..a07cc19becdb 100644
--- a/drivers/bus/fsl-mc/fsl-mc-bus.c
+++ b/drivers/bus/fsl-mc/fsl-mc-bus.c
@@ -166,42 +166,52 @@ EXPORT_SYMBOL_GPL(fsl_mc_bus_type);
struct device_type fsl_mc_bus_dprc_type = {
.name = "fsl_mc_bus_dprc"
};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dprc_type);
struct device_type fsl_mc_bus_dpni_type = {
.name = "fsl_mc_bus_dpni"
};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpni_type);
struct device_type fsl_mc_bus_dpio_type = {
.name = "fsl_mc_bus_dpio"
};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpio_type);
struct device_type fsl_mc_bus_dpsw_type = {
.name = "fsl_mc_bus_dpsw"
};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpsw_type);
struct device_type fsl_mc_bus_dpbp_type = {
.name = "fsl_mc_bus_dpbp"
};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpbp_type);
struct device_type fsl_mc_bus_dpcon_type = {
.name = "fsl_mc_bus_dpcon"
};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpcon_type);
struct device_type fsl_mc_bus_dpmcp_type = {
.name = "fsl_mc_bus_dpmcp"
};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpmcp_type);
struct device_type fsl_mc_bus_dpmac_type = {
.name = "fsl_mc_bus_dpmac"
};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpmac_type);
struct device_type fsl_mc_bus_dprtc_type = {
.name = "fsl_mc_bus_dprtc"
};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dprtc_type);
struct device_type fsl_mc_bus_dpseci_type = {
.name = "fsl_mc_bus_dpseci"
};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpseci_type);
static struct device_type *fsl_mc_get_device_type(const char *type)
{
@@ -702,6 +712,39 @@ void fsl_mc_device_remove(struct fsl_mc_device *mc_dev)
}
EXPORT_SYMBOL_GPL(fsl_mc_device_remove);
+struct fsl_mc_device *fsl_mc_get_endpoint(struct fsl_mc_device *mc_dev)
+{
+ struct fsl_mc_device *mc_bus_dev, *endpoint;
+ struct fsl_mc_obj_desc endpoint_desc = { 0 };
+ struct dprc_endpoint endpoint1 = { 0 };
+ struct dprc_endpoint endpoint2 = { 0 };
+ int state, err;
+
+ mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent);
+ strcpy(endpoint1.type, mc_dev->obj_desc.type);
+ endpoint1.id = mc_dev->obj_desc.id;
+
+ err = dprc_get_connection(mc_bus_dev->mc_io, 0,
+ mc_bus_dev->mc_handle,
+ &endpoint1, &endpoint2,
+ &state);
+
+ if (err == -ENOTCONN || state == -1)
+ return ERR_PTR(-ENOTCONN);
+
+ if (err < 0) {
+ dev_err(&mc_bus_dev->dev, "dprc_get_connection() = %d\n", err);
+ return ERR_PTR(err);
+ }
+
+ strcpy(endpoint_desc.type, endpoint2.type);
+ endpoint_desc.id = endpoint2.id;
+ endpoint = fsl_mc_device_lookup(&endpoint_desc, mc_bus_dev);
+
+ return endpoint;
+}
+EXPORT_SYMBOL_GPL(fsl_mc_get_endpoint);
+
static int parse_mc_ranges(struct device *dev,
int *paddr_cells,
int *mc_addr_cells,
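fsl_mc_get_endpoint() above can hand back three shapes of result: ERR_PTR(-ENOTCONN) when no link exists, another ERR_PTR() on an MC command failure, or the looked-up device, which may be NULL when no matching child device is found. A hedged caller sketch (the -ENODEV fallback for the NULL case is this sketch's choice, not something the patch mandates):

	struct fsl_mc_device *peer;

	peer = fsl_mc_get_endpoint(mc_dev);
	if (IS_ERR(peer))
		return PTR_ERR(peer);	/* includes -ENOTCONN: nothing connected */
	if (!peer)
		return -ENODEV;		/* lookup found no device for the endpoint */
	/* peer is the fsl_mc_device on the far side of the DPRC link */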
diff --git a/drivers/bus/fsl-mc/fsl-mc-private.h b/drivers/bus/fsl-mc/fsl-mc-private.h
index 020fcc04ec8b..21ca8c756ee7 100644
--- a/drivers/bus/fsl-mc/fsl-mc-private.h
+++ b/drivers/bus/fsl-mc/fsl-mc-private.h
@@ -105,6 +105,8 @@ int dpmcp_reset(struct fsl_mc_io *mc_io,
#define DPRC_CMDID_GET_OBJ_REG_V2 DPRC_CMD_V2(0x15E)
#define DPRC_CMDID_SET_OBJ_IRQ DPRC_CMD(0x15F)
+#define DPRC_CMDID_GET_CONNECTION DPRC_CMD(0x16C)
+
struct dprc_cmd_open {
__le32 container_id;
};
@@ -228,6 +230,22 @@ struct dprc_cmd_set_obj_irq {
u8 obj_type[16];
};
+struct dprc_cmd_get_connection {
+ __le32 ep1_id;
+ __le16 ep1_interface_id;
+ u8 pad[2];
+ u8 ep1_type[16];
+};
+
+struct dprc_rsp_get_connection {
+ __le64 pad[3];
+ __le32 ep2_id;
+ __le16 ep2_interface_id;
+ __le16 pad1;
+ u8 ep2_type[16];
+ __le32 state;
+};
+
/*
* DPRC API for managing and querying DPAA resources
*/
@@ -392,6 +410,27 @@ int dprc_get_container_id(struct fsl_mc_io *mc_io,
u32 cmd_flags,
int *container_id);
+/**
+ * struct dprc_endpoint - Endpoint description for link connect/disconnect
+ * operations
+ * @type: Endpoint object type: NULL terminated string
+ * @id: Endpoint object ID
+ * @if_id: Interface ID; should be set for endpoints with multiple
+ * interfaces ("dpsw", "dpdmux"); for others, always set to 0
+ */
+struct dprc_endpoint {
+ char type[16];
+ int id;
+ u16 if_id;
+};
+
+int dprc_get_connection(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const struct dprc_endpoint *endpoint1,
+ struct dprc_endpoint *endpoint2,
+ int *state);
+
/*
* Data Path Buffer Pool (DPBP) API
*/
@@ -574,4 +613,7 @@ void fsl_destroy_mc_io(struct fsl_mc_io *mc_io);
bool fsl_mc_is_root_dprc(struct device *dev);
+struct fsl_mc_device *fsl_mc_device_lookup(struct fsl_mc_obj_desc *obj_desc,
+ struct fsl_mc_device *mc_bus_dev);
+
#endif /* _FSL_MC_PRIVATE_H_ */
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index 6626c84f66d1..5b21dc421c94 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -742,7 +742,7 @@ static int probe_gdrom(struct platform_device *devptr)
int err;
/* Start the device */
if (gdrom_execute_diagnostic() != 1) {
- pr_warning("ATA Probe for GDROM failed\n");
+ pr_warn("ATA Probe for GDROM failed\n");
return -ENODEV;
}
/* Print out firmware ID */
@@ -814,7 +814,7 @@ probe_fail_no_disk:
probe_fail_no_mem:
unregister_blkdev(gdrom_major, GDROM_DEV_NAME);
gdrom_major = 0;
- pr_warning("Probe failed - error is 0x%X\n", err);
+ pr_warn("Probe failed - error is 0x%X\n", err);
return err;
}
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index df0fc997dc3e..26956c006987 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -439,8 +439,8 @@ config RAW_DRIVER
Once bound, I/O against /dev/raw/rawN uses efficient zero-copy I/O.
See the raw(8) manpage for more details.
- Applications should preferably open the device (eg /dev/hda1)
- with the O_DIRECT flag.
+ Applications should preferably open the device (eg /dev/hda1)
+ with the O_DIRECT flag.
config MAX_RAW_DEVS
int "Maximum number of RAW devices to support (1-65536)"
@@ -559,4 +559,4 @@ config RANDOM_TRUST_BOOTLOADER
device randomness. Say Y here to assume the entropy provided by the
bootloader is trustworthy so it will be added to the kernel's entropy
pool. Otherwise, say N here so it will be regarded as device input that
- only mixes the entropy pool.
\ No newline at end of file
+ only mixes the entropy pool.
diff --git a/drivers/char/agp/Kconfig b/drivers/char/agp/Kconfig
index 812d6aa6e013..bc54235a7022 100644
--- a/drivers/char/agp/Kconfig
+++ b/drivers/char/agp/Kconfig
@@ -63,7 +63,7 @@ config AGP_AMD64
This option gives you AGP support for the GLX component of
X using the on-CPU northbridge of the AMD Athlon64/Opteron CPUs.
You still need an external AGP bridge like the AMD 8151, VIA
- K8T400M, SiS755. It may also support other AGP bridges when loaded
+ K8T400M, SiS755. It may also support other AGP bridges when loaded
with agp_try_unsupported=1.
config AGP_INTEL
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 59f25286befe..8486c29d8324 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -308,6 +308,19 @@ config HW_RANDOM_HISI
If unsure, say Y.
+config HW_RANDOM_HISI_V2
+ tristate "HiSilicon True Random Number Generator V2 support"
+ depends on HW_RANDOM && ARM64 && ACPI
+ default HW_RANDOM
+ help
+ This driver provides kernel-side support for the True Random Number
+ Generator V2 hardware found on HiSilicon Hi1620 SoC.
+
+ To compile this driver as a module, choose M here: the
+ module will be called hisi-trng-v2.
+
+ If unsure, say Y.
+
config HW_RANDOM_ST
tristate "ST Microelectronics HW Random Number Generator support"
depends on HW_RANDOM && ARCH_STI
@@ -373,17 +386,17 @@ config HW_RANDOM_MESON
If unsure, say Y.
config HW_RANDOM_CAVIUM
- tristate "Cavium ThunderX Random Number Generator support"
- depends on HW_RANDOM && PCI && (ARM64 || (COMPILE_TEST && 64BIT))
- default HW_RANDOM
- ---help---
- This driver provides kernel-side support for the Random Number
- Generator hardware found on Cavium SoCs.
+ tristate "Cavium ThunderX Random Number Generator support"
+ depends on HW_RANDOM && PCI && (ARM64 || (COMPILE_TEST && 64BIT))
+ default HW_RANDOM
+ ---help---
+ This driver provides kernel-side support for the Random Number
+ Generator hardware found on Cavium SoCs.
- To compile this driver as a module, choose M here: the
- module will be called cavium_rng.
+ To compile this driver as a module, choose M here: the
+ module will be called cavium_rng.
- If unsure, say Y.
+ If unsure, say Y.
config HW_RANDOM_MTK
tristate "Mediatek Random Number Generator support"
@@ -440,6 +453,19 @@ config HW_RANDOM_OPTEE
If unsure, say Y.
+config HW_RANDOM_NPCM
+ tristate "NPCM Random Number Generator support"
+ depends on ARCH_NPCM || COMPILE_TEST
+ default HW_RANDOM
+ help
+ This driver provides support for the Random Number
+ Generator hardware available in Nuvoton NPCM SoCs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called npcm-rng.
+
+ If unsure, say Y.
+
endif # HW_RANDOM
config UML_RANDOM
@@ -458,7 +484,7 @@ config UML_RANDOM
/dev/hwrng and injects the entropy into /dev/random.
config HW_RANDOM_KEYSTONE
- depends on ARCH_KEYSTONE
+ depends on ARCH_KEYSTONE || COMPILE_TEST
default HW_RANDOM
tristate "TI Keystone NETCP SA Hardware random number generator"
help
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index 7c9ef4a7667f..a7801b49ce6c 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -27,6 +27,7 @@ obj-$(CONFIG_HW_RANDOM_NOMADIK) += nomadik-rng.o
obj-$(CONFIG_HW_RANDOM_PSERIES) += pseries-rng.o
obj-$(CONFIG_HW_RANDOM_POWERNV) += powernv-rng.o
obj-$(CONFIG_HW_RANDOM_HISI) += hisi-rng.o
+obj-$(CONFIG_HW_RANDOM_HISI_V2) += hisi-trng-v2.o
obj-$(CONFIG_HW_RANDOM_BCM2835) += bcm2835-rng.o
obj-$(CONFIG_HW_RANDOM_IPROC_RNG200) += iproc-rng200.o
obj-$(CONFIG_HW_RANDOM_ST) += st-rng.o
@@ -39,3 +40,4 @@ obj-$(CONFIG_HW_RANDOM_MTK) += mtk-rng.o
obj-$(CONFIG_HW_RANDOM_S390) += s390-trng.o
obj-$(CONFIG_HW_RANDOM_KEYSTONE) += ks-sa-rng.o
obj-$(CONFIG_HW_RANDOM_OPTEE) += optee-rng.o
+obj-$(CONFIG_HW_RANDOM_NPCM) += npcm-rng.o
diff --git a/drivers/char/hw_random/atmel-rng.c b/drivers/char/hw_random/atmel-rng.c
index e55705745d5e..ecb71c4317a5 100644
--- a/drivers/char/hw_random/atmel-rng.c
+++ b/drivers/char/hw_random/atmel-rng.c
@@ -14,14 +14,22 @@
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/hw_random.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#define TRNG_CR 0x00
+#define TRNG_MR 0x04
#define TRNG_ISR 0x1c
#define TRNG_ODATA 0x50
#define TRNG_KEY 0x524e4700 /* RNG */
+#define TRNG_HALFR BIT(0) /* generate RN every 168 cycles */
+
+struct atmel_trng_data {
+ bool has_half_rate;
+};
+
struct atmel_trng {
struct clk *clk;
void __iomem *base;
@@ -62,21 +70,31 @@ static void atmel_trng_disable(struct atmel_trng *trng)
static int atmel_trng_probe(struct platform_device *pdev)
{
struct atmel_trng *trng;
- struct resource *res;
+ const struct atmel_trng_data *data;
int ret;
trng = devm_kzalloc(&pdev->dev, sizeof(*trng), GFP_KERNEL);
if (!trng)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- trng->base = devm_ioremap_resource(&pdev->dev, res);
+ trng->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(trng->base))
return PTR_ERR(trng->base);
trng->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(trng->clk))
return PTR_ERR(trng->clk);
+ data = of_device_get_match_data(&pdev->dev);
+ if (!data)
+ return -ENODEV;
+
+ if (data->has_half_rate) {
+ unsigned long rate = clk_get_rate(trng->clk);
+
+ /* if peripheral clk is above 100MHz, set HALFR */
+ if (rate > 100000000)
+ writel(TRNG_HALFR, trng->base + TRNG_MR);
+ }
ret = clk_prepare_enable(trng->clk);
if (ret)
@@ -141,9 +159,24 @@ static const struct dev_pm_ops atmel_trng_pm_ops = {
};
#endif /* CONFIG_PM */
+static const struct atmel_trng_data at91sam9g45_config = {
+ .has_half_rate = false,
+};
+
+static const struct atmel_trng_data sam9x60_config = {
+ .has_half_rate = true,
+};
+
static const struct of_device_id atmel_trng_dt_ids[] = {
- { .compatible = "atmel,at91sam9g45-trng" },
- { /* sentinel */ }
+ {
+ .compatible = "atmel,at91sam9g45-trng",
+ .data = &at91sam9g45_config,
+ }, {
+ .compatible = "microchip,sam9x60-trng",
+ .data = &sam9x60_config,
+ }, {
+ /* sentinel */
+ }
};
MODULE_DEVICE_TABLE(of, atmel_trng_dt_ids);
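This conversion is the usual per-compatible match-data idiom: each OF table entry carries a const configuration struct, and probe fetches it with of_device_get_match_data() instead of probing for quirks at run time. A generic sketch, with the vendor,* compatibles and the my_cfg type as placeholders:

#include <linux/of_device.h>
#include <linux/platform_device.h>

struct my_cfg {
	bool has_feature;
};

static const struct my_cfg plain_cfg = { .has_feature = false };
static const struct my_cfg fancy_cfg = { .has_feature = true };

static const struct of_device_id my_dt_ids[] = {
	{ .compatible = "vendor,plain-ip", .data = &plain_cfg },
	{ .compatible = "vendor,fancy-ip", .data = &fancy_cfg },
	{ /* sentinel */ }
};

static int my_probe(struct platform_device *pdev)
{
	const struct my_cfg *cfg = of_device_get_match_data(&pdev->dev);

	if (!cfg)
		return -ENODEV;
	if (cfg->has_feature) {
		/* program the optional register, as TRNG_MR is above */
	}
	return 0;
}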
diff --git a/drivers/char/hw_random/bcm2835-rng.c b/drivers/char/hw_random/bcm2835-rng.c
index f759790c3cdb..d2a5791eb49f 100644
--- a/drivers/char/hw_random/bcm2835-rng.c
+++ b/drivers/char/hw_random/bcm2835-rng.c
@@ -142,7 +142,6 @@ static int bcm2835_rng_probe(struct platform_device *pdev)
struct device_node *np = dev->of_node;
const struct of_device_id *rng_id;
struct bcm2835_rng_priv *priv;
- struct resource *r;
int err;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -151,10 +150,8 @@ static int bcm2835_rng_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, priv);
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
/* map peripheral */
- priv->base = devm_ioremap_resource(dev, r);
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index 80b850ef1bf6..d2d7a42d7e0d 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -13,7 +13,6 @@
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
-#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/hw_random.h>
#include <linux/kernel.h>
@@ -112,6 +111,14 @@ static void drop_current_rng(void)
}
/* Returns ERR_PTR(), NULL or refcounted hwrng */
+static struct hwrng *get_current_rng_nolock(void)
+{
+ if (current_rng)
+ kref_get(&current_rng->ref);
+
+ return current_rng;
+}
+
static struct hwrng *get_current_rng(void)
{
struct hwrng *rng;
@@ -119,9 +126,7 @@ static struct hwrng *get_current_rng(void)
if (mutex_lock_interruptible(&rng_mutex))
return ERR_PTR(-ERESTARTSYS);
- rng = current_rng;
- if (rng)
- kref_get(&rng->ref);
+ rng = get_current_rng_nolock();
mutex_unlock(&rng_mutex);
return rng;
@@ -156,8 +161,6 @@ static int hwrng_init(struct hwrng *rng)
reinit_completion(&rng->cleanup_done);
skip_init:
- add_early_randomness(rng);
-
current_quality = rng->quality ? : default_quality;
if (current_quality > 1024)
current_quality = 1024;
@@ -321,12 +324,13 @@ static ssize_t hwrng_attr_current_store(struct device *dev,
const char *buf, size_t len)
{
int err = -ENODEV;
- struct hwrng *rng;
+ struct hwrng *rng, *old_rng, *new_rng;
err = mutex_lock_interruptible(&rng_mutex);
if (err)
return -ERESTARTSYS;
+ old_rng = current_rng;
if (sysfs_streq(buf, "")) {
err = enable_best_rng();
} else {
@@ -338,9 +342,15 @@ static ssize_t hwrng_attr_current_store(struct device *dev,
}
}
}
-
+ new_rng = get_current_rng_nolock();
mutex_unlock(&rng_mutex);
+ if (new_rng) {
+ if (new_rng != old_rng)
+ add_early_randomness(new_rng);
+ put_rng(new_rng);
+ }
+
return err ? : len;
}
@@ -422,9 +432,7 @@ static int hwrng_fillfn(void *unused)
{
long rc;
- set_freezable();
-
- while (!kthread_freezable_should_stop(NULL)) {
+ while (!kthread_should_stop()) {
struct hwrng *rng;
rng = get_current_rng();
@@ -460,13 +468,15 @@ static void start_khwrngd(void)
int hwrng_register(struct hwrng *rng)
{
int err = -EINVAL;
- struct hwrng *old_rng, *tmp;
+ struct hwrng *tmp;
struct list_head *rng_list_ptr;
+ bool is_new_current = false;
if (!rng->name || (!rng->data_read && !rng->read))
goto out;
mutex_lock(&rng_mutex);
+
/* Must not register two RNGs with the same name. */
err = -EEXIST;
list_for_each_entry(tmp, &rng_list, list) {
@@ -485,10 +495,8 @@ int hwrng_register(struct hwrng *rng)
}
list_add_tail(&rng->list, rng_list_ptr);
- old_rng = current_rng;
- err = 0;
- if (!old_rng ||
- (!cur_rng_set_by_user && rng->quality > old_rng->quality)) {
+ if (!current_rng ||
+ (!cur_rng_set_by_user && rng->quality > current_rng->quality)) {
/*
* Set new rng as current as the new rng source
* provides better entropy quality and was not
@@ -497,19 +505,26 @@ int hwrng_register(struct hwrng *rng)
err = set_current_rng(rng);
if (err)
goto out_unlock;
+ /* to use current_rng in add_early_randomness() we need
+ * to take a ref
+ */
+ is_new_current = true;
+ kref_get(&rng->ref);
}
-
- if (old_rng && !rng->init) {
+ mutex_unlock(&rng_mutex);
+ if (is_new_current || !rng->init) {
/*
* Use a new device's input to add some randomness to
* the system. If this rng device isn't going to be
* used right away, its init function hasn't been
- * called yet; so only use the randomness from devices
- * that don't need an init callback.
+ * called yet by set_current_rng(); so only use the
+ * randomness from devices that don't need an init callback
*/
add_early_randomness(rng);
}
-
+ if (is_new_current)
+ put_rng(rng);
+ return 0;
out_unlock:
mutex_unlock(&rng_mutex);
out:
@@ -519,10 +534,12 @@ EXPORT_SYMBOL_GPL(hwrng_register);
void hwrng_unregister(struct hwrng *rng)
{
+ struct hwrng *old_rng, *new_rng;
int err;
mutex_lock(&rng_mutex);
+ old_rng = current_rng;
list_del(&rng->list);
if (current_rng == rng) {
err = enable_best_rng();
@@ -532,6 +549,7 @@ void hwrng_unregister(struct hwrng *rng)
}
}
+ new_rng = get_current_rng_nolock();
if (list_empty(&rng_list)) {
mutex_unlock(&rng_mutex);
if (hwrng_fill)
@@ -539,6 +557,12 @@ void hwrng_unregister(struct hwrng *rng)
} else
mutex_unlock(&rng_mutex);
+ if (new_rng) {
+ if (old_rng != new_rng)
+ add_early_randomness(new_rng);
+ put_rng(new_rng);
+ }
+
wait_for_completion(&rng->cleanup_done);
}
EXPORT_SYMBOL_GPL(hwrng_unregister);
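Both core hunks above follow the same shape: sample and mutate current_rng while holding rng_mutex, take an extra kref through get_current_rng_nolock(), drop the lock, and only then call add_early_randomness(), which may sleep and therefore must not run under the mutex. Condensed into one hypothetical helper (the names come from this file; the wrapper itself does not):

static void rng_list_changed(void)
{
	struct hwrng *old_rng, *new_rng;

	mutex_lock(&rng_mutex);
	old_rng = current_rng;
	/* ... mutate the list or switch current_rng here ... */
	new_rng = get_current_rng_nolock();	/* kref_get taken under the lock */
	mutex_unlock(&rng_mutex);

	if (new_rng) {
		if (new_rng != old_rng)
			add_early_randomness(new_rng);	/* may sleep; lock is dropped */
		put_rng(new_rng);	/* balance the kref taken above */
	}
}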
diff --git a/drivers/char/hw_random/exynos-trng.c b/drivers/char/hw_random/exynos-trng.c
index b4b52ab23b6b..8e1fe3f8dd2d 100644
--- a/drivers/char/hw_random/exynos-trng.c
+++ b/drivers/char/hw_random/exynos-trng.c
@@ -109,7 +109,6 @@ static int exynos_trng_init(struct hwrng *rng)
static int exynos_trng_probe(struct platform_device *pdev)
{
struct exynos_trng_dev *trng;
- struct resource *res;
int ret = -ENOMEM;
trng = devm_kzalloc(&pdev->dev, sizeof(*trng), GFP_KERNEL);
@@ -128,8 +127,7 @@ static int exynos_trng_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, trng);
trng->dev = &pdev->dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- trng->mem = devm_ioremap_resource(&pdev->dev, res);
+ trng->mem = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(trng->mem))
return PTR_ERR(trng->mem);
diff --git a/drivers/char/hw_random/hisi-rng.c b/drivers/char/hw_random/hisi-rng.c
index c663d5dd85bb..6815e17a9834 100644
--- a/drivers/char/hw_random/hisi-rng.c
+++ b/drivers/char/hw_random/hisi-rng.c
@@ -73,7 +73,6 @@ static int hisi_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
static int hisi_rng_probe(struct platform_device *pdev)
{
struct hisi_rng *rng;
- struct resource *res;
int ret;
rng = devm_kzalloc(&pdev->dev, sizeof(*rng), GFP_KERNEL);
@@ -82,8 +81,7 @@ static int hisi_rng_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, rng);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- rng->base = devm_ioremap_resource(&pdev->dev, res);
+ rng->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rng->base))
return PTR_ERR(rng->base);
diff --git a/drivers/char/hw_random/hisi-trng-v2.c b/drivers/char/hw_random/hisi-trng-v2.c
new file mode 100644
index 000000000000..6a65b8232ce0
--- /dev/null
+++ b/drivers/char/hw_random/hisi-trng-v2.c
@@ -0,0 +1,99 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 HiSilicon Limited. */
+
+#include <linux/acpi.h>
+#include <linux/err.h>
+#include <linux/hw_random.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/random.h>
+
+#define HISI_TRNG_REG 0x00F0
+#define HISI_TRNG_BYTES 4
+#define HISI_TRNG_QUALITY 512
+#define SLEEP_US 10
+#define TIMEOUT_US 10000
+
+struct hisi_trng {
+ void __iomem *base;
+ struct hwrng rng;
+};
+
+static int hisi_trng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
+{
+ struct hisi_trng *trng;
+ int currsize = 0;
+ u32 val = 0;
+ u32 ret;
+
+ trng = container_of(rng, struct hisi_trng, rng);
+
+ do {
+ ret = readl_poll_timeout(trng->base + HISI_TRNG_REG, val,
+ val, SLEEP_US, TIMEOUT_US);
+ if (ret)
+ return currsize;
+
+ if (max - currsize >= HISI_TRNG_BYTES) {
+ memcpy(buf + currsize, &val, HISI_TRNG_BYTES);
+ currsize += HISI_TRNG_BYTES;
+ if (currsize == max)
+ return currsize;
+ continue;
+ }
+
+ /* copy remaining bytes */
+ memcpy(buf + currsize, &val, max - currsize);
+ currsize = max;
+ } while (currsize < max);
+
+ return currsize;
+}
+
+static int hisi_trng_probe(struct platform_device *pdev)
+{
+ struct hisi_trng *trng;
+ int ret;
+
+ trng = devm_kzalloc(&pdev->dev, sizeof(*trng), GFP_KERNEL);
+ if (!trng)
+ return -ENOMEM;
+
+ trng->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(trng->base))
+ return PTR_ERR(trng->base);
+
+ trng->rng.name = pdev->name;
+ trng->rng.read = hisi_trng_read;
+ trng->rng.quality = HISI_TRNG_QUALITY;
+
+ ret = devm_hwrng_register(&pdev->dev, &trng->rng);
+ if (ret)
+ dev_err(&pdev->dev, "failed to register hwrng!\n");
+
+ return ret;
+}
+
+static const struct acpi_device_id hisi_trng_acpi_match[] = {
+ { "HISI02B3", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, hisi_trng_acpi_match);
+
+static struct platform_driver hisi_trng_driver = {
+ .probe = hisi_trng_probe,
+ .driver = {
+ .name = "hisi-trng-v2",
+ .acpi_match_table = ACPI_PTR(hisi_trng_acpi_match),
+ },
+};
+
+module_platform_driver(hisi_trng_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Weili Qian <qianweili@huawei.com>");
+MODULE_AUTHOR("Zaibo Xu <xuzaibo@huawei.com>");
+MODULE_DESCRIPTION("HiSilicon true random number generator V2 driver");
diff --git a/drivers/char/hw_random/iproc-rng200.c b/drivers/char/hw_random/iproc-rng200.c
index 92be1c0ab99f..899ff25f4f28 100644
--- a/drivers/char/hw_random/iproc-rng200.c
+++ b/drivers/char/hw_random/iproc-rng200.c
@@ -181,7 +181,6 @@ static void iproc_rng200_cleanup(struct hwrng *rng)
static int iproc_rng200_probe(struct platform_device *pdev)
{
struct iproc_rng200_dev *priv;
- struct resource *res;
struct device *dev = &pdev->dev;
int ret;
@@ -190,13 +189,7 @@ static int iproc_rng200_probe(struct platform_device *pdev)
return -ENOMEM;
/* Map peripheral */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(dev, "failed to get rng resources\n");
- return -EINVAL;
- }
-
- priv->base = devm_ioremap_resource(dev, res);
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base)) {
dev_err(dev, "failed to remap rng regs\n");
return PTR_ERR(priv->base);
diff --git a/drivers/char/hw_random/ks-sa-rng.c b/drivers/char/hw_random/ks-sa-rng.c
index a67430010aa6..e2330e757f1f 100644
--- a/drivers/char/hw_random/ks-sa-rng.c
+++ b/drivers/char/hw_random/ks-sa-rng.c
@@ -21,6 +21,7 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/delay.h>
+#include <linux/timekeeping.h>
#define SA_CMD_STATUS_OFS 0x8
@@ -84,14 +85,37 @@ struct ks_sa_rng {
struct hwrng rng;
struct clk *clk;
struct regmap *regmap_cfg;
- struct trng_regs *reg_rng;
+ struct trng_regs __iomem *reg_rng;
+ u64 ready_ts;
+ unsigned int refill_delay_ns;
};
+static unsigned int cycles_to_ns(unsigned long clk_rate, unsigned int cycles)
+{
+ return DIV_ROUND_UP_ULL((TRNG_DEF_CLK_DIV_CYCLES + 1) * 1000000000ull *
+ cycles, clk_rate);
+}
+
+static unsigned int startup_delay_ns(unsigned long clk_rate)
+{
+ if (!TRNG_DEF_STARTUP_CYCLES)
+ return cycles_to_ns(clk_rate, BIT(24));
+ return cycles_to_ns(clk_rate, 256 * TRNG_DEF_STARTUP_CYCLES);
+}
+
+static unsigned int refill_delay_ns(unsigned long clk_rate)
+{
+ if (!TRNG_DEF_MAX_REFILL_CYCLES)
+ return cycles_to_ns(clk_rate, BIT(24));
+ return cycles_to_ns(clk_rate, 256 * TRNG_DEF_MAX_REFILL_CYCLES);
+}
+
static int ks_sa_rng_init(struct hwrng *rng)
{
u32 value;
struct device *dev = (struct device *)rng->priv;
struct ks_sa_rng *ks_sa_rng = dev_get_drvdata(dev);
+ unsigned long clk_rate = clk_get_rate(ks_sa_rng->clk);
/* Enable RNG module */
regmap_write_bits(ks_sa_rng->regmap_cfg, SA_CMD_STATUS_OFS,
@@ -120,6 +144,10 @@ static int ks_sa_rng_init(struct hwrng *rng)
value |= TRNG_CNTL_REG_TRNG_ENABLE;
writel(value, &ks_sa_rng->reg_rng->control);
+ ks_sa_rng->refill_delay_ns = refill_delay_ns(clk_rate);
+ ks_sa_rng->ready_ts = ktime_get_ns() +
+ startup_delay_ns(clk_rate);
+
return 0;
}
@@ -144,6 +172,7 @@ static int ks_sa_rng_data_read(struct hwrng *rng, u32 *data)
data[1] = readl(&ks_sa_rng->reg_rng->output_h);
writel(TRNG_INTACK_REG_READY, &ks_sa_rng->reg_rng->intack);
+ ks_sa_rng->ready_ts = ktime_get_ns() + ks_sa_rng->refill_delay_ns;
return sizeof(u32) * 2;
}
@@ -152,10 +181,19 @@ static int ks_sa_rng_data_present(struct hwrng *rng, int wait)
{
struct device *dev = (struct device *)rng->priv;
struct ks_sa_rng *ks_sa_rng = dev_get_drvdata(dev);
+ u64 now = ktime_get_ns();
u32 ready;
int j;
+ if (wait && now < ks_sa_rng->ready_ts) {
+ /* Max delay expected here is 81920000 ns */
+ unsigned long min_delay =
+ DIV_ROUND_UP((u32)(ks_sa_rng->ready_ts - now), 1000);
+
+ usleep_range(min_delay, min_delay + SA_RNG_DATA_RETRY_DELAY);
+ }
+
for (j = 0; j < SA_MAX_RNG_DATA_RETRIES; j++) {
ready = readl(&ks_sa_rng->reg_rng->status);
ready &= TRNG_STATUS_REG_READY;
@@ -174,7 +212,6 @@ static int ks_sa_rng_probe(struct platform_device *pdev)
struct ks_sa_rng *ks_sa_rng;
struct device *dev = &pdev->dev;
int ret;
- struct resource *mem;
ks_sa_rng = devm_kzalloc(dev, sizeof(*ks_sa_rng), GFP_KERNEL);
if (!ks_sa_rng)
@@ -190,8 +227,7 @@ static int ks_sa_rng_probe(struct platform_device *pdev)
};
ks_sa_rng->rng.priv = (unsigned long)dev;
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ks_sa_rng->reg_rng = devm_ioremap_resource(dev, mem);
+ ks_sa_rng->reg_rng = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ks_sa_rng->reg_rng))
return PTR_ERR(ks_sa_rng->reg_rng);
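The timing math above is easiest to check with concrete numbers. The TRNG_DEF_* constants live in this driver's private definitions and are not visible in the hunk, so the values below are assumptions chosen purely for illustration:

static unsigned long long example_refill_ns(void)
{
	unsigned long clk_rate = 200000000UL;	/* 200 MHz module clock (assumed) */
	unsigned long long cycles = 1ULL << 16;	/* 256 * TRNG_DEF_MAX_REFILL_CYCLES (assumed) */
	unsigned int div = 2;			/* TRNG_DEF_CLK_DIV_CYCLES (assumed) */

	/* same rounding as cycles_to_ns(): (div + 1) * 1e9 * cycles / clk_rate */
	return ((div + 1) * 1000000000ULL * cycles + clk_rate - 1) / clk_rate;
	/* = 3 * 65536 * 5 ns = 983040 ns, i.e. the FIFO refills roughly every
	 * millisecond, which is the window ready_ts stops us from polling inside
	 */
}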
diff --git a/drivers/char/hw_random/meson-rng.c b/drivers/char/hw_random/meson-rng.c
index 76e693da5dde..e446236e81f2 100644
--- a/drivers/char/hw_random/meson-rng.c
+++ b/drivers/char/hw_random/meson-rng.c
@@ -42,7 +42,6 @@ static int meson_rng_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct meson_rng_data *data;
- struct resource *res;
int ret;
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
@@ -51,8 +50,7 @@ static int meson_rng_probe(struct platform_device *pdev)
data->pdev = pdev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- data->base = devm_ioremap_resource(dev, res);
+ data->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(data->base))
return PTR_ERR(data->base);
diff --git a/drivers/char/hw_random/mtk-rng.c b/drivers/char/hw_random/mtk-rng.c
index e649be5a5f13..8ad7b515a51b 100644
--- a/drivers/char/hw_random/mtk-rng.c
+++ b/drivers/char/hw_random/mtk-rng.c
@@ -105,16 +105,9 @@ static int mtk_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
static int mtk_rng_probe(struct platform_device *pdev)
{
- struct resource *res;
int ret;
struct mtk_rng *priv;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "no iomem resource\n");
- return -ENXIO;
- }
-
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
@@ -135,7 +128,7 @@ static int mtk_rng_probe(struct platform_device *pdev)
return ret;
}
- priv->base = devm_ioremap_resource(&pdev->dev, res);
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
diff --git a/drivers/char/hw_random/npcm-rng.c b/drivers/char/hw_random/npcm-rng.c
new file mode 100644
index 000000000000..01d04404d8c0
--- /dev/null
+++ b/drivers/char/hw_random/npcm-rng.c
@@ -0,0 +1,184 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2019 Nuvoton Technology corporation.
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/init.h>
+#include <linux/random.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/hw_random.h>
+#include <linux/delay.h>
+#include <linux/of_irq.h>
+#include <linux/pm_runtime.h>
+
+#define NPCM_RNGCS_REG 0x00 /* Control and status register */
+#define NPCM_RNGD_REG 0x04 /* Data register */
+#define NPCM_RNGMODE_REG 0x08 /* Mode register */
+
+#define NPCM_RNG_CLK_SET_25MHZ GENMASK(4, 3) /* 20-25 MHz */
+#define NPCM_RNG_DATA_VALID BIT(1)
+#define NPCM_RNG_ENABLE BIT(0)
+#define NPCM_RNG_M1ROSEL BIT(1)
+
+#define NPCM_RNG_TIMEOUT_USEC 20000
+#define NPCM_RNG_POLL_USEC 1000
+
+#define to_npcm_rng(p) container_of(p, struct npcm_rng, rng)
+
+struct npcm_rng {
+ void __iomem *base;
+ struct hwrng rng;
+};
+
+static int npcm_rng_init(struct hwrng *rng)
+{
+ struct npcm_rng *priv = to_npcm_rng(rng);
+
+ writel(NPCM_RNG_CLK_SET_25MHZ | NPCM_RNG_ENABLE,
+ priv->base + NPCM_RNGCS_REG);
+
+ return 0;
+}
+
+static void npcm_rng_cleanup(struct hwrng *rng)
+{
+ struct npcm_rng *priv = to_npcm_rng(rng);
+
+ writel(NPCM_RNG_CLK_SET_25MHZ, priv->base + NPCM_RNGCS_REG);
+}
+
+static int npcm_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
+{
+ struct npcm_rng *priv = to_npcm_rng(rng);
+ int retval = 0;
+ int ready;
+
+ pm_runtime_get_sync((struct device *)priv->rng.priv);
+
+ while (max >= sizeof(u32)) {
+ if (wait) {
+ if (readl_poll_timeout(priv->base + NPCM_RNGCS_REG,
+ ready,
+ ready & NPCM_RNG_DATA_VALID,
+ NPCM_RNG_POLL_USEC,
+ NPCM_RNG_TIMEOUT_USEC))
+ break;
+ } else {
+ if ((readl(priv->base + NPCM_RNGCS_REG) &
+ NPCM_RNG_DATA_VALID) == 0)
+ break;
+ }
+
+ *(u32 *)buf = readl(priv->base + NPCM_RNGD_REG);
+ retval += sizeof(u32);
+ buf += sizeof(u32);
+ max -= sizeof(u32);
+ }
+
+ pm_runtime_mark_last_busy((struct device *)priv->rng.priv);
+ pm_runtime_put_sync_autosuspend((struct device *)priv->rng.priv);
+
+ return retval || !wait ? retval : -EIO;
+}
+
+static int npcm_rng_probe(struct platform_device *pdev)
+{
+ struct npcm_rng *priv;
+ int ret;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ dev_set_drvdata(&pdev->dev, priv);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, 100);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
+#ifndef CONFIG_PM
+ priv->rng.init = npcm_rng_init;
+ priv->rng.cleanup = npcm_rng_cleanup;
+#endif
+ priv->rng.name = pdev->name;
+ priv->rng.read = npcm_rng_read;
+ priv->rng.priv = (unsigned long)&pdev->dev;
+ priv->rng.quality = 1000;
+
+ writel(NPCM_RNG_M1ROSEL, priv->base + NPCM_RNGMODE_REG);
+
+ ret = devm_hwrng_register(&pdev->dev, &priv->rng);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register rng device: %d\n",
+ ret);
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int npcm_rng_remove(struct platform_device *pdev)
+{
+ struct npcm_rng *priv = platform_get_drvdata(pdev);
+
+ devm_hwrng_unregister(&pdev->dev, &priv->rng);
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int npcm_rng_runtime_suspend(struct device *dev)
+{
+ struct npcm_rng *priv = dev_get_drvdata(dev);
+
+ npcm_rng_cleanup(&priv->rng);
+
+ return 0;
+}
+
+static int npcm_rng_runtime_resume(struct device *dev)
+{
+ struct npcm_rng *priv = dev_get_drvdata(dev);
+
+ return npcm_rng_init(&priv->rng);
+}
+#endif
+
+static const struct dev_pm_ops npcm_rng_pm_ops = {
+ SET_RUNTIME_PM_OPS(npcm_rng_runtime_suspend,
+ npcm_rng_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+};
+
+static const struct of_device_id rng_dt_id[] = {
+ { .compatible = "nuvoton,npcm750-rng", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, rng_dt_id);
+
+static struct platform_driver npcm_rng_driver = {
+ .driver = {
+ .name = "npcm-rng",
+ .pm = &npcm_rng_pm_ops,
+ .of_match_table = of_match_ptr(rng_dt_id),
+ },
+ .probe = npcm_rng_probe,
+ .remove = npcm_rng_remove,
+};
+
+module_platform_driver(npcm_rng_driver);
+
+MODULE_DESCRIPTION("Nuvoton NPCM Random Number Generator Driver");
+MODULE_AUTHOR("Tomer Maimon <tomer.maimon@nuvoton.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
index b27f39688b5e..0ed07d16ec8e 100644
--- a/drivers/char/hw_random/omap-rng.c
+++ b/drivers/char/hw_random/omap-rng.c
@@ -66,6 +66,13 @@
#define OMAP4_RNG_OUTPUT_SIZE 0x8
#define EIP76_RNG_OUTPUT_SIZE 0x10
+/*
+ * The EIP76 RNG takes approximately 700us to produce 16 bytes of output
+ * data, per test measurements. To allow for udelay()'s limited accuracy,
+ * we keep the timeout at 1000us.
+ */
+#define RNG_DATA_FILL_TIMEOUT 100
+
enum {
RNG_OUTPUT_0_REG = 0,
RNG_OUTPUT_1_REG,
@@ -176,7 +183,7 @@ static int omap_rng_do_read(struct hwrng *rng, void *data, size_t max,
if (max < priv->pdata->data_size)
return 0;
- for (i = 0; i < 20; i++) {
+ for (i = 0; i < RNG_DATA_FILL_TIMEOUT; i++) {
present = priv->pdata->data_present(priv);
if (present || !wait)
break;
@@ -432,7 +439,6 @@ static int get_omap_rng_device_details(struct omap_rng_dev *omap_rng)
static int omap_rng_probe(struct platform_device *pdev)
{
struct omap_rng_dev *priv;
- struct resource *res;
struct device *dev = &pdev->dev;
int ret;
@@ -449,8 +455,7 @@ static int omap_rng_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, priv);
priv->dev = dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->base = devm_ioremap_resource(dev, res);
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base)) {
ret = PTR_ERR(priv->base);
goto err_ioremap;
diff --git a/drivers/char/hw_random/omap3-rom-rng.c b/drivers/char/hw_random/omap3-rom-rng.c
index 38b719017186..e08a8887e718 100644
--- a/drivers/char/hw_random/omap3-rom-rng.c
+++ b/drivers/char/hw_random/omap3-rom-rng.c
@@ -11,8 +11,6 @@
* warranty of any kind, whether express or implied.
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
#include <linux/module.h>
#include <linux/init.h>
#include <linux/random.h>
@@ -20,117 +18,159 @@
#include <linux/workqueue.h>
#include <linux/clk.h>
#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#define RNG_RESET 0x01
#define RNG_GEN_PRNG_HW_INIT 0x02
#define RNG_GEN_HW 0x08
-/* param1: ptr, param2: count, param3: flag */
-static u32 (*omap3_rom_rng_call)(u32, u32, u32);
-
-static struct delayed_work idle_work;
-static int rng_idle;
-static struct clk *rng_clk;
+struct omap_rom_rng {
+ struct clk *clk;
+ struct device *dev;
+ struct hwrng ops;
+ u32 (*rom_rng_call)(u32 ptr, u32 count, u32 flag);
+};
-static void omap3_rom_rng_idle(struct work_struct *work)
+static int omap3_rom_rng_read(struct hwrng *rng, void *data, size_t max, bool w)
{
+ struct omap_rom_rng *ddata;
+ u32 ptr;
int r;
- r = omap3_rom_rng_call(0, 0, RNG_RESET);
- if (r != 0) {
- pr_err("reset failed: %d\n", r);
- return;
+ ddata = (struct omap_rom_rng *)rng->priv;
+
+ r = pm_runtime_get_sync(ddata->dev);
+ if (r < 0) {
+ pm_runtime_put_noidle(ddata->dev);
+
+ return r;
}
- clk_disable_unprepare(rng_clk);
- rng_idle = 1;
+
+ ptr = virt_to_phys(data);
+ r = ddata->rom_rng_call(ptr, 4, RNG_GEN_HW);
+ if (r != 0)
+ r = -EINVAL;
+ else
+ r = 4;
+
+ pm_runtime_mark_last_busy(ddata->dev);
+ pm_runtime_put_autosuspend(ddata->dev);
+
+ return r;
}
-static int omap3_rom_rng_get_random(void *buf, unsigned int count)
+static int __maybe_unused omap_rom_rng_runtime_suspend(struct device *dev)
{
- u32 r;
- u32 ptr;
+ struct omap_rom_rng *ddata;
+ int r;
- cancel_delayed_work_sync(&idle_work);
- if (rng_idle) {
- r = clk_prepare_enable(rng_clk);
- if (r)
- return r;
-
- r = omap3_rom_rng_call(0, 0, RNG_GEN_PRNG_HW_INIT);
- if (r != 0) {
- clk_disable_unprepare(rng_clk);
- pr_err("HW init failed: %d\n", r);
- return -EIO;
- }
- rng_idle = 0;
- }
+ ddata = dev_get_drvdata(dev);
- ptr = virt_to_phys(buf);
- r = omap3_rom_rng_call(ptr, count, RNG_GEN_HW);
- schedule_delayed_work(&idle_work, msecs_to_jiffies(500));
+ r = ddata->rom_rng_call(0, 0, RNG_RESET);
if (r != 0)
- return -EINVAL;
+ dev_err(dev, "reset failed: %d\n", r);
+
+ clk_disable_unprepare(ddata->clk);
+
return 0;
}
-static int omap3_rom_rng_read(struct hwrng *rng, void *data, size_t max, bool w)
+static int __maybe_unused omap_rom_rng_runtime_resume(struct device *dev)
{
+ struct omap_rom_rng *ddata;
int r;
- r = omap3_rom_rng_get_random(data, 4);
+ ddata = dev_get_drvdata(dev);
+
+ r = clk_prepare_enable(ddata->clk);
if (r < 0)
return r;
- return 4;
+
+ r = ddata->rom_rng_call(0, 0, RNG_GEN_PRNG_HW_INIT);
+ if (r != 0) {
+ clk_disable(ddata->clk);
+ dev_err(dev, "HW init failed: %d\n", r);
+
+ return -EIO;
+ }
+
+ return 0;
}
-static struct hwrng omap3_rom_rng_ops = {
- .name = "omap3-rom",
- .read = omap3_rom_rng_read,
-};
+static void omap_rom_rng_finish(void *data)
+{
+ struct omap_rom_rng *ddata = data;
+
+ pm_runtime_dont_use_autosuspend(ddata->dev);
+ pm_runtime_disable(ddata->dev);
+}
static int omap3_rom_rng_probe(struct platform_device *pdev)
{
+ struct omap_rom_rng *ddata;
int ret = 0;
- pr_info("initializing\n");
+ ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
+ if (!ddata)
+ return -ENOMEM;
- omap3_rom_rng_call = pdev->dev.platform_data;
- if (!omap3_rom_rng_call) {
- pr_err("omap3_rom_rng_call is NULL\n");
+ ddata->dev = &pdev->dev;
+ ddata->ops.priv = (unsigned long)ddata;
+ ddata->ops.name = "omap3-rom";
+ ddata->ops.read = of_device_get_match_data(&pdev->dev);
+ ddata->ops.quality = 900;
+ if (!ddata->ops.read) {
+ dev_err(&pdev->dev, "missing rom code handler\n");
+
+ return -ENODEV;
+ }
+ dev_set_drvdata(ddata->dev, ddata);
+
+ ddata->rom_rng_call = pdev->dev.platform_data;
+ if (!ddata->rom_rng_call) {
+ dev_err(ddata->dev, "rom_rng_call is NULL\n");
return -EINVAL;
}
- INIT_DELAYED_WORK(&idle_work, omap3_rom_rng_idle);
- rng_clk = devm_clk_get(&pdev->dev, "ick");
- if (IS_ERR(rng_clk)) {
- pr_err("unable to get RNG clock\n");
- return PTR_ERR(rng_clk);
+ ddata->clk = devm_clk_get(ddata->dev, "ick");
+ if (IS_ERR(ddata->clk)) {
+ dev_err(ddata->dev, "unable to get RNG clock\n");
+ return PTR_ERR(ddata->clk);
}
- /* Leave the RNG in reset state. */
- ret = clk_prepare_enable(rng_clk);
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, 500);
+ pm_runtime_use_autosuspend(&pdev->dev);
+
+ ret = devm_add_action_or_reset(ddata->dev, omap_rom_rng_finish,
+ ddata);
if (ret)
return ret;
- omap3_rom_rng_idle(0);
- return hwrng_register(&omap3_rom_rng_ops);
+ return devm_hwrng_register(ddata->dev, &ddata->ops);
}
-static int omap3_rom_rng_remove(struct platform_device *pdev)
-{
- cancel_delayed_work_sync(&idle_work);
- hwrng_unregister(&omap3_rom_rng_ops);
- clk_disable_unprepare(rng_clk);
- return 0;
-}
+static const struct of_device_id omap_rom_rng_match[] = {
+ { .compatible = "nokia,n900-rom-rng", .data = omap3_rom_rng_read, },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, omap_rom_rng_match);
+
+static const struct dev_pm_ops omap_rom_rng_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(omap_rom_rng_runtime_suspend,
+ omap_rom_rng_runtime_resume)
+};
static struct platform_driver omap3_rom_rng_driver = {
.driver = {
.name = "omap3-rom-rng",
+ .of_match_table = omap_rom_rng_match,
+ .pm = &omap_rom_rng_pm_ops,
},
.probe = omap3_rom_rng_probe,
- .remove = omap3_rom_rng_remove,
};
module_platform_driver(omap3_rom_rng_driver);
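Note that the .remove callback disappears entirely: devm_hwrng_register() unwinds the hwrng on unbind, and devm_add_action_or_reset() queues omap_rom_rng_finish() to undo the runtime-PM setup (running it immediately if registering the action fails). A minimal sketch of this managed-teardown idiom, with my_undo as a hypothetical stand-in:

#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static void my_undo(void *data)
{
	struct device *dev = data;

	pm_runtime_dont_use_autosuspend(dev);
	pm_runtime_disable(dev);
}

static int my_probe(struct platform_device *pdev)
{
	int ret;

	pm_runtime_enable(&pdev->dev);
	/* runs my_undo() right away on failure, or automatically on unbind */
	ret = devm_add_action_or_reset(&pdev->dev, my_undo, &pdev->dev);
	if (ret)
		return ret;
	/* ... rest of probe; no .remove needed for this teardown ... */
	return 0;
}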
diff --git a/drivers/char/hw_random/pasemi-rng.c b/drivers/char/hw_random/pasemi-rng.c
index 24b1460b49d4..2498d4ef9fe2 100644
--- a/drivers/char/hw_random/pasemi-rng.c
+++ b/drivers/char/hw_random/pasemi-rng.c
@@ -86,10 +86,8 @@ static struct hwrng pasemi_rng = {
static int rng_probe(struct platform_device *pdev)
{
void __iomem *rng_regs;
- struct resource *res;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- rng_regs = devm_ioremap_resource(&pdev->dev, res);
+ rng_regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rng_regs))
return PTR_ERR(rng_regs);
diff --git a/drivers/char/hw_random/pic32-rng.c b/drivers/char/hw_random/pic32-rng.c
index 90f498c98947..81080cb2294e 100644
--- a/drivers/char/hw_random/pic32-rng.c
+++ b/drivers/char/hw_random/pic32-rng.c
@@ -70,7 +70,6 @@ static int pic32_rng_read(struct hwrng *rng, void *buf, size_t max,
static int pic32_rng_probe(struct platform_device *pdev)
{
struct pic32_rng *priv;
- struct resource *res;
u32 v;
int ret;
@@ -78,8 +77,7 @@ static int pic32_rng_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->base = devm_ioremap_resource(&pdev->dev, res);
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
diff --git a/drivers/char/hw_random/st-rng.c b/drivers/char/hw_random/st-rng.c
index 863448360a7d..783c24e3f8b7 100644
--- a/drivers/char/hw_random/st-rng.c
+++ b/drivers/char/hw_random/st-rng.c
@@ -72,7 +72,6 @@ static int st_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
static int st_rng_probe(struct platform_device *pdev)
{
struct st_rng_data *ddata;
- struct resource *res;
struct clk *clk;
void __iomem *base;
int ret;
@@ -81,8 +80,7 @@ static int st_rng_probe(struct platform_device *pdev)
if (!ddata)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/drivers/char/hw_random/tx4939-rng.c b/drivers/char/hw_random/tx4939-rng.c
index 1093583b579c..c8bd34e740fd 100644
--- a/drivers/char/hw_random/tx4939-rng.c
+++ b/drivers/char/hw_random/tx4939-rng.c
@@ -107,14 +107,12 @@ static int tx4939_rng_data_read(struct hwrng *rng, u32 *buffer)
static int __init tx4939_rng_probe(struct platform_device *dev)
{
struct tx4939_rng *rngdev;
- struct resource *r;
int i;
rngdev = devm_kzalloc(&dev->dev, sizeof(*rngdev), GFP_KERNEL);
if (!rngdev)
return -ENOMEM;
- r = platform_get_resource(dev, IORESOURCE_MEM, 0);
- rngdev->base = devm_ioremap_resource(&dev->dev, r);
+ rngdev->base = devm_platform_ioremap_resource(dev, 0);
if (IS_ERR(rngdev->base))
return PTR_ERR(rngdev->base);
diff --git a/drivers/char/hw_random/xgene-rng.c b/drivers/char/hw_random/xgene-rng.c
index 7e568db87ae2..d7516a446987 100644
--- a/drivers/char/hw_random/xgene-rng.c
+++ b/drivers/char/hw_random/xgene-rng.c
@@ -313,7 +313,6 @@ static struct hwrng xgene_rng_func = {
static int xgene_rng_probe(struct platform_device *pdev)
{
- struct resource *res;
struct xgene_rng_dev *ctx;
int rc = 0;
@@ -324,8 +323,7 @@ static int xgene_rng_probe(struct platform_device *pdev)
ctx->dev = &pdev->dev;
platform_set_drvdata(pdev, ctx);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ctx->csr_base = devm_ioremap_resource(&pdev->dev, res);
+ ctx->csr_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ctx->csr_base))
return PTR_ERR(ctx->csr_base);
diff --git a/drivers/char/ipmi/Kconfig b/drivers/char/ipmi/Kconfig
index 4bad0614109b..7dc2c3ec4051 100644
--- a/drivers/char/ipmi/Kconfig
+++ b/drivers/char/ipmi/Kconfig
@@ -4,38 +4,38 @@
#
menuconfig IPMI_HANDLER
- tristate 'IPMI top-level message handler'
- depends on HAS_IOMEM
- select IPMI_DMI_DECODE if DMI
- help
- This enables the central IPMI message handler, required for IPMI
- to work.
+ tristate 'IPMI top-level message handler'
+ depends on HAS_IOMEM
+ select IPMI_DMI_DECODE if DMI
+ help
+ This enables the central IPMI message handler, required for IPMI
+ to work.
- IPMI is a standard for managing sensors (temperature,
- voltage, etc.) in a system.
+ IPMI is a standard for managing sensors (temperature,
+ voltage, etc.) in a system.
- See <file:Documentation/IPMI.txt> for more details on the driver.
+ See <file:Documentation/IPMI.txt> for more details on the driver.
- If unsure, say N.
+ If unsure, say N.
config IPMI_DMI_DECODE
- select IPMI_PLAT_DATA
- bool
+ select IPMI_PLAT_DATA
+ bool
config IPMI_PLAT_DATA
- bool
+ bool
if IPMI_HANDLER
config IPMI_PANIC_EVENT
- bool 'Generate a panic event to all BMCs on a panic'
- help
- When a panic occurs, this will cause the IPMI message handler to,
- by default, generate an IPMI event describing the panic to each
- interface registered with the message handler. This is always
- available, the module parameter for ipmi_msghandler named
- panic_op can be set to "event" to chose this value, this config
- simply causes the default value to be set to "event".
+ bool 'Generate a panic event to all BMCs on a panic'
+ help
+ When a panic occurs, this will cause the IPMI message handler to,
+ by default, generate an IPMI event describing the panic to each
+ interface registered with the message handler. This is always
+	  available; the module parameter for ipmi_msghandler named
+	  panic_op can be set to "event" to choose this value. This config
+	  simply causes the default value to be set to "event".
config IPMI_PANIC_STRING
bool 'Generate OEM events containing the panic string'
@@ -54,43 +54,43 @@ config IPMI_PANIC_STRING
causes the default value to be set to "string".
config IPMI_DEVICE_INTERFACE
- tristate 'Device interface for IPMI'
- help
- This provides an IOCTL interface to the IPMI message handler so
- userland processes may use IPMI. It supports poll() and select().
+ tristate 'Device interface for IPMI'
+ help
+ This provides an IOCTL interface to the IPMI message handler so
+ userland processes may use IPMI. It supports poll() and select().
config IPMI_SI
- tristate 'IPMI System Interface handler'
- select IPMI_PLAT_DATA
- help
- Provides a driver for System Interfaces (KCS, SMIC, BT).
- Currently, only KCS and SMIC are supported. If
- you are using IPMI, you should probably say "y" here.
+ tristate 'IPMI System Interface handler'
+ select IPMI_PLAT_DATA
+ help
+ Provides a driver for System Interfaces (KCS, SMIC, BT).
+ Currently, only KCS and SMIC are supported. If
+ you are using IPMI, you should probably say "y" here.
config IPMI_SSIF
- tristate 'IPMI SMBus handler (SSIF)'
- select I2C
- help
- Provides a driver for a SMBus interface to a BMC, meaning that you
- have a driver that must be accessed over an I2C bus instead of a
- standard interface. This module requires I2C support.
+ tristate 'IPMI SMBus handler (SSIF)'
+ select I2C
+ help
+	  Provides a driver for an SMBus interface to a BMC, meaning that you
+ have a driver that must be accessed over an I2C bus instead of a
+ standard interface. This module requires I2C support.
config IPMI_POWERNV
- depends on PPC_POWERNV
- tristate 'POWERNV (OPAL firmware) IPMI interface'
- help
- Provides a driver for OPAL firmware-based IPMI interfaces.
+ depends on PPC_POWERNV
+ tristate 'POWERNV (OPAL firmware) IPMI interface'
+ help
+ Provides a driver for OPAL firmware-based IPMI interfaces.
config IPMI_WATCHDOG
- tristate 'IPMI Watchdog Timer'
- help
- This enables the IPMI watchdog timer.
+ tristate 'IPMI Watchdog Timer'
+ help
+ This enables the IPMI watchdog timer.
config IPMI_POWEROFF
- tristate 'IPMI Poweroff'
- help
- This enables a function to power off the system with IPMI if
- the IPMI management controller is capable of this.
+ tristate 'IPMI Poweroff'
+ help
+ This enables a function to power off the system with IPMI if
+ the IPMI management controller is capable of this.
endif # IPMI_HANDLER
@@ -126,7 +126,7 @@ config NPCM7XX_KCS_IPMI_BMC
config ASPEED_BT_IPMI_BMC
depends on ARCH_ASPEED || COMPILE_TEST
- depends on REGMAP && REGMAP_MMIO && MFD_SYSCON
+ depends on REGMAP && REGMAP_MMIO && MFD_SYSCON
tristate "BT IPMI bmc driver"
help
Provides a driver for the BT (Block Transfer) IPMI interface
diff --git a/drivers/char/ipmi/bt-bmc.c b/drivers/char/ipmi/bt-bmc.c
index 40b9927c072c..d36aeacb290e 100644
--- a/drivers/char/ipmi/bt-bmc.c
+++ b/drivers/char/ipmi/bt-bmc.c
@@ -444,15 +444,13 @@ static int bt_bmc_probe(struct platform_device *pdev)
bt_bmc->map = syscon_node_to_regmap(pdev->dev.parent->of_node);
if (IS_ERR(bt_bmc->map)) {
- struct resource *res;
void __iomem *base;
/*
* Assume it's not the MFD-based devicetree description, in
* which case generate a regmap ourselves
*/
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/drivers/char/ipmi/ipmb_dev_int.c b/drivers/char/ipmi/ipmb_dev_int.c
index 285e0b8f9a97..1ff4fb1def7c 100644
--- a/drivers/char/ipmi/ipmb_dev_int.c
+++ b/drivers/char/ipmi/ipmb_dev_int.c
@@ -133,9 +133,6 @@ static ssize_t ipmb_write(struct file *file, const char __user *buf,
rq_sa = GET_7BIT_ADDR(msg[RQ_SA_8BIT_IDX]);
netf_rq_lun = msg[NETFN_LUN_IDX];
- if (!(netf_rq_lun & NETFN_RSP_BIT_MASK))
- return -EINVAL;
-
/*
* subtract rq_sa and netf_rq_lun from the length of the msg passed to
* i2c_smbus_xfer
@@ -154,16 +151,16 @@ static ssize_t ipmb_write(struct file *file, const char __user *buf,
return ret ? : count;
}
-static unsigned int ipmb_poll(struct file *file, poll_table *wait)
+static __poll_t ipmb_poll(struct file *file, poll_table *wait)
{
struct ipmb_dev *ipmb_dev = to_ipmb_dev(file);
- unsigned int mask = POLLOUT;
+ __poll_t mask = EPOLLOUT;
mutex_lock(&ipmb_dev->file_mutex);
poll_wait(file, &ipmb_dev->wait_queue, wait);
if (atomic_read(&ipmb_dev->request_queue_len))
- mask |= POLLIN;
+ mask |= EPOLLIN;
mutex_unlock(&ipmb_dev->file_mutex);
return mask;
@@ -203,25 +200,16 @@ static u8 ipmb_verify_checksum1(struct ipmb_dev *ipmb_dev, u8 rs_sa)
ipmb_dev->request.checksum1);
}
-static bool is_ipmb_request(struct ipmb_dev *ipmb_dev, u8 rs_sa)
+/*
+ * Verify that the message has a proper IPMB header with the minimum
+ * length and a correct checksum byte.
+ */
+static bool is_ipmb_msg(struct ipmb_dev *ipmb_dev, u8 rs_sa)
{
- if (ipmb_dev->msg_idx >= IPMB_REQUEST_LEN_MIN) {
- if (ipmb_verify_checksum1(ipmb_dev, rs_sa))
- return false;
+ if ((ipmb_dev->msg_idx >= IPMB_REQUEST_LEN_MIN) &&
+ (!ipmb_verify_checksum1(ipmb_dev, rs_sa)))
+ return true;
- /*
- * Check whether this is an IPMB request or
- * response.
- * The 6 MSB of netfn_rs_lun are dedicated to the netfn
- * while the remaining bits are dedicated to the lun.
- * If the LSB of the netfn is cleared, it is associated
- * with an IPMB request.
- * If the LSB of the netfn is set, it is associated with
- * an IPMB response.
- */
- if (!(ipmb_dev->request.netfn_rs_lun & NETFN_RSP_BIT_MASK))
- return true;
- }
return false;
}
@@ -273,8 +261,7 @@ static int ipmb_slave_cb(struct i2c_client *client,
case I2C_SLAVE_STOP:
ipmb_dev->request.len = ipmb_dev->msg_idx;
-
- if (is_ipmb_request(ipmb_dev, GET_8BIT_ADDR(client->addr)))
+ if (is_ipmb_msg(ipmb_dev, GET_8BIT_ADDR(client->addr)))
ipmb_handle_request(ipmb_dev);
break;
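The poll handler's switch to __poll_t is type hygiene only: it now returns EPOLLOUT unconditionally and adds EPOLLIN once a request sits in the queue, which userspace still observes through plain poll(2). A small sketch of a consumer blocking for the next request (the device path is illustrative):

#include <fcntl.h>
#include <poll.h>
#include <unistd.h>

int wait_and_read(unsigned char *buf, size_t len)
{
	struct pollfd pfd;
	int ret = -1;
	int fd = open("/dev/ipmb-1", O_RDWR);

	if (fd < 0)
		return -1;
	pfd.fd = fd;
	pfd.events = POLLIN;	/* driver raises EPOLLIN once a request is queued */
	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
		ret = read(fd, buf, len);	/* one queued IPMB message */
	close(fd);
	return ret;
}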
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 2aab80e19ae0..cad9563f8f48 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -44,25 +44,6 @@ static void need_waiter(struct ipmi_smi *intf);
static int handle_one_recv_msg(struct ipmi_smi *intf,
struct ipmi_smi_msg *msg);
-#ifdef DEBUG
-static void ipmi_debug_msg(const char *title, unsigned char *data,
- unsigned int len)
-{
- int i, pos;
- char buf[100];
-
- pos = snprintf(buf, sizeof(buf), "%s: ", title);
- for (i = 0; i < len; i++)
- pos += snprintf(buf + pos, sizeof(buf) - pos,
- " %2.2x", data[i]);
- pr_debug("%s\n", buf);
-}
-#else
-static void ipmi_debug_msg(const char *title, unsigned char *data,
- unsigned int len)
-{ }
-#endif
-
static bool initialized;
static bool drvregistered;
@@ -448,6 +429,8 @@ enum ipmi_stat_indexes {
#define IPMI_IPMB_NUM_SEQ 64
struct ipmi_smi {
+ struct module *owner;
+
/* What interface number are we? */
int intf_num;
@@ -1220,6 +1203,11 @@ int ipmi_create_user(unsigned int if_num,
if (rv)
goto out_kfree;
+ if (!try_module_get(intf->owner)) {
+ rv = -ENODEV;
+ goto out_kfree;
+ }
+
/* Note that each existing user holds a refcount to the interface. */
kref_get(&intf->refcount);
@@ -1349,6 +1337,7 @@ static void _ipmi_destroy_user(struct ipmi_user *user)
}
kref_put(&intf->refcount, intf_free);
+ module_put(intf->owner);
}
int ipmi_destroy_user(struct ipmi_user *user)
@@ -2267,7 +2256,7 @@ out_err:
ipmi_free_smi_msg(smi_msg);
ipmi_free_recv_msg(recv_msg);
} else {
- ipmi_debug_msg("Send", smi_msg->data, smi_msg->data_size);
+ pr_debug("Send: %*ph\n", smi_msg->data_size, smi_msg->data);
smi_send(intf, intf->handlers, smi_msg, priority);
}
@@ -2459,7 +2448,7 @@ static int __get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc)
* been recently fetched, this will just use the cached data. Otherwise
* it will run a new fetch.
*
- * Except for the first time this is called (in ipmi_register_smi()),
+ * Except for the first time this is called (in ipmi_add_smi()),
* this will always return good data;
*/
static int __bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
@@ -3031,8 +3020,11 @@ static int __ipmi_bmc_register(struct ipmi_smi *intf,
bmc->pdev.name = "ipmi_bmc";
rv = ida_simple_get(&ipmi_bmc_ida, 0, 0, GFP_KERNEL);
- if (rv < 0)
+ if (rv < 0) {
+ kfree(bmc);
goto out;
+ }
+
bmc->pdev.dev.driver = &ipmidriver.driver;
bmc->pdev.id = rv;
bmc->pdev.dev.release = release_bmc_device;
@@ -3377,10 +3369,11 @@ static void redo_bmc_reg(struct work_struct *work)
kref_put(&intf->refcount, intf_free);
}
-int ipmi_register_smi(const struct ipmi_smi_handlers *handlers,
- void *send_info,
- struct device *si_dev,
- unsigned char slave_addr)
+int ipmi_add_smi(struct module *owner,
+ const struct ipmi_smi_handlers *handlers,
+ void *send_info,
+ struct device *si_dev,
+ unsigned char slave_addr)
{
int i, j;
int rv;
@@ -3406,7 +3399,7 @@ int ipmi_register_smi(const struct ipmi_smi_handlers *handlers,
return rv;
}
-
+ intf->owner = owner;
intf->bmc = &intf->tmp_bmc;
INIT_LIST_HEAD(&intf->bmc->intfs);
mutex_init(&intf->bmc->dyn_mutex);
@@ -3514,7 +3507,7 @@ int ipmi_register_smi(const struct ipmi_smi_handlers *handlers,
return rv;
}
-EXPORT_SYMBOL(ipmi_register_smi);
+EXPORT_SYMBOL(ipmi_add_smi);
static void deliver_smi_err_response(struct ipmi_smi *intf,
struct ipmi_smi_msg *msg,
@@ -3730,7 +3723,7 @@ static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf,
msg->data[10] = ipmb_checksum(&msg->data[6], 4);
msg->data_size = 11;
- ipmi_debug_msg("Invalid command:", msg->data, msg->data_size);
+ pr_debug("Invalid command: %*ph\n", msg->data_size, msg->data);
rcu_read_lock();
if (!intf->in_shutdown) {
@@ -4217,7 +4210,7 @@ static int handle_one_recv_msg(struct ipmi_smi *intf,
int requeue;
int chan;
- ipmi_debug_msg("Recv:", msg->rsp, msg->rsp_size);
+ pr_debug("Recv: %*ph\n", msg->rsp_size, msg->rsp);
if ((msg->data_size >= 2)
&& (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
@@ -4576,7 +4569,7 @@ smi_from_recv_msg(struct ipmi_smi *intf, struct ipmi_recv_msg *recv_msg,
smi_msg->data_size = recv_msg->msg.data_len;
smi_msg->msgid = STORE_SEQ_IN_MSGID(seq, seqid);
- ipmi_debug_msg("Resend: ", smi_msg->data, smi_msg->data_size);
+ pr_debug("Resend: %*ph\n", smi_msg->data_size, smi_msg->data);
return smi_msg;
}
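
Note: '%*ph' is the printk extension that dumps a small buffer (at most 64
bytes) as space-separated hex, which is why the hand-rolled snprintf loop in
ipmi_debug_msg() could be deleted outright. Usage sketch:

    u8 data[4] = { 0xde, 0xad, 0xbe, 0xef };

    pr_debug("Send: %*ph\n", (int)sizeof(data), data);
    /* -> "Send: de ad be ef" (when dynamic debug enables this site) */
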
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 6b9a0593d2eb..c7cc8538b84a 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -265,10 +265,10 @@ static void cleanup_ipmi_si(void);
#ifdef DEBUG_TIMING
void debug_timestamp(char *msg)
{
- struct timespec t;
+ struct timespec64 t;
- ktime_get_ts(&t);
- pr_debug("**%s: %ld.%9.9ld\n", msg, (long) t.tv_sec, t.tv_nsec);
+ ktime_get_ts64(&t);
+ pr_debug("**%s: %lld.%9.9ld\n", msg, t.tv_sec, t.tv_nsec);
}
#else
#define debug_timestamp(x)
@@ -935,38 +935,25 @@ static void set_run_to_completion(void *send_info, bool i_run_to_completion)
}
/*
- * Use -1 in the nsec value of the busy waiting timespec to tell that
- * we are spinning in kipmid looking for something and not delaying
- * between checks
+ * Use -1 as a special constant to tell that we are spinning in kipmid
+ * looking for something and not delaying between checks
*/
-static inline void ipmi_si_set_not_busy(struct timespec *ts)
-{
- ts->tv_nsec = -1;
-}
-static inline int ipmi_si_is_busy(struct timespec *ts)
-{
- return ts->tv_nsec != -1;
-}
-
+#define IPMI_TIME_NOT_BUSY ns_to_ktime(-1ull)
static inline bool ipmi_thread_busy_wait(enum si_sm_result smi_result,
const struct smi_info *smi_info,
- struct timespec *busy_until)
+ ktime_t *busy_until)
{
unsigned int max_busy_us = 0;
if (smi_info->si_num < num_max_busy_us)
max_busy_us = kipmid_max_busy_us[smi_info->si_num];
if (max_busy_us == 0 || smi_result != SI_SM_CALL_WITH_DELAY)
- ipmi_si_set_not_busy(busy_until);
- else if (!ipmi_si_is_busy(busy_until)) {
- ktime_get_ts(busy_until);
- timespec_add_ns(busy_until, max_busy_us * NSEC_PER_USEC);
+ *busy_until = IPMI_TIME_NOT_BUSY;
+ else if (*busy_until == IPMI_TIME_NOT_BUSY) {
+ *busy_until = ktime_get() + max_busy_us * NSEC_PER_USEC;
} else {
- struct timespec now;
-
- ktime_get_ts(&now);
- if (unlikely(timespec_compare(&now, busy_until) > 0)) {
- ipmi_si_set_not_busy(busy_until);
+ if (unlikely(ktime_get() > *busy_until)) {
+ *busy_until = IPMI_TIME_NOT_BUSY;
return false;
}
}
@@ -988,9 +975,8 @@ static int ipmi_thread(void *data)
struct smi_info *smi_info = data;
unsigned long flags;
enum si_sm_result smi_result;
- struct timespec busy_until = { 0, 0 };
+ ktime_t busy_until = IPMI_TIME_NOT_BUSY;
- ipmi_si_set_not_busy(&busy_until);
set_user_nice(current, MAX_NICE);
while (!kthread_should_stop()) {
int busy_wait;
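
Note: the kipmid changes above are y2038 cleanup: the 'struct timespec'
busy-wait bookkeeping (with tv_nsec == -1 as a sentinel) becomes a plain
64-bit ktime_t, so the open-coded compare/add helpers reduce to ordinary
arithmetic. The deadline idiom, restated as a self-contained sketch:

    #include <linux/ktime.h>

    /* Busy-poll until ready() succeeds or max_busy_us has elapsed. */
    static bool poll_until_deadline(bool (*ready)(void), unsigned int max_busy_us)
    {
            ktime_t deadline = ktime_add_us(ktime_get(), max_busy_us);

            while (!ready()) {
                    if (ktime_after(ktime_get(), deadline))
                            return false;   /* deadline passed */
                    cpu_relax();
            }
            return true;
    }
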
diff --git a/drivers/char/lp.c b/drivers/char/lp.c
index 7c9269e3477a..bd95aba1f9fe 100644
--- a/drivers/char/lp.c
+++ b/drivers/char/lp.c
@@ -713,6 +713,10 @@ static int lp_set_timeout64(unsigned int minor, void __user *arg)
if (copy_from_user(karg, arg, sizeof(karg)))
return -EFAULT;
+ /* sparc64 suseconds_t is 32-bit only */
+ if (IS_ENABLED(CONFIG_SPARC64) && !in_compat_syscall())
+ karg[1] >>= 32;
+
return lp_set_timeout(minor, karg[0], karg[1]);
}
diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c
index c86f18aa8985..34bb88fe0b0a 100644
--- a/drivers/char/ppdev.c
+++ b/drivers/char/ppdev.c
@@ -619,20 +619,27 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
if (copy_from_user(time32, argp, sizeof(time32)))
return -EFAULT;
+ if ((time32[0] < 0) || (time32[1] < 0))
+ return -EINVAL;
+
return pp_set_timeout(pp->pdev, time32[0], time32[1]);
case PPSETTIME64:
if (copy_from_user(time64, argp, sizeof(time64)))
return -EFAULT;
+ if ((time64[0] < 0) || (time64[1] < 0))
+ return -EINVAL;
+
+ if (IS_ENABLED(CONFIG_SPARC64) && !in_compat_syscall())
+ time64[1] >>= 32;
+
return pp_set_timeout(pp->pdev, time64[0], time64[1]);
case PPGETTIME32:
jiffies_to_timespec64(pp->pdev->timeout, &ts);
time32[0] = ts.tv_sec;
time32[1] = ts.tv_nsec / NSEC_PER_USEC;
- if ((time32[0] < 0) || (time32[1] < 0))
- return -EINVAL;
if (copy_to_user(argp, time32, sizeof(time32)))
return -EFAULT;
@@ -643,8 +650,9 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
jiffies_to_timespec64(pp->pdev->timeout, &ts);
time64[0] = ts.tv_sec;
time64[1] = ts.tv_nsec / NSEC_PER_USEC;
- if ((time64[0] < 0) || (time64[1] < 0))
- return -EINVAL;
+
+ if (IS_ENABLED(CONFIG_SPARC64) && !in_compat_syscall())
+ time64[1] <<= 32;
if (copy_to_user(argp, time64, sizeof(time64)))
return -EFAULT;
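
Note: the sparc64 special cases in lp and ppdev above exist because sparc64's
native 64-bit timeval keeps tv_usec as a 32-bit suseconds_t followed by
padding; read as a big-endian 64-bit word, the microseconds land in the upper
half. Native (non-compat) callers therefore need the value shifted down when
copied from user space and shifted back up on the way out. A sketch of the
read-side fixup (helper name hypothetical):

    #include <linux/compat.h>

    static s64 usec_from_native_timeval(s64 raw)
    {
            /* sparc64 native ABI: suseconds_t occupies the upper 32 bits */
            if (IS_ENABLED(CONFIG_SPARC64) && !in_compat_syscall())
                    return raw >> 32;
            return raw;
    }
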
diff --git a/drivers/char/random.c b/drivers/char/random.c
index de434feb873a..01b8868b9bed 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -327,7 +327,6 @@
#include <linux/percpu.h>
#include <linux/cryptohash.h>
#include <linux/fips.h>
-#include <linux/freezer.h>
#include <linux/ptrace.h>
#include <linux/workqueue.h>
#include <linux/irq.h>
@@ -2500,8 +2499,7 @@ void add_hwgenerator_randomness(const char *buffer, size_t count,
* We'll be woken up again once below random_write_wakeup_thresh,
* or when the calling thread is about to terminate.
*/
- wait_event_freezable(random_write_wait,
- kthread_should_stop() ||
+ wait_event_interruptible(random_write_wait, kthread_should_stop() ||
ENTROPY_BITS(&input_pool) <= random_write_wakeup_bits);
mix_pool_bytes(poolp, buffer, count);
credit_entropy_bits(poolp, entropy);
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
index 9c37047f4b56..aacdeed93320 100644
--- a/drivers/char/tpm/Kconfig
+++ b/drivers/char/tpm/Kconfig
@@ -67,6 +67,13 @@ config TCG_TIS_SPI
within Linux. To compile this driver as a module, choose M here;
the module will be called tpm_tis_spi.
+config TCG_TIS_SPI_CR50
+ bool "Cr50 SPI Interface"
+ depends on TCG_TIS_SPI
+ help
+ If you have a H1 secure module running Cr50 firmware on SPI bus,
+ say Yes and it will be accessible from within Linux.
+
config TCG_TIS_I2C_ATMEL
tristate "TPM Interface Specification 1.2 Interface (I2C - Atmel)"
depends on I2C
diff --git a/drivers/char/tpm/Makefile b/drivers/char/tpm/Makefile
index c354cdff9c62..5a0d99d4fec0 100644
--- a/drivers/char/tpm/Makefile
+++ b/drivers/char/tpm/Makefile
@@ -21,7 +21,9 @@ tpm-$(CONFIG_EFI) += eventlog/efi.o
tpm-$(CONFIG_OF) += eventlog/of.o
obj-$(CONFIG_TCG_TIS_CORE) += tpm_tis_core.o
obj-$(CONFIG_TCG_TIS) += tpm_tis.o
-obj-$(CONFIG_TCG_TIS_SPI) += tpm_tis_spi.o
+obj-$(CONFIG_TCG_TIS_SPI) += tpm_tis_spi_mod.o
+tpm_tis_spi_mod-y := tpm_tis_spi.o
+tpm_tis_spi_mod-$(CONFIG_TCG_TIS_SPI_CR50) += tpm_tis_spi_cr50.o
obj-$(CONFIG_TCG_TIS_I2C_ATMEL) += tpm_i2c_atmel.o
obj-$(CONFIG_TCG_TIS_I2C_INFINEON) += tpm_i2c_infineon.o
obj-$(CONFIG_TCG_TIS_I2C_NUVOTON) += tpm_i2c_nuvoton.o
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
index d7a3888ad80f..a438b1206fcb 100644
--- a/drivers/char/tpm/tpm-interface.c
+++ b/drivers/char/tpm/tpm-interface.c
@@ -23,6 +23,7 @@
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
+#include <linux/suspend.h>
#include <linux/freezer.h>
#include <linux/tpm_eventlog.h>
@@ -394,7 +395,11 @@ int tpm_pm_suspend(struct device *dev)
return -ENODEV;
if (chip->flags & TPM_CHIP_FLAG_ALWAYS_POWERED)
- return 0;
+ goto suspended;
+
+ if ((chip->flags & TPM_CHIP_FLAG_FIRMWARE_POWER_MANAGED) &&
+ !pm_suspend_via_firmware())
+ goto suspended;
if (!tpm_chip_start(chip)) {
if (chip->flags & TPM_CHIP_FLAG_TPM2)
@@ -405,6 +410,7 @@ int tpm_pm_suspend(struct device *dev)
tpm_chip_stop(chip);
}
+suspended:
return rc;
}
EXPORT_SYMBOL_GPL(tpm_pm_suspend);
@@ -453,62 +459,6 @@ int tpm_get_random(struct tpm_chip *chip, u8 *out, size_t max)
}
EXPORT_SYMBOL_GPL(tpm_get_random);
-/**
- * tpm_seal_trusted() - seal a trusted key payload
- * @chip: a &struct tpm_chip instance, %NULL for the default chip
- * @options: authentication values and other options
- * @payload: the key data in clear and encrypted form
- *
- * Note: only TPM 2.0 chip are supported. TPM 1.x implementation is located in
- * the keyring subsystem.
- *
- * Return: same as with tpm_transmit_cmd()
- */
-int tpm_seal_trusted(struct tpm_chip *chip, struct trusted_key_payload *payload,
- struct trusted_key_options *options)
-{
- int rc;
-
- chip = tpm_find_get_ops(chip);
- if (!chip || !(chip->flags & TPM_CHIP_FLAG_TPM2))
- return -ENODEV;
-
- rc = tpm2_seal_trusted(chip, payload, options);
-
- tpm_put_ops(chip);
- return rc;
-}
-EXPORT_SYMBOL_GPL(tpm_seal_trusted);
-
-/**
- * tpm_unseal_trusted() - unseal a trusted key
- * @chip: a &struct tpm_chip instance, %NULL for the default chip
- * @options: authentication values and other options
- * @payload: the key data in clear and encrypted form
- *
- * Note: only TPM 2.0 chip are supported. TPM 1.x implementation is located in
- * the keyring subsystem.
- *
- * Return: same as with tpm_transmit_cmd()
- */
-int tpm_unseal_trusted(struct tpm_chip *chip,
- struct trusted_key_payload *payload,
- struct trusted_key_options *options)
-{
- int rc;
-
- chip = tpm_find_get_ops(chip);
- if (!chip || !(chip->flags & TPM_CHIP_FLAG_TPM2))
- return -ENODEV;
-
- rc = tpm2_unseal_trusted(chip, payload, options);
-
- tpm_put_ops(chip);
-
- return rc;
-}
-EXPORT_SYMBOL_GPL(tpm_unseal_trusted);
-
static int __init tpm_init(void)
{
int rc;
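
Note: TPM_CHIP_FLAG_FIRMWARE_POWER_MANAGED (set by the Cr50 driver added later
in this series) tells tpm_pm_suspend() that platform firmware owns TPM power.
On suspend paths that bypass firmware (s2idle), such a TPM keeps power, so
issuing a shutdown would be wrong, hence the pm_suspend_via_firmware() test.
The decision, restated as a sketch:

    /* Should tpm_pm_suspend() skip shutting the TPM down? */
    static bool tpm_skip_shutdown(struct tpm_chip *chip)
    {
            if (chip->flags & TPM_CHIP_FLAG_ALWAYS_POWERED)
                    return true;    /* chip never loses power */

            /* s2idle: firmware not involved, firmware-managed TPM stays up */
            if ((chip->flags & TPM_CHIP_FLAG_FIRMWARE_POWER_MANAGED) &&
                !pm_suspend_via_firmware())
                    return true;

            return false;
    }
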
diff --git a/drivers/char/tpm/tpm-sysfs.c b/drivers/char/tpm/tpm-sysfs.c
index edfa89160010..3b53b3e5ec3e 100644
--- a/drivers/char/tpm/tpm-sysfs.c
+++ b/drivers/char/tpm/tpm-sysfs.c
@@ -217,6 +217,7 @@ static ssize_t caps_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct tpm_chip *chip = to_tpm_chip(dev);
+ struct tpm1_version *version;
ssize_t rc = 0;
char *str = buf;
cap_t cap;
@@ -232,31 +233,31 @@ static ssize_t caps_show(struct device *dev, struct device_attribute *attr,
str += sprintf(str, "Manufacturer: 0x%x\n",
be32_to_cpu(cap.manufacturer_id));
- /* Try to get a TPM version 1.2 TPM_CAP_VERSION_INFO */
- rc = tpm1_getcap(chip, TPM_CAP_VERSION_1_2, &cap,
+ /* TPM 1.2 */
+ if (!tpm1_getcap(chip, TPM_CAP_VERSION_1_2, &cap,
"attempting to determine the 1.2 version",
- sizeof(cap.tpm_version_1_2));
- if (!rc) {
- str += sprintf(str,
- "TCG version: %d.%d\nFirmware version: %d.%d\n",
- cap.tpm_version_1_2.Major,
- cap.tpm_version_1_2.Minor,
- cap.tpm_version_1_2.revMajor,
- cap.tpm_version_1_2.revMinor);
- } else {
- /* Otherwise just use TPM_STRUCT_VER */
- if (tpm1_getcap(chip, TPM_CAP_VERSION_1_1, &cap,
- "attempting to determine the 1.1 version",
- sizeof(cap.tpm_version)))
- goto out_ops;
- str += sprintf(str,
- "TCG version: %d.%d\nFirmware version: %d.%d\n",
- cap.tpm_version.Major,
- cap.tpm_version.Minor,
- cap.tpm_version.revMajor,
- cap.tpm_version.revMinor);
+ sizeof(cap.version2))) {
+ version = &cap.version2.version;
+ goto out_print;
}
+
+ /* TPM 1.1 */
+ if (tpm1_getcap(chip, TPM_CAP_VERSION_1_1, &cap,
+ "attempting to determine the 1.1 version",
+ sizeof(cap.version1))) {
+ goto out_ops;
+ }
+
+ version = &cap.version1;
+
+out_print:
+ str += sprintf(str,
+ "TCG version: %d.%d\nFirmware version: %d.%d\n",
+ version->major, version->minor,
+ version->rev_major, version->rev_minor);
+
rc = str - buf;
+
out_ops:
tpm_put_ops(chip);
return rc;
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index a7fea3e0ca86..b9e1547be6b5 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -25,7 +25,6 @@
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/tpm.h>
-#include <linux/highmem.h>
#include <linux/tpm_eventlog.h>
#ifdef CONFIG_X86
@@ -58,123 +57,6 @@ enum tpm_addr {
#define TPM_ERR_DISABLED 0x7
#define TPM_ERR_INVALID_POSTINIT 38
-#define TPM_HEADER_SIZE 10
-
-enum tpm2_const {
- TPM2_PLATFORM_PCR = 24,
- TPM2_PCR_SELECT_MIN = ((TPM2_PLATFORM_PCR + 7) / 8),
-};
-
-enum tpm2_timeouts {
- TPM2_TIMEOUT_A = 750,
- TPM2_TIMEOUT_B = 2000,
- TPM2_TIMEOUT_C = 200,
- TPM2_TIMEOUT_D = 30,
- TPM2_DURATION_SHORT = 20,
- TPM2_DURATION_MEDIUM = 750,
- TPM2_DURATION_LONG = 2000,
- TPM2_DURATION_LONG_LONG = 300000,
- TPM2_DURATION_DEFAULT = 120000,
-};
-
-enum tpm2_structures {
- TPM2_ST_NO_SESSIONS = 0x8001,
- TPM2_ST_SESSIONS = 0x8002,
-};
-
-/* Indicates from what layer of the software stack the error comes from */
-#define TSS2_RC_LAYER_SHIFT 16
-#define TSS2_RESMGR_TPM_RC_LAYER (11 << TSS2_RC_LAYER_SHIFT)
-
-enum tpm2_return_codes {
- TPM2_RC_SUCCESS = 0x0000,
- TPM2_RC_HASH = 0x0083, /* RC_FMT1 */
- TPM2_RC_HANDLE = 0x008B,
- TPM2_RC_INITIALIZE = 0x0100, /* RC_VER1 */
- TPM2_RC_FAILURE = 0x0101,
- TPM2_RC_DISABLED = 0x0120,
- TPM2_RC_COMMAND_CODE = 0x0143,
- TPM2_RC_TESTING = 0x090A, /* RC_WARN */
- TPM2_RC_REFERENCE_H0 = 0x0910,
- TPM2_RC_RETRY = 0x0922,
-};
-
-enum tpm2_command_codes {
- TPM2_CC_FIRST = 0x011F,
- TPM2_CC_HIERARCHY_CONTROL = 0x0121,
- TPM2_CC_HIERARCHY_CHANGE_AUTH = 0x0129,
- TPM2_CC_CREATE_PRIMARY = 0x0131,
- TPM2_CC_SEQUENCE_COMPLETE = 0x013E,
- TPM2_CC_SELF_TEST = 0x0143,
- TPM2_CC_STARTUP = 0x0144,
- TPM2_CC_SHUTDOWN = 0x0145,
- TPM2_CC_NV_READ = 0x014E,
- TPM2_CC_CREATE = 0x0153,
- TPM2_CC_LOAD = 0x0157,
- TPM2_CC_SEQUENCE_UPDATE = 0x015C,
- TPM2_CC_UNSEAL = 0x015E,
- TPM2_CC_CONTEXT_LOAD = 0x0161,
- TPM2_CC_CONTEXT_SAVE = 0x0162,
- TPM2_CC_FLUSH_CONTEXT = 0x0165,
- TPM2_CC_VERIFY_SIGNATURE = 0x0177,
- TPM2_CC_GET_CAPABILITY = 0x017A,
- TPM2_CC_GET_RANDOM = 0x017B,
- TPM2_CC_PCR_READ = 0x017E,
- TPM2_CC_PCR_EXTEND = 0x0182,
- TPM2_CC_EVENT_SEQUENCE_COMPLETE = 0x0185,
- TPM2_CC_HASH_SEQUENCE_START = 0x0186,
- TPM2_CC_CREATE_LOADED = 0x0191,
- TPM2_CC_LAST = 0x0193, /* Spec 1.36 */
-};
-
-enum tpm2_permanent_handles {
- TPM2_RS_PW = 0x40000009,
-};
-
-enum tpm2_capabilities {
- TPM2_CAP_HANDLES = 1,
- TPM2_CAP_COMMANDS = 2,
- TPM2_CAP_PCRS = 5,
- TPM2_CAP_TPM_PROPERTIES = 6,
-};
-
-enum tpm2_properties {
- TPM_PT_TOTAL_COMMANDS = 0x0129,
-};
-
-enum tpm2_startup_types {
- TPM2_SU_CLEAR = 0x0000,
- TPM2_SU_STATE = 0x0001,
-};
-
-enum tpm2_cc_attrs {
- TPM2_CC_ATTR_CHANDLES = 25,
- TPM2_CC_ATTR_RHANDLE = 28,
-};
-
-#define TPM_VID_INTEL 0x8086
-#define TPM_VID_WINBOND 0x1050
-#define TPM_VID_STM 0x104A
-
-enum tpm_chip_flags {
- TPM_CHIP_FLAG_TPM2 = BIT(1),
- TPM_CHIP_FLAG_IRQ = BIT(2),
- TPM_CHIP_FLAG_VIRTUAL = BIT(3),
- TPM_CHIP_FLAG_HAVE_TIMEOUTS = BIT(4),
- TPM_CHIP_FLAG_ALWAYS_POWERED = BIT(5),
-};
-
-#define to_tpm_chip(d) container_of(d, struct tpm_chip, dev)
-
-struct tpm_header {
- __be16 tag;
- __be32 length;
- union {
- __be32 ordinal;
- __be32 return_code;
- };
-} __packed;
-
#define TPM_TAG_RQU_COMMAND 193
struct stclear_flags_t {
@@ -186,19 +68,16 @@ struct stclear_flags_t {
u8 bGlobalLock;
} __packed;
-struct tpm_version_t {
- u8 Major;
- u8 Minor;
- u8 revMajor;
- u8 revMinor;
+struct tpm1_version {
+ u8 major;
+ u8 minor;
+ u8 rev_major;
+ u8 rev_minor;
} __packed;
-struct tpm_version_1_2_t {
- __be16 tag;
- u8 Major;
- u8 Minor;
- u8 revMajor;
- u8 revMinor;
+struct tpm1_version2 {
+ __be16 tag;
+ struct tpm1_version version;
} __packed;
struct timeout_t {
@@ -243,8 +122,8 @@ typedef union {
struct stclear_flags_t stclear_flags;
__u8 owned;
__be32 num_pcrs;
- struct tpm_version_t tpm_version;
- struct tpm_version_1_2_t tpm_version_1_2;
+ struct tpm1_version version1;
+ struct tpm1_version2 version2;
__be32 manufacturer_id;
struct timeout_t timeout;
struct duration_t duration;
@@ -274,102 +153,6 @@ enum tpm_sub_capabilities {
* compiler warnings about stack frame size. */
#define TPM_MAX_RNG_DATA 128
-/* A string buffer type for constructing TPM commands. This is based on the
- * ideas of string buffer code in security/keys/trusted.h but is heap based
- * in order to keep the stack usage minimal.
- */
-
-enum tpm_buf_flags {
- TPM_BUF_OVERFLOW = BIT(0),
-};
-
-struct tpm_buf {
- struct page *data_page;
- unsigned int flags;
- u8 *data;
-};
-
-static inline void tpm_buf_reset(struct tpm_buf *buf, u16 tag, u32 ordinal)
-{
- struct tpm_header *head = (struct tpm_header *)buf->data;
-
- head->tag = cpu_to_be16(tag);
- head->length = cpu_to_be32(sizeof(*head));
- head->ordinal = cpu_to_be32(ordinal);
-}
-
-static inline int tpm_buf_init(struct tpm_buf *buf, u16 tag, u32 ordinal)
-{
- buf->data_page = alloc_page(GFP_HIGHUSER);
- if (!buf->data_page)
- return -ENOMEM;
-
- buf->flags = 0;
- buf->data = kmap(buf->data_page);
- tpm_buf_reset(buf, tag, ordinal);
- return 0;
-}
-
-static inline void tpm_buf_destroy(struct tpm_buf *buf)
-{
- kunmap(buf->data_page);
- __free_page(buf->data_page);
-}
-
-static inline u32 tpm_buf_length(struct tpm_buf *buf)
-{
- struct tpm_header *head = (struct tpm_header *)buf->data;
-
- return be32_to_cpu(head->length);
-}
-
-static inline u16 tpm_buf_tag(struct tpm_buf *buf)
-{
- struct tpm_header *head = (struct tpm_header *)buf->data;
-
- return be16_to_cpu(head->tag);
-}
-
-static inline void tpm_buf_append(struct tpm_buf *buf,
- const unsigned char *new_data,
- unsigned int new_len)
-{
- struct tpm_header *head = (struct tpm_header *)buf->data;
- u32 len = tpm_buf_length(buf);
-
- /* Return silently if overflow has already happened. */
- if (buf->flags & TPM_BUF_OVERFLOW)
- return;
-
- if ((len + new_len) > PAGE_SIZE) {
- WARN(1, "tpm_buf: overflow\n");
- buf->flags |= TPM_BUF_OVERFLOW;
- return;
- }
-
- memcpy(&buf->data[len], new_data, new_len);
- head->length = cpu_to_be32(len + new_len);
-}
-
-static inline void tpm_buf_append_u8(struct tpm_buf *buf, const u8 value)
-{
- tpm_buf_append(buf, &value, 1);
-}
-
-static inline void tpm_buf_append_u16(struct tpm_buf *buf, const u16 value)
-{
- __be16 value2 = cpu_to_be16(value);
-
- tpm_buf_append(buf, (u8 *) &value2, 2);
-}
-
-static inline void tpm_buf_append_u32(struct tpm_buf *buf, const u32 value)
-{
- __be32 value2 = cpu_to_be32(value);
-
- tpm_buf_append(buf, (u8 *) &value2, 4);
-}
-
extern struct class *tpm_class;
extern struct class *tpmrm_class;
extern dev_t tpm_devt;
@@ -429,11 +212,6 @@ static inline void tpm_add_ppi(struct tpm_chip *chip)
}
#endif
-static inline u32 tpm2_rc_value(u32 rc)
-{
- return (rc & BIT(7)) ? rc & 0xff : rc;
-}
-
int tpm2_get_timeouts(struct tpm_chip *chip);
int tpm2_pcr_read(struct tpm_chip *chip, u32 pcr_idx,
struct tpm_digest *digest, u16 *digest_size_ptr);
@@ -441,12 +219,6 @@ int tpm2_pcr_extend(struct tpm_chip *chip, u32 pcr_idx,
struct tpm_digest *digests);
int tpm2_get_random(struct tpm_chip *chip, u8 *dest, size_t max);
void tpm2_flush_context(struct tpm_chip *chip, u32 handle);
-int tpm2_seal_trusted(struct tpm_chip *chip,
- struct trusted_key_payload *payload,
- struct trusted_key_options *options);
-int tpm2_unseal_trusted(struct tpm_chip *chip,
- struct trusted_key_payload *payload,
- struct trusted_key_options *options);
ssize_t tpm2_get_tpm_pt(struct tpm_chip *chip, u32 property_id,
u32 *value, const char *desc);
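
Note: the tpm_buf command builder and the TPM2 constants removed from tpm.h
above are relocated rather than dropped: this series moves them to
include/linux/tpm.h so the trusted-keys code (also removed from
tpm-interface.c and tpm2-cmd.c here) can live under security/keys/. The helper
API itself is unchanged; e.g., building a TPM2_CC_GET_RANDOM request with it
(a sketch modeled on tpm2_get_random(); the response-size argument is
illustrative):

    static int demo_get_random(struct tpm_chip *chip, u16 num_bytes)
    {
            struct tpm_buf buf;
            int rc;

            rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_GET_RANDOM);
            if (rc)
                    return rc;

            tpm_buf_append_u16(&buf, num_bytes);    /* bytesRequested */
            rc = tpm_transmit_cmd(chip, &buf, 2, "demo get random");
            /* on success the random bytes follow the response header */
            tpm_buf_destroy(&buf);
            return rc;
    }
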
diff --git a/drivers/char/tpm/tpm1-cmd.c b/drivers/char/tpm/tpm1-cmd.c
index 149e953ca369..ca7158fa6e6c 100644
--- a/drivers/char/tpm/tpm1-cmd.c
+++ b/drivers/char/tpm/tpm1-cmd.c
@@ -343,6 +343,7 @@ int tpm1_get_timeouts(struct tpm_chip *chip)
{
cap_t cap;
unsigned long timeout_old[4], timeout_chip[4], timeout_eff[4];
+ unsigned long durations[3];
ssize_t rc;
rc = tpm1_getcap(chip, TPM_CAP_PROP_TIS_TIMEOUT, &cap, NULL,
@@ -427,6 +428,20 @@ int tpm1_get_timeouts(struct tpm_chip *chip)
usecs_to_jiffies(be32_to_cpu(cap.duration.tpm_long));
chip->duration[TPM_LONG_LONG] = 0; /* not used under 1.2 */
+ /*
+ * Provide the ability for vendor overrides of duration values in case
+ * of misreporting.
+ */
+ if (chip->ops->update_durations)
+ chip->ops->update_durations(chip, durations);
+
+ if (chip->duration_adjusted) {
+ dev_info(&chip->dev, HW_ERR "Adjusting reported durations.");
+ chip->duration[TPM_SHORT] = durations[0];
+ chip->duration[TPM_MEDIUM] = durations[1];
+ chip->duration[TPM_LONG] = durations[2];
+ }
+
/* The Broadcom BCM0102 chipset in a Dell Latitude D820 gets the above
* value wrong and apparently reports msecs rather than usecs. So we
* fix up the resulting too-small TPM_SHORT value to make things work.
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
index ba9acae83bff..fdb457704aa7 100644
--- a/drivers/char/tpm/tpm2-cmd.c
+++ b/drivers/char/tpm/tpm2-cmd.c
@@ -13,20 +13,6 @@
#include "tpm.h"
#include <crypto/hash_info.h>
-#include <keys/trusted-type.h>
-
-enum tpm2_object_attributes {
- TPM2_OA_USER_WITH_AUTH = BIT(6),
-};
-
-enum tpm2_session_attributes {
- TPM2_SA_CONTINUE_SESSION = BIT(0),
-};
-
-struct tpm2_hash {
- unsigned int crypto_id;
- unsigned int tpm_id;
-};
static struct tpm2_hash tpm2_hash_map[] = {
{HASH_ALGO_SHA1, TPM_ALG_SHA1},
@@ -377,299 +363,6 @@ void tpm2_flush_context(struct tpm_chip *chip, u32 handle)
tpm_buf_destroy(&buf);
}
-/**
- * tpm_buf_append_auth() - append TPMS_AUTH_COMMAND to the buffer.
- *
- * @buf: an allocated tpm_buf instance
- * @session_handle: session handle
- * @nonce: the session nonce, may be NULL if not used
- * @nonce_len: the session nonce length, may be 0 if not used
- * @attributes: the session attributes
- * @hmac: the session HMAC or password, may be NULL if not used
- * @hmac_len: the session HMAC or password length, maybe 0 if not used
- */
-static void tpm2_buf_append_auth(struct tpm_buf *buf, u32 session_handle,
- const u8 *nonce, u16 nonce_len,
- u8 attributes,
- const u8 *hmac, u16 hmac_len)
-{
- tpm_buf_append_u32(buf, 9 + nonce_len + hmac_len);
- tpm_buf_append_u32(buf, session_handle);
- tpm_buf_append_u16(buf, nonce_len);
-
- if (nonce && nonce_len)
- tpm_buf_append(buf, nonce, nonce_len);
-
- tpm_buf_append_u8(buf, attributes);
- tpm_buf_append_u16(buf, hmac_len);
-
- if (hmac && hmac_len)
- tpm_buf_append(buf, hmac, hmac_len);
-}
-
-/**
- * tpm2_seal_trusted() - seal the payload of a trusted key
- *
- * @chip: TPM chip to use
- * @payload: the key data in clear and encrypted form
- * @options: authentication values and other options
- *
- * Return: < 0 on error and 0 on success.
- */
-int tpm2_seal_trusted(struct tpm_chip *chip,
- struct trusted_key_payload *payload,
- struct trusted_key_options *options)
-{
- unsigned int blob_len;
- struct tpm_buf buf;
- u32 hash;
- int i;
- int rc;
-
- for (i = 0; i < ARRAY_SIZE(tpm2_hash_map); i++) {
- if (options->hash == tpm2_hash_map[i].crypto_id) {
- hash = tpm2_hash_map[i].tpm_id;
- break;
- }
- }
-
- if (i == ARRAY_SIZE(tpm2_hash_map))
- return -EINVAL;
-
- rc = tpm_buf_init(&buf, TPM2_ST_SESSIONS, TPM2_CC_CREATE);
- if (rc)
- return rc;
-
- tpm_buf_append_u32(&buf, options->keyhandle);
- tpm2_buf_append_auth(&buf, TPM2_RS_PW,
- NULL /* nonce */, 0,
- 0 /* session_attributes */,
- options->keyauth /* hmac */,
- TPM_DIGEST_SIZE);
-
- /* sensitive */
- tpm_buf_append_u16(&buf, 4 + TPM_DIGEST_SIZE + payload->key_len + 1);
-
- tpm_buf_append_u16(&buf, TPM_DIGEST_SIZE);
- tpm_buf_append(&buf, options->blobauth, TPM_DIGEST_SIZE);
- tpm_buf_append_u16(&buf, payload->key_len + 1);
- tpm_buf_append(&buf, payload->key, payload->key_len);
- tpm_buf_append_u8(&buf, payload->migratable);
-
- /* public */
- tpm_buf_append_u16(&buf, 14 + options->policydigest_len);
- tpm_buf_append_u16(&buf, TPM_ALG_KEYEDHASH);
- tpm_buf_append_u16(&buf, hash);
-
- /* policy */
- if (options->policydigest_len) {
- tpm_buf_append_u32(&buf, 0);
- tpm_buf_append_u16(&buf, options->policydigest_len);
- tpm_buf_append(&buf, options->policydigest,
- options->policydigest_len);
- } else {
- tpm_buf_append_u32(&buf, TPM2_OA_USER_WITH_AUTH);
- tpm_buf_append_u16(&buf, 0);
- }
-
- /* public parameters */
- tpm_buf_append_u16(&buf, TPM_ALG_NULL);
- tpm_buf_append_u16(&buf, 0);
-
- /* outside info */
- tpm_buf_append_u16(&buf, 0);
-
- /* creation PCR */
- tpm_buf_append_u32(&buf, 0);
-
- if (buf.flags & TPM_BUF_OVERFLOW) {
- rc = -E2BIG;
- goto out;
- }
-
- rc = tpm_transmit_cmd(chip, &buf, 4, "sealing data");
- if (rc)
- goto out;
-
- blob_len = be32_to_cpup((__be32 *) &buf.data[TPM_HEADER_SIZE]);
- if (blob_len > MAX_BLOB_SIZE) {
- rc = -E2BIG;
- goto out;
- }
- if (tpm_buf_length(&buf) < TPM_HEADER_SIZE + 4 + blob_len) {
- rc = -EFAULT;
- goto out;
- }
-
- memcpy(payload->blob, &buf.data[TPM_HEADER_SIZE + 4], blob_len);
- payload->blob_len = blob_len;
-
-out:
- tpm_buf_destroy(&buf);
-
- if (rc > 0) {
- if (tpm2_rc_value(rc) == TPM2_RC_HASH)
- rc = -EINVAL;
- else
- rc = -EPERM;
- }
-
- return rc;
-}
-
-/**
- * tpm2_load_cmd() - execute a TPM2_Load command
- *
- * @chip: TPM chip to use
- * @payload: the key data in clear and encrypted form
- * @options: authentication values and other options
- * @blob_handle: returned blob handle
- *
- * Return: 0 on success.
- * -E2BIG on wrong payload size.
- * -EPERM on tpm error status.
- * < 0 error from tpm_transmit_cmd.
- */
-static int tpm2_load_cmd(struct tpm_chip *chip,
- struct trusted_key_payload *payload,
- struct trusted_key_options *options,
- u32 *blob_handle)
-{
- struct tpm_buf buf;
- unsigned int private_len;
- unsigned int public_len;
- unsigned int blob_len;
- int rc;
-
- private_len = be16_to_cpup((__be16 *) &payload->blob[0]);
- if (private_len > (payload->blob_len - 2))
- return -E2BIG;
-
- public_len = be16_to_cpup((__be16 *) &payload->blob[2 + private_len]);
- blob_len = private_len + public_len + 4;
- if (blob_len > payload->blob_len)
- return -E2BIG;
-
- rc = tpm_buf_init(&buf, TPM2_ST_SESSIONS, TPM2_CC_LOAD);
- if (rc)
- return rc;
-
- tpm_buf_append_u32(&buf, options->keyhandle);
- tpm2_buf_append_auth(&buf, TPM2_RS_PW,
- NULL /* nonce */, 0,
- 0 /* session_attributes */,
- options->keyauth /* hmac */,
- TPM_DIGEST_SIZE);
-
- tpm_buf_append(&buf, payload->blob, blob_len);
-
- if (buf.flags & TPM_BUF_OVERFLOW) {
- rc = -E2BIG;
- goto out;
- }
-
- rc = tpm_transmit_cmd(chip, &buf, 4, "loading blob");
- if (!rc)
- *blob_handle = be32_to_cpup(
- (__be32 *) &buf.data[TPM_HEADER_SIZE]);
-
-out:
- tpm_buf_destroy(&buf);
-
- if (rc > 0)
- rc = -EPERM;
-
- return rc;
-}
-
-/**
- * tpm2_unseal_cmd() - execute a TPM2_Unload command
- *
- * @chip: TPM chip to use
- * @payload: the key data in clear and encrypted form
- * @options: authentication values and other options
- * @blob_handle: blob handle
- *
- * Return: 0 on success
- * -EPERM on tpm error status
- * < 0 error from tpm_transmit_cmd
- */
-static int tpm2_unseal_cmd(struct tpm_chip *chip,
- struct trusted_key_payload *payload,
- struct trusted_key_options *options,
- u32 blob_handle)
-{
- struct tpm_buf buf;
- u16 data_len;
- u8 *data;
- int rc;
-
- rc = tpm_buf_init(&buf, TPM2_ST_SESSIONS, TPM2_CC_UNSEAL);
- if (rc)
- return rc;
-
- tpm_buf_append_u32(&buf, blob_handle);
- tpm2_buf_append_auth(&buf,
- options->policyhandle ?
- options->policyhandle : TPM2_RS_PW,
- NULL /* nonce */, 0,
- TPM2_SA_CONTINUE_SESSION,
- options->blobauth /* hmac */,
- TPM_DIGEST_SIZE);
-
- rc = tpm_transmit_cmd(chip, &buf, 6, "unsealing");
- if (rc > 0)
- rc = -EPERM;
-
- if (!rc) {
- data_len = be16_to_cpup(
- (__be16 *) &buf.data[TPM_HEADER_SIZE + 4]);
- if (data_len < MIN_KEY_SIZE || data_len > MAX_KEY_SIZE + 1) {
- rc = -EFAULT;
- goto out;
- }
-
- if (tpm_buf_length(&buf) < TPM_HEADER_SIZE + 6 + data_len) {
- rc = -EFAULT;
- goto out;
- }
- data = &buf.data[TPM_HEADER_SIZE + 6];
-
- memcpy(payload->key, data, data_len - 1);
- payload->key_len = data_len - 1;
- payload->migratable = data[data_len - 1];
- }
-
-out:
- tpm_buf_destroy(&buf);
- return rc;
-}
-
-/**
- * tpm2_unseal_trusted() - unseal the payload of a trusted key
- *
- * @chip: TPM chip to use
- * @payload: the key data in clear and encrypted form
- * @options: authentication values and other options
- *
- * Return: Same as with tpm_transmit_cmd.
- */
-int tpm2_unseal_trusted(struct tpm_chip *chip,
- struct trusted_key_payload *payload,
- struct trusted_key_options *options)
-{
- u32 blob_handle;
- int rc;
-
- rc = tpm2_load_cmd(chip, payload, options, &blob_handle);
- if (rc)
- return rc;
-
- rc = tpm2_unseal_cmd(chip, payload, options, blob_handle);
- tpm2_flush_context(chip, blob_handle);
- return rc;
-}
-
struct tpm2_get_cap_out {
u8 more_data;
__be32 subcap_id;
@@ -939,6 +632,10 @@ static int tpm2_get_cc_attrs_tbl(struct tpm_chip *chip)
chip->cc_attrs_tbl = devm_kcalloc(&chip->dev, 4, nr_commands,
GFP_KERNEL);
+ if (!chip->cc_attrs_tbl) {
+ rc = -ENOMEM;
+ goto out;
+ }
rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_GET_CAPABILITY);
if (rc)
diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
index e59f1f91d7f3..a9dcf31eadd2 100644
--- a/drivers/char/tpm/tpm_crb.c
+++ b/drivers/char/tpm/tpm_crb.c
@@ -22,6 +22,7 @@
#include "tpm.h"
#define ACPI_SIG_TPM2 "TPM2"
+#define TPM_CRB_MAX_RESOURCES 3
static const guid_t crb_acpi_start_guid =
GUID_INIT(0x6BBF6CAB, 0x5463, 0x4714,
@@ -91,7 +92,6 @@ enum crb_status {
struct crb_priv {
u32 sm;
const char *hid;
- void __iomem *iobase;
struct crb_regs_head __iomem *regs_h;
struct crb_regs_tail __iomem *regs_t;
u8 __iomem *cmd;
@@ -434,21 +434,27 @@ static const struct tpm_class_ops tpm_crb = {
static int crb_check_resource(struct acpi_resource *ares, void *data)
{
- struct resource *io_res = data;
+ struct resource *iores_array = data;
struct resource_win win;
struct resource *res = &(win.res);
+ int i;
if (acpi_dev_resource_memory(ares, res) ||
acpi_dev_resource_address_space(ares, &win)) {
- *io_res = *res;
- io_res->name = NULL;
+ for (i = 0; i < TPM_CRB_MAX_RESOURCES + 1; ++i) {
+ if (resource_type(iores_array + i) != IORESOURCE_MEM) {
+ iores_array[i] = *res;
+ iores_array[i].name = NULL;
+ break;
+ }
+ }
}
return 1;
}
-static void __iomem *crb_map_res(struct device *dev, struct crb_priv *priv,
- struct resource *io_res, u64 start, u32 size)
+static void __iomem *crb_map_res(struct device *dev, struct resource *iores,
+ void __iomem **iobase_ptr, u64 start, u32 size)
{
struct resource new_res = {
.start = start,
@@ -460,10 +466,16 @@ static void __iomem *crb_map_res(struct device *dev, struct crb_priv *priv,
if (start != new_res.start)
return (void __iomem *) ERR_PTR(-EINVAL);
- if (!resource_contains(io_res, &new_res))
+ if (!iores)
return devm_ioremap_resource(dev, &new_res);
- return priv->iobase + (new_res.start - io_res->start);
+ if (!*iobase_ptr) {
+ *iobase_ptr = devm_ioremap_resource(dev, iores);
+ if (IS_ERR(*iobase_ptr))
+ return *iobase_ptr;
+ }
+
+ return *iobase_ptr + (new_res.start - iores->start);
}
/*
@@ -490,9 +502,13 @@ static u64 crb_fixup_cmd_size(struct device *dev, struct resource *io_res,
static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
struct acpi_table_tpm2 *buf)
{
- struct list_head resources;
- struct resource io_res;
+ struct list_head acpi_resource_list;
+ struct resource iores_array[TPM_CRB_MAX_RESOURCES + 1] = { {0} };
+ void __iomem *iobase_array[TPM_CRB_MAX_RESOURCES] = {NULL};
struct device *dev = &device->dev;
+ struct resource *iores;
+ void __iomem **iobase_ptr;
+ int i;
u32 pa_high, pa_low;
u64 cmd_pa;
u32 cmd_size;
@@ -501,21 +517,41 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
u32 rsp_size;
int ret;
- INIT_LIST_HEAD(&resources);
- ret = acpi_dev_get_resources(device, &resources, crb_check_resource,
- &io_res);
+ INIT_LIST_HEAD(&acpi_resource_list);
+ ret = acpi_dev_get_resources(device, &acpi_resource_list,
+ crb_check_resource, iores_array);
if (ret < 0)
return ret;
- acpi_dev_free_resource_list(&resources);
+ acpi_dev_free_resource_list(&acpi_resource_list);
- if (resource_type(&io_res) != IORESOURCE_MEM) {
+ if (resource_type(iores_array) != IORESOURCE_MEM) {
dev_err(dev, FW_BUG "TPM2 ACPI table does not define a memory resource\n");
return -EINVAL;
+ } else if (resource_type(iores_array + TPM_CRB_MAX_RESOURCES) ==
+ IORESOURCE_MEM) {
+ dev_warn(dev, "TPM2 ACPI table defines too many memory resources\n");
+ memset(iores_array + TPM_CRB_MAX_RESOURCES,
+ 0, sizeof(*iores_array));
+ iores_array[TPM_CRB_MAX_RESOURCES].flags = 0;
}
- priv->iobase = devm_ioremap_resource(dev, &io_res);
- if (IS_ERR(priv->iobase))
- return PTR_ERR(priv->iobase);
+ iores = NULL;
+ iobase_ptr = NULL;
+ for (i = 0; resource_type(iores_array + i) == IORESOURCE_MEM; ++i) {
+ if (buf->control_address >= iores_array[i].start &&
+ buf->control_address + sizeof(struct crb_regs_tail) - 1 <=
+ iores_array[i].end) {
+ iores = iores_array + i;
+ iobase_ptr = iobase_array + i;
+ break;
+ }
+ }
+
+ priv->regs_t = crb_map_res(dev, iores, iobase_ptr, buf->control_address,
+ sizeof(struct crb_regs_tail));
+
+ if (IS_ERR(priv->regs_t))
+ return PTR_ERR(priv->regs_t);
/* The ACPI IO region starts at the head area and continues to include
* the control area, as one nice sane region except for some older
@@ -523,9 +559,10 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
*/
if ((priv->sm == ACPI_TPM2_COMMAND_BUFFER) ||
(priv->sm == ACPI_TPM2_MEMORY_MAPPED)) {
- if (buf->control_address == io_res.start +
+ if (iores &&
+ buf->control_address == iores->start +
sizeof(*priv->regs_h))
- priv->regs_h = priv->iobase;
+ priv->regs_h = *iobase_ptr;
else
dev_warn(dev, FW_BUG "Bad ACPI memory layout");
}
@@ -534,13 +571,6 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
if (ret)
return ret;
- priv->regs_t = crb_map_res(dev, priv, &io_res, buf->control_address,
- sizeof(struct crb_regs_tail));
- if (IS_ERR(priv->regs_t)) {
- ret = PTR_ERR(priv->regs_t);
- goto out_relinquish_locality;
- }
-
/*
* PTT HW bug w/a: wake up the device to access
* possibly not retained registers.
@@ -552,13 +582,26 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
pa_high = ioread32(&priv->regs_t->ctrl_cmd_pa_high);
pa_low = ioread32(&priv->regs_t->ctrl_cmd_pa_low);
cmd_pa = ((u64)pa_high << 32) | pa_low;
- cmd_size = crb_fixup_cmd_size(dev, &io_res, cmd_pa,
- ioread32(&priv->regs_t->ctrl_cmd_size));
+ cmd_size = ioread32(&priv->regs_t->ctrl_cmd_size);
+
+ iores = NULL;
+ iobase_ptr = NULL;
+ for (i = 0; iores_array[i].end; ++i) {
+ if (cmd_pa >= iores_array[i].start &&
+ cmd_pa <= iores_array[i].end) {
+ iores = iores_array + i;
+ iobase_ptr = iobase_array + i;
+ break;
+ }
+ }
+
+ if (iores)
+ cmd_size = crb_fixup_cmd_size(dev, iores, cmd_pa, cmd_size);
dev_dbg(dev, "cmd_hi = %X cmd_low = %X cmd_size %X\n",
pa_high, pa_low, cmd_size);
- priv->cmd = crb_map_res(dev, priv, &io_res, cmd_pa, cmd_size);
+ priv->cmd = crb_map_res(dev, iores, iobase_ptr, cmd_pa, cmd_size);
if (IS_ERR(priv->cmd)) {
ret = PTR_ERR(priv->cmd);
goto out;
@@ -566,11 +609,25 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
memcpy_fromio(&__rsp_pa, &priv->regs_t->ctrl_rsp_pa, 8);
rsp_pa = le64_to_cpu(__rsp_pa);
- rsp_size = crb_fixup_cmd_size(dev, &io_res, rsp_pa,
- ioread32(&priv->regs_t->ctrl_rsp_size));
+ rsp_size = ioread32(&priv->regs_t->ctrl_rsp_size);
+
+ iores = NULL;
+ iobase_ptr = NULL;
+ for (i = 0; resource_type(iores_array + i) == IORESOURCE_MEM; ++i) {
+ if (rsp_pa >= iores_array[i].start &&
+ rsp_pa <= iores_array[i].end) {
+ iores = iores_array + i;
+ iobase_ptr = iobase_array + i;
+ break;
+ }
+ }
+
+ if (iores)
+ rsp_size = crb_fixup_cmd_size(dev, iores, rsp_pa, rsp_size);
if (cmd_pa != rsp_pa) {
- priv->rsp = crb_map_res(dev, priv, &io_res, rsp_pa, rsp_size);
+ priv->rsp = crb_map_res(dev, iores, iobase_ptr,
+ rsp_pa, rsp_size);
ret = PTR_ERR_OR_ZERO(priv->rsp);
goto out;
}
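
Note: the crb rework above replaces the single cached io_res/iobase pair with
an array of up to TPM_CRB_MAX_RESOURCES ACPI memory regions, each ioremapped
lazily the first time an address falls inside it. The containment lookup the
patch repeats for the control, command and response areas is essentially:

    #include <linux/ioport.h>

    /* Return the ACPI memory resource covering [start, start + size). */
    static struct resource *find_iores(struct resource *arr, u64 start, u32 size)
    {
            int i;

            for (i = 0; resource_type(&arr[i]) == IORESOURCE_MEM; i++) {
                    if (start >= arr[i].start &&
                        start + size - 1 <= arr[i].end)
                            return &arr[i];
            }
            return NULL;    /* not covered by any declared region */
    }
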
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index e4fdde93ed4c..e7df342a317d 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -286,7 +286,7 @@ static int tpm_tis_plat_probe(struct platform_device *pdev)
}
tpm_info.res = *res;
- tpm_info.irq = platform_get_irq(pdev, 0);
+ tpm_info.irq = platform_get_irq_optional(pdev, 0);
if (tpm_info.irq <= 0) {
if (pdev != force_pdev)
tpm_info.irq = -1;
diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
index 270f43acbb77..8af2cee1a762 100644
--- a/drivers/char/tpm/tpm_tis_core.c
+++ b/drivers/char/tpm/tpm_tis_core.c
@@ -506,6 +506,84 @@ static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
return rc;
}
+struct tis_vendor_durations_override {
+ u32 did_vid;
+ struct tpm1_version version;
+ unsigned long durations[3];
+};
+
+static const struct tis_vendor_durations_override vendor_dur_overrides[] = {
+ /* STMicroelectronics 0x104a */
+ { 0x0000104a,
+ { 1, 2, 8, 28 },
+ { (2 * 60 * HZ), (2 * 60 * HZ), (2 * 60 * HZ) } },
+};
+
+static void tpm_tis_update_durations(struct tpm_chip *chip,
+ unsigned long *duration_cap)
+{
+ struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+ struct tpm1_version *version;
+ u32 did_vid;
+ int i, rc;
+ cap_t cap;
+
+ chip->duration_adjusted = false;
+
+ if (chip->ops->clk_enable != NULL)
+ chip->ops->clk_enable(chip, true);
+
+ rc = tpm_tis_read32(priv, TPM_DID_VID(0), &did_vid);
+ if (rc < 0) {
+ dev_warn(&chip->dev, "%s: failed to read did_vid. %d\n",
+ __func__, rc);
+ goto out;
+ }
+
+ /* Try to get a TPM version 1.2 or 1.1 TPM_CAP_VERSION_INFO */
+ rc = tpm1_getcap(chip, TPM_CAP_VERSION_1_2, &cap,
+ "attempting to determine the 1.2 version",
+ sizeof(cap.version2));
+ if (!rc) {
+ version = &cap.version2.version;
+ } else {
+ rc = tpm1_getcap(chip, TPM_CAP_VERSION_1_1, &cap,
+ "attempting to determine the 1.1 version",
+ sizeof(cap.version1));
+
+ if (rc)
+ goto out;
+
+ version = &cap.version1;
+ }
+
+ for (i = 0; i != ARRAY_SIZE(vendor_dur_overrides); i++) {
+ if (vendor_dur_overrides[i].did_vid != did_vid)
+ continue;
+
+ if ((version->major ==
+ vendor_dur_overrides[i].version.major) &&
+ (version->minor ==
+ vendor_dur_overrides[i].version.minor) &&
+ (version->rev_major ==
+ vendor_dur_overrides[i].version.rev_major) &&
+ (version->rev_minor ==
+ vendor_dur_overrides[i].version.rev_minor)) {
+
+ memcpy(duration_cap,
+ vendor_dur_overrides[i].durations,
+ sizeof(vendor_dur_overrides[i].durations));
+
+ chip->duration_adjusted = true;
+ goto out;
+ }
+ }
+
+out:
+ if (chip->ops->clk_enable != NULL)
+ chip->ops->clk_enable(chip, false);
+}
+
struct tis_vendor_timeout_override {
u32 did_vid;
unsigned long timeout_us[4];
@@ -842,6 +920,7 @@ static const struct tpm_class_ops tpm_tis = {
.send = tpm_tis_send,
.cancel = tpm_tis_ready,
.update_timeouts = tpm_tis_update_timeouts,
+ .update_durations = tpm_tis_update_durations,
.req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
.req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
.req_canceled = tpm_tis_req_canceled,
diff --git a/drivers/char/tpm/tpm_tis_spi.c b/drivers/char/tpm/tpm_tis_spi.c
index 19513e622053..d1754fd6c573 100644
--- a/drivers/char/tpm/tpm_tis_spi.c
+++ b/drivers/char/tpm/tpm_tis_spi.c
@@ -20,42 +20,64 @@
* Dorn and Kyleen Hall and Jarko Sakkinnen.
*/
+#include <linux/acpi.h>
+#include <linux/completion.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/moduleparam.h>
#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/wait.h>
-#include <linux/acpi.h>
-#include <linux/freezer.h>
+#include <linux/of_device.h>
#include <linux/spi/spi.h>
-#include <linux/gpio.h>
-#include <linux/of_irq.h>
-#include <linux/of_gpio.h>
#include <linux/tpm.h>
+
#include "tpm.h"
#include "tpm_tis_core.h"
+#include "tpm_tis_spi.h"
#define MAX_SPI_FRAMESIZE 64
-struct tpm_tis_spi_phy {
- struct tpm_tis_data priv;
- struct spi_device *spi_device;
- u8 *iobuf;
-};
-
-static inline struct tpm_tis_spi_phy *to_tpm_tis_spi_phy(struct tpm_tis_data *data)
+/*
+ * TCG SPI flow control is documented in section 6.4 of the spec[1]. In short,
+ * keep trying to read from the device until MISO goes high indicating the
+ * wait state has ended.
+ *
+ * [1] https://trustedcomputinggroup.org/resource/pc-client-platform-tpm-profile-ptp-specification/
+ */
+static int tpm_tis_spi_flow_control(struct tpm_tis_spi_phy *phy,
+ struct spi_transfer *spi_xfer)
{
- return container_of(data, struct tpm_tis_spi_phy, priv);
+ struct spi_message m;
+ int ret, i;
+
+ if ((phy->iobuf[3] & 0x01) == 0) {
+ // handle SPI wait states
+ phy->iobuf[0] = 0;
+
+ for (i = 0; i < TPM_RETRY; i++) {
+ spi_xfer->len = 1;
+ spi_message_init(&m);
+ spi_message_add_tail(spi_xfer, &m);
+ ret = spi_sync_locked(phy->spi_device, &m);
+ if (ret < 0)
+ return ret;
+ if (phy->iobuf[0] & 0x01)
+ break;
+ }
+
+ if (i == TPM_RETRY)
+ return -ETIMEDOUT;
+ }
+
+ return 0;
}
-static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
- u8 *in, const u8 *out)
+int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
+ u8 *in, const u8 *out)
{
struct tpm_tis_spi_phy *phy = to_tpm_tis_spi_phy(data);
int ret = 0;
- int i;
struct spi_message m;
struct spi_transfer spi_xfer;
u8 transfer_len;
@@ -82,26 +104,9 @@ static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
if (ret < 0)
goto exit;
- if ((phy->iobuf[3] & 0x01) == 0) {
- // handle SPI wait states
- phy->iobuf[0] = 0;
-
- for (i = 0; i < TPM_RETRY; i++) {
- spi_xfer.len = 1;
- spi_message_init(&m);
- spi_message_add_tail(&spi_xfer, &m);
- ret = spi_sync_locked(phy->spi_device, &m);
- if (ret < 0)
- goto exit;
- if (phy->iobuf[0] & 0x01)
- break;
- }
-
- if (i == TPM_RETRY) {
- ret = -ETIMEDOUT;
- goto exit;
- }
- }
+ ret = phy->flow_control(phy, &spi_xfer);
+ if (ret < 0)
+ goto exit;
spi_xfer.cs_change = 0;
spi_xfer.len = transfer_len;
@@ -117,6 +122,7 @@ static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
spi_message_init(&m);
spi_message_add_tail(&spi_xfer, &m);
+ reinit_completion(&phy->ready);
ret = spi_sync_locked(phy->spi_device, &m);
if (ret < 0)
goto exit;
@@ -146,7 +152,7 @@ static int tpm_tis_spi_write_bytes(struct tpm_tis_data *data, u32 addr,
return tpm_tis_spi_transfer(data, addr, len, NULL, value);
}
-static int tpm_tis_spi_read16(struct tpm_tis_data *data, u32 addr, u16 *result)
+int tpm_tis_spi_read16(struct tpm_tis_data *data, u32 addr, u16 *result)
{
__le16 result_le;
int rc;
@@ -159,7 +165,7 @@ static int tpm_tis_spi_read16(struct tpm_tis_data *data, u32 addr, u16 *result)
return rc;
}
-static int tpm_tis_spi_read32(struct tpm_tis_data *data, u32 addr, u32 *result)
+int tpm_tis_spi_read32(struct tpm_tis_data *data, u32 addr, u32 *result)
{
__le32 result_le;
int rc;
@@ -172,7 +178,7 @@ static int tpm_tis_spi_read32(struct tpm_tis_data *data, u32 addr, u32 *result)
return rc;
}
-static int tpm_tis_spi_write32(struct tpm_tis_data *data, u32 addr, u32 value)
+int tpm_tis_spi_write32(struct tpm_tis_data *data, u32 addr, u32 value)
{
__le32 value_le;
int rc;
@@ -184,6 +190,18 @@ static int tpm_tis_spi_write32(struct tpm_tis_data *data, u32 addr, u32 value)
return rc;
}
+int tpm_tis_spi_init(struct spi_device *spi, struct tpm_tis_spi_phy *phy,
+ int irq, const struct tpm_tis_phy_ops *phy_ops)
+{
+ phy->iobuf = devm_kmalloc(&spi->dev, MAX_SPI_FRAMESIZE, GFP_KERNEL);
+ if (!phy->iobuf)
+ return -ENOMEM;
+
+ phy->spi_device = spi;
+
+ return tpm_tis_core_init(&spi->dev, &phy->priv, irq, phy_ops, NULL);
+}
+
static const struct tpm_tis_phy_ops tpm_spi_phy_ops = {
.read_bytes = tpm_tis_spi_read_bytes,
.write_bytes = tpm_tis_spi_write_bytes,
@@ -202,11 +220,7 @@ static int tpm_tis_spi_probe(struct spi_device *dev)
if (!phy)
return -ENOMEM;
- phy->spi_device = dev;
-
- phy->iobuf = devm_kmalloc(&dev->dev, MAX_SPI_FRAMESIZE, GFP_KERNEL);
- if (!phy->iobuf)
- return -ENOMEM;
+ phy->flow_control = tpm_tis_spi_flow_control;
/* If the SPI device has an IRQ then use that */
if (dev->irq > 0)
@@ -214,11 +228,27 @@ static int tpm_tis_spi_probe(struct spi_device *dev)
else
irq = -1;
- return tpm_tis_core_init(&dev->dev, &phy->priv, irq, &tpm_spi_phy_ops,
- NULL);
+ init_completion(&phy->ready);
+ return tpm_tis_spi_init(dev, phy, irq, &tpm_spi_phy_ops);
+}
+
+typedef int (*tpm_tis_spi_probe_func)(struct spi_device *);
+
+static int tpm_tis_spi_driver_probe(struct spi_device *spi)
+{
+ const struct spi_device_id *spi_dev_id = spi_get_device_id(spi);
+ tpm_tis_spi_probe_func probe_func;
+
+ probe_func = of_device_get_match_data(&spi->dev);
+ if (!probe_func && spi_dev_id)
+ probe_func = (tpm_tis_spi_probe_func)spi_dev_id->driver_data;
+ if (!probe_func)
+ return -ENODEV;
+
+ return probe_func(spi);
}
-static SIMPLE_DEV_PM_OPS(tpm_tis_pm, tpm_pm_suspend, tpm_tis_resume);
+static SIMPLE_DEV_PM_OPS(tpm_tis_pm, tpm_pm_suspend, tpm_tis_spi_resume);
static int tpm_tis_spi_remove(struct spi_device *dev)
{
@@ -230,15 +260,17 @@ static int tpm_tis_spi_remove(struct spi_device *dev)
}
static const struct spi_device_id tpm_tis_spi_id[] = {
- {"tpm_tis_spi", 0},
+ { "tpm_tis_spi", (unsigned long)tpm_tis_spi_probe },
+ { "cr50", (unsigned long)cr50_spi_probe },
{}
};
MODULE_DEVICE_TABLE(spi, tpm_tis_spi_id);
static const struct of_device_id of_tis_spi_match[] = {
- { .compatible = "st,st33htpm-spi", },
- { .compatible = "infineon,slb9670", },
- { .compatible = "tcg,tpm_tis-spi", },
+ { .compatible = "st,st33htpm-spi", .data = tpm_tis_spi_probe },
+ { .compatible = "infineon,slb9670", .data = tpm_tis_spi_probe },
+ { .compatible = "tcg,tpm_tis-spi", .data = tpm_tis_spi_probe },
+ { .compatible = "google,cr50", .data = cr50_spi_probe },
{}
};
MODULE_DEVICE_TABLE(of, of_tis_spi_match);
@@ -251,13 +283,12 @@ MODULE_DEVICE_TABLE(acpi, acpi_tis_spi_match);
static struct spi_driver tpm_tis_spi_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "tpm_tis_spi",
.pm = &tpm_tis_pm,
.of_match_table = of_match_ptr(of_tis_spi_match),
.acpi_match_table = ACPI_PTR(acpi_tis_spi_match),
},
- .probe = tpm_tis_spi_probe,
+ .probe = tpm_tis_spi_driver_probe,
.remove = tpm_tis_spi_remove,
.id_table = tpm_tis_spi_id,
};
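
Note: with two probe personalities in one module, both the OF match .data and
spi_device_id.driver_data (a kernel_ulong_t, hence the cast above) carry a
probe function, and tpm_tis_spi_driver_probe() invokes whichever matched.
The dispatch, condensed:

    static int demo_dispatch_probe(struct spi_device *spi)
    {
            int (*pfn)(struct spi_device *);

            pfn = of_device_get_match_data(&spi->dev);      /* OF boot */
            if (!pfn) {                                     /* plain SPI ID */
                    const struct spi_device_id *id = spi_get_device_id(spi);

                    if (id)
                            pfn = (void *)id->driver_data;
            }
            return pfn ? pfn(spi) : -ENODEV;
    }
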
diff --git a/drivers/char/tpm/tpm_tis_spi.h b/drivers/char/tpm/tpm_tis_spi.h
new file mode 100644
index 000000000000..bba73979c368
--- /dev/null
+++ b/drivers/char/tpm/tpm_tis_spi.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2015 Infineon Technologies AG
+ * Copyright (C) 2016 STMicroelectronics SAS
+ */
+
+#ifndef TPM_TIS_SPI_H
+#define TPM_TIS_SPI_H
+
+#include "tpm_tis_core.h"
+
+struct tpm_tis_spi_phy {
+ struct tpm_tis_data priv;
+ struct spi_device *spi_device;
+ int (*flow_control)(struct tpm_tis_spi_phy *phy,
+ struct spi_transfer *xfer);
+ struct completion ready;
+ unsigned long wake_after;
+
+ u8 *iobuf;
+};
+
+static inline struct tpm_tis_spi_phy *to_tpm_tis_spi_phy(struct tpm_tis_data *data)
+{
+ return container_of(data, struct tpm_tis_spi_phy, priv);
+}
+
+extern int tpm_tis_spi_init(struct spi_device *spi, struct tpm_tis_spi_phy *phy,
+ int irq, const struct tpm_tis_phy_ops *phy_ops);
+
+extern int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
+ u8 *in, const u8 *out);
+
+extern int tpm_tis_spi_read16(struct tpm_tis_data *data, u32 addr, u16 *result);
+extern int tpm_tis_spi_read32(struct tpm_tis_data *data, u32 addr, u32 *result);
+extern int tpm_tis_spi_write32(struct tpm_tis_data *data, u32 addr, u32 value);
+
+#ifdef CONFIG_TCG_TIS_SPI_CR50
+extern int cr50_spi_probe(struct spi_device *spi);
+#else
+static inline int cr50_spi_probe(struct spi_device *spi)
+{
+ return -ENODEV;
+}
+#endif
+
+#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_TCG_TIS_SPI_CR50)
+extern int tpm_tis_spi_resume(struct device *dev);
+#else
+#define tpm_tis_spi_resume NULL
+#endif
+
+#endif
diff --git a/drivers/char/tpm/tpm_tis_spi_cr50.c b/drivers/char/tpm/tpm_tis_spi_cr50.c
new file mode 100644
index 000000000000..37d72e818335
--- /dev/null
+++ b/drivers/char/tpm/tpm_tis_spi_cr50.c
@@ -0,0 +1,322 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2016 Google, Inc
+ *
+ * This device driver implements a TCG PTP FIFO interface over SPI for chips
+ * with Cr50 firmware.
+ * It is based on tpm_tis_spi driver by Peter Huewe and Christophe Ricard.
+ */
+
+#include <linux/completion.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pm.h>
+#include <linux/spi/spi.h>
+#include <linux/wait.h>
+
+#include "tpm_tis_core.h"
+#include "tpm_tis_spi.h"
+
+/*
+ * Cr50 timing constants:
+ * - can go to sleep not earlier than after CR50_SLEEP_DELAY_MSEC.
+ * - needs up to CR50_WAKE_START_DELAY_USEC to wake after sleep.
+ * - requires waiting for "ready" IRQ, if supported; or waiting for at least
+ * CR50_NOIRQ_ACCESS_DELAY_MSEC between transactions, if IRQ is not supported.
+ * - waits for up to CR50_FLOW_CONTROL for flow control 'ready' indication.
+ */
+#define CR50_SLEEP_DELAY_MSEC 1000
+#define CR50_WAKE_START_DELAY_USEC 1000
+#define CR50_NOIRQ_ACCESS_DELAY msecs_to_jiffies(2)
+#define CR50_READY_IRQ_TIMEOUT msecs_to_jiffies(TPM2_TIMEOUT_A)
+#define CR50_FLOW_CONTROL msecs_to_jiffies(TPM2_TIMEOUT_A)
+#define MAX_IRQ_CONFIRMATION_ATTEMPTS 3
+
+#define TPM_CR50_FW_VER(l) (0x0f90 | ((l) << 12))
+#define TPM_CR50_MAX_FW_VER_LEN 64
+
+struct cr50_spi_phy {
+ struct tpm_tis_spi_phy spi_phy;
+
+ struct mutex time_track_mutex;
+ unsigned long last_access;
+
+ unsigned long access_delay;
+
+ unsigned int irq_confirmation_attempt;
+ bool irq_needs_confirmation;
+ bool irq_confirmed;
+};
+
+static inline struct cr50_spi_phy *to_cr50_spi_phy(struct tpm_tis_spi_phy *phy)
+{
+ return container_of(phy, struct cr50_spi_phy, spi_phy);
+}
+
+/*
+ * The cr50 interrupt handler just signals waiting threads that the
+ * interrupt was asserted. It does not do any processing triggered
+ * by interrupts but is instead used to avoid fixed delays.
+ */
+static irqreturn_t cr50_spi_irq_handler(int dummy, void *dev_id)
+{
+ struct cr50_spi_phy *cr50_phy = dev_id;
+
+ cr50_phy->irq_confirmed = true;
+ complete(&cr50_phy->spi_phy.ready);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Cr50 needs to have at least some delay between consecutive
+ * transactions. Make sure we wait.
+ */
+static void cr50_ensure_access_delay(struct cr50_spi_phy *phy)
+{
+ unsigned long allowed_access = phy->last_access + phy->access_delay;
+ unsigned long time_now = jiffies;
+ struct device *dev = &phy->spi_phy.spi_device->dev;
+
+ /*
+ * Note: There is a small chance, if Cr50 is not accessed in a few days,
+ * that time_in_range will not provide the correct result after the wrap
+ * around for jiffies. In this case, we'll have an unneeded short delay,
+ * which is fine.
+ */
+ if (time_in_range_open(time_now, phy->last_access, allowed_access)) {
+ unsigned long remaining, timeout = allowed_access - time_now;
+
+ remaining = wait_for_completion_timeout(&phy->spi_phy.ready,
+ timeout);
+ if (!remaining && phy->irq_confirmed)
+ dev_warn(dev, "Timeout waiting for TPM ready IRQ\n");
+ }
+
+ if (phy->irq_needs_confirmation) {
+ unsigned int attempt = ++phy->irq_confirmation_attempt;
+
+ if (phy->irq_confirmed) {
+ phy->irq_needs_confirmation = false;
+ phy->access_delay = CR50_READY_IRQ_TIMEOUT;
+ dev_info(dev, "TPM ready IRQ confirmed on attempt %u\n",
+ attempt);
+ } else if (attempt > MAX_IRQ_CONFIRMATION_ATTEMPTS) {
+ phy->irq_needs_confirmation = false;
+ dev_warn(dev, "IRQ not confirmed - will use delays\n");
+ }
+ }
+}
+
+/*
+ * Cr50 might go to sleep if there is no SPI activity for some time and
+ * miss the first few bits/bytes on the bus. In such case, wake it up
+ * by asserting CS and give it time to start up.
+ */
+static bool cr50_needs_waking(struct cr50_spi_phy *phy)
+{
+ /*
+ * Note: There is a small chance, if Cr50 is not accessed in a few days,
+ * that time_in_range will not provide the correct result after the wrap
+ * around for jiffies. In this case, we'll probably timeout or read
+ * incorrect value from TPM_STS and just retry the operation.
+ */
+ return !time_in_range_open(jiffies, phy->last_access,
+ phy->spi_phy.wake_after);
+}
+
+static void cr50_wake_if_needed(struct cr50_spi_phy *cr50_phy)
+{
+ struct tpm_tis_spi_phy *phy = &cr50_phy->spi_phy;
+
+ if (cr50_needs_waking(cr50_phy)) {
+ /* Assert CS, wait 1 msec, deassert CS */
+ struct spi_transfer spi_cs_wake = { .delay_usecs = 1000 };
+
+ spi_sync_transfer(phy->spi_device, &spi_cs_wake, 1);
+ /* Wait for it to fully wake */
+ usleep_range(CR50_WAKE_START_DELAY_USEC,
+ CR50_WAKE_START_DELAY_USEC * 2);
+ }
+
+ /* Reset the time when we need to wake Cr50 again */
+ phy->wake_after = jiffies + msecs_to_jiffies(CR50_SLEEP_DELAY_MSEC);
+}
+
+/*
+ * Flow control: clock the bus and wait for cr50 to set LSB before
+ * sending/receiving data. TCG PTP spec allows it to happen during
+ * the last byte of header, but cr50 never does that in practice,
+ * and earlier versions had a bug when it was set too early, so don't
+ * check for it during header transfer.
+ */
+static int cr50_spi_flow_control(struct tpm_tis_spi_phy *phy,
+ struct spi_transfer *spi_xfer)
+{
+ struct device *dev = &phy->spi_device->dev;
+ unsigned long timeout = jiffies + CR50_FLOW_CONTROL;
+ struct spi_message m;
+ int ret;
+
+ spi_xfer->len = 1;
+
+ do {
+ spi_message_init(&m);
+ spi_message_add_tail(spi_xfer, &m);
+ ret = spi_sync_locked(phy->spi_device, &m);
+ if (ret < 0)
+ return ret;
+
+ if (time_after(jiffies, timeout)) {
+ dev_warn(dev, "Timeout during flow control\n");
+ return -EBUSY;
+ }
+ } while (!(phy->iobuf[0] & 0x01));
+
+ return 0;
+}
+
+static int tpm_tis_spi_cr50_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
+ u8 *in, const u8 *out)
+{
+ struct tpm_tis_spi_phy *phy = to_tpm_tis_spi_phy(data);
+ struct cr50_spi_phy *cr50_phy = to_cr50_spi_phy(phy);
+ int ret;
+
+ mutex_lock(&cr50_phy->time_track_mutex);
+ /*
+ * Do this outside of spi_bus_lock in case cr50 is not the
+ * only device on that spi bus.
+ */
+ cr50_ensure_access_delay(cr50_phy);
+ cr50_wake_if_needed(cr50_phy);
+
+ ret = tpm_tis_spi_transfer(data, addr, len, in, out);
+
+ cr50_phy->last_access = jiffies;
+ mutex_unlock(&cr50_phy->time_track_mutex);
+
+ return ret;
+}
+
+static int tpm_tis_spi_cr50_read_bytes(struct tpm_tis_data *data, u32 addr,
+ u16 len, u8 *result)
+{
+ return tpm_tis_spi_cr50_transfer(data, addr, len, result, NULL);
+}
+
+static int tpm_tis_spi_cr50_write_bytes(struct tpm_tis_data *data, u32 addr,
+ u16 len, const u8 *value)
+{
+ return tpm_tis_spi_cr50_transfer(data, addr, len, NULL, value);
+}
+
+static const struct tpm_tis_phy_ops tpm_spi_cr50_phy_ops = {
+ .read_bytes = tpm_tis_spi_cr50_read_bytes,
+ .write_bytes = tpm_tis_spi_cr50_write_bytes,
+ .read16 = tpm_tis_spi_read16,
+ .read32 = tpm_tis_spi_read32,
+ .write32 = tpm_tis_spi_write32,
+};
+
+static void cr50_print_fw_version(struct tpm_tis_data *data)
+{
+ struct tpm_tis_spi_phy *phy = to_tpm_tis_spi_phy(data);
+ int i, len = 0;
+ char fw_ver[TPM_CR50_MAX_FW_VER_LEN + 1];
+ char fw_ver_block[4];
+
+ /*
+ * Write anything to TPM_CR50_FW_VER to start from the beginning
+ * of the version string
+ */
+ tpm_tis_write8(data, TPM_CR50_FW_VER(data->locality), 0);
+
+ /* Read the string, 4 bytes at a time, until we get '\0' */
+ do {
+ tpm_tis_read_bytes(data, TPM_CR50_FW_VER(data->locality), 4,
+ fw_ver_block);
+ for (i = 0; i < 4 && fw_ver_block[i]; ++len, ++i)
+ fw_ver[len] = fw_ver_block[i];
+ } while (i == 4 && len < TPM_CR50_MAX_FW_VER_LEN);
+ fw_ver[len] = '\0';
+
+ dev_info(&phy->spi_device->dev, "Cr50 firmware version: %s\n", fw_ver);
+}
+
+int cr50_spi_probe(struct spi_device *spi)
+{
+ struct tpm_tis_spi_phy *phy;
+ struct cr50_spi_phy *cr50_phy;
+ int ret;
+ struct tpm_chip *chip;
+
+ cr50_phy = devm_kzalloc(&spi->dev, sizeof(*cr50_phy), GFP_KERNEL);
+ if (!cr50_phy)
+ return -ENOMEM;
+
+ phy = &cr50_phy->spi_phy;
+ phy->flow_control = cr50_spi_flow_control;
+ phy->wake_after = jiffies;
+ init_completion(&phy->ready);
+
+ cr50_phy->access_delay = CR50_NOIRQ_ACCESS_DELAY;
+ cr50_phy->last_access = jiffies;
+ mutex_init(&cr50_phy->time_track_mutex);
+
+ if (spi->irq > 0) {
+ ret = devm_request_irq(&spi->dev, spi->irq,
+ cr50_spi_irq_handler,
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ "cr50_spi", cr50_phy);
+ if (ret < 0) {
+ if (ret == -EPROBE_DEFER)
+ return ret;
+ dev_warn(&spi->dev, "Requesting IRQ %d failed: %d\n",
+ spi->irq, ret);
+ /*
+			 * This is not fatal: the driver will fall back to
+			 * delays automatically, since the ready completion
+			 * will never be signalled without a registered IRQ
+			 * handler.
+ * So, just fall through.
+ */
+ } else {
+ /*
+ * IRQ requested, let's verify that it is actually
+ * triggered, before relying on it.
+ */
+ cr50_phy->irq_needs_confirmation = true;
+ }
+ } else {
+ dev_warn(&spi->dev,
+ "No IRQ - will use delays between transactions.\n");
+ }
+
+ ret = tpm_tis_spi_init(spi, phy, -1, &tpm_spi_cr50_phy_ops);
+ if (ret)
+ return ret;
+
+ cr50_print_fw_version(&phy->priv);
+
+ chip = dev_get_drvdata(&spi->dev);
+ chip->flags |= TPM_CHIP_FLAG_FIRMWARE_POWER_MANAGED;
+
+ return 0;
+}
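The IRQ handling in the probe above follows a common optional-interrupt pattern: propagate -EPROBE_DEFER because the interrupt provider may still appear, otherwise degrade to delays. A condensed, hypothetical sketch of just that pattern (example_isr and the "example" name are placeholders):

	static irqreturn_t example_isr(int irq, void *arg);	/* placeholder */

	static int example_request_optional_irq(struct spi_device *spi)
	{
		int ret;

		if (spi->irq <= 0)
			return 0;		/* fall back to delays */

		ret = devm_request_irq(&spi->dev, spi->irq, example_isr,
				       IRQF_TRIGGER_RISING | IRQF_ONESHOT,
				       "example", NULL);
		if (ret == -EPROBE_DEFER)
			return ret;		/* provider may appear later */
		if (ret < 0)
			dev_warn(&spi->dev, "no IRQ (%d), using delays\n", ret);

		return 0;			/* IRQ failure is non-fatal */
	}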
+
+#ifdef CONFIG_PM_SLEEP
+int tpm_tis_spi_resume(struct device *dev)
+{
+ struct tpm_chip *chip = dev_get_drvdata(dev);
+ struct tpm_tis_data *data = dev_get_drvdata(&chip->dev);
+ struct tpm_tis_spi_phy *phy = to_tpm_tis_spi_phy(data);
+ /*
+	 * Jiffies are not incremented during suspend, so we need to reset
+	 * the time after which to wake Cr50 on resume.
+ */
+ phy->wake_after = jiffies;
+
+ return tpm_tis_resume(dev);
+}
+#endif
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 7270e7b69262..3259426f01dc 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -1325,24 +1325,24 @@ static void set_console_size(struct port *port, u16 rows, u16 cols)
port->cons.ws.ws_col = cols;
}
-static unsigned int fill_queue(struct virtqueue *vq, spinlock_t *lock)
+static int fill_queue(struct virtqueue *vq, spinlock_t *lock)
{
struct port_buffer *buf;
- unsigned int nr_added_bufs;
+ int nr_added_bufs;
int ret;
nr_added_bufs = 0;
do {
buf = alloc_buf(vq->vdev, PAGE_SIZE, 0);
if (!buf)
- break;
+ return -ENOMEM;
spin_lock_irq(lock);
ret = add_inbuf(vq, buf);
if (ret < 0) {
spin_unlock_irq(lock);
free_buf(buf, true);
- break;
+ return ret;
}
nr_added_bufs++;
spin_unlock_irq(lock);
@@ -1362,7 +1362,6 @@ static int add_port(struct ports_device *portdev, u32 id)
char debugfs_name[16];
struct port *port;
dev_t devt;
- unsigned int nr_added_bufs;
int err;
port = kmalloc(sizeof(*port), GFP_KERNEL);
@@ -1421,11 +1420,13 @@ static int add_port(struct ports_device *portdev, u32 id)
spin_lock_init(&port->outvq_lock);
init_waitqueue_head(&port->waitqueue);
- /* Fill the in_vq with buffers so the host can send us data. */
- nr_added_bufs = fill_queue(port->in_vq, &port->inbuf_lock);
- if (!nr_added_bufs) {
+	/*
+	 * We can safely ignore -ENOSPC because it means the queue
+	 * already has buffers. Buffers are removed only by
+	 * virtcons_remove(), not by unplug_port().
+	 */
+ err = fill_queue(port->in_vq, &port->inbuf_lock);
+ if (err < 0 && err != -ENOSPC) {
dev_err(port->dev, "Error allocating inbufs\n");
- err = -ENOMEM;
goto free_device;
}
@@ -2059,14 +2060,11 @@ static int virtcons_probe(struct virtio_device *vdev)
INIT_WORK(&portdev->control_work, &control_work_handler);
if (multiport) {
- unsigned int nr_added_bufs;
-
spin_lock_init(&portdev->c_ivq_lock);
spin_lock_init(&portdev->c_ovq_lock);
- nr_added_bufs = fill_queue(portdev->c_ivq,
- &portdev->c_ivq_lock);
- if (!nr_added_bufs) {
+ err = fill_queue(portdev->c_ivq, &portdev->c_ivq_lock);
+ if (err < 0) {
dev_err(&vdev->dev,
"Error allocating buffers for control queue\n");
/*
@@ -2077,7 +2075,7 @@ static int virtcons_probe(struct virtio_device *vdev)
VIRTIO_CONSOLE_DEVICE_READY, 0);
/* Device was functional: we need full cleanup. */
virtcons_remove(vdev);
- return -ENOMEM;
+ return err;
}
} else {
/*
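With fill_queue() converted to return 0 or a negative errno, the two call sites above encode different policies; a sketch of the resulting contract, assuming the hunks apply as shown:

	err = fill_queue(port->in_vq, &port->inbuf_lock);
	if (err < 0 && err != -ENOSPC)	/* -ENOSPC: buffers already queued */
		goto free_device;

	err = fill_queue(portdev->c_ivq, &portdev->c_ivq_lock);
	if (err < 0)			/* control queue tolerates no error */
		return err;		/* after the full cleanup shown above */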
diff --git a/drivers/char/xillybus/xillybus_of.c b/drivers/char/xillybus/xillybus_of.c
index bfafd8f5e826..96b6de8a30e5 100644
--- a/drivers/char/xillybus/xillybus_of.c
+++ b/drivers/char/xillybus/xillybus_of.c
@@ -116,7 +116,6 @@ static int xilly_drv_probe(struct platform_device *op)
struct xilly_endpoint *endpoint;
int rc;
int irq;
- struct resource *res;
struct xilly_endpoint_hardware *ephw = &of_hw;
if (of_property_read_bool(dev->of_node, "dma-coherent"))
@@ -129,9 +128,7 @@ static int xilly_drv_probe(struct platform_device *op)
dev_set_drvdata(dev, endpoint);
- res = platform_get_resource(op, IORESOURCE_MEM, 0);
- endpoint->registers = devm_ioremap_resource(dev, res);
-
+ endpoint->registers = devm_platform_ioremap_resource(op, 0);
if (IS_ERR(endpoint->registers))
return PTR_ERR(endpoint->registers);
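devm_platform_ioremap_resource() simply folds together the lookup-and-map pair deleted above; a sketch of the equivalence:

	/* before: two steps, struct resource visible to the caller */
	res  = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);

	/* after: one call, same error-pointer semantics on failure */
	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);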
diff --git a/drivers/clocksource/hyperv_timer.c b/drivers/clocksource/hyperv_timer.c
index 2317d4e3daaf..287d8d58c21a 100644
--- a/drivers/clocksource/hyperv_timer.c
+++ b/drivers/clocksource/hyperv_timer.c
@@ -17,6 +17,7 @@
#include <linux/clocksource.h>
#include <linux/sched_clock.h>
#include <linux/mm.h>
+#include <linux/cpuhotplug.h>
#include <clocksource/hyperv_timer.h>
#include <asm/hyperv-tlfs.h>
#include <asm/mshyperv.h>
@@ -30,6 +31,15 @@ static u64 hv_sched_clock_offset __ro_after_init;
* mechanism is used when running on older versions of Hyper-V
* that don't support Direct Mode. While Hyper-V provides
 * four stimers per CPU, Linux uses only stimer0.
+ *
+ * Because Direct Mode does not require processing a VMbus
+ * message, stimer interrupts can be enabled earlier in the
+ * process of booting a CPU, consistent with when timer
+ * interrupts are enabled for other clocksource drivers.
+ * However, for legacy versions of Hyper-V when Direct Mode
+ * is not enabled, setting up stimer interrupts must be
+ * delayed until VMbus is initialized and can process the
+ * interrupt message.
*/
static bool direct_mode_enabled;
@@ -102,17 +112,12 @@ static int hv_ce_set_oneshot(struct clock_event_device *evt)
/*
* hv_stimer_init - Per-cpu initialization of the clockevent
*/
-void hv_stimer_init(unsigned int cpu)
+static int hv_stimer_init(unsigned int cpu)
{
struct clock_event_device *ce;
- /*
- * Synthetic timers are always available except on old versions of
- * Hyper-V on x86. In that case, just return as Linux will use a
- * clocksource based on emulated PIT or LAPIC timer hardware.
- */
- if (!(ms_hyperv.features & HV_MSR_SYNTIMER_AVAILABLE))
- return;
+ if (!hv_clock_event)
+ return 0;
ce = per_cpu_ptr(hv_clock_event, cpu);
ce->name = "Hyper-V clockevent";
@@ -127,28 +132,55 @@ void hv_stimer_init(unsigned int cpu)
HV_CLOCK_HZ,
HV_MIN_DELTA_TICKS,
HV_MAX_MAX_DELTA_TICKS);
+ return 0;
}
-EXPORT_SYMBOL_GPL(hv_stimer_init);
/*
* hv_stimer_cleanup - Per-cpu cleanup of the clockevent
*/
-void hv_stimer_cleanup(unsigned int cpu)
+int hv_stimer_cleanup(unsigned int cpu)
{
struct clock_event_device *ce;
- /* Turn off clockevent device */
- if (ms_hyperv.features & HV_MSR_SYNTIMER_AVAILABLE) {
- ce = per_cpu_ptr(hv_clock_event, cpu);
+ if (!hv_clock_event)
+ return 0;
+
+ /*
+ * In the legacy case where Direct Mode is not enabled
+ * (which can only be on x86/64), stimer cleanup happens
+ * relatively early in the CPU offlining process. We
+ * must unbind the stimer-based clockevent device so
+ * that the LAPIC timer can take over until clockevents
+ * are no longer needed in the offlining process. Note
+ * that clockevents_unbind_device() eventually calls
+ * hv_ce_shutdown().
+ *
+ * The unbind should not be done when Direct Mode is
+ * enabled because we may be on an architecture where
+	 * there are no other clockevent devices to fall back to.
+ */
+ ce = per_cpu_ptr(hv_clock_event, cpu);
+ if (direct_mode_enabled)
hv_ce_shutdown(ce);
- }
+ else
+ clockevents_unbind_device(ce, cpu);
+
+ return 0;
}
EXPORT_SYMBOL_GPL(hv_stimer_cleanup);
/* hv_stimer_alloc - Global initialization of the clockevent and stimer0 */
-int hv_stimer_alloc(int sint)
+int hv_stimer_alloc(void)
{
- int ret;
+ int ret = 0;
+
+ /*
+ * Synthetic timers are always available except on old versions of
+	 * Hyper-V on x86. In that case, return an error, as Linux will use a
+	 * clockevent based on the emulated LAPIC timer hardware.
+ */
+ if (!(ms_hyperv.features & HV_MSR_SYNTIMER_AVAILABLE))
+ return -EINVAL;
hv_clock_event = alloc_percpu(struct clock_event_device);
if (!hv_clock_event)
@@ -159,22 +191,78 @@ int hv_stimer_alloc(int sint)
if (direct_mode_enabled) {
ret = hv_setup_stimer0_irq(&stimer0_irq, &stimer0_vector,
hv_stimer0_isr);
- if (ret) {
- free_percpu(hv_clock_event);
- hv_clock_event = NULL;
- return ret;
- }
+ if (ret)
+ goto free_percpu;
+
+ /*
+ * Since we are in Direct Mode, stimer initialization
+ * can be done now with a CPUHP value in the same range
+ * as other clockevent devices.
+ */
+ ret = cpuhp_setup_state(CPUHP_AP_HYPERV_TIMER_STARTING,
+ "clockevents/hyperv/stimer:starting",
+ hv_stimer_init, hv_stimer_cleanup);
+ if (ret < 0)
+ goto free_stimer0_irq;
}
+ return ret;
- stimer0_message_sint = sint;
- return 0;
+free_stimer0_irq:
+ hv_remove_stimer0_irq(stimer0_irq);
+ stimer0_irq = 0;
+free_percpu:
+ free_percpu(hv_clock_event);
+ hv_clock_event = NULL;
+ return ret;
}
EXPORT_SYMBOL_GPL(hv_stimer_alloc);
+/*
+ * hv_stimer_legacy_init -- Called from the VMbus driver to handle
+ * the case when Direct Mode is not enabled, and the stimer
+ * must be initialized late in the CPU onlining process.
+ */
+void hv_stimer_legacy_init(unsigned int cpu, int sint)
+{
+ if (direct_mode_enabled)
+ return;
+
+ /*
+ * This function gets called by each vCPU, so setting the
+	 * global stimer0_message_sint value each time is conceptually
+ * not ideal, but the value passed in is always the same and
+ * it avoids introducing yet another interface into this
+ * clocksource driver just to set the sint in the legacy case.
+ */
+ stimer0_message_sint = sint;
+ (void)hv_stimer_init(cpu);
+}
+EXPORT_SYMBOL_GPL(hv_stimer_legacy_init);
+
+/*
+ * hv_stimer_legacy_cleanup -- Called from the VMbus driver to
+ * handle the case when Direct Mode is not enabled, and the
+ * stimer must be cleaned up early in the CPU offlining
+ * process.
+ */
+void hv_stimer_legacy_cleanup(unsigned int cpu)
+{
+ if (direct_mode_enabled)
+ return;
+ (void)hv_stimer_cleanup(cpu);
+}
+EXPORT_SYMBOL_GPL(hv_stimer_legacy_cleanup);
+
/* hv_stimer_free - Free global resources allocated by hv_stimer_alloc() */
void hv_stimer_free(void)
{
- if (direct_mode_enabled && (stimer0_irq != 0)) {
+ if (!hv_clock_event)
+ return;
+
+ if (direct_mode_enabled) {
+ cpuhp_remove_state(CPUHP_AP_HYPERV_TIMER_STARTING);
hv_remove_stimer0_irq(stimer0_irq);
stimer0_irq = 0;
}
@@ -190,14 +278,20 @@ EXPORT_SYMBOL_GPL(hv_stimer_free);
void hv_stimer_global_cleanup(void)
{
int cpu;
- struct clock_event_device *ce;
- if (ms_hyperv.features & HV_MSR_SYNTIMER_AVAILABLE) {
- for_each_present_cpu(cpu) {
- ce = per_cpu_ptr(hv_clock_event, cpu);
- clockevents_unbind_device(ce, cpu);
- }
+ /*
+	 * hv_stimer_legacy_cleanup() will stop the stimer if Direct
+	 * Mode is not enabled, and fall back to the LAPIC timer.
+ */
+ for_each_present_cpu(cpu) {
+ hv_stimer_legacy_cleanup(cpu);
}
+
+ /*
+ * If Direct Mode is enabled, the cpuhp teardown callback
+ * (hv_stimer_cleanup) will be run on all CPUs to stop the
+ * stimers.
+ */
hv_stimer_free();
}
EXPORT_SYMBOL_GPL(hv_stimer_global_cleanup);
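In the Direct Mode path, per-CPU setup and teardown are now driven by the CPU hotplug state machine instead of being called directly from VMbus. A sketch of the cpuhp contract relied on above: the startup callback runs on each CPU as it comes online (and immediately on CPUs already online when the state is installed), and removing the state runs the teardown on every CPU:

	ret = cpuhp_setup_state(CPUHP_AP_HYPERV_TIMER_STARTING,
				"clockevents/hyperv/stimer:starting",
				hv_stimer_init,		/* per-CPU startup */
				hv_stimer_cleanup);	/* per-CPU teardown */
	if (ret < 0)
		goto free_stimer0_irq;

	/* later, during global cleanup: */
	cpuhp_remove_state(CPUHP_AP_HYPERV_TIMER_STARTING);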
diff --git a/drivers/clocksource/samsung_pwm_timer.c b/drivers/clocksource/samsung_pwm_timer.c
index 895f53eb5771..dae1b2b5a0c5 100644
--- a/drivers/clocksource/samsung_pwm_timer.c
+++ b/drivers/clocksource/samsung_pwm_timer.c
@@ -430,8 +430,7 @@ static int __init samsung_pwm_alloc(struct device_node *np,
of_property_for_each_u32(np, "samsung,pwm-outputs", prop, cur, val) {
if (val >= SAMSUNG_PWM_NUM) {
- pr_warning("%s: invalid channel index in samsung,pwm-outputs property\n",
- __func__);
+ pr_warn("%s: invalid channel index in samsung,pwm-outputs property\n", __func__);
continue;
}
pwm.variant.output_mask |= 1 << val;
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index a905796f7f85..3858d86cf409 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -49,14 +49,6 @@ config ARM_ARMADA_8K_CPUFREQ
If in doubt, say N.
-# big LITTLE core layer and glue drivers
-config ARM_BIG_LITTLE_CPUFREQ
- tristate "Generic ARM big LITTLE CPUfreq driver"
- depends on ARM_CPU_TOPOLOGY && HAVE_CLK
- select PM_OPP
- help
- This enables the Generic CPUfreq driver for ARM big.LITTLE platforms.
-
config ARM_SCPI_CPUFREQ
tristate "SCPI based CPUfreq driver"
depends on ARM_SCPI_PROTOCOL && COMMON_CLK_SCPI
@@ -69,7 +61,9 @@ config ARM_SCPI_CPUFREQ
config ARM_VEXPRESS_SPC_CPUFREQ
tristate "Versatile Express SPC based CPUfreq driver"
- depends on ARM_BIG_LITTLE_CPUFREQ && ARCH_VEXPRESS_SPC
+ depends on ARM_CPU_TOPOLOGY && HAVE_CLK
+ depends on ARCH_VEXPRESS_SPC
+ select PM_OPP
help
	  This adds the CPUfreq driver support for Versatile Express
big.LITTLE platforms using SPC for power management.
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 9a9f5ccd13d9..f6670c4abbb0 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -47,8 +47,6 @@ obj-$(CONFIG_X86_SFI_CPUFREQ) += sfi-cpufreq.o
##################################################################################
# ARM SoC drivers
-obj-$(CONFIG_ARM_BIG_LITTLE_CPUFREQ) += arm_big_little.o
-
obj-$(CONFIG_ARM_ARMADA_37XX_CPUFREQ) += armada-37xx-cpufreq.o
obj-$(CONFIG_ARM_ARMADA_8K_CPUFREQ) += armada-8k-cpufreq.o
obj-$(CONFIG_ARM_BRCMSTB_AVS_CPUFREQ) += brcmstb-avs-cpufreq.o
diff --git a/drivers/cpufreq/arm_big_little.c b/drivers/cpufreq/arm_big_little.c
deleted file mode 100644
index 7fe52fcddcf1..000000000000
--- a/drivers/cpufreq/arm_big_little.c
+++ /dev/null
@@ -1,658 +0,0 @@
-/*
- * ARM big.LITTLE Platforms CPUFreq support
- *
- * Copyright (C) 2013 ARM Ltd.
- * Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com>
- *
- * Copyright (C) 2013 Linaro.
- * Viresh Kumar <viresh.kumar@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/clk.h>
-#include <linux/cpu.h>
-#include <linux/cpufreq.h>
-#include <linux/cpumask.h>
-#include <linux/cpu_cooling.h>
-#include <linux/export.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/of_platform.h>
-#include <linux/pm_opp.h>
-#include <linux/slab.h>
-#include <linux/topology.h>
-#include <linux/types.h>
-
-#include "arm_big_little.h"
-
-/* Currently we support only two clusters */
-#define A15_CLUSTER 0
-#define A7_CLUSTER 1
-#define MAX_CLUSTERS 2
-
-#ifdef CONFIG_BL_SWITCHER
-#include <asm/bL_switcher.h>
-static bool bL_switching_enabled;
-#define is_bL_switching_enabled() bL_switching_enabled
-#define set_switching_enabled(x) (bL_switching_enabled = (x))
-#else
-#define is_bL_switching_enabled() false
-#define set_switching_enabled(x) do { } while (0)
-#define bL_switch_request(...) do { } while (0)
-#define bL_switcher_put_enabled() do { } while (0)
-#define bL_switcher_get_enabled() do { } while (0)
-#endif
-
-#define ACTUAL_FREQ(cluster, freq) ((cluster == A7_CLUSTER) ? freq << 1 : freq)
-#define VIRT_FREQ(cluster, freq) ((cluster == A7_CLUSTER) ? freq >> 1 : freq)
-
-static struct thermal_cooling_device *cdev[MAX_CLUSTERS];
-static const struct cpufreq_arm_bL_ops *arm_bL_ops;
-static struct clk *clk[MAX_CLUSTERS];
-static struct cpufreq_frequency_table *freq_table[MAX_CLUSTERS + 1];
-static atomic_t cluster_usage[MAX_CLUSTERS + 1];
-
-static unsigned int clk_big_min; /* (Big) clock frequencies */
-static unsigned int clk_little_max; /* Maximum clock frequency (Little) */
-
-static DEFINE_PER_CPU(unsigned int, physical_cluster);
-static DEFINE_PER_CPU(unsigned int, cpu_last_req_freq);
-
-static struct mutex cluster_lock[MAX_CLUSTERS];
-
-static inline int raw_cpu_to_cluster(int cpu)
-{
- return topology_physical_package_id(cpu);
-}
-
-static inline int cpu_to_cluster(int cpu)
-{
- return is_bL_switching_enabled() ?
- MAX_CLUSTERS : raw_cpu_to_cluster(cpu);
-}
-
-static unsigned int find_cluster_maxfreq(int cluster)
-{
- int j;
- u32 max_freq = 0, cpu_freq;
-
- for_each_online_cpu(j) {
- cpu_freq = per_cpu(cpu_last_req_freq, j);
-
- if ((cluster == per_cpu(physical_cluster, j)) &&
- (max_freq < cpu_freq))
- max_freq = cpu_freq;
- }
-
- pr_debug("%s: cluster: %d, max freq: %d\n", __func__, cluster,
- max_freq);
-
- return max_freq;
-}
-
-static unsigned int clk_get_cpu_rate(unsigned int cpu)
-{
- u32 cur_cluster = per_cpu(physical_cluster, cpu);
- u32 rate = clk_get_rate(clk[cur_cluster]) / 1000;
-
- /* For switcher we use virtual A7 clock rates */
- if (is_bL_switching_enabled())
- rate = VIRT_FREQ(cur_cluster, rate);
-
- pr_debug("%s: cpu: %d, cluster: %d, freq: %u\n", __func__, cpu,
- cur_cluster, rate);
-
- return rate;
-}
-
-static unsigned int bL_cpufreq_get_rate(unsigned int cpu)
-{
- if (is_bL_switching_enabled()) {
- pr_debug("%s: freq: %d\n", __func__, per_cpu(cpu_last_req_freq,
- cpu));
-
- return per_cpu(cpu_last_req_freq, cpu);
- } else {
- return clk_get_cpu_rate(cpu);
- }
-}
-
-static unsigned int
-bL_cpufreq_set_rate(u32 cpu, u32 old_cluster, u32 new_cluster, u32 rate)
-{
- u32 new_rate, prev_rate;
- int ret;
- bool bLs = is_bL_switching_enabled();
-
- mutex_lock(&cluster_lock[new_cluster]);
-
- if (bLs) {
- prev_rate = per_cpu(cpu_last_req_freq, cpu);
- per_cpu(cpu_last_req_freq, cpu) = rate;
- per_cpu(physical_cluster, cpu) = new_cluster;
-
- new_rate = find_cluster_maxfreq(new_cluster);
- new_rate = ACTUAL_FREQ(new_cluster, new_rate);
- } else {
- new_rate = rate;
- }
-
- pr_debug("%s: cpu: %d, old cluster: %d, new cluster: %d, freq: %d\n",
- __func__, cpu, old_cluster, new_cluster, new_rate);
-
- ret = clk_set_rate(clk[new_cluster], new_rate * 1000);
- if (!ret) {
- /*
- * FIXME: clk_set_rate hasn't returned an error here however it
- * may be that clk_change_rate failed due to hardware or
- * firmware issues and wasn't able to report that due to the
- * current design of the clk core layer. To work around this
- * problem we will read back the clock rate and check it is
- * correct. This needs to be removed once clk core is fixed.
- */
- if (clk_get_rate(clk[new_cluster]) != new_rate * 1000)
- ret = -EIO;
- }
-
- if (WARN_ON(ret)) {
- pr_err("clk_set_rate failed: %d, new cluster: %d\n", ret,
- new_cluster);
- if (bLs) {
- per_cpu(cpu_last_req_freq, cpu) = prev_rate;
- per_cpu(physical_cluster, cpu) = old_cluster;
- }
-
- mutex_unlock(&cluster_lock[new_cluster]);
-
- return ret;
- }
-
- mutex_unlock(&cluster_lock[new_cluster]);
-
- /* Recalc freq for old cluster when switching clusters */
- if (old_cluster != new_cluster) {
- pr_debug("%s: cpu: %d, old cluster: %d, new cluster: %d\n",
- __func__, cpu, old_cluster, new_cluster);
-
- /* Switch cluster */
- bL_switch_request(cpu, new_cluster);
-
- mutex_lock(&cluster_lock[old_cluster]);
-
- /* Set freq of old cluster if there are cpus left on it */
- new_rate = find_cluster_maxfreq(old_cluster);
- new_rate = ACTUAL_FREQ(old_cluster, new_rate);
-
- if (new_rate) {
- pr_debug("%s: Updating rate of old cluster: %d, to freq: %d\n",
- __func__, old_cluster, new_rate);
-
- if (clk_set_rate(clk[old_cluster], new_rate * 1000))
- pr_err("%s: clk_set_rate failed: %d, old cluster: %d\n",
- __func__, ret, old_cluster);
- }
- mutex_unlock(&cluster_lock[old_cluster]);
- }
-
- return 0;
-}
-
-/* Set clock frequency */
-static int bL_cpufreq_set_target(struct cpufreq_policy *policy,
- unsigned int index)
-{
- u32 cpu = policy->cpu, cur_cluster, new_cluster, actual_cluster;
- unsigned int freqs_new;
- int ret;
-
- cur_cluster = cpu_to_cluster(cpu);
- new_cluster = actual_cluster = per_cpu(physical_cluster, cpu);
-
- freqs_new = freq_table[cur_cluster][index].frequency;
-
- if (is_bL_switching_enabled()) {
- if ((actual_cluster == A15_CLUSTER) &&
- (freqs_new < clk_big_min)) {
- new_cluster = A7_CLUSTER;
- } else if ((actual_cluster == A7_CLUSTER) &&
- (freqs_new > clk_little_max)) {
- new_cluster = A15_CLUSTER;
- }
- }
-
- ret = bL_cpufreq_set_rate(cpu, actual_cluster, new_cluster, freqs_new);
-
- if (!ret) {
- arch_set_freq_scale(policy->related_cpus, freqs_new,
- policy->cpuinfo.max_freq);
- }
-
- return ret;
-}
-
-static inline u32 get_table_count(struct cpufreq_frequency_table *table)
-{
- int count;
-
- for (count = 0; table[count].frequency != CPUFREQ_TABLE_END; count++)
- ;
-
- return count;
-}
-
-/* get the minimum frequency in the cpufreq_frequency_table */
-static inline u32 get_table_min(struct cpufreq_frequency_table *table)
-{
- struct cpufreq_frequency_table *pos;
- uint32_t min_freq = ~0;
- cpufreq_for_each_entry(pos, table)
- if (pos->frequency < min_freq)
- min_freq = pos->frequency;
- return min_freq;
-}
-
-/* get the maximum frequency in the cpufreq_frequency_table */
-static inline u32 get_table_max(struct cpufreq_frequency_table *table)
-{
- struct cpufreq_frequency_table *pos;
- uint32_t max_freq = 0;
- cpufreq_for_each_entry(pos, table)
- if (pos->frequency > max_freq)
- max_freq = pos->frequency;
- return max_freq;
-}
-
-static int merge_cluster_tables(void)
-{
- int i, j, k = 0, count = 1;
- struct cpufreq_frequency_table *table;
-
- for (i = 0; i < MAX_CLUSTERS; i++)
- count += get_table_count(freq_table[i]);
-
- table = kcalloc(count, sizeof(*table), GFP_KERNEL);
- if (!table)
- return -ENOMEM;
-
- freq_table[MAX_CLUSTERS] = table;
-
- /* Add in reverse order to get freqs in increasing order */
- for (i = MAX_CLUSTERS - 1; i >= 0; i--) {
- for (j = 0; freq_table[i][j].frequency != CPUFREQ_TABLE_END;
- j++) {
- table[k].frequency = VIRT_FREQ(i,
- freq_table[i][j].frequency);
- pr_debug("%s: index: %d, freq: %d\n", __func__, k,
- table[k].frequency);
- k++;
- }
- }
-
- table[k].driver_data = k;
- table[k].frequency = CPUFREQ_TABLE_END;
-
- pr_debug("%s: End, table: %p, count: %d\n", __func__, table, k);
-
- return 0;
-}
-
-static void _put_cluster_clk_and_freq_table(struct device *cpu_dev,
- const struct cpumask *cpumask)
-{
- u32 cluster = raw_cpu_to_cluster(cpu_dev->id);
-
- if (!freq_table[cluster])
- return;
-
- clk_put(clk[cluster]);
- dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);
- if (arm_bL_ops->free_opp_table)
- arm_bL_ops->free_opp_table(cpumask);
- dev_dbg(cpu_dev, "%s: cluster: %d\n", __func__, cluster);
-}
-
-static void put_cluster_clk_and_freq_table(struct device *cpu_dev,
- const struct cpumask *cpumask)
-{
- u32 cluster = cpu_to_cluster(cpu_dev->id);
- int i;
-
- if (atomic_dec_return(&cluster_usage[cluster]))
- return;
-
- if (cluster < MAX_CLUSTERS)
- return _put_cluster_clk_and_freq_table(cpu_dev, cpumask);
-
- for_each_present_cpu(i) {
- struct device *cdev = get_cpu_device(i);
- if (!cdev) {
- pr_err("%s: failed to get cpu%d device\n", __func__, i);
- return;
- }
-
- _put_cluster_clk_and_freq_table(cdev, cpumask);
- }
-
- /* free virtual table */
- kfree(freq_table[cluster]);
-}
-
-static int _get_cluster_clk_and_freq_table(struct device *cpu_dev,
- const struct cpumask *cpumask)
-{
- u32 cluster = raw_cpu_to_cluster(cpu_dev->id);
- int ret;
-
- if (freq_table[cluster])
- return 0;
-
- ret = arm_bL_ops->init_opp_table(cpumask);
- if (ret) {
- dev_err(cpu_dev, "%s: init_opp_table failed, cpu: %d, err: %d\n",
- __func__, cpu_dev->id, ret);
- goto out;
- }
-
- ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table[cluster]);
- if (ret) {
- dev_err(cpu_dev, "%s: failed to init cpufreq table, cpu: %d, err: %d\n",
- __func__, cpu_dev->id, ret);
- goto free_opp_table;
- }
-
- clk[cluster] = clk_get(cpu_dev, NULL);
- if (!IS_ERR(clk[cluster])) {
- dev_dbg(cpu_dev, "%s: clk: %p & freq table: %p, cluster: %d\n",
- __func__, clk[cluster], freq_table[cluster],
- cluster);
- return 0;
- }
-
- dev_err(cpu_dev, "%s: Failed to get clk for cpu: %d, cluster: %d\n",
- __func__, cpu_dev->id, cluster);
- ret = PTR_ERR(clk[cluster]);
- dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);
-
-free_opp_table:
- if (arm_bL_ops->free_opp_table)
- arm_bL_ops->free_opp_table(cpumask);
-out:
- dev_err(cpu_dev, "%s: Failed to get data for cluster: %d\n", __func__,
- cluster);
- return ret;
-}
-
-static int get_cluster_clk_and_freq_table(struct device *cpu_dev,
- const struct cpumask *cpumask)
-{
- u32 cluster = cpu_to_cluster(cpu_dev->id);
- int i, ret;
-
- if (atomic_inc_return(&cluster_usage[cluster]) != 1)
- return 0;
-
- if (cluster < MAX_CLUSTERS) {
- ret = _get_cluster_clk_and_freq_table(cpu_dev, cpumask);
- if (ret)
- atomic_dec(&cluster_usage[cluster]);
- return ret;
- }
-
- /*
- * Get data for all clusters and fill virtual cluster with a merge of
- * both
- */
- for_each_present_cpu(i) {
- struct device *cdev = get_cpu_device(i);
- if (!cdev) {
- pr_err("%s: failed to get cpu%d device\n", __func__, i);
- return -ENODEV;
- }
-
- ret = _get_cluster_clk_and_freq_table(cdev, cpumask);
- if (ret)
- goto put_clusters;
- }
-
- ret = merge_cluster_tables();
- if (ret)
- goto put_clusters;
-
- /* Assuming 2 cluster, set clk_big_min and clk_little_max */
- clk_big_min = get_table_min(freq_table[0]);
- clk_little_max = VIRT_FREQ(1, get_table_max(freq_table[1]));
-
- pr_debug("%s: cluster: %d, clk_big_min: %d, clk_little_max: %d\n",
- __func__, cluster, clk_big_min, clk_little_max);
-
- return 0;
-
-put_clusters:
- for_each_present_cpu(i) {
- struct device *cdev = get_cpu_device(i);
- if (!cdev) {
- pr_err("%s: failed to get cpu%d device\n", __func__, i);
- return -ENODEV;
- }
-
- _put_cluster_clk_and_freq_table(cdev, cpumask);
- }
-
- atomic_dec(&cluster_usage[cluster]);
-
- return ret;
-}
-
-/* Per-CPU initialization */
-static int bL_cpufreq_init(struct cpufreq_policy *policy)
-{
- u32 cur_cluster = cpu_to_cluster(policy->cpu);
- struct device *cpu_dev;
- int ret;
-
- cpu_dev = get_cpu_device(policy->cpu);
- if (!cpu_dev) {
- pr_err("%s: failed to get cpu%d device\n", __func__,
- policy->cpu);
- return -ENODEV;
- }
-
- if (cur_cluster < MAX_CLUSTERS) {
- int cpu;
-
- cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu));
-
- for_each_cpu(cpu, policy->cpus)
- per_cpu(physical_cluster, cpu) = cur_cluster;
- } else {
- /* Assumption: during init, we are always running on A15 */
- per_cpu(physical_cluster, policy->cpu) = A15_CLUSTER;
- }
-
- ret = get_cluster_clk_and_freq_table(cpu_dev, policy->cpus);
- if (ret)
- return ret;
-
- policy->freq_table = freq_table[cur_cluster];
- policy->cpuinfo.transition_latency =
- arm_bL_ops->get_transition_latency(cpu_dev);
-
- dev_pm_opp_of_register_em(policy->cpus);
-
- if (is_bL_switching_enabled())
- per_cpu(cpu_last_req_freq, policy->cpu) = clk_get_cpu_rate(policy->cpu);
-
- dev_info(cpu_dev, "%s: CPU %d initialized\n", __func__, policy->cpu);
- return 0;
-}
-
-static int bL_cpufreq_exit(struct cpufreq_policy *policy)
-{
- struct device *cpu_dev;
- int cur_cluster = cpu_to_cluster(policy->cpu);
-
- if (cur_cluster < MAX_CLUSTERS) {
- cpufreq_cooling_unregister(cdev[cur_cluster]);
- cdev[cur_cluster] = NULL;
- }
-
- cpu_dev = get_cpu_device(policy->cpu);
- if (!cpu_dev) {
- pr_err("%s: failed to get cpu%d device\n", __func__,
- policy->cpu);
- return -ENODEV;
- }
-
- put_cluster_clk_and_freq_table(cpu_dev, policy->related_cpus);
- dev_dbg(cpu_dev, "%s: Exited, cpu: %d\n", __func__, policy->cpu);
-
- return 0;
-}
-
-static void bL_cpufreq_ready(struct cpufreq_policy *policy)
-{
- int cur_cluster = cpu_to_cluster(policy->cpu);
-
- /* Do not register a cpu_cooling device if we are in IKS mode */
- if (cur_cluster >= MAX_CLUSTERS)
- return;
-
- cdev[cur_cluster] = of_cpufreq_cooling_register(policy);
-}
-
-static struct cpufreq_driver bL_cpufreq_driver = {
- .name = "arm-big-little",
- .flags = CPUFREQ_STICKY |
- CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
- CPUFREQ_NEED_INITIAL_FREQ_CHECK,
- .verify = cpufreq_generic_frequency_table_verify,
- .target_index = bL_cpufreq_set_target,
- .get = bL_cpufreq_get_rate,
- .init = bL_cpufreq_init,
- .exit = bL_cpufreq_exit,
- .ready = bL_cpufreq_ready,
- .attr = cpufreq_generic_attr,
-};
-
-#ifdef CONFIG_BL_SWITCHER
-static int bL_cpufreq_switcher_notifier(struct notifier_block *nfb,
- unsigned long action, void *_arg)
-{
- pr_debug("%s: action: %ld\n", __func__, action);
-
- switch (action) {
- case BL_NOTIFY_PRE_ENABLE:
- case BL_NOTIFY_PRE_DISABLE:
- cpufreq_unregister_driver(&bL_cpufreq_driver);
- break;
-
- case BL_NOTIFY_POST_ENABLE:
- set_switching_enabled(true);
- cpufreq_register_driver(&bL_cpufreq_driver);
- break;
-
- case BL_NOTIFY_POST_DISABLE:
- set_switching_enabled(false);
- cpufreq_register_driver(&bL_cpufreq_driver);
- break;
-
- default:
- return NOTIFY_DONE;
- }
-
- return NOTIFY_OK;
-}
-
-static struct notifier_block bL_switcher_notifier = {
- .notifier_call = bL_cpufreq_switcher_notifier,
-};
-
-static int __bLs_register_notifier(void)
-{
- return bL_switcher_register_notifier(&bL_switcher_notifier);
-}
-
-static int __bLs_unregister_notifier(void)
-{
- return bL_switcher_unregister_notifier(&bL_switcher_notifier);
-}
-#else
-static int __bLs_register_notifier(void) { return 0; }
-static int __bLs_unregister_notifier(void) { return 0; }
-#endif
-
-int bL_cpufreq_register(const struct cpufreq_arm_bL_ops *ops)
-{
- int ret, i;
-
- if (arm_bL_ops) {
- pr_debug("%s: Already registered: %s, exiting\n", __func__,
- arm_bL_ops->name);
- return -EBUSY;
- }
-
- if (!ops || !strlen(ops->name) || !ops->init_opp_table ||
- !ops->get_transition_latency) {
- pr_err("%s: Invalid arm_bL_ops, exiting\n", __func__);
- return -ENODEV;
- }
-
- arm_bL_ops = ops;
-
- set_switching_enabled(bL_switcher_get_enabled());
-
- for (i = 0; i < MAX_CLUSTERS; i++)
- mutex_init(&cluster_lock[i]);
-
- ret = cpufreq_register_driver(&bL_cpufreq_driver);
- if (ret) {
- pr_info("%s: Failed registering platform driver: %s, err: %d\n",
- __func__, ops->name, ret);
- arm_bL_ops = NULL;
- } else {
- ret = __bLs_register_notifier();
- if (ret) {
- cpufreq_unregister_driver(&bL_cpufreq_driver);
- arm_bL_ops = NULL;
- } else {
- pr_info("%s: Registered platform driver: %s\n",
- __func__, ops->name);
- }
- }
-
- bL_switcher_put_enabled();
- return ret;
-}
-EXPORT_SYMBOL_GPL(bL_cpufreq_register);
-
-void bL_cpufreq_unregister(const struct cpufreq_arm_bL_ops *ops)
-{
- if (arm_bL_ops != ops) {
- pr_err("%s: Registered with: %s, can't unregister, exiting\n",
- __func__, arm_bL_ops->name);
- return;
- }
-
- bL_switcher_get_enabled();
- __bLs_unregister_notifier();
- cpufreq_unregister_driver(&bL_cpufreq_driver);
- bL_switcher_put_enabled();
- pr_info("%s: Un-registered platform driver: %s\n", __func__,
- arm_bL_ops->name);
- arm_bL_ops = NULL;
-}
-EXPORT_SYMBOL_GPL(bL_cpufreq_unregister);
-
-MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>");
-MODULE_DESCRIPTION("Generic ARM big LITTLE cpufreq driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/cpufreq/arm_big_little.h b/drivers/cpufreq/arm_big_little.h
deleted file mode 100644
index 88a176e466c8..000000000000
--- a/drivers/cpufreq/arm_big_little.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * ARM big.LITTLE platform's CPUFreq header file
- *
- * Copyright (C) 2013 ARM Ltd.
- * Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com>
- *
- * Copyright (C) 2013 Linaro.
- * Viresh Kumar <viresh.kumar@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-#ifndef CPUFREQ_ARM_BIG_LITTLE_H
-#define CPUFREQ_ARM_BIG_LITTLE_H
-
-#include <linux/cpufreq.h>
-#include <linux/device.h>
-#include <linux/types.h>
-
-struct cpufreq_arm_bL_ops {
- char name[CPUFREQ_NAME_LEN];
-
- /*
- * This must set opp table for cpu_dev in a similar way as done by
- * dev_pm_opp_of_add_table().
- */
- int (*init_opp_table)(const struct cpumask *cpumask);
-
- /* Optional */
- int (*get_transition_latency)(struct device *cpu_dev);
- void (*free_opp_table)(const struct cpumask *cpumask);
-};
-
-int bL_cpufreq_register(const struct cpufreq_arm_bL_ops *ops);
-void bL_cpufreq_unregister(const struct cpufreq_arm_bL_ops *ops);
-
-#endif /* CPUFREQ_ARM_BIG_LITTLE_H */
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index bca8d1f47fd2..54bc76743b1f 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -86,7 +86,6 @@ static const struct of_device_id whitelist[] __initconst = {
{ .compatible = "st-ericsson,u9540", },
{ .compatible = "ti,omap2", },
- { .compatible = "ti,omap3", },
{ .compatible = "ti,omap4", },
{ .compatible = "ti,omap5", },
@@ -137,6 +136,7 @@ static const struct of_device_id blacklist[] __initconst = {
{ .compatible = "ti,am33xx", },
{ .compatible = "ti,am43", },
{ .compatible = "ti,dra7", },
+ { .compatible = "ti,omap3", },
{ }
};
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 48a224a6b178..77114a3897fb 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -113,18 +113,21 @@ EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
{
- u64 idle_time;
+ struct kernel_cpustat kcpustat;
u64 cur_wall_time;
+ u64 idle_time;
u64 busy_time;
cur_wall_time = jiffies64_to_nsecs(get_jiffies_64());
- busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
- busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
- busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
- busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
- busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
- busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
+ kcpustat_cpu_fetch(&kcpustat, cpu);
+
+ busy_time = kcpustat.cpustat[CPUTIME_USER];
+ busy_time += kcpustat.cpustat[CPUTIME_SYSTEM];
+ busy_time += kcpustat.cpustat[CPUTIME_IRQ];
+ busy_time += kcpustat.cpustat[CPUTIME_SOFTIRQ];
+ busy_time += kcpustat.cpustat[CPUTIME_STEAL];
+ busy_time += kcpustat.cpustat[CPUTIME_NICE];
idle_time = cur_wall_time - busy_time;
if (wall)
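kcpustat_cpu_fetch() copies the per-CPU cpustat array in one operation, so the six busy-time components summed above come from a single snapshot rather than six independent reads. A sketch of the computation, under that assumption:

	struct kernel_cpustat kc;
	u64 wall, busy, idle;

	wall = jiffies64_to_nsecs(get_jiffies_64());
	kcpustat_cpu_fetch(&kc, cpu);			/* one snapshot */

	busy  = kc.cpustat[CPUTIME_USER]  + kc.cpustat[CPUTIME_SYSTEM];
	busy += kc.cpustat[CPUTIME_IRQ]   + kc.cpustat[CPUTIME_SOFTIRQ];
	busy += kc.cpustat[CPUTIME_STEAL] + kc.cpustat[CPUTIME_NICE];

	idle = wall - busy;				/* nanoseconds */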
@@ -933,6 +936,9 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
struct freq_attr *fattr = to_attr(attr);
ssize_t ret;
+ if (!fattr->show)
+ return -EIO;
+
down_read(&policy->rwsem);
ret = fattr->show(policy, buf);
up_read(&policy->rwsem);
@@ -947,6 +953,9 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
struct freq_attr *fattr = to_attr(attr);
ssize_t ret = -EINVAL;
+ if (!fattr->store)
+ return -EIO;
+
/*
* cpus_read_trylock() is used here to work around a circular lock
* dependency problem with respect to the cpufreq_register_driver().
@@ -2385,7 +2394,10 @@ int cpufreq_set_policy(struct cpufreq_policy *policy,
new_policy->min = freq_qos_read_value(&policy->constraints, FREQ_QOS_MIN);
new_policy->max = freq_qos_read_value(&policy->constraints, FREQ_QOS_MAX);
- /* verify the cpu speed can be set within this limit */
+ /*
+ * Verify that the CPU speed can be set within these limits and make sure
+ * that min <= max.
+ */
ret = cpufreq_driver->verify(new_policy);
if (ret)
return ret;
@@ -2628,6 +2640,13 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
if (cpufreq_disabled())
return -ENODEV;
+ /*
+	 * The cpufreq core depends heavily on the availability of device
+	 * structures; make sure they are available before proceeding further.
+ */
+ if (!get_cpu_device(0))
+ return -EPROBE_DEFER;
+
if (!driver_data || !driver_data->verify || !driver_data->init ||
!(driver_data->setpolicy || driver_data->target_index ||
driver_data->target) ||
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 4bb054d0cb43..f99ae45efaea 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -105,7 +105,7 @@ void gov_update_cpu_data(struct dbs_data *dbs_data)
j_cdbs->prev_cpu_idle = get_cpu_idle_time(j, &j_cdbs->prev_update_time,
dbs_data->io_is_busy);
if (dbs_data->ignore_nice_load)
- j_cdbs->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
+ j_cdbs->prev_cpu_nice = kcpustat_field(&kcpustat_cpu(j), CPUTIME_NICE, j);
}
}
}
@@ -149,7 +149,7 @@ unsigned int dbs_update(struct cpufreq_policy *policy)
j_cdbs->prev_cpu_idle = cur_idle_time;
if (ignore_nice) {
- u64 cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
+ u64 cur_nice = kcpustat_field(&kcpustat_cpu(j), CPUTIME_NICE, j);
idle_time += div_u64(cur_nice - j_cdbs->prev_cpu_nice, NSEC_PER_USEC);
j_cdbs->prev_cpu_nice = cur_nice;
@@ -530,7 +530,7 @@ int cpufreq_dbs_governor_start(struct cpufreq_policy *policy)
j_cdbs->prev_load = 0;
if (ignore_nice)
- j_cdbs->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
+ j_cdbs->prev_cpu_nice = kcpustat_field(&kcpustat_cpu(j), CPUTIME_NICE, j);
}
gov->start(policy);
diff --git a/drivers/cpufreq/imx-cpufreq-dt.c b/drivers/cpufreq/imx-cpufreq-dt.c
index 35db14cf3102..85a6efd6b68f 100644
--- a/drivers/cpufreq/imx-cpufreq-dt.c
+++ b/drivers/cpufreq/imx-cpufreq-dt.c
@@ -44,19 +44,19 @@ static int imx_cpufreq_dt_probe(struct platform_device *pdev)
mkt_segment = (cell_value & OCOTP_CFG3_MKT_SEGMENT_MASK) >> OCOTP_CFG3_MKT_SEGMENT_SHIFT;
/*
- * Early samples without fuses written report "0 0" which means
- * consumer segment and minimum speed grading.
- *
- * According to datasheet minimum speed grading is not supported for
- * consumer parts so clamp to 1 to avoid warning for "no OPPs"
+ * Early samples without fuses written report "0 0" which may NOT
+ * match any OPP defined in DT. So clamp to minimum OPP defined in
+	 * DT to avoid a "no OPPs" warning.
*
* Applies to i.MX8M series SoCs.
*/
- if (mkt_segment == 0 && speed_grade == 0 && (
- of_machine_is_compatible("fsl,imx8mm") ||
- of_machine_is_compatible("fsl,imx8mn") ||
- of_machine_is_compatible("fsl,imx8mq")))
- speed_grade = 1;
+ if (mkt_segment == 0 && speed_grade == 0) {
+ if (of_machine_is_compatible("fsl,imx8mm") ||
+ of_machine_is_compatible("fsl,imx8mq"))
+ speed_grade = 1;
+ if (of_machine_is_compatible("fsl,imx8mn"))
+ speed_grade = 0xb;
+ }
supported_hw[0] = BIT(speed_grade);
supported_hw[1] = BIT(mkt_segment);
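These supported_hw[] masks feed dev_pm_opp_set_supported_hw(): each fuse value becomes a one-bit mask, and an OPP remains enabled only if every opp-supported-hw cell in DT intersects the corresponding mask. A sketch with hypothetical fuse readings:

	/* hypothetical readings: speed_grade = 2, mkt_segment = 1 */
	supported_hw[0] = BIT(speed_grade);	/* 0b0100 - speed bin */
	supported_hw[1] = BIT(mkt_segment);	/* 0b0010 - market segment */
	/* DT: opp-supported-hw = <0x4 0x3>; both cells intersect -> OPP kept */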
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 8ab31702cf6a..d2fa3e9ccd97 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -2662,21 +2662,21 @@ enum {
/* Hardware vendor-specific info that has its own power management modes */
static struct acpi_platform_list plat_info[] __initdata = {
- {"HP ", "ProLiant", 0, ACPI_SIG_FADT, all_versions, 0, PSS},
- {"ORACLE", "X4-2 ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
- {"ORACLE", "X4-2L ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
- {"ORACLE", "X4-2B ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
- {"ORACLE", "X3-2 ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
- {"ORACLE", "X3-2L ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
- {"ORACLE", "X3-2B ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
- {"ORACLE", "X4470M2 ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
- {"ORACLE", "X4270M3 ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
- {"ORACLE", "X4270M2 ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
- {"ORACLE", "X4170M2 ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
- {"ORACLE", "X4170 M3", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
- {"ORACLE", "X4275 M3", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
- {"ORACLE", "X6-2 ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
- {"ORACLE", "Sudbury ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
+ {"HP ", "ProLiant", 0, ACPI_SIG_FADT, all_versions, NULL, PSS},
+ {"ORACLE", "X4-2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
+ {"ORACLE", "X4-2L ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
+ {"ORACLE", "X4-2B ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
+ {"ORACLE", "X3-2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
+ {"ORACLE", "X3-2L ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
+ {"ORACLE", "X3-2B ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
+ {"ORACLE", "X4470M2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
+ {"ORACLE", "X4270M3 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
+ {"ORACLE", "X4270M2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
+ {"ORACLE", "X4170M2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
+ {"ORACLE", "X4170 M3", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
+ {"ORACLE", "X4275 M3", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
+ {"ORACLE", "X6-2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
+ {"ORACLE", "Sudbury ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
{ } /* End */
};
diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c
index 890813e0bb76..e9caa9586982 100644
--- a/drivers/cpufreq/loongson2_cpufreq.c
+++ b/drivers/cpufreq/loongson2_cpufreq.c
@@ -23,7 +23,7 @@
#include <asm/clock.h>
#include <asm/idle.h>
-#include <asm/mach-loongson64/loongson.h>
+#include <asm/mach-loongson2ef/loongson.h>
static uint nowait;
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
index 6061850e59c9..56f4bc0d209e 100644
--- a/drivers/cpufreq/powernv-cpufreq.c
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -1041,9 +1041,14 @@ static struct cpufreq_driver powernv_cpufreq_driver = {
static int init_chip_info(void)
{
- unsigned int chip[256];
+ unsigned int *chip;
unsigned int cpu, i;
unsigned int prev_chip_id = UINT_MAX;
+ int ret = 0;
+
+ chip = kcalloc(num_possible_cpus(), sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
for_each_possible_cpu(cpu) {
unsigned int id = cpu_to_chip_id(cpu);
@@ -1055,8 +1060,10 @@ static int init_chip_info(void)
}
chips = kcalloc(nr_chips, sizeof(struct chip), GFP_KERNEL);
- if (!chips)
- return -ENOMEM;
+ if (!chips) {
+ ret = -ENOMEM;
+ goto free_and_return;
+ }
for (i = 0; i < nr_chips; i++) {
chips[i].id = chip[i];
@@ -1066,7 +1073,9 @@ static int init_chip_info(void)
per_cpu(chip_info, cpu) = &chips[i];
}
- return 0;
+free_and_return:
+ kfree(chip);
+ return ret;
}
static inline void clean_chip_info(void)
diff --git a/drivers/cpufreq/s3c64xx-cpufreq.c b/drivers/cpufreq/s3c64xx-cpufreq.c
index af0c00dabb22..c6bdfc308e99 100644
--- a/drivers/cpufreq/s3c64xx-cpufreq.c
+++ b/drivers/cpufreq/s3c64xx-cpufreq.c
@@ -19,7 +19,6 @@
static struct regulator *vddarm;
static unsigned long regulator_latency;
-#ifdef CONFIG_CPU_S3C6410
struct s3c64xx_dvfs {
unsigned int vddarm_min;
unsigned int vddarm_max;
@@ -48,7 +47,6 @@ static struct cpufreq_frequency_table s3c64xx_freq_table[] = {
{ 0, 4, 800000 },
{ 0, 0, CPUFREQ_TABLE_END },
};
-#endif
static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy,
unsigned int index)
@@ -149,11 +147,6 @@ static int s3c64xx_cpufreq_driver_init(struct cpufreq_policy *policy)
if (policy->cpu != 0)
return -EINVAL;
- if (s3c64xx_freq_table == NULL) {
- pr_err("No frequency information for this CPU\n");
- return -ENODEV;
- }
-
policy->clk = clk_get(NULL, "armclk");
if (IS_ERR(policy->clk)) {
pr_err("Unable to obtain ARMCLK: %ld\n",
diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c
index 2b51e0718c9f..20d1f85d5f5a 100644
--- a/drivers/cpufreq/scpi-cpufreq.c
+++ b/drivers/cpufreq/scpi-cpufreq.c
@@ -1,8 +1,6 @@
/*
* System Control and Power Interface (SCPI) based CPUFreq Interface driver
*
- * It provides necessary ops to arm_big_little cpufreq driver.
- *
* Copyright (C) 2015 ARM Ltd.
* Sudeep Holla <sudeep.holla@arm.com>
*
diff --git a/drivers/cpufreq/sun50i-cpufreq-nvmem.c b/drivers/cpufreq/sun50i-cpufreq-nvmem.c
index eca32e443716..9907a165135b 100644
--- a/drivers/cpufreq/sun50i-cpufreq-nvmem.c
+++ b/drivers/cpufreq/sun50i-cpufreq-nvmem.c
@@ -25,7 +25,7 @@
static struct platform_device *cpufreq_dt_pdev, *sun50i_cpufreq_pdev;
/**
- * sun50i_cpufreq_get_efuse() - Parse and return efuse value present on SoC
+ * sun50i_cpufreq_get_efuse() - Determine speed grade from efuse value
* @versions: Set to the value parsed from efuse
*
 * Return: 0 on success.
@@ -69,21 +69,16 @@ static int sun50i_cpufreq_get_efuse(u32 *versions)
return PTR_ERR(speedbin);
efuse_value = (*speedbin >> NVMEM_SHIFT) & NVMEM_MASK;
- switch (efuse_value) {
- case 0b0001:
- *versions = 1;
- break;
- case 0b0011:
- *versions = 2;
- break;
- default:
- /*
- * For other situations, we treat it as bin0.
- * This vf table can be run for any good cpu.
- */
+
+ /*
+	 * We treat unexpected efuse values as if the SoC were from
+ * the slowest bin. Expected efuse values are 1-3, slowest
+ * to fastest.
+ */
+ if (efuse_value >= 1 && efuse_value <= 3)
+ *versions = efuse_value - 1;
+ else
*versions = 0;
- break;
- }
kfree(speedbin);
return 0;
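The removed switch collapses to a clamped linear mapping; as a one-line sketch of the new behaviour:

	/* efuse 1..3 -> bin 0..2 (slowest..fastest); anything else -> bin 0 */
	*versions = (efuse_value >= 1 && efuse_value <= 3) ? efuse_value - 1 : 0;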
diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c
index aeaa883a8c9d..557cb513bf7f 100644
--- a/drivers/cpufreq/ti-cpufreq.c
+++ b/drivers/cpufreq/ti-cpufreq.c
@@ -31,11 +31,17 @@
#define DRA7_EFUSE_OD_MPU_OPP BIT(1)
#define DRA7_EFUSE_HIGH_MPU_OPP BIT(2)
+#define OMAP3_CONTROL_DEVICE_STATUS 0x4800244C
+#define OMAP3_CONTROL_IDCODE 0x4830A204
+#define OMAP34xx_ProdID_SKUID 0x4830A20C
+#define OMAP3_SYSCON_BASE (0x48000000 + 0x2000 + 0x270)
+
#define VERSION_COUNT 2
struct ti_cpufreq_data;
struct ti_cpufreq_soc_data {
+ const char * const *reg_names;
unsigned long (*efuse_xlate)(struct ti_cpufreq_data *opp_data,
unsigned long efuse);
unsigned long efuse_fallback;
@@ -85,6 +91,13 @@ static unsigned long dra7_efuse_xlate(struct ti_cpufreq_data *opp_data,
return calculated_efuse;
}
+static unsigned long omap3_efuse_xlate(struct ti_cpufreq_data *opp_data,
+ unsigned long efuse)
+{
+ /* OPP enable bit ("Speed Binned") */
+ return BIT(efuse);
+}
+
static struct ti_cpufreq_soc_data am3x_soc_data = {
.efuse_xlate = amx3_efuse_xlate,
.efuse_fallback = AM33XX_800M_ARM_MPU_MAX_FREQ,
@@ -112,6 +125,74 @@ static struct ti_cpufreq_soc_data dra7_soc_data = {
.multi_regulator = true,
};
+/*
+ * OMAP35x TRM (SPRUF98K):
+ * CONTROL_IDCODE (0x4830 A204) describes silicon revisions.
+ * The Control OMAP Status Register 15:0 (address 0x4800 244C)
+ * distinguishes between omap3503, omap3515, omap3525 and omap3530
+ * and indicates feature presence.
+ * There are encodings for versions limited to 400/266MHz,
+ * but we ignore them.
+ * It is not clear whether this also holds for omap34xx.
+ * Some eFuse values, e.g. CONTROL_FUSE_OPP1_VDD1,
+ * are stored in the SYSCON register range.
+ * Register 0x4830A20C [ProdID.SKUID] bits [0:3]:
+ * 0x0 for a normal 600/430MHz device,
+ * 0x8 for a 720/520MHz device.
+ * It is not clear what the omap34xx value is.
+ */
+
+static struct ti_cpufreq_soc_data omap34xx_soc_data = {
+ .efuse_xlate = omap3_efuse_xlate,
+ .efuse_offset = OMAP34xx_ProdID_SKUID - OMAP3_SYSCON_BASE,
+ .efuse_shift = 3,
+ .efuse_mask = BIT(3),
+ .rev_offset = OMAP3_CONTROL_IDCODE - OMAP3_SYSCON_BASE,
+ .multi_regulator = false,
+};
+
+/*
+ * AM/DM37x TRM (SPRUGN4M):
+ * CONTROL_IDCODE (0x4830 A204) describes silicon revisions.
+ * The Control Device Status Register 15:0 (address 0x4800 244C)
+ * distinguishes between am3703, am3715, dm3725 and dm3730
+ * and indicates feature presence.
+ * Speed Binned = bit 9
+ *	0	800/600 MHz
+ *	1	1000/800 MHz
+ * Some eFuse values, e.g. CONTROL_FUSE_OPP 1G_VDD1,
+ * are stored in the SYSCON register range.
+ * There is no usable 0x4830A20C [ProdID.SKUID] register (it
+ * exists but seems to always read as 0).
+ */
+
+static const char * const omap3_reg_names[] = {"cpu0", "vbb"};
+
+static struct ti_cpufreq_soc_data omap36xx_soc_data = {
+ .reg_names = omap3_reg_names,
+ .efuse_xlate = omap3_efuse_xlate,
+ .efuse_offset = OMAP3_CONTROL_DEVICE_STATUS - OMAP3_SYSCON_BASE,
+ .efuse_shift = 9,
+ .efuse_mask = BIT(9),
+ .rev_offset = OMAP3_CONTROL_IDCODE - OMAP3_SYSCON_BASE,
+ .multi_regulator = true,
+};
+
+/*
+ * AM3517 is quite similar to AM/DM37x except that it has no
+ * high-speed-grade eFuse and no ABB LDO.
+ */
+
+static struct ti_cpufreq_soc_data am3517_soc_data = {
+ .efuse_xlate = omap3_efuse_xlate,
+ .efuse_offset = OMAP3_CONTROL_DEVICE_STATUS - OMAP3_SYSCON_BASE,
+ .efuse_shift = 0,
+ .efuse_mask = 0,
+ .rev_offset = OMAP3_CONTROL_IDCODE - OMAP3_SYSCON_BASE,
+ .multi_regulator = false,
+};
+
/**
* ti_cpufreq_get_efuse() - Parse and return efuse value present on SoC
* @opp_data: pointer to ti_cpufreq_data context
@@ -128,7 +209,17 @@ static int ti_cpufreq_get_efuse(struct ti_cpufreq_data *opp_data,
ret = regmap_read(opp_data->syscon, opp_data->soc_data->efuse_offset,
&efuse);
- if (ret) {
+ if (ret == -EIO) {
+ /* not a syscon register! */
+ void __iomem *regs = ioremap(OMAP3_SYSCON_BASE +
+ opp_data->soc_data->efuse_offset, 4);
+
+ if (!regs)
+ return -ENOMEM;
+ efuse = readl(regs);
+ iounmap(regs);
+	} else if (ret) {
dev_err(dev,
"Failed to read the efuse value from syscon: %d\n",
ret);
@@ -159,7 +250,17 @@ static int ti_cpufreq_get_rev(struct ti_cpufreq_data *opp_data,
ret = regmap_read(opp_data->syscon, opp_data->soc_data->rev_offset,
&revision);
- if (ret) {
+ if (ret == -EIO) {
+ /* not a syscon register! */
+ void __iomem *regs = ioremap(OMAP3_SYSCON_BASE +
+ opp_data->soc_data->rev_offset, 4);
+
+ if (!regs)
+ return -ENOMEM;
+ revision = readl(regs);
+ iounmap(regs);
+	} else if (ret) {
dev_err(dev,
"Failed to read the revision number from syscon: %d\n",
ret);
@@ -189,8 +290,14 @@ static int ti_cpufreq_setup_syscon_register(struct ti_cpufreq_data *opp_data)
static const struct of_device_id ti_cpufreq_of_match[] = {
{ .compatible = "ti,am33xx", .data = &am3x_soc_data, },
+ { .compatible = "ti,am3517", .data = &am3517_soc_data, },
{ .compatible = "ti,am43", .data = &am4x_soc_data, },
{ .compatible = "ti,dra7", .data = &dra7_soc_data },
+ { .compatible = "ti,omap34xx", .data = &omap34xx_soc_data, },
+ { .compatible = "ti,omap36xx", .data = &omap36xx_soc_data, },
+ /* legacy */
+ { .compatible = "ti,omap3430", .data = &omap34xx_soc_data, },
+ { .compatible = "ti,omap3630", .data = &omap36xx_soc_data, },
{},
};
@@ -212,7 +319,7 @@ static int ti_cpufreq_probe(struct platform_device *pdev)
const struct of_device_id *match;
struct opp_table *ti_opp_table;
struct ti_cpufreq_data *opp_data;
- const char * const reg_names[] = {"vdd", "vbb"};
+ const char * const default_reg_names[] = {"vdd", "vbb"};
int ret;
match = dev_get_platdata(&pdev->dev);
@@ -268,9 +375,13 @@ static int ti_cpufreq_probe(struct platform_device *pdev)
opp_data->opp_table = ti_opp_table;
if (opp_data->soc_data->multi_regulator) {
+ const char * const *reg_names = default_reg_names;
+
+ if (opp_data->soc_data->reg_names)
+ reg_names = opp_data->soc_data->reg_names;
ti_opp_table = dev_pm_opp_set_regulators(opp_data->cpu_dev,
reg_names,
- ARRAY_SIZE(reg_names));
+ ARRAY_SIZE(default_reg_names));
if (IS_ERR(ti_opp_table)) {
dev_pm_opp_put_supported_hw(opp_data->opp_table);
ret = PTR_ERR(ti_opp_table);
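For the OMAP3 entries, the eFuse handling reduces to turning one status bit into an opp-supported-hw mask. A sketch of the omap36xx path, assuming the usual mask-then-shift performed by ti_cpufreq_get_efuse():

	/* AM/DM37x: Speed Binned = bit 9 of the device status register */
	efuse = (status & BIT(9)) >> 9;	/* 0: 800/600 MHz, 1: 1000/800 MHz */
	version = omap3_efuse_xlate(opp_data, efuse);	/* BIT(0) or BIT(1) */
	/* matched against opp-supported-hw cells in the OPP table */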
diff --git a/drivers/cpufreq/vexpress-spc-cpufreq.c b/drivers/cpufreq/vexpress-spc-cpufreq.c
index 53237289e606..506e3f2bf53a 100644
--- a/drivers/cpufreq/vexpress-spc-cpufreq.c
+++ b/drivers/cpufreq/vexpress-spc-cpufreq.c
@@ -1,61 +1,592 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Versatile Express SPC CPUFreq Interface driver
*
- * It provides necessary ops to arm_big_little cpufreq driver.
+ * Copyright (C) 2013 - 2019 ARM Ltd.
+ * Sudeep Holla <sudeep.holla@arm.com>
*
- * Copyright (C) 2013 ARM Ltd.
- * Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * Copyright (C) 2013 Linaro.
+ * Viresh Kumar <viresh.kumar@linaro.org>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
+#include <linux/cpumask.h>
+#include <linux/cpu_cooling.h>
+#include <linux/device.h>
#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
+#include <linux/slab.h>
+#include <linux/topology.h>
#include <linux/types.h>
-#include "arm_big_little.h"
+/* Currently we support only two clusters */
+#define A15_CLUSTER 0
+#define A7_CLUSTER 1
+#define MAX_CLUSTERS 2
+
+#ifdef CONFIG_BL_SWITCHER
+#include <asm/bL_switcher.h>
+static bool bL_switching_enabled;
+#define is_bL_switching_enabled() bL_switching_enabled
+#define set_switching_enabled(x) (bL_switching_enabled = (x))
+#else
+#define is_bL_switching_enabled() false
+#define set_switching_enabled(x) do { } while (0)
+#define bL_switch_request(...) do { } while (0)
+#define bL_switcher_put_enabled() do { } while (0)
+#define bL_switcher_get_enabled() do { } while (0)
+#endif
+
+#define ACTUAL_FREQ(cluster, freq) ((cluster == A7_CLUSTER) ? freq << 1 : freq)
+#define VIRT_FREQ(cluster, freq) ((cluster == A7_CLUSTER) ? freq >> 1 : freq)
+
+static struct thermal_cooling_device *cdev[MAX_CLUSTERS];
+static struct clk *clk[MAX_CLUSTERS];
+static struct cpufreq_frequency_table *freq_table[MAX_CLUSTERS + 1];
+static atomic_t cluster_usage[MAX_CLUSTERS + 1];
+
+static unsigned int clk_big_min;	/* Minimum clock frequency (Big) */
+static unsigned int clk_little_max; /* Maximum clock frequency (Little) */
+
+static DEFINE_PER_CPU(unsigned int, physical_cluster);
+static DEFINE_PER_CPU(unsigned int, cpu_last_req_freq);
+
+static struct mutex cluster_lock[MAX_CLUSTERS];
+
+static inline int raw_cpu_to_cluster(int cpu)
+{
+ return topology_physical_package_id(cpu);
+}
+
+static inline int cpu_to_cluster(int cpu)
+{
+ return is_bL_switching_enabled() ?
+ MAX_CLUSTERS : raw_cpu_to_cluster(cpu);
+}
+
+static unsigned int find_cluster_maxfreq(int cluster)
+{
+ int j;
+ u32 max_freq = 0, cpu_freq;
+
+ for_each_online_cpu(j) {
+ cpu_freq = per_cpu(cpu_last_req_freq, j);
+
+ if (cluster == per_cpu(physical_cluster, j) &&
+ max_freq < cpu_freq)
+ max_freq = cpu_freq;
+ }
+
+ return max_freq;
+}
+
+static unsigned int clk_get_cpu_rate(unsigned int cpu)
+{
+ u32 cur_cluster = per_cpu(physical_cluster, cpu);
+ u32 rate = clk_get_rate(clk[cur_cluster]) / 1000;
+
+ /* For switcher we use virtual A7 clock rates */
+ if (is_bL_switching_enabled())
+ rate = VIRT_FREQ(cur_cluster, rate);
+
+ return rate;
+}
+
+static unsigned int ve_spc_cpufreq_get_rate(unsigned int cpu)
+{
+ if (is_bL_switching_enabled())
+ return per_cpu(cpu_last_req_freq, cpu);
+ else
+ return clk_get_cpu_rate(cpu);
+}
+
+static unsigned int
+ve_spc_cpufreq_set_rate(u32 cpu, u32 old_cluster, u32 new_cluster, u32 rate)
+{
+ u32 new_rate, prev_rate;
+ int ret;
+ bool bLs = is_bL_switching_enabled();
+
+ mutex_lock(&cluster_lock[new_cluster]);
+
+ if (bLs) {
+ prev_rate = per_cpu(cpu_last_req_freq, cpu);
+ per_cpu(cpu_last_req_freq, cpu) = rate;
+ per_cpu(physical_cluster, cpu) = new_cluster;
+
+ new_rate = find_cluster_maxfreq(new_cluster);
+ new_rate = ACTUAL_FREQ(new_cluster, new_rate);
+ } else {
+ new_rate = rate;
+ }
+
+ ret = clk_set_rate(clk[new_cluster], new_rate * 1000);
+ if (!ret) {
+ /*
+ * FIXME: clk_set_rate() has not returned an error here, but
+ * clk_change_rate() may still have failed due to hardware or
+ * firmware issues and been unable to report that, given the
+ * current design of the clk core layer. To work around this
+ * problem, read back the clock rate and check that it is
+ * correct. This needs to be removed once the clk core is fixed.
+ */
+ if (clk_get_rate(clk[new_cluster]) != new_rate * 1000)
+ ret = -EIO;
+ }
+
+ if (WARN_ON(ret)) {
+ if (bLs) {
+ per_cpu(cpu_last_req_freq, cpu) = prev_rate;
+ per_cpu(physical_cluster, cpu) = old_cluster;
+ }
+
+ mutex_unlock(&cluster_lock[new_cluster]);
+
+ return ret;
+ }
+
+ mutex_unlock(&cluster_lock[new_cluster]);
+
+ /* Recalc freq for old cluster when switching clusters */
+ if (old_cluster != new_cluster) {
+ /* Switch cluster */
+ bL_switch_request(cpu, new_cluster);
+
+ mutex_lock(&cluster_lock[old_cluster]);
+
+ /* Set freq of old cluster if there are cpus left on it */
+ new_rate = find_cluster_maxfreq(old_cluster);
+ new_rate = ACTUAL_FREQ(old_cluster, new_rate);
+
+ if (new_rate) {
+ ret = clk_set_rate(clk[old_cluster], new_rate * 1000);
+ if (ret)
+ pr_err("%s: clk_set_rate failed: %d, old cluster: %d\n",
+ __func__, ret, old_cluster);
+ }
+ mutex_unlock(&cluster_lock[old_cluster]);
+ }
+
+ return 0;
+}
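The FIXME inside this function distrusts clk_set_rate()'s return value and re-reads the rate to confirm it took effect. A standalone sketch of that set-then-verify pattern, where fake_clk_set_rate() and fake_clk_get_rate() are hypothetical stand-ins for the clk framework:

#include <stdio.h>

static unsigned long hw_rate;	/* stand-in for the hardware register */

static int fake_clk_set_rate(unsigned long rate)
{
	hw_rate = rate;		/* a real failure would leave this unchanged */
	return 0;
}

static unsigned long fake_clk_get_rate(void)
{
	return hw_rate;
}

static int set_rate_checked(unsigned long rate)
{
	int ret = fake_clk_set_rate(rate);

	/* Success from set_rate is not trusted: verify by reading back */
	if (!ret && fake_clk_get_rate() != rate)
		ret = -5;	/* -EIO */
	return ret;
}

int main(void)
{
	printf("ret = %d\n", set_rate_checked(1200000000UL));
	return 0;
}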
+
+/* Set clock frequency */
+static int ve_spc_cpufreq_set_target(struct cpufreq_policy *policy,
+ unsigned int index)
+{
+ u32 cpu = policy->cpu, cur_cluster, new_cluster, actual_cluster;
+ unsigned int freqs_new;
+ int ret;
+
+ cur_cluster = cpu_to_cluster(cpu);
+ new_cluster = actual_cluster = per_cpu(physical_cluster, cpu);
+
+ freqs_new = freq_table[cur_cluster][index].frequency;
+
+ if (is_bL_switching_enabled()) {
+ if (actual_cluster == A15_CLUSTER && freqs_new < clk_big_min)
+ new_cluster = A7_CLUSTER;
+ else if (actual_cluster == A7_CLUSTER &&
+ freqs_new > clk_little_max)
+ new_cluster = A15_CLUSTER;
+ }
+
+ ret = ve_spc_cpufreq_set_rate(cpu, actual_cluster, new_cluster,
+ freqs_new);
+
+ if (!ret) {
+ arch_set_freq_scale(policy->related_cpus, freqs_new,
+ policy->cpuinfo.max_freq);
+ }
+
+ return ret;
+}
+
+static inline u32 get_table_count(struct cpufreq_frequency_table *table)
+{
+ int count;
+
+ for (count = 0; table[count].frequency != CPUFREQ_TABLE_END; count++)
+ ;
+
+ return count;
+}
+
+/* get the minimum frequency in the cpufreq_frequency_table */
+static inline u32 get_table_min(struct cpufreq_frequency_table *table)
+{
+ struct cpufreq_frequency_table *pos;
+ u32 min_freq = ~0;
+
+ cpufreq_for_each_entry(pos, table)
+ if (pos->frequency < min_freq)
+ min_freq = pos->frequency;
+ return min_freq;
+}
+
+/* get the maximum frequency in the cpufreq_frequency_table */
+static inline u32 get_table_max(struct cpufreq_frequency_table *table)
+{
+ struct cpufreq_frequency_table *pos;
+ u32 max_freq = 0;
+
+ cpufreq_for_each_entry(pos, table)
+ if (pos->frequency > max_freq)
+ max_freq = pos->frequency;
+ return max_freq;
+}
+
+static bool search_frequency(struct cpufreq_frequency_table *table, int size,
+ unsigned int freq)
+{
+ int count;
+
+ for (count = 0; count < size; count++) {
+ if (table[count].frequency == freq)
+ return true;
+ }
+
+ return false;
+}
+
+static int merge_cluster_tables(void)
+{
+ int i, j, k = 0, count = 1;
+ struct cpufreq_frequency_table *table;
+
+ for (i = 0; i < MAX_CLUSTERS; i++)
+ count += get_table_count(freq_table[i]);
+
+ table = kcalloc(count, sizeof(*table), GFP_KERNEL);
+ if (!table)
+ return -ENOMEM;
+
+ freq_table[MAX_CLUSTERS] = table;
+
+ /* Add in reverse order to get freqs in increasing order */
+ for (i = MAX_CLUSTERS - 1; i >= 0; i--, count = k) {
+ for (j = 0; freq_table[i][j].frequency != CPUFREQ_TABLE_END;
+ j++) {
+ if (i == A15_CLUSTER &&
+ search_frequency(table, count, freq_table[i][j].frequency))
+ continue; /* skip duplicates */
+ table[k++].frequency =
+ VIRT_FREQ(i, freq_table[i][j].frequency);
+ }
+ }
+
+ table[k].driver_data = k;
+ table[k].frequency = CPUFREQ_TABLE_END;
+
+ return 0;
+}
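A worked example of what merge_cluster_tables() produces, using made-up OPP tables (A15 = {500000, 900000, 1200000} kHz, A7 = {400000, 800000, 1000000} kHz): the A7 entries are halved into the virtual space, the clusters are walked in reverse so the result comes out in increasing order, and the A15 entry that collides with a virtual A7 one is skipped. A compilable userspace sketch:

#include <stdbool.h>
#include <stdio.h>

#define A15 0
#define A7  1
#define VIRT(c, f) ((c) == A7 ? (f) >> 1 : (f))

static bool seen(const unsigned int *t, int n, unsigned int f)
{
	for (int i = 0; i < n; i++)
		if (t[i] == f)
			return true;
	return false;
}

int main(void)
{
	const unsigned int a15[] = { 500000, 900000, 1200000 };
	const unsigned int a7[]  = { 400000, 800000, 1000000 };
	const unsigned int *tbl[2] = { a15, a7 };
	const int len[2] = { 3, 3 };
	unsigned int merged[6];
	int k = 0;

	/* Reverse cluster order: A7 first (halved), then A15 minus dups */
	for (int c = A7; c >= A15; c--)
		for (int j = 0; j < len[c]; j++) {
			unsigned int f = VIRT(c, tbl[c][j]);

			if (c == A15 && seen(merged, k, f))
				continue;
			merged[k++] = f;
		}

	/* Prints 200000 400000 500000 900000 1200000 */
	for (int i = 0; i < k; i++)
		printf("%u\n", merged[i]);
	return 0;
}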
-static int ve_spc_init_opp_table(const struct cpumask *cpumask)
+static void _put_cluster_clk_and_freq_table(struct device *cpu_dev,
+ const struct cpumask *cpumask)
{
- struct device *cpu_dev = get_cpu_device(cpumask_first(cpumask));
+ u32 cluster = raw_cpu_to_cluster(cpu_dev->id);
+
+ if (!freq_table[cluster])
+ return;
+
+ clk_put(clk[cluster]);
+ dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);
+}
+
+static void put_cluster_clk_and_freq_table(struct device *cpu_dev,
+ const struct cpumask *cpumask)
+{
+ u32 cluster = cpu_to_cluster(cpu_dev->id);
+ int i;
+
+ if (atomic_dec_return(&cluster_usage[cluster]))
+ return;
+
+ if (cluster < MAX_CLUSTERS)
+ return _put_cluster_clk_and_freq_table(cpu_dev, cpumask);
+
+ for_each_present_cpu(i) {
+ struct device *cdev = get_cpu_device(i);
+
+ if (!cdev)
+ return;
+
+ _put_cluster_clk_and_freq_table(cdev, cpumask);
+ }
+
+ /* free virtual table */
+ kfree(freq_table[cluster]);
+}
+
+static int _get_cluster_clk_and_freq_table(struct device *cpu_dev,
+ const struct cpumask *cpumask)
+{
+ u32 cluster = raw_cpu_to_cluster(cpu_dev->id);
+ int ret;
+
+ if (freq_table[cluster])
+ return 0;
+
/*
* The platform-specific SPC code must initialise the OPP table,
* so just check that the OPP count is non-zero.
*/
- return dev_pm_opp_get_opp_count(cpu_dev) <= 0;
+ ret = dev_pm_opp_get_opp_count(cpu_dev) <= 0;
+ if (ret)
+ goto out;
+
+ ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table[cluster]);
+ if (ret)
+ goto out;
+
+ clk[cluster] = clk_get(cpu_dev, NULL);
+ if (!IS_ERR(clk[cluster]))
+ return 0;
+
+ dev_err(cpu_dev, "%s: Failed to get clk for cpu: %d, cluster: %d\n",
+ __func__, cpu_dev->id, cluster);
+ ret = PTR_ERR(clk[cluster]);
+ dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);
+
+out:
+ dev_err(cpu_dev, "%s: Failed to get data for cluster: %d\n", __func__,
+ cluster);
+ return ret;
}
-static int ve_spc_get_transition_latency(struct device *cpu_dev)
+static int get_cluster_clk_and_freq_table(struct device *cpu_dev,
+ const struct cpumask *cpumask)
{
- return 1000000; /* 1 ms */
+ u32 cluster = cpu_to_cluster(cpu_dev->id);
+ int i, ret;
+
+ if (atomic_inc_return(&cluster_usage[cluster]) != 1)
+ return 0;
+
+ if (cluster < MAX_CLUSTERS) {
+ ret = _get_cluster_clk_and_freq_table(cpu_dev, cpumask);
+ if (ret)
+ atomic_dec(&cluster_usage[cluster]);
+ return ret;
+ }
+
+ /*
+ * Get data for all clusters and fill the virtual cluster with a merge
+ * of both.
+ */
+ for_each_present_cpu(i) {
+ struct device *cdev = get_cpu_device(i);
+
+ if (!cdev)
+ return -ENODEV;
+
+ ret = _get_cluster_clk_and_freq_table(cdev, cpumask);
+ if (ret)
+ goto put_clusters;
+ }
+
+ ret = merge_cluster_tables();
+ if (ret)
+ goto put_clusters;
+
+ /* Assuming two clusters, set clk_big_min and clk_little_max */
+ clk_big_min = get_table_min(freq_table[A15_CLUSTER]);
+ clk_little_max = VIRT_FREQ(A7_CLUSTER,
+ get_table_max(freq_table[A7_CLUSTER]));
+
+ return 0;
+
+put_clusters:
+ for_each_present_cpu(i) {
+ struct device *cdev = get_cpu_device(i);
+
+ if (!cdev)
+ return -ENODEV;
+
+ _put_cluster_clk_and_freq_table(cdev, cpumask);
+ }
+
+ atomic_dec(&cluster_usage[cluster]);
+
+ return ret;
}
-static const struct cpufreq_arm_bL_ops ve_spc_cpufreq_ops = {
- .name = "vexpress-spc",
- .get_transition_latency = ve_spc_get_transition_latency,
- .init_opp_table = ve_spc_init_opp_table,
+/* Per-CPU initialization */
+static int ve_spc_cpufreq_init(struct cpufreq_policy *policy)
+{
+ u32 cur_cluster = cpu_to_cluster(policy->cpu);
+ struct device *cpu_dev;
+ int ret;
+
+ cpu_dev = get_cpu_device(policy->cpu);
+ if (!cpu_dev) {
+ pr_err("%s: failed to get cpu%d device\n", __func__,
+ policy->cpu);
+ return -ENODEV;
+ }
+
+ if (cur_cluster < MAX_CLUSTERS) {
+ int cpu;
+
+ cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu));
+
+ for_each_cpu(cpu, policy->cpus)
+ per_cpu(physical_cluster, cpu) = cur_cluster;
+ } else {
+ /* Assumption: during init, we are always running on A15 */
+ per_cpu(physical_cluster, policy->cpu) = A15_CLUSTER;
+ }
+
+ ret = get_cluster_clk_and_freq_table(cpu_dev, policy->cpus);
+ if (ret)
+ return ret;
+
+ policy->freq_table = freq_table[cur_cluster];
+ policy->cpuinfo.transition_latency = 1000000; /* 1 ms */
+
+ dev_pm_opp_of_register_em(policy->cpus);
+
+ if (is_bL_switching_enabled())
+ per_cpu(cpu_last_req_freq, policy->cpu) =
+ clk_get_cpu_rate(policy->cpu);
+
+ dev_info(cpu_dev, "%s: CPU %d initialized\n", __func__, policy->cpu);
+ return 0;
+}
+
+static int ve_spc_cpufreq_exit(struct cpufreq_policy *policy)
+{
+ struct device *cpu_dev;
+ int cur_cluster = cpu_to_cluster(policy->cpu);
+
+ if (cur_cluster < MAX_CLUSTERS) {
+ cpufreq_cooling_unregister(cdev[cur_cluster]);
+ cdev[cur_cluster] = NULL;
+ }
+
+ cpu_dev = get_cpu_device(policy->cpu);
+ if (!cpu_dev) {
+ pr_err("%s: failed to get cpu%d device\n", __func__,
+ policy->cpu);
+ return -ENODEV;
+ }
+
+ put_cluster_clk_and_freq_table(cpu_dev, policy->related_cpus);
+ return 0;
+}
+
+static void ve_spc_cpufreq_ready(struct cpufreq_policy *policy)
+{
+ int cur_cluster = cpu_to_cluster(policy->cpu);
+
+ /* Do not register a cpu_cooling device if we are in IKS mode */
+ if (cur_cluster >= MAX_CLUSTERS)
+ return;
+
+ cdev[cur_cluster] = of_cpufreq_cooling_register(policy);
+}
+
+static struct cpufreq_driver ve_spc_cpufreq_driver = {
+ .name = "vexpress-spc",
+ .flags = CPUFREQ_STICKY |
+ CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
+ CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = ve_spc_cpufreq_set_target,
+ .get = ve_spc_cpufreq_get_rate,
+ .init = ve_spc_cpufreq_init,
+ .exit = ve_spc_cpufreq_exit,
+ .ready = ve_spc_cpufreq_ready,
+ .attr = cpufreq_generic_attr,
};
+#ifdef CONFIG_BL_SWITCHER
+static int bL_cpufreq_switcher_notifier(struct notifier_block *nfb,
+ unsigned long action, void *_arg)
+{
+ pr_debug("%s: action: %ld\n", __func__, action);
+
+ switch (action) {
+ case BL_NOTIFY_PRE_ENABLE:
+ case BL_NOTIFY_PRE_DISABLE:
+ cpufreq_unregister_driver(&ve_spc_cpufreq_driver);
+ break;
+
+ case BL_NOTIFY_POST_ENABLE:
+ set_switching_enabled(true);
+ cpufreq_register_driver(&ve_spc_cpufreq_driver);
+ break;
+
+ case BL_NOTIFY_POST_DISABLE:
+ set_switching_enabled(false);
+ cpufreq_register_driver(&ve_spc_cpufreq_driver);
+ break;
+
+ default:
+ return NOTIFY_DONE;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block bL_switcher_notifier = {
+ .notifier_call = bL_cpufreq_switcher_notifier,
+};
+
+static int __bLs_register_notifier(void)
+{
+ return bL_switcher_register_notifier(&bL_switcher_notifier);
+}
+
+static int __bLs_unregister_notifier(void)
+{
+ return bL_switcher_unregister_notifier(&bL_switcher_notifier);
+}
+#else
+static int __bLs_register_notifier(void) { return 0; }
+static int __bLs_unregister_notifier(void) { return 0; }
+#endif
+
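The notifier above has to tear the cpufreq driver down before a switcher transition and bring it back afterwards, because the PRE/POST callbacks change how many logical clusters exist. A toy model of that sequencing (userspace only, with booleans standing in for the register/unregister calls):

#include <stdbool.h>
#include <stdio.h>

enum { PRE_ENABLE, POST_ENABLE, PRE_DISABLE, POST_DISABLE };

static bool registered = true;	/* driver state after probe */
static bool switching;

static void notify(int action)
{
	switch (action) {
	case PRE_ENABLE:
	case PRE_DISABLE:
		registered = false;	/* cpufreq_unregister_driver() */
		break;
	case POST_ENABLE:
		switching = true;	/* set_switching_enabled(true) */
		registered = true;	/* cpufreq_register_driver() */
		break;
	case POST_DISABLE:
		switching = false;
		registered = true;
		break;
	}
}

int main(void)
{
	notify(PRE_ENABLE);	/* driver temporarily gone */
	notify(POST_ENABLE);	/* back, now in switching mode */
	printf("registered=%d switching=%d\n", registered, switching);
	return 0;
}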
static int ve_spc_cpufreq_probe(struct platform_device *pdev)
{
- return bL_cpufreq_register(&ve_spc_cpufreq_ops);
+ int ret, i;
+
+ set_switching_enabled(bL_switcher_get_enabled());
+
+ for (i = 0; i < MAX_CLUSTERS; i++)
+ mutex_init(&cluster_lock[i]);
+
+ ret = cpufreq_register_driver(&ve_spc_cpufreq_driver);
+ if (ret) {
+ pr_info("%s: Failed registering platform driver: %s, err: %d\n",
+ __func__, ve_spc_cpufreq_driver.name, ret);
+ } else {
+ ret = __bLs_register_notifier();
+ if (ret)
+ cpufreq_unregister_driver(&ve_spc_cpufreq_driver);
+ else
+ pr_info("%s: Registered platform driver: %s\n",
+ __func__, ve_spc_cpufreq_driver.name);
+ }
+
+ bL_switcher_put_enabled();
+ return ret;
}
static int ve_spc_cpufreq_remove(struct platform_device *pdev)
{
- bL_cpufreq_unregister(&ve_spc_cpufreq_ops);
+ bL_switcher_get_enabled();
+ __bLs_unregister_notifier();
+ cpufreq_unregister_driver(&ve_spc_cpufreq_driver);
+ bL_switcher_put_enabled();
+ pr_info("%s: Unregistered platform driver: %s\n", __func__,
+ ve_spc_cpufreq_driver.name);
return 0;
}
@@ -68,4 +599,7 @@ static struct platform_driver ve_spc_cpufreq_platdrv = {
};
module_platform_driver(ve_spc_cpufreq_platdrv);
-MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>");
+MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
+MODULE_DESCRIPTION("Vexpress SPC ARM big LITTLE cpufreq driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/cpuidle/cpuidle-powernv.c b/drivers/cpuidle/cpuidle-powernv.c
index 84b1ebe212b3..1b299e801f74 100644
--- a/drivers/cpuidle/cpuidle-powernv.c
+++ b/drivers/cpuidle/cpuidle-powernv.c
@@ -56,13 +56,10 @@ static u64 get_snooze_timeout(struct cpuidle_device *dev,
return default_snooze_timeout;
for (i = index + 1; i < drv->state_count; i++) {
- struct cpuidle_state *s = &drv->states[i];
- struct cpuidle_state_usage *su = &dev->states_usage[i];
-
- if (s->disabled || su->disable)
+ if (dev->states_usage[i].disable)
continue;
- return s->target_residency * tb_ticks_per_usec;
+ return drv->states[i].target_residency * tb_ticks_per_usec;
}
return default_snooze_timeout;
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 0895b988fa92..569dbac443bd 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -75,44 +75,45 @@ int cpuidle_play_dead(void)
static int find_deepest_state(struct cpuidle_driver *drv,
struct cpuidle_device *dev,
- unsigned int max_latency,
+ u64 max_latency_ns,
unsigned int forbidden_flags,
bool s2idle)
{
- unsigned int latency_req = 0;
+ u64 latency_req = 0;
int i, ret = 0;
for (i = 1; i < drv->state_count; i++) {
struct cpuidle_state *s = &drv->states[i];
- struct cpuidle_state_usage *su = &dev->states_usage[i];
- if (s->disabled || su->disable || s->exit_latency <= latency_req
- || s->exit_latency > max_latency
- || (s->flags & forbidden_flags)
- || (s2idle && !s->enter_s2idle))
+ if (dev->states_usage[i].disable ||
+ s->exit_latency_ns <= latency_req ||
+ s->exit_latency_ns > max_latency_ns ||
+ (s->flags & forbidden_flags) ||
+ (s2idle && !s->enter_s2idle))
continue;
- latency_req = s->exit_latency;
+ latency_req = s->exit_latency_ns;
ret = i;
}
return ret;
}
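find_deepest_state() keeps the last (deepest) enabled state whose exit latency still fits under the limit; because the loop tracks the largest latency seen so far, a shallower duplicate can never win. A small self-contained sketch of the same loop over plain arrays, with invented latencies:

#include <stdint.h>
#include <stdio.h>

struct state {
	uint64_t exit_latency_ns;
	int disabled;
};

/* Keep the deepest enabled state whose exit latency fits the limit */
static int deepest(const struct state *s, int n, uint64_t max_latency_ns)
{
	uint64_t latency_req = 0;
	int i, ret = 0;

	for (i = 1; i < n; i++) {
		if (s[i].disabled ||
		    s[i].exit_latency_ns <= latency_req ||
		    s[i].exit_latency_ns > max_latency_ns)
			continue;
		latency_req = s[i].exit_latency_ns;
		ret = i;
	}
	return ret;
}

int main(void)
{
	const struct state st[] = {
		{ 0, 0 }, { 2000, 0 }, { 50000, 0 }, { 400000, 0 },
	};

	/* With a 100 us (100000 ns) limit, state 3 is too slow: prints 2 */
	printf("%d\n", deepest(st, 4, 100000));
	return 0;
}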
/**
- * cpuidle_use_deepest_state - Set/clear governor override flag.
- * @enable: New value of the flag.
+ * cpuidle_use_deepest_state - Set/unset governor override mode.
+ * @latency_limit_ns: Idle state exit latency limit (or no override if 0).
*
- * Set/unset the current CPU to use the deepest idle state (override governors
- * going forward if set).
+ * If @latency_limit_ns is nonzero, set the current CPU to use the deepest idle
+ * state with exit latency within @latency_limit_ns (override governors going
+ * forward), or do not override governors if it is zero.
*/
-void cpuidle_use_deepest_state(bool enable)
+void cpuidle_use_deepest_state(u64 latency_limit_ns)
{
struct cpuidle_device *dev;
preempt_disable();
dev = cpuidle_get_device();
if (dev)
- dev->use_deepest_state = enable;
+ dev->forced_idle_latency_limit_ns = latency_limit_ns;
preempt_enable();
}
@@ -122,9 +123,10 @@ void cpuidle_use_deepest_state(bool enable)
* @dev: cpuidle device for the given CPU.
*/
int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
- struct cpuidle_device *dev)
+ struct cpuidle_device *dev,
+ u64 latency_limit_ns)
{
- return find_deepest_state(drv, dev, UINT_MAX, 0, false);
+ return find_deepest_state(drv, dev, latency_limit_ns, 0, false);
}
#ifdef CONFIG_SUSPEND
@@ -180,7 +182,7 @@ int cpuidle_enter_s2idle(struct cpuidle_driver *drv, struct cpuidle_device *dev)
* that interrupts won't be enabled when it exits and allows the tick to
* be frozen safely.
*/
- index = find_deepest_state(drv, dev, UINT_MAX, 0, true);
+ index = find_deepest_state(drv, dev, U64_MAX, 0, true);
if (index > 0)
enter_s2idle_proper(drv, dev, index);
@@ -209,7 +211,7 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
* CPU as a broadcast timer, this call may fail if it is not available.
*/
if (broadcast && tick_broadcast_enter()) {
- index = find_deepest_state(drv, dev, target_state->exit_latency,
+ index = find_deepest_state(drv, dev, target_state->exit_latency_ns,
CPUIDLE_FLAG_TIMER_STOP, false);
if (index < 0) {
default_idle_call();
@@ -247,7 +249,7 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
local_irq_enable();
if (entered_state >= 0) {
- s64 diff, delay = drv->states[entered_state].exit_latency;
+ s64 diff, delay = drv->states[entered_state].exit_latency_ns;
int i;
/*
@@ -255,18 +257,15 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
* This can be moved to within driver enter routine,
* but that results in multiple copies of same code.
*/
- diff = ktime_us_delta(time_end, time_start);
- if (diff > INT_MAX)
- diff = INT_MAX;
+ diff = ktime_sub(time_end, time_start);
- dev->last_residency = (int)diff;
- dev->states_usage[entered_state].time += dev->last_residency;
+ dev->last_residency_ns = diff;
+ dev->states_usage[entered_state].time_ns += diff;
dev->states_usage[entered_state].usage++;
- if (diff < drv->states[entered_state].target_residency) {
+ if (diff < drv->states[entered_state].target_residency_ns) {
for (i = entered_state - 1; i >= 0; i--) {
- if (drv->states[i].disabled ||
- dev->states_usage[i].disable)
+ if (dev->states_usage[i].disable)
continue;
/* Shallower states are enabled, so update. */
@@ -275,22 +274,21 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
}
} else if (diff > delay) {
for (i = entered_state + 1; i < drv->state_count; i++) {
- if (drv->states[i].disabled ||
- dev->states_usage[i].disable)
+ if (dev->states_usage[i].disable)
continue;
/*
* Update if a deeper state would have been a
* better match for the observed idle duration.
*/
- if (diff - delay >= drv->states[i].target_residency)
+ if (diff - delay >= drv->states[i].target_residency_ns)
dev->states_usage[entered_state].below++;
break;
}
}
} else {
- dev->last_residency = 0;
+ dev->last_residency_ns = 0;
}
return entered_state;
@@ -380,10 +378,10 @@ u64 cpuidle_poll_time(struct cpuidle_driver *drv,
limit_ns = TICK_NSEC;
for (i = 1; i < drv->state_count; i++) {
- if (drv->states[i].disabled || dev->states_usage[i].disable)
+ if (dev->states_usage[i].disable)
continue;
- limit_ns = (u64)drv->states[i].target_residency * NSEC_PER_USEC;
+ limit_ns = (u64)drv->states[i].target_residency_ns;
}
dev->poll_limit_ns = limit_ns;
@@ -554,7 +552,7 @@ static void __cpuidle_unregister_device(struct cpuidle_device *dev)
static void __cpuidle_device_init(struct cpuidle_device *dev)
{
memset(dev->states_usage, 0, sizeof(dev->states_usage));
- dev->last_residency = 0;
+ dev->last_residency_ns = 0;
dev->next_hrtimer = 0;
}
@@ -567,12 +565,16 @@ static void __cpuidle_device_init(struct cpuidle_device *dev)
*/
static int __cpuidle_register_device(struct cpuidle_device *dev)
{
- int ret;
struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
+ int i, ret;
if (!try_module_get(drv->owner))
return -EINVAL;
+ for (i = 0; i < drv->state_count; i++)
+ if (drv->states[i].disabled)
+ dev->states_usage[i].disable |= CPUIDLE_STATE_DISABLED_BY_DRIVER;
+
per_cpu(cpuidle_devices, dev->cpu) = dev;
list_add(&dev->device_list, &cpuidle_detected_devices);
diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
index 80c1a830d991..c76423aaef4d 100644
--- a/drivers/cpuidle/driver.c
+++ b/drivers/cpuidle/driver.c
@@ -62,24 +62,23 @@ static inline void __cpuidle_unset_driver(struct cpuidle_driver *drv)
* __cpuidle_set_driver - set per CPU driver variables for the given driver.
* @drv: a valid pointer to a struct cpuidle_driver
*
- * For each CPU in the driver's cpumask, unset the registered driver per CPU
- * to @drv.
- *
- * Returns 0 on success, -EBUSY if the CPUs have driver(s) already.
+ * Returns 0 on success, -EBUSY if any CPU in the cpumask already has a
+ * driver different from @drv.
*/
static inline int __cpuidle_set_driver(struct cpuidle_driver *drv)
{
int cpu;
for_each_cpu(cpu, drv->cpumask) {
+ struct cpuidle_driver *old_drv;
- if (__cpuidle_get_cpu_driver(cpu)) {
- __cpuidle_unset_driver(drv);
+ old_drv = __cpuidle_get_cpu_driver(cpu);
+ if (old_drv && old_drv != drv)
return -EBUSY;
- }
+ }
+ for_each_cpu(cpu, drv->cpumask)
per_cpu(cpuidle_drivers, cpu) = drv;
- }
return 0;
}
@@ -166,16 +165,27 @@ static void __cpuidle_driver_init(struct cpuidle_driver *drv)
if (!drv->cpumask)
drv->cpumask = (struct cpumask *)cpu_possible_mask;
- /*
- * Look for the timer stop flag in the different states, so that we know
- * if the broadcast timer has to be set up. The loop is in the reverse
- * order, because usually one of the deeper states have this flag set.
- */
- for (i = drv->state_count - 1; i >= 0 ; i--) {
- if (drv->states[i].flags & CPUIDLE_FLAG_TIMER_STOP) {
+ for (i = 0; i < drv->state_count; i++) {
+ struct cpuidle_state *s = &drv->states[i];
+
+ /*
+ * Look for the timer stop flag in the different states and if
+ * it is found, indicate that the broadcast timer has to be set
+ * up.
+ */
+ if (s->flags & CPUIDLE_FLAG_TIMER_STOP)
drv->bctimer = 1;
- break;
- }
+
+ /*
+ * The core will use the target residency and exit latency
+ * values in nanoseconds, but allow drivers to provide them in
+ * microseconds too.
+ */
+ if (s->target_residency > 0)
+ s->target_residency_ns = s->target_residency * NSEC_PER_USEC;
+
+ if (s->exit_latency > 0)
+ s->exit_latency_ns = s->exit_latency * NSEC_PER_USEC;
}
}
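The loop above is what lets unconverted drivers keep filling in microsecond fields: the core derives the nanosecond values once at registration time. A minimal illustration of that conversion (NSEC_PER_USEC redefined locally for the sketch):

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_USEC 1000ULL	/* local copy for the sketch */

struct state {
	unsigned int exit_latency;	/* us, as legacy drivers fill it */
	uint64_t exit_latency_ns;	/* ns, what the core consumes */
};

int main(void)
{
	struct state s = { .exit_latency = 150 };

	/* What the registration path now does on the driver's behalf */
	if (s.exit_latency > 0)
		s.exit_latency_ns = s.exit_latency * NSEC_PER_USEC;

	printf("%llu\n", (unsigned long long)s.exit_latency_ns); /* 150000 */
	return 0;
}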
@@ -379,3 +389,31 @@ void cpuidle_driver_unref(void)
spin_unlock(&cpuidle_driver_lock);
}
+
+/**
+ * cpuidle_driver_state_disabled - Disable or enable an idle state
+ * @drv: cpuidle driver owning the state
+ * @idx: State index
+ * @disable: Whether or not to disable the state
+ */
+void cpuidle_driver_state_disabled(struct cpuidle_driver *drv, int idx,
+ bool disable)
+{
+ unsigned int cpu;
+
+ mutex_lock(&cpuidle_lock);
+
+ for_each_cpu(cpu, drv->cpumask) {
+ struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);
+
+ if (!dev)
+ continue;
+
+ if (disable)
+ dev->states_usage[idx].disable |= CPUIDLE_STATE_DISABLED_BY_DRIVER;
+ else
+ dev->states_usage[idx].disable &= ~CPUIDLE_STATE_DISABLED_BY_DRIVER;
+ }
+
+ mutex_unlock(&cpuidle_lock);
+}
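With the disable word now a bit mask, the driver's and the user's reasons for disabling a state are tracked independently; clearing one bit does not re-enable a state still held down by the other. A sketch with assumed bit values (the real flags are defined in the cpuidle header, which is not part of this hunk):

#include <stdio.h>

#define DISABLED_BY_USER	0x1	/* assumed bit values */
#define DISABLED_BY_DRIVER	0x2

int main(void)
{
	unsigned int disable = 0;

	disable |= DISABLED_BY_DRIVER;	/* driver marks the state off */
	disable |= DISABLED_BY_USER;	/* user writes 1 to sysfs */
	disable &= ~DISABLED_BY_USER;	/* user writes 0 again */

	/* Still disabled: the driver's reason survives the user's toggle */
	printf("state usable: %s\n", disable ? "no" : "yes");
	return 0;
}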
diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c
index e9801f26c732..e48271e117a3 100644
--- a/drivers/cpuidle/governor.c
+++ b/drivers/cpuidle/governor.c
@@ -107,11 +107,14 @@ int cpuidle_register_governor(struct cpuidle_governor *gov)
* cpuidle_governor_latency_req - Compute a latency constraint for CPU
* @cpu: Target CPU
*/
-int cpuidle_governor_latency_req(unsigned int cpu)
+s64 cpuidle_governor_latency_req(unsigned int cpu)
{
int global_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
struct device *device = get_cpu_device(cpu);
int device_req = dev_pm_qos_raw_resume_latency(device);
- return device_req < global_req ? device_req : global_req;
+ if (device_req > global_req)
+ device_req = global_req;
+
+ return (s64)device_req * NSEC_PER_USEC;
}
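The governor latency constraint is still computed in microseconds from the two PM QoS inputs and only widened to nanoseconds at the very end. With made-up QoS values, a per-device limit of 200 us against a global limit of 120 us yields 120000 ns:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_USEC 1000LL

/* Same shape as the function above, with plain ints for the sketch */
static int64_t latency_req_ns(int device_req_us, int global_req_us)
{
	if (device_req_us > global_req_us)
		device_req_us = global_req_us;

	return (int64_t)device_req_us * NSEC_PER_USEC;
}

int main(void)
{
	/* per-device 200 us vs global 120 us: the tighter one wins */
	printf("%lld ns\n", (long long)latency_req_ns(200, 120));
	return 0;
}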
diff --git a/drivers/cpuidle/governors/haltpoll.c b/drivers/cpuidle/governors/haltpoll.c
index 7a703d2e0064..cb2a96eafc02 100644
--- a/drivers/cpuidle/governors/haltpoll.c
+++ b/drivers/cpuidle/governors/haltpoll.c
@@ -49,7 +49,7 @@ static int haltpoll_select(struct cpuidle_driver *drv,
struct cpuidle_device *dev,
bool *stop_tick)
{
- int latency_req = cpuidle_governor_latency_req(dev->cpu);
+ s64 latency_req = cpuidle_governor_latency_req(dev->cpu);
if (!drv->state_count || latency_req == 0) {
*stop_tick = false;
@@ -75,10 +75,9 @@ static int haltpoll_select(struct cpuidle_driver *drv,
return 0;
}
-static void adjust_poll_limit(struct cpuidle_device *dev, unsigned int block_us)
+static void adjust_poll_limit(struct cpuidle_device *dev, u64 block_ns)
{
unsigned int val;
- u64 block_ns = block_us*NSEC_PER_USEC;
/* Grow cpu_halt_poll_us if
* cpu_halt_poll_us < block_ns < guest_halt_poll_us
@@ -115,7 +114,7 @@ static void haltpoll_reflect(struct cpuidle_device *dev, int index)
dev->last_state_idx = index;
if (index != 0)
- adjust_poll_limit(dev, dev->last_residency);
+ adjust_poll_limit(dev, dev->last_residency_ns);
}
/**
diff --git a/drivers/cpuidle/governors/ladder.c b/drivers/cpuidle/governors/ladder.c
index 428eeb832fe7..8e9058c4ea63 100644
--- a/drivers/cpuidle/governors/ladder.c
+++ b/drivers/cpuidle/governors/ladder.c
@@ -27,8 +27,8 @@ struct ladder_device_state {
struct {
u32 promotion_count;
u32 demotion_count;
- u32 promotion_time;
- u32 demotion_time;
+ u64 promotion_time_ns;
+ u64 demotion_time_ns;
} threshold;
struct {
int promotion_count;
@@ -68,9 +68,10 @@ static int ladder_select_state(struct cpuidle_driver *drv,
{
struct ladder_device *ldev = this_cpu_ptr(&ladder_devices);
struct ladder_device_state *last_state;
- int last_residency, last_idx = dev->last_state_idx;
+ int last_idx = dev->last_state_idx;
int first_idx = drv->states[0].flags & CPUIDLE_FLAG_POLLING ? 1 : 0;
- int latency_req = cpuidle_governor_latency_req(dev->cpu);
+ s64 latency_req = cpuidle_governor_latency_req(dev->cpu);
+ s64 last_residency;
/* Special case when user has set very strict latency requirement */
if (unlikely(latency_req == 0)) {
@@ -80,14 +81,13 @@ static int ladder_select_state(struct cpuidle_driver *drv,
last_state = &ldev->states[last_idx];
- last_residency = dev->last_residency - drv->states[last_idx].exit_latency;
+ last_residency = dev->last_residency_ns - drv->states[last_idx].exit_latency_ns;
/* consider promotion */
if (last_idx < drv->state_count - 1 &&
- !drv->states[last_idx + 1].disabled &&
!dev->states_usage[last_idx + 1].disable &&
- last_residency > last_state->threshold.promotion_time &&
- drv->states[last_idx + 1].exit_latency <= latency_req) {
+ last_residency > last_state->threshold.promotion_time_ns &&
+ drv->states[last_idx + 1].exit_latency_ns <= latency_req) {
last_state->stats.promotion_count++;
last_state->stats.demotion_count = 0;
if (last_state->stats.promotion_count >= last_state->threshold.promotion_count) {
@@ -98,13 +98,12 @@ static int ladder_select_state(struct cpuidle_driver *drv,
/* consider demotion */
if (last_idx > first_idx &&
- (drv->states[last_idx].disabled ||
- dev->states_usage[last_idx].disable ||
- drv->states[last_idx].exit_latency > latency_req)) {
+ (dev->states_usage[last_idx].disable ||
+ drv->states[last_idx].exit_latency_ns > latency_req)) {
int i;
for (i = last_idx - 1; i > first_idx; i--) {
- if (drv->states[i].exit_latency <= latency_req)
+ if (drv->states[i].exit_latency_ns <= latency_req)
break;
}
ladder_do_selection(dev, ldev, last_idx, i);
@@ -112,7 +111,7 @@ static int ladder_select_state(struct cpuidle_driver *drv,
}
if (last_idx > first_idx &&
- last_residency < last_state->threshold.demotion_time) {
+ last_residency < last_state->threshold.demotion_time_ns) {
last_state->stats.demotion_count++;
last_state->stats.promotion_count = 0;
if (last_state->stats.demotion_count >= last_state->threshold.demotion_count) {
@@ -152,9 +151,9 @@ static int ladder_enable_device(struct cpuidle_driver *drv,
lstate->threshold.demotion_count = DEMOTION_COUNT;
if (i < drv->state_count - 1)
- lstate->threshold.promotion_time = state->exit_latency;
+ lstate->threshold.promotion_time_ns = state->exit_latency_ns;
if (i > first_idx)
- lstate->threshold.demotion_time = state->exit_latency;
+ lstate->threshold.demotion_time_ns = state->exit_latency_ns;
}
return 0;
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index e5a5d0c8d66b..b0a7ad566081 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -19,22 +19,12 @@
#include <linux/sched/stat.h>
#include <linux/math64.h>
-/*
- * Please note when changing the tuning values:
- * If (MAX_INTERESTING-1) * RESOLUTION > UINT_MAX, the result of
- * a scaling operation multiplication may overflow on 32 bit platforms.
- * In that case, #define RESOLUTION as ULL to get 64 bit result:
- * #define RESOLUTION 1024ULL
- *
- * The default values do not overflow.
- */
#define BUCKETS 12
#define INTERVAL_SHIFT 3
#define INTERVALS (1UL << INTERVAL_SHIFT)
#define RESOLUTION 1024
#define DECAY 8
-#define MAX_INTERESTING 50000
-
+#define MAX_INTERESTING (50000 * NSEC_PER_USEC)
/*
* Concepts and ideas behind the menu governor
@@ -120,14 +110,14 @@ struct menu_device {
int needs_update;
int tick_wakeup;
- unsigned int next_timer_us;
+ u64 next_timer_ns;
unsigned int bucket;
unsigned int correction_factor[BUCKETS];
unsigned int intervals[INTERVALS];
int interval_ptr;
};
-static inline int which_bucket(unsigned int duration, unsigned long nr_iowaiters)
+static inline int which_bucket(u64 duration_ns, unsigned long nr_iowaiters)
{
int bucket = 0;
@@ -140,15 +130,15 @@ static inline int which_bucket(unsigned int duration, unsigned long nr_iowaiters
if (nr_iowaiters)
bucket = BUCKETS/2;
- if (duration < 10)
+ if (duration_ns < 10ULL * NSEC_PER_USEC)
return bucket;
- if (duration < 100)
+ if (duration_ns < 100ULL * NSEC_PER_USEC)
return bucket + 1;
- if (duration < 1000)
+ if (duration_ns < 1000ULL * NSEC_PER_USEC)
return bucket + 2;
- if (duration < 10000)
+ if (duration_ns < 10000ULL * NSEC_PER_USEC)
return bucket + 3;
- if (duration < 100000)
+ if (duration_ns < 100000ULL * NSEC_PER_USEC)
return bucket + 4;
return bucket + 5;
}
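which_bucket() now compares nanosecond durations against the same decade thresholds as before, simply scaled by NSEC_PER_USEC, and iowaiters still shift the answer by BUCKETS/2. A standalone copy for a quick sanity check (constants inlined for the sketch):

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_USEC 1000ULL
#define BUCKETS 12

static int bucket(uint64_t duration_ns, int nr_iowaiters)
{
	int b = nr_iowaiters ? BUCKETS / 2 : 0;

	if (duration_ns < 10 * NSEC_PER_USEC)
		return b;
	if (duration_ns < 100 * NSEC_PER_USEC)
		return b + 1;
	if (duration_ns < 1000 * NSEC_PER_USEC)
		return b + 2;
	if (duration_ns < 10000 * NSEC_PER_USEC)
		return b + 3;
	if (duration_ns < 100000 * NSEC_PER_USEC)
		return b + 4;
	return b + 5;
}

int main(void)
{
	/* 250 us with no iowaiters lands in bucket 2 */
	printf("%d\n", bucket(250 * NSEC_PER_USEC, 0));
	return 0;
}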
@@ -276,13 +266,13 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
bool *stop_tick)
{
struct menu_device *data = this_cpu_ptr(&menu_devices);
- int latency_req = cpuidle_governor_latency_req(dev->cpu);
- int i;
- int idx;
- unsigned int interactivity_req;
+ s64 latency_req = cpuidle_governor_latency_req(dev->cpu);
unsigned int predicted_us;
+ u64 predicted_ns;
+ u64 interactivity_req;
unsigned long nr_iowaiters;
ktime_t delta_next;
+ int i, idx;
if (data->needs_update) {
menu_update(drv, dev);
@@ -290,15 +280,15 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
}
/* determine the expected residency time, round up */
- data->next_timer_us = ktime_to_us(tick_nohz_get_sleep_length(&delta_next));
+ data->next_timer_ns = tick_nohz_get_sleep_length(&delta_next);
nr_iowaiters = nr_iowait_cpu(dev->cpu);
- data->bucket = which_bucket(data->next_timer_us, nr_iowaiters);
+ data->bucket = which_bucket(data->next_timer_ns, nr_iowaiters);
if (unlikely(drv->state_count <= 1 || latency_req == 0) ||
- ((data->next_timer_us < drv->states[1].target_residency ||
- latency_req < drv->states[1].exit_latency) &&
- !drv->states[0].disabled && !dev->states_usage[0].disable)) {
+ ((data->next_timer_ns < drv->states[1].target_residency_ns ||
+ latency_req < drv->states[1].exit_latency_ns) &&
+ !dev->states_usage[0].disable)) {
/*
* In this case state[0] will be used no matter what, so return
* it right away and keep the tick running if state[0] is a
* polling one.
*/
*stop_tick = !(drv->states[0].flags & CPUIDLE_FLAG_POLLING);
@@ -308,18 +298,15 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
return 0;
}
- /*
- * Force the result of multiplication to be 64 bits even if both
- * operands are 32 bits.
- * Make sure to round up for half microseconds.
- */
- predicted_us = DIV_ROUND_CLOSEST_ULL((uint64_t)data->next_timer_us *
- data->correction_factor[data->bucket],
- RESOLUTION * DECAY);
- /*
- * Use the lowest expected idle interval to pick the idle state.
- */
- predicted_us = min(predicted_us, get_typical_interval(data, predicted_us));
+ /* Round up the result for half microseconds. */
+ predicted_us = div_u64(data->next_timer_ns *
+ data->correction_factor[data->bucket] +
+ (RESOLUTION * DECAY * NSEC_PER_USEC) / 2,
+ RESOLUTION * DECAY * NSEC_PER_USEC);
+ /* Use the lowest expected idle interval to pick the idle state. */
+ predicted_ns = (u64)min(predicted_us,
+ get_typical_interval(data, predicted_us)) *
+ NSEC_PER_USEC;
if (tick_nohz_tick_stopped()) {
/*
@@ -330,14 +317,15 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
* the known time till the closest timer event for the idle
* state selection.
*/
- if (predicted_us < TICK_USEC)
- predicted_us = ktime_to_us(delta_next);
+ if (predicted_ns < TICK_NSEC)
+ predicted_ns = delta_next;
} else {
/*
* Use the performance multiplier and the user-configurable
* latency_req to determine the maximum exit latency.
*/
- interactivity_req = predicted_us / performance_multiplier(nr_iowaiters);
+ interactivity_req = div64_u64(predicted_ns,
+ performance_multiplier(nr_iowaiters));
if (latency_req > interactivity_req)
latency_req = interactivity_req;
}
@@ -349,27 +337,26 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
idx = -1;
for (i = 0; i < drv->state_count; i++) {
struct cpuidle_state *s = &drv->states[i];
- struct cpuidle_state_usage *su = &dev->states_usage[i];
- if (s->disabled || su->disable)
+ if (dev->states_usage[i].disable)
continue;
if (idx == -1)
idx = i; /* first enabled state */
- if (s->target_residency > predicted_us) {
+ if (s->target_residency_ns > predicted_ns) {
/*
* Use a physical idle state, not busy polling, unless
* a timer is going to trigger soon enough.
*/
if ((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) &&
- s->exit_latency <= latency_req &&
- s->target_residency <= data->next_timer_us) {
- predicted_us = s->target_residency;
+ s->exit_latency_ns <= latency_req &&
+ s->target_residency_ns <= data->next_timer_ns) {
+ predicted_ns = s->target_residency_ns;
idx = i;
break;
}
- if (predicted_us < TICK_USEC)
+ if (predicted_ns < TICK_NSEC)
break;
if (!tick_nohz_tick_stopped()) {
@@ -379,7 +366,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
* tick in that case and let the governor run
* again in the next iteration of the loop.
*/
- predicted_us = drv->states[idx].target_residency;
+ predicted_ns = drv->states[idx].target_residency_ns;
break;
}
@@ -389,13 +376,13 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
* closest timer event, select this one to avoid getting
* stuck in the shallow one for too long.
*/
- if (drv->states[idx].target_residency < TICK_USEC &&
- s->target_residency <= ktime_to_us(delta_next))
+ if (drv->states[idx].target_residency_ns < TICK_NSEC &&
+ s->target_residency_ns <= delta_next)
idx = i;
return idx;
}
- if (s->exit_latency > latency_req)
+ if (s->exit_latency_ns > latency_req)
break;
idx = i;
@@ -409,12 +396,10 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
* expected idle duration is shorter than the tick period length.
*/
if (((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) ||
- predicted_us < TICK_USEC) && !tick_nohz_tick_stopped()) {
- unsigned int delta_next_us = ktime_to_us(delta_next);
-
+ predicted_ns < TICK_NSEC) && !tick_nohz_tick_stopped()) {
*stop_tick = false;
- if (idx > 0 && drv->states[idx].target_residency > delta_next_us) {
+ if (idx > 0 && drv->states[idx].target_residency_ns > delta_next) {
/*
* The tick is not going to be stopped and the target
* residency of the state to be returned is not within
@@ -422,12 +407,11 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
* tick, so try to correct that.
*/
for (i = idx - 1; i >= 0; i--) {
- if (drv->states[i].disabled ||
- dev->states_usage[i].disable)
+ if (dev->states_usage[i].disable)
continue;
idx = i;
- if (drv->states[i].target_residency <= delta_next_us)
+ if (drv->states[i].target_residency_ns <= delta_next)
break;
}
}
@@ -463,7 +447,7 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
struct menu_device *data = this_cpu_ptr(&menu_devices);
int last_idx = dev->last_state_idx;
struct cpuidle_state *target = &drv->states[last_idx];
- unsigned int measured_us;
+ u64 measured_ns;
unsigned int new_factor;
/*
@@ -481,7 +465,7 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
* assume the state was never reached and the exit latency is 0.
*/
- if (data->tick_wakeup && data->next_timer_us > TICK_USEC) {
+ if (data->tick_wakeup && data->next_timer_ns > TICK_NSEC) {
/*
* The nohz code said that there wouldn't be any events within
* the tick boundary (if the tick was stopped), but the idle
@@ -491,7 +475,7 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
* have been idle long (but not forever) to help the idle
* duration predictor do a better job next time.
*/
- measured_us = 9 * MAX_INTERESTING / 10;
+ measured_ns = 9 * MAX_INTERESTING / 10;
} else if ((drv->states[last_idx].flags & CPUIDLE_FLAG_POLLING) &&
dev->poll_time_limit) {
/*
@@ -501,28 +485,29 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
* the CPU might have been woken up from idle by the next timer.
* Assume that to be the case.
*/
- measured_us = data->next_timer_us;
+ measured_ns = data->next_timer_ns;
} else {
/* measured value */
- measured_us = dev->last_residency;
+ measured_ns = dev->last_residency_ns;
/* Deduct exit latency */
- if (measured_us > 2 * target->exit_latency)
- measured_us -= target->exit_latency;
+ if (measured_ns > 2 * target->exit_latency_ns)
+ measured_ns -= target->exit_latency_ns;
else
- measured_us /= 2;
+ measured_ns /= 2;
}
/* Make sure our coefficients do not exceed unity */
- if (measured_us > data->next_timer_us)
- measured_us = data->next_timer_us;
+ if (measured_ns > data->next_timer_ns)
+ measured_ns = data->next_timer_ns;
/* Update our correction ratio */
new_factor = data->correction_factor[data->bucket];
new_factor -= new_factor / DECAY;
- if (data->next_timer_us > 0 && measured_us < MAX_INTERESTING)
- new_factor += RESOLUTION * measured_us / data->next_timer_us;
+ if (data->next_timer_ns > 0 && measured_ns < MAX_INTERESTING)
+ new_factor += div64_u64(RESOLUTION * measured_ns,
+ data->next_timer_ns);
else
/*
* we were idle so long that we count it as a perfect
@@ -542,7 +527,7 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
data->correction_factor[data->bucket] = new_factor;
/* update the repeating-pattern data */
- data->intervals[data->interval_ptr++] = measured_us;
+ data->intervals[data->interval_ptr++] = ktime_to_us(measured_ns);
if (data->interval_ptr >= INTERVALS)
data->interval_ptr = 0;
}
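The correction factor remains a decaying average: each update drops 1/DECAY of the accumulated history and adds up to RESOLUTION worth of the new measured/predicted ratio, and predictions then scale the next timer distance by factor / (RESOLUTION * DECAY). A worked example with invented numbers, assuming the factor starts at RESOLUTION * DECAY as set by menu_enable_device() (not shown in this hunk):

#include <stdint.h>
#include <stdio.h>

#define RESOLUTION 1024
#define DECAY 8

int main(void)
{
	unsigned int factor = RESOLUTION * DECAY; /* initial "perfect" value */
	uint64_t measured_ns = 600000;		/* slept 600 us ...        */
	uint64_t next_timer_ns = 1000000;	/* ... of a predicted 1 ms */

	factor -= factor / DECAY;		/* decay the old history */
	factor += (unsigned int)(RESOLUTION * measured_ns / next_timer_ns);

	/* A future 1 ms timer distance is now predicted shorter */
	printf("factor=%u predicted_ns=%llu\n", factor,
	       (unsigned long long)(next_timer_ns * factor /
				    (RESOLUTION * DECAY)));
	return 0;
}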
diff --git a/drivers/cpuidle/governors/teo.c b/drivers/cpuidle/governors/teo.c
index b5a0e498f798..de7e706efd46 100644
--- a/drivers/cpuidle/governors/teo.c
+++ b/drivers/cpuidle/governors/teo.c
@@ -104,7 +104,7 @@ struct teo_cpu {
u64 sleep_length_ns;
struct teo_idle_state states[CPUIDLE_STATE_MAX];
int interval_idx;
- unsigned int intervals[INTERVALS];
+ u64 intervals[INTERVALS];
};
static DEFINE_PER_CPU(struct teo_cpu, teo_cpus);
@@ -117,9 +117,8 @@ static DEFINE_PER_CPU(struct teo_cpu, teo_cpus);
static void teo_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, dev->cpu);
- unsigned int sleep_length_us = ktime_to_us(cpu_data->sleep_length_ns);
int i, idx_hit = -1, idx_timer = -1;
- unsigned int measured_us;
+ u64 measured_ns;
if (cpu_data->time_span_ns >= cpu_data->sleep_length_ns) {
/*
@@ -127,23 +126,28 @@ static void teo_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
* enough to the closest timer event expected at the idle state
* selection time to be discarded.
*/
- measured_us = UINT_MAX;
+ measured_ns = U64_MAX;
} else {
- unsigned int lat;
+ u64 lat_ns = drv->states[dev->last_state_idx].exit_latency_ns;
- lat = drv->states[dev->last_state_idx].exit_latency;
-
- measured_us = ktime_to_us(cpu_data->time_span_ns);
+ /*
+ * The computations below are to determine whether or not the
+ * (saved) time till the next timer event and the measured idle
+ * duration fall into the same "bin", so use last_residency_ns
+ * for that instead of time_span_ns which includes the cpuidle
+ * overhead.
+ */
+ measured_ns = dev->last_residency_ns;
/*
* The delay between the wakeup and the first instruction
* executed by the CPU is not likely to be worst-case every
* time, so take 1/2 of the exit latency as a very rough
* approximation of the average of it.
*/
- if (measured_us >= lat)
- measured_us -= lat / 2;
+ if (measured_ns >= lat_ns)
+ measured_ns -= lat_ns / 2;
else
- measured_us /= 2;
+ measured_ns /= 2;
}
/*
@@ -155,9 +159,9 @@ static void teo_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
cpu_data->states[i].early_hits -= early_hits >> DECAY_SHIFT;
- if (drv->states[i].target_residency <= sleep_length_us) {
+ if (drv->states[i].target_residency_ns <= cpu_data->sleep_length_ns) {
idx_timer = i;
- if (drv->states[i].target_residency <= measured_us)
+ if (drv->states[i].target_residency_ns <= measured_ns)
idx_hit = i;
}
}
@@ -193,30 +197,35 @@ static void teo_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
* Save idle duration values corresponding to non-timer wakeups for
* pattern detection.
*/
- cpu_data->intervals[cpu_data->interval_idx++] = measured_us;
+ cpu_data->intervals[cpu_data->interval_idx++] = measured_ns;
if (cpu_data->interval_idx >= INTERVALS)
cpu_data->interval_idx = 0;
}
+static bool teo_time_ok(u64 interval_ns)
+{
+ return !tick_nohz_tick_stopped() || interval_ns >= TICK_NSEC;
+}
+
/**
* teo_find_shallower_state - Find shallower idle state matching given duration.
* @drv: cpuidle driver containing state data.
* @dev: Target CPU.
* @state_idx: Index of the capping idle state.
- * @duration_us: Idle duration value to match.
+ * @duration_ns: Idle duration value to match.
*/
static int teo_find_shallower_state(struct cpuidle_driver *drv,
struct cpuidle_device *dev, int state_idx,
- unsigned int duration_us)
+ u64 duration_ns)
{
int i;
for (i = state_idx - 1; i >= 0; i--) {
- if (drv->states[i].disabled || dev->states_usage[i].disable)
+ if (dev->states_usage[i].disable)
continue;
state_idx = i;
- if (drv->states[i].target_residency <= duration_us)
+ if (drv->states[i].target_residency_ns <= duration_ns)
break;
}
return state_idx;
@@ -232,9 +241,10 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
bool *stop_tick)
{
struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, dev->cpu);
- int latency_req = cpuidle_governor_latency_req(dev->cpu);
- unsigned int duration_us, count;
- int max_early_idx, constraint_idx, idx, i;
+ s64 latency_req = cpuidle_governor_latency_req(dev->cpu);
+ u64 duration_ns;
+ unsigned int hits, misses, early_hits;
+ int max_early_idx, prev_max_early_idx, constraint_idx, idx, i;
ktime_t delta_tick;
if (dev->last_state_idx >= 0) {
@@ -244,50 +254,92 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
cpu_data->time_span_ns = local_clock();
- cpu_data->sleep_length_ns = tick_nohz_get_sleep_length(&delta_tick);
- duration_us = ktime_to_us(cpu_data->sleep_length_ns);
+ duration_ns = tick_nohz_get_sleep_length(&delta_tick);
+ cpu_data->sleep_length_ns = duration_ns;
- count = 0;
+ hits = 0;
+ misses = 0;
+ early_hits = 0;
max_early_idx = -1;
+ prev_max_early_idx = -1;
constraint_idx = drv->state_count;
idx = -1;
for (i = 0; i < drv->state_count; i++) {
struct cpuidle_state *s = &drv->states[i];
- struct cpuidle_state_usage *su = &dev->states_usage[i];
- if (s->disabled || su->disable) {
+ if (dev->states_usage[i].disable) {
+ /*
+ * Ignore disabled states with target residencies beyond
+ * the anticipated idle duration.
+ */
+ if (s->target_residency_ns > duration_ns)
+ continue;
+
+ /*
+ * This state is disabled, so the range of idle duration
+ * values corresponding to it is covered by the current
+ * candidate state, but still the "hits" and "misses"
+ * metrics of the disabled state need to be used to
+ * decide whether or not the state covering the range in
+ * question is good enough.
+ */
+ hits = cpu_data->states[i].hits;
+ misses = cpu_data->states[i].misses;
+
+ if (early_hits >= cpu_data->states[i].early_hits ||
+ idx < 0)
+ continue;
+
/*
- * If the "early hits" metric of a disabled state is
- * greater than the current maximum, it should be taken
- * into account, because it would be a mistake to select
- * a deeper state with lower "early hits" metric. The
- * index cannot be changed to point to it, however, so
- * just increase the max count alone and let the index
- * still point to a shallower idle state.
+ * If the current candidate state has been the one with
+ * the maximum "early hits" metric so far, the "early
+ * hits" metric of the disabled state replaces the
+ * current "early hits" count to avoid selecting a
+ * deeper state with lower "early hits" metric.
*/
- if (max_early_idx >= 0 &&
- count < cpu_data->states[i].early_hits)
- count = cpu_data->states[i].early_hits;
+ if (max_early_idx == idx) {
+ early_hits = cpu_data->states[i].early_hits;
+ continue;
+ }
+
+ /*
+ * The current candidate state is closer to the disabled
+ * one than the current maximum "early hits" state, so
+ * replace the latter with it, but in case the maximum
+ * "early hits" state index has not been set so far,
+ * check if the current candidate state is not too
+ * shallow for that role.
+ */
+ if (teo_time_ok(drv->states[idx].target_residency_ns)) {
+ prev_max_early_idx = max_early_idx;
+ early_hits = cpu_data->states[i].early_hits;
+ max_early_idx = idx;
+ }
continue;
}
- if (idx < 0)
+ if (idx < 0) {
idx = i; /* first enabled state */
+ hits = cpu_data->states[i].hits;
+ misses = cpu_data->states[i].misses;
+ }
- if (s->target_residency > duration_us)
+ if (s->target_residency_ns > duration_ns)
break;
- if (s->exit_latency > latency_req && constraint_idx > i)
+ if (s->exit_latency_ns > latency_req && constraint_idx > i)
constraint_idx = i;
idx = i;
+ hits = cpu_data->states[i].hits;
+ misses = cpu_data->states[i].misses;
- if (count < cpu_data->states[i].early_hits &&
- !(tick_nohz_tick_stopped() &&
- drv->states[i].target_residency < TICK_USEC)) {
- count = cpu_data->states[i].early_hits;
+ if (early_hits < cpu_data->states[i].early_hits &&
+ teo_time_ok(drv->states[i].target_residency_ns)) {
+ prev_max_early_idx = max_early_idx;
+ early_hits = cpu_data->states[i].early_hits;
max_early_idx = i;
}
}
@@ -300,10 +352,19 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
* "early hits" metric, but if that cannot be determined, just use the
* state selected so far.
*/
- if (cpu_data->states[idx].hits <= cpu_data->states[idx].misses &&
- max_early_idx >= 0) {
- idx = max_early_idx;
- duration_us = drv->states[idx].target_residency;
+ if (hits <= misses) {
+ /*
+ * The current candidate state is not suitable, so take the one
+ * whose "early hits" metric is the maximum for the range of
+ * shallower states.
+ */
+ if (idx == max_early_idx)
+ max_early_idx = prev_max_early_idx;
+
+ if (max_early_idx >= 0) {
+ idx = max_early_idx;
+ duration_ns = drv->states[idx].target_residency_ns;
+ }
}
/*
@@ -316,18 +377,17 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
if (idx < 0) {
idx = 0; /* No states enabled. Must use 0. */
} else if (idx > 0) {
+ unsigned int count = 0;
u64 sum = 0;
- count = 0;
-
/*
* Count and sum the most recent idle duration values less than
* the current expected idle duration value.
*/
for (i = 0; i < INTERVALS; i++) {
- unsigned int val = cpu_data->intervals[i];
+ u64 val = cpu_data->intervals[i];
- if (val >= duration_us)
+ if (val >= duration_ns)
continue;
count++;
@@ -339,17 +399,17 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
* values are in the interesting range.
*/
if (count > INTERVALS / 2) {
- unsigned int avg_us = div64_u64(sum, count);
+ u64 avg_ns = div64_u64(sum, count);
/*
* Avoid spending too much time in an idle state that
* would be too shallow.
*/
- if (!(tick_nohz_tick_stopped() && avg_us < TICK_USEC)) {
- duration_us = avg_us;
- if (drv->states[idx].target_residency > avg_us)
+ if (teo_time_ok(avg_ns)) {
+ duration_ns = avg_ns;
+ if (drv->states[idx].target_residency_ns > avg_ns)
idx = teo_find_shallower_state(drv, dev,
- idx, avg_us);
+ idx, avg_ns);
}
}
}
@@ -359,9 +419,7 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
* expected idle duration is shorter than the tick period length.
*/
if (((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) ||
- duration_us < TICK_USEC) && !tick_nohz_tick_stopped()) {
- unsigned int delta_tick_us = ktime_to_us(delta_tick);
-
+ duration_ns < TICK_NSEC) && !tick_nohz_tick_stopped()) {
*stop_tick = false;
/*
@@ -370,8 +428,8 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
* till the closest timer including the tick, try to correct
* that.
*/
- if (idx > 0 && drv->states[idx].target_residency > delta_tick_us)
- idx = teo_find_shallower_state(drv, dev, idx, delta_tick_us);
+ if (idx > 0 && drv->states[idx].target_residency_ns > delta_tick)
+ idx = teo_find_shallower_state(drv, dev, idx, delta_tick);
}
return idx;
@@ -415,7 +473,7 @@ static int teo_enable_device(struct cpuidle_driver *drv,
memset(cpu_data, 0, sizeof(*cpu_data));
for (i = 0; i < INTERVALS; i++)
- cpu_data->intervals[i] = UINT_MAX;
+ cpu_data->intervals[i] = U64_MAX;
return 0;
}
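Initializing the intervals array to U64_MAX means unused slots can never count as recent short sleeps in the pattern check, which averages only the recorded intervals below the expected duration and acts when they form a majority. A sketch of that check with fabricated interval data:

#include <stdint.h>
#include <stdio.h>

#define INTERVALS 8
#define SKETCH_U64_MAX ((uint64_t)-1)	/* stands in for U64_MAX */

int main(void)
{
	/* Five recorded short sleeps, three never-used slots */
	uint64_t intervals[INTERVALS] = {
		150000, 180000, 120000, 160000, 140000,
		SKETCH_U64_MAX, SKETCH_U64_MAX, SKETCH_U64_MAX,
	};
	uint64_t duration_ns = 2000000, sum = 0;
	unsigned int count = 0;
	int i;

	for (i = 0; i < INTERVALS; i++) {
		if (intervals[i] >= duration_ns)
			continue;	/* unused slots always fail here */
		count++;
		sum += intervals[i];
	}

	if (count > INTERVALS / 2)	/* a majority forms a pattern */
		printf("avg_ns=%llu\n", (unsigned long long)(sum / count));
	else
		printf("no pattern\n");
	return 0;
}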
diff --git a/drivers/cpuidle/poll_state.c b/drivers/cpuidle/poll_state.c
index c8fa5f41dfc4..9f1ace9c53da 100644
--- a/drivers/cpuidle/poll_state.c
+++ b/drivers/cpuidle/poll_state.c
@@ -49,6 +49,8 @@ void cpuidle_poll_state_init(struct cpuidle_driver *drv)
snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
state->exit_latency = 0;
state->target_residency = 0;
+ state->exit_latency_ns = 0;
+ state->target_residency_ns = 0;
state->power_usage = -1;
state->enter = poll_idle;
state->disabled = false;
diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
index 2bb2683b493c..38ef770be90d 100644
--- a/drivers/cpuidle/sysfs.c
+++ b/drivers/cpuidle/sysfs.c
@@ -255,25 +255,6 @@ static ssize_t show_state_##_name(struct cpuidle_state *state, \
return sprintf(buf, "%u\n", state->_name);\
}
-#define define_store_state_ull_function(_name) \
-static ssize_t store_state_##_name(struct cpuidle_state *state, \
- struct cpuidle_state_usage *state_usage, \
- const char *buf, size_t size) \
-{ \
- unsigned long long value; \
- int err; \
- if (!capable(CAP_SYS_ADMIN)) \
- return -EPERM; \
- err = kstrtoull(buf, 0, &value); \
- if (err) \
- return err; \
- if (value) \
- state_usage->_name = 1; \
- else \
- state_usage->_name = 0; \
- return size; \
-}
-
#define define_show_state_ull_function(_name) \
static ssize_t show_state_##_name(struct cpuidle_state *state, \
struct cpuidle_state_usage *state_usage, \
@@ -292,18 +273,60 @@ static ssize_t show_state_##_name(struct cpuidle_state *state, \
return sprintf(buf, "%s\n", state->_name);\
}
-define_show_state_function(exit_latency)
-define_show_state_function(target_residency)
+#define define_show_state_time_function(_name) \
+static ssize_t show_state_##_name(struct cpuidle_state *state, \
+ struct cpuidle_state_usage *state_usage, \
+ char *buf) \
+{ \
+ return sprintf(buf, "%llu\n", ktime_to_us(state->_name##_ns)); \
+}
+
+define_show_state_time_function(exit_latency)
+define_show_state_time_function(target_residency)
define_show_state_function(power_usage)
define_show_state_ull_function(usage)
-define_show_state_ull_function(time)
define_show_state_str_function(name)
define_show_state_str_function(desc)
-define_show_state_ull_function(disable)
-define_store_state_ull_function(disable)
define_show_state_ull_function(above)
define_show_state_ull_function(below)
+static ssize_t show_state_time(struct cpuidle_state *state,
+ struct cpuidle_state_usage *state_usage,
+ char *buf)
+{
+ return sprintf(buf, "%llu\n", ktime_to_us(state_usage->time_ns));
+}
+
+static ssize_t show_state_disable(struct cpuidle_state *state,
+ struct cpuidle_state_usage *state_usage,
+ char *buf)
+{
+ return sprintf(buf, "%llu\n",
+ state_usage->disable & CPUIDLE_STATE_DISABLED_BY_USER);
+}
+
+static ssize_t store_state_disable(struct cpuidle_state *state,
+ struct cpuidle_state_usage *state_usage,
+ const char *buf, size_t size)
+{
+ unsigned int value;
+ int err;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ err = kstrtouint(buf, 0, &value);
+ if (err)
+ return err;
+
+ if (value)
+ state_usage->disable |= CPUIDLE_STATE_DISABLED_BY_USER;
+ else
+ state_usage->disable &= ~CPUIDLE_STATE_DISABLED_BY_USER;
+
+ return size;
+}
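Note the asymmetry in the new sysfs handlers: the store side touches only CPUIDLE_STATE_DISABLED_BY_USER, and the show side masks with it, so a state disabled by the driver still reads back as 0. A small sketch of that masking (bit values assumed for illustration):

#include <stdio.h>

#define DISABLED_BY_USER	0x1	/* assumed bit values */
#define DISABLED_BY_DRIVER	0x2

static unsigned long long show_disable(unsigned int disable)
{
	/* Only the user's own bit is reported back through sysfs */
	return disable & DISABLED_BY_USER;
}

int main(void)
{
	printf("%llu\n", show_disable(DISABLED_BY_DRIVER));	/* 0 */
	printf("%llu\n",
	       show_disable(DISABLED_BY_DRIVER | DISABLED_BY_USER)); /* 1 */
	return 0;
}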
+
define_one_state_ro(name, show_state_name);
define_one_state_ro(desc, show_state_desc);
define_one_state_ro(latency, show_state_exit_latency);
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 1fb622f2a87d..43ed1b621718 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -11,6 +11,8 @@ menuconfig CRYPTO_HW
if CRYPTO_HW
+source "drivers/crypto/allwinner/Kconfig"
+
config CRYPTO_DEV_PADLOCK
tristate "Support for VIA PadLock ACE"
depends on X86 && !UML
@@ -26,7 +28,7 @@ config CRYPTO_DEV_PADLOCK
config CRYPTO_DEV_PADLOCK_AES
tristate "PadLock driver for AES algorithm"
depends on CRYPTO_DEV_PADLOCK
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_LIB_AES
help
Use VIA PadLock for AES algorithm.
@@ -54,7 +56,7 @@ config CRYPTO_DEV_GEODE
tristate "Support for the Geode LX AES engine"
depends on X86_32 && PCI
select CRYPTO_ALGAPI
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
help
Say 'Y' here to use the AMD Geode LX processor on-board AES
engine for the CryptoAPI AES algorithm.
@@ -107,7 +109,7 @@ config CRYPTO_PAES_S390
depends on ZCRYPT
depends on PKEY
select CRYPTO_ALGAPI
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
help
This is the s390 hardware accelerated implementation of the
AES cipher algorithms for use with protected key.
@@ -169,7 +171,7 @@ config CRYPTO_DES_S390
tristate "DES and Triple DES cipher algorithms"
depends on S390
select CRYPTO_ALGAPI
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_LIB_DES
help
This is the s390 hardware accelerated implementation of the
@@ -182,7 +184,7 @@ config CRYPTO_AES_S390
tristate "AES cipher algorithms"
depends on S390
select CRYPTO_ALGAPI
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
help
This is the s390 hardware accelerated implementation of the
AES cipher algorithms (FIPS-197).
@@ -236,7 +238,7 @@ config CRYPTO_DEV_MARVELL_CESA
depends on PLAT_ORION || ARCH_MVEBU
select CRYPTO_LIB_AES
select CRYPTO_LIB_DES
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_HASH
select SRAM
help
@@ -248,7 +250,7 @@ config CRYPTO_DEV_MARVELL_CESA
config CRYPTO_DEV_NIAGARA2
tristate "Niagara2 Stream Processing Unit driver"
select CRYPTO_LIB_DES
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_HASH
select CRYPTO_MD5
select CRYPTO_SHA1
@@ -265,7 +267,7 @@ config CRYPTO_DEV_NIAGARA2
config CRYPTO_DEV_HIFN_795X
tristate "Driver HIFN 795x crypto accelerator chips"
select CRYPTO_LIB_DES
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select HW_RANDOM if CRYPTO_DEV_HIFN_795X_RNG
depends on PCI
depends on !ARCH_DMA_ADDR_T_64BIT
@@ -285,7 +287,7 @@ config CRYPTO_DEV_TALITOS
tristate "Talitos Freescale Security Engine (SEC)"
select CRYPTO_AEAD
select CRYPTO_AUTHENC
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_HASH
select HW_RANDOM
depends on FSL_SOC
@@ -323,7 +325,7 @@ config CRYPTO_DEV_IXP4XX
select CRYPTO_LIB_DES
select CRYPTO_AEAD
select CRYPTO_AUTHENC
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
help
Driver for the IXP4xx NPE crypto engine.
@@ -332,11 +334,12 @@ config CRYPTO_DEV_PPC4XX
depends on PPC && 4xx
select CRYPTO_HASH
select CRYPTO_AEAD
+ select CRYPTO_AES
select CRYPTO_LIB_AES
select CRYPTO_CCM
select CRYPTO_CTR
select CRYPTO_GCM
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
help
This option allows you to have support for AMCC crypto acceleration.
@@ -373,7 +376,7 @@ config CRYPTO_DEV_OMAP_AES
tristate "Support for OMAP AES hw engine"
depends on ARCH_OMAP2 || ARCH_OMAP3 || ARCH_OMAP2PLUS
select CRYPTO_AES
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_ENGINE
select CRYPTO_CBC
select CRYPTO_ECB
@@ -387,7 +390,7 @@ config CRYPTO_DEV_OMAP_DES
tristate "Support for OMAP DES/3DES hw engine"
depends on ARCH_OMAP2PLUS
select CRYPTO_LIB_DES
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_ENGINE
help
OMAP processors have DES/3DES module accelerator. Select this if you
@@ -403,7 +406,7 @@ config CRYPTO_DEV_PICOXCELL
select CRYPTO_AEAD
select CRYPTO_AES
select CRYPTO_AUTHENC
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_LIB_DES
select CRYPTO_CBC
select CRYPTO_ECB
@@ -418,7 +421,7 @@ config CRYPTO_DEV_PICOXCELL
config CRYPTO_DEV_SAHARA
tristate "Support for SAHARA crypto accelerator"
depends on ARCH_MXC && OF
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_AES
select CRYPTO_ECB
help
@@ -445,7 +448,7 @@ config CRYPTO_DEV_S5P
depends on ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST
depends on HAS_IOMEM
select CRYPTO_AES
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
help
This option allows you to have support for S5P crypto acceleration.
Select this to offload Samsung S5PV210 or S5PC110, Exynos from AES
@@ -489,11 +492,9 @@ if CRYPTO_DEV_UX500
endif # if CRYPTO_DEV_UX500
config CRYPTO_DEV_ATMEL_AUTHENC
- tristate "Support for Atmel IPSEC/SSL hw accelerator"
+ bool "Support for Atmel IPSEC/SSL hw accelerator"
depends on ARCH_AT91 || COMPILE_TEST
- select CRYPTO_AUTHENC
- select CRYPTO_DEV_ATMEL_AES
- select CRYPTO_DEV_ATMEL_SHA
+ depends on CRYPTO_DEV_ATMEL_AES
help
Some Atmel processors can combine the AES and SHA hw accelerators
to enhance support of IPSEC/SSL.
@@ -505,7 +506,9 @@ config CRYPTO_DEV_ATMEL_AES
depends on ARCH_AT91 || COMPILE_TEST
select CRYPTO_AES
select CRYPTO_AEAD
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
+ select CRYPTO_AUTHENC if CRYPTO_DEV_ATMEL_AUTHENC
+ select CRYPTO_DEV_ATMEL_SHA if CRYPTO_DEV_ATMEL_AUTHENC
help
Some Atmel processors have AES hw accelerator.
Select this if you want to use the Atmel module for
@@ -518,7 +521,7 @@ config CRYPTO_DEV_ATMEL_TDES
tristate "Support for Atmel DES/TDES hw accelerator"
depends on ARCH_AT91 || COMPILE_TEST
select CRYPTO_LIB_DES
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
help
Some Atmel processors have DES/TDES hw accelerator.
Select this if you want to use the Atmel module for
@@ -590,7 +593,7 @@ config CRYPTO_DEV_MXS_DCP
select CRYPTO_CBC
select CRYPTO_ECB
select CRYPTO_AES
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_HASH
help
The Freescale i.MX23/i.MX28 has SHA1/SHA256 and AES128 CBC/ECB
@@ -620,7 +623,7 @@ config CRYPTO_DEV_QCE
select CRYPTO_CBC
select CRYPTO_XTS
select CRYPTO_CTR
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
help
This driver supports Qualcomm crypto engine accelerator
hardware. To compile this driver as a module, choose M here. The
@@ -657,31 +660,6 @@ config CRYPTO_DEV_IMGTEC_HASH
hardware hash accelerator. Supporting MD5/SHA1/SHA224/SHA256
hashing algorithms.
-config CRYPTO_DEV_SUN4I_SS
- tristate "Support for Allwinner Security System cryptographic accelerator"
- depends on ARCH_SUNXI && !64BIT
- select CRYPTO_MD5
- select CRYPTO_SHA1
- select CRYPTO_AES
- select CRYPTO_LIB_DES
- select CRYPTO_BLKCIPHER
- help
- Some Allwinner SoC have a crypto accelerator named
- Security System. Select this if you want to use it.
- The Security System handle AES/DES/3DES ciphers in CBC mode
- and SHA1 and MD5 hash algorithms.
-
- To compile this driver as a module, choose M here: the module
- will be called sun4i-ss.
-
-config CRYPTO_DEV_SUN4I_SS_PRNG
- bool "Support for Allwinner Security System PRNG"
- depends on CRYPTO_DEV_SUN4I_SS
- select CRYPTO_RNG
- help
- Select this option if you want to provide kernel-side support for
- the Pseudo-Random Number Generator found in the Security System.
-
config CRYPTO_DEV_ROCKCHIP
tristate "Rockchip's Cryptographic Engine driver"
depends on OF && ARCH_ROCKCHIP
@@ -691,7 +669,7 @@ config CRYPTO_DEV_ROCKCHIP
select CRYPTO_SHA1
select CRYPTO_SHA256
select CRYPTO_HASH
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
help
This driver interfaces with the hardware crypto accelerator.
@@ -702,7 +680,7 @@ config CRYPTO_DEV_MEDIATEK
depends on (ARM && ARCH_MEDIATEK) || COMPILE_TEST
select CRYPTO_AES
select CRYPTO_AEAD
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_CTR
select CRYPTO_SHA1
select CRYPTO_SHA256
@@ -730,7 +708,7 @@ config CRYPTO_DEV_BCM_SPU
select CRYPTO_SHA512
help
This driver provides support for Broadcom crypto acceleration using the
- Secure Processing Unit (SPU). The SPU driver registers ablkcipher,
+ Secure Processing Unit (SPU). The SPU driver registers skcipher,
ahash, and aead algorithms with the kernel cryptographic API.
source "drivers/crypto/stm32/Kconfig"
@@ -740,7 +718,7 @@ config CRYPTO_DEV_SAFEXCEL
depends on OF || PCI || COMPILE_TEST
select CRYPTO_LIB_AES
select CRYPTO_AUTHENC
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_LIB_DES
select CRYPTO_HASH
select CRYPTO_HMAC
@@ -748,6 +726,8 @@ config CRYPTO_DEV_SAFEXCEL
select CRYPTO_SHA1
select CRYPTO_SHA256
select CRYPTO_SHA512
+ select CRYPTO_CHACHA20POLY1305
+ select CRYPTO_SHA3
help
This driver interfaces with the SafeXcel EIP-97 and EIP-197 cryptographic
engines designed by Inside Secure. It currently accelerates DES, 3DES and
@@ -762,7 +742,7 @@ config CRYPTO_DEV_ARTPEC6
select CRYPTO_AEAD
select CRYPTO_AES
select CRYPTO_ALGAPI
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_CTR
select CRYPTO_HASH
select CRYPTO_SHA1
@@ -779,7 +759,7 @@ config CRYPTO_DEV_CCREE
depends on CRYPTO && CRYPTO_HW && OF && HAS_DMA
default n
select CRYPTO_HASH
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_LIB_DES
select CRYPTO_AEAD
select CRYPTO_AUTHENC
@@ -805,4 +785,6 @@ config CRYPTO_DEV_CCREE
source "drivers/crypto/hisilicon/Kconfig"
+source "drivers/crypto/amlogic/Kconfig"
+
endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index afc4753b5d28..40229d499476 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_CRYPTO_DEV_ALLWINNER) += allwinner/
obj-$(CONFIG_CRYPTO_DEV_ATMEL_AES) += atmel-aes.o
obj-$(CONFIG_CRYPTO_DEV_ATMEL_SHA) += atmel-sha.o
obj-$(CONFIG_CRYPTO_DEV_ATMEL_TDES) += atmel-tdes.o
@@ -39,7 +40,6 @@ obj-$(CONFIG_CRYPTO_DEV_ROCKCHIP) += rockchip/
obj-$(CONFIG_CRYPTO_DEV_S5P) += s5p-sss.o
obj-$(CONFIG_CRYPTO_DEV_SAHARA) += sahara.o
obj-$(CONFIG_ARCH_STM32) += stm32/
-obj-$(CONFIG_CRYPTO_DEV_SUN4I_SS) += sunxi-ss/
obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
obj-$(CONFIG_CRYPTO_DEV_UX500) += ux500/
obj-$(CONFIG_CRYPTO_DEV_VIRTIO) += virtio/
@@ -48,3 +48,4 @@ obj-$(CONFIG_CRYPTO_DEV_BCM_SPU) += bcm/
obj-$(CONFIG_CRYPTO_DEV_SAFEXCEL) += inside-secure/
obj-$(CONFIG_CRYPTO_DEV_ARTPEC6) += axis/
obj-y += hisilicon/
+obj-$(CONFIG_CRYPTO_DEV_AMLOGIC_GXL) += amlogic/
diff --git a/drivers/crypto/allwinner/Kconfig b/drivers/crypto/allwinner/Kconfig
new file mode 100644
index 000000000000..12e7c6a85a02
--- /dev/null
+++ b/drivers/crypto/allwinner/Kconfig
@@ -0,0 +1,87 @@
+config CRYPTO_DEV_ALLWINNER
+ bool "Support for Allwinner cryptographic offloader"
+ depends on ARCH_SUNXI || COMPILE_TEST
+ default y if ARCH_SUNXI
+ help
+ Say Y here to see options for Allwinner hardware crypto devices.
+
+config CRYPTO_DEV_SUN4I_SS
+ tristate "Support for Allwinner Security System cryptographic accelerator"
+ depends on ARCH_SUNXI
+ depends on PM
+ depends on CRYPTO_DEV_ALLWINNER
+ select CRYPTO_MD5
+ select CRYPTO_SHA1
+ select CRYPTO_AES
+ select CRYPTO_LIB_DES
+ select CRYPTO_SKCIPHER
+ help
+ Some Allwinner SoCs have a crypto accelerator named
+ Security System. Select this if you want to use it.
+ The Security System handles AES/DES/3DES ciphers in CBC mode
+ and the SHA1 and MD5 hash algorithms.
+
+ To compile this driver as a module, choose M here: the module
+ will be called sun4i-ss.
+
+config CRYPTO_DEV_SUN4I_SS_PRNG
+ bool "Support for Allwinner Security System PRNG"
+ depends on CRYPTO_DEV_SUN4I_SS
+ select CRYPTO_RNG
+ help
+ Select this option if you want to provide kernel-side support for
+ the Pseudo-Random Number Generator found in the Security System.
+
+config CRYPTO_DEV_SUN8I_CE
+ tristate "Support for Allwinner Crypto Engine cryptographic offloader"
+ select CRYPTO_SKCIPHER
+ select CRYPTO_ENGINE
+ select CRYPTO_ECB
+ select CRYPTO_CBC
+ select CRYPTO_AES
+ select CRYPTO_DES
+ depends on CRYPTO_DEV_ALLWINNER
+ depends on PM
+ help
+ Select y here to have support for the Crypto Engine available on
+ the Allwinner H2+, H3, H5, H6, R40 and A64 SoCs.
+ The Crypto Engine handles AES/3DES ciphers in ECB/CBC mode.
+
+ To compile this driver as a module, choose M here: the module
+ will be called sun8i-ce.
+
+config CRYPTO_DEV_SUN8I_CE_DEBUG
+ bool "Enable sun8i-ce stats"
+ depends on CRYPTO_DEV_SUN8I_CE
+ depends on DEBUG_FS
+ help
+ Say y to enable sun8i-ce debug stats.
+ This will create /sys/kernel/debug/sun8i-ce/stats for displaying
+ the number of requests per flow and per algorithm.
+
+config CRYPTO_DEV_SUN8I_SS
+ tristate "Support for Allwinner Security System cryptographic offloader"
+ select CRYPTO_SKCIPHER
+ select CRYPTO_ENGINE
+ select CRYPTO_ECB
+ select CRYPTO_CBC
+ select CRYPTO_AES
+ select CRYPTO_DES
+ depends on CRYPTO_DEV_ALLWINNER
+ depends on PM
+ help
+ Select y here to have support for the Security System available on
+ the Allwinner A80 and A83T SoCs.
+ The Security System handles AES/3DES ciphers in ECB/CBC mode.
+
+ To compile this driver as a module, choose M here: the module
+ will be called sun8i-ss.
+
+config CRYPTO_DEV_SUN8I_SS_DEBUG
+ bool "Enable sun8i-ss stats"
+ depends on CRYPTO_DEV_SUN8I_SS
+ depends on DEBUG_FS
+ help
+ Say y to enable sun8i-ss debug stats.
+ This will create /sys/kernel/debug/sun8i-ss/stats for displaying
+ the number of requests per flow and per algorithm.
diff --git a/drivers/crypto/allwinner/Makefile b/drivers/crypto/allwinner/Makefile
new file mode 100644
index 000000000000..6effe864d7ff
--- /dev/null
+++ b/drivers/crypto/allwinner/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_CRYPTO_DEV_SUN4I_SS) += sun4i-ss/
+obj-$(CONFIG_CRYPTO_DEV_SUN8I_CE) += sun8i-ce/
+obj-$(CONFIG_CRYPTO_DEV_SUN8I_SS) += sun8i-ss/
diff --git a/drivers/crypto/sunxi-ss/Makefile b/drivers/crypto/allwinner/sun4i-ss/Makefile
index c0a2797d3168..c0a2797d3168 100644
--- a/drivers/crypto/sunxi-ss/Makefile
+++ b/drivers/crypto/allwinner/sun4i-ss/Makefile
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c
index 6536fd4bee65..cb2b0874f68f 100644
--- a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
+++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c
@@ -72,7 +72,8 @@ static int noinline_for_stack sun4i_ss_opti_poll(struct skcipher_request *areq)
oi = 0;
oo = 0;
do {
- todo = min3(rx_cnt, ileft, (mi.length - oi) / 4);
+ todo = min(rx_cnt, ileft);
+ todo = min_t(size_t, todo, (mi.length - oi) / 4);
if (todo) {
ileft -= todo;
writesl(ss->base + SS_RXFIFO, mi.addr + oi, todo);
@@ -87,7 +88,8 @@ static int noinline_for_stack sun4i_ss_opti_poll(struct skcipher_request *areq)
rx_cnt = SS_RXFIFO_SPACES(spaces);
tx_cnt = SS_TXFIFO_SPACES(spaces);
- todo = min3(tx_cnt, oleft, (mo.length - oo) / 4);
+ todo = min(tx_cnt, oleft);
+ todo = min_t(size_t, todo, (mo.length - oo) / 4);
if (todo) {
oleft -= todo;
readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
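The min3() splits in these hunks are type fixes rather than behaviour changes: the kernel's min()/min3() macros refuse operands of mismatched types, and rx_cnt/tx_cnt are u32 while mi.length and the offsets are size_t. Comparing in two steps lets min() see like types while min_t(size_t, ...) casts explicitly. A minimal sketch of the pattern, with hypothetical names:

static size_t clamp_todo(u32 fifo_words, u32 left_words, size_t sg_len, size_t off)
{
	u32 words = min(fifo_words, left_words);	/* same-type compare */

	/* u32 vs size_t: min_t() casts both operands to size_t */
	return min_t(size_t, words, (sg_len - off) / 4);
}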
@@ -239,7 +241,8 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
* todo is the number of consecutive 4byte word that we
* can read from current SG
*/
- todo = min3(rx_cnt, ileft / 4, (mi.length - oi) / 4);
+ todo = min(rx_cnt, ileft / 4);
+ todo = min_t(size_t, todo, (mi.length - oi) / 4);
if (todo && !ob) {
writesl(ss->base + SS_RXFIFO, mi.addr + oi,
todo);
@@ -253,8 +256,8 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
* we need to be able to write the whole buffer in one
* pass, which is why we min() with rx_cnt
*/
- todo = min3(rx_cnt * 4 - ob, ileft,
- mi.length - oi);
+ todo = min(rx_cnt * 4 - ob, ileft);
+ todo = min_t(size_t, todo, mi.length - oi);
memcpy(buf + ob, mi.addr + oi, todo);
ileft -= todo;
oi += todo;
@@ -274,7 +277,8 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
spaces = readl(ss->base + SS_FCSR);
rx_cnt = SS_RXFIFO_SPACES(spaces);
tx_cnt = SS_TXFIFO_SPACES(spaces);
- dev_dbg(ss->dev, "%x %u/%u %u/%u cnt=%u %u/%u %u/%u cnt=%u %u\n",
+ dev_dbg(ss->dev,
+ "%x %u/%zu %u/%u cnt=%u %u/%zu %u/%u cnt=%u %u\n",
mode,
oi, mi.length, ileft, areq->cryptlen, rx_cnt,
oo, mo.length, oleft, areq->cryptlen, tx_cnt, ob);
@@ -282,7 +286,8 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
if (!tx_cnt)
continue;
/* todo in 4bytes word */
- todo = min3(tx_cnt, oleft / 4, (mo.length - oo) / 4);
+ todo = min(tx_cnt, oleft / 4);
+ todo = min_t(size_t, todo, (mo.length - oo) / 4);
if (todo) {
readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
oleft -= todo * 4;
@@ -308,7 +313,8 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
* no more than remaining buffer
* no need to test against oleft
*/
- todo = min(mo.length - oo, obl - obo);
+ todo = min_t(size_t,
+ mo.length - oo, obl - obo);
memcpy(mo.addr + oo, bufo + obo, todo);
oleft -= todo;
obo += todo;
@@ -480,6 +486,7 @@ int sun4i_ss_cipher_init(struct crypto_tfm *tfm)
struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm);
struct sun4i_ss_alg_template *algt;
const char *name = crypto_tfm_alg_name(tfm);
+ int err;
memset(op, 0, sizeof(struct sun4i_tfm_ctx));
@@ -497,13 +504,22 @@ int sun4i_ss_cipher_init(struct crypto_tfm *tfm)
return PTR_ERR(op->fallback_tfm);
}
+ err = pm_runtime_get_sync(op->ss->dev);
+ if (err < 0)
+ goto error_pm;
+
return 0;
+error_pm:
+ crypto_free_sync_skcipher(op->fallback_tfm);
+ return err;
}
void sun4i_ss_cipher_exit(struct crypto_tfm *tfm)
{
struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm);
+
crypto_free_sync_skcipher(op->fallback_tfm);
+ pm_runtime_put(op->ss->dev);
}
/* check and set the AES key, prepare the mode to be used */
@@ -524,7 +540,7 @@ int sun4i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
op->keymode = SS_AES_256BITS;
break;
default:
- dev_err(ss->dev, "ERROR: Invalid keylen %u\n", keylen);
+ dev_dbg(ss->dev, "ERROR: Invalid keylen %u\n", keylen);
crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-core.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
index 9aa6fe081a27..814cd12149a9 100644
--- a/drivers/crypto/sunxi-ss/sun4i-ss-core.c
+++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
@@ -44,7 +44,8 @@ static struct sun4i_ss_alg_template ss_algs[] = {
.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sun4i_req_ctx),
.cra_module = THIS_MODULE,
- .cra_init = sun4i_hash_crainit
+ .cra_init = sun4i_hash_crainit,
+ .cra_exit = sun4i_hash_craexit,
}
}
}
@@ -70,7 +71,8 @@ static struct sun4i_ss_alg_template ss_algs[] = {
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sun4i_req_ctx),
.cra_module = THIS_MODULE,
- .cra_init = sun4i_hash_crainit
+ .cra_init = sun4i_hash_crainit,
+ .cra_exit = sun4i_hash_craexit,
}
}
}
@@ -223,6 +225,82 @@ static struct sun4i_ss_alg_template ss_algs[] = {
#endif
};
+/*
+ * Power management strategy: The device is suspended unless a TFM exists for
+ * one of the algorithms provided by this driver.
+ */
+static int sun4i_ss_pm_suspend(struct device *dev)
+{
+ struct sun4i_ss_ctx *ss = dev_get_drvdata(dev);
+
+ if (ss->reset)
+ reset_control_assert(ss->reset);
+
+ clk_disable_unprepare(ss->ssclk);
+ clk_disable_unprepare(ss->busclk);
+ return 0;
+}
+
+static int sun4i_ss_pm_resume(struct device *dev)
+{
+ struct sun4i_ss_ctx *ss = dev_get_drvdata(dev);
+ int err;
+
+ err = clk_prepare_enable(ss->busclk);
+ if (err) {
+ dev_err(ss->dev, "Cannot prepare_enable busclk\n");
+ goto err_enable;
+ }
+
+ err = clk_prepare_enable(ss->ssclk);
+ if (err) {
+ dev_err(ss->dev, "Cannot prepare_enable ssclk\n");
+ goto err_enable;
+ }
+
+ if (ss->reset) {
+ err = reset_control_deassert(ss->reset);
+ if (err) {
+ dev_err(ss->dev, "Cannot deassert reset control\n");
+ goto err_enable;
+ }
+ }
+
+ return err;
+err_enable:
+ sun4i_ss_pm_suspend(dev);
+ return err;
+}
+
+const struct dev_pm_ops sun4i_ss_pm_ops = {
+ SET_RUNTIME_PM_OPS(sun4i_ss_pm_suspend, sun4i_ss_pm_resume, NULL)
+};
+
+/*
+ * When power management is enabled, this function enables runtime PM and sets
+ * the device as suspended.
+ * When power management is disabled, this function just enables the device.
+ */
+static int sun4i_ss_pm_init(struct sun4i_ss_ctx *ss)
+{
+ int err;
+
+ pm_runtime_use_autosuspend(ss->dev);
+ pm_runtime_set_autosuspend_delay(ss->dev, 2000);
+
+ err = pm_runtime_set_suspended(ss->dev);
+ if (err)
+ return err;
+ pm_runtime_enable(ss->dev);
+ return err;
+}
+
+static void sun4i_ss_pm_exit(struct sun4i_ss_ctx *ss)
+{
+ pm_runtime_disable(ss->dev);
+}
+
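This strategy keeps the engine powered only while it has users: each TFM init takes a usage reference with pm_runtime_get_sync() and each exit drops it, after which the 2000 ms autosuspend delay set above lets the hardware fall back to suspend. The pairing, reduced to a sketch (my_tfm_init/my_tfm_exit and the dev argument are illustrative, not driver API):

/* sketch only: in the driver, dev comes from the per-TFM context */
static int my_tfm_init(struct device *dev)
{
	int err = pm_runtime_get_sync(dev);	/* resume and hold a reference */

	if (err < 0)
		return err;
	return 0;
}

static void my_tfm_exit(struct device *dev)
{
	pm_runtime_put(dev);	/* may suspend after the autosuspend delay */
}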
static int sun4i_ss_probe(struct platform_device *pdev)
{
u32 v;
@@ -269,18 +347,6 @@ static int sun4i_ss_probe(struct platform_device *pdev)
ss->reset = NULL;
}
- /* Enable both clocks */
- err = clk_prepare_enable(ss->busclk);
- if (err) {
- dev_err(&pdev->dev, "Cannot prepare_enable busclk\n");
- return err;
- }
- err = clk_prepare_enable(ss->ssclk);
- if (err) {
- dev_err(&pdev->dev, "Cannot prepare_enable ssclk\n");
- goto error_ssclk;
- }
-
/*
* Check that the clocks have the correct rates given in the datasheet
* Try to set the clock to the maximum allowed
@@ -288,16 +354,7 @@ static int sun4i_ss_probe(struct platform_device *pdev)
err = clk_set_rate(ss->ssclk, cr_mod);
if (err) {
dev_err(&pdev->dev, "Cannot set clock rate to ssclk\n");
- goto error_clk;
- }
-
- /* Deassert reset if we have a reset control */
- if (ss->reset) {
- err = reset_control_deassert(ss->reset);
- if (err) {
- dev_err(&pdev->dev, "Cannot deassert reset control\n");
- goto error_clk;
- }
+ return err;
}
/*
@@ -325,12 +382,26 @@ static int sun4i_ss_probe(struct platform_device *pdev)
dev_warn(&pdev->dev, "Clock ss is at %lu (%lu MHz) (must be <= %lu)\n",
cr, cr / 1000000, cr_mod);
+ ss->dev = &pdev->dev;
+ platform_set_drvdata(pdev, ss);
+
+ spin_lock_init(&ss->slock);
+
+ err = sun4i_ss_pm_init(ss);
+ if (err)
+ return err;
+
/*
* The datasheet names it "Die Bonding ID";
* I expect it to be a sort of Security System revision number.
* Since the A80 seems to have another version of the SS,
* this info could be useful.
*/
+
+ err = pm_runtime_get_sync(ss->dev);
+ if (err < 0)
+ goto error_pm;
+
writel(SS_ENABLED, ss->base + SS_CTL);
v = readl(ss->base + SS_CTL);
v >>= 16;
@@ -338,9 +409,7 @@ static int sun4i_ss_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "Die ID %d\n", v);
writel(0, ss->base + SS_CTL);
- ss->dev = &pdev->dev;
-
- spin_lock_init(&ss->slock);
+ pm_runtime_put_sync(ss->dev);
for (i = 0; i < ARRAY_SIZE(ss_algs); i++) {
ss_algs[i].ss = ss;
@@ -370,7 +439,6 @@ static int sun4i_ss_probe(struct platform_device *pdev)
break;
}
}
- platform_set_drvdata(pdev, ss);
return 0;
error_alg:
i--;
@@ -387,12 +455,8 @@ error_alg:
break;
}
}
- if (ss->reset)
- reset_control_assert(ss->reset);
-error_clk:
- clk_disable_unprepare(ss->ssclk);
-error_ssclk:
- clk_disable_unprepare(ss->busclk);
+error_pm:
+ sun4i_ss_pm_exit(ss);
return err;
}
@@ -415,11 +479,7 @@ static int sun4i_ss_remove(struct platform_device *pdev)
}
}
- writel(0, ss->base + SS_CTL);
- if (ss->reset)
- reset_control_assert(ss->reset);
- clk_disable_unprepare(ss->busclk);
- clk_disable_unprepare(ss->ssclk);
+ sun4i_ss_pm_exit(ss);
return 0;
}
@@ -434,6 +494,7 @@ static struct platform_driver sun4i_ss_driver = {
.remove = sun4i_ss_remove,
.driver = {
.name = "sun4i-ss",
+ .pm = &sun4i_ss_pm_ops,
.of_match_table = a20ss_crypto_of_match_table,
},
};
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-hash.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c
index fcffba5ef927..fdc0e6cdbb85 100644
--- a/drivers/crypto/sunxi-ss/sun4i-ss-hash.c
+++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c
@@ -19,17 +19,29 @@ int sun4i_hash_crainit(struct crypto_tfm *tfm)
struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm);
struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg);
struct sun4i_ss_alg_template *algt;
+ int err;
memset(op, 0, sizeof(struct sun4i_tfm_ctx));
algt = container_of(alg, struct sun4i_ss_alg_template, alg.hash);
op->ss = algt->ss;
+ err = pm_runtime_get_sync(op->ss->dev);
+ if (err < 0)
+ return err;
+
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
sizeof(struct sun4i_req_ctx));
return 0;
}
+void sun4i_hash_craexit(struct crypto_tfm *tfm)
+{
+ struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm);
+
+ pm_runtime_put(op->ss->dev);
+}
+
/* sun4i_hash_init: initialize request context */
int sun4i_hash_init(struct ahash_request *areq)
{
@@ -175,7 +187,7 @@ static int sun4i_hash(struct ahash_request *areq)
*/
unsigned int i = 0, end, fill, min_fill, nwait, nbw = 0, j = 0, todo;
unsigned int in_i = 0;
- u32 spaces, rx_cnt = SS_RX_DEFAULT, bf[32] = {0}, wb = 0, v, ivmode = 0;
+ u32 spaces, rx_cnt = SS_RX_DEFAULT, bf[32] = {0}, v, ivmode = 0;
struct sun4i_req_ctx *op = ahash_request_ctx(areq);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct sun4i_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
@@ -184,6 +196,7 @@ static int sun4i_hash(struct ahash_request *areq)
struct sg_mapping_iter mi;
int in_r, err = 0;
size_t copied = 0;
+ __le32 wb = 0;
dev_dbg(ss->dev, "%s %s bc=%llu len=%u mode=%x wl=%u h0=%0x",
__func__, crypto_tfm_alg_name(areq->base.tfm),
@@ -215,7 +228,7 @@ static int sun4i_hash(struct ahash_request *areq)
*/
if (op->byte_count) {
ivmode = SS_IV_ARBITRARY;
- for (i = 0; i < 5; i++)
+ for (i = 0; i < crypto_ahash_digestsize(tfm) / 4; i++)
writel(op->hash[i], ss->base + SS_IV0 + i * 4);
}
/* Enable the device */
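The new loop bound matters because the two supported hash algorithms carry different amounts of running state, and the old code always rewrote five IV words, one too many for MD5. The arithmetic, using the digest sizes from <crypto/md5.h> and <crypto/sha.h>:

/* MD5_DIGEST_SIZE  = 16 -> 16 / 4 = 4 IV words
 * SHA1_DIGEST_SIZE = 20 -> 20 / 4 = 5 IV words
 * so crypto_ahash_digestsize(tfm) / 4 restores exactly the state the
 * algorithm owns, instead of a fixed count of 5.
 */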
@@ -272,8 +285,8 @@ static int sun4i_hash(struct ahash_request *areq)
*/
while (op->len < 64 && i < end) {
/* how many bytes we can read from current SG */
- in_r = min3(mi.length - in_i, end - i,
- 64 - op->len);
+ in_r = min(end - i, 64 - op->len);
+ in_r = min_t(size_t, mi.length - in_i, in_r);
memcpy(op->buf + op->len, mi.addr + in_i, in_r);
op->len += in_r;
i += in_r;
@@ -293,8 +306,8 @@ static int sun4i_hash(struct ahash_request *areq)
}
if (mi.length - in_i > 3 && i < end) {
/* how many bytes we can read from current SG */
- in_r = min3(mi.length - in_i, areq->nbytes - i,
- ((mi.length - in_i) / 4) * 4);
+ in_r = min_t(size_t, mi.length - in_i, areq->nbytes - i);
+ in_r = min_t(size_t, ((mi.length - in_i) / 4) * 4, in_r);
/* how many bytes we can write to the device */
todo = min3((u32)(end - i) / 4, rx_cnt, (u32)in_r / 4);
writesl(ss->base + SS_RXFIFO, mi.addr + in_i, todo);
@@ -320,8 +333,8 @@ static int sun4i_hash(struct ahash_request *areq)
if ((areq->nbytes - i) < 64) {
while (i < areq->nbytes && in_i < mi.length && op->len < 64) {
/* how many bytes we can read from current SG */
- in_r = min3(mi.length - in_i, areq->nbytes - i,
- 64 - op->len);
+ in_r = min(areq->nbytes - i, 64 - op->len);
+ in_r = min_t(size_t, mi.length - in_i, in_r);
memcpy(op->buf + op->len, mi.addr + in_i, in_r);
op->len += in_r;
i += in_r;
@@ -395,7 +408,7 @@ hash_final:
nbw = op->len - 4 * nwait;
if (nbw) {
- wb = *(u32 *)(op->buf + nwait * 4);
+ wb = cpu_to_le32(*(u32 *)(op->buf + nwait * 4));
wb &= GENMASK((nbw * 8) - 1, 0);
op->byte_count += nbw;
@@ -404,7 +417,7 @@ hash_final:
/* write the remaining bytes of the nbw buffer */
wb |= ((1 << 7) << (nbw * 8));
- bf[j++] = wb;
+ bf[j++] = le32_to_cpu(wb);
/*
* number of bytes to pad to reach 64, minus 8 (the size) and 4 (the final '1')
@@ -423,13 +436,13 @@ hash_final:
/* write the length of data */
if (op->mode == SS_OP_SHA1) {
- __be64 bits = cpu_to_be64(op->byte_count << 3);
- bf[j++] = lower_32_bits(bits);
- bf[j++] = upper_32_bits(bits);
+ __be64 *bits = (__be64 *)&bf[j];
+ *bits = cpu_to_be64(op->byte_count << 3);
+ j += 2;
} else {
- __le64 bits = op->byte_count << 3;
- bf[j++] = lower_32_bits(bits);
- bf[j++] = upper_32_bits(bits);
+ __le64 *bits = (__le64 *)&bf[j];
+ *bits = cpu_to_le64(op->byte_count << 3);
+ j += 2;
}
writesl(ss->base + SS_RXFIFO, bf, j);
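The rewritten length append stores the 64-bit bit count in place instead of splitting a converted 64-bit value with lower_32_bits()/upper_32_bits(), a split that was sparse-unclean and only fell out correctly on little-endian hosts. A minimal sketch of the new layout, assuming the 32-bit word buffer bf[] used above:

u32 bf[32] = { 0 };
unsigned int j = 0;
u64 byte_count = 3;			/* example: 3 bytes hashed so far */
__be64 *bits = (__be64 *)&bf[j];	/* spans bf[j] and bf[j + 1] */

*bits = cpu_to_be64(byte_count << 3);	/* SHA-1 wants the bit count big-endian */
j += 2;					/* two 32-bit words consumed */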
@@ -471,7 +484,7 @@ hash_final:
}
} else {
for (i = 0; i < 4; i++) {
- v = readl(ss->base + SS_MD0 + i * 4);
+ v = cpu_to_le32(readl(ss->base + SS_MD0 + i * 4));
memcpy(areq->result + i * 4, &v, 4);
}
}
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-prng.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-prng.c
index 63d636424161..729aafdbea84 100644
--- a/drivers/crypto/sunxi-ss/sun4i-ss-prng.c
+++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-prng.c
@@ -17,7 +17,7 @@ int sun4i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src,
{
struct sun4i_ss_alg_template *algt;
struct rng_alg *alg = crypto_rng_alg(tfm);
- int i;
+ int i, err;
u32 v;
u32 *data = (u32 *)dst;
const u32 mode = SS_OP_PRNG | SS_PRNG_CONTINUE | SS_ENABLED;
@@ -28,6 +28,10 @@ int sun4i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src,
algt = container_of(alg, struct sun4i_ss_alg_template, alg.rng);
ss = algt->ss;
+ err = pm_runtime_get_sync(ss->dev);
+ if (err < 0)
+ return err;
+
spin_lock_bh(&ss->slock);
writel(mode, ss->base + SS_CTL);
@@ -52,5 +56,8 @@ int sun4i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src,
writel(0, ss->base + SS_CTL);
spin_unlock_bh(&ss->slock);
+
+ pm_runtime_put(ss->dev);
+
return 0;
}
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss.h b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h
index 35a27a7145f8..60425ac75d90 100644
--- a/drivers/crypto/sunxi-ss/sun4i-ss.h
+++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h
@@ -22,6 +22,7 @@
#include <linux/scatterlist.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
+#include <linux/pm_runtime.h>
#include <crypto/md5.h>
#include <crypto/skcipher.h>
#include <crypto/sha.h>
@@ -177,6 +178,7 @@ struct sun4i_req_ctx {
};
int sun4i_hash_crainit(struct crypto_tfm *tfm);
+void sun4i_hash_craexit(struct crypto_tfm *tfm);
int sun4i_hash_init(struct ahash_request *areq);
int sun4i_hash_update(struct ahash_request *areq);
int sun4i_hash_final(struct ahash_request *areq);
diff --git a/drivers/crypto/allwinner/sun8i-ce/Makefile b/drivers/crypto/allwinner/sun8i-ce/Makefile
new file mode 100644
index 000000000000..08b68c3c1ca9
--- /dev/null
+++ b/drivers/crypto/allwinner/sun8i-ce/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_CRYPTO_DEV_SUN8I_CE) += sun8i-ce.o
+sun8i-ce-y += sun8i-ce-core.o sun8i-ce-cipher.o
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
new file mode 100644
index 000000000000..37d0b6c386a0
--- /dev/null
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
@@ -0,0 +1,438 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * sun8i-ce-cipher.c - hardware cryptographic offloader for
+ * Allwinner H3/A64/H5/H2+/H6/R40 SoC
+ *
+ * Copyright (C) 2016-2019 Corentin LABBE <clabbe.montjoie@gmail.com>
+ *
+ * This file adds support for the AES cipher with 128/192/256-bit keys in
+ * CBC and ECB modes.
+ *
+ * You can find a link to the datasheet in Documentation/arm/sunxi/README
+ */
+
+#include <linux/crypto.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/pm_runtime.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/internal/des.h>
+#include <crypto/internal/skcipher.h>
+#include "sun8i-ce.h"
+
+static int sun8i_ce_cipher_need_fallback(struct skcipher_request *areq)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+ struct scatterlist *sg;
+
+ if (sg_nents(areq->src) > MAX_SG || sg_nents(areq->dst) > MAX_SG)
+ return true;
+
+ if (areq->cryptlen < crypto_skcipher_ivsize(tfm))
+ return true;
+
+ if (areq->cryptlen == 0 || areq->cryptlen % 16)
+ return true;
+
+ sg = areq->src;
+ while (sg) {
+ if (sg->length % 4 || !IS_ALIGNED(sg->offset, sizeof(u32)))
+ return true;
+ sg = sg_next(sg);
+ }
+ sg = areq->dst;
+ while (sg) {
+ if (sg->length % 4 || !IS_ALIGNED(sg->offset, sizeof(u32)))
+ return true;
+ sg = sg_next(sg);
+ }
+ return false;
+}
+
+static int sun8i_ce_cipher_fallback(struct skcipher_request *areq)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+ struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+ struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
+ int err;
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ struct sun8i_ce_alg_template *algt;
+#endif
+ SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, op->fallback_tfm);
+
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
+ algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher);
+ algt->stat_fb++;
+#endif
+
+ skcipher_request_set_sync_tfm(subreq, op->fallback_tfm);
+ skcipher_request_set_callback(subreq, areq->base.flags, NULL, NULL);
+ skcipher_request_set_crypt(subreq, areq->src, areq->dst,
+ areq->cryptlen, areq->iv);
+ if (rctx->op_dir & CE_DECRYPTION)
+ err = crypto_skcipher_decrypt(subreq);
+ else
+ err = crypto_skcipher_encrypt(subreq);
+ skcipher_request_zero(subreq);
+ return err;
+}
+
+static int sun8i_ce_cipher(struct skcipher_request *areq)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+ struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+ struct sun8i_ce_dev *ce = op->ce;
+ struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ struct sun8i_ce_alg_template *algt;
+ struct sun8i_ce_flow *chan;
+ struct ce_task *cet;
+ struct scatterlist *sg;
+ unsigned int todo, len, offset, ivsize;
+ dma_addr_t addr_iv = 0, addr_key = 0;
+ void *backup_iv = NULL;
+ u32 common, sym;
+ int flow, i;
+ int nr_sgs = 0;
+ int nr_sgd = 0;
+ int err = 0;
+
+ algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher);
+
+ dev_dbg(ce->dev, "%s %s %u %x IV(%p %u) key=%u\n", __func__,
+ crypto_tfm_alg_name(areq->base.tfm),
+ areq->cryptlen,
+ rctx->op_dir, areq->iv, crypto_skcipher_ivsize(tfm),
+ op->keylen);
+
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
+ algt->stat_req++;
+#endif
+
+ flow = rctx->flow;
+
+ chan = &ce->chanlist[flow];
+
+ cet = chan->tl;
+ memset(cet, 0, sizeof(struct ce_task));
+
+ cet->t_id = cpu_to_le32(flow);
+ common = ce->variant->alg_cipher[algt->ce_algo_id];
+ common |= rctx->op_dir | CE_COMM_INT;
+ cet->t_common_ctl = cpu_to_le32(common);
+ /* CTS and recent CE (H6) need the length in bytes, in words otherwise */
+ if (ce->variant->has_t_dlen_in_bytes)
+ cet->t_dlen = cpu_to_le32(areq->cryptlen);
+ else
+ cet->t_dlen = cpu_to_le32(areq->cryptlen / 4);
+
+ sym = ce->variant->op_mode[algt->ce_blockmode];
+ len = op->keylen;
+ switch (len) {
+ case 128 / 8:
+ sym |= CE_AES_128BITS;
+ break;
+ case 192 / 8:
+ sym |= CE_AES_192BITS;
+ break;
+ case 256 / 8:
+ sym |= CE_AES_256BITS;
+ break;
+ }
+
+ cet->t_sym_ctl = cpu_to_le32(sym);
+ cet->t_asym_ctl = 0;
+
+ chan->op_mode = ce->variant->op_mode[algt->ce_blockmode];
+ chan->op_dir = rctx->op_dir;
+ chan->method = ce->variant->alg_cipher[algt->ce_algo_id];
+ chan->keylen = op->keylen;
+
+ addr_key = dma_map_single(ce->dev, op->key, op->keylen, DMA_TO_DEVICE);
+ cet->t_key = cpu_to_le32(addr_key);
+ if (dma_mapping_error(ce->dev, addr_key)) {
+ dev_err(ce->dev, "Cannot DMA MAP KEY\n");
+ err = -EFAULT;
+ goto theend;
+ }
+
+ ivsize = crypto_skcipher_ivsize(tfm);
+ if (areq->iv && crypto_skcipher_ivsize(tfm) > 0) {
+ chan->ivlen = ivsize;
+ chan->bounce_iv = kzalloc(ivsize, GFP_KERNEL | GFP_DMA);
+ if (!chan->bounce_iv) {
+ err = -ENOMEM;
+ goto theend_key;
+ }
+ if (rctx->op_dir & CE_DECRYPTION) {
+ backup_iv = kzalloc(ivsize, GFP_KERNEL);
+ if (!backup_iv) {
+ err = -ENOMEM;
+ goto theend_key;
+ }
+ offset = areq->cryptlen - ivsize;
+ scatterwalk_map_and_copy(backup_iv, areq->src, offset,
+ ivsize, 0);
+ }
+ memcpy(chan->bounce_iv, areq->iv, ivsize);
+ addr_iv = dma_map_single(ce->dev, chan->bounce_iv, chan->ivlen,
+ DMA_TO_DEVICE);
+ cet->t_iv = cpu_to_le32(addr_iv);
+ if (dma_mapping_error(ce->dev, addr_iv)) {
+ dev_err(ce->dev, "Cannot DMA MAP IV\n");
+ err = -ENOMEM;
+ goto theend_iv;
+ }
+ }
+
+ if (areq->src == areq->dst) {
+ nr_sgs = dma_map_sg(ce->dev, areq->src, sg_nents(areq->src),
+ DMA_BIDIRECTIONAL);
+ if (nr_sgs <= 0 || nr_sgs > MAX_SG) {
+ dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs);
+ err = -EINVAL;
+ goto theend_iv;
+ }
+ nr_sgd = nr_sgs;
+ } else {
+ nr_sgs = dma_map_sg(ce->dev, areq->src, sg_nents(areq->src),
+ DMA_TO_DEVICE);
+ if (nr_sgs <= 0 || nr_sgs > MAX_SG) {
+ dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs);
+ err = -EINVAL;
+ goto theend_iv;
+ }
+ nr_sgd = dma_map_sg(ce->dev, areq->dst, sg_nents(areq->dst),
+ DMA_FROM_DEVICE);
+ if (nr_sgd <= 0 || nr_sgd > MAX_SG) {
+ dev_err(ce->dev, "Invalid sg number %d\n", nr_sgd);
+ err = -EINVAL;
+ goto theend_sgs;
+ }
+ }
+
+ len = areq->cryptlen;
+ for_each_sg(areq->src, sg, nr_sgs, i) {
+ cet->t_src[i].addr = cpu_to_le32(sg_dma_address(sg));
+ todo = min(len, sg_dma_len(sg));
+ cet->t_src[i].len = cpu_to_le32(todo / 4);
+ dev_dbg(ce->dev, "%s total=%u SG(%d %u off=%d) todo=%u\n", __func__,
+ areq->cryptlen, i, cet->t_src[i].len, sg->offset, todo);
+ len -= todo;
+ }
+ if (len > 0) {
+ dev_err(ce->dev, "remaining len %d\n", len);
+ err = -EINVAL;
+ goto theend_sgs;
+ }
+
+ len = areq->cryptlen;
+ for_each_sg(areq->dst, sg, nr_sgd, i) {
+ cet->t_dst[i].addr = cpu_to_le32(sg_dma_address(sg));
+ todo = min(len, sg_dma_len(sg));
+ cet->t_dst[i].len = cpu_to_le32(todo / 4);
+ dev_dbg(ce->dev, "%s total=%u SG(%d %u off=%d) todo=%u\n", __func__,
+ areq->cryptlen, i, cet->t_dst[i].len, sg->offset, todo);
+ len -= todo;
+ }
+ if (len > 0) {
+ dev_err(ce->dev, "remaining len %d\n", len);
+ err = -EINVAL;
+ goto theend_sgs;
+ }
+
+ chan->timeout = areq->cryptlen;
+ err = sun8i_ce_run_task(ce, flow, crypto_tfm_alg_name(areq->base.tfm));
+
+theend_sgs:
+ if (areq->src == areq->dst) {
+ dma_unmap_sg(ce->dev, areq->src, nr_sgs, DMA_BIDIRECTIONAL);
+ } else {
+ if (nr_sgs > 0)
+ dma_unmap_sg(ce->dev, areq->src, nr_sgs, DMA_TO_DEVICE);
+ dma_unmap_sg(ce->dev, areq->dst, nr_sgd, DMA_FROM_DEVICE);
+ }
+
+theend_iv:
+ if (areq->iv && ivsize > 0) {
+ if (addr_iv)
+ dma_unmap_single(ce->dev, addr_iv, chan->ivlen,
+ DMA_TO_DEVICE);
+ offset = areq->cryptlen - ivsize;
+ if (rctx->op_dir & CE_DECRYPTION) {
+ memcpy(areq->iv, backup_iv, ivsize);
+ kzfree(backup_iv);
+ } else {
+ scatterwalk_map_and_copy(areq->iv, areq->dst, offset,
+ ivsize, 0);
+ }
+ kfree(chan->bounce_iv);
+ }
+
+theend_key:
+ dma_unmap_single(ce->dev, addr_key, op->keylen, DMA_TO_DEVICE);
+
+theend:
+ return err;
+}
+
+static int sun8i_ce_handle_cipher_request(struct crypto_engine *engine, void *areq)
+{
+ int err;
+ struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);
+
+ err = sun8i_ce_cipher(breq);
+ crypto_finalize_skcipher_request(engine, breq, err);
+
+ return 0;
+}
+
+int sun8i_ce_skdecrypt(struct skcipher_request *areq)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+ struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+ struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
+ struct crypto_engine *engine;
+ int e;
+
+ rctx->op_dir = CE_DECRYPTION;
+ if (sun8i_ce_cipher_need_fallback(areq))
+ return sun8i_ce_cipher_fallback(areq);
+
+ e = sun8i_ce_get_engine_number(op->ce);
+ rctx->flow = e;
+ engine = op->ce->chanlist[e].engine;
+
+ return crypto_transfer_skcipher_request_to_engine(engine, areq);
+}
+
+int sun8i_ce_skencrypt(struct skcipher_request *areq)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+ struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+ struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
+ struct crypto_engine *engine;
+ int e;
+
+ rctx->op_dir = CE_ENCRYPTION;
+ if (sun8i_ce_cipher_need_fallback(areq))
+ return sun8i_ce_cipher_fallback(areq);
+
+ e = sun8i_ce_get_engine_number(op->ce);
+ rctx->flow = e;
+ engine = op->ce->chanlist[e].engine;
+
+ return crypto_transfer_skcipher_request_to_engine(engine, areq);
+}
+
+int sun8i_ce_cipher_init(struct crypto_tfm *tfm)
+{
+ struct sun8i_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm);
+ struct sun8i_ce_alg_template *algt;
+ const char *name = crypto_tfm_alg_name(tfm);
+ struct crypto_skcipher *sktfm = __crypto_skcipher_cast(tfm);
+ struct skcipher_alg *alg = crypto_skcipher_alg(sktfm);
+ int err;
+
+ memset(op, 0, sizeof(struct sun8i_cipher_tfm_ctx));
+
+ algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher);
+ op->ce = algt->ce;
+
+ sktfm->reqsize = sizeof(struct sun8i_cipher_req_ctx);
+
+ op->fallback_tfm = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(op->fallback_tfm)) {
+ dev_err(op->ce->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
+ name, PTR_ERR(op->fallback_tfm));
+ return PTR_ERR(op->fallback_tfm);
+ }
+
+ dev_info(op->ce->dev, "Fallback for %s is %s\n",
+ crypto_tfm_alg_driver_name(&sktfm->base),
+ crypto_tfm_alg_driver_name(crypto_skcipher_tfm(&op->fallback_tfm->base)));
+
+ op->enginectx.op.do_one_request = sun8i_ce_handle_cipher_request;
+ op->enginectx.op.prepare_request = NULL;
+ op->enginectx.op.unprepare_request = NULL;
+
+ err = pm_runtime_get_sync(op->ce->dev);
+ if (err < 0)
+ goto error_pm;
+
+ return 0;
+error_pm:
+ crypto_free_sync_skcipher(op->fallback_tfm);
+ return err;
+}
+
+void sun8i_ce_cipher_exit(struct crypto_tfm *tfm)
+{
+ struct sun8i_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm);
+
+ if (op->key) {
+ memzero_explicit(op->key, op->keylen);
+ kfree(op->key);
+ }
+ crypto_free_sync_skcipher(op->fallback_tfm);
+ pm_runtime_put_sync_suspend(op->ce->dev);
+}
+
+int sun8i_ce_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+ struct sun8i_ce_dev *ce = op->ce;
+
+ switch (keylen) {
+ case 128 / 8:
+ break;
+ case 192 / 8:
+ break;
+ case 256 / 8:
+ break;
+ default:
+ dev_dbg(ce->dev, "ERROR: Invalid keylen %u\n", keylen);
+ crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+ if (op->key) {
+ memzero_explicit(op->key, op->keylen);
+ kfree(op->key);
+ }
+ op->keylen = keylen;
+ op->key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
+ if (!op->key)
+ return -ENOMEM;
+
+ crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
+ crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
+
+ return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen);
+}
+
+int sun8i_ce_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+ int err;
+
+ err = verify_skcipher_des3_key(tfm, key);
+ if (err)
+ return err;
+
+ if (op->key) {
+ memzero_explicit(op->key, op->keylen);
+ kfree(op->key);
+ }
+ op->keylen = keylen;
+ op->key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
+ if (!op->key)
+ return -ENOMEM;
+
+ crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
+ crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
+
+ return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen);
+}
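sun8i_ce_cipher_fallback() above follows the stock sync-skcipher fallback idiom: an on-stack request mirroring the hardware request is handed to the software implementation that was allocated with CRYPTO_ALG_NEED_FALLBACK. The idiom condensed to a sketch (do_fallback is a hypothetical name; the crypto API calls are the real ones used above):

static int do_fallback(struct crypto_sync_skcipher *fb,
		       struct skcipher_request *areq, bool decrypt)
{
	int err;
	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, fb);

	skcipher_request_set_sync_tfm(subreq, fb);
	skcipher_request_set_callback(subreq, areq->base.flags, NULL, NULL);
	skcipher_request_set_crypt(subreq, areq->src, areq->dst,
				   areq->cryptlen, areq->iv);
	err = decrypt ? crypto_skcipher_decrypt(subreq)
		      : crypto_skcipher_encrypt(subreq);
	skcipher_request_zero(subreq);
	return err;
}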
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
new file mode 100644
index 000000000000..73a7649f915d
--- /dev/null
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
@@ -0,0 +1,676 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * sun8i-ce-core.c - hardware cryptographic offloader for
+ * Allwinner H3/A64/H5/H2+/H6/R40 SoC
+ *
+ * Copyright (C) 2015-2019 Corentin Labbe <clabbe.montjoie@gmail.com>
+ *
+ * Core file which registers crypto algorithms supported by the CryptoEngine.
+ *
+ * You can find a link to the datasheet in Documentation/arm/sunxi/README
+ */
+#include <linux/clk.h>
+#include <linux/crypto.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+#include <crypto/internal/skcipher.h>
+
+#include "sun8i-ce.h"
+
+/*
+ * The mod clock is lower on the H3 than on other SoCs because DMA timeouts
+ * occur at higher rates.
+ * If you want to tune the mod clock, loading the driver and passing the
+ * selftests is insufficient; you need to test with a LUKS workload
+ * (mount and write to it).
+ */
+static const struct ce_variant ce_h3_variant = {
+ .alg_cipher = { CE_ALG_AES, CE_ALG_DES, CE_ALG_3DES,
+ },
+ .op_mode = { CE_OP_ECB, CE_OP_CBC
+ },
+ .ce_clks = {
+ { "bus", 0, 200000000 },
+ { "mod", 50000000, 0 },
+ }
+};
+
+static const struct ce_variant ce_h5_variant = {
+ .alg_cipher = { CE_ALG_AES, CE_ALG_DES, CE_ALG_3DES,
+ },
+ .op_mode = { CE_OP_ECB, CE_OP_CBC
+ },
+ .ce_clks = {
+ { "bus", 0, 200000000 },
+ { "mod", 300000000, 0 },
+ }
+};
+
+static const struct ce_variant ce_h6_variant = {
+ .alg_cipher = { CE_ALG_AES, CE_ALG_DES, CE_ALG_3DES,
+ },
+ .op_mode = { CE_OP_ECB, CE_OP_CBC
+ },
+ .has_t_dlen_in_bytes = true,
+ .ce_clks = {
+ { "bus", 0, 200000000 },
+ { "mod", 300000000, 0 },
+ { "ram", 0, 400000000 },
+ }
+};
+
+static const struct ce_variant ce_a64_variant = {
+ .alg_cipher = { CE_ALG_AES, CE_ALG_DES, CE_ALG_3DES,
+ },
+ .op_mode = { CE_OP_ECB, CE_OP_CBC
+ },
+ .ce_clks = {
+ { "bus", 0, 200000000 },
+ { "mod", 300000000, 0 },
+ }
+};
+
+static const struct ce_variant ce_r40_variant = {
+ .alg_cipher = { CE_ALG_AES, CE_ALG_DES, CE_ALG_3DES,
+ },
+ .op_mode = { CE_OP_ECB, CE_OP_CBC
+ },
+ .ce_clks = {
+ { "bus", 0, 200000000 },
+ { "mod", 300000000, 0 },
+ }
+};
+
+/*
+ * sun8i_ce_get_engine_number() gets the next channel slot.
+ * This is a simple round-robin way of getting the next channel.
+ */
+int sun8i_ce_get_engine_number(struct sun8i_ce_dev *ce)
+{
+ return atomic_inc_return(&ce->flow) % MAXFLOW;
+}
+
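Flow selection is stateless beyond one atomic counter: atomic_inc_return() yields 1, 2, 3, 4, 5, ... so the modulo maps successive requests to flows 1, 2, 3, 0, 1, ..., an even spread over the MAXFLOW channels with no locking. The same pattern in isolation:

static atomic_t flow = ATOMIC_INIT(0);

static int next_flow(void)
{
	/* assumes the counter never wraps negative (~2^31 requests) */
	return atomic_inc_return(&flow) % MAXFLOW;
}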
+int sun8i_ce_run_task(struct sun8i_ce_dev *ce, int flow, const char *name)
+{
+ u32 v;
+ int err = 0;
+
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
+ ce->chanlist[flow].stat_req++;
+#endif
+
+ mutex_lock(&ce->mlock);
+
+ v = readl(ce->base + CE_ICR);
+ v |= 1 << flow;
+ writel(v, ce->base + CE_ICR);
+
+ reinit_completion(&ce->chanlist[flow].complete);
+ writel(ce->chanlist[flow].t_phy, ce->base + CE_TDQ);
+
+ ce->chanlist[flow].status = 0;
+ /* Be sure all data is written before enabling the task */
+ wmb();
+
+ v = 1 | (ce->chanlist[flow].tl->t_common_ctl & 0x7F) << 8;
+ writel(v, ce->base + CE_TLR);
+ mutex_unlock(&ce->mlock);
+
+ wait_for_completion_interruptible_timeout(&ce->chanlist[flow].complete,
+ msecs_to_jiffies(ce->chanlist[flow].timeout));
+
+ if (ce->chanlist[flow].status == 0) {
+ dev_err(ce->dev, "DMA timeout for %s\n", name);
+ err = -EFAULT;
+ }
+ /* No need to lock for this read: the channel is locked, so
+ * nothing else can modify the error value for this channel
+ */
+ v = readl(ce->base + CE_ESR);
+ if (v) {
+ v >>= (flow * 4);
+ v &= 0xFF;
+ if (v) {
+ dev_err(ce->dev, "CE ERROR: %x for flow %x\n", v, flow);
+ err = -EFAULT;
+ }
+ if (v & CE_ERR_ALGO_NOTSUP)
+ dev_err(ce->dev, "CE ERROR: algorithm not supported\n");
+ if (v & CE_ERR_DATALEN)
+ dev_err(ce->dev, "CE ERROR: data length error\n");
+ if (v & CE_ERR_KEYSRAM)
+ dev_err(ce->dev, "CE ERROR: keysram access error for AES\n");
+ if (v & CE_ERR_ADDR_INVALID)
+ dev_err(ce->dev, "CE ERROR: address invalid\n");
+ }
+
+ return err;
+}
+
+static irqreturn_t ce_irq_handler(int irq, void *data)
+{
+ struct sun8i_ce_dev *ce = (struct sun8i_ce_dev *)data;
+ int flow = 0;
+ u32 p;
+
+ p = readl(ce->base + CE_ISR);
+ for (flow = 0; flow < MAXFLOW; flow++) {
+ if (p & (BIT(flow))) {
+ writel(BIT(flow), ce->base + CE_ISR);
+ ce->chanlist[flow].status = 1;
+ complete(&ce->chanlist[flow].complete);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static struct sun8i_ce_alg_template ce_algs[] = {
+{
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .ce_algo_id = CE_ID_CIPHER_AES,
+ .ce_blockmode = CE_ID_OP_CBC,
+ .alg.skcipher = {
+ .base = {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "cbc-aes-sun8i-ce",
+ .cra_priority = 400,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_alignmask = 0xf,
+ .cra_init = sun8i_ce_cipher_init,
+ .cra_exit = sun8i_ce_cipher_exit,
+ },
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = sun8i_ce_aes_setkey,
+ .encrypt = sun8i_ce_skencrypt,
+ .decrypt = sun8i_ce_skdecrypt,
+ }
+},
+{
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .ce_algo_id = CE_ID_CIPHER_AES,
+ .ce_blockmode = CE_ID_OP_ECB,
+ .alg.skcipher = {
+ .base = {
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "ecb-aes-sun8i-ce",
+ .cra_priority = 400,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_alignmask = 0xf,
+ .cra_init = sun8i_ce_cipher_init,
+ .cra_exit = sun8i_ce_cipher_exit,
+ },
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = sun8i_ce_aes_setkey,
+ .encrypt = sun8i_ce_skencrypt,
+ .decrypt = sun8i_ce_skdecrypt,
+ }
+},
+{
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .ce_algo_id = CE_ID_CIPHER_DES3,
+ .ce_blockmode = CE_ID_OP_CBC,
+ .alg.skcipher = {
+ .base = {
+ .cra_name = "cbc(des3_ede)",
+ .cra_driver_name = "cbc-des3-sun8i-ce",
+ .cra_priority = 400,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_alignmask = 0xf,
+ .cra_init = sun8i_ce_cipher_init,
+ .cra_exit = sun8i_ce_cipher_exit,
+ },
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .setkey = sun8i_ce_des3_setkey,
+ .encrypt = sun8i_ce_skencrypt,
+ .decrypt = sun8i_ce_skdecrypt,
+ }
+},
+{
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .ce_algo_id = CE_ID_CIPHER_DES3,
+ .ce_blockmode = CE_ID_OP_ECB,
+ .alg.skcipher = {
+ .base = {
+ .cra_name = "ecb(des3_ede)",
+ .cra_driver_name = "ecb-des3-sun8i-ce",
+ .cra_priority = 400,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_alignmask = 0xf,
+ .cra_init = sun8i_ce_cipher_init,
+ .cra_exit = sun8i_ce_cipher_exit,
+ },
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .setkey = sun8i_ce_des3_setkey,
+ .encrypt = sun8i_ce_skencrypt,
+ .decrypt = sun8i_ce_skdecrypt,
+ }
+},
+};
+
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
+static int sun8i_ce_dbgfs_read(struct seq_file *seq, void *v)
+{
+ struct sun8i_ce_dev *ce = seq->private;
+ int i;
+
+ for (i = 0; i < MAXFLOW; i++)
+ seq_printf(seq, "Channel %d: nreq %lu\n", i, ce->chanlist[i].stat_req);
+
+ for (i = 0; i < ARRAY_SIZE(ce_algs); i++) {
+ if (!ce_algs[i].ce)
+ continue;
+ switch (ce_algs[i].type) {
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ seq_printf(seq, "%s %s %lu %lu\n",
+ ce_algs[i].alg.skcipher.base.cra_driver_name,
+ ce_algs[i].alg.skcipher.base.cra_name,
+ ce_algs[i].stat_req, ce_algs[i].stat_fb);
+ break;
+ }
+ }
+ return 0;
+}
+
+static int sun8i_ce_dbgfs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, sun8i_ce_dbgfs_read, inode->i_private);
+}
+
+static const struct file_operations sun8i_ce_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = sun8i_ce_dbgfs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+#endif
+
+static void sun8i_ce_free_chanlist(struct sun8i_ce_dev *ce, int i)
+{
+ while (i >= 0) {
+ crypto_engine_exit(ce->chanlist[i].engine);
+ if (ce->chanlist[i].tl)
+ dma_free_coherent(ce->dev, sizeof(struct ce_task),
+ ce->chanlist[i].tl,
+ ce->chanlist[i].t_phy);
+ i--;
+ }
+}
+
+/*
+ * Allocate the channel list structure
+ */
+static int sun8i_ce_allocate_chanlist(struct sun8i_ce_dev *ce)
+{
+ int i, err;
+
+ ce->chanlist = devm_kcalloc(ce->dev, MAXFLOW,
+ sizeof(struct sun8i_ce_flow), GFP_KERNEL);
+ if (!ce->chanlist)
+ return -ENOMEM;
+
+ for (i = 0; i < MAXFLOW; i++) {
+ init_completion(&ce->chanlist[i].complete);
+
+ ce->chanlist[i].engine = crypto_engine_alloc_init(ce->dev, true);
+ if (!ce->chanlist[i].engine) {
+ dev_err(ce->dev, "Cannot allocate engine\n");
+ i--;
+ err = -ENOMEM;
+ goto error_engine;
+ }
+ err = crypto_engine_start(ce->chanlist[i].engine);
+ if (err) {
+ dev_err(ce->dev, "Cannot start engine\n");
+ goto error_engine;
+ }
+ ce->chanlist[i].tl = dma_alloc_coherent(ce->dev,
+ sizeof(struct ce_task),
+ &ce->chanlist[i].t_phy,
+ GFP_KERNEL);
+ if (!ce->chanlist[i].tl) {
+ dev_err(ce->dev, "Cannot get DMA memory for task %d\n",
+ i);
+ err = -ENOMEM;
+ goto error_engine;
+ }
+ }
+ return 0;
+error_engine:
+ sun8i_ce_free_chanlist(ce, i);
+ return err;
+}
+
+/*
+ * Power management strategy: The device is suspended unless a TFM exists for
+ * one of the algorithms provided by this driver.
+ */
+static int sun8i_ce_pm_suspend(struct device *dev)
+{
+ struct sun8i_ce_dev *ce = dev_get_drvdata(dev);
+ int i;
+
+ reset_control_assert(ce->reset);
+ for (i = 0; i < CE_MAX_CLOCKS; i++)
+ clk_disable_unprepare(ce->ceclks[i]);
+ return 0;
+}
+
+static int sun8i_ce_pm_resume(struct device *dev)
+{
+ struct sun8i_ce_dev *ce = dev_get_drvdata(dev);
+ int err, i;
+
+ for (i = 0; i < CE_MAX_CLOCKS; i++) {
+ if (!ce->variant->ce_clks[i].name)
+ continue;
+ err = clk_prepare_enable(ce->ceclks[i]);
+ if (err) {
+ dev_err(ce->dev, "Cannot prepare_enable %s\n",
+ ce->variant->ce_clks[i].name);
+ goto error;
+ }
+ }
+ err = reset_control_deassert(ce->reset);
+ if (err) {
+ dev_err(ce->dev, "Cannot deassert reset control\n");
+ goto error;
+ }
+ return 0;
+error:
+ sun8i_ce_pm_suspend(dev);
+ return err;
+}
+
+static const struct dev_pm_ops sun8i_ce_pm_ops = {
+ SET_RUNTIME_PM_OPS(sun8i_ce_pm_suspend, sun8i_ce_pm_resume, NULL)
+};
+
+static int sun8i_ce_pm_init(struct sun8i_ce_dev *ce)
+{
+ int err;
+
+ pm_runtime_use_autosuspend(ce->dev);
+ pm_runtime_set_autosuspend_delay(ce->dev, 2000);
+
+ err = pm_runtime_set_suspended(ce->dev);
+ if (err)
+ return err;
+ pm_runtime_enable(ce->dev);
+ return err;
+}
+
+static void sun8i_ce_pm_exit(struct sun8i_ce_dev *ce)
+{
+ pm_runtime_disable(ce->dev);
+}
+
+static int sun8i_ce_get_clks(struct sun8i_ce_dev *ce)
+{
+ unsigned long cr;
+ int err, i;
+
+ for (i = 0; i < CE_MAX_CLOCKS; i++) {
+ if (!ce->variant->ce_clks[i].name)
+ continue;
+ ce->ceclks[i] = devm_clk_get(ce->dev, ce->variant->ce_clks[i].name);
+ if (IS_ERR(ce->ceclks[i])) {
+ err = PTR_ERR(ce->ceclks[i]);
+ dev_err(ce->dev, "Cannot get %s CE clock err=%d\n",
+ ce->variant->ce_clks[i].name, err);
+ return err;
+ }
+ cr = clk_get_rate(ce->ceclks[i]);
+ if (!cr)
+ return -EINVAL;
+ if (ce->variant->ce_clks[i].freq > 0 &&
+ cr != ce->variant->ce_clks[i].freq) {
+ dev_info(ce->dev, "Set %s clock to %lu (%lu MHz) from %lu (%lu MHz)\n",
+ ce->variant->ce_clks[i].name,
+ ce->variant->ce_clks[i].freq,
+ ce->variant->ce_clks[i].freq / 1000000,
+ cr, cr / 1000000);
+ err = clk_set_rate(ce->ceclks[i], ce->variant->ce_clks[i].freq);
+ if (err)
+ dev_err(ce->dev, "Failed to set %s clk speed to %lu Hz\n",
+ ce->variant->ce_clks[i].name,
+ ce->variant->ce_clks[i].freq);
+ }
+ if (ce->variant->ce_clks[i].max_freq > 0 &&
+ cr > ce->variant->ce_clks[i].max_freq)
+ dev_warn(ce->dev, "Frequency for %s (%lu Hz) is higher than datasheet's recommendation (%lu Hz)\n",
+ ce->variant->ce_clks[i].name, cr,
+ ce->variant->ce_clks[i].max_freq);
+ }
+ return 0;
+}
+
+static int sun8i_ce_register_algs(struct sun8i_ce_dev *ce)
+{
+ int ce_method, err, id, i;
+
+ for (i = 0; i < ARRAY_SIZE(ce_algs); i++) {
+ ce_algs[i].ce = ce;
+ switch (ce_algs[i].type) {
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ id = ce_algs[i].ce_algo_id;
+ ce_method = ce->variant->alg_cipher[id];
+ if (ce_method == CE_ID_NOTSUPP) {
+ dev_dbg(ce->dev,
+ "DEBUG: Algo of %s not supported\n",
+ ce_algs[i].alg.skcipher.base.cra_name);
+ ce_algs[i].ce = NULL;
+ break;
+ }
+ id = ce_algs[i].ce_blockmode;
+ ce_method = ce->variant->op_mode[id];
+ if (ce_method == CE_ID_NOTSUPP) {
+ dev_dbg(ce->dev, "DEBUG: Blockmode of %s not supported\n",
+ ce_algs[i].alg.skcipher.base.cra_name);
+ ce_algs[i].ce = NULL;
+ break;
+ }
+ dev_info(ce->dev, "Register %s\n",
+ ce_algs[i].alg.skcipher.base.cra_name);
+ err = crypto_register_skcipher(&ce_algs[i].alg.skcipher);
+ if (err) {
+ dev_err(ce->dev, "ERROR: Fail to register %s\n",
+ ce_algs[i].alg.skcipher.base.cra_name);
+ ce_algs[i].ce = NULL;
+ return err;
+ }
+ break;
+ default:
+ ce_algs[i].ce = NULL;
+ dev_err(ce->dev, "ERROR: tried to register an unknown algo\n");
+ }
+ }
+ return 0;
+}
+
+static void sun8i_ce_unregister_algs(struct sun8i_ce_dev *ce)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ce_algs); i++) {
+ if (!ce_algs[i].ce)
+ continue;
+ switch (ce_algs[i].type) {
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ dev_info(ce->dev, "Unregister %d %s\n", i,
+ ce_algs[i].alg.skcipher.base.cra_name);
+ crypto_unregister_skcipher(&ce_algs[i].alg.skcipher);
+ break;
+ }
+ }
+}
+
+static int sun8i_ce_probe(struct platform_device *pdev)
+{
+ struct sun8i_ce_dev *ce;
+ int err, irq;
+ u32 v;
+
+ ce = devm_kzalloc(&pdev->dev, sizeof(*ce), GFP_KERNEL);
+ if (!ce)
+ return -ENOMEM;
+
+ ce->dev = &pdev->dev;
+ platform_set_drvdata(pdev, ce);
+
+ ce->variant = of_device_get_match_data(&pdev->dev);
+ if (!ce->variant) {
+ dev_err(&pdev->dev, "Missing Crypto Engine variant\n");
+ return -EINVAL;
+ }
+
+ ce->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(ce->base))
+ return PTR_ERR(ce->base);
+
+ err = sun8i_ce_get_clks(ce);
+ if (err)
+ return err;
+
+ /* Get Non Secure IRQ */
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(ce->dev, "Cannot get CryptoEngine Non-secure IRQ\n");
+ return irq;
+ }
+
+ ce->reset = devm_reset_control_get(&pdev->dev, NULL);
+ if (IS_ERR(ce->reset)) {
+ if (PTR_ERR(ce->reset) == -EPROBE_DEFER)
+ return PTR_ERR(ce->reset);
+ dev_err(&pdev->dev, "No reset control found\n");
+ return PTR_ERR(ce->reset);
+ }
+
+ mutex_init(&ce->mlock);
+
+ err = sun8i_ce_allocate_chanlist(ce);
+ if (err)
+ return err;
+
+ err = sun8i_ce_pm_init(ce);
+ if (err)
+ goto error_pm;
+
+ err = devm_request_irq(&pdev->dev, irq, ce_irq_handler, 0,
+ "sun8i-ce-ns", ce);
+ if (err) {
+ dev_err(ce->dev, "Cannot request CryptoEngine Non-secure IRQ (err=%d)\n", err);
+ goto error_irq;
+ }
+
+ err = sun8i_ce_register_algs(ce);
+ if (err)
+ goto error_alg;
+
+ err = pm_runtime_get_sync(ce->dev);
+ if (err < 0)
+ goto error_alg;
+
+ v = readl(ce->base + CE_CTR);
+ v >>= CE_DIE_ID_SHIFT;
+ v &= CE_DIE_ID_MASK;
+ dev_info(&pdev->dev, "CryptoEngine Die ID %x\n", v);
+
+ pm_runtime_put_sync(ce->dev);
+
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
+ /* Ignore errors from debugfs */
+ ce->dbgfs_dir = debugfs_create_dir("sun8i-ce", NULL);
+ ce->dbgfs_stats = debugfs_create_file("stats", 0444,
+ ce->dbgfs_dir, ce,
+ &sun8i_ce_debugfs_fops);
+#endif
+
+ return 0;
+error_alg:
+ sun8i_ce_unregister_algs(ce);
+error_irq:
+ sun8i_ce_pm_exit(ce);
+error_pm:
+ sun8i_ce_free_chanlist(ce, MAXFLOW);
+ return err;
+}
+
+static int sun8i_ce_remove(struct platform_device *pdev)
+{
+ struct sun8i_ce_dev *ce = platform_get_drvdata(pdev);
+
+ sun8i_ce_unregister_algs(ce);
+
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
+ debugfs_remove_recursive(ce->dbgfs_dir);
+#endif
+
+ sun8i_ce_free_chanlist(ce, MAXFLOW);
+
+ sun8i_ce_pm_exit(ce);
+ return 0;
+}
+
+static const struct of_device_id sun8i_ce_crypto_of_match_table[] = {
+ { .compatible = "allwinner,sun8i-h3-crypto",
+ .data = &ce_h3_variant },
+ { .compatible = "allwinner,sun8i-r40-crypto",
+ .data = &ce_r40_variant },
+ { .compatible = "allwinner,sun50i-a64-crypto",
+ .data = &ce_a64_variant },
+ { .compatible = "allwinner,sun50i-h5-crypto",
+ .data = &ce_h5_variant },
+ { .compatible = "allwinner,sun50i-h6-crypto",
+ .data = &ce_h6_variant },
+ {}
+};
+MODULE_DEVICE_TABLE(of, sun8i_ce_crypto_of_match_table);
+
+static struct platform_driver sun8i_ce_driver = {
+ .probe = sun8i_ce_probe,
+ .remove = sun8i_ce_remove,
+ .driver = {
+ .name = "sun8i-ce",
+ .pm = &sun8i_ce_pm_ops,
+ .of_match_table = sun8i_ce_crypto_of_match_table,
+ },
+};
+
+module_platform_driver(sun8i_ce_driver);
+
+MODULE_DESCRIPTION("Allwinner Crypto Engine cryptographic offloader");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Corentin Labbe <clabbe.montjoie@gmail.com>");
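The ce_variant tables at the top of this file hold all of the per-SoC knowledge, so supporting another SoC is mostly a matter of adding one table plus a matching of_device_id entry. A hypothetical sketch (the compatible string and clock rates are invented for illustration):

static const struct ce_variant ce_newsoc_variant = {
	.alg_cipher = { CE_ALG_AES, CE_ALG_DES, CE_ALG_3DES,
	},
	.op_mode = { CE_OP_ECB, CE_OP_CBC
	},
	.ce_clks = {
		{ "bus", 0, 200000000 },
		{ "mod", 300000000, 0 },
	}
};

/* ...and in sun8i_ce_crypto_of_match_table:
 * { .compatible = "allwinner,sun8i-newsoc-crypto", .data = &ce_newsoc_variant },
 */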
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
new file mode 100644
index 000000000000..43db49ceafe4
--- /dev/null
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
@@ -0,0 +1,254 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * sun8i-ce.h - hardware cryptographic offloader for
+ * Allwinner H3/A64/H5/H2+/H6 SoC
+ *
+ * Copyright (C) 2016-2019 Corentin LABBE <clabbe.montjoie@gmail.com>
+ */
+#include <crypto/aes.h>
+#include <crypto/des.h>
+#include <crypto/engine.h>
+#include <crypto/skcipher.h>
+#include <linux/atomic.h>
+#include <linux/debugfs.h>
+#include <linux/crypto.h>
+
+/* CE Registers */
+#define CE_TDQ 0x00
+#define CE_CTR 0x04
+#define CE_ICR 0x08
+#define CE_ISR 0x0C
+#define CE_TLR 0x10
+#define CE_TSR 0x14
+#define CE_ESR 0x18
+#define CE_CSSGR 0x1C
+#define CE_CDSGR 0x20
+#define CE_CSAR 0x24
+#define CE_CDAR 0x28
+#define CE_TPR 0x2C
+
+/* Used in struct ce_task */
+/* ce_task common */
+#define CE_ENCRYPTION 0
+#define CE_DECRYPTION BIT(8)
+
+#define CE_COMM_INT BIT(31)
+
+/* ce_task symmetric */
+#define CE_AES_128BITS 0
+#define CE_AES_192BITS 1
+#define CE_AES_256BITS 2
+
+#define CE_OP_ECB 0
+#define CE_OP_CBC (1 << 8)
+
+#define CE_ALG_AES 0
+#define CE_ALG_DES 1
+#define CE_ALG_3DES 2
+
+/* Used in ce_variant */
+#define CE_ID_NOTSUPP 0xFF
+
+#define CE_ID_CIPHER_AES 0
+#define CE_ID_CIPHER_DES 1
+#define CE_ID_CIPHER_DES3 2
+#define CE_ID_CIPHER_MAX 3
+
+#define CE_ID_OP_ECB 0
+#define CE_ID_OP_CBC 1
+#define CE_ID_OP_MAX 2
+
+/* Used in CE registers */
+#define CE_ERR_ALGO_NOTSUP BIT(0)
+#define CE_ERR_DATALEN BIT(1)
+#define CE_ERR_KEYSRAM BIT(2)
+#define CE_ERR_ADDR_INVALID BIT(5)
+#define CE_ERR_KEYLADDER BIT(6)
+
+#define CE_DIE_ID_SHIFT 16
+#define CE_DIE_ID_MASK 0x07
+
+#define MAX_SG 8
+
+#define CE_MAX_CLOCKS 3
+
+#define MAXFLOW 4
+
+/*
+ * struct ce_clock - Describe clocks used by sun8i-ce
+ * @name: Name of clock needed by this variant
+ * @freq: Frequency to set for each clock
+ * @max_freq: Maximum frequency for each clock (generally given by the datasheet)
+ */
+struct ce_clock {
+ const char *name;
+ unsigned long freq;
+ unsigned long max_freq;
+};
+
+/*
+ * struct ce_variant - Describes CE capabilities for each hardware variant
+ * @alg_cipher: list of supported ciphers. For each CE_ID_ this gives the
+ * corresponding CE_ALG_XXX value
+ * @op_mode: list of supported block modes
+ * @has_t_dlen_in_bytes: whether the request length for ciphers is given
+ * in bytes or in words
+ * @ce_clks: list of clocks needed by this variant
+ */
+struct ce_variant {
+ char alg_cipher[CE_ID_CIPHER_MAX];
+ u32 op_mode[CE_ID_OP_MAX];
+ bool has_t_dlen_in_bytes;
+ struct ce_clock ce_clks[CE_MAX_CLOCKS];
+};
+
+struct sginfo {
+ __le32 addr;
+ __le32 len;
+} __packed;
+
+/*
+ * struct ce_task - CE Task descriptor
+ * The layout of this descriptor is described in the datasheet
+ */
+struct ce_task {
+ __le32 t_id;
+ __le32 t_common_ctl;
+ __le32 t_sym_ctl;
+ __le32 t_asym_ctl;
+ __le32 t_key;
+ __le32 t_iv;
+ __le32 t_ctr;
+ __le32 t_dlen;
+ struct sginfo t_src[MAX_SG];
+ struct sginfo t_dst[MAX_SG];
+ __le32 next;
+ __le32 reserved[3];
+} __packed __aligned(8);
+
+/*
+ * struct sun8i_ce_flow - Information used by each flow
+ * @engine: ptr to the crypto_engine for this flow
+ * @bounce_iv: buffer which contains the IV
+ * @ivlen: size of bounce_iv
+ * @keylen: keylen for this flow operation
+ * @complete: completion for the current task on this flow
+ * @status: set to 1 by interrupt if task is done
+ * @method: current method for flow
+ * @op_dir: direction (encrypt vs decrypt) of this flow
+ * @op_mode: op_mode for this flow
+ * @t_phy: Physical address of task
+ * @tl: pointer to the current ce_task for this flow
+ * @stat_req: number of requests handled by this flow
+ */
+struct sun8i_ce_flow {
+ struct crypto_engine *engine;
+ void *bounce_iv;
+ unsigned int ivlen;
+ unsigned int keylen;
+ struct completion complete;
+ int status;
+ u32 method;
+ u32 op_dir;
+ u32 op_mode;
+ dma_addr_t t_phy;
+ int timeout;
+ struct ce_task *tl;
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
+ unsigned long stat_req;
+#endif
+};
+
+/*
+ * struct sun8i_ce_dev - main container for all this driver's information
+ * @base: base address of CE
+ * @ceclks: clocks used by CE
+ * @reset: pointer to reset controller
+ * @dev: the platform device
+ * @mlock: Control access to device registers
+ * @chanlist: array of all flows
+ * @flow: flow to use in next request
+ * @variant: pointer to variant specific data
+ * @dbgfs_dir: Debugfs dentry for statistic directory
+ * @dbgfs_stats: Debugfs dentry for statistic counters
+ */
+struct sun8i_ce_dev {
+ void __iomem *base;
+ struct clk *ceclks[CE_MAX_CLOCKS];
+ struct reset_control *reset;
+ struct device *dev;
+ struct mutex mlock;
+ struct sun8i_ce_flow *chanlist;
+ atomic_t flow;
+ const struct ce_variant *variant;
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
+ struct dentry *dbgfs_dir;
+ struct dentry *dbgfs_stats;
+#endif
+};
+
+/*
+ * struct sun8i_cipher_req_ctx - context for a skcipher request
+ * @op_dir: direction (encrypt vs decrypt) for this request
+ * @flow: the flow to use for this request
+ */
+struct sun8i_cipher_req_ctx {
+ u32 op_dir;
+ int flow;
+};
+
+/*
+ * struct sun8i_cipher_tfm_ctx - context for a skcipher TFM
+ * @enginectx: crypto_engine used by this TFM
+ * @key: pointer to key data
+ * @keylen: length of the key
+ * @ce: pointer to the private data of driver handling this TFM
+ * @fallback_tfm: pointer to the fallback TFM
+ */
+struct sun8i_cipher_tfm_ctx {
+ struct crypto_engine_ctx enginectx;
+ u32 *key;
+ u32 keylen;
+ struct sun8i_ce_dev *ce;
+ struct crypto_sync_skcipher *fallback_tfm;
+};
+
+/*
+ * struct sun8i_ce_alg_template - crypto_alg template
+ * @type: the CRYPTO_ALG_TYPE for this template
+ * @ce_algo_id: the CE_ID for this template
+ * @ce_blockmode: the CE_ID of the block mode
+ * @ce: pointer to the sun8i_ce_dev structure associated with
+ * this template
+ * @alg: union of sub structs; exactly one must be used
+ * @stat_req: number of requests processed via this template
+ * @stat_fb: number of requests which used the fallback
+ */
+struct sun8i_ce_alg_template {
+ u32 type;
+ u32 ce_algo_id;
+ u32 ce_blockmode;
+ struct sun8i_ce_dev *ce;
+ union {
+ struct skcipher_alg skcipher;
+ } alg;
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
+ unsigned long stat_req;
+ unsigned long stat_fb;
+#endif
+};
+
+int sun8i_ce_enqueue(struct crypto_async_request *areq, u32 type);
+
+int sun8i_ce_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keylen);
+int sun8i_ce_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keylen);
+int sun8i_ce_cipher_init(struct crypto_tfm *tfm);
+void sun8i_ce_cipher_exit(struct crypto_tfm *tfm);
+int sun8i_ce_skdecrypt(struct skcipher_request *areq);
+int sun8i_ce_skencrypt(struct skcipher_request *areq);
+
+int sun8i_ce_get_engine_number(struct sun8i_ce_dev *ce);
+
+int sun8i_ce_run_task(struct sun8i_ce_dev *ce, int flow, const char *name);
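For orientation, not part of the patch: a minimal sketch of how the prototypes above are reached in practice. Nothing calls sun8i_ce_* directly; a user allocates the cipher by name through the generic skcipher API and the crypto core dispatches to this driver via its priority. Only standard crypto API symbols are used; the buffer is assumed to be kmalloc'ed (hence DMA-able) and the function name is illustrative.

#include <crypto/aes.h>
#include <crypto/skcipher.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int example_cbc_aes_one_block(u8 *buf, const u8 *key,
				     unsigned int keylen, u8 *iv)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	/* resolved by priority; cbc-aes-sun8i-ce wins when the driver is loaded */
	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, keylen);
	if (err)
		goto out_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_tfm;
	}

	sg_init_one(&sg, buf, AES_BLOCK_SIZE);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, AES_BLOCK_SIZE, iv);

	/* the CE is asynchronous; crypto_wait_req() blocks until completion */
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_tfm:
	crypto_free_skcipher(tfm);
	return err;
}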
diff --git a/drivers/crypto/allwinner/sun8i-ss/Makefile b/drivers/crypto/allwinner/sun8i-ss/Makefile
new file mode 100644
index 000000000000..add7b0543fd5
--- /dev/null
+++ b/drivers/crypto/allwinner/sun8i-ss/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_CRYPTO_DEV_SUN8I_SS) += sun8i-ss.o
+sun8i-ss-y += sun8i-ss-core.o sun8i-ss-cipher.o
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
new file mode 100644
index 000000000000..f222979a5623
--- /dev/null
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
@@ -0,0 +1,436 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * sun8i-ss-cipher.c - hardware cryptographic offloader for
+ * Allwinner A80/A83T SoCs
+ *
+ * Copyright (C) 2016-2019 Corentin LABBE <clabbe.montjoie@gmail.com>
+ *
+ * This file adds support for AES ciphers with 128/192/256-bit keys in
+ * CBC and ECB mode.
+ *
+ * A link to the datasheet can be found in Documentation/arm/sunxi/README
+ */
+
+#include <linux/crypto.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/pm_runtime.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/internal/skcipher.h>
+#include "sun8i-ss.h"
+
+static bool sun8i_ss_need_fallback(struct skcipher_request *areq)
+{
+ struct scatterlist *in_sg = areq->src;
+ struct scatterlist *out_sg = areq->dst;
+ struct scatterlist *sg;
+
+ if (areq->cryptlen == 0 || areq->cryptlen % 16)
+ return true;
+
+ if (sg_nents(areq->src) > 8 || sg_nents(areq->dst) > 8)
+ return true;
+
+ sg = areq->src;
+ while (sg) {
+ if ((sg->length % 16) != 0)
+ return true;
+ if ((sg_dma_len(sg) % 16) != 0)
+ return true;
+ if (!IS_ALIGNED(sg->offset, 16))
+ return true;
+ sg = sg_next(sg);
+ }
+ sg = areq->dst;
+ while (sg) {
+ if ((sg->length % 16) != 0)
+ return true;
+ if ((sg_dma_len(sg) % 16) != 0)
+ return true;
+ if (!IS_ALIGNED(sg->offset, 16))
+ return true;
+ sg = sg_next(sg);
+ }
+
+ /* The SS requires the same number of SGs (with matching lengths) for source and destination */
+ in_sg = areq->src;
+ out_sg = areq->dst;
+ while (in_sg && out_sg) {
+ if (in_sg->length != out_sg->length)
+ return true;
+ in_sg = sg_next(in_sg);
+ out_sg = sg_next(out_sg);
+ }
+ if (in_sg || out_sg)
+ return true;
+ return false;
+}
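A concrete reading of the checks above, not part of the patch: a request whose total length is not a multiple of 16 bytes, a source or destination scatterlist with more than eight entries, a chunk not sized or aligned to 16 bytes, or src/dst lists whose corresponding chunk lengths differ will all return true here, and the request is routed to the software fallback instead of the SS.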
+
+static int sun8i_ss_cipher_fallback(struct skcipher_request *areq)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+ struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+ struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
+ int err;
+
+ SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, op->fallback_tfm);
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ struct sun8i_ss_alg_template *algt;
+
+ algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher);
+ algt->stat_fb++;
+#endif
+ skcipher_request_set_sync_tfm(subreq, op->fallback_tfm);
+ skcipher_request_set_callback(subreq, areq->base.flags, NULL, NULL);
+ skcipher_request_set_crypt(subreq, areq->src, areq->dst,
+ areq->cryptlen, areq->iv);
+ if (rctx->op_dir & SS_DECRYPTION)
+ err = crypto_skcipher_decrypt(subreq);
+ else
+ err = crypto_skcipher_encrypt(subreq);
+ skcipher_request_zero(subreq);
+ return err;
+}
+
+static int sun8i_ss_cipher(struct skcipher_request *areq)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+ struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+ struct sun8i_ss_dev *ss = op->ss;
+ struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ struct sun8i_ss_alg_template *algt;
+ struct scatterlist *sg;
+ unsigned int todo, len, offset, ivsize;
+ void *backup_iv = NULL;
+ int nr_sgs = 0;
+ int nr_sgd = 0;
+ int err = 0;
+ int i;
+
+ algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher);
+
+ dev_dbg(ss->dev, "%s %s %u %x IV(%p %u) key=%u\n", __func__,
+ crypto_tfm_alg_name(areq->base.tfm),
+ areq->cryptlen,
+ rctx->op_dir, areq->iv, crypto_skcipher_ivsize(tfm),
+ op->keylen);
+
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
+ algt->stat_req++;
+#endif
+
+ rctx->op_mode = ss->variant->op_mode[algt->ss_blockmode];
+ rctx->method = ss->variant->alg_cipher[algt->ss_algo_id];
+ rctx->keylen = op->keylen;
+
+ rctx->p_key = dma_map_single(ss->dev, op->key, op->keylen, DMA_TO_DEVICE);
+ if (dma_mapping_error(ss->dev, rctx->p_key)) {
+ dev_err(ss->dev, "Cannot DMA MAP KEY\n");
+ err = -EFAULT;
+ goto theend;
+ }
+
+ ivsize = crypto_skcipher_ivsize(tfm);
+ if (areq->iv && ivsize > 0) {
+ rctx->ivlen = ivsize;
+ rctx->biv = kzalloc(ivsize, GFP_KERNEL | GFP_DMA);
+ if (!rctx->biv) {
+ err = -ENOMEM;
+ goto theend_key;
+ }
+ if (rctx->op_dir & SS_DECRYPTION) {
+ backup_iv = kzalloc(ivsize, GFP_KERNEL);
+ if (!backup_iv) {
+ err = -ENOMEM;
+ /* do not leak the bounce IV buffer */
+ kfree(rctx->biv);
+ goto theend_key;
+ }
+ offset = areq->cryptlen - ivsize;
+ scatterwalk_map_and_copy(backup_iv, areq->src, offset,
+ ivsize, 0);
+ }
+ memcpy(rctx->biv, areq->iv, ivsize);
+ rctx->p_iv = dma_map_single(ss->dev, rctx->biv, rctx->ivlen,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(ss->dev, rctx->p_iv)) {
+ dev_err(ss->dev, "Cannot DMA MAP IV\n");
+ err = -ENOMEM;
+ goto theend_iv;
+ }
+ }
+ if (areq->src == areq->dst) {
+ nr_sgs = dma_map_sg(ss->dev, areq->src, sg_nents(areq->src),
+ DMA_BIDIRECTIONAL);
+ if (nr_sgs <= 0 || nr_sgs > 8) {
+ dev_err(ss->dev, "Invalid sg number %d\n", nr_sgs);
+ err = -EINVAL;
+ goto theend_iv;
+ }
+ nr_sgd = nr_sgs;
+ } else {
+ nr_sgs = dma_map_sg(ss->dev, areq->src, sg_nents(areq->src),
+ DMA_TO_DEVICE);
+ if (nr_sgs <= 0 || nr_sgs > 8) {
+ dev_err(ss->dev, "Invalid sg number %d\n", nr_sgs);
+ err = -EINVAL;
+ goto theend_iv;
+ }
+ nr_sgd = dma_map_sg(ss->dev, areq->dst, sg_nents(areq->dst),
+ DMA_FROM_DEVICE);
+ if (nr_sgd <= 0 || nr_sgd > 8) {
+ dev_err(ss->dev, "Invalid sg number %d\n", nr_sgd);
+ err = -EINVAL;
+ goto theend_sgs;
+ }
+ }
+
+ len = areq->cryptlen;
+ i = 0;
+ sg = areq->src;
+ while (i < nr_sgs && sg && len) {
+ if (sg_dma_len(sg) == 0)
+ goto sgs_next;
+ rctx->t_src[i].addr = sg_dma_address(sg);
+ todo = min(len, sg_dma_len(sg));
+ rctx->t_src[i].len = todo / 4;
+ dev_dbg(ss->dev, "%s total=%u SGS(%d %u off=%d) todo=%u\n", __func__,
+ areq->cryptlen, i, rctx->t_src[i].len, sg->offset, todo);
+ len -= todo;
+ i++;
+sgs_next:
+ sg = sg_next(sg);
+ }
+ if (len > 0) {
+ dev_err(ss->dev, "remaining len %d\n", len);
+ err = -EINVAL;
+ goto theend_sgs;
+ }
+
+ len = areq->cryptlen;
+ i = 0;
+ sg = areq->dst;
+ while (i < nr_sgd && sg && len) {
+ if (sg_dma_len(sg) == 0)
+ goto sgd_next;
+ rctx->t_dst[i].addr = sg_dma_address(sg);
+ todo = min(len, sg_dma_len(sg));
+ rctx->t_dst[i].len = todo / 4;
+ dev_dbg(ss->dev, "%s total=%u SGD(%d %u off=%d) todo=%u\n", __func__,
+ areq->cryptlen, i, rctx->t_dst[i].len, sg->offset, todo);
+ len -= todo;
+ i++;
+sgd_next:
+ sg = sg_next(sg);
+ }
+ if (len > 0) {
+ dev_err(ss->dev, "remaining len %d\n", len);
+ err = -EINVAL;
+ goto theend_sgs;
+ }
+
+ err = sun8i_ss_run_task(ss, rctx, crypto_tfm_alg_name(areq->base.tfm));
+
+theend_sgs:
+ if (areq->src == areq->dst) {
+ dma_unmap_sg(ss->dev, areq->src, nr_sgs, DMA_BIDIRECTIONAL);
+ } else {
+ dma_unmap_sg(ss->dev, areq->src, nr_sgs, DMA_TO_DEVICE);
+ dma_unmap_sg(ss->dev, areq->dst, nr_sgd, DMA_FROM_DEVICE);
+ }
+
+theend_iv:
+ if (rctx->p_iv)
+ dma_unmap_single(ss->dev, rctx->p_iv, rctx->ivlen,
+ DMA_TO_DEVICE);
+
+ if (areq->iv && ivsize > 0) {
+ if (rctx->biv) {
+ offset = areq->cryptlen - ivsize;
+ if (rctx->op_dir & SS_DECRYPTION) {
+ memcpy(areq->iv, backup_iv, ivsize);
+ memzero_explicit(backup_iv, ivsize);
+ kzfree(backup_iv);
+ } else {
+ scatterwalk_map_and_copy(areq->iv, areq->dst, offset,
+ ivsize, 0);
+ }
+ kfree(rctx->biv);
+ }
+ }
+
+theend_key:
+ dma_unmap_single(ss->dev, rctx->p_key, op->keylen, DMA_TO_DEVICE);
+
+theend:
+
+ return err;
+}
+
+static int sun8i_ss_handle_cipher_request(struct crypto_engine *engine, void *areq)
+{
+ int err;
+ struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);
+
+ err = sun8i_ss_cipher(breq);
+ crypto_finalize_skcipher_request(engine, breq, err);
+
+ return 0;
+}
+
+int sun8i_ss_skdecrypt(struct skcipher_request *areq)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+ struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+ struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
+ struct crypto_engine *engine;
+ int e;
+
+ memset(rctx, 0, sizeof(struct sun8i_cipher_req_ctx));
+ rctx->op_dir = SS_DECRYPTION;
+
+ if (sun8i_ss_need_fallback(areq))
+ return sun8i_ss_cipher_fallback(areq);
+
+ e = sun8i_ss_get_engine_number(op->ss);
+ engine = op->ss->flows[e].engine;
+ rctx->flow = e;
+
+ return crypto_transfer_skcipher_request_to_engine(engine, areq);
+}
+
+int sun8i_ss_skencrypt(struct skcipher_request *areq)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+ struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+ struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
+ struct crypto_engine *engine;
+ int e;
+
+ memset(rctx, 0, sizeof(struct sun8i_cipher_req_ctx));
+ rctx->op_dir = SS_ENCRYPTION;
+
+ if (sun8i_ss_need_fallback(areq))
+ return sun8i_ss_cipher_fallback(areq);
+
+ e = sun8i_ss_get_engine_number(op->ss);
+ engine = op->ss->flows[e].engine;
+ rctx->flow = e;
+
+ return crypto_transfer_skcipher_request_to_engine(engine, areq);
+}
+
+int sun8i_ss_cipher_init(struct crypto_tfm *tfm)
+{
+ struct sun8i_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm);
+ struct sun8i_ss_alg_template *algt;
+ const char *name = crypto_tfm_alg_name(tfm);
+ struct crypto_skcipher *sktfm = __crypto_skcipher_cast(tfm);
+ struct skcipher_alg *alg = crypto_skcipher_alg(sktfm);
+ int err;
+
+ memset(op, 0, sizeof(struct sun8i_cipher_tfm_ctx));
+
+ algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher);
+ op->ss = algt->ss;
+
+ sktfm->reqsize = sizeof(struct sun8i_cipher_req_ctx);
+
+ op->fallback_tfm = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(op->fallback_tfm)) {
+ dev_err(op->ss->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
+ name, PTR_ERR(op->fallback_tfm));
+ return PTR_ERR(op->fallback_tfm);
+ }
+
+ dev_info(op->ss->dev, "Fallback for %s is %s\n",
+ crypto_tfm_alg_driver_name(&sktfm->base),
+ crypto_tfm_alg_driver_name(crypto_skcipher_tfm(&op->fallback_tfm->base)));
+
+ op->enginectx.op.do_one_request = sun8i_ss_handle_cipher_request;
+ op->enginectx.op.prepare_request = NULL;
+ op->enginectx.op.unprepare_request = NULL;
+
+ err = pm_runtime_get_sync(op->ss->dev);
+ if (err < 0) {
+ dev_err(op->ss->dev, "pm error %d\n", err);
+ goto error_pm;
+ }
+
+ return 0;
+error_pm:
+ crypto_free_sync_skcipher(op->fallback_tfm);
+ return err;
+}
+
+void sun8i_ss_cipher_exit(struct crypto_tfm *tfm)
+{
+ struct sun8i_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm);
+
+ if (op->key) {
+ memzero_explicit(op->key, op->keylen);
+ kfree(op->key);
+ }
+ crypto_free_sync_skcipher(op->fallback_tfm);
+ pm_runtime_put_sync(op->ss->dev);
+}
+
+int sun8i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+ struct sun8i_ss_dev *ss = op->ss;
+
+ switch (keylen) {
+ case 128 / 8:
+ break;
+ case 192 / 8:
+ break;
+ case 256 / 8:
+ break;
+ default:
+ dev_dbg(ss->dev, "ERROR: Invalid keylen %u\n", keylen);
+ crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+ if (op->key) {
+ memzero_explicit(op->key, op->keylen);
+ kfree(op->key);
+ }
+ op->keylen = keylen;
+ op->key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
+ if (!op->key)
+ return -ENOMEM;
+
+ crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
+ crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
+
+ return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen);
+}
+
+int sun8i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+ struct sun8i_ss_dev *ss = op->ss;
+
+ if (unlikely(keylen != 3 * DES_KEY_SIZE)) {
+ dev_dbg(ss->dev, "Invalid keylen %u\n", keylen);
+ crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ if (op->key) {
+ memzero_explicit(op->key, op->keylen);
+ kfree(op->key);
+ }
+ op->keylen = keylen;
+ op->key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
+ if (!op->key)
+ return -ENOMEM;
+
+ crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
+ crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
+
+ return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen);
+}
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
new file mode 100644
index 000000000000..90997cc509b8
--- /dev/null
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
@@ -0,0 +1,642 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * sun8i-ss-core.c - hardware cryptographic offloader for
+ * Allwinner A80/A83T SoCs
+ *
+ * Copyright (C) 2015-2019 Corentin Labbe <clabbe.montjoie@gmail.com>
+ *
+ * Core file which registers the crypto algorithms supported by the SecuritySystem
+ *
+ * A link to the datasheet can be found in Documentation/arm/sunxi/README
+ */
+#include <linux/clk.h>
+#include <linux/crypto.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+#include <crypto/internal/skcipher.h>
+
+#include "sun8i-ss.h"
+
+static const struct ss_variant ss_a80_variant = {
+ .alg_cipher = { SS_ALG_AES, SS_ALG_DES, SS_ALG_3DES,
+ },
+ .op_mode = { SS_OP_ECB, SS_OP_CBC,
+ },
+ .ss_clks = {
+ { "bus", 0, 300 * 1000 * 1000 },
+ { "mod", 0, 300 * 1000 * 1000 },
+ }
+};
+
+static const struct ss_variant ss_a83t_variant = {
+ .alg_cipher = { SS_ALG_AES, SS_ALG_DES, SS_ALG_3DES,
+ },
+ .op_mode = { SS_OP_ECB, SS_OP_CBC,
+ },
+ .ss_clks = {
+ { "bus", 0, 300 * 1000 * 1000 },
+ { "mod", 0, 300 * 1000 * 1000 },
+ }
+};
+
+/*
+ * sun8i_ss_get_engine_number() gets the next channel slot
+ * using a simple round-robin scheme
+ */
+int sun8i_ss_get_engine_number(struct sun8i_ss_dev *ss)
+{
+ return atomic_inc_return(&ss->flow) % MAXFLOW;
+}
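A small illustration, not part of the patch: atomic_inc_return() hands back the post-increment value, so with MAXFLOW == 2 successive calls land on flow 1, 0, 1, 0, ... The plain-C sketch below mirrors the arithmetic; demo_next_flow() and counter are illustrative names standing in for the driver's atomic ss->flow.

static int demo_next_flow(void)
{
	static int counter;	/* stands in for the atomic_t ss->flow */

	return ++counter % 2;	/* MAXFLOW == 2: yields 1, 0, 1, 0, ... */
}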
+
+int sun8i_ss_run_task(struct sun8i_ss_dev *ss, struct sun8i_cipher_req_ctx *rctx,
+ const char *name)
+{
+ int flow = rctx->flow;
+ u32 v = 1;
+ int i;
+
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
+ ss->flows[flow].stat_req++;
+#endif
+
+ /* choose between stream0/stream1 */
+ if (flow)
+ v |= SS_FLOW1;
+ else
+ v |= SS_FLOW0;
+
+ v |= rctx->op_mode;
+ v |= rctx->method;
+
+ if (rctx->op_dir)
+ v |= SS_DECRYPTION;
+
+ switch (rctx->keylen) {
+ case 128 / 8:
+ v |= SS_AES_128BITS << 7;
+ break;
+ case 192 / 8:
+ v |= SS_AES_192BITS << 7;
+ break;
+ case 256 / 8:
+ v |= SS_AES_256BITS << 7;
+ break;
+ }
+
+ for (i = 0; i < MAX_SG; i++) {
+ if (!rctx->t_dst[i].addr)
+ break;
+
+ mutex_lock(&ss->mlock);
+ writel(rctx->p_key, ss->base + SS_KEY_ADR_REG);
+
+ if (i == 0) {
+ if (rctx->p_iv)
+ writel(rctx->p_iv, ss->base + SS_IV_ADR_REG);
+ } else {
+ if (rctx->biv) {
+ if (rctx->op_dir == SS_ENCRYPTION)
+ writel(rctx->t_dst[i - 1].addr + rctx->t_dst[i - 1].len * 4 - rctx->ivlen, ss->base + SS_IV_ADR_REG);
+ else
+ writel(rctx->t_src[i - 1].addr + rctx->t_src[i - 1].len * 4 - rctx->ivlen, ss->base + SS_IV_ADR_REG);
+ }
+ }
+
+ dev_dbg(ss->dev,
+ "Processing SG %d on flow %d %s ctl=%x %d to %d method=%x opmode=%x opdir=%x srclen=%d\n",
+ i, flow, name, v,
+ rctx->t_src[i].len, rctx->t_dst[i].len,
+ rctx->method, rctx->op_mode,
+ rctx->op_dir, rctx->t_src[i].len);
+
+ writel(rctx->t_src[i].addr, ss->base + SS_SRC_ADR_REG);
+ writel(rctx->t_dst[i].addr, ss->base + SS_DST_ADR_REG);
+ writel(rctx->t_src[i].len, ss->base + SS_LEN_ADR_REG);
+
+ reinit_completion(&ss->flows[flow].complete);
+ ss->flows[flow].status = 0;
+ wmb();
+
+ writel(v, ss->base + SS_CTL_REG);
+ mutex_unlock(&ss->mlock);
+ wait_for_completion_interruptible_timeout(&ss->flows[flow].complete,
+ msecs_to_jiffies(2000));
+ if (ss->flows[flow].status == 0) {
+ dev_err(ss->dev, "DMA timeout for %s\n", name);
+ return -EFAULT;
+ }
+ }
+
+ return 0;
+}
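A worked example of the CBC chaining in the loop above, not part of the patch: with a 16-byte IV and a previous 64-byte chunk, t_dst[i - 1].len is 16 words, so for encryption the IV register is pointed at t_dst[i - 1].addr + 16 * 4 - 16, i.e. the last ciphertext block the engine just wrote. Decryption applies the same arithmetic to t_src, since CBC decryption chains on the previous ciphertext block rather than on the plaintext.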
+
+static irqreturn_t ss_irq_handler(int irq, void *data)
+{
+ struct sun8i_ss_dev *ss = (struct sun8i_ss_dev *)data;
+ int flow = 0;
+ u32 p;
+
+ p = readl(ss->base + SS_INT_STA_REG);
+ for (flow = 0; flow < MAXFLOW; flow++) {
+ if (p & (BIT(flow))) {
+ writel(BIT(flow), ss->base + SS_INT_STA_REG);
+ ss->flows[flow].status = 1;
+ complete(&ss->flows[flow].complete);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static struct sun8i_ss_alg_template ss_algs[] = {
+{
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .ss_algo_id = SS_ID_CIPHER_AES,
+ .ss_blockmode = SS_ID_OP_CBC,
+ .alg.skcipher = {
+ .base = {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "cbc-aes-sun8i-ss",
+ .cra_priority = 400,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_alignmask = 0xf,
+ .cra_init = sun8i_ss_cipher_init,
+ .cra_exit = sun8i_ss_cipher_exit,
+ },
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = sun8i_ss_aes_setkey,
+ .encrypt = sun8i_ss_skencrypt,
+ .decrypt = sun8i_ss_skdecrypt,
+ }
+},
+{
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .ss_algo_id = SS_ID_CIPHER_AES,
+ .ss_blockmode = SS_ID_OP_ECB,
+ .alg.skcipher = {
+ .base = {
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "ecb-aes-sun8i-ss",
+ .cra_priority = 400,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_alignmask = 0xf,
+ .cra_init = sun8i_ss_cipher_init,
+ .cra_exit = sun8i_ss_cipher_exit,
+ },
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = sun8i_ss_aes_setkey,
+ .encrypt = sun8i_ss_skencrypt,
+ .decrypt = sun8i_ss_skdecrypt,
+ }
+},
+{
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .ss_algo_id = SS_ID_CIPHER_DES3,
+ .ss_blockmode = SS_ID_OP_CBC,
+ .alg.skcipher = {
+ .base = {
+ .cra_name = "cbc(des3_ede)",
+ .cra_driver_name = "cbc-des3-sun8i-ss",
+ .cra_priority = 400,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_alignmask = 0xf,
+ .cra_init = sun8i_ss_cipher_init,
+ .cra_exit = sun8i_ss_cipher_exit,
+ },
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .setkey = sun8i_ss_des3_setkey,
+ .encrypt = sun8i_ss_skencrypt,
+ .decrypt = sun8i_ss_skdecrypt,
+ }
+},
+{
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .ss_algo_id = SS_ID_CIPHER_DES3,
+ .ss_blockmode = SS_ID_OP_ECB,
+ .alg.skcipher = {
+ .base = {
+ .cra_name = "ecb(des3_ede)",
+ .cra_driver_name = "ecb-des3-sun8i-ss",
+ .cra_priority = 400,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_alignmask = 0xf,
+ .cra_init = sun8i_ss_cipher_init,
+ .cra_exit = sun8i_ss_cipher_exit,
+ },
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .setkey = sun8i_ss_des3_setkey,
+ .encrypt = sun8i_ss_skencrypt,
+ .decrypt = sun8i_ss_skdecrypt,
+ }
+},
+};
+
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
+static int sun8i_ss_dbgfs_read(struct seq_file *seq, void *v)
+{
+ struct sun8i_ss_dev *ss = seq->private;
+ int i;
+
+ for (i = 0; i < MAXFLOW; i++)
+ seq_printf(seq, "Channel %d: nreq %lu\n", i, ss->flows[i].stat_req);
+
+ for (i = 0; i < ARRAY_SIZE(ss_algs); i++) {
+ if (!ss_algs[i].ss)
+ continue;
+ switch (ss_algs[i].type) {
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ seq_printf(seq, "%s %s %lu %lu\n",
+ ss_algs[i].alg.skcipher.base.cra_driver_name,
+ ss_algs[i].alg.skcipher.base.cra_name,
+ ss_algs[i].stat_req, ss_algs[i].stat_fb);
+ break;
+ }
+ }
+ return 0;
+}
+
+static int sun8i_ss_dbgfs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, sun8i_ss_dbgfs_read, inode->i_private);
+}
+
+static const struct file_operations sun8i_ss_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = sun8i_ss_dbgfs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+#endif
+
+static void sun8i_ss_free_flows(struct sun8i_ss_dev *ss, int i)
+{
+ while (i >= 0) {
+ crypto_engine_exit(ss->flows[i].engine);
+ i--;
+ }
+}
+
+/*
+ * Allocate the flow list structure
+ */
+static int allocate_flows(struct sun8i_ss_dev *ss)
+{
+ int i, err;
+
+ ss->flows = devm_kcalloc(ss->dev, MAXFLOW, sizeof(struct sun8i_ss_flow),
+ GFP_KERNEL);
+ if (!ss->flows)
+ return -ENOMEM;
+
+ for (i = 0; i < MAXFLOW; i++) {
+ init_completion(&ss->flows[i].complete);
+
+ ss->flows[i].engine = crypto_engine_alloc_init(ss->dev, true);
+ if (!ss->flows[i].engine) {
+ dev_err(ss->dev, "Cannot allocate engine\n");
+ i--;
+ err = -ENOMEM;
+ goto error_engine;
+ }
+ err = crypto_engine_start(ss->flows[i].engine);
+ if (err) {
+ dev_err(ss->dev, "Cannot start engine\n");
+ goto error_engine;
+ }
+ }
+ return 0;
+error_engine:
+ sun8i_ss_free_flows(ss, i);
+ return err;
+}
+
+/*
+ * Power management strategy: The device is suspended unless a TFM exists for
+ * one of the algorithms provided by this driver.
+ */
+static int sun8i_ss_pm_suspend(struct device *dev)
+{
+ struct sun8i_ss_dev *ss = dev_get_drvdata(dev);
+ int i;
+
+ reset_control_assert(ss->reset);
+ for (i = 0; i < SS_MAX_CLOCKS; i++)
+ clk_disable_unprepare(ss->ssclks[i]);
+ return 0;
+}
+
+static int sun8i_ss_pm_resume(struct device *dev)
+{
+ struct sun8i_ss_dev *ss = dev_get_drvdata(dev);
+ int err, i;
+
+ for (i = 0; i < SS_MAX_CLOCKS; i++) {
+ if (!ss->variant->ss_clks[i].name)
+ continue;
+ err = clk_prepare_enable(ss->ssclks[i]);
+ if (err) {
+ dev_err(ss->dev, "Cannot prepare_enable %s\n",
+ ss->variant->ss_clks[i].name);
+ goto error;
+ }
+ }
+ err = reset_control_deassert(ss->reset);
+ if (err) {
+ dev_err(ss->dev, "Cannot deassert reset control\n");
+ goto error;
+ }
+ /* enable interrupts for all flows */
+ writel(BIT(0) | BIT(1), ss->base + SS_INT_CTL_REG);
+
+ return 0;
+error:
+ sun8i_ss_pm_suspend(dev);
+ return err;
+}
+
+static const struct dev_pm_ops sun8i_ss_pm_ops = {
+ SET_RUNTIME_PM_OPS(sun8i_ss_pm_suspend, sun8i_ss_pm_resume, NULL)
+};
+
+static int sun8i_ss_pm_init(struct sun8i_ss_dev *ss)
+{
+ int err;
+
+ pm_runtime_use_autosuspend(ss->dev);
+ pm_runtime_set_autosuspend_delay(ss->dev, 2000);
+
+ err = pm_runtime_set_suspended(ss->dev);
+ if (err)
+ return err;
+ pm_runtime_enable(ss->dev);
+ return err;
+}
+
+static void sun8i_ss_pm_exit(struct sun8i_ss_dev *ss)
+{
+ pm_runtime_disable(ss->dev);
+}
+
+static int sun8i_ss_register_algs(struct sun8i_ss_dev *ss)
+{
+ int ss_method, err, id, i;
+
+ for (i = 0; i < ARRAY_SIZE(ss_algs); i++) {
+ ss_algs[i].ss = ss;
+ switch (ss_algs[i].type) {
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ id = ss_algs[i].ss_algo_id;
+ ss_method = ss->variant->alg_cipher[id];
+ if (ss_method == SS_ID_NOTSUPP) {
+ dev_info(ss->dev,
+ "DEBUG: Algo of %s not supported\n",
+ ss_algs[i].alg.skcipher.base.cra_name);
+ ss_algs[i].ss = NULL;
+ break;
+ }
+ id = ss_algs[i].ss_blockmode;
+ ss_method = ss->variant->op_mode[id];
+ if (ss_method == SS_ID_NOTSUPP) {
+ dev_info(ss->dev, "DEBUG: Blockmode of %s not supported\n",
+ ss_algs[i].alg.skcipher.base.cra_name);
+ ss_algs[i].ss = NULL;
+ break;
+ }
+ dev_info(ss->dev, "DEBUG: Register %s\n",
+ ss_algs[i].alg.skcipher.base.cra_name);
+ err = crypto_register_skcipher(&ss_algs[i].alg.skcipher);
+ if (err) {
+ dev_err(ss->dev, "Fail to register %s\n",
+ ss_algs[i].alg.skcipher.base.cra_name);
+ ss_algs[i].ss = NULL;
+ return err;
+ }
+ break;
+ default:
+ ss_algs[i].ss = NULL;
+ dev_err(ss->dev, "ERROR: tried to register an unknown algo\n");
+ }
+ }
+ return 0;
+}
+
+static void sun8i_ss_unregister_algs(struct sun8i_ss_dev *ss)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ss_algs); i++) {
+ if (!ss_algs[i].ss)
+ continue;
+ switch (ss_algs[i].type) {
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ dev_info(ss->dev, "Unregister %d %s\n", i,
+ ss_algs[i].alg.skcipher.base.cra_name);
+ crypto_unregister_skcipher(&ss_algs[i].alg.skcipher);
+ break;
+ }
+ }
+}
+
+static int sun8i_ss_get_clks(struct sun8i_ss_dev *ss)
+{
+ unsigned long cr;
+ int err, i;
+
+ for (i = 0; i < SS_MAX_CLOCKS; i++) {
+ if (!ss->variant->ss_clks[i].name)
+ continue;
+ ss->ssclks[i] = devm_clk_get(ss->dev, ss->variant->ss_clks[i].name);
+ if (IS_ERR(ss->ssclks[i])) {
+ err = PTR_ERR(ss->ssclks[i]);
+ dev_err(ss->dev, "Cannot get %s SS clock err=%d\n",
+ ss->variant->ss_clks[i].name, err);
+ return err;
+ }
+ cr = clk_get_rate(ss->ssclks[i]);
+ if (!cr)
+ return -EINVAL;
+ if (ss->variant->ss_clks[i].freq > 0 &&
+ cr != ss->variant->ss_clks[i].freq) {
+ dev_info(ss->dev, "Set %s clock to %lu (%lu Mhz) from %lu (%lu Mhz)\n",
+ ss->variant->ss_clks[i].name,
+ ss->variant->ss_clks[i].freq,
+ ss->variant->ss_clks[i].freq / 1000000,
+ cr, cr / 1000000);
+ err = clk_set_rate(ss->ssclks[i], ss->variant->ss_clks[i].freq);
+ if (err)
+ dev_err(ss->dev, "Fail to set %s clk speed to %lu hz\n",
+ ss->variant->ss_clks[i].name,
+ ss->variant->ss_clks[i].freq);
+ }
+ if (ss->variant->ss_clks[i].max_freq > 0 &&
+ cr > ss->variant->ss_clks[i].max_freq)
+ dev_warn(ss->dev, "Frequency for %s (%lu hz) is higher than datasheet's recommendation (%lu hz)",
+ ss->variant->ss_clks[i].name, cr,
+ ss->variant->ss_clks[i].max_freq);
+ }
+ return 0;
+}
+
+static int sun8i_ss_probe(struct platform_device *pdev)
+{
+ struct sun8i_ss_dev *ss;
+ int err, irq;
+ u32 v;
+
+ ss = devm_kzalloc(&pdev->dev, sizeof(*ss), GFP_KERNEL);
+ if (!ss)
+ return -ENOMEM;
+
+ ss->dev = &pdev->dev;
+ platform_set_drvdata(pdev, ss);
+
+ ss->variant = of_device_get_match_data(&pdev->dev);
+ if (!ss->variant) {
+ dev_err(&pdev->dev, "Missing Crypto Engine variant\n");
+ return -EINVAL;
+ }
+
+ ss->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(ss->base))
+ return PTR_ERR(ss->base);
+
+ err = sun8i_ss_get_clks(ss);
+ if (err)
+ return err;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(ss->dev, "Cannot get SecuritySystem IRQ\n");
+ return irq;
+ }
+
+ ss->reset = devm_reset_control_get(&pdev->dev, NULL);
+ if (IS_ERR(ss->reset)) {
+ if (PTR_ERR(ss->reset) == -EPROBE_DEFER)
+ return PTR_ERR(ss->reset);
+ dev_err(&pdev->dev, "No reset control found\n");
+ return PTR_ERR(ss->reset);
+ }
+
+ mutex_init(&ss->mlock);
+
+ err = allocate_flows(ss);
+ if (err)
+ return err;
+
+ err = sun8i_ss_pm_init(ss);
+ if (err)
+ goto error_pm;
+
+ err = devm_request_irq(&pdev->dev, irq, ss_irq_handler, 0, "sun8i-ss", ss);
+ if (err) {
+ dev_err(ss->dev, "Cannot request SecuritySystem IRQ (err=%d)\n", err);
+ goto error_irq;
+ }
+
+ err = sun8i_ss_register_algs(ss);
+ if (err)
+ goto error_alg;
+
+ err = pm_runtime_get_sync(ss->dev);
+ if (err < 0)
+ goto error_alg;
+
+ v = readl(ss->base + SS_CTL_REG);
+ v >>= SS_DIE_ID_SHIFT;
+ v &= SS_DIE_ID_MASK;
+ dev_info(&pdev->dev, "Security System Die ID %x\n", v);
+
+ pm_runtime_put_sync(ss->dev);
+
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
+ /* Ignore debugfs errors */
+ ss->dbgfs_dir = debugfs_create_dir("sun8i-ss", NULL);
+ ss->dbgfs_stats = debugfs_create_file("stats", 0444,
+ ss->dbgfs_dir, ss,
+ &sun8i_ss_debugfs_fops);
+#endif
+
+ return 0;
+error_alg:
+ sun8i_ss_unregister_algs(ss);
+error_irq:
+ sun8i_ss_pm_exit(ss);
+error_pm:
+ sun8i_ss_free_flows(ss, MAXFLOW);
+ return err;
+}
+
+static int sun8i_ss_remove(struct platform_device *pdev)
+{
+ struct sun8i_ss_dev *ss = platform_get_drvdata(pdev);
+
+ sun8i_ss_unregister_algs(ss);
+
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
+ debugfs_remove_recursive(ss->dbgfs_dir);
+#endif
+
+ sun8i_ss_free_flows(ss, MAXFLOW);
+
+ sun8i_ss_pm_exit(ss);
+
+ return 0;
+}
+
+static const struct of_device_id sun8i_ss_crypto_of_match_table[] = {
+ { .compatible = "allwinner,sun8i-a83t-crypto",
+ .data = &ss_a83t_variant },
+ { .compatible = "allwinner,sun9i-a80-crypto",
+ .data = &ss_a80_variant },
+ {}
+};
+MODULE_DEVICE_TABLE(of, sun8i_ss_crypto_of_match_table);
+
+static struct platform_driver sun8i_ss_driver = {
+ .probe = sun8i_ss_probe,
+ .remove = sun8i_ss_remove,
+ .driver = {
+ .name = "sun8i-ss",
+ .pm = &sun8i_ss_pm_ops,
+ .of_match_table = sun8i_ss_crypto_of_match_table,
+ },
+};
+
+module_platform_driver(sun8i_ss_driver);
+
+MODULE_DESCRIPTION("Allwinner SecuritySystem cryptographic offloader");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Corentin Labbe <clabbe.montjoie@gmail.com>");
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h
new file mode 100644
index 000000000000..b5f855f3de10
--- /dev/null
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h
@@ -0,0 +1,218 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * sun8i-ss.h - hardware cryptographic offloader for
+ * Allwinner A80/A83T SoCs
+ *
+ * Copyright (C) 2016-2019 Corentin LABBE <clabbe.montjoie@gmail.com>
+ */
+#include <crypto/aes.h>
+#include <crypto/des.h>
+#include <crypto/engine.h>
+#include <crypto/skcipher.h>
+#include <linux/atomic.h>
+#include <linux/debugfs.h>
+#include <linux/crypto.h>
+
+#define SS_ENCRYPTION 0
+#define SS_DECRYPTION BIT(6)
+
+#define SS_ALG_AES 0
+#define SS_ALG_DES (1 << 2)
+#define SS_ALG_3DES (2 << 2)
+
+#define SS_CTL_REG 0x00
+#define SS_INT_CTL_REG 0x04
+#define SS_INT_STA_REG 0x08
+#define SS_KEY_ADR_REG 0x10
+#define SS_IV_ADR_REG 0x18
+#define SS_SRC_ADR_REG 0x20
+#define SS_DST_ADR_REG 0x28
+#define SS_LEN_ADR_REG 0x30
+
+#define SS_ID_NOTSUPP 0xFF
+
+#define SS_ID_CIPHER_AES 0
+#define SS_ID_CIPHER_DES 1
+#define SS_ID_CIPHER_DES3 2
+#define SS_ID_CIPHER_MAX 3
+
+#define SS_ID_OP_ECB 0
+#define SS_ID_OP_CBC 1
+#define SS_ID_OP_MAX 2
+
+#define SS_AES_128BITS 0
+#define SS_AES_192BITS 1
+#define SS_AES_256BITS 2
+
+#define SS_OP_ECB 0
+#define SS_OP_CBC (1 << 13)
+
+#define SS_FLOW0 BIT(30)
+#define SS_FLOW1 BIT(31)
+
+#define MAX_SG 8
+
+#define MAXFLOW 2
+
+#define SS_MAX_CLOCKS 2
+
+#define SS_DIE_ID_SHIFT 20
+#define SS_DIE_ID_MASK 0x07
+
+/*
+ * struct ss_clock - Describe clocks used by sun8i-ss
+ * @name: Name of clock needed by this variant
+ * @freq: Frequency to set for each clock
+ * @max_freq: Maximum frequency for each clock
+ */
+struct ss_clock {
+ const char *name;
+ unsigned long freq;
+ unsigned long max_freq;
+};
+
+/*
+ * struct ss_variant - Describes SS capabilities for each hardware variant
+ * @alg_cipher: list of supported ciphers. For each SS_ID_ this gives the
+ * corresponding SS_ALG_XXX value
+ * @op_mode: list of supported block modes
+ * @ss_clks: list of clocks needed by this variant
+ */
+struct ss_variant {
+ char alg_cipher[SS_ID_CIPHER_MAX];
+ u32 op_mode[SS_ID_OP_MAX];
+ struct ss_clock ss_clks[SS_MAX_CLOCKS];
+};
+
+struct sginfo {
+ u32 addr;
+ u32 len;
+};
+
+/*
+ * struct sun8i_ss_flow - Information used by each flow
+ * @engine: ptr to the crypto_engine for this flow
+ * @complete: completion for the current task on this flow
+ * @status: set to 1 by interrupt if task is done
+ * @stat_req: number of requests handled by this flow
+ */
+struct sun8i_ss_flow {
+ struct crypto_engine *engine;
+ struct completion complete;
+ int status;
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
+ unsigned long stat_req;
+#endif
+};
+
+/*
+ * struct sun8i_ss_dev - main container for all this driver's information
+ * @base: base address of SS
+ * @ssclks: clocks used by SS
+ * @reset: pointer to reset controller
+ * @dev: the platform device
+ * @mlock: Control access to device registers
+ * @flows: array of all flows
+ * @flow: flow to use in next request
+ * @variant: pointer to variant specific data
+ * @dbgfs_dir: Debugfs dentry for statistic directory
+ * @dbgfs_stats: Debugfs dentry for statistic counters
+ */
+struct sun8i_ss_dev {
+ void __iomem *base;
+ struct clk *ssclks[SS_MAX_CLOCKS];
+ struct reset_control *reset;
+ struct device *dev;
+ struct mutex mlock;
+ struct sun8i_ss_flow *flows;
+ atomic_t flow;
+ const struct ss_variant *variant;
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
+ struct dentry *dbgfs_dir;
+ struct dentry *dbgfs_stats;
+#endif
+};
+
+/*
+ * struct sun8i_cipher_req_ctx - context for a skcipher request
+ * @t_src: list of mapped SGs with their size
+ * @t_dst: list of mapped SGs with their size
+ * @p_key: DMA address of the key
+ * @p_iv: DMA address of the IV
+ * @method: current algorithm for this request
+ * @op_mode: op_mode for this request
+ * @op_dir: direction (encrypt vs decrypt) for this request
+ * @flow: the flow to use for this request
+ * @ivlen: size of biv
+ * @keylen: keylen for this request
+ * @biv: buffer which contains the IV
+ */
+struct sun8i_cipher_req_ctx {
+ struct sginfo t_src[MAX_SG];
+ struct sginfo t_dst[MAX_SG];
+ u32 p_key;
+ u32 p_iv;
+ u32 method;
+ u32 op_mode;
+ u32 op_dir;
+ int flow;
+ unsigned int ivlen;
+ unsigned int keylen;
+ void *biv;
+};
+
+/*
+ * struct sun8i_cipher_tfm_ctx - context for a skcipher TFM
+ * @enginectx: crypto_engine used by this TFM
+ * @key: pointer to key data
+ * @keylen: length of the key
+ * @ss: pointer to the private data of driver handling this TFM
+ * @fallback_tfm: pointer to the fallback TFM
+ */
+struct sun8i_cipher_tfm_ctx {
+ struct crypto_engine_ctx enginectx;
+ u32 *key;
+ u32 keylen;
+ struct sun8i_ss_dev *ss;
+ struct crypto_sync_skcipher *fallback_tfm;
+};
+
+/*
+ * struct sun8i_ss_alg_template - crypto_alg template
+ * @type: the CRYPTO_ALG_TYPE for this template
+ * @ss_algo_id: the SS_ID for this template
+ * @ss_blockmode: the SS_ID of the block mode
+ * @ss: pointer to the sun8i_ss_dev structure associated with
+ * this template
+ * @alg: union of sub structs; exactly one must be used
+ * @stat_req: number of requests processed via this template
+ * @stat_fb: number of requests which used the fallback
+ */
+struct sun8i_ss_alg_template {
+ u32 type;
+ u32 ss_algo_id;
+ u32 ss_blockmode;
+ struct sun8i_ss_dev *ss;
+ union {
+ struct skcipher_alg skcipher;
+ } alg;
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
+ unsigned long stat_req;
+ unsigned long stat_fb;
+#endif
+};
+
+int sun8i_ss_enqueue(struct crypto_async_request *areq, u32 type);
+
+int sun8i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keylen);
+int sun8i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keylen);
+int sun8i_ss_cipher_init(struct crypto_tfm *tfm);
+void sun8i_ss_cipher_exit(struct crypto_tfm *tfm);
+int sun8i_ss_skdecrypt(struct skcipher_request *areq);
+int sun8i_ss_skencrypt(struct skcipher_request *areq);
+
+int sun8i_ss_get_engine_number(struct sun8i_ss_dev *ss);
+
+int sun8i_ss_run_task(struct sun8i_ss_dev *ss, struct sun8i_cipher_req_ctx *rctx, const char *name);
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index de5e9352e920..7d6b695c4ab3 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -365,12 +365,8 @@ static u32 crypto4xx_build_sdr(struct crypto4xx_device *dev)
dma_alloc_coherent(dev->core_dev->device,
PPC4XX_SD_BUFFER_SIZE * PPC4XX_NUM_SD,
&dev->scatter_buffer_pa, GFP_ATOMIC);
- if (!dev->scatter_buffer_va) {
- dma_free_coherent(dev->core_dev->device,
- sizeof(struct ce_sd) * PPC4XX_NUM_SD,
- dev->sdr, dev->sdr_pa);
+ if (!dev->scatter_buffer_va)
return -ENOMEM;
- }
for (i = 0; i < PPC4XX_NUM_SD; i++) {
dev->sdr[i].ptr = dev->scatter_buffer_pa +
diff --git a/drivers/crypto/amlogic/Kconfig b/drivers/crypto/amlogic/Kconfig
new file mode 100644
index 000000000000..b90850d18965
--- /dev/null
+++ b/drivers/crypto/amlogic/Kconfig
@@ -0,0 +1,24 @@
+config CRYPTO_DEV_AMLOGIC_GXL
+ tristate "Support for amlogic cryptographic offloader"
+ default y if ARCH_MESON
+ select CRYPTO_SKCIPHER
+ select CRYPTO_ENGINE
+ select CRYPTO_ECB
+ select CRYPTO_CBC
+ select CRYPTO_AES
+ help
+ Select y here to have support for the cryptographic offloader
+ available on Amlogic GXL SoC.
+ This hardware handles AES ciphers in ECB/CBC mode.
+
+ To compile this driver as a module, choose M here: the module
+ will be called amlogic-gxl-crypto.
+
+config CRYPTO_DEV_AMLOGIC_GXL_DEBUG
+ bool "Enable amlogic stats"
+ depends on CRYPTO_DEV_AMLOGIC_GXL
+ depends on DEBUG_FS
+ help
+ Say y to enable amlogic-crypto debug stats.
+ This will create /sys/kernel/debug/gxl-crypto/stats for displaying
+ the number of requests per flow and per algorithm.
diff --git a/drivers/crypto/amlogic/Makefile b/drivers/crypto/amlogic/Makefile
new file mode 100644
index 000000000000..39057e62c13e
--- /dev/null
+++ b/drivers/crypto/amlogic/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_CRYPTO_DEV_AMLOGIC_GXL) += amlogic-gxl-crypto.o
+amlogic-gxl-crypto-y := amlogic-gxl-core.o amlogic-gxl-cipher.o
diff --git a/drivers/crypto/amlogic/amlogic-gxl-cipher.c b/drivers/crypto/amlogic/amlogic-gxl-cipher.c
new file mode 100644
index 000000000000..e589015aac1c
--- /dev/null
+++ b/drivers/crypto/amlogic/amlogic-gxl-cipher.c
@@ -0,0 +1,382 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * amlogic-gxl-cipher.c - hardware cryptographic offloader for Amlogic GXL SoC
+ *
+ * Copyright (C) 2018-2019 Corentin LABBE <clabbe@baylibre.com>
+ *
+ * This file adds support for AES ciphers with 128/192/256-bit keys in
+ * CBC and ECB mode.
+ */
+
+#include <linux/crypto.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <crypto/scatterwalk.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-mapping.h>
+#include <crypto/internal/skcipher.h>
+#include "amlogic-gxl.h"
+
+static int get_engine_number(struct meson_dev *mc)
+{
+ return atomic_inc_return(&mc->flow) % MAXFLOW;
+}
+
+static bool meson_cipher_need_fallback(struct skcipher_request *areq)
+{
+ struct scatterlist *src_sg = areq->src;
+ struct scatterlist *dst_sg = areq->dst;
+
+ if (areq->cryptlen == 0)
+ return true;
+
+ if (sg_nents(src_sg) != sg_nents(dst_sg))
+ return true;
+
+ /* the KEY and IV consume 3 of the MAXDESC descriptors */
+ if (sg_nents(src_sg) > MAXDESC - 3 || sg_nents(dst_sg) > MAXDESC - 3)
+ return true;
+
+ while (src_sg && dst_sg) {
+ if ((src_sg->length % 16) != 0)
+ return true;
+ if ((dst_sg->length % 16) != 0)
+ return true;
+ if (src_sg->length != dst_sg->length)
+ return true;
+ if (!IS_ALIGNED(src_sg->offset, sizeof(u32)))
+ return true;
+ if (!IS_ALIGNED(dst_sg->offset, sizeof(u32)))
+ return true;
+ src_sg = sg_next(src_sg);
+ dst_sg = sg_next(dst_sg);
+ }
+
+ return false;
+}
+
+static int meson_cipher_do_fallback(struct skcipher_request *areq)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+ struct meson_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+ struct meson_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
+ int err;
+#ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ struct meson_alg_template *algt;
+#endif
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, op->fallback_tfm);
+
+#ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG
+ algt = container_of(alg, struct meson_alg_template, alg.skcipher);
+ algt->stat_fb++;
+#endif
+ skcipher_request_set_sync_tfm(req, op->fallback_tfm);
+ skcipher_request_set_callback(req, areq->base.flags, NULL, NULL);
+ skcipher_request_set_crypt(req, areq->src, areq->dst,
+ areq->cryptlen, areq->iv);
+ if (rctx->op_dir == MESON_DECRYPT)
+ err = crypto_skcipher_decrypt(req);
+ else
+ err = crypto_skcipher_encrypt(req);
+ skcipher_request_zero(req);
+ return err;
+}
+
+static int meson_cipher(struct skcipher_request *areq)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+ struct meson_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+ struct meson_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
+ struct meson_dev *mc = op->mc;
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ struct meson_alg_template *algt;
+ int flow = rctx->flow;
+ unsigned int todo, eat, len;
+ struct scatterlist *src_sg = areq->src;
+ struct scatterlist *dst_sg = areq->dst;
+ struct meson_desc *desc;
+ int nr_sgs, nr_sgd;
+ int i, err = 0;
+ unsigned int keyivlen, ivsize, offset, tloffset;
+ dma_addr_t phykeyiv;
+ void *backup_iv = NULL, *bkeyiv;
+ __le32 v;
+
+ algt = container_of(alg, struct meson_alg_template, alg.skcipher);
+
+ dev_dbg(mc->dev, "%s %s %u %x IV(%u) key=%u flow=%d\n", __func__,
+ crypto_tfm_alg_name(areq->base.tfm),
+ areq->cryptlen,
+ rctx->op_dir, crypto_skcipher_ivsize(tfm),
+ op->keylen, flow);
+
+#ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG
+ algt->stat_req++;
+ mc->chanlist[flow].stat_req++;
+#endif
+
+ /*
+ * The hardware expects a list of meson_desc structures.
+ * The first two descriptors store the key,
+ * the third stores the IV.
+ */
+ bkeyiv = kzalloc(48, GFP_KERNEL | GFP_DMA);
+ if (!bkeyiv)
+ return -ENOMEM;
+
+ memcpy(bkeyiv, op->key, op->keylen);
+ keyivlen = op->keylen;
+
+ ivsize = crypto_skcipher_ivsize(tfm);
+ if (areq->iv && ivsize > 0) {
+ if (ivsize > areq->cryptlen) {
+ dev_err(mc->dev, "invalid ivsize=%d vs len=%d\n", ivsize, areq->cryptlen);
+ err = -EINVAL;
+ goto theend;
+ }
+ memcpy(bkeyiv + 32, areq->iv, ivsize);
+ keyivlen = 48;
+ if (rctx->op_dir == MESON_DECRYPT) {
+ backup_iv = kzalloc(ivsize, GFP_KERNEL);
+ if (!backup_iv) {
+ err = -ENOMEM;
+ goto theend;
+ }
+ offset = areq->cryptlen - ivsize;
+ scatterwalk_map_and_copy(backup_iv, areq->src, offset,
+ ivsize, 0);
+ }
+ }
+ if (keyivlen == 24)
+ keyivlen = 32;
+
+ phykeyiv = dma_map_single(mc->dev, bkeyiv, keyivlen,
+ DMA_TO_DEVICE);
+ err = dma_mapping_error(mc->dev, phykeyiv);
+ if (err) {
+ dev_err(mc->dev, "Cannot DMA MAP KEY IV\n");
+ goto theend;
+ }
+
+ tloffset = 0;
+ eat = 0;
+ i = 0;
+ while (keyivlen > eat) {
+ desc = &mc->chanlist[flow].tl[tloffset];
+ memset(desc, 0, sizeof(struct meson_desc));
+ todo = min(keyivlen - eat, 16u);
+ desc->t_src = cpu_to_le32(phykeyiv + i * 16);
+ desc->t_dst = cpu_to_le32(i * 16);
+ v = (MODE_KEY << 20) | DESC_OWN | 16;
+ desc->t_status = cpu_to_le32(v);
+
+ eat += todo;
+ i++;
+ tloffset++;
+ }
+
+ if (areq->src == areq->dst) {
+ nr_sgs = dma_map_sg(mc->dev, areq->src, sg_nents(areq->src),
+ DMA_BIDIRECTIONAL);
+ if (nr_sgs <= 0) {
+ dev_err(mc->dev, "Invalid SG count %d\n", nr_sgs);
+ err = -EINVAL;
+ goto theend;
+ }
+ nr_sgd = nr_sgs;
+ } else {
+ nr_sgs = dma_map_sg(mc->dev, areq->src, sg_nents(areq->src),
+ DMA_TO_DEVICE);
+ if (nr_sgs <= 0 || nr_sgs > MAXDESC - 3) {
+ dev_err(mc->dev, "Invalid SG count %d\n", nr_sgs);
+ err = -EINVAL;
+ goto theend;
+ }
+ nr_sgd = dma_map_sg(mc->dev, areq->dst, sg_nents(areq->dst),
+ DMA_FROM_DEVICE);
+ if (nr_sgd <= 0 || nr_sgd > MAXDESC - 3) {
+ dev_err(mc->dev, "Invalid SG count %d\n", nr_sgd);
+ err = -EINVAL;
+ goto theend;
+ }
+ }
+
+ src_sg = areq->src;
+ dst_sg = areq->dst;
+ len = areq->cryptlen;
+ while (src_sg) {
+ desc = &mc->chanlist[flow].tl[tloffset];
+ memset(desc, 0, sizeof(struct meson_desc));
+
+ desc->t_src = cpu_to_le32(sg_dma_address(src_sg));
+ desc->t_dst = cpu_to_le32(sg_dma_address(dst_sg));
+ todo = min(len, sg_dma_len(src_sg));
+ v = (op->keymode << 20) | DESC_OWN | todo | (algt->blockmode << 26);
+ if (rctx->op_dir)
+ v |= DESC_ENCRYPTION;
+ len -= todo;
+
+ if (!sg_next(src_sg))
+ v |= DESC_LAST;
+ desc->t_status = cpu_to_le32(v);
+ tloffset++;
+ src_sg = sg_next(src_sg);
+ dst_sg = sg_next(dst_sg);
+ }
+
+ reinit_completion(&mc->chanlist[flow].complete);
+ mc->chanlist[flow].status = 0;
+ writel(mc->chanlist[flow].t_phy | 2, mc->base + (flow << 2));
+ wait_for_completion_interruptible_timeout(&mc->chanlist[flow].complete,
+ msecs_to_jiffies(500));
+ if (mc->chanlist[flow].status == 0) {
+ dev_err(mc->dev, "DMA timeout for flow %d\n", flow);
+ err = -EINVAL;
+ }
+
+ dma_unmap_single(mc->dev, phykeyiv, keyivlen, DMA_TO_DEVICE);
+
+ if (areq->src == areq->dst) {
+ dma_unmap_sg(mc->dev, areq->src, nr_sgs, DMA_BIDIRECTIONAL);
+ } else {
+ dma_unmap_sg(mc->dev, areq->src, nr_sgs, DMA_TO_DEVICE);
+ dma_unmap_sg(mc->dev, areq->dst, nr_sgd, DMA_FROM_DEVICE);
+ }
+
+ if (areq->iv && ivsize > 0) {
+ if (rctx->op_dir == MESON_DECRYPT) {
+ memcpy(areq->iv, backup_iv, ivsize);
+ } else {
+ scatterwalk_map_and_copy(areq->iv, areq->dst,
+ areq->cryptlen - ivsize,
+ ivsize, 0);
+ }
+ }
+theend:
+ kzfree(bkeyiv);
+ kzfree(backup_iv);
+
+ return err;
+}
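A worked example of the key/IV packing in meson_cipher(), not part of the patch: for AES-192 in CBC mode op->keylen is 24, the IV is copied at offset 32 of the 48-byte bounce buffer and keyivlen becomes 48, which the descriptor loop splits into three 16-byte MODE_KEY descriptors. In ECB mode there is no IV, so the bare 24-byte key is rounded up to 32 bytes and consumes two descriptors. This reservation is why meson_cipher_need_fallback() limits data SGs to MAXDESC - 3.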
+
+static int meson_handle_cipher_request(struct crypto_engine *engine,
+ void *areq)
+{
+ int err;
+ struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);
+
+ err = meson_cipher(breq);
+ crypto_finalize_skcipher_request(engine, breq, err);
+
+ return 0;
+}
+
+int meson_skdecrypt(struct skcipher_request *areq)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+ struct meson_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+ struct meson_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
+ struct crypto_engine *engine;
+ int e;
+
+ rctx->op_dir = MESON_DECRYPT;
+ if (meson_cipher_need_fallback(areq))
+ return meson_cipher_do_fallback(areq);
+ e = get_engine_number(op->mc);
+ engine = op->mc->chanlist[e].engine;
+ rctx->flow = e;
+
+ return crypto_transfer_skcipher_request_to_engine(engine, areq);
+}
+
+int meson_skencrypt(struct skcipher_request *areq)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+ struct meson_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+ struct meson_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
+ struct crypto_engine *engine;
+ int e;
+
+ rctx->op_dir = MESON_ENCRYPT;
+ if (meson_cipher_need_fallback(areq))
+ return meson_cipher_do_fallback(areq);
+ e = get_engine_number(op->mc);
+ engine = op->mc->chanlist[e].engine;
+ rctx->flow = e;
+
+ return crypto_transfer_skcipher_request_to_engine(engine, areq);
+}
+
+int meson_cipher_init(struct crypto_tfm *tfm)
+{
+ struct meson_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm);
+ struct meson_alg_template *algt;
+ const char *name = crypto_tfm_alg_name(tfm);
+ struct crypto_skcipher *sktfm = __crypto_skcipher_cast(tfm);
+ struct skcipher_alg *alg = crypto_skcipher_alg(sktfm);
+
+ memset(op, 0, sizeof(struct meson_cipher_tfm_ctx));
+
+ algt = container_of(alg, struct meson_alg_template, alg.skcipher);
+ op->mc = algt->mc;
+
+ sktfm->reqsize = sizeof(struct meson_cipher_req_ctx);
+
+ op->fallback_tfm = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(op->fallback_tfm)) {
+ dev_err(op->mc->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
+ name, PTR_ERR(op->fallback_tfm));
+ return PTR_ERR(op->fallback_tfm);
+ }
+
+ op->enginectx.op.do_one_request = meson_handle_cipher_request;
+ op->enginectx.op.prepare_request = NULL;
+ op->enginectx.op.unprepare_request = NULL;
+
+ return 0;
+}
+
+void meson_cipher_exit(struct crypto_tfm *tfm)
+{
+ struct meson_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm);
+
+ if (op->key) {
+ memzero_explicit(op->key, op->keylen);
+ kfree(op->key);
+ }
+ crypto_free_sync_skcipher(op->fallback_tfm);
+}
+
+int meson_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct meson_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+ struct meson_dev *mc = op->mc;
+
+ switch (keylen) {
+ case 128 / 8:
+ op->keymode = MODE_AES_128;
+ break;
+ case 192 / 8:
+ op->keymode = MODE_AES_192;
+ break;
+ case 256 / 8:
+ op->keymode = MODE_AES_256;
+ break;
+ default:
+ dev_dbg(mc->dev, "ERROR: Invalid keylen %u\n", keylen);
+ crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+ if (op->key) {
+ memzero_explicit(op->key, op->keylen);
+ kfree(op->key);
+ }
+ op->keylen = keylen;
+ op->key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
+ if (!op->key)
+ return -ENOMEM;
+
+ return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen);
+}
diff --git a/drivers/crypto/amlogic/amlogic-gxl-core.c b/drivers/crypto/amlogic/amlogic-gxl-core.c
new file mode 100644
index 000000000000..fa05fce1c0de
--- /dev/null
+++ b/drivers/crypto/amlogic/amlogic-gxl-core.c
@@ -0,0 +1,332 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * amlogic-gxl-core.c - hardware cryptographic offloader for Amlogic GXL SoC
+ *
+ * Copyright (C) 2018-2019 Corentin Labbe <clabbe@baylibre.com>
+ *
+ * Core file which registers crypto algorithms supported by the hardware.
+ */
+#include <linux/clk.h>
+#include <linux/crypto.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <crypto/internal/skcipher.h>
+#include <linux/dma-mapping.h>
+
+#include "amlogic-gxl.h"
+
+static irqreturn_t meson_irq_handler(int irq, void *data)
+{
+ struct meson_dev *mc = (struct meson_dev *)data;
+ int flow;
+ u32 p;
+
+ for (flow = 0; flow < MAXFLOW; flow++) {
+ if (mc->irqs[flow] == irq) {
+ p = readl(mc->base + ((0x04 + flow) << 2));
+ if (p) {
+ writel_relaxed(0xF, mc->base + ((0x4 + flow) << 2));
+ mc->chanlist[flow].status = 1;
+ complete(&mc->chanlist[flow].complete);
+ return IRQ_HANDLED;
+ }
+ dev_err(mc->dev, "%s %d Got irq for flow %d but ctrl is empty\n", __func__, irq, flow);
+ }
+ }
+
+ dev_err(mc->dev, "%s %d from unknown irq\n", __func__, irq);
+ return IRQ_HANDLED;
+}
+
+static struct meson_alg_template mc_algs[] = {
+{
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .blockmode = MESON_OPMODE_CBC,
+ .alg.skcipher = {
+ .base = {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "cbc-aes-gxl",
+ .cra_priority = 400,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .cra_ctxsize = sizeof(struct meson_cipher_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_alignmask = 0xf,
+ .cra_init = meson_cipher_init,
+ .cra_exit = meson_cipher_exit,
+ },
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = meson_aes_setkey,
+ .encrypt = meson_skencrypt,
+ .decrypt = meson_skdecrypt,
+ }
+},
+{
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .blockmode = MESON_OPMODE_ECB,
+ .alg.skcipher = {
+ .base = {
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "ecb-aes-gxl",
+ .cra_priority = 400,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .cra_ctxsize = sizeof(struct meson_cipher_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_alignmask = 0xf,
+ .cra_init = meson_cipher_init,
+ .cra_exit = meson_cipher_exit,
+ },
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = meson_aes_setkey,
+ .encrypt = meson_skencrypt,
+ .decrypt = meson_skdecrypt,
+ }
+},
+};
+
+#ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG
+static int meson_dbgfs_read(struct seq_file *seq, void *v)
+{
+ struct meson_dev *mc = seq->private;
+ int i;
+
+ for (i = 0; i < MAXFLOW; i++)
+ seq_printf(seq, "Channel %d: nreq %lu\n", i, mc->chanlist[i].stat_req);
+
+ for (i = 0; i < ARRAY_SIZE(mc_algs); i++) {
+ switch (mc_algs[i].type) {
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ seq_printf(seq, "%s %s %lu %lu\n",
+ mc_algs[i].alg.skcipher.base.cra_driver_name,
+ mc_algs[i].alg.skcipher.base.cra_name,
+ mc_algs[i].stat_req, mc_algs[i].stat_fb);
+ break;
+ }
+ }
+ return 0;
+}
+
+static int meson_dbgfs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, meson_dbgfs_read, inode->i_private);
+}
+
+static const struct file_operations meson_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = meson_dbgfs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+#endif
+
+static void meson_free_chanlist(struct meson_dev *mc, int i)
+{
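+ /* Free flows from index i down to 0; callers pass the last initialised index. */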
+ while (i >= 0) {
+ crypto_engine_exit(mc->chanlist[i].engine);
+ if (mc->chanlist[i].tl)
+ dma_free_coherent(mc->dev, sizeof(struct meson_desc) * MAXDESC,
+ mc->chanlist[i].tl,
+ mc->chanlist[i].t_phy);
+ i--;
+ }
+}
+
+/*
+ * Allocate the channel list structure
+ */
+static int meson_allocate_chanlist(struct meson_dev *mc)
+{
+ int i, err;
+
+ mc->chanlist = devm_kcalloc(mc->dev, MAXFLOW,
+ sizeof(struct meson_flow), GFP_KERNEL);
+ if (!mc->chanlist)
+ return -ENOMEM;
+
+ for (i = 0; i < MAXFLOW; i++) {
+ init_completion(&mc->chanlist[i].complete);
+
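+ /* The second parameter of crypto_engine_alloc_init() ("rt") runs the engine queue thread with realtime priority. */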
+ mc->chanlist[i].engine = crypto_engine_alloc_init(mc->dev, true);
+ if (!mc->chanlist[i].engine) {
+ dev_err(mc->dev, "Cannot allocate engine\n");
+ i--;
+ err = -ENOMEM;
+ goto error_engine;
+ }
+ err = crypto_engine_start(mc->chanlist[i].engine);
+ if (err) {
+ dev_err(mc->dev, "Cannot start engine\n");
+ goto error_engine;
+ }
+ mc->chanlist[i].tl = dma_alloc_coherent(mc->dev,
+ sizeof(struct meson_desc) * MAXDESC,
+ &mc->chanlist[i].t_phy,
+ GFP_KERNEL);
+ if (!mc->chanlist[i].tl) {
+ err = -ENOMEM;
+ goto error_engine;
+ }
+ }
+ return 0;
+error_engine:
+ meson_free_chanlist(mc, i);
+ return err;
+}
+
+static int meson_register_algs(struct meson_dev *mc)
+{
+ int err, i;
+
+ for (i = 0; i < ARRAY_SIZE(mc_algs); i++) {
+ mc_algs[i].mc = mc;
+ switch (mc_algs[i].type) {
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ err = crypto_register_skcipher(&mc_algs[i].alg.skcipher);
+ if (err) {
+ dev_err(mc->dev, "Fail to register %s\n",
+ mc_algs[i].alg.skcipher.base.cra_name);
+ mc_algs[i].mc = NULL;
+ return err;
+ }
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static void meson_unregister_algs(struct meson_dev *mc)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mc_algs); i++) {
+ if (!mc_algs[i].mc)
+ continue;
+ switch (mc_algs[i].type) {
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ crypto_unregister_skcipher(&mc_algs[i].alg.skcipher);
+ break;
+ }
+ }
+}
+
+static int meson_crypto_probe(struct platform_device *pdev)
+{
+ struct meson_dev *mc;
+ int err, i;
+
+ if (!pdev->dev.of_node)
+ return -ENODEV;
+
+ mc = devm_kzalloc(&pdev->dev, sizeof(*mc), GFP_KERNEL);
+ if (!mc)
+ return -ENOMEM;
+
+ mc->dev = &pdev->dev;
+ platform_set_drvdata(pdev, mc);
+
+ mc->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(mc->base)) {
+ err = PTR_ERR(mc->base);
+ dev_err(&pdev->dev, "Cannot request MMIO err=%d\n", err);
+ return err;
+ }
+ mc->busclk = devm_clk_get(&pdev->dev, "blkmv");
+ if (IS_ERR(mc->busclk)) {
+ err = PTR_ERR(mc->busclk);
+ dev_err(&pdev->dev, "Cannot get core clock err=%d\n", err);
+ return err;
+ }
+
+ mc->irqs = devm_kcalloc(mc->dev, MAXFLOW, sizeof(int), GFP_KERNEL);
+ if (!mc->irqs)
+ return -ENOMEM;
+
+ for (i = 0; i < MAXFLOW; i++) {
+ mc->irqs[i] = platform_get_irq(pdev, i);
+ if (mc->irqs[i] < 0) {
+ dev_err(mc->dev, "Cannot get IRQ for flow %d\n", i);
+ return mc->irqs[i];
+ }
+
+ err = devm_request_irq(&pdev->dev, mc->irqs[i], meson_irq_handler, 0,
+ "gxl-crypto", mc);
+ if (err < 0) {
+ dev_err(mc->dev, "Cannot request IRQ for flow %d\n", i);
+ return err;
+ }
+ }
+
+ err = clk_prepare_enable(mc->busclk);
+ if (err != 0) {
+ dev_err(&pdev->dev, "Cannot prepare_enable busclk\n");
+ return err;
+ }
+
+ err = meson_allocate_chanlist(mc);
+ if (err)
+ goto error_flow;
+
+ err = meson_register_algs(mc);
+ if (err)
+ goto error_alg;
+
+#ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG
+ mc->dbgfs_dir = debugfs_create_dir("gxl-crypto", NULL);
+ debugfs_create_file("stats", 0444, mc->dbgfs_dir, mc, &meson_debugfs_fops);
+#endif
+
+ return 0;
+error_alg:
+ meson_unregister_algs(mc);
+error_flow:
+ meson_free_chanlist(mc, MAXFLOW - 1);
+ clk_disable_unprepare(mc->busclk);
+ return err;
+}
+
+static int meson_crypto_remove(struct platform_device *pdev)
+{
+ struct meson_dev *mc = platform_get_drvdata(pdev);
+
+#ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG
+ debugfs_remove_recursive(mc->dbgfs_dir);
+#endif
+
+ meson_unregister_algs(mc);
+
+ meson_free_chanlist(mc, MAXFLOW - 1);
+
+ clk_disable_unprepare(mc->busclk);
+ return 0;
+}
+
+static const struct of_device_id meson_crypto_of_match_table[] = {
+ { .compatible = "amlogic,gxl-crypto", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, meson_crypto_of_match_table);
+
+static struct platform_driver meson_crypto_driver = {
+ .probe = meson_crypto_probe,
+ .remove = meson_crypto_remove,
+ .driver = {
+ .name = "gxl-crypto",
+ .of_match_table = meson_crypto_of_match_table,
+ },
+};
+
+module_platform_driver(meson_crypto_driver);
+
+MODULE_DESCRIPTION("Amlogic GXL cryptographic offloader");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Corentin Labbe <clabbe@baylibre.com>");
diff --git a/drivers/crypto/amlogic/amlogic-gxl.h b/drivers/crypto/amlogic/amlogic-gxl.h
new file mode 100644
index 000000000000..b7f2de91ab76
--- /dev/null
+++ b/drivers/crypto/amlogic/amlogic-gxl.h
@@ -0,0 +1,161 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * amlogic-gxl.h - hardware cryptographic offloader for Amlogic GXL SoC
+ *
+ * Copyright (C) 2018-2019 Corentin LABBE <clabbe@baylibre.com>
+ */
+#include <crypto/aes.h>
+#include <crypto/engine.h>
+#include <crypto/skcipher.h>
+#include <linux/debugfs.h>
+#include <linux/crypto.h>
+#include <linux/scatterlist.h>
+
+#define MODE_KEY 1
+#define MODE_AES_128 0x8
+#define MODE_AES_192 0x9
+#define MODE_AES_256 0xa
+
+#define MESON_DECRYPT 0
+#define MESON_ENCRYPT 1
+
+#define MESON_OPMODE_ECB 0
+#define MESON_OPMODE_CBC 1
+
+#define MAXFLOW 2
+
+#define MAXDESC 64
+
+#define DESC_LAST BIT(18)
+#define DESC_ENCRYPTION BIT(28)
+#define DESC_OWN BIT(31)
+
+/*
+ * struct meson_desc - Descriptor for DMA operations
+ * Note that without datasheet, some are unknown
+ * @t_status: Descriptor of the cipher operation (see description below)
+ * @t_src: Physical address of data to read
+ * @t_dst: Physical address of data to write
+ * t_status is segmented like this:
+ * @len: 0-16 length of data to operate
+ * @irq: 17 Ignored by hardware
+ * @eoc: 18 End means the descriptor is the last
+ * @loop: 19 Unknown
+ * @mode: 20-23 Type of algorithm (AES, SHA)
+ * @begin: 24 Unknown
+ * @end: 25 Unknown
+ * @op_mode: 26-27 Blockmode (CBC, ECB)
+ * @enc: 28 0 means decryption, 1 is for encryption
+ * @block: 29 Unknown
+ * @error: 30 Unknown
+ * @owner: 31 owner of the descriptor, 1 own by HW
+ */
+struct meson_desc {
+ __le32 t_status;
+ __le32 t_src;
+ __le32 t_dst;
+};
+
+/*
+ * struct meson_flow - Information used by each flow
+ * @engine: ptr to the crypto_engine for this flow
+ * @keylen: key length for the current operation on this flow
+ * @complete: completion for the current task on this flow
+ * @status: set to 1 by the interrupt handler when the task is done
+ * @t_phy: Physical address of the descriptor list
+ * @tl: pointer to the current descriptor list (ce_task) for this flow
+ * @stat_req: number of requests processed by this flow
+ */
+struct meson_flow {
+ struct crypto_engine *engine;
+ struct completion complete;
+ int status;
+ unsigned int keylen;
+ dma_addr_t t_phy;
+ struct meson_desc *tl;
+#ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG
+ unsigned long stat_req;
+#endif
+};
+
+/*
+ * struct meson_dev - main container for all this driver information
+ * @base: base address of amlogic-crypto
+ * @busclk: bus clock for amlogic-crypto
+ * @dev: the platform device
+ * @chanlist: array of all flows
+ * @flow: flow to use for the next request
+ * @irqs: IRQ numbers for amlogic-crypto
+ * @dbgfs_dir: Debugfs dentry for the statistics directory
+ */
+struct meson_dev {
+ void __iomem *base;
+ struct clk *busclk;
+ struct device *dev;
+ struct meson_flow *chanlist;
+ atomic_t flow;
+ int *irqs;
+#ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG
+ struct dentry *dbgfs_dir;
+#endif
+};
+
+/*
+ * struct meson_cipher_req_ctx - context for a skcipher request
+ * @op_dir: direction (encrypt vs decrypt) for this request
+ * @flow: the flow to use for this request
+ */
+struct meson_cipher_req_ctx {
+ u32 op_dir;
+ int flow;
+};
+
+/*
+ * struct meson_cipher_tfm_ctx - context for a skcipher TFM
+ * @enginectx: crypto_engine used by this TFM
+ * @key: pointer to key data
+ * @keylen: length of the key
+ * @keymode: the keymode (type and size of key) associated with this TFM
+ * @mc: pointer to the private data of driver handling this TFM
+ * @fallback_tfm: pointer to the fallback TFM
+ */
+struct meson_cipher_tfm_ctx {
+ struct crypto_engine_ctx enginectx;
+ u32 *key;
+ u32 keylen;
+ u32 keymode;
+ struct meson_dev *mc;
+ struct crypto_sync_skcipher *fallback_tfm;
+};
+
+/*
+ * struct meson_alg_template - crypto_alg template
+ * @type: the CRYPTO_ALG_TYPE for this template
+ * @blockmode: the type of block operation
+ * @mc: pointer to the meson_dev structure associated with this template
+ * @alg: one of the sub-structs must be used
+ * @stat_req: number of requests processed with this template
+ * @stat_fb: number of requests which fell back to the software implementation
+ */
+struct meson_alg_template {
+ u32 type;
+ u32 blockmode;
+ union {
+ struct skcipher_alg skcipher;
+ } alg;
+ struct meson_dev *mc;
+#ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG
+ unsigned long stat_req;
+ unsigned long stat_fb;
+#endif
+};
+
+int meson_enqueue(struct crypto_async_request *areq, u32 type);
+
+int meson_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keylen);
+int meson_cipher_init(struct crypto_tfm *tfm);
+void meson_cipher_exit(struct crypto_tfm *tfm);
+int meson_skdecrypt(struct skcipher_request *areq);
+int meson_skencrypt(struct skcipher_request *areq);
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
index 026f193556f9..91092504bc96 100644
--- a/drivers/crypto/atmel-aes.c
+++ b/drivers/crypto/atmel-aes.c
@@ -36,6 +36,7 @@
#include <crypto/gcm.h>
#include <crypto/xts.h>
#include <crypto/internal/aead.h>
+#include <crypto/internal/skcipher.h>
#include <linux/platform_data/crypto-atmel.h>
#include <dt-bindings/dma/at91.h>
#include "atmel-aes-regs.h"
@@ -117,7 +118,7 @@ struct atmel_aes_ctx {
struct atmel_aes_ctr_ctx {
struct atmel_aes_base_ctx base;
- u32 iv[AES_BLOCK_SIZE / sizeof(u32)];
+ __be32 iv[AES_BLOCK_SIZE / sizeof(u32)];
size_t offset;
struct scatterlist src[2];
struct scatterlist dst[2];
@@ -129,13 +130,13 @@ struct atmel_aes_gcm_ctx {
struct scatterlist src[2];
struct scatterlist dst[2];
- u32 j0[AES_BLOCK_SIZE / sizeof(u32)];
+ __be32 j0[AES_BLOCK_SIZE / sizeof(u32)];
u32 tag[AES_BLOCK_SIZE / sizeof(u32)];
- u32 ghash[AES_BLOCK_SIZE / sizeof(u32)];
+ __be32 ghash[AES_BLOCK_SIZE / sizeof(u32)];
size_t textlen;
- const u32 *ghash_in;
- u32 *ghash_out;
+ const __be32 *ghash_in;
+ __be32 *ghash_out;
atmel_aes_fn_t ghash_resume;
};
@@ -145,7 +146,7 @@ struct atmel_aes_xts_ctx {
u32 key2[AES_KEYSIZE_256 / sizeof(u32)];
};
-#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
+#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
struct atmel_aes_authenc_ctx {
struct atmel_aes_base_ctx base;
struct atmel_sha_authenc_ctx *auth;
@@ -154,10 +155,10 @@ struct atmel_aes_authenc_ctx {
struct atmel_aes_reqctx {
unsigned long mode;
- u32 lastc[AES_BLOCK_SIZE / sizeof(u32)];
+ u8 lastc[AES_BLOCK_SIZE];
};
-#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
+#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
struct atmel_aes_authenc_reqctx {
struct atmel_aes_reqctx base;
@@ -388,13 +389,13 @@ static void atmel_aes_write_n(struct atmel_aes_dev *dd, u32 offset,
}
static inline void atmel_aes_read_block(struct atmel_aes_dev *dd, u32 offset,
- u32 *value)
+ void *value)
{
atmel_aes_read_n(dd, offset, value, SIZE_IN_WORDS(AES_BLOCK_SIZE));
}
static inline void atmel_aes_write_block(struct atmel_aes_dev *dd, u32 offset,
- const u32 *value)
+ const void *value)
{
atmel_aes_write_n(dd, offset, value, SIZE_IN_WORDS(AES_BLOCK_SIZE));
}
@@ -486,13 +487,36 @@ static inline bool atmel_aes_is_encrypt(const struct atmel_aes_dev *dd)
return (dd->flags & AES_FLAGS_ENCRYPT);
}
-#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
+#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
static void atmel_aes_authenc_complete(struct atmel_aes_dev *dd, int err);
#endif
+static void atmel_aes_set_iv_as_last_ciphertext_block(struct atmel_aes_dev *dd)
+{
+ struct skcipher_request *req = skcipher_request_cast(dd->areq);
+ struct atmel_aes_reqctx *rctx = skcipher_request_ctx(req);
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
+
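+ /* Per the skcipher API, CBC-style modes must return the last ciphertext block in req->iv so requests can be chained. */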
+ if (req->cryptlen < ivsize)
+ return;
+
+ if (rctx->mode & AES_FLAGS_ENCRYPT) {
+ scatterwalk_map_and_copy(req->iv, req->dst,
+ req->cryptlen - ivsize, ivsize, 0);
+ } else {
+ if (req->src == req->dst)
+ memcpy(req->iv, rctx->lastc, ivsize);
+ else
+ scatterwalk_map_and_copy(req->iv, req->src,
+ req->cryptlen - ivsize,
+ ivsize, 0);
+ }
+}
+
static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err)
{
-#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
+#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
if (dd->ctx->is_aead)
atmel_aes_authenc_complete(dd, err);
#endif
@@ -500,26 +524,8 @@ static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err)
clk_disable(dd->iclk);
dd->flags &= ~AES_FLAGS_BUSY;
- if (!dd->ctx->is_aead) {
- struct ablkcipher_request *req =
- ablkcipher_request_cast(dd->areq);
- struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req);
- struct crypto_ablkcipher *ablkcipher =
- crypto_ablkcipher_reqtfm(req);
- int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
-
- if (rctx->mode & AES_FLAGS_ENCRYPT) {
- scatterwalk_map_and_copy(req->info, req->dst,
- req->nbytes - ivsize, ivsize, 0);
- } else {
- if (req->src == req->dst) {
- memcpy(req->info, rctx->lastc, ivsize);
- } else {
- scatterwalk_map_and_copy(req->info, req->src,
- req->nbytes - ivsize, ivsize, 0);
- }
- }
- }
+ if (!dd->ctx->is_aead)
+ atmel_aes_set_iv_as_last_ciphertext_block(dd);
if (dd->is_async)
dd->areq->complete(dd->areq, err);
@@ -530,7 +536,7 @@ static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err)
}
static void atmel_aes_write_ctrl_key(struct atmel_aes_dev *dd, bool use_dma,
- const u32 *iv, const u32 *key, int keylen)
+ const __be32 *iv, const u32 *key, int keylen)
{
u32 valmr = 0;
@@ -561,7 +567,7 @@ static void atmel_aes_write_ctrl_key(struct atmel_aes_dev *dd, bool use_dma,
}
static inline void atmel_aes_write_ctrl(struct atmel_aes_dev *dd, bool use_dma,
- const u32 *iv)
+ const __be32 *iv)
{
atmel_aes_write_ctrl_key(dd, use_dma, iv,
@@ -976,9 +982,9 @@ static int atmel_aes_transfer_complete(struct atmel_aes_dev *dd)
static int atmel_aes_start(struct atmel_aes_dev *dd)
{
- struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);
- struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req);
- bool use_dma = (req->nbytes >= ATMEL_AES_DMA_THRESHOLD ||
+ struct skcipher_request *req = skcipher_request_cast(dd->areq);
+ struct atmel_aes_reqctx *rctx = skcipher_request_ctx(req);
+ bool use_dma = (req->cryptlen >= ATMEL_AES_DMA_THRESHOLD ||
dd->ctx->block_size != AES_BLOCK_SIZE);
int err;
@@ -988,12 +994,13 @@ static int atmel_aes_start(struct atmel_aes_dev *dd)
if (err)
return atmel_aes_complete(dd, err);
- atmel_aes_write_ctrl(dd, use_dma, req->info);
+ atmel_aes_write_ctrl(dd, use_dma, (void *)req->iv);
if (use_dma)
- return atmel_aes_dma_start(dd, req->src, req->dst, req->nbytes,
+ return atmel_aes_dma_start(dd, req->src, req->dst,
+ req->cryptlen,
atmel_aes_transfer_complete);
- return atmel_aes_cpu_start(dd, req->src, req->dst, req->nbytes,
+ return atmel_aes_cpu_start(dd, req->src, req->dst, req->cryptlen,
atmel_aes_transfer_complete);
}
@@ -1006,7 +1013,7 @@ atmel_aes_ctr_ctx_cast(struct atmel_aes_base_ctx *ctx)
static int atmel_aes_ctr_transfer(struct atmel_aes_dev *dd)
{
struct atmel_aes_ctr_ctx *ctx = atmel_aes_ctr_ctx_cast(dd->ctx);
- struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);
+ struct skcipher_request *req = skcipher_request_cast(dd->areq);
struct scatterlist *src, *dst;
u32 ctr, blocks;
size_t datalen;
@@ -1014,11 +1021,11 @@ static int atmel_aes_ctr_transfer(struct atmel_aes_dev *dd)
/* Check for transfer completion. */
ctx->offset += dd->total;
- if (ctx->offset >= req->nbytes)
+ if (ctx->offset >= req->cryptlen)
return atmel_aes_transfer_complete(dd);
/* Compute data length. */
- datalen = req->nbytes - ctx->offset;
+ datalen = req->cryptlen - ctx->offset;
blocks = DIV_ROUND_UP(datalen, AES_BLOCK_SIZE);
ctr = be32_to_cpu(ctx->iv[3]);
if (dd->caps.has_ctr32) {
@@ -1071,8 +1078,8 @@ static int atmel_aes_ctr_transfer(struct atmel_aes_dev *dd)
static int atmel_aes_ctr_start(struct atmel_aes_dev *dd)
{
struct atmel_aes_ctr_ctx *ctx = atmel_aes_ctr_ctx_cast(dd->ctx);
- struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);
- struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req);
+ struct skcipher_request *req = skcipher_request_cast(dd->areq);
+ struct atmel_aes_reqctx *rctx = skcipher_request_ctx(req);
int err;
atmel_aes_set_mode(dd, rctx);
@@ -1081,16 +1088,16 @@ static int atmel_aes_ctr_start(struct atmel_aes_dev *dd)
if (err)
return atmel_aes_complete(dd, err);
- memcpy(ctx->iv, req->info, AES_BLOCK_SIZE);
+ memcpy(ctx->iv, req->iv, AES_BLOCK_SIZE);
ctx->offset = 0;
dd->total = 0;
return atmel_aes_ctr_transfer(dd);
}
-static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
+static int atmel_aes_crypt(struct skcipher_request *req, unsigned long mode)
{
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
- struct atmel_aes_base_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ struct atmel_aes_base_ctx *ctx = crypto_skcipher_ctx(skcipher);
struct atmel_aes_reqctx *rctx;
struct atmel_aes_dev *dd;
@@ -1121,28 +1128,30 @@ static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
if (!dd)
return -ENODEV;
- rctx = ablkcipher_request_ctx(req);
+ rctx = skcipher_request_ctx(req);
rctx->mode = mode;
if (!(mode & AES_FLAGS_ENCRYPT) && (req->src == req->dst)) {
- int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
- scatterwalk_map_and_copy(rctx->lastc, req->src,
- (req->nbytes - ivsize), ivsize, 0);
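+ /* In-place decryption overwrites req->src, so save the last ciphertext block now; it becomes the chained IV on completion. */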
+ if (req->cryptlen >= ivsize)
+ scatterwalk_map_and_copy(rctx->lastc, req->src,
+ req->cryptlen - ivsize,
+ ivsize, 0);
}
return atmel_aes_handle_queue(dd, &req->base);
}
-static int atmel_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+static int atmel_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen)
{
- struct atmel_aes_base_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+ struct atmel_aes_base_ctx *ctx = crypto_skcipher_ctx(tfm);
if (keylen != AES_KEYSIZE_128 &&
keylen != AES_KEYSIZE_192 &&
keylen != AES_KEYSIZE_256) {
- crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
@@ -1152,297 +1161,279 @@ static int atmel_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
return 0;
}
-static int atmel_aes_ecb_encrypt(struct ablkcipher_request *req)
+static int atmel_aes_ecb_encrypt(struct skcipher_request *req)
{
return atmel_aes_crypt(req, AES_FLAGS_ECB | AES_FLAGS_ENCRYPT);
}
-static int atmel_aes_ecb_decrypt(struct ablkcipher_request *req)
+static int atmel_aes_ecb_decrypt(struct skcipher_request *req)
{
return atmel_aes_crypt(req, AES_FLAGS_ECB);
}
-static int atmel_aes_cbc_encrypt(struct ablkcipher_request *req)
+static int atmel_aes_cbc_encrypt(struct skcipher_request *req)
{
return atmel_aes_crypt(req, AES_FLAGS_CBC | AES_FLAGS_ENCRYPT);
}
-static int atmel_aes_cbc_decrypt(struct ablkcipher_request *req)
+static int atmel_aes_cbc_decrypt(struct skcipher_request *req)
{
return atmel_aes_crypt(req, AES_FLAGS_CBC);
}
-static int atmel_aes_ofb_encrypt(struct ablkcipher_request *req)
+static int atmel_aes_ofb_encrypt(struct skcipher_request *req)
{
return atmel_aes_crypt(req, AES_FLAGS_OFB | AES_FLAGS_ENCRYPT);
}
-static int atmel_aes_ofb_decrypt(struct ablkcipher_request *req)
+static int atmel_aes_ofb_decrypt(struct skcipher_request *req)
{
return atmel_aes_crypt(req, AES_FLAGS_OFB);
}
-static int atmel_aes_cfb_encrypt(struct ablkcipher_request *req)
+static int atmel_aes_cfb_encrypt(struct skcipher_request *req)
{
return atmel_aes_crypt(req, AES_FLAGS_CFB128 | AES_FLAGS_ENCRYPT);
}
-static int atmel_aes_cfb_decrypt(struct ablkcipher_request *req)
+static int atmel_aes_cfb_decrypt(struct skcipher_request *req)
{
return atmel_aes_crypt(req, AES_FLAGS_CFB128);
}
-static int atmel_aes_cfb64_encrypt(struct ablkcipher_request *req)
+static int atmel_aes_cfb64_encrypt(struct skcipher_request *req)
{
return atmel_aes_crypt(req, AES_FLAGS_CFB64 | AES_FLAGS_ENCRYPT);
}
-static int atmel_aes_cfb64_decrypt(struct ablkcipher_request *req)
+static int atmel_aes_cfb64_decrypt(struct skcipher_request *req)
{
return atmel_aes_crypt(req, AES_FLAGS_CFB64);
}
-static int atmel_aes_cfb32_encrypt(struct ablkcipher_request *req)
+static int atmel_aes_cfb32_encrypt(struct skcipher_request *req)
{
return atmel_aes_crypt(req, AES_FLAGS_CFB32 | AES_FLAGS_ENCRYPT);
}
-static int atmel_aes_cfb32_decrypt(struct ablkcipher_request *req)
+static int atmel_aes_cfb32_decrypt(struct skcipher_request *req)
{
return atmel_aes_crypt(req, AES_FLAGS_CFB32);
}
-static int atmel_aes_cfb16_encrypt(struct ablkcipher_request *req)
+static int atmel_aes_cfb16_encrypt(struct skcipher_request *req)
{
return atmel_aes_crypt(req, AES_FLAGS_CFB16 | AES_FLAGS_ENCRYPT);
}
-static int atmel_aes_cfb16_decrypt(struct ablkcipher_request *req)
+static int atmel_aes_cfb16_decrypt(struct skcipher_request *req)
{
return atmel_aes_crypt(req, AES_FLAGS_CFB16);
}
-static int atmel_aes_cfb8_encrypt(struct ablkcipher_request *req)
+static int atmel_aes_cfb8_encrypt(struct skcipher_request *req)
{
return atmel_aes_crypt(req, AES_FLAGS_CFB8 | AES_FLAGS_ENCRYPT);
}
-static int atmel_aes_cfb8_decrypt(struct ablkcipher_request *req)
+static int atmel_aes_cfb8_decrypt(struct skcipher_request *req)
{
return atmel_aes_crypt(req, AES_FLAGS_CFB8);
}
-static int atmel_aes_ctr_encrypt(struct ablkcipher_request *req)
+static int atmel_aes_ctr_encrypt(struct skcipher_request *req)
{
return atmel_aes_crypt(req, AES_FLAGS_CTR | AES_FLAGS_ENCRYPT);
}
-static int atmel_aes_ctr_decrypt(struct ablkcipher_request *req)
+static int atmel_aes_ctr_decrypt(struct skcipher_request *req)
{
return atmel_aes_crypt(req, AES_FLAGS_CTR);
}
-static int atmel_aes_cra_init(struct crypto_tfm *tfm)
+static int atmel_aes_init_tfm(struct crypto_skcipher *tfm)
{
- struct atmel_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct atmel_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
- tfm->crt_ablkcipher.reqsize = sizeof(struct atmel_aes_reqctx);
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct atmel_aes_reqctx));
ctx->base.start = atmel_aes_start;
return 0;
}
-static int atmel_aes_ctr_cra_init(struct crypto_tfm *tfm)
+static int atmel_aes_ctr_init_tfm(struct crypto_skcipher *tfm)
{
- struct atmel_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct atmel_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
- tfm->crt_ablkcipher.reqsize = sizeof(struct atmel_aes_reqctx);
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct atmel_aes_reqctx));
ctx->base.start = atmel_aes_ctr_start;
return 0;
}
-static struct crypto_alg aes_algs[] = {
-{
- .cra_name = "ecb(aes)",
- .cra_driver_name = "atmel-ecb-aes",
- .cra_priority = ATMEL_AES_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_aes_ctx),
- .cra_alignmask = 0xf,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_aes_cra_init,
- .cra_u.ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = atmel_aes_setkey,
- .encrypt = atmel_aes_ecb_encrypt,
- .decrypt = atmel_aes_ecb_decrypt,
- }
+static struct skcipher_alg aes_algs[] = {
+{
+ .base.cra_name = "ecb(aes)",
+ .base.cra_driver_name = "atmel-ecb-aes",
+ .base.cra_priority = ATMEL_AES_PRIORITY,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct atmel_aes_ctx),
+ .base.cra_alignmask = 0xf,
+ .base.cra_module = THIS_MODULE,
+
+ .init = atmel_aes_init_tfm,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = atmel_aes_setkey,
+ .encrypt = atmel_aes_ecb_encrypt,
+ .decrypt = atmel_aes_ecb_decrypt,
},
{
- .cra_name = "cbc(aes)",
- .cra_driver_name = "atmel-cbc-aes",
- .cra_priority = ATMEL_AES_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_aes_ctx),
- .cra_alignmask = 0xf,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_aes_cra_init,
- .cra_u.ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = atmel_aes_setkey,
- .encrypt = atmel_aes_cbc_encrypt,
- .decrypt = atmel_aes_cbc_decrypt,
- }
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "atmel-cbc-aes",
+ .base.cra_priority = ATMEL_AES_PRIORITY,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct atmel_aes_ctx),
+ .base.cra_alignmask = 0xf,
+ .base.cra_module = THIS_MODULE,
+
+ .init = atmel_aes_init_tfm,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = atmel_aes_setkey,
+ .encrypt = atmel_aes_cbc_encrypt,
+ .decrypt = atmel_aes_cbc_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
},
{
- .cra_name = "ofb(aes)",
- .cra_driver_name = "atmel-ofb-aes",
- .cra_priority = ATMEL_AES_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_aes_ctx),
- .cra_alignmask = 0xf,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_aes_cra_init,
- .cra_u.ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = atmel_aes_setkey,
- .encrypt = atmel_aes_ofb_encrypt,
- .decrypt = atmel_aes_ofb_decrypt,
- }
+ .base.cra_name = "ofb(aes)",
+ .base.cra_driver_name = "atmel-ofb-aes",
+ .base.cra_priority = ATMEL_AES_PRIORITY,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct atmel_aes_ctx),
+ .base.cra_alignmask = 0xf,
+ .base.cra_module = THIS_MODULE,
+
+ .init = atmel_aes_init_tfm,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = atmel_aes_setkey,
+ .encrypt = atmel_aes_ofb_encrypt,
+ .decrypt = atmel_aes_ofb_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
},
{
- .cra_name = "cfb(aes)",
- .cra_driver_name = "atmel-cfb-aes",
- .cra_priority = ATMEL_AES_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_aes_ctx),
- .cra_alignmask = 0xf,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_aes_cra_init,
- .cra_u.ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = atmel_aes_setkey,
- .encrypt = atmel_aes_cfb_encrypt,
- .decrypt = atmel_aes_cfb_decrypt,
- }
+ .base.cra_name = "cfb(aes)",
+ .base.cra_driver_name = "atmel-cfb-aes",
+ .base.cra_priority = ATMEL_AES_PRIORITY,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct atmel_aes_ctx),
+ .base.cra_alignmask = 0xf,
+ .base.cra_module = THIS_MODULE,
+
+ .init = atmel_aes_init_tfm,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = atmel_aes_setkey,
+ .encrypt = atmel_aes_cfb_encrypt,
+ .decrypt = atmel_aes_cfb_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
},
{
- .cra_name = "cfb32(aes)",
- .cra_driver_name = "atmel-cfb32-aes",
- .cra_priority = ATMEL_AES_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = CFB32_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_aes_ctx),
- .cra_alignmask = 0x3,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_aes_cra_init,
- .cra_u.ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = atmel_aes_setkey,
- .encrypt = atmel_aes_cfb32_encrypt,
- .decrypt = atmel_aes_cfb32_decrypt,
- }
+ .base.cra_name = "cfb32(aes)",
+ .base.cra_driver_name = "atmel-cfb32-aes",
+ .base.cra_priority = ATMEL_AES_PRIORITY,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = CFB32_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct atmel_aes_ctx),
+ .base.cra_alignmask = 0xf,
+ .base.cra_module = THIS_MODULE,
+
+ .init = atmel_aes_init_tfm,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = atmel_aes_setkey,
+ .encrypt = atmel_aes_cfb32_encrypt,
+ .decrypt = atmel_aes_cfb32_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
},
{
- .cra_name = "cfb16(aes)",
- .cra_driver_name = "atmel-cfb16-aes",
- .cra_priority = ATMEL_AES_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = CFB16_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_aes_ctx),
- .cra_alignmask = 0x1,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_aes_cra_init,
- .cra_u.ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = atmel_aes_setkey,
- .encrypt = atmel_aes_cfb16_encrypt,
- .decrypt = atmel_aes_cfb16_decrypt,
- }
+ .base.cra_name = "cfb16(aes)",
+ .base.cra_driver_name = "atmel-cfb16-aes",
+ .base.cra_priority = ATMEL_AES_PRIORITY,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = CFB16_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct atmel_aes_ctx),
+ .base.cra_alignmask = 0xf,
+ .base.cra_module = THIS_MODULE,
+
+ .init = atmel_aes_init_tfm,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = atmel_aes_setkey,
+ .encrypt = atmel_aes_cfb16_encrypt,
+ .decrypt = atmel_aes_cfb16_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
},
{
- .cra_name = "cfb8(aes)",
- .cra_driver_name = "atmel-cfb8-aes",
- .cra_priority = ATMEL_AES_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = CFB8_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_aes_ctx),
- .cra_alignmask = 0x0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_aes_cra_init,
- .cra_u.ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = atmel_aes_setkey,
- .encrypt = atmel_aes_cfb8_encrypt,
- .decrypt = atmel_aes_cfb8_decrypt,
- }
+ .base.cra_name = "cfb8(aes)",
+ .base.cra_driver_name = "atmel-cfb8-aes",
+ .base.cra_priority = ATMEL_AES_PRIORITY,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = CFB8_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct atmel_aes_ctx),
+ .base.cra_alignmask = 0xf,
+ .base.cra_module = THIS_MODULE,
+
+ .init = atmel_aes_init_tfm,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = atmel_aes_setkey,
+ .encrypt = atmel_aes_cfb8_encrypt,
+ .decrypt = atmel_aes_cfb8_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
},
{
- .cra_name = "ctr(aes)",
- .cra_driver_name = "atmel-ctr-aes",
- .cra_priority = ATMEL_AES_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct atmel_aes_ctr_ctx),
- .cra_alignmask = 0xf,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_aes_ctr_cra_init,
- .cra_u.ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = atmel_aes_setkey,
- .encrypt = atmel_aes_ctr_encrypt,
- .decrypt = atmel_aes_ctr_decrypt,
- }
+ .base.cra_name = "ctr(aes)",
+ .base.cra_driver_name = "atmel-ctr-aes",
+ .base.cra_priority = ATMEL_AES_PRIORITY,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct atmel_aes_ctr_ctx),
+ .base.cra_alignmask = 0xf,
+ .base.cra_module = THIS_MODULE,
+
+ .init = atmel_aes_ctr_init_tfm,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = atmel_aes_setkey,
+ .encrypt = atmel_aes_ctr_encrypt,
+ .decrypt = atmel_aes_ctr_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
},
};
-static struct crypto_alg aes_cfb64_alg = {
- .cra_name = "cfb64(aes)",
- .cra_driver_name = "atmel-cfb64-aes",
- .cra_priority = ATMEL_AES_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = CFB64_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_aes_ctx),
- .cra_alignmask = 0x7,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_aes_cra_init,
- .cra_u.ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = atmel_aes_setkey,
- .encrypt = atmel_aes_cfb64_encrypt,
- .decrypt = atmel_aes_cfb64_decrypt,
- }
+static struct skcipher_alg aes_cfb64_alg = {
+ .base.cra_name = "cfb64(aes)",
+ .base.cra_driver_name = "atmel-cfb64-aes",
+ .base.cra_priority = ATMEL_AES_PRIORITY,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = CFB64_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct atmel_aes_ctx),
+ .base.cra_alignmask = 0xf,
+ .base.cra_module = THIS_MODULE,
+
+ .init = atmel_aes_init_tfm,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = atmel_aes_setkey,
+ .encrypt = atmel_aes_cfb64_encrypt,
+ .decrypt = atmel_aes_cfb64_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
};
@@ -1450,7 +1441,7 @@ static struct crypto_alg aes_cfb64_alg = {
static int atmel_aes_gcm_ghash(struct atmel_aes_dev *dd,
const u32 *data, size_t datalen,
- const u32 *ghash_in, u32 *ghash_out,
+ const __be32 *ghash_in, __be32 *ghash_out,
atmel_aes_fn_t resume);
static int atmel_aes_gcm_ghash_init(struct atmel_aes_dev *dd);
static int atmel_aes_gcm_ghash_finalize(struct atmel_aes_dev *dd);
@@ -1471,7 +1462,7 @@ atmel_aes_gcm_ctx_cast(struct atmel_aes_base_ctx *ctx)
static int atmel_aes_gcm_ghash(struct atmel_aes_dev *dd,
const u32 *data, size_t datalen,
- const u32 *ghash_in, u32 *ghash_out,
+ const __be32 *ghash_in, __be32 *ghash_out,
atmel_aes_fn_t resume)
{
struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
@@ -1558,7 +1549,7 @@ static int atmel_aes_gcm_start(struct atmel_aes_dev *dd)
memcpy(data, iv, ivsize);
memset(data + ivsize, 0, padlen + sizeof(u64));
- ((u64 *)(data + datalen))[-1] = cpu_to_be64(ivsize * 8);
+ ((__be64 *)(data + datalen))[-1] = cpu_to_be64(ivsize * 8);
return atmel_aes_gcm_ghash(dd, (const u32 *)data, datalen,
NULL, ctx->j0, atmel_aes_gcm_process);
@@ -1591,7 +1582,7 @@ static int atmel_aes_gcm_length(struct atmel_aes_dev *dd)
{
struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
struct aead_request *req = aead_request_cast(dd->areq);
- u32 j0_lsw, *j0 = ctx->j0;
+ __be32 j0_lsw, *j0 = ctx->j0;
size_t padlen;
/* Write incr32(J0) into IV. */
@@ -1674,7 +1665,7 @@ static int atmel_aes_gcm_tag_init(struct atmel_aes_dev *dd)
{
struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
struct aead_request *req = aead_request_cast(dd->areq);
- u64 *data = dd->buf;
+ __be64 *data = dd->buf;
if (likely(dd->flags & AES_FLAGS_GTAGEN)) {
if (!(atmel_aes_read(dd, AES_ISR) & AES_INT_TAGRDY)) {
@@ -1857,8 +1848,8 @@ static int atmel_aes_xts_process_data(struct atmel_aes_dev *dd);
static int atmel_aes_xts_start(struct atmel_aes_dev *dd)
{
struct atmel_aes_xts_ctx *ctx = atmel_aes_xts_ctx_cast(dd->ctx);
- struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);
- struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req);
+ struct skcipher_request *req = skcipher_request_cast(dd->areq);
+ struct atmel_aes_reqctx *rctx = skcipher_request_ctx(req);
unsigned long flags;
int err;
@@ -1868,7 +1859,7 @@ static int atmel_aes_xts_start(struct atmel_aes_dev *dd)
if (err)
return atmel_aes_complete(dd, err);
- /* Compute the tweak value from req->info with ecb(aes). */
+ /* Compute the tweak value from req->iv with ecb(aes). */
flags = dd->flags;
dd->flags &= ~AES_FLAGS_MODE_MASK;
dd->flags |= (AES_FLAGS_ECB | AES_FLAGS_ENCRYPT);
@@ -1876,16 +1867,16 @@ static int atmel_aes_xts_start(struct atmel_aes_dev *dd)
ctx->key2, ctx->base.keylen);
dd->flags = flags;
- atmel_aes_write_block(dd, AES_IDATAR(0), req->info);
+ atmel_aes_write_block(dd, AES_IDATAR(0), req->iv);
return atmel_aes_wait_for_data_ready(dd, atmel_aes_xts_process_data);
}
static int atmel_aes_xts_process_data(struct atmel_aes_dev *dd)
{
- struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);
- bool use_dma = (req->nbytes >= ATMEL_AES_DMA_THRESHOLD);
+ struct skcipher_request *req = skcipher_request_cast(dd->areq);
+ bool use_dma = (req->cryptlen >= ATMEL_AES_DMA_THRESHOLD);
u32 tweak[AES_BLOCK_SIZE / sizeof(u32)];
- static const u32 one[AES_BLOCK_SIZE / sizeof(u32)] = {cpu_to_le32(1), };
+ static const __le32 one[AES_BLOCK_SIZE / sizeof(u32)] = {cpu_to_le32(1), };
u8 *tweak_bytes = (u8 *)tweak;
int i;
@@ -1908,20 +1899,21 @@ static int atmel_aes_xts_process_data(struct atmel_aes_dev *dd)
atmel_aes_write_block(dd, AES_TWR(0), tweak);
atmel_aes_write_block(dd, AES_ALPHAR(0), one);
if (use_dma)
- return atmel_aes_dma_start(dd, req->src, req->dst, req->nbytes,
+ return atmel_aes_dma_start(dd, req->src, req->dst,
+ req->cryptlen,
atmel_aes_transfer_complete);
- return atmel_aes_cpu_start(dd, req->src, req->dst, req->nbytes,
+ return atmel_aes_cpu_start(dd, req->src, req->dst, req->cryptlen,
atmel_aes_transfer_complete);
}
-static int atmel_aes_xts_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+static int atmel_aes_xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen)
{
- struct atmel_aes_xts_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+ struct atmel_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
int err;
- err = xts_check_key(crypto_ablkcipher_tfm(tfm), key, keylen);
+ err = xts_check_key(crypto_skcipher_tfm(tfm), key, keylen);
if (err)
return err;
@@ -1932,48 +1924,46 @@ static int atmel_aes_xts_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
return 0;
}
-static int atmel_aes_xts_encrypt(struct ablkcipher_request *req)
+static int atmel_aes_xts_encrypt(struct skcipher_request *req)
{
return atmel_aes_crypt(req, AES_FLAGS_XTS | AES_FLAGS_ENCRYPT);
}
-static int atmel_aes_xts_decrypt(struct ablkcipher_request *req)
+static int atmel_aes_xts_decrypt(struct skcipher_request *req)
{
return atmel_aes_crypt(req, AES_FLAGS_XTS);
}
-static int atmel_aes_xts_cra_init(struct crypto_tfm *tfm)
+static int atmel_aes_xts_init_tfm(struct crypto_skcipher *tfm)
{
- struct atmel_aes_xts_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct atmel_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
- tfm->crt_ablkcipher.reqsize = sizeof(struct atmel_aes_reqctx);
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct atmel_aes_reqctx));
ctx->base.start = atmel_aes_xts_start;
return 0;
}
-static struct crypto_alg aes_xts_alg = {
- .cra_name = "xts(aes)",
- .cra_driver_name = "atmel-xts-aes",
- .cra_priority = ATMEL_AES_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_aes_xts_ctx),
- .cra_alignmask = 0xf,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_aes_xts_cra_init,
- .cra_u.ablkcipher = {
- .min_keysize = 2 * AES_MIN_KEY_SIZE,
- .max_keysize = 2 * AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = atmel_aes_xts_setkey,
- .encrypt = atmel_aes_xts_encrypt,
- .decrypt = atmel_aes_xts_decrypt,
- }
+static struct skcipher_alg aes_xts_alg = {
+ .base.cra_name = "xts(aes)",
+ .base.cra_driver_name = "atmel-xts-aes",
+ .base.cra_priority = ATMEL_AES_PRIORITY,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct atmel_aes_xts_ctx),
+ .base.cra_alignmask = 0xf,
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = 2 * AES_MIN_KEY_SIZE,
+ .max_keysize = 2 * AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = atmel_aes_xts_setkey,
+ .encrypt = atmel_aes_xts_encrypt,
+ .decrypt = atmel_aes_xts_decrypt,
+ .init = atmel_aes_xts_init_tfm,
};
-#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
+#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
/* authenc aead functions */
static int atmel_aes_authenc_start(struct atmel_aes_dev *dd);
@@ -2041,7 +2031,7 @@ static int atmel_aes_authenc_transfer(struct atmel_aes_dev *dd, int err,
struct atmel_aes_authenc_reqctx *rctx = aead_request_ctx(req);
bool enc = atmel_aes_is_encrypt(dd);
struct scatterlist *src, *dst;
- u32 iv[AES_BLOCK_SIZE / sizeof(u32)];
+ __be32 iv[AES_BLOCK_SIZE / sizeof(u32)];
u32 emr;
if (is_async)
@@ -2460,23 +2450,23 @@ static void atmel_aes_unregister_algs(struct atmel_aes_dev *dd)
{
int i;
-#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
+#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
if (dd->caps.has_authenc)
for (i = 0; i < ARRAY_SIZE(aes_authenc_algs); i++)
crypto_unregister_aead(&aes_authenc_algs[i]);
#endif
if (dd->caps.has_xts)
- crypto_unregister_alg(&aes_xts_alg);
+ crypto_unregister_skcipher(&aes_xts_alg);
if (dd->caps.has_gcm)
crypto_unregister_aead(&aes_gcm_alg);
if (dd->caps.has_cfb64)
- crypto_unregister_alg(&aes_cfb64_alg);
+ crypto_unregister_skcipher(&aes_cfb64_alg);
for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
- crypto_unregister_alg(&aes_algs[i]);
+ crypto_unregister_skcipher(&aes_algs[i]);
}
static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
@@ -2484,13 +2474,13 @@ static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
int err, i, j;
for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
- err = crypto_register_alg(&aes_algs[i]);
+ err = crypto_register_skcipher(&aes_algs[i]);
if (err)
goto err_aes_algs;
}
if (dd->caps.has_cfb64) {
- err = crypto_register_alg(&aes_cfb64_alg);
+ err = crypto_register_skcipher(&aes_cfb64_alg);
if (err)
goto err_aes_cfb64_alg;
}
@@ -2502,12 +2492,12 @@ static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
}
if (dd->caps.has_xts) {
- err = crypto_register_alg(&aes_xts_alg);
+ err = crypto_register_skcipher(&aes_xts_alg);
if (err)
goto err_aes_xts_alg;
}
-#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
+#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
if (dd->caps.has_authenc) {
for (i = 0; i < ARRAY_SIZE(aes_authenc_algs); i++) {
err = crypto_register_aead(&aes_authenc_algs[i]);
@@ -2519,22 +2509,22 @@ static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
return 0;
-#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
+#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
/* i = ARRAY_SIZE(aes_authenc_algs); */
err_aes_authenc_alg:
for (j = 0; j < i; j++)
crypto_unregister_aead(&aes_authenc_algs[j]);
- crypto_unregister_alg(&aes_xts_alg);
+ crypto_unregister_skcipher(&aes_xts_alg);
#endif
err_aes_xts_alg:
crypto_unregister_aead(&aes_gcm_alg);
err_aes_gcm_alg:
- crypto_unregister_alg(&aes_cfb64_alg);
+ crypto_unregister_skcipher(&aes_cfb64_alg);
err_aes_cfb64_alg:
i = ARRAY_SIZE(aes_algs);
err_aes_algs:
for (j = 0; j < i; j++)
- crypto_unregister_alg(&aes_algs[j]);
+ crypto_unregister_skcipher(&aes_algs[j]);
return err;
}
@@ -2709,7 +2699,7 @@ static int atmel_aes_probe(struct platform_device *pdev)
atmel_aes_get_cap(aes_dd);
-#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
+#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
if (aes_dd->caps.has_authenc && !atmel_sha_authenc_is_ready()) {
err = -EPROBE_DEFER;
goto iclk_unprepare;
diff --git a/drivers/crypto/atmel-authenc.h b/drivers/crypto/atmel-authenc.h
index cbd37a2edada..d6de810df44f 100644
--- a/drivers/crypto/atmel-authenc.h
+++ b/drivers/crypto/atmel-authenc.h
@@ -12,7 +12,7 @@
#ifndef __ATMEL_AUTHENC_H__
#define __ATMEL_AUTHENC_H__
-#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
+#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
#include <crypto/authenc.h>
#include <crypto/hash.h>
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index 84cb8748a795..8ea0e4bcde0d 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -360,7 +360,7 @@ static size_t atmel_sha_append_sg(struct atmel_sha_reqctx *ctx)
static void atmel_sha_fill_padding(struct atmel_sha_reqctx *ctx, int length)
{
unsigned int index, padlen;
- u64 bits[2];
+ __be64 bits[2];
u64 size[2];
size[0] = ctx->digcnt[0];
@@ -2212,7 +2212,7 @@ static struct ahash_alg sha_hmac_algs[] = {
},
};
-#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
+#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
/* authenc functions */
static int atmel_sha_authenc_init2(struct atmel_sha_dev *dd);
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
index 1a6c86ae6148..0c1f79b30fc1 100644
--- a/drivers/crypto/atmel-tdes.c
+++ b/drivers/crypto/atmel-tdes.c
@@ -36,6 +36,7 @@
#include <crypto/internal/des.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
+#include <crypto/internal/skcipher.h>
#include <linux/platform_data/crypto-atmel.h>
#include "atmel-tdes-regs.h"
@@ -72,7 +73,7 @@ struct atmel_tdes_ctx {
struct atmel_tdes_dev *dd;
int keylen;
- u32 key[3*DES_KEY_SIZE / sizeof(u32)];
+ u32 key[DES3_EDE_KEY_SIZE / sizeof(u32)];
unsigned long flags;
u16 block_size;
@@ -80,6 +81,7 @@ struct atmel_tdes_ctx {
struct atmel_tdes_reqctx {
unsigned long mode;
+ u8 lastc[DES_BLOCK_SIZE];
};
struct atmel_tdes_dma {
@@ -106,7 +108,7 @@ struct atmel_tdes_dev {
struct tasklet_struct done_task;
struct tasklet_struct queue_task;
- struct ablkcipher_request *req;
+ struct skcipher_request *req;
size_t total;
struct scatterlist *in_sg;
@@ -307,8 +309,8 @@ static int atmel_tdes_write_ctrl(struct atmel_tdes_dev *dd)
dd->ctx->keylen >> 2);
if (((dd->flags & TDES_FLAGS_CBC) || (dd->flags & TDES_FLAGS_CFB) ||
- (dd->flags & TDES_FLAGS_OFB)) && dd->req->info) {
- atmel_tdes_write_n(dd, TDES_IV1R, dd->req->info, 2);
+ (dd->flags & TDES_FLAGS_OFB)) && dd->req->iv) {
+ atmel_tdes_write_n(dd, TDES_IV1R, (void *)dd->req->iv, 2);
}
return 0;
@@ -502,8 +504,8 @@ static int atmel_tdes_crypt_dma(struct crypto_tfm *tfm, dma_addr_t dma_addr_in,
static int atmel_tdes_crypt_start(struct atmel_tdes_dev *dd)
{
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(
- crypto_ablkcipher_reqtfm(dd->req));
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(
+ crypto_skcipher_reqtfm(dd->req));
int err, fast = 0, in, out;
size_t count;
dma_addr_t addr_in, addr_out;
@@ -571,19 +573,45 @@ static int atmel_tdes_crypt_start(struct atmel_tdes_dev *dd)
return err;
}
+static void
+atmel_tdes_set_iv_as_last_ciphertext_block(struct atmel_tdes_dev *dd)
+{
+ struct skcipher_request *req = dd->req;
+ struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(req);
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
+
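+ /* As in atmel-aes: propagate the last ciphertext block into req->iv so chained CBC-style requests keep working. */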
+ if (req->cryptlen < ivsize)
+ return;
+
+ if (rctx->mode & TDES_FLAGS_ENCRYPT) {
+ scatterwalk_map_and_copy(req->iv, req->dst,
+ req->cryptlen - ivsize, ivsize, 0);
+ } else {
+ if (req->src == req->dst)
+ memcpy(req->iv, rctx->lastc, ivsize);
+ else
+ scatterwalk_map_and_copy(req->iv, req->src,
+ req->cryptlen - ivsize,
+ ivsize, 0);
+ }
+}
+
static void atmel_tdes_finish_req(struct atmel_tdes_dev *dd, int err)
{
- struct ablkcipher_request *req = dd->req;
+ struct skcipher_request *req = dd->req;
clk_disable_unprepare(dd->iclk);
dd->flags &= ~TDES_FLAGS_BUSY;
+ atmel_tdes_set_iv_as_last_ciphertext_block(dd);
+
req->base.complete(&req->base, err);
}
static int atmel_tdes_handle_queue(struct atmel_tdes_dev *dd,
- struct ablkcipher_request *req)
+ struct skcipher_request *req)
{
struct crypto_async_request *async_req, *backlog;
struct atmel_tdes_ctx *ctx;
@@ -593,7 +621,7 @@ static int atmel_tdes_handle_queue(struct atmel_tdes_dev *dd,
spin_lock_irqsave(&dd->lock, flags);
if (req)
- ret = ablkcipher_enqueue_request(&dd->queue, req);
+ ret = crypto_enqueue_request(&dd->queue, &req->base);
if (dd->flags & TDES_FLAGS_BUSY) {
spin_unlock_irqrestore(&dd->lock, flags);
return ret;
@@ -610,18 +638,18 @@ static int atmel_tdes_handle_queue(struct atmel_tdes_dev *dd,
if (backlog)
backlog->complete(backlog, -EINPROGRESS);
- req = ablkcipher_request_cast(async_req);
+ req = skcipher_request_cast(async_req);
/* assign new request to device */
dd->req = req;
- dd->total = req->nbytes;
+ dd->total = req->cryptlen;
dd->in_offset = 0;
dd->in_sg = req->src;
dd->out_offset = 0;
dd->out_sg = req->dst;
- rctx = ablkcipher_request_ctx(req);
- ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
+ rctx = skcipher_request_ctx(req);
+ ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
rctx->mode &= TDES_FLAGS_MODE_MASK;
dd->flags = (dd->flags & ~TDES_FLAGS_MODE_MASK) | rctx->mode;
dd->ctx = ctx;
@@ -665,32 +693,32 @@ static int atmel_tdes_crypt_dma_stop(struct atmel_tdes_dev *dd)
return err;
}
-static int atmel_tdes_crypt(struct ablkcipher_request *req, unsigned long mode)
+static int atmel_tdes_crypt(struct skcipher_request *req, unsigned long mode)
{
- struct atmel_tdes_ctx *ctx = crypto_ablkcipher_ctx(
- crypto_ablkcipher_reqtfm(req));
- struct atmel_tdes_reqctx *rctx = ablkcipher_request_ctx(req);
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ struct atmel_tdes_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(req);
if (mode & TDES_FLAGS_CFB8) {
- if (!IS_ALIGNED(req->nbytes, CFB8_BLOCK_SIZE)) {
+ if (!IS_ALIGNED(req->cryptlen, CFB8_BLOCK_SIZE)) {
pr_err("request size is not exact amount of CFB8 blocks\n");
return -EINVAL;
}
ctx->block_size = CFB8_BLOCK_SIZE;
} else if (mode & TDES_FLAGS_CFB16) {
- if (!IS_ALIGNED(req->nbytes, CFB16_BLOCK_SIZE)) {
+ if (!IS_ALIGNED(req->cryptlen, CFB16_BLOCK_SIZE)) {
pr_err("request size is not exact amount of CFB16 blocks\n");
return -EINVAL;
}
ctx->block_size = CFB16_BLOCK_SIZE;
} else if (mode & TDES_FLAGS_CFB32) {
- if (!IS_ALIGNED(req->nbytes, CFB32_BLOCK_SIZE)) {
+ if (!IS_ALIGNED(req->cryptlen, CFB32_BLOCK_SIZE)) {
pr_err("request size is not exact amount of CFB32 blocks\n");
return -EINVAL;
}
ctx->block_size = CFB32_BLOCK_SIZE;
} else {
- if (!IS_ALIGNED(req->nbytes, DES_BLOCK_SIZE)) {
+ if (!IS_ALIGNED(req->cryptlen, DES_BLOCK_SIZE)) {
pr_err("request size is not exact amount of DES blocks\n");
return -EINVAL;
}
@@ -699,6 +727,15 @@ static int atmel_tdes_crypt(struct ablkcipher_request *req, unsigned long mode)
rctx->mode = mode;
+ if (!(mode & TDES_FLAGS_ENCRYPT) && req->src == req->dst) {
+ unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
+
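+ /* Save the last ciphertext block before in-place decryption overwrites it. */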
+ if (req->cryptlen >= ivsize)
+ scatterwalk_map_and_copy(rctx->lastc, req->src,
+ req->cryptlen - ivsize,
+ ivsize, 0);
+ }
+
return atmel_tdes_handle_queue(ctx->dd, req);
}
@@ -770,13 +807,13 @@ static void atmel_tdes_dma_cleanup(struct atmel_tdes_dev *dd)
dma_release_channel(dd->dma_lch_out.chan);
}
-static int atmel_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+static int atmel_des_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen)
{
- struct atmel_tdes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+ struct atmel_tdes_ctx *ctx = crypto_skcipher_ctx(tfm);
int err;
- err = verify_ablkcipher_des_key(tfm, key);
+ err = verify_skcipher_des_key(tfm, key);
if (err)
return err;
@@ -786,13 +823,13 @@ static int atmel_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
return 0;
}
-static int atmel_tdes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+static int atmel_tdes_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen)
{
- struct atmel_tdes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+ struct atmel_tdes_ctx *ctx = crypto_skcipher_ctx(tfm);
int err;
- err = verify_ablkcipher_des3_key(tfm, key);
+ err = verify_skcipher_des3_key(tfm, key);
if (err)
return err;
@@ -802,84 +839,84 @@ static int atmel_tdes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
return 0;
}
-static int atmel_tdes_ecb_encrypt(struct ablkcipher_request *req)
+static int atmel_tdes_ecb_encrypt(struct skcipher_request *req)
{
return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT);
}
-static int atmel_tdes_ecb_decrypt(struct ablkcipher_request *req)
+static int atmel_tdes_ecb_decrypt(struct skcipher_request *req)
{
return atmel_tdes_crypt(req, 0);
}
-static int atmel_tdes_cbc_encrypt(struct ablkcipher_request *req)
+static int atmel_tdes_cbc_encrypt(struct skcipher_request *req)
{
return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CBC);
}
-static int atmel_tdes_cbc_decrypt(struct ablkcipher_request *req)
+static int atmel_tdes_cbc_decrypt(struct skcipher_request *req)
{
return atmel_tdes_crypt(req, TDES_FLAGS_CBC);
}
-static int atmel_tdes_cfb_encrypt(struct ablkcipher_request *req)
+static int atmel_tdes_cfb_encrypt(struct skcipher_request *req)
{
return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CFB);
}
-static int atmel_tdes_cfb_decrypt(struct ablkcipher_request *req)
+static int atmel_tdes_cfb_decrypt(struct skcipher_request *req)
{
return atmel_tdes_crypt(req, TDES_FLAGS_CFB);
}
-static int atmel_tdes_cfb8_encrypt(struct ablkcipher_request *req)
+static int atmel_tdes_cfb8_encrypt(struct skcipher_request *req)
{
return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CFB |
TDES_FLAGS_CFB8);
}
-static int atmel_tdes_cfb8_decrypt(struct ablkcipher_request *req)
+static int atmel_tdes_cfb8_decrypt(struct skcipher_request *req)
{
return atmel_tdes_crypt(req, TDES_FLAGS_CFB | TDES_FLAGS_CFB8);
}
-static int atmel_tdes_cfb16_encrypt(struct ablkcipher_request *req)
+static int atmel_tdes_cfb16_encrypt(struct skcipher_request *req)
{
return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CFB |
TDES_FLAGS_CFB16);
}
-static int atmel_tdes_cfb16_decrypt(struct ablkcipher_request *req)
+static int atmel_tdes_cfb16_decrypt(struct skcipher_request *req)
{
return atmel_tdes_crypt(req, TDES_FLAGS_CFB | TDES_FLAGS_CFB16);
}
-static int atmel_tdes_cfb32_encrypt(struct ablkcipher_request *req)
+static int atmel_tdes_cfb32_encrypt(struct skcipher_request *req)
{
return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CFB |
TDES_FLAGS_CFB32);
}
-static int atmel_tdes_cfb32_decrypt(struct ablkcipher_request *req)
+static int atmel_tdes_cfb32_decrypt(struct skcipher_request *req)
{
return atmel_tdes_crypt(req, TDES_FLAGS_CFB | TDES_FLAGS_CFB32);
}
-static int atmel_tdes_ofb_encrypt(struct ablkcipher_request *req)
+static int atmel_tdes_ofb_encrypt(struct skcipher_request *req)
{
return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_OFB);
}
-static int atmel_tdes_ofb_decrypt(struct ablkcipher_request *req)
+static int atmel_tdes_ofb_decrypt(struct skcipher_request *req)
{
return atmel_tdes_crypt(req, TDES_FLAGS_OFB);
}
-static int atmel_tdes_cra_init(struct crypto_tfm *tfm)
+static int atmel_tdes_init_tfm(struct crypto_skcipher *tfm)
{
- struct atmel_tdes_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct atmel_tdes_ctx *ctx = crypto_skcipher_ctx(tfm);
struct atmel_tdes_dev *dd;
- tfm->crt_ablkcipher.reqsize = sizeof(struct atmel_tdes_reqctx);
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct atmel_tdes_reqctx));
dd = atmel_tdes_find_dev(ctx);
if (!dd)
@@ -888,204 +925,184 @@ static int atmel_tdes_cra_init(struct crypto_tfm *tfm)
return 0;
}
-static struct crypto_alg tdes_algs[] = {
+static struct skcipher_alg tdes_algs[] = {
{
- .cra_name = "ecb(des)",
- .cra_driver_name = "atmel-ecb-des",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_tdes_ctx),
- .cra_alignmask = 0x7,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_tdes_cra_init,
- .cra_u.ablkcipher = {
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .setkey = atmel_des_setkey,
- .encrypt = atmel_tdes_ecb_encrypt,
- .decrypt = atmel_tdes_ecb_decrypt,
- }
+ .base.cra_name = "ecb(des)",
+ .base.cra_driver_name = "atmel-ecb-des",
+ .base.cra_priority = 100,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
+ .base.cra_alignmask = 0x7,
+ .base.cra_module = THIS_MODULE,
+
+ .init = atmel_tdes_init_tfm,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .setkey = atmel_des_setkey,
+ .encrypt = atmel_tdes_ecb_encrypt,
+ .decrypt = atmel_tdes_ecb_decrypt,
},
{
- .cra_name = "cbc(des)",
- .cra_driver_name = "atmel-cbc-des",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_tdes_ctx),
- .cra_alignmask = 0x7,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_tdes_cra_init,
- .cra_u.ablkcipher = {
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = atmel_des_setkey,
- .encrypt = atmel_tdes_cbc_encrypt,
- .decrypt = atmel_tdes_cbc_decrypt,
- }
+ .base.cra_name = "cbc(des)",
+ .base.cra_driver_name = "atmel-cbc-des",
+ .base.cra_priority = 100,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
+ .base.cra_alignmask = 0x7,
+ .base.cra_module = THIS_MODULE,
+
+ .init = atmel_tdes_init_tfm,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = atmel_des_setkey,
+ .encrypt = atmel_tdes_cbc_encrypt,
+ .decrypt = atmel_tdes_cbc_decrypt,
},
{
- .cra_name = "cfb(des)",
- .cra_driver_name = "atmel-cfb-des",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_tdes_ctx),
- .cra_alignmask = 0x7,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_tdes_cra_init,
- .cra_u.ablkcipher = {
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = atmel_des_setkey,
- .encrypt = atmel_tdes_cfb_encrypt,
- .decrypt = atmel_tdes_cfb_decrypt,
- }
+ .base.cra_name = "cfb(des)",
+ .base.cra_driver_name = "atmel-cfb-des",
+ .base.cra_priority = 100,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
+ .base.cra_alignmask = 0x7,
+ .base.cra_module = THIS_MODULE,
+
+ .init = atmel_tdes_init_tfm,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = atmel_des_setkey,
+ .encrypt = atmel_tdes_cfb_encrypt,
+ .decrypt = atmel_tdes_cfb_decrypt,
},
{
- .cra_name = "cfb8(des)",
- .cra_driver_name = "atmel-cfb8-des",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = CFB8_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_tdes_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_tdes_cra_init,
- .cra_u.ablkcipher = {
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = atmel_des_setkey,
- .encrypt = atmel_tdes_cfb8_encrypt,
- .decrypt = atmel_tdes_cfb8_decrypt,
- }
+ .base.cra_name = "cfb8(des)",
+ .base.cra_driver_name = "atmel-cfb8-des",
+ .base.cra_priority = 100,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = CFB8_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
+ .base.cra_alignmask = 0,
+ .base.cra_module = THIS_MODULE,
+
+ .init = atmel_tdes_init_tfm,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = atmel_des_setkey,
+ .encrypt = atmel_tdes_cfb8_encrypt,
+ .decrypt = atmel_tdes_cfb8_decrypt,
},
{
- .cra_name = "cfb16(des)",
- .cra_driver_name = "atmel-cfb16-des",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = CFB16_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_tdes_ctx),
- .cra_alignmask = 0x1,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_tdes_cra_init,
- .cra_u.ablkcipher = {
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = atmel_des_setkey,
- .encrypt = atmel_tdes_cfb16_encrypt,
- .decrypt = atmel_tdes_cfb16_decrypt,
- }
+ .base.cra_name = "cfb16(des)",
+ .base.cra_driver_name = "atmel-cfb16-des",
+ .base.cra_priority = 100,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = CFB16_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
+ .base.cra_alignmask = 0x1,
+ .base.cra_module = THIS_MODULE,
+
+ .init = atmel_tdes_init_tfm,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = atmel_des_setkey,
+ .encrypt = atmel_tdes_cfb16_encrypt,
+ .decrypt = atmel_tdes_cfb16_decrypt,
},
{
- .cra_name = "cfb32(des)",
- .cra_driver_name = "atmel-cfb32-des",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = CFB32_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_tdes_ctx),
- .cra_alignmask = 0x3,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_tdes_cra_init,
- .cra_u.ablkcipher = {
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = atmel_des_setkey,
- .encrypt = atmel_tdes_cfb32_encrypt,
- .decrypt = atmel_tdes_cfb32_decrypt,
- }
+ .base.cra_name = "cfb32(des)",
+ .base.cra_driver_name = "atmel-cfb32-des",
+ .base.cra_priority = 100,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = CFB32_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
+ .base.cra_alignmask = 0x3,
+ .base.cra_module = THIS_MODULE,
+
+ .init = atmel_tdes_init_tfm,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = atmel_des_setkey,
+ .encrypt = atmel_tdes_cfb32_encrypt,
+ .decrypt = atmel_tdes_cfb32_decrypt,
},
{
- .cra_name = "ofb(des)",
- .cra_driver_name = "atmel-ofb-des",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_tdes_ctx),
- .cra_alignmask = 0x7,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_tdes_cra_init,
- .cra_u.ablkcipher = {
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = atmel_des_setkey,
- .encrypt = atmel_tdes_ofb_encrypt,
- .decrypt = atmel_tdes_ofb_decrypt,
- }
+ .base.cra_name = "ofb(des)",
+ .base.cra_driver_name = "atmel-ofb-des",
+ .base.cra_priority = 100,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
+ .base.cra_alignmask = 0x7,
+ .base.cra_module = THIS_MODULE,
+
+ .init = atmel_tdes_init_tfm,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = atmel_des_setkey,
+ .encrypt = atmel_tdes_ofb_encrypt,
+ .decrypt = atmel_tdes_ofb_decrypt,
},
{
- .cra_name = "ecb(des3_ede)",
- .cra_driver_name = "atmel-ecb-tdes",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_tdes_ctx),
- .cra_alignmask = 0x7,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_tdes_cra_init,
- .cra_u.ablkcipher = {
- .min_keysize = 3 * DES_KEY_SIZE,
- .max_keysize = 3 * DES_KEY_SIZE,
- .setkey = atmel_tdes_setkey,
- .encrypt = atmel_tdes_ecb_encrypt,
- .decrypt = atmel_tdes_ecb_decrypt,
- }
+ .base.cra_name = "ecb(des3_ede)",
+ .base.cra_driver_name = "atmel-ecb-tdes",
+ .base.cra_priority = 100,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
+ .base.cra_alignmask = 0x7,
+ .base.cra_module = THIS_MODULE,
+
+ .init = atmel_tdes_init_tfm,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .setkey = atmel_tdes_setkey,
+ .encrypt = atmel_tdes_ecb_encrypt,
+ .decrypt = atmel_tdes_ecb_decrypt,
},
{
- .cra_name = "cbc(des3_ede)",
- .cra_driver_name = "atmel-cbc-tdes",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_tdes_ctx),
- .cra_alignmask = 0x7,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_tdes_cra_init,
- .cra_u.ablkcipher = {
- .min_keysize = 3*DES_KEY_SIZE,
- .max_keysize = 3*DES_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = atmel_tdes_setkey,
- .encrypt = atmel_tdes_cbc_encrypt,
- .decrypt = atmel_tdes_cbc_decrypt,
- }
+ .base.cra_name = "cbc(des3_ede)",
+ .base.cra_driver_name = "atmel-cbc-tdes",
+ .base.cra_priority = 100,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
+ .base.cra_alignmask = 0x7,
+ .base.cra_module = THIS_MODULE,
+
+ .init = atmel_tdes_init_tfm,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = atmel_tdes_setkey,
+ .encrypt = atmel_tdes_cbc_encrypt,
+ .decrypt = atmel_tdes_cbc_decrypt,
},
{
- .cra_name = "ofb(des3_ede)",
- .cra_driver_name = "atmel-ofb-tdes",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_tdes_ctx),
- .cra_alignmask = 0x7,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_tdes_cra_init,
- .cra_u.ablkcipher = {
- .min_keysize = 3*DES_KEY_SIZE,
- .max_keysize = 3*DES_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = atmel_tdes_setkey,
- .encrypt = atmel_tdes_ofb_encrypt,
- .decrypt = atmel_tdes_ofb_decrypt,
- }
+ .base.cra_name = "ofb(des3_ede)",
+ .base.cra_driver_name = "atmel-ofb-tdes",
+ .base.cra_priority = 100,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
+ .base.cra_alignmask = 0x7,
+ .base.cra_module = THIS_MODULE,
+
+ .init = atmel_tdes_init_tfm,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = atmel_tdes_setkey,
+ .encrypt = atmel_tdes_ofb_encrypt,
+ .decrypt = atmel_tdes_ofb_decrypt,
},
};
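
Once registered, these algorithms are reached through the generic skcipher
API rather than the removed ablkcipher one. A rough caller-side sketch,
assuming key, iv, len and the src/dst scatterlists are prepared elsewhere:

    #include <crypto/skcipher.h>

    struct crypto_skcipher *tfm;
    struct skcipher_request *req;
    DECLARE_CRYPTO_WAIT(wait);
    int err;

    tfm = crypto_alloc_skcipher("cbc(des3_ede)", 0, 0);
    if (IS_ERR(tfm))
            return PTR_ERR(tfm);

    err = crypto_skcipher_setkey(tfm, key, DES3_EDE_KEY_SIZE);
    if (err)
            goto out_free_tfm;

    req = skcipher_request_alloc(tfm, GFP_KERNEL);
    if (!req) {
            err = -ENOMEM;
            goto out_free_tfm;
    }

    /* The driver is CRYPTO_ALG_ASYNC, so encrypt may return
     * -EINPROGRESS; crypto_wait_req() sleeps until completion.
     */
    skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                  crypto_req_done, &wait);
    skcipher_request_set_crypt(req, src, dst, len, iv);
    err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

    skcipher_request_free(req);
    out_free_tfm:
    crypto_free_skcipher(tfm);
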
@@ -1148,7 +1165,7 @@ static void atmel_tdes_unregister_algs(struct atmel_tdes_dev *dd)
int i;
for (i = 0; i < ARRAY_SIZE(tdes_algs); i++)
- crypto_unregister_alg(&tdes_algs[i]);
+ crypto_unregister_skcipher(&tdes_algs[i]);
}
static int atmel_tdes_register_algs(struct atmel_tdes_dev *dd)
@@ -1156,7 +1173,7 @@ static int atmel_tdes_register_algs(struct atmel_tdes_dev *dd)
int err, i, j;
for (i = 0; i < ARRAY_SIZE(tdes_algs); i++) {
- err = crypto_register_alg(&tdes_algs[i]);
+ err = crypto_register_skcipher(&tdes_algs[i]);
if (err)
goto err_tdes_algs;
}
@@ -1165,7 +1182,7 @@ static int atmel_tdes_register_algs(struct atmel_tdes_dev *dd)
err_tdes_algs:
for (j = 0; j < i; j++)
- crypto_unregister_alg(&tdes_algs[j]);
+ crypto_unregister_skcipher(&tdes_algs[j]);
return err;
}
diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
index f85356a48e7e..1564a6f8c9cb 100644
--- a/drivers/crypto/bcm/cipher.c
+++ b/drivers/crypto/bcm/cipher.c
@@ -110,8 +110,8 @@ static u8 select_channel(void)
}
/**
- * spu_ablkcipher_rx_sg_create() - Build up the scatterlist of buffers used to
- * receive a SPU response message for an ablkcipher request. Includes buffers to
+ * spu_skcipher_rx_sg_create() - Build up the scatterlist of buffers used to
+ * receive a SPU response message for an skcipher request. Includes buffers to
* catch SPU message headers and the response data.
* @mssg: mailbox message containing the receive sg
* @rctx: crypto request context
@@ -130,7 +130,7 @@ static u8 select_channel(void)
* < 0 if an error
*/
static int
-spu_ablkcipher_rx_sg_create(struct brcm_message *mssg,
+spu_skcipher_rx_sg_create(struct brcm_message *mssg,
struct iproc_reqctx_s *rctx,
u8 rx_frag_num,
unsigned int chunksize, u32 stat_pad_len)
@@ -179,8 +179,8 @@ spu_ablkcipher_rx_sg_create(struct brcm_message *mssg,
}
/**
- * spu_ablkcipher_tx_sg_create() - Build up the scatterlist of buffers used to
- * send a SPU request message for an ablkcipher request. Includes SPU message
+ * spu_skcipher_tx_sg_create() - Build up the scatterlist of buffers used to
+ * send a SPU request message for an skcipher request. Includes SPU message
* headers and the request data.
* @mssg: mailbox message containing the transmit sg
* @rctx: crypto request context
@@ -198,7 +198,7 @@ spu_ablkcipher_rx_sg_create(struct brcm_message *mssg,
* < 0 if an error
*/
static int
-spu_ablkcipher_tx_sg_create(struct brcm_message *mssg,
+spu_skcipher_tx_sg_create(struct brcm_message *mssg,
struct iproc_reqctx_s *rctx,
u8 tx_frag_num, unsigned int chunksize, u32 pad_len)
{
@@ -283,7 +283,7 @@ static int mailbox_send_message(struct brcm_message *mssg, u32 flags,
}
/**
- * handle_ablkcipher_req() - Submit as much of a block cipher request as fits in
+ * handle_skcipher_req() - Submit as much of a block cipher request as fits in
* a single SPU request message, starting at the current position in the request
* data.
* @rctx: Crypto request context
@@ -300,12 +300,12 @@ static int mailbox_send_message(struct brcm_message *mssg, u32 flags,
* asynchronously
* Any other value indicates an error
*/
-static int handle_ablkcipher_req(struct iproc_reqctx_s *rctx)
+static int handle_skcipher_req(struct iproc_reqctx_s *rctx)
{
struct spu_hw *spu = &iproc_priv.spu;
struct crypto_async_request *areq = rctx->parent;
- struct ablkcipher_request *req =
- container_of(areq, struct ablkcipher_request, base);
+ struct skcipher_request *req =
+ container_of(areq, struct skcipher_request, base);
struct iproc_ctx_s *ctx = rctx->ctx;
struct spu_cipher_parms cipher_parms;
int err = 0;
@@ -468,7 +468,7 @@ static int handle_ablkcipher_req(struct iproc_reqctx_s *rctx)
spu->spu_xts_tweak_in_payload())
rx_frag_num++; /* extra sg to insert tweak */
- err = spu_ablkcipher_rx_sg_create(mssg, rctx, rx_frag_num, chunksize,
+ err = spu_skcipher_rx_sg_create(mssg, rctx, rx_frag_num, chunksize,
stat_pad_len);
if (err)
return err;
@@ -482,7 +482,7 @@ static int handle_ablkcipher_req(struct iproc_reqctx_s *rctx)
spu->spu_xts_tweak_in_payload())
tx_frag_num++; /* extra sg to insert tweak */
- err = spu_ablkcipher_tx_sg_create(mssg, rctx, tx_frag_num, chunksize,
+ err = spu_skcipher_tx_sg_create(mssg, rctx, tx_frag_num, chunksize,
pad_len);
if (err)
return err;
@@ -495,16 +495,16 @@ static int handle_ablkcipher_req(struct iproc_reqctx_s *rctx)
}
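
The rx/tx helpers above assemble ordinary scatterlists around the payload.
The underlying idiom, with illustrative buffer names:

    struct scatterlist sg[3];

    sg_init_table(sg, ARRAY_SIZE(sg));
    sg_set_buf(&sg[0], hdr_buf, hdr_len);    /* SPU message header */
    sg_set_buf(&sg[1], data_buf, data_len);  /* request/response data */
    sg_set_buf(&sg[2], stat_buf, stat_len);  /* status trailer */
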
/**
- * handle_ablkcipher_resp() - Process a block cipher SPU response. Updates the
+ * handle_skcipher_resp() - Process a block cipher SPU response. Updates the
* total received count for the request and updates global stats.
* @rctx: Crypto request context
*/
-static void handle_ablkcipher_resp(struct iproc_reqctx_s *rctx)
+static void handle_skcipher_resp(struct iproc_reqctx_s *rctx)
{
struct spu_hw *spu = &iproc_priv.spu;
#ifdef DEBUG
struct crypto_async_request *areq = rctx->parent;
- struct ablkcipher_request *req = ablkcipher_request_cast(areq);
+ struct skcipher_request *req = skcipher_request_cast(areq);
#endif
struct iproc_ctx_s *ctx = rctx->ctx;
u32 payload_len;
@@ -1685,8 +1685,8 @@ static void spu_rx_callback(struct mbox_client *cl, void *msg)
/* Process the SPU response message */
switch (rctx->ctx->alg->type) {
- case CRYPTO_ALG_TYPE_ABLKCIPHER:
- handle_ablkcipher_resp(rctx);
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ handle_skcipher_resp(rctx);
break;
case CRYPTO_ALG_TYPE_AHASH:
handle_ahash_resp(rctx);
@@ -1708,8 +1708,8 @@ static void spu_rx_callback(struct mbox_client *cl, void *msg)
spu_chunk_cleanup(rctx);
switch (rctx->ctx->alg->type) {
- case CRYPTO_ALG_TYPE_ABLKCIPHER:
- err = handle_ablkcipher_req(rctx);
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ err = handle_skcipher_req(rctx);
break;
case CRYPTO_ALG_TYPE_AHASH:
err = handle_ahash_req(rctx);
@@ -1739,7 +1739,7 @@ cb_finish:
/* ==================== Kernel Cryptographic API ==================== */
/**
- * ablkcipher_enqueue() - Handle ablkcipher encrypt or decrypt request.
+ * skcipher_enqueue() - Handle skcipher encrypt or decrypt request.
* @req: Crypto API request
* @encrypt: true if encrypting; false if decrypting
*
@@ -1747,11 +1747,11 @@ cb_finish:
* asynchronously
* < 0 if an error
*/
-static int ablkcipher_enqueue(struct ablkcipher_request *req, bool encrypt)
+static int skcipher_enqueue(struct skcipher_request *req, bool encrypt)
{
- struct iproc_reqctx_s *rctx = ablkcipher_request_ctx(req);
+ struct iproc_reqctx_s *rctx = skcipher_request_ctx(req);
struct iproc_ctx_s *ctx =
- crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
+ crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
int err;
flow_log("%s() enc:%u\n", __func__, encrypt);
@@ -1761,7 +1761,7 @@ static int ablkcipher_enqueue(struct ablkcipher_request *req, bool encrypt)
rctx->parent = &req->base;
rctx->is_encrypt = encrypt;
rctx->bd_suppress = false;
- rctx->total_todo = req->nbytes;
+ rctx->total_todo = req->cryptlen;
rctx->src_sent = 0;
rctx->total_sent = 0;
rctx->total_received = 0;
@@ -1782,15 +1782,15 @@ static int ablkcipher_enqueue(struct ablkcipher_request *req, bool encrypt)
ctx->cipher.mode == CIPHER_MODE_GCM ||
ctx->cipher.mode == CIPHER_MODE_CCM) {
rctx->iv_ctr_len =
- crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(req));
- memcpy(rctx->msg_buf.iv_ctr, req->info, rctx->iv_ctr_len);
+ crypto_skcipher_ivsize(crypto_skcipher_reqtfm(req));
+ memcpy(rctx->msg_buf.iv_ctr, req->iv, rctx->iv_ctr_len);
} else {
rctx->iv_ctr_len = 0;
}
/* Choose a SPU to process this request */
rctx->chan_idx = select_channel();
- err = handle_ablkcipher_req(rctx);
+ err = handle_skcipher_req(rctx);
if (err != -EINPROGRESS)
/* synchronous result */
spu_chunk_cleanup(rctx);
@@ -1798,13 +1798,13 @@ static int ablkcipher_enqueue(struct ablkcipher_request *req, bool encrypt)
return err;
}
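
When the mailbox response arrives, the driver completes the original
request through the async base request; at the time of this series that
convention looks roughly like the following (my_finish is an illustrative
name):

    static void my_finish(struct iproc_reqctx_s *rctx, int err)
    {
            struct crypto_async_request *areq = rctx->parent;

            /* Anything other than -EINPROGRESS from skcipher_enqueue()
             * meant the request already finished synchronously.
             */
            areq->complete(areq, err);
    }
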
-static int des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+static int des_setkey(struct crypto_skcipher *cipher, const u8 *key,
unsigned int keylen)
{
- struct iproc_ctx_s *ctx = crypto_ablkcipher_ctx(cipher);
+ struct iproc_ctx_s *ctx = crypto_skcipher_ctx(cipher);
int err;
- err = verify_ablkcipher_des_key(cipher, key);
+ err = verify_skcipher_des_key(cipher, key);
if (err)
return err;
@@ -1812,13 +1812,13 @@ static int des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
return 0;
}
-static int threedes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+static int threedes_setkey(struct crypto_skcipher *cipher, const u8 *key,
unsigned int keylen)
{
- struct iproc_ctx_s *ctx = crypto_ablkcipher_ctx(cipher);
+ struct iproc_ctx_s *ctx = crypto_skcipher_ctx(cipher);
int err;
- err = verify_ablkcipher_des3_key(cipher, key);
+ err = verify_skcipher_des3_key(cipher, key);
if (err)
return err;
@@ -1826,10 +1826,10 @@ static int threedes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
return 0;
}
-static int aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+static int aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
unsigned int keylen)
{
- struct iproc_ctx_s *ctx = crypto_ablkcipher_ctx(cipher);
+ struct iproc_ctx_s *ctx = crypto_skcipher_ctx(cipher);
if (ctx->cipher.mode == CIPHER_MODE_XTS)
/* XTS includes two keys of equal length */
@@ -1846,7 +1846,7 @@ static int aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
ctx->cipher_type = CIPHER_TYPE_AES256;
break;
default:
- crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
WARN_ON((ctx->max_payload != SPU_MAX_PAYLOAD_INF) &&
@@ -1854,10 +1854,10 @@ static int aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
return 0;
}
-static int rc4_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+static int rc4_setkey(struct crypto_skcipher *cipher, const u8 *key,
unsigned int keylen)
{
- struct iproc_ctx_s *ctx = crypto_ablkcipher_ctx(cipher);
+ struct iproc_ctx_s *ctx = crypto_skcipher_ctx(cipher);
int i;
ctx->enckeylen = ARC4_MAX_KEY_SIZE + ARC4_STATE_SIZE;
@@ -1874,16 +1874,16 @@ static int rc4_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
return 0;
}
-static int ablkcipher_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+static int skcipher_setkey(struct crypto_skcipher *cipher, const u8 *key,
unsigned int keylen)
{
struct spu_hw *spu = &iproc_priv.spu;
- struct iproc_ctx_s *ctx = crypto_ablkcipher_ctx(cipher);
+ struct iproc_ctx_s *ctx = crypto_skcipher_ctx(cipher);
struct spu_cipher_parms cipher_parms;
u32 alloc_len = 0;
int err;
- flow_log("ablkcipher_setkey() keylen: %d\n", keylen);
+ flow_log("skcipher_setkey() keylen: %d\n", keylen);
flow_dump(" key: ", key, keylen);
switch (ctx->cipher.alg) {
@@ -1926,7 +1926,7 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
alloc_len = BCM_HDR_LEN + SPU2_HEADER_ALLOC_LEN;
memset(ctx->bcm_spu_req_hdr, 0, alloc_len);
cipher_parms.iv_buf = NULL;
- cipher_parms.iv_len = crypto_ablkcipher_ivsize(cipher);
+ cipher_parms.iv_len = crypto_skcipher_ivsize(cipher);
flow_log("%s: iv_len %u\n", __func__, cipher_parms.iv_len);
cipher_parms.alg = ctx->cipher.alg;
@@ -1950,17 +1950,17 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
return 0;
}
-static int ablkcipher_encrypt(struct ablkcipher_request *req)
+static int skcipher_encrypt(struct skcipher_request *req)
{
- flow_log("ablkcipher_encrypt() nbytes:%u\n", req->nbytes);
+ flow_log("skcipher_encrypt() nbytes:%u\n", req->cryptlen);
- return ablkcipher_enqueue(req, true);
+ return skcipher_enqueue(req, true);
}
-static int ablkcipher_decrypt(struct ablkcipher_request *req)
+static int skcipher_decrypt(struct skcipher_request *req)
{
- flow_log("ablkcipher_decrypt() nbytes:%u\n", req->nbytes);
- return ablkcipher_enqueue(req, false);
+ flow_log("skcipher_decrypt() nbytes:%u\n", req->cryptlen);
+ return skcipher_enqueue(req, false);
}
static int ahash_enqueue(struct ahash_request *req)
@@ -3585,18 +3585,16 @@ static struct iproc_alg_s driver_algs[] = {
.auth_first = 0,
},
-/* ABLKCIPHER algorithms. */
+/* SKCIPHER algorithms. */
{
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .alg.crypto = {
- .cra_name = "ecb(arc4)",
- .cra_driver_name = "ecb-arc4-iproc",
- .cra_blocksize = ARC4_BLOCK_SIZE,
- .cra_ablkcipher = {
- .min_keysize = ARC4_MIN_KEY_SIZE,
- .max_keysize = ARC4_MAX_KEY_SIZE,
- .ivsize = 0,
- }
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "ecb(arc4)",
+ .base.cra_driver_name = "ecb-arc4-iproc",
+ .base.cra_blocksize = ARC4_BLOCK_SIZE,
+ .min_keysize = ARC4_MIN_KEY_SIZE,
+ .max_keysize = ARC4_MAX_KEY_SIZE,
+ .ivsize = 0,
},
.cipher_info = {
.alg = CIPHER_ALG_RC4,
@@ -3608,16 +3606,14 @@ static struct iproc_alg_s driver_algs[] = {
},
},
{
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .alg.crypto = {
- .cra_name = "ofb(des)",
- .cra_driver_name = "ofb-des-iproc",
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ablkcipher = {
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- }
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "ofb(des)",
+ .base.cra_driver_name = "ofb-des-iproc",
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
},
.cipher_info = {
.alg = CIPHER_ALG_DES,
@@ -3629,16 +3625,14 @@ static struct iproc_alg_s driver_algs[] = {
},
},
{
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .alg.crypto = {
- .cra_name = "cbc(des)",
- .cra_driver_name = "cbc-des-iproc",
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ablkcipher = {
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- }
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "cbc(des)",
+ .base.cra_driver_name = "cbc-des-iproc",
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
},
.cipher_info = {
.alg = CIPHER_ALG_DES,
@@ -3650,16 +3644,14 @@ static struct iproc_alg_s driver_algs[] = {
},
},
{
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .alg.crypto = {
- .cra_name = "ecb(des)",
- .cra_driver_name = "ecb-des-iproc",
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ablkcipher = {
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .ivsize = 0,
- }
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "ecb(des)",
+ .base.cra_driver_name = "ecb-des-iproc",
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = 0,
},
.cipher_info = {
.alg = CIPHER_ALG_DES,
@@ -3671,16 +3663,14 @@ static struct iproc_alg_s driver_algs[] = {
},
},
{
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .alg.crypto = {
- .cra_name = "ofb(des3_ede)",
- .cra_driver_name = "ofb-des3-iproc",
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_ablkcipher = {
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .ivsize = DES3_EDE_BLOCK_SIZE,
- }
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "ofb(des3_ede)",
+ .base.cra_driver_name = "ofb-des3-iproc",
+ .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
},
.cipher_info = {
.alg = CIPHER_ALG_3DES,
@@ -3692,16 +3682,14 @@ static struct iproc_alg_s driver_algs[] = {
},
},
{
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .alg.crypto = {
- .cra_name = "cbc(des3_ede)",
- .cra_driver_name = "cbc-des3-iproc",
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_ablkcipher = {
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .ivsize = DES3_EDE_BLOCK_SIZE,
- }
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "cbc(des3_ede)",
+ .base.cra_driver_name = "cbc-des3-iproc",
+ .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
},
.cipher_info = {
.alg = CIPHER_ALG_3DES,
@@ -3713,16 +3701,14 @@ static struct iproc_alg_s driver_algs[] = {
},
},
{
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .alg.crypto = {
- .cra_name = "ecb(des3_ede)",
- .cra_driver_name = "ecb-des3-iproc",
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_ablkcipher = {
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .ivsize = 0,
- }
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "ecb(des3_ede)",
+ .base.cra_driver_name = "ecb-des3-iproc",
+ .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = 0,
},
.cipher_info = {
.alg = CIPHER_ALG_3DES,
@@ -3734,16 +3720,14 @@ static struct iproc_alg_s driver_algs[] = {
},
},
{
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .alg.crypto = {
- .cra_name = "ofb(aes)",
- .cra_driver_name = "ofb-aes-iproc",
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- }
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "ofb(aes)",
+ .base.cra_driver_name = "ofb-aes-iproc",
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
},
.cipher_info = {
.alg = CIPHER_ALG_AES,
@@ -3755,16 +3739,14 @@ static struct iproc_alg_s driver_algs[] = {
},
},
{
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .alg.crypto = {
- .cra_name = "cbc(aes)",
- .cra_driver_name = "cbc-aes-iproc",
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- }
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "cbc-aes-iproc",
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
},
.cipher_info = {
.alg = CIPHER_ALG_AES,
@@ -3776,16 +3758,14 @@ static struct iproc_alg_s driver_algs[] = {
},
},
{
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .alg.crypto = {
- .cra_name = "ecb(aes)",
- .cra_driver_name = "ecb-aes-iproc",
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = 0,
- }
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "ecb(aes)",
+ .base.cra_driver_name = "ecb-aes-iproc",
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = 0,
},
.cipher_info = {
.alg = CIPHER_ALG_AES,
@@ -3797,16 +3777,14 @@ static struct iproc_alg_s driver_algs[] = {
},
},
{
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .alg.crypto = {
- .cra_name = "ctr(aes)",
- .cra_driver_name = "ctr-aes-iproc",
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- }
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "ctr(aes)",
+ .base.cra_driver_name = "ctr-aes-iproc",
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
},
.cipher_info = {
.alg = CIPHER_ALG_AES,
@@ -3818,16 +3796,14 @@ static struct iproc_alg_s driver_algs[] = {
},
},
{
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .alg.crypto = {
- .cra_name = "xts(aes)",
- .cra_driver_name = "xts-aes-iproc",
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ablkcipher = {
- .min_keysize = 2 * AES_MIN_KEY_SIZE,
- .max_keysize = 2 * AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- }
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "xts(aes)",
+ .base.cra_driver_name = "xts-aes-iproc",
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .min_keysize = 2 * AES_MIN_KEY_SIZE,
+ .max_keysize = 2 * AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
},
.cipher_info = {
.alg = CIPHER_ALG_AES,
@@ -4282,16 +4258,17 @@ static int generic_cra_init(struct crypto_tfm *tfm,
return 0;
}
-static int ablkcipher_cra_init(struct crypto_tfm *tfm)
+static int skcipher_init_tfm(struct crypto_skcipher *skcipher)
{
- struct crypto_alg *alg = tfm->__crt_alg;
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
+ struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);
struct iproc_alg_s *cipher_alg;
flow_log("%s()\n", __func__);
- tfm->crt_ablkcipher.reqsize = sizeof(struct iproc_reqctx_s);
+ crypto_skcipher_set_reqsize(skcipher, sizeof(struct iproc_reqctx_s));
- cipher_alg = container_of(alg, struct iproc_alg_s, alg.crypto);
+ cipher_alg = container_of(alg, struct iproc_alg_s, alg.skcipher);
return generic_cra_init(tfm, cipher_alg);
}
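
Recovering the driver wrapper from the embedded skcipher_alg relies on the
usual container_of() arithmetic, conceptually:

    /* cipher_alg = (struct iproc_alg_s *)
     *         ((char *)alg - offsetof(struct iproc_alg_s, alg.skcipher));
     */
    cipher_alg = container_of(alg, struct iproc_alg_s, alg.skcipher);
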
@@ -4363,6 +4340,11 @@ static void generic_cra_exit(struct crypto_tfm *tfm)
atomic_dec(&iproc_priv.session_count);
}
+static void skcipher_exit_tfm(struct crypto_skcipher *tfm)
+{
+ generic_cra_exit(crypto_skcipher_tfm(tfm));
+}
+
static void aead_cra_exit(struct crypto_aead *aead)
{
struct crypto_tfm *tfm = crypto_aead_tfm(aead);
@@ -4524,10 +4506,10 @@ static void spu_counters_init(void)
atomic_set(&iproc_priv.bad_icv, 0);
}
-static int spu_register_ablkcipher(struct iproc_alg_s *driver_alg)
+static int spu_register_skcipher(struct iproc_alg_s *driver_alg)
{
struct spu_hw *spu = &iproc_priv.spu;
- struct crypto_alg *crypto = &driver_alg->alg.crypto;
+ struct skcipher_alg *crypto = &driver_alg->alg.skcipher;
int err;
/* SPU2 does not support RC4 */
@@ -4535,26 +4517,23 @@ static int spu_register_ablkcipher(struct iproc_alg_s *driver_alg)
(spu->spu_type == SPU_TYPE_SPU2))
return 0;
- crypto->cra_module = THIS_MODULE;
- crypto->cra_priority = cipher_pri;
- crypto->cra_alignmask = 0;
- crypto->cra_ctxsize = sizeof(struct iproc_ctx_s);
-
- crypto->cra_init = ablkcipher_cra_init;
- crypto->cra_exit = generic_cra_exit;
- crypto->cra_type = &crypto_ablkcipher_type;
- crypto->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY;
+ crypto->base.cra_module = THIS_MODULE;
+ crypto->base.cra_priority = cipher_pri;
+ crypto->base.cra_alignmask = 0;
+ crypto->base.cra_ctxsize = sizeof(struct iproc_ctx_s);
+ crypto->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
- crypto->cra_ablkcipher.setkey = ablkcipher_setkey;
- crypto->cra_ablkcipher.encrypt = ablkcipher_encrypt;
- crypto->cra_ablkcipher.decrypt = ablkcipher_decrypt;
+ crypto->init = skcipher_init_tfm;
+ crypto->exit = skcipher_exit_tfm;
+ crypto->setkey = skcipher_setkey;
+ crypto->encrypt = skcipher_encrypt;
+ crypto->decrypt = skcipher_decrypt;
- err = crypto_register_alg(crypto);
+ err = crypto_register_skcipher(crypto);
/* Mark alg as having been registered, if successful */
if (err == 0)
driver_alg->registered = true;
- pr_debug(" registered ablkcipher %s\n", crypto->cra_driver_name);
+ pr_debug(" registered skcipher %s\n", crypto->base.cra_driver_name);
return err;
}
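
Filling the common fields (module owner, priority, ctxsize, flags, ops)
once at registration time keeps driver_algs[] down to the per-mode
differences: names, block size, key sizes and ivsize. Only the RC4 entry
is special-cased, since SPU2 hardware cannot run it.
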
@@ -4649,8 +4628,8 @@ static int spu_algs_register(struct device *dev)
for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
switch (driver_algs[i].type) {
- case CRYPTO_ALG_TYPE_ABLKCIPHER:
- err = spu_register_ablkcipher(&driver_algs[i]);
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ err = spu_register_skcipher(&driver_algs[i]);
break;
case CRYPTO_ALG_TYPE_AHASH:
err = spu_register_ahash(&driver_algs[i]);
@@ -4680,8 +4659,8 @@ err_algs:
if (!driver_algs[j].registered)
continue;
switch (driver_algs[j].type) {
- case CRYPTO_ALG_TYPE_ABLKCIPHER:
- crypto_unregister_alg(&driver_algs[j].alg.crypto);
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ crypto_unregister_skcipher(&driver_algs[j].alg.skcipher);
driver_algs[j].registered = false;
break;
case CRYPTO_ALG_TYPE_AHASH:
@@ -4837,10 +4816,10 @@ static int bcm_spu_remove(struct platform_device *pdev)
continue;
switch (driver_algs[i].type) {
- case CRYPTO_ALG_TYPE_ABLKCIPHER:
- crypto_unregister_alg(&driver_algs[i].alg.crypto);
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ crypto_unregister_skcipher(&driver_algs[i].alg.skcipher);
dev_dbg(dev, " unregistered cipher %s\n",
- driver_algs[i].alg.crypto.cra_driver_name);
+ driver_algs[i].alg.skcipher.base.cra_driver_name);
driver_algs[i].registered = false;
break;
case CRYPTO_ALG_TYPE_AHASH:
diff --git a/drivers/crypto/bcm/cipher.h b/drivers/crypto/bcm/cipher.h
index 766452b24d0a..b6d83e3aa46c 100644
--- a/drivers/crypto/bcm/cipher.h
+++ b/drivers/crypto/bcm/cipher.h
@@ -11,6 +11,7 @@
#include <linux/mailbox_client.h>
#include <crypto/aes.h>
#include <crypto/internal/hash.h>
+#include <crypto/internal/skcipher.h>
#include <crypto/aead.h>
#include <crypto/arc4.h>
#include <crypto/gcm.h>
@@ -102,7 +103,7 @@ struct auth_op {
struct iproc_alg_s {
u32 type;
union {
- struct crypto_alg crypto;
+ struct skcipher_alg skcipher;
struct ahash_alg hash;
struct aead_alg aead;
} alg;
@@ -149,7 +150,7 @@ struct spu_msg_buf {
u8 rx_stat[ALIGN(SPU_RX_STATUS_LEN, SPU_MSG_ALIGN)];
union {
- /* Buffers only used for ablkcipher */
+ /* Buffers only used for skcipher */
struct {
/*
* Field used for either SUPDT when RC4 is used
@@ -214,7 +215,7 @@ struct iproc_ctx_s {
/*
* Buffer to hold SPU message header template. Template is created at
- * setkey time for ablkcipher requests, since most of the fields in the
+ * setkey time for skcipher requests, since most of the fields in the
* header are known at that time. At request time, just fill in a few
* missing pieces related to length of data in the request and IVs, etc.
*/
@@ -256,7 +257,7 @@ struct iproc_reqctx_s {
/* total todo, rx'd, and sent for this request */
unsigned int total_todo;
- unsigned int total_received; /* only valid for ablkcipher */
+ unsigned int total_received; /* only valid for skcipher */
unsigned int total_sent;
/*
diff --git a/drivers/crypto/bcm/spu2.c b/drivers/crypto/bcm/spu2.c
index 2add51024575..59abb5ecefa4 100644
--- a/drivers/crypto/bcm/spu2.c
+++ b/drivers/crypto/bcm/spu2.c
@@ -542,7 +542,7 @@ void spu2_dump_msg_hdr(u8 *buf, unsigned int buf_len)
/**
* spu2_fmd_init() - At setkey time, initialize the fixed meta data for
- * subsequent ablkcipher requests for this context.
+ * subsequent skcipher requests for this context.
* @spu2_cipher_type: Cipher algorithm
* @spu2_mode: Cipher mode
* @cipher_key_len: Length of cipher key, in bytes
@@ -1107,13 +1107,13 @@ u32 spu2_create_request(u8 *spu_hdr,
}
/**
- * spu_cipher_req_init() - Build an ablkcipher SPU2 request message header,
+ * spu_cipher_req_init() - Build an skcipher SPU2 request message header,
* including FMD and OMD.
* @spu_hdr: Location of start of SPU request (FMD field)
* @cipher_parms: Parameters describing cipher request
*
* Called at setkey time to initialize a msg header that can be reused for all
- * subsequent ablkcipher requests. Construct the message starting at spu_hdr.
+ * subsequent skcipher requests. Construct the message starting at spu_hdr.
* Caller should allocate this buffer in DMA-able memory at least
* SPU_HEADER_ALLOC_LEN bytes long.
*
diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig
index 137ed3df0c74..87053e46c788 100644
--- a/drivers/crypto/caam/Kconfig
+++ b/drivers/crypto/caam/Kconfig
@@ -97,7 +97,7 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
select CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
select CRYPTO_AEAD
select CRYPTO_AUTHENC
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_LIB_DES
help
Selecting this will offload crypto for users of the
@@ -110,7 +110,7 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
default y
select CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
select CRYPTO_AUTHENC
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_DES
help
Selecting this will use CAAM Queue Interface (QI) for sending
@@ -158,7 +158,7 @@ config CRYPTO_DEV_FSL_DPAA2_CAAM
select CRYPTO_DEV_FSL_CAAM_COMMON
select CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
select CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_AUTHENC
select CRYPTO_AEAD
select CRYPTO_HASH
diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
index 83f96d4f86e0..6619c512ef1a 100644
--- a/drivers/crypto/caam/caampkc.c
+++ b/drivers/crypto/caam/caampkc.c
@@ -252,9 +252,9 @@ static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
int sg_flags = (flags == GFP_ATOMIC) ? SG_MITER_ATOMIC : 0;
- int sgc;
int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
int src_nents, dst_nents;
+ int mapped_src_nents, mapped_dst_nents;
unsigned int diff_size = 0;
int lzeros;
@@ -285,13 +285,27 @@ static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
req_ctx->fixup_src_len);
dst_nents = sg_nents_for_len(req->dst, req->dst_len);
- if (!diff_size && src_nents == 1)
+ mapped_src_nents = dma_map_sg(dev, req_ctx->fixup_src, src_nents,
+ DMA_TO_DEVICE);
+ if (unlikely(!mapped_src_nents)) {
+ dev_err(dev, "unable to map source\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
+ DMA_FROM_DEVICE);
+ if (unlikely(!mapped_dst_nents)) {
+ dev_err(dev, "unable to map destination\n");
+ goto src_fail;
+ }
+
+ if (!diff_size && mapped_src_nents == 1)
sec4_sg_len = 0; /* no need for an input hw s/g table */
else
- sec4_sg_len = src_nents + !!diff_size;
+ sec4_sg_len = mapped_src_nents + !!diff_size;
sec4_sg_index = sec4_sg_len;
- if (dst_nents > 1)
- sec4_sg_len += pad_sg_nents(dst_nents);
+
+ if (mapped_dst_nents > 1)
+ sec4_sg_len += pad_sg_nents(mapped_dst_nents);
else
sec4_sg_len = pad_sg_nents(sec4_sg_len);
@@ -301,19 +315,7 @@ static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
edesc = kzalloc(sizeof(*edesc) + desclen + sec4_sg_bytes,
GFP_DMA | flags);
if (!edesc)
- return ERR_PTR(-ENOMEM);
-
- sgc = dma_map_sg(dev, req_ctx->fixup_src, src_nents, DMA_TO_DEVICE);
- if (unlikely(!sgc)) {
- dev_err(dev, "unable to map source\n");
- goto src_fail;
- }
-
- sgc = dma_map_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
- if (unlikely(!sgc)) {
- dev_err(dev, "unable to map destination\n");
goto dst_fail;
- }
edesc->sec4_sg = (void *)edesc + sizeof(*edesc) + desclen;
if (diff_size)
@@ -324,7 +326,7 @@ static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
sg_to_sec4_sg_last(req_ctx->fixup_src, req_ctx->fixup_src_len,
edesc->sec4_sg + !!diff_size, 0);
- if (dst_nents > 1)
+ if (mapped_dst_nents > 1)
sg_to_sec4_sg_last(req->dst, req->dst_len,
edesc->sec4_sg + sec4_sg_index, 0);
@@ -335,6 +337,9 @@ static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
if (!sec4_sg_bytes)
return edesc;
+ edesc->mapped_src_nents = mapped_src_nents;
+ edesc->mapped_dst_nents = mapped_dst_nents;
+
edesc->sec4_sg_dma = dma_map_single(dev, edesc->sec4_sg,
sec4_sg_bytes, DMA_TO_DEVICE);
if (dma_mapping_error(dev, edesc->sec4_sg_dma)) {
@@ -351,11 +356,11 @@ static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
return edesc;
sec4_sg_fail:
- dma_unmap_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
+ kfree(edesc);
dst_fail:
- dma_unmap_sg(dev, req_ctx->fixup_src, src_nents, DMA_TO_DEVICE);
+ dma_unmap_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
src_fail:
- kfree(edesc);
+ dma_unmap_sg(dev, req_ctx->fixup_src, src_nents, DMA_TO_DEVICE);
return ERR_PTR(-ENOMEM);
}
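
Because the DMA mappings are now taken before the descriptor allocation,
the failure labels must unwind in the new reverse order of acquisition.
The general pattern, with illustrative names:

    a = acquire_a();        /* here: dma_map_sg() on the source */
    if (!a)
            return ERR_PTR(-ENOMEM);
    b = acquire_b();        /* here: dma_map_sg() on the destination */
    if (!b)
            goto err_a;
    c = acquire_c();        /* here: the kzalloc() of the descriptor */
    if (!c)
            goto err_b;
    return c;

    err_b:
    release_b(b);
    err_a:
    release_a(a);
    return ERR_PTR(-ENOMEM);
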
@@ -383,15 +388,15 @@ static int set_rsa_pub_pdb(struct akcipher_request *req,
return -ENOMEM;
}
- if (edesc->src_nents > 1) {
+ if (edesc->mapped_src_nents > 1) {
pdb->sgf |= RSA_PDB_SGF_F;
pdb->f_dma = edesc->sec4_sg_dma;
- sec4_sg_index += edesc->src_nents;
+ sec4_sg_index += edesc->mapped_src_nents;
} else {
pdb->f_dma = sg_dma_address(req_ctx->fixup_src);
}
- if (edesc->dst_nents > 1) {
+ if (edesc->mapped_dst_nents > 1) {
pdb->sgf |= RSA_PDB_SGF_G;
pdb->g_dma = edesc->sec4_sg_dma +
sec4_sg_index * sizeof(struct sec4_sg_entry);
@@ -428,17 +433,17 @@ static int set_rsa_priv_f1_pdb(struct akcipher_request *req,
return -ENOMEM;
}
- if (edesc->src_nents > 1) {
+ if (edesc->mapped_src_nents > 1) {
pdb->sgf |= RSA_PRIV_PDB_SGF_G;
pdb->g_dma = edesc->sec4_sg_dma;
- sec4_sg_index += edesc->src_nents;
+ sec4_sg_index += edesc->mapped_src_nents;
} else {
struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
}
- if (edesc->dst_nents > 1) {
+ if (edesc->mapped_dst_nents > 1) {
pdb->sgf |= RSA_PRIV_PDB_SGF_F;
pdb->f_dma = edesc->sec4_sg_dma +
sec4_sg_index * sizeof(struct sec4_sg_entry);
@@ -493,17 +498,17 @@ static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
goto unmap_tmp1;
}
- if (edesc->src_nents > 1) {
+ if (edesc->mapped_src_nents > 1) {
pdb->sgf |= RSA_PRIV_PDB_SGF_G;
pdb->g_dma = edesc->sec4_sg_dma;
- sec4_sg_index += edesc->src_nents;
+ sec4_sg_index += edesc->mapped_src_nents;
} else {
struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
}
- if (edesc->dst_nents > 1) {
+ if (edesc->mapped_dst_nents > 1) {
pdb->sgf |= RSA_PRIV_PDB_SGF_F;
pdb->f_dma = edesc->sec4_sg_dma +
sec4_sg_index * sizeof(struct sec4_sg_entry);
@@ -582,17 +587,17 @@ static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
goto unmap_tmp1;
}
- if (edesc->src_nents > 1) {
+ if (edesc->mapped_src_nents > 1) {
pdb->sgf |= RSA_PRIV_PDB_SGF_G;
pdb->g_dma = edesc->sec4_sg_dma;
- sec4_sg_index += edesc->src_nents;
+ sec4_sg_index += edesc->mapped_src_nents;
} else {
struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
}
- if (edesc->dst_nents > 1) {
+ if (edesc->mapped_dst_nents > 1) {
pdb->sgf |= RSA_PRIV_PDB_SGF_F;
pdb->f_dma = edesc->sec4_sg_dma +
sec4_sg_index * sizeof(struct sec4_sg_entry);
diff --git a/drivers/crypto/caam/caampkc.h b/drivers/crypto/caam/caampkc.h
index 2c488c9a3812..c68fb4c03ee6 100644
--- a/drivers/crypto/caam/caampkc.h
+++ b/drivers/crypto/caam/caampkc.h
@@ -112,8 +112,10 @@ struct caam_rsa_req_ctx {
/**
* rsa_edesc - s/w-extended rsa descriptor
- * @src_nents : number of segments in input scatterlist
- * @dst_nents : number of segments in output scatterlist
+ * @src_nents : number of segments in input s/w scatterlist
+ * @dst_nents : number of segments in output s/w scatterlist
+ * @mapped_src_nents: number of segments in input h/w link table
+ * @mapped_dst_nents: number of segments in output h/w link table
* @sec4_sg_bytes : length of h/w link table
* @sec4_sg_dma : dma address of h/w link table
* @sec4_sg : pointer to h/w link table
@@ -123,6 +125,8 @@ struct caam_rsa_req_ctx {
struct rsa_edesc {
int src_nents;
int dst_nents;
+ int mapped_src_nents;
+ int mapped_dst_nents;
int sec4_sg_bytes;
dma_addr_t sec4_sg_dma;
struct sec4_sg_entry *sec4_sg;
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index db22777d59b4..d7c3c3805693 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -176,6 +176,73 @@ static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc,
}
/*
+ * deinstantiate_rng - builds and executes a descriptor on DECO0,
+ * which deinitializes the RNG block.
+ * @ctrldev - pointer to device
+ * @state_handle_mask - bitmask containing the instantiation status
+ * for the RNG4 state handles which exist in
+ * the RNG4 block: 1 if it's been instantiated
+ *
+ * Return: - 0 if no error occurred
+ * - -ENOMEM if there isn't enough memory to allocate the descriptor
+ * - -ENODEV if DECO0 couldn't be acquired
+ * - -EAGAIN if an error occurred when executing the descriptor
+ */
+static int deinstantiate_rng(struct device *ctrldev, int state_handle_mask)
+{
+ u32 *desc, status;
+ int sh_idx, ret = 0;
+
+ desc = kmalloc(CAAM_CMD_SZ * 3, GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ for (sh_idx = 0; sh_idx < RNG4_MAX_HANDLES; sh_idx++) {
+ /*
+ * If the corresponding bit is set, then it means the state
+ * handle was initialized by us, and thus it needs to be
+ * deinitialized as well
+ */
+ if ((1 << sh_idx) & state_handle_mask) {
+ /*
+ * Create the descriptor for deinstantiating this state
+ * handle
+ */
+ build_deinstantiation_desc(desc, sh_idx);
+
+ /* Try to run it through DECO0 */
+ ret = run_descriptor_deco0(ctrldev, desc, &status);
+
+ if (ret ||
+ (status && status != JRSTA_SSRC_JUMP_HALT_CC)) {
+ dev_err(ctrldev,
+ "Failed to deinstantiate RNG4 SH%d\n",
+ sh_idx);
+ break;
+ }
+ dev_info(ctrldev, "Deinstantiated RNG4 SH%d\n", sh_idx);
+ }
+ }
+
+ kfree(desc);
+
+ return ret;
+}
+
+static void devm_deinstantiate_rng(void *data)
+{
+ struct device *ctrldev = data;
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
+
+ /*
+ * De-initialize RNG state handles initialized by this driver.
+ * In case of SoCs with Management Complex, RNG is managed by MC f/w.
+ */
+ if (ctrlpriv->rng4_sh_init)
+ deinstantiate_rng(ctrldev, ctrlpriv->rng4_sh_init);
+}
+
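
devm_add_action_or_reset() registers a teardown callback that runs
automatically on driver detach, and invokes it immediately if the
registration itself fails, so no manual error path is needed. A brief
sketch with illustrative names:

    err = devm_add_action_or_reset(dev, my_teardown, my_data);
    if (err)
            return err;     /* my_teardown(my_data) has already run */
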
+/*
* instantiate_rng - builds and executes a descriptor on DECO0,
* which initializes the RNG block.
* @ctrldev - pointer to device
@@ -247,99 +314,13 @@ static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
kfree(desc);
- return ret;
-}
-
-/*
- * deinstantiate_rng - builds and executes a descriptor on DECO0,
- * which deinitializes the RNG block.
- * @ctrldev - pointer to device
- * @state_handle_mask - bitmask containing the instantiation status
- * for the RNG4 state handles which exist in
- * the RNG4 block: 1 if it's been instantiated
- *
- * Return: - 0 if no error occurred
- * - -ENOMEM if there isn't enough memory to allocate the descriptor
- * - -ENODEV if DECO0 couldn't be acquired
- * - -EAGAIN if an error occurred when executing the descriptor
- */
-static int deinstantiate_rng(struct device *ctrldev, int state_handle_mask)
-{
- u32 *desc, status;
- int sh_idx, ret = 0;
-
- desc = kmalloc(CAAM_CMD_SZ * 3, GFP_KERNEL);
- if (!desc)
- return -ENOMEM;
-
- for (sh_idx = 0; sh_idx < RNG4_MAX_HANDLES; sh_idx++) {
- /*
- * If the corresponding bit is set, then it means the state
- * handle was initialized by us, and thus it needs to be
- * deinitialized as well
- */
- if ((1 << sh_idx) & state_handle_mask) {
- /*
- * Create the descriptor for deinstantating this state
- * handle
- */
- build_deinstantiation_desc(desc, sh_idx);
-
- /* Try to run it through DECO0 */
- ret = run_descriptor_deco0(ctrldev, desc, &status);
-
- if (ret ||
- (status && status != JRSTA_SSRC_JUMP_HALT_CC)) {
- dev_err(ctrldev,
- "Failed to deinstantiate RNG4 SH%d\n",
- sh_idx);
- break;
- }
- dev_info(ctrldev, "Deinstantiated RNG4 SH%d\n", sh_idx);
- }
- }
-
- kfree(desc);
+ if (!ret)
+ ret = devm_add_action_or_reset(ctrldev, devm_deinstantiate_rng,
+ ctrldev);
return ret;
}
-static int caam_remove(struct platform_device *pdev)
-{
- struct device *ctrldev;
- struct caam_drv_private *ctrlpriv;
- struct caam_ctrl __iomem *ctrl;
-
- ctrldev = &pdev->dev;
- ctrlpriv = dev_get_drvdata(ctrldev);
- ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
-
- /* Remove platform devices under the crypto node */
- of_platform_depopulate(ctrldev);
-
-#ifdef CONFIG_CAAM_QI
- if (ctrlpriv->qi_init)
- caam_qi_shutdown(ctrldev);
-#endif
-
- /*
- * De-initialize RNG state handles initialized by this driver.
- * In case of SoCs with Management Complex, RNG is managed by MC f/w.
- */
- if (!ctrlpriv->mc_en && ctrlpriv->rng4_sh_init)
- deinstantiate_rng(ctrldev, ctrlpriv->rng4_sh_init);
-
- /* Shut down debug views */
-#ifdef CONFIG_DEBUG_FS
- debugfs_remove_recursive(ctrlpriv->dfs_root);
-#endif
-
- /* Unmap controller region */
- iounmap(ctrl);
-
- return 0;
-}
-
/*
* kick_trng - sets the various parameters for enabling the initialization
* of the RNG4 block in CAAM
@@ -568,6 +549,13 @@ static int init_clocks(struct device *dev, const struct caam_imx_data *data)
return devm_add_action_or_reset(dev, disable_clocks, ctrlpriv);
}
+#ifdef CONFIG_DEBUG_FS
+static void caam_remove_debugfs(void *root)
+{
+ debugfs_remove_recursive(root);
+}
+#endif
+
/* Probe routine for CAAM top (controller) level */
static int caam_probe(struct platform_device *pdev)
{
@@ -580,6 +568,7 @@ static int caam_probe(struct platform_device *pdev)
struct caam_drv_private *ctrlpriv;
#ifdef CONFIG_DEBUG_FS
struct caam_perfmon *perfmon;
+ struct dentry *dfs_root;
#endif
u32 scfgr, comp_params;
u8 rng_vid;
@@ -611,10 +600,11 @@ static int caam_probe(struct platform_device *pdev)
/* Get configuration properties from device tree */
/* First, get register page */
- ctrl = of_iomap(nprop, 0);
- if (!ctrl) {
+ ctrl = devm_of_iomap(dev, nprop, 0, NULL);
+ ret = PTR_ERR_OR_ZERO(ctrl);
+ if (ret) {
dev_err(dev, "caam: of_iomap() failed\n");
- return -ENOMEM;
+ return ret;
}
caam_little_end = !(bool)(rd_reg32(&ctrl->perfmon.status) &
@@ -632,22 +622,18 @@ static int caam_probe(struct platform_device *pdev)
if (ctrlpriv->qi_present && !caam_dpaa2) {
ret = qman_is_probed();
if (!ret) {
- ret = -EPROBE_DEFER;
- goto iounmap_ctrl;
+ return -EPROBE_DEFER;
} else if (ret < 0) {
dev_err(dev, "failing probe due to qman probe error\n");
- ret = -ENODEV;
- goto iounmap_ctrl;
+ return -ENODEV;
}
ret = qman_portals_probed();
if (!ret) {
- ret = -EPROBE_DEFER;
- goto iounmap_ctrl;
+ return -EPROBE_DEFER;
} else if (ret < 0) {
dev_err(dev, "failing probe due to qman portals probe error\n");
- ret = -ENODEV;
- goto iounmap_ctrl;
+ return -ENODEV;
}
}
#endif
@@ -722,7 +708,7 @@ static int caam_probe(struct platform_device *pdev)
ret = dma_set_mask_and_coherent(dev, caam_get_dma_mask(dev));
if (ret) {
dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret);
- goto iounmap_ctrl;
+ return ret;
}
ctrlpriv->era = caam_get_era(ctrl);
@@ -736,8 +722,12 @@ static int caam_probe(struct platform_device *pdev)
*/
perfmon = (struct caam_perfmon __force *)&ctrl->perfmon;
- ctrlpriv->dfs_root = debugfs_create_dir(dev_name(dev), NULL);
- ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root);
+ dfs_root = debugfs_create_dir(dev_name(dev), NULL);
+ ret = devm_add_action_or_reset(dev, caam_remove_debugfs, dfs_root);
+ if (ret)
+ return ret;
+
+ ctrlpriv->ctl = debugfs_create_dir("ctl", dfs_root);
#endif
/* Check to see if (DPAA 1.x) QI present. If so, enable */
@@ -757,12 +747,6 @@ static int caam_probe(struct platform_device *pdev)
#endif
}
- ret = of_platform_populate(nprop, caam_match, NULL, dev);
- if (ret) {
- dev_err(dev, "JR platform devices creation error\n");
- goto shutdown_qi;
- }
-
ring = 0;
for_each_available_child_of_node(nprop, np)
if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
@@ -779,8 +763,7 @@ static int caam_probe(struct platform_device *pdev)
/* If no QI and no rings specified, quit and go home */
if ((!ctrlpriv->qi_present) && (!ctrlpriv->total_jobrs)) {
dev_err(dev, "no queues configured, terminating\n");
- ret = -ENOMEM;
- goto caam_remove;
+ return -ENOMEM;
}
if (ctrlpriv->era < 10)
@@ -843,7 +826,7 @@ static int caam_probe(struct platform_device *pdev)
} while ((ret == -EAGAIN) && (ent_delay < RTSDCTL_ENT_DLY_MAX));
if (ret) {
dev_err(dev, "failed to instantiate RNG");
- goto caam_remove;
+ return ret;
}
/*
* Set handles init'ed by this module as the complement of the
@@ -916,19 +899,11 @@ static int caam_probe(struct platform_device *pdev)
debugfs_create_blob("tdsk", S_IRUSR | S_IRGRP | S_IROTH, ctrlpriv->ctl,
&ctrlpriv->ctl_tdsk_wrap);
#endif
- return 0;
-caam_remove:
- caam_remove(pdev);
- return ret;
+ ret = devm_of_platform_populate(dev);
+ if (ret)
+ dev_err(dev, "JR platform devices creation error\n");
-shutdown_qi:
-#ifdef CONFIG_CAAM_QI
- if (ctrlpriv->qi_init)
- caam_qi_shutdown(dev);
-#endif
-iounmap_ctrl:
- iounmap(ctrl);
return ret;
}
@@ -938,7 +913,6 @@ static struct platform_driver caam_driver = {
.of_match_table = caam_match,
},
.probe = caam_probe,
- .remove = caam_remove,
};
module_platform_driver(caam_driver);
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
index 731b06becd9c..c7c10c90464b 100644
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
@@ -81,9 +81,6 @@ struct caam_drv_private {
*/
u8 total_jobrs; /* Total Job Rings in device */
u8 qi_present; /* Nonzero if QI present in device */
-#ifdef CONFIG_CAAM_QI
- u8 qi_init; /* Nonzero if QI has been initialized */
-#endif
u8 mc_en; /* Nonzero if MC f/w is active */
int secvio_irq; /* Security violation interrupt number */
int virt_en; /* Virtualization enabled in CAAM */
@@ -102,7 +99,6 @@ struct caam_drv_private {
* variables at runtime.
*/
#ifdef CONFIG_DEBUG_FS
- struct dentry *dfs_root;
struct dentry *ctl; /* controller dir */
struct debugfs_blob_wrapper ctl_kek_wrap, ctl_tkek_wrap, ctl_tdsk_wrap;
#endif
diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c
index 378f627e1d64..dacf2fa4aa8e 100644
--- a/drivers/crypto/caam/qi.c
+++ b/drivers/crypto/caam/qi.c
@@ -500,9 +500,10 @@ void caam_drv_ctx_rel(struct caam_drv_ctx *drv_ctx)
}
EXPORT_SYMBOL(caam_drv_ctx_rel);
-void caam_qi_shutdown(struct device *qidev)
+static void caam_qi_shutdown(void *data)
{
int i;
+ struct device *qidev = data;
struct caam_qi_priv *priv = &qipriv;
const cpumask_t *cpus = qman_affine_cpus();
@@ -761,7 +762,10 @@ int caam_qi_init(struct platform_device *caam_pdev)
&times_congested, &caam_fops_u64_ro);
#endif
- ctrlpriv->qi_init = 1;
+ err = devm_add_action_or_reset(qidev, caam_qi_shutdown, qidev);
+ if (err)
+ return err;
+
dev_info(qidev, "Linux CAAM Queue I/F driver initialised\n");
return 0;
}
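
Note that the pointer handed to devm_add_action_or_reset() must match what
the action callback casts it back to: caam_qi_shutdown() reads its data
argument as the struct device, so that is what gets registered here.
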
diff --git a/drivers/crypto/caam/qi.h b/drivers/crypto/caam/qi.h
index db0549549e3b..848958951f68 100644
--- a/drivers/crypto/caam/qi.h
+++ b/drivers/crypto/caam/qi.h
@@ -147,7 +147,6 @@ int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc);
void caam_drv_ctx_rel(struct caam_drv_ctx *drv_ctx);
int caam_qi_init(struct platform_device *pdev);
-void caam_qi_shutdown(struct device *dev);
/**
* qi_cache_alloc - Allocate buffers from CAAM-QI cache
diff --git a/drivers/crypto/cavium/cpt/cptvf_algs.c b/drivers/crypto/cavium/cpt/cptvf_algs.c
index 596ce28b957d..1ad66677d88e 100644
--- a/drivers/crypto/cavium/cpt/cptvf_algs.c
+++ b/drivers/crypto/cavium/cpt/cptvf_algs.c
@@ -92,15 +92,15 @@ static inline void update_output_data(struct cpt_request_info *req_info,
}
}
-static inline u32 create_ctx_hdr(struct ablkcipher_request *req, u32 enc,
+static inline u32 create_ctx_hdr(struct skcipher_request *req, u32 enc,
u32 *argcnt)
{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct cvm_enc_ctx *ctx = crypto_ablkcipher_ctx(tfm);
- struct cvm_req_ctx *rctx = ablkcipher_request_ctx(req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct cvm_enc_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct cvm_req_ctx *rctx = skcipher_request_ctx(req);
struct fc_context *fctx = &rctx->fctx;
u64 *offset_control = &rctx->control_word;
- u32 enc_iv_len = crypto_ablkcipher_ivsize(tfm);
+ u32 enc_iv_len = crypto_skcipher_ivsize(tfm);
struct cpt_request_info *req_info = &rctx->cpt_req;
u64 *ctrl_flags = NULL;
@@ -115,7 +115,7 @@ static inline u32 create_ctx_hdr(struct ablkcipher_request *req, u32 enc,
else
req_info->req.opcode.s.minor = 3;
- req_info->req.param1 = req->nbytes; /* Encryption Data length */
+ req_info->req.param1 = req->cryptlen; /* Encryption Data length */
req_info->req.param2 = 0; /*Auth data length */
fctx->enc.enc_ctrl.e.enc_cipher = ctx->cipher_type;
@@ -147,32 +147,32 @@ static inline u32 create_ctx_hdr(struct ablkcipher_request *req, u32 enc,
return 0;
}
-static inline u32 create_input_list(struct ablkcipher_request *req, u32 enc,
+static inline u32 create_input_list(struct skcipher_request *req, u32 enc,
u32 enc_iv_len)
{
- struct cvm_req_ctx *rctx = ablkcipher_request_ctx(req);
+ struct cvm_req_ctx *rctx = skcipher_request_ctx(req);
struct cpt_request_info *req_info = &rctx->cpt_req;
u32 argcnt = 0;
create_ctx_hdr(req, enc, &argcnt);
- update_input_iv(req_info, req->info, enc_iv_len, &argcnt);
- update_input_data(req_info, req->src, req->nbytes, &argcnt);
+ update_input_iv(req_info, req->iv, enc_iv_len, &argcnt);
+ update_input_data(req_info, req->src, req->cryptlen, &argcnt);
req_info->incnt = argcnt;
return 0;
}
-static inline void store_cb_info(struct ablkcipher_request *req,
+static inline void store_cb_info(struct skcipher_request *req,
struct cpt_request_info *req_info)
{
req_info->callback = (void *)cvm_callback;
req_info->callback_arg = (void *)&req->base;
}
-static inline void create_output_list(struct ablkcipher_request *req,
+static inline void create_output_list(struct skcipher_request *req,
u32 enc_iv_len)
{
- struct cvm_req_ctx *rctx = ablkcipher_request_ctx(req);
+ struct cvm_req_ctx *rctx = skcipher_request_ctx(req);
struct cpt_request_info *req_info = &rctx->cpt_req;
u32 argcnt = 0;
@@ -184,16 +184,16 @@ static inline void create_output_list(struct ablkcipher_request *req,
* [ 16 Bytes/ [ Request Enc/Dec/ DATA Len AES CBC ]
*/
/* Reading IV information */
- update_output_iv(req_info, req->info, enc_iv_len, &argcnt);
- update_output_data(req_info, req->dst, req->nbytes, &argcnt);
+ update_output_iv(req_info, req->iv, enc_iv_len, &argcnt);
+ update_output_data(req_info, req->dst, req->cryptlen, &argcnt);
req_info->outcnt = argcnt;
}
-static inline int cvm_enc_dec(struct ablkcipher_request *req, u32 enc)
+static inline int cvm_enc_dec(struct skcipher_request *req, u32 enc)
{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct cvm_req_ctx *rctx = ablkcipher_request_ctx(req);
- u32 enc_iv_len = crypto_ablkcipher_ivsize(tfm);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct cvm_req_ctx *rctx = skcipher_request_ctx(req);
+ u32 enc_iv_len = crypto_skcipher_ivsize(tfm);
struct fc_context *fctx = &rctx->fctx;
struct cpt_request_info *req_info = &rctx->cpt_req;
void *cdev = NULL;
@@ -217,20 +217,20 @@ static inline int cvm_enc_dec(struct ablkcipher_request *req, u32 enc)
return -EINPROGRESS;
}
-static int cvm_encrypt(struct ablkcipher_request *req)
+static int cvm_encrypt(struct skcipher_request *req)
{
return cvm_enc_dec(req, true);
}
-static int cvm_decrypt(struct ablkcipher_request *req)
+static int cvm_decrypt(struct skcipher_request *req)
{
return cvm_enc_dec(req, false);
}
-static int cvm_xts_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+static int cvm_xts_setkey(struct crypto_skcipher *cipher, const u8 *key,
u32 keylen)
{
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
struct cvm_enc_ctx *ctx = crypto_tfm_ctx(tfm);
int err;
const u8 *key1 = key;
@@ -284,10 +284,10 @@ static int cvm_validate_keylen(struct cvm_enc_ctx *ctx, u32 keylen)
return -EINVAL;
}
-static int cvm_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+static int cvm_setkey(struct crypto_skcipher *cipher, const u8 *key,
u32 keylen, u8 cipher_type)
{
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
struct cvm_enc_ctx *ctx = crypto_tfm_ctx(tfm);
ctx->cipher_type = cipher_type;
@@ -295,183 +295,159 @@ static int cvm_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
memcpy(ctx->enc_key, key, keylen);
return 0;
} else {
- crypto_ablkcipher_set_flags(cipher,
+ crypto_skcipher_set_flags(cipher,
CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
}
-static int cvm_cbc_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+static int cvm_cbc_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
u32 keylen)
{
return cvm_setkey(cipher, key, keylen, AES_CBC);
}
-static int cvm_ecb_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+static int cvm_ecb_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
u32 keylen)
{
return cvm_setkey(cipher, key, keylen, AES_ECB);
}
-static int cvm_cfb_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+static int cvm_cfb_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
u32 keylen)
{
return cvm_setkey(cipher, key, keylen, AES_CFB);
}
-static int cvm_cbc_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+static int cvm_cbc_des3_setkey(struct crypto_skcipher *cipher, const u8 *key,
u32 keylen)
{
- return verify_ablkcipher_des3_key(cipher, key) ?:
+ return verify_skcipher_des3_key(cipher, key) ?:
cvm_setkey(cipher, key, keylen, DES3_CBC);
}
-static int cvm_ecb_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+static int cvm_ecb_des3_setkey(struct crypto_skcipher *cipher, const u8 *key,
u32 keylen)
{
- return verify_ablkcipher_des3_key(cipher, key) ?:
+ return verify_skcipher_des3_key(cipher, key) ?:
cvm_setkey(cipher, key, keylen, DES3_ECB);
}
-static int cvm_enc_dec_init(struct crypto_tfm *tfm)
+static int cvm_enc_dec_init(struct crypto_skcipher *tfm)
{
- tfm->crt_ablkcipher.reqsize = sizeof(struct cvm_req_ctx);
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct cvm_req_ctx));
+
return 0;
}
-static struct crypto_alg algs[] = { {
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct cvm_enc_ctx),
- .cra_alignmask = 7,
- .cra_priority = 4001,
- .cra_name = "xts(aes)",
- .cra_driver_name = "cavium-xts-aes",
- .cra_type = &crypto_ablkcipher_type,
- .cra_u = {
- .ablkcipher = {
- .ivsize = AES_BLOCK_SIZE,
- .min_keysize = 2 * AES_MIN_KEY_SIZE,
- .max_keysize = 2 * AES_MAX_KEY_SIZE,
- .setkey = cvm_xts_setkey,
- .encrypt = cvm_encrypt,
- .decrypt = cvm_decrypt,
- },
- },
- .cra_init = cvm_enc_dec_init,
- .cra_module = THIS_MODULE,
+static struct skcipher_alg algs[] = { {
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct cvm_enc_ctx),
+ .base.cra_alignmask = 7,
+ .base.cra_priority = 4001,
+ .base.cra_name = "xts(aes)",
+ .base.cra_driver_name = "cavium-xts-aes",
+ .base.cra_module = THIS_MODULE,
+
+ .ivsize = AES_BLOCK_SIZE,
+ .min_keysize = 2 * AES_MIN_KEY_SIZE,
+ .max_keysize = 2 * AES_MAX_KEY_SIZE,
+ .setkey = cvm_xts_setkey,
+ .encrypt = cvm_encrypt,
+ .decrypt = cvm_decrypt,
+ .init = cvm_enc_dec_init,
}, {
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct cvm_enc_ctx),
- .cra_alignmask = 7,
- .cra_priority = 4001,
- .cra_name = "cbc(aes)",
- .cra_driver_name = "cavium-cbc-aes",
- .cra_type = &crypto_ablkcipher_type,
- .cra_u = {
- .ablkcipher = {
- .ivsize = AES_BLOCK_SIZE,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = cvm_cbc_aes_setkey,
- .encrypt = cvm_encrypt,
- .decrypt = cvm_decrypt,
- },
- },
- .cra_init = cvm_enc_dec_init,
- .cra_module = THIS_MODULE,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct cvm_enc_ctx),
+ .base.cra_alignmask = 7,
+ .base.cra_priority = 4001,
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "cavium-cbc-aes",
+ .base.cra_module = THIS_MODULE,
+
+ .ivsize = AES_BLOCK_SIZE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = cvm_cbc_aes_setkey,
+ .encrypt = cvm_encrypt,
+ .decrypt = cvm_decrypt,
+ .init = cvm_enc_dec_init,
}, {
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct cvm_enc_ctx),
- .cra_alignmask = 7,
- .cra_priority = 4001,
- .cra_name = "ecb(aes)",
- .cra_driver_name = "cavium-ecb-aes",
- .cra_type = &crypto_ablkcipher_type,
- .cra_u = {
- .ablkcipher = {
- .ivsize = AES_BLOCK_SIZE,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = cvm_ecb_aes_setkey,
- .encrypt = cvm_encrypt,
- .decrypt = cvm_decrypt,
- },
- },
- .cra_init = cvm_enc_dec_init,
- .cra_module = THIS_MODULE,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct cvm_enc_ctx),
+ .base.cra_alignmask = 7,
+ .base.cra_priority = 4001,
+ .base.cra_name = "ecb(aes)",
+ .base.cra_driver_name = "cavium-ecb-aes",
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = cvm_ecb_aes_setkey,
+ .encrypt = cvm_encrypt,
+ .decrypt = cvm_decrypt,
+ .init = cvm_enc_dec_init,
}, {
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct cvm_enc_ctx),
- .cra_alignmask = 7,
- .cra_priority = 4001,
- .cra_name = "cfb(aes)",
- .cra_driver_name = "cavium-cfb-aes",
- .cra_type = &crypto_ablkcipher_type,
- .cra_u = {
- .ablkcipher = {
- .ivsize = AES_BLOCK_SIZE,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = cvm_cfb_aes_setkey,
- .encrypt = cvm_encrypt,
- .decrypt = cvm_decrypt,
- },
- },
- .cra_init = cvm_enc_dec_init,
- .cra_module = THIS_MODULE,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct cvm_enc_ctx),
+ .base.cra_alignmask = 7,
+ .base.cra_priority = 4001,
+ .base.cra_name = "cfb(aes)",
+ .base.cra_driver_name = "cavium-cfb-aes",
+ .base.cra_module = THIS_MODULE,
+
+ .ivsize = AES_BLOCK_SIZE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = cvm_cfb_aes_setkey,
+ .encrypt = cvm_encrypt,
+ .decrypt = cvm_decrypt,
+ .init = cvm_enc_dec_init,
}, {
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct cvm_des3_ctx),
- .cra_alignmask = 7,
- .cra_priority = 4001,
- .cra_name = "cbc(des3_ede)",
- .cra_driver_name = "cavium-cbc-des3_ede",
- .cra_type = &crypto_ablkcipher_type,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = cvm_cbc_des3_setkey,
- .encrypt = cvm_encrypt,
- .decrypt = cvm_decrypt,
- },
- },
- .cra_init = cvm_enc_dec_init,
- .cra_module = THIS_MODULE,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct cvm_des3_ctx),
+ .base.cra_alignmask = 7,
+ .base.cra_priority = 4001,
+ .base.cra_name = "cbc(des3_ede)",
+ .base.cra_driver_name = "cavium-cbc-des3_ede",
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = cvm_cbc_des3_setkey,
+ .encrypt = cvm_encrypt,
+ .decrypt = cvm_decrypt,
+ .init = cvm_enc_dec_init,
}, {
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct cvm_des3_ctx),
- .cra_alignmask = 7,
- .cra_priority = 4001,
- .cra_name = "ecb(des3_ede)",
- .cra_driver_name = "cavium-ecb-des3_ede",
- .cra_type = &crypto_ablkcipher_type,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = cvm_ecb_des3_setkey,
- .encrypt = cvm_encrypt,
- .decrypt = cvm_decrypt,
- },
- },
- .cra_init = cvm_enc_dec_init,
- .cra_module = THIS_MODULE,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct cvm_des3_ctx),
+ .base.cra_alignmask = 7,
+ .base.cra_priority = 4001,
+ .base.cra_name = "ecb(des3_ede)",
+ .base.cra_driver_name = "cavium-ecb-des3_ede",
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = cvm_ecb_des3_setkey,
+ .encrypt = cvm_encrypt,
+ .decrypt = cvm_decrypt,
+ .init = cvm_enc_dec_init,
} };
static inline int cav_register_algs(void)
{
int err = 0;
- err = crypto_register_algs(algs, ARRAY_SIZE(algs));
+ err = crypto_register_skciphers(algs, ARRAY_SIZE(algs));
if (err)
return err;
@@ -480,7 +456,7 @@ static inline int cav_register_algs(void)
static inline void cav_unregister_algs(void)
{
- crypto_unregister_algs(algs, ARRAY_SIZE(algs));
+ crypto_unregister_skciphers(algs, ARRAY_SIZE(algs));
}
int cvm_crypto_init(struct cpt_vf *cptvf)
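
A caller-side sketch of the skcipher API the conversion above targets, assuming <crypto/skcipher.h> and <linux/crypto.h>; demo_cbc_aes and its parameters are illustrative. Note the renamed request fields: cryptlen/iv where the old ablkcipher carried nbytes/info:

#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <crypto/skcipher.h>

static int demo_cbc_aes(const u8 *key, unsigned int keylen, u8 *iv,
			struct scatterlist *src, struct scatterlist *dst,
			unsigned int len)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, keylen);
	if (err)
		goto out_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_tfm;
	}

	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, src, dst, len, iv);
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_tfm:
	crypto_free_skcipher(tfm);
	return err;
}
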
diff --git a/drivers/crypto/cavium/nitrox/Kconfig b/drivers/crypto/cavium/nitrox/Kconfig
index 7b1e751bb9cd..7dc008332a81 100644
--- a/drivers/crypto/cavium/nitrox/Kconfig
+++ b/drivers/crypto/cavium/nitrox/Kconfig
@@ -4,7 +4,7 @@
#
config CRYPTO_DEV_NITROX
tristate
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_AES
select CRYPTO_LIB_DES
select FW_LOADER
diff --git a/drivers/crypto/cavium/nitrox/nitrox_aead.c b/drivers/crypto/cavium/nitrox/nitrox_aead.c
index e4841eb2a09f..6f80cc3b5c84 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_aead.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_aead.c
@@ -74,6 +74,25 @@ static int nitrox_aead_setauthsize(struct crypto_aead *aead,
return 0;
}
+static int nitrox_aes_gcm_setauthsize(struct crypto_aead *aead,
+ unsigned int authsize)
+{
+ switch (authsize) {
+ case 4:
+ case 8:
+ case 12:
+ case 13:
+ case 14:
+ case 15:
+ case 16:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return nitrox_aead_setauthsize(aead, authsize);
+}
+
static int alloc_src_sglist(struct nitrox_kcrypt_request *nkreq,
struct scatterlist *src, char *iv, int ivsize,
int buflen)
@@ -186,6 +205,14 @@ static void nitrox_aead_callback(void *arg, int err)
areq->base.complete(&areq->base, err);
}
+static inline bool nitrox_aes_gcm_assoclen_supported(unsigned int assoclen)
+{
+ if (assoclen <= 512)
+ return true;
+
+ return false;
+}
+
static int nitrox_aes_gcm_enc(struct aead_request *areq)
{
struct crypto_aead *aead = crypto_aead_reqtfm(areq);
@@ -195,6 +222,9 @@ static int nitrox_aes_gcm_enc(struct aead_request *areq)
struct flexi_crypto_context *fctx = nctx->u.fctx;
int ret;
+ if (!nitrox_aes_gcm_assoclen_supported(areq->assoclen))
+ return -EINVAL;
+
memcpy(fctx->crypto.iv, areq->iv, GCM_AES_SALT_SIZE);
rctx->cryptlen = areq->cryptlen;
@@ -226,6 +256,9 @@ static int nitrox_aes_gcm_dec(struct aead_request *areq)
struct flexi_crypto_context *fctx = nctx->u.fctx;
int ret;
+ if (!nitrox_aes_gcm_assoclen_supported(areq->assoclen))
+ return -EINVAL;
+
memcpy(fctx->crypto.iv, areq->iv, GCM_AES_SALT_SIZE);
rctx->cryptlen = areq->cryptlen - aead->authsize;
@@ -492,13 +525,13 @@ static struct aead_alg nitrox_aeads[] = { {
.cra_driver_name = "n5_aes_gcm",
.cra_priority = PRIO,
.cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_blocksize = 1,
.cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
},
.setkey = nitrox_aes_gcm_setkey,
- .setauthsize = nitrox_aead_setauthsize,
+ .setauthsize = nitrox_aes_gcm_setauthsize,
.encrypt = nitrox_aes_gcm_enc,
.decrypt = nitrox_aes_gcm_dec,
.init = nitrox_aes_gcm_init,
@@ -511,7 +544,7 @@ static struct aead_alg nitrox_aeads[] = { {
.cra_driver_name = "n5_rfc4106",
.cra_priority = PRIO,
.cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_blocksize = 1,
.cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
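
A short sketch of how the stricter setauthsize hook above surfaces to users, assuming <crypto/aead.h>; demo_set_gcm_tag is illustrative. crypto_aead_setauthsize() forwards to the driver hook, so an unsupported tag length now fails at setup rather than at request time:

#include <crypto/aead.h>

static int demo_set_gcm_tag(struct crypto_aead *tfm, unsigned int taglen)
{
	/* with the driver switch above, taglen == 11 yields -EINVAL here */
	return crypto_aead_setauthsize(tfm, taglen);
}
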
diff --git a/drivers/crypto/cavium/nitrox/nitrox_dev.h b/drivers/crypto/cavium/nitrox/nitrox_dev.h
index 2217a2736c8e..c2d0c23fb81b 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_dev.h
+++ b/drivers/crypto/cavium/nitrox/nitrox_dev.h
@@ -109,6 +109,13 @@ struct nitrox_q_vector {
};
};
+enum mcode_type {
+ MCODE_TYPE_INVALID,
+ MCODE_TYPE_AE,
+ MCODE_TYPE_SE_SSL,
+ MCODE_TYPE_SE_IPSEC,
+};
+
/**
* mbox_msg - Mailbox message data
* @type: message type
@@ -128,6 +135,14 @@ union mbox_msg {
u64 chipid: 8;
u64 vfid: 8;
} id;
+ struct {
+ u64 type: 2;
+ u64 opcode: 6;
+ u64 count: 4;
+ u64 info: 40;
+ u64 next_se_grp: 3;
+ u64 next_ae_grp: 3;
+ } mcode_info;
};
/**
diff --git a/drivers/crypto/cavium/nitrox/nitrox_main.c b/drivers/crypto/cavium/nitrox/nitrox_main.c
index bc924980e10c..c4632d84c9a1 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_main.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_main.c
@@ -103,8 +103,7 @@ static void write_to_ucd_unit(struct nitrox_device *ndev, u32 ucode_size,
offset = UCD_UCODE_LOAD_BLOCK_NUM;
nitrox_write_csr(ndev, offset, block_num);
- code_size = ucode_size;
- code_size = roundup(code_size, 8);
+ code_size = roundup(ucode_size, 16);
while (code_size) {
data = ucode_data[i];
/* write 8 bytes at a time */
@@ -220,11 +219,11 @@ static int nitrox_load_fw(struct nitrox_device *ndev)
/* write block number and firmware length
* bit:<2:0> block number
- * bit:3 is set SE uses 32KB microcode
- * bit:3 is clear SE uses 64KB microcode
+ * bit:3 is set, AE uses 32KB microcode
+ * bit:3 is clear, AE uses 64KB microcode
*/
core_2_eid_val.value = 0ULL;
- core_2_eid_val.ucode_blk = 0;
+ core_2_eid_val.ucode_blk = 2;
if (ucode_size <= CNN55XX_UCD_BLOCK_SIZE)
core_2_eid_val.ucode_len = 1;
else
diff --git a/drivers/crypto/cavium/nitrox/nitrox_mbx.c b/drivers/crypto/cavium/nitrox/nitrox_mbx.c
index 02ee95064841..b51b0449b478 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_mbx.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_mbx.c
@@ -25,6 +25,7 @@ enum mbx_msg_opcode {
MSG_OP_VF_UP,
MSG_OP_VF_DOWN,
MSG_OP_CHIPID_VFID,
+ MSG_OP_MCODE_INFO = 11,
};
struct pf2vf_work {
@@ -73,6 +74,13 @@ static void pf2vf_send_response(struct nitrox_device *ndev,
vfdev->nr_queues = 0;
atomic_set(&vfdev->state, __NDEV_NOT_READY);
break;
+ case MSG_OP_MCODE_INFO:
+ msg.data = 0;
+ msg.mcode_info.count = 2;
+ msg.mcode_info.info = MCODE_TYPE_SE_SSL | (MCODE_TYPE_AE << 5);
+ msg.mcode_info.next_se_grp = 1;
+ msg.mcode_info.next_ae_grp = 1;
+ break;
default:
msg.type = MBX_MSG_TYPE_NOP;
break;
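
A minimal sketch of the mailbox encoding the new MSG_OP_MCODE_INFO reply relies on, assuming the bit-field layout added to nitrox_dev.h above; demo_pack_mcode_reply is illustrative. Because union mbox_msg overlays the bit-fields on a single u64 (member "data"), clearing .data first guarantees the unused bits of the reply read back as zero:

#include <linux/types.h>

union demo_mbox_msg {
	u64 data;
	struct {
		u64 type: 2;
		u64 opcode: 6;
		u64 count: 4;
		u64 info: 40;
		u64 next_se_grp: 3;
		u64 next_ae_grp: 3;
	} mcode_info;
};

static u64 demo_pack_mcode_reply(void)
{
	union demo_mbox_msg msg;

	msg.data = 0;				/* zero all 64 bits first */
	msg.mcode_info.count = 2;
	msg.mcode_info.info = 2 | (1 << 5);	/* two mcode types packed */
	msg.mcode_info.next_se_grp = 1;
	msg.mcode_info.next_ae_grp = 1;
	return msg.data;			/* one 64-bit mailbox write */
}
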
diff --git a/drivers/crypto/cavium/nitrox/nitrox_req.h b/drivers/crypto/cavium/nitrox/nitrox_req.h
index f69ba02c4d25..12282c1b14f5 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_req.h
+++ b/drivers/crypto/cavium/nitrox/nitrox_req.h
@@ -10,6 +10,8 @@
#define PENDING_SIG 0xFFFFFFFFFFFFFFFFUL
#define PRIO 4001
+typedef void (*sereq_completion_t)(void *req, int err);
+
/**
* struct gphdr - General purpose Header
* @param0: first parameter.
@@ -203,12 +205,14 @@ struct nitrox_crypto_ctx {
struct flexi_crypto_context *fctx;
} u;
struct crypto_ctx_hdr *chdr;
+ sereq_completion_t callback;
};
struct nitrox_kcrypt_request {
struct se_crypto_request creq;
u8 *src;
u8 *dst;
+ u8 *iv_out;
};
/**
diff --git a/drivers/crypto/cavium/nitrox/nitrox_skcipher.c b/drivers/crypto/cavium/nitrox/nitrox_skcipher.c
index 3cdce1f0f257..97af4d50d003 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_skcipher.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_skcipher.c
@@ -6,6 +6,7 @@
#include <crypto/aes.h>
#include <crypto/skcipher.h>
+#include <crypto/scatterwalk.h>
#include <crypto/ctr.h>
#include <crypto/internal/des.h>
#include <crypto/xts.h>
@@ -47,6 +48,63 @@ static enum flexi_cipher flexi_cipher_type(const char *name)
return cipher->value;
}
+static void free_src_sglist(struct skcipher_request *skreq)
+{
+ struct nitrox_kcrypt_request *nkreq = skcipher_request_ctx(skreq);
+
+ kfree(nkreq->src);
+}
+
+static void free_dst_sglist(struct skcipher_request *skreq)
+{
+ struct nitrox_kcrypt_request *nkreq = skcipher_request_ctx(skreq);
+
+ kfree(nkreq->dst);
+}
+
+static void nitrox_skcipher_callback(void *arg, int err)
+{
+ struct skcipher_request *skreq = arg;
+
+ free_src_sglist(skreq);
+ free_dst_sglist(skreq);
+ if (err) {
+ pr_err_ratelimited("request failed status 0x%0x\n", err);
+ err = -EINVAL;
+ }
+
+ skcipher_request_complete(skreq, err);
+}
+
+static void nitrox_cbc_cipher_callback(void *arg, int err)
+{
+ struct skcipher_request *skreq = arg;
+ struct nitrox_kcrypt_request *nkreq = skcipher_request_ctx(skreq);
+ struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(skreq);
+ int ivsize = crypto_skcipher_ivsize(cipher);
+ unsigned int start = skreq->cryptlen - ivsize;
+
+ if (err) {
+ nitrox_skcipher_callback(arg, err);
+ return;
+ }
+
+ if (nkreq->creq.ctrl.s.arg == ENCRYPT) {
+ scatterwalk_map_and_copy(skreq->iv, skreq->dst, start, ivsize,
+ 0);
+ } else {
+ if (skreq->src != skreq->dst) {
+ scatterwalk_map_and_copy(skreq->iv, skreq->src, start,
+ ivsize, 0);
+ } else {
+ memcpy(skreq->iv, nkreq->iv_out, ivsize);
+ kfree(nkreq->iv_out);
+ }
+ }
+
+ nitrox_skcipher_callback(arg, err);
+}
+
static int nitrox_skcipher_init(struct crypto_skcipher *tfm)
{
struct nitrox_crypto_ctx *nctx = crypto_skcipher_ctx(tfm);
@@ -63,6 +121,8 @@ static int nitrox_skcipher_init(struct crypto_skcipher *tfm)
nitrox_put_device(nctx->ndev);
return -ENOMEM;
}
+
+ nctx->callback = nitrox_skcipher_callback;
nctx->chdr = chdr;
nctx->u.ctx_handle = (uintptr_t)((u8 *)chdr->vaddr +
sizeof(struct ctx_hdr));
@@ -71,6 +131,19 @@ static int nitrox_skcipher_init(struct crypto_skcipher *tfm)
return 0;
}
+static int nitrox_cbc_init(struct crypto_skcipher *tfm)
+{
+ int err;
+ struct nitrox_crypto_ctx *nctx = crypto_skcipher_ctx(tfm);
+
+ err = nitrox_skcipher_init(tfm);
+ if (err)
+ return err;
+
+ nctx->callback = nitrox_cbc_cipher_callback;
+ return 0;
+}
+
static void nitrox_skcipher_exit(struct crypto_skcipher *tfm)
{
struct nitrox_crypto_ctx *nctx = crypto_skcipher_ctx(tfm);
@@ -173,34 +246,6 @@ static int alloc_dst_sglist(struct skcipher_request *skreq, int ivsize)
return 0;
}
-static void free_src_sglist(struct skcipher_request *skreq)
-{
- struct nitrox_kcrypt_request *nkreq = skcipher_request_ctx(skreq);
-
- kfree(nkreq->src);
-}
-
-static void free_dst_sglist(struct skcipher_request *skreq)
-{
- struct nitrox_kcrypt_request *nkreq = skcipher_request_ctx(skreq);
-
- kfree(nkreq->dst);
-}
-
-static void nitrox_skcipher_callback(void *arg, int err)
-{
- struct skcipher_request *skreq = arg;
-
- free_src_sglist(skreq);
- free_dst_sglist(skreq);
- if (err) {
- pr_err_ratelimited("request failed status 0x%0x\n", err);
- err = -EINVAL;
- }
-
- skcipher_request_complete(skreq, err);
-}
-
static int nitrox_skcipher_crypt(struct skcipher_request *skreq, bool enc)
{
struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(skreq);
@@ -240,8 +285,28 @@ static int nitrox_skcipher_crypt(struct skcipher_request *skreq, bool enc)
}
/* send the crypto request */
- return nitrox_process_se_request(nctx->ndev, creq,
- nitrox_skcipher_callback, skreq);
+ return nitrox_process_se_request(nctx->ndev, creq, nctx->callback,
+ skreq);
+}
+
+static int nitrox_cbc_decrypt(struct skcipher_request *skreq)
+{
+ struct nitrox_kcrypt_request *nkreq = skcipher_request_ctx(skreq);
+ struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(skreq);
+ int ivsize = crypto_skcipher_ivsize(cipher);
+ gfp_t flags = (skreq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+ unsigned int start = skreq->cryptlen - ivsize;
+
+ if (skreq->src != skreq->dst)
+ return nitrox_skcipher_crypt(skreq, false);
+
+ nkreq->iv_out = kmalloc(ivsize, flags);
+ if (!nkreq->iv_out)
+ return -ENOMEM;
+
+ scatterwalk_map_and_copy(nkreq->iv_out, skreq->src, start, ivsize, 0);
+ return nitrox_skcipher_crypt(skreq, false);
}
static int nitrox_aes_encrypt(struct skcipher_request *skreq)
@@ -340,8 +405,8 @@ static struct skcipher_alg nitrox_skciphers[] = { {
.ivsize = AES_BLOCK_SIZE,
.setkey = nitrox_aes_setkey,
.encrypt = nitrox_aes_encrypt,
- .decrypt = nitrox_aes_decrypt,
- .init = nitrox_skcipher_init,
+ .decrypt = nitrox_cbc_decrypt,
+ .init = nitrox_cbc_init,
.exit = nitrox_skcipher_exit,
}, {
.base = {
@@ -428,7 +493,6 @@ static struct skcipher_alg nitrox_skciphers[] = { {
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
.cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
},
.min_keysize = AES_MIN_KEY_SIZE,
@@ -455,8 +519,8 @@ static struct skcipher_alg nitrox_skciphers[] = { {
.ivsize = DES3_EDE_BLOCK_SIZE,
.setkey = nitrox_3des_setkey,
.encrypt = nitrox_3des_encrypt,
- .decrypt = nitrox_3des_decrypt,
- .init = nitrox_skcipher_init,
+ .decrypt = nitrox_cbc_decrypt,
+ .init = nitrox_cbc_init,
.exit = nitrox_skcipher_exit,
}, {
.base = {
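
A minimal sketch of the invariant behind nitrox_cbc_cipher_callback above, assuming <crypto/scatterwalk.h>; demo_save_cbc_iv is illustrative. The skcipher API requires req->iv to hold the last ciphertext block on completion, so an in-place CBC decrypt must stash that block before the engine overwrites it — which is exactly what the iv_out buffer allocated in nitrox_cbc_decrypt is for:

#include <linux/types.h>
#include <crypto/scatterwalk.h>

static void demo_save_cbc_iv(struct scatterlist *src, unsigned int cryptlen,
			     unsigned int ivsize, u8 *iv_out)
{
	/* the chaining value is the final ciphertext block */
	scatterwalk_map_and_copy(iv_out, src, cryptlen - ivsize, ivsize, 0);
}
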
diff --git a/drivers/crypto/ccp/Kconfig b/drivers/crypto/ccp/Kconfig
index 8fec733f567f..e0a8bd15aa74 100644
--- a/drivers/crypto/ccp/Kconfig
+++ b/drivers/crypto/ccp/Kconfig
@@ -27,7 +27,7 @@ config CRYPTO_DEV_CCP_CRYPTO
depends on CRYPTO_DEV_CCP_DD
depends on CRYPTO_DEV_SP_CCP
select CRYPTO_HASH
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_AUTHENC
select CRYPTO_RSA
select CRYPTO_LIB_AES
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-galois.c b/drivers/crypto/ccp/ccp-crypto-aes-galois.c
index 94c1ad7eeddf..ff50ee80d223 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-galois.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-galois.c
@@ -172,14 +172,12 @@ static struct aead_alg ccp_aes_gcm_defaults = {
.ivsize = GCM_AES_IV_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
.base = {
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct ccp_ctx),
.cra_priority = CCP_CRA_PRIORITY,
- .cra_type = &crypto_ablkcipher_type,
.cra_exit = ccp_aes_gcm_cra_exit,
.cra_module = THIS_MODULE,
},
@@ -229,11 +227,10 @@ static int ccp_register_aes_aead(struct list_head *head,
snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
def->driver_name);
alg->base.cra_blocksize = def->blocksize;
- alg->base.cra_ablkcipher.ivsize = def->ivsize;
ret = crypto_register_aead(alg);
if (ret) {
- pr_err("%s ablkcipher algorithm registration error (%d)\n",
+ pr_err("%s aead algorithm registration error (%d)\n",
alg->base.cra_name, ret);
kfree(ccp_aead);
return ret;
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-xts.c b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
index 8e4a531f4f70..04b2517df955 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-xts.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
@@ -24,7 +24,7 @@ struct ccp_aes_xts_def {
const char *drv_name;
};
-static struct ccp_aes_xts_def aes_xts_algs[] = {
+static const struct ccp_aes_xts_def aes_xts_algs[] = {
{
.name = "xts(aes)",
.drv_name = "xts-aes-ccp",
@@ -61,26 +61,25 @@ static struct ccp_unit_size_map xts_unit_sizes[] = {
static int ccp_aes_xts_complete(struct crypto_async_request *async_req, int ret)
{
- struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
- struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
+ struct skcipher_request *req = skcipher_request_cast(async_req);
+ struct ccp_aes_req_ctx *rctx = skcipher_request_ctx(req);
if (ret)
return ret;
- memcpy(req->info, rctx->iv, AES_BLOCK_SIZE);
+ memcpy(req->iv, rctx->iv, AES_BLOCK_SIZE);
return 0;
}
-static int ccp_aes_xts_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+static int ccp_aes_xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int key_len)
{
- struct crypto_tfm *xfm = crypto_ablkcipher_tfm(tfm);
- struct ccp_ctx *ctx = crypto_tfm_ctx(xfm);
+ struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
unsigned int ccpversion = ccp_version();
int ret;
- ret = xts_check_key(xfm, key, key_len);
+ ret = xts_verify_key(tfm, key, key_len);
if (ret)
return ret;
@@ -102,11 +101,12 @@ static int ccp_aes_xts_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
return crypto_sync_skcipher_setkey(ctx->u.aes.tfm_skcipher, key, key_len);
}
-static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
+static int ccp_aes_xts_crypt(struct skcipher_request *req,
unsigned int encrypt)
{
- struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct ccp_aes_req_ctx *rctx = skcipher_request_ctx(req);
unsigned int ccpversion = ccp_version();
unsigned int fallback = 0;
unsigned int unit;
@@ -116,7 +116,7 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
if (!ctx->u.aes.key_len)
return -EINVAL;
- if (!req->info)
+ if (!req->iv)
return -EINVAL;
/* Check conditions under which the CCP can fulfill a request. The
@@ -127,7 +127,7 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
*/
unit_size = CCP_XTS_AES_UNIT_SIZE__LAST;
for (unit = 0; unit < ARRAY_SIZE(xts_unit_sizes); unit++) {
- if (req->nbytes == xts_unit_sizes[unit].size) {
+ if (req->cryptlen == xts_unit_sizes[unit].size) {
unit_size = unit;
break;
}
@@ -155,14 +155,14 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
skcipher_request_set_callback(subreq, req->base.flags,
NULL, NULL);
skcipher_request_set_crypt(subreq, req->src, req->dst,
- req->nbytes, req->info);
+ req->cryptlen, req->iv);
ret = encrypt ? crypto_skcipher_encrypt(subreq) :
crypto_skcipher_decrypt(subreq);
skcipher_request_zero(subreq);
return ret;
}
- memcpy(rctx->iv, req->info, AES_BLOCK_SIZE);
+ memcpy(rctx->iv, req->iv, AES_BLOCK_SIZE);
sg_init_one(&rctx->iv_sg, rctx->iv, AES_BLOCK_SIZE);
memset(&rctx->cmd, 0, sizeof(rctx->cmd));
@@ -177,7 +177,7 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
rctx->cmd.u.xts.iv = &rctx->iv_sg;
rctx->cmd.u.xts.iv_len = AES_BLOCK_SIZE;
rctx->cmd.u.xts.src = req->src;
- rctx->cmd.u.xts.src_len = req->nbytes;
+ rctx->cmd.u.xts.src_len = req->cryptlen;
rctx->cmd.u.xts.dst = req->dst;
ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
@@ -185,19 +185,19 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
return ret;
}
-static int ccp_aes_xts_encrypt(struct ablkcipher_request *req)
+static int ccp_aes_xts_encrypt(struct skcipher_request *req)
{
return ccp_aes_xts_crypt(req, 1);
}
-static int ccp_aes_xts_decrypt(struct ablkcipher_request *req)
+static int ccp_aes_xts_decrypt(struct skcipher_request *req)
{
return ccp_aes_xts_crypt(req, 0);
}
-static int ccp_aes_xts_cra_init(struct crypto_tfm *tfm)
+static int ccp_aes_xts_init_tfm(struct crypto_skcipher *tfm)
{
- struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
struct crypto_sync_skcipher *fallback_tfm;
ctx->complete = ccp_aes_xts_complete;
@@ -212,14 +212,14 @@ static int ccp_aes_xts_cra_init(struct crypto_tfm *tfm)
}
ctx->u.aes.tfm_skcipher = fallback_tfm;
- tfm->crt_ablkcipher.reqsize = sizeof(struct ccp_aes_req_ctx);
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct ccp_aes_req_ctx));
return 0;
}
-static void ccp_aes_xts_cra_exit(struct crypto_tfm *tfm)
+static void ccp_aes_xts_exit_tfm(struct crypto_skcipher *tfm)
{
- struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
crypto_free_sync_skcipher(ctx->u.aes.tfm_skcipher);
}
@@ -227,8 +227,8 @@ static void ccp_aes_xts_cra_exit(struct crypto_tfm *tfm)
static int ccp_register_aes_xts_alg(struct list_head *head,
const struct ccp_aes_xts_def *def)
{
- struct ccp_crypto_ablkcipher_alg *ccp_alg;
- struct crypto_alg *alg;
+ struct ccp_crypto_skcipher_alg *ccp_alg;
+ struct skcipher_alg *alg;
int ret;
ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL);
@@ -239,30 +239,30 @@ static int ccp_register_aes_xts_alg(struct list_head *head,
alg = &ccp_alg->alg;
- snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
- snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+ snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
+ snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
def->drv_name);
- alg->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_NEED_FALLBACK;
- alg->cra_blocksize = AES_BLOCK_SIZE;
- alg->cra_ctxsize = sizeof(struct ccp_ctx);
- alg->cra_priority = CCP_CRA_PRIORITY;
- alg->cra_type = &crypto_ablkcipher_type;
- alg->cra_ablkcipher.setkey = ccp_aes_xts_setkey;
- alg->cra_ablkcipher.encrypt = ccp_aes_xts_encrypt;
- alg->cra_ablkcipher.decrypt = ccp_aes_xts_decrypt;
- alg->cra_ablkcipher.min_keysize = AES_MIN_KEY_SIZE * 2;
- alg->cra_ablkcipher.max_keysize = AES_MAX_KEY_SIZE * 2;
- alg->cra_ablkcipher.ivsize = AES_BLOCK_SIZE;
- alg->cra_init = ccp_aes_xts_cra_init;
- alg->cra_exit = ccp_aes_xts_cra_exit;
- alg->cra_module = THIS_MODULE;
-
- ret = crypto_register_alg(alg);
+ alg->base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK;
+ alg->base.cra_blocksize = AES_BLOCK_SIZE;
+ alg->base.cra_ctxsize = sizeof(struct ccp_ctx);
+ alg->base.cra_priority = CCP_CRA_PRIORITY;
+ alg->base.cra_module = THIS_MODULE;
+
+ alg->setkey = ccp_aes_xts_setkey;
+ alg->encrypt = ccp_aes_xts_encrypt;
+ alg->decrypt = ccp_aes_xts_decrypt;
+ alg->min_keysize = AES_MIN_KEY_SIZE * 2;
+ alg->max_keysize = AES_MAX_KEY_SIZE * 2;
+ alg->ivsize = AES_BLOCK_SIZE;
+ alg->init = ccp_aes_xts_init_tfm;
+ alg->exit = ccp_aes_xts_exit_tfm;
+
+ ret = crypto_register_skcipher(alg);
if (ret) {
- pr_err("%s ablkcipher algorithm registration error (%d)\n",
- alg->cra_name, ret);
+ pr_err("%s skcipher algorithm registration error (%d)\n",
+ alg->base.cra_name, ret);
kfree(ccp_alg);
return ret;
}
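
A minimal sketch of the on-stack fallback idiom visible in the context above (subreq), assuming <crypto/skcipher.h>; demo_fallback is illustrative. The sync-skcipher request lives on the stack, runs synchronously, and is wiped afterwards:

#include <crypto/skcipher.h>

static int demo_fallback(struct crypto_sync_skcipher *fb,
			 struct skcipher_request *req, bool enc)
{
	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, fb);
	int ret;

	skcipher_request_set_sync_tfm(subreq, fb);
	skcipher_request_set_callback(subreq, req->base.flags, NULL, NULL);
	skcipher_request_set_crypt(subreq, req->src, req->dst,
				   req->cryptlen, req->iv);
	ret = enc ? crypto_skcipher_encrypt(subreq) :
		    crypto_skcipher_decrypt(subreq);
	skcipher_request_zero(subreq);
	return ret;
}
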
diff --git a/drivers/crypto/ccp/ccp-crypto-aes.c b/drivers/crypto/ccp/ccp-crypto-aes.c
index 58c6dddfc5e1..33328a153225 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes.c
@@ -21,25 +21,24 @@
static int ccp_aes_complete(struct crypto_async_request *async_req, int ret)
{
- struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
+ struct skcipher_request *req = skcipher_request_cast(async_req);
struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
+ struct ccp_aes_req_ctx *rctx = skcipher_request_ctx(req);
if (ret)
return ret;
if (ctx->u.aes.mode != CCP_AES_MODE_ECB)
- memcpy(req->info, rctx->iv, AES_BLOCK_SIZE);
+ memcpy(req->iv, rctx->iv, AES_BLOCK_SIZE);
return 0;
}
-static int ccp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+static int ccp_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int key_len)
{
- struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ablkcipher_tfm(tfm));
- struct ccp_crypto_ablkcipher_alg *alg =
- ccp_crypto_ablkcipher_alg(crypto_ablkcipher_tfm(tfm));
+ struct ccp_crypto_skcipher_alg *alg = ccp_crypto_skcipher_alg(tfm);
+ struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
switch (key_len) {
case AES_KEYSIZE_128:
@@ -52,7 +51,7 @@ static int ccp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
ctx->u.aes.type = CCP_AES_TYPE_256;
break;
default:
- crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
ctx->u.aes.mode = alg->mode;
@@ -64,10 +63,11 @@ static int ccp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
return 0;
}
-static int ccp_aes_crypt(struct ablkcipher_request *req, bool encrypt)
+static int ccp_aes_crypt(struct skcipher_request *req, bool encrypt)
{
- struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct ccp_aes_req_ctx *rctx = skcipher_request_ctx(req);
struct scatterlist *iv_sg = NULL;
unsigned int iv_len = 0;
int ret;
@@ -77,14 +77,14 @@ static int ccp_aes_crypt(struct ablkcipher_request *req, bool encrypt)
if (((ctx->u.aes.mode == CCP_AES_MODE_ECB) ||
(ctx->u.aes.mode == CCP_AES_MODE_CBC)) &&
- (req->nbytes & (AES_BLOCK_SIZE - 1)))
+ (req->cryptlen & (AES_BLOCK_SIZE - 1)))
return -EINVAL;
if (ctx->u.aes.mode != CCP_AES_MODE_ECB) {
- if (!req->info)
+ if (!req->iv)
return -EINVAL;
- memcpy(rctx->iv, req->info, AES_BLOCK_SIZE);
+ memcpy(rctx->iv, req->iv, AES_BLOCK_SIZE);
iv_sg = &rctx->iv_sg;
iv_len = AES_BLOCK_SIZE;
sg_init_one(iv_sg, rctx->iv, iv_len);
@@ -102,7 +102,7 @@ static int ccp_aes_crypt(struct ablkcipher_request *req, bool encrypt)
rctx->cmd.u.aes.iv = iv_sg;
rctx->cmd.u.aes.iv_len = iv_len;
rctx->cmd.u.aes.src = req->src;
- rctx->cmd.u.aes.src_len = req->nbytes;
+ rctx->cmd.u.aes.src_len = req->cryptlen;
rctx->cmd.u.aes.dst = req->dst;
ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
@@ -110,48 +110,44 @@ static int ccp_aes_crypt(struct ablkcipher_request *req, bool encrypt)
return ret;
}
-static int ccp_aes_encrypt(struct ablkcipher_request *req)
+static int ccp_aes_encrypt(struct skcipher_request *req)
{
return ccp_aes_crypt(req, true);
}
-static int ccp_aes_decrypt(struct ablkcipher_request *req)
+static int ccp_aes_decrypt(struct skcipher_request *req)
{
return ccp_aes_crypt(req, false);
}
-static int ccp_aes_cra_init(struct crypto_tfm *tfm)
+static int ccp_aes_init_tfm(struct crypto_skcipher *tfm)
{
- struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
ctx->complete = ccp_aes_complete;
ctx->u.aes.key_len = 0;
- tfm->crt_ablkcipher.reqsize = sizeof(struct ccp_aes_req_ctx);
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct ccp_aes_req_ctx));
return 0;
}
-static void ccp_aes_cra_exit(struct crypto_tfm *tfm)
-{
-}
-
static int ccp_aes_rfc3686_complete(struct crypto_async_request *async_req,
int ret)
{
- struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
- struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
+ struct skcipher_request *req = skcipher_request_cast(async_req);
+ struct ccp_aes_req_ctx *rctx = skcipher_request_ctx(req);
/* Restore the original pointer */
- req->info = rctx->rfc3686_info;
+ req->iv = rctx->rfc3686_info;
return ccp_aes_complete(async_req, ret);
}
-static int ccp_aes_rfc3686_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+static int ccp_aes_rfc3686_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int key_len)
{
- struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ablkcipher_tfm(tfm));
+ struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
if (key_len < CTR_RFC3686_NONCE_SIZE)
return -EINVAL;
@@ -162,10 +158,11 @@ static int ccp_aes_rfc3686_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
return ccp_aes_setkey(tfm, key, key_len);
}
-static int ccp_aes_rfc3686_crypt(struct ablkcipher_request *req, bool encrypt)
+static int ccp_aes_rfc3686_crypt(struct skcipher_request *req, bool encrypt)
{
- struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct ccp_aes_req_ctx *rctx = skcipher_request_ctx(req);
u8 *iv;
/* Initialize the CTR block */
@@ -173,84 +170,72 @@ static int ccp_aes_rfc3686_crypt(struct ablkcipher_request *req, bool encrypt)
memcpy(iv, ctx->u.aes.nonce, CTR_RFC3686_NONCE_SIZE);
iv += CTR_RFC3686_NONCE_SIZE;
- memcpy(iv, req->info, CTR_RFC3686_IV_SIZE);
+ memcpy(iv, req->iv, CTR_RFC3686_IV_SIZE);
iv += CTR_RFC3686_IV_SIZE;
*(__be32 *)iv = cpu_to_be32(1);
/* Point to the new IV */
- rctx->rfc3686_info = req->info;
- req->info = rctx->rfc3686_iv;
+ rctx->rfc3686_info = req->iv;
+ req->iv = rctx->rfc3686_iv;
return ccp_aes_crypt(req, encrypt);
}
-static int ccp_aes_rfc3686_encrypt(struct ablkcipher_request *req)
+static int ccp_aes_rfc3686_encrypt(struct skcipher_request *req)
{
return ccp_aes_rfc3686_crypt(req, true);
}
-static int ccp_aes_rfc3686_decrypt(struct ablkcipher_request *req)
+static int ccp_aes_rfc3686_decrypt(struct skcipher_request *req)
{
return ccp_aes_rfc3686_crypt(req, false);
}
-static int ccp_aes_rfc3686_cra_init(struct crypto_tfm *tfm)
+static int ccp_aes_rfc3686_init_tfm(struct crypto_skcipher *tfm)
{
- struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
ctx->complete = ccp_aes_rfc3686_complete;
ctx->u.aes.key_len = 0;
- tfm->crt_ablkcipher.reqsize = sizeof(struct ccp_aes_req_ctx);
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct ccp_aes_req_ctx));
return 0;
}
-static void ccp_aes_rfc3686_cra_exit(struct crypto_tfm *tfm)
-{
-}
-
-static struct crypto_alg ccp_aes_defaults = {
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_NEED_FALLBACK,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct ccp_ctx),
- .cra_priority = CCP_CRA_PRIORITY,
- .cra_type = &crypto_ablkcipher_type,
- .cra_init = ccp_aes_cra_init,
- .cra_exit = ccp_aes_cra_exit,
- .cra_module = THIS_MODULE,
- .cra_ablkcipher = {
- .setkey = ccp_aes_setkey,
- .encrypt = ccp_aes_encrypt,
- .decrypt = ccp_aes_decrypt,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- },
+static const struct skcipher_alg ccp_aes_defaults = {
+ .setkey = ccp_aes_setkey,
+ .encrypt = ccp_aes_encrypt,
+ .decrypt = ccp_aes_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .init = ccp_aes_init_tfm,
+
+ .base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct ccp_ctx),
+ .base.cra_priority = CCP_CRA_PRIORITY,
+ .base.cra_module = THIS_MODULE,
};
-static struct crypto_alg ccp_aes_rfc3686_defaults = {
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_NEED_FALLBACK,
- .cra_blocksize = CTR_RFC3686_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct ccp_ctx),
- .cra_priority = CCP_CRA_PRIORITY,
- .cra_type = &crypto_ablkcipher_type,
- .cra_init = ccp_aes_rfc3686_cra_init,
- .cra_exit = ccp_aes_rfc3686_cra_exit,
- .cra_module = THIS_MODULE,
- .cra_ablkcipher = {
- .setkey = ccp_aes_rfc3686_setkey,
- .encrypt = ccp_aes_rfc3686_encrypt,
- .decrypt = ccp_aes_rfc3686_decrypt,
- .min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
- },
+static const struct skcipher_alg ccp_aes_rfc3686_defaults = {
+ .setkey = ccp_aes_rfc3686_setkey,
+ .encrypt = ccp_aes_rfc3686_encrypt,
+ .decrypt = ccp_aes_rfc3686_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
+ .init = ccp_aes_rfc3686_init_tfm,
+
+ .base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = CTR_RFC3686_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct ccp_ctx),
+ .base.cra_priority = CCP_CRA_PRIORITY,
+ .base.cra_module = THIS_MODULE,
};
struct ccp_aes_def {
@@ -260,7 +245,7 @@ struct ccp_aes_def {
const char *driver_name;
unsigned int blocksize;
unsigned int ivsize;
- struct crypto_alg *alg_defaults;
+ const struct skcipher_alg *alg_defaults;
};
static struct ccp_aes_def aes_algs[] = {
@@ -323,8 +308,8 @@ static struct ccp_aes_def aes_algs[] = {
static int ccp_register_aes_alg(struct list_head *head,
const struct ccp_aes_def *def)
{
- struct ccp_crypto_ablkcipher_alg *ccp_alg;
- struct crypto_alg *alg;
+ struct ccp_crypto_skcipher_alg *ccp_alg;
+ struct skcipher_alg *alg;
int ret;
ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL);
@@ -338,16 +323,16 @@ static int ccp_register_aes_alg(struct list_head *head,
/* Copy the defaults and override as necessary */
alg = &ccp_alg->alg;
*alg = *def->alg_defaults;
- snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
- snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+ snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
+ snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
def->driver_name);
- alg->cra_blocksize = def->blocksize;
- alg->cra_ablkcipher.ivsize = def->ivsize;
+ alg->base.cra_blocksize = def->blocksize;
+ alg->ivsize = def->ivsize;
- ret = crypto_register_alg(alg);
+ ret = crypto_register_skcipher(alg);
if (ret) {
- pr_err("%s ablkcipher algorithm registration error (%d)\n",
- alg->cra_name, ret);
+ pr_err("%s skcipher algorithm registration error (%d)\n",
+ alg->base.cra_name, ret);
kfree(ccp_alg);
return ret;
}
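
A minimal sketch of the rfc3686 counter block that ccp_aes_rfc3686_crypt assembles above, assuming <crypto/ctr.h>; demo_build_ctr_block is illustrative. The 16-byte block is nonce (4) | per-request IV (8) | 32-bit big-endian counter seeded to 1:

#include <linux/kernel.h>
#include <linux/string.h>
#include <crypto/ctr.h>

static void demo_build_ctr_block(u8 ctrblk[16], const u8 *nonce,
				 const u8 *iv)
{
	memcpy(ctrblk, nonce, CTR_RFC3686_NONCE_SIZE);
	memcpy(ctrblk + CTR_RFC3686_NONCE_SIZE, iv, CTR_RFC3686_IV_SIZE);
	/* counter starts at 1 for the first block */
	*(__be32 *)(ctrblk + CTR_RFC3686_NONCE_SIZE +
		    CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
}
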
diff --git a/drivers/crypto/ccp/ccp-crypto-des3.c b/drivers/crypto/ccp/ccp-crypto-des3.c
index d2c49b2f0323..9c129defdb50 100644
--- a/drivers/crypto/ccp/ccp-crypto-des3.c
+++ b/drivers/crypto/ccp/ccp-crypto-des3.c
@@ -20,28 +20,27 @@
static int ccp_des3_complete(struct crypto_async_request *async_req, int ret)
{
- struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
+ struct skcipher_request *req = skcipher_request_cast(async_req);
struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- struct ccp_des3_req_ctx *rctx = ablkcipher_request_ctx(req);
+ struct ccp_des3_req_ctx *rctx = skcipher_request_ctx(req);
if (ret)
return ret;
if (ctx->u.des3.mode != CCP_DES3_MODE_ECB)
- memcpy(req->info, rctx->iv, DES3_EDE_BLOCK_SIZE);
+ memcpy(req->iv, rctx->iv, DES3_EDE_BLOCK_SIZE);
return 0;
}
-static int ccp_des3_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+static int ccp_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int key_len)
{
- struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ablkcipher_tfm(tfm));
- struct ccp_crypto_ablkcipher_alg *alg =
- ccp_crypto_ablkcipher_alg(crypto_ablkcipher_tfm(tfm));
+ struct ccp_crypto_skcipher_alg *alg = ccp_crypto_skcipher_alg(tfm);
+ struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
int err;
- err = verify_ablkcipher_des3_key(tfm, key);
+ err = verify_skcipher_des3_key(tfm, key);
if (err)
return err;
@@ -58,10 +57,11 @@ static int ccp_des3_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
return 0;
}
-static int ccp_des3_crypt(struct ablkcipher_request *req, bool encrypt)
+static int ccp_des3_crypt(struct skcipher_request *req, bool encrypt)
{
- struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- struct ccp_des3_req_ctx *rctx = ablkcipher_request_ctx(req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct ccp_des3_req_ctx *rctx = skcipher_request_ctx(req);
struct scatterlist *iv_sg = NULL;
unsigned int iv_len = 0;
int ret;
@@ -71,14 +71,14 @@ static int ccp_des3_crypt(struct ablkcipher_request *req, bool encrypt)
if (((ctx->u.des3.mode == CCP_DES3_MODE_ECB) ||
(ctx->u.des3.mode == CCP_DES3_MODE_CBC)) &&
- (req->nbytes & (DES3_EDE_BLOCK_SIZE - 1)))
+ (req->cryptlen & (DES3_EDE_BLOCK_SIZE - 1)))
return -EINVAL;
if (ctx->u.des3.mode != CCP_DES3_MODE_ECB) {
- if (!req->info)
+ if (!req->iv)
return -EINVAL;
- memcpy(rctx->iv, req->info, DES3_EDE_BLOCK_SIZE);
+ memcpy(rctx->iv, req->iv, DES3_EDE_BLOCK_SIZE);
iv_sg = &rctx->iv_sg;
iv_len = DES3_EDE_BLOCK_SIZE;
sg_init_one(iv_sg, rctx->iv, iv_len);
@@ -97,7 +97,7 @@ static int ccp_des3_crypt(struct ablkcipher_request *req, bool encrypt)
rctx->cmd.u.des3.iv = iv_sg;
rctx->cmd.u.des3.iv_len = iv_len;
rctx->cmd.u.des3.src = req->src;
- rctx->cmd.u.des3.src_len = req->nbytes;
+ rctx->cmd.u.des3.src_len = req->cryptlen;
rctx->cmd.u.des3.dst = req->dst;
ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
@@ -105,51 +105,43 @@ static int ccp_des3_crypt(struct ablkcipher_request *req, bool encrypt)
return ret;
}
-static int ccp_des3_encrypt(struct ablkcipher_request *req)
+static int ccp_des3_encrypt(struct skcipher_request *req)
{
return ccp_des3_crypt(req, true);
}
-static int ccp_des3_decrypt(struct ablkcipher_request *req)
+static int ccp_des3_decrypt(struct skcipher_request *req)
{
return ccp_des3_crypt(req, false);
}
-static int ccp_des3_cra_init(struct crypto_tfm *tfm)
+static int ccp_des3_init_tfm(struct crypto_skcipher *tfm)
{
- struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
ctx->complete = ccp_des3_complete;
ctx->u.des3.key_len = 0;
- tfm->crt_ablkcipher.reqsize = sizeof(struct ccp_des3_req_ctx);
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct ccp_des3_req_ctx));
return 0;
}
-static void ccp_des3_cra_exit(struct crypto_tfm *tfm)
-{
-}
-
-static struct crypto_alg ccp_des3_defaults = {
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_NEED_FALLBACK,
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct ccp_ctx),
- .cra_priority = CCP_CRA_PRIORITY,
- .cra_type = &crypto_ablkcipher_type,
- .cra_init = ccp_des3_cra_init,
- .cra_exit = ccp_des3_cra_exit,
- .cra_module = THIS_MODULE,
- .cra_ablkcipher = {
- .setkey = ccp_des3_setkey,
- .encrypt = ccp_des3_encrypt,
- .decrypt = ccp_des3_decrypt,
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- },
+static const struct skcipher_alg ccp_des3_defaults = {
+ .setkey = ccp_des3_setkey,
+ .encrypt = ccp_des3_encrypt,
+ .decrypt = ccp_des3_decrypt,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .init = ccp_des3_init_tfm,
+
+ .base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct ccp_ctx),
+ .base.cra_priority = CCP_CRA_PRIORITY,
+ .base.cra_module = THIS_MODULE,
};
struct ccp_des3_def {
@@ -159,10 +151,10 @@ struct ccp_des3_def {
const char *driver_name;
unsigned int blocksize;
unsigned int ivsize;
- struct crypto_alg *alg_defaults;
+ const struct skcipher_alg *alg_defaults;
};
-static struct ccp_des3_def des3_algs[] = {
+static const struct ccp_des3_def des3_algs[] = {
{
.mode = CCP_DES3_MODE_ECB,
.version = CCP_VERSION(5, 0),
@@ -186,8 +178,8 @@ static struct ccp_des3_def des3_algs[] = {
static int ccp_register_des3_alg(struct list_head *head,
const struct ccp_des3_def *def)
{
- struct ccp_crypto_ablkcipher_alg *ccp_alg;
- struct crypto_alg *alg;
+ struct ccp_crypto_skcipher_alg *ccp_alg;
+ struct skcipher_alg *alg;
int ret;
ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL);
@@ -201,16 +193,16 @@ static int ccp_register_des3_alg(struct list_head *head,
/* Copy the defaults and override as necessary */
alg = &ccp_alg->alg;
*alg = *def->alg_defaults;
- snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
- snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+ snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
+ snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
def->driver_name);
- alg->cra_blocksize = def->blocksize;
- alg->cra_ablkcipher.ivsize = def->ivsize;
+ alg->base.cra_blocksize = def->blocksize;
+ alg->ivsize = def->ivsize;
- ret = crypto_register_alg(alg);
+ ret = crypto_register_skcipher(alg);
if (ret) {
- pr_err("%s ablkcipher algorithm registration error (%d)\n",
- alg->cra_name, ret);
+ pr_err("%s skcipher algorithm registration error (%d)\n",
+ alg->base.cra_name, ret);
kfree(ccp_alg);
return ret;
}
diff --git a/drivers/crypto/ccp/ccp-crypto-main.c b/drivers/crypto/ccp/ccp-crypto-main.c
index 8ee4cb45a3f3..88275b4867ea 100644
--- a/drivers/crypto/ccp/ccp-crypto-main.c
+++ b/drivers/crypto/ccp/ccp-crypto-main.c
@@ -41,7 +41,7 @@ MODULE_PARM_DESC(rsa_disable, "Disable use of RSA - any non-zero value");
/* List heads for the supported algorithms */
static LIST_HEAD(hash_algs);
-static LIST_HEAD(cipher_algs);
+static LIST_HEAD(skcipher_algs);
static LIST_HEAD(aead_algs);
static LIST_HEAD(akcipher_algs);
@@ -330,7 +330,7 @@ static int ccp_register_algs(void)
int ret;
if (!aes_disable) {
- ret = ccp_register_aes_algs(&cipher_algs);
+ ret = ccp_register_aes_algs(&skcipher_algs);
if (ret)
return ret;
@@ -338,7 +338,7 @@ static int ccp_register_algs(void)
if (ret)
return ret;
- ret = ccp_register_aes_xts_algs(&cipher_algs);
+ ret = ccp_register_aes_xts_algs(&skcipher_algs);
if (ret)
return ret;
@@ -348,7 +348,7 @@ static int ccp_register_algs(void)
}
if (!des3_disable) {
- ret = ccp_register_des3_algs(&cipher_algs);
+ ret = ccp_register_des3_algs(&skcipher_algs);
if (ret)
return ret;
}
@@ -371,7 +371,7 @@ static int ccp_register_algs(void)
static void ccp_unregister_algs(void)
{
struct ccp_crypto_ahash_alg *ahash_alg, *ahash_tmp;
- struct ccp_crypto_ablkcipher_alg *ablk_alg, *ablk_tmp;
+ struct ccp_crypto_skcipher_alg *ablk_alg, *ablk_tmp;
struct ccp_crypto_aead *aead_alg, *aead_tmp;
struct ccp_crypto_akcipher_alg *akc_alg, *akc_tmp;
@@ -381,8 +381,8 @@ static void ccp_unregister_algs(void)
kfree(ahash_alg);
}
- list_for_each_entry_safe(ablk_alg, ablk_tmp, &cipher_algs, entry) {
- crypto_unregister_alg(&ablk_alg->alg);
+ list_for_each_entry_safe(ablk_alg, ablk_tmp, &skcipher_algs, entry) {
+ crypto_unregister_skcipher(&ablk_alg->alg);
list_del(&ablk_alg->entry);
kfree(ablk_alg);
}
diff --git a/drivers/crypto/ccp/ccp-crypto.h b/drivers/crypto/ccp/ccp-crypto.h
index 9015b5da6ba3..90a009e6b5c1 100644
--- a/drivers/crypto/ccp/ccp-crypto.h
+++ b/drivers/crypto/ccp/ccp-crypto.h
@@ -21,6 +21,7 @@
#include <crypto/hash.h>
#include <crypto/sha.h>
#include <crypto/akcipher.h>
+#include <crypto/skcipher.h>
#include <crypto/internal/rsa.h>
/* We want the module name in front of our messages */
@@ -31,12 +32,12 @@
#define CCP_CRA_PRIORITY 300
-struct ccp_crypto_ablkcipher_alg {
+struct ccp_crypto_skcipher_alg {
struct list_head entry;
u32 mode;
- struct crypto_alg alg;
+ struct skcipher_alg alg;
};
struct ccp_crypto_aead {
@@ -66,12 +67,12 @@ struct ccp_crypto_akcipher_alg {
struct akcipher_alg alg;
};
-static inline struct ccp_crypto_ablkcipher_alg *
- ccp_crypto_ablkcipher_alg(struct crypto_tfm *tfm)
+static inline struct ccp_crypto_skcipher_alg *
+ ccp_crypto_skcipher_alg(struct crypto_skcipher *tfm)
{
- struct crypto_alg *alg = tfm->__crt_alg;
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
- return container_of(alg, struct ccp_crypto_ablkcipher_alg, alg);
+ return container_of(alg, struct ccp_crypto_skcipher_alg, alg);
}
static inline struct ccp_crypto_ahash_alg *
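
A minimal sketch of the container_of pattern the updated helper above relies on, assuming <linux/list.h> and <crypto/skcipher.h>; demo_driver_alg/demo_from_alg are illustrative names. Given a pointer to the embedded skcipher_alg, the wrapping driver structure is recovered by offset arithmetic:

#include <linux/kernel.h>
#include <linux/list.h>
#include <crypto/skcipher.h>

struct demo_driver_alg {
	struct list_head entry;
	u32 mode;
	struct skcipher_alg alg;	/* embedded, not pointed to */
};

static struct demo_driver_alg *demo_from_alg(struct skcipher_alg *alg)
{
	return container_of(alg, struct demo_driver_alg, alg);
}
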
diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c
index 57eb53b8ac21..82ac4c14c04c 100644
--- a/drivers/crypto/ccp/ccp-dev-v5.c
+++ b/drivers/crypto/ccp/ccp-dev-v5.c
@@ -789,6 +789,18 @@ static int ccp5_init(struct ccp_device *ccp)
/* Find available queues */
qmr = ioread32(ccp->io_regs + Q_MASK_REG);
+ /*
+ * Check for a access to the registers. If this read returns
+ * 0xffffffff, it's likely that the system is running a broken
+ * BIOS which disallows access to the device. Stop here and fail
+ * the initialization (but not the load, as the PSP could get
+ * properly initialized).
+ */
+ if (qmr == 0xffffffff) {
+ dev_notice(dev, "ccp: unable to access the device: you might be running a broken BIOS.\n");
+ return 1;
+ }
+
for (i = 0; (i < MAX_HW_QUEUES) && (ccp->cmd_q_count < ccp->max_q_count); i++) {
if (!(qmr & (1 << i)))
continue;
@@ -854,7 +866,7 @@ static int ccp5_init(struct ccp_device *ccp)
if (ccp->cmd_q_count == 0) {
dev_notice(dev, "no command queues available\n");
- ret = -EIO;
+ ret = 1;
goto e_pool;
}
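
A one-function sketch of the probe-time check added above, assuming <linux/io.h>; demo_regs_accessible is illustrative. An MMIO read that the platform blocks (or that hits an absent device) completes as all-ones, so qmr == 0xffffffff is a reliable "cannot touch the device" signature:

#include <linux/types.h>
#include <linux/io.h>

static bool demo_regs_accessible(void __iomem *base, unsigned long off)
{
	/* blocked or aborted MMIO reads return all-ones */
	return ioread32(base + off) != 0xffffffffU;
}
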
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
index 73acf0fdb793..19ac509ed76e 100644
--- a/drivers/crypto/ccp/ccp-dev.c
+++ b/drivers/crypto/ccp/ccp-dev.c
@@ -641,18 +641,27 @@ int ccp_dev_init(struct sp_device *sp)
ccp->vdata->setup(ccp);
ret = ccp->vdata->perform->init(ccp);
- if (ret)
+ if (ret) {
+ /* A positive number means that the device cannot be initialized,
+ * but no additional message is required.
+ */
+ if (ret > 0)
+ goto e_quiet;
+
+ /* An unexpected problem occurred, and should be reported in the log */
goto e_err;
+ }
dev_notice(dev, "ccp enabled\n");
return 0;
e_err:
- sp->ccp_data = NULL;
-
dev_notice(dev, "ccp initialization failed\n");
+e_quiet:
+ sp->ccp_data = NULL;
+
return ret;
}
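
A minimal sketch of the return-value convention the hunk above introduces, assuming <linux/printk.h>; demo_call_init is illustrative. Negative errno means a real fault worth logging, a positive value means an expected quiet bail-out (such as the broken-BIOS case), and the caller branches on the sign:

#include <linux/printk.h>

static int demo_call_init(int (*init)(void))
{
	int ret = init();

	if (ret > 0)
		return ret;		/* quiet failure: skip the log */
	if (ret < 0)
		pr_notice("init failed: %d\n", ret);
	return ret;
}
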
diff --git a/drivers/crypto/ccp/ccp-dmaengine.c b/drivers/crypto/ccp/ccp-dmaengine.c
index a54f9367a580..0770a83bf1a5 100644
--- a/drivers/crypto/ccp/ccp-dmaengine.c
+++ b/drivers/crypto/ccp/ccp-dmaengine.c
@@ -342,6 +342,7 @@ static struct ccp_dma_desc *ccp_alloc_dma_desc(struct ccp_dma_chan *chan,
desc->tx_desc.flags = flags;
desc->tx_desc.tx_submit = ccp_tx_submit;
desc->ccp = chan->ccp;
+ INIT_LIST_HEAD(&desc->entry);
INIT_LIST_HEAD(&desc->pending);
INIT_LIST_HEAD(&desc->active);
desc->status = DMA_IN_PROGRESS;
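
A two-line sketch of why the INIT_LIST_HEAD(&desc->entry) above matters, assuming <linux/list.h>; demo_desc is illustrative. list_del() on a list_head that was never initialized follows garbage prev/next pointers, whereas a self-linked (initialized) node can be deleted safely even if it was never added to a list:

#include <linux/list.h>

struct demo_desc {
	struct list_head entry;
};

static void demo_init_and_drop(struct demo_desc *d)
{
	INIT_LIST_HEAD(&d->entry);	/* entry now points at itself */
	list_del(&d->entry);		/* safe: no wild pointer walk */
}
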
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index c8da8eb160da..422193690fd4 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -1777,8 +1777,9 @@ ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
LSB_ITEM_SIZE);
break;
default:
+ kfree(hmac_buf);
ret = -EINVAL;
- goto e_ctx;
+ goto e_data;
}
memset(&hmac_cmd, 0, sizeof(hmac_cmd));
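
The fix above is the classic goto-unwinding rule: an error hit after a later allocation (hmac_buf) must free that allocation itself and then jump to the label that releases everything acquired earlier (e_data rather than e_ctx). A self-contained sketch of the pattern, with hypothetical names and <linux/slab.h> assumed:

	static int example_unwind_order(size_t len, int bad_case)
	{
		void *ctx, *data, *buf;
		int ret = 0;

		ctx = kmalloc(len, GFP_KERNEL);
		if (!ctx)
			return -ENOMEM;
		data = kmalloc(len, GFP_KERNEL);
		if (!data) {
			ret = -ENOMEM;
			goto e_ctx;
		}
		buf = kmalloc(len, GFP_KERNEL);
		if (!buf) {
			ret = -ENOMEM;
			goto e_data;
		}
		if (bad_case) {
			kfree(buf);	/* not covered by the labels below */
			ret = -EINVAL;
			goto e_data;	/* unwind 'data' and 'ctx' too */
		}
		kfree(buf);
	e_data:
		kfree(data);
	e_ctx:
		kfree(ctx);
		return ret;
	}
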
diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c
index 6b17d179ef8a..7ca2d3408e7a 100644
--- a/drivers/crypto/ccp/psp-dev.c
+++ b/drivers/crypto/ccp/psp-dev.c
@@ -21,6 +21,8 @@
#include <linux/ccp.h>
#include <linux/firmware.h>
+#include <asm/smp.h>
+
#include "sp-dev.h"
#include "psp-dev.h"
@@ -235,6 +237,13 @@ static int __sev_platform_init_locked(int *error)
return rc;
psp->sev_state = SEV_STATE_INIT;
+
+ /* Prepare for first SEV guest launch after INIT */
+ wbinvd_on_all_cpus();
+ rc = __sev_do_cmd_locked(SEV_CMD_DF_FLUSH, NULL, error);
+ if (rc)
+ return rc;
+
dev_dbg(psp->dev, "SEV firmware initialized\n");
return rc;
@@ -294,6 +303,9 @@ static int sev_ioctl_do_reset(struct sev_issue_cmd *argp)
{
int state, rc;
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
/*
* The SEV spec requires that FACTORY_RESET must be issued in
 * UNINIT state. Before we go further let's check if any guest is
@@ -338,6 +350,9 @@ static int sev_ioctl_do_pek_pdh_gen(int cmd, struct sev_issue_cmd *argp)
{
int rc;
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
if (psp_master->sev_state == SEV_STATE_UNINIT) {
rc = __sev_platform_init_locked(&argp->error);
if (rc)
@@ -354,6 +369,9 @@ static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp)
void *blob = NULL;
int ret;
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
return -EFAULT;
@@ -540,6 +558,9 @@ static int sev_ioctl_do_pek_import(struct sev_issue_cmd *argp)
void *pek_blob, *oca_blob;
int ret;
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
return -EFAULT;
@@ -695,6 +716,16 @@ static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp)
struct sev_data_pdh_cert_export *data;
int ret;
+ /* If platform is not in INIT state then transition it to INIT. */
+ if (psp_master->sev_state != SEV_STATE_INIT) {
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ ret = __sev_platform_init_locked(&argp->error);
+ if (ret)
+ return ret;
+ }
+
if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
return -EFAULT;
@@ -741,13 +772,6 @@ static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp)
data->cert_chain_len = input.cert_chain_len;
cmd:
- /* If platform is not in INIT state then transition it to INIT. */
- if (psp_master->sev_state != SEV_STATE_INIT) {
- ret = __sev_platform_init_locked(&argp->error);
- if (ret)
- goto e_free_cert;
- }
-
ret = __sev_do_cmd_locked(SEV_CMD_PDH_CERT_EXPORT, data, &argp->error);
/* If we query the length, FW responded with expected data. */
@@ -929,8 +953,22 @@ static int sev_misc_init(struct psp_device *psp)
static int psp_check_sev_support(struct psp_device *psp)
{
- /* Check if device supports SEV feature */
- if (!(ioread32(psp->io_regs + psp->vdata->feature_reg) & 1)) {
+ unsigned int val = ioread32(psp->io_regs + psp->vdata->feature_reg);
+
+ /*
+ * Check for access to the registers. If this read returns
+ * 0xffffffff, it's likely that the system is running a broken
+ * BIOS which disallows access to the device. Stop here and
+ * fail the PSP initialization (but not the load, as the CCP
+ * could get properly initialized).
+ */
+ if (val == 0xffffffff) {
+ dev_notice(psp->dev, "psp: unable to access the device: you might be running a broken BIOS.\n");
+ return -ENODEV;
+ }
+
+ if (!(val & 1)) {
+ /* Device does not support the SEV feature */
dev_dbg(psp->dev, "psp does not support SEV\n");
return -ENODEV;
}
@@ -1064,6 +1102,18 @@ void psp_pci_init(void)
/* Initialize the platform */
rc = sev_platform_init(&error);
+ if (rc && (error == SEV_RET_SECURE_DATA_INVALID)) {
+ /*
+ * INIT command returned an integrity check failure
+ * status code, meaning that firmware load and
+ * validation of SEV related persistent data has
+ * failed and persistent state has been erased.
+ * Retrying INIT command here should succeed.
+ */
+ dev_dbg(sp->dev, "SEV: retrying INIT command");
+ rc = sev_platform_init(&error);
+ }
+
if (rc) {
dev_err(sp->dev, "SEV: failed to INIT error %#x\n", error);
return;
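
A sketch of the retry logic added above: SEV_RET_SECURE_DATA_INVALID means the firmware detected corrupt persistent SEV state and has already erased it, so a single retry of the INIT command is expected to succeed. sev_platform_init() and the status code are the real interfaces from <linux/psp-sev.h>; the wrapper itself is illustrative.

	static int example_sev_init_retry(struct device *dev)
	{
		int error = 0;
		int rc = sev_platform_init(&error);

		if (rc && error == SEV_RET_SECURE_DATA_INVALID) {
			/* persistent state was erased; one retry should work */
			dev_dbg(dev, "SEV: retrying INIT command");
			rc = sev_platform_init(&error);
		}
		return rc;
	}
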
diff --git a/drivers/crypto/ccp/psp-dev.h b/drivers/crypto/ccp/psp-dev.h
index 82a084f02990..dd516b35ba86 100644
--- a/drivers/crypto/ccp/psp-dev.h
+++ b/drivers/crypto/ccp/psp-dev.h
@@ -23,6 +23,7 @@
#include <linux/dmaengine.h>
#include <linux/psp-sev.h>
#include <linux/miscdevice.h>
+#include <linux/capability.h>
#include "sp-dev.h"
diff --git a/drivers/crypto/ccree/cc_aead.c b/drivers/crypto/ccree/cc_aead.c
index d3e8faa03f15..64d318dc0d47 100644
--- a/drivers/crypto/ccree/cc_aead.c
+++ b/drivers/crypto/ccree/cc_aead.c
@@ -293,7 +293,8 @@ static unsigned int xcbc_setkey(struct cc_hw_desc *desc,
return 4;
}
-static int hmac_setkey(struct cc_hw_desc *desc, struct cc_aead_ctx *ctx)
+static unsigned int hmac_setkey(struct cc_hw_desc *desc,
+ struct cc_aead_ctx *ctx)
{
unsigned int hmac_pad_const[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST };
unsigned int digest_ofs = 0;
diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c
index 254b48797799..3112b58d0bb1 100644
--- a/drivers/crypto/ccree/cc_cipher.c
+++ b/drivers/crypto/ccree/cc_cipher.c
@@ -16,7 +16,7 @@
#include "cc_cipher.h"
#include "cc_request_mgr.h"
-#define MAX_ABLKCIPHER_SEQ_LEN 6
+#define MAX_SKCIPHER_SEQ_LEN 6
#define template_skcipher template_u.skcipher
@@ -822,7 +822,7 @@ static int cc_cipher_process(struct skcipher_request *req,
void *iv = req->iv;
struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
struct device *dev = drvdata_to_dev(ctx_p->drvdata);
- struct cc_hw_desc desc[MAX_ABLKCIPHER_SEQ_LEN];
+ struct cc_hw_desc desc[MAX_SKCIPHER_SEQ_LEN];
struct cc_crypto_req cc_req = {};
int rc;
unsigned int seq_len = 0;
diff --git a/drivers/crypto/chelsio/Kconfig b/drivers/crypto/chelsio/Kconfig
index 250150560e68..91e424378217 100644
--- a/drivers/crypto/chelsio/Kconfig
+++ b/drivers/crypto/chelsio/Kconfig
@@ -35,7 +35,7 @@ config CHELSIO_IPSEC_INLINE
config CRYPTO_DEV_CHELSIO_TLS
tristate "Chelsio Crypto Inline TLS Driver"
depends on CHELSIO_T4
- depends on TLS
+ depends on TLS_TOE
select CRYPTO_DEV_CHELSIO
---help---
Support Chelsio Inline TLS with Chelsio crypto accelerator.
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index 38ee38b37ae6..1b4a5664e604 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -93,7 +93,7 @@ static u32 round_constant[11] = {
0x1B000000, 0x36000000, 0x6C000000
};
-static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
+static int chcr_handle_cipher_resp(struct skcipher_request *req,
unsigned char *input, int err);
static inline struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
@@ -568,11 +568,11 @@ static void ulptx_walk_add_sg(struct ulptx_walk *walk,
}
}
-static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm)
+static inline int get_cryptoalg_subtype(struct crypto_skcipher *tfm)
{
- struct crypto_alg *alg = tfm->__crt_alg;
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
struct chcr_alg_template *chcr_crypto_alg =
- container_of(alg, struct chcr_alg_template, alg.crypto);
+ container_of(alg, struct chcr_alg_template, alg.skcipher);
return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
}
@@ -757,14 +757,14 @@ static inline void create_wreq(struct chcr_context *ctx,
*/
static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
struct sk_buff *skb = NULL;
struct chcr_wr *chcr_req;
struct cpl_rx_phys_dsgl *phys_cpl;
struct ulptx_sgl *ulptx;
- struct chcr_blkcipher_req_ctx *reqctx =
- ablkcipher_request_ctx(wrparam->req);
+ struct chcr_skcipher_req_ctx *reqctx =
+ skcipher_request_ctx(wrparam->req);
unsigned int temp = 0, transhdr_len, dst_size;
int error;
int nents;
@@ -807,9 +807,9 @@ static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr;
if ((reqctx->op == CHCR_DECRYPT_OP) &&
- (!(get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
+ (!(get_cryptoalg_subtype(tfm) ==
CRYPTO_ALG_SUB_TYPE_CTR)) &&
- (!(get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
+ (!(get_cryptoalg_subtype(tfm) ==
CRYPTO_ALG_SUB_TYPE_CTR_RFC3686))) {
generate_copy_rrkey(ablkctx, &chcr_req->key_ctx);
} else {
@@ -843,7 +843,7 @@ static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
if (reqctx->op && (ablkctx->ciph_mode ==
CHCR_SCMD_CIPHER_MODE_AES_CBC))
sg_pcopy_to_buffer(wrparam->req->src,
- sg_nents(wrparam->req->src), wrparam->req->info, 16,
+ sg_nents(wrparam->req->src), wrparam->req->iv, 16,
reqctx->processed + wrparam->bytes - AES_BLOCK_SIZE);
return skb;
@@ -866,11 +866,11 @@ static inline int chcr_keyctx_ck_size(unsigned int keylen)
return ck_size;
}
-static int chcr_cipher_fallback_setkey(struct crypto_ablkcipher *cipher,
+static int chcr_cipher_fallback_setkey(struct crypto_skcipher *cipher,
const u8 *key,
unsigned int keylen)
{
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
int err = 0;
@@ -886,7 +886,7 @@ static int chcr_cipher_fallback_setkey(struct crypto_ablkcipher *cipher,
return err;
}
-static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *cipher,
+static int chcr_aes_cbc_setkey(struct crypto_skcipher *cipher,
const u8 *key,
unsigned int keylen)
{
@@ -912,13 +912,13 @@ static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *cipher,
ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC;
return 0;
badkey_err:
- crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
ablkctx->enckey_len = 0;
return err;
}
-static int chcr_aes_ctr_setkey(struct crypto_ablkcipher *cipher,
+static int chcr_aes_ctr_setkey(struct crypto_skcipher *cipher,
const u8 *key,
unsigned int keylen)
{
@@ -943,13 +943,13 @@ static int chcr_aes_ctr_setkey(struct crypto_ablkcipher *cipher,
return 0;
badkey_err:
- crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
ablkctx->enckey_len = 0;
return err;
}
-static int chcr_aes_rfc3686_setkey(struct crypto_ablkcipher *cipher,
+static int chcr_aes_rfc3686_setkey(struct crypto_skcipher *cipher,
const u8 *key,
unsigned int keylen)
{
@@ -981,7 +981,7 @@ static int chcr_aes_rfc3686_setkey(struct crypto_ablkcipher *cipher,
return 0;
badkey_err:
- crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
ablkctx->enckey_len = 0;
return err;
@@ -1017,12 +1017,12 @@ static unsigned int adjust_ctr_overflow(u8 *iv, u32 bytes)
return bytes;
}
-static int chcr_update_tweak(struct ablkcipher_request *req, u8 *iv,
+static int chcr_update_tweak(struct skcipher_request *req, u8 *iv,
u32 isfinal)
{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
- struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
+ struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
struct crypto_aes_ctx aes;
int ret, i;
u8 *key;
@@ -1051,16 +1051,16 @@ static int chcr_update_tweak(struct ablkcipher_request *req, u8 *iv,
return 0;
}
-static int chcr_update_cipher_iv(struct ablkcipher_request *req,
+static int chcr_update_cipher_iv(struct skcipher_request *req,
struct cpl_fw6_pld *fw6_pld, u8 *iv)
{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
- int subtype = get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm));
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
+ int subtype = get_cryptoalg_subtype(tfm);
int ret = 0;
if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
- ctr_add_iv(iv, req->info, (reqctx->processed /
+ ctr_add_iv(iv, req->iv, (reqctx->processed /
AES_BLOCK_SIZE));
else if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686)
*(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
@@ -1071,7 +1071,7 @@ static int chcr_update_cipher_iv(struct ablkcipher_request *req,
else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
if (reqctx->op)
/*Updated before sending last WR*/
- memcpy(iv, req->info, AES_BLOCK_SIZE);
+ memcpy(iv, req->iv, AES_BLOCK_SIZE);
else
memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
}
@@ -1085,16 +1085,16 @@ static int chcr_update_cipher_iv(struct ablkcipher_request *req,
* for subsequent update requests
*/
-static int chcr_final_cipher_iv(struct ablkcipher_request *req,
+static int chcr_final_cipher_iv(struct skcipher_request *req,
struct cpl_fw6_pld *fw6_pld, u8 *iv)
{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
- int subtype = get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm));
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
+ int subtype = get_cryptoalg_subtype(tfm);
int ret = 0;
if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
- ctr_add_iv(iv, req->info, DIV_ROUND_UP(reqctx->processed,
+ ctr_add_iv(iv, req->iv, DIV_ROUND_UP(reqctx->processed,
AES_BLOCK_SIZE));
else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
ret = chcr_update_tweak(req, iv, 1);
@@ -1108,25 +1108,25 @@ static int chcr_final_cipher_iv(struct ablkcipher_request *req,
}
-static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
+static int chcr_handle_cipher_resp(struct skcipher_request *req,
unsigned char *input, int err)
{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
struct sk_buff *skb;
struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input;
- struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
+ struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
struct cipher_wr_param wrparam;
struct chcr_dev *dev = c_ctx(tfm)->dev;
int bytes;
if (err)
goto unmap;
- if (req->nbytes == reqctx->processed) {
+ if (req->cryptlen == reqctx->processed) {
chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
req);
- err = chcr_final_cipher_iv(req, fw6_pld, req->info);
+ err = chcr_final_cipher_iv(req, fw6_pld, req->iv);
goto complete;
}
@@ -1134,13 +1134,13 @@ static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 0,
CIP_SPACE_LEFT(ablkctx->enckey_len),
reqctx->src_ofst, reqctx->dst_ofst);
- if ((bytes + reqctx->processed) >= req->nbytes)
- bytes = req->nbytes - reqctx->processed;
+ if ((bytes + reqctx->processed) >= req->cryptlen)
+ bytes = req->cryptlen - reqctx->processed;
else
bytes = rounddown(bytes, 16);
} else {
/* CTR mode counter overflow */
- bytes = req->nbytes - reqctx->processed;
+ bytes = req->cryptlen - reqctx->processed;
}
err = chcr_update_cipher_iv(req, fw6_pld, reqctx->iv);
if (err)
@@ -1153,13 +1153,13 @@ static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
req->base.flags,
req->src,
req->dst,
- req->nbytes,
- req->info,
+ req->cryptlen,
+ req->iv,
reqctx->op);
goto complete;
}
- if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
+ if (get_cryptoalg_subtype(tfm) ==
CRYPTO_ALG_SUB_TYPE_CTR)
bytes = adjust_ctr_overflow(reqctx->iv, bytes);
wrparam.qid = u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx];
@@ -1185,33 +1185,33 @@ complete:
return err;
}
-static int process_cipher(struct ablkcipher_request *req,
+static int process_cipher(struct skcipher_request *req,
unsigned short qid,
struct sk_buff **skb,
unsigned short op_type)
{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
- struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ unsigned int ivsize = crypto_skcipher_ivsize(tfm);
+ struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
struct cipher_wr_param wrparam;
int bytes, err = -EINVAL;
reqctx->processed = 0;
- if (!req->info)
+ if (!req->iv)
goto error;
if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
- (req->nbytes == 0) ||
- (req->nbytes % crypto_ablkcipher_blocksize(tfm))) {
+ (req->cryptlen == 0) ||
+ (req->cryptlen % crypto_skcipher_blocksize(tfm))) {
pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n",
- ablkctx->enckey_len, req->nbytes, ivsize);
+ ablkctx->enckey_len, req->cryptlen, ivsize);
goto error;
}
err = chcr_cipher_dma_map(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
if (err)
goto error;
- if (req->nbytes < (SGE_MAX_WR_LEN - (sizeof(struct chcr_wr) +
+ if (req->cryptlen < (SGE_MAX_WR_LEN - (sizeof(struct chcr_wr) +
AES_MIN_KEY_SIZE +
sizeof(struct cpl_rx_phys_dsgl) +
/*Min dsgl size*/
@@ -1219,14 +1219,14 @@ static int process_cipher(struct ablkcipher_request *req,
/* Can be sent as Imm*/
unsigned int dnents = 0, transhdr_len, phys_dsgl, kctx_len;
- dnents = sg_nents_xlen(req->dst, req->nbytes,
+ dnents = sg_nents_xlen(req->dst, req->cryptlen,
CHCR_DST_SG_SIZE, 0);
phys_dsgl = get_space_for_phys_dsgl(dnents);
kctx_len = roundup(ablkctx->enckey_len, 16);
transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
- reqctx->imm = (transhdr_len + IV + req->nbytes) <=
+ reqctx->imm = (transhdr_len + IV + req->cryptlen) <=
SGE_MAX_WR_LEN;
- bytes = IV + req->nbytes;
+ bytes = IV + req->cryptlen;
} else {
reqctx->imm = 0;
@@ -1236,21 +1236,21 @@ static int process_cipher(struct ablkcipher_request *req,
bytes = chcr_sg_ent_in_wr(req->src, req->dst, 0,
CIP_SPACE_LEFT(ablkctx->enckey_len),
0, 0);
- if ((bytes + reqctx->processed) >= req->nbytes)
- bytes = req->nbytes - reqctx->processed;
+ if ((bytes + reqctx->processed) >= req->cryptlen)
+ bytes = req->cryptlen - reqctx->processed;
else
bytes = rounddown(bytes, 16);
} else {
- bytes = req->nbytes;
+ bytes = req->cryptlen;
}
- if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
+ if (get_cryptoalg_subtype(tfm) ==
CRYPTO_ALG_SUB_TYPE_CTR) {
- bytes = adjust_ctr_overflow(req->info, bytes);
+ bytes = adjust_ctr_overflow(req->iv, bytes);
}
- if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
+ if (get_cryptoalg_subtype(tfm) ==
CRYPTO_ALG_SUB_TYPE_CTR_RFC3686) {
memcpy(reqctx->iv, ablkctx->nonce, CTR_RFC3686_NONCE_SIZE);
- memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->info,
+ memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->iv,
CTR_RFC3686_IV_SIZE);
/* initialize counter portion of counter block */
@@ -1259,7 +1259,7 @@ static int process_cipher(struct ablkcipher_request *req,
} else {
- memcpy(reqctx->iv, req->info, IV);
+ memcpy(reqctx->iv, req->iv, IV);
}
if (unlikely(bytes == 0)) {
chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
@@ -1268,7 +1268,7 @@ static int process_cipher(struct ablkcipher_request *req,
req->base.flags,
req->src,
req->dst,
- req->nbytes,
+ req->cryptlen,
reqctx->iv,
op_type);
goto error;
@@ -1296,9 +1296,9 @@ error:
return err;
}
-static int chcr_aes_encrypt(struct ablkcipher_request *req)
+static int chcr_aes_encrypt(struct skcipher_request *req)
{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct chcr_dev *dev = c_ctx(tfm)->dev;
struct sk_buff *skb = NULL;
int err, isfull = 0;
@@ -1329,9 +1329,9 @@ error:
return err;
}
-static int chcr_aes_decrypt(struct ablkcipher_request *req)
+static int chcr_aes_decrypt(struct skcipher_request *req)
{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
struct chcr_dev *dev = c_ctx(tfm)->dev;
struct sk_buff *skb = NULL;
@@ -1398,27 +1398,28 @@ out:
return err;
}
-static int chcr_cra_init(struct crypto_tfm *tfm)
+static int chcr_init_tfm(struct crypto_skcipher *tfm)
{
- struct crypto_alg *alg = tfm->__crt_alg;
- struct chcr_context *ctx = crypto_tfm_ctx(tfm);
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
- ablkctx->sw_cipher = crypto_alloc_sync_skcipher(alg->cra_name, 0,
+ ablkctx->sw_cipher = crypto_alloc_sync_skcipher(alg->base.cra_name, 0,
CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(ablkctx->sw_cipher)) {
- pr_err("failed to allocate fallback for %s\n", alg->cra_name);
+ pr_err("failed to allocate fallback for %s\n", alg->base.cra_name);
return PTR_ERR(ablkctx->sw_cipher);
}
- tfm->crt_ablkcipher.reqsize = sizeof(struct chcr_blkcipher_req_ctx);
- return chcr_device_init(crypto_tfm_ctx(tfm));
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx));
+
+ return chcr_device_init(ctx);
}
-static int chcr_rfc3686_init(struct crypto_tfm *tfm)
+static int chcr_rfc3686_init(struct crypto_skcipher *tfm)
{
- struct crypto_alg *alg = tfm->__crt_alg;
- struct chcr_context *ctx = crypto_tfm_ctx(tfm);
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
/*RFC3686 initialises IV counter value to 1, rfc3686(ctr(aes))
@@ -1427,17 +1428,17 @@ static int chcr_rfc3686_init(struct crypto_tfm *tfm)
ablkctx->sw_cipher = crypto_alloc_sync_skcipher("ctr(aes)", 0,
CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(ablkctx->sw_cipher)) {
- pr_err("failed to allocate fallback for %s\n", alg->cra_name);
+ pr_err("failed to allocate fallback for %s\n", alg->base.cra_name);
return PTR_ERR(ablkctx->sw_cipher);
}
- tfm->crt_ablkcipher.reqsize = sizeof(struct chcr_blkcipher_req_ctx);
- return chcr_device_init(crypto_tfm_ctx(tfm));
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx));
+ return chcr_device_init(ctx);
}
-static void chcr_cra_exit(struct crypto_tfm *tfm)
+static void chcr_exit_tfm(struct crypto_skcipher *tfm)
{
- struct chcr_context *ctx = crypto_tfm_ctx(tfm);
+ struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
crypto_free_sync_skcipher(ablkctx->sw_cipher);
@@ -2056,8 +2057,8 @@ int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
err = chcr_handle_aead_resp(aead_request_cast(req), input, err);
break;
- case CRYPTO_ALG_TYPE_ABLKCIPHER:
- chcr_handle_cipher_resp(ablkcipher_request_cast(req),
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ chcr_handle_cipher_resp(skcipher_request_cast(req),
input, err);
break;
case CRYPTO_ALG_TYPE_AHASH:
@@ -2148,7 +2149,7 @@ out:
return err;
}
-static int chcr_aes_xts_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+static int chcr_aes_xts_setkey(struct crypto_skcipher *cipher, const u8 *key,
unsigned int key_len)
{
struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
@@ -2172,7 +2173,7 @@ static int chcr_aes_xts_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
return 0;
badkey_err:
- crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
ablkctx->enckey_len = 0;
return err;
@@ -2576,12 +2577,12 @@ void chcr_add_aead_dst_ent(struct aead_request *req,
dsgl_walk_end(&dsgl_walk, qid, ctx->pci_chan_id);
}
-void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
+void chcr_add_cipher_src_ent(struct skcipher_request *req,
void *ulptx,
struct cipher_wr_param *wrparam)
{
struct ulptx_walk ulp_walk;
- struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
+ struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
u8 *buf = ulptx;
memcpy(buf, reqctx->iv, IV);
@@ -2599,13 +2600,13 @@ void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
}
}
-void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
+void chcr_add_cipher_dst_ent(struct skcipher_request *req,
struct cpl_rx_phys_dsgl *phys_cpl,
struct cipher_wr_param *wrparam,
unsigned short qid)
{
- struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req);
+ struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
struct chcr_context *ctx = c_ctx(tfm);
struct dsgl_walk dsgl_walk;
@@ -2680,7 +2681,7 @@ void chcr_hash_dma_unmap(struct device *dev,
}
int chcr_cipher_dma_map(struct device *dev,
- struct ablkcipher_request *req)
+ struct skcipher_request *req)
{
int error;
@@ -2709,7 +2710,7 @@ err:
}
void chcr_cipher_dma_unmap(struct device *dev,
- struct ablkcipher_request *req)
+ struct skcipher_request *req)
{
if (req->src == req->dst) {
dma_unmap_sg(dev, req->src, sg_nents(req->src),
@@ -3712,82 +3713,76 @@ static int chcr_aead_decrypt(struct aead_request *req)
static struct chcr_alg_template driver_algs[] = {
/* AES-CBC */
{
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_CBC,
+ .type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_CBC,
.is_registered = 0,
- .alg.crypto = {
- .cra_name = "cbc(aes)",
- .cra_driver_name = "cbc-aes-chcr",
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_init = chcr_cra_init,
- .cra_exit = chcr_cra_exit,
- .cra_u.ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = chcr_aes_cbc_setkey,
- .encrypt = chcr_aes_encrypt,
- .decrypt = chcr_aes_decrypt,
+ .alg.skcipher = {
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "cbc-aes-chcr",
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+
+ .init = chcr_init_tfm,
+ .exit = chcr_exit_tfm,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = chcr_aes_cbc_setkey,
+ .encrypt = chcr_aes_encrypt,
+ .decrypt = chcr_aes_decrypt,
}
- }
},
{
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_XTS,
+ .type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_XTS,
.is_registered = 0,
- .alg.crypto = {
- .cra_name = "xts(aes)",
- .cra_driver_name = "xts-aes-chcr",
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_init = chcr_cra_init,
- .cra_exit = NULL,
- .cra_u .ablkcipher = {
- .min_keysize = 2 * AES_MIN_KEY_SIZE,
- .max_keysize = 2 * AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = chcr_aes_xts_setkey,
- .encrypt = chcr_aes_encrypt,
- .decrypt = chcr_aes_decrypt,
- }
+ .alg.skcipher = {
+ .base.cra_name = "xts(aes)",
+ .base.cra_driver_name = "xts-aes-chcr",
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+
+ .init = chcr_init_tfm,
+ .exit = chcr_exit_tfm,
+ .min_keysize = 2 * AES_MIN_KEY_SIZE,
+ .max_keysize = 2 * AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = chcr_aes_xts_setkey,
+ .encrypt = chcr_aes_encrypt,
+ .decrypt = chcr_aes_decrypt,
}
},
{
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_CTR,
+ .type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_CTR,
.is_registered = 0,
- .alg.crypto = {
- .cra_name = "ctr(aes)",
- .cra_driver_name = "ctr-aes-chcr",
- .cra_blocksize = 1,
- .cra_init = chcr_cra_init,
- .cra_exit = chcr_cra_exit,
- .cra_u.ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = chcr_aes_ctr_setkey,
- .encrypt = chcr_aes_encrypt,
- .decrypt = chcr_aes_decrypt,
- }
+ .alg.skcipher = {
+ .base.cra_name = "ctr(aes)",
+ .base.cra_driver_name = "ctr-aes-chcr",
+ .base.cra_blocksize = 1,
+
+ .init = chcr_init_tfm,
+ .exit = chcr_exit_tfm,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = chcr_aes_ctr_setkey,
+ .encrypt = chcr_aes_encrypt,
+ .decrypt = chcr_aes_decrypt,
}
},
{
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ .type = CRYPTO_ALG_TYPE_SKCIPHER |
CRYPTO_ALG_SUB_TYPE_CTR_RFC3686,
.is_registered = 0,
- .alg.crypto = {
- .cra_name = "rfc3686(ctr(aes))",
- .cra_driver_name = "rfc3686-ctr-aes-chcr",
- .cra_blocksize = 1,
- .cra_init = chcr_rfc3686_init,
- .cra_exit = chcr_cra_exit,
- .cra_u.ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE +
- CTR_RFC3686_NONCE_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE +
- CTR_RFC3686_NONCE_SIZE,
- .ivsize = CTR_RFC3686_IV_SIZE,
- .setkey = chcr_aes_rfc3686_setkey,
- .encrypt = chcr_aes_encrypt,
- .decrypt = chcr_aes_decrypt,
- }
+ .alg.skcipher = {
+ .base.cra_name = "rfc3686(ctr(aes))",
+ .base.cra_driver_name = "rfc3686-ctr-aes-chcr",
+ .base.cra_blocksize = 1,
+
+ .init = chcr_rfc3686_init,
+ .exit = chcr_exit_tfm,
+ .min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .setkey = chcr_aes_rfc3686_setkey,
+ .encrypt = chcr_aes_encrypt,
+ .decrypt = chcr_aes_decrypt,
}
},
/* SHA */
@@ -4254,10 +4249,10 @@ static int chcr_unregister_alg(void)
for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
- case CRYPTO_ALG_TYPE_ABLKCIPHER:
+ case CRYPTO_ALG_TYPE_SKCIPHER:
if (driver_algs[i].is_registered)
- crypto_unregister_alg(
- &driver_algs[i].alg.crypto);
+ crypto_unregister_skcipher(
+ &driver_algs[i].alg.skcipher);
break;
case CRYPTO_ALG_TYPE_AEAD:
if (driver_algs[i].is_registered)
@@ -4293,21 +4288,20 @@ static int chcr_register_alg(void)
if (driver_algs[i].is_registered)
continue;
switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
- case CRYPTO_ALG_TYPE_ABLKCIPHER:
- driver_algs[i].alg.crypto.cra_priority =
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ driver_algs[i].alg.skcipher.base.cra_priority =
CHCR_CRA_PRIORITY;
- driver_algs[i].alg.crypto.cra_module = THIS_MODULE;
- driver_algs[i].alg.crypto.cra_flags =
- CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
+ driver_algs[i].alg.skcipher.base.cra_module = THIS_MODULE;
+ driver_algs[i].alg.skcipher.base.cra_flags =
+ CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK;
- driver_algs[i].alg.crypto.cra_ctxsize =
+ driver_algs[i].alg.skcipher.base.cra_ctxsize =
sizeof(struct chcr_context) +
sizeof(struct ablk_ctx);
- driver_algs[i].alg.crypto.cra_alignmask = 0;
- driver_algs[i].alg.crypto.cra_type =
- &crypto_ablkcipher_type;
- err = crypto_register_alg(&driver_algs[i].alg.crypto);
- name = driver_algs[i].alg.crypto.cra_driver_name;
+ driver_algs[i].alg.skcipher.base.cra_alignmask = 0;
+
+ err = crypto_register_skcipher(&driver_algs[i].alg.skcipher);
+ name = driver_algs[i].alg.skcipher.base.cra_driver_name;
break;
case CRYPTO_ALG_TYPE_AEAD:
driver_algs[i].alg.aead.base.cra_flags =
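
The conversion above leans on one idiom throughout: given a crypto_skcipher, fetch its skcipher_alg and map it back to the driver template that embeds it. A sketch mirroring the chcr structures (the helper name is illustrative; crypto_skcipher_alg() and container_of() are the real interfaces):

	static inline struct chcr_alg_template *
	example_tfm_to_template(struct crypto_skcipher *tfm)
	{
		struct skcipher_alg *alg = crypto_skcipher_alg(tfm);

		/* 'alg' lives inside a template; recover the wrapper */
		return container_of(alg, struct chcr_alg_template, alg.skcipher);
	}
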
diff --git a/drivers/crypto/chelsio/chcr_algo.h b/drivers/crypto/chelsio/chcr_algo.h
index d1e6b51df0ce..f58c2b5c7fc5 100644
--- a/drivers/crypto/chelsio/chcr_algo.h
+++ b/drivers/crypto/chelsio/chcr_algo.h
@@ -287,7 +287,7 @@ struct hash_wr_param {
};
struct cipher_wr_param {
- struct ablkcipher_request *req;
+ struct skcipher_request *req;
char *iv;
int bytes;
unsigned short qid;
diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h
index 993c97e70565..6db2df8c8a05 100644
--- a/drivers/crypto/chelsio/chcr_crypto.h
+++ b/drivers/crypto/chelsio/chcr_crypto.h
@@ -160,9 +160,9 @@ static inline struct chcr_context *a_ctx(struct crypto_aead *tfm)
return crypto_aead_ctx(tfm);
}
-static inline struct chcr_context *c_ctx(struct crypto_ablkcipher *tfm)
+static inline struct chcr_context *c_ctx(struct crypto_skcipher *tfm)
{
- return crypto_ablkcipher_ctx(tfm);
+ return crypto_skcipher_ctx(tfm);
}
static inline struct chcr_context *h_ctx(struct crypto_ahash *tfm)
@@ -285,7 +285,7 @@ struct chcr_ahash_req_ctx {
u8 bfr2[CHCR_HASH_MAX_BLOCK_SIZE_128];
};
-struct chcr_blkcipher_req_ctx {
+struct chcr_skcipher_req_ctx {
struct sk_buff *skb;
struct scatterlist *dstsg;
unsigned int processed;
@@ -302,7 +302,7 @@ struct chcr_alg_template {
u32 type;
u32 is_registered;
union {
- struct crypto_alg crypto;
+ struct skcipher_alg skcipher;
struct ahash_alg hash;
struct aead_alg aead;
} alg;
@@ -321,12 +321,12 @@ void chcr_add_aead_dst_ent(struct aead_request *req,
struct cpl_rx_phys_dsgl *phys_cpl,
unsigned short qid);
void chcr_add_aead_src_ent(struct aead_request *req, struct ulptx_sgl *ulptx);
-void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
+void chcr_add_cipher_src_ent(struct skcipher_request *req,
void *ulptx,
struct cipher_wr_param *wrparam);
-int chcr_cipher_dma_map(struct device *dev, struct ablkcipher_request *req);
-void chcr_cipher_dma_unmap(struct device *dev, struct ablkcipher_request *req);
-void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
+int chcr_cipher_dma_map(struct device *dev, struct skcipher_request *req);
+void chcr_cipher_dma_unmap(struct device *dev, struct skcipher_request *req);
+void chcr_add_cipher_dst_ent(struct skcipher_request *req,
struct cpl_rx_phys_dsgl *phys_cpl,
struct cipher_wr_param *wrparam,
unsigned short qid);
diff --git a/drivers/crypto/chelsio/chcr_ipsec.c b/drivers/crypto/chelsio/chcr_ipsec.c
index 24355680f30a..9da0f93a330b 100644
--- a/drivers/crypto/chelsio/chcr_ipsec.c
+++ b/drivers/crypto/chelsio/chcr_ipsec.c
@@ -673,16 +673,16 @@ static inline void txq_advance(struct sge_txq *q, unsigned int n)
int chcr_ipsec_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct xfrm_state *x = xfrm_input_state(skb);
+ unsigned int last_desc, ndesc, flits = 0;
struct ipsec_sa_entry *sa_entry;
u64 *pos, *end, *before, *sgl;
+ struct tx_sw_desc *sgl_sdesc;
int qidx, left, credits;
- unsigned int flits = 0, ndesc;
- struct adapter *adap;
+ bool immediate = false;
struct sge_eth_txq *q;
+ struct adapter *adap;
struct port_info *pi;
- dma_addr_t addr[MAX_SKB_FRAGS + 1];
struct sec_path *sp;
- bool immediate = false;
if (!x->xso.offload_handle)
return NETDEV_TX_BUSY;
@@ -715,8 +715,14 @@ out_free: dev_kfree_skb_any(skb);
return NETDEV_TX_BUSY;
}
+ last_desc = q->q.pidx + ndesc - 1;
+ if (last_desc >= q->q.size)
+ last_desc -= q->q.size;
+ sgl_sdesc = &q->q.sdesc[last_desc];
+
if (!immediate &&
- unlikely(cxgb4_map_skb(adap->pdev_dev, skb, addr) < 0)) {
+ unlikely(cxgb4_map_skb(adap->pdev_dev, skb, sgl_sdesc->addr) < 0)) {
+ memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr));
q->mapping_err++;
goto out_free;
}
@@ -742,17 +748,10 @@ out_free: dev_kfree_skb_any(skb);
cxgb4_inline_tx_skb(skb, &q->q, sgl);
dev_consume_skb_any(skb);
} else {
- int last_desc;
-
cxgb4_write_sgl(skb, &q->q, (void *)sgl, end,
- 0, addr);
+ 0, sgl_sdesc->addr);
skb_orphan(skb);
-
- last_desc = q->q.pidx + ndesc - 1;
- if (last_desc >= q->q.size)
- last_desc -= q->q.size;
- q->q.sdesc[last_desc].skb = skb;
- q->q.sdesc[last_desc].sgl = (struct ulptx_sgl *)sgl;
+ sgl_sdesc->skb = skb;
}
txq_advance(&q->q, ndesc);
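
The reordering above computes the wrapped descriptor index before the DMA mapping, so the mapped addresses can be stored in the software descriptor directly. The arithmetic is a plain modular wrap, assuming ndesc never exceeds the queue size; a sketch with simplified stand-in names:

	static unsigned int example_last_desc(unsigned int pidx,
					      unsigned int ndesc,
					      unsigned int qsize)
	{
		unsigned int last = pidx + ndesc - 1;

		if (last >= qsize)	/* wrap around the end of the ring */
			last -= qsize;
		return last;
	}
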
diff --git a/drivers/crypto/chelsio/chtls/chtls.h b/drivers/crypto/chelsio/chtls/chtls.h
index 025c831d0899..d2bc655ab931 100644
--- a/drivers/crypto/chelsio/chtls/chtls.h
+++ b/drivers/crypto/chelsio/chtls/chtls.h
@@ -21,6 +21,7 @@
#include <crypto/internal/hash.h>
#include <linux/tls.h>
#include <net/tls.h>
+#include <net/tls_toe.h>
#include "t4fw_api.h"
#include "t4_msg.h"
@@ -118,7 +119,7 @@ struct tls_scmd {
};
struct chtls_dev {
- struct tls_device tlsdev;
+ struct tls_toe_device tlsdev;
struct list_head list;
struct cxgb4_lld_info *lldi;
struct pci_dev *pdev;
@@ -362,7 +363,7 @@ enum {
#define TCP_PAGE(sk) (sk->sk_frag.page)
#define TCP_OFF(sk) (sk->sk_frag.offset)
-static inline struct chtls_dev *to_chtls_dev(struct tls_device *tlsdev)
+static inline struct chtls_dev *to_chtls_dev(struct tls_toe_device *tlsdev)
{
return container_of(tlsdev, struct chtls_dev, tlsdev);
}
diff --git a/drivers/crypto/chelsio/chtls/chtls_io.c b/drivers/crypto/chelsio/chtls/chtls_io.c
index 98bc5a4cd5e7..5cf9b021220b 100644
--- a/drivers/crypto/chelsio/chtls/chtls_io.c
+++ b/drivers/crypto/chelsio/chtls/chtls_io.c
@@ -97,7 +97,7 @@ static struct sk_buff *create_flowc_wr_skb(struct sock *sk,
if (!skb)
return NULL;
- memcpy(__skb_put(skb, flowclen), flowc, flowclen);
+ __skb_put_data(skb, flowc, flowclen);
skb_set_queue_mapping(skb, (csk->txq_idx << 1) | CPL_PRIORITY_DATA);
return skb;
@@ -1437,7 +1437,7 @@ static int chtls_pt_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
csk->wr_max_credits))
sk->sk_write_space(sk);
- if (copied >= target && !sk->sk_backlog.tail)
+ if (copied >= target && !READ_ONCE(sk->sk_backlog.tail))
break;
if (copied) {
@@ -1470,7 +1470,7 @@ static int chtls_pt_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
break;
}
}
- if (sk->sk_backlog.tail) {
+ if (READ_ONCE(sk->sk_backlog.tail)) {
release_sock(sk);
lock_sock(sk);
chtls_cleanup_rbuf(sk, copied);
@@ -1615,7 +1615,7 @@ static int peekmsg(struct sock *sk, struct msghdr *msg,
break;
}
- if (sk->sk_backlog.tail) {
+ if (READ_ONCE(sk->sk_backlog.tail)) {
/* Do not sleep, just process backlog. */
release_sock(sk);
lock_sock(sk);
@@ -1743,7 +1743,7 @@ int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
csk->wr_max_credits))
sk->sk_write_space(sk);
- if (copied >= target && !sk->sk_backlog.tail)
+ if (copied >= target && !READ_ONCE(sk->sk_backlog.tail))
break;
if (copied) {
@@ -1774,7 +1774,7 @@ int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
}
}
- if (sk->sk_backlog.tail) {
+ if (READ_ONCE(sk->sk_backlog.tail)) {
release_sock(sk);
lock_sock(sk);
chtls_cleanup_rbuf(sk, copied);
@@ -1841,8 +1841,7 @@ skip_copy:
tp->urg_data = 0;
if (avail + offset >= skb->len) {
- if (likely(skb))
- chtls_free_skb(sk, skb);
+ chtls_free_skb(sk, skb);
buffers_freed++;
if (copied >= target &&
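
The READ_ONCE() conversions above annotate a lockless read: sk_backlog.tail can be updated concurrently by other CPUs, so the access must be marked to stop the compiler from tearing, caching, or re-reading it. A sketch of the idiom (the wrapper is illustrative; the field is the real struct sock member):

	static inline bool example_backlog_pending(const struct sock *sk)
	{
		/* single marked load of a field written by other CPUs */
		return READ_ONCE(sk->sk_backlog.tail) != NULL;
	}
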
diff --git a/drivers/crypto/chelsio/chtls/chtls_main.c b/drivers/crypto/chelsio/chtls/chtls_main.c
index e6df5b95ed47..18996935d8ba 100644
--- a/drivers/crypto/chelsio/chtls/chtls_main.c
+++ b/drivers/crypto/chelsio/chtls/chtls_main.c
@@ -124,7 +124,7 @@ static void chtls_stop_listen(struct chtls_dev *cdev, struct sock *sk)
mutex_unlock(&notify_mutex);
}
-static int chtls_inline_feature(struct tls_device *dev)
+static int chtls_inline_feature(struct tls_toe_device *dev)
{
struct net_device *netdev;
struct chtls_dev *cdev;
@@ -140,7 +140,7 @@ static int chtls_inline_feature(struct tls_device *dev)
return 0;
}
-static int chtls_create_hash(struct tls_device *dev, struct sock *sk)
+static int chtls_create_hash(struct tls_toe_device *dev, struct sock *sk)
{
struct chtls_dev *cdev = to_chtls_dev(dev);
@@ -149,7 +149,7 @@ static int chtls_create_hash(struct tls_device *dev, struct sock *sk)
return 0;
}
-static void chtls_destroy_hash(struct tls_device *dev, struct sock *sk)
+static void chtls_destroy_hash(struct tls_toe_device *dev, struct sock *sk)
{
struct chtls_dev *cdev = to_chtls_dev(dev);
@@ -161,7 +161,7 @@ static void chtls_free_uld(struct chtls_dev *cdev)
{
int i;
- tls_unregister_device(&cdev->tlsdev);
+ tls_toe_unregister_device(&cdev->tlsdev);
kvfree(cdev->kmap.addr);
idr_destroy(&cdev->hwtid_idr);
for (i = 0; i < (1 << RSPQ_HASH_BITS); i++)
@@ -173,27 +173,27 @@ static void chtls_free_uld(struct chtls_dev *cdev)
static inline void chtls_dev_release(struct kref *kref)
{
+ struct tls_toe_device *dev;
struct chtls_dev *cdev;
- struct tls_device *dev;
- dev = container_of(kref, struct tls_device, kref);
+ dev = container_of(kref, struct tls_toe_device, kref);
cdev = to_chtls_dev(dev);
chtls_free_uld(cdev);
}
static void chtls_register_dev(struct chtls_dev *cdev)
{
- struct tls_device *tlsdev = &cdev->tlsdev;
+ struct tls_toe_device *tlsdev = &cdev->tlsdev;
- strlcpy(tlsdev->name, "chtls", TLS_DEVICE_NAME_MAX);
+ strlcpy(tlsdev->name, "chtls", TLS_TOE_DEVICE_NAME_MAX);
strlcat(tlsdev->name, cdev->lldi->ports[0]->name,
- TLS_DEVICE_NAME_MAX);
+ TLS_TOE_DEVICE_NAME_MAX);
tlsdev->feature = chtls_inline_feature;
tlsdev->hash = chtls_create_hash;
tlsdev->unhash = chtls_destroy_hash;
tlsdev->release = chtls_dev_release;
kref_init(&tlsdev->kref);
- tls_register_device(tlsdev);
+ tls_toe_register_device(tlsdev);
cdev->cdev_state = CHTLS_CDEV_STATE_UP;
}
diff --git a/drivers/crypto/geode-aes.c b/drivers/crypto/geode-aes.c
index d81a1297cb9e..73a899e6f837 100644
--- a/drivers/crypto/geode-aes.c
+++ b/drivers/crypto/geode-aes.c
@@ -10,6 +10,7 @@
#include <linux/spinlock.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
+#include <crypto/internal/skcipher.h>
#include <linux/io.h>
#include <linux/delay.h>
@@ -23,12 +24,12 @@ static spinlock_t lock;
/* Write a 128 bit field (either a writable key or IV) */
static inline void
-_writefield(u32 offset, void *value)
+_writefield(u32 offset, const void *value)
{
int i;
for (i = 0; i < 4; i++)
- iowrite32(((u32 *) value)[i], _iobase + offset + (i * 4));
+ iowrite32(((const u32 *) value)[i], _iobase + offset + (i * 4));
}
/* Read a 128 bit field (either a writable key or IV) */
@@ -42,12 +43,12 @@ _readfield(u32 offset, void *value)
}
static int
-do_crypt(void *src, void *dst, int len, u32 flags)
+do_crypt(const void *src, void *dst, u32 len, u32 flags)
{
u32 status;
u32 counter = AES_OP_TIMEOUT;
- iowrite32(virt_to_phys(src), _iobase + AES_SOURCEA_REG);
+ iowrite32(virt_to_phys((void *)src), _iobase + AES_SOURCEA_REG);
iowrite32(virt_to_phys(dst), _iobase + AES_DSTA_REG);
iowrite32(len, _iobase + AES_LENA_REG);
@@ -64,16 +65,14 @@ do_crypt(void *src, void *dst, int len, u32 flags)
return counter ? 0 : 1;
}
-static unsigned int
-geode_aes_crypt(struct geode_aes_op *op)
+static void
+geode_aes_crypt(const struct geode_aes_tfm_ctx *tctx, const void *src,
+ void *dst, u32 len, u8 *iv, int mode, int dir)
{
u32 flags = 0;
unsigned long iflags;
int ret;
- if (op->len == 0)
- return 0;
-
/* If the source and destination are the same, then
* we need to turn on the coherent flags, otherwise
* we don't need to worry
@@ -81,32 +80,28 @@ geode_aes_crypt(struct geode_aes_op *op)
flags |= (AES_CTRL_DCA | AES_CTRL_SCA);
- if (op->dir == AES_DIR_ENCRYPT)
+ if (dir == AES_DIR_ENCRYPT)
flags |= AES_CTRL_ENCRYPT;
/* Start the critical section */
spin_lock_irqsave(&lock, iflags);
- if (op->mode == AES_MODE_CBC) {
+ if (mode == AES_MODE_CBC) {
flags |= AES_CTRL_CBC;
- _writefield(AES_WRITEIV0_REG, op->iv);
+ _writefield(AES_WRITEIV0_REG, iv);
}
- if (!(op->flags & AES_FLAGS_HIDDENKEY)) {
- flags |= AES_CTRL_WRKEY;
- _writefield(AES_WRITEKEY0_REG, op->key);
- }
+ flags |= AES_CTRL_WRKEY;
+ _writefield(AES_WRITEKEY0_REG, tctx->key);
- ret = do_crypt(op->src, op->dst, op->len, flags);
+ ret = do_crypt(src, dst, len, flags);
BUG_ON(ret);
- if (op->mode == AES_MODE_CBC)
- _readfield(AES_WRITEIV0_REG, op->iv);
+ if (mode == AES_MODE_CBC)
+ _readfield(AES_WRITEIV0_REG, iv);
spin_unlock_irqrestore(&lock, iflags);
-
- return op->len;
}
/* CRYPTO-API Functions */
@@ -114,13 +109,13 @@ geode_aes_crypt(struct geode_aes_op *op)
static int geode_setkey_cip(struct crypto_tfm *tfm, const u8 *key,
unsigned int len)
{
- struct geode_aes_op *op = crypto_tfm_ctx(tfm);
+ struct geode_aes_tfm_ctx *tctx = crypto_tfm_ctx(tfm);
unsigned int ret;
- op->keylen = len;
+ tctx->keylen = len;
if (len == AES_KEYSIZE_128) {
- memcpy(op->key, key, len);
+ memcpy(tctx->key, key, len);
return 0;
}
@@ -133,135 +128,93 @@ static int geode_setkey_cip(struct crypto_tfm *tfm, const u8 *key,
/*
* The requested key size is not supported by HW, do a fallback
*/
- op->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
- op->fallback.cip->base.crt_flags |= (tfm->crt_flags & CRYPTO_TFM_REQ_MASK);
+ tctx->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
+ tctx->fallback.cip->base.crt_flags |=
+ (tfm->crt_flags & CRYPTO_TFM_REQ_MASK);
- ret = crypto_cipher_setkey(op->fallback.cip, key, len);
+ ret = crypto_cipher_setkey(tctx->fallback.cip, key, len);
if (ret) {
tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
- tfm->crt_flags |= (op->fallback.cip->base.crt_flags & CRYPTO_TFM_RES_MASK);
+ tfm->crt_flags |= (tctx->fallback.cip->base.crt_flags &
+ CRYPTO_TFM_RES_MASK);
}
return ret;
}
-static int geode_setkey_blk(struct crypto_tfm *tfm, const u8 *key,
- unsigned int len)
+static int geode_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int len)
{
- struct geode_aes_op *op = crypto_tfm_ctx(tfm);
+ struct geode_aes_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
unsigned int ret;
- op->keylen = len;
+ tctx->keylen = len;
if (len == AES_KEYSIZE_128) {
- memcpy(op->key, key, len);
+ memcpy(tctx->key, key, len);
return 0;
}
if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256) {
/* not supported at all */
- tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
/*
* The requested key size is not supported by HW, do a fallback
*/
- op->fallback.blk->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
- op->fallback.blk->base.crt_flags |= (tfm->crt_flags & CRYPTO_TFM_REQ_MASK);
-
- ret = crypto_blkcipher_setkey(op->fallback.blk, key, len);
- if (ret) {
- tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
- tfm->crt_flags |= (op->fallback.blk->base.crt_flags & CRYPTO_TFM_RES_MASK);
- }
- return ret;
-}
-
-static int fallback_blk_dec(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
-{
- unsigned int ret;
- struct crypto_blkcipher *tfm;
- struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
-
- tfm = desc->tfm;
- desc->tfm = op->fallback.blk;
-
- ret = crypto_blkcipher_decrypt_iv(desc, dst, src, nbytes);
-
- desc->tfm = tfm;
- return ret;
-}
-static int fallback_blk_enc(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
-{
- unsigned int ret;
- struct crypto_blkcipher *tfm;
- struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
-
- tfm = desc->tfm;
- desc->tfm = op->fallback.blk;
-
- ret = crypto_blkcipher_encrypt_iv(desc, dst, src, nbytes);
-
- desc->tfm = tfm;
+ crypto_skcipher_clear_flags(tctx->fallback.skcipher,
+ CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_set_flags(tctx->fallback.skcipher,
+ crypto_skcipher_get_flags(tfm) &
+ CRYPTO_TFM_REQ_MASK);
+ ret = crypto_skcipher_setkey(tctx->fallback.skcipher, key, len);
+ crypto_skcipher_set_flags(tfm,
+ crypto_skcipher_get_flags(tctx->fallback.skcipher) &
+ CRYPTO_TFM_RES_MASK);
return ret;
}
static void
geode_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
- struct geode_aes_op *op = crypto_tfm_ctx(tfm);
+ const struct geode_aes_tfm_ctx *tctx = crypto_tfm_ctx(tfm);
- if (unlikely(op->keylen != AES_KEYSIZE_128)) {
- crypto_cipher_encrypt_one(op->fallback.cip, out, in);
+ if (unlikely(tctx->keylen != AES_KEYSIZE_128)) {
+ crypto_cipher_encrypt_one(tctx->fallback.cip, out, in);
return;
}
- op->src = (void *) in;
- op->dst = (void *) out;
- op->mode = AES_MODE_ECB;
- op->flags = 0;
- op->len = AES_BLOCK_SIZE;
- op->dir = AES_DIR_ENCRYPT;
-
- geode_aes_crypt(op);
+ geode_aes_crypt(tctx, in, out, AES_BLOCK_SIZE, NULL,
+ AES_MODE_ECB, AES_DIR_ENCRYPT);
}
static void
geode_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
- struct geode_aes_op *op = crypto_tfm_ctx(tfm);
+ const struct geode_aes_tfm_ctx *tctx = crypto_tfm_ctx(tfm);
- if (unlikely(op->keylen != AES_KEYSIZE_128)) {
- crypto_cipher_decrypt_one(op->fallback.cip, out, in);
+ if (unlikely(tctx->keylen != AES_KEYSIZE_128)) {
+ crypto_cipher_decrypt_one(tctx->fallback.cip, out, in);
return;
}
- op->src = (void *) in;
- op->dst = (void *) out;
- op->mode = AES_MODE_ECB;
- op->flags = 0;
- op->len = AES_BLOCK_SIZE;
- op->dir = AES_DIR_DECRYPT;
-
- geode_aes_crypt(op);
+ geode_aes_crypt(tctx, in, out, AES_BLOCK_SIZE, NULL,
+ AES_MODE_ECB, AES_DIR_DECRYPT);
}
static int fallback_init_cip(struct crypto_tfm *tfm)
{
const char *name = crypto_tfm_alg_name(tfm);
- struct geode_aes_op *op = crypto_tfm_ctx(tfm);
+ struct geode_aes_tfm_ctx *tctx = crypto_tfm_ctx(tfm);
- op->fallback.cip = crypto_alloc_cipher(name, 0,
- CRYPTO_ALG_NEED_FALLBACK);
+ tctx->fallback.cip = crypto_alloc_cipher(name, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
- if (IS_ERR(op->fallback.cip)) {
+ if (IS_ERR(tctx->fallback.cip)) {
printk(KERN_ERR "Error allocating fallback algo %s\n", name);
- return PTR_ERR(op->fallback.cip);
+ return PTR_ERR(tctx->fallback.cip);
}
return 0;
@@ -269,10 +222,9 @@ static int fallback_init_cip(struct crypto_tfm *tfm)
static void fallback_exit_cip(struct crypto_tfm *tfm)
{
- struct geode_aes_op *op = crypto_tfm_ctx(tfm);
+ struct geode_aes_tfm_ctx *tctx = crypto_tfm_ctx(tfm);
- crypto_free_cipher(op->fallback.cip);
- op->fallback.cip = NULL;
+ crypto_free_cipher(tctx->fallback.cip);
}
static struct crypto_alg geode_alg = {
@@ -285,7 +237,7 @@ static struct crypto_alg geode_alg = {
.cra_init = fallback_init_cip,
.cra_exit = fallback_exit_cip,
.cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct geode_aes_op),
+ .cra_ctxsize = sizeof(struct geode_aes_tfm_ctx),
.cra_module = THIS_MODULE,
.cra_u = {
.cipher = {
@@ -298,209 +250,126 @@ static struct crypto_alg geode_alg = {
}
};
-static int
-geode_cbc_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int geode_init_skcipher(struct crypto_skcipher *tfm)
{
- struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
- struct blkcipher_walk walk;
- int err, ret;
-
- if (unlikely(op->keylen != AES_KEYSIZE_128))
- return fallback_blk_dec(desc, dst, src, nbytes);
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
- op->iv = walk.iv;
-
- while ((nbytes = walk.nbytes)) {
- op->src = walk.src.virt.addr,
- op->dst = walk.dst.virt.addr;
- op->mode = AES_MODE_CBC;
- op->len = nbytes - (nbytes % AES_BLOCK_SIZE);
- op->dir = AES_DIR_DECRYPT;
+ const char *name = crypto_tfm_alg_name(&tfm->base);
+ struct geode_aes_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
- ret = geode_aes_crypt(op);
-
- nbytes -= ret;
- err = blkcipher_walk_done(desc, &walk, nbytes);
+ tctx->fallback.skcipher =
+ crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_ASYNC);
+ if (IS_ERR(tctx->fallback.skcipher)) {
+ printk(KERN_ERR "Error allocating fallback algo %s\n", name);
+ return PTR_ERR(tctx->fallback.skcipher);
}
- return err;
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) +
+ crypto_skcipher_reqsize(tctx->fallback.skcipher));
+ return 0;
}
-static int
-geode_cbc_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static void geode_exit_skcipher(struct crypto_skcipher *tfm)
{
- struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
- struct blkcipher_walk walk;
- int err, ret;
-
- if (unlikely(op->keylen != AES_KEYSIZE_128))
- return fallback_blk_enc(desc, dst, src, nbytes);
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
- op->iv = walk.iv;
-
- while ((nbytes = walk.nbytes)) {
- op->src = walk.src.virt.addr,
- op->dst = walk.dst.virt.addr;
- op->mode = AES_MODE_CBC;
- op->len = nbytes - (nbytes % AES_BLOCK_SIZE);
- op->dir = AES_DIR_ENCRYPT;
-
- ret = geode_aes_crypt(op);
- nbytes -= ret;
- err = blkcipher_walk_done(desc, &walk, nbytes);
- }
+ struct geode_aes_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
- return err;
+ crypto_free_skcipher(tctx->fallback.skcipher);
}
-static int fallback_init_blk(struct crypto_tfm *tfm)
+static int geode_skcipher_crypt(struct skcipher_request *req, int mode, int dir)
{
- const char *name = crypto_tfm_alg_name(tfm);
- struct geode_aes_op *op = crypto_tfm_ctx(tfm);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ const struct geode_aes_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
+ unsigned int nbytes;
+ int err;
+
+ if (unlikely(tctx->keylen != AES_KEYSIZE_128)) {
+ struct skcipher_request *subreq = skcipher_request_ctx(req);
+
+ *subreq = *req;
+ skcipher_request_set_tfm(subreq, tctx->fallback.skcipher);
+ if (dir == AES_DIR_DECRYPT)
+ return crypto_skcipher_decrypt(subreq);
+ else
+ return crypto_skcipher_encrypt(subreq);
+ }
- op->fallback.blk = crypto_alloc_blkcipher(name, 0,
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
+ err = skcipher_walk_virt(&walk, req, false);
- if (IS_ERR(op->fallback.blk)) {
- printk(KERN_ERR "Error allocating fallback algo %s\n", name);
- return PTR_ERR(op->fallback.blk);
+ while ((nbytes = walk.nbytes) != 0) {
+ geode_aes_crypt(tctx, walk.src.virt.addr, walk.dst.virt.addr,
+ round_down(nbytes, AES_BLOCK_SIZE),
+ walk.iv, mode, dir);
+ err = skcipher_walk_done(&walk, nbytes % AES_BLOCK_SIZE);
}
- return 0;
+ return err;
}
-static void fallback_exit_blk(struct crypto_tfm *tfm)
+static int geode_cbc_encrypt(struct skcipher_request *req)
{
- struct geode_aes_op *op = crypto_tfm_ctx(tfm);
-
- crypto_free_blkcipher(op->fallback.blk);
- op->fallback.blk = NULL;
+ return geode_skcipher_crypt(req, AES_MODE_CBC, AES_DIR_ENCRYPT);
}
-static struct crypto_alg geode_cbc_alg = {
- .cra_name = "cbc(aes)",
- .cra_driver_name = "cbc-aes-geode",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_NEED_FALLBACK,
- .cra_init = fallback_init_blk,
- .cra_exit = fallback_exit_blk,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct geode_aes_op),
- .cra_alignmask = 15,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = geode_setkey_blk,
- .encrypt = geode_cbc_encrypt,
- .decrypt = geode_cbc_decrypt,
- .ivsize = AES_BLOCK_SIZE,
- }
- }
-};
-
-static int
-geode_ecb_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int geode_cbc_decrypt(struct skcipher_request *req)
{
- struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
- struct blkcipher_walk walk;
- int err, ret;
-
- if (unlikely(op->keylen != AES_KEYSIZE_128))
- return fallback_blk_dec(desc, dst, src, nbytes);
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
-
- while ((nbytes = walk.nbytes)) {
- op->src = walk.src.virt.addr,
- op->dst = walk.dst.virt.addr;
- op->mode = AES_MODE_ECB;
- op->len = nbytes - (nbytes % AES_BLOCK_SIZE);
- op->dir = AES_DIR_DECRYPT;
-
- ret = geode_aes_crypt(op);
- nbytes -= ret;
- err = blkcipher_walk_done(desc, &walk, nbytes);
- }
-
- return err;
+ return geode_skcipher_crypt(req, AES_MODE_CBC, AES_DIR_DECRYPT);
}
-static int
-geode_ecb_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int geode_ecb_encrypt(struct skcipher_request *req)
{
- struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
- struct blkcipher_walk walk;
- int err, ret;
-
- if (unlikely(op->keylen != AES_KEYSIZE_128))
- return fallback_blk_enc(desc, dst, src, nbytes);
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
-
- while ((nbytes = walk.nbytes)) {
- op->src = walk.src.virt.addr,
- op->dst = walk.dst.virt.addr;
- op->mode = AES_MODE_ECB;
- op->len = nbytes - (nbytes % AES_BLOCK_SIZE);
- op->dir = AES_DIR_ENCRYPT;
-
- ret = geode_aes_crypt(op);
- nbytes -= ret;
- ret = blkcipher_walk_done(desc, &walk, nbytes);
- }
+ return geode_skcipher_crypt(req, AES_MODE_ECB, AES_DIR_ENCRYPT);
+}
- return err;
+static int geode_ecb_decrypt(struct skcipher_request *req)
+{
+ return geode_skcipher_crypt(req, AES_MODE_ECB, AES_DIR_DECRYPT);
}
-static struct crypto_alg geode_ecb_alg = {
- .cra_name = "ecb(aes)",
- .cra_driver_name = "ecb-aes-geode",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_NEED_FALLBACK,
- .cra_init = fallback_init_blk,
- .cra_exit = fallback_exit_blk,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct geode_aes_op),
- .cra_alignmask = 15,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = geode_setkey_blk,
- .encrypt = geode_ecb_encrypt,
- .decrypt = geode_ecb_decrypt,
- }
- }
+static struct skcipher_alg geode_skcipher_algs[] = {
+ {
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "cbc-aes-geode",
+ .base.cra_priority = 400,
+ .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct geode_aes_tfm_ctx),
+ .base.cra_alignmask = 15,
+ .base.cra_module = THIS_MODULE,
+ .init = geode_init_skcipher,
+ .exit = geode_exit_skcipher,
+ .setkey = geode_setkey_skcipher,
+ .encrypt = geode_cbc_encrypt,
+ .decrypt = geode_cbc_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ }, {
+ .base.cra_name = "ecb(aes)",
+ .base.cra_driver_name = "ecb-aes-geode",
+ .base.cra_priority = 400,
+ .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct geode_aes_tfm_ctx),
+ .base.cra_alignmask = 15,
+ .base.cra_module = THIS_MODULE,
+ .init = geode_init_skcipher,
+ .exit = geode_exit_skcipher,
+ .setkey = geode_setkey_skcipher,
+ .encrypt = geode_ecb_encrypt,
+ .decrypt = geode_ecb_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ },
};
static void geode_aes_remove(struct pci_dev *dev)
{
crypto_unregister_alg(&geode_alg);
- crypto_unregister_alg(&geode_ecb_alg);
- crypto_unregister_alg(&geode_cbc_alg);
+ crypto_unregister_skciphers(geode_skcipher_algs,
+ ARRAY_SIZE(geode_skcipher_algs));
pci_iounmap(dev, _iobase);
_iobase = NULL;
@@ -538,20 +407,14 @@ static int geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id)
if (ret)
goto eiomap;
- ret = crypto_register_alg(&geode_ecb_alg);
+ ret = crypto_register_skciphers(geode_skcipher_algs,
+ ARRAY_SIZE(geode_skcipher_algs));
if (ret)
goto ealg;
- ret = crypto_register_alg(&geode_cbc_alg);
- if (ret)
- goto eecb;
-
dev_notice(&dev->dev, "GEODE AES engine enabled.\n");
return 0;
- eecb:
- crypto_unregister_alg(&geode_ecb_alg);
-
ealg:
crypto_unregister_alg(&geode_alg);
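For context on the CRYPTO_ALG_NEED_FALLBACK flag carried over above: a minimal sketch, assuming the fallback allocation lives in geode_init_skcipher() (not shown in this hunk), of how a driver typically obtains a software fallback for key sizes the hardware cannot handle:

static int sketch_init_skcipher(struct crypto_skcipher *tfm)
{
	struct geode_aes_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);

	/* Ask for any other provider of the same algorithm name. */
	tctx->fallback.skcipher =
		crypto_alloc_skcipher(crypto_tfm_alg_name(crypto_skcipher_tfm(tfm)),
				      0, CRYPTO_ALG_NEED_FALLBACK |
					 CRYPTO_ALG_ASYNC);
	if (IS_ERR(tctx->fallback.skcipher))
		return PTR_ERR(tctx->fallback.skcipher);

	/* Reserve request memory for this tfm plus its fallback. */
	crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) +
		crypto_skcipher_reqsize(tctx->fallback.skcipher));
	return 0;
}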
diff --git a/drivers/crypto/geode-aes.h b/drivers/crypto/geode-aes.h
index 5c6e131a8f9d..6d0a0cdc7647 100644
--- a/drivers/crypto/geode-aes.h
+++ b/drivers/crypto/geode-aes.h
@@ -46,21 +46,10 @@
#define AES_OP_TIMEOUT 0x50000
-struct geode_aes_op {
-
- void *src;
- void *dst;
-
- u32 mode;
- u32 dir;
- u32 flags;
- int len;
-
+struct geode_aes_tfm_ctx {
u8 key[AES_KEYSIZE_128];
- u8 *iv;
-
union {
- struct crypto_blkcipher *blk;
+ struct crypto_skcipher *skcipher;
struct crypto_cipher *cip;
} fallback;
u32 keylen;
diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
index a18e62df68d9..4e7323884ae3 100644
--- a/drivers/crypto/hifn_795x.c
+++ b/drivers/crypto/hifn_795x.c
@@ -22,6 +22,7 @@
#include <crypto/algapi.h>
#include <crypto/internal/des.h>
+#include <crypto/internal/skcipher.h>
static char hifn_pll_ref[sizeof("extNNN")] = "ext";
module_param_string(hifn_pll_ref, hifn_pll_ref, sizeof(hifn_pll_ref), 0444);
@@ -596,7 +597,7 @@ struct hifn_crypt_result {
struct hifn_crypto_alg {
struct list_head entry;
- struct crypto_alg alg;
+ struct skcipher_alg alg;
struct hifn_device *dev;
};
@@ -1404,7 +1405,7 @@ static void hifn_cipher_walk_exit(struct hifn_cipher_walk *w)
w->num = 0;
}
-static int ablkcipher_add(unsigned int *drestp, struct scatterlist *dst,
+static int skcipher_add(unsigned int *drestp, struct scatterlist *dst,
unsigned int size, unsigned int *nbytesp)
{
unsigned int copy, drest = *drestp, nbytes = *nbytesp;
@@ -1433,11 +1434,11 @@ static int ablkcipher_add(unsigned int *drestp, struct scatterlist *dst,
return idx;
}
-static int hifn_cipher_walk(struct ablkcipher_request *req,
+static int hifn_cipher_walk(struct skcipher_request *req,
struct hifn_cipher_walk *w)
{
struct scatterlist *dst, *t;
- unsigned int nbytes = req->nbytes, offset, copy, diff;
+ unsigned int nbytes = req->cryptlen, offset, copy, diff;
int idx, tidx, err;
tidx = idx = 0;
@@ -1459,7 +1460,7 @@ static int hifn_cipher_walk(struct ablkcipher_request *req,
t = &w->cache[idx];
- err = ablkcipher_add(&dlen, dst, slen, &nbytes);
+ err = skcipher_add(&dlen, dst, slen, &nbytes);
if (err < 0)
return err;
@@ -1498,7 +1499,7 @@ static int hifn_cipher_walk(struct ablkcipher_request *req,
dst = &req->dst[idx];
- err = ablkcipher_add(&dlen, dst, nbytes, &nbytes);
+ err = skcipher_add(&dlen, dst, nbytes, &nbytes);
if (err < 0)
return err;
@@ -1518,13 +1519,13 @@ static int hifn_cipher_walk(struct ablkcipher_request *req,
return tidx;
}
-static int hifn_setup_session(struct ablkcipher_request *req)
+static int hifn_setup_session(struct skcipher_request *req)
{
struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm);
- struct hifn_request_context *rctx = ablkcipher_request_ctx(req);
+ struct hifn_request_context *rctx = skcipher_request_ctx(req);
struct hifn_device *dev = ctx->dev;
unsigned long dlen, flags;
- unsigned int nbytes = req->nbytes, idx = 0;
+ unsigned int nbytes = req->cryptlen, idx = 0;
int err = -EINVAL, sg_num;
struct scatterlist *dst;
@@ -1563,7 +1564,7 @@ static int hifn_setup_session(struct ablkcipher_request *req)
goto err_out;
}
- err = hifn_setup_dma(dev, ctx, rctx, req->src, req->dst, req->nbytes, req);
+ err = hifn_setup_dma(dev, ctx, rctx, req->src, req->dst, req->cryptlen, req);
if (err)
goto err_out;
@@ -1610,7 +1611,7 @@ static int hifn_start_device(struct hifn_device *dev)
return 0;
}
-static int ablkcipher_get(void *saddr, unsigned int *srestp, unsigned int offset,
+static int skcipher_get(void *saddr, unsigned int *srestp, unsigned int offset,
struct scatterlist *dst, unsigned int size, unsigned int *nbytesp)
{
unsigned int srest = *srestp, nbytes = *nbytesp, copy;
@@ -1660,12 +1661,12 @@ static inline void hifn_complete_sa(struct hifn_device *dev, int i)
BUG_ON(dev->started < 0);
}
-static void hifn_process_ready(struct ablkcipher_request *req, int error)
+static void hifn_process_ready(struct skcipher_request *req, int error)
{
- struct hifn_request_context *rctx = ablkcipher_request_ctx(req);
+ struct hifn_request_context *rctx = skcipher_request_ctx(req);
if (rctx->walk.flags & ASYNC_FLAGS_MISALIGNED) {
- unsigned int nbytes = req->nbytes;
+ unsigned int nbytes = req->cryptlen;
int idx = 0, err;
struct scatterlist *dst, *t;
void *saddr;
@@ -1688,7 +1689,7 @@ static void hifn_process_ready(struct ablkcipher_request *req, int error)
saddr = kmap_atomic(sg_page(t));
- err = ablkcipher_get(saddr, &t->length, t->offset,
+ err = skcipher_get(saddr, &t->length, t->offset,
dst, nbytes, &nbytes);
if (err < 0) {
kunmap_atomic(saddr);
@@ -1910,7 +1911,7 @@ static void hifn_flush(struct hifn_device *dev)
{
unsigned long flags;
struct crypto_async_request *async_req;
- struct ablkcipher_request *req;
+ struct skcipher_request *req;
struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
int i;
@@ -1926,7 +1927,7 @@ static void hifn_flush(struct hifn_device *dev)
spin_lock_irqsave(&dev->lock, flags);
while ((async_req = crypto_dequeue_request(&dev->queue))) {
- req = ablkcipher_request_cast(async_req);
+ req = skcipher_request_cast(async_req);
spin_unlock_irqrestore(&dev->lock, flags);
hifn_process_ready(req, -ENODEV);
@@ -1936,14 +1937,14 @@ static void hifn_flush(struct hifn_device *dev)
spin_unlock_irqrestore(&dev->lock, flags);
}
-static int hifn_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+static int hifn_setkey(struct crypto_skcipher *cipher, const u8 *key,
unsigned int len)
{
- struct hifn_context *ctx = crypto_ablkcipher_ctx(cipher);
+ struct hifn_context *ctx = crypto_skcipher_ctx(cipher);
struct hifn_device *dev = ctx->dev;
int err;
- err = verify_ablkcipher_des_key(cipher, key);
+ err = verify_skcipher_des_key(cipher, key);
if (err)
return err;
@@ -1955,14 +1956,14 @@ static int hifn_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
return 0;
}
-static int hifn_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+static int hifn_des3_setkey(struct crypto_skcipher *cipher, const u8 *key,
unsigned int len)
{
- struct hifn_context *ctx = crypto_ablkcipher_ctx(cipher);
+ struct hifn_context *ctx = crypto_skcipher_ctx(cipher);
struct hifn_device *dev = ctx->dev;
int err;
- err = verify_ablkcipher_des3_key(cipher, key);
+ err = verify_skcipher_des3_key(cipher, key);
if (err)
return err;
@@ -1974,36 +1975,36 @@ static int hifn_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
return 0;
}
-static int hifn_handle_req(struct ablkcipher_request *req)
+static int hifn_handle_req(struct skcipher_request *req)
{
struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm);
struct hifn_device *dev = ctx->dev;
int err = -EAGAIN;
- if (dev->started + DIV_ROUND_UP(req->nbytes, PAGE_SIZE) <= HIFN_QUEUE_LENGTH)
+ if (dev->started + DIV_ROUND_UP(req->cryptlen, PAGE_SIZE) <= HIFN_QUEUE_LENGTH)
err = hifn_setup_session(req);
if (err == -EAGAIN) {
unsigned long flags;
spin_lock_irqsave(&dev->lock, flags);
- err = ablkcipher_enqueue_request(&dev->queue, req);
+ err = crypto_enqueue_request(&dev->queue, &req->base);
spin_unlock_irqrestore(&dev->lock, flags);
}
return err;
}
-static int hifn_setup_crypto_req(struct ablkcipher_request *req, u8 op,
+static int hifn_setup_crypto_req(struct skcipher_request *req, u8 op,
u8 type, u8 mode)
{
struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm);
- struct hifn_request_context *rctx = ablkcipher_request_ctx(req);
+ struct hifn_request_context *rctx = skcipher_request_ctx(req);
unsigned ivsize;
- ivsize = crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(req));
+ ivsize = crypto_skcipher_ivsize(crypto_skcipher_reqtfm(req));
- if (req->info && mode != ACRYPTO_MODE_ECB) {
+ if (req->iv && mode != ACRYPTO_MODE_ECB) {
if (type == ACRYPTO_TYPE_AES_128)
ivsize = HIFN_AES_IV_LENGTH;
else if (type == ACRYPTO_TYPE_DES)
@@ -2022,7 +2023,7 @@ static int hifn_setup_crypto_req(struct ablkcipher_request *req, u8 op,
rctx->op = op;
rctx->mode = mode;
rctx->type = type;
- rctx->iv = req->info;
+ rctx->iv = req->iv;
rctx->ivsize = ivsize;
/*
@@ -2037,7 +2038,7 @@ static int hifn_setup_crypto_req(struct ablkcipher_request *req, u8 op,
static int hifn_process_queue(struct hifn_device *dev)
{
struct crypto_async_request *async_req, *backlog;
- struct ablkcipher_request *req;
+ struct skcipher_request *req;
unsigned long flags;
int err = 0;
@@ -2053,7 +2054,7 @@ static int hifn_process_queue(struct hifn_device *dev)
if (backlog)
backlog->complete(backlog, -EINPROGRESS);
- req = ablkcipher_request_cast(async_req);
+ req = skcipher_request_cast(async_req);
err = hifn_handle_req(req);
if (err)
@@ -2063,7 +2064,7 @@ static int hifn_process_queue(struct hifn_device *dev)
return err;
}
-static int hifn_setup_crypto(struct ablkcipher_request *req, u8 op,
+static int hifn_setup_crypto(struct skcipher_request *req, u8 op,
u8 type, u8 mode)
{
int err;
@@ -2083,22 +2084,22 @@ static int hifn_setup_crypto(struct ablkcipher_request *req, u8 op,
/*
 * AES encryption functions.
*/
-static inline int hifn_encrypt_aes_ecb(struct ablkcipher_request *req)
+static inline int hifn_encrypt_aes_ecb(struct skcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_ECB);
}
-static inline int hifn_encrypt_aes_cbc(struct ablkcipher_request *req)
+static inline int hifn_encrypt_aes_cbc(struct skcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_CBC);
}
-static inline int hifn_encrypt_aes_cfb(struct ablkcipher_request *req)
+static inline int hifn_encrypt_aes_cfb(struct skcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_CFB);
}
-static inline int hifn_encrypt_aes_ofb(struct ablkcipher_request *req)
+static inline int hifn_encrypt_aes_ofb(struct skcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_OFB);
@@ -2107,22 +2108,22 @@ static inline int hifn_encrypt_aes_ofb(struct ablkcipher_request *req)
/*
* AES decryption functions.
*/
-static inline int hifn_decrypt_aes_ecb(struct ablkcipher_request *req)
+static inline int hifn_decrypt_aes_ecb(struct skcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_ECB);
}
-static inline int hifn_decrypt_aes_cbc(struct ablkcipher_request *req)
+static inline int hifn_decrypt_aes_cbc(struct skcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_CBC);
}
-static inline int hifn_decrypt_aes_cfb(struct ablkcipher_request *req)
+static inline int hifn_decrypt_aes_cfb(struct skcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_CFB);
}
-static inline int hifn_decrypt_aes_ofb(struct ablkcipher_request *req)
+static inline int hifn_decrypt_aes_ofb(struct skcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_OFB);
@@ -2131,22 +2132,22 @@ static inline int hifn_decrypt_aes_ofb(struct ablkcipher_request *req)
/*
 * DES encryption functions.
*/
-static inline int hifn_encrypt_des_ecb(struct ablkcipher_request *req)
+static inline int hifn_encrypt_des_ecb(struct skcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
ACRYPTO_TYPE_DES, ACRYPTO_MODE_ECB);
}
-static inline int hifn_encrypt_des_cbc(struct ablkcipher_request *req)
+static inline int hifn_encrypt_des_cbc(struct skcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
ACRYPTO_TYPE_DES, ACRYPTO_MODE_CBC);
}
-static inline int hifn_encrypt_des_cfb(struct ablkcipher_request *req)
+static inline int hifn_encrypt_des_cfb(struct skcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
ACRYPTO_TYPE_DES, ACRYPTO_MODE_CFB);
}
-static inline int hifn_encrypt_des_ofb(struct ablkcipher_request *req)
+static inline int hifn_encrypt_des_ofb(struct skcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
ACRYPTO_TYPE_DES, ACRYPTO_MODE_OFB);
@@ -2155,22 +2156,22 @@ static inline int hifn_encrypt_des_ofb(struct ablkcipher_request *req)
/*
* DES decryption functions.
*/
-static inline int hifn_decrypt_des_ecb(struct ablkcipher_request *req)
+static inline int hifn_decrypt_des_ecb(struct skcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
ACRYPTO_TYPE_DES, ACRYPTO_MODE_ECB);
}
-static inline int hifn_decrypt_des_cbc(struct ablkcipher_request *req)
+static inline int hifn_decrypt_des_cbc(struct skcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
ACRYPTO_TYPE_DES, ACRYPTO_MODE_CBC);
}
-static inline int hifn_decrypt_des_cfb(struct ablkcipher_request *req)
+static inline int hifn_decrypt_des_cfb(struct skcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
ACRYPTO_TYPE_DES, ACRYPTO_MODE_CFB);
}
-static inline int hifn_decrypt_des_ofb(struct ablkcipher_request *req)
+static inline int hifn_decrypt_des_ofb(struct skcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
ACRYPTO_TYPE_DES, ACRYPTO_MODE_OFB);
@@ -2179,44 +2180,44 @@ static inline int hifn_decrypt_des_ofb(struct ablkcipher_request *req)
/*
 * 3DES encryption functions.
*/
-static inline int hifn_encrypt_3des_ecb(struct ablkcipher_request *req)
+static inline int hifn_encrypt_3des_ecb(struct skcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
ACRYPTO_TYPE_3DES, ACRYPTO_MODE_ECB);
}
-static inline int hifn_encrypt_3des_cbc(struct ablkcipher_request *req)
+static inline int hifn_encrypt_3des_cbc(struct skcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
ACRYPTO_TYPE_3DES, ACRYPTO_MODE_CBC);
}
-static inline int hifn_encrypt_3des_cfb(struct ablkcipher_request *req)
+static inline int hifn_encrypt_3des_cfb(struct skcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
ACRYPTO_TYPE_3DES, ACRYPTO_MODE_CFB);
}
-static inline int hifn_encrypt_3des_ofb(struct ablkcipher_request *req)
+static inline int hifn_encrypt_3des_ofb(struct skcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
ACRYPTO_TYPE_3DES, ACRYPTO_MODE_OFB);
}
/* 3DES decryption functions. */
-static inline int hifn_decrypt_3des_ecb(struct ablkcipher_request *req)
+static inline int hifn_decrypt_3des_ecb(struct skcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
ACRYPTO_TYPE_3DES, ACRYPTO_MODE_ECB);
}
-static inline int hifn_decrypt_3des_cbc(struct ablkcipher_request *req)
+static inline int hifn_decrypt_3des_cbc(struct skcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
ACRYPTO_TYPE_3DES, ACRYPTO_MODE_CBC);
}
-static inline int hifn_decrypt_3des_cfb(struct ablkcipher_request *req)
+static inline int hifn_decrypt_3des_cfb(struct skcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
ACRYPTO_TYPE_3DES, ACRYPTO_MODE_CFB);
}
-static inline int hifn_decrypt_3des_ofb(struct ablkcipher_request *req)
+static inline int hifn_decrypt_3des_ofb(struct skcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
ACRYPTO_TYPE_3DES, ACRYPTO_MODE_OFB);
@@ -2226,16 +2227,16 @@ struct hifn_alg_template {
char name[CRYPTO_MAX_ALG_NAME];
char drv_name[CRYPTO_MAX_ALG_NAME];
unsigned int bsize;
- struct ablkcipher_alg ablkcipher;
+ struct skcipher_alg skcipher;
};
-static struct hifn_alg_template hifn_alg_templates[] = {
+static const struct hifn_alg_template hifn_alg_templates[] = {
/*
* 3DES ECB, CBC, CFB and OFB modes.
*/
{
.name = "cfb(des3_ede)", .drv_name = "cfb-3des", .bsize = 8,
- .ablkcipher = {
+ .skcipher = {
.min_keysize = HIFN_3DES_KEY_LENGTH,
.max_keysize = HIFN_3DES_KEY_LENGTH,
.setkey = hifn_des3_setkey,
@@ -2245,7 +2246,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
},
{
.name = "ofb(des3_ede)", .drv_name = "ofb-3des", .bsize = 8,
- .ablkcipher = {
+ .skcipher = {
.min_keysize = HIFN_3DES_KEY_LENGTH,
.max_keysize = HIFN_3DES_KEY_LENGTH,
.setkey = hifn_des3_setkey,
@@ -2255,7 +2256,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
},
{
.name = "cbc(des3_ede)", .drv_name = "cbc-3des", .bsize = 8,
- .ablkcipher = {
+ .skcipher = {
.ivsize = HIFN_IV_LENGTH,
.min_keysize = HIFN_3DES_KEY_LENGTH,
.max_keysize = HIFN_3DES_KEY_LENGTH,
@@ -2266,7 +2267,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
},
{
.name = "ecb(des3_ede)", .drv_name = "ecb-3des", .bsize = 8,
- .ablkcipher = {
+ .skcipher = {
.min_keysize = HIFN_3DES_KEY_LENGTH,
.max_keysize = HIFN_3DES_KEY_LENGTH,
.setkey = hifn_des3_setkey,
@@ -2280,7 +2281,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
*/
{
.name = "cfb(des)", .drv_name = "cfb-des", .bsize = 8,
- .ablkcipher = {
+ .skcipher = {
.min_keysize = HIFN_DES_KEY_LENGTH,
.max_keysize = HIFN_DES_KEY_LENGTH,
.setkey = hifn_setkey,
@@ -2290,7 +2291,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
},
{
.name = "ofb(des)", .drv_name = "ofb-des", .bsize = 8,
- .ablkcipher = {
+ .skcipher = {
.min_keysize = HIFN_DES_KEY_LENGTH,
.max_keysize = HIFN_DES_KEY_LENGTH,
.setkey = hifn_setkey,
@@ -2300,7 +2301,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
},
{
.name = "cbc(des)", .drv_name = "cbc-des", .bsize = 8,
- .ablkcipher = {
+ .skcipher = {
.ivsize = HIFN_IV_LENGTH,
.min_keysize = HIFN_DES_KEY_LENGTH,
.max_keysize = HIFN_DES_KEY_LENGTH,
@@ -2311,7 +2312,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
},
{
.name = "ecb(des)", .drv_name = "ecb-des", .bsize = 8,
- .ablkcipher = {
+ .skcipher = {
.min_keysize = HIFN_DES_KEY_LENGTH,
.max_keysize = HIFN_DES_KEY_LENGTH,
.setkey = hifn_setkey,
@@ -2325,7 +2326,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
*/
{
.name = "ecb(aes)", .drv_name = "ecb-aes", .bsize = 16,
- .ablkcipher = {
+ .skcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.setkey = hifn_setkey,
@@ -2335,7 +2336,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
},
{
.name = "cbc(aes)", .drv_name = "cbc-aes", .bsize = 16,
- .ablkcipher = {
+ .skcipher = {
.ivsize = HIFN_AES_IV_LENGTH,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
@@ -2346,7 +2347,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
},
{
.name = "cfb(aes)", .drv_name = "cfb-aes", .bsize = 16,
- .ablkcipher = {
+ .skcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.setkey = hifn_setkey,
@@ -2356,7 +2357,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
},
{
.name = "ofb(aes)", .drv_name = "ofb-aes", .bsize = 16,
- .ablkcipher = {
+ .skcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.setkey = hifn_setkey,
@@ -2366,18 +2367,19 @@ static struct hifn_alg_template hifn_alg_templates[] = {
},
};
-static int hifn_cra_init(struct crypto_tfm *tfm)
+static int hifn_init_tfm(struct crypto_skcipher *tfm)
{
- struct crypto_alg *alg = tfm->__crt_alg;
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
struct hifn_crypto_alg *ha = crypto_alg_to_hifn(alg);
- struct hifn_context *ctx = crypto_tfm_ctx(tfm);
+ struct hifn_context *ctx = crypto_skcipher_ctx(tfm);
ctx->dev = ha->dev;
- tfm->crt_ablkcipher.reqsize = sizeof(struct hifn_request_context);
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct hifn_request_context));
+
return 0;
}
-static int hifn_alg_alloc(struct hifn_device *dev, struct hifn_alg_template *t)
+static int hifn_alg_alloc(struct hifn_device *dev, const struct hifn_alg_template *t)
{
struct hifn_crypto_alg *alg;
int err;
@@ -2386,26 +2388,25 @@ static int hifn_alg_alloc(struct hifn_device *dev, struct hifn_alg_template *t)
if (!alg)
return -ENOMEM;
- snprintf(alg->alg.cra_name, CRYPTO_MAX_ALG_NAME, "%s", t->name);
- snprintf(alg->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-%s",
+ alg->alg = t->skcipher;
+ alg->alg.init = hifn_init_tfm;
+
+ snprintf(alg->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", t->name);
+ snprintf(alg->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-%s",
t->drv_name, dev->name);
- alg->alg.cra_priority = 300;
- alg->alg.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC;
- alg->alg.cra_blocksize = t->bsize;
- alg->alg.cra_ctxsize = sizeof(struct hifn_context);
- alg->alg.cra_alignmask = 0;
- alg->alg.cra_type = &crypto_ablkcipher_type;
- alg->alg.cra_module = THIS_MODULE;
- alg->alg.cra_u.ablkcipher = t->ablkcipher;
- alg->alg.cra_init = hifn_cra_init;
+ alg->alg.base.cra_priority = 300;
+ alg->alg.base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC;
+ alg->alg.base.cra_blocksize = t->bsize;
+ alg->alg.base.cra_ctxsize = sizeof(struct hifn_context);
+ alg->alg.base.cra_alignmask = 0;
+ alg->alg.base.cra_module = THIS_MODULE;
alg->dev = dev;
list_add_tail(&alg->entry, &dev->alg_list);
- err = crypto_register_alg(&alg->alg);
+ err = crypto_register_skcipher(&alg->alg);
if (err) {
list_del(&alg->entry);
kfree(alg);
@@ -2420,7 +2421,7 @@ static void hifn_unregister_alg(struct hifn_device *dev)
list_for_each_entry_safe(a, n, &dev->alg_list, entry) {
list_del(&a->entry);
- crypto_unregister_alg(&a->alg);
+ crypto_unregister_skcipher(&a->alg);
kfree(a);
}
}
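The queue plumbing above relies on only req->base being stored in the generic crypto_queue; a hedged sketch (helper name hypothetical) of the dequeue side:

static int sketch_dequeue_one(struct hifn_device *dev)
{
	struct crypto_async_request *async_req;
	struct skcipher_request *req;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	async_req = crypto_dequeue_request(&dev->queue);
	spin_unlock_irqrestore(&dev->lock, flags);
	if (!async_req)
		return -ENOENT;

	/* Recover the full skcipher request from its embedded base. */
	req = skcipher_request_cast(async_req);
	return hifn_handle_req(req);
}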
diff --git a/drivers/crypto/hisilicon/Kconfig b/drivers/crypto/hisilicon/Kconfig
index ebaf91e0146d..c0e7a85fe129 100644
--- a/drivers/crypto/hisilicon/Kconfig
+++ b/drivers/crypto/hisilicon/Kconfig
@@ -2,7 +2,7 @@
config CRYPTO_DEV_HISI_SEC
tristate "Support for Hisilicon SEC crypto block cipher accelerator"
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_ALGAPI
select CRYPTO_LIB_DES
select SG_SPLIT
@@ -14,26 +14,47 @@ config CRYPTO_DEV_HISI_SEC
To compile this as a module, choose M here: the module
will be called hisi_sec.
+config CRYPTO_DEV_HISI_SEC2
+ tristate "Support for HiSilicon SEC2 crypto block cipher accelerator"
+ select CRYPTO_BLKCIPHER
+ select CRYPTO_ALGAPI
+ select CRYPTO_LIB_DES
+ select CRYPTO_DEV_HISI_QM
+ depends on PCI && PCI_MSI
+ depends on ARM64 || (COMPILE_TEST && 64BIT)
+ help
+	  Support for HiSilicon SEC Engine of version 2 in the crypto
+	  subsystem. It provides AES, SM4, and 3DES algorithms with the
+	  ECB, CBC, and XTS cipher modes.
+
+ To compile this as a module, choose M here: the module
+ will be called hisi_sec2.
+
config CRYPTO_DEV_HISI_QM
tristate
- depends on ARM64 && PCI && PCI_MSI
+ depends on ARM64 || COMPILE_TEST
+ depends on PCI && PCI_MSI
help
HiSilicon accelerator engines use a common queue management
interface. Specific engine driver may use this module.
-config CRYPTO_HISI_SGL
- tristate
- depends on ARM64
- help
- HiSilicon accelerator engines use a common hardware scatterlist
- interface for data format. Specific engine driver may use this
- module.
-
config CRYPTO_DEV_HISI_ZIP
tristate "Support for HiSilicon ZIP accelerator"
- depends on ARM64 && PCI && PCI_MSI
+ depends on PCI && PCI_MSI
+ depends on ARM64 || (COMPILE_TEST && 64BIT)
+ depends on !CPU_BIG_ENDIAN || COMPILE_TEST
select CRYPTO_DEV_HISI_QM
- select CRYPTO_HISI_SGL
select SG_SPLIT
help
Support for HiSilicon ZIP Driver
+
+config CRYPTO_DEV_HISI_HPRE
+ tristate "Support for HISI HPRE accelerator"
+ depends on PCI && PCI_MSI
+ depends on ARM64 || (COMPILE_TEST && 64BIT)
+ select CRYPTO_DEV_HISI_QM
+ select CRYPTO_DH
+ select CRYPTO_RSA
+ help
+	  Support for the HiSilicon HPRE (High Performance RSA Engine)
+	  accelerator, which can accelerate RSA and DH algorithms.
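For reference, consumers reach the engines enabled by these options through the generic crypto API rather than through the drivers directly; a minimal, hedged sketch for the SEC2 block ciphers:

#include <crypto/skcipher.h>
#include <linux/err.h>

static int sketch_get_cbc_aes(void)
{
	struct crypto_skcipher *tfm;

	/* Resolves to the highest-priority "cbc(aes)" provider, which
	 * may be one of these engines when its module is loaded. */
	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	crypto_free_skcipher(tfm);
	return 0;
}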
diff --git a/drivers/crypto/hisilicon/Makefile b/drivers/crypto/hisilicon/Makefile
index 45a279741126..7f5f74c72baa 100644
--- a/drivers/crypto/hisilicon/Makefile
+++ b/drivers/crypto/hisilicon/Makefile
@@ -1,5 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_CRYPTO_DEV_HISI_HPRE) += hpre/
obj-$(CONFIG_CRYPTO_DEV_HISI_SEC) += sec/
-obj-$(CONFIG_CRYPTO_DEV_HISI_QM) += qm.o
-obj-$(CONFIG_CRYPTO_HISI_SGL) += sgl.o
+obj-$(CONFIG_CRYPTO_DEV_HISI_SEC2) += sec2/
+obj-$(CONFIG_CRYPTO_DEV_HISI_QM) += hisi_qm.o
+hisi_qm-objs = qm.o sgl.o
obj-$(CONFIG_CRYPTO_DEV_HISI_ZIP) += zip/
diff --git a/drivers/crypto/hisilicon/hpre/Makefile b/drivers/crypto/hisilicon/hpre/Makefile
new file mode 100644
index 000000000000..4fd32b789e1e
--- /dev/null
+++ b/drivers/crypto/hisilicon/hpre/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_CRYPTO_DEV_HISI_HPRE) += hisi_hpre.o
+hisi_hpre-objs = hpre_main.o hpre_crypto.o
diff --git a/drivers/crypto/hisilicon/hpre/hpre.h b/drivers/crypto/hisilicon/hpre/hpre.h
new file mode 100644
index 000000000000..ddf13ea9862a
--- /dev/null
+++ b/drivers/crypto/hisilicon/hpre/hpre.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019 HiSilicon Limited. */
+#ifndef __HISI_HPRE_H
+#define __HISI_HPRE_H
+
+#include <linux/list.h>
+#include "../qm.h"
+
+#define HPRE_SQE_SIZE sizeof(struct hpre_sqe)
+#define HPRE_PF_DEF_Q_NUM 64
+#define HPRE_PF_DEF_Q_BASE 0
+
+enum {
+ HPRE_CLUSTER0,
+ HPRE_CLUSTER1,
+ HPRE_CLUSTER2,
+ HPRE_CLUSTER3,
+ HPRE_CLUSTERS_NUM,
+};
+
+enum hpre_ctrl_dbgfs_file {
+ HPRE_CURRENT_QM,
+ HPRE_CLEAR_ENABLE,
+ HPRE_CLUSTER_CTRL,
+ HPRE_DEBUG_FILE_NUM,
+};
+
+#define HPRE_DEBUGFS_FILE_NUM (HPRE_DEBUG_FILE_NUM + HPRE_CLUSTERS_NUM - 1)
+
+struct hpre_debugfs_file {
+ int index;
+ enum hpre_ctrl_dbgfs_file type;
+ spinlock_t lock;
+ struct hpre_debug *debug;
+};
+
+/*
+ * One HPRE controller has one PF and multiple VFs. Some global
+ * configurations owned by the PF need this structure.
+ * It is only relevant for the PF.
+ */
+struct hpre_debug {
+ struct dentry *debug_root;
+ struct hpre_debugfs_file files[HPRE_DEBUGFS_FILE_NUM];
+};
+
+struct hpre {
+ struct hisi_qm qm;
+ struct list_head list;
+ struct hpre_debug debug;
+ u32 num_vfs;
+ unsigned long status;
+};
+
+enum hpre_alg_type {
+ HPRE_ALG_NC_NCRT = 0x0,
+ HPRE_ALG_NC_CRT = 0x1,
+ HPRE_ALG_KG_STD = 0x2,
+ HPRE_ALG_KG_CRT = 0x3,
+ HPRE_ALG_DH_G2 = 0x4,
+ HPRE_ALG_DH = 0x5,
+};
+
+struct hpre_sqe {
+ __le32 dw0;
+ __u8 task_len1;
+ __u8 task_len2;
+ __u8 mrttest_num;
+ __u8 resv1;
+ __le64 key;
+ __le64 in;
+ __le64 out;
+ __le16 tag;
+ __le16 resv2;
+#define _HPRE_SQE_ALIGN_EXT 7
+ __le32 rsvd1[_HPRE_SQE_ALIGN_EXT];
+};
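A hedged size check for the descriptor above: with the seven-word pad, the SQE fills exactly one 64-byte cache line.

/*
 * 4 (dw0) + 4 (task_len1/2, mrttest_num, resv1) + 24 (key, in, out)
 * + 4 (tag, resv2) + 28 (rsvd1, 7 * 4) = 64 bytes,
 * so sizeof(struct hpre_sqe) == 64.
 */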
+
+struct hpre *hpre_find_device(int node);
+int hpre_algs_register(void);
+void hpre_algs_unregister(void);
+
+#endif
diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
new file mode 100644
index 000000000000..98f037e6ea3e
--- /dev/null
+++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
@@ -0,0 +1,1137 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 HiSilicon Limited. */
+#include <crypto/akcipher.h>
+#include <crypto/dh.h>
+#include <crypto/internal/akcipher.h>
+#include <crypto/internal/kpp.h>
+#include <crypto/internal/rsa.h>
+#include <crypto/kpp.h>
+#include <crypto/scatterwalk.h>
+#include <linux/dma-mapping.h>
+#include <linux/fips.h>
+#include <linux/module.h>
+#include "hpre.h"
+
+struct hpre_ctx;
+
+#define HPRE_CRYPTO_ALG_PRI 1000
+#define HPRE_ALIGN_SZ 64
+#define HPRE_BITS_2_BYTES_SHIFT 3
+#define HPRE_RSA_512BITS_KSZ 64
+#define HPRE_RSA_1536BITS_KSZ 192
+#define HPRE_CRT_PRMS 5
+#define HPRE_CRT_Q 2
+#define HPRE_CRT_P 3
+#define HPRE_CRT_INV 4
+#define HPRE_DH_G_FLAG 0x02
+#define HPRE_TRY_SEND_TIMES 100
+#define HPRE_INVLD_REQ_ID (-1)
+#define HPRE_DEV(ctx) (&((ctx)->qp->qm->pdev->dev))
+
+#define HPRE_SQE_ALG_BITS 5
+#define HPRE_SQE_DONE_SHIFT 30
+#define HPRE_DH_MAX_P_SZ 512
+
+typedef void (*hpre_cb)(struct hpre_ctx *ctx, void *sqe);
+
+struct hpre_rsa_ctx {
+ /* low address: e--->n */
+ char *pubkey;
+ dma_addr_t dma_pubkey;
+
+ /* low address: d--->n */
+ char *prikey;
+ dma_addr_t dma_prikey;
+
+ /* low address: dq->dp->q->p->qinv */
+ char *crt_prikey;
+ dma_addr_t dma_crt_prikey;
+
+ struct crypto_akcipher *soft_tfm;
+};
+
+struct hpre_dh_ctx {
+ /*
+ * If base is g we compute the public key
+ * ya = g^xa mod p; [RFC2631 sec 2.1.1]
+	 * else if base is the counterpart public key we
+ * compute the shared secret
+ * ZZ = yb^xa mod p; [RFC2631 sec 2.1.1]
+ */
+	char *xa_p; /* low address: d--->n, please refer to the HiSilicon HPRE UM */
+ dma_addr_t dma_xa_p;
+
+ char *g; /* m */
+ dma_addr_t dma_g;
+};
+
+struct hpre_ctx {
+ struct hisi_qp *qp;
+ struct hpre_asym_request **req_list;
+ spinlock_t req_lock;
+ unsigned int key_sz;
+ bool crt_g2_mode;
+ struct idr req_idr;
+ union {
+ struct hpre_rsa_ctx rsa;
+ struct hpre_dh_ctx dh;
+ };
+};
+
+struct hpre_asym_request {
+ char *src;
+ char *dst;
+ struct hpre_sqe req;
+ struct hpre_ctx *ctx;
+ union {
+ struct akcipher_request *rsa;
+ struct kpp_request *dh;
+ } areq;
+ int err;
+ int req_id;
+ hpre_cb cb;
+};
+
+static DEFINE_MUTEX(hpre_alg_lock);
+static unsigned int hpre_active_devs;
+
+static int hpre_alloc_req_id(struct hpre_ctx *ctx)
+{
+ unsigned long flags;
+ int id;
+
+ spin_lock_irqsave(&ctx->req_lock, flags);
+ id = idr_alloc(&ctx->req_idr, NULL, 0, QM_Q_DEPTH, GFP_ATOMIC);
+ spin_unlock_irqrestore(&ctx->req_lock, flags);
+
+ return id;
+}
+
+static void hpre_free_req_id(struct hpre_ctx *ctx, int req_id)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctx->req_lock, flags);
+ idr_remove(&ctx->req_idr, req_id);
+ spin_unlock_irqrestore(&ctx->req_lock, flags);
+}
+
+static int hpre_add_req_to_ctx(struct hpre_asym_request *hpre_req)
+{
+ struct hpre_ctx *ctx;
+ int id;
+
+ ctx = hpre_req->ctx;
+ id = hpre_alloc_req_id(ctx);
+ if (id < 0)
+ return -EINVAL;
+
+ ctx->req_list[id] = hpre_req;
+ hpre_req->req_id = id;
+
+ return id;
+}
+
+static void hpre_rm_req_from_ctx(struct hpre_asym_request *hpre_req)
+{
+ struct hpre_ctx *ctx = hpre_req->ctx;
+ int id = hpre_req->req_id;
+
+ if (hpre_req->req_id >= 0) {
+ hpre_req->req_id = HPRE_INVLD_REQ_ID;
+ ctx->req_list[id] = NULL;
+ hpre_free_req_id(ctx, id);
+ }
+}
+
+static struct hisi_qp *hpre_get_qp_and_start(void)
+{
+ struct hisi_qp *qp;
+ struct hpre *hpre;
+ int ret;
+
+ /* find the proper hpre device, which is near the current CPU core */
+ hpre = hpre_find_device(cpu_to_node(smp_processor_id()));
+ if (!hpre) {
+ pr_err("Can not find proper hpre device!\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+ qp = hisi_qm_create_qp(&hpre->qm, 0);
+ if (IS_ERR(qp)) {
+ pci_err(hpre->qm.pdev, "Can not create qp!\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+ ret = hisi_qm_start_qp(qp, 0);
+ if (ret < 0) {
+ hisi_qm_release_qp(qp);
+ pci_err(hpre->qm.pdev, "Can not start qp!\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ return qp;
+}
+
+static int hpre_get_data_dma_addr(struct hpre_asym_request *hpre_req,
+ struct scatterlist *data, unsigned int len,
+ int is_src, dma_addr_t *tmp)
+{
+ struct hpre_ctx *ctx = hpre_req->ctx;
+ struct device *dev = HPRE_DEV(ctx);
+ enum dma_data_direction dma_dir;
+
+ if (is_src) {
+ hpre_req->src = NULL;
+ dma_dir = DMA_TO_DEVICE;
+ } else {
+ hpre_req->dst = NULL;
+ dma_dir = DMA_FROM_DEVICE;
+ }
+ *tmp = dma_map_single(dev, sg_virt(data),
+ len, dma_dir);
+ if (dma_mapping_error(dev, *tmp)) {
+ dev_err(dev, "dma map data err!\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int hpre_prepare_dma_buf(struct hpre_asym_request *hpre_req,
+ struct scatterlist *data, unsigned int len,
+ int is_src, dma_addr_t *tmp)
+{
+ struct hpre_ctx *ctx = hpre_req->ctx;
+ struct device *dev = HPRE_DEV(ctx);
+ void *ptr;
+ int shift;
+
+ shift = ctx->key_sz - len;
+ if (shift < 0)
+ return -EINVAL;
+
+ ptr = dma_alloc_coherent(dev, ctx->key_sz, tmp, GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ if (is_src) {
+ scatterwalk_map_and_copy(ptr + shift, data, 0, len, 0);
+ hpre_req->src = ptr;
+ } else {
+ hpre_req->dst = ptr;
+ }
+
+ return 0;
+}
+
+static int hpre_hw_data_init(struct hpre_asym_request *hpre_req,
+ struct scatterlist *data, unsigned int len,
+ int is_src, int is_dh)
+{
+ struct hpre_sqe *msg = &hpre_req->req;
+ struct hpre_ctx *ctx = hpre_req->ctx;
+ dma_addr_t tmp;
+ int ret;
+
+	/* when the data is DH's source, it needs to be formatted */
+ if ((sg_is_last(data) && len == ctx->key_sz) &&
+ ((is_dh && !is_src) || !is_dh))
+ ret = hpre_get_data_dma_addr(hpre_req, data, len, is_src, &tmp);
+ else
+ ret = hpre_prepare_dma_buf(hpre_req, data, len,
+ is_src, &tmp);
+ if (ret)
+ return ret;
+
+ if (is_src)
+ msg->in = cpu_to_le64(tmp);
+ else
+ msg->out = cpu_to_le64(tmp);
+
+ return 0;
+}
+
+static void hpre_hw_data_clr_all(struct hpre_ctx *ctx,
+ struct hpre_asym_request *req,
+ struct scatterlist *dst, struct scatterlist *src)
+{
+ struct device *dev = HPRE_DEV(ctx);
+ struct hpre_sqe *sqe = &req->req;
+ dma_addr_t tmp;
+
+ tmp = le64_to_cpu(sqe->in);
+ if (!tmp)
+ return;
+
+ if (src) {
+ if (req->src)
+ dma_free_coherent(dev, ctx->key_sz,
+ req->src, tmp);
+ else
+ dma_unmap_single(dev, tmp,
+ ctx->key_sz, DMA_TO_DEVICE);
+ }
+
+ tmp = le64_to_cpu(sqe->out);
+ if (!tmp)
+ return;
+
+ if (req->dst) {
+ if (dst)
+ scatterwalk_map_and_copy(req->dst, dst, 0,
+ ctx->key_sz, 1);
+ dma_free_coherent(dev, ctx->key_sz, req->dst, tmp);
+ } else {
+ dma_unmap_single(dev, tmp, ctx->key_sz, DMA_FROM_DEVICE);
+ }
+}
+
+static int hpre_alg_res_post_hf(struct hpre_ctx *ctx, struct hpre_sqe *sqe,
+ void **kreq)
+{
+ struct hpre_asym_request *req;
+ int err, id, done;
+
+#define HPRE_NO_HW_ERR 0
+#define HPRE_HW_TASK_DONE 3
+#define HREE_HW_ERR_MASK 0x7ff
+#define HREE_SQE_DONE_MASK 0x3
+ id = (int)le16_to_cpu(sqe->tag);
+ req = ctx->req_list[id];
+ hpre_rm_req_from_ctx(req);
+ *kreq = req;
+
+ err = (le32_to_cpu(sqe->dw0) >> HPRE_SQE_ALG_BITS) &
+ HREE_HW_ERR_MASK;
+
+ done = (le32_to_cpu(sqe->dw0) >> HPRE_SQE_DONE_SHIFT) &
+ HREE_SQE_DONE_MASK;
+
+ if (err == HPRE_NO_HW_ERR && done == HPRE_HW_TASK_DONE)
+ return 0;
+
+ return -EINVAL;
+}
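A worked decode of the dw0 status word parsed above (bit layout inferred from the masks defined in this function):

/*
 * For dw0 = 0xc0000000:
 *	err  = (dw0 >> HPRE_SQE_ALG_BITS)   & HREE_HW_ERR_MASK   = 0
 *	done = (dw0 >> HPRE_SQE_DONE_SHIFT) & HREE_SQE_DONE_MASK = 3
 * i.e. HPRE_NO_HW_ERR and HPRE_HW_TASK_DONE, so the function returns 0.
 */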
+
+static int hpre_ctx_set(struct hpre_ctx *ctx, struct hisi_qp *qp, int qlen)
+{
+ if (!ctx || !qp || qlen < 0)
+ return -EINVAL;
+
+ spin_lock_init(&ctx->req_lock);
+ ctx->qp = qp;
+
+ ctx->req_list = kcalloc(qlen, sizeof(void *), GFP_KERNEL);
+ if (!ctx->req_list)
+ return -ENOMEM;
+ ctx->key_sz = 0;
+ ctx->crt_g2_mode = false;
+ idr_init(&ctx->req_idr);
+
+ return 0;
+}
+
+static void hpre_ctx_clear(struct hpre_ctx *ctx, bool is_clear_all)
+{
+ if (is_clear_all) {
+ idr_destroy(&ctx->req_idr);
+ kfree(ctx->req_list);
+ hisi_qm_release_qp(ctx->qp);
+ }
+
+ ctx->crt_g2_mode = false;
+ ctx->key_sz = 0;
+}
+
+static void hpre_dh_cb(struct hpre_ctx *ctx, void *resp)
+{
+ struct hpre_asym_request *req;
+ struct kpp_request *areq;
+ int ret;
+
+ ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
+ areq = req->areq.dh;
+ areq->dst_len = ctx->key_sz;
+ hpre_hw_data_clr_all(ctx, req, areq->dst, areq->src);
+ kpp_request_complete(areq, ret);
+}
+
+static void hpre_rsa_cb(struct hpre_ctx *ctx, void *resp)
+{
+ struct hpre_asym_request *req;
+ struct akcipher_request *areq;
+ int ret;
+
+ ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
+ areq = req->areq.rsa;
+ areq->dst_len = ctx->key_sz;
+ hpre_hw_data_clr_all(ctx, req, areq->dst, areq->src);
+ akcipher_request_complete(areq, ret);
+}
+
+static void hpre_alg_cb(struct hisi_qp *qp, void *resp)
+{
+ struct hpre_ctx *ctx = qp->qp_ctx;
+ struct hpre_sqe *sqe = resp;
+
+ ctx->req_list[sqe->tag]->cb(ctx, resp);
+}
+
+static int hpre_ctx_init(struct hpre_ctx *ctx)
+{
+ struct hisi_qp *qp;
+
+ qp = hpre_get_qp_and_start();
+ if (IS_ERR(qp))
+ return PTR_ERR(qp);
+
+ qp->qp_ctx = ctx;
+ qp->req_cb = hpre_alg_cb;
+
+ return hpre_ctx_set(ctx, qp, QM_Q_DEPTH);
+}
+
+static int hpre_msg_request_set(struct hpre_ctx *ctx, void *req, bool is_rsa)
+{
+ struct hpre_asym_request *h_req;
+ struct hpre_sqe *msg;
+ int req_id;
+ void *tmp;
+
+ if (is_rsa) {
+ struct akcipher_request *akreq = req;
+
+ if (akreq->dst_len < ctx->key_sz) {
+ akreq->dst_len = ctx->key_sz;
+ return -EOVERFLOW;
+ }
+
+ tmp = akcipher_request_ctx(akreq);
+ h_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
+ h_req->cb = hpre_rsa_cb;
+ h_req->areq.rsa = akreq;
+ msg = &h_req->req;
+ memset(msg, 0, sizeof(*msg));
+ } else {
+ struct kpp_request *kreq = req;
+
+ if (kreq->dst_len < ctx->key_sz) {
+ kreq->dst_len = ctx->key_sz;
+ return -EOVERFLOW;
+ }
+
+ tmp = kpp_request_ctx(kreq);
+ h_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
+ h_req->cb = hpre_dh_cb;
+ h_req->areq.dh = kreq;
+ msg = &h_req->req;
+ memset(msg, 0, sizeof(*msg));
+ msg->key = cpu_to_le64((u64)ctx->dh.dma_xa_p);
+ }
+
+ msg->dw0 |= cpu_to_le32(0x1 << HPRE_SQE_DONE_SHIFT);
+ msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1;
+ h_req->ctx = ctx;
+
+ req_id = hpre_add_req_to_ctx(h_req);
+ if (req_id < 0)
+ return -EBUSY;
+
+ msg->tag = cpu_to_le16((u16)req_id);
+
+ return 0;
+}
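A worked example of the task_len1 encoding above (interpretation hedged): ctx->key_sz is in bytes, so the shift yields the length in 8-byte words, stored minus one.

/*
 * For a 2048-bit RSA key:
 *	key_sz    = 256
 *	task_len1 = (256 >> HPRE_BITS_2_BYTES_SHIFT) - 1 = 31
 */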
+
+#ifdef CONFIG_CRYPTO_DH
+static int hpre_dh_compute_value(struct kpp_request *req)
+{
+ struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
+ struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+ void *tmp = kpp_request_ctx(req);
+ struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
+ struct hpre_sqe *msg = &hpre_req->req;
+ int ctr = 0;
+ int ret;
+
+ if (!ctx)
+ return -EINVAL;
+
+ ret = hpre_msg_request_set(ctx, req, false);
+ if (ret)
+ return ret;
+
+ if (req->src) {
+ ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 1);
+ if (ret)
+ goto clear_all;
+ }
+
+ ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 1);
+ if (ret)
+ goto clear_all;
+
+ if (ctx->crt_g2_mode && !req->src)
+ msg->dw0 |= HPRE_ALG_DH_G2;
+ else
+ msg->dw0 |= HPRE_ALG_DH;
+ do {
+ ret = hisi_qp_send(ctx->qp, msg);
+ } while (ret == -EBUSY && ctr++ < HPRE_TRY_SEND_TIMES);
+
+ /* success */
+ if (!ret)
+ return -EINPROGRESS;
+
+clear_all:
+ hpre_rm_req_from_ctx(hpre_req);
+ hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);
+
+ return ret;
+}
+
+static int hpre_is_dh_params_length_valid(unsigned int key_sz)
+{
+#define _HPRE_DH_GRP1 768
+#define _HPRE_DH_GRP2 1024
+#define _HPRE_DH_GRP5 1536
+#define _HPRE_DH_GRP14 2048
+#define _HPRE_DH_GRP15 3072
+#define _HPRE_DH_GRP16 4096
+ switch (key_sz) {
+ case _HPRE_DH_GRP1:
+ case _HPRE_DH_GRP2:
+ case _HPRE_DH_GRP5:
+ case _HPRE_DH_GRP14:
+ case _HPRE_DH_GRP15:
+ case _HPRE_DH_GRP16:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int hpre_dh_set_params(struct hpre_ctx *ctx, struct dh *params)
+{
+ struct device *dev = HPRE_DEV(ctx);
+ unsigned int sz;
+
+ if (params->p_size > HPRE_DH_MAX_P_SZ)
+ return -EINVAL;
+
+ if (hpre_is_dh_params_length_valid(params->p_size <<
+ HPRE_BITS_2_BYTES_SHIFT))
+ return -EINVAL;
+
+ sz = ctx->key_sz = params->p_size;
+ ctx->dh.xa_p = dma_alloc_coherent(dev, sz << 1,
+ &ctx->dh.dma_xa_p, GFP_KERNEL);
+ if (!ctx->dh.xa_p)
+ return -ENOMEM;
+
+ memcpy(ctx->dh.xa_p + sz, params->p, sz);
+
+ /* If g equals 2 don't copy it */
+ if (params->g_size == 1 && *(char *)params->g == HPRE_DH_G_FLAG) {
+ ctx->crt_g2_mode = true;
+ return 0;
+ }
+
+ ctx->dh.g = dma_alloc_coherent(dev, sz, &ctx->dh.dma_g, GFP_KERNEL);
+ if (!ctx->dh.g) {
+ dma_free_coherent(dev, sz << 1, ctx->dh.xa_p,
+ ctx->dh.dma_xa_p);
+ ctx->dh.xa_p = NULL;
+ return -ENOMEM;
+ }
+
+ memcpy(ctx->dh.g + (sz - params->g_size), params->g, params->g_size);
+
+ return 0;
+}
+
+static void hpre_dh_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all)
+{
+ struct device *dev = HPRE_DEV(ctx);
+ unsigned int sz = ctx->key_sz;
+
+ if (is_clear_all)
+ hisi_qm_stop_qp(ctx->qp);
+
+ if (ctx->dh.g) {
+ memset(ctx->dh.g, 0, sz);
+ dma_free_coherent(dev, sz, ctx->dh.g, ctx->dh.dma_g);
+ ctx->dh.g = NULL;
+ }
+
+ if (ctx->dh.xa_p) {
+ memset(ctx->dh.xa_p, 0, sz);
+ dma_free_coherent(dev, sz << 1, ctx->dh.xa_p,
+ ctx->dh.dma_xa_p);
+ ctx->dh.xa_p = NULL;
+ }
+
+ hpre_ctx_clear(ctx, is_clear_all);
+}
+
+static int hpre_dh_set_secret(struct crypto_kpp *tfm, const void *buf,
+ unsigned int len)
+{
+ struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+ struct dh params;
+ int ret;
+
+ if (crypto_dh_decode_key(buf, len, &params) < 0)
+ return -EINVAL;
+
+ /* Free old secret if any */
+ hpre_dh_clear_ctx(ctx, false);
+
+ ret = hpre_dh_set_params(ctx, &params);
+ if (ret < 0)
+ goto err_clear_ctx;
+
+ memcpy(ctx->dh.xa_p + (ctx->key_sz - params.key_size), params.key,
+ params.key_size);
+
+ return 0;
+
+err_clear_ctx:
+ hpre_dh_clear_ctx(ctx, false);
+ return ret;
+}
+
+static unsigned int hpre_dh_max_size(struct crypto_kpp *tfm)
+{
+ struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+
+ return ctx->key_sz;
+}
+
+static int hpre_dh_init_tfm(struct crypto_kpp *tfm)
+{
+ struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+
+ return hpre_ctx_init(ctx);
+}
+
+static void hpre_dh_exit_tfm(struct crypto_kpp *tfm)
+{
+ struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+
+ hpre_dh_clear_ctx(ctx, true);
+}
+#endif
+
+static void hpre_rsa_drop_leading_zeros(const char **ptr, size_t *len)
+{
+ while (!**ptr && *len) {
+ (*ptr)++;
+ (*len)--;
+ }
+}
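A hedged usage sketch for the helper above, which advances the caller's pointer past big-endian leading zeros and shrinks the length to match:

static size_t sketch_stripped_len(const char *buf, size_t len)
{
	/* e.g. {0x00, 0x00, 0xab} with len == 3 returns 1 */
	hpre_rsa_drop_leading_zeros(&buf, &len);
	return len;
}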
+
+static bool hpre_rsa_key_size_is_support(unsigned int len)
+{
+ unsigned int bits = len << HPRE_BITS_2_BYTES_SHIFT;
+
+#define _RSA_1024BITS_KEY_WDTH 1024
+#define _RSA_2048BITS_KEY_WDTH 2048
+#define _RSA_3072BITS_KEY_WDTH 3072
+#define _RSA_4096BITS_KEY_WDTH 4096
+
+ switch (bits) {
+ case _RSA_1024BITS_KEY_WDTH:
+ case _RSA_2048BITS_KEY_WDTH:
+ case _RSA_3072BITS_KEY_WDTH:
+ case _RSA_4096BITS_KEY_WDTH:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static int hpre_rsa_enc(struct akcipher_request *req)
+{
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
+ void *tmp = akcipher_request_ctx(req);
+ struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
+ struct hpre_sqe *msg = &hpre_req->req;
+ int ctr = 0;
+ int ret;
+
+ if (!ctx)
+ return -EINVAL;
+
+	/* For 512-bit and 1536-bit key sizes, use the soft tfm instead */
+ if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||
+ ctx->key_sz == HPRE_RSA_1536BITS_KSZ) {
+ akcipher_request_set_tfm(req, ctx->rsa.soft_tfm);
+ ret = crypto_akcipher_encrypt(req);
+ akcipher_request_set_tfm(req, tfm);
+ return ret;
+ }
+
+ if (!ctx->rsa.pubkey)
+ return -EINVAL;
+
+ ret = hpre_msg_request_set(ctx, req, true);
+ if (ret)
+ return ret;
+
+ msg->dw0 |= HPRE_ALG_NC_NCRT;
+ msg->key = cpu_to_le64((u64)ctx->rsa.dma_pubkey);
+
+ ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 0);
+ if (ret)
+ goto clear_all;
+
+ ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 0);
+ if (ret)
+ goto clear_all;
+
+ do {
+ ret = hisi_qp_send(ctx->qp, msg);
+ } while (ret == -EBUSY && ctr++ < HPRE_TRY_SEND_TIMES);
+
+ /* success */
+ if (!ret)
+ return -EINPROGRESS;
+
+clear_all:
+ hpre_rm_req_from_ctx(hpre_req);
+ hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);
+
+ return ret;
+}
+
+static int hpre_rsa_dec(struct akcipher_request *req)
+{
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
+ void *tmp = akcipher_request_ctx(req);
+ struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
+ struct hpre_sqe *msg = &hpre_req->req;
+ int ctr = 0;
+ int ret;
+
+ if (!ctx)
+ return -EINVAL;
+
+	/* For 512-bit and 1536-bit key sizes, use the soft tfm instead */
+ if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||
+ ctx->key_sz == HPRE_RSA_1536BITS_KSZ) {
+ akcipher_request_set_tfm(req, ctx->rsa.soft_tfm);
+ ret = crypto_akcipher_decrypt(req);
+ akcipher_request_set_tfm(req, tfm);
+ return ret;
+ }
+
+ if (!ctx->rsa.prikey)
+ return -EINVAL;
+
+ ret = hpre_msg_request_set(ctx, req, true);
+ if (ret)
+ return ret;
+
+ if (ctx->crt_g2_mode) {
+ msg->key = cpu_to_le64((u64)ctx->rsa.dma_crt_prikey);
+ msg->dw0 |= HPRE_ALG_NC_CRT;
+ } else {
+ msg->key = cpu_to_le64((u64)ctx->rsa.dma_prikey);
+ msg->dw0 |= HPRE_ALG_NC_NCRT;
+ }
+
+ ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 0);
+ if (ret)
+ goto clear_all;
+
+ ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 0);
+ if (ret)
+ goto clear_all;
+
+ do {
+ ret = hisi_qp_send(ctx->qp, msg);
+ } while (ret == -EBUSY && ctr++ < HPRE_TRY_SEND_TIMES);
+
+ /* success */
+ if (!ret)
+ return -EINPROGRESS;
+
+clear_all:
+ hpre_rm_req_from_ctx(hpre_req);
+ hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);
+
+ return ret;
+}
+
+static int hpre_rsa_set_n(struct hpre_ctx *ctx, const char *value,
+ size_t vlen, bool private)
+{
+ const char *ptr = value;
+
+ hpre_rsa_drop_leading_zeros(&ptr, &vlen);
+
+ ctx->key_sz = vlen;
+
+ /* if invalid key size provided, we use software tfm */
+ if (!hpre_rsa_key_size_is_support(ctx->key_sz))
+ return 0;
+
+ ctx->rsa.pubkey = dma_alloc_coherent(HPRE_DEV(ctx), vlen << 1,
+ &ctx->rsa.dma_pubkey,
+ GFP_KERNEL);
+ if (!ctx->rsa.pubkey)
+ return -ENOMEM;
+
+ if (private) {
+ ctx->rsa.prikey = dma_alloc_coherent(HPRE_DEV(ctx), vlen << 1,
+ &ctx->rsa.dma_prikey,
+ GFP_KERNEL);
+ if (!ctx->rsa.prikey) {
+ dma_free_coherent(HPRE_DEV(ctx), vlen << 1,
+ ctx->rsa.pubkey,
+ ctx->rsa.dma_pubkey);
+ ctx->rsa.pubkey = NULL;
+ return -ENOMEM;
+ }
+ memcpy(ctx->rsa.prikey + vlen, ptr, vlen);
+ }
+ memcpy(ctx->rsa.pubkey + vlen, ptr, vlen);
+
+ /* Using hardware HPRE to do RSA */
+ return 1;
+}
+
+static int hpre_rsa_set_e(struct hpre_ctx *ctx, const char *value,
+ size_t vlen)
+{
+ const char *ptr = value;
+
+ hpre_rsa_drop_leading_zeros(&ptr, &vlen);
+
+ if (!ctx->key_sz || !vlen || vlen > ctx->key_sz) {
+ ctx->rsa.pubkey = NULL;
+ return -EINVAL;
+ }
+
+ memcpy(ctx->rsa.pubkey + ctx->key_sz - vlen, ptr, vlen);
+
+ return 0;
+}
+
+static int hpre_rsa_set_d(struct hpre_ctx *ctx, const char *value,
+ size_t vlen)
+{
+ const char *ptr = value;
+
+ hpre_rsa_drop_leading_zeros(&ptr, &vlen);
+
+ if (!ctx->key_sz || !vlen || vlen > ctx->key_sz)
+ return -EINVAL;
+
+ memcpy(ctx->rsa.prikey + ctx->key_sz - vlen, ptr, vlen);
+
+ return 0;
+}
+
+static int hpre_crt_para_get(char *para, const char *raw,
+ unsigned int raw_sz, unsigned int para_size)
+{
+ const char *ptr = raw;
+ size_t len = raw_sz;
+
+ hpre_rsa_drop_leading_zeros(&ptr, &len);
+ if (!len || len > para_size)
+ return -EINVAL;
+
+ memcpy(para + para_size - len, ptr, len);
+
+ return 0;
+}
+
+static int hpre_rsa_setkey_crt(struct hpre_ctx *ctx, struct rsa_key *rsa_key)
+{
+ unsigned int hlf_ksz = ctx->key_sz >> 1;
+ struct device *dev = HPRE_DEV(ctx);
+ u64 offset;
+ int ret;
+
+ ctx->rsa.crt_prikey = dma_alloc_coherent(dev, hlf_ksz * HPRE_CRT_PRMS,
+ &ctx->rsa.dma_crt_prikey,
+ GFP_KERNEL);
+ if (!ctx->rsa.crt_prikey)
+ return -ENOMEM;
+
+ ret = hpre_crt_para_get(ctx->rsa.crt_prikey, rsa_key->dq,
+ rsa_key->dq_sz, hlf_ksz);
+ if (ret)
+ goto free_key;
+
+ offset = hlf_ksz;
+ ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, rsa_key->dp,
+ rsa_key->dp_sz, hlf_ksz);
+ if (ret)
+ goto free_key;
+
+ offset = hlf_ksz * HPRE_CRT_Q;
+ ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset,
+ rsa_key->q, rsa_key->q_sz, hlf_ksz);
+ if (ret)
+ goto free_key;
+
+ offset = hlf_ksz * HPRE_CRT_P;
+ ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset,
+ rsa_key->p, rsa_key->p_sz, hlf_ksz);
+ if (ret)
+ goto free_key;
+
+ offset = hlf_ksz * HPRE_CRT_INV;
+ ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset,
+ rsa_key->qinv, rsa_key->qinv_sz, hlf_ksz);
+ if (ret)
+ goto free_key;
+
+ ctx->crt_g2_mode = true;
+
+ return 0;
+
+free_key:
+ offset = hlf_ksz * HPRE_CRT_PRMS;
+ memset(ctx->rsa.crt_prikey, 0, offset);
+ dma_free_coherent(dev, hlf_ksz * HPRE_CRT_PRMS, ctx->rsa.crt_prikey,
+ ctx->rsa.dma_crt_prikey);
+ ctx->rsa.crt_prikey = NULL;
+ ctx->crt_g2_mode = false;
+
+ return ret;
+}
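A hedged map of the CRT private-key buffer assembled above, with each parameter right-aligned in a half-key-size slot, matching the "dq->dp->q->p->qinv" note in hpre_rsa_ctx:

/*
 *	crt_prikey + 0 * hlf_ksz : dq
 *	crt_prikey + 1 * hlf_ksz : dp
 *	crt_prikey + 2 * hlf_ksz : q     (HPRE_CRT_Q)
 *	crt_prikey + 3 * hlf_ksz : p     (HPRE_CRT_P)
 *	crt_prikey + 4 * hlf_ksz : qinv  (HPRE_CRT_INV)
 */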
+
+/* If is_clear_all is set, all resources of the QP will be released. */
+static void hpre_rsa_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all)
+{
+ unsigned int half_key_sz = ctx->key_sz >> 1;
+ struct device *dev = HPRE_DEV(ctx);
+
+ if (is_clear_all)
+ hisi_qm_stop_qp(ctx->qp);
+
+ if (ctx->rsa.pubkey) {
+ dma_free_coherent(dev, ctx->key_sz << 1,
+ ctx->rsa.pubkey, ctx->rsa.dma_pubkey);
+ ctx->rsa.pubkey = NULL;
+ }
+
+ if (ctx->rsa.crt_prikey) {
+ memset(ctx->rsa.crt_prikey, 0, half_key_sz * HPRE_CRT_PRMS);
+ dma_free_coherent(dev, half_key_sz * HPRE_CRT_PRMS,
+ ctx->rsa.crt_prikey, ctx->rsa.dma_crt_prikey);
+ ctx->rsa.crt_prikey = NULL;
+ }
+
+ if (ctx->rsa.prikey) {
+ memset(ctx->rsa.prikey, 0, ctx->key_sz);
+ dma_free_coherent(dev, ctx->key_sz << 1, ctx->rsa.prikey,
+ ctx->rsa.dma_prikey);
+ ctx->rsa.prikey = NULL;
+ }
+
+ hpre_ctx_clear(ctx, is_clear_all);
+}
+
+/*
+ * Judge whether the key is in CRT format:
+ * CRT: return true, N-CRT: return false.
+ */
+static bool hpre_is_crt_key(struct rsa_key *key)
+{
+ u16 len = key->p_sz + key->q_sz + key->dp_sz + key->dq_sz +
+ key->qinv_sz;
+
+#define LEN_OF_NCRT_PARA 5
+
+	/* an N-CRT key has less than 5 bytes of CRT parameter data */
+ return len > LEN_OF_NCRT_PARA;
+}
+
+static int hpre_rsa_setkey(struct hpre_ctx *ctx, const void *key,
+ unsigned int keylen, bool private)
+{
+ struct rsa_key rsa_key;
+ int ret;
+
+ hpre_rsa_clear_ctx(ctx, false);
+
+ if (private)
+ ret = rsa_parse_priv_key(&rsa_key, key, keylen);
+ else
+ ret = rsa_parse_pub_key(&rsa_key, key, keylen);
+ if (ret < 0)
+ return ret;
+
+ ret = hpre_rsa_set_n(ctx, rsa_key.n, rsa_key.n_sz, private);
+ if (ret <= 0)
+ return ret;
+
+ if (private) {
+ ret = hpre_rsa_set_d(ctx, rsa_key.d, rsa_key.d_sz);
+ if (ret < 0)
+ goto free;
+
+ if (hpre_is_crt_key(&rsa_key)) {
+ ret = hpre_rsa_setkey_crt(ctx, &rsa_key);
+ if (ret < 0)
+ goto free;
+ }
+ }
+
+ ret = hpre_rsa_set_e(ctx, rsa_key.e, rsa_key.e_sz);
+ if (ret < 0)
+ goto free;
+
+ if ((private && !ctx->rsa.prikey) || !ctx->rsa.pubkey) {
+ ret = -EINVAL;
+ goto free;
+ }
+
+ return 0;
+
+free:
+ hpre_rsa_clear_ctx(ctx, false);
+ return ret;
+}
+
+static int hpre_rsa_setpubkey(struct crypto_akcipher *tfm, const void *key,
+ unsigned int keylen)
+{
+ struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
+ int ret;
+
+ ret = crypto_akcipher_set_pub_key(ctx->rsa.soft_tfm, key, keylen);
+ if (ret)
+ return ret;
+
+ return hpre_rsa_setkey(ctx, key, keylen, false);
+}
+
+static int hpre_rsa_setprivkey(struct crypto_akcipher *tfm, const void *key,
+ unsigned int keylen)
+{
+ struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
+ int ret;
+
+ ret = crypto_akcipher_set_priv_key(ctx->rsa.soft_tfm, key, keylen);
+ if (ret)
+ return ret;
+
+ return hpre_rsa_setkey(ctx, key, keylen, true);
+}
+
+static unsigned int hpre_rsa_max_size(struct crypto_akcipher *tfm)
+{
+ struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
+
+	/* For 512-bit and 1536-bit key sizes, use the soft tfm instead */
+ if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||
+ ctx->key_sz == HPRE_RSA_1536BITS_KSZ)
+ return crypto_akcipher_maxsize(ctx->rsa.soft_tfm);
+
+ return ctx->key_sz;
+}
+
+static int hpre_rsa_init_tfm(struct crypto_akcipher *tfm)
+{
+ struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
+
+ ctx->rsa.soft_tfm = crypto_alloc_akcipher("rsa-generic", 0, 0);
+ if (IS_ERR(ctx->rsa.soft_tfm)) {
+ pr_err("Can not alloc_akcipher!\n");
+ return PTR_ERR(ctx->rsa.soft_tfm);
+ }
+
+ return hpre_ctx_init(ctx);
+}
+
+static void hpre_rsa_exit_tfm(struct crypto_akcipher *tfm)
+{
+ struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
+
+ hpre_rsa_clear_ctx(ctx, true);
+ crypto_free_akcipher(ctx->rsa.soft_tfm);
+}
+
+static struct akcipher_alg rsa = {
+ .sign = hpre_rsa_dec,
+ .verify = hpre_rsa_enc,
+ .encrypt = hpre_rsa_enc,
+ .decrypt = hpre_rsa_dec,
+ .set_pub_key = hpre_rsa_setpubkey,
+ .set_priv_key = hpre_rsa_setprivkey,
+ .max_size = hpre_rsa_max_size,
+ .init = hpre_rsa_init_tfm,
+ .exit = hpre_rsa_exit_tfm,
+ .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
+ .base = {
+ .cra_ctxsize = sizeof(struct hpre_ctx),
+ .cra_priority = HPRE_CRYPTO_ALG_PRI,
+ .cra_name = "rsa",
+ .cra_driver_name = "hpre-rsa",
+ .cra_module = THIS_MODULE,
+ },
+};
+
+#ifdef CONFIG_CRYPTO_DH
+static struct kpp_alg dh = {
+ .set_secret = hpre_dh_set_secret,
+ .generate_public_key = hpre_dh_compute_value,
+ .compute_shared_secret = hpre_dh_compute_value,
+ .max_size = hpre_dh_max_size,
+ .init = hpre_dh_init_tfm,
+ .exit = hpre_dh_exit_tfm,
+ .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
+ .base = {
+ .cra_ctxsize = sizeof(struct hpre_ctx),
+ .cra_priority = HPRE_CRYPTO_ALG_PRI,
+ .cra_name = "dh",
+ .cra_driver_name = "hpre-dh",
+ .cra_module = THIS_MODULE,
+ },
+};
+#endif
+
+int hpre_algs_register(void)
+{
+ int ret = 0;
+
+ mutex_lock(&hpre_alg_lock);
+ if (++hpre_active_devs == 1) {
+ rsa.base.cra_flags = 0;
+ ret = crypto_register_akcipher(&rsa);
+ if (ret)
+ goto unlock;
+#ifdef CONFIG_CRYPTO_DH
+ ret = crypto_register_kpp(&dh);
+ if (ret) {
+ crypto_unregister_akcipher(&rsa);
+ goto unlock;
+ }
+#endif
+ }
+
+unlock:
+ mutex_unlock(&hpre_alg_lock);
+ return ret;
+}
+
+void hpre_algs_unregister(void)
+{
+ mutex_lock(&hpre_alg_lock);
+ if (--hpre_active_devs == 0) {
+ crypto_unregister_akcipher(&rsa);
+#ifdef CONFIG_CRYPTO_DH
+ crypto_unregister_kpp(&dh);
+#endif
+ }
+ mutex_unlock(&hpre_alg_lock);
+}
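The pair above implements a first-device-registers, last-device-unregisters refcount; a hedged sketch (caller and list handling assumed, per the statics in hpre_main.c) of how a probe path would use it:

static int sketch_probe_tail(struct hpre *hpre)
{
	int ret;

	/* Make the device findable by hpre_find_device() first. */
	mutex_lock(&hpre_list_lock);
	list_add_tail(&hpre->list, &hpre_list);
	mutex_unlock(&hpre_list_lock);

	/* Only the first active device actually registers the algs. */
	ret = hpre_algs_register();
	if (ret)
		pr_err("%s: failed to register algorithms\n", hpre_name);
	return ret;
}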
diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c
new file mode 100644
index 000000000000..34e0424410bf
--- /dev/null
+++ b/drivers/crypto/hisilicon/hpre/hpre_main.c
@@ -0,0 +1,1052 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018-2019 HiSilicon Limited. */
+#include <linux/acpi.h>
+#include <linux/aer.h>
+#include <linux/bitops.h>
+#include <linux/debugfs.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/topology.h>
+#include "hpre.h"
+
+#define HPRE_VF_NUM 63
+#define HPRE_QUEUE_NUM_V2 1024
+#define HPRE_QM_ABNML_INT_MASK 0x100004
+#define HPRE_CTRL_CNT_CLR_CE_BIT BIT(0)
+#define HPRE_COMM_CNT_CLR_CE 0x0
+#define HPRE_CTRL_CNT_CLR_CE 0x301000
+#define HPRE_FSM_MAX_CNT 0x301008
+#define HPRE_VFG_AXQOS 0x30100c
+#define HPRE_VFG_AXCACHE 0x301010
+#define HPRE_RDCHN_INI_CFG 0x301014
+#define HPRE_AWUSR_FP_CFG 0x301018
+#define HPRE_BD_ENDIAN 0x301020
+#define HPRE_ECC_BYPASS 0x301024
+#define HPRE_RAS_WIDTH_CFG 0x301028
+#define HPRE_POISON_BYPASS 0x30102c
+#define HPRE_BD_ARUSR_CFG 0x301030
+#define HPRE_BD_AWUSR_CFG 0x301034
+#define HPRE_TYPES_ENB 0x301038
+#define HPRE_DATA_RUSER_CFG 0x30103c
+#define HPRE_DATA_WUSER_CFG 0x301040
+#define HPRE_INT_MASK 0x301400
+#define HPRE_INT_STATUS 0x301800
+#define HPRE_CORE_INT_ENABLE 0
+#define HPRE_CORE_INT_DISABLE 0x003fffff
+#define HPRE_RAS_ECC_1BIT_TH 0x30140c
+#define HPRE_RDCHN_INI_ST 0x301a00
+#define HPRE_CLSTR_BASE 0x302000
+#define HPRE_CORE_EN_OFFSET 0x04
+#define HPRE_CORE_INI_CFG_OFFSET 0x20
+#define HPRE_CORE_INI_STATUS_OFFSET 0x80
+#define HPRE_CORE_HTBT_WARN_OFFSET 0x8c
+#define HPRE_CORE_IS_SCHD_OFFSET 0x90
+
+#define HPRE_RAS_CE_ENB 0x301410
+#define HPRE_HAC_RAS_CE_ENABLE 0x3f
+#define HPRE_RAS_NFE_ENB 0x301414
+#define HPRE_HAC_RAS_NFE_ENABLE 0x3fffc0
+#define HPRE_RAS_FE_ENB 0x301418
+#define HPRE_HAC_RAS_FE_ENABLE 0
+
+#define HPRE_CORE_ENB (HPRE_CLSTR_BASE + HPRE_CORE_EN_OFFSET)
+#define HPRE_CORE_INI_CFG (HPRE_CLSTR_BASE + HPRE_CORE_INI_CFG_OFFSET)
+#define HPRE_CORE_INI_STATUS (HPRE_CLSTR_BASE + HPRE_CORE_INI_STATUS_OFFSET)
+#define HPRE_HAC_ECC1_CNT 0x301a04
+#define HPRE_HAC_ECC2_CNT 0x301a08
+#define HPRE_HAC_INT_STATUS 0x301800
+#define HPRE_HAC_SOURCE_INT 0x301600
+#define MASTER_GLOBAL_CTRL_SHUTDOWN 1
+#define MASTER_TRANS_RETURN_RW 3
+#define HPRE_MASTER_TRANS_RETURN 0x300150
+#define HPRE_MASTER_GLOBAL_CTRL 0x300000
+#define HPRE_CLSTR_ADDR_INTRVL 0x1000
+#define HPRE_CLUSTER_INQURY 0x100
+#define HPRE_CLSTR_ADDR_INQRY_RSLT 0x104
+#define HPRE_TIMEOUT_ABNML_BIT 6
+#define HPRE_PASID_EN_BIT 9
+#define HPRE_REG_RD_INTVRL_US 10
+#define HPRE_REG_RD_TMOUT_US 1000
+#define HPRE_DBGFS_VAL_MAX_LEN 20
+#define HPRE_PCI_DEVICE_ID 0xa258
+#define HPRE_PCI_VF_DEVICE_ID 0xa259
+#define HPRE_ADDR(qm, offset) (qm->io_base + (offset))
+#define HPRE_QM_USR_CFG_MASK 0xfffffffe
+#define HPRE_QM_AXI_CFG_MASK 0xffff
+#define HPRE_QM_VFG_AX_MASK 0xff
+#define HPRE_BD_USR_MASK 0x3
+#define HPRE_CLUSTER_CORE_MASK 0xf
+
+#define HPRE_VIA_MSI_DSM 1
+
+static LIST_HEAD(hpre_list);
+static DEFINE_MUTEX(hpre_list_lock);
+static const char hpre_name[] = "hisi_hpre";
+static struct dentry *hpre_debugfs_root;
+static const struct pci_device_id hpre_dev_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HPRE_PCI_DEVICE_ID) },
+ { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HPRE_PCI_VF_DEVICE_ID) },
+ { 0, }
+};
+
+MODULE_DEVICE_TABLE(pci, hpre_dev_ids);
+
+struct hpre_hw_error {
+ u32 int_msk;
+ const char *msg;
+};
+
+static const char * const hpre_debug_file_name[] = {
+ [HPRE_CURRENT_QM] = "current_qm",
+ [HPRE_CLEAR_ENABLE] = "rdclr_en",
+ [HPRE_CLUSTER_CTRL] = "cluster_ctrl",
+};
+
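+/* Map HPRE_HAC_INT_STATUS bits to printable error names; GENMASK entries cover multi-bit fields */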
+static const struct hpre_hw_error hpre_hw_errors[] = {
+ { .int_msk = BIT(0), .msg = "hpre_ecc_1bit_err" },
+ { .int_msk = BIT(1), .msg = "hpre_ecc_2bit_err" },
+ { .int_msk = BIT(2), .msg = "hpre_data_wr_err" },
+ { .int_msk = BIT(3), .msg = "hpre_data_rd_err" },
+ { .int_msk = BIT(4), .msg = "hpre_bd_rd_err" },
+ { .int_msk = BIT(5), .msg = "hpre_ooo_2bit_ecc_err" },
+ { .int_msk = BIT(6), .msg = "hpre_cltr1_htbt_tm_out_err" },
+ { .int_msk = BIT(7), .msg = "hpre_cltr2_htbt_tm_out_err" },
+ { .int_msk = BIT(8), .msg = "hpre_cltr3_htbt_tm_out_err" },
+ { .int_msk = BIT(9), .msg = "hpre_cltr4_htbt_tm_out_err" },
+ { .int_msk = GENMASK(15, 10), .msg = "hpre_ooo_rdrsp_err" },
+ { .int_msk = GENMASK(21, 16), .msg = "hpre_ooo_wrrsp_err" },
+ { /* sentinel */ }
+};
+
+static const u64 hpre_cluster_offsets[] = {
+ [HPRE_CLUSTER0] =
+ HPRE_CLSTR_BASE + HPRE_CLUSTER0 * HPRE_CLSTR_ADDR_INTRVL,
+ [HPRE_CLUSTER1] =
+ HPRE_CLSTR_BASE + HPRE_CLUSTER1 * HPRE_CLSTR_ADDR_INTRVL,
+ [HPRE_CLUSTER2] =
+ HPRE_CLSTR_BASE + HPRE_CLUSTER2 * HPRE_CLSTR_ADDR_INTRVL,
+ [HPRE_CLUSTER3] =
+ HPRE_CLSTR_BASE + HPRE_CLUSTER3 * HPRE_CLSTR_ADDR_INTRVL,
+};
+
+static struct debugfs_reg32 hpre_cluster_dfx_regs[] = {
+ {"CORES_EN_STATUS ", HPRE_CORE_EN_OFFSET},
+ {"CORES_INI_CFG ", HPRE_CORE_INI_CFG_OFFSET},
+ {"CORES_INI_STATUS ", HPRE_CORE_INI_STATUS_OFFSET},
+ {"CORES_HTBT_WARN ", HPRE_CORE_HTBT_WARN_OFFSET},
+ {"CORES_IS_SCHD ", HPRE_CORE_IS_SCHD_OFFSET},
+};
+
+static struct debugfs_reg32 hpre_com_dfx_regs[] = {
+ {"READ_CLR_EN ", HPRE_CTRL_CNT_CLR_CE},
+ {"AXQOS ", HPRE_VFG_AXQOS},
+ {"AWUSR_CFG ", HPRE_AWUSR_FP_CFG},
+ {"QM_ARUSR_MCFG1 ", QM_ARUSER_M_CFG_1},
+ {"QM_AWUSR_MCFG1 ", QM_AWUSER_M_CFG_1},
+ {"BD_ENDIAN ", HPRE_BD_ENDIAN},
+ {"ECC_CHECK_CTRL ", HPRE_ECC_BYPASS},
+ {"RAS_INT_WIDTH ", HPRE_RAS_WIDTH_CFG},
+ {"POISON_BYPASS ", HPRE_POISON_BYPASS},
+ {"BD_ARUSER ", HPRE_BD_ARUSR_CFG},
+ {"BD_AWUSER ", HPRE_BD_AWUSR_CFG},
+ {"DATA_ARUSER ", HPRE_DATA_RUSER_CFG},
+ {"DATA_AWUSER ", HPRE_DATA_WUSER_CFG},
+ {"INT_STATUS ", HPRE_INT_STATUS},
+};
+
+static int hpre_pf_q_num_set(const char *val, const struct kernel_param *kp)
+{
+ struct pci_dev *pdev;
+ u32 n, q_num;
+ u8 rev_id;
+ int ret;
+
+ if (!val)
+ return -EINVAL;
+
+ pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI, HPRE_PCI_DEVICE_ID, NULL);
+ if (!pdev) {
+ q_num = HPRE_QUEUE_NUM_V2;
+ pr_info("No device found currently, suppose queue number is %d\n",
+ q_num);
+ } else {
+ rev_id = pdev->revision;
+ if (rev_id != QM_HW_V2)
+ return -EINVAL;
+
+ q_num = HPRE_QUEUE_NUM_V2;
+ }
+
+ ret = kstrtou32(val, 10, &n);
+ if (ret != 0 || n == 0 || n > q_num)
+ return -EINVAL;
+
+ return param_set_int(val, kp);
+}
+
+static const struct kernel_param_ops hpre_pf_q_num_ops = {
+ .set = hpre_pf_q_num_set,
+ .get = param_get_int,
+};
+
+static u32 hpre_pf_q_num = HPRE_PF_DEF_Q_NUM;
+module_param_cb(hpre_pf_q_num, &hpre_pf_q_num_ops, &hpre_pf_q_num, 0444);
+MODULE_PARM_DESC(hpre_pf_q_num, "Number of queues in PF of HPRE (1-1024)");
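+/* Example usage (queue count is illustrative): modprobe hisi_hpre hpre_pf_q_num=256 */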
+
+static inline void hpre_add_to_list(struct hpre *hpre)
+{
+ mutex_lock(&hpre_list_lock);
+ list_add_tail(&hpre->list, &hpre_list);
+ mutex_unlock(&hpre_list_lock);
+}
+
+static inline void hpre_remove_from_list(struct hpre *hpre)
+{
+ mutex_lock(&hpre_list_lock);
+ list_del(&hpre->list);
+ mutex_unlock(&hpre_list_lock);
+}
+
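+/* Return the registered HPRE device with the smallest NUMA distance to @node */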
+struct hpre *hpre_find_device(int node)
+{
+ struct hpre *hpre, *ret = NULL;
+ int min_distance = INT_MAX;
+ struct device *dev;
+ int dev_node = 0;
+
+ mutex_lock(&hpre_list_lock);
+ list_for_each_entry(hpre, &hpre_list, list) {
+ dev = &hpre->qm.pdev->dev;
+#ifdef CONFIG_NUMA
+ dev_node = dev->numa_node;
+ if (dev_node < 0)
+ dev_node = 0;
+#endif
+ if (node_distance(dev_node, node) < min_distance) {
+ ret = hpre;
+ min_distance = node_distance(dev_node, node);
+ }
+ }
+ mutex_unlock(&hpre_list_lock);
+
+ return ret;
+}
+
+static int hpre_cfg_by_dsm(struct hisi_qm *qm)
+{
+ struct device *dev = &qm->pdev->dev;
+ union acpi_object *obj;
+ guid_t guid;
+
+ if (guid_parse("b06b81ab-0134-4a45-9b0c-483447b95fa7", &guid)) {
+ dev_err(dev, "Hpre GUID failed\n");
+ return -EINVAL;
+ }
+
+ /* Switch over to MSI handling due to non-standard PCI implementation */
+ obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), &guid,
+ 0, HPRE_VIA_MSI_DSM, NULL);
+ if (!obj) {
+ dev_err(dev, "ACPI handle failed!\n");
+ return -EIO;
+ }
+
+ ACPI_FREE(obj);
+
+ return 0;
+}
+
+static int hpre_set_user_domain_and_cache(struct hpre *hpre)
+{
+ struct hisi_qm *qm = &hpre->qm;
+ struct device *dev = &qm->pdev->dev;
+ unsigned long offset;
+ int ret, i;
+ u32 val;
+
+ writel(HPRE_QM_USR_CFG_MASK, HPRE_ADDR(qm, QM_ARUSER_M_CFG_ENABLE));
+ writel(HPRE_QM_USR_CFG_MASK, HPRE_ADDR(qm, QM_AWUSER_M_CFG_ENABLE));
+ writel_relaxed(HPRE_QM_AXI_CFG_MASK, HPRE_ADDR(qm, QM_AXI_M_CFG));
+
+ /* disable FLR triggered by BME(bus master enable) */
+ writel(PEH_AXUSER_CFG, HPRE_ADDR(qm, QM_PEH_AXUSER_CFG));
+ writel(PEH_AXUSER_CFG_ENABLE, HPRE_ADDR(qm, QM_PEH_AXUSER_CFG_ENABLE));
+
+ /* HPRE needs more time, so disable this timeout interrupt */
+ val = readl_relaxed(HPRE_ADDR(qm, HPRE_QM_ABNML_INT_MASK));
+ val |= BIT(HPRE_TIMEOUT_ABNML_BIT);
+ writel_relaxed(val, HPRE_ADDR(qm, HPRE_QM_ABNML_INT_MASK));
+
+ writel(0x1, HPRE_ADDR(qm, HPRE_TYPES_ENB));
+ writel(HPRE_QM_VFG_AX_MASK, HPRE_ADDR(qm, HPRE_VFG_AXCACHE));
+ writel(0x0, HPRE_ADDR(qm, HPRE_BD_ENDIAN));
+ writel(0x0, HPRE_ADDR(qm, HPRE_INT_MASK));
+ writel(0x0, HPRE_ADDR(qm, HPRE_RAS_ECC_1BIT_TH));
+ writel(0x0, HPRE_ADDR(qm, HPRE_POISON_BYPASS));
+ writel(0x0, HPRE_ADDR(qm, HPRE_COMM_CNT_CLR_CE));
+ writel(0x0, HPRE_ADDR(qm, HPRE_ECC_BYPASS));
+
+ writel(HPRE_BD_USR_MASK, HPRE_ADDR(qm, HPRE_BD_ARUSR_CFG));
+ writel(HPRE_BD_USR_MASK, HPRE_ADDR(qm, HPRE_BD_AWUSR_CFG));
+ writel(0x1, HPRE_ADDR(qm, HPRE_RDCHN_INI_CFG));
+ ret = readl_relaxed_poll_timeout(HPRE_ADDR(qm, HPRE_RDCHN_INI_ST), val,
+ val & BIT(0),
+ HPRE_REG_RD_INTVRL_US,
+ HPRE_REG_RD_TMOUT_US);
+ if (ret) {
+ dev_err(dev, "read rd channel timeout fail!\n");
+ return -ETIMEDOUT;
+ }
+
+ for (i = 0; i < HPRE_CLUSTERS_NUM; i++) {
+ offset = i * HPRE_CLSTR_ADDR_INTRVL;
+
+ /* initialize clusters */
+ writel(HPRE_CLUSTER_CORE_MASK,
+ HPRE_ADDR(qm, offset + HPRE_CORE_ENB));
+ writel(0x1, HPRE_ADDR(qm, offset + HPRE_CORE_INI_CFG));
+ ret = readl_relaxed_poll_timeout(HPRE_ADDR(qm, offset +
+ HPRE_CORE_INI_STATUS), val,
+ ((val & HPRE_CLUSTER_CORE_MASK) ==
+ HPRE_CLUSTER_CORE_MASK),
+ HPRE_REG_RD_INTVRL_US,
+ HPRE_REG_RD_TMOUT_US);
+ if (ret) {
+ dev_err(dev,
+ "cluster %d init status timeout!\n", i);
+ return -ETIMEDOUT;
+ }
+ }
+
+ ret = hpre_cfg_by_dsm(qm);
+ if (ret)
+ dev_err(dev, "acpi_evaluate_dsm err.\n");
+
+ return ret;
+}
+
+static void hpre_cnt_regs_clear(struct hisi_qm *qm)
+{
+ unsigned long offset;
+ int i;
+
+ /* clear current_qm */
+ writel(0x0, qm->io_base + QM_DFX_MB_CNT_VF);
+ writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF);
+
+ /* clear clusterX/cluster_ctrl */
+ for (i = 0; i < HPRE_CLUSTERS_NUM; i++) {
+ offset = HPRE_CLSTR_BASE + i * HPRE_CLSTR_ADDR_INTRVL;
+ writel(0x0, qm->io_base + offset + HPRE_CLUSTER_INQURY);
+ }
+
+ /* clear rdclr_en */
+ writel(0x0, qm->io_base + HPRE_CTRL_CNT_CLR_CE);
+
+ hisi_qm_debug_regs_clear(qm);
+}
+
+static void hpre_hw_error_disable(struct hpre *hpre)
+{
+ struct hisi_qm *qm = &hpre->qm;
+
+ /* disable hpre hw error interrupts */
+ writel(HPRE_CORE_INT_DISABLE, qm->io_base + HPRE_INT_MASK);
+}
+
+static void hpre_hw_error_enable(struct hpre *hpre)
+{
+ struct hisi_qm *qm = &hpre->qm;
+
+ /* enable hpre hw error interrupts */
+ writel(HPRE_CORE_INT_ENABLE, qm->io_base + HPRE_INT_MASK);
+ writel(HPRE_HAC_RAS_CE_ENABLE, qm->io_base + HPRE_RAS_CE_ENB);
+ writel(HPRE_HAC_RAS_NFE_ENABLE, qm->io_base + HPRE_RAS_NFE_ENB);
+ writel(HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_RAS_FE_ENB);
+}
+
+static inline struct hisi_qm *hpre_file_to_qm(struct hpre_debugfs_file *file)
+{
+ struct hpre *hpre = container_of(file->debug, struct hpre, debug);
+
+ return &hpre->qm;
+}
+
+static u32 hpre_current_qm_read(struct hpre_debugfs_file *file)
+{
+ struct hisi_qm *qm = hpre_file_to_qm(file);
+
+ return readl(qm->io_base + QM_DFX_MB_CNT_VF);
+}
+
+static int hpre_current_qm_write(struct hpre_debugfs_file *file, u32 val)
+{
+ struct hisi_qm *qm = hpre_file_to_qm(file);
+ struct hpre_debug *debug = file->debug;
+ struct hpre *hpre = container_of(debug, struct hpre, debug);
+ u32 num_vfs = hpre->num_vfs;
+ u32 vfq_num, tmp;
+
+ if (val > num_vfs)
+ return -EINVAL;
+
+ /* Calculate curr_qm_qp_num and store it according to PF or VF Dev ID */
+ if (val == 0) {
+ qm->debug.curr_qm_qp_num = qm->qp_num;
+ } else {
+ vfq_num = (qm->ctrl_qp_num - qm->qp_num) / num_vfs;
+ if (val == num_vfs) {
+ qm->debug.curr_qm_qp_num =
+ qm->ctrl_qp_num - qm->qp_num - (num_vfs - 1) * vfq_num;
+ } else {
+ qm->debug.curr_qm_qp_num = vfq_num;
+ }
+ }
+
+ writel(val, qm->io_base + QM_DFX_MB_CNT_VF);
+ writel(val, qm->io_base + QM_DFX_DB_CNT_VF);
+
+ tmp = val |
+ (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_Q_MASK);
+ writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);
+
+ tmp = val |
+ (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_Q_MASK);
+ writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);
+
+ return 0;
+}
+
+static u32 hpre_clear_enable_read(struct hpre_debugfs_file *file)
+{
+ struct hisi_qm *qm = hpre_file_to_qm(file);
+
+ return readl(qm->io_base + HPRE_CTRL_CNT_CLR_CE) &
+ HPRE_CTRL_CNT_CLR_CE_BIT;
+}
+
+static int hpre_clear_enable_write(struct hpre_debugfs_file *file, u32 val)
+{
+ struct hisi_qm *qm = hpre_file_to_qm(file);
+ u32 tmp;
+
+ if (val != 1 && val != 0)
+ return -EINVAL;
+
+ tmp = (readl(qm->io_base + HPRE_CTRL_CNT_CLR_CE) &
+ ~HPRE_CTRL_CNT_CLR_CE_BIT) | val;
+ writel(tmp, qm->io_base + HPRE_CTRL_CNT_CLR_CE);
+
+ return 0;
+}
+
+static u32 hpre_cluster_inqry_read(struct hpre_debugfs_file *file)
+{
+ struct hisi_qm *qm = hpre_file_to_qm(file);
+ int cluster_index = file->index - HPRE_CLUSTER_CTRL;
+ unsigned long offset = HPRE_CLSTR_BASE +
+ cluster_index * HPRE_CLSTR_ADDR_INTRVL;
+
+ return readl(qm->io_base + offset + HPRE_CLSTR_ADDR_INQRY_RSLT);
+}
+
+static int hpre_cluster_inqry_write(struct hpre_debugfs_file *file, u32 val)
+{
+ struct hisi_qm *qm = hpre_file_to_qm(file);
+ int cluster_index = file->index - HPRE_CLUSTER_CTRL;
+ unsigned long offset = HPRE_CLSTR_BASE + cluster_index *
+ HPRE_CLSTR_ADDR_INTRVL;
+
+ writel(val, qm->io_base + offset + HPRE_CLUSTER_INQURY);
+
+ return 0;
+}
+
+static ssize_t hpre_ctrl_debug_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct hpre_debugfs_file *file = filp->private_data;
+ char tbuf[HPRE_DBGFS_VAL_MAX_LEN];
+ u32 val;
+ int ret;
+
+ spin_lock_irq(&file->lock);
+ switch (file->type) {
+ case HPRE_CURRENT_QM:
+ val = hpre_current_qm_read(file);
+ break;
+ case HPRE_CLEAR_ENABLE:
+ val = hpre_clear_enable_read(file);
+ break;
+ case HPRE_CLUSTER_CTRL:
+ val = hpre_cluster_inqry_read(file);
+ break;
+ default:
+ spin_unlock_irq(&file->lock);
+ return -EINVAL;
+ }
+ spin_unlock_irq(&file->lock);
+ ret = sprintf(tbuf, "%u\n", val);
+ return simple_read_from_buffer(buf, count, pos, tbuf, ret);
+}
+
+static ssize_t hpre_ctrl_debug_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct hpre_debugfs_file *file = filp->private_data;
+ char tbuf[HPRE_DBGFS_VAL_MAX_LEN];
+ unsigned long val;
+ int len, ret;
+
+ if (*pos != 0)
+ return 0;
+
+ if (count >= HPRE_DBGFS_VAL_MAX_LEN)
+ return -ENOSPC;
+
+ len = simple_write_to_buffer(tbuf, HPRE_DBGFS_VAL_MAX_LEN - 1,
+ pos, buf, count);
+ if (len < 0)
+ return len;
+
+ tbuf[len] = '\0';
+ if (kstrtoul(tbuf, 0, &val))
+ return -EFAULT;
+
+ spin_lock_irq(&file->lock);
+ switch (file->type) {
+ case HPRE_CURRENT_QM:
+ ret = hpre_current_qm_write(file, val);
+ if (ret)
+ goto err_input;
+ break;
+ case HPRE_CLEAR_ENABLE:
+ ret = hpre_clear_enable_write(file, val);
+ if (ret)
+ goto err_input;
+ break;
+ case HPRE_CLUSTER_CTRL:
+ ret = hpre_cluster_inqry_write(file, val);
+ if (ret)
+ goto err_input;
+ break;
+ default:
+ ret = -EINVAL;
+ goto err_input;
+ }
+ spin_unlock_irq(&file->lock);
+
+ return count;
+
+err_input:
+ spin_unlock_irq(&file->lock);
+ return ret;
+}
+
+static const struct file_operations hpre_ctrl_debug_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = hpre_ctrl_debug_read,
+ .write = hpre_ctrl_debug_write,
+};
+
+static int hpre_create_debugfs_file(struct hpre_debug *dbg, struct dentry *dir,
+ enum hpre_ctrl_dbgfs_file type, int indx)
+{
+ struct dentry *tmp, *file_dir;
+
+ if (dir)
+ file_dir = dir;
+ else
+ file_dir = dbg->debug_root;
+
+ if (type >= HPRE_DEBUG_FILE_NUM)
+ return -EINVAL;
+
+ spin_lock_init(&dbg->files[indx].lock);
+ dbg->files[indx].debug = dbg;
+ dbg->files[indx].type = type;
+ dbg->files[indx].index = indx;
+ tmp = debugfs_create_file(hpre_debug_file_name[type], 0600, file_dir,
+ dbg->files + indx, &hpre_ctrl_debug_fops);
+ if (!tmp)
+ return -ENOENT;
+
+ return 0;
+}
+
+static int hpre_pf_comm_regs_debugfs_init(struct hpre_debug *debug)
+{
+ struct hpre *hpre = container_of(debug, struct hpre, debug);
+ struct hisi_qm *qm = &hpre->qm;
+ struct device *dev = &qm->pdev->dev;
+ struct debugfs_regset32 *regset;
+ struct dentry *tmp;
+
+ regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
+ if (!regset)
+ return -ENOMEM;
+
+ regset->regs = hpre_com_dfx_regs;
+ regset->nregs = ARRAY_SIZE(hpre_com_dfx_regs);
+ regset->base = qm->io_base;
+
+ tmp = debugfs_create_regset32("regs", 0444, debug->debug_root, regset);
+ if (!tmp)
+ return -ENOENT;
+
+ return 0;
+}
+
+static int hpre_cluster_debugfs_init(struct hpre_debug *debug)
+{
+ struct hpre *hpre = container_of(debug, struct hpre, debug);
+ struct hisi_qm *qm = &hpre->qm;
+ struct device *dev = &qm->pdev->dev;
+ char buf[HPRE_DBGFS_VAL_MAX_LEN];
+ struct debugfs_regset32 *regset;
+ struct dentry *tmp_d, *tmp;
+ int i, ret;
+
+ for (i = 0; i < HPRE_CLUSTERS_NUM; i++) {
+ sprintf(buf, "cluster%d", i);
+
+ tmp_d = debugfs_create_dir(buf, debug->debug_root);
+ if (!tmp_d)
+ return -ENOENT;
+
+ regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
+ if (!regset)
+ return -ENOMEM;
+
+ regset->regs = hpre_cluster_dfx_regs;
+ regset->nregs = ARRAY_SIZE(hpre_cluster_dfx_regs);
+ regset->base = qm->io_base + hpre_cluster_offsets[i];
+
+ tmp = debugfs_create_regset32("regs", 0444, tmp_d, regset);
+ if (!tmp)
+ return -ENOENT;
+ ret = hpre_create_debugfs_file(debug, tmp_d, HPRE_CLUSTER_CTRL,
+ i + HPRE_CLUSTER_CTRL);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hpre_ctrl_debug_init(struct hpre_debug *debug)
+{
+ int ret;
+
+ ret = hpre_create_debugfs_file(debug, NULL, HPRE_CURRENT_QM,
+ HPRE_CURRENT_QM);
+ if (ret)
+ return ret;
+
+ ret = hpre_create_debugfs_file(debug, NULL, HPRE_CLEAR_ENABLE,
+ HPRE_CLEAR_ENABLE);
+ if (ret)
+ return ret;
+
+ ret = hpre_pf_comm_regs_debugfs_init(debug);
+ if (ret)
+ return ret;
+
+ return hpre_cluster_debugfs_init(debug);
+}
+
+static int hpre_debugfs_init(struct hpre *hpre)
+{
+ struct hisi_qm *qm = &hpre->qm;
+ struct device *dev = &qm->pdev->dev;
+ struct dentry *dir;
+ int ret;
+
+ dir = debugfs_create_dir(dev_name(dev), hpre_debugfs_root);
+ if (!dir)
+ return -ENOENT;
+
+ qm->debug.debug_root = dir;
+
+ ret = hisi_qm_debug_init(qm);
+ if (ret)
+ goto failed_to_create;
+
+ if (qm->pdev->device == HPRE_PCI_DEVICE_ID) {
+ hpre->debug.debug_root = dir;
+ ret = hpre_ctrl_debug_init(&hpre->debug);
+ if (ret)
+ goto failed_to_create;
+ }
+ return 0;
+
+failed_to_create:
+ debugfs_remove_recursive(qm->debug.debug_root);
+ return ret;
+}
+
+static void hpre_debugfs_exit(struct hpre *hpre)
+{
+ struct hisi_qm *qm = &hpre->qm;
+
+ debugfs_remove_recursive(qm->debug.debug_root);
+}
+
+static int hpre_qm_pre_init(struct hisi_qm *qm, struct pci_dev *pdev)
+{
+ enum qm_hw_ver rev_id;
+
+ rev_id = hisi_qm_get_hw_version(pdev);
+ if (rev_id < 0)
+ return -ENODEV;
+
+ if (rev_id == QM_HW_V1) {
+ pci_warn(pdev, "HPRE version 1 is not supported!\n");
+ return -EINVAL;
+ }
+
+ qm->pdev = pdev;
+ qm->ver = rev_id;
+ qm->sqe_size = HPRE_SQE_SIZE;
+ qm->dev_name = hpre_name;
+ qm->fun_type = (pdev->device == HPRE_PCI_DEVICE_ID) ?
+ QM_HW_PF : QM_HW_VF;
+ if (pdev->is_physfn) {
+ qm->qp_base = HPRE_PF_DEF_Q_BASE;
+ qm->qp_num = hpre_pf_q_num;
+ }
+ qm->use_dma_api = true;
+
+ return 0;
+}
+
+static void hpre_hw_err_init(struct hpre *hpre)
+{
+ hisi_qm_hw_error_init(&hpre->qm, QM_BASE_CE, QM_BASE_NFE,
+ 0, QM_DB_RANDOM_INVALID);
+ hpre_hw_error_enable(hpre);
+}
+
+static int hpre_pf_probe_init(struct hpre *hpre)
+{
+ struct hisi_qm *qm = &hpre->qm;
+ int ret;
+
+ qm->ctrl_qp_num = HPRE_QUEUE_NUM_V2;
+
+ ret = hpre_set_user_domain_and_cache(hpre);
+ if (ret)
+ return ret;
+
+ hpre_hw_err_init(hpre);
+
+ return 0;
+}
+
+static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct hisi_qm *qm;
+ struct hpre *hpre;
+ int ret;
+
+ hpre = devm_kzalloc(&pdev->dev, sizeof(*hpre), GFP_KERNEL);
+ if (!hpre)
+ return -ENOMEM;
+
+ pci_set_drvdata(pdev, hpre);
+
+ qm = &hpre->qm;
+ ret = hpre_qm_pre_init(qm, pdev);
+ if (ret)
+ return ret;
+
+ ret = hisi_qm_init(qm);
+ if (ret)
+ return ret;
+
+ if (pdev->is_physfn) {
+ ret = hpre_pf_probe_init(hpre);
+ if (ret)
+ goto err_with_qm_init;
+ } else if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V2) {
+ /* v2 starts to support get vft by mailbox */
+ ret = hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num);
+ if (ret)
+ goto err_with_qm_init;
+ }
+
+ ret = hisi_qm_start(qm);
+ if (ret)
+ goto err_with_err_init;
+
+ ret = hpre_debugfs_init(hpre);
+ if (ret)
+ dev_warn(&pdev->dev, "failed to init debugfs!\n");
+
+ hpre_add_to_list(hpre);
+
+ ret = hpre_algs_register();
+ if (ret < 0) {
+ hpre_remove_from_list(hpre);
+ pci_err(pdev, "fail to register algs to crypto!\n");
+ goto err_with_qm_start;
+ }
+ return 0;
+
+err_with_qm_start:
+ hisi_qm_stop(qm);
+
+err_with_err_init:
+ if (pdev->is_physfn)
+ hpre_hw_error_disable(hpre);
+
+err_with_qm_init:
+ hisi_qm_uninit(qm);
+
+ return ret;
+}
+
+static int hpre_vf_q_assign(struct hpre *hpre, int num_vfs)
+{
+ struct hisi_qm *qm = &hpre->qm;
+ u32 qp_num = qm->qp_num;
+ int q_num, remain_q_num, i;
+ u32 q_base = qp_num;
+ int ret;
+
+ if (!num_vfs)
+ return -EINVAL;
+
+ remain_q_num = qm->ctrl_qp_num - qp_num;
+
+ /* If remaining queues are not enough, return error. */
+ if (remain_q_num < num_vfs)
+ return -EINVAL;
+
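+ /* Split queues evenly among VFs; the last VF also takes the remainder */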
+ q_num = remain_q_num / num_vfs;
+ for (i = 1; i <= num_vfs; i++) {
+ if (i == num_vfs)
+ q_num += remain_q_num % num_vfs;
+ ret = hisi_qm_set_vft(qm, i, q_base, (u32)q_num);
+ if (ret)
+ return ret;
+ q_base += q_num;
+ }
+
+ return 0;
+}
+
+static int hpre_clear_vft_config(struct hpre *hpre)
+{
+ struct hisi_qm *qm = &hpre->qm;
+ u32 num_vfs = hpre->num_vfs;
+ int ret;
+ u32 i;
+
+ for (i = 1; i <= num_vfs; i++) {
+ ret = hisi_qm_set_vft(qm, i, 0, 0);
+ if (ret)
+ return ret;
+ }
+ hpre->num_vfs = 0;
+
+ return 0;
+}
+
+static int hpre_sriov_enable(struct pci_dev *pdev, int max_vfs)
+{
+ struct hpre *hpre = pci_get_drvdata(pdev);
+ int pre_existing_vfs, num_vfs, ret;
+
+ pre_existing_vfs = pci_num_vf(pdev);
+ if (pre_existing_vfs) {
+ pci_err(pdev,
+ "Can't enable VF. Please disable pre-enabled VFs!\n");
+ return 0;
+ }
+
+ num_vfs = min_t(int, max_vfs, HPRE_VF_NUM);
+ ret = hpre_vf_q_assign(hpre, num_vfs);
+ if (ret) {
+ pci_err(pdev, "Can't assign queues for VF!\n");
+ return ret;
+ }
+
+ hpre->num_vfs = num_vfs;
+
+ ret = pci_enable_sriov(pdev, num_vfs);
+ if (ret) {
+ pci_err(pdev, "Can't enable VF!\n");
+ hpre_clear_vft_config(hpre);
+ return ret;
+ }
+
+ return num_vfs;
+}
+
+static int hpre_sriov_disable(struct pci_dev *pdev)
+{
+ struct hpre *hpre = pci_get_drvdata(pdev);
+
+ if (pci_vfs_assigned(pdev)) {
+ pci_err(pdev, "Failed to disable VFs while VFs are assigned!\n");
+ return -EPERM;
+ }
+
+ /* The remove callback of hpre_pci_driver will be called to free VF resources */
+ pci_disable_sriov(pdev);
+
+ return hpre_clear_vft_config(hpre);
+}
+
+static int hpre_sriov_configure(struct pci_dev *pdev, int num_vfs)
+{
+ if (num_vfs)
+ return hpre_sriov_enable(pdev, num_vfs);
+ else
+ return hpre_sriov_disable(pdev);
+}
+
+static void hpre_remove(struct pci_dev *pdev)
+{
+ struct hpre *hpre = pci_get_drvdata(pdev);
+ struct hisi_qm *qm = &hpre->qm;
+ int ret;
+
+ hpre_algs_unregister();
+ hpre_remove_from_list(hpre);
+ if (qm->fun_type == QM_HW_PF && hpre->num_vfs != 0) {
+ ret = hpre_sriov_disable(pdev);
+ if (ret) {
+ pci_err(pdev, "Disable SRIOV fail!\n");
+ return;
+ }
+ }
+ if (qm->fun_type == QM_HW_PF) {
+ hpre_cnt_regs_clear(qm);
+ qm->debug.curr_qm_qp_num = 0;
+ }
+
+ hpre_debugfs_exit(hpre);
+ hisi_qm_stop(qm);
+ if (qm->fun_type == QM_HW_PF)
+ hpre_hw_error_disable(hpre);
+ hisi_qm_uninit(qm);
+}
+
+static void hpre_log_hw_error(struct hpre *hpre, u32 err_sts)
+{
+ const struct hpre_hw_error *err = hpre_hw_errors;
+ struct device *dev = &hpre->qm.pdev->dev;
+
+ while (err->msg) {
+ if (err->int_msk & err_sts)
+ dev_warn(dev, "%s [error status=0x%x] found\n",
+ err->msg, err->int_msk);
+ err++;
+ }
+}
+
+static pci_ers_result_t hpre_hw_error_handle(struct hpre *hpre)
+{
+ u32 err_sts;
+
+ /* read err sts */
+ err_sts = readl(hpre->qm.io_base + HPRE_HAC_INT_STATUS);
+ if (err_sts) {
+ hpre_log_hw_error(hpre, err_sts);
+
+ /* clear error interrupts */
+ writel(err_sts, hpre->qm.io_base + HPRE_HAC_SOURCE_INT);
+ return PCI_ERS_RESULT_NEED_RESET;
+ }
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+static pci_ers_result_t hpre_process_hw_error(struct pci_dev *pdev)
+{
+ struct hpre *hpre = pci_get_drvdata(pdev);
+ pci_ers_result_t qm_ret, hpre_ret;
+
+ /* log qm error */
+ qm_ret = hisi_qm_hw_error_handle(&hpre->qm);
+
+ /* log hpre error */
+ hpre_ret = hpre_hw_error_handle(hpre);
+
+ return (qm_ret == PCI_ERS_RESULT_NEED_RESET ||
+ hpre_ret == PCI_ERS_RESULT_NEED_RESET) ?
+ PCI_ERS_RESULT_NEED_RESET : PCI_ERS_RESULT_RECOVERED;
+}
+
+static pci_ers_result_t hpre_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ pci_info(pdev, "PCI error detected, state(=%d)!!\n", state);
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ return hpre_process_hw_error(pdev);
+}
+
+static const struct pci_error_handlers hpre_err_handler = {
+ .error_detected = hpre_error_detected,
+};
+
+static struct pci_driver hpre_pci_driver = {
+ .name = hpre_name,
+ .id_table = hpre_dev_ids,
+ .probe = hpre_probe,
+ .remove = hpre_remove,
+ .sriov_configure = hpre_sriov_configure,
+ .err_handler = &hpre_err_handler,
+};
+
+static void hpre_register_debugfs(void)
+{
+ if (!debugfs_initialized())
+ return;
+
+ hpre_debugfs_root = debugfs_create_dir(hpre_name, NULL);
+ if (IS_ERR_OR_NULL(hpre_debugfs_root))
+ hpre_debugfs_root = NULL;
+}
+
+static void hpre_unregister_debugfs(void)
+{
+ debugfs_remove_recursive(hpre_debugfs_root);
+}
+
+static int __init hpre_init(void)
+{
+ int ret;
+
+ hpre_register_debugfs();
+
+ ret = pci_register_driver(&hpre_pci_driver);
+ if (ret) {
+ hpre_unregister_debugfs();
+ pr_err("hpre: can't register hisi hpre driver.\n");
+ }
+
+ return ret;
+}
+
+static void __exit hpre_exit(void)
+{
+ pci_unregister_driver(&hpre_pci_driver);
+ hpre_unregister_debugfs();
+}
+
+module_init(hpre_init);
+module_exit(hpre_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Zaibo Xu <xuzaibo@huawei.com>");
+MODULE_DESCRIPTION("Driver for HiSilicon HPRE accelerator");
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index f975c393a603..b57da5ef8b5b 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -59,17 +59,17 @@
#define QM_CQ_PHASE_SHIFT 0
#define QM_CQ_FLAG_SHIFT 1
-#define QM_CQE_PHASE(cqe) ((cqe)->w7 & 0x1)
+#define QM_CQE_PHASE(cqe) (le16_to_cpu((cqe)->w7) & 0x1)
#define QM_QC_CQE_SIZE 4
/* eqc shift */
#define QM_EQE_AEQE_SIZE (2UL << 12)
#define QM_EQC_PHASE_SHIFT 16
-#define QM_EQE_PHASE(eqe) (((eqe)->dw0 >> 16) & 0x1)
+#define QM_EQE_PHASE(eqe) ((le32_to_cpu((eqe)->dw0) >> 16) & 0x1)
#define QM_EQE_CQN_MASK GENMASK(15, 0)
-#define QM_AEQE_PHASE(aeqe) (((aeqe)->dw0 >> 16) & 0x1)
+#define QM_AEQE_PHASE(aeqe) ((le32_to_cpu((aeqe)->dw0) >> 16) & 0x1)
#define QM_AEQE_TYPE_SHIFT 17
#define QM_DOORBELL_CMD_SQ 0
@@ -169,17 +169,17 @@
#define QM_MK_SQC_DW3_V2(sqe_sz) \
((QM_Q_DEPTH - 1) | ((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT))
-#define INIT_QC_COMMON(qc, base, pasid) do { \
- (qc)->head = 0; \
- (qc)->tail = 0; \
- (qc)->base_l = lower_32_bits(base); \
- (qc)->base_h = upper_32_bits(base); \
- (qc)->dw3 = 0; \
- (qc)->w8 = 0; \
- (qc)->rsvd0 = 0; \
- (qc)->pasid = pasid; \
- (qc)->w11 = 0; \
- (qc)->rsvd1 = 0; \
+#define INIT_QC_COMMON(qc, base, pasid) do { \
+ (qc)->head = 0; \
+ (qc)->tail = 0; \
+ (qc)->base_l = cpu_to_le32(lower_32_bits(base)); \
+ (qc)->base_h = cpu_to_le32(upper_32_bits(base)); \
+ (qc)->dw3 = 0; \
+ (qc)->w8 = 0; \
+ (qc)->rsvd0 = 0; \
+ (qc)->pasid = cpu_to_le16(pasid); \
+ (qc)->w11 = 0; \
+ (qc)->rsvd1 = 0; \
} while (0)
enum vft_type {
@@ -331,12 +331,18 @@ static void qm_mb_write(struct hisi_qm *qm, const void *src)
void __iomem *fun_base = qm->io_base + QM_MB_CMD_SEND_BASE;
unsigned long tmp0 = 0, tmp1 = 0;
+ if (!IS_ENABLED(CONFIG_ARM64)) {
+ memcpy_toio(fun_base, src, 16);
+ wmb();
+ return;
+ }
+
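+ /* ARM64: ldp/stp issues the 16-byte mailbox write as one unit; dsb sy orders it */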
asm volatile("ldp %0, %1, %3\n"
"stp %0, %1, %2\n"
"dsb sy\n"
: "=&r" (tmp0),
"=&r" (tmp1),
- "+Q" (*((char *)fun_base))
+ "+Q" (*((char __iomem *)fun_base))
: "Q" (*((char *)src))
: "memory");
}
@@ -350,12 +356,12 @@ static int qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue,
dev_dbg(&qm->pdev->dev, "QM mailbox request to q%u: %u-%llx\n",
queue, cmd, (unsigned long long)dma_addr);
- mailbox.w0 = cmd |
+ mailbox.w0 = cpu_to_le16(cmd |
(op ? 0x1 << QM_MB_OP_SHIFT : 0) |
- (0x1 << QM_MB_BUSY_SHIFT);
- mailbox.queue_num = queue;
- mailbox.base_l = lower_32_bits(dma_addr);
- mailbox.base_h = upper_32_bits(dma_addr);
+ (0x1 << QM_MB_BUSY_SHIFT));
+ mailbox.queue_num = cpu_to_le16(queue);
+ mailbox.base_l = cpu_to_le32(lower_32_bits(dma_addr));
+ mailbox.base_h = cpu_to_le32(upper_32_bits(dma_addr));
mailbox.rsvd = 0;
mutex_lock(&qm->mailbox_lock);
@@ -442,7 +448,7 @@ static u32 qm_get_irq_num_v2(struct hisi_qm *qm)
static struct hisi_qp *qm_to_hisi_qp(struct hisi_qm *qm, struct qm_eqe *eqe)
{
- u16 cqn = eqe->dw0 & QM_EQE_CQN_MASK;
+ u16 cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK;
return qm->qp_array[cqn];
}
@@ -464,7 +470,8 @@ static void qm_poll_qp(struct hisi_qp *qp, struct hisi_qm *qm)
if (qp->req_cb) {
while (QM_CQE_PHASE(cqe) == qp->qp_status.cqc_phase) {
dma_rmb();
- qp->req_cb(qp, qp->sqe + qm->sqe_size * cqe->sq_head);
+ qp->req_cb(qp, qp->sqe + qm->sqe_size *
+ le16_to_cpu(cqe->sq_head));
qm_cq_head_update(qp);
cqe = qp->cqe + qp->qp_status.cq_head;
qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ,
@@ -542,7 +549,7 @@ static irqreturn_t qm_aeq_irq(int irq, void *data)
return IRQ_NONE;
while (QM_AEQE_PHASE(aeqe) == qm->status.aeqc_phase) {
- type = aeqe->dw0 >> QM_AEQE_TYPE_SHIFT;
+ type = le32_to_cpu(aeqe->dw0) >> QM_AEQE_TYPE_SHIFT;
if (type < ARRAY_SIZE(qm_fifo_overflow))
dev_err(&qm->pdev->dev, "%s overflow\n",
qm_fifo_overflow[type]);
@@ -646,7 +653,7 @@ static void qm_init_qp_status(struct hisi_qp *qp)
qp_status->sq_tail = 0;
qp_status->cq_head = 0;
- qp_status->cqc_phase = 1;
+ qp_status->cqc_phase = true;
qp_status->flags = 0;
}
@@ -963,13 +970,11 @@ static const struct file_operations qm_regs_fops = {
static int qm_create_debugfs_file(struct hisi_qm *qm, enum qm_debug_file index)
{
- struct dentry *qm_d = qm->debug.qm_d, *tmp;
+ struct dentry *qm_d = qm->debug.qm_d;
struct debugfs_file *file = qm->debug.files + index;
- tmp = debugfs_create_file(qm_debug_file_name[index], 0600, qm_d, file,
- &qm_debug_fops);
- if (IS_ERR(tmp))
- return -ENOENT;
+ debugfs_create_file(qm_debug_file_name[index], 0600, qm_d, file,
+ &qm_debug_fops);
file->index = index;
mutex_init(&file->lock);
@@ -981,9 +986,6 @@ static int qm_create_debugfs_file(struct hisi_qm *qm, enum qm_debug_file index)
static void qm_hw_error_init_v1(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe,
u32 msi)
{
- dev_info(&qm->pdev->dev,
- "QM v%d does not support hw error handle\n", qm->ver);
-
writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK);
}
@@ -1123,6 +1125,7 @@ struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type)
}
set_bit(qp_id, qm->qp_bitmap);
qm->qp_array[qp_id] = qp;
+ qm->qp_in_used++;
write_unlock(&qm->qps_lock);
@@ -1187,6 +1190,7 @@ void hisi_qm_release_qp(struct hisi_qp *qp)
write_lock(&qm->qps_lock);
qm->qp_array[qp->qp_id] = NULL;
clear_bit(qp->qp_id, qm->qp_bitmap);
+ qm->qp_in_used--;
write_unlock(&qm->qps_lock);
kfree(qp);
@@ -1218,14 +1222,14 @@ static int qm_qp_ctx_cfg(struct hisi_qp *qp, int qp_id, int pasid)
INIT_QC_COMMON(sqc, qp->sqe_dma, pasid);
if (ver == QM_HW_V1) {
- sqc->dw3 = QM_MK_SQC_DW3_V1(0, 0, 0, qm->sqe_size);
- sqc->w8 = QM_Q_DEPTH - 1;
+ sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V1(0, 0, 0, qm->sqe_size));
+ sqc->w8 = cpu_to_le16(QM_Q_DEPTH - 1);
} else if (ver == QM_HW_V2) {
- sqc->dw3 = QM_MK_SQC_DW3_V2(qm->sqe_size);
+ sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V2(qm->sqe_size));
sqc->w8 = 0; /* rand_qc */
}
- sqc->cq_num = qp_id;
- sqc->w13 = QM_MK_SQC_W13(0, 1, qp->alg_type);
+ sqc->cq_num = cpu_to_le16(qp_id);
+ sqc->w13 = cpu_to_le16(QM_MK_SQC_W13(0, 1, qp->alg_type));
ret = qm_mb(qm, QM_MB_CMD_SQC, sqc_dma, qp_id, 0);
dma_unmap_single(dev, sqc_dma, sizeof(struct qm_sqc), DMA_TO_DEVICE);
@@ -1245,13 +1249,13 @@ static int qm_qp_ctx_cfg(struct hisi_qp *qp, int qp_id, int pasid)
INIT_QC_COMMON(cqc, qp->cqe_dma, pasid);
if (ver == QM_HW_V1) {
- cqc->dw3 = QM_MK_CQC_DW3_V1(0, 0, 0, 4);
- cqc->w8 = QM_Q_DEPTH - 1;
+ cqc->dw3 = cpu_to_le32(QM_MK_CQC_DW3_V1(0, 0, 0, 4));
+ cqc->w8 = cpu_to_le16(QM_Q_DEPTH - 1);
} else if (ver == QM_HW_V2) {
- cqc->dw3 = QM_MK_CQC_DW3_V2(4);
+ cqc->dw3 = cpu_to_le32(QM_MK_CQC_DW3_V2(4));
cqc->w8 = 0;
}
- cqc->dw6 = 1 << QM_CQ_PHASE_SHIFT | 1 << QM_CQ_FLAG_SHIFT;
+ cqc->dw6 = cpu_to_le32(1 << QM_CQ_PHASE_SHIFT | 1 << QM_CQ_FLAG_SHIFT);
ret = qm_mb(qm, QM_MB_CMD_CQC, cqc_dma, qp_id, 0);
dma_unmap_single(dev, cqc_dma, sizeof(struct qm_cqc), DMA_TO_DEVICE);
@@ -1392,6 +1396,24 @@ static void hisi_qm_cache_wb(struct hisi_qm *qm)
}
/**
+ * hisi_qm_get_free_qp_num() - Get the number of free qps in qm.
+ * @qm: The qm to query.
+ *
+ * This function returns the number of free qps in the qm.
+ */
+int hisi_qm_get_free_qp_num(struct hisi_qm *qm)
+{
+ int ret;
+
+ read_lock(&qm->qps_lock);
+ ret = qm->qp_num - qm->qp_in_used;
+ read_unlock(&qm->qps_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(hisi_qm_get_free_qp_num);
+
+/**
* hisi_qm_init() - Initialize configures about qm.
* @qm: The qm needing init.
*
@@ -1454,6 +1476,7 @@ int hisi_qm_init(struct hisi_qm *qm)
if (ret)
goto err_free_irq_vectors;
+ qm->qp_in_used = 0;
mutex_init(&qm->mailbox_lock);
rwlock_init(&qm->qps_lock);
@@ -1560,8 +1583,8 @@ static void qm_init_eq_aeq_status(struct hisi_qm *qm)
status->eq_head = 0;
status->aeq_head = 0;
- status->eqc_phase = 1;
- status->aeqc_phase = 1;
+ status->eqc_phase = true;
+ status->aeqc_phase = true;
}
static int qm_eq_ctx_cfg(struct hisi_qm *qm)
@@ -1585,11 +1608,11 @@ static int qm_eq_ctx_cfg(struct hisi_qm *qm)
return -ENOMEM;
}
- eqc->base_l = lower_32_bits(qm->eqe_dma);
- eqc->base_h = upper_32_bits(qm->eqe_dma);
+ eqc->base_l = cpu_to_le32(lower_32_bits(qm->eqe_dma));
+ eqc->base_h = cpu_to_le32(upper_32_bits(qm->eqe_dma));
if (qm->ver == QM_HW_V1)
- eqc->dw3 = QM_EQE_AEQE_SIZE;
- eqc->dw6 = (QM_Q_DEPTH - 1) | (1 << QM_EQC_PHASE_SHIFT);
+ eqc->dw3 = cpu_to_le32(QM_EQE_AEQE_SIZE);
+ eqc->dw6 = cpu_to_le32((QM_Q_DEPTH - 1) | (1 << QM_EQC_PHASE_SHIFT));
ret = qm_mb(qm, QM_MB_CMD_EQC, eqc_dma, 0, 0);
dma_unmap_single(dev, eqc_dma, sizeof(struct qm_eqc), DMA_TO_DEVICE);
kfree(eqc);
@@ -1606,9 +1629,9 @@ static int qm_eq_ctx_cfg(struct hisi_qm *qm)
return -ENOMEM;
}
- aeqc->base_l = lower_32_bits(qm->aeqe_dma);
- aeqc->base_h = upper_32_bits(qm->aeqe_dma);
- aeqc->dw6 = (QM_Q_DEPTH - 1) | (1 << QM_EQC_PHASE_SHIFT);
+ aeqc->base_l = cpu_to_le32(lower_32_bits(qm->aeqe_dma));
+ aeqc->base_h = cpu_to_le32(upper_32_bits(qm->aeqe_dma));
+ aeqc->dw6 = cpu_to_le32((QM_Q_DEPTH - 1) | (1 << QM_EQC_PHASE_SHIFT));
ret = qm_mb(qm, QM_MB_CMD_AEQC, aeqc_dma, 0, 0);
dma_unmap_single(dev, aeqc_dma, sizeof(struct qm_aeqc), DMA_TO_DEVICE);
@@ -1780,12 +1803,10 @@ EXPORT_SYMBOL_GPL(hisi_qm_stop);
*/
int hisi_qm_debug_init(struct hisi_qm *qm)
{
- struct dentry *qm_d, *qm_regs;
+ struct dentry *qm_d;
int i, ret;
qm_d = debugfs_create_dir("qm", qm->debug.debug_root);
- if (IS_ERR(qm_d))
- return -ENOENT;
qm->debug.qm_d = qm_d;
/* only show this in PF */
@@ -1796,12 +1817,7 @@ int hisi_qm_debug_init(struct hisi_qm *qm)
goto failed_to_create;
}
- qm_regs = debugfs_create_file("qm_regs", 0444, qm->debug.qm_d, qm,
- &qm_regs_fops);
- if (IS_ERR(qm_regs)) {
- ret = -ENOENT;
- goto failed_to_create;
- }
+ debugfs_create_file("qm_regs", 0444, qm->debug.qm_d, qm, &qm_regs_fops);
return 0;
@@ -1862,8 +1878,7 @@ void hisi_qm_hw_error_init(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe,
u32 msi)
{
if (!qm->ops->hw_error_init) {
- dev_err(&qm->pdev->dev, "QM version %d doesn't support hw error handling!\n",
- qm->ver);
+ dev_err(&qm->pdev->dev, "QM doesn't support hw error handling!\n");
return;
}
@@ -1877,11 +1892,10 @@ EXPORT_SYMBOL_GPL(hisi_qm_hw_error_init);
*
* Accelerators use this function to handle qm non-fatal hardware errors.
*/
-int hisi_qm_hw_error_handle(struct hisi_qm *qm)
+pci_ers_result_t hisi_qm_hw_error_handle(struct hisi_qm *qm)
{
if (!qm->ops->hw_error_handle) {
- dev_err(&qm->pdev->dev, "QM version %d doesn't support hw error report!\n",
- qm->ver);
+ dev_err(&qm->pdev->dev, "QM doesn't support hw error report!\n");
return PCI_ERS_RESULT_NONE;
}
diff --git a/drivers/crypto/hisilicon/qm.h b/drivers/crypto/hisilicon/qm.h
index 70e672ae86bf..078b8f1f1b77 100644
--- a/drivers/crypto/hisilicon/qm.h
+++ b/drivers/crypto/hisilicon/qm.h
@@ -75,6 +75,8 @@
#define QM_Q_DEPTH 1024
+#define HISI_ACC_SGL_SGE_NR_MAX 255
+
enum qp_state {
QP_STOP,
};
@@ -132,6 +134,7 @@ struct hisi_qm {
u32 sqe_size;
u32 qp_base;
u32 qp_num;
+ u32 qp_in_used;
u32 ctrl_qp_num;
struct qm_dma qdma;
@@ -204,12 +207,24 @@ int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg);
int hisi_qm_stop_qp(struct hisi_qp *qp);
void hisi_qm_release_qp(struct hisi_qp *qp);
int hisi_qp_send(struct hisi_qp *qp, const void *msg);
+int hisi_qm_get_free_qp_num(struct hisi_qm *qm);
int hisi_qm_get_vft(struct hisi_qm *qm, u32 *base, u32 *number);
int hisi_qm_set_vft(struct hisi_qm *qm, u32 fun_num, u32 base, u32 number);
int hisi_qm_debug_init(struct hisi_qm *qm);
void hisi_qm_hw_error_init(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe,
u32 msi);
-int hisi_qm_hw_error_handle(struct hisi_qm *qm);
+pci_ers_result_t hisi_qm_hw_error_handle(struct hisi_qm *qm);
enum qm_hw_ver hisi_qm_get_hw_version(struct pci_dev *pdev);
void hisi_qm_debug_regs_clear(struct hisi_qm *qm);
+
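+/* Hardware SGL pool helpers shared by HiSilicon accelerator drivers */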
+struct hisi_acc_sgl_pool;
+struct hisi_acc_hw_sgl *hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev,
+ struct scatterlist *sgl, struct hisi_acc_sgl_pool *pool,
+ u32 index, dma_addr_t *hw_sgl_dma);
+void hisi_acc_sg_buf_unmap(struct device *dev, struct scatterlist *sgl,
+ struct hisi_acc_hw_sgl *hw_sgl);
+struct hisi_acc_sgl_pool *hisi_acc_create_sgl_pool(struct device *dev,
+ u32 count, u32 sge_nr);
+void hisi_acc_free_sgl_pool(struct device *dev,
+ struct hisi_acc_sgl_pool *pool);
#endif
diff --git a/drivers/crypto/hisilicon/sec2/Makefile b/drivers/crypto/hisilicon/sec2/Makefile
new file mode 100644
index 000000000000..b4f6cf14be3a
--- /dev/null
+++ b/drivers/crypto/hisilicon/sec2/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_CRYPTO_DEV_HISI_SEC2) += hisi_sec2.o
+hisi_sec2-objs = sec_main.o sec_crypto.o
diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h
new file mode 100644
index 000000000000..26754d0570ba
--- /dev/null
+++ b/drivers/crypto/hisilicon/sec2/sec.h
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019 HiSilicon Limited. */
+
+#ifndef __HISI_SEC_V2_H
+#define __HISI_SEC_V2_H
+
+#include <linux/list.h>
+
+#include "../qm.h"
+#include "sec_crypto.h"
+
+/* Cipher resource per hardware SEC queue */
+struct sec_cipher_res {
+ u8 *c_ivin;
+ dma_addr_t c_ivin_dma;
+};
+
+/* SEC private cipher request */
+struct sec_cipher_req {
+ struct hisi_acc_hw_sgl *c_in;
+ dma_addr_t c_in_dma;
+ struct hisi_acc_hw_sgl *c_out;
+ dma_addr_t c_out_dma;
+ u8 *c_ivin;
+ dma_addr_t c_ivin_dma;
+ struct skcipher_request *sk_req;
+ u32 c_len;
+ bool encrypt;
+};
+
+/* SEC request of Crypto */
+struct sec_req {
+ struct sec_sqe sec_sqe;
+ struct sec_ctx *ctx;
+ struct sec_qp_ctx *qp_ctx;
+
+ /* Only cipher is supported at present */
+ struct sec_cipher_req c_req;
+ int err_type;
+ int req_id;
+
+ /* Status of the SEC request */
+ int fake_busy;
+};
+
+/**
+ * struct sec_req_op - Operations for SEC request
+ * @get_res: Get resources for TFM on the SEC device
+ * @resource_alloc: Allocate resources for queue context on the SEC device
+ * @resource_free: Free resources for queue context on the SEC device
+ * @buf_map: DMA map the SGL buffers of the request
+ * @buf_unmap: DMA unmap the SGL buffers of the request
+ * @do_transfer: Transfer request data, e.g. copy the IV, before filling the BD
+ * @bd_fill: Fill the SEC queue BD
+ * @bd_send: Send the SEC BD into the hardware queue
+ * @callback: Call back for the request
+ * @process: Main processing logic of Skcipher
+ */
+struct sec_req_op {
+ int (*get_res)(struct sec_ctx *ctx, struct sec_req *req);
+ int (*resource_alloc)(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx);
+ void (*resource_free)(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx);
+ int (*buf_map)(struct sec_ctx *ctx, struct sec_req *req);
+ void (*buf_unmap)(struct sec_ctx *ctx, struct sec_req *req);
+ void (*do_transfer)(struct sec_ctx *ctx, struct sec_req *req);
+ int (*bd_fill)(struct sec_ctx *ctx, struct sec_req *req);
+ int (*bd_send)(struct sec_ctx *ctx, struct sec_req *req);
+ void (*callback)(struct sec_ctx *ctx, struct sec_req *req);
+ int (*process)(struct sec_ctx *ctx, struct sec_req *req);
+};
+
+/* SEC cipher context holding the cipher's related parameters */
+struct sec_cipher_ctx {
+ u8 *c_key;
+ dma_addr_t c_key_dma;
+ sector_t iv_offset;
+ u32 c_gran_size;
+ u32 ivsize;
+ u8 c_mode;
+ u8 c_alg;
+ u8 c_key_len;
+};
+
+/* SEC queue context holding the queue's related data */
+struct sec_qp_ctx {
+ struct hisi_qp *qp;
+ struct sec_req **req_list;
+ struct idr req_idr;
+ void *alg_meta_data;
+ struct sec_ctx *ctx;
+ struct mutex req_lock;
+ struct hisi_acc_sgl_pool *c_in_pool;
+ struct hisi_acc_sgl_pool *c_out_pool;
+ atomic_t pending_reqs;
+};
+
+/* SEC Crypto TFM context holding queue, cipher and other related data */
+struct sec_ctx {
+ struct sec_qp_ctx *qp_ctx;
+ struct sec_dev *sec;
+ const struct sec_req_op *req_op;
+
+ /* Half queues for encipher, and half for decipher */
+ u32 hlf_q_num;
+
+ /* Fake-busy threshold: above it, -EBUSY is returned to the user */
+ u32 fake_req_limit;
+
+ /* Current cyclic index to select a queue for encipher */
+ atomic_t enc_qcyclic;
+
+ /* Current cyclic index to select a queue for decipher */
+ atomic_t dec_qcyclic;
+ struct sec_cipher_ctx c_ctx;
+};
+
+enum sec_endian {
+ SEC_LE = 0,
+ SEC_32BE,
+ SEC_64BE
+};
+
+enum sec_debug_file_index {
+ SEC_CURRENT_QM,
+ SEC_CLEAR_ENABLE,
+ SEC_DEBUG_FILE_NUM,
+};
+
+struct sec_debug_file {
+ enum sec_debug_file_index index;
+ spinlock_t lock;
+ struct hisi_qm *qm;
+};
+
+struct sec_dfx {
+ u64 send_cnt;
+ u64 recv_cnt;
+};
+
+struct sec_debug {
+ struct sec_dfx dfx;
+ struct sec_debug_file files[SEC_DEBUG_FILE_NUM];
+};
+
+struct sec_dev {
+ struct hisi_qm qm;
+ struct list_head list;
+ struct sec_debug debug;
+ u32 ctx_q_num;
+ u32 num_vfs;
+ unsigned long status;
+};
+
+struct sec_dev *sec_find_device(int node);
+int sec_register_to_crypto(void);
+void sec_unregister_from_crypto(void);
+#endif
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
new file mode 100644
index 000000000000..dc1eb97d57f7
--- /dev/null
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
@@ -0,0 +1,889 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 HiSilicon Limited. */
+
+#include <crypto/aes.h>
+#include <crypto/algapi.h>
+#include <crypto/des.h>
+#include <crypto/skcipher.h>
+#include <crypto/xts.h>
+#include <linux/crypto.h>
+#include <linux/dma-mapping.h>
+#include <linux/idr.h>
+
+#include "sec.h"
+#include "sec_crypto.h"
+
+#define SEC_PRIORITY 4001
+#define SEC_XTS_MIN_KEY_SIZE (2 * AES_MIN_KEY_SIZE)
+#define SEC_XTS_MAX_KEY_SIZE (2 * AES_MAX_KEY_SIZE)
+#define SEC_DES3_2KEY_SIZE (2 * DES_KEY_SIZE)
+#define SEC_DES3_3KEY_SIZE (3 * DES_KEY_SIZE)
+
+/* SEC sqe(bd) bit field offset and mask macros */
+#define SEC_DE_OFFSET 1
+#define SEC_CIPHER_OFFSET 4
+#define SEC_SCENE_OFFSET 3
+#define SEC_DST_SGL_OFFSET 2
+#define SEC_SRC_SGL_OFFSET 7
+#define SEC_CKEY_OFFSET 9
+#define SEC_CMODE_OFFSET 12
+#define SEC_FLAG_OFFSET 7
+#define SEC_FLAG_MASK 0x0780
+#define SEC_TYPE_MASK 0x0F
+#define SEC_DONE_MASK 0x0001
+
+#define SEC_TOTAL_IV_SZ (SEC_IV_SIZE * QM_Q_DEPTH)
+#define SEC_SGL_SGE_NR 128
+#define SEC_CTX_DEV(ctx) (&(ctx)->sec->qm.pdev->dev)
+
+static DEFINE_MUTEX(sec_algs_lock);
+static unsigned int sec_active_devs;
+
+/* Get an en/de-cipher queue cyclically to balance load over queues of TFM */
+static inline int sec_get_queue_id(struct sec_ctx *ctx, struct sec_req *req)
+{
+ if (req->c_req.encrypt)
+ return (u32)atomic_inc_return(&ctx->enc_qcyclic) %
+ ctx->hlf_q_num;
+
+ return (u32)atomic_inc_return(&ctx->dec_qcyclic) % ctx->hlf_q_num +
+ ctx->hlf_q_num;
+}
+
+static inline void sec_put_queue_id(struct sec_ctx *ctx, struct sec_req *req)
+{
+ if (req->c_req.encrypt)
+ atomic_dec(&ctx->enc_qcyclic);
+ else
+ atomic_dec(&ctx->dec_qcyclic);
+}
+
+static int sec_alloc_req_id(struct sec_req *req, struct sec_qp_ctx *qp_ctx)
+{
+ int req_id;
+
+ mutex_lock(&qp_ctx->req_lock);
+
+ req_id = idr_alloc_cyclic(&qp_ctx->req_idr, NULL,
+ 0, QM_Q_DEPTH, GFP_ATOMIC);
+ mutex_unlock(&qp_ctx->req_lock);
+ if (req_id < 0) {
+ dev_err(SEC_CTX_DEV(req->ctx), "alloc req id fail!\n");
+ return req_id;
+ }
+
+ req->qp_ctx = qp_ctx;
+ qp_ctx->req_list[req_id] = req;
+ return req_id;
+}
+
+static void sec_free_req_id(struct sec_req *req)
+{
+ struct sec_qp_ctx *qp_ctx = req->qp_ctx;
+ int req_id = req->req_id;
+
+ if (req_id < 0 || req_id >= QM_Q_DEPTH) {
+ dev_err(SEC_CTX_DEV(req->ctx), "free request id invalid!\n");
+ return;
+ }
+
+ qp_ctx->req_list[req_id] = NULL;
+ req->qp_ctx = NULL;
+
+ mutex_lock(&qp_ctx->req_lock);
+ idr_remove(&qp_ctx->req_idr, req_id);
+ mutex_unlock(&qp_ctx->req_lock);
+}
+
+static void sec_req_cb(struct hisi_qp *qp, void *resp)
+{
+ struct sec_qp_ctx *qp_ctx = qp->qp_ctx;
+ struct sec_sqe *bd = resp;
+ u16 done, flag;
+ u8 type;
+ struct sec_req *req;
+
+ type = bd->type_cipher_auth & SEC_TYPE_MASK;
+ if (type == SEC_BD_TYPE2) {
+ req = qp_ctx->req_list[le16_to_cpu(bd->type2.tag)];
+ req->err_type = bd->type2.error_type;
+
+ done = le16_to_cpu(bd->type2.done_flag) & SEC_DONE_MASK;
+ flag = (le16_to_cpu(bd->type2.done_flag) &
+ SEC_FLAG_MASK) >> SEC_FLAG_OFFSET;
+ if (req->err_type || done != 0x1 || flag != 0x2)
+ dev_err(SEC_CTX_DEV(req->ctx),
+ "err_type[%d],done[%d],flag[%d]\n",
+ req->err_type, done, flag);
+ } else {
+ pr_err("err bd type [%d]\n", type);
+ return;
+ }
+
+ __sync_add_and_fetch(&req->ctx->sec->debug.dfx.recv_cnt, 1);
+
+ req->ctx->req_op->buf_unmap(req->ctx, req);
+
+ req->ctx->req_op->callback(req->ctx, req);
+}
+
+static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req)
+{
+ struct sec_qp_ctx *qp_ctx = req->qp_ctx;
+ int ret;
+
+ mutex_lock(&qp_ctx->req_lock);
+ ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe);
+ mutex_unlock(&qp_ctx->req_lock);
+ __sync_add_and_fetch(&ctx->sec->debug.dfx.send_cnt, 1);
+
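+ /* A full hardware queue is reported as -ENOBUFS; fake_busy maps to -EBUSY backpressure */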
+ if (ret == -EBUSY)
+ return -ENOBUFS;
+
+ if (!ret) {
+ if (req->fake_busy)
+ ret = -EBUSY;
+ else
+ ret = -EINPROGRESS;
+ }
+
+ return ret;
+}
+
+static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx,
+ int qp_ctx_id, int alg_type)
+{
+ struct device *dev = SEC_CTX_DEV(ctx);
+ struct sec_qp_ctx *qp_ctx;
+ struct hisi_qp *qp;
+ int ret = -ENOMEM;
+
+ qp = hisi_qm_create_qp(qm, alg_type);
+ if (IS_ERR(qp))
+ return PTR_ERR(qp);
+
+ qp_ctx = &ctx->qp_ctx[qp_ctx_id];
+ qp->req_type = 0;
+ qp->qp_ctx = qp_ctx;
+ qp->req_cb = sec_req_cb;
+ qp_ctx->qp = qp;
+ qp_ctx->ctx = ctx;
+
+ mutex_init(&qp_ctx->req_lock);
+ atomic_set(&qp_ctx->pending_reqs, 0);
+ idr_init(&qp_ctx->req_idr);
+
+ qp_ctx->req_list = kcalloc(QM_Q_DEPTH, sizeof(void *), GFP_ATOMIC);
+ if (!qp_ctx->req_list)
+ goto err_destroy_idr;
+
+ qp_ctx->c_in_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH,
+ SEC_SGL_SGE_NR);
+ if (!qp_ctx->c_in_pool) {
+ dev_err(dev, "fail to create sgl pool for input!\n");
+ goto err_free_req_list;
+ }
+
+ qp_ctx->c_out_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH,
+ SEC_SGL_SGE_NR);
+ if (!qp_ctx->c_out_pool) {
+ dev_err(dev, "fail to create sgl pool for output!\n");
+ goto err_free_c_in_pool;
+ }
+
+ ret = ctx->req_op->resource_alloc(ctx, qp_ctx);
+ if (ret)
+ goto err_free_c_out_pool;
+
+ ret = hisi_qm_start_qp(qp, 0);
+ if (ret < 0)
+ goto err_queue_free;
+
+ return 0;
+
+err_queue_free:
+ ctx->req_op->resource_free(ctx, qp_ctx);
+err_free_c_out_pool:
+ hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool);
+err_free_c_in_pool:
+ hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);
+err_free_req_list:
+ kfree(qp_ctx->req_list);
+err_destroy_idr:
+ idr_destroy(&qp_ctx->req_idr);
+ hisi_qm_release_qp(qp);
+
+ return ret;
+}
+
+static void sec_release_qp_ctx(struct sec_ctx *ctx,
+ struct sec_qp_ctx *qp_ctx)
+{
+ struct device *dev = SEC_CTX_DEV(ctx);
+
+ hisi_qm_stop_qp(qp_ctx->qp);
+ ctx->req_op->resource_free(ctx, qp_ctx);
+
+ hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool);
+ hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);
+
+ idr_destroy(&qp_ctx->req_idr);
+ kfree(qp_ctx->req_list);
+ hisi_qm_release_qp(qp_ctx->qp);
+}
+
+static int sec_skcipher_init(struct crypto_skcipher *tfm)
+{
+ struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct sec_cipher_ctx *c_ctx;
+ struct sec_dev *sec;
+ struct device *dev;
+ struct hisi_qm *qm;
+ int i, ret;
+
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct sec_req));
+
+ sec = sec_find_device(cpu_to_node(smp_processor_id()));
+ if (!sec) {
+ pr_err("find no Hisilicon SEC device!\n");
+ return -ENODEV;
+ }
+ ctx->sec = sec;
+ qm = &sec->qm;
+ dev = &qm->pdev->dev;
+ ctx->hlf_q_num = sec->ctx_q_num >> 0x1;
+
+ /* Half of the queue depth is taken as the fake-busy request limit. */
+ ctx->fake_req_limit = QM_Q_DEPTH >> 0x1;
+ ctx->qp_ctx = kcalloc(sec->ctx_q_num, sizeof(struct sec_qp_ctx),
+ GFP_KERNEL);
+ if (!ctx->qp_ctx)
+ return -ENOMEM;
+
+ for (i = 0; i < sec->ctx_q_num; i++) {
+ ret = sec_create_qp_ctx(qm, ctx, i, 0);
+ if (ret)
+ goto err_sec_release_qp_ctx;
+ }
+
+ c_ctx = &ctx->c_ctx;
+ c_ctx->ivsize = crypto_skcipher_ivsize(tfm);
+ if (c_ctx->ivsize > SEC_IV_SIZE) {
+ dev_err(dev, "get error iv size!\n");
+ ret = -EINVAL;
+ goto err_sec_release_qp_ctx;
+ }
+ c_ctx->c_key = dma_alloc_coherent(dev, SEC_MAX_KEY_SIZE,
+ &c_ctx->c_key_dma, GFP_KERNEL);
+ if (!c_ctx->c_key) {
+ ret = -ENOMEM;
+ goto err_sec_release_qp_ctx;
+ }
+
+ return 0;
+
+err_sec_release_qp_ctx:
+ for (i = i - 1; i >= 0; i--)
+ sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);
+
+ kfree(ctx->qp_ctx);
+ return ret;
+}
+
+static void sec_skcipher_exit(struct crypto_skcipher *tfm)
+{
+ struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
+ int i = 0;
+
+ if (c_ctx->c_key) {
+ dma_free_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE,
+ c_ctx->c_key, c_ctx->c_key_dma);
+ c_ctx->c_key = NULL;
+ }
+
+ for (i = 0; i < ctx->sec->ctx_q_num; i++)
+ sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);
+
+ kfree(ctx->qp_ctx);
+}
+
+static int sec_skcipher_3des_setkey(struct sec_cipher_ctx *c_ctx,
+ const u32 keylen,
+ const enum sec_cmode c_mode)
+{
+ switch (keylen) {
+ case SEC_DES3_2KEY_SIZE:
+ c_ctx->c_key_len = SEC_CKEY_3DES_2KEY;
+ break;
+ case SEC_DES3_3KEY_SIZE:
+ c_ctx->c_key_len = SEC_CKEY_3DES_3KEY;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int sec_skcipher_aes_sm4_setkey(struct sec_cipher_ctx *c_ctx,
+ const u32 keylen,
+ const enum sec_cmode c_mode)
+{
+ if (c_mode == SEC_CMODE_XTS) {
+ switch (keylen) {
+ case SEC_XTS_MIN_KEY_SIZE:
+ c_ctx->c_key_len = SEC_CKEY_128BIT;
+ break;
+ case SEC_XTS_MAX_KEY_SIZE:
+ c_ctx->c_key_len = SEC_CKEY_256BIT;
+ break;
+ default:
+ pr_err("hisi_sec2: xts mode key error!\n");
+ return -EINVAL;
+ }
+ } else {
+ switch (keylen) {
+ case AES_KEYSIZE_128:
+ c_ctx->c_key_len = SEC_CKEY_128BIT;
+ break;
+ case AES_KEYSIZE_192:
+ c_ctx->c_key_len = SEC_CKEY_192BIT;
+ break;
+ case AES_KEYSIZE_256:
+ c_ctx->c_key_len = SEC_CKEY_256BIT;
+ break;
+ default:
+ pr_err("hisi_sec2: aes key error!\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ const u32 keylen, const enum sec_calg c_alg,
+ const enum sec_cmode c_mode)
+{
+ struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
+ int ret;
+
+ if (c_mode == SEC_CMODE_XTS) {
+ ret = xts_verify_key(tfm, key, keylen);
+ if (ret) {
+ dev_err(SEC_CTX_DEV(ctx), "xts mode key err!\n");
+ return ret;
+ }
+ }
+
+ c_ctx->c_alg = c_alg;
+ c_ctx->c_mode = c_mode;
+
+ switch (c_alg) {
+ case SEC_CALG_3DES:
+ ret = sec_skcipher_3des_setkey(c_ctx, keylen, c_mode);
+ break;
+ case SEC_CALG_AES:
+ case SEC_CALG_SM4:
+ ret = sec_skcipher_aes_sm4_setkey(c_ctx, keylen, c_mode);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (ret) {
+ dev_err(SEC_CTX_DEV(ctx), "set sec key err!\n");
+ return ret;
+ }
+
+ memcpy(c_ctx->c_key, key, keylen);
+
+ return 0;
+}
+
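+/* Generate thin setkey wrappers that bind a fixed algorithm and cipher mode */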
+#define GEN_SEC_SETKEY_FUNC(name, c_alg, c_mode) \
+static int sec_setkey_##name(struct crypto_skcipher *tfm, const u8 *key,\
+ u32 keylen) \
+{ \
+ return sec_skcipher_setkey(tfm, key, keylen, c_alg, c_mode); \
+}
+
+GEN_SEC_SETKEY_FUNC(aes_ecb, SEC_CALG_AES, SEC_CMODE_ECB)
+GEN_SEC_SETKEY_FUNC(aes_cbc, SEC_CALG_AES, SEC_CMODE_CBC)
+GEN_SEC_SETKEY_FUNC(aes_xts, SEC_CALG_AES, SEC_CMODE_XTS)
+
+GEN_SEC_SETKEY_FUNC(3des_ecb, SEC_CALG_3DES, SEC_CMODE_ECB)
+GEN_SEC_SETKEY_FUNC(3des_cbc, SEC_CALG_3DES, SEC_CMODE_CBC)
+
+GEN_SEC_SETKEY_FUNC(sm4_xts, SEC_CALG_SM4, SEC_CMODE_XTS)
+GEN_SEC_SETKEY_FUNC(sm4_cbc, SEC_CALG_SM4, SEC_CMODE_CBC)
+
+static int sec_skcipher_get_res(struct sec_ctx *ctx,
+ struct sec_req *req)
+{
+ struct sec_qp_ctx *qp_ctx = req->qp_ctx;
+ struct sec_cipher_res *c_res = qp_ctx->alg_meta_data;
+ struct sec_cipher_req *c_req = &req->c_req;
+ int req_id = req->req_id;
+
+ c_req->c_ivin = c_res[req_id].c_ivin;
+ c_req->c_ivin_dma = c_res[req_id].c_ivin_dma;
+
+ return 0;
+}
+
+static int sec_skcipher_resource_alloc(struct sec_ctx *ctx,
+ struct sec_qp_ctx *qp_ctx)
+{
+ struct device *dev = SEC_CTX_DEV(ctx);
+ struct sec_cipher_res *res;
+ int i;
+
+ res = kcalloc(QM_Q_DEPTH, sizeof(struct sec_cipher_res), GFP_KERNEL);
+ if (!res)
+ return -ENOMEM;
+
+ res->c_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ,
+ &res->c_ivin_dma, GFP_KERNEL);
+ if (!res->c_ivin) {
+ kfree(res);
+ return -ENOMEM;
+ }
+
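+ /* Carve the single coherent IV buffer into one SEC_IV_SIZE slot per request */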
+ for (i = 1; i < QM_Q_DEPTH; i++) {
+ res[i].c_ivin_dma = res->c_ivin_dma + i * SEC_IV_SIZE;
+ res[i].c_ivin = res->c_ivin + i * SEC_IV_SIZE;
+ }
+ qp_ctx->alg_meta_data = res;
+
+ return 0;
+}
+
+static void sec_skcipher_resource_free(struct sec_ctx *ctx,
+ struct sec_qp_ctx *qp_ctx)
+{
+ struct sec_cipher_res *res = qp_ctx->alg_meta_data;
+ struct device *dev = SEC_CTX_DEV(ctx);
+
+ if (!res)
+ return;
+
+ dma_free_coherent(dev, SEC_TOTAL_IV_SZ, res->c_ivin, res->c_ivin_dma);
+ kfree(res);
+}
+
+static int sec_skcipher_map(struct device *dev, struct sec_req *req,
+ struct scatterlist *src, struct scatterlist *dst)
+{
+ struct sec_cipher_req *c_req = &req->c_req;
+ struct sec_qp_ctx *qp_ctx = req->qp_ctx;
+
+ c_req->c_in = hisi_acc_sg_buf_map_to_hw_sgl(dev, src,
+ qp_ctx->c_in_pool,
+ req->req_id,
+ &c_req->c_in_dma);
+
+ if (IS_ERR(c_req->c_in)) {
+ dev_err(dev, "fail to dma map input sgl buffers!\n");
+ return PTR_ERR(c_req->c_in);
+ }
+
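+ /* In-place request: reuse the input mapping for the output */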
+ if (dst == src) {
+ c_req->c_out = c_req->c_in;
+ c_req->c_out_dma = c_req->c_in_dma;
+ } else {
+ c_req->c_out = hisi_acc_sg_buf_map_to_hw_sgl(dev, dst,
+ qp_ctx->c_out_pool,
+ req->req_id,
+ &c_req->c_out_dma);
+
+ if (IS_ERR(c_req->c_out)) {
+ dev_err(dev, "fail to dma map output sgl buffers!\n");
+ hisi_acc_sg_buf_unmap(dev, src, c_req->c_in);
+ return PTR_ERR(c_req->c_out);
+ }
+ }
+
+ return 0;
+}
+
+static int sec_skcipher_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
+{
+ struct sec_cipher_req *c_req = &req->c_req;
+
+ return sec_skcipher_map(SEC_CTX_DEV(ctx), req,
+ c_req->sk_req->src, c_req->sk_req->dst);
+}
+
+static void sec_skcipher_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req)
+{
+ struct device *dev = SEC_CTX_DEV(ctx);
+ struct sec_cipher_req *c_req = &req->c_req;
+ struct skcipher_request *sk_req = c_req->sk_req;
+
+ if (sk_req->dst != sk_req->src)
+ hisi_acc_sg_buf_unmap(dev, sk_req->src, c_req->c_in);
+
+ hisi_acc_sg_buf_unmap(dev, sk_req->dst, c_req->c_out);
+}
+
+static int sec_request_transfer(struct sec_ctx *ctx, struct sec_req *req)
+{
+ int ret;
+
+ ret = ctx->req_op->buf_map(ctx, req);
+ if (ret)
+ return ret;
+
+ ctx->req_op->do_transfer(ctx, req);
+
+ ret = ctx->req_op->bd_fill(ctx, req);
+ if (ret)
+ goto unmap_req_buf;
+
+ return ret;
+
+unmap_req_buf:
+ ctx->req_op->buf_unmap(ctx, req);
+
+ return ret;
+}
+
+static void sec_request_untransfer(struct sec_ctx *ctx, struct sec_req *req)
+{
+ ctx->req_op->buf_unmap(ctx, req);
+}
+
+static void sec_skcipher_copy_iv(struct sec_ctx *ctx, struct sec_req *req)
+{
+ struct skcipher_request *sk_req = req->c_req.sk_req;
+ struct sec_cipher_req *c_req = &req->c_req;
+
+ c_req->c_len = sk_req->cryptlen;
+ memcpy(c_req->c_ivin, sk_req->iv, ctx->c_ctx.ivsize);
+}
+
+static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
+{
+ struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
+ struct sec_cipher_req *c_req = &req->c_req;
+ struct sec_sqe *sec_sqe = &req->sec_sqe;
+ u8 de = 0;
+ u8 scene, sa_type, da_type;
+ u8 bd_type, cipher;
+
+ memset(sec_sqe, 0, sizeof(struct sec_sqe));
+
+ sec_sqe->type2.c_key_addr = cpu_to_le64(c_ctx->c_key_dma);
+ sec_sqe->type2.c_ivin_addr = cpu_to_le64(c_req->c_ivin_dma);
+ sec_sqe->type2.data_src_addr = cpu_to_le64(c_req->c_in_dma);
+ sec_sqe->type2.data_dst_addr = cpu_to_le64(c_req->c_out_dma);
+
+ sec_sqe->type2.icvw_kmode |= cpu_to_le16(((u16)c_ctx->c_mode) <<
+ SEC_CMODE_OFFSET);
+ sec_sqe->type2.c_alg = c_ctx->c_alg;
+ sec_sqe->type2.icvw_kmode |= cpu_to_le16(((u16)c_ctx->c_key_len) <<
+ SEC_CKEY_OFFSET);
+
+ bd_type = SEC_BD_TYPE2;
+ if (c_req->encrypt)
+ cipher = SEC_CIPHER_ENC << SEC_CIPHER_OFFSET;
+ else
+ cipher = SEC_CIPHER_DEC << SEC_CIPHER_OFFSET;
+ sec_sqe->type_cipher_auth = bd_type | cipher;
+
+ sa_type = SEC_SGL << SEC_SRC_SGL_OFFSET;
+ scene = SEC_COMM_SCENE << SEC_SCENE_OFFSET;
+ if (c_req->c_in_dma != c_req->c_out_dma)
+ de = 0x1 << SEC_DE_OFFSET;
+
+ sec_sqe->sds_sa_type = (de | scene | sa_type);
+
+ /* Just set DST address type */
+ da_type = SEC_SGL << SEC_DST_SGL_OFFSET;
+ sec_sqe->sdm_addr_type |= da_type;
+
+ sec_sqe->type2.clen_ivhlen |= cpu_to_le32(c_req->c_len);
+ sec_sqe->type2.tag = cpu_to_le16((u16)req->req_id);
+
+ return 0;
+}
+
+static void sec_update_iv(struct sec_req *req)
+{
+ struct skcipher_request *sk_req = req->c_req.sk_req;
+ u32 iv_size = req->ctx->c_ctx.ivsize;
+ struct scatterlist *sgl;
+ size_t sz;
+
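+ /* CBC chaining: copy the last ciphertext block back as the next IV */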
+ if (req->c_req.encrypt)
+ sgl = sk_req->dst;
+ else
+ sgl = sk_req->src;
+
+ sz = sg_pcopy_to_buffer(sgl, sg_nents(sgl), sk_req->iv,
+ iv_size, sk_req->cryptlen - iv_size);
+ if (sz != iv_size)
+ dev_err(SEC_CTX_DEV(req->ctx), "copy output iv error!\n");
+}
+
+static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req)
+{
+ struct skcipher_request *sk_req = req->c_req.sk_req;
+ struct sec_qp_ctx *qp_ctx = req->qp_ctx;
+
+ atomic_dec(&qp_ctx->pending_reqs);
+ sec_free_req_id(req);
+
+ /* Copy out the IV when encrypting in CBC mode */
+ if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && req->c_req.encrypt)
+ sec_update_iv(req);
+
+ if (__sync_bool_compare_and_swap(&req->fake_busy, 1, 0))
+ sk_req->base.complete(&sk_req->base, -EINPROGRESS);
+
+ sk_req->base.complete(&sk_req->base, req->err_type);
+}
+
+static void sec_request_uninit(struct sec_ctx *ctx, struct sec_req *req)
+{
+ struct sec_qp_ctx *qp_ctx = req->qp_ctx;
+
+ atomic_dec(&qp_ctx->pending_reqs);
+ sec_free_req_id(req);
+ sec_put_queue_id(ctx, req);
+}
+
+static int sec_request_init(struct sec_ctx *ctx, struct sec_req *req)
+{
+ struct sec_qp_ctx *qp_ctx;
+ int issue_id, ret;
+
+ /* Pick a queue for load balancing */
+ issue_id = sec_get_queue_id(ctx, req);
+ qp_ctx = &ctx->qp_ctx[issue_id];
+
+ req->req_id = sec_alloc_req_id(req, qp_ctx);
+ if (req->req_id < 0) {
+ sec_put_queue_id(ctx, req);
+ return req->req_id;
+ }
+
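+ /* Beyond the soft limit the request is still accepted, but it is
+ * flagged fake_busy so the crypto backlog convention is followed.
+ */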
+ if (ctx->fake_req_limit <= atomic_inc_return(&qp_ctx->pending_reqs))
+ req->fake_busy = 1;
+ else
+ req->fake_busy = 0;
+
+ ret = ctx->req_op->get_res(ctx, req);
+ if (ret) {
+ atomic_dec(&qp_ctx->pending_reqs);
+ sec_request_uninit(ctx, req);
+ dev_err(SEC_CTX_DEV(ctx), "get resources failed!\n");
+ }
+
+ return ret;
+}
+
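+/*
+ * Main request path: allocate per-request state, map buffers and fill
+ * the BD, then queue it. bd_send returning -EBUSY or -EINPROGRESS
+ * means the BD was queued; any other value unwinds the request.
+ */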
+static int sec_process(struct sec_ctx *ctx, struct sec_req *req)
+{
+ int ret;
+
+ ret = sec_request_init(ctx, req);
+ if (ret)
+ return ret;
+
+ ret = sec_request_transfer(ctx, req);
+ if (ret)
+ goto err_uninit_req;
+
+ /* Copy out the IV when decrypting in CBC mode */
+ if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt)
+ sec_update_iv(req);
+
+ ret = ctx->req_op->bd_send(ctx, req);
+ if (ret != -EBUSY && ret != -EINPROGRESS) {
+ dev_err(SEC_CTX_DEV(ctx), "send sec request failed!\n");
+ goto err_send_req;
+ }
+
+ return ret;
+
+err_send_req:
+ /* On failure, restore the original user-supplied IV */
+ if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt)
+ memcpy(req->c_req.sk_req->iv, req->c_req.c_ivin,
+ ctx->c_ctx.ivsize);
+
+ sec_request_untransfer(ctx, req);
+err_uninit_req:
+ sec_request_uninit(ctx, req);
+
+ return ret;
+}
+
+static struct sec_req_op sec_req_ops_tbl = {
+ .get_res = sec_skcipher_get_res,
+ .resource_alloc = sec_skcipher_resource_alloc,
+ .resource_free = sec_skcipher_resource_free,
+ .buf_map = sec_skcipher_sgl_map,
+ .buf_unmap = sec_skcipher_sgl_unmap,
+ .do_transfer = sec_skcipher_copy_iv,
+ .bd_fill = sec_skcipher_bd_fill,
+ .bd_send = sec_bd_send,
+ .callback = sec_skcipher_callback,
+ .process = sec_process,
+};
+
+static int sec_skcipher_ctx_init(struct crypto_skcipher *tfm)
+{
+ struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ ctx->req_op = &sec_req_ops_tbl;
+
+ return sec_skcipher_init(tfm);
+}
+
+static void sec_skcipher_ctx_exit(struct crypto_skcipher *tfm)
+{
+ sec_skcipher_exit(tfm);
+}
+
+static int sec_skcipher_param_check(struct sec_ctx *ctx,
+ struct skcipher_request *sk_req)
+{
+ u8 c_alg = ctx->c_ctx.c_alg;
+ struct device *dev = SEC_CTX_DEV(ctx);
+
+ if (!sk_req->src || !sk_req->dst) {
+ dev_err(dev, "skcipher input param error!\n");
+ return -EINVAL;
+ }
+
+ if (c_alg == SEC_CALG_3DES) {
+ if (sk_req->cryptlen & (DES3_EDE_BLOCK_SIZE - 1)) {
+ dev_err(dev, "skcipher 3des input length error!\n");
+ return -EINVAL;
+ }
+ return 0;
+ } else if (c_alg == SEC_CALG_AES || c_alg == SEC_CALG_SM4) {
+ if (sk_req->cryptlen & (AES_BLOCK_SIZE - 1)) {
+ dev_err(dev, "skcipher aes input length error!\n");
+ return -EINVAL;
+ }
+ return 0;
+ }
+
+ dev_err(dev, "skcipher algorithm error!\n");
+ return -EINVAL;
+}
+
+static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(sk_req);
+ struct sec_req *req = skcipher_request_ctx(sk_req);
+ struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
+ int ret;
+
+ if (!sk_req->cryptlen)
+ return 0;
+
+ ret = sec_skcipher_param_check(ctx, sk_req);
+ if (ret)
+ return ret;
+
+ req->c_req.sk_req = sk_req;
+ req->c_req.encrypt = encrypt;
+ req->ctx = ctx;
+
+ return ctx->req_op->process(ctx, req);
+}
+
+static int sec_skcipher_encrypt(struct skcipher_request *sk_req)
+{
+ return sec_skcipher_crypto(sk_req, true);
+}
+
+static int sec_skcipher_decrypt(struct skcipher_request *sk_req)
+{
+ return sec_skcipher_crypto(sk_req, false);
+}
+
+#define SEC_SKCIPHER_GEN_ALG(sec_cra_name, sec_set_key, sec_min_key_size, \
+ sec_max_key_size, ctx_init, ctx_exit, blk_size, iv_size)\
+{\
+ .base = {\
+ .cra_name = sec_cra_name,\
+ .cra_driver_name = "hisi_sec_"sec_cra_name,\
+ .cra_priority = SEC_PRIORITY,\
+ .cra_flags = CRYPTO_ALG_ASYNC,\
+ .cra_blocksize = blk_size,\
+ .cra_ctxsize = sizeof(struct sec_ctx),\
+ .cra_module = THIS_MODULE,\
+ },\
+ .init = ctx_init,\
+ .exit = ctx_exit,\
+ .setkey = sec_set_key,\
+ .decrypt = sec_skcipher_decrypt,\
+ .encrypt = sec_skcipher_encrypt,\
+ .min_keysize = sec_min_key_size,\
+ .max_keysize = sec_max_key_size,\
+ .ivsize = iv_size,\
+},
+
+#define SEC_SKCIPHER_ALG(name, key_func, min_key_size, \
+ max_key_size, blk_size, iv_size) \
+ SEC_SKCIPHER_GEN_ALG(name, key_func, min_key_size, max_key_size, \
+ sec_skcipher_ctx_init, sec_skcipher_ctx_exit, blk_size, iv_size)
+
+static struct skcipher_alg sec_algs[] = {
+ SEC_SKCIPHER_ALG("ecb(aes)", sec_setkey_aes_ecb,
+ AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
+ AES_BLOCK_SIZE, 0)
+
+ SEC_SKCIPHER_ALG("cbc(aes)", sec_setkey_aes_cbc,
+ AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
+ AES_BLOCK_SIZE, AES_BLOCK_SIZE)
+
+ SEC_SKCIPHER_ALG("xts(aes)", sec_setkey_aes_xts,
+ SEC_XTS_MIN_KEY_SIZE, SEC_XTS_MAX_KEY_SIZE,
+ AES_BLOCK_SIZE, AES_BLOCK_SIZE)
+
+ SEC_SKCIPHER_ALG("ecb(des3_ede)", sec_setkey_3des_ecb,
+ SEC_DES3_2KEY_SIZE, SEC_DES3_3KEY_SIZE,
+ DES3_EDE_BLOCK_SIZE, 0)
+
+ SEC_SKCIPHER_ALG("cbc(des3_ede)", sec_setkey_3des_cbc,
+ SEC_DES3_2KEY_SIZE, SEC_DES3_3KEY_SIZE,
+ DES3_EDE_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE)
+
+ SEC_SKCIPHER_ALG("xts(sm4)", sec_setkey_sm4_xts,
+ SEC_XTS_MIN_KEY_SIZE, SEC_XTS_MIN_KEY_SIZE,
+ AES_BLOCK_SIZE, AES_BLOCK_SIZE)
+
+ SEC_SKCIPHER_ALG("cbc(sm4)", sec_setkey_sm4_cbc,
+ AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE,
+ AES_BLOCK_SIZE, AES_BLOCK_SIZE)
+};
+
+int sec_register_to_crypto(void)
+{
+ int ret = 0;
+
+ /* Register the algorithms only once, for the first device probed */
+ mutex_lock(&sec_algs_lock);
+ if (++sec_active_devs == 1)
+ ret = crypto_register_skciphers(sec_algs, ARRAY_SIZE(sec_algs));
+ mutex_unlock(&sec_algs_lock);
+
+ return ret;
+}
+
+void sec_unregister_from_crypto(void)
+{
+ mutex_lock(&sec_algs_lock);
+ if (--sec_active_devs == 0)
+ crypto_unregister_skciphers(sec_algs, ARRAY_SIZE(sec_algs));
+ mutex_unlock(&sec_algs_lock);
+}
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.h b/drivers/crypto/hisilicon/sec2/sec_crypto.h
new file mode 100644
index 000000000000..097dce828340
--- /dev/null
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.h
@@ -0,0 +1,198 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019 HiSilicon Limited. */
+
+#ifndef __HISI_SEC_V2_CRYPTO_H
+#define __HISI_SEC_V2_CRYPTO_H
+
+#define SEC_IV_SIZE 24
+#define SEC_MAX_KEY_SIZE 64
+#define SEC_COMM_SCENE 0
+
+enum sec_calg {
+ SEC_CALG_3DES = 0x1,
+ SEC_CALG_AES = 0x2,
+ SEC_CALG_SM4 = 0x3,
+};
+
+enum sec_cmode {
+ SEC_CMODE_ECB = 0x0,
+ SEC_CMODE_CBC = 0x1,
+ SEC_CMODE_CTR = 0x4,
+ SEC_CMODE_XTS = 0x7,
+};
+
+enum sec_ckey_type {
+ SEC_CKEY_128BIT = 0x0,
+ SEC_CKEY_192BIT = 0x1,
+ SEC_CKEY_256BIT = 0x2,
+ SEC_CKEY_3DES_3KEY = 0x1,
+ SEC_CKEY_3DES_2KEY = 0x3,
+};
+
+enum sec_bd_type {
+ SEC_BD_TYPE1 = 0x1,
+ SEC_BD_TYPE2 = 0x2,
+};
+
+enum sec_cipher_dir {
+ SEC_CIPHER_ENC = 0x1,
+ SEC_CIPHER_DEC = 0x2,
+};
+
+enum sec_addr_type {
+ SEC_PBUF = 0x0,
+ SEC_SGL = 0x1,
+ SEC_PRP = 0x2,
+};
+
+struct sec_sqe_type2 {
+ /*
+ * mac_len: 0~5 bits
+ * a_key_len: 6~10 bits
+ * a_alg: 11~16 bits
+ */
+ __le32 mac_key_alg;
+
+ /*
+ * c_icv_len: 0~5 bits
+ * c_width: 6~8 bits
+ * c_key_len: 9~11 bits
+ * c_mode: 12~15 bits
+ */
+ __le16 icvw_kmode;
+
+ /* c_alg: 0~3 bits */
+ __u8 c_alg;
+ __u8 rsvd4;
+
+ /*
+ * a_len: 0~23 bits
+ * iv_offset_l: 24~31 bits
+ */
+ __le32 alen_ivllen;
+
+ /*
+ * c_len: 0~23 bits
+ * iv_offset_h: 24~31 bits
+ */
+ __le32 clen_ivhlen;
+
+ __le16 auth_src_offset;
+ __le16 cipher_src_offset;
+ __le16 cs_ip_header_offset;
+ __le16 cs_udp_header_offset;
+ __le16 pass_word_len;
+ __le16 dk_len;
+ __u8 salt3;
+ __u8 salt2;
+ __u8 salt1;
+ __u8 salt0;
+
+ __le16 tag;
+ __le16 rsvd5;
+
+ /*
+ * c_pad_type: 0~3 bits
+ * c_pad_len: 4~11 bits
+ * c_pad_data_type: 12~15 bits
+ */
+ __le16 cph_pad;
+
+ /* c_pad_len_field: 0~1 bits */
+ __le16 c_pad_len_field;
+
+ __le64 long_a_data_len;
+ __le64 a_ivin_addr;
+ __le64 a_key_addr;
+ __le64 mac_addr;
+ __le64 c_ivin_addr;
+ __le64 c_key_addr;
+
+ __le64 data_src_addr;
+ __le64 data_dst_addr;
+
+ /*
+ * done: 0 bit
+ * icv: 1~3 bits
+ * csc: 4~6 bits
+ * flag: 7-10 bits
+ * dif_check: 11~13 bits
+ */
+ __le16 done_flag;
+
+ __u8 error_type;
+ __u8 warning_type;
+ __u8 mac_i3;
+ __u8 mac_i2;
+ __u8 mac_i1;
+ __u8 mac_i0;
+ __le16 check_sum_i;
+ __u8 tls_pad_len_i;
+ __u8 rsvd12;
+ __le32 counter;
+};
+
+struct sec_sqe {
+ /*
+ * type: 0~3 bits
+ * cipher: 4~5 bits
+ * auth: 6~7 bits
+ */
+ __u8 type_cipher_auth;
+
+ /*
+ * seq: 0 bit
+ * de: 1~2 bits
+ * scene: 3~6 bits
+ * src_addr_type: 7 bit, continued as sdm_addr_type bits 0~1
+ */
+ __u8 sds_sa_type;
+
+ /*
+ * src_addr_type: 0~1 bits, not used now;
+ * set this field only if PRP is supported, otherwise set it to zero.
+ * dst_addr_type: 2~4 bits
+ * mac_addr_type: 5~7 bits
+ */
+ __u8 sdm_addr_type;
+ __u8 rsvd0;
+
+ /*
+ * nonce_len(type2): 0~3 bits
+ * huk(type2): 4 bit
+ * key_s(type2): 5 bit
+ * ci_gen: 6~7 bits
+ */
+ __u8 huk_key_ci;
+
+ /*
+ * ai_gen: 0~1 bits
+ * a_pad(type2): 2~3 bits
+ * c_s(type2): 4~5 bits
+ */
+ __u8 ai_apd_cs;
+
+ /*
+ * rhf(type2): 0 bit
+ * c_key_type: 1~2 bits
+ * a_key_type: 3~4 bits
+ * write_frame_len(type2): 5~7 bits
+ */
+ __u8 rca_key_frm;
+
+ /*
+ * cal_iv_addr_en(type2): 0 bit
+ * tls_up(type2): 1 bit
+ * inveld: 7 bit
+ */
+ __u8 iv_tls_ld;
+
+ /* Just using type2 BD now */
+ struct sec_sqe_type2 type2;
+};
+
+int sec_register_to_crypto(void);
+void sec_unregister_from_crypto(void);
+#endif
diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
new file mode 100644
index 000000000000..74f0654028c9
--- /dev/null
+++ b/drivers/crypto/hisilicon/sec2/sec_main.c
@@ -0,0 +1,1095 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 HiSilicon Limited. */
+
+#include <linux/acpi.h>
+#include <linux/aer.h>
+#include <linux/bitops.h>
+#include <linux/debugfs.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/seq_file.h>
+#include <linux/topology.h>
+
+#include "sec.h"
+
+#define SEC_VF_NUM 63
+#define SEC_QUEUE_NUM_V1 4096
+#define SEC_QUEUE_NUM_V2 1024
+#define SEC_PF_PCI_DEVICE_ID 0xa255
+#define SEC_VF_PCI_DEVICE_ID 0xa256
+
+#define SEC_XTS_MIV_ENABLE_REG 0x301384
+#define SEC_XTS_MIV_ENABLE_MSK 0x7FFFFFFF
+#define SEC_XTS_MIV_DISABLE_MSK 0xFFFFFFFF
+#define SEC_BD_ERR_CHK_EN1 0xfffff7fd
+#define SEC_BD_ERR_CHK_EN2 0xffffbfff
+
+#define SEC_SQE_SIZE 128
+#define SEC_SQ_SIZE (SEC_SQE_SIZE * QM_Q_DEPTH)
+#define SEC_PF_DEF_Q_NUM 64
+#define SEC_PF_DEF_Q_BASE 0
+#define SEC_CTX_Q_NUM_DEF 24
+
+#define SEC_CTRL_CNT_CLR_CE 0x301120
+#define SEC_CTRL_CNT_CLR_CE_BIT BIT(0)
+#define SEC_ENGINE_PF_CFG_OFF 0x300000
+#define SEC_ACC_COMMON_REG_OFF 0x1000
+#define SEC_CORE_INT_SOURCE 0x301010
+#define SEC_CORE_INT_MASK 0x301000
+#define SEC_CORE_INT_STATUS 0x301008
+#define SEC_CORE_SRAM_ECC_ERR_INFO 0x301C14
+#define SEC_ECC_NUM(err) (((err) >> 16) & 0xFF)
+#define SEC_ECC_ADDR(err) ((err) >> 0)
+#define SEC_CORE_INT_DISABLE 0x0
+#define SEC_CORE_INT_ENABLE 0x1ff
+
+#define SEC_RAS_CE_REG 0x50
+#define SEC_RAS_FE_REG 0x54
+#define SEC_RAS_NFE_REG 0x58
+#define SEC_RAS_CE_ENB_MSK 0x88
+#define SEC_RAS_FE_ENB_MSK 0x0
+#define SEC_RAS_NFE_ENB_MSK 0x177
+#define SEC_RAS_DISABLE 0x0
+#define SEC_MEM_START_INIT_REG 0x0100
+#define SEC_MEM_INIT_DONE_REG 0x0104
+#define SEC_QM_ABNORMAL_INT_MASK 0x100004
+
+#define SEC_CONTROL_REG 0x0200
+#define SEC_TRNG_EN_SHIFT 8
+#define SEC_CLK_GATE_ENABLE BIT(3)
+#define SEC_CLK_GATE_DISABLE (~BIT(3))
+#define SEC_AXI_SHUTDOWN_ENABLE BIT(12)
+#define SEC_AXI_SHUTDOWN_DISABLE 0xFFFFEFFF
+
+#define SEC_INTERFACE_USER_CTRL0_REG 0x0220
+#define SEC_INTERFACE_USER_CTRL1_REG 0x0224
+#define SEC_BD_ERR_CHK_EN_REG1 0x0384
+#define SEC_BD_ERR_CHK_EN_REG2 0x038c
+
+#define SEC_USER0_SMMU_NORMAL (BIT(23) | BIT(15))
+#define SEC_USER1_SMMU_NORMAL (BIT(31) | BIT(23) | BIT(15) | BIT(7))
+#define SEC_CORE_INT_STATUS_M_ECC BIT(2)
+
+#define SEC_DELAY_10_US 10
+#define SEC_POLL_TIMEOUT_US 1000
+#define SEC_VF_CNT_MASK 0xffffffc0
+#define SEC_DBGFS_VAL_MAX_LEN 20
+
+#define SEC_ADDR(qm, offset) ((qm)->io_base + (offset) + \
+ SEC_ENGINE_PF_CFG_OFF + SEC_ACC_COMMON_REG_OFF)
+
+struct sec_hw_error {
+ u32 int_msk;
+ const char *msg;
+};
+
+static const char sec_name[] = "hisi_sec2";
+static struct dentry *sec_debugfs_root;
+static LIST_HEAD(sec_list);
+static DEFINE_MUTEX(sec_list_lock);
+
+static const struct sec_hw_error sec_hw_errors[] = {
+ {.int_msk = BIT(0), .msg = "sec_axi_rresp_err_rint"},
+ {.int_msk = BIT(1), .msg = "sec_axi_bresp_err_rint"},
+ {.int_msk = BIT(2), .msg = "sec_ecc_2bit_err_rint"},
+ {.int_msk = BIT(3), .msg = "sec_ecc_1bit_err_rint"},
+ {.int_msk = BIT(4), .msg = "sec_req_trng_timeout_rint"},
+ {.int_msk = BIT(5), .msg = "sec_fsm_hbeat_rint"},
+ {.int_msk = BIT(6), .msg = "sec_channel_req_rng_timeout_rint"},
+ {.int_msk = BIT(7), .msg = "sec_bd_err_rint"},
+ {.int_msk = BIT(8), .msg = "sec_chain_buff_err_rint"},
+ { /* sentinel */ }
+};
+
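+/*
+ * Find the SEC device nearest to @node by NUMA distance that still has
+ * enough free queue pairs for one more ctx.
+ */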
+struct sec_dev *sec_find_device(int node)
+{
+#define SEC_NUMA_MAX_DISTANCE 100
+ int min_distance = SEC_NUMA_MAX_DISTANCE;
+ int dev_node = 0, free_qp_num = 0;
+ struct sec_dev *sec, *ret = NULL;
+ struct hisi_qm *qm;
+ struct device *dev;
+
+ mutex_lock(&sec_list_lock);
+ list_for_each_entry(sec, &sec_list, list) {
+ qm = &sec->qm;
+ dev = &qm->pdev->dev;
+#ifdef CONFIG_NUMA
+ dev_node = dev->numa_node;
+ if (dev_node < 0)
+ dev_node = 0;
+#endif
+ if (node_distance(dev_node, node) < min_distance) {
+ free_qp_num = hisi_qm_get_free_qp_num(qm);
+ if (free_qp_num >= sec->ctx_q_num) {
+ ret = sec;
+ min_distance = node_distance(dev_node, node);
+ }
+ }
+ }
+ mutex_unlock(&sec_list_lock);
+
+ return ret;
+}
+
+static const char * const sec_dbg_file_name[] = {
+ [SEC_CURRENT_QM] = "current_qm",
+ [SEC_CLEAR_ENABLE] = "clear_enable",
+};
+
+static struct debugfs_reg32 sec_dfx_regs[] = {
+ {"SEC_PF_ABNORMAL_INT_SOURCE ", 0x301010},
+ {"SEC_SAA_EN ", 0x301270},
+ {"SEC_BD_LATENCY_MIN ", 0x301600},
+ {"SEC_BD_LATENCY_MAX ", 0x301608},
+ {"SEC_BD_LATENCY_AVG ", 0x30160C},
+ {"SEC_BD_NUM_IN_SAA0 ", 0x301670},
+ {"SEC_BD_NUM_IN_SAA1 ", 0x301674},
+ {"SEC_BD_NUM_IN_SEC ", 0x301680},
+ {"SEC_ECC_1BIT_CNT ", 0x301C00},
+ {"SEC_ECC_1BIT_INFO ", 0x301C04},
+ {"SEC_ECC_2BIT_CNT ", 0x301C10},
+ {"SEC_ECC_2BIT_INFO ", 0x301C14},
+ {"SEC_BD_SAA0 ", 0x301C20},
+ {"SEC_BD_SAA1 ", 0x301C24},
+ {"SEC_BD_SAA2 ", 0x301C28},
+ {"SEC_BD_SAA3 ", 0x301C2C},
+ {"SEC_BD_SAA4 ", 0x301C30},
+ {"SEC_BD_SAA5 ", 0x301C34},
+ {"SEC_BD_SAA6 ", 0x301C38},
+ {"SEC_BD_SAA7 ", 0x301C3C},
+ {"SEC_BD_SAA8 ", 0x301C40},
+};
+
+static int sec_pf_q_num_set(const char *val, const struct kernel_param *kp)
+{
+ struct pci_dev *pdev;
+ u32 n, q_num;
+ u8 rev_id;
+ int ret;
+
+ if (!val)
+ return -EINVAL;
+
+ pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI,
+ SEC_PF_PCI_DEVICE_ID, NULL);
+ if (!pdev) {
+ q_num = min_t(u32, SEC_QUEUE_NUM_V1, SEC_QUEUE_NUM_V2);
+ pr_info("No device, suppose queue number is %d!\n", q_num);
+ } else {
+ rev_id = pdev->revision;
+
+ switch (rev_id) {
+ case QM_HW_V1:
+ q_num = SEC_QUEUE_NUM_V1;
+ break;
+ case QM_HW_V2:
+ q_num = SEC_QUEUE_NUM_V2;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ ret = kstrtou32(val, 10, &n);
+ if (ret || !n || n > q_num)
+ return -EINVAL;
+
+ return param_set_int(val, kp);
+}
+
+static const struct kernel_param_ops sec_pf_q_num_ops = {
+ .set = sec_pf_q_num_set,
+ .get = param_get_int,
+};
+static u32 pf_q_num = SEC_PF_DEF_Q_NUM;
+module_param_cb(pf_q_num, &sec_pf_q_num_ops, &pf_q_num, 0444);
+MODULE_PARM_DESC(pf_q_num, "Number of queues in PF (v1 1-4096, v2 1-1024)");
+
+static int sec_ctx_q_num_set(const char *val, const struct kernel_param *kp)
+{
+ u32 ctx_q_num;
+ int ret;
+
+ if (!val)
+ return -EINVAL;
+
+ ret = kstrtou32(val, 10, &ctx_q_num);
+ if (ret)
+ return -EINVAL;
+
+ if (!ctx_q_num || ctx_q_num > QM_Q_DEPTH || ctx_q_num & 0x1) {
+ pr_err("ctx queue num[%u] is invalid!\n", ctx_q_num);
+ return -EINVAL;
+ }
+
+ return param_set_int(val, kp);
+}
+
+static const struct kernel_param_ops sec_ctx_q_num_ops = {
+ .set = sec_ctx_q_num_set,
+ .get = param_get_int,
+};
+static u32 ctx_q_num = SEC_CTX_Q_NUM_DEF;
+module_param_cb(ctx_q_num, &sec_ctx_q_num_ops, &ctx_q_num, 0444);
+MODULE_PARM_DESC(ctx_q_num, "Number of queues in ctx (2, 4, 6, ..., 1024)");
+
+static const struct pci_device_id sec_dev_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, SEC_PF_PCI_DEVICE_ID) },
+ { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, SEC_VF_PCI_DEVICE_ID) },
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, sec_dev_ids);
+
+static inline void sec_add_to_list(struct sec_dev *sec)
+{
+ mutex_lock(&sec_list_lock);
+ list_add_tail(&sec->list, &sec_list);
+ mutex_unlock(&sec_list_lock);
+}
+
+static inline void sec_remove_from_list(struct sec_dev *sec)
+{
+ mutex_lock(&sec_list_lock);
+ list_del(&sec->list);
+ mutex_unlock(&sec_list_lock);
+}
+
+static u8 sec_get_endian(struct sec_dev *sec)
+{
+ struct hisi_qm *qm = &sec->qm;
+ u32 reg;
+
+ /*
+ * Reading an engine register is not a valid way for a VF to get the
+ * endian setting, so default to little endian for a VF.
+ */
+ if (qm->pdev->is_virtfn) {
+ dev_err_ratelimited(&qm->pdev->dev,
+ "cannot access a register in VF!\n");
+ return SEC_LE;
+ }
+ reg = readl_relaxed(qm->io_base + SEC_ENGINE_PF_CFG_OFF +
+ SEC_ACC_COMMON_REG_OFF + SEC_CONTROL_REG);
+
+ /* BD little endian mode */
+ if (!(reg & BIT(0)))
+ return SEC_LE;
+
+ /* BD 32-bits big endian mode */
+ else if (!(reg & BIT(1)))
+ return SEC_32BE;
+
+ /* BD 64-bits big endian mode */
+ else
+ return SEC_64BE;
+}
+
+static int sec_engine_init(struct sec_dev *sec)
+{
+ struct hisi_qm *qm = &sec->qm;
+ int ret;
+ u32 reg;
+
+ /* disable clock gate control */
+ reg = readl_relaxed(SEC_ADDR(qm, SEC_CONTROL_REG));
+ reg &= SEC_CLK_GATE_DISABLE;
+ writel_relaxed(reg, SEC_ADDR(qm, SEC_CONTROL_REG));
+
+ writel_relaxed(0x1, SEC_ADDR(qm, SEC_MEM_START_INIT_REG));
+
+ ret = readl_relaxed_poll_timeout(SEC_ADDR(qm, SEC_MEM_INIT_DONE_REG),
+ reg, reg & 0x1, SEC_DELAY_10_US,
+ SEC_POLL_TIMEOUT_US);
+ if (ret) {
+ dev_err(&qm->pdev->dev, "fail to init sec mem\n");
+ return ret;
+ }
+
+ reg = readl_relaxed(SEC_ADDR(qm, SEC_CONTROL_REG));
+ reg |= (0x1 << SEC_TRNG_EN_SHIFT);
+ writel_relaxed(reg, SEC_ADDR(qm, SEC_CONTROL_REG));
+
+ reg = readl_relaxed(SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL0_REG));
+ reg |= SEC_USER0_SMMU_NORMAL;
+ writel_relaxed(reg, SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL0_REG));
+
+ reg = readl_relaxed(SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL1_REG));
+ reg |= SEC_USER1_SMMU_NORMAL;
+ writel_relaxed(reg, SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL1_REG));
+
+ writel_relaxed(SEC_BD_ERR_CHK_EN1,
+ SEC_ADDR(qm, SEC_BD_ERR_CHK_EN_REG1));
+ writel_relaxed(SEC_BD_ERR_CHK_EN2,
+ SEC_ADDR(qm, SEC_BD_ERR_CHK_EN_REG2));
+
+ /* enable clock gate control */
+ reg = readl_relaxed(SEC_ADDR(qm, SEC_CONTROL_REG));
+ reg |= SEC_CLK_GATE_ENABLE;
+ writel_relaxed(reg, SEC_ADDR(qm, SEC_CONTROL_REG));
+
+ /* config endian */
+ reg = readl_relaxed(SEC_ADDR(qm, SEC_CONTROL_REG));
+ reg |= sec_get_endian(sec);
+ writel_relaxed(reg, SEC_ADDR(qm, SEC_CONTROL_REG));
+
+ /* Enable SM4 XTS mode multiple IV */
+ writel_relaxed(SEC_XTS_MIV_ENABLE_MSK,
+ qm->io_base + SEC_XTS_MIV_ENABLE_REG);
+
+ return 0;
+}
+
+static int sec_set_user_domain_and_cache(struct sec_dev *sec)
+{
+ struct hisi_qm *qm = &sec->qm;
+
+ /* qm user domain */
+ writel(AXUSER_BASE, qm->io_base + QM_ARUSER_M_CFG_1);
+ writel(ARUSER_M_CFG_ENABLE, qm->io_base + QM_ARUSER_M_CFG_ENABLE);
+ writel(AXUSER_BASE, qm->io_base + QM_AWUSER_M_CFG_1);
+ writel(AWUSER_M_CFG_ENABLE, qm->io_base + QM_AWUSER_M_CFG_ENABLE);
+ writel(WUSER_M_CFG_ENABLE, qm->io_base + QM_WUSER_M_CFG_ENABLE);
+
+ /* qm cache */
+ writel(AXI_M_CFG, qm->io_base + QM_AXI_M_CFG);
+ writel(AXI_M_CFG_ENABLE, qm->io_base + QM_AXI_M_CFG_ENABLE);
+
+ /* disable FLR triggered by BME(bus master enable) */
+ writel(PEH_AXUSER_CFG, qm->io_base + QM_PEH_AXUSER_CFG);
+ writel(PEH_AXUSER_CFG_ENABLE, qm->io_base + QM_PEH_AXUSER_CFG_ENABLE);
+
+ /* enable sqc,cqc writeback */
+ writel(SQC_CACHE_ENABLE | CQC_CACHE_ENABLE | SQC_CACHE_WB_ENABLE |
+ CQC_CACHE_WB_ENABLE | FIELD_PREP(SQC_CACHE_WB_THRD, 1) |
+ FIELD_PREP(CQC_CACHE_WB_THRD, 1), qm->io_base + QM_CACHE_CTL);
+
+ return sec_engine_init(sec);
+}
+
+/* sec_debug_regs_clear() - clear the sec debug regs */
+static void sec_debug_regs_clear(struct hisi_qm *qm)
+{
+ /* clear current_qm */
+ writel(0x0, qm->io_base + QM_DFX_MB_CNT_VF);
+ writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF);
+
+ /* clear rdclr_en */
+ writel(0x0, qm->io_base + SEC_CTRL_CNT_CLR_CE);
+
+ hisi_qm_debug_regs_clear(qm);
+}
+
+static void sec_hw_error_enable(struct sec_dev *sec)
+{
+ struct hisi_qm *qm = &sec->qm;
+ u32 val;
+
+ if (qm->ver == QM_HW_V1) {
+ writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_MASK);
+ dev_info(&qm->pdev->dev, "V1 not support hw error handle\n");
+ return;
+ }
+
+ val = readl(qm->io_base + SEC_CONTROL_REG);
+
+ /* clear any pending SEC hw error source */
+ writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_SOURCE);
+
+ /* enable SEC hw error interrupts */
+ writel(SEC_CORE_INT_ENABLE, qm->io_base + SEC_CORE_INT_MASK);
+
+ /* enable RAS int */
+ writel(SEC_RAS_CE_ENB_MSK, qm->io_base + SEC_RAS_CE_REG);
+ writel(SEC_RAS_FE_ENB_MSK, qm->io_base + SEC_RAS_FE_REG);
+ writel(SEC_RAS_NFE_ENB_MSK, qm->io_base + SEC_RAS_NFE_REG);
+
+ /* enable SEC block master OOO when an m-bit error occurs */
+ val = val | SEC_AXI_SHUTDOWN_ENABLE;
+
+ writel(val, qm->io_base + SEC_CONTROL_REG);
+}
+
+static void sec_hw_error_disable(struct sec_dev *sec)
+{
+ struct hisi_qm *qm = &sec->qm;
+ u32 val;
+
+ val = readl(qm->io_base + SEC_CONTROL_REG);
+
+ /* disable RAS int */
+ writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_CE_REG);
+ writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_FE_REG);
+ writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_NFE_REG);
+
+ /* disable SEC hw error interrupts */
+ writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_MASK);
+
+ /* disable SEC block master OOO when an m-bit error occurs */
+ val = val & SEC_AXI_SHUTDOWN_DISABLE;
+
+ writel(val, qm->io_base + SEC_CONTROL_REG);
+}
+
+static void sec_hw_error_init(struct sec_dev *sec)
+{
+ if (sec->qm.fun_type == QM_HW_VF)
+ return;
+
+ hisi_qm_hw_error_init(&sec->qm, QM_BASE_CE,
+ QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT
+ | QM_ACC_WB_NOT_READY_TIMEOUT, 0,
+ QM_DB_RANDOM_INVALID);
+ sec_hw_error_enable(sec);
+}
+
+static void sec_hw_error_uninit(struct sec_dev *sec)
+{
+ if (sec->qm.fun_type == QM_HW_VF)
+ return;
+
+ sec_hw_error_disable(sec);
+ writel(GENMASK(12, 0), sec->qm.io_base + SEC_QM_ABNORMAL_INT_MASK);
+}
+
+static u32 sec_current_qm_read(struct sec_debug_file *file)
+{
+ struct hisi_qm *qm = file->qm;
+
+ return readl(qm->io_base + QM_DFX_MB_CNT_VF);
+}
+
+static int sec_current_qm_write(struct sec_debug_file *file, u32 val)
+{
+ struct hisi_qm *qm = file->qm;
+ struct sec_dev *sec = container_of(qm, struct sec_dev, qm);
+ u32 vfq_num;
+ u32 tmp;
+
+ if (val > sec->num_vfs)
+ return -EINVAL;
+
+ /* Calculate curr_qm_qp_num from the PF or VF device ID and store it */
+ if (!val) {
+ qm->debug.curr_qm_qp_num = qm->qp_num;
+ } else {
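+ /* Queues not owned by the PF are split evenly among the VFs;
+ * the last VF also takes the remainder.
+ */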
+ vfq_num = (qm->ctrl_qp_num - qm->qp_num) / sec->num_vfs;
+
+ if (val == sec->num_vfs)
+ qm->debug.curr_qm_qp_num =
+ qm->ctrl_qp_num - qm->qp_num -
+ (sec->num_vfs - 1) * vfq_num;
+ else
+ qm->debug.curr_qm_qp_num = vfq_num;
+ }
+
+ writel(val, qm->io_base + QM_DFX_MB_CNT_VF);
+ writel(val, qm->io_base + QM_DFX_DB_CNT_VF);
+
+ tmp = val |
+ (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_Q_MASK);
+ writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);
+
+ tmp = val |
+ (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_Q_MASK);
+ writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);
+
+ return 0;
+}
+
+static u32 sec_clear_enable_read(struct sec_debug_file *file)
+{
+ struct hisi_qm *qm = file->qm;
+
+ return readl(qm->io_base + SEC_CTRL_CNT_CLR_CE) &
+ SEC_CTRL_CNT_CLR_CE_BIT;
+}
+
+static int sec_clear_enable_write(struct sec_debug_file *file, u32 val)
+{
+ struct hisi_qm *qm = file->qm;
+ u32 tmp;
+
+ if (val != 1 && val)
+ return -EINVAL;
+
+ tmp = (readl(qm->io_base + SEC_CTRL_CNT_CLR_CE) &
+ ~SEC_CTRL_CNT_CLR_CE_BIT) | val;
+ writel(tmp, qm->io_base + SEC_CTRL_CNT_CLR_CE);
+
+ return 0;
+}
+
+static ssize_t sec_debug_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct sec_debug_file *file = filp->private_data;
+ char tbuf[SEC_DBGFS_VAL_MAX_LEN];
+ u32 val;
+ int ret;
+
+ spin_lock_irq(&file->lock);
+
+ switch (file->index) {
+ case SEC_CURRENT_QM:
+ val = sec_current_qm_read(file);
+ break;
+ case SEC_CLEAR_ENABLE:
+ val = sec_clear_enable_read(file);
+ break;
+ default:
+ spin_unlock_irq(&file->lock);
+ return -EINVAL;
+ }
+
+ spin_unlock_irq(&file->lock);
+ ret = snprintf(tbuf, SEC_DBGFS_VAL_MAX_LEN, "%u\n", val);
+
+ return simple_read_from_buffer(buf, count, pos, tbuf, ret);
+}
+
+static ssize_t sec_debug_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct sec_debug_file *file = filp->private_data;
+ char tbuf[SEC_DBGFS_VAL_MAX_LEN];
+ unsigned long val;
+ int len, ret;
+
+ if (*pos != 0)
+ return 0;
+
+ if (count >= SEC_DBGFS_VAL_MAX_LEN)
+ return -ENOSPC;
+
+ len = simple_write_to_buffer(tbuf, SEC_DBGFS_VAL_MAX_LEN - 1,
+ pos, buf, count);
+ if (len < 0)
+ return len;
+
+ tbuf[len] = '\0';
+ if (kstrtoul(tbuf, 0, &val))
+ return -EFAULT;
+
+ spin_lock_irq(&file->lock);
+
+ switch (file->index) {
+ case SEC_CURRENT_QM:
+ ret = sec_current_qm_write(file, val);
+ if (ret)
+ goto err_input;
+ break;
+ case SEC_CLEAR_ENABLE:
+ ret = sec_clear_enable_write(file, val);
+ if (ret)
+ goto err_input;
+ break;
+ default:
+ ret = -EINVAL;
+ goto err_input;
+ }
+
+ spin_unlock_irq(&file->lock);
+
+ return count;
+
err_input:
+ spin_unlock_irq(&file->lock);
+ return ret;
+}
+
+static const struct file_operations sec_dbg_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = sec_debug_read,
+ .write = sec_debug_write,
+};
+
+static int sec_core_debug_init(struct sec_dev *sec)
+{
+ struct hisi_qm *qm = &sec->qm;
+ struct device *dev = &qm->pdev->dev;
+ struct sec_dfx *dfx = &sec->debug.dfx;
+ struct debugfs_regset32 *regset;
+ struct dentry *tmp_d;
+
+ tmp_d = debugfs_create_dir("sec_dfx", sec->qm.debug.debug_root);
+
+ regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
+ if (!regset)
+ return -ENOMEM;
+
+ regset->regs = sec_dfx_regs;
+ regset->nregs = ARRAY_SIZE(sec_dfx_regs);
+ regset->base = qm->io_base;
+
+ debugfs_create_regset32("regs", 0444, tmp_d, regset);
+
+ debugfs_create_u64("send_cnt", 0444, tmp_d, &dfx->send_cnt);
+
+ debugfs_create_u64("recv_cnt", 0444, tmp_d, &dfx->recv_cnt);
+
+ return 0;
+}
+
+static int sec_debug_init(struct sec_dev *sec)
+{
+ int i;
+
+ for (i = SEC_CURRENT_QM; i < SEC_DEBUG_FILE_NUM; i++) {
+ spin_lock_init(&sec->debug.files[i].lock);
+ sec->debug.files[i].index = i;
+ sec->debug.files[i].qm = &sec->qm;
+
+ debugfs_create_file(sec_dbg_file_name[i], 0600,
+ sec->qm.debug.debug_root,
+ sec->debug.files + i,
+ &sec_dbg_fops);
+ }
+
+ return sec_core_debug_init(sec);
+}
+
+static int sec_debugfs_init(struct sec_dev *sec)
+{
+ struct hisi_qm *qm = &sec->qm;
+ struct device *dev = &qm->pdev->dev;
+ int ret;
+
+ qm->debug.debug_root = debugfs_create_dir(dev_name(dev),
+ sec_debugfs_root);
+ ret = hisi_qm_debug_init(qm);
+ if (ret)
+ goto failed_to_create;
+
+ if (qm->pdev->device == SEC_PF_PCI_DEVICE_ID) {
+ ret = sec_debug_init(sec);
+ if (ret)
+ goto failed_to_create;
+ }
+
+ return 0;
+
+failed_to_create:
+ debugfs_remove_recursive(sec_debugfs_root);
+
+ return ret;
+}
+
+static void sec_debugfs_exit(struct sec_dev *sec)
+{
+ debugfs_remove_recursive(sec->qm.debug.debug_root);
+}
+
+static int sec_pf_probe_init(struct sec_dev *sec)
+{
+ struct hisi_qm *qm = &sec->qm;
+ int ret;
+
+ switch (qm->ver) {
+ case QM_HW_V1:
+ qm->ctrl_qp_num = SEC_QUEUE_NUM_V1;
+ break;
+
+ case QM_HW_V2:
+ qm->ctrl_qp_num = SEC_QUEUE_NUM_V2;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ ret = sec_set_user_domain_and_cache(sec);
+ if (ret)
+ return ret;
+
+ sec_hw_error_init(sec);
+ sec_debug_regs_clear(qm);
+
+ return 0;
+}
+
+static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
+{
+ enum qm_hw_ver rev_id;
+
+ rev_id = hisi_qm_get_hw_version(pdev);
+ if (rev_id == QM_HW_UNKNOWN)
+ return -ENODEV;
+
+ qm->pdev = pdev;
+ qm->ver = rev_id;
+
+ qm->sqe_size = SEC_SQE_SIZE;
+ qm->dev_name = sec_name;
+ qm->fun_type = (pdev->device == SEC_PF_PCI_DEVICE_ID) ?
+ QM_HW_PF : QM_HW_VF;
+ qm->use_dma_api = true;
+
+ return hisi_qm_init(qm);
+}
+
+static void sec_qm_uninit(struct hisi_qm *qm)
+{
+ hisi_qm_uninit(qm);
+}
+
+static int sec_probe_init(struct hisi_qm *qm, struct sec_dev *sec)
+{
+ if (qm->fun_type == QM_HW_PF) {
+ qm->qp_base = SEC_PF_DEF_Q_BASE;
+ qm->qp_num = pf_q_num;
+ qm->debug.curr_qm_qp_num = pf_q_num;
+
+ return sec_pf_probe_init(sec);
+ } else if (qm->fun_type == QM_HW_VF) {
+ /*
+ * There is no way to get the qm configuration from within a VM on
+ * v1 hardware, so the PF is currently forced to use SEC_PF_DEF_Q_NUM
+ * and only one VF is triggered on v1 hardware.
+ * v2 hardware has no such problem.
+ */
+ if (qm->ver == QM_HW_V1) {
+ qm->qp_base = SEC_PF_DEF_Q_NUM;
+ qm->qp_num = SEC_QUEUE_NUM_V1 - SEC_PF_DEF_Q_NUM;
+ } else if (qm->ver == QM_HW_V2) {
+ /* v2 starts to support get vft by mailbox */
+ return hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num);
+ }
+ } else {
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static void sec_probe_uninit(struct sec_dev *sec)
+{
+ sec_hw_error_uninit(sec);
+}
+
+static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct sec_dev *sec;
+ struct hisi_qm *qm;
+ int ret;
+
+ sec = devm_kzalloc(&pdev->dev, sizeof(*sec), GFP_KERNEL);
+ if (!sec)
+ return -ENOMEM;
+
+ pci_set_drvdata(pdev, sec);
+
+ sec->ctx_q_num = ctx_q_num;
+
+ qm = &sec->qm;
+
+ ret = sec_qm_init(qm, pdev);
+ if (ret) {
+ pci_err(pdev, "Failed to pre init qm!\n");
+ return ret;
+ }
+
+ ret = sec_probe_init(qm, sec);
+ if (ret) {
+ pci_err(pdev, "Failed to probe!\n");
+ goto err_qm_uninit;
+ }
+
+ ret = hisi_qm_start(qm);
+ if (ret) {
+ pci_err(pdev, "Failed to start sec qm!\n");
+ goto err_probe_uninit;
+ }
+
+ ret = sec_debugfs_init(sec);
+ if (ret)
+ pci_warn(pdev, "Failed to init debugfs!\n");
+
+ sec_add_to_list(sec);
+
+ ret = sec_register_to_crypto();
+ if (ret < 0) {
+ pr_err("Failed to register driver to crypto.\n");
+ goto err_remove_from_list;
+ }
+
+ return 0;
+
+err_remove_from_list:
+ sec_remove_from_list(sec);
+ sec_debugfs_exit(sec);
+ hisi_qm_stop(qm);
+
+err_probe_uninit:
+ sec_probe_uninit(sec);
+
+err_qm_uninit:
+ sec_qm_uninit(qm);
+
+ return ret;
+}
+
+/* now we only support equal assignment */
+static int sec_vf_q_assign(struct sec_dev *sec, u32 num_vfs)
+{
+ struct hisi_qm *qm = &sec->qm;
+ u32 qp_num = qm->qp_num;
+ u32 q_base = qp_num;
+ u32 q_num, remain_q_num;
+ int i, j, ret;
+
+ if (!num_vfs)
+ return -EINVAL;
+
+ remain_q_num = qm->ctrl_qp_num - qp_num;
+ q_num = remain_q_num / num_vfs;
+
+ for (i = 1; i <= num_vfs; i++) {
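+ /* the last VF also takes the remainder queues */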
+ if (i == num_vfs)
+ q_num += remain_q_num % num_vfs;
+ ret = hisi_qm_set_vft(qm, i, q_base, q_num);
+ if (ret) {
+ for (j = i; j > 0; j--)
+ hisi_qm_set_vft(qm, j, 0, 0);
+ return ret;
+ }
+ q_base += q_num;
+ }
+
+ return 0;
+}
+
+static int sec_clear_vft_config(struct sec_dev *sec)
+{
+ struct hisi_qm *qm = &sec->qm;
+ u32 num_vfs = sec->num_vfs;
+ int ret;
+ u32 i;
+
+ for (i = 1; i <= num_vfs; i++) {
+ ret = hisi_qm_set_vft(qm, i, 0, 0);
+ if (ret)
+ return ret;
+ }
+
+ sec->num_vfs = 0;
+
+ return 0;
+}
+
+static int sec_sriov_enable(struct pci_dev *pdev, int max_vfs)
+{
+ struct sec_dev *sec = pci_get_drvdata(pdev);
+ int pre_existing_vfs, ret;
+ u32 num_vfs;
+
+ pre_existing_vfs = pci_num_vf(pdev);
+
+ if (pre_existing_vfs) {
+ pci_err(pdev, "Can't enable VF. Please disable at first!\n");
+ return 0;
+ }
+
+ num_vfs = min_t(u32, max_vfs, SEC_VF_NUM);
+
+ ret = sec_vf_q_assign(sec, num_vfs);
+ if (ret) {
+ pci_err(pdev, "Can't assign queues for VF!\n");
+ return ret;
+ }
+
+ sec->num_vfs = num_vfs;
+
+ ret = pci_enable_sriov(pdev, num_vfs);
+ if (ret) {
+ pci_err(pdev, "Can't enable VF!\n");
+ sec_clear_vft_config(sec);
+ return ret;
+ }
+
+ return num_vfs;
+}
+
+static int sec_sriov_disable(struct pci_dev *pdev)
+{
+ struct sec_dev *sec = pci_get_drvdata(pdev);
+
+ if (pci_vfs_assigned(pdev)) {
+ pci_err(pdev, "Can't disable VFs while VFs are assigned!\n");
+ return -EPERM;
+ }
+
+ /* The remove callback of sec_pci_driver will be called to free VF resources */
+ pci_disable_sriov(pdev);
+
+ return sec_clear_vft_config(sec);
+}
+
+static int sec_sriov_configure(struct pci_dev *pdev, int num_vfs)
+{
+ if (num_vfs)
+ return sec_sriov_enable(pdev, num_vfs);
+ else
+ return sec_sriov_disable(pdev);
+}
+
+static void sec_remove(struct pci_dev *pdev)
+{
+ struct sec_dev *sec = pci_get_drvdata(pdev);
+ struct hisi_qm *qm = &sec->qm;
+
+ sec_unregister_from_crypto();
+
+ sec_remove_from_list(sec);
+
+ if (qm->fun_type == QM_HW_PF && sec->num_vfs)
+ (void)sec_sriov_disable(pdev);
+
+ sec_debugfs_exit(sec);
+
+ (void)hisi_qm_stop(qm);
+
+ if (qm->fun_type == QM_HW_PF)
+ sec_debug_regs_clear(qm);
+
+ sec_probe_uninit(sec);
+
+ sec_qm_uninit(qm);
+}
+
+static void sec_log_hw_error(struct sec_dev *sec, u32 err_sts)
+{
+ const struct sec_hw_error *errs = sec_hw_errors;
+ struct device *dev = &sec->qm.pdev->dev;
+ u32 err_val;
+
+ while (errs->msg) {
+ if (errs->int_msk & err_sts) {
+ dev_err(dev, "%s [error status=0x%x] found\n",
+ errs->msg, errs->int_msk);
+
+ if (SEC_CORE_INT_STATUS_M_ECC & err_sts) {
+ err_val = readl(sec->qm.io_base +
+ SEC_CORE_SRAM_ECC_ERR_INFO);
+ dev_err(dev, "multi ecc sram num=0x%x\n",
+ SEC_ECC_NUM(err_val));
+ dev_err(dev, "multi ecc sram addr=0x%x\n",
+ SEC_ECC_ADDR(err_val));
+ }
+ }
+ errs++;
+ }
+}
+
+static pci_ers_result_t sec_hw_error_handle(struct sec_dev *sec)
+{
+ u32 err_sts;
+
+ /* read err sts */
+ err_sts = readl(sec->qm.io_base + SEC_CORE_INT_STATUS);
+ if (err_sts) {
+ sec_log_hw_error(sec, err_sts);
+
+ /* clear error interrupts */
+ writel(err_sts, sec->qm.io_base + SEC_CORE_INT_SOURCE);
+
+ return PCI_ERS_RESULT_NEED_RESET;
+ }
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+static pci_ers_result_t sec_process_hw_error(struct pci_dev *pdev)
+{
+ struct sec_dev *sec = pci_get_drvdata(pdev);
+ pci_ers_result_t qm_ret, sec_ret;
+
+ if (!sec) {
+ pci_err(pdev, "Can't recover error during device init\n");
+ return PCI_ERS_RESULT_NONE;
+ }
+
+ /* log qm error */
+ qm_ret = hisi_qm_hw_error_handle(&sec->qm);
+
+ /* log sec error */
+ sec_ret = sec_hw_error_handle(sec);
+
+ return (qm_ret == PCI_ERS_RESULT_NEED_RESET ||
+ sec_ret == PCI_ERS_RESULT_NEED_RESET) ?
+ PCI_ERS_RESULT_NEED_RESET : PCI_ERS_RESULT_RECOVERED;
+}
+
+static pci_ers_result_t sec_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ if (pdev->is_virtfn)
+ return PCI_ERS_RESULT_NONE;
+
+ pci_info(pdev, "PCI error detected, state(=%d)!!\n", state);
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ return sec_process_hw_error(pdev);
+}
+
+static const struct pci_error_handlers sec_err_handler = {
+ .error_detected = sec_error_detected,
+};
+
+static struct pci_driver sec_pci_driver = {
+ .name = "hisi_sec2",
+ .id_table = sec_dev_ids,
+ .probe = sec_probe,
+ .remove = sec_remove,
+ .err_handler = &sec_err_handler,
+ .sriov_configure = sec_sriov_configure,
+};
+
+static void sec_register_debugfs(void)
+{
+ if (!debugfs_initialized())
+ return;
+
+ sec_debugfs_root = debugfs_create_dir("hisi_sec2", NULL);
+}
+
+static void sec_unregister_debugfs(void)
+{
+ debugfs_remove_recursive(sec_debugfs_root);
+}
+
+static int __init sec_init(void)
+{
+ int ret;
+
+ sec_register_debugfs();
+
+ ret = pci_register_driver(&sec_pci_driver);
+ if (ret < 0) {
+ sec_unregister_debugfs();
+ pr_err("Failed to register pci driver.\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void __exit sec_exit(void)
+{
+ pci_unregister_driver(&sec_pci_driver);
+ sec_unregister_debugfs();
+}
+
+module_init(sec_init);
+module_exit(sec_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Zaibo Xu <xuzaibo@huawei.com>");
+MODULE_AUTHOR("Longfang Liu <liulongfang@huawei.com>");
+MODULE_AUTHOR("Wei Zhang <zhangwei375@huawei.com>");
+MODULE_DESCRIPTION("Driver for HiSilicon SEC accelerator");
diff --git a/drivers/crypto/hisilicon/sgl.c b/drivers/crypto/hisilicon/sgl.c
index e083d172b618..012023c347b1 100644
--- a/drivers/crypto/hisilicon/sgl.c
+++ b/drivers/crypto/hisilicon/sgl.c
@@ -2,37 +2,13 @@
/* Copyright (c) 2019 HiSilicon Limited. */
#include <linux/dma-mapping.h>
#include <linux/module.h>
-#include "./sgl.h"
+#include <linux/slab.h>
+#include "qm.h"
#define HISI_ACC_SGL_SGE_NR_MIN 1
-#define HISI_ACC_SGL_SGE_NR_MAX 255
-#define HISI_ACC_SGL_SGE_NR_DEF 10
#define HISI_ACC_SGL_NR_MAX 256
#define HISI_ACC_SGL_ALIGN_SIZE 64
-
-static int acc_sgl_sge_set(const char *val, const struct kernel_param *kp)
-{
- int ret;
- u32 n;
-
- if (!val)
- return -EINVAL;
-
- ret = kstrtou32(val, 10, &n);
- if (ret != 0 || n > HISI_ACC_SGL_SGE_NR_MAX || n == 0)
- return -EINVAL;
-
- return param_set_int(val, kp);
-}
-
-static const struct kernel_param_ops acc_sgl_sge_ops = {
- .set = acc_sgl_sge_set,
- .get = param_get_int,
-};
-
-static u32 acc_sgl_sge_nr = HISI_ACC_SGL_SGE_NR_DEF;
-module_param_cb(acc_sgl_sge_nr, &acc_sgl_sge_ops, &acc_sgl_sge_nr, 0444);
-MODULE_PARM_DESC(acc_sgl_sge_nr, "Number of sge in sgl(1-255)");
+#define HISI_ACC_MEM_BLOCK_NR 5
struct acc_hw_sge {
dma_addr_t buf;
@@ -55,37 +31,91 @@ struct hisi_acc_hw_sgl {
struct acc_hw_sge sge_entries[];
} __aligned(1);
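+/*
+ * The pool memory is split into up to HISI_ACC_MEM_BLOCK_NR DMA-coherent
+ * blocks: full blocks hold sgl_num_per_block hw sgls each, and a last
+ * partial block holds any remainder.
+ */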
+struct hisi_acc_sgl_pool {
+ struct mem_block {
+ struct hisi_acc_hw_sgl *sgl;
+ dma_addr_t sgl_dma;
+ size_t size;
+ } mem_block[HISI_ACC_MEM_BLOCK_NR];
+ u32 sgl_num_per_block;
+ u32 block_num;
+ u32 count;
+ u32 sge_nr;
+ size_t sgl_size;
+};
+
/**
* hisi_acc_create_sgl_pool() - Create a hw sgl pool.
* @dev: The device which hw sgl pool belongs to.
- * @pool: Pointer of pool.
* @count: Count of hisi_acc_hw_sgl in pool.
+ * @sge_nr: The count of sge in hw_sgl
*
* This function creates a hw sgl pool, after this user can get hw sgl memory
* from it.
*/
-int hisi_acc_create_sgl_pool(struct device *dev,
- struct hisi_acc_sgl_pool *pool, u32 count)
+struct hisi_acc_sgl_pool *hisi_acc_create_sgl_pool(struct device *dev,
+ u32 count, u32 sge_nr)
{
- u32 sgl_size;
- u32 size;
+ u32 sgl_size, block_size, sgl_num_per_block, block_num, remain_sgl = 0;
+ struct hisi_acc_sgl_pool *pool;
+ struct mem_block *block;
+ u32 i, j;
- if (!dev || !pool || !count)
- return -EINVAL;
+ if (!dev || !count || !sge_nr || sge_nr > HISI_ACC_SGL_SGE_NR_MAX)
+ return ERR_PTR(-EINVAL);
- sgl_size = sizeof(struct acc_hw_sge) * acc_sgl_sge_nr +
+ sgl_size = sizeof(struct acc_hw_sge) * sge_nr +
sizeof(struct hisi_acc_hw_sgl);
- size = sgl_size * count;
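+ /* size each block at the largest order the page allocator supports */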
+ block_size = PAGE_SIZE * (1 << (MAX_ORDER - 1));
+ sgl_num_per_block = block_size / sgl_size;
+ block_num = count / sgl_num_per_block;
+ remain_sgl = count % sgl_num_per_block;
+
+ if ((!remain_sgl && block_num > HISI_ACC_MEM_BLOCK_NR) ||
+ (remain_sgl > 0 && block_num > HISI_ACC_MEM_BLOCK_NR - 1))
+ return ERR_PTR(-EINVAL);
+
+ pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+ if (!pool)
+ return ERR_PTR(-ENOMEM);
+ block = pool->mem_block;
+
+ for (i = 0; i < block_num; i++) {
+ block[i].sgl = dma_alloc_coherent(dev, block_size,
+ &block[i].sgl_dma,
+ GFP_KERNEL);
+ if (!block[i].sgl)
+ goto err_free_mem;
+
+ block[i].size = block_size;
+ }
+
+ if (remain_sgl > 0) {
+ block[i].sgl = dma_alloc_coherent(dev, remain_sgl * sgl_size,
+ &block[i].sgl_dma,
+ GFP_KERNEL);
+ if (!block[i].sgl)
+ goto err_free_mem;
- pool->sgl = dma_alloc_coherent(dev, size, &pool->sgl_dma, GFP_KERNEL);
- if (!pool->sgl)
- return -ENOMEM;
+ block[i].size = remain_sgl * sgl_size;
+ }
- pool->size = size;
+ pool->sgl_num_per_block = sgl_num_per_block;
+ pool->block_num = remain_sgl ? block_num + 1 : block_num;
pool->count = count;
pool->sgl_size = sgl_size;
+ pool->sge_nr = sge_nr;
- return 0;
+ return pool;
+
+err_free_mem:
+ for (j = 0; j < i; j++) {
+ dma_free_coherent(dev, block_size, block[j].sgl,
+ block[j].sgl_dma);
+ memset(block + j, 0, sizeof(*block));
+ }
+ kfree(pool);
+ return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL_GPL(hisi_acc_create_sgl_pool);
@@ -98,38 +128,57 @@ EXPORT_SYMBOL_GPL(hisi_acc_create_sgl_pool);
*/
void hisi_acc_free_sgl_pool(struct device *dev, struct hisi_acc_sgl_pool *pool)
{
- dma_free_coherent(dev, pool->size, pool->sgl, pool->sgl_dma);
- memset(pool, 0, sizeof(struct hisi_acc_sgl_pool));
+ struct mem_block *block;
+ int i;
+
+ if (!dev || !pool)
+ return;
+
+ block = pool->mem_block;
+
+ for (i = 0; i < pool->block_num; i++)
+ dma_free_coherent(dev, block[i].size, block[i].sgl,
+ block[i].sgl_dma);
+
+ kfree(pool);
}
EXPORT_SYMBOL_GPL(hisi_acc_free_sgl_pool);
-struct hisi_acc_hw_sgl *acc_get_sgl(struct hisi_acc_sgl_pool *pool, u32 index,
- dma_addr_t *hw_sgl_dma)
+static struct hisi_acc_hw_sgl *acc_get_sgl(struct hisi_acc_sgl_pool *pool,
+ u32 index, dma_addr_t *hw_sgl_dma)
{
- if (!pool || !hw_sgl_dma || index >= pool->count || !pool->sgl)
+ struct mem_block *block;
+ u32 block_index, offset;
+
+ if (!pool || !hw_sgl_dma || index >= pool->count)
return ERR_PTR(-EINVAL);
- *hw_sgl_dma = pool->sgl_dma + pool->sgl_size * index;
- return (void *)pool->sgl + pool->sgl_size * index;
-}
+ block = pool->mem_block;
+ block_index = index / pool->sgl_num_per_block;
+ offset = index % pool->sgl_num_per_block;
-void acc_put_sgl(struct hisi_acc_sgl_pool *pool, u32 index) {}
+ *hw_sgl_dma = block[block_index].sgl_dma + pool->sgl_size * offset;
+ return (void *)block[block_index].sgl + pool->sgl_size * offset;
+}
static void sg_map_to_hw_sg(struct scatterlist *sgl,
struct acc_hw_sge *hw_sge)
{
- hw_sge->buf = sgl->dma_address;
- hw_sge->len = sgl->dma_length;
+ hw_sge->buf = sg_dma_address(sgl);
+ hw_sge->len = cpu_to_le32(sg_dma_len(sgl));
}
static void inc_hw_sgl_sge(struct hisi_acc_hw_sgl *hw_sgl)
{
- hw_sgl->entry_sum_in_sgl++;
+ u16 var = le16_to_cpu(hw_sgl->entry_sum_in_sgl);
+
+ var++;
+ hw_sgl->entry_sum_in_sgl = cpu_to_le16(var);
}
static void update_hw_sgl_sum_sge(struct hisi_acc_hw_sgl *hw_sgl, u16 sum)
{
- hw_sgl->entry_sum_in_chain = sum;
+ hw_sgl->entry_sum_in_chain = cpu_to_le16(sum);
}
/**
@@ -153,10 +202,13 @@ hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev,
dma_addr_t curr_sgl_dma = 0;
struct acc_hw_sge *curr_hw_sge;
struct scatterlist *sg;
- int sg_n = sg_nents(sgl);
- int i, ret;
+ int i, ret, sg_n;
- if (!dev || !sgl || !pool || !hw_sgl_dma || sg_n > acc_sgl_sge_nr)
+ if (!dev || !sgl || !pool || !hw_sgl_dma)
+ return ERR_PTR(-EINVAL);
+
+ sg_n = sg_nents(sgl);
+ if (sg_n > pool->sge_nr)
return ERR_PTR(-EINVAL);
ret = dma_map_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL);
@@ -164,11 +216,12 @@ hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev,
return ERR_PTR(-EINVAL);
curr_hw_sgl = acc_get_sgl(pool, index, &curr_sgl_dma);
- if (!curr_hw_sgl) {
- ret = -ENOMEM;
- goto err_unmap_sg;
+ if (IS_ERR(curr_hw_sgl)) {
+ dma_unmap_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL);
+ return ERR_PTR(-ENOMEM);
}
- curr_hw_sgl->entry_length_in_sgl = acc_sgl_sge_nr;
+ curr_hw_sgl->entry_length_in_sgl = cpu_to_le16(pool->sge_nr);
curr_hw_sge = curr_hw_sgl->sge_entries;
for_each_sg(sgl, sg, sg_n, i) {
@@ -177,14 +230,10 @@ hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev,
curr_hw_sge++;
}
- update_hw_sgl_sum_sge(curr_hw_sgl, acc_sgl_sge_nr);
+ update_hw_sgl_sum_sge(curr_hw_sgl, pool->sge_nr);
*hw_sgl_dma = curr_sgl_dma;
return curr_hw_sgl;
-
-err_unmap_sg:
- dma_unmap_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL);
- return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(hisi_acc_sg_buf_map_to_hw_sgl);
@@ -201,6 +250,9 @@ EXPORT_SYMBOL_GPL(hisi_acc_sg_buf_map_to_hw_sgl);
void hisi_acc_sg_buf_unmap(struct device *dev, struct scatterlist *sgl,
struct hisi_acc_hw_sgl *hw_sgl)
{
+ if (!dev || !sgl || !hw_sgl)
+ return;
+
dma_unmap_sg(dev, sgl, sg_nents(sgl), DMA_BIDIRECTIONAL);
hw_sgl->entry_sum_in_chain = 0;
diff --git a/drivers/crypto/hisilicon/sgl.h b/drivers/crypto/hisilicon/sgl.h
deleted file mode 100644
index 3ac8871c7acf..000000000000
--- a/drivers/crypto/hisilicon/sgl.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (c) 2019 HiSilicon Limited. */
-#ifndef HISI_ACC_SGL_H
-#define HISI_ACC_SGL_H
-
-struct hisi_acc_sgl_pool {
- struct hisi_acc_hw_sgl *sgl;
- dma_addr_t sgl_dma;
- size_t size;
- u32 count;
- size_t sgl_size;
-};
-
-struct hisi_acc_hw_sgl *
-hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev,
- struct scatterlist *sgl,
- struct hisi_acc_sgl_pool *pool,
- u32 index, dma_addr_t *hw_sgl_dma);
-void hisi_acc_sg_buf_unmap(struct device *dev, struct scatterlist *sgl,
- struct hisi_acc_hw_sgl *hw_sgl);
-int hisi_acc_create_sgl_pool(struct device *dev, struct hisi_acc_sgl_pool *pool,
- u32 count);
-void hisi_acc_free_sgl_pool(struct device *dev, struct hisi_acc_sgl_pool *pool);
-#endif
diff --git a/drivers/crypto/hisilicon/zip/zip.h b/drivers/crypto/hisilicon/zip/zip.h
index ffb00d987d02..79fc4dd3fe00 100644
--- a/drivers/crypto/hisilicon/zip/zip.h
+++ b/drivers/crypto/hisilicon/zip/zip.h
@@ -8,7 +8,6 @@
#include <linux/list.h>
#include "../qm.h"
-#include "../sgl.h"
/* hisi_zip_sqe dw3 */
#define HZIP_BD_STATUS_M GENMASK(7, 0)
diff --git a/drivers/crypto/hisilicon/zip/zip_crypto.c b/drivers/crypto/hisilicon/zip/zip_crypto.c
index 59023545a1c4..795428c1d07e 100644
--- a/drivers/crypto/hisilicon/zip/zip_crypto.c
+++ b/drivers/crypto/hisilicon/zip/zip_crypto.c
@@ -22,6 +22,7 @@
#define HZIP_CTX_Q_NUM 2
#define HZIP_GZIP_HEAD_BUF 256
#define HZIP_ALG_PRIORITY 300
+#define HZIP_SGL_SGE_NR 10
static const u8 zlib_head[HZIP_ZLIB_HEAD_SIZE] = {0x78, 0x9c};
static const u8 gzip_head[HZIP_GZIP_HEAD_SIZE] = {0x1f, 0x8b, 0x08, 0x0, 0x0,
@@ -41,7 +42,7 @@ enum hisi_zip_alg_type {
#define TO_HEAD(req_type) \
(((req_type) == HZIP_ALG_TYPE_ZLIB) ? zlib_head : \
- ((req_type) == HZIP_ALG_TYPE_GZIP) ? gzip_head : 0) \
+ ((req_type) == HZIP_ALG_TYPE_GZIP) ? gzip_head : NULL) \
struct hisi_zip_req {
struct acomp_req *req;
@@ -67,7 +68,7 @@ struct hisi_zip_qp_ctx {
struct hisi_qp *qp;
struct hisi_zip_sqe zip_sqe;
struct hisi_zip_req_q req_q;
- struct hisi_acc_sgl_pool sgl_pool;
+ struct hisi_acc_sgl_pool *sgl_pool;
struct hisi_zip *zip_dev;
struct hisi_zip_ctx *ctx;
};
@@ -78,6 +79,30 @@ struct hisi_zip_ctx {
struct hisi_zip_qp_ctx qp_ctx[HZIP_CTX_Q_NUM];
};
+static int sgl_sge_nr_set(const char *val, const struct kernel_param *kp)
+{
+ int ret;
+ u16 n;
+
+ if (!val)
+ return -EINVAL;
+
+ ret = kstrtou16(val, 10, &n);
+ if (ret || n == 0 || n > HISI_ACC_SGL_SGE_NR_MAX)
+ return -EINVAL;
+
+ return param_set_int(val, kp);
+}
+
+static const struct kernel_param_ops sgl_sge_nr_ops = {
+ .set = sgl_sge_nr_set,
+ .get = param_get_int,
+};
+
+static u16 sgl_sge_nr = HZIP_SGL_SGE_NR;
+module_param_cb(sgl_sge_nr, &sgl_sge_nr_ops, &sgl_sge_nr, 0444);
+MODULE_PARM_DESC(sgl_sge_nr, "Number of sge in sgl (1-255)");
+
static void hisi_zip_config_buf_type(struct hisi_zip_sqe *sqe, u8 buf_type)
{
u32 val;
@@ -265,14 +290,15 @@ static void hisi_zip_release_req_q(struct hisi_zip_ctx *ctx)
static int hisi_zip_create_sgl_pool(struct hisi_zip_ctx *ctx)
{
struct hisi_zip_qp_ctx *tmp;
- int i, ret;
+ struct device *dev;
+ int i;
for (i = 0; i < HZIP_CTX_Q_NUM; i++) {
tmp = &ctx->qp_ctx[i];
- ret = hisi_acc_create_sgl_pool(&tmp->qp->qm->pdev->dev,
- &tmp->sgl_pool,
- QM_Q_DEPTH << 1);
- if (ret < 0) {
+ dev = &tmp->qp->qm->pdev->dev;
+ tmp->sgl_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH << 1,
+ sgl_sge_nr);
+ if (IS_ERR(tmp->sgl_pool)) {
if (i == 1)
goto err_free_sgl_pool0;
return -ENOMEM;
@@ -283,7 +309,7 @@ static int hisi_zip_create_sgl_pool(struct hisi_zip_ctx *ctx)
err_free_sgl_pool0:
hisi_acc_free_sgl_pool(&ctx->qp_ctx[QPC_COMP].qp->qm->pdev->dev,
- &ctx->qp_ctx[QPC_COMP].sgl_pool);
+ ctx->qp_ctx[QPC_COMP].sgl_pool);
return -ENOMEM;
}
@@ -293,7 +319,7 @@ static void hisi_zip_release_sgl_pool(struct hisi_zip_ctx *ctx)
for (i = 0; i < HZIP_CTX_Q_NUM; i++)
hisi_acc_free_sgl_pool(&ctx->qp_ctx[i].qp->qm->pdev->dev,
- &ctx->qp_ctx[i].sgl_pool);
+ ctx->qp_ctx[i].sgl_pool);
}
static void hisi_zip_remove_req(struct hisi_zip_qp_ctx *qp_ctx,
@@ -512,7 +538,7 @@ static int hisi_zip_do_work(struct hisi_zip_req *req,
struct hisi_zip_sqe *zip_sqe = &qp_ctx->zip_sqe;
struct hisi_qp *qp = qp_ctx->qp;
struct device *dev = &qp->qm->pdev->dev;
- struct hisi_acc_sgl_pool *pool = &qp_ctx->sgl_pool;
+ struct hisi_acc_sgl_pool *pool = qp_ctx->sgl_pool;
dma_addr_t input;
dma_addr_t output;
int ret;
diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c
index 1b2ee96c888d..e1bab1a91333 100644
--- a/drivers/crypto/hisilicon/zip/zip_main.c
+++ b/drivers/crypto/hisilicon/zip/zip_main.c
@@ -79,47 +79,80 @@
#define HZIP_SOFT_CTRL_CNT_CLR_CE 0x301000
#define SOFT_CTRL_CNT_CLR_CE_BIT BIT(0)
-#define HZIP_NUMA_DISTANCE 100
#define HZIP_BUF_SIZE 22
static const char hisi_zip_name[] = "hisi_zip";
static struct dentry *hzip_debugfs_root;
-LIST_HEAD(hisi_zip_list);
-DEFINE_MUTEX(hisi_zip_list_lock);
+static LIST_HEAD(hisi_zip_list);
+static DEFINE_MUTEX(hisi_zip_list_lock);
-#ifdef CONFIG_NUMA
-static struct hisi_zip *find_zip_device_numa(int node)
+struct hisi_zip_resource {
+ struct hisi_zip *hzip;
+ int distance;
+ struct list_head list;
+};
+
+static void free_list(struct list_head *head)
{
- struct hisi_zip *zip = NULL;
- struct hisi_zip *hisi_zip;
- int min_distance = HZIP_NUMA_DISTANCE;
- struct device *dev;
+ struct hisi_zip_resource *res, *tmp;
- list_for_each_entry(hisi_zip, &hisi_zip_list, list) {
- dev = &hisi_zip->qm.pdev->dev;
- if (node_distance(dev->numa_node, node) < min_distance) {
- zip = hisi_zip;
- min_distance = node_distance(dev->numa_node, node);
- }
+ list_for_each_entry_safe(res, tmp, head, list) {
+ list_del(&res->list);
+ kfree(res);
}
-
- return zip;
}
-#endif
struct hisi_zip *find_zip_device(int node)
{
- struct hisi_zip *zip = NULL;
+ struct hisi_zip_resource *res, *tmp;
+ struct hisi_zip *ret = NULL;
+ struct hisi_zip *hisi_zip;
+ struct list_head *n;
+ struct device *dev;
+ LIST_HEAD(head);
mutex_lock(&hisi_zip_list_lock);
-#ifdef CONFIG_NUMA
- zip = find_zip_device_numa(node);
-#else
- zip = list_first_entry(&hisi_zip_list, struct hisi_zip, list);
-#endif
+
+ if (IS_ENABLED(CONFIG_NUMA)) {
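+ /* sort all zip devices by NUMA distance to @node, then pick
+ * the nearest one that still has free queue pairs
+ */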
+ list_for_each_entry(hisi_zip, &hisi_zip_list, list) {
+ res = kzalloc(sizeof(*res), GFP_KERNEL);
+ if (!res)
+ goto err;
+
+ dev = &hisi_zip->qm.pdev->dev;
+ res->hzip = hisi_zip;
+ res->distance = node_distance(dev_to_node(dev), node);
+
+ n = &head;
+ list_for_each_entry(tmp, &head, list) {
+ if (res->distance < tmp->distance) {
+ n = &tmp->list;
+ break;
+ }
+ }
+ list_add_tail(&res->list, n);
+ }
+
+ list_for_each_entry(tmp, &head, list) {
+ if (hisi_qm_get_free_qp_num(&tmp->hzip->qm)) {
+ ret = tmp->hzip;
+ break;
+ }
+ }
+
+ free_list(&head);
+ } else {
+ ret = list_first_entry(&hisi_zip_list, struct hisi_zip, list);
+ }
+
mutex_unlock(&hisi_zip_list_lock);
- return zip;
+ return ret;
+
+err:
+ free_list(&head);
+ mutex_unlock(&hisi_zip_list_lock);
+ return NULL;
}
struct hisi_zip_hw_error {
@@ -267,6 +300,10 @@ MODULE_PARM_DESC(pf_q_num, "Number of queues in PF(v1 1-4096, v2 1-1024)");
static int uacce_mode;
module_param(uacce_mode, int, 0);
+static u32 vfs_num;
+module_param(vfs_num, uint, 0444);
+MODULE_PARM_DESC(vfs_num, "Number of VFs to enable (1-63)");
+
static const struct pci_device_id hisi_zip_dev_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_ZIP_PF) },
{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_ZIP_VF) },
@@ -335,8 +372,7 @@ static void hisi_zip_hw_error_set_state(struct hisi_zip *hisi_zip, bool state)
if (qm->ver == QM_HW_V1) {
writel(HZIP_CORE_INT_DISABLE, qm->io_base + HZIP_CORE_INT_MASK);
- dev_info(&qm->pdev->dev, "ZIP v%d does not support hw error handle\n",
- qm->ver);
+ dev_info(&qm->pdev->dev, "Does not support hw error handle\n");
return;
}
@@ -511,7 +547,7 @@ static int hisi_zip_core_debug_init(struct hisi_zip_ctrl *ctrl)
struct hisi_qm *qm = &hisi_zip->qm;
struct device *dev = &qm->pdev->dev;
struct debugfs_regset32 *regset;
- struct dentry *tmp_d, *tmp;
+ struct dentry *tmp_d;
char buf[HZIP_BUF_SIZE];
int i;
@@ -521,10 +557,6 @@ static int hisi_zip_core_debug_init(struct hisi_zip_ctrl *ctrl)
else
sprintf(buf, "decomp_core%d", i - HZIP_COMP_CORE_NUM);
- tmp_d = debugfs_create_dir(buf, ctrl->debug_root);
- if (!tmp_d)
- return -ENOENT;
-
regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
if (!regset)
return -ENOENT;
@@ -533,9 +565,8 @@ static int hisi_zip_core_debug_init(struct hisi_zip_ctrl *ctrl)
regset->nregs = ARRAY_SIZE(hzip_dfx_regs);
regset->base = qm->io_base + core_offsets[i];
- tmp = debugfs_create_regset32("regs", 0444, tmp_d, regset);
- if (!tmp)
- return -ENOENT;
+ tmp_d = debugfs_create_dir(buf, ctrl->debug_root);
+ debugfs_create_regset32("regs", 0444, tmp_d, regset);
}
return 0;
@@ -543,7 +574,6 @@ static int hisi_zip_core_debug_init(struct hisi_zip_ctrl *ctrl)
static int hisi_zip_ctrl_debug_init(struct hisi_zip_ctrl *ctrl)
{
- struct dentry *tmp;
int i;
for (i = HZIP_CURRENT_QM; i < HZIP_DEBUG_FILE_NUM; i++) {
@@ -551,11 +581,9 @@ static int hisi_zip_ctrl_debug_init(struct hisi_zip_ctrl *ctrl)
ctrl->files[i].ctrl = ctrl;
ctrl->files[i].index = i;
- tmp = debugfs_create_file(ctrl_debug_file_name[i], 0600,
- ctrl->debug_root, ctrl->files + i,
- &ctrl_debug_fops);
- if (!tmp)
- return -ENOENT;
+ debugfs_create_file(ctrl_debug_file_name[i], 0600,
+ ctrl->debug_root, ctrl->files + i,
+ &ctrl_debug_fops);
}
return hisi_zip_core_debug_init(ctrl);
@@ -569,8 +597,6 @@ static int hisi_zip_debugfs_init(struct hisi_zip *hisi_zip)
int ret;
dev_d = debugfs_create_dir(dev_name(dev), hzip_debugfs_root);
- if (!dev_d)
- return -ENOENT;
qm->debug.debug_root = dev_d;
ret = hisi_qm_debug_init(qm);
@@ -652,90 +678,6 @@ static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip)
return 0;
}
-static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id)
-{
- struct hisi_zip *hisi_zip;
- enum qm_hw_ver rev_id;
- struct hisi_qm *qm;
- int ret;
-
- rev_id = hisi_qm_get_hw_version(pdev);
- if (rev_id == QM_HW_UNKNOWN)
- return -EINVAL;
-
- hisi_zip = devm_kzalloc(&pdev->dev, sizeof(*hisi_zip), GFP_KERNEL);
- if (!hisi_zip)
- return -ENOMEM;
- pci_set_drvdata(pdev, hisi_zip);
-
- qm = &hisi_zip->qm;
- qm->pdev = pdev;
- qm->ver = rev_id;
-
- qm->sqe_size = HZIP_SQE_SIZE;
- qm->dev_name = hisi_zip_name;
- qm->fun_type = (pdev->device == PCI_DEVICE_ID_ZIP_PF) ? QM_HW_PF :
- QM_HW_VF;
- switch (uacce_mode) {
- case 0:
- qm->use_dma_api = true;
- break;
- case 1:
- qm->use_dma_api = false;
- break;
- case 2:
- qm->use_dma_api = true;
- break;
- default:
- return -EINVAL;
- }
-
- ret = hisi_qm_init(qm);
- if (ret) {
- dev_err(&pdev->dev, "Failed to init qm!\n");
- return ret;
- }
-
- if (qm->fun_type == QM_HW_PF) {
- ret = hisi_zip_pf_probe_init(hisi_zip);
- if (ret)
- return ret;
-
- qm->qp_base = HZIP_PF_DEF_Q_BASE;
- qm->qp_num = pf_q_num;
- } else if (qm->fun_type == QM_HW_VF) {
- /*
- * have no way to get qm configure in VM in v1 hardware,
- * so currently force PF to uses HZIP_PF_DEF_Q_NUM, and force
- * to trigger only one VF in v1 hardware.
- *
- * v2 hardware has no such problem.
- */
- if (qm->ver == QM_HW_V1) {
- qm->qp_base = HZIP_PF_DEF_Q_NUM;
- qm->qp_num = HZIP_QUEUE_NUM_V1 - HZIP_PF_DEF_Q_NUM;
- } else if (qm->ver == QM_HW_V2)
- /* v2 starts to support get vft by mailbox */
- hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num);
- }
-
- ret = hisi_qm_start(qm);
- if (ret)
- goto err_qm_uninit;
-
- ret = hisi_zip_debugfs_init(hisi_zip);
- if (ret)
- dev_err(&pdev->dev, "Failed to init debugfs (%d)!\n", ret);
-
- hisi_zip_add_to_list(hisi_zip);
-
- return 0;
-
-err_qm_uninit:
- hisi_qm_uninit(qm);
- return ret;
-}
-
/* Currently we only support equal assignment */
static int hisi_zip_vf_q_assign(struct hisi_zip *hisi_zip, int num_vfs)
{
@@ -832,6 +774,100 @@ static int hisi_zip_sriov_disable(struct pci_dev *pdev)
return hisi_zip_clear_vft_config(hisi_zip);
}
+static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct hisi_zip *hisi_zip;
+ enum qm_hw_ver rev_id;
+ struct hisi_qm *qm;
+ int ret;
+
+ rev_id = hisi_qm_get_hw_version(pdev);
+ if (rev_id == QM_HW_UNKNOWN)
+ return -EINVAL;
+
+ hisi_zip = devm_kzalloc(&pdev->dev, sizeof(*hisi_zip), GFP_KERNEL);
+ if (!hisi_zip)
+ return -ENOMEM;
+ pci_set_drvdata(pdev, hisi_zip);
+
+ qm = &hisi_zip->qm;
+ qm->pdev = pdev;
+ qm->ver = rev_id;
+
+ qm->sqe_size = HZIP_SQE_SIZE;
+ qm->dev_name = hisi_zip_name;
+ qm->fun_type = (pdev->device == PCI_DEVICE_ID_ZIP_PF) ? QM_HW_PF :
+ QM_HW_VF;
+ switch (uacce_mode) {
+ case 0:
+ qm->use_dma_api = true;
+ break;
+ case 1:
+ qm->use_dma_api = false;
+ break;
+ case 2:
+ qm->use_dma_api = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = hisi_qm_init(qm);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to init qm!\n");
+ return ret;
+ }
+
+ if (qm->fun_type == QM_HW_PF) {
+ ret = hisi_zip_pf_probe_init(hisi_zip);
+ if (ret)
+ return ret;
+
+ qm->qp_base = HZIP_PF_DEF_Q_BASE;
+ qm->qp_num = pf_q_num;
+ } else if (qm->fun_type == QM_HW_VF) {
+ /*
+ * There is no way to get the QM configuration from within a VM
+ * on v1 hardware, so currently we force the PF to use
+ * HZIP_PF_DEF_Q_NUM queues and allow only one VF on v1 hardware.
+ *
+ * v2 hardware has no such problem.
+ */
+ if (qm->ver == QM_HW_V1) {
+ qm->qp_base = HZIP_PF_DEF_Q_NUM;
+ qm->qp_num = HZIP_QUEUE_NUM_V1 - HZIP_PF_DEF_Q_NUM;
+ } else if (qm->ver == QM_HW_V2)
+ /* v2 starts to support get vft by mailbox */
+ hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num);
+ }
+
+ ret = hisi_qm_start(qm);
+ if (ret)
+ goto err_qm_uninit;
+
+ ret = hisi_zip_debugfs_init(hisi_zip);
+ if (ret)
+ dev_err(&pdev->dev, "Failed to init debugfs (%d)!\n", ret);
+
+ hisi_zip_add_to_list(hisi_zip);
+
+ if (qm->fun_type == QM_HW_PF && vfs_num > 0) {
+ ret = hisi_zip_sriov_enable(pdev, vfs_num);
+ if (ret < 0)
+ goto err_remove_from_list;
+ }
+
+ return 0;
+
+err_remove_from_list:
+ hisi_zip_remove_from_list(hisi_zip);
+ hisi_zip_debugfs_exit(hisi_zip);
+ hisi_qm_stop(qm);
+err_qm_uninit:
+ hisi_qm_uninit(qm);
+ return ret;
+}
+
static int hisi_zip_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
if (num_vfs == 0)
@@ -945,7 +981,7 @@ static struct pci_driver hisi_zip_pci_driver = {
.probe = hisi_zip_probe,
.remove = hisi_zip_remove,
.sriov_configure = IS_ENABLED(CONFIG_PCI_IOV) ?
- hisi_zip_sriov_configure : 0,
+ hisi_zip_sriov_configure : NULL,
.err_handler = &hisi_zip_err_handler,
};
@@ -955,8 +991,6 @@ static void hisi_zip_register_debugfs(void)
return;
hzip_debugfs_root = debugfs_create_dir("hisi_zip", NULL);
- if (IS_ERR_OR_NULL(hzip_debugfs_root))
- hzip_debugfs_root = NULL;
}
static void hisi_zip_unregister_debugfs(void)
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index 4ab1bde8dd9b..64894d8b442a 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -75,9 +75,9 @@ static void eip197_trc_cache_banksel(struct safexcel_crypto_priv *priv,
}
static u32 eip197_trc_cache_probe(struct safexcel_crypto_priv *priv,
- int maxbanks, u32 probemask)
+ int maxbanks, u32 probemask, u32 stride)
{
- u32 val, addrhi, addrlo, addrmid;
+ u32 val, addrhi, addrlo, addrmid, addralias, delta, marker;
int actbank;
/*
@@ -87,32 +87,37 @@ static u32 eip197_trc_cache_probe(struct safexcel_crypto_priv *priv,
addrhi = 1 << (16 + maxbanks);
addrlo = 0;
actbank = min(maxbanks - 1, 0);
- while ((addrhi - addrlo) > 32) {
+ while ((addrhi - addrlo) > stride) {
/* write marker to lowest address in top half */
addrmid = (addrhi + addrlo) >> 1;
+ marker = (addrmid ^ 0xabadbabe) & probemask; /* Unique */
eip197_trc_cache_banksel(priv, addrmid, &actbank);
- writel((addrmid | (addrlo << 16)) & probemask,
+ writel(marker,
priv->base + EIP197_CLASSIFICATION_RAMS +
(addrmid & 0xffff));
- /* write marker to lowest address in bottom half */
- eip197_trc_cache_banksel(priv, addrlo, &actbank);
- writel((addrlo | (addrhi << 16)) & probemask,
- priv->base + EIP197_CLASSIFICATION_RAMS +
- (addrlo & 0xffff));
+ /* write invalid markers to possible aliases */
+ delta = 1 << __fls(addrmid);
+ while (delta >= stride) {
+ addralias = addrmid - delta;
+ eip197_trc_cache_banksel(priv, addralias, &actbank);
+ writel(~marker,
+ priv->base + EIP197_CLASSIFICATION_RAMS +
+ (addralias & 0xffff));
+ delta >>= 1;
+ }
/* read back marker from top half */
eip197_trc_cache_banksel(priv, addrmid, &actbank);
val = readl(priv->base + EIP197_CLASSIFICATION_RAMS +
(addrmid & 0xffff));
- if (val == ((addrmid | (addrlo << 16)) & probemask)) {
+ if ((val & probemask) == marker)
/* read back correct, continue with top half */
addrlo = addrmid;
- } else {
+ else
/* not read back correct, continue with bottom half */
addrhi = addrmid;
- }
}
return addrhi;
}
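The reworked probe is a binary search over the candidate address range: each step writes a unique marker at the midpoint, overwrites every lower power-of-two alias of that address with the inverted marker (so mirrored RAM cannot produce a false read-back), and keeps the upper half only if the marker survives. A stripped-down sketch of the same search, assuming plain ram_write()/ram_read() accessors instead of the EIP197 bank-select and register interface:

	#include <linux/bitops.h>

	static u32 probe_ram_size(u32 addrhi, u32 stride)
	{
		u32 addrlo = 0, addrmid, marker, delta;

		while ((addrhi - addrlo) > stride) {
			addrmid = (addrhi + addrlo) >> 1;
			marker = addrmid ^ 0xabadbabe;	/* unique per step */
			ram_write(addrmid, marker);
			/* clobber all possible aliases of addrmid */
			for (delta = 1 << __fls(addrmid); delta >= stride;
			     delta >>= 1)
				ram_write(addrmid - delta, ~marker);
			if (ram_read(addrmid) == marker)
				addrlo = addrmid;	/* real RAM, go up */
			else
				addrhi = addrmid;	/* aliased, go down */
		}
		return addrhi;
	}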
@@ -150,7 +155,7 @@ static void eip197_trc_cache_clear(struct safexcel_crypto_priv *priv,
htable_offset + i * sizeof(u32));
}
-static void eip197_trc_cache_init(struct safexcel_crypto_priv *priv)
+static int eip197_trc_cache_init(struct safexcel_crypto_priv *priv)
{
u32 val, dsize, asize;
int cs_rc_max, cs_ht_wc, cs_trc_rec_wc, cs_trc_lg_rec_wc;
@@ -183,7 +188,7 @@ static void eip197_trc_cache_init(struct safexcel_crypto_priv *priv)
writel(val, priv->base + EIP197_TRC_PARAMS);
/* Probed data RAM size in bytes */
- dsize = eip197_trc_cache_probe(priv, maxbanks, 0xffffffff);
+ dsize = eip197_trc_cache_probe(priv, maxbanks, 0xffffffff, 32);
/*
* Now probe the administration RAM size pretty much the same way
@@ -196,11 +201,18 @@ static void eip197_trc_cache_init(struct safexcel_crypto_priv *priv)
writel(val, priv->base + EIP197_TRC_PARAMS);
/* Probed admin RAM size in admin words */
- asize = eip197_trc_cache_probe(priv, 0, 0xbfffffff) >> 4;
+ asize = eip197_trc_cache_probe(priv, 0, 0x3fffffff, 16) >> 4;
/* Clear any ECC errors detected while probing! */
writel(0, priv->base + EIP197_TRC_ECCCTRL);
+ /* Sanity check probing results */
+ if (dsize < EIP197_MIN_DSIZE || asize < EIP197_MIN_ASIZE) {
+ dev_err(priv->dev, "Record cache probing failed (%d,%d).",
+ dsize, asize);
+ return -ENODEV;
+ }
+
/*
* Determine optimal configuration from RAM sizes
* Note that we assume that the physical RAM configuration is sane
@@ -221,9 +233,9 @@ static void eip197_trc_cache_init(struct safexcel_crypto_priv *priv)
/* Step #3: Determine log2 of hash table size */
cs_ht_sz = __fls(asize - cs_rc_max) - 2;
/* Step #4: determine current size of hash table in dwords */
- cs_ht_wc = 16<<cs_ht_sz; /* dwords, not admin words */
+ cs_ht_wc = 16 << cs_ht_sz; /* dwords, not admin words */
/* Step #5: add back excess words and see if we can fit more records */
- cs_rc_max = min_t(uint, cs_rc_abs_max, asize - (cs_ht_wc >> 4));
+ cs_rc_max = min_t(uint, cs_rc_abs_max, asize - (cs_ht_wc >> 2));
/* Clear the cache RAMs */
eip197_trc_cache_clear(priv, cs_rc_max, cs_ht_wc);
@@ -251,6 +263,7 @@ static void eip197_trc_cache_init(struct safexcel_crypto_priv *priv)
dev_info(priv->dev, "TRC init: %dd,%da (%dr,%dh)\n",
dsize, asize, cs_rc_max, cs_ht_wc + cs_ht_wc);
+ return 0;
}
static void eip197_init_firmware(struct safexcel_crypto_priv *priv)
@@ -298,13 +311,14 @@ static void eip197_init_firmware(struct safexcel_crypto_priv *priv)
static int eip197_write_firmware(struct safexcel_crypto_priv *priv,
const struct firmware *fw)
{
- const u32 *data = (const u32 *)fw->data;
+ const __be32 *data = (const __be32 *)fw->data;
int i;
/* Write the firmware */
for (i = 0; i < fw->size / sizeof(u32); i++)
writel(be32_to_cpu(data[i]),
- priv->base + EIP197_CLASSIFICATION_RAMS + i * sizeof(u32));
+ priv->base + EIP197_CLASSIFICATION_RAMS +
+ i * sizeof(__be32));
/* Exclude final 2 NOPs from size */
return i - EIP197_FW_TERMINAL_NOPS;
@@ -471,6 +485,14 @@ static int safexcel_hw_setup_cdesc_rings(struct safexcel_crypto_priv *priv)
cd_fetch_cnt = ((1 << priv->hwconfig.hwcfsize) /
cd_size_rnd) - 1;
}
+ /*
+ * Since we're using command descriptors way larger than formally specified,
+ * we need to check whether we can fit even 1 for low-end EIP196's!
+ */
+ if (!cd_fetch_cnt) {
+ dev_err(priv->dev, "Unable to fit even 1 command desc!\n");
+ return -ENODEV;
+ }
for (i = 0; i < priv->config.rings; i++) {
/* ring base address */
@@ -479,12 +501,12 @@ static int safexcel_hw_setup_cdesc_rings(struct safexcel_crypto_priv *priv)
writel(upper_32_bits(priv->ring[i].cdr.base_dma),
EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
- writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.cd_offset << 16) |
+ writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.cd_offset << 14) |
priv->config.cd_size,
EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);
writel(((cd_fetch_cnt *
(cd_size_rnd << priv->hwconfig.hwdataw)) << 16) |
- (cd_fetch_cnt * priv->config.cd_offset),
+ (cd_fetch_cnt * (priv->config.cd_offset / sizeof(u32))),
EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG);
/* Configure DMA tx control */
@@ -527,13 +549,13 @@ static int safexcel_hw_setup_rdesc_rings(struct safexcel_crypto_priv *priv)
writel(upper_32_bits(priv->ring[i].rdr.base_dma),
EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
- writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.rd_offset << 16) |
+ writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.rd_offset << 14) |
priv->config.rd_size,
EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);
writel(((rd_fetch_cnt *
(rd_size_rnd << priv->hwconfig.hwdataw)) << 16) |
- (rd_fetch_cnt * priv->config.rd_offset),
+ (rd_fetch_cnt * (priv->config.rd_offset / sizeof(u32))),
EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG);
/* Configure DMA tx control */
@@ -559,7 +581,7 @@ static int safexcel_hw_setup_rdesc_rings(struct safexcel_crypto_priv *priv)
static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
{
u32 val;
- int i, ret, pe;
+ int i, ret, pe, opbuflo, opbufhi;
dev_dbg(priv->dev, "HW init: using %d pipe(s) and %d ring(s)\n",
priv->config.pes, priv->config.rings);
@@ -595,8 +617,8 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
writel(EIP197_DxE_THR_CTRL_RESET_PE,
EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));
- if (priv->flags & SAFEXCEL_HW_EIP197)
- /* Reset HIA input interface arbiter (EIP197 only) */
+ if (priv->flags & EIP197_PE_ARB)
+ /* Reset HIA input interface arbiter (if present) */
writel(EIP197_HIA_RA_PE_CTRL_RESET,
EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL(pe));
@@ -639,9 +661,16 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
;
/* DMA transfer size to use */
+ if (priv->hwconfig.hwnumpes > 4) {
+ opbuflo = 9;
+ opbufhi = 10;
+ } else {
+ opbuflo = 7;
+ opbufhi = 8;
+ }
val = EIP197_HIA_DSE_CFG_DIS_DEBUG;
- val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(7) |
- EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(8);
+ val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(opbuflo) |
+ EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(opbufhi);
val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(WR_CACHE_3BITS);
val |= EIP197_HIA_DSE_CFG_ALWAYS_BUFFERABLE;
/* FIXME: instability issues can occur for EIP97 but disabling
@@ -655,8 +684,8 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
writel(0, EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));
/* Configure the processing engine thresholds */
- writel(EIP197_PE_OUT_DBUF_THRES_MIN(7) |
- EIP197_PE_OUT_DBUF_THRES_MAX(8),
+ writel(EIP197_PE_OUT_DBUF_THRES_MIN(opbuflo) |
+ EIP197_PE_OUT_DBUF_THRES_MAX(opbufhi),
EIP197_PE(priv) + EIP197_PE_OUT_DBUF_THRES(pe));
/* Processing Engine configuration */
@@ -696,7 +725,7 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
writel(0,
EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR);
- writel((EIP197_DEFAULT_RING_SIZE * priv->config.cd_offset) << 2,
+ writel((EIP197_DEFAULT_RING_SIZE * priv->config.cd_offset),
EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
}
@@ -719,7 +748,7 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR);
/* Ring size */
- writel((EIP197_DEFAULT_RING_SIZE * priv->config.rd_offset) << 2,
+ writel((EIP197_DEFAULT_RING_SIZE * priv->config.rd_offset),
EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
}
@@ -736,19 +765,28 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
/* Clear any HIA interrupt */
writel(GENMASK(30, 20), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);
- if (priv->flags & SAFEXCEL_HW_EIP197) {
- eip197_trc_cache_init(priv);
- priv->flags |= EIP197_TRC_CACHE;
+ if (priv->flags & EIP197_SIMPLE_TRC) {
+ writel(EIP197_STRC_CONFIG_INIT |
+ EIP197_STRC_CONFIG_LARGE_REC(EIP197_CS_TRC_REC_WC) |
+ EIP197_STRC_CONFIG_SMALL_REC(EIP197_CS_TRC_REC_WC),
+ priv->base + EIP197_STRC_CONFIG);
+ writel(EIP197_PE_EIP96_TOKEN_CTRL2_CTX_DONE,
+ EIP197_PE(priv) + EIP197_PE_EIP96_TOKEN_CTRL2(0));
+ } else if (priv->flags & SAFEXCEL_HW_EIP197) {
+ ret = eip197_trc_cache_init(priv);
+ if (ret)
+ return ret;
+ }
+ if (priv->flags & EIP197_ICE) {
ret = eip197_load_firmwares(priv);
if (ret)
return ret;
}
- safexcel_hw_setup_cdesc_rings(priv);
- safexcel_hw_setup_rdesc_rings(priv);
-
- return 0;
+ return safexcel_hw_setup_cdesc_rings(priv) ?:
+ safexcel_hw_setup_rdesc_rings(priv) ?:
+ 0;
}
/* Called with ring's lock taken */
@@ -836,20 +874,24 @@ finalize:
spin_unlock_bh(&priv->ring[ring].lock);
/* let the RDR know we have pending descriptors */
- writel((rdesc * priv->config.rd_offset) << 2,
+ writel((rdesc * priv->config.rd_offset),
EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);
/* let the CDR know we have pending descriptors */
- writel((cdesc * priv->config.cd_offset) << 2,
+ writel((cdesc * priv->config.cd_offset),
EIP197_HIA_CDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);
}
inline int safexcel_rdesc_check_errors(struct safexcel_crypto_priv *priv,
- struct safexcel_result_desc *rdesc)
+ void *rdp)
{
- if (likely((!rdesc->descriptor_overflow) &&
- (!rdesc->buffer_overflow) &&
- (!rdesc->result_data.error_code)))
+ struct safexcel_result_desc *rdesc = rdp;
+ struct result_data_desc *result_data = rdp + priv->config.res_offset;
+
+ if (likely((!rdesc->last_seg) || /* Rest only valid if last seg! */
+ ((!rdesc->descriptor_overflow) &&
+ (!rdesc->buffer_overflow) &&
+ (!result_data->error_code))))
return 0;
if (rdesc->descriptor_overflow)
@@ -858,13 +900,14 @@ inline int safexcel_rdesc_check_errors(struct safexcel_crypto_priv *priv,
if (rdesc->buffer_overflow)
dev_err(priv->dev, "Buffer overflow detected");
- if (rdesc->result_data.error_code & 0x4066) {
+ if (result_data->error_code & 0x4066) {
/* Fatal error (bits 1,2,5,6 & 14) */
dev_err(priv->dev,
"result descriptor error (%x)",
- rdesc->result_data.error_code);
+ result_data->error_code);
+
return -EIO;
- } else if (rdesc->result_data.error_code &
+ } else if (result_data->error_code &
(BIT(7) | BIT(4) | BIT(3) | BIT(0))) {
/*
* Give priority over authentication fails:
@@ -872,7 +915,7 @@ inline int safexcel_rdesc_check_errors(struct safexcel_crypto_priv *priv,
* something wrong with the input!
*/
return -EINVAL;
- } else if (rdesc->result_data.error_code & BIT(9)) {
+ } else if (result_data->error_code & BIT(9)) {
/* Authentication failed */
return -EBADMSG;
}
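Two details of the reworked check are easy to miss: the result token now lives priv->config.res_offset bytes behind the descriptor start (hence the void *rdp signature), and the error bits are only meaningful on the last segment. The fatal-error mask also deserves spelling out; 0x4066 selects exactly the bits named in the comment:

	/* 0x4066 == 0b0100_0000_0110_0110
	 *        == BIT(14) | BIT(6) | BIT(5) | BIT(2) | BIT(1)
	 * i.e. fatal error bits 1, 2, 5, 6 and 14.
	 */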
@@ -1003,7 +1046,7 @@ handle_results:
acknowledge:
if (i)
writel(EIP197_xDR_PROC_xD_PKT(i) |
- EIP197_xDR_PROC_xD_COUNT(tot_descs * priv->config.rd_offset),
+ (tot_descs * priv->config.rd_offset),
EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);
/* If the number of requests overflowed the counter, try to proceed more
@@ -1120,6 +1163,8 @@ static int safexcel_request_ring_irq(void *pdev, int irqid,
irq_name, irq);
return irq;
}
+ } else {
+ return -ENXIO;
}
ret = devm_request_threaded_irq(dev, irq, handler,
@@ -1169,6 +1214,44 @@ static struct safexcel_alg_template *safexcel_algs[] = {
&safexcel_alg_xts_aes,
&safexcel_alg_gcm,
&safexcel_alg_ccm,
+ &safexcel_alg_crc32,
+ &safexcel_alg_cbcmac,
+ &safexcel_alg_xcbcmac,
+ &safexcel_alg_cmac,
+ &safexcel_alg_chacha20,
+ &safexcel_alg_chachapoly,
+ &safexcel_alg_chachapoly_esp,
+ &safexcel_alg_sm3,
+ &safexcel_alg_hmac_sm3,
+ &safexcel_alg_ecb_sm4,
+ &safexcel_alg_cbc_sm4,
+ &safexcel_alg_ofb_sm4,
+ &safexcel_alg_cfb_sm4,
+ &safexcel_alg_ctr_sm4,
+ &safexcel_alg_authenc_hmac_sha1_cbc_sm4,
+ &safexcel_alg_authenc_hmac_sm3_cbc_sm4,
+ &safexcel_alg_authenc_hmac_sha1_ctr_sm4,
+ &safexcel_alg_authenc_hmac_sm3_ctr_sm4,
+ &safexcel_alg_sha3_224,
+ &safexcel_alg_sha3_256,
+ &safexcel_alg_sha3_384,
+ &safexcel_alg_sha3_512,
+ &safexcel_alg_hmac_sha3_224,
+ &safexcel_alg_hmac_sha3_256,
+ &safexcel_alg_hmac_sha3_384,
+ &safexcel_alg_hmac_sha3_512,
+ &safexcel_alg_authenc_hmac_sha1_cbc_des,
+ &safexcel_alg_authenc_hmac_sha256_cbc_des3_ede,
+ &safexcel_alg_authenc_hmac_sha224_cbc_des3_ede,
+ &safexcel_alg_authenc_hmac_sha512_cbc_des3_ede,
+ &safexcel_alg_authenc_hmac_sha384_cbc_des3_ede,
+ &safexcel_alg_authenc_hmac_sha256_cbc_des,
+ &safexcel_alg_authenc_hmac_sha224_cbc_des,
+ &safexcel_alg_authenc_hmac_sha512_cbc_des,
+ &safexcel_alg_authenc_hmac_sha384_cbc_des,
+ &safexcel_alg_rfc4106_gcm,
+ &safexcel_alg_rfc4543_gcm,
+ &safexcel_alg_rfc4309_ccm,
};
static int safexcel_register_algorithms(struct safexcel_crypto_priv *priv)
@@ -1238,30 +1321,28 @@ static void safexcel_unregister_algorithms(struct safexcel_crypto_priv *priv)
static void safexcel_configure(struct safexcel_crypto_priv *priv)
{
- u32 val, mask = 0;
-
- val = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
-
- /* Read number of PEs from the engine */
- if (priv->flags & SAFEXCEL_HW_EIP197)
- /* Wider field width for all EIP197 type engines */
- mask = EIP197_N_PES_MASK;
- else
- /* Narrow field width for EIP97 type engine */
- mask = EIP97_N_PES_MASK;
+ u32 mask = BIT(priv->hwconfig.hwdataw) - 1;
- priv->config.pes = (val >> EIP197_N_PES_OFFSET) & mask;
+ priv->config.pes = priv->hwconfig.hwnumpes;
+ priv->config.rings = min_t(u32, priv->hwconfig.hwnumrings, max_rings);
+ /* Cannot currently support more rings than we have ring AICs! */
+ priv->config.rings = min_t(u32, priv->config.rings,
+ priv->hwconfig.hwnumraic);
- priv->config.rings = min_t(u32, val & GENMASK(3, 0), max_rings);
-
- val = (val & GENMASK(27, 25)) >> 25;
- mask = BIT(val) - 1;
-
- priv->config.cd_size = (sizeof(struct safexcel_command_desc) / sizeof(u32));
+ priv->config.cd_size = EIP197_CD64_FETCH_SIZE;
priv->config.cd_offset = (priv->config.cd_size + mask) & ~mask;
- priv->config.rd_size = (sizeof(struct safexcel_result_desc) / sizeof(u32));
+ /* result token sits behind the descriptor; offset rounded to bus width */
+ priv->config.res_offset = (EIP197_RD64_FETCH_SIZE + mask) & ~mask;
+ /* full descriptor size is this first part plus the result struct */
+ priv->config.rd_size = priv->config.res_offset +
+ EIP197_RD64_RESULT_SIZE;
priv->config.rd_offset = (priv->config.rd_size + mask) & ~mask;
+
+ /* convert dwords to bytes */
+ priv->config.cd_offset *= sizeof(u32);
+ priv->config.rd_offset *= sizeof(u32);
+ priv->config.res_offset *= sizeof(u32);
}
static void safexcel_init_register_offsets(struct safexcel_crypto_priv *priv)
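The offset computation rounds each descriptor size up to the interface data width: mask = BIT(hwdataw) - 1, and (size + mask) & ~mask is the usual round-up-to-a-power-of-two idiom, after which the word counts are converted to bytes. A worked example with illustrative values (hwdataw = 2, i.e. a 4-word bus; cd_size = 10 is made up for the arithmetic):

	u32 hwdataw = 2;			/* bus width 1 << 2 = 4 words */
	u32 mask = BIT(hwdataw) - 1;		/* 0x3 */
	u32 cd_size = 10;			/* words, illustrative */
	u32 cd_offset = (cd_size + mask) & ~mask;	/* 12 words */

	cd_offset *= sizeof(u32);		/* 48 bytes */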
@@ -1307,7 +1388,7 @@ static int safexcel_probe_generic(void *pdev,
int is_pci_dev)
{
struct device *dev = priv->dev;
- u32 peid, version, mask, val, hiaopt;
+ u32 peid, version, mask, val, hiaopt, hwopt, peopt;
int i, ret, hwctg;
priv->context_pool = dmam_pool_create("safexcel-context", dev,
@@ -1369,13 +1450,16 @@ static int safexcel_probe_generic(void *pdev,
*/
version = readl(EIP197_GLOBAL(priv) + EIP197_VERSION);
if (((priv->flags & SAFEXCEL_HW_EIP197) &&
- (EIP197_REG_LO16(version) != EIP197_VERSION_LE)) ||
+ (EIP197_REG_LO16(version) != EIP197_VERSION_LE) &&
+ (EIP197_REG_LO16(version) != EIP196_VERSION_LE)) ||
((!(priv->flags & SAFEXCEL_HW_EIP197) &&
(EIP197_REG_LO16(version) != EIP97_VERSION_LE)))) {
/*
* We did not find the device that matched our initial probing
* (or our initial probing failed) Report appropriate error.
*/
+ dev_err(priv->dev, "Probing for EIP97/EIP19x failed - no such device (read %08x)\n",
+ version);
return -ENODEV;
}
@@ -1383,6 +1467,14 @@ static int safexcel_probe_generic(void *pdev,
hwctg = version >> 28;
peid = version & 255;
+ /* Detect EIP206 processing pipe */
+ version = readl(EIP197_PE(priv) + EIP197_PE_VERSION(0));
+ if (EIP197_REG_LO16(version) != EIP206_VERSION_LE) {
+ dev_err(priv->dev, "EIP%d: EIP206 not detected\n", peid);
+ return -ENODEV;
+ }
+ priv->hwconfig.ppver = EIP197_VERSION_MASK(version);
+
/* Detect EIP96 packet engine and version */
version = readl(EIP197_PE(priv) + EIP197_PE_EIP96_VERSION(0));
if (EIP197_REG_LO16(version) != EIP96_VERSION_LE) {
@@ -1391,10 +1483,13 @@ static int safexcel_probe_generic(void *pdev,
}
priv->hwconfig.pever = EIP197_VERSION_MASK(version);
+ hwopt = readl(EIP197_GLOBAL(priv) + EIP197_OPTIONS);
hiaopt = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_OPTIONS);
if (priv->flags & SAFEXCEL_HW_EIP197) {
/* EIP197 */
+ peopt = readl(EIP197_PE(priv) + EIP197_PE_OPTIONS(0));
+
priv->hwconfig.hwdataw = (hiaopt >> EIP197_HWDATAW_OFFSET) &
EIP197_HWDATAW_MASK;
priv->hwconfig.hwcfsize = ((hiaopt >> EIP197_CFSIZE_OFFSET) &
@@ -1403,6 +1498,19 @@ static int safexcel_probe_generic(void *pdev,
priv->hwconfig.hwrfsize = ((hiaopt >> EIP197_RFSIZE_OFFSET) &
EIP197_RFSIZE_MASK) +
EIP197_RFSIZE_ADJUST;
+ priv->hwconfig.hwnumpes = (hiaopt >> EIP197_N_PES_OFFSET) &
+ EIP197_N_PES_MASK;
+ priv->hwconfig.hwnumrings = (hiaopt >> EIP197_N_RINGS_OFFSET) &
+ EIP197_N_RINGS_MASK;
+ if (hiaopt & EIP197_HIA_OPT_HAS_PE_ARB)
+ priv->flags |= EIP197_PE_ARB;
+ if (EIP206_OPT_ICE_TYPE(peopt) == 1)
+ priv->flags |= EIP197_ICE;
+ /* If not a full TRC, then assume simple TRC */
+ if (!(hwopt & EIP197_OPT_HAS_TRC))
+ priv->flags |= EIP197_SIMPLE_TRC;
+ /* EIP197 always has SOME form of TRC */
+ priv->flags |= EIP197_TRC_CACHE;
} else {
/* EIP97 */
priv->hwconfig.hwdataw = (hiaopt >> EIP197_HWDATAW_OFFSET) &
@@ -1411,6 +1519,23 @@ static int safexcel_probe_generic(void *pdev,
EIP97_CFSIZE_MASK;
priv->hwconfig.hwrfsize = (hiaopt >> EIP97_RFSIZE_OFFSET) &
EIP97_RFSIZE_MASK;
+ priv->hwconfig.hwnumpes = 1; /* by definition */
+ priv->hwconfig.hwnumrings = (hiaopt >> EIP197_N_RINGS_OFFSET) &
+ EIP197_N_RINGS_MASK;
+ }
+
+ /* Scan for ring AIC's */
+ for (i = 0; i < EIP197_MAX_RING_AIC; i++) {
+ version = readl(EIP197_HIA_AIC_R(priv) +
+ EIP197_HIA_AIC_R_VERSION(i));
+ if (EIP197_REG_LO16(version) != EIP201_VERSION_LE)
+ break;
+ }
+ priv->hwconfig.hwnumraic = i;
+ /* Low-end EIP196 may not have any ring AIC's ... */
+ if (!priv->hwconfig.hwnumraic) {
+ dev_err(priv->dev, "No ring interrupt controller present!\n");
+ return -ENODEV;
}
/* Get supported algorithms from EIP96 transform engine */
@@ -1418,10 +1543,12 @@ static int safexcel_probe_generic(void *pdev,
EIP197_PE_EIP96_OPTIONS(0));
/* Print single info line describing what we just detected */
- dev_info(priv->dev, "EIP%d:%x(%d)-HIA:%x(%d,%d,%d),PE:%x,alg:%08x\n",
- peid, priv->hwconfig.hwver, hwctg, priv->hwconfig.hiaver,
- priv->hwconfig.hwdataw, priv->hwconfig.hwcfsize,
- priv->hwconfig.hwrfsize, priv->hwconfig.pever,
+ dev_info(priv->dev, "EIP%d:%x(%d,%d,%d,%d)-HIA:%x(%d,%d,%d),PE:%x/%x,alg:%08x\n",
+ peid, priv->hwconfig.hwver, hwctg, priv->hwconfig.hwnumpes,
+ priv->hwconfig.hwnumrings, priv->hwconfig.hwnumraic,
+ priv->hwconfig.hiaver, priv->hwconfig.hwdataw,
+ priv->hwconfig.hwcfsize, priv->hwconfig.hwrfsize,
+ priv->hwconfig.ppver, priv->hwconfig.pever,
priv->hwconfig.algo_flags);
safexcel_configure(priv);
@@ -1545,7 +1672,6 @@ static void safexcel_hw_reset_rings(struct safexcel_crypto_priv *priv)
}
}
-#if IS_ENABLED(CONFIG_OF)
/* for Device Tree platform driver */
static int safexcel_probe(struct platform_device *pdev)
@@ -1623,6 +1749,7 @@ static int safexcel_remove(struct platform_device *pdev)
safexcel_unregister_algorithms(priv);
safexcel_hw_reset_rings(priv);
+ clk_disable_unprepare(priv->reg_clk);
clk_disable_unprepare(priv->clk);
for (i = 0; i < priv->config.rings; i++)
@@ -1664,9 +1791,7 @@ static struct platform_driver crypto_safexcel = {
.of_match_table = safexcel_of_match_table,
},
};
-#endif
-#if IS_ENABLED(CONFIG_PCI)
/* PCIE devices - i.e. Inside Secure development boards */
static int safexcel_pci_probe(struct pci_dev *pdev,
@@ -1757,7 +1882,7 @@ static int safexcel_pci_probe(struct pci_dev *pdev,
return rc;
}
-void safexcel_pci_remove(struct pci_dev *pdev)
+static void safexcel_pci_remove(struct pci_dev *pdev)
{
struct safexcel_crypto_priv *priv = pci_get_drvdata(pdev);
int i;
@@ -1787,54 +1912,32 @@ static struct pci_driver safexcel_pci_driver = {
.probe = safexcel_pci_probe,
.remove = safexcel_pci_remove,
};
-#endif
-
-/* Unfortunately, we have to resort to global variables here */
-#if IS_ENABLED(CONFIG_PCI)
-int pcireg_rc = -EINVAL; /* Default safe value */
-#endif
-#if IS_ENABLED(CONFIG_OF)
-int ofreg_rc = -EINVAL; /* Default safe value */
-#endif
static int __init safexcel_init(void)
{
-#if IS_ENABLED(CONFIG_PCI)
+ int ret;
+
/* Register PCI driver */
- pcireg_rc = pci_register_driver(&safexcel_pci_driver);
-#endif
+ ret = pci_register_driver(&safexcel_pci_driver);
-#if IS_ENABLED(CONFIG_OF)
/* Register platform driver */
- ofreg_rc = platform_driver_register(&crypto_safexcel);
- #if IS_ENABLED(CONFIG_PCI)
- /* Return success if either PCI or OF registered OK */
- return pcireg_rc ? ofreg_rc : 0;
- #else
- return ofreg_rc;
- #endif
-#else
- #if IS_ENABLED(CONFIG_PCI)
- return pcireg_rc;
- #else
- return -EINVAL;
- #endif
-#endif
+ if (IS_ENABLED(CONFIG_OF) && !ret) {
+ ret = platform_driver_register(&crypto_safexcel);
+ if (ret)
+ pci_unregister_driver(&safexcel_pci_driver);
+ }
+
+ return ret;
}
static void __exit safexcel_exit(void)
{
-#if IS_ENABLED(CONFIG_OF)
/* Unregister platform driver */
- if (!ofreg_rc)
+ if (IS_ENABLED(CONFIG_OF))
platform_driver_unregister(&crypto_safexcel);
-#endif
-#if IS_ENABLED(CONFIG_PCI)
/* Unregister PCI driver if successfully registered before */
- if (!pcireg_rc)
- pci_unregister_driver(&safexcel_pci_driver);
-#endif
+ pci_unregister_driver(&safexcel_pci_driver);
}
module_init(safexcel_init);
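The rework replaces the per-bus return-code globals with the standard register-then-unwind sequence: register the PCI driver, then the platform driver, and undo the first step if the second fails. The same shape reduced to its essentials; the example_* driver names are placeholders:

	static int __init example_init(void)
	{
		int ret;

		ret = pci_register_driver(&example_pci_driver);
		if (ret)
			return ret;

		ret = platform_driver_register(&example_platform_driver);
		if (ret)
			pci_unregister_driver(&example_pci_driver);

		return ret;
	}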
diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h
index 930cc48a6f85..b4624b5687ce 100644
--- a/drivers/crypto/inside-secure/safexcel.h
+++ b/drivers/crypto/inside-secure/safexcel.h
@@ -17,8 +17,11 @@
#define EIP197_HIA_VERSION_BE 0xca35
#define EIP197_HIA_VERSION_LE 0x35ca
#define EIP97_VERSION_LE 0x9e61
+#define EIP196_VERSION_LE 0x3bc4
#define EIP197_VERSION_LE 0x3ac5
#define EIP96_VERSION_LE 0x9f60
+#define EIP201_VERSION_LE 0x36c9
+#define EIP206_VERSION_LE 0x31ce
#define EIP197_REG_LO16(reg) (reg & 0xffff)
#define EIP197_REG_HI16(reg) ((reg >> 16) & 0xffff)
#define EIP197_VERSION_MASK(reg) ((reg >> 16) & 0xfff)
@@ -26,12 +29,22 @@
((reg >> 4) & 0xf0) | \
((reg >> 12) & 0xf))
+/* EIP197 HIA OPTIONS ENCODING */
+#define EIP197_HIA_OPT_HAS_PE_ARB BIT(29)
+
+/* EIP206 OPTIONS ENCODING */
+#define EIP206_OPT_ICE_TYPE(n) (((n) >> 8) & 3)
+
+/* EIP197 OPTIONS ENCODING */
+#define EIP197_OPT_HAS_TRC BIT(31)
+
/* Static configuration */
#define EIP197_DEFAULT_RING_SIZE 400
-#define EIP197_MAX_TOKENS 18
+#define EIP197_MAX_TOKENS 19
#define EIP197_MAX_RINGS 4
#define EIP197_FETCH_DEPTH 2
#define EIP197_MAX_BATCH_SZ 64
+#define EIP197_MAX_RING_AIC 14
#define EIP197_GFP_FLAGS(base) ((base).flags & CRYPTO_TFM_REQ_MAY_SLEEP ? \
GFP_KERNEL : GFP_ATOMIC)
@@ -138,6 +151,7 @@
#define EIP197_HIA_AIC_R_ENABLED_STAT(r) (0xe010 - EIP197_HIA_AIC_R_OFF(r))
#define EIP197_HIA_AIC_R_ACK(r) (0xe010 - EIP197_HIA_AIC_R_OFF(r))
#define EIP197_HIA_AIC_R_ENABLE_CLR(r) (0xe014 - EIP197_HIA_AIC_R_OFF(r))
+#define EIP197_HIA_AIC_R_VERSION(r) (0xe01c - EIP197_HIA_AIC_R_OFF(r))
#define EIP197_HIA_AIC_G_ENABLE_CTRL 0xf808
#define EIP197_HIA_AIC_G_ENABLED_STAT 0xf810
#define EIP197_HIA_AIC_G_ACK 0xf810
@@ -157,12 +171,16 @@
#define EIP197_PE_EIP96_FUNCTION_EN(n) (0x1004 + (0x2000 * (n)))
#define EIP197_PE_EIP96_CONTEXT_CTRL(n) (0x1008 + (0x2000 * (n)))
#define EIP197_PE_EIP96_CONTEXT_STAT(n) (0x100c + (0x2000 * (n)))
+#define EIP197_PE_EIP96_TOKEN_CTRL2(n) (0x102c + (0x2000 * (n)))
#define EIP197_PE_EIP96_FUNCTION2_EN(n) (0x1030 + (0x2000 * (n)))
#define EIP197_PE_EIP96_OPTIONS(n) (0x13f8 + (0x2000 * (n)))
#define EIP197_PE_EIP96_VERSION(n) (0x13fc + (0x2000 * (n)))
#define EIP197_PE_OUT_DBUF_THRES(n) (0x1c00 + (0x2000 * (n)))
#define EIP197_PE_OUT_TBUF_THRES(n) (0x1d00 + (0x2000 * (n)))
+#define EIP197_PE_OPTIONS(n) (0x1ff8 + (0x2000 * (n)))
+#define EIP197_PE_VERSION(n) (0x1ffc + (0x2000 * (n)))
#define EIP197_MST_CTRL 0xfff4
+#define EIP197_OPTIONS 0xfff8
#define EIP197_VERSION 0xfffc
/* EIP197-specific registers, no indirection */
@@ -178,6 +196,7 @@
#define EIP197_TRC_ECCADMINSTAT 0xf0838
#define EIP197_TRC_ECCDATASTAT 0xf083c
#define EIP197_TRC_ECCDATA 0xf0840
+#define EIP197_STRC_CONFIG 0xf43f0
#define EIP197_FLUE_CACHEBASE_LO(n) (0xf6000 + (32 * (n)))
#define EIP197_FLUE_CACHEBASE_HI(n) (0xf6004 + (32 * (n)))
#define EIP197_FLUE_CONFIG(n) (0xf6010 + (32 * (n)))
@@ -213,7 +232,6 @@
/* EIP197_HIA_xDR_PROC_COUNT */
#define EIP197_xDR_PROC_xD_PKT_OFFSET 24
#define EIP197_xDR_PROC_xD_PKT_MASK GENMASK(6, 0)
-#define EIP197_xDR_PROC_xD_COUNT(n) ((n) << 2)
#define EIP197_xDR_PROC_xD_PKT(n) ((n) << 24)
#define EIP197_xDR_PROC_CLR_COUNT BIT(31)
@@ -228,6 +246,8 @@
#define EIP197_HIA_RA_PE_CTRL_EN BIT(30)
/* EIP197_HIA_OPTIONS */
+#define EIP197_N_RINGS_OFFSET 0
+#define EIP197_N_RINGS_MASK GENMASK(3, 0)
#define EIP197_N_PES_OFFSET 4
#define EIP197_N_PES_MASK GENMASK(4, 0)
#define EIP97_N_PES_MASK GENMASK(2, 0)
@@ -237,13 +257,13 @@
#define EIP197_CFSIZE_OFFSET 9
#define EIP197_CFSIZE_ADJUST 4
#define EIP97_CFSIZE_OFFSET 8
-#define EIP197_CFSIZE_MASK GENMASK(3, 0)
-#define EIP97_CFSIZE_MASK GENMASK(4, 0)
+#define EIP197_CFSIZE_MASK GENMASK(2, 0)
+#define EIP97_CFSIZE_MASK GENMASK(3, 0)
#define EIP197_RFSIZE_OFFSET 12
#define EIP197_RFSIZE_ADJUST 4
#define EIP97_RFSIZE_OFFSET 12
-#define EIP197_RFSIZE_MASK GENMASK(3, 0)
-#define EIP97_RFSIZE_MASK GENMASK(4, 0)
+#define EIP197_RFSIZE_MASK GENMASK(2, 0)
+#define EIP97_RFSIZE_MASK GENMASK(3, 0)
/* EIP197_HIA_AIC_R_ENABLE_CTRL */
#define EIP197_CDR_IRQ(n) BIT((n) * 2)
@@ -327,13 +347,21 @@
#define EIP197_ADDRESS_MODE BIT(8)
#define EIP197_CONTROL_MODE BIT(9)
+/* EIP197_PE_EIP96_TOKEN_CTRL2 */
+#define EIP197_PE_EIP96_TOKEN_CTRL2_CTX_DONE BIT(3)
+
+/* EIP197_STRC_CONFIG */
+#define EIP197_STRC_CONFIG_INIT BIT(31)
+#define EIP197_STRC_CONFIG_LARGE_REC(s) ((s) << 8)
+#define EIP197_STRC_CONFIG_SMALL_REC(s) ((s) << 0)
+
/* EIP197_FLUE_CONFIG */
#define EIP197_FLUE_CONFIG_MAGIC 0xc7000004
/* Context Control */
struct safexcel_context_record {
- u32 control0;
- u32 control1;
+ __le32 control0;
+ __le32 control1;
__le32 data[40];
} __packed;
@@ -358,10 +386,14 @@ struct safexcel_context_record {
#define CONTEXT_CONTROL_CRYPTO_ALG_AES128 (0x5 << 17)
#define CONTEXT_CONTROL_CRYPTO_ALG_AES192 (0x6 << 17)
#define CONTEXT_CONTROL_CRYPTO_ALG_AES256 (0x7 << 17)
+#define CONTEXT_CONTROL_CRYPTO_ALG_CHACHA20 (0x8 << 17)
+#define CONTEXT_CONTROL_CRYPTO_ALG_SM4 (0xd << 17)
+#define CONTEXT_CONTROL_DIGEST_INITIAL (0x0 << 21)
#define CONTEXT_CONTROL_DIGEST_PRECOMPUTED (0x1 << 21)
#define CONTEXT_CONTROL_DIGEST_XCM (0x2 << 21)
#define CONTEXT_CONTROL_DIGEST_HMAC (0x3 << 21)
#define CONTEXT_CONTROL_CRYPTO_ALG_MD5 (0x0 << 23)
+#define CONTEXT_CONTROL_CRYPTO_ALG_CRC32 (0x0 << 23)
#define CONTEXT_CONTROL_CRYPTO_ALG_SHA1 (0x2 << 23)
#define CONTEXT_CONTROL_CRYPTO_ALG_SHA224 (0x4 << 23)
#define CONTEXT_CONTROL_CRYPTO_ALG_SHA256 (0x3 << 23)
@@ -371,17 +403,25 @@ struct safexcel_context_record {
#define CONTEXT_CONTROL_CRYPTO_ALG_XCBC128 (0x1 << 23)
#define CONTEXT_CONTROL_CRYPTO_ALG_XCBC192 (0x2 << 23)
#define CONTEXT_CONTROL_CRYPTO_ALG_XCBC256 (0x3 << 23)
+#define CONTEXT_CONTROL_CRYPTO_ALG_SM3 (0x7 << 23)
+#define CONTEXT_CONTROL_CRYPTO_ALG_SHA3_256 (0xb << 23)
+#define CONTEXT_CONTROL_CRYPTO_ALG_SHA3_224 (0xc << 23)
+#define CONTEXT_CONTROL_CRYPTO_ALG_SHA3_512 (0xd << 23)
+#define CONTEXT_CONTROL_CRYPTO_ALG_SHA3_384 (0xe << 23)
+#define CONTEXT_CONTROL_CRYPTO_ALG_POLY1305 (0xf << 23)
#define CONTEXT_CONTROL_INV_FR (0x5 << 24)
#define CONTEXT_CONTROL_INV_TR (0x6 << 24)
/* control1 */
#define CONTEXT_CONTROL_CRYPTO_MODE_ECB (0 << 0)
#define CONTEXT_CONTROL_CRYPTO_MODE_CBC (1 << 0)
+#define CONTEXT_CONTROL_CHACHA20_MODE_256_32 (2 << 0)
#define CONTEXT_CONTROL_CRYPTO_MODE_OFB (4 << 0)
#define CONTEXT_CONTROL_CRYPTO_MODE_CFB (5 << 0)
#define CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD (6 << 0)
#define CONTEXT_CONTROL_CRYPTO_MODE_XTS (7 << 0)
#define CONTEXT_CONTROL_CRYPTO_MODE_XCM ((6 << 0) | BIT(17))
+#define CONTEXT_CONTROL_CHACHA20_MODE_CALC_OTK (12 << 0)
#define CONTEXT_CONTROL_IV0 BIT(5)
#define CONTEXT_CONTROL_IV1 BIT(6)
#define CONTEXT_CONTROL_IV2 BIT(7)
@@ -394,6 +434,13 @@ struct safexcel_context_record {
#define EIP197_XCM_MODE_GCM 1
#define EIP197_XCM_MODE_CCM 2
+#define EIP197_AEAD_TYPE_IPSEC_ESP 2
+#define EIP197_AEAD_TYPE_IPSEC_ESP_GMAC 3
+#define EIP197_AEAD_IPSEC_IV_SIZE 8
+#define EIP197_AEAD_IPSEC_NONCE_SIZE 4
+#define EIP197_AEAD_IPSEC_COUNTER_SIZE 4
+#define EIP197_AEAD_IPSEC_CCM_NONCE_SIZE 3
+
/* The hash counter given to the engine in the context has a granularity of
* 64 bits.
*/
@@ -423,6 +470,8 @@ struct safexcel_context_record {
#define EIP197_TRC_PARAMS2_RC_SZ_SMALL(n) ((n) << 18)
/* Cache helpers */
+#define EIP197_MIN_DSIZE 1024
+#define EIP197_MIN_ASIZE 8
#define EIP197_CS_TRC_REC_WC 64
#define EIP197_CS_RC_SIZE (4 * sizeof(u32))
#define EIP197_CS_RC_NEXT(x) (x)
@@ -447,7 +496,7 @@ struct result_data_desc {
u16 application_id;
u16 rsvd1;
- u32 rsvd2;
+ u32 rsvd2[5];
} __packed;
@@ -465,16 +514,15 @@ struct safexcel_result_desc {
u32 data_lo;
u32 data_hi;
-
- struct result_data_desc result_data;
} __packed;
/*
* The EIP(1)97 only needs to fetch the descriptor part of
* the result descriptor, not the result token part!
*/
-#define EIP197_RD64_FETCH_SIZE ((sizeof(struct safexcel_result_desc) -\
- sizeof(struct result_data_desc)) /\
+#define EIP197_RD64_FETCH_SIZE (sizeof(struct safexcel_result_desc) /\
+ sizeof(u32))
+#define EIP197_RD64_RESULT_SIZE (sizeof(struct result_data_desc) /\
sizeof(u32))
struct safexcel_token {
@@ -561,6 +609,9 @@ struct safexcel_command_desc {
struct safexcel_control_data_desc control_data;
} __packed;
+#define EIP197_CD64_FETCH_SIZE (sizeof(struct safexcel_command_desc) /\
+ sizeof(u32))
+
/*
* Internal structures & functions
*/
@@ -604,6 +655,7 @@ struct safexcel_config {
u32 rd_size;
u32 rd_offset;
+ u32 res_offset;
};
struct safexcel_work_data {
@@ -654,6 +706,12 @@ enum safexcel_eip_version {
/* Priority we use for advertising our algorithms */
#define SAFEXCEL_CRA_PRIORITY 300
+/* SM3 digest result for zero length message */
+#define EIP197_SM3_ZEROM_HASH "\x1A\xB2\x1D\x83\x55\xCF\xA1\x7F" \
+ "\x8E\x61\x19\x48\x31\xE8\x1A\x8F" \
+ "\x22\xBE\xC8\xC7\x28\xFE\xFB\x74" \
+ "\x7E\xD0\x35\xEB\x50\x82\xAA\x2B"
+
/* EIP algorithm presence flags */
enum safexcel_eip_algorithms {
SAFEXCEL_ALG_BC0 = BIT(5),
@@ -697,16 +755,23 @@ struct safexcel_register_offsets {
enum safexcel_flags {
EIP197_TRC_CACHE = BIT(0),
SAFEXCEL_HW_EIP197 = BIT(1),
+ EIP197_PE_ARB = BIT(2),
+ EIP197_ICE = BIT(3),
+ EIP197_SIMPLE_TRC = BIT(4),
};
struct safexcel_hwconfig {
enum safexcel_eip_algorithms algo_flags;
int hwver;
int hiaver;
+ int ppver;
int pever;
int hwdataw;
int hwcfsize;
int hwrfsize;
+ int hwnumpes;
+ int hwnumrings;
+ int hwnumraic;
};
struct safexcel_crypto_priv {
@@ -778,7 +843,7 @@ struct safexcel_inv_result {
void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring);
int safexcel_rdesc_check_errors(struct safexcel_crypto_priv *priv,
- struct safexcel_result_desc *rdesc);
+ void *rdp);
void safexcel_complete(struct safexcel_crypto_priv *priv, int ring);
int safexcel_invalidate_cache(struct crypto_async_request *async,
struct safexcel_crypto_priv *priv,
@@ -853,5 +918,43 @@ extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_ctr_aes;
extern struct safexcel_alg_template safexcel_alg_xts_aes;
extern struct safexcel_alg_template safexcel_alg_gcm;
extern struct safexcel_alg_template safexcel_alg_ccm;
+extern struct safexcel_alg_template safexcel_alg_crc32;
+extern struct safexcel_alg_template safexcel_alg_cbcmac;
+extern struct safexcel_alg_template safexcel_alg_xcbcmac;
+extern struct safexcel_alg_template safexcel_alg_cmac;
+extern struct safexcel_alg_template safexcel_alg_chacha20;
+extern struct safexcel_alg_template safexcel_alg_chachapoly;
+extern struct safexcel_alg_template safexcel_alg_chachapoly_esp;
+extern struct safexcel_alg_template safexcel_alg_sm3;
+extern struct safexcel_alg_template safexcel_alg_hmac_sm3;
+extern struct safexcel_alg_template safexcel_alg_ecb_sm4;
+extern struct safexcel_alg_template safexcel_alg_cbc_sm4;
+extern struct safexcel_alg_template safexcel_alg_ofb_sm4;
+extern struct safexcel_alg_template safexcel_alg_cfb_sm4;
+extern struct safexcel_alg_template safexcel_alg_ctr_sm4;
+extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_sm4;
+extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sm3_cbc_sm4;
+extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_ctr_sm4;
+extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sm3_ctr_sm4;
+extern struct safexcel_alg_template safexcel_alg_sha3_224;
+extern struct safexcel_alg_template safexcel_alg_sha3_256;
+extern struct safexcel_alg_template safexcel_alg_sha3_384;
+extern struct safexcel_alg_template safexcel_alg_sha3_512;
+extern struct safexcel_alg_template safexcel_alg_hmac_sha3_224;
+extern struct safexcel_alg_template safexcel_alg_hmac_sha3_256;
+extern struct safexcel_alg_template safexcel_alg_hmac_sha3_384;
+extern struct safexcel_alg_template safexcel_alg_hmac_sha3_512;
+extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_des;
+extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_des3_ede;
+extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_des3_ede;
+extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_cbc_des3_ede;
+extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_des3_ede;
+extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_des;
+extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_des;
+extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_cbc_des;
+extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_des;
+extern struct safexcel_alg_template safexcel_alg_rfc4106_gcm;
+extern struct safexcel_alg_template safexcel_alg_rfc4543_gcm;
+extern struct safexcel_alg_template safexcel_alg_rfc4309_ccm;
#endif
diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c
index ef51f8c2b473..c02995694b41 100644
--- a/drivers/crypto/inside-secure/safexcel_cipher.c
+++ b/drivers/crypto/inside-secure/safexcel_cipher.c
@@ -5,18 +5,22 @@
* Antoine Tenart <antoine.tenart@free-electrons.com>
*/
+#include <asm/unaligned.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
-
#include <crypto/aead.h>
#include <crypto/aes.h>
#include <crypto/authenc.h>
+#include <crypto/chacha.h>
#include <crypto/ctr.h>
#include <crypto/internal/des.h>
#include <crypto/gcm.h>
#include <crypto/ghash.h>
+#include <crypto/poly1305.h>
#include <crypto/sha.h>
+#include <crypto/sm3.h>
+#include <crypto/sm4.h>
#include <crypto/xts.h>
#include <crypto/skcipher.h>
#include <crypto/internal/aead.h>
@@ -33,6 +37,8 @@ enum safexcel_cipher_alg {
SAFEXCEL_DES,
SAFEXCEL_3DES,
SAFEXCEL_AES,
+ SAFEXCEL_CHACHA20,
+ SAFEXCEL_SM4,
};
struct safexcel_cipher_ctx {
@@ -41,8 +47,8 @@ struct safexcel_cipher_ctx {
u32 mode;
enum safexcel_cipher_alg alg;
- bool aead;
- int xcm; /* 0=authenc, 1=GCM, 2 reserved for CCM */
+ char aead; /* !=0: AEAD, 2: IPsec ESP AEAD, 3: IPsec ESP GMAC */
+ char xcm; /* 0=authenc, 1=GCM, 2 reserved for CCM */
__le32 key[16];
u32 nonce;
@@ -51,10 +57,11 @@ struct safexcel_cipher_ctx {
/* All the below is AEAD specific */
u32 hash_alg;
u32 state_sz;
- u32 ipad[SHA512_DIGEST_SIZE / sizeof(u32)];
- u32 opad[SHA512_DIGEST_SIZE / sizeof(u32)];
+ __be32 ipad[SHA512_DIGEST_SIZE / sizeof(u32)];
+ __be32 opad[SHA512_DIGEST_SIZE / sizeof(u32)];
struct crypto_cipher *hkaes;
+ struct crypto_aead *fback;
};
struct safexcel_cipher_req {
@@ -70,24 +77,50 @@ static void safexcel_cipher_token(struct safexcel_cipher_ctx *ctx, u8 *iv,
{
u32 block_sz = 0;
- if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD) {
+ if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD ||
+ ctx->aead & EIP197_AEAD_TYPE_IPSEC_ESP) { /* _ESP and _ESP_GMAC */
cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
/* 32 bit nonce */
cdesc->control_data.token[0] = ctx->nonce;
/* 64 bit IV part */
memcpy(&cdesc->control_data.token[1], iv, 8);
- /* 32 bit counter, start at 1 (big endian!) */
- cdesc->control_data.token[3] = cpu_to_be32(1);
+
+ if (ctx->alg == SAFEXCEL_CHACHA20 ||
+ ctx->xcm == EIP197_XCM_MODE_CCM) {
+ /* 32 bit counter, starting at 0 */
+ cdesc->control_data.token[3] = 0;
+ } else {
+ /* 32 bit counter, start at 1 (big endian!) */
+ cdesc->control_data.token[3] =
+ (__force u32)cpu_to_be32(1);
+ }
return;
- } else if (ctx->xcm == EIP197_XCM_MODE_GCM) {
+ } else if (ctx->xcm == EIP197_XCM_MODE_GCM ||
+ (ctx->aead && ctx->alg == SAFEXCEL_CHACHA20)) {
cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
/* 96 bit IV part */
memcpy(&cdesc->control_data.token[0], iv, 12);
- /* 32 bit counter, start at 1 (big endian!) */
- cdesc->control_data.token[3] = cpu_to_be32(1);
+
+ if (ctx->alg == SAFEXCEL_CHACHA20) {
+ /* 32 bit counter, starting at 0 */
+ cdesc->control_data.token[3] = 0;
+ } else {
+ /* 32 bit counter, start at 1 (big endian!) */
+ *(__be32 *)&cdesc->control_data.token[3] =
+ cpu_to_be32(1);
+ }
+
+ return;
+ } else if (ctx->alg == SAFEXCEL_CHACHA20) {
+ cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
+
+ /* 96 bit nonce part */
+ memcpy(&cdesc->control_data.token[0], &iv[4], 12);
+ /* 32 bit counter */
+ cdesc->control_data.token[3] = *(u32 *)iv;
return;
} else if (ctx->xcm == EIP197_XCM_MODE_CCM) {
@@ -112,10 +145,16 @@ static void safexcel_cipher_token(struct safexcel_cipher_ctx *ctx, u8 *iv,
block_sz = DES3_EDE_BLOCK_SIZE;
cdesc->control_data.options |= EIP197_OPTION_2_TOKEN_IV_CMD;
break;
+ case SAFEXCEL_SM4:
+ block_sz = SM4_BLOCK_SIZE;
+ cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
+ break;
case SAFEXCEL_AES:
block_sz = AES_BLOCK_SIZE;
cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
break;
+ default:
+ break;
}
memcpy(cdesc->control_data.token, iv, block_sz);
}
@@ -153,72 +192,84 @@ static void safexcel_aead_token(struct safexcel_cipher_ctx *ctx, u8 *iv,
if (direction == SAFEXCEL_ENCRYPT) {
/* align end of instruction sequence to end of token */
token = (struct safexcel_token *)(cdesc->control_data.token +
- EIP197_MAX_TOKENS - 13);
+ EIP197_MAX_TOKENS - 14);
- token[12].opcode = EIP197_TOKEN_OPCODE_INSERT;
- token[12].packet_length = digestsize;
- token[12].stat = EIP197_TOKEN_STAT_LAST_HASH |
+ token[13].opcode = EIP197_TOKEN_OPCODE_INSERT;
+ token[13].packet_length = digestsize;
+ token[13].stat = EIP197_TOKEN_STAT_LAST_HASH |
EIP197_TOKEN_STAT_LAST_PACKET;
- token[12].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT |
+ token[13].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT |
EIP197_TOKEN_INS_INSERT_HASH_DIGEST;
} else {
cryptlen -= digestsize;
/* align end of instruction sequence to end of token */
token = (struct safexcel_token *)(cdesc->control_data.token +
- EIP197_MAX_TOKENS - 14);
+ EIP197_MAX_TOKENS - 15);
- token[12].opcode = EIP197_TOKEN_OPCODE_RETRIEVE;
- token[12].packet_length = digestsize;
- token[12].stat = EIP197_TOKEN_STAT_LAST_HASH |
+ token[13].opcode = EIP197_TOKEN_OPCODE_RETRIEVE;
+ token[13].packet_length = digestsize;
+ token[13].stat = EIP197_TOKEN_STAT_LAST_HASH |
EIP197_TOKEN_STAT_LAST_PACKET;
- token[12].instructions = EIP197_TOKEN_INS_INSERT_HASH_DIGEST;
+ token[13].instructions = EIP197_TOKEN_INS_INSERT_HASH_DIGEST;
- token[13].opcode = EIP197_TOKEN_OPCODE_VERIFY;
- token[13].packet_length = digestsize |
+ token[14].opcode = EIP197_TOKEN_OPCODE_VERIFY;
+ token[14].packet_length = digestsize |
EIP197_TOKEN_HASH_RESULT_VERIFY;
- token[13].stat = EIP197_TOKEN_STAT_LAST_HASH |
+ token[14].stat = EIP197_TOKEN_STAT_LAST_HASH |
EIP197_TOKEN_STAT_LAST_PACKET;
- token[13].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT;
+ token[14].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT;
}
- token[6].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
- token[6].packet_length = assoclen;
+ if (ctx->aead == EIP197_AEAD_TYPE_IPSEC_ESP) {
+ /* For ESP mode (and not GMAC), skip over the IV */
+ token[8].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
+ token[8].packet_length = EIP197_AEAD_IPSEC_IV_SIZE;
- if (likely(cryptlen)) {
- token[6].instructions = EIP197_TOKEN_INS_TYPE_HASH;
+ assoclen -= EIP197_AEAD_IPSEC_IV_SIZE;
+ }
- token[10].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
- token[10].packet_length = cryptlen;
- token[10].stat = EIP197_TOKEN_STAT_LAST_HASH;
- token[10].instructions = EIP197_TOKEN_INS_LAST |
- EIP197_TOKEN_INS_TYPE_CRYPTO |
- EIP197_TOKEN_INS_TYPE_HASH |
- EIP197_TOKEN_INS_TYPE_OUTPUT;
+ token[6].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
+ token[6].packet_length = assoclen;
+ token[6].instructions = EIP197_TOKEN_INS_LAST |
+ EIP197_TOKEN_INS_TYPE_HASH;
+
+ if (likely(cryptlen || ctx->alg == SAFEXCEL_CHACHA20)) {
+ token[11].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
+ token[11].packet_length = cryptlen;
+ token[11].stat = EIP197_TOKEN_STAT_LAST_HASH;
+ if (unlikely(ctx->aead == EIP197_AEAD_TYPE_IPSEC_ESP_GMAC)) {
+ token[6].instructions = EIP197_TOKEN_INS_TYPE_HASH;
+ /* Do not send to crypt engine in case of GMAC */
+ token[11].instructions = EIP197_TOKEN_INS_LAST |
+ EIP197_TOKEN_INS_TYPE_HASH |
+ EIP197_TOKEN_INS_TYPE_OUTPUT;
+ } else {
+ token[11].instructions = EIP197_TOKEN_INS_LAST |
+ EIP197_TOKEN_INS_TYPE_CRYPTO |
+ EIP197_TOKEN_INS_TYPE_HASH |
+ EIP197_TOKEN_INS_TYPE_OUTPUT;
+ }
} else if (ctx->xcm != EIP197_XCM_MODE_CCM) {
token[6].stat = EIP197_TOKEN_STAT_LAST_HASH;
- token[6].instructions = EIP197_TOKEN_INS_LAST |
- EIP197_TOKEN_INS_TYPE_HASH;
}
if (!ctx->xcm)
return;
- token[8].opcode = EIP197_TOKEN_OPCODE_INSERT_REMRES;
- token[8].packet_length = 0;
- token[8].instructions = AES_BLOCK_SIZE;
+ token[9].opcode = EIP197_TOKEN_OPCODE_INSERT_REMRES;
+ token[9].packet_length = 0;
+ token[9].instructions = AES_BLOCK_SIZE;
- token[9].opcode = EIP197_TOKEN_OPCODE_INSERT;
- token[9].packet_length = AES_BLOCK_SIZE;
- token[9].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT |
- EIP197_TOKEN_INS_TYPE_CRYPTO;
+ token[10].opcode = EIP197_TOKEN_OPCODE_INSERT;
+ token[10].packet_length = AES_BLOCK_SIZE;
+ token[10].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT |
+ EIP197_TOKEN_INS_TYPE_CRYPTO;
- if (ctx->xcm == EIP197_XCM_MODE_GCM) {
- token[6].instructions = EIP197_TOKEN_INS_LAST |
- EIP197_TOKEN_INS_TYPE_HASH;
- } else {
+ if (ctx->xcm != EIP197_XCM_MODE_GCM) {
+ u8 *final_iv = (u8 *)cdesc->control_data.token;
u8 *cbcmaciv = (u8 *)&token[1];
- u32 *aadlen = (u32 *)&token[5];
+ __le32 *aadlen = (__le32 *)&token[5];
/* Construct IV block B0 for the CBC-MAC */
token[0].opcode = EIP197_TOKEN_OPCODE_INSERT;
@@ -227,17 +278,18 @@ static void safexcel_aead_token(struct safexcel_cipher_ctx *ctx, u8 *iv,
token[0].instructions = EIP197_TOKEN_INS_ORIGIN_TOKEN |
EIP197_TOKEN_INS_TYPE_HASH;
/* Variable length IV part */
- memcpy(cbcmaciv, iv, 15 - iv[0]);
+ memcpy(cbcmaciv, final_iv, 15 - final_iv[0]);
/* fixup flags byte */
cbcmaciv[0] |= ((assoclen > 0) << 6) | ((digestsize - 2) << 2);
/* Clear upper bytes of variable message length to 0 */
- memset(cbcmaciv + 15 - iv[0], 0, iv[0] - 1);
+ memset(cbcmaciv + 15 - final_iv[0], 0, final_iv[0] - 1);
/* insert lower 2 bytes of message length */
cbcmaciv[14] = cryptlen >> 8;
cbcmaciv[15] = cryptlen & 255;
if (assoclen) {
- *aadlen = cpu_to_le32(cpu_to_be16(assoclen));
+ *aadlen = cpu_to_le32((assoclen >> 8) |
+ ((assoclen & 0xff) << 8));
assoclen += 2;
}
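The replacement expression builds the 16-bit big-endian associated-data length that the CBC-MAC AAD block expects, without the old cpu_to_le32(cpu_to_be16(...)) construct, which mixed endian annotations. Worked through with an illustrative assoclen of 0x1234:

	/* (0x1234 >> 8) | ((0x1234 & 0xff) << 8) == 0x12 | 0x3400 == 0x3412;
	 * cpu_to_le32(0x3412) lands in memory as 12 34 00 00, i.e. the
	 * big-endian 16-bit length 0x1234 followed by two zero bytes.
	 */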
@@ -252,13 +304,13 @@ static void safexcel_aead_token(struct safexcel_cipher_ctx *ctx, u8 *iv,
token[7].instructions = EIP197_TOKEN_INS_TYPE_HASH;
/* Align crypto data towards hash engine */
- token[10].stat = 0;
+ token[11].stat = 0;
- token[11].opcode = EIP197_TOKEN_OPCODE_INSERT;
+ token[12].opcode = EIP197_TOKEN_OPCODE_INSERT;
cryptlen &= 15;
- token[11].packet_length = cryptlen ? 16 - cryptlen : 0;
- token[11].stat = EIP197_TOKEN_STAT_LAST_HASH;
- token[11].instructions = EIP197_TOKEN_INS_TYPE_HASH;
+ token[12].packet_length = cryptlen ? 16 - cryptlen : 0;
+ token[12].stat = EIP197_TOKEN_STAT_LAST_HASH;
+ token[12].instructions = EIP197_TOKEN_INS_TYPE_HASH;
} else {
token[7].stat = EIP197_TOKEN_STAT_LAST_HASH;
token[7].instructions = EIP197_TOKEN_INS_LAST |
@@ -284,7 +336,7 @@ static int safexcel_skcipher_aes_setkey(struct crypto_skcipher *ctfm,
if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) {
for (i = 0; i < len / sizeof(u32); i++) {
- if (ctx->key[i] != cpu_to_le32(aes.key_enc[i])) {
+ if (le32_to_cpu(ctx->key[i]) != aes.key_enc[i]) {
ctx->base.needs_inv = true;
break;
}
@@ -309,25 +361,29 @@ static int safexcel_aead_setkey(struct crypto_aead *ctfm, const u8 *key,
struct safexcel_crypto_priv *priv = ctx->priv;
struct crypto_authenc_keys keys;
struct crypto_aes_ctx aes;
- int err = -EINVAL;
+ int err = -EINVAL, i;
- if (crypto_authenc_extractkeys(&keys, key, len) != 0)
+ if (unlikely(crypto_authenc_extractkeys(&keys, key, len)))
goto badkey;
if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD) {
- /* Minimum keysize is minimum AES key size + nonce size */
- if (keys.enckeylen < (AES_MIN_KEY_SIZE +
- CTR_RFC3686_NONCE_SIZE))
+ /* Must have at least space for the nonce here */
+ if (unlikely(keys.enckeylen < CTR_RFC3686_NONCE_SIZE))
goto badkey;
/* last 4 bytes of key are the nonce! */
ctx->nonce = *(u32 *)(keys.enckey + keys.enckeylen -
CTR_RFC3686_NONCE_SIZE);
/* exclude the nonce here */
- keys.enckeylen -= CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD;
+ keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
}
/* Encryption key */
switch (ctx->alg) {
+ case SAFEXCEL_DES:
+ err = verify_aead_des_key(ctfm, keys.enckey, keys.enckeylen);
+ if (unlikely(err))
+ goto badkey_expflags;
+ break;
case SAFEXCEL_3DES:
err = verify_aead_des3_key(ctfm, keys.enckey, keys.enckeylen);
if (unlikely(err))
@@ -338,14 +394,24 @@ static int safexcel_aead_setkey(struct crypto_aead *ctfm, const u8 *key,
if (unlikely(err))
goto badkey;
break;
+ case SAFEXCEL_SM4:
+ if (unlikely(keys.enckeylen != SM4_KEY_SIZE))
+ goto badkey;
+ break;
default:
dev_err(priv->dev, "aead: unsupported cipher algorithm\n");
goto badkey;
}
- if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma &&
- memcmp(ctx->key, keys.enckey, keys.enckeylen))
- ctx->base.needs_inv = true;
+ if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) {
+ for (i = 0; i < keys.enckeylen / sizeof(u32); i++) {
+ if (le32_to_cpu(ctx->key[i]) !=
+ ((u32 *)keys.enckey)[i]) {
+ ctx->base.needs_inv = true;
+ break;
+ }
+ }
+ }
/* Auth key */
switch (ctx->hash_alg) {
@@ -374,6 +440,11 @@ static int safexcel_aead_setkey(struct crypto_aead *ctfm, const u8 *key,
keys.authkeylen, &istate, &ostate))
goto badkey;
break;
+ case CONTEXT_CONTROL_CRYPTO_ALG_SM3:
+ if (safexcel_hmac_setkey("safexcel-sm3", keys.authkey,
+ keys.authkeylen, &istate, &ostate))
+ goto badkey;
+ break;
default:
dev_err(priv->dev, "aead: unsupported hash algorithm\n");
goto badkey;
@@ -388,7 +459,8 @@ static int safexcel_aead_setkey(struct crypto_aead *ctfm, const u8 *key,
ctx->base.needs_inv = true;
/* Now copy the keys into the context */
- memcpy(ctx->key, keys.enckey, keys.enckeylen);
+ for (i = 0; i < keys.enckeylen / sizeof(u32); i++)
+ ctx->key[i] = cpu_to_le32(((u32 *)keys.enckey)[i]);
ctx->key_len = keys.enckeylen;
memcpy(ctx->ipad, &istate.state, ctx->state_sz);
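Since the context key is now stored as __le32 words (written with cpu_to_le32() just above), the invalidation check converts each stored word back to CPU order before comparing it with the raw key material, rather than using the old memcmp(). A minimal sketch of the idea with hypothetical names; it assumes, as the setkey path does, that the key buffer is 32-bit aligned:

	static bool key_changed(const __le32 *stored, const u8 *key, int len)
	{
		int i;

		for (i = 0; i < len / sizeof(u32); i++)
			if (le32_to_cpu(stored[i]) != ((const u32 *)key)[i])
				return true;
		return false;
	}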
@@ -423,6 +495,17 @@ static int safexcel_context_control(struct safexcel_cipher_ctx *ctx,
CONTEXT_CONTROL_DIGEST_XCM |
ctx->hash_alg |
CONTEXT_CONTROL_SIZE(ctrl_size);
+ } else if (ctx->alg == SAFEXCEL_CHACHA20) {
+ /* Chacha20-Poly1305 */
+ cdesc->control_data.control0 =
+ CONTEXT_CONTROL_KEY_EN |
+ CONTEXT_CONTROL_CRYPTO_ALG_CHACHA20 |
+ (sreq->direction == SAFEXCEL_ENCRYPT ?
+ CONTEXT_CONTROL_TYPE_ENCRYPT_HASH_OUT :
+ CONTEXT_CONTROL_TYPE_HASH_DECRYPT_IN) |
+ ctx->hash_alg |
+ CONTEXT_CONTROL_SIZE(ctrl_size);
+ return 0;
} else {
ctrl_size += ctx->state_sz / sizeof(u32) * 2;
cdesc->control_data.control0 =
@@ -431,17 +514,21 @@ static int safexcel_context_control(struct safexcel_cipher_ctx *ctx,
ctx->hash_alg |
CONTEXT_CONTROL_SIZE(ctrl_size);
}
- if (sreq->direction == SAFEXCEL_ENCRYPT)
- cdesc->control_data.control0 |=
- (ctx->xcm == EIP197_XCM_MODE_CCM) ?
- CONTEXT_CONTROL_TYPE_HASH_ENCRYPT_OUT :
- CONTEXT_CONTROL_TYPE_ENCRYPT_HASH_OUT;
+ if (sreq->direction == SAFEXCEL_ENCRYPT &&
+ (ctx->xcm == EIP197_XCM_MODE_CCM ||
+ ctx->aead == EIP197_AEAD_TYPE_IPSEC_ESP_GMAC))
+ cdesc->control_data.control0 |=
+ CONTEXT_CONTROL_TYPE_HASH_ENCRYPT_OUT;
+ else if (sreq->direction == SAFEXCEL_ENCRYPT)
+ cdesc->control_data.control0 |=
+ CONTEXT_CONTROL_TYPE_ENCRYPT_HASH_OUT;
+ else if (ctx->xcm == EIP197_XCM_MODE_CCM)
+ cdesc->control_data.control0 |=
+ CONTEXT_CONTROL_TYPE_DECRYPT_HASH_IN;
else
cdesc->control_data.control0 |=
- (ctx->xcm == EIP197_XCM_MODE_CCM) ?
- CONTEXT_CONTROL_TYPE_DECRYPT_HASH_IN :
- CONTEXT_CONTROL_TYPE_HASH_DECRYPT_IN;
+ CONTEXT_CONTROL_TYPE_HASH_DECRYPT_IN;
} else {
if (sreq->direction == SAFEXCEL_ENCRYPT)
cdesc->control_data.control0 =
@@ -480,6 +567,12 @@ static int safexcel_context_control(struct safexcel_cipher_ctx *ctx,
ctx->key_len >> ctx->xts);
return -EINVAL;
}
+ } else if (ctx->alg == SAFEXCEL_CHACHA20) {
+ cdesc->control_data.control0 |=
+ CONTEXT_CONTROL_CRYPTO_ALG_CHACHA20;
+ } else if (ctx->alg == SAFEXCEL_SM4) {
+ cdesc->control_data.control0 |=
+ CONTEXT_CONTROL_CRYPTO_ALG_SM4;
}
return 0;
@@ -1295,7 +1388,7 @@ static int safexcel_skcipher_aesctr_setkey(struct crypto_skcipher *ctfm,
if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) {
for (i = 0; i < keylen / sizeof(u32); i++) {
- if (ctx->key[i] != cpu_to_le32(aes.key_enc[i])) {
+ if (le32_to_cpu(ctx->key[i]) != aes.key_enc[i]) {
ctx->base.needs_inv = true;
break;
}
@@ -1451,13 +1544,11 @@ static int safexcel_des3_ede_setkey(struct crypto_skcipher *ctfm,
return err;
/* if context exits and key changed, need to invalidate it */
- if (ctx->base.ctxr_dma) {
+ if (ctx->base.ctxr_dma)
if (memcmp(ctx->key, key, len))
ctx->base.needs_inv = true;
- }
memcpy(ctx->key, key, len);
-
ctx->key_len = len;
return 0;
@@ -1777,6 +1868,312 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_des3_ede = {
},
};
+static int safexcel_aead_sha256_des3_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ safexcel_aead_sha256_cra_init(tfm);
+ ctx->alg = SAFEXCEL_3DES; /* override default */
+ return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_des3_ede = {
+ .type = SAFEXCEL_ALG_TYPE_AEAD,
+ .algo_mask = SAFEXCEL_ALG_DES | SAFEXCEL_ALG_SHA2_256,
+ .alg.aead = {
+ .setkey = safexcel_aead_setkey,
+ .encrypt = safexcel_aead_encrypt,
+ .decrypt = safexcel_aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
+ .cra_driver_name = "safexcel-authenc-hmac-sha256-cbc-des3_ede",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_aead_sha256_des3_cra_init,
+ .cra_exit = safexcel_aead_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+static int safexcel_aead_sha224_des3_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ safexcel_aead_sha224_cra_init(tfm);
+ ctx->alg = SAFEXCEL_3DES; /* override default */
+ return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_des3_ede = {
+ .type = SAFEXCEL_ALG_TYPE_AEAD,
+ .algo_mask = SAFEXCEL_ALG_DES | SAFEXCEL_ALG_SHA2_256,
+ .alg.aead = {
+ .setkey = safexcel_aead_setkey,
+ .encrypt = safexcel_aead_encrypt,
+ .decrypt = safexcel_aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(sha224),cbc(des3_ede))",
+ .cra_driver_name = "safexcel-authenc-hmac-sha224-cbc-des3_ede",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_aead_sha224_des3_cra_init,
+ .cra_exit = safexcel_aead_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+static int safexcel_aead_sha512_des3_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ safexcel_aead_sha512_cra_init(tfm);
+ ctx->alg = SAFEXCEL_3DES; /* override default */
+ return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_cbc_des3_ede = {
+ .type = SAFEXCEL_ALG_TYPE_AEAD,
+ .algo_mask = SAFEXCEL_ALG_DES | SAFEXCEL_ALG_SHA2_512,
+ .alg.aead = {
+ .setkey = safexcel_aead_setkey,
+ .encrypt = safexcel_aead_encrypt,
+ .decrypt = safexcel_aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(sha512),cbc(des3_ede))",
+ .cra_driver_name = "safexcel-authenc-hmac-sha512-cbc-des3_ede",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_aead_sha512_des3_cra_init,
+ .cra_exit = safexcel_aead_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+static int safexcel_aead_sha384_des3_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ safexcel_aead_sha384_cra_init(tfm);
+ ctx->alg = SAFEXCEL_3DES; /* override default */
+ return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_des3_ede = {
+ .type = SAFEXCEL_ALG_TYPE_AEAD,
+ .algo_mask = SAFEXCEL_ALG_DES | SAFEXCEL_ALG_SHA2_512,
+ .alg.aead = {
+ .setkey = safexcel_aead_setkey,
+ .encrypt = safexcel_aead_encrypt,
+ .decrypt = safexcel_aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(sha384),cbc(des3_ede))",
+ .cra_driver_name = "safexcel-authenc-hmac-sha384-cbc-des3_ede",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_aead_sha384_des3_cra_init,
+ .cra_exit = safexcel_aead_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+static int safexcel_aead_sha1_des_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ safexcel_aead_sha1_cra_init(tfm);
+ ctx->alg = SAFEXCEL_DES; /* override default */
+ return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_des = {
+ .type = SAFEXCEL_ALG_TYPE_AEAD,
+ .algo_mask = SAFEXCEL_ALG_DES | SAFEXCEL_ALG_SHA1,
+ .alg.aead = {
+ .setkey = safexcel_aead_setkey,
+ .encrypt = safexcel_aead_encrypt,
+ .decrypt = safexcel_aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(sha1),cbc(des))",
+ .cra_driver_name = "safexcel-authenc-hmac-sha1-cbc-des",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_aead_sha1_des_cra_init,
+ .cra_exit = safexcel_aead_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+static int safexcel_aead_sha256_des_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ safexcel_aead_sha256_cra_init(tfm);
+ ctx->alg = SAFEXCEL_DES; /* override default */
+ return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_des = {
+ .type = SAFEXCEL_ALG_TYPE_AEAD,
+ .algo_mask = SAFEXCEL_ALG_DES | SAFEXCEL_ALG_SHA2_256,
+ .alg.aead = {
+ .setkey = safexcel_aead_setkey,
+ .encrypt = safexcel_aead_encrypt,
+ .decrypt = safexcel_aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(sha256),cbc(des))",
+ .cra_driver_name = "safexcel-authenc-hmac-sha256-cbc-des",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_aead_sha256_des_cra_init,
+ .cra_exit = safexcel_aead_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+static int safexcel_aead_sha224_des_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ safexcel_aead_sha224_cra_init(tfm);
+ ctx->alg = SAFEXCEL_DES; /* override default */
+ return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_des = {
+ .type = SAFEXCEL_ALG_TYPE_AEAD,
+ .algo_mask = SAFEXCEL_ALG_DES | SAFEXCEL_ALG_SHA2_256,
+ .alg.aead = {
+ .setkey = safexcel_aead_setkey,
+ .encrypt = safexcel_aead_encrypt,
+ .decrypt = safexcel_aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(sha224),cbc(des))",
+ .cra_driver_name = "safexcel-authenc-hmac-sha224-cbc-des",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_aead_sha224_des_cra_init,
+ .cra_exit = safexcel_aead_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+static int safexcel_aead_sha512_des_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ safexcel_aead_sha512_cra_init(tfm);
+ ctx->alg = SAFEXCEL_DES; /* override default */
+ return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_cbc_des = {
+ .type = SAFEXCEL_ALG_TYPE_AEAD,
+ .algo_mask = SAFEXCEL_ALG_DES | SAFEXCEL_ALG_SHA2_512,
+ .alg.aead = {
+ .setkey = safexcel_aead_setkey,
+ .encrypt = safexcel_aead_encrypt,
+ .decrypt = safexcel_aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(sha512),cbc(des))",
+ .cra_driver_name = "safexcel-authenc-hmac-sha512-cbc-des",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_aead_sha512_des_cra_init,
+ .cra_exit = safexcel_aead_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+static int safexcel_aead_sha384_des_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ safexcel_aead_sha384_cra_init(tfm);
+ ctx->alg = SAFEXCEL_DES; /* override default */
+ return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_des = {
+ .type = SAFEXCEL_ALG_TYPE_AEAD,
+ .algo_mask = SAFEXCEL_ALG_DES | SAFEXCEL_ALG_SHA2_512,
+ .alg.aead = {
+ .setkey = safexcel_aead_setkey,
+ .encrypt = safexcel_aead_encrypt,
+ .decrypt = safexcel_aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(sha384),cbc(des))",
+ .cra_driver_name = "safexcel-authenc-hmac-sha384-cbc-des",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_aead_sha384_des_cra_init,
+ .cra_exit = safexcel_aead_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
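All the authenc() templates above share one pattern: generic AEAD entry points, a one-line cra_init wrapper that overrides ctx->alg, and per-template cra_name, IV and digest sizes. A hedged usage sketch (not driver code; the name picked is just one of the combinations registered above):

	#include <crypto/aead.h>

	static struct crypto_aead *get_hw_authenc(void)
	{
		/*
		 * authenc keys are RTA-encoded: a parameter header
		 * carrying enckeylen, then the auth key, then the enc
		 * key - see crypto_authenc_extractkeys() for the layout.
		 */
		return crypto_alloc_aead("authenc(hmac(sha256),cbc(des3_ede))",
					 0, 0);
	}
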
static int safexcel_aead_sha1_ctr_cra_init(struct crypto_tfm *tfm)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
@@ -1972,7 +2369,7 @@ static int safexcel_skcipher_aesxts_setkey(struct crypto_skcipher *ctfm,
if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) {
for (i = 0; i < keylen / sizeof(u32); i++) {
- if (ctx->key[i] != cpu_to_le32(aes.key_enc[i])) {
+ if (le32_to_cpu(ctx->key[i]) != aes.key_enc[i]) {
ctx->base.needs_inv = true;
break;
}
@@ -1991,8 +2388,8 @@ static int safexcel_skcipher_aesxts_setkey(struct crypto_skcipher *ctfm,
if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) {
for (i = 0; i < keylen / sizeof(u32); i++) {
- if (ctx->key[i + keylen / sizeof(u32)] !=
- cpu_to_le32(aes.key_enc[i])) {
+ if (le32_to_cpu(ctx->key[i + keylen / sizeof(u32)]) !=
+ aes.key_enc[i]) {
ctx->base.needs_inv = true;
break;
}
@@ -2082,7 +2479,7 @@ static int safexcel_aead_gcm_setkey(struct crypto_aead *ctfm, const u8 *key,
if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) {
for (i = 0; i < len / sizeof(u32); i++) {
- if (ctx->key[i] != cpu_to_le32(aes.key_enc[i])) {
+ if (le32_to_cpu(ctx->key[i]) != aes.key_enc[i]) {
ctx->base.needs_inv = true;
break;
}
@@ -2109,7 +2506,7 @@ static int safexcel_aead_gcm_setkey(struct crypto_aead *ctfm, const u8 *key,
if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) {
for (i = 0; i < AES_BLOCK_SIZE / sizeof(u32); i++) {
- if (ctx->ipad[i] != cpu_to_be32(hashkey[i])) {
+ if (be32_to_cpu(ctx->ipad[i]) != hashkey[i]) {
ctx->base.needs_inv = true;
break;
}
@@ -2199,7 +2596,7 @@ static int safexcel_aead_ccm_setkey(struct crypto_aead *ctfm, const u8 *key,
if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) {
for (i = 0; i < len / sizeof(u32); i++) {
- if (ctx->key[i] != cpu_to_le32(aes.key_enc[i])) {
+ if (le32_to_cpu(ctx->key[i]) != aes.key_enc[i]) {
ctx->base.needs_inv = true;
break;
}
@@ -2303,3 +2700,938 @@ struct safexcel_alg_template safexcel_alg_ccm = {
},
},
};
+
+static void safexcel_chacha20_setkey(struct safexcel_cipher_ctx *ctx,
+ const u8 *key)
+{
+ struct safexcel_crypto_priv *priv = ctx->priv;
+
+ if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma)
+ if (memcmp(ctx->key, key, CHACHA_KEY_SIZE))
+ ctx->base.needs_inv = true;
+
+ memcpy(ctx->key, key, CHACHA_KEY_SIZE);
+ ctx->key_len = CHACHA_KEY_SIZE;
+}
+
+static int safexcel_skcipher_chacha20_setkey(struct crypto_skcipher *ctfm,
+ const u8 *key, unsigned int len)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_skcipher_ctx(ctfm);
+
+ if (len != CHACHA_KEY_SIZE) {
+ crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+ safexcel_chacha20_setkey(ctx, key);
+
+ return 0;
+}
+
+static int safexcel_skcipher_chacha20_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ safexcel_skcipher_cra_init(tfm);
+ ctx->alg = SAFEXCEL_CHACHA20;
+ ctx->mode = CONTEXT_CONTROL_CHACHA20_MODE_256_32;
+ return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_chacha20 = {
+ .type = SAFEXCEL_ALG_TYPE_SKCIPHER,
+ .algo_mask = SAFEXCEL_ALG_CHACHA20,
+ .alg.skcipher = {
+ .setkey = safexcel_skcipher_chacha20_setkey,
+ .encrypt = safexcel_encrypt,
+ .decrypt = safexcel_decrypt,
+ .min_keysize = CHACHA_KEY_SIZE,
+ .max_keysize = CHACHA_KEY_SIZE,
+ .ivsize = CHACHA_IV_SIZE,
+ .base = {
+ .cra_name = "chacha20",
+ .cra_driver_name = "safexcel-chacha20",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_skcipher_chacha20_cra_init,
+ .cra_exit = safexcel_skcipher_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
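
The template above registers a pure stream cipher (cra_blocksize = 1) reached through the standard skcipher API. A hedged one-shot usage sketch, assuming nothing beyond the generic API (local names are illustrative):

	#include <crypto/chacha.h>
	#include <crypto/skcipher.h>

	static int chacha20_encrypt_once(const u8 *key, u8 *iv,
					 struct scatterlist *src,
					 struct scatterlist *dst,
					 unsigned int nbytes)
	{
		struct crypto_skcipher *tfm;
		struct skcipher_request *req;
		DECLARE_CRYPTO_WAIT(wait);
		int err;

		tfm = crypto_alloc_skcipher("chacha20", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		err = crypto_skcipher_setkey(tfm, key, CHACHA_KEY_SIZE);
		if (err)
			goto out_tfm;

		req = skcipher_request_alloc(tfm, GFP_KERNEL);
		if (!req) {
			err = -ENOMEM;
			goto out_tfm;
		}

		skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
					      CRYPTO_TFM_REQ_MAY_SLEEP,
					      crypto_req_done, &wait);
		skcipher_request_set_crypt(req, src, dst, nbytes, iv);
		err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

		skcipher_request_free(req);
	out_tfm:
		crypto_free_skcipher(tfm);
		return err;
	}
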
+
+static int safexcel_aead_chachapoly_setkey(struct crypto_aead *ctfm,
+ const u8 *key, unsigned int len)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_aead_ctx(ctfm);
+
+ if (ctx->aead == EIP197_AEAD_TYPE_IPSEC_ESP &&
+ len > EIP197_AEAD_IPSEC_NONCE_SIZE) {
+ /* ESP variant has nonce appended to key */
+ len -= EIP197_AEAD_IPSEC_NONCE_SIZE;
+ ctx->nonce = *(u32 *)(key + len);
+ }
+ if (len != CHACHA_KEY_SIZE) {
+ crypto_aead_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+ safexcel_chacha20_setkey(ctx, key);
+
+ return 0;
+}
+
+static int safexcel_aead_chachapoly_setauthsize(struct crypto_aead *tfm,
+ unsigned int authsize)
+{
+ if (authsize != POLY1305_DIGEST_SIZE)
+ return -EINVAL;
+ return 0;
+}
+
+static int safexcel_aead_chachapoly_crypt(struct aead_request *req,
+ enum safexcel_cipher_direction dir)
+{
+ struct safexcel_cipher_req *creq = aead_request_ctx(req);
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct crypto_tfm *tfm = crypto_aead_tfm(aead);
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct aead_request *subreq = aead_request_ctx(req);
+ u32 key[CHACHA_KEY_SIZE / sizeof(u32) + 1];
+ int ret = 0;
+
+ /*
+ * Instead of wasting time detecting umpteen silly corner cases,
+ * just dump all "small" requests to the fallback implementation.
+ * HW would not be faster on such small requests anyway.
+ */
+ if (likely((ctx->aead != EIP197_AEAD_TYPE_IPSEC_ESP ||
+ req->assoclen >= EIP197_AEAD_IPSEC_IV_SIZE) &&
+ req->cryptlen > POLY1305_DIGEST_SIZE)) {
+ return safexcel_queue_req(&req->base, creq, dir);
+ }
+
+ /* HW cannot do full (AAD+payload) zero length, use fallback */
+ memcpy(key, ctx->key, CHACHA_KEY_SIZE);
+ if (ctx->aead == EIP197_AEAD_TYPE_IPSEC_ESP) {
+ /* ESP variant has nonce appended to the key */
+ key[CHACHA_KEY_SIZE / sizeof(u32)] = ctx->nonce;
+ ret = crypto_aead_setkey(ctx->fback, (u8 *)key,
+ CHACHA_KEY_SIZE +
+ EIP197_AEAD_IPSEC_NONCE_SIZE);
+ } else {
+ ret = crypto_aead_setkey(ctx->fback, (u8 *)key,
+ CHACHA_KEY_SIZE);
+ }
+ if (ret) {
+ crypto_aead_clear_flags(aead, CRYPTO_TFM_REQ_MASK);
+ crypto_aead_set_flags(aead, crypto_aead_get_flags(ctx->fback) &
+ CRYPTO_TFM_REQ_MASK);
+ return ret;
+ }
+
+ aead_request_set_tfm(subreq, ctx->fback);
+ aead_request_set_callback(subreq, req->base.flags, req->base.complete,
+ req->base.data);
+ aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
+ req->iv);
+ aead_request_set_ad(subreq, req->assoclen);
+
+ return (dir == SAFEXCEL_ENCRYPT) ?
+ crypto_aead_encrypt(subreq) :
+ crypto_aead_decrypt(subreq);
+}
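
For readability, the dispatch rule above can be restated as a predicate (sketch only; this helper does not exist in the driver):

	static inline bool chachapoly_use_hw(const struct safexcel_cipher_ctx *ctx,
					     const struct aead_request *req)
	{
		/*
		 * Offload unless the request is "small" (payload no larger
		 * than the Poly1305 tag) or an ESP request whose AAD cannot
		 * even cover the 8-byte IV; those go to the SW fallback.
		 */
		return (ctx->aead != EIP197_AEAD_TYPE_IPSEC_ESP ||
			req->assoclen >= EIP197_AEAD_IPSEC_IV_SIZE) &&
		       req->cryptlen > POLY1305_DIGEST_SIZE;
	}
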
+
+static int safexcel_aead_chachapoly_encrypt(struct aead_request *req)
+{
+ return safexcel_aead_chachapoly_crypt(req, SAFEXCEL_ENCRYPT);
+}
+
+static int safexcel_aead_chachapoly_decrypt(struct aead_request *req)
+{
+ return safexcel_aead_chachapoly_crypt(req, SAFEXCEL_DECRYPT);
+}
+
+static int safexcel_aead_fallback_cra_init(struct crypto_tfm *tfm)
+{
+ struct crypto_aead *aead = __crypto_aead_cast(tfm);
+ struct aead_alg *alg = crypto_aead_alg(aead);
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ safexcel_aead_cra_init(tfm);
+
+ /* Allocate fallback implementation */
+ ctx->fback = crypto_alloc_aead(alg->base.cra_name, 0,
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(ctx->fback))
+ return PTR_ERR(ctx->fback);
+
+ crypto_aead_set_reqsize(aead, max(sizeof(struct safexcel_cipher_req),
+ sizeof(struct aead_request) +
+ crypto_aead_reqsize(ctx->fback)));
+
+ return 0;
+}
+
+static int safexcel_aead_chachapoly_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ safexcel_aead_fallback_cra_init(tfm);
+ ctx->alg = SAFEXCEL_CHACHA20;
+ ctx->mode = CONTEXT_CONTROL_CHACHA20_MODE_256_32 |
+ CONTEXT_CONTROL_CHACHA20_MODE_CALC_OTK;
+ ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_POLY1305;
+ ctx->state_sz = 0; /* Precomputed by HW */
+ return 0;
+}
+
+static void safexcel_aead_fallback_cra_exit(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ crypto_free_aead(ctx->fback);
+ safexcel_aead_cra_exit(tfm);
+}
+
+struct safexcel_alg_template safexcel_alg_chachapoly = {
+ .type = SAFEXCEL_ALG_TYPE_AEAD,
+ .algo_mask = SAFEXCEL_ALG_CHACHA20 | SAFEXCEL_ALG_POLY1305,
+ .alg.aead = {
+ .setkey = safexcel_aead_chachapoly_setkey,
+ .setauthsize = safexcel_aead_chachapoly_setauthsize,
+ .encrypt = safexcel_aead_chachapoly_encrypt,
+ .decrypt = safexcel_aead_chachapoly_decrypt,
+ .ivsize = CHACHAPOLY_IV_SIZE,
+ .maxauthsize = POLY1305_DIGEST_SIZE,
+ .base = {
+ .cra_name = "rfc7539(chacha20,poly1305)",
+ .cra_driver_name = "safexcel-chacha20-poly1305",
+ /* +1 to put it above HW chacha + SW poly */
+ .cra_priority = SAFEXCEL_CRA_PRIORITY + 1,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_aead_chachapoly_cra_init,
+ .cra_exit = safexcel_aead_fallback_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+static int safexcel_aead_chachapolyesp_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ int ret;
+
+ ret = safexcel_aead_chachapoly_cra_init(tfm);
+ ctx->aead = EIP197_AEAD_TYPE_IPSEC_ESP;
+ return ret;
+}
+
+struct safexcel_alg_template safexcel_alg_chachapoly_esp = {
+ .type = SAFEXCEL_ALG_TYPE_AEAD,
+ .algo_mask = SAFEXCEL_ALG_CHACHA20 | SAFEXCEL_ALG_POLY1305,
+ .alg.aead = {
+ .setkey = safexcel_aead_chachapoly_setkey,
+ .setauthsize = safexcel_aead_chachapoly_setauthsize,
+ .encrypt = safexcel_aead_chachapoly_encrypt,
+ .decrypt = safexcel_aead_chachapoly_decrypt,
+ .ivsize = CHACHAPOLY_IV_SIZE - EIP197_AEAD_IPSEC_NONCE_SIZE,
+ .maxauthsize = POLY1305_DIGEST_SIZE,
+ .base = {
+ .cra_name = "rfc7539esp(chacha20,poly1305)",
+ .cra_driver_name = "safexcel-chacha20-poly1305-esp",
+ /* +1 to put it above HW chacha + SW poly */
+ .cra_priority = SAFEXCEL_CRA_PRIORITY + 1,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_aead_chachapolyesp_cra_init,
+ .cra_exit = safexcel_aead_fallback_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
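
Assumed key-blob layout for the ESP variant above (sketch; the plain rfc7539 template simply omits the trailing salt, hence the length check in setkey):

	struct chachapoly_esp_keyblob {
		u8 key[CHACHA_KEY_SIZE];               /* 32-byte ChaCha20 key  */
		u8 salt[EIP197_AEAD_IPSEC_NONCE_SIZE]; /* 4-byte implicit nonce */
	} __packed;                                    /* setkey() len == 36    */
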
+
+static int safexcel_skcipher_sm4_setkey(struct crypto_skcipher *ctfm,
+ const u8 *key, unsigned int len)
+{
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm);
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct safexcel_crypto_priv *priv = ctx->priv;
+
+ if (len != SM4_KEY_SIZE) {
+ crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma)
+ if (memcmp(ctx->key, key, SM4_KEY_SIZE))
+ ctx->base.needs_inv = true;
+
+ memcpy(ctx->key, key, SM4_KEY_SIZE);
+ ctx->key_len = SM4_KEY_SIZE;
+
+ return 0;
+}
+
+static int safexcel_sm4_blk_encrypt(struct skcipher_request *req)
+{
+ /* Workaround for HW bug: EIP96 4.3 does not report blocksize error */
+ if (req->cryptlen & (SM4_BLOCK_SIZE - 1))
+ return -EINVAL;
+ else
+ return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
+ SAFEXCEL_ENCRYPT);
+}
+
+static int safexcel_sm4_blk_decrypt(struct skcipher_request *req)
+{
+ /* Workaround for HW bug: EIP96 4.3 does not report blocksize error */
+ if (req->cryptlen & (SM4_BLOCK_SIZE - 1))
+ return -EINVAL;
+ else
+ return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
+ SAFEXCEL_DECRYPT);
+}
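
SM4_BLOCK_SIZE is 16, a power of two, so the masks in the two helpers above are a branch-free modulo. A quick numeric check of the accept/reject rule:

	/*
	 * (len & (SM4_BLOCK_SIZE - 1)) == len % 16 for a power-of-two block:
	 *   33 & 15 == 1  ->  -EINVAL (partial trailing block)
	 *   48 & 15 == 0  ->  queued to the engine (whole blocks only)
	 */
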
+
+static int safexcel_skcipher_sm4_ecb_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ safexcel_skcipher_cra_init(tfm);
+ ctx->alg = SAFEXCEL_SM4;
+ ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_ECB;
+ return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_ecb_sm4 = {
+ .type = SAFEXCEL_ALG_TYPE_SKCIPHER,
+ .algo_mask = SAFEXCEL_ALG_SM4,
+ .alg.skcipher = {
+ .setkey = safexcel_skcipher_sm4_setkey,
+ .encrypt = safexcel_sm4_blk_encrypt,
+ .decrypt = safexcel_sm4_blk_decrypt,
+ .min_keysize = SM4_KEY_SIZE,
+ .max_keysize = SM4_KEY_SIZE,
+ .base = {
+ .cra_name = "ecb(sm4)",
+ .cra_driver_name = "safexcel-ecb-sm4",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SM4_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_skcipher_sm4_ecb_cra_init,
+ .cra_exit = safexcel_skcipher_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+static int safexcel_skcipher_sm4_cbc_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ safexcel_skcipher_cra_init(tfm);
+ ctx->alg = SAFEXCEL_SM4;
+ ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CBC;
+ return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_cbc_sm4 = {
+ .type = SAFEXCEL_ALG_TYPE_SKCIPHER,
+ .algo_mask = SAFEXCEL_ALG_SM4,
+ .alg.skcipher = {
+ .setkey = safexcel_skcipher_sm4_setkey,
+ .encrypt = safexcel_sm4_blk_encrypt,
+ .decrypt = safexcel_sm4_blk_decrypt,
+ .min_keysize = SM4_KEY_SIZE,
+ .max_keysize = SM4_KEY_SIZE,
+ .ivsize = SM4_BLOCK_SIZE,
+ .base = {
+ .cra_name = "cbc(sm4)",
+ .cra_driver_name = "safexcel-cbc-sm4",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SM4_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_skcipher_sm4_cbc_cra_init,
+ .cra_exit = safexcel_skcipher_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+static int safexcel_skcipher_sm4_ofb_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ safexcel_skcipher_cra_init(tfm);
+ ctx->alg = SAFEXCEL_SM4;
+ ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_OFB;
+ return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_ofb_sm4 = {
+ .type = SAFEXCEL_ALG_TYPE_SKCIPHER,
+ .algo_mask = SAFEXCEL_ALG_SM4 | SAFEXCEL_ALG_AES_XFB,
+ .alg.skcipher = {
+ .setkey = safexcel_skcipher_sm4_setkey,
+ .encrypt = safexcel_encrypt,
+ .decrypt = safexcel_decrypt,
+ .min_keysize = SM4_KEY_SIZE,
+ .max_keysize = SM4_KEY_SIZE,
+ .ivsize = SM4_BLOCK_SIZE,
+ .base = {
+ .cra_name = "ofb(sm4)",
+ .cra_driver_name = "safexcel-ofb-sm4",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_skcipher_sm4_ofb_cra_init,
+ .cra_exit = safexcel_skcipher_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+static int safexcel_skcipher_sm4_cfb_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ safexcel_skcipher_cra_init(tfm);
+ ctx->alg = SAFEXCEL_SM4;
+ ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CFB;
+ return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_cfb_sm4 = {
+ .type = SAFEXCEL_ALG_TYPE_SKCIPHER,
+ .algo_mask = SAFEXCEL_ALG_SM4 | SAFEXCEL_ALG_AES_XFB,
+ .alg.skcipher = {
+ .setkey = safexcel_skcipher_sm4_setkey,
+ .encrypt = safexcel_encrypt,
+ .decrypt = safexcel_decrypt,
+ .min_keysize = SM4_KEY_SIZE,
+ .max_keysize = SM4_KEY_SIZE,
+ .ivsize = SM4_BLOCK_SIZE,
+ .base = {
+ .cra_name = "cfb(sm4)",
+ .cra_driver_name = "safexcel-cfb-sm4",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_skcipher_sm4_cfb_cra_init,
+ .cra_exit = safexcel_skcipher_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+static int safexcel_skcipher_sm4ctr_setkey(struct crypto_skcipher *ctfm,
+ const u8 *key, unsigned int len)
+{
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm);
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ /* last 4 bytes of key are the nonce! */
+ ctx->nonce = *(u32 *)(key + len - CTR_RFC3686_NONCE_SIZE);
+ /* exclude the nonce here */
+ len -= CTR_RFC3686_NONCE_SIZE;
+
+ return safexcel_skcipher_sm4_setkey(ctfm, key, len);
+}
+
+static int safexcel_skcipher_sm4_ctr_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ safexcel_skcipher_cra_init(tfm);
+ ctx->alg = SAFEXCEL_SM4;
+ ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD;
+ return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_ctr_sm4 = {
+ .type = SAFEXCEL_ALG_TYPE_SKCIPHER,
+ .algo_mask = SAFEXCEL_ALG_SM4,
+ .alg.skcipher = {
+ .setkey = safexcel_skcipher_sm4ctr_setkey,
+ .encrypt = safexcel_encrypt,
+ .decrypt = safexcel_decrypt,
+ /* Add nonce size */
+ .min_keysize = SM4_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
+ .max_keysize = SM4_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .base = {
+ .cra_name = "rfc3686(ctr(sm4))",
+ .cra_driver_name = "safexcel-ctr-sm4",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_skcipher_sm4_ctr_cra_init,
+ .cra_exit = safexcel_skcipher_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
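
A hedged sketch of the rfc3686 conventions relied on above: the key blob carries a trailing 4-byte nonce, and each counter block is assembled as nonce || per-request IV || 32-bit big-endian counter starting at 1 (the helper below is illustrative, not driver code):

	#include <crypto/ctr.h>
	#include <crypto/sm4.h>

	static void rfc3686_build_ctrblk(u8 ctrblk[SM4_BLOCK_SIZE], u32 nonce,
					 const u8 *iv)
	{
		memcpy(ctrblk, &nonce, CTR_RFC3686_NONCE_SIZE);  /* from the key */
		memcpy(ctrblk + CTR_RFC3686_NONCE_SIZE, iv,
		       CTR_RFC3686_IV_SIZE);                     /* from req->iv */
		*(__be32 *)(ctrblk + CTR_RFC3686_NONCE_SIZE +
			    CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
	}
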
+
+static int safexcel_aead_sm4_blk_encrypt(struct aead_request *req)
+{
+ /* Workaround for HW bug: EIP96 4.3 does not report blocksize error */
+ if (req->cryptlen & (SM4_BLOCK_SIZE - 1))
+ return -EINVAL;
+
+ return safexcel_queue_req(&req->base, aead_request_ctx(req),
+ SAFEXCEL_ENCRYPT);
+}
+
+static int safexcel_aead_sm4_blk_decrypt(struct aead_request *req)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+
+ /* Workaround for HW bug: EIP96 4.3 does not report blocksize error */
+ if ((req->cryptlen - crypto_aead_authsize(tfm)) & (SM4_BLOCK_SIZE - 1))
+ return -EINVAL;
+
+ return safexcel_queue_req(&req->base, aead_request_ctx(req),
+ SAFEXCEL_DECRYPT);
+}
+
+static int safexcel_aead_sm4cbc_sha1_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ safexcel_aead_cra_init(tfm);
+ ctx->alg = SAFEXCEL_SM4;
+ ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1;
+ ctx->state_sz = SHA1_DIGEST_SIZE;
+ return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_sm4 = {
+ .type = SAFEXCEL_ALG_TYPE_AEAD,
+ .algo_mask = SAFEXCEL_ALG_SM4 | SAFEXCEL_ALG_SHA1,
+ .alg.aead = {
+ .setkey = safexcel_aead_setkey,
+ .encrypt = safexcel_aead_sm4_blk_encrypt,
+ .decrypt = safexcel_aead_sm4_blk_decrypt,
+ .ivsize = SM4_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(sha1),cbc(sm4))",
+ .cra_driver_name = "safexcel-authenc-hmac-sha1-cbc-sm4",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SM4_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_aead_sm4cbc_sha1_cra_init,
+ .cra_exit = safexcel_aead_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+static int safexcel_aead_fallback_setkey(struct crypto_aead *ctfm,
+ const u8 *key, unsigned int len)
+{
+ struct crypto_tfm *tfm = crypto_aead_tfm(ctfm);
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ /* Keep fallback cipher synchronized */
+ return crypto_aead_setkey(ctx->fback, (u8 *)key, len) ?:
+ safexcel_aead_setkey(ctfm, key, len);
+}
+
+static int safexcel_aead_fallback_setauthsize(struct crypto_aead *ctfm,
+ unsigned int authsize)
+{
+ struct crypto_tfm *tfm = crypto_aead_tfm(ctfm);
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ /* Keep fallback cipher synchronized */
+ return crypto_aead_setauthsize(ctx->fback, authsize);
+}
+
+static int safexcel_aead_fallback_crypt(struct aead_request *req,
+ enum safexcel_cipher_direction dir)
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct crypto_tfm *tfm = crypto_aead_tfm(aead);
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct aead_request *subreq = aead_request_ctx(req);
+
+ aead_request_set_tfm(subreq, ctx->fback);
+ aead_request_set_callback(subreq, req->base.flags, req->base.complete,
+ req->base.data);
+ aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
+ req->iv);
+ aead_request_set_ad(subreq, req->assoclen);
+
+ return (dir == SAFEXCEL_ENCRYPT) ?
+ crypto_aead_encrypt(subreq) :
+ crypto_aead_decrypt(subreq);
+}
+
+static int safexcel_aead_sm4cbc_sm3_encrypt(struct aead_request *req)
+{
+ struct safexcel_cipher_req *creq = aead_request_ctx(req);
+
+ /* Workaround for HW bug: EIP96 4.3 does not report blocksize error */
+ if (req->cryptlen & (SM4_BLOCK_SIZE - 1))
+ return -EINVAL;
+ else if (req->cryptlen || req->assoclen) /* If input length > 0 only */
+ return safexcel_queue_req(&req->base, creq, SAFEXCEL_ENCRYPT);
+
+ /* HW cannot do full (AAD+payload) zero length, use fallback */
+ return safexcel_aead_fallback_crypt(req, SAFEXCEL_ENCRYPT);
+}
+
+static int safexcel_aead_sm4cbc_sm3_decrypt(struct aead_request *req)
+{
+ struct safexcel_cipher_req *creq = aead_request_ctx(req);
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+
+ /* Workaround for HW bug: EIP96 4.3 does not report blocksize error */
+ if ((req->cryptlen - crypto_aead_authsize(tfm)) & (SM4_BLOCK_SIZE - 1))
+ return -EINVAL;
+ else if (req->cryptlen > crypto_aead_authsize(tfm) || req->assoclen)
+ /* If input length > 0 only */
+ return safexcel_queue_req(&req->base, creq, SAFEXCEL_DECRYPT);
+
+ /* HW cannot do full (AAD+payload) zero length, use fallback */
+ return safexcel_aead_fallback_crypt(req, SAFEXCEL_DECRYPT);
+}
+
+static int safexcel_aead_sm4cbc_sm3_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ safexcel_aead_fallback_cra_init(tfm);
+ ctx->alg = SAFEXCEL_SM4;
+ ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_SM3;
+ ctx->state_sz = SM3_DIGEST_SIZE;
+ return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_authenc_hmac_sm3_cbc_sm4 = {
+ .type = SAFEXCEL_ALG_TYPE_AEAD,
+ .algo_mask = SAFEXCEL_ALG_SM4 | SAFEXCEL_ALG_SM3,
+ .alg.aead = {
+ .setkey = safexcel_aead_fallback_setkey,
+ .setauthsize = safexcel_aead_fallback_setauthsize,
+ .encrypt = safexcel_aead_sm4cbc_sm3_encrypt,
+ .decrypt = safexcel_aead_sm4cbc_sm3_decrypt,
+ .ivsize = SM4_BLOCK_SIZE,
+ .maxauthsize = SM3_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(sm3),cbc(sm4))",
+ .cra_driver_name = "safexcel-authenc-hmac-sm3-cbc-sm4",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SM4_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_aead_sm4cbc_sm3_cra_init,
+ .cra_exit = safexcel_aead_fallback_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+static int safexcel_aead_sm4ctr_sha1_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ safexcel_aead_sm4cbc_sha1_cra_init(tfm);
+ ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD;
+ return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_ctr_sm4 = {
+ .type = SAFEXCEL_ALG_TYPE_AEAD,
+ .algo_mask = SAFEXCEL_ALG_SM4 | SAFEXCEL_ALG_SHA1,
+ .alg.aead = {
+ .setkey = safexcel_aead_setkey,
+ .encrypt = safexcel_aead_encrypt,
+ .decrypt = safexcel_aead_decrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(sha1),rfc3686(ctr(sm4)))",
+ .cra_driver_name = "safexcel-authenc-hmac-sha1-ctr-sm4",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_aead_sm4ctr_sha1_cra_init,
+ .cra_exit = safexcel_aead_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+static int safexcel_aead_sm4ctr_sm3_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ safexcel_aead_sm4cbc_sm3_cra_init(tfm);
+ ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD;
+ return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_authenc_hmac_sm3_ctr_sm4 = {
+ .type = SAFEXCEL_ALG_TYPE_AEAD,
+ .algo_mask = SAFEXCEL_ALG_SM4 | SAFEXCEL_ALG_SM3,
+ .alg.aead = {
+ .setkey = safexcel_aead_setkey,
+ .encrypt = safexcel_aead_encrypt,
+ .decrypt = safexcel_aead_decrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SM3_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(sm3),rfc3686(ctr(sm4)))",
+ .cra_driver_name = "safexcel-authenc-hmac-sm3-ctr-sm4",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_aead_sm4ctr_sm3_cra_init,
+ .cra_exit = safexcel_aead_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+static int safexcel_rfc4106_gcm_setkey(struct crypto_aead *ctfm, const u8 *key,
+ unsigned int len)
+{
+ struct crypto_tfm *tfm = crypto_aead_tfm(ctfm);
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ /* last 4 bytes of key are the nonce! */
+ ctx->nonce = *(u32 *)(key + len - CTR_RFC3686_NONCE_SIZE);
+
+ len -= CTR_RFC3686_NONCE_SIZE;
+ return safexcel_aead_gcm_setkey(ctfm, key, len);
+}
+
+static int safexcel_rfc4106_gcm_setauthsize(struct crypto_aead *tfm,
+ unsigned int authsize)
+{
+ return crypto_rfc4106_check_authsize(authsize);
+}
+
+static int safexcel_rfc4106_encrypt(struct aead_request *req)
+{
+ return crypto_ipsec_check_assoclen(req->assoclen) ?:
+ safexcel_aead_encrypt(req);
+}
+
+static int safexcel_rfc4106_decrypt(struct aead_request *req)
+{
+ return crypto_ipsec_check_assoclen(req->assoclen) ?:
+ safexcel_aead_decrypt(req);
+}
+
+static int safexcel_rfc4106_gcm_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ int ret;
+
+ ret = safexcel_aead_gcm_cra_init(tfm);
+ ctx->aead = EIP197_AEAD_TYPE_IPSEC_ESP;
+ return ret;
+}
+
+struct safexcel_alg_template safexcel_alg_rfc4106_gcm = {
+ .type = SAFEXCEL_ALG_TYPE_AEAD,
+ .algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_GHASH,
+ .alg.aead = {
+ .setkey = safexcel_rfc4106_gcm_setkey,
+ .setauthsize = safexcel_rfc4106_gcm_setauthsize,
+ .encrypt = safexcel_rfc4106_encrypt,
+ .decrypt = safexcel_rfc4106_decrypt,
+ .ivsize = GCM_RFC4106_IV_SIZE,
+ .maxauthsize = GHASH_DIGEST_SIZE,
+ .base = {
+ .cra_name = "rfc4106(gcm(aes))",
+ .cra_driver_name = "safexcel-rfc4106-gcm-aes",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_rfc4106_gcm_cra_init,
+ .cra_exit = safexcel_aead_gcm_cra_exit,
+ },
+ },
+};
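
rfc4106 conventions assumed above (sketch of what the setkey and assoclen checks imply):

	/*
	 * 12-byte GCM nonce:
	 *   | salt (4, from the tail of the key) | explicit IV (8, req->iv) |
	 *
	 * crypto_ipsec_check_assoclen() limits the AAD to the ESP header
	 * sizes of 16 or 20 bytes (SPI plus 32- or 64-bit sequence number,
	 * followed by the 8-byte IV).
	 */
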
+
+static int safexcel_rfc4543_gcm_setauthsize(struct crypto_aead *tfm,
+ unsigned int authsize)
+{
+ if (authsize != GHASH_DIGEST_SIZE)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int safexcel_rfc4543_gcm_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ int ret;
+
+ ret = safexcel_aead_gcm_cra_init(tfm);
+ ctx->aead = EIP197_AEAD_TYPE_IPSEC_ESP_GMAC;
+ return ret;
+}
+
+struct safexcel_alg_template safexcel_alg_rfc4543_gcm = {
+ .type = SAFEXCEL_ALG_TYPE_AEAD,
+ .algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_GHASH,
+ .alg.aead = {
+ .setkey = safexcel_rfc4106_gcm_setkey,
+ .setauthsize = safexcel_rfc4543_gcm_setauthsize,
+ .encrypt = safexcel_rfc4106_encrypt,
+ .decrypt = safexcel_rfc4106_decrypt,
+ .ivsize = GCM_RFC4543_IV_SIZE,
+ .maxauthsize = GHASH_DIGEST_SIZE,
+ .base = {
+ .cra_name = "rfc4543(gcm(aes))",
+ .cra_driver_name = "safexcel-rfc4543-gcm-aes",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_rfc4543_gcm_cra_init,
+ .cra_exit = safexcel_aead_gcm_cra_exit,
+ },
+ },
+};
+
+static int safexcel_rfc4309_ccm_setkey(struct crypto_aead *ctfm, const u8 *key,
+ unsigned int len)
+{
+ struct crypto_tfm *tfm = crypto_aead_tfm(ctfm);
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ /* First byte of the nonce = L = always 3 for RFC4309 (4 byte ctr) */
+ *(u8 *)&ctx->nonce = EIP197_AEAD_IPSEC_COUNTER_SIZE - 1;
+ /* last 3 bytes of key are the nonce! */
+ memcpy((u8 *)&ctx->nonce + 1, key + len -
+ EIP197_AEAD_IPSEC_CCM_NONCE_SIZE,
+ EIP197_AEAD_IPSEC_CCM_NONCE_SIZE);
+
+ len -= EIP197_AEAD_IPSEC_CCM_NONCE_SIZE;
+ return safexcel_aead_ccm_setkey(ctfm, key, len);
+}
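
Counter-block (A_i) layout implied by the setkey above, per RFC 3610 with L = 4 (sketch; the flags octet encodes L - 1):

	/*
	 *   byte  0       flags = L - 1 = 3
	 *   bytes 1..3    salt (last 3 bytes of the key)
	 *   bytes 4..11   explicit IV (req->iv)
	 *   bytes 12..15  big-endian block counter
	 */
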
+
+static int safexcel_rfc4309_ccm_setauthsize(struct crypto_aead *tfm,
+ unsigned int authsize)
+{
+ /* Borrowed from crypto/ccm.c */
+ switch (authsize) {
+ case 8:
+ case 12:
+ case 16:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int safexcel_rfc4309_ccm_encrypt(struct aead_request *req)
+{
+ struct safexcel_cipher_req *creq = aead_request_ctx(req);
+
+ /* Borrowed from crypto/ccm.c */
+ if (req->assoclen != 16 && req->assoclen != 20)
+ return -EINVAL;
+
+ return safexcel_queue_req(&req->base, creq, SAFEXCEL_ENCRYPT);
+}
+
+static int safexcel_rfc4309_ccm_decrypt(struct aead_request *req)
+{
+ struct safexcel_cipher_req *creq = aead_request_ctx(req);
+
+ /* Borrowed from crypto/ccm.c */
+ if (req->assoclen != 16 && req->assoclen != 20)
+ return -EINVAL;
+
+ return safexcel_queue_req(&req->base, creq, SAFEXCEL_DECRYPT);
+}
+
+static int safexcel_rfc4309_ccm_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ int ret;
+
+ ret = safexcel_aead_ccm_cra_init(tfm);
+ ctx->aead = EIP197_AEAD_TYPE_IPSEC_ESP;
+ return ret;
+}
+
+struct safexcel_alg_template safexcel_alg_rfc4309_ccm = {
+ .type = SAFEXCEL_ALG_TYPE_AEAD,
+ .algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_CBC_MAC_ALL,
+ .alg.aead = {
+ .setkey = safexcel_rfc4309_ccm_setkey,
+ .setauthsize = safexcel_rfc4309_ccm_setauthsize,
+ .encrypt = safexcel_rfc4309_ccm_encrypt,
+ .decrypt = safexcel_rfc4309_ccm_decrypt,
+ .ivsize = EIP197_AEAD_IPSEC_IV_SIZE,
+ .maxauthsize = AES_BLOCK_SIZE,
+ .base = {
+ .cra_name = "rfc4309(ccm(aes))",
+ .cra_driver_name = "safexcel-rfc4309-ccm-aes",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_rfc4309_ccm_cra_init,
+ .cra_exit = safexcel_aead_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
index 2effb6d21e8b..2134daef24f6 100644
--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -5,9 +5,13 @@
* Antoine Tenart <antoine.tenart@free-electrons.com>
*/
+#include <crypto/aes.h>
#include <crypto/hmac.h>
#include <crypto/md5.h>
#include <crypto/sha.h>
+#include <crypto/sha3.h>
+#include <crypto/skcipher.h>
+#include <crypto/sm3.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
@@ -19,9 +23,19 @@ struct safexcel_ahash_ctx {
struct safexcel_crypto_priv *priv;
u32 alg;
-
- u32 ipad[SHA512_DIGEST_SIZE / sizeof(u32)];
- u32 opad[SHA512_DIGEST_SIZE / sizeof(u32)];
+ u8 key_sz;
+ bool cbcmac;
+ bool do_fallback;
+ bool fb_init_done;
+ bool fb_do_setkey;
+
+ __le32 ipad[SHA3_512_BLOCK_SIZE / sizeof(__le32)];
+ __le32 opad[SHA3_512_BLOCK_SIZE / sizeof(__le32)];
+
+ struct crypto_cipher *kaes;
+ struct crypto_ahash *fback;
+ struct crypto_shash *shpre;
+ struct shash_desc *shdesc;
};
struct safexcel_ahash_req {
@@ -31,6 +45,8 @@ struct safexcel_ahash_req {
bool needs_inv;
bool hmac_zlen;
bool len_is_le;
+ bool not_first;
+ bool xcbcmac;
int nents;
dma_addr_t result_dma;
@@ -39,7 +55,9 @@ struct safexcel_ahash_req {
u8 state_sz; /* expected state size, only set once */
u8 block_sz; /* block size, only set once */
- u32 state[SHA512_DIGEST_SIZE / sizeof(u32)] __aligned(sizeof(u32));
+ u8 digest_sz; /* output digest size, only set once */
+ __le32 state[SHA3_512_BLOCK_SIZE /
+ sizeof(__le32)] __aligned(sizeof(__le32));
u64 len;
u64 processed;
@@ -57,21 +75,31 @@ static inline u64 safexcel_queued_len(struct safexcel_ahash_req *req)
}
static void safexcel_hash_token(struct safexcel_command_desc *cdesc,
- u32 input_length, u32 result_length)
+ u32 input_length, u32 result_length,
+ bool cbcmac)
{
struct safexcel_token *token =
(struct safexcel_token *)cdesc->control_data.token;
token[0].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
token[0].packet_length = input_length;
- token[0].stat = EIP197_TOKEN_STAT_LAST_HASH;
token[0].instructions = EIP197_TOKEN_INS_TYPE_HASH;
- token[1].opcode = EIP197_TOKEN_OPCODE_INSERT;
- token[1].packet_length = result_length;
- token[1].stat = EIP197_TOKEN_STAT_LAST_HASH |
+ input_length &= 15;
+ if (unlikely(cbcmac && input_length)) {
+ token[1].opcode = EIP197_TOKEN_OPCODE_INSERT;
+ token[1].packet_length = 16 - input_length;
+ token[1].stat = EIP197_TOKEN_STAT_LAST_HASH;
+ token[1].instructions = EIP197_TOKEN_INS_TYPE_HASH;
+ } else {
+ token[0].stat = EIP197_TOKEN_STAT_LAST_HASH;
+ }
+
+ token[2].opcode = EIP197_TOKEN_OPCODE_INSERT;
+ token[2].stat = EIP197_TOKEN_STAT_LAST_HASH |
EIP197_TOKEN_STAT_LAST_PACKET;
- token[1].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT |
+ token[2].packet_length = result_length;
+ token[2].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT |
EIP197_TOKEN_INS_INSERT_HASH_DIGEST;
}
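
Net effect of the token change above (sketch; the inserted bytes are presumably zeroes, as CBC-MAC requires): a trailing partial block is extended in hardware by an INSERT token of 16 - (input_length & 15) bytes, so the engine only ever MACs whole AES blocks.

	/*
	 * Example: input_length = 29  ->  29 & 15 = 13, INSERT pads 3
	 * bytes, so the engine hashes 32 bytes = 2 whole AES blocks.
	 */
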
@@ -82,29 +110,48 @@ static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
struct safexcel_crypto_priv *priv = ctx->priv;
u64 count = 0;
- cdesc->control_data.control0 |= ctx->alg;
+ cdesc->control_data.control0 = ctx->alg;
/*
* Copy the input digest if needed, and set up the context
* fields. Do this now as we need it to set up the first command
* descriptor.
*/
- if (!req->processed) {
- /* First - and possibly only - block of basic hash only */
- if (req->finish) {
+ if (unlikely(req->digest == CONTEXT_CONTROL_DIGEST_XCM)) {
+ if (req->xcbcmac)
+ memcpy(ctx->base.ctxr->data, ctx->ipad, ctx->key_sz);
+ else
+ memcpy(ctx->base.ctxr->data, req->state, req->state_sz);
+
+ if (!req->finish && req->xcbcmac)
cdesc->control_data.control0 |=
+ CONTEXT_CONTROL_DIGEST_XCM |
+ CONTEXT_CONTROL_TYPE_HASH_OUT |
+ CONTEXT_CONTROL_NO_FINISH_HASH |
+ CONTEXT_CONTROL_SIZE(req->state_sz /
+ sizeof(u32));
+ else
+ cdesc->control_data.control0 |=
+ CONTEXT_CONTROL_DIGEST_XCM |
+ CONTEXT_CONTROL_TYPE_HASH_OUT |
+ CONTEXT_CONTROL_SIZE(req->state_sz /
+ sizeof(u32));
+ return;
+ } else if (!req->processed) {
+ /* First - and possibly only - block of basic hash only */
+ if (req->finish)
+ cdesc->control_data.control0 |= req->digest |
CONTEXT_CONTROL_TYPE_HASH_OUT |
CONTEXT_CONTROL_RESTART_HASH |
/* ensure it's not 0! */
CONTEXT_CONTROL_SIZE(1);
- } else {
- cdesc->control_data.control0 |=
+ else
+ cdesc->control_data.control0 |= req->digest |
CONTEXT_CONTROL_TYPE_HASH_OUT |
CONTEXT_CONTROL_RESTART_HASH |
CONTEXT_CONTROL_NO_FINISH_HASH |
/* ensure it's not 0! */
CONTEXT_CONTROL_SIZE(1);
- }
return;
}
@@ -204,7 +251,7 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv,
}
if (sreq->result_dma) {
- dma_unmap_single(priv->dev, sreq->result_dma, sreq->state_sz,
+ dma_unmap_single(priv->dev, sreq->result_dma, sreq->digest_sz,
DMA_FROM_DEVICE);
sreq->result_dma = 0;
}
@@ -223,7 +270,7 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv,
memcpy(sreq->cache, sreq->state,
crypto_ahash_digestsize(ahash));
- memcpy(sreq->state, ctx->opad, sreq->state_sz);
+ memcpy(sreq->state, ctx->opad, sreq->digest_sz);
sreq->len = sreq->block_sz +
crypto_ahash_digestsize(ahash);
@@ -238,8 +285,14 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv,
return 1;
}
- memcpy(areq->result, sreq->state,
- crypto_ahash_digestsize(ahash));
+ if (unlikely(sreq->digest == CONTEXT_CONTROL_DIGEST_XCM &&
+ ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_CRC32)) {
+ /* Undo final XOR with 0xffffffff ... */
+ *(__le32 *)areq->result = ~sreq->state[0];
+ } else {
+ memcpy(areq->result, sreq->state,
+ crypto_ahash_digestsize(ahash));
+ }
}
cache_len = safexcel_queued_len(sreq);
@@ -261,10 +314,10 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
struct safexcel_command_desc *cdesc, *first_cdesc = NULL;
struct safexcel_result_desc *rdesc;
struct scatterlist *sg;
- int i, extra = 0, n_cdesc = 0, ret = 0;
- u64 queued, len, cache_len;
+ int i, extra = 0, n_cdesc = 0, ret = 0, cache_len, skip = 0;
+ u64 queued, len;
- queued = len = safexcel_queued_len(req);
+ queued = safexcel_queued_len(req);
if (queued <= HASH_CACHE_SIZE)
cache_len = queued;
else
@@ -287,15 +340,52 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
areq->nbytes - extra);
queued -= extra;
- len -= extra;
if (!queued) {
*commands = 0;
*results = 0;
return 0;
}
+
+ extra = 0;
}
+ if (unlikely(req->xcbcmac && req->processed > AES_BLOCK_SIZE)) {
+ if (unlikely(cache_len < AES_BLOCK_SIZE)) {
+ /*
+ * Cache holds less than 1 full block: pad it out to a
+ * complete block.
+ */
+ extra = AES_BLOCK_SIZE - cache_len;
+ if (queued > cache_len) {
+ /* More data follows: borrow bytes */
+ u64 tmp = queued - cache_len;
+
+ skip = min_t(u64, tmp, extra);
+ sg_pcopy_to_buffer(areq->src,
+ sg_nents(areq->src),
+ req->cache + cache_len,
+ skip, 0);
+ }
+ extra -= skip;
+ memset(req->cache + cache_len + skip, 0, extra);
+ if (!ctx->cbcmac && extra) {
+ // 10* (one-zero) padding for XCBC-MAC & CMAC
+ req->cache[cache_len + skip] = 0x80;
+ // HW will use K2 instead of K3 - compensate!
+ for (i = 0; i < AES_BLOCK_SIZE / sizeof(u32); i++)
+ ((__be32 *)req->cache)[i] ^=
+ cpu_to_be32(le32_to_cpu(
+ ctx->ipad[i] ^ ctx->ipad[i + 4]));
+ }
+ cache_len = AES_BLOCK_SIZE;
+ queued = queued + extra;
+ }
+
+ /* XCBC continue: XOR previous result into 1st word */
+ crypto_xor(req->cache, (const u8 *)req->state, AES_BLOCK_SIZE);
+ }
+
+ len = queued;
/* Add a command descriptor for the cached data, if any */
if (cache_len) {
req->cache_dma = dma_map_single(priv->dev, req->cache,
@@ -306,8 +396,8 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
req->cache_sz = cache_len;
first_cdesc = safexcel_add_cdesc(priv, ring, 1,
(cache_len == len),
- req->cache_dma, cache_len, len,
- ctx->base.ctxr_dma);
+ req->cache_dma, cache_len,
+ len, ctx->base.ctxr_dma);
if (IS_ERR(first_cdesc)) {
ret = PTR_ERR(first_cdesc);
goto unmap_cache;
@@ -319,10 +409,6 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
goto send_command;
}
- /* Skip descriptor generation for zero-length requests */
- if (!areq->nbytes)
- goto send_command;
-
/* Now handle the current ahash request buffer(s) */
req->nents = dma_map_sg(priv->dev, areq->src,
sg_nents_for_len(areq->src,
@@ -336,26 +422,34 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
for_each_sg(areq->src, sg, req->nents, i) {
int sglen = sg_dma_len(sg);
+ if (unlikely(sglen <= skip)) {
+ skip -= sglen;
+ continue;
+ }
+
/* Do not overflow the request */
- if (queued < sglen)
+ if ((queued + skip) <= sglen)
sglen = queued;
+ else
+ sglen -= skip;
cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc,
!(queued - sglen),
- sg_dma_address(sg),
- sglen, len, ctx->base.ctxr_dma);
+ sg_dma_address(sg) + skip, sglen,
+ len, ctx->base.ctxr_dma);
if (IS_ERR(cdesc)) {
ret = PTR_ERR(cdesc);
goto unmap_sg;
}
- n_cdesc++;
- if (n_cdesc == 1)
+ if (!n_cdesc)
first_cdesc = cdesc;
+ n_cdesc++;
queued -= sglen;
if (!queued)
break;
+ skip = 0;
}
send_command:
@@ -363,9 +457,9 @@ send_command:
safexcel_context_control(ctx, req, first_cdesc);
/* Add the token */
- safexcel_hash_token(first_cdesc, len, req->state_sz);
+ safexcel_hash_token(first_cdesc, len, req->digest_sz, ctx->cbcmac);
- req->result_dma = dma_map_single(priv->dev, req->state, req->state_sz,
+ req->result_dma = dma_map_single(priv->dev, req->state, req->digest_sz,
DMA_FROM_DEVICE);
if (dma_mapping_error(priv->dev, req->result_dma)) {
ret = -EINVAL;
@@ -374,7 +468,7 @@ send_command:
/* Add a result descriptor */
rdesc = safexcel_add_rdesc(priv, ring, 1, 1, req->result_dma,
- req->state_sz);
+ req->digest_sz);
if (IS_ERR(rdesc)) {
ret = PTR_ERR(rdesc);
goto unmap_result;
@@ -382,17 +476,20 @@ send_command:
safexcel_rdr_req_set(priv, ring, rdesc, &areq->base);
- req->processed += len;
+ req->processed += len - extra;
*commands = n_cdesc;
*results = 1;
return 0;
unmap_result:
- dma_unmap_single(priv->dev, req->result_dma, req->state_sz,
+ dma_unmap_single(priv->dev, req->result_dma, req->digest_sz,
DMA_FROM_DEVICE);
unmap_sg:
- dma_unmap_sg(priv->dev, areq->src, req->nents, DMA_TO_DEVICE);
+ if (req->nents) {
+ dma_unmap_sg(priv->dev, areq->src, req->nents, DMA_TO_DEVICE);
+ req->nents = 0;
+ }
cdesc_rollback:
for (i = 0; i < n_cdesc; i++)
safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
@@ -590,16 +687,12 @@ static int safexcel_ahash_enqueue(struct ahash_request *areq)
if (ctx->base.ctxr) {
if (priv->flags & EIP197_TRC_CACHE && !ctx->base.needs_inv &&
- req->processed &&
- (/* invalidate for basic hash continuation finish */
- (req->finish &&
- (req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED)) ||
+ /* invalidate for *any* non-XCBC continuation */
+ ((req->not_first && !req->xcbcmac) ||
/* invalidate if (i)digest changed */
memcmp(ctx->base.ctxr->data, req->state, req->state_sz) ||
- /* invalidate for HMAC continuation finish */
- (req->finish && (req->processed != req->block_sz)) ||
/* invalidate for HMAC finish with odigest changed */
- (req->finish &&
+ (req->finish && req->hmac &&
memcmp(ctx->base.ctxr->data + (req->state_sz>>2),
ctx->opad, req->state_sz))))
/*
@@ -622,6 +715,7 @@ static int safexcel_ahash_enqueue(struct ahash_request *areq)
if (!ctx->base.ctxr)
return -ENOMEM;
}
+ req->not_first = true;
ring = ctx->base.ring;
@@ -691,8 +785,34 @@ static int safexcel_ahash_final(struct ahash_request *areq)
else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA512)
memcpy(areq->result, sha512_zero_message_hash,
SHA512_DIGEST_SIZE);
+ else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SM3) {
+ memcpy(areq->result,
+ EIP197_SM3_ZEROM_HASH, SM3_DIGEST_SIZE);
+ }
return 0;
+ } else if (unlikely(req->digest == CONTEXT_CONTROL_DIGEST_XCM &&
+ ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_MD5 &&
+ req->len == sizeof(u32) && !areq->nbytes)) {
+ /* Zero length CRC32 */
+ memcpy(areq->result, ctx->ipad, sizeof(u32));
+ return 0;
+ } else if (unlikely(ctx->cbcmac && req->len == AES_BLOCK_SIZE &&
+ !areq->nbytes)) {
+ /* Zero length CBC MAC */
+ memset(areq->result, 0, AES_BLOCK_SIZE);
+ return 0;
+ } else if (unlikely(req->xcbcmac && req->len == AES_BLOCK_SIZE &&
+ !areq->nbytes)) {
+ /* Zero length (X)CBC/CMAC */
+ int i;
+
+ for (i = 0; i < AES_BLOCK_SIZE / sizeof(u32); i++)
+ ((__be32 *)areq->result)[i] =
+ cpu_to_be32(le32_to_cpu(ctx->ipad[i + 4])); /* K3 */
+ areq->result[0] ^= 0x80; // 10* (one-zero) padding
+ crypto_cipher_encrypt_one(ctx->kaes, areq->result, areq->result);
+ return 0;
} else if (unlikely(req->hmac &&
(req->len == req->block_sz) &&
!areq->nbytes)) {
@@ -792,6 +912,7 @@ static int safexcel_ahash_cra_init(struct crypto_tfm *tfm)
ctx->priv = tmpl->priv;
ctx->base.send = safexcel_ahash_send;
ctx->base.handle_result = safexcel_handle_result;
+ ctx->fb_do_setkey = false;
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
sizeof(struct safexcel_ahash_req));
@@ -808,6 +929,7 @@ static int safexcel_sha1_init(struct ahash_request *areq)
ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1;
req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
req->state_sz = SHA1_DIGEST_SIZE;
+ req->digest_sz = SHA1_DIGEST_SIZE;
req->block_sz = SHA1_BLOCK_SIZE;
return 0;
@@ -889,6 +1011,7 @@ static int safexcel_hmac_sha1_init(struct ahash_request *areq)
ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1;
req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
req->state_sz = SHA1_DIGEST_SIZE;
+ req->digest_sz = SHA1_DIGEST_SIZE;
req->block_sz = SHA1_BLOCK_SIZE;
req->hmac = true;
@@ -1125,6 +1248,7 @@ static int safexcel_sha256_init(struct ahash_request *areq)
ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA256;
req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
req->state_sz = SHA256_DIGEST_SIZE;
+ req->digest_sz = SHA256_DIGEST_SIZE;
req->block_sz = SHA256_BLOCK_SIZE;
return 0;
@@ -1180,6 +1304,7 @@ static int safexcel_sha224_init(struct ahash_request *areq)
ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA224;
req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
req->state_sz = SHA256_DIGEST_SIZE;
+ req->digest_sz = SHA256_DIGEST_SIZE;
req->block_sz = SHA256_BLOCK_SIZE;
return 0;
@@ -1248,6 +1373,7 @@ static int safexcel_hmac_sha224_init(struct ahash_request *areq)
ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA224;
req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
req->state_sz = SHA256_DIGEST_SIZE;
+ req->digest_sz = SHA256_DIGEST_SIZE;
req->block_sz = SHA256_BLOCK_SIZE;
req->hmac = true;
@@ -1318,6 +1444,7 @@ static int safexcel_hmac_sha256_init(struct ahash_request *areq)
ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA256;
req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
req->state_sz = SHA256_DIGEST_SIZE;
+ req->digest_sz = SHA256_DIGEST_SIZE;
req->block_sz = SHA256_BLOCK_SIZE;
req->hmac = true;
@@ -1375,6 +1502,7 @@ static int safexcel_sha512_init(struct ahash_request *areq)
ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA512;
req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
req->state_sz = SHA512_DIGEST_SIZE;
+ req->digest_sz = SHA512_DIGEST_SIZE;
req->block_sz = SHA512_BLOCK_SIZE;
return 0;
@@ -1430,6 +1558,7 @@ static int safexcel_sha384_init(struct ahash_request *areq)
ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA384;
req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
req->state_sz = SHA512_DIGEST_SIZE;
+ req->digest_sz = SHA512_DIGEST_SIZE;
req->block_sz = SHA512_BLOCK_SIZE;
return 0;
@@ -1498,6 +1627,7 @@ static int safexcel_hmac_sha512_init(struct ahash_request *areq)
ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA512;
req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
req->state_sz = SHA512_DIGEST_SIZE;
+ req->digest_sz = SHA512_DIGEST_SIZE;
req->block_sz = SHA512_BLOCK_SIZE;
req->hmac = true;
@@ -1568,6 +1698,7 @@ static int safexcel_hmac_sha384_init(struct ahash_request *areq)
ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA384;
req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
req->state_sz = SHA512_DIGEST_SIZE;
+ req->digest_sz = SHA512_DIGEST_SIZE;
req->block_sz = SHA512_BLOCK_SIZE;
req->hmac = true;
@@ -1625,6 +1756,7 @@ static int safexcel_md5_init(struct ahash_request *areq)
ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_MD5;
req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
req->state_sz = MD5_DIGEST_SIZE;
+ req->digest_sz = MD5_DIGEST_SIZE;
req->block_sz = MD5_HMAC_BLOCK_SIZE;
return 0;
@@ -1686,6 +1818,7 @@ static int safexcel_hmac_md5_init(struct ahash_request *areq)
ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_MD5;
req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
req->state_sz = MD5_DIGEST_SIZE;
+ req->digest_sz = MD5_DIGEST_SIZE;
req->block_sz = MD5_HMAC_BLOCK_SIZE;
req->len_is_le = true; /* MD5 is little endian! ... */
req->hmac = true;
@@ -1740,3 +1873,1247 @@ struct safexcel_alg_template safexcel_alg_hmac_md5 = {
},
},
};
+
+static int safexcel_crc32_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
+ int ret = safexcel_ahash_cra_init(tfm);
+
+ /* Default 'key' is all zeroes */
+ memset(ctx->ipad, 0, sizeof(u32));
+ return ret;
+}
+
+static int safexcel_crc32_init(struct ahash_request *areq)
+{
+ struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
+ struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+ memset(req, 0, sizeof(*req));
+
+ /* Start from loaded key */
+ req->state[0] = (__force __le32)le32_to_cpu(~ctx->ipad[0]);
+ /* Set processed to non-zero to enable invalidation detection */
+ req->len = sizeof(u32);
+ req->processed = sizeof(u32);
+
+ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_CRC32;
+ req->digest = CONTEXT_CONTROL_DIGEST_XCM;
+ req->state_sz = sizeof(u32);
+ req->digest_sz = sizeof(u32);
+ req->block_sz = sizeof(u32);
+
+ return 0;
+}
+
+static int safexcel_crc32_setkey(struct crypto_ahash *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
+
+ if (keylen != sizeof(u32)) {
+ crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ memcpy(ctx->ipad, key, sizeof(u32));
+ return 0;
+}
+
+static int safexcel_crc32_digest(struct ahash_request *areq)
+{
+ return safexcel_crc32_init(areq) ?: safexcel_ahash_finup(areq);
+}
+
+struct safexcel_alg_template safexcel_alg_crc32 = {
+ .type = SAFEXCEL_ALG_TYPE_AHASH,
+ .algo_mask = 0,
+ .alg.ahash = {
+ .init = safexcel_crc32_init,
+ .update = safexcel_ahash_update,
+ .final = safexcel_ahash_final,
+ .finup = safexcel_ahash_finup,
+ .digest = safexcel_crc32_digest,
+ .setkey = safexcel_crc32_setkey,
+ .export = safexcel_ahash_export,
+ .import = safexcel_ahash_import,
+ .halg = {
+ .digestsize = sizeof(u32),
+ .statesize = sizeof(struct safexcel_ahash_export_state),
+ .base = {
+ .cra_name = "crc32",
+ .cra_driver_name = "safexcel-crc32",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_OPTIONAL_KEY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+ .cra_init = safexcel_crc32_cra_init,
+ .cra_exit = safexcel_ahash_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+ },
+};
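
A hedged software cross-check, assuming the template above matches the generic "crc32" shash semantics (the key is the initial CRC value; the pre-inverted state and final-XOR undo above are internal engine details):

	#include <linux/crc32.h>

	static u32 crc32_sw_reference(u32 key, const u8 *data, unsigned int len)
	{
		/*
		 * crc32_le(key, data, 0) == key, matching the zero-length
		 * fast path in safexcel_ahash_final().
		 */
		return crc32_le(key, data, len);
	}
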
+
+static int safexcel_cbcmac_init(struct ahash_request *areq)
+{
+ struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
+ struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+ memset(req, 0, sizeof(*req));
+
+ /* Start from loaded keys */
+ memcpy(req->state, ctx->ipad, ctx->key_sz);
+ /* Set processed to non-zero to enable invalidation detection */
+ req->len = AES_BLOCK_SIZE;
+ req->processed = AES_BLOCK_SIZE;
+
+ req->digest = CONTEXT_CONTROL_DIGEST_XCM;
+ req->state_sz = ctx->key_sz;
+ req->digest_sz = AES_BLOCK_SIZE;
+ req->block_sz = AES_BLOCK_SIZE;
+ req->xcbcmac = true;
+
+ return 0;
+}
+
+static int safexcel_cbcmac_setkey(struct crypto_ahash *tfm, const u8 *key,
+ unsigned int len)
+{
+ struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
+ struct crypto_aes_ctx aes;
+ int ret, i;
+
+ ret = aes_expandkey(&aes, key, len);
+ if (ret) {
+ crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return ret;
+ }
+
+ memset(ctx->ipad, 0, 2 * AES_BLOCK_SIZE);
+ for (i = 0; i < len / sizeof(u32); i++)
+ ctx->ipad[i + 8] = (__force __le32)cpu_to_be32(aes.key_enc[i]);
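+	/*
+	 * Key record layout as consumed by the engine: two AES blocks of
+	 * subkey material first (left zero for plain CBCMAC), then the
+	 * byte-swapped AES key itself.
+	 */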
+
+ if (len == AES_KEYSIZE_192) {
+ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_XCBC192;
+ ctx->key_sz = AES_MAX_KEY_SIZE + 2 * AES_BLOCK_SIZE;
+ } else if (len == AES_KEYSIZE_256) {
+ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_XCBC256;
+ ctx->key_sz = AES_MAX_KEY_SIZE + 2 * AES_BLOCK_SIZE;
+ } else {
+ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_XCBC128;
+ ctx->key_sz = AES_MIN_KEY_SIZE + 2 * AES_BLOCK_SIZE;
+ }
+ ctx->cbcmac = true;
+
+ memzero_explicit(&aes, sizeof(aes));
+ return 0;
+}
+
+static int safexcel_cbcmac_digest(struct ahash_request *areq)
+{
+ return safexcel_cbcmac_init(areq) ?: safexcel_ahash_finup(areq);
+}
+
+struct safexcel_alg_template safexcel_alg_cbcmac = {
+ .type = SAFEXCEL_ALG_TYPE_AHASH,
+ .algo_mask = 0,
+ .alg.ahash = {
+ .init = safexcel_cbcmac_init,
+ .update = safexcel_ahash_update,
+ .final = safexcel_ahash_final,
+ .finup = safexcel_ahash_finup,
+ .digest = safexcel_cbcmac_digest,
+ .setkey = safexcel_cbcmac_setkey,
+ .export = safexcel_ahash_export,
+ .import = safexcel_ahash_import,
+ .halg = {
+ .digestsize = AES_BLOCK_SIZE,
+ .statesize = sizeof(struct safexcel_ahash_export_state),
+ .base = {
+ .cra_name = "cbcmac(aes)",
+ .cra_driver_name = "safexcel-cbcmac-aes",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+ .cra_init = safexcel_ahash_cra_init,
+ .cra_exit = safexcel_ahash_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+ },
+};
+
+static int safexcel_xcbcmac_setkey(struct crypto_ahash *tfm, const u8 *key,
+ unsigned int len)
+{
+ struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
+ struct crypto_aes_ctx aes;
+ u32 key_tmp[3 * AES_BLOCK_SIZE / sizeof(u32)];
+ int ret, i;
+
+ ret = aes_expandkey(&aes, key, len);
+ if (ret) {
+ crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return ret;
+ }
+
+ /* precompute the XCBC key material */
+ crypto_cipher_clear_flags(ctx->kaes, CRYPTO_TFM_REQ_MASK);
+ crypto_cipher_set_flags(ctx->kaes, crypto_ahash_get_flags(tfm) &
+ CRYPTO_TFM_REQ_MASK);
+ ret = crypto_cipher_setkey(ctx->kaes, key, len);
+ crypto_ahash_set_flags(tfm, crypto_cipher_get_flags(ctx->kaes) &
+ CRYPTO_TFM_RES_MASK);
+ if (ret)
+ return ret;
+
+ crypto_cipher_encrypt_one(ctx->kaes, (u8 *)key_tmp + 2 * AES_BLOCK_SIZE,
+ "\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1");
+ crypto_cipher_encrypt_one(ctx->kaes, (u8 *)key_tmp,
+ "\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2");
+ crypto_cipher_encrypt_one(ctx->kaes, (u8 *)key_tmp + AES_BLOCK_SIZE,
+ "\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3");
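+	/*
+	 * Per RFC 3566: K1 = E_K(0x01..01), K2 = E_K(0x02..02) and
+	 * K3 = E_K(0x03..03). K2 and K3 are stored first to match the
+	 * engine's key record layout; K1 becomes the cipher key below.
+	 */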
+ for (i = 0; i < 3 * AES_BLOCK_SIZE / sizeof(u32); i++)
+ ctx->ipad[i] =
+ cpu_to_le32((__force u32)cpu_to_be32(key_tmp[i]));
+
+ crypto_cipher_clear_flags(ctx->kaes, CRYPTO_TFM_REQ_MASK);
+ crypto_cipher_set_flags(ctx->kaes, crypto_ahash_get_flags(tfm) &
+ CRYPTO_TFM_REQ_MASK);
+ ret = crypto_cipher_setkey(ctx->kaes,
+ (u8 *)key_tmp + 2 * AES_BLOCK_SIZE,
+ AES_MIN_KEY_SIZE);
+ crypto_ahash_set_flags(tfm, crypto_cipher_get_flags(ctx->kaes) &
+ CRYPTO_TFM_RES_MASK);
+ if (ret)
+ return ret;
+
+ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_XCBC128;
+ ctx->key_sz = AES_MIN_KEY_SIZE + 2 * AES_BLOCK_SIZE;
+ ctx->cbcmac = false;
+
+ memzero_explicit(&aes, sizeof(aes));
+ return 0;
+}
+
+static int safexcel_xcbcmac_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ safexcel_ahash_cra_init(tfm);
+ ctx->kaes = crypto_alloc_cipher("aes", 0, 0);
+ return PTR_ERR_OR_ZERO(ctx->kaes);
+}
+
+static void safexcel_xcbcmac_cra_exit(struct crypto_tfm *tfm)
+{
+ struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ crypto_free_cipher(ctx->kaes);
+ safexcel_ahash_cra_exit(tfm);
+}
+
+struct safexcel_alg_template safexcel_alg_xcbcmac = {
+ .type = SAFEXCEL_ALG_TYPE_AHASH,
+ .algo_mask = 0,
+ .alg.ahash = {
+ .init = safexcel_cbcmac_init,
+ .update = safexcel_ahash_update,
+ .final = safexcel_ahash_final,
+ .finup = safexcel_ahash_finup,
+ .digest = safexcel_cbcmac_digest,
+ .setkey = safexcel_xcbcmac_setkey,
+ .export = safexcel_ahash_export,
+ .import = safexcel_ahash_import,
+ .halg = {
+ .digestsize = AES_BLOCK_SIZE,
+ .statesize = sizeof(struct safexcel_ahash_export_state),
+ .base = {
+ .cra_name = "xcbc(aes)",
+ .cra_driver_name = "safexcel-xcbc-aes",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+ .cra_init = safexcel_xcbcmac_cra_init,
+ .cra_exit = safexcel_xcbcmac_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+ },
+};
+
+static int safexcel_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
+ unsigned int len)
+{
+ struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
+ struct crypto_aes_ctx aes;
+ __be64 consts[4];
+ u64 _const[2];
+ u8 msb_mask, gfmask;
+ int ret, i;
+
+ ret = aes_expandkey(&aes, key, len);
+ if (ret) {
+ crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return ret;
+ }
+
+ for (i = 0; i < len / sizeof(u32); i++)
+ ctx->ipad[i + 8] =
+ cpu_to_le32((__force u32)cpu_to_be32(aes.key_enc[i]));
+
+ /* precompute the CMAC key material */
+ crypto_cipher_clear_flags(ctx->kaes, CRYPTO_TFM_REQ_MASK);
+ crypto_cipher_set_flags(ctx->kaes, crypto_ahash_get_flags(tfm) &
+ CRYPTO_TFM_REQ_MASK);
+ ret = crypto_cipher_setkey(ctx->kaes, key, len);
+ crypto_ahash_set_flags(tfm, crypto_cipher_get_flags(ctx->kaes) &
+ CRYPTO_TFM_RES_MASK);
+ if (ret)
+ return ret;
+
+ /* code below borrowed from crypto/cmac.c */
+ /* encrypt the zero block */
+ memset(consts, 0, AES_BLOCK_SIZE);
+ crypto_cipher_encrypt_one(ctx->kaes, (u8 *)consts, (u8 *)consts);
+
+ gfmask = 0x87;
+ _const[0] = be64_to_cpu(consts[1]);
+ _const[1] = be64_to_cpu(consts[0]);
+
+ /* gf(2^128) multiply zero-ciphertext with u and u^2 */
+ for (i = 0; i < 4; i += 2) {
+ msb_mask = ((s64)_const[1] >> 63) & gfmask;
+ _const[1] = (_const[1] << 1) | (_const[0] >> 63);
+ _const[0] = (_const[0] << 1) ^ msb_mask;
+
+ consts[i + 0] = cpu_to_be64(_const[1]);
+ consts[i + 1] = cpu_to_be64(_const[0]);
+ }
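+	/*
+	 * The shift/mask above is doubling in GF(2^128) with reduction
+	 * polynomial x^128 + x^7 + x^2 + x + 1 (hence 0x87), yielding the
+	 * RFC 4493 subkeys K1 = 2L and K2 = 4L from L = E_K(0).
+	 */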
+ /* end of code borrowed from crypto/cmac.c */
+
+ for (i = 0; i < 2 * AES_BLOCK_SIZE / sizeof(u32); i++)
+ ctx->ipad[i] = (__force __le32)cpu_to_be32(((u32 *)consts)[i]);
+
+ if (len == AES_KEYSIZE_192) {
+ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_XCBC192;
+ ctx->key_sz = AES_MAX_KEY_SIZE + 2 * AES_BLOCK_SIZE;
+ } else if (len == AES_KEYSIZE_256) {
+ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_XCBC256;
+ ctx->key_sz = AES_MAX_KEY_SIZE + 2 * AES_BLOCK_SIZE;
+ } else {
+ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_XCBC128;
+ ctx->key_sz = AES_MIN_KEY_SIZE + 2 * AES_BLOCK_SIZE;
+ }
+ ctx->cbcmac = false;
+
+ memzero_explicit(&aes, sizeof(aes));
+ return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_cmac = {
+ .type = SAFEXCEL_ALG_TYPE_AHASH,
+ .algo_mask = 0,
+ .alg.ahash = {
+ .init = safexcel_cbcmac_init,
+ .update = safexcel_ahash_update,
+ .final = safexcel_ahash_final,
+ .finup = safexcel_ahash_finup,
+ .digest = safexcel_cbcmac_digest,
+ .setkey = safexcel_cmac_setkey,
+ .export = safexcel_ahash_export,
+ .import = safexcel_ahash_import,
+ .halg = {
+ .digestsize = AES_BLOCK_SIZE,
+ .statesize = sizeof(struct safexcel_ahash_export_state),
+ .base = {
+ .cra_name = "cmac(aes)",
+ .cra_driver_name = "safexcel-cmac-aes",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+ .cra_init = safexcel_xcbcmac_cra_init,
+ .cra_exit = safexcel_xcbcmac_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+ },
+};
+
+static int safexcel_sm3_init(struct ahash_request *areq)
+{
+ struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
+ struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+ memset(req, 0, sizeof(*req));
+
+ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SM3;
+ req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
+ req->state_sz = SM3_DIGEST_SIZE;
+ req->digest_sz = SM3_DIGEST_SIZE;
+ req->block_sz = SM3_BLOCK_SIZE;
+
+ return 0;
+}
+
+static int safexcel_sm3_digest(struct ahash_request *areq)
+{
+ int ret = safexcel_sm3_init(areq);
+
+ if (ret)
+ return ret;
+
+ return safexcel_ahash_finup(areq);
+}
+
+struct safexcel_alg_template safexcel_alg_sm3 = {
+ .type = SAFEXCEL_ALG_TYPE_AHASH,
+ .algo_mask = SAFEXCEL_ALG_SM3,
+ .alg.ahash = {
+ .init = safexcel_sm3_init,
+ .update = safexcel_ahash_update,
+ .final = safexcel_ahash_final,
+ .finup = safexcel_ahash_finup,
+ .digest = safexcel_sm3_digest,
+ .export = safexcel_ahash_export,
+ .import = safexcel_ahash_import,
+ .halg = {
+ .digestsize = SM3_DIGEST_SIZE,
+ .statesize = sizeof(struct safexcel_ahash_export_state),
+ .base = {
+ .cra_name = "sm3",
+ .cra_driver_name = "safexcel-sm3",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SM3_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+ .cra_init = safexcel_ahash_cra_init,
+ .cra_exit = safexcel_ahash_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+ },
+};
+
+static int safexcel_hmac_sm3_setkey(struct crypto_ahash *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sm3",
+ SM3_DIGEST_SIZE);
+}
+
+static int safexcel_hmac_sm3_init(struct ahash_request *areq)
+{
+ struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
+ struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+ memset(req, 0, sizeof(*req));
+
+ /* Start from ipad precompute */
+ memcpy(req->state, ctx->ipad, SM3_DIGEST_SIZE);
+ /* Already processed the key^ipad part now! */
+ req->len = SM3_BLOCK_SIZE;
+ req->processed = SM3_BLOCK_SIZE;
+
+ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SM3;
+ req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
+ req->state_sz = SM3_DIGEST_SIZE;
+ req->digest_sz = SM3_DIGEST_SIZE;
+ req->block_sz = SM3_BLOCK_SIZE;
+ req->hmac = true;
+
+ return 0;
+}
+
+static int safexcel_hmac_sm3_digest(struct ahash_request *areq)
+{
+ int ret = safexcel_hmac_sm3_init(areq);
+
+ if (ret)
+ return ret;
+
+ return safexcel_ahash_finup(areq);
+}
+
+struct safexcel_alg_template safexcel_alg_hmac_sm3 = {
+ .type = SAFEXCEL_ALG_TYPE_AHASH,
+ .algo_mask = SAFEXCEL_ALG_SM3,
+ .alg.ahash = {
+ .init = safexcel_hmac_sm3_init,
+ .update = safexcel_ahash_update,
+ .final = safexcel_ahash_final,
+ .finup = safexcel_ahash_finup,
+ .digest = safexcel_hmac_sm3_digest,
+ .setkey = safexcel_hmac_sm3_setkey,
+ .export = safexcel_ahash_export,
+ .import = safexcel_ahash_import,
+ .halg = {
+ .digestsize = SM3_DIGEST_SIZE,
+ .statesize = sizeof(struct safexcel_ahash_export_state),
+ .base = {
+ .cra_name = "hmac(sm3)",
+ .cra_driver_name = "safexcel-hmac-sm3",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SM3_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+ .cra_init = safexcel_ahash_cra_init,
+ .cra_exit = safexcel_ahash_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+ },
+};
+
+static int safexcel_sha3_224_init(struct ahash_request *areq)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+ memset(req, 0, sizeof(*req));
+
+ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA3_224;
+ req->digest = CONTEXT_CONTROL_DIGEST_INITIAL;
+ req->state_sz = SHA3_224_DIGEST_SIZE;
+ req->digest_sz = SHA3_224_DIGEST_SIZE;
+ req->block_sz = SHA3_224_BLOCK_SIZE;
+ ctx->do_fallback = false;
+ ctx->fb_init_done = false;
+ return 0;
+}
+
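+/*
+ * The hardware can only do one-shot SHA3 operations. Anything that needs
+ * intermediate state (update/final, export/import) or a zero-length input
+ * is redirected to the software fallback ahash allocated at cra_init time.
+ */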
+static int safexcel_sha3_fbcheck(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct ahash_request *subreq = ahash_request_ctx(req);
+ int ret = 0;
+
+ if (ctx->do_fallback) {
+ ahash_request_set_tfm(subreq, ctx->fback);
+ ahash_request_set_callback(subreq, req->base.flags,
+ req->base.complete, req->base.data);
+ ahash_request_set_crypt(subreq, req->src, req->result,
+ req->nbytes);
+ if (!ctx->fb_init_done) {
+ if (ctx->fb_do_setkey) {
+				/* Set the fallback ahash HMAC key */
+ u8 key[SHA3_224_BLOCK_SIZE];
+
+ memcpy(key, ctx->ipad,
+ crypto_ahash_blocksize(ctx->fback) / 2);
+ memcpy(key +
+ crypto_ahash_blocksize(ctx->fback) / 2,
+ ctx->opad,
+ crypto_ahash_blocksize(ctx->fback) / 2);
+ ret = crypto_ahash_setkey(ctx->fback, key,
+ crypto_ahash_blocksize(ctx->fback));
+ memzero_explicit(key,
+ crypto_ahash_blocksize(ctx->fback));
+ ctx->fb_do_setkey = false;
+ }
+ ret = ret ?: crypto_ahash_init(subreq);
+ ctx->fb_init_done = true;
+ }
+ }
+ return ret;
+}
+
+static int safexcel_sha3_update(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct ahash_request *subreq = ahash_request_ctx(req);
+
+ ctx->do_fallback = true;
+ return safexcel_sha3_fbcheck(req) ?: crypto_ahash_update(subreq);
+}
+
+static int safexcel_sha3_final(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct ahash_request *subreq = ahash_request_ctx(req);
+
+ ctx->do_fallback = true;
+ return safexcel_sha3_fbcheck(req) ?: crypto_ahash_final(subreq);
+}
+
+static int safexcel_sha3_finup(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct ahash_request *subreq = ahash_request_ctx(req);
+
+ ctx->do_fallback |= !req->nbytes;
+ if (ctx->do_fallback)
+		/* An update or export/import happened, or zero-length data: cannot use the HW */
+ return safexcel_sha3_fbcheck(req) ?:
+ crypto_ahash_finup(subreq);
+ else
+ return safexcel_ahash_finup(req);
+}
+
+static int safexcel_sha3_digest_fallback(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct ahash_request *subreq = ahash_request_ctx(req);
+
+ ctx->do_fallback = true;
+ ctx->fb_init_done = false;
+ return safexcel_sha3_fbcheck(req) ?: crypto_ahash_finup(subreq);
+}
+
+static int safexcel_sha3_224_digest(struct ahash_request *req)
+{
+ if (req->nbytes)
+ return safexcel_sha3_224_init(req) ?: safexcel_ahash_finup(req);
+
+ /* HW cannot do zero length hash, use fallback instead */
+ return safexcel_sha3_digest_fallback(req);
+}
+
+static int safexcel_sha3_export(struct ahash_request *req, void *out)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct ahash_request *subreq = ahash_request_ctx(req);
+
+ ctx->do_fallback = true;
+ return safexcel_sha3_fbcheck(req) ?: crypto_ahash_export(subreq, out);
+}
+
+static int safexcel_sha3_import(struct ahash_request *req, const void *in)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct ahash_request *subreq = ahash_request_ctx(req);
+
+ ctx->do_fallback = true;
+ return safexcel_sha3_fbcheck(req) ?: crypto_ahash_import(subreq, in);
+}
+
+static int safexcel_sha3_cra_init(struct crypto_tfm *tfm)
+{
+ struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
+ struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ safexcel_ahash_cra_init(tfm);
+
+ /* Allocate fallback implementation */
+ ctx->fback = crypto_alloc_ahash(crypto_tfm_alg_name(tfm), 0,
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(ctx->fback))
+ return PTR_ERR(ctx->fback);
+
+ /* Update statesize from fallback algorithm! */
+ crypto_hash_alg_common(ahash)->statesize =
+ crypto_ahash_statesize(ctx->fback);
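+	/* Request ctx must fit either our own state or a full fallback request */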
+ crypto_ahash_set_reqsize(ahash, max(sizeof(struct safexcel_ahash_req),
+ sizeof(struct ahash_request) +
+ crypto_ahash_reqsize(ctx->fback)));
+ return 0;
+}
+
+static void safexcel_sha3_cra_exit(struct crypto_tfm *tfm)
+{
+ struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ crypto_free_ahash(ctx->fback);
+ safexcel_ahash_cra_exit(tfm);
+}
+
+struct safexcel_alg_template safexcel_alg_sha3_224 = {
+ .type = SAFEXCEL_ALG_TYPE_AHASH,
+ .algo_mask = SAFEXCEL_ALG_SHA3,
+ .alg.ahash = {
+ .init = safexcel_sha3_224_init,
+ .update = safexcel_sha3_update,
+ .final = safexcel_sha3_final,
+ .finup = safexcel_sha3_finup,
+ .digest = safexcel_sha3_224_digest,
+ .export = safexcel_sha3_export,
+ .import = safexcel_sha3_import,
+ .halg = {
+ .digestsize = SHA3_224_DIGEST_SIZE,
+ .statesize = sizeof(struct safexcel_ahash_export_state),
+ .base = {
+ .cra_name = "sha3-224",
+ .cra_driver_name = "safexcel-sha3-224",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA3_224_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+ .cra_init = safexcel_sha3_cra_init,
+ .cra_exit = safexcel_sha3_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+ },
+};
+
+static int safexcel_sha3_256_init(struct ahash_request *areq)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+ memset(req, 0, sizeof(*req));
+
+ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA3_256;
+ req->digest = CONTEXT_CONTROL_DIGEST_INITIAL;
+ req->state_sz = SHA3_256_DIGEST_SIZE;
+ req->digest_sz = SHA3_256_DIGEST_SIZE;
+ req->block_sz = SHA3_256_BLOCK_SIZE;
+ ctx->do_fallback = false;
+ ctx->fb_init_done = false;
+ return 0;
+}
+
+static int safexcel_sha3_256_digest(struct ahash_request *req)
+{
+ if (req->nbytes)
+ return safexcel_sha3_256_init(req) ?: safexcel_ahash_finup(req);
+
+ /* HW cannot do zero length hash, use fallback instead */
+ return safexcel_sha3_digest_fallback(req);
+}
+
+struct safexcel_alg_template safexcel_alg_sha3_256 = {
+ .type = SAFEXCEL_ALG_TYPE_AHASH,
+ .algo_mask = SAFEXCEL_ALG_SHA3,
+ .alg.ahash = {
+ .init = safexcel_sha3_256_init,
+ .update = safexcel_sha3_update,
+ .final = safexcel_sha3_final,
+ .finup = safexcel_sha3_finup,
+ .digest = safexcel_sha3_256_digest,
+ .export = safexcel_sha3_export,
+ .import = safexcel_sha3_import,
+ .halg = {
+ .digestsize = SHA3_256_DIGEST_SIZE,
+ .statesize = sizeof(struct safexcel_ahash_export_state),
+ .base = {
+ .cra_name = "sha3-256",
+ .cra_driver_name = "safexcel-sha3-256",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA3_256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+ .cra_init = safexcel_sha3_cra_init,
+ .cra_exit = safexcel_sha3_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+ },
+};
+
+static int safexcel_sha3_384_init(struct ahash_request *areq)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+ memset(req, 0, sizeof(*req));
+
+ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA3_384;
+ req->digest = CONTEXT_CONTROL_DIGEST_INITIAL;
+ req->state_sz = SHA3_384_DIGEST_SIZE;
+ req->digest_sz = SHA3_384_DIGEST_SIZE;
+ req->block_sz = SHA3_384_BLOCK_SIZE;
+ ctx->do_fallback = false;
+ ctx->fb_init_done = false;
+ return 0;
+}
+
+static int safexcel_sha3_384_digest(struct ahash_request *req)
+{
+ if (req->nbytes)
+ return safexcel_sha3_384_init(req) ?: safexcel_ahash_finup(req);
+
+ /* HW cannot do zero length hash, use fallback instead */
+ return safexcel_sha3_digest_fallback(req);
+}
+
+struct safexcel_alg_template safexcel_alg_sha3_384 = {
+ .type = SAFEXCEL_ALG_TYPE_AHASH,
+ .algo_mask = SAFEXCEL_ALG_SHA3,
+ .alg.ahash = {
+ .init = safexcel_sha3_384_init,
+ .update = safexcel_sha3_update,
+ .final = safexcel_sha3_final,
+ .finup = safexcel_sha3_finup,
+ .digest = safexcel_sha3_384_digest,
+ .export = safexcel_sha3_export,
+ .import = safexcel_sha3_import,
+ .halg = {
+ .digestsize = SHA3_384_DIGEST_SIZE,
+ .statesize = sizeof(struct safexcel_ahash_export_state),
+ .base = {
+ .cra_name = "sha3-384",
+ .cra_driver_name = "safexcel-sha3-384",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA3_384_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+ .cra_init = safexcel_sha3_cra_init,
+ .cra_exit = safexcel_sha3_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+ },
+};
+
+static int safexcel_sha3_512_init(struct ahash_request *areq)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+ memset(req, 0, sizeof(*req));
+
+ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA3_512;
+ req->digest = CONTEXT_CONTROL_DIGEST_INITIAL;
+ req->state_sz = SHA3_512_DIGEST_SIZE;
+ req->digest_sz = SHA3_512_DIGEST_SIZE;
+ req->block_sz = SHA3_512_BLOCK_SIZE;
+ ctx->do_fallback = false;
+ ctx->fb_init_done = false;
+ return 0;
+}
+
+static int safexcel_sha3_512_digest(struct ahash_request *req)
+{
+ if (req->nbytes)
+ return safexcel_sha3_512_init(req) ?: safexcel_ahash_finup(req);
+
+ /* HW cannot do zero length hash, use fallback instead */
+ return safexcel_sha3_digest_fallback(req);
+}
+
+struct safexcel_alg_template safexcel_alg_sha3_512 = {
+ .type = SAFEXCEL_ALG_TYPE_AHASH,
+ .algo_mask = SAFEXCEL_ALG_SHA3,
+ .alg.ahash = {
+ .init = safexcel_sha3_512_init,
+ .update = safexcel_sha3_update,
+ .final = safexcel_sha3_final,
+ .finup = safexcel_sha3_finup,
+ .digest = safexcel_sha3_512_digest,
+ .export = safexcel_sha3_export,
+ .import = safexcel_sha3_import,
+ .halg = {
+ .digestsize = SHA3_512_DIGEST_SIZE,
+ .statesize = sizeof(struct safexcel_ahash_export_state),
+ .base = {
+ .cra_name = "sha3-512",
+ .cra_driver_name = "safexcel-sha3-512",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA3_512_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+ .cra_init = safexcel_sha3_cra_init,
+ .cra_exit = safexcel_sha3_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+ },
+};
+
+static int safexcel_hmac_sha3_cra_init(struct crypto_tfm *tfm, const char *alg)
+{
+ struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
+ int ret;
+
+ ret = safexcel_sha3_cra_init(tfm);
+ if (ret)
+ return ret;
+
+ /* Allocate precalc basic digest implementation */
+ ctx->shpre = crypto_alloc_shash(alg, 0, CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(ctx->shpre))
+ return PTR_ERR(ctx->shpre);
+
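+	/* The shash_desc is followed by the algorithm's operational state */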
+ ctx->shdesc = kmalloc(sizeof(*ctx->shdesc) +
+ crypto_shash_descsize(ctx->shpre), GFP_KERNEL);
+ if (!ctx->shdesc) {
+ crypto_free_shash(ctx->shpre);
+ return -ENOMEM;
+ }
+ ctx->shdesc->tfm = ctx->shpre;
+ return 0;
+}
+
+static void safexcel_hmac_sha3_cra_exit(struct crypto_tfm *tfm)
+{
+ struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ crypto_free_ahash(ctx->fback);
+ crypto_free_shash(ctx->shpre);
+ kfree(ctx->shdesc);
+ safexcel_ahash_cra_exit(tfm);
+}
+
+static int safexcel_hmac_sha3_setkey(struct crypto_ahash *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
+ int ret = 0;
+
+ if (keylen > crypto_ahash_blocksize(tfm)) {
+ /*
+ * If the key is larger than the blocksize, then hash it
+		 * first using the precalc digest implementation
+ */
+ ret = crypto_shash_digest(ctx->shdesc, key, keylen,
+ (u8 *)ctx->ipad);
+ keylen = crypto_shash_digestsize(ctx->shpre);
+
+ /*
+ * If the digest is larger than half the blocksize, we need to
+ * move the rest to opad due to the way our HMAC infra works.
+ */
+ if (keylen > crypto_ahash_blocksize(tfm) / 2)
+			/* Buffers overlap, need to use memmove instead of memcpy! */
+ memmove(ctx->opad,
+ (u8 *)ctx->ipad +
+ crypto_ahash_blocksize(tfm) / 2,
+ keylen - crypto_ahash_blocksize(tfm) / 2);
+ } else {
+ /*
+ * Copy the key to our ipad & opad buffers
+ * Note that ipad and opad each contain one half of the key,
+ * to match the existing HMAC driver infrastructure.
+ */
+ if (keylen <= crypto_ahash_blocksize(tfm) / 2) {
+ memcpy(ctx->ipad, key, keylen);
+ } else {
+ memcpy(ctx->ipad, key,
+ crypto_ahash_blocksize(tfm) / 2);
+ memcpy(ctx->opad,
+ key + crypto_ahash_blocksize(tfm) / 2,
+ keylen - crypto_ahash_blocksize(tfm) / 2);
+ }
+ }
+
+ /* Pad key with zeroes */
+ if (keylen <= crypto_ahash_blocksize(tfm) / 2) {
+ memset((u8 *)ctx->ipad + keylen, 0,
+ crypto_ahash_blocksize(tfm) / 2 - keylen);
+ memset(ctx->opad, 0, crypto_ahash_blocksize(tfm) / 2);
+ } else {
+ memset((u8 *)ctx->opad + keylen -
+ crypto_ahash_blocksize(tfm) / 2, 0,
+ crypto_ahash_blocksize(tfm) - keylen);
+ }
+
+ /* If doing fallback, still need to set the new key! */
+ ctx->fb_do_setkey = true;
+ return ret;
+}
+
+static int safexcel_hmac_sha3_224_init(struct ahash_request *areq)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+ memset(req, 0, sizeof(*req));
+
+ /* Copy (half of) the key */
+ memcpy(req->state, ctx->ipad, SHA3_224_BLOCK_SIZE / 2);
+ /* Start of HMAC should have len == processed == blocksize */
+ req->len = SHA3_224_BLOCK_SIZE;
+ req->processed = SHA3_224_BLOCK_SIZE;
+ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA3_224;
+ req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
+ req->state_sz = SHA3_224_BLOCK_SIZE / 2;
+ req->digest_sz = SHA3_224_DIGEST_SIZE;
+ req->block_sz = SHA3_224_BLOCK_SIZE;
+ req->hmac = true;
+ ctx->do_fallback = false;
+ ctx->fb_init_done = false;
+ return 0;
+}
+
+static int safexcel_hmac_sha3_224_digest(struct ahash_request *req)
+{
+ if (req->nbytes)
+ return safexcel_hmac_sha3_224_init(req) ?:
+ safexcel_ahash_finup(req);
+
+ /* HW cannot do zero length HMAC, use fallback instead */
+ return safexcel_sha3_digest_fallback(req);
+}
+
+static int safexcel_hmac_sha3_224_cra_init(struct crypto_tfm *tfm)
+{
+ return safexcel_hmac_sha3_cra_init(tfm, "sha3-224");
+}
+
+struct safexcel_alg_template safexcel_alg_hmac_sha3_224 = {
+ .type = SAFEXCEL_ALG_TYPE_AHASH,
+ .algo_mask = SAFEXCEL_ALG_SHA3,
+ .alg.ahash = {
+ .init = safexcel_hmac_sha3_224_init,
+ .update = safexcel_sha3_update,
+ .final = safexcel_sha3_final,
+ .finup = safexcel_sha3_finup,
+ .digest = safexcel_hmac_sha3_224_digest,
+ .setkey = safexcel_hmac_sha3_setkey,
+ .export = safexcel_sha3_export,
+ .import = safexcel_sha3_import,
+ .halg = {
+ .digestsize = SHA3_224_DIGEST_SIZE,
+ .statesize = sizeof(struct safexcel_ahash_export_state),
+ .base = {
+ .cra_name = "hmac(sha3-224)",
+ .cra_driver_name = "safexcel-hmac-sha3-224",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA3_224_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+ .cra_init = safexcel_hmac_sha3_224_cra_init,
+ .cra_exit = safexcel_hmac_sha3_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+ },
+};
+
+static int safexcel_hmac_sha3_256_init(struct ahash_request *areq)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+ memset(req, 0, sizeof(*req));
+
+ /* Copy (half of) the key */
+ memcpy(req->state, ctx->ipad, SHA3_256_BLOCK_SIZE / 2);
+ /* Start of HMAC should have len == processed == blocksize */
+ req->len = SHA3_256_BLOCK_SIZE;
+ req->processed = SHA3_256_BLOCK_SIZE;
+ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA3_256;
+ req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
+ req->state_sz = SHA3_256_BLOCK_SIZE / 2;
+ req->digest_sz = SHA3_256_DIGEST_SIZE;
+ req->block_sz = SHA3_256_BLOCK_SIZE;
+ req->hmac = true;
+ ctx->do_fallback = false;
+ ctx->fb_init_done = false;
+ return 0;
+}
+
+static int safexcel_hmac_sha3_256_digest(struct ahash_request *req)
+{
+ if (req->nbytes)
+ return safexcel_hmac_sha3_256_init(req) ?:
+ safexcel_ahash_finup(req);
+
+ /* HW cannot do zero length HMAC, use fallback instead */
+ return safexcel_sha3_digest_fallback(req);
+}
+
+static int safexcel_hmac_sha3_256_cra_init(struct crypto_tfm *tfm)
+{
+ return safexcel_hmac_sha3_cra_init(tfm, "sha3-256");
+}
+
+struct safexcel_alg_template safexcel_alg_hmac_sha3_256 = {
+ .type = SAFEXCEL_ALG_TYPE_AHASH,
+ .algo_mask = SAFEXCEL_ALG_SHA3,
+ .alg.ahash = {
+ .init = safexcel_hmac_sha3_256_init,
+ .update = safexcel_sha3_update,
+ .final = safexcel_sha3_final,
+ .finup = safexcel_sha3_finup,
+ .digest = safexcel_hmac_sha3_256_digest,
+ .setkey = safexcel_hmac_sha3_setkey,
+ .export = safexcel_sha3_export,
+ .import = safexcel_sha3_import,
+ .halg = {
+ .digestsize = SHA3_256_DIGEST_SIZE,
+ .statesize = sizeof(struct safexcel_ahash_export_state),
+ .base = {
+ .cra_name = "hmac(sha3-256)",
+ .cra_driver_name = "safexcel-hmac-sha3-256",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA3_256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+ .cra_init = safexcel_hmac_sha3_256_cra_init,
+ .cra_exit = safexcel_hmac_sha3_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+ },
+};
+
+static int safexcel_hmac_sha3_384_init(struct ahash_request *areq)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+ memset(req, 0, sizeof(*req));
+
+ /* Copy (half of) the key */
+ memcpy(req->state, ctx->ipad, SHA3_384_BLOCK_SIZE / 2);
+ /* Start of HMAC should have len == processed == blocksize */
+ req->len = SHA3_384_BLOCK_SIZE;
+ req->processed = SHA3_384_BLOCK_SIZE;
+ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA3_384;
+ req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
+ req->state_sz = SHA3_384_BLOCK_SIZE / 2;
+ req->digest_sz = SHA3_384_DIGEST_SIZE;
+ req->block_sz = SHA3_384_BLOCK_SIZE;
+ req->hmac = true;
+ ctx->do_fallback = false;
+ ctx->fb_init_done = false;
+ return 0;
+}
+
+static int safexcel_hmac_sha3_384_digest(struct ahash_request *req)
+{
+ if (req->nbytes)
+ return safexcel_hmac_sha3_384_init(req) ?:
+ safexcel_ahash_finup(req);
+
+ /* HW cannot do zero length HMAC, use fallback instead */
+ return safexcel_sha3_digest_fallback(req);
+}
+
+static int safexcel_hmac_sha3_384_cra_init(struct crypto_tfm *tfm)
+{
+ return safexcel_hmac_sha3_cra_init(tfm, "sha3-384");
+}
+
+struct safexcel_alg_template safexcel_alg_hmac_sha3_384 = {
+ .type = SAFEXCEL_ALG_TYPE_AHASH,
+ .algo_mask = SAFEXCEL_ALG_SHA3,
+ .alg.ahash = {
+ .init = safexcel_hmac_sha3_384_init,
+ .update = safexcel_sha3_update,
+ .final = safexcel_sha3_final,
+ .finup = safexcel_sha3_finup,
+ .digest = safexcel_hmac_sha3_384_digest,
+ .setkey = safexcel_hmac_sha3_setkey,
+ .export = safexcel_sha3_export,
+ .import = safexcel_sha3_import,
+ .halg = {
+ .digestsize = SHA3_384_DIGEST_SIZE,
+ .statesize = sizeof(struct safexcel_ahash_export_state),
+ .base = {
+ .cra_name = "hmac(sha3-384)",
+ .cra_driver_name = "safexcel-hmac-sha3-384",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA3_384_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+ .cra_init = safexcel_hmac_sha3_384_cra_init,
+ .cra_exit = safexcel_hmac_sha3_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+ },
+};
+
+static int safexcel_hmac_sha3_512_init(struct ahash_request *areq)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+ memset(req, 0, sizeof(*req));
+
+ /* Copy (half of) the key */
+ memcpy(req->state, ctx->ipad, SHA3_512_BLOCK_SIZE / 2);
+ /* Start of HMAC should have len == processed == blocksize */
+ req->len = SHA3_512_BLOCK_SIZE;
+ req->processed = SHA3_512_BLOCK_SIZE;
+ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA3_512;
+ req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
+ req->state_sz = SHA3_512_BLOCK_SIZE / 2;
+ req->digest_sz = SHA3_512_DIGEST_SIZE;
+ req->block_sz = SHA3_512_BLOCK_SIZE;
+ req->hmac = true;
+ ctx->do_fallback = false;
+ ctx->fb_init_done = false;
+ return 0;
+}
+
+static int safexcel_hmac_sha3_512_digest(struct ahash_request *req)
+{
+ if (req->nbytes)
+ return safexcel_hmac_sha3_512_init(req) ?:
+ safexcel_ahash_finup(req);
+
+ /* HW cannot do zero length HMAC, use fallback instead */
+ return safexcel_sha3_digest_fallback(req);
+}
+
+static int safexcel_hmac_sha3_512_cra_init(struct crypto_tfm *tfm)
+{
+ return safexcel_hmac_sha3_cra_init(tfm, "sha3-512");
+}
+
+struct safexcel_alg_template safexcel_alg_hmac_sha3_512 = {
+ .type = SAFEXCEL_ALG_TYPE_AHASH,
+ .algo_mask = SAFEXCEL_ALG_SHA3,
+ .alg.ahash = {
+ .init = safexcel_hmac_sha3_512_init,
+ .update = safexcel_sha3_update,
+ .final = safexcel_sha3_final,
+ .finup = safexcel_sha3_finup,
+ .digest = safexcel_hmac_sha3_512_digest,
+ .setkey = safexcel_hmac_sha3_setkey,
+ .export = safexcel_sha3_export,
+ .import = safexcel_sha3_import,
+ .halg = {
+ .digestsize = SHA3_512_DIGEST_SIZE,
+ .statesize = sizeof(struct safexcel_ahash_export_state),
+ .base = {
+ .cra_name = "hmac(sha3-512)",
+ .cra_driver_name = "safexcel-hmac-sha3-512",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA3_512_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+ .cra_init = safexcel_hmac_sha3_512_cra_init,
+ .cra_exit = safexcel_hmac_sha3_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+ },
+};
diff --git a/drivers/crypto/inside-secure/safexcel_ring.c b/drivers/crypto/inside-secure/safexcel_ring.c
index 0f269b89cfd4..9237ba745c2f 100644
--- a/drivers/crypto/inside-secure/safexcel_ring.c
+++ b/drivers/crypto/inside-secure/safexcel_ring.c
@@ -14,7 +14,7 @@ int safexcel_init_ring_descriptors(struct safexcel_crypto_priv *priv,
struct safexcel_desc_ring *cdr,
struct safexcel_desc_ring *rdr)
{
- cdr->offset = sizeof(u32) * priv->config.cd_offset;
+ cdr->offset = priv->config.cd_offset;
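+	/* cd_offset is now expressed in bytes, hence no sizeof(u32) scaling */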
cdr->base = dmam_alloc_coherent(priv->dev,
cdr->offset * EIP197_DEFAULT_RING_SIZE,
&cdr->base_dma, GFP_KERNEL);
@@ -24,7 +24,7 @@ int safexcel_init_ring_descriptors(struct safexcel_crypto_priv *priv,
cdr->base_end = cdr->base + cdr->offset * (EIP197_DEFAULT_RING_SIZE - 1);
cdr->read = cdr->base;
- rdr->offset = sizeof(u32) * priv->config.rd_offset;
+ rdr->offset = priv->config.rd_offset;
rdr->base = dmam_alloc_coherent(priv->dev,
rdr->offset * EIP197_DEFAULT_RING_SIZE,
&rdr->base_dma, GFP_KERNEL);
@@ -180,6 +180,7 @@ struct safexcel_result_desc *safexcel_add_rdesc(struct safexcel_crypto_priv *pri
rdesc->first_seg = first;
rdesc->last_seg = last;
+ rdesc->result_size = EIP197_RD64_RESULT_SIZE;
rdesc->particle_size = len;
rdesc->data_lo = lower_32_bits(data);
rdesc->data_hi = upper_32_bits(data);
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 9181523ba760..391e3b4df364 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -23,6 +23,7 @@
#include <crypto/sha.h>
#include <crypto/algapi.h>
#include <crypto/internal/aead.h>
+#include <crypto/internal/skcipher.h>
#include <crypto/authenc.h>
#include <crypto/scatterwalk.h>
@@ -137,7 +138,7 @@ struct crypt_ctl {
/* Used by Host: 4*4 bytes*/
unsigned ctl_flags;
union {
- struct ablkcipher_request *ablk_req;
+ struct skcipher_request *ablk_req;
struct aead_request *aead_req;
struct crypto_tfm *tfm;
} data;
@@ -186,7 +187,7 @@ struct ixp_ctx {
};
struct ixp_alg {
- struct crypto_alg crypto;
+ struct skcipher_alg crypto;
const struct ix_hash_algo *hash;
u32 cfg_enc;
u32 cfg_dec;
@@ -239,17 +240,17 @@ static inline struct crypt_ctl *crypt_phys2virt(dma_addr_t phys)
static inline u32 cipher_cfg_enc(struct crypto_tfm *tfm)
{
- return container_of(tfm->__crt_alg, struct ixp_alg,crypto)->cfg_enc;
+ return container_of(tfm->__crt_alg, struct ixp_alg,crypto.base)->cfg_enc;
}
static inline u32 cipher_cfg_dec(struct crypto_tfm *tfm)
{
- return container_of(tfm->__crt_alg, struct ixp_alg,crypto)->cfg_dec;
+ return container_of(tfm->__crt_alg, struct ixp_alg,crypto.base)->cfg_dec;
}
static inline const struct ix_hash_algo *ix_hash(struct crypto_tfm *tfm)
{
- return container_of(tfm->__crt_alg, struct ixp_alg, crypto)->hash;
+ return container_of(tfm->__crt_alg, struct ixp_alg, crypto.base)->hash;
}
static int setup_crypt_desc(void)
@@ -378,8 +379,8 @@ static void one_packet(dma_addr_t phys)
break;
}
case CTL_FLAG_PERFORM_ABLK: {
- struct ablkcipher_request *req = crypt->data.ablk_req;
- struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req);
+ struct skcipher_request *req = crypt->data.ablk_req;
+ struct ablk_ctx *req_ctx = skcipher_request_ctx(req);
if (req_ctx->dst) {
free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
@@ -571,10 +572,10 @@ static int init_tfm(struct crypto_tfm *tfm)
return ret;
}
-static int init_tfm_ablk(struct crypto_tfm *tfm)
+static int init_tfm_ablk(struct crypto_skcipher *tfm)
{
- tfm->crt_ablkcipher.reqsize = sizeof(struct ablk_ctx);
- return init_tfm(tfm);
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct ablk_ctx));
+ return init_tfm(crypto_skcipher_tfm(tfm));
}
static int init_tfm_aead(struct crypto_aead *tfm)
@@ -590,6 +591,11 @@ static void exit_tfm(struct crypto_tfm *tfm)
free_sa_dir(&ctx->decrypt);
}
+static void exit_tfm_ablk(struct crypto_skcipher *tfm)
+{
+ exit_tfm(crypto_skcipher_tfm(tfm));
+}
+
static void exit_tfm_aead(struct crypto_aead *tfm)
{
exit_tfm(crypto_aead_tfm(tfm));
@@ -809,10 +815,10 @@ static struct buffer_desc *chainup_buffers(struct device *dev,
return buf;
}
-static int ablk_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+static int ablk_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int key_len)
{
- struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+ struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
u32 *flags = &tfm->base.crt_flags;
int ret;
@@ -845,17 +851,17 @@ out:
return ret;
}
-static int ablk_des3_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+static int ablk_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int key_len)
{
- return verify_ablkcipher_des3_key(tfm, key) ?:
+ return verify_skcipher_des3_key(tfm, key) ?:
ablk_setkey(tfm, key, key_len);
}
-static int ablk_rfc3686_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+static int ablk_rfc3686_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int key_len)
{
- struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+ struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
/* the nonce is stored in bytes at end of key */
if (key_len < CTR_RFC3686_NONCE_SIZE)
@@ -868,16 +874,16 @@ static int ablk_rfc3686_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
return ablk_setkey(tfm, key, key_len);
}
-static int ablk_perform(struct ablkcipher_request *req, int encrypt)
+static int ablk_perform(struct skcipher_request *req, int encrypt)
{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
- unsigned ivsize = crypto_ablkcipher_ivsize(tfm);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
+ unsigned ivsize = crypto_skcipher_ivsize(tfm);
struct ix_sa_dir *dir;
struct crypt_ctl *crypt;
- unsigned int nbytes = req->nbytes;
+ unsigned int nbytes = req->cryptlen;
enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
- struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req);
+ struct ablk_ctx *req_ctx = skcipher_request_ctx(req);
struct buffer_desc src_hook;
struct device *dev = &pdev->dev;
gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
@@ -902,8 +908,8 @@ static int ablk_perform(struct ablkcipher_request *req, int encrypt)
crypt->crypt_offs = 0;
crypt->crypt_len = nbytes;
- BUG_ON(ivsize && !req->info);
- memcpy(crypt->iv, req->info, ivsize);
+ BUG_ON(ivsize && !req->iv);
+ memcpy(crypt->iv, req->iv, ivsize);
if (req->src != req->dst) {
struct buffer_desc dst_hook;
crypt->mode |= NPE_OP_NOT_IN_PLACE;
@@ -941,22 +947,22 @@ free_buf_dest:
return -ENOMEM;
}
-static int ablk_encrypt(struct ablkcipher_request *req)
+static int ablk_encrypt(struct skcipher_request *req)
{
return ablk_perform(req, 1);
}
-static int ablk_decrypt(struct ablkcipher_request *req)
+static int ablk_decrypt(struct skcipher_request *req)
{
return ablk_perform(req, 0);
}
-static int ablk_rfc3686_crypt(struct ablkcipher_request *req)
+static int ablk_rfc3686_crypt(struct skcipher_request *req)
{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
u8 iv[CTR_RFC3686_BLOCK_SIZE];
- u8 *info = req->info;
+ u8 *info = req->iv;
int ret;
/* set up counter block */
@@ -967,9 +973,9 @@ static int ablk_rfc3686_crypt(struct ablkcipher_request *req)
*(__be32 *)(iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) =
cpu_to_be32(1);
- req->info = iv;
+ req->iv = iv;
ret = ablk_perform(req, 1);
- req->info = info;
+ req->iv = info;
return ret;
}
@@ -1212,107 +1218,91 @@ static int aead_decrypt(struct aead_request *req)
static struct ixp_alg ixp4xx_algos[] = {
{
.crypto = {
- .cra_name = "cbc(des)",
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_u = { .ablkcipher = {
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- }
- }
+ .base.cra_name = "cbc(des)",
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
},
.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
}, {
.crypto = {
- .cra_name = "ecb(des)",
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_u = { .ablkcipher = {
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- }
- }
+ .base.cra_name = "ecb(des)",
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
},
.cfg_enc = CIPH_ENCR | MOD_DES | MOD_ECB | KEYLEN_192,
.cfg_dec = CIPH_DECR | MOD_DES | MOD_ECB | KEYLEN_192,
}, {
.crypto = {
- .cra_name = "cbc(des3_ede)",
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_u = { .ablkcipher = {
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .ivsize = DES3_EDE_BLOCK_SIZE,
- .setkey = ablk_des3_setkey,
- }
- }
+ .base.cra_name = "cbc(des3_ede)",
+ .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .setkey = ablk_des3_setkey,
},
.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
}, {
.crypto = {
- .cra_name = "ecb(des3_ede)",
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_u = { .ablkcipher = {
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .setkey = ablk_des3_setkey,
- }
- }
+ .base.cra_name = "ecb(des3_ede)",
+ .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .setkey = ablk_des3_setkey,
},
.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_ECB | KEYLEN_192,
.cfg_dec = CIPH_DECR | MOD_3DES | MOD_ECB | KEYLEN_192,
}, {
.crypto = {
- .cra_name = "cbc(aes)",
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_u = { .ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- }
- }
+ .base.cra_name = "cbc(aes)",
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
},
.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
}, {
.crypto = {
- .cra_name = "ecb(aes)",
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_u = { .ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- }
- }
+ .base.cra_name = "ecb(aes)",
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
},
.cfg_enc = CIPH_ENCR | MOD_AES | MOD_ECB,
.cfg_dec = CIPH_DECR | MOD_AES | MOD_ECB,
}, {
.crypto = {
- .cra_name = "ctr(aes)",
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_u = { .ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- }
- }
+ .base.cra_name = "ctr(aes)",
+ .base.cra_blocksize = 1,
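+			/* blocksize 1: CTR mode turns AES into a stream cipher */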
+
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
},
.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
.cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
}, {
.crypto = {
- .cra_name = "rfc3686(ctr(aes))",
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_u = { .ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = ablk_rfc3686_setkey,
- .encrypt = ablk_rfc3686_crypt,
- .decrypt = ablk_rfc3686_crypt }
- }
+ .base.cra_name = "rfc3686(ctr(aes))",
+ .base.cra_blocksize = 1,
+
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = ablk_rfc3686_setkey,
+ .encrypt = ablk_rfc3686_crypt,
+ .decrypt = ablk_rfc3686_crypt,
},
.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
.cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
@@ -1421,10 +1411,10 @@ static int __init ixp_module_init(void)
return err;
}
for (i=0; i< num; i++) {
- struct crypto_alg *cra = &ixp4xx_algos[i].crypto;
+ struct skcipher_alg *cra = &ixp4xx_algos[i].crypto;
- if (snprintf(cra->cra_driver_name, CRYPTO_MAX_ALG_NAME,
- "%s"IXP_POSTFIX, cra->cra_name) >=
+ if (snprintf(cra->base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
+ "%s"IXP_POSTFIX, cra->base.cra_name) >=
CRYPTO_MAX_ALG_NAME)
{
continue;
@@ -1434,26 +1424,24 @@ static int __init ixp_module_init(void)
}
/* block ciphers */
- cra->cra_type = &crypto_ablkcipher_type;
- cra->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_ASYNC;
- if (!cra->cra_ablkcipher.setkey)
- cra->cra_ablkcipher.setkey = ablk_setkey;
- if (!cra->cra_ablkcipher.encrypt)
- cra->cra_ablkcipher.encrypt = ablk_encrypt;
- if (!cra->cra_ablkcipher.decrypt)
- cra->cra_ablkcipher.decrypt = ablk_decrypt;
- cra->cra_init = init_tfm_ablk;
-
- cra->cra_ctxsize = sizeof(struct ixp_ctx);
- cra->cra_module = THIS_MODULE;
- cra->cra_alignmask = 3;
- cra->cra_priority = 300;
- cra->cra_exit = exit_tfm;
- if (crypto_register_alg(cra))
+ cra->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC;
+ if (!cra->setkey)
+ cra->setkey = ablk_setkey;
+ if (!cra->encrypt)
+ cra->encrypt = ablk_encrypt;
+ if (!cra->decrypt)
+ cra->decrypt = ablk_decrypt;
+ cra->init = init_tfm_ablk;
+ cra->exit = exit_tfm_ablk;
+
+ cra->base.cra_ctxsize = sizeof(struct ixp_ctx);
+ cra->base.cra_module = THIS_MODULE;
+ cra->base.cra_alignmask = 3;
+ cra->base.cra_priority = 300;
+ if (crypto_register_skcipher(cra))
printk(KERN_ERR "Failed to register '%s'\n",
- cra->cra_name);
+ cra->base.cra_name);
else
ixp4xx_algos[i].registered = 1;
}
@@ -1504,7 +1492,7 @@ static void __exit ixp_module_exit(void)
for (i=0; i< num; i++) {
if (ixp4xx_algos[i].registered)
- crypto_unregister_alg(&ixp4xx_algos[i].crypto);
+ crypto_unregister_skcipher(&ixp4xx_algos[i].crypto);
}
release_ixp_crypto(&pdev->dev);
platform_device_unregister(pdev);
diff --git a/drivers/crypto/marvell/cesa.h b/drivers/crypto/marvell/cesa.h
index d63a6ee905c9..f1ed3b85c0d2 100644
--- a/drivers/crypto/marvell/cesa.h
+++ b/drivers/crypto/marvell/cesa.h
@@ -232,13 +232,13 @@ struct mv_cesa_sec_accel_desc {
};
/**
- * struct mv_cesa_blkcipher_op_ctx - cipher operation context
+ * struct mv_cesa_skcipher_op_ctx - cipher operation context
* @key: cipher key
* @iv: cipher IV
*
* Context associated to a cipher operation.
*/
-struct mv_cesa_blkcipher_op_ctx {
+struct mv_cesa_skcipher_op_ctx {
u32 key[8];
u32 iv[4];
};
@@ -265,7 +265,7 @@ struct mv_cesa_hash_op_ctx {
struct mv_cesa_op_ctx {
struct mv_cesa_sec_accel_desc desc;
union {
- struct mv_cesa_blkcipher_op_ctx blkcipher;
+ struct mv_cesa_skcipher_op_ctx skcipher;
struct mv_cesa_hash_op_ctx hash;
} ctx;
};
diff --git a/drivers/crypto/marvell/cipher.c b/drivers/crypto/marvell/cipher.c
index 84ceddfee76b..d8e8c857770c 100644
--- a/drivers/crypto/marvell/cipher.c
+++ b/drivers/crypto/marvell/cipher.c
@@ -209,7 +209,7 @@ mv_cesa_skcipher_complete(struct crypto_async_request *req)
struct mv_cesa_req *basereq;
basereq = &creq->base;
- memcpy(skreq->iv, basereq->chain.last->op->ctx.blkcipher.iv,
+ memcpy(skreq->iv, basereq->chain.last->op->ctx.skcipher.iv,
ivsize);
} else {
memcpy_fromio(skreq->iv,
@@ -470,7 +470,7 @@ static int mv_cesa_des_op(struct skcipher_request *req,
mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTM_DES,
CESA_SA_DESC_CFG_CRYPTM_MSK);
- memcpy(tmpl->ctx.blkcipher.key, ctx->key, DES_KEY_SIZE);
+ memcpy(tmpl->ctx.skcipher.key, ctx->key, DES_KEY_SIZE);
return mv_cesa_skcipher_queue_req(req, tmpl);
}
@@ -523,7 +523,7 @@ static int mv_cesa_cbc_des_op(struct skcipher_request *req,
mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTCM_CBC,
CESA_SA_DESC_CFG_CRYPTCM_MSK);
- memcpy(tmpl->ctx.blkcipher.iv, req->iv, DES_BLOCK_SIZE);
+ memcpy(tmpl->ctx.skcipher.iv, req->iv, DES_BLOCK_SIZE);
return mv_cesa_des_op(req, tmpl);
}
@@ -575,7 +575,7 @@ static int mv_cesa_des3_op(struct skcipher_request *req,
mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTM_3DES,
CESA_SA_DESC_CFG_CRYPTM_MSK);
- memcpy(tmpl->ctx.blkcipher.key, ctx->key, DES3_EDE_KEY_SIZE);
+ memcpy(tmpl->ctx.skcipher.key, ctx->key, DES3_EDE_KEY_SIZE);
return mv_cesa_skcipher_queue_req(req, tmpl);
}
@@ -628,7 +628,7 @@ struct skcipher_alg mv_cesa_ecb_des3_ede_alg = {
static int mv_cesa_cbc_des3_op(struct skcipher_request *req,
struct mv_cesa_op_ctx *tmpl)
{
- memcpy(tmpl->ctx.blkcipher.iv, req->iv, DES3_EDE_BLOCK_SIZE);
+ memcpy(tmpl->ctx.skcipher.iv, req->iv, DES3_EDE_BLOCK_SIZE);
return mv_cesa_des3_op(req, tmpl);
}
@@ -694,7 +694,7 @@ static int mv_cesa_aes_op(struct skcipher_request *req,
key = ctx->aes.key_enc;
for (i = 0; i < ctx->aes.key_length / sizeof(u32); i++)
- tmpl->ctx.blkcipher.key[i] = cpu_to_le32(key[i]);
+ tmpl->ctx.skcipher.key[i] = cpu_to_le32(key[i]);
if (ctx->aes.key_length == 24)
cfg |= CESA_SA_DESC_CFG_AES_LEN_192;
@@ -755,7 +755,7 @@ static int mv_cesa_cbc_aes_op(struct skcipher_request *req,
{
mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTCM_CBC,
CESA_SA_DESC_CFG_CRYPTCM_MSK);
- memcpy(tmpl->ctx.blkcipher.iv, req->iv, AES_BLOCK_SIZE);
+ memcpy(tmpl->ctx.skcipher.iv, req->iv, AES_BLOCK_SIZE);
return mv_cesa_aes_op(req, tmpl);
}
diff --git a/drivers/crypto/mediatek/mtk-aes.c b/drivers/crypto/mediatek/mtk-aes.c
index 90c9644fb8a8..90880a81c534 100644
--- a/drivers/crypto/mediatek/mtk-aes.c
+++ b/drivers/crypto/mediatek/mtk-aes.c
@@ -11,6 +11,7 @@
#include <crypto/aes.h>
#include <crypto/gcm.h>
+#include <crypto/internal/skcipher.h>
#include "mtk-platform.h"
#define AES_QUEUE_SIZE 512
@@ -414,7 +415,7 @@ exit:
static void mtk_aes_info_init(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
size_t len)
{
- struct ablkcipher_request *req = ablkcipher_request_cast(aes->areq);
+ struct skcipher_request *req = skcipher_request_cast(aes->areq);
struct mtk_aes_base_ctx *ctx = aes->ctx;
struct mtk_aes_info *info = &ctx->info;
u32 cnt = 0;
@@ -450,7 +451,7 @@ static void mtk_aes_info_init(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
return;
}
- mtk_aes_write_state_le(info->state + ctx->keylen, req->info,
+ mtk_aes_write_state_le(info->state + ctx->keylen, (void *)req->iv,
AES_BLOCK_SIZE);
ctr:
info->tfm[0] += AES_TFM_SIZE(SIZE_IN_WORDS(AES_BLOCK_SIZE));
@@ -552,13 +553,13 @@ static int mtk_aes_transfer_complete(struct mtk_cryp *cryp,
static int mtk_aes_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
- struct ablkcipher_request *req = ablkcipher_request_cast(aes->areq);
- struct mtk_aes_reqctx *rctx = ablkcipher_request_ctx(req);
+ struct skcipher_request *req = skcipher_request_cast(aes->areq);
+ struct mtk_aes_reqctx *rctx = skcipher_request_ctx(req);
mtk_aes_set_mode(aes, rctx);
aes->resume = mtk_aes_transfer_complete;
- return mtk_aes_dma(cryp, aes, req->src, req->dst, req->nbytes);
+ return mtk_aes_dma(cryp, aes, req->src, req->dst, req->cryptlen);
}
static inline struct mtk_aes_ctr_ctx *
@@ -571,7 +572,7 @@ static int mtk_aes_ctr_transfer(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
struct mtk_aes_base_ctx *ctx = aes->ctx;
struct mtk_aes_ctr_ctx *cctx = mtk_aes_ctr_ctx_cast(ctx);
- struct ablkcipher_request *req = ablkcipher_request_cast(aes->areq);
+ struct skcipher_request *req = skcipher_request_cast(aes->areq);
struct scatterlist *src, *dst;
u32 start, end, ctr, blocks;
size_t datalen;
@@ -579,11 +580,11 @@ static int mtk_aes_ctr_transfer(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
/* Check for transfer completion. */
cctx->offset += aes->total;
- if (cctx->offset >= req->nbytes)
+ if (cctx->offset >= req->cryptlen)
return mtk_aes_transfer_complete(cryp, aes);
/* Compute data length. */
- datalen = req->nbytes - cctx->offset;
+ datalen = req->cryptlen - cctx->offset;
blocks = DIV_ROUND_UP(datalen, AES_BLOCK_SIZE);
ctr = be32_to_cpu(cctx->iv[3]);
@@ -591,7 +592,7 @@ static int mtk_aes_ctr_transfer(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
start = ctr;
end = start + blocks - 1;
if (end < start) {
- ctr |= 0xffffffff;
+ ctr = 0xffffffff;
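+			/*
+			 * The counter wraps within this request: clamp to the
+			 * last value and process the remaining 2^32 - start
+			 * blocks as one fragment before continuing.
+			 */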
datalen = AES_BLOCK_SIZE * -start;
fragmented = true;
}
@@ -620,12 +621,12 @@ static int mtk_aes_ctr_transfer(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
static int mtk_aes_ctr_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
struct mtk_aes_ctr_ctx *cctx = mtk_aes_ctr_ctx_cast(aes->ctx);
- struct ablkcipher_request *req = ablkcipher_request_cast(aes->areq);
- struct mtk_aes_reqctx *rctx = ablkcipher_request_ctx(req);
+ struct skcipher_request *req = skcipher_request_cast(aes->areq);
+ struct mtk_aes_reqctx *rctx = skcipher_request_ctx(req);
mtk_aes_set_mode(aes, rctx);
- memcpy(cctx->iv, req->info, AES_BLOCK_SIZE);
+ memcpy(cctx->iv, req->iv, AES_BLOCK_SIZE);
cctx->offset = 0;
aes->total = 0;
aes->resume = mtk_aes_ctr_transfer;
@@ -634,10 +635,10 @@ static int mtk_aes_ctr_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
}
/* Check and set the AES key to transform state buffer */
-static int mtk_aes_setkey(struct crypto_ablkcipher *tfm,
+static int mtk_aes_setkey(struct crypto_skcipher *tfm,
const u8 *key, u32 keylen)
{
- struct mtk_aes_base_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+ struct mtk_aes_base_ctx *ctx = crypto_skcipher_ctx(tfm);
switch (keylen) {
case AES_KEYSIZE_128:
@@ -651,7 +652,7 @@ static int mtk_aes_setkey(struct crypto_ablkcipher *tfm,
break;
default:
- crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
@@ -661,10 +662,10 @@ static int mtk_aes_setkey(struct crypto_ablkcipher *tfm,
return 0;
}
-static int mtk_aes_crypt(struct ablkcipher_request *req, u64 mode)
+static int mtk_aes_crypt(struct skcipher_request *req, u64 mode)
{
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
- struct mtk_aes_base_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ struct mtk_aes_base_ctx *ctx = crypto_skcipher_ctx(skcipher);
struct mtk_aes_reqctx *rctx;
struct mtk_cryp *cryp;
@@ -672,185 +673,168 @@ static int mtk_aes_crypt(struct ablkcipher_request *req, u64 mode)
if (!cryp)
return -ENODEV;
- rctx = ablkcipher_request_ctx(req);
+ rctx = skcipher_request_ctx(req);
rctx->mode = mode;
return mtk_aes_handle_queue(cryp, !(mode & AES_FLAGS_ENCRYPT),
&req->base);
}
-static int mtk_aes_ecb_encrypt(struct ablkcipher_request *req)
+static int mtk_aes_ecb_encrypt(struct skcipher_request *req)
{
return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_ECB);
}
-static int mtk_aes_ecb_decrypt(struct ablkcipher_request *req)
+static int mtk_aes_ecb_decrypt(struct skcipher_request *req)
{
return mtk_aes_crypt(req, AES_FLAGS_ECB);
}
-static int mtk_aes_cbc_encrypt(struct ablkcipher_request *req)
+static int mtk_aes_cbc_encrypt(struct skcipher_request *req)
{
return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_CBC);
}
-static int mtk_aes_cbc_decrypt(struct ablkcipher_request *req)
+static int mtk_aes_cbc_decrypt(struct skcipher_request *req)
{
return mtk_aes_crypt(req, AES_FLAGS_CBC);
}
-static int mtk_aes_ctr_encrypt(struct ablkcipher_request *req)
+static int mtk_aes_ctr_encrypt(struct skcipher_request *req)
{
return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_CTR);
}
-static int mtk_aes_ctr_decrypt(struct ablkcipher_request *req)
+static int mtk_aes_ctr_decrypt(struct skcipher_request *req)
{
return mtk_aes_crypt(req, AES_FLAGS_CTR);
}
-static int mtk_aes_ofb_encrypt(struct ablkcipher_request *req)
+static int mtk_aes_ofb_encrypt(struct skcipher_request *req)
{
return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_OFB);
}
-static int mtk_aes_ofb_decrypt(struct ablkcipher_request *req)
+static int mtk_aes_ofb_decrypt(struct skcipher_request *req)
{
return mtk_aes_crypt(req, AES_FLAGS_OFB);
}
-static int mtk_aes_cfb_encrypt(struct ablkcipher_request *req)
+static int mtk_aes_cfb_encrypt(struct skcipher_request *req)
{
return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_CFB128);
}
-static int mtk_aes_cfb_decrypt(struct ablkcipher_request *req)
+static int mtk_aes_cfb_decrypt(struct skcipher_request *req)
{
return mtk_aes_crypt(req, AES_FLAGS_CFB128);
}
-static int mtk_aes_cra_init(struct crypto_tfm *tfm)
+static int mtk_aes_init_tfm(struct crypto_skcipher *tfm)
{
- struct mtk_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct mtk_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
- tfm->crt_ablkcipher.reqsize = sizeof(struct mtk_aes_reqctx);
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct mtk_aes_reqctx));
ctx->base.start = mtk_aes_start;
return 0;
}
-static int mtk_aes_ctr_cra_init(struct crypto_tfm *tfm)
+static int mtk_aes_ctr_init_tfm(struct crypto_skcipher *tfm)
{
- struct mtk_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct mtk_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
- tfm->crt_ablkcipher.reqsize = sizeof(struct mtk_aes_reqctx);
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct mtk_aes_reqctx));
ctx->base.start = mtk_aes_ctr_start;
return 0;
}
-static struct crypto_alg aes_algs[] = {
+static struct skcipher_alg aes_algs[] = {
{
- .cra_name = "cbc(aes)",
- .cra_driver_name = "cbc-aes-mtk",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_init = mtk_aes_cra_init,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct mtk_aes_ctx),
- .cra_alignmask = 0xf,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u.ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = mtk_aes_setkey,
- .encrypt = mtk_aes_cbc_encrypt,
- .decrypt = mtk_aes_cbc_decrypt,
- .ivsize = AES_BLOCK_SIZE,
- }
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "cbc-aes-mtk",
+ .base.cra_priority = 400,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct mtk_aes_ctx),
+ .base.cra_alignmask = 0xf,
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = mtk_aes_setkey,
+ .encrypt = mtk_aes_cbc_encrypt,
+ .decrypt = mtk_aes_cbc_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .init = mtk_aes_init_tfm,
},
{
- .cra_name = "ecb(aes)",
- .cra_driver_name = "ecb-aes-mtk",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_init = mtk_aes_cra_init,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct mtk_aes_ctx),
- .cra_alignmask = 0xf,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u.ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = mtk_aes_setkey,
- .encrypt = mtk_aes_ecb_encrypt,
- .decrypt = mtk_aes_ecb_decrypt,
- }
+ .base.cra_name = "ecb(aes)",
+ .base.cra_driver_name = "ecb-aes-mtk",
+ .base.cra_priority = 400,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct mtk_aes_ctx),
+ .base.cra_alignmask = 0xf,
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = mtk_aes_setkey,
+ .encrypt = mtk_aes_ecb_encrypt,
+ .decrypt = mtk_aes_ecb_decrypt,
+ .init = mtk_aes_init_tfm,
},
{
- .cra_name = "ctr(aes)",
- .cra_driver_name = "ctr-aes-mtk",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_init = mtk_aes_ctr_cra_init,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct mtk_aes_ctr_ctx),
- .cra_alignmask = 0xf,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u.ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = mtk_aes_setkey,
- .encrypt = mtk_aes_ctr_encrypt,
- .decrypt = mtk_aes_ctr_decrypt,
- }
+ .base.cra_name = "ctr(aes)",
+ .base.cra_driver_name = "ctr-aes-mtk",
+ .base.cra_priority = 400,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct mtk_aes_ctx),
+ .base.cra_alignmask = 0xf,
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = mtk_aes_setkey,
+ .encrypt = mtk_aes_ctr_encrypt,
+ .decrypt = mtk_aes_ctr_decrypt,
+ .init = mtk_aes_ctr_init_tfm,
},
{
- .cra_name = "ofb(aes)",
- .cra_driver_name = "ofb-aes-mtk",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_init = mtk_aes_cra_init,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct mtk_aes_ctx),
- .cra_alignmask = 0xf,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u.ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = mtk_aes_setkey,
- .encrypt = mtk_aes_ofb_encrypt,
- .decrypt = mtk_aes_ofb_decrypt,
- }
+ .base.cra_name = "ofb(aes)",
+ .base.cra_driver_name = "ofb-aes-mtk",
+ .base.cra_priority = 400,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct mtk_aes_ctx),
+ .base.cra_alignmask = 0xf,
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = mtk_aes_setkey,
+ .encrypt = mtk_aes_ofb_encrypt,
+ .decrypt = mtk_aes_ofb_decrypt,
},
{
- .cra_name = "cfb(aes)",
- .cra_driver_name = "cfb-aes-mtk",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_init = mtk_aes_cra_init,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct mtk_aes_ctx),
- .cra_alignmask = 0xf,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u.ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = mtk_aes_setkey,
- .encrypt = mtk_aes_cfb_encrypt,
- .decrypt = mtk_aes_cfb_decrypt,
- }
+ .base.cra_name = "cfb(aes)",
+ .base.cra_driver_name = "cfb-aes-mtk",
+ .base.cra_priority = 400,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct mtk_aes_ctx),
+ .base.cra_alignmask = 0xf,
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = mtk_aes_setkey,
+ .encrypt = mtk_aes_cfb_encrypt,
+ .decrypt = mtk_aes_cfb_decrypt,
},
};
@@ -1259,7 +1243,7 @@ static void mtk_aes_unregister_algs(void)
crypto_unregister_aead(&aes_gcm_alg);
for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
- crypto_unregister_alg(&aes_algs[i]);
+ crypto_unregister_skcipher(&aes_algs[i]);
}
static int mtk_aes_register_algs(void)
@@ -1267,7 +1251,7 @@ static int mtk_aes_register_algs(void)
int err, i;
for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
- err = crypto_register_alg(&aes_algs[i]);
+ err = crypto_register_skcipher(&aes_algs[i]);
if (err)
goto err_aes_algs;
}
@@ -1280,7 +1264,7 @@ static int mtk_aes_register_algs(void)
err_aes_algs:
for (; i--; )
- crypto_unregister_alg(&aes_algs[i]);
+ crypto_unregister_skcipher(&aes_algs[i]);
return err;
}
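
That completes the mediatek conversion, and it is the template for every driver in this series: the `cra_u.ablkcipher` union is gone, cipher-specific fields (keysize bounds, ivsize, setkey/encrypt/decrypt) become top-level `skcipher_alg` members, generic fields move under `.base`, `cra_type` disappears entirely, and the untyped `cra_init` is replaced by a typed `.init` that sets the request context size. A condensed sketch of the resulting shape — the `example_*` names are placeholders, not from any patch here, and the stub bodies stand in for real hardware queueing:

#include <crypto/aes.h>
#include <crypto/internal/skcipher.h>
#include <linux/module.h>

struct example_ctx { u8 key[AES_MAX_KEY_SIZE]; unsigned int keylen; };
struct example_reqctx { u64 mode; };

static int example_setkey(struct crypto_skcipher *tfm, const u8 *key,
			  unsigned int keylen)
{
	struct example_ctx *ctx = crypto_skcipher_ctx(tfm);

	if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
	    keylen != AES_KEYSIZE_256)
		return -EINVAL;
	memcpy(ctx->key, key, keylen);
	ctx->keylen = keylen;
	return 0;
}

static int example_crypt(struct skcipher_request *req)
{
	return -EOPNOTSUPP;	/* a real driver queues to hardware here */
}

static int example_init_tfm(struct crypto_skcipher *tfm)
{
	/* replaces cra_init poking at tfm->crt_ablkcipher.reqsize */
	crypto_skcipher_set_reqsize(tfm, sizeof(struct example_reqctx));
	return 0;
}

static struct skcipher_alg example_alg = {
	.base.cra_name		= "cbc(aes)",
	.base.cra_driver_name	= "cbc-aes-example",
	.base.cra_priority	= 400,
	.base.cra_flags		= CRYPTO_ALG_ASYNC,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct example_ctx),
	.base.cra_module	= THIS_MODULE,

	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.setkey			= example_setkey,
	.encrypt		= example_crypt,
	.decrypt		= example_crypt,	/* sketch: same stub */
	.init			= example_init_tfm,
};

Registration then goes through crypto_register_skcipher()/crypto_unregister_skcipher() — or the plural *_skciphers() helpers for arrays — exactly as the mtk hunks above switch to.
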
diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
index bf8d2197bc11..f438b425c655 100644
--- a/drivers/crypto/mxs-dcp.c
+++ b/drivers/crypto/mxs-dcp.c
@@ -211,11 +211,11 @@ static int mxs_dcp_start_dma(struct dcp_async_ctx *actx)
* Encryption (AES128)
*/
static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
- struct ablkcipher_request *req, int init)
+ struct skcipher_request *req, int init)
{
struct dcp *sdcp = global_sdcp;
struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
- struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
+ struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
int ret;
dma_addr_t key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key,
@@ -274,9 +274,9 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
{
struct dcp *sdcp = global_sdcp;
- struct ablkcipher_request *req = ablkcipher_request_cast(arq);
+ struct skcipher_request *req = skcipher_request_cast(arq);
struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
- struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
+ struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
struct scatterlist *dst = req->dst;
struct scatterlist *src = req->src;
@@ -305,7 +305,7 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
if (!rctx->ecb) {
/* Copy the CBC IV just past the key. */
- memcpy(key + AES_KEYSIZE_128, req->info, AES_KEYSIZE_128);
+ memcpy(key + AES_KEYSIZE_128, req->iv, AES_KEYSIZE_128);
/* CBC needs the INIT set. */
init = 1;
} else {
@@ -316,10 +316,10 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
src_buf = sg_virt(src);
len = sg_dma_len(src);
tlen += len;
- limit_hit = tlen > req->nbytes;
+ limit_hit = tlen > req->cryptlen;
if (limit_hit)
- len = req->nbytes - (tlen - len);
+ len = req->cryptlen - (tlen - len);
do {
if (actx->fill + len > out_off)
@@ -375,10 +375,10 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
/* Copy the IV for CBC for chaining */
if (!rctx->ecb) {
if (rctx->enc)
- memcpy(req->info, out_buf+(last_out_len-AES_BLOCK_SIZE),
+ memcpy(req->iv, out_buf+(last_out_len-AES_BLOCK_SIZE),
AES_BLOCK_SIZE);
else
- memcpy(req->info, in_buf+(last_out_len-AES_BLOCK_SIZE),
+ memcpy(req->iv, in_buf+(last_out_len-AES_BLOCK_SIZE),
AES_BLOCK_SIZE);
}
@@ -422,17 +422,17 @@ static int dcp_chan_thread_aes(void *data)
return 0;
}
-static int mxs_dcp_block_fallback(struct ablkcipher_request *req, int enc)
+static int mxs_dcp_block_fallback(struct skcipher_request *req, int enc)
{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct dcp_async_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct dcp_async_ctx *ctx = crypto_skcipher_ctx(tfm);
SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
int ret;
skcipher_request_set_sync_tfm(subreq, ctx->fallback);
skcipher_request_set_callback(subreq, req->base.flags, NULL, NULL);
skcipher_request_set_crypt(subreq, req->src, req->dst,
- req->nbytes, req->info);
+ req->cryptlen, req->iv);
if (enc)
ret = crypto_skcipher_encrypt(subreq);
@@ -444,12 +444,12 @@ static int mxs_dcp_block_fallback(struct ablkcipher_request *req, int enc)
return ret;
}
-static int mxs_dcp_aes_enqueue(struct ablkcipher_request *req, int enc, int ecb)
+static int mxs_dcp_aes_enqueue(struct skcipher_request *req, int enc, int ecb)
{
struct dcp *sdcp = global_sdcp;
struct crypto_async_request *arq = &req->base;
struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
- struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
+ struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
int ret;
if (unlikely(actx->key_len != AES_KEYSIZE_128))
@@ -468,30 +468,30 @@ static int mxs_dcp_aes_enqueue(struct ablkcipher_request *req, int enc, int ecb)
return ret;
}
-static int mxs_dcp_aes_ecb_decrypt(struct ablkcipher_request *req)
+static int mxs_dcp_aes_ecb_decrypt(struct skcipher_request *req)
{
return mxs_dcp_aes_enqueue(req, 0, 1);
}
-static int mxs_dcp_aes_ecb_encrypt(struct ablkcipher_request *req)
+static int mxs_dcp_aes_ecb_encrypt(struct skcipher_request *req)
{
return mxs_dcp_aes_enqueue(req, 1, 1);
}
-static int mxs_dcp_aes_cbc_decrypt(struct ablkcipher_request *req)
+static int mxs_dcp_aes_cbc_decrypt(struct skcipher_request *req)
{
return mxs_dcp_aes_enqueue(req, 0, 0);
}
-static int mxs_dcp_aes_cbc_encrypt(struct ablkcipher_request *req)
+static int mxs_dcp_aes_cbc_encrypt(struct skcipher_request *req)
{
return mxs_dcp_aes_enqueue(req, 1, 0);
}
-static int mxs_dcp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+static int mxs_dcp_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int len)
{
- struct dcp_async_ctx *actx = crypto_ablkcipher_ctx(tfm);
+ struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);
unsigned int ret;
/*
@@ -525,10 +525,10 @@ static int mxs_dcp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
return ret;
}
-static int mxs_dcp_aes_fallback_init(struct crypto_tfm *tfm)
+static int mxs_dcp_aes_fallback_init_tfm(struct crypto_skcipher *tfm)
{
- const char *name = crypto_tfm_alg_name(tfm);
- struct dcp_async_ctx *actx = crypto_tfm_ctx(tfm);
+ const char *name = crypto_tfm_alg_name(crypto_skcipher_tfm(tfm));
+ struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);
struct crypto_sync_skcipher *blk;
blk = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
@@ -536,13 +536,13 @@ static int mxs_dcp_aes_fallback_init(struct crypto_tfm *tfm)
return PTR_ERR(blk);
actx->fallback = blk;
- tfm->crt_ablkcipher.reqsize = sizeof(struct dcp_aes_req_ctx);
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct dcp_aes_req_ctx));
return 0;
}
-static void mxs_dcp_aes_fallback_exit(struct crypto_tfm *tfm)
+static void mxs_dcp_aes_fallback_exit_tfm(struct crypto_skcipher *tfm)
{
- struct dcp_async_ctx *actx = crypto_tfm_ctx(tfm);
+ struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);
crypto_free_sync_skcipher(actx->fallback);
}
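
The mxs-dcp fallback path above is the canonical sync-skcipher fallback idiom: allocate a `crypto_sync_skcipher` at init time with `CRYPTO_ALG_NEED_FALLBACK`, then run individual requests through an on-stack subrequest. A minimal sketch of that round trip, assuming a hypothetical context that holds only the fallback handle:

#include <crypto/skcipher.h>

struct fb_ctx {				/* hypothetical tfm context */
	struct crypto_sync_skcipher *fallback;
};

static int fb_do_one(struct skcipher_request *req, bool enc)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct fb_ctx *ctx = crypto_skcipher_ctx(tfm);
	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
	int err;

	skcipher_request_set_sync_tfm(subreq, ctx->fallback);
	skcipher_request_set_callback(subreq, req->base.flags, NULL, NULL);
	skcipher_request_set_crypt(subreq, req->src, req->dst,
				   req->cryptlen, req->iv);
	err = enc ? crypto_skcipher_encrypt(subreq)
		  : crypto_skcipher_decrypt(subreq);
	skcipher_request_zero(subreq);	/* wipe request state off the stack */
	return err;
}
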
@@ -854,54 +854,44 @@ static void dcp_sha_cra_exit(struct crypto_tfm *tfm)
}
/* AES 128 ECB and AES 128 CBC */
-static struct crypto_alg dcp_aes_algs[] = {
+static struct skcipher_alg dcp_aes_algs[] = {
{
- .cra_name = "ecb(aes)",
- .cra_driver_name = "ecb-aes-dcp",
- .cra_priority = 400,
- .cra_alignmask = 15,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC |
+ .base.cra_name = "ecb(aes)",
+ .base.cra_driver_name = "ecb-aes-dcp",
+ .base.cra_priority = 400,
+ .base.cra_alignmask = 15,
+ .base.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
- .cra_init = mxs_dcp_aes_fallback_init,
- .cra_exit = mxs_dcp_aes_fallback_exit,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct dcp_async_ctx),
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = mxs_dcp_aes_setkey,
- .encrypt = mxs_dcp_aes_ecb_encrypt,
- .decrypt = mxs_dcp_aes_ecb_decrypt
- },
- },
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct dcp_async_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = mxs_dcp_aes_setkey,
+ .encrypt = mxs_dcp_aes_ecb_encrypt,
+ .decrypt = mxs_dcp_aes_ecb_decrypt,
+ .init = mxs_dcp_aes_fallback_init_tfm,
+ .exit = mxs_dcp_aes_fallback_exit_tfm,
}, {
- .cra_name = "cbc(aes)",
- .cra_driver_name = "cbc-aes-dcp",
- .cra_priority = 400,
- .cra_alignmask = 15,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC |
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "cbc-aes-dcp",
+ .base.cra_priority = 400,
+ .base.cra_alignmask = 15,
+ .base.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
- .cra_init = mxs_dcp_aes_fallback_init,
- .cra_exit = mxs_dcp_aes_fallback_exit,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct dcp_async_ctx),
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = mxs_dcp_aes_setkey,
- .encrypt = mxs_dcp_aes_cbc_encrypt,
- .decrypt = mxs_dcp_aes_cbc_decrypt,
- .ivsize = AES_BLOCK_SIZE,
- },
- },
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct dcp_async_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = mxs_dcp_aes_setkey,
+ .encrypt = mxs_dcp_aes_cbc_encrypt,
+ .decrypt = mxs_dcp_aes_cbc_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .init = mxs_dcp_aes_fallback_init_tfm,
+ .exit = mxs_dcp_aes_fallback_exit_tfm,
},
};
@@ -1104,8 +1094,8 @@ static int mxs_dcp_probe(struct platform_device *pdev)
sdcp->caps = readl(sdcp->base + MXS_DCP_CAPABILITY1);
if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128) {
- ret = crypto_register_algs(dcp_aes_algs,
- ARRAY_SIZE(dcp_aes_algs));
+ ret = crypto_register_skciphers(dcp_aes_algs,
+ ARRAY_SIZE(dcp_aes_algs));
if (ret) {
/* Failed to register algorithm. */
dev_err(dev, "Failed to register AES crypto!\n");
@@ -1139,7 +1129,7 @@ err_unregister_sha1:
err_unregister_aes:
if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
- crypto_unregister_algs(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));
+ crypto_unregister_skciphers(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));
err_destroy_aes_thread:
kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);
@@ -1164,7 +1154,7 @@ static int mxs_dcp_remove(struct platform_device *pdev)
crypto_unregister_ahash(&dcp_sha1_alg);
if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
- crypto_unregister_algs(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));
+ crypto_unregister_skciphers(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));
kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);
kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);
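
With mxs-dcp done, note the uniform request-level renames that every hunk in this series repeats: `ablkcipher_request` becomes `skcipher_request`, `req->nbytes` becomes `req->cryptlen`, `req->info` becomes `req->iv`, and `ablkcipher_request_ctx()` becomes `skcipher_request_ctx()`. The CBC output-IV copy that mxs-dcp does by hand reduces to the pattern below (illustrative helper, not from the patch):

#include <crypto/aes.h>
#include <crypto/skcipher.h>
#include <linux/string.h>

/* CBC chaining: the last ciphertext block becomes the caller's next IV */
static void example_save_cbc_iv(struct skcipher_request *req,
				const u8 *buf, unsigned int buf_len)
{
	memcpy(req->iv, buf + buf_len - AES_BLOCK_SIZE, AES_BLOCK_SIZE);
}
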
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index dc15b06e96ab..63bd565048f4 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -23,6 +23,7 @@
#include <linux/sched.h>
#include <crypto/internal/hash.h>
+#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
@@ -381,8 +382,8 @@ static int n2_hash_cra_init(struct crypto_tfm *tfm)
fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0,
CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(fallback_tfm)) {
- pr_warning("Fallback driver '%s' could not be loaded!\n",
- fallback_driver_name);
+ pr_warn("Fallback driver '%s' could not be loaded!\n",
+ fallback_driver_name);
err = PTR_ERR(fallback_tfm);
goto out;
}
@@ -418,16 +419,16 @@ static int n2_hmac_cra_init(struct crypto_tfm *tfm)
fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0,
CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(fallback_tfm)) {
- pr_warning("Fallback driver '%s' could not be loaded!\n",
- fallback_driver_name);
+ pr_warn("Fallback driver '%s' could not be loaded!\n",
+ fallback_driver_name);
err = PTR_ERR(fallback_tfm);
goto out;
}
child_shash = crypto_alloc_shash(n2alg->child_alg, 0, 0);
if (IS_ERR(child_shash)) {
- pr_warning("Child shash '%s' could not be loaded!\n",
- n2alg->child_alg);
+ pr_warn("Child shash '%s' could not be loaded!\n",
+ n2alg->child_alg);
err = PTR_ERR(child_shash);
goto out_free_fallback;
}
@@ -657,7 +658,7 @@ static int n2_hmac_async_digest(struct ahash_request *req)
ctx->hash_key_len);
}
-struct n2_cipher_context {
+struct n2_skcipher_context {
int key_len;
int enc_type;
union {
@@ -683,7 +684,7 @@ struct n2_crypto_chunk {
};
struct n2_request_context {
- struct ablkcipher_walk walk;
+ struct skcipher_walk walk;
struct list_head chunk_list;
struct n2_crypto_chunk chunk;
u8 temp_iv[16];
@@ -708,29 +709,29 @@ struct n2_request_context {
* is not a valid sequence.
*/
-struct n2_cipher_alg {
+struct n2_skcipher_alg {
struct list_head entry;
u8 enc_type;
- struct crypto_alg alg;
+ struct skcipher_alg skcipher;
};
-static inline struct n2_cipher_alg *n2_cipher_alg(struct crypto_tfm *tfm)
+static inline struct n2_skcipher_alg *n2_skcipher_alg(struct crypto_skcipher *tfm)
{
- struct crypto_alg *alg = tfm->__crt_alg;
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
- return container_of(alg, struct n2_cipher_alg, alg);
+ return container_of(alg, struct n2_skcipher_alg, skcipher);
}
-struct n2_cipher_request_context {
- struct ablkcipher_walk walk;
+struct n2_skcipher_request_context {
+ struct skcipher_walk walk;
};
-static int n2_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+static int n2_aes_setkey(struct crypto_skcipher *skcipher, const u8 *key,
unsigned int keylen)
{
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
- struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
- struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
+ struct n2_skcipher_context *ctx = crypto_tfm_ctx(tfm);
+ struct n2_skcipher_alg *n2alg = n2_skcipher_alg(skcipher);
ctx->enc_type = (n2alg->enc_type & ENC_TYPE_CHAINING_MASK);
@@ -745,7 +746,7 @@ static int n2_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
ctx->enc_type |= ENC_TYPE_ALG_AES256;
break;
default:
- crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
@@ -754,15 +755,15 @@ static int n2_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
return 0;
}
-static int n2_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+static int n2_des_setkey(struct crypto_skcipher *skcipher, const u8 *key,
unsigned int keylen)
{
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
- struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
- struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
+ struct n2_skcipher_context *ctx = crypto_tfm_ctx(tfm);
+ struct n2_skcipher_alg *n2alg = n2_skcipher_alg(skcipher);
int err;
- err = verify_ablkcipher_des_key(cipher, key);
+ err = verify_skcipher_des_key(skcipher, key);
if (err)
return err;
@@ -773,15 +774,15 @@ static int n2_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
return 0;
}
-static int n2_3des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+static int n2_3des_setkey(struct crypto_skcipher *skcipher, const u8 *key,
unsigned int keylen)
{
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
- struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
- struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
+ struct n2_skcipher_context *ctx = crypto_tfm_ctx(tfm);
+ struct n2_skcipher_alg *n2alg = n2_skcipher_alg(skcipher);
int err;
- err = verify_ablkcipher_des3_key(cipher, key);
+ err = verify_skcipher_des3_key(skcipher, key);
if (err)
return err;
@@ -792,12 +793,12 @@ static int n2_3des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
return 0;
}
-static int n2_arc4_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+static int n2_arc4_setkey(struct crypto_skcipher *skcipher, const u8 *key,
unsigned int keylen)
{
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
- struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
- struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
+ struct n2_skcipher_context *ctx = crypto_tfm_ctx(tfm);
+ struct n2_skcipher_alg *n2alg = n2_skcipher_alg(skcipher);
u8 *s = ctx->key.arc4;
u8 *x = s + 256;
u8 *y = x + 1;
@@ -822,7 +823,7 @@ static int n2_arc4_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
return 0;
}
-static inline int cipher_descriptor_len(int nbytes, unsigned int block_size)
+static inline int skcipher_descriptor_len(int nbytes, unsigned int block_size)
{
int this_len = nbytes;
@@ -830,10 +831,11 @@ static inline int cipher_descriptor_len(int nbytes, unsigned int block_size)
return this_len > (1 << 16) ? (1 << 16) : this_len;
}
-static int __n2_crypt_chunk(struct crypto_tfm *tfm, struct n2_crypto_chunk *cp,
+static int __n2_crypt_chunk(struct crypto_skcipher *skcipher,
+ struct n2_crypto_chunk *cp,
struct spu_queue *qp, bool encrypt)
{
- struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
+ struct n2_skcipher_context *ctx = crypto_skcipher_ctx(skcipher);
struct cwq_initial_entry *ent;
bool in_place;
int i;
@@ -877,18 +879,17 @@ static int __n2_crypt_chunk(struct crypto_tfm *tfm, struct n2_crypto_chunk *cp,
return (spu_queue_submit(qp, ent) != HV_EOK) ? -EINVAL : 0;
}
-static int n2_compute_chunks(struct ablkcipher_request *req)
+static int n2_compute_chunks(struct skcipher_request *req)
{
- struct n2_request_context *rctx = ablkcipher_request_ctx(req);
- struct ablkcipher_walk *walk = &rctx->walk;
+ struct n2_request_context *rctx = skcipher_request_ctx(req);
+ struct skcipher_walk *walk = &rctx->walk;
struct n2_crypto_chunk *chunk;
unsigned long dest_prev;
unsigned int tot_len;
bool prev_in_place;
int err, nbytes;
- ablkcipher_walk_init(walk, req->dst, req->src, req->nbytes);
- err = ablkcipher_walk_phys(req, walk);
+ err = skcipher_walk_async(walk, req);
if (err)
return err;
@@ -910,12 +911,12 @@ static int n2_compute_chunks(struct ablkcipher_request *req)
bool in_place;
int this_len;
- src_paddr = (page_to_phys(walk->src.page) +
- walk->src.offset);
- dest_paddr = (page_to_phys(walk->dst.page) +
- walk->dst.offset);
+ src_paddr = (page_to_phys(walk->src.phys.page) +
+ walk->src.phys.offset);
+ dest_paddr = (page_to_phys(walk->dst.phys.page) +
+ walk->dst.phys.offset);
in_place = (src_paddr == dest_paddr);
- this_len = cipher_descriptor_len(nbytes, walk->blocksize);
+ this_len = skcipher_descriptor_len(nbytes, walk->blocksize);
if (chunk->arr_len != 0) {
if (in_place != prev_in_place ||
@@ -946,7 +947,7 @@ static int n2_compute_chunks(struct ablkcipher_request *req)
prev_in_place = in_place;
tot_len += this_len;
- err = ablkcipher_walk_done(req, walk, nbytes - this_len);
+ err = skcipher_walk_done(walk, nbytes - this_len);
if (err)
break;
}
@@ -958,15 +959,14 @@ static int n2_compute_chunks(struct ablkcipher_request *req)
return err;
}
-static void n2_chunk_complete(struct ablkcipher_request *req, void *final_iv)
+static void n2_chunk_complete(struct skcipher_request *req, void *final_iv)
{
- struct n2_request_context *rctx = ablkcipher_request_ctx(req);
+ struct n2_request_context *rctx = skcipher_request_ctx(req);
struct n2_crypto_chunk *c, *tmp;
if (final_iv)
memcpy(rctx->walk.iv, final_iv, rctx->walk.blocksize);
- ablkcipher_walk_complete(&rctx->walk);
list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) {
list_del(&c->entry);
if (unlikely(c != &rctx->chunk))
@@ -975,10 +975,10 @@ static void n2_chunk_complete(struct ablkcipher_request *req, void *final_iv)
}
-static int n2_do_ecb(struct ablkcipher_request *req, bool encrypt)
+static int n2_do_ecb(struct skcipher_request *req, bool encrypt)
{
- struct n2_request_context *rctx = ablkcipher_request_ctx(req);
- struct crypto_tfm *tfm = req->base.tfm;
+ struct n2_request_context *rctx = skcipher_request_ctx(req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
int err = n2_compute_chunks(req);
struct n2_crypto_chunk *c, *tmp;
unsigned long flags, hv_ret;
@@ -1017,20 +1017,20 @@ out:
return err;
}
-static int n2_encrypt_ecb(struct ablkcipher_request *req)
+static int n2_encrypt_ecb(struct skcipher_request *req)
{
return n2_do_ecb(req, true);
}
-static int n2_decrypt_ecb(struct ablkcipher_request *req)
+static int n2_decrypt_ecb(struct skcipher_request *req)
{
return n2_do_ecb(req, false);
}
-static int n2_do_chaining(struct ablkcipher_request *req, bool encrypt)
+static int n2_do_chaining(struct skcipher_request *req, bool encrypt)
{
- struct n2_request_context *rctx = ablkcipher_request_ctx(req);
- struct crypto_tfm *tfm = req->base.tfm;
+ struct n2_request_context *rctx = skcipher_request_ctx(req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
unsigned long flags, hv_ret, iv_paddr;
int err = n2_compute_chunks(req);
struct n2_crypto_chunk *c, *tmp;
@@ -1107,32 +1107,32 @@ out:
return err;
}
-static int n2_encrypt_chaining(struct ablkcipher_request *req)
+static int n2_encrypt_chaining(struct skcipher_request *req)
{
return n2_do_chaining(req, true);
}
-static int n2_decrypt_chaining(struct ablkcipher_request *req)
+static int n2_decrypt_chaining(struct skcipher_request *req)
{
return n2_do_chaining(req, false);
}
-struct n2_cipher_tmpl {
+struct n2_skcipher_tmpl {
const char *name;
const char *drv_name;
u8 block_size;
u8 enc_type;
- struct ablkcipher_alg ablkcipher;
+ struct skcipher_alg skcipher;
};
-static const struct n2_cipher_tmpl cipher_tmpls[] = {
+static const struct n2_skcipher_tmpl skcipher_tmpls[] = {
/* ARC4: only ECB is supported (chaining bits ignored) */
{ .name = "ecb(arc4)",
.drv_name = "ecb-arc4",
.block_size = 1,
.enc_type = (ENC_TYPE_ALG_RC4_STREAM |
ENC_TYPE_CHAINING_ECB),
- .ablkcipher = {
+ .skcipher = {
.min_keysize = 1,
.max_keysize = 256,
.setkey = n2_arc4_setkey,
@@ -1147,7 +1147,7 @@ static const struct n2_cipher_tmpl cipher_tmpls[] = {
.block_size = DES_BLOCK_SIZE,
.enc_type = (ENC_TYPE_ALG_DES |
ENC_TYPE_CHAINING_ECB),
- .ablkcipher = {
+ .skcipher = {
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.setkey = n2_des_setkey,
@@ -1160,7 +1160,7 @@ static const struct n2_cipher_tmpl cipher_tmpls[] = {
.block_size = DES_BLOCK_SIZE,
.enc_type = (ENC_TYPE_ALG_DES |
ENC_TYPE_CHAINING_CBC),
- .ablkcipher = {
+ .skcipher = {
.ivsize = DES_BLOCK_SIZE,
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
@@ -1174,7 +1174,7 @@ static const struct n2_cipher_tmpl cipher_tmpls[] = {
.block_size = DES_BLOCK_SIZE,
.enc_type = (ENC_TYPE_ALG_DES |
ENC_TYPE_CHAINING_CFB),
- .ablkcipher = {
+ .skcipher = {
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.setkey = n2_des_setkey,
@@ -1189,7 +1189,7 @@ static const struct n2_cipher_tmpl cipher_tmpls[] = {
.block_size = DES_BLOCK_SIZE,
.enc_type = (ENC_TYPE_ALG_3DES |
ENC_TYPE_CHAINING_ECB),
- .ablkcipher = {
+ .skcipher = {
.min_keysize = 3 * DES_KEY_SIZE,
.max_keysize = 3 * DES_KEY_SIZE,
.setkey = n2_3des_setkey,
@@ -1202,7 +1202,7 @@ static const struct n2_cipher_tmpl cipher_tmpls[] = {
.block_size = DES_BLOCK_SIZE,
.enc_type = (ENC_TYPE_ALG_3DES |
ENC_TYPE_CHAINING_CBC),
- .ablkcipher = {
+ .skcipher = {
.ivsize = DES_BLOCK_SIZE,
.min_keysize = 3 * DES_KEY_SIZE,
.max_keysize = 3 * DES_KEY_SIZE,
@@ -1216,7 +1216,7 @@ static const struct n2_cipher_tmpl cipher_tmpls[] = {
.block_size = DES_BLOCK_SIZE,
.enc_type = (ENC_TYPE_ALG_3DES |
ENC_TYPE_CHAINING_CFB),
- .ablkcipher = {
+ .skcipher = {
.min_keysize = 3 * DES_KEY_SIZE,
.max_keysize = 3 * DES_KEY_SIZE,
.setkey = n2_3des_setkey,
@@ -1230,7 +1230,7 @@ static const struct n2_cipher_tmpl cipher_tmpls[] = {
.block_size = AES_BLOCK_SIZE,
.enc_type = (ENC_TYPE_ALG_AES128 |
ENC_TYPE_CHAINING_ECB),
- .ablkcipher = {
+ .skcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.setkey = n2_aes_setkey,
@@ -1243,7 +1243,7 @@ static const struct n2_cipher_tmpl cipher_tmpls[] = {
.block_size = AES_BLOCK_SIZE,
.enc_type = (ENC_TYPE_ALG_AES128 |
ENC_TYPE_CHAINING_CBC),
- .ablkcipher = {
+ .skcipher = {
.ivsize = AES_BLOCK_SIZE,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
@@ -1257,7 +1257,7 @@ static const struct n2_cipher_tmpl cipher_tmpls[] = {
.block_size = AES_BLOCK_SIZE,
.enc_type = (ENC_TYPE_ALG_AES128 |
ENC_TYPE_CHAINING_COUNTER),
- .ablkcipher = {
+ .skcipher = {
.ivsize = AES_BLOCK_SIZE,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
@@ -1268,9 +1268,9 @@ static const struct n2_cipher_tmpl cipher_tmpls[] = {
},
};
-#define NUM_CIPHER_TMPLS ARRAY_SIZE(cipher_tmpls)
+#define NUM_CIPHER_TMPLS ARRAY_SIZE(skcipher_tmpls)
-static LIST_HEAD(cipher_algs);
+static LIST_HEAD(skcipher_algs);
struct n2_hash_tmpl {
const char *name;
@@ -1344,14 +1344,14 @@ static int algs_registered;
static void __n2_unregister_algs(void)
{
- struct n2_cipher_alg *cipher, *cipher_tmp;
+ struct n2_skcipher_alg *skcipher, *skcipher_tmp;
struct n2_ahash_alg *alg, *alg_tmp;
struct n2_hmac_alg *hmac, *hmac_tmp;
- list_for_each_entry_safe(cipher, cipher_tmp, &cipher_algs, entry) {
- crypto_unregister_alg(&cipher->alg);
- list_del(&cipher->entry);
- kfree(cipher);
+ list_for_each_entry_safe(skcipher, skcipher_tmp, &skcipher_algs, entry) {
+ crypto_unregister_skcipher(&skcipher->skcipher);
+ list_del(&skcipher->entry);
+ kfree(skcipher);
}
list_for_each_entry_safe(hmac, hmac_tmp, &hmac_algs, derived.entry) {
crypto_unregister_ahash(&hmac->derived.alg);
@@ -1365,44 +1365,42 @@ static void __n2_unregister_algs(void)
}
}
-static int n2_cipher_cra_init(struct crypto_tfm *tfm)
+static int n2_skcipher_init_tfm(struct crypto_skcipher *tfm)
{
- tfm->crt_ablkcipher.reqsize = sizeof(struct n2_request_context);
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct n2_request_context));
return 0;
}
-static int __n2_register_one_cipher(const struct n2_cipher_tmpl *tmpl)
+static int __n2_register_one_skcipher(const struct n2_skcipher_tmpl *tmpl)
{
- struct n2_cipher_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
- struct crypto_alg *alg;
+ struct n2_skcipher_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
+ struct skcipher_alg *alg;
int err;
if (!p)
return -ENOMEM;
- alg = &p->alg;
+ alg = &p->skcipher;
+ *alg = tmpl->skcipher;
- snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
- snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->drv_name);
- alg->cra_priority = N2_CRA_PRIORITY;
- alg->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC;
- alg->cra_blocksize = tmpl->block_size;
+ snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
+ snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->drv_name);
+ alg->base.cra_priority = N2_CRA_PRIORITY;
+ alg->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC;
+ alg->base.cra_blocksize = tmpl->block_size;
p->enc_type = tmpl->enc_type;
- alg->cra_ctxsize = sizeof(struct n2_cipher_context);
- alg->cra_type = &crypto_ablkcipher_type;
- alg->cra_u.ablkcipher = tmpl->ablkcipher;
- alg->cra_init = n2_cipher_cra_init;
- alg->cra_module = THIS_MODULE;
-
- list_add(&p->entry, &cipher_algs);
- err = crypto_register_alg(alg);
+ alg->base.cra_ctxsize = sizeof(struct n2_skcipher_context);
+ alg->base.cra_module = THIS_MODULE;
+ alg->init = n2_skcipher_init_tfm;
+
+ list_add(&p->entry, &skcipher_algs);
+ err = crypto_register_skcipher(alg);
if (err) {
- pr_err("%s alg registration failed\n", alg->cra_name);
+ pr_err("%s alg registration failed\n", alg->base.cra_name);
list_del(&p->entry);
kfree(p);
} else {
- pr_info("%s alg registered\n", alg->cra_name);
+ pr_info("%s alg registered\n", alg->base.cra_name);
}
return err;
}
@@ -1517,7 +1515,7 @@ static int n2_register_algs(void)
}
}
for (i = 0; i < NUM_CIPHER_TMPLS; i++) {
- err = __n2_register_one_cipher(&cipher_tmpls[i]);
+ err = __n2_register_one_skcipher(&skcipher_tmpls[i]);
if (err) {
__n2_unregister_algs();
goto out;
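
With n2's chunk builder now on `skcipher_walk`, the pattern is: initialise the walk from the request with `skcipher_walk_async()` (which, unlike the virtual-address variants, leaves pages unmapped so the driver can work with physical addresses), consume up to `walk.nbytes` per step through the `src.phys`/`dst.phys` members, and report progress with `skcipher_walk_done()`. A simplified sketch of that loop — real code, like `n2_compute_chunks` above, may process less than `walk.nbytes` per step and passes the remainder to `skcipher_walk_done()`:

#include <crypto/internal/skcipher.h>
#include <linux/mm.h>

static int example_walk_phys(struct skcipher_request *req)
{
	struct skcipher_walk walk;
	int err, nbytes;

	err = skcipher_walk_async(&walk, req);

	while (err == 0 && (nbytes = walk.nbytes) != 0) {
		unsigned long src = page_to_phys(walk.src.phys.page) +
				    walk.src.phys.offset;
		unsigned long dst = page_to_phys(walk.dst.phys.page) +
				    walk.dst.phys.offset;

		/* a real driver queues [src, src + nbytes) -> dst here */
		pr_debug("chunk %#lx -> %#lx (%d bytes)\n", src, dst, nbytes);

		err = skcipher_walk_done(&walk, 0);	/* all consumed */
	}
	return err;
}
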
diff --git a/drivers/crypto/nx/nx-aes-cbc.c b/drivers/crypto/nx/nx-aes-cbc.c
index e631f9979127..92e921eceed7 100644
--- a/drivers/crypto/nx/nx-aes-cbc.c
+++ b/drivers/crypto/nx/nx-aes-cbc.c
@@ -18,11 +18,11 @@
#include "nx.h"
-static int cbc_aes_nx_set_key(struct crypto_tfm *tfm,
- const u8 *in_key,
- unsigned int key_len)
+static int cbc_aes_nx_set_key(struct crypto_skcipher *tfm,
+ const u8 *in_key,
+ unsigned int key_len)
{
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
+ struct nx_crypto_ctx *nx_ctx = crypto_skcipher_ctx(tfm);
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
nx_ctx_init(nx_ctx, HCOP_FC_AES);
@@ -50,13 +50,11 @@ static int cbc_aes_nx_set_key(struct crypto_tfm *tfm,
return 0;
}
-static int cbc_aes_nx_crypt(struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes,
- int enc)
+static int cbc_aes_nx_crypt(struct skcipher_request *req,
+ int enc)
{
- struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct nx_crypto_ctx *nx_ctx = crypto_skcipher_ctx(tfm);
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
unsigned long irq_flags;
unsigned int processed = 0, to_process;
@@ -70,10 +68,11 @@ static int cbc_aes_nx_crypt(struct blkcipher_desc *desc,
NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
do {
- to_process = nbytes - processed;
+ to_process = req->cryptlen - processed;
- rc = nx_build_sg_lists(nx_ctx, desc, dst, src, &to_process,
- processed, csbcpb->cpb.aes_cbc.iv);
+ rc = nx_build_sg_lists(nx_ctx, req->iv, req->dst, req->src,
+ &to_process, processed,
+ csbcpb->cpb.aes_cbc.iv);
if (rc)
goto out;
@@ -83,56 +82,46 @@ static int cbc_aes_nx_crypt(struct blkcipher_desc *desc,
}
rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
- desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
if (rc)
goto out;
- memcpy(desc->info, csbcpb->cpb.aes_cbc.cv, AES_BLOCK_SIZE);
+ memcpy(req->iv, csbcpb->cpb.aes_cbc.cv, AES_BLOCK_SIZE);
atomic_inc(&(nx_ctx->stats->aes_ops));
atomic64_add(csbcpb->csb.processed_byte_count,
&(nx_ctx->stats->aes_bytes));
processed += to_process;
- } while (processed < nbytes);
+ } while (processed < req->cryptlen);
out:
spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
return rc;
}
-static int cbc_aes_nx_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes)
+static int cbc_aes_nx_encrypt(struct skcipher_request *req)
{
- return cbc_aes_nx_crypt(desc, dst, src, nbytes, 1);
+ return cbc_aes_nx_crypt(req, 1);
}
-static int cbc_aes_nx_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes)
+static int cbc_aes_nx_decrypt(struct skcipher_request *req)
{
- return cbc_aes_nx_crypt(desc, dst, src, nbytes, 0);
+ return cbc_aes_nx_crypt(req, 0);
}
-struct crypto_alg nx_cbc_aes_alg = {
- .cra_name = "cbc(aes)",
- .cra_driver_name = "cbc-aes-nx",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct nx_crypto_ctx),
- .cra_type = &crypto_blkcipher_type,
- .cra_alignmask = 0xf,
- .cra_module = THIS_MODULE,
- .cra_init = nx_crypto_ctx_aes_cbc_init,
- .cra_exit = nx_crypto_ctx_exit,
- .cra_blkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = cbc_aes_nx_set_key,
- .encrypt = cbc_aes_nx_encrypt,
- .decrypt = cbc_aes_nx_decrypt,
- }
+struct skcipher_alg nx_cbc_aes_alg = {
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "cbc-aes-nx",
+ .base.cra_priority = 300,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct nx_crypto_ctx),
+ .base.cra_alignmask = 0xf,
+ .base.cra_module = THIS_MODULE,
+ .init = nx_crypto_ctx_aes_cbc_init,
+ .exit = nx_crypto_ctx_skcipher_exit,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = cbc_aes_nx_set_key,
+ .encrypt = cbc_aes_nx_encrypt,
+ .decrypt = cbc_aes_nx_decrypt,
};
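
nx used `blkcipher_desc` for exactly two things — the IV pointer and the may-sleep flag — and both now come straight off the request, which is why every nx mode collapses to the same processing loop. Condensed shape of that loop; the `example_*` helpers are stand-ins for `nx_build_sg_lists` plus the NX-specific CSB setup and hcall, which the real functions above keep:

#include <crypto/skcipher.h>

static int example_build_sg_lists(struct skcipher_request *req,
				  unsigned int *to_process,
				  unsigned int processed);
static int example_hcall(bool may_sleep);

static int example_nx_crypt(struct skcipher_request *req, int enc)
{
	unsigned int processed = 0, to_process;
	int rc;

	do {
		to_process = req->cryptlen - processed;

		/* slice the scatterlists; IV comes from req->iv now */
		rc = example_build_sg_lists(req, &to_process, processed);
		if (rc)
			return rc;

		/* sleeping is allowed only if the caller said so */
		rc = example_hcall(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			return rc;

		processed += to_process;
	} while (processed < req->cryptlen);

	return 0;
}
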
diff --git a/drivers/crypto/nx/nx-aes-ccm.c b/drivers/crypto/nx/nx-aes-ccm.c
index 5be8f01c5da8..4c9362eebefd 100644
--- a/drivers/crypto/nx/nx-aes-ccm.c
+++ b/drivers/crypto/nx/nx-aes-ccm.c
@@ -327,7 +327,7 @@ static int generate_pat(u8 *iv,
}
static int ccm_nx_decrypt(struct aead_request *req,
- struct blkcipher_desc *desc,
+ u8 *iv,
unsigned int assoclen)
{
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
@@ -348,7 +348,7 @@ static int ccm_nx_decrypt(struct aead_request *req,
req->src, nbytes + req->assoclen, authsize,
SCATTERWALK_FROM_SG);
- rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes, assoclen,
+ rc = generate_pat(iv, req, nx_ctx, authsize, nbytes, assoclen,
csbcpb->cpb.aes_ccm.in_pat_or_b0);
if (rc)
goto out;
@@ -367,7 +367,7 @@ static int ccm_nx_decrypt(struct aead_request *req,
NX_CPB_FDM(nx_ctx->csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
- rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src,
+ rc = nx_build_sg_lists(nx_ctx, iv, req->dst, req->src,
&to_process, processed + req->assoclen,
csbcpb->cpb.aes_ccm.iv_or_ctr);
if (rc)
@@ -381,7 +381,7 @@ static int ccm_nx_decrypt(struct aead_request *req,
/* for partial completion, copy following for next
* entry into loop...
*/
- memcpy(desc->info, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE);
+ memcpy(iv, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE);
memcpy(csbcpb->cpb.aes_ccm.in_pat_or_b0,
csbcpb->cpb.aes_ccm.out_pat_or_mac, AES_BLOCK_SIZE);
memcpy(csbcpb->cpb.aes_ccm.in_s0,
@@ -405,7 +405,7 @@ out:
}
static int ccm_nx_encrypt(struct aead_request *req,
- struct blkcipher_desc *desc,
+ u8 *iv,
unsigned int assoclen)
{
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
@@ -418,7 +418,7 @@ static int ccm_nx_encrypt(struct aead_request *req,
spin_lock_irqsave(&nx_ctx->lock, irq_flags);
- rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes, assoclen,
+ rc = generate_pat(iv, req, nx_ctx, authsize, nbytes, assoclen,
csbcpb->cpb.aes_ccm.in_pat_or_b0);
if (rc)
goto out;
@@ -436,7 +436,7 @@ static int ccm_nx_encrypt(struct aead_request *req,
NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
- rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src,
+ rc = nx_build_sg_lists(nx_ctx, iv, req->dst, req->src,
&to_process, processed + req->assoclen,
csbcpb->cpb.aes_ccm.iv_or_ctr);
if (rc)
@@ -450,7 +450,7 @@ static int ccm_nx_encrypt(struct aead_request *req,
/* for partial completion, copy following for next
* entry into loop...
*/
- memcpy(desc->info, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE);
+ memcpy(iv, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE);
memcpy(csbcpb->cpb.aes_ccm.in_pat_or_b0,
csbcpb->cpb.aes_ccm.out_pat_or_mac, AES_BLOCK_SIZE);
memcpy(csbcpb->cpb.aes_ccm.in_s0,
@@ -481,67 +481,50 @@ static int ccm4309_aes_nx_encrypt(struct aead_request *req)
{
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
struct nx_gcm_rctx *rctx = aead_request_ctx(req);
- struct blkcipher_desc desc;
u8 *iv = rctx->iv;
iv[0] = 3;
memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3);
memcpy(iv + 4, req->iv, 8);
- desc.info = iv;
-
- return ccm_nx_encrypt(req, &desc, req->assoclen - 8);
+ return ccm_nx_encrypt(req, iv, req->assoclen - 8);
}
static int ccm_aes_nx_encrypt(struct aead_request *req)
{
- struct blkcipher_desc desc;
int rc;
- desc.info = req->iv;
-
- rc = crypto_ccm_check_iv(desc.info);
+ rc = crypto_ccm_check_iv(req->iv);
if (rc)
return rc;
- return ccm_nx_encrypt(req, &desc, req->assoclen);
+ return ccm_nx_encrypt(req, req->iv, req->assoclen);
}
static int ccm4309_aes_nx_decrypt(struct aead_request *req)
{
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
struct nx_gcm_rctx *rctx = aead_request_ctx(req);
- struct blkcipher_desc desc;
u8 *iv = rctx->iv;
iv[0] = 3;
memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3);
memcpy(iv + 4, req->iv, 8);
- desc.info = iv;
-
- return ccm_nx_decrypt(req, &desc, req->assoclen - 8);
+ return ccm_nx_decrypt(req, iv, req->assoclen - 8);
}
static int ccm_aes_nx_decrypt(struct aead_request *req)
{
- struct blkcipher_desc desc;
int rc;
- desc.info = req->iv;
-
- rc = crypto_ccm_check_iv(desc.info);
+ rc = crypto_ccm_check_iv(req->iv);
if (rc)
return rc;
- return ccm_nx_decrypt(req, &desc, req->assoclen);
+ return ccm_nx_decrypt(req, req->iv, req->assoclen);
}
-/* tell the block cipher walk routines that this is a stream cipher by
- * setting cra_blocksize to 1. Even using blkcipher_walk_virt_block
- * during encrypt/decrypt doesn't solve this problem, because it calls
- * blkcipher_walk_done under the covers, which doesn't use walk->blocksize,
- * but instead uses this tfm->blocksize. */
struct aead_alg nx_ccm_aes_alg = {
.base = {
.cra_name = "ccm(aes)",
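
For the AEAD paths the descriptor was never more than an IV carrier, so `ccm_nx_encrypt`/`ccm_nx_decrypt` now simply take `u8 *iv`. The rfc4309 wrappers still assemble that IV from the 3-byte implicit nonce (stored at setkey time) and the 8-byte explicit per-request IV, as above; spelled out as a standalone sketch:

#include <linux/string.h>
#include <linux/types.h>

static void example_rfc4309_iv(u8 *iv, const u8 *nonce, const u8 *req_iv)
{
	iv[0] = 3;		   /* CCM flags: q - 1, 4-octet length field */
	memcpy(iv + 1, nonce, 3);  /* implicit nonce from the key material */
	memcpy(iv + 4, req_iv, 8); /* explicit, per-request part */
	/* bytes 12..15 hold the counter, set up later by the CCM code */
}
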
diff --git a/drivers/crypto/nx/nx-aes-ctr.c b/drivers/crypto/nx/nx-aes-ctr.c
index 191e226a11a1..6d5ce1a66f1e 100644
--- a/drivers/crypto/nx/nx-aes-ctr.c
+++ b/drivers/crypto/nx/nx-aes-ctr.c
@@ -19,11 +19,11 @@
#include "nx.h"
-static int ctr_aes_nx_set_key(struct crypto_tfm *tfm,
- const u8 *in_key,
- unsigned int key_len)
+static int ctr_aes_nx_set_key(struct crypto_skcipher *tfm,
+ const u8 *in_key,
+ unsigned int key_len)
{
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
+ struct nx_crypto_ctx *nx_ctx = crypto_skcipher_ctx(tfm);
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
nx_ctx_init(nx_ctx, HCOP_FC_AES);
@@ -51,11 +51,11 @@ static int ctr_aes_nx_set_key(struct crypto_tfm *tfm,
return 0;
}
-static int ctr3686_aes_nx_set_key(struct crypto_tfm *tfm,
- const u8 *in_key,
- unsigned int key_len)
+static int ctr3686_aes_nx_set_key(struct crypto_skcipher *tfm,
+ const u8 *in_key,
+ unsigned int key_len)
{
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
+ struct nx_crypto_ctx *nx_ctx = crypto_skcipher_ctx(tfm);
if (key_len < CTR_RFC3686_NONCE_SIZE)
return -EINVAL;
@@ -69,12 +69,10 @@ static int ctr3686_aes_nx_set_key(struct crypto_tfm *tfm,
return ctr_aes_nx_set_key(tfm, in_key, key_len);
}
-static int ctr_aes_nx_crypt(struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes)
+static int ctr_aes_nx_crypt(struct skcipher_request *req, u8 *iv)
{
- struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct nx_crypto_ctx *nx_ctx = crypto_skcipher_ctx(tfm);
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
unsigned long irq_flags;
unsigned int processed = 0, to_process;
@@ -83,10 +81,11 @@ static int ctr_aes_nx_crypt(struct blkcipher_desc *desc,
spin_lock_irqsave(&nx_ctx->lock, irq_flags);
do {
- to_process = nbytes - processed;
+ to_process = req->cryptlen - processed;
- rc = nx_build_sg_lists(nx_ctx, desc, dst, src, &to_process,
- processed, csbcpb->cpb.aes_ctr.iv);
+ rc = nx_build_sg_lists(nx_ctx, iv, req->dst, req->src,
+ &to_process, processed,
+ csbcpb->cpb.aes_ctr.iv);
if (rc)
goto out;
@@ -96,59 +95,51 @@ static int ctr_aes_nx_crypt(struct blkcipher_desc *desc,
}
rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
- desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
if (rc)
goto out;
- memcpy(desc->info, csbcpb->cpb.aes_cbc.cv, AES_BLOCK_SIZE);
+ memcpy(iv, csbcpb->cpb.aes_cbc.cv, AES_BLOCK_SIZE);
atomic_inc(&(nx_ctx->stats->aes_ops));
atomic64_add(csbcpb->csb.processed_byte_count,
&(nx_ctx->stats->aes_bytes));
processed += to_process;
- } while (processed < nbytes);
+ } while (processed < req->cryptlen);
out:
spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
return rc;
}
-static int ctr3686_aes_nx_crypt(struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes)
+static int ctr3686_aes_nx_crypt(struct skcipher_request *req)
{
- struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct nx_crypto_ctx *nx_ctx = crypto_skcipher_ctx(tfm);
u8 iv[16];
memcpy(iv, nx_ctx->priv.ctr.nonce, CTR_RFC3686_IV_SIZE);
- memcpy(iv + CTR_RFC3686_NONCE_SIZE,
- desc->info, CTR_RFC3686_IV_SIZE);
+ memcpy(iv + CTR_RFC3686_NONCE_SIZE, req->iv, CTR_RFC3686_IV_SIZE);
iv[12] = iv[13] = iv[14] = 0;
iv[15] = 1;
- desc->info = iv;
-
- return ctr_aes_nx_crypt(desc, dst, src, nbytes);
+ return ctr_aes_nx_crypt(req, iv);
}
-struct crypto_alg nx_ctr3686_aes_alg = {
- .cra_name = "rfc3686(ctr(aes))",
- .cra_driver_name = "rfc3686-ctr-aes-nx",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct nx_crypto_ctx),
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = nx_crypto_ctx_aes_ctr_init,
- .cra_exit = nx_crypto_ctx_exit,
- .cra_blkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
- .ivsize = CTR_RFC3686_IV_SIZE,
- .setkey = ctr3686_aes_nx_set_key,
- .encrypt = ctr3686_aes_nx_crypt,
- .decrypt = ctr3686_aes_nx_crypt,
- }
+struct skcipher_alg nx_ctr3686_aes_alg = {
+ .base.cra_name = "rfc3686(ctr(aes))",
+ .base.cra_driver_name = "rfc3686-ctr-aes-nx",
+ .base.cra_priority = 300,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct nx_crypto_ctx),
+ .base.cra_module = THIS_MODULE,
+ .init = nx_crypto_ctx_aes_ctr_init,
+ .exit = nx_crypto_ctx_skcipher_exit,
+ .min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .setkey = ctr3686_aes_nx_set_key,
+ .encrypt = ctr3686_aes_nx_crypt,
+ .decrypt = ctr3686_aes_nx_crypt,
+ .chunksize = AES_BLOCK_SIZE,
};
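
Two details in the ctr conversion: `ctr3686_aes_nx_crypt` serves as both `.encrypt` and `.decrypt`, since CTR is its own inverse, and the new `.chunksize = AES_BLOCK_SIZE` records the underlying block granularity for a mode whose `cra_blocksize` is 1 (the stream-cipher convention). The rfc3686 counter-block assembly the function performs, as a standalone sketch:

#include <crypto/aes.h>
#include <crypto/ctr.h>
#include <linux/string.h>

static void example_rfc3686_ctrblk(u8 iv[AES_BLOCK_SIZE], const u8 *nonce,
				   const u8 *req_iv)
{
	memcpy(iv, nonce, CTR_RFC3686_NONCE_SIZE);	/* 4-byte nonce */
	memcpy(iv + CTR_RFC3686_NONCE_SIZE, req_iv,
	       CTR_RFC3686_IV_SIZE);			/* 8-byte IV */
	iv[12] = iv[13] = iv[14] = 0;			/* 32-bit counter... */
	iv[15] = 1;					/* ...starting at 1 */
}
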
diff --git a/drivers/crypto/nx/nx-aes-ecb.c b/drivers/crypto/nx/nx-aes-ecb.c
index c67570470c9d..77e338dc33f1 100644
--- a/drivers/crypto/nx/nx-aes-ecb.c
+++ b/drivers/crypto/nx/nx-aes-ecb.c
@@ -18,11 +18,11 @@
#include "nx.h"
-static int ecb_aes_nx_set_key(struct crypto_tfm *tfm,
- const u8 *in_key,
- unsigned int key_len)
+static int ecb_aes_nx_set_key(struct crypto_skcipher *tfm,
+ const u8 *in_key,
+ unsigned int key_len)
{
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
+ struct nx_crypto_ctx *nx_ctx = crypto_skcipher_ctx(tfm);
struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
nx_ctx_init(nx_ctx, HCOP_FC_AES);
@@ -50,13 +50,11 @@ static int ecb_aes_nx_set_key(struct crypto_tfm *tfm,
return 0;
}
-static int ecb_aes_nx_crypt(struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes,
- int enc)
+static int ecb_aes_nx_crypt(struct skcipher_request *req,
+ int enc)
{
- struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct nx_crypto_ctx *nx_ctx = crypto_skcipher_ctx(tfm);
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
unsigned long irq_flags;
unsigned int processed = 0, to_process;
@@ -70,10 +68,10 @@ static int ecb_aes_nx_crypt(struct blkcipher_desc *desc,
NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
do {
- to_process = nbytes - processed;
+ to_process = req->cryptlen - processed;
- rc = nx_build_sg_lists(nx_ctx, desc, dst, src, &to_process,
- processed, NULL);
+ rc = nx_build_sg_lists(nx_ctx, NULL, req->dst, req->src,
+ &to_process, processed, NULL);
if (rc)
goto out;
@@ -83,7 +81,7 @@ static int ecb_aes_nx_crypt(struct blkcipher_desc *desc,
}
rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
- desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
if (rc)
goto out;
@@ -92,46 +90,36 @@ static int ecb_aes_nx_crypt(struct blkcipher_desc *desc,
&(nx_ctx->stats->aes_bytes));
processed += to_process;
- } while (processed < nbytes);
+ } while (processed < req->cryptlen);
out:
spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
return rc;
}
-static int ecb_aes_nx_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes)
+static int ecb_aes_nx_encrypt(struct skcipher_request *req)
{
- return ecb_aes_nx_crypt(desc, dst, src, nbytes, 1);
+ return ecb_aes_nx_crypt(req, 1);
}
-static int ecb_aes_nx_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes)
+static int ecb_aes_nx_decrypt(struct skcipher_request *req)
{
- return ecb_aes_nx_crypt(desc, dst, src, nbytes, 0);
+ return ecb_aes_nx_crypt(req, 0);
}
-struct crypto_alg nx_ecb_aes_alg = {
- .cra_name = "ecb(aes)",
- .cra_driver_name = "ecb-aes-nx",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_alignmask = 0xf,
- .cra_ctxsize = sizeof(struct nx_crypto_ctx),
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = nx_crypto_ctx_aes_ecb_init,
- .cra_exit = nx_crypto_ctx_exit,
- .cra_blkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = ecb_aes_nx_set_key,
- .encrypt = ecb_aes_nx_encrypt,
- .decrypt = ecb_aes_nx_decrypt,
- }
+struct skcipher_alg nx_ecb_aes_alg = {
+ .base.cra_name = "ecb(aes)",
+ .base.cra_driver_name = "ecb-aes-nx",
+ .base.cra_priority = 300,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_alignmask = 0xf,
+ .base.cra_ctxsize = sizeof(struct nx_crypto_ctx),
+ .base.cra_module = THIS_MODULE,
+ .init = nx_crypto_ctx_aes_ecb_init,
+ .exit = nx_crypto_ctx_skcipher_exit,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = ecb_aes_nx_set_key,
+ .encrypt = ecb_aes_nx_encrypt,
+ .decrypt = ecb_aes_nx_decrypt,
};
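
ECB is the one mode with no IV at all, so the converted code passes NULL for both IV parameters of `nx_build_sg_lists`; the reworked helper (see the nx.c hunk further down) copies only when a destination was actually supplied. The guard, in isolation:

#include <crypto/aes.h>
#include <linux/string.h>
#include <linux/types.h>

static void example_copy_iv(u8 *oiv, const u8 *iv)
{
	if (oiv)	/* NULL for ECB: nothing to propagate */
		memcpy(oiv, iv, AES_BLOCK_SIZE);
}
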
diff --git a/drivers/crypto/nx/nx-aes-gcm.c b/drivers/crypto/nx/nx-aes-gcm.c
index 7d3d67871270..19c6ed5baea4 100644
--- a/drivers/crypto/nx/nx-aes-gcm.c
+++ b/drivers/crypto/nx/nx-aes-gcm.c
@@ -166,8 +166,7 @@ static int nx_gca(struct nx_crypto_ctx *nx_ctx,
return rc;
}
-static int gmac(struct aead_request *req, struct blkcipher_desc *desc,
- unsigned int assoclen)
+static int gmac(struct aead_request *req, const u8 *iv, unsigned int assoclen)
{
int rc;
struct nx_crypto_ctx *nx_ctx =
@@ -190,7 +189,7 @@ static int gmac(struct aead_request *req, struct blkcipher_desc *desc,
nx_ctx->ap->databytelen/NX_PAGE_SIZE);
/* Copy IV */
- memcpy(csbcpb->cpb.aes_gcm.iv_or_cnt, desc->info, AES_BLOCK_SIZE);
+ memcpy(csbcpb->cpb.aes_gcm.iv_or_cnt, iv, AES_BLOCK_SIZE);
do {
/*
@@ -240,8 +239,7 @@ out:
return rc;
}
-static int gcm_empty(struct aead_request *req, struct blkcipher_desc *desc,
- int enc)
+static int gcm_empty(struct aead_request *req, const u8 *iv, int enc)
{
int rc;
struct nx_crypto_ctx *nx_ctx =
@@ -268,7 +266,7 @@ static int gcm_empty(struct aead_request *req, struct blkcipher_desc *desc,
len = AES_BLOCK_SIZE;
/* Encrypt the counter/IV */
- in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) desc->info,
+ in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) iv,
&len, nx_ctx->ap->sglen);
if (len != AES_BLOCK_SIZE)
@@ -285,7 +283,7 @@ static int gcm_empty(struct aead_request *req, struct blkcipher_desc *desc,
nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
- desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
if (rc)
goto out;
atomic_inc(&(nx_ctx->stats->aes_ops));
@@ -313,7 +311,6 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc,
crypto_aead_ctx(crypto_aead_reqtfm(req));
struct nx_gcm_rctx *rctx = aead_request_ctx(req);
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
- struct blkcipher_desc desc;
unsigned int nbytes = req->cryptlen;
unsigned int processed = 0, to_process;
unsigned long irq_flags;
@@ -321,15 +318,14 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc,
spin_lock_irqsave(&nx_ctx->lock, irq_flags);
- desc.info = rctx->iv;
/* initialize the counter */
- *(u32 *)(desc.info + NX_GCM_CTR_OFFSET) = 1;
+ *(u32 *)&rctx->iv[NX_GCM_CTR_OFFSET] = 1;
if (nbytes == 0) {
if (assoclen == 0)
- rc = gcm_empty(req, &desc, enc);
+ rc = gcm_empty(req, rctx->iv, enc);
else
- rc = gmac(req, &desc, assoclen);
+ rc = gmac(req, rctx->iv, assoclen);
if (rc)
goto out;
else
@@ -358,7 +354,7 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc,
to_process = nbytes - processed;
csbcpb->cpb.aes_gcm.bit_length_data = nbytes * 8;
- rc = nx_build_sg_lists(nx_ctx, &desc, req->dst,
+ rc = nx_build_sg_lists(nx_ctx, rctx->iv, req->dst,
req->src, &to_process,
processed + req->assoclen,
csbcpb->cpb.aes_gcm.iv_or_cnt);
@@ -377,7 +373,7 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc,
if (rc)
goto out;
- memcpy(desc.info, csbcpb->cpb.aes_gcm.out_cnt, AES_BLOCK_SIZE);
+ memcpy(rctx->iv, csbcpb->cpb.aes_gcm.out_cnt, AES_BLOCK_SIZE);
memcpy(csbcpb->cpb.aes_gcm.in_pat_or_aad,
csbcpb->cpb.aes_gcm.out_pat_or_mac, AES_BLOCK_SIZE);
memcpy(csbcpb->cpb.aes_gcm.in_s0,
@@ -471,11 +467,6 @@ static int gcm4106_aes_nx_decrypt(struct aead_request *req)
return gcm_aes_nx_crypt(req, 0, req->assoclen - 8);
}
-/* tell the block cipher walk routines that this is a stream cipher by
- * setting cra_blocksize to 1. Even using blkcipher_walk_virt_block
- * during encrypt/decrypt doesn't solve this problem, because it calls
- * blkcipher_walk_done under the covers, which doesn't use walk->blocksize,
- * but instead uses this tfm->blocksize. */
struct aead_alg nx_gcm_aes_alg = {
.base = {
.cra_name = "gcm(aes)",
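
With blkcipher_desc gone from the GCM paths above, helpers now take the IV pointer directly and read the may-sleep hint off the request itself. A minimal sketch of the resulting calling convention, not the driver's code (the example_* names are illustrative):

#include <linux/types.h>
#include <crypto/aead.h>

/* stand-in for the synchronous hardware call (hypothetical) */
static int example_hw_op(const u8 *iv, bool may_sleep)
{
	return 0;
}

static int example_gcm_helper(struct aead_request *req, const u8 *iv)
{
	/* replaces the old desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP test */
	bool may_sleep = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;

	return example_hw_op(iv, may_sleep);
}
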
diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c
index 28817880c76d..f03c238f5a31 100644
--- a/drivers/crypto/nx/nx.c
+++ b/drivers/crypto/nx/nx.c
@@ -243,25 +243,25 @@ static long int trim_sg_list(struct nx_sg *sg,
* scatterlists based on them.
*
* @nx_ctx: NX crypto context for the lists we're building
- * @desc: the block cipher descriptor for the operation
+ * @iv: iv data, if the algorithm requires it
* @dst: destination scatterlist
* @src: source scatterlist
* @nbytes: length of data described in the scatterlists
* @offset: number of bytes to fast-forward past at the beginning of
* scatterlists.
- * @iv: destination for the iv data, if the algorithm requires it
+ * @oiv: destination for the iv data, if the algorithm requires it
*
- * This is common code shared by all the AES algorithms. It uses the block
- * cipher walk routines to traverse input and output scatterlists, building
+ * This is common code shared by all the AES algorithms. It uses the crypto
+ * scatterlist walk routines to traverse input and output scatterlists, building
* corresponding NX scatterlists
*/
int nx_build_sg_lists(struct nx_crypto_ctx *nx_ctx,
- struct blkcipher_desc *desc,
+ const u8 *iv,
struct scatterlist *dst,
struct scatterlist *src,
unsigned int *nbytes,
unsigned int offset,
- u8 *iv)
+ u8 *oiv)
{
unsigned int delta = 0;
unsigned int total = *nbytes;
@@ -274,8 +274,8 @@ int nx_build_sg_lists(struct nx_crypto_ctx *nx_ctx,
max_sg_len = min_t(u64, max_sg_len,
nx_ctx->ap->databytelen/NX_PAGE_SIZE);
- if (iv)
- memcpy(iv, desc->info, AES_BLOCK_SIZE);
+ if (oiv)
+ memcpy(oiv, iv, AES_BLOCK_SIZE);
*nbytes = min_t(u64, *nbytes, nx_ctx->ap->databytelen);
@@ -511,10 +511,10 @@ static bool nx_check_props(struct device *dev, u32 fc, u32 mode)
return true;
}
-static int nx_register_alg(struct crypto_alg *alg, u32 fc, u32 mode)
+static int nx_register_skcipher(struct skcipher_alg *alg, u32 fc, u32 mode)
{
return nx_check_props(&nx_driver.viodev->dev, fc, mode) ?
- crypto_register_alg(alg) : 0;
+ crypto_register_skcipher(alg) : 0;
}
static int nx_register_aead(struct aead_alg *alg, u32 fc, u32 mode)
@@ -531,10 +531,10 @@ static int nx_register_shash(struct shash_alg *alg, u32 fc, u32 mode, int slot)
crypto_register_shash(alg) : 0;
}
-static void nx_unregister_alg(struct crypto_alg *alg, u32 fc, u32 mode)
+static void nx_unregister_skcipher(struct skcipher_alg *alg, u32 fc, u32 mode)
{
if (nx_check_props(NULL, fc, mode))
- crypto_unregister_alg(alg);
+ crypto_unregister_skcipher(alg);
}
static void nx_unregister_aead(struct aead_alg *alg, u32 fc, u32 mode)
@@ -573,15 +573,16 @@ static int nx_register_algs(void)
nx_driver.of.status = NX_OKAY;
- rc = nx_register_alg(&nx_ecb_aes_alg, NX_FC_AES, NX_MODE_AES_ECB);
+ rc = nx_register_skcipher(&nx_ecb_aes_alg, NX_FC_AES, NX_MODE_AES_ECB);
if (rc)
goto out;
- rc = nx_register_alg(&nx_cbc_aes_alg, NX_FC_AES, NX_MODE_AES_CBC);
+ rc = nx_register_skcipher(&nx_cbc_aes_alg, NX_FC_AES, NX_MODE_AES_CBC);
if (rc)
goto out_unreg_ecb;
- rc = nx_register_alg(&nx_ctr3686_aes_alg, NX_FC_AES, NX_MODE_AES_CTR);
+ rc = nx_register_skcipher(&nx_ctr3686_aes_alg, NX_FC_AES,
+ NX_MODE_AES_CTR);
if (rc)
goto out_unreg_cbc;
@@ -633,11 +634,11 @@ out_unreg_gcm4106:
out_unreg_gcm:
nx_unregister_aead(&nx_gcm_aes_alg, NX_FC_AES, NX_MODE_AES_GCM);
out_unreg_ctr3686:
- nx_unregister_alg(&nx_ctr3686_aes_alg, NX_FC_AES, NX_MODE_AES_CTR);
+ nx_unregister_skcipher(&nx_ctr3686_aes_alg, NX_FC_AES, NX_MODE_AES_CTR);
out_unreg_cbc:
- nx_unregister_alg(&nx_cbc_aes_alg, NX_FC_AES, NX_MODE_AES_CBC);
+ nx_unregister_skcipher(&nx_cbc_aes_alg, NX_FC_AES, NX_MODE_AES_CBC);
out_unreg_ecb:
- nx_unregister_alg(&nx_ecb_aes_alg, NX_FC_AES, NX_MODE_AES_ECB);
+ nx_unregister_skcipher(&nx_ecb_aes_alg, NX_FC_AES, NX_MODE_AES_ECB);
out:
return rc;
}
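
Two idioms recur in the hunks above: registration is gated on a hardware capability check (returning 0 to skip quietly when the feature is absent), and failures unwind through the usual reverse-order goto ladder. A compact sketch under those assumptions (hw_supports and the example algs are hypothetical, with their fields elided):

#include <crypto/internal/skcipher.h>

static struct skcipher_alg example_ecb_alg, example_cbc_alg;

static bool hw_supports(u32 fc, u32 mode)
{
	return true;	/* hypothetical capability probe */
}

static int example_register(struct skcipher_alg *alg, u32 fc, u32 mode)
{
	return hw_supports(fc, mode) ? crypto_register_skcipher(alg) : 0;
}

static int example_register_all(void)
{
	int rc;

	rc = example_register(&example_ecb_alg, 0, 0);
	if (rc)
		goto out;

	rc = example_register(&example_cbc_alg, 0, 1);
	if (rc)
		goto out_unreg_ecb;

	return 0;

out_unreg_ecb:
	crypto_unregister_skcipher(&example_ecb_alg);
out:
	return rc;
}
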
@@ -704,21 +705,21 @@ int nx_crypto_ctx_aes_gcm_init(struct crypto_aead *tfm)
NX_MODE_AES_GCM);
}
-int nx_crypto_ctx_aes_ctr_init(struct crypto_tfm *tfm)
+int nx_crypto_ctx_aes_ctr_init(struct crypto_skcipher *tfm)
{
- return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES,
+ return nx_crypto_ctx_init(crypto_skcipher_ctx(tfm), NX_FC_AES,
NX_MODE_AES_CTR);
}
-int nx_crypto_ctx_aes_cbc_init(struct crypto_tfm *tfm)
+int nx_crypto_ctx_aes_cbc_init(struct crypto_skcipher *tfm)
{
- return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES,
+ return nx_crypto_ctx_init(crypto_skcipher_ctx(tfm), NX_FC_AES,
NX_MODE_AES_CBC);
}
-int nx_crypto_ctx_aes_ecb_init(struct crypto_tfm *tfm)
+int nx_crypto_ctx_aes_ecb_init(struct crypto_skcipher *tfm)
{
- return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES,
+ return nx_crypto_ctx_init(crypto_skcipher_ctx(tfm), NX_FC_AES,
NX_MODE_AES_ECB);
}
@@ -752,6 +753,11 @@ void nx_crypto_ctx_exit(struct crypto_tfm *tfm)
nx_ctx->out_sg = NULL;
}
+void nx_crypto_ctx_skcipher_exit(struct crypto_skcipher *tfm)
+{
+ nx_crypto_ctx_exit(crypto_skcipher_ctx(tfm));
+}
+
void nx_crypto_ctx_aead_exit(struct crypto_aead *tfm)
{
struct nx_crypto_ctx *nx_ctx = crypto_aead_ctx(tfm);
@@ -798,10 +804,12 @@ static int nx_remove(struct vio_dev *viodev)
NX_FC_AES, NX_MODE_AES_GCM);
nx_unregister_aead(&nx_gcm_aes_alg,
NX_FC_AES, NX_MODE_AES_GCM);
- nx_unregister_alg(&nx_ctr3686_aes_alg,
- NX_FC_AES, NX_MODE_AES_CTR);
- nx_unregister_alg(&nx_cbc_aes_alg, NX_FC_AES, NX_MODE_AES_CBC);
- nx_unregister_alg(&nx_ecb_aes_alg, NX_FC_AES, NX_MODE_AES_ECB);
+ nx_unregister_skcipher(&nx_ctr3686_aes_alg,
+ NX_FC_AES, NX_MODE_AES_CTR);
+ nx_unregister_skcipher(&nx_cbc_aes_alg, NX_FC_AES,
+ NX_MODE_AES_CBC);
+ nx_unregister_skcipher(&nx_ecb_aes_alg, NX_FC_AES,
+ NX_MODE_AES_ECB);
}
return 0;
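
The tail of nx.c shows the other recurring piece of the conversion: per-transform init/exit callbacks typed on crypto_skcipher, with the context fetched via crypto_skcipher_ctx(). A minimal sketch, assuming an illustrative context struct rather than the driver's:

#include <crypto/internal/skcipher.h>
#include <linux/string.h>

struct example_ctx {
	u8 key[32];
	unsigned int keylen;
};

static int example_init_tfm(struct crypto_skcipher *tfm)
{
	struct example_ctx *ctx = crypto_skcipher_ctx(tfm);

	ctx->keylen = 0;	/* per-transform setup */
	return 0;
}

static void example_exit_tfm(struct crypto_skcipher *tfm)
{
	struct example_ctx *ctx = crypto_skcipher_ctx(tfm);

	/* scrub key material on teardown */
	memzero_explicit(ctx->key, sizeof(ctx->key));
}
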
diff --git a/drivers/crypto/nx/nx.h b/drivers/crypto/nx/nx.h
index 7ecca168f8c4..91c54289124a 100644
--- a/drivers/crypto/nx/nx.h
+++ b/drivers/crypto/nx/nx.h
@@ -145,19 +145,20 @@ struct crypto_aead;
int nx_crypto_ctx_aes_ccm_init(struct crypto_aead *tfm);
int nx_crypto_ctx_aes_gcm_init(struct crypto_aead *tfm);
int nx_crypto_ctx_aes_xcbc_init(struct crypto_tfm *tfm);
-int nx_crypto_ctx_aes_ctr_init(struct crypto_tfm *tfm);
-int nx_crypto_ctx_aes_cbc_init(struct crypto_tfm *tfm);
-int nx_crypto_ctx_aes_ecb_init(struct crypto_tfm *tfm);
+int nx_crypto_ctx_aes_ctr_init(struct crypto_skcipher *tfm);
+int nx_crypto_ctx_aes_cbc_init(struct crypto_skcipher *tfm);
+int nx_crypto_ctx_aes_ecb_init(struct crypto_skcipher *tfm);
int nx_crypto_ctx_sha_init(struct crypto_tfm *tfm);
void nx_crypto_ctx_exit(struct crypto_tfm *tfm);
+void nx_crypto_ctx_skcipher_exit(struct crypto_skcipher *tfm);
void nx_crypto_ctx_aead_exit(struct crypto_aead *tfm);
void nx_ctx_init(struct nx_crypto_ctx *nx_ctx, unsigned int function);
int nx_hcall_sync(struct nx_crypto_ctx *ctx, struct vio_pfo_op *op,
u32 may_sleep);
struct nx_sg *nx_build_sg_list(struct nx_sg *, u8 *, unsigned int *, u32);
-int nx_build_sg_lists(struct nx_crypto_ctx *, struct blkcipher_desc *,
- struct scatterlist *, struct scatterlist *, unsigned int *,
- unsigned int, u8 *);
+int nx_build_sg_lists(struct nx_crypto_ctx *nx_ctx, const u8 *iv,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int *nbytes, unsigned int offset, u8 *oiv);
struct nx_sg *nx_walk_and_build(struct nx_sg *, unsigned int,
struct scatterlist *, unsigned int,
unsigned int *);
@@ -175,11 +176,11 @@ void nx_debugfs_fini(struct nx_crypto_driver *);
#define NX_PAGE_NUM(x) ((u64)(x) & 0xfffffffffffff000ULL)
-extern struct crypto_alg nx_cbc_aes_alg;
-extern struct crypto_alg nx_ecb_aes_alg;
+extern struct skcipher_alg nx_cbc_aes_alg;
+extern struct skcipher_alg nx_ecb_aes_alg;
extern struct aead_alg nx_gcm_aes_alg;
extern struct aead_alg nx_gcm4106_aes_alg;
-extern struct crypto_alg nx_ctr3686_aes_alg;
+extern struct skcipher_alg nx_ctr3686_aes_alg;
extern struct aead_alg nx_ccm_aes_alg;
extern struct aead_alg nx_ccm4309_aes_alg;
extern struct shash_alg nx_shash_aes_xcbc_alg;
diff --git a/drivers/crypto/nx/nx_debugfs.c b/drivers/crypto/nx/nx_debugfs.c
index e0d44a5512ab..1975bcbee997 100644
--- a/drivers/crypto/nx/nx_debugfs.c
+++ b/drivers/crypto/nx/nx_debugfs.c
@@ -38,23 +38,23 @@ void nx_debugfs_init(struct nx_crypto_driver *drv)
drv->dfs_root = root;
debugfs_create_u32("aes_ops", S_IRUSR | S_IRGRP | S_IROTH,
- root, (u32 *)&drv->stats.aes_ops);
+ root, &drv->stats.aes_ops.counter);
debugfs_create_u32("sha256_ops", S_IRUSR | S_IRGRP | S_IROTH,
- root, (u32 *)&drv->stats.sha256_ops);
+ root, &drv->stats.sha256_ops.counter);
debugfs_create_u32("sha512_ops", S_IRUSR | S_IRGRP | S_IROTH,
- root, (u32 *)&drv->stats.sha512_ops);
+ root, &drv->stats.sha512_ops.counter);
debugfs_create_u64("aes_bytes", S_IRUSR | S_IRGRP | S_IROTH,
- root, (u64 *)&drv->stats.aes_bytes);
+ root, &drv->stats.aes_bytes.counter);
debugfs_create_u64("sha256_bytes", S_IRUSR | S_IRGRP | S_IROTH,
- root, (u64 *)&drv->stats.sha256_bytes);
+ root, &drv->stats.sha256_bytes.counter);
debugfs_create_u64("sha512_bytes", S_IRUSR | S_IRGRP | S_IROTH,
- root, (u64 *)&drv->stats.sha512_bytes);
+ root, &drv->stats.sha512_bytes.counter);
debugfs_create_u32("errors", S_IRUSR | S_IRGRP | S_IROTH,
- root, (u32 *)&drv->stats.errors);
+ root, &drv->stats.errors.counter);
debugfs_create_u32("last_error", S_IRUSR | S_IRGRP | S_IROTH,
- root, (u32 *)&drv->stats.last_error);
+ root, &drv->stats.last_error.counter);
debugfs_create_u32("last_error_pid", S_IRUSR | S_IRGRP | S_IROTH,
- root, (u32 *)&drv->stats.last_error_pid);
+ root, &drv->stats.last_error_pid.counter);
}
void
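
The debugfs hunks drop the pointer casts over the whole stats struct by exposing the counter member that atomic_t wraps. A sketch of that idiom alongside the dedicated helper debugfs also offers (example names only; the cast keeps the int/u32 signedness conversion explicit):

#include <linux/atomic.h>
#include <linux/debugfs.h>

struct example_stats {
	atomic_t ops;
};

static void example_debugfs(struct dentry *root, struct example_stats *st)
{
	/* expose the wrapped counter, as the hunk above does */
	debugfs_create_u32("ops", 0444, root, (u32 *)&st->ops.counter);
	/* or let debugfs handle the atomic_t directly */
	debugfs_create_atomic_t("ops_atomic", 0444, root, &st->ops);
}
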
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index 2f53fbb74100..a1fc03ed01f3 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -142,8 +142,8 @@ int omap_aes_write_ctrl(struct omap_aes_dev *dd)
__le32_to_cpu(dd->ctx->key[i]));
}
- if ((dd->flags & (FLAGS_CBC | FLAGS_CTR)) && dd->req->info)
- omap_aes_write_n(dd, AES_REG_IV(dd, 0), dd->req->info, 4);
+ if ((dd->flags & (FLAGS_CBC | FLAGS_CTR)) && dd->req->iv)
+ omap_aes_write_n(dd, AES_REG_IV(dd, 0), (void *)dd->req->iv, 4);
if ((dd->flags & (FLAGS_GCM)) && dd->aead_req->iv) {
rctx = aead_request_ctx(dd->aead_req);
@@ -382,11 +382,11 @@ int omap_aes_crypt_dma_start(struct omap_aes_dev *dd)
static void omap_aes_finish_req(struct omap_aes_dev *dd, int err)
{
- struct ablkcipher_request *req = dd->req;
+ struct skcipher_request *req = dd->req;
pr_debug("err: %d\n", err);
- crypto_finalize_ablkcipher_request(dd->engine, req, err);
+ crypto_finalize_skcipher_request(dd->engine, req, err);
pm_runtime_mark_last_busy(dd->dev);
pm_runtime_put_autosuspend(dd->dev);
@@ -403,10 +403,10 @@ int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
}
static int omap_aes_handle_queue(struct omap_aes_dev *dd,
- struct ablkcipher_request *req)
+ struct skcipher_request *req)
{
if (req)
- return crypto_transfer_ablkcipher_request_to_engine(dd->engine, req);
+ return crypto_transfer_skcipher_request_to_engine(dd->engine, req);
return 0;
}
@@ -414,10 +414,10 @@ static int omap_aes_handle_queue(struct omap_aes_dev *dd,
static int omap_aes_prepare_req(struct crypto_engine *engine,
void *areq)
{
- struct ablkcipher_request *req = container_of(areq, struct ablkcipher_request, base);
- struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(
- crypto_ablkcipher_reqtfm(req));
- struct omap_aes_reqctx *rctx = ablkcipher_request_ctx(req);
+ struct skcipher_request *req = container_of(areq, struct skcipher_request, base);
+ struct omap_aes_ctx *ctx = crypto_skcipher_ctx(
+ crypto_skcipher_reqtfm(req));
+ struct omap_aes_reqctx *rctx = skcipher_request_ctx(req);
struct omap_aes_dev *dd = rctx->dd;
int ret;
u16 flags;
@@ -427,8 +427,8 @@ static int omap_aes_prepare_req(struct crypto_engine *engine,
/* assign new request to device */
dd->req = req;
- dd->total = req->nbytes;
- dd->total_save = req->nbytes;
+ dd->total = req->cryptlen;
+ dd->total_save = req->cryptlen;
dd->in_sg = req->src;
dd->out_sg = req->dst;
dd->orig_out = req->dst;
@@ -469,8 +469,8 @@ static int omap_aes_prepare_req(struct crypto_engine *engine,
static int omap_aes_crypt_req(struct crypto_engine *engine,
void *areq)
{
- struct ablkcipher_request *req = container_of(areq, struct ablkcipher_request, base);
- struct omap_aes_reqctx *rctx = ablkcipher_request_ctx(req);
+ struct skcipher_request *req = container_of(areq, struct skcipher_request, base);
+ struct omap_aes_reqctx *rctx = skcipher_request_ctx(req);
struct omap_aes_dev *dd = rctx->dd;
if (!dd)
@@ -505,26 +505,26 @@ static void omap_aes_done_task(unsigned long data)
pr_debug("exit\n");
}
-static int omap_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
+static int omap_aes_crypt(struct skcipher_request *req, unsigned long mode)
{
- struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(
- crypto_ablkcipher_reqtfm(req));
- struct omap_aes_reqctx *rctx = ablkcipher_request_ctx(req);
+ struct omap_aes_ctx *ctx = crypto_skcipher_ctx(
+ crypto_skcipher_reqtfm(req));
+ struct omap_aes_reqctx *rctx = skcipher_request_ctx(req);
struct omap_aes_dev *dd;
int ret;
- pr_debug("nbytes: %d, enc: %d, cbc: %d\n", req->nbytes,
+ pr_debug("nbytes: %d, enc: %d, cbc: %d\n", req->cryptlen,
!!(mode & FLAGS_ENCRYPT),
!!(mode & FLAGS_CBC));
- if (req->nbytes < aes_fallback_sz) {
+ if (req->cryptlen < aes_fallback_sz) {
SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
skcipher_request_set_sync_tfm(subreq, ctx->fallback);
skcipher_request_set_callback(subreq, req->base.flags, NULL,
NULL);
skcipher_request_set_crypt(subreq, req->src, req->dst,
- req->nbytes, req->info);
+ req->cryptlen, req->iv);
if (mode & FLAGS_ENCRYPT)
ret = crypto_skcipher_encrypt(subreq);
@@ -545,10 +545,10 @@ static int omap_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
/* ********************** ALG API ************************************ */
-static int omap_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+static int omap_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen)
{
- struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+ struct omap_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
int ret;
if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
@@ -571,32 +571,32 @@ static int omap_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
return 0;
}
-static int omap_aes_ecb_encrypt(struct ablkcipher_request *req)
+static int omap_aes_ecb_encrypt(struct skcipher_request *req)
{
return omap_aes_crypt(req, FLAGS_ENCRYPT);
}
-static int omap_aes_ecb_decrypt(struct ablkcipher_request *req)
+static int omap_aes_ecb_decrypt(struct skcipher_request *req)
{
return omap_aes_crypt(req, 0);
}
-static int omap_aes_cbc_encrypt(struct ablkcipher_request *req)
+static int omap_aes_cbc_encrypt(struct skcipher_request *req)
{
return omap_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
}
-static int omap_aes_cbc_decrypt(struct ablkcipher_request *req)
+static int omap_aes_cbc_decrypt(struct skcipher_request *req)
{
return omap_aes_crypt(req, FLAGS_CBC);
}
-static int omap_aes_ctr_encrypt(struct ablkcipher_request *req)
+static int omap_aes_ctr_encrypt(struct skcipher_request *req)
{
return omap_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CTR);
}
-static int omap_aes_ctr_decrypt(struct ablkcipher_request *req)
+static int omap_aes_ctr_decrypt(struct skcipher_request *req)
{
return omap_aes_crypt(req, FLAGS_CTR);
}
@@ -606,10 +606,10 @@ static int omap_aes_prepare_req(struct crypto_engine *engine,
static int omap_aes_crypt_req(struct crypto_engine *engine,
void *req);
-static int omap_aes_cra_init(struct crypto_tfm *tfm)
+static int omap_aes_init_tfm(struct crypto_skcipher *tfm)
{
- const char *name = crypto_tfm_alg_name(tfm);
- struct omap_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+ const char *name = crypto_tfm_alg_name(&tfm->base);
+ struct omap_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
struct crypto_sync_skcipher *blk;
blk = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
@@ -618,7 +618,7 @@ static int omap_aes_cra_init(struct crypto_tfm *tfm)
ctx->fallback = blk;
- tfm->crt_ablkcipher.reqsize = sizeof(struct omap_aes_reqctx);
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct omap_aes_reqctx));
ctx->enginectx.op.prepare_request = omap_aes_prepare_req;
ctx->enginectx.op.unprepare_request = NULL;
@@ -657,9 +657,9 @@ static int omap_aes_gcm_cra_init(struct crypto_aead *tfm)
return 0;
}
-static void omap_aes_cra_exit(struct crypto_tfm *tfm)
+static void omap_aes_exit_tfm(struct crypto_skcipher *tfm)
{
- struct omap_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct omap_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
if (ctx->fallback)
crypto_free_sync_skcipher(ctx->fallback);
@@ -671,7 +671,10 @@ static void omap_aes_gcm_cra_exit(struct crypto_aead *tfm)
{
struct omap_aes_ctx *ctx = crypto_aead_ctx(tfm);
- omap_aes_cra_exit(crypto_aead_tfm(tfm));
+ if (ctx->fallback)
+ crypto_free_sync_skcipher(ctx->fallback);
+
+ ctx->fallback = NULL;
if (ctx->ctr)
crypto_free_skcipher(ctx->ctr);
@@ -679,78 +682,69 @@ static void omap_aes_gcm_cra_exit(struct crypto_aead *tfm)
/* ********************** ALGS ************************************ */
-static struct crypto_alg algs_ecb_cbc[] = {
+static struct skcipher_alg algs_ecb_cbc[] = {
{
- .cra_name = "ecb(aes)",
- .cra_driver_name = "ecb-aes-omap",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct omap_aes_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = omap_aes_cra_init,
- .cra_exit = omap_aes_cra_exit,
- .cra_u.ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = omap_aes_setkey,
- .encrypt = omap_aes_ecb_encrypt,
- .decrypt = omap_aes_ecb_decrypt,
- }
+ .base.cra_name = "ecb(aes)",
+ .base.cra_driver_name = "ecb-aes-omap",
+ .base.cra_priority = 300,
+ .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct omap_aes_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = omap_aes_setkey,
+ .encrypt = omap_aes_ecb_encrypt,
+ .decrypt = omap_aes_ecb_decrypt,
+ .init = omap_aes_init_tfm,
+ .exit = omap_aes_exit_tfm,
},
{
- .cra_name = "cbc(aes)",
- .cra_driver_name = "cbc-aes-omap",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct omap_aes_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = omap_aes_cra_init,
- .cra_exit = omap_aes_cra_exit,
- .cra_u.ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = omap_aes_setkey,
- .encrypt = omap_aes_cbc_encrypt,
- .decrypt = omap_aes_cbc_decrypt,
- }
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "cbc-aes-omap",
+ .base.cra_priority = 300,
+ .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct omap_aes_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = omap_aes_setkey,
+ .encrypt = omap_aes_cbc_encrypt,
+ .decrypt = omap_aes_cbc_decrypt,
+ .init = omap_aes_init_tfm,
+ .exit = omap_aes_exit_tfm,
}
};
-static struct crypto_alg algs_ctr[] = {
+static struct skcipher_alg algs_ctr[] = {
{
- .cra_name = "ctr(aes)",
- .cra_driver_name = "ctr-aes-omap",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct omap_aes_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = omap_aes_cra_init,
- .cra_exit = omap_aes_cra_exit,
- .cra_u.ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = omap_aes_setkey,
- .encrypt = omap_aes_ctr_encrypt,
- .decrypt = omap_aes_ctr_decrypt,
- }
-} ,
+ .base.cra_name = "ctr(aes)",
+ .base.cra_driver_name = "ctr-aes-omap",
+ .base.cra_priority = 300,
+ .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct omap_aes_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = omap_aes_setkey,
+ .encrypt = omap_aes_ctr_encrypt,
+ .decrypt = omap_aes_ctr_decrypt,
+ .init = omap_aes_init_tfm,
+ .exit = omap_aes_exit_tfm,
+}
};
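
For reference, here is what a converted template looks like in isolation: the generic crypto_alg fields live under .base, the cipher geometry and operation hooks become first-class members, and CRYPTO_ALG_TYPE_ABLKCIPHER, .cra_type and .cra_u disappear. A self-contained sketch with stub callbacks (nothing here is omap code; sizes are arbitrary):

#include <crypto/internal/skcipher.h>
#include <linux/module.h>

struct example_tmpl_ctx { u32 unused; };

static int example_setkey(struct crypto_skcipher *tfm, const u8 *key,
			  unsigned int keylen)
{
	return 0;		/* stub */
}

static int example_crypt(struct skcipher_request *req)
{
	return -EOPNOTSUPP;	/* stub */
}

static struct skcipher_alg example_alg = {
	.base.cra_name		= "cbc(example)",
	.base.cra_driver_name	= "cbc-example-sketch",
	.base.cra_priority	= 100,
	.base.cra_flags		= CRYPTO_ALG_ASYNC,
	.base.cra_blocksize	= 16,
	.base.cra_ctxsize	= sizeof(struct example_tmpl_ctx),
	.base.cra_module	= THIS_MODULE,

	.min_keysize		= 16,
	.max_keysize		= 32,
	.ivsize			= 16,
	.setkey			= example_setkey,
	.encrypt		= example_crypt,
	.decrypt		= example_crypt,
};

static int __init example_mod_init(void)
{
	return crypto_register_skcipher(&example_alg);
}
module_init(example_mod_init);

static void __exit example_mod_exit(void)
{
	crypto_unregister_skcipher(&example_alg);
}
module_exit(example_mod_exit);
MODULE_LICENSE("GPL");
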
static struct omap_aes_algs_info omap_aes_algs_info_ecb_cbc[] = {
@@ -1121,7 +1115,7 @@ static int omap_aes_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct omap_aes_dev *dd;
- struct crypto_alg *algp;
+ struct skcipher_alg *algp;
struct aead_alg *aalg;
struct resource res;
int err = -ENOMEM, i, j, irq = -1;
@@ -1215,9 +1209,9 @@ static int omap_aes_probe(struct platform_device *pdev)
for (j = 0; j < dd->pdata->algs_info[i].size; j++) {
algp = &dd->pdata->algs_info[i].algs_list[j];
- pr_debug("reg alg: %s\n", algp->cra_name);
+ pr_debug("reg alg: %s\n", algp->base.cra_name);
- err = crypto_register_alg(algp);
+ err = crypto_register_skcipher(algp);
if (err)
goto err_algs;
@@ -1230,9 +1224,8 @@ static int omap_aes_probe(struct platform_device *pdev)
!dd->pdata->aead_algs_info->registered) {
for (i = 0; i < dd->pdata->aead_algs_info->size; i++) {
aalg = &dd->pdata->aead_algs_info->algs_list[i];
- algp = &aalg->base;
- pr_debug("reg alg: %s\n", algp->cra_name);
+ pr_debug("reg alg: %s\n", aalg->base.cra_name);
err = crypto_register_aead(aalg);
if (err)
@@ -1257,7 +1250,7 @@ err_aead_algs:
err_algs:
for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
- crypto_unregister_alg(
+ crypto_unregister_skcipher(
&dd->pdata->algs_info[i].algs_list[j]);
err_engine:
@@ -1290,7 +1283,7 @@ static int omap_aes_remove(struct platform_device *pdev)
for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
- crypto_unregister_alg(
+ crypto_unregister_skcipher(
&dd->pdata->algs_info[i].algs_list[j]);
for (i = dd->pdata->aead_algs_info->size - 1; i >= 0; i--) {
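
The crypto_engine plumbing above follows one pattern throughout: callbacks receive a void *areq, recover the skcipher_request via container_of on its base member, and work from cryptlen/iv instead of the old nbytes/info fields. A distilled sketch (example_hw_run is a placeholder, not a real API):

#include <crypto/engine.h>
#include <crypto/internal/skcipher.h>

/* placeholder for programming the DMA/hardware */
static int example_hw_run(struct scatterlist *src, struct scatterlist *dst,
			  unsigned int len, u8 *iv)
{
	return 0;
}

static int example_crypt_req(struct crypto_engine *engine, void *areq)
{
	struct skcipher_request *req =
		container_of(areq, struct skcipher_request, base);

	/*
	 * Start the transfer; completion is reported later (e.g. from a
	 * tasklet) with crypto_finalize_skcipher_request(engine, req, err).
	 */
	return example_hw_run(req->src, req->dst, req->cryptlen, req->iv);
}
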
diff --git a/drivers/crypto/omap-aes.h b/drivers/crypto/omap-aes.h
index 2d4b1f87a1c9..2d3575231e31 100644
--- a/drivers/crypto/omap-aes.h
+++ b/drivers/crypto/omap-aes.h
@@ -112,7 +112,7 @@ struct omap_aes_reqctx {
#define OMAP_AES_CACHE_SIZE 0
struct omap_aes_algs_info {
- struct crypto_alg *algs_list;
+ struct skcipher_alg *algs_list;
unsigned int size;
unsigned int registered;
};
@@ -162,7 +162,7 @@ struct omap_aes_dev {
struct aead_queue aead_queue;
spinlock_t lock;
- struct ablkcipher_request *req;
+ struct skcipher_request *req;
struct aead_request *aead_req;
struct crypto_engine *engine;
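
omap-aes above (and picoxcell further down) also keeps a software fallback for sizes the hardware cannot handle, driven through an on-stack sync-skcipher request. A minimal sketch of that pattern, assuming fb was allocated with crypto_alloc_sync_skcipher():

#include <crypto/skcipher.h>

static int example_fallback(struct skcipher_request *req,
			    struct crypto_sync_skcipher *fb, bool enc)
{
	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, fb);
	int err;

	skcipher_request_set_sync_tfm(subreq, fb);
	skcipher_request_set_callback(subreq, req->base.flags, NULL, NULL);
	skcipher_request_set_crypt(subreq, req->src, req->dst,
				   req->cryptlen, req->iv);

	err = enc ? crypto_skcipher_encrypt(subreq)
		  : crypto_skcipher_decrypt(subreq);
	skcipher_request_zero(subreq);	/* wipe the on-stack request */
	return err;
}
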
diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c
index b19d7e5d55ec..4c4dbc2b377e 100644
--- a/drivers/crypto/omap-des.c
+++ b/drivers/crypto/omap-des.c
@@ -34,6 +34,7 @@
#include <linux/interrupt.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/des.h>
+#include <crypto/internal/skcipher.h>
#include <crypto/algapi.h>
#include <crypto/engine.h>
@@ -98,7 +99,7 @@ struct omap_des_reqctx {
#define OMAP_DES_CACHE_SIZE 0
struct omap_des_algs_info {
- struct crypto_alg *algs_list;
+ struct skcipher_alg *algs_list;
unsigned int size;
unsigned int registered;
};
@@ -139,7 +140,7 @@ struct omap_des_dev {
struct tasklet_struct done_task;
- struct ablkcipher_request *req;
+ struct skcipher_request *req;
struct crypto_engine *engine;
/*
* total is used by PIO mode for bookkeeping so introduce
@@ -261,8 +262,8 @@ static int omap_des_write_ctrl(struct omap_des_dev *dd)
__le32_to_cpu(dd->ctx->key[i]));
}
- if ((dd->flags & FLAGS_CBC) && dd->req->info)
- omap_des_write_n(dd, DES_REG_IV(dd, 0), dd->req->info, 2);
+ if ((dd->flags & FLAGS_CBC) && dd->req->iv)
+ omap_des_write_n(dd, DES_REG_IV(dd, 0), (void *)dd->req->iv, 2);
if (dd->flags & FLAGS_CBC)
val |= DES_REG_CTRL_CBC;
@@ -456,8 +457,8 @@ static int omap_des_crypt_dma(struct crypto_tfm *tfm,
static int omap_des_crypt_dma_start(struct omap_des_dev *dd)
{
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(
- crypto_ablkcipher_reqtfm(dd->req));
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(
+ crypto_skcipher_reqtfm(dd->req));
int err;
pr_debug("total: %d\n", dd->total);
@@ -491,11 +492,11 @@ static int omap_des_crypt_dma_start(struct omap_des_dev *dd)
static void omap_des_finish_req(struct omap_des_dev *dd, int err)
{
- struct ablkcipher_request *req = dd->req;
+ struct skcipher_request *req = dd->req;
pr_debug("err: %d\n", err);
- crypto_finalize_ablkcipher_request(dd->engine, req, err);
+ crypto_finalize_skcipher_request(dd->engine, req, err);
pm_runtime_mark_last_busy(dd->dev);
pm_runtime_put_autosuspend(dd->dev);
@@ -514,10 +515,10 @@ static int omap_des_crypt_dma_stop(struct omap_des_dev *dd)
}
static int omap_des_handle_queue(struct omap_des_dev *dd,
- struct ablkcipher_request *req)
+ struct skcipher_request *req)
{
if (req)
- return crypto_transfer_ablkcipher_request_to_engine(dd->engine, req);
+ return crypto_transfer_skcipher_request_to_engine(dd->engine, req);
return 0;
}
@@ -525,9 +526,9 @@ static int omap_des_handle_queue(struct omap_des_dev *dd,
static int omap_des_prepare_req(struct crypto_engine *engine,
void *areq)
{
- struct ablkcipher_request *req = container_of(areq, struct ablkcipher_request, base);
- struct omap_des_ctx *ctx = crypto_ablkcipher_ctx(
- crypto_ablkcipher_reqtfm(req));
+ struct skcipher_request *req = container_of(areq, struct skcipher_request, base);
+ struct omap_des_ctx *ctx = crypto_skcipher_ctx(
+ crypto_skcipher_reqtfm(req));
struct omap_des_dev *dd = omap_des_find_dev(ctx);
struct omap_des_reqctx *rctx;
int ret;
@@ -538,8 +539,8 @@ static int omap_des_prepare_req(struct crypto_engine *engine,
/* assign new request to device */
dd->req = req;
- dd->total = req->nbytes;
- dd->total_save = req->nbytes;
+ dd->total = req->cryptlen;
+ dd->total_save = req->cryptlen;
dd->in_sg = req->src;
dd->out_sg = req->dst;
dd->orig_out = req->dst;
@@ -568,8 +569,8 @@ static int omap_des_prepare_req(struct crypto_engine *engine,
if (dd->out_sg_len < 0)
return dd->out_sg_len;
- rctx = ablkcipher_request_ctx(req);
- ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
+ rctx = skcipher_request_ctx(req);
+ ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
rctx->mode &= FLAGS_MODE_MASK;
dd->flags = (dd->flags & ~FLAGS_MODE_MASK) | rctx->mode;
@@ -582,9 +583,9 @@ static int omap_des_prepare_req(struct crypto_engine *engine,
static int omap_des_crypt_req(struct crypto_engine *engine,
void *areq)
{
- struct ablkcipher_request *req = container_of(areq, struct ablkcipher_request, base);
- struct omap_des_ctx *ctx = crypto_ablkcipher_ctx(
- crypto_ablkcipher_reqtfm(req));
+ struct skcipher_request *req = container_of(areq, struct skcipher_request, base);
+ struct omap_des_ctx *ctx = crypto_skcipher_ctx(
+ crypto_skcipher_reqtfm(req));
struct omap_des_dev *dd = omap_des_find_dev(ctx);
if (!dd)
@@ -619,18 +620,18 @@ static void omap_des_done_task(unsigned long data)
pr_debug("exit\n");
}
-static int omap_des_crypt(struct ablkcipher_request *req, unsigned long mode)
+static int omap_des_crypt(struct skcipher_request *req, unsigned long mode)
{
- struct omap_des_ctx *ctx = crypto_ablkcipher_ctx(
- crypto_ablkcipher_reqtfm(req));
- struct omap_des_reqctx *rctx = ablkcipher_request_ctx(req);
+ struct omap_des_ctx *ctx = crypto_skcipher_ctx(
+ crypto_skcipher_reqtfm(req));
+ struct omap_des_reqctx *rctx = skcipher_request_ctx(req);
struct omap_des_dev *dd;
- pr_debug("nbytes: %d, enc: %d, cbc: %d\n", req->nbytes,
+ pr_debug("nbytes: %d, enc: %d, cbc: %d\n", req->cryptlen,
!!(mode & FLAGS_ENCRYPT),
!!(mode & FLAGS_CBC));
- if (!IS_ALIGNED(req->nbytes, DES_BLOCK_SIZE)) {
+ if (!IS_ALIGNED(req->cryptlen, DES_BLOCK_SIZE)) {
pr_err("request size is not exact amount of DES blocks\n");
return -EINVAL;
}
@@ -646,15 +647,15 @@ static int omap_des_crypt(struct ablkcipher_request *req, unsigned long mode)
/* ********************** ALG API ************************************ */
-static int omap_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+static int omap_des_setkey(struct crypto_skcipher *cipher, const u8 *key,
unsigned int keylen)
{
- struct omap_des_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ struct omap_des_ctx *ctx = crypto_skcipher_ctx(cipher);
int err;
pr_debug("enter, keylen: %d\n", keylen);
- err = verify_ablkcipher_des_key(cipher, key);
+ err = verify_skcipher_des_key(cipher, key);
if (err)
return err;
@@ -664,15 +665,15 @@ static int omap_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
return 0;
}
-static int omap_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+static int omap_des3_setkey(struct crypto_skcipher *cipher, const u8 *key,
unsigned int keylen)
{
- struct omap_des_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ struct omap_des_ctx *ctx = crypto_skcipher_ctx(cipher);
int err;
pr_debug("enter, keylen: %d\n", keylen);
- err = verify_ablkcipher_des3_key(cipher, key);
+ err = verify_skcipher_des3_key(cipher, key);
if (err)
return err;
@@ -682,22 +683,22 @@ static int omap_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
return 0;
}
-static int omap_des_ecb_encrypt(struct ablkcipher_request *req)
+static int omap_des_ecb_encrypt(struct skcipher_request *req)
{
return omap_des_crypt(req, FLAGS_ENCRYPT);
}
-static int omap_des_ecb_decrypt(struct ablkcipher_request *req)
+static int omap_des_ecb_decrypt(struct skcipher_request *req)
{
return omap_des_crypt(req, 0);
}
-static int omap_des_cbc_encrypt(struct ablkcipher_request *req)
+static int omap_des_cbc_encrypt(struct skcipher_request *req)
{
return omap_des_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
}
-static int omap_des_cbc_decrypt(struct ablkcipher_request *req)
+static int omap_des_cbc_decrypt(struct skcipher_request *req)
{
return omap_des_crypt(req, FLAGS_CBC);
}
@@ -707,13 +708,13 @@ static int omap_des_prepare_req(struct crypto_engine *engine,
static int omap_des_crypt_req(struct crypto_engine *engine,
void *areq);
-static int omap_des_cra_init(struct crypto_tfm *tfm)
+static int omap_des_init_tfm(struct crypto_skcipher *tfm)
{
- struct omap_des_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct omap_des_ctx *ctx = crypto_skcipher_ctx(tfm);
pr_debug("enter\n");
- tfm->crt_ablkcipher.reqsize = sizeof(struct omap_des_reqctx);
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct omap_des_reqctx));
ctx->enginectx.op.prepare_request = omap_des_prepare_req;
ctx->enginectx.op.unprepare_request = NULL;
@@ -722,103 +723,78 @@ static int omap_des_cra_init(struct crypto_tfm *tfm)
return 0;
}
-static void omap_des_cra_exit(struct crypto_tfm *tfm)
-{
- pr_debug("enter\n");
-}
-
/* ********************** ALGS ************************************ */
-static struct crypto_alg algs_ecb_cbc[] = {
+static struct skcipher_alg algs_ecb_cbc[] = {
{
- .cra_name = "ecb(des)",
- .cra_driver_name = "ecb-des-omap",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
+ .base.cra_name = "ecb(des)",
+ .base.cra_driver_name = "ecb-des-omap",
+ .base.cra_priority = 100,
+ .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct omap_des_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = omap_des_cra_init,
- .cra_exit = omap_des_cra_exit,
- .cra_u.ablkcipher = {
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .setkey = omap_des_setkey,
- .encrypt = omap_des_ecb_encrypt,
- .decrypt = omap_des_ecb_decrypt,
- }
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct omap_des_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .setkey = omap_des_setkey,
+ .encrypt = omap_des_ecb_encrypt,
+ .decrypt = omap_des_ecb_decrypt,
+ .init = omap_des_init_tfm,
},
{
- .cra_name = "cbc(des)",
- .cra_driver_name = "cbc-des-omap",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
+ .base.cra_name = "cbc(des)",
+ .base.cra_driver_name = "cbc-des-omap",
+ .base.cra_priority = 100,
+ .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct omap_des_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = omap_des_cra_init,
- .cra_exit = omap_des_cra_exit,
- .cra_u.ablkcipher = {
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = omap_des_setkey,
- .encrypt = omap_des_cbc_encrypt,
- .decrypt = omap_des_cbc_decrypt,
- }
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct omap_des_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = omap_des_setkey,
+ .encrypt = omap_des_cbc_encrypt,
+ .decrypt = omap_des_cbc_decrypt,
+ .init = omap_des_init_tfm,
},
{
- .cra_name = "ecb(des3_ede)",
- .cra_driver_name = "ecb-des3-omap",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
+ .base.cra_name = "ecb(des3_ede)",
+ .base.cra_driver_name = "ecb-des3-omap",
+ .base.cra_priority = 100,
+ .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct omap_des_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = omap_des_cra_init,
- .cra_exit = omap_des_cra_exit,
- .cra_u.ablkcipher = {
- .min_keysize = 3*DES_KEY_SIZE,
- .max_keysize = 3*DES_KEY_SIZE,
- .setkey = omap_des3_setkey,
- .encrypt = omap_des_ecb_encrypt,
- .decrypt = omap_des_ecb_decrypt,
- }
+ .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct omap_des_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .setkey = omap_des3_setkey,
+ .encrypt = omap_des_ecb_encrypt,
+ .decrypt = omap_des_ecb_decrypt,
+ .init = omap_des_init_tfm,
},
{
- .cra_name = "cbc(des3_ede)",
- .cra_driver_name = "cbc-des3-omap",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
+ .base.cra_name = "cbc(des3_ede)",
+ .base.cra_driver_name = "cbc-des3-omap",
+ .base.cra_priority = 100,
+ .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct omap_des_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = omap_des_cra_init,
- .cra_exit = omap_des_cra_exit,
- .cra_u.ablkcipher = {
- .min_keysize = 3*DES_KEY_SIZE,
- .max_keysize = 3*DES_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = omap_des3_setkey,
- .encrypt = omap_des_cbc_encrypt,
- .decrypt = omap_des_cbc_decrypt,
- }
+ .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct omap_des_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .setkey = omap_des3_setkey,
+ .encrypt = omap_des_cbc_encrypt,
+ .decrypt = omap_des_cbc_decrypt,
+ .init = omap_des_init_tfm,
}
};
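
Beyond the API switch, the des3_ede entries above also trade the open-coded sizes (3*DES_KEY_SIZE, DES_BLOCK_SIZE) for the named constants. Numerically nothing changes, which the definitions in <crypto/des.h> make easy to check:

#include <crypto/des.h>
#include <linux/build_bug.h>

/* the equivalences the hunk above relies on */
static_assert(DES3_EDE_KEY_SIZE == 3 * DES_KEY_SIZE);	/* 24 bytes */
static_assert(DES3_EDE_BLOCK_SIZE == DES_BLOCK_SIZE);	/* 8 bytes */
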
@@ -976,7 +952,7 @@ static int omap_des_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct omap_des_dev *dd;
- struct crypto_alg *algp;
+ struct skcipher_alg *algp;
struct resource *res;
int err = -ENOMEM, i, j, irq = -1;
u32 reg;
@@ -1071,9 +1047,9 @@ static int omap_des_probe(struct platform_device *pdev)
for (j = 0; j < dd->pdata->algs_info[i].size; j++) {
algp = &dd->pdata->algs_info[i].algs_list[j];
- pr_debug("reg alg: %s\n", algp->cra_name);
+ pr_debug("reg alg: %s\n", algp->base.cra_name);
- err = crypto_register_alg(algp);
+ err = crypto_register_skcipher(algp);
if (err)
goto err_algs;
@@ -1086,7 +1062,7 @@ static int omap_des_probe(struct platform_device *pdev)
err_algs:
for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
- crypto_unregister_alg(
+ crypto_unregister_skcipher(
&dd->pdata->algs_info[i].algs_list[j]);
err_engine:
@@ -1119,7 +1095,7 @@ static int omap_des_remove(struct platform_device *pdev)
for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
- crypto_unregister_alg(
+ crypto_unregister_skcipher(
&dd->pdata->algs_info[i].algs_list[j]);
tasklet_kill(&dd->done_task);
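
The omap-des setkey paths now use the skcipher-typed DES key checkers from <crypto/internal/des.h>. A minimal sketch of a 3DES setkey under the new API (example_des_ctx is illustrative; key length is already bounded by the alg's min/max_keysize before setkey runs):

#include <crypto/internal/des.h>
#include <crypto/internal/skcipher.h>
#include <linux/string.h>

struct example_des_ctx {
	u8 key[DES3_EDE_KEY_SIZE];
};

static int example_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
			       unsigned int keylen)
{
	struct example_des_ctx *ctx = crypto_skcipher_ctx(tfm);
	int err;

	/* rejects degenerate keys when FORBID_WEAK_KEYS is requested */
	err = verify_skcipher_des3_key(tfm, key);
	if (err)
		return err;

	memcpy(ctx->key, key, keylen);
	return 0;
}
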
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index 8a0661250078..c5b60f50e1b5 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -10,6 +10,7 @@
#include <crypto/algapi.h>
#include <crypto/aes.h>
+#include <crypto/internal/skcipher.h>
#include <crypto/padlock.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -97,9 +98,9 @@ static inline struct aes_ctx *aes_ctx(struct crypto_tfm *tfm)
return aes_ctx_common(crypto_tfm_ctx(tfm));
}
-static inline struct aes_ctx *blk_aes_ctx(struct crypto_blkcipher *tfm)
+static inline struct aes_ctx *skcipher_aes_ctx(struct crypto_skcipher *tfm)
{
- return aes_ctx_common(crypto_blkcipher_ctx(tfm));
+ return aes_ctx_common(crypto_skcipher_ctx(tfm));
}
static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
@@ -162,6 +163,12 @@ ok:
return 0;
}
+static int aes_set_key_skcipher(struct crypto_skcipher *tfm, const u8 *in_key,
+ unsigned int key_len)
+{
+ return aes_set_key(crypto_skcipher_tfm(tfm), in_key, key_len);
+}
+
/* ====== Encryption/decryption routines ====== */
/* These are the real call to PadLock. */
@@ -338,25 +345,24 @@ static struct crypto_alg aes_alg = {
}
};
-static int ecb_aes_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int ecb_aes_encrypt(struct skcipher_request *req)
{
- struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
- struct blkcipher_walk walk;
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct aes_ctx *ctx = skcipher_aes_ctx(tfm);
+ struct skcipher_walk walk;
+ unsigned int nbytes;
int err;
padlock_reset_key(&ctx->cword.encrypt);
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
+ err = skcipher_walk_virt(&walk, req, false);
- while ((nbytes = walk.nbytes)) {
+ while ((nbytes = walk.nbytes) != 0) {
padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
ctx->E, &ctx->cword.encrypt,
nbytes / AES_BLOCK_SIZE);
nbytes &= AES_BLOCK_SIZE - 1;
- err = blkcipher_walk_done(desc, &walk, nbytes);
+ err = skcipher_walk_done(&walk, nbytes);
}
padlock_store_cword(&ctx->cword.encrypt);
@@ -364,25 +370,24 @@ static int ecb_aes_encrypt(struct blkcipher_desc *desc,
return err;
}
-static int ecb_aes_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int ecb_aes_decrypt(struct skcipher_request *req)
{
- struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
- struct blkcipher_walk walk;
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct aes_ctx *ctx = skcipher_aes_ctx(tfm);
+ struct skcipher_walk walk;
+ unsigned int nbytes;
int err;
padlock_reset_key(&ctx->cword.decrypt);
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
+ err = skcipher_walk_virt(&walk, req, false);
- while ((nbytes = walk.nbytes)) {
+ while ((nbytes = walk.nbytes) != 0) {
padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
ctx->D, &ctx->cword.decrypt,
nbytes / AES_BLOCK_SIZE);
nbytes &= AES_BLOCK_SIZE - 1;
- err = blkcipher_walk_done(desc, &walk, nbytes);
+ err = skcipher_walk_done(&walk, nbytes);
}
padlock_store_cword(&ctx->cword.encrypt);
@@ -390,48 +395,41 @@ static int ecb_aes_decrypt(struct blkcipher_desc *desc,
return err;
}
-static struct crypto_alg ecb_aes_alg = {
- .cra_name = "ecb(aes)",
- .cra_driver_name = "ecb-aes-padlock",
- .cra_priority = PADLOCK_COMPOSITE_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct aes_ctx),
- .cra_alignmask = PADLOCK_ALIGNMENT - 1,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = aes_set_key,
- .encrypt = ecb_aes_encrypt,
- .decrypt = ecb_aes_decrypt,
- }
- }
+static struct skcipher_alg ecb_aes_alg = {
+ .base.cra_name = "ecb(aes)",
+ .base.cra_driver_name = "ecb-aes-padlock",
+ .base.cra_priority = PADLOCK_COMPOSITE_PRIORITY,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct aes_ctx),
+ .base.cra_alignmask = PADLOCK_ALIGNMENT - 1,
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = aes_set_key_skcipher,
+ .encrypt = ecb_aes_encrypt,
+ .decrypt = ecb_aes_decrypt,
};
-static int cbc_aes_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int cbc_aes_encrypt(struct skcipher_request *req)
{
- struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
- struct blkcipher_walk walk;
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct aes_ctx *ctx = skcipher_aes_ctx(tfm);
+ struct skcipher_walk walk;
+ unsigned int nbytes;
int err;
padlock_reset_key(&ctx->cword.encrypt);
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
+ err = skcipher_walk_virt(&walk, req, false);
- while ((nbytes = walk.nbytes)) {
+ while ((nbytes = walk.nbytes) != 0) {
u8 *iv = padlock_xcrypt_cbc(walk.src.virt.addr,
walk.dst.virt.addr, ctx->E,
walk.iv, &ctx->cword.encrypt,
nbytes / AES_BLOCK_SIZE);
memcpy(walk.iv, iv, AES_BLOCK_SIZE);
nbytes &= AES_BLOCK_SIZE - 1;
- err = blkcipher_walk_done(desc, &walk, nbytes);
+ err = skcipher_walk_done(&walk, nbytes);
}
padlock_store_cword(&ctx->cword.decrypt);
@@ -439,25 +437,24 @@ static int cbc_aes_encrypt(struct blkcipher_desc *desc,
return err;
}
-static int cbc_aes_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int cbc_aes_decrypt(struct skcipher_request *req)
{
- struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
- struct blkcipher_walk walk;
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct aes_ctx *ctx = skcipher_aes_ctx(tfm);
+ struct skcipher_walk walk;
+ unsigned int nbytes;
int err;
padlock_reset_key(&ctx->cword.encrypt);
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
+ err = skcipher_walk_virt(&walk, req, false);
- while ((nbytes = walk.nbytes)) {
+ while ((nbytes = walk.nbytes) != 0) {
padlock_xcrypt_cbc(walk.src.virt.addr, walk.dst.virt.addr,
ctx->D, walk.iv, &ctx->cword.decrypt,
nbytes / AES_BLOCK_SIZE);
nbytes &= AES_BLOCK_SIZE - 1;
- err = blkcipher_walk_done(desc, &walk, nbytes);
+ err = skcipher_walk_done(&walk, nbytes);
}
padlock_store_cword(&ctx->cword.encrypt);
@@ -465,26 +462,20 @@ static int cbc_aes_decrypt(struct blkcipher_desc *desc,
return err;
}
-static struct crypto_alg cbc_aes_alg = {
- .cra_name = "cbc(aes)",
- .cra_driver_name = "cbc-aes-padlock",
- .cra_priority = PADLOCK_COMPOSITE_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct aes_ctx),
- .cra_alignmask = PADLOCK_ALIGNMENT - 1,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = aes_set_key,
- .encrypt = cbc_aes_encrypt,
- .decrypt = cbc_aes_decrypt,
- }
- }
+static struct skcipher_alg cbc_aes_alg = {
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "cbc-aes-padlock",
+ .base.cra_priority = PADLOCK_COMPOSITE_PRIORITY,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct aes_ctx),
+ .base.cra_alignmask = PADLOCK_ALIGNMENT - 1,
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = aes_set_key_skcipher,
+ .encrypt = cbc_aes_encrypt,
+ .decrypt = cbc_aes_decrypt,
};
static const struct x86_cpu_id padlock_cpu_id[] = {
@@ -506,13 +497,13 @@ static int __init padlock_init(void)
return -ENODEV;
}
- if ((ret = crypto_register_alg(&aes_alg)))
+ if ((ret = crypto_register_alg(&aes_alg)) != 0)
goto aes_err;
- if ((ret = crypto_register_alg(&ecb_aes_alg)))
+ if ((ret = crypto_register_skcipher(&ecb_aes_alg)) != 0)
goto ecb_aes_err;
- if ((ret = crypto_register_alg(&cbc_aes_alg)))
+ if ((ret = crypto_register_skcipher(&cbc_aes_alg)) != 0)
goto cbc_aes_err;
printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n");
@@ -527,7 +518,7 @@ out:
return ret;
cbc_aes_err:
- crypto_unregister_alg(&ecb_aes_alg);
+ crypto_unregister_skcipher(&ecb_aes_alg);
ecb_aes_err:
crypto_unregister_alg(&aes_alg);
aes_err:
@@ -537,8 +528,8 @@ aes_err:
static void __exit padlock_fini(void)
{
- crypto_unregister_alg(&cbc_aes_alg);
- crypto_unregister_alg(&ecb_aes_alg);
+ crypto_unregister_skcipher(&cbc_aes_alg);
+ crypto_unregister_skcipher(&ecb_aes_alg);
crypto_unregister_alg(&aes_alg);
}
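
The padlock conversion above is the cleanest illustration of the skcipher_walk loop that replaces blkcipher_walk: there is no desc, the request carries everything, and the unprocessed tail of each chunk is handed back through skcipher_walk_done(). A distilled sketch, with example_xcrypt standing in for the PadLock primitive:

#include <crypto/aes.h>
#include <crypto/internal/skcipher.h>

/* stand-in for the hardware ECB block operation */
static void example_xcrypt(const u8 *src, u8 *dst, unsigned int nblocks)
{
}

static int example_ecb_crypt(struct skcipher_request *req)
{
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	/* false: the walk may sleep, subject to the request's flags */
	err = skcipher_walk_virt(&walk, req, false);

	while ((nbytes = walk.nbytes) != 0) {
		example_xcrypt(walk.src.virt.addr, walk.dst.virt.addr,
			       nbytes / AES_BLOCK_SIZE);
		/* return the sub-block remainder; 0 finishes this chunk */
		err = skcipher_walk_done(&walk,
					 nbytes & (AES_BLOCK_SIZE - 1));
	}

	return err;
}
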
diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c
index 3cbefb41b099..29da449b3e9e 100644
--- a/drivers/crypto/picoxcell_crypto.c
+++ b/drivers/crypto/picoxcell_crypto.c
@@ -134,7 +134,7 @@ struct spacc_engine {
struct spacc_alg {
unsigned long ctrl_default;
unsigned long type;
- struct crypto_alg alg;
+ struct skcipher_alg alg;
struct spacc_engine *engine;
struct list_head entry;
int key_offs;
@@ -173,7 +173,7 @@ struct spacc_aead_ctx {
static int spacc_ablk_submit(struct spacc_req *req);
-static inline struct spacc_alg *to_spacc_alg(struct crypto_alg *alg)
+static inline struct spacc_alg *to_spacc_skcipher(struct skcipher_alg *alg)
{
return alg ? container_of(alg, struct spacc_alg, alg) : NULL;
}
@@ -733,13 +733,13 @@ static void spacc_aead_cra_exit(struct crypto_aead *tfm)
* Set the DES key for a block cipher transform. This also performs weak key
* checking if the transform has requested it.
*/
-static int spacc_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+static int spacc_des_setkey(struct crypto_skcipher *cipher, const u8 *key,
unsigned int len)
{
- struct spacc_ablk_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ struct spacc_ablk_ctx *ctx = crypto_skcipher_ctx(cipher);
int err;
- err = verify_ablkcipher_des_key(cipher, key);
+ err = verify_skcipher_des_key(cipher, key);
if (err)
return err;
@@ -753,13 +753,13 @@ static int spacc_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
* Set the 3DES key for a block cipher transform. This also performs weak key
* checking if the transform has requested it.
*/
-static int spacc_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+static int spacc_des3_setkey(struct crypto_skcipher *cipher, const u8 *key,
unsigned int len)
{
- struct spacc_ablk_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ struct spacc_ablk_ctx *ctx = crypto_skcipher_ctx(cipher);
int err;
- err = verify_ablkcipher_des3_key(cipher, key);
+ err = verify_skcipher_des3_key(cipher, key);
if (err)
return err;
@@ -773,15 +773,15 @@ static int spacc_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
* Set the key for an AES block cipher. Some key lengths are not supported in
* hardware so this must also check whether a fallback is needed.
*/
-static int spacc_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+static int spacc_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
unsigned int len)
{
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
int err = 0;
if (len > AES_MAX_KEY_SIZE) {
- crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
@@ -822,15 +822,15 @@ sw_setkey_failed:
return err;
}
-static int spacc_kasumi_f8_setkey(struct crypto_ablkcipher *cipher,
+static int spacc_kasumi_f8_setkey(struct crypto_skcipher *cipher,
const u8 *key, unsigned int len)
{
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
int err = 0;
if (len > AES_MAX_KEY_SIZE) {
- crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
err = -EINVAL;
goto out;
}
@@ -844,12 +844,12 @@ out:
static int spacc_ablk_need_fallback(struct spacc_req *req)
{
+ struct skcipher_request *ablk_req = skcipher_request_cast(req->req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(ablk_req);
+ struct spacc_alg *spacc_alg = to_spacc_skcipher(crypto_skcipher_alg(tfm));
struct spacc_ablk_ctx *ctx;
- struct crypto_tfm *tfm = req->req->tfm;
- struct crypto_alg *alg = req->req->tfm->__crt_alg;
- struct spacc_alg *spacc_alg = to_spacc_alg(alg);
- ctx = crypto_tfm_ctx(tfm);
+ ctx = crypto_skcipher_ctx(tfm);
return (spacc_alg->ctrl_default & SPACC_CRYPTO_ALG_MASK) ==
SPA_CTRL_CIPH_ALG_AES &&
@@ -859,39 +859,39 @@ static int spacc_ablk_need_fallback(struct spacc_req *req)
static void spacc_ablk_complete(struct spacc_req *req)
{
- struct ablkcipher_request *ablk_req = ablkcipher_request_cast(req->req);
+ struct skcipher_request *ablk_req = skcipher_request_cast(req->req);
if (ablk_req->src != ablk_req->dst) {
spacc_free_ddt(req, req->src_ddt, req->src_addr, ablk_req->src,
- ablk_req->nbytes, DMA_TO_DEVICE);
+ ablk_req->cryptlen, DMA_TO_DEVICE);
spacc_free_ddt(req, req->dst_ddt, req->dst_addr, ablk_req->dst,
- ablk_req->nbytes, DMA_FROM_DEVICE);
+ ablk_req->cryptlen, DMA_FROM_DEVICE);
} else
spacc_free_ddt(req, req->dst_ddt, req->dst_addr, ablk_req->dst,
- ablk_req->nbytes, DMA_BIDIRECTIONAL);
+ ablk_req->cryptlen, DMA_BIDIRECTIONAL);
req->req->complete(req->req, req->result);
}
static int spacc_ablk_submit(struct spacc_req *req)
{
- struct crypto_tfm *tfm = req->req->tfm;
- struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
- struct ablkcipher_request *ablk_req = ablkcipher_request_cast(req->req);
- struct crypto_alg *alg = req->req->tfm->__crt_alg;
- struct spacc_alg *spacc_alg = to_spacc_alg(alg);
+ struct skcipher_request *ablk_req = skcipher_request_cast(req->req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(ablk_req);
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ struct spacc_alg *spacc_alg = to_spacc_skcipher(alg);
+ struct spacc_ablk_ctx *ctx = crypto_skcipher_ctx(tfm);
struct spacc_engine *engine = ctx->generic.engine;
u32 ctrl;
req->ctx_id = spacc_load_ctx(&ctx->generic, ctx->key,
- ctx->key_len, ablk_req->info, alg->cra_ablkcipher.ivsize,
+ ctx->key_len, ablk_req->iv, alg->ivsize,
NULL, 0);
writel(req->src_addr, engine->regs + SPA_SRC_PTR_REG_OFFSET);
writel(req->dst_addr, engine->regs + SPA_DST_PTR_REG_OFFSET);
writel(0, engine->regs + SPA_OFFSET_REG_OFFSET);
- writel(ablk_req->nbytes, engine->regs + SPA_PROC_LEN_REG_OFFSET);
+ writel(ablk_req->cryptlen, engine->regs + SPA_PROC_LEN_REG_OFFSET);
writel(0, engine->regs + SPA_ICV_OFFSET_REG_OFFSET);
writel(0, engine->regs + SPA_AUX_INFO_REG_OFFSET);
writel(0, engine->regs + SPA_AAD_LEN_REG_OFFSET);
@@ -907,11 +907,11 @@ static int spacc_ablk_submit(struct spacc_req *req)
return -EINPROGRESS;
}
-static int spacc_ablk_do_fallback(struct ablkcipher_request *req,
+static int spacc_ablk_do_fallback(struct skcipher_request *req,
unsigned alg_type, bool is_encrypt)
{
struct crypto_tfm *old_tfm =
- crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
+ crypto_skcipher_tfm(crypto_skcipher_reqtfm(req));
struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(old_tfm);
SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->sw_cipher);
int err;
@@ -924,7 +924,7 @@ static int spacc_ablk_do_fallback(struct ablkcipher_request *req,
skcipher_request_set_sync_tfm(subreq, ctx->sw_cipher);
skcipher_request_set_callback(subreq, req->base.flags, NULL, NULL);
skcipher_request_set_crypt(subreq, req->src, req->dst,
- req->nbytes, req->info);
+ req->cryptlen, req->iv);
err = is_encrypt ? crypto_skcipher_encrypt(subreq) :
crypto_skcipher_decrypt(subreq);
skcipher_request_zero(subreq);
@@ -932,12 +932,13 @@ static int spacc_ablk_do_fallback(struct ablkcipher_request *req,
return err;
}
-static int spacc_ablk_setup(struct ablkcipher_request *req, unsigned alg_type,
+static int spacc_ablk_setup(struct skcipher_request *req, unsigned alg_type,
bool is_encrypt)
{
- struct crypto_alg *alg = req->base.tfm->__crt_alg;
- struct spacc_engine *engine = to_spacc_alg(alg)->engine;
- struct spacc_req *dev_req = ablkcipher_request_ctx(req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ struct spacc_engine *engine = to_spacc_skcipher(alg)->engine;
+ struct spacc_req *dev_req = skcipher_request_ctx(req);
unsigned long flags;
int err = -ENOMEM;
@@ -956,17 +957,17 @@ static int spacc_ablk_setup(struct ablkcipher_request *req, unsigned alg_type,
*/
if (req->src != req->dst) {
dev_req->src_ddt = spacc_sg_to_ddt(engine, req->src,
- req->nbytes, DMA_TO_DEVICE, &dev_req->src_addr);
+ req->cryptlen, DMA_TO_DEVICE, &dev_req->src_addr);
if (!dev_req->src_ddt)
goto out;
dev_req->dst_ddt = spacc_sg_to_ddt(engine, req->dst,
- req->nbytes, DMA_FROM_DEVICE, &dev_req->dst_addr);
+ req->cryptlen, DMA_FROM_DEVICE, &dev_req->dst_addr);
if (!dev_req->dst_ddt)
goto out_free_src;
} else {
dev_req->dst_ddt = spacc_sg_to_ddt(engine, req->dst,
- req->nbytes, DMA_BIDIRECTIONAL, &dev_req->dst_addr);
+ req->cryptlen, DMA_BIDIRECTIONAL, &dev_req->dst_addr);
if (!dev_req->dst_ddt)
goto out;
@@ -999,65 +1000,65 @@ static int spacc_ablk_setup(struct ablkcipher_request *req, unsigned alg_type,
out_free_ddts:
spacc_free_ddt(dev_req, dev_req->dst_ddt, dev_req->dst_addr, req->dst,
- req->nbytes, req->src == req->dst ?
+ req->cryptlen, req->src == req->dst ?
DMA_BIDIRECTIONAL : DMA_FROM_DEVICE);
out_free_src:
if (req->src != req->dst)
spacc_free_ddt(dev_req, dev_req->src_ddt, dev_req->src_addr,
- req->src, req->nbytes, DMA_TO_DEVICE);
+ req->src, req->cryptlen, DMA_TO_DEVICE);
out:
return err;
}
-static int spacc_ablk_cra_init(struct crypto_tfm *tfm)
+static int spacc_ablk_init_tfm(struct crypto_skcipher *tfm)
{
- struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
- struct crypto_alg *alg = tfm->__crt_alg;
- struct spacc_alg *spacc_alg = to_spacc_alg(alg);
+ struct spacc_ablk_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ struct spacc_alg *spacc_alg = to_spacc_skcipher(alg);
struct spacc_engine *engine = spacc_alg->engine;
ctx->generic.flags = spacc_alg->type;
ctx->generic.engine = engine;
- if (alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
+ if (alg->base.cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
ctx->sw_cipher = crypto_alloc_sync_skcipher(
- alg->cra_name, 0, CRYPTO_ALG_NEED_FALLBACK);
+ alg->base.cra_name, 0, CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(ctx->sw_cipher)) {
dev_warn(engine->dev, "failed to allocate fallback for %s\n",
- alg->cra_name);
+ alg->base.cra_name);
return PTR_ERR(ctx->sw_cipher);
}
}
ctx->generic.key_offs = spacc_alg->key_offs;
ctx->generic.iv_offs = spacc_alg->iv_offs;
- tfm->crt_ablkcipher.reqsize = sizeof(struct spacc_req);
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct spacc_req));
return 0;
}
-static void spacc_ablk_cra_exit(struct crypto_tfm *tfm)
+static void spacc_ablk_exit_tfm(struct crypto_skcipher *tfm)
{
- struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct spacc_ablk_ctx *ctx = crypto_skcipher_ctx(tfm);
crypto_free_sync_skcipher(ctx->sw_cipher);
}
-static int spacc_ablk_encrypt(struct ablkcipher_request *req)
+static int spacc_ablk_encrypt(struct skcipher_request *req)
{
- struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
- struct spacc_alg *alg = to_spacc_alg(tfm->__crt_alg);
+ struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
+ struct skcipher_alg *alg = crypto_skcipher_alg(cipher);
+ struct spacc_alg *spacc_alg = to_spacc_skcipher(alg);
- return spacc_ablk_setup(req, alg->type, 1);
+ return spacc_ablk_setup(req, spacc_alg->type, 1);
}
-static int spacc_ablk_decrypt(struct ablkcipher_request *req)
+static int spacc_ablk_decrypt(struct skcipher_request *req)
{
- struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
- struct spacc_alg *alg = to_spacc_alg(tfm->__crt_alg);
+ struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
+ struct skcipher_alg *alg = crypto_skcipher_alg(cipher);
+ struct spacc_alg *spacc_alg = to_spacc_skcipher(alg);
- return spacc_ablk_setup(req, alg->type, 0);
+ return spacc_ablk_setup(req, spacc_alg->type, 0);
}
static inline int spacc_fifo_stat_empty(struct spacc_engine *engine)
@@ -1233,27 +1234,24 @@ static struct spacc_alg ipsec_engine_algs[] = {
.key_offs = 0,
.iv_offs = AES_MAX_KEY_SIZE,
.alg = {
- .cra_name = "cbc(aes)",
- .cra_driver_name = "cbc-aes-picoxcell",
- .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct spacc_ablk_ctx),
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_ablkcipher = {
- .setkey = spacc_aes_setkey,
- .encrypt = spacc_ablk_encrypt,
- .decrypt = spacc_ablk_decrypt,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- },
- .cra_init = spacc_ablk_cra_init,
- .cra_exit = spacc_ablk_cra_exit,
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "cbc-aes-picoxcell",
+ .base.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
+ .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .setkey = spacc_aes_setkey,
+ .encrypt = spacc_ablk_encrypt,
+ .decrypt = spacc_ablk_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .init = spacc_ablk_init_tfm,
+ .exit = spacc_ablk_exit_tfm,
},
},
{
@@ -1261,25 +1259,23 @@ static struct spacc_alg ipsec_engine_algs[] = {
.iv_offs = AES_MAX_KEY_SIZE,
.ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_ECB,
.alg = {
- .cra_name = "ecb(aes)",
- .cra_driver_name = "ecb-aes-picoxcell",
- .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct spacc_ablk_ctx),
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_ablkcipher = {
- .setkey = spacc_aes_setkey,
- .encrypt = spacc_ablk_encrypt,
- .decrypt = spacc_ablk_decrypt,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- },
- .cra_init = spacc_ablk_cra_init,
- .cra_exit = spacc_ablk_cra_exit,
+ .base.cra_name = "ecb(aes)",
+ .base.cra_driver_name = "ecb-aes-picoxcell",
+ .base.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
+ .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .setkey = spacc_aes_setkey,
+ .encrypt = spacc_ablk_encrypt,
+ .decrypt = spacc_ablk_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .init = spacc_ablk_init_tfm,
+ .exit = spacc_ablk_exit_tfm,
},
},
{
@@ -1287,26 +1283,23 @@ static struct spacc_alg ipsec_engine_algs[] = {
.iv_offs = 0,
.ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC,
.alg = {
- .cra_name = "cbc(des)",
- .cra_driver_name = "cbc-des-picoxcell",
- .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct spacc_ablk_ctx),
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_ablkcipher = {
- .setkey = spacc_des_setkey,
- .encrypt = spacc_ablk_encrypt,
- .decrypt = spacc_ablk_decrypt,
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- },
- .cra_init = spacc_ablk_cra_init,
- .cra_exit = spacc_ablk_cra_exit,
+ .base.cra_name = "cbc(des)",
+ .base.cra_driver_name = "cbc-des-picoxcell",
+ .base.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
+ .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .setkey = spacc_des_setkey,
+ .encrypt = spacc_ablk_encrypt,
+ .decrypt = spacc_ablk_decrypt,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .init = spacc_ablk_init_tfm,
+ .exit = spacc_ablk_exit_tfm,
},
},
{
@@ -1314,25 +1307,22 @@ static struct spacc_alg ipsec_engine_algs[] = {
.iv_offs = 0,
.ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_ECB,
.alg = {
- .cra_name = "ecb(des)",
- .cra_driver_name = "ecb-des-picoxcell",
- .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct spacc_ablk_ctx),
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_ablkcipher = {
- .setkey = spacc_des_setkey,
- .encrypt = spacc_ablk_encrypt,
- .decrypt = spacc_ablk_decrypt,
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- },
- .cra_init = spacc_ablk_cra_init,
- .cra_exit = spacc_ablk_cra_exit,
+ .base.cra_name = "ecb(des)",
+ .base.cra_driver_name = "ecb-des-picoxcell",
+ .base.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
+ .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .setkey = spacc_des_setkey,
+ .encrypt = spacc_ablk_encrypt,
+ .decrypt = spacc_ablk_decrypt,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .init = spacc_ablk_init_tfm,
+ .exit = spacc_ablk_exit_tfm,
},
},
{
@@ -1340,26 +1330,23 @@ static struct spacc_alg ipsec_engine_algs[] = {
.iv_offs = 0,
.ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC,
.alg = {
- .cra_name = "cbc(des3_ede)",
- .cra_driver_name = "cbc-des3-ede-picoxcell",
- .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct spacc_ablk_ctx),
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_ablkcipher = {
- .setkey = spacc_des3_setkey,
- .encrypt = spacc_ablk_encrypt,
- .decrypt = spacc_ablk_decrypt,
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .ivsize = DES3_EDE_BLOCK_SIZE,
- },
- .cra_init = spacc_ablk_cra_init,
- .cra_exit = spacc_ablk_cra_exit,
+ .base.cra_name = "cbc(des3_ede)",
+ .base.cra_driver_name = "cbc-des3-ede-picoxcell",
+ .base.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
+ .base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .setkey = spacc_des3_setkey,
+ .encrypt = spacc_ablk_encrypt,
+ .decrypt = spacc_ablk_decrypt,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .init = spacc_ablk_init_tfm,
+ .exit = spacc_ablk_exit_tfm,
},
},
{
@@ -1367,25 +1354,22 @@ static struct spacc_alg ipsec_engine_algs[] = {
.iv_offs = 0,
.ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_ECB,
.alg = {
- .cra_name = "ecb(des3_ede)",
- .cra_driver_name = "ecb-des3-ede-picoxcell",
- .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct spacc_ablk_ctx),
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_ablkcipher = {
- .setkey = spacc_des3_setkey,
- .encrypt = spacc_ablk_encrypt,
- .decrypt = spacc_ablk_decrypt,
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- },
- .cra_init = spacc_ablk_cra_init,
- .cra_exit = spacc_ablk_cra_exit,
+ .base.cra_name = "ecb(des3_ede)",
+ .base.cra_driver_name = "ecb-des3-ede-picoxcell",
+ .base.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
+ .base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .setkey = spacc_des3_setkey,
+ .encrypt = spacc_ablk_encrypt,
+ .decrypt = spacc_ablk_decrypt,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .init = spacc_ablk_init_tfm,
+ .exit = spacc_ablk_exit_tfm,
},
},
};
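
Every converted entry above follows the same shape: the generic crypto_alg fields move under .base, the handlers that used to sit in the nested cra_ablkcipher block become top-level skcipher_alg members, and both CRYPTO_ALG_TYPE_ABLKCIPHER and .cra_type = &crypto_ablkcipher_type disappear, since the type is implied by registering through crypto_register_skcipher(). A minimal compilable sketch of one such entry, with hypothetical demo_* names standing in for the driver's own handlers:

	#include <crypto/aes.h>
	#include <crypto/internal/skcipher.h>
	#include <linux/errno.h>
	#include <linux/module.h>
	#include <linux/string.h>

	struct demo_ctx {
		u8 key[AES_MAX_KEY_SIZE];
		unsigned int keylen;
	};

	static int demo_setkey(struct crypto_skcipher *tfm, const u8 *key,
			       unsigned int keylen)
	{
		struct demo_ctx *ctx = crypto_skcipher_ctx(tfm);

		if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
		    keylen != AES_KEYSIZE_256)
			return -EINVAL;
		memcpy(ctx->key, key, keylen);
		ctx->keylen = keylen;
		return 0;
	}

	/* Stubs only; a real driver queues work to its engine here. */
	static int demo_encrypt(struct skcipher_request *req) { return -EOPNOTSUPP; }
	static int demo_decrypt(struct skcipher_request *req) { return -EOPNOTSUPP; }

	static struct skcipher_alg demo_alg = {
		.base.cra_name		= "cbc(aes)",
		.base.cra_driver_name	= "demo-cbc-aes",
		.base.cra_priority	= 300,
		.base.cra_flags		= CRYPTO_ALG_ASYNC,
		.base.cra_blocksize	= AES_BLOCK_SIZE,
		.base.cra_ctxsize	= sizeof(struct demo_ctx),
		.base.cra_module	= THIS_MODULE,

		.min_keysize		= AES_MIN_KEY_SIZE,
		.max_keysize		= AES_MAX_KEY_SIZE,
		.ivsize			= AES_BLOCK_SIZE,
		.setkey			= demo_setkey,
		.encrypt		= demo_encrypt,
		.decrypt		= demo_decrypt,
	};
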
@@ -1581,25 +1565,23 @@ static struct spacc_alg l2_engine_algs[] = {
.ctrl_default = SPA_CTRL_CIPH_ALG_KASUMI |
SPA_CTRL_CIPH_MODE_F8,
.alg = {
- .cra_name = "f8(kasumi)",
- .cra_driver_name = "f8-kasumi-picoxcell",
- .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
- .cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = 8,
- .cra_ctxsize = sizeof(struct spacc_ablk_ctx),
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_ablkcipher = {
- .setkey = spacc_kasumi_f8_setkey,
- .encrypt = spacc_ablk_encrypt,
- .decrypt = spacc_ablk_decrypt,
- .min_keysize = 16,
- .max_keysize = 16,
- .ivsize = 8,
- },
- .cra_init = spacc_ablk_cra_init,
- .cra_exit = spacc_ablk_cra_exit,
+ .base.cra_name = "f8(kasumi)",
+ .base.cra_driver_name = "f8-kasumi-picoxcell",
+ .base.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
+ .base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .base.cra_blocksize = 8,
+ .base.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .setkey = spacc_kasumi_f8_setkey,
+ .encrypt = spacc_ablk_encrypt,
+ .decrypt = spacc_ablk_decrypt,
+ .min_keysize = 16,
+ .max_keysize = 16,
+ .ivsize = 8,
+ .init = spacc_ablk_init_tfm,
+ .exit = spacc_ablk_exit_tfm,
},
},
};
@@ -1721,7 +1703,7 @@ static int spacc_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&engine->registered_algs);
for (i = 0; i < engine->num_algs; ++i) {
engine->algs[i].engine = engine;
- err = crypto_register_alg(&engine->algs[i].alg);
+ err = crypto_register_skcipher(&engine->algs[i].alg);
if (!err) {
list_add_tail(&engine->algs[i].entry,
&engine->registered_algs);
@@ -1729,10 +1711,10 @@ static int spacc_probe(struct platform_device *pdev)
}
if (err)
dev_err(engine->dev, "failed to register alg \"%s\"\n",
- engine->algs[i].alg.cra_name);
+ engine->algs[i].alg.base.cra_name);
else
dev_dbg(engine->dev, "registered alg \"%s\"\n",
- engine->algs[i].alg.cra_name);
+ engine->algs[i].alg.base.cra_name);
}
INIT_LIST_HEAD(&engine->registered_aeads);
@@ -1781,7 +1763,7 @@ static int spacc_remove(struct platform_device *pdev)
list_for_each_entry_safe(alg, next, &engine->registered_algs, entry) {
list_del(&alg->entry);
- crypto_unregister_alg(&alg->alg);
+ crypto_unregister_skcipher(&alg->alg);
}
clk_disable_unprepare(engine->clk);
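
With ablkcipher gone, a driver can no longer reach its wrapper struct through tfm->__crt_alg; the replacement pattern used throughout this series goes crypto_skcipher_reqtfm() -> crypto_skcipher_alg() -> container_of(). A reduced sketch, with demo_* names illustrative and mirroring spacc_alg/to_spacc_skcipher() above:

	#include <crypto/internal/skcipher.h>
	#include <linux/kernel.h>

	struct demo_alg {
		unsigned long type;
		struct skcipher_alg alg;
	};

	static inline struct demo_alg *to_demo_alg(struct skcipher_alg *alg)
	{
		return container_of(alg, struct demo_alg, alg);
	}

	static unsigned long demo_req_type(struct skcipher_request *req)
	{
		struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);

		return to_demo_alg(crypto_skcipher_alg(tfm))->type;
	}
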
diff --git a/drivers/crypto/qat/Kconfig b/drivers/crypto/qat/Kconfig
index 6ab7e5a88756..2006322345de 100644
--- a/drivers/crypto/qat/Kconfig
+++ b/drivers/crypto/qat/Kconfig
@@ -3,7 +3,7 @@ config CRYPTO_DEV_QAT
tristate
select CRYPTO_AEAD
select CRYPTO_AUTHENC
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_AKCIPHER
select CRYPTO_DH
select CRYPTO_HMAC
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index b50eb55f8f57..35bca76b640f 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -48,6 +48,7 @@
#include <linux/slab.h>
#include <linux/crypto.h>
#include <crypto/internal/aead.h>
+#include <crypto/internal/skcipher.h>
#include <crypto/aes.h>
#include <crypto/sha.h>
#include <crypto/hash.h>
@@ -122,7 +123,7 @@ struct qat_alg_aead_ctx {
char opad[SHA512_BLOCK_SIZE];
};
-struct qat_alg_ablkcipher_ctx {
+struct qat_alg_skcipher_ctx {
struct icp_qat_hw_cipher_algo_blk *enc_cd;
struct icp_qat_hw_cipher_algo_blk *dec_cd;
dma_addr_t enc_cd_paddr;
@@ -130,7 +131,7 @@ struct qat_alg_ablkcipher_ctx {
struct icp_qat_fw_la_bulk_req enc_fw_req;
struct icp_qat_fw_la_bulk_req dec_fw_req;
struct qat_crypto_instance *inst;
- struct crypto_tfm *tfm;
+ struct crypto_skcipher *tfm;
};
static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
@@ -463,10 +464,10 @@ static int qat_alg_aead_init_dec_session(struct crypto_aead *aead_tfm,
return 0;
}
-static void qat_alg_ablkcipher_init_com(struct qat_alg_ablkcipher_ctx *ctx,
- struct icp_qat_fw_la_bulk_req *req,
- struct icp_qat_hw_cipher_algo_blk *cd,
- const uint8_t *key, unsigned int keylen)
+static void qat_alg_skcipher_init_com(struct qat_alg_skcipher_ctx *ctx,
+ struct icp_qat_fw_la_bulk_req *req,
+ struct icp_qat_hw_cipher_algo_blk *cd,
+ const uint8_t *key, unsigned int keylen)
{
struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
@@ -485,28 +486,28 @@ static void qat_alg_ablkcipher_init_com(struct qat_alg_ablkcipher_ctx *ctx,
ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
}
-static void qat_alg_ablkcipher_init_enc(struct qat_alg_ablkcipher_ctx *ctx,
- int alg, const uint8_t *key,
- unsigned int keylen, int mode)
+static void qat_alg_skcipher_init_enc(struct qat_alg_skcipher_ctx *ctx,
+ int alg, const uint8_t *key,
+ unsigned int keylen, int mode)
{
struct icp_qat_hw_cipher_algo_blk *enc_cd = ctx->enc_cd;
struct icp_qat_fw_la_bulk_req *req = &ctx->enc_fw_req;
struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
- qat_alg_ablkcipher_init_com(ctx, req, enc_cd, key, keylen);
+ qat_alg_skcipher_init_com(ctx, req, enc_cd, key, keylen);
cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
enc_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
}
-static void qat_alg_ablkcipher_init_dec(struct qat_alg_ablkcipher_ctx *ctx,
- int alg, const uint8_t *key,
- unsigned int keylen, int mode)
+static void qat_alg_skcipher_init_dec(struct qat_alg_skcipher_ctx *ctx,
+ int alg, const uint8_t *key,
+ unsigned int keylen, int mode)
{
struct icp_qat_hw_cipher_algo_blk *dec_cd = ctx->dec_cd;
struct icp_qat_fw_la_bulk_req *req = &ctx->dec_fw_req;
struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
- qat_alg_ablkcipher_init_com(ctx, req, dec_cd, key, keylen);
+ qat_alg_skcipher_init_com(ctx, req, dec_cd, key, keylen);
cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
if (mode != ICP_QAT_HW_CIPHER_CTR_MODE)
@@ -577,21 +578,21 @@ error:
return -EFAULT;
}
-static int qat_alg_ablkcipher_init_sessions(struct qat_alg_ablkcipher_ctx *ctx,
- const uint8_t *key,
- unsigned int keylen,
- int mode)
+static int qat_alg_skcipher_init_sessions(struct qat_alg_skcipher_ctx *ctx,
+ const uint8_t *key,
+ unsigned int keylen,
+ int mode)
{
int alg;
if (qat_alg_validate_key(keylen, &alg, mode))
goto bad_key;
- qat_alg_ablkcipher_init_enc(ctx, alg, key, keylen, mode);
- qat_alg_ablkcipher_init_dec(ctx, alg, key, keylen, mode);
+ qat_alg_skcipher_init_enc(ctx, alg, key, keylen, mode);
+ qat_alg_skcipher_init_dec(ctx, alg, key, keylen, mode);
return 0;
bad_key:
- crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ crypto_skcipher_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
@@ -832,12 +833,12 @@ static void qat_aead_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
areq->base.complete(&areq->base, res);
}
-static void qat_ablkcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
- struct qat_crypto_request *qat_req)
+static void qat_skcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
+ struct qat_crypto_request *qat_req)
{
- struct qat_alg_ablkcipher_ctx *ctx = qat_req->ablkcipher_ctx;
+ struct qat_alg_skcipher_ctx *ctx = qat_req->skcipher_ctx;
struct qat_crypto_instance *inst = ctx->inst;
- struct ablkcipher_request *areq = qat_req->ablkcipher_req;
+ struct skcipher_request *sreq = qat_req->skcipher_req;
uint8_t stat_filed = qat_resp->comn_resp.comn_status;
struct device *dev = &GET_DEV(ctx->inst->accel_dev);
int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_filed);
@@ -846,11 +847,11 @@ static void qat_ablkcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
res = -EINVAL;
- memcpy(areq->info, qat_req->iv, AES_BLOCK_SIZE);
+ memcpy(sreq->iv, qat_req->iv, AES_BLOCK_SIZE);
dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
qat_req->iv_paddr);
- areq->base.complete(&areq->base, res);
+ sreq->base.complete(&sreq->base, res);
}
void qat_alg_callback(void *resp)
@@ -949,21 +950,21 @@ static int qat_alg_aead_enc(struct aead_request *areq)
return -EINPROGRESS;
}
-static int qat_alg_ablkcipher_rekey(struct qat_alg_ablkcipher_ctx *ctx,
- const u8 *key, unsigned int keylen,
- int mode)
+static int qat_alg_skcipher_rekey(struct qat_alg_skcipher_ctx *ctx,
+ const u8 *key, unsigned int keylen,
+ int mode)
{
memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
- return qat_alg_ablkcipher_init_sessions(ctx, key, keylen, mode);
+ return qat_alg_skcipher_init_sessions(ctx, key, keylen, mode);
}
-static int qat_alg_ablkcipher_newkey(struct qat_alg_ablkcipher_ctx *ctx,
- const u8 *key, unsigned int keylen,
- int mode)
+static int qat_alg_skcipher_newkey(struct qat_alg_skcipher_ctx *ctx,
+ const u8 *key, unsigned int keylen,
+ int mode)
{
struct qat_crypto_instance *inst = NULL;
struct device *dev;
@@ -990,7 +991,7 @@ static int qat_alg_ablkcipher_newkey(struct qat_alg_ablkcipher_ctx *ctx,
goto out_free_enc;
}
- ret = qat_alg_ablkcipher_init_sessions(ctx, key, keylen, mode);
+ ret = qat_alg_skcipher_init_sessions(ctx, key, keylen, mode);
if (ret)
goto out_free_all;
@@ -1012,51 +1013,51 @@ out_free_instance:
return ret;
}
-static int qat_alg_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
- const u8 *key, unsigned int keylen,
- int mode)
+static int qat_alg_skcipher_setkey(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keylen,
+ int mode)
{
- struct qat_alg_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+ struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
if (ctx->enc_cd)
- return qat_alg_ablkcipher_rekey(ctx, key, keylen, mode);
+ return qat_alg_skcipher_rekey(ctx, key, keylen, mode);
else
- return qat_alg_ablkcipher_newkey(ctx, key, keylen, mode);
+ return qat_alg_skcipher_newkey(ctx, key, keylen, mode);
}
-static int qat_alg_ablkcipher_cbc_setkey(struct crypto_ablkcipher *tfm,
- const u8 *key, unsigned int keylen)
+static int qat_alg_skcipher_cbc_setkey(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keylen)
{
- return qat_alg_ablkcipher_setkey(tfm, key, keylen,
- ICP_QAT_HW_CIPHER_CBC_MODE);
+ return qat_alg_skcipher_setkey(tfm, key, keylen,
+ ICP_QAT_HW_CIPHER_CBC_MODE);
}
-static int qat_alg_ablkcipher_ctr_setkey(struct crypto_ablkcipher *tfm,
- const u8 *key, unsigned int keylen)
+static int qat_alg_skcipher_ctr_setkey(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keylen)
{
- return qat_alg_ablkcipher_setkey(tfm, key, keylen,
- ICP_QAT_HW_CIPHER_CTR_MODE);
+ return qat_alg_skcipher_setkey(tfm, key, keylen,
+ ICP_QAT_HW_CIPHER_CTR_MODE);
}
-static int qat_alg_ablkcipher_xts_setkey(struct crypto_ablkcipher *tfm,
- const u8 *key, unsigned int keylen)
+static int qat_alg_skcipher_xts_setkey(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keylen)
{
- return qat_alg_ablkcipher_setkey(tfm, key, keylen,
- ICP_QAT_HW_CIPHER_XTS_MODE);
+ return qat_alg_skcipher_setkey(tfm, key, keylen,
+ ICP_QAT_HW_CIPHER_XTS_MODE);
}
-static int qat_alg_ablkcipher_encrypt(struct ablkcipher_request *req)
+static int qat_alg_skcipher_encrypt(struct skcipher_request *req)
{
- struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm);
- struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
- struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req);
+ struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
+ struct qat_alg_skcipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct qat_crypto_request *qat_req = skcipher_request_ctx(req);
struct icp_qat_fw_la_cipher_req_params *cipher_param;
struct icp_qat_fw_la_bulk_req *msg;
struct device *dev = &GET_DEV(ctx->inst->accel_dev);
int ret, ctr = 0;
- if (req->nbytes == 0)
+ if (req->cryptlen == 0)
return 0;
qat_req->iv = dma_alloc_coherent(dev, AES_BLOCK_SIZE,
@@ -1073,17 +1074,17 @@ static int qat_alg_ablkcipher_encrypt(struct ablkcipher_request *req)
msg = &qat_req->req;
*msg = ctx->enc_fw_req;
- qat_req->ablkcipher_ctx = ctx;
- qat_req->ablkcipher_req = req;
- qat_req->cb = qat_ablkcipher_alg_callback;
+ qat_req->skcipher_ctx = ctx;
+ qat_req->skcipher_req = req;
+ qat_req->cb = qat_skcipher_alg_callback;
qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
- cipher_param->cipher_length = req->nbytes;
+ cipher_param->cipher_length = req->cryptlen;
cipher_param->cipher_offset = 0;
cipher_param->u.s.cipher_IV_ptr = qat_req->iv_paddr;
- memcpy(qat_req->iv, req->info, AES_BLOCK_SIZE);
+ memcpy(qat_req->iv, req->iv, AES_BLOCK_SIZE);
do {
ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
} while (ret == -EAGAIN && ctr++ < 10);
@@ -1097,26 +1098,26 @@ static int qat_alg_ablkcipher_encrypt(struct ablkcipher_request *req)
return -EINPROGRESS;
}
-static int qat_alg_ablkcipher_blk_encrypt(struct ablkcipher_request *req)
+static int qat_alg_skcipher_blk_encrypt(struct skcipher_request *req)
{
- if (req->nbytes % AES_BLOCK_SIZE != 0)
+ if (req->cryptlen % AES_BLOCK_SIZE != 0)
return -EINVAL;
- return qat_alg_ablkcipher_encrypt(req);
+ return qat_alg_skcipher_encrypt(req);
}
-static int qat_alg_ablkcipher_decrypt(struct ablkcipher_request *req)
+static int qat_alg_skcipher_decrypt(struct skcipher_request *req)
{
- struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm);
- struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
- struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req);
+ struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
+ struct qat_alg_skcipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct qat_crypto_request *qat_req = skcipher_request_ctx(req);
struct icp_qat_fw_la_cipher_req_params *cipher_param;
struct icp_qat_fw_la_bulk_req *msg;
struct device *dev = &GET_DEV(ctx->inst->accel_dev);
int ret, ctr = 0;
- if (req->nbytes == 0)
+ if (req->cryptlen == 0)
return 0;
qat_req->iv = dma_alloc_coherent(dev, AES_BLOCK_SIZE,
@@ -1133,17 +1134,17 @@ static int qat_alg_ablkcipher_decrypt(struct ablkcipher_request *req)
msg = &qat_req->req;
*msg = ctx->dec_fw_req;
- qat_req->ablkcipher_ctx = ctx;
- qat_req->ablkcipher_req = req;
- qat_req->cb = qat_ablkcipher_alg_callback;
+ qat_req->skcipher_ctx = ctx;
+ qat_req->skcipher_req = req;
+ qat_req->cb = qat_skcipher_alg_callback;
qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
- cipher_param->cipher_length = req->nbytes;
+ cipher_param->cipher_length = req->cryptlen;
cipher_param->cipher_offset = 0;
cipher_param->u.s.cipher_IV_ptr = qat_req->iv_paddr;
- memcpy(qat_req->iv, req->info, AES_BLOCK_SIZE);
+ memcpy(qat_req->iv, req->iv, AES_BLOCK_SIZE);
do {
ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
} while (ret == -EAGAIN && ctr++ < 10);
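
The request-side renames above are mechanical: req->nbytes becomes req->cryptlen, req->info becomes req->iv, and the per-request context size moves from writing tfm->crt_ablkcipher.reqsize in cra_init to calling crypto_skcipher_set_reqsize() from the skcipher init hook. A compilable sketch under those assumptions (demo_* names are hypothetical):

	#include <crypto/aes.h>
	#include <crypto/internal/skcipher.h>
	#include <linux/errno.h>
	#include <linux/string.h>

	struct demo_reqctx {
		u8 iv[AES_BLOCK_SIZE];
	};

	static int demo_init_tfm(struct crypto_skcipher *tfm)
	{
		/* replaces: tfm->crt_ablkcipher.reqsize = sizeof(...); */
		crypto_skcipher_set_reqsize(tfm, sizeof(struct demo_reqctx));
		return 0;
	}

	static int demo_blk_encrypt(struct skcipher_request *req)
	{
		struct demo_reqctx *rctx = skcipher_request_ctx(req);

		if (req->cryptlen % AES_BLOCK_SIZE)	/* was req->nbytes */
			return -EINVAL;
		memcpy(rctx->iv, req->iv, AES_BLOCK_SIZE);	/* was req->info */
		return 0;
	}
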
@@ -1157,12 +1158,12 @@ static int qat_alg_ablkcipher_decrypt(struct ablkcipher_request *req)
return -EINPROGRESS;
}
-static int qat_alg_ablkcipher_blk_decrypt(struct ablkcipher_request *req)
+static int qat_alg_skcipher_blk_decrypt(struct skcipher_request *req)
{
- if (req->nbytes % AES_BLOCK_SIZE != 0)
+ if (req->cryptlen % AES_BLOCK_SIZE != 0)
return -EINVAL;
- return qat_alg_ablkcipher_decrypt(req);
+ return qat_alg_skcipher_decrypt(req);
}
static int qat_alg_aead_init(struct crypto_aead *tfm,
enum icp_qat_hw_auth_algo hash,
@@ -1218,18 +1219,18 @@ static void qat_alg_aead_exit(struct crypto_aead *tfm)
qat_crypto_put_instance(inst);
}
-static int qat_alg_ablkcipher_init(struct crypto_tfm *tfm)
+static int qat_alg_skcipher_init_tfm(struct crypto_skcipher *tfm)
{
- struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
- tfm->crt_ablkcipher.reqsize = sizeof(struct qat_crypto_request);
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct qat_crypto_request));
ctx->tfm = tfm;
return 0;
}
-static void qat_alg_ablkcipher_exit(struct crypto_tfm *tfm)
+static void qat_alg_skcipher_exit_tfm(struct crypto_skcipher *tfm)
{
- struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
struct qat_crypto_instance *inst = ctx->inst;
struct device *dev;
@@ -1308,92 +1309,75 @@ static struct aead_alg qat_aeads[] = { {
.maxauthsize = SHA512_DIGEST_SIZE,
} };
-static struct crypto_alg qat_algs[] = { {
- .cra_name = "cbc(aes)",
- .cra_driver_name = "qat_aes_cbc",
- .cra_priority = 4001,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = qat_alg_ablkcipher_init,
- .cra_exit = qat_alg_ablkcipher_exit,
- .cra_u = {
- .ablkcipher = {
- .setkey = qat_alg_ablkcipher_cbc_setkey,
- .decrypt = qat_alg_ablkcipher_blk_decrypt,
- .encrypt = qat_alg_ablkcipher_blk_encrypt,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- },
- },
+static struct skcipher_alg qat_skciphers[] = { {
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "qat_aes_cbc",
+ .base.cra_priority = 4001,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
+ .base.cra_alignmask = 0,
+ .base.cra_module = THIS_MODULE,
+
+ .init = qat_alg_skcipher_init_tfm,
+ .exit = qat_alg_skcipher_exit_tfm,
+ .setkey = qat_alg_skcipher_cbc_setkey,
+ .decrypt = qat_alg_skcipher_blk_decrypt,
+ .encrypt = qat_alg_skcipher_blk_encrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
}, {
- .cra_name = "ctr(aes)",
- .cra_driver_name = "qat_aes_ctr",
- .cra_priority = 4001,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = qat_alg_ablkcipher_init,
- .cra_exit = qat_alg_ablkcipher_exit,
- .cra_u = {
- .ablkcipher = {
- .setkey = qat_alg_ablkcipher_ctr_setkey,
- .decrypt = qat_alg_ablkcipher_decrypt,
- .encrypt = qat_alg_ablkcipher_encrypt,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- },
- },
+ .base.cra_name = "ctr(aes)",
+ .base.cra_driver_name = "qat_aes_ctr",
+ .base.cra_priority = 4001,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
+ .base.cra_alignmask = 0,
+ .base.cra_module = THIS_MODULE,
+
+ .init = qat_alg_skcipher_init_tfm,
+ .exit = qat_alg_skcipher_exit_tfm,
+ .setkey = qat_alg_skcipher_ctr_setkey,
+ .decrypt = qat_alg_skcipher_decrypt,
+ .encrypt = qat_alg_skcipher_encrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
}, {
- .cra_name = "xts(aes)",
- .cra_driver_name = "qat_aes_xts",
- .cra_priority = 4001,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = qat_alg_ablkcipher_init,
- .cra_exit = qat_alg_ablkcipher_exit,
- .cra_u = {
- .ablkcipher = {
- .setkey = qat_alg_ablkcipher_xts_setkey,
- .decrypt = qat_alg_ablkcipher_blk_decrypt,
- .encrypt = qat_alg_ablkcipher_blk_encrypt,
- .min_keysize = 2 * AES_MIN_KEY_SIZE,
- .max_keysize = 2 * AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- },
- },
+ .base.cra_name = "xts(aes)",
+ .base.cra_driver_name = "qat_aes_xts",
+ .base.cra_priority = 4001,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
+ .base.cra_alignmask = 0,
+ .base.cra_module = THIS_MODULE,
+
+ .init = qat_alg_skcipher_init_tfm,
+ .exit = qat_alg_skcipher_exit_tfm,
+ .setkey = qat_alg_skcipher_xts_setkey,
+ .decrypt = qat_alg_skcipher_blk_decrypt,
+ .encrypt = qat_alg_skcipher_blk_encrypt,
+ .min_keysize = 2 * AES_MIN_KEY_SIZE,
+ .max_keysize = 2 * AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
} };
int qat_algs_register(void)
{
- int ret = 0, i;
+ int ret = 0;
mutex_lock(&algs_lock);
if (++active_devs != 1)
goto unlock;
- for (i = 0; i < ARRAY_SIZE(qat_algs); i++)
- qat_algs[i].cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
-
- ret = crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
+ ret = crypto_register_skciphers(qat_skciphers,
+ ARRAY_SIZE(qat_skciphers));
if (ret)
goto unlock;
- for (i = 0; i < ARRAY_SIZE(qat_aeads); i++)
- qat_aeads[i].base.cra_flags = CRYPTO_ALG_ASYNC;
-
ret = crypto_register_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
if (ret)
goto unreg_algs;
@@ -1403,7 +1387,7 @@ unlock:
return ret;
unreg_algs:
- crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
+ crypto_unregister_skciphers(qat_skciphers, ARRAY_SIZE(qat_skciphers));
goto unlock;
}
@@ -1414,7 +1398,7 @@ void qat_algs_unregister(void)
goto unlock;
crypto_unregister_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
- crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
+ crypto_unregister_skciphers(qat_skciphers, ARRAY_SIZE(qat_skciphers));
unlock:
mutex_unlock(&algs_lock);
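
Registration also becomes type-aware: crypto_register_skciphers()/crypto_unregister_skciphers() replace the generic crypto_register_algs() pair, and because the flags now live statically in the table, the loops that patched cra_flags at registration time could be dropped. A sketch of the register-with-unwind structure used above (names hypothetical):

	#include <crypto/internal/aead.h>
	#include <crypto/internal/skcipher.h>

	static int demo_register_all(struct skcipher_alg *sk, int nsk,
				     struct aead_alg *ae, int nae)
	{
		int ret;

		ret = crypto_register_skciphers(sk, nsk);
		if (ret)
			return ret;

		ret = crypto_register_aeads(ae, nae);
		if (ret)
			crypto_unregister_skciphers(sk, nsk);
		return ret;
	}
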
diff --git a/drivers/crypto/qat/qat_common/qat_crypto.h b/drivers/crypto/qat/qat_common/qat_crypto.h
index c77a80020cde..300bb919a33a 100644
--- a/drivers/crypto/qat/qat_common/qat_crypto.h
+++ b/drivers/crypto/qat/qat_common/qat_crypto.h
@@ -79,11 +79,11 @@ struct qat_crypto_request {
struct icp_qat_fw_la_bulk_req req;
union {
struct qat_alg_aead_ctx *aead_ctx;
- struct qat_alg_ablkcipher_ctx *ablkcipher_ctx;
+ struct qat_alg_skcipher_ctx *skcipher_ctx;
};
union {
struct aead_request *aead_req;
- struct ablkcipher_request *ablkcipher_req;
+ struct skcipher_request *skcipher_req;
};
struct qat_crypto_request_buffs buf;
void (*cb)(struct icp_qat_fw_la_resp *resp,
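
The per-request bookkeeping only swaps union members, so the layout of struct qat_crypto_request is unchanged: each slot holds a pointer either way. A reduced illustration (demo_request is illustrative, not the real struct):

	#include <crypto/aead.h>
	#include <crypto/skcipher.h>

	/* One anonymous union slot per request flavour; replacing the
	 * ablkcipher members with skcipher ones alters no offsets. */
	struct demo_request {
		union {
			struct aead_request *aead_req;
			struct skcipher_request *skcipher_req;
		};
	};
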
diff --git a/drivers/crypto/qce/Makefile b/drivers/crypto/qce/Makefile
index 19a7f899acff..8caa04e1ec43 100644
--- a/drivers/crypto/qce/Makefile
+++ b/drivers/crypto/qce/Makefile
@@ -4,4 +4,4 @@ qcrypto-objs := core.o \
common.o \
dma.o \
sha.o \
- ablkcipher.o
+ skcipher.o
diff --git a/drivers/crypto/qce/cipher.h b/drivers/crypto/qce/cipher.h
index 5cab8f0706a8..7770660bc853 100644
--- a/drivers/crypto/qce/cipher.h
+++ b/drivers/crypto/qce/cipher.h
@@ -45,12 +45,12 @@ struct qce_cipher_reqctx {
unsigned int cryptlen;
};
-static inline struct qce_alg_template *to_cipher_tmpl(struct crypto_tfm *tfm)
+static inline struct qce_alg_template *to_cipher_tmpl(struct crypto_skcipher *tfm)
{
- struct crypto_alg *alg = tfm->__crt_alg;
- return container_of(alg, struct qce_alg_template, alg.crypto);
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ return container_of(alg, struct qce_alg_template, alg.skcipher);
}
-extern const struct qce_algo_ops ablkcipher_ops;
+extern const struct qce_algo_ops skcipher_ops;
#endif /* _CIPHER_H_ */
diff --git a/drivers/crypto/qce/common.c b/drivers/crypto/qce/common.c
index 3fb510164326..da1188abc9ba 100644
--- a/drivers/crypto/qce/common.c
+++ b/drivers/crypto/qce/common.c
@@ -304,13 +304,13 @@ go_proc:
return 0;
}
-static int qce_setup_regs_ablkcipher(struct crypto_async_request *async_req,
+static int qce_setup_regs_skcipher(struct crypto_async_request *async_req,
u32 totallen, u32 offset)
{
- struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
- struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
+ struct skcipher_request *req = skcipher_request_cast(async_req);
+ struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
struct qce_cipher_ctx *ctx = crypto_tfm_ctx(async_req->tfm);
- struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm);
+ struct qce_alg_template *tmpl = to_cipher_tmpl(crypto_skcipher_reqtfm(req));
struct qce_device *qce = tmpl->qce;
__be32 enckey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(__be32)] = {0};
__be32 enciv[QCE_MAX_IV_SIZE / sizeof(__be32)] = {0};
@@ -389,8 +389,8 @@ int qce_start(struct crypto_async_request *async_req, u32 type, u32 totallen,
u32 offset)
{
switch (type) {
- case CRYPTO_ALG_TYPE_ABLKCIPHER:
- return qce_setup_regs_ablkcipher(async_req, totallen, offset);
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ return qce_setup_regs_skcipher(async_req, totallen, offset);
case CRYPTO_ALG_TYPE_AHASH:
return qce_setup_regs_ahash(async_req, totallen, offset);
default:
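
qce dispatches on the algorithm type stamped into its template, so the switch simply moves from CRYPTO_ALG_TYPE_ABLKCIPHER to CRYPTO_ALG_TYPE_SKCIPHER; the typed request is then recovered from the engine's generic crypto_async_request with skcipher_request_cast(), and the tfm comes from crypto_skcipher_reqtfm() instead of async_req->tfm. Sketch (demo_start is hypothetical):

	#include <crypto/internal/skcipher.h>
	#include <linux/crypto.h>
	#include <linux/errno.h>

	static int demo_start(struct crypto_async_request *async_req, u32 type)
	{
		struct skcipher_request *req;
		struct crypto_skcipher *tfm;

		if (type != CRYPTO_ALG_TYPE_SKCIPHER)
			return -EINVAL;

		req = skcipher_request_cast(async_req);
		tfm = crypto_skcipher_reqtfm(req);
		return crypto_skcipher_ivsize(tfm);	/* placeholder use */
	}
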
diff --git a/drivers/crypto/qce/common.h b/drivers/crypto/qce/common.h
index 47fb523357ac..282d4317470d 100644
--- a/drivers/crypto/qce/common.h
+++ b/drivers/crypto/qce/common.h
@@ -10,6 +10,7 @@
#include <linux/types.h>
#include <crypto/aes.h>
#include <crypto/hash.h>
+#include <crypto/internal/skcipher.h>
/* key size in bytes */
#define QCE_SHA_HMAC_KEY_SIZE 64
@@ -79,7 +80,7 @@ struct qce_alg_template {
unsigned long alg_flags;
const u32 *std_iv;
union {
- struct crypto_alg crypto;
+ struct skcipher_alg skcipher;
struct ahash_alg ahash;
} alg;
struct qce_device *qce;
diff --git a/drivers/crypto/qce/core.c b/drivers/crypto/qce/core.c
index 08d4ce3bfddf..0a44a6eeacf5 100644
--- a/drivers/crypto/qce/core.c
+++ b/drivers/crypto/qce/core.c
@@ -22,7 +22,7 @@
#define QCE_QUEUE_LENGTH 1
static const struct qce_algo_ops *qce_ops[] = {
- &ablkcipher_ops,
+ &skcipher_ops,
&ahash_ops,
};
diff --git a/drivers/crypto/qce/dma.c b/drivers/crypto/qce/dma.c
index 0984a719144d..40a59214d2e1 100644
--- a/drivers/crypto/qce/dma.c
+++ b/drivers/crypto/qce/dma.c
@@ -12,11 +12,11 @@ int qce_dma_request(struct device *dev, struct qce_dma_data *dma)
{
int ret;
- dma->txchan = dma_request_slave_channel_reason(dev, "tx");
+ dma->txchan = dma_request_chan(dev, "tx");
if (IS_ERR(dma->txchan))
return PTR_ERR(dma->txchan);
- dma->rxchan = dma_request_slave_channel_reason(dev, "rx");
+ dma->rxchan = dma_request_chan(dev, "rx");
if (IS_ERR(dma->rxchan)) {
ret = PTR_ERR(dma->rxchan);
goto error_rx;
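
Incidental cleanup here: dma_request_slave_channel_reason() was only a macro alias for dma_request_chan(), so the rename is behaviour-neutral; both spellings return an ERR_PTR() on failure, and the existing IS_ERR()/PTR_ERR() handling carries over. Illustrative sketch (demo_get_tx_chan is hypothetical):

	#include <linux/dmaengine.h>
	#include <linux/err.h>

	static int demo_get_tx_chan(struct device *dev, struct dma_chan **out)
	{
		struct dma_chan *chan = dma_request_chan(dev, "tx");

		if (IS_ERR(chan))
			return PTR_ERR(chan);
		*out = chan;
		return 0;
	}
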
diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c
index 0853e74583ad..95ab16fc8fd6 100644
--- a/drivers/crypto/qce/sha.c
+++ b/drivers/crypto/qce/sha.c
@@ -495,7 +495,7 @@ static int qce_ahash_register_one(const struct qce_ahash_def *def,
base = &alg->halg.base;
base->cra_blocksize = def->blocksize;
base->cra_priority = 300;
- base->cra_flags = CRYPTO_ALG_ASYNC;
+ base->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
base->cra_ctxsize = sizeof(struct qce_sha_ctx);
base->cra_alignmask = 0;
base->cra_module = THIS_MODULE;
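
Unrelated to the skcipher conversion proper, the hash path also gains CRYPTO_ALG_KERN_DRIVER_ONLY. The flag is informational: it marks an implementation that exists only behind a kernel driver (i.e. hardware), letting anything that queries the crypto API distinguish such implementations from software ones. Minimal illustration (demo_mark_hw_only is hypothetical):

	#include <crypto/hash.h>
	#include <linux/crypto.h>

	static void demo_mark_hw_only(struct ahash_alg *alg)
	{
		alg->halg.base.cra_flags = CRYPTO_ALG_ASYNC |
					   CRYPTO_ALG_KERN_DRIVER_ONLY;
	}
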
diff --git a/drivers/crypto/qce/ablkcipher.c b/drivers/crypto/qce/skcipher.c
index 7a98bf5cc967..fee07323f8f9 100644
--- a/drivers/crypto/qce/ablkcipher.c
+++ b/drivers/crypto/qce/skcipher.c
@@ -12,14 +12,14 @@
#include "cipher.h"
-static LIST_HEAD(ablkcipher_algs);
+static LIST_HEAD(skcipher_algs);
-static void qce_ablkcipher_done(void *data)
+static void qce_skcipher_done(void *data)
{
struct crypto_async_request *async_req = data;
- struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
- struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
- struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm);
+ struct skcipher_request *req = skcipher_request_cast(async_req);
+ struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
+ struct qce_alg_template *tmpl = to_cipher_tmpl(crypto_skcipher_reqtfm(req));
struct qce_device *qce = tmpl->qce;
enum dma_data_direction dir_src, dir_dst;
u32 status;
@@ -32,7 +32,7 @@ static void qce_ablkcipher_done(void *data)
error = qce_dma_terminate_all(&qce->dma);
if (error)
- dev_dbg(qce->dev, "ablkcipher dma termination error (%d)\n",
+ dev_dbg(qce->dev, "skcipher dma termination error (%d)\n",
error);
if (diff_dst)
@@ -43,18 +43,18 @@ static void qce_ablkcipher_done(void *data)
error = qce_check_status(qce, &status);
if (error < 0)
- dev_dbg(qce->dev, "ablkcipher operation error (%x)\n", status);
+ dev_dbg(qce->dev, "skcipher operation error (%x)\n", status);
qce->async_req_done(tmpl->qce, error);
}
static int
-qce_ablkcipher_async_req_handle(struct crypto_async_request *async_req)
+qce_skcipher_async_req_handle(struct crypto_async_request *async_req)
{
- struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
- struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
- struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm);
+ struct skcipher_request *req = skcipher_request_cast(async_req);
+ struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ struct qce_alg_template *tmpl = to_cipher_tmpl(crypto_skcipher_reqtfm(req));
struct qce_device *qce = tmpl->qce;
enum dma_data_direction dir_src, dir_dst;
struct scatterlist *sg;
@@ -62,17 +62,17 @@ qce_ablkcipher_async_req_handle(struct crypto_async_request *async_req)
gfp_t gfp;
int ret;
- rctx->iv = req->info;
- rctx->ivsize = crypto_ablkcipher_ivsize(ablkcipher);
- rctx->cryptlen = req->nbytes;
+ rctx->iv = req->iv;
+ rctx->ivsize = crypto_skcipher_ivsize(skcipher);
+ rctx->cryptlen = req->cryptlen;
diff_dst = (req->src != req->dst) ? true : false;
dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;
- rctx->src_nents = sg_nents_for_len(req->src, req->nbytes);
+ rctx->src_nents = sg_nents_for_len(req->src, req->cryptlen);
if (diff_dst)
- rctx->dst_nents = sg_nents_for_len(req->dst, req->nbytes);
+ rctx->dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
else
rctx->dst_nents = rctx->src_nents;
if (rctx->src_nents < 0) {
@@ -125,13 +125,13 @@ qce_ablkcipher_async_req_handle(struct crypto_async_request *async_req)
ret = qce_dma_prep_sgs(&qce->dma, rctx->src_sg, rctx->src_nents,
rctx->dst_sg, rctx->dst_nents,
- qce_ablkcipher_done, async_req);
+ qce_skcipher_done, async_req);
if (ret)
goto error_unmap_src;
qce_dma_issue_pending(&qce->dma);
- ret = qce_start(async_req, tmpl->crypto_alg_type, req->nbytes, 0);
+ ret = qce_start(async_req, tmpl->crypto_alg_type, req->cryptlen, 0);
if (ret)
goto error_terminate;
@@ -149,10 +149,10 @@ error_free:
return ret;
}
-static int qce_ablkcipher_setkey(struct crypto_ablkcipher *ablk, const u8 *key,
+static int qce_skcipher_setkey(struct crypto_skcipher *ablk, const u8 *key,
unsigned int keylen)
{
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablk);
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(ablk);
struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
int ret;
@@ -177,13 +177,13 @@ fallback:
return ret;
}
-static int qce_des_setkey(struct crypto_ablkcipher *ablk, const u8 *key,
+static int qce_des_setkey(struct crypto_skcipher *ablk, const u8 *key,
unsigned int keylen)
{
- struct qce_cipher_ctx *ctx = crypto_ablkcipher_ctx(ablk);
+ struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(ablk);
int err;
- err = verify_ablkcipher_des_key(ablk, key);
+ err = verify_skcipher_des_key(ablk, key);
if (err)
return err;
@@ -192,13 +192,13 @@ static int qce_des_setkey(struct crypto_ablkcipher *ablk, const u8 *key,
return 0;
}
-static int qce_des3_setkey(struct crypto_ablkcipher *ablk, const u8 *key,
+static int qce_des3_setkey(struct crypto_skcipher *ablk, const u8 *key,
unsigned int keylen)
{
- struct qce_cipher_ctx *ctx = crypto_ablkcipher_ctx(ablk);
+ struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(ablk);
int err;
- err = verify_ablkcipher_des3_key(ablk, key);
+ err = verify_skcipher_des3_key(ablk, key);
if (err)
return err;
@@ -207,12 +207,11 @@ static int qce_des3_setkey(struct crypto_ablkcipher *ablk, const u8 *key,
return 0;
}
-static int qce_ablkcipher_crypt(struct ablkcipher_request *req, int encrypt)
+static int qce_skcipher_crypt(struct skcipher_request *req, int encrypt)
{
- struct crypto_tfm *tfm =
- crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
- struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
- struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
struct qce_alg_template *tmpl = to_cipher_tmpl(tfm);
int ret;
@@ -227,7 +226,7 @@ static int qce_ablkcipher_crypt(struct ablkcipher_request *req, int encrypt)
skcipher_request_set_callback(subreq, req->base.flags,
NULL, NULL);
skcipher_request_set_crypt(subreq, req->src, req->dst,
- req->nbytes, req->info);
+ req->cryptlen, req->iv);
ret = encrypt ? crypto_skcipher_encrypt(subreq) :
crypto_skcipher_decrypt(subreq);
skcipher_request_zero(subreq);
@@ -237,36 +236,36 @@ static int qce_ablkcipher_crypt(struct ablkcipher_request *req, int encrypt)
return tmpl->qce->async_req_enqueue(tmpl->qce, &req->base);
}
-static int qce_ablkcipher_encrypt(struct ablkcipher_request *req)
+static int qce_skcipher_encrypt(struct skcipher_request *req)
{
- return qce_ablkcipher_crypt(req, 1);
+ return qce_skcipher_crypt(req, 1);
}
-static int qce_ablkcipher_decrypt(struct ablkcipher_request *req)
+static int qce_skcipher_decrypt(struct skcipher_request *req)
{
- return qce_ablkcipher_crypt(req, 0);
+ return qce_skcipher_crypt(req, 0);
}
-static int qce_ablkcipher_init(struct crypto_tfm *tfm)
+static int qce_skcipher_init(struct crypto_skcipher *tfm)
{
- struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
memset(ctx, 0, sizeof(*ctx));
- tfm->crt_ablkcipher.reqsize = sizeof(struct qce_cipher_reqctx);
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct qce_cipher_reqctx));
- ctx->fallback = crypto_alloc_sync_skcipher(crypto_tfm_alg_name(tfm),
+ ctx->fallback = crypto_alloc_sync_skcipher(crypto_tfm_alg_name(&tfm->base),
0, CRYPTO_ALG_NEED_FALLBACK);
return PTR_ERR_OR_ZERO(ctx->fallback);
}
-static void qce_ablkcipher_exit(struct crypto_tfm *tfm)
+static void qce_skcipher_exit(struct crypto_skcipher *tfm)
{
- struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
crypto_free_sync_skcipher(ctx->fallback);
}
-struct qce_ablkcipher_def {
+struct qce_skcipher_def {
unsigned long flags;
const char *name;
const char *drv_name;
@@ -276,7 +275,7 @@ struct qce_ablkcipher_def {
unsigned int max_keysize;
};
-static const struct qce_ablkcipher_def ablkcipher_def[] = {
+static const struct qce_skcipher_def skcipher_def[] = {
{
.flags = QCE_ALG_AES | QCE_MODE_ECB,
.name = "ecb(aes)",
@@ -351,90 +350,91 @@ static const struct qce_ablkcipher_def ablkcipher_def[] = {
},
};
-static int qce_ablkcipher_register_one(const struct qce_ablkcipher_def *def,
+static int qce_skcipher_register_one(const struct qce_skcipher_def *def,
struct qce_device *qce)
{
struct qce_alg_template *tmpl;
- struct crypto_alg *alg;
+ struct skcipher_alg *alg;
int ret;
tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL);
if (!tmpl)
return -ENOMEM;
- alg = &tmpl->alg.crypto;
+ alg = &tmpl->alg.skcipher;
- snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
- snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+ snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
+ snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
def->drv_name);
- alg->cra_blocksize = def->blocksize;
- alg->cra_ablkcipher.ivsize = def->ivsize;
- alg->cra_ablkcipher.min_keysize = def->min_keysize;
- alg->cra_ablkcipher.max_keysize = def->max_keysize;
- alg->cra_ablkcipher.setkey = IS_3DES(def->flags) ? qce_des3_setkey :
- IS_DES(def->flags) ? qce_des_setkey :
- qce_ablkcipher_setkey;
- alg->cra_ablkcipher.encrypt = qce_ablkcipher_encrypt;
- alg->cra_ablkcipher.decrypt = qce_ablkcipher_decrypt;
-
- alg->cra_priority = 300;
- alg->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK;
- alg->cra_ctxsize = sizeof(struct qce_cipher_ctx);
- alg->cra_alignmask = 0;
- alg->cra_type = &crypto_ablkcipher_type;
- alg->cra_module = THIS_MODULE;
- alg->cra_init = qce_ablkcipher_init;
- alg->cra_exit = qce_ablkcipher_exit;
+ alg->base.cra_blocksize = def->blocksize;
+ alg->ivsize = def->ivsize;
+ alg->min_keysize = def->min_keysize;
+ alg->max_keysize = def->max_keysize;
+ alg->setkey = IS_3DES(def->flags) ? qce_des3_setkey :
+ IS_DES(def->flags) ? qce_des_setkey :
+ qce_skcipher_setkey;
+ alg->encrypt = qce_skcipher_encrypt;
+ alg->decrypt = qce_skcipher_decrypt;
+
+ alg->base.cra_priority = 300;
+ alg->base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_KERN_DRIVER_ONLY;
+ alg->base.cra_ctxsize = sizeof(struct qce_cipher_ctx);
+ alg->base.cra_alignmask = 0;
+ alg->base.cra_module = THIS_MODULE;
+
+ alg->init = qce_skcipher_init;
+ alg->exit = qce_skcipher_exit;
INIT_LIST_HEAD(&tmpl->entry);
- tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_ABLKCIPHER;
+ tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_SKCIPHER;
tmpl->alg_flags = def->flags;
tmpl->qce = qce;
- ret = crypto_register_alg(alg);
+ ret = crypto_register_skcipher(alg);
if (ret) {
kfree(tmpl);
- dev_err(qce->dev, "%s registration failed\n", alg->cra_name);
+ dev_err(qce->dev, "%s registration failed\n", alg->base.cra_name);
return ret;
}
- list_add_tail(&tmpl->entry, &ablkcipher_algs);
- dev_dbg(qce->dev, "%s is registered\n", alg->cra_name);
+ list_add_tail(&tmpl->entry, &skcipher_algs);
+ dev_dbg(qce->dev, "%s is registered\n", alg->base.cra_name);
return 0;
}
-static void qce_ablkcipher_unregister(struct qce_device *qce)
+static void qce_skcipher_unregister(struct qce_device *qce)
{
struct qce_alg_template *tmpl, *n;
- list_for_each_entry_safe(tmpl, n, &ablkcipher_algs, entry) {
- crypto_unregister_alg(&tmpl->alg.crypto);
+ list_for_each_entry_safe(tmpl, n, &skcipher_algs, entry) {
+ crypto_unregister_skcipher(&tmpl->alg.skcipher);
list_del(&tmpl->entry);
kfree(tmpl);
}
}
-static int qce_ablkcipher_register(struct qce_device *qce)
+static int qce_skcipher_register(struct qce_device *qce)
{
int ret, i;
- for (i = 0; i < ARRAY_SIZE(ablkcipher_def); i++) {
- ret = qce_ablkcipher_register_one(&ablkcipher_def[i], qce);
+ for (i = 0; i < ARRAY_SIZE(skcipher_def); i++) {
+ ret = qce_skcipher_register_one(&skcipher_def[i], qce);
if (ret)
goto err;
}
return 0;
err:
- qce_ablkcipher_unregister(qce);
+ qce_skcipher_unregister(qce);
return ret;
}
-const struct qce_algo_ops ablkcipher_ops = {
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .register_algs = qce_ablkcipher_register,
- .unregister_algs = qce_ablkcipher_unregister,
- .async_req_handle = qce_ablkcipher_async_req_handle,
+const struct qce_algo_ops skcipher_ops = {
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .register_algs = qce_skcipher_register,
+ .unregister_algs = qce_skcipher_unregister,
+ .async_req_handle = qce_skcipher_async_req_handle,
};
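
The fallback handling keeps the same shape on the new API: the init hook allocates a sync skcipher by the algorithm's cra_name with CRYPTO_ALG_NEED_FALLBACK in the mask, which excludes implementations that themselves set the flag (such as this driver's own), so a software version is matched for the cases the hardware cannot take. Reduced sketch (demo_* names hypothetical):

	#include <crypto/internal/skcipher.h>
	#include <linux/err.h>

	struct demo_cipher_ctx {
		struct crypto_sync_skcipher *fallback;
	};

	static int demo_init(struct crypto_skcipher *tfm)
	{
		struct demo_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
		const char *name = crypto_tfm_alg_name(crypto_skcipher_tfm(tfm));

		ctx->fallback = crypto_alloc_sync_skcipher(name, 0,
							   CRYPTO_ALG_NEED_FALLBACK);
		return PTR_ERR_OR_ZERO(ctx->fallback);
	}

	static void demo_exit(struct crypto_skcipher *tfm)
	{
		struct demo_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);

		crypto_free_sync_skcipher(ctx->fallback);
	}
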
diff --git a/drivers/crypto/rockchip/Makefile b/drivers/crypto/rockchip/Makefile
index 6e23764e6c8a..785277aca71e 100644
--- a/drivers/crypto/rockchip/Makefile
+++ b/drivers/crypto/rockchip/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_CRYPTO_DEV_ROCKCHIP) += rk_crypto.o
rk_crypto-objs := rk3288_crypto.o \
- rk3288_crypto_ablkcipher.o \
+ rk3288_crypto_skcipher.o \
rk3288_crypto_ahash.o
diff --git a/drivers/crypto/rockchip/rk3288_crypto.c b/drivers/crypto/rockchip/rk3288_crypto.c
index e5714ef24bf2..f385587f99af 100644
--- a/drivers/crypto/rockchip/rk3288_crypto.c
+++ b/drivers/crypto/rockchip/rk3288_crypto.c
@@ -264,8 +264,8 @@ static int rk_crypto_register(struct rk_crypto_info *crypto_info)
for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++) {
rk_cipher_algs[i]->dev = crypto_info;
if (rk_cipher_algs[i]->type == ALG_TYPE_CIPHER)
- err = crypto_register_alg(
- &rk_cipher_algs[i]->alg.crypto);
+ err = crypto_register_skcipher(
+ &rk_cipher_algs[i]->alg.skcipher);
else
err = crypto_register_ahash(
&rk_cipher_algs[i]->alg.hash);
@@ -277,7 +277,7 @@ static int rk_crypto_register(struct rk_crypto_info *crypto_info)
err_cipher_algs:
for (k = 0; k < i; k++) {
if (rk_cipher_algs[i]->type == ALG_TYPE_CIPHER)
- crypto_unregister_alg(&rk_cipher_algs[k]->alg.crypto);
+ crypto_unregister_skcipher(&rk_cipher_algs[k]->alg.skcipher);
else
crypto_unregister_ahash(&rk_cipher_algs[i]->alg.hash);
}
@@ -290,7 +290,7 @@ static void rk_crypto_unregister(void)
for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++) {
if (rk_cipher_algs[i]->type == ALG_TYPE_CIPHER)
- crypto_unregister_alg(&rk_cipher_algs[i]->alg.crypto);
+ crypto_unregister_skcipher(&rk_cipher_algs[i]->alg.skcipher);
else
crypto_unregister_ahash(&rk_cipher_algs[i]->alg.hash);
}
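
One observation on the error path above: the unwind loop iterates over k but still tests rk_cipher_algs[i]->type and unregisters rk_cipher_algs[i]->alg.hash, an apparent index slip that predates this conversion and is carried over as found. A self-consistent unwind would index everything by k; sketch of the loop body only, reusing the file's own symbols:

	for (k = 0; k < i; k++) {
		if (rk_cipher_algs[k]->type == ALG_TYPE_CIPHER)
			crypto_unregister_skcipher(&rk_cipher_algs[k]->alg.skcipher);
		else
			crypto_unregister_ahash(&rk_cipher_algs[k]->alg.hash);
	}
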
diff --git a/drivers/crypto/rockchip/rk3288_crypto.h b/drivers/crypto/rockchip/rk3288_crypto.h
index 18e2b3f29336..2b49c677afdb 100644
--- a/drivers/crypto/rockchip/rk3288_crypto.h
+++ b/drivers/crypto/rockchip/rk3288_crypto.h
@@ -8,6 +8,7 @@
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <crypto/internal/hash.h>
+#include <crypto/internal/skcipher.h>
#include <crypto/md5.h>
#include <crypto/sha.h>
@@ -256,7 +257,7 @@ enum alg_type {
struct rk_crypto_tmp {
struct rk_crypto_info *dev;
union {
- struct crypto_alg crypto;
+ struct skcipher_alg skcipher;
struct ahash_alg hash;
} alg;
enum alg_type type;
diff --git a/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c b/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c
deleted file mode 100644
index d0f4b2d18059..000000000000
--- a/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c
+++ /dev/null
@@ -1,556 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Crypto acceleration support for Rockchip RK3288
- *
- * Copyright (c) 2015, Fuzhou Rockchip Electronics Co., Ltd
- *
- * Author: Zain Wang <zain.wang@rock-chips.com>
- *
- * Some ideas are from marvell-cesa.c and s5p-sss.c driver.
- */
-#include "rk3288_crypto.h"
-
-#define RK_CRYPTO_DEC BIT(0)
-
-static void rk_crypto_complete(struct crypto_async_request *base, int err)
-{
- if (base->complete)
- base->complete(base, err);
-}
-
-static int rk_handle_req(struct rk_crypto_info *dev,
- struct ablkcipher_request *req)
-{
- if (!IS_ALIGNED(req->nbytes, dev->align_size))
- return -EINVAL;
- else
- return dev->enqueue(dev, &req->base);
-}
-
-static int rk_aes_setkey(struct crypto_ablkcipher *cipher,
- const u8 *key, unsigned int keylen)
-{
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
- struct rk_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
-
- if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
- keylen != AES_KEYSIZE_256) {
- crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
- }
- ctx->keylen = keylen;
- memcpy_toio(ctx->dev->reg + RK_CRYPTO_AES_KEY_0, key, keylen);
- return 0;
-}
-
-static int rk_des_setkey(struct crypto_ablkcipher *cipher,
- const u8 *key, unsigned int keylen)
-{
- struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(cipher);
- int err;
-
- err = verify_ablkcipher_des_key(cipher, key);
- if (err)
- return err;
-
- ctx->keylen = keylen;
- memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_KEY1_0, key, keylen);
- return 0;
-}
-
-static int rk_tdes_setkey(struct crypto_ablkcipher *cipher,
- const u8 *key, unsigned int keylen)
-{
- struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(cipher);
- int err;
-
- err = verify_ablkcipher_des3_key(cipher, key);
- if (err)
- return err;
-
- ctx->keylen = keylen;
- memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_KEY1_0, key, keylen);
- return 0;
-}
-
-static int rk_aes_ecb_encrypt(struct ablkcipher_request *req)
-{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
- struct rk_crypto_info *dev = ctx->dev;
-
- ctx->mode = RK_CRYPTO_AES_ECB_MODE;
- return rk_handle_req(dev, req);
-}
-
-static int rk_aes_ecb_decrypt(struct ablkcipher_request *req)
-{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
- struct rk_crypto_info *dev = ctx->dev;
-
- ctx->mode = RK_CRYPTO_AES_ECB_MODE | RK_CRYPTO_DEC;
- return rk_handle_req(dev, req);
-}
-
-static int rk_aes_cbc_encrypt(struct ablkcipher_request *req)
-{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
- struct rk_crypto_info *dev = ctx->dev;
-
- ctx->mode = RK_CRYPTO_AES_CBC_MODE;
- return rk_handle_req(dev, req);
-}
-
-static int rk_aes_cbc_decrypt(struct ablkcipher_request *req)
-{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
- struct rk_crypto_info *dev = ctx->dev;
-
- ctx->mode = RK_CRYPTO_AES_CBC_MODE | RK_CRYPTO_DEC;
- return rk_handle_req(dev, req);
-}
-
-static int rk_des_ecb_encrypt(struct ablkcipher_request *req)
-{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
- struct rk_crypto_info *dev = ctx->dev;
-
- ctx->mode = 0;
- return rk_handle_req(dev, req);
-}
-
-static int rk_des_ecb_decrypt(struct ablkcipher_request *req)
-{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
- struct rk_crypto_info *dev = ctx->dev;
-
- ctx->mode = RK_CRYPTO_DEC;
- return rk_handle_req(dev, req);
-}
-
-static int rk_des_cbc_encrypt(struct ablkcipher_request *req)
-{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
- struct rk_crypto_info *dev = ctx->dev;
-
- ctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC;
- return rk_handle_req(dev, req);
-}
-
-static int rk_des_cbc_decrypt(struct ablkcipher_request *req)
-{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
- struct rk_crypto_info *dev = ctx->dev;
-
- ctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC | RK_CRYPTO_DEC;
- return rk_handle_req(dev, req);
-}
-
-static int rk_des3_ede_ecb_encrypt(struct ablkcipher_request *req)
-{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
- struct rk_crypto_info *dev = ctx->dev;
-
- ctx->mode = RK_CRYPTO_TDES_SELECT;
- return rk_handle_req(dev, req);
-}
-
-static int rk_des3_ede_ecb_decrypt(struct ablkcipher_request *req)
-{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
- struct rk_crypto_info *dev = ctx->dev;
-
- ctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_DEC;
- return rk_handle_req(dev, req);
-}
-
-static int rk_des3_ede_cbc_encrypt(struct ablkcipher_request *req)
-{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
- struct rk_crypto_info *dev = ctx->dev;
-
- ctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC;
- return rk_handle_req(dev, req);
-}
-
-static int rk_des3_ede_cbc_decrypt(struct ablkcipher_request *req)
-{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
- struct rk_crypto_info *dev = ctx->dev;
-
- ctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC |
- RK_CRYPTO_DEC;
- return rk_handle_req(dev, req);
-}
-
-static void rk_ablk_hw_init(struct rk_crypto_info *dev)
-{
- struct ablkcipher_request *req =
- ablkcipher_request_cast(dev->async_req);
- struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
- struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(cipher);
- u32 ivsize, block, conf_reg = 0;
-
- block = crypto_tfm_alg_blocksize(tfm);
- ivsize = crypto_ablkcipher_ivsize(cipher);
-
- if (block == DES_BLOCK_SIZE) {
- ctx->mode |= RK_CRYPTO_TDES_FIFO_MODE |
- RK_CRYPTO_TDES_BYTESWAP_KEY |
- RK_CRYPTO_TDES_BYTESWAP_IV;
- CRYPTO_WRITE(dev, RK_CRYPTO_TDES_CTRL, ctx->mode);
- memcpy_toio(dev->reg + RK_CRYPTO_TDES_IV_0, req->info, ivsize);
- conf_reg = RK_CRYPTO_DESSEL;
- } else {
- ctx->mode |= RK_CRYPTO_AES_FIFO_MODE |
- RK_CRYPTO_AES_KEY_CHANGE |
- RK_CRYPTO_AES_BYTESWAP_KEY |
- RK_CRYPTO_AES_BYTESWAP_IV;
- if (ctx->keylen == AES_KEYSIZE_192)
- ctx->mode |= RK_CRYPTO_AES_192BIT_key;
- else if (ctx->keylen == AES_KEYSIZE_256)
- ctx->mode |= RK_CRYPTO_AES_256BIT_key;
- CRYPTO_WRITE(dev, RK_CRYPTO_AES_CTRL, ctx->mode);
- memcpy_toio(dev->reg + RK_CRYPTO_AES_IV_0, req->info, ivsize);
- }
- conf_reg |= RK_CRYPTO_BYTESWAP_BTFIFO |
- RK_CRYPTO_BYTESWAP_BRFIFO;
- CRYPTO_WRITE(dev, RK_CRYPTO_CONF, conf_reg);
- CRYPTO_WRITE(dev, RK_CRYPTO_INTENA,
- RK_CRYPTO_BCDMA_ERR_ENA | RK_CRYPTO_BCDMA_DONE_ENA);
-}
-
-static void crypto_dma_start(struct rk_crypto_info *dev)
-{
- CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAS, dev->addr_in);
- CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAL, dev->count / 4);
- CRYPTO_WRITE(dev, RK_CRYPTO_BTDMAS, dev->addr_out);
- CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, RK_CRYPTO_BLOCK_START |
- _SBF(RK_CRYPTO_BLOCK_START, 16));
-}
-
-static int rk_set_data_start(struct rk_crypto_info *dev)
-{
- int err;
- struct ablkcipher_request *req =
- ablkcipher_request_cast(dev->async_req);
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
- u32 ivsize = crypto_ablkcipher_ivsize(tfm);
- u8 *src_last_blk = page_address(sg_page(dev->sg_src)) +
- dev->sg_src->offset + dev->sg_src->length - ivsize;
-
- /* Store the iv that need to be updated in chain mode.
- * And update the IV buffer to contain the next IV for decryption mode.
- */
- if (ctx->mode & RK_CRYPTO_DEC) {
- memcpy(ctx->iv, src_last_blk, ivsize);
- sg_pcopy_to_buffer(dev->first, dev->src_nents, req->info,
- ivsize, dev->total - ivsize);
- }
-
- err = dev->load_data(dev, dev->sg_src, dev->sg_dst);
- if (!err)
- crypto_dma_start(dev);
- return err;
-}
-
-static int rk_ablk_start(struct rk_crypto_info *dev)
-{
- struct ablkcipher_request *req =
- ablkcipher_request_cast(dev->async_req);
- unsigned long flags;
- int err = 0;
-
- dev->left_bytes = req->nbytes;
- dev->total = req->nbytes;
- dev->sg_src = req->src;
- dev->first = req->src;
- dev->src_nents = sg_nents(req->src);
- dev->sg_dst = req->dst;
- dev->dst_nents = sg_nents(req->dst);
- dev->aligned = 1;
-
- spin_lock_irqsave(&dev->lock, flags);
- rk_ablk_hw_init(dev);
- err = rk_set_data_start(dev);
- spin_unlock_irqrestore(&dev->lock, flags);
- return err;
-}
-
-static void rk_iv_copyback(struct rk_crypto_info *dev)
-{
- struct ablkcipher_request *req =
- ablkcipher_request_cast(dev->async_req);
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
- u32 ivsize = crypto_ablkcipher_ivsize(tfm);
-
- /* Update the IV buffer to contain the next IV for encryption mode. */
- if (!(ctx->mode & RK_CRYPTO_DEC)) {
- if (dev->aligned) {
- memcpy(req->info, sg_virt(dev->sg_dst) +
- dev->sg_dst->length - ivsize, ivsize);
- } else {
- memcpy(req->info, dev->addr_vir +
- dev->count - ivsize, ivsize);
- }
- }
-}
-
-static void rk_update_iv(struct rk_crypto_info *dev)
-{
- struct ablkcipher_request *req =
- ablkcipher_request_cast(dev->async_req);
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
- u32 ivsize = crypto_ablkcipher_ivsize(tfm);
- u8 *new_iv = NULL;
-
- if (ctx->mode & RK_CRYPTO_DEC) {
- new_iv = ctx->iv;
- } else {
- new_iv = page_address(sg_page(dev->sg_dst)) +
- dev->sg_dst->offset + dev->sg_dst->length - ivsize;
- }
-
- if (ivsize == DES_BLOCK_SIZE)
- memcpy_toio(dev->reg + RK_CRYPTO_TDES_IV_0, new_iv, ivsize);
- else if (ivsize == AES_BLOCK_SIZE)
- memcpy_toio(dev->reg + RK_CRYPTO_AES_IV_0, new_iv, ivsize);
-}
-
-/* return:
- * true some err was occurred
- * fault no err, continue
- */
-static int rk_ablk_rx(struct rk_crypto_info *dev)
-{
- int err = 0;
- struct ablkcipher_request *req =
- ablkcipher_request_cast(dev->async_req);
-
- dev->unload_data(dev);
- if (!dev->aligned) {
- if (!sg_pcopy_from_buffer(req->dst, dev->dst_nents,
- dev->addr_vir, dev->count,
- dev->total - dev->left_bytes -
- dev->count)) {
- err = -EINVAL;
- goto out_rx;
- }
- }
- if (dev->left_bytes) {
- rk_update_iv(dev);
- if (dev->aligned) {
- if (sg_is_last(dev->sg_src)) {
- dev_err(dev->dev, "[%s:%d] Lack of data\n",
- __func__, __LINE__);
- err = -ENOMEM;
- goto out_rx;
- }
- dev->sg_src = sg_next(dev->sg_src);
- dev->sg_dst = sg_next(dev->sg_dst);
- }
- err = rk_set_data_start(dev);
- } else {
- rk_iv_copyback(dev);
- /* here show the calculation is over without any err */
- dev->complete(dev->async_req, 0);
- tasklet_schedule(&dev->queue_task);
- }
-out_rx:
- return err;
-}
-
-static int rk_ablk_cra_init(struct crypto_tfm *tfm)
-{
- struct rk_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
- struct crypto_alg *alg = tfm->__crt_alg;
- struct rk_crypto_tmp *algt;
-
- algt = container_of(alg, struct rk_crypto_tmp, alg.crypto);
-
- ctx->dev = algt->dev;
- ctx->dev->align_size = crypto_tfm_alg_alignmask(tfm) + 1;
- ctx->dev->start = rk_ablk_start;
- ctx->dev->update = rk_ablk_rx;
- ctx->dev->complete = rk_crypto_complete;
- ctx->dev->addr_vir = (char *)__get_free_page(GFP_KERNEL);
-
- return ctx->dev->addr_vir ? ctx->dev->enable_clk(ctx->dev) : -ENOMEM;
-}
-
-static void rk_ablk_cra_exit(struct crypto_tfm *tfm)
-{
- struct rk_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
-
- free_page((unsigned long)ctx->dev->addr_vir);
- ctx->dev->disable_clk(ctx->dev);
-}
-
-struct rk_crypto_tmp rk_ecb_aes_alg = {
- .type = ALG_TYPE_CIPHER,
- .alg.crypto = {
- .cra_name = "ecb(aes)",
- .cra_driver_name = "ecb-aes-rk",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct rk_cipher_ctx),
- .cra_alignmask = 0x0f,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = rk_ablk_cra_init,
- .cra_exit = rk_ablk_cra_exit,
- .cra_u.ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = rk_aes_setkey,
- .encrypt = rk_aes_ecb_encrypt,
- .decrypt = rk_aes_ecb_decrypt,
- }
- }
-};
-
-struct rk_crypto_tmp rk_cbc_aes_alg = {
- .type = ALG_TYPE_CIPHER,
- .alg.crypto = {
- .cra_name = "cbc(aes)",
- .cra_driver_name = "cbc-aes-rk",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct rk_cipher_ctx),
- .cra_alignmask = 0x0f,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = rk_ablk_cra_init,
- .cra_exit = rk_ablk_cra_exit,
- .cra_u.ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = rk_aes_setkey,
- .encrypt = rk_aes_cbc_encrypt,
- .decrypt = rk_aes_cbc_decrypt,
- }
- }
-};
-
-struct rk_crypto_tmp rk_ecb_des_alg = {
- .type = ALG_TYPE_CIPHER,
- .alg.crypto = {
- .cra_name = "ecb(des)",
- .cra_driver_name = "ecb-des-rk",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct rk_cipher_ctx),
- .cra_alignmask = 0x07,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = rk_ablk_cra_init,
- .cra_exit = rk_ablk_cra_exit,
- .cra_u.ablkcipher = {
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .setkey = rk_des_setkey,
- .encrypt = rk_des_ecb_encrypt,
- .decrypt = rk_des_ecb_decrypt,
- }
- }
-};
-
-struct rk_crypto_tmp rk_cbc_des_alg = {
- .type = ALG_TYPE_CIPHER,
- .alg.crypto = {
- .cra_name = "cbc(des)",
- .cra_driver_name = "cbc-des-rk",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct rk_cipher_ctx),
- .cra_alignmask = 0x07,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = rk_ablk_cra_init,
- .cra_exit = rk_ablk_cra_exit,
- .cra_u.ablkcipher = {
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = rk_des_setkey,
- .encrypt = rk_des_cbc_encrypt,
- .decrypt = rk_des_cbc_decrypt,
- }
- }
-};
-
-struct rk_crypto_tmp rk_ecb_des3_ede_alg = {
- .type = ALG_TYPE_CIPHER,
- .alg.crypto = {
- .cra_name = "ecb(des3_ede)",
- .cra_driver_name = "ecb-des3-ede-rk",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct rk_cipher_ctx),
- .cra_alignmask = 0x07,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = rk_ablk_cra_init,
- .cra_exit = rk_ablk_cra_exit,
- .cra_u.ablkcipher = {
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = rk_tdes_setkey,
- .encrypt = rk_des3_ede_ecb_encrypt,
- .decrypt = rk_des3_ede_ecb_decrypt,
- }
- }
-};
-
-struct rk_crypto_tmp rk_cbc_des3_ede_alg = {
- .type = ALG_TYPE_CIPHER,
- .alg.crypto = {
- .cra_name = "cbc(des3_ede)",
- .cra_driver_name = "cbc-des3-ede-rk",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct rk_cipher_ctx),
- .cra_alignmask = 0x07,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = rk_ablk_cra_init,
- .cra_exit = rk_ablk_cra_exit,
- .cra_u.ablkcipher = {
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = rk_tdes_setkey,
- .encrypt = rk_des3_ede_cbc_encrypt,
- .decrypt = rk_des3_ede_cbc_decrypt,
- }
- }
-};
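The removed ablkcipher implementation above is replaced by an skcipher-based
one; the function bodies survive almost unchanged, and the conversion reduces
to a mechanical mapping of the old API onto the new one. A summary inferred
from the hunks in this series (not an exhaustive list):

	ablkcipher_request                 -> skcipher_request
	crypto_ablkcipher_reqtfm(req)      -> crypto_skcipher_reqtfm(req)
	crypto_ablkcipher_ctx(tfm)         -> crypto_skcipher_ctx(tfm)
	ablkcipher_request_ctx(req)        -> skcipher_request_ctx(req)
	req->nbytes                        -> req->cryptlen
	req->info                          -> req->iv
	tfm->crt_ablkcipher.reqsize = n    -> crypto_skcipher_set_reqsize(tfm, n)
	ablkcipher_enqueue_request(q, req) -> crypto_enqueue_request(q, &req->base)
	crypto_register_alg(&alg)          -> crypto_register_skcipher(&alg)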
diff --git a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
new file mode 100644
index 000000000000..ca4de4ddfe1f
--- /dev/null
+++ b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
@@ -0,0 +1,538 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Crypto acceleration support for Rockchip RK3288
+ *
+ * Copyright (c) 2015, Fuzhou Rockchip Electronics Co., Ltd
+ *
+ * Author: Zain Wang <zain.wang@rock-chips.com>
+ *
+ * Some ideas are from marvell-cesa.c and s5p-sss.c driver.
+ */
+#include "rk3288_crypto.h"
+
+#define RK_CRYPTO_DEC BIT(0)
+
+static void rk_crypto_complete(struct crypto_async_request *base, int err)
+{
+ if (base->complete)
+ base->complete(base, err);
+}
+
+static int rk_handle_req(struct rk_crypto_info *dev,
+ struct skcipher_request *req)
+{
+ if (!IS_ALIGNED(req->cryptlen, dev->align_size))
+ return -EINVAL;
+ else
+ return dev->enqueue(dev, &req->base);
+}
+
+static int rk_aes_setkey(struct crypto_skcipher *cipher,
+ const u8 *key, unsigned int keylen)
+{
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
+ struct rk_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
+ keylen != AES_KEYSIZE_256) {
+ crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+ ctx->keylen = keylen;
+ memcpy_toio(ctx->dev->reg + RK_CRYPTO_AES_KEY_0, key, keylen);
+ return 0;
+}
+
+static int rk_des_setkey(struct crypto_skcipher *cipher,
+ const u8 *key, unsigned int keylen)
+{
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(cipher);
+ int err;
+
+ err = verify_skcipher_des_key(cipher, key);
+ if (err)
+ return err;
+
+ ctx->keylen = keylen;
+ memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_KEY1_0, key, keylen);
+ return 0;
+}
+
+static int rk_tdes_setkey(struct crypto_skcipher *cipher,
+ const u8 *key, unsigned int keylen)
+{
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(cipher);
+ int err;
+
+ err = verify_skcipher_des3_key(cipher, key);
+ if (err)
+ return err;
+
+ ctx->keylen = keylen;
+ memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_KEY1_0, key, keylen);
+ return 0;
+}
+
+static int rk_aes_ecb_encrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct rk_crypto_info *dev = ctx->dev;
+
+ ctx->mode = RK_CRYPTO_AES_ECB_MODE;
+ return rk_handle_req(dev, req);
+}
+
+static int rk_aes_ecb_decrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct rk_crypto_info *dev = ctx->dev;
+
+ ctx->mode = RK_CRYPTO_AES_ECB_MODE | RK_CRYPTO_DEC;
+ return rk_handle_req(dev, req);
+}
+
+static int rk_aes_cbc_encrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct rk_crypto_info *dev = ctx->dev;
+
+ ctx->mode = RK_CRYPTO_AES_CBC_MODE;
+ return rk_handle_req(dev, req);
+}
+
+static int rk_aes_cbc_decrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct rk_crypto_info *dev = ctx->dev;
+
+ ctx->mode = RK_CRYPTO_AES_CBC_MODE | RK_CRYPTO_DEC;
+ return rk_handle_req(dev, req);
+}
+
+static int rk_des_ecb_encrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct rk_crypto_info *dev = ctx->dev;
+
+ ctx->mode = 0;
+ return rk_handle_req(dev, req);
+}
+
+static int rk_des_ecb_decrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct rk_crypto_info *dev = ctx->dev;
+
+ ctx->mode = RK_CRYPTO_DEC;
+ return rk_handle_req(dev, req);
+}
+
+static int rk_des_cbc_encrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct rk_crypto_info *dev = ctx->dev;
+
+ ctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC;
+ return rk_handle_req(dev, req);
+}
+
+static int rk_des_cbc_decrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct rk_crypto_info *dev = ctx->dev;
+
+ ctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC | RK_CRYPTO_DEC;
+ return rk_handle_req(dev, req);
+}
+
+static int rk_des3_ede_ecb_encrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct rk_crypto_info *dev = ctx->dev;
+
+ ctx->mode = RK_CRYPTO_TDES_SELECT;
+ return rk_handle_req(dev, req);
+}
+
+static int rk_des3_ede_ecb_decrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct rk_crypto_info *dev = ctx->dev;
+
+ ctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_DEC;
+ return rk_handle_req(dev, req);
+}
+
+static int rk_des3_ede_cbc_encrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct rk_crypto_info *dev = ctx->dev;
+
+ ctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC;
+ return rk_handle_req(dev, req);
+}
+
+static int rk_des3_ede_cbc_decrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct rk_crypto_info *dev = ctx->dev;
+
+ ctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC |
+ RK_CRYPTO_DEC;
+ return rk_handle_req(dev, req);
+}
+
+static void rk_ablk_hw_init(struct rk_crypto_info *dev)
+{
+ struct skcipher_request *req =
+ skcipher_request_cast(dev->async_req);
+ struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(cipher);
+ u32 ivsize, block, conf_reg = 0;
+
+ block = crypto_tfm_alg_blocksize(tfm);
+ ivsize = crypto_skcipher_ivsize(cipher);
+
+ if (block == DES_BLOCK_SIZE) {
+ ctx->mode |= RK_CRYPTO_TDES_FIFO_MODE |
+ RK_CRYPTO_TDES_BYTESWAP_KEY |
+ RK_CRYPTO_TDES_BYTESWAP_IV;
+ CRYPTO_WRITE(dev, RK_CRYPTO_TDES_CTRL, ctx->mode);
+ memcpy_toio(dev->reg + RK_CRYPTO_TDES_IV_0, req->iv, ivsize);
+ conf_reg = RK_CRYPTO_DESSEL;
+ } else {
+ ctx->mode |= RK_CRYPTO_AES_FIFO_MODE |
+ RK_CRYPTO_AES_KEY_CHANGE |
+ RK_CRYPTO_AES_BYTESWAP_KEY |
+ RK_CRYPTO_AES_BYTESWAP_IV;
+ if (ctx->keylen == AES_KEYSIZE_192)
+ ctx->mode |= RK_CRYPTO_AES_192BIT_key;
+ else if (ctx->keylen == AES_KEYSIZE_256)
+ ctx->mode |= RK_CRYPTO_AES_256BIT_key;
+ CRYPTO_WRITE(dev, RK_CRYPTO_AES_CTRL, ctx->mode);
+ memcpy_toio(dev->reg + RK_CRYPTO_AES_IV_0, req->iv, ivsize);
+ }
+ conf_reg |= RK_CRYPTO_BYTESWAP_BTFIFO |
+ RK_CRYPTO_BYTESWAP_BRFIFO;
+ CRYPTO_WRITE(dev, RK_CRYPTO_CONF, conf_reg);
+ CRYPTO_WRITE(dev, RK_CRYPTO_INTENA,
+ RK_CRYPTO_BCDMA_ERR_ENA | RK_CRYPTO_BCDMA_DONE_ENA);
+}
+
+static void crypto_dma_start(struct rk_crypto_info *dev)
+{
+ CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAS, dev->addr_in);
+ CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAL, dev->count / 4);
+ CRYPTO_WRITE(dev, RK_CRYPTO_BTDMAS, dev->addr_out);
+ CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, RK_CRYPTO_BLOCK_START |
+ _SBF(RK_CRYPTO_BLOCK_START, 16));
+}
+
+static int rk_set_data_start(struct rk_crypto_info *dev)
+{
+ int err;
+ struct skcipher_request *req =
+ skcipher_request_cast(dev->async_req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ u32 ivsize = crypto_skcipher_ivsize(tfm);
+ u8 *src_last_blk = page_address(sg_page(dev->sg_src)) +
+ dev->sg_src->offset + dev->sg_src->length - ivsize;
+
+ /* Store the IV that needs to be carried over in chain mode, and
+ * update the IV buffer to contain the next IV for decryption mode.
+ */
+ if (ctx->mode & RK_CRYPTO_DEC) {
+ memcpy(ctx->iv, src_last_blk, ivsize);
+ sg_pcopy_to_buffer(dev->first, dev->src_nents, req->iv,
+ ivsize, dev->total - ivsize);
+ }
+
+ err = dev->load_data(dev, dev->sg_src, dev->sg_dst);
+ if (!err)
+ crypto_dma_start(dev);
+ return err;
+}
+
+static int rk_ablk_start(struct rk_crypto_info *dev)
+{
+ struct skcipher_request *req =
+ skcipher_request_cast(dev->async_req);
+ unsigned long flags;
+ int err = 0;
+
+ dev->left_bytes = req->cryptlen;
+ dev->total = req->cryptlen;
+ dev->sg_src = req->src;
+ dev->first = req->src;
+ dev->src_nents = sg_nents(req->src);
+ dev->sg_dst = req->dst;
+ dev->dst_nents = sg_nents(req->dst);
+ dev->aligned = 1;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ rk_ablk_hw_init(dev);
+ err = rk_set_data_start(dev);
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return err;
+}
+
+static void rk_iv_copyback(struct rk_crypto_info *dev)
+{
+ struct skcipher_request *req =
+ skcipher_request_cast(dev->async_req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ u32 ivsize = crypto_skcipher_ivsize(tfm);
+
+ /* Update the IV buffer to contain the next IV for encryption mode. */
+ if (!(ctx->mode & RK_CRYPTO_DEC)) {
+ if (dev->aligned) {
+ memcpy(req->iv, sg_virt(dev->sg_dst) +
+ dev->sg_dst->length - ivsize, ivsize);
+ } else {
+ memcpy(req->iv, dev->addr_vir +
+ dev->count - ivsize, ivsize);
+ }
+ }
+}
+
+static void rk_update_iv(struct rk_crypto_info *dev)
+{
+ struct skcipher_request *req =
+ skcipher_request_cast(dev->async_req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ u32 ivsize = crypto_skcipher_ivsize(tfm);
+ u8 *new_iv = NULL;
+
+ if (ctx->mode & RK_CRYPTO_DEC) {
+ new_iv = ctx->iv;
+ } else {
+ new_iv = page_address(sg_page(dev->sg_dst)) +
+ dev->sg_dst->offset + dev->sg_dst->length - ivsize;
+ }
+
+ if (ivsize == DES_BLOCK_SIZE)
+ memcpy_toio(dev->reg + RK_CRYPTO_TDES_IV_0, new_iv, ivsize);
+ else if (ivsize == AES_BLOCK_SIZE)
+ memcpy_toio(dev->reg + RK_CRYPTO_AES_IV_0, new_iv, ivsize);
+}
+
+/* return:
+ * true an error occurred
+ * false no error, continue
+ */
+static int rk_ablk_rx(struct rk_crypto_info *dev)
+{
+ int err = 0;
+ struct skcipher_request *req =
+ skcipher_request_cast(dev->async_req);
+
+ dev->unload_data(dev);
+ if (!dev->aligned) {
+ if (!sg_pcopy_from_buffer(req->dst, dev->dst_nents,
+ dev->addr_vir, dev->count,
+ dev->total - dev->left_bytes -
+ dev->count)) {
+ err = -EINVAL;
+ goto out_rx;
+ }
+ }
+ if (dev->left_bytes) {
+ rk_update_iv(dev);
+ if (dev->aligned) {
+ if (sg_is_last(dev->sg_src)) {
+ dev_err(dev->dev, "[%s:%d] Lack of data\n",
+ __func__, __LINE__);
+ err = -ENOMEM;
+ goto out_rx;
+ }
+ dev->sg_src = sg_next(dev->sg_src);
+ dev->sg_dst = sg_next(dev->sg_dst);
+ }
+ err = rk_set_data_start(dev);
+ } else {
+ rk_iv_copyback(dev);
+ /* the request completed without any error */
+ dev->complete(dev->async_req, 0);
+ tasklet_schedule(&dev->queue_task);
+ }
+out_rx:
+ return err;
+}
+
+static int rk_ablk_init_tfm(struct crypto_skcipher *tfm)
+{
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ struct rk_crypto_tmp *algt;
+
+ algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher);
+
+ ctx->dev = algt->dev;
+ ctx->dev->align_size = crypto_tfm_alg_alignmask(crypto_skcipher_tfm(tfm)) + 1;
+ ctx->dev->start = rk_ablk_start;
+ ctx->dev->update = rk_ablk_rx;
+ ctx->dev->complete = rk_crypto_complete;
+ ctx->dev->addr_vir = (char *)__get_free_page(GFP_KERNEL);
+
+ return ctx->dev->addr_vir ? ctx->dev->enable_clk(ctx->dev) : -ENOMEM;
+}
+
+static void rk_ablk_exit_tfm(struct crypto_skcipher *tfm)
+{
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ free_page((unsigned long)ctx->dev->addr_vir);
+ ctx->dev->disable_clk(ctx->dev);
+}
+
+struct rk_crypto_tmp rk_ecb_aes_alg = {
+ .type = ALG_TYPE_CIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "ecb(aes)",
+ .base.cra_driver_name = "ecb-aes-rk",
+ .base.cra_priority = 300,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct rk_cipher_ctx),
+ .base.cra_alignmask = 0x0f,
+ .base.cra_module = THIS_MODULE,
+
+ .init = rk_ablk_init_tfm,
+ .exit = rk_ablk_exit_tfm,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = rk_aes_setkey,
+ .encrypt = rk_aes_ecb_encrypt,
+ .decrypt = rk_aes_ecb_decrypt,
+ }
+};
+
+struct rk_crypto_tmp rk_cbc_aes_alg = {
+ .type = ALG_TYPE_CIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "cbc-aes-rk",
+ .base.cra_priority = 300,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct rk_cipher_ctx),
+ .base.cra_alignmask = 0x0f,
+ .base.cra_module = THIS_MODULE,
+
+ .init = rk_ablk_init_tfm,
+ .exit = rk_ablk_exit_tfm,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = rk_aes_setkey,
+ .encrypt = rk_aes_cbc_encrypt,
+ .decrypt = rk_aes_cbc_decrypt,
+ }
+};
+
+struct rk_crypto_tmp rk_ecb_des_alg = {
+ .type = ALG_TYPE_CIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "ecb(des)",
+ .base.cra_driver_name = "ecb-des-rk",
+ .base.cra_priority = 300,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct rk_cipher_ctx),
+ .base.cra_alignmask = 0x07,
+ .base.cra_module = THIS_MODULE,
+
+ .init = rk_ablk_init_tfm,
+ .exit = rk_ablk_exit_tfm,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .setkey = rk_des_setkey,
+ .encrypt = rk_des_ecb_encrypt,
+ .decrypt = rk_des_ecb_decrypt,
+ }
+};
+
+struct rk_crypto_tmp rk_cbc_des_alg = {
+ .type = ALG_TYPE_CIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "cbc(des)",
+ .base.cra_driver_name = "cbc-des-rk",
+ .base.cra_priority = 300,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct rk_cipher_ctx),
+ .base.cra_alignmask = 0x07,
+ .base.cra_module = THIS_MODULE,
+
+ .init = rk_ablk_init_tfm,
+ .exit = rk_ablk_exit_tfm,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = rk_des_setkey,
+ .encrypt = rk_des_cbc_encrypt,
+ .decrypt = rk_des_cbc_decrypt,
+ }
+};
+
+struct rk_crypto_tmp rk_ecb_des3_ede_alg = {
+ .type = ALG_TYPE_CIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "ecb(des3_ede)",
+ .base.cra_driver_name = "ecb-des3-ede-rk",
+ .base.cra_priority = 300,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct rk_cipher_ctx),
+ .base.cra_alignmask = 0x07,
+ .base.cra_module = THIS_MODULE,
+
+ .init = rk_ablk_init_tfm,
+ .exit = rk_ablk_exit_tfm,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = rk_tdes_setkey,
+ .encrypt = rk_des3_ede_ecb_encrypt,
+ .decrypt = rk_des3_ede_ecb_decrypt,
+ }
+};
+
+struct rk_crypto_tmp rk_cbc_des3_ede_alg = {
+ .type = ALG_TYPE_CIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "cbc(des3_ede)",
+ .base.cra_driver_name = "cbc-des3-ede-rk",
+ .base.cra_priority = 300,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct rk_cipher_ctx),
+ .base.cra_alignmask = 0x07,
+ .base.cra_module = THIS_MODULE,
+
+ .init = rk_ablk_init_tfm,
+ .exit = rk_ablk_exit_tfm,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = rk_tdes_setkey,
+ .encrypt = rk_des3_ede_cbc_encrypt,
+ .decrypt = rk_des3_ede_cbc_decrypt,
+ }
+};
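Once registered, the converted driver is reachable through the generic
skcipher API like any other implementation. A minimal in-kernel usage sketch,
assuming "cbc(aes)" resolves to the driver above and the caller may sleep
(buffer and key sizes are illustrative; stack buffers are shown for brevity,
real users of a DMA-based driver need DMA-safe memory):

	#include <crypto/skcipher.h>
	#include <linux/scatterlist.h>

	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	DECLARE_CRYPTO_WAIT(wait);
	struct scatterlist sg;
	u8 key[AES_KEYSIZE_128], iv[AES_BLOCK_SIZE], buf[64];
	int err;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, sizeof(key));
	if (err)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	/* one in-place 64-byte CBC encryption, waited on synchronously */
	sg_init_one(&sg, buf, sizeof(buf));
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, sizeof(buf), iv);
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);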
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index 010f1bb20dad..d66e20a2f54c 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -303,7 +303,7 @@ struct s5p_aes_dev {
void __iomem *aes_ioaddr;
int irq_fc;
- struct ablkcipher_request *req;
+ struct skcipher_request *req;
struct s5p_aes_ctx *ctx;
struct scatterlist *sg_src;
struct scatterlist *sg_dst;
@@ -456,7 +456,7 @@ static void s5p_free_sg_cpy(struct s5p_aes_dev *dev, struct scatterlist **sg)
if (!*sg)
return;
- len = ALIGN(dev->req->nbytes, AES_BLOCK_SIZE);
+ len = ALIGN(dev->req->cryptlen, AES_BLOCK_SIZE);
free_pages((unsigned long)sg_virt(*sg), get_order(len));
kfree(*sg);
@@ -478,27 +478,27 @@ static void s5p_sg_copy_buf(void *buf, struct scatterlist *sg,
static void s5p_sg_done(struct s5p_aes_dev *dev)
{
- struct ablkcipher_request *req = dev->req;
- struct s5p_aes_reqctx *reqctx = ablkcipher_request_ctx(req);
+ struct skcipher_request *req = dev->req;
+ struct s5p_aes_reqctx *reqctx = skcipher_request_ctx(req);
if (dev->sg_dst_cpy) {
dev_dbg(dev->dev,
"Copying %d bytes of output data back to original place\n",
- dev->req->nbytes);
+ dev->req->cryptlen);
s5p_sg_copy_buf(sg_virt(dev->sg_dst_cpy), dev->req->dst,
- dev->req->nbytes, 1);
+ dev->req->cryptlen, 1);
}
s5p_free_sg_cpy(dev, &dev->sg_src_cpy);
s5p_free_sg_cpy(dev, &dev->sg_dst_cpy);
if (reqctx->mode & FLAGS_AES_CBC)
- memcpy_fromio(req->info, dev->aes_ioaddr + SSS_REG_AES_IV_DATA(0), AES_BLOCK_SIZE);
+ memcpy_fromio(req->iv, dev->aes_ioaddr + SSS_REG_AES_IV_DATA(0), AES_BLOCK_SIZE);
else if (reqctx->mode & FLAGS_AES_CTR)
- memcpy_fromio(req->info, dev->aes_ioaddr + SSS_REG_AES_CNT_DATA(0), AES_BLOCK_SIZE);
+ memcpy_fromio(req->iv, dev->aes_ioaddr + SSS_REG_AES_CNT_DATA(0), AES_BLOCK_SIZE);
}
/* Calls the completion. Cannot be called with dev->lock hold. */
-static void s5p_aes_complete(struct ablkcipher_request *req, int err)
+static void s5p_aes_complete(struct skcipher_request *req, int err)
{
req->base.complete(&req->base, err);
}
@@ -523,7 +523,7 @@ static int s5p_make_sg_cpy(struct s5p_aes_dev *dev, struct scatterlist *src,
if (!*dst)
return -ENOMEM;
- len = ALIGN(dev->req->nbytes, AES_BLOCK_SIZE);
+ len = ALIGN(dev->req->cryptlen, AES_BLOCK_SIZE);
pages = (void *)__get_free_pages(GFP_ATOMIC, get_order(len));
if (!pages) {
kfree(*dst);
@@ -531,7 +531,7 @@ static int s5p_make_sg_cpy(struct s5p_aes_dev *dev, struct scatterlist *src,
return -ENOMEM;
}
- s5p_sg_copy_buf(pages, src, dev->req->nbytes, 0);
+ s5p_sg_copy_buf(pages, src, dev->req->cryptlen, 0);
sg_init_table(*dst, 1);
sg_set_buf(*dst, pages, len);
@@ -660,7 +660,7 @@ static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)
{
struct platform_device *pdev = dev_id;
struct s5p_aes_dev *dev = platform_get_drvdata(pdev);
- struct ablkcipher_request *req;
+ struct skcipher_request *req;
int err_dma_tx = 0;
int err_dma_rx = 0;
int err_dma_hx = 0;
@@ -1870,7 +1870,7 @@ static bool s5p_is_sg_aligned(struct scatterlist *sg)
}
static int s5p_set_indata_start(struct s5p_aes_dev *dev,
- struct ablkcipher_request *req)
+ struct skcipher_request *req)
{
struct scatterlist *sg;
int err;
@@ -1897,7 +1897,7 @@ static int s5p_set_indata_start(struct s5p_aes_dev *dev,
}
static int s5p_set_outdata_start(struct s5p_aes_dev *dev,
- struct ablkcipher_request *req)
+ struct skcipher_request *req)
{
struct scatterlist *sg;
int err;
@@ -1925,7 +1925,7 @@ static int s5p_set_outdata_start(struct s5p_aes_dev *dev,
static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
{
- struct ablkcipher_request *req = dev->req;
+ struct skcipher_request *req = dev->req;
u32 aes_control;
unsigned long flags;
int err;
@@ -1938,12 +1938,12 @@ static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CBC) {
aes_control |= SSS_AES_CHAIN_MODE_CBC;
- iv = req->info;
+ iv = req->iv;
ctr = NULL;
} else if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CTR) {
aes_control |= SSS_AES_CHAIN_MODE_CTR;
iv = NULL;
- ctr = req->info;
+ ctr = req->iv;
} else {
iv = NULL; /* AES_ECB */
ctr = NULL;
@@ -2021,21 +2021,21 @@ static void s5p_tasklet_cb(unsigned long data)
if (backlog)
backlog->complete(backlog, -EINPROGRESS);
- dev->req = ablkcipher_request_cast(async_req);
+ dev->req = skcipher_request_cast(async_req);
dev->ctx = crypto_tfm_ctx(dev->req->base.tfm);
- reqctx = ablkcipher_request_ctx(dev->req);
+ reqctx = skcipher_request_ctx(dev->req);
s5p_aes_crypt_start(dev, reqctx->mode);
}
static int s5p_aes_handle_req(struct s5p_aes_dev *dev,
- struct ablkcipher_request *req)
+ struct skcipher_request *req)
{
unsigned long flags;
int err;
spin_lock_irqsave(&dev->lock, flags);
- err = ablkcipher_enqueue_request(&dev->queue, req);
+ err = crypto_enqueue_request(&dev->queue, &req->base);
if (dev->busy) {
spin_unlock_irqrestore(&dev->lock, flags);
return err;
@@ -2049,17 +2049,17 @@ static int s5p_aes_handle_req(struct s5p_aes_dev *dev,
return err;
}
-static int s5p_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
+static int s5p_aes_crypt(struct skcipher_request *req, unsigned long mode)
{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct s5p_aes_reqctx *reqctx = ablkcipher_request_ctx(req);
- struct s5p_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct s5p_aes_reqctx *reqctx = skcipher_request_ctx(req);
+ struct s5p_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
struct s5p_aes_dev *dev = ctx->dev;
- if (!req->nbytes)
+ if (!req->cryptlen)
return 0;
- if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE) &&
+ if (!IS_ALIGNED(req->cryptlen, AES_BLOCK_SIZE) &&
((mode & FLAGS_AES_MODE_MASK) != FLAGS_AES_CTR)) {
dev_dbg(dev->dev, "request size is not exact amount of AES blocks\n");
return -EINVAL;
@@ -2070,10 +2070,10 @@ static int s5p_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
return s5p_aes_handle_req(dev, req);
}
-static int s5p_aes_setkey(struct crypto_ablkcipher *cipher,
+static int s5p_aes_setkey(struct crypto_skcipher *cipher,
const u8 *key, unsigned int keylen)
{
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
struct s5p_aes_ctx *ctx = crypto_tfm_ctx(tfm);
if (keylen != AES_KEYSIZE_128 &&
@@ -2087,106 +2087,97 @@ static int s5p_aes_setkey(struct crypto_ablkcipher *cipher,
return 0;
}
-static int s5p_aes_ecb_encrypt(struct ablkcipher_request *req)
+static int s5p_aes_ecb_encrypt(struct skcipher_request *req)
{
return s5p_aes_crypt(req, 0);
}
-static int s5p_aes_ecb_decrypt(struct ablkcipher_request *req)
+static int s5p_aes_ecb_decrypt(struct skcipher_request *req)
{
return s5p_aes_crypt(req, FLAGS_AES_DECRYPT);
}
-static int s5p_aes_cbc_encrypt(struct ablkcipher_request *req)
+static int s5p_aes_cbc_encrypt(struct skcipher_request *req)
{
return s5p_aes_crypt(req, FLAGS_AES_CBC);
}
-static int s5p_aes_cbc_decrypt(struct ablkcipher_request *req)
+static int s5p_aes_cbc_decrypt(struct skcipher_request *req)
{
return s5p_aes_crypt(req, FLAGS_AES_DECRYPT | FLAGS_AES_CBC);
}
-static int s5p_aes_ctr_crypt(struct ablkcipher_request *req)
+static int s5p_aes_ctr_crypt(struct skcipher_request *req)
{
return s5p_aes_crypt(req, FLAGS_AES_CTR);
}
-static int s5p_aes_cra_init(struct crypto_tfm *tfm)
+static int s5p_aes_init_tfm(struct crypto_skcipher *tfm)
{
- struct s5p_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct s5p_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
ctx->dev = s5p_dev;
- tfm->crt_ablkcipher.reqsize = sizeof(struct s5p_aes_reqctx);
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct s5p_aes_reqctx));
return 0;
}
-static struct crypto_alg algs[] = {
+static struct skcipher_alg algs[] = {
{
- .cra_name = "ecb(aes)",
- .cra_driver_name = "ecb-aes-s5p",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC |
+ .base.cra_name = "ecb(aes)",
+ .base.cra_driver_name = "ecb-aes-s5p",
+ .base.cra_priority = 100,
+ .base.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct s5p_aes_ctx),
- .cra_alignmask = 0x0f,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = s5p_aes_cra_init,
- .cra_u.ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = s5p_aes_setkey,
- .encrypt = s5p_aes_ecb_encrypt,
- .decrypt = s5p_aes_ecb_decrypt,
- }
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct s5p_aes_ctx),
+ .base.cra_alignmask = 0x0f,
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = s5p_aes_setkey,
+ .encrypt = s5p_aes_ecb_encrypt,
+ .decrypt = s5p_aes_ecb_decrypt,
+ .init = s5p_aes_init_tfm,
},
{
- .cra_name = "cbc(aes)",
- .cra_driver_name = "cbc-aes-s5p",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC |
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "cbc-aes-s5p",
+ .base.cra_priority = 100,
+ .base.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct s5p_aes_ctx),
- .cra_alignmask = 0x0f,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = s5p_aes_cra_init,
- .cra_u.ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = s5p_aes_setkey,
- .encrypt = s5p_aes_cbc_encrypt,
- .decrypt = s5p_aes_cbc_decrypt,
- }
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct s5p_aes_ctx),
+ .base.cra_alignmask = 0x0f,
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = s5p_aes_setkey,
+ .encrypt = s5p_aes_cbc_encrypt,
+ .decrypt = s5p_aes_cbc_decrypt,
+ .init = s5p_aes_init_tfm,
},
{
- .cra_name = "ctr(aes)",
- .cra_driver_name = "ctr-aes-s5p",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC |
+ .base.cra_name = "ctr(aes)",
+ .base.cra_driver_name = "ctr-aes-s5p",
+ .base.cra_priority = 100,
+ .base.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct s5p_aes_ctx),
- .cra_alignmask = 0x0f,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = s5p_aes_cra_init,
- .cra_u.ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = s5p_aes_setkey,
- .encrypt = s5p_aes_ctr_crypt,
- .decrypt = s5p_aes_ctr_crypt,
- }
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct s5p_aes_ctx),
+ .base.cra_alignmask = 0x0f,
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = s5p_aes_setkey,
+ .encrypt = s5p_aes_ctr_crypt,
+ .decrypt = s5p_aes_ctr_crypt,
+ .init = s5p_aes_init_tfm,
},
};
@@ -2297,7 +2288,7 @@ static int s5p_aes_probe(struct platform_device *pdev)
crypto_init_queue(&pdata->queue, CRYPTO_QUEUE_LEN);
for (i = 0; i < ARRAY_SIZE(algs); i++) {
- err = crypto_register_alg(&algs[i]);
+ err = crypto_register_skcipher(&algs[i]);
if (err)
goto err_algs;
}
@@ -2334,11 +2325,11 @@ err_hash:
err_algs:
if (i < ARRAY_SIZE(algs))
- dev_err(dev, "can't register '%s': %d\n", algs[i].cra_name,
+ dev_err(dev, "can't register '%s': %d\n", algs[i].base.cra_name,
err);
for (j = 0; j < i; j++)
- crypto_unregister_alg(&algs[j]);
+ crypto_unregister_skcipher(&algs[j]);
tasklet_kill(&pdata->tasklet);
@@ -2362,7 +2353,7 @@ static int s5p_aes_remove(struct platform_device *pdev)
return -ENODEV;
for (i = 0; i < ARRAY_SIZE(algs); i++)
- crypto_unregister_alg(&algs[i]);
+ crypto_unregister_skcipher(&algs[i]);
tasklet_kill(&pdata->tasklet);
if (pdata->use_hash) {
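The same per-entry registration switch recurs in the drivers below. For
arrays of algorithms the skcipher API also provides bulk helpers, so the
loop-with-unwind pattern could alternatively be written as (a sketch,
assuming no other unregistration is interleaved):

	err = crypto_register_skciphers(algs, ARRAY_SIZE(algs));
	...
	crypto_unregister_skciphers(algs, ARRAY_SIZE(algs));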
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index 8ac8ec6decd5..d4ea2f11ca68 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -547,7 +547,7 @@ unmap_in:
return -EINVAL;
}
-static int sahara_aes_process(struct ablkcipher_request *req)
+static int sahara_aes_process(struct skcipher_request *req)
{
struct sahara_dev *dev = dev_ptr;
struct sahara_ctx *ctx;
@@ -558,20 +558,20 @@ static int sahara_aes_process(struct ablkcipher_request *req)
/* Request is ready to be dispatched by the device */
dev_dbg(dev->device,
"dispatch request (nbytes=%d, src=%p, dst=%p)\n",
- req->nbytes, req->src, req->dst);
+ req->cryptlen, req->src, req->dst);
/* assign new request to device */
- dev->total = req->nbytes;
+ dev->total = req->cryptlen;
dev->in_sg = req->src;
dev->out_sg = req->dst;
- rctx = ablkcipher_request_ctx(req);
- ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
+ rctx = skcipher_request_ctx(req);
+ ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
rctx->mode &= FLAGS_MODE_MASK;
dev->flags = (dev->flags & ~FLAGS_MODE_MASK) | rctx->mode;
- if ((dev->flags & FLAGS_CBC) && req->info)
- memcpy(dev->iv_base, req->info, AES_KEYSIZE_128);
+ if ((dev->flags & FLAGS_CBC) && req->iv)
+ memcpy(dev->iv_base, req->iv, AES_KEYSIZE_128);
/* assign new context to device */
dev->ctx = ctx;
@@ -597,10 +597,10 @@ static int sahara_aes_process(struct ablkcipher_request *req)
return 0;
}
-static int sahara_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+static int sahara_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen)
{
- struct sahara_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+ struct sahara_ctx *ctx = crypto_skcipher_ctx(tfm);
int ret;
ctx->keylen = keylen;
@@ -630,16 +630,16 @@ static int sahara_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
return ret;
}
-static int sahara_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
+static int sahara_aes_crypt(struct skcipher_request *req, unsigned long mode)
{
- struct sahara_aes_reqctx *rctx = ablkcipher_request_ctx(req);
+ struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
struct sahara_dev *dev = dev_ptr;
int err = 0;
dev_dbg(dev->device, "nbytes: %d, enc: %d, cbc: %d\n",
- req->nbytes, !!(mode & FLAGS_ENCRYPT), !!(mode & FLAGS_CBC));
+ req->cryptlen, !!(mode & FLAGS_ENCRYPT), !!(mode & FLAGS_CBC));
- if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
+ if (!IS_ALIGNED(req->cryptlen, AES_BLOCK_SIZE)) {
dev_err(dev->device,
"request size is not exact amount of AES blocks\n");
return -EINVAL;
@@ -648,7 +648,7 @@ static int sahara_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
rctx->mode = mode;
mutex_lock(&dev->queue_mutex);
- err = ablkcipher_enqueue_request(&dev->queue, req);
+ err = crypto_enqueue_request(&dev->queue, &req->base);
mutex_unlock(&dev->queue_mutex);
wake_up_process(dev->kthread);
@@ -656,10 +656,10 @@ static int sahara_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
return err;
}
-static int sahara_aes_ecb_encrypt(struct ablkcipher_request *req)
+static int sahara_aes_ecb_encrypt(struct skcipher_request *req)
{
- struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
- crypto_ablkcipher_reqtfm(req));
+ struct sahara_ctx *ctx = crypto_skcipher_ctx(
+ crypto_skcipher_reqtfm(req));
int err;
if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
@@ -669,7 +669,7 @@ static int sahara_aes_ecb_encrypt(struct ablkcipher_request *req)
skcipher_request_set_callback(subreq, req->base.flags,
NULL, NULL);
skcipher_request_set_crypt(subreq, req->src, req->dst,
- req->nbytes, req->info);
+ req->cryptlen, req->iv);
err = crypto_skcipher_encrypt(subreq);
skcipher_request_zero(subreq);
return err;
@@ -678,10 +678,10 @@ static int sahara_aes_ecb_encrypt(struct ablkcipher_request *req)
return sahara_aes_crypt(req, FLAGS_ENCRYPT);
}
-static int sahara_aes_ecb_decrypt(struct ablkcipher_request *req)
+static int sahara_aes_ecb_decrypt(struct skcipher_request *req)
{
- struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
- crypto_ablkcipher_reqtfm(req));
+ struct sahara_ctx *ctx = crypto_skcipher_ctx(
+ crypto_skcipher_reqtfm(req));
int err;
if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
@@ -691,7 +691,7 @@ static int sahara_aes_ecb_decrypt(struct ablkcipher_request *req)
skcipher_request_set_callback(subreq, req->base.flags,
NULL, NULL);
skcipher_request_set_crypt(subreq, req->src, req->dst,
- req->nbytes, req->info);
+ req->cryptlen, req->iv);
err = crypto_skcipher_decrypt(subreq);
skcipher_request_zero(subreq);
return err;
@@ -700,10 +700,10 @@ static int sahara_aes_ecb_decrypt(struct ablkcipher_request *req)
return sahara_aes_crypt(req, 0);
}
-static int sahara_aes_cbc_encrypt(struct ablkcipher_request *req)
+static int sahara_aes_cbc_encrypt(struct skcipher_request *req)
{
- struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
- crypto_ablkcipher_reqtfm(req));
+ struct sahara_ctx *ctx = crypto_skcipher_ctx(
+ crypto_skcipher_reqtfm(req));
int err;
if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
@@ -713,7 +713,7 @@ static int sahara_aes_cbc_encrypt(struct ablkcipher_request *req)
skcipher_request_set_callback(subreq, req->base.flags,
NULL, NULL);
skcipher_request_set_crypt(subreq, req->src, req->dst,
- req->nbytes, req->info);
+ req->cryptlen, req->iv);
err = crypto_skcipher_encrypt(subreq);
skcipher_request_zero(subreq);
return err;
@@ -722,10 +722,10 @@ static int sahara_aes_cbc_encrypt(struct ablkcipher_request *req)
return sahara_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
}
-static int sahara_aes_cbc_decrypt(struct ablkcipher_request *req)
+static int sahara_aes_cbc_decrypt(struct skcipher_request *req)
{
- struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
- crypto_ablkcipher_reqtfm(req));
+ struct sahara_ctx *ctx = crypto_skcipher_ctx(
+ crypto_skcipher_reqtfm(req));
int err;
if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
@@ -735,7 +735,7 @@ static int sahara_aes_cbc_decrypt(struct ablkcipher_request *req)
skcipher_request_set_callback(subreq, req->base.flags,
NULL, NULL);
skcipher_request_set_crypt(subreq, req->src, req->dst,
- req->nbytes, req->info);
+ req->cryptlen, req->iv);
err = crypto_skcipher_decrypt(subreq);
skcipher_request_zero(subreq);
return err;
@@ -744,10 +744,10 @@ static int sahara_aes_cbc_decrypt(struct ablkcipher_request *req)
return sahara_aes_crypt(req, FLAGS_CBC);
}
-static int sahara_aes_cra_init(struct crypto_tfm *tfm)
+static int sahara_aes_init_tfm(struct crypto_skcipher *tfm)
{
- const char *name = crypto_tfm_alg_name(tfm);
- struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);
+ const char *name = crypto_tfm_alg_name(&tfm->base);
+ struct sahara_ctx *ctx = crypto_skcipher_ctx(tfm);
ctx->fallback = crypto_alloc_sync_skcipher(name, 0,
CRYPTO_ALG_NEED_FALLBACK);
@@ -756,14 +756,14 @@ static int sahara_aes_cra_init(struct crypto_tfm *tfm)
return PTR_ERR(ctx->fallback);
}
- tfm->crt_ablkcipher.reqsize = sizeof(struct sahara_aes_reqctx);
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct sahara_aes_reqctx));
return 0;
}
-static void sahara_aes_cra_exit(struct crypto_tfm *tfm)
+static void sahara_aes_exit_tfm(struct crypto_skcipher *tfm)
{
- struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct sahara_ctx *ctx = crypto_skcipher_ctx(tfm);
crypto_free_sync_skcipher(ctx->fallback);
}
@@ -1071,8 +1071,8 @@ static int sahara_queue_manage(void *data)
ret = sahara_sha_process(req);
} else {
- struct ablkcipher_request *req =
- ablkcipher_request_cast(async_req);
+ struct skcipher_request *req =
+ skcipher_request_cast(async_req);
ret = sahara_aes_process(req);
}
@@ -1189,48 +1189,42 @@ static int sahara_sha_cra_init(struct crypto_tfm *tfm)
return 0;
}
-static struct crypto_alg aes_algs[] = {
+static struct skcipher_alg aes_algs[] = {
{
- .cra_name = "ecb(aes)",
- .cra_driver_name = "sahara-ecb-aes",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct sahara_ctx),
- .cra_alignmask = 0x0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = sahara_aes_cra_init,
- .cra_exit = sahara_aes_cra_exit,
- .cra_u.ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE ,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = sahara_aes_setkey,
- .encrypt = sahara_aes_ecb_encrypt,
- .decrypt = sahara_aes_ecb_decrypt,
- }
+ .base.cra_name = "ecb(aes)",
+ .base.cra_driver_name = "sahara-ecb-aes",
+ .base.cra_priority = 300,
+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct sahara_ctx),
+ .base.cra_alignmask = 0x0,
+ .base.cra_module = THIS_MODULE,
+
+ .init = sahara_aes_init_tfm,
+ .exit = sahara_aes_exit_tfm,
+ .min_keysize = AES_MIN_KEY_SIZE ,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = sahara_aes_setkey,
+ .encrypt = sahara_aes_ecb_encrypt,
+ .decrypt = sahara_aes_ecb_decrypt,
}, {
- .cra_name = "cbc(aes)",
- .cra_driver_name = "sahara-cbc-aes",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct sahara_ctx),
- .cra_alignmask = 0x0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = sahara_aes_cra_init,
- .cra_exit = sahara_aes_cra_exit,
- .cra_u.ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE ,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = sahara_aes_setkey,
- .encrypt = sahara_aes_cbc_encrypt,
- .decrypt = sahara_aes_cbc_decrypt,
- }
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "sahara-cbc-aes",
+ .base.cra_priority = 300,
+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct sahara_ctx),
+ .base.cra_alignmask = 0x0,
+ .base.cra_module = THIS_MODULE,
+
+ .init = sahara_aes_init_tfm,
+ .exit = sahara_aes_exit_tfm,
+ .min_keysize = AES_MIN_KEY_SIZE ,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = sahara_aes_setkey,
+ .encrypt = sahara_aes_cbc_encrypt,
+ .decrypt = sahara_aes_cbc_decrypt,
}
};
@@ -1318,7 +1312,7 @@ static int sahara_register_algs(struct sahara_dev *dev)
unsigned int i, j, k, l;
for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
- err = crypto_register_alg(&aes_algs[i]);
+ err = crypto_register_skcipher(&aes_algs[i]);
if (err)
goto err_aes_algs;
}
@@ -1348,7 +1342,7 @@ err_sha_v3_algs:
err_aes_algs:
for (j = 0; j < i; j++)
- crypto_unregister_alg(&aes_algs[j]);
+ crypto_unregister_skcipher(&aes_algs[j]);
return err;
}
@@ -1358,7 +1352,7 @@ static void sahara_unregister_algs(struct sahara_dev *dev)
unsigned int i;
for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
- crypto_unregister_alg(&aes_algs[i]);
+ crypto_unregister_skcipher(&aes_algs[i]);
for (i = 0; i < ARRAY_SIZE(sha_v3_algs); i++)
crypto_unregister_ahash(&sha_v3_algs[i]);
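The stm32 driver below routes requests through the crypto engine, so its
conversion also renames the engine helpers (crypto_transfer_*_request_to_engine
and crypto_finalize_*_request). The resulting dispatch path, reduced to a
sketch with hypothetical my_ctx/my_reqctx context types:

	static int my_crypt(struct skcipher_request *req, unsigned long mode)
	{
		struct my_ctx *ctx =
			crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
		struct my_reqctx *rctx = skcipher_request_ctx(req);

		rctx->mode = mode;
		/* queue on the engine; the engine later invokes the
		 * do_one_request hook, which must finish by calling
		 * crypto_finalize_skcipher_request()
		 */
		return crypto_transfer_skcipher_request_to_engine(ctx->engine,
								  req);
	}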
diff --git a/drivers/crypto/stm32/stm32-cryp.c b/drivers/crypto/stm32/stm32-cryp.c
index ba5ea6434f9c..d347a1d6e351 100644
--- a/drivers/crypto/stm32/stm32-cryp.c
+++ b/drivers/crypto/stm32/stm32-cryp.c
@@ -19,6 +19,7 @@
#include <crypto/engine.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/aead.h>
+#include <crypto/internal/skcipher.h>
#define DRIVER_NAME "stm32-cryp"
@@ -137,7 +138,7 @@ struct stm32_cryp {
struct crypto_engine *engine;
- struct ablkcipher_request *req;
+ struct skcipher_request *req;
struct aead_request *areq;
size_t authsize;
@@ -395,8 +396,8 @@ static void stm32_cryp_hw_write_iv(struct stm32_cryp *cryp, u32 *iv)
static void stm32_cryp_get_iv(struct stm32_cryp *cryp)
{
- struct ablkcipher_request *req = cryp->req;
- u32 *tmp = req->info;
+ struct skcipher_request *req = cryp->req;
+ u32 *tmp = (void *)req->iv;
if (!tmp)
return;
@@ -616,7 +617,7 @@ static int stm32_cryp_hw_init(struct stm32_cryp *cryp)
case CR_TDES_CBC:
case CR_AES_CBC:
case CR_AES_CTR:
- stm32_cryp_hw_write_iv(cryp, (u32 *)cryp->req->info);
+ stm32_cryp_hw_write_iv(cryp, (u32 *)cryp->req->iv);
break;
default:
@@ -667,7 +668,7 @@ static void stm32_cryp_finish_req(struct stm32_cryp *cryp, int err)
if (is_gcm(cryp) || is_ccm(cryp))
crypto_finalize_aead_request(cryp->engine, cryp->areq, err);
else
- crypto_finalize_ablkcipher_request(cryp->engine, cryp->req,
+ crypto_finalize_skcipher_request(cryp->engine, cryp->req,
err);
memset(cryp->ctx->key, 0, cryp->ctx->keylen);
@@ -685,11 +686,11 @@ static int stm32_cryp_cipher_one_req(struct crypto_engine *engine, void *areq);
static int stm32_cryp_prepare_cipher_req(struct crypto_engine *engine,
void *areq);
-static int stm32_cryp_cra_init(struct crypto_tfm *tfm)
+static int stm32_cryp_init_tfm(struct crypto_skcipher *tfm)
{
- struct stm32_cryp_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct stm32_cryp_ctx *ctx = crypto_skcipher_ctx(tfm);
- tfm->crt_ablkcipher.reqsize = sizeof(struct stm32_cryp_reqctx);
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct stm32_cryp_reqctx));
ctx->enginectx.op.do_one_request = stm32_cryp_cipher_one_req;
ctx->enginectx.op.prepare_request = stm32_cryp_prepare_cipher_req;
@@ -714,11 +715,11 @@ static int stm32_cryp_aes_aead_init(struct crypto_aead *tfm)
return 0;
}
-static int stm32_cryp_crypt(struct ablkcipher_request *req, unsigned long mode)
+static int stm32_cryp_crypt(struct skcipher_request *req, unsigned long mode)
{
- struct stm32_cryp_ctx *ctx = crypto_ablkcipher_ctx(
- crypto_ablkcipher_reqtfm(req));
- struct stm32_cryp_reqctx *rctx = ablkcipher_request_ctx(req);
+ struct stm32_cryp_ctx *ctx = crypto_skcipher_ctx(
+ crypto_skcipher_reqtfm(req));
+ struct stm32_cryp_reqctx *rctx = skcipher_request_ctx(req);
struct stm32_cryp *cryp = stm32_cryp_find_dev(ctx);
if (!cryp)
@@ -726,7 +727,7 @@ static int stm32_cryp_crypt(struct ablkcipher_request *req, unsigned long mode)
rctx->mode = mode;
- return crypto_transfer_ablkcipher_request_to_engine(cryp->engine, req);
+ return crypto_transfer_skcipher_request_to_engine(cryp->engine, req);
}
static int stm32_cryp_aead_crypt(struct aead_request *req, unsigned long mode)
@@ -743,10 +744,10 @@ static int stm32_cryp_aead_crypt(struct aead_request *req, unsigned long mode)
return crypto_transfer_aead_request_to_engine(cryp->engine, req);
}
-static int stm32_cryp_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+static int stm32_cryp_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen)
{
- struct stm32_cryp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+ struct stm32_cryp_ctx *ctx = crypto_skcipher_ctx(tfm);
memcpy(ctx->key, key, keylen);
ctx->keylen = keylen;
@@ -754,7 +755,7 @@ static int stm32_cryp_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
return 0;
}
-static int stm32_cryp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+static int stm32_cryp_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen)
{
if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
@@ -764,17 +765,17 @@ static int stm32_cryp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
return stm32_cryp_setkey(tfm, key, keylen);
}
-static int stm32_cryp_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+static int stm32_cryp_des_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen)
{
- return verify_ablkcipher_des_key(tfm, key) ?:
+ return verify_skcipher_des_key(tfm, key) ?:
stm32_cryp_setkey(tfm, key, keylen);
}
-static int stm32_cryp_tdes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+static int stm32_cryp_tdes_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen)
{
- return verify_ablkcipher_des3_key(tfm, key) ?:
+ return verify_skcipher_des3_key(tfm, key) ?:
stm32_cryp_setkey(tfm, key, keylen);
}
@@ -818,32 +819,32 @@ static int stm32_cryp_aes_ccm_setauthsize(struct crypto_aead *tfm,
return 0;
}
-static int stm32_cryp_aes_ecb_encrypt(struct ablkcipher_request *req)
+static int stm32_cryp_aes_ecb_encrypt(struct skcipher_request *req)
{
return stm32_cryp_crypt(req, FLG_AES | FLG_ECB | FLG_ENCRYPT);
}
-static int stm32_cryp_aes_ecb_decrypt(struct ablkcipher_request *req)
+static int stm32_cryp_aes_ecb_decrypt(struct skcipher_request *req)
{
return stm32_cryp_crypt(req, FLG_AES | FLG_ECB);
}
-static int stm32_cryp_aes_cbc_encrypt(struct ablkcipher_request *req)
+static int stm32_cryp_aes_cbc_encrypt(struct skcipher_request *req)
{
return stm32_cryp_crypt(req, FLG_AES | FLG_CBC | FLG_ENCRYPT);
}
-static int stm32_cryp_aes_cbc_decrypt(struct ablkcipher_request *req)
+static int stm32_cryp_aes_cbc_decrypt(struct skcipher_request *req)
{
return stm32_cryp_crypt(req, FLG_AES | FLG_CBC);
}
-static int stm32_cryp_aes_ctr_encrypt(struct ablkcipher_request *req)
+static int stm32_cryp_aes_ctr_encrypt(struct skcipher_request *req)
{
return stm32_cryp_crypt(req, FLG_AES | FLG_CTR | FLG_ENCRYPT);
}
-static int stm32_cryp_aes_ctr_decrypt(struct ablkcipher_request *req)
+static int stm32_cryp_aes_ctr_decrypt(struct skcipher_request *req)
{
return stm32_cryp_crypt(req, FLG_AES | FLG_CTR);
}
@@ -868,47 +869,47 @@ static int stm32_cryp_aes_ccm_decrypt(struct aead_request *req)
return stm32_cryp_aead_crypt(req, FLG_AES | FLG_CCM);
}
-static int stm32_cryp_des_ecb_encrypt(struct ablkcipher_request *req)
+static int stm32_cryp_des_ecb_encrypt(struct skcipher_request *req)
{
return stm32_cryp_crypt(req, FLG_DES | FLG_ECB | FLG_ENCRYPT);
}
-static int stm32_cryp_des_ecb_decrypt(struct ablkcipher_request *req)
+static int stm32_cryp_des_ecb_decrypt(struct skcipher_request *req)
{
return stm32_cryp_crypt(req, FLG_DES | FLG_ECB);
}
-static int stm32_cryp_des_cbc_encrypt(struct ablkcipher_request *req)
+static int stm32_cryp_des_cbc_encrypt(struct skcipher_request *req)
{
return stm32_cryp_crypt(req, FLG_DES | FLG_CBC | FLG_ENCRYPT);
}
-static int stm32_cryp_des_cbc_decrypt(struct ablkcipher_request *req)
+static int stm32_cryp_des_cbc_decrypt(struct skcipher_request *req)
{
return stm32_cryp_crypt(req, FLG_DES | FLG_CBC);
}
-static int stm32_cryp_tdes_ecb_encrypt(struct ablkcipher_request *req)
+static int stm32_cryp_tdes_ecb_encrypt(struct skcipher_request *req)
{
return stm32_cryp_crypt(req, FLG_TDES | FLG_ECB | FLG_ENCRYPT);
}
-static int stm32_cryp_tdes_ecb_decrypt(struct ablkcipher_request *req)
+static int stm32_cryp_tdes_ecb_decrypt(struct skcipher_request *req)
{
return stm32_cryp_crypt(req, FLG_TDES | FLG_ECB);
}
-static int stm32_cryp_tdes_cbc_encrypt(struct ablkcipher_request *req)
+static int stm32_cryp_tdes_cbc_encrypt(struct skcipher_request *req)
{
return stm32_cryp_crypt(req, FLG_TDES | FLG_CBC | FLG_ENCRYPT);
}
-static int stm32_cryp_tdes_cbc_decrypt(struct ablkcipher_request *req)
+static int stm32_cryp_tdes_cbc_decrypt(struct skcipher_request *req)
{
return stm32_cryp_crypt(req, FLG_TDES | FLG_CBC);
}
-static int stm32_cryp_prepare_req(struct ablkcipher_request *req,
+static int stm32_cryp_prepare_req(struct skcipher_request *req,
struct aead_request *areq)
{
struct stm32_cryp_ctx *ctx;
@@ -919,7 +920,7 @@ static int stm32_cryp_prepare_req(struct ablkcipher_request *req,
if (!req && !areq)
return -EINVAL;
- ctx = req ? crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req)) :
+ ctx = req ? crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)) :
crypto_aead_ctx(crypto_aead_reqtfm(areq));
cryp = ctx->cryp;
@@ -927,7 +928,7 @@ static int stm32_cryp_prepare_req(struct ablkcipher_request *req,
if (!cryp)
return -ENODEV;
- rctx = req ? ablkcipher_request_ctx(req) : aead_request_ctx(areq);
+ rctx = req ? skcipher_request_ctx(req) : aead_request_ctx(areq);
rctx->mode &= FLG_MODE_MASK;
ctx->cryp = cryp;
@@ -939,7 +940,7 @@ static int stm32_cryp_prepare_req(struct ablkcipher_request *req,
if (req) {
cryp->req = req;
cryp->areq = NULL;
- cryp->total_in = req->nbytes;
+ cryp->total_in = req->cryptlen;
cryp->total_out = cryp->total_in;
} else {
/*
@@ -1016,8 +1017,8 @@ static int stm32_cryp_prepare_req(struct ablkcipher_request *req,
static int stm32_cryp_prepare_cipher_req(struct crypto_engine *engine,
void *areq)
{
- struct ablkcipher_request *req = container_of(areq,
- struct ablkcipher_request,
+ struct skcipher_request *req = container_of(areq,
+ struct skcipher_request,
base);
return stm32_cryp_prepare_req(req, NULL);
@@ -1025,11 +1026,11 @@ static int stm32_cryp_prepare_cipher_req(struct crypto_engine *engine,
static int stm32_cryp_cipher_one_req(struct crypto_engine *engine, void *areq)
{
- struct ablkcipher_request *req = container_of(areq,
- struct ablkcipher_request,
+ struct skcipher_request *req = container_of(areq,
+ struct skcipher_request,
base);
- struct stm32_cryp_ctx *ctx = crypto_ablkcipher_ctx(
- crypto_ablkcipher_reqtfm(req));
+ struct stm32_cryp_ctx *ctx = crypto_skcipher_ctx(
+ crypto_skcipher_reqtfm(req));
struct stm32_cryp *cryp = ctx->cryp;
if (!cryp)
@@ -1724,150 +1725,129 @@ static irqreturn_t stm32_cryp_irq(int irq, void *arg)
return IRQ_WAKE_THREAD;
}
-static struct crypto_alg crypto_algs[] = {
-{
- .cra_name = "ecb(aes)",
- .cra_driver_name = "stm32-ecb-aes",
- .cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct stm32_cryp_ctx),
- .cra_alignmask = 0xf,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = stm32_cryp_cra_init,
- .cra_ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = stm32_cryp_aes_setkey,
- .encrypt = stm32_cryp_aes_ecb_encrypt,
- .decrypt = stm32_cryp_aes_ecb_decrypt,
- }
+static struct skcipher_alg crypto_algs[] = {
+{
+ .base.cra_name = "ecb(aes)",
+ .base.cra_driver_name = "stm32-ecb-aes",
+ .base.cra_priority = 200,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx),
+ .base.cra_alignmask = 0xf,
+ .base.cra_module = THIS_MODULE,
+
+ .init = stm32_cryp_init_tfm,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = stm32_cryp_aes_setkey,
+ .encrypt = stm32_cryp_aes_ecb_encrypt,
+ .decrypt = stm32_cryp_aes_ecb_decrypt,
},
{
- .cra_name = "cbc(aes)",
- .cra_driver_name = "stm32-cbc-aes",
- .cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct stm32_cryp_ctx),
- .cra_alignmask = 0xf,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = stm32_cryp_cra_init,
- .cra_ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = stm32_cryp_aes_setkey,
- .encrypt = stm32_cryp_aes_cbc_encrypt,
- .decrypt = stm32_cryp_aes_cbc_decrypt,
- }
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "stm32-cbc-aes",
+ .base.cra_priority = 200,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx),
+ .base.cra_alignmask = 0xf,
+ .base.cra_module = THIS_MODULE,
+
+ .init = stm32_cryp_init_tfm,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = stm32_cryp_aes_setkey,
+ .encrypt = stm32_cryp_aes_cbc_encrypt,
+ .decrypt = stm32_cryp_aes_cbc_decrypt,
},
{
- .cra_name = "ctr(aes)",
- .cra_driver_name = "stm32-ctr-aes",
- .cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct stm32_cryp_ctx),
- .cra_alignmask = 0xf,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = stm32_cryp_cra_init,
- .cra_ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = stm32_cryp_aes_setkey,
- .encrypt = stm32_cryp_aes_ctr_encrypt,
- .decrypt = stm32_cryp_aes_ctr_decrypt,
- }
+ .base.cra_name = "ctr(aes)",
+ .base.cra_driver_name = "stm32-ctr-aes",
+ .base.cra_priority = 200,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx),
+ .base.cra_alignmask = 0xf,
+ .base.cra_module = THIS_MODULE,
+
+ .init = stm32_cryp_init_tfm,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = stm32_cryp_aes_setkey,
+ .encrypt = stm32_cryp_aes_ctr_encrypt,
+ .decrypt = stm32_cryp_aes_ctr_decrypt,
},
{
- .cra_name = "ecb(des)",
- .cra_driver_name = "stm32-ecb-des",
- .cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct stm32_cryp_ctx),
- .cra_alignmask = 0xf,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = stm32_cryp_cra_init,
- .cra_ablkcipher = {
- .min_keysize = DES_BLOCK_SIZE,
- .max_keysize = DES_BLOCK_SIZE,
- .setkey = stm32_cryp_des_setkey,
- .encrypt = stm32_cryp_des_ecb_encrypt,
- .decrypt = stm32_cryp_des_ecb_decrypt,
- }
+ .base.cra_name = "ecb(des)",
+ .base.cra_driver_name = "stm32-ecb-des",
+ .base.cra_priority = 200,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx),
+ .base.cra_alignmask = 0xf,
+ .base.cra_module = THIS_MODULE,
+
+ .init = stm32_cryp_init_tfm,
+ .min_keysize = DES_BLOCK_SIZE,
+ .max_keysize = DES_BLOCK_SIZE,
+ .setkey = stm32_cryp_des_setkey,
+ .encrypt = stm32_cryp_des_ecb_encrypt,
+ .decrypt = stm32_cryp_des_ecb_decrypt,
},
{
- .cra_name = "cbc(des)",
- .cra_driver_name = "stm32-cbc-des",
- .cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct stm32_cryp_ctx),
- .cra_alignmask = 0xf,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = stm32_cryp_cra_init,
- .cra_ablkcipher = {
- .min_keysize = DES_BLOCK_SIZE,
- .max_keysize = DES_BLOCK_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = stm32_cryp_des_setkey,
- .encrypt = stm32_cryp_des_cbc_encrypt,
- .decrypt = stm32_cryp_des_cbc_decrypt,
- }
+ .base.cra_name = "cbc(des)",
+ .base.cra_driver_name = "stm32-cbc-des",
+ .base.cra_priority = 200,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx),
+ .base.cra_alignmask = 0xf,
+ .base.cra_module = THIS_MODULE,
+
+ .init = stm32_cryp_init_tfm,
+ .min_keysize = DES_BLOCK_SIZE,
+ .max_keysize = DES_BLOCK_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = stm32_cryp_des_setkey,
+ .encrypt = stm32_cryp_des_cbc_encrypt,
+ .decrypt = stm32_cryp_des_cbc_decrypt,
},
{
- .cra_name = "ecb(des3_ede)",
- .cra_driver_name = "stm32-ecb-des3",
- .cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct stm32_cryp_ctx),
- .cra_alignmask = 0xf,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = stm32_cryp_cra_init,
- .cra_ablkcipher = {
- .min_keysize = 3 * DES_BLOCK_SIZE,
- .max_keysize = 3 * DES_BLOCK_SIZE,
- .setkey = stm32_cryp_tdes_setkey,
- .encrypt = stm32_cryp_tdes_ecb_encrypt,
- .decrypt = stm32_cryp_tdes_ecb_decrypt,
- }
+ .base.cra_name = "ecb(des3_ede)",
+ .base.cra_driver_name = "stm32-ecb-des3",
+ .base.cra_priority = 200,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx),
+ .base.cra_alignmask = 0xf,
+ .base.cra_module = THIS_MODULE,
+
+ .init = stm32_cryp_init_tfm,
+ .min_keysize = 3 * DES_BLOCK_SIZE,
+ .max_keysize = 3 * DES_BLOCK_SIZE,
+ .setkey = stm32_cryp_tdes_setkey,
+ .encrypt = stm32_cryp_tdes_ecb_encrypt,
+ .decrypt = stm32_cryp_tdes_ecb_decrypt,
},
{
- .cra_name = "cbc(des3_ede)",
- .cra_driver_name = "stm32-cbc-des3",
- .cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct stm32_cryp_ctx),
- .cra_alignmask = 0xf,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = stm32_cryp_cra_init,
- .cra_ablkcipher = {
- .min_keysize = 3 * DES_BLOCK_SIZE,
- .max_keysize = 3 * DES_BLOCK_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = stm32_cryp_tdes_setkey,
- .encrypt = stm32_cryp_tdes_cbc_encrypt,
- .decrypt = stm32_cryp_tdes_cbc_decrypt,
- }
+ .base.cra_name = "cbc(des3_ede)",
+ .base.cra_driver_name = "stm32-cbc-des3",
+ .base.cra_priority = 200,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx),
+ .base.cra_alignmask = 0xf,
+ .base.cra_module = THIS_MODULE,
+
+ .init = stm32_cryp_init_tfm,
+ .min_keysize = 3 * DES_BLOCK_SIZE,
+ .max_keysize = 3 * DES_BLOCK_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = stm32_cryp_tdes_setkey,
+ .encrypt = stm32_cryp_tdes_cbc_encrypt,
+ .decrypt = stm32_cryp_tdes_cbc_decrypt,
},
};
@@ -2010,7 +1990,7 @@ static int stm32_cryp_probe(struct platform_device *pdev)
goto err_engine2;
}
- ret = crypto_register_algs(crypto_algs, ARRAY_SIZE(crypto_algs));
+ ret = crypto_register_skciphers(crypto_algs, ARRAY_SIZE(crypto_algs));
if (ret) {
dev_err(dev, "Could not register algs\n");
goto err_algs;
@@ -2027,7 +2007,7 @@ static int stm32_cryp_probe(struct platform_device *pdev)
return 0;
err_aead_algs:
- crypto_unregister_algs(crypto_algs, ARRAY_SIZE(crypto_algs));
+ crypto_unregister_skciphers(crypto_algs, ARRAY_SIZE(crypto_algs));
err_algs:
err_engine2:
crypto_engine_exit(cryp->engine);
@@ -2059,7 +2039,7 @@ static int stm32_cryp_remove(struct platform_device *pdev)
return ret;
crypto_unregister_aeads(aead_algs, ARRAY_SIZE(aead_algs));
- crypto_unregister_algs(crypto_algs, ARRAY_SIZE(crypto_algs));
+ crypto_unregister_skciphers(crypto_algs, ARRAY_SIZE(crypto_algs));
crypto_engine_exit(cryp->engine);
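
The stm32 hunks above capture the whole shape of the conversion: the flat `crypto_alg` with its `cra_ablkcipher` sub-struct becomes a `struct skcipher_alg` whose generic fields move under `.base`, the `CRYPTO_ALG_TYPE_ABLKCIPHER` flag and `.cra_type` pointer go away, `.cra_init` is replaced by the type-specific `.init` callback, and the array is registered with `crypto_register_skciphers()`. A minimal sketch of the new-style declaration, with hypothetical `my_*` handlers and context standing in for a driver's own code (not part of this patch):

	#include <crypto/aes.h>
	#include <crypto/internal/skcipher.h>
	#include <linux/module.h>
	#include <linux/string.h>

	struct my_ctx {
		u8 key[AES_MAX_KEY_SIZE];
		unsigned int keylen;
	};

	static int my_setkey(struct crypto_skcipher *tfm, const u8 *key,
			     unsigned int keylen)
	{
		struct my_ctx *ctx = crypto_skcipher_ctx(tfm);

		if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
		    keylen != AES_KEYSIZE_256)
			return -EINVAL;

		memcpy(ctx->key, key, keylen);
		ctx->keylen = keylen;
		return 0;
	}

	/* hardware submission paths elided in this sketch */
	static int my_encrypt(struct skcipher_request *req)
	{
		return -EOPNOTSUPP;
	}

	static int my_decrypt(struct skcipher_request *req)
	{
		return -EOPNOTSUPP;
	}

	static struct skcipher_alg my_alg = {
		.base.cra_name		= "cbc(aes)",
		.base.cra_driver_name	= "cbc-aes-mydriver",
		.base.cra_priority	= 200,
		.base.cra_flags		= CRYPTO_ALG_ASYNC,
		.base.cra_blocksize	= AES_BLOCK_SIZE,
		.base.cra_ctxsize	= sizeof(struct my_ctx),
		.base.cra_module	= THIS_MODULE,

		.min_keysize		= AES_MIN_KEY_SIZE,
		.max_keysize		= AES_MAX_KEY_SIZE,
		.ivsize			= AES_BLOCK_SIZE,
		.setkey			= my_setkey,
		.encrypt		= my_encrypt,
		.decrypt		= my_decrypt,
	};

Registration mirrors the probe/remove hunks: crypto_register_skcipher(&my_alg) on probe and crypto_unregister_skcipher(&my_alg) on remove, or the plural *_skciphers() forms for an array as above.
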
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 56e3068c9947..d71d65846e47 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -35,7 +35,7 @@
#include <crypto/md5.h>
#include <crypto/internal/aead.h>
#include <crypto/authenc.h>
-#include <crypto/skcipher.h>
+#include <crypto/internal/skcipher.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
@@ -1490,10 +1490,10 @@ static int aead_decrypt(struct aead_request *req)
return ipsec_esp(edesc, req, false, ipsec_esp_decrypt_swauth_done);
}
-static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
+static int skcipher_setkey(struct crypto_skcipher *cipher,
const u8 *key, unsigned int keylen)
{
- struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ struct talitos_ctx *ctx = crypto_skcipher_ctx(cipher);
struct device *dev = ctx->dev;
if (ctx->keylen)
@@ -1507,39 +1507,39 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
return 0;
}
-static int ablkcipher_des_setkey(struct crypto_ablkcipher *cipher,
+static int skcipher_des_setkey(struct crypto_skcipher *cipher,
const u8 *key, unsigned int keylen)
{
- return verify_ablkcipher_des_key(cipher, key) ?:
- ablkcipher_setkey(cipher, key, keylen);
+ return verify_skcipher_des_key(cipher, key) ?:
+ skcipher_setkey(cipher, key, keylen);
}
-static int ablkcipher_des3_setkey(struct crypto_ablkcipher *cipher,
+static int skcipher_des3_setkey(struct crypto_skcipher *cipher,
const u8 *key, unsigned int keylen)
{
- return verify_ablkcipher_des3_key(cipher, key) ?:
- ablkcipher_setkey(cipher, key, keylen);
+ return verify_skcipher_des3_key(cipher, key) ?:
+ skcipher_setkey(cipher, key, keylen);
}
-static int ablkcipher_aes_setkey(struct crypto_ablkcipher *cipher,
+static int skcipher_aes_setkey(struct crypto_skcipher *cipher,
const u8 *key, unsigned int keylen)
{
if (keylen == AES_KEYSIZE_128 || keylen == AES_KEYSIZE_192 ||
keylen == AES_KEYSIZE_256)
- return ablkcipher_setkey(cipher, key, keylen);
+ return skcipher_setkey(cipher, key, keylen);
- crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
static void common_nonsnoop_unmap(struct device *dev,
struct talitos_edesc *edesc,
- struct ablkcipher_request *areq)
+ struct skcipher_request *areq)
{
unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
- talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->nbytes, 0);
+ talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->cryptlen, 0);
unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);
if (edesc->dma_len)
@@ -1547,20 +1547,20 @@ static void common_nonsnoop_unmap(struct device *dev,
DMA_BIDIRECTIONAL);
}
-static void ablkcipher_done(struct device *dev,
+static void skcipher_done(struct device *dev,
struct talitos_desc *desc, void *context,
int err)
{
- struct ablkcipher_request *areq = context;
- struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
- struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
- unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
+ struct skcipher_request *areq = context;
+ struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
+ struct talitos_ctx *ctx = crypto_skcipher_ctx(cipher);
+ unsigned int ivsize = crypto_skcipher_ivsize(cipher);
struct talitos_edesc *edesc;
edesc = container_of(desc, struct talitos_edesc, desc);
common_nonsnoop_unmap(dev, edesc, areq);
- memcpy(areq->info, ctx->iv, ivsize);
+ memcpy(areq->iv, ctx->iv, ivsize);
kfree(edesc);
@@ -1568,17 +1568,17 @@ static void ablkcipher_done(struct device *dev,
}
static int common_nonsnoop(struct talitos_edesc *edesc,
- struct ablkcipher_request *areq,
+ struct skcipher_request *areq,
void (*callback) (struct device *dev,
struct talitos_desc *desc,
void *context, int error))
{
- struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
- struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
+ struct talitos_ctx *ctx = crypto_skcipher_ctx(cipher);
struct device *dev = ctx->dev;
struct talitos_desc *desc = &edesc->desc;
- unsigned int cryptlen = areq->nbytes;
- unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
+ unsigned int cryptlen = areq->cryptlen;
+ unsigned int ivsize = crypto_skcipher_ivsize(cipher);
int sg_count, ret;
bool sync_needed = false;
struct talitos_private *priv = dev_get_drvdata(dev);
@@ -1638,65 +1638,65 @@ static int common_nonsnoop(struct talitos_edesc *edesc,
return ret;
}
-static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
+static struct talitos_edesc *skcipher_edesc_alloc(struct skcipher_request *
areq, bool encrypt)
{
- struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
- struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
- unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
+ struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
+ struct talitos_ctx *ctx = crypto_skcipher_ctx(cipher);
+ unsigned int ivsize = crypto_skcipher_ivsize(cipher);
return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
- areq->info, 0, areq->nbytes, 0, ivsize, 0,
+ areq->iv, 0, areq->cryptlen, 0, ivsize, 0,
areq->base.flags, encrypt);
}
-static int ablkcipher_encrypt(struct ablkcipher_request *areq)
+static int skcipher_encrypt(struct skcipher_request *areq)
{
- struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
- struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
+ struct talitos_ctx *ctx = crypto_skcipher_ctx(cipher);
struct talitos_edesc *edesc;
unsigned int blocksize =
- crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(cipher));
+ crypto_tfm_alg_blocksize(crypto_skcipher_tfm(cipher));
- if (!areq->nbytes)
+ if (!areq->cryptlen)
return 0;
- if (areq->nbytes % blocksize)
+ if (areq->cryptlen % blocksize)
return -EINVAL;
/* allocate extended descriptor */
- edesc = ablkcipher_edesc_alloc(areq, true);
+ edesc = skcipher_edesc_alloc(areq, true);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
/* set encrypt */
edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
- return common_nonsnoop(edesc, areq, ablkcipher_done);
+ return common_nonsnoop(edesc, areq, skcipher_done);
}
-static int ablkcipher_decrypt(struct ablkcipher_request *areq)
+static int skcipher_decrypt(struct skcipher_request *areq)
{
- struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
- struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
+ struct talitos_ctx *ctx = crypto_skcipher_ctx(cipher);
struct talitos_edesc *edesc;
unsigned int blocksize =
- crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(cipher));
+ crypto_tfm_alg_blocksize(crypto_skcipher_tfm(cipher));
- if (!areq->nbytes)
+ if (!areq->cryptlen)
return 0;
- if (areq->nbytes % blocksize)
+ if (areq->cryptlen % blocksize)
return -EINVAL;
/* allocate extended descriptor */
- edesc = ablkcipher_edesc_alloc(areq, false);
+ edesc = skcipher_edesc_alloc(areq, false);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
- return common_nonsnoop(edesc, areq, ablkcipher_done);
+ return common_nonsnoop(edesc, areq, skcipher_done);
}
static void common_nonsnoop_hash_unmap(struct device *dev,
@@ -1704,6 +1704,7 @@ static void common_nonsnoop_hash_unmap(struct device *dev,
struct ahash_request *areq)
{
struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct talitos_private *priv = dev_get_drvdata(dev);
bool is_sec1 = has_ftr_sec1(priv);
struct talitos_desc *desc = &edesc->desc;
@@ -1714,6 +1715,9 @@ static void common_nonsnoop_hash_unmap(struct device *dev,
if (desc->next_desc &&
desc->ptr[5].ptr != desc2->ptr[5].ptr)
unmap_single_talitos_ptr(dev, &desc2->ptr[5], DMA_FROM_DEVICE);
+ if (req_ctx->last)
+ memcpy(areq->result, req_ctx->hw_context,
+ crypto_ahash_digestsize(tfm));
if (req_ctx->psrc)
talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL, 0, 0);
@@ -1845,7 +1849,7 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
if (req_ctx->last)
map_single_talitos_ptr(dev, &desc->ptr[5],
crypto_ahash_digestsize(tfm),
- areq->result, DMA_FROM_DEVICE);
+ req_ctx->hw_context, DMA_FROM_DEVICE);
else
map_single_talitos_ptr_nosync(dev, &desc->ptr[5],
req_ctx->hw_context_size,
@@ -2253,7 +2257,7 @@ struct talitos_alg_template {
u32 type;
u32 priority;
union {
- struct crypto_alg crypto;
+ struct skcipher_alg skcipher;
struct ahash_alg hash;
struct aead_alg aead;
} alg;
@@ -2698,123 +2702,102 @@ static struct talitos_alg_template driver_algs[] = {
DESC_HDR_MODE1_MDEU_PAD |
DESC_HDR_MODE1_MDEU_MD5_HMAC,
},
- /* ABLKCIPHER algorithms. */
- { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .alg.crypto = {
- .cra_name = "ecb(aes)",
- .cra_driver_name = "ecb-aes-talitos",
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = ablkcipher_aes_setkey,
- }
+ /* SKCIPHER algorithms. */
+ { .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "ecb(aes)",
+ .base.cra_driver_name = "ecb-aes-talitos",
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = skcipher_aes_setkey,
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
DESC_HDR_SEL0_AESU,
},
- { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .alg.crypto = {
- .cra_name = "cbc(aes)",
- .cra_driver_name = "cbc-aes-talitos",
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = ablkcipher_aes_setkey,
- }
+ { .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "cbc-aes-talitos",
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = skcipher_aes_setkey,
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
DESC_HDR_SEL0_AESU |
DESC_HDR_MODE0_AESU_CBC,
},
- { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .alg.crypto = {
- .cra_name = "ctr(aes)",
- .cra_driver_name = "ctr-aes-talitos",
- .cra_blocksize = 1,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = ablkcipher_aes_setkey,
- }
+ { .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "ctr(aes)",
+ .base.cra_driver_name = "ctr-aes-talitos",
+ .base.cra_blocksize = 1,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = skcipher_aes_setkey,
},
.desc_hdr_template = DESC_HDR_TYPE_AESU_CTR_NONSNOOP |
DESC_HDR_SEL0_AESU |
DESC_HDR_MODE0_AESU_CTR,
},
- { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .alg.crypto = {
- .cra_name = "ecb(des)",
- .cra_driver_name = "ecb-des-talitos",
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_ablkcipher = {
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .setkey = ablkcipher_des_setkey,
- }
+ { .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "ecb(des)",
+ .base.cra_driver_name = "ecb-des-talitos",
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .setkey = skcipher_des_setkey,
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
DESC_HDR_SEL0_DEU,
},
- { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .alg.crypto = {
- .cra_name = "cbc(des)",
- .cra_driver_name = "cbc-des-talitos",
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_ablkcipher = {
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = ablkcipher_des_setkey,
- }
+ { .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "cbc(des)",
+ .base.cra_driver_name = "cbc-des-talitos",
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = skcipher_des_setkey,
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
DESC_HDR_SEL0_DEU |
DESC_HDR_MODE0_DEU_CBC,
},
- { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .alg.crypto = {
- .cra_name = "ecb(des3_ede)",
- .cra_driver_name = "ecb-3des-talitos",
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_ablkcipher = {
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .setkey = ablkcipher_des3_setkey,
- }
+ { .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "ecb(des3_ede)",
+ .base.cra_driver_name = "ecb-3des-talitos",
+ .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .setkey = skcipher_des3_setkey,
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
DESC_HDR_SEL0_DEU |
DESC_HDR_MODE0_DEU_3DES,
},
- { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .alg.crypto = {
- .cra_name = "cbc(des3_ede)",
- .cra_driver_name = "cbc-3des-talitos",
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_ablkcipher = {
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .ivsize = DES3_EDE_BLOCK_SIZE,
- .setkey = ablkcipher_des3_setkey,
- }
+ { .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "cbc(des3_ede)",
+ .base.cra_driver_name = "cbc-3des-talitos",
+ .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .setkey = skcipher_des3_setkey,
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
DESC_HDR_SEL0_DEU |
@@ -3032,46 +3015,45 @@ static int talitos_init_common(struct talitos_ctx *ctx,
return 0;
}
-static int talitos_cra_init(struct crypto_tfm *tfm)
+static int talitos_cra_init_aead(struct crypto_aead *tfm)
{
- struct crypto_alg *alg = tfm->__crt_alg;
+ struct aead_alg *alg = crypto_aead_alg(tfm);
struct talitos_crypto_alg *talitos_alg;
- struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct talitos_ctx *ctx = crypto_aead_ctx(tfm);
- if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
- talitos_alg = container_of(__crypto_ahash_alg(alg),
- struct talitos_crypto_alg,
- algt.alg.hash);
- else
- talitos_alg = container_of(alg, struct talitos_crypto_alg,
- algt.alg.crypto);
+ talitos_alg = container_of(alg, struct talitos_crypto_alg,
+ algt.alg.aead);
return talitos_init_common(ctx, talitos_alg);
}
-static int talitos_cra_init_aead(struct crypto_aead *tfm)
+static int talitos_cra_init_skcipher(struct crypto_skcipher *tfm)
{
- struct aead_alg *alg = crypto_aead_alg(tfm);
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
struct talitos_crypto_alg *talitos_alg;
- struct talitos_ctx *ctx = crypto_aead_ctx(tfm);
+ struct talitos_ctx *ctx = crypto_skcipher_ctx(tfm);
talitos_alg = container_of(alg, struct talitos_crypto_alg,
- algt.alg.aead);
+ algt.alg.skcipher);
return talitos_init_common(ctx, talitos_alg);
}
static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
{
+ struct crypto_alg *alg = tfm->__crt_alg;
+ struct talitos_crypto_alg *talitos_alg;
struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
- talitos_cra_init(tfm);
+ talitos_alg = container_of(__crypto_ahash_alg(alg),
+ struct talitos_crypto_alg,
+ algt.alg.hash);
ctx->keylen = 0;
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
sizeof(struct talitos_ahash_req_ctx));
- return 0;
+ return talitos_init_common(ctx, talitos_alg);
}
static void talitos_cra_exit(struct crypto_tfm *tfm)
@@ -3112,7 +3094,8 @@ static int talitos_remove(struct platform_device *ofdev)
list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
switch (t_alg->algt.type) {
- case CRYPTO_ALG_TYPE_ABLKCIPHER:
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ crypto_unregister_skcipher(&t_alg->algt.alg.skcipher);
break;
case CRYPTO_ALG_TYPE_AEAD:
crypto_unregister_aead(&t_alg->algt.alg.aead);
@@ -3156,15 +3139,14 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
t_alg->algt = *template;
switch (t_alg->algt.type) {
- case CRYPTO_ALG_TYPE_ABLKCIPHER:
- alg = &t_alg->algt.alg.crypto;
- alg->cra_init = talitos_cra_init;
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ alg = &t_alg->algt.alg.skcipher.base;
alg->cra_exit = talitos_cra_exit;
- alg->cra_type = &crypto_ablkcipher_type;
- alg->cra_ablkcipher.setkey = alg->cra_ablkcipher.setkey ?:
- ablkcipher_setkey;
- alg->cra_ablkcipher.encrypt = ablkcipher_encrypt;
- alg->cra_ablkcipher.decrypt = ablkcipher_decrypt;
+ t_alg->algt.alg.skcipher.init = talitos_cra_init_skcipher;
+ t_alg->algt.alg.skcipher.setkey =
+ t_alg->algt.alg.skcipher.setkey ?: skcipher_setkey;
+ t_alg->algt.alg.skcipher.encrypt = skcipher_encrypt;
+ t_alg->algt.alg.skcipher.decrypt = skcipher_decrypt;
break;
case CRYPTO_ALG_TYPE_AEAD:
alg = &t_alg->algt.alg.aead.base;
@@ -3461,10 +3443,10 @@ static int talitos_probe(struct platform_device *ofdev)
}
switch (t_alg->algt.type) {
- case CRYPTO_ALG_TYPE_ABLKCIPHER:
- err = crypto_register_alg(
- &t_alg->algt.alg.crypto);
- alg = &t_alg->algt.alg.crypto;
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ err = crypto_register_skcipher(
+ &t_alg->algt.alg.skcipher);
+ alg = &t_alg->algt.alg.skcipher.base;
break;
case CRYPTO_ALG_TYPE_AEAD:
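
The talitos hunks show the request-side half of the rename: `struct ablkcipher_request` becomes `struct skcipher_request`, the payload length moves from `req->nbytes` to `req->cryptlen`, the IV pointer from `req->info` to `req->iv`, and the accessors gain skcipher spellings (`crypto_skcipher_reqtfm()`, `crypto_skcipher_ctx()`, `crypto_skcipher_ivsize()`). A condensed sketch of the resulting entry-point pattern, where `my_queue_request()` is a hypothetical stand-in for the driver's descriptor path:

	static int my_encrypt(struct skcipher_request *req)
	{
		struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
		struct my_ctx *ctx = crypto_skcipher_ctx(tfm);
		unsigned int bs = crypto_skcipher_blocksize(tfm);

		if (!req->cryptlen)		/* was req->nbytes */
			return 0;
		if (req->cryptlen % bs)
			return -EINVAL;

		/* req->iv replaces the old req->info IV pointer */
		return my_queue_request(ctx, req, true);
	}
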
diff --git a/drivers/crypto/ux500/Kconfig b/drivers/crypto/ux500/Kconfig
index b1c6f739f77b..b731895aa241 100644
--- a/drivers/crypto/ux500/Kconfig
+++ b/drivers/crypto/ux500/Kconfig
@@ -8,7 +8,7 @@ config CRYPTO_DEV_UX500_CRYP
tristate "UX500 crypto driver for CRYP block"
depends on CRYPTO_DEV_UX500
select CRYPTO_ALGAPI
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_LIB_DES
help
This selects the crypto driver for the UX500_CRYP hardware. It supports
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
index 1628ae7a1467..95fb694a2667 100644
--- a/drivers/crypto/ux500/cryp/cryp_core.c
+++ b/drivers/crypto/ux500/cryp/cryp_core.c
@@ -30,6 +30,7 @@
#include <crypto/algapi.h>
#include <crypto/ctr.h>
#include <crypto/internal/des.h>
+#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/platform_data/crypto-ux500.h>
@@ -828,10 +829,10 @@ static int get_nents(struct scatterlist *sg, int nbytes)
return nents;
}
-static int ablk_dma_crypt(struct ablkcipher_request *areq)
+static int ablk_dma_crypt(struct skcipher_request *areq)
{
- struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
- struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
+ struct cryp_ctx *ctx = crypto_skcipher_ctx(cipher);
struct cryp_device_data *device_data;
int bytes_written = 0;
@@ -840,8 +841,8 @@ static int ablk_dma_crypt(struct ablkcipher_request *areq)
pr_debug(DEV_DBG_NAME " [%s]", __func__);
- ctx->datalen = areq->nbytes;
- ctx->outlen = areq->nbytes;
+ ctx->datalen = areq->cryptlen;
+ ctx->outlen = areq->cryptlen;
ret = cryp_get_device_data(ctx, &device_data);
if (ret)
@@ -885,11 +886,11 @@ out:
return 0;
}
-static int ablk_crypt(struct ablkcipher_request *areq)
+static int ablk_crypt(struct skcipher_request *areq)
{
- struct ablkcipher_walk walk;
- struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
- struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ struct skcipher_walk walk;
+ struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
+ struct cryp_ctx *ctx = crypto_skcipher_ctx(cipher);
struct cryp_device_data *device_data;
unsigned long src_paddr;
unsigned long dst_paddr;
@@ -902,21 +903,20 @@ static int ablk_crypt(struct ablkcipher_request *areq)
if (ret)
goto out;
- ablkcipher_walk_init(&walk, areq->dst, areq->src, areq->nbytes);
- ret = ablkcipher_walk_phys(areq, &walk);
+ ret = skcipher_walk_async(&walk, areq);
if (ret) {
- pr_err(DEV_DBG_NAME "[%s]: ablkcipher_walk_phys() failed!",
+ pr_err(DEV_DBG_NAME "[%s]: skcipher_walk_async() failed!",
__func__);
goto out;
}
while ((nbytes = walk.nbytes) > 0) {
ctx->iv = walk.iv;
- src_paddr = (page_to_phys(walk.src.page) + walk.src.offset);
+ src_paddr = (page_to_phys(walk.src.phys.page) + walk.src.phys.offset);
ctx->indata = phys_to_virt(src_paddr);
- dst_paddr = (page_to_phys(walk.dst.page) + walk.dst.offset);
+ dst_paddr = (page_to_phys(walk.dst.phys.page) + walk.dst.phys.offset);
ctx->outdata = phys_to_virt(dst_paddr);
ctx->datalen = nbytes - (nbytes % ctx->blocksize);
@@ -926,11 +926,10 @@ static int ablk_crypt(struct ablkcipher_request *areq)
goto out;
nbytes -= ctx->datalen;
- ret = ablkcipher_walk_done(areq, &walk, nbytes);
+ ret = skcipher_walk_done(&walk, nbytes);
if (ret)
goto out;
}
- ablkcipher_walk_complete(&walk);
out:
/* Release the device */
@@ -948,10 +947,10 @@ out:
return ret;
}
-static int aes_ablkcipher_setkey(struct crypto_ablkcipher *cipher,
+static int aes_skcipher_setkey(struct crypto_skcipher *cipher,
const u8 *key, unsigned int keylen)
{
- struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ struct cryp_ctx *ctx = crypto_skcipher_ctx(cipher);
u32 *flags = &cipher->base.crt_flags;
pr_debug(DEV_DBG_NAME " [%s]", __func__);
@@ -983,15 +982,15 @@ static int aes_ablkcipher_setkey(struct crypto_ablkcipher *cipher,
return 0;
}
-static int des_ablkcipher_setkey(struct crypto_ablkcipher *cipher,
+static int des_skcipher_setkey(struct crypto_skcipher *cipher,
const u8 *key, unsigned int keylen)
{
- struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ struct cryp_ctx *ctx = crypto_skcipher_ctx(cipher);
int err;
pr_debug(DEV_DBG_NAME " [%s]", __func__);
- err = verify_ablkcipher_des_key(cipher, key);
+ err = verify_skcipher_des_key(cipher, key);
if (err)
return err;
@@ -1002,15 +1001,15 @@ static int des_ablkcipher_setkey(struct crypto_ablkcipher *cipher,
return 0;
}
-static int des3_ablkcipher_setkey(struct crypto_ablkcipher *cipher,
+static int des3_skcipher_setkey(struct crypto_skcipher *cipher,
const u8 *key, unsigned int keylen)
{
- struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ struct cryp_ctx *ctx = crypto_skcipher_ctx(cipher);
int err;
pr_debug(DEV_DBG_NAME " [%s]", __func__);
- err = verify_ablkcipher_des3_key(cipher, key);
+ err = verify_skcipher_des3_key(cipher, key);
if (err)
return err;
@@ -1021,10 +1020,10 @@ static int des3_ablkcipher_setkey(struct crypto_ablkcipher *cipher,
return 0;
}
-static int cryp_blk_encrypt(struct ablkcipher_request *areq)
+static int cryp_blk_encrypt(struct skcipher_request *areq)
{
- struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
- struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
+ struct cryp_ctx *ctx = crypto_skcipher_ctx(cipher);
pr_debug(DEV_DBG_NAME " [%s]", __func__);
@@ -1039,10 +1038,10 @@ static int cryp_blk_encrypt(struct ablkcipher_request *areq)
return ablk_crypt(areq);
}
-static int cryp_blk_decrypt(struct ablkcipher_request *areq)
+static int cryp_blk_decrypt(struct skcipher_request *areq)
{
- struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
- struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
+ struct cryp_ctx *ctx = crypto_skcipher_ctx(cipher);
pr_debug(DEV_DBG_NAME " [%s]", __func__);
@@ -1058,19 +1057,19 @@ static int cryp_blk_decrypt(struct ablkcipher_request *areq)
struct cryp_algo_template {
enum cryp_algo_mode algomode;
- struct crypto_alg crypto;
+ struct skcipher_alg skcipher;
};
-static int cryp_cra_init(struct crypto_tfm *tfm)
+static int cryp_init_tfm(struct crypto_skcipher *tfm)
{
- struct cryp_ctx *ctx = crypto_tfm_ctx(tfm);
- struct crypto_alg *alg = tfm->__crt_alg;
+ struct cryp_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
struct cryp_algo_template *cryp_alg = container_of(alg,
struct cryp_algo_template,
- crypto);
+ skcipher);
ctx->config.algomode = cryp_alg->algomode;
- ctx->blocksize = crypto_tfm_alg_blocksize(tfm);
+ ctx->blocksize = crypto_skcipher_blocksize(tfm);
return 0;
}
@@ -1078,205 +1077,147 @@ static int cryp_cra_init(struct crypto_tfm *tfm)
static struct cryp_algo_template cryp_algs[] = {
{
.algomode = CRYP_ALGO_AES_ECB,
- .crypto = {
- .cra_name = "aes",
- .cra_driver_name = "aes-ux500",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct cryp_ctx),
- .cra_alignmask = 3,
- .cra_type = &crypto_ablkcipher_type,
- .cra_init = cryp_cra_init,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = aes_ablkcipher_setkey,
- .encrypt = cryp_blk_encrypt,
- .decrypt = cryp_blk_decrypt
- }
- }
- }
- },
- {
- .algomode = CRYP_ALGO_AES_ECB,
- .crypto = {
- .cra_name = "ecb(aes)",
- .cra_driver_name = "ecb-aes-ux500",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct cryp_ctx),
- .cra_alignmask = 3,
- .cra_type = &crypto_ablkcipher_type,
- .cra_init = cryp_cra_init,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = aes_ablkcipher_setkey,
- .encrypt = cryp_blk_encrypt,
- .decrypt = cryp_blk_decrypt,
- }
- }
+ .skcipher = {
+ .base.cra_name = "ecb(aes)",
+ .base.cra_driver_name = "ecb-aes-ux500",
+ .base.cra_priority = 300,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct cryp_ctx),
+ .base.cra_alignmask = 3,
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = aes_skcipher_setkey,
+ .encrypt = cryp_blk_encrypt,
+ .decrypt = cryp_blk_decrypt,
+ .init = cryp_init_tfm,
}
},
{
.algomode = CRYP_ALGO_AES_CBC,
- .crypto = {
- .cra_name = "cbc(aes)",
- .cra_driver_name = "cbc-aes-ux500",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct cryp_ctx),
- .cra_alignmask = 3,
- .cra_type = &crypto_ablkcipher_type,
- .cra_init = cryp_cra_init,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = aes_ablkcipher_setkey,
- .encrypt = cryp_blk_encrypt,
- .decrypt = cryp_blk_decrypt,
- .ivsize = AES_BLOCK_SIZE,
- }
- }
+ .skcipher = {
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "cbc-aes-ux500",
+ .base.cra_priority = 300,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct cryp_ctx),
+ .base.cra_alignmask = 3,
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = aes_skcipher_setkey,
+ .encrypt = cryp_blk_encrypt,
+ .decrypt = cryp_blk_decrypt,
+ .init = cryp_init_tfm,
+ .ivsize = AES_BLOCK_SIZE,
}
},
{
.algomode = CRYP_ALGO_AES_CTR,
- .crypto = {
- .cra_name = "ctr(aes)",
- .cra_driver_name = "ctr-aes-ux500",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct cryp_ctx),
- .cra_alignmask = 3,
- .cra_type = &crypto_ablkcipher_type,
- .cra_init = cryp_cra_init,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = aes_ablkcipher_setkey,
- .encrypt = cryp_blk_encrypt,
- .decrypt = cryp_blk_decrypt,
- .ivsize = AES_BLOCK_SIZE,
- }
- }
+ .skcipher = {
+ .base.cra_name = "ctr(aes)",
+ .base.cra_driver_name = "ctr-aes-ux500",
+ .base.cra_priority = 300,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct cryp_ctx),
+ .base.cra_alignmask = 3,
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = aes_skcipher_setkey,
+ .encrypt = cryp_blk_encrypt,
+ .decrypt = cryp_blk_decrypt,
+ .init = cryp_init_tfm,
+ .ivsize = AES_BLOCK_SIZE,
+ .chunksize = AES_BLOCK_SIZE,
}
},
{
.algomode = CRYP_ALGO_DES_ECB,
- .crypto = {
- .cra_name = "ecb(des)",
- .cra_driver_name = "ecb-des-ux500",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct cryp_ctx),
- .cra_alignmask = 3,
- .cra_type = &crypto_ablkcipher_type,
- .cra_init = cryp_cra_init,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .setkey = des_ablkcipher_setkey,
- .encrypt = cryp_blk_encrypt,
- .decrypt = cryp_blk_decrypt,
- }
- }
+ .skcipher = {
+ .base.cra_name = "ecb(des)",
+ .base.cra_driver_name = "ecb-des-ux500",
+ .base.cra_priority = 300,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct cryp_ctx),
+ .base.cra_alignmask = 3,
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .setkey = des_skcipher_setkey,
+ .encrypt = cryp_blk_encrypt,
+ .decrypt = cryp_blk_decrypt,
+ .init = cryp_init_tfm,
}
},
{
.algomode = CRYP_ALGO_TDES_ECB,
- .crypto = {
- .cra_name = "ecb(des3_ede)",
- .cra_driver_name = "ecb-des3_ede-ux500",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct cryp_ctx),
- .cra_alignmask = 3,
- .cra_type = &crypto_ablkcipher_type,
- .cra_init = cryp_cra_init,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .setkey = des3_ablkcipher_setkey,
- .encrypt = cryp_blk_encrypt,
- .decrypt = cryp_blk_decrypt,
- }
- }
+ .skcipher = {
+ .base.cra_name = "ecb(des3_ede)",
+ .base.cra_driver_name = "ecb-des3_ede-ux500",
+ .base.cra_priority = 300,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct cryp_ctx),
+ .base.cra_alignmask = 3,
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .setkey = des3_skcipher_setkey,
+ .encrypt = cryp_blk_encrypt,
+ .decrypt = cryp_blk_decrypt,
+ .init = cryp_init_tfm,
}
},
{
.algomode = CRYP_ALGO_DES_CBC,
- .crypto = {
- .cra_name = "cbc(des)",
- .cra_driver_name = "cbc-des-ux500",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct cryp_ctx),
- .cra_alignmask = 3,
- .cra_type = &crypto_ablkcipher_type,
- .cra_init = cryp_cra_init,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .setkey = des_ablkcipher_setkey,
- .encrypt = cryp_blk_encrypt,
- .decrypt = cryp_blk_decrypt,
- }
- }
+ .skcipher = {
+ .base.cra_name = "cbc(des)",
+ .base.cra_driver_name = "cbc-des-ux500",
+ .base.cra_priority = 300,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct cryp_ctx),
+ .base.cra_alignmask = 3,
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .setkey = des_skcipher_setkey,
+ .encrypt = cryp_blk_encrypt,
+ .decrypt = cryp_blk_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .init = cryp_init_tfm,
}
},
{
.algomode = CRYP_ALGO_TDES_CBC,
- .crypto = {
- .cra_name = "cbc(des3_ede)",
- .cra_driver_name = "cbc-des3_ede-ux500",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct cryp_ctx),
- .cra_alignmask = 3,
- .cra_type = &crypto_ablkcipher_type,
- .cra_init = cryp_cra_init,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .setkey = des3_ablkcipher_setkey,
- .encrypt = cryp_blk_encrypt,
- .decrypt = cryp_blk_decrypt,
- .ivsize = DES3_EDE_BLOCK_SIZE,
- }
- }
+ .skcipher = {
+ .base.cra_name = "cbc(des3_ede)",
+ .base.cra_driver_name = "cbc-des3_ede-ux500",
+ .base.cra_priority = 300,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct cryp_ctx),
+ .base.cra_alignmask = 3,
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .setkey = des3_skcipher_setkey,
+ .encrypt = cryp_blk_encrypt,
+ .decrypt = cryp_blk_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .init = cryp_init_tfm,
}
}
};
@@ -1293,18 +1234,18 @@ static int cryp_algs_register_all(void)
pr_debug("[%s]", __func__);
for (i = 0; i < ARRAY_SIZE(cryp_algs); i++) {
- ret = crypto_register_alg(&cryp_algs[i].crypto);
+ ret = crypto_register_skcipher(&cryp_algs[i].skcipher);
if (ret) {
count = i;
pr_err("[%s] alg registration failed",
- cryp_algs[i].crypto.cra_driver_name);
+ cryp_algs[i].skcipher.base.cra_driver_name);
goto unreg;
}
}
return 0;
unreg:
for (i = 0; i < count; i++)
- crypto_unregister_alg(&cryp_algs[i].crypto);
+ crypto_unregister_skcipher(&cryp_algs[i].skcipher);
return ret;
}
@@ -1318,7 +1259,7 @@ static void cryp_algs_unregister_all(void)
pr_debug(DEV_DBG_NAME " [%s]", __func__);
for (i = 0; i < ARRAY_SIZE(cryp_algs); i++)
- crypto_unregister_alg(&cryp_algs[i].crypto);
+ crypto_unregister_skcipher(&cryp_algs[i].skcipher);
}
static int ux500_cryp_probe(struct platform_device *pdev)
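
For ux500, the in-place walk changes API as well: `ablkcipher_walk_init()` plus `ablkcipher_walk_phys()` collapse into a single `skcipher_walk_async()`, the page/offset pair moves under `walk.src.phys` / `walk.dst.phys`, and the trailing `ablkcipher_walk_complete()` call has no counterpart. A minimal sketch of the loop, with a hypothetical `my_process()` doing the actual transform:

	static int my_crypt(struct skcipher_request *req)
	{
		struct skcipher_walk walk;
		unsigned int nbytes;
		int ret;

		ret = skcipher_walk_async(&walk, req);
		if (ret)
			return ret;

		while ((nbytes = walk.nbytes) > 0) {
			u8 *src = phys_to_virt(page_to_phys(walk.src.phys.page) +
					       walk.src.phys.offset);
			u8 *dst = phys_to_virt(page_to_phys(walk.dst.phys.page) +
					       walk.dst.phys.offset);

			my_process(dst, src, nbytes, walk.iv);

			/* second argument: bytes still left unprocessed */
			ret = skcipher_walk_done(&walk, 0);
			if (ret)
				return ret;
		}
		return 0;
	}
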
diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
index c172a6953477..c24f2db8d5e8 100644
--- a/drivers/crypto/ux500/hash/hash_core.c
+++ b/drivers/crypto/ux500/hash/hash_core.c
@@ -140,7 +140,6 @@ static int hash_set_dma_transfer(struct hash_ctx *ctx, struct scatterlist *sg,
{
struct dma_async_tx_descriptor *desc = NULL;
struct dma_chan *channel = NULL;
- dma_cookie_t cookie;
if (direction != DMA_TO_DEVICE) {
dev_err(ctx->device->dev, "%s: Invalid DMA direction\n",
@@ -176,7 +175,7 @@ static int hash_set_dma_transfer(struct hash_ctx *ctx, struct scatterlist *sg,
desc->callback = hash_dma_callback;
desc->callback_param = ctx;
- cookie = dmaengine_submit(desc);
+ dmaengine_submit(desc);
dma_async_issue_pending(channel);
return 0;
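
The hash_core hunk simply drops a write-only variable: `dmaengine_submit()` does return a `dma_cookie_t`, but since this driver never polls for completion by cookie, the value can be discarded:

	dmaengine_submit(desc);		/* cookie intentionally unused */
	dma_async_issue_pending(channel);
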
diff --git a/drivers/crypto/virtio/Kconfig b/drivers/crypto/virtio/Kconfig
index 01b625e4e5ad..fb294174e408 100644
--- a/drivers/crypto/virtio/Kconfig
+++ b/drivers/crypto/virtio/Kconfig
@@ -3,7 +3,7 @@ config CRYPTO_DEV_VIRTIO
tristate "VirtIO crypto driver"
depends on VIRTIO
select CRYPTO_AEAD
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_ENGINE
default m
help
diff --git a/drivers/crypto/virtio/virtio_crypto_algs.c b/drivers/crypto/virtio/virtio_crypto_algs.c
index 42d19205166b..4b71e80951b7 100644
--- a/drivers/crypto/virtio/virtio_crypto_algs.c
+++ b/drivers/crypto/virtio/virtio_crypto_algs.c
@@ -8,6 +8,7 @@
#include <linux/scatterlist.h>
#include <crypto/algapi.h>
+#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <crypto/scatterwalk.h>
#include <linux/atomic.h>
@@ -16,10 +17,10 @@
#include "virtio_crypto_common.h"
-struct virtio_crypto_ablkcipher_ctx {
+struct virtio_crypto_skcipher_ctx {
struct crypto_engine_ctx enginectx;
struct virtio_crypto *vcrypto;
- struct crypto_tfm *tfm;
+ struct crypto_skcipher *tfm;
struct virtio_crypto_sym_session_info enc_sess_info;
struct virtio_crypto_sym_session_info dec_sess_info;
@@ -30,8 +31,8 @@ struct virtio_crypto_sym_request {
/* Cipher or aead */
uint32_t type;
- struct virtio_crypto_ablkcipher_ctx *ablkcipher_ctx;
- struct ablkcipher_request *ablkcipher_req;
+ struct virtio_crypto_skcipher_ctx *skcipher_ctx;
+ struct skcipher_request *skcipher_req;
uint8_t *iv;
/* Encryption? */
bool encrypt;
@@ -41,7 +42,7 @@ struct virtio_crypto_algo {
uint32_t algonum;
uint32_t service;
unsigned int active_devs;
- struct crypto_alg algo;
+ struct skcipher_alg algo;
};
/*
@@ -49,9 +50,9 @@ struct virtio_crypto_algo {
* and crypto algorithms registion.
*/
static DEFINE_MUTEX(algs_lock);
-static void virtio_crypto_ablkcipher_finalize_req(
+static void virtio_crypto_skcipher_finalize_req(
struct virtio_crypto_sym_request *vc_sym_req,
- struct ablkcipher_request *req,
+ struct skcipher_request *req,
int err);
static void virtio_crypto_dataq_sym_callback
@@ -59,7 +60,7 @@ static void virtio_crypto_dataq_sym_callback
{
struct virtio_crypto_sym_request *vc_sym_req =
container_of(vc_req, struct virtio_crypto_sym_request, base);
- struct ablkcipher_request *ablk_req;
+ struct skcipher_request *ablk_req;
int error;
/* Finish the encrypt or decrypt process */
@@ -79,8 +80,8 @@ static void virtio_crypto_dataq_sym_callback
error = -EIO;
break;
}
- ablk_req = vc_sym_req->ablkcipher_req;
- virtio_crypto_ablkcipher_finalize_req(vc_sym_req,
+ ablk_req = vc_sym_req->skcipher_req;
+ virtio_crypto_skcipher_finalize_req(vc_sym_req,
ablk_req, error);
}
}
@@ -105,15 +106,13 @@ virtio_crypto_alg_validate_key(int key_len, uint32_t *alg)
*alg = VIRTIO_CRYPTO_CIPHER_AES_CBC;
break;
default:
- pr_err("virtio_crypto: Unsupported key length: %d\n",
- key_len);
return -EINVAL;
}
return 0;
}
-static int virtio_crypto_alg_ablkcipher_init_session(
- struct virtio_crypto_ablkcipher_ctx *ctx,
+static int virtio_crypto_alg_skcipher_init_session(
+ struct virtio_crypto_skcipher_ctx *ctx,
uint32_t alg, const uint8_t *key,
unsigned int keylen,
int encrypt)
@@ -202,8 +201,8 @@ static int virtio_crypto_alg_ablkcipher_init_session(
return 0;
}
-static int virtio_crypto_alg_ablkcipher_close_session(
- struct virtio_crypto_ablkcipher_ctx *ctx,
+static int virtio_crypto_alg_skcipher_close_session(
+ struct virtio_crypto_skcipher_ctx *ctx,
int encrypt)
{
struct scatterlist outhdr, status_sg, *sgs[2];
@@ -263,8 +262,8 @@ static int virtio_crypto_alg_ablkcipher_close_session(
return 0;
}
-static int virtio_crypto_alg_ablkcipher_init_sessions(
- struct virtio_crypto_ablkcipher_ctx *ctx,
+static int virtio_crypto_alg_skcipher_init_sessions(
+ struct virtio_crypto_skcipher_ctx *ctx,
const uint8_t *key, unsigned int keylen)
{
uint32_t alg;
@@ -280,30 +279,30 @@ static int virtio_crypto_alg_ablkcipher_init_sessions(
goto bad_key;
/* Create encryption session */
- ret = virtio_crypto_alg_ablkcipher_init_session(ctx,
+ ret = virtio_crypto_alg_skcipher_init_session(ctx,
alg, key, keylen, 1);
if (ret)
return ret;
/* Create decryption session */
- ret = virtio_crypto_alg_ablkcipher_init_session(ctx,
+ ret = virtio_crypto_alg_skcipher_init_session(ctx,
alg, key, keylen, 0);
if (ret) {
- virtio_crypto_alg_ablkcipher_close_session(ctx, 1);
+ virtio_crypto_alg_skcipher_close_session(ctx, 1);
return ret;
}
return 0;
bad_key:
- crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ crypto_skcipher_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
/* Note: kernel crypto API realization */
-static int virtio_crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
+static int virtio_crypto_skcipher_setkey(struct crypto_skcipher *tfm,
const uint8_t *key,
unsigned int keylen)
{
- struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+ struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
uint32_t alg;
int ret;
@@ -325,11 +324,11 @@ static int virtio_crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
ctx->vcrypto = vcrypto;
} else {
/* Rekeying, we should close the created sessions previously */
- virtio_crypto_alg_ablkcipher_close_session(ctx, 1);
- virtio_crypto_alg_ablkcipher_close_session(ctx, 0);
+ virtio_crypto_alg_skcipher_close_session(ctx, 1);
+ virtio_crypto_alg_skcipher_close_session(ctx, 0);
}
- ret = virtio_crypto_alg_ablkcipher_init_sessions(ctx, key, keylen);
+ ret = virtio_crypto_alg_skcipher_init_sessions(ctx, key, keylen);
if (ret) {
virtcrypto_dev_put(ctx->vcrypto);
ctx->vcrypto = NULL;
@@ -341,14 +340,14 @@ static int virtio_crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
}
static int
-__virtio_crypto_ablkcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req,
- struct ablkcipher_request *req,
+__virtio_crypto_skcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req,
+ struct skcipher_request *req,
struct data_queue *data_vq)
{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct virtio_crypto_ablkcipher_ctx *ctx = vc_sym_req->ablkcipher_ctx;
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct virtio_crypto_skcipher_ctx *ctx = vc_sym_req->skcipher_ctx;
struct virtio_crypto_request *vc_req = &vc_sym_req->base;
- unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
+ unsigned int ivsize = crypto_skcipher_ivsize(tfm);
struct virtio_crypto *vcrypto = ctx->vcrypto;
struct virtio_crypto_op_data_req *req_data;
int src_nents, dst_nents;
@@ -361,7 +360,7 @@ __virtio_crypto_ablkcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req,
int sg_total;
uint8_t *iv;
- src_nents = sg_nents_for_len(req->src, req->nbytes);
+ src_nents = sg_nents_for_len(req->src, req->cryptlen);
dst_nents = sg_nents(req->dst);
pr_debug("virtio_crypto: Number of sgs (src_nents: %d, dst_nents: %d)\n",
@@ -398,7 +397,7 @@ __virtio_crypto_ablkcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req,
req_data->u.sym_req.op_type = cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
req_data->u.sym_req.u.cipher.para.iv_len = cpu_to_le32(ivsize);
req_data->u.sym_req.u.cipher.para.src_data_len =
- cpu_to_le32(req->nbytes);
+ cpu_to_le32(req->cryptlen);
dst_len = virtio_crypto_alg_sg_nents_length(req->dst);
if (unlikely(dst_len > U32_MAX)) {
@@ -408,9 +407,9 @@ __virtio_crypto_ablkcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req,
}
pr_debug("virtio_crypto: src_len: %u, dst_len: %llu\n",
- req->nbytes, dst_len);
+ req->cryptlen, dst_len);
- if (unlikely(req->nbytes + dst_len + ivsize +
+ if (unlikely(req->cryptlen + dst_len + ivsize +
sizeof(vc_req->status) > vcrypto->max_size)) {
pr_err("virtio_crypto: The length is too big\n");
err = -EINVAL;
@@ -436,7 +435,12 @@ __virtio_crypto_ablkcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req,
err = -ENOMEM;
goto free;
}
- memcpy(iv, req->info, ivsize);
+ memcpy(iv, req->iv, ivsize);
+ if (!vc_sym_req->encrypt)
+ scatterwalk_map_and_copy(req->iv, req->src,
+ req->cryptlen - AES_BLOCK_SIZE,
+ AES_BLOCK_SIZE, 0);
+
sg_init_one(&iv_sg, iv, ivsize);
sgs[num_out++] = &iv_sg;
vc_sym_req->iv = iv;
@@ -473,83 +477,93 @@ free:
return err;
}
-static int virtio_crypto_ablkcipher_encrypt(struct ablkcipher_request *req)
+static int virtio_crypto_skcipher_encrypt(struct skcipher_request *req)
{
- struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
- struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(atfm);
+ struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(req);
+ struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(atfm);
struct virtio_crypto_sym_request *vc_sym_req =
- ablkcipher_request_ctx(req);
+ skcipher_request_ctx(req);
struct virtio_crypto_request *vc_req = &vc_sym_req->base;
struct virtio_crypto *vcrypto = ctx->vcrypto;
/* Use the first data virtqueue as default */
struct data_queue *data_vq = &vcrypto->data_vq[0];
+ if (!req->cryptlen)
+ return 0;
+ if (req->cryptlen % AES_BLOCK_SIZE)
+ return -EINVAL;
+
vc_req->dataq = data_vq;
vc_req->alg_cb = virtio_crypto_dataq_sym_callback;
- vc_sym_req->ablkcipher_ctx = ctx;
- vc_sym_req->ablkcipher_req = req;
+ vc_sym_req->skcipher_ctx = ctx;
+ vc_sym_req->skcipher_req = req;
vc_sym_req->encrypt = true;
- return crypto_transfer_ablkcipher_request_to_engine(data_vq->engine, req);
+ return crypto_transfer_skcipher_request_to_engine(data_vq->engine, req);
}
-static int virtio_crypto_ablkcipher_decrypt(struct ablkcipher_request *req)
+static int virtio_crypto_skcipher_decrypt(struct skcipher_request *req)
{
- struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
- struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(atfm);
+ struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(req);
+ struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(atfm);
struct virtio_crypto_sym_request *vc_sym_req =
- ablkcipher_request_ctx(req);
+ skcipher_request_ctx(req);
struct virtio_crypto_request *vc_req = &vc_sym_req->base;
struct virtio_crypto *vcrypto = ctx->vcrypto;
/* Use the first data virtqueue as default */
struct data_queue *data_vq = &vcrypto->data_vq[0];
+ if (!req->cryptlen)
+ return 0;
+ if (req->cryptlen % AES_BLOCK_SIZE)
+ return -EINVAL;
+
vc_req->dataq = data_vq;
vc_req->alg_cb = virtio_crypto_dataq_sym_callback;
- vc_sym_req->ablkcipher_ctx = ctx;
- vc_sym_req->ablkcipher_req = req;
+ vc_sym_req->skcipher_ctx = ctx;
+ vc_sym_req->skcipher_req = req;
vc_sym_req->encrypt = false;
- return crypto_transfer_ablkcipher_request_to_engine(data_vq->engine, req);
+ return crypto_transfer_skcipher_request_to_engine(data_vq->engine, req);
}
-static int virtio_crypto_ablkcipher_init(struct crypto_tfm *tfm)
+static int virtio_crypto_skcipher_init(struct crypto_skcipher *tfm)
{
- struct virtio_crypto_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
- tfm->crt_ablkcipher.reqsize = sizeof(struct virtio_crypto_sym_request);
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct virtio_crypto_sym_request));
ctx->tfm = tfm;
- ctx->enginectx.op.do_one_request = virtio_crypto_ablkcipher_crypt_req;
+ ctx->enginectx.op.do_one_request = virtio_crypto_skcipher_crypt_req;
ctx->enginectx.op.prepare_request = NULL;
ctx->enginectx.op.unprepare_request = NULL;
return 0;
}
-static void virtio_crypto_ablkcipher_exit(struct crypto_tfm *tfm)
+static void virtio_crypto_skcipher_exit(struct crypto_skcipher *tfm)
{
- struct virtio_crypto_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
if (!ctx->vcrypto)
return;
- virtio_crypto_alg_ablkcipher_close_session(ctx, 1);
- virtio_crypto_alg_ablkcipher_close_session(ctx, 0);
+ virtio_crypto_alg_skcipher_close_session(ctx, 1);
+ virtio_crypto_alg_skcipher_close_session(ctx, 0);
virtcrypto_dev_put(ctx->vcrypto);
ctx->vcrypto = NULL;
}
-int virtio_crypto_ablkcipher_crypt_req(
+int virtio_crypto_skcipher_crypt_req(
struct crypto_engine *engine, void *vreq)
{
- struct ablkcipher_request *req = container_of(vreq, struct ablkcipher_request, base);
+ struct skcipher_request *req = container_of(vreq, struct skcipher_request, base);
struct virtio_crypto_sym_request *vc_sym_req =
- ablkcipher_request_ctx(req);
+ skcipher_request_ctx(req);
struct virtio_crypto_request *vc_req = &vc_sym_req->base;
struct data_queue *data_vq = vc_req->dataq;
int ret;
- ret = __virtio_crypto_ablkcipher_do_req(vc_sym_req, req, data_vq);
+ ret = __virtio_crypto_skcipher_do_req(vc_sym_req, req, data_vq);
if (ret < 0)
return ret;
@@ -558,12 +572,16 @@ int virtio_crypto_ablkcipher_crypt_req(
return 0;
}
-static void virtio_crypto_ablkcipher_finalize_req(
+static void virtio_crypto_skcipher_finalize_req(
struct virtio_crypto_sym_request *vc_sym_req,
- struct ablkcipher_request *req,
+ struct skcipher_request *req,
int err)
{
- crypto_finalize_ablkcipher_request(vc_sym_req->base.dataq->engine,
+ if (vc_sym_req->encrypt)
+ scatterwalk_map_and_copy(req->iv, req->dst,
+ req->cryptlen - AES_BLOCK_SIZE,
+ AES_BLOCK_SIZE, 0);
+ crypto_finalize_skcipher_request(vc_sym_req->base.dataq->engine,
req, err);
kzfree(vc_sym_req->iv);
virtcrypto_clear_request(&vc_sym_req->base);
@@ -573,27 +591,21 @@ static struct virtio_crypto_algo virtio_crypto_algs[] = { {
.algonum = VIRTIO_CRYPTO_CIPHER_AES_CBC,
.service = VIRTIO_CRYPTO_SERVICE_CIPHER,
.algo = {
- .cra_name = "cbc(aes)",
- .cra_driver_name = "virtio_crypto_aes_cbc",
- .cra_priority = 150,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct virtio_crypto_ablkcipher_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- .cra_type = &crypto_ablkcipher_type,
- .cra_init = virtio_crypto_ablkcipher_init,
- .cra_exit = virtio_crypto_ablkcipher_exit,
- .cra_u = {
- .ablkcipher = {
- .setkey = virtio_crypto_ablkcipher_setkey,
- .decrypt = virtio_crypto_ablkcipher_decrypt,
- .encrypt = virtio_crypto_ablkcipher_encrypt,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- },
- },
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "virtio_crypto_aes_cbc",
+ .base.cra_priority = 150,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct virtio_crypto_skcipher_ctx),
+ .base.cra_module = THIS_MODULE,
+ .init = virtio_crypto_skcipher_init,
+ .exit = virtio_crypto_skcipher_exit,
+ .setkey = virtio_crypto_skcipher_setkey,
+ .decrypt = virtio_crypto_skcipher_decrypt,
+ .encrypt = virtio_crypto_skcipher_encrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
},
} };
@@ -613,14 +625,14 @@ int virtio_crypto_algs_register(struct virtio_crypto *vcrypto)
continue;
if (virtio_crypto_algs[i].active_devs == 0) {
- ret = crypto_register_alg(&virtio_crypto_algs[i].algo);
+ ret = crypto_register_skcipher(&virtio_crypto_algs[i].algo);
if (ret)
goto unlock;
}
virtio_crypto_algs[i].active_devs++;
dev_info(&vcrypto->vdev->dev, "Registered algo %s\n",
- virtio_crypto_algs[i].algo.cra_name);
+ virtio_crypto_algs[i].algo.base.cra_name);
}
unlock:
@@ -644,7 +656,7 @@ void virtio_crypto_algs_unregister(struct virtio_crypto *vcrypto)
continue;
if (virtio_crypto_algs[i].active_devs == 1)
- crypto_unregister_alg(&virtio_crypto_algs[i].algo);
+ crypto_unregister_skcipher(&virtio_crypto_algs[i].algo);
virtio_crypto_algs[i].active_devs--;
}
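
For context on the scatterwalk_map_and_copy() calls added above: the skcipher API expects a CBC implementation to leave the last ciphertext block in req->iv on completion so that requests can be chained. A minimal sketch of that contract, assuming a request whose cryptlen is a non-zero multiple of AES_BLOCK_SIZE (the helper name is illustrative, not part of the patch):

#include <crypto/skcipher.h>
#include <crypto/scatterwalk.h>
#include <crypto/aes.h>

/* Propagate the chaining IV for CBC. On decryption the last
 * ciphertext block must be saved from req->src before an in-place
 * operation overwrites it; on encryption it only exists in req->dst
 * once the operation has completed. */
static void cbc_propagate_iv(struct skcipher_request *req, bool encrypt)
{
	struct scatterlist *sg = encrypt ? req->dst : req->src;

	scatterwalk_map_and_copy(req->iv, sg,
				 req->cryptlen - AES_BLOCK_SIZE,
				 AES_BLOCK_SIZE, 0);
}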
diff --git a/drivers/crypto/virtio/virtio_crypto_common.h b/drivers/crypto/virtio/virtio_crypto_common.h
index 1c6e00da5a29..a24f85c589e7 100644
--- a/drivers/crypto/virtio/virtio_crypto_common.h
+++ b/drivers/crypto/virtio/virtio_crypto_common.h
@@ -112,7 +112,7 @@ struct virtio_crypto *virtcrypto_get_dev_node(int node,
uint32_t algo);
int virtcrypto_dev_start(struct virtio_crypto *vcrypto);
void virtcrypto_dev_stop(struct virtio_crypto *vcrypto);
-int virtio_crypto_ablkcipher_crypt_req(
+int virtio_crypto_skcipher_crypt_req(
struct crypto_engine *engine, void *vreq);
void
diff --git a/drivers/crypto/vmx/Makefile b/drivers/crypto/vmx/Makefile
index cab32cfec9c4..709670d2b553 100644
--- a/drivers/crypto/vmx/Makefile
+++ b/drivers/crypto/vmx/Makefile
@@ -3,13 +3,13 @@ obj-$(CONFIG_CRYPTO_DEV_VMX_ENCRYPT) += vmx-crypto.o
vmx-crypto-objs := vmx.o aesp8-ppc.o ghashp8-ppc.o aes.o aes_cbc.o aes_ctr.o aes_xts.o ghash.o
ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),y)
-TARGET := linux-ppc64le
+override flavour := linux-ppc64le
else
-TARGET := linux-ppc64
+override flavour := linux-ppc64
endif
quiet_cmd_perl = PERL $@
- cmd_perl = $(PERL) $(<) $(TARGET) > $(@)
+ cmd_perl = $(PERL) $(<) $(flavour) > $(@)
targets += aesp8-ppc.S ghashp8-ppc.S
diff --git a/drivers/dax/Kconfig b/drivers/dax/Kconfig
index f33c73e4af41..3b6c06f07326 100644
--- a/drivers/dax/Kconfig
+++ b/drivers/dax/Kconfig
@@ -32,19 +32,36 @@ config DEV_DAX_PMEM
Say M if unsure
+config DEV_DAX_HMEM
+ tristate "HMEM DAX: direct access to 'specific purpose' memory"
+ depends on EFI_SOFT_RESERVE
+ default DEV_DAX
+ help
+ EFI 2.8 platforms, and others, may advertise 'specific purpose'
+ memory, for example a high-bandwidth memory pool. The indication
+ from platform firmware is meant to keep the memory out of typical
+ use by default. This driver creates device-dax instances for these
+ memory ranges, which can also be assigned to the DEV_DAX_KMEM
+ driver to override the reservation and add them to the kernel's
+ "System RAM" pool.
+
+ Say M if unsure.
+
config DEV_DAX_KMEM
tristate "KMEM DAX: volatile-use of persistent memory"
default DEV_DAX
depends on DEV_DAX
depends on MEMORY_HOTPLUG # for add_memory() and friends
help
- Support access to persistent memory as if it were RAM. This
- allows easier use of persistent memory by unmodified
- applications.
+ Support access to persistent or other performance-differentiated
+ memory as if it were System RAM. This allows easier use of
+ persistent memory by unmodified applications, or adds core kernel
+ memory services to heterogeneous memory types (HMEM) marked
+ "reserved" by platform firmware.
To use this feature, a DAX device must be unbound from the
- device_dax driver (PMEM DAX) and bound to this kmem driver
- on each boot.
+ device_dax driver and bound to this kmem driver on each boot.
Say N if unsure.
diff --git a/drivers/dax/Makefile b/drivers/dax/Makefile
index 81f7d54dadfb..80065b38b3c4 100644
--- a/drivers/dax/Makefile
+++ b/drivers/dax/Makefile
@@ -2,9 +2,11 @@
obj-$(CONFIG_DAX) += dax.o
obj-$(CONFIG_DEV_DAX) += device_dax.o
obj-$(CONFIG_DEV_DAX_KMEM) += kmem.o
+obj-$(CONFIG_DEV_DAX_HMEM) += dax_hmem.o
dax-y := super.o
dax-y += bus.o
device_dax-y := device.o
+dax_hmem-y := hmem.o
obj-y += pmem/
diff --git a/drivers/dax/bus.c b/drivers/dax/bus.c
index 8fafbeab510a..eccdda1f7b71 100644
--- a/drivers/dax/bus.c
+++ b/drivers/dax/bus.c
@@ -227,7 +227,7 @@ static void dax_region_unregister(void *region)
struct dax_region *alloc_dax_region(struct device *parent, int region_id,
struct resource *res, int target_node, unsigned int align,
- unsigned long pfn_flags)
+ unsigned long long pfn_flags)
{
struct dax_region *dax_region;
diff --git a/drivers/dax/bus.h b/drivers/dax/bus.h
index 8619e3299943..9e4eba67e8b9 100644
--- a/drivers/dax/bus.h
+++ b/drivers/dax/bus.h
@@ -11,7 +11,7 @@ struct dax_region;
void dax_region_put(struct dax_region *dax_region);
struct dax_region *alloc_dax_region(struct device *parent, int region_id,
struct resource *res, int target_node, unsigned int align,
- unsigned long flags);
+ unsigned long long flags);
enum dev_dax_subsys {
DEV_DAX_BUS,
diff --git a/drivers/dax/dax-private.h b/drivers/dax/dax-private.h
index 6ccca3b890d6..3107ce80e809 100644
--- a/drivers/dax/dax-private.h
+++ b/drivers/dax/dax-private.h
@@ -32,7 +32,7 @@ struct dax_region {
struct device *dev;
unsigned int align;
struct resource res;
- unsigned long pfn_flags;
+ unsigned long long pfn_flags;
};
/**
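
A note on the unsigned long to unsigned long long widening above: pfn_t flags such as PFN_DEV and PFN_MAP are defined in the top bits of a 64-bit value, so on a 32-bit kernel a plain unsigned long silently truncates them to zero. A small sketch of the failure mode (illustrative function, not part of the patch):

#include <linux/pfn_t.h>
#include <linux/printk.h>

static void pfn_flags_truncation_demo(void)
{
	unsigned long narrow = PFN_DEV | PFN_MAP;	/* 0 on 32-bit */
	unsigned long long wide = PFN_DEV | PFN_MAP;	/* bits intact */

	pr_info("narrow=%#lx wide=%#llx\n", narrow, wide);
}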
diff --git a/drivers/dax/hmem.c b/drivers/dax/hmem.c
new file mode 100644
index 000000000000..fe7214daf62e
--- /dev/null
+++ b/drivers/dax/hmem.c
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/platform_device.h>
+#include <linux/memregion.h>
+#include <linux/module.h>
+#include <linux/pfn_t.h>
+#include "bus.h"
+
+static int dax_hmem_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct dev_pagemap pgmap = { };
+ struct dax_region *dax_region;
+ struct memregion_info *mri;
+ struct dev_dax *dev_dax;
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENOMEM;
+
+ mri = dev->platform_data;
+ memcpy(&pgmap.res, res, sizeof(*res));
+
+ dax_region = alloc_dax_region(dev, pdev->id, res, mri->target_node,
+ PMD_SIZE, PFN_DEV|PFN_MAP);
+ if (!dax_region)
+ return -ENOMEM;
+
+ dev_dax = devm_create_dev_dax(dax_region, 0, &pgmap);
+ if (IS_ERR(dev_dax))
+ return PTR_ERR(dev_dax);
+
+ /* child dev_dax instances now own the lifetime of the dax_region */
+ dax_region_put(dax_region);
+ return 0;
+}
+
+static int dax_hmem_remove(struct platform_device *pdev)
+{
+ /* devm handles teardown */
+ return 0;
+}
+
+static struct platform_driver dax_hmem_driver = {
+ .probe = dax_hmem_probe,
+ .remove = dax_hmem_remove,
+ .driver = {
+ .name = "hmem",
+ },
+};
+
+module_platform_driver(dax_hmem_driver);
+
+MODULE_ALIAS("platform:hmem*");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Intel Corporation");
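
The driver above binds by platform-device name ("hmem") and reads a struct memregion_info from platform data. A hedged sketch of how a firmware-side producer might hand it a soft-reserved range; the address, size, node, and helper name below are invented for illustration, and a real producer derives them from firmware tables:

#include <linux/platform_device.h>
#include <linux/memregion.h>
#include <linux/ioport.h>
#include <linux/sizes.h>

static struct platform_device *register_hmem_example(void)
{
	/* Illustrative values only. */
	struct memregion_info mri = { .target_node = 0 };
	struct resource res = DEFINE_RES_MEM(0x100000000ULL, SZ_1G);

	/* The data is copied and later consumed as dev->platform_data
	 * in dax_hmem_probe() to pick the device-dax target node. */
	return platform_device_register_resndata(NULL, "hmem", 0,
						 &res, 1, &mri, sizeof(mri));
}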
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 446490c9d635..f840e61e5a27 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -160,6 +160,7 @@ int devfreq_update_status(struct devfreq *devfreq, unsigned long freq)
int lev, prev_lev, ret = 0;
unsigned long cur_time;
+ lockdep_assert_held(&devfreq->lock);
cur_time = jiffies;
/* Immediately exit if previous_freq is not initialized yet. */
@@ -409,6 +410,9 @@ static void devfreq_monitor(struct work_struct *work)
*/
void devfreq_monitor_start(struct devfreq *devfreq)
{
+ if (devfreq->governor->interrupt_driven)
+ return;
+
INIT_DEFERRABLE_WORK(&devfreq->work, devfreq_monitor);
if (devfreq->profile->polling_ms)
queue_delayed_work(devfreq_wq, &devfreq->work,
@@ -426,6 +430,9 @@ EXPORT_SYMBOL(devfreq_monitor_start);
*/
void devfreq_monitor_stop(struct devfreq *devfreq)
{
+ if (devfreq->governor->interrupt_driven)
+ return;
+
cancel_delayed_work_sync(&devfreq->work);
}
EXPORT_SYMBOL(devfreq_monitor_stop);
@@ -453,6 +460,10 @@ void devfreq_monitor_suspend(struct devfreq *devfreq)
devfreq_update_status(devfreq, devfreq->previous_freq);
devfreq->stop_polling = true;
mutex_unlock(&devfreq->lock);
+
+ if (devfreq->governor->interrupt_driven)
+ return;
+
cancel_delayed_work_sync(&devfreq->work);
}
EXPORT_SYMBOL(devfreq_monitor_suspend);
@@ -473,11 +484,15 @@ void devfreq_monitor_resume(struct devfreq *devfreq)
if (!devfreq->stop_polling)
goto out;
+ if (devfreq->governor->interrupt_driven)
+ goto out_update;
+
if (!delayed_work_pending(&devfreq->work) &&
devfreq->profile->polling_ms)
queue_delayed_work(devfreq_wq, &devfreq->work,
msecs_to_jiffies(devfreq->profile->polling_ms));
+out_update:
devfreq->last_stat_updated = jiffies;
devfreq->stop_polling = false;
@@ -509,6 +524,9 @@ void devfreq_interval_update(struct devfreq *devfreq, unsigned int *delay)
if (devfreq->stop_polling)
goto out;
+ if (devfreq->governor->interrupt_driven)
+ goto out;
+
/* if new delay is zero, stop polling */
if (!new_delay) {
mutex_unlock(&devfreq->lock);
@@ -625,7 +643,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
devfreq = find_device_devfreq(dev);
mutex_unlock(&devfreq_list_lock);
if (!IS_ERR(devfreq)) {
- dev_err(dev, "%s: Unable to create devfreq for the device.\n",
+ dev_err(dev, "%s: devfreq device already exists!\n",
__func__);
err = -EINVAL;
goto err_out;
@@ -1195,7 +1213,7 @@ static ssize_t available_governors_show(struct device *d,
* The devfreq with immutable governor (e.g., passive) shows
* only own governor.
*/
- if (df->governor->immutable) {
+ if (df->governor && df->governor->immutable) {
count = scnprintf(&buf[count], DEVFREQ_NAME_LEN,
"%s ", df->governor_name);
/*
@@ -1397,12 +1415,17 @@ static ssize_t trans_stat_show(struct device *dev,
int i, j;
unsigned int max_state = devfreq->profile->max_state;
- if (!devfreq->stop_polling &&
- devfreq_update_status(devfreq, devfreq->previous_freq))
- return 0;
if (max_state == 0)
return sprintf(buf, "Not Supported.\n");
+ mutex_lock(&devfreq->lock);
+ if (!devfreq->stop_polling &&
+ devfreq_update_status(devfreq, devfreq->previous_freq)) {
+ mutex_unlock(&devfreq->lock);
+ return 0;
+ }
+ mutex_unlock(&devfreq->lock);
+
len = sprintf(buf, " From : To\n");
len += sprintf(buf + len, " :");
for (i = 0; i < max_state; i++)
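
The trans_stat_show() fix above pairs with the lockdep_assert_held() added to devfreq_update_status(): the helper now states its locking contract and the sysfs reader takes devfreq->lock around the call. The pattern in isolation, with illustrative names:

#include <linux/jiffies.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>

struct example_stats {
	struct mutex lock;
	unsigned long last_updated;
};

/* The updater documents its requirement; lockdep splats at runtime
 * if any caller forgets the mutex. */
static void example_update(struct example_stats *s)
{
	lockdep_assert_held(&s->lock);
	s->last_updated = jiffies;
}

static void example_reader(struct example_stats *s)
{
	mutex_lock(&s->lock);
	example_update(s);
	mutex_unlock(&s->lock);
}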
diff --git a/drivers/devfreq/event/exynos-ppmu.c b/drivers/devfreq/event/exynos-ppmu.c
index 87b42055e6bc..85c7a77bf3f0 100644
--- a/drivers/devfreq/event/exynos-ppmu.c
+++ b/drivers/devfreq/event/exynos-ppmu.c
@@ -673,7 +673,6 @@ static int exynos_ppmu_probe(struct platform_device *pdev)
for (i = 0; i < info->num_events; i++) {
edev[i] = devm_devfreq_event_add_edev(&pdev->dev, &desc[i]);
if (IS_ERR(edev[i])) {
- ret = PTR_ERR(edev[i]);
dev_err(&pdev->dev,
"failed to add devfreq-event device\n");
return PTR_ERR(edev[i]);
diff --git a/drivers/devfreq/governor.h b/drivers/devfreq/governor.h
index bbe5ff9fcecf..dc7533ccc3db 100644
--- a/drivers/devfreq/governor.h
+++ b/drivers/devfreq/governor.h
@@ -31,6 +31,8 @@
* @name: Governor's name
* @immutable: Immutable flag for governor. If the value is 1,
* this governor is never changeable to another governor.
+ * @interrupt_driven: Devfreq core won't schedule polling work for this
+ * governor if the value is set to 1.
* @get_target_freq: Returns desired operating frequency for the device.
* Basically, get_target_freq will run
* devfreq_dev_profile.get_dev_status() to get the
@@ -49,6 +51,7 @@ struct devfreq_governor {
const char name[DEVFREQ_NAME_LEN];
const unsigned int immutable;
+ const unsigned int interrupt_driven;
int (*get_target_freq)(struct devfreq *this, unsigned long *freq);
int (*event_handler)(struct devfreq *devfreq,
unsigned int event, void *data);
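
A minimal sketch of a governor using the new flag (stub handlers, names invented for illustration): with .interrupt_driven set, devfreq_monitor_start() and the related polling helpers become no-ops, so the driver's interrupt path is the only thing that triggers frequency updates.

#include "governor.h"

static int example_get_target_freq(struct devfreq *df, unsigned long *freq)
{
	*freq = df->previous_freq;	/* placeholder policy */
	return 0;
}

static int example_event_handler(struct devfreq *df, unsigned int event,
				 void *data)
{
	return 0;			/* nothing to set up in this sketch */
}

static struct devfreq_governor example_irq_governor = {
	.name = "example_irq_gov",
	.interrupt_driven = true,
	.get_target_freq = example_get_target_freq,
	.event_handler = example_event_handler,
};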
diff --git a/drivers/devfreq/tegra30-devfreq.c b/drivers/devfreq/tegra30-devfreq.c
index a6ba75f4106d..0b65f89d74d5 100644
--- a/drivers/devfreq/tegra30-devfreq.c
+++ b/drivers/devfreq/tegra30-devfreq.c
@@ -11,11 +11,13 @@
#include <linux/devfreq.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/irq.h>
#include <linux/module.h>
-#include <linux/mod_devicetable.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/reset.h>
+#include <linux/workqueue.h>
#include "governor.h"
@@ -33,6 +35,8 @@
#define ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN BIT(30)
#define ACTMON_DEV_CTRL_ENB BIT(31)
+#define ACTMON_DEV_CTRL_STOP 0x00000000
+
#define ACTMON_DEV_UPPER_WMARK 0x4
#define ACTMON_DEV_LOWER_WMARK 0x8
#define ACTMON_DEV_INIT_AVG 0xc
@@ -68,6 +72,8 @@
#define KHZ 1000
+#define KHZ_MAX (ULONG_MAX / KHZ)
+
/* Assume that the bus is saturated if the utilization is 25% */
#define BUS_SATURATION_RATIO 25
@@ -90,9 +96,10 @@ struct tegra_devfreq_device_config {
unsigned int boost_down_threshold;
/*
- * Threshold of activity (cycles) below which the CPU frequency isn't
- * to be taken into account. This is to avoid increasing the EMC
- * frequency when the CPU is very busy but not accessing the bus often.
+ * Threshold of activity (cycles translated to kHz) below which the
+ * CPU frequency isn't to be taken into account. This is to avoid
+ * increasing the EMC frequency when the CPU is very busy but not
+ * accessing the bus often.
*/
u32 avg_dependency_threshold;
};
@@ -102,7 +109,7 @@ enum tegra_actmon_device {
MCCPU,
};
-static struct tegra_devfreq_device_config actmon_device_configs[] = {
+static const struct tegra_devfreq_device_config actmon_device_configs[] = {
{
/* MCALL: All memory accesses (including from the CPUs) */
.offset = 0x1c0,
@@ -117,10 +124,10 @@ static struct tegra_devfreq_device_config actmon_device_configs[] = {
.offset = 0x200,
.irq_mask = 1 << 25,
.boost_up_coeff = 800,
- .boost_down_coeff = 90,
+ .boost_down_coeff = 40,
.boost_up_threshold = 27,
.boost_down_threshold = 10,
- .avg_dependency_threshold = 50000,
+ .avg_dependency_threshold = 16000, /* 16MHz in kHz units */
},
};
@@ -156,11 +163,16 @@ struct tegra_devfreq {
struct clk *emc_clock;
unsigned long max_freq;
unsigned long cur_freq;
- struct notifier_block rate_change_nb;
+ struct notifier_block clk_rate_change_nb;
+
+ struct delayed_work cpufreq_update_work;
+ struct notifier_block cpu_rate_change_nb;
struct tegra_devfreq_device devices[ARRAY_SIZE(actmon_device_configs)];
- int irq;
+ unsigned int irq;
+
+ bool started;
};
struct tegra_actmon_emc_ratio {
@@ -168,8 +180,8 @@ struct tegra_actmon_emc_ratio {
unsigned long emc_freq;
};
-static struct tegra_actmon_emc_ratio actmon_emc_ratios[] = {
- { 1400000, ULONG_MAX },
+static const struct tegra_actmon_emc_ratio actmon_emc_ratios[] = {
+ { 1400000, KHZ_MAX },
{ 1200000, 750000 },
{ 1100000, 600000 },
{ 1000000, 500000 },
@@ -199,18 +211,26 @@ static void device_writel(struct tegra_devfreq_device *dev, u32 val,
writel_relaxed(val, dev->regs + offset);
}
-static unsigned long do_percent(unsigned long val, unsigned int pct)
+static unsigned long do_percent(unsigned long long val, unsigned int pct)
{
- return val * pct / 100;
+ val = val * pct;
+ do_div(val, 100);
+
+ /*
+ * High freq + high boosting percent + large polling interval can
+ * result in integer overflow when the watermarks are calculated.
+ */
+ return min_t(u64, val, U32_MAX);
}
static void tegra_devfreq_update_avg_wmark(struct tegra_devfreq *tegra,
struct tegra_devfreq_device *dev)
{
- u32 avg = dev->avg_count;
u32 avg_band_freq = tegra->max_freq * ACTMON_DEFAULT_AVG_BAND / KHZ;
- u32 band = avg_band_freq * ACTMON_SAMPLING_PERIOD;
+ u32 band = avg_band_freq * tegra->devfreq->profile->polling_ms;
+ u32 avg;
+ avg = min(dev->avg_count, U32_MAX - band);
device_writel(dev, avg + band, ACTMON_DEV_AVG_UPPER_WMARK);
avg = max(dev->avg_count, band);
@@ -220,7 +240,7 @@ static void tegra_devfreq_update_avg_wmark(struct tegra_devfreq *tegra,
static void tegra_devfreq_update_wmark(struct tegra_devfreq *tegra,
struct tegra_devfreq_device *dev)
{
- u32 val = tegra->cur_freq * ACTMON_SAMPLING_PERIOD;
+ u32 val = tegra->cur_freq * tegra->devfreq->profile->polling_ms;
device_writel(dev, do_percent(val, dev->config->boost_up_threshold),
ACTMON_DEV_UPPER_WMARK);
@@ -229,12 +249,6 @@ static void tegra_devfreq_update_wmark(struct tegra_devfreq *tegra,
ACTMON_DEV_LOWER_WMARK);
}
-static void actmon_write_barrier(struct tegra_devfreq *tegra)
-{
- /* ensure the update has reached the ACTMON */
- readl(tegra->regs + ACTMON_GLB_STATUS);
-}
-
static void actmon_isr_device(struct tegra_devfreq *tegra,
struct tegra_devfreq_device *dev)
{
@@ -256,10 +270,10 @@ static void actmon_isr_device(struct tegra_devfreq *tegra,
dev_ctrl |= ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
- if (dev->boost_freq >= tegra->max_freq)
+ if (dev->boost_freq >= tegra->max_freq) {
+ dev_ctrl &= ~ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN;
dev->boost_freq = tegra->max_freq;
- else
- dev_ctrl |= ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN;
+ }
} else if (intr_status & ACTMON_DEV_INTR_CONSECUTIVE_LOWER) {
/*
* new_boost = old_boost * down_coef
@@ -270,31 +284,22 @@ static void actmon_isr_device(struct tegra_devfreq *tegra,
dev_ctrl |= ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN;
- if (dev->boost_freq < (ACTMON_BOOST_FREQ_STEP >> 1))
- dev->boost_freq = 0;
- else
- dev_ctrl |= ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
- }
-
- if (dev->config->avg_dependency_threshold) {
- if (dev->avg_count >= dev->config->avg_dependency_threshold)
- dev_ctrl |= ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
- else if (dev->boost_freq == 0)
+ if (dev->boost_freq < (ACTMON_BOOST_FREQ_STEP >> 1)) {
dev_ctrl &= ~ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
+ dev->boost_freq = 0;
+ }
}
device_writel(dev, dev_ctrl, ACTMON_DEV_CTRL);
device_writel(dev, ACTMON_INTR_STATUS_CLEAR, ACTMON_DEV_INTR_STATUS);
-
- actmon_write_barrier(tegra);
}
static unsigned long actmon_cpu_to_emc_rate(struct tegra_devfreq *tegra,
unsigned long cpu_freq)
{
unsigned int i;
- struct tegra_actmon_emc_ratio *ratio = actmon_emc_ratios;
+ const struct tegra_actmon_emc_ratio *ratio = actmon_emc_ratios;
for (i = 0; i < ARRAY_SIZE(actmon_emc_ratios); i++, ratio++) {
if (cpu_freq >= ratio->cpu_freq) {
@@ -308,25 +313,37 @@ static unsigned long actmon_cpu_to_emc_rate(struct tegra_devfreq *tegra,
return 0;
}
+static unsigned long actmon_device_target_freq(struct tegra_devfreq *tegra,
+ struct tegra_devfreq_device *dev)
+{
+ unsigned int avg_sustain_coef;
+ unsigned long target_freq;
+
+ target_freq = dev->avg_count / tegra->devfreq->profile->polling_ms;
+ avg_sustain_coef = 100 * 100 / dev->config->boost_up_threshold;
+ target_freq = do_percent(target_freq, avg_sustain_coef);
+
+ return target_freq;
+}
+
static void actmon_update_target(struct tegra_devfreq *tegra,
struct tegra_devfreq_device *dev)
{
unsigned long cpu_freq = 0;
unsigned long static_cpu_emc_freq = 0;
- unsigned int avg_sustain_coef;
- if (dev->config->avg_dependency_threshold) {
- cpu_freq = cpufreq_get(0);
- static_cpu_emc_freq = actmon_cpu_to_emc_rate(tegra, cpu_freq);
- }
+ dev->target_freq = actmon_device_target_freq(tegra, dev);
- dev->target_freq = dev->avg_count / ACTMON_SAMPLING_PERIOD;
- avg_sustain_coef = 100 * 100 / dev->config->boost_up_threshold;
- dev->target_freq = do_percent(dev->target_freq, avg_sustain_coef);
- dev->target_freq += dev->boost_freq;
+ if (dev->config->avg_dependency_threshold &&
+ dev->config->avg_dependency_threshold <= dev->target_freq) {
+ cpu_freq = cpufreq_quick_get(0);
+ static_cpu_emc_freq = actmon_cpu_to_emc_rate(tegra, cpu_freq);
- if (dev->avg_count >= dev->config->avg_dependency_threshold)
+ dev->target_freq += dev->boost_freq;
dev->target_freq = max(dev->target_freq, static_cpu_emc_freq);
+ } else {
+ dev->target_freq += dev->boost_freq;
+ }
}
static irqreturn_t actmon_thread_isr(int irq, void *data)
@@ -354,8 +371,8 @@ static irqreturn_t actmon_thread_isr(int irq, void *data)
return handled ? IRQ_HANDLED : IRQ_NONE;
}
-static int tegra_actmon_rate_notify_cb(struct notifier_block *nb,
- unsigned long action, void *ptr)
+static int tegra_actmon_clk_notify_cb(struct notifier_block *nb,
+ unsigned long action, void *ptr)
{
struct clk_notifier_data *data = ptr;
struct tegra_devfreq *tegra;
@@ -365,7 +382,7 @@ static int tegra_actmon_rate_notify_cb(struct notifier_block *nb,
if (action != POST_RATE_CHANGE)
return NOTIFY_OK;
- tegra = container_of(nb, struct tegra_devfreq, rate_change_nb);
+ tegra = container_of(nb, struct tegra_devfreq, clk_rate_change_nb);
tegra->cur_freq = data->new_rate / KHZ;
@@ -375,7 +392,79 @@ static int tegra_actmon_rate_notify_cb(struct notifier_block *nb,
tegra_devfreq_update_wmark(tegra, dev);
}
- actmon_write_barrier(tegra);
+ return NOTIFY_OK;
+}
+
+static void tegra_actmon_delayed_update(struct work_struct *work)
+{
+ struct tegra_devfreq *tegra = container_of(work, struct tegra_devfreq,
+ cpufreq_update_work.work);
+
+ mutex_lock(&tegra->devfreq->lock);
+ update_devfreq(tegra->devfreq);
+ mutex_unlock(&tegra->devfreq->lock);
+}
+
+static unsigned long
+tegra_actmon_cpufreq_contribution(struct tegra_devfreq *tegra,
+ unsigned int cpu_freq)
+{
+ struct tegra_devfreq_device *actmon_dev = &tegra->devices[MCCPU];
+ unsigned long static_cpu_emc_freq, dev_freq;
+
+ dev_freq = actmon_device_target_freq(tegra, actmon_dev);
+
+ /* check whether CPU's freq is taken into account at all */
+ if (dev_freq < actmon_dev->config->avg_dependency_threshold)
+ return 0;
+
+ static_cpu_emc_freq = actmon_cpu_to_emc_rate(tegra, cpu_freq);
+
+ if (dev_freq >= static_cpu_emc_freq)
+ return 0;
+
+ return static_cpu_emc_freq;
+}
+
+static int tegra_actmon_cpu_notify_cb(struct notifier_block *nb,
+ unsigned long action, void *ptr)
+{
+ struct cpufreq_freqs *freqs = ptr;
+ struct tegra_devfreq *tegra;
+ unsigned long old, new, delay;
+
+ if (action != CPUFREQ_POSTCHANGE)
+ return NOTIFY_OK;
+
+ tegra = container_of(nb, struct tegra_devfreq, cpu_rate_change_nb);
+
+ /*
+ * Quickly check whether CPU frequency should be taken into account
+ * at all, without blocking CPUFreq's core.
+ */
+ if (mutex_trylock(&tegra->devfreq->lock)) {
+ old = tegra_actmon_cpufreq_contribution(tegra, freqs->old);
+ new = tegra_actmon_cpufreq_contribution(tegra, freqs->new);
+ mutex_unlock(&tegra->devfreq->lock);
+
+ /*
+ * If the CPU's frequency shouldn't be taken into account at
+ * the moment, then there is no need to update the devfreq's
+ * state because the ISR will re-check the CPU's frequency on
+ * the next interrupt.
+ */
+ if (old == new)
+ return NOTIFY_OK;
+ }
+
+ /*
+ * CPUFreq driver should support CPUFREQ_ASYNC_NOTIFICATION in order
+ * to allow asynchronous notifications. This means we can't block
+ * here for too long, otherwise CPUFreq's core will complain with a
+ * warning splat.
+ */
+ delay = msecs_to_jiffies(ACTMON_SAMPLING_PERIOD);
+ schedule_delayed_work(&tegra->cpufreq_update_work, delay);
return NOTIFY_OK;
}
@@ -385,9 +474,12 @@ static void tegra_actmon_configure_device(struct tegra_devfreq *tegra,
{
u32 val = 0;
+ /* reset boosting on governor's restart */
+ dev->boost_freq = 0;
+
dev->target_freq = tegra->cur_freq;
- dev->avg_count = tegra->cur_freq * ACTMON_SAMPLING_PERIOD;
+ dev->avg_count = tegra->cur_freq * tegra->devfreq->profile->polling_ms;
device_writel(dev, dev->avg_count, ACTMON_DEV_INIT_AVG);
tegra_devfreq_update_avg_wmark(tegra, dev);
@@ -405,45 +497,116 @@ static void tegra_actmon_configure_device(struct tegra_devfreq *tegra,
<< ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_NUM_SHIFT;
val |= ACTMON_DEV_CTRL_AVG_ABOVE_WMARK_EN;
val |= ACTMON_DEV_CTRL_AVG_BELOW_WMARK_EN;
- val |= ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
val |= ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN;
val |= ACTMON_DEV_CTRL_ENB;
device_writel(dev, val, ACTMON_DEV_CTRL);
}
-static void tegra_actmon_start(struct tegra_devfreq *tegra)
+static void tegra_actmon_stop_devices(struct tegra_devfreq *tegra)
{
+ struct tegra_devfreq_device *dev = tegra->devices;
unsigned int i;
- disable_irq(tegra->irq);
+ for (i = 0; i < ARRAY_SIZE(tegra->devices); i++, dev++) {
+ device_writel(dev, ACTMON_DEV_CTRL_STOP, ACTMON_DEV_CTRL);
+ device_writel(dev, ACTMON_INTR_STATUS_CLEAR,
+ ACTMON_DEV_INTR_STATUS);
+ }
+}
- actmon_writel(tegra, ACTMON_SAMPLING_PERIOD - 1,
+static int tegra_actmon_resume(struct tegra_devfreq *tegra)
+{
+ unsigned int i;
+ int err;
+
+ if (!tegra->devfreq->profile->polling_ms || !tegra->started)
+ return 0;
+
+ actmon_writel(tegra, tegra->devfreq->profile->polling_ms - 1,
ACTMON_GLB_PERIOD_CTRL);
+ /*
+ * CLK notifications are needed in order to reconfigure the upper
+ * consecutive watermark in accordance with the actual clock rate,
+ * avoiding unnecessary upper-watermark interrupts.
+ */
+ err = clk_notifier_register(tegra->emc_clock,
+ &tegra->clk_rate_change_nb);
+ if (err) {
+ dev_err(tegra->devfreq->dev.parent,
+ "Failed to register rate change notifier\n");
+ return err;
+ }
+
+ tegra->cur_freq = clk_get_rate(tegra->emc_clock) / KHZ;
+
for (i = 0; i < ARRAY_SIZE(tegra->devices); i++)
tegra_actmon_configure_device(tegra, &tegra->devices[i]);
- actmon_write_barrier(tegra);
+ /*
+ * We estimate the CPU's memory bandwidth requirement based on
+ * the amount of memory accesses and the system load, judging by
+ * the CPU's frequency. We also don't want to receive events about
+ * the CPU's frequency transitions while the governor is stopped,
+ * hence the notifier is registered dynamically.
+ */
+ err = cpufreq_register_notifier(&tegra->cpu_rate_change_nb,
+ CPUFREQ_TRANSITION_NOTIFIER);
+ if (err) {
+ dev_err(tegra->devfreq->dev.parent,
+ "Failed to register rate change notifier: %d\n", err);
+ goto err_stop;
+ }
enable_irq(tegra->irq);
+
+ return 0;
+
+err_stop:
+ tegra_actmon_stop_devices(tegra);
+
+ clk_notifier_unregister(tegra->emc_clock, &tegra->clk_rate_change_nb);
+
+ return err;
}
-static void tegra_actmon_stop(struct tegra_devfreq *tegra)
+static int tegra_actmon_start(struct tegra_devfreq *tegra)
{
- unsigned int i;
+ int ret = 0;
- disable_irq(tegra->irq);
+ if (!tegra->started) {
+ tegra->started = true;
- for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
- device_writel(&tegra->devices[i], 0x00000000, ACTMON_DEV_CTRL);
- device_writel(&tegra->devices[i], ACTMON_INTR_STATUS_CLEAR,
- ACTMON_DEV_INTR_STATUS);
+ ret = tegra_actmon_resume(tegra);
+ if (ret)
+ tegra->started = false;
}
- actmon_write_barrier(tegra);
+ return ret;
+}
- enable_irq(tegra->irq);
+static void tegra_actmon_pause(struct tegra_devfreq *tegra)
+{
+ if (!tegra->devfreq->profile->polling_ms || !tegra->started)
+ return;
+
+ disable_irq(tegra->irq);
+
+ cpufreq_unregister_notifier(&tegra->cpu_rate_change_nb,
+ CPUFREQ_TRANSITION_NOTIFIER);
+
+ cancel_delayed_work_sync(&tegra->cpufreq_update_work);
+
+ tegra_actmon_stop_devices(tegra);
+
+ clk_notifier_unregister(tegra->emc_clock, &tegra->clk_rate_change_nb);
+}
+
+static void tegra_actmon_stop(struct tegra_devfreq *tegra)
+{
+ tegra_actmon_pause(tegra);
+ tegra->started = false;
}
static int tegra_devfreq_target(struct device *dev, unsigned long *freq,
@@ -463,7 +626,7 @@ static int tegra_devfreq_target(struct device *dev, unsigned long *freq,
rate = dev_pm_opp_get_freq(opp);
dev_pm_opp_put(opp);
- err = clk_set_min_rate(tegra->emc_clock, rate);
+ err = clk_set_min_rate(tegra->emc_clock, rate * KHZ);
if (err)
return err;
@@ -492,7 +655,7 @@ static int tegra_devfreq_get_dev_status(struct device *dev,
stat->private_data = tegra;
/* The below are to be used by the other governors */
- stat->current_frequency = cur_freq * KHZ;
+ stat->current_frequency = cur_freq;
actmon_dev = &tegra->devices[MCALL];
@@ -503,7 +666,7 @@ static int tegra_devfreq_get_dev_status(struct device *dev,
stat->busy_time *= 100 / BUS_SATURATION_RATIO;
/* Number of cycles in a sampling period */
- stat->total_time = ACTMON_SAMPLING_PERIOD * cur_freq;
+ stat->total_time = tegra->devfreq->profile->polling_ms * cur_freq;
stat->busy_time = min(stat->busy_time, stat->total_time);
@@ -511,7 +674,7 @@ static int tegra_devfreq_get_dev_status(struct device *dev,
}
static struct devfreq_dev_profile tegra_devfreq_profile = {
- .polling_ms = 0,
+ .polling_ms = ACTMON_SAMPLING_PERIOD,
.target = tegra_devfreq_target,
.get_dev_status = tegra_devfreq_get_dev_status,
};
@@ -542,7 +705,7 @@ static int tegra_governor_get_target(struct devfreq *devfreq,
target_freq = max(target_freq, dev->target_freq);
}
- *freq = target_freq * KHZ;
+ *freq = target_freq;
return 0;
}
@@ -551,11 +714,19 @@ static int tegra_governor_event_handler(struct devfreq *devfreq,
unsigned int event, void *data)
{
struct tegra_devfreq *tegra = dev_get_drvdata(devfreq->dev.parent);
+ unsigned int *new_delay = data;
+ int ret = 0;
+
+ /*
+ * Couple the devfreq device with the governor early because it
+ * is needed at the moment of the governor's start (used by ISR).
+ */
+ tegra->devfreq = devfreq;
switch (event) {
case DEVFREQ_GOV_START:
devfreq_monitor_start(devfreq);
- tegra_actmon_start(tegra);
+ ret = tegra_actmon_start(tegra);
break;
case DEVFREQ_GOV_STOP:
@@ -563,6 +734,21 @@ static int tegra_governor_event_handler(struct devfreq *devfreq,
devfreq_monitor_stop(devfreq);
break;
+ case DEVFREQ_GOV_INTERVAL:
+ /*
+ * ACTMON hardware supports up to 256 milliseconds for the
+ * sampling period.
+ */
+ if (*new_delay > 256) {
+ ret = -EINVAL;
+ break;
+ }
+
+ tegra_actmon_pause(tegra);
+ devfreq_interval_update(devfreq, new_delay);
+ ret = tegra_actmon_resume(tegra);
+ break;
+
case DEVFREQ_GOV_SUSPEND:
tegra_actmon_stop(tegra);
devfreq_monitor_suspend(devfreq);
@@ -570,11 +756,11 @@ static int tegra_governor_event_handler(struct devfreq *devfreq,
case DEVFREQ_GOV_RESUME:
devfreq_monitor_resume(devfreq);
- tegra_actmon_start(tegra);
+ ret = tegra_actmon_start(tegra);
break;
}
- return 0;
+ return ret;
}
static struct devfreq_governor tegra_devfreq_governor = {
@@ -582,14 +768,16 @@ static struct devfreq_governor tegra_devfreq_governor = {
.get_target_freq = tegra_governor_get_target,
.event_handler = tegra_governor_event_handler,
.immutable = true,
+ .interrupt_driven = true,
};
static int tegra_devfreq_probe(struct platform_device *pdev)
{
- struct tegra_devfreq *tegra;
struct tegra_devfreq_device *dev;
+ struct tegra_devfreq *tegra;
+ struct devfreq *devfreq;
unsigned int i;
- unsigned long rate;
+ long rate;
int err;
tegra = devm_kzalloc(&pdev->dev, sizeof(*tegra), GFP_KERNEL);
@@ -618,12 +806,22 @@ static int tegra_devfreq_probe(struct platform_device *pdev)
return PTR_ERR(tegra->emc_clock);
}
- tegra->irq = platform_get_irq(pdev, 0);
- if (tegra->irq < 0) {
- err = tegra->irq;
+ err = platform_get_irq(pdev, 0);
+ if (err < 0) {
dev_err(&pdev->dev, "Failed to get IRQ: %d\n", err);
return err;
}
+ tegra->irq = err;
+
+ irq_set_status_flags(tegra->irq, IRQ_NOAUTOEN);
+
+ err = devm_request_threaded_irq(&pdev->dev, tegra->irq, NULL,
+ actmon_thread_isr, IRQF_ONESHOT,
+ "tegra-devfreq", tegra);
+ if (err) {
+ dev_err(&pdev->dev, "Interrupt request failed: %d\n", err);
+ return err;
+ }
reset_control_assert(tegra->reset);
@@ -636,8 +834,13 @@ static int tegra_devfreq_probe(struct platform_device *pdev)
reset_control_deassert(tegra->reset);
- tegra->max_freq = clk_round_rate(tegra->emc_clock, ULONG_MAX) / KHZ;
- tegra->cur_freq = clk_get_rate(tegra->emc_clock) / KHZ;
+ rate = clk_round_rate(tegra->emc_clock, ULONG_MAX);
+ if (rate < 0) {
+ dev_err(&pdev->dev, "Failed to round clock rate: %ld\n", rate);
+ return rate;
+ }
+
+ tegra->max_freq = rate / KHZ;
for (i = 0; i < ARRAY_SIZE(actmon_device_configs); i++) {
dev = tegra->devices + i;
@@ -648,7 +851,14 @@ static int tegra_devfreq_probe(struct platform_device *pdev)
for (rate = 0; rate <= tegra->max_freq * KHZ; rate++) {
rate = clk_round_rate(tegra->emc_clock, rate);
- err = dev_pm_opp_add(&pdev->dev, rate, 0);
+ if (rate < 0) {
+ dev_err(&pdev->dev,
+ "Failed to round clock rate: %ld\n", rate);
+ err = rate;
+ goto remove_opps;
+ }
+
+ err = dev_pm_opp_add(&pdev->dev, rate / KHZ, 0);
if (err) {
dev_err(&pdev->dev, "Failed to add OPP: %d\n", err);
goto remove_opps;
@@ -657,49 +867,33 @@ static int tegra_devfreq_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, tegra);
- tegra->rate_change_nb.notifier_call = tegra_actmon_rate_notify_cb;
- err = clk_notifier_register(tegra->emc_clock, &tegra->rate_change_nb);
- if (err) {
- dev_err(&pdev->dev,
- "Failed to register rate change notifier\n");
- goto remove_opps;
- }
+ tegra->clk_rate_change_nb.notifier_call = tegra_actmon_clk_notify_cb;
+ tegra->cpu_rate_change_nb.notifier_call = tegra_actmon_cpu_notify_cb;
+
+ INIT_DELAYED_WORK(&tegra->cpufreq_update_work,
+ tegra_actmon_delayed_update);
err = devfreq_add_governor(&tegra_devfreq_governor);
if (err) {
dev_err(&pdev->dev, "Failed to add governor: %d\n", err);
- goto unreg_notifier;
+ goto remove_opps;
}
tegra_devfreq_profile.initial_freq = clk_get_rate(tegra->emc_clock);
- tegra->devfreq = devfreq_add_device(&pdev->dev,
- &tegra_devfreq_profile,
- "tegra_actmon",
- NULL);
- if (IS_ERR(tegra->devfreq)) {
- err = PTR_ERR(tegra->devfreq);
- goto remove_governor;
- }
+ tegra_devfreq_profile.initial_freq /= KHZ;
- err = devm_request_threaded_irq(&pdev->dev, tegra->irq, NULL,
- actmon_thread_isr, IRQF_ONESHOT,
- "tegra-devfreq", tegra);
- if (err) {
- dev_err(&pdev->dev, "Interrupt request failed: %d\n", err);
- goto remove_devfreq;
+ devfreq = devfreq_add_device(&pdev->dev, &tegra_devfreq_profile,
+ "tegra_actmon", NULL);
+ if (IS_ERR(devfreq)) {
+ err = PTR_ERR(devfreq);
+ goto remove_governor;
}
return 0;
-remove_devfreq:
- devfreq_remove_device(tegra->devfreq);
-
remove_governor:
devfreq_remove_governor(&tegra_devfreq_governor);
-unreg_notifier:
- clk_notifier_unregister(tegra->emc_clock, &tegra->rate_change_nb);
-
remove_opps:
dev_pm_opp_remove_all_dynamic(&pdev->dev);
@@ -716,7 +910,6 @@ static int tegra_devfreq_remove(struct platform_device *pdev)
devfreq_remove_device(tegra->devfreq);
devfreq_remove_governor(&tegra_devfreq_governor);
- clk_notifier_unregister(tegra->emc_clock, &tegra->rate_change_nb);
dev_pm_opp_remove_all_dynamic(&pdev->dev);
reset_control_reset(tegra->reset);
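
The probe rework above requests the interrupt early but marks it IRQ_NOAUTOEN first, so the threaded handler cannot fire before the governor has started and enable_irq() is called. The pattern in isolation (stub handler, illustrative names):

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/platform_device.h>

static irqreturn_t example_isr(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int example_request_irq(struct platform_device *pdev, void *ctx)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;

	/* Keep the line masked; enable_irq() unmasks it later, once
	 * the hardware and governor state are fully initialized. */
	irq_set_status_flags(irq, IRQ_NOAUTOEN);

	return devm_request_threaded_irq(&pdev->dev, irq, NULL, example_isr,
					 IRQF_ONESHOT, "example", ctx);
}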
diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c
index fbda4b876afd..e91cf1147a4e 100644
--- a/drivers/edac/altera_edac.c
+++ b/drivers/edac/altera_edac.c
@@ -14,6 +14,7 @@
#include <linux/interrupt.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/kernel.h>
+#include <linux/mfd/altera-sysmgr.h>
#include <linux/mfd/syscon.h>
#include <linux/notifier.h>
#include <linux/of_address.h>
@@ -275,7 +276,6 @@ release:
return ret;
}
-static int socfpga_is_a10(void);
static int altr_sdram_probe(struct platform_device *pdev)
{
const struct of_device_id *id;
@@ -399,7 +399,7 @@ static int altr_sdram_probe(struct platform_device *pdev)
goto err;
/* Only the Arria10 has separate IRQs */
- if (socfpga_is_a10()) {
+ if (of_machine_is_compatible("altr,socfpga-arria10")) {
/* Arria10 specific initialization */
res = a10_init(mc_vbase);
if (res < 0)
@@ -502,68 +502,6 @@ module_platform_driver(altr_sdram_edac_driver);
#endif /* CONFIG_EDAC_ALTERA_SDRAM */
-/**************** Stratix 10 EDAC Memory Controller Functions ************/
-
-/**
- * s10_protected_reg_write
- * Write to a protected SMC register.
- * @context: Not used.
- * @reg: Address of register
- * @value: Value to write
- * Return: INTEL_SIP_SMC_STATUS_OK (0) on success
- * INTEL_SIP_SMC_REG_ERROR on error
- * INTEL_SIP_SMC_RETURN_UNKNOWN_FUNCTION if not supported
- */
-static int s10_protected_reg_write(void *context, unsigned int reg,
- unsigned int val)
-{
- struct arm_smccc_res result;
- unsigned long offset = (unsigned long)context;
-
- arm_smccc_smc(INTEL_SIP_SMC_REG_WRITE, offset + reg, val, 0, 0,
- 0, 0, 0, &result);
-
- return (int)result.a0;
-}
-
-/**
- * s10_protected_reg_read
- * Read the status of a protected SMC register
- * @context: Not used.
- * @reg: Address of register
- * @value: Value read.
- * Return: INTEL_SIP_SMC_STATUS_OK (0) on success
- * INTEL_SIP_SMC_REG_ERROR on error
- * INTEL_SIP_SMC_RETURN_UNKNOWN_FUNCTION if not supported
- */
-static int s10_protected_reg_read(void *context, unsigned int reg,
- unsigned int *val)
-{
- struct arm_smccc_res result;
- unsigned long offset = (unsigned long)context;
-
- arm_smccc_smc(INTEL_SIP_SMC_REG_READ, offset + reg, 0, 0, 0,
- 0, 0, 0, &result);
-
- *val = (unsigned int)result.a1;
-
- return (int)result.a0;
-}
-
-static const struct regmap_config s10_sdram_regmap_cfg = {
- .name = "s10_ddr",
- .reg_bits = 32,
- .reg_stride = 4,
- .val_bits = 32,
- .max_register = 0xffd12228,
- .reg_read = s10_protected_reg_read,
- .reg_write = s10_protected_reg_write,
- .use_single_read = true,
- .use_single_write = true,
-};
-
-/************** </Stratix10 EDAC Memory Controller Functions> ***********/
-
/************************* EDAC Parent Probe *************************/
static const struct of_device_id altr_edac_device_of_match[];
@@ -1008,16 +946,6 @@ static int __maybe_unused altr_init_memory_port(void __iomem *ioaddr, int port)
return ret;
}
-static int socfpga_is_a10(void)
-{
- return of_machine_is_compatible("altr,socfpga-arria10");
-}
-
-static int socfpga_is_s10(void)
-{
- return of_machine_is_compatible("altr,socfpga-stratix10");
-}
-
static __init int __maybe_unused
altr_init_a10_ecc_block(struct device_node *np, u32 irq_mask,
u32 ecc_ctrl_en_mask, bool dual_port)
@@ -1033,34 +961,10 @@ altr_init_a10_ecc_block(struct device_node *np, u32 irq_mask,
/* Get the ECC Manager - parent of the device EDACs */
np_eccmgr = of_get_parent(np);
- if (socfpga_is_a10()) {
- ecc_mgr_map = syscon_regmap_lookup_by_phandle(np_eccmgr,
- "altr,sysmgr-syscon");
- } else {
- struct device_node *sysmgr_np;
- struct resource res;
- uintptr_t base;
-
- sysmgr_np = of_parse_phandle(np_eccmgr,
- "altr,sysmgr-syscon", 0);
- if (!sysmgr_np) {
- edac_printk(KERN_ERR, EDAC_DEVICE,
- "Unable to find altr,sysmgr-syscon\n");
- return -ENODEV;
- }
-
- if (of_address_to_resource(sysmgr_np, 0, &res)) {
- of_node_put(sysmgr_np);
- return -ENOMEM;
- }
+ ecc_mgr_map =
+ altr_sysmgr_regmap_lookup_by_phandle(np_eccmgr,
+ "altr,sysmgr-syscon");
- /* Need physical address for SMCC call */
- base = res.start;
-
- ecc_mgr_map = regmap_init(NULL, NULL, (void *)base,
- &s10_sdram_regmap_cfg);
- of_node_put(sysmgr_np);
- }
of_node_put(np_eccmgr);
if (IS_ERR(ecc_mgr_map)) {
edac_printk(KERN_ERR, EDAC_DEVICE,
@@ -1125,9 +1029,6 @@ static int __init __maybe_unused altr_init_a10_ecc_device_type(char *compat)
int irq;
struct device_node *child, *np;
- if (!socfpga_is_a10() && !socfpga_is_s10())
- return -ENODEV;
-
np = of_find_compatible_node(NULL, NULL,
"altr,socfpga-a10-ecc-manager");
if (!np) {
@@ -2178,33 +2079,9 @@ static int altr_edac_a10_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, edac);
INIT_LIST_HEAD(&edac->a10_ecc_devices);
- if (socfpga_is_a10()) {
- edac->ecc_mgr_map =
- syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
- "altr,sysmgr-syscon");
- } else {
- struct device_node *sysmgr_np;
- struct resource res;
- uintptr_t base;
-
- sysmgr_np = of_parse_phandle(pdev->dev.of_node,
- "altr,sysmgr-syscon", 0);
- if (!sysmgr_np) {
- edac_printk(KERN_ERR, EDAC_DEVICE,
- "Unable to find altr,sysmgr-syscon\n");
- return -ENODEV;
- }
-
- if (of_address_to_resource(sysmgr_np, 0, &res))
- return -ENOMEM;
-
- /* Need physical address for SMCC call */
- base = res.start;
-
- edac->ecc_mgr_map = devm_regmap_init(&pdev->dev, NULL,
- (void *)base,
- &s10_sdram_regmap_cfg);
- }
+ edac->ecc_mgr_map =
+ altr_sysmgr_regmap_lookup_by_phandle(pdev->dev.of_node,
+ "altr,sysmgr-syscon");
if (IS_ERR(edac->ecc_mgr_map)) {
edac_printk(KERN_ERR, EDAC_DEVICE,
@@ -2270,18 +2147,7 @@ static int altr_edac_a10_probe(struct platform_device *pdev)
if (!of_device_is_available(child))
continue;
- if (of_device_is_compatible(child, "altr,socfpga-a10-l2-ecc") ||
- of_device_is_compatible(child, "altr,socfpga-a10-ocram-ecc") ||
- of_device_is_compatible(child, "altr,socfpga-eth-mac-ecc") ||
- of_device_is_compatible(child, "altr,socfpga-nand-ecc") ||
- of_device_is_compatible(child, "altr,socfpga-dma-ecc") ||
- of_device_is_compatible(child, "altr,socfpga-usb-ecc") ||
- of_device_is_compatible(child, "altr,socfpga-qspi-ecc") ||
-#ifdef CONFIG_EDAC_ALTERA_SDRAM
- of_device_is_compatible(child, "altr,sdram-edac-s10") ||
-#endif
- of_device_is_compatible(child, "altr,socfpga-sdmmc-ecc"))
-
+ if (of_match_node(altr_edac_a10_device_of_match, child))
altr_edac_a10_device_add(edac, child);
#ifdef CONFIG_EDAC_ALTERA_SDRAM
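
The of_match_node() conversion above swaps a long of_device_is_compatible() chain for a lookup against the driver's existing match table. The idiom in isolation, with a made-up two-entry table:

#include <linux/mod_devicetable.h>
#include <linux/of.h>

static const struct of_device_id example_ecc_of_match[] = {
	{ .compatible = "altr,socfpga-a10-l2-ecc" },
	{ .compatible = "altr,socfpga-a10-ocram-ecc" },
	{ /* sentinel */ }
};

/* of_match_node() walks the table and returns the matching entry,
 * or NULL when the node matches none of them. */
static bool example_child_is_ecc(struct device_node *child)
{
	return of_match_node(example_ecc_of_match, child) != NULL;
}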
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index c1d4536ae466..428ce98f6776 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -16,12 +16,11 @@ module_param(ecc_enable_override, int, 0644);
static struct msr __percpu *msrs;
+static struct amd64_family_type *fam_type;
+
/* Per-node stuff */
static struct ecc_settings **ecc_stngs;
-/* Number of Unified Memory Controllers */
-static u8 num_umcs;
-
/*
* Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing
* bandwidth to a valid bit pattern. The 'set' operation finds the 'matching-
@@ -454,7 +453,7 @@ static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct,
for (i = 0; i < pvt->csels[dct].m_cnt; i++)
#define for_each_umc(i) \
- for (i = 0; i < num_umcs; i++)
+ for (i = 0; i < fam_type->max_mcs; i++)
/*
* @input_addr is an InputAddr associated with the node given by mci. Return the
@@ -2224,6 +2223,7 @@ static struct amd64_family_type family_types[] = {
.ctl_name = "K8",
.f1_id = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
.f2_id = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
+ .max_mcs = 2,
.ops = {
.early_channel_count = k8_early_channel_count,
.map_sysaddr_to_csrow = k8_map_sysaddr_to_csrow,
@@ -2234,6 +2234,7 @@ static struct amd64_family_type family_types[] = {
.ctl_name = "F10h",
.f1_id = PCI_DEVICE_ID_AMD_10H_NB_MAP,
.f2_id = PCI_DEVICE_ID_AMD_10H_NB_DRAM,
+ .max_mcs = 2,
.ops = {
.early_channel_count = f1x_early_channel_count,
.map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
@@ -2244,6 +2245,7 @@ static struct amd64_family_type family_types[] = {
.ctl_name = "F15h",
.f1_id = PCI_DEVICE_ID_AMD_15H_NB_F1,
.f2_id = PCI_DEVICE_ID_AMD_15H_NB_F2,
+ .max_mcs = 2,
.ops = {
.early_channel_count = f1x_early_channel_count,
.map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
@@ -2254,6 +2256,7 @@ static struct amd64_family_type family_types[] = {
.ctl_name = "F15h_M30h",
.f1_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F1,
.f2_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F2,
+ .max_mcs = 2,
.ops = {
.early_channel_count = f1x_early_channel_count,
.map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
@@ -2264,6 +2267,7 @@ static struct amd64_family_type family_types[] = {
.ctl_name = "F15h_M60h",
.f1_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F1,
.f2_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F2,
+ .max_mcs = 2,
.ops = {
.early_channel_count = f1x_early_channel_count,
.map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
@@ -2274,6 +2278,7 @@ static struct amd64_family_type family_types[] = {
.ctl_name = "F16h",
.f1_id = PCI_DEVICE_ID_AMD_16H_NB_F1,
.f2_id = PCI_DEVICE_ID_AMD_16H_NB_F2,
+ .max_mcs = 2,
.ops = {
.early_channel_count = f1x_early_channel_count,
.map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
@@ -2284,6 +2289,7 @@ static struct amd64_family_type family_types[] = {
.ctl_name = "F16h_M30h",
.f1_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F1,
.f2_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F2,
+ .max_mcs = 2,
.ops = {
.early_channel_count = f1x_early_channel_count,
.map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
@@ -2294,6 +2300,7 @@ static struct amd64_family_type family_types[] = {
.ctl_name = "F17h",
.f0_id = PCI_DEVICE_ID_AMD_17H_DF_F0,
.f6_id = PCI_DEVICE_ID_AMD_17H_DF_F6,
+ .max_mcs = 2,
.ops = {
.early_channel_count = f17_early_channel_count,
.dbam_to_cs = f17_addr_mask_to_cs_size,
@@ -2303,6 +2310,7 @@ static struct amd64_family_type family_types[] = {
.ctl_name = "F17h_M10h",
.f0_id = PCI_DEVICE_ID_AMD_17H_M10H_DF_F0,
.f6_id = PCI_DEVICE_ID_AMD_17H_M10H_DF_F6,
+ .max_mcs = 2,
.ops = {
.early_channel_count = f17_early_channel_count,
.dbam_to_cs = f17_addr_mask_to_cs_size,
@@ -2312,6 +2320,7 @@ static struct amd64_family_type family_types[] = {
.ctl_name = "F17h_M30h",
.f0_id = PCI_DEVICE_ID_AMD_17H_M30H_DF_F0,
.f6_id = PCI_DEVICE_ID_AMD_17H_M30H_DF_F6,
+ .max_mcs = 8,
.ops = {
.early_channel_count = f17_early_channel_count,
.dbam_to_cs = f17_addr_mask_to_cs_size,
@@ -2321,6 +2330,7 @@ static struct amd64_family_type family_types[] = {
.ctl_name = "F17h_M70h",
.f0_id = PCI_DEVICE_ID_AMD_17H_M70H_DF_F0,
.f6_id = PCI_DEVICE_ID_AMD_17H_M70H_DF_F6,
+ .max_mcs = 2,
.ops = {
.early_channel_count = f17_early_channel_count,
.dbam_to_cs = f17_addr_mask_to_cs_size,
@@ -2838,8 +2848,6 @@ skip:
edac_dbg(1, " DIMM type: %s\n", edac_mem_types[pvt->dram_type]);
determine_ecc_sym_sz(pvt);
-
- dump_misc_regs(pvt);
}
/*
@@ -2936,6 +2944,7 @@ static int init_csrows_df(struct mem_ctl_info *mci)
dimm->mtype = pvt->dram_type;
dimm->edac_mode = edac_mode;
dimm->dtype = dev_type;
+ dimm->grain = 64;
}
}
@@ -3012,6 +3021,7 @@ static int init_csrows(struct mem_ctl_info *mci)
dimm = csrow->channels[j]->dimm;
dimm->mtype = pvt->dram_type;
dimm->edac_mode = edac_mode;
+ dimm->grain = 64;
}
}
@@ -3178,43 +3188,27 @@ static void restore_ecc_error_reporting(struct ecc_settings *s, u16 nid,
amd64_warn("Error restoring NB MCGCTL settings!\n");
}
-/*
- * EDAC requires that the BIOS have ECC enabled before
- * taking over the processing of ECC errors. A command line
- * option allows to force-enable hardware ECC later in
- * enable_ecc_error_reporting().
- */
-static const char *ecc_msg =
- "ECC disabled in the BIOS or no ECC capability, module will not load.\n"
- " Either enable ECC checking or force module loading by setting "
- "'ecc_enable_override'.\n"
- " (Note that use of the override may cause unknown side effects.)\n";
-
-static bool ecc_enabled(struct pci_dev *F3, u16 nid)
+static bool ecc_enabled(struct amd64_pvt *pvt)
{
+ u16 nid = pvt->mc_node_id;
bool nb_mce_en = false;
u8 ecc_en = 0, i;
u32 value;
if (boot_cpu_data.x86 >= 0x17) {
u8 umc_en_mask = 0, ecc_en_mask = 0;
+ struct amd64_umc *umc;
for_each_umc(i) {
- u32 base = get_umc_base(i);
+ umc = &pvt->umc[i];
/* Only check enabled UMCs. */
- if (amd_smn_read(nid, base + UMCCH_SDP_CTRL, &value))
- continue;
-
- if (!(value & UMC_SDP_INIT))
+ if (!(umc->sdp_ctrl & UMC_SDP_INIT))
continue;
umc_en_mask |= BIT(i);
- if (amd_smn_read(nid, base + UMCCH_UMC_CAP_HI, &value))
- continue;
-
- if (value & UMC_ECC_ENABLED)
+ if (umc->umc_cap_hi & UMC_ECC_ENABLED)
ecc_en_mask |= BIT(i);
}
@@ -3227,7 +3221,7 @@ static bool ecc_enabled(struct pci_dev *F3, u16 nid)
/* Assume UMC MCA banks are enabled. */
nb_mce_en = true;
} else {
- amd64_read_pci_cfg(F3, NBCFG, &value);
+ amd64_read_pci_cfg(pvt->F3, NBCFG, &value);
ecc_en = !!(value & NBCFG_ECC_ENABLE);
@@ -3240,11 +3234,10 @@ static bool ecc_enabled(struct pci_dev *F3, u16 nid)
amd64_info("Node %d: DRAM ECC %s.\n",
nid, (ecc_en ? "enabled" : "disabled"));
- if (!ecc_en || !nb_mce_en) {
- amd64_info("%s", ecc_msg);
+ if (!ecc_en || !nb_mce_en)
return false;
- }
- return true;
+ else
+ return true;
}
static inline void
@@ -3278,8 +3271,7 @@ f17h_determine_edac_ctl_cap(struct mem_ctl_info *mci, struct amd64_pvt *pvt)
}
}
-static void setup_mci_misc_attrs(struct mem_ctl_info *mci,
- struct amd64_family_type *fam)
+static void setup_mci_misc_attrs(struct mem_ctl_info *mci)
{
struct amd64_pvt *pvt = mci->pvt_info;
@@ -3298,7 +3290,7 @@ static void setup_mci_misc_attrs(struct mem_ctl_info *mci,
mci->edac_cap = determine_edac_cap(pvt);
mci->mod_name = EDAC_MOD_STR;
- mci->ctl_name = fam->ctl_name;
+ mci->ctl_name = fam_type->ctl_name;
mci->dev_name = pci_name(pvt->F3);
mci->ctl_page_to_phys = NULL;
@@ -3312,8 +3304,6 @@ static void setup_mci_misc_attrs(struct mem_ctl_info *mci,
*/
static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
{
- struct amd64_family_type *fam_type = NULL;
-
pvt->ext_model = boot_cpu_data.x86_model >> 4;
pvt->stepping = boot_cpu_data.x86_stepping;
pvt->model = boot_cpu_data.x86_model;
@@ -3401,51 +3391,15 @@ static const struct attribute_group *amd64_edac_attr_groups[] = {
NULL
};
-/* Set the number of Unified Memory Controllers in the system. */
-static void compute_num_umcs(void)
-{
- u8 model = boot_cpu_data.x86_model;
-
- if (boot_cpu_data.x86 < 0x17)
- return;
-
- if (model >= 0x30 && model <= 0x3f)
- num_umcs = 8;
- else
- num_umcs = 2;
-
- edac_dbg(1, "Number of UMCs: %x", num_umcs);
-}
-
-static int init_one_instance(unsigned int nid)
+static int hw_info_get(struct amd64_pvt *pvt)
{
- struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
- struct amd64_family_type *fam_type = NULL;
- struct mem_ctl_info *mci = NULL;
- struct edac_mc_layer layers[2];
- struct amd64_pvt *pvt = NULL;
u16 pci_id1, pci_id2;
- int err = 0, ret;
-
- ret = -ENOMEM;
- pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
- if (!pvt)
- goto err_ret;
-
- pvt->mc_node_id = nid;
- pvt->F3 = F3;
-
- ret = -EINVAL;
- fam_type = per_family_init(pvt);
- if (!fam_type)
- goto err_free;
+ int ret = -EINVAL;
if (pvt->fam >= 0x17) {
- pvt->umc = kcalloc(num_umcs, sizeof(struct amd64_umc), GFP_KERNEL);
- if (!pvt->umc) {
- ret = -ENOMEM;
- goto err_free;
- }
+ pvt->umc = kcalloc(fam_type->max_mcs, sizeof(struct amd64_umc), GFP_KERNEL);
+ if (!pvt->umc)
+ return -ENOMEM;
pci_id1 = fam_type->f0_id;
pci_id2 = fam_type->f6_id;
@@ -3454,21 +3408,37 @@ static int init_one_instance(unsigned int nid)
pci_id2 = fam_type->f2_id;
}
- err = reserve_mc_sibling_devs(pvt, pci_id1, pci_id2);
- if (err)
- goto err_post_init;
+ ret = reserve_mc_sibling_devs(pvt, pci_id1, pci_id2);
+ if (ret)
+ return ret;
read_mc_regs(pvt);
+ return 0;
+}
+
+static void hw_info_put(struct amd64_pvt *pvt)
+{
+ if (pvt->F0 || pvt->F1)
+ free_mc_sibling_devs(pvt);
+
+ kfree(pvt->umc);
+}
+
+static int init_one_instance(struct amd64_pvt *pvt)
+{
+ struct mem_ctl_info *mci = NULL;
+ struct edac_mc_layer layers[2];
+ int ret = -EINVAL;
+
/*
* We need to determine how many memory channels there are. Then use
* that information for calculating the size of the dynamic instance
* tables in the 'mci' structure.
*/
- ret = -EINVAL;
pvt->channel_count = pvt->ops->early_channel_count(pvt);
if (pvt->channel_count < 0)
- goto err_siblings;
+ return ret;
ret = -ENOMEM;
layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
@@ -3480,24 +3450,18 @@ static int init_one_instance(unsigned int nid)
* Always allocate two channels since we can have setups with DIMMs on
* only one channel. Also, this simplifies handling later for the price
* of a couple of KBs tops.
- *
- * On Fam17h+, the number of controllers may be greater than two. So set
- * the size equal to the maximum number of UMCs.
*/
- if (pvt->fam >= 0x17)
- layers[1].size = num_umcs;
- else
- layers[1].size = 2;
+ layers[1].size = fam_type->max_mcs;
layers[1].is_virt_csrow = false;
- mci = edac_mc_alloc(nid, ARRAY_SIZE(layers), layers, 0);
+ mci = edac_mc_alloc(pvt->mc_node_id, ARRAY_SIZE(layers), layers, 0);
if (!mci)
- goto err_siblings;
+ return ret;
mci->pvt_info = pvt;
mci->pdev = &pvt->F3->dev;
- setup_mci_misc_attrs(mci, fam_type);
+ setup_mci_misc_attrs(mci);
if (init_csrows(mci))
mci->edac_cap = EDAC_FLAG_NONE;
@@ -3505,31 +3469,30 @@ static int init_one_instance(unsigned int nid)
ret = -ENODEV;
if (edac_mc_add_mc_with_groups(mci, amd64_edac_attr_groups)) {
edac_dbg(1, "failed edac_mc_add_mc()\n");
- goto err_add_mc;
+ edac_mc_free(mci);
+ return ret;
}
return 0;
+}
-err_add_mc:
- edac_mc_free(mci);
-
-err_siblings:
- free_mc_sibling_devs(pvt);
-
-err_post_init:
- if (pvt->fam >= 0x17)
- kfree(pvt->umc);
+static bool instance_has_memory(struct amd64_pvt *pvt)
+{
+ bool cs_enabled = false;
+ int cs = 0, dct = 0;
-err_free:
- kfree(pvt);
+ for (dct = 0; dct < fam_type->max_mcs; dct++) {
+ for_each_chip_select(cs, dct, pvt)
+ cs_enabled |= csrow_enabled(cs, dct, pvt);
+ }
-err_ret:
- return ret;
+ return cs_enabled;
}
static int probe_one_instance(unsigned int nid)
{
struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
+ struct amd64_pvt *pvt = NULL;
struct ecc_settings *s;
int ret;
@@ -3540,8 +3503,29 @@ static int probe_one_instance(unsigned int nid)
ecc_stngs[nid] = s;
- if (!ecc_enabled(F3, nid)) {
- ret = 0;
+ pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
+ if (!pvt)
+ goto err_settings;
+
+ pvt->mc_node_id = nid;
+ pvt->F3 = F3;
+
+ fam_type = per_family_init(pvt);
+ if (!fam_type)
+ goto err_enable;
+
+ ret = hw_info_get(pvt);
+ if (ret < 0)
+ goto err_enable;
+
+ ret = 0;
+ if (!instance_has_memory(pvt)) {
+ amd64_info("Node %d: No DIMMs detected.\n", nid);
+ goto err_enable;
+ }
+
+ if (!ecc_enabled(pvt)) {
+ ret = -ENODEV;
if (!ecc_enable_override)
goto err_enable;
@@ -3556,7 +3540,7 @@ static int probe_one_instance(unsigned int nid)
goto err_enable;
}
- ret = init_one_instance(nid);
+ ret = init_one_instance(pvt);
if (ret < 0) {
amd64_err("Error probing instance: %d\n", nid);
@@ -3566,9 +3550,15 @@ static int probe_one_instance(unsigned int nid)
goto err_enable;
}
+ dump_misc_regs(pvt);
+
return ret;
err_enable:
+ hw_info_put(pvt);
+ kfree(pvt);
+
+err_settings:
kfree(s);
ecc_stngs[nid] = NULL;
@@ -3595,14 +3585,13 @@ static void remove_one_instance(unsigned int nid)
restore_ecc_error_reporting(s, nid, F3);
- free_mc_sibling_devs(pvt);
-
kfree(ecc_stngs[nid]);
ecc_stngs[nid] = NULL;
/* Free the EDAC CORE resources */
mci->pvt_info = NULL;
+ hw_info_put(pvt);
kfree(pvt);
edac_mc_free(mci);
}
@@ -3668,8 +3657,6 @@ static int __init amd64_edac_init(void)
if (!msrs)
goto err_free;
- compute_num_umcs();
-
for (i = 0; i < amd_nb_num(); i++) {
err = probe_one_instance(i);
if (err) {
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index 8c3cda81e619..9be31688110b 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -479,6 +479,8 @@ struct low_ops {
struct amd64_family_type {
const char *ctl_name;
u16 f0_id, f1_id, f2_id, f6_id;
+ /* Maximum number of memory controllers per die/node. */
+ u8 max_mcs;
struct low_ops ops;
};
diff --git a/drivers/edac/aspeed_edac.c b/drivers/edac/aspeed_edac.c
index 5634437bb39d..09a9e3de9595 100644
--- a/drivers/edac/aspeed_edac.c
+++ b/drivers/edac/aspeed_edac.c
@@ -281,16 +281,11 @@ static int aspeed_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct edac_mc_layer layers[2];
struct mem_ctl_info *mci;
- struct resource *res;
void __iomem *regs;
u32 reg04;
int rc;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENOENT;
-
- regs = devm_ioremap_resource(dev, res);
+ regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(regs))
return PTR_ERR(regs);
diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
index 65cf2b9355c4..8c4d947fb848 100644
--- a/drivers/edac/edac_device.c
+++ b/drivers/edac/edac_device.c
@@ -555,12 +555,16 @@ static inline int edac_device_get_panic_on_ue(struct edac_device_ctl_info
return edac_dev->panic_on_ue;
}
-void edac_device_handle_ce(struct edac_device_ctl_info *edac_dev,
- int inst_nr, int block_nr, const char *msg)
+void edac_device_handle_ce_count(struct edac_device_ctl_info *edac_dev,
+ unsigned int count, int inst_nr, int block_nr,
+ const char *msg)
{
struct edac_device_instance *instance;
struct edac_device_block *block = NULL;
+ if (!count)
+ return;
+
if ((inst_nr >= edac_dev->nr_instances) || (inst_nr < 0)) {
edac_device_printk(edac_dev, KERN_ERR,
"INTERNAL ERROR: 'instance' out of range "
@@ -582,27 +586,31 @@ void edac_device_handle_ce(struct edac_device_ctl_info *edac_dev,
if (instance->nr_blocks > 0) {
block = instance->blocks + block_nr;
- block->counters.ce_count++;
+ block->counters.ce_count += count;
}
/* Propagate the count up the 'totals' tree */
- instance->counters.ce_count++;
- edac_dev->counters.ce_count++;
+ instance->counters.ce_count += count;
+ edac_dev->counters.ce_count += count;
if (edac_device_get_log_ce(edac_dev))
edac_device_printk(edac_dev, KERN_WARNING,
- "CE: %s instance: %s block: %s '%s'\n",
- edac_dev->ctl_name, instance->name,
- block ? block->name : "N/A", msg);
+ "CE: %s instance: %s block: %s count: %d '%s'\n",
+ edac_dev->ctl_name, instance->name,
+ block ? block->name : "N/A", count, msg);
}
-EXPORT_SYMBOL_GPL(edac_device_handle_ce);
+EXPORT_SYMBOL_GPL(edac_device_handle_ce_count);
-void edac_device_handle_ue(struct edac_device_ctl_info *edac_dev,
- int inst_nr, int block_nr, const char *msg)
+void edac_device_handle_ue_count(struct edac_device_ctl_info *edac_dev,
+ unsigned int count, int inst_nr, int block_nr,
+ const char *msg)
{
struct edac_device_instance *instance;
struct edac_device_block *block = NULL;
+ if (!count)
+ return;
+
if ((inst_nr >= edac_dev->nr_instances) || (inst_nr < 0)) {
edac_device_printk(edac_dev, KERN_ERR,
"INTERNAL ERROR: 'instance' out of range "
@@ -624,22 +632,22 @@ void edac_device_handle_ue(struct edac_device_ctl_info *edac_dev,
if (instance->nr_blocks > 0) {
block = instance->blocks + block_nr;
- block->counters.ue_count++;
+ block->counters.ue_count += count;
}
/* Propagate the count up the 'totals' tree */
- instance->counters.ue_count++;
- edac_dev->counters.ue_count++;
+ instance->counters.ue_count += count;
+ edac_dev->counters.ue_count += count;
if (edac_device_get_log_ue(edac_dev))
edac_device_printk(edac_dev, KERN_EMERG,
- "UE: %s instance: %s block: %s '%s'\n",
- edac_dev->ctl_name, instance->name,
- block ? block->name : "N/A", msg);
+ "UE: %s instance: %s block: %s count: %d '%s'\n",
+ edac_dev->ctl_name, instance->name,
+ block ? block->name : "N/A", count, msg);
if (edac_device_get_panic_on_ue(edac_dev))
- panic("EDAC %s: UE instance: %s block %s '%s'\n",
- edac_dev->ctl_name, instance->name,
- block ? block->name : "N/A", msg);
+ panic("EDAC %s: UE instance: %s block %s count: %d '%s'\n",
+ edac_dev->ctl_name, instance->name,
+ block ? block->name : "N/A", count, msg);
}
-EXPORT_SYMBOL_GPL(edac_device_handle_ue);
+EXPORT_SYMBOL_GPL(edac_device_handle_ue_count);
diff --git a/drivers/edac/edac_device.h b/drivers/edac/edac_device.h
index 1aaba74ae411..c4c0e0bdce14 100644
--- a/drivers/edac/edac_device.h
+++ b/drivers/edac/edac_device.h
@@ -286,27 +286,60 @@ extern int edac_device_add_device(struct edac_device_ctl_info *edac_dev);
extern struct edac_device_ctl_info *edac_device_del_device(struct device *dev);
/**
- * edac_device_handle_ue():
- * perform a common output and handling of an 'edac_dev' UE event
+ * Log correctable errors.
*
* @edac_dev: pointer to struct &edac_device_ctl_info
- * @inst_nr: number of the instance where the UE error happened
- * @block_nr: number of the block where the UE error happened
+ * @inst_nr: number of the instance where the CE error happened
+ * @count: Number of errors to log.
+ * @block_nr: number of the block where the CE error happened
+ * @msg: message to be printed
+ */
+void edac_device_handle_ce_count(struct edac_device_ctl_info *edac_dev,
+ unsigned int count, int inst_nr, int block_nr,
+ const char *msg);
+
+/**
+ * Log uncorrectable errors.
+ *
+ * @edac_dev: pointer to struct &edac_device_ctl_info
+ * @inst_nr: number of the instance where the CE error happened
+ * @count: Number of errors to log.
+ * @block_nr: number of the block where the CE error happened
* @msg: message to be printed
*/
-extern void edac_device_handle_ue(struct edac_device_ctl_info *edac_dev,
- int inst_nr, int block_nr, const char *msg);
+void edac_device_handle_ue_count(struct edac_device_ctl_info *edac_dev,
+ unsigned int count, int inst_nr, int block_nr,
+ const char *msg);
+
/**
- * edac_device_handle_ce():
- * perform a common output and handling of an 'edac_dev' CE event
+ * edac_device_handle_ce(): Log a single correctable error
*
* @edac_dev: pointer to struct &edac_device_ctl_info
* @inst_nr: number of the instance where the CE error happened
* @block_nr: number of the block where the CE error happened
* @msg: message to be printed
*/
-extern void edac_device_handle_ce(struct edac_device_ctl_info *edac_dev,
- int inst_nr, int block_nr, const char *msg);
+static inline void
+edac_device_handle_ce(struct edac_device_ctl_info *edac_dev, int inst_nr,
+ int block_nr, const char *msg)
+{
+ edac_device_handle_ce_count(edac_dev, 1, inst_nr, block_nr, msg);
+}
+
+/**
+ * edac_device_handle_ue(): Log a single uncorrectable error
+ *
+ * @edac_dev: pointer to struct &edac_device_ctl_info
+ * @inst_nr: number of the instance where the UE error happened
+ * @block_nr: number of the block where the UE error happened
+ * @msg: message to be printed
+ */
+static inline void
+edac_device_handle_ue(struct edac_device_ctl_info *edac_dev, int inst_nr,
+ int block_nr, const char *msg)
+{
+ edac_device_handle_ue_count(edac_dev, 1, inst_nr, block_nr, msg);
+}
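+
+/*
+ * Usage sketch (hypothetical caller): a driver that polls its hardware and
+ * finds several identical correctable errors can report them in one call:
+ *
+ *	edac_device_handle_ce_count(edac_dev, nr_errors, 0, 0, "ECC CE");
+ *
+ * which advances the block, instance and controller counters by nr_errors.
+ */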
/**
* edac_device_alloc_index: Allocate a unique device index number
@@ -316,5 +349,4 @@ extern void edac_device_handle_ce(struct edac_device_ctl_info *edac_dev,
*/
extern int edac_device_alloc_index(void);
extern const char *edac_layer_name[];
-
#endif
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index e6fd079783bd..7243b88f81d8 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -145,15 +145,18 @@ static void edac_mc_dump_channel(struct rank_info *chan)
edac_dbg(4, " channel->dimm = %p\n", chan->dimm);
}
-static void edac_mc_dump_dimm(struct dimm_info *dimm, int number)
+static void edac_mc_dump_dimm(struct dimm_info *dimm)
{
char location[80];
+ if (!dimm->nr_pages)
+ return;
+
edac_dimm_info_location(dimm, location, sizeof(location));
edac_dbg(4, "%s%i: %smapped as virtual row %d, chan %d\n",
dimm->mci->csbased ? "rank" : "dimm",
- number, location, dimm->csrow, dimm->cschannel);
+ dimm->idx, location, dimm->csrow, dimm->cschannel);
edac_dbg(4, " dimm = %p\n", dimm);
edac_dbg(4, " dimm->label = '%s'\n", dimm->label);
edac_dbg(4, " dimm->nr_pages = 0x%x\n", dimm->nr_pages);
@@ -314,25 +317,28 @@ struct mem_ctl_info *edac_mc_alloc(unsigned int mc_num,
struct dimm_info *dimm;
u32 *ce_per_layer[EDAC_MAX_LAYERS], *ue_per_layer[EDAC_MAX_LAYERS];
unsigned int pos[EDAC_MAX_LAYERS];
- unsigned int size, tot_dimms = 1, count = 1;
+ unsigned int idx, size, tot_dimms = 1, count = 1;
unsigned int tot_csrows = 1, tot_channels = 1, tot_errcount = 0;
void *pvt, *p, *ptr = NULL;
- int i, j, row, chn, n, len, off;
+ int i, j, row, chn, n, len;
bool per_rank = false;
- BUG_ON(n_layers > EDAC_MAX_LAYERS || n_layers == 0);
+ if (WARN_ON(n_layers > EDAC_MAX_LAYERS || n_layers == 0))
+ return NULL;
+
/*
 * Calculate the total number of dimms and csrows/cschannels while
* in the old API emulation mode
*/
- for (i = 0; i < n_layers; i++) {
- tot_dimms *= layers[i].size;
- if (layers[i].is_virt_csrow)
- tot_csrows *= layers[i].size;
+ for (idx = 0; idx < n_layers; idx++) {
+ tot_dimms *= layers[idx].size;
+
+ if (layers[idx].is_virt_csrow)
+ tot_csrows *= layers[idx].size;
else
- tot_channels *= layers[i].size;
+ tot_channels *= layers[idx].size;
- if (layers[i].type == EDAC_MC_LAYER_CHIP_SELECT)
+ if (layers[idx].type == EDAC_MC_LAYER_CHIP_SELECT)
per_rank = true;
}
@@ -425,19 +431,15 @@ struct mem_ctl_info *edac_mc_alloc(unsigned int mc_num,
memset(&pos, 0, sizeof(pos));
row = 0;
chn = 0;
- for (i = 0; i < tot_dimms; i++) {
+ for (idx = 0; idx < tot_dimms; idx++) {
chan = mci->csrows[row]->channels[chn];
- off = EDAC_DIMM_OFF(layer, n_layers, pos[0], pos[1], pos[2]);
- if (off < 0 || off >= tot_dimms) {
- edac_mc_printk(mci, KERN_ERR, "EDAC core bug: EDAC_DIMM_OFF is trying to do an illegal data access\n");
- goto error;
- }
dimm = kzalloc(sizeof(**mci->dimms), GFP_KERNEL);
if (!dimm)
goto error;
- mci->dimms[off] = dimm;
+ mci->dimms[idx] = dimm;
dimm->mci = mci;
+ dimm->idx = idx;
/*
* Copy DIMM location and initialize it.
@@ -714,6 +716,7 @@ int edac_mc_add_mc_with_groups(struct mem_ctl_info *mci,
edac_mc_dump_mci(mci);
if (edac_debug_level >= 4) {
+ struct dimm_info *dimm;
int i;
for (i = 0; i < mci->nr_csrows; i++) {
@@ -730,9 +733,9 @@ int edac_mc_add_mc_with_groups(struct mem_ctl_info *mci,
if (csrow->channels[j]->dimm->nr_pages)
edac_mc_dump_channel(csrow->channels[j]);
}
- for (i = 0; i < mci->tot_dimms; i++)
- if (mci->dimms[i]->nr_pages)
- edac_mc_dump_dimm(mci->dimms[i], i);
+
+ mci_for_each_dimm(mci, dimm)
+ edac_mc_dump_dimm(dimm);
}
#endif
mutex_lock(&mem_ctls_mutex);
@@ -1055,6 +1058,21 @@ void edac_raw_mc_handle_error(const enum hw_event_mc_err_type type,
{
char detail[80];
int pos[EDAC_MAX_LAYERS] = { e->top_layer, e->mid_layer, e->low_layer };
+ u8 grain_bits;
+
+ /* Sanity-check driver-supplied grain value. */
+ if (WARN_ON_ONCE(!e->grain))
+ e->grain = 1;
+
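+	/*
+	 * For the usual power-of-two grain this yields log2(grain), the
+	 * number of address bits one error report covers; a non-power-of-two
+	 * grain is rounded up.
+	 */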
+ grain_bits = fls_long(e->grain - 1);
+
+ /* Report the error via the trace interface */
+ if (IS_ENABLED(CONFIG_RAS))
+ trace_mc_event(type, e->msg, e->label, e->error_count,
+ mci->mc_idx, e->top_layer, e->mid_layer,
+ e->low_layer,
+ (e->page_frame_number << PAGE_SHIFT) | e->offset_in_page,
+ grain_bits, e->syndrome, e->other_detail);
/* Memory type dependent details about the error */
if (type == HW_EVENT_ERR_CORRECTED) {
@@ -1090,11 +1108,11 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type,
const char *msg,
const char *other_detail)
{
+ struct dimm_info *dimm;
char *p;
int row = -1, chan = -1;
int pos[EDAC_MAX_LAYERS] = { top_layer, mid_layer, low_layer };
int i, n_labels = 0;
- u8 grain_bits;
struct edac_raw_error_desc *e = &mci->error_desc;
edac_dbg(3, "MC%d\n", mci->mc_idx);
@@ -1150,9 +1168,7 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type,
p = e->label;
*p = '\0';
- for (i = 0; i < mci->tot_dimms; i++) {
- struct dimm_info *dimm = mci->dimms[i];
-
+ mci_for_each_dimm(mci, dimm) {
if (top_layer >= 0 && top_layer != dimm->location[0])
continue;
if (mid_layer >= 0 && mid_layer != dimm->location[1])
@@ -1170,37 +1186,37 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type,
* channel/memory controller/... may be affected.
* Also, don't show errors for empty DIMM slots.
*/
- if (e->enable_per_layer_report && dimm->nr_pages) {
- if (n_labels >= EDAC_MAX_LABELS) {
- e->enable_per_layer_report = false;
- break;
- }
- n_labels++;
- if (p != e->label) {
- strcpy(p, OTHER_LABEL);
- p += strlen(OTHER_LABEL);
- }
- strcpy(p, dimm->label);
- p += strlen(p);
- *p = '\0';
+ if (!e->enable_per_layer_report || !dimm->nr_pages)
+ continue;
- /*
- * get csrow/channel of the DIMM, in order to allow
- * incrementing the compat API counters
- */
- edac_dbg(4, "%s csrows map: (%d,%d)\n",
- mci->csbased ? "rank" : "dimm",
- dimm->csrow, dimm->cschannel);
- if (row == -1)
- row = dimm->csrow;
- else if (row >= 0 && row != dimm->csrow)
- row = -2;
-
- if (chan == -1)
- chan = dimm->cschannel;
- else if (chan >= 0 && chan != dimm->cschannel)
- chan = -2;
+ if (n_labels >= EDAC_MAX_LABELS) {
+ e->enable_per_layer_report = false;
+ break;
+ }
+ n_labels++;
+ if (p != e->label) {
+ strcpy(p, OTHER_LABEL);
+ p += strlen(OTHER_LABEL);
}
+ strcpy(p, dimm->label);
+ p += strlen(p);
+
+ /*
+ * get csrow/channel of the DIMM, in order to allow
+ * incrementing the compat API counters
+ */
+ edac_dbg(4, "%s csrows map: (%d,%d)\n",
+ mci->csbased ? "rank" : "dimm",
+ dimm->csrow, dimm->cschannel);
+ if (row == -1)
+ row = dimm->csrow;
+ else if (row >= 0 && row != dimm->csrow)
+ row = -2;
+
+ if (chan == -1)
+ chan = dimm->cschannel;
+ else if (chan >= 0 && chan != dimm->cschannel)
+ chan = -2;
}
if (!e->enable_per_layer_report) {
@@ -1234,20 +1250,6 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type,
if (p > e->location)
*(p - 1) = '\0';
- /* Sanity-check driver-supplied grain value. */
- if (WARN_ON_ONCE(!e->grain))
- e->grain = 1;
-
- grain_bits = fls_long(e->grain - 1);
-
- /* Report the error via the trace interface */
- if (IS_ENABLED(CONFIG_RAS))
- trace_mc_event(type, e->msg, e->label, e->error_count,
- mci->mc_idx, e->top_layer, e->mid_layer,
- e->low_layer,
- (e->page_frame_number << PAGE_SHIFT) | e->offset_in_page,
- grain_bits, e->syndrome, e->other_detail);
-
edac_raw_mc_handle_error(type, mci, e);
}
EXPORT_SYMBOL_GPL(edac_mc_handle_error);
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index 32d016f1ecd1..0367554e7437 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -557,14 +557,8 @@ static ssize_t dimmdev_ce_count_show(struct device *dev,
{
struct dimm_info *dimm = to_dimm(dev);
u32 count;
- int off;
-
- off = EDAC_DIMM_OFF(dimm->mci->layers,
- dimm->mci->n_layers,
- dimm->location[0],
- dimm->location[1],
- dimm->location[2]);
- count = dimm->mci->ce_per_layer[dimm->mci->n_layers-1][off];
+
+ count = dimm->mci->ce_per_layer[dimm->mci->n_layers-1][dimm->idx];
return sprintf(data, "%u\n", count);
}
@@ -574,14 +568,8 @@ static ssize_t dimmdev_ue_count_show(struct device *dev,
{
struct dimm_info *dimm = to_dimm(dev);
u32 count;
- int off;
-
- off = EDAC_DIMM_OFF(dimm->mci->layers,
- dimm->mci->n_layers,
- dimm->location[0],
- dimm->location[1],
- dimm->location[2]);
- count = dimm->mci->ue_per_layer[dimm->mci->n_layers-1][off];
+
+ count = dimm->mci->ue_per_layer[dimm->mci->n_layers-1][dimm->idx];
return sprintf(data, "%u\n", count);
}
@@ -633,8 +621,7 @@ static const struct device_type dimm_attr_type = {
/* Create a DIMM object under specified memory controller device */
static int edac_create_dimm_object(struct mem_ctl_info *mci,
- struct dimm_info *dimm,
- int index)
+ struct dimm_info *dimm)
{
int err;
dimm->mci = mci;
@@ -644,9 +631,9 @@ static int edac_create_dimm_object(struct mem_ctl_info *mci,
dimm->dev.parent = &mci->dev;
if (mci->csbased)
- dev_set_name(&dimm->dev, "rank%d", index);
+ dev_set_name(&dimm->dev, "rank%d", dimm->idx);
else
- dev_set_name(&dimm->dev, "dimm%d", index);
+ dev_set_name(&dimm->dev, "dimm%d", dimm->idx);
dev_set_drvdata(&dimm->dev, dimm);
pm_runtime_forbid(&mci->dev);
@@ -928,7 +915,8 @@ static const struct device_type mci_attr_type = {
int edac_create_sysfs_mci_device(struct mem_ctl_info *mci,
const struct attribute_group **groups)
{
- int i, err;
+ struct dimm_info *dimm;
+ int err;
/* get the /sys/devices/system/edac subsys reference */
mci->dev.type = &mci_attr_type;
@@ -952,13 +940,12 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci,
/*
* Create the dimm/rank devices
*/
- for (i = 0; i < mci->tot_dimms; i++) {
- struct dimm_info *dimm = mci->dimms[i];
+ mci_for_each_dimm(mci, dimm) {
/* Only expose populated DIMMs */
if (!dimm->nr_pages)
continue;
- err = edac_create_dimm_object(mci, dimm, i);
+ err = edac_create_dimm_object(mci, dimm);
if (err)
goto fail_unregister_dimm;
}
@@ -973,12 +960,9 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci,
return 0;
fail_unregister_dimm:
- for (i--; i >= 0; i--) {
- struct dimm_info *dimm = mci->dimms[i];
- if (!dimm->nr_pages)
- continue;
-
- device_unregister(&dimm->dev);
+ mci_for_each_dimm(mci, dimm) {
+ if (device_is_registered(&dimm->dev))
+ device_unregister(&dimm->dev);
}
device_unregister(&mci->dev);
@@ -990,7 +974,7 @@ fail_unregister_dimm:
*/
void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
{
- int i;
+ struct dimm_info *dimm;
edac_dbg(0, "\n");
@@ -1001,8 +985,7 @@ void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
edac_delete_csrow_objects(mci);
#endif
- for (i = 0; i < mci->tot_dimms; i++) {
- struct dimm_info *dimm = mci->dimms[i];
+ mci_for_each_dimm(mci, dimm) {
if (dimm->nr_pages == 0)
continue;
edac_dbg(1, "unregistering device %s\n", dev_name(&dimm->dev));
diff --git a/drivers/edac/ghes_edac.c b/drivers/edac/ghes_edac.c
index 0bb62857ffb2..b99080d8a10c 100644
--- a/drivers/edac/ghes_edac.c
+++ b/drivers/edac/ghes_edac.c
@@ -21,14 +21,22 @@ struct ghes_edac_pvt {
struct mem_ctl_info *mci;
/* Buffers for the error handling routine */
- char detail_location[240];
- char other_detail[160];
+ char other_detail[400];
char msg[80];
};
-static atomic_t ghes_init = ATOMIC_INIT(0);
+static refcount_t ghes_refcount = REFCOUNT_INIT(0);
+
+/*
+ * Access to ghes_pvt must be protected by ghes_lock. The spinlock
+ * also provides the necessary (implicit) memory barrier for the SMP
+ * case to make the pointer visible on another CPU.
+ */
static struct ghes_edac_pvt *ghes_pvt;
+/* GHES registration mutex */
+static DEFINE_MUTEX(ghes_reg_mutex);
+
/*
* Sync with other, potentially concurrent callers of
* ghes_edac_report_mem_error(). We don't know what the
@@ -79,15 +87,15 @@ static void ghes_edac_count_dimms(const struct dmi_header *dh, void *arg)
(*num_dimm)++;
}
-static int get_dimm_smbios_index(u16 handle)
+static int get_dimm_smbios_index(struct mem_ctl_info *mci, u16 handle)
{
- struct mem_ctl_info *mci = ghes_pvt->mci;
- int i;
+ struct dimm_info *dimm;
- for (i = 0; i < mci->tot_dimms; i++) {
- if (mci->dimms[i]->smbios_handle == handle)
- return i;
+ mci_for_each_dimm(mci, dimm) {
+ if (dimm->smbios_handle == handle)
+ return dimm->idx;
}
+
return -1;
}
@@ -98,9 +106,7 @@ static void ghes_edac_dmidecode(const struct dmi_header *dh, void *arg)
if (dh->type == DMI_ENTRY_MEM_DEVICE) {
struct memdev_dmi_entry *entry = (struct memdev_dmi_entry *)dh;
- struct dimm_info *dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms,
- mci->n_layers,
- dimm_fill->count, 0, 0);
+ struct dimm_info *dimm = edac_get_dimm(mci, dimm_fill->count, 0, 0);
u16 rdr_mask = BIT(7) | BIT(13);
if (entry->size == 0xffff) {
@@ -198,13 +204,9 @@ void ghes_edac_report_mem_error(int sev, struct cper_sec_mem_err *mem_err)
enum hw_event_mc_err_type type;
struct edac_raw_error_desc *e;
struct mem_ctl_info *mci;
- struct ghes_edac_pvt *pvt = ghes_pvt;
+ struct ghes_edac_pvt *pvt;
unsigned long flags;
char *p;
- u8 grain_bits;
-
- if (!pvt)
- return;
/*
* We can do the locking below because GHES defers error processing
@@ -216,12 +218,17 @@ void ghes_edac_report_mem_error(int sev, struct cper_sec_mem_err *mem_err)
spin_lock_irqsave(&ghes_lock, flags);
+ pvt = ghes_pvt;
+ if (!pvt)
+ goto unlock;
+
mci = pvt->mci;
e = &mci->error_desc;
/* Cleans the error report buffer */
memset(e, 0, sizeof (*e));
e->error_count = 1;
+ e->grain = 1;
strcpy(e->label, "unknown label");
e->msg = pvt->msg;
e->other_detail = pvt->other_detail;
@@ -311,13 +318,13 @@ void ghes_edac_report_mem_error(int sev, struct cper_sec_mem_err *mem_err)
/* Error address */
if (mem_err->validation_bits & CPER_MEM_VALID_PA) {
- e->page_frame_number = mem_err->physical_addr >> PAGE_SHIFT;
- e->offset_in_page = mem_err->physical_addr & ~PAGE_MASK;
+ e->page_frame_number = PHYS_PFN(mem_err->physical_addr);
+ e->offset_in_page = offset_in_page(mem_err->physical_addr);
}
/* Error grain */
if (mem_err->validation_bits & CPER_MEM_VALID_PA_MASK)
- e->grain = ~(mem_err->physical_addr_mask & ~PAGE_MASK);
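+		/*
+		 * The low clear bits of the physical address mask encode the
+		 * granularity: ~mask + 1 is the size, in bytes, of the region
+		 * the reported address resolves to.
+		 */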
+ e->grain = ~mem_err->physical_addr_mask + 1;
/* Memory error location, mapped on e->location */
p = e->location;
@@ -348,7 +355,7 @@ void ghes_edac_report_mem_error(int sev, struct cper_sec_mem_err *mem_err)
p += sprintf(p, "DIMM DMI handle: 0x%.4x ",
mem_err->mem_dev_handle);
- index = get_dimm_smbios_index(mem_err->mem_dev_handle);
+ index = get_dimm_smbios_index(mci, mem_err->mem_dev_handle);
if (index >= 0) {
e->top_layer = index;
e->enable_per_layer_report = true;
@@ -360,6 +367,8 @@ void ghes_edac_report_mem_error(int sev, struct cper_sec_mem_err *mem_err)
/* All other fields are mapped on e->other_detail */
p = pvt->other_detail;
+ p += snprintf(p, sizeof(pvt->other_detail),
+ "APEI location: %s ", e->location);
if (mem_err->validation_bits & CPER_MEM_VALID_ERROR_STATUS) {
u64 status = mem_err->error_status;
@@ -433,16 +442,9 @@ void ghes_edac_report_mem_error(int sev, struct cper_sec_mem_err *mem_err)
if (p > pvt->other_detail)
*(p - 1) = '\0';
- /* Generate the trace event */
- grain_bits = fls_long(e->grain);
- snprintf(pvt->detail_location, sizeof(pvt->detail_location),
- "APEI location: %s %s", e->location, e->other_detail);
- trace_mc_event(type, e->msg, e->label, e->error_count,
- mci->mc_idx, e->top_layer, e->mid_layer, e->low_layer,
- (e->page_frame_number << PAGE_SHIFT) | e->offset_in_page,
- grain_bits, e->syndrome, pvt->detail_location);
-
edac_raw_mc_handle_error(type, mci, e);
+
+unlock:
spin_unlock_irqrestore(&ghes_lock, flags);
}
@@ -457,10 +459,12 @@ static struct acpi_platform_list plat_list[] = {
int ghes_edac_register(struct ghes *ghes, struct device *dev)
{
bool fake = false;
- int rc, num_dimm = 0;
+ int rc = 0, num_dimm = 0;
struct mem_ctl_info *mci;
+ struct ghes_edac_pvt *pvt;
struct edac_mc_layer layers[1];
struct ghes_edac_dimm_fill dimm_fill;
+ unsigned long flags;
int idx = -1;
if (IS_ENABLED(CONFIG_X86)) {
@@ -472,11 +476,14 @@ int ghes_edac_register(struct ghes *ghes, struct device *dev)
idx = 0;
}
+ /* finish another registration/unregistration instance first */
+ mutex_lock(&ghes_reg_mutex);
+
/*
* We have only one logical memory controller to which all DIMMs belong.
*/
- if (atomic_inc_return(&ghes_init) > 1)
- return 0;
+ if (refcount_inc_not_zero(&ghes_refcount))
+ goto unlock;
/* Get the number of DIMMs */
dmi_walk(ghes_edac_count_dimms, &num_dimm);
@@ -494,12 +501,13 @@ int ghes_edac_register(struct ghes *ghes, struct device *dev)
mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(struct ghes_edac_pvt));
if (!mci) {
pr_info("Can't allocate memory for EDAC data\n");
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto unlock;
}
- ghes_pvt = mci->pvt_info;
- ghes_pvt->ghes = ghes;
- ghes_pvt->mci = mci;
+ pvt = mci->pvt_info;
+ pvt->ghes = ghes;
+ pvt->mci = mci;
mci->pdev = dev;
mci->mtype_cap = MEM_FLAG_EMPTY;
@@ -527,8 +535,7 @@ int ghes_edac_register(struct ghes *ghes, struct device *dev)
dimm_fill.mci = mci;
dmi_walk(ghes_edac_dmidecode, &dimm_fill);
} else {
- struct dimm_info *dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms,
- mci->n_layers, 0, 0, 0);
+ struct dimm_info *dimm = edac_get_dimm(mci, 0, 0, 0);
dimm->nr_pages = 1;
dimm->grain = 128;
@@ -541,23 +548,48 @@ int ghes_edac_register(struct ghes *ghes, struct device *dev)
if (rc < 0) {
pr_info("Can't register at EDAC core\n");
edac_mc_free(mci);
- return -ENODEV;
+ rc = -ENODEV;
+ goto unlock;
}
- return 0;
+
+ spin_lock_irqsave(&ghes_lock, flags);
+ ghes_pvt = pvt;
+ spin_unlock_irqrestore(&ghes_lock, flags);
+
+ /* only set on success */
+ refcount_set(&ghes_refcount, 1);
+
+unlock:
+ mutex_unlock(&ghes_reg_mutex);
+
+ return rc;
}
void ghes_edac_unregister(struct ghes *ghes)
{
struct mem_ctl_info *mci;
+ unsigned long flags;
- if (!ghes_pvt)
- return;
+ mutex_lock(&ghes_reg_mutex);
- if (atomic_dec_return(&ghes_init))
- return;
+ if (!refcount_dec_and_test(&ghes_refcount))
+ goto unlock;
- mci = ghes_pvt->mci;
+ /*
+	 * Wait for the irq handler to finish.
+ */
+ spin_lock_irqsave(&ghes_lock, flags);
+ mci = ghes_pvt ? ghes_pvt->mci : NULL;
ghes_pvt = NULL;
- edac_mc_del_mc(mci->pdev);
- edac_mc_free(mci);
+ spin_unlock_irqrestore(&ghes_lock, flags);
+
+ if (!mci)
+ goto unlock;
+
+ mci = edac_mc_del_mc(mci->pdev);
+ if (mci)
+ edac_mc_free(mci);
+
+unlock:
+ mutex_unlock(&ghes_reg_mutex);
}
diff --git a/drivers/edac/i10nm_base.c b/drivers/edac/i10nm_base.c
index c370d5457e6b..059eccf0582b 100644
--- a/drivers/edac/i10nm_base.c
+++ b/drivers/edac/i10nm_base.c
@@ -154,8 +154,7 @@ static int i10nm_get_dimm_config(struct mem_ctl_info *mci)
ndimms = 0;
for (j = 0; j < I10NM_NUM_DIMMS; j++) {
- dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms,
- mci->n_layers, i, j, 0);
+ dimm = edac_get_dimm(mci, i, j, 0);
mtr = I10NM_GET_DIMMMTR(imc, i, j);
mcddrtcfg = I10NM_GET_MCDDRTCFG(imc, i, j);
edac_dbg(1, "dimmmtr 0x%x mcddrtcfg 0x%x (mc%d ch%d dimm%d)\n",
diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
index 299b441647cd..432b375a4075 100644
--- a/drivers/edac/i3200_edac.c
+++ b/drivers/edac/i3200_edac.c
@@ -392,8 +392,7 @@ static int i3200_probe1(struct pci_dev *pdev, int dev_idx)
unsigned long nr_pages;
for (j = 0; j < nr_channels; j++) {
- struct dimm_info *dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms,
- mci->n_layers, i, j, 0);
+ struct dimm_info *dimm = edac_get_dimm(mci, i, j, 0);
nr_pages = drb_to_nr_pages(drbs, stacked, j, i);
if (nr_pages == 0)
diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
index 078a7351bf05..1a6f69c859ab 100644
--- a/drivers/edac/i5000_edac.c
+++ b/drivers/edac/i5000_edac.c
@@ -1275,9 +1275,8 @@ static int i5000_init_csrows(struct mem_ctl_info *mci)
if (!MTR_DIMMS_PRESENT(mtr))
continue;
- dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers,
- channel / MAX_BRANCHES,
- channel % MAX_BRANCHES, slot);
+ dimm = edac_get_dimm(mci, channel / MAX_BRANCHES,
+ channel % MAX_BRANCHES, slot);
csrow_megs = pvt->dimm_info[slot][channel].megabytes;
dimm->grain = 8;
diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
index 251f2b692785..0ddc41e47a96 100644
--- a/drivers/edac/i5100_edac.c
+++ b/drivers/edac/i5100_edac.c
@@ -713,7 +713,6 @@ static int i5100_read_spd_byte(const struct mem_ctl_info *mci,
{
struct i5100_priv *priv = mci->pvt_info;
u16 w;
- unsigned long et;
pci_read_config_word(priv->mc, I5100_SPDDATA, &w);
if (i5100_spddata_busy(w))
@@ -724,7 +723,6 @@ static int i5100_read_spd_byte(const struct mem_ctl_info *mci,
0, 0));
/* wait up to 100ms */
- et = jiffies + HZ / 10;
udelay(100);
while (1) {
pci_read_config_word(priv->mc, I5100_SPDDATA, &w);
@@ -848,21 +846,17 @@ static void i5100_init_interleaving(struct pci_dev *pdev,
static void i5100_init_csrows(struct mem_ctl_info *mci)
{
- int i;
struct i5100_priv *priv = mci->pvt_info;
+ struct dimm_info *dimm;
- for (i = 0; i < mci->tot_dimms; i++) {
- struct dimm_info *dimm;
- const unsigned long npages = i5100_npages(mci, i);
- const unsigned int chan = i5100_csrow_to_chan(mci, i);
- const unsigned int rank = i5100_csrow_to_rank(mci, i);
+ mci_for_each_dimm(mci, dimm) {
+ const unsigned long npages = i5100_npages(mci, dimm->idx);
+ const unsigned int chan = i5100_csrow_to_chan(mci, dimm->idx);
+ const unsigned int rank = i5100_csrow_to_rank(mci, dimm->idx);
if (!npages)
continue;
- dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers,
- chan, rank, 0);
-
dimm->nr_pages = npages;
dimm->grain = 32;
dimm->dtype = (priv->mtr[chan][rank].width == 4) ?
diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
index 6f8bcdb9256a..f131c05ade9f 100644
--- a/drivers/edac/i5400_edac.c
+++ b/drivers/edac/i5400_edac.c
@@ -548,8 +548,8 @@ static void i5400_proccess_non_recoverable_info(struct mem_ctl_info *mci,
ras = nrec_ras(info);
cas = nrec_cas(info);
- edac_dbg(0, "\t\tDIMM= %d Channels= %d,%d (Branch= %d DRAM Bank= %d Buffer ID = %d rdwr= %s ras= %d cas= %d)\n",
- rank, channel, channel + 1, branch >> 1, bank,
+ edac_dbg(0, "\t\t%s DIMM= %d Channels= %d,%d (Branch= %d DRAM Bank= %d Buffer ID = %d rdwr= %s ras= %d cas= %d)\n",
+ type, rank, channel, channel + 1, branch >> 1, bank,
buf_id, rdwr_str(rdwr), ras, cas);
/* Only 1 bit will be on */
@@ -1054,8 +1054,6 @@ static void i5400_get_mc_regs(struct mem_ctl_info *mci)
u32 actual_tolm;
u16 limit;
int slot_row;
- int maxch;
- int maxdimmperch;
int way0, way1;
pvt = mci->pvt_info;
@@ -1065,9 +1063,6 @@ static void i5400_get_mc_regs(struct mem_ctl_info *mci)
pci_read_config_dword(pvt->system_address, AMBASE + sizeof(u32),
&pvt->u.ambase_top);
- maxdimmperch = pvt->maxdimmperch;
- maxch = pvt->maxch;
-
edac_dbg(2, "AMBASE= 0x%lx MAXCH= %d MAX-DIMM-Per-CH= %d\n",
(long unsigned int)pvt->ambase, pvt->maxch, pvt->maxdimmperch);
@@ -1170,17 +1165,13 @@ static int i5400_init_dimms(struct mem_ctl_info *mci)
{
struct i5400_pvt *pvt;
struct dimm_info *dimm;
- int ndimms, channel_count;
- int max_dimms;
+ int ndimms;
int mtr;
int size_mb;
int channel, slot;
pvt = mci->pvt_info;
- channel_count = pvt->maxch;
- max_dimms = pvt->maxdimmperch;
-
ndimms = 0;
/*
@@ -1196,8 +1187,7 @@ static int i5400_init_dimms(struct mem_ctl_info *mci)
if (!MTR_DIMMS_PRESENT(mtr))
continue;
- dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers,
- channel / 2, channel % 2, slot);
+ dimm = edac_get_dimm(mci, channel / 2, channel % 2, slot);
size_mb = pvt->dimm_info[slot][channel].megabytes;
diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c
index 7bf910d54d11..2e9bbe56cde9 100644
--- a/drivers/edac/i7300_edac.c
+++ b/drivers/edac/i7300_edac.c
@@ -580,7 +580,7 @@ static void i7300_enable_error_reporting(struct mem_ctl_info *mci)
* @ch: Channel number within the branch (0 or 1)
* @branch: Branch number (0 or 1)
* @dinfo: Pointer to DIMM info where dimm size is stored
- * @p_csrow: Pointer to the struct csrow_info that corresponds to that element
+ * @dimm: Pointer to the struct dimm_info that corresponds to that element
*/
static int decode_mtr(struct i7300_pvt *pvt,
int slot, int ch, int branch,
@@ -794,8 +794,7 @@ static int i7300_init_csrows(struct mem_ctl_info *mci)
for (ch = 0; ch < max_channel; ch++) {
int channel = to_channel(ch, branch);
- dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms,
- mci->n_layers, branch, ch, slot);
+ dimm = edac_get_dimm(mci, branch, ch, slot);
dinfo = &pvt->dimm_info[slot][channel];
@@ -817,7 +816,7 @@ static int i7300_init_csrows(struct mem_ctl_info *mci)
/**
* decode_mir() - Decodes Memory Interleave Register (MIR) info
- * @int mir_no: number of the MIR register to decode
+ * @mir_no: number of the MIR register to decode
* @mir: array with the MIR data cached on the driver
*/
static void decode_mir(int mir_no, u16 mir[MAX_MIR])
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index a71cca6eeb33..b3135b208f9a 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -585,8 +585,7 @@ static int get_dimm_config(struct mem_ctl_info *mci)
if (!DIMM_PRESENT(dimm_dod[j]))
continue;
- dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers,
- i, j, 0);
+ dimm = edac_get_dimm(mci, i, j, 0);
banks = numbank(MC_DOD_NUMBANK(dimm_dod[j]));
ranks = numrank(MC_DOD_NUMRANK(dimm_dod[j]));
rows = numrow(MC_DOD_NUMROW(dimm_dod[j]));
diff --git a/drivers/edac/ie31200_edac.c b/drivers/edac/ie31200_edac.c
index d26300f9cb07..4f65073f230b 100644
--- a/drivers/edac/ie31200_edac.c
+++ b/drivers/edac/ie31200_edac.c
@@ -490,9 +490,7 @@ static int ie31200_probe1(struct pci_dev *pdev, int dev_idx)
if (dimm_info[j][i].dual_rank) {
nr_pages = nr_pages / 2;
- dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms,
- mci->n_layers, (i * 2) + 1,
- j, 0);
+ dimm = edac_get_dimm(mci, (i * 2) + 1, j, 0);
dimm->nr_pages = nr_pages;
edac_dbg(0, "set nr pages: 0x%lx\n", nr_pages);
dimm->grain = 8; /* just a guess */
@@ -503,8 +501,7 @@ static int ie31200_probe1(struct pci_dev *pdev, int dev_idx)
dimm->dtype = DEV_UNKNOWN;
dimm->edac_mode = EDAC_UNKNOWN;
}
- dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms,
- mci->n_layers, i * 2, j, 0);
+ dimm = edac_get_dimm(mci, i * 2, j, 0);
dimm->nr_pages = nr_pages;
edac_dbg(0, "set nr pages: 0x%lx\n", nr_pages);
dimm->grain = 8; /* same guess */
diff --git a/drivers/edac/pnd2_edac.c b/drivers/edac/pnd2_edac.c
index b1193be1ef1d..933f7722b893 100644
--- a/drivers/edac/pnd2_edac.c
+++ b/drivers/edac/pnd2_edac.c
@@ -1231,7 +1231,7 @@ static void apl_get_dimm_config(struct mem_ctl_info *mci)
if (!(chan_mask & BIT(i)))
continue;
- dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, i, 0, 0);
+ dimm = edac_get_dimm(mci, i, 0, 0);
if (!dimm) {
edac_dbg(0, "No allocated DIMM for channel %d\n", i);
continue;
@@ -1311,7 +1311,7 @@ static void dnv_get_dimm_config(struct mem_ctl_info *mci)
if (!ranks_of_dimm[j])
continue;
- dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, i, j, 0);
+ dimm = edac_get_dimm(mci, i, j, 0);
if (!dimm) {
edac_dbg(0, "No allocated DIMM for channel %d DIMM %d\n", i, j);
continue;
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index f743502ca9b7..4957e8ee1879 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -254,18 +254,20 @@ static const u32 rir_offset[MAX_RIR_RANGES][MAX_RIR_WAY] = {
* FIXME: Implement the error count reads directly
*/
-static const u32 correrrcnt[] = {
- 0x104, 0x108, 0x10c, 0x110,
-};
-
#define RANK_ODD_OV(reg) GET_BITFIELD(reg, 31, 31)
#define RANK_ODD_ERR_CNT(reg) GET_BITFIELD(reg, 16, 30)
#define RANK_EVEN_OV(reg) GET_BITFIELD(reg, 15, 15)
#define RANK_EVEN_ERR_CNT(reg) GET_BITFIELD(reg, 0, 14)
+#if 0 /* Currently unused */
+static const u32 correrrcnt[] = {
+ 0x104, 0x108, 0x10c, 0x110,
+};
+
static const u32 correrrthrsld[] = {
0x11c, 0x120, 0x124, 0x128,
};
+#endif
#define RANK_ODD_ERR_THRSLD(reg) GET_BITFIELD(reg, 16, 30)
#define RANK_EVEN_ERR_THRSLD(reg) GET_BITFIELD(reg, 0, 14)
@@ -1340,7 +1342,7 @@ static void knl_show_mc_route(u32 reg, char *s)
*/
static int knl_get_dimm_capacity(struct sbridge_pvt *pvt, u64 *mc_sizes)
{
- u64 sad_base, sad_size, sad_limit = 0;
+ u64 sad_base, sad_limit = 0;
u64 tad_base, tad_size, tad_limit, tad_deadspace, tad_livespace;
int sad_rule = 0;
int tad_rule = 0;
@@ -1427,7 +1429,6 @@ static int knl_get_dimm_capacity(struct sbridge_pvt *pvt, u64 *mc_sizes)
edram_only = KNL_EDRAM_ONLY(dram_rule);
sad_limit = pvt->info.sad_limit(dram_rule)+1;
- sad_size = sad_limit - sad_base;
pci_read_config_dword(pvt->pci_sad0,
pvt->info.interleave_list[sad_rule], &interleave_reg);
@@ -1620,7 +1621,7 @@ static int __populate_dimms(struct mem_ctl_info *mci,
}
for (j = 0; j < max_dimms_per_channel; j++) {
- dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, i, j, 0);
+ dimm = edac_get_dimm(mci, i, j, 0);
if (pvt->info.type == KNIGHTS_LANDING) {
pci_read_config_dword(pvt->knl.pci_channel[i],
knl_mtr_reg, &mtr);
@@ -2952,7 +2953,7 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
struct mem_ctl_info *new_mci;
struct sbridge_pvt *pvt = mci->pvt_info;
enum hw_event_mc_err_type tp_event;
- char *type, *optype, msg[256];
+ char *optype, msg[256];
bool ripv = GET_BITFIELD(m->mcgstatus, 0, 0);
bool overflow = GET_BITFIELD(m->status, 62, 62);
bool uncorrected_error = GET_BITFIELD(m->status, 61, 61);
@@ -2981,14 +2982,11 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
if (uncorrected_error) {
core_err_cnt = 1;
if (ripv) {
- type = "FATAL";
tp_event = HW_EVENT_ERR_FATAL;
} else {
- type = "NON_FATAL";
tp_event = HW_EVENT_ERR_UNCORRECTED;
}
} else {
- type = "CORRECTED";
tp_event = HW_EVENT_ERR_CORRECTED;
}
@@ -3200,7 +3198,6 @@ static struct notifier_block sbridge_mce_dec = {
static void sbridge_unregister_mci(struct sbridge_dev *sbridge_dev)
{
struct mem_ctl_info *mci = sbridge_dev->mci;
- struct sbridge_pvt *pvt;
if (unlikely(!mci || !mci->pvt_info)) {
edac_dbg(0, "MC: dev = %p\n", &sbridge_dev->pdev[0]->dev);
@@ -3209,8 +3206,6 @@ static void sbridge_unregister_mci(struct sbridge_dev *sbridge_dev)
return;
}
- pvt = mci->pvt_info;
-
edac_dbg(0, "MC: mci = %p, dev = %p\n",
mci, &sbridge_dev->pdev[0]->dev);
diff --git a/drivers/edac/skx_base.c b/drivers/edac/skx_base.c
index 0fcf3785e8f3..83545b4facb7 100644
--- a/drivers/edac/skx_base.c
+++ b/drivers/edac/skx_base.c
@@ -46,7 +46,8 @@ static struct skx_dev *get_skx_dev(struct pci_bus *bus, u8 idx)
}
enum munittype {
- CHAN0, CHAN1, CHAN2, SAD_ALL, UTIL_ALL, SAD
+ CHAN0, CHAN1, CHAN2, SAD_ALL, UTIL_ALL, SAD,
+ ERRCHAN0, ERRCHAN1, ERRCHAN2,
};
struct munit {
@@ -68,6 +69,9 @@ static const struct munit skx_all_munits[] = {
{ 0x2040, { PCI_DEVFN(10, 0), PCI_DEVFN(12, 0) }, 2, 2, CHAN0 },
{ 0x2044, { PCI_DEVFN(10, 4), PCI_DEVFN(12, 4) }, 2, 2, CHAN1 },
{ 0x2048, { PCI_DEVFN(11, 0), PCI_DEVFN(13, 0) }, 2, 2, CHAN2 },
+ { 0x2043, { PCI_DEVFN(10, 3), PCI_DEVFN(12, 3) }, 2, 2, ERRCHAN0 },
+ { 0x2047, { PCI_DEVFN(10, 7), PCI_DEVFN(12, 7) }, 2, 2, ERRCHAN1 },
+ { 0x204b, { PCI_DEVFN(11, 3), PCI_DEVFN(13, 3) }, 2, 2, ERRCHAN2 },
{ 0x208e, { }, 1, 0, SAD },
{ }
};
@@ -104,10 +108,18 @@ static int get_all_munits(const struct munit *m)
}
switch (m->mtype) {
- case CHAN0: case CHAN1: case CHAN2:
+ case CHAN0:
+ case CHAN1:
+ case CHAN2:
pci_dev_get(pdev);
d->imc[i].chan[m->mtype].cdev = pdev;
break;
+ case ERRCHAN0:
+ case ERRCHAN1:
+ case ERRCHAN2:
+ pci_dev_get(pdev);
+ d->imc[i].chan[m->mtype - ERRCHAN0].edev = pdev;
+ break;
case SAD_ALL:
pci_dev_get(pdev);
d->sad_all = pdev;
@@ -177,8 +189,7 @@ static int skx_get_dimm_config(struct mem_ctl_info *mci)
pci_read_config_dword(imc->chan[i].cdev, 0x8C, &amap);
pci_read_config_dword(imc->chan[i].cdev, 0x400, &mcddrtcfg);
for (j = 0; j < SKX_NUM_DIMMS; j++) {
- dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms,
- mci->n_layers, i, j, 0);
+ dimm = edac_get_dimm(mci, i, j, 0);
pci_read_config_dword(imc->chan[i].cdev,
0x80 + 4 * j, &mtr);
if (IS_DIMM_PRESENT(mtr)) {
@@ -216,6 +227,39 @@ static int skx_get_dimm_config(struct mem_ctl_info *mci)
#define SKX_ILV_REMOTE(tgt) (((tgt) & 8) == 0)
#define SKX_ILV_TARGET(tgt) ((tgt) & 7)
+static void skx_show_retry_rd_err_log(struct decoded_addr *res,
+ char *msg, int len)
+{
+ u32 log0, log1, log2, log3, log4;
+ u32 corr0, corr1, corr2, corr3;
+ struct pci_dev *edev;
+ int n;
+
+ edev = res->dev->imc[res->imc].chan[res->channel].edev;
+
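+	/*
+	 * Read the retry_rd_err_log registers from the channel's dedicated
+	 * error device and append them, together with the corrected-error
+	 * counters, to the decoded message.
+	 */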
+ pci_read_config_dword(edev, 0x154, &log0);
+ pci_read_config_dword(edev, 0x148, &log1);
+ pci_read_config_dword(edev, 0x150, &log2);
+ pci_read_config_dword(edev, 0x15c, &log3);
+ pci_read_config_dword(edev, 0x114, &log4);
+
+ n = snprintf(msg, len, " retry_rd_err_log[%.8x %.8x %.8x %.8x %.8x]",
+ log0, log1, log2, log3, log4);
+
+ pci_read_config_dword(edev, 0x104, &corr0);
+ pci_read_config_dword(edev, 0x108, &corr1);
+ pci_read_config_dword(edev, 0x10c, &corr2);
+ pci_read_config_dword(edev, 0x110, &corr3);
+
+ if (len - n > 0)
+ snprintf(msg + n, len - n,
+ " correrrcnt[%.4x %.4x %.4x %.4x %.4x %.4x %.4x %.4x]",
+ corr0 & 0xffff, corr0 >> 16,
+ corr1 & 0xffff, corr1 >> 16,
+ corr2 & 0xffff, corr2 >> 16,
+ corr3 & 0xffff, corr3 >> 16);
+}
+
static bool skx_sad_decode(struct decoded_addr *res)
{
struct skx_dev *d = list_first_entry(skx_edac_list, typeof(*d), list);
@@ -659,7 +703,7 @@ static int __init skx_init(void)
}
}
- skx_set_decode(skx_decode);
+ skx_set_decode(skx_decode, skx_show_retry_rd_err_log);
if (nvdimm_count && skx_adxl_get() == -ENODEV)
skx_printk(KERN_NOTICE, "Only decoding DDR4 address!\n");
diff --git a/drivers/edac/skx_common.c b/drivers/edac/skx_common.c
index d8ff63d91b86..95662a4ff4c4 100644
--- a/drivers/edac/skx_common.c
+++ b/drivers/edac/skx_common.c
@@ -37,6 +37,7 @@ static char *adxl_msg;
static char skx_msg[MSG_SIZE];
static skx_decode_f skx_decode;
+static skx_show_retry_log_f skx_show_retry_rd_err_log;
static u64 skx_tolm, skx_tohm;
static LIST_HEAD(dev_edac_list);
@@ -100,6 +101,7 @@ void __exit skx_adxl_put(void)
static bool skx_adxl_decode(struct decoded_addr *res)
{
+ struct skx_dev *d;
int i, len = 0;
if (res->addr >= skx_tohm || (res->addr >= skx_tolm &&
@@ -118,6 +120,24 @@ static bool skx_adxl_decode(struct decoded_addr *res)
res->channel = (int)adxl_values[component_indices[INDEX_CHANNEL]];
res->dimm = (int)adxl_values[component_indices[INDEX_DIMM]];
+ if (res->imc > NUM_IMC - 1) {
+ skx_printk(KERN_ERR, "Bad imc %d\n", res->imc);
+ return false;
+ }
+
+ list_for_each_entry(d, &dev_edac_list, list) {
+ if (d->imc[0].src_id == res->socket) {
+ res->dev = d;
+ break;
+ }
+ }
+
+ if (!res->dev) {
+ skx_printk(KERN_ERR, "No device for src_id %d imc %d\n",
+ res->socket, res->imc);
+ return false;
+ }
+
for (i = 0; i < adxl_component_count; i++) {
if (adxl_values[i] == ~0x0ull)
continue;
@@ -131,9 +151,10 @@ static bool skx_adxl_decode(struct decoded_addr *res)
return true;
}
-void skx_set_decode(skx_decode_f decode)
+void skx_set_decode(skx_decode_f decode, skx_show_retry_log_f show_retry_log)
{
skx_decode = decode;
+ skx_show_retry_rd_err_log = show_retry_log;
}
int skx_get_src_id(struct skx_dev *d, int off, u8 *id)
@@ -452,34 +473,17 @@ static void skx_unregister_mci(struct skx_imc *imc)
edac_mc_free(mci);
}
-static struct mem_ctl_info *get_mci(int src_id, int lmc)
-{
- struct skx_dev *d;
-
- if (lmc > NUM_IMC - 1) {
- skx_printk(KERN_ERR, "Bad lmc %d\n", lmc);
- return NULL;
- }
-
- list_for_each_entry(d, &dev_edac_list, list) {
- if (d->imc[0].src_id == src_id)
- return d->imc[lmc].mci;
- }
-
- skx_printk(KERN_ERR, "No mci for src_id %d lmc %d\n", src_id, lmc);
- return NULL;
-}
-
static void skx_mce_output_error(struct mem_ctl_info *mci,
const struct mce *m,
struct decoded_addr *res)
{
enum hw_event_mc_err_type tp_event;
- char *type, *optype;
+ char *optype;
bool ripv = GET_BITFIELD(m->mcgstatus, 0, 0);
bool overflow = GET_BITFIELD(m->status, 62, 62);
bool uncorrected_error = GET_BITFIELD(m->status, 61, 61);
bool recoverable;
+ int len;
u32 core_err_cnt = GET_BITFIELD(m->status, 38, 52);
u32 mscod = GET_BITFIELD(m->status, 16, 31);
u32 errcode = GET_BITFIELD(m->status, 0, 15);
@@ -490,14 +494,11 @@ static void skx_mce_output_error(struct mem_ctl_info *mci,
if (uncorrected_error) {
core_err_cnt = 1;
if (ripv) {
- type = "FATAL";
tp_event = HW_EVENT_ERR_FATAL;
} else {
- type = "NON_FATAL";
tp_event = HW_EVENT_ERR_UNCORRECTED;
}
} else {
- type = "CORRECTED";
tp_event = HW_EVENT_ERR_CORRECTED;
}
@@ -539,12 +540,12 @@ static void skx_mce_output_error(struct mem_ctl_info *mci,
}
}
if (adxl_component_count) {
- snprintf(skx_msg, MSG_SIZE, "%s%s err_code:0x%04x:0x%04x %s",
+ len = snprintf(skx_msg, MSG_SIZE, "%s%s err_code:0x%04x:0x%04x %s",
overflow ? " OVERFLOW" : "",
(uncorrected_error && recoverable) ? " recoverable" : "",
mscod, errcode, adxl_msg);
} else {
- snprintf(skx_msg, MSG_SIZE,
+ len = snprintf(skx_msg, MSG_SIZE,
"%s%s err_code:0x%04x:0x%04x socket:%d imc:%d rank:%d bg:%d ba:%d row:0x%x col:0x%x",
overflow ? " OVERFLOW" : "",
(uncorrected_error && recoverable) ? " recoverable" : "",
@@ -553,6 +554,9 @@ static void skx_mce_output_error(struct mem_ctl_info *mci,
res->bank_group, res->bank_address, res->row, res->column);
}
+ if (skx_show_retry_rd_err_log)
+ skx_show_retry_rd_err_log(res, skx_msg + len, MSG_SIZE - len);
+
edac_dbg(0, "%s\n", skx_msg);
/* Call the helper to output message */
@@ -583,15 +587,12 @@ int skx_mce_check_error(struct notifier_block *nb, unsigned long val,
if (adxl_component_count) {
if (!skx_adxl_decode(&res))
return NOTIFY_DONE;
-
- mci = get_mci(res.socket, res.imc);
- } else {
- if (!skx_decode || !skx_decode(&res))
- return NOTIFY_DONE;
-
- mci = res.dev->imc[res.imc].mci;
+ } else if (!skx_decode || !skx_decode(&res)) {
+ return NOTIFY_DONE;
}
+ mci = res.dev->imc[res.imc].mci;
+
if (!mci)
return NOTIFY_DONE;
diff --git a/drivers/edac/skx_common.h b/drivers/edac/skx_common.h
index 08cc971a50ea..60d1ea669afd 100644
--- a/drivers/edac/skx_common.h
+++ b/drivers/edac/skx_common.h
@@ -64,6 +64,7 @@ struct skx_dev {
u8 src_id, node_id;
struct skx_channel {
struct pci_dev *cdev;
+ struct pci_dev *edev;
struct skx_dimm {
u8 close_pg;
u8 bank_xor_enable;
@@ -113,10 +114,11 @@ struct decoded_addr {
typedef int (*get_dimm_config_f)(struct mem_ctl_info *mci);
typedef bool (*skx_decode_f)(struct decoded_addr *res);
+typedef void (*skx_show_retry_log_f)(struct decoded_addr *res, char *msg, int len);
int __init skx_adxl_get(void);
void __exit skx_adxl_put(void);
-void skx_set_decode(skx_decode_f decode);
+void skx_set_decode(skx_decode_f decode, skx_show_retry_log_f show_retry_log);
int skx_get_src_id(struct skx_dev *d, int off, u8 *id);
int skx_get_node_id(struct skx_dev *d, u8 *id);
diff --git a/drivers/edac/ti_edac.c b/drivers/edac/ti_edac.c
index 6ac26d1b929f..8be3e89a510e 100644
--- a/drivers/edac/ti_edac.c
+++ b/drivers/edac/ti_edac.c
@@ -135,7 +135,7 @@ static void ti_edac_setup_dimm(struct mem_ctl_info *mci, u32 type)
u32 val;
u32 memsize;
- dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, 0, 0, 0);
+ dimm = edac_get_dimm(mci, 0, 0, 0);
val = ti_edac_readl(edac, EMIF_SDRAM_CONFIG);
diff --git a/drivers/extcon/extcon-axp288.c b/drivers/extcon/extcon-axp288.c
index 415afaf479e7..a7f216191493 100644
--- a/drivers/extcon/extcon-axp288.c
+++ b/drivers/extcon/extcon-axp288.c
@@ -322,6 +322,25 @@ static void axp288_put_role_sw(void *data)
usb_role_switch_put(info->role_sw);
}
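+/*
+ * On Cherry Trail systems the xHCI role switch is registered under the
+ * "intel-xhci-usb-sw" software node; look it up directly and defer probing
+ * until it has appeared.
+ */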
+static int axp288_extcon_find_role_sw(struct axp288_extcon_info *info)
+{
+ const struct software_node *swnode;
+ struct fwnode_handle *fwnode;
+
+ if (!x86_match_cpu(cherry_trail_cpu_ids))
+ return 0;
+
+ swnode = software_node_find_by_name(NULL, "intel-xhci-usb-sw");
+ if (!swnode)
+ return -EPROBE_DEFER;
+
+ fwnode = software_node_fwnode(swnode);
+ info->role_sw = usb_role_switch_find_by_fwnode(fwnode);
+ fwnode_handle_put(fwnode);
+
+ return info->role_sw ? 0 : -EPROBE_DEFER;
+}
+
static int axp288_extcon_probe(struct platform_device *pdev)
{
struct axp288_extcon_info *info;
@@ -343,9 +362,10 @@ static int axp288_extcon_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, info);
- info->role_sw = usb_role_switch_get(dev);
- if (IS_ERR(info->role_sw))
- return PTR_ERR(info->role_sw);
+ ret = axp288_extcon_find_role_sw(info);
+ if (ret)
+ return ret;
+
if (info->role_sw) {
ret = devm_add_action_or_reset(dev, axp288_put_role_sw, info);
if (ret)
@@ -440,26 +460,14 @@ static struct platform_driver axp288_extcon_driver = {
},
};
-static struct device_connection axp288_extcon_role_sw_conn = {
- .endpoint[0] = "axp288_extcon",
- .endpoint[1] = "intel_xhci_usb_sw-role-switch",
- .id = "usb-role-switch",
-};
-
static int __init axp288_extcon_init(void)
{
- if (x86_match_cpu(cherry_trail_cpu_ids))
- device_connection_add(&axp288_extcon_role_sw_conn);
-
return platform_driver_register(&axp288_extcon_driver);
}
module_init(axp288_extcon_init);
static void __exit axp288_extcon_exit(void)
{
- if (x86_match_cpu(cherry_trail_cpu_ids))
- device_connection_remove(&axp288_extcon_role_sw_conn);
-
platform_driver_unregister(&axp288_extcon_driver);
}
module_exit(axp288_extcon_exit);
diff --git a/drivers/extcon/extcon-intel-cht-wc.c b/drivers/extcon/extcon-intel-cht-wc.c
index 9d32150e68db..771f6f4cf92e 100644
--- a/drivers/extcon/extcon-intel-cht-wc.c
+++ b/drivers/extcon/extcon-intel-cht-wc.c
@@ -338,6 +338,7 @@ static int cht_wc_extcon_probe(struct platform_device *pdev)
struct intel_soc_pmic *pmic = dev_get_drvdata(pdev->dev.parent);
struct cht_wc_extcon_data *ext;
unsigned long mask = ~(CHT_WC_PWRSRC_VBUS | CHT_WC_PWRSRC_USBID_MASK);
+ int pwrsrc_sts, id;
int irq, ret;
irq = platform_get_irq(pdev, 0);
@@ -387,8 +388,19 @@ static int cht_wc_extcon_probe(struct platform_device *pdev)
goto disable_sw_control;
}
- /* Route D+ and D- to PMIC for initial charger detection */
- cht_wc_extcon_set_phymux(ext, MUX_SEL_PMIC);
+ ret = regmap_read(ext->regmap, CHT_WC_PWRSRC_STS, &pwrsrc_sts);
+ if (ret) {
+ dev_err(ext->dev, "Error reading pwrsrc status: %d\n", ret);
+ goto disable_sw_control;
+ }
+
+ /*
+	 * If no USB host or device is connected, route D+ and D- to the PMIC
+	 * for initial charger detection.
+ */
+ id = cht_wc_extcon_get_id(ext, pwrsrc_sts);
+ if (id != INTEL_USB_ID_GND)
+ cht_wc_extcon_set_phymux(ext, MUX_SEL_PMIC);
/* Get initial state */
cht_wc_extcon_pwrsrc_event(ext);
diff --git a/drivers/extcon/extcon-sm5502.c b/drivers/extcon/extcon-sm5502.c
index dc43847ad2b0..bcf65aaca5d2 100644
--- a/drivers/extcon/extcon-sm5502.c
+++ b/drivers/extcon/extcon-sm5502.c
@@ -65,6 +65,10 @@ struct sm5502_muic_info {
/* Default value of SM5502 register to bring up MUIC device. */
static struct reg_data sm5502_reg_data[] = {
{
+ .reg = SM5502_REG_RESET,
+ .val = SM5502_REG_RESET_MASK,
+ .invert = true,
+ }, {
.reg = SM5502_REG_CONTROL,
.val = SM5502_REG_CONTROL_MASK_INT_MASK,
.invert = false,
@@ -272,7 +276,7 @@ static int sm5502_muic_set_path(struct sm5502_muic_info *info,
/* Return cable type of attached or detached accessories */
static unsigned int sm5502_muic_get_cable_type(struct sm5502_muic_info *info)
{
- unsigned int cable_type = -1, adc, dev_type1;
+ unsigned int cable_type, adc, dev_type1;
int ret;
/* Read ADC value according to external cable or button */
diff --git a/drivers/extcon/extcon-sm5502.h b/drivers/extcon/extcon-sm5502.h
index 9dbb634d213b..ce1f1ec310c4 100644
--- a/drivers/extcon/extcon-sm5502.h
+++ b/drivers/extcon/extcon-sm5502.h
@@ -237,6 +237,8 @@ enum sm5502_reg {
#define DM_DP_SWITCH_UART ((DM_DP_CON_SWITCH_UART <<SM5502_REG_MANUAL_SW1_DP_SHIFT) \
| (DM_DP_CON_SWITCH_UART <<SM5502_REG_MANUAL_SW1_DM_SHIFT))
+#define SM5502_REG_RESET_MASK (0x1)
+
/* SM5502 Interrupts */
enum sm5502_irq {
/* INT1 */
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
index b132ab9ad607..715e491dfbc3 100644
--- a/drivers/firewire/net.c
+++ b/drivers/firewire/net.c
@@ -250,7 +250,11 @@ static int fwnet_header_cache(const struct neighbour *neigh,
h = (struct fwnet_header *)((u8 *)hh->hh_data + HH_DATA_OFF(sizeof(*h)));
h->h_proto = type;
memcpy(h->h_dest, neigh->ha, net->addr_len);
- hh->hh_len = FWNET_HLEN;
+
+ /* Pairs with the READ_ONCE() in neigh_resolve_output(),
+ * neigh_hh_output() and neigh_update_hhs().
+ */
+ smp_store_release(&hh->hh_len, FWNET_HLEN);
return 0;
}
diff --git a/drivers/firmware/arm_sdei.c b/drivers/firmware/arm_sdei.c
index 9cd70d1a5622..a479023fa036 100644
--- a/drivers/firmware/arm_sdei.c
+++ b/drivers/firmware/arm_sdei.c
@@ -967,29 +967,29 @@ static int sdei_get_conduit(struct platform_device *pdev)
if (np) {
if (of_property_read_string(np, "method", &method)) {
pr_warn("missing \"method\" property\n");
- return CONDUIT_INVALID;
+ return SMCCC_CONDUIT_NONE;
}
if (!strcmp("hvc", method)) {
sdei_firmware_call = &sdei_smccc_hvc;
- return CONDUIT_HVC;
+ return SMCCC_CONDUIT_HVC;
} else if (!strcmp("smc", method)) {
sdei_firmware_call = &sdei_smccc_smc;
- return CONDUIT_SMC;
+ return SMCCC_CONDUIT_SMC;
}
pr_warn("invalid \"method\" property: %s\n", method);
} else if (IS_ENABLED(CONFIG_ACPI) && !acpi_disabled) {
if (acpi_psci_use_hvc()) {
sdei_firmware_call = &sdei_smccc_hvc;
- return CONDUIT_HVC;
+ return SMCCC_CONDUIT_HVC;
} else {
sdei_firmware_call = &sdei_smccc_smc;
- return CONDUIT_SMC;
+ return SMCCC_CONDUIT_SMC;
}
}
- return CONDUIT_INVALID;
+ return SMCCC_CONDUIT_NONE;
}
static int sdei_probe(struct platform_device *pdev)
diff --git a/drivers/firmware/broadcom/Kconfig b/drivers/firmware/broadcom/Kconfig
index d03ed8e43ad7..8e3d355a637a 100644
--- a/drivers/firmware/broadcom/Kconfig
+++ b/drivers/firmware/broadcom/Kconfig
@@ -22,3 +22,11 @@ config BCM47XX_SPROM
In case of SoC devices SPROM content is stored on a flash used by
bootloader firmware CFE. This driver provides method to ssb and bcma
drivers to read SPROM on SoC.
+
+config TEE_BNXT_FW
+ tristate "Broadcom BNXT firmware manager"
+ depends on (ARCH_BCM_IPROC && OPTEE) || (COMPILE_TEST && TEE)
+ default ARCH_BCM_IPROC
+ help
+	  This module helps to manage the firmware on the Broadcom BNXT
+	  device. The module registers on the tee bus and invokes calls to
+	  manage the firmware on the BNXT device.
diff --git a/drivers/firmware/broadcom/Makefile b/drivers/firmware/broadcom/Makefile
index 72c7fdc20c77..17c5061c47a7 100644
--- a/drivers/firmware/broadcom/Makefile
+++ b/drivers/firmware/broadcom/Makefile
@@ -1,3 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_BCM47XX_NVRAM) += bcm47xx_nvram.o
obj-$(CONFIG_BCM47XX_SPROM) += bcm47xx_sprom.o
+obj-$(CONFIG_TEE_BNXT_FW) += tee_bnxt_fw.o
diff --git a/drivers/firmware/broadcom/tee_bnxt_fw.c b/drivers/firmware/broadcom/tee_bnxt_fw.c
new file mode 100644
index 000000000000..5b7ef89eb701
--- /dev/null
+++ b/drivers/firmware/broadcom/tee_bnxt_fw.c
@@ -0,0 +1,279 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 Broadcom.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/tee_drv.h>
+#include <linux/uuid.h>
+
+#include <linux/firmware/broadcom/tee_bnxt_fw.h>
+
+#define MAX_SHM_MEM_SZ SZ_4M
+
+#define MAX_TEE_PARAM_ARRY_MEMB 4
+
+enum ta_cmd {
+ /*
+	 * TA_CMD_BNXT_FASTBOOT - Boot the bnxt device by copying f/w into SRAM
+ *
+ * param[0] unused
+ * param[1] unused
+ * param[2] unused
+ * param[3] unused
+ *
+ * Result:
+ * TEE_SUCCESS - Invoke command success
+	 *	TEE_ERROR_ITEM_NOT_FOUND - Corrupt f/w image found in memory
+ */
+ TA_CMD_BNXT_FASTBOOT = 0,
+
+ /*
+ * TA_CMD_BNXT_COPY_COREDUMP - copy the core dump into shm
+ *
+ * param[0] (inout memref) - Coredump buffer memory reference
+	 * param[1] (in value)	  - value.a: offset from which data is copied
+ * value.b: size of data to be copied
+ * param[2] unused
+ * param[3] unused
+ *
+ * Result:
+ * TEE_SUCCESS - Invoke command success
+ * TEE_ERROR_BAD_PARAMETERS - Incorrect input param
+ * TEE_ERROR_ITEM_NOT_FOUND - Corrupt core dump
+ */
+ TA_CMD_BNXT_COPY_COREDUMP = 3,
+};
+
+/**
+ * struct tee_bnxt_fw_private - OP-TEE bnxt private data
+ * @dev: OP-TEE based bnxt device.
+ * @ctx: OP-TEE context handler.
+ * @session_id: TA session identifier.
+ */
+struct tee_bnxt_fw_private {
+ struct device *dev;
+ struct tee_context *ctx;
+ u32 session_id;
+ struct tee_shm *fw_shm_pool;
+};
+
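+/* Only one bnxt TA session is supported, hence a single static instance. */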
+static struct tee_bnxt_fw_private pvt_data;
+
+static void prepare_args(int cmd,
+ struct tee_ioctl_invoke_arg *arg,
+ struct tee_param *param)
+{
+ memset(arg, 0, sizeof(*arg));
+ memset(param, 0, MAX_TEE_PARAM_ARRY_MEMB * sizeof(*param));
+
+ arg->func = cmd;
+ arg->session = pvt_data.session_id;
+ arg->num_params = MAX_TEE_PARAM_ARRY_MEMB;
+
+ /* Fill invoke cmd params */
+ switch (cmd) {
+ case TA_CMD_BNXT_COPY_COREDUMP:
+ param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT;
+ param[0].u.memref.shm = pvt_data.fw_shm_pool;
+ param[0].u.memref.size = MAX_SHM_MEM_SZ;
+ param[0].u.memref.shm_offs = 0;
+ param[1].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT;
+ break;
+ case TA_CMD_BNXT_FASTBOOT:
+ default:
+ /* Nothing to do */
+ break;
+ }
+}
+
+/**
+ * tee_bnxt_fw_load() - Load the bnxt firmware
+ *
+ * Uses an OP-TEE call to start a secure boot process.
+ *
+ * Returns 0 on success, negative errno otherwise.
+ */
+int tee_bnxt_fw_load(void)
+{
+ int ret = 0;
+ struct tee_ioctl_invoke_arg arg;
+ struct tee_param param[MAX_TEE_PARAM_ARRY_MEMB];
+
+ if (!pvt_data.ctx)
+ return -ENODEV;
+
+ prepare_args(TA_CMD_BNXT_FASTBOOT, &arg, param);
+
+ ret = tee_client_invoke_func(pvt_data.ctx, &arg, param);
+ if (ret < 0 || arg.ret != 0) {
+ dev_err(pvt_data.dev,
+ "TA_CMD_BNXT_FASTBOOT invoke failed TEE err: %x, ret:%x\n",
+ arg.ret, ret);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(tee_bnxt_fw_load);
+
+/**
+ * tee_bnxt_copy_coredump() - Copy the core dump out of the TA's shared
+ *			       memory using an OP-TEE call
+ * @buf: destination buffer where core dump is copied into
+ * @offset: offset from the base address of core dump area
+ * @size: size of the dump
+ *
+ * Returns 0 on success, negative errno otherwise.
+ */
+int tee_bnxt_copy_coredump(void *buf, u32 offset, u32 size)
+{
+ struct tee_ioctl_invoke_arg arg;
+ struct tee_param param[MAX_TEE_PARAM_ARRY_MEMB];
+ void *core_data;
+ u32 rbytes = size;
+ u32 nbytes = 0;
+ int ret = 0;
+
+ if (!pvt_data.ctx)
+ return -ENODEV;
+
+ prepare_args(TA_CMD_BNXT_COPY_COREDUMP, &arg, param);
+
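+ /*
+ * The shared buffer holds at most MAX_SHM_MEM_SZ (4M) per invocation,
+ * so larger dumps are copied in chunks, advancing offset each pass.
+ */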
+ while (rbytes) {
+ nbytes = min_t(u32, rbytes, param[0].u.memref.size);
+
+ /* Fill additional invoke cmd params */
+ param[1].u.value.a = offset;
+ param[1].u.value.b = nbytes;
+
+ ret = tee_client_invoke_func(pvt_data.ctx, &arg, param);
+ if (ret < 0 || arg.ret != 0) {
+ dev_err(pvt_data.dev,
+ "TA_CMD_BNXT_COPY_COREDUMP invoke failed TEE err: %x, ret:%x\n",
+ arg.ret, ret);
+ return -EINVAL;
+ }
+
+ core_data = tee_shm_get_va(pvt_data.fw_shm_pool, 0);
+ if (IS_ERR(core_data)) {
+ dev_err(pvt_data.dev, "tee_shm_get_va failed\n");
+ return PTR_ERR(core_data);
+ }
+
+ memcpy(buf, core_data, nbytes);
+
+ rbytes -= nbytes;
+ buf += nbytes;
+ offset += nbytes;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(tee_bnxt_copy_coredump);
+
+static int optee_ctx_match(struct tee_ioctl_version_data *ver, const void *data)
+{
+ return (ver->impl_id == TEE_IMPL_ID_OPTEE);
+}
+
+static int tee_bnxt_fw_probe(struct device *dev)
+{
+ struct tee_client_device *bnxt_device = to_tee_client_device(dev);
+ int ret, err = -ENODEV;
+ struct tee_ioctl_open_session_arg sess_arg;
+ struct tee_shm *fw_shm_pool;
+
+ memset(&sess_arg, 0, sizeof(sess_arg));
+
+ /* Open context with TEE driver */
+ pvt_data.ctx = tee_client_open_context(NULL, optee_ctx_match, NULL,
+ NULL);
+ if (IS_ERR(pvt_data.ctx))
+ return -ENODEV;
+
+ /* Open session with Bnxt load Trusted App */
+ memcpy(sess_arg.uuid, bnxt_device->id.uuid.b, TEE_IOCTL_UUID_LEN);
+ sess_arg.clnt_login = TEE_IOCTL_LOGIN_PUBLIC;
+ sess_arg.num_params = 0;
+
+ ret = tee_client_open_session(pvt_data.ctx, &sess_arg, NULL);
+ if (ret < 0 || sess_arg.ret != 0) {
+ dev_err(dev, "tee_client_open_session failed, err: %x\n",
+ sess_arg.ret);
+ err = -EINVAL;
+ goto out_ctx;
+ }
+ pvt_data.session_id = sess_arg.session;
+
+ pvt_data.dev = dev;
+
+ fw_shm_pool = tee_shm_alloc(pvt_data.ctx, MAX_SHM_MEM_SZ,
+ TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
+ if (IS_ERR(fw_shm_pool)) {
+ dev_err(pvt_data.dev, "tee_shm_alloc failed\n");
+ err = PTR_ERR(fw_shm_pool);
+ /* the error path below closes the session and the context */
+ goto out_sess;
+ }
+
+ pvt_data.fw_shm_pool = fw_shm_pool;
+
+ return 0;
+
+out_sess:
+ tee_client_close_session(pvt_data.ctx, pvt_data.session_id);
+out_ctx:
+ tee_client_close_context(pvt_data.ctx);
+
+ return err;
+}
+
+static int tee_bnxt_fw_remove(struct device *dev)
+{
+ tee_shm_free(pvt_data.fw_shm_pool);
+ tee_client_close_session(pvt_data.ctx, pvt_data.session_id);
+ tee_client_close_context(pvt_data.ctx);
+ pvt_data.ctx = NULL;
+
+ return 0;
+}
+
+static const struct tee_client_device_id tee_bnxt_fw_id_table[] = {
+ {UUID_INIT(0x6272636D, 0x2019, 0x0716,
+ 0x42, 0x43, 0x4D, 0x5F, 0x53, 0x43, 0x48, 0x49)},
+ {}
+};
+
+MODULE_DEVICE_TABLE(tee, tee_bnxt_fw_id_table);
+
+static struct tee_client_driver tee_bnxt_fw_driver = {
+ .id_table = tee_bnxt_fw_id_table,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .bus = &tee_bus_type,
+ .probe = tee_bnxt_fw_probe,
+ .remove = tee_bnxt_fw_remove,
+ },
+};
+
+static int __init tee_bnxt_fw_mod_init(void)
+{
+ return driver_register(&tee_bnxt_fw_driver.driver);
+}
+
+static void __exit tee_bnxt_fw_mod_exit(void)
+{
+ driver_unregister(&tee_bnxt_fw_driver.driver);
+}
+
+module_init(tee_bnxt_fw_mod_init);
+module_exit(tee_bnxt_fw_mod_exit);
+
+MODULE_AUTHOR("Vikas Gupta <vikas.gupta@broadcom.com>");
+MODULE_DESCRIPTION("Broadcom bnxt firmware manager");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
index b248870a9806..bcc378c19ebe 100644
--- a/drivers/firmware/efi/Kconfig
+++ b/drivers/firmware/efi/Kconfig
@@ -75,6 +75,27 @@ config EFI_MAX_FAKE_MEM
Ranges can be set up to this value using comma-separated list.
The default value is 8.
+config EFI_SOFT_RESERVE
+ bool "Reserve EFI Specific Purpose Memory"
+ depends on EFI && EFI_STUB && ACPI_HMAT
+ default ACPI_HMAT
+ help
+ On systems that have mixed performance classes of memory, EFI
+ may indicate specific purpose memory with an attribute (see
+ EFI_MEMORY_SP in UEFI 2.8). A memory range tagged with this
+ attribute may have unique performance characteristics compared
+ to the system's general purpose "System RAM" pool. On the
+ expectation that such memory has application-specific usage,
+ and that its base EFI memory type is "conventional", answer Y
+ to have the kernel reserve it as a "Soft Reserved" resource
+ and set it aside for direct access (device-dax) by default.
+ The memory range can later be assigned to the page allocator
+ by system administrator policy via the device-dax kmem
+ facility. Say N to have the kernel treat this memory as
+ "System RAM" by default.
+
+ If unsure, say Y.
+
config EFI_PARAMS_FROM_FDT
bool
help
diff --git a/drivers/firmware/efi/Makefile b/drivers/firmware/efi/Makefile
index 4ac2de4dfa72..554d795270d9 100644
--- a/drivers/firmware/efi/Makefile
+++ b/drivers/firmware/efi/Makefile
@@ -20,13 +20,16 @@ obj-$(CONFIG_UEFI_CPER) += cper.o
obj-$(CONFIG_EFI_RUNTIME_MAP) += runtime-map.o
obj-$(CONFIG_EFI_RUNTIME_WRAPPERS) += runtime-wrappers.o
obj-$(CONFIG_EFI_STUB) += libstub/
-obj-$(CONFIG_EFI_FAKE_MEMMAP) += fake_mem.o
+obj-$(CONFIG_EFI_FAKE_MEMMAP) += fake_map.o
obj-$(CONFIG_EFI_BOOTLOADER_CONTROL) += efibc.o
obj-$(CONFIG_EFI_TEST) += test/
obj-$(CONFIG_EFI_DEV_PATH_PARSER) += dev-path-parser.o
obj-$(CONFIG_APPLE_PROPERTIES) += apple-properties.o
obj-$(CONFIG_EFI_RCI2_TABLE) += rci2-table.o
+fake_map-y += fake_mem.o
+fake_map-$(CONFIG_X86) += x86_fake_mem.o
+
arm-obj-$(CONFIG_EFI) := arm-init.o arm-runtime.o
obj-$(CONFIG_ARM) += $(arm-obj-y)
obj-$(CONFIG_ARM64) += $(arm-obj-y)
diff --git a/drivers/firmware/efi/apple-properties.c b/drivers/firmware/efi/apple-properties.c
index 0e206c9e0d7a..5ccf39986a14 100644
--- a/drivers/firmware/efi/apple-properties.c
+++ b/drivers/firmware/efi/apple-properties.c
@@ -53,7 +53,8 @@ static void __init unmarshal_key_value_pairs(struct dev_header *dev_header,
for (i = 0; i < dev_header->prop_count; i++) {
int remaining = dev_header->len - (ptr - (void *)dev_header);
- u32 key_len, val_len;
+ u32 key_len, val_len, entry_len;
+ const u8 *entry_data;
char *key;
if (sizeof(key_len) > remaining)
@@ -85,17 +86,14 @@ static void __init unmarshal_key_value_pairs(struct dev_header *dev_header,
ucs2_as_utf8(key, ptr + sizeof(key_len),
key_len - sizeof(key_len));
- entry[i].name = key;
- entry[i].length = val_len - sizeof(val_len);
- entry[i].is_array = !!entry[i].length;
- entry[i].type = DEV_PROP_U8;
- entry[i].pointer.u8_data = ptr + key_len + sizeof(val_len);
-
+ entry_data = ptr + key_len + sizeof(val_len);
+ entry_len = val_len - sizeof(val_len);
+ entry[i] = PROPERTY_ENTRY_U8_ARRAY_LEN(key, entry_data,
+ entry_len);
if (dump_properties) {
- dev_info(dev, "property: %s\n", entry[i].name);
+ dev_info(dev, "property: %s\n", key);
print_hex_dump(KERN_INFO, pr_fmt(), DUMP_PREFIX_OFFSET,
- 16, 1, entry[i].pointer.u8_data,
- entry[i].length, true);
+ 16, 1, entry_data, entry_len, true);
}
ptr += key_len + val_len;
diff --git a/drivers/firmware/efi/arm-init.c b/drivers/firmware/efi/arm-init.c
index 311cd349a862..904fa09e6a6b 100644
--- a/drivers/firmware/efi/arm-init.c
+++ b/drivers/firmware/efi/arm-init.c
@@ -164,6 +164,15 @@ static __init int is_usable_memory(efi_memory_desc_t *md)
case EFI_CONVENTIONAL_MEMORY:
case EFI_PERSISTENT_MEMORY:
/*
+ * Special purpose memory is 'soft reserved', which means it
+ * is set aside initially, but can be hotplugged back in or
+ * be assigned to the dax driver after boot.
+ */
+ if (efi_soft_reserve_enabled() &&
+ (md->attribute & EFI_MEMORY_SP))
+ return false;
+
+ /*
* According to the spec, these regions are no longer reserved
* after calling ExitBootServices(). However, we can only use
* them as System RAM if they can be mapped writeback cacheable.
diff --git a/drivers/firmware/efi/arm-runtime.c b/drivers/firmware/efi/arm-runtime.c
index e2ac5fa5531b..899b803842bb 100644
--- a/drivers/firmware/efi/arm-runtime.c
+++ b/drivers/firmware/efi/arm-runtime.c
@@ -121,6 +121,30 @@ static int __init arm_enable_runtime_services(void)
return 0;
}
+ if (efi_soft_reserve_enabled()) {
+ efi_memory_desc_t *md;
+
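+ /*
+ * Publish each EFI_MEMORY_SP range as a "Soft Reserved" entry in
+ * /proc/iomem so device-dax can later claim it via
+ * IORES_DESC_SOFT_RESERVED.
+ */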
+ for_each_efi_memory_desc(md) {
+ u64 md_size = md->num_pages << EFI_PAGE_SHIFT;
+ struct resource *res;
+
+ if (!(md->attribute & EFI_MEMORY_SP))
+ continue;
+
+ res = kzalloc(sizeof(*res), GFP_KERNEL);
+ if (WARN_ON(!res))
+ break;
+
+ res->start = md->phys_addr;
+ res->end = md->phys_addr + md_size - 1;
+ res->name = "Soft Reserved";
+ res->flags = IORESOURCE_MEM;
+ res->desc = IORES_DESC_SOFT_RESERVED;
+
+ insert_resource(&iomem_resource, res);
+ }
+ }
+
if (efi_runtime_disabled()) {
pr_info("EFI runtime services will be disabled.\n");
return 0;
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index e98bbf8e56d9..d101f072c8f8 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -81,6 +81,11 @@ bool efi_runtime_disabled(void)
return disable_runtime;
}
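+
+/*
+ * "efi=nosoftreserve" on the kernel command line sets
+ * EFI_MEM_NO_SOFT_RESERVE, so EFI_MEMORY_SP ranges are treated as
+ * ordinary "System RAM".
+ */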
+bool __pure __efi_soft_reserve_enabled(void)
+{
+ return !efi_enabled(EFI_MEM_NO_SOFT_RESERVE);
+}
+
static int __init parse_efi_cmdline(char *str)
{
if (!str) {
@@ -94,6 +99,9 @@ static int __init parse_efi_cmdline(char *str)
if (parse_option_str(str, "noruntime"))
disable_runtime = true;
+ if (parse_option_str(str, "nosoftreserve"))
+ set_bit(EFI_MEM_NO_SOFT_RESERVE, &efi.flags);
+
return 0;
}
early_param("efi", parse_efi_cmdline);
@@ -296,7 +304,7 @@ static __init int efivar_ssdt_load(void)
goto free_data;
}
- ret = acpi_load_table(data);
+ ret = acpi_load_table(data, NULL);
if (ret) {
pr_err("failed to load table: %d\n", ret);
goto free_data;
@@ -842,15 +850,16 @@ char * __init efi_md_typeattr_format(char *buf, size_t size,
if (attr & ~(EFI_MEMORY_UC | EFI_MEMORY_WC | EFI_MEMORY_WT |
EFI_MEMORY_WB | EFI_MEMORY_UCE | EFI_MEMORY_RO |
EFI_MEMORY_WP | EFI_MEMORY_RP | EFI_MEMORY_XP |
- EFI_MEMORY_NV |
+ EFI_MEMORY_NV | EFI_MEMORY_SP |
EFI_MEMORY_RUNTIME | EFI_MEMORY_MORE_RELIABLE))
snprintf(pos, size, "|attr=0x%016llx]",
(unsigned long long)attr);
else
snprintf(pos, size,
- "|%3s|%2s|%2s|%2s|%2s|%2s|%2s|%3s|%2s|%2s|%2s|%2s]",
+ "|%3s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%3s|%2s|%2s|%2s|%2s]",
attr & EFI_MEMORY_RUNTIME ? "RUN" : "",
attr & EFI_MEMORY_MORE_RELIABLE ? "MR" : "",
+ attr & EFI_MEMORY_SP ? "SP" : "",
attr & EFI_MEMORY_NV ? "NV" : "",
attr & EFI_MEMORY_XP ? "XP" : "",
attr & EFI_MEMORY_RP ? "RP" : "",
diff --git a/drivers/firmware/efi/esrt.c b/drivers/firmware/efi/esrt.c
index d6dd5f503fa2..2762e0662bf4 100644
--- a/drivers/firmware/efi/esrt.c
+++ b/drivers/firmware/efi/esrt.c
@@ -246,6 +246,9 @@ void __init efi_esrt_init(void)
int rc;
phys_addr_t end;
+ if (!efi_enabled(EFI_MEMMAP))
+ return;
+
pr_debug("esrt-init: loading.\n");
if (!esrt_table_exists())
return;
diff --git a/drivers/firmware/efi/fake_mem.c b/drivers/firmware/efi/fake_mem.c
index 9501edc0fcfb..bb9fc70d0cfa 100644
--- a/drivers/firmware/efi/fake_mem.c
+++ b/drivers/firmware/efi/fake_mem.c
@@ -17,12 +17,10 @@
#include <linux/memblock.h>
#include <linux/types.h>
#include <linux/sort.h>
-#include <asm/efi.h>
+#include "fake_mem.h"
-#define EFI_MAX_FAKEMEM CONFIG_EFI_MAX_FAKE_MEM
-
-static struct efi_mem_range fake_mems[EFI_MAX_FAKEMEM];
-static int nr_fake_mem;
+struct efi_mem_range efi_fake_mems[EFI_MAX_FAKEMEM];
+int nr_fake_mem;
static int __init cmp_fake_mem(const void *x1, const void *x2)
{
@@ -44,13 +42,13 @@ void __init efi_fake_memmap(void)
void *new_memmap;
int i;
- if (!nr_fake_mem)
+ if (!efi_enabled(EFI_MEMMAP) || !nr_fake_mem)
return;
/* count up the number of EFI memory descriptor */
for (i = 0; i < nr_fake_mem; i++) {
for_each_efi_memory_desc(md) {
- struct range *r = &fake_mems[i].range;
+ struct range *r = &efi_fake_mems[i].range;
new_nr_map += efi_memmap_split_count(md, r);
}
@@ -70,7 +68,7 @@ void __init efi_fake_memmap(void)
}
for (i = 0; i < nr_fake_mem; i++)
- efi_memmap_insert(&efi.memmap, new_memmap, &fake_mems[i]);
+ efi_memmap_insert(&efi.memmap, new_memmap, &efi_fake_mems[i]);
/* swap into new EFI memmap */
early_memunmap(new_memmap, efi.memmap.desc_size * new_nr_map);
@@ -104,22 +102,22 @@ static int __init setup_fake_mem(char *p)
if (nr_fake_mem >= EFI_MAX_FAKEMEM)
break;
- fake_mems[nr_fake_mem].range.start = start;
- fake_mems[nr_fake_mem].range.end = start + mem_size - 1;
- fake_mems[nr_fake_mem].attribute = attribute;
+ efi_fake_mems[nr_fake_mem].range.start = start;
+ efi_fake_mems[nr_fake_mem].range.end = start + mem_size - 1;
+ efi_fake_mems[nr_fake_mem].attribute = attribute;
nr_fake_mem++;
if (*p == ',')
p++;
}
- sort(fake_mems, nr_fake_mem, sizeof(struct efi_mem_range),
+ sort(efi_fake_mems, nr_fake_mem, sizeof(struct efi_mem_range),
cmp_fake_mem, NULL);
for (i = 0; i < nr_fake_mem; i++)
pr_info("efi_fake_mem: add attr=0x%016llx to [mem 0x%016llx-0x%016llx]",
- fake_mems[i].attribute, fake_mems[i].range.start,
- fake_mems[i].range.end);
+ efi_fake_mems[i].attribute, efi_fake_mems[i].range.start,
+ efi_fake_mems[i].range.end);
return *p == '\0' ? 0 : -EINVAL;
}
diff --git a/drivers/firmware/efi/fake_mem.h b/drivers/firmware/efi/fake_mem.h
new file mode 100644
index 000000000000..d52791af4b18
--- /dev/null
+++ b/drivers/firmware/efi/fake_mem.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __EFI_FAKE_MEM_H__
+#define __EFI_FAKE_MEM_H__
+#include <asm/efi.h>
+
+#define EFI_MAX_FAKEMEM CONFIG_EFI_MAX_FAKE_MEM
+
+extern struct efi_mem_range efi_fake_mems[EFI_MAX_FAKEMEM];
+extern int nr_fake_mem;
+#endif /* __EFI_FAKE_MEM_H__ */
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index ee0661ddb25b..c35f893897e1 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -38,7 +38,8 @@ OBJECT_FILES_NON_STANDARD := y
# Prevents link failures: __sanitizer_cov_trace_pc() is not linked in.
KCOV_INSTRUMENT := n
-lib-y := efi-stub-helper.o gop.o secureboot.o tpm.o
+lib-y := efi-stub-helper.o gop.o secureboot.o tpm.o \
+ random.o
# include the stub's generic dependencies from lib/ when building for ARM/arm64
arm-deps-y := fdt_rw.c fdt_ro.c fdt_wip.c fdt.c fdt_empty_tree.c fdt_sw.c
@@ -47,7 +48,7 @@ arm-deps-$(CONFIG_ARM64) += sort.c
$(obj)/lib-%.o: $(srctree)/lib/%.c FORCE
$(call if_changed_rule,cc_o_c)
-lib-$(CONFIG_EFI_ARMSTUB) += arm-stub.o fdt.o string.o random.o \
+lib-$(CONFIG_EFI_ARMSTUB) += arm-stub.o fdt.o string.o \
$(patsubst %.c,lib-%.o,$(arm-deps-y))
lib-$(CONFIG_ARM) += arm32-stub.o
diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c
index c382a48c6678..817237ce2420 100644
--- a/drivers/firmware/efi/libstub/arm-stub.c
+++ b/drivers/firmware/efi/libstub/arm-stub.c
@@ -189,6 +189,8 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
goto fail_free_cmdline;
}
+ efi_retrieve_tpm2_eventlog(sys_table);
+
/* Ask the firmware to clear memory on unclean shutdown */
efi_enable_reset_attack_mitigation(sys_table);
diff --git a/drivers/firmware/efi/libstub/arm32-stub.c b/drivers/firmware/efi/libstub/arm32-stub.c
index 41213bf5fcf5..4566640de650 100644
--- a/drivers/firmware/efi/libstub/arm32-stub.c
+++ b/drivers/firmware/efi/libstub/arm32-stub.c
@@ -146,6 +146,11 @@ static efi_status_t reserve_kernel_base(efi_system_table_t *sys_table_arg,
continue;
case EFI_CONVENTIONAL_MEMORY:
+ /* Skip soft reserved conventional memory */
+ if (efi_soft_reserve_enabled() &&
+ (desc->attribute & EFI_MEMORY_SP))
+ continue;
+
/*
* Reserve the intersection between this entry and the
* region.
diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
index 35dbc2791c97..e02579907f2e 100644
--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
+++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
@@ -32,6 +32,7 @@ static unsigned long __chunk_size = EFI_READ_CHUNK_SIZE;
static int __section(.data) __nokaslr;
static int __section(.data) __quiet;
static int __section(.data) __novamap;
+static bool __section(.data) efi_nosoftreserve;
int __pure nokaslr(void)
{
@@ -45,6 +46,10 @@ int __pure novamap(void)
{
return __novamap;
}
+bool __pure __efi_soft_reserve_enabled(void)
+{
+ return !efi_nosoftreserve;
+}
#define EFI_MMAP_NR_SLACK_SLOTS 8
@@ -211,6 +216,10 @@ again:
if (desc->type != EFI_CONVENTIONAL_MEMORY)
continue;
+ if (efi_soft_reserve_enabled() &&
+ (desc->attribute & EFI_MEMORY_SP))
+ continue;
+
if (desc->num_pages < nr_pages)
continue;
@@ -305,6 +314,10 @@ efi_status_t efi_low_alloc_above(efi_system_table_t *sys_table_arg,
if (desc->type != EFI_CONVENTIONAL_MEMORY)
continue;
+ if (efi_soft_reserve_enabled() &&
+ (desc->attribute & EFI_MEMORY_SP))
+ continue;
+
if (desc->num_pages < nr_pages)
continue;
@@ -484,6 +497,12 @@ efi_status_t efi_parse_options(char const *cmdline)
__novamap = 1;
}
+ if (IS_ENABLED(CONFIG_EFI_SOFT_RESERVE) &&
+ !strncmp(str, "nosoftreserve", 13)) {
+ str += strlen("nosoftreserve");
+ efi_nosoftreserve = 1;
+ }
+
/* Group words together, delimited by "," */
while (*str && *str != ' ' && *str != ',')
str++;
diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h
index 7f1556fd867d..05739ae013c8 100644
--- a/drivers/firmware/efi/libstub/efistub.h
+++ b/drivers/firmware/efi/libstub/efistub.h
@@ -63,8 +63,6 @@ efi_status_t efi_random_alloc(efi_system_table_t *sys_table_arg,
efi_status_t check_platform_features(efi_system_table_t *sys_table_arg);
-efi_status_t efi_random_get_seed(efi_system_table_t *sys_table_arg);
-
void *get_efi_config_table(efi_system_table_t *sys_table, efi_guid_t guid);
/* Helper macros for the usual case of using simple C variables: */
diff --git a/drivers/firmware/efi/libstub/random.c b/drivers/firmware/efi/libstub/random.c
index b4b1d1dcb5fd..35edd7cfb6a1 100644
--- a/drivers/firmware/efi/libstub/random.c
+++ b/drivers/firmware/efi/libstub/random.c
@@ -9,6 +9,18 @@
#include "efistub.h"
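+
+/*
+ * 32- and 64-bit layouts of the RNG protocol, so efi_call_proto() can
+ * pick the correct member width under mixed mode (64-bit kernel on
+ * 32-bit firmware).
+ */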
+typedef struct efi_rng_protocol efi_rng_protocol_t;
+
+typedef struct {
+ u32 get_info;
+ u32 get_rng;
+} efi_rng_protocol_32_t;
+
+typedef struct {
+ u64 get_info;
+ u64 get_rng;
+} efi_rng_protocol_64_t;
+
struct efi_rng_protocol {
efi_status_t (*get_info)(struct efi_rng_protocol *,
unsigned long *, efi_guid_t *);
@@ -28,7 +40,7 @@ efi_status_t efi_get_random_bytes(efi_system_table_t *sys_table_arg,
if (status != EFI_SUCCESS)
return status;
- return rng->get_rng(rng, NULL, size, out);
+ return efi_call_proto(efi_rng_protocol, get_rng, rng, NULL, size, out);
}
/*
@@ -46,6 +58,10 @@ static unsigned long get_entry_num_slots(efi_memory_desc_t *md,
if (md->type != EFI_CONVENTIONAL_MEMORY)
return 0;
+ if (efi_soft_reserve_enabled() &&
+ (md->attribute & EFI_MEMORY_SP))
+ return 0;
+
region_end = min((u64)ULONG_MAX, md->phys_addr + md->num_pages*EFI_PAGE_SIZE - 1);
first_slot = round_up(md->phys_addr, align);
@@ -161,15 +177,16 @@ efi_status_t efi_random_get_seed(efi_system_table_t *sys_table_arg)
if (status != EFI_SUCCESS)
return status;
- status = rng->get_rng(rng, &rng_algo_raw, EFI_RANDOM_SEED_SIZE,
- seed->bits);
+ status = efi_call_proto(efi_rng_protocol, get_rng, rng, &rng_algo_raw,
+ EFI_RANDOM_SEED_SIZE, seed->bits);
+
if (status == EFI_UNSUPPORTED)
/*
* Use whatever algorithm we have available if the raw algorithm
* is not implemented.
*/
- status = rng->get_rng(rng, NULL, EFI_RANDOM_SEED_SIZE,
- seed->bits);
+ status = efi_call_proto(efi_rng_protocol, get_rng, rng, NULL,
+ EFI_RANDOM_SEED_SIZE, seed->bits);
if (status != EFI_SUCCESS)
goto err_freepool;
diff --git a/drivers/firmware/efi/x86_fake_mem.c b/drivers/firmware/efi/x86_fake_mem.c
new file mode 100644
index 000000000000..e5d6d5a1b240
--- /dev/null
+++ b/drivers/firmware/efi/x86_fake_mem.c
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2019 Intel Corporation. All rights reserved. */
+#include <linux/efi.h>
+#include <asm/e820/api.h>
+#include "fake_mem.h"
+
+void __init efi_fake_memmap_early(void)
+{
+ int i;
+
+ /*
+ * The late efi_fake_mem() call can handle all requests if
+ * EFI_MEMORY_SP support is disabled.
+ */
+ if (!efi_soft_reserve_enabled())
+ return;
+
+ if (!efi_enabled(EFI_MEMMAP) || !nr_fake_mem)
+ return;
+
+ /*
+ * Given that efi_fake_memmap() needs to perform memblock
+ * allocations it needs to run after e820__memblock_setup().
+ * However, if efi_fake_mem specifies EFI_MEMORY_SP for a given
+ * address range that potentially needs to mark the memory as
+ * reserved prior to e820__memblock_setup(). Update e820
+ * directly if EFI_MEMORY_SP is specified for an
+ * EFI_CONVENTIONAL_MEMORY descriptor.
+ */
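+ /*
+ * For example (illustrative values), booting with
+ * efi_fake_mem=4G@9G:0x40000 tags the 4G range at 9G with
+ * EFI_MEMORY_SP (0x40000) and is handled here before
+ * e820__memblock_setup().
+ */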
+ for (i = 0; i < nr_fake_mem; i++) {
+ struct efi_mem_range *mem = &efi_fake_mems[i];
+ efi_memory_desc_t *md;
+ u64 m_start, m_end;
+
+ if ((mem->attribute & EFI_MEMORY_SP) == 0)
+ continue;
+
+ m_start = mem->range.start;
+ m_end = mem->range.end;
+ for_each_efi_memory_desc(md) {
+ u64 start, end;
+
+ if (md->type != EFI_CONVENTIONAL_MEMORY)
+ continue;
+
+ start = md->phys_addr;
+ end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - 1;
+
+ /* skip descriptors that the fake range does not overlap */
+ if (m_start > end || m_end < start)
+ continue;
+
+ /*
+ * Trim the boundary of the e820 update to the
+ * descriptor in case the fake range overlaps
+ * !EFI_CONVENTIONAL_MEMORY
+ */
+ start = max(start, m_start);
+ end = min(end, m_end);
+
+ if (end <= start)
+ continue;
+ e820__range_update(start, end - start + 1, E820_TYPE_RAM,
+ E820_TYPE_SOFT_RESERVED);
+ e820__update_table(e820_table);
+ }
+ }
+}
diff --git a/drivers/firmware/psci/psci.c b/drivers/firmware/psci/psci.c
index 84f4ff351c62..b3b6c15e7b36 100644
--- a/drivers/firmware/psci/psci.c
+++ b/drivers/firmware/psci/psci.c
@@ -53,10 +53,18 @@ bool psci_tos_resident_on(int cpu)
}
struct psci_operations psci_ops = {
- .conduit = PSCI_CONDUIT_NONE,
+ .conduit = SMCCC_CONDUIT_NONE,
.smccc_version = SMCCC_VERSION_1_0,
};
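+
+/*
+ * SMCCC v1.1 calls share the conduit (SMC or HVC) probed for PSCI;
+ * report it only once SMCCC v1.1 or later is known to be implemented.
+ */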
+enum arm_smccc_conduit arm_smccc_1_1_get_conduit(void)
+{
+ if (psci_ops.smccc_version < SMCCC_VERSION_1_1)
+ return SMCCC_CONDUIT_NONE;
+
+ return psci_ops.conduit;
+}
+
typedef unsigned long (psci_fn)(unsigned long, unsigned long,
unsigned long, unsigned long);
static psci_fn *invoke_psci_fn;
@@ -212,13 +220,13 @@ static unsigned long psci_migrate_info_up_cpu(void)
0, 0, 0);
}
-static void set_conduit(enum psci_conduit conduit)
+static void set_conduit(enum arm_smccc_conduit conduit)
{
switch (conduit) {
- case PSCI_CONDUIT_HVC:
+ case SMCCC_CONDUIT_HVC:
invoke_psci_fn = __invoke_psci_fn_hvc;
break;
- case PSCI_CONDUIT_SMC:
+ case SMCCC_CONDUIT_SMC:
invoke_psci_fn = __invoke_psci_fn_smc;
break;
default:
@@ -240,9 +248,9 @@ static int get_set_conduit_method(struct device_node *np)
}
if (!strcmp("hvc", method)) {
- set_conduit(PSCI_CONDUIT_HVC);
+ set_conduit(SMCCC_CONDUIT_HVC);
} else if (!strcmp("smc", method)) {
- set_conduit(PSCI_CONDUIT_SMC);
+ set_conduit(SMCCC_CONDUIT_SMC);
} else {
pr_warn("invalid \"method\" property: %s\n", method);
return -EINVAL;
@@ -583,9 +591,9 @@ int __init psci_acpi_init(void)
pr_info("probing for conduit method from ACPI.\n");
if (acpi_psci_use_hvc())
- set_conduit(PSCI_CONDUIT_HVC);
+ set_conduit(SMCCC_CONDUIT_HVC);
else
- set_conduit(PSCI_CONDUIT_SMC);
+ set_conduit(SMCCC_CONDUIT_SMC);
return psci_probe();
}
diff --git a/drivers/firmware/stratix10-rsu.c b/drivers/firmware/stratix10-rsu.c
index bb008c019920..f8533338b018 100644
--- a/drivers/firmware/stratix10-rsu.c
+++ b/drivers/firmware/stratix10-rsu.c
@@ -20,7 +20,6 @@
#define RSU_VERSION_MASK GENMASK_ULL(63, 32)
#define RSU_ERROR_LOCATION_MASK GENMASK_ULL(31, 0)
#define RSU_ERROR_DETAIL_MASK GENMASK_ULL(63, 32)
-#define RSU_FW_VERSION_MASK GENMASK_ULL(15, 0)
#define RSU_TIMEOUT (msecs_to_jiffies(SVC_RSU_REQUEST_TIMEOUT_MS))
@@ -109,9 +108,12 @@ static void rsu_command_callback(struct stratix10_svc_client *client,
{
struct stratix10_rsu_priv *priv = client->priv;
- if (data->status != BIT(SVC_STATUS_RSU_OK))
- dev_err(client->dev, "RSU returned status is %i\n",
- data->status);
+ if (data->status == BIT(SVC_STATUS_RSU_NO_SUPPORT))
+ dev_warn(client->dev, "Secure FW doesn't support notify\n");
+ else if (data->status == BIT(SVC_STATUS_RSU_ERROR))
+ dev_err(client->dev, "Failure, returned status is %lu\n",
+ BIT(data->status));
+
complete(&priv->completion);
}
@@ -133,9 +135,11 @@ static void rsu_retry_callback(struct stratix10_svc_client *client,
if (data->status == BIT(SVC_STATUS_RSU_OK))
priv->retry_counter = *counter;
+ else if (data->status == BIT(SVC_STATUS_RSU_NO_SUPPORT))
+ dev_warn(client->dev, "Secure FW doesn't support retry\n");
else
- dev_err(client->dev, "Failed to get retry counter %i\n",
- data->status);
+ dev_err(client->dev, "Failed to get retry counter %lu\n",
+ BIT(data->status));
complete(&priv->completion);
}
@@ -333,15 +337,10 @@ static ssize_t notify_store(struct device *dev,
return ret;
}
- /* only 19.3 or late version FW supports retry counter feature */
- if (FIELD_GET(RSU_FW_VERSION_MASK, priv->status.version)) {
- ret = rsu_send_msg(priv, COMMAND_RSU_RETRY,
- 0, rsu_retry_callback);
- if (ret) {
- dev_err(dev,
- "Error, getting RSU retry %i\n", ret);
- return ret;
- }
+ ret = rsu_send_msg(priv, COMMAND_RSU_RETRY, 0, rsu_retry_callback);
+ if (ret) {
+ dev_err(dev, "Error, getting RSU retry %i\n", ret);
+ return ret;
}
return count;
@@ -413,15 +412,10 @@ static int stratix10_rsu_probe(struct platform_device *pdev)
stratix10_svc_free_channel(priv->chan);
}
- /* only 19.3 or late version FW supports retry counter feature */
- if (FIELD_GET(RSU_FW_VERSION_MASK, priv->status.version)) {
- ret = rsu_send_msg(priv, COMMAND_RSU_RETRY, 0,
- rsu_retry_callback);
- if (ret) {
- dev_err(dev,
- "Error, getting RSU retry %i\n", ret);
- stratix10_svc_free_channel(priv->chan);
- }
+ ret = rsu_send_msg(priv, COMMAND_RSU_RETRY, 0, rsu_retry_callback);
+ if (ret) {
+ dev_err(dev, "Error, getting RSU retry %i\n", ret);
+ stratix10_svc_free_channel(priv->chan);
}
return ret;
diff --git a/drivers/firmware/stratix10-svc.c b/drivers/firmware/stratix10-svc.c
index b485321189e1..c6c31402848d 100644
--- a/drivers/firmware/stratix10-svc.c
+++ b/drivers/firmware/stratix10-svc.c
@@ -493,8 +493,24 @@ static int svc_normal_to_secure_thread(void *data)
pdata->chan->scl->receive_cb(pdata->chan->scl, cbdata);
break;
default:
- pr_warn("it shouldn't happen\n");
+ pr_warn("Secure firmware doesn't support...\n");
+
+ /*
+ * be compatible with older version firmware which
+ * doesn't support RSU notify or retry
+ */
+ if ((pdata->command == COMMAND_RSU_RETRY) ||
+ (pdata->command == COMMAND_RSU_NOTIFY)) {
+ cbdata->status =
+ BIT(SVC_STATUS_RSU_NO_SUPPORT);
+ cbdata->kaddr1 = NULL;
+ cbdata->kaddr2 = NULL;
+ cbdata->kaddr3 = NULL;
+ pdata->chan->scl->receive_cb(
+ pdata->chan->scl, cbdata);
+ }
break;
}
};
diff --git a/drivers/fpga/Kconfig b/drivers/fpga/Kconfig
index 73c779e920ed..72380e1d31c7 100644
--- a/drivers/fpga/Kconfig
+++ b/drivers/fpga/Kconfig
@@ -156,7 +156,7 @@ config FPGA_DFL
config FPGA_DFL_FME
tristate "FPGA DFL FME Driver"
- depends on FPGA_DFL
+ depends on FPGA_DFL && HWMON
help
The FPGA Management Engine (FME) is a feature device implemented
under Device Feature List (DFL) framework. Select this option to
diff --git a/drivers/fpga/dfl-fme-main.c b/drivers/fpga/dfl-fme-main.c
index 4d78e182878f..7c930e6b314d 100644
--- a/drivers/fpga/dfl-fme-main.c
+++ b/drivers/fpga/dfl-fme-main.c
@@ -14,6 +14,8 @@
* Henry Mitchel <henry.mitchel@intel.com>
*/
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/uaccess.h>
@@ -181,6 +183,381 @@ static const struct dfl_feature_ops fme_hdr_ops = {
.ioctl = fme_hdr_ioctl,
};
+#define FME_THERM_THRESHOLD 0x8
+#define TEMP_THRESHOLD1 GENMASK_ULL(6, 0)
+#define TEMP_THRESHOLD1_EN BIT_ULL(7)
+#define TEMP_THRESHOLD2 GENMASK_ULL(14, 8)
+#define TEMP_THRESHOLD2_EN BIT_ULL(15)
+#define TRIP_THRESHOLD GENMASK_ULL(30, 24)
+#define TEMP_THRESHOLD1_STATUS BIT_ULL(32) /* threshold1 reached */
+#define TEMP_THRESHOLD2_STATUS BIT_ULL(33) /* threshold2 reached */
+/* threshold1 policy: 0 - AP2 (90% throttle) / 1 - AP1 (50% throttle) */
+#define TEMP_THRESHOLD1_POLICY BIT_ULL(44)
+
+#define FME_THERM_RDSENSOR_FMT1 0x10
+#define FPGA_TEMPERATURE GENMASK_ULL(6, 0)
+
+#define FME_THERM_CAP 0x20
+#define THERM_NO_THROTTLE BIT_ULL(0)
+
+#define MD_PRE_DEG
+
+static bool fme_thermal_throttle_support(void __iomem *base)
+{
+ u64 v = readq(base + FME_THERM_CAP);
+
+ return !FIELD_GET(THERM_NO_THROTTLE, v);
+}
+
+static umode_t thermal_hwmon_attrs_visible(const void *drvdata,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ const struct dfl_feature *feature = drvdata;
+
+ /* temperature is always supported, and check hardware cap for others */
+ if (attr == hwmon_temp_input)
+ return 0444;
+
+ return fme_thermal_throttle_support(feature->ioaddr) ? 0444 : 0;
+}
+
+static int thermal_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct dfl_feature *feature = dev_get_drvdata(dev);
+ u64 v;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ v = readq(feature->ioaddr + FME_THERM_RDSENSOR_FMT1);
+ *val = (long)(FIELD_GET(FPGA_TEMPERATURE, v) * 1000);
+ break;
+ case hwmon_temp_max:
+ v = readq(feature->ioaddr + FME_THERM_THRESHOLD);
+ *val = (long)(FIELD_GET(TEMP_THRESHOLD1, v) * 1000);
+ break;
+ case hwmon_temp_crit:
+ v = readq(feature->ioaddr + FME_THERM_THRESHOLD);
+ *val = (long)(FIELD_GET(TEMP_THRESHOLD2, v) * 1000);
+ break;
+ case hwmon_temp_emergency:
+ v = readq(feature->ioaddr + FME_THERM_THRESHOLD);
+ *val = (long)(FIELD_GET(TRIP_THRESHOLD, v) * 1000);
+ break;
+ case hwmon_temp_max_alarm:
+ v = readq(feature->ioaddr + FME_THERM_THRESHOLD);
+ *val = (long)FIELD_GET(TEMP_THRESHOLD1_STATUS, v);
+ break;
+ case hwmon_temp_crit_alarm:
+ v = readq(feature->ioaddr + FME_THERM_THRESHOLD);
+ *val = (long)FIELD_GET(TEMP_THRESHOLD2_STATUS, v);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static const struct hwmon_ops thermal_hwmon_ops = {
+ .is_visible = thermal_hwmon_attrs_visible,
+ .read = thermal_hwmon_read,
+};
+
+static const struct hwmon_channel_info *thermal_hwmon_info[] = {
+ HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_EMERGENCY |
+ HWMON_T_MAX | HWMON_T_MAX_ALARM |
+ HWMON_T_CRIT | HWMON_T_CRIT_ALARM),
+ NULL
+};
+
+static const struct hwmon_chip_info thermal_hwmon_chip_info = {
+ .ops = &thermal_hwmon_ops,
+ .info = thermal_hwmon_info,
+};
+
+static ssize_t temp1_max_policy_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dfl_feature *feature = dev_get_drvdata(dev);
+ u64 v;
+
+ v = readq(feature->ioaddr + FME_THERM_THRESHOLD);
+
+ return sprintf(buf, "%u\n",
+ (unsigned int)FIELD_GET(TEMP_THRESHOLD1_POLICY, v));
+}
+
+static DEVICE_ATTR_RO(temp1_max_policy);
+
+static struct attribute *thermal_extra_attrs[] = {
+ &dev_attr_temp1_max_policy.attr,
+ NULL,
+};
+
+static umode_t thermal_extra_attrs_visible(struct kobject *kobj,
+ struct attribute *attr, int index)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct dfl_feature *feature = dev_get_drvdata(dev);
+
+ return fme_thermal_throttle_support(feature->ioaddr) ? attr->mode : 0;
+}
+
+static const struct attribute_group thermal_extra_group = {
+ .attrs = thermal_extra_attrs,
+ .is_visible = thermal_extra_attrs_visible,
+};
+__ATTRIBUTE_GROUPS(thermal_extra);
+
+static int fme_thermal_mgmt_init(struct platform_device *pdev,
+ struct dfl_feature *feature)
+{
+ struct device *hwmon;
+
+ /*
+ * Create a hwmon device so userspace can monitor temperature and the
+ * threshold information:
+ *
+ * temp1_input -> FPGA device temperature
+ * temp1_max -> hardware threshold 1 -> 50% or 90% throttling
+ * temp1_crit -> hardware threshold 2 -> 100% throttling
+ * temp1_emergency -> hardware trip_threshold to shutdown FPGA
+ * temp1_max_alarm -> hardware threshold 1 alarm
+ * temp1_crit_alarm -> hardware threshold 2 alarm
+ *
+ * Also create device-specific sysfs interfaces, e.g. temp1_max_policy,
+ * which reports the hardware throttling action taken at threshold 1
+ * (50% vs 90%).
+ *
+ * If the hardware doesn't support automatic throttling based on these
+ * thresholds, none of the interfaces above is visible except
+ * temp1_input.
+ */
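+ /*
+ * Example (standard hwmon sysfs layout; the hwmonN index varies):
+ * reading /sys/class/hwmon/hwmonN/temp1_input returns the FPGA
+ * temperature in millidegrees Celsius.
+ */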
+ hwmon = devm_hwmon_device_register_with_info(&pdev->dev,
+ "dfl_fme_thermal", feature,
+ &thermal_hwmon_chip_info,
+ thermal_extra_groups);
+ if (IS_ERR(hwmon)) {
+ dev_err(&pdev->dev, "Failed to register thermal hwmon\n");
+ return PTR_ERR(hwmon);
+ }
+
+ return 0;
+}
+
+static const struct dfl_feature_id fme_thermal_mgmt_id_table[] = {
+ {.id = FME_FEATURE_ID_THERMAL_MGMT,},
+ {0,}
+};
+
+static const struct dfl_feature_ops fme_thermal_mgmt_ops = {
+ .init = fme_thermal_mgmt_init,
+};
+
+#define FME_PWR_STATUS 0x8
+#define FME_LATENCY_TOLERANCE BIT_ULL(18)
+#define PWR_CONSUMED GENMASK_ULL(17, 0)
+
+#define FME_PWR_THRESHOLD 0x10
+#define PWR_THRESHOLD1 GENMASK_ULL(6, 0) /* in Watts */
+#define PWR_THRESHOLD2 GENMASK_ULL(14, 8) /* in Watts */
+#define PWR_THRESHOLD_MAX 0x7f /* in Watts */
+#define PWR_THRESHOLD1_STATUS BIT_ULL(16)
+#define PWR_THRESHOLD2_STATUS BIT_ULL(17)
+
+#define FME_PWR_XEON_LIMIT 0x18
+#define XEON_PWR_LIMIT GENMASK_ULL(14, 0) /* in 0.1 Watts */
+#define XEON_PWR_EN BIT_ULL(15)
+#define FME_PWR_FPGA_LIMIT 0x20
+#define FPGA_PWR_LIMIT GENMASK_ULL(14, 0) /* in 0.1 Watts */
+#define FPGA_PWR_EN BIT_ULL(15)
+
+static int power_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct dfl_feature *feature = dev_get_drvdata(dev);
+ u64 v;
+
+ switch (attr) {
+ case hwmon_power_input:
+ v = readq(feature->ioaddr + FME_PWR_STATUS);
+ *val = (long)(FIELD_GET(PWR_CONSUMED, v) * 1000000);
+ break;
+ case hwmon_power_max:
+ v = readq(feature->ioaddr + FME_PWR_THRESHOLD);
+ *val = (long)(FIELD_GET(PWR_THRESHOLD1, v) * 1000000);
+ break;
+ case hwmon_power_crit:
+ v = readq(feature->ioaddr + FME_PWR_THRESHOLD);
+ *val = (long)(FIELD_GET(PWR_THRESHOLD2, v) * 1000000);
+ break;
+ case hwmon_power_max_alarm:
+ v = readq(feature->ioaddr + FME_PWR_THRESHOLD);
+ *val = (long)FIELD_GET(PWR_THRESHOLD1_STATUS, v);
+ break;
+ case hwmon_power_crit_alarm:
+ v = readq(feature->ioaddr + FME_PWR_THRESHOLD);
+ *val = (long)FIELD_GET(PWR_THRESHOLD2_STATUS, v);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int power_hwmon_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev->parent);
+ struct dfl_feature *feature = dev_get_drvdata(dev);
+ int ret = 0;
+ u64 v;
+
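+ /* hwmon power values are in microwatts; the registers hold watts */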
+ val = clamp_val(val / 1000000, 0, PWR_THRESHOLD_MAX);
+
+ mutex_lock(&pdata->lock);
+
+ switch (attr) {
+ case hwmon_power_max:
+ v = readq(feature->ioaddr + FME_PWR_THRESHOLD);
+ v &= ~PWR_THRESHOLD1;
+ v |= FIELD_PREP(PWR_THRESHOLD1, val);
+ writeq(v, feature->ioaddr + FME_PWR_THRESHOLD);
+ break;
+ case hwmon_power_crit:
+ v = readq(feature->ioaddr + FME_PWR_THRESHOLD);
+ v &= ~PWR_THRESHOLD2;
+ v |= FIELD_PREP(PWR_THRESHOLD2, val);
+ writeq(v, feature->ioaddr + FME_PWR_THRESHOLD);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ mutex_unlock(&pdata->lock);
+
+ return ret;
+}
+
+static umode_t power_hwmon_attrs_visible(const void *drvdata,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ switch (attr) {
+ case hwmon_power_input:
+ case hwmon_power_max_alarm:
+ case hwmon_power_crit_alarm:
+ return 0444;
+ case hwmon_power_max:
+ case hwmon_power_crit:
+ return 0644;
+ }
+
+ return 0;
+}
+
+static const struct hwmon_ops power_hwmon_ops = {
+ .is_visible = power_hwmon_attrs_visible,
+ .read = power_hwmon_read,
+ .write = power_hwmon_write,
+};
+
+static const struct hwmon_channel_info *power_hwmon_info[] = {
+ HWMON_CHANNEL_INFO(power, HWMON_P_INPUT |
+ HWMON_P_MAX | HWMON_P_MAX_ALARM |
+ HWMON_P_CRIT | HWMON_P_CRIT_ALARM),
+ NULL
+};
+
+static const struct hwmon_chip_info power_hwmon_chip_info = {
+ .ops = &power_hwmon_ops,
+ .info = power_hwmon_info,
+};
+
+static ssize_t power1_xeon_limit_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dfl_feature *feature = dev_get_drvdata(dev);
+ u16 xeon_limit = 0;
+ u64 v;
+
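+ /* the register unit is 0.1 W; report microwatts as hwmon does */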
+ v = readq(feature->ioaddr + FME_PWR_XEON_LIMIT);
+
+ if (FIELD_GET(XEON_PWR_EN, v))
+ xeon_limit = FIELD_GET(XEON_PWR_LIMIT, v);
+
+ return sprintf(buf, "%u\n", xeon_limit * 100000);
+}
+
+static ssize_t power1_fpga_limit_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dfl_feature *feature = dev_get_drvdata(dev);
+ u16 fpga_limit = 0;
+ u64 v;
+
+ v = readq(feature->ioaddr + FME_PWR_FPGA_LIMIT);
+
+ if (FIELD_GET(FPGA_PWR_EN, v))
+ fpga_limit = FIELD_GET(FPGA_PWR_LIMIT, v);
+
+ return sprintf(buf, "%u\n", fpga_limit * 100000);
+}
+
+static ssize_t power1_ltr_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dfl_feature *feature = dev_get_drvdata(dev);
+ u64 v;
+
+ v = readq(feature->ioaddr + FME_PWR_STATUS);
+
+ return sprintf(buf, "%u\n",
+ (unsigned int)FIELD_GET(FME_LATENCY_TOLERANCE, v));
+}
+
+static DEVICE_ATTR_RO(power1_xeon_limit);
+static DEVICE_ATTR_RO(power1_fpga_limit);
+static DEVICE_ATTR_RO(power1_ltr);
+
+static struct attribute *power_extra_attrs[] = {
+ &dev_attr_power1_xeon_limit.attr,
+ &dev_attr_power1_fpga_limit.attr,
+ &dev_attr_power1_ltr.attr,
+ NULL
+};
+
+ATTRIBUTE_GROUPS(power_extra);
+
+static int fme_power_mgmt_init(struct platform_device *pdev,
+ struct dfl_feature *feature)
+{
+ struct device *hwmon;
+
+ hwmon = devm_hwmon_device_register_with_info(&pdev->dev,
+ "dfl_fme_power", feature,
+ &power_hwmon_chip_info,
+ power_extra_groups);
+ if (IS_ERR(hwmon)) {
+ dev_err(&pdev->dev, "Failed to register power hwmon\n");
+ return PTR_ERR(hwmon);
+ }
+
+ return 0;
+}
+
+static const struct dfl_feature_id fme_power_mgmt_id_table[] = {
+ {.id = FME_FEATURE_ID_POWER_MGMT,},
+ {0,}
+};
+
+static const struct dfl_feature_ops fme_power_mgmt_ops = {
+ .init = fme_power_mgmt_init,
+};
+
static struct dfl_feature_driver fme_feature_drvs[] = {
{
.id_table = fme_hdr_id_table,
@@ -195,6 +572,14 @@ static struct dfl_feature_driver fme_feature_drvs[] = {
.ops = &fme_global_err_ops,
},
{
+ .id_table = fme_thermal_mgmt_id_table,
+ .ops = &fme_thermal_mgmt_ops,
+ },
+ {
+ .id_table = fme_power_mgmt_id_table,
+ .ops = &fme_power_mgmt_ops,
+ },
+ {
.ops = NULL,
},
};
diff --git a/drivers/fpga/zynq-fpga.c b/drivers/fpga/zynq-fpga.c
index 31ef38e38537..ee7765049607 100644
--- a/drivers/fpga/zynq-fpga.c
+++ b/drivers/fpga/zynq-fpga.c
@@ -578,10 +578,8 @@ static int zynq_fpga_probe(struct platform_device *pdev)
init_completion(&priv->dma_done);
priv->irq = platform_get_irq(pdev, 0);
- if (priv->irq < 0) {
- dev_err(dev, "No IRQ available\n");
+ if (priv->irq < 0)
return priv->irq;
- }
priv->clk = devm_clk_get(dev, "ref_clk");
if (IS_ERR(priv->clk)) {
diff --git a/drivers/fsi/Kconfig b/drivers/fsi/Kconfig
index c612db7a914a..92ce6d85802c 100644
--- a/drivers/fsi/Kconfig
+++ b/drivers/fsi/Kconfig
@@ -53,6 +53,14 @@ config FSI_MASTER_AST_CF
lines driven by the internal ColdFire coprocessor. This requires
the corresponding machine specific ColdFire firmware to be available.
+config FSI_MASTER_ASPEED
+ tristate "FSI ASPEED master"
+ help
+ This option enables an FSI master that is present behind an OPB bridge
+ in the AST2600.
+
+ Enable it for your BMC kernel in an OpenPower or IBM Power system.
+
config FSI_SCOM
tristate "SCOM FSI client device driver"
---help---
diff --git a/drivers/fsi/Makefile b/drivers/fsi/Makefile
index e4a2ff043c32..da218a1ad8e1 100644
--- a/drivers/fsi/Makefile
+++ b/drivers/fsi/Makefile
@@ -2,6 +2,7 @@
obj-$(CONFIG_FSI) += fsi-core.o
obj-$(CONFIG_FSI_MASTER_HUB) += fsi-master-hub.o
+obj-$(CONFIG_FSI_MASTER_ASPEED) += fsi-master-aspeed.o
obj-$(CONFIG_FSI_MASTER_GPIO) += fsi-master-gpio.o
obj-$(CONFIG_FSI_MASTER_AST_CF) += fsi-master-ast-cf.o
obj-$(CONFIG_FSI_SCOM) += fsi-scom.o
diff --git a/drivers/fsi/fsi-core.c b/drivers/fsi/fsi-core.c
index 1f76740f33b6..8244da8a7241 100644
--- a/drivers/fsi/fsi-core.c
+++ b/drivers/fsi/fsi-core.c
@@ -544,6 +544,31 @@ static int fsi_slave_scan(struct fsi_slave *slave)
return 0;
}
+static unsigned long aligned_access_size(size_t offset, size_t count)
+{
+ unsigned long offset_unit, count_unit;
+
+ /* Criteria:
+ *
+ * 1. Access size must be less than or equal to the maximum access
+ * width or the highest power-of-two factor of offset
+ * 2. Access size must be less than or equal to the amount specified by
+ * count
+ *
+ * The access width is optimal if we can calculate 1 to be strictly
+ * equal while still satisfying 2.
+ */
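+ /*
+ * Worked example (illustrative): offset = 6, count = 9 gives
+ * offset_unit = 2 (lowest set bit of 6, capped at 4) and
+ * count_unit = 8 (highest power of two <= 9), so the access
+ * size is BIT(ctz(2 | 8)) = 2 bytes.
+ */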
+
+ /* Find 1 by the bottom bit of offset (with a 4 byte access cap) */
+ offset_unit = BIT(__builtin_ctzl(offset | 4));
+
+ /* Find 2 by the top bit of count */
+ count_unit = BIT(8 * sizeof(unsigned long) - 1 - __builtin_clzl(count));
+
+ /* Constrain the maximum access width to the minimum of both criteria */
+ return BIT(__builtin_ctzl(offset_unit | count_unit));
+}
+
static ssize_t fsi_slave_sysfs_raw_read(struct file *file,
struct kobject *kobj, struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
@@ -559,8 +584,7 @@ static ssize_t fsi_slave_sysfs_raw_read(struct file *file,
return -EINVAL;
for (total_len = 0; total_len < count; total_len += read_len) {
- read_len = min_t(size_t, count, 4);
- read_len -= off & 0x3;
+ read_len = aligned_access_size(off, count - total_len);
rc = fsi_slave_read(slave, off, buf + total_len, read_len);
if (rc)
@@ -587,8 +611,7 @@ static ssize_t fsi_slave_sysfs_raw_write(struct file *file,
return -EINVAL;
for (total_len = 0; total_len < count; total_len += write_len) {
- write_len = min_t(size_t, count, 4);
- write_len -= off & 0x3;
+ write_len = aligned_access_size(off, count - total_len);
rc = fsi_slave_write(slave, off, buf + total_len, write_len);
if (rc)
@@ -1241,6 +1264,19 @@ static ssize_t master_break_store(struct device *dev,
static DEVICE_ATTR(break, 0200, NULL, master_break_store);
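+
+/*
+ * Expose the break and rescan attributes via the class's dev_groups so
+ * they are created together with the device during device_register(),
+ * rather than being added afterwards where userspace could race them.
+ */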
+static struct attribute *master_attrs[] = {
+ &dev_attr_break.attr,
+ &dev_attr_rescan.attr,
+ NULL
+};
+
+ATTRIBUTE_GROUPS(master);
+
+static struct class fsi_master_class = {
+ .name = "fsi-master",
+ .dev_groups = master_groups,
+};
+
int fsi_master_register(struct fsi_master *master)
{
int rc;
@@ -1249,6 +1285,7 @@ int fsi_master_register(struct fsi_master *master)
mutex_init(&master->scan_lock);
master->idx = ida_simple_get(&master_ida, 0, INT_MAX, GFP_KERNEL);
dev_set_name(&master->dev, "fsi%d", master->idx);
+ master->dev.class = &fsi_master_class;
rc = device_register(&master->dev);
if (rc) {
@@ -1256,20 +1293,6 @@ int fsi_master_register(struct fsi_master *master)
return rc;
}
- rc = device_create_file(&master->dev, &dev_attr_rescan);
- if (rc) {
- device_del(&master->dev);
- ida_simple_remove(&master_ida, master->idx);
- return rc;
- }
-
- rc = device_create_file(&master->dev, &dev_attr_break);
- if (rc) {
- device_del(&master->dev);
- ida_simple_remove(&master_ida, master->idx);
- return rc;
- }
-
np = dev_of_node(&master->dev);
if (!of_property_read_bool(np, "no-scan-on-init")) {
mutex_lock(&master->scan_lock);
@@ -1350,8 +1373,15 @@ static int __init fsi_init(void)
rc = bus_register(&fsi_bus_type);
if (rc)
goto fail_bus;
+
+ rc = class_register(&fsi_master_class);
+ if (rc)
+ goto fail_class;
+
return 0;
+ fail_class:
+ bus_unregister(&fsi_bus_type);
fail_bus:
unregister_chrdev_region(fsi_base_dev, FSI_CHAR_MAX_DEVICES);
return rc;
@@ -1360,6 +1390,7 @@ postcore_initcall(fsi_init);
static void fsi_exit(void)
{
+ class_unregister(&fsi_master_class);
bus_unregister(&fsi_bus_type);
unregister_chrdev_region(fsi_base_dev, FSI_CHAR_MAX_DEVICES);
ida_destroy(&fsi_minor_ida);
diff --git a/drivers/fsi/fsi-master-aspeed.c b/drivers/fsi/fsi-master-aspeed.c
new file mode 100644
index 000000000000..f49742b310c2
--- /dev/null
+++ b/drivers/fsi/fsi-master-aspeed.c
@@ -0,0 +1,544 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+// Copyright (C) IBM Corporation 2018
+// FSI master driver for AST2600
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/fsi.h>
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/iopoll.h>
+
+#include "fsi-master.h"
+
+struct fsi_master_aspeed {
+ struct fsi_master master;
+ struct device *dev;
+ void __iomem *base;
+ struct clk *clk;
+};
+
+#define to_fsi_master_aspeed(m) \
+ container_of(m, struct fsi_master_aspeed, master)
+
+/* Control register (size 0x400) */
+static const u32 ctrl_base = 0x80000000;
+
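+/* FSI address space: one FSI_HUB_LINK_SIZE window per link */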
+static const u32 fsi_base = 0xa0000000;
+
+#define OPB_FSI_VER 0x00
+#define OPB_TRIGGER 0x04
+#define OPB_CTRL_BASE 0x08
+#define OPB_FSI_BASE 0x0c
+#define OPB_CLK_SYNC 0x3c
+#define OPB_IRQ_CLEAR 0x40
+#define OPB_IRQ_MASK 0x44
+#define OPB_IRQ_STATUS 0x48
+
+#define OPB0_SELECT 0x10
+#define OPB0_RW 0x14
+#define OPB0_XFER_SIZE 0x18
+#define OPB0_FSI_ADDR 0x1c
+#define OPB0_FSI_DATA_W 0x20
+#define OPB0_STATUS 0x80
+#define OPB0_FSI_DATA_R 0x84
+
+#define OPB0_WRITE_ORDER1 0x4c
+#define OPB0_WRITE_ORDER2 0x50
+#define OPB1_WRITE_ORDER1 0x54
+#define OPB1_WRITE_ORDER2 0x58
+#define OPB0_READ_ORDER1 0x5c
+#define OPB1_READ_ORDER2 0x60
+
+#define OPB_RETRY_COUNTER 0x64
+
+/* OPBn_STATUS */
+#define STATUS_HALFWORD_ACK BIT(0)
+#define STATUS_FULLWORD_ACK BIT(1)
+#define STATUS_ERR_ACK BIT(2)
+#define STATUS_RETRY BIT(3)
+#define STATUS_TIMEOUT BIT(4)
+
+/* OPB_IRQ_MASK */
+#define OPB1_XFER_ACK_EN BIT(17)
+#define OPB0_XFER_ACK_EN BIT(16)
+
+/* OPB_RW */
+#define CMD_READ BIT(0)
+#define CMD_WRITE 0
+
+/* OPBx_XFER_SIZE */
+#define XFER_FULLWORD (BIT(1) | BIT(0))
+#define XFER_HALFWORD (BIT(0))
+#define XFER_BYTE (0)
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/fsi_master_aspeed.h>
+
+#define FSI_LINK_ENABLE_SETUP_TIME 10 /* in ms */
+
+#define DEFAULT_DIVISOR 14
+#define OPB_POLL_TIMEOUT 10000
+
+static int __opb_write(struct fsi_master_aspeed *aspeed, u32 addr,
+ u32 val, u32 transfer_size)
+{
+ void __iomem *base = aspeed->base;
+ u32 reg, status;
+ int ret;
+
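+ /*
+ * Program the OPB0 window (direction, size, address, data), then
+ * trigger the transfer and poll for the ack interrupt status.
+ */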
+ writel(CMD_WRITE, base + OPB0_RW);
+ writel(transfer_size, base + OPB0_XFER_SIZE);
+ writel(addr, base + OPB0_FSI_ADDR);
+ writel(val, base + OPB0_FSI_DATA_W);
+ writel(0x1, base + OPB_IRQ_CLEAR);
+ writel(0x1, base + OPB_TRIGGER);
+
+ ret = readl_poll_timeout(base + OPB_IRQ_STATUS, reg,
+ (reg & OPB0_XFER_ACK_EN) != 0,
+ 0, OPB_POLL_TIMEOUT);
+
+ status = readl(base + OPB0_STATUS);
+
+ trace_fsi_master_aspeed_opb_write(addr, val, transfer_size, status, reg);
+
+ /* Return error when poll timed out */
+ if (ret)
+ return ret;
+
+ /* Command failed, master will reset */
+ if (status & STATUS_ERR_ACK)
+ return -EIO;
+
+ return 0;
+}
+
+static int opb_writeb(struct fsi_master_aspeed *aspeed, u32 addr, u8 val)
+{
+ return __opb_write(aspeed, addr, val, XFER_BYTE);
+}
+
+static int opb_writew(struct fsi_master_aspeed *aspeed, u32 addr, __be16 val)
+{
+ return __opb_write(aspeed, addr, (__force u16)val, XFER_HALFWORD);
+}
+
+static int opb_writel(struct fsi_master_aspeed *aspeed, u32 addr, __be32 val)
+{
+ return __opb_write(aspeed, addr, (__force u32)val, XFER_FULLWORD);
+}
+
+static int __opb_read(struct fsi_master_aspeed *aspeed, uint32_t addr,
+ u32 transfer_size, void *out)
+{
+ void __iomem *base = aspeed->base;
+ u32 result, reg;
+ int status, ret;
+
+ writel(CMD_READ, base + OPB0_RW);
+ writel(transfer_size, base + OPB0_XFER_SIZE);
+ writel(addr, base + OPB0_FSI_ADDR);
+ writel(0x1, base + OPB_IRQ_CLEAR);
+ writel(0x1, base + OPB_TRIGGER);
+
+ ret = readl_poll_timeout(base + OPB_IRQ_STATUS, reg,
+ (reg & OPB0_XFER_ACK_EN) != 0,
+ 0, OPB_POLL_TIMEOUT);
+
+ status = readl(base + OPB0_STATUS);
+
+ result = readl(base + OPB0_FSI_DATA_R);
+
+ trace_fsi_master_aspeed_opb_read(addr, transfer_size, result,
+ readl(base + OPB0_STATUS),
+ reg);
+
+ /* Return error when poll timed out */
+ if (ret)
+ return ret;
+
+ /* Command failed, master will reset */
+ if (status & STATUS_ERR_ACK)
+ return -EIO;
+
+ if (out) {
+ switch (transfer_size) {
+ case XFER_BYTE:
+ *(u8 *)out = result;
+ break;
+ case XFER_HALFWORD:
+ *(u16 *)out = result;
+ break;
+ case XFER_FULLWORD:
+ *(u32 *)out = result;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ }
+
+ return 0;
+}
+
+static int opb_readl(struct fsi_master_aspeed *aspeed, uint32_t addr, __be32 *out)
+{
+ return __opb_read(aspeed, addr, XFER_FULLWORD, out);
+}
+
+static int opb_readw(struct fsi_master_aspeed *aspeed, uint32_t addr, __be16 *out)
+{
+ return __opb_read(aspeed, addr, XFER_HALFWORD, (void *)out);
+}
+
+static int opb_readb(struct fsi_master_aspeed *aspeed, uint32_t addr, u8 *out)
+{
+ return __opb_read(aspeed, addr, XFER_BYTE, (void *)out);
+}
+
+static int check_errors(struct fsi_master_aspeed *aspeed, int err)
+{
+ int ret;
+
+ if (trace_fsi_master_aspeed_opb_error_enabled()) {
+ __be32 mresp0, mstap0, mesrb0;
+
+ opb_readl(aspeed, ctrl_base + FSI_MRESP0, &mresp0);
+ opb_readl(aspeed, ctrl_base + FSI_MSTAP0, &mstap0);
+ opb_readl(aspeed, ctrl_base + FSI_MESRB0, &mesrb0);
+
+ trace_fsi_master_aspeed_opb_error(
+ be32_to_cpu(mresp0),
+ be32_to_cpu(mstap0),
+ be32_to_cpu(mesrb0));
+ }
+
+ if (err == -EIO) {
+ /* Check MAEB (0x70) ? */
+
+ /* Then clear errors in master */
+ ret = opb_writel(aspeed, ctrl_base + FSI_MRESP0,
+ cpu_to_be32(FSI_MRESP_RST_ALL_MASTER));
+ if (ret) {
+ /* TODO: log? return different code? */
+ return ret;
+ }
+ /* TODO: confirm that 0x70 was okay */
+ }
+
+ /* This will pass through timeout errors */
+ return err;
+}
+
+static int aspeed_master_read(struct fsi_master *master, int link,
+ uint8_t id, uint32_t addr, void *val, size_t size)
+{
+ struct fsi_master_aspeed *aspeed = to_fsi_master_aspeed(master);
+ int ret;
+
+ if (id != 0)
+ return -EINVAL;
+
+ addr += link * FSI_HUB_LINK_SIZE;
+
+ switch (size) {
+ case 1:
+ ret = opb_readb(aspeed, fsi_base + addr, val);
+ break;
+ case 2:
+ ret = opb_readw(aspeed, fsi_base + addr, val);
+ break;
+ case 4:
+ ret = opb_readl(aspeed, fsi_base + addr, val);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = check_errors(aspeed, ret);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int aspeed_master_write(struct fsi_master *master, int link,
+ uint8_t id, uint32_t addr, const void *val, size_t size)
+{
+ struct fsi_master_aspeed *aspeed = to_fsi_master_aspeed(master);
+ int ret;
+
+ if (id != 0)
+ return -EINVAL;
+
+ addr += link * FSI_HUB_LINK_SIZE;
+
+ switch (size) {
+ case 1:
+ ret = opb_writeb(aspeed, fsi_base + addr, *(u8 *)val);
+ break;
+ case 2:
+ ret = opb_writew(aspeed, fsi_base + addr, *(__be16 *)val);
+ break;
+ case 4:
+ ret = opb_writel(aspeed, fsi_base + addr, *(__be32 *)val);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = check_errors(aspeed, ret);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int aspeed_master_link_enable(struct fsi_master *master, int link)
+{
+ struct fsi_master_aspeed *aspeed = to_fsi_master_aspeed(master);
+ int idx, bit, ret;
+ __be32 reg, result;
+
+ idx = link / 32;
+ bit = link % 32;
+
+ reg = cpu_to_be32(0x80000000 >> bit);
+
+ ret = opb_writel(aspeed, ctrl_base + FSI_MSENP0 + (4 * idx), reg);
+ if (ret)
+ return ret;
+
+ mdelay(FSI_LINK_ENABLE_SETUP_TIME);
+
+ ret = opb_readl(aspeed, ctrl_base + FSI_MENP0 + (4 * idx), &result);
+ if (ret)
+ return ret;
+
+ if (result != reg) {
+ dev_err(aspeed->dev, "%s failed: %08x\n", __func__,
+ be32_to_cpu(result));
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int aspeed_master_term(struct fsi_master *master, int link, uint8_t id)
+{
+ uint32_t addr;
+ __be32 cmd;
+
+ addr = 0x4;
+ cmd = cpu_to_be32(0xecc00000);
+
+ return aspeed_master_write(master, link, id, addr, &cmd, 4);
+}
+
+static int aspeed_master_break(struct fsi_master *master, int link)
+{
+ uint32_t addr;
+ __be32 cmd;
+
+ addr = 0x0;
+ cmd = cpu_to_be32(0xc0de0000);
+
+ return aspeed_master_write(master, link, 0, addr, &cmd, 4);
+}
+
+static void aspeed_master_release(struct device *dev)
+{
+ struct fsi_master_aspeed *aspeed =
+ to_fsi_master_aspeed(dev_to_fsi_master(dev));
+
+ kfree(aspeed);
+}
+
+/* mmode encoders */
+static inline u32 fsi_mmode_crs0(u32 x)
+{
+ return (x & FSI_MMODE_CRS0MASK) << FSI_MMODE_CRS0SHFT;
+}
+
+static inline u32 fsi_mmode_crs1(u32 x)
+{
+ return (x & FSI_MMODE_CRS1MASK) << FSI_MMODE_CRS1SHFT;
+}
+
+static int aspeed_master_init(struct fsi_master_aspeed *aspeed)
+{
+ __be32 reg;
+
+ reg = cpu_to_be32(FSI_MRESP_RST_ALL_MASTER | FSI_MRESP_RST_ALL_LINK
+ | FSI_MRESP_RST_MCR | FSI_MRESP_RST_PYE);
+ opb_writel(aspeed, ctrl_base + FSI_MRESP0, reg);
+
+ /* Initialize the MFSI (hub master) engine */
+ reg = cpu_to_be32(FSI_MRESP_RST_ALL_MASTER | FSI_MRESP_RST_ALL_LINK
+ | FSI_MRESP_RST_MCR | FSI_MRESP_RST_PYE);
+ opb_writel(aspeed, ctrl_base + FSI_MRESP0, reg);
+
+ reg = cpu_to_be32(FSI_MECTRL_EOAE | FSI_MECTRL_P8_AUTO_TERM);
+ opb_writel(aspeed, ctrl_base + FSI_MECTRL, reg);
+
+ reg = cpu_to_be32(FSI_MMODE_ECRC | FSI_MMODE_EPC | FSI_MMODE_RELA
+ | fsi_mmode_crs0(DEFAULT_DIVISOR)
+ | fsi_mmode_crs1(DEFAULT_DIVISOR)
+ | FSI_MMODE_P8_TO_LSB);
+ opb_writel(aspeed, ctrl_base + FSI_MMODE, reg);
+
+ reg = cpu_to_be32(0xffff0000);
+ opb_writel(aspeed, ctrl_base + FSI_MDLYR, reg);
+
+ reg = cpu_to_be32(~0);
+ opb_writel(aspeed, ctrl_base + FSI_MSENP0, reg);
+
+ /* Leave enabled long enough for master logic to set up */
+ mdelay(FSI_LINK_ENABLE_SETUP_TIME);
+
+ opb_writel(aspeed, ctrl_base + FSI_MCENP0, reg);
+
+ opb_readl(aspeed, ctrl_base + FSI_MAEB, NULL);
+
+ reg = cpu_to_be32(FSI_MRESP_RST_ALL_MASTER | FSI_MRESP_RST_ALL_LINK);
+ opb_writel(aspeed, ctrl_base + FSI_MRESP0, reg);
+
+ opb_readl(aspeed, ctrl_base + FSI_MLEVP0, NULL);
+
+ /* Reset the master bridge */
+ reg = cpu_to_be32(FSI_MRESB_RST_GEN);
+ opb_writel(aspeed, ctrl_base + FSI_MRESB0, reg);
+
+ reg = cpu_to_be32(FSI_MRESB_RST_ERR);
+ opb_writel(aspeed, ctrl_base + FSI_MRESB0, reg);
+
+ return 0;
+}
+
+static int fsi_master_aspeed_probe(struct platform_device *pdev)
+{
+ struct fsi_master_aspeed *aspeed;
+ struct resource *res;
+ int rc, links, reg;
+ __be32 raw;
+
+ aspeed = devm_kzalloc(&pdev->dev, sizeof(*aspeed), GFP_KERNEL);
+ if (!aspeed)
+ return -ENOMEM;
+
+ aspeed->dev = &pdev->dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ aspeed->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(aspeed->base))
+ return PTR_ERR(aspeed->base);
+
+ aspeed->clk = devm_clk_get(aspeed->dev, NULL);
+ if (IS_ERR(aspeed->clk)) {
+ dev_err(aspeed->dev, "couldn't get clock\n");
+ return PTR_ERR(aspeed->clk);
+ }
+ rc = clk_prepare_enable(aspeed->clk);
+ if (rc) {
+ dev_err(aspeed->dev, "couldn't enable clock\n");
+ return rc;
+ }
+
+ writel(0x1, aspeed->base + OPB_CLK_SYNC);
+ writel(OPB1_XFER_ACK_EN | OPB0_XFER_ACK_EN,
+ aspeed->base + OPB_IRQ_MASK);
+
+ /* TODO: determine an appropriate value */
+ writel(0x10, aspeed->base + OPB_RETRY_COUNTER);
+
+ writel(ctrl_base, aspeed->base + OPB_CTRL_BASE);
+ writel(fsi_base, aspeed->base + OPB_FSI_BASE);
+
+ /* Set read data order */
+ writel(0x00030b1b, aspeed->base + OPB0_READ_ORDER1);
+
+ /* Set write data order */
+ writel(0x0011101b, aspeed->base + OPB0_WRITE_ORDER1);
+ writel(0x0c330f3f, aspeed->base + OPB0_WRITE_ORDER2);
+
+ /*
+ * Select OPB0 for all operations.
+ * Will need to be reworked when enabling DMA or anything that uses
+ * OPB1.
+ */
+ writel(0x1, aspeed->base + OPB0_SELECT);
+
+ rc = opb_readl(aspeed, ctrl_base + FSI_MVER, &raw);
+ if (rc) {
+ dev_err(&pdev->dev, "failed to read hub version\n");
+ return rc;
+ }
+
+ reg = be32_to_cpu(raw);
+ links = (reg >> 8) & 0xff;
+ dev_info(&pdev->dev, "hub version %08x (%d links)\n", reg, links);
+
+ aspeed->master.dev.parent = &pdev->dev;
+ aspeed->master.dev.release = aspeed_master_release;
+ aspeed->master.dev.of_node = of_node_get(dev_of_node(&pdev->dev));
+
+ aspeed->master.n_links = links;
+ aspeed->master.read = aspeed_master_read;
+ aspeed->master.write = aspeed_master_write;
+ aspeed->master.send_break = aspeed_master_break;
+ aspeed->master.term = aspeed_master_term;
+ aspeed->master.link_enable = aspeed_master_link_enable;
+
+ dev_set_drvdata(&pdev->dev, aspeed);
+
+ aspeed_master_init(aspeed);
+
+ rc = fsi_master_register(&aspeed->master);
+ if (rc)
+ goto err_release;
+
+ /* At this point, fsi_master_register performs the device_initialize(),
+ * and holds the sole reference on master.dev. This means the device
+ * will be freed (via ->release) during any subsequent call to
+ * fsi_master_unregister. We add our own reference to it here, so we
+ * can perform cleanup (in _remove()) without it being freed before
+ * we're ready.
+ */
+ get_device(&aspeed->master.dev);
+ return 0;
+
+err_release:
+ clk_disable_unprepare(aspeed->clk);
+ return rc;
+}
+
+static int fsi_master_aspeed_remove(struct platform_device *pdev)
+{
+ struct fsi_master_aspeed *aspeed = platform_get_drvdata(pdev);
+
+ fsi_master_unregister(&aspeed->master);
+ clk_disable_unprepare(aspeed->clk);
+
+ return 0;
+}
+
+static const struct of_device_id fsi_master_aspeed_match[] = {
+ { .compatible = "aspeed,ast2600-fsi-master" },
+ { },
+};
+
+static struct platform_driver fsi_master_aspeed_driver = {
+ .driver = {
+ .name = "fsi-master-aspeed",
+ .of_match_table = fsi_master_aspeed_match,
+ },
+ .probe = fsi_master_aspeed_probe,
+ .remove = fsi_master_aspeed_remove,
+};
+
+module_platform_driver(fsi_master_aspeed_driver);
+MODULE_LICENSE("GPL");
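The probe comment above describes the usual driver-core refcount dance: registration consumes the sole reference on master.dev, so the driver takes one of its own to keep the structure alive through _remove(). A minimal sketch of that pattern, with hypothetical names, not this driver's exact code:

#include <linux/device.h>

/* Sketch: keep a registered device alive past unregister so a later
 * cleanup path can still dereference it; dropping the extra reference
 * may invoke ->release and free the structure.
 */
static void example_hold(struct device *dev)
{
	get_device(dev);		/* our own reference */
}

static void example_done(struct device *dev)
{
	put_device(dev);		/* may free dev via ->release */
}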
diff --git a/drivers/fsi/fsi-master-hub.c b/drivers/fsi/fsi-master-hub.c
index f158b1a88286..def35cf92571 100644
--- a/drivers/fsi/fsi-master-hub.c
+++ b/drivers/fsi/fsi-master-hub.c
@@ -13,53 +13,7 @@
#include "fsi-master.h"
-/* Control Registers */
-#define FSI_MMODE 0x0 /* R/W: mode */
-#define FSI_MDLYR 0x4 /* R/W: delay */
-#define FSI_MCRSP 0x8 /* R/W: clock rate */
-#define FSI_MENP0 0x10 /* R/W: enable */
-#define FSI_MLEVP0 0x18 /* R: plug detect */
-#define FSI_MSENP0 0x18 /* S: Set enable */
-#define FSI_MCENP0 0x20 /* C: Clear enable */
-#define FSI_MAEB 0x70 /* R: Error address */
-#define FSI_MVER 0x74 /* R: master version/type */
-#define FSI_MRESP0 0xd0 /* W: Port reset */
-#define FSI_MESRB0 0x1d0 /* R: Master error status */
-#define FSI_MRESB0 0x1d0 /* W: Reset bridge */
-#define FSI_MECTRL 0x2e0 /* W: Error control */
-
-/* MMODE: Mode control */
-#define FSI_MMODE_EIP 0x80000000 /* Enable interrupt polling */
-#define FSI_MMODE_ECRC 0x40000000 /* Enable error recovery */
-#define FSI_MMODE_EPC 0x10000000 /* Enable parity checking */
-#define FSI_MMODE_P8_TO_LSB 0x00000010 /* Timeout value LSB */
- /* MSB=1, LSB=0 is 0.8 ms */
- /* MSB=0, LSB=1 is 0.9 ms */
-#define FSI_MMODE_CRS0SHFT 18 /* Clk rate selection 0 shift */
-#define FSI_MMODE_CRS0MASK 0x3ff /* Clk rate selection 0 mask */
-#define FSI_MMODE_CRS1SHFT 8 /* Clk rate selection 1 shift */
-#define FSI_MMODE_CRS1MASK 0x3ff /* Clk rate selection 1 mask */
-
-/* MRESB: Reset brindge */
-#define FSI_MRESB_RST_GEN 0x80000000 /* General reset */
-#define FSI_MRESB_RST_ERR 0x40000000 /* Error Reset */
-
-/* MRESB: Reset port */
-#define FSI_MRESP_RST_ALL_MASTER 0x20000000 /* Reset all FSI masters */
-#define FSI_MRESP_RST_ALL_LINK 0x10000000 /* Reset all FSI port contr. */
-#define FSI_MRESP_RST_MCR 0x08000000 /* Reset FSI master reg. */
-#define FSI_MRESP_RST_PYE 0x04000000 /* Reset FSI parity error */
-#define FSI_MRESP_RST_ALL 0xfc000000 /* Reset any error */
-
-/* MECTRL: Error control */
-#define FSI_MECTRL_EOAE 0x8000 /* Enable machine check when */
- /* master 0 in error */
-#define FSI_MECTRL_P8_AUTO_TERM 0x4000 /* Auto terminate */
-
#define FSI_ENGID_HUB_MASTER 0x1c
-#define FSI_HUB_LINK_OFFSET 0x80000
-#define FSI_HUB_LINK_SIZE 0x80000
-#define FSI_HUB_MASTER_MAX_LINKS 8
#define FSI_LINK_ENABLE_SETUP_TIME 10 /* in mS */
diff --git a/drivers/fsi/fsi-master.h b/drivers/fsi/fsi-master.h
index c7174237e864..6e8d4d4d5149 100644
--- a/drivers/fsi/fsi-master.h
+++ b/drivers/fsi/fsi-master.h
@@ -12,6 +12,71 @@
#include <linux/device.h>
#include <linux/mutex.h>
+/*
+ * Master registers
+ *
+ * These are used by hardware masters, such as those in the FSP2 and AST2600,
+ * and by the hub master in POWER processors.
+ */
+
+/* Control Registers */
+#define FSI_MMODE 0x0 /* R/W: mode */
+#define FSI_MDLYR 0x4 /* R/W: delay */
+#define FSI_MCRSP 0x8 /* R/W: clock rate */
+#define FSI_MENP0 0x10 /* R/W: enable */
+#define FSI_MLEVP0 0x18 /* R: plug detect */
+#define FSI_MSENP0 0x18 /* S: Set enable */
+#define FSI_MCENP0 0x20 /* C: Clear enable */
+#define FSI_MAEB 0x70 /* R: Error address */
+#define FSI_MVER 0x74 /* R: master version/type */
+#define FSI_MSTAP0 0xd0 /* R: Port status */
+#define FSI_MRESP0 0xd0 /* W: Port reset */
+#define FSI_MESRB0 0x1d0 /* R: Master error status */
+#define FSI_MRESB0 0x1d0 /* W: Reset bridge */
+#define FSI_MSCSB0 0x1d4 /* R: Master sub command stack */
+#define FSI_MATRB0 0x1d8 /* R: Master address trace */
+#define FSI_MDTRB0 0x1dc /* R: Master data trace */
+#define FSI_MECTRL 0x2e0 /* W: Error control */
+
+/* MMODE: Mode control */
+#define FSI_MMODE_EIP 0x80000000 /* Enable interrupt polling */
+#define FSI_MMODE_ECRC 0x40000000 /* Enable error recovery */
+#define FSI_MMODE_RELA 0x20000000 /* Enable relative address commands */
+#define FSI_MMODE_EPC 0x10000000 /* Enable parity checking */
+#define FSI_MMODE_P8_TO_LSB 0x00000010 /* Timeout value LSB */
+ /* MSB=1, LSB=0 is 0.8 ms */
+ /* MSB=0, LSB=1 is 0.9 ms */
+#define FSI_MMODE_CRS0SHFT 18 /* Clk rate selection 0 shift */
+#define FSI_MMODE_CRS0MASK 0x3ff /* Clk rate selection 0 mask */
+#define FSI_MMODE_CRS1SHFT 8 /* Clk rate selection 1 shift */
+#define FSI_MMODE_CRS1MASK 0x3ff /* Clk rate selection 1 mask */
+
+/* MRESB: Reset bridge */
+#define FSI_MRESB_RST_GEN 0x80000000 /* General reset */
+#define FSI_MRESB_RST_ERR 0x40000000 /* Error Reset */
+
+/* MRESP: Reset port */
+#define FSI_MRESP_RST_ALL_MASTER 0x20000000 /* Reset all FSI masters */
+#define FSI_MRESP_RST_ALL_LINK 0x10000000 /* Reset all FSI port contr. */
+#define FSI_MRESP_RST_MCR 0x08000000 /* Reset FSI master reg. */
+#define FSI_MRESP_RST_PYE 0x04000000 /* Reset FSI parity error */
+#define FSI_MRESP_RST_ALL 0xfc000000 /* Reset any error */
+
+/* MECTRL: Error control */
+#define FSI_MECTRL_EOAE 0x8000 /* Enable machine check when */
+ /* master 0 in error */
+#define FSI_MECTRL_P8_AUTO_TERM 0x4000 /* Auto terminate */
+
+#define FSI_HUB_LINK_OFFSET 0x80000
+#define FSI_HUB_LINK_SIZE 0x80000
+#define FSI_HUB_MASTER_MAX_LINKS 8
+
+/*
+ * Protocol definitions
+ *
+ * These are used by low-level masters that bit-bang out the protocol.
+ */
+
/* Various protocol delays */
#define FSI_ECHO_DELAY_CLOCKS 16 /* Number clocks for echo delay */
#define FSI_SEND_DELAY_CLOCKS 16 /* Number clocks for send delay */
@@ -47,6 +112,12 @@
/* fsi-master definition and flags */
#define FSI_MASTER_FLAG_SWCLOCK 0x1
+/*
+ * Structures and function prototypes
+ *
+ * These are common to all masters
+ */
+
struct fsi_master {
struct device dev;
int idx;
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 38e096e6925f..92d0ff63b3ea 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -298,7 +298,7 @@ config GPIO_IXP4XX
config GPIO_LOONGSON
bool "Loongson-2/3 GPIO support"
- depends on CPU_LOONGSON2 || CPU_LOONGSON3
+ depends on CPU_LOONGSON2EF || CPU_LOONGSON64
help
driver for GPIO functionality on Loongson-2F/3A/3B processors.
diff --git a/drivers/gpio/gpio-bd70528.c b/drivers/gpio/gpio-bd70528.c
index 0c1ead12d883..4ba4d4a67881 100644
--- a/drivers/gpio/gpio-bd70528.c
+++ b/drivers/gpio/gpio-bd70528.c
@@ -25,13 +25,13 @@ static int bd70528_set_debounce(struct bd70528_gpio *bdgpio,
case 0:
val = BD70528_DEBOUNCE_DISABLE;
break;
- case 1 ... 15:
+ case 1 ... 15000:
val = BD70528_DEBOUNCE_15MS;
break;
- case 16 ... 30:
+ case 15001 ... 30000:
val = BD70528_DEBOUNCE_30MS;
break;
- case 31 ... 50:
+ case 30001 ... 50000:
val = BD70528_DEBOUNCE_50MS;
break;
default:
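The widened case ranges above reflect that the debounce argument arrives in microseconds, not milliseconds, so the old 1...15 style ranges could never match real requests. A standalone sketch of the same mapping (step values illustrative, not the chip's register encoding):

#include <errno.h>

/* Map a requested debounce period (us) to the nearest supported
 * hardware step; values are illustrative.
 */
static int debounce_us_to_step(unsigned int usec)
{
	if (usec == 0)
		return 0;		/* debounce disabled */
	if (usec <= 15000)
		return 15;		/* 15 ms step */
	if (usec <= 30000)
		return 30;		/* 30 ms step */
	if (usec <= 50000)
		return 50;		/* 50 ms step */
	return -EINVAL;			/* longer periods unsupported */
}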
diff --git a/drivers/gpio/gpio-loongson.c b/drivers/gpio/gpio-loongson.c
index 00943170ce36..a42145873cc9 100644
--- a/drivers/gpio/gpio-loongson.c
+++ b/drivers/gpio/gpio-loongson.c
@@ -22,7 +22,7 @@
#define STLS2F_N_GPIO 4
#define STLS3A_N_GPIO 16
-#ifdef CONFIG_CPU_LOONGSON3
+#ifdef CONFIG_CPU_LOONGSON64
#define LOONGSON_N_GPIO STLS3A_N_GPIO
#else
#define LOONGSON_N_GPIO STLS2F_N_GPIO
diff --git a/drivers/gpio/gpio-max77620.c b/drivers/gpio/gpio-max77620.c
index faf86ea9c51a..642c6321c22a 100644
--- a/drivers/gpio/gpio-max77620.c
+++ b/drivers/gpio/gpio-max77620.c
@@ -192,13 +192,13 @@ static int max77620_gpio_set_debounce(struct max77620_gpio *mgpio,
case 0:
val = MAX77620_CNFG_GPIO_DBNC_None;
break;
- case 1000 ... 8000:
+ case 1 ... 8000:
val = MAX77620_CNFG_GPIO_DBNC_8ms;
break;
- case 9000 ... 16000:
+ case 8001 ... 16000:
val = MAX77620_CNFG_GPIO_DBNC_16ms;
break;
- case 17000 ... 32000:
+ case 16001 ... 32000:
val = MAX77620_CNFG_GPIO_DBNC_32ms;
break;
default:
diff --git a/drivers/gpio/gpio-menz127.c b/drivers/gpio/gpio-menz127.c
index 70fdb42a8e88..1e21c661d79d 100644
--- a/drivers/gpio/gpio-menz127.c
+++ b/drivers/gpio/gpio-menz127.c
@@ -211,3 +211,4 @@ MODULE_AUTHOR("Andreas Werner <andreas.werner@men.de>");
MODULE_DESCRIPTION("MEN 16z127 GPIO Controller");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("mcb:16z127");
+MODULE_IMPORT_NS(MCB);
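MODULE_IMPORT_NS() pairs with symbols exported into a namespace via EXPORT_SYMBOL_NS_GPL(); without the import, modpost rejects the module's references to those symbols. A minimal sketch with a hypothetical symbol name:

#include <linux/module.h>

/* Provider side: export into the MCB symbol namespace. */
int example_mcb_helper(void)		/* hypothetical symbol */
{
	return 0;
}
EXPORT_SYMBOL_NS_GPL(example_mcb_helper, MCB);

/* Consumer side: declare use of the namespace, as the hunk above does. */
MODULE_IMPORT_NS(MCB);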
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index 609ed16ae933..59ccfd24627d 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -1304,11 +1304,28 @@ late_initcall_sync(acpi_gpio_handle_deferred_request_irqs);
static const struct dmi_system_id run_edge_events_on_boot_blacklist[] = {
{
+ /*
+ * The Minix Neo Z83-4 has a micro-USB-B id-pin handler for
+ * a non existing micro-USB-B connector which puts the HDMI
+ * DDC pins in GPIO mode, breaking HDMI support.
+ */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "MINIX"),
DMI_MATCH(DMI_PRODUCT_NAME, "Z83-4"),
}
},
+ {
+ /*
+ * The Terra Pad 1061 has a micro-USB-B id-pin handler, which
+ * instead of controlling the actual micro-USB-B turns the 5V
+ * boost for its USB-A connector off. The actual micro-USB-B
+ * connector is wired for charging only.
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Wortmann_AG"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TERRA_PAD_1061"),
+ }
+ },
{} /* Terminating entry */
};
diff --git a/drivers/gpio/gpiolib-devres.c b/drivers/gpio/gpiolib-devres.c
index 98e3c20d9730..4421be09b960 100644
--- a/drivers/gpio/gpiolib-devres.c
+++ b/drivers/gpio/gpiolib-devres.c
@@ -185,12 +185,11 @@ struct gpio_desc *devm_gpiod_get_from_of_node(struct device *dev,
EXPORT_SYMBOL_GPL(devm_gpiod_get_from_of_node);
/**
- * devm_fwnode_get_index_gpiod_from_child - get a GPIO descriptor from a
- * device's child node
+ * devm_fwnode_gpiod_get_index - get a GPIO descriptor from a given node
* @dev: GPIO consumer
+ * @fwnode: firmware node containing GPIO reference
* @con_id: function within the GPIO consumer
* @index: index of the GPIO to obtain in the consumer
- * @child: firmware node (child of @dev)
* @flags: GPIO initialization flags
* @label: label to attach to the requested GPIO
*
@@ -200,35 +199,21 @@ EXPORT_SYMBOL_GPL(devm_gpiod_get_from_of_node);
* On successful request the GPIO pin is configured in accordance with
* provided @flags.
*/
-struct gpio_desc *devm_fwnode_get_index_gpiod_from_child(struct device *dev,
- const char *con_id, int index,
- struct fwnode_handle *child,
- enum gpiod_flags flags,
- const char *label)
+struct gpio_desc *devm_fwnode_gpiod_get_index(struct device *dev,
+ struct fwnode_handle *fwnode,
+ const char *con_id, int index,
+ enum gpiod_flags flags,
+ const char *label)
{
- char prop_name[32]; /* 32 is max size of property name */
struct gpio_desc **dr;
struct gpio_desc *desc;
- unsigned int i;
dr = devres_alloc(devm_gpiod_release, sizeof(struct gpio_desc *),
GFP_KERNEL);
if (!dr)
return ERR_PTR(-ENOMEM);
- for (i = 0; i < ARRAY_SIZE(gpio_suffixes); i++) {
- if (con_id)
- snprintf(prop_name, sizeof(prop_name), "%s-%s",
- con_id, gpio_suffixes[i]);
- else
- snprintf(prop_name, sizeof(prop_name), "%s",
- gpio_suffixes[i]);
-
- desc = fwnode_get_named_gpiod(child, prop_name, index, flags,
- label);
- if (!IS_ERR(desc) || (PTR_ERR(desc) != -ENOENT))
- break;
- }
+ desc = fwnode_gpiod_get_index(fwnode, con_id, index, flags, label);
if (IS_ERR(desc)) {
devres_free(dr);
return desc;
@@ -239,7 +224,7 @@ struct gpio_desc *devm_fwnode_get_index_gpiod_from_child(struct device *dev,
return desc;
}
-EXPORT_SYMBOL_GPL(devm_fwnode_get_index_gpiod_from_child);
+EXPORT_SYMBOL_GPL(devm_fwnode_gpiod_get_index);
/**
* devm_gpiod_get_index_optional - Resource-managed gpiod_get_index_optional()
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 104ed299d5ea..fb33ff6fc1a9 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -4356,6 +4356,54 @@ static int platform_gpio_count(struct device *dev, const char *con_id)
}
/**
+ * fwnode_gpiod_get_index - obtain a GPIO from firmware node
+ * @fwnode: handle of the firmware node
+ * @con_id: function within the GPIO consumer
+ * @index: index of the GPIO to obtain for the consumer
+ * @flags: GPIO initialization flags
+ * @label: label to attach to the requested GPIO
+ *
+ * This function can be used for drivers that get their configuration
+ * from opaque firmware.
+ *
+ * The function properly finds the corresponding GPIO using whatever is the
+ * underlying firmware interface and then makes sure that the GPIO
+ * descriptor is requested before it is returned to the caller.
+ *
+ * Returns:
+ * On successful request the GPIO pin is configured in accordance with
+ * provided @flags.
+ *
+ * In case of error an ERR_PTR() is returned.
+ */
+struct gpio_desc *fwnode_gpiod_get_index(struct fwnode_handle *fwnode,
+ const char *con_id, int index,
+ enum gpiod_flags flags,
+ const char *label)
+{
+ struct gpio_desc *desc;
+ char prop_name[32]; /* 32 is max size of property name */
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(gpio_suffixes); i++) {
+ if (con_id)
+ snprintf(prop_name, sizeof(prop_name), "%s-%s",
+ con_id, gpio_suffixes[i]);
+ else
+ snprintf(prop_name, sizeof(prop_name), "%s",
+ gpio_suffixes[i]);
+
+ desc = fwnode_get_named_gpiod(fwnode, prop_name, index, flags,
+ label);
+ if (!IS_ERR(desc) || (PTR_ERR(desc) != -ENOENT))
+ break;
+ }
+
+ return desc;
+}
+EXPORT_SYMBOL_GPL(fwnode_gpiod_get_index);
+
+/**
* gpiod_count - return the number of GPIOs associated with a device / function
* or -ENOENT if no GPIO has been assigned to the requested function
* @dev: GPIO consumer, can be NULL for system-global GPIOs
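A hedged usage sketch of the new fwnode_gpiod_get_index() helper; the "reset" function name and label are made up for illustration:

#include <linux/err.h>
#include <linux/gpio/consumer.h>

static int example_get_reset(struct fwnode_handle *fwnode)
{
	struct gpio_desc *desc;

	desc = fwnode_gpiod_get_index(fwnode, "reset", 0,
				      GPIOD_OUT_LOW, "example-reset");
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	gpiod_set_value_cansleep(desc, 1);	/* assert reset */
	gpiod_put(desc);			/* release when done */
	return 0;
}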
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 1d4aaa9580f4..82efc1e22e61 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -511,7 +511,7 @@ uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev,
* Also, don't allow GTT domain if the BO doesn't have USWC flag set.
*/
if (adev->asic_type >= CHIP_CARRIZO &&
- adev->asic_type <= CHIP_RAVEN &&
+ adev->asic_type < CHIP_RAVEN &&
(adev->flags & AMD_IS_APU) &&
(bo_flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) &&
amdgpu_bo_support_uswc(bo_flags) &&
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index e1c15721611a..b19157b19fa0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -1013,10 +1013,10 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x731B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
{0x1002, 0x731F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
/* Navi14 */
- {0x1002, 0x7340, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14|AMD_EXP_HW_SUPPORT},
- {0x1002, 0x7341, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14|AMD_EXP_HW_SUPPORT},
- {0x1002, 0x7347, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14|AMD_EXP_HW_SUPPORT},
- {0x1002, 0x734F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14|AMD_EXP_HW_SUPPORT},
+ {0x1002, 0x7340, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14},
+ {0x1002, 0x7341, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14},
+ {0x1002, 0x7347, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14},
+ {0x1002, 0x734F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14},
/* Renoir */
{0x1002, 0x1636, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU|AMD_EXP_HW_SUPPORT},
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index a042ef471fbd..a73206784cba 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -649,15 +649,19 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
return -ENOMEM;
alloc_size = info->read_mmr_reg.count * sizeof(*regs);
- for (i = 0; i < info->read_mmr_reg.count; i++)
+ amdgpu_gfx_off_ctrl(adev, false);
+ for (i = 0; i < info->read_mmr_reg.count; i++) {
if (amdgpu_asic_read_register(adev, se_num, sh_num,
info->read_mmr_reg.dword_offset + i,
&regs[i])) {
DRM_DEBUG_KMS("unallowed offset %#x\n",
info->read_mmr_reg.dword_offset + i);
kfree(regs);
+ amdgpu_gfx_off_ctrl(adev, true);
return -EFAULT;
}
+ }
+ amdgpu_gfx_off_ctrl(adev, true);
n = copy_to_user(out, regs, min(size, alloc_size));
kfree(regs);
return n ? -EFAULT : 0;
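The hunk above brackets the register-read loop with amdgpu_gfx_off_ctrl() and has to re-enable GFXOFF on every exit path. A sketch of the single-exit variant that avoids duplicating the re-enable; do_reads() is a hypothetical helper:

static bool do_reads(struct amdgpu_device *adev);	/* hypothetical */

static int example_read_regs(struct amdgpu_device *adev)
{
	int ret = 0;

	amdgpu_gfx_off_ctrl(adev, false);	/* keep GFX powered */
	if (do_reads(adev))
		ret = -EFAULT;
	amdgpu_gfx_off_ctrl(adev, true);	/* always re-enable */

	return ret;
}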
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 4d71537a960d..a46090071034 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -950,21 +950,7 @@ static void psp_print_fw_hdr(struct psp_context *psp,
struct amdgpu_firmware_info *ucode)
{
struct amdgpu_device *adev = psp->adev;
- const struct sdma_firmware_header_v1_0 *sdma_hdr =
- (const struct sdma_firmware_header_v1_0 *)
- adev->sdma.instance[ucode->ucode_id - AMDGPU_UCODE_ID_SDMA0].fw->data;
- const struct gfx_firmware_header_v1_0 *ce_hdr =
- (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
- const struct gfx_firmware_header_v1_0 *pfp_hdr =
- (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
- const struct gfx_firmware_header_v1_0 *me_hdr =
- (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
- const struct gfx_firmware_header_v1_0 *mec_hdr =
- (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
- const struct rlc_firmware_header_v2_0 *rlc_hdr =
- (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
- const struct smc_firmware_header_v1_0 *smc_hdr =
- (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;
+ struct common_firmware_header *hdr;
switch (ucode->ucode_id) {
case AMDGPU_UCODE_ID_SDMA0:
@@ -975,25 +961,33 @@ static void psp_print_fw_hdr(struct psp_context *psp,
case AMDGPU_UCODE_ID_SDMA5:
case AMDGPU_UCODE_ID_SDMA6:
case AMDGPU_UCODE_ID_SDMA7:
- amdgpu_ucode_print_sdma_hdr(&sdma_hdr->header);
+ hdr = (struct common_firmware_header *)
+ adev->sdma.instance[ucode->ucode_id - AMDGPU_UCODE_ID_SDMA0].fw->data;
+ amdgpu_ucode_print_sdma_hdr(hdr);
break;
case AMDGPU_UCODE_ID_CP_CE:
- amdgpu_ucode_print_gfx_hdr(&ce_hdr->header);
+ hdr = (struct common_firmware_header *)adev->gfx.ce_fw->data;
+ amdgpu_ucode_print_gfx_hdr(hdr);
break;
case AMDGPU_UCODE_ID_CP_PFP:
- amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
+ hdr = (struct common_firmware_header *)adev->gfx.pfp_fw->data;
+ amdgpu_ucode_print_gfx_hdr(hdr);
break;
case AMDGPU_UCODE_ID_CP_ME:
- amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
+ hdr = (struct common_firmware_header *)adev->gfx.me_fw->data;
+ amdgpu_ucode_print_gfx_hdr(hdr);
break;
case AMDGPU_UCODE_ID_CP_MEC1:
- amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
+ hdr = (struct common_firmware_header *)adev->gfx.mec_fw->data;
+ amdgpu_ucode_print_gfx_hdr(hdr);
break;
case AMDGPU_UCODE_ID_RLC_G:
- amdgpu_ucode_print_rlc_hdr(&rlc_hdr->header);
+ hdr = (struct common_firmware_header *)adev->gfx.rlc_fw->data;
+ amdgpu_ucode_print_rlc_hdr(hdr);
break;
case AMDGPU_UCODE_ID_SMC:
- amdgpu_ucode_print_smc_hdr(&smc_hdr->header);
+ hdr = (struct common_firmware_header *)adev->pm.fw->data;
+ amdgpu_ucode_print_smc_hdr(hdr);
break;
default:
break;
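The refactor works because every firmware image begins with a common header, so each case can cast the one pointer it actually needs instead of dereferencing every image up front (some of which may be NULL). A sketch of the prefix-struct pattern, with made-up fields:

#include <linux/types.h>

struct common_hdr {
	u32 size_bytes;
	u32 ucode_version;		/* fields illustrative */
};

struct gfx_hdr {
	struct common_hdr header;	/* must come first */
	u32 gfx_specific;
};

/* Safe for any image type whose layout starts with common_hdr. */
static u32 example_version(const void *fw_data)
{
	return ((const struct common_hdr *)fw_data)->ucode_version;
}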
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index dfca83a2de47..97cf0b536873 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -1038,8 +1038,13 @@ static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
case CHIP_VEGA20:
break;
case CHIP_RAVEN:
- if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8)
- &&((adev->gfx.rlc_fw_version != 106 &&
+ /* Disable GFXOFF on original Raven. There are combinations
+ * of SBIOS and platforms that are not stable.
+ */
+ if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8))
+ adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
+ else if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8)
+ &&((adev->gfx.rlc_fw_version != 106 &&
adev->gfx.rlc_fw_version < 531) ||
(adev->gfx.rlc_fw_version == 53815) ||
(adev->gfx.rlc_feature_version < 1) ||
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index a52f0b13a2c8..4139f129eafb 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -688,7 +688,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
*/
if (adev->flags & AMD_IS_APU &&
adev->asic_type >= CHIP_CARRIZO &&
- adev->asic_type <= CHIP_RAVEN)
+ adev->asic_type < CHIP_RAVEN)
init_data.flags.gpu_vm_support = true;
if (amdgpu_dc_feature_mask & DC_FBC_MASK)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 34f95e0e3ea4..203ce4b1028f 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -3478,18 +3478,31 @@ static int smu7_get_pp_table_entry(struct pp_hwmgr *hwmgr,
static int smu7_get_gpu_power(struct pp_hwmgr *hwmgr, u32 *query)
{
+ struct amdgpu_device *adev = hwmgr->adev;
int i;
u32 tmp = 0;
if (!query)
return -EINVAL;
- smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetCurrPkgPwr, 0);
- tmp = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
- *query = tmp;
+ /*
+ * PPSMC_MSG_GetCurrPkgPwr is not supported on:
+ * - Hawaii
+ * - Bonaire
+ * - Fiji
+ * - Tonga
+ */
+ if ((adev->asic_type != CHIP_HAWAII) &&
+ (adev->asic_type != CHIP_BONAIRE) &&
+ (adev->asic_type != CHIP_FIJI) &&
+ (adev->asic_type != CHIP_TONGA)) {
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetCurrPkgPwr, 0);
+ tmp = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
+ *query = tmp;
- if (tmp != 0)
- return 0;
+ if (tmp != 0)
+ return 0;
+ }
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogStart);
cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
index 3ec5a10a7c4d..328e258a6895 100644
--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
@@ -759,6 +759,12 @@ static int navi10_force_clk_levels(struct smu_context *smu,
case SMU_UCLK:
case SMU_DCEFCLK:
case SMU_FCLK:
+ /* There are only two levels for fine-grained DPM */
+ if (navi10_is_support_fine_grained_dpm(smu, clk_type)) {
+ soft_max_level = (soft_max_level >= 1 ? 1 : 0);
+ soft_min_level = (soft_min_level >= 1 ? 1 : 0);
+ }
+
ret = smu_get_dpm_freq_by_index(smu, clk_type, soft_min_level, &min_freq);
if (ret)
return size;
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c
index 1d15cf9b6821..6c2c44d0bdee 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c
@@ -151,11 +151,22 @@ static int dw_hdmi_i2s_get_dai_id(struct snd_soc_component *component,
return -EINVAL;
}
+static int dw_hdmi_i2s_hook_plugged_cb(struct device *dev, void *data,
+ hdmi_codec_plugged_cb fn,
+ struct device *codec_dev)
+{
+ struct dw_hdmi_i2s_audio_data *audio = data;
+ struct dw_hdmi *hdmi = audio->hdmi;
+
+ return dw_hdmi_set_plugged_cb(hdmi, fn, codec_dev);
+}
+
static struct hdmi_codec_ops dw_hdmi_i2s_ops = {
.hw_params = dw_hdmi_i2s_hw_params,
.audio_shutdown = dw_hdmi_i2s_audio_shutdown,
.get_eld = dw_hdmi_i2s_get_eld,
.get_dai_id = dw_hdmi_i2s_get_dai_id,
+ .hook_plugged_cb = dw_hdmi_i2s_hook_plugged_cb,
};
static int snd_dw_hdmi_probe(struct platform_device *pdev)
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
index 521d689413c8..2102872bf43c 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
@@ -191,6 +191,10 @@ struct dw_hdmi {
struct mutex cec_notifier_mutex;
struct cec_notifier *cec_notifier;
+
+ hdmi_codec_plugged_cb plugged_cb;
+ struct device *codec_dev;
+ enum drm_connector_status last_connector_result;
};
#define HDMI_IH_PHY_STAT0_RX_SENSE \
@@ -215,6 +219,28 @@ static inline u8 hdmi_readb(struct dw_hdmi *hdmi, int offset)
return val;
}
+static void handle_plugged_change(struct dw_hdmi *hdmi, bool plugged)
+{
+ if (hdmi->plugged_cb && hdmi->codec_dev)
+ hdmi->plugged_cb(hdmi->codec_dev, plugged);
+}
+
+int dw_hdmi_set_plugged_cb(struct dw_hdmi *hdmi, hdmi_codec_plugged_cb fn,
+ struct device *codec_dev)
+{
+ bool plugged;
+
+ mutex_lock(&hdmi->mutex);
+ hdmi->plugged_cb = fn;
+ hdmi->codec_dev = codec_dev;
+ plugged = hdmi->last_connector_result == connector_status_connected;
+ handle_plugged_change(hdmi, plugged);
+ mutex_unlock(&hdmi->mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dw_hdmi_set_plugged_cb);
+
static void hdmi_modb(struct dw_hdmi *hdmi, u8 data, u8 mask, unsigned reg)
{
regmap_update_bits(hdmi->regm, reg << hdmi->reg_shift, mask, data);
@@ -2161,6 +2187,7 @@ dw_hdmi_connector_detect(struct drm_connector *connector, bool force)
{
struct dw_hdmi *hdmi = container_of(connector, struct dw_hdmi,
connector);
+ enum drm_connector_status result;
mutex_lock(&hdmi->mutex);
hdmi->force = DRM_FORCE_UNSPECIFIED;
@@ -2168,7 +2195,18 @@ dw_hdmi_connector_detect(struct drm_connector *connector, bool force)
dw_hdmi_update_phy_mask(hdmi);
mutex_unlock(&hdmi->mutex);
- return hdmi->phy.ops->read_hpd(hdmi, hdmi->phy.data);
+ result = hdmi->phy.ops->read_hpd(hdmi, hdmi->phy.data);
+
+ mutex_lock(&hdmi->mutex);
+ if (result != hdmi->last_connector_result) {
+ dev_dbg(hdmi->dev, "read_hpd result: %d", result);
+ handle_plugged_change(hdmi,
+ result == connector_status_connected);
+ hdmi->last_connector_result = result;
+ }
+ mutex_unlock(&hdmi->mutex);
+
+ return result;
}
static int dw_hdmi_connector_get_modes(struct drm_connector *connector)
@@ -2619,6 +2657,7 @@ __dw_hdmi_probe(struct platform_device *pdev,
hdmi->rxsense = true;
hdmi->phy_mask = (u8)~(HDMI_PHY_HPD | HDMI_PHY_RX_SENSE);
hdmi->mc_clkdis = 0x7f;
+ hdmi->last_connector_result = connector_status_disconnected;
mutex_init(&hdmi->mutex);
mutex_init(&hdmi->audio_mutex);
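On the codec side, the new hook is handed a callback matching hdmi_codec_plugged_cb, i.e. void (*)(struct device *, bool), which dw_hdmi_set_plugged_cb() then fires with the current state and on each HPD change. A hypothetical consumer sketch:

#include <linux/device.h>
#include <sound/hdmi-codec.h>

/* Hypothetical codec-side callback. */
static void example_plugged_cb(struct device *dev, bool plugged)
{
	dev_dbg(dev, "HDMI %s\n", plugged ? "plugged" : "unplugged");
}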
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index 4c766624b20d..4a8b2e5c2af6 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -719,7 +719,7 @@ void drm_connector_list_iter_end(struct drm_connector_list_iter *iter)
__drm_connector_put_safe(iter->conn);
spin_unlock_irqrestore(&config->connector_list_lock, flags);
}
- lock_release(&connector_list_iter_dep_map, 0, _RET_IP_);
+ lock_release(&connector_list_iter_dep_map, _RET_IP_);
}
EXPORT_SYMBOL(drm_connector_list_iter_end);
diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug
index 00786a142ff0..1400fce39c58 100644
--- a/drivers/gpu/drm/i915/Kconfig.debug
+++ b/drivers/gpu/drm/i915/Kconfig.debug
@@ -22,7 +22,6 @@ config DRM_I915_DEBUG
depends on DRM_I915
select DEBUG_FS
select PREEMPT_COUNT
- select REFCOUNT_FULL
select I2C_CHARDEV
select STACKDEPOT
select DRM_DP_AUX_CHARDEV
diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c
index d3fb75bb9eb1..7cb2257bbb93 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic.c
@@ -201,6 +201,7 @@ intel_crtc_duplicate_state(struct drm_crtc *crtc)
crtc_state->update_wm_post = false;
crtc_state->fb_changed = false;
crtc_state->fifo_changed = false;
+ crtc_state->preload_luts = false;
crtc_state->wm.need_postvbl_update = false;
crtc_state->fb_bits = 0;
crtc_state->update_planes = 0;
diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c
index 71a0201437a9..aa1e2c670bc4 100644
--- a/drivers/gpu/drm/i915/display/intel_color.c
+++ b/drivers/gpu/drm/i915/display/intel_color.c
@@ -990,6 +990,55 @@ void intel_color_commit(const struct intel_crtc_state *crtc_state)
dev_priv->display.color_commit(crtc_state);
}
+static bool intel_can_preload_luts(const struct intel_crtc_state *new_crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+ struct intel_atomic_state *state =
+ to_intel_atomic_state(new_crtc_state->base.state);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+
+ return !old_crtc_state->base.gamma_lut &&
+ !old_crtc_state->base.degamma_lut;
+}
+
+static bool chv_can_preload_luts(const struct intel_crtc_state *new_crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+ struct intel_atomic_state *state =
+ to_intel_atomic_state(new_crtc_state->base.state);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+
+ /*
+ * CGM_PIPE_MODE is itself single buffered. We'd have to
+ * somehow split it out from chv_load_luts() if we wanted
+ * the ability to preload the CGM LUTs/CSC without tearing.
+ */
+ if (old_crtc_state->cgm_mode || new_crtc_state->cgm_mode)
+ return false;
+
+ return !old_crtc_state->base.gamma_lut;
+}
+
+static bool glk_can_preload_luts(const struct intel_crtc_state *new_crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+ struct intel_atomic_state *state =
+ to_intel_atomic_state(new_crtc_state->base.state);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+
+ /*
+ * The hardware degamma is active whenever the pipe
+ * CSC is active. Thus even if the old state has no
+ * software degamma we need to avoid clobbering the
+ * linear hardware degamma mid scanout.
+ */
+ return !old_crtc_state->csc_enable &&
+ !old_crtc_state->base.gamma_lut;
+}
+
int intel_color_check(struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
@@ -1133,6 +1182,8 @@ static int i9xx_color_check(struct intel_crtc_state *crtc_state)
if (ret)
return ret;
+ crtc_state->preload_luts = intel_can_preload_luts(crtc_state);
+
return 0;
}
@@ -1185,6 +1236,8 @@ static int chv_color_check(struct intel_crtc_state *crtc_state)
if (ret)
return ret;
+ crtc_state->preload_luts = chv_can_preload_luts(crtc_state);
+
return 0;
}
@@ -1224,6 +1277,8 @@ static int ilk_color_check(struct intel_crtc_state *crtc_state)
if (ret)
return ret;
+ crtc_state->preload_luts = intel_can_preload_luts(crtc_state);
+
return 0;
}
@@ -1281,6 +1336,8 @@ static int ivb_color_check(struct intel_crtc_state *crtc_state)
if (ret)
return ret;
+ crtc_state->preload_luts = intel_can_preload_luts(crtc_state);
+
return 0;
}
@@ -1319,6 +1376,8 @@ static int glk_color_check(struct intel_crtc_state *crtc_state)
if (ret)
return ret;
+ crtc_state->preload_luts = glk_can_preload_luts(crtc_state);
+
return 0;
}
@@ -1368,6 +1427,8 @@ static int icl_color_check(struct intel_crtc_state *crtc_state)
crtc_state->csc_mode = icl_csc_mode(crtc_state);
+ crtc_state->preload_luts = intel_can_preload_luts(crtc_state);
+
return 0;
}
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index dfff6f4357b8..af50f05f4e9d 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -2504,6 +2504,9 @@ u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
* the highest stride limits of them all.
*/
crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A);
+ if (!crtc)
+ return 0;
+
plane = to_intel_plane(crtc->base.primary);
return plane->max_stride(plane, pixel_format, modifier,
@@ -13740,6 +13743,11 @@ static void intel_update_crtc(struct intel_crtc *crtc,
/* vblanks work again, re-enable pipe CRC. */
intel_crtc_enable_pipe_crc(crtc);
} else {
+ if (new_crtc_state->preload_luts &&
+ (new_crtc_state->base.color_mgmt_changed ||
+ new_crtc_state->update_pipe))
+ intel_color_load_luts(new_crtc_state);
+
intel_pre_plane_update(old_crtc_state, new_crtc_state);
if (new_crtc_state->update_pipe)
@@ -14034,6 +14042,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
if (new_crtc_state->base.active &&
!needs_modeset(new_crtc_state) &&
+ !new_crtc_state->preload_luts &&
(new_crtc_state->base.color_mgmt_changed ||
new_crtc_state->update_pipe))
intel_color_load_luts(new_crtc_state);
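Taken together, the preload_luts changes split LUT programming into two moments: an early load before the plane update when tearing is impossible, and the usual post-vblank load otherwise. Roughly, as illustrative control flow rather than the driver's literal code:

/* Illustrative shape of the two LUT-load paths above. */
static void example_commit(bool preload_safe, bool color_changed,
			   void (*load_luts)(void))
{
	if (preload_safe && color_changed)
		load_luts();	/* early: cannot tear, load pre-update */
	else if (color_changed)
		load_luts();	/* late: normal post-vblank path */
}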
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index 12099760d99e..c002f234ff31 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -4896,6 +4896,9 @@ void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
power_domains->initializing = true;
+ /* Must happen before power domain init on VLV/CHV */
+ intel_update_rawclk(i915);
+
if (INTEL_GEN(i915) >= 11) {
icl_display_core_init(i915, resume);
} else if (IS_CANNONLAKE(i915)) {
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index 449abaea619f..4075b0387c87 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -761,6 +761,7 @@ struct intel_crtc_state {
bool update_wm_pre, update_wm_post; /* watermarks are updated */
bool fb_changed; /* fb on any of the planes is changed */
bool fifo_changed; /* FIFO split is changed */
+ bool preload_luts;
/* Pipe source size (ie. panel fitter input size)
* All planes will be positioned inside this space,
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c
index d59eee5c5d9c..b5c588e511dd 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.c
@@ -235,6 +235,11 @@ static int intelfb_create(struct drm_fb_helper *helper,
info->apertures->ranges[0].base = ggtt->gmadr.start;
info->apertures->ranges[0].size = ggtt->mappable_end;
+ /* Our framebuffer is the entirety of fbdev's system memory */
+ info->fix.smem_start =
+ (unsigned long)(ggtt->gmadr.start + vma->node.start);
+ info->fix.smem_len = vma->node.size;
+
vaddr = i915_vma_pin_iomap(vma);
if (IS_ERR(vaddr)) {
DRM_ERROR("Failed to remap framebuffer into virtual memory\n");
@@ -244,10 +249,6 @@ static int intelfb_create(struct drm_fb_helper *helper,
info->screen_base = vaddr;
info->screen_size = vma->node.size;
- /* Our framebuffer is the entirety of fbdev's system memory */
- info->fix.smem_start = (unsigned long)info->screen_base;
- info->fix.smem_len = info->screen_size;
-
drm_fb_helper_fill_info(info, &ifbdev->helper, sizes);
/* If the object is shmemfs backed, it will have given us zeroed pages.
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index 1cdfe05514c3..755c4542629f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -319,6 +319,8 @@ static void i915_gem_context_free(struct i915_gem_context *ctx)
free_engines(rcu_access_pointer(ctx->engines));
mutex_destroy(&ctx->engines_mutex);
+ kfree(ctx->jump_whitelist);
+
if (ctx->timeline)
intel_timeline_put(ctx->timeline);
@@ -441,6 +443,9 @@ __create_context(struct drm_i915_private *i915)
for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;
+ ctx->jump_whitelist = NULL;
+ ctx->jump_whitelist_cmds = 0;
+
return ctx;
err_free:
@@ -1629,7 +1634,7 @@ replace:
i915_gem_context_set_user_engines(ctx);
else
i915_gem_context_clear_user_engines(ctx);
- rcu_swap_protected(ctx->engines, set.engines, 1);
+ set.engines = rcu_replace_pointer(ctx->engines, set.engines, 1);
mutex_unlock(&ctx->engines_mutex);
call_rcu(&set.engines->rcu, free_engines_rcu);
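rcu_swap_protected() updated its second argument in place; its replacement, rcu_replace_pointer(), instead returns the old pointer, which the caller frees after a grace period, exactly as the hunk above does with call_rcu(). A minimal sketch:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct cfg {
	int val;
	struct rcu_head rcu;
};

static struct cfg __rcu *active_cfg;

static void cfg_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct cfg, rcu));
}

static void example_swap(struct cfg *newc)
{
	struct cfg *oldc;

	/* "true" stands in for the real lockdep condition. */
	oldc = rcu_replace_pointer(active_cfg, newc, true);
	if (oldc)
		call_rcu(&oldc->rcu, cfg_free_rcu);
}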
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
index 260d59cc3de8..00537b9d7006 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
@@ -192,6 +192,13 @@ struct i915_gem_context {
* per vm, which may be one per context or shared with the global GTT)
*/
struct radix_tree_root handles_vma;
+
+ /** jump_whitelist: Bit array for tracking cmds during cmdparsing
+ * Guarded by struct_mutex
+ */
+ unsigned long *jump_whitelist;
+ /** jump_whitelist_cmds: No of cmd slots available */
+ u32 jump_whitelist_cmds;
};
#endif /* __I915_GEM_CONTEXT_TYPES_H__ */
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index b5f6937369ea..e635e1e5f4d3 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -296,7 +296,9 @@ static inline u64 gen8_noncanonical_addr(u64 address)
static inline bool eb_use_cmdparser(const struct i915_execbuffer *eb)
{
- return intel_engine_needs_cmd_parser(eb->engine) && eb->batch_len;
+ return intel_engine_requires_cmd_parser(eb->engine) ||
+ (intel_engine_using_cmd_parser(eb->engine) &&
+ eb->args->batch_len);
}
static int eb_create(struct i915_execbuffer *eb)
@@ -1955,40 +1957,94 @@ static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
return 0;
}
-static struct i915_vma *eb_parse(struct i915_execbuffer *eb, bool is_master)
+static struct i915_vma *
+shadow_batch_pin(struct i915_execbuffer *eb, struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *dev_priv = eb->i915;
+ struct i915_vma * const vma = *eb->vma;
+ struct i915_address_space *vm;
+ u64 flags;
+
+ /*
+ * PPGTT backed shadow buffers must be mapped RO, to prevent
+ * post-scan tampering
+ */
+ if (CMDPARSER_USES_GGTT(dev_priv)) {
+ flags = PIN_GLOBAL;
+ vm = &dev_priv->ggtt.vm;
+ } else if (vma->vm->has_read_only) {
+ flags = PIN_USER;
+ vm = vma->vm;
+ i915_gem_object_set_readonly(obj);
+ } else {
+ DRM_DEBUG("Cannot prevent post-scan tampering without RO capable vm\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ return i915_gem_object_pin(obj, vm, NULL, 0, 0, flags);
+}
+
+static struct i915_vma *eb_parse(struct i915_execbuffer *eb)
{
struct intel_engine_pool_node *pool;
struct i915_vma *vma;
+ u64 batch_start;
+ u64 shadow_batch_start;
int err;
pool = intel_engine_pool_get(&eb->engine->pool, eb->batch_len);
if (IS_ERR(pool))
return ERR_CAST(pool);
- err = intel_engine_cmd_parser(eb->engine,
+ vma = shadow_batch_pin(eb, pool->obj);
+ if (IS_ERR(vma))
+ goto err;
+
+ batch_start = gen8_canonical_addr(eb->batch->node.start) +
+ eb->batch_start_offset;
+
+ shadow_batch_start = gen8_canonical_addr(vma->node.start);
+
+ err = intel_engine_cmd_parser(eb->gem_context,
+ eb->engine,
eb->batch->obj,
- pool->obj,
+ batch_start,
eb->batch_start_offset,
eb->batch_len,
- is_master);
+ pool->obj,
+ shadow_batch_start);
+
if (err) {
- if (err == -EACCES) /* unhandled chained batch */
+ i915_vma_unpin(vma);
+
+ /*
+ * Unsafe GGTT-backed buffers can still be submitted safely
+ * as non-secure.
+ * For PPGTT backing however, we have no choice but to forcibly
+ * reject unsafe buffers
+ */
+ if (CMDPARSER_USES_GGTT(eb->i915) && (err == -EACCES))
+ /* Execute original buffer non-secure */
vma = NULL;
else
vma = ERR_PTR(err);
goto err;
}
- vma = i915_gem_object_ggtt_pin(pool->obj, NULL, 0, 0, 0);
- if (IS_ERR(vma))
- goto err;
-
eb->vma[eb->buffer_count] = i915_vma_get(vma);
eb->flags[eb->buffer_count] =
__EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_REF;
vma->exec_flags = &eb->flags[eb->buffer_count];
eb->buffer_count++;
+ eb->batch_start_offset = 0;
+ eb->batch = vma;
+
+ if (CMDPARSER_USES_GGTT(eb->i915))
+ eb->batch_flags |= I915_DISPATCH_SECURE;
+
+ /* eb->batch_len unchanged */
+
vma->private = pool;
return vma;
@@ -2421,6 +2477,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
struct drm_i915_gem_exec_object2 *exec,
struct drm_syncobj **fences)
{
+ struct drm_i915_private *i915 = to_i915(dev);
struct i915_execbuffer eb;
struct dma_fence *in_fence = NULL;
struct dma_fence *exec_fence = NULL;
@@ -2432,7 +2489,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
BUILD_BUG_ON(__EXEC_OBJECT_INTERNAL_FLAGS &
~__EXEC_OBJECT_UNKNOWN_FLAGS);
- eb.i915 = to_i915(dev);
+ eb.i915 = i915;
eb.file = file;
eb.args = args;
if (DBG_FORCE_RELOC || !(args->flags & I915_EXEC_NO_RELOC))
@@ -2452,8 +2509,15 @@ i915_gem_do_execbuffer(struct drm_device *dev,
eb.batch_flags = 0;
if (args->flags & I915_EXEC_SECURE) {
+ if (INTEL_GEN(i915) >= 11)
+ return -ENODEV;
+
+ /* Return -EPERM to trigger fallback code on old binaries. */
+ if (!HAS_SECURE_BATCHES(i915))
+ return -EPERM;
+
if (!drm_is_current_master(file) || !capable(CAP_SYS_ADMIN))
- return -EPERM;
+ return -EPERM;
eb.batch_flags |= I915_DISPATCH_SECURE;
}
@@ -2530,34 +2594,19 @@ i915_gem_do_execbuffer(struct drm_device *dev,
goto err_vma;
}
+ if (eb.batch_len == 0)
+ eb.batch_len = eb.batch->size - eb.batch_start_offset;
+
if (eb_use_cmdparser(&eb)) {
struct i915_vma *vma;
- vma = eb_parse(&eb, drm_is_current_master(file));
+ vma = eb_parse(&eb);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err_vma;
}
-
- if (vma) {
- /*
- * Batch parsed and accepted:
- *
- * Set the DISPATCH_SECURE bit to remove the NON_SECURE
- * bit from MI_BATCH_BUFFER_START commands issued in
- * the dispatch_execbuffer implementations. We
- * specifically don't want that set on batches the
- * command parser has accepted.
- */
- eb.batch_flags |= I915_DISPATCH_SECURE;
- eb.batch_start_offset = 0;
- eb.batch = vma;
- }
}
- if (eb.batch_len == 0)
- eb.batch_len = eb.batch->size - eb.batch_start_offset;
-
/*
* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
* batch" bit. Hence we need to pin secure batches into the global gtt.
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
index edd21d14e64f..1a51b3598d63 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
@@ -509,14 +509,14 @@ void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915,
I915_MM_SHRINKER, 0, _RET_IP_);
mutex_acquire(&mutex->dep_map, 0, 0, _RET_IP_);
- mutex_release(&mutex->dep_map, 0, _RET_IP_);
+ mutex_release(&mutex->dep_map, _RET_IP_);
- mutex_release(&i915->drm.struct_mutex.dep_map, 0, _RET_IP_);
+ mutex_release(&i915->drm.struct_mutex.dep_map, _RET_IP_);
fs_reclaim_release(GFP_KERNEL);
if (unlock)
- mutex_release(&i915->drm.struct_mutex.dep_map, 0, _RET_IP_);
+ mutex_release(&i915->drm.struct_mutex.dep_map, _RET_IP_);
}
#define obj_to_i915(obj__) to_i915((obj__)->base.dev)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index 6b3b50f0f6d9..abfbac49b8e8 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -671,8 +671,28 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
obj->mm.dirty = false;
for_each_sgt_page(page, sgt_iter, pages) {
- if (obj->mm.dirty)
+ if (obj->mm.dirty && trylock_page(page)) {
+ /*
+ * As this may not be anonymous memory (e.g. shmem)
+ * but exist on a real mapping, we have to lock
+ * the page in order to dirty it -- holding
+ * the page reference is not sufficient to
+ * prevent the inode from being truncated.
+ * Play safe and take the lock.
+ *
+ * However...!
+ *
+ * The mmu-notifier can be invalidated for a
+ * migrate_page that is already holding the lock
+ * on the page. Such a try_to_unmap() will result
+ * in us calling put_pages() and so recursively try
+ * to lock the page. We avoid that deadlock with
+ * a trylock_page() and in exchange we risk missing
+ * some page dirtying.
+ */
set_page_dirty(page);
+ unlock_page(page);
+ }
mark_page_accessed(page);
put_page(page);
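The comment above boils down to: take the page lock opportunistically, because a blocking lock can recurse onto a page the invalidating caller already holds. A standalone restatement of that pattern:

#include <linux/mm.h>
#include <linux/pagemap.h>

/* Best-effort dirtying: skip rather than deadlock if the page is
 * already locked (e.g. by an mmu-notifier invalidation in progress).
 */
static void example_dirty_page(struct page *page)
{
	if (trylock_page(page)) {
		set_page_dirty(page);
		unlock_page(page);
	}
}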
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
index 65b5ca74b394..7f647243b3b9 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
@@ -52,7 +52,7 @@ static inline unsigned long __timeline_mark_lock(struct intel_context *ce)
static inline void __timeline_mark_unlock(struct intel_context *ce,
unsigned long flags)
{
- mutex_release(&ce->timeline->mutex.dep_map, 0, _THIS_IP_);
+ mutex_release(&ce->timeline->mutex.dep_map, _THIS_IP_);
local_irq_restore(flags);
}
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pool.c b/drivers/gpu/drm/i915/gt/intel_engine_pool.c
index 4cd54c569911..379a91780bd4 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pool.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pool.c
@@ -103,6 +103,8 @@ node_create(struct intel_engine_pool *pool, size_t sz)
return ERR_CAST(obj);
}
+ i915_gem_object_set_readonly(obj);
+
node->obj = obj;
return node;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index a82cea95c2f2..9dd8c299cb2d 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -475,12 +475,13 @@ struct intel_engine_cs {
struct intel_engine_hangcheck hangcheck;
-#define I915_ENGINE_NEEDS_CMD_PARSER BIT(0)
+#define I915_ENGINE_USING_CMD_PARSER BIT(0)
#define I915_ENGINE_SUPPORTS_STATS BIT(1)
#define I915_ENGINE_HAS_PREEMPTION BIT(2)
#define I915_ENGINE_HAS_SEMAPHORES BIT(3)
#define I915_ENGINE_NEEDS_BREADCRUMB_TASKLET BIT(4)
#define I915_ENGINE_IS_VIRTUAL BIT(5)
+#define I915_ENGINE_REQUIRES_CMD_PARSER BIT(7)
unsigned int flags;
/*
@@ -541,9 +542,15 @@ struct intel_engine_cs {
};
static inline bool
-intel_engine_needs_cmd_parser(const struct intel_engine_cs *engine)
+intel_engine_using_cmd_parser(const struct intel_engine_cs *engine)
{
- return engine->flags & I915_ENGINE_NEEDS_CMD_PARSER;
+ return engine->flags & I915_ENGINE_USING_CMD_PARSER;
+}
+
+static inline bool
+intel_engine_requires_cmd_parser(const struct intel_engine_cs *engine)
+{
+ return engine->flags & I915_ENGINE_REQUIRES_CMD_PARSER;
}
static inline bool
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
index 1363e069ec83..fac75afed35b 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
@@ -38,6 +38,9 @@ static int __gt_unpark(struct intel_wakeref *wf)
gt->awake = intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ);
GEM_BUG_ON(!gt->awake);
+ if (NEEDS_RC6_CTX_CORRUPTION_WA(i915))
+ intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
+
intel_enable_gt_powersave(i915);
i915_update_gfx_val(i915);
@@ -67,6 +70,11 @@ static int __gt_park(struct intel_wakeref *wf)
if (INTEL_GEN(i915) >= 6)
gen6_rps_idle(i915);
+ if (NEEDS_RC6_CTX_CORRUPTION_WA(i915)) {
+ i915_rc6_ctx_wa_check(i915);
+ intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
+ }
+
/* Everything switched off, flush any residual interrupt just in case */
intel_synchronize_irq(i915);
diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c
index 728704bbbe18..cea184a7dde9 100644
--- a/drivers/gpu/drm/i915/gt/intel_mocs.c
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.c
@@ -199,14 +199,6 @@ static const struct drm_i915_mocs_entry broxton_mocs_table[] = {
MOCS_ENTRY(15, \
LE_3_WB | LE_TC_1_LLC | LE_LRUM(2) | LE_AOM(1), \
L3_3_WB), \
- /* Bypass LLC - Uncached (EHL+) */ \
- MOCS_ENTRY(16, \
- LE_1_UC | LE_TC_1_LLC | LE_SCF(1), \
- L3_1_UC), \
- /* Bypass LLC - L3 (Read-Only) (EHL+) */ \
- MOCS_ENTRY(17, \
- LE_1_UC | LE_TC_1_LLC | LE_SCF(1), \
- L3_3_WB), \
/* Self-Snoop - L3 + LLC */ \
MOCS_ENTRY(18, \
LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_SSE(3), \
@@ -270,7 +262,7 @@ static const struct drm_i915_mocs_entry tigerlake_mocs_table[] = {
L3_1_UC),
/* HW Special Case (Displayable) */
MOCS_ENTRY(61,
- LE_1_UC | LE_TC_1_LLC | LE_SCF(1),
+ LE_1_UC | LE_TC_1_LLC,
L3_3_WB),
};
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
index 13044c027f27..4bfaefdf548d 100644
--- a/drivers/gpu/drm/i915/gvt/dmabuf.c
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -498,8 +498,6 @@ int intel_vgpu_get_dmabuf(struct intel_vgpu *vgpu, unsigned int dmabuf_id)
goto out_free_gem;
}
- i915_gem_object_put(obj);
-
ret = dma_buf_fd(dmabuf, DRM_CLOEXEC | DRM_RDWR);
if (ret < 0) {
gvt_vgpu_err("create dma-buf fd failed ret:%d\n", ret);
@@ -524,6 +522,8 @@ int intel_vgpu_get_dmabuf(struct intel_vgpu *vgpu, unsigned int dmabuf_id)
file_count(dmabuf->file),
kref_read(&obj->base.refcount));
+ i915_gem_object_put(obj);
+
return dmabuf_fd;
out_free_dmabuf:
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 24555102e198..f24096e27bef 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -53,13 +53,11 @@
* granting userspace undue privileges. There are three categories of privilege.
*
* First, commands which are explicitly defined as privileged or which should
- * only be used by the kernel driver. The parser generally rejects such
- * commands, though it may allow some from the drm master process.
+ * only be used by the kernel driver. The parser rejects such commands.
*
* Second, commands which access registers. To support correct/enhanced
* userspace functionality, particularly certain OpenGL extensions, the parser
- * provides a whitelist of registers which userspace may safely access (for both
- * normal and drm master processes).
+ * provides a whitelist of registers which userspace may safely access
*
* Third, commands which access privileged memory (i.e. GGTT, HWS page, etc).
* The parser always rejects such commands.
@@ -84,9 +82,9 @@
* in the per-engine command tables.
*
* Other command table entries map fairly directly to high level categories
- * mentioned above: rejected, master-only, register whitelist. The parser
- * implements a number of checks, including the privileged memory checks, via a
- * general bitmasking mechanism.
+ * mentioned above: rejected, register whitelist. The parser implements a number
+ * of checks, including the privileged memory checks, via a general bitmasking
+ * mechanism.
*/
/*
@@ -104,8 +102,6 @@ struct drm_i915_cmd_descriptor {
* CMD_DESC_REJECT: The command is never allowed
* CMD_DESC_REGISTER: The command should be checked against the
* register whitelist for the appropriate ring
- * CMD_DESC_MASTER: The command is allowed if the submitting process
- * is the DRM master
*/
u32 flags;
#define CMD_DESC_FIXED (1<<0)
@@ -113,7 +109,6 @@ struct drm_i915_cmd_descriptor {
#define CMD_DESC_REJECT (1<<2)
#define CMD_DESC_REGISTER (1<<3)
#define CMD_DESC_BITMASK (1<<4)
-#define CMD_DESC_MASTER (1<<5)
/*
* The command's unique identification bits and the bitmask to get them.
@@ -194,7 +189,7 @@ struct drm_i915_cmd_table {
#define CMD(op, opm, f, lm, fl, ...) \
{ \
.flags = (fl) | ((f) ? CMD_DESC_FIXED : 0), \
- .cmd = { (op), ~0u << (opm) }, \
+ .cmd = { (op & ~0u << (opm)), ~0u << (opm) }, \
.length = { (lm) }, \
__VA_ARGS__ \
}
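The CMD() change matters because the parser's match test is (header & mask) == value; if the stored value keeps bits outside the mask (an opcode constant that also encodes operand defaults), no header can ever match. A standalone worked example with illustrative numbers, not real opcodes:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t op = 0x18800001;	/* opcode constant with a low operand bit */
	unsigned int opm = 23;		/* opcode lives in the top bits */
	uint32_t mask = ~0u << opm;	/* 0xff800000 */
	uint32_t header = 0x1880007f;	/* command word with operand data */

	uint32_t broken = op;		/* old: value keeps bits outside mask */
	uint32_t fixed = op & mask;	/* new: pre-masked match value */

	printf("old matches: %d\n", (header & mask) == broken);	/* 0 */
	printf("new matches: %d\n", (header & mask) == fixed);	/* 1 */
	return 0;
}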
@@ -209,14 +204,13 @@ struct drm_i915_cmd_table {
#define R CMD_DESC_REJECT
#define W CMD_DESC_REGISTER
#define B CMD_DESC_BITMASK
-#define M CMD_DESC_MASTER
/* Command Mask Fixed Len Action
---------------------------------------------------------- */
-static const struct drm_i915_cmd_descriptor common_cmds[] = {
+static const struct drm_i915_cmd_descriptor gen7_common_cmds[] = {
CMD( MI_NOOP, SMI, F, 1, S ),
CMD( MI_USER_INTERRUPT, SMI, F, 1, R ),
- CMD( MI_WAIT_FOR_EVENT, SMI, F, 1, M ),
+ CMD( MI_WAIT_FOR_EVENT, SMI, F, 1, R ),
CMD( MI_ARB_CHECK, SMI, F, 1, S ),
CMD( MI_REPORT_HEAD, SMI, F, 1, S ),
CMD( MI_SUSPEND_FLUSH, SMI, F, 1, S ),
@@ -246,7 +240,7 @@ static const struct drm_i915_cmd_descriptor common_cmds[] = {
CMD( MI_BATCH_BUFFER_START, SMI, !F, 0xFF, S ),
};
-static const struct drm_i915_cmd_descriptor render_cmds[] = {
+static const struct drm_i915_cmd_descriptor gen7_render_cmds[] = {
CMD( MI_FLUSH, SMI, F, 1, S ),
CMD( MI_ARB_ON_OFF, SMI, F, 1, R ),
CMD( MI_PREDICATE, SMI, F, 1, S ),
@@ -313,7 +307,7 @@ static const struct drm_i915_cmd_descriptor hsw_render_cmds[] = {
CMD( MI_URB_ATOMIC_ALLOC, SMI, F, 1, S ),
CMD( MI_SET_APPID, SMI, F, 1, S ),
CMD( MI_RS_CONTEXT, SMI, F, 1, S ),
- CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, M ),
+ CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, R ),
CMD( MI_LOAD_SCAN_LINES_EXCL, SMI, !F, 0x3F, R ),
CMD( MI_LOAD_REGISTER_REG, SMI, !F, 0xFF, W,
.reg = { .offset = 1, .mask = 0x007FFFFC, .step = 1 } ),
@@ -330,7 +324,7 @@ static const struct drm_i915_cmd_descriptor hsw_render_cmds[] = {
CMD( GFX_OP_3DSTATE_BINDING_TABLE_EDIT_PS, S3D, !F, 0x1FF, S ),
};
-static const struct drm_i915_cmd_descriptor video_cmds[] = {
+static const struct drm_i915_cmd_descriptor gen7_video_cmds[] = {
CMD( MI_ARB_ON_OFF, SMI, F, 1, R ),
CMD( MI_SET_APPID, SMI, F, 1, S ),
CMD( MI_STORE_DWORD_IMM, SMI, !F, 0xFF, B,
@@ -374,7 +368,7 @@ static const struct drm_i915_cmd_descriptor video_cmds[] = {
CMD( MFX_WAIT, SMFX, F, 1, S ),
};
-static const struct drm_i915_cmd_descriptor vecs_cmds[] = {
+static const struct drm_i915_cmd_descriptor gen7_vecs_cmds[] = {
CMD( MI_ARB_ON_OFF, SMI, F, 1, R ),
CMD( MI_SET_APPID, SMI, F, 1, S ),
CMD( MI_STORE_DWORD_IMM, SMI, !F, 0xFF, B,
@@ -412,7 +406,7 @@ static const struct drm_i915_cmd_descriptor vecs_cmds[] = {
}}, ),
};
-static const struct drm_i915_cmd_descriptor blt_cmds[] = {
+static const struct drm_i915_cmd_descriptor gen7_blt_cmds[] = {
CMD( MI_DISPLAY_FLIP, SMI, !F, 0xFF, R ),
CMD( MI_STORE_DWORD_IMM, SMI, !F, 0x3FF, B,
.bits = {{
@@ -446,10 +440,64 @@ static const struct drm_i915_cmd_descriptor blt_cmds[] = {
};
static const struct drm_i915_cmd_descriptor hsw_blt_cmds[] = {
- CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, M ),
+ CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, R ),
CMD( MI_LOAD_SCAN_LINES_EXCL, SMI, !F, 0x3F, R ),
};
+/*
+ * For Gen9 we can still rely on the h/w to enforce cmd security, and only
+ * need to re-enforce the register access checks. We therefore only need to
+ * teach the cmdparser how to find the end of each command, and identify
+ * register accesses. The table doesn't need to reject any commands, and so
+ * the only commands listed here are:
+ * 1) Those that touch registers
+ * 2) Those that do not have the default 8-bit length
+ *
+ * Note that the default MI length mask chosen for this table is 0xFF, not
+ * the 0x3F used on older devices. This is because the vast majority of MI
+ * cmds on Gen9 use a standard 8-bit Length field.
+ * All the Gen9 blitter instructions are standard 0xFF length mask, and
+ * none allow access to non-general registers, so in fact no BLT cmds are
+ * included in the table at all.
+ *
+ */
+static const struct drm_i915_cmd_descriptor gen9_blt_cmds[] = {
+ CMD( MI_NOOP, SMI, F, 1, S ),
+ CMD( MI_USER_INTERRUPT, SMI, F, 1, S ),
+ CMD( MI_WAIT_FOR_EVENT, SMI, F, 1, S ),
+ CMD( MI_FLUSH, SMI, F, 1, S ),
+ CMD( MI_ARB_CHECK, SMI, F, 1, S ),
+ CMD( MI_REPORT_HEAD, SMI, F, 1, S ),
+ CMD( MI_ARB_ON_OFF, SMI, F, 1, S ),
+ CMD( MI_SUSPEND_FLUSH, SMI, F, 1, S ),
+ CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, S ),
+ CMD( MI_LOAD_SCAN_LINES_EXCL, SMI, !F, 0x3F, S ),
+ CMD( MI_STORE_DWORD_IMM, SMI, !F, 0x3FF, S ),
+ CMD( MI_LOAD_REGISTER_IMM(1), SMI, !F, 0xFF, W,
+ .reg = { .offset = 1, .mask = 0x007FFFFC, .step = 2 } ),
+ CMD( MI_UPDATE_GTT, SMI, !F, 0x3FF, S ),
+ CMD( MI_STORE_REGISTER_MEM_GEN8, SMI, F, 4, W,
+ .reg = { .offset = 1, .mask = 0x007FFFFC } ),
+ CMD( MI_FLUSH_DW, SMI, !F, 0x3F, S ),
+ CMD( MI_LOAD_REGISTER_MEM_GEN8, SMI, F, 4, W,
+ .reg = { .offset = 1, .mask = 0x007FFFFC } ),
+ CMD( MI_LOAD_REGISTER_REG, SMI, !F, 0xFF, W,
+ .reg = { .offset = 1, .mask = 0x007FFFFC, .step = 1 } ),
+
+ /*
+ * We allow BB_START but apply further checks. We just sanitize the
+ * basic fields here.
+ */
+#define MI_BB_START_OPERAND_MASK GENMASK(SMI-1, 0)
+#define MI_BB_START_OPERAND_EXPECT (MI_BATCH_PPGTT_HSW | 1)
+ CMD( MI_BATCH_BUFFER_START_GEN8, SMI, !F, 0xFF, B,
+ .bits = {{
+ .offset = 0,
+ .mask = MI_BB_START_OPERAND_MASK,
+ .expected = MI_BB_START_OPERAND_EXPECT,
+ }}, ),
+};
+
static const struct drm_i915_cmd_descriptor noop_desc =
CMD(MI_NOOP, SMI, F, 1, S);
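For descriptors flagged B (CMD_DESC_BITMASK), like the sanitized BB_START entry above, the parser checks that the dword at a given offset satisfies (dword & mask) == expected. A simplified standalone version of that check (the real parser carries extra condition fields):

#include <stdbool.h>
#include <stdint.h>

struct bits_check {
	unsigned int offset;	/* dword index into the command */
	uint32_t mask;
	uint32_t expected;
};

static bool example_check_bits(const uint32_t *cmd,
			       const struct bits_check *c)
{
	return (cmd[c->offset] & c->mask) == c->expected;
}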
@@ -463,40 +511,44 @@ static const struct drm_i915_cmd_descriptor noop_desc =
#undef R
#undef W
#undef B
-#undef M
-static const struct drm_i915_cmd_table gen7_render_cmds[] = {
- { common_cmds, ARRAY_SIZE(common_cmds) },
- { render_cmds, ARRAY_SIZE(render_cmds) },
+static const struct drm_i915_cmd_table gen7_render_cmd_table[] = {
+ { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
+ { gen7_render_cmds, ARRAY_SIZE(gen7_render_cmds) },
};
-static const struct drm_i915_cmd_table hsw_render_ring_cmds[] = {
- { common_cmds, ARRAY_SIZE(common_cmds) },
- { render_cmds, ARRAY_SIZE(render_cmds) },
+static const struct drm_i915_cmd_table hsw_render_ring_cmd_table[] = {
+ { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
+ { gen7_render_cmds, ARRAY_SIZE(gen7_render_cmds) },
{ hsw_render_cmds, ARRAY_SIZE(hsw_render_cmds) },
};
-static const struct drm_i915_cmd_table gen7_video_cmds[] = {
- { common_cmds, ARRAY_SIZE(common_cmds) },
- { video_cmds, ARRAY_SIZE(video_cmds) },
+static const struct drm_i915_cmd_table gen7_video_cmd_table[] = {
+ { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
+ { gen7_video_cmds, ARRAY_SIZE(gen7_video_cmds) },
};
-static const struct drm_i915_cmd_table hsw_vebox_cmds[] = {
- { common_cmds, ARRAY_SIZE(common_cmds) },
- { vecs_cmds, ARRAY_SIZE(vecs_cmds) },
+static const struct drm_i915_cmd_table hsw_vebox_cmd_table[] = {
+ { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
+ { gen7_vecs_cmds, ARRAY_SIZE(gen7_vecs_cmds) },
};
-static const struct drm_i915_cmd_table gen7_blt_cmds[] = {
- { common_cmds, ARRAY_SIZE(common_cmds) },
- { blt_cmds, ARRAY_SIZE(blt_cmds) },
+static const struct drm_i915_cmd_table gen7_blt_cmd_table[] = {
+ { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
+ { gen7_blt_cmds, ARRAY_SIZE(gen7_blt_cmds) },
};
-static const struct drm_i915_cmd_table hsw_blt_ring_cmds[] = {
- { common_cmds, ARRAY_SIZE(common_cmds) },
- { blt_cmds, ARRAY_SIZE(blt_cmds) },
+static const struct drm_i915_cmd_table hsw_blt_ring_cmd_table[] = {
+ { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
+ { gen7_blt_cmds, ARRAY_SIZE(gen7_blt_cmds) },
{ hsw_blt_cmds, ARRAY_SIZE(hsw_blt_cmds) },
};
+static const struct drm_i915_cmd_table gen9_blt_cmd_table[] = {
+ { gen9_blt_cmds, ARRAY_SIZE(gen9_blt_cmds) },
+};
+
/*
* Register whitelists, sorted by increasing register offset.
*/
@@ -612,17 +664,27 @@ static const struct drm_i915_reg_descriptor gen7_blt_regs[] = {
REG64_IDX(RING_TIMESTAMP, BLT_RING_BASE),
};
-static const struct drm_i915_reg_descriptor ivb_master_regs[] = {
- REG32(FORCEWAKE_MT),
- REG32(DERRMR),
- REG32(GEN7_PIPE_DE_LOAD_SL(PIPE_A)),
- REG32(GEN7_PIPE_DE_LOAD_SL(PIPE_B)),
- REG32(GEN7_PIPE_DE_LOAD_SL(PIPE_C)),
-};
-
-static const struct drm_i915_reg_descriptor hsw_master_regs[] = {
- REG32(FORCEWAKE_MT),
- REG32(DERRMR),
+static const struct drm_i915_reg_descriptor gen9_blt_regs[] = {
+ REG64_IDX(RING_TIMESTAMP, RENDER_RING_BASE),
+ REG64_IDX(RING_TIMESTAMP, BSD_RING_BASE),
+ REG32(BCS_SWCTRL),
+ REG64_IDX(RING_TIMESTAMP, BLT_RING_BASE),
+ REG64_IDX(BCS_GPR, 0),
+ REG64_IDX(BCS_GPR, 1),
+ REG64_IDX(BCS_GPR, 2),
+ REG64_IDX(BCS_GPR, 3),
+ REG64_IDX(BCS_GPR, 4),
+ REG64_IDX(BCS_GPR, 5),
+ REG64_IDX(BCS_GPR, 6),
+ REG64_IDX(BCS_GPR, 7),
+ REG64_IDX(BCS_GPR, 8),
+ REG64_IDX(BCS_GPR, 9),
+ REG64_IDX(BCS_GPR, 10),
+ REG64_IDX(BCS_GPR, 11),
+ REG64_IDX(BCS_GPR, 12),
+ REG64_IDX(BCS_GPR, 13),
+ REG64_IDX(BCS_GPR, 14),
+ REG64_IDX(BCS_GPR, 15),
};
#undef REG64
@@ -631,28 +693,27 @@ static const struct drm_i915_reg_descriptor hsw_master_regs[] = {
struct drm_i915_reg_table {
const struct drm_i915_reg_descriptor *regs;
int num_regs;
- bool master;
};
static const struct drm_i915_reg_table ivb_render_reg_tables[] = {
- { gen7_render_regs, ARRAY_SIZE(gen7_render_regs), false },
- { ivb_master_regs, ARRAY_SIZE(ivb_master_regs), true },
+ { gen7_render_regs, ARRAY_SIZE(gen7_render_regs) },
};
static const struct drm_i915_reg_table ivb_blt_reg_tables[] = {
- { gen7_blt_regs, ARRAY_SIZE(gen7_blt_regs), false },
- { ivb_master_regs, ARRAY_SIZE(ivb_master_regs), true },
+ { gen7_blt_regs, ARRAY_SIZE(gen7_blt_regs) },
};
static const struct drm_i915_reg_table hsw_render_reg_tables[] = {
- { gen7_render_regs, ARRAY_SIZE(gen7_render_regs), false },
- { hsw_render_regs, ARRAY_SIZE(hsw_render_regs), false },
- { hsw_master_regs, ARRAY_SIZE(hsw_master_regs), true },
+ { gen7_render_regs, ARRAY_SIZE(gen7_render_regs) },
+ { hsw_render_regs, ARRAY_SIZE(hsw_render_regs) },
};
static const struct drm_i915_reg_table hsw_blt_reg_tables[] = {
- { gen7_blt_regs, ARRAY_SIZE(gen7_blt_regs), false },
- { hsw_master_regs, ARRAY_SIZE(hsw_master_regs), true },
+ { gen7_blt_regs, ARRAY_SIZE(gen7_blt_regs) },
+};
+
+static const struct drm_i915_reg_table gen9_blt_reg_tables[] = {
+ { gen9_blt_regs, ARRAY_SIZE(gen9_blt_regs) },
};
static u32 gen7_render_get_cmd_length_mask(u32 cmd_header)
@@ -710,6 +771,17 @@ static u32 gen7_blt_get_cmd_length_mask(u32 cmd_header)
return 0;
}
+static u32 gen9_blt_get_cmd_length_mask(u32 cmd_header)
+{
+ u32 client = cmd_header >> INSTR_CLIENT_SHIFT;
+
+ if (client == INSTR_MI_CLIENT || client == INSTR_BC_CLIENT)
+ return 0xFF;
+
+ DRM_DEBUG_DRIVER("CMD: Abnormal blt cmd length! 0x%08X\n", cmd_header);
+ return 0;
+}
+
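The helper above only classifies a Gen9 blitter header and hands back a length mask; the parser then turns that mask into a dword count using the LENGTH_BIAS of 2 defined further down in this patch. A standalone sketch of that arithmetic, assuming the usual i915 header encoding (client id in bits 31:29); the header value and macro values here are illustrative, not copied from the patch:

	#include <stdint.h>
	#include <stdio.h>

	#define INSTR_CLIENT_SHIFT 29 /* assumed i915 value: client id in bits 31:29 */
	#define INSTR_MI_CLIENT 0x0
	#define INSTR_BC_CLIENT 0x2
	#define LENGTH_BIAS 2 /* mirrors the parser's LENGTH_BIAS further down */

	static uint32_t gen9_blt_length_mask(uint32_t header)
	{
		uint32_t client = header >> INSTR_CLIENT_SHIFT;

		return (client == INSTR_MI_CLIENT || client == INSTR_BC_CLIENT) ? 0xFF : 0;
	}

	int main(void)
	{
		uint32_t header = 0x13000004; /* hypothetical MI cmd, length field = 4 */
		uint32_t mask = gen9_blt_length_mask(header);

		if (mask) /* variable-length cmd: the bias converts the field to dwords */
			printf("length = %u dwords\n", (header & mask) + LENGTH_BIAS);
		return 0;
	}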
static bool validate_cmds_sorted(const struct intel_engine_cs *engine,
const struct drm_i915_cmd_table *cmd_tables,
int cmd_table_count)
@@ -867,18 +939,19 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
int cmd_table_count;
int ret;
- if (!IS_GEN(engine->i915, 7))
+ if (!IS_GEN(engine->i915, 7) && !(IS_GEN(engine->i915, 9) &&
+ engine->class == COPY_ENGINE_CLASS))
return;
switch (engine->class) {
case RENDER_CLASS:
if (IS_HASWELL(engine->i915)) {
- cmd_tables = hsw_render_ring_cmds;
+ cmd_tables = hsw_render_ring_cmd_table;
cmd_table_count =
- ARRAY_SIZE(hsw_render_ring_cmds);
+ ARRAY_SIZE(hsw_render_ring_cmd_table);
} else {
- cmd_tables = gen7_render_cmds;
- cmd_table_count = ARRAY_SIZE(gen7_render_cmds);
+ cmd_tables = gen7_render_cmd_table;
+ cmd_table_count = ARRAY_SIZE(gen7_render_cmd_table);
}
if (IS_HASWELL(engine->i915)) {
@@ -888,36 +961,46 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
engine->reg_tables = ivb_render_reg_tables;
engine->reg_table_count = ARRAY_SIZE(ivb_render_reg_tables);
}
-
engine->get_cmd_length_mask = gen7_render_get_cmd_length_mask;
break;
case VIDEO_DECODE_CLASS:
- cmd_tables = gen7_video_cmds;
- cmd_table_count = ARRAY_SIZE(gen7_video_cmds);
+ cmd_tables = gen7_video_cmd_table;
+ cmd_table_count = ARRAY_SIZE(gen7_video_cmd_table);
engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
break;
case COPY_ENGINE_CLASS:
- if (IS_HASWELL(engine->i915)) {
- cmd_tables = hsw_blt_ring_cmds;
- cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmds);
+ engine->get_cmd_length_mask = gen7_blt_get_cmd_length_mask;
+ if (IS_GEN(engine->i915, 9)) {
+ cmd_tables = gen9_blt_cmd_table;
+ cmd_table_count = ARRAY_SIZE(gen9_blt_cmd_table);
+ engine->get_cmd_length_mask =
+ gen9_blt_get_cmd_length_mask;
+
+ /* BCS Engine unsafe without parser */
+ engine->flags |= I915_ENGINE_REQUIRES_CMD_PARSER;
+ } else if (IS_HASWELL(engine->i915)) {
+ cmd_tables = hsw_blt_ring_cmd_table;
+ cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmd_table);
} else {
- cmd_tables = gen7_blt_cmds;
- cmd_table_count = ARRAY_SIZE(gen7_blt_cmds);
+ cmd_tables = gen7_blt_cmd_table;
+ cmd_table_count = ARRAY_SIZE(gen7_blt_cmd_table);
}
- if (IS_HASWELL(engine->i915)) {
+ if (IS_GEN(engine->i915, 9)) {
+ engine->reg_tables = gen9_blt_reg_tables;
+ engine->reg_table_count =
+ ARRAY_SIZE(gen9_blt_reg_tables);
+ } else if (IS_HASWELL(engine->i915)) {
engine->reg_tables = hsw_blt_reg_tables;
engine->reg_table_count = ARRAY_SIZE(hsw_blt_reg_tables);
} else {
engine->reg_tables = ivb_blt_reg_tables;
engine->reg_table_count = ARRAY_SIZE(ivb_blt_reg_tables);
}
-
- engine->get_cmd_length_mask = gen7_blt_get_cmd_length_mask;
break;
case VIDEO_ENHANCEMENT_CLASS:
- cmd_tables = hsw_vebox_cmds;
- cmd_table_count = ARRAY_SIZE(hsw_vebox_cmds);
+ cmd_tables = hsw_vebox_cmd_table;
+ cmd_table_count = ARRAY_SIZE(hsw_vebox_cmd_table);
/* VECS can use the same length_mask function as VCS */
engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
break;
@@ -943,7 +1026,7 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
return;
}
- engine->flags |= I915_ENGINE_NEEDS_CMD_PARSER;
+ engine->flags |= I915_ENGINE_USING_CMD_PARSER;
}
/**
@@ -955,7 +1038,7 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
*/
void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine)
{
- if (!intel_engine_needs_cmd_parser(engine))
+ if (!intel_engine_using_cmd_parser(engine))
return;
fini_hash_table(engine);
@@ -1029,22 +1112,16 @@ __find_reg(const struct drm_i915_reg_descriptor *table, int count, u32 addr)
}
static const struct drm_i915_reg_descriptor *
-find_reg(const struct intel_engine_cs *engine, bool is_master, u32 addr)
+find_reg(const struct intel_engine_cs *engine, u32 addr)
{
const struct drm_i915_reg_table *table = engine->reg_tables;
+ const struct drm_i915_reg_descriptor *reg = NULL;
int count = engine->reg_table_count;
- for (; count > 0; ++table, --count) {
- if (!table->master || is_master) {
- const struct drm_i915_reg_descriptor *reg;
+ for (; !reg && (count > 0); ++table, --count)
+ reg = __find_reg(table->regs, table->num_regs, addr);
- reg = __find_reg(table->regs, table->num_regs, addr);
- if (reg != NULL)
- return reg;
- }
- }
-
- return NULL;
+ return reg;
}
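With the master-only tables gone, find_reg() reduces to a walk over the engine's remaining tables that stops at the first hit. A compact standalone model of that loop with stand-in types; the real __find_reg() can exploit the sorted tables noted above rather than scanning linearly:

	#include <stddef.h>
	#include <stdint.h>

	struct reg_desc { uint32_t addr; };
	struct reg_table { const struct reg_desc *regs; int num_regs; };

	/* stand-in for __find_reg(); a linear scan keeps the sketch short */
	static const struct reg_desc *
	find_in_table(const struct reg_desc *regs, int count, uint32_t addr)
	{
		int i;

		for (i = 0; i < count; i++)
			if (regs[i].addr == addr)
				return &regs[i];
		return NULL;
	}

	static const struct reg_desc *
	find_reg(const struct reg_table *table, int count, uint32_t addr)
	{
		const struct reg_desc *reg = NULL;

		/* walk every table until the first descriptor matches */
		for (; !reg && count > 0; ++table, --count)
			reg = find_in_table(table->regs, table->num_regs, addr);

		return reg;
	}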
/* Returns a vmap'd pointer to dst_obj, which the caller must unmap */
@@ -1128,8 +1205,7 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
static bool check_cmd(const struct intel_engine_cs *engine,
const struct drm_i915_cmd_descriptor *desc,
- const u32 *cmd, u32 length,
- const bool is_master)
+ const u32 *cmd, u32 length)
{
if (desc->flags & CMD_DESC_SKIP)
return true;
@@ -1139,12 +1215,6 @@ static bool check_cmd(const struct intel_engine_cs *engine,
return false;
}
- if ((desc->flags & CMD_DESC_MASTER) && !is_master) {
- DRM_DEBUG_DRIVER("CMD: Rejected master-only command: 0x%08X\n",
- *cmd);
- return false;
- }
-
if (desc->flags & CMD_DESC_REGISTER) {
/*
* Get the distance between individual register offset
@@ -1158,7 +1228,7 @@ static bool check_cmd(const struct intel_engine_cs *engine,
offset += step) {
const u32 reg_addr = cmd[offset] & desc->reg.mask;
const struct drm_i915_reg_descriptor *reg =
- find_reg(engine, is_master, reg_addr);
+ find_reg(engine, reg_addr);
if (!reg) {
DRM_DEBUG_DRIVER("CMD: Rejected register 0x%08X in command: 0x%08X (%s)\n",
@@ -1236,16 +1306,112 @@ static bool check_cmd(const struct intel_engine_cs *engine,
return true;
}
+static int check_bbstart(const struct i915_gem_context *ctx,
+ u32 *cmd, u32 offset, u32 length,
+ u32 batch_len,
+ u64 batch_start,
+ u64 shadow_batch_start)
+{
+ u64 jump_offset, jump_target;
+ u32 target_cmd_offset, target_cmd_index;
+
+ /* For igt compatibility on older platforms */
+ if (CMDPARSER_USES_GGTT(ctx->i915)) {
+ DRM_DEBUG("CMD: Rejecting BB_START for ggtt based submission\n");
+ return -EACCES;
+ }
+
+ if (length != 3) {
+ DRM_DEBUG("CMD: Recursive BB_START with bad length(%u)\n",
+ length);
+ return -EINVAL;
+ }
+
+ jump_target = *(u64 *)(cmd + 1);
+ jump_offset = jump_target - batch_start;
+
+ /*
+ * A jump_target below batch_start underflows to a value far outside
+ * the range of a u32, so the single >= test catches targets that are
+ * both too large and too small
+ */
+ if (jump_offset >= batch_len) {
+ DRM_DEBUG("CMD: BB_START to 0x%llx jumps out of BB\n",
+ jump_target);
+ return -EINVAL;
+ }
+
+ /*
+ * This cannot overflow a u32 because we already checked jump_offset
+ * is within the BB, and the batch_len is a u32
+ */
+ target_cmd_offset = lower_32_bits(jump_offset);
+ target_cmd_index = target_cmd_offset / sizeof(u32);
+
+ *(u64 *)(cmd + 1) = shadow_batch_start + target_cmd_offset;
+
+ if (target_cmd_index == offset)
+ return 0;
+
+ if (ctx->jump_whitelist_cmds <= target_cmd_index) {
+ DRM_DEBUG("CMD: Rejecting BB_START - truncated whitelist array\n");
+ return -EINVAL;
+ } else if (!test_bit(target_cmd_index, ctx->jump_whitelist)) {
+ DRM_DEBUG("CMD: BB_START to 0x%llx not a previously executed cmd\n",
+ jump_target);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
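The single >= comparison in check_bbstart() leans on unsigned wraparound: computed in u64, a jump target below batch_start becomes enormous, so one test rejects jumps on either side of the batch. A standalone demonstration with made-up addresses:

	#include <stdint.h>
	#include <stdio.h>

	static int target_in_batch(uint64_t target, uint64_t start, uint32_t len)
	{
		uint64_t off = target - start; /* wraps on purpose when target < start */

		return off < len;
	}

	int main(void)
	{
		uint64_t start = 0x100000;

		printf("%d\n", target_in_batch(0x100010, start, 0x1000)); /* 1: inside */
		printf("%d\n", target_in_batch(0x101000, start, 0x1000)); /* 0: one past the end */
		printf("%d\n", target_in_batch(0x0fff00, start, 0x1000)); /* 0: below start, wraps huge */
		return 0;
	}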
+static void init_whitelist(struct i915_gem_context *ctx, u32 batch_len)
+{
+ const u32 batch_cmds = DIV_ROUND_UP(batch_len, sizeof(u32));
+ const u32 exact_size = BITS_TO_LONGS(batch_cmds);
+ u32 next_size = BITS_TO_LONGS(roundup_pow_of_two(batch_cmds));
+ unsigned long *next_whitelist;
+
+ if (CMDPARSER_USES_GGTT(ctx->i915))
+ return;
+
+ if (batch_cmds <= ctx->jump_whitelist_cmds) {
+ bitmap_zero(ctx->jump_whitelist, batch_cmds);
+ return;
+ }
+
+again:
+ next_whitelist = kcalloc(next_size, sizeof(long), GFP_KERNEL);
+ if (next_whitelist) {
+ kfree(ctx->jump_whitelist);
+ ctx->jump_whitelist = next_whitelist;
+ ctx->jump_whitelist_cmds =
+ next_size * BITS_PER_BYTE * sizeof(long);
+ return;
+ }
+
+ if (next_size > exact_size) {
+ next_size = exact_size;
+ goto again;
+ }
+
+ DRM_DEBUG("CMD: Failed to extend whitelist. BB_START may be disallowed\n");
+ bitmap_zero(ctx->jump_whitelist, ctx->jump_whitelist_cmds);
+
+ return;
+}
+
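init_whitelist() keeps the bitmap in the context so it can be reused batch after batch: it first tries a power-of-two allocation (cheap to satisfy repeatedly) and only falls back to the exact size under memory pressure. A userspace model of that sizing strategy, with kcalloc()/BITS_TO_LONGS() replaced by their libc equivalents:

	#include <stdlib.h>

	#define BITS_PER_LONG (8 * sizeof(long))
	#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

	static size_t roundup_pow_of_two(size_t n)
	{
		size_t p = 1;

		while (p < n)
			p <<= 1;
		return p;
	}

	/* Returns a zeroed bitmap; *nbits reports how many bits it can track. */
	static unsigned long *alloc_whitelist(size_t batch_cmds, size_t *nbits)
	{
		size_t want = BITS_TO_LONGS(roundup_pow_of_two(batch_cmds));
		size_t exact = BITS_TO_LONGS(batch_cmds);
		unsigned long *map = calloc(want, sizeof(long));

		if (!map && want > exact) { /* fall back to the exact size */
			want = exact;
			map = calloc(want, sizeof(long));
		}
		*nbits = map ? want * BITS_PER_LONG : 0;
		return map;
	}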
#define LENGTH_BIAS 2
/**
* i915_parse_cmds() - parse a submitted batch buffer for privilege violations
+ * @ctx: the context in which the batch is to execute
* @engine: the engine on which the batch is to execute
* @batch_obj: the batch buffer in question
- * @shadow_batch_obj: copy of the batch buffer in question
+ * @batch_start: Canonical base address of batch
* @batch_start_offset: byte offset in the batch at which execution starts
* @batch_len: length of the commands in batch_obj
- * @is_master: is the submitting process the drm master?
+ * @shadow_batch_obj: copy of the batch buffer in question
+ * @shadow_batch_start: Canonical base address of shadow_batch_obj
*
* Parses the specified batch buffer looking for privilege violations as
* described in the overview.
@@ -1253,14 +1419,17 @@ static bool check_cmd(const struct intel_engine_cs *engine,
* Return: non-zero if the parser finds violations or otherwise fails; -EACCES
* if the batch appears legal but should use hardware parsing
*/
-int intel_engine_cmd_parser(struct intel_engine_cs *engine,
+int intel_engine_cmd_parser(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine,
struct drm_i915_gem_object *batch_obj,
- struct drm_i915_gem_object *shadow_batch_obj,
+ u64 batch_start,
u32 batch_start_offset,
u32 batch_len,
- bool is_master)
+ struct drm_i915_gem_object *shadow_batch_obj,
+ u64 shadow_batch_start)
{
- u32 *cmd, *batch_end;
+ u32 *cmd, *batch_end, offset = 0;
struct drm_i915_cmd_descriptor default_desc = noop_desc;
const struct drm_i915_cmd_descriptor *desc = &default_desc;
bool needs_clflush_after = false;
@@ -1274,6 +1443,8 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
return PTR_ERR(cmd);
}
+ init_whitelist(ctx, batch_len);
+
/*
* We use the batch length as size because the shadow object is as
* large or larger and copy_batch() will write MI_NOPs to the extra
* space.
*/
@@ -1283,31 +1454,15 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
do {
u32 length;
- if (*cmd == MI_BATCH_BUFFER_END) {
- if (needs_clflush_after) {
- void *ptr = page_mask_bits(shadow_batch_obj->mm.mapping);
- drm_clflush_virt_range(ptr,
- (void *)(cmd + 1) - ptr);
- }
+ if (*cmd == MI_BATCH_BUFFER_END)
break;
- }
desc = find_cmd(engine, *cmd, desc, &default_desc);
if (!desc) {
DRM_DEBUG_DRIVER("CMD: Unrecognized command: 0x%08X\n",
*cmd);
ret = -EINVAL;
- break;
- }
-
- /*
- * If the batch buffer contains a chained batch, return an
- * error that tells the caller to abort and dispatch the
- * workload as a non-secure batch.
- */
- if (desc->cmd.value == MI_BATCH_BUFFER_START) {
- ret = -EACCES;
- break;
+ goto err;
}
if (desc->flags & CMD_DESC_FIXED)
@@ -1321,22 +1476,43 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
length,
batch_end - cmd);
ret = -EINVAL;
- break;
+ goto err;
}
- if (!check_cmd(engine, desc, cmd, length, is_master)) {
+ if (!check_cmd(engine, desc, cmd, length)) {
ret = -EACCES;
+ goto err;
+ }
+
+ if (desc->cmd.value == MI_BATCH_BUFFER_START) {
+ ret = check_bbstart(ctx, cmd, offset, length,
+ batch_len, batch_start,
+ shadow_batch_start);
+
+ if (ret)
+ goto err;
break;
}
+ if (ctx->jump_whitelist_cmds > offset)
+ set_bit(offset, ctx->jump_whitelist);
+
cmd += length;
+ offset += length;
if (cmd >= batch_end) {
DRM_DEBUG_DRIVER("CMD: Got to the end of the buffer w/o a BBE cmd!\n");
ret = -EINVAL;
- break;
+ goto err;
}
} while (1);
+ if (needs_clflush_after) {
+ void *ptr = page_mask_bits(shadow_batch_obj->mm.mapping);
+
+ drm_clflush_virt_range(ptr, (void *)(cmd + 1) - ptr);
+ }
+
+err:
i915_gem_object_unpin_map(shadow_batch_obj);
return ret;
}
@@ -1357,7 +1533,7 @@ int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv)
/* If the command parser is not enabled, report 0 - unsupported */
for_each_uabi_engine(engine, dev_priv) {
- if (intel_engine_needs_cmd_parser(engine)) {
+ if (intel_engine_using_cmd_parser(engine)) {
active = true;
break;
}
@@ -1382,6 +1558,7 @@ int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv)
* the parser enabled.
* 9. Don't whitelist or handle oacontrol specially, as ownership
* for oacontrol state is moving to i915-perf.
+ * 10. Support for Gen9 BCS Parsing
*/
- return 9;
+ return 10;
}
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index bb6f86c7067a..3d717e282908 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -364,9 +364,6 @@ static int i915_driver_modeset_probe(struct drm_device *dev)
if (ret)
goto cleanup_vga_client;
- /* must happen before intel_power_domains_init_hw() on VLV/CHV */
- intel_update_rawclk(dev_priv);
-
intel_power_domains_init_hw(dev_priv, false);
intel_csr_ucode_init(dev_priv);
@@ -1850,6 +1847,8 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
i915_gem_suspend_late(dev_priv);
+ i915_rc6_ctx_wa_suspend(dev_priv);
+
intel_uncore_suspend(&dev_priv->uncore);
intel_power_domains_suspend(dev_priv,
@@ -2053,6 +2052,8 @@ static int i915_drm_resume_early(struct drm_device *dev)
intel_power_domains_resume(dev_priv);
+ i915_rc6_ctx_wa_resume(dev_priv);
+
intel_gt_sanitize(&dev_priv->gt, true);
enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 953e1d12c23c..89b6112bd66b 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -593,6 +593,8 @@ struct intel_rps {
struct intel_rc6 {
bool enabled;
+ bool ctx_corrupted;
+ intel_wakeref_t ctx_corrupted_wakeref;
u64 prev_hw_residency[4];
u64 cur_residency[4];
};
@@ -2075,9 +2077,16 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define VEBOX_MASK(dev_priv) \
ENGINE_INSTANCES_MASK(dev_priv, VECS0, I915_MAX_VECS)
+/*
+ * The Gen7 cmdparser copies the scanned buffer to the ggtt for execution.
+ * All later gens can run the final buffer from the ppgtt.
+ */
+#define CMDPARSER_USES_GGTT(dev_priv) IS_GEN(dev_priv, 7)
+
#define HAS_LLC(dev_priv) (INTEL_INFO(dev_priv)->has_llc)
#define HAS_SNOOP(dev_priv) (INTEL_INFO(dev_priv)->has_snoop)
#define HAS_EDRAM(dev_priv) ((dev_priv)->edram_size_mb)
+#define HAS_SECURE_BATCHES(dev_priv) (INTEL_GEN(dev_priv) < 6)
#define HAS_WT(dev_priv) ((IS_HASWELL(dev_priv) || \
IS_BROADWELL(dev_priv)) && HAS_EDRAM(dev_priv))
@@ -2110,10 +2119,12 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
/* Early gen2 have a totally busted CS tlb and require pinned batches. */
#define HAS_BROKEN_CS_TLB(dev_priv) (IS_I830(dev_priv) || IS_I845G(dev_priv))
+#define NEEDS_RC6_CTX_CORRUPTION_WA(dev_priv) \
+ (IS_BROADWELL(dev_priv) || IS_GEN(dev_priv, 9))
+
/* WaRsDisableCoarsePowerGating:skl,cnl */
#define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \
- (IS_CANNONLAKE(dev_priv) || \
- IS_SKL_GT3(dev_priv) || IS_SKL_GT4(dev_priv))
+ (IS_CANNONLAKE(dev_priv) || IS_GEN(dev_priv, 9))
#define HAS_GMBUS_IRQ(dev_priv) (INTEL_GEN(dev_priv) >= 4)
#define HAS_GMBUS_BURST_READ(dev_priv) (INTEL_GEN(dev_priv) >= 10 || \
@@ -2284,6 +2295,14 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
unsigned long flags);
#define I915_GEM_OBJECT_UNBIND_ACTIVE BIT(0)
+struct i915_vma * __must_check
+i915_gem_object_pin(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
+ const struct i915_ggtt_view *view,
+ u64 size,
+ u64 alignment,
+ u64 flags);
+
void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv);
static inline int __must_check
@@ -2393,12 +2412,14 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type);
int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv);
void intel_engine_init_cmd_parser(struct intel_engine_cs *engine);
void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine);
-int intel_engine_cmd_parser(struct intel_engine_cs *engine,
+int intel_engine_cmd_parser(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine,
struct drm_i915_gem_object *batch_obj,
- struct drm_i915_gem_object *shadow_batch_obj,
+ u64 user_batch_start,
u32 batch_start_offset,
u32 batch_len,
- bool is_master);
+ struct drm_i915_gem_object *shadow_batch_obj,
+ u64 shadow_batch_start);
/* intel_device_info.c */
static inline struct intel_device_info *
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index d0f94f239919..98305d987ac1 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -964,6 +964,20 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
{
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
struct i915_address_space *vm = &dev_priv->ggtt.vm;
+
+ return i915_gem_object_pin(obj, vm, view, size, alignment,
+ flags | PIN_GLOBAL);
+}
+
+struct i915_vma *
+i915_gem_object_pin(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
+ const struct i915_ggtt_view *view,
+ u64 size,
+ u64 alignment,
+ u64 flags)
+{
+ struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
struct i915_vma *vma;
int ret;
@@ -1038,7 +1052,7 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
return ERR_PTR(ret);
}
- ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
+ ret = i915_vma_pin(vma, size, alignment, flags);
if (ret)
return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/i915/i915_getparam.c b/drivers/gpu/drm/i915/i915_getparam.c
index 5d9101376a3d..9f1517af5b7f 100644
--- a/drivers/gpu/drm/i915/i915_getparam.c
+++ b/drivers/gpu/drm/i915/i915_getparam.c
@@ -62,7 +62,7 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data,
value = !!(i915->caps.scheduler & I915_SCHEDULER_CAP_SEMAPHORES);
break;
case I915_PARAM_HAS_SECURE_BATCHES:
- value = capable(CAP_SYS_ADMIN);
+ value = HAS_SECURE_BATCHES(i915) && capable(CAP_SYS_ADMIN);
break;
case I915_PARAM_CMD_PARSER_VERSION:
value = i915_cmd_parser_get_version(i915);
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index 8e251e719390..212acaef581e 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -843,8 +843,8 @@ create_event_attributes(struct i915_pmu *pmu)
const char *name;
const char *unit;
} events[] = {
- __event(I915_PMU_ACTUAL_FREQUENCY, "actual-frequency", "MHz"),
- __event(I915_PMU_REQUESTED_FREQUENCY, "requested-frequency", "MHz"),
+ __event(I915_PMU_ACTUAL_FREQUENCY, "actual-frequency", "M"),
+ __event(I915_PMU_REQUESTED_FREQUENCY, "requested-frequency", "M"),
__event(I915_PMU_INTERRUPTS, "interrupts", NULL),
__event(I915_PMU_RC6_RESIDENCY, "rc6-residency", "ns"),
};
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 2abd199093c5..f8ee9aba3955 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -471,6 +471,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define ECOCHK_PPGTT_WT_HSW (0x2 << 3)
#define ECOCHK_PPGTT_WB_HSW (0x3 << 3)
+#define GEN8_RC6_CTX_INFO _MMIO(0x8504)
+
#define GAC_ECO_BITS _MMIO(0x14090)
#define ECOBITS_SNB_BIT (1 << 13)
#define ECOBITS_PPGTT_CACHE64B (3 << 8)
@@ -555,6 +557,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
*/
#define BCS_SWCTRL _MMIO(0x22200)
+/* There are 16 GPR registers */
+#define BCS_GPR(n) _MMIO(0x22600 + (n) * 8)
+#define BCS_GPR_UDW(n) _MMIO(0x22600 + (n) * 8 + 4)
+
#define GPGPU_THREADS_DISPATCHED _MMIO(0x2290)
#define GPGPU_THREADS_DISPATCHED_UDW _MMIO(0x2290 + 4)
#define HS_INVOCATION_COUNT _MMIO(0x2300)
@@ -7211,6 +7217,10 @@ enum {
#define TGL_DMC_DEBUG_DC5_COUNT _MMIO(0x101084)
#define TGL_DMC_DEBUG_DC6_COUNT _MMIO(0x101088)
+/* Display Internal Timeout Register */
+#define RM_TIMEOUT _MMIO(0x42060)
+#define MMIO_TIMEOUT_US(us) ((us) << 0)
+
/* interrupts */
#define DE_MASTER_IRQ_CONTROL (1 << 31)
#define DE_SPRITEB_FLIP_DONE (1 << 29)
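The new BCS_GPR macros place the blitter's sixteen 64-bit general purpose registers at an 8-byte stride from 0x22600, matching the sixteen REG64_IDX(BCS_GPR, n) whitelist entries added earlier. A trivial standalone check of that arithmetic:

	#include <stdio.h>

	int main(void)
	{
		int n;

		for (n = 0; n < 16; n++) {
			unsigned int lo = 0x22600 + n * 8;	/* BCS_GPR(n) */

			printf("BCS_GPR(%2d) = 0x%05X, UDW = 0x%05X\n", n, lo, lo + 4);
		}
		return 0;
	}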
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 1c5506822dc7..bc828a9ace84 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -1495,7 +1495,7 @@ long i915_request_wait(struct i915_request *rq,
dma_fence_remove_callback(&rq->fence, &wait.cb);
out:
- mutex_release(&rq->engine->gt->reset.mutex.dep_map, 0, _THIS_IP_);
+ mutex_release(&rq->engine->gt->reset.mutex.dep_map, _THIS_IP_);
trace_i915_request_wait_end(rq);
return timeout;
}
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index 7b84ebca2901..3eba8a2b39c2 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -177,9 +177,37 @@ static inline int rq_prio(const struct i915_request *rq)
return rq->sched.attr.priority | __NO_PREEMPTION;
}
-static void kick_submission(struct intel_engine_cs *engine, int prio)
+static inline bool need_preempt(int prio, int active)
{
- const struct i915_request *inflight = *engine->execlists.active;
+ /*
+ * Allow preemption of low -> normal -> high, but we do
+ * not allow low priority tasks to preempt other low priority
+ * tasks under the impression that latency for low priority
+ * tasks does not matter (as much as background throughput),
+ * so kiss.
+ */
+ return prio >= max(I915_PRIORITY_NORMAL, active);
+}
+
+static void kick_submission(struct intel_engine_cs *engine,
+ const struct i915_request *rq,
+ int prio)
+{
+ const struct i915_request *inflight;
+
+ /*
+ * We only need to kick the tasklet once for the high priority
+ * new context we add into the queue.
+ */
+ if (prio <= engine->execlists.queue_priority_hint)
+ return;
+
+ rcu_read_lock();
+
+ /* Nothing currently active? We're overdue for a submission! */
+ inflight = execlists_active(&engine->execlists);
+ if (!inflight)
+ goto unlock;
/*
* If we are already the currently executing context, don't
@@ -188,10 +216,15 @@ static void kick_submission(struct intel_engine_cs *engine, int prio)
* tasklet, i.e. we have not changed the priority queue
* sufficiently to oust the running context.
*/
- if (!inflight || !i915_scheduler_need_preempt(prio, rq_prio(inflight)))
- return;
+ if (inflight->hw_context == rq->hw_context)
+ goto unlock;
- tasklet_hi_schedule(&engine->execlists.tasklet);
+ engine->execlists.queue_priority_hint = prio;
+ if (need_preempt(prio, rq_prio(inflight)))
+ tasklet_hi_schedule(&engine->execlists.tasklet);
+
+unlock:
+ rcu_read_unlock();
}
static void __i915_schedule(struct i915_sched_node *node,
@@ -317,13 +350,8 @@ static void __i915_schedule(struct i915_sched_node *node,
list_move_tail(&node->link, cache.priolist);
}
- if (prio <= engine->execlists.queue_priority_hint)
- continue;
-
- engine->execlists.queue_priority_hint = prio;
-
/* Defer (tasklet) submission until after all of our updates. */
- kick_submission(engine, prio);
+ kick_submission(engine, node_to_request(node), prio);
}
spin_unlock(&engine->active.lock);
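need_preempt() encodes the policy in its comment: a request must be at least normal priority and at least the in-flight priority before the tasklet is kicked, so low-priority work never preempts other low-priority work. A standalone sketch with illustrative values (I915_PRIORITY_NORMAL is taken to be 0 here; only the relative ordering matters):

	#include <stdio.h>

	#define I915_PRIORITY_NORMAL 0 /* assumed value; only the ordering matters */

	static int need_preempt(int prio, int active)
	{
		int floor = active > I915_PRIORITY_NORMAL ? active : I915_PRIORITY_NORMAL;

		return prio >= floor;
	}

	int main(void)
	{
		printf("%d\n", need_preempt(-10, -20)); /* 0: low never preempts low */
		printf("%d\n", need_preempt(0, -20));   /* 1: normal may preempt low */
		printf("%d\n", need_preempt(10, 20));   /* 0: below the in-flight prio */
		return 0;
	}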
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 75ee027abb80..2efe1d12d5a9 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -126,6 +126,14 @@ static void bxt_init_clock_gating(struct drm_i915_private *dev_priv)
*/
I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
PWM1_GATING_DIS | PWM2_GATING_DIS);
+
+ /*
+ * Lower the display internal timeout.
+ * This is needed to avoid any hard hangs when DSI port PLL
+ * is off and an MMIO access is attempted by any privileged
+ * application, using batch buffers or any other means.
+ */
+ I915_WRITE(RM_TIMEOUT, MMIO_TIMEOUT_US(950));
}
static void glk_init_clock_gating(struct drm_i915_private *dev_priv)
@@ -8544,6 +8552,100 @@ static void intel_init_emon(struct drm_i915_private *dev_priv)
dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
}
+static bool i915_rc6_ctx_corrupted(struct drm_i915_private *dev_priv)
+{
+ return !I915_READ(GEN8_RC6_CTX_INFO);
+}
+
+static void i915_rc6_ctx_wa_init(struct drm_i915_private *i915)
+{
+ if (!NEEDS_RC6_CTX_CORRUPTION_WA(i915))
+ return;
+
+ if (i915_rc6_ctx_corrupted(i915)) {
+ DRM_INFO("RC6 context corrupted, disabling runtime power management\n");
+ i915->gt_pm.rc6.ctx_corrupted = true;
+ i915->gt_pm.rc6.ctx_corrupted_wakeref =
+ intel_runtime_pm_get(&i915->runtime_pm);
+ }
+}
+
+static void i915_rc6_ctx_wa_cleanup(struct drm_i915_private *i915)
+{
+ if (i915->gt_pm.rc6.ctx_corrupted) {
+ intel_runtime_pm_put(&i915->runtime_pm,
+ i915->gt_pm.rc6.ctx_corrupted_wakeref);
+ i915->gt_pm.rc6.ctx_corrupted = false;
+ }
+}
+
+/**
+ * i915_rc6_ctx_wa_suspend - system suspend sequence for the RC6 CTX WA
+ * @i915: i915 device
+ *
+ * Perform any steps needed to clean up the RC6 CTX WA before system suspend.
+ */
+void i915_rc6_ctx_wa_suspend(struct drm_i915_private *i915)
+{
+ if (i915->gt_pm.rc6.ctx_corrupted)
+ intel_runtime_pm_put(&i915->runtime_pm,
+ i915->gt_pm.rc6.ctx_corrupted_wakeref);
+}
+
+/**
+ * i915_rc6_ctx_wa_resume - system resume sequence for the RC6 CTX WA
+ * @i915: i915 device
+ *
+ * Perform any steps needed to re-init the RC6 CTX WA after system resume.
+ */
+void i915_rc6_ctx_wa_resume(struct drm_i915_private *i915)
+{
+ if (!i915->gt_pm.rc6.ctx_corrupted)
+ return;
+
+ if (i915_rc6_ctx_corrupted(i915)) {
+ i915->gt_pm.rc6.ctx_corrupted_wakeref =
+ intel_runtime_pm_get(&i915->runtime_pm);
+ return;
+ }
+
+ DRM_INFO("RC6 context restored, re-enabling runtime power management\n");
+ i915->gt_pm.rc6.ctx_corrupted = false;
+}
+
+static void intel_disable_rc6(struct drm_i915_private *dev_priv);
+
+/**
+ * i915_rc6_ctx_wa_check - check for a new RC6 CTX corruption
+ * @i915: i915 device
+ *
+ * Check if an RC6 CTX corruption has happened since the last check and if so
+ * disable RC6 and runtime power management.
+ *
+ * Return false if no context corruption has happened since the last call of
+ * this function, true otherwise.
+ */
+bool i915_rc6_ctx_wa_check(struct drm_i915_private *i915)
+{
+ if (!NEEDS_RC6_CTX_CORRUPTION_WA(i915))
+ return false;
+
+ if (i915->gt_pm.rc6.ctx_corrupted)
+ return false;
+
+ if (!i915_rc6_ctx_corrupted(i915))
+ return false;
+
+ DRM_NOTE("RC6 context corruption, disabling runtime power management\n");
+
+ intel_disable_rc6(i915);
+ i915->gt_pm.rc6.ctx_corrupted = true;
+ i915->gt_pm.rc6.ctx_corrupted_wakeref =
+ intel_runtime_pm_get_noresume(&i915->runtime_pm);
+
+ return true;
+}
+
void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
{
struct intel_rps *rps = &dev_priv->gt_pm.rps;
@@ -8557,6 +8659,8 @@ void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
pm_runtime_get(&dev_priv->drm.pdev->dev);
}
+ i915_rc6_ctx_wa_init(dev_priv);
+
/* Initialize RPS limits (for userspace) */
if (IS_CHERRYVIEW(dev_priv))
cherryview_init_gt_powersave(dev_priv);
@@ -8595,6 +8699,8 @@ void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
if (IS_VALLEYVIEW(dev_priv))
valleyview_cleanup_gt_powersave(dev_priv);
+ i915_rc6_ctx_wa_cleanup(dev_priv);
+
if (!HAS_RC6(dev_priv))
pm_runtime_put(&dev_priv->drm.pdev->dev);
}
@@ -8623,7 +8729,7 @@ static inline void intel_disable_llc_pstate(struct drm_i915_private *i915)
i915->gt_pm.llc_pstate.enabled = false;
}
-static void intel_disable_rc6(struct drm_i915_private *dev_priv)
+static void __intel_disable_rc6(struct drm_i915_private *dev_priv)
{
lockdep_assert_held(&dev_priv->gt_pm.rps.lock);
@@ -8642,6 +8748,15 @@ static void intel_disable_rc6(struct drm_i915_private *dev_priv)
dev_priv->gt_pm.rc6.enabled = false;
}
+static void intel_disable_rc6(struct drm_i915_private *dev_priv)
+{
+ struct intel_rps *rps = &dev_priv->gt_pm.rps;
+
+ mutex_lock(&rps->lock);
+ __intel_disable_rc6(dev_priv);
+ mutex_unlock(&rps->lock);
+}
+
static void intel_disable_rps(struct drm_i915_private *dev_priv)
{
lockdep_assert_held(&dev_priv->gt_pm.rps.lock);
@@ -8667,7 +8782,7 @@ void intel_disable_gt_powersave(struct drm_i915_private *dev_priv)
{
mutex_lock(&dev_priv->gt_pm.rps.lock);
- intel_disable_rc6(dev_priv);
+ __intel_disable_rc6(dev_priv);
intel_disable_rps(dev_priv);
if (HAS_LLC(dev_priv))
intel_disable_llc_pstate(dev_priv);
@@ -8694,6 +8809,9 @@ static void intel_enable_rc6(struct drm_i915_private *dev_priv)
if (dev_priv->gt_pm.rc6.enabled)
return;
+ if (dev_priv->gt_pm.rc6.ctx_corrupted)
+ return;
+
if (IS_CHERRYVIEW(dev_priv))
cherryview_enable_rc6(dev_priv);
else if (IS_VALLEYVIEW(dev_priv))
diff --git a/drivers/gpu/drm/i915/intel_pm.h b/drivers/gpu/drm/i915/intel_pm.h
index e3573e1e16e3..0f7390c850ec 100644
--- a/drivers/gpu/drm/i915/intel_pm.h
+++ b/drivers/gpu/drm/i915/intel_pm.h
@@ -36,6 +36,9 @@ void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv);
void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv);
void intel_enable_gt_powersave(struct drm_i915_private *dev_priv);
void intel_disable_gt_powersave(struct drm_i915_private *dev_priv);
+bool i915_rc6_ctx_wa_check(struct drm_i915_private *i915);
+void i915_rc6_ctx_wa_suspend(struct drm_i915_private *i915);
+void i915_rc6_ctx_wa_resume(struct drm_i915_private *i915);
void gen6_rps_busy(struct drm_i915_private *dev_priv);
void gen6_rps_idle(struct drm_i915_private *dev_priv);
void gen6_rps_boost(struct i915_request *rq);
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index 04c721d0d3b9..b89439ed210d 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -488,7 +488,7 @@ static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
WARN_ON(!tcon->quirks->has_channel_0);
- tcon->dclk_min_div = 6;
+ tcon->dclk_min_div = 1;
tcon->dclk_max_div = 127;
sun4i_tcon0_mode_set_common(tcon, mode);
diff --git a/drivers/greybus/connection.c b/drivers/greybus/connection.c
index fc8f57f97ce6..e3799a53a193 100644
--- a/drivers/greybus/connection.c
+++ b/drivers/greybus/connection.c
@@ -361,9 +361,6 @@ static int gb_connection_hd_cport_quiesce(struct gb_connection *connection)
if (connection->mode_switch)
peer_space += sizeof(struct gb_operation_msg_hdr);
- if (!hd->driver->cport_quiesce)
- return 0;
-
ret = hd->driver->cport_quiesce(hd, connection->hd_cport_id,
peer_space,
GB_CONNECTION_CPORT_QUIESCE_TIMEOUT);
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index fcc52797c169..6098e0cbdb4b 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -202,7 +202,7 @@ int hv_synic_init(unsigned int cpu)
{
hv_synic_enable_regs(cpu);
- hv_stimer_init(cpu);
+ hv_stimer_legacy_init(cpu, VMBUS_MESSAGE_SINT);
return 0;
}
@@ -277,7 +277,7 @@ int hv_synic_cleanup(unsigned int cpu)
if (channel_found && vmbus_connection.conn_state == CONNECTED)
return -EBUSY;
- hv_stimer_cleanup(cpu);
+ hv_stimer_legacy_cleanup(cpu);
hv_synic_disable_regs(cpu);
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 53a60c81e220..8c06b3361c27 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -1340,10 +1340,6 @@ static int vmbus_bus_init(void)
if (ret)
goto err_alloc;
- ret = hv_stimer_alloc(VMBUS_MESSAGE_SINT);
- if (ret < 0)
- goto err_alloc;
-
/*
* Initialize the per-cpu interrupt state and stimer state.
* Then connect to the host.
@@ -1400,9 +1396,8 @@ static int vmbus_bus_init(void)
err_connect:
cpuhp_remove_state(hyperv_cpuhp_online);
err_cpuhp:
- hv_stimer_free();
-err_alloc:
hv_synic_free();
+err_alloc:
hv_remove_vmbus_irq();
bus_unregister(&hv_bus);
@@ -2315,20 +2310,23 @@ static void hv_crash_handler(struct pt_regs *regs)
static int hv_synic_suspend(void)
{
/*
- * When we reach here, all the non-boot CPUs have been offlined, and
- * the stimers on them have been unbound in hv_synic_cleanup() ->
+ * When we reach here, all the non-boot CPUs have been offlined.
+ * If we're in a legacy configuration where stimer Direct Mode is
+ * not enabled, the stimers on the non-boot CPUs have been unbound
+ * in hv_synic_cleanup() -> hv_stimer_legacy_cleanup() ->
* hv_stimer_cleanup() -> clockevents_unbind_device().
*
- * hv_synic_suspend() only runs on CPU0 with interrupts disabled. Here
- * we do not unbind the stimer on CPU0 because: 1) it's unnecessary
- * because the interrupts remain disabled between syscore_suspend()
- * and syscore_resume(): see create_image() and resume_target_kernel();
+ * hv_synic_suspend() only runs on CPU0 with interrupts disabled.
+ * Here we do not call hv_stimer_legacy_cleanup() on CPU0 because:
+ * 1) it's unnecessary as interrupts remain disabled between
+ * syscore_suspend() and syscore_resume(): see create_image() and
+ * resume_target_kernel()
* 2) the stimer on CPU0 is automatically disabled later by
* syscore_suspend() -> timekeeping_suspend() -> tick_suspend() -> ...
- * -> clockevents_shutdown() -> ... -> hv_ce_shutdown(); 3) a warning
- * would be triggered if we call clockevents_unbind_device(), which
- * may sleep, in an interrupts-disabled context. So, we intentionally
- * don't call hv_stimer_cleanup(0) here.
+ * -> clockevents_shutdown() -> ... -> hv_ce_shutdown()
+ * 3) a warning would be triggered if we call
+ * clockevents_unbind_device(), which may sleep, in an
+ * interrupts-disabled context.
*/
hv_synic_disable_regs(0);
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 5308c59d7001..23dfe848979a 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -310,7 +310,6 @@ config SENSORS_APPLESMC
depends on INPUT && X86
select NEW_LEDS
select LEDS_CLASS
- select INPUT_POLLDEV
help
This driver provides support for the Apple System Management
Controller, which provides an accelerometer (Apple Sudden Motion
@@ -728,6 +727,33 @@ config SENSORS_LTC2945
This driver can also be built as a module. If so, the module will
be called ltc2945.
+config SENSORS_LTC2947
+ tristate
+
+config SENSORS_LTC2947_I2C
+ tristate "Analog Devices LTC2947 High Precision Power and Energy Monitor over I2C"
+ depends on I2C
+ select REGMAP_I2C
+ select SENSORS_LTC2947
+ help
+ If you say yes here you get support for Linear Technology LTC2947
+ I2C High Precision Power and Energy Monitor
+
+ This driver can also be built as a module. If so, the module will
+ be called ltc2947-i2c.
+
+config SENSORS_LTC2947_SPI
+ tristate "Analog Devices LTC2947 High Precision Power and Energy Monitor over SPI"
+ depends on SPI_MASTER
+ select REGMAP_SPI
+ select SENSORS_LTC2947
+ help
+ If you say yes here you get support for Linear Technology LTC2947
+ SPI High Precision Power and Energy Monitor
+
+ This driver can also be built as a module. If so, the module will
+ be called ltc2947-spi.
+
config SENSORS_LTC2990
tristate "Linear Technology LTC2990"
depends on I2C
@@ -1710,6 +1736,16 @@ config SENSORS_TMP421
This driver can also be built as a module. If so, the module
will be called tmp421.
+config SENSORS_TMP513
+ tristate "Texas Instruments TMP513 and compatibles"
+ depends on I2C
+ help
+ If you say yes here you get support for Texas Instruments TMP512,
+ and TMP513 temperature and power supply sensor chips.
+
+ This driver can also be built as a module. If so, the module
+ will be called tmp513.
+
config SENSORS_VEXPRESS
tristate "Versatile Express"
depends on VEXPRESS_CONFIG
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 40c036ea45e6..6db5db9cdc29 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -106,6 +106,9 @@ obj-$(CONFIG_SENSORS_LM95234) += lm95234.o
obj-$(CONFIG_SENSORS_LM95241) += lm95241.o
obj-$(CONFIG_SENSORS_LM95245) += lm95245.o
obj-$(CONFIG_SENSORS_LTC2945) += ltc2945.o
+obj-$(CONFIG_SENSORS_LTC2947) += ltc2947-core.o
+obj-$(CONFIG_SENSORS_LTC2947_I2C) += ltc2947-i2c.o
+obj-$(CONFIG_SENSORS_LTC2947_SPI) += ltc2947-spi.o
obj-$(CONFIG_SENSORS_LTC2990) += ltc2990.o
obj-$(CONFIG_SENSORS_LTC4151) += ltc4151.o
obj-$(CONFIG_SENSORS_LTC4215) += ltc4215.o
@@ -166,6 +169,7 @@ obj-$(CONFIG_SENSORS_TMP103) += tmp103.o
obj-$(CONFIG_SENSORS_TMP108) += tmp108.o
obj-$(CONFIG_SENSORS_TMP401) += tmp401.o
obj-$(CONFIG_SENSORS_TMP421) += tmp421.o
+obj-$(CONFIG_SENSORS_TMP513) += tmp513.o
obj-$(CONFIG_SENSORS_VEXPRESS) += vexpress-hwmon.o
obj-$(CONFIG_SENSORS_VIA_CPUTEMP)+= via-cputemp.o
obj-$(CONFIG_SENSORS_VIA686A) += via686a.o
diff --git a/drivers/hwmon/abituguru.c b/drivers/hwmon/abituguru.c
index a5cf6b2a6e49..681f0623868f 100644
--- a/drivers/hwmon/abituguru.c
+++ b/drivers/hwmon/abituguru.c
@@ -1264,7 +1264,7 @@ static int abituguru_probe(struct platform_device *pdev)
* El weirdo probe order, to keep the sysfs order identical to the
* BIOS and window-application listing order.
*/
- const u8 probe_order[ABIT_UGURU_MAX_BANK1_SENSORS] = {
+ static const u8 probe_order[ABIT_UGURU_MAX_BANK1_SENSORS] = {
0x00, 0x01, 0x03, 0x04, 0x0A, 0x08, 0x0E, 0x02,
0x09, 0x06, 0x05, 0x0B, 0x0F, 0x0D, 0x07, 0x0C };
diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
index 183ff3d25129..ec93b8d673f5 100644
--- a/drivers/hwmon/applesmc.c
+++ b/drivers/hwmon/applesmc.c
@@ -19,7 +19,7 @@
#include <linux/delay.h>
#include <linux/platform_device.h>
-#include <linux/input-polldev.h>
+#include <linux/input.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
@@ -140,7 +140,7 @@ static s16 rest_y;
static u8 backlight_state[2];
static struct device *hwmon_dev;
-static struct input_polled_dev *applesmc_idev;
+static struct input_dev *applesmc_idev;
/*
* Last index written to key_at_index sysfs file, and value to use for all other
@@ -681,9 +681,8 @@ static void applesmc_calibrate(void)
rest_x = -rest_x;
}
-static void applesmc_idev_poll(struct input_polled_dev *dev)
+static void applesmc_idev_poll(struct input_dev *idev)
{
- struct input_dev *idev = dev->input;
s16 x, y;
if (applesmc_read_s16(MOTION_SENSOR_X_KEY, &x))
@@ -1134,7 +1133,6 @@ out:
/* Create accelerometer resources */
static int applesmc_create_accelerometer(void)
{
- struct input_dev *idev;
int ret;
if (!smcreg.has_accelerometer)
@@ -1144,37 +1142,38 @@ static int applesmc_create_accelerometer(void)
if (ret)
goto out;
- applesmc_idev = input_allocate_polled_device();
+ applesmc_idev = input_allocate_device();
if (!applesmc_idev) {
ret = -ENOMEM;
goto out_sysfs;
}
- applesmc_idev->poll = applesmc_idev_poll;
- applesmc_idev->poll_interval = APPLESMC_POLL_INTERVAL;
-
/* initial calibrate for the input device */
applesmc_calibrate();
/* initialize the input device */
- idev = applesmc_idev->input;
- idev->name = "applesmc";
- idev->id.bustype = BUS_HOST;
- idev->dev.parent = &pdev->dev;
- idev->evbit[0] = BIT_MASK(EV_ABS);
- input_set_abs_params(idev, ABS_X,
+ applesmc_idev->name = "applesmc";
+ applesmc_idev->id.bustype = BUS_HOST;
+ applesmc_idev->dev.parent = &pdev->dev;
+ input_set_abs_params(applesmc_idev, ABS_X,
-256, 256, APPLESMC_INPUT_FUZZ, APPLESMC_INPUT_FLAT);
- input_set_abs_params(idev, ABS_Y,
+ input_set_abs_params(applesmc_idev, ABS_Y,
-256, 256, APPLESMC_INPUT_FUZZ, APPLESMC_INPUT_FLAT);
- ret = input_register_polled_device(applesmc_idev);
+ ret = input_setup_polling(applesmc_idev, applesmc_idev_poll);
+ if (ret)
+ goto out_idev;
+
+ input_set_poll_interval(applesmc_idev, APPLESMC_POLL_INTERVAL);
+
+ ret = input_register_device(applesmc_idev);
if (ret)
goto out_idev;
return 0;
out_idev:
- input_free_polled_device(applesmc_idev);
+ input_free_device(applesmc_idev);
out_sysfs:
applesmc_destroy_nodes(accelerometer_group);
@@ -1189,8 +1188,7 @@ static void applesmc_release_accelerometer(void)
{
if (!smcreg.has_accelerometer)
return;
- input_unregister_polled_device(applesmc_idev);
- input_free_polled_device(applesmc_idev);
+ input_unregister_device(applesmc_idev);
applesmc_destroy_nodes(accelerometer_group);
}
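The applesmc changes above move the accelerometer off the deprecated input_polled_dev API onto the polling support built into the core input device. A condensed sketch of that registration pattern, mirroring the driver changes; names are hypothetical, the poll body is a stub, and error unwinding is abbreviated:

	#include <linux/device.h>
	#include <linux/errno.h>
	#include <linux/input.h>

	static void example_poll(struct input_dev *idev)
	{
		/* read the hardware here, then report: */
		input_report_abs(idev, ABS_X, 0);
		input_sync(idev);
	}

	static int example_register(struct device *parent)
	{
		struct input_dev *idev;
		int ret;

		idev = input_allocate_device();
		if (!idev)
			return -ENOMEM;

		idev->name = "example-accel";	/* hypothetical device */
		idev->dev.parent = parent;
		input_set_abs_params(idev, ABS_X, -256, 256, 0, 0);

		/* core input polling replaces the old input_polled_dev wrapper */
		ret = input_setup_polling(idev, example_poll);
		if (ret)
			goto err_free;

		input_set_poll_interval(idev, 50);	/* ms */

		ret = input_register_device(idev);
		if (ret)
			goto err_free;

		return 0;

	err_free:
		input_free_device(idev);
		return ret;
	}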
diff --git a/drivers/hwmon/aspeed-pwm-tacho.c b/drivers/hwmon/aspeed-pwm-tacho.c
index 40c489be62ea..33fb54845bf6 100644
--- a/drivers/hwmon/aspeed-pwm-tacho.c
+++ b/drivers/hwmon/aspeed-pwm-tacho.c
@@ -891,17 +891,12 @@ static int aspeed_pwm_tacho_probe(struct platform_device *pdev)
struct device_node *np, *child;
struct aspeed_pwm_tacho_data *priv;
void __iomem *regs;
- struct resource *res;
struct device *hwmon;
struct clk *clk;
int ret;
np = dev->of_node;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENOENT;
- regs = devm_ioremap_resource(dev, res);
+ regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(regs))
return PTR_ERR(regs);
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
index 4212d022d253..17583bf8c2dc 100644
--- a/drivers/hwmon/dell-smm-hwmon.c
+++ b/drivers/hwmon/dell-smm-hwmon.c
@@ -68,6 +68,8 @@ static uint i8k_pwm_mult;
static uint i8k_fan_max = I8K_FAN_HIGH;
static bool disallow_fan_type_call;
static bool disallow_fan_support;
+static unsigned int manual_fan;
+static unsigned int auto_fan;
#define I8K_HWMON_HAVE_TEMP1 (1 << 0)
#define I8K_HWMON_HAVE_TEMP2 (1 << 1)
@@ -301,6 +303,20 @@ static int i8k_get_fan_nominal_speed(int fan, int speed)
}
/*
+ * Enable or disable automatic BIOS fan control support
+ */
+static int i8k_enable_fan_auto_mode(bool enable)
+{
+ struct smm_regs regs = { };
+
+ if (disallow_fan_support)
+ return -EINVAL;
+
+ regs.eax = enable ? auto_fan : manual_fan;
+ return i8k_smm(&regs);
+}
+
+/*
* Set the fan speed (off, low, high). Returns the new fan status.
*/
static int i8k_set_fan(int fan, int speed)
@@ -726,6 +742,35 @@ static ssize_t i8k_hwmon_pwm_store(struct device *dev,
return err < 0 ? -EIO : count;
}
+static ssize_t i8k_hwmon_pwm_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int err;
+ bool enable;
+ unsigned long val;
+
+ if (!auto_fan)
+ return -ENODEV;
+
+ err = kstrtoul(buf, 10, &val);
+ if (err)
+ return err;
+
+ if (val == 1)
+ enable = false;
+ else if (val == 2)
+ enable = true;
+ else
+ return -EINVAL;
+
+ mutex_lock(&i8k_mutex);
+ err = i8k_enable_fan_auto_mode(enable);
+ mutex_unlock(&i8k_mutex);
+
+ return err ? err : count;
+}
+
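The new write-only pwm1_enable attribute follows the usual hwmon convention: writing 1 selects manual fan control, writing 2 hands control back to the BIOS (the store above maps 2 to the SMM auto command and 1 to the manual one). A hypothetical userspace use; the hwmon node path is assumed:

	#include <stdio.h>

	int main(void)
	{
		/* path assumed; find the right hwmonN for dell_smm in practice */
		FILE *f = fopen("/sys/class/hwmon/hwmon0/pwm1_enable", "w");

		if (!f) {
			perror("pwm1_enable");
			return 1;
		}
		fputs("2", f);	/* 2 = hand fan control back to the BIOS, 1 = manual */
		fclose(f);
		return 0;
	}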
static SENSOR_DEVICE_ATTR_RO(temp1_input, i8k_hwmon_temp, 0);
static SENSOR_DEVICE_ATTR_RO(temp1_label, i8k_hwmon_temp_label, 0);
static SENSOR_DEVICE_ATTR_RO(temp2_input, i8k_hwmon_temp, 1);
@@ -749,6 +794,7 @@ static SENSOR_DEVICE_ATTR_RO(temp10_label, i8k_hwmon_temp_label, 9);
static SENSOR_DEVICE_ATTR_RO(fan1_input, i8k_hwmon_fan, 0);
static SENSOR_DEVICE_ATTR_RO(fan1_label, i8k_hwmon_fan_label, 0);
static SENSOR_DEVICE_ATTR_RW(pwm1, i8k_hwmon_pwm, 0);
+static SENSOR_DEVICE_ATTR_WO(pwm1_enable, i8k_hwmon_pwm_enable, 0);
static SENSOR_DEVICE_ATTR_RO(fan2_input, i8k_hwmon_fan, 1);
static SENSOR_DEVICE_ATTR_RO(fan2_label, i8k_hwmon_fan_label, 1);
static SENSOR_DEVICE_ATTR_RW(pwm2, i8k_hwmon_pwm, 1);
@@ -780,12 +826,13 @@ static struct attribute *i8k_attrs[] = {
&sensor_dev_attr_fan1_input.dev_attr.attr, /* 20 */
&sensor_dev_attr_fan1_label.dev_attr.attr, /* 21 */
&sensor_dev_attr_pwm1.dev_attr.attr, /* 22 */
- &sensor_dev_attr_fan2_input.dev_attr.attr, /* 23 */
- &sensor_dev_attr_fan2_label.dev_attr.attr, /* 24 */
- &sensor_dev_attr_pwm2.dev_attr.attr, /* 25 */
- &sensor_dev_attr_fan3_input.dev_attr.attr, /* 26 */
- &sensor_dev_attr_fan3_label.dev_attr.attr, /* 27 */
- &sensor_dev_attr_pwm3.dev_attr.attr, /* 28 */
+ &sensor_dev_attr_pwm1_enable.dev_attr.attr, /* 23 */
+ &sensor_dev_attr_fan2_input.dev_attr.attr, /* 24 */
+ &sensor_dev_attr_fan2_label.dev_attr.attr, /* 25 */
+ &sensor_dev_attr_pwm2.dev_attr.attr, /* 26 */
+ &sensor_dev_attr_fan3_input.dev_attr.attr, /* 27 */
+ &sensor_dev_attr_fan3_label.dev_attr.attr, /* 28 */
+ &sensor_dev_attr_pwm3.dev_attr.attr, /* 29 */
NULL
};
@@ -828,16 +875,19 @@ static umode_t i8k_is_visible(struct kobject *kobj, struct attribute *attr,
!(i8k_hwmon_flags & I8K_HWMON_HAVE_TEMP10))
return 0;
- if (index >= 20 && index <= 22 &&
+ if (index >= 20 && index <= 23 &&
!(i8k_hwmon_flags & I8K_HWMON_HAVE_FAN1))
return 0;
- if (index >= 23 && index <= 25 &&
+ if (index >= 24 && index <= 26 &&
!(i8k_hwmon_flags & I8K_HWMON_HAVE_FAN2))
return 0;
- if (index >= 26 && index <= 28 &&
+ if (index >= 27 && index <= 29 &&
!(i8k_hwmon_flags & I8K_HWMON_HAVE_FAN3))
return 0;
+ if (index == 23 && !auto_fan)
+ return 0;
+
return attr->mode;
}
@@ -1135,12 +1185,48 @@ static struct dmi_system_id i8k_blacklist_fan_support_dmi_table[] __initdata = {
{ }
};
+struct i8k_fan_control_data {
+ unsigned int manual_fan;
+ unsigned int auto_fan;
+};
+
+enum i8k_fan_controls {
+ I8K_FAN_34A3_35A3,
+};
+
+static const struct i8k_fan_control_data i8k_fan_control_data[] = {
+ [I8K_FAN_34A3_35A3] = {
+ .manual_fan = 0x34a3,
+ .auto_fan = 0x35a3,
+ },
+};
+
+static struct dmi_system_id i8k_whitelist_fan_control[] __initdata = {
+ {
+ .ident = "Dell Precision 5530",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Precision 5530"),
+ },
+ .driver_data = (void *)&i8k_fan_control_data[I8K_FAN_34A3_35A3],
+ },
+ {
+ .ident = "Dell Latitude E6440",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Latitude E6440"),
+ },
+ .driver_data = (void *)&i8k_fan_control_data[I8K_FAN_34A3_35A3],
+ },
+ { }
+};
+
/*
* Probe for the presence of a supported laptop.
*/
static int __init i8k_probe(void)
{
- const struct dmi_system_id *id;
+ const struct dmi_system_id *id, *fan_control;
int fan, ret;
/*
@@ -1200,6 +1286,15 @@ static int __init i8k_probe(void)
i8k_fan_max = fan_max ? : I8K_FAN_HIGH; /* Must not be 0 */
i8k_pwm_mult = DIV_ROUND_UP(255, i8k_fan_max);
+ fan_control = dmi_first_match(i8k_whitelist_fan_control);
+ if (fan_control && fan_control->driver_data) {
+ const struct i8k_fan_control_data *data = fan_control->driver_data;
+
+ manual_fan = data->manual_fan;
+ auto_fan = data->auto_fan;
+ pr_info("enabling support for setting automatic/manual fan control\n");
+ }
+
if (!fan_mult) {
/*
* Autodetect fan multiplier based on nominal rpm
diff --git a/drivers/hwmon/ina3221.c b/drivers/hwmon/ina3221.c
index 8a51dcf055ea..f335d0cb0c77 100644
--- a/drivers/hwmon/ina3221.c
+++ b/drivers/hwmon/ina3221.c
@@ -31,6 +31,8 @@
#define INA3221_WARN2 0x0a
#define INA3221_CRIT3 0x0b
#define INA3221_WARN3 0x0c
+#define INA3221_SHUNT_SUM 0x0d
+#define INA3221_CRIT_SUM 0x0e
#define INA3221_MASK_ENABLE 0x0f
#define INA3221_CONFIG_MODE_MASK GENMASK(2, 0)
@@ -50,6 +52,8 @@
#define INA3221_CONFIG_CHs_EN_MASK GENMASK(14, 12)
#define INA3221_CONFIG_CHx_EN(x) BIT(14 - (x))
+#define INA3221_MASK_ENABLE_SCC_MASK GENMASK(14, 12)
+
#define INA3221_CONFIG_DEFAULT 0x7127
#define INA3221_RSHUNT_DEFAULT 10000
@@ -60,9 +64,11 @@ enum ina3221_fields {
/* Status Flags */
F_CVRF,
- /* Alert Flags */
+ /* Warning Flags */
F_WF3, F_WF2, F_WF1,
- F_CF3, F_CF2, F_CF1,
+
+ /* Alert Flags: SF is the summation-alert flag */
+ F_SF, F_CF3, F_CF2, F_CF1,
/* sentinel */
F_MAX_FIELDS
@@ -75,6 +81,7 @@ static const struct reg_field ina3221_reg_fields[] = {
[F_WF3] = REG_FIELD(INA3221_MASK_ENABLE, 3, 3),
[F_WF2] = REG_FIELD(INA3221_MASK_ENABLE, 4, 4),
[F_WF1] = REG_FIELD(INA3221_MASK_ENABLE, 5, 5),
+ [F_SF] = REG_FIELD(INA3221_MASK_ENABLE, 6, 6),
[F_CF3] = REG_FIELD(INA3221_MASK_ENABLE, 7, 7),
[F_CF2] = REG_FIELD(INA3221_MASK_ENABLE, 8, 8),
[F_CF1] = REG_FIELD(INA3221_MASK_ENABLE, 9, 9),
@@ -107,6 +114,7 @@ struct ina3221_input {
* @inputs: Array of channel input source specific structures
* @lock: mutex lock to serialize sysfs attribute accesses
* @reg_config: Register value of INA3221_CONFIG
+ * @summation_shunt_resistor: equivalent shunt resistor value for summation
* @single_shot: running in single-shot operating mode
*/
struct ina3221_data {
@@ -116,16 +124,51 @@ struct ina3221_data {
struct ina3221_input inputs[INA3221_NUM_CHANNELS];
struct mutex lock;
u32 reg_config;
+ int summation_shunt_resistor;
bool single_shot;
};
static inline bool ina3221_is_enabled(struct ina3221_data *ina, int channel)
{
+ /* Summation channel checks shunt resistor values */
+ if (channel > INA3221_CHANNEL3)
+ return ina->summation_shunt_resistor != 0;
+
return pm_runtime_active(ina->pm_dev) &&
(ina->reg_config & INA3221_CONFIG_CHx_EN(channel));
}
+/**
+ * Helper function to return the resistor value for current summation.
+ *
+ * There is a condition to calculate current summation -- all the shunt
+ * resistor values should be the same, so as to simply fit the formula:
+ * current summation = shunt voltage summation / shunt resistor
+ *
+ * Returns the equivalent shunt resistor value on success or 0 on failure
+ */
+static inline int ina3221_summation_shunt_resistor(struct ina3221_data *ina)
+{
+ struct ina3221_input *input = ina->inputs;
+ int i, shunt_resistor = 0;
+
+ for (i = 0; i < INA3221_NUM_CHANNELS; i++) {
+ if (input[i].disconnected || !input[i].shunt_resistor)
+ continue;
+ if (!shunt_resistor) {
+ /* Found the reference shunt resistor value */
+ shunt_resistor = input[i].shunt_resistor;
+ } else {
+ /* No summation if resistor values are different */
+ if (shunt_resistor != input[i].shunt_resistor)
+ return 0;
+ }
+ }
+
+ return shunt_resistor;
+}
+
/* Lookup table for Bus and Shunt conversion times in usec */
static const u16 ina3221_conv_time[] = {
140, 204, 332, 588, 1100, 2116, 4156, 8244,
@@ -183,7 +226,14 @@ static int ina3221_read_value(struct ina3221_data *ina, unsigned int reg,
if (ret)
return ret;
- *val = sign_extend32(regval >> 3, 12);
+ /*
+ * Shunt Voltage Sum register has 14-bit value with 1-bit shift
+ * Other Shunt Voltage registers have 12 bits with 3-bit shift
+ */
+ if (reg == INA3221_SHUNT_SUM)
+ *val = sign_extend32(regval >> 1, 14);
+ else
+ *val = sign_extend32(regval >> 3, 12);
return 0;
}
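The decode above differs per register: the Shunt Voltage Sum register carries a wider field shifted left once, while the per-channel registers carry a 12-bit field shifted left three times. A standalone check of both decodes, with sign_extend32() re-derived for userspace and made-up register samples:

	#include <stdint.h>
	#include <stdio.h>

	/* userspace re-derivation of the kernel's sign_extend32() */
	static int32_t sign_extend32(uint32_t value, int sign_bit)
	{
		uint32_t m = 1u << sign_bit;

		return (int32_t)((value ^ m) - m);
	}

	int main(void)
	{
		uint16_t sum = 0x8002;	/* made-up sum-register sample (negative) */
		uint16_t ch = 0x0028;	/* made-up channel-register sample */

		printf("sum = %d\n", sign_extend32(sum >> 1, 14));	/* -16383 */
		printf("ch  = %d\n", sign_extend32(ch >> 3, 12));	/* 5 */
		return 0;
	}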
@@ -195,6 +245,7 @@ static const u8 ina3221_in_reg[] = {
INA3221_SHUNT1,
INA3221_SHUNT2,
INA3221_SHUNT3,
+ INA3221_SHUNT_SUM,
};
static int ina3221_read_chip(struct device *dev, u32 attr, long *val)
@@ -224,8 +275,12 @@ static int ina3221_read_in(struct device *dev, u32 attr, int channel, long *val)
u8 reg = ina3221_in_reg[channel];
int regval, ret;
- /* Translate shunt channel index to sensor channel index */
- channel %= INA3221_NUM_CHANNELS;
+ /*
+ * Translate shunt channel index to sensor channel index, except for
+ * the 7th channel (index 6, 0-based), which is the summation channel.
+ */
+ if (channel != 6)
+ channel %= INA3221_NUM_CHANNELS;
switch (attr) {
case hwmon_in_input:
@@ -259,22 +314,29 @@ static int ina3221_read_in(struct device *dev, u32 attr, int channel, long *val)
}
}
-static const u8 ina3221_curr_reg[][INA3221_NUM_CHANNELS] = {
- [hwmon_curr_input] = { INA3221_SHUNT1, INA3221_SHUNT2, INA3221_SHUNT3 },
- [hwmon_curr_max] = { INA3221_WARN1, INA3221_WARN2, INA3221_WARN3 },
- [hwmon_curr_crit] = { INA3221_CRIT1, INA3221_CRIT2, INA3221_CRIT3 },
- [hwmon_curr_max_alarm] = { F_WF1, F_WF2, F_WF3 },
- [hwmon_curr_crit_alarm] = { F_CF1, F_CF2, F_CF3 },
+static const u8 ina3221_curr_reg[][INA3221_NUM_CHANNELS + 1] = {
+ [hwmon_curr_input] = { INA3221_SHUNT1, INA3221_SHUNT2,
+ INA3221_SHUNT3, INA3221_SHUNT_SUM },
+ [hwmon_curr_max] = { INA3221_WARN1, INA3221_WARN2, INA3221_WARN3, 0 },
+ [hwmon_curr_crit] = { INA3221_CRIT1, INA3221_CRIT2,
+ INA3221_CRIT3, INA3221_CRIT_SUM },
+ [hwmon_curr_max_alarm] = { F_WF1, F_WF2, F_WF3, 0 },
+ [hwmon_curr_crit_alarm] = { F_CF1, F_CF2, F_CF3, F_SF },
};
static int ina3221_read_curr(struct device *dev, u32 attr,
int channel, long *val)
{
struct ina3221_data *ina = dev_get_drvdata(dev);
- struct ina3221_input *input = &ina->inputs[channel];
- int resistance_uo = input->shunt_resistor;
+ struct ina3221_input *input = ina->inputs;
u8 reg = ina3221_curr_reg[attr][channel];
- int regval, voltage_nv, ret;
+ int resistance_uo, voltage_nv;
+ int regval, ret;
+
+ if (channel > INA3221_CHANNEL3)
+ resistance_uo = ina->summation_shunt_resistor;
+ else
+ resistance_uo = input[channel].shunt_resistor;
switch (attr) {
case hwmon_curr_input:
@@ -293,6 +355,9 @@ static int ina3221_read_curr(struct device *dev, u32 attr,
/* fall through */
case hwmon_curr_crit:
case hwmon_curr_max:
+ if (!resistance_uo)
+ return -ENODATA;
+
ret = ina3221_read_value(ina, reg, &regval);
if (ret)
return ret;
@@ -366,10 +431,18 @@ static int ina3221_write_curr(struct device *dev, u32 attr,
int channel, long val)
{
struct ina3221_data *ina = dev_get_drvdata(dev);
- struct ina3221_input *input = &ina->inputs[channel];
- int resistance_uo = input->shunt_resistor;
+ struct ina3221_input *input = ina->inputs;
u8 reg = ina3221_curr_reg[attr][channel];
- int regval, current_ma, voltage_uv;
+ int resistance_uo, current_ma, voltage_uv;
+ int regval;
+
+ if (channel > INA3221_CHANNEL3)
+ resistance_uo = ina->summation_shunt_resistor;
+ else
+ resistance_uo = input[channel].shunt_resistor;
+
+ if (!resistance_uo)
+ return -EOPNOTSUPP;
/* clamp current */
current_ma = clamp_val(val,
@@ -381,8 +454,21 @@ static int ina3221_write_curr(struct device *dev, u32 attr,
/* clamp voltage */
voltage_uv = clamp_val(voltage_uv, -163800, 163800);
- /* 1 / 40uV(scale) << 3(register shift) = 5 */
- regval = DIV_ROUND_CLOSEST(voltage_uv, 5) & 0xfff8;
+ /*
+ * Formula to convert voltage_uv to register value:
+ * regval = (voltage_uv / scale) << shift
+ * Note:
+ * The scale is 40uV for all shunt voltage registers
+ * Shunt Voltage Sum register left-shifts 1 bit
+ * All other Shunt Voltage registers shift 3 bits
+ * Results:
+ * SHUNT_SUM: (1 / 40uV) << 1 = 1 / 20uV
+ * SHUNT[1-3]: (1 / 40uV) << 3 = 1 / 5uV
+ */
+ if (reg == INA3221_SHUNT_SUM)
+ regval = DIV_ROUND_CLOSEST(voltage_uv, 20) & 0xfffe;
+ else
+ regval = DIV_ROUND_CLOSEST(voltage_uv, 5) & 0xfff8;
return regmap_write(ina->regmap, reg, regval);
}
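
As an aside, a minimal userspace sketch of the regval conversion described
in the comment above; div_round_closest stands in for the kernel's
DIV_ROUND_CLOSEST, and the input voltage is a hypothetical clamped threshold:

/* Standalone illustration of the INA3221 shunt-voltage register encoding. */
#include <stdio.h>

static int div_round_closest(int x, int d)
{
	return (x >= 0) ? (x + d / 2) / d : (x - d / 2) / d;
}

int main(void)
{
	int voltage_uv = 163800;	/* hypothetical clamped threshold, uV */

	/* SHUNT_SUM: 1 / 40uV scale, 1-bit shift -> divide by 20 */
	int regval_sum = div_round_closest(voltage_uv, 20) & 0xfffe;
	/* SHUNT[1-3]: 1 / 40uV scale, 3-bit shift -> divide by 5 */
	int regval_ch = div_round_closest(voltage_uv, 5) & 0xfff8;

	printf("sum: 0x%04x, channel: 0x%04x\n", regval_sum, regval_ch);
	return 0;
}
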
@@ -499,7 +585,10 @@ static int ina3221_read_string(struct device *dev, enum hwmon_sensor_types type,
struct ina3221_data *ina = dev_get_drvdata(dev);
int index = channel - 1;
- *str = ina->inputs[index].label;
+ if (channel == 7)
+ *str = "sum of shunt voltages";
+ else
+ *str = ina->inputs[index].label;
return 0;
}
@@ -529,6 +618,8 @@ static umode_t ina3221_is_visible(const void *drvdata,
case hwmon_in_label:
if (channel - 1 <= INA3221_CHANNEL3)
input = &ina->inputs[channel - 1];
+ else if (channel == 7)
+ return 0444;
/* Hide label node if label is not provided */
return (input && input->label) ? 0444 : 0;
case hwmon_in_input:
@@ -573,11 +664,16 @@ static const struct hwmon_channel_info *ina3221_info[] = {
/* 4-6: shunt voltage Channels */
HWMON_I_INPUT,
HWMON_I_INPUT,
- HWMON_I_INPUT),
+ HWMON_I_INPUT,
+ /* 7: summation of shunt voltage channels */
+ HWMON_I_INPUT | HWMON_I_LABEL),
HWMON_CHANNEL_INFO(curr,
+ /* 1-3: current channels */
+ INA3221_HWMON_CURR_CONFIG,
INA3221_HWMON_CURR_CONFIG,
INA3221_HWMON_CURR_CONFIG,
- INA3221_HWMON_CURR_CONFIG),
+ /* 4: summation of current channels */
+ HWMON_C_INPUT | HWMON_C_CRIT | HWMON_C_CRIT_ALARM),
NULL
};
@@ -624,6 +720,9 @@ static ssize_t ina3221_shunt_store(struct device *dev,
input->shunt_resistor = val;
+ /* Update summation_shunt_resistor for summation channel */
+ ina->summation_shunt_resistor = ina3221_summation_shunt_resistor(ina);
+
return count;
}
@@ -642,6 +741,7 @@ ATTRIBUTE_GROUPS(ina3221);
static const struct regmap_range ina3221_yes_ranges[] = {
regmap_reg_range(INA3221_CONFIG, INA3221_BUS3),
+ regmap_reg_range(INA3221_SHUNT_SUM, INA3221_SHUNT_SUM),
regmap_reg_range(INA3221_MASK_ENABLE, INA3221_MASK_ENABLE),
};
@@ -772,6 +872,9 @@ static int ina3221_probe(struct i2c_client *client,
ina->reg_config &= ~INA3221_CONFIG_CHx_EN(i);
}
+ /* Initialize summation_shunt_resistor for summation channel control */
+ ina->summation_shunt_resistor = ina3221_summation_shunt_resistor(ina);
+
ina->pm_dev = dev;
mutex_init(&ina->lock);
dev_set_drvdata(dev, ina);
@@ -875,6 +978,22 @@ static int __maybe_unused ina3221_resume(struct device *dev)
if (ret)
return ret;
+ /* Initialize summation channel control */
+ if (ina->summation_shunt_resistor) {
+ /*
+ * Take all three channels into summation by default
+ * Shunt measurements of disconnected channels should
+ * be 0, so it does not matter for summation.
+ */
+ ret = regmap_update_bits(ina->regmap, INA3221_MASK_ENABLE,
+ INA3221_MASK_ENABLE_SCC_MASK,
+ INA3221_MASK_ENABLE_SCC_MASK);
+ if (ret) {
+ dev_err(dev, "Unable to control summation channel\n");
+ return ret;
+ }
+ }
+
return 0;
}
diff --git a/drivers/hwmon/ltc2947-core.c b/drivers/hwmon/ltc2947-core.c
new file mode 100644
index 000000000000..bb3f7749a0b0
--- /dev/null
+++ b/drivers/hwmon/ltc2947-core.c
@@ -0,0 +1,1183 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Analog Devices LTC2947 high precision power and energy monitor
+ *
+ * Copyright 2019 Analog Devices Inc.
+ */
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+
+#include "ltc2947.h"
+
+/* registers */
+#define LTC2947_REG_PAGE_CTRL 0xFF
+#define LTC2947_REG_CTRL 0xF0
+#define LTC2947_REG_TBCTL 0xE9
+#define LTC2947_CONT_MODE_MASK BIT(3)
+#define LTC2947_CONT_MODE(x) FIELD_PREP(LTC2947_CONT_MODE_MASK, x)
+#define LTC2947_PRE_MASK GENMASK(2, 0)
+#define LTC2947_PRE(x) FIELD_PREP(LTC2947_PRE_MASK, x)
+#define LTC2947_DIV_MASK GENMASK(7, 3)
+#define LTC2947_DIV(x) FIELD_PREP(LTC2947_DIV_MASK, x)
+#define LTC2947_SHUTDOWN_MASK BIT(0)
+#define LTC2947_REG_ACCUM_POL 0xE1
+#define LTC2947_ACCUM_POL_1_MASK GENMASK(1, 0)
+#define LTC2947_ACCUM_POL_1(x) FIELD_PREP(LTC2947_ACCUM_POL_1_MASK, x)
+#define LTC2947_ACCUM_POL_2_MASK GENMASK(3, 2)
+#define LTC2947_ACCUM_POL_2(x) FIELD_PREP(LTC2947_ACCUM_POL_2_MASK, x)
+#define LTC2947_REG_ACCUM_DEADBAND 0xE4
+#define LTC2947_REG_GPIOSTATCTL 0x67
+#define LTC2947_GPIO_EN_MASK BIT(0)
+#define LTC2947_GPIO_EN(x) FIELD_PREP(LTC2947_GPIO_EN_MASK, x)
+#define LTC2947_GPIO_FAN_EN_MASK BIT(6)
+#define LTC2947_GPIO_FAN_EN(x) FIELD_PREP(LTC2947_GPIO_FAN_EN_MASK, x)
+#define LTC2947_GPIO_FAN_POL_MASK BIT(7)
+#define LTC2947_GPIO_FAN_POL(x) FIELD_PREP(LTC2947_GPIO_FAN_POL_MASK, x)
+#define LTC2947_REG_GPIO_ACCUM 0xE3
+/* 200 kHz */
+#define LTC2947_CLK_MIN 200000
+/* 25 MHz */
+#define LTC2947_CLK_MAX 25000000
+#define LTC2947_PAGE0 0
+#define LTC2947_PAGE1 1
+/* Voltage registers */
+#define LTC2947_REG_VOLTAGE 0xA0
+#define LTC2947_REG_VOLTAGE_MAX 0x50
+#define LTC2947_REG_VOLTAGE_MIN 0x52
+#define LTC2947_REG_VOLTAGE_THRE_H 0x90
+#define LTC2947_REG_VOLTAGE_THRE_L 0x92
+#define LTC2947_REG_DVCC 0xA4
+#define LTC2947_REG_DVCC_MAX 0x58
+#define LTC2947_REG_DVCC_MIN 0x5A
+#define LTC2947_REG_DVCC_THRE_H 0x98
+#define LTC2947_REG_DVCC_THRE_L 0x9A
+#define LTC2947_VOLTAGE_GEN_CHAN 0
+#define LTC2947_VOLTAGE_DVCC_CHAN 1
+/* in mV */
+#define VOLTAGE_MAX 15500
+#define VOLTAGE_MIN -300
+#define VDVCC_MAX 15000
+#define VDVCC_MIN 4750
+/* Current registers */
+#define LTC2947_REG_CURRENT 0x90
+#define LTC2947_REG_CURRENT_MAX 0x40
+#define LTC2947_REG_CURRENT_MIN 0x42
+#define LTC2947_REG_CURRENT_THRE_H 0x80
+#define LTC2947_REG_CURRENT_THRE_L 0x82
+/* in mA */
+#define CURRENT_MAX 30000
+#define CURRENT_MIN -30000
+/* Power registers */
+#define LTC2947_REG_POWER 0x93
+#define LTC2947_REG_POWER_MAX 0x44
+#define LTC2947_REG_POWER_MIN 0x46
+#define LTC2947_REG_POWER_THRE_H 0x84
+#define LTC2947_REG_POWER_THRE_L 0x86
+/* in uW */
+#define POWER_MAX 450000000
+#define POWER_MIN -450000000
+/* Temperature registers */
+#define LTC2947_REG_TEMP 0xA2
+#define LTC2947_REG_TEMP_MAX 0x54
+#define LTC2947_REG_TEMP_MIN 0x56
+#define LTC2947_REG_TEMP_THRE_H 0x94
+#define LTC2947_REG_TEMP_THRE_L 0x96
+#define LTC2947_REG_TEMP_FAN_THRE_H 0x9C
+#define LTC2947_REG_TEMP_FAN_THRE_L 0x9E
+#define LTC2947_TEMP_FAN_CHAN 1
+/* in millidegrees Celsius */
+#define TEMP_MAX 85000
+#define TEMP_MIN -40000
+/* Energy registers */
+#define LTC2947_REG_ENERGY1 0x06
+#define LTC2947_REG_ENERGY2 0x16
+/* Status/Alarm/Overflow registers */
+#define LTC2947_REG_STATUS 0x80
+#define LTC2947_REG_STATVT 0x81
+#define LTC2947_REG_STATIP 0x82
+#define LTC2947_REG_STATVDVCC 0x87
+
+#define LTC2947_ALERTS_SIZE (LTC2947_REG_STATVDVCC - LTC2947_REG_STATUS)
+#define LTC2947_MAX_VOLTAGE_MASK BIT(0)
+#define LTC2947_MIN_VOLTAGE_MASK BIT(1)
+#define LTC2947_MAX_CURRENT_MASK BIT(0)
+#define LTC2947_MIN_CURRENT_MASK BIT(1)
+#define LTC2947_MAX_POWER_MASK BIT(2)
+#define LTC2947_MIN_POWER_MASK BIT(3)
+#define LTC2947_MAX_TEMP_MASK BIT(2)
+#define LTC2947_MIN_TEMP_MASK BIT(3)
+#define LTC2947_MAX_TEMP_FAN_MASK BIT(4)
+#define LTC2947_MIN_TEMP_FAN_MASK BIT(5)
+
+struct ltc2947_data {
+ struct regmap *map;
+ struct device *dev;
+ /*
+ * The mutex is needed because the device has two memory pages. The
+ * correct page must be selected before each read/write, so the
+ * complete select_page->read/write sequence needs to be protected.
+ */
+ struct mutex lock;
+ u32 lsb_energy;
+ bool gpio_out;
+};
+
+static int __ltc2947_val_read16(const struct ltc2947_data *st, const u8 reg,
+ u64 *val)
+{
+ __be16 __val = 0;
+ int ret;
+
+ ret = regmap_bulk_read(st->map, reg, &__val, 2);
+ if (ret)
+ return ret;
+
+ *val = be16_to_cpu(__val);
+
+ return 0;
+}
+
+static int __ltc2947_val_read24(const struct ltc2947_data *st, const u8 reg,
+ u64 *val)
+{
+ __be32 __val = 0;
+ int ret;
+
+ ret = regmap_bulk_read(st->map, reg, &__val, 3);
+ if (ret)
+ return ret;
+
+ *val = be32_to_cpu(__val) >> 8;
+
+ return 0;
+}
+
+static int __ltc2947_val_read64(const struct ltc2947_data *st, const u8 reg,
+ u64 *val)
+{
+ __be64 __val = 0;
+ int ret;
+
+ ret = regmap_bulk_read(st->map, reg, &__val, 6);
+ if (ret)
+ return ret;
+
+ *val = be64_to_cpu(__val) >> 16;
+
+ return 0;
+}
+
+static int ltc2947_val_read(struct ltc2947_data *st, const u8 reg,
+ const u8 page, const size_t size, s64 *val)
+{
+ int ret;
+ u64 __val = 0;
+
+ mutex_lock(&st->lock);
+
+ ret = regmap_write(st->map, LTC2947_REG_PAGE_CTRL, page);
+ if (ret) {
+ mutex_unlock(&st->lock);
+ return ret;
+ }
+
+ dev_dbg(st->dev, "Read val, reg:%02X, p:%d sz:%zu\n", reg, page,
+ size);
+
+ switch (size) {
+ case 2:
+ ret = __ltc2947_val_read16(st, reg, &__val);
+ break;
+ case 3:
+ ret = __ltc2947_val_read24(st, reg, &__val);
+ break;
+ case 6:
+ ret = __ltc2947_val_read64(st, reg, &__val);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ mutex_unlock(&st->lock);
+
+ if (ret)
+ return ret;
+
+ *val = sign_extend64(__val, (8 * size) - 1);
+
+ dev_dbg(st->dev, "Got s:%lld, u:%016llX\n", *val, __val);
+
+ return 0;
+}
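+
For reference, an out-of-tree illustration of the sign extension performed
above; sign_extend64 here mirrors the kernel helper of the same name, and
the raw value is a made-up 3-byte (24-bit) register reading:

#include <stdio.h>
#include <stdint.h>

static int64_t sign_extend64(uint64_t value, int index)
{
	int shift = 63 - index;

	return (int64_t)(value << shift) >> shift;	/* arithmetic shift */
}

int main(void)
{
	uint64_t raw = 0x800001;	/* hypothetical 24-bit reading */

	/* for size = 3, the sign bit is bit (8 * 3) - 1 = 23 */
	printf("%lld\n", (long long)sign_extend64(raw, 23));	/* -8388607 */
	return 0;
}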
+
+static int __ltc2947_val_write64(const struct ltc2947_data *st, const u8 reg,
+ const u64 val)
+{
+ __be64 __val;
+
+ __val = cpu_to_be64(val << 16);
+ return regmap_bulk_write(st->map, reg, &__val, 6);
+}
+
+static int __ltc2947_val_write16(const struct ltc2947_data *st, const u8 reg,
+ const u16 val)
+{
+ __be16 __val;
+
+ __val = cpu_to_be16(val);
+ return regmap_bulk_write(st->map, reg, &__val, 2);
+}
+
+static int ltc2947_val_write(struct ltc2947_data *st, const u8 reg,
+ const u8 page, const size_t size, const u64 val)
+{
+ int ret;
+
+ mutex_lock(&st->lock);
+ /* set device on correct page */
+ ret = regmap_write(st->map, LTC2947_REG_PAGE_CTRL, page);
+ if (ret) {
+ mutex_unlock(&st->lock);
+ return ret;
+ }
+
+ dev_dbg(st->dev, "Write val, r:%02X, p:%d, sz:%zu, val:%016llX\n",
+ reg, page, size, val);
+
+ switch (size) {
+ case 2:
+ ret = __ltc2947_val_write16(st, reg, val);
+ break;
+ case 6:
+ ret = __ltc2947_val_write64(st, reg, val);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ mutex_unlock(&st->lock);
+
+ return ret;
+}
+
+static int ltc2947_reset_history(struct ltc2947_data *st, const u8 reg_h,
+ const u8 reg_l)
+{
+ int ret;
+ /*
+ * Reset the tracking registers. Tracking registers are all
+ * 2 bytes in size.
+ */
+ ret = ltc2947_val_write(st, reg_h, LTC2947_PAGE0, 2, 0x8000U);
+ if (ret)
+ return ret;
+
+ return ltc2947_val_write(st, reg_l, LTC2947_PAGE0, 2, 0x7FFFU);
+}
+
+static int ltc2947_alarm_read(struct ltc2947_data *st, const u8 reg,
+ const u32 mask, long *val)
+{
+ u8 offset = reg - LTC2947_REG_STATUS;
+ /* +1 to include status reg */
+ char alarms[LTC2947_ALERTS_SIZE + 1];
+ int ret = 0;
+
+ memset(alarms, 0, sizeof(alarms));
+
+ mutex_lock(&st->lock);
+
+ ret = regmap_write(st->map, LTC2947_REG_PAGE_CTRL, LTC2947_PAGE0);
+ if (ret)
+ goto unlock;
+
+ dev_dbg(st->dev, "Read alarm, reg:%02X, mask:%02X\n", reg, mask);
+ /*
+ * As stated in the datasheet, when Threshold and Overflow registers
+ * are used, the status and all alert registers must be read in one
+ * multi-byte transaction.
+ */
+ ret = regmap_bulk_read(st->map, LTC2947_REG_STATUS, alarms,
+ sizeof(alarms));
+ if (ret)
+ goto unlock;
+
+ /* get the alarm */
+ *val = !!(alarms[offset] & mask);
+unlock:
+ mutex_unlock(&st->lock);
+ return ret;
+}
+
+static ssize_t ltc2947_show_value(struct device *dev,
+ struct device_attribute *da, char *buf)
+{
+ struct ltc2947_data *st = dev_get_drvdata(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ int ret;
+ s64 val = 0;
+
+ ret = ltc2947_val_read(st, attr->index, LTC2947_PAGE0, 6, &val);
+ if (ret)
+ return ret;
+
+ /* value in microJoule. st->lsb_energy was multiplied by 10E9 */
+ val = div_s64(val * st->lsb_energy, 1000);
+
+ return sprintf(buf, "%lld\n", val);
+}
+
+static int ltc2947_read_temp(struct device *dev, const u32 attr, long *val,
+ const int channel)
+{
+ int ret;
+ struct ltc2947_data *st = dev_get_drvdata(dev);
+ s64 __val = 0;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ ret = ltc2947_val_read(st, LTC2947_REG_TEMP, LTC2947_PAGE0,
+ 2, &__val);
+ break;
+ case hwmon_temp_highest:
+ ret = ltc2947_val_read(st, LTC2947_REG_TEMP_MAX, LTC2947_PAGE0,
+ 2, &__val);
+ break;
+ case hwmon_temp_lowest:
+ ret = ltc2947_val_read(st, LTC2947_REG_TEMP_MIN, LTC2947_PAGE0,
+ 2, &__val);
+ break;
+ case hwmon_temp_max_alarm:
+ if (channel == LTC2947_TEMP_FAN_CHAN)
+ return ltc2947_alarm_read(st, LTC2947_REG_STATVT,
+ LTC2947_MAX_TEMP_FAN_MASK,
+ val);
+
+ return ltc2947_alarm_read(st, LTC2947_REG_STATVT,
+ LTC2947_MAX_TEMP_MASK, val);
+ case hwmon_temp_min_alarm:
+ if (channel == LTC2947_TEMP_FAN_CHAN)
+ return ltc2947_alarm_read(st, LTC2947_REG_STATVT,
+ LTC2947_MIN_TEMP_FAN_MASK,
+ val);
+
+ return ltc2947_alarm_read(st, LTC2947_REG_STATVT,
+ LTC2947_MIN_TEMP_MASK, val);
+ case hwmon_temp_max:
+ if (channel == LTC2947_TEMP_FAN_CHAN)
+ ret = ltc2947_val_read(st, LTC2947_REG_TEMP_FAN_THRE_H,
+ LTC2947_PAGE1, 2, &__val);
+ else
+ ret = ltc2947_val_read(st, LTC2947_REG_TEMP_THRE_H,
+ LTC2947_PAGE1, 2, &__val);
+ break;
+ case hwmon_temp_min:
+ if (channel == LTC2947_TEMP_FAN_CHAN)
+ ret = ltc2947_val_read(st, LTC2947_REG_TEMP_FAN_THRE_L,
+ LTC2947_PAGE1, 2, &__val);
+ else
+ ret = ltc2947_val_read(st, LTC2947_REG_TEMP_THRE_L,
+ LTC2947_PAGE1, 2, &__val);
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ if (ret)
+ return ret;
+
+ /* in millidegrees Celsius, temp is given by: */
+ *val = (__val * 204) + 550;
+
+ return 0;
+}
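+
A worked example (illustration only) of the temperature scaling used above,
plus its inverse from the write path; the raw register value is hypothetical:

#include <stdio.h>

int main(void)
{
	long raw = 120;				/* hypothetical register value */
	long milli_c = raw * 204 + 550;		/* read path: 25030 mC */
	long back = (milli_c - 550 + 204 / 2) / 204;	/* write path, rounded */

	printf("%ld mC -> raw %ld\n", milli_c, back);
	return 0;
}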
+
+static int ltc2947_read_power(struct device *dev, const u32 attr, long *val)
+{
+ struct ltc2947_data *st = dev_get_drvdata(dev);
+ int ret;
+ u32 lsb = 200000; /* in uW */
+ s64 __val = 0;
+
+ switch (attr) {
+ case hwmon_power_input:
+ ret = ltc2947_val_read(st, LTC2947_REG_POWER, LTC2947_PAGE0,
+ 3, &__val);
+ lsb = 50000;
+ break;
+ case hwmon_power_input_highest:
+ ret = ltc2947_val_read(st, LTC2947_REG_POWER_MAX, LTC2947_PAGE0,
+ 2, &__val);
+ break;
+ case hwmon_power_input_lowest:
+ ret = ltc2947_val_read(st, LTC2947_REG_POWER_MIN, LTC2947_PAGE0,
+ 2, &__val);
+ break;
+ case hwmon_power_max_alarm:
+ return ltc2947_alarm_read(st, LTC2947_REG_STATIP,
+ LTC2947_MAX_POWER_MASK, val);
+ case hwmon_power_min_alarm:
+ return ltc2947_alarm_read(st, LTC2947_REG_STATIP,
+ LTC2947_MIN_POWER_MASK, val);
+ case hwmon_power_max:
+ ret = ltc2947_val_read(st, LTC2947_REG_POWER_THRE_H,
+ LTC2947_PAGE1, 2, &__val);
+ break;
+ case hwmon_power_min:
+ ret = ltc2947_val_read(st, LTC2947_REG_POWER_THRE_L,
+ LTC2947_PAGE1, 2, &__val);
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ if (ret)
+ return ret;
+
+ *val = __val * lsb;
+
+ return 0;
+}
+
+static int ltc2947_read_curr(struct device *dev, const u32 attr, long *val)
+{
+ struct ltc2947_data *st = dev_get_drvdata(dev);
+ int ret;
+ u8 lsb = 12; /* in mA */
+ s64 __val = 0;
+
+ switch (attr) {
+ case hwmon_curr_input:
+ ret = ltc2947_val_read(st, LTC2947_REG_CURRENT,
+ LTC2947_PAGE0, 3, &__val);
+ lsb = 3;
+ break;
+ case hwmon_curr_highest:
+ ret = ltc2947_val_read(st, LTC2947_REG_CURRENT_MAX,
+ LTC2947_PAGE0, 2, &__val);
+ break;
+ case hwmon_curr_lowest:
+ ret = ltc2947_val_read(st, LTC2947_REG_CURRENT_MIN,
+ LTC2947_PAGE0, 2, &__val);
+ break;
+ case hwmon_curr_max_alarm:
+ return ltc2947_alarm_read(st, LTC2947_REG_STATIP,
+ LTC2947_MAX_CURRENT_MASK, val);
+ case hwmon_curr_min_alarm:
+ return ltc2947_alarm_read(st, LTC2947_REG_STATIP,
+ LTC2947_MIN_CURRENT_MASK, val);
+ case hwmon_curr_max:
+ ret = ltc2947_val_read(st, LTC2947_REG_CURRENT_THRE_H,
+ LTC2947_PAGE1, 2, &__val);
+ break;
+ case hwmon_curr_min:
+ ret = ltc2947_val_read(st, LTC2947_REG_CURRENT_THRE_L,
+ LTC2947_PAGE1, 2, &__val);
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ if (ret)
+ return ret;
+
+ *val = __val * lsb;
+
+ return 0;
+}
+
+static int ltc2947_read_in(struct device *dev, const u32 attr, long *val,
+ const int channel)
+{
+ struct ltc2947_data *st = dev_get_drvdata(dev);
+ int ret;
+ u8 lsb = 2; /* in mV */
+ s64 __val = 0;
+
+ if (channel < 0 || channel > LTC2947_VOLTAGE_DVCC_CHAN) {
+ dev_err(st->dev, "Invalid chan%d for voltage", channel);
+ return -EINVAL;
+ }
+
+ switch (attr) {
+ case hwmon_in_input:
+ if (channel == LTC2947_VOLTAGE_DVCC_CHAN) {
+ ret = ltc2947_val_read(st, LTC2947_REG_DVCC,
+ LTC2947_PAGE0, 2, &__val);
+ lsb = 145;
+ } else {
+ ret = ltc2947_val_read(st, LTC2947_REG_VOLTAGE,
+ LTC2947_PAGE0, 2, &__val);
+ }
+ break;
+ case hwmon_in_highest:
+ if (channel == LTC2947_VOLTAGE_DVCC_CHAN) {
+ ret = ltc2947_val_read(st, LTC2947_REG_DVCC_MAX,
+ LTC2947_PAGE0, 2, &__val);
+ lsb = 145;
+ } else {
+ ret = ltc2947_val_read(st, LTC2947_REG_VOLTAGE_MAX,
+ LTC2947_PAGE0, 2, &__val);
+ }
+ break;
+ case hwmon_in_lowest:
+ if (channel == LTC2947_VOLTAGE_DVCC_CHAN) {
+ ret = ltc2947_val_read(st, LTC2947_REG_DVCC_MIN,
+ LTC2947_PAGE0, 2, &__val);
+ lsb = 145;
+ } else {
+ ret = ltc2947_val_read(st, LTC2947_REG_VOLTAGE_MIN,
+ LTC2947_PAGE0, 2, &__val);
+ }
+ break;
+ case hwmon_in_max_alarm:
+ if (channel == LTC2947_VOLTAGE_DVCC_CHAN)
+ return ltc2947_alarm_read(st, LTC2947_REG_STATVDVCC,
+ LTC2947_MAX_VOLTAGE_MASK,
+ val);
+
+ return ltc2947_alarm_read(st, LTC2947_REG_STATVT,
+ LTC2947_MAX_VOLTAGE_MASK, val);
+ case hwmon_in_min_alarm:
+ if (channel == LTC2947_VOLTAGE_DVCC_CHAN)
+ return ltc2947_alarm_read(st, LTC2947_REG_STATVDVCC,
+ LTC2947_MIN_VOLTAGE_MASK,
+ val);
+
+ return ltc2947_alarm_read(st, LTC2947_REG_STATVT,
+ LTC2947_MIN_VOLTAGE_MASK, val);
+ case hwmon_in_max:
+ if (channel == LTC2947_VOLTAGE_DVCC_CHAN) {
+ ret = ltc2947_val_read(st, LTC2947_REG_DVCC_THRE_H,
+ LTC2947_PAGE1, 2, &__val);
+ lsb = 145;
+ } else {
+ ret = ltc2947_val_read(st, LTC2947_REG_VOLTAGE_THRE_H,
+ LTC2947_PAGE1, 2, &__val);
+ }
+ break;
+ case hwmon_in_min:
+ if (channel == LTC2947_VOLTAGE_DVCC_CHAN) {
+ ret = ltc2947_val_read(st, LTC2947_REG_DVCC_THRE_L,
+ LTC2947_PAGE1, 2, &__val);
+ lsb = 145;
+ } else {
+ ret = ltc2947_val_read(st, LTC2947_REG_VOLTAGE_THRE_L,
+ LTC2947_PAGE1, 2, &__val);
+ }
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ if (ret)
+ return ret;
+
+ *val = __val * lsb;
+
+ return 0;
+}
+
+static int ltc2947_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ switch (type) {
+ case hwmon_in:
+ return ltc2947_read_in(dev, attr, val, channel);
+ case hwmon_curr:
+ return ltc2947_read_curr(dev, attr, val);
+ case hwmon_power:
+ return ltc2947_read_power(dev, attr, val);
+ case hwmon_temp:
+ return ltc2947_read_temp(dev, attr, val, channel);
+ default:
+ return -ENOTSUPP;
+ }
+}
+
+static int ltc2947_write_temp(struct device *dev, const u32 attr,
+ long val, const int channel)
+{
+ struct ltc2947_data *st = dev_get_drvdata(dev);
+
+ if (channel < 0 || channel > LTC2947_TEMP_FAN_CHAN) {
+ dev_err(st->dev, "Invalid chan%d for temperature", channel);
+ return -EINVAL;
+ }
+
+ switch (attr) {
+ case hwmon_temp_reset_history:
+ if (val != 1)
+ return -EINVAL;
+ return ltc2947_reset_history(st, LTC2947_REG_TEMP_MAX,
+ LTC2947_REG_TEMP_MIN);
+ case hwmon_temp_max:
+ val = clamp_val(val, TEMP_MIN, TEMP_MAX);
+ if (channel == LTC2947_TEMP_FAN_CHAN) {
+ if (!st->gpio_out)
+ return -ENOTSUPP;
+
+ return ltc2947_val_write(st,
+ LTC2947_REG_TEMP_FAN_THRE_H,
+ LTC2947_PAGE1, 2,
+ DIV_ROUND_CLOSEST(val - 550, 204));
+ }
+
+ return ltc2947_val_write(st, LTC2947_REG_TEMP_THRE_H,
+ LTC2947_PAGE1, 2,
+ DIV_ROUND_CLOSEST(val - 550, 204));
+ case hwmon_temp_min:
+ val = clamp_val(val, TEMP_MIN, TEMP_MAX);
+ if (channel == LTC2947_TEMP_FAN_CHAN) {
+ if (!st->gpio_out)
+ return -ENOTSUPP;
+
+ return ltc2947_val_write(st,
+ LTC2947_REG_TEMP_FAN_THRE_L,
+ LTC2947_PAGE1, 2,
+ DIV_ROUND_CLOSEST(val - 550, 204));
+ }
+
+ return ltc2947_val_write(st, LTC2947_REG_TEMP_THRE_L,
+ LTC2947_PAGE1, 2,
+ DIV_ROUND_CLOSEST(val - 550, 204));
+ default:
+ return -ENOTSUPP;
+ }
+}
+
+static int ltc2947_write_power(struct device *dev, const u32 attr,
+ long val)
+{
+ struct ltc2947_data *st = dev_get_drvdata(dev);
+
+ switch (attr) {
+ case hwmon_power_reset_history:
+ if (val != 1)
+ return -EINVAL;
+ return ltc2947_reset_history(st, LTC2947_REG_POWER_MAX,
+ LTC2947_REG_POWER_MIN);
+ case hwmon_power_max:
+ val = clamp_val(val, POWER_MIN, POWER_MAX);
+ return ltc2947_val_write(st, LTC2947_REG_POWER_THRE_H,
+ LTC2947_PAGE1, 2,
+ DIV_ROUND_CLOSEST(val, 200000));
+ case hwmon_power_min:
+ val = clamp_val(val, POWER_MIN, POWER_MAX);
+ return ltc2947_val_write(st, LTC2947_REG_POWER_THRE_L,
+ LTC2947_PAGE1, 2,
+ DIV_ROUND_CLOSEST(val, 200000));
+ default:
+ return -ENOTSUPP;
+ }
+}
+
+static int ltc2947_write_curr(struct device *dev, const u32 attr,
+ long val)
+{
+ struct ltc2947_data *st = dev_get_drvdata(dev);
+
+ switch (attr) {
+ case hwmon_curr_reset_history:
+ if (val != 1)
+ return -EINVAL;
+ return ltc2947_reset_history(st, LTC2947_REG_CURRENT_MAX,
+ LTC2947_REG_CURRENT_MIN);
+ case hwmon_curr_max:
+ val = clamp_val(val, CURRENT_MIN, CURRENT_MAX);
+ return ltc2947_val_write(st, LTC2947_REG_CURRENT_THRE_H,
+ LTC2947_PAGE1, 2,
+ DIV_ROUND_CLOSEST(val, 12));
+ case hwmon_curr_min:
+ val = clamp_val(val, CURRENT_MIN, CURRENT_MAX);
+ return ltc2947_val_write(st, LTC2947_REG_CURRENT_THRE_L,
+ LTC2947_PAGE1, 2,
+ DIV_ROUND_CLOSEST(val, 12));
+ default:
+ return -ENOTSUPP;
+ }
+}
+
+static int ltc2947_write_in(struct device *dev, const u32 attr, long val,
+ const int channel)
+{
+ struct ltc2947_data *st = dev_get_drvdata(dev);
+
+ if (channel > LTC2947_VOLTAGE_DVCC_CHAN) {
+ dev_err(st->dev, "Invalid chan%d for voltage", channel);
+ return -EINVAL;
+ }
+
+ switch (attr) {
+ case hwmon_in_reset_history:
+ if (val != 1)
+ return -EINVAL;
+
+ if (channel == LTC2947_VOLTAGE_DVCC_CHAN)
+ return ltc2947_reset_history(st, LTC2947_REG_DVCC_MAX,
+ LTC2947_REG_DVCC_MIN);
+
+ return ltc2947_reset_history(st, LTC2947_REG_VOLTAGE_MAX,
+ LTC2947_REG_VOLTAGE_MIN);
+ case hwmon_in_max:
+ if (channel == LTC2947_VOLTAGE_DVCC_CHAN) {
+ val = clamp_val(val, VDVCC_MIN, VDVCC_MAX);
+ return ltc2947_val_write(st, LTC2947_REG_DVCC_THRE_H,
+ LTC2947_PAGE1, 2,
+ DIV_ROUND_CLOSEST(val, 145));
+ }
+
+ val = clamp_val(val, VOLTAGE_MIN, VOLTAGE_MAX);
+ return ltc2947_val_write(st, LTC2947_REG_VOLTAGE_THRE_H,
+ LTC2947_PAGE1, 2,
+ DIV_ROUND_CLOSEST(val, 2));
+ case hwmon_in_min:
+ if (channel == LTC2947_VOLTAGE_DVCC_CHAN) {
+ val = clamp_val(val, VDVCC_MIN, VDVCC_MAX);
+ return ltc2947_val_write(st, LTC2947_REG_DVCC_THRE_L,
+ LTC2947_PAGE1, 2,
+ DIV_ROUND_CLOSEST(val, 145));
+ }
+
+ val = clamp_val(val, VOLTAGE_MIN, VOLTAGE_MAX);
+ return ltc2947_val_write(st, LTC2947_REG_VOLTAGE_THRE_L,
+ LTC2947_PAGE1, 2,
+ DIV_ROUND_CLOSEST(val, 2));
+ default:
+ return -ENOTSUPP;
+ }
+}
+
+static int ltc2947_write(struct device *dev,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ switch (type) {
+ case hwmon_in:
+ return ltc2947_write_in(dev, attr, val, channel);
+ case hwmon_curr:
+ return ltc2947_write_curr(dev, attr, val);
+ case hwmon_power:
+ return ltc2947_write_power(dev, attr, val);
+ case hwmon_temp:
+ return ltc2947_write_temp(dev, attr, val, channel);
+ default:
+ return -ENOTSUPP;
+ }
+}
+
+static int ltc2947_read_labels(struct device *dev,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel, const char **str)
+{
+ switch (type) {
+ case hwmon_in:
+ if (channel == LTC2947_VOLTAGE_DVCC_CHAN)
+ *str = "DVCC";
+ else
+ *str = "VP-VM";
+ return 0;
+ case hwmon_curr:
+ *str = "IP-IM";
+ return 0;
+ case hwmon_temp:
+ if (channel == LTC2947_TEMP_FAN_CHAN)
+ *str = "TEMPFAN";
+ else
+ *str = "Ambient";
+ return 0;
+ case hwmon_power:
+ *str = "Power";
+ return 0;
+ default:
+ return -ENOTSUPP;
+ }
+}
+
+static int ltc2947_in_is_visible(const u32 attr)
+{
+ switch (attr) {
+ case hwmon_in_input:
+ case hwmon_in_highest:
+ case hwmon_in_lowest:
+ case hwmon_in_max_alarm:
+ case hwmon_in_min_alarm:
+ case hwmon_in_label:
+ return 0444;
+ case hwmon_in_reset_history:
+ return 0200;
+ case hwmon_in_max:
+ case hwmon_in_min:
+ return 0644;
+ default:
+ return 0;
+ }
+}
+
+static int ltc2947_curr_is_visible(const u32 attr)
+{
+ switch (attr) {
+ case hwmon_curr_input:
+ case hwmon_curr_highest:
+ case hwmon_curr_lowest:
+ case hwmon_curr_max_alarm:
+ case hwmon_curr_min_alarm:
+ case hwmon_curr_label:
+ return 0444;
+ case hwmon_curr_reset_history:
+ return 0200;
+ case hwmon_curr_max:
+ case hwmon_curr_min:
+ return 0644;
+ default:
+ return 0;
+ }
+}
+
+static int ltc2947_power_is_visible(const u32 attr)
+{
+ switch (attr) {
+ case hwmon_power_input:
+ case hwmon_power_input_highest:
+ case hwmon_power_input_lowest:
+ case hwmon_power_label:
+ case hwmon_power_max_alarm:
+ case hwmon_power_min_alarm:
+ return 0444;
+ case hwmon_power_reset_history:
+ return 0200;
+ case hwmon_power_max:
+ case hwmon_power_min:
+ return 0644;
+ default:
+ return 0;
+ }
+}
+
+static int ltc2947_temp_is_visible(const u32 attr)
+{
+ switch (attr) {
+ case hwmon_temp_input:
+ case hwmon_temp_highest:
+ case hwmon_temp_lowest:
+ case hwmon_temp_max_alarm:
+ case hwmon_temp_min_alarm:
+ case hwmon_temp_label:
+ return 0444;
+ case hwmon_temp_reset_history:
+ return 0200;
+ case hwmon_temp_max:
+ case hwmon_temp_min:
+ return 0644;
+ default:
+ return 0;
+ }
+}
+
+static umode_t ltc2947_is_visible(const void *data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ switch (type) {
+ case hwmon_in:
+ return ltc2947_in_is_visible(attr);
+ case hwmon_curr:
+ return ltc2947_curr_is_visible(attr);
+ case hwmon_power:
+ return ltc2947_power_is_visible(attr);
+ case hwmon_temp:
+ return ltc2947_temp_is_visible(attr);
+ default:
+ return 0;
+ }
+}
+
+static const struct hwmon_channel_info *ltc2947_info[] = {
+ HWMON_CHANNEL_INFO(in,
+ HWMON_I_INPUT | HWMON_I_LOWEST | HWMON_I_HIGHEST |
+ HWMON_I_MAX | HWMON_I_MIN | HWMON_I_RESET_HISTORY |
+ HWMON_I_MIN_ALARM | HWMON_I_MAX_ALARM |
+ HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LOWEST | HWMON_I_HIGHEST |
+ HWMON_I_MAX | HWMON_I_MIN | HWMON_I_RESET_HISTORY |
+ HWMON_I_MIN_ALARM | HWMON_I_MAX_ALARM |
+ HWMON_I_LABEL),
+ HWMON_CHANNEL_INFO(curr,
+ HWMON_C_INPUT | HWMON_C_LOWEST | HWMON_C_HIGHEST |
+ HWMON_C_MAX | HWMON_C_MIN | HWMON_C_RESET_HISTORY |
+ HWMON_C_MIN_ALARM | HWMON_C_MAX_ALARM |
+ HWMON_C_LABEL),
+ HWMON_CHANNEL_INFO(power,
+ HWMON_P_INPUT | HWMON_P_INPUT_LOWEST |
+ HWMON_P_INPUT_HIGHEST | HWMON_P_MAX | HWMON_P_MIN |
+ HWMON_P_RESET_HISTORY | HWMON_P_MAX_ALARM |
+ HWMON_P_MIN_ALARM | HWMON_P_LABEL),
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT | HWMON_T_LOWEST | HWMON_T_HIGHEST |
+ HWMON_T_MAX | HWMON_T_MIN | HWMON_T_RESET_HISTORY |
+ HWMON_T_MIN_ALARM | HWMON_T_MAX_ALARM |
+ HWMON_T_LABEL,
+ HWMON_T_MAX_ALARM | HWMON_T_MIN_ALARM | HWMON_T_MAX |
+ HWMON_T_MIN | HWMON_T_LABEL),
+ NULL
+};
+
+static const struct hwmon_ops ltc2947_hwmon_ops = {
+ .is_visible = ltc2947_is_visible,
+ .read = ltc2947_read,
+ .write = ltc2947_write,
+ .read_string = ltc2947_read_labels,
+};
+
+static const struct hwmon_chip_info ltc2947_chip_info = {
+ .ops = &ltc2947_hwmon_ops,
+ .info = ltc2947_info,
+};
+
+/* energy attributes are 6 bytes wide, so we need u64 */
+static SENSOR_DEVICE_ATTR(energy1_input, 0444, ltc2947_show_value, NULL,
+ LTC2947_REG_ENERGY1);
+static SENSOR_DEVICE_ATTR(energy2_input, 0444, ltc2947_show_value, NULL,
+ LTC2947_REG_ENERGY2);
+
+static struct attribute *ltc2947_attrs[] = {
+ &sensor_dev_attr_energy1_input.dev_attr.attr,
+ &sensor_dev_attr_energy2_input.dev_attr.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(ltc2947);
+
+static void ltc2947_clk_disable(void *data)
+{
+ struct clk *extclk = data;
+
+ clk_disable_unprepare(extclk);
+}
+
+static int ltc2947_setup(struct ltc2947_data *st)
+{
+ int ret;
+ struct clk *extclk;
+ u32 dummy, deadband, pol;
+ u32 accum[2];
+
+ /* clear status register by reading it */
+ ret = regmap_read(st->map, LTC2947_REG_STATUS, &dummy);
+ if (ret)
+ return ret;
+ /*
+ * Set max/min for power here since the default values times the
+ * scale would overflow on a 32-bit arch
+ */
+ ret = ltc2947_val_write(st, LTC2947_REG_POWER_THRE_H, LTC2947_PAGE1, 2,
+ POWER_MAX / 200000);
+ if (ret)
+ return ret;
+
+ ret = ltc2947_val_write(st, LTC2947_REG_POWER_THRE_L, LTC2947_PAGE1, 2,
+ POWER_MIN / 200000);
+ if (ret)
+ return ret;
+
+ /* check external clock presence */
+ extclk = devm_clk_get(st->dev, NULL);
+ if (!IS_ERR(extclk)) {
+ unsigned long rate_hz;
+ u8 pre = 0, div, tbctl;
+ u64 aux;
+
+ /* let's calculate and set the right values in TBCTL */
+ rate_hz = clk_get_rate(extclk);
+ if (rate_hz < LTC2947_CLK_MIN || rate_hz > LTC2947_CLK_MAX) {
+ dev_err(st->dev, "Invalid rate:%lu for external clock",
+ rate_hz);
+ return -EINVAL;
+ }
+
+ ret = clk_prepare_enable(extclk);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(st->dev, ltc2947_clk_disable,
+ extclk);
+ if (ret)
+ return ret;
+ /* as in table 1 of the datasheet */
+ if (rate_hz >= LTC2947_CLK_MIN && rate_hz <= 1000000)
+ pre = 0;
+ else if (rate_hz > 1000000 && rate_hz <= 2000000)
+ pre = 1;
+ else if (rate_hz > 2000000 && rate_hz <= 4000000)
+ pre = 2;
+ else if (rate_hz > 4000000 && rate_hz <= 8000000)
+ pre = 3;
+ else if (rate_hz > 8000000 && rate_hz <= 16000000)
+ pre = 4;
+ else if (rate_hz > 16000000 && rate_hz <= LTC2947_CLK_MAX)
+ pre = 5;
+ /*
+ * Div is given by:
+ * floor(fref / (2^PRE * 32768))
+ */
+ div = rate_hz / ((1 << pre) * 32768);
+ tbctl = LTC2947_PRE(pre) | LTC2947_DIV(div);
+
+ ret = regmap_write(st->map, LTC2947_REG_TBCTL, tbctl);
+ if (ret)
+ return ret;
+ /*
+ * The energy lsb is given by (in W*s):
+ * 0.6416 * (1/fref) * 2^PRE * (DIV + 1)
+ * The value is multiplied by 10E9
+ */
+ aux = (div + 1) * ((1 << pre) * 641600000ULL);
+ st->lsb_energy = DIV_ROUND_CLOSEST_ULL(aux, rate_hz);
+ } else {
+ /* 19.89E-6 * 10E9 */
+ st->lsb_energy = 19890;
+ }
+ ret = of_property_read_u32_array(st->dev->of_node,
+ "adi,accumulator-ctl-pol", accum,
+ ARRAY_SIZE(accum));
+ if (!ret) {
+ u32 accum_reg = LTC2947_ACCUM_POL_1(accum[0]) |
+ LTC2947_ACCUM_POL_2(accum[1]);
+
+ ret = regmap_write(st->map, LTC2947_REG_ACCUM_POL, accum_reg);
+ if (ret)
+ return ret;
+ }
+ ret = of_property_read_u32(st->dev->of_node,
+ "adi,accumulation-deadband-microamp",
+ &deadband);
+ if (!ret) {
+ /* the deadband LSB is the same as the current LSB, i.e. 3mA */
+ ret = regmap_write(st->map, LTC2947_REG_ACCUM_DEADBAND,
+ deadband / (1000 * 3));
+ if (ret)
+ return ret;
+ }
+ /* check gpio cfg */
+ ret = of_property_read_u32(st->dev->of_node, "adi,gpio-out-pol", &pol);
+ if (!ret) {
+ /* setup GPIO as output */
+ u32 gpio_ctl = LTC2947_GPIO_EN(1) | LTC2947_GPIO_FAN_EN(1) |
+ LTC2947_GPIO_FAN_POL(pol);
+
+ st->gpio_out = true;
+ ret = regmap_write(st->map, LTC2947_REG_GPIOSTATCTL, gpio_ctl);
+ if (ret)
+ return ret;
+ }
+ ret = of_property_read_u32_array(st->dev->of_node, "adi,gpio-in-accum",
+ accum, ARRAY_SIZE(accum));
+ if (!ret) {
+ /*
+ * Setup the accum options. The gpioctl is already defined as
+ * input by default.
+ */
+ u32 accum_val = LTC2947_ACCUM_POL_1(accum[0]) |
+ LTC2947_ACCUM_POL_2(accum[1]);
+
+ if (st->gpio_out) {
+ dev_err(st->dev,
+ "Cannot have input gpio config if already configured as output");
+ return -EINVAL;
+ }
+
+ ret = regmap_write(st->map, LTC2947_REG_GPIO_ACCUM, accum_val);
+ if (ret)
+ return ret;
+ }
+
+ /* set continuous mode */
+ return regmap_update_bits(st->map, LTC2947_REG_CTRL,
+ LTC2947_CONT_MODE_MASK, LTC2947_CONT_MODE(1));
+}
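+
A worked example (illustration only) of the TBCTL math above, assuming a
hypothetical 4 MHz external clock; note the computed energy LSB lands on
19890, the same value used as the internal-clock fallback above:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned long rate_hz = 4000000;	/* hypothetical fref */
	unsigned int pre = 2;			/* table 1: 2MHz < fref <= 4MHz */
	unsigned int div = rate_hz / ((1 << pre) * 32768);	/* = 30 */
	uint64_t aux = (uint64_t)(div + 1) * ((1 << pre) * 641600000ULL);
	uint64_t lsb = (aux + rate_hz / 2) / rate_hz;	/* round to closest */

	/* prints pre=2 div=30 lsb=19890 */
	printf("pre=%u div=%u lsb=%llu\n", pre, div, (unsigned long long)lsb);
	return 0;
}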
+
+int ltc2947_core_probe(struct regmap *map, const char *name)
+{
+ struct ltc2947_data *st;
+ struct device *dev = regmap_get_device(map);
+ struct device *hwmon;
+ int ret;
+
+ st = devm_kzalloc(dev, sizeof(*st), GFP_KERNEL);
+ if (!st)
+ return -ENOMEM;
+
+ st->map = map;
+ st->dev = dev;
+ dev_set_drvdata(dev, st);
+ mutex_init(&st->lock);
+
+ ret = ltc2947_setup(st);
+ if (ret)
+ return ret;
+
+ hwmon = devm_hwmon_device_register_with_info(dev, name, st,
+ &ltc2947_chip_info,
+ ltc2947_groups);
+ return PTR_ERR_OR_ZERO(hwmon);
+}
+EXPORT_SYMBOL_GPL(ltc2947_core_probe);
+
+static int __maybe_unused ltc2947_resume(struct device *dev)
+{
+ struct ltc2947_data *st = dev_get_drvdata(dev);
+ u32 ctrl = 0;
+ int ret;
+
+ /* dummy read to wake the device */
+ ret = regmap_read(st->map, LTC2947_REG_CTRL, &ctrl);
+ if (ret)
+ return ret;
+ /*
+ * Wait for the device. It takes 100ms to wake up, so 10ms extra
+ * should be enough.
+ */
+ msleep(110);
+ ret = regmap_read(st->map, LTC2947_REG_CTRL, &ctrl);
+ if (ret)
+ return ret;
+ /* ctrl should be 0 */
+ if (ctrl != 0) {
+ dev_err(st->dev, "Device failed to wake up, ctl:%02X\n", ctrl);
+ return -ETIMEDOUT;
+ }
+
+ /* set continuous mode */
+ return regmap_update_bits(st->map, LTC2947_REG_CTRL,
+ LTC2947_CONT_MODE_MASK, LTC2947_CONT_MODE(1));
+}
+
+static int __maybe_unused ltc2947_suspend(struct device *dev)
+{
+ struct ltc2947_data *st = dev_get_drvdata(dev);
+
+ return regmap_update_bits(st->map, LTC2947_REG_CTRL,
+ LTC2947_SHUTDOWN_MASK, 1);
+}
+
+SIMPLE_DEV_PM_OPS(ltc2947_pm_ops, ltc2947_suspend, ltc2947_resume);
+EXPORT_SYMBOL_GPL(ltc2947_pm_ops);
+
+const struct of_device_id ltc2947_of_match[] = {
+ { .compatible = "adi,ltc2947" },
+ {}
+};
+EXPORT_SYMBOL_GPL(ltc2947_of_match);
+MODULE_DEVICE_TABLE(of, ltc2947_of_match);
+
+MODULE_AUTHOR("Nuno Sa <nuno.sa@analog.com>");
+MODULE_DESCRIPTION("LTC2947 power and energy monitor core driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/ltc2947-i2c.c b/drivers/hwmon/ltc2947-i2c.c
new file mode 100644
index 000000000000..cf6074b110ae
--- /dev/null
+++ b/drivers/hwmon/ltc2947-i2c.c
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Analog Devices LTC2947 high precision power and energy monitor over I2C
+ *
+ * Copyright 2019 Analog Devices Inc.
+ */
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+
+#include "ltc2947.h"
+
+static const struct regmap_config ltc2947_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+static int ltc2947_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct regmap *map;
+
+ map = devm_regmap_init_i2c(i2c, &ltc2947_regmap_config);
+ if (IS_ERR(map))
+ return PTR_ERR(map);
+
+ return ltc2947_core_probe(map, i2c->name);
+}
+
+static const struct i2c_device_id ltc2947_id[] = {
+ {"ltc2947", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, ltc2947_id);
+
+static struct i2c_driver ltc2947_driver = {
+ .driver = {
+ .name = "ltc2947",
+ .of_match_table = ltc2947_of_match,
+ .pm = &ltc2947_pm_ops,
+ },
+ .probe = ltc2947_probe,
+ .id_table = ltc2947_id,
+};
+module_i2c_driver(ltc2947_driver);
+
+MODULE_AUTHOR("Nuno Sa <nuno.sa@analog.com>");
+MODULE_DESCRIPTION("LTC2947 I2C power and energy monitor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/ltc2947-spi.c b/drivers/hwmon/ltc2947-spi.c
new file mode 100644
index 000000000000..c24ca569db1b
--- /dev/null
+++ b/drivers/hwmon/ltc2947-spi.c
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Analog Devices LTC2947 high precision power and energy monitor over SPI
+ *
+ * Copyright 2019 Analog Devices Inc.
+ */
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+#include <linux/spi/spi.h>
+
+#include "ltc2947.h"
+
+static const struct regmap_config ltc2947_regmap_config = {
+ .reg_bits = 16,
+ .val_bits = 8,
+ .read_flag_mask = BIT(0),
+};
+
+static int ltc2947_probe(struct spi_device *spi)
+{
+ struct regmap *map;
+
+ map = devm_regmap_init_spi(spi, &ltc2947_regmap_config);
+ if (IS_ERR(map))
+ return PTR_ERR(map);
+
+ return ltc2947_core_probe(map, spi_get_device_id(spi)->name);
+}
+
+static const struct spi_device_id ltc2947_id[] = {
+ {"ltc2947", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(spi, ltc2947_id);
+
+static struct spi_driver ltc2947_driver = {
+ .driver = {
+ .name = "ltc2947",
+ .of_match_table = ltc2947_of_match,
+ .pm = &ltc2947_pm_ops,
+ },
+ .probe = ltc2947_probe,
+ .id_table = ltc2947_id,
+};
+module_spi_driver(ltc2947_driver);
+
+MODULE_AUTHOR("Nuno Sa <nuno.sa@analog.com>");
+MODULE_DESCRIPTION("LTC2947 SPI power and energy monitor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/ltc2947.h b/drivers/hwmon/ltc2947.h
new file mode 100644
index 000000000000..5b8ff81a3dba
--- /dev/null
+++ b/drivers/hwmon/ltc2947.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_LTC2947_H
+#define _LINUX_LTC2947_H
+
+struct regmap;
+
+extern const struct of_device_id ltc2947_of_match[];
+extern const struct dev_pm_ops ltc2947_pm_ops;
+
+int ltc2947_core_probe(struct regmap *map, const char *name);
+
+#endif
diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig
index d62d69bb7e49..59859979571d 100644
--- a/drivers/hwmon/pmbus/Kconfig
+++ b/drivers/hwmon/pmbus/Kconfig
@@ -36,6 +36,15 @@ config SENSORS_ADM1275
This driver can also be built as a module. If so, the module will
be called adm1275.
+config SENSORS_BEL_PFE
+ tristate "Bel PFE Compatible Power Supplies"
+ help
+ If you say yes here you get hardware monitoring support for BEL
+ PFE1100 and PFE3000 Power Supplies.
+
+ This driver can also be built as a module. If so, the module will
+ be called bel-pfe.
+
config SENSORS_IBM_CFFPS
tristate "IBM Common Form Factor Power Supply"
depends on LEDS_CLASS
diff --git a/drivers/hwmon/pmbus/Makefile b/drivers/hwmon/pmbus/Makefile
index 03bacfcfd660..3f8c1014938b 100644
--- a/drivers/hwmon/pmbus/Makefile
+++ b/drivers/hwmon/pmbus/Makefile
@@ -6,6 +6,7 @@
obj-$(CONFIG_PMBUS) += pmbus_core.o
obj-$(CONFIG_SENSORS_PMBUS) += pmbus.o
obj-$(CONFIG_SENSORS_ADM1275) += adm1275.o
+obj-$(CONFIG_SENSORS_BEL_PFE) += bel-pfe.o
obj-$(CONFIG_SENSORS_IBM_CFFPS) += ibm-cffps.o
obj-$(CONFIG_SENSORS_INSPUR_IPSPS) += inspur-ipsps.o
obj-$(CONFIG_SENSORS_IR35221) += ir35221.o
diff --git a/drivers/hwmon/pmbus/bel-pfe.c b/drivers/hwmon/pmbus/bel-pfe.c
new file mode 100644
index 000000000000..f236e18f45a5
--- /dev/null
+++ b/drivers/hwmon/pmbus/bel-pfe.c
@@ -0,0 +1,131 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Hardware monitoring driver for BEL PFE family power supplies.
+ *
+ * Copyright (c) 2019 Facebook Inc.
+ */
+
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pmbus.h>
+
+#include "pmbus.h"
+
+enum chips {pfe1100, pfe3000};
+
+/*
+ * Disable the status check for pfe3000 devices, because some devices
+ * report a communication error (invalid command) for the VOUT_MODE
+ * command (0x20) even though the correct VOUT_MODE value (0x16) is
+ * returned; the spurious error leads to an incorrect exponent in
+ * linear mode.
+ */
+static struct pmbus_platform_data pfe3000_plat_data = {
+ .flags = PMBUS_SKIP_STATUS_CHECK,
+};
+
+static struct pmbus_driver_info pfe_driver_info[] = {
+ [pfe1100] = {
+ .pages = 1,
+ .format[PSC_VOLTAGE_IN] = linear,
+ .format[PSC_VOLTAGE_OUT] = linear,
+ .format[PSC_CURRENT_IN] = linear,
+ .format[PSC_CURRENT_OUT] = linear,
+ .format[PSC_POWER] = linear,
+ .format[PSC_TEMPERATURE] = linear,
+ .format[PSC_FAN] = linear,
+
+ .func[0] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT |
+ PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT |
+ PMBUS_HAVE_POUT |
+ PMBUS_HAVE_VIN | PMBUS_HAVE_IIN |
+ PMBUS_HAVE_PIN | PMBUS_HAVE_STATUS_INPUT |
+ PMBUS_HAVE_TEMP | PMBUS_HAVE_TEMP2 |
+ PMBUS_HAVE_STATUS_TEMP |
+ PMBUS_HAVE_FAN12,
+ },
+
+ [pfe3000] = {
+ .pages = 7,
+ .format[PSC_VOLTAGE_IN] = linear,
+ .format[PSC_VOLTAGE_OUT] = linear,
+ .format[PSC_CURRENT_IN] = linear,
+ .format[PSC_CURRENT_OUT] = linear,
+ .format[PSC_POWER] = linear,
+ .format[PSC_TEMPERATURE] = linear,
+ .format[PSC_FAN] = linear,
+
+ /* Page 0: V1. */
+ .func[0] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT |
+ PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT |
+ PMBUS_HAVE_POUT | PMBUS_HAVE_FAN12 |
+ PMBUS_HAVE_VIN | PMBUS_HAVE_IIN |
+ PMBUS_HAVE_PIN | PMBUS_HAVE_STATUS_INPUT |
+ PMBUS_HAVE_TEMP | PMBUS_HAVE_TEMP2 |
+ PMBUS_HAVE_TEMP3 | PMBUS_HAVE_STATUS_TEMP |
+ PMBUS_HAVE_VCAP,
+
+ /* Page 1: Vsb. */
+ .func[1] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT |
+ PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT |
+ PMBUS_HAVE_PIN | PMBUS_HAVE_STATUS_INPUT |
+ PMBUS_HAVE_POUT,
+
+ /*
+ * Page 2: V1 Ishare.
+ * Page 3: Reserved.
+ * Page 4: V1 Cathode.
+ * Page 5: Vsb Cathode.
+ * Page 6: V1 Sense.
+ */
+ .func[2] = PMBUS_HAVE_VOUT,
+ .func[4] = PMBUS_HAVE_VOUT,
+ .func[5] = PMBUS_HAVE_VOUT,
+ .func[6] = PMBUS_HAVE_VOUT,
+ },
+};
+
+static int pfe_pmbus_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int model;
+
+ model = (int)id->driver_data;
+
+ /*
+ * PFE3000-12-069RA devices may not stay in page 0 during device
+ * probe, which leads to probe failure (read status word failed).
+ * So let's set the device to page 0 at the beginning.
+ */
+ if (model == pfe3000) {
+ client->dev.platform_data = &pfe3000_plat_data;
+ i2c_smbus_write_byte_data(client, PMBUS_PAGE, 0);
+ }
+
+ return pmbus_do_probe(client, id, &pfe_driver_info[model]);
+}
+
+static const struct i2c_device_id pfe_device_id[] = {
+ {"pfe1100", pfe1100},
+ {"pfe3000", pfe3000},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, pfe_device_id);
+
+static struct i2c_driver pfe_pmbus_driver = {
+ .driver = {
+ .name = "bel-pfe",
+ },
+ .probe = pfe_pmbus_probe,
+ .remove = pmbus_do_remove,
+ .id_table = pfe_device_id,
+};
+
+module_i2c_driver(pfe_pmbus_driver);
+
+MODULE_AUTHOR("Tao Ren <rentao.bupt@gmail.com>");
+MODULE_DESCRIPTION("PMBus driver for BEL PFE Family Power Supplies");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/pmbus/ibm-cffps.c b/drivers/hwmon/pmbus/ibm-cffps.c
index d44745e498e7..d359b76bcb36 100644
--- a/drivers/hwmon/pmbus/ibm-cffps.c
+++ b/drivers/hwmon/pmbus/ibm-cffps.c
@@ -3,6 +3,7 @@
* Copyright 2017 IBM Corp.
*/
+#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/device.h>
@@ -29,6 +30,10 @@
#define CFFPS_INPUT_HISTORY_CMD 0xD6
#define CFFPS_INPUT_HISTORY_SIZE 100
+#define CFFPS_CCIN_VERSION GENMASK(15, 8)
+#define CFFPS_CCIN_VERSION_1 0x2b
+#define CFFPS_CCIN_VERSION_2 0x2e
+
/* STATUS_MFR_SPECIFIC bits */
#define CFFPS_MFR_FAN_FAULT BIT(0)
#define CFFPS_MFR_THERMAL_FAULT BIT(1)
@@ -39,9 +44,13 @@
#define CFFPS_MFR_VAUX_FAULT BIT(6)
#define CFFPS_MFR_CURRENT_SHARE_WARNING BIT(7)
+/*
+ * LED off state actually relinquishes LED control to PSU firmware, so it can
+ * turn on the LED for faults.
+ */
+#define CFFPS_LED_OFF 0
#define CFFPS_LED_BLINK BIT(0)
#define CFFPS_LED_ON BIT(1)
-#define CFFPS_LED_OFF BIT(2)
#define CFFPS_BLINK_RATE_MS 250
enum {
@@ -54,7 +63,7 @@ enum {
CFFPS_DEBUGFS_NUM_ENTRIES
};
-enum versions { cffps1, cffps2 };
+enum versions { cffps1, cffps2, cffps_unknown };
struct ibm_cffps_input_history {
struct mutex update_lock;
@@ -292,28 +301,38 @@ static int ibm_cffps_read_word_data(struct i2c_client *client, int page,
return rc;
}
-static void ibm_cffps_led_brightness_set(struct led_classdev *led_cdev,
- enum led_brightness brightness)
+static int ibm_cffps_led_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
{
int rc;
+ u8 next_led_state;
struct ibm_cffps *psu = container_of(led_cdev, struct ibm_cffps, led);
if (brightness == LED_OFF) {
- psu->led_state = CFFPS_LED_OFF;
+ next_led_state = CFFPS_LED_OFF;
} else {
brightness = LED_FULL;
+
if (psu->led_state != CFFPS_LED_BLINK)
- psu->led_state = CFFPS_LED_ON;
+ next_led_state = CFFPS_LED_ON;
+ else
+ next_led_state = CFFPS_LED_BLINK;
}
+ dev_dbg(&psu->client->dev, "LED brightness set: %d. Command: %d.\n",
+ brightness, next_led_state);
+
pmbus_set_page(psu->client, 0);
rc = i2c_smbus_write_byte_data(psu->client, CFFPS_SYS_CONFIG_CMD,
- psu->led_state);
+ next_led_state);
if (rc < 0)
- return;
+ return rc;
+ psu->led_state = next_led_state;
led_cdev->brightness = brightness;
+
+ return 0;
}
static int ibm_cffps_led_blink_set(struct led_classdev *led_cdev,
@@ -323,10 +342,7 @@ static int ibm_cffps_led_blink_set(struct led_classdev *led_cdev,
int rc;
struct ibm_cffps *psu = container_of(led_cdev, struct ibm_cffps, led);
- psu->led_state = CFFPS_LED_BLINK;
-
- if (led_cdev->brightness == LED_OFF)
- return 0;
+ dev_dbg(&psu->client->dev, "LED blink set.\n");
pmbus_set_page(psu->client, 0);
@@ -335,6 +351,8 @@ static int ibm_cffps_led_blink_set(struct led_classdev *led_cdev,
if (rc < 0)
return rc;
+ psu->led_state = CFFPS_LED_BLINK;
+ led_cdev->brightness = LED_FULL;
*delay_on = CFFPS_BLINK_RATE_MS;
*delay_off = CFFPS_BLINK_RATE_MS;
@@ -351,7 +369,7 @@ static void ibm_cffps_create_led_class(struct ibm_cffps *psu)
client->addr);
psu->led.name = psu->led_name;
psu->led.max_brightness = LED_FULL;
- psu->led.brightness_set = ibm_cffps_led_brightness_set;
+ psu->led.brightness_set_blocking = ibm_cffps_led_brightness_set;
psu->led.blink_set = ibm_cffps_led_blink_set;
rc = devm_led_classdev_register(dev, &psu->led);
@@ -395,7 +413,7 @@ static int ibm_cffps_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
int i, rc;
- enum versions vs;
+ enum versions vs = cffps_unknown;
struct dentry *debugfs;
struct dentry *ibm_cffps_dir;
struct ibm_cffps *psu;
@@ -405,8 +423,27 @@ static int ibm_cffps_probe(struct i2c_client *client,
vs = (enum versions)md;
else if (id)
vs = (enum versions)id->driver_data;
- else
- vs = cffps1;
+
+ if (vs == cffps_unknown) {
+ u16 ccin_version = CFFPS_CCIN_VERSION_1;
+ int ccin = i2c_smbus_read_word_swapped(client, CFFPS_CCIN_CMD);
+
+ if (ccin > 0)
+ ccin_version = FIELD_GET(CFFPS_CCIN_VERSION, ccin);
+
+ switch (ccin_version) {
+ default:
+ case CFFPS_CCIN_VERSION_1:
+ vs = cffps1;
+ break;
+ case CFFPS_CCIN_VERSION_2:
+ vs = cffps2;
+ break;
+ }
+
+ /* Set the client name to include the version number. */
+ snprintf(client->name, I2C_NAME_SIZE, "cffps%d", vs + 1);
+ }
client->dev.platform_data = &ibm_cffps_pdata;
rc = pmbus_do_probe(client, id, &ibm_cffps_info[vs]);
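
For readers unfamiliar with the bitfield helpers, a plain-C illustration of
the CCIN version extraction above; GENMASK(15, 8) is 0xff00 and FIELD_GET
shifts the masked bits down. The CCIN word here is made up:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t ccin = 0x2e51;			/* hypothetical CCIN word */
	uint16_t mask = 0xff00;			/* GENMASK(15, 8) */
	uint16_t version = (ccin & mask) >> 8;	/* FIELD_GET equivalent */

	printf("ccin version: 0x%02x\n", version);	/* 0x2e -> cffps2 */
	return 0;
}
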
@@ -465,6 +502,7 @@ static int ibm_cffps_probe(struct i2c_client *client,
static const struct i2c_device_id ibm_cffps_id[] = {
{ "ibm_cffps1", cffps1 },
{ "ibm_cffps2", cffps2 },
+ { "ibm_cffps", cffps_unknown },
{}
};
MODULE_DEVICE_TABLE(i2c, ibm_cffps_id);
@@ -478,6 +516,10 @@ static const struct of_device_id ibm_cffps_of_match[] = {
.compatible = "ibm,cffps2",
.data = (void *)cffps2
},
+ {
+ .compatible = "ibm,cffps",
+ .data = (void *)cffps_unknown
+ },
{}
};
MODULE_DEVICE_TABLE(of, ibm_cffps_of_match);
diff --git a/drivers/hwmon/tmp421.c b/drivers/hwmon/tmp421.c
index a94e35cff3e5..83a4fab151d2 100644
--- a/drivers/hwmon/tmp421.c
+++ b/drivers/hwmon/tmp421.c
@@ -127,7 +127,8 @@ static struct tmp421_data *tmp421_update_device(struct device *dev)
mutex_lock(&data->update_lock);
- if (time_after(jiffies, data->last_updated + 2 * HZ) || !data->valid) {
+ if (time_after(jiffies, data->last_updated + (HZ / 2)) ||
+ !data->valid) {
data->config = i2c_smbus_read_byte_data(client,
TMP421_CONFIG_REG_1);
diff --git a/drivers/hwmon/tmp513.c b/drivers/hwmon/tmp513.c
new file mode 100644
index 000000000000..df66e0bc1253
--- /dev/null
+++ b/drivers/hwmon/tmp513.c
@@ -0,0 +1,772 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for Texas Instruments TMP512, TMP513 power monitor chips
+ *
+ * TMP513:
+ * Thermal/Power Management with Triple Remote and
+ * Local Temperature Sensor and Current Shunt Monitor
+ * Datasheet: http://www.ti.com/lit/gpn/tmp513
+ *
+ * TMP512:
+ * Thermal/Power Management with Dual Remote
+ * and Local Temperature Sensor and Current Shunt Monitor
+ * Datasheet: http://www.ti.com/lit/gpn/tmp512
+ *
+ * Copyright (C) 2019 Eric Tremblay <etremblay@distech-controls.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/err.h>
+#include <linux/hwmon.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/util_macros.h>
+
+// Common register definition
+#define TMP51X_SHUNT_CONFIG 0x00
+#define TMP51X_TEMP_CONFIG 0x01
+#define TMP51X_STATUS 0x02
+#define TMP51X_SMBUS_ALERT 0x03
+#define TMP51X_SHUNT_CURRENT_RESULT 0x04
+#define TMP51X_BUS_VOLTAGE_RESULT 0x05
+#define TMP51X_POWER_RESULT 0x06
+#define TMP51X_BUS_CURRENT_RESULT 0x07
+#define TMP51X_LOCAL_TEMP_RESULT 0x08
+#define TMP51X_REMOTE_TEMP_RESULT_1 0x09
+#define TMP51X_REMOTE_TEMP_RESULT_2 0x0A
+#define TMP51X_SHUNT_CURRENT_H_LIMIT 0x0C
+#define TMP51X_SHUNT_CURRENT_L_LIMIT 0x0D
+#define TMP51X_BUS_VOLTAGE_H_LIMIT 0x0E
+#define TMP51X_BUS_VOLTAGE_L_LIMIT 0x0F
+#define TMP51X_POWER_LIMIT 0x10
+#define TMP51X_LOCAL_TEMP_LIMIT 0x11
+#define TMP51X_REMOTE_TEMP_LIMIT_1 0x12
+#define TMP51X_REMOTE_TEMP_LIMIT_2 0x13
+#define TMP51X_SHUNT_CALIBRATION 0x15
+#define TMP51X_N_FACTOR_AND_HYST_1 0x16
+#define TMP51X_N_FACTOR_2 0x17
+#define TMP51X_MAN_ID_REG 0xFE
+#define TMP51X_DEVICE_ID_REG 0xFF
+
+// TMP513 specific register definition
+#define TMP513_REMOTE_TEMP_RESULT_3 0x0B
+#define TMP513_REMOTE_TEMP_LIMIT_3 0x14
+#define TMP513_N_FACTOR_3 0x18
+
+// Manufacturer and device IDs
+#define TMP51X_MANUFACTURER_ID 0x55FF
+
+#define TMP512_DEVICE_ID 0x22FF
+#define TMP513_DEVICE_ID 0x23FF
+
+// Default config
+#define TMP51X_SHUNT_CONFIG_DEFAULT 0x399F
+#define TMP51X_SHUNT_VALUE_DEFAULT 1000
+#define TMP51X_VBUS_RANGE_DEFAULT TMP51X_VBUS_RANGE_32V
+#define TMP51X_PGA_DEFAULT 8
+#define TMP51X_MAX_REGISTER_ADDR 0xFF
+
+#define TMP512_TEMP_CONFIG_DEFAULT 0xBF80
+#define TMP513_TEMP_CONFIG_DEFAULT 0xFF80
+
+// Mask and shift
+#define CURRENT_SENSE_VOLTAGE_320_MASK 0x1800
+#define CURRENT_SENSE_VOLTAGE_160_MASK 0x1000
+#define CURRENT_SENSE_VOLTAGE_80_MASK 0x0800
+#define CURRENT_SENSE_VOLTAGE_40_MASK 0
+
+#define TMP51X_BUS_VOLTAGE_MASK 0x2000
+#define TMP51X_NFACTOR_MASK 0xFF00
+#define TMP51X_HYST_MASK 0x00FF
+
+#define TMP51X_BUS_VOLTAGE_SHIFT 3
+#define TMP51X_TEMP_SHIFT 3
+
+// Alarms
+#define TMP51X_SHUNT_CURRENT_H_LIMIT_POS 15
+#define TMP51X_SHUNT_CURRENT_L_LIMIT_POS 14
+#define TMP51X_BUS_VOLTAGE_H_LIMIT_POS 13
+#define TMP51X_BUS_VOLTAGE_L_LIMIT_POS 12
+#define TMP51X_POWER_LIMIT_POS 11
+#define TMP51X_LOCAL_TEMP_LIMIT_POS 10
+#define TMP51X_REMOTE_TEMP_LIMIT_1_POS 9
+#define TMP51X_REMOTE_TEMP_LIMIT_2_POS 8
+#define TMP513_REMOTE_TEMP_LIMIT_3_POS 7
+
+#define TMP51X_VBUS_RANGE_32V 32000000
+#define TMP51X_VBUS_RANGE_16V 16000000
+
+// Max and Min value
+#define MAX_BUS_VOLTAGE_32_LIMIT 32764
+#define MAX_BUS_VOLTAGE_16_LIMIT 16382
+
+// Max possible value is -256 to +256, but the datasheet indicates -40 to 125.
+#define MAX_TEMP_LIMIT 125000
+#define MIN_TEMP_LIMIT -40000
+
+#define MAX_TEMP_HYST 127500
+
+static const u8 TMP51X_TEMP_INPUT[4] = {
+ TMP51X_LOCAL_TEMP_RESULT,
+ TMP51X_REMOTE_TEMP_RESULT_1,
+ TMP51X_REMOTE_TEMP_RESULT_2,
+ TMP513_REMOTE_TEMP_RESULT_3
+};
+
+static const u8 TMP51X_TEMP_CRIT[4] = {
+ TMP51X_LOCAL_TEMP_LIMIT,
+ TMP51X_REMOTE_TEMP_LIMIT_1,
+ TMP51X_REMOTE_TEMP_LIMIT_2,
+ TMP513_REMOTE_TEMP_LIMIT_3
+};
+
+static const u8 TMP51X_TEMP_CRIT_ALARM[4] = {
+ TMP51X_LOCAL_TEMP_LIMIT_POS,
+ TMP51X_REMOTE_TEMP_LIMIT_1_POS,
+ TMP51X_REMOTE_TEMP_LIMIT_2_POS,
+ TMP513_REMOTE_TEMP_LIMIT_3_POS
+};
+
+static const u8 TMP51X_TEMP_CRIT_HYST[4] = {
+ TMP51X_N_FACTOR_AND_HYST_1,
+ TMP51X_N_FACTOR_AND_HYST_1,
+ TMP51X_N_FACTOR_AND_HYST_1,
+ TMP51X_N_FACTOR_AND_HYST_1
+};
+
+static const u8 TMP51X_CURR_INPUT[2] = {
+ TMP51X_SHUNT_CURRENT_RESULT,
+ TMP51X_BUS_CURRENT_RESULT
+};
+
+static struct regmap_config tmp51x_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 16,
+ .max_register = TMP51X_MAX_REGISTER_ADDR,
+};
+
+enum tmp51x_ids {
+ tmp512, tmp513
+};
+
+struct tmp51x_data {
+ u16 shunt_config;
+ u16 pga_gain;
+ u32 vbus_range_uvolt;
+
+ u16 temp_config;
+ u32 nfactor[3];
+
+ u32 shunt_uohms;
+
+ u32 curr_lsb_ua;
+ u32 pwr_lsb_uw;
+
+ enum tmp51x_ids id;
+ struct regmap *regmap;
+};
+
+// The shift is 5 - ffs(gain): gain 8 -> 1, 4 -> 2, 2 -> 3, 1 -> 4
+static inline u8 tmp51x_get_pga_shift(struct tmp51x_data *data)
+{
+ return 5 - ffs(data->pga_gain);
+}
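+
A tiny userspace check (illustration only) of the mapping; ffs() here is
the POSIX function, which matches the kernel helper for nonzero input:

#include <stdio.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	int gains[] = { 1, 2, 4, 8 };
	int i;

	for (i = 0; i < 4; i++)		/* prints 1->4, 2->3, 4->2, 8->1 */
		printf("gain %d -> shift %d\n", gains[i], 5 - ffs(gains[i]));
	return 0;
}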
+
+static int tmp51x_get_value(struct tmp51x_data *data, u8 reg, u8 pos,
+ unsigned int regval, long *val)
+{
+ switch (reg) {
+ case TMP51X_STATUS:
+ *val = (regval >> pos) & 1;
+ break;
+ case TMP51X_SHUNT_CURRENT_RESULT:
+ case TMP51X_SHUNT_CURRENT_H_LIMIT:
+ case TMP51X_SHUNT_CURRENT_L_LIMIT:
+ /*
+ * The value is read as a voltage by the chip but reported as
+ * a current to the user.
+ * 2's complement number shifted by one to four bits depending
+ * on the pga gain setting. 1lsb = 10uV
+ */
+ *val = sign_extend32(regval, 17 - tmp51x_get_pga_shift(data));
+ *val = DIV_ROUND_CLOSEST(*val * 10000, data->shunt_uohms);
+ break;
+ case TMP51X_BUS_VOLTAGE_RESULT:
+ case TMP51X_BUS_VOLTAGE_H_LIMIT:
+ case TMP51X_BUS_VOLTAGE_L_LIMIT:
+ // 1lsb = 4mV
+ *val = (regval >> TMP51X_BUS_VOLTAGE_SHIFT) * 4;
+ break;
+ case TMP51X_POWER_RESULT:
+ case TMP51X_POWER_LIMIT:
+ // Power = (current * BusVoltage) / 5000
+ *val = regval * data->pwr_lsb_uw;
+ break;
+ case TMP51X_BUS_CURRENT_RESULT:
+ // Current = (ShuntVoltage * CalibrationRegister) / 4096
+ *val = sign_extend32(regval, 16) * data->curr_lsb_ua;
+ *val = DIV_ROUND_CLOSEST(*val, 1000);
+ break;
+ case TMP51X_LOCAL_TEMP_RESULT:
+ case TMP51X_REMOTE_TEMP_RESULT_1:
+ case TMP51X_REMOTE_TEMP_RESULT_2:
+ case TMP513_REMOTE_TEMP_RESULT_3:
+ case TMP51X_LOCAL_TEMP_LIMIT:
+ case TMP51X_REMOTE_TEMP_LIMIT_1:
+ case TMP51X_REMOTE_TEMP_LIMIT_2:
+ case TMP513_REMOTE_TEMP_LIMIT_3:
+ // 1lsb = 0.0625 degrees centigrade
+ *val = sign_extend32(regval, 16) >> TMP51X_TEMP_SHIFT;
+ *val = DIV_ROUND_CLOSEST(*val * 625, 10);
+ break;
+ case TMP51X_N_FACTOR_AND_HYST_1:
+ // 1lsb = 0.5 degrees centigrade
+ *val = (regval & TMP51X_HYST_MASK) * 500;
+ break;
+ default:
+ // Programmer goofed
+ WARN_ON_ONCE(1);
+ *val = 0;
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
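+
A worked example (illustration only) of the shunt-current conversion above,
using the driver's default 1000 uOhm shunt and a made-up reading:

#include <stdio.h>

static long div_round_closest(long x, long d)
{
	return (x >= 0) ? (x + d / 2) / d : (x - d / 2) / d;
}

int main(void)
{
	long raw = 250;			/* hypothetical reading, 1 lsb = 10uV */
	long shunt_uohms = 1000;	/* TMP51X_SHUNT_VALUE_DEFAULT */

	/* 10uV per lsb: mA = raw * 10000 / R_uohm */
	printf("%ld mA\n", div_round_closest(raw * 10000, shunt_uohms)); /* 2500 */
	return 0;
}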
+
+static int tmp51x_set_value(struct tmp51x_data *data, u8 reg, long val)
+{
+ int regval, max_val;
+ u32 mask = 0;
+
+ switch (reg) {
+ case TMP51X_SHUNT_CURRENT_H_LIMIT:
+ case TMP51X_SHUNT_CURRENT_L_LIMIT:
+ /*
+ * The user enters a current value and we convert it to
+ * a voltage. 1lsb = 10uV
+ */
+ val = DIV_ROUND_CLOSEST(val * data->shunt_uohms, 10000);
+ max_val = U16_MAX >> tmp51x_get_pga_shift(data);
+ regval = clamp_val(val, -max_val, max_val);
+ break;
+ case TMP51X_BUS_VOLTAGE_H_LIMIT:
+ case TMP51X_BUS_VOLTAGE_L_LIMIT:
+ // 1lsb = 4mV
+ max_val = (data->vbus_range_uvolt == TMP51X_VBUS_RANGE_32V) ?
+ MAX_BUS_VOLTAGE_32_LIMIT : MAX_BUS_VOLTAGE_16_LIMIT;
+
+ val = clamp_val(DIV_ROUND_CLOSEST(val, 4), 0, max_val);
+ regval = val << TMP51X_BUS_VOLTAGE_SHIFT;
+ break;
+ case TMP51X_POWER_LIMIT:
+ regval = clamp_val(DIV_ROUND_CLOSEST(val, data->pwr_lsb_uw), 0,
+ U16_MAX);
+ break;
+ case TMP51X_LOCAL_TEMP_LIMIT:
+ case TMP51X_REMOTE_TEMP_LIMIT_1:
+ case TMP51X_REMOTE_TEMP_LIMIT_2:
+ case TMP513_REMOTE_TEMP_LIMIT_3:
+ // 1lsb = 0.0625 degrees centigrade
+ val = clamp_val(val, MIN_TEMP_LIMIT, MAX_TEMP_LIMIT);
+ regval = DIV_ROUND_CLOSEST(val * 10, 625) << TMP51X_TEMP_SHIFT;
+ break;
+ case TMP51X_N_FACTOR_AND_HYST_1:
+ // 1lsb = 0.5 degrees centigrade
+ val = clamp_val(val, 0, MAX_TEMP_HYST);
+ regval = DIV_ROUND_CLOSEST(val, 500);
+ mask = TMP51X_HYST_MASK;
+ break;
+ default:
+ // Programmer goofed
+ WARN_ON_ONCE(1);
+ return -EOPNOTSUPP;
+ }
+
+ if (mask == 0)
+ return regmap_write(data->regmap, reg, regval);
+ else
+ return regmap_update_bits(data->regmap, reg, mask, regval);
+}
+
+static u8 tmp51x_get_reg(enum hwmon_sensor_types type, u32 attr, int channel)
+{
+ switch (type) {
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_input:
+ return TMP51X_TEMP_INPUT[channel];
+ case hwmon_temp_crit_alarm:
+ return TMP51X_STATUS;
+ case hwmon_temp_crit:
+ return TMP51X_TEMP_CRIT[channel];
+ case hwmon_temp_crit_hyst:
+ return TMP51X_TEMP_CRIT_HYST[channel];
+ }
+ break;
+ case hwmon_in:
+ switch (attr) {
+ case hwmon_in_input:
+ return TMP51X_BUS_VOLTAGE_RESULT;
+ case hwmon_in_lcrit_alarm:
+ case hwmon_in_crit_alarm:
+ return TMP51X_STATUS;
+ case hwmon_in_lcrit:
+ return TMP51X_BUS_VOLTAGE_L_LIMIT;
+ case hwmon_in_crit:
+ return TMP51X_BUS_VOLTAGE_H_LIMIT;
+ }
+ break;
+ case hwmon_curr:
+ switch (attr) {
+ case hwmon_curr_input:
+ return TMP51X_CURR_INPUT[channel];
+ case hwmon_curr_lcrit_alarm:
+ case hwmon_curr_crit_alarm:
+ return TMP51X_STATUS;
+ case hwmon_curr_lcrit:
+ return TMP51X_SHUNT_CURRENT_L_LIMIT;
+ case hwmon_curr_crit:
+ return TMP51X_SHUNT_CURRENT_H_LIMIT;
+ }
+ break;
+ case hwmon_power:
+ switch (attr) {
+ case hwmon_power_input:
+ return TMP51X_POWER_RESULT;
+ case hwmon_power_crit_alarm:
+ return TMP51X_STATUS;
+ case hwmon_power_crit:
+ return TMP51X_POWER_LIMIT;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static u8 tmp51x_get_status_pos(enum hwmon_sensor_types type, u32 attr,
+ int channel)
+{
+ switch (type) {
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_crit_alarm:
+ return TMP51X_TEMP_CRIT_ALARM[channel];
+ }
+ break;
+ case hwmon_in:
+ switch (attr) {
+ case hwmon_in_lcrit_alarm:
+ return TMP51X_BUS_VOLTAGE_L_LIMIT_POS;
+ case hwmon_in_crit_alarm:
+ return TMP51X_BUS_VOLTAGE_H_LIMIT_POS;
+ }
+ break;
+ case hwmon_curr:
+ switch (attr) {
+ case hwmon_curr_lcrit_alarm:
+ return TMP51X_SHUNT_CURRENT_L_LIMIT_POS;
+ case hwmon_curr_crit_alarm:
+ return TMP51X_SHUNT_CURRENT_H_LIMIT_POS;
+ }
+ break;
+ case hwmon_power:
+ switch (attr) {
+ case hwmon_power_crit_alarm:
+ return TMP51X_POWER_LIMIT_POS;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int tmp51x_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct tmp51x_data *data = dev_get_drvdata(dev);
+ int ret;
+ u32 regval;
+ u8 pos = 0, reg = 0;
+
+ reg = tmp51x_get_reg(type, attr, channel);
+ if (reg == 0)
+ return -EOPNOTSUPP;
+
+ if (reg == TMP51X_STATUS)
+ pos = tmp51x_get_status_pos(type, attr, channel);
+
+ ret = regmap_read(data->regmap, reg, &regval);
+ if (ret < 0)
+ return ret;
+
+ return tmp51x_get_value(data, reg, pos, regval, val);
+}
+
+static int tmp51x_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ u8 reg = 0;
+
+ reg = tmp51x_get_reg(type, attr, channel);
+ if (reg == 0)
+ return -EOPNOTSUPP;
+
+ return tmp51x_set_value(dev_get_drvdata(dev), reg, val);
+}
+
+static umode_t tmp51x_is_visible(const void *_data,
+ enum hwmon_sensor_types type, u32 attr,
+ int channel)
+{
+ const struct tmp51x_data *data = _data;
+
+ switch (type) {
+ case hwmon_temp:
+ if (data->id == tmp512 && channel == 4)
+ return 0;
+ switch (attr) {
+ case hwmon_temp_input:
+ case hwmon_temp_crit_alarm:
+ return 0444;
+ case hwmon_temp_crit:
+ return 0644;
+ case hwmon_temp_crit_hyst:
+ if (channel == 0)
+ return 0644;
+ return 0444;
+ }
+ break;
+ case hwmon_in:
+ switch (attr) {
+ case hwmon_in_input:
+ case hwmon_in_lcrit_alarm:
+ case hwmon_in_crit_alarm:
+ return 0444;
+ case hwmon_in_lcrit:
+ case hwmon_in_crit:
+ return 0644;
+ }
+ break;
+ case hwmon_curr:
+ if (!data->shunt_uohms)
+ return 0;
+
+ switch (attr) {
+ case hwmon_curr_input:
+ case hwmon_curr_lcrit_alarm:
+ case hwmon_curr_crit_alarm:
+ return 0444;
+ case hwmon_curr_lcrit:
+ case hwmon_curr_crit:
+ return 0644;
+ }
+ break;
+ case hwmon_power:
+ if (!data->shunt_uohms)
+ return 0;
+
+ switch (attr) {
+ case hwmon_power_input:
+ case hwmon_power_crit_alarm:
+ return 0444;
+ case hwmon_power_crit:
+ return 0644;
+ }
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static const struct hwmon_channel_info *tmp51x_info[] = {
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_CRIT_ALARM |
+ HWMON_T_CRIT_HYST,
+ HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_CRIT_ALARM |
+ HWMON_T_CRIT_HYST,
+ HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_CRIT_ALARM |
+ HWMON_T_CRIT_HYST,
+ HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_CRIT_ALARM |
+ HWMON_T_CRIT_HYST),
+ HWMON_CHANNEL_INFO(in,
+ HWMON_I_INPUT | HWMON_I_LCRIT | HWMON_I_LCRIT_ALARM |
+ HWMON_I_CRIT | HWMON_I_CRIT_ALARM),
+ HWMON_CHANNEL_INFO(curr,
+ HWMON_C_INPUT | HWMON_C_LCRIT | HWMON_C_LCRIT_ALARM |
+ HWMON_C_CRIT | HWMON_C_CRIT_ALARM,
+ HWMON_C_INPUT),
+ HWMON_CHANNEL_INFO(power,
+ HWMON_P_INPUT | HWMON_P_CRIT | HWMON_P_CRIT_ALARM),
+ NULL
+};
+
+static const struct hwmon_ops tmp51x_hwmon_ops = {
+ .is_visible = tmp51x_is_visible,
+ .read = tmp51x_read,
+ .write = tmp51x_write,
+};
+
+static const struct hwmon_chip_info tmp51x_chip_info = {
+ .ops = &tmp51x_hwmon_ops,
+ .info = tmp51x_info,
+};
+
+/*
+ * Calibrate the tmp51x following the datasheet method
+ */
+static int tmp51x_calibrate(struct tmp51x_data *data)
+{
+ int vshunt_max = data->pga_gain * 40;
+ u64 max_curr_ma;
+ u32 div;
+
+ /*
+ * If shunt_uohms is equal to 0, the calibration should be set to 0.
+	 * The consequence will be that the current and power measurement engines
+ * of the sensor will not work. Temperature and voltage sensing will
+ * continue to work.
+ */
+ if (data->shunt_uohms == 0)
+ return regmap_write(data->regmap, TMP51X_SHUNT_CALIBRATION, 0);
+
+ max_curr_ma = DIV_ROUND_CLOSEST_ULL(vshunt_max * 1000 * 1000,
+ data->shunt_uohms);
+
+ /*
+ * Calculate the minimal bit resolution for the current and the power.
+ * Those values will be used during register interpretation.
+ */
+ data->curr_lsb_ua = DIV_ROUND_CLOSEST_ULL(max_curr_ma * 1000, 32767);
+ data->pwr_lsb_uw = 20 * data->curr_lsb_ua;
+
+ div = DIV_ROUND_CLOSEST_ULL(data->curr_lsb_ua * data->shunt_uohms,
+ 1000 * 1000);
+
+ return regmap_write(data->regmap, TMP51X_SHUNT_CALIBRATION,
+ DIV_ROUND_CLOSEST(40960, div));
+}
+
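Plugging assumed numbers into the calibration math above makes the flow
easier to check (a sketch only; pga_gain = 8 and a 1 mOhm shunt are
hypothetical values, not taken from the patch):

	/*
	 * vshunt_max  = 8 * 40                = 320 (mV full scale)
	 * max_curr_ma = 320000000 / 1000      = 320000
	 * curr_lsb_ua = 320000000 / 32767     = 9766
	 * pwr_lsb_uw  = 20 * 9766             = 195320
	 * div         = 9766 * 1000 / 1000000 = 10 (rounded)
	 * calibration = 40960 / 10            = 4096
	 */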
+/*
+ * Initialize the configuration and calibration registers.
+ */
+static int tmp51x_init(struct tmp51x_data *data)
+{
+ unsigned int regval;
+ int ret = regmap_write(data->regmap, TMP51X_SHUNT_CONFIG,
+ data->shunt_config);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write(data->regmap, TMP51X_TEMP_CONFIG, data->temp_config);
+ if (ret < 0)
+ return ret;
+
+ // nFactor configuration
+ ret = regmap_update_bits(data->regmap, TMP51X_N_FACTOR_AND_HYST_1,
+ TMP51X_NFACTOR_MASK, data->nfactor[0] << 8);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write(data->regmap, TMP51X_N_FACTOR_2,
+ data->nfactor[1] << 8);
+ if (ret < 0)
+ return ret;
+
+ if (data->id == tmp513) {
+ ret = regmap_write(data->regmap, TMP513_N_FACTOR_3,
+ data->nfactor[2] << 8);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = tmp51x_calibrate(data);
+ if (ret < 0)
+ return ret;
+
+	// Read the status register once before use, as the datasheet proposes
+ return regmap_read(data->regmap, TMP51X_STATUS, &regval);
+}
+
+static const struct i2c_device_id tmp51x_id[] = {
+ { "tmp512", tmp512 },
+ { "tmp513", tmp513 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, tmp51x_id);
+
+static const struct of_device_id tmp51x_of_match[] = {
+ {
+ .compatible = "ti,tmp512",
+ .data = (void *)tmp512
+ },
+ {
+ .compatible = "ti,tmp513",
+ .data = (void *)tmp513
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, tmp51x_of_match);
+
+static int tmp51x_vbus_range_to_reg(struct device *dev,
+ struct tmp51x_data *data)
+{
+ if (data->vbus_range_uvolt == TMP51X_VBUS_RANGE_32V) {
+ data->shunt_config |= TMP51X_BUS_VOLTAGE_MASK;
+ } else if (data->vbus_range_uvolt == TMP51X_VBUS_RANGE_16V) {
+ data->shunt_config &= ~TMP51X_BUS_VOLTAGE_MASK;
+ } else {
+ dev_err(dev, "ti,bus-range-microvolt is invalid: %u\n",
+ data->vbus_range_uvolt);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int tmp51x_pga_gain_to_reg(struct device *dev, struct tmp51x_data *data)
+{
+ if (data->pga_gain == 8) {
+ data->shunt_config |= CURRENT_SENSE_VOLTAGE_320_MASK;
+ } else if (data->pga_gain == 4) {
+ data->shunt_config |= CURRENT_SENSE_VOLTAGE_160_MASK;
+ } else if (data->pga_gain == 2) {
+ data->shunt_config |= CURRENT_SENSE_VOLTAGE_80_MASK;
+ } else if (data->pga_gain == 1) {
+ data->shunt_config |= CURRENT_SENSE_VOLTAGE_40_MASK;
+ } else {
+ dev_err(dev, "ti,pga-gain is invalid: %u\n", data->pga_gain);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int tmp51x_read_properties(struct device *dev, struct tmp51x_data *data)
+{
+ int ret;
+ u32 nfactor[3];
+ u32 val;
+
+ ret = device_property_read_u32(dev, "shunt-resistor-micro-ohms", &val);
+ data->shunt_uohms = (ret >= 0) ? val : TMP51X_SHUNT_VALUE_DEFAULT;
+
+ ret = device_property_read_u32(dev, "ti,bus-range-microvolt", &val);
+ data->vbus_range_uvolt = (ret >= 0) ? val : TMP51X_VBUS_RANGE_DEFAULT;
+ ret = tmp51x_vbus_range_to_reg(dev, data);
+ if (ret < 0)
+ return ret;
+
+ ret = device_property_read_u32(dev, "ti,pga-gain", &val);
+ data->pga_gain = (ret >= 0) ? val : TMP51X_PGA_DEFAULT;
+ ret = tmp51x_pga_gain_to_reg(dev, data);
+ if (ret < 0)
+ return ret;
+
+ ret = device_property_read_u32_array(dev, "ti,nfactor", nfactor,
+ (data->id == tmp513) ? 3 : 2);
+ if (ret >= 0)
+		memcpy(data->nfactor, nfactor,
+		       ((data->id == tmp513) ? 3 : 2) * sizeof(*nfactor));
+
+ // Check if shunt value is compatible with pga-gain
+ if (data->shunt_uohms > data->pga_gain * 40 * 1000 * 1000) {
+ dev_err(dev, "shunt-resistor: %u too big for pga_gain: %u\n",
+ data->shunt_uohms, data->pga_gain);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void tmp51x_use_default(struct tmp51x_data *data)
+{
+ data->vbus_range_uvolt = TMP51X_VBUS_RANGE_DEFAULT;
+ data->pga_gain = TMP51X_PGA_DEFAULT;
+ data->shunt_uohms = TMP51X_SHUNT_VALUE_DEFAULT;
+}
+
+static int tmp51x_configure(struct device *dev, struct tmp51x_data *data)
+{
+ data->shunt_config = TMP51X_SHUNT_CONFIG_DEFAULT;
+ data->temp_config = (data->id == tmp513) ?
+ TMP513_TEMP_CONFIG_DEFAULT : TMP512_TEMP_CONFIG_DEFAULT;
+
+ if (dev->of_node)
+ return tmp51x_read_properties(dev, data);
+
+ tmp51x_use_default(data);
+
+ return 0;
+}
+
+static int tmp51x_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct tmp51x_data *data;
+ struct device *hwmon_dev;
+ int ret;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ if (client->dev.of_node)
+ data->id = (enum tmp51x_ids)device_get_match_data(&client->dev);
+ else
+ data->id = id->driver_data;
+
+ ret = tmp51x_configure(dev, data);
+ if (ret < 0) {
+ dev_err(dev, "error configuring the device: %d\n", ret);
+ return ret;
+ }
+
+ data->regmap = devm_regmap_init_i2c(client, &tmp51x_regmap_config);
+ if (IS_ERR(data->regmap)) {
+ dev_err(dev, "failed to allocate register map\n");
+ return PTR_ERR(data->regmap);
+ }
+
+ ret = tmp51x_init(data);
+ if (ret < 0) {
+ dev_err(dev, "error configuring the device: %d\n", ret);
+ return -ENODEV;
+ }
+
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name,
+ data,
+ &tmp51x_chip_info,
+ NULL);
+ if (IS_ERR(hwmon_dev))
+ return PTR_ERR(hwmon_dev);
+
+ dev_dbg(dev, "power monitor %s\n", id->name);
+
+ return 0;
+}
+
+static struct i2c_driver tmp51x_driver = {
+ .driver = {
+ .name = "tmp51x",
+ .of_match_table = of_match_ptr(tmp51x_of_match),
+ },
+ .probe = tmp51x_probe,
+ .id_table = tmp51x_id,
+};
+
+module_i2c_driver(tmp51x_driver);
+
+MODULE_AUTHOR("Eric Tremblay <etremblay@distechcontrols.com>");
+MODULE_DESCRIPTION("tmp51x driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/w83793.c b/drivers/hwmon/w83793.c
index 9df48b70c70c..a0307e6761b8 100644
--- a/drivers/hwmon/w83793.c
+++ b/drivers/hwmon/w83793.c
@@ -2096,7 +2096,7 @@ END:
static u8 w83793_read_value(struct i2c_client *client, u16 reg)
{
struct w83793_data *data = i2c_get_clientdata(client);
- u8 res = 0xff;
+ u8 res;
u8 new_bank = reg >> 8;
new_bank |= data->bank & 0xfc;
diff --git a/drivers/hwtracing/coresight/Kconfig b/drivers/hwtracing/coresight/Kconfig
index 7a9f5fb08330..6ff30e25af55 100644
--- a/drivers/hwtracing/coresight/Kconfig
+++ b/drivers/hwtracing/coresight/Kconfig
@@ -4,6 +4,7 @@
#
menuconfig CORESIGHT
bool "CoreSight Tracing Support"
+ depends on ARM || ARM64
depends on OF || ACPI
select ARM_AMBA
select PERF_EVENTS
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
index 219c10eb752c..ce41482431f9 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
@@ -217,6 +217,7 @@ static ssize_t reset_store(struct device *dev,
/* No start-stop filtering for ViewInst */
config->vissctlr = 0x0;
+ config->vipcssctlr = 0x0;
/* Disable seq events */
for (i = 0; i < drvdata->nrseqstate-1; i++)
@@ -238,6 +239,7 @@ static ssize_t reset_store(struct device *dev,
for (i = 0; i < drvdata->nr_resource; i++)
config->res_ctrl[i] = 0x0;
+ config->ss_idx = 0x0;
for (i = 0; i < drvdata->nr_ss_cmp; i++) {
config->ss_ctrl[i] = 0x0;
config->ss_pe_cmp[i] = 0x0;
@@ -296,8 +298,6 @@ static ssize_t mode_store(struct device *dev,
spin_lock(&drvdata->spinlock);
config->mode = val & ETMv4_MODE_ALL;
- etm4_set_mode_exclude(drvdata,
- config->mode & ETM_MODE_EXCLUDE ? true : false);
if (drvdata->instrp0 == true) {
/* start by clearing instruction P0 field */
@@ -652,10 +652,13 @@ static ssize_t cyc_threshold_store(struct device *dev,
if (kstrtoul(buf, 16, &val))
return -EINVAL;
+
+ /* mask off max threshold before checking min value */
+ val &= ETM_CYC_THRESHOLD_MASK;
if (val < drvdata->ccitmin)
return -EINVAL;
- config->ccctlr = val & ETM_CYC_THRESHOLD_MASK;
+ config->ccctlr = val;
return size;
}
static DEVICE_ATTR_RW(cyc_threshold);
@@ -686,14 +689,16 @@ static ssize_t bb_ctrl_store(struct device *dev,
return -EINVAL;
if (!drvdata->nr_addr_cmp)
return -EINVAL;
+
/*
- * Bit[7:0] selects which address range comparator is used for
- * branch broadcast control.
+ * Bit[8] controls include(1) / exclude(0), bits[0-7] select
+ * individual range comparators. If include then at least 1
+ * range must be selected.
*/
- if (BMVAL(val, 0, 7) > drvdata->nr_addr_cmp)
+ if ((val & BIT(8)) && (BMVAL(val, 0, 7) == 0))
return -EINVAL;
- config->bb_ctrl = val;
+ config->bb_ctrl = val & GENMASK(8, 0);
return size;
}
static DEVICE_ATTR_RW(bb_ctrl);
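To make the new bb_ctrl semantics concrete, a few hypothetical values a user
might write to the sysfs file (illustrative only, not from the patch):

	/*
	 * 0x101 - bit[8] set (include) + range comparator 0 : accepted
	 * 0x100 - include mode but no range selected        : -EINVAL
	 * 0x003 - exclude mode, range comparators 0 and 1   : accepted
	 * Bits above bit[8] are silently dropped by GENMASK(8, 0).
	 */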
@@ -738,7 +743,7 @@ static ssize_t s_exlevel_vinst_show(struct device *dev,
struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etmv4_config *config = &drvdata->config;
- val = BMVAL(config->vinst_ctrl, 16, 19);
+ val = (config->vinst_ctrl & ETM_EXLEVEL_S_VICTLR_MASK) >> 16;
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
@@ -754,8 +759,8 @@ static ssize_t s_exlevel_vinst_store(struct device *dev,
return -EINVAL;
spin_lock(&drvdata->spinlock);
- /* clear all EXLEVEL_S bits (bit[18] is never implemented) */
- config->vinst_ctrl &= ~(BIT(16) | BIT(17) | BIT(19));
+ /* clear all EXLEVEL_S bits */
+ config->vinst_ctrl &= ~(ETM_EXLEVEL_S_VICTLR_MASK);
/* enable instruction tracing for corresponding exception level */
val &= drvdata->s_ex_level;
config->vinst_ctrl |= (val << 16);
@@ -773,7 +778,7 @@ static ssize_t ns_exlevel_vinst_show(struct device *dev,
struct etmv4_config *config = &drvdata->config;
/* EXLEVEL_NS, bits[23:20] */
- val = BMVAL(config->vinst_ctrl, 20, 23);
+ val = (config->vinst_ctrl & ETM_EXLEVEL_NS_VICTLR_MASK) >> 20;
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
@@ -789,8 +794,8 @@ static ssize_t ns_exlevel_vinst_store(struct device *dev,
return -EINVAL;
spin_lock(&drvdata->spinlock);
- /* clear EXLEVEL_NS bits (bit[23] is never implemented */
- config->vinst_ctrl &= ~(BIT(20) | BIT(21) | BIT(22));
+ /* clear EXLEVEL_NS bits */
+ config->vinst_ctrl &= ~(ETM_EXLEVEL_NS_VICTLR_MASK);
/* enable instruction tracing for corresponding exception level */
val &= drvdata->ns_ex_level;
config->vinst_ctrl |= (val << 20);
@@ -966,8 +971,12 @@ static ssize_t addr_range_store(struct device *dev,
unsigned long val1, val2;
struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etmv4_config *config = &drvdata->config;
+ int elements, exclude;
+
+ elements = sscanf(buf, "%lx %lx %x", &val1, &val2, &exclude);
- if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
+	/* exclude is optional, but we need at least two parameters */
+ if (elements < 2)
return -EINVAL;
/* lower address comparator cannot have a higher address value */
if (val1 > val2)
@@ -995,9 +1004,11 @@ static ssize_t addr_range_store(struct device *dev,
/*
* Program include or exclude control bits for vinst or vdata
* whenever we change addr comparators to ETM_ADDR_TYPE_RANGE
+	 * (use the supplied value, or default to the bit set in 'mode')
*/
- etm4_set_mode_exclude(drvdata,
- config->mode & ETM_MODE_EXCLUDE ? true : false);
+ if (elements != 3)
+ exclude = config->mode & ETM_MODE_EXCLUDE;
+ etm4_set_mode_exclude(drvdata, exclude ? true : false);
spin_unlock(&drvdata->spinlock);
return size;
@@ -1054,8 +1065,6 @@ static ssize_t addr_start_store(struct device *dev,
config->addr_val[idx] = (u64)val;
config->addr_type[idx] = ETM_ADDR_TYPE_START;
config->vissctlr |= BIT(idx);
- /* SSSTATUS, bit[9] - turn on start/stop logic */
- config->vinst_ctrl |= BIT(9);
spin_unlock(&drvdata->spinlock);
return size;
}
@@ -1111,8 +1120,6 @@ static ssize_t addr_stop_store(struct device *dev,
config->addr_val[idx] = (u64)val;
config->addr_type[idx] = ETM_ADDR_TYPE_STOP;
config->vissctlr |= BIT(idx + 16);
- /* SSSTATUS, bit[9] - turn on start/stop logic */
- config->vinst_ctrl |= BIT(9);
spin_unlock(&drvdata->spinlock);
return size;
}
@@ -1228,6 +1235,131 @@ static ssize_t addr_context_store(struct device *dev,
}
static DEVICE_ATTR_RW(addr_context);
+static ssize_t addr_exlevel_s_ns_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ u8 idx;
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ spin_lock(&drvdata->spinlock);
+ idx = config->addr_idx;
+ val = BMVAL(config->addr_acc[idx], 8, 14);
+ spin_unlock(&drvdata->spinlock);
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t addr_exlevel_s_ns_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ u8 idx;
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ if (kstrtoul(buf, 0, &val))
+ return -EINVAL;
+
+ if (val & ~((GENMASK(14, 8) >> 8)))
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+ idx = config->addr_idx;
+ /* clear Exlevel_ns & Exlevel_s bits[14:12, 11:8], bit[15] is res0 */
+ config->addr_acc[idx] &= ~(GENMASK(14, 8));
+ config->addr_acc[idx] |= (val << 8);
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_RW(addr_exlevel_s_ns);
+
+static const char * const addr_type_names[] = {
+ "unused",
+ "single",
+ "range",
+ "start",
+ "stop"
+};
+
+static ssize_t addr_cmp_view_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u8 idx, addr_type;
+ unsigned long addr_v, addr_v2, addr_ctrl;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+ int size = 0;
+ bool exclude = false;
+
+ spin_lock(&drvdata->spinlock);
+ idx = config->addr_idx;
+ addr_v = config->addr_val[idx];
+ addr_ctrl = config->addr_acc[idx];
+ addr_type = config->addr_type[idx];
+ if (addr_type == ETM_ADDR_TYPE_RANGE) {
+ if (idx & 0x1) {
+ idx -= 1;
+ addr_v2 = addr_v;
+ addr_v = config->addr_val[idx];
+ } else {
+ addr_v2 = config->addr_val[idx + 1];
+ }
+ exclude = config->viiectlr & BIT(idx / 2 + 16);
+ }
+ spin_unlock(&drvdata->spinlock);
+ if (addr_type) {
+ size = scnprintf(buf, PAGE_SIZE, "addr_cmp[%i] %s %#lx", idx,
+ addr_type_names[addr_type], addr_v);
+ if (addr_type == ETM_ADDR_TYPE_RANGE) {
+ size += scnprintf(buf + size, PAGE_SIZE - size,
+ " %#lx %s", addr_v2,
+ exclude ? "exclude" : "include");
+ }
+ size += scnprintf(buf + size, PAGE_SIZE - size,
+ " ctrl(%#lx)\n", addr_ctrl);
+ } else {
+ size = scnprintf(buf, PAGE_SIZE, "addr_cmp[%i] unused\n", idx);
+ }
+ return size;
+}
+static DEVICE_ATTR_RO(addr_cmp_view);
+
+static ssize_t vinst_pe_cmp_start_stop_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ if (!drvdata->nr_pe_cmp)
+ return -EINVAL;
+ val = config->vipcssctlr;
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+static ssize_t vinst_pe_cmp_start_stop_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ if (kstrtoul(buf, 16, &val))
+ return -EINVAL;
+ if (!drvdata->nr_pe_cmp)
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+ config->vipcssctlr = val;
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_RW(vinst_pe_cmp_start_stop);
+
static ssize_t seq_idx_show(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -1324,8 +1456,8 @@ static ssize_t seq_event_store(struct device *dev,
spin_lock(&drvdata->spinlock);
idx = config->seq_idx;
- /* RST, bits[7:0] */
- config->seq_ctrl[idx] = val & 0xFF;
+ /* Seq control has two masks B[15:8] F[7:0] */
+ config->seq_ctrl[idx] = val & 0xFFFF;
spin_unlock(&drvdata->spinlock);
return size;
}
@@ -1580,12 +1712,129 @@ static ssize_t res_ctrl_store(struct device *dev,
if (idx % 2 != 0)
/* PAIRINV, bit[21] */
val &= ~BIT(21);
- config->res_ctrl[idx] = val;
+ config->res_ctrl[idx] = val & GENMASK(21, 0);
spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(res_ctrl);
+static ssize_t sshot_idx_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ val = config->ss_idx;
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t sshot_idx_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ if (kstrtoul(buf, 16, &val))
+ return -EINVAL;
+ if (val >= drvdata->nr_ss_cmp)
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+ config->ss_idx = val;
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_RW(sshot_idx);
+
+static ssize_t sshot_ctrl_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ spin_lock(&drvdata->spinlock);
+ val = config->ss_ctrl[config->ss_idx];
+ spin_unlock(&drvdata->spinlock);
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t sshot_ctrl_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ u8 idx;
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ if (kstrtoul(buf, 16, &val))
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+ idx = config->ss_idx;
+ config->ss_ctrl[idx] = val & GENMASK(24, 0);
+ /* must clear bit 31 in related status register on programming */
+ config->ss_status[idx] &= ~BIT(31);
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_RW(sshot_ctrl);
+
+static ssize_t sshot_status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ spin_lock(&drvdata->spinlock);
+ val = config->ss_status[config->ss_idx];
+ spin_unlock(&drvdata->spinlock);
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+static DEVICE_ATTR_RO(sshot_status);
+
+static ssize_t sshot_pe_ctrl_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ spin_lock(&drvdata->spinlock);
+ val = config->ss_pe_cmp[config->ss_idx];
+ spin_unlock(&drvdata->spinlock);
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t sshot_pe_ctrl_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ u8 idx;
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ if (kstrtoul(buf, 16, &val))
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+ idx = config->ss_idx;
+ config->ss_pe_cmp[idx] = val & GENMASK(7, 0);
+ /* must clear bit 31 in related status register on programming */
+ config->ss_status[idx] &= ~BIT(31);
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_RW(sshot_pe_ctrl);
+
static ssize_t ctxid_idx_show(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -1714,6 +1963,7 @@ static ssize_t ctxid_masks_store(struct device *dev,
unsigned long val1, val2, mask;
struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etmv4_config *config = &drvdata->config;
+ int nr_inputs;
/*
* Don't use contextID tracing if coming from a PID namespace. See
@@ -1729,7 +1979,9 @@ static ssize_t ctxid_masks_store(struct device *dev,
*/
if (!drvdata->ctxid_size || !drvdata->numcidc)
return -EINVAL;
- if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
+ /* one mask if <= 4 comparators, two for up to 8 */
+ nr_inputs = sscanf(buf, "%lx %lx", &val1, &val2);
+ if ((drvdata->numcidc > 4) && (nr_inputs != 2))
return -EINVAL;
spin_lock(&drvdata->spinlock);
@@ -1903,6 +2155,7 @@ static ssize_t vmid_masks_store(struct device *dev,
unsigned long val1, val2, mask;
struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etmv4_config *config = &drvdata->config;
+ int nr_inputs;
/*
* only implemented when vmid tracing is enabled, i.e. at least one
@@ -1910,7 +2163,9 @@ static ssize_t vmid_masks_store(struct device *dev,
*/
if (!drvdata->vmid_size || !drvdata->numvmidc)
return -EINVAL;
- if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
+ /* one mask if <= 4 comparators, two for up to 8 */
+ nr_inputs = sscanf(buf, "%lx %lx", &val1, &val2);
+ if ((drvdata->numvmidc > 4) && (nr_inputs != 2))
return -EINVAL;
spin_lock(&drvdata->spinlock);
@@ -2033,6 +2288,13 @@ static struct attribute *coresight_etmv4_attrs[] = {
&dev_attr_addr_stop.attr,
&dev_attr_addr_ctxtype.attr,
&dev_attr_addr_context.attr,
+ &dev_attr_addr_exlevel_s_ns.attr,
+ &dev_attr_addr_cmp_view.attr,
+ &dev_attr_vinst_pe_cmp_start_stop.attr,
+ &dev_attr_sshot_idx.attr,
+ &dev_attr_sshot_ctrl.attr,
+ &dev_attr_sshot_pe_ctrl.attr,
+ &dev_attr_sshot_status.attr,
&dev_attr_seq_idx.attr,
&dev_attr_seq_state.attr,
&dev_attr_seq_event.attr,
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c
index a128b5063f46..dc3f507e7562 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x.c
@@ -18,6 +18,7 @@
#include <linux/stat.h>
#include <linux/clk.h>
#include <linux/cpu.h>
+#include <linux/cpu_pm.h>
#include <linux/coresight.h>
#include <linux/coresight-pmu.h>
#include <linux/pm_wakeup.h>
@@ -26,6 +27,7 @@
#include <linux/uaccess.h>
#include <linux/perf_event.h>
#include <linux/pm_runtime.h>
+#include <linux/property.h>
#include <asm/sections.h>
#include <asm/local.h>
#include <asm/virt.h>
@@ -37,6 +39,15 @@ static int boot_enable;
module_param(boot_enable, int, 0444);
MODULE_PARM_DESC(boot_enable, "Enable tracing on boot");
+#define PARAM_PM_SAVE_FIRMWARE 0 /* save self-hosted state as per firmware */
+#define PARAM_PM_SAVE_NEVER 1 /* never save any state */
+#define PARAM_PM_SAVE_SELF_HOSTED 2 /* save self-hosted state only */
+
+static int pm_save_enable = PARAM_PM_SAVE_FIRMWARE;
+module_param(pm_save_enable, int, 0444);
+MODULE_PARM_DESC(pm_save_enable,
+ "Save/restore state on power down: 1 = never, 2 = self-hosted");
+
/* The number of ETMv4 currently registered */
static int etm4_count;
static struct etmv4_drvdata *etmdrvdata[NR_CPUS];
@@ -54,6 +65,14 @@ static void etm4_os_unlock(struct etmv4_drvdata *drvdata)
isb();
}
+static void etm4_os_lock(struct etmv4_drvdata *drvdata)
+{
+ /* Writing 0x1 to TRCOSLAR locks the trace registers */
+ writel_relaxed(0x1, drvdata->base + TRCOSLAR);
+ drvdata->os_unlock = false;
+ isb();
+}
+
static bool etm4_arch_supported(u8 arch)
{
/* Mask out the minor version number */
@@ -149,6 +168,9 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata)
drvdata->base + TRCRSCTLRn(i));
for (i = 0; i < drvdata->nr_ss_cmp; i++) {
+ /* always clear status bit on restart if using single-shot */
+ if (config->ss_ctrl[i] || config->ss_pe_cmp[i])
+ config->ss_status[i] &= ~BIT(31);
writel_relaxed(config->ss_ctrl[i],
drvdata->base + TRCSSCCRn(i));
writel_relaxed(config->ss_status[i],
@@ -448,6 +470,9 @@ static void etm4_disable_hw(void *info)
{
u32 control;
struct etmv4_drvdata *drvdata = info;
+ struct etmv4_config *config = &drvdata->config;
+ struct device *etm_dev = &drvdata->csdev->dev;
+ int i;
CS_UNLOCK(drvdata->base);
@@ -470,6 +495,18 @@ static void etm4_disable_hw(void *info)
isb();
writel_relaxed(control, drvdata->base + TRCPRGCTLR);
+ /* wait for TRCSTATR.PMSTABLE to go to '1' */
+ if (coresight_timeout(drvdata->base, TRCSTATR,
+ TRCSTATR_PMSTABLE_BIT, 1))
+ dev_err(etm_dev,
+ "timeout while waiting for PM stable Trace Status\n");
+
+ /* read the status of the single shot comparators */
+ for (i = 0; i < drvdata->nr_ss_cmp; i++) {
+ config->ss_status[i] =
+ readl_relaxed(drvdata->base + TRCSSCSRn(i));
+ }
+
coresight_disclaim_device_unlocked(drvdata->base);
CS_LOCK(drvdata->base);
@@ -576,6 +613,7 @@ static void etm4_init_arch_data(void *info)
u32 etmidr4;
u32 etmidr5;
struct etmv4_drvdata *drvdata = info;
+ int i;
/* Make sure all registers are accessible */
etm4_os_unlock(drvdata);
@@ -629,6 +667,7 @@ static void etm4_init_arch_data(void *info)
	 * TRCARCHMAJ, bits[11:8] architecture major version number
*/
drvdata->arch = BMVAL(etmidr1, 4, 11);
+ drvdata->config.arch = drvdata->arch;
/* maximum size of resources */
etmidr2 = readl_relaxed(drvdata->base + TRCIDR2);
@@ -698,9 +737,14 @@ static void etm4_init_arch_data(void *info)
drvdata->nr_resource = BMVAL(etmidr4, 16, 19) + 1;
/*
* NUMSSCC, bits[23:20] the number of single-shot
- * comparator control for tracing
+ * comparator control for tracing. Read any status regs as these
+ * also contain RO capability data.
*/
drvdata->nr_ss_cmp = BMVAL(etmidr4, 20, 23);
+ for (i = 0; i < drvdata->nr_ss_cmp; i++) {
+ drvdata->config.ss_status[i] =
+ readl_relaxed(drvdata->base + TRCSSCSRn(i));
+ }
/* NUMCIDC, bits[27:24] number of Context ID comparators for tracing */
drvdata->numcidc = BMVAL(etmidr4, 24, 27);
/* NUMVMIDC, bits[31:28] number of VMID comparators for tracing */
@@ -780,6 +824,7 @@ static u64 etm4_get_ns_access_type(struct etmv4_config *config)
static u64 etm4_get_access_type(struct etmv4_config *config)
{
u64 access_type = etm4_get_ns_access_type(config);
+ u64 s_hyp = (config->arch & 0x0f) >= 0x4 ? ETM_EXLEVEL_S_HYP : 0;
/*
* EXLEVEL_S, bits[11:8], don't trace anything happening
@@ -787,7 +832,8 @@ static u64 etm4_get_access_type(struct etmv4_config *config)
*/
access_type |= (ETM_EXLEVEL_S_APP |
ETM_EXLEVEL_S_OS |
- ETM_EXLEVEL_S_HYP);
+ s_hyp |
+ ETM_EXLEVEL_S_MON);
return access_type;
}
@@ -865,6 +911,7 @@ static void etm4_set_default_filter(struct etmv4_config *config)
* in the started state
*/
config->vinst_ctrl |= BIT(9);
+ config->mode |= ETM_MODE_VIEWINST_STARTSTOP;
/* No start-stop filtering for ViewInst */
config->vissctlr = 0x0;
@@ -1085,6 +1132,288 @@ static void etm4_init_trace_id(struct etmv4_drvdata *drvdata)
drvdata->trcid = coresight_get_trace_id(drvdata->cpu);
}
+#ifdef CONFIG_CPU_PM
+static int etm4_cpu_save(struct etmv4_drvdata *drvdata)
+{
+ int i, ret = 0;
+ struct etmv4_save_state *state;
+ struct device *etm_dev = &drvdata->csdev->dev;
+
+ /*
+	 * As recommended by section 3.4.1 ("The procedure when powering down
+	 * the PE") of ARM IHI 0064D
+ */
+ dsb(sy);
+ isb();
+
+ CS_UNLOCK(drvdata->base);
+
+ /* Lock the OS lock to disable trace and external debugger access */
+ etm4_os_lock(drvdata);
+
+ /* wait for TRCSTATR.PMSTABLE to go up */
+ if (coresight_timeout(drvdata->base, TRCSTATR,
+ TRCSTATR_PMSTABLE_BIT, 1)) {
+ dev_err(etm_dev,
+ "timeout while waiting for PM Stable Status\n");
+ etm4_os_unlock(drvdata);
+ ret = -EBUSY;
+ goto out;
+ }
+
+ state = drvdata->save_state;
+
+ state->trcprgctlr = readl(drvdata->base + TRCPRGCTLR);
+ state->trcprocselr = readl(drvdata->base + TRCPROCSELR);
+ state->trcconfigr = readl(drvdata->base + TRCCONFIGR);
+ state->trcauxctlr = readl(drvdata->base + TRCAUXCTLR);
+ state->trceventctl0r = readl(drvdata->base + TRCEVENTCTL0R);
+ state->trceventctl1r = readl(drvdata->base + TRCEVENTCTL1R);
+ state->trcstallctlr = readl(drvdata->base + TRCSTALLCTLR);
+ state->trctsctlr = readl(drvdata->base + TRCTSCTLR);
+ state->trcsyncpr = readl(drvdata->base + TRCSYNCPR);
+ state->trcccctlr = readl(drvdata->base + TRCCCCTLR);
+ state->trcbbctlr = readl(drvdata->base + TRCBBCTLR);
+ state->trctraceidr = readl(drvdata->base + TRCTRACEIDR);
+ state->trcqctlr = readl(drvdata->base + TRCQCTLR);
+
+ state->trcvictlr = readl(drvdata->base + TRCVICTLR);
+ state->trcviiectlr = readl(drvdata->base + TRCVIIECTLR);
+ state->trcvissctlr = readl(drvdata->base + TRCVISSCTLR);
+ state->trcvipcssctlr = readl(drvdata->base + TRCVIPCSSCTLR);
+ state->trcvdctlr = readl(drvdata->base + TRCVDCTLR);
+ state->trcvdsacctlr = readl(drvdata->base + TRCVDSACCTLR);
+ state->trcvdarcctlr = readl(drvdata->base + TRCVDARCCTLR);
+
+ for (i = 0; i < drvdata->nrseqstate; i++)
+ state->trcseqevr[i] = readl(drvdata->base + TRCSEQEVRn(i));
+
+ state->trcseqrstevr = readl(drvdata->base + TRCSEQRSTEVR);
+ state->trcseqstr = readl(drvdata->base + TRCSEQSTR);
+ state->trcextinselr = readl(drvdata->base + TRCEXTINSELR);
+
+ for (i = 0; i < drvdata->nr_cntr; i++) {
+ state->trccntrldvr[i] = readl(drvdata->base + TRCCNTRLDVRn(i));
+ state->trccntctlr[i] = readl(drvdata->base + TRCCNTCTLRn(i));
+ state->trccntvr[i] = readl(drvdata->base + TRCCNTVRn(i));
+ }
+
+ for (i = 0; i < drvdata->nr_resource * 2; i++)
+ state->trcrsctlr[i] = readl(drvdata->base + TRCRSCTLRn(i));
+
+ for (i = 0; i < drvdata->nr_ss_cmp; i++) {
+ state->trcssccr[i] = readl(drvdata->base + TRCSSCCRn(i));
+ state->trcsscsr[i] = readl(drvdata->base + TRCSSCSRn(i));
+ state->trcsspcicr[i] = readl(drvdata->base + TRCSSPCICRn(i));
+ }
+
+ for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) {
+ state->trcacvr[i] = readl(drvdata->base + TRCACVRn(i));
+ state->trcacatr[i] = readl(drvdata->base + TRCACATRn(i));
+ }
+
+ /*
+ * Data trace stream is architecturally prohibited for A profile cores
+ * so we don't save (or later restore) trcdvcvr and trcdvcmr - As per
+ * section 1.3.4 ("Possible functional configurations of an ETMv4 trace
+ * unit") of ARM IHI 0064D.
+ */
+
+ for (i = 0; i < drvdata->numcidc; i++)
+ state->trccidcvr[i] = readl(drvdata->base + TRCCIDCVRn(i));
+
+ for (i = 0; i < drvdata->numvmidc; i++)
+ state->trcvmidcvr[i] = readl(drvdata->base + TRCVMIDCVRn(i));
+
+ state->trccidcctlr0 = readl(drvdata->base + TRCCIDCCTLR0);
+ state->trccidcctlr1 = readl(drvdata->base + TRCCIDCCTLR1);
+
+ state->trcvmidcctlr0 = readl(drvdata->base + TRCVMIDCCTLR0);
+	state->trcvmidcctlr1 = readl(drvdata->base + TRCVMIDCCTLR1);
+
+ state->trcclaimset = readl(drvdata->base + TRCCLAIMCLR);
+
+ state->trcpdcr = readl(drvdata->base + TRCPDCR);
+
+ /* wait for TRCSTATR.IDLE to go up */
+ if (coresight_timeout(drvdata->base, TRCSTATR, TRCSTATR_IDLE_BIT, 1)) {
+ dev_err(etm_dev,
+ "timeout while waiting for Idle Trace Status\n");
+ etm4_os_unlock(drvdata);
+ ret = -EBUSY;
+ goto out;
+ }
+
+ drvdata->state_needs_restore = true;
+
+ /*
+ * Power can be removed from the trace unit now. We do this to
+ * potentially save power on systems that respect the TRCPDCR_PU
+ * despite requesting software to save/restore state.
+ */
+ writel_relaxed((state->trcpdcr & ~TRCPDCR_PU),
+ drvdata->base + TRCPDCR);
+
+out:
+ CS_LOCK(drvdata->base);
+ return ret;
+}
+
+static void etm4_cpu_restore(struct etmv4_drvdata *drvdata)
+{
+ int i;
+ struct etmv4_save_state *state = drvdata->save_state;
+
+ CS_UNLOCK(drvdata->base);
+
+ writel_relaxed(state->trcclaimset, drvdata->base + TRCCLAIMSET);
+
+ writel_relaxed(state->trcprgctlr, drvdata->base + TRCPRGCTLR);
+ writel_relaxed(state->trcprocselr, drvdata->base + TRCPROCSELR);
+ writel_relaxed(state->trcconfigr, drvdata->base + TRCCONFIGR);
+ writel_relaxed(state->trcauxctlr, drvdata->base + TRCAUXCTLR);
+ writel_relaxed(state->trceventctl0r, drvdata->base + TRCEVENTCTL0R);
+ writel_relaxed(state->trceventctl1r, drvdata->base + TRCEVENTCTL1R);
+ writel_relaxed(state->trcstallctlr, drvdata->base + TRCSTALLCTLR);
+ writel_relaxed(state->trctsctlr, drvdata->base + TRCTSCTLR);
+ writel_relaxed(state->trcsyncpr, drvdata->base + TRCSYNCPR);
+ writel_relaxed(state->trcccctlr, drvdata->base + TRCCCCTLR);
+ writel_relaxed(state->trcbbctlr, drvdata->base + TRCBBCTLR);
+ writel_relaxed(state->trctraceidr, drvdata->base + TRCTRACEIDR);
+ writel_relaxed(state->trcqctlr, drvdata->base + TRCQCTLR);
+
+ writel_relaxed(state->trcvictlr, drvdata->base + TRCVICTLR);
+ writel_relaxed(state->trcviiectlr, drvdata->base + TRCVIIECTLR);
+ writel_relaxed(state->trcvissctlr, drvdata->base + TRCVISSCTLR);
+ writel_relaxed(state->trcvipcssctlr, drvdata->base + TRCVIPCSSCTLR);
+ writel_relaxed(state->trcvdctlr, drvdata->base + TRCVDCTLR);
+ writel_relaxed(state->trcvdsacctlr, drvdata->base + TRCVDSACCTLR);
+ writel_relaxed(state->trcvdarcctlr, drvdata->base + TRCVDARCCTLR);
+
+ for (i = 0; i < drvdata->nrseqstate; i++)
+ writel_relaxed(state->trcseqevr[i],
+ drvdata->base + TRCSEQEVRn(i));
+
+ writel_relaxed(state->trcseqrstevr, drvdata->base + TRCSEQRSTEVR);
+ writel_relaxed(state->trcseqstr, drvdata->base + TRCSEQSTR);
+ writel_relaxed(state->trcextinselr, drvdata->base + TRCEXTINSELR);
+
+ for (i = 0; i < drvdata->nr_cntr; i++) {
+ writel_relaxed(state->trccntrldvr[i],
+ drvdata->base + TRCCNTRLDVRn(i));
+ writel_relaxed(state->trccntctlr[i],
+ drvdata->base + TRCCNTCTLRn(i));
+ writel_relaxed(state->trccntvr[i],
+ drvdata->base + TRCCNTVRn(i));
+ }
+
+ for (i = 0; i < drvdata->nr_resource * 2; i++)
+ writel_relaxed(state->trcrsctlr[i],
+ drvdata->base + TRCRSCTLRn(i));
+
+ for (i = 0; i < drvdata->nr_ss_cmp; i++) {
+ writel_relaxed(state->trcssccr[i],
+ drvdata->base + TRCSSCCRn(i));
+ writel_relaxed(state->trcsscsr[i],
+ drvdata->base + TRCSSCSRn(i));
+ writel_relaxed(state->trcsspcicr[i],
+ drvdata->base + TRCSSPCICRn(i));
+ }
+
+ for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) {
+ writel_relaxed(state->trcacvr[i],
+ drvdata->base + TRCACVRn(i));
+ writel_relaxed(state->trcacatr[i],
+ drvdata->base + TRCACATRn(i));
+ }
+
+ for (i = 0; i < drvdata->numcidc; i++)
+ writel_relaxed(state->trccidcvr[i],
+ drvdata->base + TRCCIDCVRn(i));
+
+ for (i = 0; i < drvdata->numvmidc; i++)
+ writel_relaxed(state->trcvmidcvr[i],
+ drvdata->base + TRCVMIDCVRn(i));
+
+ writel_relaxed(state->trccidcctlr0, drvdata->base + TRCCIDCCTLR0);
+ writel_relaxed(state->trccidcctlr1, drvdata->base + TRCCIDCCTLR1);
+
+ writel_relaxed(state->trcvmidcctlr0, drvdata->base + TRCVMIDCCTLR0);
+	writel_relaxed(state->trcvmidcctlr1, drvdata->base + TRCVMIDCCTLR1);
+
+ writel_relaxed(state->trcclaimset, drvdata->base + TRCCLAIMSET);
+
+ writel_relaxed(state->trcpdcr, drvdata->base + TRCPDCR);
+
+ drvdata->state_needs_restore = false;
+
+ /*
+ * As recommended by section 4.3.7 ("Synchronization when using the
+ * memory-mapped interface") of ARM IHI 0064D
+ */
+ dsb(sy);
+ isb();
+
+ /* Unlock the OS lock to re-enable trace and external debug access */
+ etm4_os_unlock(drvdata);
+ CS_LOCK(drvdata->base);
+}
+
+static int etm4_cpu_pm_notify(struct notifier_block *nb, unsigned long cmd,
+ void *v)
+{
+ struct etmv4_drvdata *drvdata;
+ unsigned int cpu = smp_processor_id();
+
+ if (!etmdrvdata[cpu])
+ return NOTIFY_OK;
+
+ drvdata = etmdrvdata[cpu];
+
+ if (!drvdata->save_state)
+ return NOTIFY_OK;
+
+ if (WARN_ON_ONCE(drvdata->cpu != cpu))
+ return NOTIFY_BAD;
+
+ switch (cmd) {
+ case CPU_PM_ENTER:
+ /* save the state if self-hosted coresight is in use */
+ if (local_read(&drvdata->mode))
+ if (etm4_cpu_save(drvdata))
+ return NOTIFY_BAD;
+ break;
+ case CPU_PM_EXIT:
+ /* fallthrough */
+ case CPU_PM_ENTER_FAILED:
+ if (drvdata->state_needs_restore)
+ etm4_cpu_restore(drvdata);
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block etm4_cpu_pm_nb = {
+ .notifier_call = etm4_cpu_pm_notify,
+};
+
+static int etm4_cpu_pm_register(void)
+{
+ return cpu_pm_register_notifier(&etm4_cpu_pm_nb);
+}
+
+static void etm4_cpu_pm_unregister(void)
+{
+ cpu_pm_unregister_notifier(&etm4_cpu_pm_nb);
+}
+#else
+static int etm4_cpu_pm_register(void) { return 0; }
+static void etm4_cpu_pm_unregister(void) { }
+#endif
+
static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
{
int ret;
@@ -1101,6 +1430,17 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
dev_set_drvdata(dev, drvdata);
+ if (pm_save_enable == PARAM_PM_SAVE_FIRMWARE)
+ pm_save_enable = coresight_loses_context_with_cpu(dev) ?
+ PARAM_PM_SAVE_SELF_HOSTED : PARAM_PM_SAVE_NEVER;
+
+ if (pm_save_enable != PARAM_PM_SAVE_NEVER) {
+ drvdata->save_state = devm_kmalloc(dev,
+ sizeof(struct etmv4_save_state), GFP_KERNEL);
+ if (!drvdata->save_state)
+ return -ENOMEM;
+ }
+
/* Validity for the resource is already checked by the AMBA core */
base = devm_ioremap_resource(dev, res);
if (IS_ERR(base))
@@ -1135,6 +1475,10 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
if (ret < 0)
goto err_arch_supported;
hp_online = ret;
+
+ ret = etm4_cpu_pm_register();
+ if (ret)
+ goto err_arch_supported;
}
cpus_read_unlock();
@@ -1185,6 +1529,8 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
err_arch_supported:
if (--etm4_count == 0) {
+ etm4_cpu_pm_unregister();
+
cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING);
if (hp_online)
cpuhp_remove_state_nocalls(hp_online);
@@ -1211,6 +1557,7 @@ static const struct amba_id etm4_ids[] = {
CS_AMBA_UCI_ID(0x000f0211, uci_id_etm4),/* Qualcomm Kryo */
CS_AMBA_ID(0x000bb802), /* Qualcomm Kryo 385 Cortex-A55 */
CS_AMBA_ID(0x000bb803), /* Qualcomm Kryo 385 Cortex-A75 */
+ CS_AMBA_UCI_ID(0x000cc0af, uci_id_etm4),/* Marvell ThunderX2 */
{},
};
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.h b/drivers/hwtracing/coresight/coresight-etm4x.h
index 4523f10ddd0f..4a695bf90582 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.h
+++ b/drivers/hwtracing/coresight/coresight-etm4x.h
@@ -175,22 +175,28 @@
ETM_MODE_EXCL_USER)
#define TRCSTATR_IDLE_BIT 0
+#define TRCSTATR_PMSTABLE_BIT 1
#define ETM_DEFAULT_ADDR_COMP 0
/* PowerDown Control Register bits */
#define TRCPDCR_PU BIT(3)
-/* secure state access levels */
+/* secure state access levels - TRCACATRn */
#define ETM_EXLEVEL_S_APP BIT(8)
#define ETM_EXLEVEL_S_OS BIT(9)
-#define ETM_EXLEVEL_S_NA BIT(10)
-#define ETM_EXLEVEL_S_HYP BIT(11)
-/* non-secure state access levels */
+#define ETM_EXLEVEL_S_HYP BIT(10)
+#define ETM_EXLEVEL_S_MON BIT(11)
+/* non-secure state access levels - TRCACATRn */
#define ETM_EXLEVEL_NS_APP BIT(12)
#define ETM_EXLEVEL_NS_OS BIT(13)
#define ETM_EXLEVEL_NS_HYP BIT(14)
#define ETM_EXLEVEL_NS_NA BIT(15)
+/* secure / non secure masks - TRCVICTLR, IDR3 */
+#define ETM_EXLEVEL_S_VICTLR_MASK GENMASK(19, 16)
+/* NS MON (EL3) mode never implemented */
+#define ETM_EXLEVEL_NS_VICTLR_MASK GENMASK(22, 20)
+
/**
* struct etmv4_config - configuration information related to an ETMv4
* @mode: Controls various modes supported by this ETM.
@@ -221,6 +227,7 @@
* @cntr_val: Sets or returns the value for a counter.
* @res_idx: Resource index selector.
* @res_ctrl: Controls the selection of the resources in the trace unit.
+ * @ss_idx: Single-shot index selector.
* @ss_ctrl: Controls the corresponding single-shot comparator resource.
* @ss_status: The status of the corresponding single-shot comparator.
* @ss_pe_cmp: Selects the PE comparator inputs for Single-shot control.
@@ -237,6 +244,7 @@
* @vmid_mask0: VM ID comparator mask for comparator 0-3.
* @vmid_mask1: VM ID comparator mask for comparator 4-7.
* @ext_inp: External input selection.
+ * @arch: ETM architecture version (for arch dependent config).
*/
struct etmv4_config {
u32 mode;
@@ -263,6 +271,7 @@ struct etmv4_config {
u32 cntr_val[ETMv4_MAX_CNTR];
u8 res_idx;
u32 res_ctrl[ETM_MAX_RES_SEL];
+ u8 ss_idx;
u32 ss_ctrl[ETM_MAX_SS_CMP];
u32 ss_status[ETM_MAX_SS_CMP];
u32 ss_pe_cmp[ETM_MAX_SS_CMP];
@@ -279,6 +288,66 @@ struct etmv4_config {
u32 vmid_mask0;
u32 vmid_mask1;
u32 ext_inp;
+ u8 arch;
+};
+
+/**
+ * struct etmv4_save_state - state to be preserved when ETM is without power
+ */
+struct etmv4_save_state {
+ u32 trcprgctlr;
+ u32 trcprocselr;
+ u32 trcconfigr;
+ u32 trcauxctlr;
+ u32 trceventctl0r;
+ u32 trceventctl1r;
+ u32 trcstallctlr;
+ u32 trctsctlr;
+ u32 trcsyncpr;
+ u32 trcccctlr;
+ u32 trcbbctlr;
+ u32 trctraceidr;
+ u32 trcqctlr;
+
+ u32 trcvictlr;
+ u32 trcviiectlr;
+ u32 trcvissctlr;
+ u32 trcvipcssctlr;
+ u32 trcvdctlr;
+ u32 trcvdsacctlr;
+ u32 trcvdarcctlr;
+
+ u32 trcseqevr[ETM_MAX_SEQ_STATES];
+ u32 trcseqrstevr;
+ u32 trcseqstr;
+ u32 trcextinselr;
+ u32 trccntrldvr[ETMv4_MAX_CNTR];
+ u32 trccntctlr[ETMv4_MAX_CNTR];
+ u32 trccntvr[ETMv4_MAX_CNTR];
+
+ u32 trcrsctlr[ETM_MAX_RES_SEL * 2];
+
+ u32 trcssccr[ETM_MAX_SS_CMP];
+ u32 trcsscsr[ETM_MAX_SS_CMP];
+ u32 trcsspcicr[ETM_MAX_SS_CMP];
+
+ u64 trcacvr[ETM_MAX_SINGLE_ADDR_CMP];
+ u64 trcacatr[ETM_MAX_SINGLE_ADDR_CMP];
+ u64 trccidcvr[ETMv4_MAX_CTXID_CMP];
+ u32 trcvmidcvr[ETM_MAX_VMID_CMP];
+ u32 trccidcctlr0;
+ u32 trccidcctlr1;
+ u32 trcvmidcctlr0;
+ u32 trcvmidcctlr1;
+
+ u32 trcclaimset;
+
+ u32 cntr_val[ETMv4_MAX_CNTR];
+ u32 seq_state;
+ u32 vinst_ctrl;
+ u32 ss_status[ETM_MAX_SS_CMP];
+
+ u32 trcpdcr;
};
/**
@@ -336,6 +405,8 @@ struct etmv4_config {
* @atbtrig: If the implementation can support ATB triggers
* @lpoverride: If the implementation can support low-power state over.
* @config: structure holding configuration parameters.
+ * @save_state: State to be preserved across power loss
+ * @state_needs_restore: True when there is context to restore after PM exit
*/
struct etmv4_drvdata {
void __iomem *base;
@@ -381,6 +452,8 @@ struct etmv4_drvdata {
bool atbtrig;
bool lpoverride;
struct etmv4_config config;
+ struct etmv4_save_state *save_state;
+ bool state_needs_restore;
};
/* Address comparator access types */
diff --git a/drivers/hwtracing/coresight/coresight-funnel.c b/drivers/hwtracing/coresight/coresight-funnel.c
index 05f7896c3a01..900690a9f7f0 100644
--- a/drivers/hwtracing/coresight/coresight-funnel.c
+++ b/drivers/hwtracing/coresight/coresight-funnel.c
@@ -38,12 +38,14 @@ DEFINE_CORESIGHT_DEVLIST(funnel_devs, "funnel");
* @atclk: optional clock for the core parts of the funnel.
* @csdev: component vitals needed by the framework.
* @priority: port selection order.
+ * @spinlock: serialize enable/disable operations.
*/
struct funnel_drvdata {
void __iomem *base;
struct clk *atclk;
struct coresight_device *csdev;
unsigned long priority;
+ spinlock_t spinlock;
};
static int dynamic_funnel_enable_hw(struct funnel_drvdata *drvdata, int port)
@@ -76,11 +78,21 @@ static int funnel_enable(struct coresight_device *csdev, int inport,
{
int rc = 0;
struct funnel_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
-
- if (drvdata->base)
- rc = dynamic_funnel_enable_hw(drvdata, inport);
-
+ unsigned long flags;
+ bool first_enable = false;
+
+ spin_lock_irqsave(&drvdata->spinlock, flags);
+ if (atomic_read(&csdev->refcnt[inport]) == 0) {
+ if (drvdata->base)
+ rc = dynamic_funnel_enable_hw(drvdata, inport);
+ if (!rc)
+ first_enable = true;
+ }
if (!rc)
+ atomic_inc(&csdev->refcnt[inport]);
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+
+ if (first_enable)
dev_dbg(&csdev->dev, "FUNNEL inport %d enabled\n", inport);
return rc;
}
@@ -107,11 +119,19 @@ static void funnel_disable(struct coresight_device *csdev, int inport,
int outport)
{
struct funnel_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+ unsigned long flags;
+ bool last_disable = false;
+
+ spin_lock_irqsave(&drvdata->spinlock, flags);
+ if (atomic_dec_return(&csdev->refcnt[inport]) == 0) {
+ if (drvdata->base)
+ dynamic_funnel_disable_hw(drvdata, inport);
+ last_disable = true;
+ }
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
- if (drvdata->base)
- dynamic_funnel_disable_hw(drvdata, inport);
-
- dev_dbg(&csdev->dev, "FUNNEL inport %d disabled\n", inport);
+ if (last_disable)
+ dev_dbg(&csdev->dev, "FUNNEL inport %d disabled\n", inport);
}
static const struct coresight_ops_link funnel_link_ops = {
@@ -233,6 +253,7 @@ static int funnel_probe(struct device *dev, struct resource *res)
}
dev->platform_data = pdata;
+ spin_lock_init(&drvdata->spinlock);
desc.type = CORESIGHT_DEV_TYPE_LINK;
desc.subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_MERG;
desc.ops = &funnel_cs_ops;
diff --git a/drivers/hwtracing/coresight/coresight-replicator.c b/drivers/hwtracing/coresight/coresight-replicator.c
index b29ba640eb25..e7dc1c31d20d 100644
--- a/drivers/hwtracing/coresight/coresight-replicator.c
+++ b/drivers/hwtracing/coresight/coresight-replicator.c
@@ -31,11 +31,13 @@ DEFINE_CORESIGHT_DEVLIST(replicator_devs, "replicator");
* whether this one is programmable or not.
* @atclk: optional clock for the core parts of the replicator.
* @csdev: component vitals needed by the framework
+ * @spinlock: serialize enable/disable operations.
*/
struct replicator_drvdata {
void __iomem *base;
struct clk *atclk;
struct coresight_device *csdev;
+ spinlock_t spinlock;
};
static void dynamic_replicator_reset(struct replicator_drvdata *drvdata)
@@ -97,10 +99,22 @@ static int replicator_enable(struct coresight_device *csdev, int inport,
{
int rc = 0;
struct replicator_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
-
- if (drvdata->base)
- rc = dynamic_replicator_enable(drvdata, inport, outport);
+ unsigned long flags;
+ bool first_enable = false;
+
+ spin_lock_irqsave(&drvdata->spinlock, flags);
+ if (atomic_read(&csdev->refcnt[outport]) == 0) {
+ if (drvdata->base)
+ rc = dynamic_replicator_enable(drvdata, inport,
+ outport);
+ if (!rc)
+ first_enable = true;
+ }
if (!rc)
+ atomic_inc(&csdev->refcnt[outport]);
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+
+ if (first_enable)
dev_dbg(&csdev->dev, "REPLICATOR enabled\n");
return rc;
}
@@ -137,10 +151,19 @@ static void replicator_disable(struct coresight_device *csdev, int inport,
int outport)
{
struct replicator_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+ unsigned long flags;
+ bool last_disable = false;
+
+ spin_lock_irqsave(&drvdata->spinlock, flags);
+ if (atomic_dec_return(&csdev->refcnt[outport]) == 0) {
+ if (drvdata->base)
+ dynamic_replicator_disable(drvdata, inport, outport);
+ last_disable = true;
+ }
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
- if (drvdata->base)
- dynamic_replicator_disable(drvdata, inport, outport);
- dev_dbg(&csdev->dev, "REPLICATOR disabled\n");
+ if (last_disable)
+ dev_dbg(&csdev->dev, "REPLICATOR disabled\n");
}
static const struct coresight_ops_link replicator_link_ops = {
@@ -225,6 +248,7 @@ static int replicator_probe(struct device *dev, struct resource *res)
}
dev->platform_data = pdata;
+ spin_lock_init(&drvdata->spinlock);
desc.type = CORESIGHT_DEV_TYPE_LINK;
desc.subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_SPLIT;
desc.ops = &replicator_cs_ops;
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etf.c b/drivers/hwtracing/coresight/coresight-tmc-etf.c
index 807416b75ecc..d0cc3985b72a 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etf.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etf.c
@@ -334,9 +334,10 @@ static int tmc_disable_etf_sink(struct coresight_device *csdev)
static int tmc_enable_etf_link(struct coresight_device *csdev,
int inport, int outport)
{
- int ret;
+ int ret = 0;
unsigned long flags;
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+ bool first_enable = false;
spin_lock_irqsave(&drvdata->spinlock, flags);
if (drvdata->reading) {
@@ -344,12 +345,18 @@ static int tmc_enable_etf_link(struct coresight_device *csdev,
return -EBUSY;
}
- ret = tmc_etf_enable_hw(drvdata);
+ if (atomic_read(&csdev->refcnt[0]) == 0) {
+ ret = tmc_etf_enable_hw(drvdata);
+ if (!ret) {
+ drvdata->mode = CS_MODE_SYSFS;
+ first_enable = true;
+ }
+ }
if (!ret)
- drvdata->mode = CS_MODE_SYSFS;
+ atomic_inc(&csdev->refcnt[0]);
spin_unlock_irqrestore(&drvdata->spinlock, flags);
- if (!ret)
+ if (first_enable)
dev_dbg(&csdev->dev, "TMC-ETF enabled\n");
return ret;
}
@@ -359,6 +366,7 @@ static void tmc_disable_etf_link(struct coresight_device *csdev,
{
unsigned long flags;
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+ bool last_disable = false;
spin_lock_irqsave(&drvdata->spinlock, flags);
if (drvdata->reading) {
@@ -366,11 +374,15 @@ static void tmc_disable_etf_link(struct coresight_device *csdev,
return;
}
- tmc_etf_disable_hw(drvdata);
- drvdata->mode = CS_MODE_DISABLED;
+ if (atomic_dec_return(&csdev->refcnt[0]) == 0) {
+ tmc_etf_disable_hw(drvdata);
+ drvdata->mode = CS_MODE_DISABLED;
+ last_disable = true;
+ }
spin_unlock_irqrestore(&drvdata->spinlock, flags);
- dev_dbg(&csdev->dev, "TMC-ETF disabled\n");
+ if (last_disable)
+ dev_dbg(&csdev->dev, "TMC-ETF disabled\n");
}
static void *tmc_alloc_etf_buffer(struct coresight_device *csdev,
diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c
index 6453c67a4d01..ef20f74c85fa 100644
--- a/drivers/hwtracing/coresight/coresight.c
+++ b/drivers/hwtracing/coresight/coresight.c
@@ -253,9 +253,9 @@ static int coresight_enable_link(struct coresight_device *csdev,
struct coresight_device *parent,
struct coresight_device *child)
{
- int ret;
+ int ret = 0;
int link_subtype;
- int refport, inport, outport;
+ int inport, outport;
if (!parent || !child)
return -EINVAL;
@@ -264,29 +264,17 @@ static int coresight_enable_link(struct coresight_device *csdev,
outport = coresight_find_link_outport(csdev, child);
link_subtype = csdev->subtype.link_subtype;
- if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_MERG)
- refport = inport;
- else if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_SPLIT)
- refport = outport;
- else
- refport = 0;
-
- if (refport < 0)
- return refport;
-
- if (atomic_inc_return(&csdev->refcnt[refport]) == 1) {
- if (link_ops(csdev)->enable) {
- ret = link_ops(csdev)->enable(csdev, inport, outport);
- if (ret) {
- atomic_dec(&csdev->refcnt[refport]);
- return ret;
- }
- }
- }
+ if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_MERG && inport < 0)
+ return inport;
+ if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_SPLIT && outport < 0)
+ return outport;
- csdev->enable = true;
+ if (link_ops(csdev)->enable)
+ ret = link_ops(csdev)->enable(csdev, inport, outport);
+ if (!ret)
+ csdev->enable = true;
- return 0;
+ return ret;
}
static void coresight_disable_link(struct coresight_device *csdev,
@@ -295,7 +283,7 @@ static void coresight_disable_link(struct coresight_device *csdev,
{
int i, nr_conns;
int link_subtype;
- int refport, inport, outport;
+ int inport, outport;
if (!parent || !child)
return;
@@ -305,20 +293,15 @@ static void coresight_disable_link(struct coresight_device *csdev,
link_subtype = csdev->subtype.link_subtype;
if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_MERG) {
- refport = inport;
nr_conns = csdev->pdata->nr_inport;
} else if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_SPLIT) {
- refport = outport;
nr_conns = csdev->pdata->nr_outport;
} else {
- refport = 0;
nr_conns = 1;
}
- if (atomic_dec_return(&csdev->refcnt[refport]) == 0) {
- if (link_ops(csdev)->disable)
- link_ops(csdev)->disable(csdev, inport, outport);
- }
+ if (link_ops(csdev)->disable)
+ link_ops(csdev)->disable(csdev, inport, outport);
for (i = 0; i < nr_conns; i++)
if (atomic_read(&csdev->refcnt[i]) != 0)
@@ -1308,6 +1291,12 @@ static inline int coresight_search_device_idx(struct coresight_dev_list *dict,
return -ENOENT;
}
+bool coresight_loses_context_with_cpu(struct device *dev)
+{
+ return fwnode_property_present(dev_fwnode(dev),
+ "arm,coresight-loses-context-with-cpu");
+}
+
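For context, a hypothetical devicetree fragment that would make this helper
return true for a trace unit node (only the property name comes from the
code above; the compatible string and the rest of the node are illustrative):

	/*
	 * etm@7040000 {
	 *         compatible = "arm,coresight-etm4x", "arm,primecell";
	 *         reg = <0x7040000 0x1000>;
	 *         arm,coresight-loses-context-with-cpu;
	 * };
	 */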
/*
* coresight_alloc_device_name - Get an index for a given device in the
* device index list specific to a driver. An index is allocated for a
diff --git a/drivers/hwtracing/intel_th/core.c b/drivers/hwtracing/intel_th/core.c
index d5c1821b31c6..0dfd97bbde9e 100644
--- a/drivers/hwtracing/intel_th/core.c
+++ b/drivers/hwtracing/intel_th/core.c
@@ -649,10 +649,8 @@ intel_th_subdevice_alloc(struct intel_th *th,
}
err = intel_th_device_add_resources(thdev, res, subdev->nres);
- if (err) {
- put_device(&thdev->dev);
+ if (err)
goto fail_put_device;
- }
if (subdev->type == INTEL_TH_OUTPUT) {
if (subdev->mknode)
@@ -667,10 +665,8 @@ intel_th_subdevice_alloc(struct intel_th *th,
}
err = device_add(&thdev->dev);
- if (err) {
- put_device(&thdev->dev);
+ if (err)
goto fail_free_res;
- }
/* need switch driver to be loaded to enumerate the rest */
if (subdev->type == INTEL_TH_SWITCH && !req) {
diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
index 03ca5b1bef9f..ebf3e30e989a 100644
--- a/drivers/hwtracing/intel_th/pci.c
+++ b/drivers/hwtracing/intel_th/pci.c
@@ -210,6 +210,16 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
.driver_data = (kernel_ulong_t)&intel_th_2x,
},
{
+ /* Ice Lake CPU */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x8a29),
+ .driver_data = (kernel_ulong_t)&intel_th_2x,
+ },
+ {
+ /* Tiger Lake CPU */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x9a33),
+ .driver_data = (kernel_ulong_t)&intel_th_2x,
+ },
+ {
/* Tiger Lake PCH */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa0a6),
.driver_data = (kernel_ulong_t)&intel_th_2x,
diff --git a/drivers/hwtracing/stm/policy.c b/drivers/hwtracing/stm/policy.c
index 4b9e44b227d8..4f932a419752 100644
--- a/drivers/hwtracing/stm/policy.c
+++ b/drivers/hwtracing/stm/policy.c
@@ -345,7 +345,11 @@ void stp_policy_unbind(struct stp_policy *policy)
stm->policy = NULL;
policy->stm = NULL;
+ /*
+ * Drop the reference on the protocol driver and lose the link.
+ */
stm_put_protocol(stm->pdrv);
+ stm->pdrv = NULL;
stm_put_device(stm);
}
diff --git a/drivers/i2c/i2c-core-acpi.c b/drivers/i2c/i2c-core-acpi.c
index 9cb2aa1e20ef..62a1c92ab803 100644
--- a/drivers/i2c/i2c-core-acpi.c
+++ b/drivers/i2c/i2c-core-acpi.c
@@ -39,6 +39,7 @@ struct i2c_acpi_lookup {
int index;
u32 speed;
u32 min_speed;
+ u32 force_speed;
};
/**
@@ -285,6 +286,19 @@ i2c_acpi_match_device(const struct acpi_device_id *matches,
return acpi_match_device(matches, &client->dev);
}
+static const struct acpi_device_id i2c_acpi_force_400khz_device_ids[] = {
+ /*
+	 * These Silead touchscreen controllers only work at 400KHz; for
+	 * some reason they do not work at 100KHz. On some devices the ACPI
+	 * tables list another device on their bus as only being capable
+	 * of 100KHz; testing has shown that these other devices work fine
+	 * at 400KHz (as can be expected of any recent i2c hw), so we force
+	 * the speed of the bus to 400KHz if a Silead device is present.
+ */
+ { "MSSL1680", 0 },
+ {}
+};
+
static acpi_status i2c_acpi_lookup_speed(acpi_handle handle, u32 level,
void *data, void **return_value)
{
@@ -303,6 +317,9 @@ static acpi_status i2c_acpi_lookup_speed(acpi_handle handle, u32 level,
if (lookup->speed <= lookup->min_speed)
lookup->min_speed = lookup->speed;
+ if (acpi_match_device_ids(adev, i2c_acpi_force_400khz_device_ids) == 0)
+ lookup->force_speed = 400000;
+
return AE_OK;
}
@@ -340,7 +357,16 @@ u32 i2c_acpi_find_bus_speed(struct device *dev)
return 0;
}
- return lookup.min_speed != UINT_MAX ? lookup.min_speed : 0;
+ if (lookup.force_speed) {
+ if (lookup.force_speed != lookup.min_speed)
+ dev_warn(dev, FW_BUG "DSDT uses known not-working I2C bus speed %d, forcing it to %d\n",
+ lookup.min_speed, lookup.force_speed);
+ return lookup.force_speed;
+ } else if (lookup.min_speed != UINT_MAX) {
+ return lookup.min_speed;
+ } else {
+ return 0;
+ }
}
EXPORT_SYMBOL_GPL(i2c_acpi_find_bus_speed);
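
The resulting policy reads as a small pure function; a hedged restatement of the selection logic above (speeds in Hz, function name illustrative):

/* Sketch: resolve the effective bus speed from the ACPI scan results. */
static u32 resolve_bus_speed(u32 min_speed, u32 force_speed)
{
	if (force_speed)		/* a known-broken DSDT entry was seen */
		return force_speed;	/* e.g. 400000 for MSSL1680 */
	if (min_speed != UINT_MAX)	/* at least one device declared a limit */
		return min_speed;	/* slowest declared device wins */
	return 0;			/* unknown; adapter keeps its default */
}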
diff --git a/drivers/i2c/i2c-core-of.c b/drivers/i2c/i2c-core-of.c
index 6f632d543fcc..7eb41990bd6d 100644
--- a/drivers/i2c/i2c-core-of.c
+++ b/drivers/i2c/i2c-core-of.c
@@ -245,14 +245,14 @@ static int of_i2c_notify(struct notifier_block *nb, unsigned long action,
}
client = of_i2c_register_device(adap, rd->dn);
- put_device(&adap->dev);
-
if (IS_ERR(client)) {
dev_err(&adap->dev, "failed to create client for '%pOF'\n",
rd->dn);
+ put_device(&adap->dev);
of_node_clear_flag(rd->dn, OF_POPULATED);
return notifier_from_errno(PTR_ERR(client));
}
+ put_device(&adap->dev);
break;
case OF_RECONFIG_CHANGE_REMOVE:
/* already depopulated? */
diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c
index 5c051dba32a5..043691656245 100644
--- a/drivers/i3c/master.c
+++ b/drivers/i3c/master.c
@@ -1763,7 +1763,7 @@ static void i3c_master_bus_cleanup(struct i3c_master_controller *master)
static struct i3c_dev_desc *
i3c_master_search_i3c_dev_duplicate(struct i3c_dev_desc *refdev)
{
- struct i3c_master_controller *master = refdev->common.master;
+ struct i3c_master_controller *master = i3c_dev_get_master(refdev);
struct i3c_dev_desc *i3cdev;
i3c_bus_for_each_i3cdev(&master->bus, i3cdev) {
@@ -2493,7 +2493,7 @@ int i3c_master_register(struct i3c_master_controller *master,
/*
* We're done initializing the bus and the controller, we can now
- * register I3C devices dicovered during the initial DAA.
+ * register I3C devices discovered during the initial DAA.
*/
master->init_done = true;
i3c_bus_normaluse_lock(&master->bus);
diff --git a/drivers/ide/falconide.c b/drivers/ide/falconide.c
index a5a07ccb81a7..dbeb2605e5f6 100644
--- a/drivers/ide/falconide.c
+++ b/drivers/ide/falconide.c
@@ -15,6 +15,7 @@
#include <linux/blkdev.h>
#include <linux/ide.h>
#include <linux/init.h>
+#include <linux/platform_device.h>
#include <asm/setup.h>
#include <asm/atarihw.h>
@@ -25,13 +26,7 @@
#define DRV_NAME "falconide"
/*
- * Base of the IDE interface
- */
-
-#define ATA_HD_BASE 0xfff00000
-
- /*
- * Offsets from the above base
+ * Offsets from base address
*/
#define ATA_HD_CONTROL 0x39
@@ -114,18 +109,18 @@ static const struct ide_port_info falconide_port_info = {
.chipset = ide_generic,
};
-static void __init falconide_setup_ports(struct ide_hw *hw)
+static void __init falconide_setup_ports(struct ide_hw *hw, unsigned long base)
{
int i;
memset(hw, 0, sizeof(*hw));
- hw->io_ports.data_addr = ATA_HD_BASE;
+ hw->io_ports.data_addr = base;
for (i = 1; i < 8; i++)
- hw->io_ports_array[i] = ATA_HD_BASE + 1 + i * 4;
+ hw->io_ports_array[i] = base + 1 + i * 4;
- hw->io_ports.ctl_addr = ATA_HD_BASE + ATA_HD_CONTROL;
+ hw->io_ports.ctl_addr = base + ATA_HD_CONTROL;
hw->irq = IRQ_MFP_IDE;
}
@@ -134,23 +129,29 @@ static void __init falconide_setup_ports(struct ide_hw *hw)
* Probe for a Falcon IDE interface
*/
-static int __init falconide_init(void)
+static int __init falconide_init(struct platform_device *pdev)
{
+ struct resource *res;
struct ide_host *host;
struct ide_hw hw, *hws[] = { &hw };
+ unsigned long base;
int rc;
- if (!MACH_IS_ATARI || !ATARIHW_PRESENT(IDE))
- return -ENODEV;
+ dev_info(&pdev->dev, "Atari Falcon IDE controller\n");
- printk(KERN_INFO "ide: Falcon IDE controller\n");
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
- if (!request_mem_region(ATA_HD_BASE, 0x40, DRV_NAME)) {
- printk(KERN_ERR "%s: resources busy\n", DRV_NAME);
+ if (!devm_request_mem_region(&pdev->dev, res->start,
+ resource_size(res), DRV_NAME)) {
+ dev_err(&pdev->dev, "resources busy\n");
return -EBUSY;
}
- falconide_setup_ports(&hw);
+ base = (unsigned long)res->start;
+
+ falconide_setup_ports(&hw, base);
host = ide_host_alloc(&falconide_port_info, hws, 1);
if (host == NULL) {
@@ -169,10 +170,29 @@ static int __init falconide_init(void)
err_free:
ide_host_free(host);
err:
- release_mem_region(ATA_HD_BASE, 0x40);
+ release_mem_region(res->start, resource_size(res));
return rc;
}
-module_init(falconide_init);
+static int falconide_remove(struct platform_device *pdev)
+{
+ struct ide_host *host = dev_get_drvdata(&pdev->dev);
+
+ ide_host_remove(host);
+
+ return 0;
+}
+
+static struct platform_driver ide_falcon_driver = {
+ .remove = falconide_remove,
+ .driver = {
+ .name = "atari-falcon-ide",
+ },
+};
+
+module_platform_driver_probe(ide_falcon_driver, falconide_init);
+MODULE_AUTHOR("Geert Uytterhoeven");
+MODULE_DESCRIPTION("low-level driver for Atari Falcon IDE");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:atari-falcon-ide");
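
module_platform_driver_probe() is the idiom for drivers that can only bind once and whose probe routine may therefore stay in __init memory; a generic sketch of the pattern adopted above (foo_* names are placeholders):

static int __init foo_probe(struct platform_device *pdev)
{
	/* One-shot probe; may reference __init data, freed after boot. */
	return 0;
}

static int foo_remove(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver foo_driver = {
	.remove = foo_remove,
	.driver = {
		.name = "foo",
	},
	/* No .probe member: it is passed separately so it can stay __init. */
};
module_platform_driver_probe(foo_driver, foo_probe);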
diff --git a/drivers/ide/tx4938ide.c b/drivers/ide/tx4938ide.c
index 40a3f55b08dd..962eb92501b5 100644
--- a/drivers/ide/tx4938ide.c
+++ b/drivers/ide/tx4938ide.c
@@ -46,7 +46,7 @@ static void tx4938ide_tune_ebusc(unsigned int ebus_ch,
while ((shwt * 4 + wt + (wt ? 2 : 3)) * cycle < t->cycle)
shwt++;
if (shwt > 7) {
- pr_warning("tx4938ide: SHWT violation (%d)\n", shwt);
+ pr_warn("tx4938ide: SHWT violation (%d)\n", shwt);
shwt = 7;
}
pr_debug("tx4938ide: ebus %d, bus cycle %dns, WT %d, SHWT %d\n",
diff --git a/drivers/ide/tx4939ide.c b/drivers/ide/tx4939ide.c
index 88d132edc4e3..d5e871fe840d 100644
--- a/drivers/ide/tx4939ide.c
+++ b/drivers/ide/tx4939ide.c
@@ -363,9 +363,9 @@ static int tx4939ide_dma_test_irq(ide_drive_t *drive)
case TX4939IDE_INT_HOST | TX4939IDE_INT_XFEREND:
dma_stat = tx4939ide_readb(base, TX4939IDE_DMA_Stat);
if (!(dma_stat & ATA_DMA_INTR))
- pr_warning("%s: weird interrupt status. "
- "DMA_Stat %#02x int_ctl %#04x\n",
- hwif->name, dma_stat, ctl);
+ pr_warn("%s: weird interrupt status. "
+ "DMA_Stat %#02x int_ctl %#04x\n",
+ hwif->name, dma_stat, ctl);
found = 1;
break;
}
diff --git a/drivers/iio/adc/men_z188_adc.c b/drivers/iio/adc/men_z188_adc.c
index 3b2fbb7ce431..196c8226381e 100644
--- a/drivers/iio/adc/men_z188_adc.c
+++ b/drivers/iio/adc/men_z188_adc.c
@@ -167,3 +167,4 @@ MODULE_AUTHOR("Johannes Thumshirn <johannes.thumshirn@men.de>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("IIO ADC driver for MEN 16z188 ADC Core");
MODULE_ALIAS("mcb:16z188");
+MODULE_IMPORT_NS(MCB);
diff --git a/drivers/iio/imu/adis.c b/drivers/iio/imu/adis.c
index 85de565a4e80..e14c8536fd09 100644
--- a/drivers/iio/imu/adis.c
+++ b/drivers/iio/imu/adis.c
@@ -39,24 +39,24 @@ int adis_write_reg(struct adis *adis, unsigned int reg,
.len = 2,
.cs_change = 1,
.delay_usecs = adis->data->write_delay,
- .cs_change_delay = adis->data->cs_change_delay,
- .cs_change_delay_unit = SPI_DELAY_UNIT_USECS,
+ .cs_change_delay.value = adis->data->cs_change_delay,
+ .cs_change_delay.unit = SPI_DELAY_UNIT_USECS,
}, {
.tx_buf = adis->tx + 2,
.bits_per_word = 8,
.len = 2,
.cs_change = 1,
.delay_usecs = adis->data->write_delay,
- .cs_change_delay = adis->data->cs_change_delay,
- .cs_change_delay_unit = SPI_DELAY_UNIT_USECS,
+ .cs_change_delay.value = adis->data->cs_change_delay,
+ .cs_change_delay.unit = SPI_DELAY_UNIT_USECS,
}, {
.tx_buf = adis->tx + 4,
.bits_per_word = 8,
.len = 2,
.cs_change = 1,
.delay_usecs = adis->data->write_delay,
- .cs_change_delay = adis->data->cs_change_delay,
- .cs_change_delay_unit = SPI_DELAY_UNIT_USECS,
+ .cs_change_delay.value = adis->data->cs_change_delay,
+ .cs_change_delay.unit = SPI_DELAY_UNIT_USECS,
}, {
.tx_buf = adis->tx + 6,
.bits_per_word = 8,
@@ -139,16 +139,16 @@ int adis_read_reg(struct adis *adis, unsigned int reg,
.len = 2,
.cs_change = 1,
.delay_usecs = adis->data->write_delay,
- .cs_change_delay = adis->data->cs_change_delay,
- .cs_change_delay_unit = SPI_DELAY_UNIT_USECS,
+ .cs_change_delay.value = adis->data->cs_change_delay,
+ .cs_change_delay.unit = SPI_DELAY_UNIT_USECS,
}, {
.tx_buf = adis->tx + 2,
.bits_per_word = 8,
.len = 2,
.cs_change = 1,
.delay_usecs = adis->data->read_delay,
- .cs_change_delay = adis->data->cs_change_delay,
- .cs_change_delay_unit = SPI_DELAY_UNIT_USECS,
+ .cs_change_delay.value = adis->data->cs_change_delay,
+ .cs_change_delay.unit = SPI_DELAY_UNIT_USECS,
}, {
.tx_buf = adis->tx + 4,
.rx_buf = adis->rx,
@@ -156,8 +156,8 @@ int adis_read_reg(struct adis *adis, unsigned int reg,
.len = 2,
.cs_change = 1,
.delay_usecs = adis->data->read_delay,
- .cs_change_delay = adis->data->cs_change_delay,
- .cs_change_delay_unit = SPI_DELAY_UNIT_USECS,
+ .cs_change_delay.value = adis->data->cs_change_delay,
+ .cs_change_delay.unit = SPI_DELAY_UNIT_USECS,
}, {
.rx_buf = adis->rx + 2,
.bits_per_word = 8,
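
This hunk tracks the SPI core change that folded the per-transfer chip-select delay into a struct spi_delay, a {value, unit} pair. A hedged sketch of a single transfer in the new form (the 16 us value is an arbitrary example):

struct spi_transfer xfer = {
	.tx_buf = buf,
	.bits_per_word = 8,
	.len = 2,
	.cs_change = 1,
	/* Delay between CS toggles, now a value plus an explicit unit. */
	.cs_change_delay = {
		.value = 16,
		.unit = SPI_DELAY_UNIT_USECS,
	},
};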
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index b44b1c322ec8..ade86388434f 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -83,7 +83,6 @@ config INFINIBAND_ADDR_TRANS_CONFIGFS
if INFINIBAND_USER_ACCESS || !INFINIBAND_USER_ACCESS
source "drivers/infiniband/hw/mthca/Kconfig"
source "drivers/infiniband/hw/qib/Kconfig"
-source "drivers/infiniband/hw/cxgb3/Kconfig"
source "drivers/infiniband/hw/cxgb4/Kconfig"
source "drivers/infiniband/hw/efa/Kconfig"
source "drivers/infiniband/hw/i40iw/Kconfig"
diff --git a/drivers/infiniband/core/Makefile b/drivers/infiniband/core/Makefile
index 09881bd5f12d..9a8871e21545 100644
--- a/drivers/infiniband/core/Makefile
+++ b/drivers/infiniband/core/Makefile
@@ -11,7 +11,7 @@ ib_core-y := packer.o ud_header.o verbs.o cq.o rw.o sysfs.o \
device.o fmr_pool.o cache.o netlink.o \
roce_gid_mgmt.o mr_pool.o addr.o sa_query.o \
multicast.o mad.o smi.o agent.o mad_rmpp.o \
- nldev.o restrack.o counters.o
+ nldev.o restrack.o counters.o ib_core_uverbs.o
ib_core-$(CONFIG_SECURITY_INFINIBAND) += security.o
ib_core-$(CONFIG_CGROUP_RDMA) += cgroup.o
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index 00fb3eacda19..d535995711c3 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -819,22 +819,16 @@ static void cleanup_gid_table_port(struct ib_device *ib_dev, u8 port,
struct ib_gid_table *table)
{
int i;
- bool deleted = false;
if (!table)
return;
mutex_lock(&table->lock);
for (i = 0; i < table->sz; ++i) {
- if (is_gid_entry_valid(table->data_vec[i])) {
+ if (is_gid_entry_valid(table->data_vec[i]))
del_gid(ib_dev, port, table, i);
- deleted = true;
- }
}
mutex_unlock(&table->lock);
-
- if (deleted)
- dispatch_gid_change_event(ib_dev, port);
}
void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 5920c0085d35..455b3659d84b 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -1,36 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
* Copyright (c) 2004-2007 Intel Corporation. All rights reserved.
* Copyright (c) 2004 Topspin Corporation. All rights reserved.
* Copyright (c) 2004, 2005 Voltaire Corporation. All rights reserved.
* Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019, Mellanox Technologies inc. All rights reserved.
*/
#include <linux/completion.h>
@@ -246,7 +220,7 @@ struct cm_work {
};
struct cm_timewait_info {
- struct cm_work work; /* Must be first. */
+ struct cm_work work;
struct list_head list;
struct rb_node remote_qp_node;
struct rb_node remote_id_node;
@@ -263,7 +237,7 @@ struct cm_id_private {
struct rb_node sidr_id_node;
spinlock_t lock; /* Do not acquire inside cm.lock */
struct completion comp;
- atomic_t refcount;
+ refcount_t refcount;
/* Number of clients sharing this ib_cm_id. Only valid for listeners.
* Protected by the cm.lock spinlock. */
int listen_sharecount;
@@ -308,7 +282,7 @@ static void cm_work_handler(struct work_struct *work);
static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
{
- if (atomic_dec_and_test(&cm_id_priv->refcount))
+ if (refcount_dec_and_test(&cm_id_priv->refcount))
complete(&cm_id_priv->comp);
}
@@ -365,7 +339,7 @@ static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
m->ah = ah;
m->retries = cm_id_priv->max_cm_retries;
- atomic_inc(&cm_id_priv->refcount);
+ refcount_inc(&cm_id_priv->refcount);
m->context[0] = cm_id_priv;
*msg = m;
@@ -626,7 +600,7 @@ static struct cm_id_private * cm_get_id(__be32 local_id, __be32 remote_id)
cm_id_priv = xa_load(&cm.local_id_table, cm_local_id(local_id));
if (cm_id_priv) {
if (cm_id_priv->id.remote_id == remote_id)
- atomic_inc(&cm_id_priv->refcount);
+ refcount_inc(&cm_id_priv->refcount);
else
cm_id_priv = NULL;
}
@@ -883,7 +857,7 @@ struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
INIT_LIST_HEAD(&cm_id_priv->prim_list);
INIT_LIST_HEAD(&cm_id_priv->altr_list);
atomic_set(&cm_id_priv->work_count, -1);
- atomic_set(&cm_id_priv->refcount, 1);
+ refcount_set(&cm_id_priv->refcount, 1);
return &cm_id_priv->id;
error:
@@ -1230,7 +1204,7 @@ struct ib_cm_id *ib_cm_insert_listen(struct ib_device *device,
spin_unlock_irqrestore(&cm.lock, flags);
return ERR_PTR(-EINVAL);
}
- atomic_inc(&cm_id_priv->refcount);
+ refcount_inc(&cm_id_priv->refcount);
++cm_id_priv->listen_sharecount;
spin_unlock_irqrestore(&cm.lock, flags);
@@ -1525,14 +1499,6 @@ static int cm_issue_rej(struct cm_port *port,
return ret;
}
-static inline int cm_is_active_peer(__be64 local_ca_guid, __be64 remote_ca_guid,
- __be32 local_qpn, __be32 remote_qpn)
-{
- return (be64_to_cpu(local_ca_guid) > be64_to_cpu(remote_ca_guid) ||
- ((local_ca_guid == remote_ca_guid) &&
- (be32_to_cpu(local_qpn) > be32_to_cpu(remote_qpn))));
-}
-
static bool cm_req_has_alt_path(struct cm_req_msg *req_msg)
{
return ((req_msg->alt_local_lid) ||
@@ -1895,8 +1861,8 @@ static struct cm_id_private * cm_match_req(struct cm_work *work,
NULL, 0);
goto out;
}
- atomic_inc(&listen_cm_id_priv->refcount);
- atomic_inc(&cm_id_priv->refcount);
+ refcount_inc(&listen_cm_id_priv->refcount);
+ refcount_inc(&cm_id_priv->refcount);
cm_id_priv->id.state = IB_CM_REQ_RCVD;
atomic_inc(&cm_id_priv->work_count);
spin_unlock_irq(&cm.lock);
@@ -2052,7 +2018,7 @@ static int cm_req_handler(struct cm_work *work)
return 0;
rejected:
- atomic_dec(&cm_id_priv->refcount);
+ refcount_dec(&cm_id_priv->refcount);
cm_deref_id(listen_cm_id_priv);
free_timeinfo:
kfree(cm_id_priv->timewait_info);
@@ -2826,7 +2792,7 @@ static struct cm_id_private * cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
cm_local_id(timewait_info->work.local_id));
if (cm_id_priv) {
if (cm_id_priv->id.remote_id == remote_id)
- atomic_inc(&cm_id_priv->refcount);
+ refcount_inc(&cm_id_priv->refcount);
else
cm_id_priv = NULL;
}
@@ -3434,7 +3400,7 @@ static int cm_timewait_handler(struct cm_work *work)
struct cm_id_private *cm_id_priv;
int ret;
- timewait_info = (struct cm_timewait_info *)work;
+ timewait_info = container_of(work, struct cm_timewait_info, work);
spin_lock_irq(&cm.lock);
list_del(&timewait_info->list);
spin_unlock_irq(&cm.lock);
@@ -3596,8 +3562,8 @@ static int cm_sidr_req_handler(struct cm_work *work)
cm_reject_sidr_req(cm_id_priv, IB_SIDR_UNSUPPORTED);
goto out; /* No match. */
}
- atomic_inc(&cur_cm_id_priv->refcount);
- atomic_inc(&cm_id_priv->refcount);
+ refcount_inc(&cur_cm_id_priv->refcount);
+ refcount_inc(&cm_id_priv->refcount);
spin_unlock_irq(&cm.lock);
cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler;
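
Two patterns recur in this cm.c rework: refcount_t, which behaves like atomic_t for inc/dec but saturates and WARNs on misuse, and container_of(), which removes the old "must be first" layout constraint on embedded members. A generic sketch of both, not taken from this file:

#include <linux/kernel.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct obj {
	refcount_t refcount;
	struct work_struct work;	/* no longer needs to be first */
};

static void obj_put(struct obj *o)
{
	/* Saturating decrement; WARNs on underflow, unlike atomic_t. */
	if (refcount_dec_and_test(&o->refcount))
		kfree(o);
}

static void obj_work_fn(struct work_struct *work)
{
	/* Recover the container regardless of the member's position. */
	struct obj *o = container_of(work, struct obj, work);

	obj_put(o);
}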
diff --git a/drivers/infiniband/core/cm_msgs.h b/drivers/infiniband/core/cm_msgs.h
index 3d16d614aff6..92d7260ac913 100644
--- a/drivers/infiniband/core/cm_msgs.h
+++ b/drivers/infiniband/core/cm_msgs.h
@@ -1,37 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
* Copyright (c) 2004, 2011 Intel Corporation. All rights reserved.
* Copyright (c) 2004 Topspin Corporation. All rights reserved.
* Copyright (c) 2004 Voltaire Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING the madirectory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use source and binary forms, with or
- * withmodification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retathe above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHWARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS THE
- * SOFTWARE.
+ * Copyright (c) 2019, Mellanox Technologies inc. All rights reserved.
*/
-#if !defined(CM_MSGS_H)
+#ifndef CM_MSGS_H
#define CM_MSGS_H
#include <rdma/ib_mad.h>
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index d78f67623f24..25f2b70fd8ef 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -1,36 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
* Copyright (c) 2005 Voltaire Inc. All rights reserved.
* Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved.
- * Copyright (c) 1999-2005, Mellanox Technologies, Inc. All rights reserved.
+ * Copyright (c) 1999-2019, Mellanox Technologies, Inc. All rights reserved.
* Copyright (c) 2005-2006 Intel Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#include <linux/completion.h>
@@ -2531,7 +2504,9 @@ EXPORT_SYMBOL(rdma_set_service_type);
* This function should be called before rdma_connect() on active side,
* and on passive side before rdma_accept(). It is applicable to primary
* path only. The timeout will affect the local side of the QP, it is not
- * negotiated with remote side and zero disables the timer.
+ * negotiated with remote side and zero disables the timer. In case it is
+ * set before rdma_resolve_route, the value will also be used to determine
+ * PacketLifeTime for RoCE.
*
* Return: 0 for success
*/
@@ -2828,22 +2803,65 @@ static int cma_resolve_iw_route(struct rdma_id_private *id_priv)
return 0;
}
-static int iboe_tos_to_sl(struct net_device *ndev, int tos)
+static int get_vlan_ndev_tc(struct net_device *vlan_ndev, int prio)
{
- int prio;
struct net_device *dev;
- prio = rt_tos2priority(tos);
- dev = is_vlan_dev(ndev) ? vlan_dev_real_dev(ndev) : ndev;
+ dev = vlan_dev_real_dev(vlan_ndev);
if (dev->num_tc)
return netdev_get_prio_tc_map(dev, prio);
-#if IS_ENABLED(CONFIG_VLAN_8021Q)
+ return (vlan_dev_get_egress_qos_mask(vlan_ndev, prio) &
+ VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+}
+
+struct iboe_prio_tc_map {
+ int input_prio;
+ int output_tc;
+ bool found;
+};
+
+static int get_lower_vlan_dev_tc(struct net_device *dev, void *data)
+{
+ struct iboe_prio_tc_map *map = data;
+
+ if (is_vlan_dev(dev))
+ map->output_tc = get_vlan_ndev_tc(dev, map->input_prio);
+ else if (dev->num_tc)
+ map->output_tc = netdev_get_prio_tc_map(dev, map->input_prio);
+ else
+ map->output_tc = 0;
+	/* We are interested only in the first-level VLAN device, so always
+	 * return 1 to stop iterating over lower-level devices.
+ */
+ map->found = true;
+ return 1;
+}
+
+static int iboe_tos_to_sl(struct net_device *ndev, int tos)
+{
+ struct iboe_prio_tc_map prio_tc_map = {};
+ int prio = rt_tos2priority(tos);
+
+ /* If VLAN device, get it directly from the VLAN netdev */
if (is_vlan_dev(ndev))
- return (vlan_dev_get_egress_qos_mask(ndev, prio) &
- VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
-#endif
- return 0;
+ return get_vlan_ndev_tc(ndev, prio);
+
+ prio_tc_map.input_prio = prio;
+ rcu_read_lock();
+ netdev_walk_all_lower_dev_rcu(ndev,
+ get_lower_vlan_dev_tc,
+ &prio_tc_map);
+ rcu_read_unlock();
+	/* If a map is found from a lower device, use it; otherwise
+	 * continue with the current netdevice to get the priority-to-tc map.
+ */
+ if (prio_tc_map.found)
+ return prio_tc_map.output_tc;
+ else if (ndev->num_tc)
+ return netdev_get_prio_tc_map(ndev, prio);
+ else
+ return 0;
}
static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
@@ -2897,7 +2915,16 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
route->path_rec->rate = iboe_get_rate(ndev);
dev_put(ndev);
route->path_rec->packet_life_time_selector = IB_SA_EQ;
- route->path_rec->packet_life_time = CMA_IBOE_PACKET_LIFETIME;
+ /* In case ACK timeout is set, use this value to calculate
+ * PacketLifeTime. As per IBTA 12.7.34,
+ * local ACK timeout = (2 * PacketLifeTime + Local CA’s ACK delay).
+ * Assuming a negligible local ACK delay, we can use
+ * PacketLifeTime = local ACK timeout/2
+ * as a reasonable approximation for RoCE networks.
+ */
+ route->path_rec->packet_life_time = id_priv->timeout_set ?
+ id_priv->timeout - 1 : CMA_IBOE_PACKET_LIFETIME;
+
if (!route->path_rec->mtu) {
ret = -EINVAL;
goto err2;
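
A note on the arithmetic above: both the local ACK timeout and PacketLifeTime are carried as 5-bit exponents, with time = 4.096 us * 2^value, so halving the time amounts to subtracting 1 from the encoded value — hence id_priv->timeout - 1. An illustrative decode (helper name is made up):

/* Illustrative only: decoding the IBTA exponential time encoding. */
static u64 ibta_time_ns(u8 encoded)
{
	return 4096ULL << encoded;	/* 4.096 us * 2^encoded, in ns */
}

/* ibta_time_ns(t - 1) == ibta_time_ns(t) / 2, i.e. PLT = ACK timeout / 2 */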
diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
index 9d07378b5b42..3645e092e1c7 100644
--- a/drivers/infiniband/core/core_priv.h
+++ b/drivers/infiniband/core/core_priv.h
@@ -388,4 +388,15 @@ int ib_device_set_netns_put(struct sk_buff *skb,
int rdma_nl_net_init(struct rdma_dev_net *rnet);
void rdma_nl_net_exit(struct rdma_dev_net *rnet);
+
+struct rdma_umap_priv {
+ struct vm_area_struct *vma;
+ struct list_head list;
+ struct rdma_user_mmap_entry *entry;
+};
+
+void rdma_umap_priv_init(struct rdma_umap_priv *priv,
+ struct vm_area_struct *vma,
+ struct rdma_user_mmap_entry *entry);
+
#endif /* _CORE_PRIV_H */
diff --git a/drivers/infiniband/core/counters.c b/drivers/infiniband/core/counters.c
index 680ad27f497d..8434ec082c3a 100644
--- a/drivers/infiniband/core/counters.c
+++ b/drivers/infiniband/core/counters.c
@@ -149,11 +149,18 @@ static bool auto_mode_match(struct ib_qp *qp, struct rdma_counter *counter,
struct auto_mode_param *param = &counter->mode.param;
bool match = true;
- if (!rdma_is_visible_in_pid_ns(&qp->res))
- return false;
-
- /* Ensure that counter belongs to the right PID */
- if (task_pid_nr(counter->res.task) != task_pid_nr(qp->res.task))
+ /*
+ * Ensure that counter belongs to the right PID. This operation can
+ * race with user space which kills the process and leaves QP and
+ * counters orphans.
+ *
+	 * It is not a big deal because an exited task will leave both QP and
+	 * counter in the same bucket of zombie processes. Just ensure that the
+	 * process is still alive before proceeding.
+ *
+ */
+ if (task_pid_nr(counter->res.task) != task_pid_nr(qp->res.task) ||
+ !task_pid_nr(qp->res.task))
return false;
if (auto_mask & RDMA_COUNTER_MASK_QP_TYPE)
@@ -229,9 +236,6 @@ static struct rdma_counter *rdma_get_counter_auto_mode(struct ib_qp *qp,
rt = &dev->res[RDMA_RESTRACK_COUNTER];
xa_lock(&rt->xa);
xa_for_each(&rt->xa, id, res) {
- if (!rdma_is_visible_in_pid_ns(res))
- continue;
-
counter = container_of(res, struct rdma_counter, res);
if ((counter->device != qp->device) || (counter->port != port))
goto next;
@@ -412,9 +416,6 @@ static struct ib_qp *rdma_counter_get_qp(struct ib_device *dev, u32 qp_num)
if (IS_ERR(res))
return NULL;
- if (!rdma_is_visible_in_pid_ns(res))
- goto err;
-
qp = container_of(res, struct ib_qp, res);
if (qp->qp_type == IB_QPT_RAW_PACKET && !capable(CAP_NET_RAW))
goto err;
@@ -445,11 +446,6 @@ static struct rdma_counter *rdma_get_counter_by_id(struct ib_device *dev,
if (IS_ERR(res))
return NULL;
- if (!rdma_is_visible_in_pid_ns(res)) {
- rdma_restrack_put(res);
- return NULL;
- }
-
counter = container_of(res, struct rdma_counter, res);
kref_get(&counter->kref);
rdma_restrack_put(res);
@@ -463,10 +459,15 @@ static struct rdma_counter *rdma_get_counter_by_id(struct ib_device *dev,
int rdma_counter_bind_qpn(struct ib_device *dev, u8 port,
u32 qp_num, u32 counter_id)
{
+ struct rdma_port_counter *port_counter;
struct rdma_counter *counter;
struct ib_qp *qp;
int ret;
+ port_counter = &dev->port_data[port].port_counter;
+ if (port_counter->mode.mode == RDMA_COUNTER_MODE_AUTO)
+ return -EINVAL;
+
qp = rdma_counter_get_qp(dev, qp_num);
if (!qp)
return -ENOENT;
@@ -503,6 +504,7 @@ err:
int rdma_counter_bind_qpn_alloc(struct ib_device *dev, u8 port,
u32 qp_num, u32 *counter_id)
{
+ struct rdma_port_counter *port_counter;
struct rdma_counter *counter;
struct ib_qp *qp;
int ret;
@@ -510,9 +512,13 @@ int rdma_counter_bind_qpn_alloc(struct ib_device *dev, u8 port,
if (!rdma_is_port_valid(dev, port))
return -EINVAL;
- if (!dev->port_data[port].port_counter.hstats)
+ port_counter = &dev->port_data[port].port_counter;
+ if (!port_counter->hstats)
return -EOPNOTSUPP;
+ if (port_counter->mode.mode == RDMA_COUNTER_MODE_AUTO)
+ return -EINVAL;
+
qp = rdma_counter_get_qp(dev, qp_num);
if (!qp)
return -ENOENT;
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 50a92442c4f7..9ebb09a75049 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -128,17 +128,14 @@ module_param_named(netns_mode, ib_devices_shared_netns, bool, 0444);
MODULE_PARM_DESC(netns_mode,
"Share device among net namespaces; default=1 (shared)");
/**
- * rdma_dev_access_netns() - Return whether a rdma device can be accessed
+ * rdma_dev_access_netns() - Return whether an rdma device can be accessed
* from a specified net namespace or not.
- * @device: Pointer to rdma device which needs to be checked
+ * @dev: Pointer to rdma device which needs to be checked
 * @net: Pointer to net namespace for which access is to be checked
*
- * rdma_dev_access_netns() - Return whether a rdma device can be accessed
- * from a specified net namespace or not. When
- * rdma device is in shared mode, it ignores the
- * net namespace. When rdma device is exclusive
- * to a net namespace, rdma device net namespace is
- * checked against the specified one.
+ * When the rdma device is in shared mode, it ignores the net namespace.
+ * When the rdma device is exclusive to a net namespace, rdma device net
+ * namespace is checked against the specified one.
*/
bool rdma_dev_access_netns(const struct ib_device *dev, const struct net *net)
{
@@ -1199,9 +1196,21 @@ static void setup_dma_device(struct ib_device *device)
WARN_ON_ONCE(!parent);
device->dma_device = parent;
}
- /* Setup default max segment size for all IB devices */
- dma_set_max_seg_size(device->dma_device, SZ_2G);
+ if (!device->dev.dma_parms) {
+ if (parent) {
+ /*
+ * The caller did not provide DMA parameters, so
+ * 'parent' probably represents a PCI device. The PCI
+ * core sets the maximum segment size to 64
+ * KB. Increase this parameter to 2 GB.
+ */
+ device->dev.dma_parms = parent->dma_parms;
+ dma_set_max_seg_size(device->dma_device, SZ_2G);
+ } else {
+ WARN_ON_ONCE(true);
+ }
+ }
}
/*
@@ -1317,7 +1326,9 @@ out:
/**
* ib_register_device - Register an IB device with IB core
- * @device:Device to register
+ * @device: Device to register
+ * @name: unique string device name. This may include a '%' which will
+ * cause a unique index to be added to the passed device name.
*
* Low-level drivers use ib_register_device() to register their
* devices with the IB core. All registered clients will receive a
@@ -1444,7 +1455,7 @@ out:
/**
* ib_unregister_device - Unregister an IB device
- * @device: The device to unregister
+ * @ib_dev: The device to unregister
*
* Unregister an IB device. All clients will receive a remove callback.
*
@@ -1466,7 +1477,7 @@ EXPORT_SYMBOL(ib_unregister_device);
/**
* ib_unregister_device_and_put - Unregister a device while holding a 'get'
- * device: The device to unregister
+ * @ib_dev: The device to unregister
*
* This is the same as ib_unregister_device(), except it includes an internal
* ib_device_put() that should match a 'get' obtained by the caller.
@@ -1536,7 +1547,7 @@ static void ib_unregister_work(struct work_struct *work)
/**
* ib_unregister_device_queued - Unregister a device using a work queue
- * device: The device to unregister
+ * @ib_dev: The device to unregister
*
* This schedules an asynchronous unregistration using a WQ for the device. A
* driver should use this to avoid holding locks while doing unregistration,
@@ -2366,7 +2377,7 @@ int ib_modify_device(struct ib_device *device,
struct ib_device_modify *device_modify)
{
if (!device->ops.modify_device)
- return -ENOSYS;
+ return -EOPNOTSUPP;
return device->ops.modify_device(device, device_modify_mask,
device_modify);
@@ -2397,8 +2408,12 @@ int ib_modify_port(struct ib_device *device,
rc = device->ops.modify_port(device, port_num,
port_modify_mask,
port_modify);
+ else if (rdma_protocol_roce(device, port_num) &&
+ ((port_modify->set_port_cap_mask & ~IB_PORT_CM_SUP) == 0 ||
+ (port_modify->clr_port_cap_mask & ~IB_PORT_CM_SUP) == 0))
+ rc = 0;
else
- rc = rdma_protocol_roce(device, port_num) ? 0 : -ENOSYS;
+ rc = -EOPNOTSUPP;
return rc;
}
EXPORT_SYMBOL(ib_modify_port);
@@ -2607,6 +2622,7 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
SET_DEVICE_OP(dev_ops, drain_sq);
SET_DEVICE_OP(dev_ops, enable_driver);
SET_DEVICE_OP(dev_ops, fill_res_entry);
+ SET_DEVICE_OP(dev_ops, fill_stat_entry);
SET_DEVICE_OP(dev_ops, get_dev_fw_str);
SET_DEVICE_OP(dev_ops, get_dma_mr);
SET_DEVICE_OP(dev_ops, get_hw_stats);
@@ -2615,6 +2631,7 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
SET_DEVICE_OP(dev_ops, get_port_immutable);
SET_DEVICE_OP(dev_ops, get_vector_affinity);
SET_DEVICE_OP(dev_ops, get_vf_config);
+ SET_DEVICE_OP(dev_ops, get_vf_guid);
SET_DEVICE_OP(dev_ops, get_vf_stats);
SET_DEVICE_OP(dev_ops, init_port);
SET_DEVICE_OP(dev_ops, invalidate_range);
@@ -2630,6 +2647,7 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
SET_DEVICE_OP(dev_ops, map_mr_sg_pi);
SET_DEVICE_OP(dev_ops, map_phys_fmr);
SET_DEVICE_OP(dev_ops, mmap);
+ SET_DEVICE_OP(dev_ops, mmap_free);
SET_DEVICE_OP(dev_ops, modify_ah);
SET_DEVICE_OP(dev_ops, modify_cq);
SET_DEVICE_OP(dev_ops, modify_device);
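
Background for the dma_parms hunk above: dma_set_max_seg_size() fails when dev->dma_parms is NULL, which is why the code borrows the parent's dma_parms before raising the limit. The core helper is roughly:

/* Roughly what the core helper does (see include/linux/dma-mapping.h). */
static inline int dma_set_max_seg_size(struct device *dev, unsigned int size)
{
	if (dev->dma_parms) {
		dev->dma_parms->max_segment_size = size;
		return 0;
	}
	return -EIO;
}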
diff --git a/drivers/infiniband/core/ib_core_uverbs.c b/drivers/infiniband/core/ib_core_uverbs.c
new file mode 100644
index 000000000000..f509c478b469
--- /dev/null
+++ b/drivers/infiniband/core/ib_core_uverbs.c
@@ -0,0 +1,335 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ * Copyright 2018-2019 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2019 Marvell. All rights reserved.
+ */
+#include <linux/xarray.h>
+#include "uverbs.h"
+#include "core_priv.h"
+
+/**
+ * rdma_umap_priv_init() - Initialize the private data of a vma
+ *
+ * @priv: The already allocated private data
+ * @vma: The vm area struct that needs private data
+ * @entry: entry into the mmap_xa that needs to be linked with
+ * this vma
+ *
+ * Each time we map IO memory into user space this keeps track of the
+ * mapping. When the device is hot-unplugged we 'zap' the mmaps in user space
+ * to point to the zero page and allow the hot unplug to proceed.
+ *
+ * This is necessary for cases like PCI physical hot unplug as the actual BAR
+ * memory may vanish after this and access to it from userspace could MCE.
+ *
+ * RDMA drivers supporting disassociation must have their user space designed
+ * to cope in some way with their IO pages going to the zero page.
+ *
+ */
+void rdma_umap_priv_init(struct rdma_umap_priv *priv,
+ struct vm_area_struct *vma,
+ struct rdma_user_mmap_entry *entry)
+{
+ struct ib_uverbs_file *ufile = vma->vm_file->private_data;
+
+ priv->vma = vma;
+ if (entry) {
+ kref_get(&entry->ref);
+ priv->entry = entry;
+ }
+ vma->vm_private_data = priv;
+ /* vm_ops is setup in ib_uverbs_mmap() to avoid module dependencies */
+
+ mutex_lock(&ufile->umap_lock);
+ list_add(&priv->list, &ufile->umaps);
+ mutex_unlock(&ufile->umap_lock);
+}
+EXPORT_SYMBOL(rdma_umap_priv_init);
+
+/**
+ * rdma_user_mmap_io() - Map IO memory into a process
+ *
+ * @ucontext: associated user context
+ * @vma: the vma related to the current mmap call
+ * @pfn: pfn to map
+ * @size: size to map
+ * @prot: pgprot to use in remap call
+ * @entry: mmap_entry retrieved from rdma_user_mmap_entry_get(), or NULL
+ * if mmap_entry is not used by the driver
+ *
+ * This is to be called by drivers as part of their mmap() functions if they
+ * wish to send something like PCI-E BAR memory to userspace.
+ *
+ * Return: -EINVAL on wrong flags or size, -EAGAIN on failure to map, or
+ * 0 on success.
+ */
+int rdma_user_mmap_io(struct ib_ucontext *ucontext, struct vm_area_struct *vma,
+ unsigned long pfn, unsigned long size, pgprot_t prot,
+ struct rdma_user_mmap_entry *entry)
+{
+ struct ib_uverbs_file *ufile = ucontext->ufile;
+ struct rdma_umap_priv *priv;
+
+ if (!(vma->vm_flags & VM_SHARED))
+ return -EINVAL;
+
+ if (vma->vm_end - vma->vm_start != size)
+ return -EINVAL;
+
+ /* Driver is using this wrong, must be called by ib_uverbs_mmap */
+ if (WARN_ON(!vma->vm_file ||
+ vma->vm_file->private_data != ufile))
+ return -EINVAL;
+ lockdep_assert_held(&ufile->device->disassociate_srcu);
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ vma->vm_page_prot = prot;
+ if (io_remap_pfn_range(vma, vma->vm_start, pfn, size, prot)) {
+ kfree(priv);
+ return -EAGAIN;
+ }
+
+ rdma_umap_priv_init(priv, vma, entry);
+ return 0;
+}
+EXPORT_SYMBOL(rdma_user_mmap_io);
+
+/**
+ * rdma_user_mmap_entry_get_pgoff() - Get an entry from the mmap_xa
+ *
+ * @ucontext: associated user context
+ * @pgoff: The mmap offset >> PAGE_SHIFT
+ *
+ * This function is called when a user tries to mmap with an offset (returned
+ * by rdma_user_mmap_get_offset()) it initially received from the driver. The
+ * rdma_user_mmap_entry was created by the function
+ * rdma_user_mmap_entry_insert(). This function increases the refcnt of the
+ * entry so that it won't be deleted from the xarray in the meantime.
+ *
+ * Return: a reference to the entry if it exists, or NULL if there is no
+ * match. rdma_user_mmap_entry_put() must be called to put the reference.
+ */
+struct rdma_user_mmap_entry *
+rdma_user_mmap_entry_get_pgoff(struct ib_ucontext *ucontext,
+ unsigned long pgoff)
+{
+ struct rdma_user_mmap_entry *entry;
+
+ if (pgoff > U32_MAX)
+ return NULL;
+
+ xa_lock(&ucontext->mmap_xa);
+
+ entry = xa_load(&ucontext->mmap_xa, pgoff);
+
+ /*
+	 * If the refcount is zero, the entry is already being deleted;
+	 * driver_removed indicates that no further mmaps are possible and
+	 * we are waiting for the active VMAs to be closed.
+ */
+ if (!entry || entry->start_pgoff != pgoff || entry->driver_removed ||
+ !kref_get_unless_zero(&entry->ref))
+ goto err;
+
+ xa_unlock(&ucontext->mmap_xa);
+
+ ibdev_dbg(ucontext->device, "mmap: pgoff[%#lx] npages[%#zx] returned\n",
+ pgoff, entry->npages);
+
+ return entry;
+
+err:
+ xa_unlock(&ucontext->mmap_xa);
+ return NULL;
+}
+EXPORT_SYMBOL(rdma_user_mmap_entry_get_pgoff);
+
+/**
+ * rdma_user_mmap_entry_get() - Get an entry from the mmap_xa
+ *
+ * @ucontext: associated user context
+ * @vma: the vma being mmap'd into
+ *
+ * This function is like rdma_user_mmap_entry_get_pgoff() except that it also
+ * checks that the VMA is correct.
+ */
+struct rdma_user_mmap_entry *
+rdma_user_mmap_entry_get(struct ib_ucontext *ucontext,
+ struct vm_area_struct *vma)
+{
+ struct rdma_user_mmap_entry *entry;
+
+ if (!(vma->vm_flags & VM_SHARED))
+ return NULL;
+ entry = rdma_user_mmap_entry_get_pgoff(ucontext, vma->vm_pgoff);
+ if (!entry)
+ return NULL;
+ if (entry->npages * PAGE_SIZE != vma->vm_end - vma->vm_start) {
+ rdma_user_mmap_entry_put(entry);
+ return NULL;
+ }
+ return entry;
+}
+EXPORT_SYMBOL(rdma_user_mmap_entry_get);
+
+static void rdma_user_mmap_entry_free(struct kref *kref)
+{
+ struct rdma_user_mmap_entry *entry =
+ container_of(kref, struct rdma_user_mmap_entry, ref);
+ struct ib_ucontext *ucontext = entry->ucontext;
+ unsigned long i;
+
+ /*
+	 * Erase all xarray slots occupied by this single entry; this is
+	 * deferred until all VMAs are closed so that the mmap offsets remain
+	 * unique.
+ */
+ xa_lock(&ucontext->mmap_xa);
+ for (i = 0; i < entry->npages; i++)
+ __xa_erase(&ucontext->mmap_xa, entry->start_pgoff + i);
+ xa_unlock(&ucontext->mmap_xa);
+
+ ibdev_dbg(ucontext->device, "mmap: pgoff[%#lx] npages[%#zx] removed\n",
+ entry->start_pgoff, entry->npages);
+
+ if (ucontext->device->ops.mmap_free)
+ ucontext->device->ops.mmap_free(entry);
+}
+
+/**
+ * rdma_user_mmap_entry_put() - Drop reference to the mmap entry
+ *
+ * @entry: an entry in the mmap_xa
+ *
+ * This function is called when the mapping is closed if it was
+ * an io mapping or when the driver is done with the entry for
+ * some other reason.
+ * Should be called after rdma_user_mmap_entry_get() was called and the
+ * entry is no longer needed. This function will erase the entry and
+ * free it if its refcnt reaches zero.
+ */
+void rdma_user_mmap_entry_put(struct rdma_user_mmap_entry *entry)
+{
+ kref_put(&entry->ref, rdma_user_mmap_entry_free);
+}
+EXPORT_SYMBOL(rdma_user_mmap_entry_put);
+
+/**
+ * rdma_user_mmap_entry_remove() - Drop reference to entry and
+ * mark it as unmmapable
+ *
+ * @entry: the entry to insert into the mmap_xa
+ *
+ * Drivers can call this to prevent userspace from creating more mappings
+ * for the entry; however, existing mmaps continue to exist and
+ * ops->mmap_free() will not be called until all user mmaps are destroyed.
+ */
+void rdma_user_mmap_entry_remove(struct rdma_user_mmap_entry *entry)
+{
+ if (!entry)
+ return;
+
+ entry->driver_removed = true;
+ kref_put(&entry->ref, rdma_user_mmap_entry_free);
+}
+EXPORT_SYMBOL(rdma_user_mmap_entry_remove);
+
+/**
+ * rdma_user_mmap_entry_insert() - Insert an entry to the mmap_xa
+ *
+ * @ucontext: associated user context.
+ * @entry: the entry to insert into the mmap_xa
+ * @length: length of the address that will be mmapped
+ *
+ * This function should be called by drivers that use the rdma_user_mmap
+ * interface to implement their mmap syscall. A database of mmap offsets
+ * is handled in the core, and helper functions are provided to insert
+ * entries into the database and to extract entries when the user calls
+ * mmap with the given offset. The function allocates a unique page
+ * offset that should be provided to the user; the user will then use the
+ * offset to retrieve information such as the address to be mapped and
+ * how.
+ *
+ * Return: 0 on success and -ENOMEM on failure
+ */
+int rdma_user_mmap_entry_insert(struct ib_ucontext *ucontext,
+ struct rdma_user_mmap_entry *entry,
+ size_t length)
+{
+ struct ib_uverbs_file *ufile = ucontext->ufile;
+ XA_STATE(xas, &ucontext->mmap_xa, 0);
+ u32 xa_first, xa_last, npages;
+ int err;
+ u32 i;
+
+ if (!entry)
+ return -EINVAL;
+
+ kref_init(&entry->ref);
+ entry->ucontext = ucontext;
+
+ /*
+ * We want the whole allocation to be done without interruption from a
+	 * different thread. The allocation requires finding a free range and
+	 * storing it. During the xa_insert the lock could be released, possibly
+ * allowing another thread to choose the same range.
+ */
+ mutex_lock(&ufile->umap_lock);
+
+ xa_lock(&ucontext->mmap_xa);
+
+ /* We want to find an empty range */
+ npages = (u32)DIV_ROUND_UP(length, PAGE_SIZE);
+ entry->npages = npages;
+ while (true) {
+ /* First find an empty index */
+ xas_find_marked(&xas, U32_MAX, XA_FREE_MARK);
+ if (xas.xa_node == XAS_RESTART)
+ goto err_unlock;
+
+ xa_first = xas.xa_index;
+
+ /* Is there enough room to have the range? */
+ if (check_add_overflow(xa_first, npages, &xa_last))
+ goto err_unlock;
+
+ /*
+ * Now look for the next present entry. If an entry doesn't
+ * exist, we found an empty range and can proceed.
+ */
+ xas_next_entry(&xas, xa_last - 1);
+ if (xas.xa_node == XAS_BOUNDS || xas.xa_index >= xa_last)
+ break;
+ }
+
+ for (i = xa_first; i < xa_last; i++) {
+ err = __xa_insert(&ucontext->mmap_xa, i, entry, GFP_KERNEL);
+ if (err)
+ goto err_undo;
+ }
+
+ /*
+	 * Internally the kernel uses a page offset; in libc this is a byte
+ * offset. Drivers should not return pgoff to userspace.
+ */
+ entry->start_pgoff = xa_first;
+ xa_unlock(&ucontext->mmap_xa);
+ mutex_unlock(&ufile->umap_lock);
+
+ ibdev_dbg(ucontext->device, "mmap: pgoff[%#lx] npages[%#x] inserted\n",
+ entry->start_pgoff, npages);
+
+ return 0;
+
+err_undo:
+ for (; i > xa_first; i--)
+ __xa_erase(&ucontext->mmap_xa, i - 1);
+
+err_unlock:
+ xa_unlock(&ucontext->mmap_xa);
+ mutex_unlock(&ufile->umap_lock);
+ return -ENOMEM;
+}
+EXPORT_SYMBOL(rdma_user_mmap_entry_insert);
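
Put together, a driver mmap handler built on these helpers follows a get/map/put pattern; a hedged sketch in which struct my_entry and its pfn field are illustrative driver-side constructs:

struct my_entry {
	struct rdma_user_mmap_entry rdma_entry;
	unsigned long pfn;
};

static int my_drv_mmap(struct ib_ucontext *uctx, struct vm_area_struct *vma)
{
	struct rdma_user_mmap_entry *entry;
	struct my_entry *me;
	int ret;

	entry = rdma_user_mmap_entry_get(uctx, vma);	/* takes a kref */
	if (!entry)
		return -EINVAL;

	me = container_of(entry, struct my_entry, rdma_entry);
	ret = rdma_user_mmap_io(uctx, vma, me->pfn,
				entry->npages * PAGE_SIZE,
				pgprot_noncached(vma->vm_page_prot), entry);

	/* rdma_user_mmap_io() took its own reference for the mapping. */
	rdma_user_mmap_entry_put(entry);
	return ret;
}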
diff --git a/drivers/infiniband/core/iwpm_util.h b/drivers/infiniband/core/iwpm_util.h
index 7e2bcc72f66c..1bf87d9fd0bd 100644
--- a/drivers/infiniband/core/iwpm_util.h
+++ b/drivers/infiniband/core/iwpm_util.h
@@ -210,8 +210,10 @@ int iwpm_mapinfo_available(void);
/**
* iwpm_compare_sockaddr - Compare two sockaddr storage structs
+ * @a_sockaddr: first sockaddr to compare
+ * @b_sockaddr: second sockaddr to compare
*
- * Returns 0 if they are holding the same ip/tcp address info,
+ * Return: 0 if they are holding the same ip/tcp address info,
* otherwise returns 1
*/
int iwpm_compare_sockaddr(struct sockaddr_storage *a_sockaddr,
@@ -272,6 +274,7 @@ void iwpm_print_sockaddr(struct sockaddr_storage *sockaddr, char *msg);
* iwpm_send_hello - Send hello response to iwpmd
*
* @nl_client: The index of the netlink client
+ * @iwpm_pid: The pid of the user space port mapper
* @abi_version: The kernel's abi_version
*
* Returns 0 on success or a negative error code
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 9947d16edef2..c54db13fa9b0 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -913,9 +913,9 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
/* No GRH for DR SMP */
ret = device->ops.process_mad(device, 0, port_num, &mad_wc, NULL,
- (const struct ib_mad_hdr *)smp, mad_size,
- (struct ib_mad_hdr *)mad_priv->mad,
- &mad_size, &out_mad_pkey_index);
+ (const struct ib_mad *)smp,
+ (struct ib_mad *)mad_priv->mad, &mad_size,
+ &out_mad_pkey_index);
switch (ret)
{
case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
@@ -1397,25 +1397,6 @@ void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc)
}
EXPORT_SYMBOL(ib_free_recv_mad);
-struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
- u8 rmpp_version,
- ib_mad_send_handler send_handler,
- ib_mad_recv_handler recv_handler,
- void *context)
-{
- return ERR_PTR(-EINVAL); /* XXX: for now */
-}
-EXPORT_SYMBOL(ib_redirect_mad_qp);
-
-int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
- struct ib_wc *wc)
-{
- dev_err(&mad_agent->device->dev,
- "ib_process_mad_wc() not implemented yet\n");
- return 0;
-}
-EXPORT_SYMBOL(ib_process_mad_wc);
-
static int method_in_use(struct ib_mad_mgmt_method_table **method,
struct ib_mad_reg_req *mad_reg_req)
{
@@ -2340,9 +2321,9 @@ static void ib_mad_recv_done(struct ib_cq *cq, struct ib_wc *wc)
if (port_priv->device->ops.process_mad) {
ret = port_priv->device->ops.process_mad(
port_priv->device, 0, port_priv->port_num, wc,
- &recv->grh, (const struct ib_mad_hdr *)recv->mad,
- recv->mad_size, (struct ib_mad_hdr *)response->mad,
- &mad_size, &resp_mad_pkey_index);
+ &recv->grh, (const struct ib_mad *)recv->mad,
+ (struct ib_mad *)response->mad, &mad_size,
+ &resp_mad_pkey_index);
if (opa)
wc->pkey_index = resp_mad_pkey_index;
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
index c03af08b80e7..cbf6041a5d4a 100644
--- a/drivers/infiniband/core/nldev.c
+++ b/drivers/infiniband/core/nldev.c
@@ -42,6 +42,9 @@
#include "cma_priv.h"
#include "restrack.h"
+typedef int (*res_fill_func_t)(struct sk_buff*, bool,
+ struct rdma_restrack_entry*, uint32_t);
+
/*
* Sort array elements by the netlink attribute name
*/
@@ -180,6 +183,19 @@ static int _rdma_nl_put_driver_u64(struct sk_buff *msg, const char *name,
return 0;
}
+int rdma_nl_put_driver_string(struct sk_buff *msg, const char *name,
+ const char *str)
+{
+ if (put_driver_name_print_type(msg, name,
+ RDMA_NLDEV_PRINT_TYPE_UNSPEC))
+ return -EMSGSIZE;
+ if (nla_put_string(msg, RDMA_NLDEV_ATTR_DRIVER_STRING, str))
+ return -EMSGSIZE;
+
+ return 0;
+}
+EXPORT_SYMBOL(rdma_nl_put_driver_string);
+
int rdma_nl_put_driver_u32(struct sk_buff *msg, const char *name, u32 value)
{
return _rdma_nl_put_driver_u32(msg, name, RDMA_NLDEV_PRINT_TYPE_UNSPEC,
@@ -399,20 +415,34 @@ err:
static int fill_res_name_pid(struct sk_buff *msg,
struct rdma_restrack_entry *res)
{
+ int err = 0;
+
/*
	 * For user resources, user should read /proc/PID/comm to get the
* name of the task file.
*/
if (rdma_is_kernel_res(res)) {
- if (nla_put_string(msg, RDMA_NLDEV_ATTR_RES_KERN_NAME,
- res->kern_name))
- return -EMSGSIZE;
+ err = nla_put_string(msg, RDMA_NLDEV_ATTR_RES_KERN_NAME,
+ res->kern_name);
} else {
- if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PID,
- task_pid_vnr(res->task)))
- return -EMSGSIZE;
+ pid_t pid;
+
+ pid = task_pid_vnr(res->task);
+ /*
+ * Task is dead and in zombie state.
+ * There is no need to print PID anymore.
+ */
+ if (pid)
+ /*
+			 * This part is racy: the task can be killed and the
+			 * PID will become zero right here, but that is ok,
+			 * since the next query won't return a PID. We don't
+			 * promise a real-time reflection of SW objects.
+ */
+ err = nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PID, pid);
}
- return 0;
+
+ return err ? -EMSGSIZE : 0;
}
static bool fill_res_entry(struct ib_device *dev, struct sk_buff *msg,
@@ -423,6 +453,14 @@ static bool fill_res_entry(struct ib_device *dev, struct sk_buff *msg,
return dev->ops.fill_res_entry(msg, res);
}
+static bool fill_stat_entry(struct ib_device *dev, struct sk_buff *msg,
+ struct rdma_restrack_entry *res)
+{
+ if (!dev->ops.fill_stat_entry)
+ return false;
+ return dev->ops.fill_stat_entry(msg, res);
+}
+
static int fill_res_qp_entry(struct sk_buff *msg, bool has_cap_net_admin,
struct rdma_restrack_entry *res, uint32_t port)
{
@@ -698,9 +736,6 @@ static int fill_stat_counter_qps(struct sk_buff *msg,
rt = &counter->device->res[RDMA_RESTRACK_QP];
xa_lock(&rt->xa);
xa_for_each(&rt->xa, id, res) {
- if (!rdma_is_visible_in_pid_ns(res))
- continue;
-
qp = container_of(res, struct ib_qp, res);
if (qp->qp_type == IB_QPT_RAW_PACKET && !capable(CAP_NET_RAW))
continue;
@@ -723,8 +758,8 @@ err:
return ret;
}
-static int fill_stat_hwcounter_entry(struct sk_buff *msg,
- const char *name, u64 value)
+int rdma_nl_stat_hwcounter_entry(struct sk_buff *msg, const char *name,
+ u64 value)
{
struct nlattr *entry_attr;
@@ -746,6 +781,25 @@ err:
nla_nest_cancel(msg, entry_attr);
return -EMSGSIZE;
}
+EXPORT_SYMBOL(rdma_nl_stat_hwcounter_entry);
+
+static int fill_stat_mr_entry(struct sk_buff *msg, bool has_cap_net_admin,
+ struct rdma_restrack_entry *res, uint32_t port)
+{
+ struct ib_mr *mr = container_of(res, struct ib_mr, res);
+ struct ib_device *dev = mr->pd->device;
+
+ if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_MRN, res->id))
+ goto err;
+
+ if (fill_stat_entry(dev, msg, res))
+ goto err;
+
+ return 0;
+
+err:
+ return -EMSGSIZE;
+}
static int fill_stat_counter_hwcounters(struct sk_buff *msg,
struct rdma_counter *counter)
@@ -759,7 +813,7 @@ static int fill_stat_counter_hwcounters(struct sk_buff *msg,
return -EMSGSIZE;
for (i = 0; i < st->num_counters; i++)
- if (fill_stat_hwcounter_entry(msg, st->names[i], st->value[i]))
+ if (rdma_nl_stat_hwcounter_entry(msg, st->names[i], st->value[i]))
goto err;
nla_nest_end(msg, table_attr);
@@ -1117,8 +1171,6 @@ static int nldev_res_get_dumpit(struct sk_buff *skb,
}
struct nldev_fill_res_entry {
- int (*fill_res_func)(struct sk_buff *msg, bool has_cap_net_admin,
- struct rdma_restrack_entry *res, u32 port);
enum rdma_nldev_attr nldev_attr;
enum rdma_nldev_command nldev_cmd;
u8 flags;
@@ -1132,21 +1184,18 @@ enum nldev_res_flags {
static const struct nldev_fill_res_entry fill_entries[RDMA_RESTRACK_MAX] = {
[RDMA_RESTRACK_QP] = {
- .fill_res_func = fill_res_qp_entry,
.nldev_cmd = RDMA_NLDEV_CMD_RES_QP_GET,
.nldev_attr = RDMA_NLDEV_ATTR_RES_QP,
.entry = RDMA_NLDEV_ATTR_RES_QP_ENTRY,
.id = RDMA_NLDEV_ATTR_RES_LQPN,
},
[RDMA_RESTRACK_CM_ID] = {
- .fill_res_func = fill_res_cm_id_entry,
.nldev_cmd = RDMA_NLDEV_CMD_RES_CM_ID_GET,
.nldev_attr = RDMA_NLDEV_ATTR_RES_CM_ID,
.entry = RDMA_NLDEV_ATTR_RES_CM_ID_ENTRY,
.id = RDMA_NLDEV_ATTR_RES_CM_IDN,
},
[RDMA_RESTRACK_CQ] = {
- .fill_res_func = fill_res_cq_entry,
.nldev_cmd = RDMA_NLDEV_CMD_RES_CQ_GET,
.nldev_attr = RDMA_NLDEV_ATTR_RES_CQ,
.flags = NLDEV_PER_DEV,
@@ -1154,7 +1203,6 @@ static const struct nldev_fill_res_entry fill_entries[RDMA_RESTRACK_MAX] = {
.id = RDMA_NLDEV_ATTR_RES_CQN,
},
[RDMA_RESTRACK_MR] = {
- .fill_res_func = fill_res_mr_entry,
.nldev_cmd = RDMA_NLDEV_CMD_RES_MR_GET,
.nldev_attr = RDMA_NLDEV_ATTR_RES_MR,
.flags = NLDEV_PER_DEV,
@@ -1162,7 +1210,6 @@ static const struct nldev_fill_res_entry fill_entries[RDMA_RESTRACK_MAX] = {
.id = RDMA_NLDEV_ATTR_RES_MRN,
},
[RDMA_RESTRACK_PD] = {
- .fill_res_func = fill_res_pd_entry,
.nldev_cmd = RDMA_NLDEV_CMD_RES_PD_GET,
.nldev_attr = RDMA_NLDEV_ATTR_RES_PD,
.flags = NLDEV_PER_DEV,
@@ -1170,7 +1217,6 @@ static const struct nldev_fill_res_entry fill_entries[RDMA_RESTRACK_MAX] = {
.id = RDMA_NLDEV_ATTR_RES_PDN,
},
[RDMA_RESTRACK_COUNTER] = {
- .fill_res_func = fill_res_counter_entry,
.nldev_cmd = RDMA_NLDEV_CMD_STAT_GET,
.nldev_attr = RDMA_NLDEV_ATTR_STAT_COUNTER,
.entry = RDMA_NLDEV_ATTR_STAT_COUNTER_ENTRY,
@@ -1180,7 +1226,8 @@ static const struct nldev_fill_res_entry fill_entries[RDMA_RESTRACK_MAX] = {
static int res_get_common_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack,
- enum rdma_restrack_type res_type)
+ enum rdma_restrack_type res_type,
+ res_fill_func_t fill_func)
{
const struct nldev_fill_res_entry *fe = &fill_entries[res_type];
struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
@@ -1222,11 +1269,6 @@ static int res_get_common_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
goto err;
}
- if (!rdma_is_visible_in_pid_ns(res)) {
- ret = -ENOENT;
- goto err_get;
- }
-
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg) {
ret = -ENOMEM;
@@ -1243,7 +1285,9 @@ static int res_get_common_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
}
has_cap_net_admin = netlink_capable(skb, CAP_NET_ADMIN);
- ret = fe->fill_res_func(msg, has_cap_net_admin, res, port);
+
+ ret = fill_func(msg, has_cap_net_admin, res, port);
+
rdma_restrack_put(res);
if (ret)
goto err_free;
@@ -1263,7 +1307,8 @@ err:
static int res_get_common_dumpit(struct sk_buff *skb,
struct netlink_callback *cb,
- enum rdma_restrack_type res_type)
+ enum rdma_restrack_type res_type,
+ res_fill_func_t fill_func)
{
const struct nldev_fill_res_entry *fe = &fill_entries[res_type];
struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
@@ -1334,9 +1379,6 @@ static int res_get_common_dumpit(struct sk_buff *skb,
* objects.
*/
xa_for_each(&rt->xa, id, res) {
- if (!rdma_is_visible_in_pid_ns(res))
- continue;
-
if (idx < start || !rdma_restrack_get(res))
goto next;
@@ -1351,7 +1393,8 @@ static int res_get_common_dumpit(struct sk_buff *skb,
goto msg_full;
}
- ret = fe->fill_res_func(skb, has_cap_net_admin, res, port);
+ ret = fill_func(skb, has_cap_net_admin, res, port);
+
rdma_restrack_put(res);
if (ret) {
@@ -1394,17 +1437,19 @@ err_index:
return ret;
}
-#define RES_GET_FUNCS(name, type) \
- static int nldev_res_get_##name##_dumpit(struct sk_buff *skb, \
+#define RES_GET_FUNCS(name, type) \
+ static int nldev_res_get_##name##_dumpit(struct sk_buff *skb, \
struct netlink_callback *cb) \
- { \
- return res_get_common_dumpit(skb, cb, type); \
- } \
- static int nldev_res_get_##name##_doit(struct sk_buff *skb, \
- struct nlmsghdr *nlh, \
+ { \
+ return res_get_common_dumpit(skb, cb, type, \
+ fill_res_##name##_entry); \
+ } \
+ static int nldev_res_get_##name##_doit(struct sk_buff *skb, \
+ struct nlmsghdr *nlh, \
struct netlink_ext_ack *extack) \
- { \
- return res_get_common_doit(skb, nlh, extack, type); \
+ { \
+ return res_get_common_doit(skb, nlh, extack, type, \
+ fill_res_##name##_entry); \
}
RES_GET_FUNCS(qp, RDMA_RESTRACK_QP);
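/*
 * For reference, the RES_GET_FUNCS(qp, RDMA_RESTRACK_QP) invocation above
 * expands to roughly the following pair of handlers (an expansion sketch,
 * not additional code from the patch):
 */
static int nldev_res_get_qp_dumpit(struct sk_buff *skb,
				   struct netlink_callback *cb)
{
	return res_get_common_dumpit(skb, cb, RDMA_RESTRACK_QP,
				     fill_res_qp_entry);
}

static int nldev_res_get_qp_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
				 struct netlink_ext_ack *extack)
{
	return res_get_common_doit(skb, nlh, extack, RDMA_RESTRACK_QP,
				   fill_res_qp_entry);
}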
@@ -1880,7 +1925,7 @@ static int stat_get_doit_default_counter(struct sk_buff *skb,
for (i = 0; i < num_cnts; i++) {
v = stats->value[i] +
rdma_counter_get_hwstat_value(device, port, i);
- if (fill_stat_hwcounter_entry(msg, stats->names[i], v)) {
+ if (rdma_nl_stat_hwcounter_entry(msg, stats->names[i], v)) {
ret = -EMSGSIZE;
goto err_table;
}
@@ -1989,7 +2034,10 @@ static int nldev_stat_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
case RDMA_NLDEV_ATTR_RES_QP:
ret = stat_get_doit_qp(skb, nlh, extack, tb);
break;
-
+ case RDMA_NLDEV_ATTR_RES_MR:
+ ret = res_get_common_doit(skb, nlh, extack, RDMA_RESTRACK_MR,
+ fill_stat_mr_entry);
+ break;
default:
ret = -EINVAL;
break;
@@ -2013,7 +2061,10 @@ static int nldev_stat_get_dumpit(struct sk_buff *skb,
case RDMA_NLDEV_ATTR_RES_QP:
ret = nldev_res_get_counter_dumpit(skb, cb);
break;
-
+ case RDMA_NLDEV_ATTR_RES_MR:
+ ret = res_get_common_dumpit(skb, cb, RDMA_RESTRACK_MR,
+ fill_stat_mr_entry);
+ break;
default:
ret = -EINVAL;
break;
diff --git a/drivers/infiniband/core/rdma_core.c b/drivers/infiniband/core/rdma_core.c
index ccf4d069c25c..6c72773faf29 100644
--- a/drivers/infiniband/core/rdma_core.c
+++ b/drivers/infiniband/core/rdma_core.c
@@ -817,6 +817,7 @@ static void ufile_destroy_ucontext(struct ib_uverbs_file *ufile,
rdma_restrack_del(&ucontext->res);
ib_dev->ops.dealloc_ucontext(ucontext);
+ WARN_ON(!xa_empty(&ucontext->mmap_xa));
kfree(ucontext);
ufile->ucontext = NULL;
diff --git a/drivers/infiniband/core/restrack.c b/drivers/infiniband/core/restrack.c
index a07665f7ef8c..62fbb0ae9cb4 100644
--- a/drivers/infiniband/core/restrack.c
+++ b/drivers/infiniband/core/restrack.c
@@ -116,11 +116,8 @@ int rdma_restrack_count(struct ib_device *dev, enum rdma_restrack_type type)
u32 cnt = 0;
xa_lock(&rt->xa);
- xas_for_each(&xas, e, U32_MAX) {
- if (!rdma_is_visible_in_pid_ns(e))
- continue;
+ xas_for_each(&xas, e, U32_MAX)
cnt++;
- }
xa_unlock(&rt->xa);
return cnt;
}
@@ -346,18 +343,3 @@ out:
}
}
EXPORT_SYMBOL(rdma_restrack_del);
-
-bool rdma_is_visible_in_pid_ns(struct rdma_restrack_entry *res)
-{
- /*
- * 1. Kern resources should be visible in init
- * namespace only
- * 2. Present only resources visible in the current
- * namespace
- */
- if (rdma_is_kernel_res(res))
- return task_active_pid_ns(current) == &init_pid_ns;
-
- /* PID 0 means that resource is not found in current namespace */
- return task_pid_vnr(res->task);
-}
diff --git a/drivers/infiniband/core/restrack.h b/drivers/infiniband/core/restrack.h
index 7bd177cc0a61..d084e5f89849 100644
--- a/drivers/infiniband/core/restrack.h
+++ b/drivers/infiniband/core/restrack.h
@@ -27,5 +27,4 @@ int rdma_restrack_init(struct ib_device *dev);
void rdma_restrack_clean(struct ib_device *dev);
void rdma_restrack_attach_task(struct rdma_restrack_entry *res,
struct task_struct *task);
-bool rdma_is_visible_in_pid_ns(struct rdma_restrack_entry *res);
#endif /* _RDMA_CORE_RESTRACK_H_ */
diff --git a/drivers/infiniband/core/rw.c b/drivers/infiniband/core/rw.c
index 5337393d4dfe..4fad732f9b3c 100644
--- a/drivers/infiniband/core/rw.c
+++ b/drivers/infiniband/core/rw.c
@@ -20,14 +20,17 @@ module_param_named(force_mr, rdma_rw_force_mr, bool, 0);
MODULE_PARM_DESC(force_mr, "Force usage of MRs for RDMA READ/WRITE operations");
/*
- * Check if the device might use memory registration. This is currently only
- * true for iWarp devices. In the future we can hopefully fine tune this based
- * on HCA driver input.
+ * Report whether memory registration should be used. Memory registration must
+ * be used for iWarp devices because of iWARP-specific limitations. Memory
+ * registration is also enabled if registering memory might yield better
+ * performance than using multiple SGE entries; see rdma_rw_io_needs_mr().
*/
static inline bool rdma_rw_can_use_mr(struct ib_device *dev, u8 port_num)
{
if (rdma_protocol_iwarp(dev, port_num))
return true;
+ if (dev->attrs.max_sgl_rd)
+ return true;
if (unlikely(rdma_rw_force_mr))
return true;
return false;
@@ -35,17 +38,19 @@ static inline bool rdma_rw_can_use_mr(struct ib_device *dev, u8 port_num)
/*
* Check if the device will use memory registration for this RW operation.
- * We currently always use memory registrations for iWarp RDMA READs, and
- * have a debug option to force usage of MRs.
- *
- * XXX: In the future we can hopefully fine tune this based on HCA driver
- * input.
+ * For RDMA READs we must use MRs on iWarp and can optionally use them as an
+ * optimization otherwise. Additionally we have a debug option to force usage
+ * of MRs to help test this code path.
*/
static inline bool rdma_rw_io_needs_mr(struct ib_device *dev, u8 port_num,
enum dma_data_direction dir, int dma_nents)
{
- if (rdma_protocol_iwarp(dev, port_num) && dir == DMA_FROM_DEVICE)
- return true;
+ if (dir == DMA_FROM_DEVICE) {
+ if (rdma_protocol_iwarp(dev, port_num))
+ return true;
+ if (dev->attrs.max_sgl_rd && dma_nents > dev->attrs.max_sgl_rd)
+ return true;
+ }
if (unlikely(rdma_rw_force_mr))
return true;
return false;
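/*
 * Illustrative sketch, not part of the patch: the per-READ decision the
 * two helpers above implement, written out for a hypothetical device.
 * With max_sgl_rd = 16, a READ scattered over 32 DMA entries would use an
 * MR, while one over 8 entries would stick with plain SGEs.
 */
static bool sketch_read_needs_mr(bool is_iwarp, u32 max_sgl_rd, int dma_nents)
{
	if (is_iwarp)
		return true;	/* iWARP requires an MR for RDMA READs */
	if (max_sgl_rd && dma_nents > max_sgl_rd)
		return true;	/* registration beats a long SGE list */
	return false;
}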
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 17fc2936c077..8917125ea16d 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -1246,7 +1246,7 @@ static int init_ah_attr_grh_fields(struct ib_device *device, u8 port_num,
* @port_num: Port on the specified device.
* @rec: path record entry to use for ah attributes initialization.
 * @ah_attr: address handle attributes to initialize from path record.
- * @sgid_attr: SGID attribute to consider during initialization.
+ * @gid_attr: SGID attribute to consider during initialization.
*
* When ib_init_ah_attr_from_path() returns success,
* (a) for IB link layer it optionally contains a reference to SGID attribute
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index 7a50cedcef1f..087682e6969e 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -481,8 +481,8 @@ static int get_perf_mad(struct ib_device *dev, int port_num, __be16 attr,
if (!dev->ops.process_mad)
return -ENOSYS;
- in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
- out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
+ in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
+ out_mad = kzalloc(sizeof(*out_mad), GFP_KERNEL);
if (!in_mad || !out_mad) {
ret = -ENOMEM;
goto out;
@@ -497,10 +497,8 @@ static int get_perf_mad(struct ib_device *dev, int port_num, __be16 attr,
if (attr != IB_PMA_CLASS_PORT_INFO)
in_mad->data[41] = port_num; /* PortSelect field */
- if ((dev->ops.process_mad(dev, IB_MAD_IGNORE_MKEY,
- port_num, NULL, NULL,
- (const struct ib_mad_hdr *)in_mad, mad_size,
- (struct ib_mad_hdr *)out_mad, &mad_size,
+ if ((dev->ops.process_mad(dev, IB_MAD_IGNORE_MKEY, port_num, NULL, NULL,
+ in_mad, out_mad, &mad_size,
&out_mad_pkey_index) &
(IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY)) !=
(IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY)) {
@@ -1268,7 +1266,7 @@ static ssize_t node_desc_store(struct device *device,
int ret;
if (!dev->ops.modify_device)
- return -EIO;
+ return -EOPNOTSUPP;
memcpy(desc.node_desc, buf, min_t(int, count, IB_DEVICE_NODE_DESC_MAX));
ret = ib_modify_device(dev, IB_DEVICE_MODIFY_NODE_DESC, &desc);
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 24244a2f68cc..7a3b99597ead 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -185,10 +185,9 @@ EXPORT_SYMBOL(ib_umem_find_best_pgsz);
* @addr: userspace virtual address to start at
* @size: length of region to pin
* @access: IB_ACCESS_xxx flags for memory being pinned
- * @dmasync: flush in-flight DMA when the memory region is written
*/
struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
- size_t size, int access, int dmasync)
+ size_t size, int access)
{
struct ib_ucontext *context;
struct ib_umem *umem;
@@ -199,7 +198,6 @@ struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
struct mm_struct *mm;
unsigned long npages;
int ret;
- unsigned long dma_attrs = 0;
struct scatterlist *sg;
unsigned int gup_flags = FOLL_WRITE;
@@ -211,9 +209,6 @@ struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
if (!context)
return ERR_PTR(-EIO);
- if (dmasync)
- dma_attrs |= DMA_ATTR_WRITE_BARRIER;
-
/*
* If the combination of the addr and size requested for this memory
* region causes an integer overflow, return error.
@@ -294,11 +289,10 @@ struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
sg_mark_end(sg);
- umem->nmap = ib_dma_map_sg_attrs(context->device,
+ umem->nmap = ib_dma_map_sg(context->device,
umem->sg_head.sgl,
umem->sg_nents,
- DMA_BIDIRECTIONAL,
- dma_attrs);
+ DMA_BIDIRECTIONAL);
if (!umem->nmap) {
ret = -ENOMEM;
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index 163ff7ba92b7..d7d5fadf0899 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -508,7 +508,6 @@ static int ib_umem_odp_map_dma_single_page(
{
struct ib_device *dev = umem_odp->umem.ibdev;
dma_addr_t dma_addr;
- int remove_existing_mapping = 0;
int ret = 0;
/*
@@ -534,28 +533,29 @@ static int ib_umem_odp_map_dma_single_page(
} else if (umem_odp->page_list[page_index] == page) {
umem_odp->dma_list[page_index] |= access_mask;
} else {
- pr_err("error: got different pages in IB device and from get_user_pages. IB device page: %p, gup page: %p\n",
- umem_odp->page_list[page_index], page);
- /* Better remove the mapping now, to prevent any further
- * damage. */
- remove_existing_mapping = 1;
+ /*
+	 * There is a race here where we could have done:
+ *
+ * CPU0 CPU1
+ * get_user_pages()
+ * invalidate()
+ * page_fault()
+ * mutex_lock(umem_mutex)
+ * page from GUP != page in ODP
+ *
+	 * It should be prevented by the retry test above, as reading
+	 * the seq number should be reliable under the
+	 * umem_mutex. Thus something is seriously wrong if
+	 * things get here.
+ */
+ WARN(true,
+ "Got different pages in IB device and from get_user_pages. IB device page: %p, gup page: %p\n",
+ umem_odp->page_list[page_index], page);
+ ret = -EAGAIN;
}
out:
put_user_page(page);
-
- if (remove_existing_mapping) {
- ib_umem_notifier_start_account(umem_odp);
- dev->ops.invalidate_range(
- umem_odp,
- ib_umem_start(umem_odp) +
- (page_index << umem_odp->page_shift),
- ib_umem_start(umem_odp) +
- ((page_index + 1) << umem_odp->page_shift));
- ib_umem_notifier_end_account(umem_odp);
- ret = -EAGAIN;
- }
-
return ret;
}
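/*
 * Minimal sketch of the retry pattern the new comment relies on; the
 * helpers marked "hypothetical" are illustrative names, not driver code.
 * The fault path samples an invalidation sequence number before GUP and
 * retries when an invalidate ran in between, so a stale page can never
 * reach the dma_list.
 */
unsigned long seq;
struct page *page;

for (;;) {
	seq = read_invalidate_seq(umem_odp);		/* hypothetical */
	page = gup_one_page(user_virt);			/* hypothetical */
	mutex_lock(&umem_odp->umem_mutex);
	if (!invalidate_seq_changed(umem_odp, seq))	/* hypothetical */
		break;			/* safe to map the page below */
	mutex_unlock(&umem_odp->umem_mutex);
	put_user_page(page);	/* raced with invalidate; try again */
}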
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 14a80fd9f464..06ed32c8662f 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -252,6 +252,8 @@ static int ib_uverbs_get_context(struct uverbs_attr_bundle *attrs)
ucontext->closing = false;
ucontext->cleanup_retryable = false;
+ xa_init_flags(&ucontext->mmap_xa, XA_FLAGS_ALLOC);
+
ret = get_unused_fd_flags(O_CLOEXEC);
if (ret < 0)
goto err_free;
diff --git a/drivers/infiniband/core/uverbs_ioctl.c b/drivers/infiniband/core/uverbs_ioctl.c
index 61758201d9b2..269938f59d3f 100644
--- a/drivers/infiniband/core/uverbs_ioctl.c
+++ b/drivers/infiniband/core/uverbs_ioctl.c
@@ -795,6 +795,9 @@ int uverbs_copy_to_struct_or_zero(const struct uverbs_attr_bundle *bundle,
{
const struct uverbs_attr *attr = uverbs_attr_get(bundle, idx);
+ if (IS_ERR(attr))
+ return PTR_ERR(attr);
+
if (size < attr->ptr_attr.len) {
if (clear_user(u64_to_user_ptr(attr->ptr_attr.data) + size,
attr->ptr_attr.len - size))
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index db98111b47f4..9aa7ffc1d12a 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -772,6 +772,8 @@ out_unlock:
return (ret) ? : count;
}
+static const struct vm_operations_struct rdma_umap_ops;
+
static int ib_uverbs_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct ib_uverbs_file *file = filp->private_data;
@@ -785,7 +787,7 @@ static int ib_uverbs_mmap(struct file *filp, struct vm_area_struct *vma)
ret = PTR_ERR(ucontext);
goto out;
}
-
+ vma->vm_ops = &rdma_umap_ops;
ret = ucontext->device->ops.mmap(ucontext, vma);
out:
srcu_read_unlock(&file->device->disassociate_srcu, srcu_key);
@@ -793,38 +795,6 @@ out:
}
/*
- * Each time we map IO memory into user space this keeps track of the mapping.
- * When the device is hot-unplugged we 'zap' the mmaps in user space to point
- * to the zero page and allow the hot unplug to proceed.
- *
- * This is necessary for cases like PCI physical hot unplug as the actual BAR
- * memory may vanish after this and access to it from userspace could MCE.
- *
- * RDMA drivers supporting disassociation must have their user space designed
- * to cope in some way with their IO pages going to the zero page.
- */
-struct rdma_umap_priv {
- struct vm_area_struct *vma;
- struct list_head list;
-};
-
-static const struct vm_operations_struct rdma_umap_ops;
-
-static void rdma_umap_priv_init(struct rdma_umap_priv *priv,
- struct vm_area_struct *vma)
-{
- struct ib_uverbs_file *ufile = vma->vm_file->private_data;
-
- priv->vma = vma;
- vma->vm_private_data = priv;
- vma->vm_ops = &rdma_umap_ops;
-
- mutex_lock(&ufile->umap_lock);
- list_add(&priv->list, &ufile->umaps);
- mutex_unlock(&ufile->umap_lock);
-}
-
-/*
* The VMA has been dup'd, initialize the vm_private_data with a new tracking
* struct
*/
@@ -849,7 +819,7 @@ static void rdma_umap_open(struct vm_area_struct *vma)
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
goto out_unlock;
- rdma_umap_priv_init(priv, vma);
+ rdma_umap_priv_init(priv, vma, opriv->entry);
up_read(&ufile->hw_destroy_rwsem);
return;
@@ -880,6 +850,9 @@ static void rdma_umap_close(struct vm_area_struct *vma)
* this point.
*/
mutex_lock(&ufile->umap_lock);
+ if (priv->entry)
+ rdma_user_mmap_entry_put(priv->entry);
+
list_del(&priv->list);
mutex_unlock(&ufile->umap_lock);
kfree(priv);
@@ -931,44 +904,6 @@ static const struct vm_operations_struct rdma_umap_ops = {
.fault = rdma_umap_fault,
};
-/*
- * Map IO memory into a process. This is to be called by drivers as part of
- * their mmap() functions if they wish to send something like PCI-E BAR memory
- * to userspace.
- */
-int rdma_user_mmap_io(struct ib_ucontext *ucontext, struct vm_area_struct *vma,
- unsigned long pfn, unsigned long size, pgprot_t prot)
-{
- struct ib_uverbs_file *ufile = ucontext->ufile;
- struct rdma_umap_priv *priv;
-
- if (!(vma->vm_flags & VM_SHARED))
- return -EINVAL;
-
- if (vma->vm_end - vma->vm_start != size)
- return -EINVAL;
-
- /* Driver is using this wrong, must be called by ib_uverbs_mmap */
- if (WARN_ON(!vma->vm_file ||
- vma->vm_file->private_data != ufile))
- return -EINVAL;
- lockdep_assert_held(&ufile->device->disassociate_srcu);
-
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- vma->vm_page_prot = prot;
- if (io_remap_pfn_range(vma, vma->vm_start, pfn, size, prot)) {
- kfree(priv);
- return -EAGAIN;
- }
-
- rdma_umap_priv_init(priv, vma);
- return 0;
-}
-EXPORT_SYMBOL(rdma_user_mmap_io);
-
void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile)
{
struct rdma_umap_priv *priv, *next_priv;
@@ -1018,6 +953,11 @@ void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile)
zap_vma_ptes(vma, vma->vm_start,
vma->vm_end - vma->vm_start);
+
+ if (priv->entry) {
+ rdma_user_mmap_entry_put(priv->entry);
+ priv->entry = NULL;
+ }
}
mutex_unlock(&ufile->umap_lock);
skip_mm:
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 35c2841a569e..dd765e176cdd 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -244,6 +244,8 @@ EXPORT_SYMBOL(rdma_port_get_link_layer);
/**
* ib_alloc_pd - Allocates an unused protection domain.
* @device: The device on which to allocate the protection domain.
+ * @flags: protection domain flags
+ * @caller: caller's build-time module name
*
* A protection domain object provides an association between QPs, shared
* receive queues, address handles, memory regions, and memory windows.
@@ -2459,6 +2461,16 @@ int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
}
EXPORT_SYMBOL(ib_set_vf_guid);
+int ib_get_vf_guid(struct ib_device *device, int vf, u8 port,
+ struct ifla_vf_guid *node_guid,
+ struct ifla_vf_guid *port_guid)
+{
+ if (!device->ops.get_vf_guid)
+ return -EOPNOTSUPP;
+
+ return device->ops.get_vf_guid(device, vf, port, node_guid, port_guid);
+}
+EXPORT_SYMBOL(ib_get_vf_guid);
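/*
 * Hypothetical caller sketch: like the other ib_*_vf_* wrappers, the new
 * helper returns -EOPNOTSUPP when the driver does not provide the op, so
 * callers can degrade gracefully instead of failing hard.
 */
struct ifla_vf_guid node_guid, port_guid;
int err;

err = ib_get_vf_guid(device, vf, port, &node_guid, &port_guid);
if (err == -EOPNOTSUPP)
	err = 0;	/* device cannot report VF GUIDs; skip them */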
/**
* ib_map_mr_sg_pi() - Map the dma mapped SG lists for PI (protection
* information) and set an appropriate memory region for registration.
diff --git a/drivers/infiniband/hw/Makefile b/drivers/infiniband/hw/Makefile
index 433fca59febd..0aeccd984889 100644
--- a/drivers/infiniband/hw/Makefile
+++ b/drivers/infiniband/hw/Makefile
@@ -1,7 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_INFINIBAND_MTHCA) += mthca/
obj-$(CONFIG_INFINIBAND_QIB) += qib/
-obj-$(CONFIG_INFINIBAND_CXGB3) += cxgb3/
obj-$(CONFIG_INFINIBAND_CXGB4) += cxgb4/
obj-$(CONFIG_INFINIBAND_EFA) += efa/
obj-$(CONFIG_INFINIBAND_I40IW) += i40iw/
diff --git a/drivers/infiniband/hw/bnxt_re/Kconfig b/drivers/infiniband/hw/bnxt_re/Kconfig
index ab8779d23382..b83f1cc38c52 100644
--- a/drivers/infiniband/hw/bnxt_re/Kconfig
+++ b/drivers/infiniband/hw/bnxt_re/Kconfig
@@ -1,11 +1,11 @@
# SPDX-License-Identifier: GPL-2.0-only
config INFINIBAND_BNXT_RE
- tristate "Broadcom Netxtreme HCA support"
- depends on 64BIT
- depends on ETHERNET && NETDEVICES && PCI && INET && DCB
- select NET_VENDOR_BROADCOM
- select BNXT
- ---help---
+ tristate "Broadcom Netxtreme HCA support"
+ depends on 64BIT
+ depends on ETHERNET && NETDEVICES && PCI && INET && DCB
+ select NET_VENDOR_BROADCOM
+ select BNXT
+ ---help---
This driver supports Broadcom NetXtreme-E 10/25/40/50 gigabit
RoCE HCAs. To compile this driver as a module, choose M here:
the module will be called bnxt_re.
diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
index e55a1666c0cd..725b2350e349 100644
--- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
@@ -108,6 +108,7 @@ struct bnxt_re_sqp_entries {
#define BNXT_RE_MAX_MSIX 9
#define BNXT_RE_AEQ_IDX 0
#define BNXT_RE_NQ_IDX 1
+#define BNXT_RE_GEN_P5_MAX_VF 64
struct bnxt_re_dev {
struct ib_device ibdev;
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index b4149dc9e824..9b6ca15a183c 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -191,24 +191,6 @@ int bnxt_re_query_device(struct ib_device *ibdev,
return 0;
}
-int bnxt_re_modify_device(struct ib_device *ibdev,
- int device_modify_mask,
- struct ib_device_modify *device_modify)
-{
- switch (device_modify_mask) {
- case IB_DEVICE_MODIFY_SYS_IMAGE_GUID:
- /* Modify the GUID requires the modification of the GID table */
- /* GUID should be made as READ-ONLY */
- break;
- case IB_DEVICE_MODIFY_NODE_DESC:
- /* Node Desc should be made as READ-ONLY */
- break;
- default:
- break;
- }
- return 0;
-}
-
/* Port */
int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
struct ib_port_attr *port_attr)
@@ -855,7 +837,7 @@ static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
bytes += (qplib_qp->sq.max_wqe * psn_sz);
}
bytes = PAGE_ALIGN(bytes);
- umem = ib_umem_get(udata, ureq.qpsva, bytes, IB_ACCESS_LOCAL_WRITE, 1);
+ umem = ib_umem_get(udata, ureq.qpsva, bytes, IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(umem))
return PTR_ERR(umem);
@@ -869,7 +851,7 @@ static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
bytes = (qplib_qp->rq.max_wqe * BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
bytes = PAGE_ALIGN(bytes);
umem = ib_umem_get(udata, ureq.qprva, bytes,
- IB_ACCESS_LOCAL_WRITE, 1);
+ IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(umem))
goto rqfail;
qp->rumem = umem;
@@ -1322,7 +1304,7 @@ static int bnxt_re_init_user_srq(struct bnxt_re_dev *rdev,
bytes = (qplib_srq->max_wqe * BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
bytes = PAGE_ALIGN(bytes);
- umem = ib_umem_get(udata, ureq.srqva, bytes, IB_ACCESS_LOCAL_WRITE, 1);
+ umem = ib_umem_get(udata, ureq.srqva, bytes, IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(umem))
return PTR_ERR(umem);
@@ -2565,7 +2547,7 @@ int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
cq->umem = ib_umem_get(udata, req.cq_va,
entries * sizeof(struct cq_base),
- IB_ACCESS_LOCAL_WRITE, 1);
+ IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(cq->umem)) {
rc = PTR_ERR(cq->umem);
goto fail;
@@ -3530,7 +3512,7 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
/* The fixed portion of the rkey is the same as the lkey */
mr->ib_mr.rkey = mr->qplib_mr.rkey;
- umem = ib_umem_get(udata, start, length, mr_access_flags, 0);
+ umem = ib_umem_get(udata, start, length, mr_access_flags);
if (IS_ERR(umem)) {
dev_err(rdev_to_dev(rdev), "Failed to get umem");
rc = -EFAULT;
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
index 31662b1ee35a..23d972da5652 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
@@ -145,9 +145,6 @@ struct bnxt_re_ucontext {
int bnxt_re_query_device(struct ib_device *ibdev,
struct ib_device_attr *ib_attr,
struct ib_udata *udata);
-int bnxt_re_modify_device(struct ib_device *ibdev,
- int device_modify_mask,
- struct ib_device_modify *device_modify);
int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
struct ib_port_attr *port_attr);
int bnxt_re_get_port_immutable(struct ib_device *ibdev, u8 port_num,
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index 30a54f8aa42c..e7e8a0f49464 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -119,61 +119,76 @@ static void bnxt_re_get_sriov_func_type(struct bnxt_re_dev *rdev)
* reserved for the function. The driver may choose to allocate fewer
* resources than the firmware maximum.
*/
-static void bnxt_re_set_resource_limits(struct bnxt_re_dev *rdev)
+static void bnxt_re_limit_pf_res(struct bnxt_re_dev *rdev)
{
- u32 vf_qps = 0, vf_srqs = 0, vf_cqs = 0, vf_mrws = 0, vf_gids = 0;
- u32 i;
- u32 vf_pct;
- u32 num_vfs;
- struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
+ struct bnxt_qplib_dev_attr *attr;
+ struct bnxt_qplib_ctx *ctx;
+ int i;
- rdev->qplib_ctx.qpc_count = min_t(u32, BNXT_RE_MAX_QPC_COUNT,
- dev_attr->max_qp);
+ attr = &rdev->dev_attr;
+ ctx = &rdev->qplib_ctx;
- rdev->qplib_ctx.mrw_count = BNXT_RE_MAX_MRW_COUNT_256K;
+ ctx->qpc_count = min_t(u32, BNXT_RE_MAX_QPC_COUNT,
+ attr->max_qp);
+ ctx->mrw_count = BNXT_RE_MAX_MRW_COUNT_256K;
/* Use max_mr from fw since max_mrw does not get set */
- rdev->qplib_ctx.mrw_count = min_t(u32, rdev->qplib_ctx.mrw_count,
- dev_attr->max_mr);
- rdev->qplib_ctx.srqc_count = min_t(u32, BNXT_RE_MAX_SRQC_COUNT,
- dev_attr->max_srq);
- rdev->qplib_ctx.cq_count = min_t(u32, BNXT_RE_MAX_CQ_COUNT,
- dev_attr->max_cq);
-
- for (i = 0; i < MAX_TQM_ALLOC_REQ; i++)
- rdev->qplib_ctx.tqm_count[i] =
- rdev->dev_attr.tqm_alloc_reqs[i];
-
- if (rdev->num_vfs) {
- /*
- * Reserve a set of resources for the PF. Divide the remaining
- * resources among the VFs
- */
- vf_pct = 100 - BNXT_RE_PCT_RSVD_FOR_PF;
- num_vfs = 100 * rdev->num_vfs;
- vf_qps = (rdev->qplib_ctx.qpc_count * vf_pct) / num_vfs;
- vf_srqs = (rdev->qplib_ctx.srqc_count * vf_pct) / num_vfs;
- vf_cqs = (rdev->qplib_ctx.cq_count * vf_pct) / num_vfs;
- /*
- * The driver allows many more MRs than other resources. If the
- * firmware does also, then reserve a fixed amount for the PF
- * and divide the rest among VFs. VFs may use many MRs for NFS
- * mounts, ISER, NVME applications, etc. If the firmware
- * severely restricts the number of MRs, then let PF have
- * half and divide the rest among VFs, as for the other
- * resource types.
- */
- if (rdev->qplib_ctx.mrw_count < BNXT_RE_MAX_MRW_COUNT_64K)
- vf_mrws = rdev->qplib_ctx.mrw_count * vf_pct / num_vfs;
- else
- vf_mrws = (rdev->qplib_ctx.mrw_count -
- BNXT_RE_RESVD_MR_FOR_PF) / rdev->num_vfs;
- vf_gids = BNXT_RE_MAX_GID_PER_VF;
+ ctx->mrw_count = min_t(u32, ctx->mrw_count, attr->max_mr);
+ ctx->srqc_count = min_t(u32, BNXT_RE_MAX_SRQC_COUNT,
+ attr->max_srq);
+ ctx->cq_count = min_t(u32, BNXT_RE_MAX_CQ_COUNT, attr->max_cq);
+ if (!bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx))
+ for (i = 0; i < MAX_TQM_ALLOC_REQ; i++)
+ rdev->qplib_ctx.tqm_count[i] =
+ rdev->dev_attr.tqm_alloc_reqs[i];
+}
+
+static void bnxt_re_limit_vf_res(struct bnxt_qplib_ctx *qplib_ctx, u32 num_vf)
+{
+ struct bnxt_qplib_vf_res *vf_res;
+ u32 mrws = 0;
+ u32 vf_pct;
+ u32 nvfs;
+
+ vf_res = &qplib_ctx->vf_res;
+ /*
+ * Reserve a set of resources for the PF. Divide the remaining
+ * resources among the VFs
+ */
+ vf_pct = 100 - BNXT_RE_PCT_RSVD_FOR_PF;
+ nvfs = num_vf;
+ num_vf = 100 * num_vf;
+ vf_res->max_qp_per_vf = (qplib_ctx->qpc_count * vf_pct) / num_vf;
+ vf_res->max_srq_per_vf = (qplib_ctx->srqc_count * vf_pct) / num_vf;
+ vf_res->max_cq_per_vf = (qplib_ctx->cq_count * vf_pct) / num_vf;
+ /*
+ * The driver allows many more MRs than other resources. If the
+ * firmware does also, then reserve a fixed amount for the PF and
+ * divide the rest among VFs. VFs may use many MRs for NFS
+ * mounts, ISER, NVME applications, etc. If the firmware severely
+ * restricts the number of MRs, then let PF have half and divide
+ * the rest among VFs, as for the other resource types.
+ */
+ if (qplib_ctx->mrw_count < BNXT_RE_MAX_MRW_COUNT_64K) {
+ mrws = qplib_ctx->mrw_count * vf_pct;
+ nvfs = num_vf;
+ } else {
+ mrws = qplib_ctx->mrw_count - BNXT_RE_RESVD_MR_FOR_PF;
}
- rdev->qplib_ctx.vf_res.max_mrw_per_vf = vf_mrws;
- rdev->qplib_ctx.vf_res.max_gid_per_vf = vf_gids;
- rdev->qplib_ctx.vf_res.max_qp_per_vf = vf_qps;
- rdev->qplib_ctx.vf_res.max_srq_per_vf = vf_srqs;
- rdev->qplib_ctx.vf_res.max_cq_per_vf = vf_cqs;
+ vf_res->max_mrw_per_vf = (mrws / nvfs);
+ vf_res->max_gid_per_vf = BNXT_RE_MAX_GID_PER_VF;
+}
+
+static void bnxt_re_set_resource_limits(struct bnxt_re_dev *rdev)
+{
+ u32 num_vfs;
+
+ memset(&rdev->qplib_ctx.vf_res, 0, sizeof(struct bnxt_qplib_vf_res));
+ bnxt_re_limit_pf_res(rdev);
+
+ num_vfs = bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx) ?
+ BNXT_RE_GEN_P5_MAX_VF : rdev->num_vfs;
+ if (num_vfs)
+ bnxt_re_limit_vf_res(&rdev->qplib_ctx, num_vfs);
}
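/*
 * Worked example of the split above (values assumed, not from the patch):
 * with BNXT_RE_PCT_RSVD_FOR_PF = 35, qpc_count = 64000 and 8 VFs, vf_pct
 * is 65 and num_vf is scaled to 800, so each VF is granted
 * (64000 * 65) / 800 = 5200 QPs while the PF keeps the reserved share.
 */
static u32 sketch_per_vf_share(u32 total, u32 pct_rsvd_for_pf, u32 nvfs)
{
	/* same integer math as bnxt_re_limit_vf_res() */
	return (total * (100 - pct_rsvd_for_pf)) / (100 * nvfs);
}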
/* for handling bnxt_en callbacks later */
@@ -193,9 +208,11 @@ static void bnxt_re_sriov_config(void *p, int num_vfs)
return;
rdev->num_vfs = num_vfs;
- bnxt_re_set_resource_limits(rdev);
- bnxt_qplib_set_func_resources(&rdev->qplib_res, &rdev->rcfw,
- &rdev->qplib_ctx);
+ if (!bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx)) {
+ bnxt_re_set_resource_limits(rdev);
+ bnxt_qplib_set_func_resources(&rdev->qplib_res, &rdev->rcfw,
+ &rdev->qplib_ctx);
+ }
}
static void bnxt_re_shutdown(void *p)
@@ -477,6 +494,7 @@ static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev,
bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_ALLOC, -1, -1);
req.update_period_ms = cpu_to_le32(1000);
req.stats_dma_addr = cpu_to_le64(dma_map);
+ req.stats_dma_length = cpu_to_le16(sizeof(struct ctx_hw_stats_ext));
req.stat_ctx_flags = STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE;
bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
@@ -625,7 +643,6 @@ static const struct ib_device_ops bnxt_re_dev_ops = {
.map_mr_sg = bnxt_re_map_mr_sg,
.mmap = bnxt_re_mmap,
.modify_ah = bnxt_re_modify_ah,
- .modify_device = bnxt_re_modify_device,
.modify_qp = bnxt_re_modify_qp,
.modify_srq = bnxt_re_modify_srq,
.poll_cq = bnxt_re_poll_cq,
@@ -895,10 +912,14 @@ static int bnxt_re_cqn_handler(struct bnxt_qplib_nq *nq,
return 0;
}
+#define BNXT_RE_GEN_P5_PF_NQ_DB 0x10000
+#define BNXT_RE_GEN_P5_VF_NQ_DB 0x4000
static u32 bnxt_re_get_nqdb_offset(struct bnxt_re_dev *rdev, u16 indx)
{
return bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx) ?
- 0x10000 : rdev->msix_entries[indx].db_offset;
+ (rdev->is_virtfn ? BNXT_RE_GEN_P5_VF_NQ_DB :
+ BNXT_RE_GEN_P5_PF_NQ_DB) :
+ rdev->msix_entries[indx].db_offset;
}
static void bnxt_re_cleanup_res(struct bnxt_re_dev *rdev)
@@ -1270,10 +1291,10 @@ static void bnxt_re_query_hwrm_intf_version(struct bnxt_re_dev *rdev)
return;
}
rdev->qplib_ctx.hwrm_intf_ver =
- (u64)resp.hwrm_intf_major << 48 |
- (u64)resp.hwrm_intf_minor << 32 |
- (u64)resp.hwrm_intf_build << 16 |
- resp.hwrm_intf_patch;
+ (u64)le16_to_cpu(resp.hwrm_intf_major) << 48 |
+ (u64)le16_to_cpu(resp.hwrm_intf_minor) << 32 |
+ (u64)le16_to_cpu(resp.hwrm_intf_build) << 16 |
+ le16_to_cpu(resp.hwrm_intf_patch);
}
static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev)
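/*
 * Sketch of the packing above with assumed example values: HWRM
 * interface version 1.10.0.100 becomes 0x0001000a00000064 once the four
 * little-endian 16-bit fields are converted to host order and shifted
 * into the u64.
 */
static u64 sketch_pack_intf_ver(u16 major, u16 minor, u16 build, u16 patch)
{
	return (u64)major << 48 | (u64)minor << 32 |
	       (u64)build << 16 | patch;
}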
@@ -1408,8 +1429,8 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
rdev->is_virtfn);
if (rc)
goto disable_rcfw;
- if (!rdev->is_virtfn)
- bnxt_re_set_resource_limits(rdev);
+
+ bnxt_re_set_resource_limits(rdev);
rc = bnxt_qplib_alloc_ctx(rdev->en_dev->pdev, &rdev->qplib_ctx, 0,
bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx));
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
index 60c8f76aab33..5cdfa84faf85 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
@@ -494,8 +494,10 @@ int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
* shall setup this area for VF. Skipping the
* HW programming
*/
- if (is_virtfn || bnxt_qplib_is_chip_gen_p5(rcfw->res->cctx))
+ if (is_virtfn)
goto skip_ctx_setup;
+ if (bnxt_qplib_is_chip_gen_p5(rcfw->res->cctx))
+ goto config_vf_res;
level = ctx->qpc_tbl.level;
req.qpc_pg_size_qpc_lvl = (level << CMDQ_INITIALIZE_FW_QPC_LVL_SFT) |
@@ -540,6 +542,7 @@ int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
req.number_of_srq = cpu_to_le32(ctx->srqc_tbl.max_elements);
req.number_of_cq = cpu_to_le32(ctx->cq_tbl.max_elements);
+config_vf_res:
req.max_qp_per_vf = cpu_to_le32(ctx->vf_res.max_qp_per_vf);
req.max_mrw_per_vf = cpu_to_le32(ctx->vf_res.max_mrw_per_vf);
req.max_srq_per_vf = cpu_to_le32(ctx->vf_res.max_srq_per_vf);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.h b/drivers/infiniband/hw/bnxt_re/qplib_res.h
index fbda11a7ab1a..aaa76d792185 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h
@@ -186,7 +186,9 @@ struct bnxt_qplib_chip_ctx {
u8 chip_metal;
};
-#define CHIP_NUM_57500 0x1750
+#define CHIP_NUM_57508 0x1750
+#define CHIP_NUM_57504 0x1751
+#define CHIP_NUM_57502 0x1752
struct bnxt_qplib_res {
struct pci_dev *pdev;
@@ -203,7 +205,9 @@ struct bnxt_qplib_res {
static inline bool bnxt_qplib_is_chip_gen_p5(struct bnxt_qplib_chip_ctx *cctx)
{
- return (cctx->chip_num == CHIP_NUM_57500);
+ return (cctx->chip_num == CHIP_NUM_57508 ||
+ cctx->chip_num == CHIP_NUM_57504 ||
+ cctx->chip_num == CHIP_NUM_57502);
}
static inline u8 bnxt_qplib_get_hwq_type(struct bnxt_qplib_res *res)
diff --git a/drivers/infiniband/hw/cxgb3/Kconfig b/drivers/infiniband/hw/cxgb3/Kconfig
deleted file mode 100644
index 8c1a72bff447..000000000000
--- a/drivers/infiniband/hw/cxgb3/Kconfig
+++ /dev/null
@@ -1,19 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-config INFINIBAND_CXGB3
- tristate "Chelsio RDMA Driver"
- depends on CHELSIO_T3
- select GENERIC_ALLOCATOR
- ---help---
- This is an iWARP/RDMA driver for the Chelsio T3 1GbE and
- 10GbE adapters.
-
- For general information about Chelsio and our products, visit
- our website at <http://www.chelsio.com>.
-
- For customer support, please visit our customer support page at
- <http://www.chelsio.com/support.html>.
-
- Please send feedback to <linux-bugs@chelsio.com>.
-
- To compile this driver as a module, choose M here: the module
- will be called iw_cxgb3.
diff --git a/drivers/infiniband/hw/cxgb3/Makefile b/drivers/infiniband/hw/cxgb3/Makefile
deleted file mode 100644
index 34bb86a6ae3a..000000000000
--- a/drivers/infiniband/hw/cxgb3/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-ccflags-y := -I $(srctree)/drivers/net/ethernet/chelsio/cxgb3
-
-obj-$(CONFIG_INFINIBAND_CXGB3) += iw_cxgb3.o
-
-iw_cxgb3-y := iwch_cm.o iwch_ev.o iwch_cq.o iwch_qp.o iwch_mem.o \
- iwch_provider.o iwch.o cxio_hal.o cxio_resource.o
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c
deleted file mode 100644
index 95b22a651673..000000000000
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.c
+++ /dev/null
@@ -1,1312 +0,0 @@
-/*
- * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include <asm/delay.h>
-
-#include <linux/mutex.h>
-#include <linux/netdevice.h>
-#include <linux/sched.h>
-#include <linux/spinlock.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include <linux/slab.h>
-#include <net/net_namespace.h>
-
-#include "cxio_resource.h"
-#include "cxio_hal.h"
-#include "cxgb3_offload.h"
-#include "sge_defs.h"
-
-static LIST_HEAD(rdev_list);
-static cxio_hal_ev_callback_func_t cxio_ev_cb = NULL;
-
-static struct cxio_rdev *cxio_hal_find_rdev_by_name(char *dev_name)
-{
- struct cxio_rdev *rdev;
-
- list_for_each_entry(rdev, &rdev_list, entry)
- if (!strcmp(rdev->dev_name, dev_name))
- return rdev;
- return NULL;
-}
-
-static struct cxio_rdev *cxio_hal_find_rdev_by_t3cdev(struct t3cdev *tdev)
-{
- struct cxio_rdev *rdev;
-
- list_for_each_entry(rdev, &rdev_list, entry)
- if (rdev->t3cdev_p == tdev)
- return rdev;
- return NULL;
-}
-
-int cxio_hal_cq_op(struct cxio_rdev *rdev_p, struct t3_cq *cq,
- enum t3_cq_opcode op, u32 credit)
-{
- int ret;
- struct t3_cqe *cqe;
- u32 rptr;
-
- struct rdma_cq_op setup;
- setup.id = cq->cqid;
- setup.credits = (op == CQ_CREDIT_UPDATE) ? credit : 0;
- setup.op = op;
- ret = rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_OP, &setup);
-
- if ((ret < 0) || (op == CQ_CREDIT_UPDATE))
- return ret;
-
- /*
- * If the rearm returned an index other than our current index,
- * then there might be CQE's in flight (being DMA'd). We must wait
- * here for them to complete or the consumer can miss a notification.
- */
- if (Q_PTR2IDX((cq->rptr), cq->size_log2) != ret) {
- int i=0;
-
- rptr = cq->rptr;
-
- /*
- * Keep the generation correct by bumping rptr until it
- * matches the index returned by the rearm - 1.
- */
- while (Q_PTR2IDX((rptr+1), cq->size_log2) != ret)
- rptr++;
-
- /*
- * Now rptr is the index for the (last) cqe that was
- * in-flight at the time the HW rearmed the CQ. We
- * spin until that CQE is valid.
- */
- cqe = cq->queue + Q_PTR2IDX(rptr, cq->size_log2);
- while (!CQ_VLD_ENTRY(rptr, cq->size_log2, cqe)) {
- udelay(1);
- if (i++ > 1000000) {
- pr_err("%s: stalled rnic\n", rdev_p->dev_name);
- return -EIO;
- }
- }
-
- return 1;
- }
-
- return 0;
-}
-
-static int cxio_hal_clear_cq_ctx(struct cxio_rdev *rdev_p, u32 cqid)
-{
- struct rdma_cq_setup setup;
- setup.id = cqid;
- setup.base_addr = 0; /* NULL address */
-	setup.size = 0;		/* disable the CQ */
- setup.credits = 0;
- setup.credit_thres = 0;
- setup.ovfl_mode = 0;
- return (rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_SETUP, &setup));
-}
-
-static int cxio_hal_clear_qp_ctx(struct cxio_rdev *rdev_p, u32 qpid)
-{
- u64 sge_cmd;
- struct t3_modify_qp_wr *wqe;
- struct sk_buff *skb = alloc_skb(sizeof(*wqe), GFP_KERNEL);
- if (!skb) {
- pr_debug("%s alloc_skb failed\n", __func__);
- return -ENOMEM;
- }
- wqe = skb_put_zero(skb, sizeof(*wqe));
- build_fw_riwrh((struct fw_riwrh *) wqe, T3_WR_QP_MOD,
- T3_COMPLETION_FLAG | T3_NOTIFY_FLAG, 0, qpid, 7,
- T3_SOPEOP);
- wqe->flags = cpu_to_be32(MODQP_WRITE_EC);
- sge_cmd = qpid << 8 | 3;
- wqe->sge_cmd = cpu_to_be64(sge_cmd);
- skb->priority = CPL_PRIORITY_CONTROL;
- return iwch_cxgb3_ofld_send(rdev_p->t3cdev_p, skb);
-}
-
-int cxio_create_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq, int kernel)
-{
- struct rdma_cq_setup setup;
- int size = (1UL << (cq->size_log2)) * sizeof(struct t3_cqe);
-
- size += 1; /* one extra page for storing cq-in-err state */
- cq->cqid = cxio_hal_get_cqid(rdev_p->rscp);
- if (!cq->cqid)
- return -ENOMEM;
- if (kernel) {
- cq->sw_queue = kzalloc(size, GFP_KERNEL);
- if (!cq->sw_queue)
- return -ENOMEM;
- }
- cq->queue = dma_alloc_coherent(&(rdev_p->rnic_info.pdev->dev), size,
- &(cq->dma_addr), GFP_KERNEL);
- if (!cq->queue) {
- kfree(cq->sw_queue);
- return -ENOMEM;
- }
- dma_unmap_addr_set(cq, mapping, cq->dma_addr);
- setup.id = cq->cqid;
- setup.base_addr = (u64) (cq->dma_addr);
- setup.size = 1UL << cq->size_log2;
- setup.credits = 65535;
- setup.credit_thres = 1;
- if (rdev_p->t3cdev_p->type != T3A)
- setup.ovfl_mode = 0;
- else
- setup.ovfl_mode = 1;
- return (rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_SETUP, &setup));
-}
-
-static u32 get_qpid(struct cxio_rdev *rdev_p, struct cxio_ucontext *uctx)
-{
- struct cxio_qpid_list *entry;
- u32 qpid;
- int i;
-
- mutex_lock(&uctx->lock);
- if (!list_empty(&uctx->qpids)) {
- entry = list_entry(uctx->qpids.next, struct cxio_qpid_list,
- entry);
- list_del(&entry->entry);
- qpid = entry->qpid;
- kfree(entry);
- } else {
- qpid = cxio_hal_get_qpid(rdev_p->rscp);
- if (!qpid)
- goto out;
- for (i = qpid+1; i & rdev_p->qpmask; i++) {
- entry = kmalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry)
- break;
- entry->qpid = i;
- list_add_tail(&entry->entry, &uctx->qpids);
- }
- }
-out:
- mutex_unlock(&uctx->lock);
- pr_debug("%s qpid 0x%x\n", __func__, qpid);
- return qpid;
-}
-
-static void put_qpid(struct cxio_rdev *rdev_p, u32 qpid,
- struct cxio_ucontext *uctx)
-{
- struct cxio_qpid_list *entry;
-
- entry = kmalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry)
- return;
- pr_debug("%s qpid 0x%x\n", __func__, qpid);
- entry->qpid = qpid;
- mutex_lock(&uctx->lock);
- list_add_tail(&entry->entry, &uctx->qpids);
- mutex_unlock(&uctx->lock);
-}
-
-void cxio_release_ucontext(struct cxio_rdev *rdev_p, struct cxio_ucontext *uctx)
-{
- struct list_head *pos, *nxt;
- struct cxio_qpid_list *entry;
-
- mutex_lock(&uctx->lock);
- list_for_each_safe(pos, nxt, &uctx->qpids) {
- entry = list_entry(pos, struct cxio_qpid_list, entry);
- list_del_init(&entry->entry);
- if (!(entry->qpid & rdev_p->qpmask))
- cxio_hal_put_qpid(rdev_p->rscp, entry->qpid);
- kfree(entry);
- }
- mutex_unlock(&uctx->lock);
-}
-
-void cxio_init_ucontext(struct cxio_rdev *rdev_p, struct cxio_ucontext *uctx)
-{
- INIT_LIST_HEAD(&uctx->qpids);
- mutex_init(&uctx->lock);
-}
-
-int cxio_create_qp(struct cxio_rdev *rdev_p, u32 kernel_domain,
- struct t3_wq *wq, struct cxio_ucontext *uctx)
-{
- int depth = 1UL << wq->size_log2;
- int rqsize = 1UL << wq->rq_size_log2;
-
- wq->qpid = get_qpid(rdev_p, uctx);
- if (!wq->qpid)
- return -ENOMEM;
-
- wq->rq = kcalloc(depth, sizeof(struct t3_swrq), GFP_KERNEL);
- if (!wq->rq)
- goto err1;
-
- wq->rq_addr = cxio_hal_rqtpool_alloc(rdev_p, rqsize);
- if (!wq->rq_addr)
- goto err2;
-
- wq->sq = kcalloc(depth, sizeof(struct t3_swsq), GFP_KERNEL);
- if (!wq->sq)
- goto err3;
-
- wq->queue = dma_alloc_coherent(&(rdev_p->rnic_info.pdev->dev),
- depth * sizeof(union t3_wr),
- &(wq->dma_addr), GFP_KERNEL);
- if (!wq->queue)
- goto err4;
-
- dma_unmap_addr_set(wq, mapping, wq->dma_addr);
- wq->doorbell = (void __iomem *)rdev_p->rnic_info.kdb_addr;
- if (!kernel_domain)
- wq->udb = (u64)rdev_p->rnic_info.udbell_physbase +
- (wq->qpid << rdev_p->qpshift);
- wq->rdev = rdev_p;
- pr_debug("%s qpid 0x%x doorbell 0x%p udb 0x%llx\n",
- __func__, wq->qpid, wq->doorbell, (unsigned long long)wq->udb);
- return 0;
-err4:
- kfree(wq->sq);
-err3:
- cxio_hal_rqtpool_free(rdev_p, wq->rq_addr, rqsize);
-err2:
- kfree(wq->rq);
-err1:
- put_qpid(rdev_p, wq->qpid, uctx);
- return -ENOMEM;
-}
-
-void cxio_destroy_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq)
-{
- cxio_hal_clear_cq_ctx(rdev_p, cq->cqid);
- kfree(cq->sw_queue);
- dma_free_coherent(&(rdev_p->rnic_info.pdev->dev),
- (1UL << (cq->size_log2))
- * sizeof(struct t3_cqe) + 1, cq->queue,
- dma_unmap_addr(cq, mapping));
- cxio_hal_put_cqid(rdev_p->rscp, cq->cqid);
-}
-
-int cxio_destroy_qp(struct cxio_rdev *rdev_p, struct t3_wq *wq,
- struct cxio_ucontext *uctx)
-{
- dma_free_coherent(&(rdev_p->rnic_info.pdev->dev),
- (1UL << (wq->size_log2))
- * sizeof(union t3_wr), wq->queue,
- dma_unmap_addr(wq, mapping));
- kfree(wq->sq);
- cxio_hal_rqtpool_free(rdev_p, wq->rq_addr, (1UL << wq->rq_size_log2));
- kfree(wq->rq);
- put_qpid(rdev_p, wq->qpid, uctx);
- return 0;
-}
-
-static void insert_recv_cqe(struct t3_wq *wq, struct t3_cq *cq)
-{
- struct t3_cqe cqe;
-
- pr_debug("%s wq %p cq %p sw_rptr 0x%x sw_wptr 0x%x\n", __func__,
- wq, cq, cq->sw_rptr, cq->sw_wptr);
- memset(&cqe, 0, sizeof(cqe));
- cqe.header = cpu_to_be32(V_CQE_STATUS(TPT_ERR_SWFLUSH) |
- V_CQE_OPCODE(T3_SEND) |
- V_CQE_TYPE(0) |
- V_CQE_SWCQE(1) |
- V_CQE_QPID(wq->qpid) |
- V_CQE_GENBIT(Q_GENBIT(cq->sw_wptr,
- cq->size_log2)));
- *(cq->sw_queue + Q_PTR2IDX(cq->sw_wptr, cq->size_log2)) = cqe;
- cq->sw_wptr++;
-}
-
-int cxio_flush_rq(struct t3_wq *wq, struct t3_cq *cq, int count)
-{
- u32 ptr;
- int flushed = 0;
-
- pr_debug("%s wq %p cq %p\n", __func__, wq, cq);
-
- /* flush RQ */
- pr_debug("%s rq_rptr %u rq_wptr %u skip count %u\n", __func__,
- wq->rq_rptr, wq->rq_wptr, count);
- ptr = wq->rq_rptr + count;
- while (ptr++ != wq->rq_wptr) {
- insert_recv_cqe(wq, cq);
- flushed++;
- }
- return flushed;
-}
-
-static void insert_sq_cqe(struct t3_wq *wq, struct t3_cq *cq,
- struct t3_swsq *sqp)
-{
- struct t3_cqe cqe;
-
- pr_debug("%s wq %p cq %p sw_rptr 0x%x sw_wptr 0x%x\n", __func__,
- wq, cq, cq->sw_rptr, cq->sw_wptr);
- memset(&cqe, 0, sizeof(cqe));
- cqe.header = cpu_to_be32(V_CQE_STATUS(TPT_ERR_SWFLUSH) |
- V_CQE_OPCODE(sqp->opcode) |
- V_CQE_TYPE(1) |
- V_CQE_SWCQE(1) |
- V_CQE_QPID(wq->qpid) |
- V_CQE_GENBIT(Q_GENBIT(cq->sw_wptr,
- cq->size_log2)));
- cqe.u.scqe.wrid_hi = sqp->sq_wptr;
-
- *(cq->sw_queue + Q_PTR2IDX(cq->sw_wptr, cq->size_log2)) = cqe;
- cq->sw_wptr++;
-}
-
-int cxio_flush_sq(struct t3_wq *wq, struct t3_cq *cq, int count)
-{
- __u32 ptr = wq->sq_rptr + count;
- int flushed = 0;
- struct t3_swsq *sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2);
-
- while (ptr != wq->sq_wptr) {
- sqp->signaled = 0;
- insert_sq_cqe(wq, cq, sqp);
- ptr++;
- sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2);
- flushed++;
- }
- return flushed;
-}
-
-/*
- * Move all CQEs from the HWCQ into the SWCQ.
- */
-void cxio_flush_hw_cq(struct t3_cq *cq)
-{
- struct t3_cqe *cqe, *swcqe;
-
- pr_debug("%s cq %p cqid 0x%x\n", __func__, cq, cq->cqid);
- cqe = cxio_next_hw_cqe(cq);
- while (cqe) {
- pr_debug("%s flushing hwcq rptr 0x%x to swcq wptr 0x%x\n",
- __func__, cq->rptr, cq->sw_wptr);
- swcqe = cq->sw_queue + Q_PTR2IDX(cq->sw_wptr, cq->size_log2);
- *swcqe = *cqe;
- swcqe->header |= cpu_to_be32(V_CQE_SWCQE(1));
- cq->sw_wptr++;
- cq->rptr++;
- cqe = cxio_next_hw_cqe(cq);
- }
-}
-
-static int cqe_completes_wr(struct t3_cqe *cqe, struct t3_wq *wq)
-{
- if (CQE_OPCODE(*cqe) == T3_TERMINATE)
- return 0;
-
- if ((CQE_OPCODE(*cqe) == T3_RDMA_WRITE) && RQ_TYPE(*cqe))
- return 0;
-
- if ((CQE_OPCODE(*cqe) == T3_READ_RESP) && SQ_TYPE(*cqe))
- return 0;
-
- if (CQE_SEND_OPCODE(*cqe) && RQ_TYPE(*cqe) &&
- Q_EMPTY(wq->rq_rptr, wq->rq_wptr))
- return 0;
-
- return 1;
-}
-
-void cxio_count_scqes(struct t3_cq *cq, struct t3_wq *wq, int *count)
-{
- struct t3_cqe *cqe;
- u32 ptr;
-
- *count = 0;
- ptr = cq->sw_rptr;
- while (!Q_EMPTY(ptr, cq->sw_wptr)) {
- cqe = cq->sw_queue + (Q_PTR2IDX(ptr, cq->size_log2));
- if ((SQ_TYPE(*cqe) ||
- ((CQE_OPCODE(*cqe) == T3_READ_RESP) && wq->oldest_read)) &&
- (CQE_QPID(*cqe) == wq->qpid))
- (*count)++;
- ptr++;
- }
- pr_debug("%s cq %p count %d\n", __func__, cq, *count);
-}
-
-void cxio_count_rcqes(struct t3_cq *cq, struct t3_wq *wq, int *count)
-{
- struct t3_cqe *cqe;
- u32 ptr;
-
- *count = 0;
- pr_debug("%s count zero %d\n", __func__, *count);
- ptr = cq->sw_rptr;
- while (!Q_EMPTY(ptr, cq->sw_wptr)) {
- cqe = cq->sw_queue + (Q_PTR2IDX(ptr, cq->size_log2));
- if (RQ_TYPE(*cqe) && (CQE_OPCODE(*cqe) != T3_READ_RESP) &&
- (CQE_QPID(*cqe) == wq->qpid) && cqe_completes_wr(cqe, wq))
- (*count)++;
- ptr++;
- }
- pr_debug("%s cq %p count %d\n", __func__, cq, *count);
-}
-
-static int cxio_hal_init_ctrl_cq(struct cxio_rdev *rdev_p)
-{
- struct rdma_cq_setup setup;
- setup.id = 0;
- setup.base_addr = 0; /* NULL address */
- setup.size = 1; /* enable the CQ */
- setup.credits = 0;
-
- /* force SGE to redirect to RspQ and interrupt */
- setup.credit_thres = 0;
- setup.ovfl_mode = 1;
- return (rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_SETUP, &setup));
-}
-
-static int cxio_hal_init_ctrl_qp(struct cxio_rdev *rdev_p)
-{
- int err;
- u64 sge_cmd, ctx0, ctx1;
- u64 base_addr;
- struct t3_modify_qp_wr *wqe;
- struct sk_buff *skb;
-
- skb = alloc_skb(sizeof(*wqe), GFP_KERNEL);
- if (!skb) {
- pr_debug("%s alloc_skb failed\n", __func__);
- return -ENOMEM;
- }
- err = cxio_hal_init_ctrl_cq(rdev_p);
- if (err) {
- pr_debug("%s err %d initializing ctrl_cq\n", __func__, err);
- goto err;
- }
- rdev_p->ctrl_qp.workq = dma_alloc_coherent(
- &(rdev_p->rnic_info.pdev->dev),
- (1 << T3_CTRL_QP_SIZE_LOG2) *
- sizeof(union t3_wr),
- &(rdev_p->ctrl_qp.dma_addr),
- GFP_KERNEL);
- if (!rdev_p->ctrl_qp.workq) {
- pr_debug("%s dma_alloc_coherent failed\n", __func__);
- err = -ENOMEM;
- goto err;
- }
- dma_unmap_addr_set(&rdev_p->ctrl_qp, mapping,
- rdev_p->ctrl_qp.dma_addr);
- rdev_p->ctrl_qp.doorbell = (void __iomem *)rdev_p->rnic_info.kdb_addr;
-
- mutex_init(&rdev_p->ctrl_qp.lock);
- init_waitqueue_head(&rdev_p->ctrl_qp.waitq);
-
- /* update HW Ctrl QP context */
- base_addr = rdev_p->ctrl_qp.dma_addr;
- base_addr >>= 12;
- ctx0 = (V_EC_SIZE((1 << T3_CTRL_QP_SIZE_LOG2)) |
- V_EC_BASE_LO((u32) base_addr & 0xffff));
- ctx0 <<= 32;
- ctx0 |= V_EC_CREDITS(FW_WR_NUM);
- base_addr >>= 16;
- ctx1 = (u32) base_addr;
- base_addr >>= 32;
- ctx1 |= ((u64) (V_EC_BASE_HI((u32) base_addr & 0xf) | V_EC_RESPQ(0) |
- V_EC_TYPE(0) | V_EC_GEN(1) |
- V_EC_UP_TOKEN(T3_CTL_QP_TID) | F_EC_VALID)) << 32;
- wqe = skb_put_zero(skb, sizeof(*wqe));
- build_fw_riwrh((struct fw_riwrh *) wqe, T3_WR_QP_MOD, 0, 0,
- T3_CTL_QP_TID, 7, T3_SOPEOP);
- wqe->flags = cpu_to_be32(MODQP_WRITE_EC);
- sge_cmd = (3ULL << 56) | FW_RI_SGEEC_START << 8 | 3;
- wqe->sge_cmd = cpu_to_be64(sge_cmd);
- wqe->ctx1 = cpu_to_be64(ctx1);
- wqe->ctx0 = cpu_to_be64(ctx0);
- pr_debug("CtrlQP dma_addr %pad workq %p size %d\n",
- &rdev_p->ctrl_qp.dma_addr, rdev_p->ctrl_qp.workq,
- 1 << T3_CTRL_QP_SIZE_LOG2);
- skb->priority = CPL_PRIORITY_CONTROL;
- return iwch_cxgb3_ofld_send(rdev_p->t3cdev_p, skb);
-err:
- kfree_skb(skb);
- return err;
-}
-
-static int cxio_hal_destroy_ctrl_qp(struct cxio_rdev *rdev_p)
-{
- dma_free_coherent(&(rdev_p->rnic_info.pdev->dev),
- (1UL << T3_CTRL_QP_SIZE_LOG2)
- * sizeof(union t3_wr), rdev_p->ctrl_qp.workq,
- dma_unmap_addr(&rdev_p->ctrl_qp, mapping));
- return cxio_hal_clear_qp_ctx(rdev_p, T3_CTRL_QP_ID);
-}
-
-/* write len bytes of data into addr (32B aligned address)
- * If data is NULL, clear len bytes of memory to zero.
- * caller acquires the ctrl_qp lock before the call
- */
-static int cxio_hal_ctrl_qp_write_mem(struct cxio_rdev *rdev_p, u32 addr,
- u32 len, void *data)
-{
- u32 i, nr_wqe, copy_len;
- u8 *copy_data;
- u8 wr_len, utx_len; /* length in 8 byte flit */
- enum t3_wr_flags flag;
- __be64 *wqe;
- u64 utx_cmd;
- addr &= 0x7FFFFFF;
- nr_wqe = len % 96 ? len / 96 + 1 : len / 96; /* 96B max per WQE */
- pr_debug("%s wptr 0x%x rptr 0x%x len %d, nr_wqe %d data %p addr 0x%0x\n",
- __func__, rdev_p->ctrl_qp.wptr, rdev_p->ctrl_qp.rptr, len,
- nr_wqe, data, addr);
- utx_len = 3; /* in 32B unit */
- for (i = 0; i < nr_wqe; i++) {
- if (Q_FULL(rdev_p->ctrl_qp.rptr, rdev_p->ctrl_qp.wptr,
- T3_CTRL_QP_SIZE_LOG2)) {
- pr_debug("%s ctrl_qp full wtpr 0x%0x rptr 0x%0x, wait for more space i %d\n",
- __func__,
- rdev_p->ctrl_qp.wptr, rdev_p->ctrl_qp.rptr, i);
- if (wait_event_interruptible(rdev_p->ctrl_qp.waitq,
- !Q_FULL(rdev_p->ctrl_qp.rptr,
- rdev_p->ctrl_qp.wptr,
- T3_CTRL_QP_SIZE_LOG2))) {
- pr_debug("%s ctrl_qp workq interrupted\n",
- __func__);
- return -ERESTARTSYS;
- }
- pr_debug("%s ctrl_qp wakeup, continue posting work request i %d\n",
- __func__, i);
- }
- wqe = (__be64 *)(rdev_p->ctrl_qp.workq + (rdev_p->ctrl_qp.wptr %
- (1 << T3_CTRL_QP_SIZE_LOG2)));
- flag = 0;
- if (i == (nr_wqe - 1)) {
- /* last WQE */
- flag = T3_COMPLETION_FLAG;
- if (len % 32)
- utx_len = len / 32 + 1;
- else
- utx_len = len / 32;
- }
-
- /*
- * Force a CQE to return the credit to the workq in case
- * we posted more than half the max QP size of WRs
- */
- if ((i != 0) &&
- (i % (((1 << T3_CTRL_QP_SIZE_LOG2)) >> 1) == 0)) {
- flag = T3_COMPLETION_FLAG;
- pr_debug("%s force completion at i %d\n", __func__, i);
- }
-
- /* build the utx mem command */
- wqe += (sizeof(struct t3_bypass_wr) >> 3);
- utx_cmd = (T3_UTX_MEM_WRITE << 28) | (addr + i * 3);
- utx_cmd <<= 32;
- utx_cmd |= (utx_len << 28) | ((utx_len << 2) + 1);
- *wqe = cpu_to_be64(utx_cmd);
- wqe++;
- copy_data = (u8 *) data + i * 96;
- copy_len = len > 96 ? 96 : len;
-
- /* clear memory content if data is NULL */
- if (data)
- memcpy(wqe, copy_data, copy_len);
- else
- memset(wqe, 0, copy_len);
- if (copy_len % 32)
- memset(((u8 *) wqe) + copy_len, 0,
- 32 - (copy_len % 32));
- wr_len = ((sizeof(struct t3_bypass_wr)) >> 3) + 1 +
- (utx_len << 2);
- wqe = (__be64 *)(rdev_p->ctrl_qp.workq + (rdev_p->ctrl_qp.wptr %
- (1 << T3_CTRL_QP_SIZE_LOG2)));
-
- /* wptr in the WRID[31:0] */
- ((union t3_wrid *)(wqe+1))->id0.low = rdev_p->ctrl_qp.wptr;
-
- /*
- * This must be the last write with a memory barrier
- * for the genbit
- */
- build_fw_riwrh((struct fw_riwrh *) wqe, T3_WR_BP, flag,
- Q_GENBIT(rdev_p->ctrl_qp.wptr,
- T3_CTRL_QP_SIZE_LOG2), T3_CTRL_QP_ID,
- wr_len, T3_SOPEOP);
- if (flag == T3_COMPLETION_FLAG)
- ring_doorbell(rdev_p->ctrl_qp.doorbell, T3_CTRL_QP_ID);
- len -= 96;
- rdev_p->ctrl_qp.wptr++;
- }
- return 0;
-}
-
-/* IN: stag key, pdid, perm, zbva, to, len, page_size, pbl_size and pbl_addr
- * OUT: stag index
- * TBD: shared memory region support
- */
-static int __cxio_tpt_op(struct cxio_rdev *rdev_p, u32 reset_tpt_entry,
- u32 *stag, u8 stag_state, u32 pdid,
- enum tpt_mem_type type, enum tpt_mem_perm perm,
- u32 zbva, u64 to, u32 len, u8 page_size,
- u32 pbl_size, u32 pbl_addr)
-{
- int err;
- struct tpt_entry tpt;
- u32 stag_idx;
- u32 wptr;
-
- if (cxio_fatal_error(rdev_p))
- return -EIO;
-
- stag_state = stag_state > 0;
- stag_idx = (*stag) >> 8;
-
- if ((!reset_tpt_entry) && !(*stag != T3_STAG_UNSET)) {
- stag_idx = cxio_hal_get_stag(rdev_p->rscp);
- if (!stag_idx)
- return -ENOMEM;
- *stag = (stag_idx << 8) | ((*stag) & 0xFF);
- }
- pr_debug("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
- __func__, stag_state, type, pdid, stag_idx);
-
- mutex_lock(&rdev_p->ctrl_qp.lock);
-
- /* write TPT entry */
- if (reset_tpt_entry)
- memset(&tpt, 0, sizeof(tpt));
- else {
- tpt.valid_stag_pdid = cpu_to_be32(F_TPT_VALID |
- V_TPT_STAG_KEY((*stag) & M_TPT_STAG_KEY) |
- V_TPT_STAG_STATE(stag_state) |
- V_TPT_STAG_TYPE(type) | V_TPT_PDID(pdid));
- BUG_ON(page_size >= 28);
- tpt.flags_pagesize_qpid = cpu_to_be32(V_TPT_PERM(perm) |
- ((perm & TPT_MW_BIND) ? F_TPT_MW_BIND_ENABLE : 0) |
- V_TPT_ADDR_TYPE((zbva ? TPT_ZBTO : TPT_VATO)) |
- V_TPT_PAGE_SIZE(page_size));
- tpt.rsvd_pbl_addr = cpu_to_be32(V_TPT_PBL_ADDR(PBL_OFF(rdev_p, pbl_addr)>>3));
- tpt.len = cpu_to_be32(len);
- tpt.va_hi = cpu_to_be32((u32) (to >> 32));
- tpt.va_low_or_fbo = cpu_to_be32((u32) (to & 0xFFFFFFFFULL));
- tpt.rsvd_bind_cnt_or_pstag = 0;
- tpt.rsvd_pbl_size = cpu_to_be32(V_TPT_PBL_SIZE(pbl_size >> 2));
- }
- err = cxio_hal_ctrl_qp_write_mem(rdev_p,
- stag_idx +
- (rdev_p->rnic_info.tpt_base >> 5),
- sizeof(tpt), &tpt);
-
- /* release the stag index to free pool */
- if (reset_tpt_entry)
- cxio_hal_put_stag(rdev_p->rscp, stag_idx);
-
- wptr = rdev_p->ctrl_qp.wptr;
- mutex_unlock(&rdev_p->ctrl_qp.lock);
- if (!err)
- if (wait_event_interruptible(rdev_p->ctrl_qp.waitq,
- SEQ32_GE(rdev_p->ctrl_qp.rptr,
- wptr)))
- return -ERESTARTSYS;
- return err;
-}
-
-int cxio_write_pbl(struct cxio_rdev *rdev_p, __be64 *pbl,
- u32 pbl_addr, u32 pbl_size)
-{
- u32 wptr;
- int err;
-
- pr_debug("%s *pdb_addr 0x%x, pbl_base 0x%x, pbl_size %d\n",
- __func__, pbl_addr, rdev_p->rnic_info.pbl_base,
- pbl_size);
-
- mutex_lock(&rdev_p->ctrl_qp.lock);
- err = cxio_hal_ctrl_qp_write_mem(rdev_p, pbl_addr >> 5, pbl_size << 3,
- pbl);
- wptr = rdev_p->ctrl_qp.wptr;
- mutex_unlock(&rdev_p->ctrl_qp.lock);
- if (err)
- return err;
-
- if (wait_event_interruptible(rdev_p->ctrl_qp.waitq,
- SEQ32_GE(rdev_p->ctrl_qp.rptr,
- wptr)))
- return -ERESTARTSYS;
-
- return 0;
-}
-
-int cxio_register_phys_mem(struct cxio_rdev *rdev_p, u32 *stag, u32 pdid,
- enum tpt_mem_perm perm, u32 zbva, u64 to, u32 len,
- u8 page_size, u32 pbl_size, u32 pbl_addr)
-{
- *stag = T3_STAG_UNSET;
- return __cxio_tpt_op(rdev_p, 0, stag, 1, pdid, TPT_NON_SHARED_MR, perm,
- zbva, to, len, page_size, pbl_size, pbl_addr);
-}
-
-int cxio_reregister_phys_mem(struct cxio_rdev *rdev_p, u32 *stag, u32 pdid,
- enum tpt_mem_perm perm, u32 zbva, u64 to, u32 len,
- u8 page_size, u32 pbl_size, u32 pbl_addr)
-{
- return __cxio_tpt_op(rdev_p, 0, stag, 1, pdid, TPT_NON_SHARED_MR, perm,
- zbva, to, len, page_size, pbl_size, pbl_addr);
-}
-
-int cxio_dereg_mem(struct cxio_rdev *rdev_p, u32 stag, u32 pbl_size,
- u32 pbl_addr)
-{
- return __cxio_tpt_op(rdev_p, 1, &stag, 0, 0, 0, 0, 0, 0ULL, 0, 0,
- pbl_size, pbl_addr);
-}
-
-int cxio_allocate_window(struct cxio_rdev *rdev_p, u32 * stag, u32 pdid)
-{
- *stag = T3_STAG_UNSET;
- return __cxio_tpt_op(rdev_p, 0, stag, 0, pdid, TPT_MW, 0, 0, 0ULL, 0, 0,
- 0, 0);
-}
-
-int cxio_deallocate_window(struct cxio_rdev *rdev_p, u32 stag)
-{
- return __cxio_tpt_op(rdev_p, 1, &stag, 0, 0, 0, 0, 0, 0ULL, 0, 0,
- 0, 0);
-}
-
-int cxio_allocate_stag(struct cxio_rdev *rdev_p, u32 *stag, u32 pdid, u32 pbl_size, u32 pbl_addr)
-{
- *stag = T3_STAG_UNSET;
- return __cxio_tpt_op(rdev_p, 0, stag, 0, pdid, TPT_NON_SHARED_MR,
- 0, 0, 0ULL, 0, 0, pbl_size, pbl_addr);
-}
-
-int cxio_rdma_init(struct cxio_rdev *rdev_p, struct t3_rdma_init_attr *attr)
-{
- struct t3_rdma_init_wr *wqe;
- struct sk_buff *skb = alloc_skb(sizeof(*wqe), GFP_ATOMIC);
- if (!skb)
- return -ENOMEM;
- pr_debug("%s rdev_p %p\n", __func__, rdev_p);
- wqe = __skb_put(skb, sizeof(*wqe));
- wqe->wrh.op_seop_flags = cpu_to_be32(V_FW_RIWR_OP(T3_WR_INIT));
- wqe->wrh.gen_tid_len = cpu_to_be32(V_FW_RIWR_TID(attr->tid) |
- V_FW_RIWR_LEN(sizeof(*wqe) >> 3));
- wqe->wrid.id1 = 0;
- wqe->qpid = cpu_to_be32(attr->qpid);
- wqe->pdid = cpu_to_be32(attr->pdid);
- wqe->scqid = cpu_to_be32(attr->scqid);
- wqe->rcqid = cpu_to_be32(attr->rcqid);
- wqe->rq_addr = cpu_to_be32(attr->rq_addr - rdev_p->rnic_info.rqt_base);
- wqe->rq_size = cpu_to_be32(attr->rq_size);
- wqe->mpaattrs = attr->mpaattrs;
- wqe->qpcaps = attr->qpcaps;
- wqe->ulpdu_size = cpu_to_be16(attr->tcp_emss);
- wqe->rqe_count = cpu_to_be16(attr->rqe_count);
- wqe->flags_rtr_type = cpu_to_be16(attr->flags |
- V_RTR_TYPE(attr->rtr_type) |
- V_CHAN(attr->chan));
- wqe->ord = cpu_to_be32(attr->ord);
- wqe->ird = cpu_to_be32(attr->ird);
- wqe->qp_dma_addr = cpu_to_be64(attr->qp_dma_addr);
- wqe->qp_dma_size = cpu_to_be32(attr->qp_dma_size);
- wqe->irs = cpu_to_be32(attr->irs);
- skb->priority = 0; /* 0=>ToeQ; 1=>CtrlQ */
- return iwch_cxgb3_ofld_send(rdev_p->t3cdev_p, skb);
-}
-
-void cxio_register_ev_cb(cxio_hal_ev_callback_func_t ev_cb)
-{
- cxio_ev_cb = ev_cb;
-}
-
-void cxio_unregister_ev_cb(cxio_hal_ev_callback_func_t ev_cb)
-{
- cxio_ev_cb = NULL;
-}
-
-static int cxio_hal_ev_handler(struct t3cdev *t3cdev_p, struct sk_buff *skb)
-{
- static int cnt;
- struct cxio_rdev *rdev_p = NULL;
- struct respQ_msg_t *rsp_msg = (struct respQ_msg_t *) skb->data;
- pr_debug("%d: %s cq_id 0x%x cq_ptr 0x%x genbit %0x overflow %0x an %0x se %0x notify %0x cqbranch %0x creditth %0x\n",
- cnt, __func__, RSPQ_CQID(rsp_msg), RSPQ_CQPTR(rsp_msg),
- RSPQ_GENBIT(rsp_msg), RSPQ_OVERFLOW(rsp_msg), RSPQ_AN(rsp_msg),
- RSPQ_SE(rsp_msg), RSPQ_NOTIFY(rsp_msg), RSPQ_CQBRANCH(rsp_msg),
- RSPQ_CREDIT_THRESH(rsp_msg));
- pr_debug("CQE: QPID 0x%0x genbit %0x type 0x%0x status 0x%0x opcode %d len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n",
- CQE_QPID(rsp_msg->cqe), CQE_GENBIT(rsp_msg->cqe),
- CQE_TYPE(rsp_msg->cqe), CQE_STATUS(rsp_msg->cqe),
- CQE_OPCODE(rsp_msg->cqe), CQE_LEN(rsp_msg->cqe),
- CQE_WRID_HI(rsp_msg->cqe), CQE_WRID_LOW(rsp_msg->cqe));
- rdev_p = (struct cxio_rdev *)t3cdev_p->ulp;
- if (!rdev_p) {
- pr_debug("%s called by t3cdev %p with null ulp\n", __func__,
- t3cdev_p);
- return 0;
- }
- if (CQE_QPID(rsp_msg->cqe) == T3_CTRL_QP_ID) {
- rdev_p->ctrl_qp.rptr = CQE_WRID_LOW(rsp_msg->cqe) + 1;
- wake_up_interruptible(&rdev_p->ctrl_qp.waitq);
- dev_kfree_skb_irq(skb);
- } else if (CQE_QPID(rsp_msg->cqe) == 0xfff8)
- dev_kfree_skb_irq(skb);
- else if (cxio_ev_cb)
- (*cxio_ev_cb) (rdev_p, skb);
- else
- dev_kfree_skb_irq(skb);
- cnt++;
- return 0;
-}
-
-/* Caller takes care of locking if needed */
-int cxio_rdev_open(struct cxio_rdev *rdev_p)
-{
- struct net_device *netdev_p = NULL;
- int err = 0;
- if (strlen(rdev_p->dev_name)) {
- if (cxio_hal_find_rdev_by_name(rdev_p->dev_name)) {
- return -EBUSY;
- }
- netdev_p = dev_get_by_name(&init_net, rdev_p->dev_name);
- if (!netdev_p) {
- return -EINVAL;
- }
- dev_put(netdev_p);
- } else if (rdev_p->t3cdev_p) {
- if (cxio_hal_find_rdev_by_t3cdev(rdev_p->t3cdev_p)) {
- return -EBUSY;
- }
- netdev_p = rdev_p->t3cdev_p->lldev;
- strncpy(rdev_p->dev_name, rdev_p->t3cdev_p->name,
- T3_MAX_DEV_NAME_LEN);
- } else {
- pr_debug("%s t3cdev_p or dev_name must be set\n", __func__);
- return -EINVAL;
- }
-
- list_add_tail(&rdev_p->entry, &rdev_list);
-
- pr_debug("%s opening rnic dev %s\n", __func__, rdev_p->dev_name);
- memset(&rdev_p->ctrl_qp, 0, sizeof(rdev_p->ctrl_qp));
- if (!rdev_p->t3cdev_p)
- rdev_p->t3cdev_p = dev2t3cdev(netdev_p);
- rdev_p->t3cdev_p->ulp = (void *) rdev_p;
-
- err = rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, GET_EMBEDDED_INFO,
- &(rdev_p->fw_info));
- if (err) {
- pr_err("%s t3cdev_p(%p)->ctl returned error %d\n",
- __func__, rdev_p->t3cdev_p, err);
- goto err1;
- }
- if (G_FW_VERSION_MAJOR(rdev_p->fw_info.fw_vers) != CXIO_FW_MAJ) {
- pr_err("fatal firmware version mismatch: need version %u but adapter has version %u\n",
- CXIO_FW_MAJ,
- G_FW_VERSION_MAJOR(rdev_p->fw_info.fw_vers));
- err = -EINVAL;
- goto err1;
- }
-
- err = rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_GET_PARAMS,
- &(rdev_p->rnic_info));
- if (err) {
- pr_err("%s t3cdev_p(%p)->ctl returned error %d\n",
- __func__, rdev_p->t3cdev_p, err);
- goto err1;
- }
- err = rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, GET_PORTS,
- &(rdev_p->port_info));
- if (err) {
- pr_err("%s t3cdev_p(%p)->ctl returned error %d\n",
- __func__, rdev_p->t3cdev_p, err);
- goto err1;
- }
-
- /*
- * qpshift is the number of bits to shift the qpid left in order
- * to get the correct address of the doorbell for that qp.
- */
- cxio_init_ucontext(rdev_p, &rdev_p->uctx);
- rdev_p->qpshift = PAGE_SHIFT -
- ilog2(65536 >>
- ilog2(rdev_p->rnic_info.udbell_len >>
- PAGE_SHIFT));
- rdev_p->qpnr = rdev_p->rnic_info.udbell_len >> PAGE_SHIFT;
- rdev_p->qpmask = (65536 >> ilog2(rdev_p->qpnr)) - 1;
- pr_debug("%s rnic %s info: tpt_base 0x%0x tpt_top 0x%0x num stags %d pbl_base 0x%0x pbl_top 0x%0x rqt_base 0x%0x, rqt_top 0x%0x\n",
- __func__, rdev_p->dev_name, rdev_p->rnic_info.tpt_base,
- rdev_p->rnic_info.tpt_top, cxio_num_stags(rdev_p),
- rdev_p->rnic_info.pbl_base,
- rdev_p->rnic_info.pbl_top, rdev_p->rnic_info.rqt_base,
- rdev_p->rnic_info.rqt_top);
- pr_debug("udbell_len 0x%0x udbell_physbase 0x%lx kdb_addr %p qpshift %lu qpnr %d qpmask 0x%x\n",
- rdev_p->rnic_info.udbell_len,
- rdev_p->rnic_info.udbell_physbase, rdev_p->rnic_info.kdb_addr,
- rdev_p->qpshift, rdev_p->qpnr, rdev_p->qpmask);
-
- err = cxio_hal_init_ctrl_qp(rdev_p);
- if (err) {
- pr_err("%s error %d initializing ctrl_qp\n", __func__, err);
- goto err1;
- }
- err = cxio_hal_init_resource(rdev_p, cxio_num_stags(rdev_p), 0,
- 0, T3_MAX_NUM_QP, T3_MAX_NUM_CQ,
- T3_MAX_NUM_PD);
- if (err) {
- pr_err("%s error %d initializing hal resources\n",
- __func__, err);
- goto err2;
- }
- err = cxio_hal_pblpool_create(rdev_p);
- if (err) {
- pr_err("%s error %d initializing pbl mem pool\n",
- __func__, err);
- goto err3;
- }
- err = cxio_hal_rqtpool_create(rdev_p);
- if (err) {
- pr_err("%s error %d initializing rqt mem pool\n",
- __func__, err);
- goto err4;
- }
- return 0;
-err4:
- cxio_hal_pblpool_destroy(rdev_p);
-err3:
- cxio_hal_destroy_resource(rdev_p->rscp);
-err2:
- cxio_hal_destroy_ctrl_qp(rdev_p);
-err1:
- rdev_p->t3cdev_p->ulp = NULL;
- list_del(&rdev_p->entry);
- return err;
-}
-
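-/*
- * Illustrative sketch (not part of the driver): a self-contained,
- * hand-checked walk-through of the qpshift/qpnr/qpmask arithmetic in
- * cxio_rdev_open() above.  The doorbell region size is an assumed
- * example value; the real one comes from rnic_info.udbell_len.
- */
-static inline void qp_doorbell_math_sketch(void)
-{
-	const unsigned int page_shift = 12;	/* assumed 4 KiB pages */
-	const u32 udbell_len = 1024 * 1024;	/* assumed 1 MiB region */
-	u32 qpnr, qpmask;
-	unsigned long qpshift;
-
-	qpnr = udbell_len >> page_shift;	/* 256 doorbell pages */
-	qpshift = page_shift -
-		  ilog2(65536 >> ilog2(udbell_len >> page_shift));
-	qpmask = (65536 >> ilog2(qpnr)) - 1;
-
-	/*
-	 * With these inputs: qpnr = 256, qpshift = 4, qpmask = 255 and,
-	 * per the comment in cxio_rdev_open(), qp 7's doorbell sits at
-	 * byte offset 7 << 4 = 0x70 within the region.
-	 */
-}
-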
-void cxio_rdev_close(struct cxio_rdev *rdev_p)
-{
- if (rdev_p) {
- cxio_hal_pblpool_destroy(rdev_p);
- cxio_hal_rqtpool_destroy(rdev_p);
- list_del(&rdev_p->entry);
- cxio_hal_destroy_ctrl_qp(rdev_p);
- cxio_hal_destroy_resource(rdev_p->rscp);
- rdev_p->t3cdev_p->ulp = NULL;
- }
-}
-
-int __init cxio_hal_init(void)
-{
- if (cxio_hal_init_rhdl_resource(T3_MAX_NUM_RI))
- return -ENOMEM;
- t3_register_cpl_handler(CPL_ASYNC_NOTIF, cxio_hal_ev_handler);
- return 0;
-}
-
-void __exit cxio_hal_exit(void)
-{
- struct cxio_rdev *rdev, *tmp;
-
- t3_register_cpl_handler(CPL_ASYNC_NOTIF, NULL);
- list_for_each_entry_safe(rdev, tmp, &rdev_list, entry)
- cxio_rdev_close(rdev);
- cxio_hal_destroy_rhdl_resource();
-}
-
-static void flush_completed_wrs(struct t3_wq *wq, struct t3_cq *cq)
-{
- struct t3_swsq *sqp;
- __u32 ptr = wq->sq_rptr;
- int count = Q_COUNT(wq->sq_rptr, wq->sq_wptr);
-
- sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2);
- while (count--)
- if (!sqp->signaled) {
- ptr++;
- sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2);
- } else if (sqp->complete) {
-
- /*
- * Insert this completed cqe into the swcq.
- */
- pr_debug("%s moving cqe into swcq sq idx %ld cq idx %ld\n",
- __func__, Q_PTR2IDX(ptr, wq->sq_size_log2),
- Q_PTR2IDX(cq->sw_wptr, cq->size_log2));
- sqp->cqe.header |= htonl(V_CQE_SWCQE(1));
- *(cq->sw_queue + Q_PTR2IDX(cq->sw_wptr, cq->size_log2))
- = sqp->cqe;
- cq->sw_wptr++;
- sqp->signaled = 0;
- break;
- } else
- break;
-}
-
-static void create_read_req_cqe(struct t3_wq *wq, struct t3_cqe *hw_cqe,
- struct t3_cqe *read_cqe)
-{
- read_cqe->u.scqe.wrid_hi = wq->oldest_read->sq_wptr;
- read_cqe->len = wq->oldest_read->read_len;
- read_cqe->header = htonl(V_CQE_QPID(CQE_QPID(*hw_cqe)) |
- V_CQE_SWCQE(SW_CQE(*hw_cqe)) |
- V_CQE_OPCODE(T3_READ_REQ) |
- V_CQE_TYPE(1));
-}
-
-/*
- * Advance wq->oldest_read to the next read wr in the SWSQ, or NULL if none.
- */
-static void advance_oldest_read(struct t3_wq *wq)
-{
-
- u32 rptr = wq->oldest_read - wq->sq + 1;
- u32 wptr = Q_PTR2IDX(wq->sq_wptr, wq->sq_size_log2);
-
- while (Q_PTR2IDX(rptr, wq->sq_size_log2) != wptr) {
- wq->oldest_read = wq->sq + Q_PTR2IDX(rptr, wq->sq_size_log2);
-
- if (wq->oldest_read->opcode == T3_READ_REQ)
- return;
- rptr++;
- }
- wq->oldest_read = NULL;
-}
-
-/*
- * cxio_poll_cq
- *
- * Caller must:
- * check the validity of the first CQE,
- * supply the wq associated with the qpid.
- *
- * credit: cq credit to return to sge.
- * cqe_flushed: 1 iff the CQE is flushed.
- * cqe: copy of the polled CQE.
- *
- * return value:
- * 0 CQE returned,
- * -1 CQE skipped, try again.
- */
-int cxio_poll_cq(struct t3_wq *wq, struct t3_cq *cq, struct t3_cqe *cqe,
- u8 *cqe_flushed, u64 *cookie, u32 *credit)
-{
- int ret = 0;
- struct t3_cqe *hw_cqe, read_cqe;
-
- *cqe_flushed = 0;
- *credit = 0;
- hw_cqe = cxio_next_cqe(cq);
-
- pr_debug("%s CQE OOO %d qpid 0x%0x genbit %d type %d status 0x%0x opcode 0x%0x len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n",
- __func__, CQE_OOO(*hw_cqe), CQE_QPID(*hw_cqe),
- CQE_GENBIT(*hw_cqe), CQE_TYPE(*hw_cqe), CQE_STATUS(*hw_cqe),
- CQE_OPCODE(*hw_cqe), CQE_LEN(*hw_cqe), CQE_WRID_HI(*hw_cqe),
- CQE_WRID_LOW(*hw_cqe));
-
- /*
- * skip cqe's not affiliated with a QP.
- */
- if (wq == NULL) {
- ret = -1;
- goto skip_cqe;
- }
-
- /*
- * Gotta tweak READ completions:
- * 1) the cqe doesn't contain the sq_wptr from the wr.
- * 2) opcode not reflected from the wr.
- * 3) read_len not reflected from the wr.
- * 4) cq_type is RQ_TYPE not SQ_TYPE.
- */
- if (RQ_TYPE(*hw_cqe) && (CQE_OPCODE(*hw_cqe) == T3_READ_RESP)) {
-
- /*
- * If this is an unsolicited read response, then the read
- * was generated by the kernel driver as part of peer-2-peer
- * connection setup. So ignore the completion.
- */
- if (!wq->oldest_read) {
- if (CQE_STATUS(*hw_cqe))
- wq->error = 1;
- ret = -1;
- goto skip_cqe;
- }
-
- /*
- * Don't write to the HWCQ, so create a new read req CQE
- * in local memory.
- */
- create_read_req_cqe(wq, hw_cqe, &read_cqe);
- hw_cqe = &read_cqe;
- advance_oldest_read(wq);
- }
-
- /*
- * T3A: Discard TERMINATE CQEs.
- */
- if (CQE_OPCODE(*hw_cqe) == T3_TERMINATE) {
- ret = -1;
- wq->error = 1;
- goto skip_cqe;
- }
-
- if (CQE_STATUS(*hw_cqe) || wq->error) {
- *cqe_flushed = wq->error;
- wq->error = 1;
-
- /*
- * T3A inserts errors into the CQE. We cannot return
- * these as work completions.
- */
- /* incoming write failures */
- if ((CQE_OPCODE(*hw_cqe) == T3_RDMA_WRITE)
- && RQ_TYPE(*hw_cqe)) {
- ret = -1;
- goto skip_cqe;
- }
- /* incoming read request failures */
- if ((CQE_OPCODE(*hw_cqe) == T3_READ_RESP) && SQ_TYPE(*hw_cqe)) {
- ret = -1;
- goto skip_cqe;
- }
-
- /* incoming SEND with no receive posted failures */
- if (CQE_SEND_OPCODE(*hw_cqe) && RQ_TYPE(*hw_cqe) &&
- Q_EMPTY(wq->rq_rptr, wq->rq_wptr)) {
- ret = -1;
- goto skip_cqe;
- }
- BUG_ON((*cqe_flushed == 0) && !SW_CQE(*hw_cqe));
- goto proc_cqe;
- }
-
- /*
- * RECV completion.
- */
- if (RQ_TYPE(*hw_cqe)) {
-
- /*
- * HW only validates 4 bits of MSN. So we must validate that
- * the MSN in the SEND is the next expected MSN. If it's not,
- * then we complete this with TPT_ERR_MSN and mark the wq in
- * error.
- */
-
- if (Q_EMPTY(wq->rq_rptr, wq->rq_wptr)) {
- wq->error = 1;
- ret = -1;
- goto skip_cqe;
- }
-
- if (unlikely((CQE_WRID_MSN(*hw_cqe) != (wq->rq_rptr + 1)))) {
- wq->error = 1;
- hw_cqe->header |= htonl(V_CQE_STATUS(TPT_ERR_MSN));
- goto proc_cqe;
- }
- goto proc_cqe;
- }
-
- /*
- * If we get here, it's a send completion.
- *
- * Handle out of order completion. These get stuffed
- * in the SW SQ. Then the SW SQ is walked to move any
- * now in-order completions into the SW CQ. This handles
- * 2 cases:
- * 1) reaping unsignaled WRs when the first subsequent
- * signaled WR is completed.
- * 2) out of order read completions.
- */
- if (!SW_CQE(*hw_cqe) && (CQE_WRID_SQ_WPTR(*hw_cqe) != wq->sq_rptr)) {
- struct t3_swsq *sqp;
-
- pr_debug("%s out of order completion going in swsq at idx %ld\n",
- __func__,
- Q_PTR2IDX(CQE_WRID_SQ_WPTR(*hw_cqe),
- wq->sq_size_log2));
- sqp = wq->sq +
- Q_PTR2IDX(CQE_WRID_SQ_WPTR(*hw_cqe), wq->sq_size_log2);
- sqp->cqe = *hw_cqe;
- sqp->complete = 1;
- ret = -1;
- goto flush_wq;
- }
-
-proc_cqe:
- *cqe = *hw_cqe;
-
- /*
- * Reap the associated WR(s) that are freed up with this
- * completion.
- */
- if (SQ_TYPE(*hw_cqe)) {
- wq->sq_rptr = CQE_WRID_SQ_WPTR(*hw_cqe);
- pr_debug("%s completing sq idx %ld\n", __func__,
- Q_PTR2IDX(wq->sq_rptr, wq->sq_size_log2));
- *cookie = wq->sq[Q_PTR2IDX(wq->sq_rptr, wq->sq_size_log2)].wr_id;
- wq->sq_rptr++;
- } else {
- pr_debug("%s completing rq idx %ld\n", __func__,
- Q_PTR2IDX(wq->rq_rptr, wq->rq_size_log2));
- *cookie = wq->rq[Q_PTR2IDX(wq->rq_rptr, wq->rq_size_log2)].wr_id;
- if (wq->rq[Q_PTR2IDX(wq->rq_rptr, wq->rq_size_log2)].pbl_addr)
- cxio_hal_pblpool_free(wq->rdev,
- wq->rq[Q_PTR2IDX(wq->rq_rptr,
- wq->rq_size_log2)].pbl_addr, T3_STAG0_PBL_SIZE);
- BUG_ON(Q_EMPTY(wq->rq_rptr, wq->rq_wptr));
- wq->rq_rptr++;
- }
-
-flush_wq:
- /*
- * Flush any completed cqes that are now in-order.
- */
- flush_completed_wrs(wq, cq);
-
-skip_cqe:
- if (SW_CQE(*hw_cqe)) {
- pr_debug("%s cq %p cqid 0x%x skip sw cqe sw_rptr 0x%x\n",
- __func__, cq, cq->cqid, cq->sw_rptr);
- ++cq->sw_rptr;
- } else {
- pr_debug("%s cq %p cqid 0x%x skip hw cqe rptr 0x%x\n",
- __func__, cq, cq->cqid, cq->rptr);
- ++cq->rptr;
-
- /*
- * T3A: compute credits.
- */
- if (((cq->rptr - cq->wptr) > (1 << (cq->size_log2 - 1)))
- || ((cq->rptr - cq->wptr) >= 128)) {
- *credit = cq->rptr - cq->wptr;
- cq->wptr = cq->rptr;
- }
- }
- return ret;
-}
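-
-/*
- * Illustrative usage sketch (not part of the driver): a caller-side drain
- * loop honoring cxio_poll_cq()'s 0 / -1 return convention documented
- * above, returning the accumulated credit to the SGE in a single update.
- */
-static inline int cxio_drain_cq_sketch(struct t3_wq *wq, struct t3_cq *cq)
-{
-	struct t3_cqe cqe;
-	u64 cookie;
-	u32 credit, total = 0;
-	u8 flushed;
-	int polled = 0;
-
-	while (cxio_next_cqe(cq)) {	/* caller checks CQE validity */
-		if (cxio_poll_cq(wq, cq, &cqe, &flushed, &cookie, &credit))
-			continue;	/* -1: CQE skipped, try again */
-		polled++;		/* 0: cqe and cookie are valid */
-		total += credit;
-	}
-	if (total)
-		cxio_hal_cq_op(wq->rdev, cq, CQ_CREDIT_UPDATE, total);
-	return polled;
-}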
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.h b/drivers/infiniband/hw/cxgb3/cxio_hal.h
deleted file mode 100644
index 40c029ffa425..000000000000
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.h
+++ /dev/null
@@ -1,204 +0,0 @@
-/*
- * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef __CXIO_HAL_H__
-#define __CXIO_HAL_H__
-
-#include <linux/list.h>
-#include <linux/mutex.h>
-#include <linux/kfifo.h>
-
-#include "t3_cpl.h"
-#include "t3cdev.h"
-#include "cxgb3_ctl_defs.h"
-#include "cxio_wr.h"
-
-#define T3_CTRL_QP_ID FW_RI_SGEEC_START
-#define T3_CTL_QP_TID FW_RI_TID_START
-#define T3_CTRL_QP_SIZE_LOG2 8
-#define T3_CTRL_CQ_ID 0
-
-#define T3_MAX_NUM_RI (1<<15)
-#define T3_MAX_NUM_QP (1<<15)
-#define T3_MAX_NUM_CQ (1<<15)
-#define T3_MAX_NUM_PD (1<<15)
-#define T3_MAX_PBL_SIZE 256
-#define T3_MAX_RQ_SIZE 1024
-#define T3_MAX_QP_DEPTH (T3_MAX_RQ_SIZE-1)
-#define T3_MAX_CQ_DEPTH 65536
-#define T3_MAX_NUM_STAG (1<<15)
-#define T3_MAX_MR_SIZE 0x100000000ULL
-#define T3_PAGESIZE_MASK 0xffff000 /* 4KB-128MB */
-
-#define T3_STAG_UNSET 0xffffffff
-
-#define T3_MAX_DEV_NAME_LEN 32
-
-#define CXIO_FW_MAJ 7
-
-struct cxio_hal_ctrl_qp {
- u32 wptr;
- u32 rptr;
-	struct mutex lock;	/* for the wptr, can sleep */
- wait_queue_head_t waitq;/* wait for RspQ/CQE msg */
- union t3_wr *workq; /* the work request queue */
- dma_addr_t dma_addr; /* pci bus address of the workq */
- DEFINE_DMA_UNMAP_ADDR(mapping);
- void __iomem *doorbell;
-};
-
-struct cxio_hal_resource {
- struct kfifo tpt_fifo;
- spinlock_t tpt_fifo_lock;
- struct kfifo qpid_fifo;
- spinlock_t qpid_fifo_lock;
- struct kfifo cqid_fifo;
- spinlock_t cqid_fifo_lock;
- struct kfifo pdid_fifo;
- spinlock_t pdid_fifo_lock;
-};
-
-struct cxio_qpid_list {
- struct list_head entry;
- u32 qpid;
-};
-
-struct cxio_ucontext {
- struct list_head qpids;
- struct mutex lock;
-};
-
-struct cxio_rdev {
- char dev_name[T3_MAX_DEV_NAME_LEN];
- struct t3cdev *t3cdev_p;
- struct rdma_info rnic_info;
- struct adap_ports port_info;
- struct cxio_hal_resource *rscp;
- struct cxio_hal_ctrl_qp ctrl_qp;
- void *ulp;
- unsigned long qpshift;
- u32 qpnr;
- u32 qpmask;
- struct cxio_ucontext uctx;
- struct gen_pool *pbl_pool;
- struct gen_pool *rqt_pool;
- struct list_head entry;
- struct ch_embedded_info fw_info;
- u32 flags;
-#define CXIO_ERROR_FATAL 1
-};
-
-static inline int cxio_fatal_error(struct cxio_rdev *rdev_p)
-{
- return rdev_p->flags & CXIO_ERROR_FATAL;
-}
-
-static inline int cxio_num_stags(struct cxio_rdev *rdev_p)
-{
- return min((int)T3_MAX_NUM_STAG, (int)((rdev_p->rnic_info.tpt_top - rdev_p->rnic_info.tpt_base) >> 5));
-}
-
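-/*
- * Worked instance of the arithmetic above, with an illustrative window:
- * struct tpt_entry (see cxio_wr.h) is eight 32-bit words, i.e. 32 bytes,
- * so (tpt_top - tpt_base) >> 5 counts entries.  A 1 MiB TPT window gives
- * min(T3_MAX_NUM_STAG, 0x100000 >> 5) = min(32768, 32768) = 32768 stags.
- */
-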
-typedef void (*cxio_hal_ev_callback_func_t) (struct cxio_rdev * rdev_p,
- struct sk_buff * skb);
-
-#define RSPQ_CQID(rsp) (be32_to_cpu(rsp->cq_ptrid) & 0xffff)
-#define RSPQ_CQPTR(rsp) ((be32_to_cpu(rsp->cq_ptrid) >> 16) & 0xffff)
-#define RSPQ_GENBIT(rsp) ((be32_to_cpu(rsp->flags) >> 16) & 1)
-#define RSPQ_OVERFLOW(rsp) ((be32_to_cpu(rsp->flags) >> 17) & 1)
-#define RSPQ_AN(rsp) ((be32_to_cpu(rsp->flags) >> 18) & 1)
-#define RSPQ_SE(rsp) ((be32_to_cpu(rsp->flags) >> 19) & 1)
-#define RSPQ_NOTIFY(rsp) ((be32_to_cpu(rsp->flags) >> 20) & 1)
-#define RSPQ_CQBRANCH(rsp) ((be32_to_cpu(rsp->flags) >> 21) & 1)
-#define RSPQ_CREDIT_THRESH(rsp) ((be32_to_cpu(rsp->flags) >> 22) & 1)
-
-struct respQ_msg_t {
- __be32 flags; /* flit 0 */
- __be32 cq_ptrid;
- __be64 rsvd; /* flit 1 */
- struct t3_cqe cqe; /* flits 2-3 */
-};
-
-enum t3_cq_opcode {
- CQ_ARM_AN = 0x2,
- CQ_ARM_SE = 0x6,
- CQ_FORCE_AN = 0x3,
- CQ_CREDIT_UPDATE = 0x7
-};
-
-int cxio_rdev_open(struct cxio_rdev *rdev);
-void cxio_rdev_close(struct cxio_rdev *rdev);
-int cxio_hal_cq_op(struct cxio_rdev *rdev, struct t3_cq *cq,
- enum t3_cq_opcode op, u32 credit);
-int cxio_create_cq(struct cxio_rdev *rdev, struct t3_cq *cq, int kernel);
-void cxio_destroy_cq(struct cxio_rdev *rdev, struct t3_cq *cq);
-void cxio_release_ucontext(struct cxio_rdev *rdev, struct cxio_ucontext *uctx);
-void cxio_init_ucontext(struct cxio_rdev *rdev, struct cxio_ucontext *uctx);
-int cxio_create_qp(struct cxio_rdev *rdev, u32 kernel_domain, struct t3_wq *wq,
- struct cxio_ucontext *uctx);
-int cxio_destroy_qp(struct cxio_rdev *rdev, struct t3_wq *wq,
- struct cxio_ucontext *uctx);
-int cxio_peek_cq(struct t3_wq *wr, struct t3_cq *cq, int opcode);
-int cxio_write_pbl(struct cxio_rdev *rdev_p, __be64 *pbl,
- u32 pbl_addr, u32 pbl_size);
-int cxio_register_phys_mem(struct cxio_rdev *rdev, u32 * stag, u32 pdid,
- enum tpt_mem_perm perm, u32 zbva, u64 to, u32 len,
- u8 page_size, u32 pbl_size, u32 pbl_addr);
-int cxio_reregister_phys_mem(struct cxio_rdev *rdev, u32 * stag, u32 pdid,
- enum tpt_mem_perm perm, u32 zbva, u64 to, u32 len,
- u8 page_size, u32 pbl_size, u32 pbl_addr);
-int cxio_dereg_mem(struct cxio_rdev *rdev, u32 stag, u32 pbl_size,
- u32 pbl_addr);
-int cxio_allocate_window(struct cxio_rdev *rdev, u32 * stag, u32 pdid);
-int cxio_allocate_stag(struct cxio_rdev *rdev, u32 *stag, u32 pdid, u32 pbl_size, u32 pbl_addr);
-int cxio_deallocate_window(struct cxio_rdev *rdev, u32 stag);
-int cxio_rdma_init(struct cxio_rdev *rdev, struct t3_rdma_init_attr *attr);
-void cxio_register_ev_cb(cxio_hal_ev_callback_func_t ev_cb);
-void cxio_unregister_ev_cb(cxio_hal_ev_callback_func_t ev_cb);
-u32 cxio_hal_get_pdid(struct cxio_hal_resource *rscp);
-void cxio_hal_put_pdid(struct cxio_hal_resource *rscp, u32 pdid);
-int __init cxio_hal_init(void);
-void __exit cxio_hal_exit(void);
-int cxio_flush_rq(struct t3_wq *wq, struct t3_cq *cq, int count);
-int cxio_flush_sq(struct t3_wq *wq, struct t3_cq *cq, int count);
-void cxio_count_rcqes(struct t3_cq *cq, struct t3_wq *wq, int *count);
-void cxio_count_scqes(struct t3_cq *cq, struct t3_wq *wq, int *count);
-void cxio_flush_hw_cq(struct t3_cq *cq);
-int cxio_poll_cq(struct t3_wq *wq, struct t3_cq *cq, struct t3_cqe *cqe,
- u8 *cqe_flushed, u64 *cookie, u32 *credit);
-int iwch_cxgb3_ofld_send(struct t3cdev *tdev, struct sk_buff *skb);
-
-#ifdef pr_fmt
-#undef pr_fmt
-#endif
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#endif
diff --git a/drivers/infiniband/hw/cxgb3/cxio_resource.c b/drivers/infiniband/hw/cxgb3/cxio_resource.c
deleted file mode 100644
index c6e7bc4420b6..000000000000
--- a/drivers/infiniband/hw/cxgb3/cxio_resource.c
+++ /dev/null
@@ -1,344 +0,0 @@
-/*
- * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-/* Crude resource management */
-#include <linux/kernel.h>
-#include <linux/random.h>
-#include <linux/slab.h>
-#include <linux/kfifo.h>
-#include <linux/spinlock.h>
-#include <linux/errno.h>
-#include "cxio_resource.h"
-#include "cxio_hal.h"
-
-static struct kfifo rhdl_fifo;
-static spinlock_t rhdl_fifo_lock;
-
-#define RANDOM_SIZE 16
-
-static int __cxio_init_resource_fifo(struct kfifo *fifo,
- spinlock_t *fifo_lock,
- u32 nr, u32 skip_low,
- u32 skip_high,
- int random)
-{
- u32 i, j, entry = 0, idx;
- u32 random_bytes;
- u32 rarray[16];
- spin_lock_init(fifo_lock);
-
- if (kfifo_alloc(fifo, nr * sizeof(u32), GFP_KERNEL))
- return -ENOMEM;
-				/* STAG is out of range, is 0, */
- for (i = 0; i < skip_low + skip_high; i++)
- kfifo_in(fifo, (unsigned char *) &entry, sizeof(u32));
- if (random) {
- j = 0;
- random_bytes = prandom_u32();
- for (i = 0; i < RANDOM_SIZE; i++)
- rarray[i] = i + skip_low;
- for (i = skip_low + RANDOM_SIZE; i < nr - skip_high; i++) {
- if (j >= RANDOM_SIZE) {
-						/* MR with a MW bound to it */
- random_bytes = prandom_u32();
- }
- idx = (random_bytes >> (j * 2)) & 0xF;
- kfifo_in(fifo,
- (unsigned char *) &rarray[idx],
- sizeof(u32));
- rarray[idx] = i;
- j++;
- }
- for (i = 0; i < RANDOM_SIZE; i++)
- kfifo_in(fifo,
- (unsigned char *) &rarray[i],
- sizeof(u32));
- } else
- for (i = skip_low; i < nr - skip_high; i++)
- kfifo_in(fifo, (unsigned char *) &i, sizeof(u32));
-
- for (i = 0; i < skip_low + skip_high; i++)
- if (kfifo_out_locked(fifo, (unsigned char *) &entry,
- sizeof(u32), fifo_lock) != sizeof(u32))
- break;
- return 0;
-}
-
-static int cxio_init_resource_fifo(struct kfifo *fifo, spinlock_t * fifo_lock,
- u32 nr, u32 skip_low, u32 skip_high)
-{
- return (__cxio_init_resource_fifo(fifo, fifo_lock, nr, skip_low,
- skip_high, 0));
-}
-
-static int cxio_init_resource_fifo_random(struct kfifo *fifo,
- spinlock_t * fifo_lock,
- u32 nr, u32 skip_low, u32 skip_high)
-{
-
- return (__cxio_init_resource_fifo(fifo, fifo_lock, nr, skip_low,
- skip_high, 1));
-}
-
-static int cxio_init_qpid_fifo(struct cxio_rdev *rdev_p)
-{
- u32 i;
-
- spin_lock_init(&rdev_p->rscp->qpid_fifo_lock);
-
- if (kfifo_alloc(&rdev_p->rscp->qpid_fifo, T3_MAX_NUM_QP * sizeof(u32),
- GFP_KERNEL))
- return -ENOMEM;
-
- for (i = 16; i < T3_MAX_NUM_QP; i++)
- if (!(i & rdev_p->qpmask))
- kfifo_in(&rdev_p->rscp->qpid_fifo,
- (unsigned char *) &i, sizeof(u32));
- return 0;
-}
-
-int cxio_hal_init_rhdl_resource(u32 nr_rhdl)
-{
- return cxio_init_resource_fifo(&rhdl_fifo, &rhdl_fifo_lock, nr_rhdl, 1,
- 0);
-}
-
-void cxio_hal_destroy_rhdl_resource(void)
-{
- kfifo_free(&rhdl_fifo);
-}
-
-/* nr_* must be power of 2 */
-int cxio_hal_init_resource(struct cxio_rdev *rdev_p,
- u32 nr_tpt, u32 nr_pbl,
- u32 nr_rqt, u32 nr_qpid, u32 nr_cqid, u32 nr_pdid)
-{
- int err = 0;
- struct cxio_hal_resource *rscp;
-
- rscp = kmalloc(sizeof(*rscp), GFP_KERNEL);
- if (!rscp)
- return -ENOMEM;
- rdev_p->rscp = rscp;
- err = cxio_init_resource_fifo_random(&rscp->tpt_fifo,
- &rscp->tpt_fifo_lock,
- nr_tpt, 1, 0);
- if (err)
- goto tpt_err;
- err = cxio_init_qpid_fifo(rdev_p);
- if (err)
- goto qpid_err;
- err = cxio_init_resource_fifo(&rscp->cqid_fifo, &rscp->cqid_fifo_lock,
- nr_cqid, 1, 0);
- if (err)
- goto cqid_err;
- err = cxio_init_resource_fifo(&rscp->pdid_fifo, &rscp->pdid_fifo_lock,
- nr_pdid, 1, 0);
- if (err)
- goto pdid_err;
- return 0;
-pdid_err:
- kfifo_free(&rscp->cqid_fifo);
-cqid_err:
- kfifo_free(&rscp->qpid_fifo);
-qpid_err:
- kfifo_free(&rscp->tpt_fifo);
-tpt_err:
- return -ENOMEM;
-}
-
-/*
- * returns 0 if no resource available
- */
-static u32 cxio_hal_get_resource(struct kfifo *fifo, spinlock_t * lock)
-{
- u32 entry;
- if (kfifo_out_locked(fifo, (unsigned char *) &entry, sizeof(u32), lock))
- return entry;
- else
-		return 0;	/* fifo empty */
-}
-
-static void cxio_hal_put_resource(struct kfifo *fifo, spinlock_t * lock,
- u32 entry)
-{
-	BUG_ON(kfifo_in_locked(fifo, (unsigned char *) &entry,
-			       sizeof(u32), lock) == 0);
-}
-
-u32 cxio_hal_get_stag(struct cxio_hal_resource *rscp)
-{
- return cxio_hal_get_resource(&rscp->tpt_fifo, &rscp->tpt_fifo_lock);
-}
-
-void cxio_hal_put_stag(struct cxio_hal_resource *rscp, u32 stag)
-{
- cxio_hal_put_resource(&rscp->tpt_fifo, &rscp->tpt_fifo_lock, stag);
-}
-
-u32 cxio_hal_get_qpid(struct cxio_hal_resource *rscp)
-{
- u32 qpid = cxio_hal_get_resource(&rscp->qpid_fifo,
- &rscp->qpid_fifo_lock);
- pr_debug("%s qpid 0x%x\n", __func__, qpid);
- return qpid;
-}
-
-void cxio_hal_put_qpid(struct cxio_hal_resource *rscp, u32 qpid)
-{
- pr_debug("%s qpid 0x%x\n", __func__, qpid);
- cxio_hal_put_resource(&rscp->qpid_fifo, &rscp->qpid_fifo_lock, qpid);
-}
-
-u32 cxio_hal_get_cqid(struct cxio_hal_resource *rscp)
-{
- return cxio_hal_get_resource(&rscp->cqid_fifo, &rscp->cqid_fifo_lock);
-}
-
-void cxio_hal_put_cqid(struct cxio_hal_resource *rscp, u32 cqid)
-{
- cxio_hal_put_resource(&rscp->cqid_fifo, &rscp->cqid_fifo_lock, cqid);
-}
-
-u32 cxio_hal_get_pdid(struct cxio_hal_resource *rscp)
-{
- return cxio_hal_get_resource(&rscp->pdid_fifo, &rscp->pdid_fifo_lock);
-}
-
-void cxio_hal_put_pdid(struct cxio_hal_resource *rscp, u32 pdid)
-{
- cxio_hal_put_resource(&rscp->pdid_fifo, &rscp->pdid_fifo_lock, pdid);
-}
-
-void cxio_hal_destroy_resource(struct cxio_hal_resource *rscp)
-{
- kfifo_free(&rscp->tpt_fifo);
- kfifo_free(&rscp->cqid_fifo);
- kfifo_free(&rscp->qpid_fifo);
- kfifo_free(&rscp->pdid_fifo);
- kfree(rscp);
-}
-
-/*
- * PBL Memory Manager. Uses Linux generic allocator.
- */
-
-#define MIN_PBL_SHIFT 8 /* 256B == min PBL size (32 entries) */
-
-u32 cxio_hal_pblpool_alloc(struct cxio_rdev *rdev_p, int size)
-{
- unsigned long addr = gen_pool_alloc(rdev_p->pbl_pool, size);
- pr_debug("%s addr 0x%x size %d\n", __func__, (u32)addr, size);
- return (u32)addr;
-}
-
-void cxio_hal_pblpool_free(struct cxio_rdev *rdev_p, u32 addr, int size)
-{
- pr_debug("%s addr 0x%x size %d\n", __func__, addr, size);
- gen_pool_free(rdev_p->pbl_pool, (unsigned long)addr, size);
-}
-
-int cxio_hal_pblpool_create(struct cxio_rdev *rdev_p)
-{
- unsigned pbl_start, pbl_chunk;
-
- rdev_p->pbl_pool = gen_pool_create(MIN_PBL_SHIFT, -1);
- if (!rdev_p->pbl_pool)
- return -ENOMEM;
-
- pbl_start = rdev_p->rnic_info.pbl_base;
- pbl_chunk = rdev_p->rnic_info.pbl_top - pbl_start + 1;
-
- while (pbl_start < rdev_p->rnic_info.pbl_top) {
- pbl_chunk = min(rdev_p->rnic_info.pbl_top - pbl_start + 1,
- pbl_chunk);
- if (gen_pool_add(rdev_p->pbl_pool, pbl_start, pbl_chunk, -1)) {
- pr_debug("%s failed to add PBL chunk (%x/%x)\n",
- __func__, pbl_start, pbl_chunk);
- if (pbl_chunk <= 1024 << MIN_PBL_SHIFT) {
- pr_warn("%s: Failed to add all PBL chunks (%x/%x)\n",
- __func__, pbl_start,
- rdev_p->rnic_info.pbl_top - pbl_start);
- return 0;
- }
- pbl_chunk >>= 1;
- } else {
- pr_debug("%s added PBL chunk (%x/%x)\n",
- __func__, pbl_start, pbl_chunk);
- pbl_start += pbl_chunk;
- }
- }
-
- return 0;
-}
-
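-/*
- * Worked note on the loop above (values illustrative): the first
- * gen_pool_add() attempt covers the whole window (pbl_top - pbl_start + 1);
- * on failure the chunk is halved and retried, and the driver settles for a
- * partial pool once chunks shrink below 1024 << MIN_PBL_SHIFT = 256 KiB.
- */
-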
-void cxio_hal_pblpool_destroy(struct cxio_rdev *rdev_p)
-{
- gen_pool_destroy(rdev_p->pbl_pool);
-}
-
-/*
- * RQT Memory Manager. Uses Linux generic allocator.
- */
-
-#define MIN_RQT_SHIFT 10 /* 1KB == min RQT size (16 entries) */
-#define RQT_CHUNK (2*1024*1024)
-
-u32 cxio_hal_rqtpool_alloc(struct cxio_rdev *rdev_p, int size)
-{
- unsigned long addr = gen_pool_alloc(rdev_p->rqt_pool, size << 6);
- pr_debug("%s addr 0x%x size %d\n", __func__, (u32)addr, size << 6);
- return (u32)addr;
-}
-
-void cxio_hal_rqtpool_free(struct cxio_rdev *rdev_p, u32 addr, int size)
-{
- pr_debug("%s addr 0x%x size %d\n", __func__, addr, size << 6);
- gen_pool_free(rdev_p->rqt_pool, (unsigned long)addr, size << 6);
-}
-
-int cxio_hal_rqtpool_create(struct cxio_rdev *rdev_p)
-{
- unsigned long i;
- rdev_p->rqt_pool = gen_pool_create(MIN_RQT_SHIFT, -1);
- if (rdev_p->rqt_pool)
- for (i = rdev_p->rnic_info.rqt_base;
- i <= rdev_p->rnic_info.rqt_top - RQT_CHUNK + 1;
- i += RQT_CHUNK)
- gen_pool_add(rdev_p->rqt_pool, i, RQT_CHUNK, -1);
- return rdev_p->rqt_pool ? 0 : -ENOMEM;
-}
-
-void cxio_hal_rqtpool_destroy(struct cxio_rdev *rdev_p)
-{
- gen_pool_destroy(rdev_p->rqt_pool);
-}
diff --git a/drivers/infiniband/hw/cxgb3/cxio_resource.h b/drivers/infiniband/hw/cxgb3/cxio_resource.h
deleted file mode 100644
index a2703a3d882d..000000000000
--- a/drivers/infiniband/hw/cxgb3/cxio_resource.h
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef __CXIO_RESOURCE_H__
-#define __CXIO_RESOURCE_H__
-
-#include <linux/kernel.h>
-#include <linux/random.h>
-#include <linux/slab.h>
-#include <linux/kfifo.h>
-#include <linux/spinlock.h>
-#include <linux/errno.h>
-#include <linux/genalloc.h>
-#include "cxio_hal.h"
-
-extern int cxio_hal_init_rhdl_resource(u32 nr_rhdl);
-extern void cxio_hal_destroy_rhdl_resource(void);
-extern int cxio_hal_init_resource(struct cxio_rdev *rdev_p,
- u32 nr_tpt, u32 nr_pbl,
- u32 nr_rqt, u32 nr_qpid, u32 nr_cqid,
- u32 nr_pdid);
-extern u32 cxio_hal_get_stag(struct cxio_hal_resource *rscp);
-extern void cxio_hal_put_stag(struct cxio_hal_resource *rscp, u32 stag);
-extern u32 cxio_hal_get_qpid(struct cxio_hal_resource *rscp);
-extern void cxio_hal_put_qpid(struct cxio_hal_resource *rscp, u32 qpid);
-extern u32 cxio_hal_get_cqid(struct cxio_hal_resource *rscp);
-extern void cxio_hal_put_cqid(struct cxio_hal_resource *rscp, u32 cqid);
-extern void cxio_hal_destroy_resource(struct cxio_hal_resource *rscp);
-
-#define PBL_OFF(rdev_p, a) ( (a) - (rdev_p)->rnic_info.pbl_base )
-extern int cxio_hal_pblpool_create(struct cxio_rdev *rdev_p);
-extern void cxio_hal_pblpool_destroy(struct cxio_rdev *rdev_p);
-extern u32 cxio_hal_pblpool_alloc(struct cxio_rdev *rdev_p, int size);
-extern void cxio_hal_pblpool_free(struct cxio_rdev *rdev_p, u32 addr, int size);
-
-#define RQT_OFF(rdev_p, a) ( (a) - (rdev_p)->rnic_info.rqt_base )
-extern int cxio_hal_rqtpool_create(struct cxio_rdev *rdev_p);
-extern void cxio_hal_rqtpool_destroy(struct cxio_rdev *rdev_p);
-extern u32 cxio_hal_rqtpool_alloc(struct cxio_rdev *rdev_p, int size);
-extern void cxio_hal_rqtpool_free(struct cxio_rdev *rdev_p, u32 addr, int size);
-#endif
diff --git a/drivers/infiniband/hw/cxgb3/cxio_wr.h b/drivers/infiniband/hw/cxgb3/cxio_wr.h
deleted file mode 100644
index 53aa5c36247a..000000000000
--- a/drivers/infiniband/hw/cxgb3/cxio_wr.h
+++ /dev/null
@@ -1,802 +0,0 @@
-/*
- * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef __CXIO_WR_H__
-#define __CXIO_WR_H__
-
-#include <asm/io.h>
-#include <linux/pci.h>
-#include <linux/timer.h>
-#include "firmware_exports.h"
-
-#define T3_MAX_SGE 4
-#define T3_MAX_INLINE 64
-#define T3_STAG0_PBL_SIZE (2 * T3_MAX_SGE << 3)
-#define T3_STAG0_MAX_PBE_LEN (128 * 1024 * 1024)
-#define T3_STAG0_PAGE_SHIFT 15
-
-#define Q_EMPTY(rptr,wptr) ((rptr)==(wptr))
-#define Q_FULL(rptr,wptr,size_log2) ( (((wptr)-(rptr))>>(size_log2)) && \
- ((rptr)!=(wptr)) )
-#define Q_GENBIT(ptr,size_log2) (!(((ptr)>>size_log2)&0x1))
-#define Q_FREECNT(rptr,wptr,size_log2) ((1UL<<size_log2)-((wptr)-(rptr)))
-#define Q_COUNT(rptr,wptr) ((wptr)-(rptr))
-#define Q_PTR2IDX(ptr,size_log2) (ptr & ((1UL<<size_log2)-1))
-
-static inline void ring_doorbell(void __iomem *doorbell, u32 qpid)
-{
- writel(((1<<31) | qpid), doorbell);
-}
-
-#define SEQ32_GE(x,y) (!( (((u32) (x)) - ((u32) (y))) & 0x80000000 ))
-
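-/*
- * Hand-checked example (illustrative values only) of the queue macros and
- * SEQ32_GE() above: ring pointers run over the full 32-bit range and wrap
- * naturally, getting reduced to an index only at access time.
- *
- *   rptr = 0xfffffffe, wptr = 0x00000002, size_log2 = 8 (256 entries):
- *     Q_COUNT(rptr, wptr)   = 4      (unsigned subtraction wraps)
- *     Q_PTR2IDX(rptr, 8)    = 0xfe
- *     Q_FULL(rptr, wptr, 8) = 0      (4 used of 256)
- *     SEQ32_GE(wptr, rptr)  = 1      (ordering survives wraparound)
- *     SEQ32_GE(rptr, wptr)  = 0
- */
-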
-enum t3_wr_flags {
- T3_COMPLETION_FLAG = 0x01,
- T3_NOTIFY_FLAG = 0x02,
- T3_SOLICITED_EVENT_FLAG = 0x04,
- T3_READ_FENCE_FLAG = 0x08,
- T3_LOCAL_FENCE_FLAG = 0x10
-} __packed;
-
-enum t3_wr_opcode {
- T3_WR_BP = FW_WROPCODE_RI_BYPASS,
- T3_WR_SEND = FW_WROPCODE_RI_SEND,
- T3_WR_WRITE = FW_WROPCODE_RI_RDMA_WRITE,
- T3_WR_READ = FW_WROPCODE_RI_RDMA_READ,
- T3_WR_INV_STAG = FW_WROPCODE_RI_LOCAL_INV,
- T3_WR_BIND = FW_WROPCODE_RI_BIND_MW,
- T3_WR_RCV = FW_WROPCODE_RI_RECEIVE,
- T3_WR_INIT = FW_WROPCODE_RI_RDMA_INIT,
- T3_WR_QP_MOD = FW_WROPCODE_RI_MODIFY_QP,
- T3_WR_FASTREG = FW_WROPCODE_RI_FASTREGISTER_MR
-} __packed;
-
-enum t3_rdma_opcode {
- T3_RDMA_WRITE, /* IETF RDMAP v1.0 ... */
- T3_READ_REQ,
- T3_READ_RESP,
- T3_SEND,
- T3_SEND_WITH_INV,
- T3_SEND_WITH_SE,
- T3_SEND_WITH_SE_INV,
- T3_TERMINATE,
- T3_RDMA_INIT, /* CHELSIO RI specific ... */
- T3_BIND_MW,
- T3_FAST_REGISTER,
- T3_LOCAL_INV,
- T3_QP_MOD,
- T3_BYPASS,
- T3_RDMA_READ_REQ_WITH_INV,
-} __packed;
-
-static inline enum t3_rdma_opcode wr2opcode(enum t3_wr_opcode wrop)
-{
- switch (wrop) {
- case T3_WR_BP: return T3_BYPASS;
- case T3_WR_SEND: return T3_SEND;
- case T3_WR_WRITE: return T3_RDMA_WRITE;
- case T3_WR_READ: return T3_READ_REQ;
- case T3_WR_INV_STAG: return T3_LOCAL_INV;
- case T3_WR_BIND: return T3_BIND_MW;
- case T3_WR_INIT: return T3_RDMA_INIT;
- case T3_WR_QP_MOD: return T3_QP_MOD;
- case T3_WR_FASTREG: return T3_FAST_REGISTER;
- default: break;
- }
- return -1;
-}
-
-
-/* Work request id */
-union t3_wrid {
- struct {
- u32 hi;
- u32 low;
- } id0;
- u64 id1;
-};
-
-#define WRID(wrid) (wrid.id1)
-#define WRID_GEN(wrid) (wrid.id0.wr_gen)
-#define WRID_IDX(wrid) (wrid.id0.wr_idx)
-#define WRID_LO(wrid) (wrid.id0.wr_lo)
-
-struct fw_riwrh {
- __be32 op_seop_flags;
- __be32 gen_tid_len;
-};
-
-#define S_FW_RIWR_OP 24
-#define M_FW_RIWR_OP 0xff
-#define V_FW_RIWR_OP(x) ((x) << S_FW_RIWR_OP)
-#define G_FW_RIWR_OP(x) ((((x) >> S_FW_RIWR_OP)) & M_FW_RIWR_OP)
-
-#define S_FW_RIWR_SOPEOP 22
-#define M_FW_RIWR_SOPEOP 0x3
-#define V_FW_RIWR_SOPEOP(x) ((x) << S_FW_RIWR_SOPEOP)
-
-#define S_FW_RIWR_FLAGS 8
-#define M_FW_RIWR_FLAGS 0x3fffff
-#define V_FW_RIWR_FLAGS(x) ((x) << S_FW_RIWR_FLAGS)
-#define G_FW_RIWR_FLAGS(x) ((((x) >> S_FW_RIWR_FLAGS)) & M_FW_RIWR_FLAGS)
-
-#define S_FW_RIWR_TID 8
-#define V_FW_RIWR_TID(x) ((x) << S_FW_RIWR_TID)
-
-#define S_FW_RIWR_LEN 0
-#define V_FW_RIWR_LEN(x) ((x) << S_FW_RIWR_LEN)
-
-#define S_FW_RIWR_GEN 31
-#define V_FW_RIWR_GEN(x) ((x) << S_FW_RIWR_GEN)
-
-struct t3_sge {
- __be32 stag;
- __be32 len;
- __be64 to;
-};
-
-/* If num_sgle is zero, flit 5+ contains immediate data.*/
-struct t3_send_wr {
- struct fw_riwrh wrh; /* 0 */
- union t3_wrid wrid; /* 1 */
-
- u8 rdmaop; /* 2 */
- u8 reserved[3];
- __be32 rem_stag;
- __be32 plen; /* 3 */
- __be32 num_sgle;
- struct t3_sge sgl[T3_MAX_SGE]; /* 4+ */
-};
-
-#define T3_MAX_FASTREG_DEPTH 10
-#define T3_MAX_FASTREG_FRAG 10
-
-struct t3_fastreg_wr {
- struct fw_riwrh wrh; /* 0 */
- union t3_wrid wrid; /* 1 */
- __be32 stag; /* 2 */
- __be32 len;
- __be32 va_base_hi; /* 3 */
- __be32 va_base_lo_fbo;
- __be32 page_type_perms; /* 4 */
- __be32 reserved1;
- __be64 pbl_addrs[0]; /* 5+ */
-};
-
-/*
- * If a fastreg wr spans multiple wqes, then the 2nd fragment looks like this.
- */
-struct t3_pbl_frag {
- struct fw_riwrh wrh; /* 0 */
- __be64 pbl_addrs[14]; /* 1..14 */
-};
-
-#define S_FR_PAGE_COUNT 24
-#define M_FR_PAGE_COUNT 0xff
-#define V_FR_PAGE_COUNT(x) ((x) << S_FR_PAGE_COUNT)
-#define G_FR_PAGE_COUNT(x) ((((x) >> S_FR_PAGE_COUNT)) & M_FR_PAGE_COUNT)
-
-#define S_FR_PAGE_SIZE 16
-#define M_FR_PAGE_SIZE 0x1f
-#define V_FR_PAGE_SIZE(x) ((x) << S_FR_PAGE_SIZE)
-#define G_FR_PAGE_SIZE(x) ((((x) >> S_FR_PAGE_SIZE)) & M_FR_PAGE_SIZE)
-
-#define S_FR_TYPE 8
-#define M_FR_TYPE 0x1
-#define V_FR_TYPE(x) ((x) << S_FR_TYPE)
-#define G_FR_TYPE(x) ((((x) >> S_FR_TYPE)) & M_FR_TYPE)
-
-#define S_FR_PERMS 0
-#define M_FR_PERMS 0xff
-#define V_FR_PERMS(x) ((x) << S_FR_PERMS)
-#define G_FR_PERMS(x) ((((x) >> S_FR_PERMS)) & M_FR_PERMS)
-
-struct t3_local_inv_wr {
- struct fw_riwrh wrh; /* 0 */
- union t3_wrid wrid; /* 1 */
- __be32 stag; /* 2 */
- __be32 reserved;
-};
-
-struct t3_rdma_write_wr {
- struct fw_riwrh wrh; /* 0 */
- union t3_wrid wrid; /* 1 */
- u8 rdmaop; /* 2 */
- u8 reserved[3];
- __be32 stag_sink;
- __be64 to_sink; /* 3 */
- __be32 plen; /* 4 */
- __be32 num_sgle;
- struct t3_sge sgl[T3_MAX_SGE]; /* 5+ */
-};
-
-struct t3_rdma_read_wr {
- struct fw_riwrh wrh; /* 0 */
- union t3_wrid wrid; /* 1 */
- u8 rdmaop; /* 2 */
- u8 local_inv;
- u8 reserved[2];
- __be32 rem_stag;
- __be64 rem_to; /* 3 */
- __be32 local_stag; /* 4 */
- __be32 local_len;
- __be64 local_to; /* 5 */
-};
-
-struct t3_bind_mw_wr {
- struct fw_riwrh wrh; /* 0 */
- union t3_wrid wrid; /* 1 */
- u16 reserved; /* 2 */
- u8 type;
- u8 perms;
- __be32 mr_stag;
- __be32 mw_stag; /* 3 */
- __be32 mw_len;
- __be64 mw_va; /* 4 */
- __be32 mr_pbl_addr; /* 5 */
- u8 reserved2[3];
- u8 mr_pagesz;
-};
-
-struct t3_receive_wr {
- struct fw_riwrh wrh; /* 0 */
- union t3_wrid wrid; /* 1 */
- u8 pagesz[T3_MAX_SGE];
- __be32 num_sgle; /* 2 */
- struct t3_sge sgl[T3_MAX_SGE]; /* 3+ */
- __be32 pbl_addr[T3_MAX_SGE];
-};
-
-struct t3_bypass_wr {
- struct fw_riwrh wrh;
- union t3_wrid wrid; /* 1 */
-};
-
-struct t3_modify_qp_wr {
- struct fw_riwrh wrh; /* 0 */
- union t3_wrid wrid; /* 1 */
- __be32 flags; /* 2 */
- __be32 quiesce; /* 2 */
- __be32 max_ird; /* 3 */
- __be32 max_ord; /* 3 */
- __be64 sge_cmd; /* 4 */
- __be64 ctx1; /* 5 */
- __be64 ctx0; /* 6 */
-};
-
-enum t3_modify_qp_flags {
- MODQP_QUIESCE = 0x01,
- MODQP_MAX_IRD = 0x02,
- MODQP_MAX_ORD = 0x04,
- MODQP_WRITE_EC = 0x08,
- MODQP_READ_EC = 0x10,
-};
-
-
-enum t3_mpa_attrs {
- uP_RI_MPA_RX_MARKER_ENABLE = 0x1,
- uP_RI_MPA_TX_MARKER_ENABLE = 0x2,
- uP_RI_MPA_CRC_ENABLE = 0x4,
- uP_RI_MPA_IETF_ENABLE = 0x8
-} __packed;
-
-enum t3_qp_caps {
- uP_RI_QP_RDMA_READ_ENABLE = 0x01,
- uP_RI_QP_RDMA_WRITE_ENABLE = 0x02,
- uP_RI_QP_BIND_ENABLE = 0x04,
- uP_RI_QP_FAST_REGISTER_ENABLE = 0x08,
- uP_RI_QP_STAG0_ENABLE = 0x10
-} __packed;
-
-enum rdma_init_rtr_types {
- RTR_READ = 1,
- RTR_WRITE = 2,
- RTR_SEND = 3,
-};
-
-#define S_RTR_TYPE 2
-#define M_RTR_TYPE 0x3
-#define V_RTR_TYPE(x) ((x) << S_RTR_TYPE)
-#define G_RTR_TYPE(x) ((((x) >> S_RTR_TYPE)) & M_RTR_TYPE)
-
-#define S_CHAN 4
-#define M_CHAN 0x3
-#define V_CHAN(x) ((x) << S_CHAN)
-#define G_CHAN(x) ((((x) >> S_CHAN)) & M_CHAN)
-
-struct t3_rdma_init_attr {
- u32 tid;
- u32 qpid;
- u32 pdid;
- u32 scqid;
- u32 rcqid;
- u32 rq_addr;
- u32 rq_size;
- enum t3_mpa_attrs mpaattrs;
- enum t3_qp_caps qpcaps;
- u16 tcp_emss;
- u32 ord;
- u32 ird;
- u64 qp_dma_addr;
- u32 qp_dma_size;
- enum rdma_init_rtr_types rtr_type;
- u16 flags;
- u16 rqe_count;
- u32 irs;
- u32 chan;
-};
-
-struct t3_rdma_init_wr {
- struct fw_riwrh wrh; /* 0 */
- union t3_wrid wrid; /* 1 */
- __be32 qpid; /* 2 */
- __be32 pdid;
- __be32 scqid; /* 3 */
- __be32 rcqid;
- __be32 rq_addr; /* 4 */
- __be32 rq_size;
- u8 mpaattrs; /* 5 */
- u8 qpcaps;
- __be16 ulpdu_size;
- __be16 flags_rtr_type;
- __be16 rqe_count;
- __be32 ord; /* 6 */
- __be32 ird;
- __be64 qp_dma_addr; /* 7 */
- __be32 qp_dma_size; /* 8 */
- __be32 irs;
-};
-
-struct t3_genbit {
- u64 flit[15];
- __be64 genbit;
-};
-
-struct t3_wq_in_err {
- u64 flit[13];
- u64 err;
-};
-
-enum rdma_init_wr_flags {
- MPA_INITIATOR = (1<<0),
- PRIV_QP = (1<<1),
-};
-
-union t3_wr {
- struct t3_send_wr send;
- struct t3_rdma_write_wr write;
- struct t3_rdma_read_wr read;
- struct t3_receive_wr recv;
- struct t3_fastreg_wr fastreg;
- struct t3_pbl_frag pbl_frag;
- struct t3_local_inv_wr local_inv;
- struct t3_bind_mw_wr bind;
- struct t3_bypass_wr bypass;
- struct t3_rdma_init_wr init;
- struct t3_modify_qp_wr qp_mod;
- struct t3_genbit genbit;
- struct t3_wq_in_err wq_in_err;
- __be64 flit[16];
-};
-
-#define T3_SQ_CQE_FLIT 13
-#define T3_SQ_COOKIE_FLIT 14
-
-#define T3_RQ_COOKIE_FLIT 13
-#define T3_RQ_CQE_FLIT 14
-
-static inline enum t3_wr_opcode fw_riwrh_opcode(struct fw_riwrh *wqe)
-{
- return G_FW_RIWR_OP(be32_to_cpu(wqe->op_seop_flags));
-}
-
-enum t3_wr_hdr_bits {
- T3_EOP = 1,
- T3_SOP = 2,
- T3_SOPEOP = T3_EOP|T3_SOP,
-};
-
-static inline void build_fw_riwrh(struct fw_riwrh *wqe, enum t3_wr_opcode op,
- enum t3_wr_flags flags, u8 genbit, u32 tid,
- u8 len, u8 sopeop)
-{
- wqe->op_seop_flags = cpu_to_be32(V_FW_RIWR_OP(op) |
- V_FW_RIWR_SOPEOP(sopeop) |
- V_FW_RIWR_FLAGS(flags));
- wmb();
- wqe->gen_tid_len = cpu_to_be32(V_FW_RIWR_GEN(genbit) |
- V_FW_RIWR_TID(tid) |
- V_FW_RIWR_LEN(len));
- /* 2nd gen bit... */
- ((union t3_wr *)wqe)->genbit.genbit = cpu_to_be64(genbit);
-}
-
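-/*
- * Illustrative note on the ordering in build_fw_riwrh(): the wmb() makes
- * the first flit globally visible before the generation bits are set, and
- * the second gen bit (flit 15) marks the tail, so hardware polling the
- * gen bits never consumes a partially written 16-flit WR.
- */
-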
-/*
- * T3 ULP2_TX commands
- */
-enum t3_utx_mem_op {
- T3_UTX_MEM_READ = 2,
- T3_UTX_MEM_WRITE = 3
-};
-
-/* T3 MC7 RDMA TPT entry format */
-
-enum tpt_mem_type {
- TPT_NON_SHARED_MR = 0x0,
- TPT_SHARED_MR = 0x1,
- TPT_MW = 0x2,
- TPT_MW_RELAXED_PROTECTION = 0x3
-};
-
-enum tpt_addr_type {
- TPT_ZBTO = 0,
- TPT_VATO = 1
-};
-
-enum tpt_mem_perm {
- TPT_MW_BIND = 0x10,
- TPT_LOCAL_READ = 0x8,
- TPT_LOCAL_WRITE = 0x4,
- TPT_REMOTE_READ = 0x2,
- TPT_REMOTE_WRITE = 0x1
-};
-
-struct tpt_entry {
- __be32 valid_stag_pdid;
- __be32 flags_pagesize_qpid;
-
- __be32 rsvd_pbl_addr;
- __be32 len;
- __be32 va_hi;
- __be32 va_low_or_fbo;
-
- __be32 rsvd_bind_cnt_or_pstag;
- __be32 rsvd_pbl_size;
-};
-
-#define S_TPT_VALID 31
-#define V_TPT_VALID(x) ((x) << S_TPT_VALID)
-#define F_TPT_VALID V_TPT_VALID(1U)
-
-#define S_TPT_STAG_KEY 23
-#define M_TPT_STAG_KEY 0xFF
-#define V_TPT_STAG_KEY(x) ((x) << S_TPT_STAG_KEY)
-#define G_TPT_STAG_KEY(x) (((x) >> S_TPT_STAG_KEY) & M_TPT_STAG_KEY)
-
-#define S_TPT_STAG_STATE 22
-#define V_TPT_STAG_STATE(x) ((x) << S_TPT_STAG_STATE)
-#define F_TPT_STAG_STATE V_TPT_STAG_STATE(1U)
-
-#define S_TPT_STAG_TYPE 20
-#define M_TPT_STAG_TYPE 0x3
-#define V_TPT_STAG_TYPE(x) ((x) << S_TPT_STAG_TYPE)
-#define G_TPT_STAG_TYPE(x) (((x) >> S_TPT_STAG_TYPE) & M_TPT_STAG_TYPE)
-
-#define S_TPT_PDID 0
-#define M_TPT_PDID 0xFFFFF
-#define V_TPT_PDID(x) ((x) << S_TPT_PDID)
-#define G_TPT_PDID(x) (((x) >> S_TPT_PDID) & M_TPT_PDID)
-
-#define S_TPT_PERM 28
-#define M_TPT_PERM 0xF
-#define V_TPT_PERM(x) ((x) << S_TPT_PERM)
-#define G_TPT_PERM(x) (((x) >> S_TPT_PERM) & M_TPT_PERM)
-
-#define S_TPT_REM_INV_DIS 27
-#define V_TPT_REM_INV_DIS(x) ((x) << S_TPT_REM_INV_DIS)
-#define F_TPT_REM_INV_DIS V_TPT_REM_INV_DIS(1U)
-
-#define S_TPT_ADDR_TYPE 26
-#define V_TPT_ADDR_TYPE(x) ((x) << S_TPT_ADDR_TYPE)
-#define F_TPT_ADDR_TYPE V_TPT_ADDR_TYPE(1U)
-
-#define S_TPT_MW_BIND_ENABLE 25
-#define V_TPT_MW_BIND_ENABLE(x) ((x) << S_TPT_MW_BIND_ENABLE)
-#define F_TPT_MW_BIND_ENABLE V_TPT_MW_BIND_ENABLE(1U)
-
-#define S_TPT_PAGE_SIZE 20
-#define M_TPT_PAGE_SIZE 0x1F
-#define V_TPT_PAGE_SIZE(x) ((x) << S_TPT_PAGE_SIZE)
-#define G_TPT_PAGE_SIZE(x) (((x) >> S_TPT_PAGE_SIZE) & M_TPT_PAGE_SIZE)
-
-#define S_TPT_PBL_ADDR 0
-#define M_TPT_PBL_ADDR 0x1FFFFFFF
-#define V_TPT_PBL_ADDR(x) ((x) << S_TPT_PBL_ADDR)
-#define G_TPT_PBL_ADDR(x) (((x) >> S_TPT_PBL_ADDR) & M_TPT_PBL_ADDR)
-
-#define S_TPT_QPID 0
-#define M_TPT_QPID 0xFFFFF
-#define V_TPT_QPID(x) ((x) << S_TPT_QPID)
-#define G_TPT_QPID(x) (((x) >> S_TPT_QPID) & M_TPT_QPID)
-
-#define S_TPT_PSTAG 0
-#define M_TPT_PSTAG 0xFFFFFF
-#define V_TPT_PSTAG(x) ((x) << S_TPT_PSTAG)
-#define G_TPT_PSTAG(x) (((x) >> S_TPT_PSTAG) & M_TPT_PSTAG)
-
-#define S_TPT_PBL_SIZE 0
-#define M_TPT_PBL_SIZE 0xFFFFF
-#define V_TPT_PBL_SIZE(x) ((x) << S_TPT_PBL_SIZE)
-#define G_TPT_PBL_SIZE(x) (((x) >> S_TPT_PBL_SIZE) & M_TPT_PBL_SIZE)
-
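-/*
- * Illustrative sketch (not part of the driver) of the S_/M_/V_/G_ field
- * convention used throughout this file: V_*() positions a value within a
- * word, G_*() masks it back out.  The values below are arbitrary.
- */
-static inline void tpt_field_sketch(void)
-{
-	u32 w = V_TPT_STAG_KEY(0xab) | V_TPT_PDID(0x12345);
-
-	/* G_TPT_STAG_KEY(w) == 0xab and G_TPT_PDID(w) == 0x12345 */
-	(void)w;
-}
-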
-/*
- * CQE defs
- */
-struct t3_cqe {
- __be32 header;
- __be32 len;
- union {
- struct {
- __be32 stag;
- __be32 msn;
- } rcqe;
- struct {
- u32 wrid_hi;
- u32 wrid_low;
- } scqe;
- } u;
-};
-
-#define S_CQE_OOO 31
-#define M_CQE_OOO 0x1
-#define G_CQE_OOO(x) ((((x) >> S_CQE_OOO)) & M_CQE_OOO)
-#define V_CQE_OOO(x) ((x)<<S_CQE_OOO)
-
-#define S_CQE_QPID 12
-#define M_CQE_QPID 0x7FFFF
-#define G_CQE_QPID(x) ((((x) >> S_CQE_QPID)) & M_CQE_QPID)
-#define V_CQE_QPID(x) ((x)<<S_CQE_QPID)
-
-#define S_CQE_SWCQE 11
-#define M_CQE_SWCQE 0x1
-#define G_CQE_SWCQE(x) ((((x) >> S_CQE_SWCQE)) & M_CQE_SWCQE)
-#define V_CQE_SWCQE(x) ((x)<<S_CQE_SWCQE)
-
-#define S_CQE_GENBIT 10
-#define M_CQE_GENBIT 0x1
-#define G_CQE_GENBIT(x) (((x) >> S_CQE_GENBIT) & M_CQE_GENBIT)
-#define V_CQE_GENBIT(x) ((x)<<S_CQE_GENBIT)
-
-#define S_CQE_STATUS 5
-#define M_CQE_STATUS 0x1F
-#define G_CQE_STATUS(x) ((((x) >> S_CQE_STATUS)) & M_CQE_STATUS)
-#define V_CQE_STATUS(x) ((x)<<S_CQE_STATUS)
-
-#define S_CQE_TYPE 4
-#define M_CQE_TYPE 0x1
-#define G_CQE_TYPE(x) ((((x) >> S_CQE_TYPE)) & M_CQE_TYPE)
-#define V_CQE_TYPE(x) ((x)<<S_CQE_TYPE)
-
-#define S_CQE_OPCODE 0
-#define M_CQE_OPCODE 0xF
-#define G_CQE_OPCODE(x) ((((x) >> S_CQE_OPCODE)) & M_CQE_OPCODE)
-#define V_CQE_OPCODE(x) ((x)<<S_CQE_OPCODE)
-
-#define SW_CQE(x) (G_CQE_SWCQE(be32_to_cpu((x).header)))
-#define CQE_OOO(x) (G_CQE_OOO(be32_to_cpu((x).header)))
-#define CQE_QPID(x) (G_CQE_QPID(be32_to_cpu((x).header)))
-#define CQE_GENBIT(x) (G_CQE_GENBIT(be32_to_cpu((x).header)))
-#define CQE_TYPE(x) (G_CQE_TYPE(be32_to_cpu((x).header)))
-#define SQ_TYPE(x) (CQE_TYPE((x)))
-#define RQ_TYPE(x) (!CQE_TYPE((x)))
-#define CQE_STATUS(x) (G_CQE_STATUS(be32_to_cpu((x).header)))
-#define CQE_OPCODE(x) (G_CQE_OPCODE(be32_to_cpu((x).header)))
-
-#define CQE_SEND_OPCODE(x) ( \
- (G_CQE_OPCODE(be32_to_cpu((x).header)) == T3_SEND) || \
- (G_CQE_OPCODE(be32_to_cpu((x).header)) == T3_SEND_WITH_SE) || \
- (G_CQE_OPCODE(be32_to_cpu((x).header)) == T3_SEND_WITH_INV) || \
- (G_CQE_OPCODE(be32_to_cpu((x).header)) == T3_SEND_WITH_SE_INV))
-
-#define CQE_LEN(x) (be32_to_cpu((x).len))
-
-/* used for RQ completion processing */
-#define CQE_WRID_STAG(x) (be32_to_cpu((x).u.rcqe.stag))
-#define CQE_WRID_MSN(x) (be32_to_cpu((x).u.rcqe.msn))
-
-/* used for SQ completion processing */
-#define CQE_WRID_SQ_WPTR(x) ((x).u.scqe.wrid_hi)
-#define CQE_WRID_WPTR(x) ((x).u.scqe.wrid_low)
-
-/* generic accessor macros */
-#define CQE_WRID_HI(x) ((x).u.scqe.wrid_hi)
-#define CQE_WRID_LOW(x) ((x).u.scqe.wrid_low)
-
-#define TPT_ERR_SUCCESS 0x0
-#define TPT_ERR_STAG 0x1 /* STAG invalid: either the */
- /* STAG is offlimt, being 0, */
- /* or STAG_key mismatch */
-#define TPT_ERR_PDID 0x2 /* PDID mismatch */
-#define TPT_ERR_QPID 0x3 /* QPID mismatch */
-#define TPT_ERR_ACCESS 0x4 /* Invalid access right */
-#define TPT_ERR_WRAP 0x5 /* Wrap error */
-#define TPT_ERR_BOUND 0x6 /* base and bounds voilation */
-#define TPT_ERR_INVALIDATE_SHARED_MR 0x7 /* attempt to invalidate a */
- /* shared memory region */
-#define TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND 0x8 /* attempt to invalidate a */
- /* shared memory region */
-#define TPT_ERR_ECC 0x9 /* ECC error detected */
-#define TPT_ERR_ECC_PSTAG 0xA /* ECC error detected when */
- /* reading PSTAG for a MW */
- /* Invalidate */
-#define TPT_ERR_PBL_ADDR_BOUND 0xB /* pbl addr out of bounds: */
- /* software error */
-#define TPT_ERR_SWFLUSH 0xC /* SW FLUSHED */
-#define TPT_ERR_CRC 0x10 /* CRC error */
-#define TPT_ERR_MARKER 0x11 /* Marker error */
-#define TPT_ERR_PDU_LEN_ERR 0x12 /* invalid PDU length */
-#define TPT_ERR_OUT_OF_RQE 0x13 /* out of RQE */
-#define TPT_ERR_DDP_VERSION 0x14 /* wrong DDP version */
-#define TPT_ERR_RDMA_VERSION 0x15 /* wrong RDMA version */
-#define TPT_ERR_OPCODE 0x16 /* invalid rdma opcode */
-#define TPT_ERR_DDP_QUEUE_NUM 0x17 /* invalid ddp queue number */
-#define TPT_ERR_MSN 0x18 /* MSN error */
-#define TPT_ERR_TBIT 0x19 /* tag bit not set correctly */
-#define TPT_ERR_MO 0x1A /* MO not 0 for TERMINATE */
- /* or READ_REQ */
-#define TPT_ERR_MSN_GAP 0x1B
-#define TPT_ERR_MSN_RANGE 0x1C
-#define TPT_ERR_IRD_OVERFLOW 0x1D
-#define TPT_ERR_RQE_ADDR_BOUND 0x1E /* RQE addr out of bounds: */
- /* software error */
-#define TPT_ERR_INTERNAL_ERR 0x1F /* internal error (opcode */
- /* mismatch) */
-
-struct t3_swsq {
- __u64 wr_id;
- struct t3_cqe cqe;
- __u32 sq_wptr;
- __be32 read_len;
- int opcode;
- int complete;
- int signaled;
-};
-
-struct t3_swrq {
- __u64 wr_id;
- __u32 pbl_addr;
-};
-
-/*
- * A T3 WQ implements both the SQ and RQ.
- */
-struct t3_wq {
- union t3_wr *queue; /* DMA accessible memory */
- dma_addr_t dma_addr; /* DMA address for HW */
-	DEFINE_DMA_UNMAP_ADDR(mapping);	/* unmap cruft */
- u32 error; /* 1 once we go to ERROR */
- u32 qpid;
- u32 wptr; /* idx to next available WR slot */
- u32 size_log2; /* total wq size */
- struct t3_swsq *sq; /* SW SQ */
- struct t3_swsq *oldest_read; /* tracks oldest pending read */
- u32 sq_wptr; /* sq_wptr - sq_rptr == count of */
- u32 sq_rptr; /* pending wrs */
- u32 sq_size_log2; /* sq size */
- struct t3_swrq *rq; /* SW RQ (holds consumer wr_ids */
- u32 rq_wptr; /* rq_wptr - rq_rptr == count of */
- u32 rq_rptr; /* pending wrs */
- struct t3_swrq *rq_oldest_wr; /* oldest wr on the SW RQ */
- u32 rq_size_log2; /* rq size */
- u32 rq_addr; /* rq adapter address */
- void __iomem *doorbell; /* kernel db */
- u64 udb; /* user db if any */
- struct cxio_rdev *rdev;
-};
-
-struct t3_cq {
- u32 cqid;
- u32 rptr;
- u32 wptr;
- u32 size_log2;
- dma_addr_t dma_addr;
- DEFINE_DMA_UNMAP_ADDR(mapping);
- struct t3_cqe *queue;
- struct t3_cqe *sw_queue;
- u32 sw_rptr;
- u32 sw_wptr;
-};
-
-#define CQ_VLD_ENTRY(ptr,size_log2,cqe) (Q_GENBIT(ptr,size_log2) == \
- CQE_GENBIT(*cqe))
-
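-/*
- * Worked example of the gen-bit test above (values illustrative): with
- * size_log2 = 8, Q_GENBIT() is 1 for rptr 0..255, 0 for 256..511, 1 for
- * 512..767, and so on.  A slot is valid only while the generation bit in
- * its header matches the consumer's current lap, so entries left over
- * from the previous pass fail CQ_VLD_ENTRY().
- */
-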
-struct t3_cq_status_page {
- u32 cq_err;
-};
-
-static inline int cxio_cq_in_error(struct t3_cq *cq)
-{
- return ((struct t3_cq_status_page *)
- &cq->queue[1 << cq->size_log2])->cq_err;
-}
-
-static inline void cxio_set_cq_in_error(struct t3_cq *cq)
-{
- ((struct t3_cq_status_page *)
- &cq->queue[1 << cq->size_log2])->cq_err = 1;
-}
-
-static inline void cxio_set_wq_in_error(struct t3_wq *wq)
-{
- wq->queue->wq_in_err.err |= 1;
-}
-
-static inline void cxio_disable_wq_db(struct t3_wq *wq)
-{
- wq->queue->wq_in_err.err |= 2;
-}
-
-static inline void cxio_enable_wq_db(struct t3_wq *wq)
-{
- wq->queue->wq_in_err.err &= ~2;
-}
-
-static inline int cxio_wq_db_enabled(struct t3_wq *wq)
-{
- return !(wq->queue->wq_in_err.err & 2);
-}
-
-static inline struct t3_cqe *cxio_next_hw_cqe(struct t3_cq *cq)
-{
- struct t3_cqe *cqe;
-
- cqe = cq->queue + (Q_PTR2IDX(cq->rptr, cq->size_log2));
- if (CQ_VLD_ENTRY(cq->rptr, cq->size_log2, cqe))
- return cqe;
- return NULL;
-}
-
-static inline struct t3_cqe *cxio_next_sw_cqe(struct t3_cq *cq)
-{
- struct t3_cqe *cqe;
-
- if (!Q_EMPTY(cq->sw_rptr, cq->sw_wptr)) {
- cqe = cq->sw_queue + (Q_PTR2IDX(cq->sw_rptr, cq->size_log2));
- return cqe;
- }
- return NULL;
-}
-
-static inline struct t3_cqe *cxio_next_cqe(struct t3_cq *cq)
-{
- struct t3_cqe *cqe;
-
- if (!Q_EMPTY(cq->sw_rptr, cq->sw_wptr)) {
- cqe = cq->sw_queue + (Q_PTR2IDX(cq->sw_rptr, cq->size_log2));
- return cqe;
- }
- cqe = cq->queue + (Q_PTR2IDX(cq->rptr, cq->size_log2));
- if (CQ_VLD_ENTRY(cq->rptr, cq->size_log2, cqe))
- return cqe;
- return NULL;
-}
-
-#endif
diff --git a/drivers/infiniband/hw/cxgb3/iwch.c b/drivers/infiniband/hw/cxgb3/iwch.c
deleted file mode 100644
index 56a8ab6210cf..000000000000
--- a/drivers/infiniband/hw/cxgb3/iwch.c
+++ /dev/null
@@ -1,282 +0,0 @@
-/*
- * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-
-#include <rdma/ib_verbs.h>
-
-#include "cxgb3_offload.h"
-#include "iwch_provider.h"
-#include <rdma/cxgb3-abi.h>
-#include "iwch.h"
-#include "iwch_cm.h"
-
-#define DRV_VERSION "1.1"
-
-MODULE_AUTHOR("Boyd Faulkner, Steve Wise");
-MODULE_DESCRIPTION("Chelsio T3 RDMA Driver");
-MODULE_LICENSE("Dual BSD/GPL");
-
-static void open_rnic_dev(struct t3cdev *);
-static void close_rnic_dev(struct t3cdev *);
-static void iwch_event_handler(struct t3cdev *, u32, u32);
-
-struct cxgb3_client t3c_client = {
- .name = "iw_cxgb3",
- .add = open_rnic_dev,
- .remove = close_rnic_dev,
- .handlers = t3c_handlers,
- .redirect = iwch_ep_redirect,
- .event_handler = iwch_event_handler
-};
-
-static LIST_HEAD(dev_list);
-static DEFINE_MUTEX(dev_mutex);
-
-static void disable_dbs(struct iwch_dev *rnicp)
-{
- unsigned long index;
- struct iwch_qp *qhp;
-
- xa_lock_irq(&rnicp->qps);
- xa_for_each(&rnicp->qps, index, qhp)
- cxio_disable_wq_db(&qhp->wq);
- xa_unlock_irq(&rnicp->qps);
-}
-
-static void enable_dbs(struct iwch_dev *rnicp, int ring_db)
-{
- unsigned long index;
- struct iwch_qp *qhp;
-
- xa_lock_irq(&rnicp->qps);
- xa_for_each(&rnicp->qps, index, qhp) {
- if (ring_db)
- ring_doorbell(qhp->rhp->rdev.ctrl_qp.doorbell,
- qhp->wq.qpid);
- cxio_enable_wq_db(&qhp->wq);
- }
- xa_unlock_irq(&rnicp->qps);
-}
-
-static void iwch_db_drop_task(struct work_struct *work)
-{
- struct iwch_dev *rnicp = container_of(work, struct iwch_dev,
- db_drop_task.work);
- enable_dbs(rnicp, 1);
-}
-
-static void rnic_init(struct iwch_dev *rnicp)
-{
- pr_debug("%s iwch_dev %p\n", __func__, rnicp);
- xa_init_flags(&rnicp->cqs, XA_FLAGS_LOCK_IRQ);
- xa_init_flags(&rnicp->qps, XA_FLAGS_LOCK_IRQ);
- xa_init_flags(&rnicp->mrs, XA_FLAGS_LOCK_IRQ);
- INIT_DELAYED_WORK(&rnicp->db_drop_task, iwch_db_drop_task);
-
- rnicp->attr.max_qps = T3_MAX_NUM_QP - 32;
- rnicp->attr.max_wrs = T3_MAX_QP_DEPTH;
- rnicp->attr.max_sge_per_wr = T3_MAX_SGE;
- rnicp->attr.max_sge_per_rdma_write_wr = T3_MAX_SGE;
- rnicp->attr.max_cqs = T3_MAX_NUM_CQ - 1;
- rnicp->attr.max_cqes_per_cq = T3_MAX_CQ_DEPTH;
- rnicp->attr.max_mem_regs = cxio_num_stags(&rnicp->rdev);
- rnicp->attr.max_phys_buf_entries = T3_MAX_PBL_SIZE;
- rnicp->attr.max_pds = T3_MAX_NUM_PD - 1;
- rnicp->attr.mem_pgsizes_bitmask = T3_PAGESIZE_MASK;
- rnicp->attr.max_mr_size = T3_MAX_MR_SIZE;
- rnicp->attr.can_resize_wq = 0;
- rnicp->attr.max_rdma_reads_per_qp = 8;
- rnicp->attr.max_rdma_read_resources =
- rnicp->attr.max_rdma_reads_per_qp * rnicp->attr.max_qps;
- rnicp->attr.max_rdma_read_qp_depth = 8; /* IRD */
- rnicp->attr.max_rdma_read_depth =
- rnicp->attr.max_rdma_read_qp_depth * rnicp->attr.max_qps;
- rnicp->attr.rq_overflow_handled = 0;
- rnicp->attr.can_modify_ird = 0;
- rnicp->attr.can_modify_ord = 0;
- rnicp->attr.max_mem_windows = rnicp->attr.max_mem_regs - 1;
- rnicp->attr.stag0_value = 1;
- rnicp->attr.zbva_support = 1;
- rnicp->attr.local_invalidate_fence = 1;
- rnicp->attr.cq_overflow_detection = 1;
- return;
-}
-
-static void open_rnic_dev(struct t3cdev *tdev)
-{
- struct iwch_dev *rnicp;
-
- pr_debug("%s t3cdev %p\n", __func__, tdev);
- pr_info_once("Chelsio T3 RDMA Driver - version %s\n", DRV_VERSION);
- rnicp = ib_alloc_device(iwch_dev, ibdev);
- if (!rnicp) {
- pr_err("Cannot allocate ib device\n");
- return;
- }
- rnicp->rdev.ulp = rnicp;
- rnicp->rdev.t3cdev_p = tdev;
-
- mutex_lock(&dev_mutex);
-
- if (cxio_rdev_open(&rnicp->rdev)) {
- mutex_unlock(&dev_mutex);
- pr_err("Unable to open CXIO rdev\n");
- ib_dealloc_device(&rnicp->ibdev);
- return;
- }
-
- rnic_init(rnicp);
-
- list_add_tail(&rnicp->entry, &dev_list);
- mutex_unlock(&dev_mutex);
-
- if (iwch_register_device(rnicp)) {
- pr_err("Unable to register device\n");
- close_rnic_dev(tdev);
- }
- pr_info("Initialized device %s\n",
- pci_name(rnicp->rdev.rnic_info.pdev));
- return;
-}
-
-static void close_rnic_dev(struct t3cdev *tdev)
-{
- struct iwch_dev *dev, *tmp;
- pr_debug("%s t3cdev %p\n", __func__, tdev);
- mutex_lock(&dev_mutex);
- list_for_each_entry_safe(dev, tmp, &dev_list, entry) {
- if (dev->rdev.t3cdev_p == tdev) {
- dev->rdev.flags = CXIO_ERROR_FATAL;
- synchronize_net();
- cancel_delayed_work_sync(&dev->db_drop_task);
- list_del(&dev->entry);
- iwch_unregister_device(dev);
- cxio_rdev_close(&dev->rdev);
- WARN_ON(!xa_empty(&dev->cqs));
- WARN_ON(!xa_empty(&dev->qps));
- WARN_ON(!xa_empty(&dev->mrs));
- ib_dealloc_device(&dev->ibdev);
- break;
- }
- }
- mutex_unlock(&dev_mutex);
-}
-
-static void iwch_event_handler(struct t3cdev *tdev, u32 evt, u32 port_id)
-{
- struct cxio_rdev *rdev = tdev->ulp;
- struct iwch_dev *rnicp;
- struct ib_event event;
- u32 portnum = port_id + 1;
- int dispatch = 0;
-
- if (!rdev)
- return;
- rnicp = rdev_to_iwch_dev(rdev);
- switch (evt) {
- case OFFLOAD_STATUS_DOWN: {
- rdev->flags = CXIO_ERROR_FATAL;
- synchronize_net();
- event.event = IB_EVENT_DEVICE_FATAL;
- dispatch = 1;
- break;
- }
- case OFFLOAD_PORT_DOWN: {
- event.event = IB_EVENT_PORT_ERR;
- dispatch = 1;
- break;
- }
- case OFFLOAD_PORT_UP: {
- event.event = IB_EVENT_PORT_ACTIVE;
- dispatch = 1;
- break;
- }
- case OFFLOAD_DB_FULL: {
- disable_dbs(rnicp);
- break;
- }
- case OFFLOAD_DB_EMPTY: {
- enable_dbs(rnicp, 1);
- break;
- }
- case OFFLOAD_DB_DROP: {
- unsigned long delay = 1000;
- unsigned short r;
-
- disable_dbs(rnicp);
- get_random_bytes(&r, 2);
- delay += r & 1023;
-
- /*
- * delay is between 1000-2023 usecs.
- */
- schedule_delayed_work(&rnicp->db_drop_task,
- usecs_to_jiffies(delay));
- break;
- }
- }
-
- if (dispatch) {
- event.device = &rnicp->ibdev;
- event.element.port_num = portnum;
- ib_dispatch_event(&event);
- }
-
- return;
-}
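
When the adapter reports OFFLOAD_DB_DROP, the handler above disables every QP's doorbell and schedules db_drop_task to re-enable (and re-ring) them after a randomized delay, so the queues do not all hammer the doorbell again in lock-step. Below is a minimal userspace sketch of just the delay arithmetic; the 1000 usec base and the "r & 1023" jitter mask come from the handler, while rand() merely stands in for the kernel's get_random_bytes():

    #include <stdio.h>
    #include <stdlib.h>

    /* Sketch of the OFFLOAD_DB_DROP backoff: a 1000 usec base plus up
     * to 1023 usecs of jitter, i.e. 1000-2023 usecs in total. */
    static unsigned long db_drop_delay_usecs(void)
    {
    	unsigned short r = (unsigned short)rand();

    	return 1000 + (r & 1023);
    }

    int main(void)
    {
    	for (int i = 0; i < 5; i++)
    		printf("delay %lu usecs\n", db_drop_delay_usecs());
    	return 0;
    }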
-
-static int __init iwch_init_module(void)
-{
- int err;
-
- err = cxio_hal_init();
- if (err)
- return err;
- err = iwch_cm_init();
- if (err)
- return err;
- cxio_register_ev_cb(iwch_ev_dispatch);
- cxgb3_register_client(&t3c_client);
- return 0;
-}
-
-static void __exit iwch_exit_module(void)
-{
- cxgb3_unregister_client(&t3c_client);
- cxio_unregister_ev_cb(iwch_ev_dispatch);
- iwch_cm_term();
- cxio_hal_exit();
-}
-
-module_init(iwch_init_module);
-module_exit(iwch_exit_module);
diff --git a/drivers/infiniband/hw/cxgb3/iwch.h b/drivers/infiniband/hw/cxgb3/iwch.h
deleted file mode 100644
index 310a937bffcf..000000000000
--- a/drivers/infiniband/hw/cxgb3/iwch.h
+++ /dev/null
@@ -1,155 +0,0 @@
-/*
- * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef __IWCH_H__
-#define __IWCH_H__
-
-#include <linux/mutex.h>
-#include <linux/list.h>
-#include <linux/spinlock.h>
-#include <linux/xarray.h>
-#include <linux/workqueue.h>
-
-#include <rdma/ib_verbs.h>
-
-#include "cxio_hal.h"
-#include "cxgb3_offload.h"
-
-struct iwch_pd;
-struct iwch_cq;
-struct iwch_qp;
-struct iwch_mr;
-
-struct iwch_rnic_attributes {
- u32 max_qps;
- u32 max_wrs; /* Max for any SQ/RQ */
- u32 max_sge_per_wr;
- u32 max_sge_per_rdma_write_wr; /* for RDMA Write WR */
- u32 max_cqs;
- u32 max_cqes_per_cq;
- u32 max_mem_regs;
- u32 max_phys_buf_entries; /* for phys buf list */
- u32 max_pds;
-
- /*
- * The memory page sizes supported by this RNIC.
- * Bit position i in bitmap indicates page of
- * size (4k)^i. Phys block list mode unsupported.
- */
- u32 mem_pgsizes_bitmask;
- u64 max_mr_size;
- u8 can_resize_wq;
-
- /*
- * The maximum number of RDMA Reads that can be outstanding
- * per QP with this RNIC as the target.
- */
- u32 max_rdma_reads_per_qp;
-
- /*
- * The maximum number of resources used for RDMA Reads
- * by this RNIC with this RNIC as the target.
- */
- u32 max_rdma_read_resources;
-
- /*
- * The max depth per QP for initiation of RDMA Read
- * by this RNIC.
- */
- u32 max_rdma_read_qp_depth;
-
- /*
- * The maximum depth for initiation of RDMA Read
- * operations by this RNIC on all QPs
- */
- u32 max_rdma_read_depth;
- u8 rq_overflow_handled;
- u32 can_modify_ird;
- u32 can_modify_ord;
- u32 max_mem_windows;
- u32 stag0_value;
- u8 zbva_support;
- u8 local_invalidate_fence;
- u32 cq_overflow_detection;
-};
-
-struct iwch_dev {
- struct ib_device ibdev;
- struct cxio_rdev rdev;
- u32 device_cap_flags;
- struct iwch_rnic_attributes attr;
- struct xarray cqs;
- struct xarray qps;
- struct xarray mrs;
- struct list_head entry;
- struct delayed_work db_drop_task;
-};
-
-static inline struct iwch_dev *to_iwch_dev(struct ib_device *ibdev)
-{
- return container_of(ibdev, struct iwch_dev, ibdev);
-}
-
-static inline struct iwch_dev *rdev_to_iwch_dev(struct cxio_rdev *rdev)
-{
- return container_of(rdev, struct iwch_dev, rdev);
-}
-
-static inline int t3b_device(const struct iwch_dev *rhp)
-{
- return rhp->rdev.t3cdev_p->type == T3B;
-}
-
-static inline int t3a_device(const struct iwch_dev *rhp)
-{
- return rhp->rdev.t3cdev_p->type == T3A;
-}
-
-static inline struct iwch_cq *get_chp(struct iwch_dev *rhp, u32 cqid)
-{
- return xa_load(&rhp->cqs, cqid);
-}
-
-static inline struct iwch_qp *get_qhp(struct iwch_dev *rhp, u32 qpid)
-{
- return xa_load(&rhp->qps, qpid);
-}
-
-static inline struct iwch_mr *get_mhp(struct iwch_dev *rhp, u32 mmid)
-{
- return xa_load(&rhp->mrs, mmid);
-}
-
-extern struct cxgb3_client t3c_client;
-extern cxgb3_cpl_handler_func t3c_handlers[NUM_CPL_CMDS];
-extern void iwch_ev_dispatch(struct cxio_rdev *rdev_p, struct sk_buff *skb);
-
-#endif
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
deleted file mode 100644
index 0bca72cb4d9a..000000000000
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ /dev/null
@@ -1,2258 +0,0 @@
-/*
- * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include <linux/module.h>
-#include <linux/list.h>
-#include <linux/slab.h>
-#include <linux/workqueue.h>
-#include <linux/skbuff.h>
-#include <linux/timer.h>
-#include <linux/notifier.h>
-#include <linux/inetdevice.h>
-
-#include <net/neighbour.h>
-#include <net/netevent.h>
-#include <net/route.h>
-
-#include "tcb.h"
-#include "cxgb3_offload.h"
-#include "iwch.h"
-#include "iwch_provider.h"
-#include "iwch_cm.h"
-
-static char *states[] = {
- "idle",
- "listen",
- "connecting",
- "mpa_wait_req",
- "mpa_req_sent",
- "mpa_req_rcvd",
- "mpa_rep_sent",
- "fpdu_mode",
- "aborting",
- "closing",
- "moribund",
- "dead",
- NULL,
-};
-
-int peer2peer = 0;
-module_param(peer2peer, int, 0644);
-MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=0)");
-
-static int ep_timeout_secs = 60;
-module_param(ep_timeout_secs, int, 0644);
-MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout "
- "in seconds (default=60)");
-
-static int mpa_rev = 1;
-module_param(mpa_rev, int, 0644);
-MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, "
- "1 is spec compliant. (default=1)");
-
-static int markers_enabled = 0;
-module_param(markers_enabled, int, 0644);
-MODULE_PARM_DESC(markers_enabled, "Enable MPA MARKERS (default(0)=disabled)");
-
-static int crc_enabled = 1;
-module_param(crc_enabled, int, 0644);
-MODULE_PARM_DESC(crc_enabled, "Enable MPA CRC (default(1)=enabled)");
-
-static int rcv_win = 256 * 1024;
-module_param(rcv_win, int, 0644);
-MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256KB)");
-
-static int snd_win = 32 * 1024;
-module_param(snd_win, int, 0644);
-MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=32KB)");
-
-static unsigned int nocong = 0;
-module_param(nocong, uint, 0644);
-MODULE_PARM_DESC(nocong, "Turn off congestion control (default=0)");
-
-static unsigned int cong_flavor = 1;
-module_param(cong_flavor, uint, 0644);
-MODULE_PARM_DESC(cong_flavor, "TCP Congestion control flavor (default=1)");
-
-static struct workqueue_struct *workq;
-
-static struct sk_buff_head rxq;
-
-static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
-static void ep_timeout(struct timer_list *t);
-static void connect_reply_upcall(struct iwch_ep *ep, int status);
-
-static void start_ep_timer(struct iwch_ep *ep)
-{
- pr_debug("%s ep %p\n", __func__, ep);
- if (timer_pending(&ep->timer)) {
- pr_debug("%s stopped / restarted timer ep %p\n", __func__, ep);
- del_timer_sync(&ep->timer);
- } else
- get_ep(&ep->com);
- ep->timer.expires = jiffies + ep_timeout_secs * HZ;
- add_timer(&ep->timer);
-}
-
-static void stop_ep_timer(struct iwch_ep *ep)
-{
- pr_debug("%s ep %p\n", __func__, ep);
- if (!timer_pending(&ep->timer)) {
- WARN(1, "%s timer stopped when it's not running! ep %p state %u\n",
- __func__, ep, ep->com.state);
- return;
- }
- del_timer_sync(&ep->timer);
- put_ep(&ep->com);
-}
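
start_ep_timer() takes an endpoint reference only when it arms a timer that was not already pending, and stop_ep_timer() (or the expiry handler) drops it again, so the endpoint cannot be freed while a timer might still fire. A toy model of that invariant, with illustrative names in place of the real kref machinery:

    #include <stdio.h>

    struct toyep { int refs; int timer_pending; };

    static void start_ep_timer(struct toyep *ep)
    {
    	if (!ep->timer_pending)
    		ep->refs++;		/* get_ep(): the timer now holds a ref */
    	ep->timer_pending = 1;
    }

    static void stop_ep_timer(struct toyep *ep)
    {
    	if (!ep->timer_pending)
    		return;			/* nothing armed, nothing to drop */
    	ep->timer_pending = 0;
    	ep->refs--;			/* put_ep(): the timer's ref released */
    }

    int main(void)
    {
    	struct toyep ep = { .refs = 1 };

    	start_ep_timer(&ep);	/* refs 2 */
    	start_ep_timer(&ep);	/* restart: still refs 2, no extra ref */
    	stop_ep_timer(&ep);	/* refs 1 */
    	printf("refs=%d pending=%d\n", ep.refs, ep.timer_pending);
    	return 0;
    }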
-
-static int iwch_l2t_send(struct t3cdev *tdev, struct sk_buff *skb, struct l2t_entry *l2e)
-{
- int error = 0;
- struct cxio_rdev *rdev;
-
- rdev = (struct cxio_rdev *)tdev->ulp;
- if (cxio_fatal_error(rdev)) {
- kfree_skb(skb);
- return -EIO;
- }
- error = l2t_send(tdev, skb, l2e);
- if (error < 0)
- kfree_skb(skb);
- return error < 0 ? error : 0;
-}
-
-int iwch_cxgb3_ofld_send(struct t3cdev *tdev, struct sk_buff *skb)
-{
- int error = 0;
- struct cxio_rdev *rdev;
-
- rdev = (struct cxio_rdev *)tdev->ulp;
- if (cxio_fatal_error(rdev)) {
- kfree_skb(skb);
- return -EIO;
- }
- error = cxgb3_ofld_send(tdev, skb);
- if (error < 0)
- kfree_skb(skb);
- return error < 0 ? error : 0;
-}
-
-static void release_tid(struct t3cdev *tdev, u32 hwtid, struct sk_buff *skb)
-{
- struct cpl_tid_release *req;
-
- skb = get_skb(skb, sizeof(*req), GFP_KERNEL);
- if (!skb)
- return;
- req = skb_put(skb, sizeof(*req));
- req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
- OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid));
- skb->priority = CPL_PRIORITY_SETUP;
- iwch_cxgb3_ofld_send(tdev, skb);
- return;
-}
-
-int iwch_quiesce_tid(struct iwch_ep *ep)
-{
- struct cpl_set_tcb_field *req;
- struct sk_buff *skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
-
- if (!skb)
- return -ENOMEM;
- req = skb_put(skb, sizeof(*req));
- req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
- req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
- OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, ep->hwtid));
- req->reply = 0;
- req->cpu_idx = 0;
- req->word = htons(W_TCB_RX_QUIESCE);
- req->mask = cpu_to_be64(1ULL << S_TCB_RX_QUIESCE);
- req->val = cpu_to_be64(1 << S_TCB_RX_QUIESCE);
-
- skb->priority = CPL_PRIORITY_DATA;
- return iwch_cxgb3_ofld_send(ep->com.tdev, skb);
-}
-
-int iwch_resume_tid(struct iwch_ep *ep)
-{
- struct cpl_set_tcb_field *req;
- struct sk_buff *skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
-
- if (!skb)
- return -ENOMEM;
- req = skb_put(skb, sizeof(*req));
- req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
- req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
- OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, ep->hwtid));
- req->reply = 0;
- req->cpu_idx = 0;
- req->word = htons(W_TCB_RX_QUIESCE);
- req->mask = cpu_to_be64(1ULL << S_TCB_RX_QUIESCE);
- req->val = 0;
-
- skb->priority = CPL_PRIORITY_DATA;
- return iwch_cxgb3_ofld_send(ep->com.tdev, skb);
-}
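
iwch_quiesce_tid() and iwch_resume_tid() differ only in req->val: both target the RX_QUIESCE bit of the connection's TCB through a mask/value pair, assuming the usual masked-write semantics of CPL_SET_TCB_FIELD, where only bits set in the mask change. A standalone sketch of that update rule; the bit position used here is illustrative (the real S_TCB_RX_QUIESCE lives in tcb.h):

    #include <stdio.h>
    #include <stdint.h>

    /* Masked update: only the bits set in mask take their value from val. */
    static uint64_t tcb_apply(uint64_t tcb, uint64_t mask, uint64_t val)
    {
    	return (tcb & ~mask) | (val & mask);
    }

    int main(void)
    {
    	const int S_TCB_RX_QUIESCE = 5;		/* illustrative bit position */
    	uint64_t mask = 1ULL << S_TCB_RX_QUIESCE;
    	uint64_t tcb = 0;

    	tcb = tcb_apply(tcb, mask, mask);	/* quiesce: val = 1 << S */
    	printf("after quiesce: %#llx\n", (unsigned long long)tcb);
    	tcb = tcb_apply(tcb, mask, 0);		/* resume:  val = 0 */
    	printf("after resume:  %#llx\n", (unsigned long long)tcb);
    	return 0;
    }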
-
-static void set_emss(struct iwch_ep *ep, u16 opt)
-{
- pr_debug("%s ep %p opt %u\n", __func__, ep, opt);
- ep->emss = T3C_DATA(ep->com.tdev)->mtus[G_TCPOPT_MSS(opt)] - 40;
- if (G_TCPOPT_TSTAMP(opt))
- ep->emss -= 12;
- if (ep->emss < 128)
- ep->emss = 128;
- pr_debug("emss=%d\n", ep->emss);
-}
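
set_emss() derives the effective MSS from the negotiated MTU-table entry: subtract 40 bytes of IPv4+TCP headers, subtract 12 more when TCP timestamps were negotiated, and never go below 128. The same arithmetic as a standalone sketch:

    #include <stdio.h>

    /* Effective MSS: MTU minus 40 bytes of IPv4+TCP headers, minus a
     * 12-byte TCP timestamp option when present, floored at 128. */
    static int effective_mss(int mtu, int tstamps)
    {
    	int emss = mtu - 40;

    	if (tstamps)
    		emss -= 12;
    	if (emss < 128)
    		emss = 128;
    	return emss;
    }

    int main(void)
    {
    	printf("mtu 1500, no tstamp: %d\n", effective_mss(1500, 0)); /* 1460 */
    	printf("mtu 1500, tstamp:    %d\n", effective_mss(1500, 1)); /* 1448 */
    	printf("tiny mtu 100:        %d\n", effective_mss(100, 0));  /* 128  */
    	return 0;
    }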
-
-static enum iwch_ep_state state_read(struct iwch_ep_common *epc)
-{
- unsigned long flags;
- enum iwch_ep_state state;
-
- spin_lock_irqsave(&epc->lock, flags);
- state = epc->state;
- spin_unlock_irqrestore(&epc->lock, flags);
- return state;
-}
-
-static void __state_set(struct iwch_ep_common *epc, enum iwch_ep_state new)
-{
- epc->state = new;
-}
-
-static void state_set(struct iwch_ep_common *epc, enum iwch_ep_state new)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&epc->lock, flags);
- pr_debug("%s - %s -> %s\n", __func__, states[epc->state], states[new]);
- __state_set(epc, new);
- spin_unlock_irqrestore(&epc->lock, flags);
- return;
-}
-
-static void *alloc_ep(int size, gfp_t gfp)
-{
- struct iwch_ep_common *epc;
-
- epc = kzalloc(size, gfp);
- if (epc) {
- kref_init(&epc->kref);
- spin_lock_init(&epc->lock);
- init_waitqueue_head(&epc->waitq);
- }
- pr_debug("%s alloc ep %p\n", __func__, epc);
- return epc;
-}
-
-void __free_ep(struct kref *kref)
-{
- struct iwch_ep *ep;
- ep = container_of(container_of(kref, struct iwch_ep_common, kref),
- struct iwch_ep, com);
- pr_debug("%s ep %p state %s\n",
- __func__, ep, states[state_read(&ep->com)]);
- if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
- cxgb3_remove_tid(ep->com.tdev, (void *)ep, ep->hwtid);
- dst_release(ep->dst);
- l2t_release(ep->com.tdev, ep->l2t);
- }
- kfree(ep);
-}
-
-static void release_ep_resources(struct iwch_ep *ep)
-{
- pr_debug("%s ep %p tid %d\n", __func__, ep, ep->hwtid);
- set_bit(RELEASE_RESOURCES, &ep->com.flags);
- put_ep(&ep->com);
-}
-
-static int status2errno(int status)
-{
- switch (status) {
- case CPL_ERR_NONE:
- return 0;
- case CPL_ERR_CONN_RESET:
- return -ECONNRESET;
- case CPL_ERR_ARP_MISS:
- return -EHOSTUNREACH;
- case CPL_ERR_CONN_TIMEDOUT:
- return -ETIMEDOUT;
- case CPL_ERR_TCAM_FULL:
- return -ENOMEM;
- case CPL_ERR_CONN_EXIST:
- return -EADDRINUSE;
- default:
- return -EIO;
- }
-}
-
-/*
- * Try to reuse an already-allocated skb where possible.
- */
-static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp)
-{
- if (skb && !skb_is_nonlinear(skb) && !skb_cloned(skb)) {
- skb_trim(skb, 0);
- skb_get(skb);
- } else {
- skb = alloc_skb(len, gfp);
- }
- return skb;
-}
-
-static struct rtable *find_route(struct t3cdev *dev, __be32 local_ip,
- __be32 peer_ip, __be16 local_port,
- __be16 peer_port, u8 tos)
-{
- struct rtable *rt;
- struct flowi4 fl4;
-
- rt = ip_route_output_ports(&init_net, &fl4, NULL, peer_ip, local_ip,
- peer_port, local_port, IPPROTO_TCP,
- tos, 0);
- if (IS_ERR(rt))
- return NULL;
- return rt;
-}
-
-static unsigned int find_best_mtu(const struct t3c_data *d, unsigned short mtu)
-{
- int i = 0;
-
- while (i < d->nmtus - 1 && d->mtus[i + 1] <= mtu)
- ++i;
- return i;
-}
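
find_best_mtu() picks the index of the largest entry in the adapter's sorted MTU table that does not exceed the path MTU; the index is later fed to V_MSS_IDX() in send_connect() and accept_cr(). A worked example with an illustrative table (the real table is programmed per adapter):

    #include <stdio.h>

    /* Largest index i such that mtus[i] <= mtu (index 0 even if
     * mtus[0] is too big), mirroring find_best_mtu() above. */
    static unsigned int find_best_mtu(const unsigned short *mtus, int nmtus,
    				  unsigned short mtu)
    {
    	int i = 0;

    	while (i < nmtus - 1 && mtus[i + 1] <= mtu)
    		++i;
    	return i;
    }

    int main(void)
    {
    	/* Illustrative table only. */
    	const unsigned short mtus[] = { 88, 256, 512, 576, 1024, 1280, 1460, 9000 };
    	const int nmtus = sizeof(mtus) / sizeof(mtus[0]);

    	printf("path mtu 1500 -> idx %u\n", find_best_mtu(mtus, nmtus, 1500)); /* 6: 1460 */
    	printf("path mtu  600 -> idx %u\n", find_best_mtu(mtus, nmtus, 600));  /* 3: 576  */
    	return 0;
    }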
-
-static void arp_failure_discard(struct t3cdev *dev, struct sk_buff *skb)
-{
- pr_debug("%s t3cdev %p\n", __func__, dev);
- kfree_skb(skb);
-}
-
-/*
- * Handle an ARP failure for an active open.
- */
-static void act_open_req_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
-{
- pr_err("ARP failure during connect\n");
- kfree_skb(skb);
-}
-
-/*
- * Handle an ARP failure for a CPL_ABORT_REQ. Change it into a no RST variant
- * and send it along.
- */
-static void abort_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
-{
- struct cpl_abort_req *req = cplhdr(skb);
-
- pr_debug("%s t3cdev %p\n", __func__, dev);
- req->cmd = CPL_ABORT_NO_RST;
- iwch_cxgb3_ofld_send(dev, skb);
-}
-
-static int send_halfclose(struct iwch_ep *ep, gfp_t gfp)
-{
- struct cpl_close_con_req *req;
- struct sk_buff *skb;
-
- pr_debug("%s ep %p\n", __func__, ep);
- skb = get_skb(NULL, sizeof(*req), gfp);
- if (!skb) {
- pr_err("%s - failed to alloc skb\n", __func__);
- return -ENOMEM;
- }
- skb->priority = CPL_PRIORITY_DATA;
- set_arp_failure_handler(skb, arp_failure_discard);
- req = skb_put(skb, sizeof(*req));
- req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_CLOSE_CON));
- req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
- OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, ep->hwtid));
- return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
-}
-
-static int send_abort(struct iwch_ep *ep, struct sk_buff *skb, gfp_t gfp)
-{
- struct cpl_abort_req *req;
-
- pr_debug("%s ep %p\n", __func__, ep);
- skb = get_skb(skb, sizeof(*req), gfp);
- if (!skb) {
- pr_err("%s - failed to alloc skb\n", __func__);
- return -ENOMEM;
- }
- skb->priority = CPL_PRIORITY_DATA;
- set_arp_failure_handler(skb, abort_arp_failure);
- req = skb_put_zero(skb, sizeof(*req));
- req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_REQ));
- req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
- OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid));
- req->cmd = CPL_ABORT_SEND_RST;
- return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
-}
-
-static int send_connect(struct iwch_ep *ep)
-{
- struct cpl_act_open_req *req;
- struct sk_buff *skb;
- u32 opt0h, opt0l, opt2;
- unsigned int mtu_idx;
- int wscale;
-
- pr_debug("%s ep %p\n", __func__, ep);
-
- skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
- if (!skb) {
- pr_err("%s - failed to alloc skb\n", __func__);
- return -ENOMEM;
- }
- mtu_idx = find_best_mtu(T3C_DATA(ep->com.tdev), dst_mtu(ep->dst));
- wscale = compute_wscale(rcv_win);
- opt0h = V_NAGLE(0) |
- V_NO_CONG(nocong) |
- V_KEEP_ALIVE(1) |
- F_TCAM_BYPASS |
- V_WND_SCALE(wscale) |
- V_MSS_IDX(mtu_idx) |
- V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx);
- opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10);
- opt2 = F_RX_COALESCE_VALID | V_RX_COALESCE(0) | V_FLAVORS_VALID(1) |
- V_CONG_CONTROL_FLAVOR(cong_flavor);
- skb->priority = CPL_PRIORITY_SETUP;
- set_arp_failure_handler(skb, act_open_req_arp_failure);
-
- req = skb_put(skb, sizeof(*req));
- req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
- OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, ep->atid));
- req->local_port = ep->com.local_addr.sin_port;
- req->peer_port = ep->com.remote_addr.sin_port;
- req->local_ip = ep->com.local_addr.sin_addr.s_addr;
- req->peer_ip = ep->com.remote_addr.sin_addr.s_addr;
- req->opt0h = htonl(opt0h);
- req->opt0l = htonl(opt0l);
- req->params = 0;
- req->opt2 = htonl(opt2);
- return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
-}
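
compute_wscale(), used above to fold rcv_win into opt0h, is defined in the driver's headers rather than in this file. A plausible reading, consistent with how it is used here, is the smallest TCP window-scale shift that lets a 16-bit window advertise rcv_win; treat the sketch below as an assumed reconstruction, not the verbatim helper:

    #include <stdio.h>

    /* Smallest window-scale shift (capped at 14, the RFC 1323 maximum)
     * such that a 16-bit window shifted by wscale covers win bytes.
     * An assumed reconstruction of compute_wscale(). */
    static int compute_wscale(int win)
    {
    	int wscale = 0;

    	while (wscale < 14 && (65535 << wscale) < win)
    		wscale++;
    	return wscale;
    }

    int main(void)
    {
    	printf("rcv_win 256KB -> wscale %d\n", compute_wscale(256 * 1024)); /* 3 */
    	printf("rcv_win  32KB -> wscale %d\n", compute_wscale(32 * 1024));  /* 0 */
    	return 0;
    }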
-
-static void send_mpa_req(struct iwch_ep *ep, struct sk_buff *skb)
-{
- int mpalen;
- struct tx_data_wr *req;
- struct mpa_message *mpa;
- int len;
-
- pr_debug("%s ep %p pd_len %d\n", __func__, ep, ep->plen);
-
- BUG_ON(skb_cloned(skb));
-
- mpalen = sizeof(*mpa) + ep->plen;
- if (skb->data + mpalen + sizeof(*req) > skb_end_pointer(skb)) {
- kfree_skb(skb);
- skb = alloc_skb(mpalen + sizeof(*req), GFP_KERNEL);
- if (!skb) {
- connect_reply_upcall(ep, -ENOMEM);
- return;
- }
- }
- skb_trim(skb, 0);
- skb_reserve(skb, sizeof(*req));
- skb_put(skb, mpalen);
- skb->priority = CPL_PRIORITY_DATA;
- mpa = (struct mpa_message *) skb->data;
- memset(mpa, 0, sizeof(*mpa));
- memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
- mpa->flags = (crc_enabled ? MPA_CRC : 0) |
- (markers_enabled ? MPA_MARKERS : 0);
- mpa->private_data_size = htons(ep->plen);
- mpa->revision = mpa_rev;
-
- if (ep->plen)
- memcpy(mpa->private_data, ep->mpa_pkt + sizeof(*mpa), ep->plen);
-
- /*
- * Reference the mpa skb. This ensures the data area
- * will remain in memory until the hw acks the tx.
- * Function tx_ack() will deref it.
- */
- skb_get(skb);
- set_arp_failure_handler(skb, arp_failure_discard);
- skb_reset_transport_header(skb);
- len = skb->len;
- req = skb_push(skb, sizeof(*req));
- req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)|F_WR_COMPL);
- req->wr_lo = htonl(V_WR_TID(ep->hwtid));
- req->len = htonl(len);
- req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) |
- V_TX_SNDBUF(snd_win>>15));
- req->flags = htonl(F_TX_INIT);
- req->sndseq = htonl(ep->snd_seq);
- BUG_ON(ep->mpa_skb);
- ep->mpa_skb = skb;
- iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
- start_ep_timer(ep);
- state_set(&ep->com, MPA_REQ_SENT);
- return;
-}
-
-static int send_mpa_reject(struct iwch_ep *ep, const void *pdata, u8 plen)
-{
- int mpalen;
- struct tx_data_wr *req;
- struct mpa_message *mpa;
- struct sk_buff *skb;
-
- pr_debug("%s ep %p plen %d\n", __func__, ep, plen);
-
- mpalen = sizeof(*mpa) + plen;
-
- skb = get_skb(NULL, mpalen + sizeof(*req), GFP_KERNEL);
- if (!skb) {
- pr_err("%s - cannot alloc skb!\n", __func__);
- return -ENOMEM;
- }
- skb_reserve(skb, sizeof(*req));
- mpa = skb_put(skb, mpalen);
- memset(mpa, 0, sizeof(*mpa));
- memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
- mpa->flags = MPA_REJECT;
- mpa->revision = mpa_rev;
- mpa->private_data_size = htons(plen);
- if (plen)
- memcpy(mpa->private_data, pdata, plen);
-
- /*
- * Reference the mpa skb again. This ensures the data area
- * will remain in memory until the hw acks the tx.
- * Function tx_ack() will deref it.
- */
- skb_get(skb);
- skb->priority = CPL_PRIORITY_DATA;
- set_arp_failure_handler(skb, arp_failure_discard);
- skb_reset_transport_header(skb);
- req = skb_push(skb, sizeof(*req));
- req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)|F_WR_COMPL);
- req->wr_lo = htonl(V_WR_TID(ep->hwtid));
- req->len = htonl(mpalen);
- req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) |
- V_TX_SNDBUF(snd_win>>15));
- req->flags = htonl(F_TX_INIT);
- req->sndseq = htonl(ep->snd_seq);
- BUG_ON(ep->mpa_skb);
- ep->mpa_skb = skb;
- return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
-}
-
-static int send_mpa_reply(struct iwch_ep *ep, const void *pdata, u8 plen)
-{
- int mpalen;
- struct tx_data_wr *req;
- struct mpa_message *mpa;
- int len;
- struct sk_buff *skb;
-
- pr_debug("%s ep %p plen %d\n", __func__, ep, plen);
-
- mpalen = sizeof(*mpa) + plen;
-
- skb = get_skb(NULL, mpalen + sizeof(*req), GFP_KERNEL);
- if (!skb) {
- pr_err("%s - cannot alloc skb!\n", __func__);
- return -ENOMEM;
- }
- skb->priority = CPL_PRIORITY_DATA;
- skb_reserve(skb, sizeof(*req));
- mpa = skb_put(skb, mpalen);
- memset(mpa, 0, sizeof(*mpa));
- memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
- mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) |
- (markers_enabled ? MPA_MARKERS : 0);
- mpa->revision = mpa_rev;
- mpa->private_data_size = htons(plen);
- if (plen)
- memcpy(mpa->private_data, pdata, plen);
-
- /*
- * Reference the mpa skb. This ensures the data area
- * will remain in memory until the hw acks the tx.
- * Function tx_ack() will deref it.
- */
- skb_get(skb);
- set_arp_failure_handler(skb, arp_failure_discard);
- skb_reset_transport_header(skb);
- len = skb->len;
- req = skb_push(skb, sizeof(*req));
- req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)|F_WR_COMPL);
- req->wr_lo = htonl(V_WR_TID(ep->hwtid));
- req->len = htonl(len);
- req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) |
- V_TX_SNDBUF(snd_win>>15));
- req->flags = htonl(F_TX_INIT);
- req->sndseq = htonl(ep->snd_seq);
- ep->mpa_skb = skb;
- state_set(&ep->com, MPA_REP_SENT);
- return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
-}
-
-static int act_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
-{
- struct iwch_ep *ep = ctx;
- struct cpl_act_establish *req = cplhdr(skb);
- unsigned int tid = GET_TID(req);
-
- pr_debug("%s ep %p tid %d\n", __func__, ep, tid);
-
- dst_confirm(ep->dst);
-
- /* setup the hwtid for this connection */
- ep->hwtid = tid;
- cxgb3_insert_tid(ep->com.tdev, &t3c_client, ep, tid);
-
- ep->snd_seq = ntohl(req->snd_isn);
- ep->rcv_seq = ntohl(req->rcv_isn);
-
- set_emss(ep, ntohs(req->tcp_opt));
-
- /* dealloc the atid */
- cxgb3_free_atid(ep->com.tdev, ep->atid);
-
- /* start MPA negotiation */
- send_mpa_req(ep, skb);
-
- return 0;
-}
-
-static void abort_connection(struct iwch_ep *ep, struct sk_buff *skb, gfp_t gfp)
-{
- pr_debug("%s ep %p\n", __FILE__, ep);
- state_set(&ep->com, ABORTING);
- send_abort(ep, skb, gfp);
-}
-
-static void close_complete_upcall(struct iwch_ep *ep)
-{
- struct iw_cm_event event;
-
- pr_debug("%s ep %p\n", __func__, ep);
- memset(&event, 0, sizeof(event));
- event.event = IW_CM_EVENT_CLOSE;
- if (ep->com.cm_id) {
- pr_debug("close complete delivered ep %p cm_id %p tid %d\n",
- ep, ep->com.cm_id, ep->hwtid);
- ep->com.cm_id->event_handler(ep->com.cm_id, &event);
- ep->com.cm_id->rem_ref(ep->com.cm_id);
- ep->com.cm_id = NULL;
- ep->com.qp = NULL;
- }
-}
-
-static void peer_close_upcall(struct iwch_ep *ep)
-{
- struct iw_cm_event event;
-
- pr_debug("%s ep %p\n", __func__, ep);
- memset(&event, 0, sizeof(event));
- event.event = IW_CM_EVENT_DISCONNECT;
- if (ep->com.cm_id) {
- pr_debug("peer close delivered ep %p cm_id %p tid %d\n",
- ep, ep->com.cm_id, ep->hwtid);
- ep->com.cm_id->event_handler(ep->com.cm_id, &event);
- }
-}
-
-static void peer_abort_upcall(struct iwch_ep *ep)
-{
- struct iw_cm_event event;
-
- pr_debug("%s ep %p\n", __func__, ep);
- memset(&event, 0, sizeof(event));
- event.event = IW_CM_EVENT_CLOSE;
- event.status = -ECONNRESET;
- if (ep->com.cm_id) {
- pr_debug("abort delivered ep %p cm_id %p tid %d\n", ep,
- ep->com.cm_id, ep->hwtid);
- ep->com.cm_id->event_handler(ep->com.cm_id, &event);
- ep->com.cm_id->rem_ref(ep->com.cm_id);
- ep->com.cm_id = NULL;
- ep->com.qp = NULL;
- }
-}
-
-static void connect_reply_upcall(struct iwch_ep *ep, int status)
-{
- struct iw_cm_event event;
-
- pr_debug("%s ep %p status %d\n", __func__, ep, status);
- memset(&event, 0, sizeof(event));
- event.event = IW_CM_EVENT_CONNECT_REPLY;
- event.status = status;
- memcpy(&event.local_addr, &ep->com.local_addr,
- sizeof(ep->com.local_addr));
- memcpy(&event.remote_addr, &ep->com.remote_addr,
- sizeof(ep->com.remote_addr));
-
- if ((status == 0) || (status == -ECONNREFUSED)) {
- event.private_data_len = ep->plen;
- event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
- }
- if (ep->com.cm_id) {
- pr_debug("%s ep %p tid %d status %d\n", __func__, ep,
- ep->hwtid, status);
- ep->com.cm_id->event_handler(ep->com.cm_id, &event);
- }
- if (status < 0 && ep->com.cm_id) {
- ep->com.cm_id->rem_ref(ep->com.cm_id);
- ep->com.cm_id = NULL;
- ep->com.qp = NULL;
- }
-}
-
-static void connect_request_upcall(struct iwch_ep *ep)
-{
- struct iw_cm_event event;
-
- pr_debug("%s ep %p tid %d\n", __func__, ep, ep->hwtid);
- memset(&event, 0, sizeof(event));
- event.event = IW_CM_EVENT_CONNECT_REQUEST;
- memcpy(&event.local_addr, &ep->com.local_addr,
- sizeof(ep->com.local_addr));
- memcpy(&event.remote_addr, &ep->com.remote_addr,
- sizeof(ep->com.remote_addr));
- event.private_data_len = ep->plen;
- event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
- event.provider_data = ep;
- /*
- * Until ird/ord negotiation via MPAv2 support is added, send max
- * supported values
- */
- event.ird = event.ord = 8;
- if (state_read(&ep->parent_ep->com) != DEAD) {
- get_ep(&ep->com);
- ep->parent_ep->com.cm_id->event_handler(
- ep->parent_ep->com.cm_id,
- &event);
- }
- put_ep(&ep->parent_ep->com);
- ep->parent_ep = NULL;
-}
-
-static void established_upcall(struct iwch_ep *ep)
-{
- struct iw_cm_event event;
-
- pr_debug("%s ep %p\n", __func__, ep);
- memset(&event, 0, sizeof(event));
- event.event = IW_CM_EVENT_ESTABLISHED;
- /*
- * Until ird/ord negotiation via MPAv2 support is added, send max
- * supported values
- */
- event.ird = event.ord = 8;
- if (ep->com.cm_id) {
- pr_debug("%s ep %p tid %d\n", __func__, ep, ep->hwtid);
- ep->com.cm_id->event_handler(ep->com.cm_id, &event);
- }
-}
-
-static int update_rx_credits(struct iwch_ep *ep, u32 credits)
-{
- struct cpl_rx_data_ack *req;
- struct sk_buff *skb;
-
- pr_debug("%s ep %p credits %u\n", __func__, ep, credits);
- skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
- if (!skb) {
- pr_err("update_rx_credits - cannot alloc skb!\n");
- return 0;
- }
-
- req = skb_put(skb, sizeof(*req));
- req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
- OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RX_DATA_ACK, ep->hwtid));
- req->credit_dack = htonl(V_RX_CREDITS(credits) | V_RX_FORCE_ACK(1));
- skb->priority = CPL_PRIORITY_ACK;
- iwch_cxgb3_ofld_send(ep->com.tdev, skb);
- return credits;
-}
-
-static void process_mpa_reply(struct iwch_ep *ep, struct sk_buff *skb)
-{
- struct mpa_message *mpa;
- u16 plen;
- struct iwch_qp_attributes attrs;
- enum iwch_qp_attr_mask mask;
- int err;
-
- pr_debug("%s ep %p\n", __func__, ep);
-
- /*
- * Stop mpa timer. If it expired, then the state has
- * changed and we bail since ep_timeout already aborted
- * the connection.
- */
- stop_ep_timer(ep);
- if (state_read(&ep->com) != MPA_REQ_SENT)
- return;
-
- /*
- * If we get more than the supported amount of private data
- * then we must fail this connection.
- */
- if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
- err = -EINVAL;
- goto err;
- }
-
- /*
- * copy the new data into our accumulation buffer.
- */
- skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
- skb->len);
- ep->mpa_pkt_len += skb->len;
-
- /*
- * if we don't even have the mpa message, then bail.
- */
- if (ep->mpa_pkt_len < sizeof(*mpa))
- return;
- mpa = (struct mpa_message *) ep->mpa_pkt;
-
- /* Validate MPA header. */
- if (mpa->revision != mpa_rev) {
- err = -EPROTO;
- goto err;
- }
- if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {
- err = -EPROTO;
- goto err;
- }
-
- plen = ntohs(mpa->private_data_size);
-
- /*
- * Fail if there's too much private data.
- */
- if (plen > MPA_MAX_PRIVATE_DATA) {
- err = -EPROTO;
- goto err;
- }
-
- /*
- * Fail if there is more data than the MPA header plus
- * the advertised private data length accounts for.
- */
- if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
- err = -EPROTO;
- goto err;
- }
-
- ep->plen = (u8) plen;
-
- /*
- * If we don't have all the pdata yet, then bail.
- * We'll continue processing when more data arrives.
- */
- if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
- return;
-
- if (mpa->flags & MPA_REJECT) {
- err = -ECONNREFUSED;
- goto err;
- }
-
- /*
- * If we get here we have accumulated the entire MPA
- * start reply message, including private data, and
- * the MPA header is valid.
- */
- state_set(&ep->com, FPDU_MODE);
- ep->mpa_attr.initiator = 1;
- ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
- ep->mpa_attr.recv_marker_enabled = markers_enabled;
- ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
- ep->mpa_attr.version = mpa_rev;
- pr_debug("%s - crc_enabled=%d, recv_marker_enabled=%d, xmit_marker_enabled=%d, version=%d\n",
- __func__,
- ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
- ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version);
-
- attrs.mpa_attr = ep->mpa_attr;
- attrs.max_ird = ep->ird;
- attrs.max_ord = ep->ord;
- attrs.llp_stream_handle = ep;
- attrs.next_state = IWCH_QP_STATE_RTS;
-
- mask = IWCH_QP_ATTR_NEXT_STATE |
- IWCH_QP_ATTR_LLP_STREAM_HANDLE | IWCH_QP_ATTR_MPA_ATTR |
- IWCH_QP_ATTR_MAX_IRD | IWCH_QP_ATTR_MAX_ORD;
-
- /* bind QP and TID with INIT_WR */
- err = iwch_modify_qp(ep->com.qp->rhp,
- ep->com.qp, mask, &attrs, 1);
- if (err)
- goto err;
-
- if (peer2peer && iwch_rqes_posted(ep->com.qp) == 0) {
- iwch_post_zb_read(ep);
- }
-
- goto out;
-err:
- abort_connection(ep, skb, GFP_KERNEL);
-out:
- connect_reply_upcall(ep, err);
- return;
-}
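
process_mpa_reply() above and process_mpa_request() below accumulate streaming-mode bytes into ep->mpa_pkt and validate them in the same order: buffer bound, full header present, revision and key, advertised private data length within limits, and no bytes beyond header plus pdata. A standalone sketch of that decision order; the struct layout, key string, and MPA_MAX_PRIVATE_DATA value here are illustrative stand-ins for the definitions in iwch_cm.h:

    #include <stdint.h>
    #include <string.h>
    #include <arpa/inet.h>

    #define MPA_MAX_PRIVATE_DATA	256	/* illustrative bound */

    struct mpa_hdr {			/* sketch of the MPA start frame */
    	char	key[16];		/* e.g. "MPA ID Rep Frame" for a reply */
    	uint8_t	flags;
    	uint8_t	revision;
    	uint16_t private_data_size;	/* big-endian on the wire */
    };

    /* Returns 0 = complete and valid, -1 = protocol error, 1 = need
     * more bytes, mirroring the decision order of the two functions. */
    static int validate_mpa(const uint8_t *pkt, size_t len, size_t bufsz,
    			const char *key, uint8_t rev)
    {
    	const struct mpa_hdr *mpa = (const void *)pkt;
    	uint16_t plen;

    	if (len > bufsz)
    		return -1;		/* more private data than we support */
    	if (len < sizeof(*mpa))
    		return 1;		/* header not complete yet */
    	if (mpa->revision != rev)
    		return -1;
    	if (memcmp(mpa->key, key, sizeof(mpa->key)))
    		return -1;
    	plen = ntohs(mpa->private_data_size);
    	if (plen > MPA_MAX_PRIVATE_DATA)
    		return -1;
    	if (len > sizeof(*mpa) + plen)
    		return -1;		/* trailing bytes plen cannot explain */
    	if (len < sizeof(*mpa) + plen)
    		return 1;		/* wait for the rest of the pdata */
    	return 0;
    }

    int main(void)
    {
    	struct mpa_hdr mpa = { .revision = 1 };

    	memcpy(mpa.key, "MPA ID Rep Frame", sizeof(mpa.key));
    	mpa.private_data_size = htons(0);
    	return validate_mpa((const uint8_t *)&mpa, sizeof(mpa), 512,
    			    "MPA ID Rep Frame", 1);
    }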
-
-static void process_mpa_request(struct iwch_ep *ep, struct sk_buff *skb)
-{
- struct mpa_message *mpa;
- u16 plen;
-
- pr_debug("%s ep %p\n", __func__, ep);
-
- /*
- * Stop mpa timer. If it expired, then the state has
- * changed and we bail since ep_timeout already aborted
- * the connection.
- */
- stop_ep_timer(ep);
- if (state_read(&ep->com) != MPA_REQ_WAIT)
- return;
-
- /*
- * If we get more than the supported amount of private data
- * then we must fail this connection.
- */
- if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
- abort_connection(ep, skb, GFP_KERNEL);
- return;
- }
-
- pr_debug("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
-
- /*
- * Copy the new data into our accumulation buffer.
- */
- skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
- skb->len);
- ep->mpa_pkt_len += skb->len;
-
- /*
- * If we don't even have the mpa message, then bail.
- * We'll continue processing when more data arrives.
- */
- if (ep->mpa_pkt_len < sizeof(*mpa))
- return;
- pr_debug("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
- mpa = (struct mpa_message *) ep->mpa_pkt;
-
- /*
- * Validate MPA Header.
- */
- if (mpa->revision != mpa_rev) {
- abort_connection(ep, skb, GFP_KERNEL);
- return;
- }
-
- if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) {
- abort_connection(ep, skb, GFP_KERNEL);
- return;
- }
-
- plen = ntohs(mpa->private_data_size);
-
- /*
- * Fail if there's too much private data.
- */
- if (plen > MPA_MAX_PRIVATE_DATA) {
- abort_connection(ep, skb, GFP_KERNEL);
- return;
- }
-
- /*
- * Fail if there is more data than the MPA header plus
- * the advertised private data length accounts for.
- */
- if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
- abort_connection(ep, skb, GFP_KERNEL);
- return;
- }
- ep->plen = (u8) plen;
-
- /*
- * If we don't have all the pdata yet, then bail.
- */
- if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
- return;
-
- /*
- * If we get here we have accumulated the entire MPA
- * start request message, including private data.
- */
- ep->mpa_attr.initiator = 0;
- ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
- ep->mpa_attr.recv_marker_enabled = markers_enabled;
- ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
- ep->mpa_attr.version = mpa_rev;
- pr_debug("%s - crc_enabled=%d, recv_marker_enabled=%d, xmit_marker_enabled=%d, version=%d\n",
- __func__,
- ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
- ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version);
-
- state_set(&ep->com, MPA_REQ_RCVD);
-
- /* drive upcall */
- connect_request_upcall(ep);
- return;
-}
-
-static int rx_data(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
-{
- struct iwch_ep *ep = ctx;
- struct cpl_rx_data *hdr = cplhdr(skb);
- unsigned int dlen = ntohs(hdr->len);
-
- pr_debug("%s ep %p dlen %u\n", __func__, ep, dlen);
-
- skb_pull(skb, sizeof(*hdr));
- skb_trim(skb, dlen);
-
- ep->rcv_seq += dlen;
- BUG_ON(ep->rcv_seq != (ntohl(hdr->seq) + dlen));
-
- switch (state_read(&ep->com)) {
- case MPA_REQ_SENT:
- process_mpa_reply(ep, skb);
- break;
- case MPA_REQ_WAIT:
- process_mpa_request(ep, skb);
- break;
- case MPA_REP_SENT:
- break;
- default:
- pr_err("%s Unexpected streaming data. ep %p state %d tid %d\n",
- __func__, ep, state_read(&ep->com), ep->hwtid);
-
- /*
- * The ep will timeout and inform the ULP of the failure.
- * See ep_timeout().
- */
- break;
- }
-
- /* update RX credits */
- update_rx_credits(ep, dlen);
-
- return CPL_RET_BUF_DONE;
-}
-
-/*
- * Upcall from the adapter indicating data has been transmitted.
- * For us it's just the single MPA request or reply. We can now free
- * the skb holding the mpa message.
- */
-static int tx_ack(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
-{
- struct iwch_ep *ep = ctx;
- struct cpl_wr_ack *hdr = cplhdr(skb);
- unsigned int credits = ntohs(hdr->credits);
- unsigned long flags;
- int post_zb = 0;
-
- pr_debug("%s ep %p credits %u\n", __func__, ep, credits);
-
- if (credits == 0) {
- pr_debug("%s 0 credit ack ep %p state %u\n",
- __func__, ep, state_read(&ep->com));
- return CPL_RET_BUF_DONE;
- }
-
- spin_lock_irqsave(&ep->com.lock, flags);
- BUG_ON(credits != 1);
- dst_confirm(ep->dst);
- if (!ep->mpa_skb) {
- pr_debug("%s rdma_init wr_ack ep %p state %u\n",
- __func__, ep, ep->com.state);
- if (ep->mpa_attr.initiator) {
- pr_debug("%s initiator ep %p state %u\n",
- __func__, ep, ep->com.state);
- if (peer2peer && ep->com.state == FPDU_MODE)
- post_zb = 1;
- } else {
- pr_debug("%s responder ep %p state %u\n",
- __func__, ep, ep->com.state);
- if (ep->com.state == MPA_REQ_RCVD) {
- ep->com.rpl_done = 1;
- wake_up(&ep->com.waitq);
- }
- }
- } else {
- pr_debug("%s lsm ack ep %p state %u freeing skb\n",
- __func__, ep, ep->com.state);
- kfree_skb(ep->mpa_skb);
- ep->mpa_skb = NULL;
- }
- spin_unlock_irqrestore(&ep->com.lock, flags);
- if (post_zb)
- iwch_post_zb_read(ep);
- return CPL_RET_BUF_DONE;
-}
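
The kfree_skb() here pairs with the skb_get() taken when the MPA message was sent (see the "Reference the mpa skb" comments above): the extra reference keeps the payload resident until the hardware acknowledges the transmit, at which point tx_ack() drops it. A toy refcount model of that handoff, with illustrative names rather than the sk_buff API:

    #include <stdio.h>
    #include <stdlib.h>

    struct toybuf { int refs; };		/* stand-in for struct sk_buff */

    static struct toybuf *toy_alloc(void)
    {
    	struct toybuf *b = malloc(sizeof(*b));

    	b->refs = 1;
    	return b;
    }

    static void toy_get(struct toybuf *b)	/* plays the role of skb_get() */
    {
    	b->refs++;
    }

    static void toy_put(struct toybuf *b)	/* plays the role of kfree_skb() */
    {
    	if (--b->refs == 0) {
    		free(b);
    		printf("payload freed\n");
    	}
    }

    int main(void)
    {
    	struct toybuf *mpa_skb = toy_alloc();	/* refs = 1 */

    	toy_get(mpa_skb);	/* send path holds it until the hw ack: refs = 2 */
    	toy_put(mpa_skb);	/* transmit completion consumes one ref: refs = 1 */
    	toy_put(mpa_skb);	/* tx_ack() drops the last ref: payload freed */
    	return 0;
    }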
-
-static int abort_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
-{
- struct iwch_ep *ep = ctx;
- unsigned long flags;
- int release = 0;
-
- pr_debug("%s ep %p\n", __func__, ep);
- BUG_ON(!ep);
-
- /*
- * We get 2 abort replies from the HW. The first one must
- * be ignored, other than recording that one more is expected.
- */
- if (!test_and_set_bit(ABORT_REQ_IN_PROGRESS, &ep->com.flags)) {
- return CPL_RET_BUF_DONE;
- }
-
- spin_lock_irqsave(&ep->com.lock, flags);
- switch (ep->com.state) {
- case ABORTING:
- close_complete_upcall(ep);
- __state_set(&ep->com, DEAD);
- release = 1;
- break;
- default:
- pr_err("%s ep %p state %d\n", __func__, ep, ep->com.state);
- break;
- }
- spin_unlock_irqrestore(&ep->com.lock, flags);
-
- if (release)
- release_ep_resources(ep);
- return CPL_RET_BUF_DONE;
-}
-
-/*
- * Return whether a failed active open has allocated a TID
- */
-static inline int act_open_has_tid(int status)
-{
- return status != CPL_ERR_TCAM_FULL && status != CPL_ERR_CONN_EXIST &&
- status != CPL_ERR_ARP_MISS;
-}
-
-static int act_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
-{
- struct iwch_ep *ep = ctx;
- struct cpl_act_open_rpl *rpl = cplhdr(skb);
-
- pr_debug("%s ep %p status %u errno %d\n", __func__, ep, rpl->status,
- status2errno(rpl->status));
- connect_reply_upcall(ep, status2errno(rpl->status));
- state_set(&ep->com, DEAD);
- if (ep->com.tdev->type != T3A && act_open_has_tid(rpl->status))
- release_tid(ep->com.tdev, GET_TID(rpl), NULL);
- cxgb3_free_atid(ep->com.tdev, ep->atid);
- dst_release(ep->dst);
- l2t_release(ep->com.tdev, ep->l2t);
- put_ep(&ep->com);
- return CPL_RET_BUF_DONE;
-}
-
-static int listen_start(struct iwch_listen_ep *ep)
-{
- struct sk_buff *skb;
- struct cpl_pass_open_req *req;
-
- pr_debug("%s ep %p\n", __func__, ep);
- skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
- if (!skb) {
- pr_err("t3c_listen_start failed to alloc skb!\n");
- return -ENOMEM;
- }
-
- req = skb_put(skb, sizeof(*req));
- req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
- OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, ep->stid));
- req->local_port = ep->com.local_addr.sin_port;
- req->local_ip = ep->com.local_addr.sin_addr.s_addr;
- req->peer_port = 0;
- req->peer_ip = 0;
- req->peer_netmask = 0;
- req->opt0h = htonl(F_DELACK | F_TCAM_BYPASS);
- req->opt0l = htonl(V_RCV_BUFSIZ(rcv_win>>10));
- req->opt1 = htonl(V_CONN_POLICY(CPL_CONN_POLICY_ASK));
-
- skb->priority = 1;
- return iwch_cxgb3_ofld_send(ep->com.tdev, skb);
-}
-
-static int pass_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
-{
- struct iwch_listen_ep *ep = ctx;
- struct cpl_pass_open_rpl *rpl = cplhdr(skb);
-
- pr_debug("%s ep %p status %d error %d\n", __func__, ep,
- rpl->status, status2errno(rpl->status));
- ep->com.rpl_err = status2errno(rpl->status);
- ep->com.rpl_done = 1;
- wake_up(&ep->com.waitq);
-
- return CPL_RET_BUF_DONE;
-}
-
-static int listen_stop(struct iwch_listen_ep *ep)
-{
- struct sk_buff *skb;
- struct cpl_close_listserv_req *req;
-
- pr_debug("%s ep %p\n", __func__, ep);
- skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
- if (!skb) {
- pr_err("%s - failed to alloc skb\n", __func__);
- return -ENOMEM;
- }
- req = skb_put(skb, sizeof(*req));
- req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
- req->cpu_idx = 0;
- OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, ep->stid));
- skb->priority = 1;
- return iwch_cxgb3_ofld_send(ep->com.tdev, skb);
-}
-
-static int close_listsrv_rpl(struct t3cdev *tdev, struct sk_buff *skb,
- void *ctx)
-{
- struct iwch_listen_ep *ep = ctx;
- struct cpl_close_listserv_rpl *rpl = cplhdr(skb);
-
- pr_debug("%s ep %p\n", __func__, ep);
- ep->com.rpl_err = status2errno(rpl->status);
- ep->com.rpl_done = 1;
- wake_up(&ep->com.waitq);
- return CPL_RET_BUF_DONE;
-}
-
-static void accept_cr(struct iwch_ep *ep, __be32 peer_ip, struct sk_buff *skb)
-{
- struct cpl_pass_accept_rpl *rpl;
- unsigned int mtu_idx;
- u32 opt0h, opt0l, opt2;
- int wscale;
-
- pr_debug("%s ep %p\n", __func__, ep);
- BUG_ON(skb_cloned(skb));
- skb_trim(skb, sizeof(*rpl));
- skb_get(skb);
- mtu_idx = find_best_mtu(T3C_DATA(ep->com.tdev), dst_mtu(ep->dst));
- wscale = compute_wscale(rcv_win);
- opt0h = V_NAGLE(0) |
- V_NO_CONG(nocong) |
- V_KEEP_ALIVE(1) |
- F_TCAM_BYPASS |
- V_WND_SCALE(wscale) |
- V_MSS_IDX(mtu_idx) |
- V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx);
- opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10);
- opt2 = F_RX_COALESCE_VALID | V_RX_COALESCE(0) | V_FLAVORS_VALID(1) |
- V_CONG_CONTROL_FLAVOR(cong_flavor);
-
- rpl = cplhdr(skb);
- rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
- OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL, ep->hwtid));
- rpl->peer_ip = peer_ip;
- rpl->opt0h = htonl(opt0h);
- rpl->opt0l_status = htonl(opt0l | CPL_PASS_OPEN_ACCEPT);
- rpl->opt2 = htonl(opt2);
- rpl->rsvd = rpl->opt2; /* workaround for HW bug */
- skb->priority = CPL_PRIORITY_SETUP;
- iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
-
- return;
-}
-
-static void reject_cr(struct t3cdev *tdev, u32 hwtid, __be32 peer_ip,
- struct sk_buff *skb)
-{
- pr_debug("%s t3cdev %p tid %u peer_ip %x\n", __func__, tdev, hwtid,
- peer_ip);
- BUG_ON(skb_cloned(skb));
- skb_trim(skb, sizeof(struct cpl_tid_release));
- skb_get(skb);
-
- if (tdev->type != T3A)
- release_tid(tdev, hwtid, skb);
- else {
- struct cpl_pass_accept_rpl *rpl;
-
- rpl = cplhdr(skb);
- skb->priority = CPL_PRIORITY_SETUP;
- rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
- OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
- hwtid));
- rpl->peer_ip = peer_ip;
- rpl->opt0h = htonl(F_TCAM_BYPASS);
- rpl->opt0l_status = htonl(CPL_PASS_OPEN_REJECT);
- rpl->opt2 = 0;
- rpl->rsvd = rpl->opt2;
- iwch_cxgb3_ofld_send(tdev, skb);
- }
-}
-
-static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
-{
- struct iwch_ep *child_ep, *parent_ep = ctx;
- struct cpl_pass_accept_req *req = cplhdr(skb);
- unsigned int hwtid = GET_TID(req);
- struct dst_entry *dst;
- struct l2t_entry *l2t;
- struct rtable *rt;
- struct iff_mac tim;
-
- pr_debug("%s parent ep %p tid %u\n", __func__, parent_ep, hwtid);
-
- if (state_read(&parent_ep->com) != LISTEN) {
- pr_err("%s - listening ep not in LISTEN\n", __func__);
- goto reject;
- }
-
- /*
- * Find the netdev for this connection request.
- */
- tim.mac_addr = req->dst_mac;
- tim.vlan_tag = ntohs(req->vlan_tag);
- if (tdev->ctl(tdev, GET_IFF_FROM_MAC, &tim) < 0 || !tim.dev) {
- pr_err("%s bad dst mac %pM\n", __func__, req->dst_mac);
- goto reject;
- }
-
- /* Find output route */
- rt = find_route(tdev,
- req->local_ip,
- req->peer_ip,
- req->local_port,
- req->peer_port, G_PASS_OPEN_TOS(ntohl(req->tos_tid)));
- if (!rt) {
- pr_err("%s - failed to find dst entry!\n", __func__);
- goto reject;
- }
- dst = &rt->dst;
- l2t = t3_l2t_get(tdev, dst, NULL, &req->peer_ip);
- if (!l2t) {
- pr_err("%s - failed to allocate l2t entry!\n", __func__);
- dst_release(dst);
- goto reject;
- }
- child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
- if (!child_ep) {
- pr_err("%s - failed to allocate ep entry!\n", __func__);
- l2t_release(tdev, l2t);
- dst_release(dst);
- goto reject;
- }
- state_set(&child_ep->com, CONNECTING);
- child_ep->com.tdev = tdev;
- child_ep->com.cm_id = NULL;
- child_ep->com.local_addr.sin_family = AF_INET;
- child_ep->com.local_addr.sin_port = req->local_port;
- child_ep->com.local_addr.sin_addr.s_addr = req->local_ip;
- child_ep->com.remote_addr.sin_family = AF_INET;
- child_ep->com.remote_addr.sin_port = req->peer_port;
- child_ep->com.remote_addr.sin_addr.s_addr = req->peer_ip;
- get_ep(&parent_ep->com);
- child_ep->parent_ep = parent_ep;
- child_ep->tos = G_PASS_OPEN_TOS(ntohl(req->tos_tid));
- child_ep->l2t = l2t;
- child_ep->dst = dst;
- child_ep->hwtid = hwtid;
- timer_setup(&child_ep->timer, ep_timeout, 0);
- cxgb3_insert_tid(tdev, &t3c_client, child_ep, hwtid);
- accept_cr(child_ep, req->peer_ip, skb);
- goto out;
-reject:
- reject_cr(tdev, hwtid, req->peer_ip, skb);
-out:
- return CPL_RET_BUF_DONE;
-}
-
-static int pass_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
-{
- struct iwch_ep *ep = ctx;
- struct cpl_pass_establish *req = cplhdr(skb);
-
- pr_debug("%s ep %p\n", __func__, ep);
- ep->snd_seq = ntohl(req->snd_isn);
- ep->rcv_seq = ntohl(req->rcv_isn);
-
- set_emss(ep, ntohs(req->tcp_opt));
-
- dst_confirm(ep->dst);
- state_set(&ep->com, MPA_REQ_WAIT);
- start_ep_timer(ep);
-
- return CPL_RET_BUF_DONE;
-}
-
-static int peer_close(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
-{
- struct iwch_ep *ep = ctx;
- struct iwch_qp_attributes attrs;
- unsigned long flags;
- int disconnect = 1;
- int release = 0;
-
- pr_debug("%s ep %p\n", __func__, ep);
- dst_confirm(ep->dst);
-
- spin_lock_irqsave(&ep->com.lock, flags);
- switch (ep->com.state) {
- case MPA_REQ_WAIT:
- __state_set(&ep->com, CLOSING);
- break;
- case MPA_REQ_SENT:
- __state_set(&ep->com, CLOSING);
- connect_reply_upcall(ep, -ECONNRESET);
- break;
- case MPA_REQ_RCVD:
-
- /*
- * We're going to mark this endpoint DEAD, but keep
- * the reference on it until the ULP accepts or
- * rejects the CR. Also wake up anyone waiting
- * in rdma connection migration (see iwch_accept_cr()).
- */
- __state_set(&ep->com, CLOSING);
- ep->com.rpl_done = 1;
- ep->com.rpl_err = -ECONNRESET;
- pr_debug("waking up ep %p\n", ep);
- wake_up(&ep->com.waitq);
- break;
- case MPA_REP_SENT:
- __state_set(&ep->com, CLOSING);
- ep->com.rpl_done = 1;
- ep->com.rpl_err = -ECONNRESET;
- pr_debug("waking up ep %p\n", ep);
- wake_up(&ep->com.waitq);
- break;
- case FPDU_MODE:
- start_ep_timer(ep);
- __state_set(&ep->com, CLOSING);
- attrs.next_state = IWCH_QP_STATE_CLOSING;
- iwch_modify_qp(ep->com.qp->rhp, ep->com.qp,
- IWCH_QP_ATTR_NEXT_STATE, &attrs, 1);
- peer_close_upcall(ep);
- break;
- case ABORTING:
- disconnect = 0;
- break;
- case CLOSING:
- __state_set(&ep->com, MORIBUND);
- disconnect = 0;
- break;
- case MORIBUND:
- stop_ep_timer(ep);
- if (ep->com.cm_id && ep->com.qp) {
- attrs.next_state = IWCH_QP_STATE_IDLE;
- iwch_modify_qp(ep->com.qp->rhp, ep->com.qp,
- IWCH_QP_ATTR_NEXT_STATE, &attrs, 1);
- }
- close_complete_upcall(ep);
- __state_set(&ep->com, DEAD);
- release = 1;
- disconnect = 0;
- break;
- case DEAD:
- disconnect = 0;
- break;
- default:
- BUG_ON(1);
- }
- spin_unlock_irqrestore(&ep->com.lock, flags);
- if (disconnect)
- iwch_ep_disconnect(ep, 0, GFP_KERNEL);
- if (release)
- release_ep_resources(ep);
- return CPL_RET_BUF_DONE;
-}
-
-/*
- * Returns whether an ABORT_REQ_RSS message is a negative advice.
- */
-static int is_neg_adv_abort(unsigned int status)
-{
- return status == CPL_ERR_RTX_NEG_ADVICE ||
- status == CPL_ERR_PERSIST_NEG_ADVICE;
-}
-
-static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
-{
- struct cpl_abort_req_rss *req = cplhdr(skb);
- struct iwch_ep *ep = ctx;
- struct cpl_abort_rpl *rpl;
- struct sk_buff *rpl_skb;
- struct iwch_qp_attributes attrs;
- int ret;
- int release = 0;
- unsigned long flags;
-
- if (is_neg_adv_abort(req->status)) {
- pr_debug("%s neg_adv_abort ep %p tid %d\n", __func__, ep,
- ep->hwtid);
- t3_l2t_send_event(ep->com.tdev, ep->l2t);
- return CPL_RET_BUF_DONE;
- }
-
- /*
- * We get 2 peer aborts from the HW. The first one must
- * be ignored, other than recording that one more is expected.
- */
- if (!test_and_set_bit(PEER_ABORT_IN_PROGRESS, &ep->com.flags)) {
- return CPL_RET_BUF_DONE;
- }
-
- spin_lock_irqsave(&ep->com.lock, flags);
- pr_debug("%s ep %p state %u\n", __func__, ep, ep->com.state);
- switch (ep->com.state) {
- case CONNECTING:
- break;
- case MPA_REQ_WAIT:
- stop_ep_timer(ep);
- break;
- case MPA_REQ_SENT:
- stop_ep_timer(ep);
- connect_reply_upcall(ep, -ECONNRESET);
- break;
- case MPA_REP_SENT:
- ep->com.rpl_done = 1;
- ep->com.rpl_err = -ECONNRESET;
- pr_debug("waking up ep %p\n", ep);
- wake_up(&ep->com.waitq);
- break;
- case MPA_REQ_RCVD:
-
- /*
- * We're going to mark this endpoint DEAD, but keep
- * the reference on it until the ULP accepts or
- * rejects the CR. Also wake up anyone waiting
- * in rdma connection migration (see iwch_accept_cr()).
- */
- ep->com.rpl_done = 1;
- ep->com.rpl_err = -ECONNRESET;
- pr_debug("waking up ep %p\n", ep);
- wake_up(&ep->com.waitq);
- break;
- case MORIBUND:
- case CLOSING:
- stop_ep_timer(ep);
- /*FALLTHROUGH*/
- case FPDU_MODE:
- if (ep->com.cm_id && ep->com.qp) {
- attrs.next_state = IWCH_QP_STATE_ERROR;
- ret = iwch_modify_qp(ep->com.qp->rhp,
- ep->com.qp, IWCH_QP_ATTR_NEXT_STATE,
- &attrs, 1);
- if (ret)
- pr_err("%s - qp <- error failed!\n", __func__);
- }
- peer_abort_upcall(ep);
- break;
- case ABORTING:
- break;
- case DEAD:
- pr_debug("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__);
- spin_unlock_irqrestore(&ep->com.lock, flags);
- return CPL_RET_BUF_DONE;
- default:
- BUG_ON(1);
- break;
- }
- dst_confirm(ep->dst);
- if (ep->com.state != ABORTING) {
- __state_set(&ep->com, DEAD);
- release = 1;
- }
- spin_unlock_irqrestore(&ep->com.lock, flags);
-
- rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL);
- if (!rpl_skb) {
- pr_err("%s - cannot allocate skb!\n", __func__);
- release = 1;
- goto out;
- }
- rpl_skb->priority = CPL_PRIORITY_DATA;
- rpl = skb_put(rpl_skb, sizeof(*rpl));
- rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));
- rpl->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
- OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid));
- rpl->cmd = CPL_ABORT_NO_RST;
- iwch_cxgb3_ofld_send(ep->com.tdev, rpl_skb);
-out:
- if (release)
- release_ep_resources(ep);
- return CPL_RET_BUF_DONE;
-}
-
-static int close_con_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
-{
- struct iwch_ep *ep = ctx;
- struct iwch_qp_attributes attrs;
- unsigned long flags;
- int release = 0;
-
- pr_debug("%s ep %p\n", __func__, ep);
- BUG_ON(!ep);
-
- /* The cm_id may be null if we failed to connect */
- spin_lock_irqsave(&ep->com.lock, flags);
- switch (ep->com.state) {
- case CLOSING:
- __state_set(&ep->com, MORIBUND);
- break;
- case MORIBUND:
- stop_ep_timer(ep);
- if ((ep->com.cm_id) && (ep->com.qp)) {
- attrs.next_state = IWCH_QP_STATE_IDLE;
- iwch_modify_qp(ep->com.qp->rhp,
- ep->com.qp,
- IWCH_QP_ATTR_NEXT_STATE,
- &attrs, 1);
- }
- close_complete_upcall(ep);
- __state_set(&ep->com, DEAD);
- release = 1;
- break;
- case ABORTING:
- case DEAD:
- break;
- default:
- BUG_ON(1);
- break;
- }
- spin_unlock_irqrestore(&ep->com.lock, flags);
- if (release)
- release_ep_resources(ep);
- return CPL_RET_BUF_DONE;
-}
-
-/*
- * T3A does 3 things when a TERM is received:
- * 1) send up a CPL_RDMA_TERMINATE message with the TERM packet
- * 2) generate an async event on the QP with the TERMINATE opcode
- * 3) post a TERMINATE opcode cqe into the associated CQ.
- *
- * For (1), we save the message in the qp for later consumption by the consumer.
- * For (2), we move the QP into TERMINATE, post a QP event and disconnect.
- * For (3), we toss the CQE in cxio_poll_cq().
- *
- * terminate() handles case (1)...
- */
-static int terminate(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
-{
- struct iwch_ep *ep = ctx;
-
- if (state_read(&ep->com) != FPDU_MODE)
- return CPL_RET_BUF_DONE;
-
- pr_debug("%s ep %p\n", __func__, ep);
- skb_pull(skb, sizeof(struct cpl_rdma_terminate));
- pr_debug("%s saving %d bytes of term msg\n", __func__, skb->len);
- skb_copy_from_linear_data(skb, ep->com.qp->attr.terminate_buffer,
- skb->len);
- ep->com.qp->attr.terminate_msg_len = skb->len;
- ep->com.qp->attr.is_terminate_local = 0;
- return CPL_RET_BUF_DONE;
-}
-
-static int ec_status(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
-{
- struct cpl_rdma_ec_status *rep = cplhdr(skb);
- struct iwch_ep *ep = ctx;
-
- pr_debug("%s ep %p tid %u status %d\n", __func__, ep, ep->hwtid,
- rep->status);
- if (rep->status) {
- struct iwch_qp_attributes attrs;
-
- pr_err("%s BAD CLOSE - Aborting tid %u\n",
- __func__, ep->hwtid);
- stop_ep_timer(ep);
- attrs.next_state = IWCH_QP_STATE_ERROR;
- iwch_modify_qp(ep->com.qp->rhp,
- ep->com.qp, IWCH_QP_ATTR_NEXT_STATE,
- &attrs, 1);
- abort_connection(ep, NULL, GFP_KERNEL);
- }
- return CPL_RET_BUF_DONE;
-}
-
-static void ep_timeout(struct timer_list *t)
-{
- struct iwch_ep *ep = from_timer(ep, t, timer);
- struct iwch_qp_attributes attrs;
- unsigned long flags;
- int abort = 1;
-
- spin_lock_irqsave(&ep->com.lock, flags);
- pr_debug("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid,
- ep->com.state);
- switch (ep->com.state) {
- case MPA_REQ_SENT:
- __state_set(&ep->com, ABORTING);
- connect_reply_upcall(ep, -ETIMEDOUT);
- break;
- case MPA_REQ_WAIT:
- __state_set(&ep->com, ABORTING);
- break;
- case CLOSING:
- case MORIBUND:
- if (ep->com.cm_id && ep->com.qp) {
- attrs.next_state = IWCH_QP_STATE_ERROR;
- iwch_modify_qp(ep->com.qp->rhp,
- ep->com.qp, IWCH_QP_ATTR_NEXT_STATE,
- &attrs, 1);
- }
- __state_set(&ep->com, ABORTING);
- break;
- default:
- WARN(1, "%s unexpected state ep %p state %u\n",
- __func__, ep, ep->com.state);
- abort = 0;
- }
- spin_unlock_irqrestore(&ep->com.lock, flags);
- if (abort)
- abort_connection(ep, NULL, GFP_ATOMIC);
- put_ep(&ep->com);
-}
-
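
ep_timeout() above follows the modern timer_list pattern: the timer is embedded in the endpoint, timer_setup() binds the callback, and from_timer() recovers the enclosing object via container_of(). A minimal sketch of the same pattern outside this driver; my_obj and my_timeout are hypothetical names:

#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/slab.h>

struct my_obj {
        struct timer_list timer;        /* embedded in the object it guards */
        int state;
};

static void my_timeout(struct timer_list *t)
{
        /* container_of() back from the timer to its owner */
        struct my_obj *obj = from_timer(obj, t, timer);

        obj->state = -1;        /* softirq context: must not sleep */
}

static struct my_obj *my_obj_create(void)
{
        struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

        if (!obj)
                return NULL;
        timer_setup(&obj->timer, my_timeout, 0);
        mod_timer(&obj->timer, jiffies + 10 * HZ);      /* fire in ~10s */
        return obj;
}
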
-int iwch_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
-{
- struct iwch_ep *ep = to_ep(cm_id);
-
- pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
-
- if (state_read(&ep->com) == DEAD) {
- put_ep(&ep->com);
- return -ECONNRESET;
- }
- BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
- if (mpa_rev == 0)
- abort_connection(ep, NULL, GFP_KERNEL);
- else {
- send_mpa_reject(ep, pdata, pdata_len);
- iwch_ep_disconnect(ep, 0, GFP_KERNEL);
- }
- put_ep(&ep->com);
- return 0;
-}
-
-int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
-{
- int err;
- struct iwch_qp_attributes attrs;
- enum iwch_qp_attr_mask mask;
- struct iwch_ep *ep = to_ep(cm_id);
- struct iwch_dev *h = to_iwch_dev(cm_id->device);
- struct iwch_qp *qp = get_qhp(h, conn_param->qpn);
-
- pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
- if (state_read(&ep->com) == DEAD) {
- err = -ECONNRESET;
- goto err;
- }
-
- BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
- BUG_ON(!qp);
-
- if ((conn_param->ord > qp->rhp->attr.max_rdma_read_qp_depth) ||
- (conn_param->ird > qp->rhp->attr.max_rdma_reads_per_qp)) {
- abort_connection(ep, NULL, GFP_KERNEL);
- err = -EINVAL;
- goto err;
- }
-
- cm_id->add_ref(cm_id);
- ep->com.cm_id = cm_id;
- ep->com.qp = qp;
-
- ep->ird = conn_param->ird;
- ep->ord = conn_param->ord;
-
- if (peer2peer && ep->ird == 0)
- ep->ird = 1;
-
- pr_debug("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord);
-
- /* bind QP to EP and move to RTS */
- attrs.mpa_attr = ep->mpa_attr;
- attrs.max_ird = ep->ird;
- attrs.max_ord = ep->ord;
- attrs.llp_stream_handle = ep;
- attrs.next_state = IWCH_QP_STATE_RTS;
-
- /* bind QP and TID with INIT_WR */
- mask = IWCH_QP_ATTR_NEXT_STATE |
- IWCH_QP_ATTR_LLP_STREAM_HANDLE |
- IWCH_QP_ATTR_MPA_ATTR |
- IWCH_QP_ATTR_MAX_IRD |
- IWCH_QP_ATTR_MAX_ORD;
-
- err = iwch_modify_qp(ep->com.qp->rhp,
- ep->com.qp, mask, &attrs, 1);
- if (err)
- goto err1;
-
- /* if needed, wait for wr_ack */
- if (iwch_rqes_posted(qp)) {
- wait_event(ep->com.waitq, ep->com.rpl_done);
- err = ep->com.rpl_err;
- if (err)
- goto err1;
- }
-
- err = send_mpa_reply(ep, conn_param->private_data,
- conn_param->private_data_len);
- if (err)
- goto err1;
-
- state_set(&ep->com, FPDU_MODE);
- established_upcall(ep);
- put_ep(&ep->com);
- return 0;
-err1:
- ep->com.cm_id = NULL;
- ep->com.qp = NULL;
- cm_id->rem_ref(cm_id);
-err:
- put_ep(&ep->com);
- return err;
-}
-
-static int is_loopback_dst(struct iw_cm_id *cm_id)
-{
- struct net_device *dev;
- struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->m_remote_addr;
-
- dev = ip_dev_find(&init_net, raddr->sin_addr.s_addr);
- if (!dev)
- return 0;
- dev_put(dev);
- return 1;
-}
-
-int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
-{
- struct iwch_dev *h = to_iwch_dev(cm_id->device);
- struct iwch_ep *ep;
- struct rtable *rt;
- int err = 0;
- struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
- struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->m_remote_addr;
-
- if (cm_id->m_remote_addr.ss_family != PF_INET) {
- err = -ENOSYS;
- goto out;
- }
-
- if (is_loopback_dst(cm_id)) {
- err = -ENOSYS;
- goto out;
- }
-
- ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
- if (!ep) {
- pr_err("%s - cannot alloc ep\n", __func__);
- err = -ENOMEM;
- goto out;
- }
- timer_setup(&ep->timer, ep_timeout, 0);
- ep->plen = conn_param->private_data_len;
- if (ep->plen)
- memcpy(ep->mpa_pkt + sizeof(struct mpa_message),
- conn_param->private_data, ep->plen);
- ep->ird = conn_param->ird;
- ep->ord = conn_param->ord;
-
- if (peer2peer && ep->ord == 0)
- ep->ord = 1;
-
- ep->com.tdev = h->rdev.t3cdev_p;
-
- cm_id->add_ref(cm_id);
- ep->com.cm_id = cm_id;
- ep->com.qp = get_qhp(h, conn_param->qpn);
- BUG_ON(!ep->com.qp);
- pr_debug("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn,
- ep->com.qp, cm_id);
-
- /*
- * Allocate an active TID to initiate a TCP connection.
- */
- ep->atid = cxgb3_alloc_atid(h->rdev.t3cdev_p, &t3c_client, ep);
- if (ep->atid == -1) {
- pr_err("%s - cannot alloc atid\n", __func__);
- err = -ENOMEM;
- goto fail2;
- }
-
- /* find a route */
- rt = find_route(h->rdev.t3cdev_p, laddr->sin_addr.s_addr,
- raddr->sin_addr.s_addr, laddr->sin_port,
- raddr->sin_port, IPTOS_LOWDELAY);
- if (!rt) {
- pr_err("%s - cannot find route\n", __func__);
- err = -EHOSTUNREACH;
- goto fail3;
- }
- ep->dst = &rt->dst;
- ep->l2t = t3_l2t_get(ep->com.tdev, ep->dst, NULL,
- &raddr->sin_addr.s_addr);
- if (!ep->l2t) {
- pr_err("%s - cannot alloc l2e\n", __func__);
- err = -ENOMEM;
- goto fail4;
- }
-
- state_set(&ep->com, CONNECTING);
- ep->tos = IPTOS_LOWDELAY;
- memcpy(&ep->com.local_addr, &cm_id->m_local_addr,
- sizeof(ep->com.local_addr));
- memcpy(&ep->com.remote_addr, &cm_id->m_remote_addr,
- sizeof(ep->com.remote_addr));
-
- /* send connect request to rnic */
- err = send_connect(ep);
- if (!err)
- goto out;
-
- l2t_release(h->rdev.t3cdev_p, ep->l2t);
-fail4:
- dst_release(ep->dst);
-fail3:
- cxgb3_free_atid(ep->com.tdev, ep->atid);
-fail2:
- cm_id->rem_ref(cm_id);
- put_ep(&ep->com);
-out:
- return err;
-}
-
-int iwch_create_listen(struct iw_cm_id *cm_id, int backlog)
-{
- int err = 0;
- struct iwch_dev *h = to_iwch_dev(cm_id->device);
- struct iwch_listen_ep *ep;
-
- might_sleep();
-
- if (cm_id->m_local_addr.ss_family != PF_INET) {
- err = -ENOSYS;
- goto fail1;
- }
-
- ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
- if (!ep) {
- pr_err("%s - cannot alloc ep\n", __func__);
- err = -ENOMEM;
- goto fail1;
- }
- pr_debug("%s ep %p\n", __func__, ep);
- ep->com.tdev = h->rdev.t3cdev_p;
- cm_id->add_ref(cm_id);
- ep->com.cm_id = cm_id;
- ep->backlog = backlog;
- memcpy(&ep->com.local_addr, &cm_id->m_local_addr,
- sizeof(ep->com.local_addr));
-
- /*
- * Allocate a server TID.
- */
- ep->stid = cxgb3_alloc_stid(h->rdev.t3cdev_p, &t3c_client, ep);
- if (ep->stid == -1) {
- pr_err("%s - cannot alloc atid\n", __func__);
- err = -ENOMEM;
- goto fail2;
- }
-
- state_set(&ep->com, LISTEN);
- err = listen_start(ep);
- if (err)
- goto fail3;
-
- /* wait for pass_open_rpl */
- wait_event(ep->com.waitq, ep->com.rpl_done);
- err = ep->com.rpl_err;
- if (!err) {
- cm_id->provider_data = ep;
- goto out;
- }
-fail3:
- cxgb3_free_stid(ep->com.tdev, ep->stid);
-fail2:
- cm_id->rem_ref(cm_id);
- put_ep(&ep->com);
-fail1:
-out:
- return err;
-}
-
-int iwch_destroy_listen(struct iw_cm_id *cm_id)
-{
- int err;
- struct iwch_listen_ep *ep = to_listen_ep(cm_id);
-
- pr_debug("%s ep %p\n", __func__, ep);
-
- might_sleep();
- state_set(&ep->com, DEAD);
- ep->com.rpl_done = 0;
- ep->com.rpl_err = 0;
- err = listen_stop(ep);
- if (err)
- goto done;
- wait_event(ep->com.waitq, ep->com.rpl_done);
- cxgb3_free_stid(ep->com.tdev, ep->stid);
-done:
- err = ep->com.rpl_err;
- cm_id->rem_ref(cm_id);
- put_ep(&ep->com);
- return err;
-}
-
-int iwch_ep_disconnect(struct iwch_ep *ep, int abrupt, gfp_t gfp)
-{
- int ret = 0;
- unsigned long flags;
- int close = 0;
- int fatal = 0;
- struct t3cdev *tdev;
- struct cxio_rdev *rdev;
-
- spin_lock_irqsave(&ep->com.lock, flags);
-
- pr_debug("%s ep %p state %s, abrupt %d\n", __func__, ep,
- states[ep->com.state], abrupt);
-
- tdev = (struct t3cdev *)ep->com.tdev;
- rdev = (struct cxio_rdev *)tdev->ulp;
- if (cxio_fatal_error(rdev)) {
- fatal = 1;
- close_complete_upcall(ep);
- ep->com.state = DEAD;
- }
- switch (ep->com.state) {
- case MPA_REQ_WAIT:
- case MPA_REQ_SENT:
- case MPA_REQ_RCVD:
- case MPA_REP_SENT:
- case FPDU_MODE:
- close = 1;
- if (abrupt)
- ep->com.state = ABORTING;
- else {
- ep->com.state = CLOSING;
- start_ep_timer(ep);
- }
- set_bit(CLOSE_SENT, &ep->com.flags);
- break;
- case CLOSING:
- if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) {
- close = 1;
- if (abrupt) {
- stop_ep_timer(ep);
- ep->com.state = ABORTING;
- } else
- ep->com.state = MORIBUND;
- }
- break;
- case MORIBUND:
- case ABORTING:
- case DEAD:
- pr_debug("%s ignoring disconnect ep %p state %u\n",
- __func__, ep, ep->com.state);
- break;
- default:
- BUG();
- break;
- }
-
- spin_unlock_irqrestore(&ep->com.lock, flags);
- if (close) {
- if (abrupt)
- ret = send_abort(ep, NULL, gfp);
- else
- ret = send_halfclose(ep, gfp);
- if (ret)
- fatal = 1;
- }
- if (fatal)
- release_ep_resources(ep);
- return ret;
-}
-
-int iwch_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
- struct l2t_entry *l2t)
-{
- struct iwch_ep *ep = ctx;
-
- if (ep->dst != old)
- return 0;
-
- pr_debug("%s ep %p redirect to dst %p l2t %p\n", __func__, ep, new,
- l2t);
- dst_hold(new);
- l2t_release(ep->com.tdev, ep->l2t);
- ep->l2t = l2t;
- dst_release(old);
- ep->dst = new;
- return 1;
-}
-
-/*
- * All the CM events are handled on a work queue so that they run in a
- * process context where it is safe to sleep. These are the real handlers
- * that are called from the work queue.
- */
-static const cxgb3_cpl_handler_func work_handlers[NUM_CPL_CMDS] = {
- [CPL_ACT_ESTABLISH] = act_establish,
- [CPL_ACT_OPEN_RPL] = act_open_rpl,
- [CPL_RX_DATA] = rx_data,
- [CPL_TX_DMA_ACK] = tx_ack,
- [CPL_ABORT_RPL_RSS] = abort_rpl,
- [CPL_ABORT_RPL] = abort_rpl,
- [CPL_PASS_OPEN_RPL] = pass_open_rpl,
- [CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl,
- [CPL_PASS_ACCEPT_REQ] = pass_accept_req,
- [CPL_PASS_ESTABLISH] = pass_establish,
- [CPL_PEER_CLOSE] = peer_close,
- [CPL_ABORT_REQ_RSS] = peer_abort,
- [CPL_CLOSE_CON_RPL] = close_con_rpl,
- [CPL_RDMA_TERMINATE] = terminate,
- [CPL_RDMA_EC_STATUS] = ec_status,
-};
-
-static void process_work(struct work_struct *work)
-{
- struct sk_buff *skb = NULL;
- void *ep;
- struct t3cdev *tdev;
- int ret;
-
- while ((skb = skb_dequeue(&rxq))) {
- ep = *((void **) (skb->cb));
- tdev = *((struct t3cdev **) (skb->cb + sizeof(void *)));
- ret = work_handlers[G_OPCODE(ntohl((__force __be32)skb->csum))](tdev, skb, ep);
- if (ret & CPL_RET_BUF_DONE)
- kfree_skb(skb);
-
- /*
- * The ep reference taken in sched() is dropped here, which may free the ep.
- */
- put_ep((struct iwch_ep_common *)ep);
- }
-}
-
-static DECLARE_WORK(skb_work, process_work);
-
-static int sched(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
-{
- struct iwch_ep_common *epc = ctx;
-
- get_ep(epc);
-
- /*
- * Save ctx and tdev in the skb->cb area.
- */
- *((void **) skb->cb) = ctx;
- *((struct t3cdev **) (skb->cb + sizeof(void *))) = tdev;
-
- /*
- * Queue the skb and schedule the worker thread.
- */
- skb_queue_tail(&rxq, skb);
- queue_work(workq, &skb_work);
- return 0;
-}
-
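
sched() above defers hard-IRQ CPL messages to process_work() by parking the context pointers in the 48-byte skb->cb scratch area before queueing the skb. A minimal sketch of that producer/consumer shape, with hypothetical names (my_rxq, my_wq, my_handle_one):

#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>

static struct sk_buff_head my_rxq;
static struct workqueue_struct *my_wq;

static void my_handle_one(void *ctx, struct sk_buff *skb)
{
        /* ... dispatch on the CPL opcode, as work_handlers[] does ... */
}

static void my_work_fn(struct work_struct *work)
{
        struct sk_buff *skb;

        while ((skb = skb_dequeue(&my_rxq))) {
                /* recover the context stashed by the producer */
                void *ctx = *((void **)skb->cb);

                my_handle_one(ctx, skb);
                kfree_skb(skb);
        }
}

static DECLARE_WORK(my_work, my_work_fn);

/* producer side: runs in atomic context, so defer to the workqueue */
static int my_rx(struct sk_buff *skb, void *ctx)
{
        BUILD_BUG_ON(sizeof(void *) > sizeof(skb->cb));
        *((void **)skb->cb) = ctx;
        skb_queue_tail(&my_rxq, skb);
        queue_work(my_wq, &my_work);
        return 0;
}

static int __init my_init(void)
{
        skb_queue_head_init(&my_rxq);
        my_wq = alloc_ordered_workqueue("my_rx", WQ_MEM_RECLAIM);
        return my_wq ? 0 : -ENOMEM;
}
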
-static int set_tcb_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
-{
- struct cpl_set_tcb_rpl *rpl = cplhdr(skb);
-
- if (rpl->status != CPL_ERR_NONE) {
- pr_err("Unexpected SET_TCB_RPL status %u for tid %u\n",
- rpl->status, GET_TID(rpl));
- }
- return CPL_RET_BUF_DONE;
-}
-
-/*
- * All upcalls from the T3 Core go to sched() to schedule the
- * processing on a work queue.
- */
-cxgb3_cpl_handler_func t3c_handlers[NUM_CPL_CMDS] = {
- [CPL_ACT_ESTABLISH] = sched,
- [CPL_ACT_OPEN_RPL] = sched,
- [CPL_RX_DATA] = sched,
- [CPL_TX_DMA_ACK] = sched,
- [CPL_ABORT_RPL_RSS] = sched,
- [CPL_ABORT_RPL] = sched,
- [CPL_PASS_OPEN_RPL] = sched,
- [CPL_CLOSE_LISTSRV_RPL] = sched,
- [CPL_PASS_ACCEPT_REQ] = sched,
- [CPL_PASS_ESTABLISH] = sched,
- [CPL_PEER_CLOSE] = sched,
- [CPL_CLOSE_CON_RPL] = sched,
- [CPL_ABORT_REQ_RSS] = sched,
- [CPL_RDMA_TERMINATE] = sched,
- [CPL_RDMA_EC_STATUS] = sched,
- [CPL_SET_TCB_RPL] = set_tcb_rpl,
-};
-
-int __init iwch_cm_init(void)
-{
- skb_queue_head_init(&rxq);
-
- workq = alloc_ordered_workqueue("iw_cxgb3", WQ_MEM_RECLAIM);
- if (!workq)
- return -ENOMEM;
-
- return 0;
-}
-
-void __exit iwch_cm_term(void)
-{
- flush_workqueue(workq);
- destroy_workqueue(workq);
-}
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.h b/drivers/infiniband/hw/cxgb3/iwch_cm.h
deleted file mode 100644
index cc7fe644d260..000000000000
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.h
+++ /dev/null
@@ -1,233 +0,0 @@
-/*
- * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef _IWCH_CM_H_
-#define _IWCH_CM_H_
-
-#include <linux/inet.h>
-#include <linux/wait.h>
-#include <linux/spinlock.h>
-#include <linux/kref.h>
-
-#include <rdma/ib_verbs.h>
-#include <rdma/iw_cm.h>
-
-#include "cxgb3_offload.h"
-#include "iwch_provider.h"
-
-#define MPA_KEY_REQ "MPA ID Req Frame"
-#define MPA_KEY_REP "MPA ID Rep Frame"
-
-#define MPA_MAX_PRIVATE_DATA 256
-#define MPA_REV 0 /* XXX - amso1100 uses rev 0 ! */
-#define MPA_REJECT 0x20
-#define MPA_CRC 0x40
-#define MPA_MARKERS 0x80
-#define MPA_FLAGS_MASK 0xE0
-
-#define put_ep(ep) { \
- pr_debug("put_ep (via %s:%u) ep %p refcnt %d\n", \
- __func__, __LINE__, ep, kref_read(&((ep)->kref))); \
- WARN_ON(kref_read(&((ep)->kref)) < 1); \
- kref_put(&((ep)->kref), __free_ep); \
-}
-
-#define get_ep(ep) { \
- pr_debug("get_ep (via %s:%u) ep %p, refcnt %d\n", \
- __func__, __LINE__, ep, kref_read(&((ep)->kref))); \
- kref_get(&((ep)->kref)); \
-}
-
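
get_ep()/put_ep() above are thin tracing wrappers around the standard kref contract: the object starts at refcount 1, every user pairs a get with a put, and the final put runs the release callback (__free_ep() in this driver). The bare contract, sketched with hypothetical names:

#include <linux/kref.h>
#include <linux/slab.h>

struct my_ep {
        struct kref kref;
};

static void my_free_ep(struct kref *kref)
{
        kfree(container_of(kref, struct my_ep, kref));
}

static struct my_ep *my_ep_alloc(void)
{
        struct my_ep *ep = kzalloc(sizeof(*ep), GFP_KERNEL);

        if (ep)
                kref_init(&ep->kref);   /* refcount starts at 1 */
        return ep;
}

static void my_use(struct my_ep *ep)
{
        kref_get(&ep->kref);            /* take a reference ... */
        /* ... hand ep to asynchronous work ... */
        kref_put(&ep->kref, my_free_ep); /* last put runs my_free_ep() */
}
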
-struct mpa_message {
- u8 key[16];
- u8 flags;
- u8 revision;
- __be16 private_data_size;
- u8 private_data[0];
-};
-
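
struct mpa_message above is the on-the-wire MPA frame: a 16-byte key, a flags byte carrying the reject/CRC/marker bits, a revision, and a big-endian private-data length. A sketch of filling in a request frame; build_mpa_req() is a hypothetical helper, not part of this driver:

#include <linux/types.h>
#include <linux/string.h>
#include <asm/byteorder.h>

/* hypothetical helper: lay out an MPA request with private data */
static void build_mpa_req(struct mpa_message *mpa,
                          const void *pdata, u16 plen, int use_crc)
{
        memset(mpa, 0, sizeof(*mpa));
        memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
        mpa->flags = use_crc ? MPA_CRC : 0;
        mpa->revision = MPA_REV;
        mpa->private_data_size = cpu_to_be16(plen); /* wire format is big endian */
        if (plen)
                memcpy(mpa->private_data, pdata, plen);
}
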
-struct terminate_message {
- u8 layer_etype;
- u8 ecode;
- __be16 hdrct_rsvd;
- u8 len_hdrs[0];
-};
-
-#define TERM_MAX_LENGTH (sizeof(struct terminate_message) + 2 + 18 + 28)
-
-enum iwch_layers_types {
- LAYER_RDMAP = 0x00,
- LAYER_DDP = 0x10,
- LAYER_MPA = 0x20,
- RDMAP_LOCAL_CATA = 0x00,
- RDMAP_REMOTE_PROT = 0x01,
- RDMAP_REMOTE_OP = 0x02,
- DDP_LOCAL_CATA = 0x00,
- DDP_TAGGED_ERR = 0x01,
- DDP_UNTAGGED_ERR = 0x02,
- DDP_LLP = 0x03
-};
-
-enum iwch_rdma_ecodes {
- RDMAP_INV_STAG = 0x00,
- RDMAP_BASE_BOUNDS = 0x01,
- RDMAP_ACC_VIOL = 0x02,
- RDMAP_STAG_NOT_ASSOC = 0x03,
- RDMAP_TO_WRAP = 0x04,
- RDMAP_INV_VERS = 0x05,
- RDMAP_INV_OPCODE = 0x06,
- RDMAP_STREAM_CATA = 0x07,
- RDMAP_GLOBAL_CATA = 0x08,
- RDMAP_CANT_INV_STAG = 0x09,
- RDMAP_UNSPECIFIED = 0xff
-};
-
-enum iwch_ddp_ecodes {
- DDPT_INV_STAG = 0x00,
- DDPT_BASE_BOUNDS = 0x01,
- DDPT_STAG_NOT_ASSOC = 0x02,
- DDPT_TO_WRAP = 0x03,
- DDPT_INV_VERS = 0x04,
- DDPU_INV_QN = 0x01,
- DDPU_INV_MSN_NOBUF = 0x02,
- DDPU_INV_MSN_RANGE = 0x03,
- DDPU_INV_MO = 0x04,
- DDPU_MSG_TOOBIG = 0x05,
- DDPU_INV_VERS = 0x06
-};
-
-enum iwch_mpa_ecodes {
- MPA_CRC_ERR = 0x02,
- MPA_MARKER_ERR = 0x03
-};
-
-enum iwch_ep_state {
- IDLE = 0,
- LISTEN,
- CONNECTING,
- MPA_REQ_WAIT,
- MPA_REQ_SENT,
- MPA_REQ_RCVD,
- MPA_REP_SENT,
- FPDU_MODE,
- ABORTING,
- CLOSING,
- MORIBUND,
- DEAD,
-};
-
-enum iwch_ep_flags {
- PEER_ABORT_IN_PROGRESS = 0,
- ABORT_REQ_IN_PROGRESS = 1,
- RELEASE_RESOURCES = 2,
- CLOSE_SENT = 3,
-};
-
-struct iwch_ep_common {
- struct iw_cm_id *cm_id;
- struct iwch_qp *qp;
- struct t3cdev *tdev;
- enum iwch_ep_state state;
- struct kref kref;
- spinlock_t lock;
- struct sockaddr_in local_addr;
- struct sockaddr_in remote_addr;
- wait_queue_head_t waitq;
- int rpl_done;
- int rpl_err;
- unsigned long flags;
-};
-
-struct iwch_listen_ep {
- struct iwch_ep_common com;
- unsigned int stid;
- int backlog;
-};
-
-struct iwch_ep {
- struct iwch_ep_common com;
- struct iwch_ep *parent_ep;
- struct timer_list timer;
- unsigned int atid;
- u32 hwtid;
- u32 snd_seq;
- u32 rcv_seq;
- struct l2t_entry *l2t;
- struct dst_entry *dst;
- struct sk_buff *mpa_skb;
- struct iwch_mpa_attributes mpa_attr;
- unsigned int mpa_pkt_len;
- u8 mpa_pkt[sizeof(struct mpa_message) + MPA_MAX_PRIVATE_DATA];
- u8 tos;
- u16 emss;
- u16 plen;
- u32 ird;
- u32 ord;
-};
-
-static inline struct iwch_ep *to_ep(struct iw_cm_id *cm_id)
-{
- return cm_id->provider_data;
-}
-
-static inline struct iwch_listen_ep *to_listen_ep(struct iw_cm_id *cm_id)
-{
- return cm_id->provider_data;
-}
-
-static inline int compute_wscale(int win)
-{
- int wscale = 0;
-
- while (wscale < 14 && (65535<<wscale) < win)
- wscale++;
- return wscale;
-}
-
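
compute_wscale() above picks the smallest TCP window-scale shift that lets the 16-bit window field cover the requested window, capped at 14 per RFC 1323. For a 1 MiB window the answer is 5, since 65535 << 4 = 1048560 still falls short of 1048576. A standalone check:

#include <stdio.h>

static int compute_wscale(int win)
{
        int wscale = 0;

        while (wscale < 14 && (65535 << wscale) < win)
                wscale++;
        return wscale;
}

int main(void)
{
        printf("%d\n", compute_wscale(65535));   /* 0: fits unscaled */
        printf("%d\n", compute_wscale(65536));   /* 1: one shift needed */
        printf("%d\n", compute_wscale(1 << 20)); /* 5: 1 MiB window */
        return 0;
}
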
-/* CM prototypes */
-
-int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
-int iwch_create_listen(struct iw_cm_id *cm_id, int backlog);
-int iwch_destroy_listen(struct iw_cm_id *cm_id);
-int iwch_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len);
-int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
-int iwch_ep_disconnect(struct iwch_ep *ep, int abrupt, gfp_t gfp);
-int iwch_quiesce_tid(struct iwch_ep *ep);
-int iwch_resume_tid(struct iwch_ep *ep);
-void __free_ep(struct kref *kref);
-void iwch_rearp(struct iwch_ep *ep);
-int iwch_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new, struct l2t_entry *l2t);
-
-int __init iwch_cm_init(void);
-void __exit iwch_cm_term(void);
-extern int peer2peer;
-
-#endif /* _IWCH_CM_H_ */
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cq.c b/drivers/infiniband/hw/cxgb3/iwch_cq.c
deleted file mode 100644
index a098c0140580..000000000000
--- a/drivers/infiniband/hw/cxgb3/iwch_cq.c
+++ /dev/null
@@ -1,230 +0,0 @@
-/*
- * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "iwch_provider.h"
-#include "iwch.h"
-
-static int __iwch_poll_cq_one(struct iwch_dev *rhp, struct iwch_cq *chp,
- struct iwch_qp *qhp, struct ib_wc *wc)
-{
- struct t3_wq *wq = qhp ? &qhp->wq : NULL;
- struct t3_cqe cqe;
- u32 credit = 0;
- u8 cqe_flushed;
- u64 cookie;
- int ret = 1;
-
- ret = cxio_poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie,
- &credit);
- if (t3a_device(chp->rhp) && credit) {
- pr_debug("%s updating %d cq credits on id %d\n", __func__,
- credit, chp->cq.cqid);
- cxio_hal_cq_op(&rhp->rdev, &chp->cq, CQ_CREDIT_UPDATE, credit);
- }
-
- if (ret) {
- ret = -EAGAIN;
- goto out;
- }
- ret = 1;
-
- wc->wr_id = cookie;
- wc->qp = qhp ? &qhp->ibqp : NULL;
- wc->vendor_err = CQE_STATUS(cqe);
- wc->wc_flags = 0;
-
- pr_debug("%s qpid 0x%x type %d opcode %d status 0x%x wrid hi 0x%x lo 0x%x cookie 0x%llx\n",
- __func__,
- CQE_QPID(cqe), CQE_TYPE(cqe),
- CQE_OPCODE(cqe), CQE_STATUS(cqe), CQE_WRID_HI(cqe),
- CQE_WRID_LOW(cqe), (unsigned long long)cookie);
-
- if (CQE_TYPE(cqe) == 0) {
- if (!CQE_STATUS(cqe))
- wc->byte_len = CQE_LEN(cqe);
- else
- wc->byte_len = 0;
- wc->opcode = IB_WC_RECV;
- if (CQE_OPCODE(cqe) == T3_SEND_WITH_INV ||
- CQE_OPCODE(cqe) == T3_SEND_WITH_SE_INV) {
- wc->ex.invalidate_rkey = CQE_WRID_STAG(cqe);
- wc->wc_flags |= IB_WC_WITH_INVALIDATE;
- }
- } else {
- switch (CQE_OPCODE(cqe)) {
- case T3_RDMA_WRITE:
- wc->opcode = IB_WC_RDMA_WRITE;
- break;
- case T3_READ_REQ:
- wc->opcode = IB_WC_RDMA_READ;
- wc->byte_len = CQE_LEN(cqe);
- break;
- case T3_SEND:
- case T3_SEND_WITH_SE:
- case T3_SEND_WITH_INV:
- case T3_SEND_WITH_SE_INV:
- wc->opcode = IB_WC_SEND;
- break;
- case T3_LOCAL_INV:
- wc->opcode = IB_WC_LOCAL_INV;
- break;
- case T3_FAST_REGISTER:
- wc->opcode = IB_WC_REG_MR;
- break;
- default:
- pr_err("Unexpected opcode %d in the CQE received for QPID=0x%0x\n",
- CQE_OPCODE(cqe), CQE_QPID(cqe));
- ret = -EINVAL;
- goto out;
- }
- }
-
- if (cqe_flushed)
- wc->status = IB_WC_WR_FLUSH_ERR;
- else {
-
- switch (CQE_STATUS(cqe)) {
- case TPT_ERR_SUCCESS:
- wc->status = IB_WC_SUCCESS;
- break;
- case TPT_ERR_STAG:
- wc->status = IB_WC_LOC_ACCESS_ERR;
- break;
- case TPT_ERR_PDID:
- wc->status = IB_WC_LOC_PROT_ERR;
- break;
- case TPT_ERR_QPID:
- case TPT_ERR_ACCESS:
- wc->status = IB_WC_LOC_ACCESS_ERR;
- break;
- case TPT_ERR_WRAP:
- wc->status = IB_WC_GENERAL_ERR;
- break;
- case TPT_ERR_BOUND:
- wc->status = IB_WC_LOC_LEN_ERR;
- break;
- case TPT_ERR_INVALIDATE_SHARED_MR:
- case TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND:
- wc->status = IB_WC_MW_BIND_ERR;
- break;
- case TPT_ERR_CRC:
- case TPT_ERR_MARKER:
- case TPT_ERR_PDU_LEN_ERR:
- case TPT_ERR_OUT_OF_RQE:
- case TPT_ERR_DDP_VERSION:
- case TPT_ERR_RDMA_VERSION:
- case TPT_ERR_DDP_QUEUE_NUM:
- case TPT_ERR_MSN:
- case TPT_ERR_TBIT:
- case TPT_ERR_MO:
- case TPT_ERR_MSN_RANGE:
- case TPT_ERR_IRD_OVERFLOW:
- case TPT_ERR_OPCODE:
- wc->status = IB_WC_FATAL_ERR;
- break;
- case TPT_ERR_SWFLUSH:
- wc->status = IB_WC_WR_FLUSH_ERR;
- break;
- default:
- pr_err("Unexpected cqe_status 0x%x for QPID=0x%0x\n",
- CQE_STATUS(cqe), CQE_QPID(cqe));
- ret = -EINVAL;
- }
- }
-out:
- return ret;
-}
-
-/*
- * Get one cq entry from cxio and map it to openib.
- *
- * Returns:
- * 0 CQ empty
- * 1 cqe returned
- * -EAGAIN caller must try again
- * any other -errno fatal error
- */
-static int iwch_poll_cq_one(struct iwch_dev *rhp, struct iwch_cq *chp,
- struct ib_wc *wc)
-{
- struct iwch_qp *qhp;
- struct t3_cqe *rd_cqe;
- int ret;
-
- rd_cqe = cxio_next_cqe(&chp->cq);
-
- if (!rd_cqe)
- return 0;
-
- qhp = get_qhp(rhp, CQE_QPID(*rd_cqe));
- if (qhp) {
- spin_lock(&qhp->lock);
- ret = __iwch_poll_cq_one(rhp, chp, qhp, wc);
- spin_unlock(&qhp->lock);
- } else {
- ret = __iwch_poll_cq_one(rhp, chp, NULL, wc);
- }
- return ret;
-}
-
-int iwch_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
-{
- struct iwch_dev *rhp;
- struct iwch_cq *chp;
- unsigned long flags;
- int npolled;
- int err = 0;
-
- chp = to_iwch_cq(ibcq);
- rhp = chp->rhp;
-
- spin_lock_irqsave(&chp->lock, flags);
- for (npolled = 0; npolled < num_entries; ++npolled) {
-
- /*
- * Because T3 can post CQEs that are _not_ associated
- * with a WR, we might have to poll again after removing
- * one of these.
- */
- do {
- err = iwch_poll_cq_one(rhp, chp, wc + npolled);
- } while (err == -EAGAIN);
- if (err <= 0)
- break;
- }
- spin_unlock_irqrestore(&chp->lock, flags);
-
- if (err < 0)
- return err;
- else {
- return npolled;
- }
-}
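
iwch_poll_cq() above is this driver's ib_poll_cq() backend; the inner do/while hides the -EAGAIN produced by T3's unassociated CQEs, so callers see only "n completions" or an error. A typical consumer drains it in batches like this (my_drain_cq and the batch size of 16 are illustrative):

#include <rdma/ib_verbs.h>

static void my_drain_cq(struct ib_cq *my_cq)
{
        struct ib_wc wc[16];
        int i, n;

        /* keep polling until the CQ reports no more completions */
        while ((n = ib_poll_cq(my_cq, 16, wc)) > 0) {
                for (i = 0; i < n; i++) {
                        if (wc[i].status != IB_WC_SUCCESS)
                                pr_err("wr_id %llu failed: %d\n",
                                       (unsigned long long)wc[i].wr_id,
                                       wc[i].status);
                }
        }
}
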
diff --git a/drivers/infiniband/hw/cxgb3/iwch_ev.c b/drivers/infiniband/hw/cxgb3/iwch_ev.c
deleted file mode 100644
index 9d356c1301c7..000000000000
--- a/drivers/infiniband/hw/cxgb3/iwch_ev.c
+++ /dev/null
@@ -1,232 +0,0 @@
-/*
- * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include <linux/gfp.h>
-#include <linux/mman.h>
-#include <net/sock.h>
-#include "iwch_provider.h"
-#include "iwch.h"
-#include "iwch_cm.h"
-#include "cxio_hal.h"
-#include "cxio_wr.h"
-
-static void post_qp_event(struct iwch_dev *rnicp, struct iwch_cq *chp,
- struct respQ_msg_t *rsp_msg,
- enum ib_event_type ib_event,
- int send_term)
-{
- struct ib_event event;
- struct iwch_qp_attributes attrs;
- struct iwch_qp *qhp;
- unsigned long flag;
-
- xa_lock(&rnicp->qps);
- qhp = xa_load(&rnicp->qps, CQE_QPID(rsp_msg->cqe));
-
- if (!qhp) {
- pr_err("%s unaffiliated error 0x%x qpid 0x%x\n",
- __func__, CQE_STATUS(rsp_msg->cqe),
- CQE_QPID(rsp_msg->cqe));
- xa_unlock(&rnicp->qps);
- return;
- }
-
- if ((qhp->attr.state == IWCH_QP_STATE_ERROR) ||
- (qhp->attr.state == IWCH_QP_STATE_TERMINATE)) {
- pr_debug("%s AE received after RTS - qp state %d qpid 0x%x status 0x%x\n",
- __func__,
- qhp->attr.state, qhp->wq.qpid,
- CQE_STATUS(rsp_msg->cqe));
- xa_unlock(&rnicp->qps);
- return;
- }
-
- pr_err("%s - AE qpid 0x%x opcode %d status 0x%x type %d wrid.hi 0x%x wrid.lo 0x%x\n",
- __func__,
- CQE_QPID(rsp_msg->cqe), CQE_OPCODE(rsp_msg->cqe),
- CQE_STATUS(rsp_msg->cqe), CQE_TYPE(rsp_msg->cqe),
- CQE_WRID_HI(rsp_msg->cqe), CQE_WRID_LOW(rsp_msg->cqe));
-
- atomic_inc(&qhp->refcnt);
- xa_unlock(&rnicp->qps);
-
- if (qhp->attr.state == IWCH_QP_STATE_RTS) {
- attrs.next_state = IWCH_QP_STATE_TERMINATE;
- iwch_modify_qp(qhp->rhp, qhp, IWCH_QP_ATTR_NEXT_STATE,
- &attrs, 1);
- if (send_term)
- iwch_post_terminate(qhp, rsp_msg);
- }
-
- event.event = ib_event;
- event.device = chp->ibcq.device;
- if (ib_event == IB_EVENT_CQ_ERR)
- event.element.cq = &chp->ibcq;
- else
- event.element.qp = &qhp->ibqp;
-
- if (qhp->ibqp.event_handler)
- (*qhp->ibqp.event_handler)(&event, qhp->ibqp.qp_context);
-
- spin_lock_irqsave(&chp->comp_handler_lock, flag);
- (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
- spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
-
- if (atomic_dec_and_test(&qhp->refcnt))
- wake_up(&qhp->wait);
-}
-
-void iwch_ev_dispatch(struct cxio_rdev *rdev_p, struct sk_buff *skb)
-{
- struct iwch_dev *rnicp;
- struct respQ_msg_t *rsp_msg = (struct respQ_msg_t *) skb->data;
- struct iwch_cq *chp;
- struct iwch_qp *qhp;
- u32 cqid = RSPQ_CQID(rsp_msg);
- unsigned long flag;
-
- rnicp = (struct iwch_dev *) rdev_p->ulp;
- xa_lock(&rnicp->qps);
- chp = get_chp(rnicp, cqid);
- qhp = xa_load(&rnicp->qps, CQE_QPID(rsp_msg->cqe));
- if (!chp || !qhp) {
- pr_err("BAD AE cqid 0x%x qpid 0x%x opcode %d status 0x%x type %d wrid.hi 0x%x wrid.lo 0x%x\n",
- cqid, CQE_QPID(rsp_msg->cqe),
- CQE_OPCODE(rsp_msg->cqe), CQE_STATUS(rsp_msg->cqe),
- CQE_TYPE(rsp_msg->cqe), CQE_WRID_HI(rsp_msg->cqe),
- CQE_WRID_LOW(rsp_msg->cqe));
- xa_unlock(&rnicp->qps);
- goto out;
- }
- iwch_qp_add_ref(&qhp->ibqp);
- atomic_inc(&chp->refcnt);
- xa_unlock(&rnicp->qps);
-
- /*
- * 1) completion of our sending a TERMINATE.
- * 2) incoming TERMINATE message.
- */
- if ((CQE_OPCODE(rsp_msg->cqe) == T3_TERMINATE) &&
- (CQE_STATUS(rsp_msg->cqe) == 0)) {
- if (SQ_TYPE(rsp_msg->cqe)) {
- pr_debug("%s QPID 0x%x ep %p disconnecting\n",
- __func__, qhp->wq.qpid, qhp->ep);
- iwch_ep_disconnect(qhp->ep, 0, GFP_ATOMIC);
- } else {
- pr_debug("%s post REQ_ERR AE QPID 0x%x\n", __func__,
- qhp->wq.qpid);
- post_qp_event(rnicp, chp, rsp_msg,
- IB_EVENT_QP_REQ_ERR, 0);
- iwch_ep_disconnect(qhp->ep, 0, GFP_ATOMIC);
- }
- goto done;
- }
-
- /* Bad incoming Read request */
- if (SQ_TYPE(rsp_msg->cqe) &&
- (CQE_OPCODE(rsp_msg->cqe) == T3_READ_RESP)) {
- post_qp_event(rnicp, chp, rsp_msg, IB_EVENT_QP_REQ_ERR, 1);
- goto done;
- }
-
- /* Bad incoming write */
- if (RQ_TYPE(rsp_msg->cqe) &&
- (CQE_OPCODE(rsp_msg->cqe) == T3_RDMA_WRITE)) {
- post_qp_event(rnicp, chp, rsp_msg, IB_EVENT_QP_REQ_ERR, 1);
- goto done;
- }
-
- switch (CQE_STATUS(rsp_msg->cqe)) {
-
- /* Completion Events */
- case TPT_ERR_SUCCESS:
-
- /*
- * Confirm the destination entry if this is a RECV completion.
- */
- if (qhp->ep && SQ_TYPE(rsp_msg->cqe))
- dst_confirm(qhp->ep->dst);
- spin_lock_irqsave(&chp->comp_handler_lock, flag);
- (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
- spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
- break;
-
- case TPT_ERR_STAG:
- case TPT_ERR_PDID:
- case TPT_ERR_QPID:
- case TPT_ERR_ACCESS:
- case TPT_ERR_WRAP:
- case TPT_ERR_BOUND:
- case TPT_ERR_INVALIDATE_SHARED_MR:
- case TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND:
- post_qp_event(rnicp, chp, rsp_msg, IB_EVENT_QP_ACCESS_ERR, 1);
- break;
-
- /* Device Fatal Errors */
- case TPT_ERR_ECC:
- case TPT_ERR_ECC_PSTAG:
- case TPT_ERR_INTERNAL_ERR:
- post_qp_event(rnicp, chp, rsp_msg, IB_EVENT_DEVICE_FATAL, 1);
- break;
-
- /* QP Fatal Errors */
- case TPT_ERR_OUT_OF_RQE:
- case TPT_ERR_PBL_ADDR_BOUND:
- case TPT_ERR_CRC:
- case TPT_ERR_MARKER:
- case TPT_ERR_PDU_LEN_ERR:
- case TPT_ERR_DDP_VERSION:
- case TPT_ERR_RDMA_VERSION:
- case TPT_ERR_OPCODE:
- case TPT_ERR_DDP_QUEUE_NUM:
- case TPT_ERR_MSN:
- case TPT_ERR_TBIT:
- case TPT_ERR_MO:
- case TPT_ERR_MSN_GAP:
- case TPT_ERR_MSN_RANGE:
- case TPT_ERR_RQE_ADDR_BOUND:
- case TPT_ERR_IRD_OVERFLOW:
- post_qp_event(rnicp, chp, rsp_msg, IB_EVENT_QP_FATAL, 1);
- break;
-
- default:
- pr_err("Unknown T3 status 0x%x QPID 0x%x\n",
- CQE_STATUS(rsp_msg->cqe), qhp->wq.qpid);
- post_qp_event(rnicp, chp, rsp_msg, IB_EVENT_QP_FATAL, 1);
- break;
- }
-done:
- if (atomic_dec_and_test(&chp->refcnt))
- wake_up(&chp->wait);
- iwch_qp_rem_ref(&qhp->ibqp);
-out:
- dev_kfree_skb_irq(skb);
-}
diff --git a/drivers/infiniband/hw/cxgb3/iwch_mem.c b/drivers/infiniband/hw/cxgb3/iwch_mem.c
deleted file mode 100644
index ce0f2741821d..000000000000
--- a/drivers/infiniband/hw/cxgb3/iwch_mem.c
+++ /dev/null
@@ -1,101 +0,0 @@
-/*
- * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include <linux/slab.h>
-#include <asm/byteorder.h>
-
-#include <rdma/iw_cm.h>
-#include <rdma/ib_verbs.h>
-
-#include "cxio_hal.h"
-#include "cxio_resource.h"
-#include "iwch.h"
-#include "iwch_provider.h"
-
-static int iwch_finish_mem_reg(struct iwch_mr *mhp, u32 stag)
-{
- u32 mmid;
-
- mhp->attr.state = 1;
- mhp->attr.stag = stag;
- mmid = stag >> 8;
- mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
- pr_debug("%s mmid 0x%x mhp %p\n", __func__, mmid, mhp);
- return xa_insert_irq(&mhp->rhp->mrs, mmid, mhp, GFP_KERNEL);
-}
-
-int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php,
- struct iwch_mr *mhp, int shift)
-{
- u32 stag;
- int ret;
-
- if (cxio_register_phys_mem(&rhp->rdev,
- &stag, mhp->attr.pdid,
- mhp->attr.perms,
- mhp->attr.zbva,
- mhp->attr.va_fbo,
- mhp->attr.len,
- shift - 12,
- mhp->attr.pbl_size, mhp->attr.pbl_addr))
- return -ENOMEM;
-
- ret = iwch_finish_mem_reg(mhp, stag);
- if (ret)
- cxio_dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
- mhp->attr.pbl_addr);
- return ret;
-}
-
-int iwch_alloc_pbl(struct iwch_mr *mhp, int npages)
-{
- mhp->attr.pbl_addr = cxio_hal_pblpool_alloc(&mhp->rhp->rdev,
- npages << 3);
-
- if (!mhp->attr.pbl_addr)
- return -ENOMEM;
-
- mhp->attr.pbl_size = npages;
-
- return 0;
-}
-
-void iwch_free_pbl(struct iwch_mr *mhp)
-{
- cxio_hal_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
- mhp->attr.pbl_size << 3);
-}
-
-int iwch_write_pbl(struct iwch_mr *mhp, __be64 *pages, int npages, int offset)
-{
- return cxio_write_pbl(&mhp->rhp->rdev, pages,
- mhp->attr.pbl_addr + (offset << 3), npages);
-}
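
The << 3 shifts in the PBL helpers above convert page counts and page indexes into bytes, because each page-backing-list entry is a single __be64 (8 bytes) in adapter memory. A trivial standalone check of that arithmetic:

#include <assert.h>
#include <stdint.h>

/* byte offset of PBL entry 'idx' from the pool base */
static uint32_t pbl_off(uint32_t idx)
{
        return idx << 3;        /* == idx * sizeof(__be64) */
}

int main(void)
{
        assert(pbl_off(0) == 0);
        assert(pbl_off(100) == 800);
        /* a 512-page MR consumes 512 * 8 = 4096 bytes of PBL memory */
        assert((512u << 3) == 4096);
        return 0;
}
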
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
deleted file mode 100644
index dcf02ec02810..000000000000
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ /dev/null
@@ -1,1321 +0,0 @@
-/*
- * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/device.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/delay.h>
-#include <linux/errno.h>
-#include <linux/list.h>
-#include <linux/sched/mm.h>
-#include <linux/spinlock.h>
-#include <linux/ethtool.h>
-#include <linux/rtnetlink.h>
-#include <linux/inetdevice.h>
-#include <linux/slab.h>
-
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/byteorder.h>
-
-#include <rdma/iw_cm.h>
-#include <rdma/ib_verbs.h>
-#include <rdma/ib_smi.h>
-#include <rdma/ib_umem.h>
-#include <rdma/ib_user_verbs.h>
-#include <rdma/uverbs_ioctl.h>
-
-#include "cxio_hal.h"
-#include "iwch.h"
-#include "iwch_provider.h"
-#include "iwch_cm.h"
-#include <rdma/cxgb3-abi.h>
-#include "common.h"
-
-static void iwch_dealloc_ucontext(struct ib_ucontext *context)
-{
- struct iwch_dev *rhp = to_iwch_dev(context->device);
- struct iwch_ucontext *ucontext = to_iwch_ucontext(context);
- struct iwch_mm_entry *mm, *tmp;
-
- pr_debug("%s context %p\n", __func__, context);
- list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
- kfree(mm);
- cxio_release_ucontext(&rhp->rdev, &ucontext->uctx);
-}
-
-static int iwch_alloc_ucontext(struct ib_ucontext *ucontext,
- struct ib_udata *udata)
-{
- struct ib_device *ibdev = ucontext->device;
- struct iwch_ucontext *context = to_iwch_ucontext(ucontext);
- struct iwch_dev *rhp = to_iwch_dev(ibdev);
-
- pr_debug("%s ibdev %p\n", __func__, ibdev);
- cxio_init_ucontext(&rhp->rdev, &context->uctx);
- INIT_LIST_HEAD(&context->mmaps);
- spin_lock_init(&context->mmap_lock);
- return 0;
-}
-
-static void iwch_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
-{
- struct iwch_cq *chp;
-
- pr_debug("%s ib_cq %p\n", __func__, ib_cq);
- chp = to_iwch_cq(ib_cq);
-
- xa_erase_irq(&chp->rhp->cqs, chp->cq.cqid);
- atomic_dec(&chp->refcnt);
- wait_event(chp->wait, !atomic_read(&chp->refcnt));
-
- cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);
-}
-
-static int iwch_create_cq(struct ib_cq *ibcq,
- const struct ib_cq_init_attr *attr,
- struct ib_udata *udata)
-{
- struct ib_device *ibdev = ibcq->device;
- int entries = attr->cqe;
- struct iwch_dev *rhp = to_iwch_dev(ibcq->device);
- struct iwch_cq *chp = to_iwch_cq(ibcq);
- struct iwch_create_cq_resp uresp;
- struct iwch_create_cq_req ureq;
- static int warned;
- size_t resplen;
-
- pr_debug("%s ib_dev %p entries %d\n", __func__, ibdev, entries);
- if (attr->flags)
- return -EINVAL;
-
- if (udata) {
- if (!t3a_device(rhp)) {
- if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
- return -EFAULT;
-
- chp->user_rptr_addr = (u32 __user *)(unsigned long)ureq.user_rptr_addr;
- }
- }
-
- if (t3a_device(rhp)) {
-
- /*
- * T3A: Add some fluff to handle extra CQEs inserted
- * for various errors.
- * Additional CQE possibilities:
- * TERMINATE,
- * incoming RDMA WRITE failures,
- * incoming RDMA READ REQUEST failures.
- * NOTE: We cannot ensure the CQ won't overflow.
- */
- entries += 16;
- }
- entries = roundup_pow_of_two(entries);
- chp->cq.size_log2 = ilog2(entries);
-
- if (cxio_create_cq(&rhp->rdev, &chp->cq, !udata))
- return -ENOMEM;
-
- chp->rhp = rhp;
- chp->ibcq.cqe = 1 << chp->cq.size_log2;
- spin_lock_init(&chp->lock);
- spin_lock_init(&chp->comp_handler_lock);
- atomic_set(&chp->refcnt, 1);
- init_waitqueue_head(&chp->wait);
- if (xa_store_irq(&rhp->cqs, chp->cq.cqid, chp, GFP_KERNEL)) {
- cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);
- return -ENOMEM;
- }
-
- if (udata) {
- struct iwch_mm_entry *mm;
- struct iwch_ucontext *ucontext = rdma_udata_to_drv_context(
- udata, struct iwch_ucontext, ibucontext);
-
- mm = kmalloc(sizeof(*mm), GFP_KERNEL);
- if (!mm) {
- iwch_destroy_cq(&chp->ibcq, udata);
- return -ENOMEM;
- }
- uresp.cqid = chp->cq.cqid;
- uresp.size_log2 = chp->cq.size_log2;
- spin_lock(&ucontext->mmap_lock);
- uresp.key = ucontext->key;
- ucontext->key += PAGE_SIZE;
- spin_unlock(&ucontext->mmap_lock);
- mm->key = uresp.key;
- mm->addr = virt_to_phys(chp->cq.queue);
- if (udata->outlen < sizeof(uresp)) {
- if (!warned++)
- pr_warn("Warning - downlevel libcxgb3 (non-fatal)\n");
- mm->len = PAGE_ALIGN((1UL << uresp.size_log2) *
- sizeof(struct t3_cqe));
- resplen = sizeof(struct iwch_create_cq_resp_v0);
- } else {
- mm->len = PAGE_ALIGN(((1UL << uresp.size_log2) + 1) *
- sizeof(struct t3_cqe));
- uresp.memsize = mm->len;
- uresp.reserved = 0;
- resplen = sizeof(uresp);
- }
- if (ib_copy_to_udata(udata, &uresp, resplen)) {
- kfree(mm);
- iwch_destroy_cq(&chp->ibcq, udata);
- return -EFAULT;
- }
- insert_mmap(ucontext, mm);
- }
- pr_debug("created cqid 0x%0x chp %p size 0x%0x, dma_addr %pad\n",
- chp->cq.cqid, chp, (1 << chp->cq.size_log2),
- &chp->cq.dma_addr);
- return 0;
-}
-
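
The CQ sizing in iwch_create_cq() above: T3A gets 16 extra entries for error CQEs, the total is rounded up to a power of two, and only the log2 is stored. A standalone check with an illustrative request of 100 entries:

#include <assert.h>

/* round up to the next power of two (v > 0), as roundup_pow_of_two() does */
static unsigned long rup2(unsigned long v)
{
        unsigned long r = 1;

        while (r < v)
                r <<= 1;
        return r;
}

int main(void)
{
        unsigned long entries = 100 + 16;       /* requested + T3A fluff */
        unsigned long size = rup2(entries);     /* 128 */
        unsigned int log2 = 0;

        while ((1UL << log2) < size)
                log2++;
        assert(size == 128 && log2 == 7);
        /* the driver reports back 1 << size_log2 usable CQEs */
        assert((1UL << log2) == size);
        return 0;
}
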
-static int iwch_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
-{
- struct iwch_dev *rhp;
- struct iwch_cq *chp;
- enum t3_cq_opcode cq_op;
- int err;
- unsigned long flag;
- u32 rptr;
-
- chp = to_iwch_cq(ibcq);
- rhp = chp->rhp;
- if ((flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED)
- cq_op = CQ_ARM_SE;
- else
- cq_op = CQ_ARM_AN;
- if (chp->user_rptr_addr) {
- if (get_user(rptr, chp->user_rptr_addr))
- return -EFAULT;
- spin_lock_irqsave(&chp->lock, flag);
- chp->cq.rptr = rptr;
- } else
- spin_lock_irqsave(&chp->lock, flag);
- pr_debug("%s rptr 0x%x\n", __func__, chp->cq.rptr);
- err = cxio_hal_cq_op(&rhp->rdev, &chp->cq, cq_op, 0);
- spin_unlock_irqrestore(&chp->lock, flag);
- if (err < 0)
- pr_err("Error %d rearming CQID 0x%x\n", err, chp->cq.cqid);
- if (err > 0 && !(flags & IB_CQ_REPORT_MISSED_EVENTS))
- err = 0;
- return err;
-}
-
-static int iwch_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
-{
- int len = vma->vm_end - vma->vm_start;
- u32 key = vma->vm_pgoff << PAGE_SHIFT;
- struct cxio_rdev *rdev_p;
- int ret = 0;
- struct iwch_mm_entry *mm;
- struct iwch_ucontext *ucontext;
- u64 addr;
-
- pr_debug("%s pgoff 0x%lx key 0x%x len %d\n", __func__, vma->vm_pgoff,
- key, len);
-
- if (vma->vm_start & (PAGE_SIZE-1)) {
- return -EINVAL;
- }
-
- rdev_p = &(to_iwch_dev(context->device)->rdev);
- ucontext = to_iwch_ucontext(context);
-
- mm = remove_mmap(ucontext, key, len);
- if (!mm)
- return -EINVAL;
- addr = mm->addr;
- kfree(mm);
-
- if ((addr >= rdev_p->rnic_info.udbell_physbase) &&
- (addr < (rdev_p->rnic_info.udbell_physbase +
- rdev_p->rnic_info.udbell_len))) {
-
- /*
- * Map T3 DB register.
- */
- if (vma->vm_flags & VM_READ) {
- return -EPERM;
- }
-
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
- vma->vm_flags &= ~VM_MAYREAD;
- ret = io_remap_pfn_range(vma, vma->vm_start,
- addr >> PAGE_SHIFT,
- len, vma->vm_page_prot);
- } else {
-
- /*
- * Map WQ or CQ contig dma memory...
- */
- ret = remap_pfn_range(vma, vma->vm_start,
- addr >> PAGE_SHIFT,
- len, vma->vm_page_prot);
- }
-
- return ret;
-}
-
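
iwch_mmap() above interprets the mmap offset as an opaque key previously handed to userspace in a create response; remove_mmap() translates it back to the real physical address, so user code never deals in raw addresses. A hedged sketch of the matching userspace call, assuming a verbs device fd and the (key, len) pair from the create response (map_queue is hypothetical):

#include <stdint.h>
#include <stddef.h>
#include <sys/mman.h>

/* map a queue the kernel described by (key, len) in its create response */
static void *map_queue(int verbs_fd, uint64_t key, size_t len)
{
        /* the key becomes the mmap offset; iwch_mmap() looks it up */
        void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_SHARED, verbs_fd, (off_t)key);

        return p == MAP_FAILED ? NULL : p;
}
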
-static void iwch_deallocate_pd(struct ib_pd *pd, struct ib_udata *udata)
-{
- struct iwch_dev *rhp;
- struct iwch_pd *php;
-
- php = to_iwch_pd(pd);
- rhp = php->rhp;
- pr_debug("%s ibpd %p pdid 0x%x\n", __func__, pd, php->pdid);
- cxio_hal_put_pdid(rhp->rdev.rscp, php->pdid);
-}
-
-static int iwch_allocate_pd(struct ib_pd *pd, struct ib_udata *udata)
-{
- struct iwch_pd *php = to_iwch_pd(pd);
- struct ib_device *ibdev = pd->device;
- u32 pdid;
- struct iwch_dev *rhp;
-
- pr_debug("%s ibdev %p\n", __func__, ibdev);
- rhp = (struct iwch_dev *) ibdev;
- pdid = cxio_hal_get_pdid(rhp->rdev.rscp);
- if (!pdid)
- return -EINVAL;
-
- php->pdid = pdid;
- php->rhp = rhp;
- if (udata) {
- struct iwch_alloc_pd_resp resp = {.pdid = php->pdid};
-
- if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
- iwch_deallocate_pd(&php->ibpd, udata);
- return -EFAULT;
- }
- }
- pr_debug("%s pdid 0x%0x ptr 0x%p\n", __func__, pdid, php);
- return 0;
-}
-
-static int iwch_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
-{
- struct iwch_dev *rhp;
- struct iwch_mr *mhp;
- u32 mmid;
-
- pr_debug("%s ib_mr %p\n", __func__, ib_mr);
-
- mhp = to_iwch_mr(ib_mr);
- kfree(mhp->pages);
- rhp = mhp->rhp;
- mmid = mhp->attr.stag >> 8;
- cxio_dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
- mhp->attr.pbl_addr);
- iwch_free_pbl(mhp);
- xa_erase_irq(&rhp->mrs, mmid);
- if (mhp->kva)
- kfree((void *) (unsigned long) mhp->kva);
- ib_umem_release(mhp->umem);
- pr_debug("%s mmid 0x%x ptr %p\n", __func__, mmid, mhp);
- kfree(mhp);
- return 0;
-}
-
-static struct ib_mr *iwch_get_dma_mr(struct ib_pd *pd, int acc)
-{
- const u64 total_size = 0xffffffff;
- const u64 mask = (total_size + PAGE_SIZE - 1) & PAGE_MASK;
- struct iwch_pd *php = to_iwch_pd(pd);
- struct iwch_dev *rhp = php->rhp;
- struct iwch_mr *mhp;
- __be64 *page_list;
- int shift = 26, npages, ret, i;
-
- pr_debug("%s ib_pd %p\n", __func__, pd);
-
- /*
- * T3 only supports 32 bits of size.
- */
- if (sizeof(phys_addr_t) > 4) {
- pr_warn_once("Cannot support dma_mrs on this platform\n");
- return ERR_PTR(-ENOTSUPP);
- }
-
- mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
- if (!mhp)
- return ERR_PTR(-ENOMEM);
-
- mhp->rhp = rhp;
-
- npages = (total_size + (1ULL << shift) - 1) >> shift;
- if (!npages) {
- ret = -EINVAL;
- goto err;
- }
-
- page_list = kmalloc_array(npages, sizeof(u64), GFP_KERNEL);
- if (!page_list) {
- ret = -ENOMEM;
- goto err;
- }
-
- for (i = 0; i < npages; i++)
- page_list[i] = cpu_to_be64((u64)i << shift);
-
- pr_debug("%s mask 0x%llx shift %d len %lld pbl_size %d\n",
- __func__, mask, shift, total_size, npages);
-
- ret = iwch_alloc_pbl(mhp, npages);
- if (ret) {
- kfree(page_list);
- goto err_pbl;
- }
-
- ret = iwch_write_pbl(mhp, page_list, npages, 0);
- kfree(page_list);
- if (ret)
- goto err_pbl;
-
- mhp->attr.pdid = php->pdid;
- mhp->attr.zbva = 0;
-
- mhp->attr.perms = iwch_ib_to_tpt_access(acc);
- mhp->attr.va_fbo = 0;
- mhp->attr.page_size = shift - 12;
-
- mhp->attr.len = (u32) total_size;
- mhp->attr.pbl_size = npages;
- ret = iwch_register_mem(rhp, php, mhp, shift);
- if (ret)
- goto err_pbl;
-
- return &mhp->ibmr;
-
-err_pbl:
- iwch_free_pbl(mhp);
-
-err:
- kfree(mhp);
- return ERR_PTR(ret);
-}
-
-static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
- u64 virt, int acc, struct ib_udata *udata)
-{
- __be64 *pages;
- int shift, n, i;
- int err = 0;
- struct iwch_dev *rhp;
- struct iwch_pd *php;
- struct iwch_mr *mhp;
- struct iwch_reg_user_mr_resp uresp;
- struct sg_dma_page_iter sg_iter;
- pr_debug("%s ib_pd %p\n", __func__, pd);
-
- php = to_iwch_pd(pd);
- rhp = php->rhp;
- mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
- if (!mhp)
- return ERR_PTR(-ENOMEM);
-
- mhp->rhp = rhp;
-
- mhp->umem = ib_umem_get(udata, start, length, acc, 0);
- if (IS_ERR(mhp->umem)) {
- err = PTR_ERR(mhp->umem);
- kfree(mhp);
- return ERR_PTR(err);
- }
-
- shift = PAGE_SHIFT;
-
- n = ib_umem_num_pages(mhp->umem);
-
- err = iwch_alloc_pbl(mhp, n);
- if (err)
- goto err;
-
- pages = (__be64 *) __get_free_page(GFP_KERNEL);
- if (!pages) {
- err = -ENOMEM;
- goto err_pbl;
- }
-
- i = n = 0;
-
- for_each_sg_dma_page(mhp->umem->sg_head.sgl, &sg_iter, mhp->umem->nmap, 0) {
- pages[i++] = cpu_to_be64(sg_page_iter_dma_address(&sg_iter));
- if (i == PAGE_SIZE / sizeof(*pages)) {
- err = iwch_write_pbl(mhp, pages, i, n);
- if (err)
- goto pbl_done;
- n += i;
- i = 0;
- }
- }
-
- if (i)
- err = iwch_write_pbl(mhp, pages, i, n);
-
-pbl_done:
- free_page((unsigned long) pages);
- if (err)
- goto err_pbl;
-
- mhp->attr.pdid = php->pdid;
- mhp->attr.zbva = 0;
- mhp->attr.perms = iwch_ib_to_tpt_access(acc);
- mhp->attr.va_fbo = virt;
- mhp->attr.page_size = shift - 12;
- mhp->attr.len = (u32) length;
-
- err = iwch_register_mem(rhp, php, mhp, shift);
- if (err)
- goto err_pbl;
-
- if (udata && !t3a_device(rhp)) {
- uresp.pbl_addr = (mhp->attr.pbl_addr -
- rhp->rdev.rnic_info.pbl_base) >> 3;
- pr_debug("%s user resp pbl_addr 0x%x\n", __func__,
- uresp.pbl_addr);
-
- if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
- iwch_dereg_mr(&mhp->ibmr, udata);
- err = -EFAULT;
- goto err;
- }
- }
-
- return &mhp->ibmr;
-
-err_pbl:
- iwch_free_pbl(mhp);
-
-err:
- ib_umem_release(mhp->umem);
- kfree(mhp);
- return ERR_PTR(err);
-}
-
-static struct ib_mw *iwch_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
- struct ib_udata *udata)
-{
- struct iwch_dev *rhp;
- struct iwch_pd *php;
- struct iwch_mw *mhp;
- u32 mmid;
- u32 stag = 0;
- int ret;
-
- if (type != IB_MW_TYPE_1)
- return ERR_PTR(-EINVAL);
-
- php = to_iwch_pd(pd);
- rhp = php->rhp;
- mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
- if (!mhp)
- return ERR_PTR(-ENOMEM);
- ret = cxio_allocate_window(&rhp->rdev, &stag, php->pdid);
- if (ret) {
- kfree(mhp);
- return ERR_PTR(ret);
- }
- mhp->rhp = rhp;
- mhp->attr.pdid = php->pdid;
- mhp->attr.type = TPT_MW;
- mhp->attr.stag = stag;
- mmid = (stag) >> 8;
- mhp->ibmw.rkey = stag;
- if (xa_insert_irq(&rhp->mrs, mmid, mhp, GFP_KERNEL)) {
- cxio_deallocate_window(&rhp->rdev, mhp->attr.stag);
- kfree(mhp);
- return ERR_PTR(-ENOMEM);
- }
- pr_debug("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
- return &(mhp->ibmw);
-}
-
-static int iwch_dealloc_mw(struct ib_mw *mw)
-{
- struct iwch_dev *rhp;
- struct iwch_mw *mhp;
- u32 mmid;
-
- mhp = to_iwch_mw(mw);
- rhp = mhp->rhp;
- mmid = (mw->rkey) >> 8;
- cxio_deallocate_window(&rhp->rdev, mhp->attr.stag);
- xa_erase_irq(&rhp->mrs, mmid);
- pr_debug("%s ib_mw %p mmid 0x%x ptr %p\n", __func__, mw, mmid, mhp);
- kfree(mhp);
- return 0;
-}
-
-static struct ib_mr *iwch_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
- u32 max_num_sg, struct ib_udata *udata)
-{
- struct iwch_dev *rhp;
- struct iwch_pd *php;
- struct iwch_mr *mhp;
- u32 mmid;
- u32 stag = 0;
- int ret = -ENOMEM;
-
- if (mr_type != IB_MR_TYPE_MEM_REG ||
- max_num_sg > T3_MAX_FASTREG_DEPTH)
- return ERR_PTR(-EINVAL);
-
- php = to_iwch_pd(pd);
- rhp = php->rhp;
- mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
- if (!mhp)
- goto err;
-
- mhp->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL);
- if (!mhp->pages)
- goto pl_err;
-
- mhp->rhp = rhp;
- ret = iwch_alloc_pbl(mhp, max_num_sg);
- if (ret)
- goto err1;
- mhp->attr.pbl_size = max_num_sg;
- ret = cxio_allocate_stag(&rhp->rdev, &stag, php->pdid,
- mhp->attr.pbl_size, mhp->attr.pbl_addr);
- if (ret)
- goto err2;
- mhp->attr.pdid = php->pdid;
- mhp->attr.type = TPT_NON_SHARED_MR;
- mhp->attr.stag = stag;
- mhp->attr.state = 1;
- mmid = (stag) >> 8;
- mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
- ret = xa_insert_irq(&rhp->mrs, mmid, mhp, GFP_KERNEL);
- if (ret)
- goto err3;
-
- pr_debug("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
- return &(mhp->ibmr);
-err3:
- cxio_dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size,
- mhp->attr.pbl_addr);
-err2:
- iwch_free_pbl(mhp);
-err1:
- kfree(mhp->pages);
-pl_err:
- kfree(mhp);
-err:
- return ERR_PTR(ret);
-}
-
-static int iwch_set_page(struct ib_mr *ibmr, u64 addr)
-{
- struct iwch_mr *mhp = to_iwch_mr(ibmr);
-
- if (unlikely(mhp->npages == mhp->attr.pbl_size))
- return -ENOMEM;
-
- mhp->pages[mhp->npages++] = addr;
-
- return 0;
-}
-
-static int iwch_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
- int sg_nents, unsigned int *sg_offset)
-{
- struct iwch_mr *mhp = to_iwch_mr(ibmr);
-
- mhp->npages = 0;
-
- return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, iwch_set_page);
-}
-
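
iwch_map_mr_sg() above is the driver half of the core fast-registration flow: ib_sg_to_pages() walks the scatterlist and calls iwch_set_page() once per page until mhp->pages is full. The caller half looks roughly like this (my_map_mr is illustrative; sg/nents come from an already DMA-mapped scatterlist):

#include <rdma/ib_verbs.h>

static int my_map_mr(struct ib_mr *mr, struct scatterlist *sg, int nents)
{
        int n;

        /* core walks sg and invokes the driver's set_page per page */
        n = ib_map_mr_sg(mr, sg, nents, NULL, PAGE_SIZE);
        if (n < 0)
                return n;
        if (n < nents)          /* driver ran out of PBL slots */
                return -EINVAL;
        return 0;
}
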
-static int iwch_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
-{
- struct iwch_dev *rhp;
- struct iwch_qp *qhp;
- struct iwch_qp_attributes attrs;
- struct iwch_ucontext *ucontext;
-
- qhp = to_iwch_qp(ib_qp);
- rhp = qhp->rhp;
-
- attrs.next_state = IWCH_QP_STATE_ERROR;
- iwch_modify_qp(rhp, qhp, IWCH_QP_ATTR_NEXT_STATE, &attrs, 0);
- wait_event(qhp->wait, !qhp->ep);
-
- xa_erase_irq(&rhp->qps, qhp->wq.qpid);
-
- atomic_dec(&qhp->refcnt);
- wait_event(qhp->wait, !atomic_read(&qhp->refcnt));
-
- ucontext = rdma_udata_to_drv_context(udata, struct iwch_ucontext,
- ibucontext);
- cxio_destroy_qp(&rhp->rdev, &qhp->wq,
- ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
-
- pr_debug("%s ib_qp %p qpid 0x%0x qhp %p\n", __func__,
- ib_qp, qhp->wq.qpid, qhp);
- kfree(qhp);
- return 0;
-}
-
-static struct ib_qp *iwch_create_qp(struct ib_pd *pd,
- struct ib_qp_init_attr *attrs,
- struct ib_udata *udata)
-{
- struct iwch_dev *rhp;
- struct iwch_qp *qhp;
- struct iwch_pd *php;
- struct iwch_cq *schp;
- struct iwch_cq *rchp;
- struct iwch_create_qp_resp uresp;
- int wqsize, sqsize, rqsize;
- struct iwch_ucontext *ucontext;
-
- pr_debug("%s ib_pd %p\n", __func__, pd);
- if (attrs->qp_type != IB_QPT_RC)
- return ERR_PTR(-EINVAL);
- php = to_iwch_pd(pd);
- rhp = php->rhp;
- schp = get_chp(rhp, ((struct iwch_cq *) attrs->send_cq)->cq.cqid);
- rchp = get_chp(rhp, ((struct iwch_cq *) attrs->recv_cq)->cq.cqid);
- if (!schp || !rchp)
- return ERR_PTR(-EINVAL);
-
- /* The RQT size must be # of entries + 1 rounded up to a power of two */
- rqsize = roundup_pow_of_two(attrs->cap.max_recv_wr);
- if (rqsize == attrs->cap.max_recv_wr)
- rqsize = roundup_pow_of_two(attrs->cap.max_recv_wr+1);
-
- /* T3 doesn't support RQT depth < 16 */
- if (rqsize < 16)
- rqsize = 16;
-
- if (rqsize > T3_MAX_RQ_SIZE)
- return ERR_PTR(-EINVAL);
-
- if (attrs->cap.max_inline_data > T3_MAX_INLINE)
- return ERR_PTR(-EINVAL);
-
- /*
- * NOTE: The SQ and total WQ sizes don't need to be
- * a power of two. However, all the code assumes
- * they are, e.g. Q_FREECNT() and friends.
- */
- sqsize = roundup_pow_of_two(attrs->cap.max_send_wr);
- wqsize = roundup_pow_of_two(rqsize + sqsize);
-
- /*
- * Kernel users need more wq space for fastreg WRs which can take
- * 2 WR fragments.
- */
- ucontext = rdma_udata_to_drv_context(udata, struct iwch_ucontext,
- ibucontext);
- if (!ucontext && wqsize < (rqsize + (2 * sqsize)))
- wqsize = roundup_pow_of_two(rqsize +
- roundup_pow_of_two(attrs->cap.max_send_wr * 2));
- pr_debug("%s wqsize %d sqsize %d rqsize %d\n", __func__,
- wqsize, sqsize, rqsize);
- qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
- if (!qhp)
- return ERR_PTR(-ENOMEM);
- qhp->wq.size_log2 = ilog2(wqsize);
- qhp->wq.rq_size_log2 = ilog2(rqsize);
- qhp->wq.sq_size_log2 = ilog2(sqsize);
- if (cxio_create_qp(&rhp->rdev, !udata, &qhp->wq,
- ucontext ? &ucontext->uctx : &rhp->rdev.uctx)) {
- kfree(qhp);
- return ERR_PTR(-ENOMEM);
- }
-
- attrs->cap.max_recv_wr = rqsize - 1;
- attrs->cap.max_send_wr = sqsize;
- attrs->cap.max_inline_data = T3_MAX_INLINE;
-
- qhp->rhp = rhp;
- qhp->attr.pd = php->pdid;
- qhp->attr.scq = ((struct iwch_cq *) attrs->send_cq)->cq.cqid;
- qhp->attr.rcq = ((struct iwch_cq *) attrs->recv_cq)->cq.cqid;
- qhp->attr.sq_num_entries = attrs->cap.max_send_wr;
- qhp->attr.rq_num_entries = attrs->cap.max_recv_wr;
- qhp->attr.sq_max_sges = attrs->cap.max_send_sge;
- qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge;
- qhp->attr.rq_max_sges = attrs->cap.max_recv_sge;
- qhp->attr.state = IWCH_QP_STATE_IDLE;
- qhp->attr.next_state = IWCH_QP_STATE_IDLE;
-
- /*
- * XXX - These don't get passed in from the openib user
- * at create time. The CM sets them via a QP modify.
- * Need to fix... I think the CM should
- */
- qhp->attr.enable_rdma_read = 1;
- qhp->attr.enable_rdma_write = 1;
- qhp->attr.enable_bind = 1;
- qhp->attr.max_ord = 1;
- qhp->attr.max_ird = 1;
-
- spin_lock_init(&qhp->lock);
- init_waitqueue_head(&qhp->wait);
- atomic_set(&qhp->refcnt, 1);
-
- if (xa_store_irq(&rhp->qps, qhp->wq.qpid, qhp, GFP_KERNEL)) {
- cxio_destroy_qp(&rhp->rdev, &qhp->wq,
- ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
- kfree(qhp);
- return ERR_PTR(-ENOMEM);
- }
-
- if (udata) {
-
- struct iwch_mm_entry *mm1, *mm2;
-
- mm1 = kmalloc(sizeof(*mm1), GFP_KERNEL);
- if (!mm1) {
- iwch_destroy_qp(&qhp->ibqp, udata);
- return ERR_PTR(-ENOMEM);
- }
-
- mm2 = kmalloc(sizeof(*mm2), GFP_KERNEL);
- if (!mm2) {
- kfree(mm1);
- iwch_destroy_qp(&qhp->ibqp, udata);
- return ERR_PTR(-ENOMEM);
- }
-
- uresp.qpid = qhp->wq.qpid;
- uresp.size_log2 = qhp->wq.size_log2;
- uresp.sq_size_log2 = qhp->wq.sq_size_log2;
- uresp.rq_size_log2 = qhp->wq.rq_size_log2;
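-		/*
-		 * Hand userspace two PAGE_SIZE-aligned mmap keys: one for
-		 * the WQ queue memory and one for the doorbell page.
-		 */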
- spin_lock(&ucontext->mmap_lock);
- uresp.key = ucontext->key;
- ucontext->key += PAGE_SIZE;
- uresp.db_key = ucontext->key;
- ucontext->key += PAGE_SIZE;
- spin_unlock(&ucontext->mmap_lock);
- if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
- kfree(mm1);
- kfree(mm2);
- iwch_destroy_qp(&qhp->ibqp, udata);
- return ERR_PTR(-EFAULT);
- }
- mm1->key = uresp.key;
- mm1->addr = virt_to_phys(qhp->wq.queue);
- mm1->len = PAGE_ALIGN(wqsize * sizeof(union t3_wr));
- insert_mmap(ucontext, mm1);
- mm2->key = uresp.db_key;
- mm2->addr = qhp->wq.udb & PAGE_MASK;
- mm2->len = PAGE_SIZE;
- insert_mmap(ucontext, mm2);
- }
- qhp->ibqp.qp_num = qhp->wq.qpid;
- pr_debug(
- "%s sq_num_entries %d, rq_num_entries %d qpid 0x%0x qhp %p dma_addr %pad size %d rq_addr 0x%x\n",
- __func__, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
- qhp->wq.qpid, qhp, &qhp->wq.dma_addr, 1 << qhp->wq.size_log2,
- qhp->wq.rq_addr);
- return &qhp->ibqp;
-}
-
-static int iwch_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
- int attr_mask, struct ib_udata *udata)
-{
- struct iwch_dev *rhp;
- struct iwch_qp *qhp;
- enum iwch_qp_attr_mask mask = 0;
- struct iwch_qp_attributes attrs = {};
-
- pr_debug("%s ib_qp %p\n", __func__, ibqp);
-
-	/* iWARP does not support the RTR state */
- if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
- attr_mask &= ~IB_QP_STATE;
-
- /* Make sure we still have something left to do */
- if (!attr_mask)
- return 0;
-
- qhp = to_iwch_qp(ibqp);
- rhp = qhp->rhp;
-
- attrs.next_state = iwch_convert_state(attr->qp_state);
- attrs.enable_rdma_read = (attr->qp_access_flags &
- IB_ACCESS_REMOTE_READ) ? 1 : 0;
- attrs.enable_rdma_write = (attr->qp_access_flags &
- IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
- attrs.enable_bind = (attr->qp_access_flags & IB_ACCESS_MW_BIND) ? 1 : 0;
-
- mask |= (attr_mask & IB_QP_STATE) ? IWCH_QP_ATTR_NEXT_STATE : 0;
- mask |= (attr_mask & IB_QP_ACCESS_FLAGS) ?
- (IWCH_QP_ATTR_ENABLE_RDMA_READ |
- IWCH_QP_ATTR_ENABLE_RDMA_WRITE |
- IWCH_QP_ATTR_ENABLE_RDMA_BIND) : 0;
-
- return iwch_modify_qp(rhp, qhp, mask, &attrs, 0);
-}
-
-void iwch_qp_add_ref(struct ib_qp *qp)
-{
- pr_debug("%s ib_qp %p\n", __func__, qp);
- atomic_inc(&(to_iwch_qp(qp)->refcnt));
-}
-
-void iwch_qp_rem_ref(struct ib_qp *qp)
-{
- pr_debug("%s ib_qp %p\n", __func__, qp);
- if (atomic_dec_and_test(&(to_iwch_qp(qp)->refcnt)))
- wake_up(&(to_iwch_qp(qp)->wait));
-}
-
-static struct ib_qp *iwch_get_qp(struct ib_device *dev, int qpn)
-{
- pr_debug("%s ib_dev %p qpn 0x%x\n", __func__, dev, qpn);
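-	/* The cast below is safe: ibqp is the first member of struct iwch_qp. */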
- return (struct ib_qp *)get_qhp(to_iwch_dev(dev), qpn);
-}
-
-static int iwch_query_pkey(struct ib_device *ibdev,
-			   u8 port, u16 index, u16 *pkey)
-{
- pr_debug("%s ibdev %p\n", __func__, ibdev);
- *pkey = 0;
- return 0;
-}
-
-static int iwch_query_gid(struct ib_device *ibdev, u8 port,
- int index, union ib_gid *gid)
-{
- struct iwch_dev *dev;
-
- pr_debug("%s ibdev %p, port %d, index %d, gid %p\n",
- __func__, ibdev, port, index, gid);
- dev = to_iwch_dev(ibdev);
- BUG_ON(port == 0 || port > 2);
- memset(&(gid->raw[0]), 0, sizeof(gid->raw));
- memcpy(&(gid->raw[0]), dev->rdev.port_info.lldevs[port-1]->dev_addr, 6);
- return 0;
-}
-
-static u64 fw_vers_string_to_u64(struct iwch_dev *iwch_dev)
-{
- struct ethtool_drvinfo info;
- struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;
- char *cp, *next;
- unsigned fw_maj, fw_min, fw_mic;
-
- lldev->ethtool_ops->get_drvinfo(lldev, &info);
-
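-	/*
-	 * Assumes a "<type>major.minor.micro" version string; the leading
-	 * type character is skipped before parsing the three fields.
-	 */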
- next = info.fw_version + 1;
- cp = strsep(&next, ".");
- sscanf(cp, "%i", &fw_maj);
- cp = strsep(&next, ".");
- sscanf(cp, "%i", &fw_min);
- cp = strsep(&next, ".");
- sscanf(cp, "%i", &fw_mic);
-
- return (((u64)fw_maj & 0xffff) << 32) | ((fw_min & 0xffff) << 16) |
- (fw_mic & 0xffff);
-}
-
-static int iwch_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
- struct ib_udata *uhw)
-{
- struct iwch_dev *dev;
-
- pr_debug("%s ibdev %p\n", __func__, ibdev);
-
- if (uhw->inlen || uhw->outlen)
- return -EINVAL;
-
- dev = to_iwch_dev(ibdev);
- memcpy(&props->sys_image_guid, dev->rdev.t3cdev_p->lldev->dev_addr, 6);
- props->hw_ver = dev->rdev.t3cdev_p->type;
- props->fw_ver = fw_vers_string_to_u64(dev);
- props->device_cap_flags = dev->device_cap_flags;
- props->page_size_cap = dev->attr.mem_pgsizes_bitmask;
- props->vendor_id = (u32)dev->rdev.rnic_info.pdev->vendor;
- props->vendor_part_id = (u32)dev->rdev.rnic_info.pdev->device;
- props->max_mr_size = dev->attr.max_mr_size;
- props->max_qp = dev->attr.max_qps;
- props->max_qp_wr = dev->attr.max_wrs;
- props->max_send_sge = dev->attr.max_sge_per_wr;
- props->max_recv_sge = dev->attr.max_sge_per_wr;
- props->max_sge_rd = 1;
- props->max_qp_rd_atom = dev->attr.max_rdma_reads_per_qp;
- props->max_qp_init_rd_atom = dev->attr.max_rdma_reads_per_qp;
- props->max_cq = dev->attr.max_cqs;
- props->max_cqe = dev->attr.max_cqes_per_cq;
- props->max_mr = dev->attr.max_mem_regs;
- props->max_pd = dev->attr.max_pds;
- props->local_ca_ack_delay = 0;
- props->max_fast_reg_page_list_len = T3_MAX_FASTREG_DEPTH;
-
- return 0;
-}
-
-static int iwch_query_port(struct ib_device *ibdev,
- u8 port, struct ib_port_attr *props)
-{
- pr_debug("%s ibdev %p\n", __func__, ibdev);
-
- props->port_cap_flags =
- IB_PORT_CM_SUP |
- IB_PORT_SNMP_TUNNEL_SUP |
- IB_PORT_REINIT_SUP |
- IB_PORT_DEVICE_MGMT_SUP |
- IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
- props->gid_tbl_len = 1;
- props->pkey_tbl_len = 1;
- props->active_width = 2;
- props->active_speed = IB_SPEED_DDR;
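-	/* -1 wraps around to the maximum u32 message size */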
- props->max_msg_sz = -1;
-
- return 0;
-}
-
-static ssize_t hw_rev_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct iwch_dev *iwch_dev =
- rdma_device_to_drv_device(dev, struct iwch_dev, ibdev);
-
- pr_debug("%s dev 0x%p\n", __func__, dev);
- return sprintf(buf, "%d\n", iwch_dev->rdev.t3cdev_p->type);
-}
-static DEVICE_ATTR_RO(hw_rev);
-
-static ssize_t hca_type_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct iwch_dev *iwch_dev =
- rdma_device_to_drv_device(dev, struct iwch_dev, ibdev);
- struct ethtool_drvinfo info;
- struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;
-
- pr_debug("%s dev 0x%p\n", __func__, dev);
- lldev->ethtool_ops->get_drvinfo(lldev, &info);
- return sprintf(buf, "%s\n", info.driver);
-}
-static DEVICE_ATTR_RO(hca_type);
-
-static ssize_t board_id_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct iwch_dev *iwch_dev =
- rdma_device_to_drv_device(dev, struct iwch_dev, ibdev);
-
- pr_debug("%s dev 0x%p\n", __func__, dev);
- return sprintf(buf, "%x.%x\n", iwch_dev->rdev.rnic_info.pdev->vendor,
- iwch_dev->rdev.rnic_info.pdev->device);
-}
-static DEVICE_ATTR_RO(board_id);
-
-enum counters {
- IPINRECEIVES,
- IPINHDRERRORS,
- IPINADDRERRORS,
- IPINUNKNOWNPROTOS,
- IPINDISCARDS,
- IPINDELIVERS,
- IPOUTREQUESTS,
- IPOUTDISCARDS,
- IPOUTNOROUTES,
- IPREASMTIMEOUT,
- IPREASMREQDS,
- IPREASMOKS,
- IPREASMFAILS,
- TCPACTIVEOPENS,
- TCPPASSIVEOPENS,
- TCPATTEMPTFAILS,
- TCPESTABRESETS,
- TCPCURRESTAB,
- TCPINSEGS,
- TCPOUTSEGS,
- TCPRETRANSSEGS,
- TCPINERRS,
- TCPOUTRSTS,
- TCPRTOMIN,
- TCPRTOMAX,
- NR_COUNTERS
-};
-
-static const char * const names[] = {
- [IPINRECEIVES] = "ipInReceives",
- [IPINHDRERRORS] = "ipInHdrErrors",
- [IPINADDRERRORS] = "ipInAddrErrors",
- [IPINUNKNOWNPROTOS] = "ipInUnknownProtos",
- [IPINDISCARDS] = "ipInDiscards",
- [IPINDELIVERS] = "ipInDelivers",
- [IPOUTREQUESTS] = "ipOutRequests",
- [IPOUTDISCARDS] = "ipOutDiscards",
- [IPOUTNOROUTES] = "ipOutNoRoutes",
- [IPREASMTIMEOUT] = "ipReasmTimeout",
- [IPREASMREQDS] = "ipReasmReqds",
- [IPREASMOKS] = "ipReasmOKs",
- [IPREASMFAILS] = "ipReasmFails",
- [TCPACTIVEOPENS] = "tcpActiveOpens",
- [TCPPASSIVEOPENS] = "tcpPassiveOpens",
- [TCPATTEMPTFAILS] = "tcpAttemptFails",
- [TCPESTABRESETS] = "tcpEstabResets",
- [TCPCURRESTAB] = "tcpCurrEstab",
- [TCPINSEGS] = "tcpInSegs",
- [TCPOUTSEGS] = "tcpOutSegs",
- [TCPRETRANSSEGS] = "tcpRetransSegs",
- [TCPINERRS] = "tcpInErrs",
- [TCPOUTRSTS] = "tcpOutRsts",
- [TCPRTOMIN] = "tcpRtoMin",
- [TCPRTOMAX] = "tcpRtoMax",
-};
-
-static struct rdma_hw_stats *iwch_alloc_stats(struct ib_device *ibdev,
- u8 port_num)
-{
- BUILD_BUG_ON(ARRAY_SIZE(names) != NR_COUNTERS);
-
-	/* Our driver only supports device-level stats */
- if (port_num != 0)
- return NULL;
-
- return rdma_alloc_hw_stats_struct(names, NR_COUNTERS,
- RDMA_HW_STATS_DEFAULT_LIFESPAN);
-}
-
-static int iwch_get_mib(struct ib_device *ibdev, struct rdma_hw_stats *stats,
- u8 port, int index)
-{
- struct iwch_dev *dev;
- struct tp_mib_stats m;
- int ret;
-
- if (port != 0 || !stats)
-		return -EOPNOTSUPP;
-
- pr_debug("%s ibdev %p\n", __func__, ibdev);
- dev = to_iwch_dev(ibdev);
- ret = dev->rdev.t3cdev_p->ctl(dev->rdev.t3cdev_p, RDMA_GET_MIB, &m);
- if (ret)
-		return ret;
-
- stats->value[IPINRECEIVES] = ((u64)m.ipInReceive_hi << 32) + m.ipInReceive_lo;
- stats->value[IPINHDRERRORS] = ((u64)m.ipInHdrErrors_hi << 32) + m.ipInHdrErrors_lo;
- stats->value[IPINADDRERRORS] = ((u64)m.ipInAddrErrors_hi << 32) + m.ipInAddrErrors_lo;
- stats->value[IPINUNKNOWNPROTOS] = ((u64)m.ipInUnknownProtos_hi << 32) + m.ipInUnknownProtos_lo;
- stats->value[IPINDISCARDS] = ((u64)m.ipInDiscards_hi << 32) + m.ipInDiscards_lo;
- stats->value[IPINDELIVERS] = ((u64)m.ipInDelivers_hi << 32) + m.ipInDelivers_lo;
- stats->value[IPOUTREQUESTS] = ((u64)m.ipOutRequests_hi << 32) + m.ipOutRequests_lo;
- stats->value[IPOUTDISCARDS] = ((u64)m.ipOutDiscards_hi << 32) + m.ipOutDiscards_lo;
- stats->value[IPOUTNOROUTES] = ((u64)m.ipOutNoRoutes_hi << 32) + m.ipOutNoRoutes_lo;
- stats->value[IPREASMTIMEOUT] = m.ipReasmTimeout;
- stats->value[IPREASMREQDS] = m.ipReasmReqds;
- stats->value[IPREASMOKS] = m.ipReasmOKs;
- stats->value[IPREASMFAILS] = m.ipReasmFails;
- stats->value[TCPACTIVEOPENS] = m.tcpActiveOpens;
- stats->value[TCPPASSIVEOPENS] = m.tcpPassiveOpens;
- stats->value[TCPATTEMPTFAILS] = m.tcpAttemptFails;
- stats->value[TCPESTABRESETS] = m.tcpEstabResets;
-	stats->value[TCPCURRESTAB] = m.tcpCurrEstab;
-	stats->value[TCPINSEGS] = ((u64)m.tcpInSegs_hi << 32) + m.tcpInSegs_lo;
-	stats->value[TCPOUTSEGS] = ((u64)m.tcpOutSegs_hi << 32) + m.tcpOutSegs_lo;
-	stats->value[TCPRETRANSSEGS] = ((u64)m.tcpRetransSeg_hi << 32) + m.tcpRetransSeg_lo;
-	stats->value[TCPINERRS] = ((u64)m.tcpInErrs_hi << 32) + m.tcpInErrs_lo;
-	stats->value[TCPOUTRSTS] = m.tcpOutRsts;
- stats->value[TCPRTOMIN] = m.tcpRtoMin;
- stats->value[TCPRTOMAX] = m.tcpRtoMax;
-
- return stats->num_counters;
-}
-
-static struct attribute *iwch_class_attributes[] = {
- &dev_attr_hw_rev.attr,
- &dev_attr_hca_type.attr,
- &dev_attr_board_id.attr,
- NULL
-};
-
-static const struct attribute_group iwch_attr_group = {
- .attrs = iwch_class_attributes,
-};
-
-static int iwch_port_immutable(struct ib_device *ibdev, u8 port_num,
- struct ib_port_immutable *immutable)
-{
- struct ib_port_attr attr;
- int err;
-
- immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
-
- err = ib_query_port(ibdev, port_num, &attr);
- if (err)
- return err;
-
- immutable->pkey_tbl_len = attr.pkey_tbl_len;
- immutable->gid_tbl_len = attr.gid_tbl_len;
-
- return 0;
-}
-
-static void get_dev_fw_ver_str(struct ib_device *ibdev, char *str)
-{
- struct iwch_dev *iwch_dev = to_iwch_dev(ibdev);
- struct ethtool_drvinfo info;
- struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;
-
- pr_debug("%s dev 0x%p\n", __func__, iwch_dev);
- lldev->ethtool_ops->get_drvinfo(lldev, &info);
- snprintf(str, IB_FW_VERSION_NAME_MAX, "%s", info.fw_version);
-}
-
-static const struct ib_device_ops iwch_dev_ops = {
- .owner = THIS_MODULE,
- .driver_id = RDMA_DRIVER_CXGB3,
- .uverbs_abi_ver = IWCH_UVERBS_ABI_VERSION,
- .uverbs_no_driver_id_binding = 1,
-
- .alloc_hw_stats = iwch_alloc_stats,
- .alloc_mr = iwch_alloc_mr,
- .alloc_mw = iwch_alloc_mw,
- .alloc_pd = iwch_allocate_pd,
- .alloc_ucontext = iwch_alloc_ucontext,
- .create_cq = iwch_create_cq,
- .create_qp = iwch_create_qp,
- .dealloc_mw = iwch_dealloc_mw,
- .dealloc_pd = iwch_deallocate_pd,
- .dealloc_ucontext = iwch_dealloc_ucontext,
- .dereg_mr = iwch_dereg_mr,
- .destroy_cq = iwch_destroy_cq,
- .destroy_qp = iwch_destroy_qp,
- .get_dev_fw_str = get_dev_fw_ver_str,
- .get_dma_mr = iwch_get_dma_mr,
- .get_hw_stats = iwch_get_mib,
- .get_port_immutable = iwch_port_immutable,
- .iw_accept = iwch_accept_cr,
- .iw_add_ref = iwch_qp_add_ref,
- .iw_connect = iwch_connect,
- .iw_create_listen = iwch_create_listen,
- .iw_destroy_listen = iwch_destroy_listen,
- .iw_get_qp = iwch_get_qp,
- .iw_reject = iwch_reject_cr,
- .iw_rem_ref = iwch_qp_rem_ref,
- .map_mr_sg = iwch_map_mr_sg,
- .mmap = iwch_mmap,
- .modify_qp = iwch_ib_modify_qp,
- .poll_cq = iwch_poll_cq,
- .post_recv = iwch_post_receive,
- .post_send = iwch_post_send,
- .query_device = iwch_query_device,
- .query_gid = iwch_query_gid,
- .query_pkey = iwch_query_pkey,
- .query_port = iwch_query_port,
- .reg_user_mr = iwch_reg_user_mr,
- .req_notify_cq = iwch_arm_cq,
- INIT_RDMA_OBJ_SIZE(ib_pd, iwch_pd, ibpd),
- INIT_RDMA_OBJ_SIZE(ib_cq, iwch_cq, ibcq),
- INIT_RDMA_OBJ_SIZE(ib_ucontext, iwch_ucontext, ibucontext),
-};
-
-static int set_netdevs(struct ib_device *ib_dev, struct cxio_rdev *rdev)
-{
- int ret;
- int i;
-
- for (i = 0; i < rdev->port_info.nports; i++) {
- ret = ib_device_set_netdev(ib_dev, rdev->port_info.lldevs[i],
- i + 1);
- if (ret)
- return ret;
- }
- return 0;
-}
-
-int iwch_register_device(struct iwch_dev *dev)
-{
- int err;
-
- pr_debug("%s iwch_dev %p\n", __func__, dev);
- memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
- memcpy(&dev->ibdev.node_guid, dev->rdev.t3cdev_p->lldev->dev_addr, 6);
- dev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY |
- IB_DEVICE_MEM_WINDOW |
- IB_DEVICE_MEM_MGT_EXTENSIONS;
-
- /* cxgb3 supports STag 0. */
- dev->ibdev.local_dma_lkey = 0;
-
- dev->ibdev.uverbs_cmd_mask =
- (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
- (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
- (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
- (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
- (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
- (1ull << IB_USER_VERBS_CMD_REG_MR) |
- (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
- (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
- (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
- (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
- (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
- (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
- (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
- (1ull << IB_USER_VERBS_CMD_POST_SEND) |
- (1ull << IB_USER_VERBS_CMD_POST_RECV);
- dev->ibdev.node_type = RDMA_NODE_RNIC;
- BUILD_BUG_ON(sizeof(IWCH_NODE_DESC) > IB_DEVICE_NODE_DESC_MAX);
- memcpy(dev->ibdev.node_desc, IWCH_NODE_DESC, sizeof(IWCH_NODE_DESC));
- dev->ibdev.phys_port_cnt = dev->rdev.port_info.nports;
- dev->ibdev.num_comp_vectors = 1;
- dev->ibdev.dev.parent = &dev->rdev.rnic_info.pdev->dev;
-
- memcpy(dev->ibdev.iw_ifname, dev->rdev.t3cdev_p->lldev->name,
- sizeof(dev->ibdev.iw_ifname));
-
- rdma_set_device_sysfs_group(&dev->ibdev, &iwch_attr_group);
- ib_set_device_ops(&dev->ibdev, &iwch_dev_ops);
- err = set_netdevs(&dev->ibdev, &dev->rdev);
- if (err)
- return err;
-
- return ib_register_device(&dev->ibdev, "cxgb3_%d");
-}
-
-void iwch_unregister_device(struct iwch_dev *dev)
-{
- pr_debug("%s iwch_dev %p\n", __func__, dev);
-	ib_unregister_device(&dev->ibdev);
-}
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.h b/drivers/infiniband/hw/cxgb3/iwch_provider.h
deleted file mode 100644
index 8adbe9658935..000000000000
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.h
+++ /dev/null
@@ -1,347 +0,0 @@
-/*
- * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef __IWCH_PROVIDER_H__
-#define __IWCH_PROVIDER_H__
-
-#include <linux/list.h>
-#include <linux/spinlock.h>
-#include <rdma/ib_verbs.h>
-#include <asm/types.h>
-#include "t3cdev.h"
-#include "iwch.h"
-#include "cxio_wr.h"
-#include "cxio_hal.h"
-
-struct iwch_pd {
- struct ib_pd ibpd;
- u32 pdid;
- struct iwch_dev *rhp;
-};
-
-static inline struct iwch_pd *to_iwch_pd(struct ib_pd *ibpd)
-{
- return container_of(ibpd, struct iwch_pd, ibpd);
-}
-
-struct tpt_attributes {
- u32 stag;
- u32 state:1;
- u32 type:2;
- u32 rsvd:1;
- enum tpt_mem_perm perms;
- u32 remote_invaliate_disable:1;
- u32 zbva:1;
- u32 mw_bind_enable:1;
- u32 page_size:5;
-
- u32 pdid;
- u32 qpid;
- u32 pbl_addr;
- u32 len;
- u64 va_fbo;
- u32 pbl_size;
-};
-
-struct iwch_mr {
- struct ib_mr ibmr;
- struct ib_umem *umem;
- struct iwch_dev *rhp;
- u64 kva;
- struct tpt_attributes attr;
- u64 *pages;
- u32 npages;
-};
-
-typedef struct iwch_mw iwch_mw_handle;
-
-static inline struct iwch_mr *to_iwch_mr(struct ib_mr *ibmr)
-{
- return container_of(ibmr, struct iwch_mr, ibmr);
-}
-
-struct iwch_mw {
- struct ib_mw ibmw;
- struct iwch_dev *rhp;
- u64 kva;
- struct tpt_attributes attr;
-};
-
-static inline struct iwch_mw *to_iwch_mw(struct ib_mw *ibmw)
-{
- return container_of(ibmw, struct iwch_mw, ibmw);
-}
-
-struct iwch_cq {
- struct ib_cq ibcq;
- struct iwch_dev *rhp;
- struct t3_cq cq;
- spinlock_t lock;
- spinlock_t comp_handler_lock;
- atomic_t refcnt;
- wait_queue_head_t wait;
- u32 __user *user_rptr_addr;
-};
-
-static inline struct iwch_cq *to_iwch_cq(struct ib_cq *ibcq)
-{
- return container_of(ibcq, struct iwch_cq, ibcq);
-}
-
-enum IWCH_QP_FLAGS {
- QP_QUIESCED = 0x01
-};
-
-struct iwch_mpa_attributes {
- u8 initiator;
- u8 recv_marker_enabled;
-	u8 xmit_marker_enabled;	/* iWARP: enable MPA markers on transmit */
- u8 crc_enabled;
- u8 version; /* 0 or 1 */
-};
-
-struct iwch_qp_attributes {
- u32 scq;
- u32 rcq;
- u32 sq_num_entries;
- u32 rq_num_entries;
- u32 sq_max_sges;
- u32 sq_max_sges_rdma_write;
- u32 rq_max_sges;
- u32 state;
- u8 enable_rdma_read;
- u8 enable_rdma_write; /* enable inbound Read Resp. */
- u8 enable_bind;
- u8 enable_mmid0_fastreg; /* Enable STAG0 + Fast-register */
-	u32 max_ord;
-	u32 max_ird;
-	u32 pd;	/* IN */
-	/*
-	 * Next QP state. If it specifies the current state, only the
-	 * QP attributes will be modified.
-	 */
-	u32 next_state;
- char terminate_buffer[52];
- u32 terminate_msg_len;
- u8 is_terminate_local;
- struct iwch_mpa_attributes mpa_attr; /* IN-OUT */
- struct iwch_ep *llp_stream_handle;
- char *stream_msg_buf; /* Last stream msg. before Idle -> RTS */
- u32 stream_msg_buf_len; /* Only on Idle -> RTS */
-};
-
-struct iwch_qp {
- struct ib_qp ibqp;
- struct iwch_dev *rhp;
- struct iwch_ep *ep;
- struct iwch_qp_attributes attr;
- struct t3_wq wq;
- spinlock_t lock;
- atomic_t refcnt;
- wait_queue_head_t wait;
- enum IWCH_QP_FLAGS flags;
-};
-
-static inline int qp_quiesced(struct iwch_qp *qhp)
-{
- return qhp->flags & QP_QUIESCED;
-}
-
-static inline struct iwch_qp *to_iwch_qp(struct ib_qp *ibqp)
-{
- return container_of(ibqp, struct iwch_qp, ibqp);
-}
-
-void iwch_qp_add_ref(struct ib_qp *qp);
-void iwch_qp_rem_ref(struct ib_qp *qp);
-
-struct iwch_ucontext {
- struct ib_ucontext ibucontext;
- struct cxio_ucontext uctx;
- u32 key;
- spinlock_t mmap_lock;
- struct list_head mmaps;
-};
-
-static inline struct iwch_ucontext *to_iwch_ucontext(struct ib_ucontext *c)
-{
- return container_of(c, struct iwch_ucontext, ibucontext);
-}
-
-struct iwch_mm_entry {
- struct list_head entry;
- u64 addr;
- u32 key;
- unsigned len;
-};
-
-static inline struct iwch_mm_entry *remove_mmap(struct iwch_ucontext *ucontext,
- u32 key, unsigned len)
-{
- struct list_head *pos, *nxt;
- struct iwch_mm_entry *mm;
-
- spin_lock(&ucontext->mmap_lock);
- list_for_each_safe(pos, nxt, &ucontext->mmaps) {
- mm = list_entry(pos, struct iwch_mm_entry, entry);
- if (mm->key == key && mm->len == len) {
- list_del_init(&mm->entry);
- spin_unlock(&ucontext->mmap_lock);
- pr_debug("%s key 0x%x addr 0x%llx len %d\n",
- __func__, key,
- (unsigned long long)mm->addr, mm->len);
- return mm;
- }
- }
- spin_unlock(&ucontext->mmap_lock);
- return NULL;
-}
-
-static inline void insert_mmap(struct iwch_ucontext *ucontext,
- struct iwch_mm_entry *mm)
-{
- spin_lock(&ucontext->mmap_lock);
- pr_debug("%s key 0x%x addr 0x%llx len %d\n",
- __func__, mm->key, (unsigned long long)mm->addr, mm->len);
- list_add_tail(&mm->entry, &ucontext->mmaps);
- spin_unlock(&ucontext->mmap_lock);
-}
-
-enum iwch_qp_attr_mask {
- IWCH_QP_ATTR_NEXT_STATE = 1 << 0,
- IWCH_QP_ATTR_ENABLE_RDMA_READ = 1 << 7,
- IWCH_QP_ATTR_ENABLE_RDMA_WRITE = 1 << 8,
- IWCH_QP_ATTR_ENABLE_RDMA_BIND = 1 << 9,
- IWCH_QP_ATTR_MAX_ORD = 1 << 11,
- IWCH_QP_ATTR_MAX_IRD = 1 << 12,
- IWCH_QP_ATTR_LLP_STREAM_HANDLE = 1 << 22,
- IWCH_QP_ATTR_STREAM_MSG_BUFFER = 1 << 23,
- IWCH_QP_ATTR_MPA_ATTR = 1 << 24,
- IWCH_QP_ATTR_QP_CONTEXT_ACTIVATE = 1 << 25,
- IWCH_QP_ATTR_VALID_MODIFY = (IWCH_QP_ATTR_ENABLE_RDMA_READ |
- IWCH_QP_ATTR_ENABLE_RDMA_WRITE |
- IWCH_QP_ATTR_MAX_ORD |
- IWCH_QP_ATTR_MAX_IRD |
- IWCH_QP_ATTR_LLP_STREAM_HANDLE |
- IWCH_QP_ATTR_STREAM_MSG_BUFFER |
- IWCH_QP_ATTR_MPA_ATTR |
- IWCH_QP_ATTR_QP_CONTEXT_ACTIVATE)
-};
-
-int iwch_modify_qp(struct iwch_dev *rhp,
- struct iwch_qp *qhp,
- enum iwch_qp_attr_mask mask,
- struct iwch_qp_attributes *attrs,
- int internal);
-
-enum iwch_qp_state {
- IWCH_QP_STATE_IDLE,
- IWCH_QP_STATE_RTS,
- IWCH_QP_STATE_ERROR,
- IWCH_QP_STATE_TERMINATE,
- IWCH_QP_STATE_CLOSING,
- IWCH_QP_STATE_TOT
-};
-
-static inline int iwch_convert_state(enum ib_qp_state ib_state)
-{
- switch (ib_state) {
- case IB_QPS_RESET:
- case IB_QPS_INIT:
- return IWCH_QP_STATE_IDLE;
- case IB_QPS_RTS:
- return IWCH_QP_STATE_RTS;
- case IB_QPS_SQD:
- return IWCH_QP_STATE_CLOSING;
- case IB_QPS_SQE:
- return IWCH_QP_STATE_TERMINATE;
- case IB_QPS_ERR:
- return IWCH_QP_STATE_ERROR;
- default:
- return -1;
- }
-}
-
-static inline u32 iwch_ib_to_tpt_access(int acc)
-{
- return (acc & IB_ACCESS_REMOTE_WRITE ? TPT_REMOTE_WRITE : 0) |
- (acc & IB_ACCESS_REMOTE_READ ? TPT_REMOTE_READ : 0) |
- (acc & IB_ACCESS_LOCAL_WRITE ? TPT_LOCAL_WRITE : 0) |
- (acc & IB_ACCESS_MW_BIND ? TPT_MW_BIND : 0) |
- TPT_LOCAL_READ;
-}
-
-static inline u32 iwch_ib_to_tpt_bind_access(int acc)
-{
- return (acc & IB_ACCESS_REMOTE_WRITE ? TPT_REMOTE_WRITE : 0) |
- (acc & IB_ACCESS_REMOTE_READ ? TPT_REMOTE_READ : 0);
-}
-
-enum iwch_mmid_state {
- IWCH_STAG_STATE_VALID,
- IWCH_STAG_STATE_INVALID
-};
-
-enum iwch_qp_query_flags {
- IWCH_QP_QUERY_CONTEXT_NONE = 0x0, /* No ctx; Only attrs */
- IWCH_QP_QUERY_CONTEXT_GET = 0x1, /* Get ctx + attrs */
- IWCH_QP_QUERY_CONTEXT_SUSPEND = 0x2, /* Not Supported */
-
- /*
- * Quiesce QP context; Consumer
- * will NOT replay outstanding WR
- */
- IWCH_QP_QUERY_CONTEXT_QUIESCE = 0x4,
- IWCH_QP_QUERY_CONTEXT_REMOVE = 0x8,
- IWCH_QP_QUERY_TEST_USERWRITE = 0x32 /* Test special */
-};
-
-u16 iwch_rqes_posted(struct iwch_qp *qhp);
-int iwch_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
- const struct ib_send_wr **bad_wr);
-int iwch_post_receive(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
- const struct ib_recv_wr **bad_wr);
-int iwch_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
-int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg);
-int iwch_post_zb_read(struct iwch_ep *ep);
-int iwch_register_device(struct iwch_dev *dev);
-void iwch_unregister_device(struct iwch_dev *dev);
-void stop_read_rep_timer(struct iwch_qp *qhp);
-int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php,
- struct iwch_mr *mhp, int shift);
-int iwch_alloc_pbl(struct iwch_mr *mhp, int npages);
-void iwch_free_pbl(struct iwch_mr *mhp);
-int iwch_write_pbl(struct iwch_mr *mhp, __be64 *pages, int npages, int offset);
-
-#define IWCH_NODE_DESC "cxgb3 Chelsio Communications"
-
-#endif
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
deleted file mode 100644
index c649faad63f9..000000000000
--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ /dev/null
@@ -1,1082 +0,0 @@
-/*
- * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include <linux/sched.h>
-#include <linux/gfp.h>
-#include "iwch_provider.h"
-#include "iwch.h"
-#include "iwch_cm.h"
-#include "cxio_hal.h"
-#include "cxio_resource.h"
-
-#define NO_SUPPORT -1
-
-static int build_rdma_send(union t3_wr *wqe, const struct ib_send_wr *wr,
- u8 *flit_cnt)
-{
- int i;
- u32 plen;
-
- switch (wr->opcode) {
- case IB_WR_SEND:
- if (wr->send_flags & IB_SEND_SOLICITED)
- wqe->send.rdmaop = T3_SEND_WITH_SE;
- else
- wqe->send.rdmaop = T3_SEND;
- wqe->send.rem_stag = 0;
- break;
- case IB_WR_SEND_WITH_INV:
- if (wr->send_flags & IB_SEND_SOLICITED)
- wqe->send.rdmaop = T3_SEND_WITH_SE_INV;
- else
- wqe->send.rdmaop = T3_SEND_WITH_INV;
- wqe->send.rem_stag = cpu_to_be32(wr->ex.invalidate_rkey);
- break;
- default:
- return -EINVAL;
- }
- if (wr->num_sge > T3_MAX_SGE)
- return -EINVAL;
- wqe->send.reserved[0] = 0;
- wqe->send.reserved[1] = 0;
- wqe->send.reserved[2] = 0;
- plen = 0;
- for (i = 0; i < wr->num_sge; i++) {
- if ((plen + wr->sg_list[i].length) < plen)
- return -EMSGSIZE;
-
- plen += wr->sg_list[i].length;
- wqe->send.sgl[i].stag = cpu_to_be32(wr->sg_list[i].lkey);
- wqe->send.sgl[i].len = cpu_to_be32(wr->sg_list[i].length);
- wqe->send.sgl[i].to = cpu_to_be64(wr->sg_list[i].addr);
- }
- wqe->send.num_sgle = cpu_to_be32(wr->num_sge);
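-	/* four header flits plus two 8-byte flits per SGE */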
- *flit_cnt = 4 + ((wr->num_sge) << 1);
- wqe->send.plen = cpu_to_be32(plen);
- return 0;
-}
-
-static int build_rdma_write(union t3_wr *wqe, const struct ib_send_wr *wr,
- u8 *flit_cnt)
-{
- int i;
- u32 plen;
- if (wr->num_sge > T3_MAX_SGE)
- return -EINVAL;
- wqe->write.rdmaop = T3_RDMA_WRITE;
- wqe->write.reserved[0] = 0;
- wqe->write.reserved[1] = 0;
- wqe->write.reserved[2] = 0;
- wqe->write.stag_sink = cpu_to_be32(rdma_wr(wr)->rkey);
- wqe->write.to_sink = cpu_to_be64(rdma_wr(wr)->remote_addr);
-
- if (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
- plen = 4;
- wqe->write.sgl[0].stag = wr->ex.imm_data;
- wqe->write.sgl[0].len = cpu_to_be32(0);
- wqe->write.num_sgle = cpu_to_be32(0);
- *flit_cnt = 6;
- } else {
- plen = 0;
- for (i = 0; i < wr->num_sge; i++) {
- if ((plen + wr->sg_list[i].length) < plen) {
- return -EMSGSIZE;
- }
- plen += wr->sg_list[i].length;
- wqe->write.sgl[i].stag =
- cpu_to_be32(wr->sg_list[i].lkey);
- wqe->write.sgl[i].len =
- cpu_to_be32(wr->sg_list[i].length);
- wqe->write.sgl[i].to =
- cpu_to_be64(wr->sg_list[i].addr);
- }
- wqe->write.num_sgle = cpu_to_be32(wr->num_sge);
- *flit_cnt = 5 + ((wr->num_sge) << 1);
- }
- wqe->write.plen = cpu_to_be32(plen);
- return 0;
-}
-
-static int build_rdma_read(union t3_wr *wqe, const struct ib_send_wr *wr,
- u8 *flit_cnt)
-{
- if (wr->num_sge > 1)
- return -EINVAL;
- wqe->read.rdmaop = T3_READ_REQ;
- if (wr->opcode == IB_WR_RDMA_READ_WITH_INV)
- wqe->read.local_inv = 1;
- else
- wqe->read.local_inv = 0;
- wqe->read.reserved[0] = 0;
- wqe->read.reserved[1] = 0;
- wqe->read.rem_stag = cpu_to_be32(rdma_wr(wr)->rkey);
- wqe->read.rem_to = cpu_to_be64(rdma_wr(wr)->remote_addr);
- wqe->read.local_stag = cpu_to_be32(wr->sg_list[0].lkey);
- wqe->read.local_len = cpu_to_be32(wr->sg_list[0].length);
- wqe->read.local_to = cpu_to_be64(wr->sg_list[0].addr);
- *flit_cnt = sizeof(struct t3_rdma_read_wr) >> 3;
- return 0;
-}
-
-static int build_memreg(union t3_wr *wqe, const struct ib_reg_wr *wr,
- u8 *flit_cnt, int *wr_cnt, struct t3_wq *wq)
-{
- struct iwch_mr *mhp = to_iwch_mr(wr->mr);
- int i;
- __be64 *p;
-
- if (mhp->npages > T3_MAX_FASTREG_DEPTH)
- return -EINVAL;
- *wr_cnt = 1;
- wqe->fastreg.stag = cpu_to_be32(wr->key);
- wqe->fastreg.len = cpu_to_be32(mhp->ibmr.length);
- wqe->fastreg.va_base_hi = cpu_to_be32(mhp->ibmr.iova >> 32);
- wqe->fastreg.va_base_lo_fbo =
- cpu_to_be32(mhp->ibmr.iova & 0xffffffff);
- wqe->fastreg.page_type_perms = cpu_to_be32(
- V_FR_PAGE_COUNT(mhp->npages) |
- V_FR_PAGE_SIZE(ilog2(wr->mr->page_size) - 12) |
- V_FR_TYPE(TPT_VATO) |
- V_FR_PERMS(iwch_ib_to_tpt_access(wr->access)));
- p = &wqe->fastreg.pbl_addrs[0];
- for (i = 0; i < mhp->npages; i++, p++) {
- /* If we need a 2nd WR, then set it up */
- if (i == T3_MAX_FASTREG_FRAG) {
- *wr_cnt = 2;
- wqe = (union t3_wr *)(wq->queue +
- Q_PTR2IDX((wq->wptr+1), wq->size_log2));
- build_fw_riwrh((void *)wqe, T3_WR_FASTREG, 0,
- Q_GENBIT(wq->wptr + 1, wq->size_log2),
- 0, 1 + mhp->npages - T3_MAX_FASTREG_FRAG,
- T3_EOP);
-
- p = &wqe->pbl_frag.pbl_addrs[0];
- }
- *p = cpu_to_be64((u64)mhp->pages[i]);
- }
- *flit_cnt = 5 + mhp->npages;
- if (*flit_cnt > 15)
- *flit_cnt = 15;
- return 0;
-}
-
-static int build_inv_stag(union t3_wr *wqe, const struct ib_send_wr *wr,
- u8 *flit_cnt)
-{
- wqe->local_inv.stag = cpu_to_be32(wr->ex.invalidate_rkey);
- wqe->local_inv.reserved = 0;
- *flit_cnt = sizeof(struct t3_local_inv_wr) >> 3;
- return 0;
-}
-
-static int iwch_sgl2pbl_map(struct iwch_dev *rhp, struct ib_sge *sg_list,
-			    u32 num_sgle, u32 *pbl_addr, u8 *page_size)
-{
- int i;
- struct iwch_mr *mhp;
- u64 offset;
- for (i = 0; i < num_sgle; i++) {
- mhp = get_mhp(rhp, (sg_list[i].lkey) >> 8);
- if (!mhp) {
- pr_debug("%s %d\n", __func__, __LINE__);
- return -EIO;
- }
- if (!mhp->attr.state) {
- pr_debug("%s %d\n", __func__, __LINE__);
- return -EIO;
- }
- if (mhp->attr.zbva) {
- pr_debug("%s %d\n", __func__, __LINE__);
- return -EIO;
- }
-
- if (sg_list[i].addr < mhp->attr.va_fbo) {
- pr_debug("%s %d\n", __func__, __LINE__);
- return -EINVAL;
- }
- if (sg_list[i].addr + ((u64) sg_list[i].length) <
- sg_list[i].addr) {
- pr_debug("%s %d\n", __func__, __LINE__);
- return -EINVAL;
- }
- if (sg_list[i].addr + ((u64) sg_list[i].length) >
- mhp->attr.va_fbo + ((u64) mhp->attr.len)) {
- pr_debug("%s %d\n", __func__, __LINE__);
- return -EINVAL;
- }
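-		/*
-		 * Translate the SGE address into a PBL index: the byte
-		 * offset within the region (including the FBO of the first
-		 * page) shifted down by the region page size, relative to
-		 * the PBL base.
-		 */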
- offset = sg_list[i].addr - mhp->attr.va_fbo;
- offset += mhp->attr.va_fbo &
- ((1UL << (12 + mhp->attr.page_size)) - 1);
- pbl_addr[i] = ((mhp->attr.pbl_addr -
- rhp->rdev.rnic_info.pbl_base) >> 3) +
- (offset >> (12 + mhp->attr.page_size));
- page_size[i] = mhp->attr.page_size;
- }
- return 0;
-}
-
-static int build_rdma_recv(struct iwch_qp *qhp, union t3_wr *wqe,
- const struct ib_recv_wr *wr)
-{
- int i, err = 0;
- u32 pbl_addr[T3_MAX_SGE];
- u8 page_size[T3_MAX_SGE];
-
- err = iwch_sgl2pbl_map(qhp->rhp, wr->sg_list, wr->num_sge, pbl_addr,
- page_size);
- if (err)
- return err;
- wqe->recv.pagesz[0] = page_size[0];
- wqe->recv.pagesz[1] = page_size[1];
- wqe->recv.pagesz[2] = page_size[2];
- wqe->recv.pagesz[3] = page_size[3];
- wqe->recv.num_sgle = cpu_to_be32(wr->num_sge);
- for (i = 0; i < wr->num_sge; i++) {
- wqe->recv.sgl[i].stag = cpu_to_be32(wr->sg_list[i].lkey);
- wqe->recv.sgl[i].len = cpu_to_be32(wr->sg_list[i].length);
-
-		/* 'to' in the WQE is the offset into the page */
- wqe->recv.sgl[i].to = cpu_to_be64(((u32)wr->sg_list[i].addr) &
- ((1UL << (12 + page_size[i])) - 1));
-
-		/* pbl_addr is the adapter's address in the PBL */
- wqe->recv.pbl_addr[i] = cpu_to_be32(pbl_addr[i]);
- }
- for (; i < T3_MAX_SGE; i++) {
- wqe->recv.sgl[i].stag = 0;
- wqe->recv.sgl[i].len = 0;
- wqe->recv.sgl[i].to = 0;
- wqe->recv.pbl_addr[i] = 0;
- }
- qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr,
- qhp->wq.rq_size_log2)].wr_id = wr->wr_id;
- qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr,
- qhp->wq.rq_size_log2)].pbl_addr = 0;
- return 0;
-}
-
-static int build_zero_stag_recv(struct iwch_qp *qhp, union t3_wr *wqe,
- const struct ib_recv_wr *wr)
-{
- int i;
- u32 pbl_addr;
- u32 pbl_offset;
-
- /*
- * The T3 HW requires the PBL in the HW recv descriptor to reference
- * a PBL entry. So we allocate the max needed PBL memory here and pass
- * it to the uP in the recv WR. The uP will build the PBL and setup
- * the HW recv descriptor.
- */
- pbl_addr = cxio_hal_pblpool_alloc(&qhp->rhp->rdev, T3_STAG0_PBL_SIZE);
- if (!pbl_addr)
- return -ENOMEM;
-
- /*
- * Compute the 8B aligned offset.
- */
- pbl_offset = (pbl_addr - qhp->rhp->rdev.rnic_info.pbl_base) >> 3;
-
- wqe->recv.num_sgle = cpu_to_be32(wr->num_sge);
-
- for (i = 0; i < wr->num_sge; i++) {
- /*
- * Use a 128MB page size. This and an imposed 128MB
-		 * SGE length limit allow us to require only a 2-entry HW
-		 * PBL for each SGE. This restriction is acceptable since
-		 * it is not possible to allocate 128MB of contiguous
- * DMA coherent memory!
- */
- if (wr->sg_list[i].length > T3_STAG0_MAX_PBE_LEN)
- return -EINVAL;
- wqe->recv.pagesz[i] = T3_STAG0_PAGE_SHIFT;
-
- /*
- * T3 restricts a recv to all zero-stag or all non-zero-stag.
- */
- if (wr->sg_list[i].lkey != 0)
- return -EINVAL;
- wqe->recv.sgl[i].stag = 0;
- wqe->recv.sgl[i].len = cpu_to_be32(wr->sg_list[i].length);
- wqe->recv.sgl[i].to = cpu_to_be64(wr->sg_list[i].addr);
- wqe->recv.pbl_addr[i] = cpu_to_be32(pbl_offset);
- pbl_offset += 2;
- }
- for (; i < T3_MAX_SGE; i++) {
- wqe->recv.pagesz[i] = 0;
- wqe->recv.sgl[i].stag = 0;
- wqe->recv.sgl[i].len = 0;
- wqe->recv.sgl[i].to = 0;
- wqe->recv.pbl_addr[i] = 0;
- }
- qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr,
- qhp->wq.rq_size_log2)].wr_id = wr->wr_id;
- qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr,
- qhp->wq.rq_size_log2)].pbl_addr = pbl_addr;
- return 0;
-}
-
-int iwch_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
- const struct ib_send_wr **bad_wr)
-{
- int err = 0;
- u8 uninitialized_var(t3_wr_flit_cnt);
- enum t3_wr_opcode t3_wr_opcode = 0;
- enum t3_wr_flags t3_wr_flags;
- struct iwch_qp *qhp;
- u32 idx;
- union t3_wr *wqe;
- u32 num_wrs;
- unsigned long flag;
- struct t3_swsq *sqp;
- int wr_cnt = 1;
-
- qhp = to_iwch_qp(ibqp);
- spin_lock_irqsave(&qhp->lock, flag);
- if (qhp->attr.state > IWCH_QP_STATE_RTS) {
- spin_unlock_irqrestore(&qhp->lock, flag);
- err = -EINVAL;
- goto out;
- }
- num_wrs = Q_FREECNT(qhp->wq.sq_rptr, qhp->wq.sq_wptr,
- qhp->wq.sq_size_log2);
- if (num_wrs == 0) {
- spin_unlock_irqrestore(&qhp->lock, flag);
- err = -ENOMEM;
- goto out;
- }
- while (wr) {
- if (num_wrs == 0) {
- err = -ENOMEM;
- break;
- }
- idx = Q_PTR2IDX(qhp->wq.wptr, qhp->wq.size_log2);
- wqe = (union t3_wr *) (qhp->wq.queue + idx);
- t3_wr_flags = 0;
- if (wr->send_flags & IB_SEND_SOLICITED)
- t3_wr_flags |= T3_SOLICITED_EVENT_FLAG;
- if (wr->send_flags & IB_SEND_SIGNALED)
- t3_wr_flags |= T3_COMPLETION_FLAG;
- sqp = qhp->wq.sq +
- Q_PTR2IDX(qhp->wq.sq_wptr, qhp->wq.sq_size_log2);
- switch (wr->opcode) {
- case IB_WR_SEND:
- case IB_WR_SEND_WITH_INV:
- if (wr->send_flags & IB_SEND_FENCE)
- t3_wr_flags |= T3_READ_FENCE_FLAG;
- t3_wr_opcode = T3_WR_SEND;
- err = build_rdma_send(wqe, wr, &t3_wr_flit_cnt);
- break;
- case IB_WR_RDMA_WRITE:
- case IB_WR_RDMA_WRITE_WITH_IMM:
- t3_wr_opcode = T3_WR_WRITE;
- err = build_rdma_write(wqe, wr, &t3_wr_flit_cnt);
- break;
- case IB_WR_RDMA_READ:
- case IB_WR_RDMA_READ_WITH_INV:
- t3_wr_opcode = T3_WR_READ;
- t3_wr_flags = 0; /* T3 reads are always signaled */
- err = build_rdma_read(wqe, wr, &t3_wr_flit_cnt);
- if (err)
- break;
- sqp->read_len = wqe->read.local_len;
- if (!qhp->wq.oldest_read)
- qhp->wq.oldest_read = sqp;
- break;
- case IB_WR_REG_MR:
- t3_wr_opcode = T3_WR_FASTREG;
- err = build_memreg(wqe, reg_wr(wr), &t3_wr_flit_cnt,
- &wr_cnt, &qhp->wq);
- break;
- case IB_WR_LOCAL_INV:
- if (wr->send_flags & IB_SEND_FENCE)
- t3_wr_flags |= T3_LOCAL_FENCE_FLAG;
- t3_wr_opcode = T3_WR_INV_STAG;
- err = build_inv_stag(wqe, wr, &t3_wr_flit_cnt);
- break;
- default:
- pr_debug("%s post of type=%d TBD!\n", __func__,
- wr->opcode);
- err = -EINVAL;
- }
- if (err)
- break;
- wqe->send.wrid.id0.hi = qhp->wq.sq_wptr;
- sqp->wr_id = wr->wr_id;
- sqp->opcode = wr2opcode(t3_wr_opcode);
- sqp->sq_wptr = qhp->wq.sq_wptr;
- sqp->complete = 0;
- sqp->signaled = (wr->send_flags & IB_SEND_SIGNALED);
-
- build_fw_riwrh((void *) wqe, t3_wr_opcode, t3_wr_flags,
- Q_GENBIT(qhp->wq.wptr, qhp->wq.size_log2),
- 0, t3_wr_flit_cnt,
- (wr_cnt == 1) ? T3_SOPEOP : T3_SOP);
- pr_debug("%s cookie 0x%llx wq idx 0x%x swsq idx %ld opcode %d\n",
- __func__, (unsigned long long)wr->wr_id, idx,
- Q_PTR2IDX(qhp->wq.sq_wptr, qhp->wq.sq_size_log2),
- sqp->opcode);
- wr = wr->next;
- num_wrs--;
- qhp->wq.wptr += wr_cnt;
- ++(qhp->wq.sq_wptr);
- }
- spin_unlock_irqrestore(&qhp->lock, flag);
- if (cxio_wq_db_enabled(&qhp->wq))
- ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);
-
-out:
- if (err)
- *bad_wr = wr;
- return err;
-}
-
-int iwch_post_receive(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
- const struct ib_recv_wr **bad_wr)
-{
- int err = 0;
- struct iwch_qp *qhp;
- u32 idx;
- union t3_wr *wqe;
- u32 num_wrs;
- unsigned long flag;
-
- qhp = to_iwch_qp(ibqp);
- spin_lock_irqsave(&qhp->lock, flag);
- if (qhp->attr.state > IWCH_QP_STATE_RTS) {
- spin_unlock_irqrestore(&qhp->lock, flag);
- err = -EINVAL;
- goto out;
- }
- num_wrs = Q_FREECNT(qhp->wq.rq_rptr, qhp->wq.rq_wptr,
- qhp->wq.rq_size_log2) - 1;
- if (!wr) {
- spin_unlock_irqrestore(&qhp->lock, flag);
-		err = -EINVAL;
- goto out;
- }
- while (wr) {
- if (wr->num_sge > T3_MAX_SGE) {
- err = -EINVAL;
- break;
- }
- idx = Q_PTR2IDX(qhp->wq.wptr, qhp->wq.size_log2);
- wqe = (union t3_wr *) (qhp->wq.queue + idx);
-		if (num_wrs) {
-			if (wr->sg_list[0].lkey)
-				err = build_rdma_recv(qhp, wqe, wr);
-			else
-				err = build_zero_stag_recv(qhp, wqe, wr);
-		} else {
-			err = -ENOMEM;
-		}
-
- if (err)
- break;
-
- build_fw_riwrh((void *) wqe, T3_WR_RCV, T3_COMPLETION_FLAG,
- Q_GENBIT(qhp->wq.wptr, qhp->wq.size_log2),
- 0, sizeof(struct t3_receive_wr) >> 3, T3_SOPEOP);
-		pr_debug("%s cookie 0x%llx idx 0x%x rq_wptr 0x%x rq_rptr 0x%x wqe %p\n",
- __func__, (unsigned long long)wr->wr_id,
- idx, qhp->wq.rq_wptr, qhp->wq.rq_rptr, wqe);
- ++(qhp->wq.rq_wptr);
- ++(qhp->wq.wptr);
- wr = wr->next;
- num_wrs--;
- }
- spin_unlock_irqrestore(&qhp->lock, flag);
- if (cxio_wq_db_enabled(&qhp->wq))
- ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);
-
-out:
- if (err)
- *bad_wr = wr;
- return err;
-}
-
-static inline void build_term_codes(struct respQ_msg_t *rsp_msg,
- u8 *layer_type, u8 *ecode)
-{
- int status = TPT_ERR_INTERNAL_ERR;
- int tagged = 0;
- int opcode = -1;
- int rqtype = 0;
- int send_inv = 0;
-
- if (rsp_msg) {
- status = CQE_STATUS(rsp_msg->cqe);
- opcode = CQE_OPCODE(rsp_msg->cqe);
- rqtype = RQ_TYPE(rsp_msg->cqe);
- send_inv = (opcode == T3_SEND_WITH_INV) ||
- (opcode == T3_SEND_WITH_SE_INV);
- tagged = (opcode == T3_RDMA_WRITE) ||
- (rqtype && (opcode == T3_READ_RESP));
- }
-
- switch (status) {
- case TPT_ERR_STAG:
- if (send_inv) {
- *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
- *ecode = RDMAP_CANT_INV_STAG;
- } else {
- *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
- *ecode = RDMAP_INV_STAG;
- }
- break;
- case TPT_ERR_PDID:
- *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
- if ((opcode == T3_SEND_WITH_INV) ||
- (opcode == T3_SEND_WITH_SE_INV))
- *ecode = RDMAP_CANT_INV_STAG;
- else
- *ecode = RDMAP_STAG_NOT_ASSOC;
- break;
- case TPT_ERR_QPID:
- *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
- *ecode = RDMAP_STAG_NOT_ASSOC;
- break;
- case TPT_ERR_ACCESS:
- *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
- *ecode = RDMAP_ACC_VIOL;
- break;
- case TPT_ERR_WRAP:
- *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
- *ecode = RDMAP_TO_WRAP;
- break;
- case TPT_ERR_BOUND:
- if (tagged) {
- *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
- *ecode = DDPT_BASE_BOUNDS;
- } else {
- *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
- *ecode = RDMAP_BASE_BOUNDS;
- }
- break;
- case TPT_ERR_INVALIDATE_SHARED_MR:
- case TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND:
- *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
- *ecode = RDMAP_CANT_INV_STAG;
- break;
- case TPT_ERR_ECC:
- case TPT_ERR_ECC_PSTAG:
- case TPT_ERR_INTERNAL_ERR:
- *layer_type = LAYER_RDMAP|RDMAP_LOCAL_CATA;
- *ecode = 0;
- break;
- case TPT_ERR_OUT_OF_RQE:
- *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
- *ecode = DDPU_INV_MSN_NOBUF;
- break;
- case TPT_ERR_PBL_ADDR_BOUND:
- *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
- *ecode = DDPT_BASE_BOUNDS;
- break;
- case TPT_ERR_CRC:
- *layer_type = LAYER_MPA|DDP_LLP;
- *ecode = MPA_CRC_ERR;
- break;
- case TPT_ERR_MARKER:
- *layer_type = LAYER_MPA|DDP_LLP;
- *ecode = MPA_MARKER_ERR;
- break;
- case TPT_ERR_PDU_LEN_ERR:
- *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
- *ecode = DDPU_MSG_TOOBIG;
- break;
- case TPT_ERR_DDP_VERSION:
- if (tagged) {
- *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
- *ecode = DDPT_INV_VERS;
- } else {
- *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
- *ecode = DDPU_INV_VERS;
- }
- break;
- case TPT_ERR_RDMA_VERSION:
- *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
- *ecode = RDMAP_INV_VERS;
- break;
- case TPT_ERR_OPCODE:
- *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
- *ecode = RDMAP_INV_OPCODE;
- break;
- case TPT_ERR_DDP_QUEUE_NUM:
- *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
- *ecode = DDPU_INV_QN;
- break;
- case TPT_ERR_MSN:
- case TPT_ERR_MSN_GAP:
- case TPT_ERR_MSN_RANGE:
- case TPT_ERR_IRD_OVERFLOW:
- *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
- *ecode = DDPU_INV_MSN_RANGE;
- break;
- case TPT_ERR_TBIT:
- *layer_type = LAYER_DDP|DDP_LOCAL_CATA;
- *ecode = 0;
- break;
- case TPT_ERR_MO:
- *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
- *ecode = DDPU_INV_MO;
- break;
- default:
- *layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
- *ecode = 0;
- break;
- }
-}
-
-int iwch_post_zb_read(struct iwch_ep *ep)
-{
- union t3_wr *wqe;
- struct sk_buff *skb;
- u8 flit_cnt = sizeof(struct t3_rdma_read_wr) >> 3;
-
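-	/* Posts a zero-length RDMA READ, used as the peer2peer RTR message. */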
- pr_debug("%s enter\n", __func__);
- skb = alloc_skb(40, GFP_KERNEL);
- if (!skb) {
- pr_err("%s cannot send zb_read!!\n", __func__);
- return -ENOMEM;
- }
- wqe = skb_put_zero(skb, sizeof(struct t3_rdma_read_wr));
- wqe->read.rdmaop = T3_READ_REQ;
- wqe->read.reserved[0] = 0;
- wqe->read.reserved[1] = 0;
- wqe->read.rem_stag = cpu_to_be32(1);
- wqe->read.rem_to = cpu_to_be64(1);
- wqe->read.local_stag = cpu_to_be32(1);
- wqe->read.local_len = cpu_to_be32(0);
- wqe->read.local_to = cpu_to_be64(1);
- wqe->send.wrh.op_seop_flags = cpu_to_be32(V_FW_RIWR_OP(T3_WR_READ));
- wqe->send.wrh.gen_tid_len = cpu_to_be32(V_FW_RIWR_TID(ep->hwtid)|
- V_FW_RIWR_LEN(flit_cnt));
- skb->priority = CPL_PRIORITY_DATA;
- return iwch_cxgb3_ofld_send(ep->com.qp->rhp->rdev.t3cdev_p, skb);
-}
-
-/*
- * This posts a TERMINATE with layer=RDMA, type=catastrophic.
- */
-int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg)
-{
- union t3_wr *wqe;
- struct terminate_message *term;
- struct sk_buff *skb;
-
- pr_debug("%s %d\n", __func__, __LINE__);
- skb = alloc_skb(40, GFP_ATOMIC);
- if (!skb) {
- pr_err("%s cannot send TERMINATE!\n", __func__);
- return -ENOMEM;
- }
- wqe = skb_put_zero(skb, 40);
- wqe->send.rdmaop = T3_TERMINATE;
-
- /* immediate data length */
- wqe->send.plen = htonl(4);
-
- /* immediate data starts here. */
- term = (struct terminate_message *)wqe->send.sgl;
- build_term_codes(rsp_msg, &term->layer_etype, &term->ecode);
- wqe->send.wrh.op_seop_flags = cpu_to_be32(V_FW_RIWR_OP(T3_WR_SEND) |
- V_FW_RIWR_FLAGS(T3_COMPLETION_FLAG | T3_NOTIFY_FLAG));
- wqe->send.wrh.gen_tid_len = cpu_to_be32(V_FW_RIWR_TID(qhp->ep->hwtid));
- skb->priority = CPL_PRIORITY_DATA;
- return iwch_cxgb3_ofld_send(qhp->rhp->rdev.t3cdev_p, skb);
-}
-
-/*
- * Assumes qhp lock is held.
- */
-static void __flush_qp(struct iwch_qp *qhp, struct iwch_cq *rchp,
- struct iwch_cq *schp)
- __releases(&qhp->lock)
- __acquires(&qhp->lock)
-{
- int count;
- int flushed;
-
- lockdep_assert_held(&qhp->lock);
-
- pr_debug("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);
- /* take a ref on the qhp since we must release the lock */
- atomic_inc(&qhp->refcnt);
- spin_unlock(&qhp->lock);
-
- /* locking hierarchy: cq lock first, then qp lock. */
- spin_lock(&rchp->lock);
- spin_lock(&qhp->lock);
- cxio_flush_hw_cq(&rchp->cq);
- cxio_count_rcqes(&rchp->cq, &qhp->wq, &count);
- flushed = cxio_flush_rq(&qhp->wq, &rchp->cq, count);
- spin_unlock(&qhp->lock);
- spin_unlock(&rchp->lock);
- if (flushed) {
- spin_lock(&rchp->comp_handler_lock);
- (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
- spin_unlock(&rchp->comp_handler_lock);
- }
-
- /* locking hierarchy: cq lock first, then qp lock. */
- spin_lock(&schp->lock);
- spin_lock(&qhp->lock);
- cxio_flush_hw_cq(&schp->cq);
- cxio_count_scqes(&schp->cq, &qhp->wq, &count);
- flushed = cxio_flush_sq(&qhp->wq, &schp->cq, count);
- spin_unlock(&qhp->lock);
- spin_unlock(&schp->lock);
- if (flushed) {
- spin_lock(&schp->comp_handler_lock);
- (*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context);
- spin_unlock(&schp->comp_handler_lock);
- }
-
- /* deref */
- if (atomic_dec_and_test(&qhp->refcnt))
- wake_up(&qhp->wait);
-
- spin_lock(&qhp->lock);
-}
-
-static void flush_qp(struct iwch_qp *qhp)
-{
- struct iwch_cq *rchp, *schp;
-
- rchp = get_chp(qhp->rhp, qhp->attr.rcq);
- schp = get_chp(qhp->rhp, qhp->attr.scq);
-
- if (qhp->ibqp.uobject) {
- cxio_set_wq_in_error(&qhp->wq);
- cxio_set_cq_in_error(&rchp->cq);
- spin_lock(&rchp->comp_handler_lock);
- (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
- spin_unlock(&rchp->comp_handler_lock);
- if (schp != rchp) {
- cxio_set_cq_in_error(&schp->cq);
- spin_lock(&schp->comp_handler_lock);
- (*schp->ibcq.comp_handler)(&schp->ibcq,
- schp->ibcq.cq_context);
- spin_unlock(&schp->comp_handler_lock);
- }
- return;
- }
- __flush_qp(qhp, rchp, schp);
-}
-
-/*
- * Return count of RECV WRs posted
- */
-u16 iwch_rqes_posted(struct iwch_qp *qhp)
-{
- union t3_wr *wqe = qhp->wq.queue;
- u16 count = 0;
-
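-	/*
-	 * Only receives can be posted before the QP reaches RTS, so they
-	 * occupy the start of the WQ.
-	 */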
- while (count < USHRT_MAX && fw_riwrh_opcode((struct fw_riwrh *)wqe) == T3_WR_RCV) {
- count++;
- wqe++;
- }
- pr_debug("%s qhp %p count %u\n", __func__, qhp, count);
- return count;
-}
-
-static int rdma_init(struct iwch_dev *rhp, struct iwch_qp *qhp,
- enum iwch_qp_attr_mask mask,
- struct iwch_qp_attributes *attrs)
-{
- struct t3_rdma_init_attr init_attr;
- int ret;
-
- init_attr.tid = qhp->ep->hwtid;
- init_attr.qpid = qhp->wq.qpid;
- init_attr.pdid = qhp->attr.pd;
- init_attr.scqid = qhp->attr.scq;
- init_attr.rcqid = qhp->attr.rcq;
- init_attr.rq_addr = qhp->wq.rq_addr;
- init_attr.rq_size = 1 << qhp->wq.rq_size_log2;
- init_attr.mpaattrs = uP_RI_MPA_IETF_ENABLE |
- qhp->attr.mpa_attr.recv_marker_enabled |
- (qhp->attr.mpa_attr.xmit_marker_enabled << 1) |
- (qhp->attr.mpa_attr.crc_enabled << 2);
-
- init_attr.qpcaps = uP_RI_QP_RDMA_READ_ENABLE |
- uP_RI_QP_RDMA_WRITE_ENABLE |
- uP_RI_QP_BIND_ENABLE;
- if (!qhp->ibqp.uobject)
- init_attr.qpcaps |= uP_RI_QP_STAG0_ENABLE |
- uP_RI_QP_FAST_REGISTER_ENABLE;
-
- init_attr.tcp_emss = qhp->ep->emss;
- init_attr.ord = qhp->attr.max_ord;
- init_attr.ird = qhp->attr.max_ird;
- init_attr.qp_dma_addr = qhp->wq.dma_addr;
- init_attr.qp_dma_size = (1UL << qhp->wq.size_log2);
- init_attr.rqe_count = iwch_rqes_posted(qhp);
- init_attr.flags = qhp->attr.mpa_attr.initiator ? MPA_INITIATOR : 0;
- init_attr.chan = qhp->ep->l2t->smt_idx;
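-	/*
-	 * peer2peer mode kicks the connection with a zero-byte read (see
-	 * iwch_post_zb_read()), so the initiator needs ORD >= 1 and the
-	 * responder needs IRD >= 1.
-	 */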
- if (peer2peer) {
- init_attr.rtr_type = RTR_READ;
- if (init_attr.ord == 0 && qhp->attr.mpa_attr.initiator)
- init_attr.ord = 1;
- if (init_attr.ird == 0 && !qhp->attr.mpa_attr.initiator)
- init_attr.ird = 1;
-	} else {
-		init_attr.rtr_type = 0;
-	}
- init_attr.irs = qhp->ep->rcv_seq;
- pr_debug("%s init_attr.rq_addr 0x%x init_attr.rq_size = %d flags 0x%x qpcaps 0x%x\n",
- __func__,
- init_attr.rq_addr, init_attr.rq_size,
- init_attr.flags, init_attr.qpcaps);
- ret = cxio_rdma_init(&rhp->rdev, &init_attr);
- pr_debug("%s ret %d\n", __func__, ret);
- return ret;
-}
-
-int iwch_modify_qp(struct iwch_dev *rhp, struct iwch_qp *qhp,
- enum iwch_qp_attr_mask mask,
- struct iwch_qp_attributes *attrs,
- int internal)
-{
- int ret = 0;
- struct iwch_qp_attributes newattr = qhp->attr;
- unsigned long flag;
- int disconnect = 0;
- int terminate = 0;
- int abort = 0;
- int free = 0;
- struct iwch_ep *ep = NULL;
-
- pr_debug("%s qhp %p qpid 0x%x ep %p state %d -> %d\n", __func__,
- qhp, qhp->wq.qpid, qhp->ep, qhp->attr.state,
- (mask & IWCH_QP_ATTR_NEXT_STATE) ? attrs->next_state : -1);
-
- spin_lock_irqsave(&qhp->lock, flag);
-
- /* Process attr changes if in IDLE */
- if (mask & IWCH_QP_ATTR_VALID_MODIFY) {
- if (qhp->attr.state != IWCH_QP_STATE_IDLE) {
- ret = -EIO;
- goto out;
- }
- if (mask & IWCH_QP_ATTR_ENABLE_RDMA_READ)
- newattr.enable_rdma_read = attrs->enable_rdma_read;
- if (mask & IWCH_QP_ATTR_ENABLE_RDMA_WRITE)
- newattr.enable_rdma_write = attrs->enable_rdma_write;
- if (mask & IWCH_QP_ATTR_ENABLE_RDMA_BIND)
- newattr.enable_bind = attrs->enable_bind;
- if (mask & IWCH_QP_ATTR_MAX_ORD) {
- if (attrs->max_ord >
- rhp->attr.max_rdma_read_qp_depth) {
- ret = -EINVAL;
- goto out;
- }
- newattr.max_ord = attrs->max_ord;
- }
- if (mask & IWCH_QP_ATTR_MAX_IRD) {
- if (attrs->max_ird >
- rhp->attr.max_rdma_reads_per_qp) {
- ret = -EINVAL;
- goto out;
- }
- newattr.max_ird = attrs->max_ird;
- }
- qhp->attr = newattr;
- }
-
- if (!(mask & IWCH_QP_ATTR_NEXT_STATE))
- goto out;
- if (qhp->attr.state == attrs->next_state)
- goto out;
-
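-	/*
-	 * State machine handled below: IDLE -> RTS or ERROR; RTS ->
-	 * CLOSING, TERMINATE or ERROR; CLOSING -> IDLE or ERROR (internal
-	 * only); ERROR -> IDLE; TERMINATE -> ERROR (internal only).
-	 */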
- switch (qhp->attr.state) {
- case IWCH_QP_STATE_IDLE:
- switch (attrs->next_state) {
- case IWCH_QP_STATE_RTS:
- if (!(mask & IWCH_QP_ATTR_LLP_STREAM_HANDLE)) {
- ret = -EINVAL;
- goto out;
- }
- if (!(mask & IWCH_QP_ATTR_MPA_ATTR)) {
- ret = -EINVAL;
- goto out;
- }
- qhp->attr.mpa_attr = attrs->mpa_attr;
- qhp->attr.llp_stream_handle = attrs->llp_stream_handle;
- qhp->ep = qhp->attr.llp_stream_handle;
- qhp->attr.state = IWCH_QP_STATE_RTS;
-
- /*
- * Ref the endpoint here and deref when we
- * disassociate the endpoint from the QP. This
- * happens in CLOSING->IDLE transition or *->ERROR
- * transition.
- */
- get_ep(&qhp->ep->com);
- spin_unlock_irqrestore(&qhp->lock, flag);
- ret = rdma_init(rhp, qhp, mask, attrs);
- spin_lock_irqsave(&qhp->lock, flag);
- if (ret)
- goto err;
- break;
- case IWCH_QP_STATE_ERROR:
- qhp->attr.state = IWCH_QP_STATE_ERROR;
- flush_qp(qhp);
- break;
- default:
- ret = -EINVAL;
- goto out;
- }
- break;
- case IWCH_QP_STATE_RTS:
- switch (attrs->next_state) {
- case IWCH_QP_STATE_CLOSING:
- BUG_ON(kref_read(&qhp->ep->com.kref) < 2);
- qhp->attr.state = IWCH_QP_STATE_CLOSING;
- if (!internal) {
-				abort = 0;
- disconnect = 1;
- ep = qhp->ep;
- get_ep(&ep->com);
- }
- break;
- case IWCH_QP_STATE_TERMINATE:
- qhp->attr.state = IWCH_QP_STATE_TERMINATE;
- if (qhp->ibqp.uobject)
- cxio_set_wq_in_error(&qhp->wq);
- if (!internal)
- terminate = 1;
- break;
- case IWCH_QP_STATE_ERROR:
- qhp->attr.state = IWCH_QP_STATE_ERROR;
- if (!internal) {
-				abort = 1;
- disconnect = 1;
- ep = qhp->ep;
- get_ep(&ep->com);
- }
-			goto err;
- default:
- ret = -EINVAL;
- goto out;
- }
- break;
- case IWCH_QP_STATE_CLOSING:
- if (!internal) {
- ret = -EINVAL;
- goto out;
- }
- switch (attrs->next_state) {
- case IWCH_QP_STATE_IDLE:
- flush_qp(qhp);
- qhp->attr.state = IWCH_QP_STATE_IDLE;
- qhp->attr.llp_stream_handle = NULL;
- put_ep(&qhp->ep->com);
- qhp->ep = NULL;
- wake_up(&qhp->wait);
- break;
- case IWCH_QP_STATE_ERROR:
- goto err;
- default:
- ret = -EINVAL;
- goto err;
- }
- break;
- case IWCH_QP_STATE_ERROR:
- if (attrs->next_state != IWCH_QP_STATE_IDLE) {
- ret = -EINVAL;
- goto out;
- }
-
- if (!Q_EMPTY(qhp->wq.sq_rptr, qhp->wq.sq_wptr) ||
- !Q_EMPTY(qhp->wq.rq_rptr, qhp->wq.rq_wptr)) {
- ret = -EINVAL;
- goto out;
- }
- qhp->attr.state = IWCH_QP_STATE_IDLE;
- break;
- case IWCH_QP_STATE_TERMINATE:
- if (!internal) {
- ret = -EINVAL;
- goto out;
- }
-		goto err;
- default:
- pr_err("%s in a bad state %d\n", __func__, qhp->attr.state);
- ret = -EINVAL;
-		goto err;
- }
- goto out;
-err:
- pr_debug("%s disassociating ep %p qpid 0x%x\n", __func__, qhp->ep,
- qhp->wq.qpid);
-
- /* disassociate the LLP connection */
- qhp->attr.llp_stream_handle = NULL;
- ep = qhp->ep;
- qhp->ep = NULL;
- qhp->attr.state = IWCH_QP_STATE_ERROR;
-	free = 1;
- wake_up(&qhp->wait);
- BUG_ON(!ep);
- flush_qp(qhp);
-out:
- spin_unlock_irqrestore(&qhp->lock, flag);
-
- if (terminate)
- iwch_post_terminate(qhp, NULL);
-
- /*
- * If disconnect is 1, then we need to initiate a disconnect
- * on the EP. This can be a normal close (RTS->CLOSING) or
- * an abnormal close (RTS/CLOSING->ERROR).
- */
- if (disconnect) {
- iwch_ep_disconnect(ep, abort, GFP_KERNEL);
- put_ep(&ep->com);
- }
-
- /*
- * If free is 1, then we've disassociated the EP from the QP
- * and we need to dereference the EP.
- */
- if (free)
- put_ep(&ep->com);
-
- pr_debug("%s exit state %d\n", __func__, qhp->attr.state);
- return ret;
-}
diff --git a/drivers/infiniband/hw/cxgb3/tcb.h b/drivers/infiniband/hw/cxgb3/tcb.h
deleted file mode 100644
index c702dc199e18..000000000000
--- a/drivers/infiniband/hw/cxgb3/tcb.h
+++ /dev/null
@@ -1,632 +0,0 @@
-/*
- * Copyright (c) 2007 Chelsio, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef _TCB_DEFS_H
-#define _TCB_DEFS_H
-
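-
-/*
- * TCB field accessors: W_* is the 32-bit word index within the TCB,
- * S_* the starting bit within that word, M_* the field mask, and
- * V_*(x) shifts a value into field position.
- */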
-#define W_TCB_T_STATE 0
-#define S_TCB_T_STATE 0
-#define M_TCB_T_STATE 0xfULL
-#define V_TCB_T_STATE(x) ((x) << S_TCB_T_STATE)
-
-#define W_TCB_TIMER 0
-#define S_TCB_TIMER 4
-#define M_TCB_TIMER 0x1ULL
-#define V_TCB_TIMER(x) ((x) << S_TCB_TIMER)
-
-#define W_TCB_DACK_TIMER 0
-#define S_TCB_DACK_TIMER 5
-#define M_TCB_DACK_TIMER 0x1ULL
-#define V_TCB_DACK_TIMER(x) ((x) << S_TCB_DACK_TIMER)
-
-#define W_TCB_DEL_FLAG 0
-#define S_TCB_DEL_FLAG 6
-#define M_TCB_DEL_FLAG 0x1ULL
-#define V_TCB_DEL_FLAG(x) ((x) << S_TCB_DEL_FLAG)
-
-#define W_TCB_L2T_IX 0
-#define S_TCB_L2T_IX 7
-#define M_TCB_L2T_IX 0x7ffULL
-#define V_TCB_L2T_IX(x) ((x) << S_TCB_L2T_IX)
-
-#define W_TCB_SMAC_SEL 0
-#define S_TCB_SMAC_SEL 18
-#define M_TCB_SMAC_SEL 0x3ULL
-#define V_TCB_SMAC_SEL(x) ((x) << S_TCB_SMAC_SEL)
-
-#define W_TCB_TOS 0
-#define S_TCB_TOS 20
-#define M_TCB_TOS 0x3fULL
-#define V_TCB_TOS(x) ((x) << S_TCB_TOS)
-
-#define W_TCB_MAX_RT 0
-#define S_TCB_MAX_RT 26
-#define M_TCB_MAX_RT 0xfULL
-#define V_TCB_MAX_RT(x) ((x) << S_TCB_MAX_RT)
-
-#define W_TCB_T_RXTSHIFT 0
-#define S_TCB_T_RXTSHIFT 30
-#define M_TCB_T_RXTSHIFT 0xfULL
-#define V_TCB_T_RXTSHIFT(x) ((x) << S_TCB_T_RXTSHIFT)
-
-#define W_TCB_T_DUPACKS 1
-#define S_TCB_T_DUPACKS 2
-#define M_TCB_T_DUPACKS 0xfULL
-#define V_TCB_T_DUPACKS(x) ((x) << S_TCB_T_DUPACKS)
-
-#define W_TCB_T_MAXSEG 1
-#define S_TCB_T_MAXSEG 6
-#define M_TCB_T_MAXSEG 0xfULL
-#define V_TCB_T_MAXSEG(x) ((x) << S_TCB_T_MAXSEG)
-
-#define W_TCB_T_FLAGS1 1
-#define S_TCB_T_FLAGS1 10
-#define M_TCB_T_FLAGS1 0xffffffffULL
-#define V_TCB_T_FLAGS1(x) ((x) << S_TCB_T_FLAGS1)
-
-#define W_TCB_T_MIGRATION 1
-#define S_TCB_T_MIGRATION 20
-#define M_TCB_T_MIGRATION 0x1ULL
-#define V_TCB_T_MIGRATION(x) ((x) << S_TCB_T_MIGRATION)
-
-#define W_TCB_T_FLAGS2 2
-#define S_TCB_T_FLAGS2 10
-#define M_TCB_T_FLAGS2 0x7fULL
-#define V_TCB_T_FLAGS2(x) ((x) << S_TCB_T_FLAGS2)
-
-#define W_TCB_SND_SCALE 2
-#define S_TCB_SND_SCALE 17
-#define M_TCB_SND_SCALE 0xfULL
-#define V_TCB_SND_SCALE(x) ((x) << S_TCB_SND_SCALE)
-
-#define W_TCB_RCV_SCALE 2
-#define S_TCB_RCV_SCALE 21
-#define M_TCB_RCV_SCALE 0xfULL
-#define V_TCB_RCV_SCALE(x) ((x) << S_TCB_RCV_SCALE)
-
-#define W_TCB_SND_UNA_RAW 2
-#define S_TCB_SND_UNA_RAW 25
-#define M_TCB_SND_UNA_RAW 0x7ffffffULL
-#define V_TCB_SND_UNA_RAW(x) ((x) << S_TCB_SND_UNA_RAW)
-
-#define W_TCB_SND_NXT_RAW 3
-#define S_TCB_SND_NXT_RAW 20
-#define M_TCB_SND_NXT_RAW 0x7ffffffULL
-#define V_TCB_SND_NXT_RAW(x) ((x) << S_TCB_SND_NXT_RAW)
-
-#define W_TCB_RCV_NXT 4
-#define S_TCB_RCV_NXT 15
-#define M_TCB_RCV_NXT 0xffffffffULL
-#define V_TCB_RCV_NXT(x) ((x) << S_TCB_RCV_NXT)
-
-#define W_TCB_RCV_ADV 5
-#define S_TCB_RCV_ADV 15
-#define M_TCB_RCV_ADV 0xffffULL
-#define V_TCB_RCV_ADV(x) ((x) << S_TCB_RCV_ADV)
-
-#define W_TCB_SND_MAX_RAW 5
-#define S_TCB_SND_MAX_RAW 31
-#define M_TCB_SND_MAX_RAW 0x7ffffffULL
-#define V_TCB_SND_MAX_RAW(x) ((x) << S_TCB_SND_MAX_RAW)
-
-#define W_TCB_SND_CWND 6
-#define S_TCB_SND_CWND 26
-#define M_TCB_SND_CWND 0x7ffffffULL
-#define V_TCB_SND_CWND(x) ((x) << S_TCB_SND_CWND)
-
-#define W_TCB_SND_SSTHRESH 7
-#define S_TCB_SND_SSTHRESH 21
-#define M_TCB_SND_SSTHRESH 0x7ffffffULL
-#define V_TCB_SND_SSTHRESH(x) ((x) << S_TCB_SND_SSTHRESH)
-
-#define W_TCB_T_RTT_TS_RECENT_AGE 8
-#define S_TCB_T_RTT_TS_RECENT_AGE 16
-#define M_TCB_T_RTT_TS_RECENT_AGE 0xffffffffULL
-#define V_TCB_T_RTT_TS_RECENT_AGE(x) ((x) << S_TCB_T_RTT_TS_RECENT_AGE)
-
-#define W_TCB_T_RTSEQ_RECENT 9
-#define S_TCB_T_RTSEQ_RECENT 16
-#define M_TCB_T_RTSEQ_RECENT 0xffffffffULL
-#define V_TCB_T_RTSEQ_RECENT(x) ((x) << S_TCB_T_RTSEQ_RECENT)
-
-#define W_TCB_T_SRTT 10
-#define S_TCB_T_SRTT 16
-#define M_TCB_T_SRTT 0xffffULL
-#define V_TCB_T_SRTT(x) ((x) << S_TCB_T_SRTT)
-
-#define W_TCB_T_RTTVAR 11
-#define S_TCB_T_RTTVAR 0
-#define M_TCB_T_RTTVAR 0xffffULL
-#define V_TCB_T_RTTVAR(x) ((x) << S_TCB_T_RTTVAR)
-
-#define W_TCB_TS_LAST_ACK_SENT_RAW 11
-#define S_TCB_TS_LAST_ACK_SENT_RAW 16
-#define M_TCB_TS_LAST_ACK_SENT_RAW 0x7ffffffULL
-#define V_TCB_TS_LAST_ACK_SENT_RAW(x) ((x) << S_TCB_TS_LAST_ACK_SENT_RAW)
-
-#define W_TCB_DIP 12
-#define S_TCB_DIP 11
-#define M_TCB_DIP 0xffffffffULL
-#define V_TCB_DIP(x) ((x) << S_TCB_DIP)
-
-#define W_TCB_SIP 13
-#define S_TCB_SIP 11
-#define M_TCB_SIP 0xffffffffULL
-#define V_TCB_SIP(x) ((x) << S_TCB_SIP)
-
-#define W_TCB_DP 14
-#define S_TCB_DP 11
-#define M_TCB_DP 0xffffULL
-#define V_TCB_DP(x) ((x) << S_TCB_DP)
-
-#define W_TCB_SP 14
-#define S_TCB_SP 27
-#define M_TCB_SP 0xffffULL
-#define V_TCB_SP(x) ((x) << S_TCB_SP)
-
-#define W_TCB_TIMESTAMP 15
-#define S_TCB_TIMESTAMP 11
-#define M_TCB_TIMESTAMP 0xffffffffULL
-#define V_TCB_TIMESTAMP(x) ((x) << S_TCB_TIMESTAMP)
-
-#define W_TCB_TIMESTAMP_OFFSET 16
-#define S_TCB_TIMESTAMP_OFFSET 11
-#define M_TCB_TIMESTAMP_OFFSET 0xfULL
-#define V_TCB_TIMESTAMP_OFFSET(x) ((x) << S_TCB_TIMESTAMP_OFFSET)
-
-#define W_TCB_TX_MAX 16
-#define S_TCB_TX_MAX 15
-#define M_TCB_TX_MAX 0xffffffffULL
-#define V_TCB_TX_MAX(x) ((x) << S_TCB_TX_MAX)
-
-#define W_TCB_TX_HDR_PTR_RAW 17
-#define S_TCB_TX_HDR_PTR_RAW 15
-#define M_TCB_TX_HDR_PTR_RAW 0x1ffffULL
-#define V_TCB_TX_HDR_PTR_RAW(x) ((x) << S_TCB_TX_HDR_PTR_RAW)
-
-#define W_TCB_TX_LAST_PTR_RAW 18
-#define S_TCB_TX_LAST_PTR_RAW 0
-#define M_TCB_TX_LAST_PTR_RAW 0x1ffffULL
-#define V_TCB_TX_LAST_PTR_RAW(x) ((x) << S_TCB_TX_LAST_PTR_RAW)
-
-#define W_TCB_TX_COMPACT 18
-#define S_TCB_TX_COMPACT 17
-#define M_TCB_TX_COMPACT 0x1ULL
-#define V_TCB_TX_COMPACT(x) ((x) << S_TCB_TX_COMPACT)
-
-#define W_TCB_RX_COMPACT 18
-#define S_TCB_RX_COMPACT 18
-#define M_TCB_RX_COMPACT 0x1ULL
-#define V_TCB_RX_COMPACT(x) ((x) << S_TCB_RX_COMPACT)
-
-#define W_TCB_RCV_WND 18
-#define S_TCB_RCV_WND 19
-#define M_TCB_RCV_WND 0x7ffffffULL
-#define V_TCB_RCV_WND(x) ((x) << S_TCB_RCV_WND)
-
-#define W_TCB_RX_HDR_OFFSET 19
-#define S_TCB_RX_HDR_OFFSET 14
-#define M_TCB_RX_HDR_OFFSET 0x7ffffffULL
-#define V_TCB_RX_HDR_OFFSET(x) ((x) << S_TCB_RX_HDR_OFFSET)
-
-#define W_TCB_RX_FRAG0_START_IDX_RAW 20
-#define S_TCB_RX_FRAG0_START_IDX_RAW 9
-#define M_TCB_RX_FRAG0_START_IDX_RAW 0x7ffffffULL
-#define V_TCB_RX_FRAG0_START_IDX_RAW(x) ((x) << S_TCB_RX_FRAG0_START_IDX_RAW)
-
-#define W_TCB_RX_FRAG1_START_IDX_OFFSET 21
-#define S_TCB_RX_FRAG1_START_IDX_OFFSET 4
-#define M_TCB_RX_FRAG1_START_IDX_OFFSET 0x7ffffffULL
-#define V_TCB_RX_FRAG1_START_IDX_OFFSET(x) ((x) << S_TCB_RX_FRAG1_START_IDX_OFFSET)
-
-#define W_TCB_RX_FRAG0_LEN 21
-#define S_TCB_RX_FRAG0_LEN 31
-#define M_TCB_RX_FRAG0_LEN 0x7ffffffULL
-#define V_TCB_RX_FRAG0_LEN(x) ((x) << S_TCB_RX_FRAG0_LEN)
-
-#define W_TCB_RX_FRAG1_LEN 22
-#define S_TCB_RX_FRAG1_LEN 26
-#define M_TCB_RX_FRAG1_LEN 0x7ffffffULL
-#define V_TCB_RX_FRAG1_LEN(x) ((x) << S_TCB_RX_FRAG1_LEN)
-
-#define W_TCB_NEWRENO_RECOVER 23
-#define S_TCB_NEWRENO_RECOVER 21
-#define M_TCB_NEWRENO_RECOVER 0x7ffffffULL
-#define V_TCB_NEWRENO_RECOVER(x) ((x) << S_TCB_NEWRENO_RECOVER)
-
-#define W_TCB_PDU_HAVE_LEN 24
-#define S_TCB_PDU_HAVE_LEN 16
-#define M_TCB_PDU_HAVE_LEN 0x1ULL
-#define V_TCB_PDU_HAVE_LEN(x) ((x) << S_TCB_PDU_HAVE_LEN)
-
-#define W_TCB_PDU_LEN 24
-#define S_TCB_PDU_LEN 17
-#define M_TCB_PDU_LEN 0xffffULL
-#define V_TCB_PDU_LEN(x) ((x) << S_TCB_PDU_LEN)
-
-#define W_TCB_RX_QUIESCE 25
-#define S_TCB_RX_QUIESCE 1
-#define M_TCB_RX_QUIESCE 0x1ULL
-#define V_TCB_RX_QUIESCE(x) ((x) << S_TCB_RX_QUIESCE)
-
-#define W_TCB_RX_PTR_RAW 25
-#define S_TCB_RX_PTR_RAW 2
-#define M_TCB_RX_PTR_RAW 0x1ffffULL
-#define V_TCB_RX_PTR_RAW(x) ((x) << S_TCB_RX_PTR_RAW)
-
-#define W_TCB_CPU_NO 25
-#define S_TCB_CPU_NO 19
-#define M_TCB_CPU_NO 0x7fULL
-#define V_TCB_CPU_NO(x) ((x) << S_TCB_CPU_NO)
-
-#define W_TCB_ULP_TYPE 25
-#define S_TCB_ULP_TYPE 26
-#define M_TCB_ULP_TYPE 0xfULL
-#define V_TCB_ULP_TYPE(x) ((x) << S_TCB_ULP_TYPE)
-
-#define W_TCB_RX_FRAG1_PTR_RAW 25
-#define S_TCB_RX_FRAG1_PTR_RAW 30
-#define M_TCB_RX_FRAG1_PTR_RAW 0x1ffffULL
-#define V_TCB_RX_FRAG1_PTR_RAW(x) ((x) << S_TCB_RX_FRAG1_PTR_RAW)
-
-#define W_TCB_RX_FRAG2_START_IDX_OFFSET_RAW 26
-#define S_TCB_RX_FRAG2_START_IDX_OFFSET_RAW 15
-#define M_TCB_RX_FRAG2_START_IDX_OFFSET_RAW 0x7ffffffULL
-#define V_TCB_RX_FRAG2_START_IDX_OFFSET_RAW(x) ((x) << S_TCB_RX_FRAG2_START_IDX_OFFSET_RAW)
-
-#define W_TCB_RX_FRAG2_PTR_RAW 27
-#define S_TCB_RX_FRAG2_PTR_RAW 10
-#define M_TCB_RX_FRAG2_PTR_RAW 0x1ffffULL
-#define V_TCB_RX_FRAG2_PTR_RAW(x) ((x) << S_TCB_RX_FRAG2_PTR_RAW)
-
-#define W_TCB_RX_FRAG2_LEN_RAW 27
-#define S_TCB_RX_FRAG2_LEN_RAW 27
-#define M_TCB_RX_FRAG2_LEN_RAW 0x7ffffffULL
-#define V_TCB_RX_FRAG2_LEN_RAW(x) ((x) << S_TCB_RX_FRAG2_LEN_RAW)
-
-#define W_TCB_RX_FRAG3_PTR_RAW 28
-#define S_TCB_RX_FRAG3_PTR_RAW 22
-#define M_TCB_RX_FRAG3_PTR_RAW 0x1ffffULL
-#define V_TCB_RX_FRAG3_PTR_RAW(x) ((x) << S_TCB_RX_FRAG3_PTR_RAW)
-
-#define W_TCB_RX_FRAG3_LEN_RAW 29
-#define S_TCB_RX_FRAG3_LEN_RAW 7
-#define M_TCB_RX_FRAG3_LEN_RAW 0x7ffffffULL
-#define V_TCB_RX_FRAG3_LEN_RAW(x) ((x) << S_TCB_RX_FRAG3_LEN_RAW)
-
-#define W_TCB_RX_FRAG3_START_IDX_OFFSET_RAW 30
-#define S_TCB_RX_FRAG3_START_IDX_OFFSET_RAW 2
-#define M_TCB_RX_FRAG3_START_IDX_OFFSET_RAW 0x7ffffffULL
-#define V_TCB_RX_FRAG3_START_IDX_OFFSET_RAW(x) ((x) << S_TCB_RX_FRAG3_START_IDX_OFFSET_RAW)
-
-#define W_TCB_PDU_HDR_LEN 30
-#define S_TCB_PDU_HDR_LEN 29
-#define M_TCB_PDU_HDR_LEN 0xffULL
-#define V_TCB_PDU_HDR_LEN(x) ((x) << S_TCB_PDU_HDR_LEN)
-
-#define W_TCB_SLUSH1 31
-#define S_TCB_SLUSH1 5
-#define M_TCB_SLUSH1 0x7ffffULL
-#define V_TCB_SLUSH1(x) ((x) << S_TCB_SLUSH1)
-
-#define W_TCB_ULP_RAW 31
-#define S_TCB_ULP_RAW 24
-#define M_TCB_ULP_RAW 0xffULL
-#define V_TCB_ULP_RAW(x) ((x) << S_TCB_ULP_RAW)
-
-#define W_TCB_DDP_RDMAP_VERSION 25
-#define S_TCB_DDP_RDMAP_VERSION 30
-#define M_TCB_DDP_RDMAP_VERSION 0x1ULL
-#define V_TCB_DDP_RDMAP_VERSION(x) ((x) << S_TCB_DDP_RDMAP_VERSION)
-
-#define W_TCB_MARKER_ENABLE_RX 25
-#define S_TCB_MARKER_ENABLE_RX 31
-#define M_TCB_MARKER_ENABLE_RX 0x1ULL
-#define V_TCB_MARKER_ENABLE_RX(x) ((x) << S_TCB_MARKER_ENABLE_RX)
-
-#define W_TCB_MARKER_ENABLE_TX 26
-#define S_TCB_MARKER_ENABLE_TX 0
-#define M_TCB_MARKER_ENABLE_TX 0x1ULL
-#define V_TCB_MARKER_ENABLE_TX(x) ((x) << S_TCB_MARKER_ENABLE_TX)
-
-#define W_TCB_CRC_ENABLE 26
-#define S_TCB_CRC_ENABLE 1
-#define M_TCB_CRC_ENABLE 0x1ULL
-#define V_TCB_CRC_ENABLE(x) ((x) << S_TCB_CRC_ENABLE)
-
-#define W_TCB_IRS_ULP 26
-#define S_TCB_IRS_ULP 2
-#define M_TCB_IRS_ULP 0x1ffULL
-#define V_TCB_IRS_ULP(x) ((x) << S_TCB_IRS_ULP)
-
-#define W_TCB_ISS_ULP 26
-#define S_TCB_ISS_ULP 11
-#define M_TCB_ISS_ULP 0x1ffULL
-#define V_TCB_ISS_ULP(x) ((x) << S_TCB_ISS_ULP)
-
-#define W_TCB_TX_PDU_LEN 26
-#define S_TCB_TX_PDU_LEN 20
-#define M_TCB_TX_PDU_LEN 0x3fffULL
-#define V_TCB_TX_PDU_LEN(x) ((x) << S_TCB_TX_PDU_LEN)
-
-#define W_TCB_TX_PDU_OUT 27
-#define S_TCB_TX_PDU_OUT 2
-#define M_TCB_TX_PDU_OUT 0x1ULL
-#define V_TCB_TX_PDU_OUT(x) ((x) << S_TCB_TX_PDU_OUT)
-
-#define W_TCB_CQ_IDX_SQ 27
-#define S_TCB_CQ_IDX_SQ 3
-#define M_TCB_CQ_IDX_SQ 0xffffULL
-#define V_TCB_CQ_IDX_SQ(x) ((x) << S_TCB_CQ_IDX_SQ)
-
-#define W_TCB_CQ_IDX_RQ 27
-#define S_TCB_CQ_IDX_RQ 19
-#define M_TCB_CQ_IDX_RQ 0xffffULL
-#define V_TCB_CQ_IDX_RQ(x) ((x) << S_TCB_CQ_IDX_RQ)
-
-#define W_TCB_QP_ID 28
-#define S_TCB_QP_ID 3
-#define M_TCB_QP_ID 0xffffULL
-#define V_TCB_QP_ID(x) ((x) << S_TCB_QP_ID)
-
-#define W_TCB_PD_ID 28
-#define S_TCB_PD_ID 19
-#define M_TCB_PD_ID 0xffffULL
-#define V_TCB_PD_ID(x) ((x) << S_TCB_PD_ID)
-
-#define W_TCB_STAG 29
-#define S_TCB_STAG 3
-#define M_TCB_STAG 0xffffffffULL
-#define V_TCB_STAG(x) ((x) << S_TCB_STAG)
-
-#define W_TCB_RQ_START 30
-#define S_TCB_RQ_START 3
-#define M_TCB_RQ_START 0x3ffffffULL
-#define V_TCB_RQ_START(x) ((x) << S_TCB_RQ_START)
-
-#define W_TCB_RQ_MSN 30
-#define S_TCB_RQ_MSN 29
-#define M_TCB_RQ_MSN 0x3ffULL
-#define V_TCB_RQ_MSN(x) ((x) << S_TCB_RQ_MSN)
-
-#define W_TCB_RQ_MAX_OFFSET 31
-#define S_TCB_RQ_MAX_OFFSET 7
-#define M_TCB_RQ_MAX_OFFSET 0xfULL
-#define V_TCB_RQ_MAX_OFFSET(x) ((x) << S_TCB_RQ_MAX_OFFSET)
-
-#define W_TCB_RQ_WRITE_PTR 31
-#define S_TCB_RQ_WRITE_PTR 11
-#define M_TCB_RQ_WRITE_PTR 0x3ffULL
-#define V_TCB_RQ_WRITE_PTR(x) ((x) << S_TCB_RQ_WRITE_PTR)
-
-#define W_TCB_INB_WRITE_PERM 31
-#define S_TCB_INB_WRITE_PERM 21
-#define M_TCB_INB_WRITE_PERM 0x1ULL
-#define V_TCB_INB_WRITE_PERM(x) ((x) << S_TCB_INB_WRITE_PERM)
-
-#define W_TCB_INB_READ_PERM 31
-#define S_TCB_INB_READ_PERM 22
-#define M_TCB_INB_READ_PERM 0x1ULL
-#define V_TCB_INB_READ_PERM(x) ((x) << S_TCB_INB_READ_PERM)
-
-#define W_TCB_ORD_L_BIT_VLD 31
-#define S_TCB_ORD_L_BIT_VLD 23
-#define M_TCB_ORD_L_BIT_VLD 0x1ULL
-#define V_TCB_ORD_L_BIT_VLD(x) ((x) << S_TCB_ORD_L_BIT_VLD)
-
-#define W_TCB_RDMAP_OPCODE 31
-#define S_TCB_RDMAP_OPCODE 24
-#define M_TCB_RDMAP_OPCODE 0xfULL
-#define V_TCB_RDMAP_OPCODE(x) ((x) << S_TCB_RDMAP_OPCODE)
-
-#define W_TCB_TX_FLUSH 31
-#define S_TCB_TX_FLUSH 28
-#define M_TCB_TX_FLUSH 0x1ULL
-#define V_TCB_TX_FLUSH(x) ((x) << S_TCB_TX_FLUSH)
-
-#define W_TCB_TX_OOS_RXMT 31
-#define S_TCB_TX_OOS_RXMT 29
-#define M_TCB_TX_OOS_RXMT 0x1ULL
-#define V_TCB_TX_OOS_RXMT(x) ((x) << S_TCB_TX_OOS_RXMT)
-
-#define W_TCB_TX_OOS_TXMT 31
-#define S_TCB_TX_OOS_TXMT 30
-#define M_TCB_TX_OOS_TXMT 0x1ULL
-#define V_TCB_TX_OOS_TXMT(x) ((x) << S_TCB_TX_OOS_TXMT)
-
-#define W_TCB_SLUSH_AUX2 31
-#define S_TCB_SLUSH_AUX2 31
-#define M_TCB_SLUSH_AUX2 0x1ULL
-#define V_TCB_SLUSH_AUX2(x) ((x) << S_TCB_SLUSH_AUX2)
-
-#define W_TCB_RX_FRAG1_PTR_RAW2 25
-#define S_TCB_RX_FRAG1_PTR_RAW2 30
-#define M_TCB_RX_FRAG1_PTR_RAW2 0x1ffffULL
-#define V_TCB_RX_FRAG1_PTR_RAW2(x) ((x) << S_TCB_RX_FRAG1_PTR_RAW2)
-
-#define W_TCB_RX_DDP_FLAGS 26
-#define S_TCB_RX_DDP_FLAGS 15
-#define M_TCB_RX_DDP_FLAGS 0x3ffULL
-#define V_TCB_RX_DDP_FLAGS(x) ((x) << S_TCB_RX_DDP_FLAGS)
-
-#define W_TCB_SLUSH_AUX3 26
-#define S_TCB_SLUSH_AUX3 31
-#define M_TCB_SLUSH_AUX3 0x1ffULL
-#define V_TCB_SLUSH_AUX3(x) ((x) << S_TCB_SLUSH_AUX3)
-
-#define W_TCB_RX_DDP_BUF0_OFFSET 27
-#define S_TCB_RX_DDP_BUF0_OFFSET 8
-#define M_TCB_RX_DDP_BUF0_OFFSET 0x3fffffULL
-#define V_TCB_RX_DDP_BUF0_OFFSET(x) ((x) << S_TCB_RX_DDP_BUF0_OFFSET)
-
-#define W_TCB_RX_DDP_BUF0_LEN 27
-#define S_TCB_RX_DDP_BUF0_LEN 30
-#define M_TCB_RX_DDP_BUF0_LEN 0x3fffffULL
-#define V_TCB_RX_DDP_BUF0_LEN(x) ((x) << S_TCB_RX_DDP_BUF0_LEN)
-
-#define W_TCB_RX_DDP_BUF1_OFFSET 28
-#define S_TCB_RX_DDP_BUF1_OFFSET 20
-#define M_TCB_RX_DDP_BUF1_OFFSET 0x3fffffULL
-#define V_TCB_RX_DDP_BUF1_OFFSET(x) ((x) << S_TCB_RX_DDP_BUF1_OFFSET)
-
-#define W_TCB_RX_DDP_BUF1_LEN 29
-#define S_TCB_RX_DDP_BUF1_LEN 10
-#define M_TCB_RX_DDP_BUF1_LEN 0x3fffffULL
-#define V_TCB_RX_DDP_BUF1_LEN(x) ((x) << S_TCB_RX_DDP_BUF1_LEN)
-
-#define W_TCB_RX_DDP_BUF0_TAG 30
-#define S_TCB_RX_DDP_BUF0_TAG 0
-#define M_TCB_RX_DDP_BUF0_TAG 0xffffffffULL
-#define V_TCB_RX_DDP_BUF0_TAG(x) ((x) << S_TCB_RX_DDP_BUF0_TAG)
-
-#define W_TCB_RX_DDP_BUF1_TAG 31
-#define S_TCB_RX_DDP_BUF1_TAG 0
-#define M_TCB_RX_DDP_BUF1_TAG 0xffffffffULL
-#define V_TCB_RX_DDP_BUF1_TAG(x) ((x) << S_TCB_RX_DDP_BUF1_TAG)
-
-#define S_TF_DACK 10
-#define V_TF_DACK(x) ((x) << S_TF_DACK)
-
-#define S_TF_NAGLE 11
-#define V_TF_NAGLE(x) ((x) << S_TF_NAGLE)
-
-#define S_TF_RECV_SCALE 12
-#define V_TF_RECV_SCALE(x) ((x) << S_TF_RECV_SCALE)
-
-#define S_TF_RECV_TSTMP 13
-#define V_TF_RECV_TSTMP(x) ((x) << S_TF_RECV_TSTMP)
-
-#define S_TF_RECV_SACK 14
-#define V_TF_RECV_SACK(x) ((x) << S_TF_RECV_SACK)
-
-#define S_TF_TURBO 15
-#define V_TF_TURBO(x) ((x) << S_TF_TURBO)
-
-#define S_TF_KEEPALIVE 16
-#define V_TF_KEEPALIVE(x) ((x) << S_TF_KEEPALIVE)
-
-#define S_TF_TCAM_BYPASS 17
-#define V_TF_TCAM_BYPASS(x) ((x) << S_TF_TCAM_BYPASS)
-
-#define S_TF_CORE_FIN 18
-#define V_TF_CORE_FIN(x) ((x) << S_TF_CORE_FIN)
-
-#define S_TF_CORE_MORE 19
-#define V_TF_CORE_MORE(x) ((x) << S_TF_CORE_MORE)
-
-#define S_TF_MIGRATING 20
-#define V_TF_MIGRATING(x) ((x) << S_TF_MIGRATING)
-
-#define S_TF_ACTIVE_OPEN 21
-#define V_TF_ACTIVE_OPEN(x) ((x) << S_TF_ACTIVE_OPEN)
-
-#define S_TF_ASK_MODE 22
-#define V_TF_ASK_MODE(x) ((x) << S_TF_ASK_MODE)
-
-#define S_TF_NON_OFFLOAD 23
-#define V_TF_NON_OFFLOAD(x) ((x) << S_TF_NON_OFFLOAD)
-
-#define S_TF_MOD_SCHD 24
-#define V_TF_MOD_SCHD(x) ((x) << S_TF_MOD_SCHD)
-
-#define S_TF_MOD_SCHD_REASON0 25
-#define V_TF_MOD_SCHD_REASON0(x) ((x) << S_TF_MOD_SCHD_REASON0)
-
-#define S_TF_MOD_SCHD_REASON1 26
-#define V_TF_MOD_SCHD_REASON1(x) ((x) << S_TF_MOD_SCHD_REASON1)
-
-#define S_TF_MOD_SCHD_RX 27
-#define V_TF_MOD_SCHD_RX(x) ((x) << S_TF_MOD_SCHD_RX)
-
-#define S_TF_CORE_PUSH 28
-#define V_TF_CORE_PUSH(x) ((x) << S_TF_CORE_PUSH)
-
-#define S_TF_RCV_COALESCE_ENABLE 29
-#define V_TF_RCV_COALESCE_ENABLE(x) ((x) << S_TF_RCV_COALESCE_ENABLE)
-
-#define S_TF_RCV_COALESCE_PUSH 30
-#define V_TF_RCV_COALESCE_PUSH(x) ((x) << S_TF_RCV_COALESCE_PUSH)
-
-#define S_TF_RCV_COALESCE_LAST_PSH 31
-#define V_TF_RCV_COALESCE_LAST_PSH(x) ((x) << S_TF_RCV_COALESCE_LAST_PSH)
-
-#define S_TF_RCV_COALESCE_HEARTBEAT 32
-#define V_TF_RCV_COALESCE_HEARTBEAT(x) ((x) << S_TF_RCV_COALESCE_HEARTBEAT)
-
-#define S_TF_HALF_CLOSE 33
-#define V_TF_HALF_CLOSE(x) ((x) << S_TF_HALF_CLOSE)
-
-#define S_TF_DACK_MSS 34
-#define V_TF_DACK_MSS(x) ((x) << S_TF_DACK_MSS)
-
-#define S_TF_CCTRL_SEL0 35
-#define V_TF_CCTRL_SEL0(x) ((x) << S_TF_CCTRL_SEL0)
-
-#define S_TF_CCTRL_SEL1 36
-#define V_TF_CCTRL_SEL1(x) ((x) << S_TF_CCTRL_SEL1)
-
-#define S_TF_TCP_NEWRENO_FAST_RECOVERY 37
-#define V_TF_TCP_NEWRENO_FAST_RECOVERY(x) ((x) << S_TF_TCP_NEWRENO_FAST_RECOVERY)
-
-#define S_TF_TX_PACE_AUTO 38
-#define V_TF_TX_PACE_AUTO(x) ((x) << S_TF_TX_PACE_AUTO)
-
-#define S_TF_PEER_FIN_HELD 39
-#define V_TF_PEER_FIN_HELD(x) ((x) << S_TF_PEER_FIN_HELD)
-
-#define S_TF_CORE_URG 40
-#define V_TF_CORE_URG(x) ((x) << S_TF_CORE_URG)
-
-#define S_TF_RDMA_ERROR 41
-#define V_TF_RDMA_ERROR(x) ((x) << S_TF_RDMA_ERROR)
-
-#define S_TF_SSWS_DISABLED 42
-#define V_TF_SSWS_DISABLED(x) ((x) << S_TF_SSWS_DISABLED)
-
-#define S_TF_DUPACK_COUNT_ODD 43
-#define V_TF_DUPACK_COUNT_ODD(x) ((x) << S_TF_DUPACK_COUNT_ODD)
-
-#define S_TF_TX_CHANNEL 44
-#define V_TF_TX_CHANNEL(x) ((x) << S_TF_TX_CHANNEL)
-
-#define S_TF_RX_CHANNEL 45
-#define V_TF_RX_CHANNEL(x) ((x) << S_TF_RX_CHANNEL)
-
-#define S_TF_TX_PACE_FIXED 46
-#define V_TF_TX_PACE_FIXED(x) ((x) << S_TF_TX_PACE_FIXED)
-
-#define S_TF_RDMA_FLM_ERROR 47
-#define V_TF_RDMA_FLM_ERROR(x) ((x) << S_TF_RDMA_FLM_ERROR)
-
-#define S_TF_RX_FLOW_CONTROL_DISABLE 48
-#define V_TF_RX_FLOW_CONTROL_DISABLE(x) ((x) << S_TF_RX_FLOW_CONTROL_DISABLE)
-
-#endif /* _TCB_DEFS_H */
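
Every field in the header deleted above follows the same four-macro convention: W_* names the 32-bit TCB word holding the field, S_* gives the bit offset relative to the start of that word, M_* is the unshifted mask sized to the field width (ULL because a shifted field can cross the 32-bit boundary, e.g. S_TCB_RX_FRAG0_LEN at bit 31 with a 27-bit mask), and V_*(x) shifts a value into position. A standalone sketch of how the macros compose, reusing the W_TCB_TOS family verbatim:

#include <stdint.h>
#include <stdio.h>

#define W_TCB_TOS 0             /* field lives in TCB word 0 */
#define S_TCB_TOS 20            /* starting at bit 20 */
#define M_TCB_TOS 0x3fULL       /* and is 6 bits wide */
#define V_TCB_TOS(x) ((x) << S_TCB_TOS)

int main(void)
{
        uint64_t word = 0;

        /* Insert: clear the field, then OR in the shifted value. */
        word &= ~(M_TCB_TOS << S_TCB_TOS);
        word |= V_TCB_TOS(0x2aULL & M_TCB_TOS);

        /* Extract: shift down, then mask. */
        printf("TOS = %#llx\n",
               (unsigned long long)((word >> S_TCB_TOS) & M_TCB_TOS));
        return 0;
}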
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 347dc242fb88..ee1182f9b627 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -3379,7 +3379,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
if (raddr->sin_addr.s_addr == htonl(INADDR_ANY)) {
err = pick_local_ipaddrs(dev, cm_id);
if (err)
- goto fail2;
+ goto fail3;
}
/* find a route */
@@ -3401,7 +3401,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
if (ipv6_addr_type(&raddr6->sin6_addr) == IPV6_ADDR_ANY) {
err = pick_local_ip6addrs(dev, cm_id);
if (err)
- goto fail2;
+ goto fail3;
}
/* find a route */
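
The two one-line fixes above retarget error gotos in c4iw_connect(). In the kernel's unwind idiom the failN labels fall through one another in reverse acquisition order, so an error goto must aim at the label whose fall-through chain releases everything acquired so far; aiming one label short (fail2 where fail3 is needed) silently leaks the resource in between. A hedged, generic sketch of the idiom (placeholder step/undo functions, not the c4iw code):

/* Illustrative only: each failN label undoes, in reverse order,
 * everything acquired before the point that jumps to it. */
int step_a(void), step_b(void), step_c(void);
void undo_a(void), undo_b(void);

int example_connect(void)
{
        int err;

        err = step_a();         /* e.g. allocate the endpoint */
        if (err)
                goto fail1;
        err = step_b();         /* e.g. insert into the wait table */
        if (err)
                goto fail2;
        err = step_c();         /* e.g. pick local addresses */
        if (err)
                goto fail3;     /* must undo step_b AND step_a, so not fail2 */
        return 0;

fail3:
        undo_b();
fail2:
        undo_a();
fail1:
        return err;
}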
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 35c284af574d..fe3a7e8561df 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -543,7 +543,7 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
mhp->rhp = rhp;
- mhp->umem = ib_umem_get(udata, start, length, acc, 0);
+ mhp->umem = ib_umem_get(udata, start, length, acc);
if (IS_ERR(mhp->umem))
goto err_free_skb;
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index d373ac0fe2cb..ba83d942997c 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -305,7 +305,10 @@ static int c4iw_query_device(struct ib_device *ibdev, struct ib_device_attr *pro
static int c4iw_query_port(struct ib_device *ibdev, u8 port,
struct ib_port_attr *props)
{
+ int ret = 0;
pr_debug("ibdev %p\n", ibdev);
+ ret = ib_get_eth_speed(ibdev, port, &props->active_speed,
+ &props->active_width);
props->port_cap_flags =
IB_PORT_CM_SUP |
@@ -315,11 +318,9 @@ static int c4iw_query_port(struct ib_device *ibdev, u8 port,
IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
props->gid_tbl_len = 1;
props->pkey_tbl_len = 1;
- props->active_width = 2;
- props->active_speed = IB_SPEED_DDR;
props->max_msg_sz = -1;
- return 0;
+ return ret;
}
static ssize_t hw_rev_show(struct device *dev,
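
Here c4iw_query_port() stops advertising a hardcoded DDR/4X link and instead asks the core to derive speed and width from the underlying Ethernet device, propagating any failure. A minimal sketch of how ib_get_eth_speed() slots into a query_port implementation:

#include <rdma/ib_verbs.h>

/* Sketch only: fills active_speed/active_width from the netdev link
 * rate, and returns what ib_get_eth_speed() reports rather than
 * unconditionally claiming success, matching the change above. */
static int example_query_port(struct ib_device *ibdev, u8 port,
                              struct ib_port_attr *props)
{
        int ret;

        ret = ib_get_eth_speed(ibdev, port, &props->active_speed,
                               &props->active_width);
        props->gid_tbl_len = 1;
        props->pkey_tbl_len = 1;
        return ret;
}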
diff --git a/drivers/infiniband/hw/efa/efa.h b/drivers/infiniband/hw/efa/efa.h
index 2283e432693e..aa7396a1588a 100644
--- a/drivers/infiniband/hw/efa/efa.h
+++ b/drivers/infiniband/hw/efa/efa.h
@@ -60,8 +60,6 @@ struct efa_dev {
u64 mem_bar_len;
u64 db_bar_addr;
u64 db_bar_len;
- u8 addr[EFA_GID_SIZE];
- u32 mtu;
int admin_msix_vector_idx;
struct efa_irq admin_irq;
@@ -71,8 +69,6 @@ struct efa_dev {
struct efa_ucontext {
struct ib_ucontext ibucontext;
- struct xarray mmap_xa;
- u32 mmap_xa_page;
u16 uarn;
};
@@ -91,6 +87,7 @@ struct efa_cq {
struct efa_ucontext *ucontext;
dma_addr_t dma_addr;
void *cpu_addr;
+ struct rdma_user_mmap_entry *mmap_entry;
size_t size;
u16 cq_idx;
};
@@ -101,6 +98,13 @@ struct efa_qp {
void *rq_cpu_addr;
size_t rq_size;
enum ib_qp_state state;
+
+ /* Used for saving mmap_xa entries */
+ struct rdma_user_mmap_entry *sq_db_mmap_entry;
+ struct rdma_user_mmap_entry *llq_desc_mmap_entry;
+ struct rdma_user_mmap_entry *rq_db_mmap_entry;
+ struct rdma_user_mmap_entry *rq_mmap_entry;
+
u32 qp_handle;
u32 max_send_wr;
u32 max_recv_wr;
@@ -147,6 +151,7 @@ int efa_alloc_ucontext(struct ib_ucontext *ibucontext, struct ib_udata *udata);
void efa_dealloc_ucontext(struct ib_ucontext *ibucontext);
int efa_mmap(struct ib_ucontext *ibucontext,
struct vm_area_struct *vma);
+void efa_mmap_free(struct rdma_user_mmap_entry *rdma_entry);
int efa_create_ah(struct ib_ah *ibah,
struct rdma_ah_attr *ah_attr,
u32 flags,
diff --git a/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h b/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h
index 2be0469d545f..e96bcb16bd2b 100644
--- a/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h
+++ b/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h
@@ -362,9 +362,13 @@ struct efa_admin_reg_mr_cmd {
/*
* permissions
- * 0 : local_write_enable - Write permissions: value
- * of 1 needed for RQ buffers and for RDMA write
- * 7:1 : reserved1 - remote access flags, etc
+ * 0 : local_write_enable - Local write permissions:
+ * must be set for RQ buffers and buffers posted for
+ * RDMA Read requests
+ * 1 : reserved1 - MBZ
+ * 2 : remote_read_enable - Remote read permissions:
+ * must be set to enable RDMA read from the region
+ * 7:3 : reserved2 - MBZ
*/
u8 permissions;
@@ -558,6 +562,16 @@ struct efa_admin_feature_device_attr_desc {
/* Indicates how many bits are used for virtual address access */
u8 virt_addr_width;
+
+ /*
+ * 0 : rdma_read - If set, RDMA Read is supported on
+ * TX queues
+ * 31:1 : reserved - MBZ
+ */
+ u32 device_caps;
+
+ /* Max RDMA transfer size in bytes */
+ u32 max_rdma_size;
};
struct efa_admin_feature_queue_attr_desc {
@@ -604,6 +618,9 @@ struct efa_admin_feature_queue_attr_desc {
/* The maximum size of LLQ in bytes */
u32 max_llq_size;
+
+ /* Maximum number of SGEs for a single RDMA read WQE */
+ u16 max_wr_rdma_sges;
};
struct efa_admin_feature_aenq_desc {
@@ -618,6 +635,7 @@ struct efa_admin_feature_network_attr_desc {
/* Raw address data in network byte order */
u8 addr[16];
+ /* max packet payload size in bytes */
u32 mtu;
};
@@ -780,6 +798,8 @@ struct efa_admin_mmio_req_read_less_resp {
#define EFA_ADMIN_REG_MR_CMD_MEM_ADDR_PHY_MODE_EN_SHIFT 7
#define EFA_ADMIN_REG_MR_CMD_MEM_ADDR_PHY_MODE_EN_MASK BIT(7)
#define EFA_ADMIN_REG_MR_CMD_LOCAL_WRITE_ENABLE_MASK BIT(0)
+#define EFA_ADMIN_REG_MR_CMD_REMOTE_READ_ENABLE_SHIFT 2
+#define EFA_ADMIN_REG_MR_CMD_REMOTE_READ_ENABLE_MASK BIT(2)
/* create_cq_cmd */
#define EFA_ADMIN_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_SHIFT 5
@@ -791,4 +811,7 @@ struct efa_admin_mmio_req_read_less_resp {
/* get_set_feature_common_desc */
#define EFA_ADMIN_GET_SET_FEATURE_COMMON_DESC_SELECT_MASK GENMASK(1, 0)
+/* feature_device_attr_desc */
+#define EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_RDMA_READ_MASK BIT(0)
+
#endif /* _EFA_ADMIN_CMDS_H_ */
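
With remote read added, the permissions byte documented above becomes a genuine bitfield: bit 0 is local_write_enable, bit 2 remote_read_enable, and bits 1 and 7:3 must be zero (MBZ). The layout appears to mirror the generic IB access-flag values (IB_ACCESS_LOCAL_WRITE is bit 0 and IB_ACCESS_REMOTE_READ bit 2), which is presumably why later hunks can pass access_flags straight through. Building the byte explicitly would look like this sketch, using the masks from the header above:

#include <linux/bits.h>
#include <rdma/ib_verbs.h>

/* Sketch: translate IB access flags into the device permissions byte;
 * every other bit stays zero as the spec above requires. */
static u8 example_mr_permissions(int access_flags)
{
        u8 perms = 0;

        if (access_flags & IB_ACCESS_LOCAL_WRITE)
                perms |= EFA_ADMIN_REG_MR_CMD_LOCAL_WRITE_ENABLE_MASK;
        if (access_flags & IB_ACCESS_REMOTE_READ)
                perms |= EFA_ADMIN_REG_MR_CMD_REMOTE_READ_ENABLE_MASK;
        return perms;
}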
diff --git a/drivers/infiniband/hw/efa/efa_com.c b/drivers/infiniband/hw/efa/efa_com.c
index 3c412bc5b94f..0778f4f7dccd 100644
--- a/drivers/infiniband/hw/efa/efa_com.c
+++ b/drivers/infiniband/hw/efa/efa_com.c
@@ -317,6 +317,7 @@ static struct efa_comp_ctx *__efa_com_submit_admin_cmd(struct efa_com_admin_queu
struct efa_admin_acq_entry *comp,
size_t comp_size_in_bytes)
{
+ struct efa_admin_aq_entry *aqe;
struct efa_comp_ctx *comp_ctx;
u16 queue_size_mask;
u16 cmd_id;
@@ -350,7 +351,9 @@ static struct efa_comp_ctx *__efa_com_submit_admin_cmd(struct efa_com_admin_queu
reinit_completion(&comp_ctx->wait_event);
- memcpy(&aq->sq.entries[pi], cmd, cmd_size_in_bytes);
+ aqe = &aq->sq.entries[pi];
+ memset(aqe, 0, sizeof(*aqe));
+ memcpy(aqe, cmd, cmd_size_in_bytes);
aq->sq.pc++;
atomic64_inc(&aq->stats.submitted_cmd);
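
The efa_com.c hunk above closes a data-leak hazard: admin commands vary in size, and copying only cmd_size_in_bytes into a fixed-size ring slot leaves whatever the previous command wrote in the trailing bytes, which the device would then read. Zeroing the full entry first makes the slot deterministic. The defensive pattern in isolation:

#include <string.h>

/* Sketch: cmd_size may be smaller than the fixed ring-entry size, so
 * wipe the whole slot before copying in the shorter command. */
static void example_fill_slot(void *slot, size_t slot_size,
                              const void *cmd, size_t cmd_size)
{
        memset(slot, 0, slot_size);     /* no stale bytes survive */
        memcpy(slot, cmd, cmd_size);
}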
diff --git a/drivers/infiniband/hw/efa/efa_com_cmd.c b/drivers/infiniband/hw/efa/efa_com_cmd.c
index c079f1332082..e20bd84a1014 100644
--- a/drivers/infiniband/hw/efa/efa_com_cmd.c
+++ b/drivers/infiniband/hw/efa/efa_com_cmd.c
@@ -230,8 +230,7 @@ int efa_com_register_mr(struct efa_com_dev *edev,
mr_cmd.flags |= params->page_shift &
EFA_ADMIN_REG_MR_CMD_PHYS_PAGE_SIZE_SHIFT_MASK;
mr_cmd.iova = params->iova;
- mr_cmd.permissions |= params->permissions &
- EFA_ADMIN_REG_MR_CMD_LOCAL_WRITE_ENABLE_MASK;
+ mr_cmd.permissions = params->permissions;
if (params->inline_pbl) {
memcpy(mr_cmd.pbl.inline_pbl_array,
@@ -423,28 +422,6 @@ static int efa_com_get_feature(struct efa_com_dev *edev,
return efa_com_get_feature_ex(edev, get_resp, feature_id, 0, 0);
}
-int efa_com_get_network_attr(struct efa_com_dev *edev,
- struct efa_com_get_network_attr_result *result)
-{
- struct efa_admin_get_feature_resp resp;
- int err;
-
- err = efa_com_get_feature(edev, &resp,
- EFA_ADMIN_NETWORK_ATTR);
- if (err) {
- ibdev_err_ratelimited(edev->efa_dev,
- "Failed to get network attributes %d\n",
- err);
- return err;
- }
-
- memcpy(result->addr, resp.u.network_attr.addr,
- sizeof(resp.u.network_attr.addr));
- result->mtu = resp.u.network_attr.mtu;
-
- return 0;
-}
-
int efa_com_get_device_attr(struct efa_com_dev *edev,
struct efa_com_get_device_attr_result *result)
{
@@ -467,6 +444,8 @@ int efa_com_get_device_attr(struct efa_com_dev *edev,
result->phys_addr_width = resp.u.device_attr.phys_addr_width;
result->virt_addr_width = resp.u.device_attr.virt_addr_width;
result->db_bar = resp.u.device_attr.db_bar;
+ result->max_rdma_size = resp.u.device_attr.max_rdma_size;
+ result->device_caps = resp.u.device_attr.device_caps;
if (result->admin_api_version < 1) {
ibdev_err_ratelimited(
@@ -500,6 +479,19 @@ int efa_com_get_device_attr(struct efa_com_dev *edev,
result->max_ah = resp.u.queue_attr.max_ah;
result->max_llq_size = resp.u.queue_attr.max_llq_size;
result->sub_cqs_per_cq = resp.u.queue_attr.sub_cqs_per_cq;
+ result->max_wr_rdma_sge = resp.u.queue_attr.max_wr_rdma_sges;
+
+ err = efa_com_get_feature(edev, &resp, EFA_ADMIN_NETWORK_ATTR);
+ if (err) {
+ ibdev_err_ratelimited(edev->efa_dev,
+ "Failed to get network attributes %d\n",
+ err);
+ return err;
+ }
+
+ memcpy(result->addr, resp.u.network_attr.addr,
+ sizeof(resp.u.network_attr.addr));
+ result->mtu = resp.u.network_attr.mtu;
return 0;
}
diff --git a/drivers/infiniband/hw/efa/efa_com_cmd.h b/drivers/infiniband/hw/efa/efa_com_cmd.h
index 7f6c13052f49..31db5a0cbd5b 100644
--- a/drivers/infiniband/hw/efa/efa_com_cmd.h
+++ b/drivers/infiniband/hw/efa/efa_com_cmd.h
@@ -100,14 +100,11 @@ struct efa_com_destroy_ah_params {
u16 pdn;
};
-struct efa_com_get_network_attr_result {
- u8 addr[EFA_GID_SIZE];
- u32 mtu;
-};
-
struct efa_com_get_device_attr_result {
+ u8 addr[EFA_GID_SIZE];
u64 page_size_cap;
u64 max_mr_pages;
+ u32 mtu;
u32 fw_version;
u32 admin_api_version;
u32 device_version;
@@ -124,9 +121,12 @@ struct efa_com_get_device_attr_result {
u32 max_pd;
u32 max_ah;
u32 max_llq_size;
+ u32 max_rdma_size;
+ u32 device_caps;
u16 sub_cqs_per_cq;
u16 max_sq_sge;
u16 max_rq_sge;
+ u16 max_wr_rdma_sge;
u8 db_bar;
};
@@ -181,12 +181,7 @@ struct efa_com_reg_mr_params {
* address mapping
*/
u8 page_shift;
- /*
- * permissions
- * 0: local_write_enable - Write permissions: value of 1 needed
- * for RQ buffers and for RDMA write:1: reserved1 - remote
- * access flags, etc
- */
+ /* see permissions field of struct efa_admin_reg_mr_cmd */
u8 permissions;
u8 inline_pbl;
u8 indirect;
@@ -271,8 +266,6 @@ int efa_com_create_ah(struct efa_com_dev *edev,
struct efa_com_create_ah_result *result);
int efa_com_destroy_ah(struct efa_com_dev *edev,
struct efa_com_destroy_ah_params *params);
-int efa_com_get_network_attr(struct efa_com_dev *edev,
- struct efa_com_get_network_attr_result *result);
int efa_com_get_device_attr(struct efa_com_dev *edev,
struct efa_com_get_device_attr_result *result);
int efa_com_get_hw_hints(struct efa_com_dev *edev,
diff --git a/drivers/infiniband/hw/efa/efa_main.c b/drivers/infiniband/hw/efa/efa_main.c
index 83858f7e83d0..faf3ff1bca2a 100644
--- a/drivers/infiniband/hw/efa/efa_main.c
+++ b/drivers/infiniband/hw/efa/efa_main.c
@@ -30,15 +30,6 @@ MODULE_DEVICE_TABLE(pci, efa_pci_tbl);
(BIT(EFA_ADMIN_FATAL_ERROR) | BIT(EFA_ADMIN_WARNING) | \
BIT(EFA_ADMIN_NOTIFICATION) | BIT(EFA_ADMIN_KEEP_ALIVE))
-static void efa_update_network_attr(struct efa_dev *dev,
- struct efa_com_get_network_attr_result *network_attr)
-{
- memcpy(dev->addr, network_attr->addr, sizeof(network_attr->addr));
- dev->mtu = network_attr->mtu;
-
- dev_dbg(&dev->pdev->dev, "Full address %pI6\n", dev->addr);
-}
-
/* This handler will be called for an unknown event group or unimplemented handlers */
static void unimplemented_aenq_handler(void *data,
struct efa_admin_aenq_entry *aenq_e)
@@ -217,6 +208,7 @@ static const struct ib_device_ops efa_dev_ops = {
.get_link_layer = efa_port_link_layer,
.get_port_immutable = efa_get_port_immutable,
.mmap = efa_mmap,
+ .mmap_free = efa_mmap_free,
.modify_qp = efa_modify_qp,
.query_device = efa_query_device,
.query_gid = efa_query_gid,
@@ -233,7 +225,6 @@ static const struct ib_device_ops efa_dev_ops = {
static int efa_ib_device_add(struct efa_dev *dev)
{
- struct efa_com_get_network_attr_result network_attr;
struct efa_com_get_hw_hints_result hw_hints;
struct pci_dev *pdev = dev->pdev;
int err;
@@ -249,12 +240,6 @@ static int efa_ib_device_add(struct efa_dev *dev)
if (err)
return err;
- err = efa_com_get_network_attr(&dev->edev, &network_attr);
- if (err)
- goto err_release_doorbell_bar;
-
- efa_update_network_attr(dev, &network_attr);
-
err = efa_com_get_hw_hints(&dev->edev, &hw_hints);
if (err)
goto err_release_doorbell_bar;
diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c
index 4edae89e8e3c..c9d294caa27a 100644
--- a/drivers/infiniband/hw/efa/efa_verbs.c
+++ b/drivers/infiniband/hw/efa/efa_verbs.c
@@ -13,10 +13,6 @@
#include "efa.h"
-#define EFA_MMAP_FLAG_SHIFT 56
-#define EFA_MMAP_PAGE_MASK GENMASK(EFA_MMAP_FLAG_SHIFT - 1, 0)
-#define EFA_MMAP_INVALID U64_MAX
-
enum {
EFA_MMAP_DMA_PAGE = 0,
EFA_MMAP_IO_WC,
@@ -27,20 +23,12 @@ enum {
(BIT(EFA_ADMIN_FATAL_ERROR) | BIT(EFA_ADMIN_WARNING) | \
BIT(EFA_ADMIN_NOTIFICATION) | BIT(EFA_ADMIN_KEEP_ALIVE))
-struct efa_mmap_entry {
- void *obj;
+struct efa_user_mmap_entry {
+ struct rdma_user_mmap_entry rdma_entry;
u64 address;
- u64 length;
- u32 mmap_page;
u8 mmap_flag;
};
-static inline u64 get_mmap_key(const struct efa_mmap_entry *efa)
-{
- return ((u64)efa->mmap_flag << EFA_MMAP_FLAG_SHIFT) |
- ((u64)efa->mmap_page << PAGE_SHIFT);
-}
-
#define EFA_DEFINE_STATS(op) \
op(EFA_TX_BYTES, "tx_bytes") \
op(EFA_TX_PKTS, "tx_pkts") \
@@ -82,8 +70,6 @@ static const char *const efa_stats_names[] = {
#define EFA_CHUNK_USED_SIZE \
((EFA_PTRS_PER_CHUNK * EFA_CHUNK_PAYLOAD_PTR_SIZE) + EFA_CHUNK_PTR_SIZE)
-#define EFA_SUPPORTED_ACCESS_FLAGS IB_ACCESS_LOCAL_WRITE
-
struct pbl_chunk {
dma_addr_t dma_addr;
u64 *buf;
@@ -147,6 +133,17 @@ static inline struct efa_ah *to_eah(struct ib_ah *ibah)
return container_of(ibah, struct efa_ah, ibah);
}
+static inline struct efa_user_mmap_entry *
+to_emmap(struct rdma_user_mmap_entry *rdma_entry)
+{
+ return container_of(rdma_entry, struct efa_user_mmap_entry, rdma_entry);
+}
+
+static inline bool is_rdma_read_cap(struct efa_dev *dev)
+{
+ return dev->dev_attr.device_caps & EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_RDMA_READ_MASK;
+}
+
#define field_avail(x, fld, sz) (offsetof(typeof(x), fld) + \
FIELD_SIZEOF(typeof(x), fld) <= (sz))
@@ -172,106 +169,6 @@ static void *efa_zalloc_mapped(struct efa_dev *dev, dma_addr_t *dma_addr,
return addr;
}
-/*
- * This is only called when the ucontext is destroyed and there can be no
- * concurrent query via mmap or allocate on the xarray, thus we can be sure no
- * other thread is using the entry pointer. We also know that all the BAR
- * pages have either been zap'd or munmaped at this point. Normal pages are
- * refcounted and will be freed at the proper time.
- */
-static void mmap_entries_remove_free(struct efa_dev *dev,
- struct efa_ucontext *ucontext)
-{
- struct efa_mmap_entry *entry;
- unsigned long mmap_page;
-
- xa_for_each(&ucontext->mmap_xa, mmap_page, entry) {
- xa_erase(&ucontext->mmap_xa, mmap_page);
-
- ibdev_dbg(
- &dev->ibdev,
- "mmap: obj[0x%p] key[%#llx] addr[%#llx] len[%#llx] removed\n",
- entry->obj, get_mmap_key(entry), entry->address,
- entry->length);
- if (entry->mmap_flag == EFA_MMAP_DMA_PAGE)
- /* DMA mapping is already gone, now free the pages */
- free_pages_exact(phys_to_virt(entry->address),
- entry->length);
- kfree(entry);
- }
-}
-
-static struct efa_mmap_entry *mmap_entry_get(struct efa_dev *dev,
- struct efa_ucontext *ucontext,
- u64 key, u64 len)
-{
- struct efa_mmap_entry *entry;
- u64 mmap_page;
-
- mmap_page = (key & EFA_MMAP_PAGE_MASK) >> PAGE_SHIFT;
- if (mmap_page > U32_MAX)
- return NULL;
-
- entry = xa_load(&ucontext->mmap_xa, mmap_page);
- if (!entry || get_mmap_key(entry) != key || entry->length != len)
- return NULL;
-
- ibdev_dbg(&dev->ibdev,
- "mmap: obj[0x%p] key[%#llx] addr[%#llx] len[%#llx] removed\n",
- entry->obj, key, entry->address, entry->length);
-
- return entry;
-}
-
-/*
- * Note this locking scheme cannot support removal of entries, except during
- * ucontext destruction when the core code guarantees no concurrency.
- */
-static u64 mmap_entry_insert(struct efa_dev *dev, struct efa_ucontext *ucontext,
- void *obj, u64 address, u64 length, u8 mmap_flag)
-{
- struct efa_mmap_entry *entry;
- u32 next_mmap_page;
- int err;
-
- entry = kmalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry)
- return EFA_MMAP_INVALID;
-
- entry->obj = obj;
- entry->address = address;
- entry->length = length;
- entry->mmap_flag = mmap_flag;
-
- xa_lock(&ucontext->mmap_xa);
- if (check_add_overflow(ucontext->mmap_xa_page,
- (u32)(length >> PAGE_SHIFT),
- &next_mmap_page))
- goto err_unlock;
-
- entry->mmap_page = ucontext->mmap_xa_page;
- ucontext->mmap_xa_page = next_mmap_page;
- err = __xa_insert(&ucontext->mmap_xa, entry->mmap_page, entry,
- GFP_KERNEL);
- if (err)
- goto err_unlock;
-
- xa_unlock(&ucontext->mmap_xa);
-
- ibdev_dbg(
- &dev->ibdev,
- "mmap: obj[0x%p] addr[%#llx], len[%#llx], key[%#llx] inserted\n",
- entry->obj, entry->address, entry->length, get_mmap_key(entry));
-
- return get_mmap_key(entry);
-
-err_unlock:
- xa_unlock(&ucontext->mmap_xa);
- kfree(entry);
- return EFA_MMAP_INVALID;
-
-}
-
int efa_query_device(struct ib_device *ibdev,
struct ib_device_attr *props,
struct ib_udata *udata)
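
The roughly one hundred lines removed above were EFA's private mmap bookkeeping: an xarray of driver-defined keys with hand-rolled key encoding, lookup, and teardown. The remainder of the patch moves the driver onto the core rdma_user_mmap_entry API, which owns key (page-offset) allocation, lookup by vm_pgoff, refcounting, and end-of-life handling via the new .mmap_free hook. The producer side, condensed from the efa_user_mmap_entry_insert() helper added further down (example_* names are placeholders):

#include <linux/slab.h>
#include <rdma/ib_verbs.h>

struct example_mmap_entry {
        struct rdma_user_mmap_entry rdma_entry; /* core-managed part */
        u64 address;                            /* driver payload */
        u8 mmap_flag;
};

/* Sketch: embed the core entry, insert it, and hand the core-assigned
 * page offset back to userspace as the future mmap(2) offset. */
static struct rdma_user_mmap_entry *
example_entry_insert(struct ib_ucontext *ucontext, u64 address,
                     size_t length, u8 mmap_flag, u64 *offset)
{
        struct example_mmap_entry *entry;

        entry = kzalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry)
                return NULL;

        entry->address = address;
        entry->mmap_flag = mmap_flag;
        if (rdma_user_mmap_entry_insert(ucontext, &entry->rdma_entry,
                                        length)) {
                kfree(entry);
                return NULL;
        }
        *offset = rdma_user_mmap_get_offset(&entry->rdma_entry);
        return &entry->rdma_entry;
}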
@@ -306,12 +203,17 @@ int efa_query_device(struct ib_device *ibdev,
dev_attr->max_rq_depth);
props->max_send_sge = dev_attr->max_sq_sge;
props->max_recv_sge = dev_attr->max_rq_sge;
+ props->max_sge_rd = dev_attr->max_wr_rdma_sge;
if (udata && udata->outlen) {
resp.max_sq_sge = dev_attr->max_sq_sge;
resp.max_rq_sge = dev_attr->max_rq_sge;
resp.max_sq_wr = dev_attr->max_sq_depth;
resp.max_rq_wr = dev_attr->max_rq_depth;
+ resp.max_rdma_size = dev_attr->max_rdma_size;
+
+ if (is_rdma_read_cap(dev))
+ resp.device_caps |= EFA_QUERY_DEVICE_CAPS_RDMA_READ;
err = ib_copy_to_udata(udata, &resp,
min(sizeof(resp), udata->outlen));
@@ -338,9 +240,9 @@ int efa_query_port(struct ib_device *ibdev, u8 port,
props->pkey_tbl_len = 1;
props->active_speed = IB_SPEED_EDR;
props->active_width = IB_WIDTH_4X;
- props->max_mtu = ib_mtu_int_to_enum(dev->mtu);
- props->active_mtu = ib_mtu_int_to_enum(dev->mtu);
- props->max_msg_sz = dev->mtu;
+ props->max_mtu = ib_mtu_int_to_enum(dev->dev_attr.mtu);
+ props->active_mtu = ib_mtu_int_to_enum(dev->dev_attr.mtu);
+ props->max_msg_sz = dev->dev_attr.mtu;
props->max_vl_num = 1;
return 0;
@@ -401,7 +303,7 @@ int efa_query_gid(struct ib_device *ibdev, u8 port, int index,
{
struct efa_dev *dev = to_edev(ibdev);
- memcpy(gid->raw, dev->addr, sizeof(dev->addr));
+ memcpy(gid->raw, dev->dev_attr.addr, sizeof(dev->dev_attr.addr));
return 0;
}
@@ -485,8 +387,19 @@ static int efa_destroy_qp_handle(struct efa_dev *dev, u32 qp_handle)
return efa_com_destroy_qp(&dev->edev, &params);
}
+static void efa_qp_user_mmap_entries_remove(struct efa_ucontext *uctx,
+ struct efa_qp *qp)
+{
+ rdma_user_mmap_entry_remove(qp->rq_mmap_entry);
+ rdma_user_mmap_entry_remove(qp->rq_db_mmap_entry);
+ rdma_user_mmap_entry_remove(qp->llq_desc_mmap_entry);
+ rdma_user_mmap_entry_remove(qp->sq_db_mmap_entry);
+}
+
int efa_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
{
+ struct efa_ucontext *ucontext = rdma_udata_to_drv_context(udata,
+ struct efa_ucontext, ibucontext);
struct efa_dev *dev = to_edev(ibqp->pd->device);
struct efa_qp *qp = to_eqp(ibqp);
int err;
@@ -505,61 +418,101 @@ int efa_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
DMA_TO_DEVICE);
}
+ efa_qp_user_mmap_entries_remove(ucontext, qp);
kfree(qp);
return 0;
}
+static struct rdma_user_mmap_entry*
+efa_user_mmap_entry_insert(struct ib_ucontext *ucontext,
+ u64 address, size_t length,
+ u8 mmap_flag, u64 *offset)
+{
+ struct efa_user_mmap_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ int err;
+
+ if (!entry)
+ return NULL;
+
+ entry->address = address;
+ entry->mmap_flag = mmap_flag;
+
+ err = rdma_user_mmap_entry_insert(ucontext, &entry->rdma_entry,
+ length);
+ if (err) {
+ kfree(entry);
+ return NULL;
+ }
+ *offset = rdma_user_mmap_get_offset(&entry->rdma_entry);
+
+ return &entry->rdma_entry;
+}
+
static int qp_mmap_entries_setup(struct efa_qp *qp,
struct efa_dev *dev,
struct efa_ucontext *ucontext,
struct efa_com_create_qp_params *params,
struct efa_ibv_create_qp_resp *resp)
{
- /*
- * Once an entry is inserted it might be mmapped, hence cannot be
- * cleaned up until dealloc_ucontext.
- */
- resp->sq_db_mmap_key =
- mmap_entry_insert(dev, ucontext, qp,
- dev->db_bar_addr + resp->sq_db_offset,
- PAGE_SIZE, EFA_MMAP_IO_NC);
- if (resp->sq_db_mmap_key == EFA_MMAP_INVALID)
+ size_t length;
+ u64 address;
+
+ address = dev->db_bar_addr + resp->sq_db_offset;
+ qp->sq_db_mmap_entry =
+ efa_user_mmap_entry_insert(&ucontext->ibucontext,
+ address,
+ PAGE_SIZE, EFA_MMAP_IO_NC,
+ &resp->sq_db_mmap_key);
+ if (!qp->sq_db_mmap_entry)
return -ENOMEM;
resp->sq_db_offset &= ~PAGE_MASK;
- resp->llq_desc_mmap_key =
- mmap_entry_insert(dev, ucontext, qp,
- dev->mem_bar_addr + resp->llq_desc_offset,
- PAGE_ALIGN(params->sq_ring_size_in_bytes +
- (resp->llq_desc_offset & ~PAGE_MASK)),
- EFA_MMAP_IO_WC);
- if (resp->llq_desc_mmap_key == EFA_MMAP_INVALID)
- return -ENOMEM;
+ address = dev->mem_bar_addr + resp->llq_desc_offset;
+ length = PAGE_ALIGN(params->sq_ring_size_in_bytes +
+ (resp->llq_desc_offset & ~PAGE_MASK));
+
+ qp->llq_desc_mmap_entry =
+ efa_user_mmap_entry_insert(&ucontext->ibucontext,
+ address, length,
+ EFA_MMAP_IO_WC,
+ &resp->llq_desc_mmap_key);
+ if (!qp->llq_desc_mmap_entry)
+ goto err_remove_mmap;
resp->llq_desc_offset &= ~PAGE_MASK;
if (qp->rq_size) {
- resp->rq_db_mmap_key =
- mmap_entry_insert(dev, ucontext, qp,
- dev->db_bar_addr + resp->rq_db_offset,
- PAGE_SIZE, EFA_MMAP_IO_NC);
- if (resp->rq_db_mmap_key == EFA_MMAP_INVALID)
- return -ENOMEM;
+ address = dev->db_bar_addr + resp->rq_db_offset;
+
+ qp->rq_db_mmap_entry =
+ efa_user_mmap_entry_insert(&ucontext->ibucontext,
+ address, PAGE_SIZE,
+ EFA_MMAP_IO_NC,
+ &resp->rq_db_mmap_key);
+ if (!qp->rq_db_mmap_entry)
+ goto err_remove_mmap;
resp->rq_db_offset &= ~PAGE_MASK;
- resp->rq_mmap_key =
- mmap_entry_insert(dev, ucontext, qp,
- virt_to_phys(qp->rq_cpu_addr),
- qp->rq_size, EFA_MMAP_DMA_PAGE);
- if (resp->rq_mmap_key == EFA_MMAP_INVALID)
- return -ENOMEM;
+ address = virt_to_phys(qp->rq_cpu_addr);
+ qp->rq_mmap_entry =
+ efa_user_mmap_entry_insert(&ucontext->ibucontext,
+ address, qp->rq_size,
+ EFA_MMAP_DMA_PAGE,
+ &resp->rq_mmap_key);
+ if (!qp->rq_mmap_entry)
+ goto err_remove_mmap;
resp->rq_mmap_size = qp->rq_size;
}
return 0;
+
+err_remove_mmap:
+ efa_qp_user_mmap_entries_remove(ucontext, qp);
+
+ return -ENOMEM;
}
static int efa_qp_validate_cap(struct efa_dev *dev,
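
qp_mmap_entries_setup() above now routes every failure through one err_remove_mmap label instead of returning -ENOMEM piecemeal. The single label can blindly remove all four entries because the qp is zero-allocated, so never-created entries are NULL, and rdma_user_mmap_entry_remove() is assumed to tolerate a NULL entry (it does in this kernel's core code). Reduced to its essentials:

#include <linux/types.h>
#include <rdma/ib_verbs.h>

struct example_qp {
        struct rdma_user_mmap_entry *first_entry;
        struct rdma_user_mmap_entry *second_entry;
};

struct rdma_user_mmap_entry *example_entry_insert(struct ib_ucontext *uctx);

/* Sketch: one unwind label regardless of how far setup got. */
static int example_entries_setup(struct example_qp *qp,
                                 struct ib_ucontext *uctx)
{
        qp->first_entry = example_entry_insert(uctx);
        if (!qp->first_entry)
                goto err_remove_mmap;

        qp->second_entry = example_entry_insert(uctx);
        if (!qp->second_entry)
                goto err_remove_mmap;

        return 0;

err_remove_mmap:
        /* NULL members are skipped harmlessly by the core helper. */
        rdma_user_mmap_entry_remove(qp->first_entry);
        rdma_user_mmap_entry_remove(qp->second_entry);
        return -ENOMEM;
}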
@@ -634,7 +587,6 @@ struct ib_qp *efa_create_qp(struct ib_pd *ibpd,
struct efa_dev *dev = to_edev(ibpd->device);
struct efa_ibv_create_qp_resp resp = {};
struct efa_ibv_create_qp cmd = {};
- bool rq_entry_inserted = false;
struct efa_ucontext *ucontext;
struct efa_qp *qp;
int err;
@@ -742,7 +694,6 @@ struct ib_qp *efa_create_qp(struct ib_pd *ibpd,
if (err)
goto err_destroy_qp;
- rq_entry_inserted = true;
qp->qp_handle = create_qp_resp.qp_handle;
qp->ibqp.qp_num = create_qp_resp.qp_num;
qp->ibqp.qp_type = init_attr->qp_type;
@@ -759,7 +710,7 @@ struct ib_qp *efa_create_qp(struct ib_pd *ibpd,
ibdev_dbg(&dev->ibdev,
"Failed to copy udata for qp[%u]\n",
create_qp_resp.qp_num);
- goto err_destroy_qp;
+ goto err_remove_mmap_entries;
}
}
@@ -767,13 +718,16 @@ struct ib_qp *efa_create_qp(struct ib_pd *ibpd,
return &qp->ibqp;
+err_remove_mmap_entries:
+ efa_qp_user_mmap_entries_remove(ucontext, qp);
err_destroy_qp:
efa_destroy_qp_handle(dev, create_qp_resp.qp_handle);
err_free_mapped:
if (qp->rq_size) {
dma_unmap_single(&dev->pdev->dev, qp->rq_dma_addr, qp->rq_size,
DMA_TO_DEVICE);
- if (!rq_entry_inserted)
+
+ if (!qp->rq_mmap_entry)
free_pages_exact(qp->rq_cpu_addr, qp->rq_size);
}
err_free_qp:
@@ -897,16 +851,18 @@ void efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
efa_destroy_cq_idx(dev, cq->cq_idx);
dma_unmap_single(&dev->pdev->dev, cq->dma_addr, cq->size,
DMA_FROM_DEVICE);
+ rdma_user_mmap_entry_remove(cq->mmap_entry);
}
static int cq_mmap_entries_setup(struct efa_dev *dev, struct efa_cq *cq,
struct efa_ibv_create_cq_resp *resp)
{
resp->q_mmap_size = cq->size;
- resp->q_mmap_key = mmap_entry_insert(dev, cq->ucontext, cq,
- virt_to_phys(cq->cpu_addr),
- cq->size, EFA_MMAP_DMA_PAGE);
- if (resp->q_mmap_key == EFA_MMAP_INVALID)
+ cq->mmap_entry = efa_user_mmap_entry_insert(&cq->ucontext->ibucontext,
+ virt_to_phys(cq->cpu_addr),
+ cq->size, EFA_MMAP_DMA_PAGE,
+ &resp->q_mmap_key);
+ if (!cq->mmap_entry)
return -ENOMEM;
return 0;
@@ -924,7 +880,6 @@ int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
struct efa_dev *dev = to_edev(ibdev);
struct efa_ibv_create_cq cmd = {};
struct efa_cq *cq = to_ecq(ibcq);
- bool cq_entry_inserted = false;
int entries = attr->cqe;
int err;
@@ -1013,15 +968,13 @@ int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
goto err_destroy_cq;
}
- cq_entry_inserted = true;
-
if (udata->outlen) {
err = ib_copy_to_udata(udata, &resp,
min(sizeof(resp), udata->outlen));
if (err) {
ibdev_dbg(ibdev,
"Failed to copy udata for create_cq\n");
- goto err_destroy_cq;
+ goto err_remove_mmap;
}
}
@@ -1030,13 +983,16 @@ int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
return 0;
+err_remove_mmap:
+ rdma_user_mmap_entry_remove(cq->mmap_entry);
err_destroy_cq:
efa_destroy_cq_idx(dev, cq->cq_idx);
err_free_mapped:
dma_unmap_single(&dev->pdev->dev, cq->dma_addr, cq->size,
DMA_FROM_DEVICE);
- if (!cq_entry_inserted)
+ if (!cq->mmap_entry)
free_pages_exact(cq->cpu_addr, cq->size);
+
err_out:
atomic64_inc(&dev->stats.sw_stats.create_cq_err);
return err;
@@ -1396,6 +1352,7 @@ struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
struct efa_com_reg_mr_params params = {};
struct efa_com_reg_mr_result result = {};
struct pbl_context pbl;
+ int supp_access_flags;
unsigned int pg_sz;
struct efa_mr *mr;
int inline_size;
@@ -1409,10 +1366,14 @@ struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
goto err_out;
}
- if (access_flags & ~EFA_SUPPORTED_ACCESS_FLAGS) {
+ supp_access_flags =
+ IB_ACCESS_LOCAL_WRITE |
+ (is_rdma_read_cap(dev) ? IB_ACCESS_REMOTE_READ : 0);
+
+ if (access_flags & ~supp_access_flags) {
ibdev_dbg(&dev->ibdev,
"Unsupported access flags[%#x], supported[%#x]\n",
- access_flags, EFA_SUPPORTED_ACCESS_FLAGS);
+ access_flags, supp_access_flags);
err = -EOPNOTSUPP;
goto err_out;
}
@@ -1423,7 +1384,7 @@ struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
goto err_out;
}
- mr->umem = ib_umem_get(udata, start, length, access_flags, 0);
+ mr->umem = ib_umem_get(udata, start, length, access_flags);
if (IS_ERR(mr->umem)) {
err = PTR_ERR(mr->umem);
ibdev_dbg(&dev->ibdev,
@@ -1434,7 +1395,7 @@ struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
params.pd = to_epd(ibpd)->pdn;
params.iova = virt_addr;
params.mr_length_in_bytes = length;
- params.permissions = access_flags & 0x1;
+ params.permissions = access_flags;
pg_sz = ib_umem_find_best_pgsz(mr->umem,
dev->dev_attr.page_size_cap,
@@ -1556,7 +1517,6 @@ int efa_alloc_ucontext(struct ib_ucontext *ibucontext, struct ib_udata *udata)
goto err_out;
ucontext->uarn = result.uarn;
- xa_init(&ucontext->mmap_xa);
resp.cmds_supp_udata_mask |= EFA_USER_CMDS_SUPP_UDATA_QUERY_DEVICE;
resp.cmds_supp_udata_mask |= EFA_USER_CMDS_SUPP_UDATA_CREATE_AH;
@@ -1585,38 +1545,56 @@ void efa_dealloc_ucontext(struct ib_ucontext *ibucontext)
struct efa_ucontext *ucontext = to_eucontext(ibucontext);
struct efa_dev *dev = to_edev(ibucontext->device);
- mmap_entries_remove_free(dev, ucontext);
efa_dealloc_uar(dev, ucontext->uarn);
}
+void efa_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
+{
+ struct efa_user_mmap_entry *entry = to_emmap(rdma_entry);
+
+ /* DMA mapping is already gone, now free the pages */
+ if (entry->mmap_flag == EFA_MMAP_DMA_PAGE)
+ free_pages_exact(phys_to_virt(entry->address),
+ entry->rdma_entry.npages * PAGE_SIZE);
+ kfree(entry);
+}
+
static int __efa_mmap(struct efa_dev *dev, struct efa_ucontext *ucontext,
- struct vm_area_struct *vma, u64 key, u64 length)
+ struct vm_area_struct *vma)
{
- struct efa_mmap_entry *entry;
+ struct rdma_user_mmap_entry *rdma_entry;
+ struct efa_user_mmap_entry *entry;
unsigned long va;
+ int err = 0;
u64 pfn;
- int err;
- entry = mmap_entry_get(dev, ucontext, key, length);
- if (!entry) {
- ibdev_dbg(&dev->ibdev, "key[%#llx] does not have valid entry\n",
- key);
+ rdma_entry = rdma_user_mmap_entry_get(&ucontext->ibucontext, vma);
+ if (!rdma_entry) {
+ ibdev_dbg(&dev->ibdev,
+ "pgoff[%#lx] does not have valid entry\n",
+ vma->vm_pgoff);
return -EINVAL;
}
+ entry = to_emmap(rdma_entry);
ibdev_dbg(&dev->ibdev,
- "Mapping address[%#llx], length[%#llx], mmap_flag[%d]\n",
- entry->address, length, entry->mmap_flag);
+ "Mapping address[%#llx], length[%#zx], mmap_flag[%d]\n",
+ entry->address, rdma_entry->npages * PAGE_SIZE,
+ entry->mmap_flag);
pfn = entry->address >> PAGE_SHIFT;
switch (entry->mmap_flag) {
case EFA_MMAP_IO_NC:
- err = rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn, length,
- pgprot_noncached(vma->vm_page_prot));
+ err = rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn,
+ entry->rdma_entry.npages * PAGE_SIZE,
+ pgprot_noncached(vma->vm_page_prot),
+ rdma_entry);
break;
case EFA_MMAP_IO_WC:
- err = rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn, length,
- pgprot_writecombine(vma->vm_page_prot));
+ err = rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn,
+ entry->rdma_entry.npages * PAGE_SIZE,
+ pgprot_writecombine(vma->vm_page_prot),
+ rdma_entry);
break;
case EFA_MMAP_DMA_PAGE:
for (va = vma->vm_start; va < vma->vm_end;
@@ -1633,12 +1611,13 @@ static int __efa_mmap(struct efa_dev *dev, struct efa_ucontext *ucontext,
if (err) {
ibdev_dbg(
&dev->ibdev,
- "Couldn't mmap address[%#llx] length[%#llx] mmap_flag[%d] err[%d]\n",
- entry->address, length, entry->mmap_flag, err);
- return err;
+ "Couldn't mmap address[%#llx] length[%#zx] mmap_flag[%d] err[%d]\n",
+ entry->address, rdma_entry->npages * PAGE_SIZE,
+ entry->mmap_flag, err);
}
- return 0;
+ rdma_user_mmap_entry_put(rdma_entry);
+ return err;
}
int efa_mmap(struct ib_ucontext *ibucontext,
@@ -1646,26 +1625,13 @@ int efa_mmap(struct ib_ucontext *ibucontext,
{
struct efa_ucontext *ucontext = to_eucontext(ibucontext);
struct efa_dev *dev = to_edev(ibucontext->device);
- u64 length = vma->vm_end - vma->vm_start;
- u64 key = vma->vm_pgoff << PAGE_SHIFT;
+ size_t length = vma->vm_end - vma->vm_start;
ibdev_dbg(&dev->ibdev,
- "start %#lx, end %#lx, length = %#llx, key = %#llx\n",
- vma->vm_start, vma->vm_end, length, key);
-
- if (length % PAGE_SIZE != 0 || !(vma->vm_flags & VM_SHARED)) {
- ibdev_dbg(&dev->ibdev,
- "length[%#llx] is not page size aligned[%#lx] or VM_SHARED is not set [%#lx]\n",
- length, PAGE_SIZE, vma->vm_flags);
- return -EINVAL;
- }
-
- if (vma->vm_flags & VM_EXEC) {
- ibdev_dbg(&dev->ibdev, "Mapping executable pages is not permitted\n");
- return -EPERM;
- }
+ "start %#lx, end %#lx, length = %#zx, pgoff = %#lx\n",
+ vma->vm_start, vma->vm_end, length, vma->vm_pgoff);
- return __efa_mmap(dev, ucontext, vma, key, length);
+ return __efa_mmap(dev, ucontext, vma);
}
static int efa_ah_destroy(struct efa_dev *dev, struct efa_ah *ah)
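
On the consumer side, the reworked __efa_mmap() above resolves vma->vm_pgoff through rdma_user_mmap_entry_get(), which takes a reference, so every exit path must balance it with rdma_user_mmap_entry_put(); note how the rework funnels both success and failure through a single put-then-return tail. A minimal .mmap callback in the same shape (entry struct as in the earlier sketch; the non-cached I/O case is chosen for illustration):

#include <linux/mm.h>
#include <rdma/ib_verbs.h>

struct example_mmap_entry {
        struct rdma_user_mmap_entry rdma_entry;
        u64 address;
};

/* Sketch: look up by pgoff, map, then drop the lookup reference on
 * every path. */
static int example_mmap(struct ib_ucontext *ucontext,
                        struct vm_area_struct *vma)
{
        struct rdma_user_mmap_entry *rdma_entry;
        struct example_mmap_entry *entry;
        int err;

        rdma_entry = rdma_user_mmap_entry_get(ucontext, vma);
        if (!rdma_entry)
                return -EINVAL; /* offset never handed to userspace */
        entry = container_of(rdma_entry, struct example_mmap_entry,
                             rdma_entry);

        err = rdma_user_mmap_io(ucontext, vma,
                                entry->address >> PAGE_SHIFT,
                                rdma_entry->npages * PAGE_SIZE,
                                pgprot_noncached(vma->vm_page_prot),
                                rdma_entry);

        rdma_user_mmap_entry_put(rdma_entry); /* balance the _get() */
        return err;
}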
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
index 71cb9525c074..26b792bb1027 100644
--- a/drivers/infiniband/hw/hfi1/init.c
+++ b/drivers/infiniband/hw/hfi1/init.c
@@ -1489,7 +1489,6 @@ static int __init hfi1_mod_init(void)
goto bail_dev;
}
- hfi1_compute_tid_rdma_flow_wt();
/*
* These must be called before the driver is registered with
* the PCI subsystem.
diff --git a/drivers/infiniband/hw/hfi1/mad.c b/drivers/infiniband/hw/hfi1/mad.c
index d8ff063a5419..a51bcd2b4391 100644
--- a/drivers/infiniband/hw/hfi1/mad.c
+++ b/drivers/infiniband/hw/hfi1/mad.c
@@ -4915,16 +4915,11 @@ static int hfi1_process_ib_mad(struct ib_device *ibdev, int mad_flags, u8 port,
*/
int hfi1_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,
const struct ib_wc *in_wc, const struct ib_grh *in_grh,
- const struct ib_mad_hdr *in_mad, size_t in_mad_size,
- struct ib_mad_hdr *out_mad, size_t *out_mad_size,
- u16 *out_mad_pkey_index)
+ const struct ib_mad *in_mad, struct ib_mad *out_mad,
+ size_t *out_mad_size, u16 *out_mad_pkey_index)
{
- switch (in_mad->base_version) {
+ switch (in_mad->mad_hdr.base_version) {
case OPA_MGMT_BASE_VERSION:
- if (unlikely(in_mad_size != sizeof(struct opa_mad))) {
- dev_err(ibdev->dev.parent, "invalid in_mad_size\n");
- return IB_MAD_RESULT_FAILURE;
- }
return hfi1_process_opa_mad(ibdev, mad_flags, port,
in_wc, in_grh,
(struct opa_mad *)in_mad,
@@ -4932,10 +4927,8 @@ int hfi1_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,
out_mad_size,
out_mad_pkey_index);
case IB_MGMT_BASE_VERSION:
- return hfi1_process_ib_mad(ibdev, mad_flags, port,
- in_wc, in_grh,
- (const struct ib_mad *)in_mad,
- (struct ib_mad *)out_mad);
+ return hfi1_process_ib_mad(ibdev, mad_flags, port, in_wc,
+ in_grh, in_mad, out_mad);
default:
break;
}
diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c
index 61aa5504d7c3..61362bd6d3ce 100644
--- a/drivers/infiniband/hw/hfi1/pcie.c
+++ b/drivers/infiniband/hw/hfi1/pcie.c
@@ -319,7 +319,9 @@ int pcie_speeds(struct hfi1_devdata *dd)
/*
* bus->max_bus_speed is set from the bridge's linkcap Max Link Speed
*/
- if (parent && dd->pcidev->bus->max_bus_speed != PCIE_SPEED_8_0GT) {
+ if (parent &&
+ (dd->pcidev->bus->max_bus_speed == PCIE_SPEED_2_5GT ||
+ dd->pcidev->bus->max_bus_speed == PCIE_SPEED_5_0GT)) {
dd_dev_info(dd, "Parent PCIe bridge does not support Gen3\n");
dd->link_gen3_capable = 0;
}
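
The pcie.c fix above is about future hardware rather than current behavior: testing "max_bus_speed != 8.0 GT/s" was meant to detect pre-Gen3 bridges, but it also fires for Gen4-and-faster ones (16+ GT/s), wrongly clearing link_gen3_capable. Enumerating the speeds that actually preclude Gen3 avoids that. A standalone illustration (enum values illustrative, not the kernel's):

#include <stdbool.h>
#include <stdio.h>

enum pci_bus_speed {    /* subset of the kernel enum, order only */
        PCIE_SPEED_2_5GT, PCIE_SPEED_5_0GT,
        PCIE_SPEED_8_0GT, PCIE_SPEED_16_0GT,
};

/* Old predicate: wrongly flags a Gen4 bridge as lacking Gen3. */
static bool old_lacks_gen3(enum pci_bus_speed s)
{
        return s != PCIE_SPEED_8_0GT;
}

/* New predicate: only speeds strictly below Gen3 preclude it. */
static bool new_lacks_gen3(enum pci_bus_speed s)
{
        return s == PCIE_SPEED_2_5GT || s == PCIE_SPEED_5_0GT;
}

int main(void)
{
        printf("Gen4 bridge: old=%d new=%d\n",
               old_lacks_gen3(PCIE_SPEED_16_0GT),   /* 1: wrong */
               new_lacks_gen3(PCIE_SPEED_16_0GT));  /* 0: correct */
        return 0;
}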
diff --git a/drivers/infiniband/hw/hfi1/platform.c b/drivers/infiniband/hw/hfi1/platform.c
index cbf7faa5038c..36593f2efe26 100644
--- a/drivers/infiniband/hw/hfi1/platform.c
+++ b/drivers/infiniband/hw/hfi1/platform.c
@@ -634,7 +634,7 @@ static void apply_tx_lanes(struct hfi1_pportdata *ppd, u8 field_id,
u32 config_data, const char *message)
{
u8 i;
- int ret = HCMD_SUCCESS;
+ int ret;
for (i = 0; i < 4; i++) {
ret = load_8051_config(ppd->dd, field_id, i, config_data);
diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
index 513a8aac9ccd..1a3c647675a7 100644
--- a/drivers/infiniband/hw/hfi1/rc.c
+++ b/drivers/infiniband/hw/hfi1/rc.c
@@ -2209,15 +2209,15 @@ int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
if (qp->s_flags & RVT_S_WAIT_RNR)
goto bail_stop;
rdi = ib_to_rvt(qp->ibqp.device);
- if (qp->s_rnr_retry == 0 &&
- !((rdi->post_parms[wqe->wr.opcode].flags &
- RVT_OPERATION_IGN_RNR_CNT) &&
- qp->s_rnr_retry_cnt == 0)) {
- status = IB_WC_RNR_RETRY_EXC_ERR;
- goto class_b;
+ if (!(rdi->post_parms[wqe->wr.opcode].flags &
+ RVT_OPERATION_IGN_RNR_CNT)) {
+ if (qp->s_rnr_retry == 0) {
+ status = IB_WC_RNR_RETRY_EXC_ERR;
+ goto class_b;
+ }
+ if (qp->s_rnr_retry_cnt < 7 && qp->s_rnr_retry_cnt > 0)
+ qp->s_rnr_retry--;
}
- if (qp->s_rnr_retry_cnt < 7 && qp->s_rnr_retry_cnt > 0)
- qp->s_rnr_retry--;
/*
* The last valid PSN is the previous PSN. For TID RDMA WRITE
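
The rc.c rework above changes which operations pay RNR retry bookkeeping: previously an operation flagged RVT_OPERATION_IGN_RNR_CNT escaped the retry-exhausted error only when its configured retry count was zero, and its retry counter was decremented regardless; now the flag bypasses both the exhaustion check and the decrement. The corrected control flow in isolation (a retry count of 7 encodes unlimited retries in IB, hence the bounds on the decrement):

#include <stdbool.h>

/* Sketch: count (and exhaust) retries only when the operation does not
 * ignore the RNR retry count. */
static int example_handle_rnr(bool ignores_rnr_cnt,
                              unsigned int *rnr_retry,
                              unsigned int rnr_retry_cnt)
{
        if (!ignores_rnr_cnt) {
                if (*rnr_retry == 0)
                        return -1;       /* IB_WC_RNR_RETRY_EXC_ERR path */
                if (rnr_retry_cnt < 7 && rnr_retry_cnt > 0)
                        *rnr_retry -= 1; /* 7 means retry forever */
        }
        return 0;
}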
diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
index c61b6022575e..5774dfc22e18 100644
--- a/drivers/infiniband/hw/hfi1/sdma.c
+++ b/drivers/infiniband/hw/hfi1/sdma.c
@@ -881,8 +881,8 @@ struct sdma_engine *sdma_select_user_engine(struct hfi1_devdata *dd,
cpu_id = smp_processor_id();
rcu_read_lock();
- rht_node = rhashtable_lookup_fast(dd->sdma_rht, &cpu_id,
- sdma_rht_params);
+ rht_node = rhashtable_lookup(dd->sdma_rht, &cpu_id,
+ sdma_rht_params);
if (rht_node && rht_node->map[vl]) {
struct sdma_rht_map_elem *map = rht_node->map[vl];
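
The sdma.c hunk above is a small cleanup with a locking rationale: rhashtable_lookup_fast() takes the RCU read lock internally, while plain rhashtable_lookup() expects the caller to already hold it, which sdma_select_user_engine() does (see the rcu_read_lock() in the context). Using the plain variant drops a redundant nested lock/unlock pair. The contract in miniature:

#include <linux/rcupdate.h>
#include <linux/rhashtable.h>

/* Sketch: inside an existing RCU read-side section the plain lookup is
 * the right call; _fast() is for callers not already under
 * rcu_read_lock(). Table and params are assumed configured elsewhere. */
static bool example_cpu_has_mapping(struct rhashtable *ht, u32 cpu_id,
                                    struct rhashtable_params params)
{
        void *node;
        bool found;

        rcu_read_lock();
        node = rhashtable_lookup(ht, &cpu_id, params); /* caller holds RCU */
        found = node != NULL;
        rcu_read_unlock();
        return found;
}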
diff --git a/drivers/infiniband/hw/hfi1/tid_rdma.c b/drivers/infiniband/hw/hfi1/tid_rdma.c
index f21fca3617d5..e53f542b60af 100644
--- a/drivers/infiniband/hw/hfi1/tid_rdma.c
+++ b/drivers/infiniband/hw/hfi1/tid_rdma.c
@@ -107,8 +107,6 @@ static u32 mask_generation(u32 a)
* C - Capcode
*/
-static u32 tid_rdma_flow_wt;
-
static void tid_rdma_trigger_resume(struct work_struct *work);
static void hfi1_kern_exp_rcv_free_flows(struct tid_rdma_request *req);
static int hfi1_kern_exp_rcv_alloc_flows(struct tid_rdma_request *req,
@@ -136,6 +134,26 @@ static void update_r_next_psn_fecn(struct hfi1_packet *packet,
struct tid_rdma_flow *flow,
bool fecn);
+static void validate_r_tid_ack(struct hfi1_qp_priv *priv)
+{
+ if (priv->r_tid_ack == HFI1_QP_WQE_INVALID)
+ priv->r_tid_ack = priv->r_tid_tail;
+}
+
+static void tid_rdma_schedule_ack(struct rvt_qp *qp)
+{
+ struct hfi1_qp_priv *priv = qp->priv;
+
+ priv->s_flags |= RVT_S_ACK_PENDING;
+ hfi1_schedule_tid_send(qp);
+}
+
+static void tid_rdma_trigger_ack(struct rvt_qp *qp)
+{
+ validate_r_tid_ack(qp->priv);
+ tid_rdma_schedule_ack(qp);
+}
+
static u64 tid_rdma_opfn_encode(struct tid_rdma_params *p)
{
return
@@ -3005,10 +3023,7 @@ nak_psn:
qpriv->s_nak_state = IB_NAK_PSN_ERROR;
/* We are NAK'ing the next expected PSN */
qpriv->s_nak_psn = mask_psn(flow->flow_state.r_next_psn);
- qpriv->s_flags |= RVT_S_ACK_PENDING;
- if (qpriv->r_tid_ack == HFI1_QP_WQE_INVALID)
- qpriv->r_tid_ack = qpriv->r_tid_tail;
- hfi1_schedule_tid_send(qp);
+ tid_rdma_trigger_ack(qp);
}
goto unlock;
}
@@ -3371,18 +3386,17 @@ u32 hfi1_build_tid_rdma_write_req(struct rvt_qp *qp, struct rvt_swqe *wqe,
return sizeof(ohdr->u.tid_rdma.w_req) / sizeof(u32);
}
-void hfi1_compute_tid_rdma_flow_wt(void)
+static u32 hfi1_compute_tid_rdma_flow_wt(struct rvt_qp *qp)
{
/*
* Heuristic for computing the RNR timeout when waiting on the flow
 * queue. Rather than a computationally expensive exact estimate of when
* a flow will be available, we assume that if a QP is at position N in
* the flow queue it has to wait approximately (N + 1) * (number of
- * segments between two sync points), assuming PMTU of 4K. The rationale
- * for this is that flows are released and recycled at each sync point.
+ * segments between two sync points). The rationale for this is that
+ * flows are released and recycled at each sync point.
*/
- tid_rdma_flow_wt = MAX_TID_FLOW_PSN * enum_to_mtu(OPA_MTU_4096) /
- TID_RDMA_MAX_SEGMENT_SIZE;
+ return (MAX_TID_FLOW_PSN * qp->pmtu) >> TID_RDMA_SEGMENT_SHIFT;
}
static u32 position_in_queue(struct hfi1_qp_priv *qpriv,
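
The heuristic above used to be computed once at module load with a hardwired 4 KiB MTU (the init.c hunk earlier removes that call); it is now evaluated per QP, scaling the estimate by the QP's actual PMTU: weight = (MAX_TID_FLOW_PSN * pmtu) >> TID_RDMA_SEGMENT_SHIFT, roughly the number of 256 KiB segments one flow's PSN space covers at that packet size, later multiplied by the QP's position in the flow queue. Worked through as a standalone computation (MAX_TID_FLOW_PSN value illustrative, not the driver's):

#include <stdint.h>
#include <stdio.h>

#define TID_RDMA_SEGMENT_SHIFT 18      /* 256 KiB segments, per tid_rdma.h */
#define MAX_TID_FLOW_PSN 1024          /* illustrative placeholder value */

/* Per-position wait estimate, in segments, at the QP's actual PMTU. */
static uint32_t flow_wait(uint32_t pmtu, uint32_t queue_pos)
{
        uint32_t wt = ((uint64_t)MAX_TID_FLOW_PSN * pmtu)
                        >> TID_RDMA_SEGMENT_SHIFT;

        return wt * queue_pos;
}

int main(void)
{
        /* At 4 KiB PMTU the weight matches the old boot-time constant;
         * at 1 KiB it is 4x smaller, which the old code over-estimated. */
        printf("pmtu=4096: %u, pmtu=1024: %u\n",
               flow_wait(4096, 1), flow_wait(1024, 1));
        return 0;
}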
@@ -3505,7 +3519,7 @@ static void hfi1_tid_write_alloc_resources(struct rvt_qp *qp, bool intr_ctx)
if (qpriv->flow_state.index >= RXE_NUM_TID_FLOWS) {
ret = hfi1_kern_setup_hw_flow(qpriv->rcd, qp);
if (ret) {
- to_seg = tid_rdma_flow_wt *
+ to_seg = hfi1_compute_tid_rdma_flow_wt(qp) *
position_in_queue(qpriv,
&rcd->flow_queue);
break;
@@ -3526,7 +3540,7 @@ static void hfi1_tid_write_alloc_resources(struct rvt_qp *qp, bool intr_ctx)
/*
* If overtaking req->acked_tail, send an RNR NAK. Because the
* QP is not queued in this case, and the issue can only be
- * caused due a delay in scheduling the second leg which we
+ * caused by a delay in scheduling the second leg which we
* cannot estimate, we use a rather arbitrary RNR timeout of
* (MAX_FLOWS / 2) segments
*/
@@ -3534,8 +3548,7 @@ static void hfi1_tid_write_alloc_resources(struct rvt_qp *qp, bool intr_ctx)
MAX_FLOWS)) {
ret = -EAGAIN;
to_seg = MAX_FLOWS >> 1;
- qpriv->s_flags |= RVT_S_ACK_PENDING;
- hfi1_schedule_tid_send(qp);
+ tid_rdma_trigger_ack(qp);
break;
}
@@ -4335,8 +4348,7 @@ void hfi1_rc_rcv_tid_rdma_write_data(struct hfi1_packet *packet)
trace_hfi1_tid_req_rcv_write_data(qp, 0, e->opcode, e->psn, e->lpsn,
req);
trace_hfi1_tid_write_rsp_rcv_data(qp);
- if (priv->r_tid_ack == HFI1_QP_WQE_INVALID)
- priv->r_tid_ack = priv->r_tid_tail;
+ validate_r_tid_ack(priv);
if (opcode == TID_OP(WRITE_DATA_LAST)) {
release_rdma_sge_mr(e);
@@ -4375,8 +4387,7 @@ void hfi1_rc_rcv_tid_rdma_write_data(struct hfi1_packet *packet)
}
done:
- priv->s_flags |= RVT_S_ACK_PENDING;
- hfi1_schedule_tid_send(qp);
+ tid_rdma_schedule_ack(qp);
exit:
priv->r_next_psn_kdeth = flow->flow_state.r_next_psn;
if (fecn)
@@ -4388,10 +4399,7 @@ send_nak:
if (!priv->s_nak_state) {
priv->s_nak_state = IB_NAK_PSN_ERROR;
priv->s_nak_psn = flow->flow_state.r_next_psn;
- priv->s_flags |= RVT_S_ACK_PENDING;
- if (priv->r_tid_ack == HFI1_QP_WQE_INVALID)
- priv->r_tid_ack = priv->r_tid_tail;
- hfi1_schedule_tid_send(qp);
+ tid_rdma_trigger_ack(qp);
}
goto done;
}
@@ -4939,8 +4947,7 @@ void hfi1_rc_rcv_tid_rdma_resync(struct hfi1_packet *packet)
qpriv->resync = true;
/* RESYNC request always gets a TID RDMA ACK. */
qpriv->s_nak_state = 0;
- qpriv->s_flags |= RVT_S_ACK_PENDING;
- hfi1_schedule_tid_send(qp);
+ tid_rdma_trigger_ack(qp);
bail:
if (fecn)
qp->s_flags |= RVT_S_ECN;
diff --git a/drivers/infiniband/hw/hfi1/tid_rdma.h b/drivers/infiniband/hw/hfi1/tid_rdma.h
index 1c536185261e..6e82df2190b7 100644
--- a/drivers/infiniband/hw/hfi1/tid_rdma.h
+++ b/drivers/infiniband/hw/hfi1/tid_rdma.h
@@ -17,6 +17,7 @@
#define TID_RDMA_MIN_SEGMENT_SIZE BIT(18) /* 256 KiB (for now) */
#define TID_RDMA_MAX_SEGMENT_SIZE BIT(18) /* 256 KiB (for now) */
#define TID_RDMA_MAX_PAGES (BIT(18) >> PAGE_SHIFT)
+#define TID_RDMA_SEGMENT_SHIFT 18
/*
* Bit definitions for priv->s_flags.
@@ -274,8 +275,6 @@ u32 hfi1_build_tid_rdma_write_req(struct rvt_qp *qp, struct rvt_swqe *wqe,
struct ib_other_headers *ohdr,
u32 *bth1, u32 *bth2, u32 *len);
-void hfi1_compute_tid_rdma_flow_wt(void);
-
void hfi1_rc_rcv_tid_rdma_write_req(struct hfi1_packet *packet);
u32 hfi1_build_tid_rdma_write_resp(struct rvt_qp *qp, struct rvt_ack_entry *e,
diff --git a/drivers/infiniband/hw/hfi1/verbs.h b/drivers/infiniband/hw/hfi1/verbs.h
index ae9582ddbc8f..b0e9bf7cd150 100644
--- a/drivers/infiniband/hw/hfi1/verbs.h
+++ b/drivers/infiniband/hw/hfi1/verbs.h
@@ -330,9 +330,8 @@ void hfi1_sys_guid_chg(struct hfi1_ibport *ibp);
void hfi1_node_desc_chg(struct hfi1_ibport *ibp);
int hfi1_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,
const struct ib_wc *in_wc, const struct ib_grh *in_grh,
- const struct ib_mad_hdr *in_mad, size_t in_mad_size,
- struct ib_mad_hdr *out_mad, size_t *out_mad_size,
- u16 *out_mad_pkey_index);
+ const struct ib_mad *in_mad, struct ib_mad *out_mad,
+ size_t *out_mad_size, u16 *out_mad_pkey_index);
/*
* The PSN_MASK and PSN_SHIFT allow for
diff --git a/drivers/infiniband/hw/hns/Kconfig b/drivers/infiniband/hw/hns/Kconfig
index d602b698b57e..4921c1e40ccd 100644
--- a/drivers/infiniband/hw/hns/Kconfig
+++ b/drivers/infiniband/hw/hns/Kconfig
@@ -1,23 +1,34 @@
# SPDX-License-Identifier: GPL-2.0-only
config INFINIBAND_HNS
- bool "HNS RoCE Driver"
+ tristate "HNS RoCE Driver"
depends on NET_VENDOR_HISILICON
depends on ARM64 || (COMPILE_TEST && 64BIT)
+ depends on (HNS_DSAF && HNS_ENET) || HNS3
---help---
This is a RoCE/RDMA driver for the Hisilicon RoCE engine. The engine
is used in Hisilicon Hip06 and later ICT SoCs based on the
platform device model.
+ To compile the HIP06 or HIP08 driver as a module, choose M here.
+
config INFINIBAND_HNS_HIP06
- tristate "Hisilicon Hip06 Family RoCE support"
+ bool "Hisilicon Hip06 Family RoCE support"
depends on INFINIBAND_HNS && HNS && HNS_DSAF && HNS_ENET
+ depends on INFINIBAND_HNS=m || (HNS_DSAF=y && HNS_ENET=y)
---help---
RoCE driver support for Hisilicon RoCE engine in Hisilicon Hip06 and
Hip07 SoC. These RoCE engines are platform devices.
+ To compile this driver, choose Y here: if INFINIBAND_HNS is m, this
+ module will be called hns-roce-hw-v1.
+
config INFINIBAND_HNS_HIP08
- tristate "Hisilicon Hip08 Family RoCE support"
+ bool "Hisilicon Hip08 Family RoCE support"
depends on INFINIBAND_HNS && PCI && HNS3
+ depends on INFINIBAND_HNS=m || HNS3=y
---help---
RoCE driver support for Hisilicon RoCE engine in Hisilicon Hip08 SoC.
The RoCE engine is a PCI device.
+
+ To compile this driver, choose Y here: if INFINIBAND_HNS is m, this
+ module will be called hns-roce-hw-v2.
diff --git a/drivers/infiniband/hw/hns/Makefile b/drivers/infiniband/hw/hns/Makefile
index 449a2d81319d..e105945b94a1 100644
--- a/drivers/infiniband/hw/hns/Makefile
+++ b/drivers/infiniband/hw/hns/Makefile
@@ -9,8 +9,12 @@ hns-roce-objs := hns_roce_main.o hns_roce_cmd.o hns_roce_pd.o \
hns_roce_ah.o hns_roce_hem.o hns_roce_mr.o hns_roce_qp.o \
hns_roce_cq.o hns_roce_alloc.o hns_roce_db.o hns_roce_srq.o hns_roce_restrack.o
+ifdef CONFIG_INFINIBAND_HNS_HIP06
hns-roce-hw-v1-objs := hns_roce_hw_v1.o $(hns-roce-objs)
-obj-$(CONFIG_INFINIBAND_HNS_HIP06) += hns-roce-hw-v1.o
+obj-$(CONFIG_INFINIBAND_HNS) += hns-roce-hw-v1.o
+endif
+ifdef CONFIG_INFINIBAND_HNS_HIP08
hns-roce-hw-v2-objs := hns_roce_hw_v2.o hns_roce_hw_v2_dfx.o $(hns-roce-objs)
-obj-$(CONFIG_INFINIBAND_HNS_HIP08) += hns-roce-hw-v2.o
+obj-$(CONFIG_INFINIBAND_HNS) += hns-roce-hw-v2.o
+endif
diff --git a/drivers/infiniband/hw/hns/hns_roce_ah.c b/drivers/infiniband/hw/hns/hns_roce_ah.c
index 90e08c0c332d..8a522e14ef62 100644
--- a/drivers/infiniband/hw/hns/hns_roce_ah.c
+++ b/drivers/infiniband/hw/hns/hns_roce_ah.c
@@ -46,32 +46,32 @@ int hns_roce_create_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr,
const struct ib_gid_attr *gid_attr;
struct device *dev = hr_dev->dev;
struct hns_roce_ah *ah = to_hr_ah(ibah);
- u16 vlan_tag = 0xffff;
const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
+ u16 vlan_id = 0xffff;
bool vlan_en = false;
int ret;
gid_attr = ah_attr->grh.sgid_attr;
- ret = rdma_read_gid_l2_fields(gid_attr, &vlan_tag, NULL);
+ ret = rdma_read_gid_l2_fields(gid_attr, &vlan_id, NULL);
if (ret)
return ret;
/* Get mac address */
memcpy(ah->av.mac, ah_attr->roce.dmac, ETH_ALEN);
- if (vlan_tag < VLAN_CFI_MASK) {
+ if (vlan_id < VLAN_N_VID) {
vlan_en = true;
- vlan_tag |= (rdma_ah_get_sl(ah_attr) &
+ vlan_id |= (rdma_ah_get_sl(ah_attr) &
HNS_ROCE_VLAN_SL_BIT_MASK) <<
HNS_ROCE_VLAN_SL_SHIFT;
}
ah->av.port = rdma_ah_get_port_num(ah_attr);
ah->av.gid_index = grh->sgid_index;
- ah->av.vlan = vlan_tag;
+ ah->av.vlan_id = vlan_id;
ah->av.vlan_en = vlan_en;
- dev_dbg(dev, "gid_index = 0x%x,vlan = 0x%x\n", ah->av.gid_index,
- ah->av.vlan);
+ dev_dbg(dev, "gid_index = 0x%x,vlan_id = 0x%x\n", ah->av.gid_index,
+ ah->av.vlan_id);
if (rdma_ah_get_static_rate(ah_attr))
ah->av.stat_rate = IB_RATE_10_GBPS;
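The rename from vlan/VLAN_CFI_MASK to vlan_id/VLAN_N_VID is more than cosmetic: the value under test is a VLAN ID, and the bound for a valid ID is VLAN_N_VID (4096), with 0xffff returned by rdma_read_gid_l2_fields() to mean "no VLAN". VLAN_CFI_MASK happens to equal 0x1000 as well, so behaviour is unchanged, but the new macro states the intent. A user-space sketch of the check, with the two constants inlined for the demo:

    #include <stdio.h>

    #define VLAN_N_VID 4096   /* valid VLAN IDs are 0..4095 */
    #define NO_VLAN    0xffff /* sentinel for "no VLAN on this GID" */

    int main(void)
    {
            unsigned short ids[] = { 0, 100, 4095, NO_VLAN };
            for (int i = 0; i < 4; i++)
                    printf("vlan_id 0x%04x -> %s\n", ids[i],
                           ids[i] < VLAN_N_VID ? "tag frames" : "no VLAN");
            return 0;
    }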
diff --git a/drivers/infiniband/hw/hns/hns_roce_alloc.c b/drivers/infiniband/hw/hns/hns_roce_alloc.c
index 8c063c598d2a..da574c26e063 100644
--- a/drivers/infiniband/hw/hns/hns_roce_alloc.c
+++ b/drivers/infiniband/hw/hns/hns_roce_alloc.c
@@ -55,7 +55,7 @@ int hns_roce_bitmap_alloc(struct hns_roce_bitmap *bitmap, unsigned long *obj)
bitmap->last = 0;
*obj |= bitmap->top;
} else {
- ret = -1;
+ ret = -EINVAL;
}
spin_unlock(&bitmap->lock);
@@ -100,7 +100,7 @@ int hns_roce_bitmap_alloc_range(struct hns_roce_bitmap *bitmap, int cnt,
}
*obj |= bitmap->top;
} else {
- ret = -1;
+ ret = -EINVAL;
}
spin_unlock(&bitmap->lock);
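Returning a bare -1 from an allocator is indistinguishable from -EPERM once it propagates toward user space; switching to -EINVAL (and, in the callers further down, forwarding the code instead of testing ret == -1) keeps the error meaningful end to end. A small demo of why -1 is a poor error value:

    #include <stdio.h>
    #include <string.h>
    #include <errno.h>

    int main(void)
    {
            /* -1 aliases -EPERM when read back as a kernel errno. */
            printf("-1      -> %s\n", strerror(-(-1)));      /* EPERM  */
            printf("-EINVAL -> %s\n", strerror(-(-EINVAL))); /* EINVAL */
            return 0;
    }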
diff --git a/drivers/infiniband/hw/hns/hns_roce_cmd.h b/drivers/infiniband/hw/hns/hns_roce_cmd.h
index 2b6ac646ca9a..1915bacaded0 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cmd.h
+++ b/drivers/infiniband/hw/hns/hns_roce_cmd.h
@@ -115,12 +115,12 @@ enum {
enum {
/* TPT commands */
- HNS_ROCE_CMD_SW2HW_MPT = 0xd,
- HNS_ROCE_CMD_HW2SW_MPT = 0xf,
+ HNS_ROCE_CMD_CREATE_MPT = 0xd,
+ HNS_ROCE_CMD_DESTROY_MPT = 0xf,
/* CQ commands */
- HNS_ROCE_CMD_SW2HW_CQ = 0x16,
- HNS_ROCE_CMD_HW2SW_CQ = 0x17,
+ HNS_ROCE_CMD_CREATE_CQC = 0x16,
+ HNS_ROCE_CMD_DESTROY_CQC = 0x17,
/* QP/EE commands */
HNS_ROCE_CMD_RST2INIT_QP = 0x19,
@@ -129,14 +129,14 @@ enum {
HNS_ROCE_CMD_RTS2RTS_QP = 0x1c,
HNS_ROCE_CMD_2ERR_QP = 0x1e,
HNS_ROCE_CMD_RTS2SQD_QP = 0x1f,
- HNS_ROCE_CMD_SQD2SQD_QP = 0x38,
HNS_ROCE_CMD_SQD2RTS_QP = 0x20,
HNS_ROCE_CMD_2RST_QP = 0x21,
HNS_ROCE_CMD_QUERY_QP = 0x22,
- HNS_ROCE_CMD_SW2HW_SRQ = 0x70,
+ HNS_ROCE_CMD_SQD2SQD_QP = 0x38,
+ HNS_ROCE_CMD_CREATE_SRQ = 0x70,
HNS_ROCE_CMD_MODIFY_SRQC = 0x72,
HNS_ROCE_CMD_QUERY_SRQC = 0x73,
- HNS_ROCE_CMD_HW2SW_SRQ = 0x74,
+ HNS_ROCE_CMD_DESTROY_SRQ = 0x74,
};
int hns_roce_cmd_mbox(struct hns_roce_dev *hr_dev, u64 in_param, u64 out_param,
diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
index 22541d19cd09..af1d8823b3f0 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
@@ -39,51 +39,8 @@
#include <rdma/hns-abi.h>
#include "hns_roce_common.h"
-static void hns_roce_ib_cq_comp(struct hns_roce_cq *hr_cq)
-{
- struct ib_cq *ibcq = &hr_cq->ib_cq;
-
- ibcq->comp_handler(ibcq, ibcq->cq_context);
-}
-
-static void hns_roce_ib_cq_event(struct hns_roce_cq *hr_cq,
- enum hns_roce_event event_type)
-{
- struct hns_roce_dev *hr_dev;
- struct ib_event event;
- struct ib_cq *ibcq;
-
- ibcq = &hr_cq->ib_cq;
- hr_dev = to_hr_dev(ibcq->device);
-
- if (event_type != HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID &&
- event_type != HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR &&
- event_type != HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW) {
- dev_err(hr_dev->dev,
- "hns_roce_ib: Unexpected event type 0x%x on CQ %06lx\n",
- event_type, hr_cq->cqn);
- return;
- }
-
- if (ibcq->event_handler) {
- event.device = ibcq->device;
- event.event = IB_EVENT_CQ_ERR;
- event.element.cq = ibcq;
- ibcq->event_handler(&event, ibcq->cq_context);
- }
-}
-
-static int hns_roce_sw2hw_cq(struct hns_roce_dev *dev,
- struct hns_roce_cmd_mailbox *mailbox,
- unsigned long cq_num)
-{
- return hns_roce_cmd_mbox(dev, mailbox->dma, 0, cq_num, 0,
- HNS_ROCE_CMD_SW2HW_CQ, HNS_ROCE_CMD_TIMEOUT_MSECS);
-}
-
-static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent,
- struct hns_roce_mtt *hr_mtt,
- struct hns_roce_cq *hr_cq, int vector)
+static int hns_roce_alloc_cqc(struct hns_roce_dev *hr_dev,
+ struct hns_roce_cq *hr_cq)
{
struct hns_roce_cmd_mailbox *mailbox;
struct hns_roce_hem_table *mtt_table;
@@ -101,35 +58,32 @@ static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent,
else
mtt_table = &hr_dev->mr_table.mtt_table;
- mtts = hns_roce_table_find(hr_dev, mtt_table,
- hr_mtt->first_seg, &dma_handle);
- if (!mtts) {
- dev_err(dev, "CQ alloc.Failed to find cq buf addr.\n");
- return -EINVAL;
- }
+ mtts = hns_roce_table_find(hr_dev, mtt_table, hr_cq->mtt.first_seg,
+ &dma_handle);
- if (vector >= hr_dev->caps.num_comp_vectors) {
- dev_err(dev, "CQ alloc.Invalid vector.\n");
+ if (!mtts) {
+ dev_err(dev, "Failed to find mtt for CQ buf.\n");
return -EINVAL;
}
- hr_cq->vector = vector;
ret = hns_roce_bitmap_alloc(&cq_table->bitmap, &hr_cq->cqn);
- if (ret == -1) {
- dev_err(dev, "CQ alloc.Failed to alloc index.\n");
- return -ENOMEM;
+ if (ret) {
+ dev_err(dev, "Num of CQ out of range.\n");
+ return ret;
}
/* Get CQC memory HEM(Hardware Entry Memory) table */
ret = hns_roce_table_get(hr_dev, &cq_table->table, hr_cq->cqn);
if (ret) {
- dev_err(dev, "CQ alloc.Failed to get context mem.\n");
+ dev_err(dev,
+ "Get context mem failed(%d) when CQ(0x%lx) alloc.\n",
+ ret, hr_cq->cqn);
goto err_out;
}
ret = xa_err(xa_store(&cq_table->array, hr_cq->cqn, hr_cq, GFP_KERNEL));
if (ret) {
- dev_err(dev, "CQ alloc failed xa_store.\n");
+ dev_err(dev, "Failed to xa_store CQ.\n");
goto err_put;
}
@@ -140,14 +94,16 @@ static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent,
goto err_xa;
}
- hr_dev->hw->write_cqc(hr_dev, hr_cq, mailbox->buf, mtts, dma_handle,
- nent, vector);
+ hr_dev->hw->write_cqc(hr_dev, hr_cq, mailbox->buf, mtts, dma_handle);
/* Send mailbox to hw */
- ret = hns_roce_sw2hw_cq(hr_dev, mailbox, hr_cq->cqn);
+ ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_cq->cqn, 0,
+ HNS_ROCE_CMD_CREATE_CQC, HNS_ROCE_CMD_TIMEOUT_MSECS);
hns_roce_free_cmd_mailbox(hr_dev, mailbox);
if (ret) {
- dev_err(dev, "CQ alloc.Failed to cmd mailbox.\n");
+ dev_err(dev,
+ "Send cmd mailbox failed(%d) when CQ(0x%lx) alloc.\n",
+ ret, hr_cq->cqn);
goto err_xa;
}
@@ -170,24 +126,17 @@ err_out:
return ret;
}
-static int hns_roce_hw2sw_cq(struct hns_roce_dev *dev,
- struct hns_roce_cmd_mailbox *mailbox,
- unsigned long cq_num)
-{
- return hns_roce_cmd_mbox(dev, 0, mailbox ? mailbox->dma : 0, cq_num,
- mailbox ? 0 : 1, HNS_ROCE_CMD_HW2SW_CQ,
- HNS_ROCE_CMD_TIMEOUT_MSECS);
-}
-
-void hns_roce_free_cq(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
+void hns_roce_free_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
{
struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
struct device *dev = hr_dev->dev;
int ret;
- ret = hns_roce_hw2sw_cq(hr_dev, NULL, hr_cq->cqn);
+ ret = hns_roce_cmd_mbox(hr_dev, 0, 0, hr_cq->cqn, 1,
+ HNS_ROCE_CMD_DESTROY_CQC,
+ HNS_ROCE_CMD_TIMEOUT_MSECS);
if (ret)
- dev_err(dev, "HW2SW_CQ failed (%d) for CQN %06lx\n", ret,
+ dev_err(dev, "DESTROY_CQ failed (%d) for CQN %06lx\n", ret,
hr_cq->cqn);
xa_erase(&cq_table->array, hr_cq->cqn);
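After this refactor the CQ context (CQC) lifecycle is symmetric: hns_roce_alloc_cqc() locates the MTT pages, allocates a CQN, pins the HEM entry, publishes the CQ in the xarray and posts a CREATE_CQC mailbox, while hns_roce_free_cqc() posts DESTROY_CQC and unwinds in reverse order. A condensed sketch of the create ordering, with error unwinding, locking and the CQC payload elided; all names are as in the code above:

    static int demo_alloc_cqc(struct hns_roce_dev *hr_dev,
                              struct hns_roce_cq *hr_cq)
    {
            struct hns_roce_cmd_mailbox *mailbox =
                    hns_roce_alloc_cmd_mailbox(hr_dev);

            /* 1. CQN from the bitmap, 2. HEM entry, 3. xarray publish. */
            hns_roce_bitmap_alloc(&hr_dev->cq_table.bitmap, &hr_cq->cqn);
            hns_roce_table_get(hr_dev, &hr_dev->cq_table.table, hr_cq->cqn);
            xa_store(&hr_dev->cq_table.array, hr_cq->cqn, hr_cq, GFP_KERNEL);

            /* 4. write_cqc() fills the mailbox (elided); CREATE_CQC
             * installs it in hardware, DESTROY_CQC later reverses this.
             */
            return hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_cq->cqn, 0,
                                     HNS_ROCE_CMD_CREATE_CQC,
                                     HNS_ROCE_CMD_TIMEOUT_MSECS);
    }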
@@ -204,103 +153,91 @@ void hns_roce_free_cq(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
hns_roce_bitmap_free(&cq_table->bitmap, hr_cq->cqn, BITMAP_NO_RR);
}
-static int hns_roce_ib_get_cq_umem(struct hns_roce_dev *hr_dev,
- struct ib_udata *udata,
- struct hns_roce_cq_buf *buf,
- struct ib_umem **umem, u64 buf_addr, int cqe)
+static int get_cq_umem(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
+ struct hns_roce_ib_create_cq ucmd,
+ struct ib_udata *udata)
{
- int ret;
- u32 page_shift;
+ struct hns_roce_buf *buf = &hr_cq->buf;
+ struct hns_roce_mtt *mtt = &hr_cq->mtt;
+ struct ib_umem **umem = &hr_cq->umem;
u32 npages;
+ int ret;
- *umem = ib_umem_get(udata, buf_addr, cqe * hr_dev->caps.cq_entry_sz,
- IB_ACCESS_LOCAL_WRITE, 1);
+ *umem = ib_umem_get(udata, ucmd.buf_addr, buf->size,
+ IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(*umem))
return PTR_ERR(*umem);
if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
- buf->hr_mtt.mtt_type = MTT_TYPE_CQE;
+ mtt->mtt_type = MTT_TYPE_CQE;
else
- buf->hr_mtt.mtt_type = MTT_TYPE_WQE;
-
- if (hr_dev->caps.cqe_buf_pg_sz) {
- npages = (ib_umem_page_count(*umem) +
- (1 << hr_dev->caps.cqe_buf_pg_sz) - 1) /
- (1 << hr_dev->caps.cqe_buf_pg_sz);
- page_shift = PAGE_SHIFT + hr_dev->caps.cqe_buf_pg_sz;
- ret = hns_roce_mtt_init(hr_dev, npages, page_shift,
- &buf->hr_mtt);
- } else {
- ret = hns_roce_mtt_init(hr_dev, ib_umem_page_count(*umem),
- PAGE_SHIFT, &buf->hr_mtt);
- }
+ mtt->mtt_type = MTT_TYPE_WQE;
+
+ npages = DIV_ROUND_UP(ib_umem_page_count(*umem),
+ 1 << hr_dev->caps.cqe_buf_pg_sz);
+ ret = hns_roce_mtt_init(hr_dev, npages, buf->page_shift, mtt);
if (ret)
goto err_buf;
- ret = hns_roce_ib_umem_write_mtt(hr_dev, &buf->hr_mtt, *umem);
+ ret = hns_roce_ib_umem_write_mtt(hr_dev, mtt, *umem);
if (ret)
goto err_mtt;
return 0;
err_mtt:
- hns_roce_mtt_cleanup(hr_dev, &buf->hr_mtt);
+ hns_roce_mtt_cleanup(hr_dev, mtt);
err_buf:
ib_umem_release(*umem);
return ret;
}
-static int hns_roce_ib_alloc_cq_buf(struct hns_roce_dev *hr_dev,
- struct hns_roce_cq_buf *buf, u32 nent)
+static int alloc_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
{
+ struct hns_roce_buf *buf = &hr_cq->buf;
+ struct hns_roce_mtt *mtt = &hr_cq->mtt;
int ret;
- u32 page_shift = PAGE_SHIFT + hr_dev->caps.cqe_buf_pg_sz;
- ret = hns_roce_buf_alloc(hr_dev, nent * hr_dev->caps.cq_entry_sz,
- (1 << page_shift) * 2, &buf->hr_buf,
- page_shift);
+ ret = hns_roce_buf_alloc(hr_dev, buf->size, (1 << buf->page_shift) * 2,
+ buf, buf->page_shift);
if (ret)
goto out;
if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
- buf->hr_mtt.mtt_type = MTT_TYPE_CQE;
+ mtt->mtt_type = MTT_TYPE_CQE;
else
- buf->hr_mtt.mtt_type = MTT_TYPE_WQE;
+ mtt->mtt_type = MTT_TYPE_WQE;
- ret = hns_roce_mtt_init(hr_dev, buf->hr_buf.npages,
- buf->hr_buf.page_shift, &buf->hr_mtt);
+ ret = hns_roce_mtt_init(hr_dev, buf->npages, buf->page_shift, mtt);
if (ret)
goto err_buf;
- ret = hns_roce_buf_write_mtt(hr_dev, &buf->hr_mtt, &buf->hr_buf);
+ ret = hns_roce_buf_write_mtt(hr_dev, mtt, buf);
if (ret)
goto err_mtt;
return 0;
err_mtt:
- hns_roce_mtt_cleanup(hr_dev, &buf->hr_mtt);
+ hns_roce_mtt_cleanup(hr_dev, mtt);
err_buf:
- hns_roce_buf_free(hr_dev, nent * hr_dev->caps.cq_entry_sz,
- &buf->hr_buf);
+ hns_roce_buf_free(hr_dev, buf->size, buf);
+
out:
return ret;
}
-static void hns_roce_ib_free_cq_buf(struct hns_roce_dev *hr_dev,
- struct hns_roce_cq_buf *buf, int cqe)
+static void free_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
{
- hns_roce_buf_free(hr_dev, (cqe + 1) * hr_dev->caps.cq_entry_sz,
- &buf->hr_buf);
+ hns_roce_buf_free(hr_dev, hr_cq->buf.size, &hr_cq->buf);
}
static int create_user_cq(struct hns_roce_dev *hr_dev,
struct hns_roce_cq *hr_cq,
struct ib_udata *udata,
- struct hns_roce_ib_create_cq_resp *resp,
- int cq_entries)
+ struct hns_roce_ib_create_cq_resp *resp)
{
struct hns_roce_ib_create_cq ucmd;
struct device *dev = hr_dev->dev;
@@ -314,9 +251,7 @@ static int create_user_cq(struct hns_roce_dev *hr_dev,
}
/* Get user space address, write it into mtt table */
- ret = hns_roce_ib_get_cq_umem(hr_dev, udata, &hr_cq->hr_buf,
- &hr_cq->umem, ucmd.buf_addr,
- cq_entries);
+ ret = get_cq_umem(hr_dev, hr_cq, ucmd, udata);
if (ret) {
dev_err(dev, "Failed to get_cq_umem.\n");
return ret;
@@ -337,17 +272,16 @@ static int create_user_cq(struct hns_roce_dev *hr_dev,
return 0;
err_mtt:
- hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
+ hns_roce_mtt_cleanup(hr_dev, &hr_cq->mtt);
ib_umem_release(hr_cq->umem);
return ret;
}
static int create_kernel_cq(struct hns_roce_dev *hr_dev,
- struct hns_roce_cq *hr_cq, int cq_entries)
+ struct hns_roce_cq *hr_cq)
{
struct device *dev = hr_dev->dev;
- struct hns_roce_uar *uar;
int ret;
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) {
@@ -361,15 +295,14 @@ static int create_kernel_cq(struct hns_roce_dev *hr_dev,
}
/* Init mtt table and write buff address to mtt table */
- ret = hns_roce_ib_alloc_cq_buf(hr_dev, &hr_cq->hr_buf, cq_entries);
+ ret = alloc_cq_buf(hr_dev, hr_cq);
if (ret) {
dev_err(dev, "Failed to alloc_cq_buf.\n");
goto err_db;
}
- uar = &hr_dev->priv_uar;
hr_cq->cq_db_l = hr_dev->reg_base + hr_dev->odb_offset +
- DB_REG_OFFSET * uar->index;
+ DB_REG_OFFSET * hr_dev->priv_uar.index;
return 0;
@@ -392,64 +325,69 @@ static void destroy_user_cq(struct hns_roce_dev *hr_dev,
(udata->outlen >= sizeof(*resp)))
hns_roce_db_unmap_user(context, &hr_cq->db);
- hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
+ hns_roce_mtt_cleanup(hr_dev, &hr_cq->mtt);
ib_umem_release(hr_cq->umem);
}
static void destroy_kernel_cq(struct hns_roce_dev *hr_dev,
struct hns_roce_cq *hr_cq)
{
- hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
- hns_roce_ib_free_cq_buf(hr_dev, &hr_cq->hr_buf, hr_cq->ib_cq.cqe);
+ hns_roce_mtt_cleanup(hr_dev, &hr_cq->mtt);
+ free_cq_buf(hr_dev, hr_cq);
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)
hns_roce_free_db(hr_dev, &hr_cq->db);
}
-int hns_roce_ib_create_cq(struct ib_cq *ib_cq,
- const struct ib_cq_init_attr *attr,
- struct ib_udata *udata)
+int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
+ struct ib_udata *udata)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
- struct device *dev = hr_dev->dev;
struct hns_roce_ib_create_cq_resp resp = {};
struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
+ struct device *dev = hr_dev->dev;
int vector = attr->comp_vector;
- int cq_entries = attr->cqe;
+ u32 cq_entries = attr->cqe;
int ret;
if (cq_entries < 1 || cq_entries > hr_dev->caps.max_cqes) {
- dev_err(dev, "Creat CQ failed. entries=%d, max=%d\n",
+ dev_err(dev, "Create CQ failed. entries=%d, max=%d\n",
cq_entries, hr_dev->caps.max_cqes);
return -EINVAL;
}
- if (hr_dev->caps.min_cqes)
- cq_entries = max(cq_entries, hr_dev->caps.min_cqes);
+ if (vector >= hr_dev->caps.num_comp_vectors) {
+ dev_err(dev, "Create CQ failed, vector=%d, max=%d\n",
+ vector, hr_dev->caps.num_comp_vectors);
+ return -EINVAL;
+ }
- cq_entries = roundup_pow_of_two((unsigned int)cq_entries);
- hr_cq->ib_cq.cqe = cq_entries - 1;
+ cq_entries = max(cq_entries, hr_dev->caps.min_cqes);
+ cq_entries = roundup_pow_of_two(cq_entries);
+ hr_cq->ib_cq.cqe = cq_entries - 1; /* used as cqe index */
+ hr_cq->cq_depth = cq_entries;
+ hr_cq->vector = vector;
+ hr_cq->buf.size = hr_cq->cq_depth * hr_dev->caps.cq_entry_sz;
+ hr_cq->buf.page_shift = PAGE_SHIFT + hr_dev->caps.cqe_buf_pg_sz;
spin_lock_init(&hr_cq->lock);
if (udata) {
- ret = create_user_cq(hr_dev, hr_cq, udata, &resp, cq_entries);
+ ret = create_user_cq(hr_dev, hr_cq, udata, &resp);
if (ret) {
dev_err(dev, "Create cq failed in user mode!\n");
goto err_cq;
}
} else {
- ret = create_kernel_cq(hr_dev, hr_cq, cq_entries);
+ ret = create_kernel_cq(hr_dev, hr_cq);
if (ret) {
dev_err(dev, "Create cq failed in kernel mode!\n");
goto err_cq;
}
}
- /* Allocate cq index, fill cq_context */
- ret = hns_roce_cq_alloc(hr_dev, cq_entries, &hr_cq->hr_buf.hr_mtt,
- hr_cq, vector);
+ ret = hns_roce_alloc_cqc(hr_dev, hr_cq);
if (ret) {
- dev_err(dev, "Creat CQ .Failed to cq_alloc.\n");
+ dev_err(dev, "Alloc CQ failed(%d).\n", ret);
goto err_dbmap;
}
@@ -462,11 +400,6 @@ int hns_roce_ib_create_cq(struct ib_cq *ib_cq,
if (!udata && hr_cq->tptr_addr)
*hr_cq->tptr_addr = 0;
- /* Get created cq handler and carry out event */
- hr_cq->comp = hns_roce_ib_cq_comp;
- hr_cq->event = hns_roce_ib_cq_event;
- hr_cq->cq_depth = cq_entries;
-
if (udata) {
resp.cqn = hr_cq->cqn;
ret = ib_copy_to_udata(udata, &resp, sizeof(resp));
@@ -477,7 +410,7 @@ int hns_roce_ib_create_cq(struct ib_cq *ib_cq,
return 0;
err_cqc:
- hns_roce_free_cq(hr_dev, hr_cq);
+ hns_roce_free_cqc(hr_dev, hr_cq);
err_dbmap:
if (udata)
@@ -489,7 +422,7 @@ err_cq:
return ret;
}
-void hns_roce_ib_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
+void hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
@@ -499,8 +432,8 @@ void hns_roce_ib_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
return;
}
- hns_roce_free_cq(hr_dev, hr_cq);
- hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
+ hns_roce_free_cqc(hr_dev, hr_cq);
+ hns_roce_mtt_cleanup(hr_dev, &hr_cq->mtt);
ib_umem_release(hr_cq->umem);
if (udata) {
@@ -512,7 +445,7 @@ void hns_roce_ib_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
&hr_cq->db);
} else {
/* Free the buff of stored cq */
- hns_roce_ib_free_cq_buf(hr_dev, &hr_cq->hr_buf, ib_cq->cqe);
+ free_cq_buf(hr_dev, hr_cq);
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)
hns_roce_free_db(hr_dev, &hr_cq->db);
}
@@ -520,38 +453,57 @@ void hns_roce_ib_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn)
{
- struct device *dev = hr_dev->dev;
- struct hns_roce_cq *cq;
+ struct hns_roce_cq *hr_cq;
+ struct ib_cq *ibcq;
- cq = xa_load(&hr_dev->cq_table.array, cqn & (hr_dev->caps.num_cqs - 1));
- if (!cq) {
- dev_warn(dev, "Completion event for bogus CQ 0x%08x\n", cqn);
+ hr_cq = xa_load(&hr_dev->cq_table.array,
+ cqn & (hr_dev->caps.num_cqs - 1));
+ if (!hr_cq) {
+ dev_warn(hr_dev->dev, "Completion event for bogus CQ 0x%06x\n",
+ cqn);
return;
}
- ++cq->arm_sn;
- cq->comp(cq);
+ ++hr_cq->arm_sn;
+ ibcq = &hr_cq->ib_cq;
+ if (ibcq->comp_handler)
+ ibcq->comp_handler(ibcq, ibcq->cq_context);
}
void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type)
{
- struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
struct device *dev = hr_dev->dev;
- struct hns_roce_cq *cq;
+ struct hns_roce_cq *hr_cq;
+ struct ib_event event;
+ struct ib_cq *ibcq;
- cq = xa_load(&cq_table->array, cqn & (hr_dev->caps.num_cqs - 1));
- if (cq)
- atomic_inc(&cq->refcount);
+ hr_cq = xa_load(&hr_dev->cq_table.array,
+ cqn & (hr_dev->caps.num_cqs - 1));
+ if (!hr_cq) {
+ dev_warn(dev, "Async event for bogus CQ 0x%06x\n", cqn);
+ return;
+ }
- if (!cq) {
- dev_warn(dev, "Async event for bogus CQ %08x\n", cqn);
+ if (event_type != HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID &&
+ event_type != HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR &&
+ event_type != HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW) {
+ dev_err(dev, "Unexpected event type 0x%x on CQ 0x%06x\n",
+ event_type, cqn);
return;
}
- cq->event(cq, (enum hns_roce_event)event_type);
+ atomic_inc(&hr_cq->refcount);
+
+ ibcq = &hr_cq->ib_cq;
+ if (ibcq->event_handler) {
+ event.device = ibcq->device;
+ event.element.cq = ibcq;
+ event.event = IB_EVENT_CQ_ERR;
+ ibcq->event_handler(&event, ibcq->cq_context);
+ }
- if (atomic_dec_and_test(&cq->refcount))
- complete(&cq->free);
+ if (atomic_dec_and_test(&hr_cq->refcount))
+ complete(&hr_cq->free);
}
int hns_roce_init_cq_table(struct hns_roce_dev *hr_dev)
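With the comp/event function pointers gone from struct hns_roce_cq, dispatch now lives directly in hns_roce_cq_completion() and hns_roce_cq_event(); the bogus-CQN and event-type checks run before any state is touched, and the refcount taken around the user's event handler keeps the CQ alive while the callback runs. The protection pattern, lifted from the hunk above:

    /* Pin the CQ across the user callback, then drop the reference
     * and wake any destroyer waiting on hr_cq->free.
     */
    atomic_inc(&hr_cq->refcount);
    ibcq = &hr_cq->ib_cq;
    if (ibcq->event_handler) {
            event.device = ibcq->device;
            event.element.cq = ibcq;
            event.event = IB_EVENT_CQ_ERR;
            ibcq->event_handler(&event, ibcq->cq_context);
    }
    if (atomic_dec_and_test(&hr_cq->refcount))
            complete(&hr_cq->free);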
diff --git a/drivers/infiniband/hw/hns/hns_roce_db.c b/drivers/infiniband/hw/hns/hns_roce_db.c
index c00714c2f16a..10af6958ab69 100644
--- a/drivers/infiniband/hw/hns/hns_roce_db.c
+++ b/drivers/infiniband/hw/hns/hns_roce_db.c
@@ -31,7 +31,7 @@ int hns_roce_db_map_user(struct hns_roce_ucontext *context,
refcount_set(&page->refcount, 1);
page->user_virt = page_addr;
- page->umem = ib_umem_get(udata, page_addr, PAGE_SIZE, 0, 0);
+ page->umem = ib_umem_get(udata, page_addr, PAGE_SIZE, 0);
if (IS_ERR(page->umem)) {
ret = PTR_ERR(page->umem);
kfree(page);
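Every ib_umem_get() call in this series loses its trailing argument: the unused dmasync parameter was removed from the core API, so callers now pass only udata, address, length and access flags. A minimal sketch of the new shape; ucmd.buf_addr and buf_size are stand-ins for the caller's values:

    #include <rdma/ib_umem.h>

    /* New calling convention: the trailing dmasync argument is gone. */
    umem = ib_umem_get(udata, ucmd.buf_addr, buf_size,
                       IB_ACCESS_LOCAL_WRITE);
    if (IS_ERR(umem))
            return PTR_ERR(umem);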
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index 96d1302abde1..5617434cbfb4 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -45,7 +45,7 @@
#define HNS_ROCE_MAX_MSG_LEN 0x80000000
-#define HNS_ROCE_ALOGN_UP(a, b) ((((a) + (b) - 1) / (b)) * (b))
+#define HNS_ROCE_ALIGN_UP(a, b) ((((a) + (b) - 1) / (b)) * (b))
#define HNS_ROCE_IB_MIN_SQ_STRIDE 6
@@ -53,8 +53,6 @@
#define BA_BYTE_LEN 8
-#define BITS_PER_BYTE 8
-
/* Hardware specification only for v1 engine */
#define HNS_ROCE_MIN_CQE_NUM 0x40
#define HNS_ROCE_MIN_WQE_NUM 0x20
@@ -426,7 +424,6 @@ struct hns_roce_wq {
u64 *wrid; /* Work request ID */
spinlock_t lock;
int wqe_cnt; /* WQE num */
- u32 max_post;
int max_gs;
int offset;
int wqe_shift; /* WQE size */
@@ -451,6 +448,7 @@ struct hns_roce_buf {
struct hns_roce_buf_list *page_list;
int nbufs;
u32 npages;
+ u32 size;
int page_shift;
};
@@ -482,22 +480,14 @@ struct hns_roce_db {
int order;
};
-struct hns_roce_cq_buf {
- struct hns_roce_buf hr_buf;
- struct hns_roce_mtt hr_mtt;
-};
-
struct hns_roce_cq {
struct ib_cq ib_cq;
- struct hns_roce_cq_buf hr_buf;
+ struct hns_roce_buf buf;
+ struct hns_roce_mtt mtt;
struct hns_roce_db db;
u8 db_en;
spinlock_t lock;
struct ib_umem *umem;
- void (*comp)(struct hns_roce_cq *cq);
- void (*event)(struct hns_roce_cq *cq, enum hns_roce_event event_type);
-
- struct hns_roce_uar *uar;
u32 cq_depth;
u32 cons_index;
u32 *set_ci_db;
@@ -521,9 +511,8 @@ struct hns_roce_idx_que {
struct hns_roce_srq {
struct ib_srq ibsrq;
- void (*event)(struct hns_roce_srq *srq, enum hns_roce_event event);
unsigned long srqn;
- int max;
+ u32 wqe_cnt;
int max_gs;
int wqe_shift;
void __iomem *db_reg_l;
@@ -539,8 +528,8 @@ struct hns_roce_srq {
spinlock_t lock;
int head;
int tail;
- u16 wqe_ctr;
struct mutex mutex;
+ void (*event)(struct hns_roce_srq *srq, enum hns_roce_event event);
};
struct hns_roce_uar_table {
@@ -582,7 +571,7 @@ struct hns_roce_av {
u8 tclass;
u8 dgid[HNS_ROCE_GID_SIZE];
u8 mac[ETH_ALEN];
- u16 vlan;
+ u16 vlan_id;
bool vlan_en;
};
@@ -695,10 +684,6 @@ struct hns_roce_qp {
struct hns_roce_rinl_buf rq_inl_buf;
};
-struct hns_roce_sqp {
- struct hns_roce_qp hr_qp;
-};
-
struct hns_roce_ib_iboe {
spinlock_t lock;
struct net_device *netdevs[HNS_ROCE_MAX_PORTS];
@@ -821,8 +806,8 @@ struct hns_roce_caps {
int max_qp_init_rdma;
int max_qp_dest_rdma;
int num_cqs;
- int max_cqes;
- int min_cqes;
+ u32 max_cqes;
+ u32 min_cqes;
u32 min_wqes;
int reserved_cqs;
int reserved_srqs;
@@ -953,7 +938,7 @@ struct hns_roce_hw {
int (*mw_write_mtpt)(void *mb_buf, struct hns_roce_mw *mw);
void (*write_cqc)(struct hns_roce_dev *hr_dev,
struct hns_roce_cq *hr_cq, void *mb_buf, u64 *mtts,
- dma_addr_t dma_handle, int nent, u32 vector);
+ dma_addr_t dma_handle);
int (*set_hem)(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table, int obj, int step_idx);
int (*clear_hem)(struct hns_roce_dev *hr_dev,
@@ -1092,11 +1077,6 @@ static inline struct hns_roce_srq *to_hr_srq(struct ib_srq *ibsrq)
return container_of(ibsrq, struct hns_roce_srq, ibsrq);
}
-static inline struct hns_roce_sqp *hr_to_hr_sqp(struct hns_roce_qp *hr_qp)
-{
- return container_of(hr_qp, struct hns_roce_sqp, hr_qp);
-}
-
static inline void hns_roce_write64_k(__le32 val[2], void __iomem *dest)
{
__raw_writeq(*(u64 *) val, dest);
@@ -1198,9 +1178,9 @@ struct ib_mr *hns_roce_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
unsigned int *sg_offset);
int hns_roce_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
-int hns_roce_hw2sw_mpt(struct hns_roce_dev *hr_dev,
- struct hns_roce_cmd_mailbox *mailbox,
- unsigned long mpt_index);
+int hns_roce_hw_destroy_mpt(struct hns_roce_dev *hr_dev,
+ struct hns_roce_cmd_mailbox *mailbox,
+ unsigned long mpt_index);
unsigned long key_to_hw_index(u32 key);
struct ib_mw *hns_roce_alloc_mw(struct ib_pd *pd, enum ib_mw_type,
@@ -1257,12 +1237,11 @@ void hns_roce_release_range_qp(struct hns_roce_dev *hr_dev, int base_qpn,
__be32 send_ieth(const struct ib_send_wr *wr);
int to_hr_qp_type(int qp_type);
-int hns_roce_ib_create_cq(struct ib_cq *ib_cq,
- const struct ib_cq_init_attr *attr,
- struct ib_udata *udata);
+int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
+ struct ib_udata *udata);
-void hns_roce_ib_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata);
-void hns_roce_free_cq(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq);
+void hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata);
+void hns_roce_free_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq);
int hns_roce_db_map_user(struct hns_roce_ucontext *context,
struct ib_udata *udata, unsigned long virt,
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.h b/drivers/infiniband/hw/hns/hns_roce_hem.h
index 86783276fb1f..3bb8f78fb7b0 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hem.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.h
@@ -59,7 +59,7 @@ enum {
#define HNS_ROCE_HEM_CHUNK_LEN \
((256 - sizeof(struct list_head) - 2 * sizeof(int)) / \
- (sizeof(struct scatterlist)))
+ (sizeof(struct scatterlist) + sizeof(void *)))
#define check_whether_bt_num_3(type, hop_num) \
(type < HEM_TYPE_MTT && hop_num == 2)
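HNS_ROCE_HEM_CHUNK_LEN sizes struct hns_roce_hem_chunk to fit in a 256-byte budget, but the chunk carries a parallel array of buf pointers alongside its scatterlist array (an assumption inferred from the new divisor); dividing the budget by sizeof(struct scatterlist) alone over-counted how many entries fit. A user-space sketch of the arithmetic with illustrative x86-64 sizes, the real values come from the kernel headers:

    #include <stdio.h>

    int main(void)
    {
            /* Illustrative sizes: list_head 16, int 4, scatterlist 32,
             * pointer 8.
             */
            int budget = 256 - 16 - 2 * 4;
            printf("old: %d entries (sg only)\n", budget / 32);
            printf("new: %d entries (sg + buf pointer)\n", budget / (32 + 8));
            return 0;
    }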
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
index 5f74bf55f471..2a2b2112f886 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -732,7 +732,7 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
if (!cq)
return -ENOMEM;
- ret = hns_roce_ib_create_cq(cq, &cq_init_attr, NULL);
+ ret = hns_roce_create_cq(cq, &cq_init_attr, NULL);
if (ret) {
dev_err(dev, "Create cq for reserved loop qp failed!");
goto alloc_cq_failed;
@@ -868,7 +868,7 @@ alloc_pd_failed:
kfree(pd);
alloc_mem_failed:
- hns_roce_ib_destroy_cq(cq, NULL);
+ hns_roce_destroy_cq(cq, NULL);
alloc_cq_failed:
kfree(cq);
return ret;
@@ -897,7 +897,7 @@ static void hns_roce_v1_release_lp_qp(struct hns_roce_dev *hr_dev)
i, ret);
}
- hns_roce_ib_destroy_cq(&free_mr->mr_free_cq->ib_cq, NULL);
+ hns_roce_destroy_cq(&free_mr->mr_free_cq->ib_cq, NULL);
kfree(&free_mr->mr_free_cq->ib_cq);
hns_roce_dealloc_pd(&free_mr->mr_free_pd->ibpd, NULL);
kfree(&free_mr->mr_free_pd->ibpd);
@@ -1114,9 +1114,10 @@ static int hns_roce_v1_dereg_mr(struct hns_roce_dev *hr_dev,
free_mr = &priv->free_mr;
if (mr->enabled) {
- if (hns_roce_hw2sw_mpt(hr_dev, NULL, key_to_hw_index(mr->key)
- & (hr_dev->caps.num_mtpts - 1)))
- dev_warn(dev, "HW2SW_MPT failed!\n");
+ if (hns_roce_hw_destroy_mpt(hr_dev, NULL,
+ key_to_hw_index(mr->key) &
+ (hr_dev->caps.num_mtpts - 1)))
+ dev_warn(dev, "DESTROY_MPT failed!\n");
}
mr_work = kzalloc(sizeof(*mr_work), GFP_KERNEL);
@@ -1979,8 +1980,7 @@ static int hns_roce_v1_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
static void *get_cqe(struct hns_roce_cq *hr_cq, int n)
{
- return hns_roce_buf_offset(&hr_cq->hr_buf.hr_buf,
- n * HNS_ROCE_V1_CQE_ENTRY_SIZE);
+ return hns_roce_buf_offset(&hr_cq->buf, n * HNS_ROCE_V1_CQE_ENTRY_SIZE);
}
static void *get_sw_cqe(struct hns_roce_cq *hr_cq, int n)
@@ -1989,7 +1989,7 @@ static void *get_sw_cqe(struct hns_roce_cq *hr_cq, int n)
/* Get the CQE only when its owner bit differs from the MSB of cons_idx */
return (roce_get_bit(hr_cqe->cqe_byte_4, CQE_BYTE_4_OWNER_S) ^
- !!(n & (hr_cq->ib_cq.cqe + 1))) ? hr_cqe : NULL;
+ !!(n & hr_cq->cq_depth)) ? hr_cqe : NULL;
}
static struct hns_roce_cqe *next_cqe_sw(struct hns_roce_cq *hr_cq)
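Both get_sw_cqe() variants test whether the consumer index has wrapped the ring an odd or even number of times: with cq_depth a power of two and ib_cq.cqe stored as depth - 1, the expressions n & (cqe + 1) and n & cq_depth mask the same bit, but the latter names the bit directly. A tiny demo of the wrap parity:

    #include <stdio.h>

    int main(void)
    {
            unsigned int cq_depth = 8; /* power of two, as enforced above */
            for (unsigned int n = 0; n < 24; n += 4)
                    printf("cons_index %2u -> expected owner %u\n", n,
                           !!(n & cq_depth));
            return 0;
    }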
@@ -2072,8 +2072,7 @@ static void hns_roce_v1_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
static void hns_roce_v1_write_cqc(struct hns_roce_dev *hr_dev,
struct hns_roce_cq *hr_cq, void *mb_buf,
- u64 *mtts, dma_addr_t dma_handle, int nent,
- u32 vector)
+ u64 *mtts, dma_addr_t dma_handle)
{
struct hns_roce_cq_context *cq_context = NULL;
struct hns_roce_buf_list *tptr_buf;
@@ -2108,9 +2107,9 @@ static void hns_roce_v1_write_cqc(struct hns_roce_dev *hr_dev,
roce_set_field(cq_context->cqc_byte_12,
CQ_CONTEXT_CQC_BYTE_12_CQ_CQE_SHIFT_M,
CQ_CONTEXT_CQC_BYTE_12_CQ_CQE_SHIFT_S,
- ilog2((unsigned int)nent));
+ ilog2(hr_cq->cq_depth));
roce_set_field(cq_context->cqc_byte_12, CQ_CONTEXT_CQC_BYTE_12_CEQN_M,
- CQ_CONTEXT_CQC_BYTE_12_CEQN_S, vector);
+ CQ_CONTEXT_CQC_BYTE_12_CEQN_S, hr_cq->vector);
cq_context->cur_cqe_ba0_l = cpu_to_le32((u32)(mtts[0]));
@@ -3644,10 +3643,7 @@ int hns_roce_v1_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
}
- if (hr_qp->ibqp.qp_type == IB_QPT_RC)
- kfree(hr_qp);
- else
- kfree(hr_to_hr_sqp(hr_qp));
+ kfree(hr_qp);
return 0;
}
@@ -3658,10 +3654,9 @@ static void hns_roce_v1_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
struct device *dev = &hr_dev->pdev->dev;
u32 cqe_cnt_ori;
u32 cqe_cnt_cur;
- u32 cq_buf_size;
int wait_time = 0;
- hns_roce_free_cq(hr_dev, hr_cq);
+ hns_roce_free_cqc(hr_dev, hr_cq);
/*
* Before freeing cq buffer, we need to ensure that the outstanding CQE
@@ -3686,13 +3681,12 @@ static void hns_roce_v1_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
wait_time++;
}
- hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
+ hns_roce_mtt_cleanup(hr_dev, &hr_cq->mtt);
ib_umem_release(hr_cq->umem);
if (!udata) {
/* Free the buff of stored cq */
- cq_buf_size = (ibcq->cqe + 1) * hr_dev->caps.cq_entry_sz;
- hns_roce_buf_free(hr_dev, cq_buf_size, &hr_cq->hr_buf.hr_buf);
+ hns_roce_buf_free(hr_dev, hr_cq->buf.size, &hr_cq->buf);
}
}
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index e82567fcdeb7..cb8071a3e0d5 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -389,7 +389,7 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
roce_set_field(ud_sq_wqe->byte_36,
V2_UD_SEND_WQE_BYTE_36_VLAN_M,
V2_UD_SEND_WQE_BYTE_36_VLAN_S,
- le16_to_cpu(ah->av.vlan));
+ ah->av.vlan_id);
roce_set_field(ud_sq_wqe->byte_36,
V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_M,
V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_S,
@@ -2447,8 +2447,7 @@ static int hns_roce_v2_mw_write_mtpt(void *mb_buf, struct hns_roce_mw *mw)
static void *get_cqe_v2(struct hns_roce_cq *hr_cq, int n)
{
- return hns_roce_buf_offset(&hr_cq->hr_buf.hr_buf,
- n * HNS_ROCE_V2_CQE_ENTRY_SIZE);
+ return hns_roce_buf_offset(&hr_cq->buf, n * HNS_ROCE_V2_CQE_ENTRY_SIZE);
}
static void *get_sw_cqe_v2(struct hns_roce_cq *hr_cq, int n)
@@ -2457,7 +2456,7 @@ static void *get_sw_cqe_v2(struct hns_roce_cq *hr_cq, int n)
/* Get the CQE only when its owner bit differs from the MSB of cons_idx */
return (roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_OWNER_S) ^
- !!(n & (hr_cq->ib_cq.cqe + 1))) ? cqe : NULL;
+ !!(n & hr_cq->cq_depth)) ? cqe : NULL;
}
static struct hns_roce_v2_cqe *next_cqe_sw_v2(struct hns_roce_cq *hr_cq)
@@ -2550,8 +2549,7 @@ static void hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev,
struct hns_roce_cq *hr_cq, void *mb_buf,
- u64 *mtts, dma_addr_t dma_handle, int nent,
- u32 vector)
+ u64 *mtts, dma_addr_t dma_handle)
{
struct hns_roce_v2_cq_context *cq_context;
@@ -2563,9 +2561,10 @@ static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev,
roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_ARM_ST_M,
V2_CQC_BYTE_4_ARM_ST_S, REG_NXT_CEQE);
roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_SHIFT_M,
- V2_CQC_BYTE_4_SHIFT_S, ilog2((unsigned int)nent));
+ V2_CQC_BYTE_4_SHIFT_S,
+ ilog2(hr_cq->cq_depth));
roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_CEQN_M,
- V2_CQC_BYTE_4_CEQN_S, vector);
+ V2_CQC_BYTE_4_CEQN_S, hr_cq->vector);
roce_set_field(cq_context->byte_8_cqn, V2_CQC_BYTE_8_CQN_M,
V2_CQC_BYTE_8_CQN_S, hr_cq->cqn);
@@ -4061,8 +4060,8 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp,
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
const struct ib_gid_attr *gid_attr = NULL;
int is_roce_protocol;
+ u16 vlan_id = 0xffff;
bool is_udp = false;
- u16 vlan = 0xffff;
u8 ib_port;
u8 hr_port;
int ret;
@@ -4074,7 +4073,7 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp,
if (is_roce_protocol) {
gid_attr = attr->ah_attr.grh.sgid_attr;
- ret = rdma_read_gid_l2_fields(gid_attr, &vlan, NULL);
+ ret = rdma_read_gid_l2_fields(gid_attr, &vlan_id, NULL);
if (ret)
return ret;
@@ -4083,7 +4082,7 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp,
IB_GID_TYPE_ROCE_UDP_ENCAP);
}
- if (vlan < VLAN_CFI_MASK) {
+ if (vlan_id < VLAN_N_VID) {
roce_set_bit(context->byte_76_srqn_op_en,
V2_QPC_BYTE_76_RQ_VLAN_EN_S, 1);
roce_set_bit(qpc_mask->byte_76_srqn_op_en,
@@ -4095,7 +4094,7 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp,
}
roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M,
- V2_QPC_BYTE_24_VLAN_ID_S, vlan);
+ V2_QPC_BYTE_24_VLAN_ID_S, vlan_id);
roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M,
V2_QPC_BYTE_24_VLAN_ID_S, 0);
@@ -4650,16 +4649,14 @@ static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
{
struct hns_roce_cq *send_cq, *recv_cq;
struct ib_device *ibdev = &hr_dev->ib_dev;
- int ret;
+ int ret = 0;
if (hr_qp->ibqp.qp_type == IB_QPT_RC && hr_qp->state != IB_QPS_RESET) {
/* Modify qp to reset before destroying qp */
ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, NULL, 0,
hr_qp->state, IB_QPS_RESET);
- if (ret) {
+ if (ret)
ibdev_err(ibdev, "modify QP to Reset failed.\n");
- return ret;
- }
}
send_cq = to_hr_cq(hr_qp->ibqp.send_cq);
@@ -4715,7 +4712,7 @@ static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
kfree(hr_qp->rq_inl_buf.wqe_list);
}
- return 0;
+ return ret;
}
static int hns_roce_v2_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
@@ -4725,16 +4722,11 @@ static int hns_roce_v2_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
int ret;
ret = hns_roce_v2_destroy_qp_common(hr_dev, hr_qp, udata);
- if (ret) {
+ if (ret)
ibdev_err(&hr_dev->ib_dev, "Destroy qp 0x%06lx failed(%d)\n",
hr_qp->qpn, ret);
- return ret;
- }
- if (hr_qp->ibqp.qp_type == IB_QPT_GSI)
- kfree(hr_to_hr_sqp(hr_qp));
- else
- kfree(hr_qp);
+ kfree(hr_qp);
return 0;
}
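hns_roce_v2_destroy_qp_common() no longer aborts when the modify-to-RESET mailbox fails: returning early leaked the QPN, buffers and doorbells, and a destroy verb cannot usefully be retried. The failure is still logged and the code is still returned, but teardown runs to completion. The pattern in miniature; modify_qp_to_reset() and free_qp_resources() are hypothetical stand-ins for the steps above:

    /* Best-effort destroy: record the first failure, free everything
     * regardless, and report the error only after cleanup completes.
     */
    int ret = 0;

    if (qp_needs_reset)
            ret = modify_qp_to_reset(qp);  /* may fail; do not return yet */
    if (ret)
            ibdev_err(ibdev, "modify QP to Reset failed.\n");

    free_qp_resources(qp);                 /* always runs */
    return ret;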
@@ -4951,10 +4943,7 @@ static void hns_roce_v2_init_irq_work(struct hns_roce_dev *hr_dev,
static void set_eq_cons_index_v2(struct hns_roce_eq *eq)
{
struct hns_roce_dev *hr_dev = eq->hr_dev;
- __le32 doorbell[2];
-
- doorbell[0] = 0;
- doorbell[1] = 0;
+ __le32 doorbell[2] = {};
if (eq->type_flag == HNS_ROCE_AEQ) {
roce_set_field(doorbell[0], HNS_ROCE_V2_EQ_DB_CMD_M,
@@ -6047,7 +6036,7 @@ static void hns_roce_v2_write_srqc(struct hns_roce_dev *hr_dev,
hr_dev->caps.srqwqe_hop_num));
roce_set_field(srq_context->byte_4_srqn_srqst,
SRQC_BYTE_4_SRQ_SHIFT_M, SRQC_BYTE_4_SRQ_SHIFT_S,
- ilog2(srq->max));
+ ilog2(srq->wqe_cnt));
roce_set_field(srq_context->byte_4_srqn_srqst, SRQC_BYTE_4_SRQN_M,
SRQC_BYTE_4_SRQN_S, srq->srqn);
@@ -6092,11 +6081,11 @@ static void hns_roce_v2_write_srqc(struct hns_roce_dev *hr_dev,
roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
SRQC_BYTE_44_SRQ_IDX_BA_PG_SZ_M,
SRQC_BYTE_44_SRQ_IDX_BA_PG_SZ_S,
- hr_dev->caps.idx_ba_pg_sz);
+ hr_dev->caps.idx_ba_pg_sz + PG_SHIFT_OFFSET);
roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
SRQC_BYTE_44_SRQ_IDX_BUF_PG_SZ_M,
SRQC_BYTE_44_SRQ_IDX_BUF_PG_SZ_S,
- hr_dev->caps.idx_buf_pg_sz);
+ hr_dev->caps.idx_buf_pg_sz + PG_SHIFT_OFFSET);
srq_context->idx_nxt_blk_addr =
cpu_to_le32(mtts_idx[1] >> PAGE_ADDR_SHIFT);
@@ -6133,7 +6122,7 @@ static int hns_roce_v2_modify_srq(struct ib_srq *ibsrq,
int ret;
if (srq_attr_mask & IB_SRQ_LIMIT) {
- if (srq_attr->srq_limit >= srq->max)
+ if (srq_attr->srq_limit >= srq->wqe_cnt)
return -EINVAL;
mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
@@ -6193,7 +6182,7 @@ static int hns_roce_v2_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
SRQC_BYTE_8_SRQ_LIMIT_WL_S);
attr->srq_limit = limit_wl;
- attr->max_wr = srq->max - 1;
+ attr->max_wr = srq->wqe_cnt - 1;
attr->max_sge = srq->max_gs;
memcpy(srq_context, mailbox->buf, sizeof(*srq_context));
@@ -6246,7 +6235,7 @@ static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
spin_lock_irqsave(&srq->lock, flags);
- ind = srq->head & (srq->max - 1);
+ ind = srq->head & (srq->wqe_cnt - 1);
for (nreq = 0; wr; ++nreq, wr = wr->next) {
if (unlikely(wr->num_sge > srq->max_gs)) {
@@ -6261,7 +6250,7 @@ static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
break;
}
- wqe_idx = find_empty_entry(&srq->idx_que, srq->max);
+ wqe_idx = find_empty_entry(&srq->idx_que, srq->wqe_cnt);
if (wqe_idx < 0) {
ret = -ENOMEM;
*bad_wr = wr;
@@ -6285,7 +6274,7 @@ static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
}
srq->wrid[wqe_idx] = wr->wr_id;
- ind = (ind + 1) & (srq->max - 1);
+ ind = (ind + 1) & (srq->wqe_cnt - 1);
}
if (likely(nreq)) {
@@ -6380,12 +6369,14 @@ static const struct pci_device_id hns_roce_hw_v2_pci_tbl[] = {
MODULE_DEVICE_TABLE(pci, hns_roce_hw_v2_pci_tbl);
-static int hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev,
+static void hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev,
struct hnae3_handle *handle)
{
struct hns_roce_v2_priv *priv = hr_dev->priv;
int i;
+ hr_dev->pci_dev = handle->pdev;
+ hr_dev->dev = &handle->pdev->dev;
hr_dev->hw = &hns_roce_hw_v2;
hr_dev->dfx = &hns_roce_dfx_hw_v2;
hr_dev->sdb_offset = ROCEE_DB_SQ_L_0_REG;
@@ -6410,8 +6401,6 @@ static int hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev,
hr_dev->reset_cnt = handle->ae_algo->ops->ae_dev_reset_cnt(handle);
priv->handle = handle;
-
- return 0;
}
static int __hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
@@ -6429,14 +6418,7 @@ static int __hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
goto error_failed_kzalloc;
}
- hr_dev->pci_dev = handle->pdev;
- hr_dev->dev = &handle->pdev->dev;
-
- ret = hns_roce_hw_v2_get_cfg(hr_dev, handle);
- if (ret) {
- dev_err(hr_dev->dev, "Get Configuration failed!\n");
- goto error_failed_get_cfg;
- }
+ hns_roce_hw_v2_get_cfg(hr_dev, handle);
ret = hns_roce_init(hr_dev);
if (ret) {
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index 43219d2f7de0..76a14db7028d 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -87,8 +87,8 @@
#define HNS_ROCE_V2_MTT_ENTRY_SZ 64
#define HNS_ROCE_V2_CQE_ENTRY_SIZE 32
#define HNS_ROCE_V2_SCCC_ENTRY_SZ 32
-#define HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ 4096
-#define HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ 4096
+#define HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ PAGE_SIZE
+#define HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ PAGE_SIZE
#define HNS_ROCE_V2_PAGE_SIZE_SUPPORTED 0xFFFFF000
#define HNS_ROCE_V2_MAX_INNER_MTPT_NUM 2
#define HNS_ROCE_INVALID_LKEY 0x100
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index b5d196c119ee..854ef6e74788 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -111,7 +111,7 @@ static int handle_en_event(struct hns_roce_dev *hr_dev, u8 port,
netdev = hr_dev->iboe.netdevs[port];
if (!netdev) {
- dev_err(dev, "port(%d) can't find netdev\n", port);
+ dev_err(dev, "Can't find netdev on port(%u)!\n", port);
return -ENODEV;
}
@@ -253,7 +253,7 @@ static int hns_roce_query_port(struct ib_device *ib_dev, u8 port_num,
net_dev = hr_dev->iboe.netdevs[port];
if (!net_dev) {
spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
- dev_err(dev, "find netdev %d failed!\r\n", port);
+ dev_err(dev, "Find netdev %u failed!\n", port);
return -EINVAL;
}
@@ -301,12 +301,6 @@ static int hns_roce_modify_device(struct ib_device *ib_dev, int mask,
return 0;
}
-static int hns_roce_modify_port(struct ib_device *ib_dev, u8 port_num, int mask,
- struct ib_port_modify *props)
-{
- return 0;
-}
-
static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx,
struct ib_udata *udata)
{
@@ -359,7 +353,8 @@ static int hns_roce_mmap(struct ib_ucontext *context,
return rdma_user_mmap_io(context, vma,
to_hr_ucontext(context)->uar.pfn,
PAGE_SIZE,
- pgprot_noncached(vma->vm_page_prot));
+ pgprot_noncached(vma->vm_page_prot),
+ NULL);
/* vm_pgoff: 1 -- TPTR */
case 1:
@@ -372,7 +367,8 @@ static int hns_roce_mmap(struct ib_ucontext *context,
return rdma_user_mmap_io(context, vma,
hr_dev->tptr_dma_addr >> PAGE_SHIFT,
hr_dev->tptr_size,
- vma->vm_page_prot);
+ vma->vm_page_prot,
+ NULL);
default:
return -EINVAL;
@@ -423,14 +419,14 @@ static const struct ib_device_ops hns_roce_dev_ops = {
.alloc_pd = hns_roce_alloc_pd,
.alloc_ucontext = hns_roce_alloc_ucontext,
.create_ah = hns_roce_create_ah,
- .create_cq = hns_roce_ib_create_cq,
+ .create_cq = hns_roce_create_cq,
.create_qp = hns_roce_create_qp,
.dealloc_pd = hns_roce_dealloc_pd,
.dealloc_ucontext = hns_roce_dealloc_ucontext,
.del_gid = hns_roce_del_gid,
.dereg_mr = hns_roce_dereg_mr,
.destroy_ah = hns_roce_destroy_ah,
- .destroy_cq = hns_roce_ib_destroy_cq,
+ .destroy_cq = hns_roce_destroy_cq,
.disassociate_ucontext = hns_roce_disassociate_ucontext,
.fill_res_entry = hns_roce_fill_res_entry,
.get_dma_mr = hns_roce_get_dma_mr,
@@ -438,7 +434,6 @@ static const struct ib_device_ops hns_roce_dev_ops = {
.get_port_immutable = hns_roce_port_immutable,
.mmap = hns_roce_mmap,
.modify_device = hns_roce_modify_device,
- .modify_port = hns_roce_modify_port,
.modify_qp = hns_roce_modify_qp,
.query_ah = hns_roce_query_ah,
.query_device = hns_roce_query_device,
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
index 5f8416ba09a9..9ad19170c3f9 100644
--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
+++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
@@ -48,21 +48,21 @@ unsigned long key_to_hw_index(u32 key)
return (key << 24) | (key >> 8);
}
-static int hns_roce_sw2hw_mpt(struct hns_roce_dev *hr_dev,
- struct hns_roce_cmd_mailbox *mailbox,
- unsigned long mpt_index)
+static int hns_roce_hw_create_mpt(struct hns_roce_dev *hr_dev,
+ struct hns_roce_cmd_mailbox *mailbox,
+ unsigned long mpt_index)
{
return hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, mpt_index, 0,
- HNS_ROCE_CMD_SW2HW_MPT,
+ HNS_ROCE_CMD_CREATE_MPT,
HNS_ROCE_CMD_TIMEOUT_MSECS);
}
-int hns_roce_hw2sw_mpt(struct hns_roce_dev *hr_dev,
- struct hns_roce_cmd_mailbox *mailbox,
- unsigned long mpt_index)
+int hns_roce_hw_destroy_mpt(struct hns_roce_dev *hr_dev,
+ struct hns_roce_cmd_mailbox *mailbox,
+ unsigned long mpt_index)
{
return hns_roce_cmd_mbox(hr_dev, 0, mailbox ? mailbox->dma : 0,
- mpt_index, !mailbox, HNS_ROCE_CMD_HW2SW_MPT,
+ mpt_index, !mailbox, HNS_ROCE_CMD_DESTROY_MPT,
HNS_ROCE_CMD_TIMEOUT_MSECS);
}
@@ -83,7 +83,7 @@ static int hns_roce_buddy_alloc(struct hns_roce_buddy *buddy, int order,
}
}
spin_unlock(&buddy->lock);
- return -1;
+ return -EINVAL;
found:
clear_bit(*seg, buddy->bits[o]);
@@ -206,13 +206,14 @@ static int hns_roce_alloc_mtt_range(struct hns_roce_dev *hr_dev, int order,
}
ret = hns_roce_buddy_alloc(buddy, order, seg);
- if (ret == -1)
- return -1;
+ if (ret)
+ return ret;
- if (hns_roce_table_get_range(hr_dev, table, *seg,
- *seg + (1 << order) - 1)) {
+ ret = hns_roce_table_get_range(hr_dev, table, *seg,
+ *seg + (1 << order) - 1);
+ if (ret) {
hns_roce_buddy_free(buddy, *seg, order);
- return -1;
+ return ret;
}
return 0;
@@ -578,7 +579,7 @@ static int hns_roce_mr_alloc(struct hns_roce_dev *hr_dev, u32 pd, u64 iova,
/* Allocate a key for mr from mr_table */
ret = hns_roce_bitmap_alloc(&hr_dev->mr_table.mtpt_bitmap, &index);
- if (ret == -1)
+ if (ret)
return -ENOMEM;
mr->iova = iova; /* MR va starting addr */
@@ -707,10 +708,11 @@ static void hns_roce_mr_free(struct hns_roce_dev *hr_dev,
int ret;
if (mr->enabled) {
- ret = hns_roce_hw2sw_mpt(hr_dev, NULL, key_to_hw_index(mr->key)
- & (hr_dev->caps.num_mtpts - 1));
+ ret = hns_roce_hw_destroy_mpt(hr_dev, NULL,
+ key_to_hw_index(mr->key) &
+ (hr_dev->caps.num_mtpts - 1));
if (ret)
- dev_warn(dev, "HW2SW_MPT failed (%d)\n", ret);
+ dev_warn(dev, "DESTROY_MPT failed (%d)\n", ret);
}
if (mr->size != ~0ULL) {
@@ -763,10 +765,10 @@ static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev,
goto err_page;
}
- ret = hns_roce_sw2hw_mpt(hr_dev, mailbox,
- mtpt_idx & (hr_dev->caps.num_mtpts - 1));
+ ret = hns_roce_hw_create_mpt(hr_dev, mailbox,
+ mtpt_idx & (hr_dev->caps.num_mtpts - 1));
if (ret) {
- dev_err(dev, "SW2HW_MPT failed (%d)\n", ret);
+ dev_err(dev, "CREATE_MPT failed (%d)\n", ret);
goto err_page;
}
@@ -1143,7 +1145,7 @@ struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
if (!mr)
return ERR_PTR(-ENOMEM);
- mr->umem = ib_umem_get(udata, start, length, access_flags, 0);
+ mr->umem = ib_umem_get(udata, start, length, access_flags);
if (IS_ERR(mr->umem)) {
ret = PTR_ERR(mr->umem);
goto err_free;
@@ -1228,7 +1230,7 @@ static int rereg_mr_trans(struct ib_mr *ibmr, int flags,
}
ib_umem_release(mr->umem);
- mr->umem = ib_umem_get(udata, start, length, mr_access_flags, 0);
+ mr->umem = ib_umem_get(udata, start, length, mr_access_flags);
if (IS_ERR(mr->umem)) {
ret = PTR_ERR(mr->umem);
mr->umem = NULL;
@@ -1308,9 +1310,9 @@ int hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start, u64 length,
if (ret)
goto free_cmd_mbox;
- ret = hns_roce_hw2sw_mpt(hr_dev, NULL, mtpt_idx);
+ ret = hns_roce_hw_destroy_mpt(hr_dev, NULL, mtpt_idx);
if (ret)
- dev_warn(dev, "HW2SW_MPT failed (%d)\n", ret);
+ dev_warn(dev, "DESTROY_MPT failed (%d)\n", ret);
mr->enabled = 0;
@@ -1332,9 +1334,9 @@ int hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start, u64 length,
goto free_cmd_mbox;
}
- ret = hns_roce_sw2hw_mpt(hr_dev, mailbox, mtpt_idx);
+ ret = hns_roce_hw_create_mpt(hr_dev, mailbox, mtpt_idx);
if (ret) {
- dev_err(dev, "SW2HW_MPT failed (%d)\n", ret);
+ dev_err(dev, "CREATE_MPT failed (%d)\n", ret);
ib_umem_release(mr->umem);
goto free_cmd_mbox;
}
@@ -1448,10 +1450,11 @@ static void hns_roce_mw_free(struct hns_roce_dev *hr_dev,
int ret;
if (mw->enabled) {
- ret = hns_roce_hw2sw_mpt(hr_dev, NULL, key_to_hw_index(mw->rkey)
- & (hr_dev->caps.num_mtpts - 1));
+ ret = hns_roce_hw_destroy_mpt(hr_dev, NULL,
+ key_to_hw_index(mw->rkey) &
+ (hr_dev->caps.num_mtpts - 1));
if (ret)
- dev_warn(dev, "MW HW2SW_MPT failed (%d)\n", ret);
+ dev_warn(dev, "MW DESTROY_MPT failed (%d)\n", ret);
hns_roce_table_put(hr_dev, &hr_dev->mr_table.mtpt_table,
key_to_hw_index(mw->rkey));
@@ -1487,10 +1490,10 @@ static int hns_roce_mw_enable(struct hns_roce_dev *hr_dev,
goto err_page;
}
- ret = hns_roce_sw2hw_mpt(hr_dev, mailbox,
- mtpt_idx & (hr_dev->caps.num_mtpts - 1));
+ ret = hns_roce_hw_create_mpt(hr_dev, mailbox,
+ mtpt_idx & (hr_dev->caps.num_mtpts - 1));
if (ret) {
- dev_err(dev, "MW sw2hw_mpt failed (%d)\n", ret);
+ dev_err(dev, "MW CREATE_MPT failed (%d)\n", ret);
goto err_page;
}
diff --git a/drivers/infiniband/hw/hns/hns_roce_pd.c b/drivers/infiniband/hw/hns/hns_roce_pd.c
index 912b89b4da34..780c780fdb22 100644
--- a/drivers/infiniband/hw/hns/hns_roce_pd.c
+++ b/drivers/infiniband/hw/hns/hns_roce_pd.c
@@ -96,7 +96,7 @@ int hns_roce_uar_alloc(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar)
/* Using bitmap to manager UAR index */
ret = hns_roce_bitmap_alloc(&hr_dev->uar_table.bitmap, &uar->logic_idx);
- if (ret == -1)
+ if (ret)
return -ENOMEM;
if (uar->logic_idx > 0 && hr_dev->caps.phy_num_uars > 1)
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index bd78ff90d998..a6565b674801 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -318,7 +318,7 @@ static int hns_roce_set_rq_size(struct hns_roce_dev *hr_dev,
* hr_qp->rq.max_gs);
}
- cap->max_recv_wr = hr_qp->rq.max_post = hr_qp->rq.wqe_cnt;
+ cap->max_recv_wr = hr_qp->rq.wqe_cnt;
cap->max_recv_sge = hr_qp->rq.max_gs;
return 0;
@@ -332,9 +332,8 @@ static int check_sq_size_with_integrity(struct hns_roce_dev *hr_dev,
u8 max_sq_stride = ilog2(roundup_sq_stride);
/* Sanity check SQ size before proceeding */
- if ((u32)(1 << ucmd->log_sq_bb_count) > hr_dev->caps.max_wqes ||
- ucmd->log_sq_stride > max_sq_stride ||
- ucmd->log_sq_stride < HNS_ROCE_IB_MIN_SQ_STRIDE) {
+ if (ucmd->log_sq_stride > max_sq_stride ||
+ ucmd->log_sq_stride < HNS_ROCE_IB_MIN_SQ_STRIDE) {
ibdev_err(&hr_dev->ib_dev, "check SQ size error!\n");
return -EINVAL;
}
@@ -358,13 +357,16 @@ static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
u32 max_cnt;
int ret;
+ if (check_shl_overflow(1, ucmd->log_sq_bb_count, &hr_qp->sq.wqe_cnt) ||
+ hr_qp->sq.wqe_cnt > hr_dev->caps.max_wqes)
+ return -EINVAL;
+
ret = check_sq_size_with_integrity(hr_dev, cap, ucmd);
if (ret) {
ibdev_err(&hr_dev->ib_dev, "Sanity check sq size failed\n");
return ret;
}
- hr_qp->sq.wqe_cnt = 1 << ucmd->log_sq_bb_count;
hr_qp->sq.wqe_shift = ucmd->log_sq_stride;
max_cnt = max(1U, cap->max_send_sge);
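The user-supplied log_sq_bb_count is now validated with check_shl_overflow() before it is ever shifted: 1 << log with log >= 32 is undefined behaviour, so the old (u32)(1 << ucmd->log_sq_bb_count) > max_wqes test could itself be fooled by a hostile value. check_shl_overflow(), from <linux/overflow.h>, stores the shifted result and returns true if the shift overflowed. The idiom as used in the hunk above:

    #include <linux/overflow.h>

    u32 wqe_cnt;

    /* Reject an overflowing shift and an in-range but oversized count. */
    if (check_shl_overflow(1, ucmd->log_sq_bb_count, &wqe_cnt) ||
        wqe_cnt > hr_dev->caps.max_wqes)
            return -EINVAL;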
@@ -391,37 +393,37 @@ static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
/* Get buf size, SQ and RQ are aligned to page_size */
if (hr_dev->caps.max_sq_sg <= 2) {
- hr_qp->buff_size = HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt <<
+ hr_qp->buff_size = HNS_ROCE_ALIGN_UP((hr_qp->rq.wqe_cnt <<
hr_qp->rq.wqe_shift), PAGE_SIZE) +
- HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
+ HNS_ROCE_ALIGN_UP((hr_qp->sq.wqe_cnt <<
hr_qp->sq.wqe_shift), PAGE_SIZE);
hr_qp->sq.offset = 0;
- hr_qp->rq.offset = HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
+ hr_qp->rq.offset = HNS_ROCE_ALIGN_UP((hr_qp->sq.wqe_cnt <<
hr_qp->sq.wqe_shift), PAGE_SIZE);
} else {
page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
hr_qp->sge.sge_cnt = ex_sge_num ?
max(page_size / (1 << hr_qp->sge.sge_shift), ex_sge_num) : 0;
- hr_qp->buff_size = HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt <<
+ hr_qp->buff_size = HNS_ROCE_ALIGN_UP((hr_qp->rq.wqe_cnt <<
hr_qp->rq.wqe_shift), page_size) +
- HNS_ROCE_ALOGN_UP((hr_qp->sge.sge_cnt <<
+ HNS_ROCE_ALIGN_UP((hr_qp->sge.sge_cnt <<
hr_qp->sge.sge_shift), page_size) +
- HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
+ HNS_ROCE_ALIGN_UP((hr_qp->sq.wqe_cnt <<
hr_qp->sq.wqe_shift), page_size);
hr_qp->sq.offset = 0;
if (ex_sge_num) {
- hr_qp->sge.offset = HNS_ROCE_ALOGN_UP(
+ hr_qp->sge.offset = HNS_ROCE_ALIGN_UP(
(hr_qp->sq.wqe_cnt <<
hr_qp->sq.wqe_shift),
page_size);
hr_qp->rq.offset = hr_qp->sge.offset +
- HNS_ROCE_ALOGN_UP((hr_qp->sge.sge_cnt <<
+ HNS_ROCE_ALIGN_UP((hr_qp->sge.sge_cnt <<
hr_qp->sge.sge_shift),
page_size);
} else {
- hr_qp->rq.offset = HNS_ROCE_ALOGN_UP(
+ hr_qp->rq.offset = HNS_ROCE_ALIGN_UP(
(hr_qp->sq.wqe_cnt <<
hr_qp->sq.wqe_shift),
page_size);
@@ -591,24 +593,24 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
/* Get buf size, SQ and RQ are aligned to PAGE_SIZE */
page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
hr_qp->sq.offset = 0;
- size = HNS_ROCE_ALOGN_UP(hr_qp->sq.wqe_cnt << hr_qp->sq.wqe_shift,
+ size = HNS_ROCE_ALIGN_UP(hr_qp->sq.wqe_cnt << hr_qp->sq.wqe_shift,
page_size);
if (hr_dev->caps.max_sq_sg > 2 && hr_qp->sge.sge_cnt) {
hr_qp->sge.sge_cnt = max(page_size/(1 << hr_qp->sge.sge_shift),
(u32)hr_qp->sge.sge_cnt);
hr_qp->sge.offset = size;
- size += HNS_ROCE_ALOGN_UP(hr_qp->sge.sge_cnt <<
+ size += HNS_ROCE_ALIGN_UP(hr_qp->sge.sge_cnt <<
hr_qp->sge.sge_shift, page_size);
}
hr_qp->rq.offset = size;
- size += HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt << hr_qp->rq.wqe_shift),
+ size += HNS_ROCE_ALIGN_UP((hr_qp->rq.wqe_cnt << hr_qp->rq.wqe_shift),
page_size);
hr_qp->buff_size = size;
/* Get the number of send WRs and SGEs */
- cap->max_send_wr = hr_qp->sq.max_post = hr_qp->sq.wqe_cnt;
+ cap->max_send_wr = hr_qp->sq.wqe_cnt;
cap->max_send_sge = hr_qp->sq.max_gs;
/* We don't support inline sends for kernel QPs (yet) */
@@ -743,7 +745,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
}
hr_qp->umem = ib_umem_get(udata, ucmd.buf_addr,
- hr_qp->buff_size, 0, 0);
+ hr_qp->buff_size, 0);
if (IS_ERR(hr_qp->umem)) {
dev_err(dev, "ib_umem_get error for create qp\n");
ret = PTR_ERR(hr_qp->umem);
@@ -1017,7 +1019,6 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
{
struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
struct ib_device *ibdev = &hr_dev->ib_dev;
- struct hns_roce_sqp *hr_sqp;
struct hns_roce_qp *hr_qp;
int ret;
@@ -1030,7 +1031,7 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata, 0,
hr_qp);
if (ret) {
- ibdev_err(ibdev, "Create RC QP 0x%06lx failed(%d)\n",
+ ibdev_err(ibdev, "Create QP 0x%06lx failed(%d)\n",
hr_qp->qpn, ret);
kfree(hr_qp);
return ERR_PTR(ret);
@@ -1047,11 +1048,10 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
return ERR_PTR(-EINVAL);
}
- hr_sqp = kzalloc(sizeof(*hr_sqp), GFP_KERNEL);
- if (!hr_sqp)
+ hr_qp = kzalloc(sizeof(*hr_qp), GFP_KERNEL);
+ if (!hr_qp)
return ERR_PTR(-ENOMEM);
- hr_qp = &hr_sqp->hr_qp;
hr_qp->port = init_attr->port_num - 1;
hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
@@ -1066,7 +1066,7 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
hr_qp->ibqp.qp_num, hr_qp);
if (ret) {
ibdev_err(ibdev, "Create GSI QP failed!\n");
- kfree(hr_sqp);
+ kfree(hr_qp);
return ERR_PTR(ret);
}
@@ -1289,7 +1289,7 @@ bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq,
u32 cur;
cur = hr_wq->head - hr_wq->tail;
- if (likely(cur + nreq < hr_wq->max_post))
+ if (likely(cur + nreq < hr_wq->wqe_cnt))
return false;
hr_cq = to_hr_cq(ib_cq);
@@ -1297,7 +1297,7 @@ bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq,
cur = hr_wq->head - hr_wq->tail;
spin_unlock(&hr_cq->lock);
- return cur + nreq >= hr_wq->max_post;
+ return cur + nreq >= hr_wq->wqe_cnt;
}
int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
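
Aside: the hns_roce_set_user_sq_size() hunk above replaces an unchecked `1 << ucmd->log_sq_bb_count` with check_shl_overflow(), which lives in include/linux/overflow.h and returns true when the shifted value does not fit the destination type. A minimal userspace sketch of the same idea, with made-up values for the user-supplied exponent and the device cap:

    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors the spirit of check_shl_overflow() for a u32 destination:
     * returns nonzero when (a << s) does not fit in 32 bits.
     */
    static int shl_overflows_u32(uint32_t a, unsigned int s, uint32_t *res)
    {
        if (s >= 32 || (a && s > (unsigned int)__builtin_clz(a)))
            return 1;
        *res = a << s;
        return 0;
    }

    int main(void)
    {
        uint32_t wqe_cnt;
        unsigned int log_sq_bb_count = 10; /* hypothetical user input */
        uint32_t max_wqes = 32768;         /* hypothetical device cap */

        if (shl_overflows_u32(1, log_sq_bb_count, &wqe_cnt) ||
            wqe_cnt > max_wqes) {
            fprintf(stderr, "invalid log_sq_bb_count\n");
            return 1;
        }
        printf("wqe_cnt = %u\n", wqe_cnt); /* prints 1024 */
        return 0;
    }
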
diff --git a/drivers/infiniband/hw/hns/hns_roce_restrack.c b/drivers/infiniband/hw/hns/hns_roce_restrack.c
index 0a31d0a3d657..06871731ac43 100644
--- a/drivers/infiniband/hw/hns/hns_roce_restrack.c
+++ b/drivers/infiniband/hw/hns/hns_roce_restrack.c
@@ -98,11 +98,15 @@ static int hns_roce_fill_res_cq_entry(struct sk_buff *msg,
goto err;
table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
- if (!table_attr)
+ if (!table_attr) {
+ ret = -EMSGSIZE;
goto err;
+ }
- if (hns_roce_fill_cq(msg, context))
+ if (hns_roce_fill_cq(msg, context)) {
+ ret = -EMSGSIZE;
goto err_cancel_table;
+ }
nla_nest_end(msg, table_attr);
kfree(context);
@@ -113,7 +117,7 @@ err_cancel_table:
nla_nest_cancel(msg, table_attr);
err:
kfree(context);
- return -EMSGSIZE;
+ return ret;
}
int hns_roce_fill_res_entry(struct sk_buff *msg,
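
Aside: the restrack hunk above stops collapsing every failure into a hardcoded -EMSGSIZE; each exit path now sets `ret` to its own cause before jumping to the shared cleanup labels. A tiny sketch of that pattern, with hypothetical fill helpers standing in for the nla_* calls:

    #include <errno.h>
    #include <stdio.h>

    /* Hypothetical helpers standing in for nla_nest_start()/fill paths. */
    static int fill_header(void)  { return 0; }
    static int fill_payload(void) { return -ENOMEM; /* pretend this failed */ }

    static int fill_entry(void)
    {
        int ret;

        ret = fill_header();
        if (ret)
            goto err;          /* propagate the real cause */

        ret = fill_payload();
        if (ret)
            goto err_cancel;

        return 0;

    err_cancel:
        /* undo the partially written nest here, keeping ret intact */
    err:
        return ret;            /* not a hardcoded -EMSGSIZE */
    }

    int main(void)
    {
        printf("fill_entry() = %d\n", fill_entry());
        return 0;
    }
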
diff --git a/drivers/infiniband/hw/hns/hns_roce_srq.c b/drivers/infiniband/hw/hns/hns_roce_srq.c
index 9591457eb768..7113ebfdb4f0 100644
--- a/drivers/infiniband/hw/hns/hns_roce_srq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_srq.c
@@ -59,21 +59,21 @@ static void hns_roce_ib_srq_event(struct hns_roce_srq *srq,
}
}
-static int hns_roce_sw2hw_srq(struct hns_roce_dev *dev,
- struct hns_roce_cmd_mailbox *mailbox,
- unsigned long srq_num)
+static int hns_roce_hw_create_srq(struct hns_roce_dev *dev,
+ struct hns_roce_cmd_mailbox *mailbox,
+ unsigned long srq_num)
{
return hns_roce_cmd_mbox(dev, mailbox->dma, 0, srq_num, 0,
- HNS_ROCE_CMD_SW2HW_SRQ,
+ HNS_ROCE_CMD_CREATE_SRQ,
HNS_ROCE_CMD_TIMEOUT_MSECS);
}
-static int hns_roce_hw2sw_srq(struct hns_roce_dev *dev,
- struct hns_roce_cmd_mailbox *mailbox,
- unsigned long srq_num)
+static int hns_roce_hw_destroy_srq(struct hns_roce_dev *dev,
+ struct hns_roce_cmd_mailbox *mailbox,
+ unsigned long srq_num)
{
return hns_roce_cmd_mbox(dev, 0, mailbox ? mailbox->dma : 0, srq_num,
- mailbox ? 0 : 1, HNS_ROCE_CMD_HW2SW_SRQ,
+ mailbox ? 0 : 1, HNS_ROCE_CMD_DESTROY_SRQ,
HNS_ROCE_CMD_TIMEOUT_MSECS);
}
@@ -95,8 +95,7 @@ static int hns_roce_srq_alloc(struct hns_roce_dev *hr_dev, u32 pdn, u32 cqn,
srq->mtt.first_seg,
&dma_handle_wqe);
if (!mtts_wqe) {
- dev_err(hr_dev->dev,
- "SRQ alloc.Failed to find srq buf addr.\n");
+ dev_err(hr_dev->dev, "Failed to find mtt for srq buf.\n");
return -EINVAL;
}
@@ -106,13 +105,14 @@ static int hns_roce_srq_alloc(struct hns_roce_dev *hr_dev, u32 pdn, u32 cqn,
&dma_handle_idx);
if (!mtts_idx) {
dev_err(hr_dev->dev,
- "SRQ alloc.Failed to find idx que buf addr.\n");
+ "Failed to find mtt for srq idx queue buf.\n");
return -EINVAL;
}
ret = hns_roce_bitmap_alloc(&srq_table->bitmap, &srq->srqn);
- if (ret == -1) {
- dev_err(hr_dev->dev, "SRQ alloc.Failed to alloc index.\n");
+ if (ret) {
+ dev_err(hr_dev->dev,
+ "Failed to alloc a bit from srq bitmap.\n");
return -ENOMEM;
}
@@ -134,7 +134,7 @@ static int hns_roce_srq_alloc(struct hns_roce_dev *hr_dev, u32 pdn, u32 cqn,
mtts_wqe, mtts_idx, dma_handle_wqe,
dma_handle_idx);
- ret = hns_roce_sw2hw_srq(hr_dev, mailbox, srq->srqn);
+ ret = hns_roce_hw_create_srq(hr_dev, mailbox, srq->srqn);
hns_roce_free_cmd_mailbox(hr_dev, mailbox);
if (ret)
goto err_xa;
@@ -160,9 +160,9 @@ static void hns_roce_srq_free(struct hns_roce_dev *hr_dev,
struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
int ret;
- ret = hns_roce_hw2sw_srq(hr_dev, NULL, srq->srqn);
+ ret = hns_roce_hw_destroy_srq(hr_dev, NULL, srq->srqn);
if (ret)
- dev_err(hr_dev->dev, "HW2SW_SRQ failed (%d) for CQN %06lx\n",
+ dev_err(hr_dev->dev, "DESTROY_SRQ failed (%d) for SRQN %06lx\n",
ret, srq->srqn);
xa_erase(&srq_table->xa, srq->srqn);
@@ -180,22 +180,23 @@ static int create_user_srq(struct hns_roce_srq *srq, struct ib_udata *udata,
{
struct hns_roce_dev *hr_dev = to_hr_dev(srq->ibsrq.device);
struct hns_roce_ib_create_srq ucmd;
- u32 page_shift;
- u32 npages;
+ struct hns_roce_buf *buf;
int ret;
if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))
return -EFAULT;
- srq->umem = ib_umem_get(udata, ucmd.buf_addr, srq_buf_size, 0, 0);
+ srq->umem = ib_umem_get(udata, ucmd.buf_addr, srq_buf_size, 0);
if (IS_ERR(srq->umem))
return PTR_ERR(srq->umem);
- npages = (ib_umem_page_count(srq->umem) +
- (1 << hr_dev->caps.srqwqe_buf_pg_sz) - 1) /
- (1 << hr_dev->caps.srqwqe_buf_pg_sz);
- page_shift = PAGE_SHIFT + hr_dev->caps.srqwqe_buf_pg_sz;
- ret = hns_roce_mtt_init(hr_dev, npages, page_shift, &srq->mtt);
+ buf = &srq->buf;
+ buf->npages = (ib_umem_page_count(srq->umem) +
+ (1 << hr_dev->caps.srqwqe_buf_pg_sz) - 1) /
+ (1 << hr_dev->caps.srqwqe_buf_pg_sz);
+ buf->page_shift = PAGE_SHIFT + hr_dev->caps.srqwqe_buf_pg_sz;
+ ret = hns_roce_mtt_init(hr_dev, buf->npages, buf->page_shift,
+ &srq->mtt);
if (ret)
goto err_user_buf;
@@ -205,16 +206,19 @@ static int create_user_srq(struct hns_roce_srq *srq, struct ib_udata *udata,
/* config index queue BA */
srq->idx_que.umem = ib_umem_get(udata, ucmd.que_addr,
- srq->idx_que.buf_size, 0, 0);
+ srq->idx_que.buf_size, 0);
if (IS_ERR(srq->idx_que.umem)) {
dev_err(hr_dev->dev, "ib_umem_get error for index queue\n");
ret = PTR_ERR(srq->idx_que.umem);
goto err_user_srq_mtt;
}
- ret = hns_roce_mtt_init(hr_dev, ib_umem_page_count(srq->idx_que.umem),
- PAGE_SHIFT, &srq->idx_que.mtt);
-
+ buf = &srq->idx_que.idx_buf;
+ buf->npages = DIV_ROUND_UP(ib_umem_page_count(srq->idx_que.umem),
+ 1 << hr_dev->caps.idx_buf_pg_sz);
+ buf->page_shift = PAGE_SHIFT + hr_dev->caps.idx_buf_pg_sz;
+ ret = hns_roce_mtt_init(hr_dev, buf->npages, buf->page_shift,
+ &srq->idx_que.mtt);
if (ret) {
dev_err(hr_dev->dev, "hns_roce_mtt_init error for idx que\n");
goto err_user_idx_mtt;
@@ -251,7 +255,7 @@ static int hns_roce_create_idx_que(struct ib_pd *pd, struct hns_roce_srq *srq,
struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
struct hns_roce_idx_que *idx_que = &srq->idx_que;
- idx_que->bitmap = bitmap_zalloc(srq->max, GFP_KERNEL);
+ idx_que->bitmap = bitmap_zalloc(srq->wqe_cnt, GFP_KERNEL);
if (!idx_que->bitmap)
return -ENOMEM;
@@ -277,7 +281,7 @@ static int create_kernel_srq(struct hns_roce_srq *srq, int srq_buf_size)
return -ENOMEM;
srq->head = 0;
- srq->tail = srq->max - 1;
+ srq->tail = srq->wqe_cnt - 1;
ret = hns_roce_mtt_init(hr_dev, srq->buf.npages, srq->buf.page_shift,
&srq->mtt);
@@ -308,7 +312,7 @@ static int create_kernel_srq(struct hns_roce_srq *srq, int srq_buf_size)
if (ret)
goto err_kernel_idx_buf;
- srq->wrid = kvmalloc_array(srq->max, sizeof(u64), GFP_KERNEL);
+ srq->wrid = kvmalloc_array(srq->wqe_cnt, sizeof(u64), GFP_KERNEL);
if (!srq->wrid) {
ret = -ENOMEM;
goto err_kernel_idx_buf;
@@ -354,7 +358,7 @@ static void destroy_kernel_srq(struct hns_roce_dev *hr_dev,
}
int hns_roce_create_srq(struct ib_srq *ib_srq,
- struct ib_srq_init_attr *srq_init_attr,
+ struct ib_srq_init_attr *init_attr,
struct ib_udata *udata)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ib_srq->device);
@@ -366,24 +370,24 @@ int hns_roce_create_srq(struct ib_srq *ib_srq,
u32 cqn;
/* Check the requested SRQ WQE and SGE numbers against device caps */
- if (srq_init_attr->attr.max_wr >= hr_dev->caps.max_srq_wrs ||
- srq_init_attr->attr.max_sge > hr_dev->caps.max_srq_sges)
+ if (init_attr->attr.max_wr >= hr_dev->caps.max_srq_wrs ||
+ init_attr->attr.max_sge > hr_dev->caps.max_srq_sges)
return -EINVAL;
mutex_init(&srq->mutex);
spin_lock_init(&srq->lock);
- srq->max = roundup_pow_of_two(srq_init_attr->attr.max_wr + 1);
- srq->max_gs = srq_init_attr->attr.max_sge;
+ srq->wqe_cnt = roundup_pow_of_two(init_attr->attr.max_wr + 1);
+ srq->max_gs = init_attr->attr.max_sge;
- srq_desc_size = max(16, 16 * srq->max_gs);
+ srq_desc_size = roundup_pow_of_two(max(16, 16 * srq->max_gs));
srq->wqe_shift = ilog2(srq_desc_size);
- srq_buf_size = srq->max * srq_desc_size;
+ srq_buf_size = srq->wqe_cnt * srq_desc_size;
srq->idx_que.entry_sz = HNS_ROCE_IDX_QUE_ENTRY_SZ;
- srq->idx_que.buf_size = srq->max * srq->idx_que.entry_sz;
+ srq->idx_que.buf_size = srq->wqe_cnt * srq->idx_que.entry_sz;
srq->mtt.mtt_type = MTT_TYPE_SRQWQE;
srq->idx_que.mtt.mtt_type = MTT_TYPE_IDX;
@@ -401,8 +405,8 @@ int hns_roce_create_srq(struct ib_srq *ib_srq,
}
}
- cqn = ib_srq_has_cq(srq_init_attr->srq_type) ?
- to_hr_cq(srq_init_attr->ext.cq)->cqn : 0;
+ cqn = ib_srq_has_cq(init_attr->srq_type) ?
+ to_hr_cq(init_attr->ext.cq)->cqn : 0;
srq->db_reg_l = hr_dev->reg_base + SRQ_DB_REG;
@@ -449,7 +453,7 @@ void hns_roce_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt);
} else {
kvfree(srq->wrid);
- hns_roce_buf_free(hr_dev, srq->max << srq->wqe_shift,
+ hns_roce_buf_free(hr_dev, srq->wqe_cnt << srq->wqe_shift,
&srq->buf);
}
ib_umem_release(srq->idx_que.umem);
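
Aside: the create_user_srq() hunk above computes how many driver-sized pages cover the umem with DIV_ROUND_UP over `1 << pg_sz`, rather than open-coding `(n + sz - 1) / sz`. A minimal sketch with made-up numbers:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        /* Hypothetical: 13 system pages, driver pages of 4 system pages. */
        unsigned int umem_pages = 13;
        unsigned int pg_sz_shift = 2;  /* stands in for caps.idx_buf_pg_sz */
        unsigned int npages = DIV_ROUND_UP(umem_pages, 1U << pg_sz_shift);
        unsigned int page_shift = 12 + pg_sz_shift; /* PAGE_SHIFT + pg_sz */

        printf("npages=%u page_shift=%u\n", npages, page_shift); /* 4, 14 */
        return 0;
    }
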
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
index 2d6a378e8560..bb78d3280acc 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
@@ -2079,9 +2079,9 @@ static int i40iw_addr_resolve_neigh_ipv6(struct i40iw_device *iwdev,
dst = i40iw_get_dst_ipv6(&src_addr, &dst_addr);
if (!dst || dst->error) {
if (dst) {
- dst_release(dst);
i40iw_pr_err("ip6_route_output returned dst->error = %d\n",
dst->error);
+ dst_release(dst);
}
return rc;
}
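
Aside: the i40iw hunk above is a use-after-free fix: dst->error was logged after dst_release() could have freed dst. The general rule, sketched with a stand-in type, is to read everything you need from an object before dropping the last reference:

    #include <stdio.h>
    #include <stdlib.h>

    struct dst { int error; };           /* stand-in for struct dst_entry */

    static void dst_release(struct dst *d) { free(d); }

    static void report(struct dst *d)
    {
        /* Read the field first... */
        int err = d->error;

        /* ...then drop the reference. Reading d->error after
         * dst_release() would be a use-after-free, which is what
         * the hunk above fixes by reordering the two calls.
         */
        dst_release(d);
        fprintf(stderr, "ip6_route_output returned dst->error = %d\n", err);
    }

    int main(void)
    {
        struct dst *d = malloc(sizeof(*d));
        if (!d)
            return 1;
        d->error = -113;
        report(d);
        return 0;
    }
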
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index cd9ee1664a69..86375947bc67 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -1763,7 +1763,7 @@ static struct ib_mr *i40iw_reg_user_mr(struct ib_pd *pd,
if (length > I40IW_MAX_MR_SIZE)
return ERR_PTR(-EINVAL);
- region = ib_umem_get(udata, start, length, acc, 0);
+ region = ib_umem_get(udata, start, length, acc);
if (IS_ERR(region))
return (struct ib_mr *)region;
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index a7d238d312f0..306b21281fa2 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -145,7 +145,7 @@ static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_udata *udata,
int n;
*umem = ib_umem_get(udata, buf_addr, cqe * cqe_size,
- IB_ACCESS_LOCAL_WRITE, 1);
+ IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(*umem))
return PTR_ERR(*umem);
diff --git a/drivers/infiniband/hw/mlx4/doorbell.c b/drivers/infiniband/hw/mlx4/doorbell.c
index 0f390351cef0..714f9df5bf39 100644
--- a/drivers/infiniband/hw/mlx4/doorbell.c
+++ b/drivers/infiniband/hw/mlx4/doorbell.c
@@ -64,7 +64,7 @@ int mlx4_ib_db_map_user(struct ib_udata *udata, unsigned long virt,
page->user_virt = (virt & PAGE_MASK);
page->refcnt = 0;
- page->umem = ib_umem_get(udata, virt & PAGE_MASK, PAGE_SIZE, 0, 0);
+ page->umem = ib_umem_get(udata, virt & PAGE_MASK, PAGE_SIZE, 0);
if (IS_ERR(page->umem)) {
err = PTR_ERR(page->umem);
kfree(page);
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 57079110af9b..abe68708d6d6 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -966,7 +966,6 @@ static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
}
mutex_unlock(&dev->counters_table[port_num - 1].mutex);
if (stats_avail) {
- memset(out_mad->data, 0, sizeof out_mad->data);
switch (counter_stats.counter_mode & 0xf) {
case 0:
edit_counter(&counter_stats,
@@ -984,38 +983,31 @@ static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
const struct ib_wc *in_wc, const struct ib_grh *in_grh,
- const struct ib_mad_hdr *in, size_t in_mad_size,
- struct ib_mad_hdr *out, size_t *out_mad_size,
- u16 *out_mad_pkey_index)
+ const struct ib_mad *in, struct ib_mad *out,
+ size_t *out_mad_size, u16 *out_mad_pkey_index)
{
struct mlx4_ib_dev *dev = to_mdev(ibdev);
- const struct ib_mad *in_mad = (const struct ib_mad *)in;
- struct ib_mad *out_mad = (struct ib_mad *)out;
enum rdma_link_layer link = rdma_port_get_link_layer(ibdev, port_num);
- if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
- *out_mad_size != sizeof(*out_mad)))
- return IB_MAD_RESULT_FAILURE;
-
/* iboe_process_mad() which uses the HCA flow-counters to implement IB PMA
* queries, should be called only by VFs and for that specific purpose
*/
if (link == IB_LINK_LAYER_INFINIBAND) {
if (mlx4_is_slave(dev->dev) &&
- (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT &&
- (in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS ||
- in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS_EXT ||
- in_mad->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO)))
- return iboe_process_mad(ibdev, mad_flags, port_num, in_wc,
- in_grh, in_mad, out_mad);
+ (in->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT &&
+ (in->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS ||
+ in->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS_EXT ||
+ in->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO)))
+ return iboe_process_mad(ibdev, mad_flags, port_num,
+ in_wc, in_grh, in, out);
- return ib_process_mad(ibdev, mad_flags, port_num, in_wc,
- in_grh, in_mad, out_mad);
+ return ib_process_mad(ibdev, mad_flags, port_num, in_wc, in_grh,
+ in, out);
}
if (link == IB_LINK_LAYER_ETHERNET)
return iboe_process_mad(ibdev, mad_flags, port_num, in_wc,
- in_grh, in_mad, out_mad);
+ in_grh, in, out);
return -EINVAL;
}
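
Aside: the process_mad conversion above hands drivers fully typed `struct ib_mad` pointers, so the per-driver size WARN_ONs and `ib_mad_hdr` casts disappear; the core validates sizes once. A sketch with stand-in types (the real ones live in rdma/ib_mad.h):

    #include <stdio.h>

    struct ib_mad_hdr { unsigned char mgmt_class; unsigned char method; };
    struct ib_mad { struct ib_mad_hdr mad_hdr; unsigned char data[232]; };

    static int process_mad(const struct ib_mad *in, struct ib_mad *out)
    {
        /* Fields are reachable directly: no casts and no
         * in_mad_size/out_mad_size checks in the driver.
         */
        out->mad_hdr = in->mad_hdr;
        return in->mad_hdr.method;
    }

    int main(void)
    {
        struct ib_mad in = { .mad_hdr = { .mgmt_class = 0x04, .method = 1 } };
        struct ib_mad out;

        printf("method %d\n", process_mad(&in, &out));
        return 0;
    }
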
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 8d2f1e38b891..0b5dc1d5928f 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -256,6 +256,8 @@ static int mlx4_ib_add_gid(const struct ib_gid_attr *attr, void **context)
int hw_update = 0;
int i;
struct gid_entry *gids = NULL;
+ u16 vlan_id = 0xffff;
+ u8 mac[ETH_ALEN];
if (!rdma_cap_roce_gid_table(attr->device, attr->port_num))
return -EINVAL;
@@ -266,12 +268,16 @@ static int mlx4_ib_add_gid(const struct ib_gid_attr *attr, void **context)
if (!context)
return -EINVAL;
+ ret = rdma_read_gid_l2_fields(attr, &vlan_id, &mac[0]);
+ if (ret)
+ return ret;
port_gid_table = &iboe->gids[attr->port_num - 1];
spin_lock_bh(&iboe->lock);
for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i) {
if (!memcmp(&port_gid_table->gids[i].gid,
&attr->gid, sizeof(attr->gid)) &&
- port_gid_table->gids[i].gid_type == attr->gid_type) {
+ port_gid_table->gids[i].gid_type == attr->gid_type &&
+ port_gid_table->gids[i].vlan_id == vlan_id) {
found = i;
break;
}
@@ -291,6 +297,7 @@ static int mlx4_ib_add_gid(const struct ib_gid_attr *attr, void **context)
memcpy(&port_gid_table->gids[free].gid,
&attr->gid, sizeof(attr->gid));
port_gid_table->gids[free].gid_type = attr->gid_type;
+ port_gid_table->gids[free].vlan_id = vlan_id;
port_gid_table->gids[free].ctx->real_index = free;
port_gid_table->gids[free].ctx->refcount = 1;
hw_update = 1;
@@ -1146,7 +1153,8 @@ static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
return rdma_user_mmap_io(context, vma,
to_mucontext(context)->uar.pfn,
PAGE_SIZE,
- pgprot_noncached(vma->vm_page_prot));
+ pgprot_noncached(vma->vm_page_prot),
+ NULL);
case 1:
if (dev->dev->caps.bf_reg_size == 0)
@@ -1155,7 +1163,8 @@ static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
context, vma,
to_mucontext(context)->uar.pfn +
dev->dev->caps.num_uars,
- PAGE_SIZE, pgprot_writecombine(vma->vm_page_prot));
+ PAGE_SIZE, pgprot_writecombine(vma->vm_page_prot),
+ NULL);
case 3: {
struct mlx4_clock_params params;
@@ -1171,7 +1180,8 @@ static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
params.bar) +
params.offset) >>
PAGE_SHIFT,
- PAGE_SIZE, pgprot_noncached(vma->vm_page_prot));
+ PAGE_SIZE, pgprot_noncached(vma->vm_page_prot),
+ NULL);
}
default:
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index eb53bb4c0c91..d188573187fa 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -508,6 +508,7 @@ struct gid_entry {
union ib_gid gid;
enum ib_gid_type gid_type;
struct gid_cache_context *ctx;
+ u16 vlan_id;
};
struct mlx4_port_gid_table {
@@ -786,11 +787,10 @@ int mlx4_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
int mlx4_MAD_IFC(struct mlx4_ib_dev *dev, int mad_ifc_flags,
int port, const struct ib_wc *in_wc, const struct ib_grh *in_grh,
const void *in_mad, void *response_mad);
-int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
const struct ib_wc *in_wc, const struct ib_grh *in_grh,
- const struct ib_mad_hdr *in, size_t in_mad_size,
- struct ib_mad_hdr *out, size_t *out_mad_size,
- u16 *out_mad_pkey_index);
+ const struct ib_mad *in, struct ib_mad *out,
+ size_t *out_mad_size, u16 *out_mad_pkey_index);
int mlx4_ib_mad_init(struct mlx4_ib_dev *dev);
void mlx4_ib_mad_cleanup(struct mlx4_ib_dev *dev);
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index 6ae503cfc526..dfa17bcdcdbc 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -398,7 +398,7 @@ static struct ib_umem *mlx4_get_umem_mr(struct ib_udata *udata, u64 start,
up_read(&current->mm->mmap_sem);
}
- return ib_umem_get(udata, start, length, access_flags, 0);
+ return ib_umem_get(udata, start, length, access_flags);
}
struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index bd4aa04416c6..85f57b76e446 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -916,7 +916,7 @@ static int create_rq(struct ib_pd *pd, struct ib_qp_init_attr *init_attr,
qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
(qp->sq.wqe_cnt << qp->sq.wqe_shift);
- qp->umem = ib_umem_get(udata, wq.buf_addr, qp->buf_size, 0, 0);
+ qp->umem = ib_umem_get(udata, wq.buf_addr, qp->buf_size, 0);
if (IS_ERR(qp->umem)) {
err = PTR_ERR(qp->umem);
goto err;
@@ -1110,8 +1110,7 @@ static int create_qp_common(struct ib_pd *pd, struct ib_qp_init_attr *init_attr,
if (err)
goto err;
- qp->umem =
- ib_umem_get(udata, ucmd.buf_addr, qp->buf_size, 0, 0);
+ qp->umem = ib_umem_get(udata, ucmd.buf_addr, qp->buf_size, 0);
if (IS_ERR(qp->umem)) {
err = PTR_ERR(qp->umem);
goto err;
diff --git a/drivers/infiniband/hw/mlx4/srq.c b/drivers/infiniband/hw/mlx4/srq.c
index 848db7264cc9..8dcf6e3d9ae2 100644
--- a/drivers/infiniband/hw/mlx4/srq.c
+++ b/drivers/infiniband/hw/mlx4/srq.c
@@ -110,7 +110,7 @@ int mlx4_ib_create_srq(struct ib_srq *ib_srq,
if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))
return -EFAULT;
- srq->umem = ib_umem_get(udata, ucmd.buf_addr, buf_size, 0, 0);
+ srq->umem = ib_umem_get(udata, ucmd.buf_addr, buf_size, 0);
if (IS_ERR(srq->umem))
return PTR_ERR(srq->umem);
diff --git a/drivers/infiniband/hw/mlx5/Makefile b/drivers/infiniband/hw/mlx5/Makefile
index 9924be8384d8..d0a043ccbe58 100644
--- a/drivers/infiniband/hw/mlx5/Makefile
+++ b/drivers/infiniband/hw/mlx5/Makefile
@@ -3,7 +3,7 @@ obj-$(CONFIG_MLX5_INFINIBAND) += mlx5_ib.o
mlx5_ib-y := main.o cq.o doorbell.o qp.o mem.o srq_cmd.o \
srq.o mr.o ah.o mad.o gsi.o ib_virt.o cmd.o \
- cong.o
+ cong.o restrack.o
mlx5_ib-$(CONFIG_INFINIBAND_ON_DEMAND_PAGING) += odp.o
mlx5_ib-$(CONFIG_MLX5_ESWITCH) += ib_rep.o
mlx5_ib-$(CONFIG_INFINIBAND_USER_ACCESS) += devx.o
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 45f48cde6b9d..dd8d24ee8e1d 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -423,9 +423,6 @@ static int mlx5_poll_one(struct mlx5_ib_cq *cq,
struct mlx5_cqe64 *cqe64;
struct mlx5_core_qp *mqp;
struct mlx5_ib_wq *wq;
- struct mlx5_sig_err_cqe *sig_err_cqe;
- struct mlx5_core_mkey *mmkey;
- struct mlx5_ib_mr *mr;
uint8_t opcode;
uint32_t qpn;
u16 wqe_ctr;
@@ -519,27 +516,29 @@ repoll:
}
}
break;
- case MLX5_CQE_SIG_ERR:
- sig_err_cqe = (struct mlx5_sig_err_cqe *)cqe64;
+ case MLX5_CQE_SIG_ERR: {
+ struct mlx5_sig_err_cqe *sig_err_cqe =
+ (struct mlx5_sig_err_cqe *)cqe64;
+ struct mlx5_core_sig_ctx *sig;
- xa_lock(&dev->mdev->priv.mkey_table);
- mmkey = xa_load(&dev->mdev->priv.mkey_table,
+ xa_lock(&dev->sig_mrs);
+ sig = xa_load(&dev->sig_mrs,
mlx5_base_mkey(be32_to_cpu(sig_err_cqe->mkey)));
- mr = to_mibmr(mmkey);
- get_sig_err_item(sig_err_cqe, &mr->sig->err_item);
- mr->sig->sig_err_exists = true;
- mr->sig->sigerr_count++;
+ get_sig_err_item(sig_err_cqe, &sig->err_item);
+ sig->sig_err_exists = true;
+ sig->sigerr_count++;
mlx5_ib_warn(dev, "CQN: 0x%x Got SIGERR on key: 0x%x err_type %x err_offset %llx expected %x actual %x\n",
- cq->mcq.cqn, mr->sig->err_item.key,
- mr->sig->err_item.err_type,
- mr->sig->err_item.sig_err_offset,
- mr->sig->err_item.expected,
- mr->sig->err_item.actual);
+ cq->mcq.cqn, sig->err_item.key,
+ sig->err_item.err_type,
+ sig->err_item.sig_err_offset,
+ sig->err_item.expected,
+ sig->err_item.actual);
- xa_unlock(&dev->mdev->priv.mkey_table);
+ xa_unlock(&dev->sig_mrs);
goto repoll;
}
+ }
return 0;
}
@@ -710,7 +709,7 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
cq->buf.umem =
ib_umem_get(udata, ucmd.buf_addr, entries * ucmd.cqe_size,
- IB_ACCESS_LOCAL_WRITE, 1);
+ IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(cq->buf.umem)) {
err = PTR_ERR(cq->buf.umem);
return err;
@@ -1111,7 +1110,7 @@ static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
umem = ib_umem_get(udata, ucmd.buf_addr,
(size_t)ucmd.cqe_size * entries,
- IB_ACCESS_LOCAL_WRITE, 1);
+ IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(umem)) {
err = PTR_ERR(umem);
return err;
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
index d609f4659afb..9d0a18cf9e5e 100644
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -100,6 +100,7 @@ struct devx_obj {
struct mlx5_ib_devx_mr devx_mr;
struct mlx5_core_dct core_dct;
struct mlx5_core_cq core_cq;
+ u32 flow_counter_bulk_size;
};
struct list_head event_sub; /* holds devx_event_subscription entries */
};
@@ -192,15 +193,20 @@ bool mlx5_ib_devx_is_flow_dest(void *obj, int *dest_id, int *dest_type)
}
}
-bool mlx5_ib_devx_is_flow_counter(void *obj, u32 *counter_id)
+bool mlx5_ib_devx_is_flow_counter(void *obj, u32 offset, u32 *counter_id)
{
struct devx_obj *devx_obj = obj;
u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox, opcode);
if (opcode == MLX5_CMD_OP_DEALLOC_FLOW_COUNTER) {
+
+ if (offset && offset >= devx_obj->flow_counter_bulk_size)
+ return false;
+
*counter_id = MLX5_GET(dealloc_flow_counter_in,
devx_obj->dinbox,
flow_counter_id);
+ *counter_id += offset;
return true;
}
@@ -1265,8 +1271,8 @@ static int devx_handle_mkey_indirect(struct devx_obj *obj,
mkey->pd = MLX5_GET(mkc, mkc, pd);
devx_mr->ndescs = MLX5_GET(mkc, mkc, translations_octword_size);
- return xa_err(xa_store(&dev->mdev->priv.mkey_table,
- mlx5_base_mkey(mkey->key), mkey, GFP_KERNEL));
+ return xa_err(xa_store(&dev->odp_mkeys, mlx5_base_mkey(mkey->key), mkey,
+ GFP_KERNEL));
}
static int devx_handle_mkey_create(struct mlx5_ib_dev *dev,
@@ -1345,9 +1351,9 @@ static int devx_obj_cleanup(struct ib_uobject *uobject,
* the mmkey, we must wait for that to stop before freeing the
* mkey, as another allocation could get the same mkey #.
*/
- xa_erase(&obj->ib_dev->mdev->priv.mkey_table,
+ xa_erase(&obj->ib_dev->odp_mkeys,
mlx5_base_mkey(obj->devx_mr.mmkey.key));
- synchronize_srcu(&dev->mr_srcu);
+ synchronize_srcu(&dev->odp_srcu);
}
if (obj->flags & DEVX_OBJ_FLAGS_DCT)
@@ -1463,6 +1469,13 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
if (err)
goto obj_free;
+ if (opcode == MLX5_CMD_OP_ALLOC_FLOW_COUNTER) {
+ u8 bulk = MLX5_GET(alloc_flow_counter_in,
+ cmd_in,
+ flow_counter_bulk);
+ obj->flow_counter_bulk_size = 128UL * bulk;
+ }
+
uobj->object = obj;
INIT_LIST_HEAD(&obj->event_sub);
obj->ib_dev = dev;
@@ -2121,7 +2134,7 @@ static int devx_umem_get(struct mlx5_ib_dev *dev, struct ib_ucontext *ucontext,
if (err)
return err;
- obj->umem = ib_umem_get(&attrs->driver_udata, addr, size, access, 0);
+ obj->umem = ib_umem_get(&attrs->driver_udata, addr, size, access);
if (IS_ERR(obj->umem))
return PTR_ERR(obj->umem);
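
Aside: the devx hunks above record the bulk size of an allocated flow counter (128 counters per `flow_counter_bulk` unit) and reject offsets outside it before deriving the final counter id. A sketch of that bounds check with a hypothetical object:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-in for the devx object state added above. */
    struct counter_obj {
        uint32_t base_id;    /* id of the first counter in the bulk */
        uint32_t bulk_size;  /* 128 * flow_counter_bulk */
    };

    static int counter_id_at(const struct counter_obj *obj, uint32_t offset,
                             uint32_t *counter_id)
    {
        /* offset 0 is always valid (non-bulk counters have bulk_size 0);
         * any other offset must fall inside the allocated bulk
         */
        if (offset && offset >= obj->bulk_size)
            return -1;

        *counter_id = obj->base_id + offset;
        return 0;
    }

    int main(void)
    {
        struct counter_obj obj = { .base_id = 0x100, .bulk_size = 128 };
        uint32_t id;

        if (!counter_id_at(&obj, 5, &id))
            printf("counter id 0x%x\n", id);
        return 0;
    }
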
diff --git a/drivers/infiniband/hw/mlx5/doorbell.c b/drivers/infiniband/hw/mlx5/doorbell.c
index 8f4e5f22b84c..12737c509aa2 100644
--- a/drivers/infiniband/hw/mlx5/doorbell.c
+++ b/drivers/infiniband/hw/mlx5/doorbell.c
@@ -64,7 +64,7 @@ int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context,
page->user_virt = (virt & PAGE_MASK);
page->refcnt = 0;
- page->umem = ib_umem_get(udata, virt & PAGE_MASK, PAGE_SIZE, 0, 0);
+ page->umem = ib_umem_get(udata, virt & PAGE_MASK, PAGE_SIZE, 0);
if (IS_ERR(page->umem)) {
err = PTR_ERR(page->umem);
kfree(page);
diff --git a/drivers/infiniband/hw/mlx5/flow.c b/drivers/infiniband/hw/mlx5/flow.c
index b198ff10cde9..dbee17d22d50 100644
--- a/drivers/infiniband/hw/mlx5/flow.c
+++ b/drivers/infiniband/hw/mlx5/flow.c
@@ -85,6 +85,8 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)(
struct mlx5_ib_dev *dev = mlx5_udata_to_mdev(&attrs->driver_udata);
int len, ret, i;
u32 counter_id = 0;
+ u32 *offset_attr;
+ u32 offset = 0;
if (!capable(CAP_NET_RAW))
return -EPERM;
@@ -151,8 +153,27 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)(
if (len) {
devx_obj = arr_flow_actions[0]->object;
- if (!mlx5_ib_devx_is_flow_counter(devx_obj, &counter_id))
+ if (uverbs_attr_is_valid(attrs,
+ MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX_OFFSET)) {
+
+ int num_offsets = uverbs_attr_ptr_get_array_size(
+ attrs,
+ MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX_OFFSET,
+ sizeof(u32));
+
+ if (num_offsets != 1)
+ return -EINVAL;
+
+ offset_attr = uverbs_attr_get_alloced_ptr(
+ attrs,
+ MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX_OFFSET);
+ offset = *offset_attr;
+ }
+
+ if (!mlx5_ib_devx_is_flow_counter(devx_obj, offset,
+ &counter_id))
return -EINVAL;
+
flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
}
@@ -598,7 +619,11 @@ DECLARE_UVERBS_NAMED_METHOD(
UVERBS_ATTR_IDRS_ARR(MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX,
MLX5_IB_OBJECT_DEVX_OBJ,
UVERBS_ACCESS_READ, 1, 1,
- UA_OPTIONAL));
+ UA_OPTIONAL),
+ UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX_OFFSET,
+ UVERBS_ATTR_MIN_SIZE(sizeof(u32)),
+ UA_OPTIONAL,
+ UA_ALLOC_AND_COPY));
DECLARE_UVERBS_NAMED_METHOD_DESTROY(
MLX5_IB_METHOD_DESTROY_FLOW,
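
Aside: the new MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX_OFFSET attribute above is optional and, when present, must carry exactly one u32. A sketch of that shape check with a hypothetical attribute struct standing in for the uverbs accessors:

    #include <stdio.h>

    /* Hypothetical stand-in for an optional, variable-length attribute. */
    struct attr { int valid; int nelems; unsigned int vals[4]; };

    static int get_offset(const struct attr *a, unsigned int *offset)
    {
        *offset = 0;            /* default when the attribute is absent */
        if (!a->valid)
            return 0;
        if (a->nelems != 1)     /* exactly one offset is allowed */
            return -1;
        *offset = a->vals[0];
        return 0;
    }

    int main(void)
    {
        struct attr a = { .valid = 1, .nelems = 1, .vals = { 7 } };
        unsigned int off;

        if (!get_offset(&a, &off))
            printf("offset %u\n", off);
        return 0;
    }
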
diff --git a/drivers/infiniband/hw/mlx5/gsi.c b/drivers/infiniband/hw/mlx5/gsi.c
index 4950df3f71b6..ac4d8d1b9a07 100644
--- a/drivers/infiniband/hw/mlx5/gsi.c
+++ b/drivers/infiniband/hw/mlx5/gsi.c
@@ -263,7 +263,7 @@ static struct ib_qp *create_gsi_ud_qp(struct mlx5_ib_gsi_qp *gsi)
},
.sq_sig_type = gsi->sq_sig_type,
.qp_type = IB_QPT_UD,
- .create_flags = mlx5_ib_create_qp_sqpn_qp1(),
+ .create_flags = MLX5_IB_QP_CREATE_SQPN_QP1,
};
return ib_create_qp(pd, &init_attr);
diff --git a/drivers/infiniband/hw/mlx5/ib_rep.c b/drivers/infiniband/hw/mlx5/ib_rep.c
index 74ce9249e75a..5c3d052ac30b 100644
--- a/drivers/infiniband/hw/mlx5/ib_rep.c
+++ b/drivers/infiniband/hw/mlx5/ib_rep.c
@@ -35,7 +35,7 @@ mlx5_ib_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
int vport_index;
if (rep->vport == MLX5_VPORT_UPLINK)
- profile = &uplink_rep_profile;
+ profile = &raw_eth_profile;
else
return mlx5_ib_set_vport_rep(dev, rep);
diff --git a/drivers/infiniband/hw/mlx5/ib_rep.h b/drivers/infiniband/hw/mlx5/ib_rep.h
index de43b423bafc..3b6750cba796 100644
--- a/drivers/infiniband/hw/mlx5/ib_rep.h
+++ b/drivers/infiniband/hw/mlx5/ib_rep.h
@@ -10,7 +10,7 @@
#include "mlx5_ib.h"
#ifdef CONFIG_MLX5_ESWITCH
-extern const struct mlx5_ib_profile uplink_rep_profile;
+extern const struct mlx5_ib_profile raw_eth_profile;
u8 mlx5_ib_eswitch_mode(struct mlx5_eswitch *esw);
struct mlx5_ib_dev *mlx5_ib_get_rep_ibdev(struct mlx5_eswitch *esw,
diff --git a/drivers/infiniband/hw/mlx5/ib_virt.c b/drivers/infiniband/hw/mlx5/ib_virt.c
index 649a3364f838..4f0edd4832bd 100644
--- a/drivers/infiniband/hw/mlx5/ib_virt.c
+++ b/drivers/infiniband/hw/mlx5/ib_virt.c
@@ -201,3 +201,27 @@ int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u8 port,
return -EINVAL;
}
+
+int mlx5_ib_get_vf_guid(struct ib_device *device, int vf, u8 port,
+ struct ifla_vf_guid *node_guid,
+ struct ifla_vf_guid *port_guid)
+{
+ struct mlx5_ib_dev *dev = to_mdev(device);
+ struct mlx5_core_dev *mdev = dev->mdev;
+ struct mlx5_hca_vport_context *rep;
+ int err;
+
+ rep = kzalloc(sizeof(*rep), GFP_KERNEL);
+ if (!rep)
+ return -ENOMEM;
+
+ err = mlx5_query_hca_vport_context(mdev, 1, 1, vf+1, rep);
+ if (err)
+ goto ex;
+
+ port_guid->guid = rep->port_guid;
+ node_guid->guid = rep->node_guid;
+ex:
+ kfree(rep);
+ return err;
+}
diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c
index 348c1df69cdc..14e0c17de6a9 100644
--- a/drivers/infiniband/hw/mlx5/mad.c
+++ b/drivers/infiniband/hw/mlx5/mad.c
@@ -74,58 +74,6 @@ static int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey,
port);
}
-static int process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
- const struct ib_wc *in_wc, const struct ib_grh *in_grh,
- const struct ib_mad *in_mad, struct ib_mad *out_mad)
-{
- u16 slid;
- int err;
-
- slid = in_wc ? ib_lid_cpu16(in_wc->slid) : be16_to_cpu(IB_LID_PERMISSIVE);
-
- if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP && slid == 0)
- return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
-
- if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
- in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
- if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET &&
- in_mad->mad_hdr.method != IB_MGMT_METHOD_SET &&
- in_mad->mad_hdr.method != IB_MGMT_METHOD_TRAP_REPRESS)
- return IB_MAD_RESULT_SUCCESS;
-
- /* Don't process SMInfo queries -- the SMA can't handle them.
- */
- if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO)
- return IB_MAD_RESULT_SUCCESS;
- } else if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT ||
- in_mad->mad_hdr.mgmt_class == MLX5_IB_VENDOR_CLASS1 ||
- in_mad->mad_hdr.mgmt_class == MLX5_IB_VENDOR_CLASS2 ||
- in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_CONG_MGMT) {
- if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET &&
- in_mad->mad_hdr.method != IB_MGMT_METHOD_SET)
- return IB_MAD_RESULT_SUCCESS;
- } else {
- return IB_MAD_RESULT_SUCCESS;
- }
-
- err = mlx5_MAD_IFC(to_mdev(ibdev),
- mad_flags & IB_MAD_IGNORE_MKEY,
- mad_flags & IB_MAD_IGNORE_BKEY,
- port_num, in_wc, in_grh, in_mad, out_mad);
- if (err)
- return IB_MAD_RESULT_FAILURE;
-
- /* set return bit in status of directed route responses */
- if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
- out_mad->mad_hdr.status |= cpu_to_be16(1 << 15);
-
- if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS)
- /* no response for trap repress */
- return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
-
- return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
-}
-
static void pma_cnt_ext_assign(struct ib_pma_portcounters_ext *pma_cnt_ext,
void *out)
{
@@ -271,30 +219,66 @@ done:
int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
const struct ib_wc *in_wc, const struct ib_grh *in_grh,
- const struct ib_mad_hdr *in, size_t in_mad_size,
- struct ib_mad_hdr *out, size_t *out_mad_size,
- u16 *out_mad_pkey_index)
+ const struct ib_mad *in, struct ib_mad *out,
+ size_t *out_mad_size, u16 *out_mad_pkey_index)
{
struct mlx5_ib_dev *dev = to_mdev(ibdev);
- const struct ib_mad *in_mad = (const struct ib_mad *)in;
- struct ib_mad *out_mad = (struct ib_mad *)out;
- int ret;
+ u8 mgmt_class = in->mad_hdr.mgmt_class;
+ u8 method = in->mad_hdr.method;
+ u16 slid;
+ int err;
- if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
- *out_mad_size != sizeof(*out_mad)))
- return IB_MAD_RESULT_FAILURE;
+ slid = in_wc ? ib_lid_cpu16(in_wc->slid) :
+ be16_to_cpu(IB_LID_PERMISSIVE);
- memset(out_mad->data, 0, sizeof(out_mad->data));
+ if (method == IB_MGMT_METHOD_TRAP && !slid)
+ return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
- if (MLX5_CAP_GEN(dev->mdev, vport_counters) &&
- in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT &&
- in_mad->mad_hdr.method == IB_MGMT_METHOD_GET) {
- ret = process_pma_cmd(dev, port_num, in_mad, out_mad);
- } else {
- ret = process_mad(ibdev, mad_flags, port_num, in_wc, in_grh,
- in_mad, out_mad);
+ switch (mgmt_class) {
+ case IB_MGMT_CLASS_SUBN_LID_ROUTED:
+ case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE: {
+ if (method != IB_MGMT_METHOD_GET &&
+ method != IB_MGMT_METHOD_SET &&
+ method != IB_MGMT_METHOD_TRAP_REPRESS)
+ return IB_MAD_RESULT_SUCCESS;
+
+ /* Don't process SMInfo queries -- the SMA can't handle them.
+ */
+ if (in->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO)
+ return IB_MAD_RESULT_SUCCESS;
+ } break;
+ case IB_MGMT_CLASS_PERF_MGMT:
+ if (MLX5_CAP_GEN(dev->mdev, vport_counters) &&
+ method == IB_MGMT_METHOD_GET)
+ return process_pma_cmd(dev, port_num, in, out);
+ /* fallthrough */
+ case MLX5_IB_VENDOR_CLASS1:
+ /* fallthrough */
+ case MLX5_IB_VENDOR_CLASS2:
+ case IB_MGMT_CLASS_CONG_MGMT: {
+ if (method != IB_MGMT_METHOD_GET &&
+ method != IB_MGMT_METHOD_SET)
+ return IB_MAD_RESULT_SUCCESS;
+ } break;
+ default:
+ return IB_MAD_RESULT_SUCCESS;
}
- return ret;
+
+ err = mlx5_MAD_IFC(to_mdev(ibdev), mad_flags & IB_MAD_IGNORE_MKEY,
+ mad_flags & IB_MAD_IGNORE_BKEY, port_num, in_wc,
+ in_grh, in, out);
+ if (err)
+ return IB_MAD_RESULT_FAILURE;
+
+ /* set return bit in status of directed route responses */
+ if (mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
+ out->mad_hdr.status |= cpu_to_be16(1 << 15);
+
+ if (method == IB_MGMT_METHOD_TRAP_REPRESS)
+ /* no response for trap repress */
+ return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
+
+ return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}
int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port)
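
Aside: the rewritten mlx5_ib_process_mad() above dispatches on mgmt_class with a switch whose PERF_MGMT case falls through to the generic MAD_IFC handling when the PMA fast path does not apply. A compact sketch of that control flow; the constants below are stand-ins mirroring IB management-class values:

    #include <stdio.h>

    enum { GET = 1, SET = 2 };
    enum { CLS_PERF = 0x04, CLS_VENDOR1 = 0x09, CLS_CONG = 0x21 };

    static const char *dispatch(int mgmt_class, int method)
    {
        switch (mgmt_class) {
        case CLS_PERF:
            if (method == GET)
                return "pma";      /* counters fast path */
            /* fallthrough */
        case CLS_VENDOR1:
        case CLS_CONG:
            if (method != GET && method != SET)
                return "drop";
            return "mad_ifc";      /* generic firmware path */
        default:
            return "drop";
        }
    }

    int main(void)
    {
        /* PERF_MGMT SET reaches mad_ifc via the fallthrough */
        printf("%s\n", dispatch(CLS_PERF, SET));
        return 0;
    }
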
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 831539419c30..97b26e9a5234 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -67,6 +67,7 @@
#include <rdma/uverbs_std_types.h>
#include <rdma/mlx5_user_ioctl_verbs.h>
#include <rdma/mlx5_user_ioctl_cmds.h>
+#include <rdma/ib_umem_odp.h>
#define UVERBS_MODULE_NAME mlx5_ib
#include <rdma/uverbs_named_ioctl.h>
@@ -693,21 +694,6 @@ static void get_atomic_caps_qp(struct mlx5_ib_dev *dev,
get_atomic_caps(dev, atomic_size_qp, props);
}
-static void get_atomic_caps_dc(struct mlx5_ib_dev *dev,
- struct ib_device_attr *props)
-{
- u8 atomic_size_qp = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_dc);
-
- get_atomic_caps(dev, atomic_size_qp, props);
-}
-
-bool mlx5_ib_dc_atomic_is_supported(struct mlx5_ib_dev *dev)
-{
- struct ib_device_attr props = {};
-
- get_atomic_caps_dc(dev, &props);
- return (props.atomic_cap == IB_ATOMIC_HCA) ? true : false;
-}
static int mlx5_query_system_image_guid(struct ib_device *ibdev,
__be64 *sys_image_guid)
{
@@ -844,8 +830,8 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
resp_len = sizeof(resp.comp_mask) + sizeof(resp.response_length);
if (uhw->outlen && uhw->outlen < resp_len)
return -EINVAL;
- else
- resp.response_length = resp_len;
+
+ resp.response_length = resp_len;
if (uhw->inlen && !ib_is_udata_cleared(uhw, 0, uhw->inlen))
return -EINVAL;
@@ -1011,6 +997,8 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
1 << MLX5_CAP_GEN(mdev, log_max_klm_list_size);
props->max_pi_fast_reg_page_list_len =
props->max_fast_reg_page_list_len / 2;
+ props->max_sgl_rd =
+ MLX5_CAP_GEN(mdev, max_sgl_for_optimized_performance);
get_atomic_caps_qp(dev, props);
props->masked_atomic_cap = IB_ATOMIC_NONE;
props->max_mcast_grp = 1 << MLX5_CAP_GEN(mdev, log_max_mcg);
@@ -1031,7 +1019,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
if (MLX5_CAP_GEN(mdev, cd))
props->device_cap_flags |= IB_DEVICE_CROSS_CHANNEL;
- if (!mlx5_core_is_pf(mdev))
+ if (mlx5_core_is_vf(mdev))
props->device_cap_flags |= IB_DEVICE_VIRTUAL_FUNCTION;
if (mlx5_ib_port_link_layer(ibdev, 1) ==
@@ -1161,8 +1149,14 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES;
resp.striding_rq_caps.max_single_stride_log_num_of_bytes =
MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES;
- resp.striding_rq_caps.min_single_wqe_log_num_of_strides =
- MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
+ if (MLX5_CAP_GEN(dev->mdev, ext_stride_num_range))
+ resp.striding_rq_caps
+ .min_single_wqe_log_num_of_strides =
+ MLX5_EXT_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
+ else
+ resp.striding_rq_caps
+ .min_single_wqe_log_num_of_strides =
+ MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
resp.striding_rq_caps.max_single_wqe_log_num_of_strides =
MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES;
resp.striding_rq_caps.supported_qpts =
@@ -1808,7 +1802,7 @@ static int mlx5_ib_alloc_ucontext(struct ib_ucontext *uctx,
return -EINVAL;
resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp);
- if (mlx5_core_is_pf(dev->mdev) && MLX5_CAP_GEN(dev->mdev, bf))
+ if (dev->wc_support)
resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size);
resp.cache_line_size = cache_line_size();
resp.max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq);
@@ -2168,7 +2162,7 @@ static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn %pa\n", idx, &pfn);
err = rdma_user_mmap_io(&context->ibucontext, vma, pfn, PAGE_SIZE,
- prot);
+ prot, NULL);
if (err) {
mlx5_ib_err(dev,
"rdma_user_mmap_io failed with error=%d, mmap_cmd=%s\n",
@@ -2210,7 +2204,8 @@ static int dm_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
PAGE_SHIFT) +
page_idx;
return rdma_user_mmap_io(context, vma, pfn, map_size,
- pgprot_writecombine(vma->vm_page_prot));
+ pgprot_writecombine(vma->vm_page_prot),
+ NULL);
}
static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
@@ -2248,7 +2243,8 @@ static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vm
PAGE_SHIFT;
return rdma_user_mmap_io(&context->ibucontext, vma, pfn,
PAGE_SIZE,
- pgprot_noncached(vma->vm_page_prot));
+ pgprot_noncached(vma->vm_page_prot),
+ NULL);
case MLX5_IB_MMAP_CLOCK_INFO:
return mlx5_ib_mmap_clock_info_page(dev, vma, context);
@@ -5145,8 +5141,7 @@ static int mlx5_port_immutable(struct ib_device *ibdev, u8 port_num,
immutable->pkey_tbl_len = attr.pkey_tbl_len;
immutable->gid_tbl_len = attr.gid_tbl_len;
immutable->core_cap_flags = get_core_cap_flags(ibdev, &rep);
- if ((ll == IB_LINK_LAYER_INFINIBAND) || MLX5_CAP_GEN(dev->mdev, roce))
- immutable->max_mad_size = IB_MGMT_MAD_SIZE;
+ immutable->max_mad_size = IB_MGMT_MAD_SIZE;
return 0;
}
@@ -5249,11 +5244,9 @@ static int mlx5_enable_eth(struct mlx5_ib_dev *dev)
{
int err;
- if (MLX5_CAP_GEN(dev->mdev, roce)) {
- err = mlx5_nic_vport_enable_roce(dev->mdev);
- if (err)
- return err;
- }
+ err = mlx5_nic_vport_enable_roce(dev->mdev);
+ if (err)
+ return err;
err = mlx5_eth_lag_init(dev);
if (err)
@@ -5262,8 +5255,7 @@ static int mlx5_enable_eth(struct mlx5_ib_dev *dev)
return 0;
err_disable_roce:
- if (MLX5_CAP_GEN(dev->mdev, roce))
- mlx5_nic_vport_disable_roce(dev->mdev);
+ mlx5_nic_vport_disable_roce(dev->mdev);
return err;
}
@@ -5271,8 +5263,7 @@ err_disable_roce:
static void mlx5_disable_eth(struct mlx5_ib_dev *dev)
{
mlx5_eth_lag_cleanup(dev);
- if (MLX5_CAP_GEN(dev->mdev, roce))
- mlx5_nic_vport_disable_roce(dev->mdev);
+ mlx5_nic_vport_disable_roce(dev->mdev);
}
struct mlx5_ib_counter {
@@ -6145,11 +6136,10 @@ static struct ib_counters *mlx5_ib_create_counters(struct ib_device *device,
static void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev)
{
mlx5_ib_cleanup_multiport_master(dev);
- if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
- srcu_barrier(&dev->mr_srcu);
- cleanup_srcu_struct(&dev->mr_srcu);
- }
+ WARN_ON(!xa_empty(&dev->odp_mkeys));
+ cleanup_srcu_struct(&dev->odp_srcu);
+ WARN_ON(!xa_empty(&dev->sig_mrs));
WARN_ON(!bitmap_empty(dev->dm.memic_alloc_pages, MLX5_MAX_MEMIC_PAGES));
}
@@ -6201,15 +6191,15 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
mutex_init(&dev->cap_mask_mutex);
INIT_LIST_HEAD(&dev->qp_list);
spin_lock_init(&dev->reset_flow_resource_lock);
+ xa_init(&dev->odp_mkeys);
+ xa_init(&dev->sig_mrs);
spin_lock_init(&dev->dm.lock);
dev->dm.dev = mdev;
- if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
- err = init_srcu_struct(&dev->mr_srcu);
- if (err)
- goto err_mp;
- }
+ err = init_srcu_struct(&dev->odp_srcu);
+ if (err)
+ goto err_mp;
return 0;
@@ -6269,6 +6259,9 @@ static const struct ib_device_ops mlx5_ib_dev_ops = {
.disassociate_ucontext = mlx5_ib_disassociate_ucontext,
.drain_rq = mlx5_ib_drain_rq,
.drain_sq = mlx5_ib_drain_sq,
+ .enable_driver = mlx5_ib_enable_driver,
+ .fill_res_entry = mlx5_ib_fill_res_entry,
+ .fill_stat_entry = mlx5_ib_fill_stat_entry,
.get_dev_fw_str = get_dev_fw_str,
.get_dma_mr = mlx5_ib_get_dma_mr,
.get_link_layer = mlx5_ib_port_link_layer,
@@ -6315,6 +6308,7 @@ static const struct ib_device_ops mlx5_ib_dev_ipoib_enhanced_ops = {
static const struct ib_device_ops mlx5_ib_dev_sriov_ops = {
.get_vf_config = mlx5_ib_get_vf_config,
+ .get_vf_guid = mlx5_ib_get_vf_guid,
.get_vf_stats = mlx5_ib_get_vf_stats,
.set_vf_guid = mlx5_ib_set_vf_guid,
.set_vf_link_state = mlx5_ib_set_vf_link_state,
@@ -6444,7 +6438,7 @@ static const struct ib_device_ops mlx5_ib_dev_port_rep_ops = {
.query_port = mlx5_ib_rep_query_port,
};
-static int mlx5_ib_stage_rep_non_default_cb(struct mlx5_ib_dev *dev)
+static int mlx5_ib_stage_raw_eth_non_default_cb(struct mlx5_ib_dev *dev)
{
ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_port_rep_ops);
return 0;
@@ -6484,7 +6478,7 @@ static void mlx5_ib_stage_common_roce_cleanup(struct mlx5_ib_dev *dev)
mlx5_remove_netdev_notifier(dev, port_num);
}
-static int mlx5_ib_stage_rep_roce_init(struct mlx5_ib_dev *dev)
+static int mlx5_ib_stage_raw_eth_roce_init(struct mlx5_ib_dev *dev)
{
struct mlx5_core_dev *mdev = dev->mdev;
enum rdma_link_layer ll;
@@ -6500,7 +6494,7 @@ static int mlx5_ib_stage_rep_roce_init(struct mlx5_ib_dev *dev)
return err;
}
-static void mlx5_ib_stage_rep_roce_cleanup(struct mlx5_ib_dev *dev)
+static void mlx5_ib_stage_raw_eth_roce_cleanup(struct mlx5_ib_dev *dev)
{
mlx5_ib_stage_common_roce_cleanup(dev);
}
@@ -6710,6 +6704,18 @@ static void mlx5_ib_stage_devx_cleanup(struct mlx5_ib_dev *dev)
}
}
+int mlx5_ib_enable_driver(struct ib_device *dev)
+{
+ struct mlx5_ib_dev *mdev = to_mdev(dev);
+ int ret;
+
+ ret = mlx5_ib_test_wc(mdev);
+ mlx5_ib_dbg(mdev, "Write-Combining %s",
+ mdev->wc_support ? "supported" : "not supported");
+
+ return ret;
+}
+
void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
const struct mlx5_ib_profile *profile,
int stage)
@@ -6807,7 +6813,7 @@ static const struct mlx5_ib_profile pf_profile = {
mlx5_ib_stage_delay_drop_cleanup),
};
-const struct mlx5_ib_profile uplink_rep_profile = {
+const struct mlx5_ib_profile raw_eth_profile = {
STAGE_CREATE(MLX5_IB_STAGE_INIT,
mlx5_ib_stage_init_init,
mlx5_ib_stage_init_cleanup),
@@ -6818,11 +6824,11 @@ const struct mlx5_ib_profile uplink_rep_profile = {
mlx5_ib_stage_caps_init,
NULL),
STAGE_CREATE(MLX5_IB_STAGE_NON_DEFAULT_CB,
- mlx5_ib_stage_rep_non_default_cb,
+ mlx5_ib_stage_raw_eth_non_default_cb,
NULL),
STAGE_CREATE(MLX5_IB_STAGE_ROCE,
- mlx5_ib_stage_rep_roce_init,
- mlx5_ib_stage_rep_roce_cleanup),
+ mlx5_ib_stage_raw_eth_roce_init,
+ mlx5_ib_stage_raw_eth_roce_cleanup),
STAGE_CREATE(MLX5_IB_STAGE_SRQ,
mlx5_init_srq_table,
mlx5_cleanup_srq_table),
@@ -6898,6 +6904,7 @@ static void *mlx5_ib_add_slave_port(struct mlx5_core_dev *mdev)
static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
{
+ const struct mlx5_ib_profile *profile;
enum rdma_link_layer ll;
struct mlx5_ib_dev *dev;
int port_type_cap;
@@ -6933,7 +6940,12 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
dev->mdev = mdev;
dev->num_ports = num_ports;
- return __mlx5_ib_add(dev, &pf_profile);
+ if (ll == IB_LINK_LAYER_ETHERNET && !mlx5_is_roce_enabled(mdev))
+ profile = &raw_eth_profile;
+ else
+ profile = &pf_profile;
+
+ return __mlx5_ib_add(dev, profile);
}
static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
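
Aside: across these hunks the shared mdev mkey_table is replaced by a driver-private xarray (odp_mkeys) guarded by its own SRCU domain (odp_srcu). A kernel-context sketch (not a standalone program) of that publish/lookup/retire lifecycle; the names are stand-ins for the fields added above:

    #include <linux/xarray.h>
    #include <linux/srcu.h>

    static DEFINE_XARRAY(odp_mkeys);
    DEFINE_STATIC_SRCU(odp_srcu);

    static int publish_mkey(unsigned long base_key, void *mkey)
    {
        return xa_err(xa_store(&odp_mkeys, base_key, mkey, GFP_KERNEL));
    }

    static void *lookup_mkey(unsigned long base_key, int *srcu_idx)
    {
        *srcu_idx = srcu_read_lock(&odp_srcu);
        return xa_load(&odp_mkeys, base_key); /* valid until srcu unlock */
    }

    static void retire_mkey(unsigned long base_key)
    {
        xa_erase(&odp_mkeys, base_key);
        synchronize_srcu(&odp_srcu); /* wait out in-flight readers */
    }
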
diff --git a/drivers/infiniband/hw/mlx5/mem.c b/drivers/infiniband/hw/mlx5/mem.c
index b5aece786b36..048f4e974a61 100644
--- a/drivers/infiniband/hw/mlx5/mem.c
+++ b/drivers/infiniband/hw/mlx5/mem.c
@@ -34,6 +34,7 @@
#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>
#include "mlx5_ib.h"
+#include <linux/jiffies.h>
/* @umem: umem object to scan
* @addr: ib virtual address requested by the user
@@ -216,3 +217,201 @@ int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset)
*offset = buf_off >> ilog2(off_size);
return 0;
}
+
+#define WR_ID_BF 0xBF
+#define WR_ID_END 0xBAD
+#define TEST_WC_NUM_WQES 255
+#define TEST_WC_POLLING_MAX_TIME_JIFFIES msecs_to_jiffies(100)
+static int post_send_nop(struct mlx5_ib_dev *dev, struct ib_qp *ibqp, u64 wr_id,
+ bool signaled)
+{
+ struct mlx5_ib_qp *qp = to_mqp(ibqp);
+ struct mlx5_wqe_ctrl_seg *ctrl;
+ struct mlx5_bf *bf = &qp->bf;
+ __be32 mmio_wqe[16] = {};
+ unsigned long flags;
+ unsigned int idx;
+ int i;
+
+ if (unlikely(dev->mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR))
+ return -EIO;
+
+ spin_lock_irqsave(&qp->sq.lock, flags);
+
+ idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1);
+ ctrl = mlx5_frag_buf_get_wqe(&qp->sq.fbc, idx);
+
+ memset(ctrl, 0, sizeof(struct mlx5_wqe_ctrl_seg));
+ ctrl->fm_ce_se = signaled ? MLX5_WQE_CTRL_CQ_UPDATE : 0;
+ ctrl->opmod_idx_opcode =
+ cpu_to_be32(((u32)(qp->sq.cur_post) << 8) | MLX5_OPCODE_NOP);
+ ctrl->qpn_ds = cpu_to_be32((sizeof(struct mlx5_wqe_ctrl_seg) / 16) |
+ (qp->trans_qp.base.mqp.qpn << 8));
+
+ qp->sq.wrid[idx] = wr_id;
+ qp->sq.w_list[idx].opcode = MLX5_OPCODE_NOP;
+ qp->sq.wqe_head[idx] = qp->sq.head + 1;
+ qp->sq.cur_post += DIV_ROUND_UP(sizeof(struct mlx5_wqe_ctrl_seg),
+ MLX5_SEND_WQE_BB);
+ qp->sq.w_list[idx].next = qp->sq.cur_post;
+ qp->sq.head++;
+
+ memcpy(mmio_wqe, ctrl, sizeof(*ctrl));
+ ((struct mlx5_wqe_ctrl_seg *)&mmio_wqe)->fm_ce_se |=
+ MLX5_WQE_CTRL_CQ_UPDATE;
+
+ /* Make sure that descriptors are written before
+ * updating doorbell record and ringing the doorbell
+ */
+ wmb();
+
+ qp->db.db[MLX5_SND_DBR] = cpu_to_be32(qp->sq.cur_post);
+
+ /* Make sure doorbell record is visible to the HCA before
+ * we hit doorbell
+ */
+ wmb();
+ for (i = 0; i < 8; i++)
+ mlx5_write64(&mmio_wqe[i * 2],
+ bf->bfreg->map + bf->offset + i * 8);
+
+ bf->offset ^= bf->buf_size;
+
+ spin_unlock_irqrestore(&qp->sq.lock, flags);
+
+ return 0;
+}
+
+static int test_wc_poll_cq_result(struct mlx5_ib_dev *dev, struct ib_cq *cq)
+{
+ int ret;
+ struct ib_wc wc = {};
+ unsigned long end = jiffies + TEST_WC_POLLING_MAX_TIME_JIFFIES;
+
+ do {
+ ret = ib_poll_cq(cq, 1, &wc);
+ if (ret < 0 || wc.status)
+ return ret < 0 ? ret : -EINVAL;
+ if (ret)
+ break;
+ } while (!time_after(jiffies, end));
+
+ if (!ret)
+ return -ETIMEDOUT;
+
+ if (wc.wr_id != WR_ID_BF)
+ ret = 0;
+
+ return ret;
+}
+
+static int test_wc_do_send(struct mlx5_ib_dev *dev, struct ib_qp *qp)
+{
+ int err, i;
+
+ for (i = 0; i < TEST_WC_NUM_WQES; i++) {
+ err = post_send_nop(dev, qp, WR_ID_BF, false);
+ if (err)
+ return err;
+ }
+
+ return post_send_nop(dev, qp, WR_ID_END, true);
+}
+
+int mlx5_ib_test_wc(struct mlx5_ib_dev *dev)
+{
+ struct ib_cq_init_attr cq_attr = { .cqe = TEST_WC_NUM_WQES + 1 };
+ int port_type_cap = MLX5_CAP_GEN(dev->mdev, port_type);
+ struct ib_qp_init_attr qp_init_attr = {
+ .cap = { .max_send_wr = TEST_WC_NUM_WQES },
+ .qp_type = IB_QPT_UD,
+ .sq_sig_type = IB_SIGNAL_REQ_WR,
+ .create_flags = MLX5_IB_QP_CREATE_WC_TEST,
+ };
+ struct ib_qp_attr qp_attr = { .port_num = 1 };
+ struct ib_device *ibdev = &dev->ib_dev;
+ struct ib_qp *qp;
+ struct ib_cq *cq;
+ struct ib_pd *pd;
+ int ret;
+
+ if (!MLX5_CAP_GEN(dev->mdev, bf))
+ return 0;
+
+ if (!dev->mdev->roce.roce_en &&
+ port_type_cap == MLX5_CAP_PORT_TYPE_ETH) {
+ if (mlx5_core_is_pf(dev->mdev))
+ dev->wc_support = true;
+ return 0;
+ }
+
+ ret = mlx5_alloc_bfreg(dev->mdev, &dev->wc_bfreg, true, false);
+ if (ret)
+ goto print_err;
+
+ if (!dev->wc_bfreg.wc)
+ goto out1;
+
+ pd = ib_alloc_pd(ibdev, 0);
+ if (IS_ERR(pd)) {
+ ret = PTR_ERR(pd);
+ goto out1;
+ }
+
+ cq = ib_create_cq(ibdev, NULL, NULL, NULL, &cq_attr);
+ if (IS_ERR(cq)) {
+ ret = PTR_ERR(cq);
+ goto out2;
+ }
+
+ qp_init_attr.recv_cq = cq;
+ qp_init_attr.send_cq = cq;
+ qp = ib_create_qp(pd, &qp_init_attr);
+ if (IS_ERR(qp)) {
+ ret = PTR_ERR(qp);
+ goto out3;
+ }
+
+ qp_attr.qp_state = IB_QPS_INIT;
+ ret = ib_modify_qp(qp, &qp_attr,
+ IB_QP_STATE | IB_QP_PORT | IB_QP_PKEY_INDEX |
+ IB_QP_QKEY);
+ if (ret)
+ goto out4;
+
+ qp_attr.qp_state = IB_QPS_RTR;
+ ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
+ if (ret)
+ goto out4;
+
+ qp_attr.qp_state = IB_QPS_RTS;
+ ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_SQ_PSN);
+ if (ret)
+ goto out4;
+
+ ret = test_wc_do_send(dev, qp);
+ if (ret < 0)
+ goto out4;
+
+ ret = test_wc_poll_cq_result(dev, cq);
+ if (ret > 0) {
+ dev->wc_support = true;
+ ret = 0;
+ }
+
+out4:
+ ib_destroy_qp(qp);
+out3:
+ ib_destroy_cq(cq);
+out2:
+ ib_dealloc_pd(pd);
+out1:
+ mlx5_free_bfreg(dev->mdev, &dev->wc_bfreg);
+print_err:
+ if (ret)
+ mlx5_ib_err(
+ dev,
+ "Error %d while trying to test write-combining support\n",
+ ret);
+ return ret;
+}
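
Aside: post_send_nop() above depends on two write barriers: one ordering the WQE contents ahead of the doorbell-record update, and one ordering the record ahead of the MMIO kick, so the HCA never fetches a stale producer index. A kernel-context sketch (not standalone); db_rec and bf_map stand in for qp->db.db[MLX5_SND_DBR] and the BlueFlame mapping:

    #include <linux/types.h>
    #include <linux/io.h>

    static void ring_doorbell(__be32 *db_rec, void __iomem *bf_map,
                              u32 cur_post, u64 ctrl_word)
    {
        wmb();                  /* WQE written before doorbell record */
        *db_rec = cpu_to_be32(cur_post);

        wmb();                  /* record visible before the MMIO kick */
        writeq(ctrl_word, bf_map);
    }
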
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 1a98ee2e01c4..b983e385a8c5 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -247,12 +247,8 @@ struct mlx5_ib_flow_db {
* These flags are intended for internal use by the mlx5_ib driver, and they
* rely on the range reserved for that use in the ib_qp_create_flags enum.
*/
-
-/* Create a UD QP whose source QP number is 1 */
-static inline enum ib_qp_create_flags mlx5_ib_create_qp_sqpn_qp1(void)
-{
- return IB_QP_CREATE_RESERVED_START;
-}
+#define MLX5_IB_QP_CREATE_SQPN_QP1 IB_QP_CREATE_RESERVED_START
+#define MLX5_IB_QP_CREATE_WC_TEST (IB_QP_CREATE_RESERVED_START << 1)
struct wr_list {
u16 opcode;
@@ -295,6 +291,7 @@ enum mlx5_ib_wq_flags {
#define MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES 16
#define MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES 6
#define MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES 13
+#define MLX5_EXT_MIN_SINGLE_WQE_LOG_NUM_STRIDES 3
struct mlx5_ib_rwq {
struct ib_wq ibwq;
@@ -585,6 +582,9 @@ struct mlx5_ib_dm {
IB_ACCESS_REMOTE_READ |\
IB_ZERO_BASED)
+#define mlx5_update_odp_stats(mr, counter_name, value) \
+ atomic64_add(value, &((mr)->odp_stats.counter_name))
+
struct mlx5_ib_mr {
struct ib_mr ibmr;
void *descs;
@@ -606,7 +606,6 @@ struct mlx5_ib_mr {
struct mlx5_ib_dev *dev;
u32 out[MLX5_ST_SZ_DW(create_mkey_out)];
struct mlx5_core_sig_ctx *sig;
- unsigned int live;
void *descs_alloc;
int access_flags; /* Needed for rereg MR */
@@ -618,10 +617,18 @@ struct mlx5_ib_mr {
u64 data_iova;
u64 pi_iova;
- atomic_t num_leaf_free;
- wait_queue_head_t q_leaf_free;
+ /* For ODP and implicit */
+ atomic_t num_deferred_work;
+ struct xarray implicit_children;
+ union {
+ struct rcu_head rcu;
+ struct list_head elm;
+ struct work_struct work;
+ } odp_destroy;
+ struct ib_odp_counters odp_stats;
+ bool is_odp_implicit;
+
struct mlx5_async_work cb_work;
- atomic_t num_pending_prefetch;
};
static inline bool is_odp_mr(struct mlx5_ib_mr *mr)
@@ -957,7 +964,11 @@ struct mlx5_ib_dev {
/* serialize update of capability mask
*/
struct mutex cap_mask_mutex;
- bool ib_active;
+ u8 ib_active:1;
+ u8 fill_delay:1;
+ u8 is_rep:1;
+ u8 lag_active:1;
+ u8 wc_support:1;
struct umr_common umrc;
/* sync used page count stats
*/
@@ -966,7 +977,6 @@ struct mlx5_ib_dev {
struct timer_list delay_timer;
/* Prevents soft lock on massive reg MRs */
struct mutex slow_path_mutex;
- int fill_delay;
struct ib_odp_caps odp_caps;
u64 odp_max_size;
struct mlx5_ib_pf_eq odp_pf_eq;
@@ -975,7 +985,9 @@ struct mlx5_ib_dev {
* Sleepable RCU that prevents destruction of MRs while they are still
* being used by a page fault handler.
*/
- struct srcu_struct mr_srcu;
+ struct srcu_struct odp_srcu;
+ struct xarray odp_mkeys;
+
u32 null_mkey;
struct mlx5_ib_flow_db *flow_db;
/* protect resources needed as part of reset flow */
@@ -984,11 +996,10 @@ struct mlx5_ib_dev {
/* Array with num_ports elements */
struct mlx5_ib_port *port;
struct mlx5_sq_bfreg bfreg;
+ struct mlx5_sq_bfreg wc_bfreg;
struct mlx5_sq_bfreg fp_bfreg;
struct mlx5_ib_delay_drop delay_drop;
const struct mlx5_ib_profile *profile;
- bool is_rep;
- int lag_active;
struct mlx5_ib_lb_state lb;
u8 umr_fence;
@@ -999,6 +1010,8 @@ struct mlx5_ib_dev {
struct mlx5_srq_table srq_table;
struct mlx5_async_ctx async_ctx;
struct mlx5_devx_event_table devx_event_table;
+
+ struct xarray sig_mrs;
};
static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
@@ -1162,6 +1175,7 @@ struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
struct ib_udata *udata,
int access_flags);
void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *mr);
+void mlx5_ib_fence_odp_mr(struct mlx5_ib_mr *mr);
int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
u64 length, u64 virt_addr, int access_flags,
struct ib_pd *pd, struct ib_udata *udata);
@@ -1179,9 +1193,8 @@ int mlx5_ib_map_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
unsigned int *meta_sg_offset);
int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
const struct ib_wc *in_wc, const struct ib_grh *in_grh,
- const struct ib_mad_hdr *in, size_t in_mad_size,
- struct ib_mad_hdr *out, size_t *out_mad_size,
- u16 *out_mad_pkey_index);
+ const struct ib_mad *in, struct ib_mad *out,
+ size_t *out_mad_size, u16 *out_mad_pkey_index);
struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
struct ib_udata *udata);
int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata);
@@ -1223,6 +1236,8 @@ int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev);
struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev, int entry);
void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
+int mlx5_mr_cache_invalidate(struct mlx5_ib_mr *mr);
+
int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
struct ib_mr_status *mr_status);
struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
@@ -1235,7 +1250,6 @@ struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
struct ib_rwq_ind_table_init_attr *init_attr,
struct ib_udata *udata);
int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
-bool mlx5_ib_dc_atomic_is_supported(struct mlx5_ib_dev *dev);
struct ib_dm *mlx5_ib_alloc_dm(struct ib_device *ibdev,
struct ib_ucontext *context,
struct ib_dm_alloc_attr *attr,
@@ -1300,6 +1314,9 @@ int mlx5_ib_set_vf_link_state(struct ib_device *device, int vf,
u8 port, int state);
int mlx5_ib_get_vf_stats(struct ib_device *device, int vf,
u8 port, struct ifla_vf_stats *stats);
+int mlx5_ib_get_vf_guid(struct ib_device *device, int vf, u8 port,
+ struct ifla_vf_guid *node_guid,
+ struct ifla_vf_guid *port_guid);
int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u8 port,
u64 guid, int type);
@@ -1334,6 +1351,10 @@ struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *dev,
u8 *native_port_num);
void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *dev,
u8 port_num);
+int mlx5_ib_fill_res_entry(struct sk_buff *msg,
+ struct rdma_restrack_entry *res);
+int mlx5_ib_fill_stat_entry(struct sk_buff *msg,
+ struct rdma_restrack_entry *res);
#if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, bool is_user);
@@ -1349,7 +1370,7 @@ struct mlx5_ib_flow_handler *mlx5_ib_raw_fs_rule_add(
struct mlx5_flow_act *flow_act, u32 counter_id,
void *cmd_in, int inlen, int dest_id, int dest_type);
bool mlx5_ib_devx_is_flow_dest(void *obj, int *dest_id, int *dest_type);
-bool mlx5_ib_devx_is_flow_counter(void *obj, u32 *counter_id);
+bool mlx5_ib_devx_is_flow_counter(void *obj, u32 offset, u32 *counter_id);
int mlx5_ib_get_flow_trees(const struct uverbs_object_tree_def **root);
void mlx5_ib_destroy_flow_action_raw(struct mlx5_ib_flow_action *maction);
#else
@@ -1491,4 +1512,7 @@ static inline bool mlx5_ib_can_use_umr(struct mlx5_ib_dev *dev,
return true;
}
+
+int mlx5_ib_enable_driver(struct ib_device *dev);
+int mlx5_ib_test_wc(struct mlx5_ib_dev *dev);
#endif /* MLX5_IB_H */
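A hedged sketch (not a hunk from this patch) of the lookup/teardown discipline the new odp_mkeys xarray and odp_srcu pair is built for; the names come from the declarations above, and the flow is condensed from the odp.c hunks further down:

	int srcu_key = srcu_read_lock(&dev->odp_srcu);
	struct mlx5_core_mkey *mmkey =
		xa_load(&dev->odp_mkeys, mlx5_base_mkey(key));

	if (mmkey) {
		/* safe to use: a destroyer must xa_erase() first and then
		 * wait in synchronize_srcu() before freeing
		 */
	}
	srcu_read_unlock(&dev->odp_srcu, srcu_key);

	/* destroyer side, condensed: */
	xa_erase(&dev->odp_mkeys, mlx5_base_mkey(key));
	synchronize_srcu(&dev->odp_srcu);	/* all readers drained */
	/* the object behind mmkey may now be freed */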
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 7019c12005f4..60d39b9ec41c 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -50,7 +50,6 @@ enum {
static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
static int mr_cache_max_order(struct mlx5_ib_dev *dev);
-static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
static bool umr_can_use_indirect_mkey(struct mlx5_ib_dev *dev)
{
@@ -59,13 +58,9 @@ static bool umr_can_use_indirect_mkey(struct mlx5_ib_dev *dev)
static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
- int err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
+ WARN_ON(xa_load(&dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key)));
- if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
- /* Wait until all page fault handlers using the mr complete. */
- synchronize_srcu(&dev->mr_srcu);
-
- return err;
+ return mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
}
static int order2idx(struct mlx5_ib_dev *dev, int order)
@@ -94,8 +89,6 @@ static void reg_mr_callback(int status, struct mlx5_async_work *context)
struct mlx5_cache_ent *ent = &cache->ent[c];
u8 key;
unsigned long flags;
- struct xarray *mkeys = &dev->mdev->priv.mkey_table;
- int err;
spin_lock_irqsave(&ent->lock, flags);
ent->pending--;
@@ -122,13 +115,6 @@ static void reg_mr_callback(int status, struct mlx5_async_work *context)
ent->size++;
spin_unlock_irqrestore(&ent->lock, flags);
- xa_lock_irqsave(mkeys, flags);
- err = xa_err(__xa_store(mkeys, mlx5_base_mkey(mr->mmkey.key),
- &mr->mmkey, GFP_ATOMIC));
- xa_unlock_irqrestore(mkeys, flags);
- if (err)
- pr_err("Error inserting to mkey tree. 0x%x\n", -err);
-
if (!completion_done(&ent->compl))
complete(&ent->compl);
}
@@ -218,9 +204,6 @@ static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
}
- if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
- synchronize_srcu(&dev->mr_srcu);
-
list_for_each_entry_safe(mr, tmp_mr, &del_list, list) {
list_del(&mr->list);
kfree(mr);
@@ -428,7 +411,7 @@ struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev, int entry)
if (entry < 0 || entry >= MAX_MR_CACHE_ENTRIES) {
mlx5_ib_err(dev, "cache entry %d is out of range\n", entry);
- return NULL;
+ return ERR_PTR(-EINVAL);
}
ent = &cache->ent[entry];
@@ -511,7 +494,7 @@ void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
c = order2idx(dev, mr->order);
WARN_ON(c < 0 || c >= MAX_MR_CACHE_ENTRIES);
- if (unreg_umr(dev, mr)) {
+ if (mlx5_mr_cache_invalidate(mr)) {
mr->allocated_from_cache = false;
destroy_mkey(dev, mr);
ent = &cache->ent[c];
@@ -555,10 +538,6 @@ static void clean_keys(struct mlx5_ib_dev *dev, int c)
mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
}
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- synchronize_srcu(&dev->mr_srcu);
-#endif
-
list_for_each_entry_safe(mr, tmp_mr, &del_list, list) {
list_del(&mr->list);
kfree(mr);
@@ -679,6 +658,20 @@ int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
return 0;
}
+static void set_mkc_access_pd_addr_fields(void *mkc, int acc, u64 start_addr,
+ struct ib_pd *pd)
+{
+ MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
+ MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
+ MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ));
+ MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE));
+ MLX5_SET(mkc, mkc, lr, 1);
+
+ MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
+ MLX5_SET(mkc, mkc, qpn, 0xffffff);
+ MLX5_SET64(mkc, mkc, start_addr, start_addr);
+}
+
struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
@@ -702,16 +695,8 @@ struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_PA);
- MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
- MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
- MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ));
- MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE));
- MLX5_SET(mkc, mkc, lr, 1);
-
MLX5_SET(mkc, mkc, length64, 1);
- MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
- MLX5_SET(mkc, mkc, qpn, 0xffffff);
- MLX5_SET64(mkc, mkc, start_addr, 0);
+ set_mkc_access_pd_addr_fields(mkc, acc, 0, pd);
err = mlx5_core_create_mkey(mdev, &mr->mmkey, in, inlen);
if (err)
@@ -779,7 +764,7 @@ static int mr_umem_get(struct mlx5_ib_dev *dev, struct ib_udata *udata,
if (order)
*order = ilog2(roundup_pow_of_two(*ncont));
} else {
- u = ib_umem_get(udata, start, length, access_flags, 0);
+ u = ib_umem_get(udata, start, length, access_flags);
if (IS_ERR(u)) {
mlx5_ib_dbg(dev, "umem get failed (%ld)\n", PTR_ERR(u));
return PTR_ERR(u);
@@ -1169,16 +1154,8 @@ static struct ib_mr *mlx5_ib_get_dm_mr(struct ib_pd *pd, u64 start_addr,
MLX5_SET(mkc, mkc, access_mode_1_0, mode & 0x3);
MLX5_SET(mkc, mkc, access_mode_4_2, (mode >> 2) & 0x7);
- MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
- MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
- MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ));
- MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE));
- MLX5_SET(mkc, mkc, lr, 1);
-
MLX5_SET64(mkc, mkc, len, length);
- MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
- MLX5_SET(mkc, mkc, qpn, 0xffffff);
- MLX5_SET64(mkc, mkc, start_addr, start_addr);
+ set_mkc_access_pd_addr_fields(mkc, acc, start_addr, pd);
err = mlx5_core_create_mkey(mdev, &mr->mmkey, in, inlen);
if (err)
@@ -1337,10 +1314,15 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
if (is_odp_mr(mr)) {
to_ib_umem_odp(mr->umem)->private = mr;
- atomic_set(&mr->num_pending_prefetch, 0);
+ atomic_set(&mr->num_deferred_work, 0);
+ err = xa_err(xa_store(&dev->odp_mkeys,
+ mlx5_base_mkey(mr->mmkey.key), &mr->mmkey,
+ GFP_KERNEL));
+ if (err) {
+ dereg_mr(dev, mr);
+ return ERR_PTR(err);
+ }
}
- if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
- smp_store_release(&mr->live, 1);
return &mr->ibmr;
error:
@@ -1348,22 +1330,29 @@ error:
return ERR_PTR(err);
}
-static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
+/**
+ * mlx5_mr_cache_invalidate - Fence all DMA on the MR
+ * @mr: The MR to fence
+ *
+ * Upon return the NIC will not be doing any DMA to the pages under the MR,
+ * and any DMA in progress will be completed. Failure of this function
+ * indicates the HW has failed catastrophically.
+ */
+int mlx5_mr_cache_invalidate(struct mlx5_ib_mr *mr)
{
- struct mlx5_core_dev *mdev = dev->mdev;
struct mlx5_umr_wr umrwr = {};
- if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
+ if (mr->dev->mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
return 0;
umrwr.wr.send_flags = MLX5_IB_SEND_UMR_DISABLE_MR |
MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS;
umrwr.wr.opcode = MLX5_IB_WR_UMR;
- umrwr.pd = dev->umrc.pd;
+ umrwr.pd = mr->dev->umrc.pd;
umrwr.mkey = mr->mmkey.key;
umrwr.ignore_free_state = 1;
- return mlx5_ib_post_send_wait(dev, &umrwr);
+ return mlx5_ib_post_send_wait(mr->dev, &umrwr);
}
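A hedged usage sketch of the failure contract just documented; this mirrors the mlx5_mr_cache_free() hunk earlier in this patch rather than introducing new behavior:

	if (mlx5_mr_cache_invalidate(mr)) {
		/* catastrophic HW failure: the mkey could not be fenced,
		 * so it must not be recycled through the cache
		 */
		mr->allocated_from_cache = false;
		destroy_mkey(dev, mr);
	} else {
		/* fenced; the mkey may be returned to the cache free list */
	}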
static int rereg_umr(struct ib_pd *pd, struct mlx5_ib_mr *mr,
@@ -1447,7 +1436,7 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
* UMR can't be used - MKey needs to be replaced.
*/
if (mr->allocated_from_cache)
- err = unreg_umr(dev, mr);
+ err = mlx5_mr_cache_invalidate(mr);
else
err = destroy_mkey(dev, mr);
if (err)
@@ -1560,6 +1549,7 @@ static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
mr->sig->psv_wire.psv_idx))
mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
mr->sig->psv_wire.psv_idx);
+ xa_erase(&dev->sig_mrs, mlx5_base_mkey(mr->mmkey.key));
kfree(mr->sig);
mr->sig = NULL;
}
@@ -1575,54 +1565,20 @@ static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
int npages = mr->npages;
struct ib_umem *umem = mr->umem;
- if (is_odp_mr(mr)) {
- struct ib_umem_odp *umem_odp = to_ib_umem_odp(umem);
-
- /* Prevent new page faults and
- * prefetch requests from succeeding
- */
- WRITE_ONCE(mr->live, 0);
-
- /* Wait for all running page-fault handlers to finish. */
- synchronize_srcu(&dev->mr_srcu);
-
- /* dequeue pending prefetch requests for the mr */
- if (atomic_read(&mr->num_pending_prefetch))
- flush_workqueue(system_unbound_wq);
- WARN_ON(atomic_read(&mr->num_pending_prefetch));
-
- /* Destroy all page mappings */
- if (!umem_odp->is_implicit_odp)
- mlx5_ib_invalidate_range(umem_odp,
- ib_umem_start(umem_odp),
- ib_umem_end(umem_odp));
- else
- mlx5_ib_free_implicit_mr(mr);
- /*
- * We kill the umem before the MR for ODP,
- * so that there will not be any invalidations in
- * flight, looking at the *mr struct.
- */
- ib_umem_odp_release(umem_odp);
- atomic_sub(npages, &dev->mdev->priv.reg_pages);
-
- /* Avoid double-freeing the umem. */
- umem = NULL;
- }
+ /* Stop all DMA */
+ if (is_odp_mr(mr))
+ mlx5_ib_fence_odp_mr(mr);
+ else
+ clean_mr(dev, mr);
- clean_mr(dev, mr);
+ if (mr->allocated_from_cache)
+ mlx5_mr_cache_free(dev, mr);
+ else
+ kfree(mr);
- /*
- * We should unregister the DMA address from the HCA before
- * remove the DMA mapping.
- */
- mlx5_mr_cache_free(dev, mr);
ib_umem_release(umem);
- if (umem)
- atomic_sub(npages, &dev->mdev->priv.reg_pages);
+ atomic_sub(npages, &dev->mdev->priv.reg_pages);
- if (!mr->allocated_from_cache)
- kfree(mr);
}
int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
@@ -1634,6 +1590,11 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
dereg_mr(to_mdev(mmr->klm_mr->ibmr.device), mmr->klm_mr);
}
+ if (is_odp_mr(mmr) && to_ib_umem_odp(mmr->umem)->is_implicit_odp) {
+ mlx5_ib_free_implicit_mr(mmr);
+ return 0;
+ }
+
dereg_mr(to_mdev(ibmr->device), mmr);
return 0;
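A condensed, comment-only recap of the teardown ordering the hunks above establish; illustrative only, with the steps as implemented in this file:

	/*
	 * mlx5_ib_dereg_mr()
	 *   -> implicit ODP parent?  mlx5_ib_free_implicit_mr(), done
	 *   -> dereg_mr():
	 *        is_odp_mr() ? mlx5_ib_fence_odp_mr()  (stop faults + DMA)
	 *                    : clean_mr()
	 *        allocated_from_cache ? mlx5_mr_cache_free() : kfree()
	 *        ib_umem_release(); reg_pages accounting
	 */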
@@ -1797,8 +1758,15 @@ static int mlx5_alloc_integrity_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
if (err)
goto err_free_mtt_mr;
+ err = xa_err(xa_store(&dev->sig_mrs, mlx5_base_mkey(mr->mmkey.key),
+ mr->sig, GFP_KERNEL));
+ if (err)
+ goto err_free_descs;
return 0;
+err_free_descs:
+ destroy_mkey(dev, mr);
+ mlx5_free_priv_descs(mr);
err_free_mtt_mr:
dereg_mr(to_mdev(mr->mtt_mr->ibmr.device), mr->mtt_mr);
mr->mtt_mr = NULL;
@@ -1951,9 +1919,19 @@ struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
}
}
+ if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
+ err = xa_err(xa_store(&dev->odp_mkeys,
+ mlx5_base_mkey(mw->mmkey.key), &mw->mmkey,
+ GFP_KERNEL));
+ if (err)
+ goto free_mkey;
+ }
+
kfree(in);
return &mw->ibmw;
+free_mkey:
+ mlx5_core_destroy_mkey(dev->mdev, &mw->mmkey);
free:
kfree(mw);
kfree(in);
@@ -1967,13 +1945,12 @@ int mlx5_ib_dealloc_mw(struct ib_mw *mw)
int err;
if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
- xa_erase_irq(&dev->mdev->priv.mkey_table,
- mlx5_base_mkey(mmw->mmkey.key));
+ xa_erase(&dev->odp_mkeys, mlx5_base_mkey(mmw->mmkey.key));
/*
* pagefault_single_data_segment() may be accessing mmw under
* SRCU if the user bound an ODP MR to this MW.
*/
- synchronize_srcu(&dev->mr_srcu);
+ synchronize_srcu(&dev->odp_srcu);
}
err = mlx5_core_destroy_mkey(dev->mdev, &mmw->mmkey);
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 3f9478d19376..45ee40c2f36e 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -93,158 +93,152 @@ struct mlx5_pagefault {
static u64 mlx5_imr_ksm_entries;
-static int check_parent(struct ib_umem_odp *odp,
- struct mlx5_ib_mr *parent)
+void mlx5_odp_populate_klm(struct mlx5_klm *pklm, size_t idx, size_t nentries,
+ struct mlx5_ib_mr *imr, int flags)
{
- struct mlx5_ib_mr *mr = odp->private;
-
- return mr && mr->parent == parent && !odp->dying;
-}
-
-static struct ib_ucontext_per_mm *mr_to_per_mm(struct mlx5_ib_mr *mr)
-{
- if (WARN_ON(!mr || !is_odp_mr(mr)))
- return NULL;
-
- return to_ib_umem_odp(mr->umem)->per_mm;
-}
-
-static struct ib_umem_odp *odp_next(struct ib_umem_odp *odp)
-{
- struct mlx5_ib_mr *mr = odp->private, *parent = mr->parent;
- struct ib_ucontext_per_mm *per_mm = odp->per_mm;
- struct rb_node *rb;
-
- down_read(&per_mm->umem_rwsem);
- while (1) {
- rb = rb_next(&odp->interval_tree.rb);
- if (!rb)
- goto not_found;
- odp = rb_entry(rb, struct ib_umem_odp, interval_tree.rb);
- if (check_parent(odp, parent))
- goto end;
- }
-not_found:
- odp = NULL;
-end:
- up_read(&per_mm->umem_rwsem);
- return odp;
-}
-
-static struct ib_umem_odp *odp_lookup(u64 start, u64 length,
- struct mlx5_ib_mr *parent)
-{
- struct ib_ucontext_per_mm *per_mm = mr_to_per_mm(parent);
- struct ib_umem_odp *odp;
- struct rb_node *rb;
-
- down_read(&per_mm->umem_rwsem);
- odp = rbt_ib_umem_lookup(&per_mm->umem_tree, start, length);
- if (!odp)
- goto end;
-
- while (1) {
- if (check_parent(odp, parent))
- goto end;
- rb = rb_next(&odp->interval_tree.rb);
- if (!rb)
- goto not_found;
- odp = rb_entry(rb, struct ib_umem_odp, interval_tree.rb);
- if (ib_umem_start(odp) > start + length)
- goto not_found;
- }
-not_found:
- odp = NULL;
-end:
- up_read(&per_mm->umem_rwsem);
- return odp;
-}
-
-void mlx5_odp_populate_klm(struct mlx5_klm *pklm, size_t offset,
- size_t nentries, struct mlx5_ib_mr *mr, int flags)
-{
- struct ib_pd *pd = mr->ibmr.pd;
- struct mlx5_ib_dev *dev = to_mdev(pd->device);
- struct ib_umem_odp *odp;
- unsigned long va;
- int i;
+ struct mlx5_klm *end = pklm + nentries;
if (flags & MLX5_IB_UPD_XLT_ZAP) {
- for (i = 0; i < nentries; i++, pklm++) {
+ for (; pklm != end; pklm++, idx++) {
pklm->bcount = cpu_to_be32(MLX5_IMR_MTT_SIZE);
- pklm->key = cpu_to_be32(dev->null_mkey);
+ pklm->key = cpu_to_be32(imr->dev->null_mkey);
pklm->va = 0;
}
return;
}
/*
- * The locking here is pretty subtle. Ideally the implicit children
- * list would be protected by the umem_mutex, however that is not
+ * The locking here is pretty subtle. Ideally the implicit_children
+ * xarray would be protected by the umem_mutex, however that is not
* possible. Instead this uses a weaker update-then-lock pattern:
*
* srcu_read_lock()
- * <change children list>
+ * xa_store()
* mutex_lock(umem_mutex)
* mlx5_ib_update_xlt()
* mutex_unlock(umem_mutex)
* destroy lkey
*
- * ie any change the children list must be followed by the locked
- * update_xlt before destroying.
+ * ie any change to the xarray must be followed by the locked update_xlt
+ * before destroying.
*
* The umem_mutex provides the acquire/release semantic needed to make
- * the children list visible to a racing thread. While SRCU is not
+ * the xa_store() visible to a racing thread. While SRCU is not
* technically required, using it gives consistent use of the SRCU
- * locking around the children list.
+ * locking around the xarray.
*/
- lockdep_assert_held(&to_ib_umem_odp(mr->umem)->umem_mutex);
- lockdep_assert_held(&mr->dev->mr_srcu);
+ lockdep_assert_held(&to_ib_umem_odp(imr->umem)->umem_mutex);
+ lockdep_assert_held(&imr->dev->odp_srcu);
- odp = odp_lookup(offset * MLX5_IMR_MTT_SIZE,
- nentries * MLX5_IMR_MTT_SIZE, mr);
+ for (; pklm != end; pklm++, idx++) {
+ struct mlx5_ib_mr *mtt = xa_load(&imr->implicit_children, idx);
- for (i = 0; i < nentries; i++, pklm++) {
pklm->bcount = cpu_to_be32(MLX5_IMR_MTT_SIZE);
- va = (offset + i) * MLX5_IMR_MTT_SIZE;
- if (odp && ib_umem_start(odp) == va) {
- struct mlx5_ib_mr *mtt = odp->private;
-
+ if (mtt) {
pklm->key = cpu_to_be32(mtt->ibmr.lkey);
- odp = odp_next(odp);
+ pklm->va = cpu_to_be64(idx * MLX5_IMR_MTT_SIZE);
} else {
- pklm->key = cpu_to_be32(dev->null_mkey);
+ pklm->key = cpu_to_be32(imr->dev->null_mkey);
+ pklm->va = 0;
}
- mlx5_ib_dbg(dev, "[%d] va %lx key %x\n",
- i, va, be32_to_cpu(pklm->key));
}
}
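A hedged condensation of the update-then-lock sequence described in the comment above, using names from this patch (error handling and the exact flag set elided):

	srcu_key = srcu_read_lock(&imr->dev->odp_srcu);
	xa_store(&imr->implicit_children, idx, mr, GFP_KERNEL); /* publish */

	mutex_lock(&odp_imr->umem_mutex);	/* acquire/release barrier */
	mlx5_ib_update_xlt(imr, idx, 1, 0,
			   MLX5_IB_UPD_XLT_INDIRECT | MLX5_IB_UPD_XLT_ATOMIC);
	mutex_unlock(&odp_imr->umem_mutex);
	srcu_read_unlock(&imr->dev->odp_srcu, srcu_key);

	/* only after the locked update_xlt may the old lkey be destroyed */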
-static void mr_leaf_free_action(struct work_struct *work)
+static void dma_fence_odp_mr(struct mlx5_ib_mr *mr)
+{
+ struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
+
+ /* Ensure mlx5_ib_invalidate_range() will not touch the MR any more */
+ mutex_lock(&odp->umem_mutex);
+ if (odp->npages) {
+ mlx5_mr_cache_invalidate(mr);
+ ib_umem_odp_unmap_dma_pages(odp, ib_umem_start(odp),
+ ib_umem_end(odp));
+ WARN_ON(odp->npages);
+ }
+ odp->private = NULL;
+ mutex_unlock(&odp->umem_mutex);
+
+ if (!mr->allocated_from_cache) {
+ mlx5_core_destroy_mkey(mr->dev->mdev, &mr->mmkey);
+ WARN_ON(mr->descs);
+ }
+}
+
+/*
+ * This must be called after the mr has been removed from implicit_children
+ * and the SRCU synchronized. NOTE: The MR does not necessarily have to be
+ * empty here; parallel page faults could have raced with the free process and
+ * added pages to it.
+ */
+static void free_implicit_child_mr(struct mlx5_ib_mr *mr, bool need_imr_xlt)
{
- struct ib_umem_odp *odp = container_of(work, struct ib_umem_odp, work);
- int idx = ib_umem_start(odp) >> MLX5_IMR_MTT_SHIFT;
- struct mlx5_ib_mr *mr = odp->private, *imr = mr->parent;
+ struct mlx5_ib_mr *imr = mr->parent;
struct ib_umem_odp *odp_imr = to_ib_umem_odp(imr->umem);
+ struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
+ unsigned long idx = ib_umem_start(odp) >> MLX5_IMR_MTT_SHIFT;
int srcu_key;
- mr->parent = NULL;
- synchronize_srcu(&mr->dev->mr_srcu);
+ /* implicit_child_mr's are not allowed to have deferred work */
+ WARN_ON(atomic_read(&mr->num_deferred_work));
- if (smp_load_acquire(&imr->live)) {
- srcu_key = srcu_read_lock(&mr->dev->mr_srcu);
+ if (need_imr_xlt) {
+ srcu_key = srcu_read_lock(&mr->dev->odp_srcu);
mutex_lock(&odp_imr->umem_mutex);
- mlx5_ib_update_xlt(imr, idx, 1, 0,
+ mlx5_ib_update_xlt(mr->parent, idx, 1, 0,
MLX5_IB_UPD_XLT_INDIRECT |
MLX5_IB_UPD_XLT_ATOMIC);
mutex_unlock(&odp_imr->umem_mutex);
- srcu_read_unlock(&mr->dev->mr_srcu, srcu_key);
+ srcu_read_unlock(&mr->dev->odp_srcu, srcu_key);
}
- ib_umem_odp_release(odp);
+
+ dma_fence_odp_mr(mr);
+
+ mr->parent = NULL;
mlx5_mr_cache_free(mr->dev, mr);
+ ib_umem_odp_release(odp);
+ atomic_dec(&imr->num_deferred_work);
+}
+
+static void free_implicit_child_mr_work(struct work_struct *work)
+{
+ struct mlx5_ib_mr *mr =
+ container_of(work, struct mlx5_ib_mr, odp_destroy.work);
- if (atomic_dec_and_test(&imr->num_leaf_free))
- wake_up(&imr->q_leaf_free);
+ free_implicit_child_mr(mr, true);
+}
+
+static void free_implicit_child_mr_rcu(struct rcu_head *head)
+{
+ struct mlx5_ib_mr *mr =
+ container_of(head, struct mlx5_ib_mr, odp_destroy.rcu);
+
+ /* Freeing a MR is a sleeping operation, so bounce to a work queue */
+ INIT_WORK(&mr->odp_destroy.work, free_implicit_child_mr_work);
+ queue_work(system_unbound_wq, &mr->odp_destroy.work);
+}
+
+static void destroy_unused_implicit_child_mr(struct mlx5_ib_mr *mr)
+{
+ struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
+ unsigned long idx = ib_umem_start(odp) >> MLX5_IMR_MTT_SHIFT;
+ struct mlx5_ib_mr *imr = mr->parent;
+
+ xa_lock(&imr->implicit_children);
+ /*
+ * This can race with mlx5_ib_free_implicit_mr(); the first one to
+ * reach the xa lock wins the race and destroys the MR.
+ */
+ if (__xa_cmpxchg(&imr->implicit_children, idx, mr, NULL, GFP_ATOMIC) !=
+ mr)
+ goto out_unlock;
+
+ atomic_inc(&imr->num_deferred_work);
+ call_srcu(&mr->dev->odp_srcu, &mr->odp_destroy.rcu,
+ free_implicit_child_mr_rcu);
+
+out_unlock:
+ xa_unlock(&imr->implicit_children);
}
void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp, unsigned long start,
@@ -254,19 +248,19 @@ void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp, unsigned long start,
const u64 umr_block_mask = (MLX5_UMR_MTT_ALIGNMENT /
sizeof(struct mlx5_mtt)) - 1;
u64 idx = 0, blk_start_idx = 0;
+ u64 invalidations = 0;
int in_block = 0;
u64 addr;
- if (!umem_odp) {
- pr_err("invalidation called on NULL umem or non-ODP umem\n");
- return;
- }
-
+ mutex_lock(&umem_odp->umem_mutex);
+ /*
+ * If npages is zero then umem_odp->private may not be set up yet. This
+ * does not complete until after the first page is mapped for DMA.
+ */
+ if (!umem_odp->npages)
+ goto out;
mr = umem_odp->private;
- if (!mr || !mr->ibmr.pd)
- return;
-
start = max_t(u64, ib_umem_start(umem_odp), start);
end = min_t(u64, ib_umem_end(umem_odp), end);
@@ -276,7 +270,6 @@ void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp, unsigned long start,
* overwrite the same MTTs. Concurrent invalidations might race us,
* but they will write 0s as well, so no difference in the end result.
*/
- mutex_lock(&umem_odp->umem_mutex);
for (addr = start; addr < end; addr += BIT(umem_odp->page_shift)) {
idx = (addr - ib_umem_start(umem_odp)) >> umem_odp->page_shift;
/*
@@ -291,6 +284,9 @@ void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp, unsigned long start,
blk_start_idx = idx;
in_block = 1;
}
+
+ /* Count page invalidations */
+ invalidations += idx - blk_start_idx + 1;
} else {
u64 umr_offset = idx & umr_block_mask;
@@ -308,6 +304,9 @@ void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp, unsigned long start,
idx - blk_start_idx + 1, 0,
MLX5_IB_UPD_XLT_ZAP |
MLX5_IB_UPD_XLT_ATOMIC);
+
+ mlx5_update_odp_stats(mr, invalidations, invalidations);
+
/*
* We are now sure that the device will not access the
* memory. We can safely unmap it, and mark it as dirty if
@@ -316,13 +315,9 @@ void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp, unsigned long start,
ib_umem_odp_unmap_dma_pages(umem_odp, start, end);
- if (unlikely(!umem_odp->npages && mr->parent &&
- !umem_odp->dying)) {
- WRITE_ONCE(mr->live, 0);
- umem_odp->dying = 1;
- atomic_inc(&mr->parent->num_leaf_free);
- schedule_work(&umem_odp->work);
- }
+ if (unlikely(!umem_odp->npages && mr->parent))
+ destroy_unused_implicit_child_mr(mr);
+out:
mutex_unlock(&umem_odp->umem_mutex);
}
@@ -390,8 +385,6 @@ void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset) &&
!MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled))
caps->general_caps |= IB_ODP_SUPPORT_IMPLICIT;
-
- return;
}
static void mlx5_ib_page_fault_resume(struct mlx5_ib_dev *dev,
@@ -416,237 +409,213 @@ static void mlx5_ib_page_fault_resume(struct mlx5_ib_dev *dev,
wq_num, err);
}
-static struct mlx5_ib_mr *implicit_mr_alloc(struct ib_pd *pd,
- struct ib_umem_odp *umem_odp,
- bool ksm, int access_flags)
+static struct mlx5_ib_mr *implicit_get_child_mr(struct mlx5_ib_mr *imr,
+ unsigned long idx)
{
- struct mlx5_ib_dev *dev = to_mdev(pd->device);
+ struct ib_umem_odp *odp;
struct mlx5_ib_mr *mr;
+ struct mlx5_ib_mr *ret;
int err;
- mr = mlx5_mr_cache_alloc(dev, ksm ? MLX5_IMR_KSM_CACHE_ENTRY :
- MLX5_IMR_MTT_CACHE_ENTRY);
+ odp = ib_umem_odp_alloc_child(to_ib_umem_odp(imr->umem),
+ idx * MLX5_IMR_MTT_SIZE,
+ MLX5_IMR_MTT_SIZE);
+ if (IS_ERR(odp))
+ return ERR_CAST(odp);
+ ret = mr = mlx5_mr_cache_alloc(imr->dev, MLX5_IMR_MTT_CACHE_ENTRY);
if (IS_ERR(mr))
- return mr;
-
- mr->ibmr.pd = pd;
-
- mr->dev = dev;
- mr->access_flags = access_flags;
- mr->mmkey.iova = 0;
- mr->umem = &umem_odp->umem;
-
- if (ksm) {
- err = mlx5_ib_update_xlt(mr, 0,
- mlx5_imr_ksm_entries,
- MLX5_KSM_PAGE_SHIFT,
- MLX5_IB_UPD_XLT_INDIRECT |
- MLX5_IB_UPD_XLT_ZAP |
- MLX5_IB_UPD_XLT_ENABLE);
-
- } else {
- err = mlx5_ib_update_xlt(mr, 0,
- MLX5_IMR_MTT_ENTRIES,
- PAGE_SHIFT,
- MLX5_IB_UPD_XLT_ZAP |
- MLX5_IB_UPD_XLT_ENABLE |
- MLX5_IB_UPD_XLT_ATOMIC);
- }
-
- if (err)
- goto fail;
+ goto out_umem;
+ mr->ibmr.pd = imr->ibmr.pd;
+ mr->access_flags = imr->access_flags;
+ mr->umem = &odp->umem;
mr->ibmr.lkey = mr->mmkey.key;
mr->ibmr.rkey = mr->mmkey.key;
-
- mlx5_ib_dbg(dev, "key %x dev %p mr %p\n",
- mr->mmkey.key, dev->mdev, mr);
-
- return mr;
-
-fail:
- mlx5_ib_err(dev, "Failed to register MKEY %d\n", err);
- mlx5_mr_cache_free(dev, mr);
-
- return ERR_PTR(err);
-}
-
-static struct ib_umem_odp *implicit_mr_get_data(struct mlx5_ib_mr *mr,
- u64 io_virt, size_t bcnt)
-{
- struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.pd->device);
- struct ib_umem_odp *odp, *result = NULL;
- struct ib_umem_odp *odp_mr = to_ib_umem_odp(mr->umem);
- u64 addr = io_virt & MLX5_IMR_MTT_MASK;
- int nentries = 0, start_idx = 0, ret;
- struct mlx5_ib_mr *mtt;
-
- mutex_lock(&odp_mr->umem_mutex);
- odp = odp_lookup(addr, 1, mr);
-
- mlx5_ib_dbg(dev, "io_virt:%llx bcnt:%zx addr:%llx odp:%p\n",
- io_virt, bcnt, addr, odp);
-
-next_mr:
- if (likely(odp)) {
- if (nentries)
- nentries++;
- } else {
- odp = ib_umem_odp_alloc_child(odp_mr, addr, MLX5_IMR_MTT_SIZE);
- if (IS_ERR(odp)) {
- mutex_unlock(&odp_mr->umem_mutex);
- return ERR_CAST(odp);
- }
-
- mtt = implicit_mr_alloc(mr->ibmr.pd, odp, 0,
- mr->access_flags);
- if (IS_ERR(mtt)) {
- mutex_unlock(&odp_mr->umem_mutex);
- ib_umem_odp_release(odp);
- return ERR_CAST(mtt);
- }
-
- odp->private = mtt;
- mtt->umem = &odp->umem;
- mtt->mmkey.iova = addr;
- mtt->parent = mr;
- INIT_WORK(&odp->work, mr_leaf_free_action);
-
- smp_store_release(&mtt->live, 1);
-
- if (!nentries)
- start_idx = addr >> MLX5_IMR_MTT_SHIFT;
- nentries++;
- }
-
- /* Return first odp if region not covered by single one */
- if (likely(!result))
- result = odp;
-
- addr += MLX5_IMR_MTT_SIZE;
- if (unlikely(addr < io_virt + bcnt)) {
- odp = odp_next(odp);
- if (odp && ib_umem_start(odp) != addr)
- odp = NULL;
- goto next_mr;
+ mr->mmkey.iova = idx * MLX5_IMR_MTT_SIZE;
+ mr->parent = imr;
+ odp->private = mr;
+
+ err = mlx5_ib_update_xlt(mr, 0,
+ MLX5_IMR_MTT_ENTRIES,
+ PAGE_SHIFT,
+ MLX5_IB_UPD_XLT_ZAP |
+ MLX5_IB_UPD_XLT_ENABLE);
+ if (err) {
+ ret = ERR_PTR(err);
+ goto out_mr;
}
- if (unlikely(nentries)) {
- ret = mlx5_ib_update_xlt(mr, start_idx, nentries, 0,
- MLX5_IB_UPD_XLT_INDIRECT |
- MLX5_IB_UPD_XLT_ATOMIC);
- if (ret) {
- mlx5_ib_err(dev, "Failed to update PAS\n");
- result = ERR_PTR(ret);
+ /*
+ * Once the store to either xarray completes any error unwind has to
+ * use synchronize_srcu(). Avoid this with xa_reserve()
+ */
+ ret = xa_cmpxchg(&imr->implicit_children, idx, NULL, mr,
+ GFP_KERNEL);
+ if (unlikely(ret)) {
+ if (xa_is_err(ret)) {
+ ret = ERR_PTR(xa_err(ret));
+ goto out_mr;
}
+ /*
+ * Another thread beat us to creating the child mr; use
+ * theirs.
+ */
+ goto out_mr;
}
- mutex_unlock(&odp_mr->umem_mutex);
- return result;
+ mlx5_ib_dbg(imr->dev, "key %x mr %p\n", mr->mmkey.key, mr);
+ return mr;
+
+out_mr:
+ mlx5_mr_cache_free(imr->dev, mr);
+out_umem:
+ ib_umem_odp_release(odp);
+ return ret;
}
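The xa_cmpxchg() above resolves creation races by letting the loser discard its candidate and adopt the winner's entry. A hedged, generic rendering of the idiom; free_candidate() is a hypothetical stand-in for the out_mr/out_umem unwind:

	struct mlx5_ib_mr *old;

	old = xa_cmpxchg(&imr->implicit_children, idx, NULL, cand, GFP_KERNEL);
	if (xa_is_err(old))
		return ERR_PTR(xa_err(old));	/* the store itself failed */
	if (old) {
		free_candidate(cand);	/* another thread won the race */
		return old;		/* use theirs */
	}
	return cand;			/* our candidate is now published */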
struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
struct ib_udata *udata,
int access_flags)
{
- struct mlx5_ib_mr *imr;
+ struct mlx5_ib_dev *dev = to_mdev(pd->ibpd.device);
struct ib_umem_odp *umem_odp;
+ struct mlx5_ib_mr *imr;
+ int err;
umem_odp = ib_umem_odp_alloc_implicit(udata, access_flags);
if (IS_ERR(umem_odp))
return ERR_CAST(umem_odp);
- imr = implicit_mr_alloc(&pd->ibpd, umem_odp, 1, access_flags);
+ imr = mlx5_mr_cache_alloc(dev, MLX5_IMR_KSM_CACHE_ENTRY);
if (IS_ERR(imr)) {
- ib_umem_odp_release(umem_odp);
- return ERR_CAST(imr);
+ err = PTR_ERR(imr);
+ goto out_umem;
}
+ imr->ibmr.pd = &pd->ibpd;
+ imr->access_flags = access_flags;
+ imr->mmkey.iova = 0;
imr->umem = &umem_odp->umem;
- init_waitqueue_head(&imr->q_leaf_free);
- atomic_set(&imr->num_leaf_free, 0);
- atomic_set(&imr->num_pending_prefetch, 0);
- smp_store_release(&imr->live, 1);
+ imr->ibmr.lkey = imr->mmkey.key;
+ imr->ibmr.rkey = imr->mmkey.key;
+ imr->umem = &umem_odp->umem;
+ imr->is_odp_implicit = true;
+ atomic_set(&imr->num_deferred_work, 0);
+ xa_init(&imr->implicit_children);
+
+ err = mlx5_ib_update_xlt(imr, 0,
+ mlx5_imr_ksm_entries,
+ MLX5_KSM_PAGE_SHIFT,
+ MLX5_IB_UPD_XLT_INDIRECT |
+ MLX5_IB_UPD_XLT_ZAP |
+ MLX5_IB_UPD_XLT_ENABLE);
+ if (err)
+ goto out_mr;
+ err = xa_err(xa_store(&dev->odp_mkeys, mlx5_base_mkey(imr->mmkey.key),
+ &imr->mmkey, GFP_KERNEL));
+ if (err)
+ goto out_mr;
+
+ mlx5_ib_dbg(dev, "key %x mr %p\n", imr->mmkey.key, imr);
return imr;
+out_mr:
+ mlx5_ib_err(dev, "Failed to register MKEY %d\n", err);
+ mlx5_mr_cache_free(dev, imr);
+out_umem:
+ ib_umem_odp_release(umem_odp);
+ return ERR_PTR(err);
}
void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *imr)
{
- struct ib_ucontext_per_mm *per_mm = mr_to_per_mm(imr);
- struct rb_node *node;
+ struct ib_umem_odp *odp_imr = to_ib_umem_odp(imr->umem);
+ struct mlx5_ib_dev *dev = imr->dev;
+ struct list_head destroy_list;
+ struct mlx5_ib_mr *mtt;
+ struct mlx5_ib_mr *tmp;
+ unsigned long idx;
- down_read(&per_mm->umem_rwsem);
- for (node = rb_first_cached(&per_mm->umem_tree); node;
- node = rb_next(node)) {
- struct ib_umem_odp *umem_odp =
- rb_entry(node, struct ib_umem_odp, interval_tree.rb);
- struct mlx5_ib_mr *mr = umem_odp->private;
+ INIT_LIST_HEAD(&destroy_list);
- if (mr->parent != imr)
- continue;
+ xa_erase(&dev->odp_mkeys, mlx5_base_mkey(imr->mmkey.key));
+ /*
+ * This stops the SRCU protected page fault path from touching either
+ * the imr or any children. The page fault path can only reach the
+ * children xarray via the imr.
+ */
+ synchronize_srcu(&dev->odp_srcu);
- mutex_lock(&umem_odp->umem_mutex);
- ib_umem_odp_unmap_dma_pages(umem_odp, ib_umem_start(umem_odp),
- ib_umem_end(umem_odp));
+ xa_lock(&imr->implicit_children);
+ xa_for_each (&imr->implicit_children, idx, mtt) {
+ __xa_erase(&imr->implicit_children, idx);
+ list_add(&mtt->odp_destroy.elm, &destroy_list);
+ }
+ xa_unlock(&imr->implicit_children);
- if (umem_odp->dying) {
- mutex_unlock(&umem_odp->umem_mutex);
- continue;
- }
+ /*
+ * num_deferred_work can only be incremented inside the odp_srcu, or
+ * under xa_lock while the child is in the xarray. Thus at this point
+ * it is only decreasing, and all work holding it is now on the wq.
+ */
+ if (atomic_read(&imr->num_deferred_work)) {
+ flush_workqueue(system_unbound_wq);
+ WARN_ON(atomic_read(&imr->num_deferred_work));
+ }
+
+ /*
+ * Fence the imr before we destroy the children. This allows us to
+ * skip updating the XLT of the imr when destroying the child mkeys
+ * the imr points to.
+ */
+ mlx5_mr_cache_invalidate(imr);
+
+ list_for_each_entry_safe (mtt, tmp, &destroy_list, odp_destroy.elm)
+ free_implicit_child_mr(mtt, false);
+
+ mlx5_mr_cache_free(dev, imr);
+ ib_umem_odp_release(odp_imr);
+}
- umem_odp->dying = 1;
- atomic_inc(&imr->num_leaf_free);
- schedule_work(&umem_odp->work);
- mutex_unlock(&umem_odp->umem_mutex);
+/**
+ * mlx5_ib_fence_odp_mr - Stop all access to the ODP MR
+ * @mr: to fence
+ *
+ * On return no parallel threads will be touching this MR and no DMA will be
+ * active.
+ */
+void mlx5_ib_fence_odp_mr(struct mlx5_ib_mr *mr)
+{
+ /* Prevent new page faults and prefetch requests from succeeding */
+ xa_erase(&mr->dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key));
+
+ /* Wait for all running page-fault handlers to finish. */
+ synchronize_srcu(&mr->dev->odp_srcu);
+
+ if (atomic_read(&mr->num_deferred_work)) {
+ flush_workqueue(system_unbound_wq);
+ WARN_ON(atomic_read(&mr->num_deferred_work));
}
- up_read(&per_mm->umem_rwsem);
- wait_event(imr->q_leaf_free, !atomic_read(&imr->num_leaf_free));
+ dma_fence_odp_mr(mr);
}
-#define MLX5_PF_FLAGS_PREFETCH BIT(0)
#define MLX5_PF_FLAGS_DOWNGRADE BIT(1)
-static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
- u64 io_virt, size_t bcnt, u32 *bytes_mapped,
- u32 flags)
+static int pagefault_real_mr(struct mlx5_ib_mr *mr, struct ib_umem_odp *odp,
+ u64 user_va, size_t bcnt, u32 *bytes_mapped,
+ u32 flags)
{
- int npages = 0, current_seq, page_shift, ret, np;
- struct ib_umem_odp *odp_mr = to_ib_umem_odp(mr->umem);
+ int current_seq, page_shift, ret, np;
bool downgrade = flags & MLX5_PF_FLAGS_DOWNGRADE;
- bool prefetch = flags & MLX5_PF_FLAGS_PREFETCH;
u64 access_mask;
u64 start_idx, page_mask;
- struct ib_umem_odp *odp;
- size_t size;
-
- if (odp_mr->is_implicit_odp) {
- odp = implicit_mr_get_data(mr, io_virt, bcnt);
-
- if (IS_ERR(odp))
- return PTR_ERR(odp);
- mr = odp->private;
- } else {
- odp = odp_mr;
- }
-
-next_mr:
- size = min_t(size_t, bcnt, ib_umem_end(odp) - io_virt);
page_shift = odp->page_shift;
page_mask = ~(BIT(page_shift) - 1);
- start_idx = (io_virt - (mr->mmkey.iova & page_mask)) >> page_shift;
+ start_idx = (user_va - (mr->mmkey.iova & page_mask)) >> page_shift;
access_mask = ODP_READ_ALLOWED_BIT;
- if (prefetch && !downgrade && !odp->umem.writable) {
- /* prefetch with write-access must
- * be supported by the MR
- */
- ret = -EINVAL;
- goto out;
- }
-
if (odp->umem.writable && !downgrade)
access_mask |= ODP_WRITE_ALLOWED_BIT;
@@ -657,13 +626,10 @@ next_mr:
*/
smp_rmb();
- ret = ib_umem_odp_map_dma_pages(odp, io_virt, size, access_mask,
- current_seq);
-
- if (ret < 0)
- goto out;
-
- np = ret;
+ np = ib_umem_odp_map_dma_pages(odp, user_va, bcnt, access_mask,
+ current_seq);
+ if (np < 0)
+ return np;
mutex_lock(&odp->umem_mutex);
if (!ib_umem_mmu_notifier_retry(odp, current_seq)) {
@@ -681,35 +647,19 @@ next_mr:
if (ret < 0) {
if (ret != -EAGAIN)
- mlx5_ib_err(dev, "Failed to update mkey page tables\n");
+ mlx5_ib_err(mr->dev,
+ "Failed to update mkey page tables\n");
goto out;
}
if (bytes_mapped) {
u32 new_mappings = (np << page_shift) -
- (io_virt - round_down(io_virt, 1 << page_shift));
- *bytes_mapped += min_t(u32, new_mappings, size);
- }
-
- npages += np << (page_shift - PAGE_SHIFT);
- bcnt -= size;
-
- if (unlikely(bcnt)) {
- struct ib_umem_odp *next;
+ (user_va - round_down(user_va, 1 << page_shift));
- io_virt += size;
- next = odp_next(odp);
- if (unlikely(!next || ib_umem_start(next) != io_virt)) {
- mlx5_ib_dbg(dev, "next implicit leaf removed at 0x%llx. got %p\n",
- io_virt, next);
- return -EAGAIN;
- }
- odp = next;
- mr = odp->private;
- goto next_mr;
+ *bytes_mapped += min_t(u32, new_mappings, bcnt);
}
- return npages;
+ return np << (page_shift - PAGE_SHIFT);
out:
if (ret == -EAGAIN) {
@@ -718,7 +668,7 @@ out:
if (!wait_for_completion_timeout(&odp->notifier_completion,
timeout)) {
mlx5_ib_warn(
- dev,
+ mr->dev,
"timeout waiting for mmu notifier. seq %d against %d. notifiers_count=%d\n",
current_seq, odp->notifiers_seq,
odp->notifiers_count);
@@ -728,6 +678,109 @@ out:
return ret;
}
+static int pagefault_implicit_mr(struct mlx5_ib_mr *imr,
+ struct ib_umem_odp *odp_imr, u64 user_va,
+ size_t bcnt, u32 *bytes_mapped, u32 flags)
+{
+ unsigned long end_idx = (user_va + bcnt - 1) >> MLX5_IMR_MTT_SHIFT;
+ unsigned long upd_start_idx = end_idx + 1;
+ unsigned long upd_len = 0;
+ unsigned long npages = 0;
+ int err;
+ int ret;
+
+ if (unlikely(user_va >= mlx5_imr_ksm_entries * MLX5_IMR_MTT_SIZE ||
+ mlx5_imr_ksm_entries * MLX5_IMR_MTT_SIZE - user_va < bcnt))
+ return -EFAULT;
+
+ /* Fault each child mr that intersects with our interval. */
+ while (bcnt) {
+ unsigned long idx = user_va >> MLX5_IMR_MTT_SHIFT;
+ struct ib_umem_odp *umem_odp;
+ struct mlx5_ib_mr *mtt;
+ u64 len;
+
+ mtt = xa_load(&imr->implicit_children, idx);
+ if (unlikely(!mtt)) {
+ mtt = implicit_get_child_mr(imr, idx);
+ if (IS_ERR(mtt)) {
+ ret = PTR_ERR(mtt);
+ goto out;
+ }
+ upd_start_idx = min(upd_start_idx, idx);
+ upd_len = idx - upd_start_idx + 1;
+ }
+
+ umem_odp = to_ib_umem_odp(mtt->umem);
+ len = min_t(u64, user_va + bcnt, ib_umem_end(umem_odp)) -
+ user_va;
+
+ ret = pagefault_real_mr(mtt, umem_odp, user_va, len,
+ bytes_mapped, flags);
+ if (ret < 0)
+ goto out;
+ user_va += len;
+ bcnt -= len;
+ npages += ret;
+ }
+
+ ret = npages;
+
+ /*
+ * Any time the implicit_children are changed we must perform an
+ * update of the xlt before exiting to ensure the HW and the
+ * implicit_children remain synchronized.
+ */
+out:
+ if (likely(!upd_len))
+ return ret;
+
+ /*
+ * Notice this is not strictly ordered right: the KSM is updated after
+ * the implicit_children is updated, so a parallel page fault could
+ * see a MR that is not yet visible in the KSM. This is similar to a
+ * parallel page fault seeing a MR that is being concurrently removed
+ * from the KSM. Both of these improbable situations are resolved
+ * safely by resuming the HW and then taking another page fault. The
+ * next pagefault handler will see the new information.
+ */
+ mutex_lock(&odp_imr->umem_mutex);
+ err = mlx5_ib_update_xlt(imr, upd_start_idx, upd_len, 0,
+ MLX5_IB_UPD_XLT_INDIRECT |
+ MLX5_IB_UPD_XLT_ATOMIC);
+ mutex_unlock(&odp_imr->umem_mutex);
+ if (err) {
+ mlx5_ib_err(imr->dev, "Failed to update PAS\n");
+ return err;
+ }
+ return ret;
+}
+
+/*
+ * Returns:
+ * -EFAULT: The io_virt..io_virt+bcnt range is not within the MR, it covers
+ * pages that are not accessible, or the MR is no longer valid.
+ * -EAGAIN/-ENOMEM: The operation should be retried
+ *
+ * -EINVAL/others: General internal malfunction
+ * >0: Number of pages mapped
+ */
+static int pagefault_mr(struct mlx5_ib_mr *mr, u64 io_virt, size_t bcnt,
+ u32 *bytes_mapped, u32 flags)
+{
+ struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
+
+ if (!odp->is_implicit_odp) {
+ if (unlikely(io_virt < ib_umem_start(odp) ||
+ ib_umem_end(odp) - io_virt < bcnt))
+ return -EFAULT;
+ return pagefault_real_mr(mr, odp, io_virt, bcnt, bytes_mapped,
+ flags);
+ }
+ return pagefault_implicit_mr(mr, odp, io_virt, bcnt, bytes_mapped,
+ flags);
+}
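A hedged caller-side sketch of the return contract documented above, condensed from the fault and prefetch callers later in this file; resume_with_error follows the existing pagefault-handler convention:

	ret = pagefault_mr(mr, io_virt, bcnt, &bytes_mapped, 0);
	if (ret >= 0) {
		npages += ret;		/* number of pages mapped */
	} else if (ret == -EAGAIN || ret == -ENOMEM) {
		/* transient: resume the HW so it generates a new fault */
		resume_with_error = 0;
	} else {
		/* -EFAULT or internal malfunction: fail the fault */
		resume_with_error = 1;
	}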
+
struct pf_frame {
struct pf_frame *next;
u32 key;
@@ -775,10 +828,9 @@ static int pagefault_single_data_segment(struct mlx5_ib_dev *dev,
struct ib_pd *pd, u32 key,
u64 io_virt, size_t bcnt,
u32 *bytes_committed,
- u32 *bytes_mapped, u32 flags)
+ u32 *bytes_mapped)
{
int npages = 0, srcu_key, ret, i, outlen, cur_outlen = 0, depth = 0;
- bool prefetch = flags & MLX5_PF_FLAGS_PREFETCH;
struct pf_frame *head = NULL, *frame;
struct mlx5_core_mkey *mmkey;
struct mlx5_ib_mr *mr;
@@ -787,58 +839,49 @@ static int pagefault_single_data_segment(struct mlx5_ib_dev *dev,
size_t offset;
int ndescs;
- srcu_key = srcu_read_lock(&dev->mr_srcu);
+ srcu_key = srcu_read_lock(&dev->odp_srcu);
io_virt += *bytes_committed;
bcnt -= *bytes_committed;
next_mr:
- mmkey = xa_load(&dev->mdev->priv.mkey_table, mlx5_base_mkey(key));
+ mmkey = xa_load(&dev->odp_mkeys, mlx5_base_mkey(key));
+ if (!mmkey) {
+ mlx5_ib_dbg(
+ dev,
+ "skipping non ODP MR (lkey=0x%06x) in page fault handler.\n",
+ key);
+ if (bytes_mapped)
+ *bytes_mapped += bcnt;
+ /*
+ * The user could specify a SGL with multiple lkeys and only
+ * some of them are ODP. Treat the non-ODP ones as fully
+ * faulted.
+ */
+ ret = 0;
+ goto srcu_unlock;
+ }
if (!mkey_is_eq(mmkey, key)) {
mlx5_ib_dbg(dev, "failed to find mkey %x\n", key);
ret = -EFAULT;
goto srcu_unlock;
}
- if (prefetch && mmkey->type != MLX5_MKEY_MR) {
- mlx5_ib_dbg(dev, "prefetch is allowed only for MR\n");
- ret = -EINVAL;
- goto srcu_unlock;
- }
-
switch (mmkey->type) {
case MLX5_MKEY_MR:
mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);
- if (!smp_load_acquire(&mr->live) || !mr->ibmr.pd) {
- mlx5_ib_dbg(dev, "got dead MR\n");
- ret = -EFAULT;
- goto srcu_unlock;
- }
- if (prefetch) {
- if (!is_odp_mr(mr) ||
- mr->ibmr.pd != pd) {
- mlx5_ib_dbg(dev, "Invalid prefetch request: %s\n",
- is_odp_mr(mr) ? "MR is not ODP" :
- "PD is not of the MR");
- ret = -EINVAL;
- goto srcu_unlock;
- }
- }
-
- if (!is_odp_mr(mr)) {
- mlx5_ib_dbg(dev, "skipping non ODP MR (lkey=0x%06x) in page fault handler.\n",
- key);
- if (bytes_mapped)
- *bytes_mapped += bcnt;
- ret = 0;
- goto srcu_unlock;
- }
-
- ret = pagefault_mr(dev, mr, io_virt, bcnt, bytes_mapped, flags);
+ ret = pagefault_mr(mr, io_virt, bcnt, bytes_mapped, 0);
if (ret < 0)
goto srcu_unlock;
+ /*
+ * When prefetching a page, a page fault is generated
+ * in order to bring the page into main memory.
+ * In the current flow, page faults are counted.
+ */
+ mlx5_update_odp_stats(mr, faults, ret);
+
npages += ret;
ret = 0;
break;
@@ -928,7 +971,7 @@ srcu_unlock:
}
kfree(out);
- srcu_read_unlock(&dev->mr_srcu, srcu_key);
+ srcu_read_unlock(&dev->odp_srcu, srcu_key);
*bytes_committed = 0;
return ret ? ret : npages;
}
@@ -1009,7 +1052,7 @@ static int pagefault_data_segments(struct mlx5_ib_dev *dev,
ret = pagefault_single_data_segment(dev, NULL, key,
io_virt, bcnt,
&pfault->bytes_committed,
- bytes_mapped, 0);
+ bytes_mapped);
if (ret < 0)
break;
npages += ret;
@@ -1292,8 +1335,7 @@ static void mlx5_ib_mr_rdma_pfault_handler(struct mlx5_ib_dev *dev,
}
ret = pagefault_single_data_segment(dev, NULL, rkey, address, length,
- &pfault->bytes_committed, NULL,
- 0);
+ &pfault->bytes_committed, NULL);
if (ret == -EAGAIN) {
/* We're racing with an invalidation, don't prefetch */
prefetch_activated = 0;
@@ -1320,8 +1362,7 @@ static void mlx5_ib_mr_rdma_pfault_handler(struct mlx5_ib_dev *dev,
ret = pagefault_single_data_segment(dev, NULL, rkey, address,
prefetch_len,
- &bytes_committed, NULL,
- 0);
+ &bytes_committed, NULL);
if (ret < 0 && ret != -EAGAIN) {
mlx5_ib_dbg(dev, "Prefetch failed. ret: %d, QP 0x%x, address: 0x%.16llx, length = 0x%.16x\n",
ret, pfault->token, address, prefetch_len);
@@ -1624,114 +1665,128 @@ int mlx5_ib_odp_init(void)
struct prefetch_mr_work {
struct work_struct work;
- struct ib_pd *pd;
u32 pf_flags;
u32 num_sge;
- struct ib_sge sg_list[0];
+ struct {
+ u64 io_virt;
+ struct mlx5_ib_mr *mr;
+ size_t length;
+ } frags[];
};
-static void num_pending_prefetch_dec(struct mlx5_ib_dev *dev,
- struct ib_sge *sg_list, u32 num_sge,
- u32 from)
+static void destroy_prefetch_work(struct prefetch_mr_work *work)
{
u32 i;
- int srcu_key;
-
- srcu_key = srcu_read_lock(&dev->mr_srcu);
- for (i = from; i < num_sge; ++i) {
- struct mlx5_core_mkey *mmkey;
- struct mlx5_ib_mr *mr;
-
- mmkey = xa_load(&dev->mdev->priv.mkey_table,
- mlx5_base_mkey(sg_list[i].lkey));
- mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);
- atomic_dec(&mr->num_pending_prefetch);
- }
-
- srcu_read_unlock(&dev->mr_srcu, srcu_key);
+ for (i = 0; i < work->num_sge; ++i)
+ atomic_dec(&work->frags[i].mr->num_deferred_work);
+ kvfree(work);
}
-static bool num_pending_prefetch_inc(struct ib_pd *pd,
- struct ib_sge *sg_list, u32 num_sge)
+static struct mlx5_ib_mr *
+get_prefetchable_mr(struct ib_pd *pd, enum ib_uverbs_advise_mr_advice advice,
+ u32 lkey)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
- bool ret = true;
- u32 i;
+ struct mlx5_core_mkey *mmkey;
+ struct ib_umem_odp *odp;
+ struct mlx5_ib_mr *mr;
- for (i = 0; i < num_sge; ++i) {
- struct mlx5_core_mkey *mmkey;
- struct mlx5_ib_mr *mr;
+ lockdep_assert_held(&dev->odp_srcu);
- mmkey = xa_load(&dev->mdev->priv.mkey_table,
- mlx5_base_mkey(sg_list[i].lkey));
- if (!mmkey || mmkey->key != sg_list[i].lkey) {
- ret = false;
- break;
- }
+ mmkey = xa_load(&dev->odp_mkeys, mlx5_base_mkey(lkey));
+ if (!mmkey || mmkey->key != lkey || mmkey->type != MLX5_MKEY_MR)
+ return NULL;
- if (mmkey->type != MLX5_MKEY_MR) {
- ret = false;
- break;
- }
+ mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);
- mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);
+ if (mr->ibmr.pd != pd)
+ return NULL;
- if (!smp_load_acquire(&mr->live)) {
- ret = false;
- break;
- }
+ odp = to_ib_umem_odp(mr->umem);
- if (mr->ibmr.pd != pd) {
- ret = false;
- break;
- }
+ /* prefetch with write-access must be supported by the MR */
+ if (advice == IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE &&
+ !odp->umem.writable)
+ return NULL;
- atomic_inc(&mr->num_pending_prefetch);
- }
+ return mr;
+}
- if (!ret)
- num_pending_prefetch_dec(dev, sg_list, i, 0);
+static void mlx5_ib_prefetch_mr_work(struct work_struct *w)
+{
+ struct prefetch_mr_work *work =
+ container_of(w, struct prefetch_mr_work, work);
+ u32 bytes_mapped = 0;
+ u32 i;
- return ret;
+ for (i = 0; i < work->num_sge; ++i)
+ pagefault_mr(work->frags[i].mr, work->frags[i].io_virt,
+ work->frags[i].length, &bytes_mapped,
+ work->pf_flags);
+
+ destroy_prefetch_work(work);
}
-static int mlx5_ib_prefetch_sg_list(struct ib_pd *pd, u32 pf_flags,
- struct ib_sge *sg_list, u32 num_sge)
+static bool init_prefetch_work(struct ib_pd *pd,
+ enum ib_uverbs_advise_mr_advice advice,
+ u32 pf_flags, struct prefetch_mr_work *work,
+ struct ib_sge *sg_list, u32 num_sge)
{
u32 i;
- int ret = 0;
- struct mlx5_ib_dev *dev = to_mdev(pd->device);
+
+ INIT_WORK(&work->work, mlx5_ib_prefetch_mr_work);
+ work->pf_flags = pf_flags;
for (i = 0; i < num_sge; ++i) {
- struct ib_sge *sg = &sg_list[i];
- int bytes_committed = 0;
+ work->frags[i].io_virt = sg_list[i].addr;
+ work->frags[i].length = sg_list[i].length;
+ work->frags[i].mr =
+ get_prefetchable_mr(pd, advice, sg_list[i].lkey);
+ if (!work->frags[i].mr) {
+ /* undo the i references taken above and free the work */
+ work->num_sge = i;
+ destroy_prefetch_work(work);
+ return false;
+ }
- ret = pagefault_single_data_segment(dev, pd, sg->lkey, sg->addr,
- sg->length,
- &bytes_committed, NULL,
- pf_flags);
- if (ret < 0)
- break;
+ /* Keep the MR pointer valid outside the SRCU */
+ atomic_inc(&work->frags[i].mr->num_deferred_work);
}
-
- return ret < 0 ? ret : 0;
+ work->num_sge = num_sge;
+ return true;
}
-static void mlx5_ib_prefetch_mr_work(struct work_struct *work)
+static int mlx5_ib_prefetch_sg_list(struct ib_pd *pd,
+ enum ib_uverbs_advise_mr_advice advice,
+ u32 pf_flags, struct ib_sge *sg_list,
+ u32 num_sge)
{
- struct prefetch_mr_work *w =
- container_of(work, struct prefetch_mr_work, work);
+ struct mlx5_ib_dev *dev = to_mdev(pd->device);
+ u32 bytes_mapped = 0;
+ int srcu_key;
+ int ret = 0;
+ u32 i;
+
+ srcu_key = srcu_read_lock(&dev->odp_srcu);
+ for (i = 0; i < num_sge; ++i) {
+ struct mlx5_ib_mr *mr;
- if (ib_device_try_get(w->pd->device)) {
- mlx5_ib_prefetch_sg_list(w->pd, w->pf_flags, w->sg_list,
- w->num_sge);
- ib_device_put(w->pd->device);
+ mr = get_prefetchable_mr(pd, advice, sg_list[i].lkey);
+ if (!mr) {
+ ret = -ENOENT;
+ goto out;
+ }
+ ret = pagefault_mr(mr, sg_list[i].addr, sg_list[i].length,
+ &bytes_mapped, pf_flags);
+ if (ret < 0)
+ goto out;
}
+ ret = 0;
- num_pending_prefetch_dec(to_mdev(w->pd->device), w->sg_list,
- w->num_sge, 0);
- kvfree(w);
+out:
+ srcu_read_unlock(&dev->odp_srcu, srcu_key);
+ return ret;
}
int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
@@ -1739,43 +1794,27 @@ int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
u32 flags, struct ib_sge *sg_list, u32 num_sge)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
- u32 pf_flags = MLX5_PF_FLAGS_PREFETCH;
+ u32 pf_flags = 0;
struct prefetch_mr_work *work;
- bool valid_req;
int srcu_key;
if (advice == IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH)
pf_flags |= MLX5_PF_FLAGS_DOWNGRADE;
if (flags & IB_UVERBS_ADVISE_MR_FLAG_FLUSH)
- return mlx5_ib_prefetch_sg_list(pd, pf_flags, sg_list,
+ return mlx5_ib_prefetch_sg_list(pd, advice, pf_flags, sg_list,
num_sge);
- work = kvzalloc(struct_size(work, sg_list, num_sge), GFP_KERNEL);
+ work = kvzalloc(struct_size(work, frags, num_sge), GFP_KERNEL);
if (!work)
return -ENOMEM;
- memcpy(work->sg_list, sg_list, num_sge * sizeof(struct ib_sge));
-
- /* It is guaranteed that the pd when work is executed is the pd when
- * work was queued since pd can't be destroyed while it holds MRs and
- * destroying a MR leads to flushing the workquque
- */
- work->pd = pd;
- work->pf_flags = pf_flags;
- work->num_sge = num_sge;
-
- INIT_WORK(&work->work, mlx5_ib_prefetch_mr_work);
-
- srcu_key = srcu_read_lock(&dev->mr_srcu);
-
- valid_req = num_pending_prefetch_inc(pd, sg_list, num_sge);
- if (valid_req)
- queue_work(system_unbound_wq, &work->work);
- else
- kvfree(work);
-
- srcu_read_unlock(&dev->mr_srcu, srcu_key);
-
- return valid_req ? 0 : -EINVAL;
+ srcu_key = srcu_read_lock(&dev->odp_srcu);
+ if (!init_prefetch_work(pd, advice, pf_flags, work, sg_list, num_sge)) {
+ srcu_read_unlock(&dev->odp_srcu, srcu_key);
+ return -EINVAL;
+ }
+ queue_work(system_unbound_wq, &work->work);
+ srcu_read_unlock(&dev->odp_srcu, srcu_key);
+ return 0;
}
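A hedged consumer-side sketch of how the path above is reached through the kernel verbs layer, assuming the ib_advise_mr() signature of this era; the address, length and lkey values are hypothetical:

	struct ib_sge sge = {
		.addr = prefetch_va,	/* hypothetical VA inside an ODP MR */
		.length = prefetch_len,
		.lkey = odp_mr_lkey,
	};

	/* FLUSH faults synchronously; without it the work is queued */
	ret = ib_advise_mr(pd, IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE,
			   IB_UVERBS_ADVISE_MR_FLAG_FLUSH, &sge, 1);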
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 5fd071c05944..7e51870e9e01 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -749,7 +749,7 @@ static int mlx5_ib_umem_get(struct mlx5_ib_dev *dev, struct ib_udata *udata,
{
int err;
- *umem = ib_umem_get(udata, addr, size, 0, 0);
+ *umem = ib_umem_get(udata, addr, size, 0);
if (IS_ERR(*umem)) {
mlx5_ib_dbg(dev, "umem_get failed\n");
return PTR_ERR(*umem);
@@ -806,7 +806,7 @@ static int create_user_rq(struct mlx5_ib_dev *dev, struct ib_pd *pd,
if (!ucmd->buf_addr)
return -EINVAL;
- rwq->umem = ib_umem_get(udata, ucmd->buf_addr, rwq->buf_size, 0, 0);
+ rwq->umem = ib_umem_get(udata, ucmd->buf_addr, rwq->buf_size, 0);
if (IS_ERR(rwq->umem)) {
mlx5_ib_dbg(dev, "umem_get failed\n");
err = PTR_ERR(rwq->umem);
@@ -1041,11 +1041,14 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK |
IB_QP_CREATE_IPOIB_UD_LSO |
IB_QP_CREATE_NETIF_QP |
- mlx5_ib_create_qp_sqpn_qp1()))
+ MLX5_IB_QP_CREATE_SQPN_QP1 |
+ MLX5_IB_QP_CREATE_WC_TEST))
return -EINVAL;
if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR)
qp->bf.bfreg = &dev->fp_bfreg;
+ else if (init_attr->create_flags & MLX5_IB_QP_CREATE_WC_TEST)
+ qp->bf.bfreg = &dev->wc_bfreg;
else
qp->bf.bfreg = &dev->bfreg;
@@ -1104,7 +1107,7 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
MLX5_SET(qpc, qpc, fre, 1);
MLX5_SET(qpc, qpc, rlky, 1);
- if (init_attr->create_flags & mlx5_ib_create_qp_sqpn_qp1()) {
+ if (init_attr->create_flags & MLX5_IB_QP_CREATE_SQPN_QP1) {
MLX5_SET(qpc, qpc, deth_sqpn, 1);
qp->flags |= MLX5_IB_QP_SQPN_QP1;
}
@@ -2140,7 +2143,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
return -EINVAL;
}
if (init_attr->create_flags &
- mlx5_ib_create_qp_sqpn_qp1()) {
+ MLX5_IB_QP_CREATE_SQPN_QP1) {
mlx5_ib_dbg(dev, "user-space is not allowed to create UD QPs spoofing as QP1\n");
return -EINVAL;
}
@@ -5330,7 +5333,6 @@ out:
* we hit doorbell */
wmb();
- /* currently we support only regular doorbells */
mlx5_write64((__be32 *)ctrl, bf->bfreg->map + bf->offset);
/* Make sure doorbells don't leak out of SQ spinlock
* and reach the HCA out of order.
@@ -5825,7 +5827,7 @@ int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
if (qp->flags & MLX5_IB_QP_MANAGED_RECV)
qp_init_attr->create_flags |= IB_QP_CREATE_MANAGED_RECV;
if (qp->flags & MLX5_IB_QP_SQPN_QP1)
- qp_init_attr->create_flags |= mlx5_ib_create_qp_sqpn_qp1();
+ qp_init_attr->create_flags |= MLX5_IB_QP_CREATE_SQPN_QP1;
qp_init_attr->sq_sig_type = qp->sq_signal_bits & MLX5_WQE_CTRL_CQ_UPDATE ?
IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
@@ -5957,12 +5959,21 @@ static int create_rq(struct mlx5_ib_rwq *rwq, struct ib_pd *pd,
}
MLX5_SET(wq, wq, log_wq_stride, rwq->log_rq_stride);
if (rwq->create_flags & MLX5_IB_WQ_FLAGS_STRIDING_RQ) {
+ /*
+ * In Firmware, the number of strides in each WQE is:
+ * "512 * 2^single_wqe_log_num_of_strides"
+ * Values 3 to 8 are accepted as 10 to 15, 9 to 18 are
+ * accepted as 0 to 9
+ */
+ static const u8 fw_map[] = { 10, 11, 12, 13, 14, 15, 0, 1,
+ 2, 3, 4, 5, 6, 7, 8, 9 };
MLX5_SET(wq, wq, two_byte_shift_en, rwq->two_byte_shift_en);
MLX5_SET(wq, wq, log_wqe_stride_size,
rwq->single_stride_log_num_of_bytes -
MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES);
- MLX5_SET(wq, wq, log_wqe_num_of_strides, rwq->log_num_strides -
- MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES);
+ MLX5_SET(wq, wq, log_wqe_num_of_strides,
+ fw_map[rwq->log_num_strides -
+ MLX5_EXT_MIN_SINGLE_WQE_LOG_NUM_STRIDES]);
}
MLX5_SET(wq, wq, log_wq_sz, rwq->log_rq_size);
MLX5_SET(wq, wq, pd, to_mpd(pd)->pdn);
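A hedged worked example of the fw_map encoding above; reading the 4-bit field modulo 16 (values 10..15 acting as -6..-1) is an inference from the comment, not something the patch states:

	/*
	 * log_num_strides  3 -> fw_map[0]  = 10 -> 512 * 2^(10-16) = 2^3
	 * log_num_strides  9 -> fw_map[6]  =  0 -> 512 * 2^0       = 2^9
	 * log_num_strides 18 -> fw_map[15] =  9 -> 512 * 2^9       = 2^18
	 */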
@@ -6037,6 +6048,19 @@ static int set_user_rq_size(struct mlx5_ib_dev *dev,
return 0;
}
+static bool log_of_strides_valid(struct mlx5_ib_dev *dev, u32 log_num_strides)
+{
+ if ((log_num_strides > MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES) ||
+ (log_num_strides < MLX5_EXT_MIN_SINGLE_WQE_LOG_NUM_STRIDES))
+ return false;
+
+ if (!MLX5_CAP_GEN(dev->mdev, ext_stride_num_range) &&
+ (log_num_strides < MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES))
+ return false;
+
+ return true;
+}
+
static int prepare_user_rq(struct ib_pd *pd,
struct ib_wq_init_attr *init_attr,
struct ib_udata *udata,
@@ -6084,14 +6108,16 @@ static int prepare_user_rq(struct ib_pd *pd,
MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES);
return -EINVAL;
}
- if ((ucmd.single_wqe_log_num_of_strides >
- MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES) ||
- (ucmd.single_wqe_log_num_of_strides <
- MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES)) {
- mlx5_ib_dbg(dev, "Invalid log num strides (%u. Range is %u - %u)\n",
- ucmd.single_wqe_log_num_of_strides,
- MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES,
- MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES);
+ if (!log_of_strides_valid(dev,
+ ucmd.single_wqe_log_num_of_strides)) {
+ mlx5_ib_dbg(
+ dev,
+ "Invalid log num strides (%u. Range is %u - %u)\n",
+ ucmd.single_wqe_log_num_of_strides,
+ MLX5_CAP_GEN(dev->mdev, ext_stride_num_range) ?
+ MLX5_EXT_MIN_SINGLE_WQE_LOG_NUM_STRIDES :
+ MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES,
+ MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES);
return -EINVAL;
}
rwq->single_stride_log_num_of_bytes =
diff --git a/drivers/infiniband/hw/mlx5/restrack.c b/drivers/infiniband/hw/mlx5/restrack.c
new file mode 100644
index 000000000000..8f6c04f12531
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/restrack.c
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2019, Mellanox Technologies inc. All rights reserved.
+ */
+
+#include <uapi/rdma/rdma_netlink.h>
+#include <rdma/ib_umem_odp.h>
+#include <rdma/restrack.h>
+#include "mlx5_ib.h"
+
+static int fill_stat_mr_entry(struct sk_buff *msg,
+ struct rdma_restrack_entry *res)
+{
+ struct ib_mr *ibmr = container_of(res, struct ib_mr, res);
+ struct mlx5_ib_mr *mr = to_mmr(ibmr);
+ struct nlattr *table_attr;
+
+ if (!(mr->access_flags & IB_ACCESS_ON_DEMAND))
+ return 0;
+
+ table_attr = nla_nest_start(msg,
+ RDMA_NLDEV_ATTR_STAT_HWCOUNTERS);
+
+ if (!table_attr)
+ goto err;
+
+ if (rdma_nl_stat_hwcounter_entry(msg, "page_faults",
+ atomic64_read(&mr->odp_stats.faults)))
+ goto err_table;
+ if (rdma_nl_stat_hwcounter_entry(
+ msg, "page_invalidations",
+ atomic64_read(&mr->odp_stats.invalidations)))
+ goto err_table;
+
+ nla_nest_end(msg, table_attr);
+ return 0;
+
+err_table:
+ nla_nest_cancel(msg, table_attr);
+err:
+ return -EMSGSIZE;
+}
+
+static int fill_res_mr_entry(struct sk_buff *msg,
+ struct rdma_restrack_entry *res)
+{
+ struct ib_mr *ibmr = container_of(res, struct ib_mr, res);
+ struct mlx5_ib_mr *mr = to_mmr(ibmr);
+ struct nlattr *table_attr;
+
+ if (!(mr->access_flags & IB_ACCESS_ON_DEMAND))
+ return 0;
+
+ table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
+ if (!table_attr)
+ goto err;
+
+ if (mr->is_odp_implicit) {
+ if (rdma_nl_put_driver_string(msg, "odp", "implicit"))
+ goto err;
+ } else {
+ if (rdma_nl_put_driver_string(msg, "odp", "explicit"))
+ goto err;
+ }
+
+ nla_nest_end(msg, table_attr);
+ return 0;
+
+err:
+ nla_nest_cancel(msg, table_attr);
+ return -EMSGSIZE;
+}
+
+int mlx5_ib_fill_res_entry(struct sk_buff *msg,
+ struct rdma_restrack_entry *res)
+{
+ if (res->type == RDMA_RESTRACK_MR)
+ return fill_res_mr_entry(msg, res);
+
+ return 0;
+}
+
+int mlx5_ib_fill_stat_entry(struct sk_buff *msg,
+ struct rdma_restrack_entry *res)
+{
+ if (res->type == RDMA_RESTRACK_MR)
+ return fill_stat_mr_entry(msg, res);
+
+ return 0;
+}
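These two entry points are presumably wired into the device ops table in main.c (that hunk is outside this excerpt). A hedged sketch, assuming the .fill_res_entry/.fill_stat_entry members present in struct ib_device_ops of this kernel generation:

	static const struct ib_device_ops mlx5_ib_restrack_ops = {
		.fill_res_entry = mlx5_ib_fill_res_entry,
		.fill_stat_entry = mlx5_ib_fill_stat_entry,
	};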
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
index 4e7fde86c96b..62939df3c692 100644
--- a/drivers/infiniband/hw/mlx5/srq.c
+++ b/drivers/infiniband/hw/mlx5/srq.c
@@ -80,7 +80,7 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
srq->wq_sig = !!(ucmd.flags & MLX5_SRQ_FLAG_SIGNATURE);
- srq->umem = ib_umem_get(udata, ucmd.buf_addr, buf_size, 0, 0);
+ srq->umem = ib_umem_get(udata, ucmd.buf_addr, buf_size, 0);
if (IS_ERR(srq->umem)) {
mlx5_ib_dbg(dev, "failed umem get, size %d\n", buf_size);
err = PTR_ERR(srq->umem);
diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h
index bfd4eebc1182..599794c5a78f 100644
--- a/drivers/infiniband/hw/mthca/mthca_dev.h
+++ b/drivers/infiniband/hw/mthca/mthca_dev.h
@@ -576,14 +576,10 @@ enum ib_rate mthca_rate_to_ib(struct mthca_dev *dev, u8 mthca_rate, u8 port);
int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
-int mthca_process_mad(struct ib_device *ibdev,
- int mad_flags,
- u8 port_num,
- const struct ib_wc *in_wc,
- const struct ib_grh *in_grh,
- const struct ib_mad_hdr *in, size_t in_mad_size,
- struct ib_mad_hdr *out, size_t *out_mad_size,
- u16 *out_mad_pkey_index);
+int mthca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+ const struct ib_wc *in_wc, const struct ib_grh *in_grh,
+ const struct ib_mad *in, struct ib_mad *out,
+ size_t *out_mad_size, u16 *out_mad_pkey_index);
int mthca_create_agents(struct mthca_dev *dev);
void mthca_free_agents(struct mthca_dev *dev);
diff --git a/drivers/infiniband/hw/mthca/mthca_mad.c b/drivers/infiniband/hw/mthca/mthca_mad.c
index 7ad517da4917..99aa8183a7f2 100644
--- a/drivers/infiniband/hw/mthca/mthca_mad.c
+++ b/drivers/infiniband/hw/mthca/mthca_mad.c
@@ -196,30 +196,19 @@ static void forward_trap(struct mthca_dev *dev,
}
}
-int mthca_process_mad(struct ib_device *ibdev,
- int mad_flags,
- u8 port_num,
- const struct ib_wc *in_wc,
- const struct ib_grh *in_grh,
- const struct ib_mad_hdr *in, size_t in_mad_size,
- struct ib_mad_hdr *out, size_t *out_mad_size,
- u16 *out_mad_pkey_index)
+int mthca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+ const struct ib_wc *in_wc, const struct ib_grh *in_grh,
+ const struct ib_mad *in, struct ib_mad *out,
+ size_t *out_mad_size, u16 *out_mad_pkey_index)
{
int err;
u16 slid = in_wc ? ib_lid_cpu16(in_wc->slid) : be16_to_cpu(IB_LID_PERMISSIVE);
u16 prev_lid = 0;
struct ib_port_attr pattr;
- const struct ib_mad *in_mad = (const struct ib_mad *)in;
- struct ib_mad *out_mad = (struct ib_mad *)out;
-
- if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
- *out_mad_size != sizeof(*out_mad)))
- return IB_MAD_RESULT_FAILURE;
/* Forward locally generated traps to the SM */
- if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP &&
- slid == 0) {
- forward_trap(to_mdev(ibdev), port_num, in_mad);
+ if (in->mad_hdr.method == IB_MGMT_METHOD_TRAP && !slid) {
+ forward_trap(to_mdev(ibdev), port_num, in);
return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
}
@@ -229,40 +218,39 @@ int mthca_process_mad(struct ib_device *ibdev,
* Only handle PMA and Mellanox vendor-specific class gets and
* sets for other classes.
*/
- if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
- in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
- if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET &&
- in_mad->mad_hdr.method != IB_MGMT_METHOD_SET &&
- in_mad->mad_hdr.method != IB_MGMT_METHOD_TRAP_REPRESS)
+ if (in->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
+ in->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
+ if (in->mad_hdr.method != IB_MGMT_METHOD_GET &&
+ in->mad_hdr.method != IB_MGMT_METHOD_SET &&
+ in->mad_hdr.method != IB_MGMT_METHOD_TRAP_REPRESS)
return IB_MAD_RESULT_SUCCESS;
/*
* Don't process SMInfo queries or vendor-specific
* MADs -- the SMA can't handle them.
*/
- if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO ||
- ((in_mad->mad_hdr.attr_id & IB_SMP_ATTR_VENDOR_MASK) ==
+ if (in->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO ||
+ ((in->mad_hdr.attr_id & IB_SMP_ATTR_VENDOR_MASK) ==
IB_SMP_ATTR_VENDOR_MASK))
return IB_MAD_RESULT_SUCCESS;
- } else if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT ||
- in_mad->mad_hdr.mgmt_class == MTHCA_VENDOR_CLASS1 ||
- in_mad->mad_hdr.mgmt_class == MTHCA_VENDOR_CLASS2) {
- if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET &&
- in_mad->mad_hdr.method != IB_MGMT_METHOD_SET)
+ } else if (in->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT ||
+ in->mad_hdr.mgmt_class == MTHCA_VENDOR_CLASS1 ||
+ in->mad_hdr.mgmt_class == MTHCA_VENDOR_CLASS2) {
+ if (in->mad_hdr.method != IB_MGMT_METHOD_GET &&
+ in->mad_hdr.method != IB_MGMT_METHOD_SET)
return IB_MAD_RESULT_SUCCESS;
} else
return IB_MAD_RESULT_SUCCESS;
- if ((in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
- in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
- in_mad->mad_hdr.method == IB_MGMT_METHOD_SET &&
- in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO &&
+ if ((in->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
+ in->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
+ in->mad_hdr.method == IB_MGMT_METHOD_SET &&
+ in->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO &&
!ib_query_port(ibdev, port_num, &pattr))
prev_lid = ib_lid_cpu16(pattr.lid);
- err = mthca_MAD_IFC(to_mdev(ibdev),
- mad_flags & IB_MAD_IGNORE_MKEY,
- mad_flags & IB_MAD_IGNORE_BKEY,
- port_num, in_wc, in_grh, in_mad, out_mad);
+ err = mthca_MAD_IFC(to_mdev(ibdev), mad_flags & IB_MAD_IGNORE_MKEY,
+ mad_flags & IB_MAD_IGNORE_BKEY, port_num, in_wc,
+ in_grh, in, out);
if (err == -EBADMSG)
return IB_MAD_RESULT_SUCCESS;
else if (err) {
@@ -270,16 +258,16 @@ int mthca_process_mad(struct ib_device *ibdev,
return IB_MAD_RESULT_FAILURE;
}
- if (!out_mad->mad_hdr.status) {
- smp_snoop(ibdev, port_num, in_mad, prev_lid);
- node_desc_override(ibdev, out_mad);
+ if (!out->mad_hdr.status) {
+ smp_snoop(ibdev, port_num, in, prev_lid);
+ node_desc_override(ibdev, out);
}
/* set return bit in status of directed route responses */
- if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
- out_mad->mad_hdr.status |= cpu_to_be16(1 << 15);
+ if (in->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
+ out->mad_hdr.status |= cpu_to_be16(1 << 15);
- if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS)
+ if (in->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS)
/* no response for trap repress */
return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 23554d8bf241..33002530fee7 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -880,9 +880,7 @@ static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
if (!mr)
return ERR_PTR(-ENOMEM);
- mr->umem = ib_umem_get(udata, start, length, acc,
- ucmd.mr_attrs & MTHCA_MR_DMASYNC);
-
+ mr->umem = ib_umem_get(udata, start, length, acc);
if (IS_ERR(mr->umem)) {
err = PTR_ERR(mr->umem);
goto err;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
index 8d3e36d548aa..2b7f00ac41b0 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
@@ -247,35 +247,20 @@ int ocrdma_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
return 0;
}
-int ocrdma_process_mad(struct ib_device *ibdev,
- int process_mad_flags,
- u8 port_num,
- const struct ib_wc *in_wc,
- const struct ib_grh *in_grh,
- const struct ib_mad_hdr *in, size_t in_mad_size,
- struct ib_mad_hdr *out, size_t *out_mad_size,
+int ocrdma_process_mad(struct ib_device *ibdev, int process_mad_flags,
+ u8 port_num, const struct ib_wc *in_wc,
+ const struct ib_grh *in_grh, const struct ib_mad *in,
+ struct ib_mad *out, size_t *out_mad_size,
u16 *out_mad_pkey_index)
{
- int status;
+ int status = IB_MAD_RESULT_SUCCESS;
struct ocrdma_dev *dev;
- const struct ib_mad *in_mad = (const struct ib_mad *)in;
- struct ib_mad *out_mad = (struct ib_mad *)out;
-
- if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
- *out_mad_size != sizeof(*out_mad)))
- return IB_MAD_RESULT_FAILURE;
- switch (in_mad->mad_hdr.mgmt_class) {
- case IB_MGMT_CLASS_PERF_MGMT:
+ if (in->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT) {
dev = get_ocrdma_dev(ibdev);
- if (!ocrdma_pma_counters(dev, out_mad))
- status = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
- else
- status = IB_MAD_RESULT_SUCCESS;
- break;
- default:
- status = IB_MAD_RESULT_SUCCESS;
- break;
+ ocrdma_pma_counters(dev, out);
+ status |= IB_MAD_RESULT_REPLY;
}
+
return status;
}
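Annotation: the rewritten ocrdma_process_mad() composes its result as flags, starting from success and ORing in the reply bit only when a PMA response was actually filled in. A tiny sketch of that composition; the constant values are made up stand-ins for the IB_MAD_RESULT_* flags, not the real definitions.

#include <stdio.h>

/* made-up values standing in for the IB_MAD_RESULT_* flags */
#define MAD_RESULT_SUCCESS 0x1
#define MAD_RESULT_REPLY   0x2
#define MGMT_CLASS_PERF    0x04

static int process_mad(int mgmt_class)
{
	int status = MAD_RESULT_SUCCESS;

	if (mgmt_class == MGMT_CLASS_PERF)
		status |= MAD_RESULT_REPLY;  /* counters were filled in */

	return status;
}

int main(void)
{
	printf("perf: %#x, other: %#x\n",
	       process_mad(MGMT_CLASS_PERF), process_mad(0));
	return 0;
}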
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.h b/drivers/infiniband/hw/ocrdma/ocrdma_ah.h
index 64cb82c08664..9780afcde780 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.h
@@ -56,12 +56,9 @@ int ocrdma_create_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr, u32 flags,
void ocrdma_destroy_ah(struct ib_ah *ah, u32 flags);
int ocrdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
-int ocrdma_process_mad(struct ib_device *,
- int process_mad_flags,
- u8 port_num,
- const struct ib_wc *in_wc,
- const struct ib_grh *in_grh,
- const struct ib_mad_hdr *in, size_t in_mad_size,
- struct ib_mad_hdr *out, size_t *out_mad_size,
+int ocrdma_process_mad(struct ib_device *dev, int process_mad_flags,
+ u8 port_num, const struct ib_wc *in_wc,
+ const struct ib_grh *in_grh, const struct ib_mad *in,
+ struct ib_mad *out, size_t *out_mad_size,
u16 *out_mad_pkey_index);
#endif /* __OCRDMA_AH_H__ */
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
index c15cfc6cef81..d8c47d24d6d6 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
@@ -166,7 +166,6 @@ static const struct ib_device_ops ocrdma_dev_ops = {
.get_port_immutable = ocrdma_port_immutable,
.map_mr_sg = ocrdma_map_mr_sg,
.mmap = ocrdma_mmap,
- .modify_port = ocrdma_modify_port,
.modify_qp = ocrdma_modify_qp,
.poll_cq = ocrdma_poll_cq,
.post_recv = ocrdma_post_recv,
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
index 6ef89c226ad8..c2e0d0fa44be 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
@@ -2034,7 +2034,7 @@ struct ocrdma_rx_stats {
};
struct ocrdma_rx_qp_err_stats {
- u32 nak_invalid_requst_errors;
+ u32 nak_invalid_request_errors;
u32 nak_remote_operation_errors;
u32 nak_count_remote_access_errors;
u32 local_length_errors;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
index a902942adb5d..5f831e3bdbad 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
@@ -423,8 +423,8 @@ static char *ocrdma_rxqp_errstats(struct ocrdma_dev *dev)
memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM));
pcur = stats;
- pcur += ocrdma_add_stat(stats, pcur, "nak_invalid_requst_errors",
- (u64)rx_qp_err_stats->nak_invalid_requst_errors);
+ pcur += ocrdma_add_stat(stats, pcur, "nak_invalid_request_errors",
+ (u64)rx_qp_err_stats->nak_invalid_request_errors);
pcur += ocrdma_add_stat(stats, pcur, "nak_remote_operation_errors",
(u64)rx_qp_err_stats->nak_remote_operation_errors);
pcur += ocrdma_add_stat(stats, pcur, "nak_count_remote_access_errors",
@@ -670,12 +670,10 @@ err:
return -EFAULT;
}
-int ocrdma_pma_counters(struct ocrdma_dev *dev,
- struct ib_mad *out_mad)
+void ocrdma_pma_counters(struct ocrdma_dev *dev, struct ib_mad *out_mad)
{
struct ib_pma_portcounters *pma_cnt;
- memset(out_mad->data, 0, sizeof out_mad->data);
pma_cnt = (void *)(out_mad->data + 40);
ocrdma_update_stats(dev);
@@ -683,7 +681,6 @@ int ocrdma_pma_counters(struct ocrdma_dev *dev,
pma_cnt->port_rcv_data = cpu_to_be32(ocrdma_sysfs_rcv_data(dev));
pma_cnt->port_xmit_packets = cpu_to_be32(ocrdma_sysfs_xmit_pkts(dev));
pma_cnt->port_rcv_packets = cpu_to_be32(ocrdma_sysfs_rcv_pkts(dev));
- return 0;
}
static ssize_t ocrdma_dbgfs_ops_read(struct file *filp, char __user *buffer,
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.h b/drivers/infiniband/hw/ocrdma/ocrdma_stats.h
index bba1fec4f11f..98feca26ac55 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_stats.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.h
@@ -69,7 +69,6 @@ bool ocrdma_alloc_stats_resources(struct ocrdma_dev *dev);
void ocrdma_release_stats_resources(struct ocrdma_dev *dev);
void ocrdma_rem_port_stats(struct ocrdma_dev *dev);
void ocrdma_add_port_stats(struct ocrdma_dev *dev);
-int ocrdma_pma_counters(struct ocrdma_dev *dev,
- struct ib_mad *out_mad);
+void ocrdma_pma_counters(struct ocrdma_dev *dev, struct ib_mad *out_mad);
#endif /* __OCRDMA_STATS_H__ */
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index e8267e590772..9bc1ca6f6f9e 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -190,12 +190,6 @@ int ocrdma_query_port(struct ib_device *ibdev,
return 0;
}
-int ocrdma_modify_port(struct ib_device *ibdev, u8 port, int mask,
- struct ib_port_modify *props)
-{
- return 0;
-}
-
static int ocrdma_add_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
unsigned long len)
{
@@ -875,7 +869,7 @@ struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
mr = kzalloc(sizeof(*mr), GFP_KERNEL);
if (!mr)
return ERR_PTR(status);
- mr->umem = ib_umem_get(udata, start, len, acc, 0);
+ mr->umem = ib_umem_get(udata, start, len, acc);
if (IS_ERR(mr->umem)) {
status = -EFAULT;
goto umem_err;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
index 32488da1b752..3a5010881be5 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
@@ -54,8 +54,6 @@ int ocrdma_arm_cq(struct ib_cq *, enum ib_cq_notify_flags flags);
int ocrdma_query_device(struct ib_device *, struct ib_device_attr *props,
struct ib_udata *uhw);
int ocrdma_query_port(struct ib_device *, u8 port, struct ib_port_attr *props);
-int ocrdma_modify_port(struct ib_device *, u8 port, int mask,
- struct ib_port_modify *props);
enum rdma_protocol_type
ocrdma_query_protocol(struct ib_device *device, u8 port_num);
diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
index dc71b6e16a07..dcdc85a1ab25 100644
--- a/drivers/infiniband/hw/qedr/main.c
+++ b/drivers/infiniband/hw/qedr/main.c
@@ -212,7 +212,7 @@ static const struct ib_device_ops qedr_dev_ops = {
.get_link_layer = qedr_link_layer,
.map_mr_sg = qedr_map_mr_sg,
.mmap = qedr_mmap,
- .modify_port = qedr_modify_port,
+ .mmap_free = qedr_mmap_free,
.modify_qp = qedr_modify_qp,
.modify_srq = qedr_modify_srq,
.poll_cq = qedr_poll_cq,
@@ -357,9 +357,10 @@ static int qedr_alloc_resources(struct qedr_dev *dev)
return -ENOMEM;
spin_lock_init(&dev->sgid_lock);
+ xa_init_flags(&dev->srqs, XA_FLAGS_LOCK_IRQ);
if (IS_IWARP(dev)) {
- xa_init_flags(&dev->qps, XA_FLAGS_LOCK_IRQ);
+ xa_init(&dev->qps);
dev->iwarp_wq = create_singlethread_workqueue("qedr_iwarpq");
}
diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h
index 0cfd849b13d6..5488dbd59d3c 100644
--- a/drivers/infiniband/hw/qedr/qedr.h
+++ b/drivers/infiniband/hw/qedr/qedr.h
@@ -40,6 +40,7 @@
#include <linux/qed/qed_rdma_if.h>
#include <linux/qed/qede_rdma.h>
#include <linux/qed/roce_common.h>
+#include <linux/completion.h>
#include "qedr_hsi_rdma.h"
#define QEDR_NODE_DESC "QLogic 579xx RoCE HCA"
@@ -230,14 +231,16 @@ struct qedr_ucontext {
struct qedr_dev *dev;
struct qedr_pd *pd;
void __iomem *dpi_addr;
+ struct rdma_user_mmap_entry *db_mmap_entry;
u64 dpi_phys_addr;
u32 dpi_size;
u16 dpi;
+ bool db_rec;
+};
- struct list_head mm_head;
-
- /* Lock to protect mm list */
- struct mutex mm_list_lock;
+union db_prod32 {
+ struct rdma_pwm_val16_data data;
+ u32 raw;
};
union db_prod64 {
@@ -265,6 +268,13 @@ struct qedr_userq {
struct qedr_pbl *pbl_tbl;
u64 buf_addr;
size_t buf_len;
+
+ /* doorbell recovery */
+ void __iomem *db_addr;
+ struct qedr_user_db_rec *db_rec_data;
+ struct rdma_user_mmap_entry *db_mmap_entry;
+ void __iomem *db_rec_db2_addr;
+ union db_prod32 db_rec_db2_data;
};
struct qedr_cq {
@@ -300,19 +310,6 @@ struct qedr_pd {
struct qedr_ucontext *uctx;
};
-struct qedr_mm {
- struct {
- u64 phy_addr;
- unsigned long len;
- } key;
- struct list_head entry;
-};
-
-union db_prod32 {
- struct rdma_pwm_val16_data data;
- u32 raw;
-};
-
struct qedr_qp_hwq_info {
/* WQE Elements */
struct qed_chain pbl;
@@ -377,10 +374,20 @@ enum qedr_qp_err_bitmap {
QEDR_QP_ERR_RQ_PBL_FULL = 32,
};
+enum qedr_qp_create_type {
+ QEDR_QP_CREATE_NONE,
+ QEDR_QP_CREATE_USER,
+ QEDR_QP_CREATE_KERNEL,
+};
+
+enum qedr_iwarp_cm_flags {
+ QEDR_IWARP_CM_WAIT_FOR_CONNECT = BIT(0),
+ QEDR_IWARP_CM_WAIT_FOR_DISCONNECT = BIT(1),
+};
+
struct qedr_qp {
struct ib_qp ibqp; /* must be first */
struct qedr_dev *dev;
- struct qedr_iw_ep *ep;
struct qedr_qp_hwq_info sq;
struct qedr_qp_hwq_info rq;
@@ -395,6 +402,7 @@ struct qedr_qp {
u32 id;
struct qedr_pd *pd;
enum ib_qp_type qp_type;
+ enum qedr_qp_create_type create_type;
struct qed_rdma_qp *qed_qp;
u32 qp_id;
u16 icid;
@@ -437,8 +445,11 @@ struct qedr_qp {
/* Relevant to qps created from user space only (applications) */
struct qedr_userq usq;
struct qedr_userq urq;
- atomic_t refcnt;
- bool destroyed;
+
+ /* synchronization objects used with iwarp ep */
+ struct kref refcnt;
+ struct completion iwarp_cm_comp;
+ unsigned long iwarp_cm_flags; /* enum qedr_iwarp_cm_flags */
};
struct qedr_ah {
@@ -476,6 +487,18 @@ struct qedr_mr {
u32 npages;
};
+struct qedr_user_mmap_entry {
+ struct rdma_user_mmap_entry rdma_entry;
+ struct qedr_dev *dev;
+ union {
+ u64 io_address;
+ void *address;
+ };
+ size_t length;
+ u16 dpi;
+ u8 mmap_flag;
+};
+
#define SET_FIELD2(value, name, flag) ((value) |= ((flag) << (name ## _SHIFT)))
#define QEDR_RESP_IMM (RDMA_CQE_RESPONDER_IMM_FLG_MASK << \
@@ -531,7 +554,7 @@ struct qedr_iw_ep {
struct iw_cm_id *cm_id;
struct qedr_qp *qp;
void *qed_context;
- u8 during_connect;
+ struct kref refcnt;
};
static inline
@@ -574,4 +597,11 @@ static inline struct qedr_srq *get_qedr_srq(struct ib_srq *ibsrq)
{
return container_of(ibsrq, struct qedr_srq, ibsrq);
}
+
+static inline struct qedr_user_mmap_entry *
+get_qedr_mmap_entry(struct rdma_user_mmap_entry *rdma_entry)
+{
+ return container_of(rdma_entry, struct qedr_user_mmap_entry,
+ rdma_entry);
+}
#endif
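Annotation: get_qedr_mmap_entry() above relies on container_of() to step from the core's embedded rdma_user_mmap_entry back to the driver wrapper that holds the mmap flag and address. A self-contained sketch of that pattern, using stand-in struct names rather than the qedr definitions:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct core_entry {                 /* stand-in for rdma_user_mmap_entry */
	unsigned long pgoff;
};

struct drv_entry {                  /* stand-in for qedr_user_mmap_entry */
	int mmap_flag;
	struct core_entry core;     /* embedded core object */
};

int main(void)
{
	struct drv_entry d = { .mmap_flag = 1 };
	struct core_entry *c = &d.core;   /* what the core hands back */
	struct drv_entry *back = container_of(c, struct drv_entry, core);

	printf("mmap_flag=%d recovered=%d\n", back->mmap_flag, back == &d);
	return 0;
}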
diff --git a/drivers/infiniband/hw/qedr/qedr_iw_cm.c b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
index 22881d4442b9..792eecd206b6 100644
--- a/drivers/infiniband/hw/qedr/qedr_iw_cm.c
+++ b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
@@ -79,6 +79,27 @@ qedr_fill_sockaddr6(const struct qed_iwarp_cm_info *cm_info,
}
}
+static void qedr_iw_free_qp(struct kref *ref)
+{
+ struct qedr_qp *qp = container_of(ref, struct qedr_qp, refcnt);
+
+ kfree(qp);
+}
+
+static void
+qedr_iw_free_ep(struct kref *ref)
+{
+ struct qedr_iw_ep *ep = container_of(ref, struct qedr_iw_ep, refcnt);
+
+ if (ep->qp)
+ kref_put(&ep->qp->refcnt, qedr_iw_free_qp);
+
+ if (ep->cm_id)
+ ep->cm_id->rem_ref(ep->cm_id);
+
+ kfree(ep);
+}
+
static void
qedr_iw_mpa_request(void *context, struct qed_iwarp_cm_event_params *params)
{
@@ -93,6 +114,7 @@ qedr_iw_mpa_request(void *context, struct qed_iwarp_cm_event_params *params)
ep->dev = dev;
ep->qed_context = params->ep_context;
+ kref_init(&ep->refcnt);
memset(&event, 0, sizeof(event));
event.event = IW_CM_EVENT_CONNECT_REQUEST;
@@ -141,12 +163,10 @@ qedr_iw_close_event(void *context, struct qed_iwarp_cm_event_params *params)
{
struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context;
- if (ep->cm_id) {
+ if (ep->cm_id)
qedr_iw_issue_event(context, params, IW_CM_EVENT_CLOSE);
- ep->cm_id->rem_ref(ep->cm_id);
- ep->cm_id = NULL;
- }
+ kref_put(&ep->refcnt, qedr_iw_free_ep);
}
static void
@@ -186,11 +206,13 @@ static void qedr_iw_disconnect_worker(struct work_struct *work)
struct qedr_qp *qp = ep->qp;
struct iw_cm_event event;
- if (qp->destroyed) {
- kfree(dwork);
- qedr_iw_qp_rem_ref(&qp->ibqp);
- return;
- }
+ /* The qp won't be released until we release the ep.
+ * The ep's refcnt was increased before calling this
+ * function, therefore it is safe to access the qp.
+ */
+ if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_DISCONNECT,
+ &qp->iwarp_cm_flags))
+ goto out;
memset(&event, 0, sizeof(event));
event.status = dwork->status;
@@ -204,7 +226,6 @@ static void qedr_iw_disconnect_worker(struct work_struct *work)
else
qp_params.new_state = QED_ROCE_QP_STATE_SQD;
- kfree(dwork);
if (ep->cm_id)
ep->cm_id->event_handler(ep->cm_id, &event);
@@ -214,7 +235,10 @@ static void qedr_iw_disconnect_worker(struct work_struct *work)
dev->ops->rdma_modify_qp(dev->rdma_ctx, qp->qed_qp, &qp_params);
- qedr_iw_qp_rem_ref(&qp->ibqp);
+ complete(&ep->qp->iwarp_cm_comp);
+out:
+ kfree(dwork);
+ kref_put(&ep->refcnt, qedr_iw_free_ep);
}
static void
@@ -224,13 +248,17 @@ qedr_iw_disconnect_event(void *context,
struct qedr_discon_work *work;
struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context;
struct qedr_dev *dev = ep->dev;
- struct qedr_qp *qp = ep->qp;
work = kzalloc(sizeof(*work), GFP_ATOMIC);
if (!work)
return;
- qedr_iw_qp_add_ref(&qp->ibqp);
+ /* We can't get a close event before disconnect, but since
+ * we're scheduling work on a workqueue we need to make sure
+ * close won't delete the ep, so we increase its refcnt.
+ */
+ kref_get(&ep->refcnt);
+
work->ep = ep;
work->event = params->event;
work->status = params->status;
@@ -252,16 +280,30 @@ qedr_iw_passive_complete(void *context,
if ((params->status == -ECONNREFUSED) && (!ep->qp)) {
DP_DEBUG(dev, QEDR_MSG_IWARP,
"PASSIVE connection refused releasing ep...\n");
- kfree(ep);
+ kref_put(&ep->refcnt, qedr_iw_free_ep);
return;
}
+ complete(&ep->qp->iwarp_cm_comp);
qedr_iw_issue_event(context, params, IW_CM_EVENT_ESTABLISHED);
if (params->status < 0)
qedr_iw_close_event(context, params);
}
+static void
+qedr_iw_active_complete(void *context,
+ struct qed_iwarp_cm_event_params *params)
+{
+ struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context;
+
+ complete(&ep->qp->iwarp_cm_comp);
+ qedr_iw_issue_event(context, params, IW_CM_EVENT_CONNECT_REPLY);
+
+ if (params->status < 0)
+ kref_put(&ep->refcnt, qedr_iw_free_ep);
+}
+
static int
qedr_iw_mpa_reply(void *context, struct qed_iwarp_cm_event_params *params)
{
@@ -288,27 +330,15 @@ qedr_iw_event_handler(void *context, struct qed_iwarp_cm_event_params *params)
qedr_iw_mpa_reply(context, params);
break;
case QED_IWARP_EVENT_PASSIVE_COMPLETE:
- ep->during_connect = 0;
qedr_iw_passive_complete(context, params);
break;
-
case QED_IWARP_EVENT_ACTIVE_COMPLETE:
- ep->during_connect = 0;
- qedr_iw_issue_event(context,
- params,
- IW_CM_EVENT_CONNECT_REPLY);
- if (params->status < 0) {
- struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context;
-
- ep->cm_id->rem_ref(ep->cm_id);
- ep->cm_id = NULL;
- }
+ qedr_iw_active_complete(context, params);
break;
case QED_IWARP_EVENT_DISCONNECT:
qedr_iw_disconnect_event(context, params);
break;
case QED_IWARP_EVENT_CLOSE:
- ep->during_connect = 0;
qedr_iw_close_event(context, params);
break;
case QED_IWARP_EVENT_RQ_EMPTY:
@@ -451,10 +481,10 @@ qedr_addr6_resolve(struct qedr_dev *dev,
if ((!dst) || dst->error) {
if (dst) {
- dst_release(dst);
DP_ERR(dev,
"ip6_route_output returned dst->error = %d\n",
dst->error);
+ dst_release(dst);
}
return -EINVAL;
}
@@ -476,6 +506,19 @@ qedr_addr6_resolve(struct qedr_dev *dev,
return rc;
}
+static struct qedr_qp *qedr_iw_load_qp(struct qedr_dev *dev, u32 qpn)
+{
+ struct qedr_qp *qp;
+
+ xa_lock(&dev->qps);
+ qp = xa_load(&dev->qps, qpn);
+ if (qp)
+ kref_get(&qp->refcnt);
+ xa_unlock(&dev->qps);
+
+ return qp;
+}
+
int qedr_iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
struct qedr_dev *dev = get_qedr_dev(cm_id->device);
@@ -491,10 +534,6 @@ int qedr_iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
int rc = 0;
int i;
- qp = xa_load(&dev->qps, conn_param->qpn);
- if (unlikely(!qp))
- return -EINVAL;
-
laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
raddr = (struct sockaddr_in *)&cm_id->m_remote_addr;
laddr6 = (struct sockaddr_in6 *)&cm_id->m_local_addr;
@@ -516,8 +555,15 @@ int qedr_iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
return -ENOMEM;
ep->dev = dev;
+ kref_init(&ep->refcnt);
+
+ qp = qedr_iw_load_qp(dev, conn_param->qpn);
+ if (!qp) {
+ rc = -EINVAL;
+ goto err;
+ }
+
ep->qp = qp;
- qp->ep = ep;
cm_id->add_ref(cm_id);
ep->cm_id = cm_id;
@@ -580,16 +626,20 @@ int qedr_iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
in_params.qp = qp->qed_qp;
memcpy(in_params.local_mac_addr, dev->ndev->dev_addr, ETH_ALEN);
- ep->during_connect = 1;
+ if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_CONNECT,
+ &qp->iwarp_cm_flags))
+ goto err; /* QP already being destroyed */
+
rc = dev->ops->iwarp_connect(dev->rdma_ctx, &in_params, &out_params);
- if (rc)
+ if (rc) {
+ complete(&qp->iwarp_cm_comp);
goto err;
+ }
return rc;
err:
- cm_id->rem_ref(cm_id);
- kfree(ep);
+ kref_put(&ep->refcnt, qedr_iw_free_ep);
return rc;
}
@@ -677,18 +727,17 @@ int qedr_iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
struct qedr_dev *dev = ep->dev;
struct qedr_qp *qp;
struct qed_iwarp_accept_in params;
- int rc;
+ int rc = 0;
DP_DEBUG(dev, QEDR_MSG_IWARP, "Accept on qpid=%d\n", conn_param->qpn);
- qp = xa_load(&dev->qps, conn_param->qpn);
+ qp = qedr_iw_load_qp(dev, conn_param->qpn);
if (!qp) {
DP_ERR(dev, "Invalid QP number %d\n", conn_param->qpn);
return -EINVAL;
}
ep->qp = qp;
- qp->ep = ep;
cm_id->add_ref(cm_id);
ep->cm_id = cm_id;
@@ -700,15 +749,21 @@ int qedr_iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
params.ird = conn_param->ird;
params.ord = conn_param->ord;
- ep->during_connect = 1;
+ if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_CONNECT,
+ &qp->iwarp_cm_flags))
+ goto err; /* QP already being destroyed */
+
rc = dev->ops->iwarp_accept(dev->rdma_ctx, &params);
- if (rc)
+ if (rc) {
+ complete(&qp->iwarp_cm_comp);
goto err;
+ }
return rc;
+
err:
- ep->during_connect = 0;
- cm_id->rem_ref(cm_id);
+ kref_put(&ep->refcnt, qedr_iw_free_ep);
+
return rc;
}
@@ -731,17 +786,14 @@ void qedr_iw_qp_add_ref(struct ib_qp *ibqp)
{
struct qedr_qp *qp = get_qedr_qp(ibqp);
- atomic_inc(&qp->refcnt);
+ kref_get(&qp->refcnt);
}
void qedr_iw_qp_rem_ref(struct ib_qp *ibqp)
{
struct qedr_qp *qp = get_qedr_qp(ibqp);
- if (atomic_dec_and_test(&qp->refcnt)) {
- xa_erase_irq(&qp->dev->qps, qp->qp_id);
- kfree(qp);
- }
+ kref_put(&qp->refcnt, qedr_iw_free_qp);
}
struct ib_qp *qedr_iw_get_qp(struct ib_device *ibdev, int qpn)
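Annotation: this file moves the ep and qp lifetimes from the ad-hoc during_connect flag onto krefs: lookups take a reference under the xarray lock, every async path holds its own reference, and the last kref_put runs the release callback. A hedged userspace sketch of the kref discipline, with C11 atomics standing in for the kernel implementation:

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct kref { atomic_int refcount; };

static void kref_init(struct kref *k) { atomic_init(&k->refcount, 1); }
static void kref_get(struct kref *k)  { atomic_fetch_add(&k->refcount, 1); }

static void kref_put(struct kref *k, void (*release)(struct kref *))
{
	if (atomic_fetch_sub(&k->refcount, 1) == 1)
		release(k);           /* we dropped the last reference */
}

struct ep {
	struct kref refcnt;
	int id;
};

static void ep_release(struct kref *k)
{
	/* container_of, open-coded for the sketch */
	struct ep *e = (struct ep *)((char *)k - offsetof(struct ep, refcnt));

	printf("freeing ep %d\n", e->id);
	free(e);
}

int main(void)
{
	struct ep *e = malloc(sizeof(*e));

	if (!e)
		return 1;
	e->id = 7;
	kref_init(&e->refcnt);

	kref_get(&e->refcnt);              /* e.g. before queueing work */
	kref_put(&e->refcnt, ep_release);  /* work finished */
	kref_put(&e->refcnt, ep_release);  /* initial ref: frees here */
	return 0;
}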
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 6f3ce86019b7..4cd292966aa9 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -51,6 +51,7 @@
#include "verbs.h"
#include <rdma/qedr-abi.h>
#include "qedr_roce_cm.h"
+#include "qedr_iw_cm.h"
#define QEDR_SRQ_WQE_ELEM_SIZE sizeof(union rdma_srq_elm)
#define RDMA_MAX_SGE_PER_SRQ (4)
@@ -58,6 +59,11 @@
#define DB_ADDR_SHIFT(addr) ((addr) << DB_PWM_ADDR_OFFSET_SHIFT)
+enum {
+ QEDR_USER_MMAP_IO_WC = 0,
+ QEDR_USER_MMAP_PHYS_PAGE,
+};
+
static inline int qedr_ib_copy_to_udata(struct ib_udata *udata, void *src,
size_t len)
{
@@ -250,78 +256,31 @@ int qedr_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *attr)
return 0;
}
-int qedr_modify_port(struct ib_device *ibdev, u8 port, int mask,
- struct ib_port_modify *props)
-{
- return 0;
-}
-
-static int qedr_add_mmap(struct qedr_ucontext *uctx, u64 phy_addr,
- unsigned long len)
-{
- struct qedr_mm *mm;
-
- mm = kzalloc(sizeof(*mm), GFP_KERNEL);
- if (!mm)
- return -ENOMEM;
-
- mm->key.phy_addr = phy_addr;
- /* This function might be called with a length which is not a multiple
- * of PAGE_SIZE, while the mapping is PAGE_SIZE grained and the kernel
- * forces this granularity by increasing the requested size if needed.
- * When qedr_mmap is called, it will search the list with the updated
- * length as a key. To prevent search failures, the length is rounded up
- * in advance to PAGE_SIZE.
- */
- mm->key.len = roundup(len, PAGE_SIZE);
- INIT_LIST_HEAD(&mm->entry);
-
- mutex_lock(&uctx->mm_list_lock);
- list_add(&mm->entry, &uctx->mm_head);
- mutex_unlock(&uctx->mm_list_lock);
-
- DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
- "added (addr=0x%llx,len=0x%lx) for ctx=%p\n",
- (unsigned long long)mm->key.phy_addr,
- (unsigned long)mm->key.len, uctx);
-
- return 0;
-}
-
-static bool qedr_search_mmap(struct qedr_ucontext *uctx, u64 phy_addr,
- unsigned long len)
-{
- bool found = false;
- struct qedr_mm *mm;
-
- mutex_lock(&uctx->mm_list_lock);
- list_for_each_entry(mm, &uctx->mm_head, entry) {
- if (len != mm->key.len || phy_addr != mm->key.phy_addr)
- continue;
-
- found = true;
- break;
- }
- mutex_unlock(&uctx->mm_list_lock);
- DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
- "searched for (addr=0x%llx,len=0x%lx) for ctx=%p, result=%d\n",
- mm->key.phy_addr, mm->key.len, uctx, found);
-
- return found;
-}
-
int qedr_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
{
struct ib_device *ibdev = uctx->device;
int rc;
struct qedr_ucontext *ctx = get_qedr_ucontext(uctx);
struct qedr_alloc_ucontext_resp uresp = {};
+ struct qedr_alloc_ucontext_req ureq = {};
struct qedr_dev *dev = get_qedr_dev(ibdev);
struct qed_rdma_add_user_out_params oparams;
+ struct qedr_user_mmap_entry *entry;
if (!udata)
return -EFAULT;
+ if (udata->inlen) {
+ rc = ib_copy_from_udata(&ureq, udata,
+ min(sizeof(ureq), udata->inlen));
+ if (rc) {
+ DP_ERR(dev, "Problem copying data from user space\n");
+ return -EFAULT;
+ }
+
+ ctx->db_rec = !!(ureq.context_flags & QEDR_ALLOC_UCTX_DB_REC);
+ }
+
rc = dev->ops->rdma_add_user(dev->rdma_ctx, &oparams);
if (rc) {
DP_ERR(dev,
@@ -334,13 +293,29 @@ int qedr_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
ctx->dpi_addr = oparams.dpi_addr;
ctx->dpi_phys_addr = oparams.dpi_phys_addr;
ctx->dpi_size = oparams.dpi_size;
- INIT_LIST_HEAD(&ctx->mm_head);
- mutex_init(&ctx->mm_list_lock);
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ entry->io_address = ctx->dpi_phys_addr;
+ entry->length = ctx->dpi_size;
+ entry->mmap_flag = QEDR_USER_MMAP_IO_WC;
+ entry->dpi = ctx->dpi;
+ entry->dev = dev;
+ rc = rdma_user_mmap_entry_insert(uctx, &entry->rdma_entry,
+ ctx->dpi_size);
+ if (rc) {
+ kfree(entry);
+ goto err;
+ }
+ ctx->db_mmap_entry = &entry->rdma_entry;
uresp.dpm_enabled = dev->user_dpm_enabled;
uresp.wids_enabled = 1;
uresp.wid_count = oparams.wid_count;
- uresp.db_pa = ctx->dpi_phys_addr;
+ uresp.db_pa = rdma_user_mmap_get_offset(ctx->db_mmap_entry);
uresp.db_size = ctx->dpi_size;
uresp.max_send_wr = dev->attr.max_sqe;
uresp.max_recv_wr = dev->attr.max_rqe;
@@ -352,82 +327,92 @@ int qedr_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
if (rc)
- return rc;
+ goto err;
ctx->dev = dev;
- rc = qedr_add_mmap(ctx, ctx->dpi_phys_addr, ctx->dpi_size);
- if (rc)
- return rc;
-
DP_DEBUG(dev, QEDR_MSG_INIT, "Allocating user context %p\n",
&ctx->ibucontext);
return 0;
+
+err:
+ if (!ctx->db_mmap_entry)
+ dev->ops->rdma_remove_user(dev->rdma_ctx, ctx->dpi);
+ else
+ rdma_user_mmap_entry_remove(ctx->db_mmap_entry);
+
+ return rc;
}
void qedr_dealloc_ucontext(struct ib_ucontext *ibctx)
{
struct qedr_ucontext *uctx = get_qedr_ucontext(ibctx);
- struct qedr_mm *mm, *tmp;
DP_DEBUG(uctx->dev, QEDR_MSG_INIT, "Deallocating user context %p\n",
uctx);
- uctx->dev->ops->rdma_remove_user(uctx->dev->rdma_ctx, uctx->dpi);
- list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
- DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
- "deleted (addr=0x%llx,len=0x%lx) for ctx=%p\n",
- mm->key.phy_addr, mm->key.len, uctx);
- list_del(&mm->entry);
- kfree(mm);
- }
+ rdma_user_mmap_entry_remove(uctx->db_mmap_entry);
}
-int qedr_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
+void qedr_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
{
- struct qedr_ucontext *ucontext = get_qedr_ucontext(context);
- struct qedr_dev *dev = get_qedr_dev(context->device);
- unsigned long phys_addr = vma->vm_pgoff << PAGE_SHIFT;
- unsigned long len = (vma->vm_end - vma->vm_start);
- unsigned long dpi_start;
+ struct qedr_user_mmap_entry *entry = get_qedr_mmap_entry(rdma_entry);
+ struct qedr_dev *dev = entry->dev;
- dpi_start = dev->db_phys_addr + (ucontext->dpi * ucontext->dpi_size);
+ if (entry->mmap_flag == QEDR_USER_MMAP_PHYS_PAGE)
+ free_page((unsigned long)entry->address);
+ else if (entry->mmap_flag == QEDR_USER_MMAP_IO_WC)
+ dev->ops->rdma_remove_user(dev->rdma_ctx, entry->dpi);
- DP_DEBUG(dev, QEDR_MSG_INIT,
- "mmap invoked with vm_start=0x%pK, vm_end=0x%pK,vm_pgoff=0x%pK; dpi_start=0x%pK dpi_size=0x%x\n",
- (void *)vma->vm_start, (void *)vma->vm_end,
- (void *)vma->vm_pgoff, (void *)dpi_start, ucontext->dpi_size);
+ kfree(entry);
+}
- if ((vma->vm_start & (PAGE_SIZE - 1)) || (len & (PAGE_SIZE - 1))) {
- DP_ERR(dev,
- "failed mmap, addresses must be page aligned: start=0x%pK, end=0x%pK\n",
- (void *)vma->vm_start, (void *)vma->vm_end);
- return -EINVAL;
- }
+int qedr_mmap(struct ib_ucontext *ucontext, struct vm_area_struct *vma)
+{
+ struct ib_device *dev = ucontext->device;
+ size_t length = vma->vm_end - vma->vm_start;
+ struct rdma_user_mmap_entry *rdma_entry;
+ struct qedr_user_mmap_entry *entry;
+ int rc = 0;
+ u64 pfn;
- if (!qedr_search_mmap(ucontext, phys_addr, len)) {
- DP_ERR(dev, "failed mmap, vm_pgoff=0x%lx is not authorized\n",
- vma->vm_pgoff);
- return -EINVAL;
- }
+ ibdev_dbg(dev,
+ "start %#lx, end %#lx, length = %#zx, pgoff = %#lx\n",
+ vma->vm_start, vma->vm_end, length, vma->vm_pgoff);
- if (phys_addr < dpi_start ||
- ((phys_addr + len) > (dpi_start + ucontext->dpi_size))) {
- DP_ERR(dev,
- "failed mmap, pages are outside of dpi; page address=0x%pK, dpi_start=0x%pK, dpi_size=0x%x\n",
- (void *)phys_addr, (void *)dpi_start,
- ucontext->dpi_size);
+ rdma_entry = rdma_user_mmap_entry_get(ucontext, vma);
+ if (!rdma_entry) {
+ ibdev_dbg(dev, "pgoff[%#lx] does not have valid entry\n",
+ vma->vm_pgoff);
return -EINVAL;
}
-
- if (vma->vm_flags & VM_READ) {
- DP_ERR(dev, "failed mmap, cannot map doorbell bar for read\n");
- return -EINVAL;
+ entry = get_qedr_mmap_entry(rdma_entry);
+ ibdev_dbg(dev,
+ "Mapping address[%#llx], length[%#zx], mmap_flag[%d]\n",
+ entry->io_address, length, entry->mmap_flag);
+
+ switch (entry->mmap_flag) {
+ case QEDR_USER_MMAP_IO_WC:
+ pfn = entry->io_address >> PAGE_SHIFT;
+ rc = rdma_user_mmap_io(ucontext, vma, pfn, length,
+ pgprot_writecombine(vma->vm_page_prot),
+ rdma_entry);
+ break;
+ case QEDR_USER_MMAP_PHYS_PAGE:
+ rc = vm_insert_page(vma, vma->vm_start,
+ virt_to_page(entry->address));
+ break;
+ default:
+ rc = -EINVAL;
}
- vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
- return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, len,
- vma->vm_page_prot);
+ if (rc)
+ ibdev_dbg(dev,
+ "Couldn't mmap address[%#llx] length[%#zx] mmap_flag[%d] err[%d]\n",
+ entry->io_address, length, entry->mmap_flag, rc);
+
+ rdma_user_mmap_entry_put(rdma_entry);
+ return rc;
}
int qedr_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
@@ -657,16 +642,50 @@ static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem,
}
}
+static int qedr_db_recovery_add(struct qedr_dev *dev,
+ void __iomem *db_addr,
+ void *db_data,
+ enum qed_db_rec_width db_width,
+ enum qed_db_rec_space db_space)
+{
+ if (!db_data) {
+ DP_DEBUG(dev, QEDR_MSG_INIT, "avoiding db rec since old lib\n");
+ return 0;
+ }
+
+ return dev->ops->common->db_recovery_add(dev->cdev, db_addr, db_data,
+ db_width, db_space);
+}
+
+static void qedr_db_recovery_del(struct qedr_dev *dev,
+ void __iomem *db_addr,
+ void *db_data)
+{
+ if (!db_data) {
+ DP_DEBUG(dev, QEDR_MSG_INIT, "avoiding db rec since old lib\n");
+ return;
+ }
+
+ /* Ignore the return code; there is not much we can do about it.
+ * The error is logged inside the callee.
+ */
+ dev->ops->common->db_recovery_del(dev->cdev, db_addr, db_data);
+}
+
static int qedr_copy_cq_uresp(struct qedr_dev *dev,
- struct qedr_cq *cq, struct ib_udata *udata)
+ struct qedr_cq *cq, struct ib_udata *udata,
+ u32 db_offset)
{
struct qedr_create_cq_uresp uresp;
int rc;
memset(&uresp, 0, sizeof(uresp));
- uresp.db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
+ uresp.db_offset = db_offset;
uresp.icid = cq->icid;
+ if (cq->q.db_mmap_entry)
+ uresp.db_rec_addr =
+ rdma_user_mmap_get_offset(cq->q.db_mmap_entry);
rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
if (rc)
@@ -694,10 +713,58 @@ static inline int qedr_align_cq_entries(int entries)
return aligned_size / QEDR_CQE_SIZE;
}
+static int qedr_init_user_db_rec(struct ib_udata *udata,
+ struct qedr_dev *dev, struct qedr_userq *q,
+ bool requires_db_rec)
+{
+ struct qedr_ucontext *uctx =
+ rdma_udata_to_drv_context(udata, struct qedr_ucontext,
+ ibucontext);
+ struct qedr_user_mmap_entry *entry;
+ int rc;
+
+ /* Aborting for a non-doorbell userqueue (SRQ) or a non-supporting lib */
+ if (!requires_db_rec || !uctx->db_rec)
+ return 0;
+
+ /* Allocate a page for doorbell recovery, add to mmap */
+ q->db_rec_data = (void *)get_zeroed_page(GFP_USER);
+ if (!q->db_rec_data) {
+ DP_ERR(dev, "get_zeroed_page failed\n");
+ return -ENOMEM;
+ }
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ goto err_free_db_data;
+
+ entry->address = q->db_rec_data;
+ entry->length = PAGE_SIZE;
+ entry->mmap_flag = QEDR_USER_MMAP_PHYS_PAGE;
+ rc = rdma_user_mmap_entry_insert(&uctx->ibucontext,
+ &entry->rdma_entry,
+ PAGE_SIZE);
+ if (rc)
+ goto err_free_entry;
+
+ q->db_mmap_entry = &entry->rdma_entry;
+
+ return 0;
+
+err_free_entry:
+ kfree(entry);
+
+err_free_db_data:
+ free_page((unsigned long)q->db_rec_data);
+ q->db_rec_data = NULL;
+ return -ENOMEM;
+}
+
static inline int qedr_init_user_queue(struct ib_udata *udata,
struct qedr_dev *dev,
struct qedr_userq *q, u64 buf_addr,
- size_t buf_len, int access, int dmasync,
+ size_t buf_len, bool requires_db_rec,
+ int access,
int alloc_and_init)
{
u32 fw_pages;
@@ -705,7 +772,7 @@ static inline int qedr_init_user_queue(struct ib_udata *udata,
q->buf_addr = buf_addr;
q->buf_len = buf_len;
- q->umem = ib_umem_get(udata, q->buf_addr, q->buf_len, access, dmasync);
+ q->umem = ib_umem_get(udata, q->buf_addr, q->buf_len, access);
if (IS_ERR(q->umem)) {
DP_ERR(dev, "create user queue: failed ib_umem_get, got %ld\n",
PTR_ERR(q->umem));
@@ -735,7 +802,8 @@ static inline int qedr_init_user_queue(struct ib_udata *udata,
}
}
- return 0;
+ /* mmap the user address used to store doorbell data for recovery */
+ return qedr_init_user_db_rec(udata, dev, q, requires_db_rec);
err0:
ib_umem_release(q->umem);
@@ -821,6 +889,7 @@ int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
int entries = attr->cqe;
struct qedr_cq *cq = get_qedr_cq(ibcq);
int chain_entries;
+ u32 db_offset;
int page_cnt;
u64 pbl_ptr;
u16 icid;
@@ -840,8 +909,12 @@ int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
chain_entries = qedr_align_cq_entries(entries);
chain_entries = min_t(int, chain_entries, QEDR_MAX_CQES);
+ /* Calc db offset. The user adds the DPI base; the kernel adds the db addr. */
+ db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
+
if (udata) {
- if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) {
+ if (ib_copy_from_udata(&ureq, udata, min(sizeof(ureq),
+ udata->inlen))) {
DP_ERR(dev,
"create cq: problem copying data from user space\n");
goto err0;
@@ -856,7 +929,7 @@ int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
cq->cq_type = QEDR_CQ_TYPE_USER;
rc = qedr_init_user_queue(udata, dev, &cq->q, ureq.addr,
- ureq.len, IB_ACCESS_LOCAL_WRITE, 1,
+ ureq.len, true, IB_ACCESS_LOCAL_WRITE,
1);
if (rc)
goto err0;
@@ -865,6 +938,7 @@ int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
page_cnt = cq->q.pbl_info.num_pbes;
cq->ibcq.cqe = chain_entries;
+ cq->q.db_addr = ctx->dpi_addr + db_offset;
} else {
cq->cq_type = QEDR_CQ_TYPE_KERNEL;
@@ -876,7 +950,7 @@ int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
sizeof(union rdma_cqe),
&cq->pbl, NULL);
if (rc)
- goto err1;
+ goto err0;
page_cnt = qed_chain_get_page_cnt(&cq->pbl);
pbl_ptr = qed_chain_get_pbl_phys(&cq->pbl);
@@ -888,21 +962,28 @@ int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
rc = dev->ops->rdma_create_cq(dev->rdma_ctx, &params, &icid);
if (rc)
- goto err2;
+ goto err1;
cq->icid = icid;
cq->sig = QEDR_CQ_MAGIC_NUMBER;
spin_lock_init(&cq->cq_lock);
if (udata) {
- rc = qedr_copy_cq_uresp(dev, cq, udata);
+ rc = qedr_copy_cq_uresp(dev, cq, udata, db_offset);
+ if (rc)
+ goto err2;
+
+ rc = qedr_db_recovery_add(dev, cq->q.db_addr,
+ &cq->q.db_rec_data->db_data,
+ DB_REC_WIDTH_64B,
+ DB_REC_USER);
if (rc)
- goto err3;
+ goto err2;
+
} else {
/* Generate doorbell address. */
- cq->db_addr = dev->db_addr +
- DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
cq->db.data.icid = cq->icid;
+ cq->db_addr = dev->db_addr + db_offset;
cq->db.data.params = DB_AGG_CMD_SET <<
RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT;
@@ -912,6 +993,11 @@ int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
cq->latest_cqe = NULL;
consume_cqe(cq);
cq->cq_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
+
+ rc = qedr_db_recovery_add(dev, cq->db_addr, &cq->db.data,
+ DB_REC_WIDTH_64B, DB_REC_KERNEL);
+ if (rc)
+ goto err2;
}
DP_DEBUG(dev, QEDR_MSG_CQ,
@@ -920,18 +1006,19 @@ int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
return 0;
-err3:
+err2:
destroy_iparams.icid = cq->icid;
dev->ops->rdma_destroy_cq(dev->rdma_ctx, &destroy_iparams,
&destroy_oparams);
-err2:
- if (udata)
- qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
- else
- dev->ops->common->chain_free(dev->cdev, &cq->pbl);
err1:
- if (udata)
+ if (udata) {
+ qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
ib_umem_release(cq->q.umem);
+ if (cq->q.db_mmap_entry)
+ rdma_user_mmap_entry_remove(cq->q.db_mmap_entry);
+ } else {
+ dev->ops->common->chain_free(dev->cdev, &cq->pbl);
+ }
err0:
return -EINVAL;
}
@@ -962,8 +1049,10 @@ void qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
cq->destroyed = 1;
/* GSIs CQs are handled by driver, so they don't exist in the FW */
- if (cq->cq_type == QEDR_CQ_TYPE_GSI)
+ if (cq->cq_type == QEDR_CQ_TYPE_GSI) {
+ qedr_db_recovery_del(dev, cq->db_addr, &cq->db.data);
return;
+ }
iparams.icid = cq->icid;
dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
@@ -972,6 +1061,14 @@ void qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
if (udata) {
qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
ib_umem_release(cq->q.umem);
+
+ if (cq->q.db_rec_data) {
+ qedr_db_recovery_del(dev, cq->q.db_addr,
+ &cq->q.db_rec_data->db_data);
+ rdma_user_mmap_entry_remove(cq->q.db_mmap_entry);
+ }
+ } else {
+ qedr_db_recovery_del(dev, cq->db_addr, &cq->db.data);
}
/* We don't want the IRQ handler to handle a non-existing CQ so we
@@ -1136,8 +1233,8 @@ static int qedr_copy_srq_uresp(struct qedr_dev *dev,
}
static void qedr_copy_rq_uresp(struct qedr_dev *dev,
- struct qedr_create_qp_uresp *uresp,
- struct qedr_qp *qp)
+ struct qedr_create_qp_uresp *uresp,
+ struct qedr_qp *qp)
{
/* iWARP requires two doorbells per RQ. */
if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
@@ -1150,6 +1247,9 @@ static void qedr_copy_rq_uresp(struct qedr_dev *dev,
}
uresp->rq_icid = qp->icid;
+ if (qp->urq.db_mmap_entry)
+ uresp->rq_db_rec_addr =
+ rdma_user_mmap_get_offset(qp->urq.db_mmap_entry);
}
static void qedr_copy_sq_uresp(struct qedr_dev *dev,
@@ -1163,22 +1263,26 @@ static void qedr_copy_sq_uresp(struct qedr_dev *dev,
uresp->sq_icid = qp->icid;
else
uresp->sq_icid = qp->icid + 1;
+
+ if (qp->usq.db_mmap_entry)
+ uresp->sq_db_rec_addr =
+ rdma_user_mmap_get_offset(qp->usq.db_mmap_entry);
}
static int qedr_copy_qp_uresp(struct qedr_dev *dev,
- struct qedr_qp *qp, struct ib_udata *udata)
+ struct qedr_qp *qp, struct ib_udata *udata,
+ struct qedr_create_qp_uresp *uresp)
{
- struct qedr_create_qp_uresp uresp;
int rc;
- memset(&uresp, 0, sizeof(uresp));
- qedr_copy_sq_uresp(dev, &uresp, qp);
- qedr_copy_rq_uresp(dev, &uresp, qp);
+ memset(uresp, 0, sizeof(*uresp));
+ qedr_copy_sq_uresp(dev, uresp, qp);
+ qedr_copy_rq_uresp(dev, uresp, qp);
- uresp.atomic_supported = dev->atomic_cap != IB_ATOMIC_NONE;
- uresp.qp_id = qp->qp_id;
+ uresp->atomic_supported = dev->atomic_cap != IB_ATOMIC_NONE;
+ uresp->qp_id = qp->qp_id;
- rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+ rc = qedr_ib_copy_to_udata(udata, uresp, sizeof(*uresp));
if (rc)
DP_ERR(dev,
"create qp: failed a copy to user space with qp icid=0x%x.\n",
@@ -1193,7 +1297,10 @@ static void qedr_set_common_qp_params(struct qedr_dev *dev,
struct ib_qp_init_attr *attrs)
{
spin_lock_init(&qp->q_lock);
- atomic_set(&qp->refcnt, 1);
+ if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
+ kref_init(&qp->refcnt);
+ init_completion(&qp->iwarp_cm_comp);
+ }
qp->pd = pd;
qp->qp_type = attrs->qp_type;
qp->max_inline_data = attrs->cap.max_inline_data;
@@ -1222,16 +1329,35 @@ static void qedr_set_common_qp_params(struct qedr_dev *dev,
qp->sq.max_sges, qp->sq_cq->icid);
}
-static void qedr_set_roce_db_info(struct qedr_dev *dev, struct qedr_qp *qp)
+static int qedr_set_roce_db_info(struct qedr_dev *dev, struct qedr_qp *qp)
{
+ int rc;
+
qp->sq.db = dev->db_addr +
DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
qp->sq.db_data.data.icid = qp->icid + 1;
+ rc = qedr_db_recovery_add(dev, qp->sq.db,
+ &qp->sq.db_data,
+ DB_REC_WIDTH_32B,
+ DB_REC_KERNEL);
+ if (rc)
+ return rc;
+
if (!qp->srq) {
qp->rq.db = dev->db_addr +
DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
qp->rq.db_data.data.icid = qp->icid;
+
+ rc = qedr_db_recovery_add(dev, qp->rq.db,
+ &qp->rq.db_data,
+ DB_REC_WIDTH_32B,
+ DB_REC_KERNEL);
+ if (rc)
+ qedr_db_recovery_del(dev, qp->sq.db,
+ &qp->sq.db_data);
}
+
+ return rc;
}
static int qedr_check_srq_params(struct qedr_dev *dev,
@@ -1279,19 +1405,19 @@ static void qedr_free_srq_kernel_params(struct qedr_srq *srq)
static int qedr_init_srq_user_params(struct ib_udata *udata,
struct qedr_srq *srq,
struct qedr_create_srq_ureq *ureq,
- int access, int dmasync)
+ int access)
{
struct scatterlist *sg;
int rc;
rc = qedr_init_user_queue(udata, srq->dev, &srq->usrq, ureq->srq_addr,
- ureq->srq_len, access, dmasync, 1);
+ ureq->srq_len, false, access, 1);
if (rc)
return rc;
srq->prod_umem =
ib_umem_get(udata, ureq->prod_pair_addr,
- sizeof(struct rdma_srq_producers), access, dmasync);
+ sizeof(struct rdma_srq_producers), access);
if (IS_ERR(srq->prod_umem)) {
qedr_free_pbl(srq->dev, &srq->usrq.pbl_info, srq->usrq.pbl_tbl);
ib_umem_release(srq->usrq.umem);
@@ -1381,13 +1507,14 @@ int qedr_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init_attr,
hw_srq->max_sges = init_attr->attr.max_sge;
if (udata) {
- if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) {
+ if (ib_copy_from_udata(&ureq, udata, min(sizeof(ureq),
+ udata->inlen))) {
DP_ERR(dev,
"create srq: problem copying data from user space\n");
goto err0;
}
- rc = qedr_init_srq_user_params(udata, srq, &ureq, 0, 0);
+ rc = qedr_init_srq_user_params(udata, srq, &ureq, 0);
if (rc)
goto err0;
@@ -1570,13 +1697,39 @@ qedr_iwarp_populate_user_qp(struct qedr_dev *dev,
&qp->urq.pbl_info, FW_PAGE_SHIFT);
}
-static void qedr_cleanup_user(struct qedr_dev *dev, struct qedr_qp *qp)
+static void qedr_cleanup_user(struct qedr_dev *dev,
+ struct qedr_ucontext *ctx,
+ struct qedr_qp *qp)
{
ib_umem_release(qp->usq.umem);
qp->usq.umem = NULL;
ib_umem_release(qp->urq.umem);
qp->urq.umem = NULL;
+
+ if (rdma_protocol_roce(&dev->ibdev, 1)) {
+ qedr_free_pbl(dev, &qp->usq.pbl_info, qp->usq.pbl_tbl);
+ qedr_free_pbl(dev, &qp->urq.pbl_info, qp->urq.pbl_tbl);
+ } else {
+ kfree(qp->usq.pbl_tbl);
+ kfree(qp->urq.pbl_tbl);
+ }
+
+ if (qp->usq.db_rec_data) {
+ qedr_db_recovery_del(dev, qp->usq.db_addr,
+ &qp->usq.db_rec_data->db_data);
+ rdma_user_mmap_entry_remove(qp->usq.db_mmap_entry);
+ }
+
+ if (qp->urq.db_rec_data) {
+ qedr_db_recovery_del(dev, qp->urq.db_addr,
+ &qp->urq.db_rec_data->db_data);
+ rdma_user_mmap_entry_remove(qp->urq.db_mmap_entry);
+ }
+
+ if (rdma_protocol_iwarp(&dev->ibdev, 1))
+ qedr_db_recovery_del(dev, qp->urq.db_rec_db2_addr,
+ &qp->urq.db_rec_db2_data);
}
static int qedr_create_user_qp(struct qedr_dev *dev,
@@ -1588,27 +1741,30 @@ static int qedr_create_user_qp(struct qedr_dev *dev,
struct qed_rdma_create_qp_in_params in_params;
struct qed_rdma_create_qp_out_params out_params;
struct qedr_pd *pd = get_qedr_pd(ibpd);
+ struct qedr_create_qp_uresp uresp;
+ struct qedr_ucontext *ctx = NULL;
struct qedr_create_qp_ureq ureq;
int alloc_and_init = rdma_protocol_roce(&dev->ibdev, 1);
int rc = -EINVAL;
+ qp->create_type = QEDR_QP_CREATE_USER;
memset(&ureq, 0, sizeof(ureq));
- rc = ib_copy_from_udata(&ureq, udata, sizeof(ureq));
+ rc = ib_copy_from_udata(&ureq, udata, min(sizeof(ureq), udata->inlen));
if (rc) {
DP_ERR(dev, "Problem copying data from user space\n");
return rc;
}
- /* SQ - read access only (0), dma sync not required (0) */
+ /* SQ - read access only (0) */
rc = qedr_init_user_queue(udata, dev, &qp->usq, ureq.sq_addr,
- ureq.sq_len, 0, 0, alloc_and_init);
+ ureq.sq_len, true, 0, alloc_and_init);
if (rc)
return rc;
if (!qp->srq) {
- /* RQ - read access only (0), dma sync not required (0) */
+ /* RQ - read access only (0) */
rc = qedr_init_user_queue(udata, dev, &qp->urq, ureq.rq_addr,
- ureq.rq_len, 0, 0, alloc_and_init);
+ ureq.rq_len, true, 0, alloc_and_init);
if (rc)
return rc;
}
@@ -1638,29 +1794,76 @@ static int qedr_create_user_qp(struct qedr_dev *dev,
qp->qp_id = out_params.qp_id;
qp->icid = out_params.icid;
- rc = qedr_copy_qp_uresp(dev, qp, udata);
+ rc = qedr_copy_qp_uresp(dev, qp, udata, &uresp);
+ if (rc)
+ goto err;
+
+ /* db offset was calculated in copy_qp_uresp, now set in the user q */
+ ctx = pd->uctx;
+ qp->usq.db_addr = ctx->dpi_addr + uresp.sq_db_offset;
+ qp->urq.db_addr = ctx->dpi_addr + uresp.rq_db_offset;
+
+ if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
+ qp->urq.db_rec_db2_addr = ctx->dpi_addr + uresp.rq_db2_offset;
+
+ /* Calculate the db_rec_db2 data here since it is constant, so
+ * there is no need to reflect it from user space.
+ */
+ qp->urq.db_rec_db2_data.data.icid = cpu_to_le16(qp->icid);
+ qp->urq.db_rec_db2_data.data.value =
+ cpu_to_le16(DQ_TCM_IWARP_POST_RQ_CF_CMD);
+ }
+
+ rc = qedr_db_recovery_add(dev, qp->usq.db_addr,
+ &qp->usq.db_rec_data->db_data,
+ DB_REC_WIDTH_32B,
+ DB_REC_USER);
if (rc)
goto err;
+ rc = qedr_db_recovery_add(dev, qp->urq.db_addr,
+ &qp->urq.db_rec_data->db_data,
+ DB_REC_WIDTH_32B,
+ DB_REC_USER);
+ if (rc)
+ goto err;
+
+ if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
+ rc = qedr_db_recovery_add(dev, qp->urq.db_rec_db2_addr,
+ &qp->urq.db_rec_db2_data,
+ DB_REC_WIDTH_32B,
+ DB_REC_USER);
+ if (rc)
+ goto err;
+ }
qedr_qp_user_print(dev, qp);
- return 0;
+ return rc;
err:
rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
if (rc)
DP_ERR(dev, "create qp: fatal fault. rc=%d", rc);
err1:
- qedr_cleanup_user(dev, qp);
+ qedr_cleanup_user(dev, ctx, qp);
return rc;
}
-static void qedr_set_iwarp_db_info(struct qedr_dev *dev, struct qedr_qp *qp)
+static int qedr_set_iwarp_db_info(struct qedr_dev *dev, struct qedr_qp *qp)
{
+ int rc;
+
qp->sq.db = dev->db_addr +
DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
qp->sq.db_data.data.icid = qp->icid;
+ rc = qedr_db_recovery_add(dev, qp->sq.db,
+ &qp->sq.db_data,
+ DB_REC_WIDTH_32B,
+ DB_REC_KERNEL);
+ if (rc)
+ return rc;
+
qp->rq.db = dev->db_addr +
DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD);
qp->rq.db_data.data.icid = qp->icid;
@@ -1668,6 +1871,19 @@ static void qedr_set_iwarp_db_info(struct qedr_dev *dev, struct qedr_qp *qp)
DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_FLAGS);
qp->rq.iwarp_db2_data.data.icid = qp->icid;
qp->rq.iwarp_db2_data.data.value = DQ_TCM_IWARP_POST_RQ_CF_CMD;
+
+ rc = qedr_db_recovery_add(dev, qp->rq.db,
+ &qp->rq.db_data,
+ DB_REC_WIDTH_32B,
+ DB_REC_KERNEL);
+ if (rc)
+ return rc;
+
+ rc = qedr_db_recovery_add(dev, qp->rq.iwarp_db2,
+ &qp->rq.iwarp_db2_data,
+ DB_REC_WIDTH_32B,
+ DB_REC_KERNEL);
+ return rc;
}
static int
@@ -1715,8 +1931,7 @@ qedr_roce_create_kernel_qp(struct qedr_dev *dev,
qp->qp_id = out_params.qp_id;
qp->icid = out_params.icid;
- qedr_set_roce_db_info(dev, qp);
- return rc;
+ return qedr_set_roce_db_info(dev, qp);
}
static int
@@ -1774,8 +1989,7 @@ qedr_iwarp_create_kernel_qp(struct qedr_dev *dev,
qp->qp_id = out_params.qp_id;
qp->icid = out_params.icid;
- qedr_set_iwarp_db_info(dev, qp);
- return rc;
+ return qedr_set_iwarp_db_info(dev, qp);
err:
dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
@@ -1790,6 +2004,20 @@ static void qedr_cleanup_kernel(struct qedr_dev *dev, struct qedr_qp *qp)
dev->ops->common->chain_free(dev->cdev, &qp->rq.pbl);
kfree(qp->rqe_wr_id);
+
+ /* GSI qp is not registered with the db recovery mechanism, so there is nothing to delete */
+ if (qp->qp_type == IB_QPT_GSI)
+ return;
+
+ qedr_db_recovery_del(dev, qp->sq.db, &qp->sq.db_data);
+
+ if (!qp->srq) {
+ qedr_db_recovery_del(dev, qp->rq.db, &qp->rq.db_data);
+
+ if (rdma_protocol_iwarp(&dev->ibdev, 1))
+ qedr_db_recovery_del(dev, qp->rq.iwarp_db2,
+ &qp->rq.iwarp_db2_data);
+ }
}
static int qedr_create_kernel_qp(struct qedr_dev *dev,
@@ -1805,6 +2033,7 @@ static int qedr_create_kernel_qp(struct qedr_dev *dev,
u32 n_sq_entries;
memset(&in_params, 0, sizeof(in_params));
+ qp->create_type = QEDR_QP_CREATE_KERNEL;
/* A single work request may take up to QEDR_MAX_SQ_WQE_SIZE elements in
* the ring. The ring should allow at least a single WR, even if the
@@ -1918,7 +2147,7 @@ struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
qp->ibqp.qp_num = qp->qp_id;
if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
- rc = xa_insert_irq(&dev->qps, qp->qp_id, qp, GFP_KERNEL);
+ rc = xa_insert(&dev->qps, qp->qp_id, qp, GFP_KERNEL);
if (rc)
goto err;
}
@@ -2429,7 +2658,10 @@ err:
static int qedr_free_qp_resources(struct qedr_dev *dev, struct qedr_qp *qp,
struct ib_udata *udata)
{
- int rc = 0;
+ struct qedr_ucontext *ctx =
+ rdma_udata_to_drv_context(udata, struct qedr_ucontext,
+ ibucontext);
+ int rc;
if (qp->qp_type != IB_QPT_GSI) {
rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
@@ -2437,8 +2669,8 @@ static int qedr_free_qp_resources(struct qedr_dev *dev, struct qedr_qp *qp,
return rc;
}
- if (udata)
- qedr_cleanup_user(dev, qp);
+ if (qp->create_type == QEDR_QP_CREATE_USER)
+ qedr_cleanup_user(dev, ctx, qp);
else
qedr_cleanup_kernel(dev, qp);
@@ -2467,34 +2699,44 @@ int qedr_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
qedr_modify_qp(ibqp, &attr, attr_mask, NULL);
}
} else {
- /* Wait for the connect/accept to complete */
- if (qp->ep) {
- int wait_count = 1;
-
- while (qp->ep->during_connect) {
- DP_DEBUG(dev, QEDR_MSG_QP,
- "Still in during connect/accept\n");
-
- msleep(100);
- if (wait_count++ > 200) {
- DP_NOTICE(dev,
- "during connect timeout\n");
- break;
- }
- }
- }
+ /* If connection establishment started, the WAIT_FOR_CONNECT
+ * bit will be on and we need to wait for the establishment
+ * to complete before destroying the qp.
+ */
+ if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_CONNECT,
+ &qp->iwarp_cm_flags))
+ wait_for_completion(&qp->iwarp_cm_comp);
+
+ /* If graceful disconnect started, the WAIT_FOR_DISCONNECT
+ * bit will be on, and we need to wait for the disconnect to
+ * complete before continuing. We can use the same completion,
+ * iwarp_cm_comp, since this is the only place that waits for
+ * this completion and it is sequential. In addition,
+ * disconnect can't occur before the connection is fully
+ * established, therefore if WAIT_FOR_DISCONNECT is on it
+ * means WAIT_FOR_CONNECT is also on and the completion for
+ * CONNECT already occurred.
+ */
+ if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_DISCONNECT,
+ &qp->iwarp_cm_flags))
+ wait_for_completion(&qp->iwarp_cm_comp);
}
if (qp->qp_type == IB_QPT_GSI)
qedr_destroy_gsi_qp(dev);
+ /* We need to remove the entry from the xarray before we release the
+ * qp_id to avoid a race where the qp_id is reallocated and xa_insert
+ * fails.
+ */
+ if (rdma_protocol_iwarp(&dev->ibdev, 1))
+ xa_erase(&dev->qps, qp->qp_id);
+
qedr_free_qp_resources(dev, qp, udata);
- if (atomic_dec_and_test(&qp->refcnt) &&
- rdma_protocol_iwarp(&dev->ibdev, 1)) {
- xa_erase_irq(&dev->qps, qp->qp_id);
- kfree(qp);
- }
+ if (rdma_protocol_iwarp(&dev->ibdev, 1))
+ qedr_iw_qp_rem_ref(&qp->ibqp);
+
return 0;
}
@@ -2597,7 +2839,7 @@ struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
mr->type = QEDR_MR_USER;
- mr->umem = ib_umem_get(udata, start, len, acc, 0);
+ mr->umem = ib_umem_get(udata, start, len, acc);
if (IS_ERR(mr->umem)) {
rc = -EFAULT;
goto err0;
@@ -2673,8 +2915,8 @@ int qedr_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
- if ((mr->type != QEDR_MR_DMA) && (mr->type != QEDR_MR_FRMR))
- qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
+ if (mr->type != QEDR_MR_DMA)
+ free_mr_info(dev, &mr->info);
/* it could be user registered memory. */
ib_umem_release(mr->umem);
@@ -4106,19 +4348,10 @@ int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
}
int qedr_process_mad(struct ib_device *ibdev, int process_mad_flags,
- u8 port_num,
- const struct ib_wc *in_wc,
- const struct ib_grh *in_grh,
- const struct ib_mad_hdr *mad_hdr,
- size_t in_mad_size, struct ib_mad_hdr *out_mad,
- size_t *out_mad_size, u16 *out_mad_pkey_index)
+ u8 port_num, const struct ib_wc *in_wc,
+ const struct ib_grh *in_grh, const struct ib_mad *in,
+ struct ib_mad *out_mad, size_t *out_mad_size,
+ u16 *out_mad_pkey_index)
{
- struct qedr_dev *dev = get_qedr_dev(ibdev);
-
- DP_DEBUG(dev, QEDR_MSG_GSI,
- "QEDR_PROCESS_MAD in_mad %x %x %x %x %x %x %x %x\n",
- mad_hdr->attr_id, mad_hdr->base_version, mad_hdr->attr_mod,
- mad_hdr->class_specific, mad_hdr->class_version,
- mad_hdr->method, mad_hdr->mgmt_class, mad_hdr->status);
return IB_MAD_RESULT_SUCCESS;
}
diff --git a/drivers/infiniband/hw/qedr/verbs.h b/drivers/infiniband/hw/qedr/verbs.h
index 9aaa90283d6e..18027844eb87 100644
--- a/drivers/infiniband/hw/qedr/verbs.h
+++ b/drivers/infiniband/hw/qedr/verbs.h
@@ -35,8 +35,6 @@
int qedr_query_device(struct ib_device *ibdev,
struct ib_device_attr *attr, struct ib_udata *udata);
int qedr_query_port(struct ib_device *, u8 port, struct ib_port_attr *props);
-int qedr_modify_port(struct ib_device *, u8 port, int mask,
- struct ib_port_modify *props);
int qedr_iw_query_gid(struct ib_device *ibdev, u8 port,
int index, union ib_gid *gid);
@@ -46,7 +44,8 @@ int qedr_query_pkey(struct ib_device *, u8 port, u16 index, u16 *pkey);
int qedr_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata);
void qedr_dealloc_ucontext(struct ib_ucontext *uctx);
-int qedr_mmap(struct ib_ucontext *, struct vm_area_struct *vma);
+int qedr_mmap(struct ib_ucontext *ucontext, struct vm_area_struct *vma);
+void qedr_mmap_free(struct rdma_user_mmap_entry *rdma_entry);
int qedr_alloc_pd(struct ib_pd *pd, struct ib_udata *udata);
void qedr_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata);
@@ -93,10 +92,9 @@ int qedr_post_recv(struct ib_qp *, const struct ib_recv_wr *,
const struct ib_recv_wr **bad_wr);
int qedr_process_mad(struct ib_device *ibdev, int process_mad_flags,
u8 port_num, const struct ib_wc *in_wc,
- const struct ib_grh *in_grh,
- const struct ib_mad_hdr *in_mad,
- size_t in_mad_size, struct ib_mad_hdr *out_mad,
- size_t *out_mad_size, u16 *out_mad_pkey_index);
+ const struct ib_grh *in_grh, const struct ib_mad *in_mad,
+ struct ib_mad *out_mad, size_t *out_mad_size,
+ u16 *out_mad_pkey_index);
int qedr_port_immutable(struct ib_device *ibdev, u8 port_num,
struct ib_port_immutable *immutable);
diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c
index 531d8a1db2c3..ca5ea734e3d0 100644
--- a/drivers/infiniband/hw/qib/qib_iba6120.c
+++ b/drivers/infiniband/hw/qib/qib_iba6120.c
@@ -1417,7 +1417,6 @@ static void qib_6120_quiet_serdes(struct qib_pportdata *ppd)
*
* The exact combination of LEDs, if @on is true, is determined by
* looking at the ibcstatus.
-
* These LEDs indicate the physical and logical state of IB link.
* For this chip (at least with recommended board pinouts), LED1
* is Yellow (logical state) and LED2 is Green (physical state),
diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c
index f92faf5ec369..79bb83222e8d 100644
--- a/drivers/infiniband/hw/qib/qib_mad.c
+++ b/drivers/infiniband/hw/qib/qib_mad.c
@@ -2098,8 +2098,6 @@ static int cc_get_classportinfo(struct ib_cc_mad *ccp,
struct ib_cc_classportinfo_attr *p =
(struct ib_cc_classportinfo_attr *)ccp->mgmt_data;
- memset(ccp->mgmt_data, 0, sizeof(ccp->mgmt_data));
-
p->base_version = 1;
p->class_version = 1;
p->cap_mask = 0;
@@ -2120,8 +2118,6 @@ static int cc_get_congestion_info(struct ib_cc_mad *ccp,
struct qib_ibport *ibp = to_iport(ibdev, port);
struct qib_pportdata *ppd = ppd_from_ibp(ibp);
- memset(ccp->mgmt_data, 0, sizeof(ccp->mgmt_data));
-
p->congestion_info = 0;
p->control_table_cap = ppd->cc_max_table_entries;
@@ -2138,8 +2134,6 @@ static int cc_get_congestion_setting(struct ib_cc_mad *ccp,
struct qib_pportdata *ppd = ppd_from_ibp(ibp);
struct ib_cc_congestion_entry_shadow *entries;
- memset(ccp->mgmt_data, 0, sizeof(ccp->mgmt_data));
-
spin_lock(&ppd->cc_shadow_lock);
entries = ppd->congestion_entries_shadow->entries;
@@ -2176,8 +2170,6 @@ static int cc_get_congestion_control_table(struct ib_cc_mad *ccp,
if (cct_block_index > IB_CC_TABLE_CAP_DEFAULT - 1)
goto bail;
- memset(ccp->mgmt_data, 0, sizeof(ccp->mgmt_data));
-
spin_lock(&ppd->cc_shadow_lock);
max_cct_block =
@@ -2296,18 +2288,11 @@ bail:
return reply_failure((struct ib_smp *) ccp);
}
-static int check_cc_key(struct qib_ibport *ibp,
- struct ib_cc_mad *ccp, int mad_flags)
-{
- return 0;
-}
-
static int process_cc(struct ib_device *ibdev, int mad_flags,
u8 port, const struct ib_mad *in_mad,
struct ib_mad *out_mad)
{
struct ib_cc_mad *ccp = (struct ib_cc_mad *)out_mad;
- struct qib_ibport *ibp = to_iport(ibdev, port);
int ret;
*out_mad = *in_mad;
@@ -2318,10 +2303,6 @@ static int process_cc(struct ib_device *ibdev, int mad_flags,
goto bail;
}
- ret = check_cc_key(ibp, ccp, mad_flags);
- if (ret)
- goto bail;
-
switch (ccp->method) {
case IB_MGMT_METHOD_GET:
switch (ccp->attr_id) {
@@ -2405,28 +2386,21 @@ bail:
*/
int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,
const struct ib_wc *in_wc, const struct ib_grh *in_grh,
- const struct ib_mad_hdr *in, size_t in_mad_size,
- struct ib_mad_hdr *out, size_t *out_mad_size,
- u16 *out_mad_pkey_index)
+ const struct ib_mad *in, struct ib_mad *out,
+ size_t *out_mad_size, u16 *out_mad_pkey_index)
{
int ret;
struct qib_ibport *ibp = to_iport(ibdev, port);
struct qib_pportdata *ppd = ppd_from_ibp(ibp);
- const struct ib_mad *in_mad = (const struct ib_mad *)in;
- struct ib_mad *out_mad = (struct ib_mad *)out;
-
- if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
- *out_mad_size != sizeof(*out_mad)))
- return IB_MAD_RESULT_FAILURE;
- switch (in_mad->mad_hdr.mgmt_class) {
+ switch (in->mad_hdr.mgmt_class) {
case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
case IB_MGMT_CLASS_SUBN_LID_ROUTED:
- ret = process_subn(ibdev, mad_flags, port, in_mad, out_mad);
+ ret = process_subn(ibdev, mad_flags, port, in, out);
goto bail;
case IB_MGMT_CLASS_PERF_MGMT:
- ret = process_perf(ibdev, port, in_mad, out_mad);
+ ret = process_perf(ibdev, port, in, out);
goto bail;
case IB_MGMT_CLASS_CONG_MGMT:
@@ -2435,7 +2409,7 @@ int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,
ret = IB_MAD_RESULT_SUCCESS;
goto bail;
}
- ret = process_cc(ibdev, mad_flags, port, in_mad, out_mad);
+ ret = process_cc(ibdev, mad_flags, port, in, out);
goto bail;
default:
diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c
index 3926be78036e..568b21eb6ea1 100644
--- a/drivers/infiniband/hw/qib/qib_sysfs.c
+++ b/drivers/infiniband/hw/qib/qib_sysfs.c
@@ -301,6 +301,9 @@ static ssize_t qib_portattr_show(struct kobject *kobj,
struct qib_pportdata *ppd =
container_of(kobj, struct qib_pportdata, pport_kobj);
+ if (!pattr->show)
+ return -EIO;
+
return pattr->show(ppd, buf);
}
@@ -312,6 +315,9 @@ static ssize_t qib_portattr_store(struct kobject *kobj,
struct qib_pportdata *ppd =
container_of(kobj, struct qib_pportdata, pport_kobj);
+ if (!pattr->store)
+ return -EIO;
+
return pattr->store(ppd, buf, len);
}
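
Returning -EIO when an attribute lacks a show or store callback mirrors what the sysfs core does for one-directional attributes; the guard keeps a read of a write-only file (or a write to a read-only one) from dereferencing a NULL function pointer. Sketched against a hypothetical demo_attr wrapper:

    struct demo_attr {
            struct attribute attr;
            ssize_t (*show)(struct kobject *kobj, char *buf);
            ssize_t (*store)(struct kobject *kobj, const char *buf,
                             size_t len);
    };

    static ssize_t demo_attr_show(struct kobject *kobj,
                                  struct attribute *attr, char *buf)
    {
            struct demo_attr *dattr =
                    container_of(attr, struct demo_attr, attr);

            if (!dattr->show)       /* write-only attribute */
                    return -EIO;
            return dattr->show(kobj, buf);
    }
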
diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h
index 17bdf8acee2f..8bf414b47b96 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.h
+++ b/drivers/infiniband/hw/qib/qib_verbs.h
@@ -245,9 +245,8 @@ void qib_sys_guid_chg(struct qib_ibport *ibp);
void qib_node_desc_chg(struct qib_ibport *ibp);
int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
const struct ib_wc *in_wc, const struct ib_grh *in_grh,
- const struct ib_mad_hdr *in, size_t in_mad_size,
- struct ib_mad_hdr *out, size_t *out_mad_size,
- u16 *out_mad_pkey_index);
+ const struct ib_mad *in, struct ib_mad *out,
+ size_t *out_mad_size, u16 *out_mad_pkey_index);
void qib_notify_create_mad_agent(struct rvt_dev_info *rdi, int port_idx);
void qib_notify_free_mad_agent(struct rvt_dev_info *rdi, int port_idx);
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
index 7800e6930502..a26a4fd86bf4 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
@@ -136,7 +136,7 @@ int pvrdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
}
cq->umem = ib_umem_get(udata, ucmd.buf_addr, ucmd.buf_size,
- IB_ACCESS_LOCAL_WRITE, 1);
+ IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(cq->umem)) {
ret = PTR_ERR(cq->umem);
goto err_cq;
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h
index 8f9749d54688..86a6c054ea26 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h
@@ -58,7 +58,8 @@
#define PVRDMA_ROCEV1_VERSION 17
#define PVRDMA_ROCEV2_VERSION 18
#define PVRDMA_PPN64_VERSION 19
-#define PVRDMA_VERSION PVRDMA_PPN64_VERSION
+#define PVRDMA_QPHANDLE_VERSION 20
+#define PVRDMA_VERSION PVRDMA_QPHANDLE_VERSION
#define PVRDMA_BOARD_ID 1
#define PVRDMA_REV_ID 1
@@ -581,6 +582,17 @@ struct pvrdma_cmd_create_qp_resp {
u32 max_inline_data;
};
+struct pvrdma_cmd_create_qp_resp_v2 {
+ struct pvrdma_cmd_resp_hdr hdr;
+ u32 qpn;
+ u32 qp_handle;
+ u32 max_send_wr;
+ u32 max_recv_wr;
+ u32 max_send_sge;
+ u32 max_recv_sge;
+ u32 max_inline_data;
+};
+
struct pvrdma_cmd_modify_qp {
struct pvrdma_cmd_hdr hdr;
u32 qp_handle;
@@ -663,6 +675,7 @@ union pvrdma_cmd_resp {
struct pvrdma_cmd_create_cq_resp create_cq_resp;
struct pvrdma_cmd_resize_cq_resp resize_cq_resp;
struct pvrdma_cmd_create_qp_resp create_qp_resp;
+ struct pvrdma_cmd_create_qp_resp_v2 create_qp_resp_v2;
struct pvrdma_cmd_query_qp_resp query_qp_resp;
struct pvrdma_cmd_destroy_qp_resp destroy_qp_resp;
struct pvrdma_cmd_create_srq_resp create_srq_resp;
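
The v2 response layout coexists with the v1 layout in the same response union; which one the device fills in follows from the negotiated dsr_version. A sketch of the dispatch this enables (the helper name demo_qp_ids is hypothetical; the struct and field names come from the hunk above):

    static void demo_qp_ids(u32 dsr_version,
                            const union pvrdma_cmd_resp *rsp,
                            u32 *qpn, u32 *qp_handle)
    {
            if (dsr_version >= PVRDMA_QPHANDLE_VERSION) {
                    *qpn = rsp->create_qp_resp_v2.qpn;
                    *qp_handle = rsp->create_qp_resp_v2.qp_handle;
            } else {
                    /* Older devices report a single id for both roles. */
                    *qpn = rsp->create_qp_resp.qpn;
                    *qp_handle = rsp->create_qp_resp.qpn;
            }
    }
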
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c
index f3a3d22ee8d7..c61e665ff261 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c
@@ -126,7 +126,7 @@ struct ib_mr *pvrdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
return ERR_PTR(-EINVAL);
}
- umem = ib_umem_get(udata, start, length, access_flags, 0);
+ umem = ib_umem_get(udata, start, length, access_flags);
if (IS_ERR(umem)) {
dev_warn(&dev->pdev->dev,
"could not get umem for mem region\n");
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
index bca6a58a442e..f15809c28f67 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
@@ -52,6 +52,9 @@
#include "pvrdma.h"
+static void __pvrdma_destroy_qp(struct pvrdma_dev *dev,
+ struct pvrdma_qp *qp);
+
static inline void get_cqs(struct pvrdma_qp *qp, struct pvrdma_cq **send_cq,
struct pvrdma_cq **recv_cq)
{
@@ -195,7 +198,9 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
union pvrdma_cmd_resp rsp;
struct pvrdma_cmd_create_qp *cmd = &req.create_qp;
struct pvrdma_cmd_create_qp_resp *resp = &rsp.create_qp_resp;
+ struct pvrdma_cmd_create_qp_resp_v2 *resp_v2 = &rsp.create_qp_resp_v2;
struct pvrdma_create_qp ucmd;
+ struct pvrdma_create_qp_resp qp_resp = {};
unsigned long flags;
int ret;
bool is_srq = !!init_attr->srq;
@@ -260,10 +265,19 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
goto err_qp;
}
+ /* Userspace supports qpn and qp handles? */
+ if (dev->dsr_version >= PVRDMA_QPHANDLE_VERSION &&
+ udata->outlen < sizeof(qp_resp)) {
+ dev_warn(&dev->pdev->dev,
+ "create queuepair not supported\n");
+ ret = -EOPNOTSUPP;
+ goto err_qp;
+ }
+
if (!is_srq) {
/* set qp->sq.wqe_cnt, shift, buf_size.. */
qp->rumem = ib_umem_get(udata, ucmd.rbuf_addr,
- ucmd.rbuf_size, 0, 0);
+ ucmd.rbuf_size, 0);
if (IS_ERR(qp->rumem)) {
ret = PTR_ERR(qp->rumem);
goto err_qp;
@@ -275,7 +289,7 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
}
qp->sumem = ib_umem_get(udata, ucmd.sbuf_addr,
- ucmd.sbuf_size, 0, 0);
+ ucmd.sbuf_size, 0);
if (IS_ERR(qp->sumem)) {
if (!is_srq)
ib_umem_release(qp->rumem);
@@ -379,13 +393,33 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
}
/* max_send_wr/_recv_wr/_send_sge/_recv_sge/_inline_data */
- qp->qp_handle = resp->qpn;
qp->port = init_attr->port_num;
- qp->ibqp.qp_num = resp->qpn;
+
+ if (dev->dsr_version >= PVRDMA_QPHANDLE_VERSION) {
+ qp->ibqp.qp_num = resp_v2->qpn;
+ qp->qp_handle = resp_v2->qp_handle;
+ } else {
+ qp->ibqp.qp_num = resp->qpn;
+ qp->qp_handle = resp->qpn;
+ }
+
spin_lock_irqsave(&dev->qp_tbl_lock, flags);
dev->qp_tbl[qp->qp_handle % dev->dsr->caps.max_qp] = qp;
spin_unlock_irqrestore(&dev->qp_tbl_lock, flags);
+ if (udata) {
+ qp_resp.qpn = qp->ibqp.qp_num;
+ qp_resp.qp_handle = qp->qp_handle;
+
+ if (ib_copy_to_udata(udata, &qp_resp,
+ min(udata->outlen, sizeof(qp_resp)))) {
+ dev_warn(&dev->pdev->dev,
+ "failed to copy back udata\n");
+ __pvrdma_destroy_qp(dev, qp);
+ return ERR_PTR(-EINVAL);
+ }
+ }
+
return &qp->ibqp;
err_pdir:
@@ -400,27 +434,15 @@ err_qp:
return ERR_PTR(ret);
}
-static void pvrdma_free_qp(struct pvrdma_qp *qp)
+static void _pvrdma_free_qp(struct pvrdma_qp *qp)
{
+ unsigned long flags;
struct pvrdma_dev *dev = to_vdev(qp->ibqp.device);
- struct pvrdma_cq *scq;
- struct pvrdma_cq *rcq;
- unsigned long flags, scq_flags, rcq_flags;
-
- /* In case cq is polling */
- get_cqs(qp, &scq, &rcq);
- pvrdma_lock_cqs(scq, rcq, &scq_flags, &rcq_flags);
-
- _pvrdma_flush_cqe(qp, scq);
- if (scq != rcq)
- _pvrdma_flush_cqe(qp, rcq);
spin_lock_irqsave(&dev->qp_tbl_lock, flags);
dev->qp_tbl[qp->qp_handle] = NULL;
spin_unlock_irqrestore(&dev->qp_tbl_lock, flags);
- pvrdma_unlock_cqs(scq, rcq, &scq_flags, &rcq_flags);
-
if (refcount_dec_and_test(&qp->refcnt))
complete(&qp->free);
wait_for_completion(&qp->free);
@@ -435,34 +457,71 @@ static void pvrdma_free_qp(struct pvrdma_qp *qp)
atomic_dec(&dev->num_qps);
}
-/**
- * pvrdma_destroy_qp - destroy a queue pair
- * @qp: the queue pair to destroy
- * @udata: user data or null for kernel object
- *
- * @return: 0 on success.
- */
-int pvrdma_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
+static void pvrdma_free_qp(struct pvrdma_qp *qp)
+{
+ struct pvrdma_cq *scq;
+ struct pvrdma_cq *rcq;
+ unsigned long scq_flags, rcq_flags;
+
+ /* In case cq is polling */
+ get_cqs(qp, &scq, &rcq);
+ pvrdma_lock_cqs(scq, rcq, &scq_flags, &rcq_flags);
+
+ _pvrdma_flush_cqe(qp, scq);
+ if (scq != rcq)
+ _pvrdma_flush_cqe(qp, rcq);
+
+ /*
+ * We're now unlocking the CQs before clearing out the qp handle;
+ * this should still be safe. We have destroyed the backend QP and
+ * flushed the CQEs, so there should be no other completions for
+ * this QP.
+ */
+ pvrdma_unlock_cqs(scq, rcq, &scq_flags, &rcq_flags);
+
+ _pvrdma_free_qp(qp);
+}
+
+static inline void _pvrdma_destroy_qp_work(struct pvrdma_dev *dev,
+ u32 qp_handle)
{
- struct pvrdma_qp *vqp = to_vqp(qp);
union pvrdma_cmd_req req;
struct pvrdma_cmd_destroy_qp *cmd = &req.destroy_qp;
int ret;
memset(cmd, 0, sizeof(*cmd));
cmd->hdr.cmd = PVRDMA_CMD_DESTROY_QP;
- cmd->qp_handle = vqp->qp_handle;
+ cmd->qp_handle = qp_handle;
- ret = pvrdma_cmd_post(to_vdev(qp->device), &req, NULL, 0);
+ ret = pvrdma_cmd_post(dev, &req, NULL, 0);
if (ret < 0)
- dev_warn(&to_vdev(qp->device)->pdev->dev,
+ dev_warn(&dev->pdev->dev,
"destroy queuepair failed, error: %d\n", ret);
+}
+/**
+ * pvrdma_destroy_qp - destroy a queue pair
+ * @qp: the queue pair to destroy
+ * @udata: user data or null for kernel object
+ *
+ * @return: always 0.
+ */
+int pvrdma_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
+{
+ struct pvrdma_qp *vqp = to_vqp(qp);
+
+ _pvrdma_destroy_qp_work(to_vdev(qp->device), vqp->qp_handle);
pvrdma_free_qp(vqp);
return 0;
}
+static void __pvrdma_destroy_qp(struct pvrdma_dev *dev,
+ struct pvrdma_qp *qp)
+{
+ _pvrdma_destroy_qp_work(dev, qp->qp_handle);
+ _pvrdma_free_qp(qp);
+}
+
/**
* pvrdma_modify_qp - modify queue pair attributes
* @ibqp: the queue pair
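
The copy-back added to the create path above follows the usual udata convention: clamp the copy length to min(udata->outlen, sizeof(resp)) so an older userspace with a shorter response struct still receives the prefix it understands, and tear the object down if even that copy fails. A minimal sketch, with a hypothetical demo_resp:

    struct demo_resp {
            u32 qpn;
            u32 qp_handle;
    };

    static int demo_copy_resp(struct ib_udata *udata,
                              struct demo_resp *resp)
    {
            if (!udata)
                    return 0;       /* kernel consumer, nothing to copy */
            return ib_copy_to_udata(udata, resp,
                                    min(udata->outlen, sizeof(*resp)));
    }
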
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c
index 36cdfbdbd325..98c8be71d91d 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c
@@ -146,7 +146,7 @@ int pvrdma_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init_attr,
goto err_srq;
}
- srq->umem = ib_umem_get(udata, ucmd.buf_addr, ucmd.buf_size, 0, 0);
+ srq->umem = ib_umem_get(udata, ucmd.buf_addr, ucmd.buf_size, 0);
if (IS_ERR(srq->umem)) {
ret = PTR_ERR(srq->umem);
goto err_srq;
diff --git a/drivers/infiniband/sw/rdmavt/ah.c b/drivers/infiniband/sw/rdmavt/ah.c
index fe99da0ff060..ee02c6176007 100644
--- a/drivers/infiniband/sw/rdmavt/ah.c
+++ b/drivers/infiniband/sw/rdmavt/ah.c
@@ -129,7 +129,6 @@ int rvt_create_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr,
* rvt_destory_ah - Destroy an address handle
* @ibah: address handle
* @destroy_flags: destroy address handle flags (see enum rdma_destroy_ah_flags)
- * @udata: user data or NULL for kernel object
*
* Return: 0 on success
*/
diff --git a/drivers/infiniband/sw/rdmavt/cq.c b/drivers/infiniband/sw/rdmavt/cq.c
index a85571a4cf57..13d7f66eadab 100644
--- a/drivers/infiniband/sw/rdmavt/cq.c
+++ b/drivers/infiniband/sw/rdmavt/cq.c
@@ -552,7 +552,6 @@ int rvt_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
/**
* rvt_driver_cq_init - Init cq resources on behalf of driver
- * @rdi: rvt dev structure
*
* Return: 0 on success
*/
@@ -568,7 +567,6 @@ int rvt_driver_cq_init(void)
/**
* rvt_cq_exit - tear down cq reources
- * @rdi: rvt dev structure
*/
void rvt_cq_exit(void)
{
diff --git a/drivers/infiniband/sw/rdmavt/mr.c b/drivers/infiniband/sw/rdmavt/mr.c
index a6a39f01dca3..b9a76bf74857 100644
--- a/drivers/infiniband/sw/rdmavt/mr.c
+++ b/drivers/infiniband/sw/rdmavt/mr.c
@@ -390,7 +390,7 @@ struct ib_mr *rvt_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
if (length == 0)
return ERR_PTR(-EINVAL);
- umem = ib_umem_get(udata, start, length, mr_access_flags, 0);
+ umem = ib_umem_get(udata, start, length, mr_access_flags);
if (IS_ERR(umem))
return (void *)umem;
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index 0b0a241c57ff..3cdf75d0c7a4 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -2563,10 +2563,9 @@ void rvt_add_retry_timer_ext(struct rvt_qp *qp, u8 shift)
EXPORT_SYMBOL(rvt_add_retry_timer_ext);
/**
- * rvt_add_rnr_timer - add/start an rnr timer
- * @qp - the QP
- * @aeth - aeth of RNR timeout, simulated aeth for loopback
- * add an rnr timer on the QP
+ * rvt_add_rnr_timer - add/start an rnr timer on the QP
+ * @qp: the QP
+ * @aeth: aeth of RNR timeout, simulated aeth for loopback
*/
void rvt_add_rnr_timer(struct rvt_qp *qp, u32 aeth)
{
@@ -2583,7 +2582,7 @@ EXPORT_SYMBOL(rvt_add_rnr_timer);
/**
* rvt_stop_rc_timers - stop all timers
- * @qp - the QP
+ * @qp: the QP
* stop any pending timers
*/
void rvt_stop_rc_timers(struct rvt_qp *qp)
@@ -2617,7 +2616,7 @@ static void rvt_stop_rnr_timer(struct rvt_qp *qp)
/**
* rvt_del_timers_sync - wait for any timeout routines to exit
- * @qp - the QP
+ * @qp: the QP
*/
void rvt_del_timers_sync(struct rvt_qp *qp)
{
@@ -2626,7 +2625,7 @@ void rvt_del_timers_sync(struct rvt_qp *qp)
}
EXPORT_SYMBOL(rvt_del_timers_sync);
-/**
+/*
* This is called from s_timer for missing responses.
*/
static void rvt_rc_timeout(struct timer_list *t)
@@ -2676,12 +2675,13 @@ EXPORT_SYMBOL(rvt_rc_rnr_retry);
* rvt_qp_iter_init - initial for QP iteration
* @rdi: rvt devinfo
* @v: u64 value
+ * @cb: user-defined callback
*
* This returns an iterator suitable for iterating QPs
* in the system.
*
- * The @cb is a user defined callback and @v is a 64
- * bit value passed to and relevant for processing in the
+ * The @cb is a user-defined callback and @v is a 64-bit
+ * value passed to and relevant for processing in the
* @cb. An example use case would be to alter QP processing
* based on criteria not part of the rvt_qp.
*
@@ -2712,7 +2712,7 @@ EXPORT_SYMBOL(rvt_qp_iter_init);
/**
* rvt_qp_iter_next - return the next QP in iter
- * @iter - the iterator
+ * @iter: the iterator
*
* Fine grained QP iterator suitable for use
* with debugfs seq_file mechanisms.
@@ -2775,14 +2775,14 @@ EXPORT_SYMBOL(rvt_qp_iter_next);
/**
* rvt_qp_iter - iterate all QPs
- * @rdi - rvt devinfo
- * @v - a 64 bit value
- * @cb - a callback
+ * @rdi: rvt devinfo
+ * @v: a 64-bit value
+ * @cb: a callback
*
* This provides a way for iterating all QPs.
*
- * The @cb is a user defined callback and @v is a 64
- * bit value passed to and relevant for processing in the
+ * The @cb is a user-defined callback and @v is a 64-bit
+ * value passed to and relevant for processing in the
* cb. An example use case would be to alter QP processing
* based on criteria not part of the rvt_qp.
*
diff --git a/drivers/infiniband/sw/rdmavt/vt.c b/drivers/infiniband/sw/rdmavt/vt.c
index 18da1e1ea979..986265ad6e79 100644
--- a/drivers/infiniband/sw/rdmavt/vt.c
+++ b/drivers/infiniband/sw/rdmavt/vt.c
@@ -683,9 +683,10 @@ EXPORT_SYMBOL(rvt_unregister_device);
/**
* rvt_init_port - init internal data for driver port
- * @rdi: rvt dev strut
+ * @rdi: rvt_dev_info struct
* @port: rvt port
* @port_index: 0 based index of ports, different from IB core port num
+ * @pkey_table: pkey_table for @port
*
* Keep track of a list of ports. No need to have a detach port.
* They persist until the driver goes away.
diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c
index a8c11b5e1e94..0946a301a5c5 100644
--- a/drivers/infiniband/sw/rxe/rxe.c
+++ b/drivers/infiniband/sw/rxe/rxe.c
@@ -77,12 +77,8 @@ static void rxe_init_device_param(struct rxe_dev *rxe)
{
rxe->max_inline_data = RXE_MAX_INLINE_DATA;
- rxe->attr.fw_ver = RXE_FW_VER;
rxe->attr.max_mr_size = RXE_MAX_MR_SIZE;
rxe->attr.page_size_cap = RXE_PAGE_SIZE_CAP;
- rxe->attr.vendor_id = RXE_VENDOR_ID;
- rxe->attr.vendor_part_id = RXE_VENDOR_PART_ID;
- rxe->attr.hw_ver = RXE_HW_VER;
rxe->attr.max_qp = RXE_MAX_QP;
rxe->attr.max_qp_wr = RXE_MAX_QP_WR;
rxe->attr.device_cap_flags = RXE_DEVICE_CAP_FLAGS;
@@ -94,22 +90,13 @@ static void rxe_init_device_param(struct rxe_dev *rxe)
rxe->attr.max_mr = RXE_MAX_MR;
rxe->attr.max_pd = RXE_MAX_PD;
rxe->attr.max_qp_rd_atom = RXE_MAX_QP_RD_ATOM;
- rxe->attr.max_ee_rd_atom = RXE_MAX_EE_RD_ATOM;
rxe->attr.max_res_rd_atom = RXE_MAX_RES_RD_ATOM;
rxe->attr.max_qp_init_rd_atom = RXE_MAX_QP_INIT_RD_ATOM;
- rxe->attr.max_ee_init_rd_atom = RXE_MAX_EE_INIT_RD_ATOM;
rxe->attr.atomic_cap = IB_ATOMIC_HCA;
- rxe->attr.max_ee = RXE_MAX_EE;
- rxe->attr.max_rdd = RXE_MAX_RDD;
- rxe->attr.max_mw = RXE_MAX_MW;
- rxe->attr.max_raw_ipv6_qp = RXE_MAX_RAW_IPV6_QP;
- rxe->attr.max_raw_ethy_qp = RXE_MAX_RAW_ETHY_QP;
rxe->attr.max_mcast_grp = RXE_MAX_MCAST_GRP;
rxe->attr.max_mcast_qp_attach = RXE_MAX_MCAST_QP_ATTACH;
rxe->attr.max_total_mcast_qp_attach = RXE_MAX_TOT_MCAST_QP_ATTACH;
rxe->attr.max_ah = RXE_MAX_AH;
- rxe->attr.max_fmr = RXE_MAX_FMR;
- rxe->attr.max_map_per_fmr = RXE_MAX_MAP_PER_FMR;
rxe->attr.max_srq = RXE_MAX_SRQ;
rxe->attr.max_srq_wr = RXE_MAX_SRQ_WR;
rxe->attr.max_srq_sge = RXE_MAX_SRQ_SGE;
diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
index ea6a819b7167..35a2baf2f364 100644
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -169,7 +169,7 @@ int rxe_mem_init_user(struct rxe_pd *pd, u64 start,
void *vaddr;
int err;
- umem = ib_umem_get(udata, start, length, access, 0);
+ umem = ib_umem_get(udata, start, length, access);
if (IS_ERR(umem)) {
pr_warn("err %d from rxe_umem_get\n",
(int)PTR_ERR(umem));
diff --git a/drivers/infiniband/sw/rxe/rxe_param.h b/drivers/infiniband/sw/rxe/rxe_param.h
index fe5207386700..353c6668249e 100644
--- a/drivers/infiniband/sw/rxe/rxe_param.h
+++ b/drivers/infiniband/sw/rxe/rxe_param.h
@@ -60,12 +60,8 @@ static inline enum ib_mtu eth_mtu_int_to_enum(int mtu)
/* default/initial rxe device parameter settings */
enum rxe_device_param {
- RXE_FW_VER = 0,
RXE_MAX_MR_SIZE = -1ull,
RXE_PAGE_SIZE_CAP = 0xfffff000,
- RXE_VENDOR_ID = 0,
- RXE_VENDOR_PART_ID = 0,
- RXE_HW_VER = 0,
RXE_MAX_QP = 0x10000,
RXE_MAX_QP_WR = 0x4000,
RXE_MAX_INLINE_DATA = 400,
@@ -87,21 +83,12 @@ enum rxe_device_param {
RXE_MAX_MR = 256 * 1024,
RXE_MAX_PD = 0x7ffc,
RXE_MAX_QP_RD_ATOM = 128,
- RXE_MAX_EE_RD_ATOM = 0,
RXE_MAX_RES_RD_ATOM = 0x3f000,
RXE_MAX_QP_INIT_RD_ATOM = 128,
- RXE_MAX_EE_INIT_RD_ATOM = 0,
- RXE_MAX_EE = 0,
- RXE_MAX_RDD = 0,
- RXE_MAX_MW = 0,
- RXE_MAX_RAW_IPV6_QP = 0,
- RXE_MAX_RAW_ETHY_QP = 0,
RXE_MAX_MCAST_GRP = 8192,
RXE_MAX_MCAST_QP_ATTACH = 56,
RXE_MAX_TOT_MCAST_QP_ATTACH = 0x70000,
RXE_MAX_AH = 100,
- RXE_MAX_FMR = 0,
- RXE_MAX_MAP_PER_FMR = 0,
RXE_MAX_SRQ = 960,
RXE_MAX_SRQ_WR = 0x4000,
RXE_MIN_SRQ_WR = 1,
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index 623129f27f5a..9dd4bd7aea92 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -106,6 +106,10 @@ static int rxe_modify_device(struct ib_device *dev,
{
struct rxe_dev *rxe = to_rdev(dev);
+ if (mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
+ IB_DEVICE_MODIFY_NODE_DESC))
+ return -EOPNOTSUPP;
+
if (mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID)
rxe->attr.sys_image_guid = cpu_to_be64(attr->sys_image_guid);
@@ -1171,6 +1175,9 @@ int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name)
addrconf_addr_eui48((unsigned char *)&dev->node_guid,
rxe->ndev->dev_addr);
dev->dev.dma_ops = &dma_virt_ops;
+ dev->dev.dma_parms = &rxe->dma_parms;
+ rxe->dma_parms = (struct device_dma_parameters)
+ { .max_segment_size = SZ_2G };
dma_coerce_mask_and_coherent(&dev->dev,
dma_get_required_mask(&dev->dev));
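
Both rxe (here) and siw (below) now point dev.dma_parms at storage embedded in the driver's device struct, so the DMA API sees an explicit maximum segment size instead of the default. The wiring, sketched against a hypothetical demo_dev:

    #include <linux/sizes.h>

    struct demo_dev {
            struct ib_device ib_dev;
            struct device_dma_parameters dma_parms;
    };

    static void demo_set_dma_parms(struct demo_dev *d)
    {
            d->ib_dev.dev.dma_parms = &d->dma_parms;
            d->dma_parms = (struct device_dma_parameters)
                    { .max_segment_size = SZ_2G };
    }
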
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h
index 5c4b2239129c..95834206c80c 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.h
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
@@ -384,6 +384,7 @@ struct rxe_port {
struct rxe_dev {
struct ib_device ib_dev;
struct ib_device_attr attr;
+ struct device_dma_parameters dma_parms;
int max_ucontext;
int max_inline_data;
struct mutex usdev_lock;
diff --git a/drivers/infiniband/sw/siw/siw.h b/drivers/infiniband/sw/siw/siw.h
index dba4535494ab..b939f489cd46 100644
--- a/drivers/infiniband/sw/siw/siw.h
+++ b/drivers/infiniband/sw/siw/siw.h
@@ -70,6 +70,7 @@ struct siw_pd {
struct siw_device {
struct ib_device base_dev;
+ struct device_dma_parameters dma_parms;
struct net_device *netdev;
struct siw_dev_cap attrs;
@@ -98,18 +99,9 @@ struct siw_device {
struct work_struct netdev_down;
};
-struct siw_uobj {
- void *addr;
- u32 size;
-};
-
struct siw_ucontext {
struct ib_ucontext base_ucontext;
struct siw_device *sdev;
-
- /* xarray of user mappable objects */
- struct xarray xa;
- u32 uobj_nextkey;
};
/*
@@ -149,8 +141,6 @@ struct siw_pbl {
struct siw_pble pbe[1];
};
-struct siw_mr;
-
/*
* Generic memory representation for registered siw memory.
* Memory lookup always via higher 24 bit of STag (STag index).
@@ -220,7 +210,7 @@ struct siw_cq {
u32 cq_get;
u32 num_cqe;
bool kernel_verbs;
- u32 xa_cq_index; /* mmap information for CQE array */
+ struct rdma_user_mmap_entry *cq_entry; /* mmap info for CQE array */
u32 id; /* For debugging only */
};
@@ -263,7 +253,7 @@ struct siw_srq {
u32 rq_put;
u32 rq_get;
u32 num_rqe; /* max # of wqe's allowed */
- u32 xa_srq_index; /* mmap information for SRQ array */
+ struct rdma_user_mmap_entry *srq_entry; /* mmap info for SRQ array */
char armed; /* inform user if limit hit */
char kernel_verbs; /* '1' if kernel client */
};
@@ -477,8 +467,8 @@ struct siw_qp {
u8 layer : 4, etype : 4;
u8 ecode;
} term_info;
- u32 xa_sq_index; /* mmap information for SQE array */
- u32 xa_rq_index; /* mmap information for RQE array */
+ struct rdma_user_mmap_entry *sq_entry; /* mmap info for SQE array */
+ struct rdma_user_mmap_entry *rq_entry; /* mmap info for RQE array */
struct rcu_head rcu;
};
@@ -503,6 +493,11 @@ struct iwarp_msg_info {
int (*rx_data)(struct siw_qp *qp);
};
+struct siw_user_mmap_entry {
+ struct rdma_user_mmap_entry rdma_entry;
+ void *address;
+};
+
/* Global siw parameters. Currently set in siw_main.c */
extern const bool zcopy_tx;
extern const bool try_gso;
@@ -607,6 +602,12 @@ static inline struct siw_mr *to_siw_mr(struct ib_mr *base_mr)
return container_of(base_mr, struct siw_mr, base_mr);
}
+static inline struct siw_user_mmap_entry *
+to_siw_mmap_entry(struct rdma_user_mmap_entry *rdma_mmap)
+{
+ return container_of(rdma_mmap, struct siw_user_mmap_entry, rdma_entry);
+}
+
static inline struct siw_qp *siw_qp_id2obj(struct siw_device *sdev, int id)
{
struct siw_qp *qp;
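
The per-context xarray of siw_uobj keys is gone; each user-mappable queue now embeds a core-managed rdma_user_mmap_entry, and container_of() recovers the wrapper. A sketch of the entry lifecycle this header sets up (the demo_* names are hypothetical; the rdma_user_mmap_* calls are the real core API):

    struct demo_entry {
            struct rdma_user_mmap_entry rdma_entry;
            void *address;
    };

    static int demo_entry_insert(struct ib_ucontext *uctx, void *addr,
                                 size_t length, u64 *offset)
    {
            struct demo_entry *e = kzalloc(sizeof(*e), GFP_KERNEL);
            int rv;

            if (!e)
                    return -ENOMEM;
            e->address = addr;
            rv = rdma_user_mmap_entry_insert(uctx, &e->rdma_entry, length);
            if (rv) {
                    kfree(e);
                    return rv;
            }
            /* userspace passes this offset back in through mmap() */
            *offset = rdma_user_mmap_get_offset(&e->rdma_entry);
            return 0;
    }

    /* invoked by the core once the last reference drops */
    static void demo_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
    {
            kfree(container_of(rdma_entry, struct demo_entry, rdma_entry));
    }
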
diff --git a/drivers/infiniband/sw/siw/siw_cm.c b/drivers/infiniband/sw/siw/siw_cm.c
index 8c1931a57f4a..3bccfef40e7e 100644
--- a/drivers/infiniband/sw/siw/siw_cm.c
+++ b/drivers/infiniband/sw/siw/siw_cm.c
@@ -1373,22 +1373,8 @@ int siw_connect(struct iw_cm_id *id, struct iw_cm_conn_param *params)
rv = -EINVAL;
goto error;
}
- if (v4)
- siw_dbg_qp(qp,
- "pd_len %d, laddr %pI4 %d, raddr %pI4 %d\n",
- pd_len,
- &((struct sockaddr_in *)(laddr))->sin_addr,
- ntohs(((struct sockaddr_in *)(laddr))->sin_port),
- &((struct sockaddr_in *)(raddr))->sin_addr,
- ntohs(((struct sockaddr_in *)(raddr))->sin_port));
- else
- siw_dbg_qp(qp,
- "pd_len %d, laddr %pI6 %d, raddr %pI6 %d\n",
- pd_len,
- &((struct sockaddr_in6 *)(laddr))->sin6_addr,
- ntohs(((struct sockaddr_in6 *)(laddr))->sin6_port),
- &((struct sockaddr_in6 *)(raddr))->sin6_addr,
- ntohs(((struct sockaddr_in6 *)(raddr))->sin6_port));
+ siw_dbg_qp(qp, "pd_len %d, laddr %pISp, raddr %pISp\n", pd_len, laddr,
+ raddr);
rv = sock_create(v4 ? AF_INET : AF_INET6, SOCK_STREAM, IPPROTO_TCP, &s);
if (rv < 0)
@@ -1867,14 +1853,7 @@ static int siw_listen_address(struct iw_cm_id *id, int backlog,
list_add_tail(&cep->listenq, (struct list_head *)id->provider_data);
cep->state = SIW_EPSTATE_LISTENING;
- if (addr_family == AF_INET)
- siw_dbg(id->device, "Listen at laddr %pI4 %u\n",
- &(((struct sockaddr_in *)laddr)->sin_addr),
- ((struct sockaddr_in *)laddr)->sin_port);
- else
- siw_dbg(id->device, "Listen at laddr %pI6 %u\n",
- &(((struct sockaddr_in6 *)laddr)->sin6_addr),
- ((struct sockaddr_in6 *)laddr)->sin6_port);
+ siw_dbg(id->device, "Listen at laddr %pISp\n", laddr);
return 0;
@@ -1935,7 +1914,7 @@ static void siw_drop_listeners(struct iw_cm_id *id)
/*
* siw_create_listen - Create resources for a listener's IWCM ID @id
*
- * Listens on the socket addresses id->local_addr and id->remote_addr.
+ * Listens on the socket address id->local_addr.
*
* If the listener's @id provides a specific local IP address, at most one
* listening socket is created and associated with @id.
@@ -1959,7 +1938,7 @@ int siw_create_listen(struct iw_cm_id *id, int backlog)
*/
if (id->local_addr.ss_family == AF_INET) {
struct in_device *in_dev = in_dev_get(dev);
- struct sockaddr_in s_laddr, *s_raddr;
+ struct sockaddr_in s_laddr;
const struct in_ifaddr *ifa;
if (!in_dev) {
@@ -1967,12 +1946,8 @@ int siw_create_listen(struct iw_cm_id *id, int backlog)
goto out;
}
memcpy(&s_laddr, &id->local_addr, sizeof(s_laddr));
- s_raddr = (struct sockaddr_in *)&id->remote_addr;
- siw_dbg(id->device,
- "laddr %pI4:%d, raddr %pI4:%d\n",
- &s_laddr.sin_addr, ntohs(s_laddr.sin_port),
- &s_raddr->sin_addr, ntohs(s_raddr->sin_port));
+ siw_dbg(id->device, "laddr %pISp\n", &s_laddr);
rtnl_lock();
in_dev_for_each_ifa_rtnl(ifa, in_dev) {
@@ -1992,17 +1967,13 @@ int siw_create_listen(struct iw_cm_id *id, int backlog)
} else if (id->local_addr.ss_family == AF_INET6) {
struct inet6_dev *in6_dev = in6_dev_get(dev);
struct inet6_ifaddr *ifp;
- struct sockaddr_in6 *s_laddr = &to_sockaddr_in6(id->local_addr),
- *s_raddr = &to_sockaddr_in6(id->remote_addr);
+ struct sockaddr_in6 *s_laddr = &to_sockaddr_in6(id->local_addr);
if (!in6_dev) {
rv = -ENODEV;
goto out;
}
- siw_dbg(id->device,
- "laddr %pI6:%d, raddr %pI6:%d\n",
- &s_laddr->sin6_addr, ntohs(s_laddr->sin6_port),
- &s_raddr->sin6_addr, ntohs(s_raddr->sin6_port));
+ siw_dbg(id->device, "Listen at laddr %pISp\n", s_laddr);
rtnl_lock();
list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
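
The collapsed debug prints lean on the %pIS printk extension, which formats a struct sockaddr of either address family; the 'p' suffix appends the port, so one format string replaces the old per-family branches. For example, with a hypothetical helper:

    static void demo_log_peer(const struct sockaddr *laddr,
                              const struct sockaddr *raddr)
    {
            /* one specifier handles IPv4 and IPv6, port appended */
            pr_debug("laddr %pISp, raddr %pISp\n", laddr, raddr);
    }
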
diff --git a/drivers/infiniband/sw/siw/siw_main.c b/drivers/infiniband/sw/siw/siw_main.c
index 05a92f997f60..c147f0613d95 100644
--- a/drivers/infiniband/sw/siw/siw_main.c
+++ b/drivers/infiniband/sw/siw/siw_main.c
@@ -16,6 +16,7 @@
#include <linux/module.h>
#include <linux/dma-mapping.h>
+#include <net/addrconf.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/rdma_netlink.h>
@@ -248,24 +249,6 @@ static struct ib_qp *siw_get_base_qp(struct ib_device *base_dev, int id)
return NULL;
}
-static void siw_verbs_sq_flush(struct ib_qp *base_qp)
-{
- struct siw_qp *qp = to_siw_qp(base_qp);
-
- down_write(&qp->state_lock);
- siw_sq_flush(qp);
- up_write(&qp->state_lock);
-}
-
-static void siw_verbs_rq_flush(struct ib_qp *base_qp)
-{
- struct siw_qp *qp = to_siw_qp(base_qp);
-
- down_write(&qp->state_lock);
- siw_rq_flush(qp);
- up_write(&qp->state_lock);
-}
-
static const struct ib_device_ops siw_device_ops = {
.owner = THIS_MODULE,
.uverbs_abi_ver = SIW_ABI_VERSION,
@@ -284,8 +267,6 @@ static const struct ib_device_ops siw_device_ops = {
.destroy_cq = siw_destroy_cq,
.destroy_qp = siw_destroy_qp,
.destroy_srq = siw_destroy_srq,
- .drain_rq = siw_verbs_rq_flush,
- .drain_sq = siw_verbs_sq_flush,
.get_dma_mr = siw_get_dma_mr,
.get_port_immutable = siw_get_port_immutable,
.iw_accept = siw_accept,
@@ -298,6 +279,7 @@ static const struct ib_device_ops siw_device_ops = {
.iw_rem_ref = siw_qp_put_ref,
.map_mr_sg = siw_map_mr_sg,
.mmap = siw_mmap,
+ .mmap_free = siw_mmap_free,
.modify_qp = siw_verbs_modify_qp,
.modify_srq = siw_modify_srq,
.poll_cq = siw_poll_cq,
@@ -350,15 +332,19 @@ static struct siw_device *siw_device_create(struct net_device *netdev)
sdev->netdev = netdev;
if (netdev->type != ARPHRD_LOOPBACK) {
- memcpy(&base_dev->node_guid, netdev->dev_addr, 6);
+ addrconf_addr_eui48((unsigned char *)&base_dev->node_guid,
+ netdev->dev_addr);
} else {
/*
* The loopback device does not have a HW address,
* but connection management lib expects gid != 0
*/
- size_t gidlen = min_t(size_t, strlen(base_dev->name), 6);
+ size_t len = min_t(size_t, strlen(base_dev->name), 6);
+ char addr[6] = { };
- memcpy(&base_dev->node_guid, base_dev->name, gidlen);
+ memcpy(addr, base_dev->name, len);
+ addrconf_addr_eui48((unsigned char *)&base_dev->node_guid,
+ addr);
}
base_dev->uverbs_cmd_mask =
(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
@@ -397,6 +383,9 @@ static struct siw_device *siw_device_create(struct net_device *netdev)
base_dev->phys_port_cnt = 1;
base_dev->dev.parent = parent;
base_dev->dev.dma_ops = &dma_virt_ops;
+ base_dev->dev.dma_parms = &sdev->dma_parms;
+ sdev->dma_parms = (struct device_dma_parameters)
+ { .max_segment_size = SZ_2G };
base_dev->num_comp_vectors = num_possible_cpus();
ib_set_device_ops(base_dev, &siw_device_ops);
diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
index b18a677832e1..5fd6d6499b3d 100644
--- a/drivers/infiniband/sw/siw/siw_verbs.c
+++ b/drivers/infiniband/sw/siw/siw_verbs.c
@@ -34,44 +34,19 @@ static char ib_qp_state_to_string[IB_QPS_ERR + 1][sizeof("RESET")] = {
[IB_QPS_ERR] = "ERR"
};
-static u32 siw_create_uobj(struct siw_ucontext *uctx, void *vaddr, u32 size)
+void siw_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
{
- struct siw_uobj *uobj;
- struct xa_limit limit = XA_LIMIT(0, SIW_UOBJ_MAX_KEY);
- u32 key;
+ struct siw_user_mmap_entry *entry = to_siw_mmap_entry(rdma_entry);
- uobj = kzalloc(sizeof(*uobj), GFP_KERNEL);
- if (!uobj)
- return SIW_INVAL_UOBJ_KEY;
-
- if (xa_alloc_cyclic(&uctx->xa, &key, uobj, limit, &uctx->uobj_nextkey,
- GFP_KERNEL) < 0) {
- kfree(uobj);
- return SIW_INVAL_UOBJ_KEY;
- }
- uobj->size = PAGE_ALIGN(size);
- uobj->addr = vaddr;
-
- return key;
-}
-
-static struct siw_uobj *siw_get_uobj(struct siw_ucontext *uctx,
- unsigned long off, u32 size)
-{
- struct siw_uobj *uobj = xa_load(&uctx->xa, off);
-
- if (uobj && uobj->size == size)
- return uobj;
-
- return NULL;
+ kfree(entry);
}
int siw_mmap(struct ib_ucontext *ctx, struct vm_area_struct *vma)
{
struct siw_ucontext *uctx = to_siw_ctx(ctx);
- struct siw_uobj *uobj;
- unsigned long off = vma->vm_pgoff;
- int size = vma->vm_end - vma->vm_start;
+ size_t size = vma->vm_end - vma->vm_start;
+ struct rdma_user_mmap_entry *rdma_entry;
+ struct siw_user_mmap_entry *entry;
int rv = -EINVAL;
/*
@@ -79,18 +54,25 @@ int siw_mmap(struct ib_ucontext *ctx, struct vm_area_struct *vma)
*/
if (vma->vm_start & (PAGE_SIZE - 1)) {
pr_warn("siw: mmap not page aligned\n");
- goto out;
+ return -EINVAL;
}
- uobj = siw_get_uobj(uctx, off, size);
- if (!uobj) {
- siw_dbg(&uctx->sdev->base_dev, "mmap lookup failed: %lu, %u\n",
- off, size);
+ rdma_entry = rdma_user_mmap_entry_get(&uctx->base_ucontext, vma);
+ if (!rdma_entry) {
+ siw_dbg(&uctx->sdev->base_dev, "mmap lookup failed: %lu, %#zx\n",
+ vma->vm_pgoff, size);
+ return -EINVAL;
+ }
+ entry = to_siw_mmap_entry(rdma_entry);
+
+ rv = remap_vmalloc_range(vma, entry->address, 0);
+ if (rv) {
+ pr_warn("remap_vmalloc_range failed: %lu, %zu\n", vma->vm_pgoff,
+ size);
goto out;
}
- rv = remap_vmalloc_range(vma, uobj->addr, 0);
- if (rv)
- pr_warn("remap_vmalloc_range failed: %lu, %u\n", off, size);
out:
+ rdma_user_mmap_entry_put(rdma_entry);
+
return rv;
}
@@ -105,8 +87,6 @@ int siw_alloc_ucontext(struct ib_ucontext *base_ctx, struct ib_udata *udata)
rv = -ENOMEM;
goto err_out;
}
- xa_init_flags(&ctx->xa, XA_FLAGS_ALLOC);
- ctx->uobj_nextkey = 0;
ctx->sdev = sdev;
uresp.dev_id = sdev->vendor_part_id;
@@ -135,19 +115,7 @@ err_out:
void siw_dealloc_ucontext(struct ib_ucontext *base_ctx)
{
struct siw_ucontext *uctx = to_siw_ctx(base_ctx);
- void *entry;
- unsigned long index;
- /*
- * Make sure all user mmap objects are gone. Since QP, CQ
- * and SRQ destroy routines destroy related objects, nothing
- * should be found here.
- */
- xa_for_each(&uctx->xa, index, entry) {
- kfree(xa_erase(&uctx->xa, index));
- pr_warn("siw: dropping orphaned uobj at %lu\n", index);
- }
- xa_destroy(&uctx->xa);
atomic_dec(&uctx->sdev->num_ctx);
}
@@ -293,6 +261,33 @@ void siw_qp_put_ref(struct ib_qp *base_qp)
siw_qp_put(to_siw_qp(base_qp));
}
+static struct rdma_user_mmap_entry *
+siw_mmap_entry_insert(struct siw_ucontext *uctx,
+ void *address, size_t length,
+ u64 *offset)
+{
+ struct siw_user_mmap_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ int rv;
+
+ *offset = SIW_INVAL_UOBJ_KEY;
+ if (!entry)
+ return NULL;
+
+ entry->address = address;
+
+ rv = rdma_user_mmap_entry_insert(&uctx->base_ucontext,
+ &entry->rdma_entry,
+ length);
+ if (rv) {
+ kfree(entry);
+ return NULL;
+ }
+
+ *offset = rdma_user_mmap_get_offset(&entry->rdma_entry);
+
+ return &entry->rdma_entry;
+}
+
/*
* siw_create_qp()
*
@@ -317,6 +312,7 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
struct siw_cq *scq = NULL, *rcq = NULL;
unsigned long flags;
int num_sqe, num_rqe, rv = 0;
+ size_t length;
siw_dbg(base_dev, "create new QP\n");
@@ -380,8 +376,6 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
spin_lock_init(&qp->orq_lock);
qp->kernel_verbs = !udata;
- qp->xa_sq_index = SIW_INVAL_UOBJ_KEY;
- qp->xa_rq_index = SIW_INVAL_UOBJ_KEY;
rv = siw_qp_add(sdev, qp);
if (rv)
@@ -458,22 +452,27 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
uresp.qp_id = qp_id(qp);
if (qp->sendq) {
- qp->xa_sq_index =
- siw_create_uobj(uctx, qp->sendq,
- num_sqe * sizeof(struct siw_sqe));
+ length = num_sqe * sizeof(struct siw_sqe);
+ qp->sq_entry =
+ siw_mmap_entry_insert(uctx, qp->sendq,
+ length, &uresp.sq_key);
+ if (!qp->sq_entry) {
+ rv = -ENOMEM;
+ goto err_out_xa;
+ }
}
+
if (qp->recvq) {
- qp->xa_rq_index =
- siw_create_uobj(uctx, qp->recvq,
- num_rqe * sizeof(struct siw_rqe));
- }
- if (qp->xa_sq_index == SIW_INVAL_UOBJ_KEY ||
- qp->xa_rq_index == SIW_INVAL_UOBJ_KEY) {
- rv = -ENOMEM;
- goto err_out_xa;
+ length = num_rqe * sizeof(struct siw_rqe);
+ qp->rq_entry =
+ siw_mmap_entry_insert(uctx, qp->recvq,
+ length, &uresp.rq_key);
+ if (!qp->rq_entry) {
+ uresp.sq_key = SIW_INVAL_UOBJ_KEY;
+ rv = -ENOMEM;
+ goto err_out_xa;
+ }
}
- uresp.sq_key = qp->xa_sq_index << PAGE_SHIFT;
- uresp.rq_key = qp->xa_rq_index << PAGE_SHIFT;
if (udata->outlen < sizeof(uresp)) {
rv = -EINVAL;
@@ -501,11 +500,10 @@ err_out:
kfree(siw_base_qp);
if (qp) {
- if (qp->xa_sq_index != SIW_INVAL_UOBJ_KEY)
- kfree(xa_erase(&uctx->xa, qp->xa_sq_index));
- if (qp->xa_rq_index != SIW_INVAL_UOBJ_KEY)
- kfree(xa_erase(&uctx->xa, qp->xa_rq_index));
-
+ if (uctx) {
+ rdma_user_mmap_entry_remove(qp->sq_entry);
+ rdma_user_mmap_entry_remove(qp->rq_entry);
+ }
vfree(qp->sendq);
vfree(qp->recvq);
kfree(qp);
@@ -618,10 +616,10 @@ int siw_destroy_qp(struct ib_qp *base_qp, struct ib_udata *udata)
qp->attrs.flags |= SIW_QP_IN_DESTROY;
qp->rx_stream.rx_suspend = 1;
- if (uctx && qp->xa_sq_index != SIW_INVAL_UOBJ_KEY)
- kfree(xa_erase(&uctx->xa, qp->xa_sq_index));
- if (uctx && qp->xa_rq_index != SIW_INVAL_UOBJ_KEY)
- kfree(xa_erase(&uctx->xa, qp->xa_rq_index));
+ if (uctx) {
+ rdma_user_mmap_entry_remove(qp->sq_entry);
+ rdma_user_mmap_entry_remove(qp->rq_entry);
+ }
down_write(&qp->state_lock);
@@ -685,6 +683,47 @@ static int siw_copy_inline_sgl(const struct ib_send_wr *core_wr,
return bytes;
}
+/* Complete SQ WR's without processing */
+static int siw_sq_flush_wr(struct siw_qp *qp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr)
+{
+ struct siw_sqe sqe = {};
+ int rv = 0;
+
+ while (wr) {
+ sqe.id = wr->wr_id;
+ sqe.opcode = wr->opcode;
+ rv = siw_sqe_complete(qp, &sqe, 0, SIW_WC_WR_FLUSH_ERR);
+ if (rv) {
+ if (bad_wr)
+ *bad_wr = wr;
+ break;
+ }
+ wr = wr->next;
+ }
+ return rv;
+}
+
+/* Complete RQ WR's without processing */
+static int siw_rq_flush_wr(struct siw_qp *qp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
+{
+ struct siw_rqe rqe = {};
+ int rv = 0;
+
+ while (wr) {
+ rqe.id = wr->wr_id;
+ rv = siw_rqe_complete(qp, &rqe, 0, 0, SIW_WC_WR_FLUSH_ERR);
+ if (rv) {
+ if (bad_wr)
+ *bad_wr = wr;
+ break;
+ }
+ wr = wr->next;
+ }
+ return rv;
+}
+
/*
* siw_post_send()
*
@@ -703,26 +742,54 @@ int siw_post_send(struct ib_qp *base_qp, const struct ib_send_wr *wr,
unsigned long flags;
int rv = 0;
+ if (wr && !qp->kernel_verbs) {
+ siw_dbg_qp(qp, "wr must be empty for user mapped sq\n");
+ *bad_wr = wr;
+ return -EINVAL;
+ }
+
/*
* Try to acquire QP state lock. Must be non-blocking
* to accommodate kernel clients' needs.
*/
if (!down_read_trylock(&qp->state_lock)) {
- *bad_wr = wr;
- siw_dbg_qp(qp, "QP locked, state %d\n", qp->attrs.state);
- return -ENOTCONN;
+ if (qp->attrs.state == SIW_QP_STATE_ERROR) {
+ /*
+ * ERROR state is final, so we can be sure
+ * this state will not change as long as the QP
+ * exists.
+ *
+ * This handles an ib_drain_sq() call with
+ * a concurrent request to set the QP state
+ * to ERROR.
+ */
+ rv = siw_sq_flush_wr(qp, wr, bad_wr);
+ } else {
+ siw_dbg_qp(qp, "QP locked, state %d\n",
+ qp->attrs.state);
+ *bad_wr = wr;
+ rv = -ENOTCONN;
+ }
+ return rv;
}
if (unlikely(qp->attrs.state != SIW_QP_STATE_RTS)) {
+ if (qp->attrs.state == SIW_QP_STATE_ERROR) {
+ /*
+ * Immediately flush this WR to CQ, if QP
+ * is in ERROR state. SQ is guaranteed to
+ * be empty, so the WR completes in order.
+ *
+ * Typically triggered by ib_drain_sq().
+ */
+ rv = siw_sq_flush_wr(qp, wr, bad_wr);
+ } else {
+ siw_dbg_qp(qp, "QP out of state %d\n",
+ qp->attrs.state);
+ *bad_wr = wr;
+ rv = -ENOTCONN;
+ }
up_read(&qp->state_lock);
- *bad_wr = wr;
- siw_dbg_qp(qp, "QP out of state %d\n", qp->attrs.state);
- return -ENOTCONN;
- }
- if (wr && !qp->kernel_verbs) {
- siw_dbg_qp(qp, "wr must be empty for user mapped sq\n");
- up_read(&qp->state_lock);
- *bad_wr = wr;
- return -EINVAL;
+ return rv;
}
spin_lock_irqsave(&qp->sq_lock, flags);
@@ -917,24 +984,54 @@ int siw_post_receive(struct ib_qp *base_qp, const struct ib_recv_wr *wr,
*bad_wr = wr;
return -EOPNOTSUPP; /* what else from errno.h? */
}
+ if (!qp->kernel_verbs) {
+ siw_dbg_qp(qp, "no kernel post_recv for user mapped sq\n");
+ *bad_wr = wr;
+ return -EINVAL;
+ }
+
/*
* Try to acquire QP state lock. Must be non-blocking
* to accommodate kernel clients' needs.
*/
if (!down_read_trylock(&qp->state_lock)) {
- *bad_wr = wr;
- return -ENOTCONN;
- }
- if (!qp->kernel_verbs) {
- siw_dbg_qp(qp, "no kernel post_recv for user mapped sq\n");
- up_read(&qp->state_lock);
- *bad_wr = wr;
- return -EINVAL;
+ if (qp->attrs.state == SIW_QP_STATE_ERROR) {
+ /*
+ * ERROR state is final, so we can be sure
+ * this state will not change as long as the QP
+ * exists.
+ *
+ * This handles an ib_drain_rq() call with
+ * a concurrent request to set the QP state
+ * to ERROR.
+ */
+ rv = siw_rq_flush_wr(qp, wr, bad_wr);
+ } else {
+ siw_dbg_qp(qp, "QP locked, state %d\n",
+ qp->attrs.state);
+ *bad_wr = wr;
+ rv = -ENOTCONN;
+ }
+ return rv;
}
if (qp->attrs.state > SIW_QP_STATE_RTS) {
+ if (qp->attrs.state == SIW_QP_STATE_ERROR) {
+ /*
+ * Immediately flush this WR to CQ, if QP
+ * is in ERROR state. RQ is guaranteed to
+ * be empty, so the WR completes in order.
+ *
+ * Typically triggered by ib_drain_rq().
+ */
+ rv = siw_rq_flush_wr(qp, wr, bad_wr);
+ } else {
+ siw_dbg_qp(qp, "QP out of state %d\n",
+ qp->attrs.state);
+ *bad_wr = wr;
+ rv = -ENOTCONN;
+ }
up_read(&qp->state_lock);
- *bad_wr = wr;
- return -EINVAL;
+ return rv;
}
/*
* Serialize potentially multiple producers.
@@ -991,8 +1088,8 @@ void siw_destroy_cq(struct ib_cq *base_cq, struct ib_udata *udata)
siw_cq_flush(cq);
- if (ctx && cq->xa_cq_index != SIW_INVAL_UOBJ_KEY)
- kfree(xa_erase(&ctx->xa, cq->xa_cq_index));
+ if (ctx)
+ rdma_user_mmap_entry_remove(cq->cq_entry);
atomic_dec(&sdev->num_cq);
@@ -1029,7 +1126,6 @@ int siw_create_cq(struct ib_cq *base_cq, const struct ib_cq_init_attr *attr,
size = roundup_pow_of_two(size);
cq->base_cq.cqe = size;
cq->num_cqe = size;
- cq->xa_cq_index = SIW_INVAL_UOBJ_KEY;
if (!udata) {
cq->kernel_verbs = 1;
@@ -1055,16 +1151,17 @@ int siw_create_cq(struct ib_cq *base_cq, const struct ib_cq_init_attr *attr,
struct siw_ucontext *ctx =
rdma_udata_to_drv_context(udata, struct siw_ucontext,
base_ucontext);
+ size_t length = size * sizeof(struct siw_cqe) +
+ sizeof(struct siw_cq_ctrl);
- cq->xa_cq_index =
- siw_create_uobj(ctx, cq->queue,
- size * sizeof(struct siw_cqe) +
- sizeof(struct siw_cq_ctrl));
- if (cq->xa_cq_index == SIW_INVAL_UOBJ_KEY) {
+ cq->cq_entry =
+ siw_mmap_entry_insert(ctx, cq->queue,
+ length, &uresp.cq_key);
+ if (!cq->cq_entry) {
rv = -ENOMEM;
goto err_out;
}
- uresp.cq_key = cq->xa_cq_index << PAGE_SHIFT;
+
uresp.cq_id = cq->id;
uresp.num_cqe = size;
@@ -1085,8 +1182,8 @@ err_out:
struct siw_ucontext *ctx =
rdma_udata_to_drv_context(udata, struct siw_ucontext,
base_ucontext);
- if (cq->xa_cq_index != SIW_INVAL_UOBJ_KEY)
- kfree(xa_erase(&ctx->xa, cq->xa_cq_index));
+ if (ctx)
+ rdma_user_mmap_entry_remove(cq->cq_entry);
vfree(cq->queue);
}
atomic_dec(&sdev->num_cq);
@@ -1490,7 +1587,6 @@ int siw_create_srq(struct ib_srq *base_srq,
}
srq->max_sge = attrs->max_sge;
srq->num_rqe = roundup_pow_of_two(attrs->max_wr);
- srq->xa_srq_index = SIW_INVAL_UOBJ_KEY;
srq->limit = attrs->srq_limit;
if (srq->limit)
srq->armed = 1;
@@ -1509,15 +1605,16 @@ int siw_create_srq(struct ib_srq *base_srq,
}
if (udata) {
struct siw_uresp_create_srq uresp = {};
+ size_t length = srq->num_rqe * sizeof(struct siw_rqe);
- srq->xa_srq_index = siw_create_uobj(
- ctx, srq->recvq, srq->num_rqe * sizeof(struct siw_rqe));
-
- if (srq->xa_srq_index == SIW_INVAL_UOBJ_KEY) {
+ srq->srq_entry =
+ siw_mmap_entry_insert(ctx, srq->recvq,
+ length, &uresp.srq_key);
+ if (!srq->srq_entry) {
rv = -ENOMEM;
goto err_out;
}
- uresp.srq_key = srq->xa_srq_index;
+
uresp.num_rqe = srq->num_rqe;
if (udata->outlen < sizeof(uresp)) {
@@ -1536,8 +1633,8 @@ int siw_create_srq(struct ib_srq *base_srq,
err_out:
if (srq->recvq) {
- if (ctx && srq->xa_srq_index != SIW_INVAL_UOBJ_KEY)
- kfree(xa_erase(&ctx->xa, srq->xa_srq_index));
+ if (ctx)
+ rdma_user_mmap_entry_remove(srq->srq_entry);
vfree(srq->recvq);
}
atomic_dec(&sdev->num_srq);
@@ -1623,9 +1720,8 @@ void siw_destroy_srq(struct ib_srq *base_srq, struct ib_udata *udata)
rdma_udata_to_drv_context(udata, struct siw_ucontext,
base_ucontext);
- if (ctx && srq->xa_srq_index != SIW_INVAL_UOBJ_KEY)
- kfree(xa_erase(&ctx->xa, srq->xa_srq_index));
-
+ if (ctx)
+ rdma_user_mmap_entry_remove(srq->srq_entry);
vfree(srq->recvq);
atomic_dec(&sdev->num_srq);
}
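
The net effect of the siw_verbs.c rework above: once a QP reaches ERROR, posting is no longer rejected outright; every WR is completed immediately with a flush status, which is exactly the behavior ib_drain_sq()/ib_drain_rq() rely on now that the driver's private drain_sq/drain_rq ops are gone. The flush loop, sketched with hypothetical demo_* names:

    static int demo_flush_send_wrs(struct demo_qp *qp,
                                   const struct ib_send_wr *wr,
                                   const struct ib_send_wr **bad_wr)
    {
            int rv = 0;

            while (wr) {
                    /* complete with flush status; never touch the wire */
                    rv = demo_complete(qp, wr->wr_id, IB_WC_WR_FLUSH_ERR);
                    if (rv) {
                            if (bad_wr)
                                    *bad_wr = wr;
                            break;
                    }
                    wr = wr->next;
            }
            return rv;
    }
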
diff --git a/drivers/infiniband/sw/siw/siw_verbs.h b/drivers/infiniband/sw/siw/siw_verbs.h
index 1910869281cb..1a731989fad6 100644
--- a/drivers/infiniband/sw/siw/siw_verbs.h
+++ b/drivers/infiniband/sw/siw/siw_verbs.h
@@ -83,6 +83,7 @@ void siw_destroy_srq(struct ib_srq *base_srq, struct ib_udata *udata);
int siw_post_srq_recv(struct ib_srq *base_srq, const struct ib_recv_wr *wr,
const struct ib_recv_wr **bad_wr);
int siw_mmap(struct ib_ucontext *ctx, struct vm_area_struct *vma);
+void siw_mmap_free(struct rdma_user_mmap_entry *rdma_entry);
void siw_qp_event(struct siw_qp *qp, enum ib_event_type type);
void siw_cq_event(struct siw_cq *cq, enum ib_event_type type);
void siw_srq_event(struct siw_srq *srq, enum ib_event_type type);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index ac0583ff280d..e5f438ab716c 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -2019,6 +2019,15 @@ static int ipoib_set_vf_guid(struct net_device *dev, int vf, u64 guid, int type)
return ib_set_vf_guid(priv->ca, vf, priv->port, guid, type);
}
+static int ipoib_get_vf_guid(struct net_device *dev, int vf,
+ struct ifla_vf_guid *node_guid,
+ struct ifla_vf_guid *port_guid)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ return ib_get_vf_guid(priv->ca, vf, priv->port, node_guid, port_guid);
+}
+
static int ipoib_get_vf_stats(struct net_device *dev, int vf,
struct ifla_vf_stats *vf_stats)
{
@@ -2045,6 +2054,7 @@ static const struct net_device_ops ipoib_netdev_ops_pf = {
.ndo_set_vf_link_state = ipoib_set_vf_link_state,
.ndo_get_vf_config = ipoib_get_vf_config,
.ndo_get_vf_stats = ipoib_get_vf_stats,
+ .ndo_get_vf_guid = ipoib_get_vf_guid,
.ndo_set_vf_guid = ipoib_set_vf_guid,
.ndo_set_mac_address = ipoib_set_mac,
.ndo_get_stats64 = ipoib_get_stats,
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 2e72fc5af157..3690e28cc7ea 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -646,13 +646,14 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
if (ib_conn->pi_support) {
u32 sig_caps = ib_dev->attrs.sig_prot_cap;
+ shost->sg_prot_tablesize = shost->sg_tablesize;
scsi_host_set_prot(shost, iser_dif_prot_caps(sig_caps));
scsi_host_set_guard(shost, SHOST_DIX_GUARD_IP |
SHOST_DIX_GUARD_CRC);
}
if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG))
- shost->virt_boundary_mask = ~MASK_4K;
+ shost->virt_boundary_mask = SZ_4K - 1;
if (iscsi_host_add(shost, ib_dev->dev.parent)) {
mutex_unlock(&iser_conn->state_mutex);
@@ -785,7 +786,7 @@ static int iscsi_iser_get_ep_param(struct iscsi_endpoint *ep,
* iscsi_iser_ep_connect() - Initiate iSER connection establishment
* @shost: scsi_host
* @dst_addr: destination address
- * @non-blocking: indicate if routine can block
+ * @non_blocking: indicate if routine can block
*
* Allocate an iscsi endpoint, an iser_conn structure and bind them.
* After that start RDMA connection establishment via rdma_cm. We
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 52ce63592dcf..029c00163442 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -96,16 +96,12 @@
#define iser_err(fmt, arg...) \
pr_err(PFX "%s: " fmt, __func__ , ## arg)
-#define SHIFT_4K 12
-#define SIZE_4K (1ULL << SHIFT_4K)
-#define MASK_4K (~(SIZE_4K-1))
-
/* Default support is 512KB I/O size */
#define ISER_DEF_MAX_SECTORS 1024
#define ISCSI_ISER_DEF_SG_TABLESIZE \
- ((ISER_DEF_MAX_SECTORS * SECTOR_SIZE) >> SHIFT_4K)
+ ((ISER_DEF_MAX_SECTORS * SECTOR_SIZE) >> ilog2(SZ_4K))
/* Maximum support is 16MB I/O size */
-#define ISCSI_ISER_MAX_SG_TABLESIZE ((32768 * SECTOR_SIZE) >> SHIFT_4K)
+#define ISCSI_ISER_MAX_SG_TABLESIZE ((32768 * SECTOR_SIZE) >> ilog2(SZ_4K))
#define ISER_DEF_XMIT_CMDS_DEFAULT 512
#if ISCSI_DEF_XMIT_CMDS_MAX > ISER_DEF_XMIT_CMDS_DEFAULT
@@ -232,15 +228,16 @@ enum iser_desc_type {
* @iser_header: iser header
* @iscsi_header: iscsi header
* @type: command/control/dataout
- * @dam_addr: header buffer dma_address
+ * @dma_addr: header buffer dma_address
* @tx_sg: sg[0] points to iser/iscsi headers
* sg[1] optionally points to either of immediate data
* unsolicited data-out or control
* @num_sge: number sges used on this TX task
+ * @cqe: completion handler
* @mapped: Is the task header mapped
- * reg_wr: registration WR
- * send_wr: send WR
- * inv_wr: invalidate WR
+ * @reg_wr: registration WR
+ * @send_wr: send WR
+ * @inv_wr: invalidate WR
*/
struct iser_tx_desc {
struct iser_ctrl iser_header;
@@ -267,6 +264,7 @@ struct iser_tx_desc {
* @data: received data segment
* @dma_addr: receive buffer dma address
* @rx_sg: ib_sge of receive buffer
+ * @cqe: completion handler
* @pad: for sense data TODO: Modify to maximum sense length supported
*/
struct iser_rx_desc {
@@ -283,9 +281,9 @@ struct iser_rx_desc {
* struct iser_login_desc - iSER login descriptor
*
* @req: pointer to login request buffer
- * @resp: pointer to login response buffer
+ * @rsp: pointer to login response buffer
* @req_dma: DMA address of login request buffer
- * @rsp_dma: DMA address of login response buffer
+ * @rsp_dma: DMA address of login response buffer
* @sge: IB sge for login post recv
* @cqe: completion handler
*/
@@ -315,12 +313,12 @@ struct iser_comp {
};
/**
- * struct iser_device - Memory registration operations
+ * struct iser_reg_ops - Memory registration operations
* per-device registration schemes
*
* @alloc_reg_res: Allocate registration resources
* @free_reg_res: Free registration resources
- * @fast_reg_mem: Register memory buffers
+ * @reg_mem: Register memory buffers
* @unreg_mem: Un-register memory buffers
* @reg_desc_get: Get a registration descriptor for pool
* @reg_desc_put: Return a registration descriptor to the pool
@@ -369,7 +367,7 @@ struct iser_device {
};
/**
- * struct iser_reg_resources - Fast registration recources
+ * struct iser_reg_resources - Fast registration resources
*
* @mr: memory region
* @fmr_pool: pool of fmrs
@@ -402,7 +400,7 @@ struct iser_fr_desc {
};
/**
- * struct iser_fr_pool: connection fast registration pool
+ * struct iser_fr_pool - connection fast registration pool
*
* @list: list of fastreg descriptors
* @lock: protects fmr/fastreg pool
@@ -427,6 +425,7 @@ struct iser_fr_pool {
* @comp: iser completion context
* @fr_pool: connection fast registration poool
* @pi_support: Indicate device T10-PI support
+ * @reg_cqe: completion handler
*/
struct ib_conn {
struct rdma_cm_id *cma_id;
@@ -467,6 +466,7 @@ struct ib_conn {
* @num_rx_descs: number of rx descriptors
* @scsi_sg_tablesize: scsi host sg_tablesize
* @pages_per_mr: maximum pages available for registration
+ * @snd_w_inv: connection uses remote invalidation
*/
struct iser_conn {
struct ib_conn ib_conn;
@@ -525,7 +525,7 @@ struct iser_page_vec {
};
/**
- * struct iser_global: iSER global context
+ * struct iser_global - iSER global context
*
* @device_list_mutex: protects device_list
* @device_list: iser devices global list
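
Dropping the private 4K macros is purely algebraic: SIZE_4K equals SZ_4K, SHIFT_4K equals ilog2(SZ_4K), and since MASK_4K was ~(SIZE_4K - 1), the virt_boundary_mask assignment in iscsi_iser.c becomes SZ_4K - 1. A compile-time check of those identities (the OLD_* names are illustrative stand-ins for the removed macros):

    #include <linux/build_bug.h>
    #include <linux/log2.h>
    #include <linux/sizes.h>

    #define OLD_SHIFT_4K    12
    #define OLD_SIZE_4K     (1ULL << OLD_SHIFT_4K)
    #define OLD_MASK_4K     (~(OLD_SIZE_4K - 1))

    static void demo_check_4k(void)
    {
            BUILD_BUG_ON(OLD_SIZE_4K != SZ_4K);
            BUILD_BUG_ON(OLD_SHIFT_4K != ilog2(SZ_4K));
            BUILD_BUG_ON(~OLD_MASK_4K != SZ_4K - 1);
    }
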
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 5cbb4b3a0566..4a7045bb0831 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -358,6 +358,8 @@ static inline bool iser_signal_comp(u8 sig_count)
/**
* iser_send_command - send command PDU
+ * @conn: link to matching iscsi connection
+ * @task: SCSI command task
*/
int iser_send_command(struct iscsi_conn *conn,
struct iscsi_task *task)
@@ -429,6 +431,9 @@ send_command_error:
/**
* iser_send_data_out - send data out PDU
+ * @conn: link to matching iscsi connection
+ * @task: SCSI command task
+ * @hdr: pointer to the LLD's iSCSI message header
*/
int iser_send_data_out(struct iscsi_conn *conn,
struct iscsi_task *task,
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index 2cc89a9b9e9b..0f74dc6d12fa 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -170,7 +170,7 @@ int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
dev = iser_task->iser_conn->ib_conn.device->ib_device;
data->dma_nents = ib_dma_map_sg(dev, data->sg, data->size, dma_dir);
- if (data->dma_nents == 0) {
+ if (unlikely(data->dma_nents == 0)) {
iser_err("dma_map_sg failed!!!\n");
return -EINVAL;
}
@@ -237,7 +237,7 @@ int iser_fast_reg_fmr(struct iscsi_iser_task *iser_task,
int ret, plen;
page_vec->npages = 0;
- page_vec->fake_mr.page_size = SIZE_4K;
+ page_vec->fake_mr.page_size = SZ_4K;
plen = ib_sg_to_pages(&page_vec->fake_mr, mem->sg,
mem->dma_nents, NULL, iser_set_page);
if (unlikely(plen < mem->dma_nents)) {
@@ -451,7 +451,7 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
- n = ib_map_mr_sg(mr, mem->sg, mem->dma_nents, NULL, SIZE_4K);
+ n = ib_map_mr_sg(mr, mem->sg, mem->dma_nents, NULL, SZ_4K);
if (unlikely(n != mem->dma_nents)) {
iser_err("failed to map sg (%d/%d)\n",
n, mem->dma_nents);
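Two mechanical changes run through iser_memory.c: the driver-private SIZE_4K constant gives way to the generic SZ_4K from <linux/sizes.h> (0x1000), and failure branches gain unlikely() so the compiler lays out the hot path first. A minimal sketch of both, with hypothetical names:

	#include <linux/sizes.h>

	static int example_map(struct ib_mr *mr, struct scatterlist *sg, int nents)
	{
		/* SZ_4K == 0x1000; ilog2(SZ_4K) == 12 replaces the old SHIFT_4K */
		int n = ib_map_mr_sg(mr, sg, nents, NULL, SZ_4K);

		if (unlikely(n != nents))	/* failure is the cold path */
			return -EINVAL;
		return n;
	}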
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index a6548de0e218..1f4a37a3c2b3 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -58,12 +58,12 @@ static void iser_event_handler(struct ib_event_handler *handler,
dev_name(&event->device->dev), event->element.port_num);
}
-/**
+/*
* iser_create_device_ib_res - creates Protection Domain (PD), Completion
* Queue (CQ), DMA Memory Region (DMA MR) with the device associated with
- * the adapator.
+ * the adaptor.
*
- * returns 0 on success, -1 on failure
+ * Return: 0 on success, -1 on failure
*/
static int iser_create_device_ib_res(struct iser_device *device)
{
@@ -124,9 +124,9 @@ comps_err:
return -1;
}
-/**
+/*
* iser_free_device_ib_res - destroy/dealloc/dereg the DMA MR,
- * CQ and PD created with the device associated with the adapator.
+ * CQ and PD created with the device associated with the adaptor.
*/
static void iser_free_device_ib_res(struct iser_device *device)
{
@@ -149,8 +149,11 @@ static void iser_free_device_ib_res(struct iser_device *device)
/**
* iser_alloc_fmr_pool - Creates FMR pool and page_vector
+ * @ib_conn: connection RDMA resources
+ * @cmds_max: max number of SCSI commands for this connection
+ * @size: max number of pages per map request
*
- * returns 0 on success, or errno code on failure
+ * Return: 0 on success, or errno code on failure
*/
int iser_alloc_fmr_pool(struct ib_conn *ib_conn,
unsigned cmds_max,
@@ -180,7 +183,7 @@ int iser_alloc_fmr_pool(struct ib_conn *ib_conn,
page_vec->pages = (u64 *)(page_vec + 1);
- params.page_shift = SHIFT_4K;
+ params.page_shift = ilog2(SZ_4K);
params.max_pages_per_fmr = size;
/* make the pool size twice the max number of SCSI commands *
* the ML is expected to queue, watermark for unmap at 50% */
@@ -215,6 +218,7 @@ err_frpl:
/**
* iser_free_fmr_pool - releases the FMR pool and page vec
+ * @ib_conn: connection RDMA resources
*/
void iser_free_fmr_pool(struct ib_conn *ib_conn)
{
@@ -295,7 +299,11 @@ static void iser_destroy_fastreg_desc(struct iser_fr_desc *desc)
/**
* iser_alloc_fastreg_pool - Creates pool of fast_reg descriptors
* for fast registration work requests.
- * returns 0 on success, or errno code on failure
+ * @ib_conn: connection RDMA resources
+ * @cmds_max: max number of SCSI commands for this connection
+ * @size: max number of pages per map request
+ *
+ * Return: 0 on success, or errno code on failure
*/
int iser_alloc_fastreg_pool(struct ib_conn *ib_conn,
unsigned cmds_max,
@@ -332,6 +340,7 @@ err:
/**
* iser_free_fastreg_pool - releases the pool of fast_reg descriptors
+ * @ib_conn: connection RDMA resources
*/
void iser_free_fastreg_pool(struct ib_conn *ib_conn)
{
@@ -355,10 +364,10 @@ void iser_free_fastreg_pool(struct ib_conn *ib_conn)
fr_pool->size - i);
}
-/**
+/*
* iser_create_ib_conn_res - creates a Queue-Pair (QP)
*
- * returns 0 on success, -1 on failure
+ * Return: 0 on success, -1 on failure
*/
static int iser_create_ib_conn_res(struct ib_conn *ib_conn)
{
@@ -436,7 +445,7 @@ out_err:
return ret;
}
-/**
+/*
* based on the resolved device node GUID, see if there is an already
* allocated device for this node. If there's no such device, create one.
*/
@@ -487,9 +496,9 @@ static void iser_device_try_release(struct iser_device *device)
mutex_unlock(&ig.device_list_mutex);
}
-/**
+/*
* Called with state mutex held
- **/
+ */
static int iser_conn_state_comp_exch(struct iser_conn *iser_conn,
enum iser_conn_state comp,
enum iser_conn_state exch)
@@ -561,7 +570,8 @@ static void iser_free_ib_conn_res(struct iser_conn *iser_conn,
}
/**
- * Frees all conn objects and deallocs conn descriptor
+ * iser_conn_release - Frees all conn objects and deallocs conn descriptor
+ * @iser_conn: iSER connection context
*/
void iser_conn_release(struct iser_conn *iser_conn)
{
@@ -595,7 +605,10 @@ void iser_conn_release(struct iser_conn *iser_conn)
}
/**
- * triggers start of the disconnect procedures and wait for them to be done
+ * iser_conn_terminate - triggers start of the disconnect procedures and
+ * waits for them to be done
+ * @iser_conn: iSER connection context
+ *
* Called with state mutex held
*/
int iser_conn_terminate(struct iser_conn *iser_conn)
@@ -632,9 +645,9 @@ int iser_conn_terminate(struct iser_conn *iser_conn)
return 1;
}
-/**
+/*
* Called with state mutex held
- **/
+ */
static void iser_connect_error(struct rdma_cm_id *cma_id)
{
struct iser_conn *iser_conn;
@@ -670,7 +683,7 @@ iser_calc_scsi_params(struct iser_conn *iser_conn,
else
max_num_sg = attr->max_fast_reg_page_list_len;
- sg_tablesize = DIV_ROUND_UP(max_sectors * 512, SIZE_4K);
+ sg_tablesize = DIV_ROUND_UP(max_sectors * SECTOR_SIZE, SZ_4K);
if (attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS)
sup_sg_tablesize =
min_t(
@@ -684,9 +697,9 @@ iser_calc_scsi_params(struct iser_conn *iser_conn,
iser_conn->scsi_sg_tablesize + reserved_mr_pages;
}
-/**
+/*
* Called with state mutex held
- **/
+ */
static void iser_addr_handler(struct rdma_cm_id *cma_id)
{
struct iser_device *device;
@@ -732,9 +745,9 @@ static void iser_addr_handler(struct rdma_cm_id *cma_id)
}
}
-/**
+/*
* Called with state mutex held
- **/
+ */
static void iser_route_handler(struct rdma_cm_id *cma_id)
{
struct rdma_conn_param conn_param;
@@ -1019,7 +1032,7 @@ int iser_post_recvm(struct iser_conn *iser_conn, int count)
ib_conn->post_recv_buf_count += count;
ib_ret = ib_post_recv(ib_conn->qp, ib_conn->rx_wr, NULL);
- if (ib_ret) {
+ if (unlikely(ib_ret)) {
iser_err("ib_post_recv failed ret=%d\n", ib_ret);
ib_conn->post_recv_buf_count -= count;
} else
@@ -1030,9 +1043,12 @@ int iser_post_recvm(struct iser_conn *iser_conn, int count)
/**
- * iser_start_send - Initiate a Send DTO operation
+ * iser_post_send - Initiate a Send DTO operation
+ * @ib_conn: connection RDMA resources
+ * @tx_desc: iSER TX descriptor
+ * @signal: true to send work request as SIGNALED
*
- * returns 0 on success, -1 on failure
+ * Return: 0 on success, -1 on failure
*/
int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc,
bool signal)
@@ -1060,7 +1076,7 @@ int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc,
first_wr = wr;
ib_ret = ib_post_send(ib_conn->qp, first_wr, NULL);
- if (ib_ret)
+ if (unlikely(ib_ret))
iser_err("ib_post_send failed, ret:%d opcode:%d\n",
ib_ret, wr->opcode);
@@ -1081,7 +1097,7 @@ u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
ret = ib_check_mr_status(desc->rsc.sig_mr,
IB_MR_CHECK_SIG_STATUS, &mr_status);
if (ret) {
- pr_err("ib_check_mr_status failed, ret %d\n", ret);
+ iser_err("ib_check_mr_status failed, ret %d\n", ret);
/* Not a lot we can do, return ambiguous guard error */
*sector = 0;
return 0x1;
@@ -1093,7 +1109,7 @@ u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
sector_div(sector_off, sector_size + 8);
*sector = scsi_get_lba(iser_task->sc) + sector_off;
- pr_err("PI error found type %d at sector %llx "
+ iser_err("PI error found type %d at sector %llx "
"expected %x vs actual %x\n",
mr_status.sig_err.err_type,
(unsigned long long)*sector,
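In the iser_calc_scsi_params() hunk above, spelling the computation as DIV_ROUND_UP(max_sectors * SECTOR_SIZE, SZ_4K) makes the units explicit without changing the result. A worked example:

	/* max_sectors = 1024 (a 512 KiB I/O):
	 *   1024 * SECTOR_SIZE = 1024 * 512 = 524288 bytes
	 *   DIV_ROUND_UP(524288, SZ_4K) = 524288 / 4096 = 128 sg entries
	 */
	sg_tablesize = DIV_ROUND_UP(max_sectors * SECTOR_SIZE, SZ_4K);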
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h b/drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h
index 43ac61ffef4a..6dbc08e1a6a6 100644
--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h
@@ -70,7 +70,7 @@
struct opa_vnic_adapter;
-/**
+/*
* struct __opa_vesw_info - OPA vnic virtual switch info
*
* Same as opa_vesw_info without bitwise attribute.
@@ -96,7 +96,7 @@ struct __opa_vesw_info {
u8 rsvd4[2];
} __packed;
-/**
+/*
* struct __opa_per_veswport_info - OPA vnic per port info
*
* Same as opa_per_veswport_info without bitwise attribute.
@@ -136,7 +136,7 @@ struct __opa_per_veswport_info {
u8 rsvd3[8];
} __packed;
-/**
+/*
* struct __opa_veswport_info - OPA vnic port info
*
* Same as opa_veswport_info without bitwise attribute.
@@ -146,7 +146,7 @@ struct __opa_veswport_info {
struct __opa_per_veswport_info vport;
};
-/**
+/*
* struct __opa_veswport_trap - OPA vnic trap info
*
* Same as opa_veswport_trap without bitwise attribute.
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index b5960351bec0..b7f7a5f7bd98 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -174,9 +174,9 @@ static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
int tmo = *(int *)kp->arg;
if (tmo >= 0)
- return sprintf(buffer, "%d", tmo);
+ return sprintf(buffer, "%d\n", tmo);
else
- return sprintf(buffer, "off");
+ return sprintf(buffer, "off\n");
}
static int srp_tmo_set(const char *val, const struct kernel_param *kp)
@@ -352,11 +352,11 @@ static int srp_new_rdma_cm_id(struct srp_rdma_ch *ch)
init_completion(&ch->done);
ret = rdma_resolve_addr(new_cm_id, target->rdma_cm.src_specified ?
- (struct sockaddr *)&target->rdma_cm.src : NULL,
- (struct sockaddr *)&target->rdma_cm.dst,
+ &target->rdma_cm.src.sa : NULL,
+ &target->rdma_cm.dst.sa,
SRP_PATH_REC_TIMEOUT_MS);
if (ret) {
- pr_err("No route available from %pIS to %pIS (%d)\n",
+ pr_err("No route available from %pISpsc to %pISpsc (%d)\n",
&target->rdma_cm.src, &target->rdma_cm.dst, ret);
goto out;
}
@@ -366,7 +366,7 @@ static int srp_new_rdma_cm_id(struct srp_rdma_ch *ch)
ret = ch->status;
if (ret) {
- pr_err("Resolving address %pIS failed (%d)\n",
+ pr_err("Resolving address %pISpsc failed (%d)\n",
&target->rdma_cm.dst, ret);
goto out;
}
@@ -552,6 +552,7 @@ static int srp_create_ch_ib(struct srp_rdma_ch *ch)
{
struct srp_target_port *target = ch->target;
struct srp_device *dev = target->srp_host->srp_dev;
+ const struct ib_device_attr *attr = &dev->dev->attrs;
struct ib_qp_init_attr *init_attr;
struct ib_cq *recv_cq, *send_cq;
struct ib_qp *qp;
@@ -583,12 +584,14 @@ static int srp_create_ch_ib(struct srp_rdma_ch *ch)
init_attr->cap.max_send_wr = m * target->queue_size;
init_attr->cap.max_recv_wr = target->queue_size + 1;
init_attr->cap.max_recv_sge = 1;
- init_attr->cap.max_send_sge = SRP_MAX_SGE;
+ init_attr->cap.max_send_sge = min(SRP_MAX_SGE, attr->max_send_sge);
init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
init_attr->qp_type = IB_QPT_RC;
init_attr->send_cq = send_cq;
init_attr->recv_cq = recv_cq;
+ ch->max_imm_sge = min(init_attr->cap.max_send_sge - 1U, 255U);
+
if (target->using_rdma_cm) {
ret = rdma_create_qp(ch->rdma_cm.cm_id, dev->pd, init_attr);
qp = ch->rdma_cm.cm_id->qp;
@@ -1362,7 +1365,8 @@ static void srp_terminate_io(struct srp_rport *rport)
}
/* Calculate maximum initiator to target information unit length. */
-static uint32_t srp_max_it_iu_len(int cmd_sg_cnt, bool use_imm_data)
+static uint32_t srp_max_it_iu_len(int cmd_sg_cnt, bool use_imm_data,
+ uint32_t max_it_iu_size)
{
uint32_t max_iu_len = sizeof(struct srp_cmd) + SRP_MAX_ADD_CDB_LEN +
sizeof(struct srp_indirect_buf) +
@@ -1372,6 +1376,11 @@ static uint32_t srp_max_it_iu_len(int cmd_sg_cnt, bool use_imm_data)
max_iu_len = max(max_iu_len, SRP_IMM_DATA_OFFSET +
srp_max_imm_data);
+ if (max_it_iu_size)
+ max_iu_len = min(max_iu_len, max_it_iu_size);
+
+ pr_debug("max_iu_len = %d\n", max_iu_len);
+
return max_iu_len;
}
@@ -1389,7 +1398,8 @@ static int srp_rport_reconnect(struct srp_rport *rport)
struct srp_target_port *target = rport->lld_data;
struct srp_rdma_ch *ch;
uint32_t max_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt,
- srp_use_imm_data);
+ srp_use_imm_data,
+ target->max_it_iu_size);
int i, j, ret = 0;
bool multich = false;
@@ -1838,7 +1848,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
return -EIO;
if (ch->use_imm_data &&
- count <= SRP_MAX_IMM_SGE &&
+ count <= ch->max_imm_sge &&
SRP_IMM_DATA_OFFSET + data_len <= ch->max_it_iu_len &&
scmnd->sc_data_direction == DMA_TO_DEVICE) {
struct srp_imm_buf *buf;
@@ -2538,7 +2548,8 @@ static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
ch->req_lim = be32_to_cpu(lrsp->req_lim_delta);
ch->use_imm_data = lrsp->rsp_flags & SRP_LOGIN_RSP_IMMED_SUPP;
ch->max_it_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt,
- ch->use_imm_data);
+ ch->use_imm_data,
+ target->max_it_iu_size);
WARN_ON_ONCE(ch->max_it_iu_len >
be32_to_cpu(lrsp->max_it_iu_len));
@@ -3411,6 +3422,7 @@ enum {
SRP_OPT_IP_SRC = 1 << 15,
SRP_OPT_IP_DEST = 1 << 16,
SRP_OPT_TARGET_CAN_QUEUE= 1 << 17,
+ SRP_OPT_MAX_IT_IU_SIZE = 1 << 18,
};
static unsigned int srp_opt_mandatory[] = {
@@ -3443,6 +3455,7 @@ static const match_table_t srp_opt_tokens = {
{ SRP_OPT_QUEUE_SIZE, "queue_size=%d" },
{ SRP_OPT_IP_SRC, "src=%s" },
{ SRP_OPT_IP_DEST, "dest=%s" },
+ { SRP_OPT_MAX_IT_IU_SIZE, "max_it_iu_size=%d" },
{ SRP_OPT_ERR, NULL }
};
@@ -3736,6 +3749,14 @@ static int srp_parse_options(struct net *net, const char *buf,
target->tl_retry_count = token;
break;
+ case SRP_OPT_MAX_IT_IU_SIZE:
+ if (match_int(args, &token) || token < 0) {
+ pr_warn("bad maximum initiator to target IU size '%s'\n", p);
+ goto out;
+ }
+ target->max_it_iu_size = token;
+ break;
+
default:
pr_warn("unknown parameter or missing value '%s' in target creation request\n",
p);
@@ -3887,7 +3908,9 @@ static ssize_t srp_create_target(struct device *dev,
target->mr_per_cmd = mr_per_cmd;
target->indirect_size = target->sg_tablesize *
sizeof (struct srp_direct_buf);
- max_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt, srp_use_imm_data);
+ max_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt,
+ srp_use_imm_data,
+ target->max_it_iu_size);
INIT_WORK(&target->tl_err_work, srp_tl_err_work);
INIT_WORK(&target->remove_work, srp_remove_work);
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index b2861cd2087a..5359ece561ca 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -161,6 +161,7 @@ struct srp_rdma_ch {
};
uint32_t max_it_iu_len;
uint32_t max_ti_iu_len;
+ u8 max_imm_sge;
bool use_imm_data;
/* Everything above this point is used in the hot path of
@@ -209,6 +210,7 @@ struct srp_target_port {
u32 ch_count;
u32 lkey;
enum srp_target_state state;
+ uint32_t max_it_iu_size;
unsigned int cmd_sg_cnt;
unsigned int indirect_size;
bool allow_ext_sg;
@@ -245,11 +247,13 @@ struct srp_target_port {
union {
struct sockaddr_in ip4;
struct sockaddr_in6 ip6;
+ struct sockaddr sa;
struct sockaddr_storage ss;
} src;
union {
struct sockaddr_in ip4;
struct sockaddr_in6 ip6;
+ struct sockaddr sa;
struct sockaddr_storage ss;
} dst;
bool src_specified;
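Adding a plain struct sockaddr member to the src/dst unions in ib_srp.h lets ib_srp.c pass &target->rdma_cm.dst.sa instead of casting the union's address, trading a cast for a type-checked member access. A minimal sketch of the idiom, with hypothetical names:

	union example_addr {
		struct sockaddr_in	ip4;
		struct sockaddr_in6	ip6;
		struct sockaddr		sa;	/* generic view; all members alias */
		struct sockaddr_storage	ss;
	};

	static int example_resolve(struct rdma_cm_id *id, union example_addr *dst)
	{
		/* before: (struct sockaddr *)dst;  after: &dst->sa */
		return rdma_resolve_addr(id, NULL, &dst->sa, 2000);
	}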
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index e25c70a56be6..23c782e3d49a 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -556,34 +556,41 @@ static int srpt_refresh_port(struct srpt_port *sport)
struct ib_port_attr port_attr;
int ret;
- memset(&port_modify, 0, sizeof(port_modify));
- port_modify.set_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
- port_modify.clr_port_cap_mask = 0;
-
- ret = ib_modify_port(sport->sdev->device, sport->port, 0, &port_modify);
- if (ret)
- goto err_mod_port;
-
ret = ib_query_port(sport->sdev->device, sport->port, &port_attr);
if (ret)
- goto err_query_port;
+ return ret;
sport->sm_lid = port_attr.sm_lid;
sport->lid = port_attr.lid;
ret = rdma_query_gid(sport->sdev->device, sport->port, 0, &sport->gid);
if (ret)
- goto err_query_port;
+ return ret;
- sport->port_guid_wwn.priv = sport;
- srpt_format_guid(sport->port_guid, sizeof(sport->port_guid),
+ sport->port_guid_id.wwn.priv = sport;
+ srpt_format_guid(sport->port_guid_id.name,
+ sizeof(sport->port_guid_id.name),
&sport->gid.global.interface_id);
- sport->port_gid_wwn.priv = sport;
- snprintf(sport->port_gid, sizeof(sport->port_gid),
+ sport->port_gid_id.wwn.priv = sport;
+ snprintf(sport->port_gid_id.name, sizeof(sport->port_gid_id.name),
"0x%016llx%016llx",
be64_to_cpu(sport->gid.global.subnet_prefix),
be64_to_cpu(sport->gid.global.interface_id));
+ if (rdma_protocol_iwarp(sport->sdev->device, sport->port))
+ return 0;
+
+ memset(&port_modify, 0, sizeof(port_modify));
+ port_modify.set_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
+ port_modify.clr_port_cap_mask = 0;
+
+ ret = ib_modify_port(sport->sdev->device, sport->port, 0, &port_modify);
+ if (ret) {
+ pr_warn("%s-%d: enabling device management failed (%d). Note: this is expected if SR-IOV is enabled.\n",
+ dev_name(&sport->sdev->device->dev), sport->port, ret);
+ return 0;
+ }
+
if (!sport->mad_agent) {
memset(&reg_req, 0, sizeof(reg_req));
reg_req.mgmt_class = IB_MGMT_CLASS_DEVICE_MGMT;
@@ -599,23 +606,14 @@ static int srpt_refresh_port(struct srpt_port *sport)
srpt_mad_recv_handler,
sport, 0);
if (IS_ERR(sport->mad_agent)) {
- ret = PTR_ERR(sport->mad_agent);
+ pr_err("%s-%d: MAD agent registration failed (%ld). Note: this is expected if SR-IOV is enabled.\n",
+ dev_name(&sport->sdev->device->dev), sport->port,
+ PTR_ERR(sport->mad_agent));
sport->mad_agent = NULL;
- goto err_query_port;
}
}
return 0;
-
-err_query_port:
-
- port_modify.set_port_cap_mask = 0;
- port_modify.clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
- ib_modify_port(sport->sdev->device, sport->port, 0, &port_modify);
-
-err_mod_port:
-
- return ret;
}
/**
@@ -1364,9 +1362,11 @@ static int srpt_build_cmd_rsp(struct srpt_rdma_ch *ch,
struct srpt_send_ioctx *ioctx, u64 tag,
int status)
{
+ struct se_cmd *cmd = &ioctx->cmd;
struct srp_rsp *srp_rsp;
const u8 *sense_data;
int sense_data_len, max_sense_len;
+ u32 resid = cmd->residual_count;
/*
* The lowest bit of all SAM-3 status codes is zero (see also
@@ -1388,6 +1388,28 @@ static int srpt_build_cmd_rsp(struct srpt_rdma_ch *ch,
srp_rsp->tag = tag;
srp_rsp->status = status;
+ if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
+ if (cmd->data_direction == DMA_TO_DEVICE) {
+ /* residual data from an underflow write */
+ srp_rsp->flags = SRP_RSP_FLAG_DOUNDER;
+ srp_rsp->data_out_res_cnt = cpu_to_be32(resid);
+ } else if (cmd->data_direction == DMA_FROM_DEVICE) {
+ /* residual data from an underflow read */
+ srp_rsp->flags = SRP_RSP_FLAG_DIUNDER;
+ srp_rsp->data_in_res_cnt = cpu_to_be32(resid);
+ }
+ } else if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
+ if (cmd->data_direction == DMA_TO_DEVICE) {
+ /* residual data from an overflow write */
+ srp_rsp->flags = SRP_RSP_FLAG_DOOVER;
+ srp_rsp->data_out_res_cnt = cpu_to_be32(resid);
+ } else if (cmd->data_direction == DMA_FROM_DEVICE) {
+ /* residual data from an overflow read */
+ srp_rsp->flags = SRP_RSP_FLAG_DIOVER;
+ srp_rsp->data_in_res_cnt = cpu_to_be32(resid);
+ }
+ }
+
if (sense_data_len) {
BUILD_BUG_ON(MIN_MAX_RSP_SIZE <= sizeof(*srp_rsp));
max_sense_len = ch->max_ti_iu_len - sizeof(*srp_rsp);
@@ -1931,41 +1953,22 @@ static int srpt_disconnect_ch(struct srpt_rdma_ch *ch)
return ret;
}
-static bool srpt_ch_closed(struct srpt_port *sport, struct srpt_rdma_ch *ch)
-{
- struct srpt_nexus *nexus;
- struct srpt_rdma_ch *ch2;
- bool res = true;
-
- rcu_read_lock();
- list_for_each_entry(nexus, &sport->nexus_list, entry) {
- list_for_each_entry(ch2, &nexus->ch_list, list) {
- if (ch2 == ch) {
- res = false;
- goto done;
- }
- }
- }
-done:
- rcu_read_unlock();
-
- return res;
-}
-
/* Send DREQ and wait for DREP. */
static void srpt_disconnect_ch_sync(struct srpt_rdma_ch *ch)
{
+ DECLARE_COMPLETION_ONSTACK(closed);
struct srpt_port *sport = ch->sport;
pr_debug("ch %s-%d state %d\n", ch->sess_name, ch->qp->qp_num,
ch->state);
+ ch->closed = &closed;
+
mutex_lock(&sport->mutex);
srpt_disconnect_ch(ch);
mutex_unlock(&sport->mutex);
- while (wait_event_timeout(sport->ch_releaseQ, srpt_ch_closed(sport, ch),
- 5 * HZ) == 0)
+ while (wait_for_completion_timeout(&closed, 5 * HZ) == 0)
pr_info("%s(%s-%d state %d): still waiting ...\n", __func__,
ch->sess_name, ch->qp->qp_num, ch->state);
@@ -2045,10 +2048,17 @@ static void srpt_set_enabled(struct srpt_port *sport, bool enabled)
__srpt_close_all_ch(sport);
}
+static void srpt_drop_sport_ref(struct srpt_port *sport)
+{
+ if (atomic_dec_return(&sport->refcount) == 0 && sport->freed_channels)
+ complete(sport->freed_channels);
+}
+
static void srpt_free_ch(struct kref *kref)
{
struct srpt_rdma_ch *ch = container_of(kref, struct srpt_rdma_ch, kref);
+ srpt_drop_sport_ref(ch->sport);
kfree_rcu(ch, rcu);
}
@@ -2092,6 +2102,9 @@ static void srpt_release_channel_work(struct work_struct *w)
list_del_rcu(&ch->list);
mutex_unlock(&sport->mutex);
+ if (ch->closed)
+ complete(ch->closed);
+
srpt_destroy_ch_ib(ch);
srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
@@ -2106,8 +2119,6 @@ static void srpt_release_channel_work(struct work_struct *w)
kmem_cache_destroy(ch->req_buf_cache);
- wake_up(&sport->ch_releaseQ);
-
kref_put(&ch->kref, srpt_free_ch);
}
@@ -2144,6 +2155,7 @@ static int srpt_cm_req_recv(struct srpt_device *const sdev,
char i_port_id[36];
u32 it_iu_len;
int i, tag_num, tag_size, ret;
+ struct srpt_tpg *stpg;
WARN_ON_ONCE(irqs_disabled());
@@ -2296,23 +2308,38 @@ static int srpt_cm_req_recv(struct srpt_device *const sdev,
be64_to_cpu(*(__be64 *)nexus->i_port_id),
be64_to_cpu(*(__be64 *)(nexus->i_port_id + 8)));
- pr_debug("registering session %s\n", ch->sess_name);
+ pr_debug("registering src addr %s or i_port_id %s\n", ch->sess_name,
+ i_port_id);
tag_num = ch->rq_size;
tag_size = 1; /* ib_srpt does not use se_sess->sess_cmd_map */
- if (sport->port_guid_tpg.se_tpg_wwn)
- ch->sess = target_setup_session(&sport->port_guid_tpg, tag_num,
+
+ mutex_lock(&sport->port_guid_id.mutex);
+ list_for_each_entry(stpg, &sport->port_guid_id.tpg_list, entry) {
+ if (!IS_ERR_OR_NULL(ch->sess))
+ break;
+ ch->sess = target_setup_session(&stpg->tpg, tag_num,
tag_size, TARGET_PROT_NORMAL,
ch->sess_name, ch, NULL);
- if (sport->port_gid_tpg.se_tpg_wwn && IS_ERR_OR_NULL(ch->sess))
- ch->sess = target_setup_session(&sport->port_gid_tpg, tag_num,
+ }
+ mutex_unlock(&sport->port_guid_id.mutex);
+
+ mutex_lock(&sport->port_gid_id.mutex);
+ list_for_each_entry(stpg, &sport->port_gid_id.tpg_list, entry) {
+ if (!IS_ERR_OR_NULL(ch->sess))
+ break;
+ ch->sess = target_setup_session(&stpg->tpg, tag_num,
tag_size, TARGET_PROT_NORMAL, i_port_id,
ch, NULL);
- /* Retry without leading "0x" */
- if (sport->port_gid_tpg.se_tpg_wwn && IS_ERR_OR_NULL(ch->sess))
- ch->sess = target_setup_session(&sport->port_gid_tpg, tag_num,
+ if (!IS_ERR_OR_NULL(ch->sess))
+ break;
+ /* Retry without leading "0x" */
+ ch->sess = target_setup_session(&stpg->tpg, tag_num,
tag_size, TARGET_PROT_NORMAL,
i_port_id + 2, ch, NULL);
+ }
+ mutex_unlock(&sport->port_gid_id.mutex);
+
if (IS_ERR_OR_NULL(ch->sess)) {
WARN_ON_ONCE(ch->sess == NULL);
ret = PTR_ERR(ch->sess);
@@ -2325,6 +2352,12 @@ static int srpt_cm_req_recv(struct srpt_device *const sdev,
goto destroy_ib;
}
+ /*
+ * Once a session has been created destruction of srpt_rdma_ch objects
+ * will decrement sport->refcount. Hence increment sport->refcount now.
+ */
+ atomic_inc(&sport->refcount);
+
mutex_lock(&sport->mutex);
if ((req->req_flags & SRP_MTCH_ACTION) == SRP_MULTICHAN_SINGLE) {
@@ -2505,6 +2538,7 @@ static int srpt_rdma_cm_req_recv(struct rdma_cm_id *cm_id,
struct srpt_device *sdev;
struct srp_login_req req;
const struct srp_login_req_rdma *req_rdma;
+ struct sa_path_rec *path_rec = cm_id->route.path_rec;
char src_addr[40];
sdev = ib_get_client_data(cm_id->device, &srpt_client);
@@ -2530,7 +2564,7 @@ static int srpt_rdma_cm_req_recv(struct rdma_cm_id *cm_id,
&cm_id->route.addr.src_addr);
return srpt_cm_req_recv(sdev, NULL, cm_id, cm_id->port_num,
- cm_id->route.path_rec->pkey, &req, src_addr);
+ path_rec ? path_rec->pkey : 0, &req, src_addr);
}
static void srpt_cm_rej_recv(struct srpt_rdma_ch *ch,
@@ -2906,39 +2940,29 @@ static void srpt_refresh_port_work(struct work_struct *work)
srpt_refresh_port(sport);
}
-static bool srpt_ch_list_empty(struct srpt_port *sport)
-{
- struct srpt_nexus *nexus;
- bool res = true;
-
- rcu_read_lock();
- list_for_each_entry(nexus, &sport->nexus_list, entry)
- if (!list_empty(&nexus->ch_list))
- res = false;
- rcu_read_unlock();
-
- return res;
-}
-
/**
* srpt_release_sport - disable login and wait for associated channels
* @sport: SRPT HCA port.
*/
static int srpt_release_sport(struct srpt_port *sport)
{
+ DECLARE_COMPLETION_ONSTACK(c);
struct srpt_nexus *nexus, *next_n;
struct srpt_rdma_ch *ch;
WARN_ON_ONCE(irqs_disabled());
+ sport->freed_channels = &c;
+
mutex_lock(&sport->mutex);
srpt_set_enabled(sport, false);
mutex_unlock(&sport->mutex);
- while (wait_event_timeout(sport->ch_releaseQ,
- srpt_ch_list_empty(sport), 5 * HZ) <= 0) {
- pr_info("%s_%d: waiting for session unregistration ...\n",
- dev_name(&sport->sdev->device->dev), sport->port);
+ while (atomic_read(&sport->refcount) > 0 &&
+ wait_for_completion_timeout(&c, 5 * HZ) <= 0) {
+ pr_info("%s_%d: waiting for unregistration of %d sessions ...\n",
+ dev_name(&sport->sdev->device->dev), sport->port,
+ atomic_read(&sport->refcount));
rcu_read_lock();
list_for_each_entry(nexus, &sport->nexus_list, entry) {
list_for_each_entry(ch, &nexus->ch_list, list) {
@@ -2975,10 +2999,10 @@ static struct se_wwn *__srpt_lookup_wwn(const char *name)
for (i = 0; i < dev->phys_port_cnt; i++) {
sport = &sdev->port[i];
- if (strcmp(sport->port_guid, name) == 0)
- return &sport->port_guid_wwn;
- if (strcmp(sport->port_gid, name) == 0)
- return &sport->port_gid_wwn;
+ if (strcmp(sport->port_guid_id.name, name) == 0)
+ return &sport->port_guid_id.wwn;
+ if (strcmp(sport->port_gid_id.name, name) == 0)
+ return &sport->port_gid_id.wwn;
}
}
@@ -3147,7 +3171,6 @@ static void srpt_add_one(struct ib_device *device)
for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
sport = &sdev->port[i - 1];
INIT_LIST_HEAD(&sport->nexus_list);
- init_waitqueue_head(&sport->ch_releaseQ);
mutex_init(&sport->mutex);
sport->sdev = sdev;
sport->port = i;
@@ -3156,6 +3179,10 @@ static void srpt_add_one(struct ib_device *device)
sport->port_attrib.srp_sq_size = DEF_SRPT_SQ_SIZE;
sport->port_attrib.use_srq = false;
INIT_WORK(&sport->work, srpt_refresh_port_work);
+ mutex_init(&sport->port_guid_id.mutex);
+ INIT_LIST_HEAD(&sport->port_guid_id.tpg_list);
+ mutex_init(&sport->port_gid_id.mutex);
+ INIT_LIST_HEAD(&sport->port_gid_id.tpg_list);
if (srpt_refresh_port(sport)) {
pr_err("MAD registration failed for %s-%d.\n",
@@ -3258,14 +3285,23 @@ static struct srpt_port *srpt_tpg_to_sport(struct se_portal_group *tpg)
return tpg->se_tpg_wwn->priv;
}
+static struct srpt_port_id *srpt_wwn_to_sport_id(struct se_wwn *wwn)
+{
+ struct srpt_port *sport = wwn->priv;
+
+ if (wwn == &sport->port_guid_id.wwn)
+ return &sport->port_guid_id;
+ if (wwn == &sport->port_gid_id.wwn)
+ return &sport->port_gid_id;
+ WARN_ON_ONCE(true);
+ return NULL;
+}
+
static char *srpt_get_fabric_wwn(struct se_portal_group *tpg)
{
- struct srpt_port *sport = srpt_tpg_to_sport(tpg);
+ struct srpt_tpg *stpg = container_of(tpg, typeof(*stpg), tpg);
- WARN_ON_ONCE(tpg != &sport->port_guid_tpg &&
- tpg != &sport->port_gid_tpg);
- return tpg == &sport->port_guid_tpg ? sport->port_guid :
- sport->port_gid;
+ return stpg->sport_id->name;
}
static u16 srpt_get_tag(struct se_portal_group *tpg)
@@ -3721,19 +3757,25 @@ static struct configfs_attribute *srpt_tpg_attrs[] = {
static struct se_portal_group *srpt_make_tpg(struct se_wwn *wwn,
const char *name)
{
- struct srpt_port *sport = wwn->priv;
- struct se_portal_group *tpg;
- int res;
-
- WARN_ON_ONCE(wwn != &sport->port_guid_wwn &&
- wwn != &sport->port_gid_wwn);
- tpg = wwn == &sport->port_guid_wwn ? &sport->port_guid_tpg :
- &sport->port_gid_tpg;
- res = core_tpg_register(wwn, tpg, SCSI_PROTOCOL_SRP);
- if (res)
+ struct srpt_port_id *sport_id = srpt_wwn_to_sport_id(wwn);
+ struct srpt_tpg *stpg;
+ int res = -ENOMEM;
+
+ stpg = kzalloc(sizeof(*stpg), GFP_KERNEL);
+ if (!stpg)
return ERR_PTR(res);
+ stpg->sport_id = sport_id;
+ res = core_tpg_register(wwn, &stpg->tpg, SCSI_PROTOCOL_SRP);
+ if (res) {
+ kfree(stpg);
+ return ERR_PTR(res);
+ }
- return tpg;
+ mutex_lock(&sport_id->mutex);
+ list_add_tail(&stpg->entry, &sport_id->tpg_list);
+ mutex_unlock(&sport_id->mutex);
+
+ return &stpg->tpg;
}
/**
@@ -3742,10 +3784,17 @@ static struct se_portal_group *srpt_make_tpg(struct se_wwn *wwn,
*/
static void srpt_drop_tpg(struct se_portal_group *tpg)
{
+ struct srpt_tpg *stpg = container_of(tpg, typeof(*stpg), tpg);
+ struct srpt_port_id *sport_id = stpg->sport_id;
struct srpt_port *sport = srpt_tpg_to_sport(tpg);
+ mutex_lock(&sport_id->mutex);
+ list_del(&stpg->entry);
+ mutex_unlock(&sport_id->mutex);
+
sport->enabled = false;
core_tpg_deregister(tpg);
+ kfree(stpg);
}
/**
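The ib_srpt.c changes replace two wait_event_timeout() loops that rescanned the nexus lists under RCU (srpt_ch_closed(), srpt_ch_list_empty()) with completions signaled directly by the release path. A minimal sketch of the handshake, with hypothetical names:

	static void example_release(struct example_ch *ch)
	{
		if (ch->closed)			/* a waiter registered interest */
			complete(ch->closed);
	}

	static void example_disconnect_sync(struct example_ch *ch)
	{
		DECLARE_COMPLETION_ONSTACK(closed);

		ch->closed = &closed;
		/* ... send DREQ ... */
		while (wait_for_completion_timeout(&closed, 5 * HZ) == 0)
			pr_info("still waiting ...\n");
	}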
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.h b/drivers/infiniband/ulp/srpt/ib_srpt.h
index ee9f20e9177a..2e1a69840857 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.h
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.h
@@ -264,6 +264,8 @@ enum rdma_ch_state {
* @zw_cqe: Zero-length write CQE.
* @rcu: RCU head.
* @kref: kref for this channel.
+ * @closed: Completion object that will be signaled as soon as a new
+ * channel object with the same identity can be created.
* @rq_size: IB receive queue size.
* @max_rsp_size: Maximum size of an RSP response message in bytes.
* @sq_wr_avail: number of work requests available in the send queue.
@@ -306,6 +308,7 @@ struct srpt_rdma_ch {
struct ib_cqe zw_cqe;
struct rcu_head rcu;
struct kref kref;
+ struct completion *closed;
int rq_size;
u32 max_rsp_size;
atomic_t sq_wr_avail;
@@ -361,24 +364,52 @@ struct srpt_port_attrib {
};
/**
+ * struct srpt_tpg - information about a single "target portal group"
+ * @entry: Entry in @sport_id->tpg_list.
+ * @sport_id: Port name this TPG is associated with.
+ * @tpg: LIO TPG data structure.
+ *
+ * Zero or more target portal groups are associated with each port name
+ * (srpt_port_id). With each TPG an ACL list is associated.
+ */
+struct srpt_tpg {
+ struct list_head entry;
+ struct srpt_port_id *sport_id;
+ struct se_portal_group tpg;
+};
+
+/**
+ * struct srpt_port_id - information about an RDMA port name
+ * @mutex: Protects @tpg_list changes.
+ * @tpg_list: TPGs associated with the RDMA port name.
+ * @wwn: WWN associated with the RDMA port name.
+ * @name: ASCII representation of the port name.
+ *
+ * Multiple sysfs directories can be associated with a single RDMA port. This
+ * data structure represents a single (port, name) pair.
+ */
+struct srpt_port_id {
+ struct mutex mutex;
+ struct list_head tpg_list;
+ struct se_wwn wwn;
+ char name[64];
+};
+
+/**
* struct srpt_port - information associated by SRPT with a single IB port
* @sdev: backpointer to the HCA information.
* @mad_agent: per-port management datagram processing information.
* @enabled: Whether or not this target port is enabled.
- * @port_guid: ASCII representation of Port GUID
- * @port_gid: ASCII representation of Port GID
* @port: one-based port number.
* @sm_lid: cached value of the port's sm_lid.
* @lid: cached value of the port's lid.
* @gid: cached value of the port's gid.
- * @port_acl_lock spinlock for port_acl_list:
* @work: work structure for refreshing the aforementioned cached values.
- * @port_guid_tpg: TPG associated with target port GUID.
- * @port_guid_wwn: WWN associated with target port GUID.
- * @port_gid_tpg: TPG associated with target port GID.
- * @port_gid_wwn: WWN associated with target port GID.
+ * @port_guid_id: target port GUID
+ * @port_gid_id: target port GID
* @port_attrib: Port attributes that can be accessed through configfs.
- * @ch_releaseQ: Enables waiting for removal from nexus_list.
+ * @refcount: Number of objects associated with this port.
+ * @freed_channels: Completion that will be signaled once @refcount becomes 0.
* @mutex: Protects nexus_list.
* @nexus_list: Nexus list. See also srpt_nexus.entry.
*/
@@ -386,19 +417,16 @@ struct srpt_port {
struct srpt_device *sdev;
struct ib_mad_agent *mad_agent;
bool enabled;
- u8 port_guid[24];
- u8 port_gid[64];
u8 port;
u32 sm_lid;
u32 lid;
union ib_gid gid;
struct work_struct work;
- struct se_portal_group port_guid_tpg;
- struct se_wwn port_guid_wwn;
- struct se_portal_group port_gid_tpg;
- struct se_wwn port_gid_wwn;
+ struct srpt_port_id port_guid_id;
+ struct srpt_port_id port_gid_id;
struct srpt_port_attrib port_attrib;
- wait_queue_head_t ch_releaseQ;
+ atomic_t refcount;
+ struct completion *freed_channels;
struct mutex mutex;
struct list_head nexus_list;
};
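ib_srpt.h mirrors that shift at the port level: the ch_releaseQ waitqueue becomes a refcount plus an optional completion, so srpt_release_sport() can sleep until the last channel drops its reference instead of polling the nexus lists. Sketch of the drop side, with hypothetical names:

	static void example_drop_port_ref(struct example_port *port)
	{
		/* last channel gone: unblock the teardown path, if it is waiting */
		if (atomic_dec_return(&port->refcount) == 0 && port->freed_channels)
			complete(port->freed_channels);
	}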
diff --git a/drivers/input/ff-memless.c b/drivers/input/ff-memless.c
index 1cb40c7475af..8229a9006917 100644
--- a/drivers/input/ff-memless.c
+++ b/drivers/input/ff-memless.c
@@ -489,6 +489,15 @@ static void ml_ff_destroy(struct ff_device *ff)
{
struct ml_device *ml = ff->private;
+ /*
+ * Even though we stop all playing effects when tearing down
+ * an input device (via input_device_flush() that calls into
+ * input_ff_flush() that stops and erases all effects), we
+ * do not actually stop the timer, and therefore we should
+ * do it here.
+ */
+ del_timer_sync(&ml->timer);
+
kfree(ml->private);
}
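The ff-memless fix closes a use-after-free window: input_ff_flush() stops the playing effects but not the timer itself, so ml_ff_destroy() must quiesce the timer before the private state goes away. A minimal sketch of the required ordering, with hypothetical names:

	static void example_destroy(struct example_ml *ml)
	{
		del_timer_sync(&ml->timer);	/* waits for a running handler */
		kfree(ml->private);		/* safe: the timer can no longer fire */
	}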
diff --git a/drivers/input/rmi4/rmi_f11.c b/drivers/input/rmi4/rmi_f11.c
index f28a7158b2ef..bbf9ae9f3f0c 100644
--- a/drivers/input/rmi4/rmi_f11.c
+++ b/drivers/input/rmi4/rmi_f11.c
@@ -510,7 +510,6 @@ struct f11_data {
struct rmi_2d_sensor_platform_data sensor_pdata;
unsigned long *abs_mask;
unsigned long *rel_mask;
- unsigned long *result_bits;
};
enum f11_finger_state {
@@ -1057,7 +1056,7 @@ static int rmi_f11_initialize(struct rmi_function *fn)
/*
** init instance data, fill in values and create any sysfs files
*/
- f11 = devm_kzalloc(&fn->dev, sizeof(struct f11_data) + mask_size * 3,
+ f11 = devm_kzalloc(&fn->dev, sizeof(struct f11_data) + mask_size * 2,
GFP_KERNEL);
if (!f11)
return -ENOMEM;
@@ -1076,8 +1075,6 @@ static int rmi_f11_initialize(struct rmi_function *fn)
+ sizeof(struct f11_data));
f11->rel_mask = (unsigned long *)((char *)f11
+ sizeof(struct f11_data) + mask_size);
- f11->result_bits = (unsigned long *)((char *)f11
- + sizeof(struct f11_data) + mask_size * 2);
set_bit(fn->irq_pos, f11->abs_mask);
set_bit(fn->irq_pos + 1, f11->rel_mask);
@@ -1284,8 +1281,8 @@ static irqreturn_t rmi_f11_attention(int irq, void *ctx)
valid_bytes = f11->sensor.attn_size;
memcpy(f11->sensor.data_pkt, drvdata->attn_data.data,
valid_bytes);
- drvdata->attn_data.data += f11->sensor.attn_size;
- drvdata->attn_data.size -= f11->sensor.attn_size;
+ drvdata->attn_data.data += valid_bytes;
+ drvdata->attn_data.size -= valid_bytes;
} else {
error = rmi_read_block(rmi_dev,
data_base_addr, f11->sensor.data_pkt,
diff --git a/drivers/input/rmi4/rmi_f12.c b/drivers/input/rmi4/rmi_f12.c
index d20a5d6780d1..7e97944f7616 100644
--- a/drivers/input/rmi4/rmi_f12.c
+++ b/drivers/input/rmi4/rmi_f12.c
@@ -55,6 +55,9 @@ struct f12_data {
const struct rmi_register_desc_item *data15;
u16 data15_offset;
+
+ unsigned long *abs_mask;
+ unsigned long *rel_mask;
};
static int rmi_f12_read_sensor_tuning(struct f12_data *f12)
@@ -209,8 +212,8 @@ static irqreturn_t rmi_f12_attention(int irq, void *ctx)
valid_bytes = sensor->attn_size;
memcpy(sensor->data_pkt, drvdata->attn_data.data,
valid_bytes);
- drvdata->attn_data.data += sensor->attn_size;
- drvdata->attn_data.size -= sensor->attn_size;
+ drvdata->attn_data.data += valid_bytes;
+ drvdata->attn_data.size -= valid_bytes;
} else {
retval = rmi_read_block(rmi_dev, f12->data_addr,
sensor->data_pkt, sensor->pkt_size);
@@ -291,9 +294,18 @@ static int rmi_f12_write_control_regs(struct rmi_function *fn)
static int rmi_f12_config(struct rmi_function *fn)
{
struct rmi_driver *drv = fn->rmi_dev->driver;
+ struct f12_data *f12 = dev_get_drvdata(&fn->dev);
+ struct rmi_2d_sensor *sensor;
int ret;
- drv->set_irq_bits(fn->rmi_dev, fn->irq_mask);
+ sensor = &f12->sensor;
+
+ if (!sensor->report_abs)
+ drv->clear_irq_bits(fn->rmi_dev, f12->abs_mask);
+ else
+ drv->set_irq_bits(fn->rmi_dev, f12->abs_mask);
+
+ drv->clear_irq_bits(fn->rmi_dev, f12->rel_mask);
ret = rmi_f12_write_control_regs(fn);
if (ret)
@@ -315,9 +327,12 @@ static int rmi_f12_probe(struct rmi_function *fn)
struct rmi_device_platform_data *pdata = rmi_get_platform_data(rmi_dev);
struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev);
u16 data_offset = 0;
+ int mask_size;
rmi_dbg(RMI_DEBUG_FN, &fn->dev, "%s\n", __func__);
+ mask_size = BITS_TO_LONGS(drvdata->irq_count) * sizeof(unsigned long);
+
ret = rmi_read(fn->rmi_dev, query_addr, &buf);
if (ret < 0) {
dev_err(&fn->dev, "Failed to read general info register: %d\n",
@@ -332,10 +347,19 @@ static int rmi_f12_probe(struct rmi_function *fn)
return -ENODEV;
}
- f12 = devm_kzalloc(&fn->dev, sizeof(struct f12_data), GFP_KERNEL);
+ f12 = devm_kzalloc(&fn->dev, sizeof(struct f12_data) + mask_size * 2,
+ GFP_KERNEL);
if (!f12)
return -ENOMEM;
+ f12->abs_mask = (unsigned long *)((char *)f12
+ + sizeof(struct f12_data));
+ f12->rel_mask = (unsigned long *)((char *)f12
+ + sizeof(struct f12_data) + mask_size);
+
+ set_bit(fn->irq_pos, f12->abs_mask);
+ set_bit(fn->irq_pos + 1, f12->rel_mask);
+
f12->has_dribble = !!(buf & BIT(3));
if (fn->dev.of_node) {
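rmi_f12.c adopts the single-allocation layout that rmi_f11.c already uses: the struct and its two IRQ bitmasks come from one devm_kzalloc(), with the mask pointers aimed at the trailing storage. A sketch of the layout math, equivalent to the pointer arithmetic in the hunk above:

	mask_size = BITS_TO_LONGS(drvdata->irq_count) * sizeof(unsigned long);
	f12 = devm_kzalloc(&fn->dev, sizeof(*f12) + mask_size * 2, GFP_KERNEL);
	if (!f12)
		return -ENOMEM;

	/* both bitmasks live immediately after the struct, in the same chunk */
	f12->abs_mask = (unsigned long *)(f12 + 1);
	f12->rel_mask = (unsigned long *)((char *)(f12 + 1) + mask_size);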
diff --git a/drivers/input/rmi4/rmi_f54.c b/drivers/input/rmi4/rmi_f54.c
index 710b02595486..897105b9a98b 100644
--- a/drivers/input/rmi4/rmi_f54.c
+++ b/drivers/input/rmi4/rmi_f54.c
@@ -359,7 +359,7 @@ static const struct vb2_ops rmi_f54_queue_ops = {
static const struct vb2_queue rmi_f54_queue = {
.type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ,
- .buf_struct_size = sizeof(struct vb2_buffer),
+ .buf_struct_size = sizeof(struct vb2_v4l2_buffer),
.ops = &rmi_f54_queue_ops,
.mem_ops = &vb2_vmalloc_memops,
.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC,
@@ -601,7 +601,7 @@ static int rmi_f54_config(struct rmi_function *fn)
{
struct rmi_driver *drv = fn->rmi_dev->driver;
- drv->set_irq_bits(fn->rmi_dev, fn->irq_mask);
+ drv->clear_irq_bits(fn->rmi_dev, fn->irq_mask);
return 0;
}
@@ -730,6 +730,7 @@ static void rmi_f54_remove(struct rmi_function *fn)
video_unregister_device(&f54->vdev);
v4l2_device_unregister(&f54->v4l2);
+ destroy_workqueue(f54->workqueue);
}
struct rmi_function_handler rmi_f54_handler = {
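The rmi_f54.c buf_struct_size fix matters because videobuf2 allocates that many bytes per buffer, and the V4L2 layer then treats each buffer as a vb2_v4l2_buffer, which embeds vb2_buffer as its first member; sizing for the base struct under-allocates. Abridged layout, for illustration only:

	struct vb2_v4l2_buffer {
		struct vb2_buffer vb2_buf;	/* core buffer must come first */
		/* ... V4L2-specific fields follow, hence the larger size ... */
	};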
diff --git a/drivers/input/touchscreen/cyttsp4_core.c b/drivers/input/touchscreen/cyttsp4_core.c
index 4b22d49a0f49..6bcffc930384 100644
--- a/drivers/input/touchscreen/cyttsp4_core.c
+++ b/drivers/input/touchscreen/cyttsp4_core.c
@@ -1990,11 +1990,6 @@ static int cyttsp4_mt_probe(struct cyttsp4 *cd)
/* get sysinfo */
md->si = &cd->sysinfo;
- if (!md->si) {
- dev_err(dev, "%s: Fail get sysinfo pointer from core p=%p\n",
- __func__, md->si);
- goto error_get_sysinfo;
- }
rc = cyttsp4_setup_input_device(cd);
if (rc)
@@ -2004,8 +1999,6 @@ static int cyttsp4_mt_probe(struct cyttsp4 *cd)
error_init_input:
input_free_device(md->input);
-error_get_sysinfo:
- input_set_drvdata(md->input, NULL);
error_alloc_failed:
dev_err(dev, "%s failed.\n", __func__);
return rc;
diff --git a/drivers/interconnect/qcom/Kconfig b/drivers/interconnect/qcom/Kconfig
index 6ab4012a059a..c49afbea3458 100644
--- a/drivers/interconnect/qcom/Kconfig
+++ b/drivers/interconnect/qcom/Kconfig
@@ -5,6 +5,15 @@ config INTERCONNECT_QCOM
help
Support for Qualcomm's Network-on-Chip interconnect hardware.
+config INTERCONNECT_QCOM_MSM8974
+ tristate "Qualcomm MSM8974 interconnect driver"
+ depends on INTERCONNECT_QCOM
+ depends on QCOM_SMD_RPM
+ select INTERCONNECT_QCOM_SMD_RPM
+ help
+ This is a driver for the Qualcomm Network-on-Chip on msm8974-based
+ platforms.
+
config INTERCONNECT_QCOM_QCS404
tristate "Qualcomm QCS404 interconnect driver"
depends on INTERCONNECT_QCOM
diff --git a/drivers/interconnect/qcom/Makefile b/drivers/interconnect/qcom/Makefile
index 67dafb783dec..9adf9e380545 100644
--- a/drivers/interconnect/qcom/Makefile
+++ b/drivers/interconnect/qcom/Makefile
@@ -1,9 +1,11 @@
# SPDX-License-Identifier: GPL-2.0
+qnoc-msm8974-objs := msm8974.o
qnoc-qcs404-objs := qcs404.o
qnoc-sdm845-objs := sdm845.o
icc-smd-rpm-objs := smd-rpm.o
+obj-$(CONFIG_INTERCONNECT_QCOM_MSM8974) += qnoc-msm8974.o
obj-$(CONFIG_INTERCONNECT_QCOM_QCS404) += qnoc-qcs404.o
obj-$(CONFIG_INTERCONNECT_QCOM_SDM845) += qnoc-sdm845.o
obj-$(CONFIG_INTERCONNECT_QCOM_SMD_RPM) += icc-smd-rpm.o
diff --git a/drivers/interconnect/qcom/msm8974.c b/drivers/interconnect/qcom/msm8974.c
new file mode 100644
index 000000000000..ce599a0c83d9
--- /dev/null
+++ b/drivers/interconnect/qcom/msm8974.c
@@ -0,0 +1,784 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Brian Masney <masneyb@onstation.org>
+ *
+ * Based on MSM bus code from downstream MSM kernel sources.
+ * Copyright (c) 2012-2013 The Linux Foundation. All rights reserved.
+ *
+ * Based on qcs404.c
+ * Copyright (C) 2019 Linaro Ltd
+ *
+ * Here's a rough representation that shows the various buses that form the
+ * Network On Chip (NOC) for the msm8974:
+ *
+ * Multimedia Subsystem (MMSS)
+ * |----------+-----------------------------------+-----------|
+ * | |
+ * | |
+ * Config | Bus Interface | Memory Controller
+ * |------------+-+-----------| |------------+-+-----------|
+ * | |
+ * | |
+ * | System |
+ * |--------------+-+---------------------------------+-+-------------|
+ * | |
+ * | |
+ * Peripheral | On Chip | Memory (OCMEM)
+ * |------------+-------------| |------------+-------------|
+ */
+
+#include <dt-bindings/interconnect/qcom,msm8974.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/interconnect-provider.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "smd-rpm.h"
+
+enum {
+ MSM8974_BIMC_MAS_AMPSS_M0 = 1,
+ MSM8974_BIMC_MAS_AMPSS_M1,
+ MSM8974_BIMC_MAS_MSS_PROC,
+ MSM8974_BIMC_TO_MNOC,
+ MSM8974_BIMC_TO_SNOC,
+ MSM8974_BIMC_SLV_EBI_CH0,
+ MSM8974_BIMC_SLV_AMPSS_L2,
+ MSM8974_CNOC_MAS_RPM_INST,
+ MSM8974_CNOC_MAS_RPM_DATA,
+ MSM8974_CNOC_MAS_RPM_SYS,
+ MSM8974_CNOC_MAS_DEHR,
+ MSM8974_CNOC_MAS_QDSS_DAP,
+ MSM8974_CNOC_MAS_SPDM,
+ MSM8974_CNOC_MAS_TIC,
+ MSM8974_CNOC_SLV_CLK_CTL,
+ MSM8974_CNOC_SLV_CNOC_MSS,
+ MSM8974_CNOC_SLV_SECURITY,
+ MSM8974_CNOC_SLV_TCSR,
+ MSM8974_CNOC_SLV_TLMM,
+ MSM8974_CNOC_SLV_CRYPTO_0_CFG,
+ MSM8974_CNOC_SLV_CRYPTO_1_CFG,
+ MSM8974_CNOC_SLV_IMEM_CFG,
+ MSM8974_CNOC_SLV_MESSAGE_RAM,
+ MSM8974_CNOC_SLV_BIMC_CFG,
+ MSM8974_CNOC_SLV_BOOT_ROM,
+ MSM8974_CNOC_SLV_PMIC_ARB,
+ MSM8974_CNOC_SLV_SPDM_WRAPPER,
+ MSM8974_CNOC_SLV_DEHR_CFG,
+ MSM8974_CNOC_SLV_MPM,
+ MSM8974_CNOC_SLV_QDSS_CFG,
+ MSM8974_CNOC_SLV_RBCPR_CFG,
+ MSM8974_CNOC_SLV_RBCPR_QDSS_APU_CFG,
+ MSM8974_CNOC_TO_SNOC,
+ MSM8974_CNOC_SLV_CNOC_ONOC_CFG,
+ MSM8974_CNOC_SLV_CNOC_MNOC_MMSS_CFG,
+ MSM8974_CNOC_SLV_CNOC_MNOC_CFG,
+ MSM8974_CNOC_SLV_PNOC_CFG,
+ MSM8974_CNOC_SLV_SNOC_MPU_CFG,
+ MSM8974_CNOC_SLV_SNOC_CFG,
+ MSM8974_CNOC_SLV_EBI1_DLL_CFG,
+ MSM8974_CNOC_SLV_PHY_APU_CFG,
+ MSM8974_CNOC_SLV_EBI1_PHY_CFG,
+ MSM8974_CNOC_SLV_RPM,
+ MSM8974_CNOC_SLV_SERVICE_CNOC,
+ MSM8974_MNOC_MAS_GRAPHICS_3D,
+ MSM8974_MNOC_MAS_JPEG,
+ MSM8974_MNOC_MAS_MDP_PORT0,
+ MSM8974_MNOC_MAS_VIDEO_P0,
+ MSM8974_MNOC_MAS_VIDEO_P1,
+ MSM8974_MNOC_MAS_VFE,
+ MSM8974_MNOC_TO_CNOC,
+ MSM8974_MNOC_TO_BIMC,
+ MSM8974_MNOC_SLV_CAMERA_CFG,
+ MSM8974_MNOC_SLV_DISPLAY_CFG,
+ MSM8974_MNOC_SLV_OCMEM_CFG,
+ MSM8974_MNOC_SLV_CPR_CFG,
+ MSM8974_MNOC_SLV_CPR_XPU_CFG,
+ MSM8974_MNOC_SLV_MISC_CFG,
+ MSM8974_MNOC_SLV_MISC_XPU_CFG,
+ MSM8974_MNOC_SLV_VENUS_CFG,
+ MSM8974_MNOC_SLV_GRAPHICS_3D_CFG,
+ MSM8974_MNOC_SLV_MMSS_CLK_CFG,
+ MSM8974_MNOC_SLV_MMSS_CLK_XPU_CFG,
+ MSM8974_MNOC_SLV_MNOC_MPU_CFG,
+ MSM8974_MNOC_SLV_ONOC_MPU_CFG,
+ MSM8974_MNOC_SLV_SERVICE_MNOC,
+ MSM8974_OCMEM_NOC_TO_OCMEM_VNOC,
+ MSM8974_OCMEM_MAS_JPEG_OCMEM,
+ MSM8974_OCMEM_MAS_MDP_OCMEM,
+ MSM8974_OCMEM_MAS_VIDEO_P0_OCMEM,
+ MSM8974_OCMEM_MAS_VIDEO_P1_OCMEM,
+ MSM8974_OCMEM_MAS_VFE_OCMEM,
+ MSM8974_OCMEM_MAS_CNOC_ONOC_CFG,
+ MSM8974_OCMEM_SLV_SERVICE_ONOC,
+ MSM8974_OCMEM_VNOC_TO_SNOC,
+ MSM8974_OCMEM_VNOC_TO_OCMEM_NOC,
+ MSM8974_OCMEM_VNOC_MAS_GFX3D,
+ MSM8974_OCMEM_SLV_OCMEM,
+ MSM8974_PNOC_MAS_PNOC_CFG,
+ MSM8974_PNOC_MAS_SDCC_1,
+ MSM8974_PNOC_MAS_SDCC_3,
+ MSM8974_PNOC_MAS_SDCC_4,
+ MSM8974_PNOC_MAS_SDCC_2,
+ MSM8974_PNOC_MAS_TSIF,
+ MSM8974_PNOC_MAS_BAM_DMA,
+ MSM8974_PNOC_MAS_BLSP_2,
+ MSM8974_PNOC_MAS_USB_HSIC,
+ MSM8974_PNOC_MAS_BLSP_1,
+ MSM8974_PNOC_MAS_USB_HS,
+ MSM8974_PNOC_TO_SNOC,
+ MSM8974_PNOC_SLV_SDCC_1,
+ MSM8974_PNOC_SLV_SDCC_3,
+ MSM8974_PNOC_SLV_SDCC_2,
+ MSM8974_PNOC_SLV_SDCC_4,
+ MSM8974_PNOC_SLV_TSIF,
+ MSM8974_PNOC_SLV_BAM_DMA,
+ MSM8974_PNOC_SLV_BLSP_2,
+ MSM8974_PNOC_SLV_USB_HSIC,
+ MSM8974_PNOC_SLV_BLSP_1,
+ MSM8974_PNOC_SLV_USB_HS,
+ MSM8974_PNOC_SLV_PDM,
+ MSM8974_PNOC_SLV_PERIPH_APU_CFG,
+ MSM8974_PNOC_SLV_PNOC_MPU_CFG,
+ MSM8974_PNOC_SLV_PRNG,
+ MSM8974_PNOC_SLV_SERVICE_PNOC,
+ MSM8974_SNOC_MAS_LPASS_AHB,
+ MSM8974_SNOC_MAS_QDSS_BAM,
+ MSM8974_SNOC_MAS_SNOC_CFG,
+ MSM8974_SNOC_TO_BIMC,
+ MSM8974_SNOC_TO_CNOC,
+ MSM8974_SNOC_TO_PNOC,
+ MSM8974_SNOC_TO_OCMEM_VNOC,
+ MSM8974_SNOC_MAS_CRYPTO_CORE0,
+ MSM8974_SNOC_MAS_CRYPTO_CORE1,
+ MSM8974_SNOC_MAS_LPASS_PROC,
+ MSM8974_SNOC_MAS_MSS,
+ MSM8974_SNOC_MAS_MSS_NAV,
+ MSM8974_SNOC_MAS_OCMEM_DMA,
+ MSM8974_SNOC_MAS_WCSS,
+ MSM8974_SNOC_MAS_QDSS_ETR,
+ MSM8974_SNOC_MAS_USB3,
+ MSM8974_SNOC_SLV_AMPSS,
+ MSM8974_SNOC_SLV_LPASS,
+ MSM8974_SNOC_SLV_USB3,
+ MSM8974_SNOC_SLV_WCSS,
+ MSM8974_SNOC_SLV_OCIMEM,
+ MSM8974_SNOC_SLV_SNOC_OCMEM,
+ MSM8974_SNOC_SLV_SERVICE_SNOC,
+ MSM8974_SNOC_SLV_QDSS_STM,
+};
+
+#define RPM_BUS_MASTER_REQ 0x73616d62
+#define RPM_BUS_SLAVE_REQ 0x766c7362
+
+#define to_msm8974_icc_provider(_provider) \
+ container_of(_provider, struct msm8974_icc_provider, provider)
+
+static const struct clk_bulk_data msm8974_icc_bus_clocks[] = {
+ { .id = "bus" },
+ { .id = "bus_a" },
+};
+
+/**
+ * struct msm8974_icc_provider - Qualcomm specific interconnect provider
+ * @provider: generic interconnect provider
+ * @bus_clks: the clk_bulk_data table of bus clocks
+ * @num_clks: the total number of clk_bulk_data entries
+ */
+struct msm8974_icc_provider {
+ struct icc_provider provider;
+ struct clk_bulk_data *bus_clks;
+ int num_clks;
+};
+
+#define MSM8974_ICC_MAX_LINKS 3
+
+/**
+ * struct msm8974_icc_node - Qualcomm specific interconnect nodes
+ * @name: the node name used in debugfs
+ * @id: a unique node identifier
+ * @links: an array of nodes where we can go next while traversing
+ * @num_links: the total number of @links
+ * @buswidth: width of the interconnect between a node and the bus (bytes)
+ * @mas_rpm_id: RPM ID for devices that are bus masters
+ * @slv_rpm_id: RPM ID for devices that are bus slaves
+ * @rate: current bus clock rate in Hz
+ */
+struct msm8974_icc_node {
+ unsigned char *name;
+ u16 id;
+ u16 links[MSM8974_ICC_MAX_LINKS];
+ u16 num_links;
+ u16 buswidth;
+ int mas_rpm_id;
+ int slv_rpm_id;
+ u64 rate;
+};
+
+struct msm8974_icc_desc {
+ struct msm8974_icc_node **nodes;
+ size_t num_nodes;
+};
+
+#define DEFINE_QNODE(_name, _id, _buswidth, _mas_rpm_id, _slv_rpm_id, \
+ ...) \
+ static struct msm8974_icc_node _name = { \
+ .name = #_name, \
+ .id = _id, \
+ .buswidth = _buswidth, \
+ .mas_rpm_id = _mas_rpm_id, \
+ .slv_rpm_id = _slv_rpm_id, \
+ .num_links = ARRAY_SIZE(((int[]){ __VA_ARGS__ })), \
+ .links = { __VA_ARGS__ }, \
+ }
+
+DEFINE_QNODE(mas_ampss_m0, MSM8974_BIMC_MAS_AMPSS_M0, 8, 0, -1);
+DEFINE_QNODE(mas_ampss_m1, MSM8974_BIMC_MAS_AMPSS_M1, 8, 0, -1);
+DEFINE_QNODE(mas_mss_proc, MSM8974_BIMC_MAS_MSS_PROC, 8, 1, -1);
+DEFINE_QNODE(bimc_to_mnoc, MSM8974_BIMC_TO_MNOC, 8, 2, -1, MSM8974_BIMC_SLV_EBI_CH0);
+DEFINE_QNODE(bimc_to_snoc, MSM8974_BIMC_TO_SNOC, 8, 3, 2, MSM8974_SNOC_TO_BIMC, MSM8974_BIMC_SLV_EBI_CH0, MSM8974_BIMC_MAS_AMPSS_M0);
+DEFINE_QNODE(slv_ebi_ch0, MSM8974_BIMC_SLV_EBI_CH0, 8, -1, 0);
+DEFINE_QNODE(slv_ampss_l2, MSM8974_BIMC_SLV_AMPSS_L2, 8, -1, 1);
+
+static struct msm8974_icc_node *msm8974_bimc_nodes[] = {
+ [BIMC_MAS_AMPSS_M0] = &mas_ampss_m0,
+ [BIMC_MAS_AMPSS_M1] = &mas_ampss_m1,
+ [BIMC_MAS_MSS_PROC] = &mas_mss_proc,
+ [BIMC_TO_MNOC] = &bimc_to_mnoc,
+ [BIMC_TO_SNOC] = &bimc_to_snoc,
+ [BIMC_SLV_EBI_CH0] = &slv_ebi_ch0,
+ [BIMC_SLV_AMPSS_L2] = &slv_ampss_l2,
+};
+
+static struct msm8974_icc_desc msm8974_bimc = {
+ .nodes = msm8974_bimc_nodes,
+ .num_nodes = ARRAY_SIZE(msm8974_bimc_nodes),
+};
+
+DEFINE_QNODE(mas_rpm_inst, MSM8974_CNOC_MAS_RPM_INST, 8, 45, -1);
+DEFINE_QNODE(mas_rpm_data, MSM8974_CNOC_MAS_RPM_DATA, 8, 46, -1);
+DEFINE_QNODE(mas_rpm_sys, MSM8974_CNOC_MAS_RPM_SYS, 8, 47, -1);
+DEFINE_QNODE(mas_dehr, MSM8974_CNOC_MAS_DEHR, 8, 48, -1);
+DEFINE_QNODE(mas_qdss_dap, MSM8974_CNOC_MAS_QDSS_DAP, 8, 49, -1);
+DEFINE_QNODE(mas_spdm, MSM8974_CNOC_MAS_SPDM, 8, 50, -1);
+DEFINE_QNODE(mas_tic, MSM8974_CNOC_MAS_TIC, 8, 51, -1);
+DEFINE_QNODE(slv_clk_ctl, MSM8974_CNOC_SLV_CLK_CTL, 8, -1, 47);
+DEFINE_QNODE(slv_cnoc_mss, MSM8974_CNOC_SLV_CNOC_MSS, 8, -1, 48);
+DEFINE_QNODE(slv_security, MSM8974_CNOC_SLV_SECURITY, 8, -1, 49);
+DEFINE_QNODE(slv_tcsr, MSM8974_CNOC_SLV_TCSR, 8, -1, 50);
+DEFINE_QNODE(slv_tlmm, MSM8974_CNOC_SLV_TLMM, 8, -1, 51);
+DEFINE_QNODE(slv_crypto_0_cfg, MSM8974_CNOC_SLV_CRYPTO_0_CFG, 8, -1, 52);
+DEFINE_QNODE(slv_crypto_1_cfg, MSM8974_CNOC_SLV_CRYPTO_1_CFG, 8, -1, 53);
+DEFINE_QNODE(slv_imem_cfg, MSM8974_CNOC_SLV_IMEM_CFG, 8, -1, 54);
+DEFINE_QNODE(slv_message_ram, MSM8974_CNOC_SLV_MESSAGE_RAM, 8, -1, 55);
+DEFINE_QNODE(slv_bimc_cfg, MSM8974_CNOC_SLV_BIMC_CFG, 8, -1, 56);
+DEFINE_QNODE(slv_boot_rom, MSM8974_CNOC_SLV_BOOT_ROM, 8, -1, 57);
+DEFINE_QNODE(slv_pmic_arb, MSM8974_CNOC_SLV_PMIC_ARB, 8, -1, 59);
+DEFINE_QNODE(slv_spdm_wrapper, MSM8974_CNOC_SLV_SPDM_WRAPPER, 8, -1, 60);
+DEFINE_QNODE(slv_dehr_cfg, MSM8974_CNOC_SLV_DEHR_CFG, 8, -1, 61);
+DEFINE_QNODE(slv_mpm, MSM8974_CNOC_SLV_MPM, 8, -1, 62);
+DEFINE_QNODE(slv_qdss_cfg, MSM8974_CNOC_SLV_QDSS_CFG, 8, -1, 63);
+DEFINE_QNODE(slv_rbcpr_cfg, MSM8974_CNOC_SLV_RBCPR_CFG, 8, -1, 64);
+DEFINE_QNODE(slv_rbcpr_qdss_apu_cfg, MSM8974_CNOC_SLV_RBCPR_QDSS_APU_CFG, 8, -1, 65);
+DEFINE_QNODE(cnoc_to_snoc, MSM8974_CNOC_TO_SNOC, 8, 52, 75);
+DEFINE_QNODE(slv_cnoc_onoc_cfg, MSM8974_CNOC_SLV_CNOC_ONOC_CFG, 8, -1, 68);
+DEFINE_QNODE(slv_cnoc_mnoc_mmss_cfg, MSM8974_CNOC_SLV_CNOC_MNOC_MMSS_CFG, 8, -1, 58);
+DEFINE_QNODE(slv_cnoc_mnoc_cfg, MSM8974_CNOC_SLV_CNOC_MNOC_CFG, 8, -1, 66);
+DEFINE_QNODE(slv_pnoc_cfg, MSM8974_CNOC_SLV_PNOC_CFG, 8, -1, 69);
+DEFINE_QNODE(slv_snoc_mpu_cfg, MSM8974_CNOC_SLV_SNOC_MPU_CFG, 8, -1, 67);
+DEFINE_QNODE(slv_snoc_cfg, MSM8974_CNOC_SLV_SNOC_CFG, 8, -1, 70);
+DEFINE_QNODE(slv_ebi1_dll_cfg, MSM8974_CNOC_SLV_EBI1_DLL_CFG, 8, -1, 71);
+DEFINE_QNODE(slv_phy_apu_cfg, MSM8974_CNOC_SLV_PHY_APU_CFG, 8, -1, 72);
+DEFINE_QNODE(slv_ebi1_phy_cfg, MSM8974_CNOC_SLV_EBI1_PHY_CFG, 8, -1, 73);
+DEFINE_QNODE(slv_rpm, MSM8974_CNOC_SLV_RPM, 8, -1, 74);
+DEFINE_QNODE(slv_service_cnoc, MSM8974_CNOC_SLV_SERVICE_CNOC, 8, -1, 76);
+
+static struct msm8974_icc_node *msm8974_cnoc_nodes[] = {
+ [CNOC_MAS_RPM_INST] = &mas_rpm_inst,
+ [CNOC_MAS_RPM_DATA] = &mas_rpm_data,
+ [CNOC_MAS_RPM_SYS] = &mas_rpm_sys,
+ [CNOC_MAS_DEHR] = &mas_dehr,
+ [CNOC_MAS_QDSS_DAP] = &mas_qdss_dap,
+ [CNOC_MAS_SPDM] = &mas_spdm,
+ [CNOC_MAS_TIC] = &mas_tic,
+ [CNOC_SLV_CLK_CTL] = &slv_clk_ctl,
+ [CNOC_SLV_CNOC_MSS] = &slv_cnoc_mss,
+ [CNOC_SLV_SECURITY] = &slv_security,
+ [CNOC_SLV_TCSR] = &slv_tcsr,
+ [CNOC_SLV_TLMM] = &slv_tlmm,
+ [CNOC_SLV_CRYPTO_0_CFG] = &slv_crypto_0_cfg,
+ [CNOC_SLV_CRYPTO_1_CFG] = &slv_crypto_1_cfg,
+ [CNOC_SLV_IMEM_CFG] = &slv_imem_cfg,
+ [CNOC_SLV_MESSAGE_RAM] = &slv_message_ram,
+ [CNOC_SLV_BIMC_CFG] = &slv_bimc_cfg,
+ [CNOC_SLV_BOOT_ROM] = &slv_boot_rom,
+ [CNOC_SLV_PMIC_ARB] = &slv_pmic_arb,
+ [CNOC_SLV_SPDM_WRAPPER] = &slv_spdm_wrapper,
+ [CNOC_SLV_DEHR_CFG] = &slv_dehr_cfg,
+ [CNOC_SLV_MPM] = &slv_mpm,
+ [CNOC_SLV_QDSS_CFG] = &slv_qdss_cfg,
+ [CNOC_SLV_RBCPR_CFG] = &slv_rbcpr_cfg,
+ [CNOC_SLV_RBCPR_QDSS_APU_CFG] = &slv_rbcpr_qdss_apu_cfg,
+ [CNOC_TO_SNOC] = &cnoc_to_snoc,
+ [CNOC_SLV_CNOC_ONOC_CFG] = &slv_cnoc_onoc_cfg,
+ [CNOC_SLV_CNOC_MNOC_MMSS_CFG] = &slv_cnoc_mnoc_mmss_cfg,
+ [CNOC_SLV_CNOC_MNOC_CFG] = &slv_cnoc_mnoc_cfg,
+ [CNOC_SLV_PNOC_CFG] = &slv_pnoc_cfg,
+ [CNOC_SLV_SNOC_MPU_CFG] = &slv_snoc_mpu_cfg,
+ [CNOC_SLV_SNOC_CFG] = &slv_snoc_cfg,
+ [CNOC_SLV_EBI1_DLL_CFG] = &slv_ebi1_dll_cfg,
+ [CNOC_SLV_PHY_APU_CFG] = &slv_phy_apu_cfg,
+ [CNOC_SLV_EBI1_PHY_CFG] = &slv_ebi1_phy_cfg,
+ [CNOC_SLV_RPM] = &slv_rpm,
+ [CNOC_SLV_SERVICE_CNOC] = &slv_service_cnoc,
+};
+
+static struct msm8974_icc_desc msm8974_cnoc = {
+ .nodes = msm8974_cnoc_nodes,
+ .num_nodes = ARRAY_SIZE(msm8974_cnoc_nodes),
+};
+
+DEFINE_QNODE(mas_graphics_3d, MSM8974_MNOC_MAS_GRAPHICS_3D, 16, 6, -1, MSM8974_MNOC_TO_BIMC);
+DEFINE_QNODE(mas_jpeg, MSM8974_MNOC_MAS_JPEG, 16, 7, -1, MSM8974_MNOC_TO_BIMC);
+DEFINE_QNODE(mas_mdp_port0, MSM8974_MNOC_MAS_MDP_PORT0, 16, 8, -1, MSM8974_MNOC_TO_BIMC);
+DEFINE_QNODE(mas_video_p0, MSM8974_MNOC_MAS_VIDEO_P0, 16, 9, -1);
+DEFINE_QNODE(mas_video_p1, MSM8974_MNOC_MAS_VIDEO_P1, 16, 10, -1);
+DEFINE_QNODE(mas_vfe, MSM8974_MNOC_MAS_VFE, 16, 11, -1, MSM8974_MNOC_TO_BIMC);
+DEFINE_QNODE(mnoc_to_cnoc, MSM8974_MNOC_TO_CNOC, 16, 4, -1);
+DEFINE_QNODE(mnoc_to_bimc, MSM8974_MNOC_TO_BIMC, 16, -1, 16, MSM8974_BIMC_TO_MNOC);
+DEFINE_QNODE(slv_camera_cfg, MSM8974_MNOC_SLV_CAMERA_CFG, 16, -1, 3);
+DEFINE_QNODE(slv_display_cfg, MSM8974_MNOC_SLV_DISPLAY_CFG, 16, -1, 4);
+DEFINE_QNODE(slv_ocmem_cfg, MSM8974_MNOC_SLV_OCMEM_CFG, 16, -1, 5);
+DEFINE_QNODE(slv_cpr_cfg, MSM8974_MNOC_SLV_CPR_CFG, 16, -1, 6);
+DEFINE_QNODE(slv_cpr_xpu_cfg, MSM8974_MNOC_SLV_CPR_XPU_CFG, 16, -1, 7);
+DEFINE_QNODE(slv_misc_cfg, MSM8974_MNOC_SLV_MISC_CFG, 16, -1, 8);
+DEFINE_QNODE(slv_misc_xpu_cfg, MSM8974_MNOC_SLV_MISC_XPU_CFG, 16, -1, 9);
+DEFINE_QNODE(slv_venus_cfg, MSM8974_MNOC_SLV_VENUS_CFG, 16, -1, 10);
+DEFINE_QNODE(slv_graphics_3d_cfg, MSM8974_MNOC_SLV_GRAPHICS_3D_CFG, 16, -1, 11);
+DEFINE_QNODE(slv_mmss_clk_cfg, MSM8974_MNOC_SLV_MMSS_CLK_CFG, 16, -1, 12);
+DEFINE_QNODE(slv_mmss_clk_xpu_cfg, MSM8974_MNOC_SLV_MMSS_CLK_XPU_CFG, 16, -1, 13);
+DEFINE_QNODE(slv_mnoc_mpu_cfg, MSM8974_MNOC_SLV_MNOC_MPU_CFG, 16, -1, 14);
+DEFINE_QNODE(slv_onoc_mpu_cfg, MSM8974_MNOC_SLV_ONOC_MPU_CFG, 16, -1, 15);
+DEFINE_QNODE(slv_service_mnoc, MSM8974_MNOC_SLV_SERVICE_MNOC, 16, -1, 17);
+
+static struct msm8974_icc_node *msm8974_mnoc_nodes[] = {
+ [MNOC_MAS_GRAPHICS_3D] = &mas_graphics_3d,
+ [MNOC_MAS_JPEG] = &mas_jpeg,
+ [MNOC_MAS_MDP_PORT0] = &mas_mdp_port0,
+ [MNOC_MAS_VIDEO_P0] = &mas_video_p0,
+ [MNOC_MAS_VIDEO_P1] = &mas_video_p1,
+ [MNOC_MAS_VFE] = &mas_vfe,
+ [MNOC_TO_CNOC] = &mnoc_to_cnoc,
+ [MNOC_TO_BIMC] = &mnoc_to_bimc,
+ [MNOC_SLV_CAMERA_CFG] = &slv_camera_cfg,
+ [MNOC_SLV_DISPLAY_CFG] = &slv_display_cfg,
+ [MNOC_SLV_OCMEM_CFG] = &slv_ocmem_cfg,
+ [MNOC_SLV_CPR_CFG] = &slv_cpr_cfg,
+ [MNOC_SLV_CPR_XPU_CFG] = &slv_cpr_xpu_cfg,
+ [MNOC_SLV_MISC_CFG] = &slv_misc_cfg,
+ [MNOC_SLV_MISC_XPU_CFG] = &slv_misc_xpu_cfg,
+ [MNOC_SLV_VENUS_CFG] = &slv_venus_cfg,
+ [MNOC_SLV_GRAPHICS_3D_CFG] = &slv_graphics_3d_cfg,
+ [MNOC_SLV_MMSS_CLK_CFG] = &slv_mmss_clk_cfg,
+ [MNOC_SLV_MMSS_CLK_XPU_CFG] = &slv_mmss_clk_xpu_cfg,
+ [MNOC_SLV_MNOC_MPU_CFG] = &slv_mnoc_mpu_cfg,
+ [MNOC_SLV_ONOC_MPU_CFG] = &slv_onoc_mpu_cfg,
+ [MNOC_SLV_SERVICE_MNOC] = &slv_service_mnoc,
+};
+
+static struct msm8974_icc_desc msm8974_mnoc = {
+ .nodes = msm8974_mnoc_nodes,
+ .num_nodes = ARRAY_SIZE(msm8974_mnoc_nodes),
+};
+
+DEFINE_QNODE(ocmem_noc_to_ocmem_vnoc, MSM8974_OCMEM_NOC_TO_OCMEM_VNOC, 16, 54, 78, MSM8974_OCMEM_SLV_OCMEM);
+DEFINE_QNODE(mas_jpeg_ocmem, MSM8974_OCMEM_MAS_JPEG_OCMEM, 16, 13, -1);
+DEFINE_QNODE(mas_mdp_ocmem, MSM8974_OCMEM_MAS_MDP_OCMEM, 16, 14, -1);
+DEFINE_QNODE(mas_video_p0_ocmem, MSM8974_OCMEM_MAS_VIDEO_P0_OCMEM, 16, 15, -1);
+DEFINE_QNODE(mas_video_p1_ocmem, MSM8974_OCMEM_MAS_VIDEO_P1_OCMEM, 16, 16, -1);
+DEFINE_QNODE(mas_vfe_ocmem, MSM8974_OCMEM_MAS_VFE_OCMEM, 16, 17, -1);
+DEFINE_QNODE(mas_cnoc_onoc_cfg, MSM8974_OCMEM_MAS_CNOC_ONOC_CFG, 16, 12, -1);
+DEFINE_QNODE(slv_service_onoc, MSM8974_OCMEM_SLV_SERVICE_ONOC, 16, -1, 19);
+DEFINE_QNODE(slv_ocmem, MSM8974_OCMEM_SLV_OCMEM, 16, -1, 18);
+
+/* Virtual NoC is needed for connection to OCMEM */
+DEFINE_QNODE(ocmem_vnoc_to_onoc, MSM8974_OCMEM_VNOC_TO_OCMEM_NOC, 16, 56, 79, MSM8974_OCMEM_NOC_TO_OCMEM_VNOC);
+DEFINE_QNODE(ocmem_vnoc_to_snoc, MSM8974_OCMEM_VNOC_TO_SNOC, 8, 57, 80);
+DEFINE_QNODE(mas_v_ocmem_gfx3d, MSM8974_OCMEM_VNOC_MAS_GFX3D, 8, 55, -1, MSM8974_OCMEM_VNOC_TO_OCMEM_NOC);
+
+static struct msm8974_icc_node *msm8974_onoc_nodes[] = {
+ [OCMEM_NOC_TO_OCMEM_VNOC] = &ocmem_noc_to_ocmem_vnoc,
+ [OCMEM_MAS_JPEG_OCMEM] = &mas_jpeg_ocmem,
+ [OCMEM_MAS_MDP_OCMEM] = &mas_mdp_ocmem,
+ [OCMEM_MAS_VIDEO_P0_OCMEM] = &mas_video_p0_ocmem,
+ [OCMEM_MAS_VIDEO_P1_OCMEM] = &mas_video_p1_ocmem,
+ [OCMEM_MAS_VFE_OCMEM] = &mas_vfe_ocmem,
+ [OCMEM_MAS_CNOC_ONOC_CFG] = &mas_cnoc_onoc_cfg,
+ [OCMEM_SLV_SERVICE_ONOC] = &slv_service_onoc,
+ [OCMEM_VNOC_TO_SNOC] = &ocmem_vnoc_to_snoc,
+ [OCMEM_VNOC_TO_OCMEM_NOC] = &ocmem_vnoc_to_onoc,
+ [OCMEM_VNOC_MAS_GFX3D] = &mas_v_ocmem_gfx3d,
+ [OCMEM_SLV_OCMEM] = &slv_ocmem,
+};
+
+static struct msm8974_icc_desc msm8974_onoc = {
+ .nodes = msm8974_onoc_nodes,
+ .num_nodes = ARRAY_SIZE(msm8974_onoc_nodes),
+};
+
+DEFINE_QNODE(mas_pnoc_cfg, MSM8974_PNOC_MAS_PNOC_CFG, 8, 43, -1);
+DEFINE_QNODE(mas_sdcc_1, MSM8974_PNOC_MAS_SDCC_1, 8, 33, -1, MSM8974_PNOC_TO_SNOC);
+DEFINE_QNODE(mas_sdcc_3, MSM8974_PNOC_MAS_SDCC_3, 8, 34, -1, MSM8974_PNOC_TO_SNOC);
+DEFINE_QNODE(mas_sdcc_4, MSM8974_PNOC_MAS_SDCC_4, 8, 36, -1, MSM8974_PNOC_TO_SNOC);
+DEFINE_QNODE(mas_sdcc_2, MSM8974_PNOC_MAS_SDCC_2, 8, 35, -1, MSM8974_PNOC_TO_SNOC);
+DEFINE_QNODE(mas_tsif, MSM8974_PNOC_MAS_TSIF, 8, 37, -1, MSM8974_PNOC_TO_SNOC);
+DEFINE_QNODE(mas_bam_dma, MSM8974_PNOC_MAS_BAM_DMA, 8, 38, -1);
+DEFINE_QNODE(mas_blsp_2, MSM8974_PNOC_MAS_BLSP_2, 8, 39, -1, MSM8974_PNOC_TO_SNOC);
+DEFINE_QNODE(mas_usb_hsic, MSM8974_PNOC_MAS_USB_HSIC, 8, 40, -1, MSM8974_PNOC_TO_SNOC);
+DEFINE_QNODE(mas_blsp_1, MSM8974_PNOC_MAS_BLSP_1, 8, 41, -1, MSM8974_PNOC_TO_SNOC);
+DEFINE_QNODE(mas_usb_hs, MSM8974_PNOC_MAS_USB_HS, 8, 42, -1, MSM8974_PNOC_TO_SNOC);
+DEFINE_QNODE(pnoc_to_snoc, MSM8974_PNOC_TO_SNOC, 8, 44, 45, MSM8974_SNOC_TO_PNOC, MSM8974_PNOC_SLV_PRNG);
+DEFINE_QNODE(slv_sdcc_1, MSM8974_PNOC_SLV_SDCC_1, 8, -1, 31);
+DEFINE_QNODE(slv_sdcc_3, MSM8974_PNOC_SLV_SDCC_3, 8, -1, 32);
+DEFINE_QNODE(slv_sdcc_2, MSM8974_PNOC_SLV_SDCC_2, 8, -1, 33);
+DEFINE_QNODE(slv_sdcc_4, MSM8974_PNOC_SLV_SDCC_4, 8, -1, 34);
+DEFINE_QNODE(slv_tsif, MSM8974_PNOC_SLV_TSIF, 8, -1, 35);
+DEFINE_QNODE(slv_bam_dma, MSM8974_PNOC_SLV_BAM_DMA, 8, -1, 36);
+DEFINE_QNODE(slv_blsp_2, MSM8974_PNOC_SLV_BLSP_2, 8, -1, 37);
+DEFINE_QNODE(slv_usb_hsic, MSM8974_PNOC_SLV_USB_HSIC, 8, -1, 38);
+DEFINE_QNODE(slv_blsp_1, MSM8974_PNOC_SLV_BLSP_1, 8, -1, 39);
+DEFINE_QNODE(slv_usb_hs, MSM8974_PNOC_SLV_USB_HS, 8, -1, 40);
+DEFINE_QNODE(slv_pdm, MSM8974_PNOC_SLV_PDM, 8, -1, 41);
+DEFINE_QNODE(slv_periph_apu_cfg, MSM8974_PNOC_SLV_PERIPH_APU_CFG, 8, -1, 42);
+DEFINE_QNODE(slv_pnoc_mpu_cfg, MSM8974_PNOC_SLV_PNOC_MPU_CFG, 8, -1, 43);
+DEFINE_QNODE(slv_prng, MSM8974_PNOC_SLV_PRNG, 8, -1, 44, MSM8974_PNOC_TO_SNOC);
+DEFINE_QNODE(slv_service_pnoc, MSM8974_PNOC_SLV_SERVICE_PNOC, 8, -1, 46);
+
+static struct msm8974_icc_node *msm8974_pnoc_nodes[] = {
+ [PNOC_MAS_PNOC_CFG] = &mas_pnoc_cfg,
+ [PNOC_MAS_SDCC_1] = &mas_sdcc_1,
+ [PNOC_MAS_SDCC_3] = &mas_sdcc_3,
+ [PNOC_MAS_SDCC_4] = &mas_sdcc_4,
+ [PNOC_MAS_SDCC_2] = &mas_sdcc_2,
+ [PNOC_MAS_TSIF] = &mas_tsif,
+ [PNOC_MAS_BAM_DMA] = &mas_bam_dma,
+ [PNOC_MAS_BLSP_2] = &mas_blsp_2,
+ [PNOC_MAS_USB_HSIC] = &mas_usb_hsic,
+ [PNOC_MAS_BLSP_1] = &mas_blsp_1,
+ [PNOC_MAS_USB_HS] = &mas_usb_hs,
+ [PNOC_TO_SNOC] = &pnoc_to_snoc,
+ [PNOC_SLV_SDCC_1] = &slv_sdcc_1,
+ [PNOC_SLV_SDCC_3] = &slv_sdcc_3,
+ [PNOC_SLV_SDCC_2] = &slv_sdcc_2,
+ [PNOC_SLV_SDCC_4] = &slv_sdcc_4,
+ [PNOC_SLV_TSIF] = &slv_tsif,
+ [PNOC_SLV_BAM_DMA] = &slv_bam_dma,
+ [PNOC_SLV_BLSP_2] = &slv_blsp_2,
+ [PNOC_SLV_USB_HSIC] = &slv_usb_hsic,
+ [PNOC_SLV_BLSP_1] = &slv_blsp_1,
+ [PNOC_SLV_USB_HS] = &slv_usb_hs,
+ [PNOC_SLV_PDM] = &slv_pdm,
+ [PNOC_SLV_PERIPH_APU_CFG] = &slv_periph_apu_cfg,
+ [PNOC_SLV_PNOC_MPU_CFG] = &slv_pnoc_mpu_cfg,
+ [PNOC_SLV_PRNG] = &slv_prng,
+ [PNOC_SLV_SERVICE_PNOC] = &slv_service_pnoc,
+};
+
+static struct msm8974_icc_desc msm8974_pnoc = {
+ .nodes = msm8974_pnoc_nodes,
+ .num_nodes = ARRAY_SIZE(msm8974_pnoc_nodes),
+};
+
+DEFINE_QNODE(mas_lpass_ahb, MSM8974_SNOC_MAS_LPASS_AHB, 8, 18, -1);
+DEFINE_QNODE(mas_qdss_bam, MSM8974_SNOC_MAS_QDSS_BAM, 8, 19, -1);
+DEFINE_QNODE(mas_snoc_cfg, MSM8974_SNOC_MAS_SNOC_CFG, 8, 20, -1);
+DEFINE_QNODE(snoc_to_bimc, MSM8974_SNOC_TO_BIMC, 8, 21, 24, MSM8974_BIMC_TO_SNOC);
+DEFINE_QNODE(snoc_to_cnoc, MSM8974_SNOC_TO_CNOC, 8, 22, 25);
+DEFINE_QNODE(snoc_to_pnoc, MSM8974_SNOC_TO_PNOC, 8, 29, 28, MSM8974_PNOC_TO_SNOC);
+DEFINE_QNODE(snoc_to_ocmem_vnoc, MSM8974_SNOC_TO_OCMEM_VNOC, 8, 53, 77, MSM8974_OCMEM_VNOC_TO_OCMEM_NOC);
+DEFINE_QNODE(mas_crypto_core0, MSM8974_SNOC_MAS_CRYPTO_CORE0, 8, 23, -1, MSM8974_SNOC_TO_BIMC);
+DEFINE_QNODE(mas_crypto_core1, MSM8974_SNOC_MAS_CRYPTO_CORE1, 8, 24, -1);
+DEFINE_QNODE(mas_lpass_proc, MSM8974_SNOC_MAS_LPASS_PROC, 8, 25, -1, MSM8974_SNOC_TO_OCMEM_VNOC);
+DEFINE_QNODE(mas_mss, MSM8974_SNOC_MAS_MSS, 8, 26, -1);
+DEFINE_QNODE(mas_mss_nav, MSM8974_SNOC_MAS_MSS_NAV, 8, 27, -1);
+DEFINE_QNODE(mas_ocmem_dma, MSM8974_SNOC_MAS_OCMEM_DMA, 8, 28, -1);
+DEFINE_QNODE(mas_wcss, MSM8974_SNOC_MAS_WCSS, 8, 30, -1);
+DEFINE_QNODE(mas_qdss_etr, MSM8974_SNOC_MAS_QDSS_ETR, 8, 31, -1);
+DEFINE_QNODE(mas_usb3, MSM8974_SNOC_MAS_USB3, 8, 32, -1, MSM8974_SNOC_TO_BIMC);
+DEFINE_QNODE(slv_ampss, MSM8974_SNOC_SLV_AMPSS, 8, -1, 20);
+DEFINE_QNODE(slv_lpass, MSM8974_SNOC_SLV_LPASS, 8, -1, 21);
+DEFINE_QNODE(slv_usb3, MSM8974_SNOC_SLV_USB3, 8, -1, 22);
+DEFINE_QNODE(slv_wcss, MSM8974_SNOC_SLV_WCSS, 8, -1, 23);
+DEFINE_QNODE(slv_ocimem, MSM8974_SNOC_SLV_OCIMEM, 8, -1, 26);
+DEFINE_QNODE(slv_snoc_ocmem, MSM8974_SNOC_SLV_SNOC_OCMEM, 8, -1, 27);
+DEFINE_QNODE(slv_service_snoc, MSM8974_SNOC_SLV_SERVICE_SNOC, 8, -1, 29);
+DEFINE_QNODE(slv_qdss_stm, MSM8974_SNOC_SLV_QDSS_STM, 8, -1, 30);
+
+static struct msm8974_icc_node *msm8974_snoc_nodes[] = {
+ [SNOC_MAS_LPASS_AHB] = &mas_lpass_ahb,
+ [SNOC_MAS_QDSS_BAM] = &mas_qdss_bam,
+ [SNOC_MAS_SNOC_CFG] = &mas_snoc_cfg,
+ [SNOC_TO_BIMC] = &snoc_to_bimc,
+ [SNOC_TO_CNOC] = &snoc_to_cnoc,
+ [SNOC_TO_PNOC] = &snoc_to_pnoc,
+ [SNOC_TO_OCMEM_VNOC] = &snoc_to_ocmem_vnoc,
+ [SNOC_MAS_CRYPTO_CORE0] = &mas_crypto_core0,
+ [SNOC_MAS_CRYPTO_CORE1] = &mas_crypto_core1,
+ [SNOC_MAS_LPASS_PROC] = &mas_lpass_proc,
+ [SNOC_MAS_MSS] = &mas_mss,
+ [SNOC_MAS_MSS_NAV] = &mas_mss_nav,
+ [SNOC_MAS_OCMEM_DMA] = &mas_ocmem_dma,
+ [SNOC_MAS_WCSS] = &mas_wcss,
+ [SNOC_MAS_QDSS_ETR] = &mas_qdss_etr,
+ [SNOC_MAS_USB3] = &mas_usb3,
+ [SNOC_SLV_AMPSS] = &slv_ampss,
+ [SNOC_SLV_LPASS] = &slv_lpass,
+ [SNOC_SLV_USB3] = &slv_usb3,
+ [SNOC_SLV_WCSS] = &slv_wcss,
+ [SNOC_SLV_OCIMEM] = &slv_ocimem,
+ [SNOC_SLV_SNOC_OCMEM] = &slv_snoc_ocmem,
+ [SNOC_SLV_SERVICE_SNOC] = &slv_service_snoc,
+ [SNOC_SLV_QDSS_STM] = &slv_qdss_stm,
+};
+
+static struct msm8974_icc_desc msm8974_snoc = {
+ .nodes = msm8974_snoc_nodes,
+ .num_nodes = ARRAY_SIZE(msm8974_snoc_nodes),
+};
+
+static int msm8974_icc_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
+ u32 peak_bw, u32 *agg_avg, u32 *agg_peak)
+{
+ *agg_avg += avg_bw;
+ *agg_peak = max(*agg_peak, peak_bw);
+
+ return 0;
+}
+
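(The aggregate callback above follows the usual interconnect convention: average bandwidth requests add up, while peak bandwidth saturates at the maximum. A minimal stand-alone sketch of that convention, with names invented for illustration rather than taken from this driver:)

	/* Sketch: averages accumulate, peaks take the max. */
	struct bw_request { u32 avg_bw, peak_bw; };

	static void aggregate_all(const struct bw_request *reqs, int n,
				  u32 *agg_avg, u32 *agg_peak)
	{
		int i;

		*agg_avg = 0;
		*agg_peak = 0;
		for (i = 0; i < n; i++) {
			*agg_avg += reqs[i].avg_bw;		/* sum of averages */
			*agg_peak = max(*agg_peak, reqs[i].peak_bw);
		}
	}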
+static void msm8974_icc_rpm_smd_send(struct device *dev, int rsc_type,
+ char *name, int id, u64 val)
+{
+ int ret;
+
+ if (id == -1)
+ return;
+
+ /*
+ * Setting the bandwidth requests for some nodes fails; the same
+ * behavior occurs on the downstream MSM 3.4 kernel sources, which
+ * log errors like this:
+ *
+ * msm_rpm_get_error_from_ack(): RPM NACK Unsupported resource
+ * AXI: msm_bus_rpm_req(): RPM: Ack failed
+ * AXI: msm_bus_rpm_commit_arb(): RPM: Req fail: mas:32, bw:240000000
+ *
+ * Since there's no publicly available documentation for this hardware,
+ * and the bandwidth for some nodes in the path can be set properly,
+ * let's not return an error.
+ */
+ ret = qcom_icc_rpm_smd_send(QCOM_SMD_RPM_ACTIVE_STATE, rsc_type, id,
+ val);
+ if (ret)
+ dev_dbg(dev, "Cannot set bandwidth for node %s (%d): %d\n",
+ name, id, ret);
+}
+
+static int msm8974_icc_set(struct icc_node *src, struct icc_node *dst)
+{
+ struct msm8974_icc_node *src_qn, *dst_qn;
+ struct msm8974_icc_provider *qp;
+ u64 sum_bw, max_peak_bw, rate;
+ u32 agg_avg = 0, agg_peak = 0;
+ struct icc_provider *provider;
+ struct icc_node *n;
+ int ret, i;
+
+ src_qn = src->data;
+ dst_qn = dst->data;
+ provider = src->provider;
+ qp = to_msm8974_icc_provider(provider);
+
+ list_for_each_entry(n, &provider->nodes, node_list)
+ msm8974_icc_aggregate(n, 0, n->avg_bw, n->peak_bw,
+ &agg_avg, &agg_peak);
+
+ sum_bw = icc_units_to_bps(agg_avg);
+ max_peak_bw = icc_units_to_bps(agg_peak);
+
+ /* Set bandwidth on source node */
+ msm8974_icc_rpm_smd_send(provider->dev, RPM_BUS_MASTER_REQ,
+ src_qn->name, src_qn->mas_rpm_id, sum_bw);
+
+ msm8974_icc_rpm_smd_send(provider->dev, RPM_BUS_SLAVE_REQ,
+ src_qn->name, src_qn->slv_rpm_id, sum_bw);
+
+ /* Set bandwidth on destination node */
+ msm8974_icc_rpm_smd_send(provider->dev, RPM_BUS_MASTER_REQ,
+ dst_qn->name, dst_qn->mas_rpm_id, sum_bw);
+
+ msm8974_icc_rpm_smd_send(provider->dev, RPM_BUS_SLAVE_REQ,
+ dst_qn->name, dst_qn->slv_rpm_id, sum_bw);
+
+ rate = max(sum_bw, max_peak_bw);
+
+ do_div(rate, src_qn->buswidth);
+
+ if (src_qn->rate == rate)
+ return 0;
+
+ for (i = 0; i < qp->num_clks; i++) {
+ ret = clk_set_rate(qp->bus_clks[i].clk, rate);
+ if (ret) {
+ dev_err(provider->dev, "%s clk_set_rate error: %d\n",
+ qp->bus_clks[i].id, ret);
+ ret = 0;
+ }
+ }
+
+ src_qn->rate = rate;
+
+ return 0;
+}
+
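(A worked example of the rate computation in msm8974_icc_set() above: the framework passes bandwidth in kB/s, so icc_units_to_bps() scales by 1000. With an aggregated average of 1,000,000 kB/s and no larger peak, sum_bw is 1,000,000,000 bytes/s; dividing by a 16-byte bus width requests a 62.5 MHz bus clock. The numbers are illustrative, not taken from the patch.)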
+static int msm8974_icc_probe(struct platform_device *pdev)
+{
+ const struct msm8974_icc_desc *desc;
+ struct msm8974_icc_node **qnodes;
+ struct msm8974_icc_provider *qp;
+ struct device *dev = &pdev->dev;
+ struct icc_onecell_data *data;
+ struct icc_provider *provider;
+ struct icc_node *node;
+ size_t num_nodes, i;
+ int ret;
+
+ /* wait for the RPM proxy */
+ if (!qcom_icc_rpm_smd_available())
+ return -EPROBE_DEFER;
+
+ desc = of_device_get_match_data(dev);
+ if (!desc)
+ return -EINVAL;
+
+ qnodes = desc->nodes;
+ num_nodes = desc->num_nodes;
+
+ qp = devm_kzalloc(dev, sizeof(*qp), GFP_KERNEL);
+ if (!qp)
+ return -ENOMEM;
+
+ data = devm_kzalloc(dev, struct_size(data, nodes, num_nodes),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ qp->bus_clks = devm_kmemdup(dev, msm8974_icc_bus_clocks,
+ sizeof(msm8974_icc_bus_clocks), GFP_KERNEL);
+ if (!qp->bus_clks)
+ return -ENOMEM;
+
+ qp->num_clks = ARRAY_SIZE(msm8974_icc_bus_clocks);
+ ret = devm_clk_bulk_get(dev, qp->num_clks, qp->bus_clks);
+ if (ret)
+ return ret;
+
+ ret = clk_bulk_prepare_enable(qp->num_clks, qp->bus_clks);
+ if (ret)
+ return ret;
+
+ provider = &qp->provider;
+ INIT_LIST_HEAD(&provider->nodes);
+ provider->dev = dev;
+ provider->set = msm8974_icc_set;
+ provider->aggregate = msm8974_icc_aggregate;
+ provider->xlate = of_icc_xlate_onecell;
+ provider->data = data;
+
+ ret = icc_provider_add(provider);
+ if (ret) {
+ dev_err(dev, "error adding interconnect provider: %d\n", ret);
+ goto err_disable_clks;
+ }
+
+ for (i = 0; i < num_nodes; i++) {
+ size_t j;
+
+ node = icc_node_create(qnodes[i]->id);
+ if (IS_ERR(node)) {
+ ret = PTR_ERR(node);
+ goto err_del_icc;
+ }
+
+ node->name = qnodes[i]->name;
+ node->data = qnodes[i];
+ icc_node_add(node, provider);
+
+ dev_dbg(dev, "registered node %s\n", node->name);
+
+ /* populate links */
+ for (j = 0; j < qnodes[i]->num_links; j++)
+ icc_link_create(node, qnodes[i]->links[j]);
+
+ data->nodes[i] = node;
+ }
+ data->num_nodes = num_nodes;
+
+ platform_set_drvdata(pdev, qp);
+
+ return 0;
+
+err_del_icc:
+ list_for_each_entry(node, &provider->nodes, node_list) {
+ icc_node_del(node);
+ icc_node_destroy(node->id);
+ }
+ icc_provider_del(provider);
+
+err_disable_clks:
+ clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
+
+ return ret;
+}
+
+static int msm8974_icc_remove(struct platform_device *pdev)
+{
+ struct msm8974_icc_provider *qp = platform_get_drvdata(pdev);
+ struct icc_provider *provider = &qp->provider;
+ struct icc_node *n;
+
+ list_for_each_entry(n, &provider->nodes, node_list) {
+ icc_node_del(n);
+ icc_node_destroy(n->id);
+ }
+ clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
+
+ return icc_provider_del(provider);
+}
+
+static const struct of_device_id msm8974_noc_of_match[] = {
+ { .compatible = "qcom,msm8974-bimc", .data = &msm8974_bimc},
+ { .compatible = "qcom,msm8974-cnoc", .data = &msm8974_cnoc},
+ { .compatible = "qcom,msm8974-mmssnoc", .data = &msm8974_mnoc},
+ { .compatible = "qcom,msm8974-ocmemnoc", .data = &msm8974_onoc},
+ { .compatible = "qcom,msm8974-pnoc", .data = &msm8974_pnoc},
+ { .compatible = "qcom,msm8974-snoc", .data = &msm8974_snoc},
+ { },
+};
+MODULE_DEVICE_TABLE(of, msm8974_noc_of_match);
+
+static struct platform_driver msm8974_noc_driver = {
+ .probe = msm8974_icc_probe,
+ .remove = msm8974_icc_remove,
+ .driver = {
+ .name = "qnoc-msm8974",
+ .of_match_table = msm8974_noc_of_match,
+ },
+};
+module_platform_driver(msm8974_noc_driver);
+MODULE_DESCRIPTION("Qualcomm MSM8974 NoC driver");
+MODULE_AUTHOR("Brian Masney <masneyb@onstation.org>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index dd555078258c..12e5039a7a25 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -124,30 +124,6 @@ static struct lock_class_key reserved_rbtree_key;
*
****************************************************************************/
-static inline int match_hid_uid(struct device *dev,
- struct acpihid_map_entry *entry)
-{
- struct acpi_device *adev = ACPI_COMPANION(dev);
- const char *hid, *uid;
-
- if (!adev)
- return -ENODEV;
-
- hid = acpi_device_hid(adev);
- uid = acpi_device_uid(adev);
-
- if (!hid || !(*hid))
- return -ENODEV;
-
- if (!uid || !(*uid))
- return strcmp(hid, entry->hid);
-
- if (!(*entry->uid))
- return strcmp(hid, entry->hid);
-
- return (strcmp(hid, entry->hid) || strcmp(uid, entry->uid));
-}
-
static inline u16 get_pci_device_id(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
@@ -158,10 +134,14 @@ static inline u16 get_pci_device_id(struct device *dev)
static inline int get_acpihid_device_id(struct device *dev,
struct acpihid_map_entry **entry)
{
+ struct acpi_device *adev = ACPI_COMPANION(dev);
struct acpihid_map_entry *p;
+ if (!adev)
+ return -ENODEV;
+
list_for_each_entry(p, &acpihid_map, list) {
- if (!match_hid_uid(dev, p)) {
+ if (acpi_dev_hid_uid_match(adev, p->hid, p->uid)) {
if (entry)
*entry = p;
return p->devid;
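(The hunk above replaces the open-coded HID/UID comparison with the acpi_dev_hid_uid_match() helper, which matches on HID and treats a missing UID as a wildcard. A hedged usage sketch; the HID string and callback are hypothetical:)

	struct acpi_device *adev = ACPI_COMPANION(dev);

	/* true when the HID matches and either no UID was given or it matches */
	if (adev && acpi_dev_hid_uid_match(adev, "HYPO0001", NULL))
		setup_device(dev);	/* hypothetical callback */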
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 1edc99335a94..6bb1f682f78b 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -87,6 +87,15 @@ static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key);
*/
static DEFINE_STATIC_KEY_FALSE(supports_pseudo_nmis);
+/*
+ * Global static key controlling whether an update to PMR allowing more
+ * interrupts needs to be propagated to the redistributor (DSB SY).
+ * It is exported so that modules are able to enable
+ * interrupts...
+ */
+DEFINE_STATIC_KEY_FALSE(gic_pmr_sync);
+EXPORT_SYMBOL(gic_pmr_sync);
+
/* ppi_nmi_refs[n] == number of cpus having ppi[n + 16] set as NMI */
static refcount_t *ppi_nmi_refs;
@@ -1502,6 +1511,17 @@ static void gic_enable_nmi_support(void)
for (i = 0; i < gic_data.ppi_nr; i++)
refcount_set(&ppi_nmi_refs[i], 0);
+ /*
+ * Linux itself doesn't use 1:N distribution, so has no need to
+ * set PMHE. The only reason to have it set is if EL3 requires it
+ * (and we can't change it).
+ */
+ if (gic_read_ctlr() & ICC_CTLR_EL1_PMHE_MASK)
+ static_branch_enable(&gic_pmr_sync);
+
+ pr_info("%s ICC_PMR_EL1 synchronisation\n",
+ static_branch_unlikely(&gic_pmr_sync) ? "Forcing" : "Relaxing");
+
static_branch_enable(&supports_pseudo_nmis);
if (static_branch_likely(&supports_deactivate_key))
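(gic_pmr_sync is enabled above when EL3 has set ICC_CTLR_EL1.PMHE; in that configuration a PMR write that unmasks interrupts is only guaranteed to reach the redistributor after a DSB SY. A hedged sketch, not from this patch, of how a consumer of the exported key might honour it:)

	static inline void pmr_allow_more_irqs(u32 pmr)
	{
		write_sysreg_s(pmr, SYS_ICC_PMR_EL1);
		if (static_branch_unlikely(&gic_pmr_sync))
			dsb(sy);	/* propagate the PMR update to the redistributor */
	}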
diff --git a/drivers/irqchip/irq-gic-v4.c b/drivers/irqchip/irq-gic-v4.c
index 563e87ed0766..45969927cc81 100644
--- a/drivers/irqchip/irq-gic-v4.c
+++ b/drivers/irqchip/irq-gic-v4.c
@@ -141,12 +141,17 @@ static int its_send_vpe_cmd(struct its_vpe *vpe, struct its_cmd_info *info)
int its_schedule_vpe(struct its_vpe *vpe, bool on)
{
struct its_cmd_info info;
+ int ret;
WARN_ON(preemptible());
info.cmd_type = on ? SCHEDULE_VPE : DESCHEDULE_VPE;
- return its_send_vpe_cmd(vpe, &info);
+ ret = its_send_vpe_cmd(vpe, &info);
+ if (!ret)
+ vpe->resident = on;
+
+ return ret;
}
int its_invall_vpe(struct its_vpe *vpe)
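(With the change above, its_schedule_vpe() records whether the vPE is currently resident, so callers can consult vpe->resident instead of tracking the scheduling state themselves. A hedged sketch of a caller:)

	/* De-schedule only if the vPE is actually resident. */
	if (vpe->resident)
		ret = its_schedule_vpe(vpe, false);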
diff --git a/drivers/isdn/hardware/mISDN/Kconfig b/drivers/isdn/hardware/mISDN/Kconfig
index 304f50c08da2..078eeadf707a 100644
--- a/drivers/isdn/hardware/mISDN/Kconfig
+++ b/drivers/isdn/hardware/mISDN/Kconfig
@@ -10,7 +10,7 @@ config MISDN_HFCPCI
depends on PCI
help
Enable support for cards with Cologne Chip AG's
- HFC PCI chip.
+ HFC PCI chip.
config MISDN_HFCMULTI
tristate "Support for HFC multiport cards (HFC-4S/8S/E1)"
diff --git a/drivers/isdn/hardware/mISDN/avmfritz.c b/drivers/isdn/hardware/mISDN/avmfritz.c
index 1137dd152b5c..ecc1ef6c386d 100644
--- a/drivers/isdn/hardware/mISDN/avmfritz.c
+++ b/drivers/isdn/hardware/mISDN/avmfritz.c
@@ -402,8 +402,8 @@ hdlc_empty_fifo(struct bchannel *bch, int count)
} else {
cnt = bchannel_get_rxbuf(bch, count);
if (cnt < 0) {
- pr_warning("%s.B%d: No bufferspace for %d bytes\n",
- fc->name, bch->nr, count);
+ pr_warn("%s.B%d: No bufferspace for %d bytes\n",
+ fc->name, bch->nr, count);
return;
}
p = skb_put(bch->rx_skb, count);
@@ -538,8 +538,8 @@ HDLC_irq(struct bchannel *bch, u32 stat)
}
if (stat & HDLC_INT_RPR) {
if (stat & HDLC_STAT_RDO) {
- pr_warning("%s: ch%d stat %x RDO\n",
- fc->name, bch->nr, stat);
+ pr_warn("%s: ch%d stat %x RDO\n",
+ fc->name, bch->nr, stat);
hdlc->ctrl.sr.xml = 0;
hdlc->ctrl.sr.cmd |= HDLC_CMD_RRS;
write_ctrl(bch, 1);
@@ -561,8 +561,8 @@ HDLC_irq(struct bchannel *bch, u32 stat)
HDLC_STAT_CRCVFR) {
recv_Bchannel(bch, 0, false);
} else {
- pr_warning("%s: got invalid frame\n",
- fc->name);
+ pr_warn("%s: got invalid frame\n",
+ fc->name);
skb_trim(bch->rx_skb, 0);
}
}
@@ -574,8 +574,8 @@ handle_tx:
* restart transmitting the whole frame on HDLC
* in transparent mode we send the next data
*/
- pr_warning("%s: ch%d stat %x XDU %s\n", fc->name, bch->nr,
- stat, bch->tx_skb ? "tx_skb" : "no tx_skb");
+ pr_warn("%s: ch%d stat %x XDU %s\n", fc->name, bch->nr,
+ stat, bch->tx_skb ? "tx_skb" : "no tx_skb");
if (bch->tx_skb && bch->tx_skb->len) {
if (!test_bit(FLG_TRANSPARENT, &bch->Flags))
bch->tx_idx = 0;
diff --git a/drivers/isdn/hardware/mISDN/hfcmulti.c b/drivers/isdn/hardware/mISDN/hfcmulti.c
index 86669ec8b977..7013a3f08429 100644
--- a/drivers/isdn/hardware/mISDN/hfcmulti.c
+++ b/drivers/isdn/hardware/mISDN/hfcmulti.c
@@ -2248,8 +2248,8 @@ next_frame:
if (bch) {
maxlen = bchannel_get_rxbuf(bch, Zsize);
if (maxlen < 0) {
- pr_warning("card%d.B%d: No bufferspace for %d bytes\n",
- hc->id + 1, bch->nr, Zsize);
+ pr_warn("card%d.B%d: No bufferspace for %d bytes\n",
+ hc->id + 1, bch->nr, Zsize);
return;
}
sp = &bch->rx_skb;
@@ -2260,8 +2260,8 @@ next_frame:
if (*sp == NULL) {
*sp = mI_alloc_skb(maxlen, GFP_ATOMIC);
if (*sp == NULL) {
- pr_warning("card%d: No mem for dch rx_skb\n",
- hc->id + 1);
+ pr_warn("card%d: No mem for dch rx_skb\n",
+ hc->id + 1);
return;
}
}
diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c
index 2330a7d24267..abdf787c1a71 100644
--- a/drivers/isdn/hardware/mISDN/hfcpci.c
+++ b/drivers/isdn/hardware/mISDN/hfcpci.c
@@ -566,8 +566,7 @@ hfcpci_empty_fifo_trans(struct bchannel *bch, struct bzfifo *rxbz,
}
maxlen = bchannel_get_rxbuf(bch, fcnt_rx);
if (maxlen < 0) {
- pr_warning("B%d: No bufferspace for %d bytes\n",
- bch->nr, fcnt_rx);
+ pr_warn("B%d: No bufferspace for %d bytes\n", bch->nr, fcnt_rx);
} else {
ptr = skb_put(bch->rx_skb, fcnt_rx);
if (le16_to_cpu(*z2r) + fcnt_rx <= B_FIFO_SIZE + B_SUB_VAL)
diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.c b/drivers/isdn/hardware/mISDN/hfcsusb.c
index 008a74a1ed44..621364bb6b12 100644
--- a/drivers/isdn/hardware/mISDN/hfcsusb.c
+++ b/drivers/isdn/hardware/mISDN/hfcsusb.c
@@ -841,8 +841,8 @@ hfcsusb_rx_frame(struct usb_fifo *fifo, __u8 *data, unsigned int len,
if (maxlen < 0) {
if (rx_skb)
skb_trim(rx_skb, 0);
- pr_warning("%s.B%d: No bufferspace for %d bytes\n",
- hw->name, fifo->bch->nr, len);
+ pr_warn("%s.B%d: No bufferspace for %d bytes\n",
+ hw->name, fifo->bch->nr, len);
spin_unlock_irqrestore(&hw->lock, flags);
return;
}
diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.h b/drivers/isdn/hardware/mISDN/hfcsusb.h
index e4fa2a2824af..7e2bc5068019 100644
--- a/drivers/isdn/hardware/mISDN/hfcsusb.h
+++ b/drivers/isdn/hardware/mISDN/hfcsusb.h
@@ -173,8 +173,8 @@ symbolic(struct hfcusb_symbolic_list list[], const int num)
/*
- * List of all supported enpoints configiration sets, used to find the
- * best matching endpoint configuration within a devices' USB descriptor.
+ * List of all supported endpoint configuration sets, used to find the
+ * best matching endpoint configuration within a device's USB descriptor.
* We need at least 3 RX endpoints and 3 TX endpoints (either
* INT-in and ISO-out, or ISO-in and ISO-out);
* with 4 RX endpoints even E-Channel logging is possible.
diff --git a/drivers/isdn/hardware/mISDN/mISDNipac.c b/drivers/isdn/hardware/mISDN/mISDNipac.c
index bca880213e91..ec475087fbf9 100644
--- a/drivers/isdn/hardware/mISDN/mISDNipac.c
+++ b/drivers/isdn/hardware/mISDN/mISDNipac.c
@@ -936,8 +936,8 @@ hscx_empty_fifo(struct hscx_hw *hscx, u8 count)
hscx_cmdr(hscx, 0x80); /* RMC */
if (hscx->bch.rx_skb)
skb_trim(hscx->bch.rx_skb, 0);
- pr_warning("%s.B%d: No bufferspace for %d bytes\n",
- hscx->ip->name, hscx->bch.nr, count);
+ pr_warn("%s.B%d: No bufferspace for %d bytes\n",
+ hscx->ip->name, hscx->bch.nr, count);
return;
}
p = skb_put(hscx->bch.rx_skb, count);
diff --git a/drivers/isdn/hardware/mISDN/mISDNisar.c b/drivers/isdn/hardware/mISDN/mISDNisar.c
index 4a3e748a1c26..e325e87c0593 100644
--- a/drivers/isdn/hardware/mISDN/mISDNisar.c
+++ b/drivers/isdn/hardware/mISDN/mISDNisar.c
@@ -27,7 +27,6 @@ MODULE_VERSION(ISAR_REV);
#define DEBUG_HW_FIRMWARE_FIFO 0x10000
-static const u8 faxmodulation_s[] = "3,24,48,72,73,74,96,97,98,121,122,145,146";
static const u8 faxmodulation[] = {3, 24, 48, 72, 73, 74, 96, 97, 98, 121,
122, 145, 146};
#define FAXMODCNT 13
@@ -222,7 +221,7 @@ load_firmware(struct isar_hw *isar, const u8 *buf, int size)
goto reterror;
}
if (!poll_mbox(isar, 1000)) {
- pr_warning("ISAR poll_mbox dkey failed\n");
+ pr_warn("ISAR poll_mbox dkey failed\n");
ret = -ETIME;
goto reterror;
}
@@ -432,8 +431,8 @@ isar_rcv_frame(struct isar_ch *ch)
case ISDN_P_B_MODEM_ASYNC:
maxlen = bchannel_get_rxbuf(&ch->bch, ch->is->clsb);
if (maxlen < 0) {
- pr_warning("%s.B%d: No bufferspace for %d bytes\n",
- ch->is->name, ch->bch.nr, ch->is->clsb);
+ pr_warn("%s.B%d: No bufferspace for %d bytes\n",
+ ch->is->name, ch->bch.nr, ch->is->clsb);
ch->is->write_reg(ch->is->hw, ISAR_IIA, 0);
break;
}
@@ -443,8 +442,8 @@ isar_rcv_frame(struct isar_ch *ch)
case ISDN_P_B_HDLC:
maxlen = bchannel_get_rxbuf(&ch->bch, ch->is->clsb);
if (maxlen < 0) {
- pr_warning("%s.B%d: No bufferspace for %d bytes\n",
- ch->is->name, ch->bch.nr, ch->is->clsb);
+ pr_warn("%s.B%d: No bufferspace for %d bytes\n",
+ ch->is->name, ch->bch.nr, ch->is->clsb);
ch->is->write_reg(ch->is->hw, ISAR_IIA, 0);
break;
}
diff --git a/drivers/isdn/hardware/mISDN/netjet.c b/drivers/isdn/hardware/mISDN/netjet.c
index 61caa7e50b9a..6aae97e827b7 100644
--- a/drivers/isdn/hardware/mISDN/netjet.c
+++ b/drivers/isdn/hardware/mISDN/netjet.c
@@ -380,8 +380,8 @@ read_dma(struct tiger_ch *bc, u32 idx, int cnt)
stat = bchannel_get_rxbuf(&bc->bch, cnt);
/* only transparent mode uses the count here; HDLC overrun is detected later */
if (stat == -ENOMEM) {
- pr_warning("%s.B%d: No memory for %d bytes\n",
- card->name, bc->bch.nr, cnt);
+ pr_warn("%s.B%d: No memory for %d bytes\n",
+ card->name, bc->bch.nr, cnt);
return;
}
if (test_bit(FLG_TRANSPARENT, &bc->bch.Flags))
@@ -420,8 +420,8 @@ read_dma(struct tiger_ch *bc, u32 idx, int cnt)
recv_Bchannel(&bc->bch, 0, false);
stat = bchannel_get_rxbuf(&bc->bch, bc->bch.maxlen);
if (stat < 0) {
- pr_warning("%s.B%d: No memory for %d bytes\n",
- card->name, bc->bch.nr, cnt);
+ pr_warn("%s.B%d: No memory for %d bytes\n",
+ card->name, bc->bch.nr, cnt);
return;
}
} else if (stat == -HDLC_CRC_ERROR) {
diff --git a/drivers/isdn/hardware/mISDN/w6692.c b/drivers/isdn/hardware/mISDN/w6692.c
index bad55fdacd36..f3b8db7b48fe 100644
--- a/drivers/isdn/hardware/mISDN/w6692.c
+++ b/drivers/isdn/hardware/mISDN/w6692.c
@@ -466,8 +466,8 @@ W6692_empty_Bfifo(struct w6692_ch *wch, int count)
WriteW6692B(wch, W_B_CMDR, W_B_CMDR_RACK | W_B_CMDR_RACT);
if (wch->bch.rx_skb)
skb_trim(wch->bch.rx_skb, 0);
- pr_warning("%s.B%d: No bufferspace for %d bytes\n",
- card->name, wch->bch.nr, count);
+ pr_warn("%s.B%d: No bufferspace for %d bytes\n",
+ card->name, wch->bch.nr, count);
return;
}
ptr = skb_put(wch->bch.rx_skb, count);
@@ -729,8 +729,8 @@ W6692B_interrupt(struct w6692_hw *card, int ch)
wch->bch.nr, star);
}
if (star & W_B_STAR_XDOW) {
- pr_warning("%s: B%d XDOW proto=%x\n", card->name,
- wch->bch.nr, wch->bch.state);
+ pr_warn("%s: B%d XDOW proto=%x\n", card->name,
+ wch->bch.nr, wch->bch.state);
#ifdef ERROR_STATISTIC
wch->bch.err_xdu++;
#endif
@@ -747,8 +747,8 @@ W6692B_interrupt(struct w6692_hw *card, int ch)
return; /* handle XDOW only once */
}
if (stat & W_B_EXI_XDUN) {
- pr_warning("%s: B%d XDUN proto=%x\n", card->name,
- wch->bch.nr, wch->bch.state);
+ pr_warn("%s: B%d XDUN proto=%x\n", card->name,
+ wch->bch.nr, wch->bch.state);
#ifdef ERROR_STATISTIC
wch->bch.err_xdu++;
#endif
diff --git a/drivers/isdn/mISDN/hwchannel.c b/drivers/isdn/mISDN/hwchannel.c
index f378173bcf6f..8c93af06ed02 100644
--- a/drivers/isdn/mISDN/hwchannel.c
+++ b/drivers/isdn/mISDN/hwchannel.c
@@ -474,8 +474,8 @@ bchannel_get_rxbuf(struct bchannel *bch, int reqlen)
if (bch->rx_skb) {
len = skb_tailroom(bch->rx_skb);
if (len < reqlen) {
- pr_warning("B%d no space for %d (only %d) bytes\n",
- bch->nr, reqlen, len);
+ pr_warn("B%d no space for %d (only %d) bytes\n",
+ bch->nr, reqlen, len);
if (test_bit(FLG_TRANSPARENT, &bch->Flags)) {
/* send what we have now and try a new buffer */
recv_Bchannel(bch, 0, true);
@@ -508,8 +508,7 @@ bchannel_get_rxbuf(struct bchannel *bch, int reqlen)
}
bch->rx_skb = mI_alloc_skb(len, GFP_ATOMIC);
if (!bch->rx_skb) {
- pr_warning("B%d receive no memory for %d bytes\n",
- bch->nr, len);
+ pr_warn("B%d receive no memory for %d bytes\n", bch->nr, len);
len = -ENOMEM;
}
return len;
diff --git a/drivers/leds/trigger/ledtrig-activity.c b/drivers/leds/trigger/ledtrig-activity.c
index 6a72b7e13719..14ba7faaed9e 100644
--- a/drivers/leds/trigger/ledtrig-activity.c
+++ b/drivers/leds/trigger/ledtrig-activity.c
@@ -57,11 +57,15 @@ static void led_activity_function(struct timer_list *t)
curr_used = 0;
for_each_possible_cpu(i) {
- curr_used += kcpustat_cpu(i).cpustat[CPUTIME_USER]
- + kcpustat_cpu(i).cpustat[CPUTIME_NICE]
- + kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM]
- + kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ]
- + kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
+ struct kernel_cpustat kcpustat;
+
+ kcpustat_cpu_fetch(&kcpustat, i);
+
+ curr_used += kcpustat.cpustat[CPUTIME_USER]
+ + kcpustat.cpustat[CPUTIME_NICE]
+ + kcpustat.cpustat[CPUTIME_SYSTEM]
+ + kcpustat.cpustat[CPUTIME_SOFTIRQ]
+ + kcpustat.cpustat[CPUTIME_IRQ];
cpus++;
}
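(The hunk above switches from reading kcpustat fields one at a time to kcpustat_cpu_fetch(), which takes a single coherent per-CPU snapshot, needed once vtime can update those fields concurrently. A minimal sketch of the pattern:)

	static u64 cpu_busy_time(int cpu)
	{
		struct kernel_cpustat snap;

		kcpustat_cpu_fetch(&snap, cpu);	/* one coherent snapshot */
		return snap.cpustat[CPUTIME_USER] + snap.cpustat[CPUTIME_SYSTEM];
	}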
diff --git a/drivers/macintosh/rack-meter.c b/drivers/macintosh/rack-meter.c
index 4134e580f786..60311e8d6240 100644
--- a/drivers/macintosh/rack-meter.c
+++ b/drivers/macintosh/rack-meter.c
@@ -81,13 +81,14 @@ static int rackmeter_ignore_nice;
*/
static inline u64 get_cpu_idle_time(unsigned int cpu)
{
+ struct kernel_cpustat *kcpustat = &kcpustat_cpu(cpu);
u64 retval;
- retval = kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE] +
- kcpustat_cpu(cpu).cpustat[CPUTIME_IOWAIT];
+ retval = kcpustat->cpustat[CPUTIME_IDLE] +
+ kcpustat->cpustat[CPUTIME_IOWAIT];
if (rackmeter_ignore_nice)
- retval += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
+ retval += kcpustat_field(kcpustat, CPUTIME_NICE, cpu);
return retval;
}
diff --git a/drivers/macintosh/windfarm_fcu_controls.c b/drivers/macintosh/windfarm_fcu_controls.c
index 3c971297b6dc..67daeec94b44 100644
--- a/drivers/macintosh/windfarm_fcu_controls.c
+++ b/drivers/macintosh/windfarm_fcu_controls.c
@@ -468,9 +468,7 @@ static void wf_fcu_lookup_fans(struct wf_fcu_priv *pv)
else
id = ((*reg) - 0x30) / 2;
if (id > 7) {
- pr_warning("wf_fcu: Can't parse "
- "fan ID in device-tree for %pOF\n",
- np);
+ pr_warn("wf_fcu: Can't parse fan ID in device-tree for %pOF\n", np);
break;
}
wf_fcu_add_fan(pv, name, type, id);
diff --git a/drivers/macintosh/windfarm_lm87_sensor.c b/drivers/macintosh/windfarm_lm87_sensor.c
index e44525b19071..b03a33b803b7 100644
--- a/drivers/macintosh/windfarm_lm87_sensor.c
+++ b/drivers/macintosh/windfarm_lm87_sensor.c
@@ -124,8 +124,8 @@ static int wf_lm87_probe(struct i2c_client *client,
}
}
if (!name) {
- pr_warning("wf_lm87: Unsupported sensor %pOF\n",
- client->dev.of_node);
+ pr_warn("wf_lm87: Unsupported sensor %pOF\n",
+ client->dev.of_node);
return -ENODEV;
}
diff --git a/drivers/macintosh/windfarm_pm72.c b/drivers/macintosh/windfarm_pm72.c
index c5da0fc24884..e81746b87cff 100644
--- a/drivers/macintosh/windfarm_pm72.c
+++ b/drivers/macintosh/windfarm_pm72.c
@@ -285,8 +285,8 @@ static void cpu_fans_tick_split(void)
/* Apply result directly to exhaust fan */
err = wf_control_set(cpu_rear_fans[cpu], sp->target);
if (err) {
- pr_warning("wf_pm72: Fan %s reports error %d\n",
- cpu_rear_fans[cpu]->name, err);
+ pr_warn("wf_pm72: Fan %s reports error %d\n",
+ cpu_rear_fans[cpu]->name, err);
failure_state |= FAILURE_FAN;
break;
}
@@ -296,8 +296,8 @@ static void cpu_fans_tick_split(void)
DBG_LOTS(" CPU%d: intake = %d RPM\n", cpu, intake);
err = wf_control_set(cpu_front_fans[cpu], intake);
if (err) {
- pr_warning("wf_pm72: Fan %s reports error %d\n",
- cpu_front_fans[cpu]->name, err);
+ pr_warn("wf_pm72: Fan %s reports error %d\n",
+ cpu_front_fans[cpu]->name, err);
failure_state |= FAILURE_FAN;
break;
}
@@ -367,22 +367,22 @@ static void cpu_fans_tick_combined(void)
for (cpu = 0; cpu < nr_chips; cpu++) {
err = wf_control_set(cpu_rear_fans[cpu], sp->target);
if (err) {
- pr_warning("wf_pm72: Fan %s reports error %d\n",
- cpu_rear_fans[cpu]->name, err);
+ pr_warn("wf_pm72: Fan %s reports error %d\n",
+ cpu_rear_fans[cpu]->name, err);
failure_state |= FAILURE_FAN;
}
err = wf_control_set(cpu_front_fans[cpu], intake);
if (err) {
- pr_warning("wf_pm72: Fan %s reports error %d\n",
- cpu_front_fans[cpu]->name, err);
+ pr_warn("wf_pm72: Fan %s reports error %d\n",
+ cpu_front_fans[cpu]->name, err);
failure_state |= FAILURE_FAN;
}
err = 0;
if (cpu_pumps[cpu])
err = wf_control_set(cpu_pumps[cpu], pump);
if (err) {
- pr_warning("wf_pm72: Pump %s reports error %d\n",
- cpu_pumps[cpu]->name, err);
+ pr_warn("wf_pm72: Pump %s reports error %d\n",
+ cpu_pumps[cpu]->name, err);
failure_state |= FAILURE_FAN;
}
}
@@ -561,7 +561,7 @@ static void drives_fan_tick(void)
err = wf_sensor_get(drives_temp, &temp);
if (err) {
- pr_warning("wf_pm72: drive bay temp sensor error %d\n", err);
+ pr_warn("wf_pm72: drive bay temp sensor error %d\n", err);
failure_state |= FAILURE_SENSOR;
wf_control_set_max(drives_fan);
return;
diff --git a/drivers/macintosh/windfarm_rm31.c b/drivers/macintosh/windfarm_rm31.c
index 8456eb67184b..7acd1684c451 100644
--- a/drivers/macintosh/windfarm_rm31.c
+++ b/drivers/macintosh/windfarm_rm31.c
@@ -281,8 +281,8 @@ static void cpu_fans_tick(void)
for (i = 0; i < 3; i++) {
err = wf_control_set(cpu_fans[cpu][i], speed);
if (err) {
- pr_warning("wf_rm31: Fan %s reports error %d\n",
- cpu_fans[cpu][i]->name, err);
+ pr_warn("wf_rm31: Fan %s reports error %d\n",
+ cpu_fans[cpu][i]->name, err);
failure_state |= FAILURE_FAN;
}
}
@@ -465,7 +465,7 @@ static void slots_fan_tick(void)
err = wf_sensor_get(slots_temp, &temp);
if (err) {
- pr_warning("wf_rm31: slots temp sensor error %d\n", err);
+ pr_warn("wf_rm31: slots temp sensor error %d\n", err);
failure_state |= FAILURE_SENSOR;
wf_control_set_max(slots_fan);
return;
diff --git a/drivers/mcb/mcb-core.c b/drivers/mcb/mcb-core.c
index b72e82efaee5..38fbb3b59873 100644
--- a/drivers/mcb/mcb-core.c
+++ b/drivers/mcb/mcb-core.c
@@ -191,7 +191,7 @@ int __mcb_register_driver(struct mcb_driver *drv, struct module *owner,
return driver_register(&drv->driver);
}
-EXPORT_SYMBOL_GPL(__mcb_register_driver);
+EXPORT_SYMBOL_NS_GPL(__mcb_register_driver, MCB);
/**
* mcb_unregister_driver() - Unregister a @mcb_driver from the system
@@ -203,7 +203,7 @@ void mcb_unregister_driver(struct mcb_driver *drv)
{
driver_unregister(&drv->driver);
}
-EXPORT_SYMBOL_GPL(mcb_unregister_driver);
+EXPORT_SYMBOL_NS_GPL(mcb_unregister_driver, MCB);
static void mcb_release_dev(struct device *dev)
{
@@ -249,7 +249,7 @@ out:
return ret;
}
-EXPORT_SYMBOL_GPL(mcb_device_register);
+EXPORT_SYMBOL_NS_GPL(mcb_device_register, MCB);
static void mcb_free_bus(struct device *dev)
{
@@ -301,7 +301,7 @@ err_free:
kfree(bus);
return ERR_PTR(rc);
}
-EXPORT_SYMBOL_GPL(mcb_alloc_bus);
+EXPORT_SYMBOL_NS_GPL(mcb_alloc_bus, MCB);
static int __mcb_devices_unregister(struct device *dev, void *data)
{
@@ -323,7 +323,7 @@ void mcb_release_bus(struct mcb_bus *bus)
{
mcb_devices_unregister(bus);
}
-EXPORT_SYMBOL_GPL(mcb_release_bus);
+EXPORT_SYMBOL_NS_GPL(mcb_release_bus, MCB);
/**
* mcb_bus_put() - Increment refcnt
@@ -338,7 +338,7 @@ struct mcb_bus *mcb_bus_get(struct mcb_bus *bus)
return bus;
}
-EXPORT_SYMBOL_GPL(mcb_bus_get);
+EXPORT_SYMBOL_NS_GPL(mcb_bus_get, MCB);
/**
* mcb_bus_put() - Decrement refcnt
@@ -351,7 +351,7 @@ void mcb_bus_put(struct mcb_bus *bus)
if (bus)
put_device(&bus->dev);
}
-EXPORT_SYMBOL_GPL(mcb_bus_put);
+EXPORT_SYMBOL_NS_GPL(mcb_bus_put, MCB);
/**
* mcb_alloc_dev() - Allocate a device
@@ -371,7 +371,7 @@ struct mcb_device *mcb_alloc_dev(struct mcb_bus *bus)
return dev;
}
-EXPORT_SYMBOL_GPL(mcb_alloc_dev);
+EXPORT_SYMBOL_NS_GPL(mcb_alloc_dev, MCB);
/**
* mcb_free_dev() - Free @mcb_device
@@ -383,7 +383,7 @@ void mcb_free_dev(struct mcb_device *dev)
{
kfree(dev);
}
-EXPORT_SYMBOL_GPL(mcb_free_dev);
+EXPORT_SYMBOL_NS_GPL(mcb_free_dev, MCB);
static int __mcb_bus_add_devices(struct device *dev, void *data)
{
@@ -412,7 +412,7 @@ void mcb_bus_add_devices(const struct mcb_bus *bus)
{
bus_for_each_dev(&mcb_bus_type, NULL, NULL, __mcb_bus_add_devices);
}
-EXPORT_SYMBOL_GPL(mcb_bus_add_devices);
+EXPORT_SYMBOL_NS_GPL(mcb_bus_add_devices, MCB);
/**
* mcb_get_resource() - get a resource for a mcb device
@@ -428,7 +428,7 @@ struct resource *mcb_get_resource(struct mcb_device *dev, unsigned int type)
else
return NULL;
}
-EXPORT_SYMBOL_GPL(mcb_get_resource);
+EXPORT_SYMBOL_NS_GPL(mcb_get_resource, MCB);
/**
* mcb_request_mem() - Request memory
@@ -454,7 +454,7 @@ struct resource *mcb_request_mem(struct mcb_device *dev, const char *name)
return mem;
}
-EXPORT_SYMBOL_GPL(mcb_request_mem);
+EXPORT_SYMBOL_NS_GPL(mcb_request_mem, MCB);
/**
* mcb_release_mem() - Release memory requested by device
@@ -469,7 +469,7 @@ void mcb_release_mem(struct resource *mem)
size = resource_size(mem);
release_mem_region(mem->start, size);
}
-EXPORT_SYMBOL_GPL(mcb_release_mem);
+EXPORT_SYMBOL_NS_GPL(mcb_release_mem, MCB);
static int __mcb_get_irq(struct mcb_device *dev)
{
@@ -495,7 +495,7 @@ int mcb_get_irq(struct mcb_device *dev)
return __mcb_get_irq(dev);
}
-EXPORT_SYMBOL_GPL(mcb_get_irq);
+EXPORT_SYMBOL_NS_GPL(mcb_get_irq, MCB);
static int mcb_init(void)
{
diff --git a/drivers/mcb/mcb-lpc.c b/drivers/mcb/mcb-lpc.c
index 8f1bde437a7e..506676754538 100644
--- a/drivers/mcb/mcb-lpc.c
+++ b/drivers/mcb/mcb-lpc.c
@@ -168,3 +168,4 @@ module_exit(mcb_lpc_exit);
MODULE_AUTHOR("Andreas Werner <andreas.werner@men.de>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MCB over LPC support");
+MODULE_IMPORT_NS(MCB);
diff --git a/drivers/mcb/mcb-parse.c b/drivers/mcb/mcb-parse.c
index 3b69e6aa3d88..0266bfddfbe2 100644
--- a/drivers/mcb/mcb-parse.c
+++ b/drivers/mcb/mcb-parse.c
@@ -253,4 +253,4 @@ free_header:
return ret;
}
-EXPORT_SYMBOL_GPL(chameleon_parse_cells);
+EXPORT_SYMBOL_NS_GPL(chameleon_parse_cells, MCB);
diff --git a/drivers/mcb/mcb-pci.c b/drivers/mcb/mcb-pci.c
index 14866aa22f75..dc88232d9af8 100644
--- a/drivers/mcb/mcb-pci.c
+++ b/drivers/mcb/mcb-pci.c
@@ -131,3 +131,4 @@ module_pci_driver(mcb_pci_driver);
MODULE_AUTHOR("Johannes Thumshirn <johannes.thumshirn@men.de>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MCB over PCI support");
+MODULE_IMPORT_NS(MCB);
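(The MCB symbols are now exported into the "MCB" symbol namespace, so any module linking against them must import that namespace explicitly or modpost rejects the reference, which is why the two bus-carrier modules above gain MODULE_IMPORT_NS. A minimal sketch for an out-of-tree consumer:)

	#include <linux/module.h>

	MODULE_IMPORT_NS(MCB);	/* required to use EXPORT_SYMBOL_NS_GPL(..., MCB) symbols */
	MODULE_LICENSE("GPL");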
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index aa98953f4462..d6d5ab23c088 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -38,9 +38,9 @@ config MD_AUTODETECT
default y
---help---
If you say Y here, then the kernel will try to autodetect raid
- arrays as part of its boot process.
+ arrays as part of its boot process.
- If you don't use raid and say Y, this autodetection can cause
+ If you don't use raid and say Y, this autodetection can cause
a several-second delay in the boot time due to various
synchronisation steps that are part of this process.
@@ -290,7 +290,7 @@ config DM_SNAPSHOT
depends on BLK_DEV_DM
select DM_BUFIO
---help---
- Allow volume managers to take writable snapshots of a device.
+ Allow volume managers to take writable snapshots of a device.
config DM_THIN_PROVISIONING
tristate "Thin provisioning target"
@@ -298,7 +298,7 @@ config DM_THIN_PROVISIONING
select DM_PERSISTENT_DATA
select DM_BIO_PRISON
---help---
- Provides thin provisioning and snapshots that share a data store.
+ Provides thin provisioning and snapshots that share a data store.
config DM_CACHE
tristate "Cache target (EXPERIMENTAL)"
@@ -307,23 +307,23 @@ config DM_CACHE
select DM_PERSISTENT_DATA
select DM_BIO_PRISON
---help---
- dm-cache attempts to improve performance of a block device by
- moving frequently used data to a smaller, higher performance
- device. Different 'policy' plugins can be used to change the
- algorithms used to select which blocks are promoted, demoted,
- cleaned etc. It supports writeback and writethrough modes.
+ dm-cache attempts to improve performance of a block device by
+ moving frequently used data to a smaller, higher performance
+ device. Different 'policy' plugins can be used to change the
+ algorithms used to select which blocks are promoted, demoted,
+ cleaned etc. It supports writeback and writethrough modes.
config DM_CACHE_SMQ
tristate "Stochastic MQ Cache Policy (EXPERIMENTAL)"
depends on DM_CACHE
default y
---help---
- A cache policy that uses a multiqueue ordered by recent hits
- to select which blocks should be promoted and demoted.
- This is meant to be a general purpose policy. It prioritises
- reads over writes. This SMQ policy (vs MQ) offers the promise
- of less memory utilization, improved performance and increased
- adaptability in the face of changing workloads.
+ A cache policy that uses a multiqueue ordered by recent hits
+ to select which blocks should be promoted and demoted.
+ This is meant to be a general purpose policy. It prioritises
+ reads over writes. This SMQ policy (vs MQ) offers the promise
+ of less memory utilization, improved performance and increased
+ adaptability in the face of changing workloads.
config DM_WRITECACHE
tristate "Writecache target"
@@ -343,9 +343,9 @@ config DM_ERA
select DM_PERSISTENT_DATA
select DM_BIO_PRISON
---help---
- dm-era tracks which parts of a block device are written to
- over time. Useful for maintaining cache coherency when using
- vendor snapshots.
+ dm-era tracks which parts of a block device are written to
+ over time. Useful for maintaining cache coherency when using
+ vendor snapshots.
config DM_CLONE
tristate "Clone target (EXPERIMENTAL)"
@@ -353,20 +353,20 @@ config DM_CLONE
default n
select DM_PERSISTENT_DATA
---help---
- dm-clone produces a one-to-one copy of an existing, read-only source
- device into a writable destination device. The cloned device is
- visible/mountable immediately and the copy of the source device to the
- destination device happens in the background, in parallel with user
- I/O.
+ dm-clone produces a one-to-one copy of an existing, read-only source
+ device into a writable destination device. The cloned device is
+ visible/mountable immediately and the copy of the source device to the
+ destination device happens in the background, in parallel with user
+ I/O.
- If unsure, say N.
+ If unsure, say N.
config DM_MIRROR
tristate "Mirror target"
depends on BLK_DEV_DM
---help---
- Allow volume managers to mirror logical volumes, also
- needed for live data migration tools such as 'pvmove'.
+ Allow volume managers to mirror logical volumes, also
+ needed for live data migration tools such as 'pvmove'.
config DM_LOG_USERSPACE
tristate "Mirror userspace logging"
@@ -483,7 +483,7 @@ config DM_FLAKEY
tristate "Flakey target"
depends on BLK_DEV_DM
---help---
- A target that intermittently fails I/O for debugging purposes.
+ A target that intermittently fails I/O for debugging purposes.
config DM_VERITY
tristate "Verity target support"
diff --git a/drivers/md/bcache/Makefile b/drivers/md/bcache/Makefile
index d26b35195825..fd714628da6a 100644
--- a/drivers/md/bcache/Makefile
+++ b/drivers/md/bcache/Makefile
@@ -5,5 +5,3 @@ obj-$(CONFIG_BCACHE) += bcache.o
bcache-y := alloc.o bset.o btree.o closure.o debug.o extents.o\
io.o journal.o movinggc.o request.o stats.o super.o sysfs.o trace.o\
util.o writeback.o
-
-CFLAGS_request.o += -Iblock
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
index 6f776823b9ba..a1df0d95151c 100644
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -377,7 +377,10 @@ retry_invalidate:
if (!fifo_full(&ca->free_inc))
goto retry_invalidate;
- bch_prio_write(ca);
+ if (bch_prio_write(ca, false) < 0) {
+ ca->invalidate_needs_gc = 1;
+ wake_up_gc(ca->set);
+ }
}
}
out:
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 013e35a9e317..9198c1b480d9 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -582,6 +582,7 @@ struct cache_set {
*/
wait_queue_head_t btree_cache_wait;
struct task_struct *btree_cache_alloc_lock;
+ spinlock_t btree_cannibalize_lock;
/*
* When we free a btree node, we increment the gen of the bucket the
@@ -723,6 +724,7 @@ struct cache_set {
unsigned int gc_always_rewrite:1;
unsigned int shrinker_disabled:1;
unsigned int copy_gc_enabled:1;
+ unsigned int idle_max_writeback_rate_enabled:1;
#define BUCKET_HASH_BITS 12
struct hlist_head bucket_hash[1 << BUCKET_HASH_BITS];
@@ -977,7 +979,7 @@ bool bch_cached_dev_error(struct cached_dev *dc);
__printf(2, 3)
bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...);
-void bch_prio_write(struct cache *ca);
+int bch_prio_write(struct cache *ca, bool wait);
void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent);
extern struct workqueue_struct *bcache_wq;
diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c
index 08768796b543..cffcdc9feefb 100644
--- a/drivers/md/bcache/bset.c
+++ b/drivers/md/bcache/bset.c
@@ -155,6 +155,7 @@ int __bch_keylist_realloc(struct keylist *l, unsigned int u64s)
return 0;
}
+/* Pop the top key of keylist by pointing l->top to its previous key */
struct bkey *bch_keylist_pop(struct keylist *l)
{
struct bkey *k = l->keys;
@@ -168,6 +169,7 @@ struct bkey *bch_keylist_pop(struct keylist *l)
return l->top = k;
}
+/* Pop the bottom key of keylist and update l->top_p */
void bch_keylist_pop_front(struct keylist *l)
{
l->top_p -= bkey_u64s(l->keys);
@@ -309,7 +311,6 @@ void bch_btree_keys_free(struct btree_keys *b)
t->tree = NULL;
t->data = NULL;
}
-EXPORT_SYMBOL(bch_btree_keys_free);
int bch_btree_keys_alloc(struct btree_keys *b,
unsigned int page_order,
@@ -342,7 +343,6 @@ err:
bch_btree_keys_free(b);
return -ENOMEM;
}
-EXPORT_SYMBOL(bch_btree_keys_alloc);
void bch_btree_keys_init(struct btree_keys *b, const struct btree_keys_ops *ops,
bool *expensive_debug_checks)
@@ -361,7 +361,6 @@ void bch_btree_keys_init(struct btree_keys *b, const struct btree_keys_ops *ops,
* any more.
*/
}
-EXPORT_SYMBOL(bch_btree_keys_init);
/* Binary tree stuff for auxiliary search trees */
@@ -678,7 +677,6 @@ void bch_bset_init_next(struct btree_keys *b, struct bset *i, uint64_t magic)
bch_bset_build_unwritten_tree(b);
}
-EXPORT_SYMBOL(bch_bset_init_next);
/*
* Build auxiliary binary tree 'struct bset_tree *t', this tree is used to
@@ -732,7 +730,6 @@ void bch_bset_build_written_tree(struct btree_keys *b)
j = inorder_next(j, t->size))
make_bfloat(t, j);
}
-EXPORT_SYMBOL(bch_bset_build_written_tree);
/* Insert */
@@ -780,7 +777,6 @@ fix_right: do {
j = j * 2 + 1;
} while (j < t->size);
}
-EXPORT_SYMBOL(bch_bset_fix_invalidated_key);
static void bch_bset_fix_lookup_table(struct btree_keys *b,
struct bset_tree *t,
@@ -855,7 +851,6 @@ bool bch_bkey_try_merge(struct btree_keys *b, struct bkey *l, struct bkey *r)
return b->ops->key_merge(b, l, r);
}
-EXPORT_SYMBOL(bch_bkey_try_merge);
void bch_bset_insert(struct btree_keys *b, struct bkey *where,
struct bkey *insert)
@@ -875,7 +870,6 @@ void bch_bset_insert(struct btree_keys *b, struct bkey *where,
bkey_copy(where, insert);
bch_bset_fix_lookup_table(b, t, where);
}
-EXPORT_SYMBOL(bch_bset_insert);
unsigned int bch_btree_insert_key(struct btree_keys *b, struct bkey *k,
struct bkey *replace_key)
@@ -931,7 +925,6 @@ copy: bkey_copy(m, k);
merged:
return status;
}
-EXPORT_SYMBOL(bch_btree_insert_key);
/* Lookup */
@@ -1077,7 +1070,6 @@ struct bkey *__bch_bset_search(struct btree_keys *b, struct bset_tree *t,
return i.l;
}
-EXPORT_SYMBOL(__bch_bset_search);
/* Btree iterator */
@@ -1132,7 +1124,6 @@ struct bkey *bch_btree_iter_init(struct btree_keys *b,
{
return __bch_btree_iter_init(b, iter, search, b->set);
}
-EXPORT_SYMBOL(bch_btree_iter_init);
static inline struct bkey *__bch_btree_iter_next(struct btree_iter *iter,
btree_iter_cmp_fn *cmp)
@@ -1165,7 +1156,6 @@ struct bkey *bch_btree_iter_next(struct btree_iter *iter)
return __bch_btree_iter_next(iter, btree_iter_cmp);
}
-EXPORT_SYMBOL(bch_btree_iter_next);
struct bkey *bch_btree_iter_next_filter(struct btree_iter *iter,
struct btree_keys *b, ptr_filter_fn fn)
@@ -1196,7 +1186,6 @@ int bch_bset_sort_state_init(struct bset_sort_state *state,
return mempool_init_page_pool(&state->pool, 1, page_order);
}
-EXPORT_SYMBOL(bch_bset_sort_state_init);
static void btree_mergesort(struct btree_keys *b, struct bset *out,
struct btree_iter *iter,
@@ -1313,7 +1302,6 @@ void bch_btree_sort_partial(struct btree_keys *b, unsigned int start,
EBUG_ON(oldsize >= 0 && bch_count_data(b) != oldsize);
}
-EXPORT_SYMBOL(bch_btree_sort_partial);
void bch_btree_sort_and_fix_extents(struct btree_keys *b,
struct btree_iter *iter,
@@ -1366,7 +1354,6 @@ void bch_btree_sort_lazy(struct btree_keys *b, struct bset_sort_state *state)
out:
bch_bset_build_written_tree(b);
}
-EXPORT_SYMBOL(bch_btree_sort_lazy);
void bch_btree_keys_stats(struct btree_keys *b, struct bset_stats *stats)
{
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index ba434d9ac720..14d6c33b0957 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -543,6 +543,11 @@ static void bch_btree_leaf_dirty(struct btree *b, atomic_t *journal_ref)
set_btree_node_dirty(b);
+ /*
+ * w->journal is always the oldest journal pin of all bkeys
+ * in the leaf node, to make sure the oldest jset seq won't
+ * be increased before this btree node is flushed.
+ */
if (journal_ref) {
if (w->journal &&
journal_pin_cmp(b->c, w->journal, journal_ref)) {
@@ -723,6 +728,8 @@ static unsigned long bch_mca_scan(struct shrinker *shrink,
* IO can always make forward progress:
*/
nr /= c->btree_pages;
+ if (nr == 0)
+ nr = 1;
nr = min_t(unsigned long, nr, mca_can_free(c));
i = 0;
@@ -884,15 +891,17 @@ out:
static int mca_cannibalize_lock(struct cache_set *c, struct btree_op *op)
{
- struct task_struct *old;
-
- old = cmpxchg(&c->btree_cache_alloc_lock, NULL, current);
- if (old && old != current) {
+ spin_lock(&c->btree_cannibalize_lock);
+ if (likely(c->btree_cache_alloc_lock == NULL)) {
+ c->btree_cache_alloc_lock = current;
+ } else if (c->btree_cache_alloc_lock != current) {
if (op)
prepare_to_wait(&c->btree_cache_wait, &op->wait,
TASK_UNINTERRUPTIBLE);
+ spin_unlock(&c->btree_cannibalize_lock);
return -EINTR;
}
+ spin_unlock(&c->btree_cannibalize_lock);
return 0;
}
@@ -927,10 +936,12 @@ static struct btree *mca_cannibalize(struct cache_set *c, struct btree_op *op,
*/
static void bch_cannibalize_unlock(struct cache_set *c)
{
+ spin_lock(&c->btree_cannibalize_lock);
if (c->btree_cache_alloc_lock == current) {
c->btree_cache_alloc_lock = NULL;
wake_up(&c->btree_cache_wait);
}
+ spin_unlock(&c->btree_cannibalize_lock);
}
static struct btree *mca_alloc(struct cache_set *c, struct btree_op *op,
diff --git a/drivers/md/bcache/closure.c b/drivers/md/bcache/closure.c
index c12cd809ab19..0164a1fe94a9 100644
--- a/drivers/md/bcache/closure.c
+++ b/drivers/md/bcache/closure.c
@@ -45,7 +45,6 @@ void closure_sub(struct closure *cl, int v)
{
closure_put_after_sub(cl, atomic_sub_return(v, &cl->remaining));
}
-EXPORT_SYMBOL(closure_sub);
/*
* closure_put - decrement a closure's refcount
@@ -54,7 +53,6 @@ void closure_put(struct closure *cl)
{
closure_put_after_sub(cl, atomic_dec_return(&cl->remaining));
}
-EXPORT_SYMBOL(closure_put);
/*
* closure_wake_up - wake up all closures on a wait list, without memory barrier
@@ -76,7 +74,6 @@ void __closure_wake_up(struct closure_waitlist *wait_list)
closure_sub(cl, CLOSURE_WAITING + 1);
}
}
-EXPORT_SYMBOL(__closure_wake_up);
/**
* closure_wait - add a closure to a waitlist
@@ -96,7 +93,6 @@ bool closure_wait(struct closure_waitlist *waitlist, struct closure *cl)
return true;
}
-EXPORT_SYMBOL(closure_wait);
struct closure_syncer {
struct task_struct *task;
@@ -131,7 +127,6 @@ void __sched __closure_sync(struct closure *cl)
__set_current_state(TASK_RUNNING);
}
-EXPORT_SYMBOL(__closure_sync);
#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
@@ -149,7 +144,6 @@ void closure_debug_create(struct closure *cl)
list_add(&cl->all, &closure_list);
spin_unlock_irqrestore(&closure_list_lock, flags);
}
-EXPORT_SYMBOL(closure_debug_create);
void closure_debug_destroy(struct closure *cl)
{
@@ -162,7 +156,6 @@ void closure_debug_destroy(struct closure *cl)
list_del(&cl->all);
spin_unlock_irqrestore(&closure_list_lock, flags);
}
-EXPORT_SYMBOL(closure_debug_destroy);
static struct dentry *closure_debug;
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 41adcd1546f1..73478a91a342 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -62,18 +62,6 @@ static void bch_data_insert_keys(struct closure *cl)
struct bkey *replace_key = op->replace ? &op->replace_key : NULL;
int ret;
- /*
- * If we're looping, might already be waiting on
- * another journal write - can't wait on more than one journal write at
- * a time
- *
- * XXX: this looks wrong
- */
-#if 0
- while (atomic_read(&s->cl.remaining) & CLOSURE_WAITING)
- closure_sync(&s->cl);
-#endif
-
if (!op->replace)
journal_ref = bch_journal(op->c, &op->insert_keys,
op->flush_journal ? cl : NULL);
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 20ed838e9413..77e9869345e7 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -92,10 +92,11 @@ static const char *read_super(struct cache_sb *sb, struct block_device *bdev,
pr_debug("read sb version %llu, flags %llu, seq %llu, journal size %u",
sb->version, sb->flags, sb->seq, sb->keys);
- err = "Not a bcache superblock";
+ err = "Not a bcache superblock (bad offset)";
if (sb->offset != SB_SECTOR)
goto err;
+ err = "Not a bcache superblock (bad magic)";
if (memcmp(sb->magic, bcache_magic, 16))
goto err;
@@ -529,12 +530,29 @@ static void prio_io(struct cache *ca, uint64_t bucket, int op,
closure_sync(cl);
}
-void bch_prio_write(struct cache *ca)
+int bch_prio_write(struct cache *ca, bool wait)
{
int i;
struct bucket *b;
struct closure cl;
+ pr_debug("free_prio=%zu, free_none=%zu, free_inc=%zu",
+ fifo_used(&ca->free[RESERVE_PRIO]),
+ fifo_used(&ca->free[RESERVE_NONE]),
+ fifo_used(&ca->free_inc));
+
+ /*
+ * Pre-check that there are enough free buckets. In the non-blocking
+ * scenario it's better to fail early than to start allocating
+ * buckets and have to clean up later on failure.
+ */
+ if (!wait) {
+ size_t avail = fifo_used(&ca->free[RESERVE_PRIO]) +
+ fifo_used(&ca->free[RESERVE_NONE]);
+ if (prio_buckets(ca) > avail)
+ return -ENOMEM;
+ }
+
closure_init_stack(&cl);
lockdep_assert_held(&ca->set->bucket_lock);
@@ -544,9 +562,6 @@ void bch_prio_write(struct cache *ca)
atomic_long_add(ca->sb.bucket_size * prio_buckets(ca),
&ca->meta_sectors_written);
- //pr_debug("free %zu, free_inc %zu, unused %zu", fifo_used(&ca->free),
- // fifo_used(&ca->free_inc), fifo_used(&ca->unused));
-
for (i = prio_buckets(ca) - 1; i >= 0; --i) {
long bucket;
struct prio_set *p = ca->disk_buckets;
@@ -564,7 +579,7 @@ void bch_prio_write(struct cache *ca)
p->magic = pset_magic(&ca->sb);
p->csum = bch_crc64(&p->magic, bucket_bytes(ca) - 8);
- bucket = bch_bucket_alloc(ca, RESERVE_PRIO, true);
+ bucket = bch_bucket_alloc(ca, RESERVE_PRIO, wait);
BUG_ON(bucket == -1);
mutex_unlock(&ca->set->bucket_lock);
@@ -593,6 +608,7 @@ void bch_prio_write(struct cache *ca)
ca->prio_last_buckets[i] = ca->prio_buckets[i];
}
+ return 0;
}
static void prio_read(struct cache *ca, uint64_t bucket)
@@ -761,20 +777,28 @@ static inline int idx_to_first_minor(int idx)
static void bcache_device_free(struct bcache_device *d)
{
+ struct gendisk *disk = d->disk;
+
lockdep_assert_held(&bch_register_lock);
- pr_info("%s stopped", d->disk->disk_name);
+ if (disk)
+ pr_info("%s stopped", disk->disk_name);
+ else
+ pr_err("bcache device (NULL gendisk) stopped");
if (d->c)
bcache_device_detach(d);
- if (d->disk && d->disk->flags & GENHD_FL_UP)
- del_gendisk(d->disk);
- if (d->disk && d->disk->queue)
- blk_cleanup_queue(d->disk->queue);
- if (d->disk) {
+
+ if (disk) {
+ if (disk->flags & GENHD_FL_UP)
+ del_gendisk(disk);
+
+ if (disk->queue)
+ blk_cleanup_queue(disk->queue);
+
ida_simple_remove(&bcache_device_idx,
- first_minor_to_idx(d->disk->first_minor));
- put_disk(d->disk);
+ first_minor_to_idx(disk->first_minor));
+ put_disk(disk);
}
bioset_exit(&d->bio_split);
@@ -1769,6 +1793,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
sema_init(&c->sb_write_mutex, 1);
mutex_init(&c->bucket_lock);
init_waitqueue_head(&c->btree_cache_wait);
+ spin_lock_init(&c->btree_cannibalize_lock);
init_waitqueue_head(&c->bucket_wait);
init_waitqueue_head(&c->gc_wait);
sema_init(&c->uuid_write_mutex, 1);
@@ -1809,6 +1834,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
c->congested_read_threshold_us = 2000;
c->congested_write_threshold_us = 20000;
c->error_limit = DEFAULT_IO_ERROR_LIMIT;
+ c->idle_max_writeback_rate_enabled = 1;
WARN_ON(test_and_clear_bit(CACHE_SET_IO_DISABLE, &c->flags));
return c;
@@ -1954,7 +1980,7 @@ static int run_cache_set(struct cache_set *c)
mutex_lock(&c->bucket_lock);
for_each_cache(ca, c, i)
- bch_prio_write(ca);
+ bch_prio_write(ca, true);
mutex_unlock(&c->bucket_lock);
err = "cannot allocate new UUID bucket";
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index 627dcea0f5b6..733e2ddf3c78 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -134,6 +134,7 @@ rw_attribute(expensive_debug_checks);
rw_attribute(cache_replacement_policy);
rw_attribute(btree_shrinker_disabled);
rw_attribute(copy_gc_enabled);
+rw_attribute(idle_max_writeback_rate);
rw_attribute(gc_after_writeback);
rw_attribute(size);
@@ -747,6 +748,8 @@ SHOW(__bch_cache_set)
sysfs_printf(gc_always_rewrite, "%i", c->gc_always_rewrite);
sysfs_printf(btree_shrinker_disabled, "%i", c->shrinker_disabled);
sysfs_printf(copy_gc_enabled, "%i", c->copy_gc_enabled);
+ sysfs_printf(idle_max_writeback_rate, "%i",
+ c->idle_max_writeback_rate_enabled);
sysfs_printf(gc_after_writeback, "%i", c->gc_after_writeback);
sysfs_printf(io_disable, "%i",
test_bit(CACHE_SET_IO_DISABLE, &c->flags));
@@ -864,6 +867,9 @@ STORE(__bch_cache_set)
sysfs_strtoul_bool(gc_always_rewrite, c->gc_always_rewrite);
sysfs_strtoul_bool(btree_shrinker_disabled, c->shrinker_disabled);
sysfs_strtoul_bool(copy_gc_enabled, c->copy_gc_enabled);
+ sysfs_strtoul_bool(idle_max_writeback_rate,
+ c->idle_max_writeback_rate_enabled);
+
/*
* write gc_after_writeback here may overwrite an already set
* BCH_DO_AUTO_GC, it doesn't matter because this flag will be
@@ -954,6 +960,7 @@ static struct attribute *bch_cache_set_internal_files[] = {
&sysfs_gc_always_rewrite,
&sysfs_btree_shrinker_disabled,
&sysfs_copy_gc_enabled,
+ &sysfs_idle_max_writeback_rate,
&sysfs_gc_after_writeback,
&sysfs_io_disable,
&sysfs_cutoff_writeback,
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index d60268fe49e1..4a40f9eadeaf 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -122,6 +122,10 @@ static void __update_writeback_rate(struct cached_dev *dc)
static bool set_at_max_writeback_rate(struct cache_set *c,
struct cached_dev *dc)
{
+ /* Don't set max writeback rate if it is disabled */
+ if (!c->idle_max_writeback_rate_enabled)
+ return false;
+
/* Don't set max writeback rate if gc is running */
if (!c->gc_mark_valid)
return false;
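
Taken together, the bcache hunks above wire up a single tunable: a cache_set field defaulted to 1 in bch_cache_set_alloc(), sysfs show/store plumbing, and this early return so the idle writeback boost can be vetoed. Since the attribute sits in bch_cache_set_internal_files[], it should presumably appear as /sys/fs/bcache/<set-uuid>/internal/idle_max_writeback_rate, writable with 0 or 1. The bcache sysfs macros make the recipe mechanical; condensed from the four hunks:

    rw_attribute(idle_max_writeback_rate);  /* declares sysfs_idle_max_writeback_rate */

    /* in SHOW(__bch_cache_set): print the current value */
    sysfs_printf(idle_max_writeback_rate, "%i",
                 c->idle_max_writeback_rate_enabled);

    /* in STORE(__bch_cache_set): parse 0/1 from userspace */
    sysfs_strtoul_bool(idle_max_writeback_rate,
                       c->idle_max_writeback_rate_enabled);

    /* then list &sysfs_idle_max_writeback_rate in the files[] array */
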
diff --git a/drivers/md/dm-bio-prison-v1.c b/drivers/md/dm-bio-prison-v1.c
index b5389890bbc3..1f8f98efd97a 100644
--- a/drivers/md/dm-bio-prison-v1.c
+++ b/drivers/md/dm-bio-prison-v1.c
@@ -150,11 +150,10 @@ static int bio_detain(struct dm_bio_prison *prison,
struct dm_bio_prison_cell **cell_result)
{
int r;
- unsigned long flags;
- spin_lock_irqsave(&prison->lock, flags);
+ spin_lock_irq(&prison->lock);
r = __bio_detain(prison, key, inmate, cell_prealloc, cell_result);
- spin_unlock_irqrestore(&prison->lock, flags);
+ spin_unlock_irq(&prison->lock);
return r;
}
@@ -198,11 +197,9 @@ void dm_cell_release(struct dm_bio_prison *prison,
struct dm_bio_prison_cell *cell,
struct bio_list *bios)
{
- unsigned long flags;
-
- spin_lock_irqsave(&prison->lock, flags);
+ spin_lock_irq(&prison->lock);
__cell_release(prison, cell, bios);
- spin_unlock_irqrestore(&prison->lock, flags);
+ spin_unlock_irq(&prison->lock);
}
EXPORT_SYMBOL_GPL(dm_cell_release);
@@ -250,12 +247,10 @@ void dm_cell_visit_release(struct dm_bio_prison *prison,
void *context,
struct dm_bio_prison_cell *cell)
{
- unsigned long flags;
-
- spin_lock_irqsave(&prison->lock, flags);
+ spin_lock_irq(&prison->lock);
visit_fn(context, cell);
rb_erase(&cell->node, &prison->cells);
- spin_unlock_irqrestore(&prison->lock, flags);
+ spin_unlock_irq(&prison->lock);
}
EXPORT_SYMBOL_GPL(dm_cell_visit_release);
@@ -275,11 +270,10 @@ int dm_cell_promote_or_release(struct dm_bio_prison *prison,
struct dm_bio_prison_cell *cell)
{
int r;
- unsigned long flags;
- spin_lock_irqsave(&prison->lock, flags);
+ spin_lock_irq(&prison->lock);
r = __promote_or_release(prison, cell);
- spin_unlock_irqrestore(&prison->lock, flags);
+ spin_unlock_irq(&prison->lock);
return r;
}
@@ -379,10 +373,9 @@ EXPORT_SYMBOL_GPL(dm_deferred_entry_dec);
int dm_deferred_set_add_work(struct dm_deferred_set *ds, struct list_head *work)
{
int r = 1;
- unsigned long flags;
unsigned next_entry;
- spin_lock_irqsave(&ds->lock, flags);
+ spin_lock_irq(&ds->lock);
if ((ds->sweeper == ds->current_entry) &&
!ds->entries[ds->current_entry].count)
r = 0;
@@ -392,7 +385,7 @@ int dm_deferred_set_add_work(struct dm_deferred_set *ds, struct list_head *work)
if (!ds->entries[next_entry].count)
ds->current_entry = next_entry;
}
- spin_unlock_irqrestore(&ds->lock, flags);
+ spin_unlock_irq(&ds->lock);
return r;
}
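
Most of the dm churn in this series is this one mechanical conversion: spin_lock_irqsave()/spin_unlock_irqrestore() becomes spin_lock_irq()/spin_unlock_irq() in functions that are only ever called from process context with interrupts enabled. The saved-flags variant preserves the caller's interrupt state, so it is required when the caller might already have interrupts disabled; the plain _irq variant unconditionally re-enables interrupts on unlock, so it is cheaper but must never be used from interrupt context or nested inside another irqsave section. A minimal sketch of the two forms:

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(lock);

    /* Callable from any context: interrupt state is saved and restored. */
    static void from_any_context(void)
    {
        unsigned long flags;

        spin_lock_irqsave(&lock, flags);
        /* critical section */
        spin_unlock_irqrestore(&lock, flags);
    }

    /* Process context only: unlock unconditionally re-enables IRQs. */
    static void from_process_context(void)
    {
        spin_lock_irq(&lock);
        /* critical section */
        spin_unlock_irq(&lock);
    }
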
diff --git a/drivers/md/dm-bio-prison-v2.c b/drivers/md/dm-bio-prison-v2.c
index b092cdc8e1ae..8ee019eda32d 100644
--- a/drivers/md/dm-bio-prison-v2.c
+++ b/drivers/md/dm-bio-prison-v2.c
@@ -177,11 +177,10 @@ bool dm_cell_get_v2(struct dm_bio_prison_v2 *prison,
struct dm_bio_prison_cell_v2 **cell_result)
{
int r;
- unsigned long flags;
- spin_lock_irqsave(&prison->lock, flags);
+ spin_lock_irq(&prison->lock);
r = __get(prison, key, lock_level, inmate, cell_prealloc, cell_result);
- spin_unlock_irqrestore(&prison->lock, flags);
+ spin_unlock_irq(&prison->lock);
return r;
}
@@ -261,11 +260,10 @@ int dm_cell_lock_v2(struct dm_bio_prison_v2 *prison,
struct dm_bio_prison_cell_v2 **cell_result)
{
int r;
- unsigned long flags;
- spin_lock_irqsave(&prison->lock, flags);
+ spin_lock_irq(&prison->lock);
r = __lock(prison, key, lock_level, cell_prealloc, cell_result);
- spin_unlock_irqrestore(&prison->lock, flags);
+ spin_unlock_irq(&prison->lock);
return r;
}
@@ -285,11 +283,9 @@ void dm_cell_quiesce_v2(struct dm_bio_prison_v2 *prison,
struct dm_bio_prison_cell_v2 *cell,
struct work_struct *continuation)
{
- unsigned long flags;
-
- spin_lock_irqsave(&prison->lock, flags);
+ spin_lock_irq(&prison->lock);
__quiesce(prison, cell, continuation);
- spin_unlock_irqrestore(&prison->lock, flags);
+ spin_unlock_irq(&prison->lock);
}
EXPORT_SYMBOL_GPL(dm_cell_quiesce_v2);
@@ -309,11 +305,10 @@ int dm_cell_lock_promote_v2(struct dm_bio_prison_v2 *prison,
unsigned new_lock_level)
{
int r;
- unsigned long flags;
- spin_lock_irqsave(&prison->lock, flags);
+ spin_lock_irq(&prison->lock);
r = __promote(prison, cell, new_lock_level);
- spin_unlock_irqrestore(&prison->lock, flags);
+ spin_unlock_irq(&prison->lock);
return r;
}
@@ -342,11 +337,10 @@ bool dm_cell_unlock_v2(struct dm_bio_prison_v2 *prison,
struct bio_list *bios)
{
bool r;
- unsigned long flags;
- spin_lock_irqsave(&prison->lock, flags);
+ spin_lock_irq(&prison->lock);
r = __unlock(prison, cell, bios);
- spin_unlock_irqrestore(&prison->lock, flags);
+ spin_unlock_irq(&prison->lock);
return r;
}
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 8346e6d1816c..2d32821b3a5b 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -74,22 +74,19 @@ static bool __iot_idle_for(struct io_tracker *iot, unsigned long jifs)
static bool iot_idle_for(struct io_tracker *iot, unsigned long jifs)
{
bool r;
- unsigned long flags;
- spin_lock_irqsave(&iot->lock, flags);
+ spin_lock_irq(&iot->lock);
r = __iot_idle_for(iot, jifs);
- spin_unlock_irqrestore(&iot->lock, flags);
+ spin_unlock_irq(&iot->lock);
return r;
}
static void iot_io_begin(struct io_tracker *iot, sector_t len)
{
- unsigned long flags;
-
- spin_lock_irqsave(&iot->lock, flags);
+ spin_lock_irq(&iot->lock);
iot->in_flight += len;
- spin_unlock_irqrestore(&iot->lock, flags);
+ spin_unlock_irq(&iot->lock);
}
static void __iot_io_end(struct io_tracker *iot, sector_t len)
@@ -172,7 +169,6 @@ static void __commit(struct work_struct *_ws)
{
struct batcher *b = container_of(_ws, struct batcher, commit_work);
blk_status_t r;
- unsigned long flags;
struct list_head work_items;
struct work_struct *ws, *tmp;
struct continuation *k;
@@ -186,12 +182,12 @@ static void __commit(struct work_struct *_ws)
* We have to grab these before the commit_op to avoid a race
* condition.
*/
- spin_lock_irqsave(&b->lock, flags);
+ spin_lock_irq(&b->lock);
list_splice_init(&b->work_items, &work_items);
bio_list_merge(&bios, &b->bios);
bio_list_init(&b->bios);
b->commit_scheduled = false;
- spin_unlock_irqrestore(&b->lock, flags);
+ spin_unlock_irq(&b->lock);
r = b->commit_op(b->commit_context);
@@ -238,13 +234,12 @@ static void async_commit(struct batcher *b)
static void continue_after_commit(struct batcher *b, struct continuation *k)
{
- unsigned long flags;
bool commit_scheduled;
- spin_lock_irqsave(&b->lock, flags);
+ spin_lock_irq(&b->lock);
commit_scheduled = b->commit_scheduled;
list_add_tail(&k->ws.entry, &b->work_items);
- spin_unlock_irqrestore(&b->lock, flags);
+ spin_unlock_irq(&b->lock);
if (commit_scheduled)
async_commit(b);
@@ -255,13 +250,12 @@ static void continue_after_commit(struct batcher *b, struct continuation *k)
*/
static void issue_after_commit(struct batcher *b, struct bio *bio)
{
- unsigned long flags;
bool commit_scheduled;
- spin_lock_irqsave(&b->lock, flags);
+ spin_lock_irq(&b->lock);
commit_scheduled = b->commit_scheduled;
bio_list_add(&b->bios, bio);
- spin_unlock_irqrestore(&b->lock, flags);
+ spin_unlock_irq(&b->lock);
if (commit_scheduled)
async_commit(b);
@@ -273,12 +267,11 @@ static void issue_after_commit(struct batcher *b, struct bio *bio)
static void schedule_commit(struct batcher *b)
{
bool immediate;
- unsigned long flags;
- spin_lock_irqsave(&b->lock, flags);
+ spin_lock_irq(&b->lock);
immediate = !list_empty(&b->work_items) || !bio_list_empty(&b->bios);
b->commit_scheduled = true;
- spin_unlock_irqrestore(&b->lock, flags);
+ spin_unlock_irq(&b->lock);
if (immediate)
async_commit(b);
@@ -630,23 +623,19 @@ static struct per_bio_data *init_per_bio_data(struct bio *bio)
static void defer_bio(struct cache *cache, struct bio *bio)
{
- unsigned long flags;
-
- spin_lock_irqsave(&cache->lock, flags);
+ spin_lock_irq(&cache->lock);
bio_list_add(&cache->deferred_bios, bio);
- spin_unlock_irqrestore(&cache->lock, flags);
+ spin_unlock_irq(&cache->lock);
wake_deferred_bio_worker(cache);
}
static void defer_bios(struct cache *cache, struct bio_list *bios)
{
- unsigned long flags;
-
- spin_lock_irqsave(&cache->lock, flags);
+ spin_lock_irq(&cache->lock);
bio_list_merge(&cache->deferred_bios, bios);
bio_list_init(bios);
- spin_unlock_irqrestore(&cache->lock, flags);
+ spin_unlock_irq(&cache->lock);
wake_deferred_bio_worker(cache);
}
@@ -756,33 +745,27 @@ static dm_dblock_t oblock_to_dblock(struct cache *cache, dm_oblock_t oblock)
static void set_discard(struct cache *cache, dm_dblock_t b)
{
- unsigned long flags;
-
BUG_ON(from_dblock(b) >= from_dblock(cache->discard_nr_blocks));
atomic_inc(&cache->stats.discard_count);
- spin_lock_irqsave(&cache->lock, flags);
+ spin_lock_irq(&cache->lock);
set_bit(from_dblock(b), cache->discard_bitset);
- spin_unlock_irqrestore(&cache->lock, flags);
+ spin_unlock_irq(&cache->lock);
}
static void clear_discard(struct cache *cache, dm_dblock_t b)
{
- unsigned long flags;
-
- spin_lock_irqsave(&cache->lock, flags);
+ spin_lock_irq(&cache->lock);
clear_bit(from_dblock(b), cache->discard_bitset);
- spin_unlock_irqrestore(&cache->lock, flags);
+ spin_unlock_irq(&cache->lock);
}
static bool is_discarded(struct cache *cache, dm_dblock_t b)
{
int r;
- unsigned long flags;
-
- spin_lock_irqsave(&cache->lock, flags);
+ spin_lock_irq(&cache->lock);
r = test_bit(from_dblock(b), cache->discard_bitset);
- spin_unlock_irqrestore(&cache->lock, flags);
+ spin_unlock_irq(&cache->lock);
return r;
}
@@ -790,12 +773,10 @@ static bool is_discarded(struct cache *cache, dm_dblock_t b)
static bool is_discarded_oblock(struct cache *cache, dm_oblock_t b)
{
int r;
- unsigned long flags;
-
- spin_lock_irqsave(&cache->lock, flags);
+ spin_lock_irq(&cache->lock);
r = test_bit(from_dblock(oblock_to_dblock(cache, b)),
cache->discard_bitset);
- spin_unlock_irqrestore(&cache->lock, flags);
+ spin_unlock_irq(&cache->lock);
return r;
}
@@ -827,17 +808,16 @@ static void remap_to_cache(struct cache *cache, struct bio *bio,
static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)
{
- unsigned long flags;
struct per_bio_data *pb;
- spin_lock_irqsave(&cache->lock, flags);
+ spin_lock_irq(&cache->lock);
if (cache->need_tick_bio && !op_is_flush(bio->bi_opf) &&
bio_op(bio) != REQ_OP_DISCARD) {
pb = get_per_bio_data(bio);
pb->tick = true;
cache->need_tick_bio = false;
}
- spin_unlock_irqrestore(&cache->lock, flags);
+ spin_unlock_irq(&cache->lock);
}
static void __remap_to_origin_clear_discard(struct cache *cache, struct bio *bio,
@@ -1889,17 +1869,16 @@ static void process_deferred_bios(struct work_struct *ws)
{
struct cache *cache = container_of(ws, struct cache, deferred_bio_worker);
- unsigned long flags;
bool commit_needed = false;
struct bio_list bios;
struct bio *bio;
bio_list_init(&bios);
- spin_lock_irqsave(&cache->lock, flags);
+ spin_lock_irq(&cache->lock);
bio_list_merge(&bios, &cache->deferred_bios);
bio_list_init(&cache->deferred_bios);
- spin_unlock_irqrestore(&cache->lock, flags);
+ spin_unlock_irq(&cache->lock);
while ((bio = bio_list_pop(&bios))) {
if (bio->bi_opf & REQ_PREFLUSH)
diff --git a/drivers/md/dm-clone-metadata.c b/drivers/md/dm-clone-metadata.c
index 6bc8c1d1c351..08c552e5e41b 100644
--- a/drivers/md/dm-clone-metadata.c
+++ b/drivers/md/dm-clone-metadata.c
@@ -712,7 +712,7 @@ static int __metadata_commit(struct dm_clone_metadata *cmd)
static int __flush_dmap(struct dm_clone_metadata *cmd, struct dirty_map *dmap)
{
int r;
- unsigned long word, flags;
+ unsigned long word;
word = 0;
do {
@@ -736,9 +736,9 @@ static int __flush_dmap(struct dm_clone_metadata *cmd, struct dirty_map *dmap)
return r;
/* Update the changed flag */
- spin_lock_irqsave(&cmd->bitmap_lock, flags);
+ spin_lock_irq(&cmd->bitmap_lock);
dmap->changed = 0;
- spin_unlock_irqrestore(&cmd->bitmap_lock, flags);
+ spin_unlock_irq(&cmd->bitmap_lock);
return 0;
}
@@ -746,7 +746,6 @@ static int __flush_dmap(struct dm_clone_metadata *cmd, struct dirty_map *dmap)
int dm_clone_metadata_commit(struct dm_clone_metadata *cmd)
{
int r = -EPERM;
- unsigned long flags;
struct dirty_map *dmap, *next_dmap;
down_write(&cmd->lock);
@@ -770,9 +769,9 @@ int dm_clone_metadata_commit(struct dm_clone_metadata *cmd)
}
/* Swap dirty bitmaps */
- spin_lock_irqsave(&cmd->bitmap_lock, flags);
+ spin_lock_irq(&cmd->bitmap_lock);
cmd->current_dmap = next_dmap;
- spin_unlock_irqrestore(&cmd->bitmap_lock, flags);
+ spin_unlock_irq(&cmd->bitmap_lock);
/*
* No one is accessing the old dirty bitmap anymore, so we can flush
@@ -817,9 +816,9 @@ int dm_clone_cond_set_range(struct dm_clone_metadata *cmd, unsigned long start,
{
int r = 0;
struct dirty_map *dmap;
- unsigned long word, region_nr, flags;
+ unsigned long word, region_nr;
- spin_lock_irqsave(&cmd->bitmap_lock, flags);
+ spin_lock_irq(&cmd->bitmap_lock);
if (cmd->read_only) {
r = -EPERM;
@@ -836,7 +835,7 @@ int dm_clone_cond_set_range(struct dm_clone_metadata *cmd, unsigned long start,
}
}
out:
- spin_unlock_irqrestore(&cmd->bitmap_lock, flags);
+ spin_unlock_irq(&cmd->bitmap_lock);
return r;
}
@@ -903,13 +902,11 @@ out:
void dm_clone_metadata_set_read_only(struct dm_clone_metadata *cmd)
{
- unsigned long flags;
-
down_write(&cmd->lock);
- spin_lock_irqsave(&cmd->bitmap_lock, flags);
+ spin_lock_irq(&cmd->bitmap_lock);
cmd->read_only = 1;
- spin_unlock_irqrestore(&cmd->bitmap_lock, flags);
+ spin_unlock_irq(&cmd->bitmap_lock);
if (!cmd->fail_io)
dm_bm_set_read_only(cmd->bm);
@@ -919,13 +916,11 @@ void dm_clone_metadata_set_read_only(struct dm_clone_metadata *cmd)
void dm_clone_metadata_set_read_write(struct dm_clone_metadata *cmd)
{
- unsigned long flags;
-
down_write(&cmd->lock);
- spin_lock_irqsave(&cmd->bitmap_lock, flags);
+ spin_lock_irq(&cmd->bitmap_lock);
cmd->read_only = 0;
- spin_unlock_irqrestore(&cmd->bitmap_lock, flags);
+ spin_unlock_irq(&cmd->bitmap_lock);
if (!cmd->fail_io)
dm_bm_set_read_write(cmd->bm);
diff --git a/drivers/md/dm-clone-metadata.h b/drivers/md/dm-clone-metadata.h
index 434bff08508b..3fe50a781c11 100644
--- a/drivers/md/dm-clone-metadata.h
+++ b/drivers/md/dm-clone-metadata.h
@@ -44,7 +44,9 @@ int dm_clone_set_region_hydrated(struct dm_clone_metadata *cmd, unsigned long re
* @start: Starting region number
* @nr_regions: Number of regions in the range
*
- * This function doesn't block, so it's safe to call it from interrupt context.
+ * This function doesn't block, but since it uses spin_lock_irq()/spin_unlock_irq()
+ * it's NOT safe to call it from any context where interrupts are disabled, e.g.,
+ * from interrupt context.
*/
int dm_clone_cond_set_range(struct dm_clone_metadata *cmd, unsigned long start,
unsigned long nr_regions);
diff --git a/drivers/md/dm-clone-target.c b/drivers/md/dm-clone-target.c
index 4ca8f1977222..b3d89072d21c 100644
--- a/drivers/md/dm-clone-target.c
+++ b/drivers/md/dm-clone-target.c
@@ -332,8 +332,6 @@ static void submit_bios(struct bio_list *bios)
*/
static void issue_bio(struct clone *clone, struct bio *bio)
{
- unsigned long flags;
-
if (!bio_triggers_commit(clone, bio)) {
generic_make_request(bio);
return;
@@ -352,9 +350,9 @@ static void issue_bio(struct clone *clone, struct bio *bio)
* Batch together any bios that trigger commits and then issue a single
* commit for them in process_deferred_flush_bios().
*/
- spin_lock_irqsave(&clone->lock, flags);
+ spin_lock_irq(&clone->lock);
bio_list_add(&clone->deferred_flush_bios, bio);
- spin_unlock_irqrestore(&clone->lock, flags);
+ spin_unlock_irq(&clone->lock);
wake_worker(clone);
}
@@ -469,7 +467,7 @@ static void complete_discard_bio(struct clone *clone, struct bio *bio, bool succ
static void process_discard_bio(struct clone *clone, struct bio *bio)
{
- unsigned long rs, re, flags;
+ unsigned long rs, re;
bio_region_range(clone, bio, &rs, &re);
BUG_ON(re > clone->nr_regions);
@@ -501,9 +499,9 @@ static void process_discard_bio(struct clone *clone, struct bio *bio)
/*
* Defer discard processing.
*/
- spin_lock_irqsave(&clone->lock, flags);
+ spin_lock_irq(&clone->lock);
bio_list_add(&clone->deferred_discard_bios, bio);
- spin_unlock_irqrestore(&clone->lock, flags);
+ spin_unlock_irq(&clone->lock);
wake_worker(clone);
}
@@ -554,6 +552,12 @@ struct hash_table_bucket {
#define bucket_unlock_irqrestore(bucket, flags) \
spin_unlock_irqrestore(&(bucket)->lock, flags)
+#define bucket_lock_irq(bucket) \
+ spin_lock_irq(&(bucket)->lock)
+
+#define bucket_unlock_irq(bucket) \
+ spin_unlock_irq(&(bucket)->lock)
+
static int hash_table_init(struct clone *clone)
{
unsigned int i, sz;
@@ -851,7 +855,6 @@ static void hydration_overwrite(struct dm_clone_region_hydration *hd, struct bio
*/
static void hydrate_bio_region(struct clone *clone, struct bio *bio)
{
- unsigned long flags;
unsigned long region_nr;
struct hash_table_bucket *bucket;
struct dm_clone_region_hydration *hd, *hd2;
@@ -859,19 +862,19 @@ static void hydrate_bio_region(struct clone *clone, struct bio *bio)
region_nr = bio_to_region(clone, bio);
bucket = get_hash_table_bucket(clone, region_nr);
- bucket_lock_irqsave(bucket, flags);
+ bucket_lock_irq(bucket);
hd = __hash_find(bucket, region_nr);
if (hd) {
/* Someone else is hydrating the region */
bio_list_add(&hd->deferred_bios, bio);
- bucket_unlock_irqrestore(bucket, flags);
+ bucket_unlock_irq(bucket);
return;
}
if (dm_clone_is_region_hydrated(clone->cmd, region_nr)) {
/* The region has been hydrated */
- bucket_unlock_irqrestore(bucket, flags);
+ bucket_unlock_irq(bucket);
issue_bio(clone, bio);
return;
}
@@ -880,16 +883,16 @@ static void hydrate_bio_region(struct clone *clone, struct bio *bio)
* We must allocate a hydration descriptor and start the hydration of
* the corresponding region.
*/
- bucket_unlock_irqrestore(bucket, flags);
+ bucket_unlock_irq(bucket);
hd = alloc_hydration(clone);
hydration_init(hd, region_nr);
- bucket_lock_irqsave(bucket, flags);
+ bucket_lock_irq(bucket);
/* Check if the region has been hydrated in the meantime. */
if (dm_clone_is_region_hydrated(clone->cmd, region_nr)) {
- bucket_unlock_irqrestore(bucket, flags);
+ bucket_unlock_irq(bucket);
free_hydration(hd);
issue_bio(clone, bio);
return;
@@ -899,7 +902,7 @@ static void hydrate_bio_region(struct clone *clone, struct bio *bio)
if (hd2 != hd) {
/* Someone else started the region's hydration. */
bio_list_add(&hd2->deferred_bios, bio);
- bucket_unlock_irqrestore(bucket, flags);
+ bucket_unlock_irq(bucket);
free_hydration(hd);
return;
}
@@ -911,7 +914,7 @@ static void hydrate_bio_region(struct clone *clone, struct bio *bio)
*/
if (unlikely(get_clone_mode(clone) >= CM_READ_ONLY)) {
hlist_del(&hd->h);
- bucket_unlock_irqrestore(bucket, flags);
+ bucket_unlock_irq(bucket);
free_hydration(hd);
bio_io_error(bio);
return;
@@ -925,11 +928,11 @@ static void hydrate_bio_region(struct clone *clone, struct bio *bio)
* to the destination device.
*/
if (is_overwrite_bio(clone, bio)) {
- bucket_unlock_irqrestore(bucket, flags);
+ bucket_unlock_irq(bucket);
hydration_overwrite(hd, bio);
} else {
bio_list_add(&hd->deferred_bios, bio);
- bucket_unlock_irqrestore(bucket, flags);
+ bucket_unlock_irq(bucket);
hydration_copy(hd, 1);
}
}
@@ -996,7 +999,6 @@ static unsigned long __start_next_hydration(struct clone *clone,
unsigned long offset,
struct batch_info *batch)
{
- unsigned long flags;
struct hash_table_bucket *bucket;
struct dm_clone_region_hydration *hd;
unsigned long nr_regions = clone->nr_regions;
@@ -1010,13 +1012,13 @@ static unsigned long __start_next_hydration(struct clone *clone,
break;
bucket = get_hash_table_bucket(clone, offset);
- bucket_lock_irqsave(bucket, flags);
+ bucket_lock_irq(bucket);
if (!dm_clone_is_region_hydrated(clone->cmd, offset) &&
!__hash_find(bucket, offset)) {
hydration_init(hd, offset);
__insert_region_hydration(bucket, hd);
- bucket_unlock_irqrestore(bucket, flags);
+ bucket_unlock_irq(bucket);
/* Batch hydration */
__batch_hydration(batch, hd);
@@ -1024,7 +1026,7 @@ static unsigned long __start_next_hydration(struct clone *clone,
return (offset + 1);
}
- bucket_unlock_irqrestore(bucket, flags);
+ bucket_unlock_irq(bucket);
} while (++offset < nr_regions);
@@ -1140,13 +1142,13 @@ static void process_deferred_discards(struct clone *clone)
int r = -EPERM;
struct bio *bio;
struct blk_plug plug;
- unsigned long rs, re, flags;
+ unsigned long rs, re;
struct bio_list discards = BIO_EMPTY_LIST;
- spin_lock_irqsave(&clone->lock, flags);
+ spin_lock_irq(&clone->lock);
bio_list_merge(&discards, &clone->deferred_discard_bios);
bio_list_init(&clone->deferred_discard_bios);
- spin_unlock_irqrestore(&clone->lock, flags);
+ spin_unlock_irq(&clone->lock);
if (bio_list_empty(&discards))
return;
@@ -1176,13 +1178,12 @@ out:
static void process_deferred_bios(struct clone *clone)
{
- unsigned long flags;
struct bio_list bios = BIO_EMPTY_LIST;
- spin_lock_irqsave(&clone->lock, flags);
+ spin_lock_irq(&clone->lock);
bio_list_merge(&bios, &clone->deferred_bios);
bio_list_init(&clone->deferred_bios);
- spin_unlock_irqrestore(&clone->lock, flags);
+ spin_unlock_irq(&clone->lock);
if (bio_list_empty(&bios))
return;
@@ -1193,7 +1194,6 @@ static void process_deferred_bios(struct clone *clone)
static void process_deferred_flush_bios(struct clone *clone)
{
struct bio *bio;
- unsigned long flags;
struct bio_list bios = BIO_EMPTY_LIST;
struct bio_list bio_completions = BIO_EMPTY_LIST;
@@ -1201,13 +1201,13 @@ static void process_deferred_flush_bios(struct clone *clone)
* If there are any deferred flush bios, we must commit the metadata
* before issuing them or signaling their completion.
*/
- spin_lock_irqsave(&clone->lock, flags);
+ spin_lock_irq(&clone->lock);
bio_list_merge(&bios, &clone->deferred_flush_bios);
bio_list_init(&clone->deferred_flush_bios);
bio_list_merge(&bio_completions, &clone->deferred_flush_completions);
bio_list_init(&clone->deferred_flush_completions);
- spin_unlock_irqrestore(&clone->lock, flags);
+ spin_unlock_irq(&clone->lock);
if (bio_list_empty(&bios) && bio_list_empty(&bio_completions) &&
!(dm_clone_changed_this_transaction(clone->cmd) && need_commit_due_to_time(clone)))
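
hydrate_bio_region() above is a textbook unlock-allocate-relock sequence: the bucket lock cannot be held across alloc_hydration(), which may sleep, so both conditions checked under the first lock hold must be re-validated after reacquiring it, because another thread may have hydrated the region or inserted its own descriptor in the window. A condensed sketch, with every name a hypothetical stand-in for the dm-clone one:

    #include <linux/spinlock.h>
    #include <linux/types.h>

    struct desc;                                    /* hypothetical */
    struct bucket { spinlock_t lock; /* ... */ };   /* hypothetical */

    struct desc *hash_find(struct bucket *b, unsigned long nr);
    void hash_insert(struct bucket *b, struct desc *hd);
    bool region_hydrated(unsigned long nr);
    struct desc *alloc_descriptor(void);            /* may sleep */
    void free_descriptor(struct desc *hd);

    static void start_hydration(struct bucket *bucket, unsigned long nr)
    {
        struct desc *hd;

        spin_lock_irq(&bucket->lock);
        if (hash_find(bucket, nr) || region_hydrated(nr)) {
            spin_unlock_irq(&bucket->lock);         /* fast paths */
            return;
        }
        spin_unlock_irq(&bucket->lock);

        hd = alloc_descriptor();                    /* sleeping alloc: lock dropped */

        spin_lock_irq(&bucket->lock);
        /* re-validate: the world may have changed while unlocked */
        if (region_hydrated(nr) || hash_find(bucket, nr)) {
            spin_unlock_irq(&bucket->lock);         /* lost the race: back out */
            free_descriptor(hd);
            return;
        }
        hash_insert(bucket, hd);
        spin_unlock_irq(&bucket->lock);
    }
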
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index f87f6495652f..eb9782fc93fe 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -2700,21 +2700,18 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
}
ret = -ENOMEM;
- cc->io_queue = alloc_workqueue("kcryptd_io/%s",
- WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM,
- 1, devname);
+ cc->io_queue = alloc_workqueue("kcryptd_io/%s", WQ_MEM_RECLAIM, 1, devname);
if (!cc->io_queue) {
ti->error = "Couldn't create kcryptd io queue";
goto bad;
}
if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
- cc->crypt_queue = alloc_workqueue("kcryptd/%s",
- WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM,
+ cc->crypt_queue = alloc_workqueue("kcryptd/%s", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM,
1, devname);
else
cc->crypt_queue = alloc_workqueue("kcryptd/%s",
- WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND,
+ WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND,
num_online_cpus(), devname);
if (!cc->crypt_queue) {
ti->error = "Couldn't create kcryptd queue";
diff --git a/drivers/md/dm-dust.c b/drivers/md/dm-dust.c
index 8288887b7f94..eb37584427a4 100644
--- a/drivers/md/dm-dust.c
+++ b/drivers/md/dm-dust.c
@@ -17,6 +17,7 @@
struct badblock {
struct rb_node node;
sector_t bb;
+ unsigned char wr_fail_cnt;
};
struct dust_device {
@@ -101,7 +102,8 @@ static int dust_remove_block(struct dust_device *dd, unsigned long long block)
return 0;
}
-static int dust_add_block(struct dust_device *dd, unsigned long long block)
+static int dust_add_block(struct dust_device *dd, unsigned long long block,
+ unsigned char wr_fail_cnt)
{
struct badblock *bblock;
unsigned long flags;
@@ -115,6 +117,7 @@ static int dust_add_block(struct dust_device *dd, unsigned long long block)
spin_lock_irqsave(&dd->dust_lock, flags);
bblock->bb = block;
+ bblock->wr_fail_cnt = wr_fail_cnt;
if (!dust_rb_insert(&dd->badblocklist, bblock)) {
if (!dd->quiet_mode) {
DMERR("%s: block %llu already in badblocklist",
@@ -126,8 +129,10 @@ static int dust_add_block(struct dust_device *dd, unsigned long long block)
}
dd->badblock_count++;
- if (!dd->quiet_mode)
- DMINFO("%s: badblock added at block %llu", __func__, block);
+ if (!dd->quiet_mode) {
+ DMINFO("%s: badblock added at block %llu with write fail count %hhu",
+ __func__, block, wr_fail_cnt);
+ }
spin_unlock_irqrestore(&dd->dust_lock, flags);
return 0;
@@ -163,22 +168,27 @@ static int dust_map_read(struct dust_device *dd, sector_t thisblock,
bool fail_read_on_bb)
{
unsigned long flags;
- int ret = DM_MAPIO_REMAPPED;
+ int r = DM_MAPIO_REMAPPED;
if (fail_read_on_bb) {
thisblock >>= dd->sect_per_block_shift;
spin_lock_irqsave(&dd->dust_lock, flags);
- ret = __dust_map_read(dd, thisblock);
+ r = __dust_map_read(dd, thisblock);
spin_unlock_irqrestore(&dd->dust_lock, flags);
}
- return ret;
+ return r;
}
-static void __dust_map_write(struct dust_device *dd, sector_t thisblock)
+static int __dust_map_write(struct dust_device *dd, sector_t thisblock)
{
struct badblock *bblk = dust_rb_search(&dd->badblocklist, thisblock);
+ if (bblk && bblk->wr_fail_cnt > 0) {
+ bblk->wr_fail_cnt--;
+ return DM_MAPIO_KILL;
+ }
+
if (bblk) {
rb_erase(&bblk->node, &dd->badblocklist);
dd->badblock_count--;
@@ -189,37 +199,40 @@ static void __dust_map_write(struct dust_device *dd, sector_t thisblock)
(unsigned long long)thisblock);
}
}
+
+ return DM_MAPIO_REMAPPED;
}
static int dust_map_write(struct dust_device *dd, sector_t thisblock,
bool fail_read_on_bb)
{
unsigned long flags;
+ int ret = DM_MAPIO_REMAPPED;
if (fail_read_on_bb) {
thisblock >>= dd->sect_per_block_shift;
spin_lock_irqsave(&dd->dust_lock, flags);
- __dust_map_write(dd, thisblock);
+ ret = __dust_map_write(dd, thisblock);
spin_unlock_irqrestore(&dd->dust_lock, flags);
}
- return DM_MAPIO_REMAPPED;
+ return ret;
}
static int dust_map(struct dm_target *ti, struct bio *bio)
{
struct dust_device *dd = ti->private;
- int ret;
+ int r;
bio_set_dev(bio, dd->dev->bdev);
bio->bi_iter.bi_sector = dd->start + dm_target_offset(ti, bio->bi_iter.bi_sector);
if (bio_data_dir(bio) == READ)
- ret = dust_map_read(dd, bio->bi_iter.bi_sector, dd->fail_read_on_bb);
+ r = dust_map_read(dd, bio->bi_iter.bi_sector, dd->fail_read_on_bb);
else
- ret = dust_map_write(dd, bio->bi_iter.bi_sector, dd->fail_read_on_bb);
+ r = dust_map_write(dd, bio->bi_iter.bi_sector, dd->fail_read_on_bb);
- return ret;
+ return r;
}
static bool __dust_clear_badblocks(struct rb_root *tree,
@@ -375,8 +388,10 @@ static int dust_message(struct dm_target *ti, unsigned int argc, char **argv,
struct dust_device *dd = ti->private;
sector_t size = i_size_read(dd->dev->bdev->bd_inode) >> SECTOR_SHIFT;
bool invalid_msg = false;
- int result = -EINVAL;
+ int r = -EINVAL;
unsigned long long tmp, block;
+ unsigned char wr_fail_cnt;
+ unsigned int tmp_ui;
unsigned long flags;
char dummy;
@@ -388,45 +403,69 @@ static int dust_message(struct dm_target *ti, unsigned int argc, char **argv,
} else if (!strcasecmp(argv[0], "disable")) {
DMINFO("disabling read failures on bad sectors");
dd->fail_read_on_bb = false;
- result = 0;
+ r = 0;
} else if (!strcasecmp(argv[0], "enable")) {
DMINFO("enabling read failures on bad sectors");
dd->fail_read_on_bb = true;
- result = 0;
+ r = 0;
} else if (!strcasecmp(argv[0], "countbadblocks")) {
spin_lock_irqsave(&dd->dust_lock, flags);
DMINFO("countbadblocks: %llu badblock(s) found",
dd->badblock_count);
spin_unlock_irqrestore(&dd->dust_lock, flags);
- result = 0;
+ r = 0;
} else if (!strcasecmp(argv[0], "clearbadblocks")) {
- result = dust_clear_badblocks(dd);
+ r = dust_clear_badblocks(dd);
} else if (!strcasecmp(argv[0], "quiet")) {
if (!dd->quiet_mode)
dd->quiet_mode = true;
else
dd->quiet_mode = false;
- result = 0;
+ r = 0;
} else {
invalid_msg = true;
}
} else if (argc == 2) {
if (sscanf(argv[1], "%llu%c", &tmp, &dummy) != 1)
- return result;
+ return r;
block = tmp;
sector_div(size, dd->sect_per_block);
if (block > size) {
DMERR("selected block value out of range");
- return result;
+ return r;
}
if (!strcasecmp(argv[0], "addbadblock"))
- result = dust_add_block(dd, block);
+ r = dust_add_block(dd, block, 0);
else if (!strcasecmp(argv[0], "removebadblock"))
- result = dust_remove_block(dd, block);
+ r = dust_remove_block(dd, block);
else if (!strcasecmp(argv[0], "queryblock"))
- result = dust_query_block(dd, block);
+ r = dust_query_block(dd, block);
+ else
+ invalid_msg = true;
+
+ } else if (argc == 3) {
+ if (sscanf(argv[1], "%llu%c", &tmp, &dummy) != 1)
+ return r;
+
+ if (sscanf(argv[2], "%u%c", &tmp_ui, &dummy) != 1)
+ return r;
+
+ block = tmp;
+ if (tmp_ui > 255) {
+ DMERR("selected write fail count out of range");
+ return r;
+ }
+ wr_fail_cnt = tmp_ui;
+ sector_div(size, dd->sect_per_block);
+ if (block > size) {
+ DMERR("selected block value out of range");
+ return r;
+ }
+
+ if (!strcasecmp(argv[0], "addbadblock"))
+ r = dust_add_block(dd, block, wr_fail_cnt);
else
invalid_msg = true;
@@ -436,7 +475,7 @@ static int dust_message(struct dm_target *ti, unsigned int argc, char **argv,
if (invalid_msg)
DMERR("unrecognized message '%s' received", argv[0]);
- return result;
+ return r;
}
static void dust_status(struct dm_target *ti, status_type_t type,
@@ -499,12 +538,12 @@ static struct target_type dust_target = {
static int __init dm_dust_init(void)
{
- int result = dm_register_target(&dust_target);
+ int r = dm_register_target(&dust_target);
- if (result < 0)
- DMERR("dm_register_target failed %d", result);
+ if (r < 0)
+ DMERR("dm_register_target failed %d", r);
- return result;
+ return r;
}
static void __exit dm_dust_exit(void)
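
With the new argc == 3 branch, addbadblock takes an optional write-failure count: presumably something like `dmsetup message dust1 0 addbadblock 60 3` arms block 60 to fail the next three writes (each write decrements wr_fail_cnt and returns DM_MAPIO_KILL) before a further write clears the block as before. The count is parsed into an unsigned int and range-checked before narrowing to the one-byte field; a sketch of that step:

    #include <linux/errno.h>
    #include <linux/kernel.h>

    static int parse_wr_fail_cnt(const char *arg, unsigned char *out)
    {
        unsigned int tmp_ui;
        char dummy;

        /* parse into a wider type, reject trailing junk, then bound to a byte */
        if (sscanf(arg, "%u%c", &tmp_ui, &dummy) != 1 || tmp_ui > 255)
            return -EINVAL;
        *out = tmp_ui;
        return 0;
    }
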
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index 2900fbde89b3..a2cc9e45cbba 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -280,7 +280,7 @@ static void flakey_map_bio(struct dm_target *ti, struct bio *bio)
struct flakey_c *fc = ti->private;
bio_set_dev(bio, fc->dev->bdev);
- if (bio_sectors(bio) || bio_op(bio) == REQ_OP_ZONE_RESET)
+ if (bio_sectors(bio) || op_is_zone_mgmt(bio_op(bio)))
bio->bi_iter.bi_sector =
flakey_map_sector(ti, bio->bi_iter.bi_sector);
}
@@ -322,8 +322,7 @@ static int flakey_map(struct dm_target *ti, struct bio *bio)
struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
pb->bio_submitted = false;
- /* Do not fail reset zone */
- if (bio_op(bio) == REQ_OP_ZONE_RESET)
+ if (op_is_zone_mgmt(bio_op(bio)))
goto map_bio;
/* Are we alive ? */
@@ -384,7 +383,7 @@ static int flakey_end_io(struct dm_target *ti, struct bio *bio,
struct flakey_c *fc = ti->private;
struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
- if (bio_op(bio) == REQ_OP_ZONE_RESET)
+ if (op_is_zone_mgmt(bio_op(bio)))
return DM_ENDIO_DONE;
if (!*error && pb->bio_submitted && (bio_data_dir(bio) == READ)) {
@@ -460,21 +459,15 @@ static int flakey_prepare_ioctl(struct dm_target *ti, struct block_device **bdev
}
#ifdef CONFIG_BLK_DEV_ZONED
-static int flakey_report_zones(struct dm_target *ti, sector_t sector,
- struct blk_zone *zones, unsigned int *nr_zones)
+static int flakey_report_zones(struct dm_target *ti,
+ struct dm_report_zones_args *args, unsigned int nr_zones)
{
struct flakey_c *fc = ti->private;
- int ret;
+ sector_t sector = flakey_map_sector(ti, args->next_sector);
- /* Do report and remap it */
- ret = blkdev_report_zones(fc->dev->bdev, flakey_map_sector(ti, sector),
- zones, nr_zones);
- if (ret != 0)
- return ret;
-
- if (*nr_zones)
- dm_remap_zone_report(ti, fc->start, zones, nr_zones);
- return 0;
+ args->start = fc->start;
+ return blkdev_report_zones(fc->dev->bdev, sector, nr_zones,
+ dm_report_zones_cb, args);
}
#endif
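
op_is_zone_mgmt() generalizes the old REQ_OP_ZONE_RESET special case to the whole zone-management family introduced alongside zone open/close/finish support, so flakey (and dm-linear below) remap and pass through all of them without failing or tracking per-bio state. The helper is roughly this, paraphrased from include/linux/blk_types.h:

    static inline bool op_is_zone_mgmt(enum req_opf op)
    {
        switch (op & REQ_OP_MASK) {
        case REQ_OP_ZONE_RESET:
        case REQ_OP_ZONE_OPEN:
        case REQ_OP_ZONE_CLOSE:
        case REQ_OP_ZONE_FINISH:
            return true;
        default:
            return false;
        }
    }
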
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index dab4446fe7d8..b225b3e445fa 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -53,6 +53,7 @@
#define SB_VERSION_1 1
#define SB_VERSION_2 2
#define SB_VERSION_3 3
+#define SB_VERSION_4 4
#define SB_SECTORS 8
#define MAX_SECTORS_PER_BLOCK 8
@@ -73,6 +74,7 @@ struct superblock {
#define SB_FLAG_HAVE_JOURNAL_MAC 0x1
#define SB_FLAG_RECALCULATING 0x2
#define SB_FLAG_DIRTY_BITMAP 0x4
+#define SB_FLAG_FIXED_PADDING 0x8
#define JOURNAL_ENTRY_ROUNDUP 8
@@ -250,6 +252,7 @@ struct dm_integrity_c {
bool journal_uptodate;
bool just_formatted;
bool recalculate_flag;
+ bool fix_padding;
struct alg_spec internal_hash_alg;
struct alg_spec journal_crypt_alg;
@@ -463,7 +466,9 @@ static void wraparound_section(struct dm_integrity_c *ic, unsigned *sec_ptr)
static void sb_set_version(struct dm_integrity_c *ic)
{
- if (ic->mode == 'B' || ic->sb->flags & cpu_to_le32(SB_FLAG_DIRTY_BITMAP))
+ if (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING))
+ ic->sb->version = SB_VERSION_4;
+ else if (ic->mode == 'B' || ic->sb->flags & cpu_to_le32(SB_FLAG_DIRTY_BITMAP))
ic->sb->version = SB_VERSION_3;
else if (ic->meta_dev || ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
ic->sb->version = SB_VERSION_2;
@@ -2955,6 +2960,7 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type,
arg_count += !!ic->internal_hash_alg.alg_string;
arg_count += !!ic->journal_crypt_alg.alg_string;
arg_count += !!ic->journal_mac_alg.alg_string;
+ arg_count += (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) != 0;
DMEMIT("%s %llu %u %c %u", ic->dev->name, (unsigned long long)ic->start,
ic->tag_size, ic->mode, arg_count);
if (ic->meta_dev)
@@ -2974,6 +2980,8 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type,
DMEMIT(" sectors_per_bit:%llu", (unsigned long long)ic->sectors_per_block << ic->log2_blocks_per_bitmap_bit);
DMEMIT(" bitmap_flush_interval:%u", jiffies_to_msecs(ic->bitmap_flush_interval));
}
+ if ((ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) != 0)
+ DMEMIT(" fix_padding");
#define EMIT_ALG(a, n) \
do { \
@@ -3042,8 +3050,14 @@ static int calculate_device_limits(struct dm_integrity_c *ic)
if (!ic->meta_dev) {
sector_t last_sector, last_area, last_offset;
- ic->metadata_run = roundup((__u64)ic->tag_size << (ic->sb->log2_interleave_sectors - ic->sb->log2_sectors_per_block),
- (__u64)(1 << SECTOR_SHIFT << METADATA_PADDING_SECTORS)) >> SECTOR_SHIFT;
+ /* we have to maintain excessive padding for compatibility with existing volumes */
+ __u64 metadata_run_padding =
+ ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING) ?
+ (__u64)(METADATA_PADDING_SECTORS << SECTOR_SHIFT) :
+ (__u64)(1 << SECTOR_SHIFT << METADATA_PADDING_SECTORS);
+
+ ic->metadata_run = round_up((__u64)ic->tag_size << (ic->sb->log2_interleave_sectors - ic->sb->log2_sectors_per_block),
+ metadata_run_padding) >> SECTOR_SHIFT;
if (!(ic->metadata_run & (ic->metadata_run - 1)))
ic->log2_metadata_run = __ffs(ic->metadata_run);
else
@@ -3086,6 +3100,8 @@ static int initialize_superblock(struct dm_integrity_c *ic, unsigned journal_sec
journal_sections = 1;
if (!ic->meta_dev) {
+ if (ic->fix_padding)
+ ic->sb->flags |= cpu_to_le32(SB_FLAG_FIXED_PADDING);
ic->sb->journal_sections = cpu_to_le32(journal_sections);
if (!interleave_sectors)
interleave_sectors = DEFAULT_INTERLEAVE_SECTORS;
@@ -3725,6 +3741,8 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
goto bad;
} else if (!strcmp(opt_string, "recalculate")) {
ic->recalculate_flag = true;
+ } else if (!strcmp(opt_string, "fix_padding")) {
+ ic->fix_padding = true;
} else {
r = -EINVAL;
ti->error = "Invalid argument";
@@ -3867,7 +3885,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
should_write_sb = true;
}
- if (!ic->sb->version || ic->sb->version > SB_VERSION_3) {
+ if (!ic->sb->version || ic->sb->version > SB_VERSION_4) {
r = -EINVAL;
ti->error = "Unknown version";
goto bad;
@@ -4182,7 +4200,7 @@ static void dm_integrity_dtr(struct dm_target *ti)
static struct target_type integrity_target = {
.name = "integrity",
- .version = {1, 3, 0},
+ .version = {1, 4, 0},
.module = THIS_MODULE,
.features = DM_TARGET_SINGLETON | DM_TARGET_INTEGRITY,
.ctr = dm_integrity_ctr,
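
The padding fix is worth a worked example. The legacy expression shifted left by METADATA_PADDING_SECTORS instead of multiplying by it, inflating the alignment of every metadata run; assuming METADATA_PADDING_SECTORS is 8, its value in this driver, and SECTOR_SHIFT is 9:

    /* legacy: 1 << 9 << 8 = 131072 bytes -> runs aligned to 128 KiB */
    static __u64 legacy_padding(void)
    {
        return (__u64)(1 << SECTOR_SHIFT << METADATA_PADDING_SECTORS);
    }

    /* fixed:   8 << 9     =   4096 bytes -> runs aligned to 4 KiB */
    static __u64 fixed_padding(void)
    {
        return (__u64)(METADATA_PADDING_SECTORS << SECTOR_SHIFT);
    }

Existing volumes keep the wasteful layout for compatibility; only superblocks created with the new fix_padding option set SB_FLAG_FIXED_PADDING, and hence SB_VERSION_4, and get the tighter packing.
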
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index ecefe6703736..8d07fdf63a47 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -90,7 +90,7 @@ static void linear_map_bio(struct dm_target *ti, struct bio *bio)
struct linear_c *lc = ti->private;
bio_set_dev(bio, lc->dev->bdev);
- if (bio_sectors(bio) || bio_op(bio) == REQ_OP_ZONE_RESET)
+ if (bio_sectors(bio) || op_is_zone_mgmt(bio_op(bio)))
bio->bi_iter.bi_sector =
linear_map_sector(ti, bio->bi_iter.bi_sector);
}
@@ -136,21 +136,15 @@ static int linear_prepare_ioctl(struct dm_target *ti, struct block_device **bdev
}
#ifdef CONFIG_BLK_DEV_ZONED
-static int linear_report_zones(struct dm_target *ti, sector_t sector,
- struct blk_zone *zones, unsigned int *nr_zones)
+static int linear_report_zones(struct dm_target *ti,
+ struct dm_report_zones_args *args, unsigned int nr_zones)
{
- struct linear_c *lc = (struct linear_c *) ti->private;
- int ret;
-
- /* Do report and remap it */
- ret = blkdev_report_zones(lc->dev->bdev, linear_map_sector(ti, sector),
- zones, nr_zones);
- if (ret != 0)
- return ret;
+ struct linear_c *lc = ti->private;
+ sector_t sector = linear_map_sector(ti, args->next_sector);
- if (*nr_zones)
- dm_remap_zone_report(ti, lc->start, zones, nr_zones);
- return 0;
+ args->start = lc->start;
+ return blkdev_report_zones(lc->dev->bdev, sector, nr_zones,
+ dm_report_zones_cb, args);
}
#endif
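
Both report_zones conversions in this series have the same shape: rather than filling a zone array and fixing it up afterwards with dm_remap_zone_report(), the target records its mapping offset in args->start and blkdev_report_zones() now drives dm_report_zones_cb once per zone, remapping on the fly. A sketch for a hypothetical offset-remapping target, using only the args fields visible in these hunks:

    #include <linux/blkdev.h>
    #include <linux/device-mapper.h>

    struct example_c {                          /* hypothetical target context */
        struct dm_dev *dev;
        sector_t start;
    };

    static sector_t example_map_sector(struct dm_target *ti, sector_t bi_sector);

    #ifdef CONFIG_BLK_DEV_ZONED
    static int example_report_zones(struct dm_target *ti,
                                    struct dm_report_zones_args *args,
                                    unsigned int nr_zones)
    {
        struct example_c *ec = ti->private;
        sector_t sector = example_map_sector(ti, args->next_sector);

        args->start = ec->start;                /* callback remaps each zone */
        return blkdev_report_zones(ec->dev->bdev, sector, nr_zones,
                                   dm_report_zones_cb, args);
    }
    #endif
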
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index b0aa595e4375..c412eaa975fc 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -209,6 +209,7 @@ struct raid_dev {
#define RT_FLAG_RS_SUSPENDED 5
#define RT_FLAG_RS_IN_SYNC 6
#define RT_FLAG_RS_RESYNCING 7
+#define RT_FLAG_RS_GROW 8
/* Array elements of 64 bit needed for rebuild/failed disk bits */
#define DISKS_ARRAY_ELEMS ((MAX_RAID_DEVICES + (sizeof(uint64_t) * 8 - 1)) / sizeof(uint64_t) / 8)
@@ -241,6 +242,9 @@ struct raid_set {
struct raid_type *raid_type;
struct dm_target_callbacks callbacks;
+ sector_t array_sectors;
+ sector_t dev_sectors;
+
/* Optional raid4/5/6 journal device */
struct journal_dev {
struct dm_dev *dev;
@@ -616,7 +620,6 @@ static int raid10_format_to_md_layout(struct raid_set *rs,
} else if (algorithm == ALGORITHM_RAID10_FAR) {
f = copies;
- r = !RAID10_OFFSET;
if (!test_bit(__CTR_FLAG_RAID10_USE_NEAR_SETS, &rs->ctr_flags))
r |= RAID10_USE_FAR_SETS;
@@ -1615,13 +1618,12 @@ static int _check_data_dev_sectors(struct raid_set *rs)
}
/* Calculate the sectors per device and per array used for @rs */
-static int rs_set_dev_and_array_sectors(struct raid_set *rs, bool use_mddev)
+static int rs_set_dev_and_array_sectors(struct raid_set *rs, sector_t sectors, bool use_mddev)
{
int delta_disks;
unsigned int data_stripes;
+ sector_t array_sectors = sectors, dev_sectors = sectors;
struct mddev *mddev = &rs->md;
- struct md_rdev *rdev;
- sector_t array_sectors = rs->ti->len, dev_sectors = rs->ti->len;
if (use_mddev) {
delta_disks = mddev->delta_disks;
@@ -1656,12 +1658,9 @@ static int rs_set_dev_and_array_sectors(struct raid_set *rs, bool use_mddev)
/* Striped layouts */
array_sectors = (data_stripes + delta_disks) * dev_sectors;
- rdev_for_each(rdev, mddev)
- if (!test_bit(Journal, &rdev->flags))
- rdev->sectors = dev_sectors;
-
mddev->array_sectors = array_sectors;
mddev->dev_sectors = dev_sectors;
+ rs_set_rdev_sectors(rs);
return _check_data_dev_sectors(rs);
bad:
@@ -1670,7 +1669,7 @@ bad:
}
/* Setup recovery on @rs */
-static void __rs_setup_recovery(struct raid_set *rs, sector_t dev_sectors)
+static void rs_setup_recovery(struct raid_set *rs, sector_t dev_sectors)
{
/* raid0 does not recover */
if (rs_is_raid0(rs))
@@ -1691,22 +1690,6 @@ static void __rs_setup_recovery(struct raid_set *rs, sector_t dev_sectors)
? MaxSector : dev_sectors;
}
-/* Setup recovery on @rs based on raid type, device size and 'nosync' flag */
-static void rs_setup_recovery(struct raid_set *rs, sector_t dev_sectors)
-{
- if (!dev_sectors)
- /* New raid set or 'sync' flag provided */
- __rs_setup_recovery(rs, 0);
- else if (dev_sectors == MaxSector)
- /* Prevent recovery */
- __rs_setup_recovery(rs, MaxSector);
- else if (__rdev_sectors(rs) < dev_sectors)
- /* Grown raid set */
- __rs_setup_recovery(rs, __rdev_sectors(rs));
- else
- __rs_setup_recovery(rs, MaxSector);
-}
-
static void do_table_event(struct work_struct *ws)
{
struct raid_set *rs = container_of(ws, struct raid_set, md.event_work);
@@ -2474,7 +2457,7 @@ static int super_validate(struct raid_set *rs, struct md_rdev *rdev)
return -EINVAL;
}
- /* Enable bitmap creation for RAID levels != 0 */
+ /* Enable bitmap creation on @rs unless it has no metadata devices, is raid0, or is a journaled raid4/5/6 set. */
mddev->bitmap_info.offset = (rt_is_raid0(rs->raid_type) || rs->journal_dev.dev) ? 0 : to_sector(4096);
mddev->bitmap_info.default_offset = mddev->bitmap_info.offset;
@@ -2911,7 +2894,7 @@ static int rs_setup_reshape(struct raid_set *rs)
/* Remove disk(s) */
} else if (rs->delta_disks < 0) {
- r = rs_set_dev_and_array_sectors(rs, true);
+ r = rs_set_dev_and_array_sectors(rs, rs->ti->len, true);
mddev->reshape_backwards = 1; /* removing disk(s) -> backward reshape */
/* Change layout and/or chunk size */
@@ -3008,7 +2991,7 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
bool resize = false;
struct raid_type *rt;
unsigned int num_raid_params, num_raid_devs;
- sector_t calculated_dev_sectors, rdev_sectors, reshape_sectors;
+ sector_t sb_array_sectors, rdev_sectors, reshape_sectors;
struct raid_set *rs = NULL;
const char *arg;
struct rs_layout rs_layout;
@@ -3067,11 +3050,13 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
*
* Any existing superblock will overwrite the array and device sizes
*/
- r = rs_set_dev_and_array_sectors(rs, false);
+ r = rs_set_dev_and_array_sectors(rs, rs->ti->len, false);
if (r)
goto bad;
- calculated_dev_sectors = rs->md.dev_sectors;
+ /* Memorize the just-calculated, potentially larger sizes to grow the raid set in preresume */
+ rs->array_sectors = rs->md.array_sectors;
+ rs->dev_sectors = rs->md.dev_sectors;
/*
* Backup any new raid set level, layout, ...
@@ -3084,6 +3069,8 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
if (r)
goto bad;
+ /* All in-core metadata now as of current superblocks after calling analyse_superblocks() */
+ sb_array_sectors = rs->md.array_sectors;
rdev_sectors = __rdev_sectors(rs);
if (!rdev_sectors) {
ti->error = "Invalid rdev size";
@@ -3093,8 +3080,11 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
reshape_sectors = _get_reshape_sectors(rs);
- if (calculated_dev_sectors != rdev_sectors)
- resize = calculated_dev_sectors != (reshape_sectors ? rdev_sectors - reshape_sectors : rdev_sectors);
+ if (rs->dev_sectors != rdev_sectors) {
+ resize = (rs->dev_sectors != rdev_sectors - reshape_sectors);
+ if (rs->dev_sectors > rdev_sectors - reshape_sectors)
+ set_bit(RT_FLAG_RS_GROW, &rs->runtime_flags);
+ }
INIT_WORK(&rs->md.event_work, do_table_event);
ti->private = rs;
@@ -3121,13 +3111,8 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
rs_set_new(rs);
} else if (rs_is_recovering(rs)) {
- /* Rebuild particular devices */
- if (test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags)) {
- set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
- rs_setup_recovery(rs, MaxSector);
- }
/* A recovering raid set may be resized */
- ; /* skip setup rs */
+ goto size_check;
} else if (rs_is_reshaping(rs)) {
/* Have to reject size change request during reshape */
if (resize) {
@@ -3171,6 +3156,9 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
rs_setup_recovery(rs, MaxSector);
rs_set_new(rs);
} else if (rs_reshape_requested(rs)) {
+ /* Only request grow on raid set size extensions, not on reshapes. */
+ clear_bit(RT_FLAG_RS_GROW, &rs->runtime_flags);
+
/*
* No need to check for 'ongoing' takeover here, because takeover
* is an instant operation as opposed to an ongoing reshape.
@@ -3201,13 +3189,31 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
}
rs_set_cur(rs);
} else {
+size_check:
/* May not set recovery when a device rebuild is requested */
if (test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags)) {
- rs_setup_recovery(rs, MaxSector);
+ clear_bit(RT_FLAG_RS_GROW, &rs->runtime_flags);
set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
- } else
- rs_setup_recovery(rs, test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags) ?
- 0 : (resize ? calculated_dev_sectors : MaxSector));
+ rs_setup_recovery(rs, MaxSector);
+ } else if (test_bit(RT_FLAG_RS_GROW, &rs->runtime_flags)) {
+ /*
+ * Set raid set to current size, i.e. size as of
+ * superblocks to grow to larger size in preresume.
+ */
+ r = rs_set_dev_and_array_sectors(rs, sb_array_sectors, false);
+ if (r)
+ goto bad;
+
+ rs_setup_recovery(rs, rs->md.recovery_cp < rs->md.dev_sectors ? rs->md.recovery_cp : rs->md.dev_sectors);
+ } else {
+ /* There is no size change, or the set is shrinking: update the size and record it in the superblocks */
+ r = rs_set_dev_and_array_sectors(rs, rs->ti->len, false);
+ if (r)
+ goto bad;
+
+ if (sb_array_sectors > rs->array_sectors)
+ set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
+ }
rs_set_cur(rs);
}
@@ -3406,10 +3412,9 @@ static const char *__raid_dev_status(struct raid_set *rs, struct md_rdev *rdev)
/* Helper to return resync/reshape progress for @rs and runtime flags for raid set in sync / resynching */
static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery,
- sector_t resync_max_sectors)
+ enum sync_state state, sector_t resync_max_sectors)
{
sector_t r;
- enum sync_state state;
struct mddev *mddev = &rs->md;
clear_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
@@ -3420,8 +3425,6 @@ static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery,
set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
} else {
- state = decipher_sync_action(mddev, recovery);
-
if (state == st_idle && !test_bit(MD_RECOVERY_INTR, &recovery))
r = mddev->recovery_cp;
else
@@ -3439,18 +3442,14 @@ static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery,
/*
* In case we are recovering, the array is not in sync
* and health chars should show the recovering legs.
+ *
+ * Already retrieved recovery offset from curr_resync_completed above.
*/
;
- else if (state == st_resync)
- /*
- * If "resync" is occurring, the raid set
- * is or may be out of sync hence the health
- * characters shall be 'a'.
- */
- set_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags);
- else if (state == st_reshape)
+
+ else if (state == st_resync || state == st_reshape)
/*
- * If "reshape" is occurring, the raid set
+ * If "resync/reshape" is occurring, the raid set
* is or may be out of sync hence the health
* characters shall be 'a'.
*/
@@ -3464,22 +3463,22 @@ static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery,
*/
set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
- else {
- struct md_rdev *rdev;
-
+ else if (test_bit(MD_RECOVERY_NEEDED, &recovery))
/*
* We are idle and recovery is needed, prevent 'A' chars race
* caused by components still set to in-sync by constructor.
*/
- if (test_bit(MD_RECOVERY_NEEDED, &recovery))
- set_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags);
+ set_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags);
+ else {
/*
- * The raid set may be doing an initial sync, or it may
- * be rebuilding individual components. If all the
- * devices are In_sync, then it is the raid set that is
- * being initialized.
+ * We are idle and the raid set may be doing an initial
+ * sync, or it may be rebuilding individual components.
+ * If all the devices are In_sync, then it is the raid set
+ * that is being initialized.
*/
+ struct md_rdev *rdev;
+
set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
rdev_for_each(rdev, mddev)
if (!test_bit(Journal, &rdev->flags) &&
@@ -3512,7 +3511,7 @@ static void raid_status(struct dm_target *ti, status_type_t type,
unsigned int rebuild_disks;
unsigned int write_mostly_params = 0;
sector_t progress, resync_max_sectors, resync_mismatches;
- const char *sync_action;
+ enum sync_state state;
struct raid_type *rt;
switch (type) {
@@ -3526,14 +3525,14 @@ static void raid_status(struct dm_target *ti, status_type_t type,
/* Access most recent mddev properties for status output */
smp_rmb();
- recovery = rs->md.recovery;
/* Get sensible max sectors even if raid set not yet started */
resync_max_sectors = test_bit(RT_FLAG_RS_PRERESUMED, &rs->runtime_flags) ?
mddev->resync_max_sectors : mddev->dev_sectors;
- progress = rs_get_progress(rs, recovery, resync_max_sectors);
+ recovery = rs->md.recovery;
+ state = decipher_sync_action(mddev, recovery);
+ progress = rs_get_progress(rs, recovery, state, resync_max_sectors);
resync_mismatches = (mddev->last_sync_action && !strcasecmp(mddev->last_sync_action, "check")) ?
atomic64_read(&mddev->resync_mismatches) : 0;
- sync_action = sync_str(decipher_sync_action(&rs->md, recovery));
/* HM FIXME: do we want another state char for raid0? It shows 'D'/'A'/'-' now */
for (i = 0; i < rs->raid_disks; i++)
@@ -3561,7 +3560,7 @@ static void raid_status(struct dm_target *ti, status_type_t type,
* See Documentation/admin-guide/device-mapper/dm-raid.rst for
* information on each of these states.
*/
- DMEMIT(" %s", sync_action);
+ DMEMIT(" %s", sync_str(state));
/*
* v1.5.0+:
@@ -3955,11 +3954,22 @@ static int raid_preresume(struct dm_target *ti)
if (r)
return r;
- /* Resize bitmap to adjust to changed region size (aka MD bitmap chunksize) */
- if (test_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags) && mddev->bitmap &&
- mddev->bitmap_info.chunksize != to_bytes(rs->requested_bitmap_chunk_sectors)) {
- r = md_bitmap_resize(mddev->bitmap, mddev->dev_sectors,
- to_bytes(rs->requested_bitmap_chunk_sectors), 0);
+ /* We are extending the raid set size, adjust mddev/md_rdev sizes and set capacity. */
+ if (test_bit(RT_FLAG_RS_GROW, &rs->runtime_flags)) {
+ mddev->array_sectors = rs->array_sectors;
+ mddev->dev_sectors = rs->dev_sectors;
+ rs_set_rdev_sectors(rs);
+ rs_set_capacity(rs);
+ }
+
+ /* Resize bitmap to adjust to changed region size (aka MD bitmap chunksize) or grown device size */
+ if (test_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags) && mddev->bitmap &&
+ (test_bit(RT_FLAG_RS_GROW, &rs->runtime_flags) ||
+ (rs->requested_bitmap_chunk_sectors &&
+ mddev->bitmap_info.chunksize != to_bytes(rs->requested_bitmap_chunk_sectors)))) {
+ int chunksize = to_bytes(rs->requested_bitmap_chunk_sectors) ?: mddev->bitmap_info.chunksize;
+
+ r = md_bitmap_resize(mddev->bitmap, mddev->dev_sectors, chunksize, 0);
if (r)
DMERR("Failed to resize bitmap");
}
@@ -3968,8 +3978,10 @@ static int raid_preresume(struct dm_target *ti)
/* Be prepared for mddev_resume() in raid_resume() */
set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
if (mddev->recovery_cp && mddev->recovery_cp < MaxSector) {
- set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
+ set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
mddev->resync_min = mddev->recovery_cp;
+ if (test_bit(RT_FLAG_RS_GROW, &rs->runtime_flags))
+ mddev->resync_max_sectors = mddev->dev_sectors;
}
/* Check for any reshape request unless new raid set */
@@ -4017,7 +4029,7 @@ static void raid_resume(struct dm_target *ti)
static struct target_type raid_target = {
.name = "raid",
- .version = {1, 14, 0},
+ .version = {1, 15, 0},
.module = THIS_MODULE,
.ctr = raid_ctr,
.dtr = raid_dtr,
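
The grow path in raid_ctr() decides between plain resize and grow by comparing the per-leg size implied by the table (rs->dev_sectors, derived from ti->len) with what the superblocks recorded (rdev_sectors), net of any reserved reshape space. The new test, re-traced with assumed numbers:

    /*
     * Assumed numbers for illustration:
     *   rdev_sectors    = 2097152   (1 GiB legs recorded in the superblocks)
     *   reshape_sectors = 0         (no reshape space reserved)
     *   rs->dev_sectors = 4194304   (2 GiB legs requested by the new table)
     */
    if (rs->dev_sectors != rdev_sectors) {
        resize = (rs->dev_sectors != rdev_sectors - reshape_sectors); /* true */
        if (rs->dev_sectors > rdev_sectors - reshape_sectors)
            /* larger than on disk: grow later, in raid_preresume() */
            set_bit(RT_FLAG_RS_GROW, &rs->runtime_flags);
    }

The constructor then loads the set at the old superblock size; raid_preresume() switches the mddev/rdev sizes to the memorized larger values, resizes the bitmap, and requests a sync starting at recovery_cp, so only the newly added tail is resynchronized.
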
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index 8547d7594338..63bbcc20f49a 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -55,19 +55,6 @@ static void trigger_event(struct work_struct *work)
dm_table_event(sc->ti->table);
}
-static inline struct stripe_c *alloc_context(unsigned int stripes)
-{
- size_t len;
-
- if (dm_array_too_big(sizeof(struct stripe_c), sizeof(struct stripe),
- stripes))
- return NULL;
-
- len = sizeof(struct stripe_c) + (sizeof(struct stripe) * stripes);
-
- return kmalloc(len, GFP_KERNEL);
-}
-
/*
* Parse a single <dev> <sector> pair
*/
@@ -142,7 +129,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
return -EINVAL;
}
- sc = alloc_context(stripes);
+ sc = kmalloc(struct_size(sc, stripe, stripes), GFP_KERNEL);
if (!sc) {
ti->error = "Memory allocation for striped context "
"failed";
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 52e049554f5c..2ae0c1913766 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -918,21 +918,15 @@ bool dm_table_supports_dax(struct dm_table *t,
static bool dm_table_does_not_support_partial_completion(struct dm_table *t);
-struct verify_rq_based_data {
- unsigned sq_count;
- unsigned mq_count;
-};
-
-static int device_is_rq_based(struct dm_target *ti, struct dm_dev *dev,
- sector_t start, sector_t len, void *data)
+static int device_is_rq_stackable(struct dm_target *ti, struct dm_dev *dev,
+ sector_t start, sector_t len, void *data)
{
- struct request_queue *q = bdev_get_queue(dev->bdev);
- struct verify_rq_based_data *v = data;
+ struct block_device *bdev = dev->bdev;
+ struct request_queue *q = bdev_get_queue(bdev);
- if (queue_is_mq(q))
- v->mq_count++;
- else
- v->sq_count++;
+ /* request-based cannot stack on partitions! */
+ if (bdev != bdev->bd_contains)
+ return false;
return queue_is_mq(q);
}
@@ -941,7 +935,6 @@ static int dm_table_determine_type(struct dm_table *t)
{
unsigned i;
unsigned bio_based = 0, request_based = 0, hybrid = 0;
- struct verify_rq_based_data v = {.sq_count = 0, .mq_count = 0};
struct dm_target *tgt;
struct list_head *devices = dm_table_get_devices(t);
enum dm_queue_mode live_md_type = dm_get_md_type(t->md);
@@ -1045,14 +1038,10 @@ verify_rq_based:
/* Non-request-stackable devices can't be used for request-based dm */
if (!tgt->type->iterate_devices ||
- !tgt->type->iterate_devices(tgt, device_is_rq_based, &v)) {
+ !tgt->type->iterate_devices(tgt, device_is_rq_stackable, NULL)) {
DMERR("table load rejected: including non-request-stackable devices");
return -EINVAL;
}
- if (v.sq_count > 0) {
- DMERR("table load rejected: not all devices are blk-mq request-stackable");
- return -EINVAL;
- }
return 0;
}
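
With legacy single-queue request devices gone, the sq/mq tally above is dead code; the rewritten predicate keeps only two requirements for request-based stacking: the underlying queue must be blk-mq, and the device must be a whole disk, which bd_contains encodes (a partition's bd_contains points at the whole-disk block_device, while the whole disk points at itself):

    #include <linux/blkdev.h>

    /* whole disk iff the device is its own container */
    static bool bdev_is_whole_disk(struct block_device *bdev)
    {
        return bdev == bdev->bd_contains;
    }
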
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index fcd887703f95..5a2c494cb552 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -609,13 +609,12 @@ static void error_thin_bio_list(struct thin_c *tc, struct bio_list *master,
blk_status_t error)
{
struct bio_list bios;
- unsigned long flags;
bio_list_init(&bios);
- spin_lock_irqsave(&tc->lock, flags);
+ spin_lock_irq(&tc->lock);
__merge_bio_list(&bios, master);
- spin_unlock_irqrestore(&tc->lock, flags);
+ spin_unlock_irq(&tc->lock);
error_bio_list(&bios, error);
}
@@ -623,15 +622,14 @@ static void error_thin_bio_list(struct thin_c *tc, struct bio_list *master,
static void requeue_deferred_cells(struct thin_c *tc)
{
struct pool *pool = tc->pool;
- unsigned long flags;
struct list_head cells;
struct dm_bio_prison_cell *cell, *tmp;
INIT_LIST_HEAD(&cells);
- spin_lock_irqsave(&tc->lock, flags);
+ spin_lock_irq(&tc->lock);
list_splice_init(&tc->deferred_cells, &cells);
- spin_unlock_irqrestore(&tc->lock, flags);
+ spin_unlock_irq(&tc->lock);
list_for_each_entry_safe(cell, tmp, &cells, user_list)
cell_requeue(pool, cell);
@@ -640,14 +638,13 @@ static void requeue_deferred_cells(struct thin_c *tc)
static void requeue_io(struct thin_c *tc)
{
struct bio_list bios;
- unsigned long flags;
bio_list_init(&bios);
- spin_lock_irqsave(&tc->lock, flags);
+ spin_lock_irq(&tc->lock);
__merge_bio_list(&bios, &tc->deferred_bio_list);
__merge_bio_list(&bios, &tc->retry_on_resume_list);
- spin_unlock_irqrestore(&tc->lock, flags);
+ spin_unlock_irq(&tc->lock);
error_bio_list(&bios, BLK_STS_DM_REQUEUE);
requeue_deferred_cells(tc);
@@ -756,7 +753,6 @@ static void inc_all_io_entry(struct pool *pool, struct bio *bio)
static void issue(struct thin_c *tc, struct bio *bio)
{
struct pool *pool = tc->pool;
- unsigned long flags;
if (!bio_triggers_commit(tc, bio)) {
generic_make_request(bio);
@@ -777,9 +773,9 @@ static void issue(struct thin_c *tc, struct bio *bio)
* Batch together any bios that trigger commits and then issue a
* single commit for them in process_deferred_bios().
*/
- spin_lock_irqsave(&pool->lock, flags);
+ spin_lock_irq(&pool->lock);
bio_list_add(&pool->deferred_flush_bios, bio);
- spin_unlock_irqrestore(&pool->lock, flags);
+ spin_unlock_irq(&pool->lock);
}
static void remap_to_origin_and_issue(struct thin_c *tc, struct bio *bio)
@@ -886,12 +882,15 @@ static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *c
{
struct pool *pool = tc->pool;
unsigned long flags;
+ int has_work;
spin_lock_irqsave(&tc->lock, flags);
cell_release_no_holder(pool, cell, &tc->deferred_bio_list);
+ has_work = !bio_list_empty(&tc->deferred_bio_list);
spin_unlock_irqrestore(&tc->lock, flags);
- wake_worker(pool);
+ if (has_work)
+ wake_worker(pool);
}
static void thin_defer_bio(struct thin_c *tc, struct bio *bio);
@@ -960,7 +959,6 @@ static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
static void complete_overwrite_bio(struct thin_c *tc, struct bio *bio)
{
struct pool *pool = tc->pool;
- unsigned long flags;
/*
* If the bio has the REQ_FUA flag set we must commit the metadata
@@ -985,9 +983,9 @@ static void complete_overwrite_bio(struct thin_c *tc, struct bio *bio)
* Batch together any bios that trigger commits and then issue a
* single commit for them in process_deferred_bios().
*/
- spin_lock_irqsave(&pool->lock, flags);
+ spin_lock_irq(&pool->lock);
bio_list_add(&pool->deferred_flush_completions, bio);
- spin_unlock_irqrestore(&pool->lock, flags);
+ spin_unlock_irq(&pool->lock);
}
static void process_prepared_mapping(struct dm_thin_new_mapping *m)
@@ -1226,14 +1224,13 @@ static void process_prepared_discard_passdown_pt2(struct dm_thin_new_mapping *m)
static void process_prepared(struct pool *pool, struct list_head *head,
process_mapping_fn *fn)
{
- unsigned long flags;
struct list_head maps;
struct dm_thin_new_mapping *m, *tmp;
INIT_LIST_HEAD(&maps);
- spin_lock_irqsave(&pool->lock, flags);
+ spin_lock_irq(&pool->lock);
list_splice_init(head, &maps);
- spin_unlock_irqrestore(&pool->lock, flags);
+ spin_unlock_irq(&pool->lock);
list_for_each_entry_safe(m, tmp, &maps, list)
(*fn)(m);
@@ -1510,14 +1507,12 @@ static int commit(struct pool *pool)
static void check_low_water_mark(struct pool *pool, dm_block_t free_blocks)
{
- unsigned long flags;
-
if (free_blocks <= pool->low_water_blocks && !pool->low_water_triggered) {
DMWARN("%s: reached low water mark for data device: sending event.",
dm_device_name(pool->pool_md));
- spin_lock_irqsave(&pool->lock, flags);
+ spin_lock_irq(&pool->lock);
pool->low_water_triggered = true;
- spin_unlock_irqrestore(&pool->lock, flags);
+ spin_unlock_irq(&pool->lock);
dm_table_event(pool->ti->table);
}
}
@@ -1593,11 +1588,10 @@ static void retry_on_resume(struct bio *bio)
{
struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
struct thin_c *tc = h->tc;
- unsigned long flags;
- spin_lock_irqsave(&tc->lock, flags);
+ spin_lock_irq(&tc->lock);
bio_list_add(&tc->retry_on_resume_list, bio);
- spin_unlock_irqrestore(&tc->lock, flags);
+ spin_unlock_irq(&tc->lock);
}
static blk_status_t should_error_unserviceable_bio(struct pool *pool)
@@ -2170,7 +2164,6 @@ static void __sort_thin_deferred_bios(struct thin_c *tc)
static void process_thin_deferred_bios(struct thin_c *tc)
{
struct pool *pool = tc->pool;
- unsigned long flags;
struct bio *bio;
struct bio_list bios;
struct blk_plug plug;
@@ -2184,10 +2177,10 @@ static void process_thin_deferred_bios(struct thin_c *tc)
bio_list_init(&bios);
- spin_lock_irqsave(&tc->lock, flags);
+ spin_lock_irq(&tc->lock);
if (bio_list_empty(&tc->deferred_bio_list)) {
- spin_unlock_irqrestore(&tc->lock, flags);
+ spin_unlock_irq(&tc->lock);
return;
}
@@ -2196,7 +2189,7 @@ static void process_thin_deferred_bios(struct thin_c *tc)
bio_list_merge(&bios, &tc->deferred_bio_list);
bio_list_init(&tc->deferred_bio_list);
- spin_unlock_irqrestore(&tc->lock, flags);
+ spin_unlock_irq(&tc->lock);
blk_start_plug(&plug);
while ((bio = bio_list_pop(&bios))) {
@@ -2206,10 +2199,10 @@ static void process_thin_deferred_bios(struct thin_c *tc)
* prepared mappings to process.
*/
if (ensure_next_mapping(pool)) {
- spin_lock_irqsave(&tc->lock, flags);
+ spin_lock_irq(&tc->lock);
bio_list_add(&tc->deferred_bio_list, bio);
bio_list_merge(&tc->deferred_bio_list, &bios);
- spin_unlock_irqrestore(&tc->lock, flags);
+ spin_unlock_irq(&tc->lock);
break;
}
@@ -2264,16 +2257,15 @@ static unsigned sort_cells(struct pool *pool, struct list_head *cells)
static void process_thin_deferred_cells(struct thin_c *tc)
{
struct pool *pool = tc->pool;
- unsigned long flags;
struct list_head cells;
struct dm_bio_prison_cell *cell;
unsigned i, j, count;
INIT_LIST_HEAD(&cells);
- spin_lock_irqsave(&tc->lock, flags);
+ spin_lock_irq(&tc->lock);
list_splice_init(&tc->deferred_cells, &cells);
- spin_unlock_irqrestore(&tc->lock, flags);
+ spin_unlock_irq(&tc->lock);
if (list_empty(&cells))
return;
@@ -2294,9 +2286,9 @@ static void process_thin_deferred_cells(struct thin_c *tc)
for (j = i; j < count; j++)
list_add(&pool->cell_sort_array[j]->user_list, &cells);
- spin_lock_irqsave(&tc->lock, flags);
+ spin_lock_irq(&tc->lock);
list_splice(&cells, &tc->deferred_cells);
- spin_unlock_irqrestore(&tc->lock, flags);
+ spin_unlock_irq(&tc->lock);
return;
}
@@ -2349,7 +2341,6 @@ static struct thin_c *get_next_thin(struct pool *pool, struct thin_c *tc)
static void process_deferred_bios(struct pool *pool)
{
- unsigned long flags;
struct bio *bio;
struct bio_list bios, bio_completions;
struct thin_c *tc;
@@ -2368,13 +2359,13 @@ static void process_deferred_bios(struct pool *pool)
bio_list_init(&bios);
bio_list_init(&bio_completions);
- spin_lock_irqsave(&pool->lock, flags);
+ spin_lock_irq(&pool->lock);
bio_list_merge(&bios, &pool->deferred_flush_bios);
bio_list_init(&pool->deferred_flush_bios);
bio_list_merge(&bio_completions, &pool->deferred_flush_completions);
bio_list_init(&pool->deferred_flush_completions);
- spin_unlock_irqrestore(&pool->lock, flags);
+ spin_unlock_irq(&pool->lock);
if (bio_list_empty(&bios) && bio_list_empty(&bio_completions) &&
!(dm_pool_changed_this_transaction(pool->pmd) && need_commit_due_to_time(pool)))
@@ -2657,12 +2648,11 @@ static void metadata_operation_failed(struct pool *pool, const char *op, int r)
*/
static void thin_defer_bio(struct thin_c *tc, struct bio *bio)
{
- unsigned long flags;
struct pool *pool = tc->pool;
- spin_lock_irqsave(&tc->lock, flags);
+ spin_lock_irq(&tc->lock);
bio_list_add(&tc->deferred_bio_list, bio);
- spin_unlock_irqrestore(&tc->lock, flags);
+ spin_unlock_irq(&tc->lock);
wake_worker(pool);
}
@@ -2678,13 +2668,12 @@ static void thin_defer_bio_with_throttle(struct thin_c *tc, struct bio *bio)
static void thin_defer_cell(struct thin_c *tc, struct dm_bio_prison_cell *cell)
{
- unsigned long flags;
struct pool *pool = tc->pool;
throttle_lock(&pool->throttle);
- spin_lock_irqsave(&tc->lock, flags);
+ spin_lock_irq(&tc->lock);
list_add_tail(&cell->user_list, &tc->deferred_cells);
- spin_unlock_irqrestore(&tc->lock, flags);
+ spin_unlock_irq(&tc->lock);
throttle_unlock(&pool->throttle);
wake_worker(pool);
@@ -2810,15 +2799,14 @@ static int pool_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
static void requeue_bios(struct pool *pool)
{
- unsigned long flags;
struct thin_c *tc;
rcu_read_lock();
list_for_each_entry_rcu(tc, &pool->active_thins, list) {
- spin_lock_irqsave(&tc->lock, flags);
+ spin_lock_irq(&tc->lock);
bio_list_merge(&tc->deferred_bio_list, &tc->retry_on_resume_list);
bio_list_init(&tc->retry_on_resume_list);
- spin_unlock_irqrestore(&tc->lock, flags);
+ spin_unlock_irq(&tc->lock);
}
rcu_read_unlock();
}
@@ -3412,15 +3400,14 @@ static int pool_map(struct dm_target *ti, struct bio *bio)
int r;
struct pool_c *pt = ti->private;
struct pool *pool = pt->pool;
- unsigned long flags;
/*
* As this is a singleton target, ti->begin is always zero.
*/
- spin_lock_irqsave(&pool->lock, flags);
+ spin_lock_irq(&pool->lock);
bio_set_dev(bio, pt->data_dev->bdev);
r = DM_MAPIO_REMAPPED;
- spin_unlock_irqrestore(&pool->lock, flags);
+ spin_unlock_irq(&pool->lock);
return r;
}
@@ -3591,7 +3578,6 @@ static void pool_resume(struct dm_target *ti)
{
struct pool_c *pt = ti->private;
struct pool *pool = pt->pool;
- unsigned long flags;
/*
* Must requeue active_thins' bios and then resume
@@ -3600,10 +3586,10 @@ static void pool_resume(struct dm_target *ti)
requeue_bios(pool);
pool_resume_active_thins(pool);
- spin_lock_irqsave(&pool->lock, flags);
+ spin_lock_irq(&pool->lock);
pool->low_water_triggered = false;
pool->suspended = false;
- spin_unlock_irqrestore(&pool->lock, flags);
+ spin_unlock_irq(&pool->lock);
do_waker(&pool->waker.work);
}
@@ -3612,11 +3598,10 @@ static void pool_presuspend(struct dm_target *ti)
{
struct pool_c *pt = ti->private;
struct pool *pool = pt->pool;
- unsigned long flags;
- spin_lock_irqsave(&pool->lock, flags);
+ spin_lock_irq(&pool->lock);
pool->suspended = true;
- spin_unlock_irqrestore(&pool->lock, flags);
+ spin_unlock_irq(&pool->lock);
pool_suspend_active_thins(pool);
}
@@ -3625,13 +3610,12 @@ static void pool_presuspend_undo(struct dm_target *ti)
{
struct pool_c *pt = ti->private;
struct pool *pool = pt->pool;
- unsigned long flags;
pool_resume_active_thins(pool);
- spin_lock_irqsave(&pool->lock, flags);
+ spin_lock_irq(&pool->lock);
pool->suspended = false;
- spin_unlock_irqrestore(&pool->lock, flags);
+ spin_unlock_irq(&pool->lock);
}
static void pool_postsuspend(struct dm_target *ti)
@@ -4110,11 +4094,10 @@ static void thin_put(struct thin_c *tc)
static void thin_dtr(struct dm_target *ti)
{
struct thin_c *tc = ti->private;
- unsigned long flags;
- spin_lock_irqsave(&tc->pool->lock, flags);
+ spin_lock_irq(&tc->pool->lock);
list_del_rcu(&tc->list);
- spin_unlock_irqrestore(&tc->pool->lock, flags);
+ spin_unlock_irq(&tc->pool->lock);
synchronize_rcu();
thin_put(tc);
@@ -4150,7 +4133,6 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
struct thin_c *tc;
struct dm_dev *pool_dev, *origin_dev;
struct mapped_device *pool_md;
- unsigned long flags;
mutex_lock(&dm_thin_pool_table.mutex);
@@ -4244,9 +4226,9 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
mutex_unlock(&dm_thin_pool_table.mutex);
- spin_lock_irqsave(&tc->pool->lock, flags);
+ spin_lock_irq(&tc->pool->lock);
if (tc->pool->suspended) {
- spin_unlock_irqrestore(&tc->pool->lock, flags);
+ spin_unlock_irq(&tc->pool->lock);
mutex_lock(&dm_thin_pool_table.mutex); /* reacquire for __pool_dec */
ti->error = "Unable to activate thin device while pool is suspended";
r = -EINVAL;
@@ -4255,7 +4237,7 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
refcount_set(&tc->refcount, 1);
init_completion(&tc->can_destroy);
list_add_tail_rcu(&tc->list, &tc->pool->active_thins);
- spin_unlock_irqrestore(&tc->pool->lock, flags);
+ spin_unlock_irq(&tc->pool->lock);
/*
* This synchronize_rcu() call is needed here otherwise we risk a
* wake_worker() call finding no bios to process (because the newly
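The dm-thin conversions above are one mechanical change repeated: these functions run only in process context with interrupts enabled, so saving and restoring the interrupt flags buys nothing. A minimal before/after sketch with hypothetical names, not code lifted from the patch:

/* Before: callable from any context, saves and restores IRQ flags. */
static void defer_bio_any_context(struct thin_c *tc, struct bio *bio)
{
	unsigned long flags;

	spin_lock_irqsave(&tc->lock, flags);
	bio_list_add(&tc->deferred_bio_list, bio);
	spin_unlock_irqrestore(&tc->lock, flags);
}

/* After: correct only where interrupts are known to be enabled. */
static void defer_bio_process_context(struct thin_c *tc, struct bio *bio)
{
	spin_lock_irq(&tc->lock);
	bio_list_add(&tc->deferred_bio_list, bio);
	spin_unlock_irq(&tc->lock);
}

Note that cell_defer_no_holder() above keeps irqsave, presumably because it is still reachable from the bio completion path where interrupts may be disabled.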
diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
index d06b8aa41e26..7d727a72aa13 100644
--- a/drivers/md/dm-writecache.c
+++ b/drivers/md/dm-writecache.c
@@ -1218,7 +1218,8 @@ bio_copy:
}
} while (bio->bi_iter.bi_size);
- if (unlikely(wc->uncommitted_blocks >= wc->autocommit_blocks))
+ if (unlikely(bio->bi_opf & REQ_FUA ||
+ wc->uncommitted_blocks >= wc->autocommit_blocks))
writecache_flush(wc);
else
writecache_schedule_autocommit(wc);
@@ -1561,7 +1562,7 @@ static void writecache_writeback(struct work_struct *work)
{
struct dm_writecache *wc = container_of(work, struct dm_writecache, writeback_work);
struct blk_plug plug;
- struct wc_entry *f, *g, *e = NULL;
+ struct wc_entry *f, *uninitialized_var(g), *e = NULL;
struct rb_node *node, *next_node;
struct list_head skipped;
struct writeback_list wbl;
diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
index 595a73110e17..22b3cb0050a7 100644
--- a/drivers/md/dm-zoned-metadata.c
+++ b/drivers/md/dm-zoned-metadata.c
@@ -554,6 +554,7 @@ static struct dmz_mblock *dmz_get_mblock(struct dmz_metadata *zmd,
TASK_UNINTERRUPTIBLE);
if (test_bit(DMZ_META_ERROR, &mblk->state)) {
dmz_release_mblock(zmd, mblk);
+ dmz_check_bdev(zmd->dev);
return ERR_PTR(-EIO);
}
@@ -625,6 +626,8 @@ static int dmz_rdwr_block(struct dmz_metadata *zmd, int op, sector_t block,
ret = submit_bio_wait(bio);
bio_put(bio);
+ if (ret)
+ dmz_check_bdev(zmd->dev);
return ret;
}
@@ -691,6 +694,7 @@ static int dmz_write_dirty_mblocks(struct dmz_metadata *zmd,
TASK_UNINTERRUPTIBLE);
if (test_bit(DMZ_META_ERROR, &mblk->state)) {
clear_bit(DMZ_META_ERROR, &mblk->state);
+ dmz_check_bdev(zmd->dev);
ret = -EIO;
}
nr_mblks_submitted--;
@@ -768,7 +772,7 @@ int dmz_flush_metadata(struct dmz_metadata *zmd)
/* If there are no dirty metadata blocks, just flush the device cache */
if (list_empty(&write_list)) {
ret = blkdev_issue_flush(zmd->dev->bdev, GFP_NOIO, NULL);
- goto out;
+ goto err;
}
/*
@@ -778,7 +782,7 @@ int dmz_flush_metadata(struct dmz_metadata *zmd)
*/
ret = dmz_log_dirty_mblocks(zmd, &write_list);
if (ret)
- goto out;
+ goto err;
/*
* The log is on disk. It is now safe to update in place
@@ -786,11 +790,11 @@ int dmz_flush_metadata(struct dmz_metadata *zmd)
*/
ret = dmz_write_dirty_mblocks(zmd, &write_list, zmd->mblk_primary);
if (ret)
- goto out;
+ goto err;
ret = dmz_write_sb(zmd, zmd->mblk_primary);
if (ret)
- goto out;
+ goto err;
while (!list_empty(&write_list)) {
mblk = list_first_entry(&write_list, struct dmz_mblock, link);
@@ -805,16 +809,20 @@ int dmz_flush_metadata(struct dmz_metadata *zmd)
zmd->sb_gen++;
out:
- if (ret && !list_empty(&write_list)) {
- spin_lock(&zmd->mblk_lock);
- list_splice(&write_list, &zmd->mblk_dirty_list);
- spin_unlock(&zmd->mblk_lock);
- }
-
dmz_unlock_flush(zmd);
up_write(&zmd->mblk_sem);
return ret;
+
+err:
+ if (!list_empty(&write_list)) {
+ spin_lock(&zmd->mblk_lock);
+ list_splice(&write_list, &zmd->mblk_dirty_list);
+ spin_unlock(&zmd->mblk_lock);
+ }
+ if (!dmz_check_bdev(zmd->dev))
+ ret = -EIO;
+ goto out;
}
/*
@@ -1080,9 +1088,10 @@ static int dmz_load_sb(struct dmz_metadata *zmd)
/*
* Initialize a zone descriptor.
*/
-static int dmz_init_zone(struct dmz_metadata *zmd, struct dm_zone *zone,
- struct blk_zone *blkz)
+static int dmz_init_zone(struct blk_zone *blkz, unsigned int idx, void *data)
{
+ struct dmz_metadata *zmd = data;
+ struct dm_zone *zone = &zmd->zones[idx];
struct dmz_dev *dev = zmd->dev;
	/* Ignore the possible last runt (smaller) zone */
@@ -1096,26 +1105,29 @@ static int dmz_init_zone(struct dmz_metadata *zmd, struct dm_zone *zone,
atomic_set(&zone->refcount, 0);
zone->chunk = DMZ_MAP_UNMAPPED;
- if (blkz->type == BLK_ZONE_TYPE_CONVENTIONAL) {
+ switch (blkz->type) {
+ case BLK_ZONE_TYPE_CONVENTIONAL:
set_bit(DMZ_RND, &zone->flags);
zmd->nr_rnd_zones++;
- } else if (blkz->type == BLK_ZONE_TYPE_SEQWRITE_REQ ||
- blkz->type == BLK_ZONE_TYPE_SEQWRITE_PREF) {
+ break;
+ case BLK_ZONE_TYPE_SEQWRITE_REQ:
+ case BLK_ZONE_TYPE_SEQWRITE_PREF:
set_bit(DMZ_SEQ, &zone->flags);
- } else
+ break;
+ default:
return -ENXIO;
-
- if (blkz->cond == BLK_ZONE_COND_OFFLINE)
- set_bit(DMZ_OFFLINE, &zone->flags);
- else if (blkz->cond == BLK_ZONE_COND_READONLY)
- set_bit(DMZ_READ_ONLY, &zone->flags);
+ }
if (dmz_is_rnd(zone))
zone->wp_block = 0;
else
zone->wp_block = dmz_sect2blk(blkz->wp - blkz->start);
- if (!dmz_is_offline(zone) && !dmz_is_readonly(zone)) {
+ if (blkz->cond == BLK_ZONE_COND_OFFLINE)
+ set_bit(DMZ_OFFLINE, &zone->flags);
+ else if (blkz->cond == BLK_ZONE_COND_READONLY)
+ set_bit(DMZ_READ_ONLY, &zone->flags);
+ else {
zmd->nr_useable_zones++;
if (dmz_is_rnd(zone)) {
zmd->nr_rnd_zones++;
@@ -1139,23 +1151,13 @@ static void dmz_drop_zones(struct dmz_metadata *zmd)
}
/*
- * The size of a zone report in number of zones.
- * This results in 4096*64B=256KB report zones commands.
- */
-#define DMZ_REPORT_NR_ZONES 4096
-
-/*
* Allocate and initialize zone descriptors using the zone
* information from disk.
*/
static int dmz_init_zones(struct dmz_metadata *zmd)
{
struct dmz_dev *dev = zmd->dev;
- struct dm_zone *zone;
- struct blk_zone *blkz;
- unsigned int nr_blkz;
- sector_t sector = 0;
- int i, ret = 0;
+ int ret;
/* Init */
zmd->zone_bitmap_size = dev->zone_nr_blocks >> 3;
@@ -1169,54 +1171,38 @@ static int dmz_init_zones(struct dmz_metadata *zmd)
dmz_dev_info(dev, "Using %zu B for zone information",
sizeof(struct dm_zone) * dev->nr_zones);
- /* Get zone information */
- nr_blkz = DMZ_REPORT_NR_ZONES;
- blkz = kcalloc(nr_blkz, sizeof(struct blk_zone), GFP_KERNEL);
- if (!blkz) {
- ret = -ENOMEM;
- goto out;
- }
-
/*
- * Get zone information and initialize zone descriptors.
- * At the same time, determine where the super block
- * should be: first block of the first randomly writable
- * zone.
+ * Get zone information and initialize zone descriptors. At the same
+ * time, determine where the super block should be: first block of the
+ * first randomly writable zone.
*/
- zone = zmd->zones;
- while (sector < dev->capacity) {
- /* Get zone information */
- nr_blkz = DMZ_REPORT_NR_ZONES;
- ret = blkdev_report_zones(dev->bdev, sector, blkz, &nr_blkz);
- if (ret) {
- dmz_dev_err(dev, "Report zones failed %d", ret);
- goto out;
- }
+ ret = blkdev_report_zones(dev->bdev, 0, BLK_ALL_ZONES, dmz_init_zone,
+ zmd);
+ if (ret < 0) {
+ dmz_drop_zones(zmd);
+ return ret;
+ }
- if (!nr_blkz)
- break;
+ return 0;
+}
- /* Process report */
- for (i = 0; i < nr_blkz; i++) {
- ret = dmz_init_zone(zmd, zone, &blkz[i]);
- if (ret)
- goto out;
- sector += dev->zone_nr_sectors;
- zone++;
- }
- }
+static int dmz_update_zone_cb(struct blk_zone *blkz, unsigned int idx,
+ void *data)
+{
+ struct dm_zone *zone = data;
- /* The entire zone configuration of the disk should now be known */
- if (sector < dev->capacity) {
- dmz_dev_err(dev, "Failed to get correct zone information");
- ret = -ENXIO;
- }
-out:
- kfree(blkz);
- if (ret)
- dmz_drop_zones(zmd);
+ clear_bit(DMZ_OFFLINE, &zone->flags);
+ clear_bit(DMZ_READ_ONLY, &zone->flags);
+ if (blkz->cond == BLK_ZONE_COND_OFFLINE)
+ set_bit(DMZ_OFFLINE, &zone->flags);
+ else if (blkz->cond == BLK_ZONE_COND_READONLY)
+ set_bit(DMZ_READ_ONLY, &zone->flags);
- return ret;
+ if (dmz_is_seq(zone))
+ zone->wp_block = dmz_sect2blk(blkz->wp - blkz->start);
+ else
+ zone->wp_block = 0;
+ return 0;
}
/*
@@ -1224,9 +1210,7 @@ out:
*/
static int dmz_update_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
{
- unsigned int nr_blkz = 1;
unsigned int noio_flag;
- struct blk_zone blkz;
int ret;
/*
@@ -1236,29 +1220,19 @@ static int dmz_update_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
* GFP_NOIO was specified.
*/
noio_flag = memalloc_noio_save();
- ret = blkdev_report_zones(zmd->dev->bdev, dmz_start_sect(zmd, zone),
- &blkz, &nr_blkz);
+ ret = blkdev_report_zones(zmd->dev->bdev, dmz_start_sect(zmd, zone), 1,
+ dmz_update_zone_cb, zone);
memalloc_noio_restore(noio_flag);
- if (!nr_blkz)
+
+ if (ret == 0)
ret = -EIO;
- if (ret) {
+ if (ret < 0) {
dmz_dev_err(zmd->dev, "Get zone %u report failed",
dmz_id(zmd, zone));
+ dmz_check_bdev(zmd->dev);
return ret;
}
- clear_bit(DMZ_OFFLINE, &zone->flags);
- clear_bit(DMZ_READ_ONLY, &zone->flags);
- if (blkz.cond == BLK_ZONE_COND_OFFLINE)
- set_bit(DMZ_OFFLINE, &zone->flags);
- else if (blkz.cond == BLK_ZONE_COND_READONLY)
- set_bit(DMZ_READ_ONLY, &zone->flags);
-
- if (dmz_is_seq(zone))
- zone->wp_block = dmz_sect2blk(blkz.wp - blkz.start);
- else
- zone->wp_block = 0;
-
return 0;
}
@@ -1312,9 +1286,9 @@ static int dmz_reset_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
if (!dmz_is_empty(zone) || dmz_seq_write_err(zone)) {
struct dmz_dev *dev = zmd->dev;
- ret = blkdev_reset_zones(dev->bdev,
- dmz_start_sect(zmd, zone),
- dev->zone_nr_sectors, GFP_NOIO);
+ ret = blkdev_zone_mgmt(dev->bdev, REQ_OP_ZONE_RESET,
+ dmz_start_sect(zmd, zone),
+ dev->zone_nr_sectors, GFP_NOIO);
if (ret) {
dmz_dev_err(dev, "Reset zone %u failed %d",
dmz_id(zmd, zone), ret);
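The dm-zoned rework above moves from filling a caller-allocated struct blk_zone array to the callback style of blkdev_report_zones(): the caller passes a report_zones_cb plus an opaque pointer, the callback runs once per reported zone, and the return value is the number of zones processed (negative on error). A condensed usage sketch under those assumptions; the counting callback and wrapper are hypothetical:

/* Hypothetical callback: count conventional zones on the device. */
static int count_conv_zones_cb(struct blk_zone *blkz, unsigned int idx,
			       void *data)
{
	unsigned int *nr_conv = data;

	if (blkz->type == BLK_ZONE_TYPE_CONVENTIONAL)
		(*nr_conv)++;
	return 0;		/* a negative return aborts the report */
}

static int count_conv_zones(struct block_device *bdev)
{
	unsigned int nr_conv = 0;
	int ret;

	/* Report every zone starting at sector 0; ret < 0 is an error. */
	ret = blkdev_report_zones(bdev, 0, BLK_ALL_ZONES,
				  count_conv_zones_cb, &nr_conv);
	return ret < 0 ? ret : nr_conv;
}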
diff --git a/drivers/md/dm-zoned-reclaim.c b/drivers/md/dm-zoned-reclaim.c
index d240d7ca8a8a..e7ace908a9b7 100644
--- a/drivers/md/dm-zoned-reclaim.c
+++ b/drivers/md/dm-zoned-reclaim.c
@@ -82,6 +82,7 @@ static int dmz_reclaim_align_wp(struct dmz_reclaim *zrc, struct dm_zone *zone,
"Align zone %u wp %llu to %llu (wp+%u) blocks failed %d",
dmz_id(zmd, zone), (unsigned long long)wp_block,
(unsigned long long)block, nr_blocks, ret);
+ dmz_check_bdev(zrc->dev);
return ret;
}
@@ -489,12 +490,7 @@ static void dmz_reclaim_work(struct work_struct *work)
ret = dmz_do_reclaim(zrc);
if (ret) {
dmz_dev_debug(zrc->dev, "Reclaim error %d\n", ret);
- if (ret == -EIO)
- /*
- * LLD might be performing some error handling sequence
- * at the underlying device. To not interfere, do not
- * attempt to schedule the next reclaim run immediately.
- */
+ if (!dmz_check_bdev(zrc->dev))
return;
}
diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
index d3bcc4197f5d..4574e0dedbd6 100644
--- a/drivers/md/dm-zoned-target.c
+++ b/drivers/md/dm-zoned-target.c
@@ -80,6 +80,8 @@ static inline void dmz_bio_endio(struct bio *bio, blk_status_t status)
if (status != BLK_STS_OK && bio->bi_status == BLK_STS_OK)
bio->bi_status = status;
+ if (bio->bi_status != BLK_STS_OK)
+ bioctx->target->dev->flags |= DMZ_CHECK_BDEV;
if (refcount_dec_and_test(&bioctx->ref)) {
struct dm_zone *zone = bioctx->zone;
@@ -565,32 +567,52 @@ out:
}
/*
- * Check the backing device availability. If it's on the way out,
+ * Check if the backing device is being removed. If it's on the way out,
* start failing I/O. Reclaim and metadata components also call this
* function to cleanly abort operation in the event of such failure.
*/
bool dmz_bdev_is_dying(struct dmz_dev *dmz_dev)
{
- struct gendisk *disk;
+ if (dmz_dev->flags & DMZ_BDEV_DYING)
+ return true;
- if (!(dmz_dev->flags & DMZ_BDEV_DYING)) {
- disk = dmz_dev->bdev->bd_disk;
- if (blk_queue_dying(bdev_get_queue(dmz_dev->bdev))) {
- dmz_dev_warn(dmz_dev, "Backing device queue dying");
- dmz_dev->flags |= DMZ_BDEV_DYING;
- } else if (disk->fops->check_events) {
- if (disk->fops->check_events(disk, 0) &
- DISK_EVENT_MEDIA_CHANGE) {
- dmz_dev_warn(dmz_dev, "Backing device offline");
- dmz_dev->flags |= DMZ_BDEV_DYING;
- }
- }
+ if (dmz_dev->flags & DMZ_CHECK_BDEV)
+ return !dmz_check_bdev(dmz_dev);
+
+ if (blk_queue_dying(bdev_get_queue(dmz_dev->bdev))) {
+ dmz_dev_warn(dmz_dev, "Backing device queue dying");
+ dmz_dev->flags |= DMZ_BDEV_DYING;
}
return dmz_dev->flags & DMZ_BDEV_DYING;
}
/*
+ * Check the backing device availability. This detects such events as
+ * backing device going offline due to errors, media removals, etc.
+ * This check is less efficient than dmz_bdev_is_dying() and should
+ * only be performed as part of error handling.
+ */
+bool dmz_check_bdev(struct dmz_dev *dmz_dev)
+{
+ struct gendisk *disk;
+
+ dmz_dev->flags &= ~DMZ_CHECK_BDEV;
+
+ if (dmz_bdev_is_dying(dmz_dev))
+ return false;
+
+ disk = dmz_dev->bdev->bd_disk;
+ if (disk->fops->check_events &&
+ disk->fops->check_events(disk, 0) & DISK_EVENT_MEDIA_CHANGE) {
+ dmz_dev_warn(dmz_dev, "Backing device offline");
+ dmz_dev->flags |= DMZ_BDEV_DYING;
+ }
+
+ return !(dmz_dev->flags & DMZ_BDEV_DYING);
+}
+
+/*
* Process a new BIO.
*/
static int dmz_map(struct dm_target *ti, struct bio *bio)
@@ -902,8 +924,8 @@ static int dmz_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
{
struct dmz_target *dmz = ti->private;
- if (dmz_bdev_is_dying(dmz->dev))
- return -ENODEV;
+ if (!dmz_check_bdev(dmz->dev))
+ return -EIO;
*bdev = dmz->dev->bdev;
diff --git a/drivers/md/dm-zoned.h b/drivers/md/dm-zoned.h
index d8e70b0ade35..5b5e493d479c 100644
--- a/drivers/md/dm-zoned.h
+++ b/drivers/md/dm-zoned.h
@@ -72,6 +72,7 @@ struct dmz_dev {
/* Device flags. */
#define DMZ_BDEV_DYING (1 << 0)
+#define DMZ_CHECK_BDEV (2 << 0)
/*
* Zone descriptor.
@@ -255,5 +256,6 @@ void dmz_schedule_reclaim(struct dmz_reclaim *zrc);
* Functions defined in dm-zoned-target.c
*/
bool dmz_bdev_is_dying(struct dmz_dev *dmz_dev);
+bool dmz_check_bdev(struct dmz_dev *dmz_dev);
#endif /* DM_ZONED_H */
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 1a5e328c443a..e8f9661a10a1 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -440,14 +440,48 @@ static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
return dm_get_geometry(md, geo);
}
+#ifdef CONFIG_BLK_DEV_ZONED
+int dm_report_zones_cb(struct blk_zone *zone, unsigned int idx, void *data)
+{
+ struct dm_report_zones_args *args = data;
+ sector_t sector_diff = args->tgt->begin - args->start;
+
+ /*
+ * Ignore zones beyond the target range.
+ */
+ if (zone->start >= args->start + args->tgt->len)
+ return 0;
+
+ /*
+ * Remap the start sector and write pointer position of the zone
+ * to match its position in the target range.
+ */
+ zone->start += sector_diff;
+ if (zone->type != BLK_ZONE_TYPE_CONVENTIONAL) {
+ if (zone->cond == BLK_ZONE_COND_FULL)
+ zone->wp = zone->start + zone->len;
+ else if (zone->cond == BLK_ZONE_COND_EMPTY)
+ zone->wp = zone->start;
+ else
+ zone->wp += sector_diff;
+ }
+
+ args->next_sector = zone->start + zone->len;
+ return args->orig_cb(zone, args->zone_idx++, args->orig_data);
+}
+EXPORT_SYMBOL_GPL(dm_report_zones_cb);
+
static int dm_blk_report_zones(struct gendisk *disk, sector_t sector,
- struct blk_zone *zones, unsigned int *nr_zones)
+ unsigned int nr_zones, report_zones_cb cb, void *data)
{
-#ifdef CONFIG_BLK_DEV_ZONED
struct mapped_device *md = disk->private_data;
- struct dm_target *tgt;
struct dm_table *map;
int srcu_idx, ret;
+ struct dm_report_zones_args args = {
+ .next_sector = sector,
+ .orig_data = data,
+ .orig_cb = cb,
+ };
if (dm_suspended_md(md))
return -EAGAIN;
@@ -456,38 +490,30 @@ static int dm_blk_report_zones(struct gendisk *disk, sector_t sector,
if (!map)
return -EIO;
- tgt = dm_table_find_target(map, sector);
- if (!tgt) {
- ret = -EIO;
- goto out;
- }
+ do {
+ struct dm_target *tgt;
- /*
- * If we are executing this, we already know that the block device
- * is a zoned device and so each target should have support for that
- * type of drive. A missing report_zones method means that the target
- * driver has a problem.
- */
- if (WARN_ON(!tgt->type->report_zones)) {
- ret = -EIO;
- goto out;
- }
+ tgt = dm_table_find_target(map, args.next_sector);
+ if (WARN_ON_ONCE(!tgt->type->report_zones)) {
+ ret = -EIO;
+ goto out;
+ }
- /*
- * blkdev_report_zones() will loop and call this again to cover all the
- * zones of the target, eventually moving on to the next target.
- * So there is no need to loop here trying to fill the entire array
- * of zones.
- */
- ret = tgt->type->report_zones(tgt, sector, zones, nr_zones);
+ args.tgt = tgt;
+ ret = tgt->type->report_zones(tgt, &args, nr_zones);
+ if (ret < 0)
+ goto out;
+ } while (args.zone_idx < nr_zones &&
+ args.next_sector < get_capacity(disk));
+ ret = args.zone_idx;
out:
dm_put_live_table(md, srcu_idx);
return ret;
-#else
- return -ENOTSUPP;
-#endif
}
+#else
+#define dm_blk_report_zones NULL
+#endif /* CONFIG_BLK_DEV_ZONED */
static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx,
struct block_device **bdev)
@@ -1174,7 +1200,8 @@ static size_t dm_dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff,
/*
* A target may call dm_accept_partial_bio only from the map routine. It is
- * allowed for all bio types except REQ_PREFLUSH and REQ_OP_ZONE_RESET.
+ * allowed for all bio types except REQ_PREFLUSH, REQ_OP_ZONE_RESET,
+ * REQ_OP_ZONE_OPEN, REQ_OP_ZONE_CLOSE and REQ_OP_ZONE_FINISH.
*
* dm_accept_partial_bio informs the dm that the target only wants to process
* additional n_sectors sectors of the bio and the rest of the data should be
@@ -1212,54 +1239,6 @@ void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
}
EXPORT_SYMBOL_GPL(dm_accept_partial_bio);
-/*
- * The zone descriptors obtained with a zone report indicate
- * zone positions within the underlying device of the target. The zone
- * descriptors must be remapped to match their position within the dm device.
- * The caller target should obtain the zones information using
- * blkdev_report_zones() to ensure that remapping for partition offset is
- * already handled.
- */
-void dm_remap_zone_report(struct dm_target *ti, sector_t start,
- struct blk_zone *zones, unsigned int *nr_zones)
-{
-#ifdef CONFIG_BLK_DEV_ZONED
- struct blk_zone *zone;
- unsigned int nrz = *nr_zones;
- int i;
-
- /*
- * Remap the start sector and write pointer position of the zones in
- * the array. Since we may have obtained from the target underlying
- * device more zones that the target size, also adjust the number
- * of zones.
- */
- for (i = 0; i < nrz; i++) {
- zone = zones + i;
- if (zone->start >= start + ti->len) {
- memset(zone, 0, sizeof(struct blk_zone) * (nrz - i));
- break;
- }
-
- zone->start = zone->start + ti->begin - start;
- if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
- continue;
-
- if (zone->cond == BLK_ZONE_COND_FULL)
- zone->wp = zone->start + zone->len;
- else if (zone->cond == BLK_ZONE_COND_EMPTY)
- zone->wp = zone->start;
- else
- zone->wp = zone->wp + ti->begin - start;
- }
-
- *nr_zones = i;
-#else /* !CONFIG_BLK_DEV_ZONED */
- *nr_zones = 0;
-#endif
-}
-EXPORT_SYMBOL_GPL(dm_remap_zone_report);
-
static blk_qc_t __map_bio(struct dm_target_io *tio)
{
int r;
@@ -1627,7 +1606,7 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md,
ci.sector_count = 0;
error = __send_empty_flush(&ci);
/* dec_pending submits any data associated with flush */
- } else if (bio_op(bio) == REQ_OP_ZONE_RESET) {
+ } else if (op_is_zone_mgmt(bio_op(bio))) {
ci.bio = bio;
ci.sector_count = 0;
error = __split_and_process_non_flush(&ci);
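In dm_blk_report_zones()/dm_report_zones_cb() above, each reported zone is shifted by the constant distance between the target's offset in the dm device (tgt->begin) and the queried range on the underlying device (args->start). A worked sketch with invented numbers, not values taken from the patch:

/*
 * Assume tgt->begin = 2048 and args->start = 0, so sector_diff = 2048.
 * A sequential zone reported by the underlying device as
 *	start = 4096, len = 4096, wp = 4608, cond = IMP_OPEN
 * is remapped for the dm device to
 *	start = 4096 + 2048 = 6144, wp = 4608 + 2048 = 6656,
 * keeping the write pointer at the same offset within the zone.
 * FULL and EMPTY zones instead get wp pinned to the zone end and
 * zone start respectively, as the conditional above shows.
 */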
diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
index b092c7b5282f..3ad18246fcb3 100644
--- a/drivers/md/md-bitmap.c
+++ b/drivers/md/md-bitmap.c
@@ -2139,6 +2139,7 @@ int md_bitmap_resize(struct bitmap *bitmap, sector_t blocks,
memcpy(page_address(store.sb_page),
page_address(bitmap->storage.sb_page),
sizeof(bitmap_super_t));
+ spin_lock_irq(&bitmap->counts.lock);
md_bitmap_file_unmap(&bitmap->storage);
bitmap->storage = store;
@@ -2154,7 +2155,6 @@ int md_bitmap_resize(struct bitmap *bitmap, sector_t blocks,
blocks = min(old_counts.chunks << old_counts.chunkshift,
chunks << chunkshift);
- spin_lock_irq(&bitmap->counts.lock);
/* For cluster raid, need to pre-allocate bitmap */
if (mddev_is_clustered(bitmap->mddev)) {
unsigned long page;
diff --git a/drivers/md/md-linear.c b/drivers/md/md-linear.c
index c766c559d36d..26c75c0199fa 100644
--- a/drivers/md/md-linear.c
+++ b/drivers/md/md-linear.c
@@ -244,10 +244,9 @@ static bool linear_make_request(struct mddev *mddev, struct bio *bio)
sector_t start_sector, end_sector, data_offset;
sector_t bio_sector = bio->bi_iter.bi_sector;
- if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
- md_flush_request(mddev, bio);
+ if (unlikely(bio->bi_opf & REQ_PREFLUSH)
+ && md_flush_request(mddev, bio))
return true;
- }
tmp_dev = which_dev(mddev, bio_sector);
start_sector = tmp_dev->end_sector - tmp_dev->rdev->sectors;
diff --git a/drivers/md/md-multipath.c b/drivers/md/md-multipath.c
index 6780938d2991..152f9e65a226 100644
--- a/drivers/md/md-multipath.c
+++ b/drivers/md/md-multipath.c
@@ -104,10 +104,9 @@ static bool multipath_make_request(struct mddev *mddev, struct bio * bio)
struct multipath_bh * mp_bh;
struct multipath_info *multipath;
- if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
- md_flush_request(mddev, bio);
+ if (unlikely(bio->bi_opf & REQ_PREFLUSH)
+ && md_flush_request(mddev, bio))
return true;
- }
mp_bh = mempool_alloc(&conf->pool, GFP_NOIO);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 1be7abeb24fd..805b33e27496 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -550,7 +550,13 @@ static void md_submit_flush_data(struct work_struct *ws)
}
}
-void md_flush_request(struct mddev *mddev, struct bio *bio)
+/*
+ * Manages consolidation of flushes and submitting any flushes needed for
+ * a bio with REQ_PREFLUSH. Returns true if the bio is finished or is
+ * being finished in another context. Returns false if the flushing is
+ * complete but still needs the I/O portion of the bio to be processed.
+ */
+bool md_flush_request(struct mddev *mddev, struct bio *bio)
{
ktime_t start = ktime_get_boottime();
spin_lock_irq(&mddev->lock);
@@ -575,9 +581,10 @@ void md_flush_request(struct mddev *mddev, struct bio *bio)
bio_endio(bio);
else {
bio->bi_opf &= ~REQ_PREFLUSH;
- mddev->pers->make_request(mddev, bio);
+ return false;
}
}
+ return true;
}
EXPORT_SYMBOL(md_flush_request);
@@ -1098,6 +1105,7 @@ static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor
char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
mdp_super_t *sb;
int ret;
+ bool spare_disk = true;
/*
* Calculate the position of the superblock (512byte sectors),
@@ -1148,8 +1156,18 @@ static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor
else
rdev->desc_nr = sb->this_disk.number;
+ /* not spare disk, or LEVEL_MULTIPATH */
+ if (sb->level == LEVEL_MULTIPATH ||
+ (rdev->desc_nr >= 0 &&
+ sb->disks[rdev->desc_nr].state &
+ ((1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE))))
+ spare_disk = false;
+
if (!refdev) {
- ret = 1;
+ if (!spare_disk)
+ ret = 1;
+ else
+ ret = 0;
} else {
__u64 ev1, ev2;
mdp_super_t *refsb = page_address(refdev->sb_page);
@@ -1165,7 +1183,8 @@ static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor
}
ev1 = md_event(sb);
ev2 = md_event(refsb);
- if (ev1 > ev2)
+
+ if (!spare_disk && ev1 > ev2)
ret = 1;
else
ret = 0;
@@ -1525,6 +1544,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
sector_t sectors;
char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
int bmask;
+ bool spare_disk = true;
/*
* Calculate the position of the superblock in 512byte sectors.
@@ -1658,8 +1678,19 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
sb->level != 0)
return -EINVAL;
+ /* not spare disk, or LEVEL_MULTIPATH */
+ if (sb->level == cpu_to_le32(LEVEL_MULTIPATH) ||
+ (rdev->desc_nr >= 0 &&
+ rdev->desc_nr < le32_to_cpu(sb->max_dev) &&
+ (le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < MD_DISK_ROLE_MAX ||
+ le16_to_cpu(sb->dev_roles[rdev->desc_nr]) == MD_DISK_ROLE_JOURNAL)))
+ spare_disk = false;
+
if (!refdev) {
- ret = 1;
+ if (!spare_disk)
+ ret = 1;
+ else
+ ret = 0;
} else {
__u64 ev1, ev2;
struct mdp_superblock_1 *refsb = page_address(refdev->sb_page);
@@ -1676,7 +1707,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
ev1 = le64_to_cpu(sb->events);
ev2 = le64_to_cpu(refsb->events);
- if (ev1 > ev2)
+ if (!spare_disk && ev1 > ev2)
ret = 1;
else
ret = 0;
@@ -3597,7 +3628,7 @@ abort_free:
* Check a full RAID array for plausibility
*/
-static void analyze_sbs(struct mddev *mddev)
+static int analyze_sbs(struct mddev *mddev)
{
int i;
struct md_rdev *rdev, *freshest, *tmp;
@@ -3618,6 +3649,12 @@ static void analyze_sbs(struct mddev *mddev)
md_kick_rdev_from_array(rdev);
}
+ /* Cannot find a valid fresh disk */
+ if (!freshest) {
+ pr_warn("md: cannot find a valid disk\n");
+ return -EINVAL;
+ }
+
super_types[mddev->major_version].
validate_super(mddev, freshest);
@@ -3652,6 +3689,8 @@ static void analyze_sbs(struct mddev *mddev)
clear_bit(In_sync, &rdev->flags);
}
}
+
+ return 0;
}
/* Read a fixed-point number.
@@ -5570,7 +5609,9 @@ int md_run(struct mddev *mddev)
if (!mddev->raid_disks) {
if (!mddev->persistent)
return -EINVAL;
- analyze_sbs(mddev);
+ err = analyze_sbs(mddev);
+ if (err)
+ return -EINVAL;
}
if (mddev->level != LEVEL_NONE)
diff --git a/drivers/md/md.h b/drivers/md/md.h
index c5e3ff398b59..5f86f8adb0a4 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -550,7 +550,7 @@ struct md_personality
int level;
struct list_head list;
struct module *owner;
- bool (*make_request)(struct mddev *mddev, struct bio *bio);
+ bool __must_check (*make_request)(struct mddev *mddev, struct bio *bio);
/*
* start up works that do NOT require md_thread. tasks that
* requires md_thread should go into start()
@@ -703,7 +703,7 @@ extern void md_error(struct mddev *mddev, struct md_rdev *rdev);
extern void md_finish_reshape(struct mddev *mddev);
extern int mddev_congested(struct mddev *mddev, int bits);
-extern void md_flush_request(struct mddev *mddev, struct bio *bio);
+extern bool __must_check md_flush_request(struct mddev *mddev, struct bio *bio);
extern void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
sector_t sector, int size, struct page *page);
extern int md_super_wait(struct mddev *mddev);
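With md_flush_request() now returning bool and marked __must_check, every personality's make_request is expected to follow the same guard shape, visible in the linear/multipath/raid0/raid1/raid10 hunks around this change. A condensed sketch of that contract, using a hypothetical personality:

static bool example_make_request(struct mddev *mddev, struct bio *bio)
{
	/*
	 * True: the flush bio was completed, or was queued for another
	 * context to finish; nothing more to do here.
	 */
	if (unlikely(bio->bi_opf & REQ_PREFLUSH) &&
	    md_flush_request(mddev, bio))
		return true;

	/*
	 * False: the flush is done and REQ_PREFLUSH has been cleared;
	 * fall through and process the data portion of the bio.
	 */
	return example_handle_data(mddev, bio);	/* hypothetical helper */
}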
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 1e772287b1c8..b7c20979bd19 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -575,10 +575,9 @@ static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
unsigned chunk_sects;
unsigned sectors;
- if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
- md_flush_request(mddev, bio);
+ if (unlikely(bio->bi_opf & REQ_PREFLUSH)
+ && md_flush_request(mddev, bio))
return true;
- }
if (unlikely((bio_op(bio) == REQ_OP_DISCARD))) {
raid0_handle_discard(mddev, bio);
@@ -615,7 +614,7 @@ static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
tmp_dev = map_sector(mddev, zone, sector, &sector);
break;
default:
- WARN("md/raid0:%s: Invalid layout\n", mdname(mddev));
+ WARN(1, "md/raid0:%s: Invalid layout\n", mdname(mddev));
bio_io_error(bio);
return true;
}
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 0466ee2453b4..a409ab6f30bc 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -819,6 +819,7 @@ static void flush_bio_list(struct r1conf *conf, struct bio *bio)
else
generic_make_request(bio);
bio = next;
+ cond_resched();
}
}
@@ -1567,10 +1568,9 @@ static bool raid1_make_request(struct mddev *mddev, struct bio *bio)
{
sector_t sectors;
- if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
- md_flush_request(mddev, bio);
+ if (unlikely(bio->bi_opf & REQ_PREFLUSH)
+ && md_flush_request(mddev, bio))
return true;
- }
/*
* There is a limit to the maximum size, but
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 299c7b1c9718..ec136e44aef7 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -191,7 +191,7 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
out_free_pages:
while (--j >= 0)
- resync_free_pages(&rps[j * 2]);
+ resync_free_pages(&rps[j]);
j = 0;
out_free_bio:
@@ -1525,10 +1525,9 @@ static bool raid10_make_request(struct mddev *mddev, struct bio *bio)
int chunk_sects = chunk_mask + 1;
int sectors = bio_sectors(bio);
- if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
- md_flush_request(mddev, bio);
+ if (unlikely(bio->bi_opf & REQ_PREFLUSH)
+ && md_flush_request(mddev, bio))
return true;
- }
if (!md_write_start(mddev, bio))
return false;
diff --git a/drivers/md/raid5-ppl.c b/drivers/md/raid5-ppl.c
index 18a4064a61a8..cab5b1352892 100644
--- a/drivers/md/raid5-ppl.c
+++ b/drivers/md/raid5-ppl.c
@@ -1404,7 +1404,7 @@ int ppl_init_log(struct r5conf *conf)
atomic64_set(&ppl_conf->seq, 0);
INIT_LIST_HEAD(&ppl_conf->no_mem_stripes);
spin_lock_init(&ppl_conf->no_mem_stripes_lock);
- ppl_conf->write_hint = RWF_WRITE_LIFE_NOT_SET;
+ ppl_conf->write_hint = RWH_WRITE_LIFE_NOT_SET;
if (!mddev->external) {
ppl_conf->signature = ~crc32c_le(~0, mddev->uuid, sizeof(mddev->uuid));
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 223e97ab27e6..f0fc538bfe59 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -1134,7 +1134,7 @@ again:
bi->bi_iter.bi_size = STRIPE_SIZE;
bi->bi_write_hint = sh->dev[i].write_hint;
if (!rrdev)
- sh->dev[i].write_hint = RWF_WRITE_LIFE_NOT_SET;
+ sh->dev[i].write_hint = RWH_WRITE_LIFE_NOT_SET;
/*
* If this is discard request, set bi_vcnt 0. We don't
* want to confuse SCSI because SCSI will replace payload
@@ -1187,7 +1187,7 @@ again:
rbi->bi_io_vec[0].bv_offset = 0;
rbi->bi_iter.bi_size = STRIPE_SIZE;
rbi->bi_write_hint = sh->dev[i].write_hint;
- sh->dev[i].write_hint = RWF_WRITE_LIFE_NOT_SET;
+ sh->dev[i].write_hint = RWH_WRITE_LIFE_NOT_SET;
/*
* If this is discard request, set bi_vcnt 0. We don't
* want to confuse SCSI because SCSI will replace payload
@@ -5592,8 +5592,8 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
if (ret == 0)
return true;
if (ret == -ENODEV) {
- md_flush_request(mddev, bi);
- return true;
+ if (md_flush_request(mddev, bi))
+ return true;
}
/* ret == -EAGAIN, fallback */
/*
diff --git a/drivers/media/cec/cec-adap.c b/drivers/media/cec/cec-adap.c
index 5ef7daeb8cbd..9340435a94a0 100644
--- a/drivers/media/cec/cec-adap.c
+++ b/drivers/media/cec/cec-adap.c
@@ -319,6 +319,8 @@ static void cec_post_state_event(struct cec_adapter *adap)
ev.state_change.phys_addr = adap->phys_addr;
ev.state_change.log_addr_mask = adap->log_addrs.log_addr_mask;
+ ev.state_change.have_conn_info =
+ adap->conn_info.type != CEC_CONNECTOR_TYPE_NO_CONNECTOR;
cec_queue_event(adap, &ev);
}
@@ -1976,7 +1978,7 @@ static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg,
* Play function, this message can have variable length
* depending on the specific play function that is used.
*/
- case 0x60:
+ case CEC_OP_UI_CMD_PLAY_FUNCTION:
if (msg->len == 2)
rc_keydown(adap->rc, RC_PROTO_CEC,
msg->msg[2], 0);
@@ -1993,8 +1995,12 @@ static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg,
* For the time being these messages are not processed by the
* framework and are simply forwarded to the user space.
*/
- case 0x56: case 0x57:
- case 0x67: case 0x68: case 0x69: case 0x6a:
+ case CEC_OP_UI_CMD_SELECT_BROADCAST_TYPE:
+ case CEC_OP_UI_CMD_SELECT_SOUND_PRESENTATION:
+ case CEC_OP_UI_CMD_TUNE_FUNCTION:
+ case CEC_OP_UI_CMD_SELECT_MEDIA_FUNCTION:
+ case CEC_OP_UI_CMD_SELECT_AV_INPUT_FUNCTION:
+ case CEC_OP_UI_CMD_SELECT_AUDIO_INPUT_FUNCTION:
break;
default:
rc_keydown(adap->rc, RC_PROTO_CEC, msg->msg[2], 0);
diff --git a/drivers/media/cec/cec-api.c b/drivers/media/cec/cec-api.c
index 12d676484472..17d1cb2e5f97 100644
--- a/drivers/media/cec/cec-api.c
+++ b/drivers/media/cec/cec-api.c
@@ -187,6 +187,21 @@ static long cec_adap_s_log_addrs(struct cec_adapter *adap, struct cec_fh *fh,
return 0;
}
+static long cec_adap_g_connector_info(struct cec_adapter *adap,
+ struct cec_log_addrs __user *parg)
+{
+ int ret = 0;
+
+ if (!(adap->capabilities & CEC_CAP_CONNECTOR_INFO))
+ return -ENOTTY;
+
+ mutex_lock(&adap->lock);
+ if (copy_to_user(parg, &adap->conn_info, sizeof(adap->conn_info)))
+ ret = -EFAULT;
+ mutex_unlock(&adap->lock);
+ return ret;
+}
+
static long cec_transmit(struct cec_adapter *adap, struct cec_fh *fh,
bool block, struct cec_msg __user *parg)
{
@@ -506,6 +521,9 @@ static long cec_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
case CEC_ADAP_S_LOG_ADDRS:
return cec_adap_s_log_addrs(adap, fh, block, parg);
+ case CEC_ADAP_G_CONNECTOR_INFO:
+ return cec_adap_g_connector_info(adap, parg);
+
case CEC_TRANSMIT:
return cec_transmit(adap, fh, block, parg);
@@ -578,6 +596,8 @@ static int cec_open(struct inode *inode, struct file *filp)
/* Queue up initial state events */
ev.state_change.phys_addr = adap->phys_addr;
ev.state_change.log_addr_mask = adap->log_addrs.log_addr_mask;
+ ev.state_change.have_conn_info =
+ adap->conn_info.type != CEC_CONNECTOR_TYPE_NO_CONNECTOR;
cec_queue_event_fh(fh, &ev, 0);
#ifdef CONFIG_CEC_PIN
if (adap->pin && adap->pin->ops->read_hpd) {
diff --git a/drivers/media/cec/cec-core.c b/drivers/media/cec/cec-core.c
index 9c610e1e99b8..db7adffcdc76 100644
--- a/drivers/media/cec/cec-core.c
+++ b/drivers/media/cec/cec-core.c
@@ -257,11 +257,6 @@ struct cec_adapter *cec_allocate_adapter(const struct cec_adap_ops *ops,
struct cec_adapter *adap;
int res;
- /*
- * Disable this capability until the connector info public API
- * is ready.
- */
- caps &= ~CEC_CAP_CONNECTOR_INFO;
#ifndef CONFIG_MEDIA_CEC_RC
caps &= ~CEC_CAP_RC;
#endif
diff --git a/drivers/media/cec/cec-pin.c b/drivers/media/cec/cec-pin.c
index 8f987bc0dd88..660fe111f540 100644
--- a/drivers/media/cec/cec-pin.c
+++ b/drivers/media/cec/cec-pin.c
@@ -1279,6 +1279,15 @@ static void cec_pin_adap_free(struct cec_adapter *adap)
kfree(pin);
}
+static int cec_pin_received(struct cec_adapter *adap, struct cec_msg *msg)
+{
+ struct cec_pin *pin = adap->pin;
+
+ if (pin->ops->received)
+ return pin->ops->received(adap, msg);
+ return -ENOMSG;
+}
+
void cec_pin_changed(struct cec_adapter *adap, bool value)
{
struct cec_pin *pin = adap->pin;
@@ -1301,6 +1310,7 @@ static const struct cec_adap_ops cec_pin_adap_ops = {
.error_inj_parse_line = cec_pin_error_inj_parse_line,
.error_inj_show = cec_pin_error_inj_show,
#endif
+ .received = cec_pin_received,
};
struct cec_adapter *cec_pin_allocate_adapter(const struct cec_pin_ops *pin_ops,
diff --git a/drivers/media/common/siano/smscoreapi.c b/drivers/media/common/siano/smscoreapi.c
index 0ba51dacc580..c1511094fdc7 100644
--- a/drivers/media/common/siano/smscoreapi.c
+++ b/drivers/media/common/siano/smscoreapi.c
@@ -230,8 +230,8 @@ static char *siano_msgs[] = {
[MSG_SMS_FLASH_DL_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_FLASH_DL_REQ",
[MSG_SMS_EXEC_TEST_1_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_EXEC_TEST_1_REQ",
[MSG_SMS_EXEC_TEST_1_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_EXEC_TEST_1_RES",
- [MSG_SMS_ENBALE_TS_INTERFACE_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_ENBALE_TS_INTERFACE_REQ",
- [MSG_SMS_ENBALE_TS_INTERFACE_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_ENBALE_TS_INTERFACE_RES",
+ [MSG_SMS_ENABLE_TS_INTERFACE_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_ENABLE_TS_INTERFACE_REQ",
+ [MSG_SMS_ENABLE_TS_INTERFACE_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_ENABLE_TS_INTERFACE_RES",
[MSG_SMS_SPI_SET_BUS_WIDTH_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_SPI_SET_BUS_WIDTH_REQ",
[MSG_SMS_SPI_SET_BUS_WIDTH_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_SPI_SET_BUS_WIDTH_RES",
[MSG_SMS_SEND_EMM_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_SEND_EMM_REQ",
diff --git a/drivers/media/common/siano/smscoreapi.h b/drivers/media/common/siano/smscoreapi.h
index a2f95f4899c2..b3b793b5caf3 100644
--- a/drivers/media/common/siano/smscoreapi.h
+++ b/drivers/media/common/siano/smscoreapi.h
@@ -434,8 +434,8 @@ enum msg_types {
MSG_SMS_FLASH_DL_REQ = 732,
MSG_SMS_EXEC_TEST_1_REQ = 734,
MSG_SMS_EXEC_TEST_1_RES = 735,
- MSG_SMS_ENBALE_TS_INTERFACE_REQ = 736,
- MSG_SMS_ENBALE_TS_INTERFACE_RES = 737,
+ MSG_SMS_ENABLE_TS_INTERFACE_REQ = 736,
+ MSG_SMS_ENABLE_TS_INTERFACE_RES = 737,
MSG_SMS_SPI_SET_BUS_WIDTH_REQ = 738,
MSG_SMS_SPI_SET_BUS_WIDTH_RES = 739,
MSG_SMS_SEND_EMM_REQ = 740,
diff --git a/drivers/media/common/siano/smsir.h b/drivers/media/common/siano/smsir.h
index b2c54c256e86..ada41d5c4e83 100644
--- a/drivers/media/common/siano/smsir.h
+++ b/drivers/media/common/siano/smsir.h
@@ -1,5 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
- * SPDX-License-Identifier: GPL-2.0+
*
* Siano Mobile Silicon, Inc.
* MDTV receiver kernel modules.
diff --git a/drivers/media/common/videobuf2/videobuf2-v4l2.c b/drivers/media/common/videobuf2/videobuf2-v4l2.c
index 5a9ba3846f0a..e652f4318284 100644
--- a/drivers/media/common/videobuf2/videobuf2-v4l2.c
+++ b/drivers/media/common/videobuf2/videobuf2-v4l2.c
@@ -49,8 +49,11 @@ module_param(debug, int, 0644);
V4L2_BUF_FLAG_REQUEST_FD | \
V4L2_BUF_FLAG_TIMESTAMP_MASK)
/* Output buffer flags that should be passed on to the driver */
-#define V4L2_BUFFER_OUT_FLAGS (V4L2_BUF_FLAG_PFRAME | V4L2_BUF_FLAG_BFRAME | \
- V4L2_BUF_FLAG_KEYFRAME | V4L2_BUF_FLAG_TIMECODE)
+#define V4L2_BUFFER_OUT_FLAGS (V4L2_BUF_FLAG_PFRAME | \
+ V4L2_BUF_FLAG_BFRAME | \
+ V4L2_BUF_FLAG_KEYFRAME | \
+ V4L2_BUF_FLAG_TIMECODE | \
+ V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF)
/*
* __verify_planes_array() - verify that the planes array passed in struct
@@ -194,6 +197,7 @@ static int vb2_fill_vb2_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b
}
vbuf->sequence = 0;
vbuf->request_fd = -1;
+ vbuf->is_held = false;
if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) {
switch (b->memory) {
@@ -321,6 +325,8 @@ static int vb2_fill_vb2_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b
*/
vbuf->flags &= ~V4L2_BUF_FLAG_TIMECODE;
vbuf->field = b->field;
+ if (!(q->subsystem_flags & VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF))
+ vbuf->flags &= ~V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF;
} else {
/* Zero any output buffer flags as this is a capture buffer */
vbuf->flags &= ~V4L2_BUFFER_OUT_FLAGS;
@@ -654,6 +660,8 @@ static void fill_buf_caps(struct vb2_queue *q, u32 *caps)
*caps |= V4L2_BUF_CAP_SUPPORTS_USERPTR;
if (q->io_modes & VB2_DMABUF)
*caps |= V4L2_BUF_CAP_SUPPORTS_DMABUF;
+ if (q->subsystem_flags & VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF)
+ *caps |= V4L2_BUF_CAP_SUPPORTS_M2M_HOLD_CAPTURE_BUF;
#ifdef CONFIG_MEDIA_CONTROLLER_REQUEST_API
if (q->supports_requests)
*caps |= V4L2_BUF_CAP_SUPPORTS_REQUESTS;
diff --git a/drivers/media/dvb-frontends/cxd2820r_c.c b/drivers/media/dvb-frontends/cxd2820r_c.c
index 6f7eedb4c00e..0ba382948c51 100644
--- a/drivers/media/dvb-frontends/cxd2820r_c.c
+++ b/drivers/media/dvb-frontends/cxd2820r_c.c
@@ -298,7 +298,7 @@ int cxd2820r_sleep_c(struct dvb_frontend *fe)
struct cxd2820r_priv *priv = fe->demodulator_priv;
struct i2c_client *client = priv->client[0];
int ret;
- struct reg_val_mask tab[] = {
+ static const struct reg_val_mask tab[] = {
{ 0x000ff, 0x1f, 0xff },
{ 0x00085, 0x00, 0xff },
{ 0x00088, 0x01, 0xff },
diff --git a/drivers/media/dvb-frontends/cxd2820r_t.c b/drivers/media/dvb-frontends/cxd2820r_t.c
index d56c6f788196..fbdfa6bf38dc 100644
--- a/drivers/media/dvb-frontends/cxd2820r_t.c
+++ b/drivers/media/dvb-frontends/cxd2820r_t.c
@@ -392,7 +392,7 @@ int cxd2820r_sleep_t(struct dvb_frontend *fe)
struct cxd2820r_priv *priv = fe->demodulator_priv;
struct i2c_client *client = priv->client[0];
int ret;
- struct reg_val_mask tab[] = {
+	static const struct reg_val_mask tab[] = {
{ 0x000ff, 0x1f, 0xff },
{ 0x00085, 0x00, 0xff },
{ 0x00088, 0x01, 0xff },
diff --git a/drivers/media/dvb-frontends/cxd2820r_t2.c b/drivers/media/dvb-frontends/cxd2820r_t2.c
index f924a80b968a..34ef2bb2de34 100644
--- a/drivers/media/dvb-frontends/cxd2820r_t2.c
+++ b/drivers/media/dvb-frontends/cxd2820r_t2.c
@@ -386,7 +386,7 @@ int cxd2820r_sleep_t2(struct dvb_frontend *fe)
struct cxd2820r_priv *priv = fe->demodulator_priv;
struct i2c_client *client = priv->client[0];
int ret;
- struct reg_val_mask tab[] = {
+ static const struct reg_val_mask tab[] = {
{ 0x000ff, 0x1f, 0xff },
{ 0x00085, 0x00, 0xff },
{ 0x00088, 0x01, 0xff },
diff --git a/drivers/media/dvb-frontends/cxd2841er.c b/drivers/media/dvb-frontends/cxd2841er.c
index 1b30cf570803..758c95bc3b11 100644
--- a/drivers/media/dvb-frontends/cxd2841er.c
+++ b/drivers/media/dvb-frontends/cxd2841er.c
@@ -60,6 +60,7 @@ struct cxd2841er_priv {
enum cxd2841er_xtal xtal;
enum fe_caps caps;
u32 flags;
+ unsigned long stats_time;
};
static const struct cxd2841er_cnr_data s_cn_data[] = {
@@ -3279,9 +3280,15 @@ static int cxd2841er_get_frontend(struct dvb_frontend *fe,
p->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
if (status & FE_HAS_LOCK) {
+ if (priv->stats_time &&
+ (!time_after(jiffies, priv->stats_time)))
+ return 0;
+
+ /* Prevent retrieving stats faster than once per second */
+ priv->stats_time = jiffies + msecs_to_jiffies(1000);
+
cxd2841er_read_snr(fe);
cxd2841er_read_ucblocks(fe);
-
cxd2841er_read_ber(fe);
} else {
p->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
@@ -3360,6 +3367,9 @@ done:
p->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ /* Reset the wait for jiffies logic */
+ priv->stats_time = 0;
+
return ret;
}
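The stats_time logic added above is the usual jiffies throttle: record a deadline one second ahead and skip the expensive register reads until time_after() reports it has passed, while resetting the field to 0 re-arms the check immediately. A standalone sketch of the same shape, with hypothetical names:

static unsigned long next_stats;	/* 0 means no throttle armed */

static bool stats_read_allowed(void)
{
	if (next_stats && !time_after(jiffies, next_stats))
		return false;

	/* Allow at most one statistics read per second. */
	next_stats = jiffies + msecs_to_jiffies(1000);
	return true;
}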
diff --git a/drivers/media/dvb-frontends/drx39xyj/drxj.c b/drivers/media/dvb-frontends/drx39xyj/drxj.c
index 2f5af4813a74..ac7be872f460 100644
--- a/drivers/media/dvb-frontends/drx39xyj/drxj.c
+++ b/drivers/media/dvb-frontends/drx39xyj/drxj.c
@@ -4201,7 +4201,7 @@ int drxj_dap_scu_atomic_read_reg16(struct i2c_device_addr *dev_addr,
u16 *data, u32 flags)
{
u8 buf[2] = { 0 };
- int rc = -EIO;
+ int rc;
u16 word = 0;
if (!data)
diff --git a/drivers/media/dvb-frontends/mb86a20s.c b/drivers/media/dvb-frontends/mb86a20s.c
index 4e50441c247a..a7faf0cf8788 100644
--- a/drivers/media/dvb-frontends/mb86a20s.c
+++ b/drivers/media/dvb-frontends/mb86a20s.c
@@ -517,7 +517,7 @@ static void mb86a20s_reset_frontend_cache(struct dvb_frontend *fe)
* Estimates the bit rate using the per-segment bit rate given by
* ABNT/NBR 15601 spec (table 4).
*/
-static u32 isdbt_rate[3][5][4] = {
+static const u32 isdbt_rate[3][5][4] = {
{ /* DQPSK/QPSK */
{ 280850, 312060, 330420, 340430 }, /* 1/2 */
{ 374470, 416080, 440560, 453910 }, /* 2/3 */
@@ -539,13 +539,9 @@ static u32 isdbt_rate[3][5][4] = {
}
};
-static void mb86a20s_layer_bitrate(struct dvb_frontend *fe, u32 layer,
- u32 modulation, u32 forward_error_correction,
- u32 guard_interval,
- u32 segment)
+static u32 isdbt_layer_min_bitrate(struct dtv_frontend_properties *c,
+ u32 layer)
{
- struct mb86a20s_state *state = fe->demodulator_priv;
- u32 rate;
int mod, fec, guard;
/*
@@ -553,7 +549,7 @@ static void mb86a20s_layer_bitrate(struct dvb_frontend *fe, u32 layer,
* to consider the lowest bit rate, to avoid taking too long time
* to get BER.
*/
- switch (modulation) {
+ switch (c->layer[layer].modulation) {
case DQPSK:
case QPSK:
default:
@@ -567,7 +563,7 @@ static void mb86a20s_layer_bitrate(struct dvb_frontend *fe, u32 layer,
break;
}
- switch (forward_error_correction) {
+ switch (c->layer[layer].fec) {
default:
case FEC_1_2:
case FEC_AUTO:
@@ -587,7 +583,7 @@ static void mb86a20s_layer_bitrate(struct dvb_frontend *fe, u32 layer,
break;
}
- switch (guard_interval) {
+ switch (c->guard_interval) {
default:
case GUARD_INTERVAL_1_4:
guard = 0;
@@ -603,29 +599,14 @@ static void mb86a20s_layer_bitrate(struct dvb_frontend *fe, u32 layer,
break;
}
- /* Samples BER at BER_SAMPLING_RATE seconds */
- rate = isdbt_rate[mod][fec][guard] * segment * BER_SAMPLING_RATE;
-
- /* Avoids sampling too quickly or to overflow the register */
- if (rate < 256)
- rate = 256;
- else if (rate > (1 << 24) - 1)
- rate = (1 << 24) - 1;
-
- dev_dbg(&state->i2c->dev,
- "%s: layer %c bitrate: %d kbps; counter = %d (0x%06x)\n",
- __func__, 'A' + layer,
- segment * isdbt_rate[mod][fec][guard]/1000,
- rate, rate);
-
- state->estimated_rate[layer] = rate;
+ return isdbt_rate[mod][fec][guard] * c->layer[layer].segment_count;
}
static int mb86a20s_get_frontend(struct dvb_frontend *fe)
{
struct mb86a20s_state *state = fe->demodulator_priv;
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
- int layer, rc;
+ int layer, rc, rate, counter;
dev_dbg(&state->i2c->dev, "%s called.\n", __func__);
@@ -676,10 +657,21 @@ static int mb86a20s_get_frontend(struct dvb_frontend *fe)
dev_dbg(&state->i2c->dev, "%s: interleaving %d.\n",
__func__, rc);
c->layer[layer].interleaving = rc;
- mb86a20s_layer_bitrate(fe, layer, c->layer[layer].modulation,
- c->layer[layer].fec,
- c->guard_interval,
- c->layer[layer].segment_count);
+
+ rate = isdbt_layer_min_bitrate(c, layer);
+ counter = rate * BER_SAMPLING_RATE;
+
+		/* Avoid sampling too quickly or overflowing the register */
+ if (counter < 256)
+ counter = 256;
+ else if (counter > (1 << 24) - 1)
+ counter = (1 << 24) - 1;
+
+ dev_dbg(&state->i2c->dev,
+ "%s: layer %c bitrate: %d kbps; counter = %d (0x%06x)\n",
+ __func__, 'A' + layer, rate / 1000, counter, counter);
+
+ state->estimated_rate[layer] = counter;
}
rc = mb86a20s_writereg(state, 0x6d, 0x84);
diff --git a/drivers/media/dvb-frontends/mt312.c b/drivers/media/dvb-frontends/mt312.c
index 7cae7d632030..d43a67045dbe 100644
--- a/drivers/media/dvb-frontends/mt312.c
+++ b/drivers/media/dvb-frontends/mt312.c
@@ -135,11 +135,6 @@ static inline int mt312_writereg(struct mt312_state *state,
return mt312_write(state, reg, &tmp, 1);
}
-static inline u32 mt312_div(u32 a, u32 b)
-{
- return (a + (b / 2)) / b;
-}
-
static int mt312_reset(struct mt312_state *state, const u8 full)
{
return mt312_writereg(state, RESET, full ? 0x80 : 0x40);
@@ -187,7 +182,7 @@ static int mt312_get_symbol_rate(struct mt312_state *state, u32 *sr)
monitor = (buf[0] << 8) | buf[1];
dprintk("sr(auto) = %u\n",
- mt312_div(monitor * 15625, 4));
+ DIV_ROUND_CLOSEST(monitor * 15625, 4));
} else {
ret = mt312_writereg(state, MON_CTRL, 0x05);
if (ret < 0)
@@ -291,10 +286,10 @@ static int mt312_initfe(struct dvb_frontend *fe)
}
/* SYS_CLK */
- buf[0] = mt312_div(state->xtal * state->freq_mult * 2, 1000000);
+ buf[0] = DIV_ROUND_CLOSEST(state->xtal * state->freq_mult * 2, 1000000);
/* DISEQC_RATIO */
- buf[1] = mt312_div(state->xtal, 22000 * 4);
+ buf[1] = DIV_ROUND_CLOSEST(state->xtal, 22000 * 4);
ret = mt312_write(state, SYS_CLK, buf, sizeof(buf));
if (ret < 0)
@@ -610,7 +605,7 @@ static int mt312_set_frontend(struct dvb_frontend *fe)
}
/* sr = (u16)(sr * 256.0 / 1000000.0) */
- sr = mt312_div(p->symbol_rate * 4, 15625);
+ sr = DIV_ROUND_CLOSEST(p->symbol_rate * 4, 15625);
/* SYM_RATE */
buf[0] = (sr >> 8) & 0x3f;
diff --git a/drivers/media/dvb-frontends/si2168.h b/drivers/media/dvb-frontends/si2168.h
index 50dccb394efa..ecd21adf8950 100644
--- a/drivers/media/dvb-frontends/si2168.h
+++ b/drivers/media/dvb-frontends/si2168.h
@@ -9,38 +9,43 @@
#define SI2168_H
#include <linux/dvb/frontend.h>
-/*
- * I2C address
- * 0x64
+/**
+ * struct si2168_config - configuration parameters for si2168
+ *
+ * @fe:
+ * frontend returned by driver
+ * @i2c_adapter:
+ * tuner I2C adapter returned by driver
+ * @ts_mode:
+ * Transport Stream mode. Can be:
+ * - %SI2168_TS_PARALLEL
+ * - %SI2168_TS_SERIAL
+ * - %SI2168_TS_TRISTATE
+ * - %SI2168_TS_CLK_MANUAL
+ * @ts_clock_inv:
+ * TS clock inverted
+ * @ts_clock_gapped:
+ * TS clock gapped
+ * @spectral_inversion:
+ * Inverted spectrum
+ *
+ * Note:
+ * The I2C address of this demod is 0x64.
*/
struct si2168_config {
- /*
- * frontend
- * returned by driver
- */
struct dvb_frontend **fe;
-
- /*
- * tuner I2C adapter
- * returned by driver
- */
struct i2c_adapter **i2c_adapter;
- /* TS mode */
#define SI2168_TS_PARALLEL 0x06
#define SI2168_TS_SERIAL 0x03
#define SI2168_TS_TRISTATE 0x00
#define SI2168_TS_CLK_MANUAL 0x20
u8 ts_mode;
- /* TS clock inverted */
- bool ts_clock_inv;
-
- /* TS clock gapped */
- bool ts_clock_gapped;
-
- /* Inverted spectrum */
- bool spectral_inversion;
+ /* Flags */
+ unsigned int ts_clock_inv:1;
+ unsigned int ts_clock_gapped:1;
+ unsigned int spectral_inversion:1;
};
#endif
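The struct above (and si2168_dev below) replaces separate bool members with single-bit bitfields. A compilable sketch of the space trade-off for the five flags in si2168_dev; exact sizes are ABI-dependent and shown only to illustrate the idea:

#include <stdio.h>

struct flags_bool { _Bool a, b, c, d, e; };	/* at least 5 bytes */
struct flags_bits { unsigned int a:1, b:1, c:1, d:1, e:1; }; /* one int */

int main(void)
{
	printf("%zu vs %zu\n", sizeof(struct flags_bool),
	       sizeof(struct flags_bits));
	return 0;
}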
diff --git a/drivers/media/dvb-frontends/si2168_priv.h b/drivers/media/dvb-frontends/si2168_priv.h
index 804d5b30c697..18bea5222082 100644
--- a/drivers/media/dvb-frontends/si2168_priv.h
+++ b/drivers/media/dvb-frontends/si2168_priv.h
@@ -34,12 +34,12 @@ struct si2168_dev {
unsigned int chip_id;
unsigned int version;
const char *firmware_name;
- bool active;
- bool warm;
u8 ts_mode;
- bool ts_clock_inv;
- bool ts_clock_gapped;
- bool spectral_inversion;
+ unsigned int active:1;
+ unsigned int warm:1;
+ unsigned int ts_clock_inv:1;
+ unsigned int ts_clock_gapped:1;
+ unsigned int spectral_inversion:1;
};
/* firmware command struct */
diff --git a/drivers/media/dvb-frontends/tc90522.c b/drivers/media/dvb-frontends/tc90522.c
index 849d63dbc279..e83836b29715 100644
--- a/drivers/media/dvb-frontends/tc90522.c
+++ b/drivers/media/dvb-frontends/tc90522.c
@@ -685,10 +685,33 @@ tc90522_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
p += new_msgs[j].len;
}
- if (i < num)
+ if (i < num) {
ret = -ENOMEM;
- else
+ } else if (!state->cfg.split_tuner_read_i2c || rd_num == 0) {
ret = i2c_transfer(state->i2c_client->adapter, new_msgs, j);
+ } else {
+ /*
+ * Split transactions at each I2C_M_RD message.
+ * Some parent devices require this,
+ * such as the Friio (see dvb-usb-gl861).
+ */
+ int from, to;
+
+ ret = 0;
+ from = 0;
+ do {
+ int r;
+
+ to = from + 1;
+ while (to < j && !(new_msgs[to].flags & I2C_M_RD))
+ to++;
+ r = i2c_transfer(state->i2c_client->adapter,
+ &new_msgs[from], to - from);
+ ret = (r <= 0) ? r : ret + r;
+ from = to;
+ } while (from < j && ret > 0);
+ }
+
if (ret >= 0 && ret < j)
ret = -EIO;
kfree(new_msgs);
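The new branch above splits the message array so that every I2C_M_RD message opens a fresh i2c_transfer() call, for parent adapters that cannot chain a read behind a write in one transaction. The same chunking, lifted into a hypothetical standalone helper:

#include <linux/errno.h>
#include <linux/i2c.h>

/* Issue msgs[0..n) as several transactions, starting a new one at
 * every read message. Returns the total message count or an error. */
static int demo_split_xfer(struct i2c_adapter *adap,
			   struct i2c_msg *msgs, int n)
{
	int from = 0, total = 0;

	while (from < n) {
		int to = from + 1, r;

		while (to < n && !(msgs[to].flags & I2C_M_RD))
			to++;
		r = i2c_transfer(adap, &msgs[from], to - from);
		if (r <= 0)
			return r ? r : -EIO;
		total += r;
		from = to;
	}
	return total;
}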
diff --git a/drivers/media/dvb-frontends/tc90522.h b/drivers/media/dvb-frontends/tc90522.h
index ac0e2ab51924..07e3813bf590 100644
--- a/drivers/media/dvb-frontends/tc90522.h
+++ b/drivers/media/dvb-frontends/tc90522.h
@@ -28,6 +28,9 @@ struct tc90522_config {
/* [OUT] tuner I2C adapter returned by driver */
struct i2c_adapter *tuner_i2c;
+
+ /* [IN] use two separate I2C transactions for one tuner read */
+ bool split_tuner_read_i2c;
};
#endif /* TC90522_H */
diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
index 7eee1812bba3..c68e002d26ea 100644
--- a/drivers/media/i2c/Kconfig
+++ b/drivers/media/i2c/Kconfig
@@ -566,10 +566,23 @@ config VIDEO_APTINA_PLL
config VIDEO_SMIAPP_PLL
tristate
+if MEDIA_CAMERA_SUPPORT
+
+config VIDEO_HI556
+ tristate "Hynix Hi-556 sensor support"
+ depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ depends on MEDIA_CONTROLLER
+ select V4L2_FWNODE
+ help
+ This is a Video4Linux2 sensor driver for the Hynix
+ Hi-556 camera.
+
+ To compile this driver as a module, choose M here: the
+ module will be called hi556.
+
config VIDEO_IMX214
tristate "Sony IMX214 sensor support"
depends on GPIOLIB && I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
- depends on MEDIA_CAMERA_SUPPORT
depends on V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the Sony
@@ -581,7 +594,6 @@ config VIDEO_IMX214
config VIDEO_IMX258
tristate "Sony IMX258 sensor support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
- depends on MEDIA_CAMERA_SUPPORT
help
This is a Video4Linux2 sensor driver for the Sony
IMX258 camera.
@@ -592,16 +604,25 @@ config VIDEO_IMX258
config VIDEO_IMX274
tristate "Sony IMX274 sensor support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
- depends on MEDIA_CAMERA_SUPPORT
select REGMAP_I2C
help
This is a V4L2 sensor driver for the Sony IMX274
CMOS image sensor.
+config VIDEO_IMX290
+ tristate "Sony IMX290 sensor support"
+ depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ select V4L2_FWNODE
+ help
+ This is a Video4Linux2 sensor driver for the Sony
+ IMX290 camera sensor.
+
+ To compile this driver as a module, choose M here: the
+ module will be called imx290.
+
config VIDEO_IMX319
tristate "Sony IMX319 sensor support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
- depends on MEDIA_CAMERA_SUPPORT
help
This is a Video4Linux2 sensor driver for the Sony
IMX319 camera.
@@ -612,7 +633,6 @@ config VIDEO_IMX319
config VIDEO_IMX355
tristate "Sony IMX355 sensor support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
- depends on MEDIA_CAMERA_SUPPORT
help
This is a Video4Linux2 sensor driver for the Sony
IMX355 camera.
@@ -623,7 +643,6 @@ config VIDEO_IMX355
config VIDEO_OV2640
tristate "OmniVision OV2640 sensor support"
depends on VIDEO_V4L2 && I2C
- depends on MEDIA_CAMERA_SUPPORT
help
This is a Video4Linux2 sensor driver for the OmniVision
OV2640 camera.
@@ -633,8 +652,7 @@ config VIDEO_OV2640
config VIDEO_OV2659
tristate "OmniVision OV2659 sensor support"
- depends on VIDEO_V4L2 && I2C
- depends on MEDIA_CAMERA_SUPPORT
+ depends on VIDEO_V4L2 && I2C && GPIOLIB
select V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the OmniVision
@@ -646,7 +664,6 @@ config VIDEO_OV2659
config VIDEO_OV2680
tristate "OmniVision OV2680 sensor support"
depends on VIDEO_V4L2 && I2C && MEDIA_CONTROLLER
- depends on MEDIA_CAMERA_SUPPORT
select V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the OmniVision
@@ -658,7 +675,6 @@ config VIDEO_OV2680
config VIDEO_OV2685
tristate "OmniVision OV2685 sensor support"
depends on VIDEO_V4L2 && I2C && MEDIA_CONTROLLER
- depends on MEDIA_CAMERA_SUPPORT
select V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the OmniVision
@@ -671,7 +687,6 @@ config VIDEO_OV5640
tristate "OmniVision OV5640 sensor support"
depends on OF
depends on GPIOLIB && VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API
- depends on MEDIA_CAMERA_SUPPORT
select V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the OmniVision
@@ -681,7 +696,6 @@ config VIDEO_OV5645
tristate "OmniVision OV5645 sensor support"
depends on OF
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
- depends on MEDIA_CAMERA_SUPPORT
select V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the OmniVision
@@ -693,7 +707,6 @@ config VIDEO_OV5645
config VIDEO_OV5647
tristate "OmniVision OV5647 sensor support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
- depends on MEDIA_CAMERA_SUPPORT
select V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the OmniVision
@@ -705,7 +718,6 @@ config VIDEO_OV5647
config VIDEO_OV6650
tristate "OmniVision OV6650 sensor support"
depends on I2C && VIDEO_V4L2
- depends on MEDIA_CAMERA_SUPPORT
help
This is a Video4Linux2 sensor driver for the OmniVision
OV6650 camera.
@@ -716,7 +728,6 @@ config VIDEO_OV6650
config VIDEO_OV5670
tristate "OmniVision OV5670 sensor support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
- depends on MEDIA_CAMERA_SUPPORT
depends on MEDIA_CONTROLLER
select V4L2_FWNODE
help
@@ -729,7 +740,6 @@ config VIDEO_OV5670
config VIDEO_OV5675
tristate "OmniVision OV5675 sensor support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
- depends on MEDIA_CAMERA_SUPPORT
depends on MEDIA_CONTROLLER
select V4L2_FWNODE
help
@@ -742,7 +752,7 @@ config VIDEO_OV5675
config VIDEO_OV5695
tristate "OmniVision OV5695 sensor support"
depends on I2C && VIDEO_V4L2
- depends on MEDIA_CAMERA_SUPPORT
+ select V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the OmniVision
OV5695 camera.
@@ -753,7 +763,6 @@ config VIDEO_OV5695
config VIDEO_OV7251
tristate "OmniVision OV7251 sensor support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
- depends on MEDIA_CAMERA_SUPPORT
select V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the OmniVision
@@ -765,7 +774,6 @@ config VIDEO_OV7251
config VIDEO_OV772X
tristate "OmniVision OV772x sensor support"
depends on I2C && VIDEO_V4L2
- depends on MEDIA_CAMERA_SUPPORT
select REGMAP_SCCB
help
This is a Video4Linux2 sensor driver for the OmniVision
@@ -777,7 +785,6 @@ config VIDEO_OV772X
config VIDEO_OV7640
tristate "OmniVision OV7640 sensor support"
depends on I2C && VIDEO_V4L2
- depends on MEDIA_CAMERA_SUPPORT
help
This is a Video4Linux2 sensor driver for the OmniVision
OV7640 camera.
@@ -788,7 +795,6 @@ config VIDEO_OV7640
config VIDEO_OV7670
tristate "OmniVision OV7670 sensor support"
depends on I2C && VIDEO_V4L2
- depends on MEDIA_CAMERA_SUPPORT
select V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the OmniVision
@@ -798,7 +804,6 @@ config VIDEO_OV7670
config VIDEO_OV7740
tristate "OmniVision OV7740 sensor support"
depends on I2C && VIDEO_V4L2
- depends on MEDIA_CAMERA_SUPPORT
help
This is a Video4Linux2 sensor driver for the OmniVision
OV7740 VGA camera sensor.
@@ -806,7 +811,6 @@ config VIDEO_OV7740
config VIDEO_OV8856
tristate "OmniVision OV8856 sensor support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
- depends on MEDIA_CAMERA_SUPPORT
select V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the OmniVision
@@ -833,7 +837,6 @@ config VIDEO_OV9650
config VIDEO_OV13858
tristate "OmniVision OV13858 sensor support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
- depends on MEDIA_CAMERA_SUPPORT
select V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the OmniVision
@@ -842,7 +845,6 @@ config VIDEO_OV13858
config VIDEO_VS6624
tristate "ST VS6624 sensor support"
depends on VIDEO_V4L2 && I2C
- depends on MEDIA_CAMERA_SUPPORT
help
This is a Video4Linux2 sensor driver for the ST VS6624
camera.
@@ -853,7 +855,6 @@ config VIDEO_VS6624
config VIDEO_MT9M001
tristate "mt9m001 support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
- depends on MEDIA_CAMERA_SUPPORT
help
This driver supports MT9M001 cameras from Micron, monochrome
and colour models.
@@ -861,7 +862,6 @@ config VIDEO_MT9M001
config VIDEO_MT9M032
tristate "MT9M032 camera sensor support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
- depends on MEDIA_CAMERA_SUPPORT
select VIDEO_APTINA_PLL
help
This driver supports MT9M032 camera sensors from Aptina, monochrome
@@ -878,7 +878,6 @@ config VIDEO_MT9M111
config VIDEO_MT9P031
tristate "Aptina MT9P031 support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
- depends on MEDIA_CAMERA_SUPPORT
select VIDEO_APTINA_PLL
help
This is a Video4Linux2 sensor driver for the Aptina
@@ -887,7 +886,6 @@ config VIDEO_MT9P031
config VIDEO_MT9T001
tristate "Aptina MT9T001 support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
- depends on MEDIA_CAMERA_SUPPORT
help
This is a Video4Linux2 sensor driver for the Aptina
(Micron) MT9T001 3 Mpixel camera.
@@ -895,7 +893,6 @@ config VIDEO_MT9T001
config VIDEO_MT9T112
tristate "Aptina MT9T111/MT9T112 support"
depends on I2C && VIDEO_V4L2
- depends on MEDIA_CAMERA_SUPPORT
help
This is a Video4Linux2 sensor driver for the Aptina
(Micron) MT9T111 and MT9T112 3 Mpixel camera.
@@ -906,7 +903,6 @@ config VIDEO_MT9T112
config VIDEO_MT9V011
tristate "Micron mt9v011 sensor support"
depends on I2C && VIDEO_V4L2
- depends on MEDIA_CAMERA_SUPPORT
help
This is a Video4Linux2 sensor driver for the Micron
MT9V011 1.3 Mpixel camera. It currently only works with the
@@ -915,7 +911,6 @@ config VIDEO_MT9V011
config VIDEO_MT9V032
tristate "Micron MT9V032 sensor support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
- depends on MEDIA_CAMERA_SUPPORT
select REGMAP_I2C
select V4L2_FWNODE
help
@@ -925,7 +920,6 @@ config VIDEO_MT9V032
config VIDEO_MT9V111
tristate "Aptina MT9V111 sensor support"
depends on I2C && VIDEO_V4L2
- depends on MEDIA_CAMERA_SUPPORT
help
This is a Video4Linux2 sensor driver for the Aptina/Micron
MT9V111 sensor.
@@ -936,14 +930,12 @@ config VIDEO_MT9V111
config VIDEO_SR030PC30
tristate "Siliconfile SR030PC30 sensor support"
depends on I2C && VIDEO_V4L2
- depends on MEDIA_CAMERA_SUPPORT
help
This driver supports the SR030PC30 VGA camera from Siliconfile
config VIDEO_NOON010PC30
tristate "Siliconfile NOON010PC30 sensor support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
- depends on MEDIA_CAMERA_SUPPORT
help
This driver supports the NOON010PC30 CIF camera from Siliconfile
@@ -952,7 +944,6 @@ source "drivers/media/i2c/m5mols/Kconfig"
config VIDEO_RJ54N1
tristate "Sharp RJ54N1CB0C sensor support"
depends on I2C && VIDEO_V4L2
- depends on MEDIA_CAMERA_SUPPORT
help
This is a V4L2 sensor driver for Sharp RJ54N1CB0C CMOS image
sensor.
@@ -962,7 +953,6 @@ config VIDEO_RJ54N1
config VIDEO_S5K6AA
tristate "Samsung S5K6AAFX sensor support"
- depends on MEDIA_CAMERA_SUPPORT
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
help
This is a V4L2 sensor driver for Samsung S5K6AA(FX) 1.3M
@@ -970,7 +960,6 @@ config VIDEO_S5K6AA
config VIDEO_S5K6A3
tristate "Samsung S5K6A3 sensor support"
- depends on MEDIA_CAMERA_SUPPORT
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
help
This is a V4L2 sensor driver for Samsung S5K6A3 raw
@@ -1002,12 +991,15 @@ config VIDEO_S5C73M3
help
This is a V4L2 sensor driver for Samsung S5C73M3
8 Mpixel camera.
+endif
comment "Lens drivers"
+if MEDIA_CAMERA_SUPPORT
+
config VIDEO_AD5820
tristate "AD5820 lens voice coil support"
- depends on I2C && VIDEO_V4L2 && MEDIA_CONTROLLER
+ depends on GPIOLIB && I2C && VIDEO_V4L2 && MEDIA_CONTROLLER
help
This is a driver for the AD5820 camera lens voice coil.
It is used for example in Nokia N900 (RX-51).
@@ -1042,12 +1034,15 @@ config VIDEO_DW9807_VCM
capability. This is designed for linear control of
voice coil motors, controlled via I2C serial interface.
+endif
+
comment "Flash devices"
+if MEDIA_CAMERA_SUPPORT
+
config VIDEO_ADP1653
tristate "ADP1653 flash support"
depends on I2C && VIDEO_V4L2 && MEDIA_CONTROLLER
- depends on MEDIA_CAMERA_SUPPORT
help
This is a driver for the ADP1653 flash controller. It is used for
example in Nokia N900.
@@ -1055,7 +1050,6 @@ config VIDEO_ADP1653
config VIDEO_LM3560
tristate "LM3560 dual flash driver support"
depends on I2C && VIDEO_V4L2 && MEDIA_CONTROLLER
- depends on MEDIA_CAMERA_SUPPORT
select REGMAP_I2C
help
This is a driver for the lm3560 dual flash controllers. It controls
@@ -1064,12 +1058,13 @@ config VIDEO_LM3560
config VIDEO_LM3646
tristate "LM3646 dual flash driver support"
depends on I2C && VIDEO_V4L2 && MEDIA_CONTROLLER
- depends on MEDIA_CAMERA_SUPPORT
select REGMAP_I2C
help
This is a driver for the lm3646 dual flash controllers. It controls
flash and torch LEDs.
+endif
+
comment "Video improvement chips"
config VIDEO_UPD64031A
@@ -1113,6 +1108,7 @@ comment "SDR tuner chips"
config SDR_MAX2175
tristate "Maxim 2175 RF to Bits tuner"
depends on VIDEO_V4L2 && MEDIA_SDR_SUPPORT && I2C
+ select REGMAP_I2C
help
Support for Maxim 2175 tuner. It is an advanced analog/digital
radio receiver with RF-to-Bits front-end designed for SDR solutions.
diff --git a/drivers/media/i2c/Makefile b/drivers/media/i2c/Makefile
index beb170b002dc..c147bb9d28db 100644
--- a/drivers/media/i2c/Makefile
+++ b/drivers/media/i2c/Makefile
@@ -109,9 +109,11 @@ obj-$(CONFIG_VIDEO_I2C) += video-i2c.o
obj-$(CONFIG_VIDEO_ML86V7667) += ml86v7667.o
obj-$(CONFIG_VIDEO_OV2659) += ov2659.o
obj-$(CONFIG_VIDEO_TC358743) += tc358743.o
+obj-$(CONFIG_VIDEO_HI556) += hi556.o
obj-$(CONFIG_VIDEO_IMX214) += imx214.o
obj-$(CONFIG_VIDEO_IMX258) += imx258.o
obj-$(CONFIG_VIDEO_IMX274) += imx274.o
+obj-$(CONFIG_VIDEO_IMX290) += imx290.o
obj-$(CONFIG_VIDEO_IMX319) += imx319.o
obj-$(CONFIG_VIDEO_IMX355) += imx355.o
obj-$(CONFIG_VIDEO_ST_MIPID02) += st-mipid02.o
diff --git a/drivers/media/i2c/ad5820.c b/drivers/media/i2c/ad5820.c
index 925c171e7797..19c74db0649f 100644
--- a/drivers/media/i2c/ad5820.c
+++ b/drivers/media/i2c/ad5820.c
@@ -19,13 +19,12 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/regulator/consumer.h>
+#include <linux/gpio/consumer.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-subdev.h>
-#define AD5820_NAME "ad5820"
-
/* Register definitions */
#define AD5820_POWER_DOWN (1 << 15)
#define AD5820_DAC_SHIFT 4
@@ -47,6 +46,8 @@ struct ad5820_device {
u32 focus_ramp_time;
u32 focus_ramp_mode;
+ struct gpio_desc *enable_gpio;
+
struct mutex power_lock;
int power_count;
@@ -114,6 +115,8 @@ static int ad5820_power_off(struct ad5820_device *coil, bool standby)
ret = ad5820_update_hw(coil);
}
+ gpiod_set_value_cansleep(coil->enable_gpio, 0);
+
ret2 = regulator_disable(coil->vana);
if (ret)
return ret;
@@ -128,6 +131,8 @@ static int ad5820_power_on(struct ad5820_device *coil, bool restore)
if (ret < 0)
return ret;
+ gpiod_set_value_cansleep(coil->enable_gpio, 1);
+
if (restore) {
/* Restore the hardware settings. */
coil->standby = false;
@@ -138,6 +143,7 @@ static int ad5820_power_on(struct ad5820_device *coil, bool restore)
return 0;
fail:
+ gpiod_set_value_cansleep(coil->enable_gpio, 0);
coil->standby = true;
regulator_disable(coil->vana);
@@ -304,11 +310,21 @@ static int ad5820_probe(struct i2c_client *client,
return ret;
}
+ coil->enable_gpio = devm_gpiod_get_optional(&client->dev, "enable",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(coil->enable_gpio)) {
+ ret = PTR_ERR(coil->enable_gpio);
+ if (ret != -EPROBE_DEFER)
+ dev_err(&client->dev, "could not get enable gpio\n");
+ return ret;
+ }
+
mutex_init(&coil->power_lock);
v4l2_i2c_subdev_init(&coil->subdev, client, &ad5820_ops);
coil->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
coil->subdev.internal_ops = &ad5820_internal_ops;
+ coil->subdev.entity.function = MEDIA_ENT_F_LENS;
strscpy(coil->subdev.name, "ad5820 focus", sizeof(coil->subdev.name));
ret = media_entity_pads_init(&coil->subdev.entity, 0, NULL);
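devm_gpiod_get_optional(), used in the hunk above, returns NULL rather than an error when the firmware describes no "enable" GPIO, and gpiod_set_value_cansleep() is a no-op on a NULL descriptor, so the power paths need no conditionals. A sketch of the pattern with a hypothetical helper name:

#include <linux/err.h>
#include <linux/gpio/consumer.h>

static int demo_get_enable_gpio(struct device *dev,
				struct gpio_desc **out)
{
	struct gpio_desc *gpio;

	gpio = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_LOW);
	if (IS_ERR(gpio))
		return PTR_ERR(gpio);	/* may be -EPROBE_DEFER */

	*out = gpio;	/* NULL when the GPIO is simply absent */
	return 0;
}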
@@ -341,17 +357,28 @@ static int ad5820_remove(struct i2c_client *client)
}
static const struct i2c_device_id ad5820_id_table[] = {
- { AD5820_NAME, 0 },
+ { "ad5820", 0 },
+ { "ad5821", 0 },
+ { "ad5823", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, ad5820_id_table);
+static const struct of_device_id ad5820_of_table[] = {
+ { .compatible = "adi,ad5820" },
+ { .compatible = "adi,ad5821" },
+ { .compatible = "adi,ad5823" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ad5820_of_table);
+
static SIMPLE_DEV_PM_OPS(ad5820_pm, ad5820_suspend, ad5820_resume);
static struct i2c_driver ad5820_i2c_driver = {
.driver = {
- .name = AD5820_NAME,
+ .name = "ad5820",
.pm = &ad5820_pm,
+ .of_match_table = ad5820_of_table,
},
.probe = ad5820_probe,
.remove = ad5820_remove,
diff --git a/drivers/media/i2c/adv7180.c b/drivers/media/i2c/adv7180.c
index e780969cc2f2..6528e2343fc8 100644
--- a/drivers/media/i2c/adv7180.c
+++ b/drivers/media/i2c/adv7180.c
@@ -1309,9 +1309,6 @@ static int adv7180_probe(struct i2c_client *client,
if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
return -EIO;
- v4l_info(client, "chip found @ 0x%02x (%s)\n",
- client->addr, client->adapter->name);
-
state = devm_kzalloc(&client->dev, sizeof(*state), GFP_KERNEL);
if (state == NULL)
return -ENOMEM;
@@ -1382,6 +1379,9 @@ static int adv7180_probe(struct i2c_client *client,
if (ret)
goto err_free_irq;
+ v4l_info(client, "chip found @ 0x%02x (%s)\n",
+ client->addr, client->adapter->name);
+
return 0;
err_free_irq:
diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c
index 885619841719..0855f648416d 100644
--- a/drivers/media/i2c/adv7842.c
+++ b/drivers/media/i2c/adv7842.c
@@ -2547,7 +2547,7 @@ struct adv7842_cfg_read_infoframe {
u8 payload_addr;
};
-static void log_infoframe(struct v4l2_subdev *sd, struct adv7842_cfg_read_infoframe *cri)
+static void log_infoframe(struct v4l2_subdev *sd, const struct adv7842_cfg_read_infoframe *cri)
{
int i;
u8 buffer[32];
@@ -2585,7 +2585,7 @@ static void log_infoframe(struct v4l2_subdev *sd, struct adv7842_cfg_read_infofr
static void adv7842_log_infoframes(struct v4l2_subdev *sd)
{
int i;
- struct adv7842_cfg_read_infoframe cri[] = {
+ static const struct adv7842_cfg_read_infoframe cri[] = {
{ "AVI", 0x01, 0xe0, 0x00 },
{ "Audio", 0x02, 0xe3, 0x1c },
{ "SDP", 0x04, 0xe6, 0x2a },
diff --git a/drivers/media/i2c/bt819.c b/drivers/media/i2c/bt819.c
index 43336175c7d9..73bc50c919d7 100644
--- a/drivers/media/i2c/bt819.c
+++ b/drivers/media/i2c/bt819.c
@@ -157,7 +157,7 @@ static int bt819_init(struct v4l2_subdev *sd)
0x12, 0x04, /* 0x12 Output Format */
0x13, 0x20, /* 0x13 Vertical Scaling msb 0x00
chroma comb OFF, line drop scaling, interlace scaling
- BUG? Why does turning the chroma comb on fuck up color?
+ BUG? Why does turning the chroma comb on screw up color?
Bug in the bt819 stepping on my board?
*/
0x14, 0x00, /* 0x14 Vertical Scaling lsb */
diff --git a/drivers/media/i2c/hi556.c b/drivers/media/i2c/hi556.c
new file mode 100644
index 000000000000..c66cd1446c0f
--- /dev/null
+++ b/drivers/media/i2c/hi556.c
@@ -0,0 +1,1200 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2019 Intel Corporation.
+
+#include <asm/unaligned.h>
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+
+#define HI556_REG_VALUE_08BIT 1
+#define HI556_REG_VALUE_16BIT 2
+#define HI556_REG_VALUE_24BIT 3
+
+#define HI556_LINK_FREQ_437MHZ 437000000ULL
+#define HI556_MCLK 19200000
+#define HI556_DATA_LANES 2
+#define HI556_RGB_DEPTH 10
+
+#define HI556_REG_CHIP_ID 0x0f16
+#define HI556_CHIP_ID 0x0556
+
+#define HI556_REG_MODE_SELECT 0x0a00
+#define HI556_MODE_STANDBY 0x0000
+#define HI556_MODE_STREAMING 0x0100
+
+/* vertical-timings from sensor */
+#define HI556_REG_FLL 0x0006
+#define HI556_FLL_30FPS 0x0814
+#define HI556_FLL_30FPS_MIN 0x0814
+#define HI556_FLL_MAX 0x7fff
+
+/* horizontal-timings from sensor */
+#define HI556_REG_LLP 0x0008
+
+/* Exposure controls from sensor */
+#define HI556_REG_EXPOSURE 0x0074
+#define HI556_EXPOSURE_MIN 6
+#define HI556_EXPOSURE_MAX_MARGIN 2
+#define HI556_EXPOSURE_STEP 1
+
+/* Analog gain controls from sensor */
+#define HI556_REG_ANALOG_GAIN 0x0077
+#define HI556_ANAL_GAIN_MIN 0
+#define HI556_ANAL_GAIN_MAX 240
+#define HI556_ANAL_GAIN_STEP 1
+
+/* Digital gain controls from sensor */
+#define HI556_REG_MWB_GR_GAIN 0x0078
+#define HI556_REG_MWB_GB_GAIN 0x007a
+#define HI556_REG_MWB_R_GAIN 0x007c
+#define HI556_REG_MWB_B_GAIN 0x007e
+#define HI556_DGTL_GAIN_MIN 0
+#define HI556_DGTL_GAIN_MAX 2048
+#define HI556_DGTL_GAIN_STEP 1
+#define HI556_DGTL_GAIN_DEFAULT 256
+
+/* Test Pattern Control */
+#define HI556_REG_ISP 0x0a05
+#define HI556_REG_ISP_TPG_EN 0x01
+#define HI556_REG_TEST_PATTERN 0x0201
+
+enum {
+ HI556_LINK_FREQ_437MHZ_INDEX,
+};
+
+struct hi556_reg {
+ u16 address;
+ u16 val;
+};
+
+struct hi556_reg_list {
+ u32 num_of_regs;
+ const struct hi556_reg *regs;
+};
+
+struct hi556_link_freq_config {
+ const struct hi556_reg_list reg_list;
+};
+
+struct hi556_mode {
+ /* Frame width in pixels */
+ u32 width;
+
+ /* Frame height in pixels */
+ u32 height;
+
+ /* Horizontal timing size */
+ u32 llp;
+
+ /* Default vertical timing size */
+ u32 fll_def;
+
+ /* Min vertical timing size */
+ u32 fll_min;
+
+ /* Link frequency needed for this resolution */
+ u32 link_freq_index;
+
+ /* Sensor register settings for this resolution */
+ const struct hi556_reg_list reg_list;
+};
+
+#define to_hi556(_sd) container_of(_sd, struct hi556, sd)
+
+/* Sensor initialization */
+static const struct hi556_reg mipi_data_rate_874mbps[] = {
+ {0x0e00, 0x0102},
+ {0x0e02, 0x0102},
+ {0x0e0c, 0x0100},
+ {0x2000, 0x7400},
+ {0x2002, 0x001c},
+ {0x2004, 0x0242},
+ {0x2006, 0x0942},
+ {0x2008, 0x7007},
+ {0x200a, 0x0fd9},
+ {0x200c, 0x0259},
+ {0x200e, 0x7008},
+ {0x2010, 0x160e},
+ {0x2012, 0x0047},
+ {0x2014, 0x2118},
+ {0x2016, 0x0041},
+ {0x2018, 0x00d8},
+ {0x201a, 0x0145},
+ {0x201c, 0x0006},
+ {0x201e, 0x0181},
+ {0x2020, 0x13cc},
+ {0x2022, 0x2057},
+ {0x2024, 0x7001},
+ {0x2026, 0x0fca},
+ {0x2028, 0x00cb},
+ {0x202a, 0x009f},
+ {0x202c, 0x7002},
+ {0x202e, 0x13cc},
+ {0x2030, 0x019b},
+ {0x2032, 0x014d},
+ {0x2034, 0x2987},
+ {0x2036, 0x2766},
+ {0x2038, 0x0020},
+ {0x203a, 0x2060},
+ {0x203c, 0x0e5d},
+ {0x203e, 0x181d},
+ {0x2040, 0x2066},
+ {0x2042, 0x20c4},
+ {0x2044, 0x5000},
+ {0x2046, 0x0005},
+ {0x2048, 0x0000},
+ {0x204a, 0x01db},
+ {0x204c, 0x025a},
+ {0x204e, 0x00c0},
+ {0x2050, 0x0005},
+ {0x2052, 0x0006},
+ {0x2054, 0x0ad9},
+ {0x2056, 0x0259},
+ {0x2058, 0x0618},
+ {0x205a, 0x0258},
+ {0x205c, 0x2266},
+ {0x205e, 0x20c8},
+ {0x2060, 0x2060},
+ {0x2062, 0x707b},
+ {0x2064, 0x0fdd},
+ {0x2066, 0x81b8},
+ {0x2068, 0x5040},
+ {0x206a, 0x0020},
+ {0x206c, 0x5060},
+ {0x206e, 0x3143},
+ {0x2070, 0x5081},
+ {0x2072, 0x025c},
+ {0x2074, 0x7800},
+ {0x2076, 0x7400},
+ {0x2078, 0x001c},
+ {0x207a, 0x0242},
+ {0x207c, 0x0942},
+ {0x207e, 0x0bd9},
+ {0x2080, 0x0259},
+ {0x2082, 0x7008},
+ {0x2084, 0x160e},
+ {0x2086, 0x0047},
+ {0x2088, 0x2118},
+ {0x208a, 0x0041},
+ {0x208c, 0x00d8},
+ {0x208e, 0x0145},
+ {0x2090, 0x0006},
+ {0x2092, 0x0181},
+ {0x2094, 0x13cc},
+ {0x2096, 0x2057},
+ {0x2098, 0x7001},
+ {0x209a, 0x0fca},
+ {0x209c, 0x00cb},
+ {0x209e, 0x009f},
+ {0x20a0, 0x7002},
+ {0x20a2, 0x13cc},
+ {0x20a4, 0x019b},
+ {0x20a6, 0x014d},
+ {0x20a8, 0x2987},
+ {0x20aa, 0x2766},
+ {0x20ac, 0x0020},
+ {0x20ae, 0x2060},
+ {0x20b0, 0x0e5d},
+ {0x20b2, 0x181d},
+ {0x20b4, 0x2066},
+ {0x20b6, 0x20c4},
+ {0x20b8, 0x50a0},
+ {0x20ba, 0x0005},
+ {0x20bc, 0x0000},
+ {0x20be, 0x01db},
+ {0x20c0, 0x025a},
+ {0x20c2, 0x00c0},
+ {0x20c4, 0x0005},
+ {0x20c6, 0x0006},
+ {0x20c8, 0x0ad9},
+ {0x20ca, 0x0259},
+ {0x20cc, 0x0618},
+ {0x20ce, 0x0258},
+ {0x20d0, 0x2266},
+ {0x20d2, 0x20c8},
+ {0x20d4, 0x2060},
+ {0x20d6, 0x707b},
+ {0x20d8, 0x0fdd},
+ {0x20da, 0x86b8},
+ {0x20dc, 0x50e0},
+ {0x20de, 0x0020},
+ {0x20e0, 0x5100},
+ {0x20e2, 0x3143},
+ {0x20e4, 0x5121},
+ {0x20e6, 0x7800},
+ {0x20e8, 0x3140},
+ {0x20ea, 0x01c4},
+ {0x20ec, 0x01c1},
+ {0x20ee, 0x01c0},
+ {0x20f0, 0x01c4},
+ {0x20f2, 0x2700},
+ {0x20f4, 0x3d40},
+ {0x20f6, 0x7800},
+ {0x20f8, 0xffff},
+ {0x27fe, 0xe000},
+ {0x3000, 0x60f8},
+ {0x3002, 0x187f},
+ {0x3004, 0x7060},
+ {0x3006, 0x0114},
+ {0x3008, 0x60b0},
+ {0x300a, 0x1473},
+ {0x300c, 0x0013},
+ {0x300e, 0x140f},
+ {0x3010, 0x0040},
+ {0x3012, 0x100f},
+ {0x3014, 0x60f8},
+ {0x3016, 0x187f},
+ {0x3018, 0x7060},
+ {0x301a, 0x0114},
+ {0x301c, 0x60b0},
+ {0x301e, 0x1473},
+ {0x3020, 0x0013},
+ {0x3022, 0x140f},
+ {0x3024, 0x0040},
+ {0x3026, 0x000f},
+
+ {0x0b00, 0x0000},
+ {0x0b02, 0x0045},
+ {0x0b04, 0xb405},
+ {0x0b06, 0xc403},
+ {0x0b08, 0x0081},
+ {0x0b0a, 0x8252},
+ {0x0b0c, 0xf814},
+ {0x0b0e, 0xc618},
+ {0x0b10, 0xa828},
+ {0x0b12, 0x004c},
+ {0x0b14, 0x4068},
+ {0x0b16, 0x0000},
+ {0x0f30, 0x5b15},
+ {0x0f32, 0x7067},
+ {0x0954, 0x0009},
+ {0x0956, 0x0000},
+ {0x0958, 0xbb80},
+ {0x095a, 0x5140},
+ {0x0c00, 0x1110},
+ {0x0c02, 0x0011},
+ {0x0c04, 0x0000},
+ {0x0c06, 0x0200},
+ {0x0c10, 0x0040},
+ {0x0c12, 0x0040},
+ {0x0c14, 0x0040},
+ {0x0c16, 0x0040},
+ {0x0a10, 0x4000},
+ {0x3068, 0xf800},
+ {0x306a, 0xf876},
+ {0x006c, 0x0000},
+ {0x005e, 0x0200},
+ {0x000e, 0x0100},
+ {0x0e0a, 0x0001},
+ {0x004a, 0x0100},
+ {0x004c, 0x0000},
+ {0x004e, 0x0100},
+ {0x000c, 0x0022},
+ {0x0008, 0x0b00},
+ {0x005a, 0x0202},
+ {0x0012, 0x000e},
+ {0x0018, 0x0a33},
+ {0x0022, 0x0008},
+ {0x0028, 0x0017},
+ {0x0024, 0x0028},
+ {0x002a, 0x002d},
+ {0x0026, 0x0030},
+ {0x002c, 0x07c9},
+ {0x002e, 0x1111},
+ {0x0030, 0x1111},
+ {0x0032, 0x1111},
+ {0x0006, 0x07bc},
+ {0x0a22, 0x0000},
+ {0x0a12, 0x0a20},
+ {0x0a14, 0x0798},
+ {0x003e, 0x0000},
+ {0x0074, 0x080e},
+ {0x0070, 0x0407},
+ {0x0002, 0x0000},
+ {0x0a02, 0x0100},
+ {0x0a24, 0x0100},
+ {0x0046, 0x0000},
+ {0x0076, 0x0000},
+ {0x0060, 0x0000},
+ {0x0062, 0x0530},
+ {0x0064, 0x0500},
+ {0x0066, 0x0530},
+ {0x0068, 0x0500},
+ {0x0122, 0x0300},
+ {0x015a, 0xff08},
+ {0x0804, 0x0300},
+ {0x0806, 0x0100},
+ {0x005c, 0x0102},
+ {0x0a1a, 0x0800},
+};
+
+static const struct hi556_reg mode_2592x1944_regs[] = {
+ {0x0a00, 0x0000},
+ {0x0b0a, 0x8252},
+ {0x0f30, 0x5b15},
+ {0x0f32, 0x7067},
+ {0x004a, 0x0100},
+ {0x004c, 0x0000},
+ {0x004e, 0x0100},
+ {0x000c, 0x0022},
+ {0x0008, 0x0b00},
+ {0x005a, 0x0202},
+ {0x0012, 0x000e},
+ {0x0018, 0x0a33},
+ {0x0022, 0x0008},
+ {0x0028, 0x0017},
+ {0x0024, 0x0028},
+ {0x002a, 0x002d},
+ {0x0026, 0x0030},
+ {0x002c, 0x07c9},
+ {0x002e, 0x1111},
+ {0x0030, 0x1111},
+ {0x0032, 0x1111},
+ {0x0006, 0x0814},
+ {0x0a22, 0x0000},
+ {0x0a12, 0x0a20},
+ {0x0a14, 0x0798},
+ {0x003e, 0x0000},
+ {0x0074, 0x0812},
+ {0x0070, 0x0409},
+ {0x0804, 0x0300},
+ {0x0806, 0x0100},
+ {0x0a04, 0x014a},
+ {0x090c, 0x0fdc},
+ {0x090e, 0x002d},
+
+ {0x0902, 0x4319},
+ {0x0914, 0xc10a},
+ {0x0916, 0x071f},
+ {0x0918, 0x0408},
+ {0x091a, 0x0c0d},
+ {0x091c, 0x0f09},
+ {0x091e, 0x0a00},
+ {0x0958, 0xbb80},
+};
+
+static const struct hi556_reg mode_1296x972_regs[] = {
+ {0x0a00, 0x0000},
+ {0x0b0a, 0x8259},
+ {0x0f30, 0x5b15},
+ {0x0f32, 0x7167},
+ {0x004a, 0x0100},
+ {0x004c, 0x0000},
+ {0x004e, 0x0100},
+ {0x000c, 0x0122},
+ {0x0008, 0x0b00},
+ {0x005a, 0x0404},
+ {0x0012, 0x000c},
+ {0x0018, 0x0a33},
+ {0x0022, 0x0008},
+ {0x0028, 0x0017},
+ {0x0024, 0x0022},
+ {0x002a, 0x002b},
+ {0x0026, 0x0030},
+ {0x002c, 0x07c9},
+ {0x002e, 0x3311},
+ {0x0030, 0x3311},
+ {0x0032, 0x3311},
+ {0x0006, 0x0814},
+ {0x0a22, 0x0000},
+ {0x0a12, 0x0510},
+ {0x0a14, 0x03cc},
+ {0x003e, 0x0000},
+ {0x0074, 0x0812},
+ {0x0070, 0x0409},
+ {0x0804, 0x0308},
+ {0x0806, 0x0100},
+ {0x0a04, 0x016a},
+ {0x090e, 0x0010},
+ {0x090c, 0x09c0},
+
+ {0x0902, 0x4319},
+ {0x0914, 0xc106},
+ {0x0916, 0x040e},
+ {0x0918, 0x0304},
+ {0x091a, 0x0708},
+ {0x091c, 0x0e06},
+ {0x091e, 0x0300},
+ {0x0958, 0xbb80},
+};
+
+static const char * const hi556_test_pattern_menu[] = {
+ "Disabled",
+ "Solid Colour",
+ "100% Colour Bars",
+ "Fade To Grey Colour Bars",
+ "PN9",
+ "Gradient Horizontal",
+ "Gradient Vertical",
+ "Check Board",
+ "Slant Pattern",
+};
+
+static const s64 link_freq_menu_items[] = {
+ HI556_LINK_FREQ_437MHZ,
+};
+
+static const struct hi556_link_freq_config link_freq_configs[] = {
+ [HI556_LINK_FREQ_437MHZ_INDEX] = {
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mipi_data_rate_874mbps),
+ .regs = mipi_data_rate_874mbps,
+ }
+ }
+};
+
+static const struct hi556_mode supported_modes[] = {
+ {
+ .width = 2592,
+ .height = 1944,
+ .fll_def = HI556_FLL_30FPS,
+ .fll_min = HI556_FLL_30FPS_MIN,
+ .llp = 0x0b00,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mode_2592x1944_regs),
+ .regs = mode_2592x1944_regs,
+ },
+ .link_freq_index = HI556_LINK_FREQ_437MHZ_INDEX,
+ },
+ {
+ .width = 1296,
+ .height = 972,
+ .fll_def = HI556_FLL_30FPS,
+ .fll_min = HI556_FLL_30FPS_MIN,
+ .llp = 0x0b00,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mode_1296x972_regs),
+ .regs = mode_1296x972_regs,
+ },
+ .link_freq_index = HI556_LINK_FREQ_437MHZ_INDEX,
+ }
+};
+
+struct hi556 {
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+ struct v4l2_ctrl_handler ctrl_handler;
+
+ /* V4L2 Controls */
+ struct v4l2_ctrl *link_freq;
+ struct v4l2_ctrl *pixel_rate;
+ struct v4l2_ctrl *vblank;
+ struct v4l2_ctrl *hblank;
+ struct v4l2_ctrl *exposure;
+
+ /* Current mode */
+ const struct hi556_mode *cur_mode;
+
+ /* To serialize asynchronous callbacks */
+ struct mutex mutex;
+
+ /* Streaming on/off */
+ bool streaming;
+};
+
+static u64 to_pixel_rate(u32 f_index)
+{
+ u64 pixel_rate = link_freq_menu_items[f_index] * 2 * HI556_DATA_LANES;
+
+ do_div(pixel_rate, HI556_RGB_DEPTH);
+
+ return pixel_rate;
+}
+
+static int hi556_read_reg(struct hi556 *hi556, u16 reg, u16 len, u32 *val)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&hi556->sd);
+ struct i2c_msg msgs[2];
+ u8 addr_buf[2];
+ u8 data_buf[4] = {0};
+ int ret;
+
+ if (len > 4)
+ return -EINVAL;
+
+ put_unaligned_be16(reg, addr_buf);
+ msgs[0].addr = client->addr;
+ msgs[0].flags = 0;
+ msgs[0].len = sizeof(addr_buf);
+ msgs[0].buf = addr_buf;
+ msgs[1].addr = client->addr;
+ msgs[1].flags = I2C_M_RD;
+ msgs[1].len = len;
+ msgs[1].buf = &data_buf[4 - len];
+
+ ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (ret != ARRAY_SIZE(msgs))
+ return -EIO;
+
+ *val = get_unaligned_be32(data_buf);
+
+ return 0;
+}
+
+static int hi556_write_reg(struct hi556 *hi556, u16 reg, u16 len, u32 val)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&hi556->sd);
+ u8 buf[6];
+
+ if (len > 4)
+ return -EINVAL;
+
+ put_unaligned_be16(reg, buf);
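+ /* left-justify val so the first len bytes hold its big-endian value */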
+ put_unaligned_be32(val << 8 * (4 - len), buf + 2);
+ if (i2c_master_send(client, buf, len + 2) != len + 2)
+ return -EIO;
+
+ return 0;
+}
+
+static int hi556_write_reg_list(struct hi556 *hi556,
+ const struct hi556_reg_list *r_list)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&hi556->sd);
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < r_list->num_of_regs; i++) {
+ ret = hi556_write_reg(hi556, r_list->regs[i].address,
+ HI556_REG_VALUE_16BIT,
+ r_list->regs[i].val);
+ if (ret) {
+ dev_err_ratelimited(&client->dev,
+ "failed to write reg 0x%4.4x. error = %d",
+ r_list->regs[i].address, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int hi556_update_digital_gain(struct hi556 *hi556, u32 d_gain)
+{
+ int ret;
+
+ ret = hi556_write_reg(hi556, HI556_REG_MWB_GR_GAIN,
+ HI556_REG_VALUE_16BIT, d_gain);
+ if (ret)
+ return ret;
+
+ ret = hi556_write_reg(hi556, HI556_REG_MWB_GB_GAIN,
+ HI556_REG_VALUE_16BIT, d_gain);
+ if (ret)
+ return ret;
+
+ ret = hi556_write_reg(hi556, HI556_REG_MWB_R_GAIN,
+ HI556_REG_VALUE_16BIT, d_gain);
+ if (ret)
+ return ret;
+
+ return hi556_write_reg(hi556, HI556_REG_MWB_B_GAIN,
+ HI556_REG_VALUE_16BIT, d_gain);
+}
+
+static int hi556_test_pattern(struct hi556 *hi556, u32 pattern)
+{
+ int ret;
+ u32 val;
+
+ if (pattern) {
+ ret = hi556_read_reg(hi556, HI556_REG_ISP,
+ HI556_REG_VALUE_08BIT, &val);
+ if (ret)
+ return ret;
+
+ ret = hi556_write_reg(hi556, HI556_REG_ISP,
+ HI556_REG_VALUE_08BIT,
+ val | HI556_REG_ISP_TPG_EN);
+ if (ret)
+ return ret;
+ }
+
+ return hi556_write_reg(hi556, HI556_REG_TEST_PATTERN,
+ HI556_REG_VALUE_08BIT, pattern);
+}
+
+static int hi556_set_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct hi556 *hi556 = container_of(ctrl->handler,
+ struct hi556, ctrl_handler);
+ struct i2c_client *client = v4l2_get_subdevdata(&hi556->sd);
+ s64 exposure_max;
+ int ret = 0;
+
+ /* Propagate change of current control to all related controls */
+ if (ctrl->id == V4L2_CID_VBLANK) {
+ /* Update max exposure while meeting expected vblanking */
+ exposure_max = hi556->cur_mode->height + ctrl->val -
+ HI556_EXPOSURE_MAX_MARGIN;
+ __v4l2_ctrl_modify_range(hi556->exposure,
+ hi556->exposure->minimum,
+ exposure_max, hi556->exposure->step,
+ exposure_max);
+ }
+
+ /* V4L2 control values are applied only when the power is already up */
+ if (!pm_runtime_get_if_in_use(&client->dev))
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_ANALOGUE_GAIN:
+ ret = hi556_write_reg(hi556, HI556_REG_ANALOG_GAIN,
+ HI556_REG_VALUE_16BIT, ctrl->val);
+ break;
+
+ case V4L2_CID_DIGITAL_GAIN:
+ ret = hi556_update_digital_gain(hi556, ctrl->val);
+ break;
+
+ case V4L2_CID_EXPOSURE:
+ ret = hi556_write_reg(hi556, HI556_REG_EXPOSURE,
+ HI556_REG_VALUE_16BIT, ctrl->val);
+ break;
+
+ case V4L2_CID_VBLANK:
+ /* Update FLL that meets expected vertical blanking */
+ ret = hi556_write_reg(hi556, HI556_REG_FLL,
+ HI556_REG_VALUE_16BIT,
+ hi556->cur_mode->height + ctrl->val);
+ break;
+
+ case V4L2_CID_TEST_PATTERN:
+ ret = hi556_test_pattern(hi556, ctrl->val);
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ pm_runtime_put(&client->dev);
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops hi556_ctrl_ops = {
+ .s_ctrl = hi556_set_ctrl,
+};
+
+static int hi556_init_controls(struct hi556 *hi556)
+{
+ struct v4l2_ctrl_handler *ctrl_hdlr;
+ s64 exposure_max, h_blank;
+ int ret;
+
+ ctrl_hdlr = &hi556->ctrl_handler;
+ ret = v4l2_ctrl_handler_init(ctrl_hdlr, 8);
+ if (ret)
+ return ret;
+
+ ctrl_hdlr->lock = &hi556->mutex;
+ hi556->link_freq = v4l2_ctrl_new_int_menu(ctrl_hdlr, &hi556_ctrl_ops,
+ V4L2_CID_LINK_FREQ,
+ ARRAY_SIZE(link_freq_menu_items) - 1,
+ 0, link_freq_menu_items);
+ if (hi556->link_freq)
+ hi556->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ hi556->pixel_rate = v4l2_ctrl_new_std
+ (ctrl_hdlr, &hi556_ctrl_ops,
+ V4L2_CID_PIXEL_RATE, 0,
+ to_pixel_rate(HI556_LINK_FREQ_437MHZ_INDEX),
+ 1,
+ to_pixel_rate(HI556_LINK_FREQ_437MHZ_INDEX));
+ hi556->vblank = v4l2_ctrl_new_std(ctrl_hdlr, &hi556_ctrl_ops,
+ V4L2_CID_VBLANK,
+ hi556->cur_mode->fll_min -
+ hi556->cur_mode->height,
+ HI556_FLL_MAX -
+ hi556->cur_mode->height, 1,
+ hi556->cur_mode->fll_def -
+ hi556->cur_mode->height);
+
+ h_blank = hi556->cur_mode->llp - hi556->cur_mode->width;
+
+ hi556->hblank = v4l2_ctrl_new_std(ctrl_hdlr, &hi556_ctrl_ops,
+ V4L2_CID_HBLANK, h_blank, h_blank, 1,
+ h_blank);
+ if (hi556->hblank)
+ hi556->hblank->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ v4l2_ctrl_new_std(ctrl_hdlr, &hi556_ctrl_ops, V4L2_CID_ANALOGUE_GAIN,
+ HI556_ANAL_GAIN_MIN, HI556_ANAL_GAIN_MAX,
+ HI556_ANAL_GAIN_STEP, HI556_ANAL_GAIN_MIN);
+ v4l2_ctrl_new_std(ctrl_hdlr, &hi556_ctrl_ops, V4L2_CID_DIGITAL_GAIN,
+ HI556_DGTL_GAIN_MIN, HI556_DGTL_GAIN_MAX,
+ HI556_DGTL_GAIN_STEP, HI556_DGTL_GAIN_DEFAULT);
+ exposure_max = hi556->cur_mode->fll_def - HI556_EXPOSURE_MAX_MARGIN;
+ hi556->exposure = v4l2_ctrl_new_std(ctrl_hdlr, &hi556_ctrl_ops,
+ V4L2_CID_EXPOSURE,
+ HI556_EXPOSURE_MIN, exposure_max,
+ HI556_EXPOSURE_STEP,
+ exposure_max);
+ v4l2_ctrl_new_std_menu_items(ctrl_hdlr, &hi556_ctrl_ops,
+ V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(hi556_test_pattern_menu) - 1,
+ 0, 0, hi556_test_pattern_menu);
+ if (ctrl_hdlr->error)
+ return ctrl_hdlr->error;
+
+ hi556->sd.ctrl_handler = ctrl_hdlr;
+
+ return 0;
+}
+
+static void hi556_assign_pad_format(const struct hi556_mode *mode,
+ struct v4l2_mbus_framefmt *fmt)
+{
+ fmt->width = mode->width;
+ fmt->height = mode->height;
+ fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10;
+ fmt->field = V4L2_FIELD_NONE;
+}
+
+static int hi556_start_streaming(struct hi556 *hi556)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&hi556->sd);
+ const struct hi556_reg_list *reg_list;
+ int link_freq_index, ret;
+
+ link_freq_index = hi556->cur_mode->link_freq_index;
+ reg_list = &link_freq_configs[link_freq_index].reg_list;
+ ret = hi556_write_reg_list(hi556, reg_list);
+ if (ret) {
+ dev_err(&client->dev, "failed to set plls");
+ return ret;
+ }
+
+ reg_list = &hi556->cur_mode->reg_list;
+ ret = hi556_write_reg_list(hi556, reg_list);
+ if (ret) {
+ dev_err(&client->dev, "failed to set mode");
+ return ret;
+ }
+
+ ret = __v4l2_ctrl_handler_setup(hi556->sd.ctrl_handler);
+ if (ret)
+ return ret;
+
+ ret = hi556_write_reg(hi556, HI556_REG_MODE_SELECT,
+ HI556_REG_VALUE_16BIT, HI556_MODE_STREAMING);
+
+ if (ret) {
+ dev_err(&client->dev, "failed to set stream");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void hi556_stop_streaming(struct hi556 *hi556)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&hi556->sd);
+
+ if (hi556_write_reg(hi556, HI556_REG_MODE_SELECT,
+ HI556_REG_VALUE_16BIT, HI556_MODE_STANDBY))
+ dev_err(&client->dev, "failed to set stream");
+}
+
+static int hi556_set_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct hi556 *hi556 = to_hi556(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret = 0;
+
+ if (hi556->streaming == enable)
+ return 0;
+
+ mutex_lock(&hi556->mutex);
+ if (enable) {
+ ret = pm_runtime_get_sync(&client->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(&client->dev);
+ mutex_unlock(&hi556->mutex);
+ return ret;
+ }
+
+ ret = hi556_start_streaming(hi556);
+ if (ret) {
+ enable = 0;
+ hi556_stop_streaming(hi556);
+ pm_runtime_put(&client->dev);
+ }
+ } else {
+ hi556_stop_streaming(hi556);
+ pm_runtime_put(&client->dev);
+ }
+
+ hi556->streaming = enable;
+ mutex_unlock(&hi556->mutex);
+
+ return ret;
+}
+
+static int __maybe_unused hi556_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct hi556 *hi556 = to_hi556(sd);
+
+ mutex_lock(&hi556->mutex);
+ if (hi556->streaming)
+ hi556_stop_streaming(hi556);
+
+ mutex_unlock(&hi556->mutex);
+
+ return 0;
+}
+
+static int __maybe_unused hi556_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct hi556 *hi556 = to_hi556(sd);
+ int ret;
+
+ mutex_lock(&hi556->mutex);
+ if (hi556->streaming) {
+ ret = hi556_start_streaming(hi556);
+ if (ret)
+ goto error;
+ }
+
+ mutex_unlock(&hi556->mutex);
+
+ return 0;
+
+error:
+ hi556_stop_streaming(hi556);
+ hi556->streaming = false;
+ mutex_unlock(&hi556->mutex);
+ return ret;
+}
+
+static int hi556_set_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct hi556 *hi556 = to_hi556(sd);
+ const struct hi556_mode *mode;
+ s32 vblank_def, h_blank;
+
+ mode = v4l2_find_nearest_size(supported_modes,
+ ARRAY_SIZE(supported_modes), width,
+ height, fmt->format.width,
+ fmt->format.height);
+
+ mutex_lock(&hi556->mutex);
+ hi556_assign_pad_format(mode, &fmt->format);
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+ *v4l2_subdev_get_try_format(sd, cfg, fmt->pad) = fmt->format;
+ } else {
+ hi556->cur_mode = mode;
+ __v4l2_ctrl_s_ctrl(hi556->link_freq, mode->link_freq_index);
+ __v4l2_ctrl_s_ctrl_int64(hi556->pixel_rate,
+ to_pixel_rate(mode->link_freq_index));
+
+ /* Update limits and set FPS to default */
+ vblank_def = mode->fll_def - mode->height;
+ __v4l2_ctrl_modify_range(hi556->vblank,
+ mode->fll_min - mode->height,
+ HI556_FLL_MAX - mode->height, 1,
+ vblank_def);
+ __v4l2_ctrl_s_ctrl(hi556->vblank, vblank_def);
+
+ h_blank = hi556->cur_mode->llp - hi556->cur_mode->width;
+
+ __v4l2_ctrl_modify_range(hi556->hblank, h_blank, h_blank, 1,
+ h_blank);
+ }
+
+ mutex_unlock(&hi556->mutex);
+
+ return 0;
+}
+
+static int hi556_get_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct hi556 *hi556 = to_hi556(sd);
+
+ mutex_lock(&hi556->mutex);
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
+ fmt->format = *v4l2_subdev_get_try_format(&hi556->sd, cfg,
+ fmt->pad);
+ else
+ hi556_assign_pad_format(hi556->cur_mode, &fmt->format);
+
+ mutex_unlock(&hi556->mutex);
+
+ return 0;
+}
+
+static int hi556_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->index > 0)
+ return -EINVAL;
+
+ code->code = MEDIA_BUS_FMT_SGRBG10_1X10;
+
+ return 0;
+}
+
+static int hi556_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ if (fse->index >= ARRAY_SIZE(supported_modes))
+ return -EINVAL;
+
+ if (fse->code != MEDIA_BUS_FMT_SGRBG10_1X10)
+ return -EINVAL;
+
+ fse->min_width = supported_modes[fse->index].width;
+ fse->max_width = fse->min_width;
+ fse->min_height = supported_modes[fse->index].height;
+ fse->max_height = fse->min_height;
+
+ return 0;
+}
+
+static int hi556_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ struct hi556 *hi556 = to_hi556(sd);
+
+ mutex_lock(&hi556->mutex);
+ hi556_assign_pad_format(&supported_modes[0],
+ v4l2_subdev_get_try_format(sd, fh->pad, 0));
+ mutex_unlock(&hi556->mutex);
+
+ return 0;
+}
+
+static const struct v4l2_subdev_video_ops hi556_video_ops = {
+ .s_stream = hi556_set_stream,
+};
+
+static const struct v4l2_subdev_pad_ops hi556_pad_ops = {
+ .set_fmt = hi556_set_format,
+ .get_fmt = hi556_get_format,
+ .enum_mbus_code = hi556_enum_mbus_code,
+ .enum_frame_size = hi556_enum_frame_size,
+};
+
+static const struct v4l2_subdev_ops hi556_subdev_ops = {
+ .video = &hi556_video_ops,
+ .pad = &hi556_pad_ops,
+};
+
+static const struct media_entity_operations hi556_subdev_entity_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+static const struct v4l2_subdev_internal_ops hi556_internal_ops = {
+ .open = hi556_open,
+};
+
+static int hi556_identify_module(struct hi556 *hi556)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&hi556->sd);
+ int ret;
+ u32 val;
+
+ ret = hi556_read_reg(hi556, HI556_REG_CHIP_ID,
+ HI556_REG_VALUE_16BIT, &val);
+ if (ret)
+ return ret;
+
+ if (val != HI556_CHIP_ID) {
+ dev_err(&client->dev, "chip id mismatch: %x!=%x",
+ HI556_CHIP_ID, val);
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+static int hi556_check_hwcfg(struct device *dev)
+{
+ struct fwnode_handle *ep;
+ struct fwnode_handle *fwnode = dev_fwnode(dev);
+ struct v4l2_fwnode_endpoint bus_cfg = {
+ .bus_type = V4L2_MBUS_CSI2_DPHY
+ };
+ u32 mclk;
+ int ret = 0;
+ unsigned int i, j;
+
+ if (!fwnode)
+ return -ENXIO;
+
+ ret = fwnode_property_read_u32(fwnode, "clock-frequency", &mclk);
+ if (ret) {
+ dev_err(dev, "can't get clock frequency");
+ return ret;
+ }
+
+ if (mclk != HI556_MCLK) {
+ dev_err(dev, "external clock %d is not supported", mclk);
+ return -EINVAL;
+ }
+
+ ep = fwnode_graph_get_next_endpoint(fwnode, NULL);
+ if (!ep)
+ return -ENXIO;
+
+ ret = v4l2_fwnode_endpoint_alloc_parse(ep, &bus_cfg);
+ fwnode_handle_put(ep);
+ if (ret)
+ return ret;
+
+ if (bus_cfg.bus.mipi_csi2.num_data_lanes != 2) {
+ dev_err(dev, "number of CSI2 data lanes %d is not supported",
+ bus_cfg.bus.mipi_csi2.num_data_lanes);
+ ret = -EINVAL;
+ goto check_hwcfg_error;
+ }
+
+ if (!bus_cfg.nr_of_link_frequencies) {
+ dev_err(dev, "no link frequencies defined");
+ ret = -EINVAL;
+ goto check_hwcfg_error;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(link_freq_menu_items); i++) {
+ for (j = 0; j < bus_cfg.nr_of_link_frequencies; j++) {
+ if (link_freq_menu_items[i] ==
+ bus_cfg.link_frequencies[j])
+ break;
+ }
+
+ if (j == bus_cfg.nr_of_link_frequencies) {
+ dev_err(dev, "no link frequency %lld supported",
+ link_freq_menu_items[i]);
+ ret = -EINVAL;
+ goto check_hwcfg_error;
+ }
+ }
+
+check_hwcfg_error:
+ v4l2_fwnode_endpoint_free(&bus_cfg);
+
+ return ret;
+}
+
+static int hi556_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct hi556 *hi556 = to_hi556(sd);
+
+ v4l2_async_unregister_subdev(sd);
+ media_entity_cleanup(&sd->entity);
+ v4l2_ctrl_handler_free(sd->ctrl_handler);
+ pm_runtime_disable(&client->dev);
+ mutex_destroy(&hi556->mutex);
+
+ return 0;
+}
+
+static int hi556_probe(struct i2c_client *client)
+{
+ struct hi556 *hi556;
+ int ret;
+
+ ret = hi556_check_hwcfg(&client->dev);
+ if (ret) {
+ dev_err(&client->dev, "failed to check HW configuration: %d",
+ ret);
+ return ret;
+ }
+
+ hi556 = devm_kzalloc(&client->dev, sizeof(*hi556), GFP_KERNEL);
+ if (!hi556)
+ return -ENOMEM;
+
+ v4l2_i2c_subdev_init(&hi556->sd, client, &hi556_subdev_ops);
+ ret = hi556_identify_module(hi556);
+ if (ret) {
+ dev_err(&client->dev, "failed to find sensor: %d", ret);
+ return ret;
+ }
+
+ mutex_init(&hi556->mutex);
+ hi556->cur_mode = &supported_modes[0];
+ ret = hi556_init_controls(hi556);
+ if (ret) {
+ dev_err(&client->dev, "failed to init controls: %d", ret);
+ goto probe_error_v4l2_ctrl_handler_free;
+ }
+
+ hi556->sd.internal_ops = &hi556_internal_ops;
+ hi556->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ hi556->sd.entity.ops = &hi556_subdev_entity_ops;
+ hi556->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
+ hi556->pad.flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_pads_init(&hi556->sd.entity, 1, &hi556->pad);
+ if (ret) {
+ dev_err(&client->dev, "failed to init entity pads: %d", ret);
+ goto probe_error_v4l2_ctrl_handler_free;
+ }
+
+ ret = v4l2_async_register_subdev_sensor_common(&hi556->sd);
+ if (ret < 0) {
+ dev_err(&client->dev, "failed to register V4L2 subdev: %d",
+ ret);
+ goto probe_error_media_entity_cleanup;
+ }
+
+ pm_runtime_set_active(&client->dev);
+ pm_runtime_enable(&client->dev);
+ pm_runtime_idle(&client->dev);
+
+ return 0;
+
+probe_error_media_entity_cleanup:
+ media_entity_cleanup(&hi556->sd.entity);
+
+probe_error_v4l2_ctrl_handler_free:
+ v4l2_ctrl_handler_free(hi556->sd.ctrl_handler);
+ mutex_destroy(&hi556->mutex);
+
+ return ret;
+}
+
+static const struct dev_pm_ops hi556_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(hi556_suspend, hi556_resume)
+};
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id hi556_acpi_ids[] = {
+ {"INT3537"},
+ {}
+};
+
+MODULE_DEVICE_TABLE(acpi, hi556_acpi_ids);
+#endif
+
+static struct i2c_driver hi556_i2c_driver = {
+ .driver = {
+ .name = "hi556",
+ .pm = &hi556_pm_ops,
+ .acpi_match_table = ACPI_PTR(hi556_acpi_ids),
+ },
+ .probe_new = hi556_probe,
+ .remove = hi556_remove,
+};
+
+module_i2c_driver(hi556_i2c_driver);
+
+MODULE_AUTHOR("Shawn Tu <shawnx.tu@intel.com>");
+MODULE_DESCRIPTION("Hynix HI556 sensor driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/i2c/imx214.c b/drivers/media/i2c/imx214.c
index 159a3a604f0e..adcaaa8c86d1 100644
--- a/drivers/media/i2c/imx214.c
+++ b/drivers/media/i2c/imx214.c
@@ -47,6 +47,7 @@ struct imx214 {
struct v4l2_ctrl *pixel_rate;
struct v4l2_ctrl *link_freq;
struct v4l2_ctrl *exposure;
+ struct v4l2_ctrl *unit_size;
struct regulator_bulk_data supplies[IMX214_NUM_SUPPLIES];
@@ -948,6 +949,10 @@ static int imx214_probe(struct i2c_client *client)
static const s64 link_freq[] = {
IMX214_DEFAULT_LINK_FREQ,
};
+ static const struct v4l2_area unit_size = {
+ .width = 1120,
+ .height = 1120,
+ };
int ret;
ret = imx214_parse_fwnode(dev);
@@ -1029,6 +1034,10 @@ static int imx214_probe(struct i2c_client *client)
V4L2_CID_EXPOSURE,
0, 3184, 1, 0x0c70);
+ imx214->unit_size = v4l2_ctrl_new_std_compound(&imx214->ctrls,
+ NULL,
+ V4L2_CID_UNIT_CELL_SIZE,
+ v4l2_ctrl_ptr_create((void *)&unit_size));
ret = imx214->ctrls.error;
if (ret) {
dev_err(&client->dev, "%s control init failed (%d)\n",
diff --git a/drivers/media/i2c/imx290.c b/drivers/media/i2c/imx290.c
new file mode 100644
index 000000000000..f7678e5a5d87
--- /dev/null
+++ b/drivers/media/i2c/imx290.c
@@ -0,0 +1,884 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Sony IMX290 CMOS Image Sensor Driver
+ *
+ * Copyright (C) 2019 FRAMOS GmbH.
+ *
+ * Copyright (C) 2019 Linaro Ltd.
+ * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <media/media-entity.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-subdev.h>
+
+#define IMX290_STANDBY 0x3000
+#define IMX290_REGHOLD 0x3001
+#define IMX290_XMSTA 0x3002
+#define IMX290_GAIN 0x3014
+
+#define IMX290_DEFAULT_LINK_FREQ 445500000
+
+static const char * const imx290_supply_name[] = {
+ "vdda",
+ "vddd",
+ "vdddo",
+};
+
+#define IMX290_NUM_SUPPLIES ARRAY_SIZE(imx290_supply_name)
+
+struct imx290_regval {
+ u16 reg;
+ u8 val;
+};
+
+struct imx290_mode {
+ u32 width;
+ u32 height;
+ u32 pixel_rate;
+ u32 link_freq_index;
+
+ const struct imx290_regval *data;
+ u32 data_size;
+};
+
+struct imx290 {
+ struct device *dev;
+ struct clk *xclk;
+ struct regmap *regmap;
+
+ struct v4l2_subdev sd;
+ struct v4l2_fwnode_endpoint ep;
+ struct media_pad pad;
+ struct v4l2_mbus_framefmt current_format;
+ const struct imx290_mode *current_mode;
+
+ struct regulator_bulk_data supplies[IMX290_NUM_SUPPLIES];
+ struct gpio_desc *rst_gpio;
+
+ struct v4l2_ctrl_handler ctrls;
+ struct v4l2_ctrl *link_freq;
+ struct v4l2_ctrl *pixel_rate;
+
+ struct mutex lock;
+};
+
+struct imx290_pixfmt {
+ u32 code;
+};
+
+static const struct imx290_pixfmt imx290_formats[] = {
+ { MEDIA_BUS_FMT_SRGGB10_1X10 },
+};
+
+static const struct regmap_config imx290_regmap_config = {
+ .reg_bits = 16,
+ .val_bits = 8,
+ .cache_type = REGCACHE_RBTREE,
+};
+
+static const struct imx290_regval imx290_global_init_settings[] = {
+ { 0x3007, 0x00 },
+ { 0x3009, 0x00 },
+ { 0x3018, 0x65 },
+ { 0x3019, 0x04 },
+ { 0x301a, 0x00 },
+ { 0x3443, 0x03 },
+ { 0x3444, 0x20 },
+ { 0x3445, 0x25 },
+ { 0x3407, 0x03 },
+ { 0x303a, 0x0c },
+ { 0x3040, 0x00 },
+ { 0x3041, 0x00 },
+ { 0x303c, 0x00 },
+ { 0x303d, 0x00 },
+ { 0x3042, 0x9c },
+ { 0x3043, 0x07 },
+ { 0x303e, 0x49 },
+ { 0x303f, 0x04 },
+ { 0x304b, 0x0a },
+ { 0x300f, 0x00 },
+ { 0x3010, 0x21 },
+ { 0x3012, 0x64 },
+ { 0x3016, 0x09 },
+ { 0x3070, 0x02 },
+ { 0x3071, 0x11 },
+ { 0x309b, 0x10 },
+ { 0x309c, 0x22 },
+ { 0x30a2, 0x02 },
+ { 0x30a6, 0x20 },
+ { 0x30a8, 0x20 },
+ { 0x30aa, 0x20 },
+ { 0x30ac, 0x20 },
+ { 0x30b0, 0x43 },
+ { 0x3119, 0x9e },
+ { 0x311c, 0x1e },
+ { 0x311e, 0x08 },
+ { 0x3128, 0x05 },
+ { 0x313d, 0x83 },
+ { 0x3150, 0x03 },
+ { 0x317e, 0x00 },
+ { 0x32b8, 0x50 },
+ { 0x32b9, 0x10 },
+ { 0x32ba, 0x00 },
+ { 0x32bb, 0x04 },
+ { 0x32c8, 0x50 },
+ { 0x32c9, 0x10 },
+ { 0x32ca, 0x00 },
+ { 0x32cb, 0x04 },
+ { 0x332c, 0xd3 },
+ { 0x332d, 0x10 },
+ { 0x332e, 0x0d },
+ { 0x3358, 0x06 },
+ { 0x3359, 0xe1 },
+ { 0x335a, 0x11 },
+ { 0x3360, 0x1e },
+ { 0x3361, 0x61 },
+ { 0x3362, 0x10 },
+ { 0x33b0, 0x50 },
+ { 0x33b2, 0x1a },
+ { 0x33b3, 0x04 },
+};
+
+static const struct imx290_regval imx290_1080p_settings[] = {
+ /* mode settings */
+ { 0x3007, 0x00 },
+ { 0x303a, 0x0c },
+ { 0x3414, 0x0a },
+ { 0x3472, 0x80 },
+ { 0x3473, 0x07 },
+ { 0x3418, 0x38 },
+ { 0x3419, 0x04 },
+ { 0x3012, 0x64 },
+ { 0x3013, 0x00 },
+ { 0x305c, 0x18 },
+ { 0x305d, 0x03 },
+ { 0x305e, 0x20 },
+ { 0x305f, 0x01 },
+ { 0x315e, 0x1a },
+ { 0x3164, 0x1a },
+ { 0x3480, 0x49 },
+ /* data rate settings */
+ { 0x3009, 0x01 },
+ { 0x3405, 0x10 },
+ { 0x3446, 0x57 },
+ { 0x3447, 0x00 },
+ { 0x3448, 0x37 },
+ { 0x3449, 0x00 },
+ { 0x344a, 0x1f },
+ { 0x344b, 0x00 },
+ { 0x344c, 0x1f },
+ { 0x344d, 0x00 },
+ { 0x344e, 0x1f },
+ { 0x344f, 0x00 },
+ { 0x3450, 0x77 },
+ { 0x3451, 0x00 },
+ { 0x3452, 0x1f },
+ { 0x3453, 0x00 },
+ { 0x3454, 0x17 },
+ { 0x3455, 0x00 },
+ { 0x301c, 0x98 },
+ { 0x301d, 0x08 },
+};
+
+static const struct imx290_regval imx290_720p_settings[] = {
+ /* mode settings */
+ { 0x3007, 0x10 },
+ { 0x303a, 0x06 },
+ { 0x3414, 0x04 },
+ { 0x3472, 0x00 },
+ { 0x3473, 0x05 },
+ { 0x3418, 0xd0 },
+ { 0x3419, 0x02 },
+ { 0x3012, 0x64 },
+ { 0x3013, 0x00 },
+ { 0x305c, 0x20 },
+ { 0x305d, 0x00 },
+ { 0x305e, 0x20 },
+ { 0x305f, 0x01 },
+ { 0x315e, 0x1a },
+ { 0x3164, 0x1a },
+ { 0x3480, 0x49 },
+ /* data rate settings */
+ { 0x3009, 0x01 },
+ { 0x3405, 0x10 },
+ { 0x3446, 0x4f },
+ { 0x3447, 0x00 },
+ { 0x3448, 0x2f },
+ { 0x3449, 0x00 },
+ { 0x344a, 0x17 },
+ { 0x344b, 0x00 },
+ { 0x344c, 0x17 },
+ { 0x344d, 0x00 },
+ { 0x344e, 0x17 },
+ { 0x344f, 0x00 },
+ { 0x3450, 0x57 },
+ { 0x3451, 0x00 },
+ { 0x3452, 0x17 },
+ { 0x3453, 0x00 },
+ { 0x3454, 0x17 },
+ { 0x3455, 0x00 },
+ { 0x301c, 0xe4 },
+ { 0x301d, 0x0c },
+};
+
+static const struct imx290_regval imx290_10bit_settings[] = {
+ { 0x3005, 0x00},
+ { 0x3046, 0x00},
+ { 0x3129, 0x1d},
+ { 0x317c, 0x12},
+ { 0x31ec, 0x37},
+ { 0x3441, 0x0a},
+ { 0x3442, 0x0a},
+ { 0x300a, 0x3c},
+ { 0x300b, 0x00},
+};
+
+/* supported link frequencies */
+static const s64 imx290_link_freq[] = {
+ IMX290_DEFAULT_LINK_FREQ,
+};
+
+/* Mode configs */
+static const struct imx290_mode imx290_modes[] = {
+ {
+ .width = 1920,
+ .height = 1080,
+ .data = imx290_1080p_settings,
+ .data_size = ARRAY_SIZE(imx290_1080p_settings),
+ .pixel_rate = 178200000,
+ .link_freq_index = 0,
+ },
+ {
+ .width = 1280,
+ .height = 720,
+ .data = imx290_720p_settings,
+ .data_size = ARRAY_SIZE(imx290_720p_settings),
+ .pixel_rate = 178200000,
+ .link_freq_index = 0,
+ },
+};
+
+static inline struct imx290 *to_imx290(struct v4l2_subdev *_sd)
+{
+ return container_of(_sd, struct imx290, sd);
+}
+
+static inline int imx290_read_reg(struct imx290 *imx290, u16 addr, u8 *value)
+{
+ unsigned int regval;
+ int ret;
+
+ ret = regmap_read(imx290->regmap, addr, &regval);
+ if (ret) {
+ dev_err(imx290->dev, "I2C read failed for addr: %x\n", addr);
+ return ret;
+ }
+
+ *value = regval & 0xff;
+
+ return 0;
+}
+
+static int imx290_write_reg(struct imx290 *imx290, u16 addr, u8 value)
+{
+ int ret;
+
+ ret = regmap_write(imx290->regmap, addr, value);
+ if (ret) {
+ dev_err(imx290->dev, "I2C write failed for addr: %x\n", addr);
+ return ret;
+ }
+
+ return ret;
+}
+
+static int imx290_set_register_array(struct imx290 *imx290,
+ const struct imx290_regval *settings,
+ unsigned int num_settings)
+{
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < num_settings; ++i, ++settings) {
+ ret = imx290_write_reg(imx290, settings->reg, settings->val);
+ if (ret < 0)
+ return ret;
+
+ /* Settle time is 10ms for all registers */
+ msleep(10);
+ }
+
+ return 0;
+}
+
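+/*
+ * Multi-byte values are written under REGHOLD so the sensor latches
+ * them atomically rather than acting on a half-written value.
+ */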
+static int imx290_write_buffered_reg(struct imx290 *imx290, u16 address_low,
+ u8 nr_regs, u32 value)
+{
+ unsigned int i;
+ int ret;
+
+ ret = imx290_write_reg(imx290, IMX290_REGHOLD, 0x01);
+ if (ret) {
+ dev_err(imx290->dev, "Error setting hold register\n");
+ return ret;
+ }
+
+ for (i = 0; i < nr_regs; i++) {
+ ret = imx290_write_reg(imx290, address_low + i,
+ (u8)(value >> (i * 8)));
+ if (ret) {
+ dev_err(imx290->dev, "Error writing buffered registers\n");
+ return ret;
+ }
+ }
+
+ ret = imx290_write_reg(imx290, IMX290_REGHOLD, 0x00);
+ if (ret) {
+ dev_err(imx290->dev, "Error setting hold register\n");
+ return ret;
+ }
+
+ return ret;
+}
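
(Editor's note) A hypothetical usage sketch: IMX290_REGHOLD brackets the writes so a multi-byte value updates atomically. IMX290_EXPOSURE_LOW and exposure are illustrative names, not from this patch; imx290_set_gain() just below is the real in-tree caller:

	/* write a 16-bit value across two consecutive registers */
	ret = imx290_write_buffered_reg(imx290, IMX290_EXPOSURE_LOW, 2,
					exposure);
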
+
+static int imx290_set_gain(struct imx290 *imx290, u32 value)
+{
+ int ret;
+
+ ret = imx290_write_buffered_reg(imx290, IMX290_GAIN, 1, value);
+ if (ret)
+ dev_err(imx290->dev, "Unable to write gain\n");
+
+ return ret;
+}
+
+/* Stop streaming */
+static int imx290_stop_streaming(struct imx290 *imx290)
+{
+ int ret;
+
+ ret = imx290_write_reg(imx290, IMX290_STANDBY, 0x01);
+ if (ret < 0)
+ return ret;
+
+ msleep(30);
+
+ return imx290_write_reg(imx290, IMX290_XMSTA, 0x01);
+}
+
+static int imx290_set_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct imx290 *imx290 = container_of(ctrl->handler,
+ struct imx290, ctrls);
+ int ret = 0;
+
+	/* V4L2 control values are applied only when power is already up */
+ if (!pm_runtime_get_if_in_use(imx290->dev))
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_GAIN:
+ ret = imx290_set_gain(imx290, ctrl->val);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ pm_runtime_put(imx290->dev);
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops imx290_ctrl_ops = {
+ .s_ctrl = imx290_set_ctrl,
+};
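
(Editor's note) Sketch of the runtime-PM idiom used in imx290_set_ctrl():

/*
 * pm_runtime_get_if_in_use() takes a reference only if the device is
 * already runtime-active:
 *   - 0: device suspended; the value is cached by the control
 *     framework and re-applied by v4l2_ctrl_handler_setup() in
 *     imx290_start_streaming();
 *   - positive: a reference was taken and must be balanced with
 *     pm_runtime_put() after the register write.
 */
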
+
+static int imx290_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->index >= ARRAY_SIZE(imx290_formats))
+ return -EINVAL;
+
+ code->code = imx290_formats[code->index].code;
+
+ return 0;
+}
+
+static int imx290_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct imx290 *imx290 = to_imx290(sd);
+ struct v4l2_mbus_framefmt *framefmt;
+
+ mutex_lock(&imx290->lock);
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
+ framefmt = v4l2_subdev_get_try_format(&imx290->sd, cfg,
+ fmt->pad);
+ else
+ framefmt = &imx290->current_format;
+
+ fmt->format = *framefmt;
+
+ mutex_unlock(&imx290->lock);
+
+ return 0;
+}
+
+static int imx290_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct imx290 *imx290 = to_imx290(sd);
+ const struct imx290_mode *mode;
+ struct v4l2_mbus_framefmt *format;
+ unsigned int i;
+
+ mutex_lock(&imx290->lock);
+
+ mode = v4l2_find_nearest_size(imx290_modes,
+ ARRAY_SIZE(imx290_modes),
+ width, height,
+ fmt->format.width, fmt->format.height);
+
+ fmt->format.width = mode->width;
+ fmt->format.height = mode->height;
+
+ for (i = 0; i < ARRAY_SIZE(imx290_formats); i++)
+ if (imx290_formats[i].code == fmt->format.code)
+ break;
+
+ if (i >= ARRAY_SIZE(imx290_formats))
+ i = 0;
+
+ fmt->format.code = imx290_formats[i].code;
+ fmt->format.field = V4L2_FIELD_NONE;
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+ format = v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
+ } else {
+ format = &imx290->current_format;
+ __v4l2_ctrl_s_ctrl(imx290->link_freq, mode->link_freq_index);
+ __v4l2_ctrl_s_ctrl_int64(imx290->pixel_rate, mode->pixel_rate);
+
+ imx290->current_mode = mode;
+ }
+
+ *format = fmt->format;
+
+ mutex_unlock(&imx290->lock);
+
+ return 0;
+}
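
(Editor's note) Rough equivalent of the v4l2_find_nearest_size() call above, as a sketch; the helper picks the entry minimizing the sum of absolute width and height differences:

	const struct imx290_mode *best = &imx290_modes[0];
	unsigned int i, err, best_err = UINT_MAX;

	for (i = 0; i < ARRAY_SIZE(imx290_modes); i++) {
		err = abs((int)imx290_modes[i].width - (int)fmt->format.width) +
		      abs((int)imx290_modes[i].height - (int)fmt->format.height);
		if (err < best_err) {
			best_err = err;
			best = &imx290_modes[i];
		}
	}
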
+
+static int imx290_entity_init_cfg(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg)
+{
+ struct v4l2_subdev_format fmt = { 0 };
+
+ fmt.which = cfg ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
+ fmt.format.width = 1920;
+ fmt.format.height = 1080;
+
+ imx290_set_fmt(subdev, cfg, &fmt);
+
+ return 0;
+}
+
+static int imx290_write_current_format(struct imx290 *imx290,
+ struct v4l2_mbus_framefmt *format)
+{
+ int ret;
+
+ switch (format->code) {
+ case MEDIA_BUS_FMT_SRGGB10_1X10:
+ ret = imx290_set_register_array(imx290, imx290_10bit_settings,
+ ARRAY_SIZE(
+ imx290_10bit_settings));
+ if (ret < 0) {
+ dev_err(imx290->dev, "Could not set format registers\n");
+ return ret;
+ }
+ break;
+ default:
+ dev_err(imx290->dev, "Unknown pixel format\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Start streaming */
+static int imx290_start_streaming(struct imx290 *imx290)
+{
+ int ret;
+
+ /* Set init register settings */
+ ret = imx290_set_register_array(imx290, imx290_global_init_settings,
+ ARRAY_SIZE(
+ imx290_global_init_settings));
+ if (ret < 0) {
+ dev_err(imx290->dev, "Could not set init registers\n");
+ return ret;
+ }
+
+ /* Set current frame format */
+ ret = imx290_write_current_format(imx290, &imx290->current_format);
+ if (ret < 0) {
+ dev_err(imx290->dev, "Could not set frame format\n");
+ return ret;
+ }
+
+ /* Apply default values of current mode */
+ ret = imx290_set_register_array(imx290, imx290->current_mode->data,
+ imx290->current_mode->data_size);
+ if (ret < 0) {
+ dev_err(imx290->dev, "Could not set current mode\n");
+ return ret;
+ }
+
+	/* Apply user-provided control values */
+ ret = v4l2_ctrl_handler_setup(imx290->sd.ctrl_handler);
+ if (ret) {
+ dev_err(imx290->dev, "Could not sync v4l2 controls\n");
+ return ret;
+ }
+
+ ret = imx290_write_reg(imx290, IMX290_STANDBY, 0x00);
+ if (ret < 0)
+ return ret;
+
+ msleep(30);
+
+ /* Start streaming */
+ return imx290_write_reg(imx290, IMX290_XMSTA, 0x00);
+}
+
+static int imx290_set_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct imx290 *imx290 = to_imx290(sd);
+ int ret = 0;
+
+ if (enable) {
+ ret = pm_runtime_get_sync(imx290->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(imx290->dev);
+ goto unlock_and_return;
+ }
+
+ ret = imx290_start_streaming(imx290);
+ if (ret) {
+ dev_err(imx290->dev, "Start stream failed\n");
+ pm_runtime_put(imx290->dev);
+ goto unlock_and_return;
+ }
+ } else {
+ imx290_stop_streaming(imx290);
+ pm_runtime_put(imx290->dev);
+ }
+
+unlock_and_return:
+
+ return ret;
+}
+
+static int imx290_get_regulators(struct device *dev, struct imx290 *imx290)
+{
+ unsigned int i;
+
+ for (i = 0; i < IMX290_NUM_SUPPLIES; i++)
+ imx290->supplies[i].supply = imx290_supply_name[i];
+
+ return devm_regulator_bulk_get(dev, IMX290_NUM_SUPPLIES,
+ imx290->supplies);
+}
+
+static int imx290_power_on(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct imx290 *imx290 = to_imx290(sd);
+ int ret;
+
+ ret = clk_prepare_enable(imx290->xclk);
+ if (ret) {
+ dev_err(imx290->dev, "Failed to enable clock\n");
+ return ret;
+ }
+
+ ret = regulator_bulk_enable(IMX290_NUM_SUPPLIES, imx290->supplies);
+ if (ret) {
+ dev_err(imx290->dev, "Failed to enable regulators\n");
+ clk_disable_unprepare(imx290->xclk);
+ return ret;
+ }
+
+ usleep_range(1, 2);
+ gpiod_set_value_cansleep(imx290->rst_gpio, 1);
+ usleep_range(30000, 31000);
+
+ return 0;
+}
+
+static int imx290_power_off(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct imx290 *imx290 = to_imx290(sd);
+
+ clk_disable_unprepare(imx290->xclk);
+ gpiod_set_value_cansleep(imx290->rst_gpio, 0);
+ regulator_bulk_disable(IMX290_NUM_SUPPLIES, imx290->supplies);
+
+ return 0;
+}
+
+static const struct dev_pm_ops imx290_pm_ops = {
+	SET_RUNTIME_PM_OPS(imx290_power_off, imx290_power_on, NULL)
+};
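
(Editor's note) For reference, the CONFIG_PM expansion from include/linux/pm.h - the suspend callback comes first, which is why the power-off helper must be the first argument above:

#define SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
	.runtime_suspend = suspend_fn, \
	.runtime_resume = resume_fn, \
	.runtime_idle = idle_fn,
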
+
+static const struct v4l2_subdev_video_ops imx290_video_ops = {
+ .s_stream = imx290_set_stream,
+};
+
+static const struct v4l2_subdev_pad_ops imx290_pad_ops = {
+ .init_cfg = imx290_entity_init_cfg,
+ .enum_mbus_code = imx290_enum_mbus_code,
+ .get_fmt = imx290_get_fmt,
+ .set_fmt = imx290_set_fmt,
+};
+
+static const struct v4l2_subdev_ops imx290_subdev_ops = {
+ .video = &imx290_video_ops,
+ .pad = &imx290_pad_ops,
+};
+
+static const struct media_entity_operations imx290_subdev_entity_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+static int imx290_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct fwnode_handle *endpoint;
+ struct imx290 *imx290;
+ u32 xclk_freq;
+ int ret;
+
+ imx290 = devm_kzalloc(dev, sizeof(*imx290), GFP_KERNEL);
+ if (!imx290)
+ return -ENOMEM;
+
+ imx290->dev = dev;
+ imx290->regmap = devm_regmap_init_i2c(client, &imx290_regmap_config);
+ if (IS_ERR(imx290->regmap)) {
+ dev_err(dev, "Unable to initialize I2C\n");
+ return -ENODEV;
+ }
+
+ endpoint = fwnode_graph_get_next_endpoint(dev_fwnode(dev), NULL);
+ if (!endpoint) {
+ dev_err(dev, "Endpoint node not found\n");
+ return -EINVAL;
+ }
+
+ ret = v4l2_fwnode_endpoint_alloc_parse(endpoint, &imx290->ep);
+ fwnode_handle_put(endpoint);
+ if (ret) {
+ dev_err(dev, "Parsing endpoint node failed\n");
+ goto free_err;
+ }
+
+ if (!imx290->ep.nr_of_link_frequencies) {
+ dev_err(dev, "link-frequency property not found in DT\n");
+ ret = -EINVAL;
+ goto free_err;
+ }
+
+ if (imx290->ep.link_frequencies[0] != IMX290_DEFAULT_LINK_FREQ) {
+ dev_err(dev, "Unsupported link frequency\n");
+ ret = -EINVAL;
+ goto free_err;
+ }
+
+ /* Only CSI2 is supported for now */
+ if (imx290->ep.bus_type != V4L2_MBUS_CSI2_DPHY) {
+ dev_err(dev, "Unsupported bus type, should be CSI2\n");
+ ret = -EINVAL;
+ goto free_err;
+ }
+
+ /* Set default mode to max resolution */
+ imx290->current_mode = &imx290_modes[0];
+
+ /* get system clock (xclk) */
+ imx290->xclk = devm_clk_get(dev, "xclk");
+ if (IS_ERR(imx290->xclk)) {
+		dev_err(dev, "Could not get xclk\n");
+ ret = PTR_ERR(imx290->xclk);
+ goto free_err;
+ }
+
+ ret = fwnode_property_read_u32(dev_fwnode(dev), "clock-frequency",
+ &xclk_freq);
+ if (ret) {
+ dev_err(dev, "Could not get xclk frequency\n");
+ goto free_err;
+ }
+
+ /* external clock must be 37.125 MHz */
+ if (xclk_freq != 37125000) {
+ dev_err(dev, "External clock frequency %u is not supported\n",
+ xclk_freq);
+ ret = -EINVAL;
+ goto free_err;
+ }
+
+ ret = clk_set_rate(imx290->xclk, xclk_freq);
+ if (ret) {
+ dev_err(dev, "Could not set xclk frequency\n");
+ goto free_err;
+ }
+
+ ret = imx290_get_regulators(dev, imx290);
+ if (ret < 0) {
+ dev_err(dev, "Cannot get regulators\n");
+ goto free_err;
+ }
+
+ imx290->rst_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_ASIS);
+ if (IS_ERR(imx290->rst_gpio)) {
+ dev_err(dev, "Cannot get reset gpio\n");
+ ret = PTR_ERR(imx290->rst_gpio);
+ goto free_err;
+ }
+
+ mutex_init(&imx290->lock);
+
+ v4l2_ctrl_handler_init(&imx290->ctrls, 3);
+
+ v4l2_ctrl_new_std(&imx290->ctrls, &imx290_ctrl_ops,
+ V4L2_CID_GAIN, 0, 72, 1, 0);
+ imx290->link_freq =
+ v4l2_ctrl_new_int_menu(&imx290->ctrls,
+ &imx290_ctrl_ops,
+ V4L2_CID_LINK_FREQ,
+ ARRAY_SIZE(imx290_link_freq) - 1,
+ 0, imx290_link_freq);
+ if (imx290->link_freq)
+ imx290->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ imx290->pixel_rate = v4l2_ctrl_new_std(&imx290->ctrls, &imx290_ctrl_ops,
+ V4L2_CID_PIXEL_RATE, 1,
+ INT_MAX, 1,
+ imx290_modes[0].pixel_rate);
+
+ imx290->sd.ctrl_handler = &imx290->ctrls;
+
+ if (imx290->ctrls.error) {
+ dev_err(dev, "Control initialization error %d\n",
+ imx290->ctrls.error);
+ ret = imx290->ctrls.error;
+ goto free_ctrl;
+ }
+
+ v4l2_i2c_subdev_init(&imx290->sd, client, &imx290_subdev_ops);
+ imx290->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ imx290->sd.dev = &client->dev;
+ imx290->sd.entity.ops = &imx290_subdev_entity_ops;
+ imx290->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
+
+ imx290->pad.flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_pads_init(&imx290->sd.entity, 1, &imx290->pad);
+ if (ret < 0) {
+ dev_err(dev, "Could not register media entity\n");
+ goto free_ctrl;
+ }
+
+ ret = v4l2_async_register_subdev(&imx290->sd);
+ if (ret < 0) {
+ dev_err(dev, "Could not register v4l2 device\n");
+ goto free_entity;
+ }
+
+ /* Power on the device to match runtime PM state below */
+ ret = imx290_power_on(dev);
+ if (ret < 0) {
+ dev_err(dev, "Could not power on the device\n");
+ goto free_entity;
+ }
+
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ pm_runtime_idle(dev);
+
+ v4l2_fwnode_endpoint_free(&imx290->ep);
+
+ return 0;
+
+free_entity:
+ media_entity_cleanup(&imx290->sd.entity);
+free_ctrl:
+ v4l2_ctrl_handler_free(&imx290->ctrls);
+ mutex_destroy(&imx290->lock);
+free_err:
+ v4l2_fwnode_endpoint_free(&imx290->ep);
+
+ return ret;
+}
+
+static int imx290_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct imx290 *imx290 = to_imx290(sd);
+
+ v4l2_async_unregister_subdev(sd);
+ media_entity_cleanup(&sd->entity);
+ v4l2_ctrl_handler_free(sd->ctrl_handler);
+
+ mutex_destroy(&imx290->lock);
+
+ pm_runtime_disable(imx290->dev);
+ if (!pm_runtime_status_suspended(imx290->dev))
+ imx290_power_off(imx290->dev);
+ pm_runtime_set_suspended(imx290->dev);
+
+ return 0;
+}
+
+static const struct of_device_id imx290_of_match[] = {
+ { .compatible = "sony,imx290" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, imx290_of_match);
+
+static struct i2c_driver imx290_i2c_driver = {
+ .probe_new = imx290_probe,
+ .remove = imx290_remove,
+ .driver = {
+ .name = "imx290",
+ .pm = &imx290_pm_ops,
+ .of_match_table = of_match_ptr(imx290_of_match),
+ },
+};
+
+module_i2c_driver(imx290_i2c_driver);
+
+MODULE_DESCRIPTION("Sony IMX290 CMOS Image Sensor Driver");
+MODULE_AUTHOR("FRAMOS GmbH");
+MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/i2c/lm3646.c b/drivers/media/i2c/lm3646.c
index d8a8853f9a2b..c76ccf67a909 100644
--- a/drivers/media/i2c/lm3646.c
+++ b/drivers/media/i2c/lm3646.c
@@ -134,7 +134,7 @@ static int lm3646_set_ctrl(struct v4l2_ctrl *ctrl)
{
struct lm3646_flash *flash = to_lm3646_flash(ctrl);
unsigned int reg_val;
- int rval = -EINVAL;
+ int rval;
switch (ctrl->id) {
case V4L2_CID_FLASH_LED_MODE:
diff --git a/drivers/media/i2c/max2175.c b/drivers/media/i2c/max2175.c
index 19a3ceea3bc2..506a30e69ced 100644
--- a/drivers/media/i2c/max2175.c
+++ b/drivers/media/i2c/max2175.c
@@ -591,8 +591,8 @@ static int max2175_set_lo_freq(struct max2175 *ctx, u32 lo_freq)
lo_freq *= lo_mult;
int_desired = lo_freq / ctx->xtal_freq;
- frac_desired = div_u64((u64)(lo_freq % ctx->xtal_freq) << 20,
- ctx->xtal_freq);
+ frac_desired = div64_ul((u64)(lo_freq % ctx->xtal_freq) << 20,
+ ctx->xtal_freq);
/* Check CSM is not busy */
ret = max2175_poll_csm_ready(ctx);
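
(Editor's note) The helper swap matters because of the divisor width; signatures from include/linux/math64.h, assuming ctx->xtal_freq is an unsigned long here:

	u64 div_u64(u64 dividend, u32 divisor);
	u64 div64_ul(u64 dividend, unsigned long divisor);

div_u64() would silently truncate a 64-bit divisor on 64-bit builds.
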
diff --git a/drivers/media/i2c/max2175.h b/drivers/media/i2c/max2175.h
index 1ece587c153d..4c722ea3e5f1 100644
--- a/drivers/media/i2c/max2175.h
+++ b/drivers/media/i2c/max2175.h
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- *
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
* Maxim Integrated MAX2175 RF to Bits tuner driver
*
* This driver & most of the hard coded values are based on the reference
diff --git a/drivers/media/i2c/mt9m001.c b/drivers/media/i2c/mt9m001.c
index 5613072908ac..210ea76adb53 100644
--- a/drivers/media/i2c/mt9m001.c
+++ b/drivers/media/i2c/mt9m001.c
@@ -167,7 +167,7 @@ static int multi_reg_write(struct i2c_client *client,
static int mt9m001_init(struct i2c_client *client)
{
- const struct mt9m001_reg init_regs[] = {
+ static const struct mt9m001_reg init_regs[] = {
/*
* Issue a soft reset. This returns all registers to their
* default values.
diff --git a/drivers/media/i2c/ov2659.c b/drivers/media/i2c/ov2659.c
index f4ded0669ff9..42f64175a6df 100644
--- a/drivers/media/i2c/ov2659.c
+++ b/drivers/media/i2c/ov2659.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Omnivision OV2659 CMOS Image Sensor driver
*
@@ -5,46 +6,21 @@
*
* Benoit Parrot <bparrot@ti.com>
* Lad, Prabhakar <prabhakar.csengg@gmail.com>
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#include <linux/clk.h>
#include <linux/delay.h>
-#include <linux/err.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
+#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
-#include <linux/kernel.h>
-#include <linux/media.h>
#include <linux/module.h>
-#include <linux/of.h>
#include <linux/of_graph.h>
-#include <linux/slab.h>
-#include <linux/uaccess.h>
-#include <linux/videodev2.h>
+#include <linux/pm_runtime.h>
-#include <media/media-entity.h>
#include <media/i2c/ov2659.h>
-#include <media/v4l2-common.h>
#include <media/v4l2-ctrls.h>
-#include <media/v4l2-device.h>
#include <media/v4l2-event.h>
#include <media/v4l2-fwnode.h>
#include <media/v4l2-image-sizes.h>
-#include <media/v4l2-mediabus.h>
#include <media/v4l2-subdev.h>
#define DRIVER_NAME "ov2659"
@@ -232,6 +208,10 @@ struct ov2659 {
struct sensor_register *format_ctrl_regs;
struct ov2659_pll_ctrl pll;
int streaming;
+ /* used to control the sensor PWDN pin */
+ struct gpio_desc *pwdn_gpio;
+ /* used to control the sensor RESETB pin */
+ struct gpio_desc *resetb_gpio;
};
static const struct sensor_register ov2659_init_regs[] = {
@@ -419,10 +399,14 @@ static struct sensor_register ov2659_720p[] = {
{ REG_TIMING_YINC, 0x11 },
{ REG_TIMING_VERT_FORMAT, 0x80 },
{ REG_TIMING_HORIZ_FORMAT, 0x00 },
+ { 0x370a, 0x12 },
{ 0x3a03, 0xe8 },
{ 0x3a09, 0x6f },
{ 0x3a0b, 0x5d },
{ 0x3a15, 0x9a },
+ { REG_VFIFO_READ_START_H, 0x00 },
+ { REG_VFIFO_READ_START_L, 0x80 },
+ { REG_ISP_CTRL02, 0x00 },
{ REG_NULL, 0x00 },
};
@@ -661,7 +645,7 @@ static struct sensor_register ov2659_vga[] = {
{ REG_TIMING_HORIZ_FORMAT, 0x01 },
{ 0x370a, 0x52 },
{ REG_VFIFO_READ_START_H, 0x00 },
- { REG_VFIFO_READ_START_L, 0x80 },
+ { REG_VFIFO_READ_START_L, 0xa0 },
{ REG_ISP_CTRL02, 0x10 },
{ REG_NULL, 0x00 },
};
@@ -709,7 +693,7 @@ static struct sensor_register ov2659_qvga[] = {
{ REG_TIMING_HORIZ_FORMAT, 0x01 },
{ 0x370a, 0x52 },
{ REG_VFIFO_READ_START_H, 0x00 },
- { REG_VFIFO_READ_START_L, 0x80 },
+ { REG_VFIFO_READ_START_L, 0xa0 },
{ REG_ISP_CTRL02, 0x10 },
{ REG_NULL, 0x00 },
};
@@ -1198,14 +1182,27 @@ static int ov2659_s_stream(struct v4l2_subdev *sd, int on)
/* Stop Streaming Sequence */
ov2659_set_streaming(ov2659, 0);
ov2659->streaming = on;
+ pm_runtime_put(&client->dev);
goto unlock;
}
- ov2659_set_pixel_clock(ov2659);
- ov2659_set_frame_size(ov2659);
- ov2659_set_format(ov2659);
- ov2659_set_streaming(ov2659, 1);
- ov2659->streaming = on;
+ ret = pm_runtime_get_sync(&client->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(&client->dev);
+ goto unlock;
+ }
+
+ ret = ov2659_init(sd, 0);
+ if (!ret)
+ ret = ov2659_set_pixel_clock(ov2659);
+ if (!ret)
+ ret = ov2659_set_frame_size(ov2659);
+ if (!ret)
+ ret = ov2659_set_format(ov2659);
+ if (!ret) {
+ ov2659_set_streaming(ov2659, 1);
+ ov2659->streaming = on;
+ }
unlock:
mutex_unlock(&ov2659->lock);
@@ -1239,12 +1236,18 @@ static int ov2659_s_ctrl(struct v4l2_ctrl *ctrl)
{
struct ov2659 *ov2659 =
container_of(ctrl->handler, struct ov2659, ctrls);
+	struct i2c_client *client = ov2659->client;
+	int ret = 0;
+
+	/* V4L2 control values are applied only when power is already up */
+ if (!pm_runtime_get_if_in_use(&client->dev))
+ return 0;
	switch (ctrl->id) {
	case V4L2_CID_TEST_PATTERN:
-		return ov2659_set_test_pattern(ov2659, ctrl->val);
+		ret = ov2659_set_test_pattern(ov2659, ctrl->val);
+		break;
	}
-	return 0;
+	pm_runtime_put(&client->dev);
+
+	return ret;
}
@@ -1257,6 +1260,39 @@ static const char * const ov2659_test_pattern_menu[] = {
"Vertical Color Bars",
};
+static int ov2659_power_off(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ov2659 *ov2659 = to_ov2659(sd);
+
+ dev_dbg(&client->dev, "%s:\n", __func__);
+
+ gpiod_set_value(ov2659->pwdn_gpio, 1);
+
+ return 0;
+}
+
+static int ov2659_power_on(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ov2659 *ov2659 = to_ov2659(sd);
+
+ dev_dbg(&client->dev, "%s:\n", __func__);
+
+ gpiod_set_value(ov2659->pwdn_gpio, 0);
+
+ if (ov2659->resetb_gpio) {
+ gpiod_set_value(ov2659->resetb_gpio, 1);
+ usleep_range(500, 1000);
+ gpiod_set_value(ov2659->resetb_gpio, 0);
+ usleep_range(3000, 5000);
+ }
+
+ return 0;
+}
+
/* -----------------------------------------------------------------------------
* V4L2 subdev internal operations
*/
@@ -1330,13 +1366,13 @@ static int ov2659_detect(struct v4l2_subdev *sd)
unsigned short id;
id = OV265X_ID(pid, ver);
- if (id != OV2659_ID)
+ if (id != OV2659_ID) {
dev_err(&client->dev,
"Sensor detection failed (%04X, %d)\n",
id, ret);
- else {
+ ret = -ENODEV;
+ } else {
dev_info(&client->dev, "Found OV%04X sensor\n", id);
- ret = ov2659_init(sd, 0);
}
}
@@ -1413,6 +1449,18 @@ static int ov2659_probe(struct i2c_client *client)
ov2659->xvclk_frequency > 27000000)
return -EINVAL;
+	/* Optional GPIO - don't fail if not present */
+ ov2659->pwdn_gpio = devm_gpiod_get_optional(&client->dev, "powerdown",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(ov2659->pwdn_gpio))
+ return PTR_ERR(ov2659->pwdn_gpio);
+
+	/* Optional GPIO - don't fail if not present */
+ ov2659->resetb_gpio = devm_gpiod_get_optional(&client->dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(ov2659->resetb_gpio))
+ return PTR_ERR(ov2659->resetb_gpio);
+
v4l2_ctrl_handler_init(&ov2659->ctrls, 2);
ov2659->link_frequency =
v4l2_ctrl_new_std(&ov2659->ctrls, &ov2659_ctrl_ops,
@@ -1458,6 +1506,8 @@ static int ov2659_probe(struct i2c_client *client)
ov2659->frame_size = &ov2659_framesizes[2];
ov2659->format_ctrl_regs = ov2659_formats[0].format_ctrl_regs;
+ ov2659_power_on(&client->dev);
+
ret = ov2659_detect(sd);
if (ret < 0)
goto error;
@@ -1471,10 +1521,15 @@ static int ov2659_probe(struct i2c_client *client)
dev_info(&client->dev, "%s sensor driver registered !!\n", sd->name);
+ pm_runtime_set_active(&client->dev);
+ pm_runtime_enable(&client->dev);
+ pm_runtime_idle(&client->dev);
+
return 0;
error:
v4l2_ctrl_handler_free(&ov2659->ctrls);
+ ov2659_power_off(&client->dev);
media_entity_cleanup(&sd->entity);
mutex_destroy(&ov2659->lock);
return ret;
@@ -1490,9 +1545,18 @@ static int ov2659_remove(struct i2c_client *client)
media_entity_cleanup(&sd->entity);
mutex_destroy(&ov2659->lock);
+ pm_runtime_disable(&client->dev);
+ if (!pm_runtime_status_suspended(&client->dev))
+ ov2659_power_off(&client->dev);
+ pm_runtime_set_suspended(&client->dev);
+
return 0;
}
+static const struct dev_pm_ops ov2659_pm_ops = {
+ SET_RUNTIME_PM_OPS(ov2659_power_off, ov2659_power_on, NULL)
+};
+
static const struct i2c_device_id ov2659_id[] = {
{ "ov2659", 0 },
{ /* sentinel */ },
@@ -1510,6 +1574,7 @@ MODULE_DEVICE_TABLE(of, ov2659_of_match);
static struct i2c_driver ov2659_i2c_driver = {
.driver = {
.name = DRIVER_NAME,
+ .pm = &ov2659_pm_ops,
.of_match_table = of_match_ptr(ov2659_of_match),
},
.probe_new = ov2659_probe,
diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c
index 500d9bbff10b..5e495c833d32 100644
--- a/drivers/media/i2c/ov5640.c
+++ b/drivers/media/i2c/ov5640.c
@@ -193,6 +193,7 @@ struct ov5640_mode_info {
struct ov5640_ctrls {
struct v4l2_ctrl_handler handler;
+ struct v4l2_ctrl *pixel_rate;
struct {
struct v4l2_ctrl *auto_exp;
struct v4l2_ctrl *exposure;
@@ -489,7 +490,6 @@ static const struct reg_value ov5640_setting_720P_1280_720[] = {
};
static const struct reg_value ov5640_setting_1080P_1920_1080[] = {
- {0x3008, 0x42, 0, 0},
{0x3c07, 0x08, 0, 0},
{0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
{0x3814, 0x11, 0, 0},
@@ -517,7 +517,7 @@ static const struct reg_value ov5640_setting_1080P_1920_1080[] = {
{0x3a0e, 0x03, 0, 0}, {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x04, 0, 0},
{0x3a15, 0x60, 0, 0}, {0x4407, 0x04, 0, 0},
{0x460b, 0x37, 0, 0}, {0x460c, 0x20, 0, 0}, {0x3824, 0x04, 0, 0},
- {0x4005, 0x1a, 0, 0}, {0x3008, 0x02, 0, 0},
+ {0x4005, 0x1a, 0, 0},
};
static const struct reg_value ov5640_setting_QSXGA_2592_1944[] = {
@@ -1611,9 +1611,24 @@ ov5640_find_mode(struct ov5640_dev *sensor, enum ov5640_frame_rate fr,
!(mode->hact == 640 && mode->vact == 480))
return NULL;
+ /* 2592x1944 only works at 15fps max */
+ if ((mode->hact == 2592 && mode->vact == 1944) &&
+ fr > OV5640_15_FPS)
+ return NULL;
+
return mode;
}
+static u64 ov5640_calc_pixel_rate(struct ov5640_dev *sensor)
+{
+ u64 rate;
+
+ rate = sensor->current_mode->vtot * sensor->current_mode->htot;
+ rate *= ov5640_framerates[sensor->current_fr];
+
+ return rate;
+}
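
(Editor's note) Worked example with illustrative numbers (the real htot/vtot come from the mode table elsewhere in this driver): for a 1080p mode with htot = 2500 and vtot = 1120 at 30 fps,

/*
 * rate = vtot * htot * fps = 1120 * 2500 * 30 = 84000000 pixels/s;
 * ov5640_set_mode() then multiplies by 16 bits per pixel and divides
 * by the number of CSI-2 data lanes to obtain the per-lane rate.
 */
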
+
/*
* sensor changes between scaling and subsampling, go through
* exposure calculation
@@ -1818,8 +1833,7 @@ static int ov5640_set_mode(struct ov5640_dev *sensor)
* All the formats we support have 16 bits per pixel, seems to require
* the same rate than YUV, so we can just use 16 bpp all the time.
*/
- rate = mode->vtot * mode->htot * 16;
- rate *= ov5640_framerates[sensor->current_fr];
+ rate = ov5640_calc_pixel_rate(sensor) * 16;
if (sensor->ep.bus_type == V4L2_MBUS_CSI2_DPHY) {
rate = rate / sensor->ep.bus.mipi_csi2.num_data_lanes;
ret = ov5640_set_mipi_pclk(sensor, rate);
@@ -2233,6 +2247,8 @@ static int ov5640_set_fmt(struct v4l2_subdev *sd,
if (mbus_fmt->code != sensor->fmt.code)
sensor->pending_fmt_change = true;
+ __v4l2_ctrl_s_ctrl_int64(sensor->ctrls.pixel_rate,
+ ov5640_calc_pixel_rate(sensor));
out:
mutex_unlock(&sensor->lock);
return ret;
@@ -2657,6 +2673,11 @@ static int ov5640_init_controls(struct ov5640_dev *sensor)
/* we can use our own mutex for the ctrl lock */
hdl->lock = &sensor->lock;
+ /* Clock related controls */
+ ctrls->pixel_rate = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_PIXEL_RATE,
+ 0, INT_MAX, 1,
+ ov5640_calc_pixel_rate(sensor));
+
/* Auto/manual white balance */
ctrls->auto_wb = v4l2_ctrl_new_std(hdl, ops,
V4L2_CID_AUTO_WHITE_BALANCE,
@@ -2704,6 +2725,7 @@ static int ov5640_init_controls(struct ov5640_dev *sensor)
goto free_ctrls;
}
+ ctrls->pixel_rate->flags |= V4L2_CTRL_FLAG_READ_ONLY;
ctrls->gain->flags |= V4L2_CTRL_FLAG_VOLATILE;
ctrls->exposure->flags |= V4L2_CTRL_FLAG_VOLATILE;
@@ -2816,6 +2838,9 @@ static int ov5640_s_frame_interval(struct v4l2_subdev *sd,
sensor->frame_interval = fi->interval;
sensor->current_mode = mode;
sensor->pending_mode_change = true;
+
+ __v4l2_ctrl_s_ctrl_int64(sensor->ctrls.pixel_rate,
+ ov5640_calc_pixel_rate(sensor));
}
out:
mutex_unlock(&sensor->lock);
diff --git a/drivers/media/i2c/ov5695.c b/drivers/media/i2c/ov5695.c
index 34b7046d9702..d6cd15bb699a 100644
--- a/drivers/media/i2c/ov5695.c
+++ b/drivers/media/i2c/ov5695.c
@@ -1325,7 +1325,7 @@ static int ov5695_probe(struct i2c_client *client,
goto err_power_off;
#endif
- ret = v4l2_async_register_subdev(sd);
+ ret = v4l2_async_register_subdev_sensor_common(sd);
if (ret) {
dev_err(dev, "v4l2 async register subdev failed\n");
goto err_clean_entity;
diff --git a/drivers/media/i2c/ov6650.c b/drivers/media/i2c/ov6650.c
index 5b9af5e5b7f1..91906b94f978 100644
--- a/drivers/media/i2c/ov6650.c
+++ b/drivers/media/i2c/ov6650.c
@@ -124,12 +124,13 @@
#define DEF_AECH 0x4D
-#define CLKRC_6MHz 0x00
+#define CLKRC_8MHz 0x00
#define CLKRC_12MHz 0x40
#define CLKRC_16MHz 0x80
#define CLKRC_24MHz 0xc0
#define CLKRC_DIV_MASK 0x3f
#define GET_CLKRC_DIV(x) (((x) & CLKRC_DIV_MASK) + 1)
+#define DEF_CLKRC 0x00
#define COMA_RESET BIT(7)
#define COMA_QCIF BIT(5)
@@ -196,13 +197,33 @@ struct ov6650 {
struct v4l2_clk *clk;
bool half_scale; /* scale down output by 2 */
struct v4l2_rect rect; /* sensor cropping window */
- unsigned long pclk_limit; /* from host */
- unsigned long pclk_max; /* from resolution and format */
struct v4l2_fract tpf; /* as requested with s_frame_interval */
u32 code;
- enum v4l2_colorspace colorspace;
};
+struct ov6650_xclk {
+ unsigned long rate;
+ u8 clkrc;
+};
+
+static const struct ov6650_xclk ov6650_xclk[] = {
+{
+ .rate = 8000000,
+ .clkrc = CLKRC_8MHz,
+},
+{
+ .rate = 12000000,
+ .clkrc = CLKRC_12MHz,
+},
+{
+ .rate = 16000000,
+ .clkrc = CLKRC_16MHz,
+},
+{
+ .rate = 24000000,
+ .clkrc = CLKRC_24MHz,
+},
+};
static u32 ov6650_codes[] = {
MEDIA_BUS_FMT_YUYV8_2X8,
@@ -213,6 +234,17 @@ static u32 ov6650_codes[] = {
MEDIA_BUS_FMT_Y8_1X8,
};
+static const struct v4l2_mbus_framefmt ov6650_def_fmt = {
+ .width = W_CIF,
+ .height = H_CIF,
+ .code = MEDIA_BUS_FMT_SBGGR8_1X8,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .field = V4L2_FIELD_NONE,
+ .ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT,
+ .quantization = V4L2_QUANTIZATION_DEFAULT,
+ .xfer_func = V4L2_XFER_FUNC_DEFAULT,
+};
+
/* read a register */
static int ov6650_reg_read(struct i2c_client *client, u8 reg, u8 *val)
{
@@ -465,38 +497,39 @@ static int ov6650_set_selection(struct v4l2_subdev *sd,
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct ov6650 *priv = to_ov6650(client);
- struct v4l2_rect rect = sel->r;
int ret;
if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE ||
sel->target != V4L2_SEL_TGT_CROP)
return -EINVAL;
- v4l_bound_align_image(&rect.width, 2, W_CIF, 1,
- &rect.height, 2, H_CIF, 1, 0);
- v4l_bound_align_image(&rect.left, DEF_HSTRT << 1,
- (DEF_HSTRT << 1) + W_CIF - (__s32)rect.width, 1,
- &rect.top, DEF_VSTRT << 1,
- (DEF_VSTRT << 1) + H_CIF - (__s32)rect.height, 1,
- 0);
+ v4l_bound_align_image(&sel->r.width, 2, W_CIF, 1,
+ &sel->r.height, 2, H_CIF, 1, 0);
+ v4l_bound_align_image(&sel->r.left, DEF_HSTRT << 1,
+ (DEF_HSTRT << 1) + W_CIF - (__s32)sel->r.width, 1,
+ &sel->r.top, DEF_VSTRT << 1,
+ (DEF_VSTRT << 1) + H_CIF - (__s32)sel->r.height,
+ 1, 0);
- ret = ov6650_reg_write(client, REG_HSTRT, rect.left >> 1);
+ ret = ov6650_reg_write(client, REG_HSTRT, sel->r.left >> 1);
if (!ret) {
- priv->rect.left = rect.left;
+ priv->rect.width += priv->rect.left - sel->r.left;
+ priv->rect.left = sel->r.left;
ret = ov6650_reg_write(client, REG_HSTOP,
- (rect.left + rect.width) >> 1);
+ (sel->r.left + sel->r.width) >> 1);
}
if (!ret) {
- priv->rect.width = rect.width;
- ret = ov6650_reg_write(client, REG_VSTRT, rect.top >> 1);
+ priv->rect.width = sel->r.width;
+ ret = ov6650_reg_write(client, REG_VSTRT, sel->r.top >> 1);
}
if (!ret) {
- priv->rect.top = rect.top;
+ priv->rect.height += priv->rect.top - sel->r.top;
+ priv->rect.top = sel->r.top;
ret = ov6650_reg_write(client, REG_VSTOP,
- (rect.top + rect.height) >> 1);
+ (sel->r.top + sel->r.height) >> 1);
}
if (!ret)
- priv->rect.height = rect.height;
+ priv->rect.height = sel->r.height;
return ret;
}
@@ -512,12 +545,20 @@ static int ov6650_get_fmt(struct v4l2_subdev *sd,
if (format->pad)
return -EINVAL;
- mf->width = priv->rect.width >> priv->half_scale;
- mf->height = priv->rect.height >> priv->half_scale;
- mf->code = priv->code;
- mf->colorspace = priv->colorspace;
- mf->field = V4L2_FIELD_NONE;
+ /* initialize response with default media bus frame format */
+ *mf = ov6650_def_fmt;
+
+ /* update media bus format code and frame size */
+ if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
+ mf->width = cfg->try_fmt.width;
+ mf->height = cfg->try_fmt.height;
+ mf->code = cfg->try_fmt.code;
+ } else {
+ mf->width = priv->rect.width >> priv->half_scale;
+ mf->height = priv->rect.height >> priv->half_scale;
+ mf->code = priv->code;
+ }
return 0;
}
@@ -526,22 +567,7 @@ static bool is_unscaled_ok(int width, int height, struct v4l2_rect *rect)
return width > rect->width >> 1 || height > rect->height >> 1;
}
-static u8 to_clkrc(struct v4l2_fract *timeperframe,
- unsigned long pclk_limit, unsigned long pclk_max)
-{
- unsigned long pclk;
-
- if (timeperframe->numerator && timeperframe->denominator)
- pclk = pclk_max * timeperframe->denominator /
- (FRAME_RATE_MAX * timeperframe->numerator);
- else
- pclk = pclk_max;
-
- if (pclk_limit && pclk_limit < pclk)
- pclk = pclk_limit;
-
- return (pclk_max - 1) / pclk;
-}
+#define to_clkrc(div) ((div) - 1)
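
(Editor's note) CLKRC stores the pixel clock divider minus one, so the new macro and GET_CLKRC_DIV() are inverses within the mask:

/*
 * GET_CLKRC_DIV(to_clkrc(div)) == ((div - 1) & CLKRC_DIV_MASK) + 1 == div
 * for 1 <= div <= CLKRC_DIV_MASK + 1, i.e. dividers 1..64.
 */
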
/* set the format we will capture in */
static int ov6650_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf)
@@ -560,8 +586,7 @@ static int ov6650_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf)
.r.height = mf->height << half_scale,
};
u32 code = mf->code;
- unsigned long mclk, pclk;
- u8 coma_set = 0, coma_mask = 0, coml_set, coml_mask, clkrc;
+ u8 coma_set = 0, coma_mask = 0, coml_set, coml_mask;
int ret;
/* select color matrix configuration for given color encoding */
@@ -610,58 +635,35 @@ static int ov6650_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf)
dev_err(&client->dev, "Pixel format not handled: 0x%x\n", code);
return -EINVAL;
}
- priv->code = code;
if (code == MEDIA_BUS_FMT_Y8_1X8 ||
code == MEDIA_BUS_FMT_SBGGR8_1X8) {
coml_mask = COML_ONE_CHANNEL;
coml_set = 0;
- priv->pclk_max = 4000000;
} else {
coml_mask = 0;
coml_set = COML_ONE_CHANNEL;
- priv->pclk_max = 8000000;
}
- if (code == MEDIA_BUS_FMT_SBGGR8_1X8)
- priv->colorspace = V4L2_COLORSPACE_SRGB;
- else if (code != 0)
- priv->colorspace = V4L2_COLORSPACE_JPEG;
-
if (half_scale) {
dev_dbg(&client->dev, "max resolution: QCIF\n");
coma_set |= COMA_QCIF;
- priv->pclk_max /= 2;
} else {
dev_dbg(&client->dev, "max resolution: CIF\n");
coma_mask |= COMA_QCIF;
}
- priv->half_scale = half_scale;
-
- clkrc = CLKRC_12MHz;
- mclk = 12000000;
- priv->pclk_limit = 1334000;
- dev_dbg(&client->dev, "using 12MHz input clock\n");
-
- clkrc |= to_clkrc(&priv->tpf, priv->pclk_limit, priv->pclk_max);
-
- pclk = priv->pclk_max / GET_CLKRC_DIV(clkrc);
- dev_dbg(&client->dev, "pixel clock divider: %ld.%ld\n",
- mclk / pclk, 10 * mclk % pclk / pclk);
ret = ov6650_set_selection(sd, NULL, &sel);
if (!ret)
ret = ov6650_reg_rmw(client, REG_COMA, coma_set, coma_mask);
- if (!ret)
- ret = ov6650_reg_write(client, REG_CLKRC, clkrc);
- if (!ret)
- ret = ov6650_reg_rmw(client, REG_COML, coml_set, coml_mask);
-
if (!ret) {
- mf->colorspace = priv->colorspace;
- mf->width = priv->rect.width >> half_scale;
- mf->height = priv->rect.height >> half_scale;
+ priv->half_scale = half_scale;
+
+ ret = ov6650_reg_rmw(client, REG_COML, coml_set, coml_mask);
}
+ if (!ret)
+ priv->code = code;
+
return ret;
}
@@ -680,8 +682,6 @@ static int ov6650_set_fmt(struct v4l2_subdev *sd,
v4l_bound_align_image(&mf->width, 2, W_CIF, 1,
&mf->height, 2, H_CIF, 1, 0);
- mf->field = V4L2_FIELD_NONE;
-
switch (mf->code) {
case MEDIA_BUS_FMT_Y10_1X10:
mf->code = MEDIA_BUS_FMT_Y8_1X8;
@@ -691,20 +691,39 @@ static int ov6650_set_fmt(struct v4l2_subdev *sd,
case MEDIA_BUS_FMT_YUYV8_2X8:
case MEDIA_BUS_FMT_VYUY8_2X8:
case MEDIA_BUS_FMT_UYVY8_2X8:
- mf->colorspace = V4L2_COLORSPACE_JPEG;
break;
default:
mf->code = MEDIA_BUS_FMT_SBGGR8_1X8;
/* fall through */
case MEDIA_BUS_FMT_SBGGR8_1X8:
- mf->colorspace = V4L2_COLORSPACE_SRGB;
break;
}
- if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE)
- return ov6650_s_fmt(sd, mf);
- cfg->try_fmt = *mf;
+ if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
+ /* store media bus format code and frame size in pad config */
+ cfg->try_fmt.width = mf->width;
+ cfg->try_fmt.height = mf->height;
+ cfg->try_fmt.code = mf->code;
+ /* return default mbus frame format updated with pad config */
+ *mf = ov6650_def_fmt;
+ mf->width = cfg->try_fmt.width;
+ mf->height = cfg->try_fmt.height;
+ mf->code = cfg->try_fmt.code;
+
+ } else {
+ /* apply new media bus format code and frame size */
+ int ret = ov6650_s_fmt(sd, mf);
+
+ if (ret)
+ return ret;
+
+ /* return default format updated with active size and code */
+ *mf = ov6650_def_fmt;
+ mf->width = priv->rect.width >> priv->half_scale;
+ mf->height = priv->rect.height >> priv->half_scale;
+ mf->code = priv->code;
+ }
return 0;
}
@@ -725,9 +744,7 @@ static int ov6650_g_frame_interval(struct v4l2_subdev *sd,
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct ov6650 *priv = to_ov6650(client);
- ival->interval.numerator = GET_CLKRC_DIV(to_clkrc(&priv->tpf,
- priv->pclk_limit, priv->pclk_max));
- ival->interval.denominator = FRAME_RATE_MAX;
+ ival->interval = priv->tpf;
dev_dbg(&client->dev, "Frame interval: %u/%u s\n",
ival->interval.numerator, ival->interval.denominator);
@@ -742,7 +759,6 @@ static int ov6650_s_frame_interval(struct v4l2_subdev *sd,
struct ov6650 *priv = to_ov6650(client);
struct v4l2_fract *tpf = &ival->interval;
int div, ret;
- u8 clkrc;
if (tpf->numerator == 0 || tpf->denominator == 0)
div = 1; /* Reset to full rate */
@@ -754,19 +770,12 @@ static int ov6650_s_frame_interval(struct v4l2_subdev *sd,
else if (div > GET_CLKRC_DIV(CLKRC_DIV_MASK))
div = GET_CLKRC_DIV(CLKRC_DIV_MASK);
- /*
- * Keep result to be used as tpf limit
- * for subsequent clock divider calculations
- */
- priv->tpf.numerator = div;
- priv->tpf.denominator = FRAME_RATE_MAX;
-
- clkrc = to_clkrc(&priv->tpf, priv->pclk_limit, priv->pclk_max);
-
- ret = ov6650_reg_rmw(client, REG_CLKRC, clkrc, CLKRC_DIV_MASK);
+ ret = ov6650_reg_rmw(client, REG_CLKRC, to_clkrc(div), CLKRC_DIV_MASK);
if (!ret) {
- tpf->numerator = GET_CLKRC_DIV(clkrc);
- tpf->denominator = FRAME_RATE_MAX;
+ priv->tpf.numerator = div;
+ priv->tpf.denominator = FRAME_RATE_MAX;
+
+ *tpf = priv->tpf;
}
return ret;
@@ -788,7 +797,7 @@ static int ov6650_reset(struct i2c_client *client)
}
/* program default register values */
-static int ov6650_prog_dflt(struct i2c_client *client)
+static int ov6650_prog_dflt(struct i2c_client *client, u8 clkrc)
{
int ret;
@@ -796,6 +805,8 @@ static int ov6650_prog_dflt(struct i2c_client *client)
ret = ov6650_reg_write(client, REG_COMA, 0); /* ~COMA_RESET */
if (!ret)
+ ret = ov6650_reg_write(client, REG_CLKRC, clkrc);
+ if (!ret)
ret = ov6650_reg_rmw(client, REG_COMB, 0, COMB_BAND_FILTER);
return ret;
@@ -805,8 +816,10 @@ static int ov6650_video_probe(struct v4l2_subdev *sd)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct ov6650 *priv = to_ov6650(client);
- u8 pidh, pidl, midh, midl;
- int ret;
+ const struct ov6650_xclk *xclk = NULL;
+ unsigned long rate;
+ u8 pidh, pidl, midh, midl;
+ int i, ret = 0;
priv->clk = v4l2_clk_get(&client->dev, NULL);
if (IS_ERR(priv->clk)) {
@@ -815,6 +828,33 @@ static int ov6650_video_probe(struct v4l2_subdev *sd)
return ret;
}
+ rate = v4l2_clk_get_rate(priv->clk);
+ for (i = 0; rate && i < ARRAY_SIZE(ov6650_xclk); i++) {
+ if (rate != ov6650_xclk[i].rate)
+ continue;
+
+ xclk = &ov6650_xclk[i];
+ dev_info(&client->dev, "using host default clock rate %lukHz\n",
+ rate / 1000);
+ break;
+ }
+ for (i = 0; !xclk && i < ARRAY_SIZE(ov6650_xclk); i++) {
+ ret = v4l2_clk_set_rate(priv->clk, ov6650_xclk[i].rate);
+ if (ret || v4l2_clk_get_rate(priv->clk) != ov6650_xclk[i].rate)
+ continue;
+
+ xclk = &ov6650_xclk[i];
+ dev_info(&client->dev, "using negotiated clock rate %lukHz\n",
+ xclk->rate / 1000);
+ break;
+ }
+ if (!xclk) {
+ dev_err(&client->dev, "unable to get supported clock rate\n");
+ if (!ret)
+ ret = -EINVAL;
+ goto eclkput;
+ }
+
ret = ov6650_s_power(sd, 1);
if (ret < 0)
goto eclkput;
@@ -848,7 +888,12 @@ static int ov6650_video_probe(struct v4l2_subdev *sd)
ret = ov6650_reset(client);
if (!ret)
- ret = ov6650_prog_dflt(client);
+ ret = ov6650_prog_dflt(client, xclk->clkrc);
+ if (!ret) {
+ struct v4l2_mbus_framefmt mf = ov6650_def_fmt;
+
+ ret = ov6650_s_fmt(sd, &mf);
+ }
if (!ret)
ret = v4l2_ctrl_handler_setup(&priv->hdl);
@@ -989,8 +1034,10 @@ static int ov6650_probe(struct i2c_client *client,
V4L2_CID_GAMMA, 0, 0xff, 1, 0x12);
priv->subdev.ctrl_handler = &priv->hdl;
- if (priv->hdl.error)
- return priv->hdl.error;
+ if (priv->hdl.error) {
+ ret = priv->hdl.error;
+ goto ectlhdlfree;
+ }
v4l2_ctrl_auto_cluster(2, &priv->autogain, 0, true);
v4l2_ctrl_auto_cluster(3, &priv->autowb, 0, true);
@@ -1001,15 +1048,18 @@ static int ov6650_probe(struct i2c_client *client,
priv->rect.top = DEF_VSTRT << 1;
priv->rect.width = W_CIF;
priv->rect.height = H_CIF;
- priv->half_scale = false;
- priv->code = MEDIA_BUS_FMT_YUYV8_2X8;
- priv->colorspace = V4L2_COLORSPACE_JPEG;
+
+ /* Hardware default frame interval */
+ priv->tpf.numerator = GET_CLKRC_DIV(DEF_CLKRC);
+ priv->tpf.denominator = FRAME_RATE_MAX;
priv->subdev.internal_ops = &ov6650_internal_ops;
ret = v4l2_async_register_subdev(&priv->subdev);
- if (ret)
- v4l2_ctrl_handler_free(&priv->hdl);
+ if (!ret)
+ return 0;
+ectlhdlfree:
+ v4l2_ctrl_handler_free(&priv->hdl);
return ret;
}
@@ -1041,6 +1091,6 @@ static struct i2c_driver ov6650_i2c_driver = {
module_i2c_driver(ov6650_i2c_driver);
-MODULE_DESCRIPTION("SoC Camera driver for OmniVision OV6650");
-MODULE_AUTHOR("Janusz Krzysztofik <jkrzyszt@tis.icnet.pl>");
+MODULE_DESCRIPTION("V4L2 subdevice driver for OmniVision OV6650 camera sensor");
+MODULE_AUTHOR("Janusz Krzysztofik <jmkrzyszt@gmail.com");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/i2c/saa711x_regs.h b/drivers/media/i2c/saa711x_regs.h
index 44fabe08234d..4b5f6985710b 100644
--- a/drivers/media/i2c/saa711x_regs.h
+++ b/drivers/media/i2c/saa711x_regs.h
@@ -1,5 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
- * SPDX-License-Identifier: GPL-2.0+
* saa711x - Philips SAA711x video decoder register specifications
*
* Copyright (c) 2006 Mauro Carvalho Chehab <mchehab@kernel.org>
diff --git a/drivers/media/i2c/smiapp/smiapp-core.c b/drivers/media/i2c/smiapp/smiapp-core.c
index 9adf8e034e7d..84f9771b5fed 100644
--- a/drivers/media/i2c/smiapp/smiapp-core.c
+++ b/drivers/media/i2c/smiapp/smiapp-core.c
@@ -682,66 +682,6 @@ static int smiapp_get_all_limits(struct smiapp_sensor *sensor)
return 0;
}
-static int smiapp_get_limits_binning(struct smiapp_sensor *sensor)
-{
- struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
- static u32 const limits[] = {
- SMIAPP_LIMIT_MIN_FRAME_LENGTH_LINES_BIN,
- SMIAPP_LIMIT_MAX_FRAME_LENGTH_LINES_BIN,
- SMIAPP_LIMIT_MIN_LINE_LENGTH_PCK_BIN,
- SMIAPP_LIMIT_MAX_LINE_LENGTH_PCK_BIN,
- SMIAPP_LIMIT_MIN_LINE_BLANKING_PCK_BIN,
- SMIAPP_LIMIT_FINE_INTEGRATION_TIME_MIN_BIN,
- SMIAPP_LIMIT_FINE_INTEGRATION_TIME_MAX_MARGIN_BIN,
- };
- static u32 const limits_replace[] = {
- SMIAPP_LIMIT_MIN_FRAME_LENGTH_LINES,
- SMIAPP_LIMIT_MAX_FRAME_LENGTH_LINES,
- SMIAPP_LIMIT_MIN_LINE_LENGTH_PCK,
- SMIAPP_LIMIT_MAX_LINE_LENGTH_PCK,
- SMIAPP_LIMIT_MIN_LINE_BLANKING_PCK,
- SMIAPP_LIMIT_FINE_INTEGRATION_TIME_MIN,
- SMIAPP_LIMIT_FINE_INTEGRATION_TIME_MAX_MARGIN,
- };
- unsigned int i;
- int rval;
-
- if (sensor->limits[SMIAPP_LIMIT_BINNING_CAPABILITY] ==
- SMIAPP_BINNING_CAPABILITY_NO) {
- for (i = 0; i < ARRAY_SIZE(limits); i++)
- sensor->limits[limits[i]] =
- sensor->limits[limits_replace[i]];
-
- return 0;
- }
-
- rval = smiapp_get_limits(sensor, limits, ARRAY_SIZE(limits));
- if (rval < 0)
- return rval;
-
- /*
- * Sanity check whether the binning limits are valid. If not,
- * use the non-binning ones.
- */
- if (sensor->limits[SMIAPP_LIMIT_MIN_FRAME_LENGTH_LINES_BIN]
- && sensor->limits[SMIAPP_LIMIT_MIN_LINE_LENGTH_PCK_BIN]
- && sensor->limits[SMIAPP_LIMIT_MIN_LINE_BLANKING_PCK_BIN])
- return 0;
-
- for (i = 0; i < ARRAY_SIZE(limits); i++) {
- dev_dbg(&client->dev,
- "replace limit 0x%8.8x \"%s\" = %d, 0x%x\n",
- smiapp_reg_limits[limits[i]].addr,
- smiapp_reg_limits[limits[i]].what,
- sensor->limits[limits_replace[i]],
- sensor->limits[limits_replace[i]]);
- sensor->limits[limits[i]] =
- sensor->limits[limits_replace[i]];
- }
-
- return 0;
-}
-
static int smiapp_get_mbus_formats(struct smiapp_sensor *sensor)
{
struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
@@ -891,60 +831,47 @@ static void smiapp_update_blanking(struct smiapp_sensor *sensor)
{
struct v4l2_ctrl *vblank = sensor->vblank;
struct v4l2_ctrl *hblank = sensor->hblank;
+ uint16_t min_fll, max_fll, min_llp, max_llp, min_lbp;
int min, max;
+ if (sensor->binning_vertical > 1 || sensor->binning_horizontal > 1) {
+ min_fll = sensor->limits[SMIAPP_LIMIT_MIN_FRAME_LENGTH_LINES_BIN];
+ max_fll = sensor->limits[SMIAPP_LIMIT_MAX_FRAME_LENGTH_LINES_BIN];
+ min_llp = sensor->limits[SMIAPP_LIMIT_MIN_LINE_LENGTH_PCK_BIN];
+ max_llp = sensor->limits[SMIAPP_LIMIT_MAX_LINE_LENGTH_PCK_BIN];
+ min_lbp = sensor->limits[SMIAPP_LIMIT_MIN_LINE_BLANKING_PCK_BIN];
+ } else {
+ min_fll = sensor->limits[SMIAPP_LIMIT_MIN_FRAME_LENGTH_LINES];
+ max_fll = sensor->limits[SMIAPP_LIMIT_MAX_FRAME_LENGTH_LINES];
+ min_llp = sensor->limits[SMIAPP_LIMIT_MIN_LINE_LENGTH_PCK];
+ max_llp = sensor->limits[SMIAPP_LIMIT_MAX_LINE_LENGTH_PCK];
+ min_lbp = sensor->limits[SMIAPP_LIMIT_MIN_LINE_BLANKING_PCK];
+ }
+
min = max_t(int,
sensor->limits[SMIAPP_LIMIT_MIN_FRAME_BLANKING_LINES],
- sensor->limits[SMIAPP_LIMIT_MIN_FRAME_LENGTH_LINES_BIN] -
+ min_fll -
sensor->pixel_array->crop[SMIAPP_PA_PAD_SRC].height);
- max = sensor->limits[SMIAPP_LIMIT_MAX_FRAME_LENGTH_LINES_BIN] -
- sensor->pixel_array->crop[SMIAPP_PA_PAD_SRC].height;
+ max = max_fll - sensor->pixel_array->crop[SMIAPP_PA_PAD_SRC].height;
__v4l2_ctrl_modify_range(vblank, min, max, vblank->step, min);
min = max_t(int,
- sensor->limits[SMIAPP_LIMIT_MIN_LINE_LENGTH_PCK_BIN] -
+ min_llp -
sensor->pixel_array->crop[SMIAPP_PA_PAD_SRC].width,
- sensor->limits[SMIAPP_LIMIT_MIN_LINE_BLANKING_PCK_BIN]);
- max = sensor->limits[SMIAPP_LIMIT_MAX_LINE_LENGTH_PCK_BIN] -
- sensor->pixel_array->crop[SMIAPP_PA_PAD_SRC].width;
+ min_lbp);
+ max = max_llp - sensor->pixel_array->crop[SMIAPP_PA_PAD_SRC].width;
__v4l2_ctrl_modify_range(hblank, min, max, hblank->step, min);
__smiapp_update_exposure_limits(sensor);
}
-static int smiapp_update_mode(struct smiapp_sensor *sensor)
+static int smiapp_pll_blanking_update(struct smiapp_sensor *sensor)
{
struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
- unsigned int binning_mode;
int rval;
- /* Binning has to be set up here; it affects limits */
- if (sensor->binning_horizontal == 1 &&
- sensor->binning_vertical == 1) {
- binning_mode = 0;
- } else {
- u8 binning_type =
- (sensor->binning_horizontal << 4)
- | sensor->binning_vertical;
-
- rval = smiapp_write(
- sensor, SMIAPP_REG_U8_BINNING_TYPE, binning_type);
- if (rval < 0)
- return rval;
-
- binning_mode = 1;
- }
- rval = smiapp_write(sensor, SMIAPP_REG_U8_BINNING_MODE, binning_mode);
- if (rval < 0)
- return rval;
-
- /* Get updated limits due to binning */
- rval = smiapp_get_limits_binning(sensor);
- if (rval < 0)
- return rval;
-
rval = smiapp_pll_update(sensor);
if (rval < 0)
return rval;
@@ -970,62 +897,91 @@ static int smiapp_update_mode(struct smiapp_sensor *sensor)
* SMIA++ NVM handling
*
*/
-static int smiapp_read_nvm(struct smiapp_sensor *sensor,
- unsigned char *nvm)
+
+static int smiapp_read_nvm_page(struct smiapp_sensor *sensor, u32 p, u8 *nvm,
+ u8 *status)
{
- u32 i, s, p, np, v;
- int rval = 0, rval2;
+ unsigned int i;
+ int rval;
+ u32 s;
- np = sensor->nvm_size / SMIAPP_NVM_PAGE_SIZE;
- for (p = 0; p < np; p++) {
- rval = smiapp_write(
- sensor,
- SMIAPP_REG_U8_DATA_TRANSFER_IF_1_PAGE_SELECT, p);
- if (rval)
- goto out;
+ *status = 0;
- rval = smiapp_write(sensor,
- SMIAPP_REG_U8_DATA_TRANSFER_IF_1_CTRL,
- SMIAPP_DATA_TRANSFER_IF_1_CTRL_EN |
- SMIAPP_DATA_TRANSFER_IF_1_CTRL_RD_EN);
- if (rval)
- goto out;
+ rval = smiapp_write(sensor,
+ SMIAPP_REG_U8_DATA_TRANSFER_IF_1_PAGE_SELECT, p);
+ if (rval)
+ return rval;
- for (i = 1000; i > 0; i--) {
- rval = smiapp_read(
- sensor,
- SMIAPP_REG_U8_DATA_TRANSFER_IF_1_STATUS, &s);
+ rval = smiapp_write(sensor, SMIAPP_REG_U8_DATA_TRANSFER_IF_1_CTRL,
+ SMIAPP_DATA_TRANSFER_IF_1_CTRL_EN);
+ if (rval)
+ return rval;
- if (rval)
- goto out;
+ rval = smiapp_read(sensor, SMIAPP_REG_U8_DATA_TRANSFER_IF_1_STATUS,
+ &s);
+ if (rval)
+ return rval;
+
+ if (s & SMIAPP_DATA_TRANSFER_IF_1_STATUS_EUSAGE) {
+ *status = s;
+ return -ENODATA;
+ }
+ if (sensor->limits[SMIAPP_LIMIT_DATA_TRANSFER_IF_CAPABILITY] &
+ SMIAPP_DATA_TRANSFER_IF_CAPABILITY_POLL) {
+ for (i = 1000; i > 0; i--) {
if (s & SMIAPP_DATA_TRANSFER_IF_1_STATUS_RD_READY)
break;
- }
- if (!i) {
- rval = -ETIMEDOUT;
- goto out;
- }
-
- for (i = 0; i < SMIAPP_NVM_PAGE_SIZE; i++) {
rval = smiapp_read(
sensor,
- SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_0 + i,
- &v);
- if (rval)
- goto out;
+ SMIAPP_REG_U8_DATA_TRANSFER_IF_1_STATUS,
+ &s);
- *nvm++ = v;
+ if (rval)
+ return rval;
}
+
+ if (!i)
+ return -ETIMEDOUT;
}
-out:
+ for (i = 0; i < SMIAPP_NVM_PAGE_SIZE; i++) {
+ u32 v;
+
+ rval = smiapp_read(sensor,
+ SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_0 + i,
+ &v);
+ if (rval)
+ return rval;
+
+ *nvm++ = v;
+ }
+
+ return 0;
+}
+
+static int smiapp_read_nvm(struct smiapp_sensor *sensor, unsigned char *nvm,
+ size_t nvm_size)
+{
+ u8 status = 0;
+ u32 p;
+ int rval = 0, rval2;
+
+ for (p = 0; p < nvm_size / SMIAPP_NVM_PAGE_SIZE && !rval; p++) {
+ rval = smiapp_read_nvm_page(sensor, p, nvm, &status);
+ nvm += SMIAPP_NVM_PAGE_SIZE;
+ }
+
+ if (rval == -ENODATA &&
+ status & SMIAPP_DATA_TRANSFER_IF_1_STATUS_EUSAGE)
+ rval = 0;
+
rval2 = smiapp_write(sensor, SMIAPP_REG_U8_DATA_TRANSFER_IF_1_CTRL, 0);
if (rval < 0)
return rval;
else
- return rval2;
+ return rval2 ?: p * SMIAPP_NVM_PAGE_SIZE;
}
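
(Editor's note) The `?:` above is GNU C's conditional with an omitted middle operand:

	/* rval2 ?: n  behaves like  rval2 ? rval2 : n, evaluating rval2 once */

so the function now returns the error from disabling the transfer interface if there was one, otherwise the number of NVM bytes read.
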
/*
@@ -1324,10 +1280,6 @@ static int smiapp_power_on(struct device *dev)
rval = __v4l2_ctrl_handler_setup(&sensor->src->ctrl_handler);
if (rval)
goto out_cci_addr_fail;
-
- rval = smiapp_update_mode(sensor);
- if (rval < 0)
- goto out_cci_addr_fail;
}
mutex_unlock(&sensor->mutex);
@@ -1387,6 +1339,7 @@ static int smiapp_power_off(struct device *dev)
static int smiapp_start_streaming(struct smiapp_sensor *sensor)
{
struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
+ unsigned int binning_mode;
int rval;
mutex_lock(&sensor->mutex);
@@ -1397,6 +1350,27 @@ static int smiapp_start_streaming(struct smiapp_sensor *sensor)
if (rval)
goto out;
+ /* Binning configuration */
+ if (sensor->binning_horizontal == 1 &&
+ sensor->binning_vertical == 1) {
+ binning_mode = 0;
+ } else {
+ u8 binning_type =
+ (sensor->binning_horizontal << 4)
+ | sensor->binning_vertical;
+
+ rval = smiapp_write(
+ sensor, SMIAPP_REG_U8_BINNING_TYPE, binning_type);
+ if (rval < 0)
+ goto out;
+
+ binning_mode = 1;
+ }
+ rval = smiapp_write(sensor, SMIAPP_REG_U8_BINNING_MODE, binning_mode);
+ if (rval < 0)
+ goto out;
+
+ /* Set up PLL */
rval = smiapp_pll_configure(sensor);
if (rval)
goto out;
@@ -2073,7 +2047,7 @@ static int smiapp_set_compose(struct v4l2_subdev *subdev,
smiapp_propagate(subdev, cfg, sel->which, V4L2_SEL_TGT_COMPOSE);
if (sel->which == V4L2_SUBDEV_FORMAT_ACTIVE)
- return smiapp_update_mode(sensor);
+ return smiapp_pll_blanking_update(sensor);
return 0;
}
@@ -2312,41 +2286,34 @@ smiapp_sysfs_nvm_read(struct device *dev, struct device_attribute *attr,
struct v4l2_subdev *subdev = i2c_get_clientdata(to_i2c_client(dev));
struct i2c_client *client = v4l2_get_subdevdata(subdev);
struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
- unsigned int nbytes;
+ int rval;
if (!sensor->dev_init_done)
return -EBUSY;
- if (!sensor->nvm_size) {
- int rval;
-
- /* NVM not read yet - read it now */
- sensor->nvm_size = sensor->hwcfg->nvm_size;
+ rval = pm_runtime_get_sync(&client->dev);
+ if (rval < 0) {
+ if (rval != -EBUSY && rval != -EAGAIN)
+ pm_runtime_set_active(&client->dev);
+ pm_runtime_put_noidle(&client->dev);
+ return -ENODEV;
+ }
- rval = pm_runtime_get_sync(&client->dev);
- if (rval < 0) {
- if (rval != -EBUSY && rval != -EAGAIN)
- pm_runtime_set_active(&client->dev);
- pm_runtime_put(&client->dev);
- return -ENODEV;
- }
+ rval = smiapp_read_nvm(sensor, buf, PAGE_SIZE);
+ if (rval < 0) {
+ pm_runtime_put(&client->dev);
+ dev_err(&client->dev, "nvm read failed\n");
+ return -ENODEV;
+ }
- if (smiapp_read_nvm(sensor, sensor->nvm)) {
- dev_err(&client->dev, "nvm read failed\n");
- return -ENODEV;
- }
+ pm_runtime_mark_last_busy(&client->dev);
+ pm_runtime_put_autosuspend(&client->dev);
- pm_runtime_mark_last_busy(&client->dev);
- pm_runtime_put_autosuspend(&client->dev);
- }
/*
* NVM is still way below a PAGE_SIZE, so we can safely
* assume this for now.
*/
- nbytes = min_t(unsigned int, sensor->nvm_size, PAGE_SIZE);
- memcpy(buf, sensor->nvm, nbytes);
-
- return nbytes;
+ return rval;
}
static DEVICE_ATTR(nvm, S_IRUGO, smiapp_sysfs_nvm_read, NULL);
@@ -2810,16 +2777,13 @@ static struct smiapp_hwconfig *smiapp_get_hwconfig(struct device *dev)
}
}
- /* NVM size is not mandatory */
- fwnode_property_read_u32(fwnode, "nokia,nvm-size", &hwcfg->nvm_size);
-
rval = fwnode_property_read_u32(dev_fwnode(dev), "clock-frequency",
&hwcfg->ext_clk);
if (rval)
dev_info(dev, "can't get clock-frequency\n");
- dev_dbg(dev, "nvm %d, clk %d, mode %d\n",
- hwcfg->nvm_size, hwcfg->ext_clk, hwcfg->csi_signalling_mode);
+ dev_dbg(dev, "clk %d, mode %d\n", hwcfg->ext_clk,
+ hwcfg->csi_signalling_mode);
if (!bus_cfg.nr_of_link_frequencies) {
dev_warn(dev, "no link frequencies defined\n");
@@ -2862,7 +2826,6 @@ static int smiapp_probe(struct i2c_client *client)
return -ENOMEM;
sensor->hwcfg = hwcfg;
- mutex_init(&sensor->mutex);
sensor->src = &sensor->ssds[sensor->ssds_used];
v4l2_i2c_subdev_init(&sensor->src->sd, client, &smiapp_ops);
@@ -2926,6 +2889,8 @@ static int smiapp_probe(struct i2c_client *client)
if (rval < 0)
return rval;
+ mutex_init(&sensor->mutex);
+
rval = smiapp_identify_module(sensor);
if (rval) {
rval = -ENODEV;
@@ -3003,17 +2968,10 @@ static int smiapp_probe(struct i2c_client *client)
rval = -ENOENT;
goto out_power_off;
}
- /* SMIA++ NVM initialization - it will be read from the sensor
- * when it is first requested by userspace.
- */
- if (sensor->minfo.smiapp_version && sensor->hwcfg->nvm_size) {
- sensor->nvm = devm_kzalloc(&client->dev,
- sensor->hwcfg->nvm_size, GFP_KERNEL);
- if (sensor->nvm == NULL) {
- rval = -ENOMEM;
- goto out_cleanup;
- }
+ if (sensor->minfo.smiapp_version &&
+ sensor->limits[SMIAPP_LIMIT_DATA_TRANSFER_IF_CAPABILITY] &
+ SMIAPP_DATA_TRANSFER_IF_CAPABILITY_SUPPORTED) {
if (device_create_file(&client->dev, &dev_attr_nvm) != 0) {
dev_err(&client->dev, "sysfs nvm entry failed\n");
rval = -EBUSY;
@@ -3086,7 +3044,7 @@ static int smiapp_probe(struct i2c_client *client)
}
mutex_lock(&sensor->mutex);
- rval = smiapp_update_mode(sensor);
+ rval = smiapp_pll_blanking_update(sensor);
mutex_unlock(&sensor->mutex);
if (rval) {
dev_err(&client->dev, "update mode failed\n");
@@ -3101,19 +3059,23 @@ static int smiapp_probe(struct i2c_client *client)
if (rval < 0)
goto out_media_entity_cleanup;
- rval = v4l2_async_register_subdev_sensor_common(&sensor->src->sd);
- if (rval < 0)
- goto out_media_entity_cleanup;
-
pm_runtime_set_active(&client->dev);
pm_runtime_get_noresume(&client->dev);
pm_runtime_enable(&client->dev);
+
+ rval = v4l2_async_register_subdev_sensor_common(&sensor->src->sd);
+ if (rval < 0)
+ goto out_disable_runtime_pm;
+
pm_runtime_set_autosuspend_delay(&client->dev, 1000);
pm_runtime_use_autosuspend(&client->dev);
pm_runtime_put_autosuspend(&client->dev);
return 0;
+out_disable_runtime_pm:
+ pm_runtime_disable(&client->dev);
+
out_media_entity_cleanup:
media_entity_cleanup(&sensor->src->sd.entity);
@@ -3122,6 +3084,7 @@ out_cleanup:
out_power_off:
smiapp_power_off(&client->dev);
+ mutex_destroy(&sensor->mutex);
return rval;
}
@@ -3144,6 +3107,7 @@ static int smiapp_remove(struct i2c_client *client)
media_entity_cleanup(&sensor->ssds[i].sd.entity);
}
smiapp_cleanup(sensor);
+ mutex_destroy(&sensor->mutex);
return 0;
}
diff --git a/drivers/media/i2c/smiapp/smiapp-reg.h b/drivers/media/i2c/smiapp/smiapp-reg.h
index 2804a4d9a4e1..43505cd0616e 100644
--- a/drivers/media/i2c/smiapp/smiapp-reg.h
+++ b/drivers/media/i2c/smiapp/smiapp-reg.h
@@ -11,25 +11,29 @@
#ifndef __SMIAPP_REG_H_
#define __SMIAPP_REG_H_
+#include <linux/bits.h>
+
#include "smiapp-reg-defs.h"
/* Bits for above register */
-#define SMIAPP_IMAGE_ORIENTATION_HFLIP (1 << 0)
-#define SMIAPP_IMAGE_ORIENTATION_VFLIP (1 << 1)
-
-#define SMIAPP_DATA_TRANSFER_IF_1_CTRL_EN (1 << 0)
-#define SMIAPP_DATA_TRANSFER_IF_1_CTRL_RD_EN (0 << 1)
-#define SMIAPP_DATA_TRANSFER_IF_1_CTRL_WR_EN (1 << 1)
-#define SMIAPP_DATA_TRANSFER_IF_1_CTRL_ERR_CLEAR (1 << 2)
-#define SMIAPP_DATA_TRANSFER_IF_1_STATUS_RD_READY (1 << 0)
-#define SMIAPP_DATA_TRANSFER_IF_1_STATUS_WR_READY (1 << 1)
-#define SMIAPP_DATA_TRANSFER_IF_1_STATUS_EDATA (1 << 2)
-#define SMIAPP_DATA_TRANSFER_IF_1_STATUS_EUSAGE (1 << 3)
-
-#define SMIAPP_SOFTWARE_RESET (1 << 0)
-
-#define SMIAPP_FLASH_MODE_CAPABILITY_SINGLE_STROBE (1 << 0)
-#define SMIAPP_FLASH_MODE_CAPABILITY_MULTIPLE_STROBE (1 << 1)
+#define SMIAPP_IMAGE_ORIENTATION_HFLIP BIT(0)
+#define SMIAPP_IMAGE_ORIENTATION_VFLIP BIT(1)
+
+#define SMIAPP_DATA_TRANSFER_IF_1_CTRL_EN BIT(0)
+#define SMIAPP_DATA_TRANSFER_IF_1_CTRL_WR_EN BIT(1)
+#define SMIAPP_DATA_TRANSFER_IF_1_CTRL_ERR_CLEAR BIT(2)
+#define SMIAPP_DATA_TRANSFER_IF_1_STATUS_RD_READY BIT(0)
+#define SMIAPP_DATA_TRANSFER_IF_1_STATUS_WR_READY BIT(1)
+#define SMIAPP_DATA_TRANSFER_IF_1_STATUS_EDATA BIT(2)
+#define SMIAPP_DATA_TRANSFER_IF_1_STATUS_EUSAGE BIT(3)
+
+#define SMIAPP_DATA_TRANSFER_IF_CAPABILITY_SUPPORTED BIT(0)
+#define SMIAPP_DATA_TRANSFER_IF_CAPABILITY_POLL BIT(2)
+
+#define SMIAPP_SOFTWARE_RESET BIT(0)
+
+#define SMIAPP_FLASH_MODE_CAPABILITY_SINGLE_STROBE BIT(0)
+#define SMIAPP_FLASH_MODE_CAPABILITY_MULTIPLE_STROBE BIT(1)
#define SMIAPP_DPHY_CTRL_AUTOMATIC 0
/* DPHY control based on REQUESTED_LINK_BIT_RATE_MBPS */
diff --git a/drivers/media/i2c/smiapp/smiapp.h b/drivers/media/i2c/smiapp/smiapp.h
index ecf8a17dbe37..3ab874a5deba 100644
--- a/drivers/media/i2c/smiapp/smiapp.h
+++ b/drivers/media/i2c/smiapp/smiapp.h
@@ -208,9 +208,6 @@ struct smiapp_sensor {
bool dev_init_done;
u8 compressed_min_bpp;
- u8 *nvm; /* nvm memory buffer */
- unsigned int nvm_size; /* bytes */
-
struct smiapp_module_info minfo;
struct smiapp_pll pll;
diff --git a/drivers/media/i2c/st-mipid02.c b/drivers/media/i2c/st-mipid02.c
index 81285b8d5cfb..003ba22334cd 100644
--- a/drivers/media/i2c/st-mipid02.c
+++ b/drivers/media/i2c/st-mipid02.c
@@ -971,6 +971,11 @@ static int mipid02_probe(struct i2c_client *client)
bridge->reset_gpio = devm_gpiod_get_optional(dev, "reset",
GPIOD_OUT_HIGH);
+ if (IS_ERR(bridge->reset_gpio)) {
+ dev_err(dev, "failed to get reset GPIO\n");
+ return PTR_ERR(bridge->reset_gpio);
+ }
+
ret = mipid02_get_regulators(bridge);
if (ret) {
dev_err(dev, "failed to get regulators %d", ret);
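
devm_gpiod_get_optional() returns NULL when the GPIO is simply absent but an ERR_PTR() on real failures such as -EPROBE_DEFER, so the added IS_ERR() check is required even for an "optional" line. A minimal sketch of the idiom:

    #include <linux/gpio/consumer.h>

    static int example_get_reset(struct device *dev)
    {
    	struct gpio_desc *reset;

    	reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
    	if (IS_ERR(reset))	/* real error, e.g. -EPROBE_DEFER */
    		return PTR_ERR(reset);
    	if (!reset)		/* line not wired up: fine for "optional" */
    		dev_dbg(dev, "no reset GPIO, continuing without it\n");
    	return 0;
    }
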
diff --git a/drivers/media/i2c/tda1997x_regs.h b/drivers/media/i2c/tda1997x_regs.h
index ecf87534613b..d9b3daada07d 100644
--- a/drivers/media/i2c/tda1997x_regs.h
+++ b/drivers/media/i2c/tda1997x_regs.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2018 Gateworks Corporation
*/
diff --git a/drivers/media/i2c/tvp5150_reg.h b/drivers/media/i2c/tvp5150_reg.h
index 9088186c24d1..f716129adf09 100644
--- a/drivers/media/i2c/tvp5150_reg.h
+++ b/drivers/media/i2c/tvp5150_reg.h
@@ -1,5 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
- * SPDX-License-Identifier: GPL-2.0
*
* tvp5150 - Texas Instruments TVP5150A/AM1 video decoder registers
*
diff --git a/drivers/media/i2c/vpx3220.c b/drivers/media/i2c/vpx3220.c
index 39f66e7a0e42..8be03fe5928c 100644
--- a/drivers/media/i2c/vpx3220.c
+++ b/drivers/media/i2c/vpx3220.c
@@ -375,7 +375,7 @@ static int vpx3220_s_routing(struct v4l2_subdev *sd,
input = 1: COMPOSITE input
input = 2: SVHS input */
- const int input_vals[3][2] = {
+ static const int input_vals[3][2] = {
{0x0c, 0},
{0x0d, 0},
{0x0e, 1}
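
Marking a constant lookup table "static const" moves it from a per-call stack initialization into .rodata, which is why these one-line changes shrink and speed up the functions. Illustration with a hypothetical table:

    /* Built on the stack every call: */
    static int lookup_slow(int idx)
    {
    	const int table[3] = { 0x0c, 0x0d, 0x0e };
    	return table[idx];
    }

    /* Emitted once into .rodata, no per-call copy: */
    static int lookup_fast(int idx)
    {
    	static const int table[3] = { 0x0c, 0x0d, 0x0e };
    	return table[idx];
    }
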
diff --git a/drivers/media/mc/mc-device.c b/drivers/media/mc/mc-device.c
index e19df5165e78..da8088351135 100644
--- a/drivers/media/mc/mc-device.c
+++ b/drivers/media/mc/mc-device.c
@@ -575,6 +575,38 @@ static void media_device_release(struct media_devnode *devnode)
dev_dbg(devnode->parent, "Media device released\n");
}
+static void __media_device_unregister_entity(struct media_entity *entity)
+{
+ struct media_device *mdev = entity->graph_obj.mdev;
+ struct media_link *link, *tmp;
+ struct media_interface *intf;
+ unsigned int i;
+
+ ida_free(&mdev->entity_internal_idx, entity->internal_idx);
+
+ /* Remove all interface links pointing to this entity */
+ list_for_each_entry(intf, &mdev->interfaces, graph_obj.list) {
+ list_for_each_entry_safe(link, tmp, &intf->links, list) {
+ if (link->entity == entity)
+ __media_remove_intf_link(link);
+ }
+ }
+
+ /* Remove all data links that belong to this entity */
+ __media_entity_remove_links(entity);
+
+ /* Remove all pads that belong to this entity */
+ for (i = 0; i < entity->num_pads; i++)
+ media_gobj_destroy(&entity->pads[i].graph_obj);
+
+ /* Remove the entity */
+ media_gobj_destroy(&entity->graph_obj);
+
+ /* invoke entity_notify callbacks to handle entity removal?? */
+
+ entity->graph_obj.mdev = NULL;
+}
+
/**
* media_device_register_entity - Register an entity with a media device
* @mdev: The media device
@@ -632,6 +664,7 @@ int __must_check media_device_register_entity(struct media_device *mdev,
*/
ret = media_graph_walk_init(&new, mdev);
if (ret) {
+ __media_device_unregister_entity(entity);
mutex_unlock(&mdev->graph_mutex);
return ret;
}
@@ -644,38 +677,6 @@ int __must_check media_device_register_entity(struct media_device *mdev,
}
EXPORT_SYMBOL_GPL(media_device_register_entity);
-static void __media_device_unregister_entity(struct media_entity *entity)
-{
- struct media_device *mdev = entity->graph_obj.mdev;
- struct media_link *link, *tmp;
- struct media_interface *intf;
- unsigned int i;
-
- ida_free(&mdev->entity_internal_idx, entity->internal_idx);
-
- /* Remove all interface links pointing to this entity */
- list_for_each_entry(intf, &mdev->interfaces, graph_obj.list) {
- list_for_each_entry_safe(link, tmp, &intf->links, list) {
- if (link->entity == entity)
- __media_remove_intf_link(link);
- }
- }
-
- /* Remove all data links that belong to this entity */
- __media_entity_remove_links(entity);
-
- /* Remove all pads that belong to this entity */
- for (i = 0; i < entity->num_pads; i++)
- media_gobj_destroy(&entity->pads[i].graph_obj);
-
- /* Remove the entity */
- media_gobj_destroy(&entity->graph_obj);
-
- /* invoke entity_notify callbacks to handle entity removal?? */
-
- entity->graph_obj.mdev = NULL;
-}
-
void media_device_unregister_entity(struct media_entity *entity)
{
struct media_device *mdev = entity->graph_obj.mdev;
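
__media_device_unregister_entity() is moved above media_device_register_entity() so that the latter's new error path can call it when media_graph_walk_init() fails, undoing the partially completed registration. A plain-C sketch of the shape of the fix, with hypothetical helpers standing in for the media-core calls:

    struct ent { int registered; };

    static void unregister_ent(struct ent *e) { e->registered = 0; }

    static int walk_init(void) { return -1; /* pretend it failed */ }

    static int register_ent(struct ent *e)
    {
    	int ret;

    	e->registered = 1;		/* step 1: add to tables */

    	ret = walk_init();		/* step 2: may fail */
    	if (ret) {
    		unregister_ent(e);	/* error path added by the patch */
    		return ret;
    	}
    	return 0;
    }
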
diff --git a/drivers/media/pci/cx18/cx18-ioctl.c b/drivers/media/pci/cx18/cx18-ioctl.c
index 85f3e7307538..fa57e12f2ac8 100644
--- a/drivers/media/pci/cx18/cx18-ioctl.c
+++ b/drivers/media/pci/cx18/cx18-ioctl.c
@@ -664,7 +664,7 @@ static int _cx18_process_idx_data(struct cx18_buffer *buf,
struct cx18_enc_idx_entry *e_buf;
/* Frame type lookup: 1=I, 2=P, 4=B */
- const int mapping[8] = {
+ static const int mapping[8] = {
-1, V4L2_ENC_IDX_FRAME_I, V4L2_ENC_IDX_FRAME_P,
-1, V4L2_ENC_IDX_FRAME_B, -1, -1, -1
};
diff --git a/drivers/media/pci/cx23885/cx23888-ir.c b/drivers/media/pci/cx23885/cx23888-ir.c
index e880afe37f15..d59ca3601785 100644
--- a/drivers/media/pci/cx23885/cx23888-ir.c
+++ b/drivers/media/pci/cx23885/cx23888-ir.c
@@ -1167,8 +1167,11 @@ int cx23888_ir_probe(struct cx23885_dev *dev)
return -ENOMEM;
spin_lock_init(&state->rx_kfifo_lock);
- if (kfifo_alloc(&state->rx_kfifo, CX23888_IR_RX_KFIFO_SIZE, GFP_KERNEL))
+ if (kfifo_alloc(&state->rx_kfifo, CX23888_IR_RX_KFIFO_SIZE,
+ GFP_KERNEL)) {
+ kfree(state);
return -ENOMEM;
+ }
state->dev = dev;
sd = &state->sd;
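
The added kfree() closes a leak: when kfifo_alloc() fails, the state structure allocated just before it must be released on the error return. A minimal sketch, assuming a hypothetical state struct with an embedded kfifo:

    #include <linux/kfifo.h>
    #include <linux/slab.h>

    struct example_state {
    	struct kfifo rx_kfifo;
    };

    static struct example_state *example_alloc(unsigned int fifo_size)
    {
    	struct example_state *state = kzalloc(sizeof(*state), GFP_KERNEL);

    	if (!state)
    		return NULL;

    	if (kfifo_alloc(&state->rx_kfifo, fifo_size, GFP_KERNEL)) {
    		kfree(state);	/* don't leak the allocation above */
    		return NULL;
    	}
    	return state;
    }
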
diff --git a/drivers/media/pci/cx88/cx88-cards.c b/drivers/media/pci/cx88/cx88-cards.c
index 3cd87626cd79..9fa388626bae 100644
--- a/drivers/media/pci/cx88/cx88-cards.c
+++ b/drivers/media/pci/cx88/cx88-cards.c
@@ -1781,6 +1781,41 @@ static const struct cx88_board cx88_boards[] = {
},
.mpeg = CX88_MPEG_DVB,
},
+ [CX88_BOARD_NOTONLYTV_LV3H] = {
+ .name = "NotOnlyTV LV3H",
+ .tuner_type = TUNER_XC2028,
+ .radio_type = UNSET,
+ .tuner_addr = 0x61,
+ .radio_addr = ADDR_UNSET,
+ /* if gpio1:bit9 is enabled, DVB-T won't work */
+
+ .input = { {
+ .type = CX88_VMUX_TELEVISION,
+ .vmux = 0,
+ .gpio0 = 0x0000,
+ .gpio1 = 0xa141,
+ .gpio2 = 0x0000,
+ }, {
+ .type = CX88_VMUX_COMPOSITE1,
+ .vmux = 1,
+ .gpio0 = 0x0000,
+ .gpio1 = 0xa161,
+ .gpio2 = 0x0000,
+ }, {
+ .type = CX88_VMUX_SVIDEO,
+ .vmux = 2,
+ .gpio0 = 0x0000,
+ .gpio1 = 0xa161,
+ .gpio2 = 0x0000,
+ } },
+ .radio = {
+ .type = CX88_RADIO,
+ .gpio0 = 0x0000,
+ .gpio1 = 0xa141,
+ .gpio2 = 0x0000,
+ },
+ .mpeg = CX88_MPEG_DVB,
+ },
[CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_PRO] = {
.name = "DViCO FusionHDTV DVB-T PRO",
.tuner_type = TUNER_XC2028,
@@ -2654,6 +2689,7 @@ static const struct cx88_subid cx88_subids[] = {
.subdevice = 0x6f18,
.card = CX88_BOARD_WINFAST_TV2000_XP_GLOBAL,
}, {
+ /* Also NotOnlyTV LV3H (version 1.11 is silkscreened on the board) */
.subvendor = 0x14f1,
.subdevice = 0x8852,
.card = CX88_BOARD_GENIATECH_X8000_MT,
@@ -3121,6 +3157,7 @@ static int cx88_xc2028_tuner_callback(struct cx88_core *core,
case CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_PRO:
case CX88_BOARD_DVICO_FUSIONHDTV_5_PCI_NANO:
return cx88_dvico_xc2028_callback(core, command, arg);
+ case CX88_BOARD_NOTONLYTV_LV3H:
case CX88_BOARD_WINFAST_TV2000_XP_GLOBAL:
case CX88_BOARD_WINFAST_DTV1800H:
return cx88_xc3028_winfast1800h_callback(core, command, arg);
@@ -3322,6 +3359,7 @@ static void cx88_card_setup_pre_i2c(struct cx88_core *core)
udelay(1000);
break;
+ case CX88_BOARD_NOTONLYTV_LV3H:
case CX88_BOARD_WINFAST_TV2000_XP_GLOBAL:
case CX88_BOARD_WINFAST_DTV1800H:
cx88_xc3028_winfast1800h_callback(core, XC2028_TUNER_RESET, 0);
@@ -3378,6 +3416,11 @@ void cx88_setup_xc3028(struct cx88_core *core, struct xc2028_ctrl *ctl)
*/
ctl->disable_power_mgmt = 1;
break;
+ case CX88_BOARD_NOTONLYTV_LV3H:
+ ctl->demod = XC3028_FE_ZARLINK456;
+ ctl->fname = XC3028L_DEFAULT_FIRMWARE;
+ ctl->read_not_reliable = 1;
+ break;
case CX88_BOARD_WINFAST_TV2000_XP_GLOBAL:
case CX88_BOARD_PROLINK_PV_GLOBAL_XTREME:
case CX88_BOARD_PROLINK_PV_8000GT:
diff --git a/drivers/media/pci/cx88/cx88-dvb.c b/drivers/media/pci/cx88/cx88-dvb.c
index 0292d0947cc7..202ff9e8c257 100644
--- a/drivers/media/pci/cx88/cx88-dvb.c
+++ b/drivers/media/pci/cx88/cx88-dvb.c
@@ -1378,6 +1378,7 @@ static int dvb_register(struct cx8802_dev *dev)
fe->ops.tuner_ops.set_config(fe, &ctl);
}
break;
+ case CX88_BOARD_NOTONLYTV_LV3H:
case CX88_BOARD_PINNACLE_HYBRID_PCTV:
case CX88_BOARD_WINFAST_DTV1800H:
fe0->dvb.frontend = dvb_attach(zl10353_attach,
diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
index dcc0f02aeb70..b8abcd550604 100644
--- a/drivers/media/pci/cx88/cx88-video.c
+++ b/drivers/media/pci/cx88/cx88-video.c
@@ -1277,7 +1277,7 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
core = cx88_core_get(dev->pci);
if (!core) {
err = -EINVAL;
- goto fail_free;
+ goto fail_disable;
}
dev->core = core;
@@ -1323,7 +1323,7 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
cc->step, cc->default_value);
if (!vc) {
err = core->audio_hdl.error;
- goto fail_core;
+ goto fail_irq;
}
vc->priv = (void *)cc;
}
@@ -1337,7 +1337,7 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
cc->step, cc->default_value);
if (!vc) {
err = core->video_hdl.error;
- goto fail_core;
+ goto fail_irq;
}
vc->priv = (void *)cc;
if (vc->id == V4L2_CID_CHROMA_AGC)
@@ -1509,11 +1509,14 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
fail_unreg:
cx8800_unregister_video(dev);
- free_irq(pci_dev->irq, dev);
mutex_unlock(&core->lock);
+fail_irq:
+ free_irq(pci_dev->irq, dev);
fail_core:
core->v4ldev = NULL;
cx88_core_put(core, dev->pci);
+fail_disable:
+ pci_disable_device(pci_dev);
fail_free:
kfree(dev);
return err;
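
The relabelled goto targets restore the invariant that error labels unwind in exact reverse order of acquisition; previously a failure after request_irq() leaked the IRQ and nothing disabled the PCI device. A generic sketch of the ladder, assuming the usual pci/irq helpers and a hypothetical driver state struct:

    static int example_initdev(struct pci_dev *pci_dev)
    {
    	struct example_dev *dev;	/* hypothetical driver state */
    	int err;

    	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
    	if (!dev)
    		return -ENOMEM;

    	err = pci_enable_device(pci_dev);
    	if (err)
    		goto fail_free;

    	err = request_irq(pci_dev->irq, example_irq, IRQF_SHARED,
    			  "example", dev);
    	if (err)
    		goto fail_disable;

    	return 0;

    fail_disable:
    	pci_disable_device(pci_dev);
    fail_free:
    	kfree(dev);
    	return err;
    }
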
diff --git a/drivers/media/pci/cx88/cx88.h b/drivers/media/pci/cx88/cx88.h
index 744a22328ebc..ce4acf6de6aa 100644
--- a/drivers/media/pci/cx88/cx88.h
+++ b/drivers/media/pci/cx88/cx88.h
@@ -228,6 +228,7 @@ extern const struct sram_channel cx88_sram_channels[];
#define CX88_BOARD_WINFAST_DTV1800H_XC4000 88
#define CX88_BOARD_WINFAST_TV2000_XP_GLOBAL_6F36 89
#define CX88_BOARD_WINFAST_TV2000_XP_GLOBAL_6F43 90
+#define CX88_BOARD_NOTONLYTV_LV3H 91
enum cx88_itype {
CX88_VMUX_COMPOSITE1 = 1,
diff --git a/drivers/media/pci/dm1105/dm1105.c b/drivers/media/pci/dm1105/dm1105.c
index bb3a8cc9de0c..9dce31d2b525 100644
--- a/drivers/media/pci/dm1105/dm1105.c
+++ b/drivers/media/pci/dm1105/dm1105.c
@@ -11,7 +11,6 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
diff --git a/drivers/media/pci/ivtv/ivtv-vbi.c b/drivers/media/pci/ivtv/ivtv-vbi.c
index 6d22c0107d33..80478b026d75 100644
--- a/drivers/media/pci/ivtv/ivtv-vbi.c
+++ b/drivers/media/pci/ivtv/ivtv-vbi.c
@@ -325,7 +325,7 @@ static u32 compress_raw_buf(struct ivtv *itv, u8 *buf, u32 size)
static u32 compress_sliced_buf(struct ivtv *itv, u32 line, u8 *buf, u32 size, u8 sav)
{
u32 line_size = itv->vbi.sliced_decoder_line_size;
- struct v4l2_decode_vbi_line vbi;
+ struct v4l2_decode_vbi_line vbi = {};
int i;
unsigned lines = 0;
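
The "= {}" initializer zeroes every member of the on-stack struct before it is handed to the decoder callback; without it the unwritten fields contain indeterminate stack contents. A short sketch:

    static void example(void)
    {
    	struct v4l2_decode_vbi_line vbi = {};	/* all members zeroed */

    	/*
    	 * Only the fields we explicitly set differ from zero; the
    	 * callee can no longer read uninitialized stack garbage.
    	 */
    	vbi.line = 10;
    }
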
diff --git a/drivers/media/pci/mantis/hopper_cards.c b/drivers/media/pci/mantis/hopper_cards.c
index 67aebe759232..c0bd5d7e148b 100644
--- a/drivers/media/pci/mantis/hopper_cards.c
+++ b/drivers/media/pci/mantis/hopper_cards.c
@@ -60,10 +60,8 @@ static irqreturn_t hopper_irq_handler(int irq, void *dev_id)
struct mantis_ca *ca;
mantis = (struct mantis_pci *) dev_id;
- if (unlikely(!mantis)) {
- dprintk(MANTIS_ERROR, 1, "Mantis == NULL");
+ if (unlikely(!mantis))
return IRQ_NONE;
- }
ca = mantis->mantis_ca;
stat = mmread(MANTIS_INT_STAT);
diff --git a/drivers/media/pci/mantis/mantis_cards.c b/drivers/media/pci/mantis/mantis_cards.c
index deadd0b92233..906e4500d87d 100644
--- a/drivers/media/pci/mantis/mantis_cards.c
+++ b/drivers/media/pci/mantis/mantis_cards.c
@@ -69,10 +69,8 @@ static irqreturn_t mantis_irq_handler(int irq, void *dev_id)
struct mantis_ca *ca;
mantis = (struct mantis_pci *) dev_id;
- if (unlikely(mantis == NULL)) {
- dprintk(MANTIS_ERROR, 1, "Mantis == NULL");
+ if (unlikely(!mantis))
return IRQ_NONE;
- }
ca = mantis->mantis_ca;
stat = mmread(MANTIS_INT_STAT);
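
For a shared interrupt line the handler must return IRQ_NONE for interrupts that are not its own; the patch keeps that behaviour while dropping the log call, since printing from hard-IRQ context for a can't-happen condition only adds noise. Sketch with a hypothetical device type:

    static irqreturn_t example_irq_handler(int irq, void *dev_id)
    {
    	struct example_pci *dev = dev_id;	/* hypothetical type */

    	if (unlikely(!dev))
    		return IRQ_NONE;	/* not ours: let the core try others */

    	/* ... read status, service the device ... */
    	return IRQ_HANDLED;
    }
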
diff --git a/drivers/media/pci/saa7164/saa7164-core.c b/drivers/media/pci/saa7164/saa7164-core.c
index 9ae04e18e6c6..126d085be9a7 100644
--- a/drivers/media/pci/saa7164/saa7164-core.c
+++ b/drivers/media/pci/saa7164/saa7164-core.c
@@ -13,12 +13,10 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
+#include <linux/debugfs.h>
#include <linux/delay.h>
#include <asm/div64.h>
-#ifdef CONFIG_PROC_FS
-#include <linux/proc_fs.h>
-#endif
#include "saa7164.h"
MODULE_DESCRIPTION("Driver for NXP SAA7164 based TV cards");
@@ -1045,92 +1043,138 @@ static void saa7164_dev_unregister(struct saa7164_dev *dev)
return;
}
-#ifdef CONFIG_PROC_FS
-static int saa7164_proc_show(struct seq_file *m, void *v)
+#ifdef CONFIG_DEBUG_FS
+static void *saa7164_seq_start(struct seq_file *s, loff_t *pos)
{
struct saa7164_dev *dev;
- struct tmComResBusInfo *b;
- struct list_head *list;
- int i, c;
+ loff_t index = *pos;
- if (saa7164_devcount == 0)
- return 0;
+ mutex_lock(&devlist);
+ list_for_each_entry(dev, &saa7164_devlist, devlist) {
+ if (index-- == 0) {
+ mutex_unlock(&devlist);
+ return dev;
+ }
+ }
+ mutex_unlock(&devlist);
- list_for_each(list, &saa7164_devlist) {
- dev = list_entry(list, struct saa7164_dev, devlist);
- seq_printf(m, "%s = %p\n", dev->name, dev);
+ return NULL;
+}
- /* Lock the bus from any other access */
- b = &dev->bus;
- mutex_lock(&b->lock);
+static void *saa7164_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+ struct saa7164_dev *dev = v;
+ void *ret;
- seq_printf(m, " .m_pdwSetWritePos = 0x%x (0x%08x)\n",
- b->m_dwSetReadPos, saa7164_readl(b->m_dwSetReadPos));
+ mutex_lock(&devlist);
+ if (list_is_last(&dev->devlist, &saa7164_devlist))
+ ret = NULL;
+ else
+ ret = list_next_entry(dev, devlist);
+ mutex_unlock(&devlist);
- seq_printf(m, " .m_pdwSetReadPos = 0x%x (0x%08x)\n",
- b->m_dwSetWritePos, saa7164_readl(b->m_dwSetWritePos));
+ ++*pos;
- seq_printf(m, " .m_pdwGetWritePos = 0x%x (0x%08x)\n",
- b->m_dwGetReadPos, saa7164_readl(b->m_dwGetReadPos));
+ return ret;
+}
- seq_printf(m, " .m_pdwGetReadPos = 0x%x (0x%08x)\n",
- b->m_dwGetWritePos, saa7164_readl(b->m_dwGetWritePos));
- c = 0;
- seq_printf(m, "\n Set Ring:\n");
- seq_printf(m, "\n addr 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f\n");
- for (i = 0; i < b->m_dwSizeSetRing; i++) {
- if (c == 0)
- seq_printf(m, " %04x:", i);
+static void saa7164_seq_stop(struct seq_file *s, void *v)
+{
+}
- seq_printf(m, " %02x", readb(b->m_pdwSetRing + i));
+static int saa7164_seq_show(struct seq_file *m, void *v)
+{
+ struct saa7164_dev *dev = v;
+ struct tmComResBusInfo *b;
+ int i, c;
- if (++c == 16) {
- seq_printf(m, "\n");
- c = 0;
- }
- }
+ seq_printf(m, "%s = %p\n", dev->name, dev);
- c = 0;
- seq_printf(m, "\n Get Ring:\n");
- seq_printf(m, "\n addr 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f\n");
- for (i = 0; i < b->m_dwSizeGetRing; i++) {
- if (c == 0)
- seq_printf(m, " %04x:", i);
+ /* Lock the bus from any other access */
+ b = &dev->bus;
+ mutex_lock(&b->lock);
- seq_printf(m, " %02x", readb(b->m_pdwGetRing + i));
+ seq_printf(m, " .m_pdwSetWritePos = 0x%x (0x%08x)\n",
+ b->m_dwSetReadPos, saa7164_readl(b->m_dwSetReadPos));
- if (++c == 16) {
- seq_printf(m, "\n");
- c = 0;
- }
+ seq_printf(m, " .m_pdwSetReadPos = 0x%x (0x%08x)\n",
+ b->m_dwSetWritePos, saa7164_readl(b->m_dwSetWritePos));
+
+ seq_printf(m, " .m_pdwGetWritePos = 0x%x (0x%08x)\n",
+ b->m_dwGetReadPos, saa7164_readl(b->m_dwGetReadPos));
+
+ seq_printf(m, " .m_pdwGetReadPos = 0x%x (0x%08x)\n",
+ b->m_dwGetWritePos, saa7164_readl(b->m_dwGetWritePos));
+ c = 0;
+ seq_puts(m, "\n Set Ring:\n");
+ seq_puts(m, "\n addr 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f\n");
+ for (i = 0; i < b->m_dwSizeSetRing; i++) {
+ if (c == 0)
+ seq_printf(m, " %04x:", i);
+
+ seq_printf(m, " %02x", readb(b->m_pdwSetRing + i));
+
+ if (++c == 16) {
+ seq_puts(m, "\n");
+ c = 0;
}
+ }
- mutex_unlock(&b->lock);
+ c = 0;
+ seq_puts(m, "\n Get Ring:\n");
+ seq_puts(m, "\n addr 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f\n");
+ for (i = 0; i < b->m_dwSizeGetRing; i++) {
+ if (c == 0)
+ seq_printf(m, " %04x:", i);
+ seq_printf(m, " %02x", readb(b->m_pdwGetRing + i));
+
+ if (++c == 16) {
+ seq_puts(m, "\n");
+ c = 0;
+ }
}
+ mutex_unlock(&b->lock);
+
return 0;
}
-static struct proc_dir_entry *saa7164_pe;
+static const struct seq_operations saa7164_seq_ops = {
+ .start = saa7164_seq_start,
+ .next = saa7164_seq_next,
+ .stop = saa7164_seq_stop,
+ .show = saa7164_seq_show,
+};
-static int saa7164_proc_create(void)
+static int saa7164_open(struct inode *inode, struct file *file)
{
- saa7164_pe = proc_create_single("saa7164", 0444, NULL, saa7164_proc_show);
- if (!saa7164_pe)
- return -ENOMEM;
+ return seq_open(file, &saa7164_seq_ops);
+}
- return 0;
+static const struct file_operations saa7164_operations = {
+ .owner = THIS_MODULE,
+ .open = saa7164_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static struct dentry *saa7164_dentry;
+
+static void __init saa7164_debugfs_create(void)
+{
+ saa7164_dentry = debugfs_create_file("saa7164", 0444, NULL, NULL,
+ &saa7164_operations);
}
-static void saa7164_proc_destroy(void)
+static void __exit saa7164_debugfs_remove(void)
{
- if (saa7164_pe)
- remove_proc_entry("saa7164", NULL);
+ debugfs_remove(saa7164_dentry);
}
#else
-static int saa7164_proc_create(void) { return 0; }
-static void saa7164_proc_destroy(void) {}
+static void saa7164_debugfs_create(void) { }
+static void saa7164_debugfs_remove(void) { }
#endif
static int saa7164_thread_function(void *data)
@@ -1507,7 +1551,7 @@ static int __init saa7164_init(void)
if (ret)
return ret;
- saa7164_proc_create();
+ saa7164_debugfs_create();
pr_info("saa7164 driver loaded\n");
@@ -1516,7 +1560,7 @@ static int __init saa7164_init(void)
static void __exit saa7164_fini(void)
{
- saa7164_proc_destroy();
+ saa7164_debugfs_remove();
pci_unregister_driver(&saa7164_pci_driver);
}
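
The conversion swaps a single-shot procfs dump for a debugfs file backed by a seq_file iterator, so each device is emitted by one show() call instead of one large buffer. A self-contained sketch of the iterator shape over a fixed array (hypothetical names):

    #include <linux/seq_file.h>

    static int ex_values[] = { 1, 2, 3 };

    static void *ex_seq_start(struct seq_file *s, loff_t *pos)
    {
    	return *pos < ARRAY_SIZE(ex_values) ? &ex_values[*pos] : NULL;
    }

    static void *ex_seq_next(struct seq_file *s, void *v, loff_t *pos)
    {
    	++*pos;
    	return ex_seq_start(s, pos);
    }

    static void ex_seq_stop(struct seq_file *s, void *v)
    {
    }

    static int ex_seq_show(struct seq_file *s, void *v)
    {
    	seq_printf(s, "%d\n", *(int *)v);
    	return 0;
    }

    static const struct seq_operations ex_seq_ops = {
    	.start	= ex_seq_start,
    	.next	= ex_seq_next,
    	.stop	= ex_seq_stop,
    	.show	= ex_seq_show,
    };

The open() method then just calls seq_open(file, &ex_seq_ops), exactly as the patch does.
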
diff --git a/drivers/media/pci/smipcie/smipcie.h b/drivers/media/pci/smipcie/smipcie.h
index 65bc7e29450b..2b5e0154814c 100644
--- a/drivers/media/pci/smipcie/smipcie.h
+++ b/drivers/media/pci/smipcie/smipcie.h
@@ -14,7 +14,6 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
diff --git a/drivers/media/pci/solo6x10/solo6x10-g723.c b/drivers/media/pci/solo6x10/solo6x10-g723.c
index 30c8f2ec9c3c..eaa57d835ea8 100644
--- a/drivers/media/pci/solo6x10/solo6x10-g723.c
+++ b/drivers/media/pci/solo6x10/solo6x10-g723.c
@@ -353,7 +353,7 @@ static int solo_snd_pcm_init(struct solo_dev *solo_dev)
snd_pcm_lib_preallocate_pages_for_all(pcm,
SNDRV_DMA_TYPE_CONTINUOUS,
- snd_dma_continuous_data(GFP_KERNEL),
+ NULL,
G723_PERIOD_BYTES * PERIODS,
G723_PERIOD_BYTES * PERIODS);
diff --git a/drivers/media/pci/tw686x/tw686x-audio.c b/drivers/media/pci/tw686x/tw686x-audio.c
index 40373bd23381..7786e51d19ae 100644
--- a/drivers/media/pci/tw686x/tw686x-audio.c
+++ b/drivers/media/pci/tw686x/tw686x-audio.c
@@ -300,7 +300,7 @@ static int tw686x_snd_pcm_init(struct tw686x_dev *dev)
snd_pcm_lib_preallocate_pages_for_all(pcm,
SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(dev->pci_dev),
+ &dev->pci_dev->dev,
TW686X_AUDIO_PAGE_MAX * AUDIO_DMA_SIZE_MAX,
TW686X_AUDIO_PAGE_MAX * AUDIO_DMA_SIZE_MAX);
return 0;
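
This and the preceding solo6x10 hunk track an ALSA API change: snd_pcm_lib_preallocate_pages_for_all() now takes the DMA-capable struct device * directly instead of the old snd_dma_pci_data()/snd_dma_continuous_data() cookies. Sketch of the updated call, with illustrative sizes:

    static void example_prealloc(struct snd_pcm *pcm, struct pci_dev *pci_dev)
    {
    	/* Pass the device that performs DMA, not a wrapped cookie. */
    	snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
    					      &pci_dev->dev,
    					      64 * 1024, 64 * 1024);
    }
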
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index f1f61419fd29..e84f35d3a68e 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -483,6 +483,7 @@ config VIDEO_QCOM_VENUS
tristate "Qualcomm Venus V4L2 encoder/decoder driver"
depends on VIDEO_DEV && VIDEO_V4L2
depends on (ARCH_QCOM && IOMMU_DMA) || COMPILE_TEST
+ depends on INTERCONNECT || !INTERCONNECT
select QCOM_MDT_LOADER if ARCH_QCOM
select QCOM_SCM if ARCH_QCOM
select VIDEOBUF2_DMA_SG
@@ -493,6 +494,19 @@ config VIDEO_QCOM_VENUS
on various Qualcomm SoCs.
To compile this driver as a module choose m here.
+config VIDEO_SUN8I_DEINTERLACE
+ tristate "Allwinner Deinterlace driver"
+ depends on VIDEO_DEV && VIDEO_V4L2
+ depends on ARCH_SUNXI || COMPILE_TEST
+ depends on COMMON_CLK && OF
+ depends on PM
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_MEM2MEM_DEV
+ help
+ Support for the Allwinner deinterlace unit with scaling
+ capability found on some SoCs, like H3.
+ To compile this driver as a module choose m here.
+
endif # V4L_MEM2MEM_DRIVERS
# TI VIDEO PORT Helper Modules
@@ -585,9 +599,10 @@ config VIDEO_MESON_G12A_AO_CEC
config CEC_GPIO
tristate "Generic GPIO-based CEC driver"
- depends on PREEMPT || COMPILE_TEST
+ depends on PREEMPTION || COMPILE_TEST
select CEC_CORE
select CEC_PIN
+ select CEC_NOTIFIER
select GPIOLIB
help
This is a generic GPIO-based CEC driver.
diff --git a/drivers/media/platform/Makefile b/drivers/media/platform/Makefile
index 6ee7eb0d36f4..d13db96e3015 100644
--- a/drivers/media/platform/Makefile
+++ b/drivers/media/platform/Makefile
@@ -19,9 +19,7 @@ obj-$(CONFIG_VIDEO_VIVID) += vivid/
obj-$(CONFIG_VIDEO_VIM2M) += vim2m.o
obj-$(CONFIG_VIDEO_VICODEC) += vicodec/
-obj-$(CONFIG_VIDEO_TI_VPE) += ti-vpe/
-
-obj-$(CONFIG_VIDEO_TI_CAL) += ti-vpe/
+obj-y += ti-vpe/
obj-$(CONFIG_VIDEO_MX2_EMMAPRP) += mx2_emmaprp.o
obj-$(CONFIG_VIDEO_CODA) += coda/
diff --git a/drivers/media/platform/am437x/am437x-vpfe.c b/drivers/media/platform/am437x/am437x-vpfe.c
index 2b42ba1f5949..09104304bd06 100644
--- a/drivers/media/platform/am437x/am437x-vpfe.c
+++ b/drivers/media/platform/am437x/am437x-vpfe.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* TI VPFE capture Driver
*
@@ -5,19 +6,6 @@
*
* Benoit Parrot <bparrot@ti.com>
* Lad, Prabhakar <prabhakar.csengg@gmail.com>
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#include <linux/delay.h>
@@ -69,125 +57,64 @@ static const struct vpfe_standard vpfe_standards[] = {
{V4L2_STD_625_50, 720, 576, {54, 59}, 1},
};
-struct bus_format {
- unsigned int width;
- unsigned int bpp;
-};
-
-/*
- * struct vpfe_fmt - VPFE media bus format information
- * @code: V4L2 media bus format code
- * @shifted: V4L2 media bus format code for the same pixel layout but
- * shifted to be 8 bits per pixel. =0 if format is not shiftable.
- * @pixelformat: V4L2 pixel format FCC identifier
- * @width: Bits per pixel (when transferred over a bus)
- * @bpp: Bytes per pixel (when stored in memory)
- * @supported: Indicates format supported by subdev
- */
-struct vpfe_fmt {
- u32 fourcc;
- u32 code;
- struct bus_format l;
- struct bus_format s;
- bool supported;
- u32 index;
-};
-
-static struct vpfe_fmt formats[] = {
+static struct vpfe_fmt formats[VPFE_NUM_FORMATS] = {
{
.fourcc = V4L2_PIX_FMT_YUYV,
.code = MEDIA_BUS_FMT_YUYV8_2X8,
- .l.width = 10,
- .l.bpp = 4,
- .s.width = 8,
- .s.bpp = 2,
- .supported = false,
+ .bitsperpixel = 16,
}, {
.fourcc = V4L2_PIX_FMT_UYVY,
.code = MEDIA_BUS_FMT_UYVY8_2X8,
- .l.width = 10,
- .l.bpp = 4,
- .s.width = 8,
- .s.bpp = 2,
- .supported = false,
+ .bitsperpixel = 16,
}, {
.fourcc = V4L2_PIX_FMT_YVYU,
.code = MEDIA_BUS_FMT_YVYU8_2X8,
- .l.width = 10,
- .l.bpp = 4,
- .s.width = 8,
- .s.bpp = 2,
- .supported = false,
+ .bitsperpixel = 16,
}, {
.fourcc = V4L2_PIX_FMT_VYUY,
.code = MEDIA_BUS_FMT_VYUY8_2X8,
- .l.width = 10,
- .l.bpp = 4,
- .s.width = 8,
- .s.bpp = 2,
- .supported = false,
+ .bitsperpixel = 16,
}, {
.fourcc = V4L2_PIX_FMT_SBGGR8,
.code = MEDIA_BUS_FMT_SBGGR8_1X8,
- .l.width = 10,
- .l.bpp = 2,
- .s.width = 8,
- .s.bpp = 1,
- .supported = false,
+ .bitsperpixel = 8,
}, {
.fourcc = V4L2_PIX_FMT_SGBRG8,
.code = MEDIA_BUS_FMT_SGBRG8_1X8,
- .l.width = 10,
- .l.bpp = 2,
- .s.width = 8,
- .s.bpp = 1,
- .supported = false,
+ .bitsperpixel = 8,
}, {
.fourcc = V4L2_PIX_FMT_SGRBG8,
.code = MEDIA_BUS_FMT_SGRBG8_1X8,
- .l.width = 10,
- .l.bpp = 2,
- .s.width = 8,
- .s.bpp = 1,
- .supported = false,
+ .bitsperpixel = 8,
}, {
.fourcc = V4L2_PIX_FMT_SRGGB8,
.code = MEDIA_BUS_FMT_SRGGB8_1X8,
- .l.width = 10,
- .l.bpp = 2,
- .s.width = 8,
- .s.bpp = 1,
- .supported = false,
+ .bitsperpixel = 8,
}, {
.fourcc = V4L2_PIX_FMT_RGB565,
.code = MEDIA_BUS_FMT_RGB565_2X8_LE,
- .l.width = 10,
- .l.bpp = 4,
- .s.width = 8,
- .s.bpp = 2,
- .supported = false,
+ .bitsperpixel = 16,
}, {
.fourcc = V4L2_PIX_FMT_RGB565X,
.code = MEDIA_BUS_FMT_RGB565_2X8_BE,
- .l.width = 10,
- .l.bpp = 4,
- .s.width = 8,
- .s.bpp = 2,
- .supported = false,
+ .bitsperpixel = 16,
},
};
-static int
-__vpfe_get_format(struct vpfe_device *vpfe,
- struct v4l2_format *format, unsigned int *bpp);
+static int __subdev_get_format(struct vpfe_device *vpfe,
+ struct v4l2_mbus_framefmt *fmt);
+static int vpfe_calc_format_size(struct vpfe_device *vpfe,
+ const struct vpfe_fmt *fmt,
+ struct v4l2_format *f);
-static struct vpfe_fmt *find_format_by_code(unsigned int code)
+static struct vpfe_fmt *find_format_by_code(struct vpfe_device *vpfe,
+ unsigned int code)
{
struct vpfe_fmt *fmt;
unsigned int k;
- for (k = 0; k < ARRAY_SIZE(formats); k++) {
- fmt = &formats[k];
+ for (k = 0; k < vpfe->num_active_fmt; k++) {
+ fmt = vpfe->active_fmt[k];
if (fmt->code == code)
return fmt;
}
@@ -195,13 +122,14 @@ static struct vpfe_fmt *find_format_by_code(unsigned int code)
return NULL;
}
-static struct vpfe_fmt *find_format_by_pix(unsigned int pixelformat)
+static struct vpfe_fmt *find_format_by_pix(struct vpfe_device *vpfe,
+ unsigned int pixelformat)
{
struct vpfe_fmt *fmt;
unsigned int k;
- for (k = 0; k < ARRAY_SIZE(formats); k++) {
- fmt = &formats[k];
+ for (k = 0; k < vpfe->num_active_fmt; k++) {
+ fmt = vpfe->active_fmt[k];
if (fmt->fourcc == pixelformat)
return fmt;
}
@@ -209,48 +137,18 @@ static struct vpfe_fmt *find_format_by_pix(unsigned int pixelformat)
return NULL;
}
-static void
-mbus_to_pix(struct vpfe_device *vpfe,
- const struct v4l2_mbus_framefmt *mbus,
- struct v4l2_pix_format *pix, unsigned int *bpp)
+static unsigned int __get_bytesperpixel(struct vpfe_device *vpfe,
+ const struct vpfe_fmt *fmt)
{
struct vpfe_subdev_info *sdinfo = vpfe->current_subdev;
unsigned int bus_width = sdinfo->vpfe_param.bus_width;
- struct vpfe_fmt *fmt;
-
- fmt = find_format_by_code(mbus->code);
- if (WARN_ON(fmt == NULL)) {
- pr_err("Invalid mbus code set\n");
- *bpp = 1;
- return;
- }
-
- memset(pix, 0, sizeof(*pix));
- v4l2_fill_pix_format(pix, mbus);
- pix->pixelformat = fmt->fourcc;
- *bpp = (bus_width == 10) ? fmt->l.bpp : fmt->s.bpp;
+ u32 bpp, bus_width_bytes, clocksperpixel;
- /* pitch should be 32 bytes aligned */
- pix->bytesperline = ALIGN(pix->width * *bpp, 32);
- pix->sizeimage = pix->bytesperline * pix->height;
-}
-
-static void pix_to_mbus(struct vpfe_device *vpfe,
- struct v4l2_pix_format *pix_fmt,
- struct v4l2_mbus_framefmt *mbus_fmt)
-{
- struct vpfe_fmt *fmt;
-
- fmt = find_format_by_pix(pix_fmt->pixelformat);
- if (!fmt) {
- /* default to first entry */
- vpfe_dbg(3, vpfe, "Invalid pixel code: %x, default used instead\n",
- pix_fmt->pixelformat);
- fmt = &formats[0];
- }
+ bus_width_bytes = ALIGN(bus_width, 8) >> 3;
+ clocksperpixel = DIV_ROUND_UP(fmt->bitsperpixel, bus_width);
+ bpp = clocksperpixel * bus_width_bytes;
- memset(mbus_fmt, 0, sizeof(*mbus_fmt));
- v4l2_fill_mbus_format(mbus_fmt, pix_fmt, fmt->code);
+ return bpp;
}
/* Print Four-character-code (FOURCC) */
@@ -267,20 +165,6 @@ static char *print_fourcc(u32 fmt)
return code;
}
-static int
-cmp_v4l2_format(const struct v4l2_format *lhs, const struct v4l2_format *rhs)
-{
- return lhs->type == rhs->type &&
- lhs->fmt.pix.width == rhs->fmt.pix.width &&
- lhs->fmt.pix.height == rhs->fmt.pix.height &&
- lhs->fmt.pix.pixelformat == rhs->fmt.pix.pixelformat &&
- lhs->fmt.pix.field == rhs->fmt.pix.field &&
- lhs->fmt.pix.colorspace == rhs->fmt.pix.colorspace &&
- lhs->fmt.pix.ycbcr_enc == rhs->fmt.pix.ycbcr_enc &&
- lhs->fmt.pix.quantization == rhs->fmt.pix.quantization &&
- lhs->fmt.pix.xfer_func == rhs->fmt.pix.xfer_func;
-}
-
static inline u32 vpfe_reg_read(struct vpfe_ccdc *ccdc, u32 offset)
{
return ioread32(ccdc->ccdc_cfg.base_addr + offset);
@@ -345,13 +229,9 @@ static void vpfe_ccdc_setwin(struct vpfe_ccdc *ccdc,
if (frm_fmt == CCDC_FRMFMT_INTERLACED) {
vert_nr_lines = (image_win->height >> 1) - 1;
vert_start >>= 1;
- /* Since first line doesn't have any data */
- vert_start += 1;
/* configure VDINT0 */
val = (vert_start << VPFE_VDINT_VDINT0_SHIFT);
} else {
- /* Since first line doesn't have any data */
- vert_start += 1;
vert_nr_lines = image_win->height - 1;
/*
* configure VDINT0 and VDINT1. VDINT1 will be at half
@@ -405,7 +285,6 @@ vpfe_ccdc_validate_param(struct vpfe_ccdc *ccdc,
max_data = ccdc_data_size_max_bit(ccdcparam->data_sz);
if (ccdcparam->alaw.gamma_wd > VPFE_CCDC_GAMMA_BITS_09_0 ||
- ccdcparam->alaw.gamma_wd < VPFE_CCDC_GAMMA_BITS_15_6 ||
max_gamma > max_data) {
vpfe_dbg(1, vpfe, "Invalid data line select\n");
return -EINVAL;
@@ -445,40 +324,25 @@ static void vpfe_ccdc_restore_defaults(struct vpfe_ccdc *ccdc)
static int vpfe_ccdc_close(struct vpfe_ccdc *ccdc, struct device *dev)
{
- int dma_cntl, i, pcr;
+ struct vpfe_device *vpfe = container_of(ccdc, struct vpfe_device, ccdc);
+ u32 dma_cntl, pcr;
- /* If the CCDC module is still busy wait for it to be done */
- for (i = 0; i < 10; i++) {
- usleep_range(5000, 6000);
- pcr = vpfe_reg_read(ccdc, VPFE_PCR);
- if (!pcr)
- break;
+ pcr = vpfe_reg_read(ccdc, VPFE_PCR);
+ if (pcr)
+ vpfe_dbg(1, vpfe, "VPFE_PCR is still set (%x)", pcr);
- /* make sure it it is disabled */
- vpfe_pcr_enable(ccdc, 0);
- }
+ dma_cntl = vpfe_reg_read(ccdc, VPFE_DMA_CNTL);
+ if (dma_cntl & VPFE_DMA_CNTL_OVERFLOW)
+ vpfe_dbg(1, vpfe, "VPFE_DMA_CNTL_OVERFLOW is still set (%x)",
+ dma_cntl);
/* Disable CCDC by resetting all register to default POR values */
vpfe_ccdc_restore_defaults(ccdc);
- /* if DMA_CNTL overflow bit is set. Clear it
- * It appears to take a while for this to become quiescent ~20ms
- */
- for (i = 0; i < 10; i++) {
- dma_cntl = vpfe_reg_read(ccdc, VPFE_DMA_CNTL);
- if (!(dma_cntl & VPFE_DMA_CNTL_OVERFLOW))
- break;
-
- /* Clear the overflow bit */
- vpfe_reg_write(ccdc, dma_cntl, VPFE_DMA_CNTL);
- usleep_range(5000, 6000);
- }
-
/* Disabled the module at the CONFIG level */
vpfe_config_enable(ccdc, 0);
pm_runtime_put_sync(dev);
-
return 0;
}
@@ -494,8 +358,8 @@ static int vpfe_ccdc_set_params(struct vpfe_ccdc *ccdc, void __user *params)
x = copy_from_user(&raw_params, params, sizeof(raw_params));
if (x) {
vpfe_dbg(1, vpfe,
- "vpfe_ccdc_set_params: error in copying ccdc params, %d\n",
- x);
+ "%s: error in copying ccdc params, %d\n",
+ __func__, x);
return -EFAULT;
}
@@ -513,11 +377,9 @@ static int vpfe_ccdc_set_params(struct vpfe_ccdc *ccdc, void __user *params)
*/
static void vpfe_ccdc_config_ycbcr(struct vpfe_ccdc *ccdc)
{
- struct vpfe_device *vpfe = container_of(ccdc, struct vpfe_device, ccdc);
struct ccdc_params_ycbcr *params = &ccdc->ccdc_cfg.ycbcr;
u32 syn_mode;
- vpfe_dbg(3, vpfe, "vpfe_ccdc_config_ycbcr:\n");
/*
* first restore the CCDC registers to default values
* This is important since we assume default values to be set in
@@ -649,8 +511,6 @@ static void vpfe_ccdc_config_raw(struct vpfe_ccdc *ccdc)
unsigned int syn_mode;
unsigned int val;
- vpfe_dbg(3, vpfe, "vpfe_ccdc_config_raw:\n");
-
/* Reset CCDC */
vpfe_ccdc_restore_defaults(ccdc);
@@ -751,8 +611,8 @@ static int vpfe_ccdc_set_pixel_format(struct vpfe_ccdc *ccdc, u32 pixfmt)
{
struct vpfe_device *vpfe = container_of(ccdc, struct vpfe_device, ccdc);
- vpfe_dbg(1, vpfe, "vpfe_ccdc_set_pixel_format: if_type: %d, pixfmt:%s\n",
- ccdc->ccdc_cfg.if_type, print_fourcc(pixfmt));
+ vpfe_dbg(1, vpfe, "%s: if_type: %d, pixfmt:%s\n",
+ __func__, ccdc->ccdc_cfg.if_type, print_fourcc(pixfmt));
if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER) {
ccdc->ccdc_cfg.bayer.pix_fmt = CCDC_PIXFMT_RAW;
@@ -1036,10 +896,9 @@ static int vpfe_get_ccdc_image_format(struct vpfe_device *vpfe,
static int vpfe_config_ccdc_image_format(struct vpfe_device *vpfe)
{
enum ccdc_frmfmt frm_fmt = CCDC_FRMFMT_INTERLACED;
+ u32 bpp;
int ret = 0;
- vpfe_dbg(2, vpfe, "vpfe_config_ccdc_image_format\n");
-
vpfe_dbg(1, vpfe, "pixelformat: %s\n",
print_fourcc(vpfe->fmt.fmt.pix.pixelformat));
@@ -1050,7 +909,8 @@ static int vpfe_config_ccdc_image_format(struct vpfe_device *vpfe)
}
/* configure the image window */
- vpfe_ccdc_set_image_window(&vpfe->ccdc, &vpfe->crop, vpfe->bpp);
+ bpp = __get_bytesperpixel(vpfe, vpfe->current_vpfe_fmt);
+ vpfe_ccdc_set_image_window(&vpfe->ccdc, &vpfe->crop, bpp);
switch (vpfe->fmt.fmt.pix.field) {
case V4L2_FIELD_INTERLACED:
@@ -1094,7 +954,8 @@ static int vpfe_config_ccdc_image_format(struct vpfe_device *vpfe)
static int vpfe_config_image_format(struct vpfe_device *vpfe,
v4l2_std_id std_id)
{
- struct v4l2_pix_format *pix = &vpfe->fmt.fmt.pix;
+ struct vpfe_fmt *fmt;
+ struct v4l2_mbus_framefmt mbus_fmt;
int i, ret;
for (i = 0; i < ARRAY_SIZE(vpfe_standards); i++) {
@@ -1116,26 +977,29 @@ static int vpfe_config_image_format(struct vpfe_device *vpfe,
return -EINVAL;
}
- vpfe->crop.top = vpfe->crop.left = 0;
- vpfe->crop.width = vpfe->std_info.active_pixels;
- vpfe->crop.height = vpfe->std_info.active_lines;
- pix->width = vpfe->crop.width;
- pix->height = vpfe->crop.height;
- pix->pixelformat = V4L2_PIX_FMT_YUYV;
-
- /* first field and frame format based on standard frame format */
- if (vpfe->std_info.frame_format)
- pix->field = V4L2_FIELD_INTERLACED;
- else
- pix->field = V4L2_FIELD_NONE;
-
- ret = __vpfe_get_format(vpfe, &vpfe->fmt, &vpfe->bpp);
+ ret = __subdev_get_format(vpfe, &mbus_fmt);
if (ret)
return ret;
+ fmt = find_format_by_code(vpfe, mbus_fmt.code);
+ if (!fmt) {
+ vpfe_dbg(3, vpfe, "mbus code format (0x%08x) not found.\n",
+ mbus_fmt.code);
+ return -EINVAL;
+ }
+
+ /* Save current subdev format */
+ v4l2_fill_pix_format(&vpfe->fmt.fmt.pix, &mbus_fmt);
+ vpfe->fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ vpfe->fmt.fmt.pix.pixelformat = fmt->fourcc;
+ vpfe_calc_format_size(vpfe, fmt, &vpfe->fmt);
+ vpfe->current_vpfe_fmt = fmt;
+
/* Update the crop window based on found values */
- vpfe->crop.width = pix->width;
- vpfe->crop.height = pix->height;
+ vpfe->crop.top = 0;
+ vpfe->crop.left = 0;
+ vpfe->crop.width = mbus_fmt.width;
+ vpfe->crop.height = mbus_fmt.height;
return vpfe_config_ccdc_image_format(vpfe);
}
@@ -1237,22 +1101,29 @@ unlock:
* This function will get next buffer from the dma queue and
* set the buffer address in the vpfe register for capture.
* the buffer is marked active
- *
- * Assumes caller is holding vpfe->dma_queue_lock already
*/
-static inline void vpfe_schedule_next_buffer(struct vpfe_device *vpfe)
+static void vpfe_schedule_next_buffer(struct vpfe_device *vpfe)
{
+ dma_addr_t addr;
+
+ spin_lock(&vpfe->dma_queue_lock);
+ if (list_empty(&vpfe->dma_queue)) {
+ spin_unlock(&vpfe->dma_queue_lock);
+ return;
+ }
+
vpfe->next_frm = list_entry(vpfe->dma_queue.next,
struct vpfe_cap_buffer, list);
list_del(&vpfe->next_frm->list);
+ spin_unlock(&vpfe->dma_queue_lock);
- vpfe_set_sdr_addr(&vpfe->ccdc,
- vb2_dma_contig_plane_dma_addr(&vpfe->next_frm->vb.vb2_buf, 0));
+ addr = vb2_dma_contig_plane_dma_addr(&vpfe->next_frm->vb.vb2_buf, 0);
+ vpfe_set_sdr_addr(&vpfe->ccdc, addr);
}
static inline void vpfe_schedule_bottom_field(struct vpfe_device *vpfe)
{
- unsigned long addr;
+ dma_addr_t addr;
addr = vb2_dma_contig_plane_dma_addr(&vpfe->next_frm->vb.vb2_buf, 0) +
vpfe->field_off;
@@ -1277,6 +1148,58 @@ static inline void vpfe_process_buffer_complete(struct vpfe_device *vpfe)
vpfe->cur_frm = vpfe->next_frm;
}
+static void vpfe_handle_interlaced_irq(struct vpfe_device *vpfe,
+ enum v4l2_field field)
+{
+ int fid;
+
+ /* interlaced or TB capture: check which field
+ * the hardware is in
+ */
+ fid = vpfe_ccdc_getfid(&vpfe->ccdc);
+
+ /* switch the software maintained field id */
+ vpfe->field ^= 1;
+ if (fid == vpfe->field) {
+ /* we are in sync here, continue */
+ if (fid == 0) {
+ /*
+ * One frame is just being captured. If the
+ * next frame is available, release the
+ * current frame and move on
+ */
+ if (vpfe->cur_frm != vpfe->next_frm)
+ vpfe_process_buffer_complete(vpfe);
+
+ if (vpfe->stopping)
+ return;
+
+ /*
+ * based on whether the two fields are stored
+ * interleaved or separately in memory,
+ * reconfigure the CCDC memory address
+ */
+ if (field == V4L2_FIELD_SEQ_TB)
+ vpfe_schedule_bottom_field(vpfe);
+ } else {
+ /*
+ * If one field has just been captured, configure
+ * the next frame: get the next frame from the DMA
+ * queue; if no frame is available, hold on to the
+ * current buffer.
+ */
+ if (vpfe->cur_frm == vpfe->next_frm)
+ vpfe_schedule_next_buffer(vpfe);
+ }
+ } else if (fid == 0) {
+ /*
+ * Out of sync: recover from any hardware out-of-sync
+ * condition. May lose one frame.
+ */
+ vpfe->field = fid;
+ }
+}
+
/*
* vpfe_isr : ISR handler for vpfe capture (VINT0)
* @irq: irq number
@@ -1288,76 +1211,28 @@ static inline void vpfe_process_buffer_complete(struct vpfe_device *vpfe)
static irqreturn_t vpfe_isr(int irq, void *dev)
{
struct vpfe_device *vpfe = (struct vpfe_device *)dev;
- enum v4l2_field field;
- int intr_status;
- int fid;
+ enum v4l2_field field = vpfe->fmt.fmt.pix.field;
+ int intr_status, stopping = vpfe->stopping;
intr_status = vpfe_reg_read(&vpfe->ccdc, VPFE_IRQ_STS);
if (intr_status & VPFE_VDINT0) {
- field = vpfe->fmt.fmt.pix.field;
-
if (field == V4L2_FIELD_NONE) {
- /* handle progressive frame capture */
if (vpfe->cur_frm != vpfe->next_frm)
vpfe_process_buffer_complete(vpfe);
- goto next_intr;
+ } else {
+ vpfe_handle_interlaced_irq(vpfe, field);
}
-
- /* interlaced or TB capture check which field
- we are in hardware */
- fid = vpfe_ccdc_getfid(&vpfe->ccdc);
-
- /* switch the software maintained field id */
- vpfe->field ^= 1;
- if (fid == vpfe->field) {
- /* we are in-sync here,continue */
- if (fid == 0) {
- /*
- * One frame is just being captured. If the
- * next frame is available, release the
- * current frame and move on
- */
- if (vpfe->cur_frm != vpfe->next_frm)
- vpfe_process_buffer_complete(vpfe);
- /*
- * based on whether the two fields are stored
- * interleave or separately in memory,
- * reconfigure the CCDC memory address
- */
- if (field == V4L2_FIELD_SEQ_TB)
- vpfe_schedule_bottom_field(vpfe);
-
- goto next_intr;
- }
- /*
- * if one field is just being captured configure
- * the next frame get the next frame from the empty
- * queue if no frame is available hold on to the
- * current buffer
- */
- spin_lock(&vpfe->dma_queue_lock);
- if (!list_empty(&vpfe->dma_queue) &&
- vpfe->cur_frm == vpfe->next_frm)
- vpfe_schedule_next_buffer(vpfe);
- spin_unlock(&vpfe->dma_queue_lock);
- } else if (fid == 0) {
- /*
- * out of sync. Recover from any hardware out-of-sync.
- * May loose one frame
- */
- vpfe->field = fid;
+ if (stopping) {
+ vpfe->stopping = false;
+ complete(&vpfe->capture_stop);
}
}
-next_intr:
- if (intr_status & VPFE_VDINT1) {
- spin_lock(&vpfe->dma_queue_lock);
- if (vpfe->fmt.fmt.pix.field == V4L2_FIELD_NONE &&
- !list_empty(&vpfe->dma_queue) &&
+ if (intr_status & VPFE_VDINT1 && !stopping) {
+ if (field == V4L2_FIELD_NONE &&
vpfe->cur_frm == vpfe->next_frm)
vpfe_schedule_next_buffer(vpfe);
- spin_unlock(&vpfe->dma_queue_lock);
}
vpfe_clear_intr(&vpfe->ccdc, intr_status);
@@ -1394,8 +1269,6 @@ static int vpfe_querycap(struct file *file, void *priv,
{
struct vpfe_device *vpfe = video_drvdata(file);
- vpfe_dbg(2, vpfe, "vpfe_querycap\n");
-
strscpy(cap->driver, VPFE_MODULE_NAME, sizeof(cap->driver));
strscpy(cap->card, "TI AM437x VPFE", sizeof(cap->card));
snprintf(cap->bus_info, sizeof(cap->bus_info),
@@ -1404,83 +1277,74 @@ static int vpfe_querycap(struct file *file, void *priv,
}
/* get the format set at output pad of the adjacent subdev */
-static int __vpfe_get_format(struct vpfe_device *vpfe,
- struct v4l2_format *format, unsigned int *bpp)
+static int __subdev_get_format(struct vpfe_device *vpfe,
+ struct v4l2_mbus_framefmt *fmt)
{
- struct v4l2_mbus_framefmt mbus_fmt;
- struct vpfe_subdev_info *sdinfo;
- struct v4l2_subdev_format fmt;
+ struct v4l2_subdev *sd = vpfe->current_subdev->sd;
+ struct v4l2_subdev_format sd_fmt;
+ struct v4l2_mbus_framefmt *mbus_fmt = &sd_fmt.format;
int ret;
- sdinfo = vpfe->current_subdev;
- if (!sdinfo->sd)
- return -EINVAL;
-
- fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
- fmt.pad = 0;
+ sd_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ sd_fmt.pad = 0;
- ret = v4l2_subdev_call(sdinfo->sd, pad, get_fmt, NULL, &fmt);
- if (ret && ret != -ENOIOCTLCMD && ret != -ENODEV)
+ ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &sd_fmt);
+ if (ret)
return ret;
- if (!ret) {
- v4l2_fill_pix_format(&format->fmt.pix, &fmt.format);
- mbus_to_pix(vpfe, &fmt.format, &format->fmt.pix, bpp);
- } else {
- ret = v4l2_device_call_until_err(&vpfe->v4l2_dev,
- sdinfo->grp_id,
- pad, get_fmt,
- NULL, &fmt);
- if (ret && ret != -ENOIOCTLCMD && ret != -ENODEV)
- return ret;
- v4l2_fill_pix_format(&format->fmt.pix, &mbus_fmt);
- mbus_to_pix(vpfe, &mbus_fmt, &format->fmt.pix, bpp);
- }
-
- format->type = vpfe->fmt.type;
+ *fmt = *mbus_fmt;
- vpfe_dbg(1, vpfe,
- "%s size %dx%d (%s) bytesperline = %d, size = %d, bpp = %d\n",
- __func__, format->fmt.pix.width, format->fmt.pix.height,
- print_fourcc(format->fmt.pix.pixelformat),
- format->fmt.pix.bytesperline, format->fmt.pix.sizeimage, *bpp);
+ vpfe_dbg(1, vpfe, "%s: %dx%d code:%04X\n", __func__,
+ fmt->width, fmt->height, fmt->code);
return 0;
}
/* set the format at output pad of the adjacent subdev */
-static int __vpfe_set_format(struct vpfe_device *vpfe,
- struct v4l2_format *format, unsigned int *bpp)
+static int __subdev_set_format(struct vpfe_device *vpfe,
+ struct v4l2_mbus_framefmt *fmt)
{
- struct vpfe_subdev_info *sdinfo;
- struct v4l2_subdev_format fmt;
+ struct v4l2_subdev *sd = vpfe->current_subdev->sd;
+ struct v4l2_subdev_format sd_fmt;
+ struct v4l2_mbus_framefmt *mbus_fmt = &sd_fmt.format;
int ret;
- vpfe_dbg(2, vpfe, "__vpfe_set_format\n");
+ sd_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ sd_fmt.pad = 0;
+ *mbus_fmt = *fmt;
- sdinfo = vpfe->current_subdev;
- if (!sdinfo->sd)
- return -EINVAL;
+ ret = v4l2_subdev_call(sd, pad, set_fmt, NULL, &sd_fmt);
+ if (ret)
+ return ret;
- fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
- fmt.pad = 0;
+ vpfe_dbg(1, vpfe, "%s %dx%d code:%04X\n", __func__,
+ fmt->width, fmt->height, fmt->code);
- pix_to_mbus(vpfe, &format->fmt.pix, &fmt.format);
+ return 0;
+}
- ret = v4l2_subdev_call(sdinfo->sd, pad, set_fmt, NULL, &fmt);
- if (ret)
- return ret;
+static int vpfe_calc_format_size(struct vpfe_device *vpfe,
+ const struct vpfe_fmt *fmt,
+ struct v4l2_format *f)
+{
+ u32 bpp;
+
+ if (!fmt) {
+ vpfe_dbg(3, vpfe, "No vpfe_fmt provided!\n");
+ return -EINVAL;
+ }
- v4l2_fill_pix_format(&format->fmt.pix, &fmt.format);
- mbus_to_pix(vpfe, &fmt.format, &format->fmt.pix, bpp);
+ bpp = __get_bytesperpixel(vpfe, fmt);
- format->type = vpfe->fmt.type;
+ /* pitch should be 32 bytes aligned */
+ f->fmt.pix.bytesperline = ALIGN(f->fmt.pix.width * bpp, 32);
+ f->fmt.pix.sizeimage = f->fmt.pix.bytesperline *
+ f->fmt.pix.height;
- vpfe_dbg(1, vpfe,
- "%s size %dx%d (%s) bytesperline = %d, size = %d, bpp = %d\n",
- __func__, format->fmt.pix.width, format->fmt.pix.height,
- print_fourcc(format->fmt.pix.pixelformat),
- format->fmt.pix.bytesperline, format->fmt.pix.sizeimage, *bpp);
+ vpfe_dbg(3, vpfe, "%s: fourcc: %s size: %dx%d bpl:%d img_size:%d\n",
+ __func__, print_fourcc(f->fmt.pix.pixelformat),
+ f->fmt.pix.width, f->fmt.pix.height,
+ f->fmt.pix.bytesperline, f->fmt.pix.sizeimage);
return 0;
}
@@ -1490,8 +1354,6 @@ static int vpfe_g_fmt(struct file *file, void *priv,
{
struct vpfe_device *vpfe = video_drvdata(file);
- vpfe_dbg(2, vpfe, "vpfe_g_fmt\n");
-
*fmt = vpfe->fmt;
return 0;
@@ -1502,82 +1364,124 @@ static int vpfe_enum_fmt(struct file *file, void *priv,
{
struct vpfe_device *vpfe = video_drvdata(file);
struct vpfe_subdev_info *sdinfo;
- struct vpfe_fmt *fmt = NULL;
- unsigned int k;
-
- vpfe_dbg(2, vpfe, "vpfe_enum_format index:%d\n",
- f->index);
+ struct vpfe_fmt *fmt;
sdinfo = vpfe->current_subdev;
if (!sdinfo->sd)
return -EINVAL;
- if (f->index > ARRAY_SIZE(formats))
+ if (f->index >= vpfe->num_active_fmt)
return -EINVAL;
- for (k = 0; k < ARRAY_SIZE(formats); k++) {
- if (formats[k].index == f->index) {
- fmt = &formats[k];
- break;
- }
- }
- if (!fmt)
- return -EINVAL;
+ fmt = vpfe->active_fmt[f->index];
f->pixelformat = fmt->fourcc;
- vpfe_dbg(1, vpfe, "vpfe_enum_format: mbus index: %d code: %x pixelformat: %s\n",
- f->index, fmt->code, print_fourcc(fmt->fourcc));
+ vpfe_dbg(1, vpfe, "%s: mbus index: %d code: %x pixelformat: %s\n",
+ __func__, f->index, fmt->code, print_fourcc(fmt->fourcc));
return 0;
}
static int vpfe_try_fmt(struct file *file, void *priv,
- struct v4l2_format *fmt)
+ struct v4l2_format *f)
{
struct vpfe_device *vpfe = video_drvdata(file);
- unsigned int bpp;
+ struct v4l2_subdev *sd = vpfe->current_subdev->sd;
+ const struct vpfe_fmt *fmt;
+ struct v4l2_subdev_frame_size_enum fse;
+ int ret, found;
+
+ fmt = find_format_by_pix(vpfe, f->fmt.pix.pixelformat);
+ if (!fmt) {
+ /* default to first entry */
+ vpfe_dbg(3, vpfe, "Invalid pixel code: %x, default used instead\n",
+ f->fmt.pix.pixelformat);
+ fmt = vpfe->active_fmt[0];
+ f->fmt.pix.pixelformat = fmt->fourcc;
+ }
+
+ f->fmt.pix.field = vpfe->fmt.fmt.pix.field;
+
+ /* check for/find a valid width/height */
+ ret = 0;
+ found = false;
+ fse.pad = 0;
+ fse.code = fmt->code;
+ fse.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ for (fse.index = 0; ; fse.index++) {
+ ret = v4l2_subdev_call(sd, pad, enum_frame_size,
+ NULL, &fse);
+ if (ret)
+ break;
+
+ if (f->fmt.pix.width == fse.max_width &&
+ f->fmt.pix.height == fse.max_height) {
+ found = true;
+ break;
+ } else if (f->fmt.pix.width >= fse.min_width &&
+ f->fmt.pix.width <= fse.max_width &&
+ f->fmt.pix.height >= fse.min_height &&
+ f->fmt.pix.height <= fse.max_height) {
+ found = true;
+ break;
+ }
+ }
- vpfe_dbg(2, vpfe, "vpfe_try_fmt\n");
+ if (!found) {
+ /* use existing values as default */
+ f->fmt.pix.width = vpfe->fmt.fmt.pix.width;
+ f->fmt.pix.height = vpfe->fmt.fmt.pix.height;
+ }
- return __vpfe_get_format(vpfe, fmt, &bpp);
+ /*
+ * Use current colorspace for now, it will get
+ * updated properly during s_fmt
+ */
+ f->fmt.pix.colorspace = vpfe->fmt.fmt.pix.colorspace;
+ return vpfe_calc_format_size(vpfe, fmt, f);
}
static int vpfe_s_fmt(struct file *file, void *priv,
struct v4l2_format *fmt)
{
struct vpfe_device *vpfe = video_drvdata(file);
- struct v4l2_format format;
- unsigned int bpp;
+ struct vpfe_fmt *f;
+ struct v4l2_mbus_framefmt mbus_fmt;
int ret;
- vpfe_dbg(2, vpfe, "vpfe_s_fmt\n");
-
/* If streaming is started, return error */
if (vb2_is_busy(&vpfe->buffer_queue)) {
vpfe_err(vpfe, "%s device busy\n", __func__);
return -EBUSY;
}
- ret = __vpfe_get_format(vpfe, &format, &bpp);
- if (ret)
+ ret = vpfe_try_fmt(file, priv, fmt);
+ if (ret < 0)
return ret;
+ f = find_format_by_pix(vpfe, fmt->fmt.pix.pixelformat);
- if (!cmp_v4l2_format(fmt, &format)) {
- /* Sensor format is different from the requested format
- * so we need to change it
- */
- ret = __vpfe_set_format(vpfe, fmt, &bpp);
- if (ret)
- return ret;
- } else /* Just make sure all of the fields are consistent */
- *fmt = format;
+ v4l2_fill_mbus_format(&mbus_fmt, &fmt->fmt.pix, f->code);
- /* First detach any IRQ if currently attached */
- vpfe_detach_irq(vpfe);
- vpfe->fmt = *fmt;
- vpfe->bpp = bpp;
+ ret = __subdev_set_format(vpfe, &mbus_fmt);
+ if (ret)
+ return ret;
+
+ /* Just double check nothing has gone wrong */
+ if (mbus_fmt.code != f->code) {
+ vpfe_dbg(3, vpfe,
+ "%s subdev changed format on us, this should not happen\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ v4l2_fill_pix_format(&vpfe->fmt.fmt.pix, &mbus_fmt);
+ vpfe->fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ vpfe->fmt.fmt.pix.pixelformat = f->fourcc;
+ vpfe_calc_format_size(vpfe, f, &vpfe->fmt);
+ *fmt = vpfe->fmt;
+ vpfe->current_vpfe_fmt = f;
/* Update the crop window based on found values */
vpfe->crop.width = fmt->fmt.pix.width;
@@ -1592,57 +1496,40 @@ static int vpfe_enum_size(struct file *file, void *priv,
{
struct vpfe_device *vpfe = video_drvdata(file);
struct v4l2_subdev_frame_size_enum fse;
- struct vpfe_subdev_info *sdinfo;
- struct v4l2_mbus_framefmt mbus;
- struct v4l2_pix_format pix;
+ struct v4l2_subdev *sd = vpfe->current_subdev->sd;
struct vpfe_fmt *fmt;
int ret;
- vpfe_dbg(2, vpfe, "vpfe_enum_size\n");
-
/* check for valid format */
- fmt = find_format_by_pix(fsize->pixel_format);
+ fmt = find_format_by_pix(vpfe, fsize->pixel_format);
if (!fmt) {
- vpfe_dbg(3, vpfe, "Invalid pixel code: %x, default used instead\n",
- fsize->pixel_format);
+ vpfe_dbg(3, vpfe, "Invalid pixel code: %x\n",
+ fsize->pixel_format);
return -EINVAL;
}
memset(fsize->reserved, 0x0, sizeof(fsize->reserved));
- sdinfo = vpfe->current_subdev;
- if (!sdinfo->sd)
- return -EINVAL;
-
- memset(&pix, 0x0, sizeof(pix));
- /* Construct pix from parameter and use default for the rest */
- pix.pixelformat = fsize->pixel_format;
- pix.width = 640;
- pix.height = 480;
- pix.colorspace = V4L2_COLORSPACE_SRGB;
- pix.field = V4L2_FIELD_NONE;
- pix_to_mbus(vpfe, &pix, &mbus);
-
memset(&fse, 0x0, sizeof(fse));
fse.index = fsize->index;
fse.pad = 0;
- fse.code = mbus.code;
+ fse.code = fmt->code;
fse.which = V4L2_SUBDEV_FORMAT_ACTIVE;
- ret = v4l2_subdev_call(sdinfo->sd, pad, enum_frame_size, NULL, &fse);
+ ret = v4l2_subdev_call(sd, pad, enum_frame_size, NULL, &fse);
if (ret)
- return -EINVAL;
+ return ret;
- vpfe_dbg(1, vpfe, "vpfe_enum_size: index: %d code: %x W:[%d,%d] H:[%d,%d]\n",
- fse.index, fse.code, fse.min_width, fse.max_width,
- fse.min_height, fse.max_height);
+ vpfe_dbg(1, vpfe, "%s: index: %d code: %x W:[%d,%d] H:[%d,%d]\n",
+ __func__, fse.index, fse.code, fse.min_width, fse.max_width,
+ fse.min_height, fse.max_height);
fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
fsize->discrete.width = fse.max_width;
fsize->discrete.height = fse.max_height;
- vpfe_dbg(1, vpfe, "vpfe_enum_size: index: %d pixformat: %s size: %dx%d\n",
- fsize->index, print_fourcc(fsize->pixel_format),
- fsize->discrete.width, fsize->discrete.height);
+ vpfe_dbg(1, vpfe, "%s: index: %d pixformat: %s size: %dx%d\n",
+ __func__, fsize->index, print_fourcc(fsize->pixel_format),
+ fsize->discrete.width, fsize->discrete.height);
return 0;
}
@@ -1707,8 +1594,6 @@ static int vpfe_enum_input(struct file *file, void *priv,
struct vpfe_subdev_info *sdinfo;
int subdev, index;
- vpfe_dbg(2, vpfe, "vpfe_enum_input\n");
-
if (vpfe_get_subdev_input_index(vpfe, &subdev, &index,
inp->index) < 0) {
vpfe_dbg(1, vpfe,
@@ -1725,8 +1610,6 @@ static int vpfe_g_input(struct file *file, void *priv, unsigned int *index)
{
struct vpfe_device *vpfe = video_drvdata(file);
- vpfe_dbg(2, vpfe, "vpfe_g_input\n");
-
return vpfe_get_app_input_index(vpfe, index);
}
@@ -1739,8 +1622,6 @@ static int vpfe_set_input(struct vpfe_device *vpfe, unsigned int index)
u32 input, output;
int ret;
- vpfe_dbg(2, vpfe, "vpfe_set_input: index: %d\n", index);
-
/* If streaming is started, return error */
if (vb2_is_busy(&vpfe->buffer_queue)) {
vpfe_err(vpfe, "%s device busy\n", __func__);
@@ -1796,9 +1677,6 @@ static int vpfe_s_input(struct file *file, void *priv, unsigned int index)
{
struct vpfe_device *vpfe = video_drvdata(file);
- vpfe_dbg(2, vpfe,
- "vpfe_s_input: index: %d\n", index);
-
return vpfe_set_input(vpfe, index);
}
@@ -1807,8 +1685,6 @@ static int vpfe_querystd(struct file *file, void *priv, v4l2_std_id *std_id)
struct vpfe_device *vpfe = video_drvdata(file);
struct vpfe_subdev_info *sdinfo;
- vpfe_dbg(2, vpfe, "vpfe_querystd\n");
-
sdinfo = vpfe->current_subdev;
if (!(sdinfo->inputs[0].capabilities & V4L2_IN_CAP_STD))
return -ENODATA;
@@ -1824,12 +1700,14 @@ static int vpfe_s_std(struct file *file, void *priv, v4l2_std_id std_id)
struct vpfe_subdev_info *sdinfo;
int ret;
- vpfe_dbg(2, vpfe, "vpfe_s_std\n");
-
sdinfo = vpfe->current_subdev;
if (!(sdinfo->inputs[0].capabilities & V4L2_IN_CAP_STD))
return -ENODATA;
+ /* if trying to set the same std then nothing to do */
+ if (vpfe_standards[vpfe->std_index].std_id == std_id)
+ return 0;
+
/* If streaming is started, return error */
if (vb2_is_busy(&vpfe->buffer_queue)) {
vpfe_err(vpfe, "%s device busy\n", __func__);
@@ -1853,8 +1731,6 @@ static int vpfe_g_std(struct file *file, void *priv, v4l2_std_id *std_id)
struct vpfe_device *vpfe = video_drvdata(file);
struct vpfe_subdev_info *sdinfo;
- vpfe_dbg(2, vpfe, "vpfe_g_std\n");
-
sdinfo = vpfe->current_subdev;
if (sdinfo->inputs[0].capabilities != V4L2_IN_CAP_STD)
return -ENODATA;
@@ -1872,8 +1748,6 @@ static void vpfe_calculate_offsets(struct vpfe_device *vpfe)
{
struct v4l2_rect image_win;
- vpfe_dbg(2, vpfe, "vpfe_calculate_offsets\n");
-
vpfe_ccdc_get_image_window(&vpfe->ccdc, &image_win);
vpfe->field_off = image_win.height * image_win.width;
}
@@ -1957,6 +1831,29 @@ static void vpfe_buffer_queue(struct vb2_buffer *vb)
spin_unlock_irqrestore(&vpfe->dma_queue_lock, flags);
}
+static void vpfe_return_all_buffers(struct vpfe_device *vpfe,
+ enum vb2_buffer_state state)
+{
+ struct vpfe_cap_buffer *buf, *node;
+ unsigned long flags;
+
+ spin_lock_irqsave(&vpfe->dma_queue_lock, flags);
+ list_for_each_entry_safe(buf, node, &vpfe->dma_queue, list) {
+ vb2_buffer_done(&buf->vb.vb2_buf, state);
+ list_del(&buf->list);
+ }
+
+ if (vpfe->cur_frm)
+ vb2_buffer_done(&vpfe->cur_frm->vb.vb2_buf, state);
+
+ if (vpfe->next_frm && vpfe->next_frm != vpfe->cur_frm)
+ vb2_buffer_done(&vpfe->next_frm->vb.vb2_buf, state);
+
+ vpfe->cur_frm = NULL;
+ vpfe->next_frm = NULL;
+ spin_unlock_irqrestore(&vpfe->dma_queue_lock, flags);
+}
+
/*
* vpfe_start_streaming : Starts the DMA engine for streaming
* @vb: ptr to vb2_buffer
@@ -1965,7 +1862,6 @@ static void vpfe_buffer_queue(struct vb2_buffer *vb)
static int vpfe_start_streaming(struct vb2_queue *vq, unsigned int count)
{
struct vpfe_device *vpfe = vb2_get_drv_priv(vq);
- struct vpfe_cap_buffer *buf, *tmp;
struct vpfe_subdev_info *sdinfo;
unsigned long flags;
unsigned long addr;
@@ -1980,6 +1876,9 @@ static int vpfe_start_streaming(struct vb2_queue *vq, unsigned int count)
vpfe_attach_irq(vpfe);
+ vpfe->stopping = false;
+ init_completion(&vpfe->capture_stop);
+
if (vpfe->ccdc.ccdc_cfg.if_type == VPFE_RAW_BAYER)
vpfe_ccdc_config_raw(&vpfe->ccdc);
else
@@ -2008,11 +1907,8 @@ static int vpfe_start_streaming(struct vb2_queue *vq, unsigned int count)
return 0;
err:
- list_for_each_entry_safe(buf, tmp, &vpfe->dma_queue, list) {
- list_del(&buf->list);
- vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED);
- }
-
+ vpfe_return_all_buffers(vpfe, VB2_BUF_STATE_QUEUED);
+ vpfe_pcr_enable(&vpfe->ccdc, 0);
return ret;
}
@@ -2027,11 +1923,15 @@ static void vpfe_stop_streaming(struct vb2_queue *vq)
{
struct vpfe_device *vpfe = vb2_get_drv_priv(vq);
struct vpfe_subdev_info *sdinfo;
- unsigned long flags;
int ret;
vpfe_pcr_enable(&vpfe->ccdc, 0);
+ /* Wait for the last frame to be captured */
+ vpfe->stopping = true;
+ wait_for_completion_timeout(&vpfe->capture_stop,
+ msecs_to_jiffies(250));
+
vpfe_detach_irq(vpfe);
sdinfo = vpfe->current_subdev;
@@ -2040,27 +1940,7 @@ static void vpfe_stop_streaming(struct vb2_queue *vq)
vpfe_dbg(1, vpfe, "stream off failed in subdev\n");
/* release all active buffers */
- spin_lock_irqsave(&vpfe->dma_queue_lock, flags);
- if (vpfe->cur_frm == vpfe->next_frm) {
- vb2_buffer_done(&vpfe->cur_frm->vb.vb2_buf,
- VB2_BUF_STATE_ERROR);
- } else {
- if (vpfe->cur_frm != NULL)
- vb2_buffer_done(&vpfe->cur_frm->vb.vb2_buf,
- VB2_BUF_STATE_ERROR);
- if (vpfe->next_frm != NULL)
- vb2_buffer_done(&vpfe->next_frm->vb.vb2_buf,
- VB2_BUF_STATE_ERROR);
- }
-
- while (!list_empty(&vpfe->dma_queue)) {
- vpfe->next_frm = list_entry(vpfe->dma_queue.next,
- struct vpfe_cap_buffer, list);
- list_del(&vpfe->next_frm->list);
- vb2_buffer_done(&vpfe->next_frm->vb.vb2_buf,
- VB2_BUF_STATE_ERROR);
- }
- spin_unlock_irqrestore(&vpfe->dma_queue_lock, flags);
+ vpfe_return_all_buffers(vpfe, VB2_BUF_STATE_ERROR);
}
static int vpfe_g_pixelaspect(struct file *file, void *priv,
@@ -2068,8 +1948,6 @@ static int vpfe_g_pixelaspect(struct file *file, void *priv,
{
struct vpfe_device *vpfe = video_drvdata(file);
- vpfe_dbg(2, vpfe, "vpfe_g_pixelaspect\n");
-
if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
vpfe->std_index >= ARRAY_SIZE(vpfe_standards))
return -EINVAL;
@@ -2128,6 +2006,7 @@ vpfe_s_selection(struct file *file, void *fh, struct v4l2_selection *s)
struct vpfe_device *vpfe = video_drvdata(file);
struct v4l2_rect cr = vpfe->crop;
struct v4l2_rect r = s->r;
+ u32 bpp;
/* If streaming is started, return error */
if (vb2_is_busy(&vpfe->buffer_queue)) {
@@ -2153,10 +2032,12 @@ vpfe_s_selection(struct file *file, void *fh, struct v4l2_selection *s)
s->r = vpfe->crop = r;
- vpfe_ccdc_set_image_window(&vpfe->ccdc, &r, vpfe->bpp);
+ bpp = __get_bytesperpixel(vpfe, vpfe->current_vpfe_fmt);
+ vpfe_ccdc_set_image_window(&vpfe->ccdc, &r, bpp);
vpfe->fmt.fmt.pix.width = r.width;
vpfe->fmt.fmt.pix.height = r.height;
- vpfe->fmt.fmt.pix.bytesperline = vpfe_ccdc_get_line_length(&vpfe->ccdc);
+ vpfe->fmt.fmt.pix.bytesperline =
+ vpfe_ccdc_get_line_length(&vpfe->ccdc);
vpfe->fmt.fmt.pix.sizeimage = vpfe->fmt.fmt.pix.bytesperline *
vpfe->fmt.fmt.pix.height;
@@ -2172,8 +2053,6 @@ static long vpfe_ioctl_default(struct file *file, void *priv,
struct vpfe_device *vpfe = video_drvdata(file);
int ret;
- vpfe_dbg(2, vpfe, "vpfe_ioctl_default\n");
-
if (!valid_prio) {
vpfe_err(vpfe, "%s device busy\n", __func__);
return -EBUSY;
@@ -2279,10 +2158,10 @@ vpfe_async_bound(struct v4l2_async_notifier *notifier,
struct vpfe_device, v4l2_dev);
struct v4l2_subdev_mbus_code_enum mbus_code;
struct vpfe_subdev_info *sdinfo;
+ struct vpfe_fmt *fmt;
+ int ret = 0;
bool found = false;
- int i, j;
-
- vpfe_dbg(1, vpfe, "vpfe_async_bound\n");
+ int i, j, k;
for (i = 0; i < ARRAY_SIZE(vpfe->cfg->asd); i++) {
if (vpfe->cfg->asd[i]->match.fwnode ==
@@ -2302,27 +2181,37 @@ vpfe_async_bound(struct v4l2_async_notifier *notifier,
vpfe->video_dev.tvnorms |= sdinfo->inputs[0].std;
- /* setup the supported formats & indexes */
- for (j = 0, i = 0; ; ++j) {
- struct vpfe_fmt *fmt;
- int ret;
-
+ vpfe->num_active_fmt = 0;
+ for (j = 0, i = 0; (ret != -EINVAL); ++j) {
memset(&mbus_code, 0, sizeof(mbus_code));
mbus_code.index = j;
mbus_code.which = V4L2_SUBDEV_FORMAT_ACTIVE;
ret = v4l2_subdev_call(subdev, pad, enum_mbus_code,
- NULL, &mbus_code);
+ NULL, &mbus_code);
if (ret)
- break;
-
- fmt = find_format_by_code(mbus_code.code);
- if (!fmt)
continue;
- fmt->supported = true;
- fmt->index = i++;
+ vpfe_dbg(3, vpfe,
+ "subdev %s: code: %04x idx: %d\n",
+ subdev->name, mbus_code.code, j);
+
+ for (k = 0; k < ARRAY_SIZE(formats); k++) {
+ fmt = &formats[k];
+ if (mbus_code.code != fmt->code)
+ continue;
+ vpfe->active_fmt[i] = fmt;
+ vpfe_dbg(3, vpfe,
+ "matched fourcc: %s code: %04x idx: %d\n",
+ print_fourcc(fmt->fourcc), mbus_code.code, i);
+ vpfe->num_active_fmt = ++i;
+ }
}
+ if (!i) {
+ vpfe_err(vpfe, "No suitable format reported by subdev %s\n",
+ subdev->name);
+ return -EINVAL;
+ }
return 0;
}
@@ -2605,8 +2494,6 @@ static int vpfe_remove(struct platform_device *pdev)
{
struct vpfe_device *vpfe = platform_get_drvdata(pdev);
- vpfe_dbg(2, vpfe, "vpfe_remove\n");
-
pm_runtime_disable(&pdev->dev);
v4l2_async_notifier_unregister(&vpfe->notifier);
@@ -2653,22 +2540,21 @@ static int vpfe_suspend(struct device *dev)
struct vpfe_device *vpfe = dev_get_drvdata(dev);
struct vpfe_ccdc *ccdc = &vpfe->ccdc;
- /* if streaming has not started we don't care */
- if (!vb2_start_streaming_called(&vpfe->buffer_queue))
- return 0;
-
- pm_runtime_get_sync(dev);
- vpfe_config_enable(ccdc, 1);
+ /* only do full suspend if streaming has started */
+ if (vb2_start_streaming_called(&vpfe->buffer_queue)) {
+ pm_runtime_get_sync(dev);
+ vpfe_config_enable(ccdc, 1);
- /* Save VPFE context */
- vpfe_save_context(ccdc);
+ /* Save VPFE context */
+ vpfe_save_context(ccdc);
- /* Disable CCDC */
- vpfe_pcr_enable(ccdc, 0);
- vpfe_config_enable(ccdc, 0);
+ /* Disable CCDC */
+ vpfe_pcr_enable(ccdc, 0);
+ vpfe_config_enable(ccdc, 0);
- /* Disable both master and slave clock */
- pm_runtime_put_sync(dev);
+ /* Disable both master and slave clock */
+ pm_runtime_put_sync(dev);
+ }
/* Select sleep pin state */
pinctrl_pm_select_sleep_state(dev);
@@ -2710,19 +2596,18 @@ static int vpfe_resume(struct device *dev)
struct vpfe_device *vpfe = dev_get_drvdata(dev);
struct vpfe_ccdc *ccdc = &vpfe->ccdc;
- /* if streaming has not started we don't care */
- if (!vb2_start_streaming_called(&vpfe->buffer_queue))
- return 0;
-
- /* Enable both master and slave clock */
- pm_runtime_get_sync(dev);
- vpfe_config_enable(ccdc, 1);
+ /* only do full resume if streaming has started */
+ if (vb2_start_streaming_called(&vpfe->buffer_queue)) {
+ /* Enable both master and slave clock */
+ pm_runtime_get_sync(dev);
+ vpfe_config_enable(ccdc, 1);
- /* Restore VPFE context */
- vpfe_restore_context(ccdc);
+ /* Restore VPFE context */
+ vpfe_restore_context(ccdc);
- vpfe_config_enable(ccdc, 0);
- pm_runtime_put_sync(dev);
+ vpfe_config_enable(ccdc, 0);
+ pm_runtime_put_sync(dev);
+ }
/* Select default pin state */
pinctrl_pm_select_default_state(dev);
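
The stop-streaming change above hinges on a small handshake: vpfe_stop_streaming() raises a flag and sleeps on a completion that the frame IRQ signals once the final frame is done. A minimal sketch of that pattern follows, with hypothetical names (the IRQ-side signalling is not visible in this hunk, so it is an assumption here):

#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/types.h>

/* Hypothetical device context mirroring the fields the patch adds. */
struct cap_dev {
	bool stopping;
	struct completion capture_stop;
};

static void cap_start(struct cap_dev *dev)
{
	dev->stopping = false;
	init_completion(&dev->capture_stop);
	/* ... enable the capture engine ... */
}

/* IRQ side: once asked to stop, signal the waiter instead of
 * programming the next frame. */
static void cap_frame_done_irq(struct cap_dev *dev)
{
	if (dev->stopping) {
		complete(&dev->capture_stop);
		return;
	}
	/* ... normal buffer rotation ... */
}

/* Stop side: request a stop, then wait a bounded time for the last
 * frame; a timeout simply means no final IRQ ever arrived. */
static void cap_stop(struct cap_dev *dev)
{
	dev->stopping = true;
	wait_for_completion_timeout(&dev->capture_stop,
				    msecs_to_jiffies(250));
}

The bounded wait matters: an unbounded wait_for_completion() would hang stop-streaming forever if the sensor stops producing frames.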
diff --git a/drivers/media/platform/am437x/am437x-vpfe.h b/drivers/media/platform/am437x/am437x-vpfe.h
index 4678285f34c6..05ee37db0273 100644
--- a/drivers/media/platform/am437x/am437x-vpfe.h
+++ b/drivers/media/platform/am437x/am437x-vpfe.h
@@ -1,21 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2013 - 2014 Texas Instruments, Inc.
*
* Benoit Parrot <bparrot@ti.com>
* Lad, Prabhakar <prabhakar.csengg@gmail.com>
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef AM437X_VPFE_H
@@ -23,6 +11,7 @@
#include <linux/am437x-vpfe.h>
#include <linux/clk.h>
+#include <linux/completion.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/i2c.h>
@@ -214,6 +203,25 @@ struct vpfe_ccdc {
u32 ccdc_ctx[VPFE_REG_END / sizeof(u32)];
};
+/*
+ * struct vpfe_fmt - VPFE media bus format information
+ * fourcc: V4L2 pixel format code
+ * code: V4L2 media bus format code
+ * bitsperpixel: Bits per pixel over the bus
+ */
+struct vpfe_fmt {
+ u32 fourcc;
+ u32 code;
+ u32 bitsperpixel;
+};
+
+/*
+ * When formats[] is modified, make sure to adjust this value as well.
+ * Expect compile-time warnings if VPFE_NUM_FORMATS is smaller than
+ * the number of elements in formats[].
+ */
+#define VPFE_NUM_FORMATS 10
+
struct vpfe_device {
/* V4l2 specific parameters */
/* Identifies video device for this channel */
@@ -249,8 +257,11 @@ struct vpfe_device {
struct vpfe_cap_buffer *next_frm;
/* Used to store pixel format */
struct v4l2_format fmt;
- /* Used to store current bytes per pixel based on current format */
- unsigned int bpp;
+ /* Used to keep a reference to the current vpfe_fmt */
+ struct vpfe_fmt *current_vpfe_fmt;
+ struct vpfe_fmt *active_fmt[VPFE_NUM_FORMATS];
+ unsigned int num_active_fmt;
+
/*
* used when IMP is chained to store the crop window which
* is different from the image window
@@ -270,6 +281,8 @@ struct vpfe_device {
*/
u32 field_off;
struct vpfe_ccdc ccdc;
+ int stopping;
+ struct completion capture_stop;
};
#endif /* AM437X_VPFE_H */
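
The VPFE_NUM_FORMATS comment above relies on the compiler noticing a mismatch between the define and formats[]. One way to make that check explicit, shown only as a sketch (the local formats[] stub stands in for the driver's real table), is a BUILD_BUG_ON:

#include <linux/build_bug.h>
#include <linux/kernel.h>
#include <linux/types.h>

#define VPFE_NUM_FORMATS 10

struct vpfe_fmt { u32 fourcc, code, bitsperpixel; };

static const struct vpfe_fmt formats[] = {
	/* ... the driver's table entries ... */
	{ 0, 0, 0 },
};

static inline void vpfe_check_format_table(void)
{
	/* Trip the build, not a runtime warning, on a size mismatch. */
	BUILD_BUG_ON(ARRAY_SIZE(formats) > VPFE_NUM_FORMATS);
}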
diff --git a/drivers/media/platform/am437x/am437x-vpfe_regs.h b/drivers/media/platform/am437x/am437x-vpfe_regs.h
index 0746c48ec23f..63ecdca3b908 100644
--- a/drivers/media/platform/am437x/am437x-vpfe_regs.h
+++ b/drivers/media/platform/am437x/am437x-vpfe_regs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* TI AM437x Image Sensor Interface Registers
*
@@ -5,15 +6,6 @@
*
* Benoit Parrot <bparrot@ti.com>
* Lad, Prabhakar <prabhakar.csengg@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef AM437X_VPFE_REGS_H
diff --git a/drivers/media/platform/aspeed-video.c b/drivers/media/platform/aspeed-video.c
index eb12f3793062..d8593cb2ae84 100644
--- a/drivers/media/platform/aspeed-video.c
+++ b/drivers/media/platform/aspeed-video.c
@@ -606,6 +606,16 @@ static irqreturn_t aspeed_video_irq(int irq, void *arg)
aspeed_video_start_frame(video);
}
+ /*
+ * CAPTURE_COMPLETE and FRAME_COMPLETE interrupts arrive even when they
+ * are disabled in the VE_INTERRUPT_CTRL register, so clear them to
+ * prevent unnecessary interrupt calls.
+ */
+ if (sts & VE_INTERRUPT_CAPTURE_COMPLETE)
+ sts &= ~VE_INTERRUPT_CAPTURE_COMPLETE;
+ if (sts & VE_INTERRUPT_FRAME_COMPLETE)
+ sts &= ~VE_INTERRUPT_FRAME_COMPLETE;
+
return sts ? IRQ_NONE : IRQ_HANDLED;
}
@@ -614,7 +624,7 @@ static void aspeed_video_check_and_set_polarity(struct aspeed_video *video)
int i;
int hsync_counter = 0;
int vsync_counter = 0;
- u32 sts;
+ u32 sts, ctrl;
for (i = 0; i < NUM_POLARITY_CHECKS; ++i) {
sts = aspeed_video_read(video, VE_MODE_DETECT_STATUS);
@@ -629,30 +639,29 @@ static void aspeed_video_check_and_set_polarity(struct aspeed_video *video)
hsync_counter++;
}
- if (hsync_counter < 0 || vsync_counter < 0) {
- u32 ctrl = 0;
+ ctrl = aspeed_video_read(video, VE_CTRL);
- if (hsync_counter < 0) {
- ctrl = VE_CTRL_HSYNC_POL;
- video->detected_timings.polarities &=
- ~V4L2_DV_HSYNC_POS_POL;
- } else {
- video->detected_timings.polarities |=
- V4L2_DV_HSYNC_POS_POL;
- }
-
- if (vsync_counter < 0) {
- ctrl = VE_CTRL_VSYNC_POL;
- video->detected_timings.polarities &=
- ~V4L2_DV_VSYNC_POS_POL;
- } else {
- video->detected_timings.polarities |=
- V4L2_DV_VSYNC_POS_POL;
- }
+ if (hsync_counter < 0) {
+ ctrl |= VE_CTRL_HSYNC_POL;
+ video->detected_timings.polarities &=
+ ~V4L2_DV_HSYNC_POS_POL;
+ } else {
+ ctrl &= ~VE_CTRL_HSYNC_POL;
+ video->detected_timings.polarities |=
+ V4L2_DV_HSYNC_POS_POL;
+ }
- if (ctrl)
- aspeed_video_update(video, VE_CTRL, 0, ctrl);
+ if (vsync_counter < 0) {
+ ctrl |= VE_CTRL_VSYNC_POL;
+ video->detected_timings.polarities &=
+ ~V4L2_DV_VSYNC_POS_POL;
+ } else {
+ ctrl &= ~VE_CTRL_VSYNC_POL;
+ video->detected_timings.polarities |=
+ V4L2_DV_VSYNC_POS_POL;
}
+
+ aspeed_video_write(video, VE_CTRL, ctrl);
}
static bool aspeed_video_alloc_buf(struct aspeed_video *video,
@@ -741,6 +750,8 @@ static void aspeed_video_get_resolution(struct aspeed_video *video)
}
set_bit(VIDEO_RES_DETECT, &video->flags);
+ aspeed_video_update(video, VE_CTRL,
+ VE_CTRL_VSYNC_POL | VE_CTRL_HSYNC_POL, 0);
aspeed_video_enable_mode_detect(video);
rc = wait_event_interruptible_timeout(video->wait,
@@ -1646,7 +1657,8 @@ static int aspeed_video_probe(struct platform_device *pdev)
{
int rc;
struct resource *res;
- struct aspeed_video *video = kzalloc(sizeof(*video), GFP_KERNEL);
+ struct aspeed_video *video =
+ devm_kzalloc(&pdev->dev, sizeof(*video), GFP_KERNEL);
if (!video)
return -ENOMEM;
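
The aspeed interrupt fix above boils down to masking out status bits that fire even while disabled before deciding whether the IRQ was ours. A condensed sketch of the same idea, with placeholder bit positions; note the patch's two if-statements are equivalent to an unconditional clear, since clearing an unset bit is a no-op:

#include <linux/bits.h>
#include <linux/interrupt.h>
#include <linux/types.h>

#define VE_INTERRUPT_CAPTURE_COMPLETE	BIT(3)	/* placeholder values */
#define VE_INTERRUPT_FRAME_COMPLETE	BIT(5)

static irqreturn_t video_irq_tail(u32 sts)
{
	/* Ignore status bits that assert even when their source is masked. */
	sts &= ~(VE_INTERRUPT_CAPTURE_COMPLETE | VE_INTERRUPT_FRAME_COMPLETE);

	/* Anything left set was truly unexpected. */
	return sts ? IRQ_NONE : IRQ_HANDLED;
}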
diff --git a/drivers/media/platform/cadence/cdns-csi2rx.c b/drivers/media/platform/cadence/cdns-csi2rx.c
index 31ace114eda1..be9ec59774d6 100644
--- a/drivers/media/platform/cadence/cdns-csi2rx.c
+++ b/drivers/media/platform/cadence/cdns-csi2rx.c
@@ -129,7 +129,7 @@ static int csi2rx_start(struct csi2rx_priv *csi2rx)
*/
for (i = csi2rx->num_lanes; i < csi2rx->max_lanes; i++) {
unsigned int idx = find_first_zero_bit(&lanes_used,
- sizeof(lanes_used));
+ csi2rx->max_lanes);
set_bit(idx, &lanes_used);
reg |= CSI2RX_STATIC_CFG_DLANE_MAP(i, i + 1);
}
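
The one-line csi2rx fix is easy to misread: find_first_zero_bit() takes the bitmap length in bits, while sizeof(lanes_used) is a byte count, so the old code both used the wrong unit and capped the search far too low. A sketch of the corrected usage, with made-up helper names:

#include <linux/bitops.h>

static unsigned int pick_unused_lane(unsigned long *lanes_used,
				     unsigned int max_lanes)
{
	/*
	 * Correct: the search length is max_lanes, expressed in *bits*.
	 * Wrong:   find_first_zero_bit(lanes_used, sizeof(*lanes_used))
	 *          passes bytes where bits are expected.
	 */
	unsigned int idx = find_first_zero_bit(lanes_used, max_lanes);

	set_bit(idx, lanes_used);
	return idx;
}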
diff --git a/drivers/media/platform/cec-gpio/cec-gpio.c b/drivers/media/platform/cec-gpio/cec-gpio.c
index 5b17d3a31896..7be91e712c4a 100644
--- a/drivers/media/platform/cec-gpio/cec-gpio.c
+++ b/drivers/media/platform/cec-gpio/cec-gpio.c
@@ -8,10 +8,12 @@
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/gpio/consumer.h>
+#include <media/cec-notifier.h>
#include <media/cec-pin.h>
struct cec_gpio {
struct cec_adapter *adap;
+ struct cec_notifier *notifier;
struct device *dev;
struct gpio_desc *cec_gpio;
@@ -173,9 +175,17 @@ static const struct cec_pin_ops cec_gpio_pin_ops = {
static int cec_gpio_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ struct device *hdmi_dev;
struct cec_gpio *cec;
+ u32 caps = CEC_CAP_DEFAULTS | CEC_CAP_MONITOR_ALL | CEC_CAP_MONITOR_PIN;
int ret;
+ hdmi_dev = cec_notifier_parse_hdmi_phandle(dev);
+ if (PTR_ERR(hdmi_dev) == -EPROBE_DEFER)
+ return PTR_ERR(hdmi_dev);
+ if (IS_ERR(hdmi_dev))
+ caps |= CEC_CAP_PHYS_ADDR;
+
cec = devm_kzalloc(dev, sizeof(*cec), GFP_KERNEL);
if (!cec)
return -ENOMEM;
@@ -196,8 +206,7 @@ static int cec_gpio_probe(struct platform_device *pdev)
return PTR_ERR(cec->v5_gpio);
cec->adap = cec_pin_allocate_adapter(&cec_gpio_pin_ops,
- cec, pdev->name, CEC_CAP_DEFAULTS | CEC_CAP_PHYS_ADDR |
- CEC_CAP_MONITOR_ALL | CEC_CAP_MONITOR_PIN);
+ cec, pdev->name, caps);
if (IS_ERR(cec->adap))
return PTR_ERR(cec->adap);
@@ -205,7 +214,7 @@ static int cec_gpio_probe(struct platform_device *pdev)
IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
cec->adap->name, cec);
if (ret)
- return ret;
+ goto del_adap;
cec_gpio_disable_irq(cec->adap);
@@ -218,7 +227,7 @@ static int cec_gpio_probe(struct platform_device *pdev)
IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
"hpd-gpio", cec);
if (ret)
- return ret;
+ goto del_adap;
}
if (cec->v5_gpio) {
@@ -230,23 +239,37 @@ static int cec_gpio_probe(struct platform_device *pdev)
IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
"v5-gpio", cec);
if (ret)
- return ret;
+ goto del_adap;
}
- ret = cec_register_adapter(cec->adap, &pdev->dev);
- if (ret) {
- cec_delete_adapter(cec->adap);
- return ret;
+ if (!IS_ERR(hdmi_dev)) {
+ cec->notifier = cec_notifier_cec_adap_register(hdmi_dev, NULL,
+ cec->adap);
+ if (!cec->notifier) {
+ ret = -ENOMEM;
+ goto del_adap;
+ }
}
+ ret = cec_register_adapter(cec->adap, &pdev->dev);
+ if (ret)
+ goto unreg_notifier;
+
platform_set_drvdata(pdev, cec);
return 0;
+
+unreg_notifier:
+ cec_notifier_cec_adap_unregister(cec->notifier);
+del_adap:
+ cec_delete_adapter(cec->adap);
+ return ret;
}
static int cec_gpio_remove(struct platform_device *pdev)
{
struct cec_gpio *cec = platform_get_drvdata(pdev);
+ cec_notifier_cec_adap_unregister(cec->notifier);
cec_unregister_adapter(cec->adap);
return 0;
}
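
The cec-gpio rework follows the usual probe unwind rule: error labels undo acquisitions in the reverse order they were made, and every failure after the adapter is allocated must funnel through del_adap. A skeletal sketch of that shape, with hypothetical helper names standing in for the real calls:

int acquire_adapter(void);
int acquire_irqs(void);
int register_notifier(void);
int register_adapter(void);
void unregister_notifier(void);
void delete_adapter(void);

static int demo_probe(void)
{
	int ret;

	ret = acquire_adapter();
	if (ret)
		return ret;		/* nothing to undo yet */

	ret = acquire_irqs();
	if (ret)
		goto del_adap;

	ret = register_notifier();
	if (ret)
		goto del_adap;

	ret = register_adapter();
	if (ret)
		goto unreg_notifier;	/* undo in reverse order */

	return 0;

unreg_notifier:
	unregister_notifier();
del_adap:
	delete_adapter();
	return ret;
}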
diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/coda/coda-common.c
index 73222c0615c0..94fb4d2ecc43 100644
--- a/drivers/media/platform/coda/coda-common.c
+++ b/drivers/media/platform/coda/coda-common.c
@@ -933,7 +933,8 @@ static int coda_g_selection(struct file *file, void *fh,
rsel = &r;
/* fallthrough */
case V4L2_SEL_TGT_CROP:
- if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT ||
+ ctx->inst_type == CODA_INST_DECODER)
return -EINVAL;
break;
case V4L2_SEL_TGT_COMPOSE_BOUNDS:
@@ -942,7 +943,8 @@ static int coda_g_selection(struct file *file, void *fh,
/* fallthrough */
case V4L2_SEL_TGT_COMPOSE:
case V4L2_SEL_TGT_COMPOSE_DEFAULT:
- if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
+ ctx->inst_type == CODA_INST_ENCODER)
return -EINVAL;
break;
default:
@@ -1084,16 +1086,16 @@ static int coda_decoder_cmd(struct file *file, void *fh,
switch (dc->cmd) {
case V4L2_DEC_CMD_START:
- mutex_lock(&ctx->bitstream_mutex);
mutex_lock(&dev->coda_mutex);
+ mutex_lock(&ctx->bitstream_mutex);
coda_bitstream_flush(ctx);
- mutex_unlock(&dev->coda_mutex);
dst_vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
V4L2_BUF_TYPE_VIDEO_CAPTURE);
vb2_clear_last_buffer_dequeued(dst_vq);
ctx->bit_stream_param &= ~CODA_BIT_STREAM_END_FLAG;
coda_fill_bitstream(ctx, NULL);
mutex_unlock(&ctx->bitstream_mutex);
+ mutex_unlock(&dev->coda_mutex);
break;
case V4L2_DEC_CMD_STOP:
stream_end = false;
@@ -2387,6 +2389,7 @@ int coda_decoder_queue_init(void *priv, struct vb2_queue *src_vq,
dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
dst_vq->io_modes = VB2_DMABUF | VB2_MMAP;
+ dst_vq->dma_attrs = DMA_ATTR_NO_KERNEL_MAPPING;
dst_vq->mem_ops = &vb2_dma_contig_memops;
return coda_queue_init(priv, dst_vq);
@@ -2959,8 +2962,6 @@ static int coda_probe(struct platform_device *pdev)
else
return -EINVAL;
- spin_lock_init(&dev->irqlock);
-
dev->dev = &pdev->dev;
dev->clk_per = devm_clk_get(&pdev->dev, "per");
if (IS_ERR(dev->clk_per)) {
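
The coda_decoder_cmd hunk is a lock-ordering fix: both locks are now taken coda_mutex first and released in reverse, so no path can acquire them in the opposite order and deadlock (the classic ABBA pattern). A minimal sketch of the invariant, assuming two mutexes in the same roles:

#include <linux/mutex.h>

static DEFINE_MUTEX(dev_mutex);		/* always taken first */
static DEFINE_MUTEX(bitstream_mutex);	/* always taken second */

static void flush_and_refill(void)
{
	mutex_lock(&dev_mutex);
	mutex_lock(&bitstream_mutex);

	/* ... flush the bitstream, then refill it ... */

	mutex_unlock(&bitstream_mutex);
	mutex_unlock(&dev_mutex);
}

/*
 * Every other path needing both locks must use the same order.
 * Taking bitstream_mutex first here and dev_mutex first elsewhere
 * is exactly the ABBA deadlock the patch removes.
 */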
diff --git a/drivers/media/platform/coda/coda.h b/drivers/media/platform/coda/coda.h
index 848bf1da401e..9f226140b486 100644
--- a/drivers/media/platform/coda/coda.h
+++ b/drivers/media/platform/coda/coda.h
@@ -86,7 +86,6 @@ struct coda_dev {
struct gen_pool *iram_pool;
struct coda_aux_buf iram;
- spinlock_t irqlock;
struct mutex dev_mutex;
struct mutex coda_mutex;
struct workqueue_struct *workqueue;
diff --git a/drivers/media/platform/exynos4-is/fimc-isp-video.c b/drivers/media/platform/exynos4-is/fimc-isp-video.c
index 378cc302e1f8..d2cbcdca0463 100644
--- a/drivers/media/platform/exynos4-is/fimc-isp-video.c
+++ b/drivers/media/platform/exynos4-is/fimc-isp-video.c
@@ -313,7 +313,7 @@ static int isp_video_release(struct file *file)
ivc->streaming = 0;
}
- vb2_fop_release(file);
+ _vb2_fop_release(file, NULL);
if (v4l2_fh_is_singular_file(file)) {
fimc_pipeline_call(&ivc->ve, close);
diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c
index a838189d4490..9aaf3b8060d5 100644
--- a/drivers/media/platform/exynos4-is/media-dev.c
+++ b/drivers/media/platform/exynos4-is/media-dev.c
@@ -1457,12 +1457,12 @@ static int fimc_md_probe(struct platform_device *pdev)
ret = v4l2_device_register(dev, &fmd->v4l2_dev);
if (ret < 0) {
v4l2_err(v4l2_dev, "Failed to register v4l2_device: %d\n", ret);
- return ret;
+ goto err_md;
}
ret = fimc_md_get_clocks(fmd);
if (ret)
- goto err_md;
+ goto err_v4l2dev;
ret = fimc_md_get_pinctrl(fmd);
if (ret < 0) {
@@ -1519,9 +1519,10 @@ err_m_ent:
fimc_md_unregister_entities(fmd);
err_clk:
fimc_md_put_clocks(fmd);
+err_v4l2dev:
+ v4l2_device_unregister(&fmd->v4l2_dev);
err_md:
media_device_cleanup(&fmd->media_dev);
- v4l2_device_unregister(&fmd->v4l2_dev);
return ret;
}
diff --git a/drivers/media/platform/meson/ao-cec-g12a.c b/drivers/media/platform/meson/ao-cec-g12a.c
index 3b39e875292e..3d8fe854feb0 100644
--- a/drivers/media/platform/meson/ao-cec-g12a.c
+++ b/drivers/media/platform/meson/ao-cec-g12a.c
@@ -662,34 +662,27 @@ static int meson_ao_cec_g12a_probe(struct platform_device *pdev)
if (IS_ERR(ao_cec->adap))
return PTR_ERR(ao_cec->adap);
- ao_cec->notify = cec_notifier_cec_adap_register(hdmi_dev, NULL,
- ao_cec->adap);
- if (!ao_cec->notify) {
- ret = -ENOMEM;
- goto out_probe_adapter;
- }
-
ao_cec->adap->owner = THIS_MODULE;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(base)) {
ret = PTR_ERR(base);
- goto out_probe_notify;
+ goto out_probe_adapter;
}
ao_cec->regmap = devm_regmap_init_mmio(&pdev->dev, base,
&meson_ao_cec_g12a_regmap_conf);
if (IS_ERR(ao_cec->regmap)) {
ret = PTR_ERR(ao_cec->regmap);
- goto out_probe_notify;
+ goto out_probe_adapter;
}
ao_cec->regmap_cec = devm_regmap_init(&pdev->dev, NULL, ao_cec,
&meson_ao_cec_g12a_cec_regmap_conf);
if (IS_ERR(ao_cec->regmap_cec)) {
ret = PTR_ERR(ao_cec->regmap_cec);
- goto out_probe_notify;
+ goto out_probe_adapter;
}
irq = platform_get_irq(pdev, 0);
@@ -699,45 +692,52 @@ static int meson_ao_cec_g12a_probe(struct platform_device *pdev)
0, NULL, ao_cec);
if (ret) {
dev_err(&pdev->dev, "irq request failed\n");
- goto out_probe_notify;
+ goto out_probe_adapter;
}
ao_cec->oscin = devm_clk_get(&pdev->dev, "oscin");
if (IS_ERR(ao_cec->oscin)) {
dev_err(&pdev->dev, "oscin clock request failed\n");
ret = PTR_ERR(ao_cec->oscin);
- goto out_probe_notify;
+ goto out_probe_adapter;
}
ret = meson_ao_cec_g12a_setup_clk(ao_cec);
if (ret)
- goto out_probe_notify;
+ goto out_probe_adapter;
ret = clk_prepare_enable(ao_cec->core);
if (ret) {
dev_err(&pdev->dev, "core clock enable failed\n");
- goto out_probe_notify;
+ goto out_probe_adapter;
}
device_reset_optional(&pdev->dev);
platform_set_drvdata(pdev, ao_cec);
+ ao_cec->notify = cec_notifier_cec_adap_register(hdmi_dev, NULL,
+ ao_cec->adap);
+ if (!ao_cec->notify) {
+ ret = -ENOMEM;
+ goto out_probe_core_clk;
+ }
+
ret = cec_register_adapter(ao_cec->adap, &pdev->dev);
if (ret < 0)
- goto out_probe_core_clk;
+ goto out_probe_notify;
/* Setup Hardware */
regmap_write(ao_cec->regmap, CECB_GEN_CNTL_REG, CECB_GEN_CNTL_RESET);
return 0;
-out_probe_core_clk:
- clk_disable_unprepare(ao_cec->core);
-
out_probe_notify:
cec_notifier_cec_adap_unregister(ao_cec->notify);
+out_probe_core_clk:
+ clk_disable_unprepare(ao_cec->core);
+
out_probe_adapter:
cec_delete_adapter(ao_cec->adap);
diff --git a/drivers/media/platform/meson/ao-cec.c b/drivers/media/platform/meson/ao-cec.c
index 64ed549bf012..03600e8b3ef0 100644
--- a/drivers/media/platform/meson/ao-cec.c
+++ b/drivers/media/platform/meson/ao-cec.c
@@ -624,20 +624,13 @@ static int meson_ao_cec_probe(struct platform_device *pdev)
if (IS_ERR(ao_cec->adap))
return PTR_ERR(ao_cec->adap);
- ao_cec->notify = cec_notifier_cec_adap_register(hdmi_dev, NULL,
- ao_cec->adap);
- if (!ao_cec->notify) {
- ret = -ENOMEM;
- goto out_probe_adapter;
- }
-
ao_cec->adap->owner = THIS_MODULE;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
ao_cec->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(ao_cec->base)) {
ret = PTR_ERR(ao_cec->base);
- goto out_probe_notify;
+ goto out_probe_adapter;
}
irq = platform_get_irq(pdev, 0);
@@ -647,20 +640,20 @@ static int meson_ao_cec_probe(struct platform_device *pdev)
0, NULL, ao_cec);
if (ret) {
dev_err(&pdev->dev, "irq request failed\n");
- goto out_probe_notify;
+ goto out_probe_adapter;
}
ao_cec->core = devm_clk_get(&pdev->dev, "core");
if (IS_ERR(ao_cec->core)) {
dev_err(&pdev->dev, "core clock request failed\n");
ret = PTR_ERR(ao_cec->core);
- goto out_probe_notify;
+ goto out_probe_adapter;
}
ret = clk_prepare_enable(ao_cec->core);
if (ret) {
dev_err(&pdev->dev, "core clock enable failed\n");
- goto out_probe_notify;
+ goto out_probe_adapter;
}
ret = clk_set_rate(ao_cec->core, CEC_CLK_RATE);
@@ -674,9 +667,16 @@ static int meson_ao_cec_probe(struct platform_device *pdev)
ao_cec->pdev = pdev;
platform_set_drvdata(pdev, ao_cec);
+ ao_cec->notify = cec_notifier_cec_adap_register(hdmi_dev, NULL,
+ ao_cec->adap);
+ if (!ao_cec->notify) {
+ ret = -ENOMEM;
+ goto out_probe_clk;
+ }
+
ret = cec_register_adapter(ao_cec->adap, &pdev->dev);
if (ret < 0)
- goto out_probe_clk;
+ goto out_probe_notify;
/* Setup Hardware */
writel_relaxed(CEC_GEN_CNTL_RESET,
@@ -684,12 +684,12 @@ static int meson_ao_cec_probe(struct platform_device *pdev)
return 0;
-out_probe_clk:
- clk_disable_unprepare(ao_cec->core);
-
out_probe_notify:
cec_notifier_cec_adap_unregister(ao_cec->notify);
+out_probe_clk:
+ clk_disable_unprepare(ao_cec->core);
+
out_probe_adapter:
cec_delete_adapter(ao_cec->adap);
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c
index 26a55c3e807e..858727824889 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c
@@ -284,7 +284,7 @@ static void mtk_vdec_update_fmt(struct mtk_vcodec_ctx *ctx,
fmt = &mtk_video_formats[k];
if (fmt->fourcc == pixelformat) {
mtk_v4l2_debug(1, "Update cap fourcc(%d -> %d)",
- dst_q_data->fmt.fourcc, pixelformat);
+ dst_q_data->fmt->fourcc, pixelformat);
dst_q_data->fmt = fmt;
return;
}
@@ -841,12 +841,20 @@ static int vidioc_vdec_s_fmt(struct file *file, void *priv,
return -EINVAL;
pix_mp = &f->fmt.pix_mp;
+ /*
+ * Setting OUTPUT format after OUTPUT buffers are allocated is invalid
+ * if using the stateful API.
+ */
if ((f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) &&
vb2_is_busy(&ctx->m2m_ctx->out_q_ctx.q)) {
mtk_v4l2_err("out_q_ctx buffers already requested");
ret = -EBUSY;
}
+ /*
+ * Setting CAPTURE format after CAPTURE buffers are allocated is
+ * invalid.
+ */
if ((f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) &&
vb2_is_busy(&ctx->m2m_ctx->cap_q_ctx.q)) {
mtk_v4l2_err("cap_q_ctx buffers already requested");
@@ -865,6 +873,8 @@ static int vidioc_vdec_s_fmt(struct file *file, void *priv,
fmt = mtk_vdec_find_format(f);
}
}
+ if (fmt == NULL)
+ return -EINVAL;
q_data->fmt = fmt;
vidioc_try_fmt(f, q_data->fmt);
@@ -873,10 +883,10 @@ static int vidioc_vdec_s_fmt(struct file *file, void *priv,
q_data->coded_width = pix_mp->width;
q_data->coded_height = pix_mp->height;
- ctx->colorspace = f->fmt.pix_mp.colorspace;
- ctx->ycbcr_enc = f->fmt.pix_mp.ycbcr_enc;
- ctx->quantization = f->fmt.pix_mp.quantization;
- ctx->xfer_func = f->fmt.pix_mp.xfer_func;
+ ctx->colorspace = pix_mp->colorspace;
+ ctx->ycbcr_enc = pix_mp->ycbcr_enc;
+ ctx->quantization = pix_mp->quantization;
+ ctx->xfer_func = pix_mp->xfer_func;
if (ctx->state == MTK_STATE_FREE) {
ret = vdec_if_init(ctx, q_data->fmt->fourcc);
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c
index 00d090df11bb..944771ee5f5c 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c
@@ -253,13 +253,7 @@ static int mtk_vcodec_probe(struct platform_device *pdev)
}
for (i = 0; i < NUM_MAX_VDEC_REG_BASE; i++) {
- res = platform_get_resource(pdev, IORESOURCE_MEM, i);
- if (res == NULL) {
- dev_err(&pdev->dev, "get memory resource failed.");
- ret = -ENXIO;
- goto err_res;
- }
- dev->reg_base[i] = devm_ioremap_resource(&pdev->dev, res);
+ dev->reg_base[i] = devm_platform_ioremap_resource(pdev, i);
if (IS_ERR((__force void *)dev->reg_base[i])) {
ret = PTR_ERR((__force void *)dev->reg_base[i]);
goto err_res;
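
The mtk-vcodec cleanup replaces the two-step resource lookup with devm_platform_ioremap_resource(), which bundles the same calls plus the missing-resource check. Roughly the before and after shapes, as a sketch:

#include <linux/platform_device.h>

static void __iomem *map_reg_base(struct platform_device *pdev,
				  unsigned int index)
{
	/*
	 * Open-coded form the patch removes:
	 *
	 *	struct resource *res;
	 *
	 *	res = platform_get_resource(pdev, IORESOURCE_MEM, index);
	 *	if (!res)
	 *		return ERR_PTR(-ENXIO);
	 *	return devm_ioremap_resource(&pdev->dev, res);
	 */

	/* Equivalent helper, error handling included: */
	return devm_platform_ioremap_resource(pdev, index);
}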
diff --git a/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c b/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c
index 49aa85a9bb5a..50048c170b99 100644
--- a/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c
+++ b/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c
@@ -283,7 +283,6 @@ static int vdec_h264_init(struct mtk_vcodec_ctx *ctx)
inst->vpu.id = IPI_VDEC_H264;
inst->vpu.dev = ctx->dev->vpu_plat_dev;
inst->vpu.ctx = ctx;
- inst->vpu.handler = vpu_dec_ipi_handler;
err = vpu_dec_init(&inst->vpu);
if (err) {
diff --git a/drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c b/drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c
index 63a8708ce682..6011fdd60a22 100644
--- a/drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c
+++ b/drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c
@@ -402,7 +402,6 @@ static int vdec_vp8_init(struct mtk_vcodec_ctx *ctx)
inst->vpu.id = IPI_VDEC_VP8;
inst->vpu.dev = ctx->dev->vpu_plat_dev;
inst->vpu.ctx = ctx;
- inst->vpu.handler = vpu_dec_ipi_handler;
err = vpu_dec_init(&inst->vpu);
if (err) {
diff --git a/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c b/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c
index 5066c283d86d..24c1f0bf2147 100644
--- a/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c
+++ b/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c
@@ -793,7 +793,6 @@ static int vdec_vp9_init(struct mtk_vcodec_ctx *ctx)
inst->vpu.id = IPI_VDEC_VP9;
inst->vpu.dev = ctx->dev->vpu_plat_dev;
inst->vpu.ctx = ctx;
- inst->vpu.handler = vpu_dec_ipi_handler;
if (vpu_dec_init(&inst->vpu)) {
mtk_vcodec_err(inst, "vp9_dec_vpu_init failed");
diff --git a/drivers/media/platform/mtk-vcodec/vdec_vpu_if.c b/drivers/media/platform/mtk-vcodec/vdec_vpu_if.c
index 3f38cc4509ef..70abfd4cd4b9 100644
--- a/drivers/media/platform/mtk-vcodec/vdec_vpu_if.c
+++ b/drivers/media/platform/mtk-vcodec/vdec_vpu_if.c
@@ -25,10 +25,16 @@ static void handle_init_ack_msg(struct vdec_vpu_ipi_init_ack *msg)
}
/*
+ * vpu_dec_ipi_handler - Handler for VPU ipi message.
+ *
+ * @data: ipi message
+ * @len : length of ipi message
+ * @priv: callback private data passed by the decoder at registration time.
+ *
* This function runs in interrupt context and it means there's an IPI MSG
* from VPU.
*/
-void vpu_dec_ipi_handler(void *data, unsigned int len, void *priv)
+static void vpu_dec_ipi_handler(void *data, unsigned int len, void *priv)
{
struct vdec_vpu_ipi_ack *msg = data;
struct vdec_vpu_inst *vpu = (struct vdec_vpu_inst *)
@@ -102,6 +108,7 @@ int vpu_dec_init(struct vdec_vpu_inst *vpu)
mtk_vcodec_debug_enter(vpu);
init_waitqueue_head(&vpu->wq);
+ vpu->handler = vpu_dec_ipi_handler;
err = vpu_ipi_register(vpu->dev, vpu->id, vpu->handler, "vdec", NULL);
if (err != 0) {
diff --git a/drivers/media/platform/mtk-vcodec/vdec_vpu_if.h b/drivers/media/platform/mtk-vcodec/vdec_vpu_if.h
index b76f717e4fd7..f779b0676fbd 100644
--- a/drivers/media/platform/mtk-vcodec/vdec_vpu_if.h
+++ b/drivers/media/platform/mtk-vcodec/vdec_vpu_if.h
@@ -76,13 +76,4 @@ int vpu_dec_deinit(struct vdec_vpu_inst *vpu);
*/
int vpu_dec_reset(struct vdec_vpu_inst *vpu);
-/**
- * vpu_dec_ipi_handler - Handler for VPU ipi message.
- *
- * @data: ipi message
- * @len : length of ipi message
- * @priv: callback private data which is passed by decoder when register.
- */
-void vpu_dec_ipi_handler(void *data, unsigned int len, void *priv);
-
#endif
diff --git a/drivers/media/platform/mtk-vpu/mtk_vpu.c b/drivers/media/platform/mtk-vpu/mtk_vpu.c
index cc2ff40d060d..a768707abb94 100644
--- a/drivers/media/platform/mtk-vpu/mtk_vpu.c
+++ b/drivers/media/platform/mtk-vpu/mtk_vpu.c
@@ -273,7 +273,7 @@ int vpu_ipi_register(struct platform_device *pdev,
return -EPROBE_DEFER;
}
- if (id >= 0 && id < IPI_MAX && handler) {
+ if (id < IPI_MAX && handler) {
ipi_desc = vpu->ipi_desc;
ipi_desc[id].name = name;
ipi_desc[id].handler = handler;
@@ -398,7 +398,7 @@ int vpu_wdt_reg_handler(struct platform_device *pdev,
handler = vpu->wdt.handler;
- if (id >= 0 && id < VPU_RST_MAX && wdt_reset) {
+ if (id < VPU_RST_MAX && wdt_reset) {
dev_dbg(vpu->dev, "wdt register id %d\n", id);
mutex_lock(&vpu->vpu_mutex);
handler[id].reset_func = wdt_reset;
diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c
index e6eff512a8a1..07312a2fab24 100644
--- a/drivers/media/platform/qcom/venus/core.c
+++ b/drivers/media/platform/qcom/venus/core.c
@@ -5,6 +5,7 @@
*/
#include <linux/clk.h>
#include <linux/init.h>
+#include <linux/interconnect.h>
#include <linux/ioctl.h>
#include <linux/list.h>
#include <linux/module.h>
@@ -239,6 +240,14 @@ static int venus_probe(struct platform_device *pdev)
if (IS_ERR(core->base))
return PTR_ERR(core->base);
+ core->video_path = of_icc_get(dev, "video-mem");
+ if (IS_ERR(core->video_path))
+ return PTR_ERR(core->video_path);
+
+ core->cpucfg_path = of_icc_get(dev, "cpu-cfg");
+ if (IS_ERR(core->cpucfg_path))
+ return PTR_ERR(core->cpucfg_path);
+
core->irq = platform_get_irq(pdev, 0);
if (core->irq < 0)
return core->irq;
@@ -273,6 +282,10 @@ static int venus_probe(struct platform_device *pdev)
if (ret)
return ret;
+ ret = icc_set_bw(core->cpucfg_path, 0, kbps_to_icc(1000));
+ if (ret)
+ return ret;
+
ret = hfi_create(core, &venus_core_ops);
if (ret)
return ret;
@@ -355,6 +368,9 @@ static int venus_remove(struct platform_device *pdev)
pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
+ icc_put(core->video_path);
+ icc_put(core->cpucfg_path);
+
v4l2_device_unregister(&core->v4l2_dev);
return ret;
@@ -427,10 +443,11 @@ static const struct venus_resources msm8916_res = {
};
static const struct freq_tbl msm8996_freq_table[] = {
- { 1944000, 490000000 }, /* 4k UHD @ 60 */
- { 972000, 320000000 }, /* 4k UHD @ 30 */
- { 489600, 150000000 }, /* 1080p @ 60 */
- { 244800, 75000000 }, /* 1080p @ 30 */
+ { 1944000, 520000000 }, /* 4k UHD @ 60 (decode only) */
+ { 972000, 520000000 }, /* 4k UHD @ 30 */
+ { 489600, 346666667 }, /* 1080p @ 60 */
+ { 244800, 150000000 }, /* 1080p @ 30 */
+ { 108000, 75000000 }, /* 720p @ 30 */
};
static const struct reg_val msm8996_reg_preset[] = {
@@ -464,9 +481,40 @@ static const struct freq_tbl sdm845_freq_table[] = {
{ 244800, 100000000 }, /* 1920x1080@30 */
};
+static struct codec_freq_data sdm845_codec_freq_data[] = {
+ { V4L2_PIX_FMT_H264, VIDC_SESSION_TYPE_ENC, 675, 10 },
+ { V4L2_PIX_FMT_HEVC, VIDC_SESSION_TYPE_ENC, 675, 10 },
+ { V4L2_PIX_FMT_VP8, VIDC_SESSION_TYPE_ENC, 675, 10 },
+ { V4L2_PIX_FMT_MPEG2, VIDC_SESSION_TYPE_DEC, 200, 10 },
+ { V4L2_PIX_FMT_H264, VIDC_SESSION_TYPE_DEC, 200, 10 },
+ { V4L2_PIX_FMT_HEVC, VIDC_SESSION_TYPE_DEC, 200, 10 },
+ { V4L2_PIX_FMT_VP8, VIDC_SESSION_TYPE_DEC, 200, 10 },
+ { V4L2_PIX_FMT_VP9, VIDC_SESSION_TYPE_DEC, 200, 10 },
+};
+
+static const struct bw_tbl sdm845_bw_table_enc[] = {
+ { 1944000, 1612000, 0, 2416000, 0 }, /* 3840x2160@60 */
+ { 972000, 951000, 0, 1434000, 0 }, /* 3840x2160@30 */
+ { 489600, 723000, 0, 973000, 0 }, /* 1920x1080@60 */
+ { 244800, 370000, 0, 495000, 0 }, /* 1920x1080@30 */
+};
+
+static const struct bw_tbl sdm845_bw_table_dec[] = {
+ { 2073600, 3929000, 0, 5551000, 0 }, /* 4096x2160@60 */
+ { 1036800, 1987000, 0, 2797000, 0 }, /* 4096x2160@30 */
+ { 489600, 1040000, 0, 1298000, 0 }, /* 1920x1080@60 */
+ { 244800, 530000, 0, 659000, 0 }, /* 1920x1080@30 */
+};
+
static const struct venus_resources sdm845_res = {
.freq_tbl = sdm845_freq_table,
.freq_tbl_size = ARRAY_SIZE(sdm845_freq_table),
+ .bw_tbl_enc = sdm845_bw_table_enc,
+ .bw_tbl_enc_size = ARRAY_SIZE(sdm845_bw_table_enc),
+ .bw_tbl_dec = sdm845_bw_table_dec,
+ .bw_tbl_dec_size = ARRAY_SIZE(sdm845_bw_table_dec),
+ .codec_freq_data = sdm845_codec_freq_data,
+ .codec_freq_data_size = ARRAY_SIZE(sdm845_codec_freq_data),
.clks = {"core", "iface", "bus" },
.clks_num = 3,
.max_load = 3110400, /* 4096x2160@90 */
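
The interconnect additions in venus_probe()/venus_remove() follow the standard icc API shape: look the paths up by name at probe, keep a bandwidth vote while active, release at remove. A condensed sketch under the same assumptions (the path names "video-mem" and "cpu-cfg" come from the hunk); unlike the hunk, this sketch also releases the first path if the second lookup fails:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/interconnect.h>

struct demo_core {
	struct icc_path *video_path;
	struct icc_path *cpucfg_path;
};

static int demo_icc_init(struct device *dev, struct demo_core *core)
{
	core->video_path = of_icc_get(dev, "video-mem");
	if (IS_ERR(core->video_path))
		return PTR_ERR(core->video_path);

	core->cpucfg_path = of_icc_get(dev, "cpu-cfg");
	if (IS_ERR(core->cpucfg_path)) {
		icc_put(core->video_path);
		return PTR_ERR(core->cpucfg_path);
	}

	/* Minimal standing vote so register access stays possible. */
	return icc_set_bw(core->cpucfg_path, 0, kbps_to_icc(1000));
}

static void demo_icc_exit(struct demo_core *core)
{
	icc_put(core->video_path);
	icc_put(core->cpucfg_path);
}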
diff --git a/drivers/media/platform/qcom/venus/core.h b/drivers/media/platform/qcom/venus/core.h
index 922cb7e64bfa..11585fb3cae3 100644
--- a/drivers/media/platform/qcom/venus/core.h
+++ b/drivers/media/platform/qcom/venus/core.h
@@ -26,12 +26,33 @@ struct reg_val {
u32 value;
};
+struct codec_freq_data {
+ u32 pixfmt;
+ u32 session_type;
+ unsigned long vpp_freq;
+ unsigned long vsp_freq;
+};
+
+struct bw_tbl {
+ u32 mbs_per_sec;
+ u32 avg;
+ u32 peak;
+ u32 avg_10bit;
+ u32 peak_10bit;
+};
+
struct venus_resources {
u64 dma_mask;
const struct freq_tbl *freq_tbl;
unsigned int freq_tbl_size;
+ const struct bw_tbl *bw_tbl_enc;
+ unsigned int bw_tbl_enc_size;
+ const struct bw_tbl *bw_tbl_dec;
+ unsigned int bw_tbl_dec_size;
const struct reg_val *reg_tbl;
unsigned int reg_tbl_size;
+ const struct codec_freq_data *codec_freq_data;
+ unsigned int codec_freq_data_size;
const char * const clks[VIDC_CLKS_NUM_MAX];
unsigned int clks_num;
enum hfi_version hfi_version;
@@ -115,6 +136,8 @@ struct venus_core {
struct clk *core1_clk;
struct clk *core0_bus_clk;
struct clk *core1_bus_clk;
+ struct icc_path *video_path;
+ struct icc_path *cpucfg_path;
struct video_device *vdev_dec;
struct video_device *vdev_enc;
struct v4l2_device v4l2_dev;
@@ -208,6 +231,12 @@ struct venus_buffer {
struct list_head ref_list;
};
+struct clock_data {
+ u32 core_id;
+ unsigned long freq;
+ const struct codec_freq_data *codec_freq_data;
+};
+
#define to_venus_buffer(ptr) container_of(ptr, struct venus_buffer, vb)
enum venus_dec_state {
@@ -288,6 +317,7 @@ struct venus_inst {
struct list_head list;
struct mutex lock;
struct venus_core *core;
+ struct clock_data clk_data;
struct list_head dpbbufs;
struct list_head internalbufs;
struct list_head registeredbufs;
diff --git a/drivers/media/platform/qcom/venus/helpers.c b/drivers/media/platform/qcom/venus/helpers.c
index 1ad96c25ab09..a172f1ac0b35 100644
--- a/drivers/media/platform/qcom/venus/helpers.c
+++ b/drivers/media/platform/qcom/venus/helpers.c
@@ -5,6 +5,7 @@
*/
#include <linux/clk.h>
#include <linux/iopoll.h>
+#include <linux/interconnect.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/pm_runtime.h>
@@ -388,12 +389,91 @@ static u32 load_per_type(struct venus_core *core, u32 session_type)
return mbs_per_sec;
}
-int venus_helper_load_scale_clocks(struct venus_core *core)
+static void mbs_to_bw(struct venus_inst *inst, u32 mbs, u32 *avg, u32 *peak)
{
+ const struct venus_resources *res = inst->core->res;
+ const struct bw_tbl *bw_tbl;
+ unsigned int num_rows, i;
+
+ *avg = 0;
+ *peak = 0;
+
+ if (mbs == 0)
+ return;
+
+ if (inst->session_type == VIDC_SESSION_TYPE_ENC) {
+ num_rows = res->bw_tbl_enc_size;
+ bw_tbl = res->bw_tbl_enc;
+ } else if (inst->session_type == VIDC_SESSION_TYPE_DEC) {
+ num_rows = res->bw_tbl_dec_size;
+ bw_tbl = res->bw_tbl_dec;
+ } else {
+ return;
+ }
+
+ if (!bw_tbl || num_rows == 0)
+ return;
+
+ for (i = 0; i < num_rows; i++) {
+ if (mbs > bw_tbl[i].mbs_per_sec)
+ break;
+
+ if (inst->dpb_fmt & HFI_COLOR_FORMAT_10_BIT_BASE) {
+ *avg = bw_tbl[i].avg_10bit;
+ *peak = bw_tbl[i].peak_10bit;
+ } else {
+ *avg = bw_tbl[i].avg;
+ *peak = bw_tbl[i].peak;
+ }
+ }
+}
+
+static int load_scale_bw(struct venus_core *core)
+{
+ struct venus_inst *inst = NULL;
+ u32 mbs_per_sec, avg, peak, total_avg = 0, total_peak = 0;
+
+ mutex_lock(&core->lock);
+ list_for_each_entry(inst, &core->instances, list) {
+ mbs_per_sec = load_per_instance(inst);
+ mbs_to_bw(inst, mbs_per_sec, &avg, &peak);
+ total_avg += avg;
+ total_peak += peak;
+ }
+ mutex_unlock(&core->lock);
+
+ dev_dbg(core->dev, "total: avg_bw: %u, peak_bw: %u\n",
+ total_avg, total_peak);
+
+ return icc_set_bw(core->video_path, total_avg, total_peak);
+}
+
+static int set_clk_freq(struct venus_core *core, unsigned long freq)
+{
+ struct clk *clk = core->clks[0];
+ int ret;
+
+ ret = clk_set_rate(clk, freq);
+ if (ret)
+ return ret;
+
+ ret = clk_set_rate(core->core0_clk, freq);
+ if (ret)
+ return ret;
+
+ ret = clk_set_rate(core->core1_clk, freq);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int scale_clocks(struct venus_inst *inst)
+{
+ struct venus_core *core = inst->core;
const struct freq_tbl *table = core->res->freq_tbl;
unsigned int num_rows = core->res->freq_tbl_size;
unsigned long freq = table[0].freq;
- struct clk *clk = core->clks[0];
struct device *dev = core->dev;
u32 mbs_per_sec;
unsigned int i;
@@ -419,23 +499,124 @@ int venus_helper_load_scale_clocks(struct venus_core *core)
set_freq:
- ret = clk_set_rate(clk, freq);
- if (ret)
- goto err;
+ ret = set_clk_freq(core, freq);
+ if (ret) {
+ dev_err(dev, "failed to set clock rate %lu (%d)\n",
+ freq, ret);
+ return ret;
+ }
- ret = clk_set_rate(core->core0_clk, freq);
- if (ret)
- goto err;
+ ret = load_scale_bw(core);
+ if (ret) {
+ dev_err(dev, "failed to set bandwidth (%d)\n",
+ ret);
+ return ret;
+ }
- ret = clk_set_rate(core->core1_clk, freq);
- if (ret)
- goto err;
+ return 0;
+}
+
+static unsigned long calculate_inst_freq(struct venus_inst *inst,
+ unsigned long filled_len)
+{
+ unsigned long vpp_freq = 0, vsp_freq = 0;
+ u32 fps = (u32)inst->fps;
+ u32 mbs_per_sec;
+
+ mbs_per_sec = load_per_instance(inst) / fps;
+
+ vpp_freq = mbs_per_sec * inst->clk_data.codec_freq_data->vpp_freq;
+ /* 21 / 20 is overhead factor */
+ vpp_freq += vpp_freq / 20;
+ vsp_freq = mbs_per_sec * inst->clk_data.codec_freq_data->vsp_freq;
+
+ /* 10 / 7 is overhead factor */
+ if (inst->session_type == VIDC_SESSION_TYPE_ENC)
+ vsp_freq += (inst->controls.enc.bitrate * 10) / 7;
+ else
+ vsp_freq += ((fps * filled_len * 8) * 10) / 7;
+
+ return max(vpp_freq, vsp_freq);
+}
+
+static int scale_clocks_v4(struct venus_inst *inst)
+{
+ struct venus_core *core = inst->core;
+ const struct freq_tbl *table = core->res->freq_tbl;
+ unsigned int num_rows = core->res->freq_tbl_size;
+ struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
+ struct device *dev = core->dev;
+ unsigned long freq = 0, freq_core1 = 0, freq_core2 = 0;
+ unsigned long filled_len = 0;
+ struct venus_buffer *buf, *n;
+ struct vb2_buffer *vb;
+ int i, ret;
+
+ v4l2_m2m_for_each_src_buf_safe(m2m_ctx, buf, n) {
+ vb = &buf->vb.vb2_buf;
+ filled_len = max(filled_len, vb2_get_plane_payload(vb, 0));
+ }
+
+ if (inst->session_type == VIDC_SESSION_TYPE_DEC && !filled_len)
+ return 0;
+
+ freq = calculate_inst_freq(inst, filled_len);
+ inst->clk_data.freq = freq;
+
+ mutex_lock(&core->lock);
+ list_for_each_entry(inst, &core->instances, list) {
+ if (inst->clk_data.core_id == VIDC_CORE_ID_1) {
+ freq_core1 += inst->clk_data.freq;
+ } else if (inst->clk_data.core_id == VIDC_CORE_ID_2) {
+ freq_core2 += inst->clk_data.freq;
+ } else if (inst->clk_data.core_id == VIDC_CORE_ID_3) {
+ freq_core1 += inst->clk_data.freq;
+ freq_core2 += inst->clk_data.freq;
+ }
+ }
+ mutex_unlock(&core->lock);
+
+ freq = max(freq_core1, freq_core2);
+
+	if (freq >= table[0].freq) {
+		dev_warn(dev, "HW is overloaded, needed: %lu max: %lu\n",
+			 freq, table[0].freq);
+		freq = table[0].freq;
+ goto set_freq;
+ }
+
+ for (i = num_rows - 1 ; i >= 0; i--) {
+ if (freq <= table[i].freq) {
+ freq = table[i].freq;
+ break;
+ }
+ }
+
+set_freq:
+
+ ret = set_clk_freq(core, freq);
+ if (ret) {
+ dev_err(dev, "failed to set clock rate %lu (%d)\n",
+ freq, ret);
+ return ret;
+ }
+
+ ret = load_scale_bw(core);
+ if (ret) {
+ dev_err(dev, "failed to set bandwidth (%d)\n",
+ ret);
+ return ret;
+ }
return 0;
+}
-err:
- dev_err(dev, "failed to set clock rate %lu (%d)\n", freq, ret);
- return ret;
+int venus_helper_load_scale_clocks(struct venus_inst *inst)
+{
+ if (IS_V4(inst->core))
+ return scale_clocks_v4(inst);
+
+ return scale_clocks(inst);
}
EXPORT_SYMBOL_GPL(venus_helper_load_scale_clocks);
@@ -541,6 +722,8 @@ session_process_buf(struct venus_inst *inst, struct vb2_v4l2_buffer *vbuf)
if (inst->session_type == VIDC_SESSION_TYPE_DEC)
put_ts_metadata(inst, vbuf);
+
+ venus_helper_load_scale_clocks(inst);
} else if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
if (inst->session_type == VIDC_SESSION_TYPE_ENC)
fdata.buffer_type = HFI_BUFFER_OUTPUT;
@@ -809,6 +992,7 @@ int venus_helper_set_core_usage(struct venus_inst *inst, u32 usage)
const u32 ptype = HFI_PROPERTY_CONFIG_VIDEOCORES_USAGE;
struct hfi_videocores_usage_type cu;
+ inst->clk_data.core_id = usage;
if (!IS_V4(inst->core))
return 0;
@@ -818,6 +1002,36 @@ int venus_helper_set_core_usage(struct venus_inst *inst, u32 usage)
}
EXPORT_SYMBOL_GPL(venus_helper_set_core_usage);
+int venus_helper_init_codec_freq_data(struct venus_inst *inst)
+{
+ const struct codec_freq_data *data;
+ unsigned int i, data_size;
+ u32 pixfmt;
+ int ret = 0;
+
+ if (!IS_V4(inst->core))
+ return 0;
+
+ data = inst->core->res->codec_freq_data;
+ data_size = inst->core->res->codec_freq_data_size;
+ pixfmt = inst->session_type == VIDC_SESSION_TYPE_DEC ?
+ inst->fmt_out->pixfmt : inst->fmt_cap->pixfmt;
+
+ for (i = 0; i < data_size; i++) {
+ if (data[i].pixfmt == pixfmt &&
+ data[i].session_type == inst->session_type) {
+ inst->clk_data.codec_freq_data = &data[i];
+ break;
+ }
+ }
+
+ if (!inst->clk_data.codec_freq_data)
+ ret = -EINVAL;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(venus_helper_init_codec_freq_data);
+
int venus_helper_set_num_bufs(struct venus_inst *inst, unsigned int input_bufs,
unsigned int output_bufs,
unsigned int output2_bufs)
@@ -1140,7 +1354,7 @@ void venus_helper_vb2_stop_streaming(struct vb2_queue *q)
venus_helper_free_dpb_bufs(inst);
- venus_helper_load_scale_clocks(core);
+ venus_helper_load_scale_clocks(inst);
INIT_LIST_HEAD(&inst->registeredbufs);
}
@@ -1193,7 +1407,6 @@ EXPORT_SYMBOL_GPL(venus_helper_process_initial_out_bufs);
int venus_helper_vb2_start_streaming(struct venus_inst *inst)
{
- struct venus_core *core = inst->core;
int ret;
ret = venus_helper_intbufs_alloc(inst);
@@ -1204,7 +1417,7 @@ int venus_helper_vb2_start_streaming(struct venus_inst *inst)
if (ret)
goto err_bufs_free;
- venus_helper_load_scale_clocks(core);
+ venus_helper_load_scale_clocks(inst);
ret = hfi_session_load_res(inst);
if (ret)
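
The per-instance frequency model in scale_clocks_v4() is mostly arithmetic: a macroblock rate times a per-codec cycle cost, plus fixed overhead factors (21/20 on the pixel pipe, 10/7 on the stream pipe), taking the larger of the two estimates. A worked sketch with illustrative numbers; the cycle costs are the sdm845 decoder table values, everything else is made up:

/* Sketch of the vpp/vsp frequency estimate, user-space style. */
#include <stdio.h>

int main(void)
{
	unsigned long mbs = 244800;		/* 1080p30 macroblock load */
	unsigned long vpp_cycles = 200;		/* decoder vpp_freq, sdm845 */
	unsigned long vsp_cycles = 10;		/* decoder vsp_freq, sdm845 */
	unsigned long bitstream_bps = 8000000;	/* made-up 8 Mbit/s input */

	unsigned long vpp = mbs * vpp_cycles;
	vpp += vpp / 20;			/* 21/20 overhead factor */

	unsigned long vsp = mbs * vsp_cycles;
	vsp += (bitstream_bps * 10) / 7;	/* 10/7 overhead factor */

	/* The clock request is the larger of the two estimates. */
	printf("vpp=%lu Hz vsp=%lu Hz -> need %lu Hz\n",
	       vpp, vsp, vpp > vsp ? vpp : vsp);
	return 0;
}

With these inputs the pixel pipe dominates (about 51.4 MHz versus about 13.9 MHz), so the table lookup that follows in the driver would pick the lowest table frequency at or above 51.4 MHz.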
diff --git a/drivers/media/platform/qcom/venus/helpers.h b/drivers/media/platform/qcom/venus/helpers.h
index 01f411b12f81..34dcd0c13f06 100644
--- a/drivers/media/platform/qcom/venus/helpers.h
+++ b/drivers/media/platform/qcom/venus/helpers.h
@@ -33,6 +33,7 @@ int venus_helper_set_output_resolution(struct venus_inst *inst,
unsigned int width, unsigned int height,
u32 buftype);
int venus_helper_set_work_mode(struct venus_inst *inst, u32 mode);
+int venus_helper_init_codec_freq_data(struct venus_inst *inst);
int venus_helper_set_core_usage(struct venus_inst *inst, u32 usage);
int venus_helper_set_num_bufs(struct venus_inst *inst, unsigned int input_bufs,
unsigned int output_bufs,
@@ -59,7 +60,7 @@ int venus_helper_intbufs_free(struct venus_inst *inst);
int venus_helper_intbufs_realloc(struct venus_inst *inst);
int venus_helper_queue_dpb_bufs(struct venus_inst *inst);
int venus_helper_unregister_bufs(struct venus_inst *inst);
-int venus_helper_load_scale_clocks(struct venus_core *core);
+int venus_helper_load_scale_clocks(struct venus_inst *inst);
int venus_helper_process_initial_cap_bufs(struct venus_inst *inst);
int venus_helper_process_initial_out_bufs(struct venus_inst *inst);
void venus_helper_get_ts_metadata(struct venus_inst *inst, u64 timestamp_us,
diff --git a/drivers/media/platform/qcom/venus/hfi_venus.c b/drivers/media/platform/qcom/venus/hfi_venus.c
index 7129a2aea09a..0d8855014ab3 100644
--- a/drivers/media/platform/qcom/venus/hfi_venus.c
+++ b/drivers/media/platform/qcom/venus/hfi_venus.c
@@ -1472,6 +1472,7 @@ static int venus_suspend_3xx(struct venus_core *core)
{
struct venus_hfi_device *hdev = to_hfi_priv(core);
struct device *dev = core->dev;
+ u32 ctrl_status;
bool val;
int ret;
@@ -1487,6 +1488,10 @@ static int venus_suspend_3xx(struct venus_core *core)
return -EINVAL;
}
+ ctrl_status = venus_readl(hdev, CPU_CS_SCIACMDARG0);
+ if (ctrl_status & CPU_CS_SCIACMDARG0_PC_READY)
+ goto power_off;
+
/*
* Power collapse sequence for Venus 3xx and 4xx versions:
* 1. Check for ARM9 and video core to be idle by checking WFI bit
@@ -1511,6 +1516,7 @@ static int venus_suspend_3xx(struct venus_core *core)
if (ret)
return ret;
+power_off:
mutex_lock(&hdev->lock);
ret = venus_power_off(hdev);
diff --git a/drivers/media/platform/qcom/venus/vdec.c b/drivers/media/platform/qcom/venus/vdec.c
index 7f4660555ddb..8feaf5daece9 100644
--- a/drivers/media/platform/qcom/venus/vdec.c
+++ b/drivers/media/platform/qcom/venus/vdec.c
@@ -685,6 +685,10 @@ static int vdec_session_init(struct venus_inst *inst)
if (ret)
goto deinit;
+ ret = venus_helper_init_codec_freq_data(inst);
+ if (ret)
+ goto deinit;
+
return 0;
deinit:
hfi_session_deinit(inst);
@@ -864,7 +868,7 @@ reconfigure:
if (ret)
goto free_dpb_bufs;
- venus_helper_load_scale_clocks(inst->core);
+ venus_helper_load_scale_clocks(inst);
ret = hfi_session_continue(inst);
if (ret)
@@ -1072,7 +1076,7 @@ static void vdec_session_release(struct venus_inst *inst)
hfi_session_abort(inst);
venus_helper_free_dpb_bufs(inst);
- venus_helper_load_scale_clocks(core);
+ venus_helper_load_scale_clocks(inst);
INIT_LIST_HEAD(&inst->registeredbufs);
mutex_unlock(&inst->lock);
@@ -1412,9 +1416,6 @@ static const struct v4l2_file_operations vdec_fops = {
.unlocked_ioctl = video_ioctl2,
.poll = v4l2_m2m_fop_poll,
.mmap = v4l2_m2m_fop_mmap,
-#ifdef CONFIG_COMPAT
- .compat_ioctl32 = v4l2_compat_ioctl32,
-#endif
};
static int vdec_probe(struct platform_device *pdev)
diff --git a/drivers/media/platform/qcom/venus/venc.c b/drivers/media/platform/qcom/venus/venc.c
index 1b7fb2d5887c..453edf966d4f 100644
--- a/drivers/media/platform/qcom/venus/venc.c
+++ b/drivers/media/platform/qcom/venus/venc.c
@@ -842,6 +842,10 @@ static int venc_init_session(struct venus_inst *inst)
if (ret)
goto deinit;
+ ret = venus_helper_init_codec_freq_data(inst);
+ if (ret)
+ goto deinit;
+
ret = venc_set_properties(inst);
if (ret)
goto deinit;
@@ -1235,9 +1239,6 @@ static const struct v4l2_file_operations venc_fops = {
.unlocked_ioctl = video_ioctl2,
.poll = v4l2_m2m_fop_poll,
.mmap = v4l2_m2m_fop_mmap,
-#ifdef CONFIG_COMPAT
- .compat_ioctl32 = v4l2_compat_ioctl32,
-#endif
};
static int venc_probe(struct platform_device *pdev)
diff --git a/drivers/media/platform/rcar-vin/rcar-core.c b/drivers/media/platform/rcar-vin/rcar-core.c
index 6993484ff0f3..7440c8965d27 100644
--- a/drivers/media/platform/rcar-vin/rcar-core.c
+++ b/drivers/media/platform/rcar-vin/rcar-core.c
@@ -983,6 +983,7 @@ static const struct rvin_group_route rcar_info_r8a7795_routes[] = {
static const struct rvin_info rcar_info_r8a7795 = {
.model = RCAR_GEN3,
.use_mc = true,
+ .nv12 = true,
.max_width = 4096,
.max_height = 4096,
.routes = rcar_info_r8a7795_routes,
@@ -1077,6 +1078,7 @@ static const struct rvin_group_route rcar_info_r8a7796_routes[] = {
static const struct rvin_info rcar_info_r8a7796 = {
.model = RCAR_GEN3,
.use_mc = true,
+ .nv12 = true,
.max_width = 4096,
.max_height = 4096,
.routes = rcar_info_r8a7796_routes,
@@ -1121,6 +1123,7 @@ static const struct rvin_group_route rcar_info_r8a77965_routes[] = {
static const struct rvin_info rcar_info_r8a77965 = {
.model = RCAR_GEN3,
.use_mc = true,
+ .nv12 = true,
.max_width = 4096,
.max_height = 4096,
.routes = rcar_info_r8a77965_routes,
@@ -1168,6 +1171,7 @@ static const struct rvin_group_route rcar_info_r8a77980_routes[] = {
static const struct rvin_info rcar_info_r8a77980 = {
.model = RCAR_GEN3,
.use_mc = true,
+ .nv12 = true,
.max_width = 4096,
.max_height = 4096,
.routes = rcar_info_r8a77980_routes,
@@ -1184,6 +1188,7 @@ static const struct rvin_group_route rcar_info_r8a77990_routes[] = {
static const struct rvin_info rcar_info_r8a77990 = {
.model = RCAR_GEN3,
.use_mc = true,
+ .nv12 = true,
.max_width = 4096,
.max_height = 4096,
.routes = rcar_info_r8a77990_routes,
@@ -1196,6 +1201,7 @@ static const struct rvin_group_route rcar_info_r8a77995_routes[] = {
static const struct rvin_info rcar_info_r8a77995 = {
.model = RCAR_GEN3,
.use_mc = true,
+ .nv12 = true,
.max_width = 4096,
.max_height = 4096,
.routes = rcar_info_r8a77995_routes,
@@ -1207,6 +1213,10 @@ static const struct of_device_id rvin_of_id_table[] = {
.data = &rcar_info_r8a7796,
},
{
+ .compatible = "renesas,vin-r8a774b1",
+ .data = &rcar_info_r8a77965,
+ },
+ {
.compatible = "renesas,vin-r8a774c0",
.data = &rcar_info_r8a77990,
},
@@ -1282,7 +1292,6 @@ static int rcar_vin_probe(struct platform_device *pdev)
{
const struct soc_device_attribute *attr;
struct rvin_dev *vin;
- struct resource *mem;
int irq, ret;
vin = devm_kzalloc(&pdev->dev, sizeof(*vin), GFP_KERNEL);
@@ -1301,11 +1310,7 @@ static int rcar_vin_probe(struct platform_device *pdev)
if (attr)
vin->info = attr->data;
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (mem == NULL)
- return -EINVAL;
-
- vin->base = devm_ioremap_resource(vin->dev, mem);
+ vin->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(vin->base))
return PTR_ERR(vin->base);
diff --git a/drivers/media/platform/rcar-vin/rcar-csi2.c b/drivers/media/platform/rcar-vin/rcar-csi2.c
index c14af1b929df..faa9fb23a2e9 100644
--- a/drivers/media/platform/rcar-vin/rcar-csi2.c
+++ b/drivers/media/platform/rcar-vin/rcar-csi2.c
@@ -1082,6 +1082,10 @@ static const struct of_device_id rcar_csi2_of_table[] = {
.data = &rcar_csi2_info_r8a7796,
},
{
+ .compatible = "renesas,r8a774b1-csi2",
+ .data = &rcar_csi2_info_r8a77965,
+ },
+ {
.compatible = "renesas,r8a774c0-csi2",
.data = &rcar_csi2_info_r8a77990,
},
diff --git a/drivers/media/platform/rcar-vin/rcar-dma.c b/drivers/media/platform/rcar-vin/rcar-dma.c
index 3cb29b2e0b2b..cf9029efeb04 100644
--- a/drivers/media/platform/rcar-vin/rcar-dma.c
+++ b/drivers/media/platform/rcar-vin/rcar-dma.c
@@ -118,6 +118,7 @@
#define VNDMR_ABIT (1 << 2)
#define VNDMR_DTMD_YCSEP (1 << 1)
#define VNDMR_DTMD_ARGB (1 << 0)
+#define VNDMR_DTMD_YCSEP_420 (3 << 0)
/* Video n Data Mode Register 2 bits */
#define VNDMR2_VPS (1 << 30)
@@ -529,12 +530,17 @@ static void rvin_set_coeff(struct rvin_dev *vin, unsigned short xs)
static void rvin_crop_scale_comp_gen2(struct rvin_dev *vin)
{
+ unsigned int crop_height;
u32 xs, ys;
/* Set scaling coefficient */
+ crop_height = vin->crop.height;
+ if (V4L2_FIELD_IS_INTERLACED(vin->format.field))
+ crop_height *= 2;
+
ys = 0;
- if (vin->crop.height != vin->compose.height)
- ys = (4096 * vin->crop.height) / vin->compose.height;
+ if (crop_height != vin->compose.height)
+ ys = (4096 * crop_height) / vin->compose.height;
rvin_write(vin, ys, VNYS_REG);
xs = 0;
@@ -557,16 +563,11 @@ static void rvin_crop_scale_comp_gen2(struct rvin_dev *vin)
rvin_write(vin, 0, VNSPPOC_REG);
rvin_write(vin, 0, VNSLPOC_REG);
rvin_write(vin, vin->format.width - 1, VNEPPOC_REG);
- switch (vin->format.field) {
- case V4L2_FIELD_INTERLACED:
- case V4L2_FIELD_INTERLACED_TB:
- case V4L2_FIELD_INTERLACED_BT:
+
+ if (V4L2_FIELD_IS_INTERLACED(vin->format.field))
rvin_write(vin, vin->format.height / 2 - 1, VNELPOC_REG);
- break;
- default:
+ else
rvin_write(vin, vin->format.height - 1, VNELPOC_REG);
- break;
- }
vin_dbg(vin,
"Pre-Clip: %ux%u@%u:%u YS: %d XS: %d Post-Clip: %ux%u@%u:%u\n",
@@ -583,21 +584,9 @@ void rvin_crop_scale_comp(struct rvin_dev *vin)
/* Set Start/End Pixel/Line Pre-Clip */
rvin_write(vin, vin->crop.left, VNSPPRC_REG);
rvin_write(vin, vin->crop.left + vin->crop.width - 1, VNEPPRC_REG);
+ rvin_write(vin, vin->crop.top, VNSLPRC_REG);
+ rvin_write(vin, vin->crop.top + vin->crop.height - 1, VNELPRC_REG);
- switch (vin->format.field) {
- case V4L2_FIELD_INTERLACED:
- case V4L2_FIELD_INTERLACED_TB:
- case V4L2_FIELD_INTERLACED_BT:
- rvin_write(vin, vin->crop.top / 2, VNSLPRC_REG);
- rvin_write(vin, (vin->crop.top + vin->crop.height) / 2 - 1,
- VNELPRC_REG);
- break;
- default:
- rvin_write(vin, vin->crop.top, VNSLPRC_REG);
- rvin_write(vin, vin->crop.top + vin->crop.height - 1,
- VNELPRC_REG);
- break;
- }
/* TODO: Add support for the UDS scaler. */
if (vin->info->model != RCAR_GEN3)
@@ -641,6 +630,9 @@ static int rvin_setup(struct rvin_dev *vin)
vnmc = VNMC_IM_ODD_EVEN;
progressive = true;
break;
+ case V4L2_FIELD_ALTERNATE:
+ vnmc = VNMC_IM_ODD_EVEN;
+ break;
default:
vnmc = VNMC_IM_ODD;
break;
@@ -710,11 +702,13 @@ static int rvin_setup(struct rvin_dev *vin)
* Output format
*/
switch (vin->format.pixelformat) {
+ case V4L2_PIX_FMT_NV12:
case V4L2_PIX_FMT_NV16:
rvin_write(vin,
- ALIGN(vin->format.width * vin->format.height, 0x80),
- VNUVAOF_REG);
- dmr = VNDMR_DTMD_YCSEP;
+ ALIGN(vin->format.bytesperline * vin->format.height,
+ 0x80), VNUVAOF_REG);
+ dmr = vin->format.pixelformat == V4L2_PIX_FMT_NV12 ?
+ VNDMR_DTMD_YCSEP_420 : VNDMR_DTMD_YCSEP;
output_is_yuv = true;
break;
case V4L2_PIX_FMT_YUYV:
@@ -799,6 +793,18 @@ static bool rvin_capture_active(struct rvin_dev *vin)
return rvin_read(vin, VNMS_REG) & VNMS_CA;
}
+static enum v4l2_field rvin_get_active_field(struct rvin_dev *vin, u32 vnms)
+{
+ if (vin->format.field == V4L2_FIELD_ALTERNATE) {
+		/* If FS is set, this is an even field. */
+ if (vnms & VNMS_FS)
+ return V4L2_FIELD_BOTTOM;
+ return V4L2_FIELD_TOP;
+ }
+
+ return vin->format.field;
+}
+
static void rvin_set_slot_addr(struct rvin_dev *vin, int slot, dma_addr_t addr)
{
const struct rvin_video_format *fmt;
@@ -948,7 +954,7 @@ static irqreturn_t rvin_irq(int irq, void *data)
/* Capture frame */
if (vin->queue_buf[slot]) {
- vin->queue_buf[slot]->field = vin->format.field;
+ vin->queue_buf[slot]->field = rvin_get_active_field(vin, vnms);
vin->queue_buf[slot]->sequence = vin->sequence;
vin->queue_buf[slot]->vb2_buf.timestamp = ktime_get_ns();
vb2_buffer_done(&vin->queue_buf[slot]->vb2_buf,
@@ -1075,6 +1081,7 @@ static int rvin_mc_validate_format(struct rvin_dev *vin, struct v4l2_subdev *sd,
case V4L2_FIELD_TOP:
case V4L2_FIELD_BOTTOM:
case V4L2_FIELD_NONE:
+ case V4L2_FIELD_ALTERNATE:
break;
case V4L2_FIELD_INTERLACED_TB:
case V4L2_FIELD_INTERLACED_BT:
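
The gen2 scaling hunk doubles the crop height before computing the vertical scaling coefficient: with ALTERNATE capture each buffer holds a single field, so an interlaced format spans twice the field height. A worked sketch of the VNYS computation under that assumption:

#include <linux/types.h>

/* Sketch of the vertical scaling factor written to VNYS_REG. */
static unsigned int vnys_coeff(unsigned int crop_height,
			       unsigned int compose_height,
			       bool interlaced)
{
	if (interlaced)
		crop_height *= 2;	/* two fields make one frame */

	/* 0 means "no scaling"; otherwise a 4096-based ratio. */
	if (crop_height == compose_height)
		return 0;
	return (4096 * crop_height) / compose_height;
}

/*
 * Example: a 240-line field crop composed to 480 output lines.
 * Interlaced source: 240 * 2 == 480, so the coefficient is 0
 * (no scaling needed). Progressive source: (4096 * 240) / 480
 * == 2048, i.e. scale up by a factor of two.
 */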
diff --git a/drivers/media/platform/rcar-vin/rcar-v4l2.c b/drivers/media/platform/rcar-vin/rcar-v4l2.c
index cbc1c07f0a96..9e2e63ffcc47 100644
--- a/drivers/media/platform/rcar-vin/rcar-v4l2.c
+++ b/drivers/media/platform/rcar-vin/rcar-v4l2.c
@@ -31,6 +31,10 @@
static const struct rvin_video_format rvin_formats[] = {
{
+ .fourcc = V4L2_PIX_FMT_NV12,
+ .bpp = 1,
+ },
+ {
.fourcc = V4L2_PIX_FMT_NV16,
.bpp = 1,
},
@@ -72,6 +76,9 @@ const struct rvin_video_format *rvin_format_from_pixel(struct rvin_dev *vin,
if (vin->info->model == RCAR_M1 && pixelformat == V4L2_PIX_FMT_XBGR32)
return NULL;
+ if (pixelformat == V4L2_PIX_FMT_NV12 && !vin->info->nv12)
+ return NULL;
+
for (i = 0; i < ARRAY_SIZE(rvin_formats); i++)
if (rvin_formats[i].fourcc == pixelformat)
return rvin_formats + i;
@@ -90,17 +97,29 @@ static u32 rvin_format_bytesperline(struct rvin_dev *vin,
if (WARN_ON(!fmt))
return -EINVAL;
- align = pix->pixelformat == V4L2_PIX_FMT_NV16 ? 0x20 : 0x10;
+ switch (pix->pixelformat) {
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV16:
+ align = 0x20;
+ break;
+ default:
+ align = 0x10;
+ break;
+ }
return ALIGN(pix->width, align) * fmt->bpp;
}
static u32 rvin_format_sizeimage(struct v4l2_pix_format *pix)
{
- if (pix->pixelformat == V4L2_PIX_FMT_NV16)
+ switch (pix->pixelformat) {
+ case V4L2_PIX_FMT_NV12:
+ return pix->bytesperline * pix->height * 3 / 2;
+ case V4L2_PIX_FMT_NV16:
return pix->bytesperline * pix->height * 2;
-
- return pix->bytesperline * pix->height;
+ default:
+ return pix->bytesperline * pix->height;
+ }
}
static void rvin_format_align(struct rvin_dev *vin, struct v4l2_pix_format *pix)
@@ -117,23 +136,23 @@ static void rvin_format_align(struct rvin_dev *vin, struct v4l2_pix_format *pix)
case V4L2_FIELD_INTERLACED_TB:
case V4L2_FIELD_INTERLACED_BT:
case V4L2_FIELD_INTERLACED:
- break;
case V4L2_FIELD_ALTERNATE:
- /*
- * Driver does not (yet) support outputting ALTERNATE to a
- * userspace. It does support outputting INTERLACED so use
- * the VIN hardware to combine the two fields.
- */
- pix->field = V4L2_FIELD_INTERLACED;
- pix->height *= 2;
break;
default:
pix->field = RVIN_DEFAULT_FIELD;
break;
}
- /* HW limit width to a multiple of 32 (2^5) for NV16 else 2 (2^1) */
- walign = vin->format.pixelformat == V4L2_PIX_FMT_NV16 ? 5 : 1;
+	/* HW limits width to a multiple of 32 (2^5) for NV12/16, else 2 (2^1) */
+ switch (vin->format.pixelformat) {
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV16:
+ walign = 5;
+ break;
+ default:
+ walign = 1;
+ break;
+ }
/* Limit to VIN capabilities */
v4l_bound_align_image(&pix->width, 2, vin->info->max_width, walign,
@@ -164,22 +183,32 @@ static int rvin_reset_format(struct rvin_dev *vin)
v4l2_fill_pix_format(&vin->format, &fmt.format);
+ vin->src_rect.top = 0;
+ vin->src_rect.left = 0;
+ vin->src_rect.width = vin->format.width;
+ vin->src_rect.height = vin->format.height;
+
+ /* Make use of the hardware interlacer by default. */
+ if (vin->format.field == V4L2_FIELD_ALTERNATE) {
+ vin->format.field = V4L2_FIELD_INTERLACED;
+ vin->format.height *= 2;
+ }
+
rvin_format_align(vin, &vin->format);
- vin->source.top = 0;
- vin->source.left = 0;
- vin->source.width = vin->format.width;
- vin->source.height = vin->format.height;
+ vin->crop = vin->src_rect;
- vin->crop = vin->source;
- vin->compose = vin->source;
+ vin->compose.top = 0;
+ vin->compose.left = 0;
+ vin->compose.width = vin->format.width;
+ vin->compose.height = vin->format.height;
return 0;
}
static int rvin_try_format(struct rvin_dev *vin, u32 which,
struct v4l2_pix_format *pix,
- struct v4l2_rect *crop, struct v4l2_rect *compose)
+ struct v4l2_rect *src_rect)
{
struct v4l2_subdev *sd = vin_to_source(vin);
struct v4l2_subdev_pad_config *pad_cfg;
@@ -208,21 +237,15 @@ static int rvin_try_format(struct rvin_dev *vin, u32 which,
ret = v4l2_subdev_call(sd, pad, set_fmt, pad_cfg, &format);
if (ret < 0 && ret != -ENOIOCTLCMD)
goto done;
+ ret = 0;
v4l2_fill_pix_format(pix, &format.format);
- if (crop) {
- crop->top = 0;
- crop->left = 0;
- crop->width = pix->width;
- crop->height = pix->height;
-
- /*
- * If source is ALTERNATE the driver will use the VIN hardware
- * to INTERLACE it. The crop height then needs to be doubled.
- */
- if (pix->field == V4L2_FIELD_ALTERNATE)
- crop->height *= 2;
+ if (src_rect) {
+ src_rect->top = 0;
+ src_rect->left = 0;
+ src_rect->width = pix->width;
+ src_rect->height = pix->height;
}
if (field != V4L2_FIELD_ANY)
@@ -232,17 +255,10 @@ static int rvin_try_format(struct rvin_dev *vin, u32 which,
pix->height = height;
rvin_format_align(vin, pix);
-
- if (compose) {
- compose->top = 0;
- compose->left = 0;
- compose->width = pix->width;
- compose->height = pix->height;
- }
done:
v4l2_subdev_free_pad_config(pad_cfg);
- return 0;
+ return ret;
}
static int rvin_querycap(struct file *file, void *priv,
@@ -262,29 +278,34 @@ static int rvin_try_fmt_vid_cap(struct file *file, void *priv,
{
struct rvin_dev *vin = video_drvdata(file);
- return rvin_try_format(vin, V4L2_SUBDEV_FORMAT_TRY, &f->fmt.pix, NULL,
- NULL);
+ return rvin_try_format(vin, V4L2_SUBDEV_FORMAT_TRY, &f->fmt.pix, NULL);
}
static int rvin_s_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
struct rvin_dev *vin = video_drvdata(file);
- struct v4l2_rect crop, compose;
+ struct v4l2_rect fmt_rect, src_rect;
int ret;
if (vb2_is_busy(&vin->queue))
return -EBUSY;
ret = rvin_try_format(vin, V4L2_SUBDEV_FORMAT_ACTIVE, &f->fmt.pix,
- &crop, &compose);
+ &src_rect);
if (ret)
return ret;
vin->format = f->fmt.pix;
- vin->crop = crop;
- vin->compose = compose;
- vin->source = crop;
+
+ fmt_rect.top = 0;
+ fmt_rect.left = 0;
+ fmt_rect.width = vin->format.width;
+ fmt_rect.height = vin->format.height;
+
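+	/* Keep the current crop and compose rectangles inside the new limits. */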
+ v4l2_rect_map_inside(&vin->crop, &src_rect);
+ v4l2_rect_map_inside(&vin->compose, &fmt_rect);
+ vin->src_rect = src_rect;
return 0;
}
@@ -302,12 +323,22 @@ static int rvin_g_fmt_vid_cap(struct file *file, void *priv,
static int rvin_enum_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
- if (f->index >= ARRAY_SIZE(rvin_formats))
- return -EINVAL;
-
- f->pixelformat = rvin_formats[f->index].fourcc;
+ struct rvin_dev *vin = video_drvdata(file);
+ unsigned int i;
+ int matched;
+
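+	/* Only enumerate formats supported by this VIN instance. */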
+ matched = -1;
+ for (i = 0; i < ARRAY_SIZE(rvin_formats); i++) {
+ if (rvin_format_from_pixel(vin, rvin_formats[i].fourcc))
+ matched++;
+
+ if (matched == f->index) {
+ f->pixelformat = rvin_formats[i].fourcc;
+ return 0;
+ }
+ }
- return 0;
+ return -EINVAL;
}
static int rvin_g_selection(struct file *file, void *fh,
@@ -322,8 +353,8 @@ static int rvin_g_selection(struct file *file, void *fh,
case V4L2_SEL_TGT_CROP_BOUNDS:
case V4L2_SEL_TGT_CROP_DEFAULT:
s->r.left = s->r.top = 0;
- s->r.width = vin->source.width;
- s->r.height = vin->source.height;
+ s->r.width = vin->src_rect.width;
+ s->r.height = vin->src_rect.height;
break;
case V4L2_SEL_TGT_CROP:
s->r = vin->crop;
@@ -365,21 +396,22 @@ static int rvin_s_selection(struct file *file, void *fh,
case V4L2_SEL_TGT_CROP:
/* Can't crop outside of source input */
max_rect.top = max_rect.left = 0;
- max_rect.width = vin->source.width;
- max_rect.height = vin->source.height;
+ max_rect.width = vin->src_rect.width;
+ max_rect.height = vin->src_rect.height;
v4l2_rect_map_inside(&r, &max_rect);
- v4l_bound_align_image(&r.width, 6, vin->source.width, 0,
- &r.height, 2, vin->source.height, 0, 0);
+ v4l_bound_align_image(&r.width, 6, vin->src_rect.width, 0,
+ &r.height, 2, vin->src_rect.height, 0, 0);
- r.top = clamp_t(s32, r.top, 0, vin->source.height - r.height);
- r.left = clamp_t(s32, r.left, 0, vin->source.width - r.width);
+ r.top = clamp_t(s32, r.top, 0,
+ vin->src_rect.height - r.height);
+ r.left = clamp_t(s32, r.left, 0, vin->src_rect.width - r.width);
vin->crop = s->r = r;
vin_dbg(vin, "Cropped %dx%d@%d:%d of %dx%d\n",
r.width, r.height, r.left, r.top,
- vin->source.width, vin->source.height);
+ vin->src_rect.width, vin->src_rect.height);
break;
case V4L2_SEL_TGT_COMPOSE:
/* Make sure compose rect fits inside output format */
diff --git a/drivers/media/platform/rcar-vin/rcar-vin.h b/drivers/media/platform/rcar-vin/rcar-vin.h
index e562c2ff21ec..a36b0824f81d 100644
--- a/drivers/media/platform/rcar-vin/rcar-vin.h
+++ b/drivers/media/platform/rcar-vin/rcar-vin.h
@@ -126,6 +126,7 @@ struct rvin_group_route {
* struct rvin_info - Information about the particular VIN implementation
* @model: VIN model
* @use_mc: use media controller instead of controlling subdevice
+ * @nv12: support outputting the NV12 pixel format
* @max_width: max input width the VIN supports
* @max_height: max input height the VIN supports
 * @routes: list of possible routes from the CSI-2 receivers to
@@ -134,6 +135,7 @@ struct rvin_group_route {
struct rvin_info {
enum model_id model;
bool use_mc;
+ bool nv12;
unsigned int max_width;
unsigned int max_height;
@@ -176,7 +178,7 @@ struct rvin_info {
*
* @crop: active cropping
* @compose: active composing
- * @source: active size of the video source
+ * @src_rect: active size of the video source
* @std: active video standard of the video source
*
* @alpha: Alpha component to fill in for supported pixel formats
@@ -215,7 +217,7 @@ struct rvin_dev {
struct v4l2_rect crop;
struct v4l2_rect compose;
- struct v4l2_rect source;
+ struct v4l2_rect src_rect;
v4l2_std_id std;
unsigned int alpha;
diff --git a/drivers/media/platform/rcar_drif.c b/drivers/media/platform/rcar_drif.c
index 608e5217ccd5..0f267a237b42 100644
--- a/drivers/media/platform/rcar_drif.c
+++ b/drivers/media/platform/rcar_drif.c
@@ -912,6 +912,7 @@ static int rcar_drif_g_fmt_sdr_cap(struct file *file, void *priv,
{
struct rcar_drif_sdr *sdr = video_drvdata(file);
+ memset(f->fmt.sdr.reserved, 0, sizeof(f->fmt.sdr.reserved));
f->fmt.sdr.pixelformat = sdr->fmt->pixelformat;
f->fmt.sdr.buffersize = sdr->fmt->buffersize;
diff --git a/drivers/media/platform/rcar_fdp1.c b/drivers/media/platform/rcar_fdp1.c
index cb93a13e1777..97bed45360f0 100644
--- a/drivers/media/platform/rcar_fdp1.c
+++ b/drivers/media/platform/rcar_fdp1.c
@@ -2369,7 +2369,7 @@ static int fdp1_probe(struct platform_device *pdev)
dprintk(fdp1, "FDP1 Version R-Car H3\n");
break;
case FD1_IP_M3N:
- dprintk(fdp1, "FDP1 Version R-Car M3N\n");
+ dprintk(fdp1, "FDP1 Version R-Car M3-N\n");
break;
case FD1_IP_E3:
dprintk(fdp1, "FDP1 Version R-Car E3\n");
diff --git a/drivers/media/platform/s3c-camif/camif-regs.c b/drivers/media/platform/s3c-camif/camif-regs.c
index 1a65532dc36d..e80204f5720c 100644
--- a/drivers/media/platform/s3c-camif/camif-regs.c
+++ b/drivers/media/platform/s3c-camif/camif-regs.c
@@ -553,7 +553,7 @@ void camif_hw_disable_capture(struct camif_vp *vp)
void camif_hw_dump_regs(struct camif_dev *camif, const char *label)
{
- struct {
+ static const struct {
u32 offset;
const char * const name;
} registers[] = {
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.c b/drivers/media/platform/s5p-jpeg/jpeg-core.c
index 8dbbd5f2a40a..ac2162235cef 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-core.c
+++ b/drivers/media/platform/s5p-jpeg/jpeg-core.c
@@ -1236,7 +1236,6 @@ static bool s5p_jpeg_parse_hdr(struct s5p_jpeg_q_data *result,
}
result->sof = sof;
result->sof_len = sof_len;
- result->components = components;
return true;
}
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.h b/drivers/media/platform/s5p-jpeg/jpeg-core.h
index 3bc52f83f5bc..4407fe775afa 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-core.h
+++ b/drivers/media/platform/s5p-jpeg/jpeg-core.h
@@ -190,7 +190,6 @@ struct s5p_jpeg_marker {
* @dqt: DQT markers' positions relative to the buffer beginning
* @sof: SOF0 marker's position relative to the buffer beginning
* @sof_len: SOF0 marker's payload length (without length field itself)
- * @components: number of image components
* @size: image buffer size in bytes
*/
struct s5p_jpeg_q_data {
@@ -202,7 +201,6 @@ struct s5p_jpeg_q_data {
struct s5p_jpeg_marker dqt;
u32 sof;
u32 sof_len;
- u32 components;
u32 size;
};
diff --git a/drivers/media/platform/seco-cec/seco-cec.c b/drivers/media/platform/seco-cec/seco-cec.c
index 9cd60fe1867c..a86b6e8f9196 100644
--- a/drivers/media/platform/seco-cec/seco-cec.c
+++ b/drivers/media/platform/seco-cec/seco-cec.c
@@ -675,6 +675,7 @@ err_notifier:
err_delete_adapter:
cec_delete_adapter(secocec->cec_adap);
err:
+ release_region(BRA_SMB_BASE_ADDR, 7);
dev_err(dev, "%s device probe failed\n", dev_name(dev));
return ret;
diff --git a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
index e90f1ba30574..675b5f2b4c2e 100644
--- a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
+++ b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
@@ -651,8 +651,7 @@ static int bdisp_release(struct file *file)
dev_dbg(bdisp->dev, "%s\n", __func__);
- if (mutex_lock_interruptible(&bdisp->lock))
- return -ERESTARTSYS;
+ mutex_lock(&bdisp->lock);
v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
diff --git a/drivers/media/platform/sunxi/Makefile b/drivers/media/platform/sunxi/Makefile
index a05127529006..3878cb4efdc2 100644
--- a/drivers/media/platform/sunxi/Makefile
+++ b/drivers/media/platform/sunxi/Makefile
@@ -1,2 +1,3 @@
obj-y += sun4i-csi/
obj-y += sun6i-csi/
+obj-y += sun8i-di/
diff --git a/drivers/media/platform/sunxi/sun8i-di/Makefile b/drivers/media/platform/sunxi/sun8i-di/Makefile
new file mode 100644
index 000000000000..109f7e5442b7
--- /dev/null
+++ b/drivers/media/platform/sunxi/sun8i-di/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_VIDEO_SUN8I_DEINTERLACE) += sun8i-di.o
diff --git a/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c b/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c
new file mode 100644
index 000000000000..aaa1dc159ac2
--- /dev/null
+++ b/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c
@@ -0,0 +1,1028 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Allwinner sun8i deinterlacer with scaler driver
+ *
+ * Copyright (C) 2019 Jernej Skrabec <jernej.skrabec@siol.net>
+ *
+ * Based on vim2m driver.
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "sun8i-di.h"
+
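+/*
+ * Size of one motion flag buffer: one byte per four pixels at the
+ * maximum supported resolution (the exact hardware layout of the
+ * flags is not documented).
+ */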
+#define FLAG_SIZE (DEINTERLACE_MAX_WIDTH * DEINTERLACE_MAX_HEIGHT / 4)
+
+static u32 deinterlace_formats[] = {
+ V4L2_PIX_FMT_NV12,
+ V4L2_PIX_FMT_NV21,
+};
+
+static inline u32 deinterlace_read(struct deinterlace_dev *dev, u32 reg)
+{
+ return readl(dev->base + reg);
+}
+
+static inline void deinterlace_write(struct deinterlace_dev *dev,
+ u32 reg, u32 value)
+{
+ writel(value, dev->base + reg);
+}
+
+static inline void deinterlace_set_bits(struct deinterlace_dev *dev,
+ u32 reg, u32 bits)
+{
+ writel(readl(dev->base + reg) | bits, dev->base + reg);
+}
+
+static inline void deinterlace_clr_set_bits(struct deinterlace_dev *dev,
+ u32 reg, u32 clr, u32 set)
+{
+ u32 val = readl(dev->base + reg);
+
+ val &= ~clr;
+ val |= set;
+
+ writel(val, dev->base + reg);
+}
+
+static void deinterlace_device_run(void *priv)
+{
+ struct deinterlace_ctx *ctx = priv;
+ struct deinterlace_dev *dev = ctx->dev;
+ u32 size, stride, width, height, val;
+ struct vb2_v4l2_buffer *src, *dst;
+ unsigned int hstep, vstep;
+ dma_addr_t addr;
+
+ src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+
+ v4l2_m2m_buf_copy_metadata(src, dst, true);
+
+ deinterlace_write(dev, DEINTERLACE_MOD_ENABLE,
+ DEINTERLACE_MOD_ENABLE_EN);
+
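+	/*
+	 * The two motion flag buffers are swapped on every field, so the
+	 * flags written out for one field are presumably read back as
+	 * history while processing the next one.
+	 */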
+ if (ctx->field) {
+ deinterlace_write(dev, DEINTERLACE_TILE_FLAG0,
+ ctx->flag1_buf_dma);
+ deinterlace_write(dev, DEINTERLACE_TILE_FLAG1,
+ ctx->flag2_buf_dma);
+ } else {
+ deinterlace_write(dev, DEINTERLACE_TILE_FLAG0,
+ ctx->flag2_buf_dma);
+ deinterlace_write(dev, DEINTERLACE_TILE_FLAG1,
+ ctx->flag1_buf_dma);
+ }
+ deinterlace_write(dev, DEINTERLACE_FLAG_LINE_STRIDE, 0x200);
+
+ width = ctx->src_fmt.width;
+ height = ctx->src_fmt.height;
+ stride = ctx->src_fmt.bytesperline;
+ size = stride * height;
+
+ addr = vb2_dma_contig_plane_dma_addr(&src->vb2_buf, 0);
+ deinterlace_write(dev, DEINTERLACE_BUF_ADDR0, addr);
+ deinterlace_write(dev, DEINTERLACE_BUF_ADDR1, addr + size);
+ deinterlace_write(dev, DEINTERLACE_BUF_ADDR2, 0);
+
+ deinterlace_write(dev, DEINTERLACE_LINE_STRIDE0, stride);
+ deinterlace_write(dev, DEINTERLACE_LINE_STRIDE1, stride);
+
+ deinterlace_write(dev, DEINTERLACE_CH0_IN_SIZE,
+ DEINTERLACE_SIZE(width, height));
+ deinterlace_write(dev, DEINTERLACE_CH1_IN_SIZE,
+ DEINTERLACE_SIZE(width / 2, height / 2));
+
+ val = DEINTERLACE_IN_FMT_FMT(DEINTERLACE_IN_FMT_YUV420) |
+ DEINTERLACE_IN_FMT_MOD(DEINTERLACE_MODE_UV_COMBINED);
+ switch (ctx->src_fmt.pixelformat) {
+ case V4L2_PIX_FMT_NV12:
+ val |= DEINTERLACE_IN_FMT_PS(DEINTERLACE_PS_UVUV);
+ break;
+ case V4L2_PIX_FMT_NV21:
+ val |= DEINTERLACE_IN_FMT_PS(DEINTERLACE_PS_VUVU);
+ break;
+ }
+ deinterlace_write(dev, DEINTERLACE_IN_FMT, val);
+
+ if (ctx->prev)
+ addr = vb2_dma_contig_plane_dma_addr(&ctx->prev->vb2_buf, 0);
+
+ deinterlace_write(dev, DEINTERLACE_PRELUMA, addr);
+ deinterlace_write(dev, DEINTERLACE_PRECHROMA, addr + size);
+
+ val = DEINTERLACE_OUT_FMT_FMT(DEINTERLACE_OUT_FMT_YUV420SP);
+ switch (ctx->src_fmt.pixelformat) {
+ case V4L2_PIX_FMT_NV12:
+ val |= DEINTERLACE_OUT_FMT_PS(DEINTERLACE_PS_UVUV);
+ break;
+ case V4L2_PIX_FMT_NV21:
+ val |= DEINTERLACE_OUT_FMT_PS(DEINTERLACE_PS_VUVU);
+ break;
+ }
+ deinterlace_write(dev, DEINTERLACE_OUT_FMT, val);
+
+ width = ctx->dst_fmt.width;
+ height = ctx->dst_fmt.height;
+ stride = ctx->dst_fmt.bytesperline;
+ size = stride * height;
+
+ deinterlace_write(dev, DEINTERLACE_CH0_OUT_SIZE,
+ DEINTERLACE_SIZE(width, height));
+ deinterlace_write(dev, DEINTERLACE_CH1_OUT_SIZE,
+ DEINTERLACE_SIZE(width / 2, height / 2));
+
+ deinterlace_write(dev, DEINTERLACE_WB_LINE_STRIDE0, stride);
+ deinterlace_write(dev, DEINTERLACE_WB_LINE_STRIDE1, stride);
+
+ addr = vb2_dma_contig_plane_dma_addr(&dst->vb2_buf, 0);
+ deinterlace_write(dev, DEINTERLACE_WB_ADDR0, addr);
+ deinterlace_write(dev, DEINTERLACE_WB_ADDR1, addr + size);
+ deinterlace_write(dev, DEINTERLACE_WB_ADDR2, 0);
+
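+	/* Scaling ratios are expressed in 16.16 fixed point. */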
+ hstep = (ctx->src_fmt.width << 16) / ctx->dst_fmt.width;
+ vstep = (ctx->src_fmt.height << 16) / ctx->dst_fmt.height;
+ deinterlace_write(dev, DEINTERLACE_CH0_HORZ_FACT, hstep);
+ deinterlace_write(dev, DEINTERLACE_CH0_VERT_FACT, vstep);
+ deinterlace_write(dev, DEINTERLACE_CH1_HORZ_FACT, hstep);
+ deinterlace_write(dev, DEINTERLACE_CH1_VERT_FACT, vstep);
+
+ deinterlace_clr_set_bits(dev, DEINTERLACE_FIELD_CTRL,
+ DEINTERLACE_FIELD_CTRL_FIELD_CNT_MSK,
+ DEINTERLACE_FIELD_CTRL_FIELD_CNT(ctx->field));
+
+ deinterlace_set_bits(dev, DEINTERLACE_FRM_CTRL,
+ DEINTERLACE_FRM_CTRL_START);
+
+ deinterlace_set_bits(dev, DEINTERLACE_FRM_CTRL,
+ DEINTERLACE_FRM_CTRL_REG_READY);
+
+ deinterlace_set_bits(dev, DEINTERLACE_INT_ENABLE,
+ DEINTERLACE_INT_ENABLE_WB_EN);
+
+ deinterlace_set_bits(dev, DEINTERLACE_FRM_CTRL,
+ DEINTERLACE_FRM_CTRL_WB_EN);
+}
+
+static int deinterlace_job_ready(void *priv)
+{
+ struct deinterlace_ctx *ctx = priv;
+
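+	/*
+	 * Each interlaced source frame is written back as two progressive
+	 * frames, so two capture buffers must be ready per source buffer.
+	 */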
+ return v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) >= 1 &&
+ v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx) >= 2;
+}
+
+static void deinterlace_job_abort(void *priv)
+{
+ struct deinterlace_ctx *ctx = priv;
+
+	/* The next interrupt handler invocation will cancel the transaction. */
+ ctx->aborting = 1;
+}
+
+static irqreturn_t deinterlace_irq(int irq, void *data)
+{
+ struct deinterlace_dev *dev = data;
+ struct vb2_v4l2_buffer *src, *dst;
+ enum vb2_buffer_state state;
+ struct deinterlace_ctx *ctx;
+ unsigned int val;
+
+ ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
+ if (!ctx) {
+ v4l2_err(&dev->v4l2_dev,
+ "Instance released before the end of transaction\n");
+ return IRQ_NONE;
+ }
+
+ val = deinterlace_read(dev, DEINTERLACE_INT_STATUS);
+ if (!(val & DEINTERLACE_INT_STATUS_WRITEBACK))
+ return IRQ_NONE;
+
+ deinterlace_write(dev, DEINTERLACE_INT_ENABLE, 0);
+ deinterlace_set_bits(dev, DEINTERLACE_INT_STATUS,
+ DEINTERLACE_INT_STATUS_WRITEBACK);
+ deinterlace_write(dev, DEINTERLACE_MOD_ENABLE, 0);
+ deinterlace_clr_set_bits(dev, DEINTERLACE_FRM_CTRL,
+ DEINTERLACE_FRM_CTRL_START, 0);
+
+ val = deinterlace_read(dev, DEINTERLACE_STATUS);
+ if (val & DEINTERLACE_STATUS_WB_ERROR)
+ state = VB2_BUF_STATE_ERROR;
+ else
+ state = VB2_BUF_STATE_DONE;
+
+ dst = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+ v4l2_m2m_buf_done(dst, state);
+
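+	/*
+	 * A transaction covers both fields of the source frame: after the
+	 * first field's writeback the second field is started directly from
+	 * the interrupt handler and only then is the job finished. The
+	 * source buffer is kept in ctx->prev so the next run can use it as
+	 * the previous-frame reference (DEINTERLACE_PRELUMA/PRECHROMA).
+	 */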
+ if (ctx->field != ctx->first_field || ctx->aborting) {
+ ctx->field = ctx->first_field;
+
+ src = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ if (ctx->prev)
+ v4l2_m2m_buf_done(ctx->prev, state);
+ ctx->prev = src;
+
+ v4l2_m2m_job_finish(ctx->dev->m2m_dev, ctx->fh.m2m_ctx);
+ } else {
+ ctx->field = !ctx->first_field;
+ deinterlace_device_run(ctx);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void deinterlace_init(struct deinterlace_dev *dev)
+{
+ u32 val;
+ int i;
+
+ deinterlace_write(dev, DEINTERLACE_BYPASS,
+ DEINTERLACE_BYPASS_CSC);
+ deinterlace_write(dev, DEINTERLACE_WB_LINE_STRIDE_CTRL,
+ DEINTERLACE_WB_LINE_STRIDE_CTRL_EN);
+ deinterlace_set_bits(dev, DEINTERLACE_FRM_CTRL,
+ DEINTERLACE_FRM_CTRL_OUT_CTRL);
+ deinterlace_write(dev, DEINTERLACE_AGTH_SEL,
+ DEINTERLACE_AGTH_SEL_LINEBUF);
+
+ val = DEINTERLACE_CTRL_EN |
+ DEINTERLACE_CTRL_MODE_MIXED |
+ DEINTERLACE_CTRL_DIAG_INTP_EN |
+ DEINTERLACE_CTRL_TEMP_DIFF_EN;
+ deinterlace_write(dev, DEINTERLACE_CTRL, val);
+
+ deinterlace_clr_set_bits(dev, DEINTERLACE_LUMA_TH,
+ DEINTERLACE_LUMA_TH_MIN_LUMA_MSK,
+ DEINTERLACE_LUMA_TH_MIN_LUMA(4));
+
+ deinterlace_clr_set_bits(dev, DEINTERLACE_SPAT_COMP,
+ DEINTERLACE_SPAT_COMP_TH2_MSK,
+ DEINTERLACE_SPAT_COMP_TH2(5));
+
+ deinterlace_clr_set_bits(dev, DEINTERLACE_TEMP_DIFF,
+ DEINTERLACE_TEMP_DIFF_AMBIGUITY_TH_MSK,
+ DEINTERLACE_TEMP_DIFF_AMBIGUITY_TH(5));
+
+ val = DEINTERLACE_DIAG_INTP_TH0(60) |
+ DEINTERLACE_DIAG_INTP_TH1(0) |
+ DEINTERLACE_DIAG_INTP_TH3(30);
+ deinterlace_write(dev, DEINTERLACE_DIAG_INTP, val);
+
+ deinterlace_clr_set_bits(dev, DEINTERLACE_CHROMA_DIFF,
+ DEINTERLACE_CHROMA_DIFF_TH_MSK,
+ DEINTERLACE_CHROMA_DIFF_TH(5));
+
+ /* neutral filter coefficients */
+ deinterlace_set_bits(dev, DEINTERLACE_FRM_CTRL,
+ DEINTERLACE_FRM_CTRL_COEF_ACCESS);
+ readl_poll_timeout(dev->base + DEINTERLACE_STATUS, val,
+ val & DEINTERLACE_STATUS_COEF_STATUS, 2, 40);
+
+ for (i = 0; i < 32; i++) {
+ deinterlace_write(dev, DEINTERLACE_CH0_HORZ_COEF0 + i * 4,
+ DEINTERLACE_IDENTITY_COEF);
+ deinterlace_write(dev, DEINTERLACE_CH0_VERT_COEF + i * 4,
+ DEINTERLACE_IDENTITY_COEF);
+ deinterlace_write(dev, DEINTERLACE_CH1_HORZ_COEF0 + i * 4,
+ DEINTERLACE_IDENTITY_COEF);
+ deinterlace_write(dev, DEINTERLACE_CH1_VERT_COEF + i * 4,
+ DEINTERLACE_IDENTITY_COEF);
+ }
+
+ deinterlace_clr_set_bits(dev, DEINTERLACE_FRM_CTRL,
+ DEINTERLACE_FRM_CTRL_COEF_ACCESS, 0);
+}
+
+static inline struct deinterlace_ctx *deinterlace_file2ctx(struct file *file)
+{
+ return container_of(file->private_data, struct deinterlace_ctx, fh);
+}
+
+static bool deinterlace_check_format(u32 pixelformat)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(deinterlace_formats); i++)
+ if (deinterlace_formats[i] == pixelformat)
+ return true;
+
+ return false;
+}
+
+static void deinterlace_prepare_format(struct v4l2_pix_format *pix_fmt)
+{
+ unsigned int height = pix_fmt->height;
+ unsigned int width = pix_fmt->width;
+ unsigned int bytesperline;
+ unsigned int sizeimage;
+
+ width = clamp(width, DEINTERLACE_MIN_WIDTH,
+ DEINTERLACE_MAX_WIDTH);
+ height = clamp(height, DEINTERLACE_MIN_HEIGHT,
+ DEINTERLACE_MAX_HEIGHT);
+
+ bytesperline = ALIGN(width, 2);
+ /* luma */
+ sizeimage = bytesperline * height;
+ /* chroma */
+ sizeimage += bytesperline * height / 2;
+
+ pix_fmt->width = width;
+ pix_fmt->height = height;
+ pix_fmt->bytesperline = bytesperline;
+ pix_fmt->sizeimage = sizeimage;
+}
+
+static int deinterlace_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ strscpy(cap->driver, DEINTERLACE_NAME, sizeof(cap->driver));
+ strscpy(cap->card, DEINTERLACE_NAME, sizeof(cap->card));
+ snprintf(cap->bus_info, sizeof(cap->bus_info),
+ "platform:%s", DEINTERLACE_NAME);
+
+ return 0;
+}
+
+static int deinterlace_enum_fmt(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ if (f->index < ARRAY_SIZE(deinterlace_formats)) {
+ f->pixelformat = deinterlace_formats[f->index];
+
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int deinterlace_enum_framesizes(struct file *file, void *priv,
+ struct v4l2_frmsizeenum *fsize)
+{
+ if (fsize->index != 0)
+ return -EINVAL;
+
+ if (!deinterlace_check_format(fsize->pixel_format))
+ return -EINVAL;
+
+ fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
+ fsize->stepwise.min_width = DEINTERLACE_MIN_WIDTH;
+ fsize->stepwise.min_height = DEINTERLACE_MIN_HEIGHT;
+ fsize->stepwise.max_width = DEINTERLACE_MAX_WIDTH;
+ fsize->stepwise.max_height = DEINTERLACE_MAX_HEIGHT;
+ fsize->stepwise.step_width = 2;
+ fsize->stepwise.step_height = 1;
+
+ return 0;
+}
+
+static int deinterlace_g_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct deinterlace_ctx *ctx = deinterlace_file2ctx(file);
+
+ f->fmt.pix = ctx->dst_fmt;
+
+ return 0;
+}
+
+static int deinterlace_g_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct deinterlace_ctx *ctx = deinterlace_file2ctx(file);
+
+ f->fmt.pix = ctx->src_fmt;
+
+ return 0;
+}
+
+static int deinterlace_try_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ if (!deinterlace_check_format(f->fmt.pix.pixelformat))
+ f->fmt.pix.pixelformat = deinterlace_formats[0];
+
+ if (f->fmt.pix.field != V4L2_FIELD_NONE)
+ f->fmt.pix.field = V4L2_FIELD_NONE;
+
+ deinterlace_prepare_format(&f->fmt.pix);
+
+ return 0;
+}
+
+static int deinterlace_try_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ if (!deinterlace_check_format(f->fmt.pix.pixelformat))
+ f->fmt.pix.pixelformat = deinterlace_formats[0];
+
+ if (f->fmt.pix.field != V4L2_FIELD_INTERLACED_TB &&
+ f->fmt.pix.field != V4L2_FIELD_INTERLACED_BT &&
+ f->fmt.pix.field != V4L2_FIELD_INTERLACED)
+ f->fmt.pix.field = V4L2_FIELD_INTERLACED;
+
+ deinterlace_prepare_format(&f->fmt.pix);
+
+ return 0;
+}
+
+static int deinterlace_s_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct deinterlace_ctx *ctx = deinterlace_file2ctx(file);
+ struct vb2_queue *vq;
+ int ret;
+
+ ret = deinterlace_try_fmt_vid_cap(file, priv, f);
+ if (ret)
+ return ret;
+
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+ if (vb2_is_busy(vq))
+ return -EBUSY;
+
+ ctx->dst_fmt = f->fmt.pix;
+
+ return 0;
+}
+
+static int deinterlace_s_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct deinterlace_ctx *ctx = deinterlace_file2ctx(file);
+ struct vb2_queue *vq;
+ int ret;
+
+ ret = deinterlace_try_fmt_vid_out(file, priv, f);
+ if (ret)
+ return ret;
+
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+ if (vb2_is_busy(vq))
+ return -EBUSY;
+
+ ctx->src_fmt = f->fmt.pix;
+
+ /* Propagate colorspace information to capture. */
+ ctx->dst_fmt.colorspace = f->fmt.pix.colorspace;
+ ctx->dst_fmt.xfer_func = f->fmt.pix.xfer_func;
+ ctx->dst_fmt.ycbcr_enc = f->fmt.pix.ycbcr_enc;
+ ctx->dst_fmt.quantization = f->fmt.pix.quantization;
+
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops deinterlace_ioctl_ops = {
+ .vidioc_querycap = deinterlace_querycap,
+
+ .vidioc_enum_framesizes = deinterlace_enum_framesizes,
+
+ .vidioc_enum_fmt_vid_cap = deinterlace_enum_fmt,
+ .vidioc_g_fmt_vid_cap = deinterlace_g_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = deinterlace_try_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = deinterlace_s_fmt_vid_cap,
+
+ .vidioc_enum_fmt_vid_out = deinterlace_enum_fmt,
+ .vidioc_g_fmt_vid_out = deinterlace_g_fmt_vid_out,
+ .vidioc_try_fmt_vid_out = deinterlace_try_fmt_vid_out,
+ .vidioc_s_fmt_vid_out = deinterlace_s_fmt_vid_out,
+
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+ .vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf,
+ .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
+
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+};
+
+static int deinterlace_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
+ unsigned int *nplanes, unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct deinterlace_ctx *ctx = vb2_get_drv_priv(vq);
+ struct v4l2_pix_format *pix_fmt;
+
+ if (V4L2_TYPE_IS_OUTPUT(vq->type))
+ pix_fmt = &ctx->src_fmt;
+ else
+ pix_fmt = &ctx->dst_fmt;
+
+ if (*nplanes) {
+ if (sizes[0] < pix_fmt->sizeimage)
+ return -EINVAL;
+ } else {
+ sizes[0] = pix_fmt->sizeimage;
+ *nplanes = 1;
+ }
+
+ return 0;
+}
+
+static int deinterlace_buf_prepare(struct vb2_buffer *vb)
+{
+ struct vb2_queue *vq = vb->vb2_queue;
+ struct deinterlace_ctx *ctx = vb2_get_drv_priv(vq);
+ struct v4l2_pix_format *pix_fmt;
+
+ if (V4L2_TYPE_IS_OUTPUT(vq->type))
+ pix_fmt = &ctx->src_fmt;
+ else
+ pix_fmt = &ctx->dst_fmt;
+
+ if (vb2_plane_size(vb, 0) < pix_fmt->sizeimage)
+ return -EINVAL;
+
+ vb2_set_plane_payload(vb, 0, pix_fmt->sizeimage);
+
+ return 0;
+}
+
+static void deinterlace_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct deinterlace_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
+}
+
+static void deinterlace_queue_cleanup(struct vb2_queue *vq, u32 state)
+{
+ struct deinterlace_ctx *ctx = vb2_get_drv_priv(vq);
+ struct vb2_v4l2_buffer *vbuf;
+
+ do {
+ if (V4L2_TYPE_IS_OUTPUT(vq->type))
+ vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ else
+ vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+
+ if (vbuf)
+ v4l2_m2m_buf_done(vbuf, state);
+ } while (vbuf);
+
+ if (V4L2_TYPE_IS_OUTPUT(vq->type) && ctx->prev)
+ v4l2_m2m_buf_done(ctx->prev, state);
+}
+
+static int deinterlace_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct deinterlace_ctx *ctx = vb2_get_drv_priv(vq);
+ struct device *dev = ctx->dev->dev;
+ int ret;
+
+ if (V4L2_TYPE_IS_OUTPUT(vq->type)) {
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "Failed to enable module\n");
+
+ goto err_runtime_get;
+ }
+
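+		/* Bottom-field-first sources start the field counter at 1. */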
+ ctx->first_field =
+ ctx->src_fmt.field == V4L2_FIELD_INTERLACED_BT;
+ ctx->field = ctx->first_field;
+
+ ctx->prev = NULL;
+ ctx->aborting = 0;
+
+ ctx->flag1_buf = dma_alloc_coherent(dev, FLAG_SIZE,
+ &ctx->flag1_buf_dma,
+ GFP_KERNEL);
+ if (!ctx->flag1_buf) {
+ ret = -ENOMEM;
+
+ goto err_no_mem1;
+ }
+
+ ctx->flag2_buf = dma_alloc_coherent(dev, FLAG_SIZE,
+ &ctx->flag2_buf_dma,
+ GFP_KERNEL);
+ if (!ctx->flag2_buf) {
+ ret = -ENOMEM;
+
+ goto err_no_mem2;
+ }
+ }
+
+ return 0;
+
+err_no_mem2:
+ dma_free_coherent(dev, FLAG_SIZE, ctx->flag1_buf,
+ ctx->flag1_buf_dma);
+err_no_mem1:
+ pm_runtime_put(dev);
+err_runtime_get:
+ deinterlace_queue_cleanup(vq, VB2_BUF_STATE_QUEUED);
+
+ return ret;
+}
+
+static void deinterlace_stop_streaming(struct vb2_queue *vq)
+{
+ struct deinterlace_ctx *ctx = vb2_get_drv_priv(vq);
+
+ if (V4L2_TYPE_IS_OUTPUT(vq->type)) {
+ struct device *dev = ctx->dev->dev;
+
+ dma_free_coherent(dev, FLAG_SIZE, ctx->flag1_buf,
+ ctx->flag1_buf_dma);
+ dma_free_coherent(dev, FLAG_SIZE, ctx->flag2_buf,
+ ctx->flag2_buf_dma);
+
+ pm_runtime_put(dev);
+ }
+
+ deinterlace_queue_cleanup(vq, VB2_BUF_STATE_ERROR);
+}
+
+static const struct vb2_ops deinterlace_qops = {
+ .queue_setup = deinterlace_queue_setup,
+ .buf_prepare = deinterlace_buf_prepare,
+ .buf_queue = deinterlace_buf_queue,
+ .start_streaming = deinterlace_start_streaming,
+ .stop_streaming = deinterlace_stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+static int deinterlace_queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq)
+{
+ struct deinterlace_ctx *ctx = priv;
+ int ret;
+
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ src_vq->drv_priv = ctx;
+ src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ src_vq->min_buffers_needed = 1;
+ src_vq->ops = &deinterlace_qops;
+ src_vq->mem_ops = &vb2_dma_contig_memops;
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->lock = &ctx->dev->dev_mutex;
+ src_vq->dev = ctx->dev->dev;
+
+ ret = vb2_queue_init(src_vq);
+ if (ret)
+ return ret;
+
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ dst_vq->drv_priv = ctx;
+ dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ dst_vq->min_buffers_needed = 2;
+ dst_vq->ops = &deinterlace_qops;
+ dst_vq->mem_ops = &vb2_dma_contig_memops;
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->lock = &ctx->dev->dev_mutex;
+ dst_vq->dev = ctx->dev->dev;
+
+ ret = vb2_queue_init(dst_vq);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int deinterlace_open(struct file *file)
+{
+ struct deinterlace_dev *dev = video_drvdata(file);
+ struct deinterlace_ctx *ctx = NULL;
+ int ret;
+
+ if (mutex_lock_interruptible(&dev->dev_mutex))
+ return -ERESTARTSYS;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx) {
+ mutex_unlock(&dev->dev_mutex);
+ return -ENOMEM;
+ }
+
+ /* default output format */
+ ctx->src_fmt.pixelformat = deinterlace_formats[0];
+ ctx->src_fmt.field = V4L2_FIELD_INTERLACED;
+ ctx->src_fmt.width = 640;
+ ctx->src_fmt.height = 480;
+ deinterlace_prepare_format(&ctx->src_fmt);
+
+ /* default capture format */
+ ctx->dst_fmt.pixelformat = deinterlace_formats[0];
+ ctx->dst_fmt.field = V4L2_FIELD_NONE;
+ ctx->dst_fmt.width = 640;
+ ctx->dst_fmt.height = 480;
+ deinterlace_prepare_format(&ctx->dst_fmt);
+
+ v4l2_fh_init(&ctx->fh, video_devdata(file));
+ file->private_data = &ctx->fh;
+ ctx->dev = dev;
+
+ ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx,
+ &deinterlace_queue_init);
+ if (IS_ERR(ctx->fh.m2m_ctx)) {
+ ret = PTR_ERR(ctx->fh.m2m_ctx);
+ goto err_free;
+ }
+
+ v4l2_fh_add(&ctx->fh);
+
+ mutex_unlock(&dev->dev_mutex);
+
+ return 0;
+
+err_free:
+ kfree(ctx);
+ mutex_unlock(&dev->dev_mutex);
+
+ return ret;
+}
+
+static int deinterlace_release(struct file *file)
+{
+ struct deinterlace_dev *dev = video_drvdata(file);
+ struct deinterlace_ctx *ctx = container_of(file->private_data,
+ struct deinterlace_ctx, fh);
+
+ mutex_lock(&dev->dev_mutex);
+
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
+
+ kfree(ctx);
+
+ mutex_unlock(&dev->dev_mutex);
+
+ return 0;
+}
+
+static const struct v4l2_file_operations deinterlace_fops = {
+ .owner = THIS_MODULE,
+ .open = deinterlace_open,
+ .release = deinterlace_release,
+ .poll = v4l2_m2m_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = v4l2_m2m_fop_mmap,
+};
+
+static const struct video_device deinterlace_video_device = {
+ .name = DEINTERLACE_NAME,
+ .vfl_dir = VFL_DIR_M2M,
+ .fops = &deinterlace_fops,
+ .ioctl_ops = &deinterlace_ioctl_ops,
+ .minor = -1,
+ .release = video_device_release_empty,
+ .device_caps = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING,
+};
+
+static const struct v4l2_m2m_ops deinterlace_m2m_ops = {
+ .device_run = deinterlace_device_run,
+ .job_ready = deinterlace_job_ready,
+ .job_abort = deinterlace_job_abort,
+};
+
+static int deinterlace_probe(struct platform_device *pdev)
+{
+ struct deinterlace_dev *dev;
+ struct video_device *vfd;
+ struct resource *res;
+ int irq, ret;
+
+ dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ dev->vfd = deinterlace_video_device;
+ dev->dev = &pdev->dev;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0) {
+ dev_err(dev->dev, "Failed to get IRQ\n");
+
+		return irq ? irq : -ENXIO;
+ }
+
+ ret = devm_request_irq(dev->dev, irq, deinterlace_irq,
+ 0, dev_name(dev->dev), dev);
+ if (ret) {
+ dev_err(dev->dev, "Failed to request IRQ\n");
+
+ return ret;
+ }
+
+ ret = of_dma_configure(dev->dev, dev->dev->of_node, true);
+ if (ret)
+ return ret;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ dev->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(dev->base)) {
+ dev_err(dev->dev, "Failed to map registers\n");
+
+ return PTR_ERR(dev->base);
+ }
+
+ dev->bus_clk = devm_clk_get(dev->dev, "bus");
+ if (IS_ERR(dev->bus_clk)) {
+ dev_err(dev->dev, "Failed to get bus clock\n");
+
+ return PTR_ERR(dev->bus_clk);
+ }
+
+ dev->mod_clk = devm_clk_get(dev->dev, "mod");
+ if (IS_ERR(dev->mod_clk)) {
+ dev_err(dev->dev, "Failed to get mod clock\n");
+
+ return PTR_ERR(dev->mod_clk);
+ }
+
+ dev->ram_clk = devm_clk_get(dev->dev, "ram");
+ if (IS_ERR(dev->ram_clk)) {
+ dev_err(dev->dev, "Failed to get ram clock\n");
+
+ return PTR_ERR(dev->ram_clk);
+ }
+
+ dev->rstc = devm_reset_control_get(dev->dev, NULL);
+ if (IS_ERR(dev->rstc)) {
+ dev_err(dev->dev, "Failed to get reset control\n");
+
+ return PTR_ERR(dev->rstc);
+ }
+
+ mutex_init(&dev->dev_mutex);
+
+ ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
+ if (ret) {
+ dev_err(dev->dev, "Failed to register V4L2 device\n");
+
+ return ret;
+ }
+
+ vfd = &dev->vfd;
+ vfd->lock = &dev->dev_mutex;
+ vfd->v4l2_dev = &dev->v4l2_dev;
+
+ snprintf(vfd->name, sizeof(vfd->name), "%s",
+ deinterlace_video_device.name);
+ video_set_drvdata(vfd, dev);
+
+ ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
+ if (ret) {
+ v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
+
+ goto err_v4l2;
+ }
+
+ v4l2_info(&dev->v4l2_dev,
+ "Device registered as /dev/video%d\n", vfd->num);
+
+ dev->m2m_dev = v4l2_m2m_init(&deinterlace_m2m_ops);
+ if (IS_ERR(dev->m2m_dev)) {
+ v4l2_err(&dev->v4l2_dev,
+ "Failed to initialize V4L2 M2M device\n");
+ ret = PTR_ERR(dev->m2m_dev);
+
+ goto err_video;
+ }
+
+ platform_set_drvdata(pdev, dev);
+
+ pm_runtime_enable(dev->dev);
+
+ return 0;
+
+err_video:
+ video_unregister_device(&dev->vfd);
+err_v4l2:
+ v4l2_device_unregister(&dev->v4l2_dev);
+
+ return ret;
+}
+
+static int deinterlace_remove(struct platform_device *pdev)
+{
+ struct deinterlace_dev *dev = platform_get_drvdata(pdev);
+
+ v4l2_m2m_release(dev->m2m_dev);
+ video_unregister_device(&dev->vfd);
+ v4l2_device_unregister(&dev->v4l2_dev);
+
+ pm_runtime_force_suspend(&pdev->dev);
+
+ return 0;
+}
+
+static int deinterlace_runtime_resume(struct device *device)
+{
+ struct deinterlace_dev *dev = dev_get_drvdata(device);
+ int ret;
+
+ ret = clk_set_rate_exclusive(dev->mod_clk, 300000000);
+ if (ret) {
+ dev_err(dev->dev, "Failed to set exclusive mod clock rate\n");
+
+ return ret;
+ }
+
+ ret = clk_prepare_enable(dev->bus_clk);
+ if (ret) {
+ dev_err(dev->dev, "Failed to enable bus clock\n");
+
+		goto err_exclusive_rate;
+ }
+
+ ret = clk_prepare_enable(dev->mod_clk);
+ if (ret) {
+ dev_err(dev->dev, "Failed to enable mod clock\n");
+
+ goto err_bus_clk;
+ }
+
+ ret = clk_prepare_enable(dev->ram_clk);
+ if (ret) {
+ dev_err(dev->dev, "Failed to enable ram clock\n");
+
+ goto err_mod_clk;
+ }
+
+ ret = reset_control_deassert(dev->rstc);
+ if (ret) {
+		dev_err(dev->dev, "Failed to deassert reset\n");
+
+ goto err_ram_clk;
+ }
+
+ deinterlace_init(dev);
+
+ return 0;
+
+err_ram_clk:
+	clk_disable_unprepare(dev->ram_clk);
+err_mod_clk:
+	clk_disable_unprepare(dev->mod_clk);
+err_bus_clk:
+	clk_disable_unprepare(dev->bus_clk);
+err_exclusive_rate:
+	clk_rate_exclusive_put(dev->mod_clk);
+
+ return ret;
+}
+
+static int deinterlace_runtime_suspend(struct device *device)
+{
+ struct deinterlace_dev *dev = dev_get_drvdata(device);
+
+ reset_control_assert(dev->rstc);
+
+ clk_disable_unprepare(dev->ram_clk);
+ clk_disable_unprepare(dev->mod_clk);
+ clk_disable_unprepare(dev->bus_clk);
+ clk_rate_exclusive_put(dev->mod_clk);
+
+ return 0;
+}
+
+static const struct of_device_id deinterlace_dt_match[] = {
+ { .compatible = "allwinner,sun8i-h3-deinterlace" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, deinterlace_dt_match);
+
+static const struct dev_pm_ops deinterlace_pm_ops = {
+ .runtime_resume = deinterlace_runtime_resume,
+ .runtime_suspend = deinterlace_runtime_suspend,
+};
+
+static struct platform_driver deinterlace_driver = {
+ .probe = deinterlace_probe,
+ .remove = deinterlace_remove,
+ .driver = {
+ .name = DEINTERLACE_NAME,
+ .of_match_table = deinterlace_dt_match,
+ .pm = &deinterlace_pm_ops,
+ },
+};
+module_platform_driver(deinterlace_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Jernej Skrabec <jernej.skrabec@siol.net>");
+MODULE_DESCRIPTION("Allwinner Deinterlace driver");
diff --git a/drivers/media/platform/sunxi/sun8i-di/sun8i-di.h b/drivers/media/platform/sunxi/sun8i-di/sun8i-di.h
new file mode 100644
index 000000000000..0254251d8687
--- /dev/null
+++ b/drivers/media/platform/sunxi/sun8i-di/sun8i-di.h
@@ -0,0 +1,237 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Allwinner Deinterlace driver
+ *
+ * Copyright (C) 2019 Jernej Skrabec <jernej.skrabec@siol.net>
+ */
+
+#ifndef _SUN8I_DEINTERLACE_H_
+#define _SUN8I_DEINTERLACE_H_
+
+#include <media/v4l2-device.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-v4l2.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include <linux/platform_device.h>
+
+#define DEINTERLACE_NAME "sun8i-di"
+
+#define DEINTERLACE_MOD_ENABLE 0x00
+#define DEINTERLACE_MOD_ENABLE_EN BIT(0)
+
+#define DEINTERLACE_FRM_CTRL 0x04
+#define DEINTERLACE_FRM_CTRL_REG_READY BIT(0)
+#define DEINTERLACE_FRM_CTRL_WB_EN BIT(2)
+#define DEINTERLACE_FRM_CTRL_OUT_CTRL BIT(11)
+#define DEINTERLACE_FRM_CTRL_START BIT(16)
+#define DEINTERLACE_FRM_CTRL_COEF_ACCESS BIT(23)
+
+#define DEINTERLACE_BYPASS 0x08
+#define DEINTERLACE_BYPASS_CSC BIT(1)
+
+#define DEINTERLACE_AGTH_SEL 0x0c
+#define DEINTERLACE_AGTH_SEL_LINEBUF BIT(8)
+
+#define DEINTERLACE_LINT_CTRL 0x10
+#define DEINTERLACE_TRD_PRELUMA 0x1c
+#define DEINTERLACE_BUF_ADDR0 0x20
+#define DEINTERLACE_BUF_ADDR1 0x24
+#define DEINTERLACE_BUF_ADDR2 0x28
+
+#define DEINTERLACE_FIELD_CTRL 0x2c
+#define DEINTERLACE_FIELD_CTRL_FIELD_CNT(v) ((v) & 0xff)
+#define DEINTERLACE_FIELD_CTRL_FIELD_CNT_MSK (0xff)
+
+#define DEINTERLACE_TB_OFFSET0 0x30
+#define DEINTERLACE_TB_OFFSET1 0x34
+#define DEINTERLACE_TB_OFFSET2 0x38
+#define DEINTERLACE_TRD_PRECHROMA 0x3c
+#define DEINTERLACE_LINE_STRIDE0 0x40
+#define DEINTERLACE_LINE_STRIDE1 0x44
+#define DEINTERLACE_LINE_STRIDE2 0x48
+
+#define DEINTERLACE_IN_FMT 0x4c
+#define DEINTERLACE_IN_FMT_PS(v) ((v) & 3)
+#define DEINTERLACE_IN_FMT_FMT(v) (((v) & 7) << 4)
+#define DEINTERLACE_IN_FMT_MOD(v) (((v) & 7) << 8)
+
+#define DEINTERLACE_WB_ADDR0 0x50
+#define DEINTERLACE_WB_ADDR1 0x54
+#define DEINTERLACE_WB_ADDR2 0x58
+
+#define DEINTERLACE_OUT_FMT 0x5c
+#define DEINTERLACE_OUT_FMT_FMT(v) ((v) & 0xf)
+#define DEINTERLACE_OUT_FMT_PS(v) (((v) & 3) << 5)
+
+#define DEINTERLACE_INT_ENABLE 0x60
+#define DEINTERLACE_INT_ENABLE_WB_EN BIT(7)
+
+#define DEINTERLACE_INT_STATUS 0x64
+#define DEINTERLACE_INT_STATUS_WRITEBACK BIT(7)
+
+#define DEINTERLACE_STATUS 0x68
+#define DEINTERLACE_STATUS_COEF_STATUS BIT(11)
+#define DEINTERLACE_STATUS_WB_ERROR BIT(12)
+
+#define DEINTERLACE_CSC_COEF 0x70 /* 12 registers */
+
+#define DEINTERLACE_CTRL 0xa0
+#define DEINTERLACE_CTRL_EN BIT(0)
+#define DEINTERLACE_CTRL_FLAG_OUT_EN BIT(8)
+#define DEINTERLACE_CTRL_MODE_PASSTHROUGH	(0 << 16)
+#define DEINTERLACE_CTRL_MODE_WEAVE (1 << 16)
+#define DEINTERLACE_CTRL_MODE_BOB (2 << 16)
+#define DEINTERLACE_CTRL_MODE_MIXED (3 << 16)
+#define DEINTERLACE_CTRL_DIAG_INTP_EN BIT(24)
+#define DEINTERLACE_CTRL_TEMP_DIFF_EN BIT(25)
+
+#define DEINTERLACE_DIAG_INTP 0xa4
+#define DEINTERLACE_DIAG_INTP_TH0(v) ((v) & 0x7f)
+#define DEINTERLACE_DIAG_INTP_TH0_MSK (0x7f)
+#define DEINTERLACE_DIAG_INTP_TH1(v) (((v) & 0x7f) << 8)
+#define DEINTERLACE_DIAG_INTP_TH1_MSK (0x7f << 8)
+#define DEINTERLACE_DIAG_INTP_TH3(v) (((v) & 0xff) << 24)
+#define DEINTERLACE_DIAG_INTP_TH3_MSK (0xff << 24)
+
+#define DEINTERLACE_TEMP_DIFF 0xa8
+#define DEINTERLACE_TEMP_DIFF_SAD_CENTRAL_TH(v) ((v) & 0x7f)
+#define DEINTERLACE_TEMP_DIFF_SAD_CENTRAL_TH_MSK (0x7f)
+#define DEINTERLACE_TEMP_DIFF_AMBIGUITY_TH(v) (((v) & 0x7f) << 8)
+#define DEINTERLACE_TEMP_DIFF_AMBIGUITY_TH_MSK (0x7f << 8)
+#define DEINTERLACE_TEMP_DIFF_DIRECT_DITHER_TH(v) (((v) & 0x7ff) << 16)
+#define DEINTERLACE_TEMP_DIFF_DIRECT_DITHER_TH_MSK (0x7ff << 16)
+
+#define DEINTERLACE_LUMA_TH 0xac
+#define DEINTERLACE_LUMA_TH_MIN_LUMA(v) ((v) & 0xff)
+#define DEINTERLACE_LUMA_TH_MIN_LUMA_MSK (0xff)
+#define DEINTERLACE_LUMA_TH_MAX_LUMA(v) (((v) & 0xff) << 8)
+#define DEINTERLACE_LUMA_TH_MAX_LUMA_MSK (0xff << 8)
+#define DEINTERLACE_LUMA_TH_AVG_LUMA_SHIFT(v) (((v) & 0xff) << 16)
+#define DEINTERLACE_LUMA_TH_AVG_LUMA_SHIFT_MSK (0xff << 16)
+#define DEINTERLACE_LUMA_TH_PIXEL_STATIC(v) (((v) & 3) << 24)
+#define DEINTERLACE_LUMA_TH_PIXEL_STATIC_MSK (3 << 24)
+
+#define DEINTERLACE_SPAT_COMP 0xb0
+#define DEINTERLACE_SPAT_COMP_TH2(v) ((v) & 0xff)
+#define DEINTERLACE_SPAT_COMP_TH2_MSK (0xff)
+#define DEINTERLACE_SPAT_COMP_TH3(v) (((v) & 0xff) << 16)
+#define DEINTERLACE_SPAT_COMP_TH3_MSK (0xff << 16)
+
+#define DEINTERLACE_CHROMA_DIFF 0xb4
+#define DEINTERLACE_CHROMA_DIFF_TH(v) ((v) & 0xff)
+#define DEINTERLACE_CHROMA_DIFF_TH_MSK (0xff)
+#define DEINTERLACE_CHROMA_DIFF_LUMA(v) (((v) & 0x3f) << 16)
+#define DEINTERLACE_CHROMA_DIFF_LUMA_MSK (0x3f << 16)
+#define DEINTERLACE_CHROMA_DIFF_CHROMA(v) (((v) & 0x3f) << 24)
+#define DEINTERLACE_CHROMA_DIFF_CHROMA_MSK (0x3f << 24)
+
+#define DEINTERLACE_PRELUMA 0xb8
+#define DEINTERLACE_PRECHROMA 0xbc
+#define DEINTERLACE_TILE_FLAG0 0xc0
+#define DEINTERLACE_TILE_FLAG1 0xc4
+#define DEINTERLACE_FLAG_LINE_STRIDE 0xc8
+#define DEINTERLACE_FLAG_SEQ 0xcc
+
+#define DEINTERLACE_WB_LINE_STRIDE_CTRL 0xd0
+#define DEINTERLACE_WB_LINE_STRIDE_CTRL_EN BIT(0)
+
+#define DEINTERLACE_WB_LINE_STRIDE0 0xd4
+#define DEINTERLACE_WB_LINE_STRIDE1 0xd8
+#define DEINTERLACE_WB_LINE_STRIDE2 0xdc
+#define DEINTERLACE_TRD_CTRL 0xe0
+#define DEINTERLACE_TRD_BUF_ADDR0 0xe4
+#define DEINTERLACE_TRD_BUF_ADDR1 0xe8
+#define DEINTERLACE_TRD_BUF_ADDR2 0xec
+#define DEINTERLACE_TRD_TB_OFF0 0xf0
+#define DEINTERLACE_TRD_TB_OFF1 0xf4
+#define DEINTERLACE_TRD_TB_OFF2 0xf8
+#define DEINTERLACE_TRD_WB_STRIDE 0xfc
+#define DEINTERLACE_CH0_IN_SIZE 0x100
+#define DEINTERLACE_CH0_OUT_SIZE 0x104
+#define DEINTERLACE_CH0_HORZ_FACT 0x108
+#define DEINTERLACE_CH0_VERT_FACT 0x10c
+#define DEINTERLACE_CH0_HORZ_PHASE 0x110
+#define DEINTERLACE_CH0_VERT_PHASE0 0x114
+#define DEINTERLACE_CH0_VERT_PHASE1 0x118
+#define DEINTERLACE_CH0_HORZ_TAP0 0x120
+#define DEINTERLACE_CH0_HORZ_TAP1 0x124
+#define DEINTERLACE_CH0_VERT_TAP 0x128
+#define DEINTERLACE_CH1_IN_SIZE 0x200
+#define DEINTERLACE_CH1_OUT_SIZE 0x204
+#define DEINTERLACE_CH1_HORZ_FACT 0x208
+#define DEINTERLACE_CH1_VERT_FACT 0x20c
+#define DEINTERLACE_CH1_HORZ_PHASE 0x210
+#define DEINTERLACE_CH1_VERT_PHASE0 0x214
+#define DEINTERLACE_CH1_VERT_PHASE1 0x218
+#define DEINTERLACE_CH1_HORZ_TAP0 0x220
+#define DEINTERLACE_CH1_HORZ_TAP1 0x224
+#define DEINTERLACE_CH1_VERT_TAP 0x228
+#define DEINTERLACE_CH0_HORZ_COEF0 0x400 /* 32 registers */
+#define DEINTERLACE_CH0_HORZ_COEF1 0x480 /* 32 registers */
+#define DEINTERLACE_CH0_VERT_COEF 0x500 /* 32 registers */
+#define DEINTERLACE_CH1_HORZ_COEF0 0x600 /* 32 registers */
+#define DEINTERLACE_CH1_HORZ_COEF1 0x680 /* 32 registers */
+#define DEINTERLACE_CH1_VERT_COEF 0x700 /* 32 registers */
+#define DEINTERLACE_CH3_HORZ_COEF0 0x800 /* 32 registers */
+#define DEINTERLACE_CH3_HORZ_COEF1 0x880 /* 32 registers */
+#define DEINTERLACE_CH3_VERT_COEF 0x900 /* 32 registers */
+
+#define DEINTERLACE_MIN_WIDTH 2U
+#define DEINTERLACE_MIN_HEIGHT 2U
+#define DEINTERLACE_MAX_WIDTH 2048U
+#define DEINTERLACE_MAX_HEIGHT 1100U
+
+#define DEINTERLACE_MODE_UV_COMBINED 2
+
+#define DEINTERLACE_IN_FMT_YUV420 2
+
+#define DEINTERLACE_OUT_FMT_YUV420SP 13
+
+#define DEINTERLACE_PS_UVUV 0
+#define DEINTERLACE_PS_VUVU 1
+
+#define DEINTERLACE_IDENTITY_COEF 0x4000
+
+#define DEINTERLACE_SIZE(w, h) (((h) - 1) << 16 | ((w) - 1))
+
+struct deinterlace_ctx {
+ struct v4l2_fh fh;
+ struct deinterlace_dev *dev;
+
+ struct v4l2_pix_format src_fmt;
+ struct v4l2_pix_format dst_fmt;
+
+ void *flag1_buf;
+ dma_addr_t flag1_buf_dma;
+
+ void *flag2_buf;
+ dma_addr_t flag2_buf_dma;
+
+ struct vb2_v4l2_buffer *prev;
+
+ unsigned int first_field;
+ unsigned int field;
+
+ int aborting;
+};
+
+struct deinterlace_dev {
+ struct v4l2_device v4l2_dev;
+ struct video_device vfd;
+ struct device *dev;
+ struct v4l2_m2m_dev *m2m_dev;
+
+ /* Device file mutex */
+ struct mutex dev_mutex;
+
+ void __iomem *base;
+
+ struct clk *bus_clk;
+ struct clk *mod_clk;
+ struct clk *ram_clk;
+
+ struct reset_control *rstc;
+};
+
+#endif
diff --git a/drivers/media/platform/ti-vpe/csc.c b/drivers/media/platform/ti-vpe/csc.c
index eda2a5985da7..834114a4eebe 100644
--- a/drivers/media/platform/ti-vpe/csc.c
+++ b/drivers/media/platform/ti-vpe/csc.c
@@ -15,76 +15,96 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/videodev2.h>
+#include <media/v4l2-common.h>
#include "csc.h"
/*
- * 16 coefficients in the order:
+ * 12 coefficients in the order:
* a0, b0, c0, a1, b1, c1, a2, b2, c2, d0, d1, d2
- * (we may need to pass non-default values from user space later on, we might
- * need to make the coefficient struct more easy to populate)
*/
-struct colorspace_coeffs {
- u16 sd[12];
- u16 hd[12];
+struct quantization {
+ u16 coeff[12];
};
-/* VIDEO_RANGE: limited range, GRAPHICS_RANGE: full range */
-#define CSC_COEFFS_VIDEO_RANGE_Y2R 0
-#define CSC_COEFFS_GRAPHICS_RANGE_Y2R 1
-#define CSC_COEFFS_VIDEO_RANGE_R2Y 2
-#define CSC_COEFFS_GRAPHICS_RANGE_R2Y 3
+struct colorspace {
+ struct quantization limited;
+ struct quantization full;
+};
+
+struct encoding_direction {
+ struct colorspace r601;
+ struct colorspace r709;
+};
+
+struct csc_coeffs {
+ struct encoding_direction y2r;
+ struct encoding_direction r2y;
+};
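+
+/*
+ * Coefficient sets are selected by conversion direction (Y2R or R2Y),
+ * YCbCr encoding (BT.601 or BT.709) and the quantization range of the
+ * YUV side of the conversion.
+ */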
/* default colorspace coefficients */
-static struct colorspace_coeffs colorspace_coeffs[4] = {
- [CSC_COEFFS_VIDEO_RANGE_Y2R] = {
- {
- /* SDTV */
- 0x0400, 0x0000, 0x057D, 0x0400, 0x1EA7, 0x1D35,
- 0x0400, 0x06EF, 0x1FFE, 0x0D40, 0x0210, 0x0C88,
+static struct csc_coeffs csc_coeffs = {
+ .y2r = {
+ .r601 = {
+ .limited = {
+ { /* SDTV */
+ 0x0400, 0x0000, 0x057D, 0x0400, 0x1EA7, 0x1D35,
+ 0x0400, 0x06EF, 0x1FFE, 0x0D40, 0x0210, 0x0C88,
+ }
+ },
+ .full = {
+ { /* SDTV */
+ 0x04A8, 0x1FFE, 0x0662, 0x04A8, 0x1E6F, 0x1CBF,
+ 0x04A8, 0x0812, 0x1FFF, 0x0C84, 0x0220, 0x0BAC,
+ }
+ },
},
- {
- /* HDTV */
- 0x0400, 0x0000, 0x0629, 0x0400, 0x1F45, 0x1E2B,
- 0x0400, 0x0742, 0x0000, 0x0CEC, 0x0148, 0x0C60,
+ .r709 = {
+ .limited = {
+ { /* HDTV */
+ 0x0400, 0x0000, 0x0629, 0x0400, 0x1F45, 0x1E2B,
+ 0x0400, 0x0742, 0x0000, 0x0CEC, 0x0148, 0x0C60,
+ }
+ },
+ .full = {
+ { /* HDTV */
+ 0x04A8, 0x0000, 0x072C, 0x04A8, 0x1F26, 0x1DDE,
+ 0x04A8, 0x0873, 0x0000, 0x0C20, 0x0134, 0x0B7C,
+ }
+ },
},
},
- [CSC_COEFFS_GRAPHICS_RANGE_Y2R] = {
- {
- /* SDTV */
- 0x04A8, 0x1FFE, 0x0662, 0x04A8, 0x1E6F, 0x1CBF,
- 0x04A8, 0x0812, 0x1FFF, 0x0C84, 0x0220, 0x0BAC,
+ .r2y = {
+ .r601 = {
+ .limited = {
+ { /* SDTV */
+ 0x0132, 0x0259, 0x0075, 0x1F50, 0x1EA5, 0x020B,
+ 0x020B, 0x1E4A, 0x1FAB, 0x0000, 0x0200, 0x0200,
+ }
+ },
+ .full = {
+ { /* SDTV */
+ 0x0107, 0x0204, 0x0064, 0x1F68, 0x1ED6, 0x01C2,
+ 0x01C2, 0x1E87, 0x1FB7, 0x0040, 0x0200, 0x0200,
+ }
+ },
},
- {
- /* HDTV */
- 0x04A8, 0x0000, 0x072C, 0x04A8, 0x1F26, 0x1DDE,
- 0x04A8, 0x0873, 0x0000, 0x0C20, 0x0134, 0x0B7C,
- },
- },
- [CSC_COEFFS_VIDEO_RANGE_R2Y] = {
- {
- /* SDTV */
- 0x0132, 0x0259, 0x0075, 0x1F50, 0x1EA5, 0x020B,
- 0x020B, 0x1E4A, 0x1FAB, 0x0000, 0x0200, 0x0200,
- },
- {
- /* HDTV */
- 0x00DA, 0x02DC, 0x004A, 0x1F88, 0x1E6C, 0x020C,
- 0x020C, 0x1E24, 0x1FD0, 0x0000, 0x0200, 0x0200,
- },
- },
- [CSC_COEFFS_GRAPHICS_RANGE_R2Y] = {
- {
- /* SDTV */
- 0x0107, 0x0204, 0x0064, 0x1F68, 0x1ED6, 0x01C2,
- 0x01C2, 0x1E87, 0x1FB7, 0x0040, 0x0200, 0x0200,
- },
- {
- /* HDTV */
- 0x04A8, 0x0000, 0x072C, 0x04A8, 0x1F26, 0x1DDE,
- 0x04A8, 0x0873, 0x0000, 0x0C20, 0x0134, 0x0B7C,
+ .r709 = {
+ .limited = {
+ { /* HDTV */
+ 0x00DA, 0x02DC, 0x004A, 0x1F88, 0x1E6C, 0x020C,
+ 0x020C, 0x1E24, 0x1FD0, 0x0000, 0x0200, 0x0200,
+ }
+ },
+ .full = {
+ { /* HDTV */
+				0x00BB, 0x0275, 0x003F, 0x1F99, 0x1EA5, 0x01C2,
+				0x01C2, 0x1E67, 0x1FD7, 0x0040, 0x0200, 0x0200,
+ }
+ },
},
},
+
};
void csc_dump_regs(struct csc_data *csc)
@@ -117,46 +137,114 @@ EXPORT_SYMBOL(csc_set_coeff_bypass);
* set the color space converter coefficient shadow register values
*/
void csc_set_coeff(struct csc_data *csc, u32 *csc_reg0,
- enum v4l2_colorspace src_colorspace,
- enum v4l2_colorspace dst_colorspace)
+ struct v4l2_format *src_fmt, struct v4l2_format *dst_fmt)
{
u32 *csc_reg5 = csc_reg0 + 5;
u32 *shadow_csc = csc_reg0;
- struct colorspace_coeffs *sd_hd_coeffs;
u16 *coeff, *end_coeff;
- enum v4l2_colorspace yuv_colorspace;
- int sel = 0;
-
- /*
- * support only graphics data range(full range) for now, a control ioctl
- * would be nice here
- */
- /* Y2R */
- if (dst_colorspace == V4L2_COLORSPACE_SRGB &&
- (src_colorspace == V4L2_COLORSPACE_SMPTE170M ||
- src_colorspace == V4L2_COLORSPACE_REC709)) {
+ const struct v4l2_pix_format *pix;
+ const struct v4l2_pix_format_mplane *mp;
+ const struct v4l2_format_info *src_finfo, *dst_finfo;
+ enum v4l2_ycbcr_encoding src_ycbcr_enc, dst_ycbcr_enc;
+ enum v4l2_quantization src_quantization, dst_quantization;
+ u32 src_pixelformat, dst_pixelformat;
+
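+	/*
+	 * Pull the pixel format, encoding and quantization from either the
+	 * single-planar or multi-planar variant of the v4l2 format.
+	 */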
+ switch (src_fmt->type) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ pix = &src_fmt->fmt.pix;
+ src_pixelformat = pix->pixelformat;
+ src_ycbcr_enc = pix->ycbcr_enc;
+ src_quantization = pix->quantization;
+ break;
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ default:
+ mp = &src_fmt->fmt.pix_mp;
+ src_pixelformat = mp->pixelformat;
+ src_ycbcr_enc = mp->ycbcr_enc;
+ src_quantization = mp->quantization;
+ break;
+ }
+
+ switch (dst_fmt->type) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ pix = &dst_fmt->fmt.pix;
+ dst_pixelformat = pix->pixelformat;
+ dst_ycbcr_enc = pix->ycbcr_enc;
+ dst_quantization = pix->quantization;
+ break;
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ default:
+ mp = &dst_fmt->fmt.pix_mp;
+ dst_pixelformat = mp->pixelformat;
+ dst_ycbcr_enc = mp->ycbcr_enc;
+ dst_quantization = mp->quantization;
+ break;
+ }
+
+ src_finfo = v4l2_format_info(src_pixelformat);
+ dst_finfo = v4l2_format_info(dst_pixelformat);
+
+ if (v4l2_is_format_yuv(src_finfo) &&
+ v4l2_is_format_rgb(dst_finfo)) {
/* Y2R */
- sel = 1;
- yuv_colorspace = src_colorspace;
- } else if ((dst_colorspace == V4L2_COLORSPACE_SMPTE170M ||
- dst_colorspace == V4L2_COLORSPACE_REC709) &&
- src_colorspace == V4L2_COLORSPACE_SRGB) {
+
+ /*
+ * These are not the standard default values but are
+ * set this way for historical compatibility
+ */
+ if (src_ycbcr_enc == V4L2_YCBCR_ENC_DEFAULT)
+ src_ycbcr_enc = V4L2_YCBCR_ENC_601;
+
+ if (src_quantization == V4L2_QUANTIZATION_DEFAULT)
+ src_quantization = V4L2_QUANTIZATION_FULL_RANGE;
+
+ if (src_ycbcr_enc == V4L2_YCBCR_ENC_601) {
+ if (src_quantization == V4L2_QUANTIZATION_FULL_RANGE)
+ coeff = csc_coeffs.y2r.r601.full.coeff;
+ else
+ coeff = csc_coeffs.y2r.r601.limited.coeff;
+ } else if (src_ycbcr_enc == V4L2_YCBCR_ENC_709) {
+ if (src_quantization == V4L2_QUANTIZATION_FULL_RANGE)
+ coeff = csc_coeffs.y2r.r709.full.coeff;
+ else
+ coeff = csc_coeffs.y2r.r709.limited.coeff;
+ } else {
+ /* Should never reach this, but it keeps gcc happy */
+ coeff = csc_coeffs.y2r.r601.full.coeff;
+ }
+ } else if (v4l2_is_format_rgb(src_finfo) &&
+ v4l2_is_format_yuv(dst_finfo)) {
/* R2Y */
- sel = 3;
- yuv_colorspace = dst_colorspace;
+
+ /*
+ * These are not the standard default values but are
+ * set this way for historical compatibility
+ */
+ if (dst_ycbcr_enc == V4L2_YCBCR_ENC_DEFAULT)
+ dst_ycbcr_enc = V4L2_YCBCR_ENC_601;
+
+ if (dst_quantization == V4L2_QUANTIZATION_DEFAULT)
+ dst_quantization = V4L2_QUANTIZATION_FULL_RANGE;
+
+ if (dst_ycbcr_enc == V4L2_YCBCR_ENC_601) {
+ if (dst_quantization == V4L2_QUANTIZATION_FULL_RANGE)
+ coeff = csc_coeffs.r2y.r601.full.coeff;
+ else
+ coeff = csc_coeffs.r2y.r601.limited.coeff;
+ } else if (dst_ycbcr_enc == V4L2_YCBCR_ENC_709) {
+ if (dst_quantization == V4L2_QUANTIZATION_FULL_RANGE)
+ coeff = csc_coeffs.r2y.r709.full.coeff;
+ else
+ coeff = csc_coeffs.r2y.r709.limited.coeff;
+ } else {
+ /* Should never reach this, but it keeps gcc happy */
+ coeff = csc_coeffs.r2y.r601.full.coeff;
+ }
} else {
*csc_reg5 |= CSC_BYPASS;
return;
}
- sd_hd_coeffs = &colorspace_coeffs[sel];
-
- /* select between SD or HD coefficients */
- if (yuv_colorspace == V4L2_COLORSPACE_SMPTE170M)
- coeff = sd_hd_coeffs->sd;
- else
- coeff = sd_hd_coeffs->hd;
-
end_coeff = coeff + 12;
for (; coeff < end_coeff; coeff += 2)
diff --git a/drivers/media/platform/ti-vpe/csc.h b/drivers/media/platform/ti-vpe/csc.h
index de9a58af2ca8..af2e86bccf57 100644
--- a/drivers/media/platform/ti-vpe/csc.h
+++ b/drivers/media/platform/ti-vpe/csc.h
@@ -58,8 +58,8 @@ struct csc_data {
void csc_dump_regs(struct csc_data *csc);
void csc_set_coeff_bypass(struct csc_data *csc, u32 *csc_reg5);
void csc_set_coeff(struct csc_data *csc, u32 *csc_reg0,
- enum v4l2_colorspace src_colorspace,
- enum v4l2_colorspace dst_colorspace);
+ struct v4l2_format *src_fmt, struct v4l2_format *dst_fmt);
+
struct csc_data *csc_create(struct platform_device *pdev, const char *res_name);
#endif
diff --git a/drivers/media/platform/ti-vpe/vpdma.c b/drivers/media/platform/ti-vpe/vpdma.c
index 53d27cd6e10a..2e5148ae7a0f 100644
--- a/drivers/media/platform/ti-vpe/vpdma.c
+++ b/drivers/media/platform/ti-vpe/vpdma.c
@@ -56,6 +56,11 @@ const struct vpdma_data_format vpdma_yuv_fmts[] = {
.data_type = DATA_TYPE_C420,
.depth = 4,
},
+ [VPDMA_DATA_FMT_CB420] = {
+ .type = VPDMA_DATA_FMT_TYPE_YUV,
+ .data_type = DATA_TYPE_CB420,
+ .depth = 4,
+ },
[VPDMA_DATA_FMT_YCR422] = {
.type = VPDMA_DATA_FMT_TYPE_YUV,
.data_type = DATA_TYPE_YCR422,
@@ -759,7 +764,7 @@ static void dump_dtd(struct vpdma_dtd *dtd)
pr_debug("word1: line_length = %d, xfer_height = %d\n",
dtd_get_line_length(dtd), dtd_get_xfer_height(dtd));
- pr_debug("word2: start_addr = %pad\n", &dtd->start_addr);
+ pr_debug("word2: start_addr = %x\n", dtd->start_addr);
pr_debug("word3: pkt_type = %d, mode = %d, dir = %d, chan = %d, pri = %d, next_chan = %d\n",
dtd_get_pkt_type(dtd),
@@ -825,7 +830,8 @@ void vpdma_rawchan_add_out_dtd(struct vpdma_desc_list *list, int width,
channel = next_chan = raw_vpdma_chan;
if (fmt->type == VPDMA_DATA_FMT_TYPE_YUV &&
- fmt->data_type == DATA_TYPE_C420) {
+ (fmt->data_type == DATA_TYPE_C420 ||
+ fmt->data_type == DATA_TYPE_CB420)) {
rect.height >>= 1;
rect.top >>= 1;
depth = 8;
@@ -893,7 +899,8 @@ void vpdma_add_in_dtd(struct vpdma_desc_list *list, int width,
channel = next_chan = chan_info[chan].num;
if (fmt->type == VPDMA_DATA_FMT_TYPE_YUV &&
- fmt->data_type == DATA_TYPE_C420) {
+ (fmt->data_type == DATA_TYPE_C420 ||
+ fmt->data_type == DATA_TYPE_CB420)) {
rect.height >>= 1;
rect.top >>= 1;
depth = 8;
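Both hunks apply the same 4:2:0 chroma fix-up to the new CB420 (swapped
CbCr) data type. A minimal sketch of that shared step (hypothetical helper,
assuming the rect/depth usage shown above):

	/* Hypothetical: adjust a DTD rectangle for a 4:2:0 chroma plane */
	static void vpdma_fixup_c420_rect(struct v4l2_rect *rect, int *depth)
	{
		rect->height >>= 1;	/* chroma plane is half the luma height */
		rect->top >>= 1;	/* and starts at half the luma offset */
		*depth = 8;		/* interleaved CbCr/CrCb, 8 bpp */
	}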
diff --git a/drivers/media/platform/ti-vpe/vpdma.h b/drivers/media/platform/ti-vpe/vpdma.h
index 28bc94129348..393fcbb3cb40 100644
--- a/drivers/media/platform/ti-vpe/vpdma.h
+++ b/drivers/media/platform/ti-vpe/vpdma.h
@@ -57,6 +57,7 @@ struct vpdma_data_format {
* line stride of source and dest
* buffers should be 16 byte aligned
*/
+#define VPDMA_MAX_STRIDE 65520 /* Max line stride 16 byte aligned */
#define VPDMA_DTD_DESC_SIZE 32 /* 8 words */
#define VPDMA_CFD_CTD_DESC_SIZE 16 /* 4 words */
@@ -71,6 +72,7 @@ enum vpdma_yuv_formats {
VPDMA_DATA_FMT_C444,
VPDMA_DATA_FMT_C422,
VPDMA_DATA_FMT_C420,
+ VPDMA_DATA_FMT_CB420,
VPDMA_DATA_FMT_YCR422,
VPDMA_DATA_FMT_YC444,
VPDMA_DATA_FMT_CRY422,
diff --git a/drivers/media/platform/ti-vpe/vpdma_priv.h b/drivers/media/platform/ti-vpe/vpdma_priv.h
index c488609bc162..0bbee45338bd 100644
--- a/drivers/media/platform/ti-vpe/vpdma_priv.h
+++ b/drivers/media/platform/ti-vpe/vpdma_priv.h
@@ -92,6 +92,7 @@
#define DATA_TYPE_C444 0x4
#define DATA_TYPE_C422 0x5
#define DATA_TYPE_C420 0x6
+#define DATA_TYPE_CB420 0x16
#define DATA_TYPE_YC444 0x8
#define DATA_TYPE_YCB422 0x7
#define DATA_TYPE_YCR422 0x17
@@ -165,11 +166,11 @@ struct vpdma_dtd {
u32 xfer_length_height;
u32 w1;
};
- dma_addr_t start_addr;
+ u32 start_addr;
u32 pkt_ctl;
union {
u32 frame_width_height; /* inbound */
- dma_addr_t desc_write_addr; /* outbound */
+ u32 desc_write_addr; /* outbound */
};
union {
u32 start_h_v; /* inbound */
diff --git a/drivers/media/platform/ti-vpe/vpe.c b/drivers/media/platform/ti-vpe/vpe.c
index 60b575bb44c4..65c2c048b018 100644
--- a/drivers/media/platform/ti-vpe/vpe.c
+++ b/drivers/media/platform/ti-vpe/vpe.c
@@ -52,7 +52,7 @@
#define MIN_W 32
#define MIN_H 32
#define MAX_W 2048
-#define MAX_H 1184
+#define MAX_H 2048
/* required alignments */
#define S_ALIGN 0 /* multiple of 1 */
@@ -249,6 +249,14 @@ static struct vpe_fmt vpe_formats[] = {
},
},
{
+ .fourcc = V4L2_PIX_FMT_NV21,
+ .types = VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT,
+ .coplanar = 1,
+ .vpdma_fmt = { &vpdma_yuv_fmts[VPDMA_DATA_FMT_Y420],
+ &vpdma_yuv_fmts[VPDMA_DATA_FMT_CB420],
+ },
+ },
+ {
.fourcc = V4L2_PIX_FMT_YUYV,
.types = VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT,
.coplanar = 0,
@@ -311,14 +319,9 @@ static struct vpe_fmt vpe_formats[] = {
* there is one source queue and one destination queue for each m2m context.
*/
struct vpe_q_data {
- unsigned int width; /* frame width */
- unsigned int height; /* frame height */
- unsigned int nplanes; /* Current number of planes */
- unsigned int bytesperline[VPE_MAX_PLANES]; /* bytes per line in memory */
- enum v4l2_colorspace colorspace;
- enum v4l2_field field; /* supported field value */
+ /* current v4l2 format info */
+ struct v4l2_format format;
unsigned int flags;
- unsigned int sizeimage[VPE_MAX_PLANES]; /* image size in memory */
struct v4l2_rect c_rect; /* crop/compose rectangle */
struct vpe_fmt *fmt; /* format info */
};
@@ -328,9 +331,14 @@ struct vpe_q_data {
#define Q_DATA_MODE_TILED BIT(1)
#define Q_DATA_INTERLACED_ALTERNATE BIT(2)
#define Q_DATA_INTERLACED_SEQ_TB BIT(3)
+#define Q_DATA_INTERLACED_SEQ_BT BIT(4)
+
+#define Q_IS_SEQ_XX (Q_DATA_INTERLACED_SEQ_TB | \
+ Q_DATA_INTERLACED_SEQ_BT)
#define Q_IS_INTERLACED (Q_DATA_INTERLACED_ALTERNATE | \
- Q_DATA_INTERLACED_SEQ_TB)
+ Q_DATA_INTERLACED_SEQ_TB | \
+ Q_DATA_INTERLACED_SEQ_BT)
enum {
Q_DATA_SRC = 0,
@@ -338,20 +346,25 @@ enum {
};
/* find our format description corresponding to the passed v4l2_format */
-static struct vpe_fmt *find_format(struct v4l2_format *f)
+static struct vpe_fmt *__find_format(u32 fourcc)
{
struct vpe_fmt *fmt;
unsigned int k;
for (k = 0; k < ARRAY_SIZE(vpe_formats); k++) {
fmt = &vpe_formats[k];
- if (fmt->fourcc == f->fmt.pix.pixelformat)
+ if (fmt->fourcc == fourcc)
return fmt;
}
return NULL;
}
+static struct vpe_fmt *find_format(struct v4l2_format *f)
+{
+ return __find_format(f->fmt.pix.pixelformat);
+}
+
/*
* there is one vpe_dev structure in the driver, it is shared by
* all instances.
@@ -681,7 +694,8 @@ static void set_cfg_modes(struct vpe_ctx *ctx)
* Cfg Mode 1: YUV422 source, disable upsampler, DEI is de-interlacing.
*/
- if (fmt->fourcc == V4L2_PIX_FMT_NV12)
+ if (fmt->fourcc == V4L2_PIX_FMT_NV12 ||
+ fmt->fourcc == V4L2_PIX_FMT_NV21)
cfg_mode = 0;
write_field(us1_reg0, cfg_mode, VPE_US_MODE_MASK, VPE_US_MODE_SHIFT);
@@ -696,7 +710,8 @@ static void set_line_modes(struct vpe_ctx *ctx)
struct vpe_fmt *fmt = ctx->q_data[Q_DATA_SRC].fmt;
int line_mode = 1;
- if (fmt->fourcc == V4L2_PIX_FMT_NV12)
+ if (fmt->fourcc == V4L2_PIX_FMT_NV12 ||
+ fmt->fourcc == V4L2_PIX_FMT_NV21)
line_mode = 0; /* double lines to line buffer */
/* regs for now */
@@ -741,11 +756,12 @@ static void set_src_registers(struct vpe_ctx *ctx)
static void set_dst_registers(struct vpe_ctx *ctx)
{
struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
- enum v4l2_colorspace clrspc = ctx->q_data[Q_DATA_DST].colorspace;
struct vpe_fmt *fmt = ctx->q_data[Q_DATA_DST].fmt;
+ const struct v4l2_format_info *finfo;
u32 val = 0;
- if (clrspc == V4L2_COLORSPACE_SRGB) {
+ finfo = v4l2_format_info(fmt->fourcc);
+ if (v4l2_is_format_rgb(finfo)) {
val |= VPE_RGB_OUT_SELECT;
vpdma_set_bg_color(ctx->dev->vpdma,
(struct vpdma_data_format *)fmt->vpdma_fmt[0], 0xff);
@@ -758,7 +774,8 @@ static void set_dst_registers(struct vpe_ctx *ctx)
*/
val |= VPE_DS_SRC_DEI_SCALER | VPE_CSC_SRC_DEI_SCALER;
- if (fmt->fourcc != V4L2_PIX_FMT_NV12)
+ if (fmt->fourcc != V4L2_PIX_FMT_NV12 &&
+ fmt->fourcc != V4L2_PIX_FMT_NV21)
val |= VPE_DS_BYPASS;
mmr_adb->out_fmt_reg[0] = val;
@@ -847,11 +864,13 @@ static int set_srcdst_params(struct vpe_ctx *ctx)
unsigned int src_h = s_q_data->c_rect.height;
unsigned int dst_w = d_q_data->c_rect.width;
unsigned int dst_h = d_q_data->c_rect.height;
+ struct v4l2_pix_format_mplane *spix;
size_t mv_buf_size;
int ret;
ctx->sequence = 0;
ctx->field = V4L2_FIELD_TOP;
+ spix = &s_q_data->format.fmt.pix_mp;
if ((s_q_data->flags & Q_IS_INTERLACED) &&
!(d_q_data->flags & Q_IS_INTERLACED)) {
@@ -866,9 +885,9 @@ static int set_srcdst_params(struct vpe_ctx *ctx)
* extra space will not be used by the de-interlacer, but will
* ensure that vpdma operates correctly
*/
- bytes_per_line = ALIGN((s_q_data->width * mv->depth) >> 3,
- VPDMA_STRIDE_ALIGN);
- mv_buf_size = bytes_per_line * s_q_data->height;
+ bytes_per_line = ALIGN((spix->width * mv->depth) >> 3,
+ VPDMA_STRIDE_ALIGN);
+ mv_buf_size = bytes_per_line * spix->height;
ctx->deinterlacing = true;
src_h <<= 1;
@@ -888,7 +907,7 @@ static int set_srcdst_params(struct vpe_ctx *ctx)
set_dei_regs(ctx);
csc_set_coeff(ctx->dev->csc, &mmr_adb->csc_regs[0],
- s_q_data->colorspace, d_q_data->colorspace);
+ &s_q_data->format, &d_q_data->format);
sc_set_hs_coeffs(ctx->dev->sc, ctx->sc_coeff_h.addr, src_w, dst_w);
sc_set_vs_coeffs(ctx->dev->sc, ctx->sc_coeff_v.addr, src_h, dst_h);
@@ -901,14 +920,6 @@ static int set_srcdst_params(struct vpe_ctx *ctx)
}
/*
- * Return the vpe_ctx structure for a given struct file
- */
-static struct vpe_ctx *file2ctx(struct file *file)
-{
- return container_of(file->private_data, struct vpe_ctx, fh);
-}
-
-/*
* mem2mem callbacks
*/
@@ -1010,27 +1021,33 @@ static void add_out_dtd(struct vpe_ctx *ctx, int port)
struct vpe_fmt *fmt = q_data->fmt;
const struct vpdma_data_format *vpdma_fmt;
int mv_buf_selector = !ctx->src_mv_buf_selector;
+ struct v4l2_pix_format_mplane *pix;
dma_addr_t dma_addr;
u32 flags = 0;
u32 offset = 0;
+ u32 stride;
if (port == VPE_PORT_MV_OUT) {
vpdma_fmt = &vpdma_misc_fmts[VPDMA_DATA_FMT_MV];
dma_addr = ctx->mv_buf_dma[mv_buf_selector];
q_data = &ctx->q_data[Q_DATA_SRC];
+ pix = &q_data->format.fmt.pix_mp;
+ stride = ALIGN((pix->width * vpdma_fmt->depth) >> 3,
+ VPDMA_STRIDE_ALIGN);
} else {
/* to incorporate interleaved formats */
int plane = fmt->coplanar ? p_data->vb_part : 0;
+ pix = &q_data->format.fmt.pix_mp;
vpdma_fmt = fmt->vpdma_fmt[plane];
/*
* If we are using a single plane buffer and
* we need to set a separate vpdma chroma channel.
*/
- if (q_data->nplanes == 1 && plane) {
+ if (pix->num_planes == 1 && plane) {
dma_addr = vb2_dma_contig_plane_dma_addr(vb, 0);
/* Compute required offset */
- offset = q_data->bytesperline[0] * q_data->height;
+ offset = pix->plane_fmt[0].bytesperline * pix->height;
} else {
dma_addr = vb2_dma_contig_plane_dma_addr(vb, plane);
/* Use address as is, no offset */
@@ -1044,6 +1061,7 @@ static void add_out_dtd(struct vpe_ctx *ctx, int port)
}
/* Apply the offset */
dma_addr += offset;
+ stride = pix->plane_fmt[VPE_LUMA].bytesperline;
}
if (q_data->flags & Q_DATA_FRAME_1D)
@@ -1054,8 +1072,8 @@ static void add_out_dtd(struct vpe_ctx *ctx, int port)
vpdma_set_max_size(ctx->dev->vpdma, VPDMA_MAX_SIZE1,
MAX_W, MAX_H);
- vpdma_add_out_dtd(&ctx->desc_list, q_data->width,
- q_data->bytesperline[VPE_LUMA], &q_data->c_rect,
+ vpdma_add_out_dtd(&ctx->desc_list, pix->width,
+ stride, &q_data->c_rect,
vpdma_fmt, dma_addr, MAX_OUT_WIDTH_REG1,
MAX_OUT_HEIGHT_REG1, p_data->channel, flags);
}
@@ -1067,6 +1085,7 @@ static void add_in_dtd(struct vpe_ctx *ctx, int port)
struct vb2_buffer *vb = &ctx->src_vbs[p_data->vb_index]->vb2_buf;
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct vpe_fmt *fmt = q_data->fmt;
+ struct v4l2_pix_format_mplane *pix;
const struct vpdma_data_format *vpdma_fmt;
int mv_buf_selector = ctx->src_mv_buf_selector;
int field = vbuf->field == V4L2_FIELD_BOTTOM;
@@ -1074,10 +1093,14 @@ static void add_in_dtd(struct vpe_ctx *ctx, int port)
dma_addr_t dma_addr;
u32 flags = 0;
u32 offset = 0;
+ u32 stride;
+ pix = &q_data->format.fmt.pix_mp;
if (port == VPE_PORT_MV_IN) {
vpdma_fmt = &vpdma_misc_fmts[VPDMA_DATA_FMT_MV];
dma_addr = ctx->mv_buf_dma[mv_buf_selector];
+ stride = ALIGN((pix->width * vpdma_fmt->depth) >> 3,
+ VPDMA_STRIDE_ALIGN);
} else {
/* to incorporate interleaved formats */
int plane = fmt->coplanar ? p_data->vb_part : 0;
@@ -1087,10 +1110,10 @@ static void add_in_dtd(struct vpe_ctx *ctx, int port)
* If we are using a single plane buffer and
* we need to set a separate vpdma chroma channel.
*/
- if (q_data->nplanes == 1 && plane) {
+ if (pix->num_planes == 1 && plane) {
dma_addr = vb2_dma_contig_plane_dma_addr(vb, 0);
/* Compute required offset */
- offset = q_data->bytesperline[0] * q_data->height;
+ offset = pix->plane_fmt[0].bytesperline * pix->height;
} else {
dma_addr = vb2_dma_contig_plane_dma_addr(vb, plane);
/* Use address as is, no offset */
@@ -1104,26 +1127,39 @@ static void add_in_dtd(struct vpe_ctx *ctx, int port)
}
/* Apply the offset */
dma_addr += offset;
+ stride = pix->plane_fmt[VPE_LUMA].bytesperline;
- if (q_data->flags & Q_DATA_INTERLACED_SEQ_TB) {
- /*
- * Use top or bottom field from same vb alternately
- * f,f-1,f-2 = TBT when seq is even
- * f,f-1,f-2 = BTB when seq is odd
- */
- field = (p_data->vb_index + (ctx->sequence % 2)) % 2;
+ /*
+ * The field used in the VPDMA descriptor is 0 (top) or 1 (bottom).
+ * Use the top or bottom field from the same vb alternately: for
+ * each de-interlacing operation, the field order of f, f-1, f-2
+ * should be either TBT or BTB.
+ */
+ if (q_data->flags & Q_DATA_INTERLACED_SEQ_TB ||
+ q_data->flags & Q_DATA_INTERLACED_SEQ_BT) {
+ /* Select initial value based on format */
+ if (q_data->flags & Q_DATA_INTERLACED_SEQ_BT)
+ field = 1;
+ else
+ field = 0;
+
+ /* Toggle for each vb_index and each operation */
+ field = (field + p_data->vb_index + ctx->sequence) % 2;
if (field) {
- /*
- * bottom field of a SEQ_TB buffer
- * Skip the top field data by
- */
- int height = q_data->height / 2;
- int bpp = fmt->fourcc == V4L2_PIX_FMT_NV12 ?
- 1 : (vpdma_fmt->depth >> 3);
+ int height = pix->height / 2;
+ int bpp;
+
+ if (fmt->fourcc == V4L2_PIX_FMT_NV12 ||
+ fmt->fourcc == V4L2_PIX_FMT_NV21)
+ bpp = 1;
+ else
+ bpp = vpdma_fmt->depth >> 3;
+
if (plane)
height /= 2;
- dma_addr += q_data->width * height * bpp;
+
+ dma_addr += pix->width * height * bpp;
}
}
}
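The toggle above can be read as the following standalone sketch
(hypothetical helper, assuming the driver's 0 = top / 1 = bottom
convention):

	/* Hypothetical helper mirroring the SEQ_TB/SEQ_BT field toggle */
	static int vpe_seq_field(bool seq_bt, int vb_index,
				 unsigned int sequence)
	{
		/* SEQ_BT buffers store the bottom field first */
		int field = seq_bt ? 1 : 0;

		/* toggle per source buffer index and per operation */
		return (field + vb_index + sequence) % 2;
	}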
@@ -1136,13 +1172,14 @@ static void add_in_dtd(struct vpe_ctx *ctx, int port)
frame_width = q_data->c_rect.width;
frame_height = q_data->c_rect.height;
- if (p_data->vb_part && fmt->fourcc == V4L2_PIX_FMT_NV12)
+ if (p_data->vb_part && (fmt->fourcc == V4L2_PIX_FMT_NV12 ||
+ fmt->fourcc == V4L2_PIX_FMT_NV21))
frame_height /= 2;
- vpdma_add_in_dtd(&ctx->desc_list, q_data->width,
- q_data->bytesperline[VPE_LUMA], &q_data->c_rect,
- vpdma_fmt, dma_addr, p_data->channel, field, flags, frame_width,
- frame_height, 0, 0);
+ vpdma_add_in_dtd(&ctx->desc_list, pix->width, stride,
+ &q_data->c_rect, vpdma_fmt, dma_addr,
+ p_data->channel, field, flags, frame_width,
+ frame_height, 0, 0);
}
/*
@@ -1176,13 +1213,18 @@ static void device_run(void *priv)
struct sc_data *sc = ctx->dev->sc;
struct vpe_q_data *d_q_data = &ctx->q_data[Q_DATA_DST];
struct vpe_q_data *s_q_data = &ctx->q_data[Q_DATA_SRC];
-
- if (ctx->deinterlacing && s_q_data->flags & Q_DATA_INTERLACED_SEQ_TB &&
- ctx->sequence % 2 == 0) {
- /* When using SEQ_TB buffers, When using it first time,
- * No need to remove the buffer as the next field is present
- * in the same buffer. (so that job_ready won't fail)
- * It will be removed when using bottom field
+ const struct v4l2_format_info *d_finfo;
+
+ d_finfo = v4l2_format_info(d_q_data->fmt->fourcc);
+
+ if (ctx->deinterlacing && s_q_data->flags & Q_IS_SEQ_XX &&
+ ctx->sequence % 2 == 0) {
+ /* When using SEQ_XX type buffers, each buffer has two fields
+ * (top & bottom), so consuming one buffer actually yields two
+ * fields. Alternate between two operations:
+ * Even : consume one field but DO NOT REMOVE from queue
+ * Odd : consume the other field and REMOVE from queue
ctx->src_vbs[0] = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
WARN_ON(ctx->src_vbs[0] == NULL);
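A simplified sketch of that even/odd alternation (hypothetical helper;
both m2m calls are the core API used by the driver):

	/* Hypothetical: peek on even operations, dequeue on odd ones */
	static struct vb2_v4l2_buffer *vpe_next_seq_buf(struct vpe_ctx *ctx)
	{
		if (ctx->sequence % 2 == 0)
			return v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);

		return v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
	}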
@@ -1246,7 +1288,7 @@ static void device_run(void *priv)
if (ctx->deinterlacing)
add_out_dtd(ctx, VPE_PORT_MV_OUT);
- if (d_q_data->colorspace == V4L2_COLORSPACE_SRGB) {
+ if (v4l2_is_format_rgb(d_finfo)) {
add_out_dtd(ctx, VPE_PORT_RGB_OUT);
} else {
add_out_dtd(ctx, VPE_PORT_LUMA_OUT);
@@ -1288,7 +1330,7 @@ static void device_run(void *priv)
}
/* sync on channel control descriptors for output ports */
- if (d_q_data->colorspace == V4L2_COLORSPACE_SRGB) {
+ if (v4l2_is_format_rgb(d_finfo)) {
vpdma_add_sync_on_channel_ctd(&ctx->desc_list,
VPE_CHAN_RGB_OUT);
} else {
@@ -1391,9 +1433,6 @@ static irqreturn_t vpe_irq(int irq_vpe, void *data)
/* the previous dst mv buffer becomes the next src mv buffer */
ctx->src_mv_buf_selector = !ctx->src_mv_buf_selector;
- if (ctx->aborting)
- goto finished;
-
s_vb = ctx->src_vbs[0];
d_vb = ctx->dst_vb;
@@ -1404,6 +1443,7 @@ static irqreturn_t vpe_irq(int irq_vpe, void *data)
d_vb->timecode = s_vb->timecode;
d_vb->sequence = ctx->sequence;
+ s_vb->sequence = ctx->sequence;
d_q_data = &ctx->q_data[Q_DATA_DST];
if (d_q_data->flags & Q_IS_INTERLACED) {
@@ -1457,6 +1497,9 @@ static irqreturn_t vpe_irq(int irq_vpe, void *data)
ctx->src_vbs[0] = NULL;
ctx->dst_vb = NULL;
+ if (ctx->aborting)
+ goto finished;
+
ctx->bufs_completed++;
if (ctx->bufs_completed < ctx->bufs_per_job && job_ready(ctx)) {
device_run(ctx);
@@ -1519,38 +1562,32 @@ static int vpe_enum_fmt(struct file *file, void *priv,
static int vpe_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
- struct vpe_ctx *ctx = file2ctx(file);
+ struct vpe_ctx *ctx = file->private_data;
struct vb2_queue *vq;
struct vpe_q_data *q_data;
- int i;
vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
if (!vq)
return -EINVAL;
q_data = get_q_data(ctx, f->type);
+ if (!q_data)
+ return -EINVAL;
- pix->width = q_data->width;
- pix->height = q_data->height;
- pix->pixelformat = q_data->fmt->fourcc;
- pix->field = q_data->field;
+ *f = q_data->format;
- if (V4L2_TYPE_IS_OUTPUT(f->type)) {
- pix->colorspace = q_data->colorspace;
- } else {
+ if (!V4L2_TYPE_IS_OUTPUT(f->type)) {
struct vpe_q_data *s_q_data;
+ struct v4l2_pix_format_mplane *spix;
- /* get colorspace from the source queue */
+ /* get colorimetry from the source queue */
s_q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ spix = &s_q_data->format.fmt.pix_mp;
- pix->colorspace = s_q_data->colorspace;
- }
-
- pix->num_planes = q_data->nplanes;
-
- for (i = 0; i < pix->num_planes; i++) {
- pix->plane_fmt[i].bytesperline = q_data->bytesperline[i];
- pix->plane_fmt[i].sizeimage = q_data->sizeimage[i];
+ pix->colorspace = spix->colorspace;
+ pix->xfer_func = spix->xfer_func;
+ pix->ycbcr_enc = spix->ycbcr_enc;
+ pix->quantization = spix->quantization;
}
return 0;
@@ -1564,15 +1601,18 @@ static int __vpe_try_fmt(struct vpe_ctx *ctx, struct v4l2_format *f,
unsigned int w_align;
int i, depth, depth_bytes, height;
unsigned int stride = 0;
+ const struct v4l2_format_info *finfo;
if (!fmt || !(fmt->types & type)) {
- vpe_err(ctx->dev, "Fourcc format (0x%08x) invalid.\n",
+ vpe_dbg(ctx->dev, "Fourcc format (0x%08x) invalid.\n",
pix->pixelformat);
- return -EINVAL;
+ fmt = __find_format(V4L2_PIX_FMT_YUYV);
}
- if (pix->field != V4L2_FIELD_NONE && pix->field != V4L2_FIELD_ALTERNATE
- && pix->field != V4L2_FIELD_SEQ_TB)
+ if (pix->field != V4L2_FIELD_NONE &&
+ pix->field != V4L2_FIELD_ALTERNATE &&
+ pix->field != V4L2_FIELD_SEQ_TB &&
+ pix->field != V4L2_FIELD_SEQ_BT)
pix->field = V4L2_FIELD_NONE;
depth = fmt->vpdma_fmt[VPE_LUMA]->depth;
@@ -1615,27 +1655,25 @@ static int __vpe_try_fmt(struct vpe_ctx *ctx, struct v4l2_format *f,
&pix->height, MIN_H, MAX_H, H_ALIGN,
S_ALIGN);
- if (!pix->num_planes)
+ if (!pix->num_planes || pix->num_planes > 2)
pix->num_planes = fmt->coplanar ? 2 : 1;
else if (pix->num_planes > 1 && !fmt->coplanar)
pix->num_planes = 1;
pix->pixelformat = fmt->fourcc;
+ finfo = v4l2_format_info(fmt->fourcc);
/*
* For the actual image parameters, we need to consider the field
- * height of the image for SEQ_TB buffers.
+ * height of the image for SEQ_XX buffers.
*/
- if (pix->field == V4L2_FIELD_SEQ_TB)
+ if (pix->field == V4L2_FIELD_SEQ_TB || pix->field == V4L2_FIELD_SEQ_BT)
height = pix->height / 2;
else
height = pix->height;
if (!pix->colorspace) {
- if (fmt->fourcc == V4L2_PIX_FMT_RGB24 ||
- fmt->fourcc == V4L2_PIX_FMT_BGR24 ||
- fmt->fourcc == V4L2_PIX_FMT_RGB32 ||
- fmt->fourcc == V4L2_PIX_FMT_BGR32) {
+ if (v4l2_is_format_rgb(finfo)) {
pix->colorspace = V4L2_COLORSPACE_SRGB;
} else {
if (height > 1280) /* HD */
@@ -1654,6 +1692,10 @@ static int __vpe_try_fmt(struct vpe_ctx *ctx, struct v4l2_format *f,
if (stride > plane_fmt->bytesperline)
plane_fmt->bytesperline = stride;
+ plane_fmt->bytesperline = clamp_t(u32, plane_fmt->bytesperline,
+ stride,
+ VPDMA_MAX_STRIDE);
+
plane_fmt->bytesperline = ALIGN(plane_fmt->bytesperline,
VPDMA_STRIDE_ALIGN);
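The added clamp amounts to the following stride policy; a minimal sketch
assuming the VPDMA_* macros from vpdma.h (the helper itself is
hypothetical):

	/* Hypothetical: bound a requested line stride for VPDMA */
	static u32 vpe_bound_stride(u32 requested, u32 min_stride)
	{
		/* never below what the width needs, never above the HW max */
		u32 bpl = clamp_t(u32, requested, min_stride, VPDMA_MAX_STRIDE);

		/* VPDMA requires 16-byte aligned line strides */
		return ALIGN(bpl, VPDMA_STRIDE_ALIGN);
	}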
@@ -1679,7 +1721,7 @@ static int __vpe_try_fmt(struct vpe_ctx *ctx, struct v4l2_format *f,
static int vpe_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
- struct vpe_ctx *ctx = file2ctx(file);
+ struct vpe_ctx *ctx = file->private_data;
struct vpe_fmt *fmt = find_format(f);
if (V4L2_TYPE_IS_OUTPUT(f->type))
@@ -1691,10 +1733,9 @@ static int vpe_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
static int __vpe_s_fmt(struct vpe_ctx *ctx, struct v4l2_format *f)
{
struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
- struct v4l2_plane_pix_format *plane_fmt;
+ struct v4l2_pix_format_mplane *qpix;
struct vpe_q_data *q_data;
struct vb2_queue *vq;
- int i;
vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
if (!vq)
@@ -1709,42 +1750,34 @@ static int __vpe_s_fmt(struct vpe_ctx *ctx, struct v4l2_format *f)
if (!q_data)
return -EINVAL;
+ qpix = &q_data->format.fmt.pix_mp;
q_data->fmt = find_format(f);
- q_data->width = pix->width;
- q_data->height = pix->height;
- q_data->colorspace = pix->colorspace;
- q_data->field = pix->field;
- q_data->nplanes = pix->num_planes;
-
- for (i = 0; i < pix->num_planes; i++) {
- plane_fmt = &pix->plane_fmt[i];
-
- q_data->bytesperline[i] = plane_fmt->bytesperline;
- q_data->sizeimage[i] = plane_fmt->sizeimage;
- }
+ q_data->format = *f;
q_data->c_rect.left = 0;
q_data->c_rect.top = 0;
- q_data->c_rect.width = q_data->width;
- q_data->c_rect.height = q_data->height;
+ q_data->c_rect.width = pix->width;
+ q_data->c_rect.height = pix->height;
- if (q_data->field == V4L2_FIELD_ALTERNATE)
+ if (qpix->field == V4L2_FIELD_ALTERNATE)
q_data->flags |= Q_DATA_INTERLACED_ALTERNATE;
- else if (q_data->field == V4L2_FIELD_SEQ_TB)
+ else if (qpix->field == V4L2_FIELD_SEQ_TB)
q_data->flags |= Q_DATA_INTERLACED_SEQ_TB;
+ else if (qpix->field == V4L2_FIELD_SEQ_BT)
+ q_data->flags |= Q_DATA_INTERLACED_SEQ_BT;
else
q_data->flags &= ~Q_IS_INTERLACED;
- /* the crop height is halved for the case of SEQ_TB buffers */
- if (q_data->flags & Q_DATA_INTERLACED_SEQ_TB)
+ /* the crop height is halved for the case of SEQ_XX buffers */
+ if (q_data->flags & Q_IS_SEQ_XX)
q_data->c_rect.height /= 2;
vpe_dbg(ctx->dev, "Setting format for type %d, wxh: %dx%d, fmt: %d bpl_y %d",
- f->type, q_data->width, q_data->height, q_data->fmt->fourcc,
- q_data->bytesperline[VPE_LUMA]);
- if (q_data->nplanes == 2)
+ f->type, pix->width, pix->height, pix->pixelformat,
+ pix->plane_fmt[0].bytesperline);
+ if (pix->num_planes == 2)
vpe_dbg(ctx->dev, " bpl_uv %d\n",
- q_data->bytesperline[VPE_CHROMA]);
+ pix->plane_fmt[1].bytesperline);
return 0;
}
@@ -1752,7 +1785,7 @@ static int __vpe_s_fmt(struct vpe_ctx *ctx, struct v4l2_format *f)
static int vpe_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
int ret;
- struct vpe_ctx *ctx = file2ctx(file);
+ struct vpe_ctx *ctx = file->private_data;
ret = vpe_try_fmt(file, priv, f);
if (ret)
@@ -1773,6 +1806,7 @@ static int vpe_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
static int __vpe_try_selection(struct vpe_ctx *ctx, struct v4l2_selection *s)
{
struct vpe_q_data *q_data;
+ struct v4l2_pix_format_mplane *pix;
int height;
if ((s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) &&
@@ -1783,6 +1817,8 @@ static int __vpe_try_selection(struct vpe_ctx *ctx, struct v4l2_selection *s)
if (!q_data)
return -EINVAL;
+ pix = &q_data->format.fmt.pix_mp;
+
switch (s->target) {
case V4L2_SEL_TGT_COMPOSE:
/*
@@ -1809,27 +1845,27 @@ static int __vpe_try_selection(struct vpe_ctx *ctx, struct v4l2_selection *s)
}
/*
- * For SEQ_TB buffers, crop height should be less than the height of
- * the field height, not the buffer height
+ * For SEQ_XX buffers, the crop height should be less than the
+ * field height, not the buffer height
*/
- if (q_data->flags & Q_DATA_INTERLACED_SEQ_TB)
- height = q_data->height / 2;
+ if (q_data->flags & Q_IS_SEQ_XX)
+ height = pix->height / 2;
else
- height = q_data->height;
+ height = pix->height;
if (s->r.top < 0 || s->r.left < 0) {
vpe_err(ctx->dev, "negative values for top and left\n");
s->r.top = s->r.left = 0;
}
- v4l_bound_align_image(&s->r.width, MIN_W, q_data->width, 1,
+ v4l_bound_align_image(&s->r.width, MIN_W, pix->width, 1,
&s->r.height, MIN_H, height, H_ALIGN, S_ALIGN);
/* adjust left/top if cropping rectangle is out of bounds */
- if (s->r.left + s->r.width > q_data->width)
- s->r.left = q_data->width - s->r.width;
- if (s->r.top + s->r.height > q_data->height)
- s->r.top = q_data->height - s->r.height;
+ if (s->r.left + s->r.width > pix->width)
+ s->r.left = pix->width - s->r.width;
+ if (s->r.top + s->r.height > pix->height)
+ s->r.top = pix->height - s->r.height;
return 0;
}
@@ -1837,8 +1873,9 @@ static int __vpe_try_selection(struct vpe_ctx *ctx, struct v4l2_selection *s)
static int vpe_g_selection(struct file *file, void *fh,
struct v4l2_selection *s)
{
- struct vpe_ctx *ctx = file2ctx(file);
+ struct vpe_ctx *ctx = file->private_data;
struct vpe_q_data *q_data;
+ struct v4l2_pix_format_mplane *pix;
bool use_c_rect = false;
if ((s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) &&
@@ -1849,6 +1886,8 @@ static int vpe_g_selection(struct file *file, void *fh,
if (!q_data)
return -EINVAL;
+ pix = &q_data->format.fmt.pix_mp;
+
switch (s->target) {
case V4L2_SEL_TGT_COMPOSE_DEFAULT:
case V4L2_SEL_TGT_COMPOSE_BOUNDS:
@@ -1887,8 +1926,8 @@ static int vpe_g_selection(struct file *file, void *fh,
*/
s->r.left = 0;
s->r.top = 0;
- s->r.width = q_data->width;
- s->r.height = q_data->height;
+ s->r.width = pix->width;
+ s->r.height = pix->height;
}
return 0;
@@ -1898,7 +1937,7 @@ static int vpe_g_selection(struct file *file, void *fh,
static int vpe_s_selection(struct file *file, void *fh,
struct v4l2_selection *s)
{
- struct vpe_ctx *ctx = file2ctx(file);
+ struct vpe_ctx *ctx = file->private_data;
struct vpe_q_data *q_data;
struct v4l2_selection sel = *s;
int ret;
@@ -1991,17 +2030,21 @@ static int vpe_queue_setup(struct vb2_queue *vq,
int i;
struct vpe_ctx *ctx = vb2_get_drv_priv(vq);
struct vpe_q_data *q_data;
+ struct v4l2_pix_format_mplane *pix;
q_data = get_q_data(ctx, vq->type);
+ if (!q_data)
+ return -EINVAL;
- *nplanes = q_data->nplanes;
+ pix = &q_data->format.fmt.pix_mp;
+ *nplanes = pix->num_planes;
for (i = 0; i < *nplanes; i++)
- sizes[i] = q_data->sizeimage[i];
+ sizes[i] = pix->plane_fmt[i].sizeimage;
vpe_dbg(ctx->dev, "get %d buffer(s) of size %d", *nbuffers,
sizes[VPE_LUMA]);
- if (q_data->nplanes == 2)
+ if (*nplanes == 2)
vpe_dbg(ctx->dev, " and %d\n", sizes[VPE_CHROMA]);
return 0;
@@ -2012,12 +2055,16 @@ static int vpe_buf_prepare(struct vb2_buffer *vb)
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct vpe_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
struct vpe_q_data *q_data;
- int i, num_planes;
+ struct v4l2_pix_format_mplane *pix;
+ int i;
vpe_dbg(ctx->dev, "type: %d\n", vb->vb2_queue->type);
q_data = get_q_data(ctx, vb->vb2_queue->type);
- num_planes = q_data->nplanes;
+ if (!q_data)
+ return -EINVAL;
+
+ pix = &q_data->format.fmt.pix_mp;
if (vb->vb2_queue->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
if (!(q_data->flags & Q_IS_INTERLACED)) {
@@ -2025,23 +2072,24 @@ static int vpe_buf_prepare(struct vb2_buffer *vb)
} else {
if (vbuf->field != V4L2_FIELD_TOP &&
vbuf->field != V4L2_FIELD_BOTTOM &&
- vbuf->field != V4L2_FIELD_SEQ_TB)
+ vbuf->field != V4L2_FIELD_SEQ_TB &&
+ vbuf->field != V4L2_FIELD_SEQ_BT)
return -EINVAL;
}
}
- for (i = 0; i < num_planes; i++) {
- if (vb2_plane_size(vb, i) < q_data->sizeimage[i]) {
+ for (i = 0; i < pix->num_planes; i++) {
+ if (vb2_plane_size(vb, i) < pix->plane_fmt[i].sizeimage) {
vpe_err(ctx->dev,
"data will not fit into plane (%lu < %lu)\n",
vb2_plane_size(vb, i),
- (long) q_data->sizeimage[i]);
+ (long)pix->plane_fmt[i].sizeimage);
return -EINVAL;
}
}
- for (i = 0; i < num_planes; i++)
- vb2_set_plane_payload(vb, i, q_data->sizeimage[i]);
+ for (i = 0; i < pix->num_planes; i++)
+ vb2_set_plane_payload(vb, i, pix->plane_fmt[i].sizeimage);
return 0;
}
@@ -2226,6 +2274,7 @@ static int vpe_open(struct file *file)
struct vpe_q_data *s_q_data;
struct v4l2_ctrl_handler *hdl;
struct vpe_ctx *ctx;
+ struct v4l2_pix_format_mplane *pix;
int ret;
vpe_dbg(dev, "vpe_open\n");
@@ -2261,7 +2310,7 @@ static int vpe_open(struct file *file)
init_adb_hdrs(ctx);
v4l2_fh_init(&ctx->fh, video_devdata(file));
- file->private_data = &ctx->fh;
+ file->private_data = ctx;
hdl = &ctx->hdl;
v4l2_ctrl_handler_init(hdl, 1);
@@ -2274,23 +2323,32 @@ static int vpe_open(struct file *file)
v4l2_ctrl_handler_setup(hdl);
s_q_data = &ctx->q_data[Q_DATA_SRC];
- s_q_data->fmt = &vpe_formats[2];
- s_q_data->width = 1920;
- s_q_data->height = 1080;
- s_q_data->nplanes = 1;
- s_q_data->bytesperline[VPE_LUMA] = (s_q_data->width *
+ pix = &s_q_data->format.fmt.pix_mp;
+ s_q_data->fmt = __find_format(V4L2_PIX_FMT_YUYV);
+ pix->pixelformat = s_q_data->fmt->fourcc;
+ s_q_data->format.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ pix->width = 1920;
+ pix->height = 1080;
+ pix->num_planes = 1;
+ pix->plane_fmt[VPE_LUMA].bytesperline = (pix->width *
s_q_data->fmt->vpdma_fmt[VPE_LUMA]->depth) >> 3;
- s_q_data->sizeimage[VPE_LUMA] = (s_q_data->bytesperline[VPE_LUMA] *
- s_q_data->height);
- s_q_data->colorspace = V4L2_COLORSPACE_REC709;
- s_q_data->field = V4L2_FIELD_NONE;
+ pix->plane_fmt[VPE_LUMA].sizeimage =
+ pix->plane_fmt[VPE_LUMA].bytesperline *
+ pix->height;
+ pix->colorspace = V4L2_COLORSPACE_REC709;
+ pix->xfer_func = V4L2_XFER_FUNC_DEFAULT;
+ pix->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ pix->quantization = V4L2_QUANTIZATION_DEFAULT;
+ pix->field = V4L2_FIELD_NONE;
s_q_data->c_rect.left = 0;
s_q_data->c_rect.top = 0;
- s_q_data->c_rect.width = s_q_data->width;
- s_q_data->c_rect.height = s_q_data->height;
+ s_q_data->c_rect.width = pix->width;
+ s_q_data->c_rect.height = pix->height;
s_q_data->flags = 0;
ctx->q_data[Q_DATA_DST] = *s_q_data;
+ ctx->q_data[Q_DATA_DST].format.type =
+ V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
set_dei_shadow_registers(ctx);
set_src_registers(ctx);
@@ -2346,12 +2404,18 @@ free_ctx:
static int vpe_release(struct file *file)
{
struct vpe_dev *dev = video_drvdata(file);
- struct vpe_ctx *ctx = file2ctx(file);
+ struct vpe_ctx *ctx = file->private_data;
vpe_dbg(dev, "releasing instance %p\n", ctx);
mutex_lock(&dev->dev_mutex);
free_mv_buffers(ctx);
+
+ vpdma_unmap_desc_buf(dev->vpdma, &ctx->desc_list.buf);
+ vpdma_unmap_desc_buf(dev->vpdma, &ctx->mmr_adb);
+ vpdma_unmap_desc_buf(dev->vpdma, &ctx->sc_coeff_h);
+ vpdma_unmap_desc_buf(dev->vpdma, &ctx->sc_coeff_v);
+
vpdma_free_desc_list(&ctx->desc_list);
vpdma_free_desc_buf(&ctx->mmr_adb);
@@ -2459,6 +2523,13 @@ static int vpe_probe(struct platform_device *pdev)
struct vpe_dev *dev;
int ret, irq, func;
+ ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(&pdev->dev,
+ "32-bit consistent DMA enable failed\n");
+ return ret;
+ }
+
dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
if (!dev)
return -ENOMEM;
@@ -2473,7 +2544,12 @@ static int vpe_probe(struct platform_device *pdev)
mutex_init(&dev->dev_mutex);
dev->res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- "vpe_top");
+ "vpe_top");
+ if (!dev->res) {
+ dev_err(&pdev->dev, "missing 'vpe_top' resource data\n");
+ return -ENODEV;
+ }
+
/*
* HACK: we get resource info from device tree in the form of a list of
* VPE sub blocks, the driver currently uses only the base of vpe_top
@@ -2568,7 +2644,7 @@ static int vpe_remove(struct platform_device *pdev)
#if defined(CONFIG_OF)
static const struct of_device_id vpe_of_match[] = {
{
- .compatible = "ti,vpe",
+ .compatible = "ti,dra7-vpe",
},
{},
};
diff --git a/drivers/media/platform/vicodec/vicodec-core.c b/drivers/media/platform/vicodec/vicodec-core.c
index 0ee143ae0f6b..82350097503e 100644
--- a/drivers/media/platform/vicodec/vicodec-core.c
+++ b/drivers/media/platform/vicodec/vicodec-core.c
@@ -2139,6 +2139,9 @@ static void vicodec_v4l2_dev_release(struct v4l2_device *v4l2_dev)
v4l2_m2m_release(dev->stateful_enc.m2m_dev);
v4l2_m2m_release(dev->stateful_dec.m2m_dev);
v4l2_m2m_release(dev->stateless_dec.m2m_dev);
+#ifdef CONFIG_MEDIA_CONTROLLER
+ media_device_cleanup(&dev->mdev);
+#endif
kfree(dev);
}
@@ -2250,7 +2253,6 @@ static int vicodec_remove(struct platform_device *pdev)
v4l2_m2m_unregister_media_controller(dev->stateful_enc.m2m_dev);
v4l2_m2m_unregister_media_controller(dev->stateful_dec.m2m_dev);
v4l2_m2m_unregister_media_controller(dev->stateless_dec.m2m_dev);
- media_device_cleanup(&dev->mdev);
#endif
video_unregister_device(&dev->stateful_enc.vfd);
diff --git a/drivers/media/platform/vim2m.c b/drivers/media/platform/vim2m.c
index acd3bd48c7e2..8d6b09623d88 100644
--- a/drivers/media/platform/vim2m.c
+++ b/drivers/media/platform/vim2m.c
@@ -1073,6 +1073,9 @@ static int vim2m_start_streaming(struct vb2_queue *q, unsigned int count)
if (!q_data)
return -EINVAL;
+ if (V4L2_TYPE_IS_OUTPUT(q->type))
+ ctx->aborting = 0;
+
q_data->sequence = 0;
return 0;
}
@@ -1272,6 +1275,9 @@ static void vim2m_device_release(struct video_device *vdev)
v4l2_device_unregister(&dev->v4l2_dev);
v4l2_m2m_release(dev->m2m_dev);
+#ifdef CONFIG_MEDIA_CONTROLLER
+ media_device_cleanup(&dev->mdev);
+#endif
kfree(dev);
}
@@ -1343,6 +1349,7 @@ static int vim2m_probe(struct platform_device *pdev)
if (IS_ERR(dev->m2m_dev)) {
v4l2_err(&dev->v4l2_dev, "Failed to init mem2mem device\n");
ret = PTR_ERR(dev->m2m_dev);
+ dev->m2m_dev = NULL;
goto error_dev;
}
@@ -1395,7 +1402,6 @@ static int vim2m_remove(struct platform_device *pdev)
#ifdef CONFIG_MEDIA_CONTROLLER
media_device_unregister(&dev->mdev);
v4l2_m2m_unregister_media_controller(dev->m2m_dev);
- media_device_cleanup(&dev->mdev);
#endif
video_unregister_device(&dev->vfd);
diff --git a/drivers/media/platform/vimc/Makefile b/drivers/media/platform/vimc/Makefile
index 96d06f030c31..a53b2b532e9f 100644
--- a/drivers/media/platform/vimc/Makefile
+++ b/drivers/media/platform/vimc/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
-vimc-y := vimc-core.o vimc-common.o vimc-streamer.o
-vimc-y := vimc-core.o vimc-common.o vimc-streamer.o
+vimc-y := vimc-core.o vimc-common.o vimc-streamer.o vimc-capture.o \
+ vimc-debayer.o vimc-scaler.o vimc-sensor.o

-obj-$(CONFIG_VIDEO_VIMC) += vimc.o vimc-capture.o vimc-debayer.o \
- vimc-scaler.o vimc-sensor.o
+obj-$(CONFIG_VIDEO_VIMC) += vimc.o
diff --git a/drivers/media/platform/vimc/vimc-capture.c b/drivers/media/platform/vimc/vimc-capture.c
index 1d56b91830ba..76c015898cfd 100644
--- a/drivers/media/platform/vimc/vimc-capture.c
+++ b/drivers/media/platform/vimc/vimc-capture.c
@@ -5,10 +5,6 @@
* Copyright (C) 2015-2017 Helen Koike <helen.fornazier@gmail.com>
*/
-#include <linux/component.h>
-#include <linux/module.h>
-#include <linux/mod_devicetable.h>
-#include <linux/platform_device.h>
#include <media/v4l2-ioctl.h>
#include <media/videobuf2-core.h>
#include <media/videobuf2-vmalloc.h>
@@ -16,12 +12,9 @@
#include "vimc-common.h"
#include "vimc-streamer.h"
-#define VIMC_CAP_DRV_NAME "vimc-capture"
-
struct vimc_cap_device {
struct vimc_ent_device ved;
struct video_device vdev;
- struct device *dev;
struct v4l2_pix_format format;
struct vb2_queue queue;
struct list_head buf_list;
@@ -36,6 +29,7 @@ struct vimc_cap_device {
struct mutex lock;
u32 sequence;
struct vimc_stream stream;
+ struct media_pad pad;
};
static const struct v4l2_pix_format fmt_default = {
@@ -130,7 +124,7 @@ static int vimc_cap_s_fmt_vid_cap(struct file *file, void *priv,
if (ret)
return ret;
- dev_dbg(vcap->dev, "%s: format update: "
+ dev_dbg(vcap->ved.dev, "%s: format update: "
"old:%dx%d (0x%x, %d, %d, %d, %d) "
"new:%dx%d (0x%x, %d, %d, %d, %d)\n", vcap->vdev.name,
/* old */
@@ -306,7 +300,7 @@ static int vimc_cap_buffer_prepare(struct vb2_buffer *vb)
unsigned long size = vcap->format.sizeimage;
if (vb2_plane_size(vb, 0) < size) {
- dev_err(vcap->dev, "%s: buffer too small (%lu < %lu)\n",
+ dev_err(vcap->ved.dev, "%s: buffer too small (%lu < %lu)\n",
vcap->vdev.name, vb2_plane_size(vb, 0), size);
return -EINVAL;
}
@@ -328,7 +322,7 @@ static const struct vb2_ops vimc_cap_qops = {
};
static const struct media_entity_operations vimc_cap_mops = {
- .link_validate = vimc_link_validate,
+ .link_validate = vimc_vdev_link_validate,
};
static void vimc_cap_release(struct video_device *vdev)
@@ -336,19 +330,16 @@ static void vimc_cap_release(struct video_device *vdev)
struct vimc_cap_device *vcap =
container_of(vdev, struct vimc_cap_device, vdev);
- vimc_pads_cleanup(vcap->ved.pads);
+ media_entity_cleanup(vcap->ved.ent);
kfree(vcap);
}
-static void vimc_cap_comp_unbind(struct device *comp, struct device *master,
- void *master_data)
+void vimc_cap_rm(struct vimc_device *vimc, struct vimc_ent_device *ved)
{
- struct vimc_ent_device *ved = dev_get_drvdata(comp);
- struct vimc_cap_device *vcap = container_of(ved, struct vimc_cap_device,
- ved);
+ struct vimc_cap_device *vcap;
+ vcap = container_of(ved, struct vimc_cap_device, ved);
vb2_queue_release(&vcap->queue);
- media_entity_cleanup(ved->ent);
video_unregister_device(&vcap->vdev);
}
@@ -391,11 +382,10 @@ static void *vimc_cap_process_frame(struct vimc_ent_device *ved,
return NULL;
}
-static int vimc_cap_comp_bind(struct device *comp, struct device *master,
- void *master_data)
+struct vimc_ent_device *vimc_cap_add(struct vimc_device *vimc,
+ const char *vcfg_name)
{
- struct v4l2_device *v4l2_dev = master_data;
- struct vimc_platform_data *pdata = comp->platform_data;
+ struct v4l2_device *v4l2_dev = &vimc->v4l2_dev;
const struct vimc_pix_map *vpix;
struct vimc_cap_device *vcap;
struct video_device *vdev;
@@ -405,23 +395,16 @@ static int vimc_cap_comp_bind(struct device *comp, struct device *master,
/* Allocate the vimc_cap_device struct */
vcap = kzalloc(sizeof(*vcap), GFP_KERNEL);
if (!vcap)
- return -ENOMEM;
-
- /* Allocate the pads */
- vcap->ved.pads =
- vimc_pads_init(1, (const unsigned long[1]) {MEDIA_PAD_FL_SINK});
- if (IS_ERR(vcap->ved.pads)) {
- ret = PTR_ERR(vcap->ved.pads);
- goto err_free_vcap;
- }
+ return NULL;
/* Initialize the media entity */
- vcap->vdev.entity.name = pdata->entity_name;
+ vcap->vdev.entity.name = vcfg_name;
vcap->vdev.entity.function = MEDIA_ENT_F_IO_V4L;
+ vcap->pad.flags = MEDIA_PAD_FL_SINK;
ret = media_entity_pads_init(&vcap->vdev.entity,
- 1, vcap->ved.pads);
+ 1, &vcap->pad);
if (ret)
- goto err_clean_pads;
+ goto err_free_vcap;
/* Initialize the lock */
mutex_init(&vcap->lock);
@@ -440,8 +423,8 @@ static int vimc_cap_comp_bind(struct device *comp, struct device *master,
ret = vb2_queue_init(q);
if (ret) {
- dev_err(comp, "%s: vb2 queue init failed (err=%d)\n",
- pdata->entity_name, ret);
+ dev_err(&vimc->pdev.dev, "%s: vb2 queue init failed (err=%d)\n",
+ vcfg_name, ret);
goto err_clean_m_ent;
}
@@ -460,8 +443,7 @@ static int vimc_cap_comp_bind(struct device *comp, struct device *master,
vcap->ved.ent = &vcap->vdev.entity;
vcap->ved.process_frame = vimc_cap_process_frame;
vcap->ved.vdev_get_format = vimc_cap_get_format;
- dev_set_drvdata(comp, &vcap->ved);
- vcap->dev = comp;
+ vcap->ved.dev = &vimc->pdev.dev;
/* Initialize the video_device struct */
vdev = &vcap->vdev;
@@ -474,68 +456,25 @@ static int vimc_cap_comp_bind(struct device *comp, struct device *master,
vdev->queue = q;
vdev->v4l2_dev = v4l2_dev;
vdev->vfl_dir = VFL_DIR_RX;
- strscpy(vdev->name, pdata->entity_name, sizeof(vdev->name));
+ strscpy(vdev->name, vcfg_name, sizeof(vdev->name));
video_set_drvdata(vdev, &vcap->ved);
/* Register the video_device with the v4l2 and the media framework */
ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
if (ret) {
- dev_err(comp, "%s: video register failed (err=%d)\n",
+ dev_err(&vimc->pdev.dev, "%s: video register failed (err=%d)\n",
vcap->vdev.name, ret);
goto err_release_queue;
}
- return 0;
+ return &vcap->ved;
err_release_queue:
vb2_queue_release(q);
err_clean_m_ent:
media_entity_cleanup(&vcap->vdev.entity);
-err_clean_pads:
- vimc_pads_cleanup(vcap->ved.pads);
err_free_vcap:
kfree(vcap);
- return ret;
-}
-
-static const struct component_ops vimc_cap_comp_ops = {
- .bind = vimc_cap_comp_bind,
- .unbind = vimc_cap_comp_unbind,
-};
-
-static int vimc_cap_probe(struct platform_device *pdev)
-{
- return component_add(&pdev->dev, &vimc_cap_comp_ops);
-}
-
-static int vimc_cap_remove(struct platform_device *pdev)
-{
- component_del(&pdev->dev, &vimc_cap_comp_ops);
-
- return 0;
+ return NULL;
}
-
-static const struct platform_device_id vimc_cap_driver_ids[] = {
- {
- .name = VIMC_CAP_DRV_NAME,
- },
- { }
-};
-
-static struct platform_driver vimc_cap_pdrv = {
- .probe = vimc_cap_probe,
- .remove = vimc_cap_remove,
- .id_table = vimc_cap_driver_ids,
- .driver = {
- .name = VIMC_CAP_DRV_NAME,
- },
-};
-
-module_platform_driver(vimc_cap_pdrv);
-
-MODULE_DEVICE_TABLE(platform, vimc_cap_driver_ids);
-
-MODULE_DESCRIPTION("Virtual Media Controller Driver (VIMC) Capture");
-MODULE_AUTHOR("Helen Mae Koike Fornazier <helen.fornazier@gmail.com>");
-MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/vimc/vimc-common.c b/drivers/media/platform/vimc/vimc-common.c
index 7e1ae0b12f1e..16ce9f3b7c75 100644
--- a/drivers/media/platform/vimc/vimc-common.c
+++ b/drivers/media/platform/vimc/vimc-common.c
@@ -164,6 +164,16 @@ static const struct vimc_pix_map vimc_pix_map_list[] = {
},
};
+bool vimc_is_source(struct media_entity *ent)
+{
+ unsigned int i;
+
+ for (i = 0; i < ent->num_pads; i++)
+ if (ent->pads[i].flags & MEDIA_PAD_FL_SINK)
+ return false;
+ return true;
+}
+
const struct vimc_pix_map *vimc_pix_map_by_index(unsigned int i)
{
if (i >= ARRAY_SIZE(vimc_pix_map_list))
@@ -171,7 +181,6 @@ const struct vimc_pix_map *vimc_pix_map_by_index(unsigned int i)
return &vimc_pix_map_list[i];
}
-EXPORT_SYMBOL_GPL(vimc_pix_map_by_index);
const struct vimc_pix_map *vimc_pix_map_by_code(u32 code)
{
@@ -183,7 +192,6 @@ const struct vimc_pix_map *vimc_pix_map_by_code(u32 code)
}
return NULL;
}
-EXPORT_SYMBOL_GPL(vimc_pix_map_by_code);
const struct vimc_pix_map *vimc_pix_map_by_pixelformat(u32 pixelformat)
{
@@ -195,87 +203,37 @@ const struct vimc_pix_map *vimc_pix_map_by_pixelformat(u32 pixelformat)
}
return NULL;
}
-EXPORT_SYMBOL_GPL(vimc_pix_map_by_pixelformat);
-
-/* Helper function to allocate and initialize pads */
-struct media_pad *vimc_pads_init(u16 num_pads, const unsigned long *pads_flag)
-{
- struct media_pad *pads;
- unsigned int i;
-
- /* Allocate memory for the pads */
- pads = kcalloc(num_pads, sizeof(*pads), GFP_KERNEL);
- if (!pads)
- return ERR_PTR(-ENOMEM);
-
- /* Initialize the pads */
- for (i = 0; i < num_pads; i++) {
- pads[i].index = i;
- pads[i].flags = pads_flag[i];
- }
-
- return pads;
-}
-EXPORT_SYMBOL_GPL(vimc_pads_init);
-int vimc_pipeline_s_stream(struct media_entity *ent, int enable)
-{
- struct v4l2_subdev *sd;
- struct media_pad *pad;
- unsigned int i;
- int ret;
-
- for (i = 0; i < ent->num_pads; i++) {
- if (ent->pads[i].flags & MEDIA_PAD_FL_SOURCE)
- continue;
-
- /* Start the stream in the subdevice direct connected */
- pad = media_entity_remote_pad(&ent->pads[i]);
- if (!pad)
- continue;
-
- if (!is_media_entity_v4l2_subdev(pad->entity))
- return -EINVAL;
-
- sd = media_entity_to_v4l2_subdev(pad->entity);
- ret = v4l2_subdev_call(sd, video, s_stream, enable);
- if (ret && ret != -ENOIOCTLCMD)
- return ret;
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(vimc_pipeline_s_stream);
-
-static int vimc_get_mbus_format(struct media_pad *pad,
- struct v4l2_subdev_format *fmt)
+static int vimc_get_pix_format(struct media_pad *pad,
+ struct v4l2_pix_format *fmt)
{
if (is_media_entity_v4l2_subdev(pad->entity)) {
struct v4l2_subdev *sd =
media_entity_to_v4l2_subdev(pad->entity);
+ struct v4l2_subdev_format sd_fmt;
+ const struct vimc_pix_map *pix_map;
int ret;
- fmt->which = V4L2_SUBDEV_FORMAT_ACTIVE;
- fmt->pad = pad->index;
+ sd_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ sd_fmt.pad = pad->index;
- ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, fmt);
+ ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &sd_fmt);
if (ret)
return ret;
+ v4l2_fill_pix_format(fmt, &sd_fmt.format);
+ pix_map = vimc_pix_map_by_code(sd_fmt.format.code);
+ fmt->pixelformat = pix_map->pixelformat;
} else if (is_media_entity_v4l2_video_device(pad->entity)) {
struct video_device *vdev = container_of(pad->entity,
struct video_device,
entity);
struct vimc_ent_device *ved = video_get_drvdata(vdev);
- const struct vimc_pix_map *vpix;
- struct v4l2_pix_format vdev_fmt;
if (!ved->vdev_get_format)
return -ENOIOCTLCMD;
- ved->vdev_get_format(ved, &vdev_fmt);
- vpix = vimc_pix_map_by_pixelformat(vdev_fmt.pixelformat);
- v4l2_fill_mbus_format(&fmt->format, &vdev_fmt, vpix->code);
+ ved->vdev_get_format(ved, fmt);
} else {
return -EINVAL;
}
@@ -283,16 +241,16 @@ static int vimc_get_mbus_format(struct media_pad *pad,
return 0;
}
-int vimc_link_validate(struct media_link *link)
+int vimc_vdev_link_validate(struct media_link *link)
{
- struct v4l2_subdev_format source_fmt, sink_fmt;
+ struct v4l2_pix_format source_fmt, sink_fmt;
int ret;
- ret = vimc_get_mbus_format(link->source, &source_fmt);
+ ret = vimc_get_pix_format(link->source, &source_fmt);
if (ret)
return ret;
- ret = vimc_get_mbus_format(link->sink, &sink_fmt);
+ ret = vimc_get_pix_format(link->sink, &sink_fmt);
if (ret)
return ret;
@@ -301,21 +259,21 @@ int vimc_link_validate(struct media_link *link)
"%s:snk:%dx%d (0x%x, %d, %d, %d, %d)\n",
/* src */
link->source->entity->name,
- source_fmt.format.width, source_fmt.format.height,
- source_fmt.format.code, source_fmt.format.colorspace,
- source_fmt.format.quantization, source_fmt.format.xfer_func,
- source_fmt.format.ycbcr_enc,
+ source_fmt.width, source_fmt.height,
+ source_fmt.pixelformat, source_fmt.colorspace,
+ source_fmt.quantization, source_fmt.xfer_func,
+ source_fmt.ycbcr_enc,
/* sink */
link->sink->entity->name,
- sink_fmt.format.width, sink_fmt.format.height,
- sink_fmt.format.code, sink_fmt.format.colorspace,
- sink_fmt.format.quantization, sink_fmt.format.xfer_func,
- sink_fmt.format.ycbcr_enc);
-
- /* The width, height and code must match. */
- if (source_fmt.format.width != sink_fmt.format.width
- || source_fmt.format.height != sink_fmt.format.height
- || source_fmt.format.code != sink_fmt.format.code)
+ sink_fmt.width, sink_fmt.height,
+ sink_fmt.pixelformat, sink_fmt.colorspace,
+ sink_fmt.quantization, sink_fmt.xfer_func,
+ sink_fmt.ycbcr_enc);
+
+ /* The width, height and pixelformat must match. */
+ if (source_fmt.width != sink_fmt.width ||
+ source_fmt.height != sink_fmt.height ||
+ source_fmt.pixelformat != sink_fmt.pixelformat)
return -EPIPE;
/*
@@ -323,44 +281,43 @@ int vimc_link_validate(struct media_link *link)
* to support interlaced hardware connected to bridges that support
* progressive formats only.
*/
- if (source_fmt.format.field != sink_fmt.format.field &&
- sink_fmt.format.field != V4L2_FIELD_NONE)
+ if (source_fmt.field != sink_fmt.field &&
+ sink_fmt.field != V4L2_FIELD_NONE)
return -EPIPE;
/*
* If colorspace is DEFAULT, then assume all the colorimetry is also
* DEFAULT, return 0 to skip comparing the other colorimetry parameters
*/
- if (source_fmt.format.colorspace == V4L2_COLORSPACE_DEFAULT
- || sink_fmt.format.colorspace == V4L2_COLORSPACE_DEFAULT)
+ if (source_fmt.colorspace == V4L2_COLORSPACE_DEFAULT ||
+ sink_fmt.colorspace == V4L2_COLORSPACE_DEFAULT)
return 0;
/* Colorspace must match. */
- if (source_fmt.format.colorspace != sink_fmt.format.colorspace)
+ if (source_fmt.colorspace != sink_fmt.colorspace)
return -EPIPE;
/* Colorimetry must match if they are not set to DEFAULT */
- if (source_fmt.format.ycbcr_enc != V4L2_YCBCR_ENC_DEFAULT
- && sink_fmt.format.ycbcr_enc != V4L2_YCBCR_ENC_DEFAULT
- && source_fmt.format.ycbcr_enc != sink_fmt.format.ycbcr_enc)
+ if (source_fmt.ycbcr_enc != V4L2_YCBCR_ENC_DEFAULT &&
+ sink_fmt.ycbcr_enc != V4L2_YCBCR_ENC_DEFAULT &&
+ source_fmt.ycbcr_enc != sink_fmt.ycbcr_enc)
return -EPIPE;
- if (source_fmt.format.quantization != V4L2_QUANTIZATION_DEFAULT
- && sink_fmt.format.quantization != V4L2_QUANTIZATION_DEFAULT
- && source_fmt.format.quantization != sink_fmt.format.quantization)
+ if (source_fmt.quantization != V4L2_QUANTIZATION_DEFAULT &&
+ sink_fmt.quantization != V4L2_QUANTIZATION_DEFAULT &&
+ source_fmt.quantization != sink_fmt.quantization)
return -EPIPE;
- if (source_fmt.format.xfer_func != V4L2_XFER_FUNC_DEFAULT
- && sink_fmt.format.xfer_func != V4L2_XFER_FUNC_DEFAULT
- && source_fmt.format.xfer_func != sink_fmt.format.xfer_func)
+ if (source_fmt.xfer_func != V4L2_XFER_FUNC_DEFAULT &&
+ sink_fmt.xfer_func != V4L2_XFER_FUNC_DEFAULT &&
+ source_fmt.xfer_func != sink_fmt.xfer_func)
return -EPIPE;
return 0;
}
-EXPORT_SYMBOL_GPL(vimc_link_validate);
static const struct media_entity_operations vimc_ent_sd_mops = {
- .link_validate = vimc_link_validate,
+ .link_validate = v4l2_subdev_link_validate,
};
int vimc_ent_sd_register(struct vimc_ent_device *ved,
@@ -369,17 +326,12 @@ int vimc_ent_sd_register(struct vimc_ent_device *ved,
const char *const name,
u32 function,
u16 num_pads,
- const unsigned long *pads_flag,
+ struct media_pad *pads,
const struct v4l2_subdev_internal_ops *sd_int_ops,
const struct v4l2_subdev_ops *sd_ops)
{
int ret;
- /* Allocate the pads */
- ved->pads = vimc_pads_init(num_pads, pads_flag);
- if (IS_ERR(ved->pads))
- return PTR_ERR(ved->pads);
-
/* Fill the vimc_ent_device struct */
ved->ent = &sd->entity;
@@ -398,9 +350,9 @@ int vimc_ent_sd_register(struct vimc_ent_device *ved,
sd->flags |= V4L2_SUBDEV_FL_HAS_EVENTS;
/* Initialize the media entity */
- ret = media_entity_pads_init(&sd->entity, num_pads, ved->pads);
+ ret = media_entity_pads_init(&sd->entity, num_pads, pads);
if (ret)
- goto err_clean_pads;
+ return ret;
/* Register the subdev with the v4l2 and the media framework */
ret = v4l2_device_register_subdev(v4l2_dev, sd);
@@ -415,16 +367,5 @@ int vimc_ent_sd_register(struct vimc_ent_device *ved,
err_clean_m_ent:
media_entity_cleanup(&sd->entity);
-err_clean_pads:
- vimc_pads_cleanup(ved->pads);
return ret;
}
-EXPORT_SYMBOL_GPL(vimc_ent_sd_register);
-
-void vimc_ent_sd_unregister(struct vimc_ent_device *ved, struct v4l2_subdev *sd)
-{
- media_entity_cleanup(ved->ent);
- vimc_pads_cleanup(ved->pads);
- v4l2_device_unregister_subdev(sd);
-}
-EXPORT_SYMBOL_GPL(vimc_ent_sd_unregister);
diff --git a/drivers/media/platform/vimc/vimc-common.h b/drivers/media/platform/vimc/vimc-common.h
index 9c2e0e216c6b..87eb8259c2a8 100644
--- a/drivers/media/platform/vimc/vimc-common.h
+++ b/drivers/media/platform/vimc/vimc-common.h
@@ -8,6 +8,7 @@
#ifndef _VIMC_COMMON_H_
#define _VIMC_COMMON_H_
+#include <linux/platform_device.h>
#include <linux/slab.h>
#include <media/media-device.h>
#include <media/v4l2-device.h>
@@ -18,6 +19,7 @@
#define VIMC_CID_VIMC_BASE (0x00f00000 | 0xf000)
#define VIMC_CID_VIMC_CLASS (0x00f00000 | 1)
#define VIMC_CID_TEST_PATTERN (VIMC_CID_VIMC_BASE + 0)
+#define VIMC_CID_MEAN_WIN_SIZE (VIMC_CID_VIMC_BASE + 1)
#define VIMC_FRAME_MAX_WIDTH 4096
#define VIMC_FRAME_MAX_HEIGHT 2160
@@ -26,6 +28,10 @@
#define VIMC_FRAME_INDEX(lin, col, width, bpp) ((lin * width + col) * bpp)
+/* Source and sink pad checks */
+#define VIMC_IS_SRC(pad) (pad)
+#define VIMC_IS_SINK(pad) (!(pad))
+
/**
* struct vimc_colorimetry_clamp - Adjust colorimetry parameters
*
@@ -53,21 +59,6 @@ do { \
} while (0)
/**
- * struct vimc_platform_data - platform data to components
- *
- * @entity_name: The name of the entity to be created
- *
- * Board setup code will often provide additional information using the device's
- * platform_data field to hold additional information.
- * When injecting a new platform_device in the component system the core needs
- * to provide to the corresponding submodules the name of the entity that should
- * be used when registering the subdevice in the Media Controller system.
- */
-struct vimc_platform_data {
- char entity_name[32];
-};
-
-/**
* struct vimc_pix_map - maps media bus code with v4l2 pixel format
*
* @code: media bus format code defined by MEDIA_BUS_FMT_* macros
@@ -85,10 +76,11 @@ struct vimc_pix_map {
};
/**
- * struct vimc_ent_device - core struct that represents a node in the topology
+ * struct vimc_ent_device - core struct that represents an entity in the
+ * topology
*
+ * @dev: a pointer to the device struct of the driver
* @ent: the pointer to struct media_entity for the node
- * @pads: the list of pads of the node
* @process_frame: callback to send a frame to that node
* @vdev_get_format: callback that returns the current format of a pad, used
* only when is_media_entity_v4l2_video_device(ent) returns
@@ -103,8 +95,8 @@ struct vimc_pix_map {
* media_entity
*/
struct vimc_ent_device {
+ struct device *dev;
struct media_entity *ent;
- struct media_pad *pads;
void * (*process_frame)(struct vimc_ent_device *ved,
const void *frame);
void (*vdev_get_format)(struct vimc_ent_device *ved,
@@ -112,38 +104,65 @@ struct vimc_ent_device {
};
/**
- * vimc_pads_init - initialize pads
- *
- * @num_pads: number of pads to initialize
- * @pads_flags: flags to use in each pad
+ * struct vimc_device - main device for vimc driver
*
- * Helper functions to allocate/initialize pads
+ * @pdev: pointer to the platform device
+ * @pipe_cfg: pointer to the vimc pipeline configuration structure
+ * @ent_devs: array of vimc_ent_device pointers
+ * @mdev: the associated media_device parent
+ * @v4l2_dev: Internal v4l2 parent device
*/
-struct media_pad *vimc_pads_init(u16 num_pads,
- const unsigned long *pads_flag);
+struct vimc_device {
+ struct platform_device pdev;
+ const struct vimc_pipeline_config *pipe_cfg;
+ struct vimc_ent_device **ent_devs;
+ struct media_device mdev;
+ struct v4l2_device v4l2_dev;
+};
/**
- * vimc_pads_cleanup - free pads
- *
- * @pads: pointer to the pads
- *
- * Helper function to free the pads initialized with vimc_pads_init
+ * struct vimc_ent_config - describes the individual configuration of
+ * each entity
+ *
+ * @name: entity name
+ * @ved: pointer to vimc_ent_device (a node in the
+ * topology)
+ * @add: subdev add hook - initializes and registers
+ * the subdev, called from vimc-core
+ * @rm: subdev rm hook - unregisters and frees
+ * the subdev, called from vimc-core
*/
-static inline void vimc_pads_cleanup(struct media_pad *pads)
-{
- kfree(pads);
-}
+struct vimc_ent_config {
+ const char *name;
+ struct vimc_ent_device *(*add)(struct vimc_device *vimc,
+ const char *vcfg_name);
+ void (*rm)(struct vimc_device *vimc, struct vimc_ent_device *ved);
+};
/**
- * vimc_pipeline_s_stream - start stream through the pipeline
+ * vimc_is_source - returns true if the entity has only source pads
*
- * @ent: the pointer to struct media_entity for the node
- * @enable: 1 to start the stream and 0 to stop
+ * @ent: pointer to &struct media_entity
*
- * Helper function to call the s_stream of the subdevices connected
- * in all the sink pads of the entity
*/
-int vimc_pipeline_s_stream(struct media_entity *ent, int enable);
+bool vimc_is_source(struct media_entity *ent);
+
+/* prototypes for vimc_ent_config add and rm hooks */
+struct vimc_ent_device *vimc_cap_add(struct vimc_device *vimc,
+ const char *vcfg_name);
+void vimc_cap_rm(struct vimc_device *vimc, struct vimc_ent_device *ved);
+
+struct vimc_ent_device *vimc_deb_add(struct vimc_device *vimc,
+ const char *vcfg_name);
+void vimc_deb_rm(struct vimc_device *vimc, struct vimc_ent_device *ved);
+
+struct vimc_ent_device *vimc_sca_add(struct vimc_device *vimc,
+ const char *vcfg_name);
+void vimc_sca_rm(struct vimc_device *vimc, struct vimc_ent_device *ved);
+
+struct vimc_ent_device *vimc_sen_add(struct vimc_device *vimc,
+ const char *vcfg_name);
+void vimc_sen_rm(struct vimc_device *vimc, struct vimc_ent_device *ved);
/**
* vimc_pix_map_by_index - get vimc_pix_map struct by its index
@@ -176,7 +195,8 @@ const struct vimc_pix_map *vimc_pix_map_by_pixelformat(u32 pixelformat);
* unique.
* @function: media entity function defined by MEDIA_ENT_F_* macros
* @num_pads: number of pads to initialize
- * @pads_flag: flags to use in each pad
+ * @pads: the array of pads of the entity, the caller should set the
+ * flags of the pads
* @sd_int_ops: pointer to &struct v4l2_subdev_internal_ops
* @sd_ops: pointer to &struct v4l2_subdev_ops.
*
@@ -189,29 +209,17 @@ int vimc_ent_sd_register(struct vimc_ent_device *ved,
const char *const name,
u32 function,
u16 num_pads,
- const unsigned long *pads_flag,
+ struct media_pad *pads,
const struct v4l2_subdev_internal_ops *sd_int_ops,
const struct v4l2_subdev_ops *sd_ops);
/**
- * vimc_ent_sd_unregister - cleanup and unregister a subdev node
- *
- * @ved: the vimc_ent_device struct to be cleaned up
- * @sd: the v4l2_subdev struct to be unregistered
- *
- * Helper function cleanup and unregister the struct vimc_ent_device and struct
- * v4l2_subdev which represents a subdev node in the topology
- */
-void vimc_ent_sd_unregister(struct vimc_ent_device *ved,
- struct v4l2_subdev *sd);
-
-/**
- * vimc_link_validate - validates a media link
+ * vimc_vdev_link_validate - validates a media link
*
* @link: pointer to &struct media_link
*
* This function checks whether a media link is valid for streaming.
*/
-int vimc_link_validate(struct media_link *link);
+int vimc_vdev_link_validate(struct media_link *link);
#endif
diff --git a/drivers/media/platform/vimc/vimc-core.c b/drivers/media/platform/vimc/vimc-core.c
index 571c55aa0e16..97a272f3350a 100644
--- a/drivers/media/platform/vimc/vimc-core.c
+++ b/drivers/media/platform/vimc/vimc-core.c
@@ -5,7 +5,6 @@
* Copyright (C) 2015-2017 Helen Koike <helen.fornazier@gmail.com>
*/
-#include <linux/component.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -24,29 +23,6 @@
.flags = link_flags, \
}
-struct vimc_device {
- /* The platform device */
- struct platform_device pdev;
-
- /* The pipeline configuration */
- const struct vimc_pipeline_config *pipe_cfg;
-
- /* The Associated media_device parent */
- struct media_device mdev;
-
- /* Internal v4l2 parent device*/
- struct v4l2_device v4l2_dev;
-
- /* Subdevices */
- struct platform_device **subdevs;
-};
-
-/* Structure which describes individual configuration for each entity */
-struct vimc_ent_config {
- const char *name;
- const char *drv;
-};
-
/* Structure which describes links between entities */
struct vimc_ent_link {
unsigned int src_ent;
@@ -68,43 +44,52 @@ struct vimc_pipeline_config {
* Topology Configuration
*/
-static const struct vimc_ent_config ent_config[] = {
+static struct vimc_ent_config ent_config[] = {
{
.name = "Sensor A",
- .drv = "vimc-sensor",
+ .add = vimc_sen_add,
+ .rm = vimc_sen_rm,
},
{
.name = "Sensor B",
- .drv = "vimc-sensor",
+ .add = vimc_sen_add,
+ .rm = vimc_sen_rm,
},
{
.name = "Debayer A",
- .drv = "vimc-debayer",
+ .add = vimc_deb_add,
+ .rm = vimc_deb_rm,
},
{
.name = "Debayer B",
- .drv = "vimc-debayer",
+ .add = vimc_deb_add,
+ .rm = vimc_deb_rm,
},
{
.name = "Raw Capture 0",
- .drv = "vimc-capture",
+ .add = vimc_cap_add,
+ .rm = vimc_cap_rm,
},
{
.name = "Raw Capture 1",
- .drv = "vimc-capture",
+ .add = vimc_cap_add,
+ .rm = vimc_cap_rm,
},
{
- .name = "RGB/YUV Input",
/* TODO: change this to vimc-input when it is implemented */
- .drv = "vimc-sensor",
+ .name = "RGB/YUV Input",
+ .add = vimc_sen_add,
+ .rm = vimc_sen_rm,
},
{
.name = "Scaler",
- .drv = "vimc-scaler",
+ .add = vimc_sca_add,
+ .rm = vimc_sca_rm,
},
{
.name = "RGB/YUV Capture",
- .drv = "vimc-capture",
+ .add = vimc_cap_add,
+ .rm = vimc_cap_rm,
},
};
@@ -127,7 +112,7 @@ static const struct vimc_ent_link ent_links[] = {
VIMC_ENT_LINK(7, 1, 8, 0, MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE),
};
-static const struct vimc_pipeline_config pipe_cfg = {
+static struct vimc_pipeline_config pipe_cfg = {
.ents = ent_config,
.num_ents = ARRAY_SIZE(ent_config),
.links = ent_links,
@@ -136,6 +121,14 @@ static const struct vimc_pipeline_config pipe_cfg = {
/* -------------------------------------------------------------------------- */
+static void vimc_rm_links(struct vimc_device *vimc)
+{
+ unsigned int i;
+
+ for (i = 0; i < vimc->pipe_cfg->num_ents; i++)
+ media_entity_remove_links(vimc->ent_devs[i]->ent);
+}
+
static int vimc_create_links(struct vimc_device *vimc)
{
unsigned int i;
@@ -144,32 +137,56 @@ static int vimc_create_links(struct vimc_device *vimc)
/* Initialize the links between entities */
for (i = 0; i < vimc->pipe_cfg->num_links; i++) {
const struct vimc_ent_link *link = &vimc->pipe_cfg->links[i];
- /*
- * TODO: Check another way of retrieving ved struct without
- * relying on platform_get_drvdata
- */
+
struct vimc_ent_device *ved_src =
- platform_get_drvdata(vimc->subdevs[link->src_ent]);
+ vimc->ent_devs[link->src_ent];
struct vimc_ent_device *ved_sink =
- platform_get_drvdata(vimc->subdevs[link->sink_ent]);
+ vimc->ent_devs[link->sink_ent];
ret = media_create_pad_link(ved_src->ent, link->src_pad,
ved_sink->ent, link->sink_pad,
link->flags);
if (ret)
- return ret;
+ goto err_rm_links;
}
return 0;
+
+err_rm_links:
+ vimc_rm_links(vimc);
+ return ret;
}
-static int vimc_comp_bind(struct device *master)
+static int vimc_add_subdevs(struct vimc_device *vimc)
{
- struct vimc_device *vimc = container_of(to_platform_device(master),
- struct vimc_device, pdev);
- int ret;
+ unsigned int i;
+
+ for (i = 0; i < vimc->pipe_cfg->num_ents; i++) {
+ dev_dbg(&vimc->pdev.dev, "new entity for %s\n",
+ vimc->pipe_cfg->ents[i].name);
+ vimc->ent_devs[i] = vimc->pipe_cfg->ents[i].add(vimc,
+ vimc->pipe_cfg->ents[i].name);
+ if (!vimc->ent_devs[i]) {
+ dev_err(&vimc->pdev.dev, "add new entity for %s\n",
+ vimc->pipe_cfg->ents[i].name);
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+static void vimc_rm_subdevs(struct vimc_device *vimc)
+{
+ unsigned int i;
+
+ for (i = 0; i < vimc->pipe_cfg->num_ents; i++)
+ if (vimc->ent_devs[i])
+ vimc->pipe_cfg->ents[i].rm(vimc, vimc->ent_devs[i]);
+}
- dev_dbg(master, "bind");
+static int vimc_register_devices(struct vimc_device *vimc)
+{
+ int ret;
/* Register the v4l2 struct */
ret = v4l2_device_register(vimc->mdev.dev, &vimc->v4l2_dev);
@@ -179,22 +196,31 @@ static int vimc_comp_bind(struct device *master)
return ret;
}
- /* Bind subdevices */
- ret = component_bind_all(master, &vimc->v4l2_dev);
- if (ret)
+ /* allocate ent_devs */
+ vimc->ent_devs = kcalloc(vimc->pipe_cfg->num_ents,
+ sizeof(*vimc->ent_devs), GFP_KERNEL);
+ if (!vimc->ent_devs) {
+ ret = -ENOMEM;
goto err_v4l2_unregister;
+ }
+
+ /* Invoke entity config hooks to initialize and register subdevs */
+ ret = vimc_add_subdevs(vimc);
+ if (ret)
+ /* remove subdevs that got added */
+ goto err_rm_subdevs;
/* Initialize links */
ret = vimc_create_links(vimc);
if (ret)
- goto err_comp_unbind_all;
+ goto err_rm_subdevs;
/* Register the media device */
ret = media_device_register(&vimc->mdev);
if (ret) {
dev_err(vimc->mdev.dev,
"media device register failed (err=%d)\n", ret);
- goto err_comp_unbind_all;
+ goto err_rm_subdevs;
}
/* Expose all subdevs' nodes */
@@ -211,98 +237,32 @@ static int vimc_comp_bind(struct device *master)
err_mdev_unregister:
media_device_unregister(&vimc->mdev);
media_device_cleanup(&vimc->mdev);
-err_comp_unbind_all:
- component_unbind_all(master, NULL);
+err_rm_subdevs:
+ vimc_rm_subdevs(vimc);
+ kfree(vimc->ent_devs);
err_v4l2_unregister:
v4l2_device_unregister(&vimc->v4l2_dev);
return ret;
}
-static void vimc_comp_unbind(struct device *master)
+static void vimc_unregister(struct vimc_device *vimc)
{
- struct vimc_device *vimc = container_of(to_platform_device(master),
- struct vimc_device, pdev);
-
- dev_dbg(master, "unbind");
-
media_device_unregister(&vimc->mdev);
media_device_cleanup(&vimc->mdev);
- component_unbind_all(master, NULL);
v4l2_device_unregister(&vimc->v4l2_dev);
+ kfree(vimc->ent_devs);
}
-static int vimc_comp_compare(struct device *comp, void *data)
-{
- return comp == data;
-}
-
-static struct component_match *vimc_add_subdevs(struct vimc_device *vimc)
-{
- struct component_match *match = NULL;
- struct vimc_platform_data pdata;
- int i;
-
- for (i = 0; i < vimc->pipe_cfg->num_ents; i++) {
- dev_dbg(&vimc->pdev.dev, "new pdev for %s\n",
- vimc->pipe_cfg->ents[i].drv);
-
- strscpy(pdata.entity_name, vimc->pipe_cfg->ents[i].name,
- sizeof(pdata.entity_name));
-
- vimc->subdevs[i] = platform_device_register_data(&vimc->pdev.dev,
- vimc->pipe_cfg->ents[i].drv,
- PLATFORM_DEVID_AUTO,
- &pdata,
- sizeof(pdata));
- if (IS_ERR(vimc->subdevs[i])) {
- match = ERR_CAST(vimc->subdevs[i]);
- while (--i >= 0)
- platform_device_unregister(vimc->subdevs[i]);
-
- return match;
- }
-
- component_match_add(&vimc->pdev.dev, &match, vimc_comp_compare,
- &vimc->subdevs[i]->dev);
- }
-
- return match;
-}
-
-static void vimc_rm_subdevs(struct vimc_device *vimc)
-{
- unsigned int i;
-
- for (i = 0; i < vimc->pipe_cfg->num_ents; i++)
- platform_device_unregister(vimc->subdevs[i]);
-}
-
-static const struct component_master_ops vimc_comp_ops = {
- .bind = vimc_comp_bind,
- .unbind = vimc_comp_unbind,
-};
-
static int vimc_probe(struct platform_device *pdev)
{
struct vimc_device *vimc = container_of(pdev, struct vimc_device, pdev);
- struct component_match *match = NULL;
int ret;
dev_dbg(&pdev->dev, "probe");
memset(&vimc->mdev, 0, sizeof(vimc->mdev));
- /* Create platform_device for each entity in the topology*/
- vimc->subdevs = devm_kcalloc(&vimc->pdev.dev, vimc->pipe_cfg->num_ents,
- sizeof(*vimc->subdevs), GFP_KERNEL);
- if (!vimc->subdevs)
- return -ENOMEM;
-
- match = vimc_add_subdevs(vimc);
- if (IS_ERR(match))
- return PTR_ERR(match);
-
/* Link the media device within the v4l2_device */
vimc->v4l2_dev.mdev = &vimc->mdev;
@@ -314,12 +274,9 @@ static int vimc_probe(struct platform_device *pdev)
vimc->mdev.dev = &pdev->dev;
media_device_init(&vimc->mdev);
- /* Add self to the component system */
- ret = component_master_add_with_match(&pdev->dev, &vimc_comp_ops,
- match);
+ ret = vimc_register_devices(vimc);
if (ret) {
media_device_cleanup(&vimc->mdev);
- vimc_rm_subdevs(vimc);
return ret;
}
@@ -332,8 +289,8 @@ static int vimc_remove(struct platform_device *pdev)
dev_dbg(&pdev->dev, "remove");
- component_master_del(&pdev->dev, &vimc_comp_ops);
vimc_rm_subdevs(vimc);
+ vimc_unregister(vimc);
return 0;
}
diff --git a/drivers/media/platform/vimc/vimc-debayer.c b/drivers/media/platform/vimc/vimc-debayer.c
index b72b8385067b..5d1b67d684bb 100644
--- a/drivers/media/platform/vimc/vimc-debayer.c
+++ b/drivers/media/platform/vimc/vimc-debayer.c
@@ -5,28 +5,16 @@
* Copyright (C) 2015-2017 Helen Koike <helen.fornazier@gmail.com>
*/
-#include <linux/component.h>
-#include <linux/module.h>
-#include <linux/mod_devicetable.h>
+#include <linux/moduleparam.h>
#include <linux/platform_device.h>
#include <linux/vmalloc.h>
#include <linux/v4l2-mediabus.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-event.h>
#include <media/v4l2-subdev.h>
#include "vimc-common.h"
-#define VIMC_DEB_DRV_NAME "vimc-debayer"
-
-static unsigned int deb_mean_win_size = 3;
-module_param(deb_mean_win_size, uint, 0000);
-MODULE_PARM_DESC(deb_mean_win_size, " the window size to calculate the mean.\n"
- "NOTE: the window size needs to be an odd number, as the main pixel "
- "stays in the center of the window, otherwise the next odd number "
- "is considered");
-
-#define IS_SINK(pad) (!pad)
-#define IS_SRC(pad) (pad)
-
enum vimc_deb_rgb_colors {
VIMC_DEB_RED = 0,
VIMC_DEB_GREEN = 1,
@@ -41,7 +29,6 @@ struct vimc_deb_pix_map {
struct vimc_deb_device {
struct vimc_ent_device ved;
struct v4l2_subdev sd;
- struct device *dev;
/* The active format */
struct v4l2_mbus_framefmt sink_fmt;
u32 src_code;
@@ -51,6 +38,9 @@ struct vimc_deb_device {
u8 *src_frame;
const struct vimc_deb_pix_map *sink_pix_map;
unsigned int sink_bpp;
+ unsigned int mean_win_size;
+ struct v4l2_ctrl_handler hdl;
+ struct media_pad pads[2];
};
static const struct v4l2_mbus_framefmt sink_fmt_default = {
@@ -159,7 +149,7 @@ static int vimc_deb_enum_mbus_code(struct v4l2_subdev *sd,
struct v4l2_subdev_mbus_code_enum *code)
{
/* We only support one format for source pads */
- if (IS_SRC(code->pad)) {
+ if (VIMC_IS_SRC(code->pad)) {
struct vimc_deb_device *vdeb = v4l2_get_subdevdata(sd);
if (code->index)
@@ -185,7 +175,7 @@ static int vimc_deb_enum_frame_size(struct v4l2_subdev *sd,
if (fse->index)
return -EINVAL;
- if (IS_SINK(fse->pad)) {
+ if (VIMC_IS_SINK(fse->pad)) {
const struct vimc_deb_pix_map *vpix =
vimc_deb_pix_map_by_code(fse->code);
@@ -215,7 +205,7 @@ static int vimc_deb_get_fmt(struct v4l2_subdev *sd,
vdeb->sink_fmt;
/* Set the right code for the source pad */
- if (IS_SRC(fmt->pad))
+ if (VIMC_IS_SRC(fmt->pad))
fmt->format.code = vdeb->src_code;
return 0;
@@ -262,7 +252,7 @@ static int vimc_deb_set_fmt(struct v4l2_subdev *sd,
* Do not change the format of the source pad,
* it is propagated from the sink
*/
- if (IS_SRC(fmt->pad)) {
+ if (VIMC_IS_SRC(fmt->pad)) {
fmt->format = *sink_fmt;
/* TODO: Add support for other formats */
fmt->format.code = vdeb->src_code;
@@ -270,7 +260,7 @@ static int vimc_deb_set_fmt(struct v4l2_subdev *sd,
/* Set the new format in the sink pad */
vimc_deb_adjust_sink_fmt(&fmt->format);
- dev_dbg(vdeb->dev, "%s: sink format update: "
+ dev_dbg(vdeb->ved.dev, "%s: sink format update: "
"old:%dx%d (0x%x, %d, %d, %d, %d) "
"new:%dx%d (0x%x, %d, %d, %d, %d)\n", vdeb->sd.name,
/* old */
@@ -351,11 +341,18 @@ static int vimc_deb_s_stream(struct v4l2_subdev *sd, int enable)
return 0;
}
+static const struct v4l2_subdev_core_ops vimc_deb_core_ops = {
+ .log_status = v4l2_ctrl_subdev_log_status,
+ .subscribe_event = v4l2_ctrl_subdev_subscribe_event,
+ .unsubscribe_event = v4l2_event_subdev_unsubscribe,
+};
+
static const struct v4l2_subdev_video_ops vimc_deb_video_ops = {
.s_stream = vimc_deb_s_stream,
};
static const struct v4l2_subdev_ops vimc_deb_ops = {
+ .core = &vimc_deb_core_ops,
.pad = &vimc_deb_pad_ops,
.video = &vimc_deb_video_ops,
};
@@ -389,11 +386,11 @@ static void vimc_deb_calc_rgb_sink(struct vimc_deb_device *vdeb,
* the top left corner of the mean window (considering the current
* pixel as the center)
*/
- seek = deb_mean_win_size / 2;
+ seek = vdeb->mean_win_size / 2;
/* Sum the values of the colors in the mean window */
- dev_dbg(vdeb->dev,
+ dev_dbg(vdeb->ved.dev,
"deb: %s: --- Calc pixel %dx%d, window mean %d, seek %d ---\n",
vdeb->sd.name, lin, col, vdeb->sink_fmt.height, seek);
@@ -426,7 +423,7 @@ static void vimc_deb_calc_rgb_sink(struct vimc_deb_device *vdeb,
vdeb->sink_fmt.width,
vdeb->sink_bpp);
- dev_dbg(vdeb->dev,
+ dev_dbg(vdeb->ved.dev,
"deb: %s: RGB CALC: frame index %d, win pos %dx%d, color %d\n",
vdeb->sd.name, index, wlin, wcol, color);
@@ -437,21 +434,21 @@ static void vimc_deb_calc_rgb_sink(struct vimc_deb_device *vdeb,
/* Save how many values we already added */
n_rgb[color]++;
- dev_dbg(vdeb->dev, "deb: %s: RGB CALC: val %d, n %d\n",
+ dev_dbg(vdeb->ved.dev, "deb: %s: RGB CALC: val %d, n %d\n",
vdeb->sd.name, rgb[color], n_rgb[color]);
}
}
/* Calculate the mean */
for (i = 0; i < 3; i++) {
- dev_dbg(vdeb->dev,
+ dev_dbg(vdeb->ved.dev,
"deb: %s: PRE CALC: %dx%d Color %d, val %d, n %d\n",
vdeb->sd.name, lin, col, i, rgb[i], n_rgb[i]);
if (n_rgb[i])
rgb[i] = rgb[i] / n_rgb[i];
- dev_dbg(vdeb->dev,
+ dev_dbg(vdeb->ved.dev,
"deb: %s: FINAL CALC: %dx%d Color %d, val %d\n",
vdeb->sd.name, lin, col, i, rgb[i]);
}
@@ -476,14 +473,34 @@ static void *vimc_deb_process_frame(struct vimc_ent_device *ved,
}
return vdeb->src_frame;
+}
+static int vimc_deb_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct vimc_deb_device *vdeb =
+ container_of(ctrl->handler, struct vimc_deb_device, hdl);
+
+ switch (ctrl->id) {
+ case VIMC_CID_MEAN_WIN_SIZE:
+ vdeb->mean_win_size = ctrl->val;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
}
+static const struct v4l2_ctrl_ops vimc_deb_ctrl_ops = {
+ .s_ctrl = vimc_deb_s_ctrl,
+};
+
static void vimc_deb_release(struct v4l2_subdev *sd)
{
struct vimc_deb_device *vdeb =
container_of(sd, struct vimc_deb_device, sd);
+ v4l2_ctrl_handler_free(&vdeb->hdl);
+ media_entity_cleanup(vdeb->ved.ent);
kfree(vdeb);
}
@@ -491,44 +508,69 @@ static const struct v4l2_subdev_internal_ops vimc_deb_int_ops = {
.release = vimc_deb_release,
};
-static void vimc_deb_comp_unbind(struct device *comp, struct device *master,
- void *master_data)
+void vimc_deb_rm(struct vimc_device *vimc, struct vimc_ent_device *ved)
{
- struct vimc_ent_device *ved = dev_get_drvdata(comp);
- struct vimc_deb_device *vdeb = container_of(ved, struct vimc_deb_device,
- ved);
+ struct vimc_deb_device *vdeb;
- vimc_ent_sd_unregister(ved, &vdeb->sd);
+ vdeb = container_of(ved, struct vimc_deb_device, ved);
+ v4l2_device_unregister_subdev(&vdeb->sd);
}
-static int vimc_deb_comp_bind(struct device *comp, struct device *master,
- void *master_data)
+static const struct v4l2_ctrl_config vimc_deb_ctrl_class = {
+ .flags = V4L2_CTRL_FLAG_READ_ONLY | V4L2_CTRL_FLAG_WRITE_ONLY,
+ .id = VIMC_CID_VIMC_CLASS,
+ .name = "VIMC Controls",
+ .type = V4L2_CTRL_TYPE_CTRL_CLASS,
+};
+
+static const struct v4l2_ctrl_config vimc_deb_ctrl_mean_win_size = {
+ .ops = &vimc_deb_ctrl_ops,
+ .id = VIMC_CID_MEAN_WIN_SIZE,
+ .name = "Debayer Mean Window Size",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 1,
+ .max = 25,
+ .step = 2,
+ .def = 3,
+};
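
This control supersedes the removed deb_mean_win_size module parameter; with min 1, max 25 and step 2, the control framework itself rejects even window sizes, so the driver no longer has to round them up. A userspace sketch of setting it through the subdev node (the device path is an assumption, and VIMC_CID_MEAN_WIN_SIZE lives in vimc-common.h rather than a uapi header; a real application would look the control up by name):

/* Sketch only: set the debayer mean window size via VIDIOC_S_EXT_CTRLS.
 * The subdev path and the visibility of VIMC_CID_MEAN_WIN_SIZE are
 * assumptions made for the example.
 */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/videodev2.h>

int set_mean_win_size(const char *subdev_path, __s32 size)
{
	struct v4l2_ext_control ctrl;
	struct v4l2_ext_controls ctrls;
	int fd, ret;

	fd = open(subdev_path, O_RDWR);	/* e.g. "/dev/v4l-subdev2" (assumed) */
	if (fd < 0)
		return -1;
	memset(&ctrl, 0, sizeof(ctrl));
	memset(&ctrls, 0, sizeof(ctrls));
	ctrl.id = VIMC_CID_MEAN_WIN_SIZE;	/* kernel-internal control id */
	ctrl.value = size;			/* must be odd: min 1, max 25, step 2 */
	ctrls.which = V4L2_CTRL_WHICH_CUR_VAL;
	ctrls.count = 1;
	ctrls.controls = &ctrl;
	ret = ioctl(fd, VIDIOC_S_EXT_CTRLS, &ctrls);
	close(fd);
	return ret;
}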
+
+struct vimc_ent_device *vimc_deb_add(struct vimc_device *vimc,
+ const char *vcfg_name)
{
- struct v4l2_device *v4l2_dev = master_data;
- struct vimc_platform_data *pdata = comp->platform_data;
+ struct v4l2_device *v4l2_dev = &vimc->v4l2_dev;
struct vimc_deb_device *vdeb;
int ret;
/* Allocate the vdeb struct */
vdeb = kzalloc(sizeof(*vdeb), GFP_KERNEL);
if (!vdeb)
- return -ENOMEM;
+ return NULL;
+
+ /* Create controls: */
+ v4l2_ctrl_handler_init(&vdeb->hdl, 2);
+ v4l2_ctrl_new_custom(&vdeb->hdl, &vimc_deb_ctrl_class, NULL);
+ v4l2_ctrl_new_custom(&vdeb->hdl, &vimc_deb_ctrl_mean_win_size, NULL);
+ vdeb->sd.ctrl_handler = &vdeb->hdl;
+ if (vdeb->hdl.error) {
+ ret = vdeb->hdl.error;
+ goto err_free_vdeb;
+ }
/* Initialize ved and sd */
+ vdeb->pads[0].flags = MEDIA_PAD_FL_SINK;
+ vdeb->pads[1].flags = MEDIA_PAD_FL_SOURCE;
+
ret = vimc_ent_sd_register(&vdeb->ved, &vdeb->sd, v4l2_dev,
- pdata->entity_name,
+ vcfg_name,
MEDIA_ENT_F_PROC_VIDEO_PIXEL_ENC_CONV, 2,
- (const unsigned long[2]) {MEDIA_PAD_FL_SINK,
- MEDIA_PAD_FL_SOURCE},
+ vdeb->pads,
&vimc_deb_int_ops, &vimc_deb_ops);
- if (ret) {
- kfree(vdeb);
- return ret;
- }
+ if (ret)
+ goto err_free_hdl;
vdeb->ved.process_frame = vimc_deb_process_frame;
- dev_set_drvdata(comp, &vdeb->ved);
- vdeb->dev = comp;
+ vdeb->ved.dev = &vimc->pdev.dev;
+ vdeb->mean_win_size = vimc_deb_ctrl_mean_win_size.def;
/* Initialize the frame format */
vdeb->sink_fmt = sink_fmt_default;
@@ -541,46 +583,12 @@ static int vimc_deb_comp_bind(struct device *comp, struct device *master,
vdeb->src_code = MEDIA_BUS_FMT_RGB888_1X24;
vdeb->set_rgb_src = vimc_deb_set_rgb_mbus_fmt_rgb888_1x24;
- return 0;
-}
-
-static const struct component_ops vimc_deb_comp_ops = {
- .bind = vimc_deb_comp_bind,
- .unbind = vimc_deb_comp_unbind,
-};
+ return &vdeb->ved;
-static int vimc_deb_probe(struct platform_device *pdev)
-{
- return component_add(&pdev->dev, &vimc_deb_comp_ops);
-}
-
-static int vimc_deb_remove(struct platform_device *pdev)
-{
- component_del(&pdev->dev, &vimc_deb_comp_ops);
+err_free_hdl:
+ v4l2_ctrl_handler_free(&vdeb->hdl);
+err_free_vdeb:
+ kfree(vdeb);
- return 0;
+ return NULL;
}
-
-static const struct platform_device_id vimc_deb_driver_ids[] = {
- {
- .name = VIMC_DEB_DRV_NAME,
- },
- { }
-};
-
-static struct platform_driver vimc_deb_pdrv = {
- .probe = vimc_deb_probe,
- .remove = vimc_deb_remove,
- .id_table = vimc_deb_driver_ids,
- .driver = {
- .name = VIMC_DEB_DRV_NAME,
- },
-};
-
-module_platform_driver(vimc_deb_pdrv);
-
-MODULE_DEVICE_TABLE(platform, vimc_deb_driver_ids);
-
-MODULE_DESCRIPTION("Virtual Media Controller Driver (VIMC) Debayer");
-MODULE_AUTHOR("Helen Mae Koike Fornazier <helen.fornazier@gmail.com>");
-MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/vimc/vimc-scaler.c b/drivers/media/platform/vimc/vimc-scaler.c
index 49ab8d9dd9c9..2f88a7d9d67b 100644
--- a/drivers/media/platform/vimc/vimc-scaler.c
+++ b/drivers/media/platform/vimc/vimc-scaler.c
@@ -5,30 +5,22 @@
* Copyright (C) 2015-2017 Helen Koike <helen.fornazier@gmail.com>
*/
-#include <linux/component.h>
-#include <linux/module.h>
-#include <linux/mod_devicetable.h>
-#include <linux/platform_device.h>
+#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/v4l2-mediabus.h>
#include <media/v4l2-subdev.h>
#include "vimc-common.h"
-#define VIMC_SCA_DRV_NAME "vimc-scaler"
-
static unsigned int sca_mult = 3;
module_param(sca_mult, uint, 0000);
MODULE_PARM_DESC(sca_mult, " the image size multiplier");
-#define IS_SINK(pad) (!pad)
-#define IS_SRC(pad) (pad)
#define MAX_ZOOM 8
struct vimc_sca_device {
struct vimc_ent_device ved;
struct v4l2_subdev sd;
- struct device *dev;
/* NOTE: the source fmt is the same as the sink
* with the width and height multiplied by mult
*/
@@ -37,6 +29,7 @@ struct vimc_sca_device {
u8 *src_frame;
unsigned int src_line_size;
unsigned int bpp;
+ struct media_pad pads[2];
};
static const struct v4l2_mbus_framefmt sink_fmt_default = {
@@ -98,7 +91,7 @@ static int vimc_sca_enum_frame_size(struct v4l2_subdev *sd,
fse->min_width = VIMC_FRAME_MIN_WIDTH;
fse->min_height = VIMC_FRAME_MIN_HEIGHT;
- if (IS_SINK(fse->pad)) {
+ if (VIMC_IS_SINK(fse->pad)) {
fse->max_width = VIMC_FRAME_MAX_WIDTH;
fse->max_height = VIMC_FRAME_MAX_HEIGHT;
} else {
@@ -121,7 +114,7 @@ static int vimc_sca_get_fmt(struct v4l2_subdev *sd,
vsca->sink_fmt;
/* Scale the frame size for the source pad */
- if (IS_SRC(format->pad)) {
+ if (VIMC_IS_SRC(format->pad)) {
format->format.width = vsca->sink_fmt.width * sca_mult;
format->format.height = vsca->sink_fmt.height * sca_mult;
}
@@ -170,7 +163,7 @@ static int vimc_sca_set_fmt(struct v4l2_subdev *sd,
* Do not change the format of the source pad,
* it is propagated from the sink
*/
- if (IS_SRC(fmt->pad)) {
+ if (VIMC_IS_SRC(fmt->pad)) {
fmt->format = *sink_fmt;
fmt->format.width = sink_fmt->width * sca_mult;
fmt->format.height = sink_fmt->height * sca_mult;
@@ -178,7 +171,7 @@ static int vimc_sca_set_fmt(struct v4l2_subdev *sd,
/* Set the new format in the sink pad */
vimc_sca_adjust_sink_fmt(&fmt->format);
- dev_dbg(vsca->dev, "%s: sink format update: "
+ dev_dbg(vsca->ved.dev, "%s: sink format update: "
"old:%dx%d (0x%x, %d, %d, %d, %d) "
"new:%dx%d (0x%x, %d, %d, %d, %d)\n", vsca->sd.name,
/* old */
@@ -278,7 +271,7 @@ static void vimc_sca_scale_pix(const struct vimc_sca_device *const vsca,
vsca->bpp);
pixel = &sink_frame[index];
- dev_dbg(vsca->dev,
+ dev_dbg(vsca->ved.dev,
"sca: %s: --- scale_pix sink pos %dx%d, index %d ---\n",
vsca->sd.name, lin, col, index);
@@ -288,7 +281,7 @@ static void vimc_sca_scale_pix(const struct vimc_sca_device *const vsca,
index = VIMC_FRAME_INDEX(lin * sca_mult, col * sca_mult,
vsca->sink_fmt.width * sca_mult, vsca->bpp);
- dev_dbg(vsca->dev, "sca: %s: scale_pix src pos %dx%d, index %d\n",
+ dev_dbg(vsca->ved.dev, "sca: %s: scale_pix src pos %dx%d, index %d\n",
vsca->sd.name, lin * sca_mult, col * sca_mult, index);
/* Repeat this pixel mult times */
@@ -297,7 +290,7 @@ static void vimc_sca_scale_pix(const struct vimc_sca_device *const vsca,
* pixel repetition in a line
*/
for (j = 0; j < sca_mult * vsca->bpp; j += vsca->bpp) {
- dev_dbg(vsca->dev,
+ dev_dbg(vsca->ved.dev,
"sca: %s: sca: scale_pix src pos %d\n",
vsca->sd.name, index + j);
@@ -343,6 +336,7 @@ static void vimc_sca_release(struct v4l2_subdev *sd)
struct vimc_sca_device *vsca =
container_of(sd, struct vimc_sca_device, sd);
+ media_entity_cleanup(vsca->ved.ent);
kfree(vsca);
}
@@ -350,89 +344,45 @@ static const struct v4l2_subdev_internal_ops vimc_sca_int_ops = {
.release = vimc_sca_release,
};
-static void vimc_sca_comp_unbind(struct device *comp, struct device *master,
- void *master_data)
+void vimc_sca_rm(struct vimc_device *vimc, struct vimc_ent_device *ved)
{
- struct vimc_ent_device *ved = dev_get_drvdata(comp);
- struct vimc_sca_device *vsca = container_of(ved, struct vimc_sca_device,
- ved);
+ struct vimc_sca_device *vsca;
- vimc_ent_sd_unregister(ved, &vsca->sd);
+ vsca = container_of(ved, struct vimc_sca_device, ved);
+ v4l2_device_unregister_subdev(&vsca->sd);
}
-
-static int vimc_sca_comp_bind(struct device *comp, struct device *master,
- void *master_data)
+struct vimc_ent_device *vimc_sca_add(struct vimc_device *vimc,
+ const char *vcfg_name)
{
- struct v4l2_device *v4l2_dev = master_data;
- struct vimc_platform_data *pdata = comp->platform_data;
+ struct v4l2_device *v4l2_dev = &vimc->v4l2_dev;
struct vimc_sca_device *vsca;
int ret;
/* Allocate the vsca struct */
vsca = kzalloc(sizeof(*vsca), GFP_KERNEL);
if (!vsca)
- return -ENOMEM;
+ return NULL;
/* Initialize ved and sd */
+ vsca->pads[0].flags = MEDIA_PAD_FL_SINK;
+ vsca->pads[1].flags = MEDIA_PAD_FL_SOURCE;
+
ret = vimc_ent_sd_register(&vsca->ved, &vsca->sd, v4l2_dev,
- pdata->entity_name,
+ vcfg_name,
MEDIA_ENT_F_PROC_VIDEO_SCALER, 2,
- (const unsigned long[2]) {MEDIA_PAD_FL_SINK,
- MEDIA_PAD_FL_SOURCE},
+ vsca->pads,
&vimc_sca_int_ops, &vimc_sca_ops);
if (ret) {
kfree(vsca);
- return ret;
+ return NULL;
}
vsca->ved.process_frame = vimc_sca_process_frame;
- dev_set_drvdata(comp, &vsca->ved);
- vsca->dev = comp;
+ vsca->ved.dev = &vimc->pdev.dev;
/* Initialize the frame format */
vsca->sink_fmt = sink_fmt_default;
- return 0;
-}
-
-static const struct component_ops vimc_sca_comp_ops = {
- .bind = vimc_sca_comp_bind,
- .unbind = vimc_sca_comp_unbind,
-};
-
-static int vimc_sca_probe(struct platform_device *pdev)
-{
- return component_add(&pdev->dev, &vimc_sca_comp_ops);
-}
-
-static int vimc_sca_remove(struct platform_device *pdev)
-{
- component_del(&pdev->dev, &vimc_sca_comp_ops);
-
- return 0;
+ return &vsca->ved;
}
-
-static const struct platform_device_id vimc_sca_driver_ids[] = {
- {
- .name = VIMC_SCA_DRV_NAME,
- },
- { }
-};
-
-static struct platform_driver vimc_sca_pdrv = {
- .probe = vimc_sca_probe,
- .remove = vimc_sca_remove,
- .id_table = vimc_sca_driver_ids,
- .driver = {
- .name = VIMC_SCA_DRV_NAME,
- },
-};
-
-module_platform_driver(vimc_sca_pdrv);
-
-MODULE_DEVICE_TABLE(platform, vimc_sca_driver_ids);
-
-MODULE_DESCRIPTION("Virtual Media Controller Driver (VIMC) Scaler");
-MODULE_AUTHOR("Helen Mae Koike Fornazier <helen.fornazier@gmail.com>");
-MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/vimc/vimc-sensor.c b/drivers/media/platform/vimc/vimc-sensor.c
index 6c53b9fc1617..32380f504591 100644
--- a/drivers/media/platform/vimc/vimc-sensor.c
+++ b/drivers/media/platform/vimc/vimc-sensor.c
@@ -5,10 +5,6 @@
* Copyright (C) 2015-2017 Helen Koike <helen.fornazier@gmail.com>
*/
-#include <linux/component.h>
-#include <linux/module.h>
-#include <linux/mod_devicetable.h>
-#include <linux/platform_device.h>
#include <linux/v4l2-mediabus.h>
#include <linux/vmalloc.h>
#include <media/v4l2-ctrls.h>
@@ -18,18 +14,15 @@
#include "vimc-common.h"
-#define VIMC_SEN_DRV_NAME "vimc-sensor"
-
struct vimc_sen_device {
struct vimc_ent_device ved;
struct v4l2_subdev sd;
- struct device *dev;
struct tpg_data tpg;
- struct task_struct *kthread_sen;
u8 *frame;
/* The active format */
struct v4l2_mbus_framefmt mbus_format;
struct v4l2_ctrl_handler hdl;
+ struct media_pad pad;
};
static const struct v4l2_mbus_framefmt fmt_default = {
@@ -164,7 +157,7 @@ static int vimc_sen_set_fmt(struct v4l2_subdev *sd,
/* Set the new format */
vimc_sen_adjust_fmt(&fmt->format);
- dev_dbg(vsen->dev, "%s: format update: "
+ dev_dbg(vsen->ved.dev, "%s: format update: "
"old:%dx%d (0x%x, %d, %d, %d, %d) "
"new:%dx%d (0x%x, %d, %d, %d, %d)\n", vsen->sd.name,
/* old */
@@ -208,10 +201,6 @@ static int vimc_sen_s_stream(struct v4l2_subdev *sd, int enable)
const struct vimc_pix_map *vpix;
unsigned int frame_size;
- if (vsen->kthread_sen)
- /* tpg is already executing */
- return 0;
-
/* Calculate the frame size */
vpix = vimc_pix_map_by_code(vsen->mbus_format.code);
frame_size = vsen->mbus_format.width * vpix->bpp *
@@ -297,6 +286,7 @@ static void vimc_sen_release(struct v4l2_subdev *sd)
v4l2_ctrl_handler_free(&vsen->hdl);
tpg_free(&vsen->tpg);
+ media_entity_cleanup(vsen->ved.ent);
kfree(vsen);
}
@@ -304,14 +294,12 @@ static const struct v4l2_subdev_internal_ops vimc_sen_int_ops = {
.release = vimc_sen_release,
};
-static void vimc_sen_comp_unbind(struct device *comp, struct device *master,
- void *master_data)
+void vimc_sen_rm(struct vimc_device *vimc, struct vimc_ent_device *ved)
{
- struct vimc_ent_device *ved = dev_get_drvdata(comp);
- struct vimc_sen_device *vsen =
- container_of(ved, struct vimc_sen_device, ved);
+ struct vimc_sen_device *vsen;
- vimc_ent_sd_unregister(ved, &vsen->sd);
+ vsen = container_of(ved, struct vimc_sen_device, ved);
+ v4l2_device_unregister_subdev(&vsen->sd);
}
/* Image Processing Controls */
@@ -331,18 +319,17 @@ static const struct v4l2_ctrl_config vimc_sen_ctrl_test_pattern = {
.qmenu = tpg_pattern_strings,
};
-static int vimc_sen_comp_bind(struct device *comp, struct device *master,
- void *master_data)
+struct vimc_ent_device *vimc_sen_add(struct vimc_device *vimc,
+ const char *vcfg_name)
{
- struct v4l2_device *v4l2_dev = master_data;
- struct vimc_platform_data *pdata = comp->platform_data;
+ struct v4l2_device *v4l2_dev = &vimc->v4l2_dev;
struct vimc_sen_device *vsen;
int ret;
/* Allocate the vsen struct */
vsen = kzalloc(sizeof(*vsen), GFP_KERNEL);
if (!vsen)
- return -ENOMEM;
+ return NULL;
v4l2_ctrl_handler_init(&vsen->hdl, 4);
@@ -366,78 +353,36 @@ static int vimc_sen_comp_bind(struct device *comp, struct device *master,
goto err_free_vsen;
}
+ /* Initialize the test pattern generator */
+ tpg_init(&vsen->tpg, vsen->mbus_format.width,
+ vsen->mbus_format.height);
+ ret = tpg_alloc(&vsen->tpg, VIMC_FRAME_MAX_WIDTH);
+ if (ret)
+ goto err_free_hdl;
+
/* Initialize ved and sd */
+ vsen->pad.flags = MEDIA_PAD_FL_SOURCE;
ret = vimc_ent_sd_register(&vsen->ved, &vsen->sd, v4l2_dev,
- pdata->entity_name,
- MEDIA_ENT_F_CAM_SENSOR, 1,
- (const unsigned long[1]) {MEDIA_PAD_FL_SOURCE},
+ vcfg_name,
+ MEDIA_ENT_F_CAM_SENSOR, 1, &vsen->pad,
&vimc_sen_int_ops, &vimc_sen_ops);
if (ret)
- goto err_free_hdl;
+ goto err_free_tpg;
vsen->ved.process_frame = vimc_sen_process_frame;
- dev_set_drvdata(comp, &vsen->ved);
- vsen->dev = comp;
+ vsen->ved.dev = &vimc->pdev.dev;
/* Initialize the frame format */
vsen->mbus_format = fmt_default;
- /* Initialize the test pattern generator */
- tpg_init(&vsen->tpg, vsen->mbus_format.width,
- vsen->mbus_format.height);
- ret = tpg_alloc(&vsen->tpg, VIMC_FRAME_MAX_WIDTH);
- if (ret)
- goto err_unregister_ent_sd;
-
- return 0;
+ return &vsen->ved;
-err_unregister_ent_sd:
- vimc_ent_sd_unregister(&vsen->ved, &vsen->sd);
+err_free_tpg:
+ tpg_free(&vsen->tpg);
err_free_hdl:
v4l2_ctrl_handler_free(&vsen->hdl);
err_free_vsen:
kfree(vsen);
- return ret;
-}
-
-static const struct component_ops vimc_sen_comp_ops = {
- .bind = vimc_sen_comp_bind,
- .unbind = vimc_sen_comp_unbind,
-};
-
-static int vimc_sen_probe(struct platform_device *pdev)
-{
- return component_add(&pdev->dev, &vimc_sen_comp_ops);
-}
-
-static int vimc_sen_remove(struct platform_device *pdev)
-{
- component_del(&pdev->dev, &vimc_sen_comp_ops);
-
- return 0;
+ return NULL;
}
-
-static const struct platform_device_id vimc_sen_driver_ids[] = {
- {
- .name = VIMC_SEN_DRV_NAME,
- },
- { }
-};
-
-static struct platform_driver vimc_sen_pdrv = {
- .probe = vimc_sen_probe,
- .remove = vimc_sen_remove,
- .id_table = vimc_sen_driver_ids,
- .driver = {
- .name = VIMC_SEN_DRV_NAME,
- },
-};
-
-module_platform_driver(vimc_sen_pdrv);
-
-MODULE_DEVICE_TABLE(platform, vimc_sen_driver_ids);
-
-MODULE_DESCRIPTION("Virtual Media Controller Driver (VIMC) Sensor");
-MODULE_AUTHOR("Helen Mae Koike Fornazier <helen.fornazier@gmail.com>");
-MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/vimc/vimc-streamer.c b/drivers/media/platform/vimc/vimc-streamer.c
index 048d770e498b..cd6b55433c9e 100644
--- a/drivers/media/platform/vimc/vimc-streamer.c
+++ b/drivers/media/platform/vimc/vimc-streamer.c
@@ -7,7 +7,6 @@
*/
#include <linux/init.h>
-#include <linux/module.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
@@ -97,17 +96,26 @@ static int vimc_streamer_pipeline_init(struct vimc_stream *stream,
sd = media_entity_to_v4l2_subdev(ved->ent);
ret = v4l2_subdev_call(sd, video, s_stream, 1);
if (ret && ret != -ENOIOCTLCMD) {
- pr_err("subdev_call error %s\n",
- ved->ent->name);
+ dev_err(ved->dev, "subdev_call error %s\n",
+ ved->ent->name);
vimc_streamer_pipeline_terminate(stream);
return ret;
}
}
entity = vimc_get_source_entity(ved->ent);
- /* Check if the end of the pipeline was reached*/
- if (!entity)
+ /* Check if the end of the pipeline was reached */
+ if (!entity) {
+ /* the first entity of the pipe should be source only */
+ if (!vimc_is_source(ved->ent)) {
+ dev_err(ved->dev,
+ "first entity in the pipe '%s' is not a source\n",
+ ved->ent->name);
+ vimc_streamer_pipeline_terminate(stream);
+ return -EPIPE;
+ }
return 0;
+ }
/* Get the next device in the pipeline */
if (is_media_entity_v4l2_subdev(entity)) {
@@ -217,4 +225,3 @@ int vimc_streamer_s_stream(struct vimc_stream *stream,
return 0;
}
-EXPORT_SYMBOL_GPL(vimc_streamer_s_stream);
diff --git a/drivers/media/platform/vivid/Makefile b/drivers/media/platform/vivid/Makefile
index 2f5762e3309a..e8a50c506dc9 100644
--- a/drivers/media/platform/vivid/Makefile
+++ b/drivers/media/platform/vivid/Makefile
@@ -3,7 +3,7 @@ vivid-objs := vivid-core.o vivid-ctrls.o vivid-vid-common.o vivid-vbi-gen.o \
vivid-vid-cap.o vivid-vid-out.o vivid-kthread-cap.o vivid-kthread-out.o \
vivid-radio-rx.o vivid-radio-tx.o vivid-radio-common.o \
vivid-rds-gen.o vivid-sdr-cap.o vivid-vbi-cap.o vivid-vbi-out.o \
- vivid-osd.o
+ vivid-osd.o vivid-meta-cap.o vivid-meta-out.o
ifeq ($(CONFIG_VIDEO_VIVID_CEC),y)
vivid-objs += vivid-cec.o
endif
diff --git a/drivers/media/platform/vivid/vivid-cec.c b/drivers/media/platform/vivid/vivid-cec.c
index 4d822dbed972..4d2413e87730 100644
--- a/drivers/media/platform/vivid/vivid-cec.c
+++ b/drivers/media/platform/vivid/vivid-cec.c
@@ -276,12 +276,11 @@ struct cec_adapter *vivid_cec_alloc_adap(struct vivid_dev *dev,
unsigned int idx,
bool is_source)
{
- char name[sizeof(dev->vid_out_dev.name) + 2];
u32 caps = CEC_CAP_DEFAULTS | CEC_CAP_MONITOR_ALL | CEC_CAP_MONITOR_PIN;
+ char name[32];
- snprintf(name, sizeof(name), "%s%d",
- is_source ? dev->vid_out_dev.name : dev->vid_cap_dev.name,
- idx);
+ snprintf(name, sizeof(name), "vivid-%03d-vid-%s%d",
+ dev->inst, is_source ? "out" : "cap", idx);
return cec_allocate_adapter(&vivid_cec_adap_ops, dev,
name, caps, 1);
}
diff --git a/drivers/media/platform/vivid/vivid-core.c b/drivers/media/platform/vivid/vivid-core.c
index 53315c8dd2bb..c184f9b0be69 100644
--- a/drivers/media/platform/vivid/vivid-core.c
+++ b/drivers/media/platform/vivid/vivid-core.c
@@ -37,6 +37,8 @@
#include "vivid-osd.h"
#include "vivid-cec.h"
#include "vivid-ctrls.h"
+#include "vivid-meta-cap.h"
+#include "vivid-meta-out.h"
#define VIVID_MODULE_NAME "vivid"
@@ -79,6 +81,14 @@ static int radio_tx_nr[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = -1 };
module_param_array(radio_tx_nr, int, NULL, 0444);
MODULE_PARM_DESC(radio_tx_nr, " radioX start number, -1 is autodetect");
+static int meta_cap_nr[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = -1 };
+module_param_array(meta_cap_nr, int, NULL, 0444);
+MODULE_PARM_DESC(meta_cap_nr, " videoX start number, -1 is autodetect");
+
+static int meta_out_nr[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = -1 };
+module_param_array(meta_out_nr, int, NULL, 0444);
+MODULE_PARM_DESC(meta_out_nr, " videoX start number, -1 is autodetect");
+
static int ccs_cap_mode[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = -1 };
module_param_array(ccs_cap_mode, int, NULL, 0444);
MODULE_PARM_DESC(ccs_cap_mode, " capture crop/compose/scale mode:\n"
@@ -95,10 +105,15 @@ static unsigned multiplanar[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = 1
module_param_array(multiplanar, uint, NULL, 0444);
MODULE_PARM_DESC(multiplanar, " 1 (default) creates a single planar device, 2 creates a multiplanar device.");
-/* Default: video + vbi-cap (raw and sliced) + radio rx + radio tx + sdr + vbi-out + vid-out */
-static unsigned node_types[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = 0x1d3d };
+/*
+ * Default: video + vbi-cap (raw and sliced) + radio rx + radio tx + sdr +
+ * vbi-out + vid-out + meta-cap + meta-out
+ */
+static unsigned int node_types[VIVID_MAX_DEVS] = {
+ [0 ... (VIVID_MAX_DEVS - 1)] = 0x61d3d
+};
module_param_array(node_types, uint, NULL, 0444);
-MODULE_PARM_DESC(node_types, " node types, default is 0x1d3d. Bitmask with the following meaning:\n"
+MODULE_PARM_DESC(node_types, " node types, default is 0x61d3d. Bitmask with the following meaning:\n"
"\t\t bit 0: Video Capture node\n"
"\t\t bit 2-3: VBI Capture node: 0 = none, 1 = raw vbi, 2 = sliced vbi, 3 = both\n"
"\t\t bit 4: Radio Receiver node\n"
@@ -106,7 +121,9 @@ MODULE_PARM_DESC(node_types, " node types, default is 0x1d3d. Bitmask with the f
"\t\t bit 8: Video Output node\n"
"\t\t bit 10-11: VBI Output node: 0 = none, 1 = raw vbi, 2 = sliced vbi, 3 = both\n"
"\t\t bit 12: Radio Transmitter node\n"
- "\t\t bit 16: Framebuffer for testing overlays");
+ "\t\t bit 16: Framebuffer for testing overlays\n"
+ "\t\t bit 17: Metadata Capture node\n"
+ "\t\t bit 18: Metadata Output node\n");
/* Default: 4 inputs */
static unsigned num_inputs[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = 4 };
@@ -205,7 +222,8 @@ static int vidioc_querycap(struct file *file, void *priv,
cap->capabilities = dev->vid_cap_caps | dev->vid_out_caps |
dev->vbi_cap_caps | dev->vbi_out_caps |
dev->radio_rx_caps | dev->radio_tx_caps |
- dev->sdr_cap_caps | V4L2_CAP_DEVICE_CAPS;
+ dev->sdr_cap_caps | dev->meta_cap_caps |
+ dev->meta_out_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
@@ -433,7 +451,9 @@ static bool vivid_is_last_user(struct vivid_dev *dev)
vivid_is_in_use(&dev->vbi_out_dev) +
vivid_is_in_use(&dev->sdr_cap_dev) +
vivid_is_in_use(&dev->radio_rx_dev) +
- vivid_is_in_use(&dev->radio_tx_dev);
+ vivid_is_in_use(&dev->radio_tx_dev) +
+ vivid_is_in_use(&dev->meta_cap_dev) +
+ vivid_is_in_use(&dev->meta_out_dev);
return uses == 1;
}
@@ -459,6 +479,8 @@ static int vivid_fop_release(struct file *file)
set_bit(V4L2_FL_REGISTERED, &dev->sdr_cap_dev.flags);
set_bit(V4L2_FL_REGISTERED, &dev->radio_rx_dev.flags);
set_bit(V4L2_FL_REGISTERED, &dev->radio_tx_dev.flags);
+ set_bit(V4L2_FL_REGISTERED, &dev->meta_cap_dev.flags);
+ set_bit(V4L2_FL_REGISTERED, &dev->meta_out_dev.flags);
}
mutex_unlock(&dev->mutex);
if (file->private_data == dev->overlay_cap_owner)
@@ -604,6 +626,16 @@ static const struct v4l2_ioctl_ops vivid_ioctl_ops = {
.vidioc_log_status = vidioc_log_status,
.vidioc_subscribe_event = vidioc_subscribe_event,
.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+
+ .vidioc_enum_fmt_meta_cap = vidioc_enum_fmt_meta_cap,
+ .vidioc_g_fmt_meta_cap = vidioc_g_fmt_meta_cap,
+ .vidioc_s_fmt_meta_cap = vidioc_g_fmt_meta_cap,
+ .vidioc_try_fmt_meta_cap = vidioc_g_fmt_meta_cap,
+
+ .vidioc_enum_fmt_meta_out = vidioc_enum_fmt_meta_out,
+ .vidioc_g_fmt_meta_out = vidioc_g_fmt_meta_out,
+ .vidioc_s_fmt_meta_out = vidioc_g_fmt_meta_out,
+ .vidioc_try_fmt_meta_out = vidioc_g_fmt_meta_out,
};
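
The s_fmt and try_fmt hooks deliberately point at the g_fmt handler: each metadata node exposes exactly one fixed format, so all three ioctls return the same answer. A handler of that shape might look like this (a sketch; the fourcc and buffer size are illustrative, not vivid's actual values):

static int example_g_fmt_meta_cap(struct file *file, void *priv,
				  struct v4l2_format *f)
{
	struct v4l2_meta_format *meta = &f->fmt.meta;

	meta->dataformat = v4l2_fourcc('V', 'I', 'V', 'D');	/* illustrative */
	meta->buffersize = 1024;				/* illustrative */
	return 0;
}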
/* -----------------------------------------------------------------
@@ -616,6 +648,9 @@ static void vivid_dev_release(struct v4l2_device *v4l2_dev)
vivid_free_controls(dev);
v4l2_device_unregister(&dev->v4l2_dev);
+#ifdef CONFIG_MEDIA_CONTROLLER
+ media_device_cleanup(&dev->mdev);
+#endif
vfree(dev->scaled_line);
vfree(dev->blended_line);
vfree(dev->edid);
@@ -645,14 +680,44 @@ static const struct media_device_ops vivid_media_ops = {
};
#endif
+static int vivid_create_queue(struct vivid_dev *dev,
+ struct vb2_queue *q,
+ u32 buf_type,
+ unsigned int min_buffers_needed,
+ const struct vb2_ops *ops)
+{
+ if (buf_type == V4L2_BUF_TYPE_VIDEO_CAPTURE && dev->multiplanar)
+ buf_type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ else if (buf_type == V4L2_BUF_TYPE_VIDEO_OUTPUT && dev->multiplanar)
+ buf_type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ else if (buf_type == V4L2_BUF_TYPE_VBI_CAPTURE && !dev->has_raw_vbi_cap)
+ buf_type = V4L2_BUF_TYPE_SLICED_VBI_CAPTURE;
+ else if (buf_type == V4L2_BUF_TYPE_VBI_OUTPUT && !dev->has_raw_vbi_out)
+ buf_type = V4L2_BUF_TYPE_SLICED_VBI_OUTPUT;
+
+ q->type = buf_type;
+ q->io_modes = VB2_MMAP | VB2_DMABUF;
+ q->io_modes |= V4L2_TYPE_IS_OUTPUT(buf_type) ? VB2_WRITE : VB2_READ;
+ if (allocators[dev->inst] != 1)
+ q->io_modes |= VB2_USERPTR;
+ q->drv_priv = dev;
+ q->buf_struct_size = sizeof(struct vivid_buffer);
+ q->ops = ops;
+ q->mem_ops = allocators[dev->inst] == 1 ? &vb2_dma_contig_memops :
+ &vb2_vmalloc_memops;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->min_buffers_needed = min_buffers_needed;
+ q->lock = &dev->mutex;
+ q->dev = dev->v4l2_dev.dev;
+ q->supports_requests = true;
+
+ return vb2_queue_init(q);
+}
+
static int vivid_create_instance(struct platform_device *pdev, int inst)
{
static const struct v4l2_dv_timings def_dv_timings =
V4L2_DV_BT_CEA_1280X720P60;
- static const struct vb2_mem_ops * const vivid_mem_ops[2] = {
- &vb2_vmalloc_memops,
- &vb2_dma_contig_memops,
- };
unsigned in_type_counter[4] = { 0, 0, 0, 0 };
unsigned out_type_counter[4] = { 0, 0, 0, 0 };
int ccs_cap = ccs_cap_mode[inst];
@@ -661,9 +726,7 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
bool has_modulator;
struct vivid_dev *dev;
struct video_device *vfd;
- struct vb2_queue *q;
unsigned node_type = node_types[inst];
- unsigned int allocator = allocators[inst];
v4l2_std_id tvnorms_cap = 0, tvnorms_out = 0;
int ret;
int i;
@@ -758,6 +821,25 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
dev->has_vbi_cap = dev->has_raw_vbi_cap | dev->has_sliced_vbi_cap;
}
+ /* do we create a meta capture device? */
+ dev->has_meta_cap = node_type & 0x20000;
+
+ /* sanity checks */
+ if ((in_type_counter[WEBCAM] || in_type_counter[HDMI]) &&
+ !dev->has_vid_cap && !dev->has_meta_cap) {
+ v4l2_warn(&dev->v4l2_dev,
+ "Webcam or HDMI input without video or metadata nodes\n");
+ kfree(dev);
+ return -EINVAL;
+ }
+ if ((in_type_counter[TV] || in_type_counter[SVID]) &&
+ !dev->has_vid_cap && !dev->has_vbi_cap && !dev->has_meta_cap) {
+ v4l2_warn(&dev->v4l2_dev,
+ "TV or S-Video input without video, VBI or metadata nodes\n");
+ kfree(dev);
+ return -EINVAL;
+ }
+
/* do we create a video output device? */
dev->has_vid_out = node_type & 0x0100;
@@ -768,6 +850,24 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
dev->has_vbi_out = dev->has_raw_vbi_out | dev->has_sliced_vbi_out;
}
+ /* do we create a metadata output device? */
+ dev->has_meta_out = node_type & 0x40000;
+
+ /* sanity checks */
+ if (out_type_counter[SVID] &&
+ !dev->has_vid_out && !dev->has_vbi_out && !dev->has_meta_out) {
+ v4l2_warn(&dev->v4l2_dev,
+ "S-Video output without video, VBI or metadata nodes\n");
+ kfree(dev);
+ return -EINVAL;
+ }
+ if (out_type_counter[HDMI] && !dev->has_vid_out && !dev->has_meta_out) {
+ v4l2_warn(&dev->v4l2_dev,
+ "HDMI output without video or metadata nodes\n");
+ kfree(dev);
+ return -EINVAL;
+ }
+
/* do we create a radio receiver device? */
dev->has_radio_rx = node_type & 0x0010;
@@ -777,6 +877,9 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
/* do we create a software defined radio capture device? */
dev->has_sdr_cap = node_type & 0x0020;
+ /* do we have a TV tuner? */
+ dev->has_tv_tuner = in_type_counter[TV];
+
/* do we have a tuner? */
has_tuner = ((dev->has_vid_cap || dev->has_vbi_cap) && in_type_counter[TV]) ||
dev->has_radio_rx || dev->has_sdr_cap;
@@ -828,7 +931,7 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
dev->vid_cap_caps |= V4L2_CAP_STREAMING | V4L2_CAP_READWRITE;
if (dev->has_audio_inputs)
dev->vid_cap_caps |= V4L2_CAP_AUDIO;
- if (in_type_counter[TV])
+ if (dev->has_tv_tuner)
dev->vid_cap_caps |= V4L2_CAP_TUNER;
}
if (dev->has_vid_out) {
@@ -849,7 +952,7 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
dev->vbi_cap_caps |= V4L2_CAP_STREAMING | V4L2_CAP_READWRITE;
if (dev->has_audio_inputs)
dev->vbi_cap_caps |= V4L2_CAP_AUDIO;
- if (in_type_counter[TV])
+ if (dev->has_tv_tuner)
dev->vbi_cap_caps |= V4L2_CAP_TUNER;
}
if (dev->has_vbi_out) {
@@ -875,6 +978,23 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
dev->radio_tx_caps = V4L2_CAP_RDS_OUTPUT | V4L2_CAP_MODULATOR |
V4L2_CAP_READWRITE;
+ /* set up the capabilities of meta capture device */
+ if (dev->has_meta_cap) {
+ dev->meta_cap_caps = V4L2_CAP_META_CAPTURE |
+ V4L2_CAP_STREAMING | V4L2_CAP_READWRITE;
+ if (dev->has_audio_inputs)
+ dev->meta_cap_caps |= V4L2_CAP_AUDIO;
+ if (dev->has_tv_tuner)
+ dev->meta_cap_caps |= V4L2_CAP_TUNER;
+ }
+ /* set up the capabilities of meta output device */
+ if (dev->has_meta_out) {
+ dev->meta_out_caps = V4L2_CAP_META_OUTPUT |
+ V4L2_CAP_STREAMING | V4L2_CAP_READWRITE;
+ if (dev->has_audio_outputs)
+ dev->meta_out_caps |= V4L2_CAP_AUDIO;
+ }
+
ret = -ENOMEM;
/* initialize the test pattern generator */
tpg_init(&dev->tpg, 640, 360);
@@ -934,6 +1054,9 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
v4l2_disable_ioctl(&dev->vbi_cap_dev, VIDIOC_S_AUDIO);
v4l2_disable_ioctl(&dev->vbi_cap_dev, VIDIOC_G_AUDIO);
v4l2_disable_ioctl(&dev->vbi_cap_dev, VIDIOC_ENUMAUDIO);
+ v4l2_disable_ioctl(&dev->meta_cap_dev, VIDIOC_S_AUDIO);
+ v4l2_disable_ioctl(&dev->meta_cap_dev, VIDIOC_G_AUDIO);
+ v4l2_disable_ioctl(&dev->meta_cap_dev, VIDIOC_ENUMAUDIO);
}
if (!dev->has_audio_outputs) {
v4l2_disable_ioctl(&dev->vid_out_dev, VIDIOC_S_AUDOUT);
@@ -942,6 +1065,9 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
v4l2_disable_ioctl(&dev->vbi_out_dev, VIDIOC_S_AUDOUT);
v4l2_disable_ioctl(&dev->vbi_out_dev, VIDIOC_G_AUDOUT);
v4l2_disable_ioctl(&dev->vbi_out_dev, VIDIOC_ENUMAUDOUT);
+ v4l2_disable_ioctl(&dev->meta_out_dev, VIDIOC_S_AUDOUT);
+ v4l2_disable_ioctl(&dev->meta_out_dev, VIDIOC_G_AUDOUT);
+ v4l2_disable_ioctl(&dev->meta_out_dev, VIDIOC_ENUMAUDOUT);
}
if (!in_type_counter[TV] && !in_type_counter[SVID]) {
v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_S_STD);
@@ -959,12 +1085,16 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_G_FREQUENCY);
v4l2_disable_ioctl(&dev->vbi_cap_dev, VIDIOC_S_FREQUENCY);
v4l2_disable_ioctl(&dev->vbi_cap_dev, VIDIOC_G_FREQUENCY);
+ v4l2_disable_ioctl(&dev->meta_cap_dev, VIDIOC_S_FREQUENCY);
+ v4l2_disable_ioctl(&dev->meta_cap_dev, VIDIOC_G_FREQUENCY);
}
if (!has_tuner) {
v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_S_TUNER);
v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_G_TUNER);
v4l2_disable_ioctl(&dev->vbi_cap_dev, VIDIOC_S_TUNER);
v4l2_disable_ioctl(&dev->vbi_cap_dev, VIDIOC_G_TUNER);
+ v4l2_disable_ioctl(&dev->meta_cap_dev, VIDIOC_S_TUNER);
+ v4l2_disable_ioctl(&dev->meta_cap_dev, VIDIOC_G_TUNER);
}
if (in_type_counter[HDMI] == 0) {
v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_S_EDID);
@@ -990,12 +1120,15 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_S_HW_FREQ_SEEK);
v4l2_disable_ioctl(&dev->vbi_cap_dev, VIDIOC_S_HW_FREQ_SEEK);
v4l2_disable_ioctl(&dev->sdr_cap_dev, VIDIOC_S_HW_FREQ_SEEK);
+ v4l2_disable_ioctl(&dev->meta_cap_dev, VIDIOC_S_HW_FREQ_SEEK);
v4l2_disable_ioctl(&dev->vid_out_dev, VIDIOC_S_FREQUENCY);
v4l2_disable_ioctl(&dev->vid_out_dev, VIDIOC_G_FREQUENCY);
v4l2_disable_ioctl(&dev->vid_out_dev, VIDIOC_ENUM_FRAMESIZES);
v4l2_disable_ioctl(&dev->vid_out_dev, VIDIOC_ENUM_FRAMEINTERVALS);
v4l2_disable_ioctl(&dev->vbi_out_dev, VIDIOC_S_FREQUENCY);
v4l2_disable_ioctl(&dev->vbi_out_dev, VIDIOC_G_FREQUENCY);
+ v4l2_disable_ioctl(&dev->meta_out_dev, VIDIOC_S_FREQUENCY);
+ v4l2_disable_ioctl(&dev->meta_out_dev, VIDIOC_G_FREQUENCY);
/* configure internal data */
dev->fmt_cap = &vivid_formats[0];
@@ -1078,6 +1211,8 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
INIT_LIST_HEAD(&dev->vbi_cap_active);
INIT_LIST_HEAD(&dev->vbi_out_active);
INIT_LIST_HEAD(&dev->sdr_cap_active);
+ INIT_LIST_HEAD(&dev->meta_cap_active);
+ INIT_LIST_HEAD(&dev->meta_out_active);
INIT_LIST_HEAD(&dev->cec_work_list);
spin_lock_init(&dev->cec_slock);
@@ -1092,126 +1227,69 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
goto unreg_dev;
}
- if (allocator == 1)
+ if (allocators[inst] == 1)
dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- else if (allocator >= ARRAY_SIZE(vivid_mem_ops))
- allocator = 0;
/* start creating the vb2 queues */
if (dev->has_vid_cap) {
- snprintf(dev->vid_cap_dev.name, sizeof(dev->vid_cap_dev.name),
- "vivid-%03d-vid-cap", inst);
/* initialize vid_cap queue */
- q = &dev->vb_vid_cap_q;
- q->type = dev->multiplanar ? V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE :
- V4L2_BUF_TYPE_VIDEO_CAPTURE;
- q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ;
- if (!allocator)
- q->io_modes |= VB2_USERPTR;
- q->drv_priv = dev;
- q->buf_struct_size = sizeof(struct vivid_buffer);
- q->ops = &vivid_vid_cap_qops;
- q->mem_ops = vivid_mem_ops[allocator];
- q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
- q->min_buffers_needed = 2;
- q->lock = &dev->mutex;
- q->dev = dev->v4l2_dev.dev;
- q->supports_requests = true;
-
- ret = vb2_queue_init(q);
+ ret = vivid_create_queue(dev, &dev->vb_vid_cap_q,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE, 2,
+ &vivid_vid_cap_qops);
if (ret)
goto unreg_dev;
}
if (dev->has_vid_out) {
- snprintf(dev->vid_out_dev.name, sizeof(dev->vid_out_dev.name),
- "vivid-%03d-vid-out", inst);
/* initialize vid_out queue */
- q = &dev->vb_vid_out_q;
- q->type = dev->multiplanar ? V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE :
- V4L2_BUF_TYPE_VIDEO_OUTPUT;
- q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_WRITE;
- if (!allocator)
- q->io_modes |= VB2_USERPTR;
- q->drv_priv = dev;
- q->buf_struct_size = sizeof(struct vivid_buffer);
- q->ops = &vivid_vid_out_qops;
- q->mem_ops = vivid_mem_ops[allocator];
- q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
- q->min_buffers_needed = 2;
- q->lock = &dev->mutex;
- q->dev = dev->v4l2_dev.dev;
- q->supports_requests = true;
-
- ret = vb2_queue_init(q);
+ ret = vivid_create_queue(dev, &dev->vb_vid_out_q,
+ V4L2_BUF_TYPE_VIDEO_OUTPUT, 2,
+ &vivid_vid_out_qops);
if (ret)
goto unreg_dev;
}
if (dev->has_vbi_cap) {
/* initialize vbi_cap queue */
- q = &dev->vb_vbi_cap_q;
- q->type = dev->has_raw_vbi_cap ? V4L2_BUF_TYPE_VBI_CAPTURE :
- V4L2_BUF_TYPE_SLICED_VBI_CAPTURE;
- q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ;
- if (!allocator)
- q->io_modes |= VB2_USERPTR;
- q->drv_priv = dev;
- q->buf_struct_size = sizeof(struct vivid_buffer);
- q->ops = &vivid_vbi_cap_qops;
- q->mem_ops = vivid_mem_ops[allocator];
- q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
- q->min_buffers_needed = 2;
- q->lock = &dev->mutex;
- q->dev = dev->v4l2_dev.dev;
- q->supports_requests = true;
-
- ret = vb2_queue_init(q);
+ ret = vivid_create_queue(dev, &dev->vb_vbi_cap_q,
+ V4L2_BUF_TYPE_VBI_CAPTURE, 2,
+ &vivid_vbi_cap_qops);
if (ret)
goto unreg_dev;
}
if (dev->has_vbi_out) {
/* initialize vbi_out queue */
- q = &dev->vb_vbi_out_q;
- q->type = dev->has_raw_vbi_out ? V4L2_BUF_TYPE_VBI_OUTPUT :
- V4L2_BUF_TYPE_SLICED_VBI_OUTPUT;
- q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_WRITE;
- if (!allocator)
- q->io_modes |= VB2_USERPTR;
- q->drv_priv = dev;
- q->buf_struct_size = sizeof(struct vivid_buffer);
- q->ops = &vivid_vbi_out_qops;
- q->mem_ops = vivid_mem_ops[allocator];
- q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
- q->min_buffers_needed = 2;
- q->lock = &dev->mutex;
- q->dev = dev->v4l2_dev.dev;
- q->supports_requests = true;
-
- ret = vb2_queue_init(q);
+ ret = vivid_create_queue(dev, &dev->vb_vbi_out_q,
+ V4L2_BUF_TYPE_VBI_OUTPUT, 2,
+ &vivid_vbi_out_qops);
if (ret)
goto unreg_dev;
}
if (dev->has_sdr_cap) {
/* initialize sdr_cap queue */
- q = &dev->vb_sdr_cap_q;
- q->type = V4L2_BUF_TYPE_SDR_CAPTURE;
- q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ;
- if (!allocator)
- q->io_modes |= VB2_USERPTR;
- q->drv_priv = dev;
- q->buf_struct_size = sizeof(struct vivid_buffer);
- q->ops = &vivid_sdr_cap_qops;
- q->mem_ops = vivid_mem_ops[allocator];
- q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
- q->min_buffers_needed = 8;
- q->lock = &dev->mutex;
- q->dev = dev->v4l2_dev.dev;
- q->supports_requests = true;
-
- ret = vb2_queue_init(q);
+ ret = vivid_create_queue(dev, &dev->vb_sdr_cap_q,
+ V4L2_BUF_TYPE_SDR_CAPTURE, 8,
+ &vivid_sdr_cap_qops);
+ if (ret)
+ goto unreg_dev;
+ }
+
+ if (dev->has_meta_cap) {
+ /* initialize meta_cap queue */
+ ret = vivid_create_queue(dev, &dev->vb_meta_cap_q,
+ V4L2_BUF_TYPE_META_CAPTURE, 2,
+ &vivid_meta_cap_qops);
+ if (ret)
+ goto unreg_dev;
+ }
+
+ if (dev->has_meta_out) {
+ /* initialize meta_out queue */
+ ret = vivid_create_queue(dev, &dev->vb_meta_out_q,
+ V4L2_BUF_TYPE_META_OUTPUT, 1,
+ &vivid_meta_out_qops);
if (ret)
goto unreg_dev;
}
@@ -1222,7 +1300,7 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
if (ret)
goto unreg_dev;
v4l2_info(&dev->v4l2_dev, "Framebuffer device registered as fb%d\n",
- dev->fb_info.node);
+ dev->fb_info.node);
}
#ifdef CONFIG_VIDEO_VIVID_CEC
@@ -1265,10 +1343,14 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
v4l2_ctrl_handler_setup(&dev->ctrl_hdl_radio_rx);
v4l2_ctrl_handler_setup(&dev->ctrl_hdl_radio_tx);
v4l2_ctrl_handler_setup(&dev->ctrl_hdl_sdr_cap);
+ v4l2_ctrl_handler_setup(&dev->ctrl_hdl_meta_cap);
+ v4l2_ctrl_handler_setup(&dev->ctrl_hdl_meta_out);
/* finally start creating the device nodes */
if (dev->has_vid_cap) {
vfd = &dev->vid_cap_dev;
+ snprintf(vfd->name, sizeof(vfd->name),
+ "vivid-%03d-vid-cap", inst);
vfd->fops = &vivid_fops;
vfd->ioctl_ops = &vivid_ioctl_ops;
vfd->device_caps = dev->vid_cap_caps;
@@ -1314,6 +1396,8 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
if (dev->has_vid_out) {
vfd = &dev->vid_out_dev;
+ snprintf(vfd->name, sizeof(vfd->name),
+ "vivid-%03d-vid-out", inst);
vfd->vfl_dir = VFL_DIR_TX;
vfd->fops = &vivid_fops;
vfd->ioctl_ops = &vivid_ioctl_ops;
@@ -1492,6 +1576,65 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
video_device_node_name(vfd));
}
+ if (dev->has_meta_cap) {
+ vfd = &dev->meta_cap_dev;
+ snprintf(vfd->name, sizeof(vfd->name),
+ "vivid-%03d-meta-cap", inst);
+ vfd->fops = &vivid_fops;
+ vfd->ioctl_ops = &vivid_ioctl_ops;
+ vfd->device_caps = dev->meta_cap_caps;
+ vfd->release = video_device_release_empty;
+ vfd->v4l2_dev = &dev->v4l2_dev;
+ vfd->queue = &dev->vb_meta_cap_q;
+ vfd->lock = &dev->mutex;
+ vfd->tvnorms = tvnorms_cap;
+ video_set_drvdata(vfd, dev);
+#ifdef CONFIG_MEDIA_CONTROLLER
+ dev->meta_cap_pad.flags = MEDIA_PAD_FL_SINK;
+ ret = media_entity_pads_init(&vfd->entity, 1,
+ &dev->meta_cap_pad);
+ if (ret)
+ goto unreg_dev;
+#endif
+ ret = video_register_device(vfd, VFL_TYPE_GRABBER,
+ meta_cap_nr[inst]);
+ if (ret < 0)
+ goto unreg_dev;
+ v4l2_info(&dev->v4l2_dev,
+ "V4L2 metadata capture device registered as %s\n",
+ video_device_node_name(vfd));
+ }
+
+ if (dev->has_meta_out) {
+ vfd = &dev->meta_out_dev;
+ snprintf(vfd->name, sizeof(vfd->name),
+ "vivid-%03d-meta-out", inst);
+ vfd->vfl_dir = VFL_DIR_TX;
+ vfd->fops = &vivid_fops;
+ vfd->ioctl_ops = &vivid_ioctl_ops;
+ vfd->device_caps = dev->meta_out_caps;
+ vfd->release = video_device_release_empty;
+ vfd->v4l2_dev = &dev->v4l2_dev;
+ vfd->queue = &dev->vb_meta_out_q;
+ vfd->lock = &dev->mutex;
+ vfd->tvnorms = tvnorms_out;
+ video_set_drvdata(vfd, dev);
+#ifdef CONFIG_MEDIA_CONTROLLER
+ dev->meta_out_pad.flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_pads_init(&vfd->entity, 1,
+ &dev->meta_out_pad);
+ if (ret)
+ goto unreg_dev;
+#endif
+ ret = video_register_device(vfd, VFL_TYPE_GRABBER,
+ meta_out_nr[inst]);
+ if (ret < 0)
+ goto unreg_dev;
+ v4l2_info(&dev->v4l2_dev,
+ "V4L2 metadata output device registered as %s\n",
+ video_device_node_name(vfd));
+ }
+
#ifdef CONFIG_MEDIA_CONTROLLER
/* Register the media device */
ret = media_device_register(&dev->mdev);
@@ -1508,6 +1651,8 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
return 0;
unreg_dev:
+ video_unregister_device(&dev->meta_out_dev);
+ video_unregister_device(&dev->meta_cap_dev);
video_unregister_device(&dev->radio_tx_dev);
video_unregister_device(&dev->radio_rx_dev);
video_unregister_device(&dev->sdr_cap_dev);
@@ -1580,7 +1725,6 @@ static int vivid_remove(struct platform_device *pdev)
#ifdef CONFIG_MEDIA_CONTROLLER
media_device_unregister(&dev->mdev);
- media_device_cleanup(&dev->mdev);
#endif
if (dev->has_vid_cap) {
@@ -1624,6 +1768,16 @@ static int vivid_remove(struct platform_device *pdev)
unregister_framebuffer(&dev->fb_info);
vivid_fb_release_buffers(dev);
}
+ if (dev->has_meta_cap) {
+ v4l2_info(&dev->v4l2_dev, "unregistering %s\n",
+ video_device_node_name(&dev->meta_cap_dev));
+ video_unregister_device(&dev->meta_cap_dev);
+ }
+ if (dev->has_meta_out) {
+ v4l2_info(&dev->v4l2_dev, "unregistering %s\n",
+ video_device_node_name(&dev->meta_out_dev));
+ video_unregister_device(&dev->meta_out_dev);
+ }
cec_unregister_adapter(dev->cec_rx_adap);
for (j = 0; j < MAX_OUTPUTS; j++)
cec_unregister_adapter(dev->cec_tx_adap[j]);
diff --git a/drivers/media/platform/vivid/vivid-core.h b/drivers/media/platform/vivid/vivid-core.h
index 7ebb14673c75..59192b67231c 100644
--- a/drivers/media/platform/vivid/vivid-core.h
+++ b/drivers/media/platform/vivid/vivid-core.h
@@ -131,6 +131,8 @@ struct vivid_dev {
struct media_pad vbi_cap_pad;
struct media_pad vbi_out_pad;
struct media_pad sdr_cap_pad;
+ struct media_pad meta_cap_pad;
+ struct media_pad meta_out_pad;
#endif
struct v4l2_ctrl_handler ctrl_hdl_user_gen;
struct v4l2_ctrl_handler ctrl_hdl_user_vid;
@@ -153,6 +155,11 @@ struct vivid_dev {
struct v4l2_ctrl_handler ctrl_hdl_radio_tx;
struct video_device sdr_cap_dev;
struct v4l2_ctrl_handler ctrl_hdl_sdr_cap;
+ struct video_device meta_cap_dev;
+ struct v4l2_ctrl_handler ctrl_hdl_meta_cap;
+ struct video_device meta_out_dev;
+ struct v4l2_ctrl_handler ctrl_hdl_meta_out;
+
spinlock_t slock;
struct mutex mutex;
@@ -164,6 +171,8 @@ struct vivid_dev {
u32 sdr_cap_caps;
u32 radio_rx_caps;
u32 radio_tx_caps;
+ u32 meta_cap_caps;
+ u32 meta_out_caps;
/* supported features */
bool multiplanar;
@@ -189,6 +198,9 @@ struct vivid_dev {
bool has_radio_tx;
bool has_sdr_cap;
bool has_fb;
+ bool has_meta_cap;
+ bool has_meta_out;
+ bool has_tv_tuner;
bool can_loop_video;
@@ -390,6 +402,8 @@ struct vivid_dev {
struct list_head vid_cap_active;
struct vb2_queue vb_vbi_cap_q;
struct list_head vbi_cap_active;
+ struct vb2_queue vb_meta_cap_q;
+ struct list_head meta_cap_active;
/* thread for generating video capture stream */
struct task_struct *kthread_vid_cap;
@@ -407,6 +421,9 @@ struct vivid_dev {
u32 vbi_cap_seq_count;
bool vbi_cap_streaming;
bool stream_sliced_vbi_cap;
+ u32 meta_cap_seq_start;
+ u32 meta_cap_seq_count;
+ bool meta_cap_streaming;
/* video output */
const struct vivid_fmt *fmt_out;
@@ -421,6 +438,8 @@ struct vivid_dev {
struct list_head vid_out_active;
struct vb2_queue vb_vbi_out_q;
struct list_head vbi_out_active;
+ struct vb2_queue vb_meta_out_q;
+ struct list_head meta_out_active;
/* video loop precalculated rectangles */
@@ -461,6 +480,9 @@ struct vivid_dev {
u32 vbi_out_seq_count;
bool vbi_out_streaming;
bool stream_sliced_vbi_out;
+ u32 meta_out_seq_start;
+ u32 meta_out_seq_count;
+ bool meta_out_streaming;
/* SDR capture */
struct vb2_queue vb_sdr_cap_q;
@@ -527,6 +549,9 @@ struct vivid_dev {
/* CEC OSD String */
char osd[14];
unsigned long osd_jiffies;
+
+ bool meta_pts;
+ bool meta_scr;
};
static inline bool vivid_is_webcam(const struct vivid_dev *dev)
diff --git a/drivers/media/platform/vivid/vivid-ctrls.c b/drivers/media/platform/vivid/vivid-ctrls.c
index cb19a9a73092..68e8124c7973 100644
--- a/drivers/media/platform/vivid/vivid-ctrls.c
+++ b/drivers/media/platform/vivid/vivid-ctrls.c
@@ -32,6 +32,7 @@
#define VIVID_CID_U32_ARRAY (VIVID_CID_CUSTOM_BASE + 8)
#define VIVID_CID_U16_MATRIX (VIVID_CID_CUSTOM_BASE + 9)
#define VIVID_CID_U8_4D_ARRAY (VIVID_CID_CUSTOM_BASE + 10)
+#define VIVID_CID_AREA (VIVID_CID_CUSTOM_BASE + 11)
#define VIVID_CID_VIVID_BASE (0x00f00000 | 0xf000)
#define VIVID_CID_VIVID_CLASS (0x00f00000 | 1)
@@ -94,6 +95,9 @@
#define VIVID_CID_SDR_CAP_FM_DEVIATION (VIVID_CID_VIVID_BASE + 110)
+#define VIVID_CID_META_CAP_GENERATE_PTS (VIVID_CID_VIVID_BASE + 111)
+#define VIVID_CID_META_CAP_GENERATE_SCR (VIVID_CID_VIVID_BASE + 112)
+
/* General User Controls */
static int vivid_user_gen_s_ctrl(struct v4l2_ctrl *ctrl)
@@ -110,6 +114,7 @@ static int vivid_user_gen_s_ctrl(struct v4l2_ctrl *ctrl)
clear_bit(V4L2_FL_REGISTERED, &dev->sdr_cap_dev.flags);
clear_bit(V4L2_FL_REGISTERED, &dev->radio_rx_dev.flags);
clear_bit(V4L2_FL_REGISTERED, &dev->radio_tx_dev.flags);
+ clear_bit(V4L2_FL_REGISTERED, &dev->meta_cap_dev.flags);
break;
case VIVID_CID_BUTTON:
dev->button_pressed = 30;
@@ -262,6 +267,18 @@ static const struct v4l2_ctrl_config vivid_ctrl_disconnect = {
.type = V4L2_CTRL_TYPE_BUTTON,
};
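+
+/* default value for the new V4L2_CTRL_TYPE_AREA test control below */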
+static const struct v4l2_area area = {
+ .width = 1000,
+ .height = 2000,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_area = {
+ .ops = &vivid_user_gen_ctrl_ops,
+ .id = VIVID_CID_AREA,
+ .name = "Area",
+ .type = V4L2_CTRL_TYPE_AREA,
+ .p_def.p_const = &area,
+};
/* Framebuffer Controls */
@@ -1421,6 +1438,47 @@ static const struct v4l2_ctrl_config vivid_ctrl_sdr_cap_fm_deviation = {
.step = 1,
};
+/* Metadata Capture Controls */
+
+static int vivid_meta_cap_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct vivid_dev *dev = container_of(ctrl->handler, struct vivid_dev,
+ ctrl_hdl_meta_cap);
+
+ switch (ctrl->id) {
+ case VIVID_CID_META_CAP_GENERATE_PTS:
+ dev->meta_pts = ctrl->val;
+ break;
+ case VIVID_CID_META_CAP_GENERATE_SCR:
+ dev->meta_scr = ctrl->val;
+ break;
+ }
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops vivid_meta_cap_ctrl_ops = {
+ .s_ctrl = vivid_meta_cap_s_ctrl,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_meta_has_pts = {
+ .ops = &vivid_meta_cap_ctrl_ops,
+ .id = VIVID_CID_META_CAP_GENERATE_PTS,
+ .name = "Generate PTS",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .max = 1,
+ .def = 1,
+ .step = 1,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_meta_has_src_clk = {
+ .ops = &vivid_meta_cap_ctrl_ops,
+ .id = VIVID_CID_META_CAP_GENERATE_SCR,
+ .name = "Generate SCR",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .max = 1,
+ .def = 1,
+ .step = 1,
+};
static const struct v4l2_ctrl_config vivid_ctrl_class = {
.ops = &vivid_user_gen_ctrl_ops,
@@ -1448,6 +1506,9 @@ int vivid_create_controls(struct vivid_dev *dev, bool show_ccs_cap,
struct v4l2_ctrl_handler *hdl_radio_rx = &dev->ctrl_hdl_radio_rx;
struct v4l2_ctrl_handler *hdl_radio_tx = &dev->ctrl_hdl_radio_tx;
struct v4l2_ctrl_handler *hdl_sdr_cap = &dev->ctrl_hdl_sdr_cap;
+ struct v4l2_ctrl_handler *hdl_meta_cap = &dev->ctrl_hdl_meta_cap;
+ struct v4l2_ctrl_handler *hdl_meta_out = &dev->ctrl_hdl_meta_out;
+
struct v4l2_ctrl_config vivid_ctrl_dv_timings = {
.ops = &vivid_vid_cap_ctrl_ops,
.id = VIVID_CID_DV_TIMINGS,
@@ -1486,6 +1547,10 @@ int vivid_create_controls(struct vivid_dev *dev, bool show_ccs_cap,
v4l2_ctrl_new_custom(hdl_radio_tx, &vivid_ctrl_class, NULL);
v4l2_ctrl_handler_init(hdl_sdr_cap, 19);
v4l2_ctrl_new_custom(hdl_sdr_cap, &vivid_ctrl_class, NULL);
+ v4l2_ctrl_handler_init(hdl_meta_cap, 2);
+ v4l2_ctrl_new_custom(hdl_meta_cap, &vivid_ctrl_class, NULL);
+ v4l2_ctrl_handler_init(hdl_meta_out, 2);
+ v4l2_ctrl_new_custom(hdl_meta_out, &vivid_ctrl_class, NULL);
/* User Controls */
dev->volume = v4l2_ctrl_new_std(hdl_user_aud, NULL,
@@ -1522,6 +1587,7 @@ int vivid_create_controls(struct vivid_dev *dev, bool show_ccs_cap,
dev->string = v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_string, NULL);
dev->bitmask = v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_bitmask, NULL);
dev->int_menu = v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_int_menu, NULL);
+ v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_area, NULL);
v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_u32_array, NULL);
v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_u16_matrix, NULL);
v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_u8_4d_array, NULL);
@@ -1743,6 +1809,13 @@ int vivid_create_controls(struct vivid_dev *dev, bool show_ccs_cap,
v4l2_ctrl_new_custom(hdl_sdr_cap,
&vivid_ctrl_sdr_cap_fm_deviation, NULL);
}
+ if (dev->has_meta_cap) {
+ v4l2_ctrl_new_custom(hdl_meta_cap,
+ &vivid_ctrl_meta_has_pts, NULL);
+ v4l2_ctrl_new_custom(hdl_meta_cap,
+ &vivid_ctrl_meta_has_src_clk, NULL);
+ }
+
if (hdl_user_gen->error)
return hdl_user_gen->error;
if (hdl_user_vid->error)
@@ -1817,6 +1890,20 @@ int vivid_create_controls(struct vivid_dev *dev, bool show_ccs_cap,
return hdl_sdr_cap->error;
dev->sdr_cap_dev.ctrl_handler = hdl_sdr_cap;
}
+ if (dev->has_meta_cap) {
+ v4l2_ctrl_add_handler(hdl_meta_cap, hdl_user_gen, NULL, false);
+ v4l2_ctrl_add_handler(hdl_meta_cap, hdl_streaming, NULL, false);
+ if (hdl_meta_cap->error)
+ return hdl_meta_cap->error;
+ dev->meta_cap_dev.ctrl_handler = hdl_meta_cap;
+ }
+ if (dev->has_meta_out) {
+ v4l2_ctrl_add_handler(hdl_meta_out, hdl_user_gen, NULL, false);
+ v4l2_ctrl_add_handler(hdl_meta_out, hdl_streaming, NULL, false);
+ if (hdl_meta_out->error)
+ return hdl_meta_out->error;
+ dev->meta_out_dev.ctrl_handler = hdl_meta_out;
+ }
return 0;
}
@@ -1836,4 +1923,6 @@ void vivid_free_controls(struct vivid_dev *dev)
v4l2_ctrl_handler_free(&dev->ctrl_hdl_sdtv_cap);
v4l2_ctrl_handler_free(&dev->ctrl_hdl_loop_cap);
v4l2_ctrl_handler_free(&dev->ctrl_hdl_fb);
+ v4l2_ctrl_handler_free(&dev->ctrl_hdl_meta_cap);
+ v4l2_ctrl_handler_free(&dev->ctrl_hdl_meta_out);
}
diff --git a/drivers/media/platform/vivid/vivid-kthread-cap.c b/drivers/media/platform/vivid/vivid-kthread-cap.c
index 003319d7816d..01a9d671b947 100644
--- a/drivers/media/platform/vivid/vivid-kthread-cap.c
+++ b/drivers/media/platform/vivid/vivid-kthread-cap.c
@@ -39,6 +39,7 @@
#include "vivid-osd.h"
#include "vivid-ctrls.h"
#include "vivid-kthread-cap.h"
+#include "vivid-meta-cap.h"
static inline v4l2_std_id vivid_get_std_cap(const struct vivid_dev *dev)
{
@@ -677,6 +678,7 @@ static noinline_for_stack void vivid_thread_vid_cap_tick(struct vivid_dev *dev,
{
struct vivid_buffer *vid_cap_buf = NULL;
struct vivid_buffer *vbi_cap_buf = NULL;
+ struct vivid_buffer *meta_cap_buf = NULL;
u64 f_time = 0;
dprintk(dev, 1, "Video Capture Thread Tick\n");
@@ -704,15 +706,19 @@ static noinline_for_stack void vivid_thread_vid_cap_tick(struct vivid_dev *dev,
list_del(&vbi_cap_buf->list);
}
}
+ if (!list_empty(&dev->meta_cap_active)) {
+ meta_cap_buf = list_entry(dev->meta_cap_active.next,
+ struct vivid_buffer, list);
+ list_del(&meta_cap_buf->list);
+ }
+
spin_unlock(&dev->slock);
- if (!vid_cap_buf && !vbi_cap_buf)
+ if (!vid_cap_buf && !vbi_cap_buf && !meta_cap_buf)
goto update_mv;
f_time = dev->cap_frame_period * dev->vid_cap_seq_count +
dev->cap_stream_start + dev->time_wrap_offset;
- if (!dev->tstamp_src_is_soe)
- f_time += dev->cap_frame_eof_offset;
if (vid_cap_buf) {
v4l2_ctrl_request_setup(vid_cap_buf->vb.vb2_buf.req_obj.req,
@@ -735,6 +741,8 @@ static noinline_for_stack void vivid_thread_vid_cap_tick(struct vivid_dev *dev,
vid_cap_buf->vb.vb2_buf.index);
vid_cap_buf->vb.vb2_buf.timestamp = f_time;
+ if (!dev->tstamp_src_is_soe)
+ vid_cap_buf->vb.vb2_buf.timestamp += dev->cap_frame_eof_offset;
}
if (vbi_cap_buf) {
@@ -756,8 +764,22 @@ static noinline_for_stack void vivid_thread_vid_cap_tick(struct vivid_dev *dev,
/* If capturing a VBI, offset the timestamp by 0.05 of the frame period */
vbi_period = dev->cap_frame_period * 5;
do_div(vbi_period, 100);
- vbi_cap_buf->vb.vb2_buf.timestamp = f_time + vbi_period;
+ vbi_cap_buf->vb.vb2_buf.timestamp = f_time + dev->cap_frame_eof_offset + vbi_period;
+ }
+
+ if (meta_cap_buf) {
+ v4l2_ctrl_request_setup(meta_cap_buf->vb.vb2_buf.req_obj.req,
+ &dev->ctrl_hdl_meta_cap);
+ vivid_meta_cap_fillbuff(dev, meta_cap_buf, f_time);
+ v4l2_ctrl_request_complete(meta_cap_buf->vb.vb2_buf.req_obj.req,
+ &dev->ctrl_hdl_meta_cap);
+ vb2_buffer_done(&meta_cap_buf->vb.vb2_buf, dev->dqbuf_error ?
+ VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
+ dprintk(dev, 2, "meta_cap %d done\n",
+ meta_cap_buf->vb.vb2_buf.index);
+ meta_cap_buf->vb.vb2_buf.timestamp = f_time + dev->cap_frame_eof_offset;
}
+
dev->dqbuf_error = false;
update_mv:
@@ -796,7 +818,11 @@ static int vivid_thread_vid_cap(void *data)
if (kthread_should_stop())
break;
- mutex_lock(&dev->mutex);
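+ /*
+ * Use trylock and poll: stop_streaming() now calls kthread_stop()
+ * while holding dev->mutex, so this thread must never block on
+ * the mutex or both sides would deadlock.
+ */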
+ if (!mutex_trylock(&dev->mutex)) {
+ schedule_timeout_uninterruptible(1);
+ continue;
+ }
+
cur_jiffies = jiffies;
if (dev->cap_seq_resync) {
dev->jiffies_vid_cap = cur_jiffies;
@@ -835,6 +861,7 @@ static int vivid_thread_vid_cap(void *data)
dev->cap_seq_count = buffers_since_start + dev->cap_seq_offset;
dev->vid_cap_seq_count = dev->cap_seq_count - dev->vid_cap_seq_start;
dev->vbi_cap_seq_count = dev->cap_seq_count - dev->vbi_cap_seq_start;
+ dev->meta_cap_seq_count = dev->cap_seq_count - dev->meta_cap_seq_start;
vivid_thread_vid_cap_tick(dev, dropped_bufs);
@@ -883,8 +910,10 @@ int vivid_start_generating_vid_cap(struct vivid_dev *dev, bool *pstreaming)
if (pstreaming == &dev->vid_cap_streaming)
dev->vid_cap_seq_start = seq_count;
- else
+ else if (pstreaming == &dev->vbi_cap_streaming)
dev->vbi_cap_seq_start = seq_count;
+ else
+ dev->meta_cap_seq_start = seq_count;
*pstreaming = true;
return 0;
}
@@ -894,6 +923,7 @@ int vivid_start_generating_vid_cap(struct vivid_dev *dev, bool *pstreaming)
dev->vid_cap_seq_start = dev->seq_wrap * 128;
dev->vbi_cap_seq_start = dev->seq_wrap * 128;
+ dev->meta_cap_seq_start = dev->seq_wrap * 128;
dev->kthread_vid_cap = kthread_run(vivid_thread_vid_cap, dev,
"%s-vid-cap", dev->v4l2_dev.name);
@@ -951,13 +981,27 @@ void vivid_stop_generating_vid_cap(struct vivid_dev *dev, bool *pstreaming)
}
}
- if (dev->vid_cap_streaming || dev->vbi_cap_streaming)
+ if (pstreaming == &dev->meta_cap_streaming) {
+ while (!list_empty(&dev->meta_cap_active)) {
+ struct vivid_buffer *buf;
+
+ buf = list_entry(dev->meta_cap_active.next,
+ struct vivid_buffer, list);
+ list_del(&buf->list);
+ v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req,
+ &dev->ctrl_hdl_meta_cap);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+ dprintk(dev, 2, "meta_cap buffer %d done\n",
+ buf->vb.vb2_buf.index);
+ }
+ }
+
+ if (dev->vid_cap_streaming || dev->vbi_cap_streaming ||
+ dev->meta_cap_streaming)
return;
/* shutdown control thread */
vivid_grab_controls(dev, false);
- mutex_unlock(&dev->mutex);
kthread_stop(dev->kthread_vid_cap);
dev->kthread_vid_cap = NULL;
- mutex_lock(&dev->mutex);
}
diff --git a/drivers/media/platform/vivid/vivid-kthread-out.c b/drivers/media/platform/vivid/vivid-kthread-out.c
index ce5bcda2348c..6780687978f9 100644
--- a/drivers/media/platform/vivid/vivid-kthread-out.c
+++ b/drivers/media/platform/vivid/vivid-kthread-out.c
@@ -38,11 +38,13 @@
#include "vivid-osd.h"
#include "vivid-ctrls.h"
#include "vivid-kthread-out.h"
+#include "vivid-meta-out.h"
static void vivid_thread_vid_out_tick(struct vivid_dev *dev)
{
struct vivid_buffer *vid_out_buf = NULL;
struct vivid_buffer *vbi_out_buf = NULL;
+ struct vivid_buffer *meta_out_buf = NULL;
dprintk(dev, 1, "Video Output Thread Tick\n");
@@ -69,9 +71,14 @@ static void vivid_thread_vid_out_tick(struct vivid_dev *dev)
struct vivid_buffer, list);
list_del(&vbi_out_buf->list);
}
+ if (!list_empty(&dev->meta_out_active)) {
+ meta_out_buf = list_entry(dev->meta_out_active.next,
+ struct vivid_buffer, list);
+ list_del(&meta_out_buf->list);
+ }
spin_unlock(&dev->slock);
- if (!vid_out_buf && !vbi_out_buf)
+ if (!vid_out_buf && !vbi_out_buf && !meta_out_buf)
return;
if (vid_out_buf) {
@@ -111,6 +118,21 @@ static void vivid_thread_vid_out_tick(struct vivid_dev *dev)
dprintk(dev, 2, "vbi_out buffer %d done\n",
vbi_out_buf->vb.vb2_buf.index);
}
+ if (meta_out_buf) {
+ v4l2_ctrl_request_setup(meta_out_buf->vb.vb2_buf.req_obj.req,
+ &dev->ctrl_hdl_meta_out);
+ v4l2_ctrl_request_complete(meta_out_buf->vb.vb2_buf.req_obj.req,
+ &dev->ctrl_hdl_meta_out);
+ vivid_meta_out_process(dev, meta_out_buf);
+ meta_out_buf->vb.sequence = dev->meta_out_seq_count;
+ meta_out_buf->vb.vb2_buf.timestamp =
+ ktime_get_ns() + dev->time_wrap_offset;
+ vb2_buffer_done(&meta_out_buf->vb.vb2_buf, dev->dqbuf_error ?
+ VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
+ dprintk(dev, 2, "meta_out buffer %d done\n",
+ meta_out_buf->vb.vb2_buf.index);
+ }
+
dev->dqbuf_error = false;
}
@@ -136,6 +158,7 @@ static int vivid_thread_vid_out(void *data)
dev->out_seq_count = 0xffffff80U;
dev->jiffies_vid_out = jiffies;
dev->vid_out_seq_start = dev->vbi_out_seq_start = 0;
+ dev->meta_out_seq_start = 0;
dev->out_seq_resync = false;
for (;;) {
@@ -143,7 +166,11 @@ static int vivid_thread_vid_out(void *data)
if (kthread_should_stop())
break;
- mutex_lock(&dev->mutex);
+ if (!mutex_trylock(&dev->mutex)) {
+ schedule_timeout_uninterruptible(1);
+ continue;
+ }
+
cur_jiffies = jiffies;
if (dev->out_seq_resync) {
dev->jiffies_vid_out = cur_jiffies;
@@ -178,6 +205,7 @@ static int vivid_thread_vid_out(void *data)
dev->out_seq_count = buffers_since_start + dev->out_seq_offset;
dev->vid_out_seq_count = dev->out_seq_count - dev->vid_out_seq_start;
dev->vbi_out_seq_count = dev->out_seq_count - dev->vbi_out_seq_start;
+ dev->meta_out_seq_count = dev->out_seq_count - dev->meta_out_seq_start;
vivid_thread_vid_out_tick(dev);
mutex_unlock(&dev->mutex);
@@ -229,8 +257,10 @@ int vivid_start_generating_vid_out(struct vivid_dev *dev, bool *pstreaming)
if (pstreaming == &dev->vid_out_streaming)
dev->vid_out_seq_start = seq_count;
- else
+ else if (pstreaming == &dev->vbi_out_streaming)
dev->vbi_out_seq_start = seq_count;
+ else
+ dev->meta_out_seq_start = seq_count;
*pstreaming = true;
return 0;
}
@@ -239,6 +269,7 @@ int vivid_start_generating_vid_out(struct vivid_dev *dev, bool *pstreaming)
dev->jiffies_vid_out = jiffies;
dev->vid_out_seq_start = dev->seq_wrap * 128;
dev->vbi_out_seq_start = dev->seq_wrap * 128;
+ dev->meta_out_seq_start = dev->seq_wrap * 128;
dev->kthread_vid_out = kthread_run(vivid_thread_vid_out, dev,
"%s-vid-out", dev->v4l2_dev.name);
@@ -296,13 +327,27 @@ void vivid_stop_generating_vid_out(struct vivid_dev *dev, bool *pstreaming)
}
}
- if (dev->vid_out_streaming || dev->vbi_out_streaming)
+ if (pstreaming == &dev->meta_out_streaming) {
+ while (!list_empty(&dev->meta_out_active)) {
+ struct vivid_buffer *buf;
+
+ buf = list_entry(dev->meta_out_active.next,
+ struct vivid_buffer, list);
+ list_del(&buf->list);
+ v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req,
+ &dev->ctrl_hdl_meta_out);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+ dprintk(dev, 2, "meta_out buffer %d done\n",
+ buf->vb.vb2_buf.index);
+ }
+ }
+
+ if (dev->vid_out_streaming || dev->vbi_out_streaming ||
+ dev->meta_out_streaming)
return;
/* shutdown control thread */
vivid_grab_controls(dev, false);
- mutex_unlock(&dev->mutex);
kthread_stop(dev->kthread_vid_out);
dev->kthread_vid_out = NULL;
- mutex_lock(&dev->mutex);
}
diff --git a/drivers/media/platform/vivid/vivid-meta-cap.c b/drivers/media/platform/vivid/vivid-meta-cap.c
new file mode 100644
index 000000000000..780f96860a6d
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-meta-cap.c
@@ -0,0 +1,201 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * vivid-meta-cap.c - meta capture support functions.
+ */
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-common.h>
+#include <linux/usb/video.h>
+
+#include "vivid-core.h"
+#include "vivid-kthread-cap.h"
+#include "vivid-meta-cap.h"
+
+static int meta_cap_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
+ unsigned int *nplanes, unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vq);
+ unsigned int size = sizeof(struct vivid_uvc_meta_buf);
+
+ if (!vivid_is_webcam(dev))
+ return -EINVAL;
+
+ if (*nplanes) {
+ if (sizes[0] < size)
+ return -EINVAL;
+ } else {
+ sizes[0] = size;
+ }
+
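+ /* make sure the queue ends up with at least two buffers */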
+ if (vq->num_buffers + *nbuffers < 2)
+ *nbuffers = 2 - vq->num_buffers;
+
+ *nplanes = 1;
+ return 0;
+}
+
+static int meta_cap_buf_prepare(struct vb2_buffer *vb)
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
+ unsigned int size = sizeof(struct vivid_uvc_meta_buf);
+
+ dprintk(dev, 1, "%s\n", __func__);
+
+ if (dev->buf_prepare_error) {
+ /*
+ * Error injection: test what happens if buf_prepare() returns
+ * an error.
+ */
+ dev->buf_prepare_error = false;
+ return -EINVAL;
+ }
+ if (vb2_plane_size(vb, 0) < size) {
+ dprintk(dev, 1, "%s data will not fit into plane (%lu < %u)\n",
+ __func__, vb2_plane_size(vb, 0), size);
+ return -EINVAL;
+ }
+ vb2_set_plane_payload(vb, 0, size);
+
+ return 0;
+}
+
+static void meta_cap_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
+ struct vivid_buffer *buf = container_of(vbuf, struct vivid_buffer, vb);
+
+ dprintk(dev, 1, "%s\n", __func__);
+
+ spin_lock(&dev->slock);
+ list_add_tail(&buf->list, &dev->meta_cap_active);
+ spin_unlock(&dev->slock);
+}
+
+static int meta_cap_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vq);
+ int err;
+
+ dprintk(dev, 1, "%s\n", __func__);
+ dev->meta_cap_seq_count = 0;
+ if (dev->start_streaming_error) {
+ dev->start_streaming_error = false;
+ err = -EINVAL;
+ } else {
+ err = vivid_start_generating_vid_cap(dev,
+ &dev->meta_cap_streaming);
+ }
+ if (err) {
+ struct vivid_buffer *buf, *tmp;
+
+ list_for_each_entry_safe(buf, tmp,
+ &dev->meta_cap_active, list) {
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb.vb2_buf,
+ VB2_BUF_STATE_QUEUED);
+ }
+ }
+ return err;
+}
+
+/* abort streaming and wait for last buffer */
+static void meta_cap_stop_streaming(struct vb2_queue *vq)
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vq);
+
+ dprintk(dev, 1, "%s\n", __func__);
+ vivid_stop_generating_vid_cap(dev, &dev->meta_cap_streaming);
+}
+
+static void meta_cap_buf_request_complete(struct vb2_buffer *vb)
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
+
+ v4l2_ctrl_request_complete(vb->req_obj.req, &dev->ctrl_hdl_meta_cap);
+}
+
+const struct vb2_ops vivid_meta_cap_qops = {
+ .queue_setup = meta_cap_queue_setup,
+ .buf_prepare = meta_cap_buf_prepare,
+ .buf_queue = meta_cap_buf_queue,
+ .start_streaming = meta_cap_start_streaming,
+ .stop_streaming = meta_cap_stop_streaming,
+ .buf_request_complete = meta_cap_buf_request_complete,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+int vidioc_enum_fmt_meta_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ if (!vivid_is_webcam(dev))
+ return -EINVAL;
+
+ if (f->index > 0)
+ return -EINVAL;
+
+ f->type = V4L2_BUF_TYPE_META_CAPTURE;
+ f->pixelformat = V4L2_META_FMT_UVC;
+ return 0;
+}
+
+int vidioc_g_fmt_meta_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ struct v4l2_meta_format *meta = &f->fmt.meta;
+
+ if (!vivid_is_webcam(dev) || !dev->has_meta_cap)
+ return -EINVAL;
+
+ meta->dataformat = V4L2_META_FMT_UVC;
+ meta->buffersize = sizeof(struct vivid_uvc_meta_buf);
+ return 0;
+}
+
+void vivid_meta_cap_fillbuff(struct vivid_dev *dev,
+ struct vivid_buffer *buf, u64 soe)
+{
+ struct vivid_uvc_meta_buf *meta = vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
+ int buf_off = 0;
+
+ buf->vb.sequence = dev->meta_cap_seq_count;
+ if (dev->field_cap == V4L2_FIELD_ALTERNATE)
+ buf->vb.sequence /= 2;
+ memset(meta, 1, vb2_plane_size(&buf->vb.vb2_buf, 0));
+
+ meta->ns = ktime_get_ns();
+ meta->sof = buf->vb.sequence * 30;
+ meta->length = sizeof(*meta) - offsetof(struct vivid_uvc_meta_buf, length);
+ meta->flags = UVC_STREAM_EOH | UVC_STREAM_EOF;
+
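+ /* toggle the UVC Frame ID bit every other frame */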
+ if ((buf->vb.sequence % 2) == 0)
+ meta->flags |= UVC_STREAM_FID;
+
+ dprintk(dev, 2, "%s ns:%llu sof:%4d len:%u flags: 0x%02x",
+ __func__, meta->ns, meta->sof, meta->length, meta->flags);
+ if (dev->meta_pts) {
+ meta->flags |= UVC_STREAM_PTS;
+ meta->buf[0] = div_u64(soe, VIVID_META_CLOCK_UNIT);
+ buf_off = 4;
+ dprintk(dev, 2, " pts: %u\n", *(__u32 *)(meta->buf));
+ }
+
+ if (dev->meta_scr) {
+ meta->flags |= UVC_STREAM_SCR;
+ meta->buf[buf_off] = div_u64((soe + dev->cap_frame_eof_offset),
+ VIVID_META_CLOCK_UNIT);
+
+ meta->buf[buf_off + 4] = (buf->vb.sequence * 30) % 1000;
+ dprintk(dev, 2, " stc: %u, sof counter: %u\n",
+ *(__u32 *)(meta->buf + buf_off),
+ *(__u16 *)(meta->buf + buf_off + 4));
+ }
+ dprintk(dev, 2, "\n");
+}
diff --git a/drivers/media/platform/vivid/vivid-meta-cap.h b/drivers/media/platform/vivid/vivid-meta-cap.h
new file mode 100644
index 000000000000..4670d00d1576
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-meta-cap.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * vivid-meta-cap.h - meta capture support functions.
+ */
+#ifndef _VIVID_META_CAP_H_
+#define _VIVID_META_CAP_H_
+
+#define VIVID_META_CLOCK_UNIT 10 /* 100 MHz */
+
+struct vivid_uvc_meta_buf {
+ __u64 ns;
+ __u16 sof;
+ __u8 length;
+ __u8 flags;
+ __u8 buf[10]; /* PTS(4)+STC(4)+SOF(2) */
+} __packed;
+
+void vivid_meta_cap_fillbuff(struct vivid_dev *dev,
+ struct vivid_buffer *buf, u64 soe);
+
+int vidioc_enum_fmt_meta_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f);
+
+int vidioc_g_fmt_meta_cap(struct file *file, void *priv,
+ struct v4l2_format *f);
+
+extern const struct vb2_ops vivid_meta_cap_qops;
+
+#endif
diff --git a/drivers/media/platform/vivid/vivid-meta-out.c b/drivers/media/platform/vivid/vivid-meta-out.c
new file mode 100644
index 000000000000..ff8a039aba72
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-meta-out.c
@@ -0,0 +1,174 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * vivid-meta-out.c - meta output support functions.
+ */
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-common.h>
+#include <linux/usb/video.h>
+
+#include "vivid-core.h"
+#include "vivid-kthread-out.h"
+#include "vivid-meta-out.h"
+
+static int meta_out_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
+ unsigned int *nplanes, unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vq);
+ unsigned int size = sizeof(struct vivid_meta_out_buf);
+
+ if (!vivid_is_webcam(dev))
+ return -EINVAL;
+
+ if (*nplanes) {
+ if (sizes[0] < size)
+ return -EINVAL;
+ } else {
+ sizes[0] = size;
+ }
+
+ if (vq->num_buffers + *nbuffers < 2)
+ *nbuffers = 2 - vq->num_buffers;
+
+ *nplanes = 1;
+ return 0;
+}
+
+static int meta_out_buf_prepare(struct vb2_buffer *vb)
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
+ unsigned int size = sizeof(struct vivid_meta_out_buf);
+
+ dprintk(dev, 1, "%s\n", __func__);
+
+ if (dev->buf_prepare_error) {
+ /*
+ * Error injection: test what happens if buf_prepare() returns
+ * an error.
+ */
+ dev->buf_prepare_error = false;
+ return -EINVAL;
+ }
+ if (vb2_plane_size(vb, 0) < size) {
+ dprintk(dev, 1, "%s data will not fit into plane (%lu < %u)\n",
+ __func__, vb2_plane_size(vb, 0), size);
+ return -EINVAL;
+ }
+ vb2_set_plane_payload(vb, 0, size);
+
+ return 0;
+}
+
+static void meta_out_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
+ struct vivid_buffer *buf = container_of(vbuf, struct vivid_buffer, vb);
+
+ dprintk(dev, 1, "%s\n", __func__);
+
+ spin_lock(&dev->slock);
+ list_add_tail(&buf->list, &dev->meta_out_active);
+ spin_unlock(&dev->slock);
+}
+
+static int meta_out_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vq);
+ int err;
+
+ dprintk(dev, 1, "%s\n", __func__);
+ dev->meta_out_seq_count = 0;
+ if (dev->start_streaming_error) {
+ dev->start_streaming_error = false;
+ err = -EINVAL;
+ } else {
+ err = vivid_start_generating_vid_out(dev,
+ &dev->meta_out_streaming);
+ }
+ if (err) {
+ struct vivid_buffer *buf, *tmp;
+
+ list_for_each_entry_safe(buf, tmp,
+ &dev->meta_out_active, list) {
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb.vb2_buf,
+ VB2_BUF_STATE_QUEUED);
+ }
+ }
+ return err;
+}
+
+/* abort streaming and wait for last buffer */
+static void meta_out_stop_streaming(struct vb2_queue *vq)
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vq);
+
+ dprintk(dev, 1, "%s\n", __func__);
+ vivid_stop_generating_vid_out(dev, &dev->meta_out_streaming);
+}
+
+static void meta_out_buf_request_complete(struct vb2_buffer *vb)
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
+
+ v4l2_ctrl_request_complete(vb->req_obj.req, &dev->ctrl_hdl_meta_out);
+}
+
+const struct vb2_ops vivid_meta_out_qops = {
+ .queue_setup = meta_out_queue_setup,
+ .buf_prepare = meta_out_buf_prepare,
+ .buf_queue = meta_out_buf_queue,
+ .start_streaming = meta_out_start_streaming,
+ .stop_streaming = meta_out_stop_streaming,
+ .buf_request_complete = meta_out_buf_request_complete,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+int vidioc_enum_fmt_meta_out(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ if (!vivid_is_webcam(dev))
+ return -EINVAL;
+
+ if (f->index > 0)
+ return -EINVAL;
+
+ f->type = V4L2_BUF_TYPE_META_OUTPUT;
+ f->pixelformat = V4L2_META_FMT_VIVID;
+ return 0;
+}
+
+int vidioc_g_fmt_meta_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ struct v4l2_meta_format *meta = &f->fmt.meta;
+
+ if (!vivid_is_webcam(dev) || !dev->has_meta_out)
+ return -EINVAL;
+
+ meta->dataformat = V4L2_META_FMT_VIVID;
+ meta->buffersize = sizeof(struct vivid_meta_out_buf);
+ return 0;
+}
+
+void vivid_meta_out_process(struct vivid_dev *dev,
+ struct vivid_buffer *buf)
+{
+ struct vivid_meta_out_buf *meta = vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
+
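+ /* apply the picture settings carried in the buffer to the test pattern generator */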
+ tpg_s_brightness(&dev->tpg, meta->brightness);
+ tpg_s_contrast(&dev->tpg, meta->contrast);
+ tpg_s_saturation(&dev->tpg, meta->saturation);
+ tpg_s_hue(&dev->tpg, meta->hue);
+ dprintk(dev, 2, " %s brightness %u contrast %u saturation %u hue %d\n",
+ __func__, meta->brightness, meta->contrast,
+ meta->saturation, meta->hue);
+}
diff --git a/drivers/media/platform/vivid/vivid-meta-out.h b/drivers/media/platform/vivid/vivid-meta-out.h
new file mode 100644
index 000000000000..0c639b7c2842
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-meta-out.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * vivid-meta-out.h - meta output support functions.
+ */
+#ifndef _VIVID_META_OUT_H_
+#define _VIVID_META_OUT_H_
+
+struct vivid_meta_out_buf {
+ u16 brightness;
+ u16 contrast;
+ u16 saturation;
+ s16 hue;
+};
+
+void vivid_meta_out_process(struct vivid_dev *dev, struct vivid_buffer *buf);
+int vidioc_enum_fmt_meta_out(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f);
+int vidioc_g_fmt_meta_out(struct file *file, void *priv,
+ struct v4l2_format *f);
+int vidioc_s_fmt_meta_out(struct file *file, void *priv,
+ struct v4l2_format *f);
+
+extern const struct vb2_ops vivid_meta_out_qops;
+
+#endif
diff --git a/drivers/media/platform/vivid/vivid-sdr-cap.c b/drivers/media/platform/vivid/vivid-sdr-cap.c
index 9acc709b0740..2b7522e16efc 100644
--- a/drivers/media/platform/vivid/vivid-sdr-cap.c
+++ b/drivers/media/platform/vivid/vivid-sdr-cap.c
@@ -141,7 +141,11 @@ static int vivid_thread_sdr_cap(void *data)
if (kthread_should_stop())
break;
- mutex_lock(&dev->mutex);
+ if (!mutex_trylock(&dev->mutex)) {
+ schedule_timeout_uninterruptible(1);
+ continue;
+ }
+
cur_jiffies = jiffies;
if (dev->sdr_cap_seq_resync) {
dev->jiffies_sdr_cap = cur_jiffies;
@@ -303,10 +307,8 @@ static void sdr_cap_stop_streaming(struct vb2_queue *vq)
}
/* shutdown control thread */
- mutex_unlock(&dev->mutex);
kthread_stop(dev->kthread_sdr_cap);
dev->kthread_sdr_cap = NULL;
- mutex_lock(&dev->mutex);
}
static void sdr_cap_buf_request_complete(struct vb2_buffer *vb)
diff --git a/drivers/media/platform/vivid/vivid-vid-cap.c b/drivers/media/platform/vivid/vivid-vid-cap.c
index 8cbaa0c998ed..e94beef008c8 100644
--- a/drivers/media/platform/vivid/vivid-vid-cap.c
+++ b/drivers/media/platform/vivid/vivid-vid-cap.c
@@ -223,9 +223,6 @@ static int vid_cap_start_streaming(struct vb2_queue *vq, unsigned count)
if (vb2_is_streaming(&dev->vb_vid_out_q))
dev->can_loop_video = vivid_vid_can_loop(dev);
- if (dev->kthread_vid_cap)
- return 0;
-
dev->vid_cap_seq_count = 0;
dprintk(dev, 1, "%s\n", __func__);
for (i = 0; i < VIDEO_MAX_FRAME; i++)
@@ -1359,7 +1356,9 @@ int vidioc_s_input(struct file *file, void *priv, unsigned i)
if (i == dev->input)
return 0;
- if (vb2_is_busy(&dev->vb_vid_cap_q) || vb2_is_busy(&dev->vb_vbi_cap_q))
+ if (vb2_is_busy(&dev->vb_vid_cap_q) ||
+ vb2_is_busy(&dev->vb_vbi_cap_q) ||
+ vb2_is_busy(&dev->vb_meta_cap_q))
return -EBUSY;
dev->input = i;
@@ -1369,6 +1368,7 @@ int vidioc_s_input(struct file *file, void *priv, unsigned i)
dev->vid_cap_dev.tvnorms = V4L2_STD_ALL;
}
dev->vbi_cap_dev.tvnorms = dev->vid_cap_dev.tvnorms;
+ dev->meta_cap_dev.tvnorms = dev->vid_cap_dev.tvnorms;
vivid_update_format_cap(dev, false);
if (dev->colorspace) {
diff --git a/drivers/media/platform/vivid/vivid-vid-out.c b/drivers/media/platform/vivid/vivid-vid-out.c
index 148b663a6075..ee3446e3217c 100644
--- a/drivers/media/platform/vivid/vivid-vid-out.c
+++ b/drivers/media/platform/vivid/vivid-vid-out.c
@@ -161,9 +161,6 @@ static int vid_out_start_streaming(struct vb2_queue *vq, unsigned count)
if (vb2_is_streaming(&dev->vb_vid_cap_q))
dev->can_loop_video = vivid_vid_can_loop(dev);
- if (dev->kthread_vid_out)
- return 0;
-
dev->vid_out_seq_count = 0;
dprintk(dev, 1, "%s\n", __func__);
if (dev->start_streaming_error) {
@@ -1082,7 +1079,9 @@ int vidioc_s_output(struct file *file, void *priv, unsigned o)
if (o == dev->output)
return 0;
- if (vb2_is_busy(&dev->vb_vid_out_q) || vb2_is_busy(&dev->vb_vbi_out_q))
+ if (vb2_is_busy(&dev->vb_vid_out_q) ||
+ vb2_is_busy(&dev->vb_vbi_out_q) ||
+ vb2_is_busy(&dev->vb_meta_out_q))
return -EBUSY;
dev->output = o;
@@ -1093,6 +1092,7 @@ int vidioc_s_output(struct file *file, void *priv, unsigned o)
dev->vid_out_dev.tvnorms = 0;
dev->vbi_out_dev.tvnorms = dev->vid_out_dev.tvnorms;
+ dev->meta_out_dev.tvnorms = dev->vid_out_dev.tvnorms;
vivid_update_format_out(dev);
v4l2_ctrl_activate(dev->ctrl_display_present, vivid_is_hdmi_out(dev));
diff --git a/drivers/media/platform/xilinx/xilinx-dma.h b/drivers/media/platform/xilinx/xilinx-dma.h
index 5aec4d17eb21..2378bdae57ae 100644
--- a/drivers/media/platform/xilinx/xilinx-dma.h
+++ b/drivers/media/platform/xilinx/xilinx-dma.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Xilinx Video DMA
*
diff --git a/drivers/media/platform/xilinx/xilinx-vip.h b/drivers/media/platform/xilinx/xilinx-vip.h
index f71e2b650453..a528a32ea1dc 100644
--- a/drivers/media/platform/xilinx/xilinx-vip.h
+++ b/drivers/media/platform/xilinx/xilinx-vip.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Xilinx Video IP Core
*
diff --git a/drivers/media/platform/xilinx/xilinx-vipp.h b/drivers/media/platform/xilinx/xilinx-vipp.h
index e65fce9538f9..cc52c1854dbd 100644
--- a/drivers/media/platform/xilinx/xilinx-vipp.h
+++ b/drivers/media/platform/xilinx/xilinx-vipp.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Xilinx Video IP Composite Device
*
diff --git a/drivers/media/platform/xilinx/xilinx-vtc.h b/drivers/media/platform/xilinx/xilinx-vtc.h
index 90cf44245283..855845911ffc 100644
--- a/drivers/media/platform/xilinx/xilinx-vtc.h
+++ b/drivers/media/platform/xilinx/xilinx-vtc.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Xilinx Video Timing Controller
*
diff --git a/drivers/media/radio/radio-wl1273.c b/drivers/media/radio/radio-wl1273.c
index 104ac41c6f96..112376873167 100644
--- a/drivers/media/radio/radio-wl1273.c
+++ b/drivers/media/radio/radio-wl1273.c
@@ -1148,8 +1148,7 @@ static int wl1273_fm_fops_release(struct file *file)
if (radio->rds_users > 0) {
radio->rds_users--;
if (radio->rds_users == 0) {
- if (mutex_lock_interruptible(&core->lock))
- return -EINTR;
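+ /* release must not fail, so wait for the lock uninterruptibly */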
+ mutex_lock(&core->lock);
radio->irq_flags &= ~WL1273_RDS_EVENT;
diff --git a/drivers/media/radio/si470x/radio-si470x-i2c.c b/drivers/media/radio/si470x/radio-si470x-i2c.c
index 7541698a0be1..f491420d7b53 100644
--- a/drivers/media/radio/si470x/radio-si470x-i2c.c
+++ b/drivers/media/radio/si470x/radio-si470x-i2c.c
@@ -482,6 +482,8 @@ static int si470x_i2c_remove(struct i2c_client *client)
if (radio->gpio_reset)
gpiod_set_value(radio->gpio_reset, 0);
+ v4l2_ctrl_handler_free(&radio->hdl);
+ v4l2_device_unregister(&radio->v4l2_dev);
return 0;
}
diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c
index 37a850421fbb..ed95244da894 100644
--- a/drivers/media/rc/imon.c
+++ b/drivers/media/rc/imon.c
@@ -83,6 +83,7 @@ struct imon_usb_dev_descr {
__u16 flags;
#define IMON_NO_FLAGS 0
#define IMON_NEED_20MS_PKT_DELAY 1
+#define IMON_SUPPRESS_REPEATED_KEYS 2
struct imon_panel_key_table key_table[];
};
@@ -149,8 +150,9 @@ struct imon_context {
struct timer_list ttimer; /* touch screen timer */
int touch_x; /* x coordinate on touchscreen */
int touch_y; /* y coordinate on touchscreen */
- struct imon_usb_dev_descr *dev_descr; /* device description with key
- table for front panels */
+ const struct imon_usb_dev_descr *dev_descr;
+ /* device description with key table for front panels */
};
#define TOUCH_TIMEOUT (HZ/30)
@@ -315,6 +317,32 @@ static const struct imon_usb_dev_descr imon_DH102 = {
}
};
+/* imon ultrabay front panel key table */
+static const struct imon_usb_dev_descr ultrabay_table = {
+ .flags = IMON_SUPPRESS_REPEATED_KEYS,
+ .key_table = {
+ { 0x0000000f0000ffeell, KEY_MEDIA }, /* Go */
+ { 0x000000000100ffeell, KEY_UP },
+ { 0x000000000001ffeell, KEY_DOWN },
+ { 0x000000160000ffeell, KEY_ENTER },
+ { 0x0000001f0000ffeell, KEY_AUDIO }, /* Music */
+ { 0x000000200000ffeell, KEY_VIDEO }, /* Movie */
+ { 0x000000210000ffeell, KEY_CAMERA }, /* Photo */
+ { 0x000000270000ffeell, KEY_DVD }, /* DVD */
+ { 0x000000230000ffeell, KEY_TV }, /* TV */
+ { 0x000000050000ffeell, KEY_PREVIOUS }, /* Previous */
+ { 0x000000070000ffeell, KEY_REWIND },
+ { 0x000000040000ffeell, KEY_STOP },
+ { 0x000000020000ffeell, KEY_PLAYPAUSE },
+ { 0x000000080000ffeell, KEY_FASTFORWARD },
+ { 0x000000060000ffeell, KEY_NEXT }, /* Next */
+ { 0x000100000000ffeell, KEY_VOLUMEUP },
+ { 0x010000000000ffeell, KEY_VOLUMEDOWN },
+ { 0x000000010000ffeell, KEY_MUTE },
+ { 0, KEY_RESERVED },
+ }
+};
+
/*
* USB Device ID for iMON USB Control Boards
*
@@ -1264,9 +1292,11 @@ static u32 imon_mce_key_lookup(struct imon_context *ictx, u32 scancode)
static u32 imon_panel_key_lookup(struct imon_context *ictx, u64 code)
{
- int i;
+ const struct imon_panel_key_table *key_table;
u32 keycode = KEY_RESERVED;
- struct imon_panel_key_table *key_table = ictx->dev_descr->key_table;
+ int i;
+
+ key_table = ictx->dev_descr->key_table;
for (i = 0; key_table[i].hw_code != 0; i++) {
if (key_table[i].hw_code == (code | 0xffee)) {
@@ -1550,7 +1580,6 @@ static void imon_incoming_packet(struct imon_context *ictx,
u32 kc;
u64 scancode;
int press_type = 0;
- long msec;
ktime_t t;
static ktime_t prev_time;
u8 ktype;
@@ -1598,8 +1627,7 @@ static void imon_incoming_packet(struct imon_context *ictx,
spin_unlock_irqrestore(&ictx->kc_lock, flags);
/* send touchscreen events through input subsystem if touchpad data */
- if (ictx->display_type == IMON_DISPLAY_TYPE_VGA && len == 8 &&
- buf[7] == 0x86) {
+ if (ictx->touch && len == 8 && buf[7] == 0x86) {
imon_touch_event(ictx, buf);
return;
@@ -1653,14 +1681,16 @@ static void imon_incoming_packet(struct imon_context *ictx,
spin_lock_irqsave(&ictx->kc_lock, flags);
t = ktime_get();
- /* KEY_MUTE repeats from knob need to be suppressed */
- if (ictx->kc == KEY_MUTE && ictx->kc == ictx->last_keycode) {
- msec = ktime_ms_delta(t, prev_time);
- if (msec < ictx->idev->rep[REP_DELAY]) {
+ /* suppress repeated keys from the knob and front panel */
+ if (ictx->kc == KEY_MUTE ||
+ ictx->dev_descr->flags & IMON_SUPPRESS_REPEATED_KEYS) {
+ if (ictx->kc == ictx->last_keycode &&
+ ktime_ms_delta(t, prev_time) < ictx->idev->rep[REP_DELAY]) {
spin_unlock_irqrestore(&ictx->kc_lock, flags);
return;
}
}
+
prev_time = t;
kc = ictx->kc;
@@ -1848,6 +1878,14 @@ static void imon_get_ffdc_type(struct imon_context *ictx)
dev_info(ictx->dev, "0xffdc iMON Inside, iMON IR");
ictx->display_supported = false;
break;
+ /* Soundgraph iMON UltraBay */
+ case 0x98:
+ dev_info(ictx->dev, "0xffdc iMON UltraBay, LCD + IR");
+ detected_display_type = IMON_DISPLAY_TYPE_LCD;
+ allowed_protos = RC_PROTO_BIT_IMON | RC_PROTO_BIT_RC6_MCE;
+ ictx->dev_descr = &ultrabay_table;
+ break;
+
default:
dev_info(ictx->dev, "Unknown 0xffdc device, defaulting to VFD and iMON IR");
detected_display_type = IMON_DISPLAY_TYPE_VFD;
@@ -1979,10 +2017,12 @@ out:
static struct input_dev *imon_init_idev(struct imon_context *ictx)
{
- struct imon_panel_key_table *key_table = ictx->dev_descr->key_table;
+ const struct imon_panel_key_table *key_table;
struct input_dev *idev;
int ret, i;
+ key_table = ictx->dev_descr->key_table;
+
idev = input_allocate_device();
if (!idev)
goto out;
diff --git a/drivers/media/rc/imon_raw.c b/drivers/media/rc/imon_raw.c
index d4aedcf76418..aae0a3cc9479 100644
--- a/drivers/media/rc/imon_raw.c
+++ b/drivers/media/rc/imon_raw.c
@@ -57,32 +57,18 @@ static void imon_ir_data(struct imon *imon)
* fls will tell us the highest bit set plus 1 (or 0 if no
* bits are set).
*/
+ rawir.pulse = !rawir.pulse;
bit = fls64(data & (BIT_ULL(offset) - 1));
if (bit < offset) {
- dev_dbg(imon->dev, "pulse: %d bits", offset - bit);
- rawir.pulse = true;
+ dev_dbg(imon->dev, "%s: %d bits",
+ rawir.pulse ? "pulse" : "space", offset - bit);
rawir.duration = (offset - bit) * BIT_DURATION;
ir_raw_event_store_with_filter(imon->rcdev, &rawir);
- if (bit == 0)
- break;
-
offset = bit;
}
- /*
- * Find highest clear bit which is less than offset.
- *
- * Just invert the data and use same trick as above.
- */
- bit = fls64(~data & (BIT_ULL(offset) - 1));
- dev_dbg(imon->dev, "space: %d bits", offset - bit);
-
- rawir.pulse = false;
- rawir.duration = (offset - bit) * BIT_DURATION;
- ir_raw_event_store_with_filter(imon->rcdev, &rawir);
-
- offset = bit;
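+ /*
+ * Invert the data so the same fls64() search finds the
+ * highest clear bit next, alternating pulse and space.
+ */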
+ data = ~data;
} while (offset > 0);
if (packet_no == 0x0a && !imon->rcdev->idle) {
diff --git a/drivers/media/rc/ir-rcmm-decoder.c b/drivers/media/rc/ir-rcmm-decoder.c
index 64fb65a9a19f..028df5cb1828 100644
--- a/drivers/media/rc/ir-rcmm-decoder.c
+++ b/drivers/media/rc/ir-rcmm-decoder.c
@@ -79,7 +79,7 @@ static int ir_rcmm_decode(struct rc_dev *dev, struct ir_raw_event ev)
if (!ev.pulse)
break;
- if (!eq_margin(ev.duration, RCMM_PREFIX_PULSE, RCMM_UNIT / 2))
+ if (!eq_margin(ev.duration, RCMM_PREFIX_PULSE, RCMM_UNIT))
break;
data->state = STATE_LOW;
@@ -91,7 +91,7 @@ static int ir_rcmm_decode(struct rc_dev *dev, struct ir_raw_event ev)
if (ev.pulse)
break;
- if (!eq_margin(ev.duration, RCMM_PULSE_0, RCMM_UNIT / 2))
+ if (!eq_margin(ev.duration, RCMM_PULSE_0, RCMM_UNIT))
break;
data->state = STATE_BUMP;
@@ -164,6 +164,8 @@ static int ir_rcmm_decode(struct rc_dev *dev, struct ir_raw_event ev)
break;
}
+ dev_dbg(&dev->dev, "RC-MM decode failed at count %d state %d (%uus %s)\n",
+ data->count, data->state, TO_US(ev.duration), TO_STR(ev.pulse));
data->state = STATE_INACTIVE;
return -EINVAL;
}
diff --git a/drivers/media/rc/ite-cir.c b/drivers/media/rc/ite-cir.c
index 3ab6cec0dc3b..07667c04c1d2 100644
--- a/drivers/media/rc/ite-cir.c
+++ b/drivers/media/rc/ite-cir.c
@@ -382,7 +382,7 @@ static int ite_tx_ir(struct rc_dev *rcdev, unsigned *txbuf, unsigned n)
ite_dbg("%s called", __func__);
/* clear the array just in case */
- memset(last_sent, 0, ARRAY_SIZE(last_sent));
+ memset(last_sent, 0, sizeof(last_sent));
spin_lock_irqsave(&dev->lock, flags);
diff --git a/drivers/media/rc/keymaps/Makefile b/drivers/media/rc/keymaps/Makefile
index a56fc634d2d6..63261ef6380a 100644
--- a/drivers/media/rc/keymaps/Makefile
+++ b/drivers/media/rc/keymaps/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_RC_MAP) += rc-adstech-dvb-t-pci.o \
rc-avermedia-rm-ks.o \
rc-avertv-303.o \
rc-azurewave-ad-tu700.o \
+ rc-beelink-gs1.o \
rc-behold.o \
rc-behold-columbus.o \
rc-budget-ci-old.o \
@@ -114,6 +115,7 @@ obj-$(CONFIG_RC_MAP) += rc-adstech-dvb-t-pci.o \
rc-tt-1500.o \
rc-twinhan-dtv-cab-ci.o \
rc-twinhan1027.o \
+ rc-vega-s9x.o \
rc-videomate-m1f.o \
rc-videomate-s350.o \
rc-videomate-tv-pvr.o \
diff --git a/drivers/media/rc/keymaps/rc-beelink-gs1.c b/drivers/media/rc/keymaps/rc-beelink-gs1.c
new file mode 100644
index 000000000000..cedbd5d20bc7
--- /dev/null
+++ b/drivers/media/rc/keymaps/rc-beelink-gs1.c
@@ -0,0 +1,84 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2019 Clément Péron
+
+#include <media/rc-map.h>
+#include <linux/module.h>
+
+/*
+ * Keymap for the Beelink GS1 remote control
+ */
+
+static struct rc_map_table beelink_gs1_table[] = {
+ /*
+ * TV Keys (Power, Learn and Volume)
+ * { 0x40400d, KEY_TV },
+ * { 0x80f1, KEY_TV },
+ * { 0x80f3, KEY_TV },
+ * { 0x80f4, KEY_TV },
+ */
+
+ { 0x8051, KEY_POWER },
+ { 0x804d, KEY_MUTE },
+ { 0x8040, KEY_CONFIG },
+
+ { 0x8026, KEY_UP },
+ { 0x8028, KEY_DOWN },
+ { 0x8025, KEY_LEFT },
+ { 0x8027, KEY_RIGHT },
+ { 0x800d, KEY_OK },
+
+ { 0x8053, KEY_HOME },
+ { 0x80bc, KEY_MEDIA },
+ { 0x801b, KEY_BACK },
+ { 0x8049, KEY_MENU },
+
+ { 0x804e, KEY_VOLUMEUP },
+ { 0x8056, KEY_VOLUMEDOWN },
+
+ { 0x8054, KEY_SUBTITLE }, /* Web */
+ { 0x8052, KEY_EPG }, /* Media */
+
+ { 0x8041, KEY_CHANNELUP },
+ { 0x8042, KEY_CHANNELDOWN },
+
+ { 0x8031, KEY_1 },
+ { 0x8032, KEY_2 },
+ { 0x8033, KEY_3 },
+
+ { 0x8034, KEY_4 },
+ { 0x8035, KEY_5 },
+ { 0x8036, KEY_6 },
+
+ { 0x8037, KEY_7 },
+ { 0x8038, KEY_8 },
+ { 0x8039, KEY_9 },
+
+ { 0x8044, KEY_DELETE },
+ { 0x8030, KEY_0 },
+ { 0x8058, KEY_MODE }, /* # Input Method */
+};
+
+static struct rc_map_list beelink_gs1_map = {
+ .map = {
+ .scan = beelink_gs1_table,
+ .size = ARRAY_SIZE(beelink_gs1_table),
+ .rc_proto = RC_PROTO_NEC,
+ .name = RC_MAP_BEELINK_GS1,
+ }
+};
+
+static int __init init_rc_map_beelink_gs1(void)
+{
+ return rc_map_register(&beelink_gs1_map);
+}
+
+static void __exit exit_rc_map_beelink_gs1(void)
+{
+ rc_map_unregister(&beelink_gs1_map);
+}
+
+module_init(init_rc_map_beelink_gs1)
+module_exit(exit_rc_map_beelink_gs1)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Clément Péron <peron.clem@gmail.com>");
diff --git a/drivers/media/rc/keymaps/rc-vega-s9x.c b/drivers/media/rc/keymaps/rc-vega-s9x.c
new file mode 100644
index 000000000000..bf210c4dc535
--- /dev/null
+++ b/drivers/media/rc/keymaps/rc-vega-s9x.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Copyright (C) 2019 Christian Hewitt <christianshewitt@gmail.com>
+
+#include <media/rc-map.h>
+#include <linux/module.h>
+
+//
+// Keytable for the Tronsmart Vega S9x remote control
+//
+
+static struct rc_map_table vega_s9x[] = {
+ { 0x18, KEY_POWER },
+ { 0x17, KEY_MUTE }, // mouse
+
+ { 0x46, KEY_UP },
+ { 0x47, KEY_LEFT },
+ { 0x55, KEY_OK },
+ { 0x15, KEY_RIGHT },
+ { 0x16, KEY_DOWN },
+
+ { 0x06, KEY_HOME },
+ { 0x42, KEY_PLAYPAUSE },
+ { 0x40, KEY_BACK },
+
+ { 0x14, KEY_VOLUMEDOWN },
+ { 0x04, KEY_MENU },
+ { 0x10, KEY_VOLUMEUP },
+};
+
+static struct rc_map_list vega_s9x_map = {
+ .map = {
+ .scan = vega_s9x,
+ .size = ARRAY_SIZE(vega_s9x),
+ .rc_proto = RC_PROTO_NEC,
+ .name = RC_MAP_VEGA_S9X,
+ }
+};
+
+static int __init init_rc_map_vega_s9x(void)
+{
+ return rc_map_register(&vega_s9x_map);
+}
+
+static void __exit exit_rc_map_vega_s9x(void)
+{
+ rc_map_unregister(&vega_s9x_map);
+}
+
+module_init(init_rc_map_vega_s9x)
+module_exit(exit_rc_map_vega_s9x)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Christian Hewitt <christianshewitt@gmail.com");
diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
index 3fc9829a9233..f9616158bcf4 100644
--- a/drivers/media/rc/mceusb.c
+++ b/drivers/media/rc/mceusb.c
@@ -564,7 +564,7 @@ static int mceusb_cmd_datasize(u8 cmd, u8 subcmd)
datasize = 4;
break;
case MCE_CMD_G_REVISION:
- datasize = 2;
+ datasize = 4;
break;
case MCE_RSP_EQWAKESUPPORT:
case MCE_RSP_GETWAKESOURCE:
@@ -600,14 +600,9 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, u8 *buf, int buf_len,
char *inout;
u8 cmd, subcmd, *data;
struct device *dev = ir->dev;
- int start, skip = 0;
u32 carrier, period;
- /* skip meaningless 0xb1 0x60 header bytes on orig receiver */
- if (ir->flags.microsoft_gen1 && !out && !offset)
- skip = 2;
-
- if (len <= skip)
+ if (offset < 0 || offset >= buf_len)
return;
dev_dbg(dev, "%cx data[%d]: %*ph (len=%d sz=%d)",
@@ -616,11 +611,32 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, u8 *buf, int buf_len,
inout = out ? "Request" : "Got";
- start = offset + skip;
- cmd = buf[start] & 0xff;
- subcmd = buf[start + 1] & 0xff;
- data = buf + start + 2;
+ cmd = buf[offset];
+ subcmd = (offset + 1 < buf_len) ? buf[offset + 1] : 0;
+ data = &buf[offset] + 2;
+
+ /* Trace meaningless 0xb1 0x60 header bytes on original receiver */
+ if (ir->flags.microsoft_gen1 && !out && !offset) {
+ dev_dbg(dev, "MCE gen 1 header");
+ return;
+ }
+
+ /* Trace IR data header or trailer */
+ if (cmd != MCE_CMD_PORT_IR &&
+ (cmd & MCE_PORT_MASK) == MCE_COMMAND_IRDATA) {
+ if (cmd == MCE_IRDATA_TRAILER)
+ dev_dbg(dev, "End of raw IR data");
+ else
+ dev_dbg(dev, "Raw IR data, %d pulse/space samples",
+ cmd & MCE_PACKET_LENGTH_MASK);
+ return;
+ }
+
+ /* Unexpected end of buffer? */
+ if (offset + len > buf_len)
+ return;
+ /* Decode MCE command/response */
switch (cmd) {
case MCE_CMD_NULL:
if (subcmd == MCE_CMD_NULL)
@@ -644,7 +660,7 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, u8 *buf, int buf_len,
dev_dbg(dev, "Get hw/sw rev?");
else
dev_dbg(dev, "hw/sw rev %*ph",
- 4, &buf[start + 2]);
+ 4, &buf[offset + 2]);
break;
case MCE_CMD_RESUME:
dev_dbg(dev, "Device resume requested");
@@ -746,13 +762,6 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, u8 *buf, int buf_len,
default:
break;
}
-
- if (cmd == MCE_IRDATA_TRAILER)
- dev_dbg(dev, "End of raw IR data");
- else if ((cmd != MCE_CMD_PORT_IR) &&
- ((cmd & MCE_PORT_MASK) == MCE_COMMAND_IRDATA))
- dev_dbg(dev, "Raw IR data, %d pulse/space samples",
- cmd & MCE_PACKET_LENGTH_MASK);
#endif
}
@@ -1136,32 +1145,62 @@ static int mceusb_set_rx_carrier_report(struct rc_dev *dev, int enable)
}
/*
+ * Handle PORT_SYS/IR command response received from the MCE device.
+ *
+ * Assumes single response with all its data (not truncated)
+ * in buf_in[]. The response itself determines its total length
+ * (mceusb_cmd_datasize() + 2) and hence the minimum size of buf_in[].
+ *
* We don't do anything but print debug spew for many of the command bits
* we receive from the hardware, but some of them are useful information
* we want to store so that we can use them.
*/
-static void mceusb_handle_command(struct mceusb_dev *ir, int index)
+static void mceusb_handle_command(struct mceusb_dev *ir, u8 *buf_in)
{
+ u8 cmd = buf_in[0];
+ u8 subcmd = buf_in[1];
+ u8 *hi = &buf_in[2]; /* read only when required */
+ u8 *lo = &buf_in[3]; /* read only when required */
struct ir_raw_event rawir = {};
- u8 hi = ir->buf_in[index + 1] & 0xff;
- u8 lo = ir->buf_in[index + 2] & 0xff;
u32 carrier_cycles;
u32 cycles_fix;
- switch (ir->buf_in[index]) {
- /* the one and only 5-byte return value command */
- case MCE_RSP_GETPORTSTATUS:
- if ((ir->buf_in[index + 4] & 0xff) == 0x00)
- ir->txports_cabled |= 1 << hi;
- break;
+ if (cmd == MCE_CMD_PORT_SYS) {
+ switch (subcmd) {
+ /* the one and only 5-byte return value command */
+ case MCE_RSP_GETPORTSTATUS:
+ if (buf_in[5] == 0)
+ ir->txports_cabled |= 1 << *hi;
+ break;
+
+ /* 1-byte return value commands */
+ case MCE_RSP_EQEMVER:
+ ir->emver = *hi;
+ break;
+
+ /* No return value commands */
+ case MCE_RSP_CMD_ILLEGAL:
+ ir->need_reset = true;
+ break;
+
+ default:
+ break;
+ }
+
+ return;
+ }
+ if (cmd != MCE_CMD_PORT_IR)
+ return;
+
+ switch (subcmd) {
/* 2-byte return value commands */
case MCE_RSP_EQIRTIMEOUT:
- ir->rc->timeout = US_TO_NS((hi << 8 | lo) * MCE_TIME_UNIT);
+ ir->rc->timeout = US_TO_NS((*hi << 8 | *lo) * MCE_TIME_UNIT);
break;
case MCE_RSP_EQIRNUMPORTS:
- ir->num_txports = hi;
- ir->num_rxports = lo;
+ ir->num_txports = *hi;
+ ir->num_rxports = *lo;
break;
case MCE_RSP_EQIRRXCFCNT:
/*
@@ -1174,7 +1213,7 @@ static void mceusb_handle_command(struct mceusb_dev *ir, int index)
*/
if (ir->carrier_report_enabled && ir->learning_active &&
ir->pulse_tunit > 0) {
- carrier_cycles = (hi << 8 | lo);
+ carrier_cycles = (*hi << 8 | *lo);
/*
* Adjust carrier cycle count by adding
* 1 missed count per pulse "on"
@@ -1192,24 +1231,24 @@ static void mceusb_handle_command(struct mceusb_dev *ir, int index)
break;
/* 1-byte return value commands */
- case MCE_RSP_EQEMVER:
- ir->emver = hi;
- break;
case MCE_RSP_EQIRTXPORTS:
- ir->tx_mask = hi;
+ ir->tx_mask = *hi;
break;
case MCE_RSP_EQIRRXPORTEN:
- ir->learning_active = ((hi & 0x02) == 0x02);
- if (ir->rxports_active != hi) {
+ ir->learning_active = ((*hi & 0x02) == 0x02);
+ if (ir->rxports_active != *hi) {
dev_info(ir->dev, "%s-range (0x%x) receiver active",
- ir->learning_active ? "short" : "long", hi);
- ir->rxports_active = hi;
+ ir->learning_active ? "short" : "long", *hi);
+ ir->rxports_active = *hi;
}
break;
+
+ /* No return value commands */
case MCE_RSP_CMD_ILLEGAL:
case MCE_RSP_TX_TIMEOUT:
ir->need_reset = true;
break;
+
default:
break;
}
@@ -1235,7 +1274,8 @@ static void mceusb_process_ir_data(struct mceusb_dev *ir, int buf_len)
ir->rem = mceusb_cmd_datasize(ir->cmd, ir->buf_in[i]);
mceusb_dev_printdata(ir, ir->buf_in, buf_len, i - 1,
ir->rem + 2, false);
- mceusb_handle_command(ir, i);
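+ /* parse the response only if it fits entirely in this rx buffer */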
+ if (i + ir->rem < buf_len)
+ mceusb_handle_command(ir, &ir->buf_in[i - 1]);
ir->parser_state = CMD_DATA;
break;
case PARSE_IRDATA:
@@ -1264,15 +1304,22 @@ static void mceusb_process_ir_data(struct mceusb_dev *ir, int buf_len)
ir->rem--;
break;
case CMD_HEADER:
- /* decode mce packets of the form (84),AA,BB,CC,DD */
- /* IR data packets can span USB messages - rem */
ir->cmd = ir->buf_in[i];
if ((ir->cmd == MCE_CMD_PORT_IR) ||
((ir->cmd & MCE_PORT_MASK) !=
MCE_COMMAND_IRDATA)) {
+ /*
+ * got PORT_SYS, PORT_IR, or unknown
+ * command response prefix
+ */
ir->parser_state = SUBCMD;
continue;
}
+ /*
+ * got IR data prefix (0x80 + num_bytes)
+ * decode MCE packets of the form {0x83, AA, BB, CC}
+ * IR data packets can span USB messages
+ */
ir->rem = (ir->cmd & MCE_PACKET_LENGTH_MASK);
mceusb_dev_printdata(ir, ir->buf_in, buf_len,
i, ir->rem + 1, false);
@@ -1296,6 +1343,14 @@ static void mceusb_process_ir_data(struct mceusb_dev *ir, int buf_len)
if (ir->parser_state != CMD_HEADER && !ir->rem)
ir->parser_state = CMD_HEADER;
}
+
+ /*
+ * Accept IR data spanning multiple rx buffers.
+ * Reject MCE command response spanning multiple rx buffers.
+ */
+ if (ir->parser_state != PARSE_IRDATA || !ir->rem)
+ ir->parser_state = CMD_HEADER;
+
if (event) {
dev_dbg(ir->dev, "processed IR data");
ir_raw_event_handle(ir->rc);
diff --git a/drivers/media/rc/rc-core-priv.h b/drivers/media/rc/rc-core-priv.h
index 9f21b3e8b377..5f36244cc34f 100644
--- a/drivers/media/rc/rc-core-priv.h
+++ b/drivers/media/rc/rc-core-priv.h
@@ -1,5 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
- * SPDX-License-Identifier: GPL-2.0
* Remote Controller core raw events header
*
* Copyright (C) 2010 by Mauro Carvalho Chehab
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index 13da4c5c7d17..7741151606ef 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -1773,6 +1773,7 @@ static int rc_prepare_rx_device(struct rc_dev *dev)
set_bit(MSC_SCAN, dev->input_dev->mscbit);
/* Pointer/mouse events */
+ set_bit(INPUT_PROP_POINTING_STICK, dev->input_dev->propbit);
set_bit(EV_REL, dev->input_dev->evbit);
set_bit(REL_X, dev->input_dev->relbit);
set_bit(REL_Y, dev->input_dev->relbit);
diff --git a/drivers/media/rc/tango-ir.c b/drivers/media/rc/tango-ir.c
index 451ec4e9dcfa..b8eb5bc4d9be 100644
--- a/drivers/media/rc/tango-ir.c
+++ b/drivers/media/rc/tango-ir.c
@@ -157,20 +157,10 @@ static int tango_ir_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct rc_dev *rc;
struct tango_ir *ir;
- struct resource *rc5_res;
- struct resource *rc6_res;
u64 clkrate, clkdiv;
int irq, err;
u32 val;
- rc5_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!rc5_res)
- return -EINVAL;
-
- rc6_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (!rc6_res)
- return -EINVAL;
-
irq = platform_get_irq(pdev, 0);
if (irq <= 0)
return -EINVAL;
@@ -179,11 +169,11 @@ static int tango_ir_probe(struct platform_device *pdev)
if (!ir)
return -ENOMEM;
- ir->rc5_base = devm_ioremap_resource(dev, rc5_res);
+ ir->rc5_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ir->rc5_base))
return PTR_ERR(ir->rc5_base);
- ir->rc6_base = devm_ioremap_resource(dev, rc6_res);
+ ir->rc6_base = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(ir->rc6_base))
return PTR_ERR(ir->rc6_base);
diff --git a/drivers/media/tuners/qm1d1c0042.c b/drivers/media/tuners/qm1d1c0042.c
index 83ca5dc047ea..0e26d22f0b26 100644
--- a/drivers/media/tuners/qm1d1c0042.c
+++ b/drivers/media/tuners/qm1d1c0042.c
@@ -206,7 +206,7 @@ static int qm1d1c0042_set_params(struct dvb_frontend *fe)
if (ret < 0)
return ret;
- a = (freq + state->cfg.xtal_freq / 2) / state->cfg.xtal_freq;
+ a = DIV_ROUND_CLOSEST(freq, state->cfg.xtal_freq);
state->regs[0x06] &= 0x40;
state->regs[0x06] |= (a - 12) / 4;
diff --git a/drivers/media/tuners/si2157.c b/drivers/media/tuners/si2157.c
index e87040d6eca7..898e0f9f8b70 100644
--- a/drivers/media/tuners/si2157.c
+++ b/drivers/media/tuners/si2157.c
@@ -118,6 +118,11 @@ static int si2157_init(struct dvb_frontend *fe)
goto err;
}
+ if (dev->dont_load_firmware) {
+ dev_info(&client->dev, "device is buggy, skipping firmware download\n");
+ goto skip_fw_download;
+ }
+
/* query chip revision */
memcpy(cmd.args, "\x02", 1);
cmd.wlen = 1;
@@ -440,6 +445,7 @@ static int si2157_probe(struct i2c_client *client,
i2c_set_clientdata(client, dev);
dev->fe = cfg->fe;
dev->inversion = cfg->inversion;
+ dev->dont_load_firmware = cfg->dont_load_firmware;
dev->if_port = cfg->if_port;
dev->chiptype = (u8)id->driver_data;
dev->if_frequency = 5000000; /* default value of property 0x0706 */
diff --git a/drivers/media/tuners/si2157.h b/drivers/media/tuners/si2157.h
index c22ca784f43f..ffdece3c2eaa 100644
--- a/drivers/media/tuners/si2157.h
+++ b/drivers/media/tuners/si2157.h
@@ -11,29 +11,34 @@
#include <media/media-device.h>
#include <media/dvb_frontend.h>
-/*
- * I2C address
- * 0x60
+/**
+ * struct si2157_config - configuration parameters for si2157
+ *
+ * @fe:
+ * frontend returned by driver
+ * @mdev:
+ * media device returned by driver
+ * @inversion:
+ * spectral inversion
+ * @dont_load_firmware:
+ * Instead of uploading new firmware, use the existing one
+ * @if_port:
+ * Port selection
+ * Select the RF interface to use (pins 9+11 or 12+13)
+ *
+ * Note:
+ * The I2C address of this tuner is 0x60.
*/
struct si2157_config {
- /*
- * frontend
- */
struct dvb_frontend *fe;
#if defined(CONFIG_MEDIA_CONTROLLER)
struct media_device *mdev;
#endif
- /*
- * Spectral Inversion
- */
- bool inversion;
+ unsigned int inversion:1;
+ unsigned int dont_load_firmware:1;
- /*
- * Port selection
- * Select the RF interface to use (pins 9+11 or 12+13)
- */
u8 if_port;
};
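
A minimal sketch of how a bridge driver might consume the new field; the attach boilerplate below is illustrative, but the pattern (config handed over as I2C platform data) is how in-tree bridges bind si2157:

#include <linux/i2c.h>
#include <media/dvb_frontend.h>
#include "si2157.h"

/*
 * Illustrative: a bridge driver whose si2157 must keep its factory
 * firmware sets dont_load_firmware before binding the tuner.
 */
static struct i2c_client *attach_si2157(struct i2c_adapter *i2c,
					struct dvb_frontend *fe)
{
	struct si2157_config cfg = {
		.fe = fe,
		.if_port = 1,
		.dont_load_firmware = 1, /* keep firmware already on chip */
	};
	struct i2c_board_info info = {
		.type = "si2157",
		.addr = 0x60,		/* fixed tuner address, per the header */
		.platform_data = &cfg,
	};

	return i2c_new_device(i2c, &info);
}

si2157_probe() then copies these fields into its private state, as the si2157.c hunk above shows, so a stack-local config is sufficient here.
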
diff --git a/drivers/media/tuners/si2157_priv.h b/drivers/media/tuners/si2157_priv.h
index 2bda903358da..778f81b39996 100644
--- a/drivers/media/tuners/si2157_priv.h
+++ b/drivers/media/tuners/si2157_priv.h
@@ -23,8 +23,9 @@ enum si2157_pads {
struct si2157_dev {
struct mutex i2c_mutex;
struct dvb_frontend *fe;
- bool active;
- bool inversion;
+ unsigned int active:1;
+ unsigned int inversion:1;
+ unsigned int dont_load_firmware:1;
u8 chiptype;
u8 if_port;
u32 if_frequency;
diff --git a/drivers/media/tuners/tuner-xc2028-types.h b/drivers/media/tuners/tuner-xc2028-types.h
index 50d017a4822a..fcca39d3e006 100644
--- a/drivers/media/tuners/tuner-xc2028-types.h
+++ b/drivers/media/tuners/tuner-xc2028-types.h
@@ -1,5 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
- * SPDX-License-Identifier: GPL-2.0
* tuner-xc2028_types
*
 * This file includes internal types to be used inside tuner-xc2028.
diff --git a/drivers/media/tuners/tuner-xc2028.h b/drivers/media/tuners/tuner-xc2028.h
index 7b58bc06e35c..2dd45d0765d7 100644
--- a/drivers/media/tuners/tuner-xc2028.h
+++ b/drivers/media/tuners/tuner-xc2028.h
@@ -1,5 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
- * SPDX-License-Identifier: GPL-2.0
* tuner-xc2028
*
* Copyright (c) 2007-2008 Mauro Carvalho Chehab <mchehab@kernel.org>
diff --git a/drivers/media/usb/b2c2/flexcop-usb.c b/drivers/media/usb/b2c2/flexcop-usb.c
index 1826ff825c2e..039963a7765b 100644
--- a/drivers/media/usb/b2c2/flexcop-usb.c
+++ b/drivers/media/usb/b2c2/flexcop-usb.c
@@ -295,7 +295,7 @@ static int flexcop_usb_i2c_req(struct flexcop_i2c_adapter *i2c,
mutex_unlock(&fc_usb->data_mutex);
- return 0;
+ return ret;
}
/* actual bus specific access functions,
@@ -504,7 +504,13 @@ urb_error:
static int flexcop_usb_init(struct flexcop_usb *fc_usb)
{
	/* use the alternate setting with the largest buffer */
- usb_set_interface(fc_usb->udev,0,1);
+ int ret = usb_set_interface(fc_usb->udev, 0, 1);
+
+ if (ret) {
+ err("set interface failed.");
+ return ret;
+ }
+
switch (fc_usb->udev->speed) {
case USB_SPEED_LOW:
err("cannot handle USB speed because it is too slow.");
@@ -538,6 +544,9 @@ static int flexcop_usb_probe(struct usb_interface *intf,
struct flexcop_device *fc = NULL;
int ret;
+ if (intf->cur_altsetting->desc.bNumEndpoints < 1)
+ return -ENODEV;
+
if ((fc = flexcop_device_kmalloc(sizeof(struct flexcop_usb))) == NULL) {
err("out of memory\n");
return -ENOMEM;
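
The added bNumEndpoints guard is the usual defense against malformed or hostile USB descriptors: later code indexes cur_altsetting->endpoint[] unconditionally, so probe must verify the array is non-empty first. A hypothetical illustration of the same pattern:

#include <linux/usb.h>

/*
 * Hypothetical guard mirroring the probe fix: descriptors come from
 * the device and are untrusted, so check bNumEndpoints before touching
 * endpoint[0].
 */
static int first_ep_maxp(struct usb_interface *intf)
{
	struct usb_endpoint_descriptor *desc;

	if (intf->cur_altsetting->desc.bNumEndpoints < 1)
		return -ENODEV;

	desc = &intf->cur_altsetting->endpoint[0].desc; /* now in bounds */
	return usb_endpoint_maxp(desc);
}
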
diff --git a/drivers/media/usb/cx231xx/Kconfig b/drivers/media/usb/cx231xx/Kconfig
index 74f3b29d9c60..2fe2b2d335ba 100644
--- a/drivers/media/usb/cx231xx/Kconfig
+++ b/drivers/media/usb/cx231xx/Kconfig
@@ -4,7 +4,7 @@ config VIDEO_CX231XX
depends on VIDEO_DEV && I2C && I2C_MUX
select VIDEO_TUNER
select VIDEO_TVEEPROM
- select VIDEOBUF_VMALLOC
+ select VIDEOBUF2_VMALLOC
select VIDEO_CX25840
select VIDEO_CX2341X
diff --git a/drivers/media/usb/cx231xx/cx231xx-417.c b/drivers/media/usb/cx231xx/cx231xx-417.c
index 6d218a036966..1aec4459f50a 100644
--- a/drivers/media/usb/cx231xx/cx231xx-417.c
+++ b/drivers/media/usb/cx231xx/cx231xx-417.c
@@ -60,10 +60,6 @@
#define MCI_MODE_MEMORY_READ 0x000
#define MCI_MODE_MEMORY_WRITE 0x4000
-static unsigned int mpegbufs = 8;
-module_param(mpegbufs, int, 0644);
-MODULE_PARM_DESC(mpegbufs, "number of mpeg buffers, range 2-32");
-
static unsigned int mpeglines = 128;
module_param(mpeglines, int, 0644);
MODULE_PARM_DESC(mpeglines, "number of lines in an MPEG buffer, range 2-32");
@@ -1080,16 +1076,6 @@ static int cx231xx_load_firmware(struct cx231xx *dev)
return 0;
}
-static void cx231xx_417_check_encoder(struct cx231xx *dev)
-{
- u32 status, seq;
-
- status = 0;
- seq = 0;
- cx231xx_api_cmd(dev, CX2341X_ENC_GET_SEQ_END, 0, 2, &status, &seq);
- dprintk(1, "%s() status = %d, seq = %d\n", __func__, status, seq);
-}
-
static void cx231xx_codec_settings(struct cx231xx *dev)
{
dprintk(1, "%s()\n", __func__);
@@ -1227,40 +1213,25 @@ static int cx231xx_initialize_codec(struct cx231xx *dev)
/* ------------------------------------------------------------------ */
-static int bb_buf_setup(struct videobuf_queue *q,
- unsigned int *count, unsigned int *size)
+static int queue_setup(struct vb2_queue *vq,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], struct device *alloc_devs[])
{
- struct cx231xx_fh *fh = q->priv_data;
+ struct cx231xx *dev = vb2_get_drv_priv(vq);
+ unsigned int size = mpeglinesize * mpeglines;
- fh->dev->ts1.ts_packet_size = mpeglinesize;
- fh->dev->ts1.ts_packet_count = mpeglines;
+ dev->ts1.ts_packet_size = mpeglinesize;
+ dev->ts1.ts_packet_count = mpeglines;
- *size = fh->dev->ts1.ts_packet_size * fh->dev->ts1.ts_packet_count;
- *count = mpegbufs;
+ if (vq->num_buffers + *nbuffers < CX231XX_MIN_BUF)
+ *nbuffers = CX231XX_MIN_BUF - vq->num_buffers;
- return 0;
-}
-
-static void free_buffer(struct videobuf_queue *vq, struct cx231xx_buffer *buf)
-{
- struct cx231xx_fh *fh = vq->priv_data;
- struct cx231xx *dev = fh->dev;
- unsigned long flags = 0;
+ if (*nplanes)
+ return sizes[0] < size ? -EINVAL : 0;
+ *nplanes = 1;
+ sizes[0] = mpeglinesize * mpeglines;
- BUG_ON(in_interrupt());
-
- spin_lock_irqsave(&dev->video_mode.slock, flags);
- if (dev->USE_ISO) {
- if (dev->video_mode.isoc_ctl.buf == buf)
- dev->video_mode.isoc_ctl.buf = NULL;
- } else {
- if (dev->video_mode.bulk_ctl.buf == buf)
- dev->video_mode.bulk_ctl.buf = NULL;
- }
- spin_unlock_irqrestore(&dev->video_mode.slock, flags);
- videobuf_waiton(vq, &buf->vb, 0, 0);
- videobuf_vmalloc_free(&buf->vb);
- buf->vb.state = VIDEOBUF_NEEDS_INIT;
+ return 0;
}
static void buffer_copy(struct cx231xx *dev, char *data, int len, struct urb *urb,
@@ -1276,13 +1247,13 @@ static void buffer_copy(struct cx231xx *dev, char *data, int len, struct urb *ur
return;
buf = list_entry(dma_q->active.next,
- struct cx231xx_buffer, vb.queue);
+ struct cx231xx_buffer, list);
dev->video_mode.isoc_ctl.buf = buf;
dma_q->mpeg_buffer_done = 1;
}
/* Fill buffer */
buf = dev->video_mode.isoc_ctl.buf;
- vbuf = videobuf_to_vmalloc(&buf->vb);
+ vbuf = vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
if ((dma_q->mpeg_buffer_completed+len) <
mpeglines*mpeglinesize) {
@@ -1306,11 +1277,10 @@ static void buffer_copy(struct cx231xx *dev, char *data, int len, struct urb *ur
memcpy(vbuf+dma_q->mpeg_buffer_completed,
data, tail_data);
- buf->vb.state = VIDEOBUF_DONE;
- buf->vb.field_count++;
- buf->vb.ts = ktime_get_ns();
- list_del(&buf->vb.queue);
- wake_up(&buf->vb.done);
+ buf->vb.vb2_buf.timestamp = ktime_get_ns();
+ buf->vb.sequence = dma_q->sequence++;
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
dma_q->mpeg_buffer_completed = 0;
if (len - tail_data > 0) {
@@ -1331,17 +1301,15 @@ static void buffer_filled(char *data, int len, struct urb *urb,
if (list_empty(&dma_q->active))
return;
- buf = list_entry(dma_q->active.next,
- struct cx231xx_buffer, vb.queue);
+ buf = list_entry(dma_q->active.next, struct cx231xx_buffer, list);
/* Fill buffer */
- vbuf = videobuf_to_vmalloc(&buf->vb);
+ vbuf = vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
memcpy(vbuf, data, len);
- buf->vb.state = VIDEOBUF_DONE;
- buf->vb.field_count++;
- buf->vb.ts = ktime_get_ns();
- list_del(&buf->vb.queue);
- wake_up(&buf->vb.done);
+ buf->vb.sequence = dma_q->sequence++;
+ buf->vb.vb2_buf.timestamp = ktime_get_ns();
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
}
static int cx231xx_isoc_copy(struct cx231xx *dev, struct urb *urb)
@@ -1394,100 +1362,104 @@ static int cx231xx_bulk_copy(struct cx231xx *dev, struct urb *urb)
return 0;
}
-static int bb_buf_prepare(struct videobuf_queue *q,
- struct videobuf_buffer *vb, enum v4l2_field field)
+static void buffer_queue(struct vb2_buffer *vb)
{
- struct cx231xx_fh *fh = q->priv_data;
struct cx231xx_buffer *buf =
- container_of(vb, struct cx231xx_buffer, vb);
- struct cx231xx *dev = fh->dev;
- int rc = 0, urb_init = 0;
- int size = fh->dev->ts1.ts_packet_size * fh->dev->ts1.ts_packet_count;
+ container_of(vb, struct cx231xx_buffer, vb.vb2_buf);
+ struct cx231xx *dev = vb2_get_drv_priv(vb->vb2_queue);
+ struct cx231xx_dmaqueue *vidq = &dev->video_mode.vidq;
+ unsigned long flags;
- if (0 != buf->vb.baddr && buf->vb.bsize < size)
- return -EINVAL;
- buf->vb.width = fh->dev->ts1.ts_packet_size;
- buf->vb.height = fh->dev->ts1.ts_packet_count;
- buf->vb.size = size;
- buf->vb.field = field;
-
- if (VIDEOBUF_NEEDS_INIT == buf->vb.state) {
- rc = videobuf_iolock(q, &buf->vb, NULL);
- if (rc < 0)
- goto fail;
- }
+ spin_lock_irqsave(&dev->video_mode.slock, flags);
+ list_add_tail(&buf->list, &vidq->active);
+ spin_unlock_irqrestore(&dev->video_mode.slock, flags);
+}
- if (dev->USE_ISO) {
- if (!dev->video_mode.isoc_ctl.num_bufs)
- urb_init = 1;
- } else {
- if (!dev->video_mode.bulk_ctl.num_bufs)
- urb_init = 1;
+static void return_all_buffers(struct cx231xx *dev,
+ enum vb2_buffer_state state)
+{
+ struct cx231xx_dmaqueue *vidq = &dev->video_mode.vidq;
+ struct cx231xx_buffer *buf, *node;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->video_mode.slock, flags);
+ list_for_each_entry_safe(buf, node, &vidq->active, list) {
+ vb2_buffer_done(&buf->vb.vb2_buf, state);
+ list_del(&buf->list);
}
- dev_dbg(dev->dev,
- "urb_init=%d dev->video_mode.max_pkt_size=%d\n",
- urb_init, dev->video_mode.max_pkt_size);
+ spin_unlock_irqrestore(&dev->video_mode.slock, flags);
+}
+
+static int start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct cx231xx *dev = vb2_get_drv_priv(vq);
+ struct cx231xx_dmaqueue *vidq = &dev->video_mode.vidq;
+ int ret = 0;
+
+ vidq->sequence = 0;
dev->mode_tv = 1;
- if (urb_init) {
- rc = cx231xx_set_mode(dev, CX231XX_DIGITAL_MODE);
- rc = cx231xx_unmute_audio(dev);
- if (dev->USE_ISO) {
- cx231xx_set_alt_setting(dev, INDEX_TS1, 4);
- rc = cx231xx_init_isoc(dev, mpeglines,
- mpegbufs,
- dev->ts1_mode.max_pkt_size,
- cx231xx_isoc_copy);
- } else {
- cx231xx_set_alt_setting(dev, INDEX_TS1, 0);
- rc = cx231xx_init_bulk(dev, mpeglines,
- mpegbufs,
- dev->ts1_mode.max_pkt_size,
- cx231xx_bulk_copy);
- }
- if (rc < 0)
- goto fail;
- }
+ cx231xx_set_alt_setting(dev, INDEX_VANC, 1);
+ cx231xx_set_gpio_value(dev, 2, 0);
- buf->vb.state = VIDEOBUF_PREPARED;
- return 0;
+ cx231xx_initialize_codec(dev);
+
+ cx231xx_start_TS1(dev);
+
+ cx231xx_set_alt_setting(dev, INDEX_TS1, 0);
+ cx231xx_set_mode(dev, CX231XX_DIGITAL_MODE);
+ if (dev->USE_ISO)
+ ret = cx231xx_init_isoc(dev, CX231XX_NUM_PACKETS,
+ CX231XX_NUM_BUFS,
+ dev->ts1_mode.max_pkt_size,
+ cx231xx_isoc_copy);
+ else
+ ret = cx231xx_init_bulk(dev, 320, 5,
+ dev->ts1_mode.max_pkt_size,
+ cx231xx_bulk_copy);
+ if (ret)
+ return_all_buffers(dev, VB2_BUF_STATE_QUEUED);
-fail:
- free_buffer(q, buf);
- return rc;
+ call_all(dev, video, s_stream, 1);
+ return ret;
}
-static void bb_buf_queue(struct videobuf_queue *q,
- struct videobuf_buffer *vb)
+static void stop_streaming(struct vb2_queue *vq)
{
- struct cx231xx_fh *fh = q->priv_data;
+ struct cx231xx *dev = vb2_get_drv_priv(vq);
+ unsigned long flags;
- struct cx231xx_buffer *buf =
- container_of(vb, struct cx231xx_buffer, vb);
- struct cx231xx *dev = fh->dev;
- struct cx231xx_dmaqueue *vidq = &dev->video_mode.vidq;
+ call_all(dev, video, s_stream, 0);
- buf->vb.state = VIDEOBUF_QUEUED;
- list_add_tail(&buf->vb.queue, &vidq->active);
+ cx231xx_stop_TS1(dev);
-}
+ /* do this before setting alternate! */
+ if (dev->USE_ISO)
+ cx231xx_uninit_isoc(dev);
+ else
+ cx231xx_uninit_bulk(dev);
+ cx231xx_set_mode(dev, CX231XX_SUSPEND);
-static void bb_buf_release(struct videobuf_queue *q,
- struct videobuf_buffer *vb)
-{
- struct cx231xx_buffer *buf =
- container_of(vb, struct cx231xx_buffer, vb);
- /*struct cx231xx_fh *fh = q->priv_data;*/
- /*struct cx231xx *dev = (struct cx231xx *)fh->dev;*/
+ cx231xx_api_cmd(dev, CX2341X_ENC_STOP_CAPTURE, 3, 0,
+ CX231xx_END_NOW, CX231xx_MPEG_CAPTURE,
+ CX231xx_RAW_BITS_NONE);
- free_buffer(q, buf);
+ spin_lock_irqsave(&dev->video_mode.slock, flags);
+ if (dev->USE_ISO)
+ dev->video_mode.isoc_ctl.buf = NULL;
+ else
+ dev->video_mode.bulk_ctl.buf = NULL;
+ spin_unlock_irqrestore(&dev->video_mode.slock, flags);
+ return_all_buffers(dev, VB2_BUF_STATE_ERROR);
}
-static const struct videobuf_queue_ops cx231xx_qops = {
- .buf_setup = bb_buf_setup,
- .buf_prepare = bb_buf_prepare,
- .buf_queue = bb_buf_queue,
- .buf_release = bb_buf_release,
+static struct vb2_ops cx231xx_video_qops = {
+ .queue_setup = queue_setup,
+ .buf_queue = buffer_queue,
+ .start_streaming = start_streaming,
+ .stop_streaming = stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
};
/* ------------------------------------------------------------------ */
@@ -1495,8 +1467,7 @@ static const struct videobuf_queue_ops cx231xx_qops = {
static int vidioc_g_pixelaspect(struct file *file, void *priv,
int type, struct v4l2_fract *f)
{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(file);
bool is_50hz = dev->encodernorm.id & V4L2_STD_625_50;
if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
@@ -1511,8 +1482,7 @@ static int vidioc_g_pixelaspect(struct file *file, void *priv,
static int vidioc_g_selection(struct file *file, void *priv,
struct v4l2_selection *s)
{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(file);
if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
@@ -1533,8 +1503,7 @@ static int vidioc_g_selection(struct file *file, void *priv,
static int vidioc_g_std(struct file *file, void *fh0, v4l2_std_id *norm)
{
- struct cx231xx_fh *fh = file->private_data;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(file);
*norm = dev->encodernorm.id;
return 0;
@@ -1542,8 +1511,7 @@ static int vidioc_g_std(struct file *file, void *fh0, v4l2_std_id *norm)
static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id id)
{
- struct cx231xx_fh *fh = file->private_data;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(file);
unsigned int i;
for (i = 0; i < ARRAY_SIZE(cx231xx_tvnorms); i++)
@@ -1575,8 +1543,7 @@ static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id id)
static int vidioc_s_ctrl(struct file *file, void *priv,
struct v4l2_control *ctl)
{
- struct cx231xx_fh *fh = file->private_data;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(file);
struct v4l2_subdev *sd;
dprintk(3, "enter vidioc_s_ctrl()\n");
@@ -1601,8 +1568,7 @@ static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct cx231xx_fh *fh = file->private_data;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(file);
dprintk(3, "enter vidioc_g_fmt_vid_cap()\n");
f->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG;
@@ -1621,8 +1587,7 @@ static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct cx231xx_fh *fh = file->private_data;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(file);
dprintk(3, "enter vidioc_try_fmt_vid_cap()\n");
f->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG;
@@ -1636,230 +1601,21 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
return 0;
}
-static int vidioc_reqbufs(struct file *file, void *priv,
- struct v4l2_requestbuffers *p)
-{
- struct cx231xx_fh *fh = file->private_data;
-
- return videobuf_reqbufs(&fh->vidq, p);
-}
-
-static int vidioc_querybuf(struct file *file, void *priv,
- struct v4l2_buffer *p)
-{
- struct cx231xx_fh *fh = file->private_data;
-
- return videobuf_querybuf(&fh->vidq, p);
-}
-
-static int vidioc_qbuf(struct file *file, void *priv,
- struct v4l2_buffer *p)
-{
- struct cx231xx_fh *fh = file->private_data;
-
- return videobuf_qbuf(&fh->vidq, p);
-}
-
-static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *b)
-{
- struct cx231xx_fh *fh = priv;
-
- return videobuf_dqbuf(&fh->vidq, b, file->f_flags & O_NONBLOCK);
-}
-
-
-static int vidioc_streamon(struct file *file, void *priv,
- enum v4l2_buf_type i)
-{
- struct cx231xx_fh *fh = file->private_data;
- struct cx231xx *dev = fh->dev;
-
- dprintk(3, "enter vidioc_streamon()\n");
- cx231xx_set_alt_setting(dev, INDEX_TS1, 0);
- cx231xx_set_mode(dev, CX231XX_DIGITAL_MODE);
- if (dev->USE_ISO)
- cx231xx_init_isoc(dev, CX231XX_NUM_PACKETS,
- CX231XX_NUM_BUFS,
- dev->video_mode.max_pkt_size,
- cx231xx_isoc_copy);
- else {
- cx231xx_init_bulk(dev, 320,
- 5,
- dev->ts1_mode.max_pkt_size,
- cx231xx_bulk_copy);
- }
- dprintk(3, "exit vidioc_streamon()\n");
- return videobuf_streamon(&fh->vidq);
-}
-
-static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
-{
- struct cx231xx_fh *fh = file->private_data;
-
- return videobuf_streamoff(&fh->vidq);
-}
-
static int vidioc_log_status(struct file *file, void *priv)
{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(file);
call_all(dev, core, log_status);
return v4l2_ctrl_log_status(file, priv);
}
-static int mpeg_open(struct file *file)
-{
- struct video_device *vdev = video_devdata(file);
- struct cx231xx *dev = video_drvdata(file);
- struct cx231xx_fh *fh;
-
- dprintk(2, "%s()\n", __func__);
-
- if (mutex_lock_interruptible(&dev->lock))
- return -ERESTARTSYS;
-
- /* allocate + initialize per filehandle data */
- fh = kzalloc(sizeof(*fh), GFP_KERNEL);
- if (NULL == fh) {
- mutex_unlock(&dev->lock);
- return -ENOMEM;
- }
-
- file->private_data = fh;
- v4l2_fh_init(&fh->fh, vdev);
- fh->dev = dev;
-
-
- videobuf_queue_vmalloc_init(&fh->vidq, &cx231xx_qops,
- NULL, &dev->video_mode.slock,
- V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FIELD_INTERLACED,
- sizeof(struct cx231xx_buffer), fh, &dev->lock);
-/*
- videobuf_queue_sg_init(&fh->vidq, &cx231xx_qops,
- dev->dev, &dev->ts1.slock,
- V4L2_BUF_TYPE_VIDEO_CAPTURE,
- V4L2_FIELD_INTERLACED,
- sizeof(struct cx231xx_buffer),
- fh, &dev->lock);
-*/
-
- cx231xx_set_alt_setting(dev, INDEX_VANC, 1);
- cx231xx_set_gpio_value(dev, 2, 0);
-
- cx231xx_initialize_codec(dev);
-
- mutex_unlock(&dev->lock);
- v4l2_fh_add(&fh->fh);
- cx231xx_start_TS1(dev);
-
- return 0;
-}
-
-static int mpeg_release(struct file *file)
-{
- struct cx231xx_fh *fh = file->private_data;
- struct cx231xx *dev = fh->dev;
-
- dprintk(3, "mpeg_release()! dev=0x%p\n", dev);
-
- mutex_lock(&dev->lock);
-
- cx231xx_stop_TS1(dev);
-
- /* do this before setting alternate! */
- if (dev->USE_ISO)
- cx231xx_uninit_isoc(dev);
- else
- cx231xx_uninit_bulk(dev);
- cx231xx_set_mode(dev, CX231XX_SUSPEND);
-
- cx231xx_api_cmd(fh->dev, CX2341X_ENC_STOP_CAPTURE, 3, 0,
- CX231xx_END_NOW, CX231xx_MPEG_CAPTURE,
- CX231xx_RAW_BITS_NONE);
-
- /* FIXME: Review this crap */
- /* Shut device down on last close */
- if (atomic_cmpxchg(&fh->v4l_reading, 1, 0) == 1) {
- if (atomic_dec_return(&dev->v4l_reader_count) == 0) {
- /* stop mpeg capture */
-
- msleep(500);
- cx231xx_417_check_encoder(dev);
-
- }
- }
-
- if (fh->vidq.streaming)
- videobuf_streamoff(&fh->vidq);
- if (fh->vidq.reading)
- videobuf_read_stop(&fh->vidq);
-
- videobuf_mmap_free(&fh->vidq);
- v4l2_fh_del(&fh->fh);
- v4l2_fh_exit(&fh->fh);
- kfree(fh);
- mutex_unlock(&dev->lock);
- return 0;
-}
-
-static ssize_t mpeg_read(struct file *file, char __user *data,
- size_t count, loff_t *ppos)
-{
- struct cx231xx_fh *fh = file->private_data;
- struct cx231xx *dev = fh->dev;
-
- /* Deal w/ A/V decoder * and mpeg encoder sync issues. */
- /* Start mpeg encoder on first read. */
- if (atomic_cmpxchg(&fh->v4l_reading, 0, 1) == 0) {
- if (atomic_inc_return(&dev->v4l_reader_count) == 1) {
- if (cx231xx_initialize_codec(dev) < 0)
- return -EINVAL;
- }
- }
-
- return videobuf_read_stream(&fh->vidq, data, count, ppos, 0,
- file->f_flags & O_NONBLOCK);
-}
-
-static __poll_t mpeg_poll(struct file *file,
- struct poll_table_struct *wait)
-{
- __poll_t req_events = poll_requested_events(wait);
- struct cx231xx_fh *fh = file->private_data;
- struct cx231xx *dev = fh->dev;
- __poll_t res = 0;
-
- if (v4l2_event_pending(&fh->fh))
- res |= EPOLLPRI;
- else
- poll_wait(file, &fh->fh.wait, wait);
-
- if (!(req_events & (EPOLLIN | EPOLLRDNORM)))
- return res;
-
- mutex_lock(&dev->lock);
- res |= videobuf_poll_stream(file, &fh->vidq, wait);
- mutex_unlock(&dev->lock);
- return res;
-}
-
-static int mpeg_mmap(struct file *file, struct vm_area_struct *vma)
-{
- struct cx231xx_fh *fh = file->private_data;
-
- dprintk(2, "%s()\n", __func__);
-
- return videobuf_mmap_mapper(&fh->vidq, vma);
-}
-
static const struct v4l2_file_operations mpeg_fops = {
.owner = THIS_MODULE,
- .open = mpeg_open,
- .release = mpeg_release,
- .read = mpeg_read,
- .poll = mpeg_poll,
- .mmap = mpeg_mmap,
+ .open = v4l2_fh_open,
+ .release = vb2_fop_release,
+ .read = vb2_fop_read,
+ .poll = vb2_fop_poll,
+ .mmap = vb2_fop_mmap,
.unlocked_ioctl = video_ioctl2,
};
@@ -1881,12 +1637,12 @@ static const struct v4l2_ioctl_ops mpeg_ioctl_ops = {
.vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
.vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
.vidioc_s_fmt_vid_cap = vidioc_try_fmt_vid_cap,
- .vidioc_reqbufs = vidioc_reqbufs,
- .vidioc_querybuf = vidioc_querybuf,
- .vidioc_qbuf = vidioc_qbuf,
- .vidioc_dqbuf = vidioc_dqbuf,
- .vidioc_streamon = vidioc_streamon,
- .vidioc_streamoff = vidioc_streamoff,
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
.vidioc_log_status = vidioc_log_status,
#ifdef CONFIG_VIDEO_ADV_DEBUG
.vidioc_g_register = cx231xx_g_register,
@@ -1980,6 +1736,7 @@ int cx231xx_417_register(struct cx231xx *dev)
/* FIXME: Port1 hardcoded here */
int err = -ENODEV;
struct cx231xx_tsport *tsport = &dev->ts1;
+ struct vb2_queue *q;
dprintk(1, "%s()\n", __func__);
@@ -2017,6 +1774,21 @@ int cx231xx_417_register(struct cx231xx *dev)
/* Allocate and initialize V4L video device */
cx231xx_video_dev_init(dev, dev->udev,
&dev->v4l_device, &cx231xx_mpeg_template, "mpeg");
+ q = &dev->mpegq;
+ q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ q->io_modes = VB2_USERPTR | VB2_MMAP | VB2_DMABUF | VB2_READ;
+ q->drv_priv = dev;
+ q->buf_struct_size = sizeof(struct cx231xx_buffer);
+ q->ops = &cx231xx_video_qops;
+ q->mem_ops = &vb2_vmalloc_memops;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->min_buffers_needed = 1;
+ q->lock = &dev->lock;
+ err = vb2_queue_init(q);
+ if (err)
+ return err;
+ dev->v4l_device.queue = q;
+
err = video_register_device(&dev->v4l_device,
VFL_TYPE_GRABBER, -1);
if (err < 0) {
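
Taken together, the cx231xx-417 changes are a textbook videobuf-to-videobuf2 conversion. Stripped of the driver specifics, the contract the new code implements looks roughly like this (names illustrative, not the driver's):

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-vmalloc.h>

/*
 * Skeleton of the vb2 contract: the core calls queue_setup() once to
 * size the queue, buf_queue() for every buffer handed to the driver,
 * and start/stop_streaming() around the DMA engine's lifetime.
 */
static int skel_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
			    unsigned int *nplanes, unsigned int sizes[],
			    struct device *alloc_devs[])
{
	*nplanes = 1;
	sizes[0] = 65536;	/* illustrative fixed buffer size */
	return 0;
}

static void skel_buf_queue(struct vb2_buffer *vb)
{
	/* driver takes ownership: add vb to its own active list */
}

static int skel_start_streaming(struct vb2_queue *vq, unsigned int count)
{
	/* start URBs/DMA; on failure, return every queued buffer
	 * with vb2_buffer_done(..., VB2_BUF_STATE_QUEUED) */
	return 0;
}

static void skel_stop_streaming(struct vb2_queue *vq)
{
	/* stop DMA, then give back every owned buffer with
	 * vb2_buffer_done(..., VB2_BUF_STATE_ERROR) */
}

static const struct vb2_ops skel_qops = {
	.queue_setup	 = skel_queue_setup,
	.buf_queue	 = skel_buf_queue,
	.start_streaming = skel_start_streaming,
	.stop_streaming	 = skel_stop_streaming,
	.wait_prepare	 = vb2_ops_wait_prepare,
	.wait_finish	 = vb2_ops_wait_finish,
};
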
diff --git a/drivers/media/usb/cx231xx/cx231xx-audio.c b/drivers/media/usb/cx231xx/cx231xx-audio.c
index 9ef362e221df..fd6e2df3d1b7 100644
--- a/drivers/media/usb/cx231xx/cx231xx-audio.c
+++ b/drivers/media/usb/cx231xx/cx231xx-audio.c
@@ -14,7 +14,6 @@
#include <linux/soundcard.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
-#include <linux/proc_fs.h>
#include <linux/module.h>
#include <sound/core.h>
#include <sound/pcm.h>
diff --git a/drivers/media/usb/cx231xx/cx231xx-avcore.c b/drivers/media/usb/cx231xx/cx231xx-avcore.c
index d417b5fe4093..0974965e848f 100644
--- a/drivers/media/usb/cx231xx/cx231xx-avcore.c
+++ b/drivers/media/usb/cx231xx/cx231xx-avcore.c
@@ -1240,7 +1240,7 @@ int cx231xx_init_ctrl_pin_status(struct cx231xx *dev)
int cx231xx_set_agc_analog_digital_mux_select(struct cx231xx *dev,
u8 analog_or_digital)
{
- int status = 0;
+ int status;
/* first set the direction to output */
status = cx231xx_set_gpio_direction(dev,
diff --git a/drivers/media/usb/cx231xx/cx231xx-cards.c b/drivers/media/usb/cx231xx/cx231xx-cards.c
index e123e74c549e..92efe6c1f47b 100644
--- a/drivers/media/usb/cx231xx/cx231xx-cards.c
+++ b/drivers/media/usb/cx231xx/cx231xx-cards.c
@@ -1479,13 +1479,11 @@ static int cx231xx_init_dev(struct cx231xx *dev, struct usb_device *udev,
goto err_dev_init;
}
- /* init video dma queues */
+ /* init video dma queue */
INIT_LIST_HEAD(&dev->video_mode.vidq.active);
- INIT_LIST_HEAD(&dev->video_mode.vidq.queued);
- /* init vbi dma queues */
+ /* init vbi dma queue */
INIT_LIST_HEAD(&dev->vbi_mode.vidq.active);
- INIT_LIST_HEAD(&dev->vbi_mode.vidq.queued);
/* Reset other chips required if they are tied up with GPIO pins */
cx231xx_add_into_devlist(dev);
diff --git a/drivers/media/usb/cx231xx/cx231xx-vbi.c b/drivers/media/usb/cx231xx/cx231xx-vbi.c
index fba7ccdf5a25..d2f143a096d1 100644
--- a/drivers/media/usb/cx231xx/cx231xx-vbi.c
+++ b/drivers/media/usb/cx231xx/cx231xx-vbi.c
@@ -153,131 +153,98 @@ static inline int cx231xx_isoc_vbi_copy(struct cx231xx *dev, struct urb *urb)
Vbi buf operations
------------------------------------------------------------------*/
-static int
-vbi_buffer_setup(struct videobuf_queue *vq, unsigned int *count,
- unsigned int *size)
+static int vbi_queue_setup(struct vb2_queue *vq,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], struct device *alloc_devs[])
{
- struct cx231xx_fh *fh = vq->priv_data;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = vb2_get_drv_priv(vq);
u32 height = 0;
height = ((dev->norm & V4L2_STD_625_50) ?
PAL_VBI_LINES : NTSC_VBI_LINES);
- *size = (dev->width * height * 2 * 2);
- if (0 == *count)
- *count = CX231XX_DEF_VBI_BUF;
-
- if (*count < CX231XX_MIN_BUF)
- *count = CX231XX_MIN_BUF;
-
+ *nplanes = 1;
+ sizes[0] = (dev->width * height * 2 * 2);
return 0;
}
/* This is called *without* dev->slock held; please keep it that way */
-static void free_buffer(struct videobuf_queue *vq, struct cx231xx_buffer *buf)
-{
- struct cx231xx_fh *fh = vq->priv_data;
- struct cx231xx *dev = fh->dev;
- unsigned long flags = 0;
- BUG_ON(in_interrupt());
-
- /* We used to wait for the buffer to finish here, but this didn't work
- because, as we were keeping the state as VIDEOBUF_QUEUED,
- videobuf_queue_cancel marked it as finished for us.
- (Also, it could wedge forever if the hardware was misconfigured.)
-
- This should be safe; by the time we get here, the buffer isn't
- queued anymore. If we ever start marking the buffers as
- VIDEOBUF_ACTIVE, it won't be, though.
- */
- spin_lock_irqsave(&dev->vbi_mode.slock, flags);
- if (dev->vbi_mode.bulk_ctl.buf == buf)
- dev->vbi_mode.bulk_ctl.buf = NULL;
- spin_unlock_irqrestore(&dev->vbi_mode.slock, flags);
-
- videobuf_vmalloc_free(&buf->vb);
- buf->vb.state = VIDEOBUF_NEEDS_INIT;
-}
-
-static int
-vbi_buffer_prepare(struct videobuf_queue *vq, struct videobuf_buffer *vb,
- enum v4l2_field field)
+static int vbi_buf_prepare(struct vb2_buffer *vb)
{
- struct cx231xx_fh *fh = vq->priv_data;
- struct cx231xx_buffer *buf =
- container_of(vb, struct cx231xx_buffer, vb);
- struct cx231xx *dev = fh->dev;
- int rc = 0, urb_init = 0;
+ struct cx231xx *dev = vb2_get_drv_priv(vb->vb2_queue);
u32 height = 0;
+ u32 size;
height = ((dev->norm & V4L2_STD_625_50) ?
PAL_VBI_LINES : NTSC_VBI_LINES);
- buf->vb.size = ((dev->width << 1) * height * 2);
+ size = ((dev->width << 1) * height * 2);
- if (0 != buf->vb.baddr && buf->vb.bsize < buf->vb.size)
+ if (vb2_plane_size(vb, 0) < size)
return -EINVAL;
-
- buf->vb.width = dev->width;
- buf->vb.height = height;
- buf->vb.field = field;
- buf->vb.field = V4L2_FIELD_SEQ_TB;
-
- if (VIDEOBUF_NEEDS_INIT == buf->vb.state) {
- rc = videobuf_iolock(vq, &buf->vb, NULL);
- if (rc < 0)
- goto fail;
- }
-
- if (!dev->vbi_mode.bulk_ctl.num_bufs)
- urb_init = 1;
-
- if (urb_init) {
- rc = cx231xx_init_vbi_isoc(dev, CX231XX_NUM_VBI_PACKETS,
- CX231XX_NUM_VBI_BUFS,
- dev->vbi_mode.alt_max_pkt_size[0],
- cx231xx_isoc_vbi_copy);
- if (rc < 0)
- goto fail;
- }
-
- buf->vb.state = VIDEOBUF_PREPARED;
+ vb2_set_plane_payload(vb, 0, size);
return 0;
-
-fail:
- free_buffer(vq, buf);
- return rc;
}
-static void
-vbi_buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
+static void vbi_buf_queue(struct vb2_buffer *vb)
{
+ struct cx231xx *dev = vb2_get_drv_priv(vb->vb2_queue);
struct cx231xx_buffer *buf =
- container_of(vb, struct cx231xx_buffer, vb);
- struct cx231xx_fh *fh = vq->priv_data;
- struct cx231xx *dev = fh->dev;
+ container_of(vb, struct cx231xx_buffer, vb.vb2_buf);
struct cx231xx_dmaqueue *vidq = &dev->vbi_mode.vidq;
+ unsigned long flags;
- buf->vb.state = VIDEOBUF_QUEUED;
- list_add_tail(&buf->vb.queue, &vidq->active);
+ spin_lock_irqsave(&dev->vbi_mode.slock, flags);
+ list_add_tail(&buf->list, &vidq->active);
+ spin_unlock_irqrestore(&dev->vbi_mode.slock, flags);
+}
+
+static void return_all_buffers(struct cx231xx *dev,
+ enum vb2_buffer_state state)
+{
+ struct cx231xx_dmaqueue *vidq = &dev->vbi_mode.vidq;
+ struct cx231xx_buffer *buf, *node;
+ unsigned long flags;
+ spin_lock_irqsave(&dev->vbi_mode.slock, flags);
+ dev->vbi_mode.bulk_ctl.buf = NULL;
+ list_for_each_entry_safe(buf, node, &vidq->active, list) {
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb.vb2_buf, state);
+ }
+ spin_unlock_irqrestore(&dev->vbi_mode.slock, flags);
}
-static void vbi_buffer_release(struct videobuf_queue *vq,
- struct videobuf_buffer *vb)
+static int vbi_start_streaming(struct vb2_queue *vq, unsigned int count)
{
- struct cx231xx_buffer *buf =
- container_of(vb, struct cx231xx_buffer, vb);
+ struct cx231xx *dev = vb2_get_drv_priv(vq);
+ struct cx231xx_dmaqueue *vidq = &dev->vbi_mode.vidq;
+ int ret;
+
+ vidq->sequence = 0;
+ ret = cx231xx_init_vbi_isoc(dev, CX231XX_NUM_VBI_PACKETS,
+ CX231XX_NUM_VBI_BUFS,
+ dev->vbi_mode.alt_max_pkt_size[0],
+ cx231xx_isoc_vbi_copy);
+ if (ret)
+ return_all_buffers(dev, VB2_BUF_STATE_QUEUED);
+ return ret;
+}
+static void vbi_stop_streaming(struct vb2_queue *vq)
+{
+ struct cx231xx *dev = vb2_get_drv_priv(vq);
- free_buffer(vq, buf);
+ return_all_buffers(dev, VB2_BUF_STATE_ERROR);
}
-const struct videobuf_queue_ops cx231xx_vbi_qops = {
- .buf_setup = vbi_buffer_setup,
- .buf_prepare = vbi_buffer_prepare,
- .buf_queue = vbi_buffer_queue,
- .buf_release = vbi_buffer_release,
+struct vb2_ops cx231xx_vbi_qops = {
+ .queue_setup = vbi_queue_setup,
+ .buf_prepare = vbi_buf_prepare,
+ .buf_queue = vbi_buf_queue,
+ .start_streaming = vbi_start_streaming,
+ .stop_streaming = vbi_stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
};
/* ------------------------------------------------------------------
@@ -512,16 +479,15 @@ static inline void vbi_buffer_filled(struct cx231xx *dev,
struct cx231xx_buffer *buf)
{
	/* Advise that buffer was filled */
- /* dev_dbg(dev->dev, "[%p/%d] wakeup\n", buf, buf->vb.i); */
+ /* dev_dbg(dev->dev, "[%p/%d] wakeup\n", buf, buf->vb.index); */
- buf->vb.state = VIDEOBUF_DONE;
- buf->vb.field_count++;
- buf->vb.ts = ktime_get_ns();
+ buf->vb.sequence = dma_q->sequence++;
+ buf->vb.vb2_buf.timestamp = ktime_get_ns();
dev->vbi_mode.bulk_ctl.buf = NULL;
- list_del(&buf->vb.queue);
- wake_up(&buf->vb.done);
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
}
u32 cx231xx_copy_vbi_line(struct cx231xx *dev, struct cx231xx_dmaqueue *dma_q,
@@ -611,11 +577,11 @@ static inline void get_next_vbi_buf(struct cx231xx_dmaqueue *dma_q,
}
/* Get the next buffer */
- *buf = list_entry(dma_q->active.next, struct cx231xx_buffer, vb.queue);
+ *buf = list_entry(dma_q->active.next, struct cx231xx_buffer, list);
/* Cleans up buffer - Useful for testing for frame/URB loss */
- outp = videobuf_to_vmalloc(&(*buf)->vb);
- memset(outp, 0, (*buf)->vb.size);
+ outp = vb2_plane_vaddr(&(*buf)->vb.vb2_buf, 0);
+ memset(outp, 0, vb2_plane_size(&(*buf)->vb.vb2_buf, 0));
dev->vbi_mode.bulk_ctl.buf = *buf;
@@ -656,7 +622,7 @@ int cx231xx_do_vbi_copy(struct cx231xx *dev, struct cx231xx_dmaqueue *dma_q,
if (buf == NULL)
return -EINVAL;
- p_out_buffer = videobuf_to_vmalloc(&buf->vb);
+ p_out_buffer = vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
if (dma_q->bytes_left_in_line != _line_size) {
current_line_bytes_copied =
diff --git a/drivers/media/usb/cx231xx/cx231xx-vbi.h b/drivers/media/usb/cx231xx/cx231xx-vbi.h
index 7cddd629fbfc..0b21bee5fa30 100644
--- a/drivers/media/usb/cx231xx/cx231xx-vbi.h
+++ b/drivers/media/usb/cx231xx/cx231xx-vbi.h
@@ -10,7 +10,7 @@
#ifndef _CX231XX_VBI_H
#define _CX231XX_VBI_H
-extern const struct videobuf_queue_ops cx231xx_vbi_qops;
+extern struct vb2_ops cx231xx_vbi_qops;
#define NTSC_VBI_START_LINE 10 /* line 10 - 21 */
#define NTSC_VBI_END_LINE 21
diff --git a/drivers/media/usb/cx231xx/cx231xx-video.c b/drivers/media/usb/cx231xx/cx231xx-video.c
index 9b51f07a729e..69abafaebbf3 100644
--- a/drivers/media/usb/cx231xx/cx231xx-video.c
+++ b/drivers/media/usb/cx231xx/cx231xx-video.c
@@ -58,10 +58,10 @@ MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
MODULE_VERSION(CX231XX_VERSION);
-static unsigned int card[] = {[0 ... (CX231XX_MAXBOARDS - 1)] = UNSET };
-static unsigned int video_nr[] = {[0 ... (CX231XX_MAXBOARDS - 1)] = UNSET };
-static unsigned int vbi_nr[] = {[0 ... (CX231XX_MAXBOARDS - 1)] = UNSET };
-static unsigned int radio_nr[] = {[0 ... (CX231XX_MAXBOARDS - 1)] = UNSET };
+static unsigned int card[] = {[0 ... (CX231XX_MAXBOARDS - 1)] = -1U };
+static unsigned int video_nr[] = {[0 ... (CX231XX_MAXBOARDS - 1)] = -1U };
+static unsigned int vbi_nr[] = {[0 ... (CX231XX_MAXBOARDS - 1)] = -1U };
+static unsigned int radio_nr[] = {[0 ... (CX231XX_MAXBOARDS - 1)] = -1U };
module_param_array(card, int, NULL, 0444);
module_param_array(video_nr, int, NULL, 0444);
@@ -166,18 +166,19 @@ static inline void buffer_filled(struct cx231xx *dev,
struct cx231xx_buffer *buf)
{
	/* Advise that buffer was filled */
- cx231xx_isocdbg("[%p/%d] wakeup\n", buf, buf->vb.i);
- buf->vb.state = VIDEOBUF_DONE;
- buf->vb.field_count++;
- buf->vb.ts = ktime_get_ns();
+ cx231xx_isocdbg("[%p/%d] wakeup\n", buf, buf->vb.vb2_buf.index);
+ buf->vb.sequence = dma_q->sequence++;
+ buf->vb.field = V4L2_FIELD_INTERLACED;
+ buf->vb.vb2_buf.timestamp = ktime_get_ns();
+ vb2_set_plane_payload(&buf->vb.vb2_buf, 0, dev->size);
if (dev->USE_ISO)
dev->video_mode.isoc_ctl.buf = NULL;
else
dev->video_mode.bulk_ctl.buf = NULL;
- list_del(&buf->vb.queue);
- wake_up(&buf->vb.done);
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
}
static inline void print_err_status(struct cx231xx *dev, int packet, int status)
@@ -241,11 +242,11 @@ static inline void get_next_buf(struct cx231xx_dmaqueue *dma_q,
}
/* Get the next buffer */
- *buf = list_entry(dma_q->active.next, struct cx231xx_buffer, vb.queue);
+ *buf = list_entry(dma_q->active.next, struct cx231xx_buffer, list);
/* Cleans up buffer - Useful for testing for frame/URB loss */
- outp = videobuf_to_vmalloc(&(*buf)->vb);
- memset(outp, 0, (*buf)->vb.size);
+ outp = vb2_plane_vaddr(&(*buf)->vb.vb2_buf, 0);
+ memset(outp, 0, dev->size);
if (dev->USE_ISO)
dev->video_mode.isoc_ctl.buf = *buf;
@@ -653,7 +654,7 @@ int cx231xx_do_copy(struct cx231xx *dev, struct cx231xx_dmaqueue *dma_q,
if (buf == NULL)
return -1;
- p_out_buffer = videobuf_to_vmalloc(&buf->vb);
+ p_out_buffer = vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
current_line_bytes_copied = _line_size - dma_q->bytes_left_in_line;
@@ -672,7 +673,7 @@ int cx231xx_do_copy(struct cx231xx *dev, struct cx231xx_dmaqueue *dma_q,
lencopy = dma_q->bytes_left_in_line > bytes_to_copy ?
bytes_to_copy : dma_q->bytes_left_in_line;
- if ((u8 *)(startwrite + lencopy) > (u8 *)(p_out_buffer + buf->vb.size))
+ if ((u8 *)(startwrite + lencopy) > (u8 *)(p_out_buffer + dev->size))
return 0;
/* The below copies the UYVY data straight into video buffer */
@@ -708,149 +709,98 @@ u8 cx231xx_is_buffer_done(struct cx231xx *dev, struct cx231xx_dmaqueue *dma_q)
Videobuf operations
------------------------------------------------------------------*/
-static int
-buffer_setup(struct videobuf_queue *vq, unsigned int *count, unsigned int *size)
+static int queue_setup(struct vb2_queue *vq,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], struct device *alloc_devs[])
{
- struct cx231xx_fh *fh = vq->priv_data;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = vb2_get_drv_priv(vq);
- *size = (fh->dev->width * fh->dev->height * dev->format->depth + 7)>>3;
- if (0 == *count)
- *count = CX231XX_DEF_BUF;
+ dev->size = (dev->width * dev->height * dev->format->depth + 7) >> 3;
- if (*count < CX231XX_MIN_BUF)
- *count = CX231XX_MIN_BUF;
+ if (vq->num_buffers + *nbuffers < CX231XX_MIN_BUF)
+ *nbuffers = CX231XX_MIN_BUF - vq->num_buffers;
-
- cx231xx_enable_analog_tuner(dev);
+ if (*nplanes)
+ return sizes[0] < dev->size ? -EINVAL : 0;
+ *nplanes = 1;
+ sizes[0] = dev->size;
return 0;
}
-/* This is called *without* dev->slock held; please keep it that way */
-static void free_buffer(struct videobuf_queue *vq, struct cx231xx_buffer *buf)
+static void buffer_queue(struct vb2_buffer *vb)
{
- struct cx231xx_fh *fh = vq->priv_data;
- struct cx231xx *dev = fh->dev;
- unsigned long flags = 0;
-
- BUG_ON(in_interrupt());
-
- /* We used to wait for the buffer to finish here, but this didn't work
- because, as we were keeping the state as VIDEOBUF_QUEUED,
- videobuf_queue_cancel marked it as finished for us.
- (Also, it could wedge forever if the hardware was misconfigured.)
+ struct cx231xx_buffer *buf =
+ container_of(vb, struct cx231xx_buffer, vb.vb2_buf);
+ struct cx231xx *dev = vb2_get_drv_priv(vb->vb2_queue);
+ struct cx231xx_dmaqueue *vidq = &dev->video_mode.vidq;
+ unsigned long flags;
- This should be safe; by the time we get here, the buffer isn't
- queued anymore. If we ever start marking the buffers as
- VIDEOBUF_ACTIVE, it won't be, though.
- */
spin_lock_irqsave(&dev->video_mode.slock, flags);
- if (dev->USE_ISO) {
- if (dev->video_mode.isoc_ctl.buf == buf)
- dev->video_mode.isoc_ctl.buf = NULL;
- } else {
- if (dev->video_mode.bulk_ctl.buf == buf)
- dev->video_mode.bulk_ctl.buf = NULL;
- }
+ list_add_tail(&buf->list, &vidq->active);
spin_unlock_irqrestore(&dev->video_mode.slock, flags);
-
- videobuf_vmalloc_free(&buf->vb);
- buf->vb.state = VIDEOBUF_NEEDS_INIT;
}
-static int
-buffer_prepare(struct videobuf_queue *vq, struct videobuf_buffer *vb,
- enum v4l2_field field)
+static void return_all_buffers(struct cx231xx *dev,
+ enum vb2_buffer_state state)
{
- struct cx231xx_fh *fh = vq->priv_data;
- struct cx231xx_buffer *buf =
- container_of(vb, struct cx231xx_buffer, vb);
- struct cx231xx *dev = fh->dev;
- int rc = 0, urb_init = 0;
-
- /* The only currently supported format is 16 bits/pixel */
- buf->vb.size = (fh->dev->width * fh->dev->height * dev->format->depth
- + 7) >> 3;
- if (0 != buf->vb.baddr && buf->vb.bsize < buf->vb.size)
- return -EINVAL;
-
- buf->vb.width = dev->width;
- buf->vb.height = dev->height;
- buf->vb.field = field;
-
- if (VIDEOBUF_NEEDS_INIT == buf->vb.state) {
- rc = videobuf_iolock(vq, &buf->vb, NULL);
- if (rc < 0)
- goto fail;
- }
+ struct cx231xx_dmaqueue *vidq = &dev->video_mode.vidq;
+ struct cx231xx_buffer *buf, *node;
+ unsigned long flags;
- if (dev->USE_ISO) {
- if (!dev->video_mode.isoc_ctl.num_bufs)
- urb_init = 1;
- } else {
- if (!dev->video_mode.bulk_ctl.num_bufs)
- urb_init = 1;
- }
- dev_dbg(dev->dev,
- "urb_init=%d dev->video_mode.max_pkt_size=%d\n",
- urb_init, dev->video_mode.max_pkt_size);
- if (urb_init) {
- dev->mode_tv = 0;
- if (dev->USE_ISO)
- rc = cx231xx_init_isoc(dev, CX231XX_NUM_PACKETS,
- CX231XX_NUM_BUFS,
- dev->video_mode.max_pkt_size,
- cx231xx_isoc_copy);
- else
- rc = cx231xx_init_bulk(dev, CX231XX_NUM_PACKETS,
- CX231XX_NUM_BUFS,
- dev->video_mode.max_pkt_size,
- cx231xx_bulk_copy);
- if (rc < 0)
- goto fail;
+ spin_lock_irqsave(&dev->video_mode.slock, flags);
+ if (dev->USE_ISO)
+ dev->video_mode.isoc_ctl.buf = NULL;
+ else
+ dev->video_mode.bulk_ctl.buf = NULL;
+ list_for_each_entry_safe(buf, node, &vidq->active, list) {
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb.vb2_buf, state);
}
-
- buf->vb.state = VIDEOBUF_PREPARED;
-
- return 0;
-
-fail:
- free_buffer(vq, buf);
- return rc;
+ spin_unlock_irqrestore(&dev->video_mode.slock, flags);
}
-static void buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
+static int start_streaming(struct vb2_queue *vq, unsigned int count)
{
- struct cx231xx_buffer *buf =
- container_of(vb, struct cx231xx_buffer, vb);
- struct cx231xx_fh *fh = vq->priv_data;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = vb2_get_drv_priv(vq);
struct cx231xx_dmaqueue *vidq = &dev->video_mode.vidq;
+ int ret = 0;
- buf->vb.state = VIDEOBUF_QUEUED;
- list_add_tail(&buf->vb.queue, &vidq->active);
+ vidq->sequence = 0;
+ dev->mode_tv = 0;
+ cx231xx_enable_analog_tuner(dev);
+ if (dev->USE_ISO)
+ ret = cx231xx_init_isoc(dev, CX231XX_NUM_PACKETS,
+ CX231XX_NUM_BUFS,
+ dev->video_mode.max_pkt_size,
+ cx231xx_isoc_copy);
+ else
+ ret = cx231xx_init_bulk(dev, CX231XX_NUM_PACKETS,
+ CX231XX_NUM_BUFS,
+ dev->video_mode.max_pkt_size,
+ cx231xx_bulk_copy);
+ if (ret)
+ return_all_buffers(dev, VB2_BUF_STATE_QUEUED);
+ call_all(dev, video, s_stream, 1);
+ return ret;
}
-static void buffer_release(struct videobuf_queue *vq,
- struct videobuf_buffer *vb)
+static void stop_streaming(struct vb2_queue *vq)
{
- struct cx231xx_buffer *buf =
- container_of(vb, struct cx231xx_buffer, vb);
- struct cx231xx_fh *fh = vq->priv_data;
- struct cx231xx *dev = (struct cx231xx *)fh->dev;
-
- cx231xx_isocdbg("cx231xx: called buffer_release\n");
+ struct cx231xx *dev = vb2_get_drv_priv(vq);
- free_buffer(vq, buf);
+ call_all(dev, video, s_stream, 0);
+ return_all_buffers(dev, VB2_BUF_STATE_ERROR);
}
-static const struct videobuf_queue_ops cx231xx_video_qops = {
- .buf_setup = buffer_setup,
- .buf_prepare = buffer_prepare,
- .buf_queue = buffer_queue,
- .buf_release = buffer_release,
+static struct vb2_ops cx231xx_video_qops = {
+ .queue_setup = queue_setup,
+ .buf_queue = buffer_queue,
+ .start_streaming = start_streaming,
+ .stop_streaming = stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
};
/********************* v4l2 interface **************************************/
@@ -872,58 +822,6 @@ void video_mux(struct cx231xx *dev, int index)
cx231xx_do_mode_ctrl_overrides(dev);
}
-/* Usage lock check functions */
-static int res_get(struct cx231xx_fh *fh)
-{
- struct cx231xx *dev = fh->dev;
- int rc = 0;
-
- /* This instance already has stream_on */
- if (fh->stream_on)
- return rc;
-
- if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
- if (dev->stream_on)
- return -EBUSY;
- dev->stream_on = 1;
- } else if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE) {
- if (dev->vbi_stream_on)
- return -EBUSY;
- dev->vbi_stream_on = 1;
- } else
- return -EINVAL;
-
- fh->stream_on = 1;
-
- return rc;
-}
-
-static int res_check(struct cx231xx_fh *fh)
-{
- return fh->stream_on;
-}
-
-static void res_free(struct cx231xx_fh *fh)
-{
- struct cx231xx *dev = fh->dev;
-
- fh->stream_on = 0;
-
- if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
- dev->stream_on = 0;
- if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE)
- dev->vbi_stream_on = 0;
-}
-
-static int check_dev(struct cx231xx *dev)
-{
- if (dev->state & DEV_DISCONNECTED) {
- dev_err(dev->dev, "v4l2 ioctl: device not present\n");
- return -ENODEV;
- }
- return 0;
-}
-
/* ------------------------------------------------------------------
IOCTL vidioc handling
------------------------------------------------------------------*/
@@ -931,8 +829,7 @@ static int check_dev(struct cx231xx *dev)
static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(file);
f->fmt.pix.width = dev->width;
f->fmt.pix.height = dev->height;
@@ -960,8 +857,7 @@ static struct cx231xx_fmt *format_by_fourcc(unsigned int fourcc)
static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(file);
unsigned int width = f->fmt.pix.width;
unsigned int height = f->fmt.pix.height;
unsigned int maxw = norm_maxw(dev);
@@ -993,39 +889,25 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
- int rc;
- struct cx231xx_fmt *fmt;
+ struct cx231xx *dev = video_drvdata(file);
struct v4l2_subdev_format format = {
.which = V4L2_SUBDEV_FORMAT_ACTIVE,
};
+ int rc;
- rc = check_dev(dev);
- if (rc < 0)
+ rc = vidioc_try_fmt_vid_cap(file, priv, f);
+ if (rc)
return rc;
- vidioc_try_fmt_vid_cap(file, priv, f);
-
- fmt = format_by_fourcc(f->fmt.pix.pixelformat);
- if (!fmt)
- return -EINVAL;
-
- if (videobuf_queue_is_busy(&fh->vb_vidq)) {
+ if (vb2_is_busy(&dev->vidq)) {
dev_err(dev->dev, "%s: queue busy\n", __func__);
return -EBUSY;
}
- if (dev->stream_on && !fh->stream_on) {
- dev_err(dev->dev,
- "%s: device in use by another fh\n", __func__);
- return -EBUSY;
- }
-
/* set new image size */
dev->width = f->fmt.pix.width;
dev->height = f->fmt.pix.height;
- dev->format = fmt;
+ dev->format = format_by_fourcc(f->fmt.pix.pixelformat);
v4l2_fill_mbus_format(&format.format, &f->fmt.pix, MEDIA_BUS_FMT_FIXED);
call_all(dev, pad, set_fmt, NULL, &format);
@@ -1036,8 +918,7 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *id)
{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(file);
*id = dev->norm;
return 0;
@@ -1045,21 +926,15 @@ static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *id)
static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id norm)
{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(file);
struct v4l2_subdev_format format = {
.which = V4L2_SUBDEV_FORMAT_ACTIVE,
};
- int rc;
-
- rc = check_dev(dev);
- if (rc < 0)
- return rc;
if (dev->norm == norm)
return 0;
- if (videobuf_queue_is_busy(&fh->vb_vidq))
+ if (vb2_is_busy(&dev->vidq))
return -EBUSY;
dev->norm = norm;
@@ -1141,8 +1016,7 @@ void cx231xx_v4l2_create_entities(struct cx231xx *dev)
int cx231xx_enum_input(struct file *file, void *priv,
struct v4l2_input *i)
{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(file);
u32 gen_stat;
unsigned int n;
int ret;
@@ -1181,8 +1055,7 @@ int cx231xx_enum_input(struct file *file, void *priv,
int cx231xx_g_input(struct file *file, void *priv, unsigned int *i)
{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(file);
*i = dev->video_input;
@@ -1191,14 +1064,9 @@ int cx231xx_g_input(struct file *file, void *priv, unsigned int *i)
int cx231xx_s_input(struct file *file, void *priv, unsigned int i)
{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
- int rc;
+ struct cx231xx *dev = video_drvdata(file);
dev->mode_tv = 0;
- rc = check_dev(dev);
- if (rc < 0)
- return rc;
if (i >= MAX_CX231XX_INPUT)
return -EINVAL;
@@ -1220,13 +1088,7 @@ int cx231xx_s_input(struct file *file, void *priv, unsigned int i)
int cx231xx_g_tuner(struct file *file, void *priv, struct v4l2_tuner *t)
{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
- int rc;
-
- rc = check_dev(dev);
- if (rc < 0)
- return rc;
+ struct cx231xx *dev = video_drvdata(file);
if (0 != t->index)
return -EINVAL;
@@ -1244,27 +1106,15 @@ int cx231xx_g_tuner(struct file *file, void *priv, struct v4l2_tuner *t)
int cx231xx_s_tuner(struct file *file, void *priv, const struct v4l2_tuner *t)
{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
- int rc;
-
- rc = check_dev(dev);
- if (rc < 0)
- return rc;
-
if (0 != t->index)
return -EINVAL;
-#if 0
- call_all(dev, tuner, s_tuner, t);
-#endif
return 0;
}
int cx231xx_g_frequency(struct file *file, void *priv,
struct v4l2_frequency *f)
{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(file);
if (f->tuner)
return -EINVAL;
@@ -1277,8 +1127,7 @@ int cx231xx_g_frequency(struct file *file, void *priv,
int cx231xx_s_frequency(struct file *file, void *priv,
const struct v4l2_frequency *f)
{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(file);
struct v4l2_frequency new_freq = *f;
int rc;
u32 if_frequency = 5400000;
@@ -1287,10 +1136,6 @@ int cx231xx_s_frequency(struct file *file, void *priv,
"Enter vidioc_s_frequency()f->frequency=%d;f->type=%d\n",
f->frequency, f->type);
- rc = check_dev(dev);
- if (rc < 0)
- return rc;
-
if (0 != f->tuner)
return -EINVAL;
@@ -1365,8 +1210,7 @@ int cx231xx_g_chip_info(struct file *file, void *fh,
int cx231xx_g_register(struct file *file, void *priv,
struct v4l2_dbg_register *reg)
{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(file);
int ret;
u8 value[4] = { 0, 0, 0, 0 };
u32 data = 0;
@@ -1424,8 +1268,7 @@ int cx231xx_g_register(struct file *file, void *priv,
int cx231xx_s_register(struct file *file, void *priv,
const struct v4l2_dbg_register *reg)
{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(file);
int ret;
u8 data[4] = { 0, 0, 0, 0 };
@@ -1472,8 +1315,7 @@ int cx231xx_s_register(struct file *file, void *priv,
static int vidioc_g_pixelaspect(struct file *file, void *priv,
int type, struct v4l2_fract *f)
{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(file);
bool is_50hz = dev->norm & V4L2_STD_625_50;
if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
@@ -1488,8 +1330,7 @@ static int vidioc_g_pixelaspect(struct file *file, void *priv,
static int vidioc_g_selection(struct file *file, void *priv,
struct v4l2_selection *s)
{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(file);
if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
@@ -1508,54 +1349,10 @@ static int vidioc_g_selection(struct file *file, void *priv,
return 0;
}
-static int vidioc_streamon(struct file *file, void *priv,
- enum v4l2_buf_type type)
-{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
- int rc;
-
- rc = check_dev(dev);
- if (rc < 0)
- return rc;
-
- rc = res_get(fh);
-
- if (likely(rc >= 0))
- rc = videobuf_streamon(&fh->vb_vidq);
-
- call_all(dev, video, s_stream, 1);
-
- return rc;
-}
-
-static int vidioc_streamoff(struct file *file, void *priv,
- enum v4l2_buf_type type)
-{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
- int rc;
-
- rc = check_dev(dev);
- if (rc < 0)
- return rc;
-
- if (type != fh->type)
- return -EINVAL;
-
- cx25840_call(dev, video, s_stream, 0);
-
- videobuf_streamoff(&fh->vb_vidq);
- res_free(fh);
-
- return 0;
-}
-
int cx231xx_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(file);
strscpy(cap->driver, "cx231xx", sizeof(cap->driver));
strscpy(cap->card, cx231xx_boards[dev->model].name, sizeof(cap->card));
@@ -1587,8 +1384,7 @@ static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
static int vidioc_g_fmt_vbi_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(file);
f->fmt.vbi.sampling_rate = 6750000 * 4;
f->fmt.vbi.samples_per_line = VBI_LINE_LENGTH;
@@ -1610,8 +1406,7 @@ static int vidioc_g_fmt_vbi_cap(struct file *file, void *priv,
static int vidioc_try_fmt_vbi_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(file);
f->fmt.vbi.sampling_rate = 6750000 * 4;
f->fmt.vbi.samples_per_line = VBI_LINE_LENGTH;
@@ -1634,77 +1429,16 @@ static int vidioc_try_fmt_vbi_cap(struct file *file, void *priv,
static int vidioc_s_fmt_vbi_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
-
- if (dev->vbi_stream_on && !fh->stream_on) {
- dev_err(dev->dev,
- "%s device in use by another fh\n", __func__);
- return -EBUSY;
- }
return vidioc_try_fmt_vbi_cap(file, priv, f);
}
-static int vidioc_reqbufs(struct file *file, void *priv,
- struct v4l2_requestbuffers *rb)
-{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
- int rc;
-
- rc = check_dev(dev);
- if (rc < 0)
- return rc;
-
- return videobuf_reqbufs(&fh->vb_vidq, rb);
-}
-
-static int vidioc_querybuf(struct file *file, void *priv, struct v4l2_buffer *b)
-{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
- int rc;
-
- rc = check_dev(dev);
- if (rc < 0)
- return rc;
-
- return videobuf_querybuf(&fh->vb_vidq, b);
-}
-
-static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *b)
-{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
- int rc;
-
- rc = check_dev(dev);
- if (rc < 0)
- return rc;
-
- return videobuf_qbuf(&fh->vb_vidq, b);
-}
-
-static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *b)
-{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
- int rc;
-
- rc = check_dev(dev);
- if (rc < 0)
- return rc;
-
- return videobuf_dqbuf(&fh->vb_vidq, b, file->f_flags & O_NONBLOCK);
-}
-
/* ----------------------------------------------------------- */
/* RADIO SPECIFIC IOCTLS */
/* ----------------------------------------------------------- */
static int radio_g_tuner(struct file *file, void *priv, struct v4l2_tuner *t)
{
- struct cx231xx *dev = ((struct cx231xx_fh *)priv)->dev;
+ struct cx231xx *dev = video_drvdata(file);
if (t->index)
return -EINVAL;
@@ -1717,7 +1451,7 @@ static int radio_g_tuner(struct file *file, void *priv, struct v4l2_tuner *t)
}
static int radio_s_tuner(struct file *file, void *priv, const struct v4l2_tuner *t)
{
- struct cx231xx *dev = ((struct cx231xx_fh *)priv)->dev;
+ struct cx231xx *dev = video_drvdata(file);
if (t->index)
return -EINVAL;
@@ -1733,52 +1467,20 @@ static int radio_s_tuner(struct file *file, void *priv, const struct v4l2_tuner
*/
static int cx231xx_v4l2_open(struct file *filp)
{
- int radio = 0;
struct video_device *vdev = video_devdata(filp);
struct cx231xx *dev = video_drvdata(filp);
- struct cx231xx_fh *fh;
- enum v4l2_buf_type fh_type = 0;
-
- switch (vdev->vfl_type) {
- case VFL_TYPE_GRABBER:
- fh_type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- break;
- case VFL_TYPE_VBI:
- fh_type = V4L2_BUF_TYPE_VBI_CAPTURE;
- break;
- case VFL_TYPE_RADIO:
- radio = 1;
- break;
- default:
- return -EINVAL;
- }
-
- cx231xx_videodbg("open dev=%s type=%s users=%d\n",
- video_device_node_name(vdev), v4l2_type_names[fh_type],
- dev->users);
-
-#if 0
- errCode = cx231xx_set_mode(dev, CX231XX_ANALOG_MODE);
- if (errCode < 0) {
- dev_err(dev->dev,
- "Device locked on digital mode. Can't open analog\n");
- return -EBUSY;
- }
-#endif
+ int ret;
- fh = kzalloc(sizeof(struct cx231xx_fh), GFP_KERNEL);
- if (!fh)
- return -ENOMEM;
- if (mutex_lock_interruptible(&dev->lock)) {
- kfree(fh);
+ if (mutex_lock_interruptible(&dev->lock))
return -ERESTARTSYS;
+
+ ret = v4l2_fh_open(filp);
+ if (ret) {
+ mutex_unlock(&dev->lock);
+ return ret;
}
- fh->dev = dev;
- fh->type = fh_type;
- filp->private_data = fh;
- v4l2_fh_init(&fh->fh, vdev);
- if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && dev->users == 0) {
+ if (dev->users++ == 0) {
/* Power up in Analog TV mode */
if (dev->board.external_av)
cx231xx_set_power_mode(dev,
@@ -1786,10 +1488,6 @@ static int cx231xx_v4l2_open(struct file *filp)
else
cx231xx_set_power_mode(dev, POLARIS_AVMODE_ANALOGT_TV);
-#if 0
- cx231xx_set_mode(dev, CX231XX_ANALOG_MODE);
-#endif
-
/* set video alternate setting */
cx231xx_set_video_alternate(dev);
@@ -1799,38 +1497,21 @@ static int cx231xx_v4l2_open(struct file *filp)
/* device needs to be initialized before isoc transfer */
dev->video_input = dev->video_input > 2 ? 2 : dev->video_input;
-
}
- if (radio) {
+
+ if (vdev->vfl_type == VFL_TYPE_RADIO) {
cx231xx_videodbg("video_open: setting radio device\n");
/* cx231xx_start_radio(dev); */
call_all(dev, tuner, s_radio);
}
-
- dev->users++;
-
- if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
- videobuf_queue_vmalloc_init(&fh->vb_vidq, &cx231xx_video_qops,
- NULL, &dev->video_mode.slock,
- fh->type, V4L2_FIELD_INTERLACED,
- sizeof(struct cx231xx_buffer),
- fh, &dev->lock);
- if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE) {
+ if (vdev->vfl_type == VFL_TYPE_VBI) {
/* Set the required alternate setting: the VBI interface
   works in bulk mode only */
cx231xx_set_alt_setting(dev, INDEX_VANC, 0);
-
- videobuf_queue_vmalloc_init(&fh->vb_vidq, &cx231xx_vbi_qops,
- NULL, &dev->vbi_mode.slock,
- fh->type, V4L2_FIELD_SEQ_TB,
- sizeof(struct cx231xx_buffer),
- fh, &dev->lock);
}
mutex_unlock(&dev->lock);
- v4l2_fh_add(&fh->fh);
-
return 0;
}
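The refactored open/close pair above relies on the usual first-open /
last-close counting idiom; a condensed sketch (driver names, simplified
flow):

	/* open: post-increment, so the body runs only for the first opener */
	if (dev->users++ == 0)
		cx231xx_set_power_mode(dev, POLARIS_AVMODE_ANALOGT_TV);

	/* close: pre-decrement, so the body runs only for the last closer */
	if (--dev->users == 0)
		cx231xx_set_mode(dev, CX231XX_SUSPEND);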
@@ -1871,68 +1552,12 @@ void cx231xx_release_analog_resources(struct cx231xx *dev)
*/
static int cx231xx_close(struct file *filp)
{
- struct cx231xx_fh *fh = filp->private_data;
- struct cx231xx *dev = fh->dev;
-
- cx231xx_videodbg("users=%d\n", dev->users);
-
- cx231xx_videodbg("users=%d\n", dev->users);
- if (res_check(fh))
- res_free(fh);
-
- /*
- * To workaround error number=-71 on EP0 for VideoGrabber,
- * need exclude following.
- * FIXME: It is probably safe to remove most of these, as we're
- * now avoiding the alternate setting for INDEX_VANC
- */
- if (!dev->board.no_alt_vanc)
- if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE) {
- videobuf_stop(&fh->vb_vidq);
- videobuf_mmap_free(&fh->vb_vidq);
-
- /* the device is already disconnect,
- free the remaining resources */
- if (dev->state & DEV_DISCONNECTED) {
- if (atomic_read(&dev->devlist_count) > 0) {
- cx231xx_release_resources(dev);
- fh->dev = NULL;
- return 0;
- }
- return 0;
- }
-
- /* do this before setting alternate! */
- cx231xx_uninit_vbi_isoc(dev);
-
- /* set alternate 0 */
- if (!dev->vbi_or_sliced_cc_mode)
- cx231xx_set_alt_setting(dev, INDEX_VANC, 0);
- else
- cx231xx_set_alt_setting(dev, INDEX_HANC, 0);
-
- v4l2_fh_del(&fh->fh);
- v4l2_fh_exit(&fh->fh);
- kfree(fh);
- dev->users--;
- wake_up_interruptible(&dev->open);
- return 0;
- }
+ struct cx231xx *dev = video_drvdata(filp);
+ struct video_device *vdev = video_devdata(filp);
- v4l2_fh_del(&fh->fh);
- dev->users--;
- if (!dev->users) {
- videobuf_stop(&fh->vb_vidq);
- videobuf_mmap_free(&fh->vb_vidq);
-
- /* the device is already disconnect,
- free the remaining resources */
- if (dev->state & DEV_DISCONNECTED) {
- cx231xx_release_resources(dev);
- fh->dev = NULL;
- return 0;
- }
+ _vb2_fop_release(filp, NULL);
+ if (--dev->users == 0) {
/* Save some power by putting tuner to sleep */
call_all(dev, tuner, standby);
@@ -1942,20 +1567,40 @@ static int cx231xx_close(struct file *filp)
else
cx231xx_uninit_bulk(dev);
cx231xx_set_mode(dev, CX231XX_SUSPEND);
+ }
+
+ /*
+ * To work around error -71 (EPROTO) on EP0 for the VideoGrabber,
+ * the following must be skipped on boards with no_alt_vanc set.
+ * FIXME: It is probably safe to remove most of this, as we're
+ * now avoiding the alternate setting for INDEX_VANC.
+ */
+ if (!dev->board.no_alt_vanc && vdev->vfl_type == VFL_TYPE_VBI) {
+ /* do this before setting alternate! */
+ cx231xx_uninit_vbi_isoc(dev);
/* set alternate 0 */
+ if (!dev->vbi_or_sliced_cc_mode)
+ cx231xx_set_alt_setting(dev, INDEX_VANC, 0);
+ else
+ cx231xx_set_alt_setting(dev, INDEX_HANC, 0);
+
+ wake_up_interruptible_nr(&dev->open, 1);
+ return 0;
+ }
+
+ if (dev->users == 0) {
+ /* set alternate 0 */
cx231xx_set_alt_setting(dev, INDEX_VIDEO, 0);
}
- v4l2_fh_exit(&fh->fh);
- kfree(fh);
+
wake_up_interruptible(&dev->open);
return 0;
}
static int cx231xx_v4l2_close(struct file *filp)
{
- struct cx231xx_fh *fh = filp->private_data;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(filp);
int rc;
mutex_lock(&dev->lock);
@@ -1964,116 +1609,13 @@ static int cx231xx_v4l2_close(struct file *filp)
return rc;
}
-/*
- * cx231xx_v4l2_read()
- * will allocate buffers when called for the first time
- */
-static ssize_t
-cx231xx_v4l2_read(struct file *filp, char __user *buf, size_t count,
- loff_t *pos)
-{
- struct cx231xx_fh *fh = filp->private_data;
- struct cx231xx *dev = fh->dev;
- int rc;
-
- rc = check_dev(dev);
- if (rc < 0)
- return rc;
-
- if ((fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) ||
- (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE)) {
- rc = res_get(fh);
-
- if (unlikely(rc < 0))
- return rc;
-
- if (mutex_lock_interruptible(&dev->lock))
- return -ERESTARTSYS;
- rc = videobuf_read_stream(&fh->vb_vidq, buf, count, pos, 0,
- filp->f_flags & O_NONBLOCK);
- mutex_unlock(&dev->lock);
- return rc;
- }
- return 0;
-}
-
-/*
- * cx231xx_v4l2_poll()
- * will allocate buffers when called for the first time
- */
-static __poll_t cx231xx_v4l2_poll(struct file *filp, poll_table *wait)
-{
- __poll_t req_events = poll_requested_events(wait);
- struct cx231xx_fh *fh = filp->private_data;
- struct cx231xx *dev = fh->dev;
- __poll_t res = 0;
- int rc;
-
- rc = check_dev(dev);
- if (rc < 0)
- return EPOLLERR;
-
- rc = res_get(fh);
-
- if (unlikely(rc < 0))
- return EPOLLERR;
-
- if (v4l2_event_pending(&fh->fh))
- res |= EPOLLPRI;
- else
- poll_wait(filp, &fh->fh.wait, wait);
-
- if (!(req_events & (EPOLLIN | EPOLLRDNORM)))
- return res;
-
- if ((V4L2_BUF_TYPE_VIDEO_CAPTURE == fh->type) ||
- (V4L2_BUF_TYPE_VBI_CAPTURE == fh->type)) {
- mutex_lock(&dev->lock);
- res |= videobuf_poll_stream(filp, &fh->vb_vidq, wait);
- mutex_unlock(&dev->lock);
- return res;
- }
- return res | EPOLLERR;
-}
-
-/*
- * cx231xx_v4l2_mmap()
- */
-static int cx231xx_v4l2_mmap(struct file *filp, struct vm_area_struct *vma)
-{
- struct cx231xx_fh *fh = filp->private_data;
- struct cx231xx *dev = fh->dev;
- int rc;
-
- rc = check_dev(dev);
- if (rc < 0)
- return rc;
-
- rc = res_get(fh);
-
- if (unlikely(rc < 0))
- return rc;
-
- if (mutex_lock_interruptible(&dev->lock))
- return -ERESTARTSYS;
- rc = videobuf_mmap_mapper(&fh->vb_vidq, vma);
- mutex_unlock(&dev->lock);
-
- cx231xx_videodbg("vma start=0x%08lx, size=%ld, ret=%d\n",
- (unsigned long)vma->vm_start,
- (unsigned long)vma->vm_end -
- (unsigned long)vma->vm_start, rc);
-
- return rc;
-}
-
static const struct v4l2_file_operations cx231xx_v4l_fops = {
.owner = THIS_MODULE,
.open = cx231xx_v4l2_open,
.release = cx231xx_v4l2_close,
- .read = cx231xx_v4l2_read,
- .poll = cx231xx_v4l2_poll,
- .mmap = cx231xx_v4l2_mmap,
+ .read = vb2_fop_read,
+ .poll = vb2_fop_poll,
+ .mmap = vb2_fop_mmap,
.unlocked_ioctl = video_ioctl2,
};
@@ -2088,17 +1630,17 @@ static const struct v4l2_ioctl_ops video_ioctl_ops = {
.vidioc_s_fmt_vbi_cap = vidioc_s_fmt_vbi_cap,
.vidioc_g_pixelaspect = vidioc_g_pixelaspect,
.vidioc_g_selection = vidioc_g_selection,
- .vidioc_reqbufs = vidioc_reqbufs,
- .vidioc_querybuf = vidioc_querybuf,
- .vidioc_qbuf = vidioc_qbuf,
- .vidioc_dqbuf = vidioc_dqbuf,
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
.vidioc_s_std = vidioc_s_std,
.vidioc_g_std = vidioc_g_std,
.vidioc_enum_input = cx231xx_enum_input,
.vidioc_g_input = cx231xx_g_input,
.vidioc_s_input = cx231xx_s_input,
- .vidioc_streamon = vidioc_streamon,
- .vidioc_streamoff = vidioc_streamoff,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
.vidioc_g_tuner = cx231xx_g_tuner,
.vidioc_s_tuner = cx231xx_s_tuner,
.vidioc_g_frequency = cx231xx_g_frequency,
@@ -2175,6 +1717,7 @@ static void cx231xx_vdev_init(struct cx231xx *dev,
int cx231xx_register_analog_devices(struct cx231xx *dev)
{
+ struct vb2_queue *q;
int ret;
dev_info(dev->dev, "v4l2 driver version %s\n", CX231XX_VERSION);
@@ -2221,6 +1764,21 @@ int cx231xx_register_analog_devices(struct cx231xx *dev)
dev_err(dev->dev, "failed to initialize video media entity!\n");
#endif
dev->vdev.ctrl_handler = &dev->ctrl_handler;
+
+ q = &dev->vidq;
+ q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ q->io_modes = VB2_USERPTR | VB2_MMAP | VB2_DMABUF | VB2_READ;
+ q->drv_priv = dev;
+ q->buf_struct_size = sizeof(struct cx231xx_buffer);
+ q->ops = &cx231xx_video_qops;
+ q->mem_ops = &vb2_vmalloc_memops;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->min_buffers_needed = 1;
+ q->lock = &dev->lock;
+ ret = vb2_queue_init(q);
+ if (ret)
+ return ret;
+ dev->vdev.queue = q;
dev->vdev.device_caps = V4L2_CAP_READWRITE | V4L2_CAP_STREAMING |
V4L2_CAP_VIDEO_CAPTURE;
if (dev->tuner_type != TUNER_ABSENT)
@@ -2254,6 +1812,21 @@ int cx231xx_register_analog_devices(struct cx231xx *dev)
dev_err(dev->dev, "failed to initialize vbi media entity!\n");
#endif
dev->vbi_dev.ctrl_handler = &dev->ctrl_handler;
+
+ q = &dev->vbiq;
+ q->type = V4L2_BUF_TYPE_VBI_CAPTURE;
+ q->io_modes = VB2_USERPTR | VB2_MMAP | VB2_DMABUF | VB2_READ;
+ q->drv_priv = dev;
+ q->buf_struct_size = sizeof(struct cx231xx_buffer);
+ q->ops = &cx231xx_vbi_qops;
+ q->mem_ops = &vb2_vmalloc_memops;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->min_buffers_needed = 1;
+ q->lock = &dev->lock;
+ ret = vb2_queue_init(q);
+ if (ret)
+ return ret;
+ dev->vbi_dev.queue = q;
dev->vbi_dev.device_caps = V4L2_CAP_READWRITE | V4L2_CAP_STREAMING |
V4L2_CAP_VBI_CAPTURE;
if (dev->tuner_type != TUNER_ABSENT)
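A note on the two vb2 queue setups above (a reading of the videobuf2
contract, not new driver code):

	/*
	 * q->lock = &dev->lock lets the vb2_ioctl_*() and vb2_fop_*()
	 * helpers serialize every queue operation on the same mutex the
	 * driver takes in open/close, so the hand-rolled locking that
	 * was deleted above is no longer needed. min_buffers_needed = 1
	 * defers the .start_streaming callback until userspace has
	 * queued at least one buffer.
	 */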
diff --git a/drivers/media/usb/cx231xx/cx231xx.h b/drivers/media/usb/cx231xx/cx231xx.h
index b007611abc37..b32eab641793 100644
--- a/drivers/media/usb/cx231xx/cx231xx.h
+++ b/drivers/media/usb/cx231xx/cx231xx.h
@@ -20,7 +20,7 @@
#include <media/drv-intf/cx2341x.h>
-#include <media/videobuf-vmalloc.h>
+#include <media/videobuf2-vmalloc.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-fh.h>
@@ -223,8 +223,8 @@ struct cx231xx_fmt {
/* buffer for one video frame */
struct cx231xx_buffer {
/* common v4l buffer stuff -- must be first */
- struct videobuf_buffer vb;
-
+ struct vb2_v4l2_buffer vb;
+ struct list_head list;
struct list_head frame;
int top_field;
int receiving;
@@ -237,7 +237,6 @@ enum ps_package_head {
struct cx231xx_dmaqueue {
struct list_head active;
- struct list_head queued;
wait_queue_head_t wq;
@@ -251,6 +250,7 @@ struct cx231xx_dmaqueue {
u32 lines_completed;
u8 field1_done;
u32 lines_per_field;
+ u32 sequence;
/*Mpeg2 control buffer*/
u8 *p_left_data;
@@ -427,23 +427,6 @@ struct cx231xx_audio {
struct cx231xx;
-struct cx231xx_fh {
- struct v4l2_fh fh;
- struct cx231xx *dev;
- unsigned int stream_on:1; /* Locks streams */
- enum v4l2_buf_type type;
-
- struct videobuf_queue vb_vidq;
-
- /* vbi capture */
- struct videobuf_queue vidq;
- struct videobuf_queue vbiq;
-
- /* MPEG Encoder specifics ONLY */
-
- atomic_t v4l_reading;
-};
-
/*****************************************************************/
/* set/get i2c */
/* 00 - 1 Mb/s, 01 - 400 kb/s, 10 - 100 kb/s, 11 - 5 Mb/s */
@@ -634,6 +617,7 @@ struct cx231xx {
int width; /* current frame width */
int height; /* current frame height */
int interlaced; /* 1=interlace fields, 0=just top fields */
+ unsigned int size;
struct cx231xx_audio adev;
@@ -657,6 +641,9 @@ struct cx231xx {
struct media_pad input_pad[MAX_CX231XX_INPUT];
#endif
+ struct vb2_queue vidq;
+ struct vb2_queue vbiq;
+
unsigned char eedata[256];
struct cx231xx_video_mode video_mode;
@@ -717,6 +704,7 @@ struct cx231xx {
u8 USE_ISO;
struct cx231xx_tvnorm encodernorm;
struct cx231xx_tsport ts1, ts2;
+ struct vb2_queue mpegq;
struct video_device v4l_device;
atomic_t v4l_reader_count;
u32 freq;
diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c
index 3afd18733614..792667ee5ebc 100644
--- a/drivers/media/usb/dvb-usb-v2/af9035.c
+++ b/drivers/media/usb/dvb-usb-v2/af9035.c
@@ -1197,6 +1197,15 @@ err:
return ret;
}
+/*
+ * The I2C speed register is calculated with:
+ * I2C speed register = (1000000000 / (24.4 * 16 * I2C_speed))
+ *
+ * The default speed register for it930x is 7, which means a
+ * speed of ~366 kbps
+ */
+#define I2C_SPEED_366K 7
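Plugging the default register value into the formula above checks out
(illustrative helper, not part of the driver):

	static inline unsigned long it930x_i2c_speed_bps(unsigned int reg)
	{
		/* 24.4 * 16 = 390.4, scaled by 10 for integer math:
		 * reg = 7 -> 10000000000 / (3904 * 7) = 365925 bps ~= 366 kbps */
		return 10000000000ULL / (3904ULL * reg);
	}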
+
static int it930x_frontend_attach(struct dvb_usb_adapter *adap)
{
struct state *state = adap_to_priv(adap);
@@ -1208,13 +1217,13 @@ static int it930x_frontend_attach(struct dvb_usb_adapter *adap)
dev_dbg(&intf->dev, "adap->id=%d\n", adap->id);
- /* I2C master bus 2 clock speed 300k */
- ret = af9035_wr_reg(d, 0x00f6a7, 0x07);
+ /* I2C master bus 2 clock speed 366k */
+ ret = af9035_wr_reg(d, 0x00f6a7, I2C_SPEED_366K);
if (ret < 0)
goto err;
- /* I2C master bus 1,3 clock speed 300k */
- ret = af9035_wr_reg(d, 0x00f103, 0x07);
+ /* I2C master bus 1,3 clock speed 366k */
+ ret = af9035_wr_reg(d, 0x00f103, I2C_SPEED_366K);
if (ret < 0)
goto err;
@@ -1610,6 +1619,24 @@ static int it930x_tuner_attach(struct dvb_usb_adapter *adap)
memset(&si2157_config, 0, sizeof(si2157_config));
si2157_config.fe = adap->fe[0];
+
+ /*
+ * HACK: The Logilink VG0022A has a bug: when the si2157
+ * firmware that came with the device is replaced by a new
+ * one, the I2C transfers to the tuner will return just 0xff.
+ *
+ * The vendor firmware probably carries a patch specific to
+ * this device, so we can't replace it with the generic
+ * firmware. The right solution would be to extract the
+ * si2157 firmware from the original driver and have the
+ * driver load that device-specific firmware; until we have
+ * that, the next best solution is to keep the original
+ * firmware on the device.
+ */
+ if (le16_to_cpu(d->udev->descriptor.idVendor) == USB_VID_DEXATEK &&
+ le16_to_cpu(d->udev->descriptor.idProduct) == 0x0100)
+ si2157_config.dont_load_firmware = true;
+
si2157_config.if_port = it930x_addresses_table[state->it930x_addresses].tuner_if_port;
ret = af9035_add_i2c_dev(d, "si2157",
it930x_addresses_table[state->it930x_addresses].tuner_i2c_addr,
@@ -2121,6 +2148,8 @@ static const struct usb_device_id af9035_id_table[] = {
&it930x_props, "ITE 9303 Generic", NULL) },
{ DVB_USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_TD310,
&it930x_props, "AVerMedia TD310 DVB-T2", NULL) },
+ { DVB_USB_DEVICE(USB_VID_DEXATEK, 0x0100,
+ &it930x_props, "Logilink VG0022A", NULL) },
{ }
};
MODULE_DEVICE_TABLE(usb, af9035_id_table);
diff --git a/drivers/media/usb/dvb-usb-v2/dvb_usb.h b/drivers/media/usb/dvb-usb-v2/dvb_usb.h
index b874a49ececf..52bcc2d2efe5 100644
--- a/drivers/media/usb/dvb-usb-v2/dvb_usb.h
+++ b/drivers/media/usb/dvb-usb-v2/dvb_usb.h
@@ -121,6 +121,7 @@ struct dvb_usb_driver_info {
* @interval: time in ms between two queries
* @driver_type: used to indicate whether a device supports raw mode
* @bulk_mode: device supports bulk mode for rc (disable polling mode)
+ * @timeout: set to length of last space before raw IR goes idle
*/
struct dvb_usb_rc {
const char *map_name;
@@ -130,6 +131,7 @@ struct dvb_usb_rc {
unsigned int interval;
enum rc_driver_type driver_type;
bool bulk_mode;
+ int timeout;
};
/**
diff --git a/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c b/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
index e5e056bf9dfa..f1c79f351ec8 100644
--- a/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
+++ b/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
@@ -150,6 +150,7 @@ static int dvb_usbv2_remote_init(struct dvb_usb_device *d)
dev->map_name = d->rc.map_name;
dev->allowed_protocols = d->rc.allowed_protos;
dev->change_protocol = d->rc.change_protocol;
+ dev->timeout = d->rc.timeout;
dev->priv = d;
ret = rc_register_device(dev);
diff --git a/drivers/media/usb/dvb-usb-v2/dvbsky.c b/drivers/media/usb/dvb-usb-v2/dvbsky.c
index 617a306f6815..356fd8e66834 100644
--- a/drivers/media/usb/dvb-usb-v2/dvbsky.c
+++ b/drivers/media/usb/dvb-usb-v2/dvbsky.c
@@ -22,7 +22,6 @@ MODULE_PARM_DESC(disable_rc, "Disable inbuilt IR receiver.");
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
struct dvbsky_state {
- struct mutex stream_mutex;
u8 ibuf[DVBSKY_BUF_LEN];
u8 obuf[DVBSKY_BUF_LEN];
u8 last_lock;
@@ -60,17 +59,19 @@ static int dvbsky_usb_generic_rw(struct dvb_usb_device *d,
static int dvbsky_stream_ctrl(struct dvb_usb_device *d, u8 onoff)
{
struct dvbsky_state *state = d_to_priv(d);
+ static const u8 obuf_pre[3] = { 0x37, 0, 0 };
+ static const u8 obuf_post[3] = { 0x36, 3, 0 };
int ret;
- u8 obuf_pre[3] = { 0x37, 0, 0 };
- u8 obuf_post[3] = { 0x36, 3, 0 };
- mutex_lock(&state->stream_mutex);
- ret = dvbsky_usb_generic_rw(d, obuf_pre, 3, NULL, 0);
+ mutex_lock(&d->usb_mutex);
+ memcpy(state->obuf, obuf_pre, 3);
+ ret = dvb_usbv2_generic_write_locked(d, state->obuf, 3);
if (!ret && onoff) {
msleep(20);
- ret = dvbsky_usb_generic_rw(d, obuf_post, 3, NULL, 0);
+ memcpy(state->obuf, obuf_post, 3);
+ ret = dvb_usbv2_generic_write_locked(d, state->obuf, 3);
}
- mutex_unlock(&state->stream_mutex);
+ mutex_unlock(&d->usb_mutex);
return ret;
}
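Why the memcpy into state->obuf: USB transfer buffers must live in
DMA-capable (heap) memory, so the const command templates are staged in
the per-device buffer while d->usb_mutex guards it. A hedged sketch of
the pattern, with hypothetical names:

	static int example_send_cmd(struct dvb_usb_device *d, u8 *dma_buf,
				    const u8 *tmpl, u16 len)
	{
		int ret;

		mutex_lock(&d->usb_mutex);	/* the buffer is shared per device */
		memcpy(dma_buf, tmpl, len);	/* stage const data in DMA-safe memory */
		ret = dvb_usbv2_generic_write_locked(d, dma_buf, len);
		mutex_unlock(&d->usb_mutex);
		return ret;
	}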
@@ -591,17 +592,7 @@ static int dvbsky_identify_state(struct dvb_usb_device *d, const char **name)
static int dvbsky_init(struct dvb_usb_device *d)
{
struct dvbsky_state *state = d_to_priv(d);
-
- /* use default interface */
- /*
- ret = usb_set_interface(d->udev, 0, 0);
- if (ret)
- return ret;
- */
- mutex_init(&state->stream_mutex);
-
state->last_lock = 0;
-
return 0;
}
@@ -792,6 +783,9 @@ static const struct usb_device_id dvbsky_id_table[] = {
{ DVB_USB_DEVICE(USB_VID_CONEXANT, USB_PID_MYGICA_T230C,
&mygica_t230c_props, "MyGica Mini DVB-T2 USB Stick T230C",
RC_MAP_TOTAL_MEDIA_IN_HAND_02) },
+ { DVB_USB_DEVICE(USB_VID_CONEXANT, USB_PID_MYGICA_T230C_LITE,
+ &mygica_t230c_props, "MyGica Mini DVB-T2 USB Stick T230C Lite",
+ NULL) },
{ DVB_USB_DEVICE(USB_VID_CONEXANT, USB_PID_MYGICA_T230C2,
&mygica_t230c_props, "MyGica Mini DVB-T2 USB Stick T230C v2",
RC_MAP_TOTAL_MEDIA_IN_HAND_02) },
diff --git a/drivers/media/usb/dvb-usb-v2/gl861.c b/drivers/media/usb/dvb-usb-v2/gl861.c
index c7197e534c02..19217dcf20f1 100644
--- a/drivers/media/usb/dvb-usb-v2/gl861.c
+++ b/drivers/media/usb/dvb-usb-v2/gl861.c
@@ -5,7 +5,7 @@
*/
#include <linux/string.h>
-#include "gl861.h"
+#include "dvb_usb.h"
#include "zl10353.h"
#include "qt1010.h"
@@ -14,93 +14,157 @@
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
-static int gl861_i2c_msg(struct dvb_usb_device *d, u8 addr,
- u8 *wbuf, u16 wlen, u8 *rbuf, u16 rlen)
-{
- u16 index;
- u16 value = addr << (8 + 1);
- int wo = (rbuf == NULL || rlen == 0); /* write-only */
- u8 req, type;
- u8 *buf;
- int ret;
+struct gl861 {
+ /* USB control message buffer */
+ u8 buf[16];
- if (wo) {
- req = GL861_REQ_I2C_WRITE;
- type = GL861_WRITE;
- buf = kmemdup(wbuf, wlen, GFP_KERNEL);
- } else { /* rw */
- req = GL861_REQ_I2C_READ;
- type = GL861_READ;
- buf = kmalloc(rlen, GFP_KERNEL);
- }
- if (!buf)
- return -ENOMEM;
+ struct i2c_adapter *demod_sub_i2c;
+ struct i2c_client *i2c_client_demod;
+ struct i2c_client *i2c_client_tuner;
+};
- switch (wlen) {
- case 1:
- index = wbuf[0];
+#define CMD_WRITE_SHORT 0x01
+#define CMD_READ 0x02
+#define CMD_WRITE 0x03
+
+static int gl861_ctrl_msg(struct dvb_usb_device *d, u8 request, u16 value,
+ u16 index, void *data, u16 size)
+{
+ struct gl861 *ctx = d_to_priv(d);
+ struct usb_interface *intf = d->intf;
+ int ret;
+ unsigned int pipe;
+ u8 requesttype;
+
+ mutex_lock(&d->usb_mutex);
+
+ switch (request) {
+ case CMD_WRITE:
+ memcpy(ctx->buf, data, size);
+ /* Fall through */
+ case CMD_WRITE_SHORT:
+ pipe = usb_sndctrlpipe(d->udev, 0);
+ requesttype = USB_TYPE_VENDOR | USB_DIR_OUT;
break;
- case 2:
- index = wbuf[0];
- value = value + wbuf[1];
+ case CMD_READ:
+ pipe = usb_rcvctrlpipe(d->udev, 0);
+ requesttype = USB_TYPE_VENDOR | USB_DIR_IN;
break;
default:
- dev_err(&d->udev->dev, "%s: wlen=%d, aborting\n",
- KBUILD_MODNAME, wlen);
- kfree(buf);
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_mutex_unlock;
}
- usleep_range(1000, 2000); /* avoid I2C errors */
+ ret = usb_control_msg(d->udev, pipe, request, requesttype, value,
+ index, ctx->buf, size, 200);
+ dev_dbg(&intf->dev, "%d | %02x %02x %*ph %*ph %*ph %s %*ph\n",
+ ret, requesttype, request, 2, &value, 2, &index, 2, &size,
+ (requesttype & USB_DIR_IN) ? "<<<" : ">>>", size, ctx->buf);
+ if (ret < 0)
+ goto err_mutex_unlock;
- ret = usb_control_msg(d->udev, usb_rcvctrlpipe(d->udev, 0), req, type,
- value, index, buf, rlen, 2000);
+ if (request == CMD_READ)
+ memcpy(data, ctx->buf, size);
- if (!wo && ret > 0)
- memcpy(rbuf, buf, rlen);
+ usleep_range(1000, 2000); /* Avoid I2C errors */
- kfree(buf);
+ mutex_unlock(&d->usb_mutex);
+
+ return 0;
+
+err_mutex_unlock:
+ mutex_unlock(&d->usb_mutex);
+ dev_dbg(&intf->dev, "failed %d\n", ret);
return ret;
}
-/* I2C */
-static int gl861_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
- int num)
+static int gl861_short_write(struct dvb_usb_device *d, u8 addr, u8 reg, u8 val)
+{
+ return gl861_ctrl_msg(d, CMD_WRITE_SHORT,
+ (addr << 9) | val, reg, NULL, 0);
+}
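Worked example of the packing above (hypothetical addresses, sketch
only): since (addr << 9) == ((addr << 1) << 8), the high byte of wValue
carries the 8-bit bus address and the low byte carries the register
value, while wIndex carries the register:

	/* demod at 7-bit address 0x10, write reg 0x11 = 0x02 */
	value = (0x10 << 9) | 0x02;	/* = 0x2002 */
	index = 0x11;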
+
+static int gl861_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
+ int num)
{
struct dvb_usb_device *d = i2c_get_adapdata(adap);
- int i;
+ struct usb_interface *intf = d->intf;
+ struct gl861 *ctx = d_to_priv(d);
+ int ret;
+ u8 request, *data;
+ u16 value, index, size;
+
+ /* XXX: I2C adapter maximum data lengths are not tested */
+ if (num == 1 && !(msg[0].flags & I2C_M_RD)) {
+ /* I2C write */
+ if (msg[0].len < 2 || msg[0].len > sizeof(ctx->buf)) {
+ ret = -EOPNOTSUPP;
+ goto err;
+ }
+
+ value = (msg[0].addr << 1) << 8;
+ index = msg[0].buf[0];
+
+ if (msg[0].len == 2) {
+ request = CMD_WRITE_SHORT;
+ value |= msg[0].buf[1];
+ size = 0;
+ data = NULL;
+ } else {
+ request = CMD_WRITE;
+ size = msg[0].len - 1;
+ data = &msg[0].buf[1];
+ }
+
+ ret = gl861_ctrl_msg(d, request, value, index, data, size);
+ } else if (num == 2 && !(msg[0].flags & I2C_M_RD) &&
+ (msg[1].flags & I2C_M_RD)) {
+ /* I2C write + read */
+ if (msg[0].len > 1 || msg[1].len > sizeof(ctx->buf)) {
+ ret = -EOPNOTSUPP;
+ goto err;
+ }
- if (num > 2)
- return -EINVAL;
-
- if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
- return -EAGAIN;
-
- for (i = 0; i < num; i++) {
- /* write/read request */
- if (i+1 < num && (msg[i+1].flags & I2C_M_RD)) {
- if (gl861_i2c_msg(d, msg[i].addr, msg[i].buf,
- msg[i].len, msg[i+1].buf, msg[i+1].len) < 0)
- break;
- i++;
- } else
- if (gl861_i2c_msg(d, msg[i].addr, msg[i].buf,
- msg[i].len, NULL, 0) < 0)
- break;
+ value = (msg[0].addr << 1) << 8;
+ index = msg[0].buf[0];
+ request = CMD_READ;
+
+ ret = gl861_ctrl_msg(d, request, value, index,
+ msg[1].buf, msg[1].len);
+ } else if (num == 1 && (msg[0].flags & I2C_M_RD)) {
+ /* I2C read */
+ if (msg[0].len > sizeof(ctx->buf)) {
+ ret = -EOPNOTSUPP;
+ goto err;
+ }
+ value = (msg[0].addr << 1) << 8;
+ index = 0x0100;
+ request = CMD_READ;
+
+ ret = gl861_ctrl_msg(d, request, value, index,
+ msg[0].buf, msg[0].len);
+ } else {
+ /* Unsupported I2C message */
+ dev_dbg(&intf->dev, "unknown i2c msg, num %u\n", num);
+ ret = -EOPNOTSUPP;
}
+ if (ret)
+ goto err;
- mutex_unlock(&d->i2c_mutex);
- return i;
+ return num;
+err:
+ dev_dbg(&intf->dev, "failed %d\n", ret);
+ return ret;
}
-static u32 gl861_i2c_func(struct i2c_adapter *adapter)
+static u32 gl861_i2c_functionality(struct i2c_adapter *adapter)
{
return I2C_FUNC_I2C;
}
static struct i2c_algorithm gl861_i2c_algo = {
- .master_xfer = gl861_i2c_xfer,
- .functionality = gl861_i2c_func,
+ .master_xfer = gl861_i2c_master_xfer,
+ .functionality = gl861_i2c_functionality,
};
/* Callbacks for DVB USB */
@@ -149,6 +213,8 @@ static struct dvb_usb_device_properties gl861_props = {
.owner = THIS_MODULE,
.adapter_nr = adapter_nr,
+ .size_of_priv = sizeof(struct gl861),
+
.i2c_algo = &gl861_i2c_algo,
.frontend_attach = gl861_frontend_attach,
.tuner_attach = gl861_tuner_attach,
@@ -166,14 +232,6 @@ static struct dvb_usb_device_properties gl861_props = {
/*
* For Friio
*/
-
-struct friio_priv {
- struct i2c_adapter *demod_sub_i2c;
- struct i2c_client *i2c_client_demod;
- struct i2c_client *i2c_client_tuner;
- struct i2c_adapter tuner_adap;
-};
-
struct friio_config {
struct i2c_board_info demod_info;
struct tc90522_config demod_cfg;
@@ -184,132 +242,10 @@ struct friio_config {
static const struct friio_config friio_config = {
.demod_info = { I2C_BOARD_INFO(TC90522_I2C_DEV_TER, 0x18), },
+ .demod_cfg = { .split_tuner_read_i2c = true, },
.tuner_info = { I2C_BOARD_INFO("tua6034_friio", 0x60), },
};
-/* For another type of I2C:
- * message sent by a USB control-read/write transaction with data stage.
- * Used in init/config of Friio.
- */
-static int
-gl861_i2c_write_ex(struct dvb_usb_device *d, u8 addr, u8 *wbuf, u16 wlen)
-{
- u8 *buf;
- int ret;
-
- buf = kmemdup(wbuf, wlen, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- ret = usb_control_msg(d->udev, usb_sndctrlpipe(d->udev, 0),
- GL861_REQ_I2C_RAW, GL861_WRITE,
- addr << (8 + 1), 0x0100, buf, wlen, 2000);
- kfree(buf);
- return ret;
-}
-
-static int
-gl861_i2c_read_ex(struct dvb_usb_device *d, u8 addr, u8 *rbuf, u16 rlen)
-{
- u8 *buf;
- int ret;
-
- buf = kmalloc(rlen, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- ret = usb_control_msg(d->udev, usb_rcvctrlpipe(d->udev, 0),
- GL861_REQ_I2C_READ, GL861_READ,
- addr << (8 + 1), 0x0100, buf, rlen, 2000);
- if (ret > 0 && rlen > 0)
- memcpy(buf, rbuf, rlen);
- kfree(buf);
- return ret;
-}
-
-/* For I2C transactions to the tuner of Friio (dvb_pll).
- *
- * Friio uses irregular USB encapsulation for tuner i2c transactions:
- * write transacions are encapsulated with a different USB 'request' value.
- *
- * Although all transactions are sent via the demod(tc90522)
- * and the demod provides an i2c adapter for them, it cannot be used in Friio
- * since it assumes using the same parent adapter with the demod,
- * which does not use the request value and uses same one for both read/write.
- * So we define a dedicated i2c adapter here.
- */
-
-static int
-friio_i2c_tuner_read(struct dvb_usb_device *d, struct i2c_msg *msg)
-{
- struct friio_priv *priv;
- u8 addr;
-
- priv = d_to_priv(d);
- addr = priv->i2c_client_demod->addr;
- return gl861_i2c_read_ex(d, addr, msg->buf, msg->len);
-}
-
-static int
-friio_i2c_tuner_write(struct dvb_usb_device *d, struct i2c_msg *msg)
-{
- u8 *buf;
- int ret;
- struct friio_priv *priv;
-
- priv = d_to_priv(d);
-
- if (msg->len < 1)
- return -EINVAL;
-
- buf = kmalloc(msg->len + 1, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
- buf[0] = msg->addr << 1;
- memcpy(buf + 1, msg->buf, msg->len);
-
- ret = usb_control_msg(d->udev, usb_sndctrlpipe(d->udev, 0),
- GL861_REQ_I2C_RAW, GL861_WRITE,
- priv->i2c_client_demod->addr << (8 + 1),
- 0xFE, buf, msg->len + 1, 2000);
- kfree(buf);
- return ret;
-}
-
-static int friio_tuner_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
- int num)
-{
- struct dvb_usb_device *d = i2c_get_adapdata(adap);
- int i;
-
- if (num > 2)
- return -EINVAL;
-
- if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
- return -EAGAIN;
-
- for (i = 0; i < num; i++) {
- int ret;
-
- if (msg[i].flags & I2C_M_RD)
- ret = friio_i2c_tuner_read(d, &msg[i]);
- else
- ret = friio_i2c_tuner_write(d, &msg[i]);
-
- if (ret < 0)
- break;
-
- usleep_range(1000, 2000); /* avoid I2C errors */
- }
-
- mutex_unlock(&d->i2c_mutex);
- return i;
-}
-
-static struct i2c_algorithm friio_tuner_i2c_algo = {
- .master_xfer = friio_tuner_i2c_xfer,
- .functionality = gl861_i2c_func,
-};
/* GPIO control in Friio */
@@ -377,9 +313,11 @@ static int friio_ext_ctl(struct dvb_usb_device *d,
/* init/config of gl861 for Friio */
/* NOTE:
* This function cannot be moved to friio_init()/dvb_usbv2_init(),
- * because the init defined here must be done before any activities like I2C,
+ * because the init defined here includes a whole device reset;
+ * it must run early, before any activities like I2C,
* but friio_init() is called by dvb-usbv2 after {_frontend, _tuner}_attach(),
* where I2C communication is used.
+ * This reset is also required in reset_resume().
* Thus this function is set to be called from _power_ctl().
*
* Since it will be called on the early init stage
@@ -389,7 +327,7 @@ static int friio_ext_ctl(struct dvb_usb_device *d,
static int friio_reset(struct dvb_usb_device *d)
{
int i, ret;
- u8 wbuf[2], rbuf[2];
+ u8 wbuf[1], rbuf[2];
static const u8 friio_init_cmds[][2] = {
{0x33, 0x08}, {0x37, 0x40}, {0x3a, 0x1f}, {0x3b, 0xff},
@@ -401,16 +339,12 @@ static int friio_reset(struct dvb_usb_device *d)
if (ret < 0)
return ret;
- wbuf[0] = 0x11;
- wbuf[1] = 0x02;
- ret = gl861_i2c_msg(d, 0x00, wbuf, 2, NULL, 0);
+ ret = gl861_short_write(d, 0x00, 0x11, 0x02);
if (ret < 0)
return ret;
usleep_range(2000, 3000);
- wbuf[0] = 0x11;
- wbuf[1] = 0x00;
- ret = gl861_i2c_msg(d, 0x00, wbuf, 2, NULL, 0);
+ ret = gl861_short_write(d, 0x00, 0x11, 0x00);
if (ret < 0)
return ret;
@@ -420,14 +354,13 @@ static int friio_reset(struct dvb_usb_device *d)
*/
usleep_range(1000, 2000);
- wbuf[0] = 0x03;
- wbuf[1] = 0x80;
- ret = gl861_i2c_write_ex(d, 0x09, wbuf, 2);
+ wbuf[0] = 0x80;
+ ret = gl861_ctrl_msg(d, CMD_WRITE, 0x09 << 9, 0x03, wbuf, 1);
if (ret < 0)
return ret;
usleep_range(2000, 3000);
- ret = gl861_i2c_read_ex(d, 0x09, rbuf, 2);
+ ret = gl861_ctrl_msg(d, CMD_READ, 0x09 << 9, 0x0100, rbuf, 2);
if (ret < 0)
return ret;
if (rbuf[0] != 0xff || rbuf[1] != 0xff)
@@ -435,38 +368,33 @@ static int friio_reset(struct dvb_usb_device *d)
usleep_range(1000, 2000);
- ret = gl861_i2c_write_ex(d, 0x48, wbuf, 2);
+ wbuf[0] = 0x80;
+ ret = gl861_ctrl_msg(d, CMD_WRITE, 0x48 << 9, 0x03, wbuf, 1);
if (ret < 0)
return ret;
usleep_range(2000, 3000);
- ret = gl861_i2c_read_ex(d, 0x48, rbuf, 2);
+ ret = gl861_ctrl_msg(d, CMD_READ, 0x48 << 9, 0x0100, rbuf, 2);
if (ret < 0)
return ret;
if (rbuf[0] != 0xff || rbuf[1] != 0xff)
return -ENODEV;
- wbuf[0] = 0x30;
- wbuf[1] = 0x04;
- ret = gl861_i2c_msg(d, 0x00, wbuf, 2, NULL, 0);
+ ret = gl861_short_write(d, 0x00, 0x30, 0x04);
if (ret < 0)
return ret;
- wbuf[0] = 0x00;
- wbuf[1] = 0x01;
- ret = gl861_i2c_msg(d, 0x00, wbuf, 2, NULL, 0);
+ ret = gl861_short_write(d, 0x00, 0x00, 0x01);
if (ret < 0)
return ret;
- wbuf[0] = 0x06;
- wbuf[1] = 0x0f;
- ret = gl861_i2c_msg(d, 0x00, wbuf, 2, NULL, 0);
+ ret = gl861_short_write(d, 0x00, 0x06, 0x0f);
if (ret < 0)
return ret;
for (i = 0; i < ARRAY_SIZE(friio_init_cmds); i++) {
- ret = gl861_i2c_msg(d, 0x00, (u8 *)friio_init_cmds[i], 2,
- NULL, 0);
+ ret = gl861_short_write(d, 0x00, friio_init_cmds[i][0],
+ friio_init_cmds[i][1]);
if (ret < 0)
return ret;
}
@@ -488,9 +416,10 @@ static int friio_frontend_attach(struct dvb_usb_adapter *adap)
struct dvb_usb_device *d;
struct tc90522_config cfg;
struct i2c_client *cl;
- struct friio_priv *priv;
+ struct gl861 *priv;
info = &friio_config.demod_info;
+ cfg = friio_config.demod_cfg;
d = adap_to_d(adap);
cl = dvb_module_probe("tc90522", info->type,
&d->i2c_adap, info->addr, &cfg);
@@ -498,25 +427,17 @@ static int friio_frontend_attach(struct dvb_usb_adapter *adap)
return -ENODEV;
adap->fe[0] = cfg.fe;
- /* ignore cfg.tuner_i2c and create new one */
priv = adap_to_priv(adap);
priv->i2c_client_demod = cl;
- priv->tuner_adap.algo = &friio_tuner_i2c_algo;
- priv->tuner_adap.dev.parent = &d->udev->dev;
- strscpy(priv->tuner_adap.name, d->name, sizeof(priv->tuner_adap.name));
- strlcat(priv->tuner_adap.name, "-tuner", sizeof(priv->tuner_adap.name));
- priv->demod_sub_i2c = &priv->tuner_adap;
- i2c_set_adapdata(&priv->tuner_adap, d);
-
- return i2c_add_adapter(&priv->tuner_adap);
+ priv->demod_sub_i2c = cfg.tuner_i2c;
+ return 0;
}
static int friio_frontend_detach(struct dvb_usb_adapter *adap)
{
- struct friio_priv *priv;
+ struct gl861 *priv;
priv = adap_to_priv(adap);
- i2c_del_adapter(&priv->tuner_adap);
dvb_module_release(priv->i2c_client_demod);
return 0;
}
@@ -526,7 +447,7 @@ static int friio_tuner_attach(struct dvb_usb_adapter *adap)
const struct i2c_board_info *info;
struct dvb_pll_config cfg;
struct i2c_client *cl;
- struct friio_priv *priv;
+ struct gl861 *priv;
priv = adap_to_priv(adap);
info = &friio_config.tuner_info;
@@ -543,7 +464,7 @@ static int friio_tuner_attach(struct dvb_usb_adapter *adap)
static int friio_tuner_detach(struct dvb_usb_adapter *adap)
{
- struct friio_priv *priv;
+ struct gl861 *priv;
priv = adap_to_priv(adap);
dvb_module_release(priv->i2c_client_tuner);
@@ -554,7 +475,7 @@ static int friio_init(struct dvb_usb_device *d)
{
int i;
int ret;
- struct friio_priv *priv;
+ struct gl861 *priv;
static const u8 demod_init[][2] = {
{0x01, 0x40}, {0x04, 0x38}, {0x05, 0x40}, {0x07, 0x40},
@@ -606,7 +527,7 @@ static struct dvb_usb_device_properties friio_props = {
.owner = THIS_MODULE,
.adapter_nr = adapter_nr,
- .size_of_priv = sizeof(struct friio_priv),
+ .size_of_priv = sizeof(struct gl861),
.i2c_algo = &gl861_i2c_algo,
.power_ctrl = friio_power_ctrl,
diff --git a/drivers/media/usb/dvb-usb-v2/gl861.h b/drivers/media/usb/dvb-usb-v2/gl861.h
deleted file mode 100644
index 02c00e10748a..000000000000
--- a/drivers/media/usb/dvb-usb-v2/gl861.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _DVB_USB_GL861_H_
-#define _DVB_USB_GL861_H_
-
-#include "dvb_usb.h"
-
-#define GL861_WRITE 0x40
-#define GL861_READ 0xc0
-
-#define GL861_REQ_I2C_WRITE 0x01
-#define GL861_REQ_I2C_READ 0x02
-#define GL861_REQ_I2C_RAW 0x03
-
-#endif
diff --git a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
index 1a36bda28542..5016ede7b35f 100644
--- a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
+++ b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
@@ -1781,7 +1781,6 @@ static int rtl2832u_rc_query(struct dvb_usb_device *d)
}
/* 'flush' ir_raw_event_store_with_filter() */
- ir_raw_event_set_idle(d->rc_dev, true);
ir_raw_event_handle(d->rc_dev);
exit:
return ret;
@@ -1804,6 +1803,8 @@ static int rtl2832u_get_rc_config(struct dvb_usb_device *d,
rc->driver_type = RC_DRIVER_IR_RAW;
rc->query = rtl2832u_rc_query;
rc->interval = 200;
+ /* we program the idle length to 0xc0; set the timeout to one unit less */
+ rc->timeout = 0xbf * 50800;
return 0;
}
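Spelling out the timeout arithmetic (assuming, per rc-core convention,
that rc->timeout is in nanoseconds and that one RTL2832 IR measurement
unit lasts 50800 ns):

	/* (0xc0 - 1) units = 0xbf = 191; 191 * 50800 ns = 9702800 ns,
	 * i.e. ~9.7 ms, one unit shorter than the programmed idle length */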
@@ -1957,7 +1958,8 @@ static const struct usb_device_id rtl28xxu_id_table[] = {
/* RTL2832P devices: */
{ DVB_USB_DEVICE(USB_VID_HANFTEK, 0x0131,
- &rtl28xxu_props, "Astrometa DVB-T2", NULL) },
+ &rtl28xxu_props, "Astrometa DVB-T2",
+ RC_MAP_ASTROMETA_T2HYBRID) },
{ DVB_USB_DEVICE(0x5654, 0xca42,
&rtl28xxu_props, "GoTView MasterHD 3", NULL) },
{ }
diff --git a/drivers/media/usb/dvb-usb/af9005.c b/drivers/media/usb/dvb-usb/af9005.c
index 02697d86e8c1..ac93e88d7038 100644
--- a/drivers/media/usb/dvb-usb/af9005.c
+++ b/drivers/media/usb/dvb-usb/af9005.c
@@ -976,8 +976,9 @@ static int af9005_identify_state(struct usb_device *udev,
else if (reply == 0x02)
*cold = 0;
else
- return -EIO;
- deb_info("Identify state cold = %d\n", *cold);
+ ret = -EIO;
+ if (!ret)
+ deb_info("Identify state cold = %d\n", *cold);
err:
kfree(buf);
diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
index f02fa0a67aa4..fac19ec46089 100644
--- a/drivers/media/usb/dvb-usb/cxusb.c
+++ b/drivers/media/usb/dvb-usb/cxusb.c
@@ -521,7 +521,8 @@ static int cxusb_rc_query(struct dvb_usb_device *d)
{
u8 ircode[4];
- cxusb_ctrl_msg(d, CMD_GET_IR_CODE, NULL, 0, ircode, 4);
+ if (cxusb_ctrl_msg(d, CMD_GET_IR_CODE, NULL, 0, ircode, 4) < 0)
+ return 0;
if (ircode[2] || ircode[3])
rc_keydown(d->rc_dev, RC_PROTO_NEC,
diff --git a/drivers/media/usb/em28xx/em28xx-audio.c b/drivers/media/usb/em28xx/em28xx-audio.c
index 49c9b70b632b..79dfbb25714b 100644
--- a/drivers/media/usb/em28xx/em28xx-audio.c
+++ b/drivers/media/usb/em28xx/em28xx-audio.c
@@ -31,7 +31,6 @@
#include <linux/soundcard.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
-#include <linux/proc_fs.h>
#include <linux/module.h>
#include <sound/core.h>
#include <sound/pcm.h>
diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c
index 5983e72a0622..def9cdd931a9 100644
--- a/drivers/media/usb/em28xx/em28xx-cards.c
+++ b/drivers/media/usb/em28xx/em28xx-cards.c
@@ -2487,6 +2487,24 @@ const struct em28xx_board em28xx_boards[] = {
.ir_codes = RC_MAP_HAUPPAUGE,
.leds = hauppauge_dualhd_leds,
},
+ /*
+ * 1b80:e349 Magix USB Videowandler-2
+ * (same chips as Honestech VIDBOX NW03)
+ * Empia EM2860, Philips SAA7113, Empia EMP202, No Tuner
+ */
+ [EM2861_BOARD_MAGIX_VIDEOWANDLER2] = {
+ .name = "Magix USB Videowandler-2",
+ .tuner_type = TUNER_ABSENT,
+ .decoder = EM28XX_SAA711X,
+ .input = { {
+ .type = EM28XX_VMUX_COMPOSITE,
+ .vmux = SAA7115_COMPOSITE0,
+ .amux = EM28XX_AMUX_LINE_IN,
+ }, {
+ .type = EM28XX_VMUX_SVIDEO,
+ .amux = EM28XX_AMUX_LINE_IN,
+ } },
+ },
};
EXPORT_SYMBOL_GPL(em28xx_boards);
@@ -2696,6 +2714,8 @@ struct usb_device_id em28xx_id_table[] = {
.driver_info = EM28178_BOARD_PLEX_PX_BCUD },
{ USB_DEVICE(0xeb1a, 0x5051), /* Ion Video 2 PC MKII / Startech svid2usb23 / Raygo R12-41373 */
.driver_info = EM2860_BOARD_TVP5150_REFERENCE_DESIGN },
+ { USB_DEVICE(0x1b80, 0xe349), /* Magix USB Videowandler-2 */
+ .driver_info = EM2861_BOARD_MAGIX_VIDEOWANDLER2 },
{ },
};
MODULE_DEVICE_TABLE(usb, em28xx_id_table);
diff --git a/drivers/media/usb/em28xx/em28xx-dvb.c b/drivers/media/usb/em28xx/em28xx-dvb.c
index a73faf12f7e4..0ab6c493bc74 100644
--- a/drivers/media/usb/em28xx/em28xx-dvb.c
+++ b/drivers/media/usb/em28xx/em28xx-dvb.c
@@ -471,13 +471,13 @@ static void hauppauge_hvr930c_init(struct em28xx *dev)
{
int i;
- struct em28xx_reg_seq hauppauge_hvr930c_init[] = {
+ static const struct em28xx_reg_seq hauppauge_hvr930c_init[] = {
{EM2874_R80_GPIO_P0_CTRL, 0xff, 0xff, 0x65},
{EM2874_R80_GPIO_P0_CTRL, 0xfb, 0xff, 0x32},
{EM2874_R80_GPIO_P0_CTRL, 0xff, 0xff, 0xb8},
{ -1, -1, -1, -1},
};
- struct em28xx_reg_seq hauppauge_hvr930c_end[] = {
+ static const struct em28xx_reg_seq hauppauge_hvr930c_end[] = {
{EM2874_R80_GPIO_P0_CTRL, 0xef, 0xff, 0x01},
{EM2874_R80_GPIO_P0_CTRL, 0xaf, 0xff, 0x65},
{EM2874_R80_GPIO_P0_CTRL, 0xef, 0xff, 0x76},
@@ -493,7 +493,7 @@ static void hauppauge_hvr930c_init(struct em28xx *dev)
{ -1, -1, -1, -1},
};
- struct {
+ static const struct {
unsigned char r[4];
int len;
} regs[] = {
@@ -537,20 +537,20 @@ static void hauppauge_hvr930c_init(struct em28xx *dev)
static void terratec_h5_init(struct em28xx *dev)
{
int i;
- struct em28xx_reg_seq terratec_h5_init[] = {
+ static const struct em28xx_reg_seq terratec_h5_init[] = {
{EM2820_R08_GPIO_CTRL, 0xff, 0xff, 10},
{EM2874_R80_GPIO_P0_CTRL, 0xf6, 0xff, 100},
{EM2874_R80_GPIO_P0_CTRL, 0xf2, 0xff, 50},
{EM2874_R80_GPIO_P0_CTRL, 0xf6, 0xff, 100},
{ -1, -1, -1, -1},
};
- struct em28xx_reg_seq terratec_h5_end[] = {
+ static const struct em28xx_reg_seq terratec_h5_end[] = {
{EM2874_R80_GPIO_P0_CTRL, 0xe6, 0xff, 100},
{EM2874_R80_GPIO_P0_CTRL, 0xa6, 0xff, 50},
{EM2874_R80_GPIO_P0_CTRL, 0xe6, 0xff, 100},
{ -1, -1, -1, -1},
};
- struct {
+ static const struct {
unsigned char r[4];
int len;
} regs[] = {
@@ -594,14 +594,14 @@ static void terratec_htc_stick_init(struct em28xx *dev)
* 0xe6: unknown (does not affect DVB-T).
* 0xb6: unknown (does not affect DVB-T).
*/
- struct em28xx_reg_seq terratec_htc_stick_init[] = {
+ static const struct em28xx_reg_seq terratec_htc_stick_init[] = {
{EM2820_R08_GPIO_CTRL, 0xff, 0xff, 10},
{EM2874_R80_GPIO_P0_CTRL, 0xf6, 0xff, 100},
{EM2874_R80_GPIO_P0_CTRL, 0xe6, 0xff, 50},
{EM2874_R80_GPIO_P0_CTRL, 0xf6, 0xff, 100},
{ -1, -1, -1, -1},
};
- struct em28xx_reg_seq terratec_htc_stick_end[] = {
+ static const struct em28xx_reg_seq terratec_htc_stick_end[] = {
{EM2874_R80_GPIO_P0_CTRL, 0xb6, 0xff, 100},
{EM2874_R80_GPIO_P0_CTRL, 0xf6, 0xff, 50},
{ -1, -1, -1, -1},
@@ -611,7 +611,7 @@ static void terratec_htc_stick_init(struct em28xx *dev)
* Init the analog decoder (not yet supported), but
* it's probably still a good idea.
*/
- struct {
+ static const struct {
unsigned char r[4];
int len;
} regs[] = {
@@ -642,14 +642,14 @@ static void terratec_htc_usb_xs_init(struct em28xx *dev)
{
int i;
- struct em28xx_reg_seq terratec_htc_usb_xs_init[] = {
+ static const struct em28xx_reg_seq terratec_htc_usb_xs_init[] = {
{EM2820_R08_GPIO_CTRL, 0xff, 0xff, 10},
{EM2874_R80_GPIO_P0_CTRL, 0xb2, 0xff, 100},
{EM2874_R80_GPIO_P0_CTRL, 0xb2, 0xff, 50},
{EM2874_R80_GPIO_P0_CTRL, 0xb6, 0xff, 100},
{ -1, -1, -1, -1},
};
- struct em28xx_reg_seq terratec_htc_usb_xs_end[] = {
+ static const struct em28xx_reg_seq terratec_htc_usb_xs_end[] = {
{EM2874_R80_GPIO_P0_CTRL, 0xa6, 0xff, 100},
{EM2874_R80_GPIO_P0_CTRL, 0xa6, 0xff, 50},
{EM2874_R80_GPIO_P0_CTRL, 0xe6, 0xff, 100},
@@ -660,7 +660,7 @@ static void terratec_htc_usb_xs_init(struct em28xx *dev)
* Init the analog decoder (not yet supported), but
* it's probably still a good idea.
*/
- struct {
+ static const struct {
unsigned char r[4];
int len;
} regs[] = {
@@ -704,7 +704,7 @@ static void pctv_520e_init(struct em28xx *dev)
* digital demodulator and tuner are routed via AVF4910B.
*/
int i;
- struct {
+ static const struct {
unsigned char r[4];
int len;
} regs[] = {
@@ -800,7 +800,7 @@ static int em28xx_mt352_terratec_xs_init(struct dvb_frontend *fe)
static void px_bcud_init(struct em28xx *dev)
{
int i;
- struct {
+ static const struct {
unsigned char r[4];
int len;
} regs1[] = {
@@ -818,7 +818,7 @@ static void px_bcud_init(struct em28xx *dev)
{{ 0x85, 0x7a }, 2},
{{ 0x87, 0x04 }, 2},
};
- static struct em28xx_reg_seq gpio[] = {
+ static const struct em28xx_reg_seq gpio[] = {
{EM28XX_R06_I2C_CLK, 0x40, 0xff, 300},
{EM2874_R80_GPIO_P0_CTRL, 0xfd, 0xff, 60},
{EM28XX_R15_RGAIN, 0x20, 0xff, 0},
diff --git a/drivers/media/usb/em28xx/em28xx-i2c.c b/drivers/media/usb/em28xx/em28xx-i2c.c
index a3155ec196cc..592b98b3643a 100644
--- a/drivers/media/usb/em28xx/em28xx-i2c.c
+++ b/drivers/media/usb/em28xx/em28xx-i2c.c
@@ -949,7 +949,7 @@ void em28xx_do_i2c_scan(struct em28xx *dev, unsigned int bus)
unsigned char buf;
int i, rc;
- memset(i2c_devicelist, 0, ARRAY_SIZE(i2c_devicelist));
+ memset(i2c_devicelist, 0, sizeof(i2c_devicelist));
for (i = 0; i < ARRAY_SIZE(i2c_devs); i++) {
dev->i2c_client[bus].addr = i;
@@ -964,7 +964,7 @@ void em28xx_do_i2c_scan(struct em28xx *dev, unsigned int bus)
if (bus == dev->def_i2c_bus)
dev->i2c_hash = em28xx_hash_mem(i2c_devicelist,
- ARRAY_SIZE(i2c_devicelist), 32);
+ sizeof(i2c_devicelist), 32);
}
/*
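The two fixes above trade ARRAY_SIZE() for sizeof(): ARRAY_SIZE() is an
element count, sizeof() a byte count. For a byte array the two happen to
coincide, which is why the old code ever worked; with any wider element
type it would have covered only part of the buffer. A minimal
illustration with a hypothetical u16 array:

	u16 list[128];

	memset(list, 0, ARRAY_SIZE(list));	/* clears 128 bytes: half the array */
	memset(list, 0, sizeof(list));		/* clears all 256 bytes */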
diff --git a/drivers/media/usb/em28xx/em28xx.h b/drivers/media/usb/em28xx/em28xx.h
index c8bc59059a19..4ecadd57dac7 100644
--- a/drivers/media/usb/em28xx/em28xx.h
+++ b/drivers/media/usb/em28xx/em28xx.h
@@ -149,6 +149,7 @@
#define EM28174_BOARD_HAUPPAUGE_WINTV_DUALHD_01595 100
#define EM2884_BOARD_TERRATEC_H6 101
#define EM2882_BOARD_ZOLID_HYBRID_TV_STICK 102
+#define EM2861_BOARD_MAGIX_VIDEOWANDLER2 103
/* Limits minimum and default number of buffers */
#define EM28XX_MIN_BUF 4
diff --git a/drivers/media/usb/gspca/sq905.c b/drivers/media/usb/gspca/sq905.c
index 863c485f4275..97799cfb832e 100644
--- a/drivers/media/usb/gspca/sq905.c
+++ b/drivers/media/usb/gspca/sq905.c
@@ -378,6 +378,9 @@ static int sd_start(struct gspca_dev *gspca_dev)
}
/* Start the workqueue function to do the streaming */
dev->work_thread = create_singlethread_workqueue(MODULE_NAME);
+ if (!dev->work_thread)
+ return -ENOMEM;
+
queue_work(dev->work_thread, &dev->work_struct);
return 0;
diff --git a/drivers/media/usb/gspca/sq905c.c b/drivers/media/usb/gspca/sq905c.c
index 3d7f6dcdd7a8..6ca947aef298 100644
--- a/drivers/media/usb/gspca/sq905c.c
+++ b/drivers/media/usb/gspca/sq905c.c
@@ -276,6 +276,9 @@ static int sd_start(struct gspca_dev *gspca_dev)
}
/* Start the workqueue function to do the streaming */
dev->work_thread = create_singlethread_workqueue(MODULE_NAME);
+ if (!dev->work_thread)
+ return -ENOMEM;
+
queue_work(dev->work_thread, &dev->work_struct);
return 0;
diff --git a/drivers/media/usb/gspca/stv0680.c b/drivers/media/usb/gspca/stv0680.c
index f869eb6065ce..b23988d8c7bc 100644
--- a/drivers/media/usb/gspca/stv0680.c
+++ b/drivers/media/usb/gspca/stv0680.c
@@ -35,7 +35,7 @@ struct sd {
static int stv_sndctrl(struct gspca_dev *gspca_dev, int set, u8 req, u16 val,
int size)
{
- int ret = -1;
+ int ret;
u8 req_type = 0;
unsigned int pipe = 0;
diff --git a/drivers/media/usb/gspca/stv06xx/stv06xx_st6422.c b/drivers/media/usb/gspca/stv06xx/stv06xx_st6422.c
index 7104a88b1e43..aac19d449be2 100644
--- a/drivers/media/usb/gspca/stv06xx/stv06xx_st6422.c
+++ b/drivers/media/usb/gspca/stv06xx/stv06xx_st6422.c
@@ -117,7 +117,7 @@ static int st6422_init(struct sd *sd)
{
int err = 0, i;
- const u16 st6422_bridge_init[][2] = {
+ static const u16 st6422_bridge_init[][2] = {
{ STV_ISO_ENABLE, 0x00 }, /* disable capture */
{ 0x1436, 0x00 },
{ 0x1432, 0x03 }, /* 0x00-0x1F brightness */
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
index a34717eba409..eaa08c7999d4 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
@@ -898,8 +898,12 @@ static void pvr2_v4l2_internal_check(struct pvr2_channel *chp)
pvr2_v4l2_dev_disassociate_parent(vp->dev_video);
pvr2_v4l2_dev_disassociate_parent(vp->dev_radio);
if (!list_empty(&vp->dev_video->devbase.fh_list) ||
- !list_empty(&vp->dev_radio->devbase.fh_list))
+ (vp->dev_radio &&
+ !list_empty(&vp->dev_radio->devbase.fh_list))) {
+ pvr2_trace(PVR2_TRACE_STRUCT,
+ "pvr2_v4l2 internal_check exit-empty id=%p", vp);
return;
+ }
pvr2_v4l2_destroy_no_lock(vp);
}
@@ -935,7 +939,8 @@ static int pvr2_v4l2_release(struct file *file)
kfree(fhp);
if (vp->channel.mc_head->disconnect_flag &&
list_empty(&vp->dev_video->devbase.fh_list) &&
- list_empty(&vp->dev_radio->devbase.fh_list)) {
+ (!vp->dev_radio ||
+ list_empty(&vp->dev_radio->devbase.fh_list))) {
pvr2_v4l2_destroy_no_lock(vp);
}
return 0;
diff --git a/drivers/media/usb/tm6000/tm6000-regs.h b/drivers/media/usb/tm6000/tm6000-regs.h
index d10424673db9..6a181f2e7ef2 100644
--- a/drivers/media/usb/tm6000/tm6000-regs.h
+++ b/drivers/media/usb/tm6000/tm6000-regs.h
@@ -1,5 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
- * SPDX-License-Identifier: GPL-2.0
* tm6000-regs.h - driver for TM5600/TM6000/TM6010 USB video capture devices
*
* Copyright (c) 2006-2007 Mauro Carvalho Chehab <mchehab@kernel.org>
diff --git a/drivers/media/usb/tm6000/tm6000-usb-isoc.h b/drivers/media/usb/tm6000/tm6000-usb-isoc.h
index b275dbce3a1b..e3c6933f854d 100644
--- a/drivers/media/usb/tm6000/tm6000-usb-isoc.h
+++ b/drivers/media/usb/tm6000/tm6000-usb-isoc.h
@@ -1,5 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
- * SPDX-License-Identifier: GPL-2.0
* tm6000-buf.c - driver for TM5600/TM6000/TM6010 USB video capture devices
*
* Copyright (c) 2006-2007 Mauro Carvalho Chehab <mchehab@kernel.org>
diff --git a/drivers/media/usb/tm6000/tm6000.h b/drivers/media/usb/tm6000/tm6000.h
index bf396544da9a..c08c95312739 100644
--- a/drivers/media/usb/tm6000/tm6000.h
+++ b/drivers/media/usb/tm6000/tm6000.h
@@ -1,5 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
- * SPDX-License-Identifier: GPL-2.0
* tm6000.h - driver for TM5600/TM6000/TM6010 USB video capture devices
*
* Copyright (c) 2006-2007 Mauro Carvalho Chehab <mchehab@kernel.org>
diff --git a/drivers/media/usb/usbtv/usbtv-audio.c b/drivers/media/usb/usbtv/usbtv-audio.c
index 6f108996142d..e746c8ddfc49 100644
--- a/drivers/media/usb/usbtv/usbtv-audio.c
+++ b/drivers/media/usb/usbtv/usbtv-audio.c
@@ -378,8 +378,7 @@ int usbtv_audio_init(struct usbtv *usbtv)
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_usbtv_pcm_ops);
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS,
- snd_dma_continuous_data(GFP_KERNEL), USBTV_AUDIO_BUFFER,
- USBTV_AUDIO_BUFFER);
+ NULL, USBTV_AUDIO_BUFFER, USBTV_AUDIO_BUFFER);
rv = snd_card_register(card);
if (rv)
diff --git a/drivers/media/usb/usbvision/usbvision-video.c b/drivers/media/usb/usbvision/usbvision-video.c
index cdc66adda755..93d36aab824f 100644
--- a/drivers/media/usb/usbvision/usbvision-video.c
+++ b/drivers/media/usb/usbvision/usbvision-video.c
@@ -314,6 +314,10 @@ static int usbvision_v4l2_open(struct file *file)
if (mutex_lock_interruptible(&usbvision->v4l2_lock))
return -ERESTARTSYS;
+ if (usbvision->remove_pending) {
+ err_code = -ENODEV;
+ goto unlock;
+ }
if (usbvision->user) {
err_code = -EBUSY;
} else {
@@ -377,6 +381,7 @@ unlock:
static int usbvision_v4l2_close(struct file *file)
{
struct usb_usbvision *usbvision = video_drvdata(file);
+ int r;
PDEBUG(DBG_IO, "close");
@@ -391,9 +396,10 @@ static int usbvision_v4l2_close(struct file *file)
usbvision_scratch_free(usbvision);
usbvision->user--;
+ r = usbvision->remove_pending;
mutex_unlock(&usbvision->v4l2_lock);
- if (usbvision->remove_pending) {
+ if (r) {
printk(KERN_INFO "%s: Final disconnect\n", __func__);
usbvision_release(usbvision);
return 0;
@@ -453,6 +459,9 @@ static int vidioc_querycap(struct file *file, void *priv,
{
struct usb_usbvision *usbvision = video_drvdata(file);
+ if (!usbvision->dev)
+ return -ENODEV;
+
strscpy(vc->driver, "USBVision", sizeof(vc->driver));
strscpy(vc->card,
usbvision_device_data[usbvision->dev_model].model_string,
@@ -1061,6 +1070,11 @@ static int usbvision_radio_open(struct file *file)
if (mutex_lock_interruptible(&usbvision->v4l2_lock))
return -ERESTARTSYS;
+
+ if (usbvision->remove_pending) {
+ err_code = -ENODEV;
+ goto out;
+ }
err_code = v4l2_fh_open(file);
if (err_code)
goto out;
@@ -1093,21 +1107,24 @@ out:
static int usbvision_radio_close(struct file *file)
{
struct usb_usbvision *usbvision = video_drvdata(file);
+ int r;
PDEBUG(DBG_IO, "");
mutex_lock(&usbvision->v4l2_lock);
/* Set packet size to 0 */
usbvision->iface_alt = 0;
- usb_set_interface(usbvision->dev, usbvision->iface,
- usbvision->iface_alt);
+ if (usbvision->dev)
+ usb_set_interface(usbvision->dev, usbvision->iface,
+ usbvision->iface_alt);
usbvision_audio_off(usbvision);
usbvision->radio = 0;
usbvision->user--;
+ r = usbvision->remove_pending;
mutex_unlock(&usbvision->v4l2_lock);
- if (usbvision->remove_pending) {
+ if (r) {
printk(KERN_INFO "%s: Final disconnect\n", __func__);
v4l2_fh_release(file);
usbvision_release(usbvision);
@@ -1539,6 +1556,7 @@ err_usb:
static void usbvision_disconnect(struct usb_interface *intf)
{
struct usb_usbvision *usbvision = to_usbvision(usb_get_intfdata(intf));
+ int u;
PDEBUG(DBG_PROBE, "");
@@ -1555,13 +1573,14 @@ static void usbvision_disconnect(struct usb_interface *intf)
v4l2_device_disconnect(&usbvision->v4l2_dev);
usbvision_i2c_unregister(usbvision);
usbvision->remove_pending = 1; /* Now all ISO data will be ignored */
+ u = usbvision->user;
usb_put_dev(usbvision->dev);
usbvision->dev = NULL; /* USB device is no more */
mutex_unlock(&usbvision->v4l2_lock);
- if (usbvision->user) {
+ if (u) {
printk(KERN_INFO "%s: In use, disconnect pending\n",
__func__);
wake_up_interruptible(&usbvision->wait_frame);
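All of the usbvision changes above apply one pattern: snapshot the
shared state (remove_pending, user) into a local while v4l2_lock is
held, and test only the local after unlocking, since the teardown path
may free the device once the lock drops. Condensed sketch (driver
names, simplified flow):

	mutex_lock(&usbvision->v4l2_lock);
	usbvision->user--;
	r = usbvision->remove_pending;	/* snapshot under the lock */
	mutex_unlock(&usbvision->v4l2_lock);

	if (r)		/* no re-read of the struct after unlock */
		usbvision_release(usbvision);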
diff --git a/drivers/media/usb/uvc/uvc_debugfs.c b/drivers/media/usb/uvc/uvc_debugfs.c
index d2b109959d82..2b8af4b54117 100644
--- a/drivers/media/usb/uvc/uvc_debugfs.c
+++ b/drivers/media/usb/uvc/uvc_debugfs.c
@@ -108,15 +108,7 @@ void uvc_debugfs_cleanup_stream(struct uvc_streaming *stream)
void uvc_debugfs_init(void)
{
- struct dentry *dir;
-
- dir = debugfs_create_dir("uvcvideo", usb_debug_root);
- if (IS_ERR_OR_NULL(dir)) {
- uvc_printk(KERN_INFO, "Unable to create debugfs directory\n");
- return;
- }
-
- uvc_debugfs_root_dir = dir;
+ uvc_debugfs_root_dir = debugfs_create_dir("uvcvideo", usb_debug_root);
}
void uvc_debugfs_cleanup(void)
diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
index 66ee168ddc7e..428235ca2635 100644
--- a/drivers/media/usb/uvc/uvc_driver.c
+++ b/drivers/media/usb/uvc/uvc_driver.c
@@ -2151,6 +2151,20 @@ static int uvc_probe(struct usb_interface *intf,
sizeof(dev->name) - len);
}
+ /* Initialize the media device. */
+#ifdef CONFIG_MEDIA_CONTROLLER
+ dev->mdev.dev = &intf->dev;
+ strscpy(dev->mdev.model, dev->name, sizeof(dev->mdev.model));
+ if (udev->serial)
+ strscpy(dev->mdev.serial, udev->serial,
+ sizeof(dev->mdev.serial));
+ usb_make_path(udev, dev->mdev.bus_info, sizeof(dev->mdev.bus_info));
+ dev->mdev.hw_revision = le16_to_cpu(udev->descriptor.bcdDevice);
+ media_device_init(&dev->mdev);
+
+ dev->vdev.mdev = &dev->mdev;
+#endif
+
/* Parse the Video Class control descriptor. */
if (uvc_parse_control(dev) < 0) {
uvc_trace(UVC_TRACE_PROBE, "Unable to parse UVC "
@@ -2171,19 +2185,7 @@ static int uvc_probe(struct usb_interface *intf,
"linux-uvc-devel mailing list.\n");
}
- /* Initialize the media device and register the V4L2 device. */
-#ifdef CONFIG_MEDIA_CONTROLLER
- dev->mdev.dev = &intf->dev;
- strscpy(dev->mdev.model, dev->name, sizeof(dev->mdev.model));
- if (udev->serial)
- strscpy(dev->mdev.serial, udev->serial,
- sizeof(dev->mdev.serial));
- usb_make_path(udev, dev->mdev.bus_info, sizeof(dev->mdev.bus_info));
- dev->mdev.hw_revision = le16_to_cpu(udev->descriptor.bcdDevice);
- media_device_init(&dev->mdev);
-
- dev->vdev.mdev = &dev->mdev;
-#endif
+ /* Register the V4L2 device. */
if (v4l2_device_register(&intf->dev, &dev->vdev) < 0)
goto error;
diff --git a/drivers/media/usb/uvc/uvc_metadata.c b/drivers/media/usb/uvc/uvc_metadata.c
index 99bb71b47117..b6279ad7ac84 100644
--- a/drivers/media/usb/uvc/uvc_metadata.c
+++ b/drivers/media/usb/uvc/uvc_metadata.c
@@ -51,7 +51,7 @@ static int uvc_meta_v4l2_get_format(struct file *file, void *fh,
memset(fmt, 0, sizeof(*fmt));
fmt->dataformat = stream->meta.format;
- fmt->buffersize = UVC_METATADA_BUF_SIZE;
+ fmt->buffersize = UVC_METADATA_BUF_SIZE;
return 0;
}
@@ -72,7 +72,7 @@ static int uvc_meta_v4l2_try_format(struct file *file, void *fh,
fmt->dataformat = fmeta == dev->info->meta_format
? fmeta : V4L2_META_FMT_UVC;
- fmt->buffersize = UVC_METATADA_BUF_SIZE;
+ fmt->buffersize = UVC_METADATA_BUF_SIZE;
return 0;
}
diff --git a/drivers/media/usb/uvc/uvc_queue.c b/drivers/media/usb/uvc/uvc_queue.c
index da72577c2998..cd60c6c1749e 100644
--- a/drivers/media/usb/uvc/uvc_queue.c
+++ b/drivers/media/usb/uvc/uvc_queue.c
@@ -79,7 +79,7 @@ static int uvc_queue_setup(struct vb2_queue *vq,
switch (vq->type) {
case V4L2_BUF_TYPE_META_CAPTURE:
- size = UVC_METATADA_BUF_SIZE;
+ size = UVC_METADATA_BUF_SIZE;
break;
default:
diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
index c7c1baa90dea..f773dc5d802c 100644
--- a/drivers/media/usb/uvc/uvcvideo.h
+++ b/drivers/media/usb/uvc/uvcvideo.h
@@ -491,7 +491,7 @@ struct uvc_stats_stream {
unsigned int max_sof; /* Maximum STC.SOF value */
};
-#define UVC_METATADA_BUF_SIZE 1024
+#define UVC_METADATA_BUF_SIZE 1024
/**
* struct uvc_copy_op: Context structure to schedule asynchronous memcpy
diff --git a/drivers/media/usb/zr364xx/zr364xx.c b/drivers/media/usb/zr364xx/zr364xx.c
index 637962825d7a..57dbcc8083bf 100644
--- a/drivers/media/usb/zr364xx/zr364xx.c
+++ b/drivers/media/usb/zr364xx/zr364xx.c
@@ -20,7 +20,6 @@
#include <linux/usb.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
-#include <linux/proc_fs.h>
#include <linux/highmem.h>
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
@@ -556,14 +555,12 @@ static int zr364xx_read_video_callback(struct zr364xx_camera *cam,
{
unsigned char *pdest;
unsigned char *psrc;
- s32 idx = -1;
- struct zr364xx_framei *frm;
+ s32 idx = cam->cur_frame;
+ struct zr364xx_framei *frm = &cam->buffer.frame[idx];
int i = 0;
unsigned char *ptr = NULL;
_DBG("buffer to user\n");
- idx = cam->cur_frame;
- frm = &cam->buffer.frame[idx];
/* swap bytes if camera needs it */
if (cam->method == METHOD0) {
diff --git a/drivers/media/v4l2-core/v4l2-common.c b/drivers/media/v4l2-core/v4l2-common.c
index 62f7aa92ac29..d0e5ebc736f9 100644
--- a/drivers/media/v4l2-core/v4l2-common.c
+++ b/drivers/media/v4l2-core/v4l2-common.c
@@ -236,77 +236,79 @@ const struct v4l2_format_info *v4l2_format_info(u32 format)
{
static const struct v4l2_format_info formats[] = {
/* RGB formats */
- { .format = V4L2_PIX_FMT_BGR24, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_RGB24, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_HSV24, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_BGR32, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_XBGR32, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_BGRX32, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_RGB32, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_XRGB32, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_RGBX32, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_HSV32, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_ARGB32, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_RGBA32, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_ABGR32, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_BGRA32, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_GREY, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_BGR24, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_RGB24, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_HSV24, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_BGR32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_XBGR32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_BGRX32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_RGB32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_XRGB32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_RGBX32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_HSV32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_ARGB32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_RGBA32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_ABGR32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_BGRA32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_GREY, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_RGB565, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_RGB555, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
/* YUV packed formats */
- { .format = V4L2_PIX_FMT_YUYV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 2, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_YVYU, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 2, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_UYVY, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 2, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_VYUY, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_YUYV, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_YVYU, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_UYVY, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_VYUY, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 2, .vdiv = 1 },
/* YUV planar formats */
- { .format = V4L2_PIX_FMT_NV12, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 2 },
- { .format = V4L2_PIX_FMT_NV21, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 2 },
- { .format = V4L2_PIX_FMT_NV16, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_NV61, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_NV24, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_NV42, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 1, .vdiv = 1 },
-
- { .format = V4L2_PIX_FMT_YUV410, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 4, .vdiv = 4 },
- { .format = V4L2_PIX_FMT_YVU410, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 4, .vdiv = 4 },
- { .format = V4L2_PIX_FMT_YUV411P, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 4, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_YUV420, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 2 },
- { .format = V4L2_PIX_FMT_YVU420, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 2 },
- { .format = V4L2_PIX_FMT_YUV422P, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_NV12, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 2 },
+ { .format = V4L2_PIX_FMT_NV21, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 2 },
+ { .format = V4L2_PIX_FMT_NV16, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_NV61, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_NV24, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_NV42, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+
+ { .format = V4L2_PIX_FMT_YUV410, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 4, .vdiv = 4 },
+ { .format = V4L2_PIX_FMT_YVU410, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 4, .vdiv = 4 },
+ { .format = V4L2_PIX_FMT_YUV411P, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 4, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_YUV420, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 2 },
+ { .format = V4L2_PIX_FMT_YVU420, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 2 },
+ { .format = V4L2_PIX_FMT_YUV422P, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 1 },
/* YUV planar formats, non contiguous variant */
- { .format = V4L2_PIX_FMT_YUV420M, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 2 },
- { .format = V4L2_PIX_FMT_YVU420M, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 2 },
- { .format = V4L2_PIX_FMT_YUV422M, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_YVU422M, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_YUV444M, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_YVU444M, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 1, .vdiv = 1 },
-
- { .format = V4L2_PIX_FMT_NV12M, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 2 },
- { .format = V4L2_PIX_FMT_NV21M, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 2 },
- { .format = V4L2_PIX_FMT_NV16M, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_NV61M, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_YUV420M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 2 },
+ { .format = V4L2_PIX_FMT_YVU420M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 2 },
+ { .format = V4L2_PIX_FMT_YUV422M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_YVU422M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_YUV444M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_YVU444M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 1, .vdiv = 1 },
+
+ { .format = V4L2_PIX_FMT_NV12M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 2 },
+ { .format = V4L2_PIX_FMT_NV21M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 2 },
+ { .format = V4L2_PIX_FMT_NV16M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_NV61M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 1 },
/* Bayer RGB formats */
- { .format = V4L2_PIX_FMT_SBGGR8, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_SGBRG8, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_SGRBG8, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_SRGGB8, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_SBGGR10, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_SGBRG10, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_SGRBG10, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_SRGGB10, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_SBGGR10ALAW8, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_SGBRG10ALAW8, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_SGRBG10ALAW8, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_SRGGB10ALAW8, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_SBGGR10DPCM8, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_SGBRG10DPCM8, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_SGRBG10DPCM8, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_SRGGB10DPCM8, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_SBGGR12, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_SGBRG12, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_SGRBG12, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_SRGGB12, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SBGGR8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGBRG8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGRBG8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SRGGB8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SBGGR10, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGBRG10, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGRBG10, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SRGGB10, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SBGGR10ALAW8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGBRG10ALAW8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGRBG10ALAW8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SRGGB10ALAW8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SBGGR10DPCM8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGBRG10DPCM8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGRBG10DPCM8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SRGGB10DPCM8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SBGGR12, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGBRG12, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGRBG12, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SRGGB12, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
};
unsigned int i;
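
The new pixel_enc field lets callers classify a fourcc without keeping their
own format tables. A minimal sketch of such a caller, using only
v4l2_format_info() and the V4L2_PIXEL_ENC_* values from the hunk above (the
needs_csc() helper itself is illustrative):

    #include <media/v4l2-common.h>

    /* Route only YUV sources through the colour-space converter;
     * unknown fourccs fall back to "no conversion". */
    static bool needs_csc(u32 fourcc)
    {
    	const struct v4l2_format_info *info = v4l2_format_info(fourcc);

    	return info && info->pixel_enc == V4L2_PIXEL_ENC_YUV;
    }
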
diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
index 1d8f38824631..2928c5e0a73d 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls.c
@@ -29,6 +29,8 @@
#define call_op(master, op) \
(has_op(master, op) ? master->ops->op(master) : 0)
+static const union v4l2_ctrl_ptr ptr_null;
+
/* Internal temporary helper struct, one for each v4l2_ext_control */
struct v4l2_ctrl_helper {
/* Pointer to the control reference of the master control */
@@ -566,6 +568,16 @@ const char * const *v4l2_ctrl_get_menu(u32 id)
"Disabled at slice boundary",
"NULL",
};
+ static const char * const hevc_decode_mode[] = {
+ "Slice-Based",
+ "Frame-Based",
+ NULL,
+ };
+ static const char * const hevc_start_code[] = {
+ "No Start Code",
+ "Annex B Start Code",
+ NULL,
+ };
switch (id) {
case V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ:
@@ -687,7 +699,10 @@ const char * const *v4l2_ctrl_get_menu(u32 id)
return hevc_tier;
case V4L2_CID_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE:
return hevc_loop_filter_mode;
-
+ case V4L2_CID_MPEG_VIDEO_HEVC_DECODE_MODE:
+ return hevc_decode_mode;
+ case V4L2_CID_MPEG_VIDEO_HEVC_START_CODE:
+ return hevc_start_code;
default:
return NULL;
}
@@ -957,6 +972,11 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_MPEG_VIDEO_HEVC_SIZE_OF_LENGTH_FIELD: return "HEVC Size of Length Field";
case V4L2_CID_MPEG_VIDEO_REF_NUMBER_FOR_PFRAMES: return "Reference Frames for a P-Frame";
case V4L2_CID_MPEG_VIDEO_PREPEND_SPSPPS_TO_IDR: return "Prepend SPS and PPS to IDR";
+ case V4L2_CID_MPEG_VIDEO_HEVC_SPS: return "HEVC Sequence Parameter Set";
+ case V4L2_CID_MPEG_VIDEO_HEVC_PPS: return "HEVC Picture Parameter Set";
+ case V4L2_CID_MPEG_VIDEO_HEVC_SLICE_PARAMS: return "HEVC Slice Parameters";
+ case V4L2_CID_MPEG_VIDEO_HEVC_DECODE_MODE: return "HEVC Decode Mode";
+ case V4L2_CID_MPEG_VIDEO_HEVC_START_CODE: return "HEVC Start Code";
/* CAMERA controls */
/* Keep the order of the 'case's the same as in v4l2-controls.h! */
@@ -994,6 +1014,7 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_AUTO_FOCUS_RANGE: return "Auto Focus, Range";
case V4L2_CID_PAN_SPEED: return "Pan, Speed";
case V4L2_CID_TILT_SPEED: return "Tilt, Speed";
+ case V4L2_CID_UNIT_CELL_SIZE: return "Unit Cell Size";
/* FM Radio Modulator controls */
/* Keep the order of the 'case's the same as in v4l2-controls.h! */
@@ -1265,6 +1286,8 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
case V4L2_CID_MPEG_VIDEO_HEVC_SIZE_OF_LENGTH_FIELD:
case V4L2_CID_MPEG_VIDEO_HEVC_TIER:
case V4L2_CID_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE:
+ case V4L2_CID_MPEG_VIDEO_HEVC_DECODE_MODE:
+ case V4L2_CID_MPEG_VIDEO_HEVC_START_CODE:
*type = V4L2_CTRL_TYPE_MENU;
break;
case V4L2_CID_LINK_FREQ:
@@ -1375,6 +1398,19 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
case V4L2_CID_MPEG_VIDEO_VP8_FRAME_HEADER:
*type = V4L2_CTRL_TYPE_VP8_FRAME_HEADER;
break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_SPS:
+ *type = V4L2_CTRL_TYPE_HEVC_SPS;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_PPS:
+ *type = V4L2_CTRL_TYPE_HEVC_PPS;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_SLICE_PARAMS:
+ *type = V4L2_CTRL_TYPE_HEVC_SLICE_PARAMS;
+ break;
+ case V4L2_CID_UNIT_CELL_SIZE:
+ *type = V4L2_CTRL_TYPE_AREA;
+ *flags |= V4L2_CTRL_FLAG_READ_ONLY;
+ break;
default:
*type = V4L2_CTRL_TYPE_INTEGER;
break;
@@ -1520,7 +1556,8 @@ static bool std_equal(const struct v4l2_ctrl *ctrl, u32 idx,
if (ctrl->is_int)
return ptr1.p_s32[idx] == ptr2.p_s32[idx];
idx *= ctrl->elem_size;
- return !memcmp(ptr1.p + idx, ptr2.p + idx, ctrl->elem_size);
+ return !memcmp(ptr1.p_const + idx, ptr2.p_const + idx,
+ ctrl->elem_size);
}
}
@@ -1530,7 +1567,10 @@ static void std_init_compound(const struct v4l2_ctrl *ctrl, u32 idx,
struct v4l2_ctrl_mpeg2_slice_params *p_mpeg2_slice_params;
void *p = ptr.p + idx * ctrl->elem_size;
- memset(p, 0, ctrl->elem_size);
+ if (ctrl->p_def.p_const)
+ memcpy(p, ctrl->p_def.p_const, ctrl->elem_size);
+ else
+ memset(p, 0, ctrl->elem_size);
/*
* The cast is needed to get rid of a gcc warning complaining that
@@ -1672,7 +1712,12 @@ static int std_validate_compound(const struct v4l2_ctrl *ctrl, u32 idx,
{
struct v4l2_ctrl_mpeg2_slice_params *p_mpeg2_slice_params;
struct v4l2_ctrl_vp8_frame_header *p_vp8_frame_header;
+ struct v4l2_ctrl_hevc_sps *p_hevc_sps;
+ struct v4l2_ctrl_hevc_pps *p_hevc_pps;
+ struct v4l2_ctrl_hevc_slice_params *p_hevc_slice_params;
+ struct v4l2_area *area;
void *p = ptr.p + idx * ctrl->elem_size;
+ unsigned int i;
switch ((u32)ctrl->type) {
case V4L2_CTRL_TYPE_MPEG2_SLICE_PARAMS:
@@ -1748,6 +1793,76 @@ static int std_validate_compound(const struct v4l2_ctrl *ctrl, u32 idx,
zero_padding(p_vp8_frame_header->entropy_header);
zero_padding(p_vp8_frame_header->coder_state);
break;
+
+ case V4L2_CTRL_TYPE_HEVC_SPS:
+ p_hevc_sps = p;
+
+ if (!(p_hevc_sps->flags & V4L2_HEVC_SPS_FLAG_PCM_ENABLED)) {
+ p_hevc_sps->pcm_sample_bit_depth_luma_minus1 = 0;
+ p_hevc_sps->pcm_sample_bit_depth_chroma_minus1 = 0;
+ p_hevc_sps->log2_min_pcm_luma_coding_block_size_minus3 = 0;
+ p_hevc_sps->log2_diff_max_min_pcm_luma_coding_block_size = 0;
+ }
+
+ if (!(p_hevc_sps->flags &
+ V4L2_HEVC_SPS_FLAG_LONG_TERM_REF_PICS_PRESENT))
+ p_hevc_sps->num_long_term_ref_pics_sps = 0;
+ break;
+
+ case V4L2_CTRL_TYPE_HEVC_PPS:
+ p_hevc_pps = p;
+
+ if (!(p_hevc_pps->flags &
+ V4L2_HEVC_PPS_FLAG_CU_QP_DELTA_ENABLED))
+ p_hevc_pps->diff_cu_qp_delta_depth = 0;
+
+ if (!(p_hevc_pps->flags & V4L2_HEVC_PPS_FLAG_TILES_ENABLED)) {
+ p_hevc_pps->num_tile_columns_minus1 = 0;
+ p_hevc_pps->num_tile_rows_minus1 = 0;
+ memset(&p_hevc_pps->column_width_minus1, 0,
+ sizeof(p_hevc_pps->column_width_minus1));
+ memset(&p_hevc_pps->row_height_minus1, 0,
+ sizeof(p_hevc_pps->row_height_minus1));
+
+ p_hevc_pps->flags &=
+ ~V4L2_HEVC_PPS_FLAG_PPS_LOOP_FILTER_ACROSS_SLICES_ENABLED;
+ }
+
+ if (p_hevc_pps->flags &
+ V4L2_HEVC_PPS_FLAG_PPS_DISABLE_DEBLOCKING_FILTER) {
+ p_hevc_pps->pps_beta_offset_div2 = 0;
+ p_hevc_pps->pps_tc_offset_div2 = 0;
+ }
+
+ zero_padding(*p_hevc_pps);
+ break;
+
+ case V4L2_CTRL_TYPE_HEVC_SLICE_PARAMS:
+ p_hevc_slice_params = p;
+
+ if (p_hevc_slice_params->num_active_dpb_entries >
+ V4L2_HEVC_DPB_ENTRIES_NUM_MAX)
+ return -EINVAL;
+
+ zero_padding(p_hevc_slice_params->pred_weight_table);
+
+ for (i = 0; i < p_hevc_slice_params->num_active_dpb_entries;
+ i++) {
+ struct v4l2_hevc_dpb_entry *dpb_entry =
+ &p_hevc_slice_params->dpb[i];
+
+ zero_padding(*dpb_entry);
+ }
+
+ zero_padding(*p_hevc_slice_params);
+ break;
+
+ case V4L2_CTRL_TYPE_AREA:
+ area = p;
+ if (!area->width || !area->height)
+ return -EINVAL;
+ break;
+
default:
return -EINVAL;
}
@@ -1840,7 +1955,7 @@ static int ptr_to_user(struct v4l2_ext_control *c,
u32 len;
if (ctrl->is_ptr && !ctrl->is_string)
- return copy_to_user(c->ptr, ptr.p, c->size) ?
+ return copy_to_user(c->ptr, ptr.p_const, c->size) ?
-EFAULT : 0;
switch (ctrl->type) {
@@ -1955,7 +2070,7 @@ static void ptr_to_ptr(struct v4l2_ctrl *ctrl,
{
if (ctrl == NULL)
return;
- memcpy(to.p, from.p, ctrl->elems * ctrl->elem_size);
+ memcpy(to.p, from.p_const, ctrl->elems * ctrl->elem_size);
}
/* Copy the new value to the current value. */
@@ -2354,7 +2469,8 @@ static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl,
s64 min, s64 max, u64 step, s64 def,
const u32 dims[V4L2_CTRL_MAX_DIMS], u32 elem_size,
u32 flags, const char * const *qmenu,
- const s64 *qmenu_int, void *priv)
+ const s64 *qmenu_int, const union v4l2_ctrl_ptr p_def,
+ void *priv)
{
struct v4l2_ctrl *ctrl;
unsigned sz_extra;
@@ -2421,6 +2537,18 @@ static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl,
case V4L2_CTRL_TYPE_VP8_FRAME_HEADER:
elem_size = sizeof(struct v4l2_ctrl_vp8_frame_header);
break;
+ case V4L2_CTRL_TYPE_HEVC_SPS:
+ elem_size = sizeof(struct v4l2_ctrl_hevc_sps);
+ break;
+ case V4L2_CTRL_TYPE_HEVC_PPS:
+ elem_size = sizeof(struct v4l2_ctrl_hevc_pps);
+ break;
+ case V4L2_CTRL_TYPE_HEVC_SLICE_PARAMS:
+ elem_size = sizeof(struct v4l2_ctrl_hevc_slice_params);
+ break;
+ case V4L2_CTRL_TYPE_AREA:
+ elem_size = sizeof(struct v4l2_area);
+ break;
default:
if (type < V4L2_CTRL_COMPOUND_TYPES)
elem_size = sizeof(s32);
@@ -2460,6 +2588,9 @@ static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl,
is_array)
sz_extra += 2 * tot_ctrl_size;
+ if (type >= V4L2_CTRL_COMPOUND_TYPES && p_def.p_const)
+ sz_extra += elem_size;
+
ctrl = kvzalloc(sizeof(*ctrl) + sz_extra, GFP_KERNEL);
if (ctrl == NULL) {
handler_set_err(hdl, -ENOMEM);
@@ -2503,6 +2634,12 @@ static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl,
ctrl->p_new.p = &ctrl->val;
ctrl->p_cur.p = &ctrl->cur.val;
}
+
+ if (type >= V4L2_CTRL_COMPOUND_TYPES && p_def.p_const) {
+ ctrl->p_def.p = ctrl->p_cur.p + tot_ctrl_size;
+ memcpy(ctrl->p_def.p, p_def.p_const, elem_size);
+ }
+
for (idx = 0; idx < elems; idx++) {
ctrl->type_ops->init(ctrl, idx, ctrl->p_cur);
ctrl->type_ops->init(ctrl, idx, ctrl->p_new);
@@ -2554,7 +2691,7 @@ struct v4l2_ctrl *v4l2_ctrl_new_custom(struct v4l2_ctrl_handler *hdl,
type, min, max,
is_menu ? cfg->menu_skip_mask : step, def,
cfg->dims, cfg->elem_size,
- flags, qmenu, qmenu_int, priv);
+ flags, qmenu, qmenu_int, cfg->p_def, priv);
if (ctrl)
ctrl->is_private = cfg->is_private;
return ctrl;
@@ -2579,7 +2716,7 @@ struct v4l2_ctrl *v4l2_ctrl_new_std(struct v4l2_ctrl_handler *hdl,
}
return v4l2_ctrl_new(hdl, ops, NULL, id, name, type,
min, max, step, def, NULL, 0,
- flags, NULL, NULL, NULL);
+ flags, NULL, NULL, ptr_null, NULL);
}
EXPORT_SYMBOL(v4l2_ctrl_new_std);
@@ -2612,7 +2749,7 @@ struct v4l2_ctrl *v4l2_ctrl_new_std_menu(struct v4l2_ctrl_handler *hdl,
}
return v4l2_ctrl_new(hdl, ops, NULL, id, name, type,
0, max, mask, def, NULL, 0,
- flags, qmenu, qmenu_int, NULL);
+ flags, qmenu, qmenu_int, ptr_null, NULL);
}
EXPORT_SYMBOL(v4l2_ctrl_new_std_menu);
@@ -2644,11 +2781,32 @@ struct v4l2_ctrl *v4l2_ctrl_new_std_menu_items(struct v4l2_ctrl_handler *hdl,
}
return v4l2_ctrl_new(hdl, ops, NULL, id, name, type,
0, max, mask, def, NULL, 0,
- flags, qmenu, NULL, NULL);
+ flags, qmenu, NULL, ptr_null, NULL);
}
EXPORT_SYMBOL(v4l2_ctrl_new_std_menu_items);
+/* Helper function for standard compound controls */
+struct v4l2_ctrl *v4l2_ctrl_new_std_compound(struct v4l2_ctrl_handler *hdl,
+ const struct v4l2_ctrl_ops *ops, u32 id,
+ const union v4l2_ctrl_ptr p_def)
+{
+ const char *name;
+ enum v4l2_ctrl_type type;
+ u32 flags;
+ s64 min, max, step, def;
+
+ v4l2_ctrl_fill(id, &name, &type, &min, &max, &step, &def, &flags);
+ if (type < V4L2_CTRL_COMPOUND_TYPES) {
+ handler_set_err(hdl, -EINVAL);
+ return NULL;
+ }
+ return v4l2_ctrl_new(hdl, ops, NULL, id, name, type,
+ min, max, step, def, NULL, 0,
+ flags, NULL, NULL, p_def, NULL);
+}
+EXPORT_SYMBOL(v4l2_ctrl_new_std_compound);
+
/* Helper function for standard integer menu controls */
struct v4l2_ctrl *v4l2_ctrl_new_int_menu(struct v4l2_ctrl_handler *hdl,
const struct v4l2_ctrl_ops *ops,
@@ -2669,7 +2827,7 @@ struct v4l2_ctrl *v4l2_ctrl_new_int_menu(struct v4l2_ctrl_handler *hdl,
}
return v4l2_ctrl_new(hdl, ops, NULL, id, name, type,
0, max, 0, def, NULL, 0,
- flags, NULL, qmenu_int, NULL);
+ flags, NULL, qmenu_int, ptr_null, NULL);
}
EXPORT_SYMBOL(v4l2_ctrl_new_int_menu);
@@ -3144,6 +3302,7 @@ static void v4l2_ctrl_request_queue(struct media_request_object *obj)
struct v4l2_ctrl_handler *prev_hdl = NULL;
struct v4l2_ctrl_ref *ref_ctrl, *ref_ctrl_prev = NULL;
+ mutex_lock(main_hdl->lock);
if (list_empty(&main_hdl->requests_queued))
goto queue;
@@ -3175,18 +3334,22 @@ static void v4l2_ctrl_request_queue(struct media_request_object *obj)
queue:
list_add_tail(&hdl->requests_queued, &main_hdl->requests_queued);
hdl->request_is_queued = true;
+ mutex_unlock(main_hdl->lock);
}
static void v4l2_ctrl_request_unbind(struct media_request_object *obj)
{
struct v4l2_ctrl_handler *hdl =
container_of(obj, struct v4l2_ctrl_handler, req_obj);
+ struct v4l2_ctrl_handler *main_hdl = obj->priv;
list_del_init(&hdl->requests);
+ mutex_lock(main_hdl->lock);
if (hdl->request_is_queued) {
list_del_init(&hdl->requests_queued);
hdl->request_is_queued = false;
}
+ mutex_unlock(main_hdl->lock);
}
static void v4l2_ctrl_request_release(struct media_request_object *obj)
@@ -4080,6 +4243,18 @@ int __v4l2_ctrl_s_ctrl_string(struct v4l2_ctrl *ctrl, const char *s)
}
EXPORT_SYMBOL(__v4l2_ctrl_s_ctrl_string);
+int __v4l2_ctrl_s_ctrl_area(struct v4l2_ctrl *ctrl,
+ const struct v4l2_area *area)
+{
+ lockdep_assert_held(ctrl->handler->lock);
+
+ /* It's a driver bug if this happens. */
+ WARN_ON(ctrl->type != V4L2_CTRL_TYPE_AREA);
+ *ctrl->p_new.p_area = *area;
+ return set_ctrl(NULL, ctrl, 0);
+}
+EXPORT_SYMBOL(__v4l2_ctrl_s_ctrl_area);
+
void v4l2_ctrl_request_complete(struct media_request *req,
struct v4l2_ctrl_handler *main_hdl)
{
@@ -4128,9 +4303,11 @@ void v4l2_ctrl_request_complete(struct media_request *req,
v4l2_ctrl_unlock(ctrl);
}
+ mutex_lock(main_hdl->lock);
WARN_ON(!hdl->request_is_queued);
list_del_init(&hdl->requests_queued);
hdl->request_is_queued = false;
+ mutex_unlock(main_hdl->lock);
media_request_object_complete(obj);
media_request_object_put(obj);
}
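
The new v4l2_ctrl_new_std_compound() helper gives compound controls a
per-control default, stored via p_def and applied by std_init_compound()
above. A sketch of a sensor driver registering the read-only
V4L2_CID_UNIT_CELL_SIZE control this way; the 1120x1120 cell size and the
my_sensor type are illustrative, and the union is built directly through the
p_const member this series introduces:

    static const struct v4l2_area unit_size = {
    	.width	= 1120,
    	.height	= 1120,
    };

    static int my_sensor_init_controls(struct my_sensor *sensor)
    {
    	union v4l2_ctrl_ptr p_def = { .p_const = &unit_size };

    	v4l2_ctrl_new_std_compound(&sensor->hdl, NULL,
    				   V4L2_CID_UNIT_CELL_SIZE, p_def);
    	return sensor->hdl.error;
    }
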
diff --git a/drivers/media/v4l2-core/v4l2-dev.c b/drivers/media/v4l2-core/v4l2-dev.c
index 4037689a945a..da42d172714a 100644
--- a/drivers/media/v4l2-core/v4l2-dev.c
+++ b/drivers/media/v4l2-core/v4l2-dev.c
@@ -533,13 +533,23 @@ static int get_index(struct video_device *vdev)
*/
static void determine_valid_ioctls(struct video_device *vdev)
{
+ const u32 vid_caps = V4L2_CAP_VIDEO_CAPTURE |
+ V4L2_CAP_VIDEO_CAPTURE_MPLANE |
+ V4L2_CAP_VIDEO_OUTPUT |
+ V4L2_CAP_VIDEO_OUTPUT_MPLANE |
+ V4L2_CAP_VIDEO_M2M | V4L2_CAP_VIDEO_M2M_MPLANE;
+ const u32 meta_caps = V4L2_CAP_META_CAPTURE |
+ V4L2_CAP_META_OUTPUT;
DECLARE_BITMAP(valid_ioctls, BASE_VIDIOC_PRIVATE);
const struct v4l2_ioctl_ops *ops = vdev->ioctl_ops;
- bool is_vid = vdev->vfl_type == VFL_TYPE_GRABBER;
+ bool is_vid = vdev->vfl_type == VFL_TYPE_GRABBER &&
+ (vdev->device_caps & vid_caps);
bool is_vbi = vdev->vfl_type == VFL_TYPE_VBI;
bool is_radio = vdev->vfl_type == VFL_TYPE_RADIO;
bool is_sdr = vdev->vfl_type == VFL_TYPE_SDR;
bool is_tch = vdev->vfl_type == VFL_TYPE_TOUCH;
+ bool is_meta = vdev->vfl_type == VFL_TYPE_GRABBER &&
+ (vdev->device_caps & meta_caps);
bool is_rx = vdev->vfl_dir != VFL_DIR_TX;
bool is_tx = vdev->vfl_dir != VFL_DIR_RX;
@@ -571,8 +581,10 @@ static void determine_valid_ioctls(struct video_device *vdev)
set_bit(_IOC_NR(VIDIOC_TRY_EXT_CTRLS), valid_ioctls);
if (vdev->ctrl_handler || ops->vidioc_querymenu)
set_bit(_IOC_NR(VIDIOC_QUERYMENU), valid_ioctls);
- SET_VALID_IOCTL(ops, VIDIOC_G_FREQUENCY, vidioc_g_frequency);
- SET_VALID_IOCTL(ops, VIDIOC_S_FREQUENCY, vidioc_s_frequency);
+ if (!is_tch) {
+ SET_VALID_IOCTL(ops, VIDIOC_G_FREQUENCY, vidioc_g_frequency);
+ SET_VALID_IOCTL(ops, VIDIOC_S_FREQUENCY, vidioc_s_frequency);
+ }
SET_VALID_IOCTL(ops, VIDIOC_LOG_STATUS, vidioc_log_status);
#ifdef CONFIG_VIDEO_ADV_DEBUG
set_bit(_IOC_NR(VIDIOC_DBG_G_CHIP_INFO), valid_ioctls);
@@ -586,40 +598,32 @@ static void determine_valid_ioctls(struct video_device *vdev)
if (ops->vidioc_enum_freq_bands || ops->vidioc_g_tuner || ops->vidioc_g_modulator)
set_bit(_IOC_NR(VIDIOC_ENUM_FREQ_BANDS), valid_ioctls);
- if (is_vid || is_tch) {
- /* video and metadata specific ioctls */
+ if (is_vid) {
+ /* video specific ioctls */
if ((is_rx && (ops->vidioc_enum_fmt_vid_cap ||
- ops->vidioc_enum_fmt_vid_overlay ||
- ops->vidioc_enum_fmt_meta_cap)) ||
- (is_tx && (ops->vidioc_enum_fmt_vid_out ||
- ops->vidioc_enum_fmt_meta_out)))
+ ops->vidioc_enum_fmt_vid_overlay)) ||
+ (is_tx && ops->vidioc_enum_fmt_vid_out))
set_bit(_IOC_NR(VIDIOC_ENUM_FMT), valid_ioctls);
if ((is_rx && (ops->vidioc_g_fmt_vid_cap ||
ops->vidioc_g_fmt_vid_cap_mplane ||
- ops->vidioc_g_fmt_vid_overlay ||
- ops->vidioc_g_fmt_meta_cap)) ||
+ ops->vidioc_g_fmt_vid_overlay)) ||
(is_tx && (ops->vidioc_g_fmt_vid_out ||
ops->vidioc_g_fmt_vid_out_mplane ||
- ops->vidioc_g_fmt_vid_out_overlay ||
- ops->vidioc_g_fmt_meta_out)))
+ ops->vidioc_g_fmt_vid_out_overlay)))
set_bit(_IOC_NR(VIDIOC_G_FMT), valid_ioctls);
if ((is_rx && (ops->vidioc_s_fmt_vid_cap ||
ops->vidioc_s_fmt_vid_cap_mplane ||
- ops->vidioc_s_fmt_vid_overlay ||
- ops->vidioc_s_fmt_meta_cap)) ||
+ ops->vidioc_s_fmt_vid_overlay)) ||
(is_tx && (ops->vidioc_s_fmt_vid_out ||
ops->vidioc_s_fmt_vid_out_mplane ||
- ops->vidioc_s_fmt_vid_out_overlay ||
- ops->vidioc_s_fmt_meta_out)))
+ ops->vidioc_s_fmt_vid_out_overlay)))
set_bit(_IOC_NR(VIDIOC_S_FMT), valid_ioctls);
if ((is_rx && (ops->vidioc_try_fmt_vid_cap ||
ops->vidioc_try_fmt_vid_cap_mplane ||
- ops->vidioc_try_fmt_vid_overlay ||
- ops->vidioc_try_fmt_meta_cap)) ||
+ ops->vidioc_try_fmt_vid_overlay)) ||
(is_tx && (ops->vidioc_try_fmt_vid_out ||
ops->vidioc_try_fmt_vid_out_mplane ||
- ops->vidioc_try_fmt_vid_out_overlay ||
- ops->vidioc_try_fmt_meta_out)))
+ ops->vidioc_try_fmt_vid_out_overlay)))
set_bit(_IOC_NR(VIDIOC_TRY_FMT), valid_ioctls);
SET_VALID_IOCTL(ops, VIDIOC_OVERLAY, vidioc_overlay);
SET_VALID_IOCTL(ops, VIDIOC_G_FBUF, vidioc_g_fbuf);
@@ -641,7 +645,21 @@ static void determine_valid_ioctls(struct video_device *vdev)
set_bit(_IOC_NR(VIDIOC_S_CROP), valid_ioctls);
SET_VALID_IOCTL(ops, VIDIOC_G_SELECTION, vidioc_g_selection);
SET_VALID_IOCTL(ops, VIDIOC_S_SELECTION, vidioc_s_selection);
- } else if (is_vbi) {
+ }
+ if (is_meta && is_rx) {
+ /* metadata capture specific ioctls */
+ SET_VALID_IOCTL(ops, VIDIOC_ENUM_FMT, vidioc_enum_fmt_meta_cap);
+ SET_VALID_IOCTL(ops, VIDIOC_G_FMT, vidioc_g_fmt_meta_cap);
+ SET_VALID_IOCTL(ops, VIDIOC_S_FMT, vidioc_s_fmt_meta_cap);
+ SET_VALID_IOCTL(ops, VIDIOC_TRY_FMT, vidioc_try_fmt_meta_cap);
+ } else if (is_meta && is_tx) {
+ /* metadata output specific ioctls */
+ SET_VALID_IOCTL(ops, VIDIOC_ENUM_FMT, vidioc_enum_fmt_meta_out);
+ SET_VALID_IOCTL(ops, VIDIOC_G_FMT, vidioc_g_fmt_meta_out);
+ SET_VALID_IOCTL(ops, VIDIOC_S_FMT, vidioc_s_fmt_meta_out);
+ SET_VALID_IOCTL(ops, VIDIOC_TRY_FMT, vidioc_try_fmt_meta_out);
+ }
+ if (is_vbi) {
/* vbi specific ioctls */
if ((is_rx && (ops->vidioc_g_fmt_vbi_cap ||
ops->vidioc_g_fmt_sliced_vbi_cap)) ||
@@ -659,30 +677,35 @@ static void determine_valid_ioctls(struct video_device *vdev)
ops->vidioc_try_fmt_sliced_vbi_out)))
set_bit(_IOC_NR(VIDIOC_TRY_FMT), valid_ioctls);
SET_VALID_IOCTL(ops, VIDIOC_G_SLICED_VBI_CAP, vidioc_g_sliced_vbi_cap);
+ } else if (is_tch) {
+ /* touch specific ioctls */
+ SET_VALID_IOCTL(ops, VIDIOC_ENUM_FMT, vidioc_enum_fmt_vid_cap);
+ SET_VALID_IOCTL(ops, VIDIOC_G_FMT, vidioc_g_fmt_vid_cap);
+ SET_VALID_IOCTL(ops, VIDIOC_S_FMT, vidioc_s_fmt_vid_cap);
+ SET_VALID_IOCTL(ops, VIDIOC_TRY_FMT, vidioc_try_fmt_vid_cap);
+ SET_VALID_IOCTL(ops, VIDIOC_ENUM_FRAMESIZES, vidioc_enum_framesizes);
+ SET_VALID_IOCTL(ops, VIDIOC_ENUM_FRAMEINTERVALS, vidioc_enum_frameintervals);
+ SET_VALID_IOCTL(ops, VIDIOC_ENUMINPUT, vidioc_enum_input);
+ SET_VALID_IOCTL(ops, VIDIOC_G_INPUT, vidioc_g_input);
+ SET_VALID_IOCTL(ops, VIDIOC_S_INPUT, vidioc_s_input);
+ SET_VALID_IOCTL(ops, VIDIOC_G_PARM, vidioc_g_parm);
+ SET_VALID_IOCTL(ops, VIDIOC_S_PARM, vidioc_s_parm);
} else if (is_sdr && is_rx) {
/* SDR receiver specific ioctls */
- if (ops->vidioc_enum_fmt_sdr_cap)
- set_bit(_IOC_NR(VIDIOC_ENUM_FMT), valid_ioctls);
- if (ops->vidioc_g_fmt_sdr_cap)
- set_bit(_IOC_NR(VIDIOC_G_FMT), valid_ioctls);
- if (ops->vidioc_s_fmt_sdr_cap)
- set_bit(_IOC_NR(VIDIOC_S_FMT), valid_ioctls);
- if (ops->vidioc_try_fmt_sdr_cap)
- set_bit(_IOC_NR(VIDIOC_TRY_FMT), valid_ioctls);
+ SET_VALID_IOCTL(ops, VIDIOC_ENUM_FMT, vidioc_enum_fmt_sdr_cap);
+ SET_VALID_IOCTL(ops, VIDIOC_G_FMT, vidioc_g_fmt_sdr_cap);
+ SET_VALID_IOCTL(ops, VIDIOC_S_FMT, vidioc_s_fmt_sdr_cap);
+ SET_VALID_IOCTL(ops, VIDIOC_TRY_FMT, vidioc_try_fmt_sdr_cap);
} else if (is_sdr && is_tx) {
/* SDR transmitter specific ioctls */
- if (ops->vidioc_enum_fmt_sdr_out)
- set_bit(_IOC_NR(VIDIOC_ENUM_FMT), valid_ioctls);
- if (ops->vidioc_g_fmt_sdr_out)
- set_bit(_IOC_NR(VIDIOC_G_FMT), valid_ioctls);
- if (ops->vidioc_s_fmt_sdr_out)
- set_bit(_IOC_NR(VIDIOC_S_FMT), valid_ioctls);
- if (ops->vidioc_try_fmt_sdr_out)
- set_bit(_IOC_NR(VIDIOC_TRY_FMT), valid_ioctls);
+ SET_VALID_IOCTL(ops, VIDIOC_ENUM_FMT, vidioc_enum_fmt_sdr_out);
+ SET_VALID_IOCTL(ops, VIDIOC_G_FMT, vidioc_g_fmt_sdr_out);
+ SET_VALID_IOCTL(ops, VIDIOC_S_FMT, vidioc_s_fmt_sdr_out);
+ SET_VALID_IOCTL(ops, VIDIOC_TRY_FMT, vidioc_try_fmt_sdr_out);
}
- if (is_vid || is_vbi || is_sdr || is_tch) {
- /* ioctls valid for video, metadata, vbi or sdr */
+ if (is_vid || is_vbi || is_sdr || is_tch || is_meta) {
+ /* ioctls valid for video, vbi, sdr, touch and metadata */
SET_VALID_IOCTL(ops, VIDIOC_REQBUFS, vidioc_reqbufs);
SET_VALID_IOCTL(ops, VIDIOC_QUERYBUF, vidioc_querybuf);
SET_VALID_IOCTL(ops, VIDIOC_QBUF, vidioc_qbuf);
@@ -694,8 +717,8 @@ static void determine_valid_ioctls(struct video_device *vdev)
SET_VALID_IOCTL(ops, VIDIOC_STREAMOFF, vidioc_streamoff);
}
- if (is_vid || is_vbi || is_tch) {
- /* ioctls valid for video or vbi */
+ if (is_vid || is_vbi || is_meta) {
+ /* ioctls valid for video, vbi and metadata */
if (ops->vidioc_s_std)
set_bit(_IOC_NR(VIDIOC_ENUMSTD), valid_ioctls);
SET_VALID_IOCTL(ops, VIDIOC_S_STD, vidioc_s_std);
@@ -719,8 +742,7 @@ static void determine_valid_ioctls(struct video_device *vdev)
SET_VALID_IOCTL(ops, VIDIOC_G_AUDOUT, vidioc_g_audout);
SET_VALID_IOCTL(ops, VIDIOC_S_AUDOUT, vidioc_s_audout);
}
- if (ops->vidioc_g_parm || (vdev->vfl_type == VFL_TYPE_GRABBER &&
- ops->vidioc_g_std))
+ if (ops->vidioc_g_parm || ops->vidioc_g_std)
set_bit(_IOC_NR(VIDIOC_G_PARM), valid_ioctls);
SET_VALID_IOCTL(ops, VIDIOC_S_PARM, vidioc_s_parm);
SET_VALID_IOCTL(ops, VIDIOC_S_DV_TIMINGS, vidioc_s_dv_timings);
@@ -734,7 +756,7 @@ static void determine_valid_ioctls(struct video_device *vdev)
SET_VALID_IOCTL(ops, VIDIOC_G_MODULATOR, vidioc_g_modulator);
SET_VALID_IOCTL(ops, VIDIOC_S_MODULATOR, vidioc_s_modulator);
}
- if (is_rx) {
+ if (is_rx && !is_tch) {
/* receiver only ioctls */
SET_VALID_IOCTL(ops, VIDIOC_G_TUNER, vidioc_g_tuner);
SET_VALID_IOCTL(ops, VIDIOC_S_TUNER, vidioc_s_tuner);
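
After this split a VFL_TYPE_GRABBER node is treated as a metadata device only
when device_caps says so, so drivers must advertise their capabilities before
registering. Roughly, for a metadata capture node (the ops name is
illustrative):

    vdev->vfl_type = VFL_TYPE_GRABBER;
    vdev->vfl_dir = VFL_DIR_RX;
    vdev->device_caps = V4L2_CAP_META_CAPTURE | V4L2_CAP_STREAMING;
    vdev->ioctl_ops = &my_meta_ioctl_ops;	/* vidioc_*_fmt_meta_cap */
    ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
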
diff --git a/drivers/media/v4l2-core/v4l2-dv-timings.c b/drivers/media/v4l2-core/v4l2-dv-timings.c
index 4f23e939ead0..230d65a64217 100644
--- a/drivers/media/v4l2-core/v4l2-dv-timings.c
+++ b/drivers/media/v4l2-core/v4l2-dv-timings.c
@@ -293,7 +293,7 @@ void v4l2_print_dv_timings(const char *dev_prefix, const char *prefix,
if (prefix == NULL)
prefix = "";
- pr_info("%s: %s%ux%u%s%u.%u (%ux%u)\n", dev_prefix, prefix,
+ pr_info("%s: %s%ux%u%s%u.%02u (%ux%u)\n", dev_prefix, prefix,
bt->width, bt->height, bt->interlaced ? "i" : "p",
fps / 100, fps % 100, htot, vtot);
@@ -757,7 +757,7 @@ bool v4l2_detect_gtf(unsigned frame_height,
pix_clk = pix_clk / GTF_PXL_CLK_GRAN * GTF_PXL_CLK_GRAN;
hsync = (frame_width * 8 + 50) / 100;
- hsync = ((hsync + GTF_CELL_GRAN / 2) / GTF_CELL_GRAN) * GTF_CELL_GRAN;
+ hsync = DIV_ROUND_CLOSEST(hsync, GTF_CELL_GRAN) * GTF_CELL_GRAN;
h_fp = h_blank / 2 - hsync;
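
The %02u change matters whenever the fractional part of the frame rate has a
leading zero, and DIV_ROUND_CLOSEST() replaces the open-coded round-to-nearest.
For example, with the rate stored in hundredths of a hertz:

    unsigned int fps = 2402;	/* 24.02 Hz */

    pr_info("%u.%u\n",   fps / 100, fps % 100);	/* old: "24.2" (wrong) */
    pr_info("%u.%02u\n", fps / 100, fps % 100);	/* new: "24.02" */
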
diff --git a/drivers/media/v4l2-core/v4l2-fwnode.c b/drivers/media/v4l2-core/v4l2-fwnode.c
index 3bd1888787eb..192cac076761 100644
--- a/drivers/media/v4l2-core/v4l2-fwnode.c
+++ b/drivers/media/v4l2-core/v4l2-fwnode.c
@@ -512,6 +512,7 @@ void v4l2_fwnode_endpoint_free(struct v4l2_fwnode_endpoint *vep)
return;
kfree(vep->link_frequencies);
+ vep->link_frequencies = NULL;
}
EXPORT_SYMBOL_GPL(v4l2_fwnode_endpoint_free);
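
Clearing the pointer makes the helper idempotent: before this change a second
call on the same endpoint would kfree() a dangling pointer, afterwards it is a
harmless kfree(NULL):

    v4l2_fwnode_endpoint_free(&vep);
    v4l2_fwnode_endpoint_free(&vep);	/* now a no-op */
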
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index 51b912743f0f..4e700583659b 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -932,12 +932,22 @@ static int check_ext_ctrls(struct v4l2_ext_controls *c, int allow_priv)
static int check_fmt(struct file *file, enum v4l2_buf_type type)
{
+ const u32 vid_caps = V4L2_CAP_VIDEO_CAPTURE |
+ V4L2_CAP_VIDEO_CAPTURE_MPLANE |
+ V4L2_CAP_VIDEO_OUTPUT |
+ V4L2_CAP_VIDEO_OUTPUT_MPLANE |
+ V4L2_CAP_VIDEO_M2M | V4L2_CAP_VIDEO_M2M_MPLANE;
+ const u32 meta_caps = V4L2_CAP_META_CAPTURE |
+ V4L2_CAP_META_OUTPUT;
struct video_device *vfd = video_devdata(file);
const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops;
- bool is_vid = vfd->vfl_type == VFL_TYPE_GRABBER;
+ bool is_vid = vfd->vfl_type == VFL_TYPE_GRABBER &&
+ (vfd->device_caps & vid_caps);
bool is_vbi = vfd->vfl_type == VFL_TYPE_VBI;
bool is_sdr = vfd->vfl_type == VFL_TYPE_SDR;
bool is_tch = vfd->vfl_type == VFL_TYPE_TOUCH;
+ bool is_meta = vfd->vfl_type == VFL_TYPE_GRABBER &&
+ (vfd->device_caps & meta_caps);
bool is_rx = vfd->vfl_dir != VFL_DIR_TX;
bool is_tx = vfd->vfl_dir != VFL_DIR_RX;
@@ -996,11 +1006,11 @@ static int check_fmt(struct file *file, enum v4l2_buf_type type)
return 0;
break;
case V4L2_BUF_TYPE_META_CAPTURE:
- if (is_vid && is_rx && ops->vidioc_g_fmt_meta_cap)
+ if (is_meta && is_rx && ops->vidioc_g_fmt_meta_cap)
return 0;
break;
case V4L2_BUF_TYPE_META_OUTPUT:
- if (is_vid && is_tx && ops->vidioc_g_fmt_meta_out)
+ if (is_meta && is_tx && ops->vidioc_g_fmt_meta_out)
return 0;
break;
default:
@@ -1330,6 +1340,7 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
case V4L2_META_FMT_VSP1_HGT: descr = "R-Car VSP1 2-D Histogram"; break;
case V4L2_META_FMT_UVC: descr = "UVC Payload Header Metadata"; break;
case V4L2_META_FMT_D4XX: descr = "Intel D4xx UVC Metadata"; break;
+ case V4L2_META_FMT_VIVID: descr = "Vivid Metadata"; break;
default:
/* Compressed formats */
@@ -1356,6 +1367,7 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
case V4L2_PIX_FMT_VP8_FRAME: descr = "VP8 Frame"; break;
case V4L2_PIX_FMT_VP9: descr = "VP9"; break;
case V4L2_PIX_FMT_HEVC: descr = "HEVC"; break; /* aka H.265 */
+ case V4L2_PIX_FMT_HEVC_SLICE: descr = "HEVC Parsed Slice Data"; break;
case V4L2_PIX_FMT_FWHT: descr = "FWHT"; break; /* used in vicodec */
case V4L2_PIX_FMT_FWHT_STATELESS: descr = "FWHT Stateless"; break; /* used in vicodec */
case V4L2_PIX_FMT_CPIA1: descr = "GSPCA CPiA YUV"; break;
@@ -1466,10 +1478,26 @@ static int v4l_enum_fmt(const struct v4l2_ioctl_ops *ops,
return ret;
}
+static void v4l_pix_format_touch(struct v4l2_pix_format *p)
+{
+ /*
+ * The v4l2_pix_format structure contains fields that make no sense for
+ * touch. Set them to default values in this case.
+ */
+
+ p->field = V4L2_FIELD_NONE;
+ p->colorspace = V4L2_COLORSPACE_RAW;
+ p->flags = 0;
+ p->ycbcr_enc = 0;
+ p->quantization = 0;
+ p->xfer_func = 0;
+}
+
static int v4l_g_fmt(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
struct v4l2_format *p = arg;
+ struct video_device *vfd = video_devdata(file);
int ret = check_fmt(file, p->type);
if (ret)
@@ -1507,6 +1535,8 @@ static int v4l_g_fmt(const struct v4l2_ioctl_ops *ops,
ret = ops->vidioc_g_fmt_vid_cap(file, fh, arg);
/* just in case the driver zeroed it again */
p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
+ if (vfd->vfl_type == VFL_TYPE_TOUCH)
+ v4l_pix_format_touch(&p->fmt.pix);
return ret;
case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
return ops->vidioc_g_fmt_vid_cap_mplane(file, fh, arg);
@@ -1544,21 +1574,6 @@ static int v4l_g_fmt(const struct v4l2_ioctl_ops *ops,
return -EINVAL;
}
-static void v4l_pix_format_touch(struct v4l2_pix_format *p)
-{
- /*
- * The v4l2_pix_format structure contains fields that make no sense for
- * touch. Set them to default values in this case.
- */
-
- p->field = V4L2_FIELD_NONE;
- p->colorspace = V4L2_COLORSPACE_RAW;
- p->flags = 0;
- p->ycbcr_enc = 0;
- p->quantization = 0;
- p->xfer_func = 0;
-}
-
static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
@@ -1602,12 +1617,12 @@ static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops,
case V4L2_BUF_TYPE_VBI_CAPTURE:
if (unlikely(!ops->vidioc_s_fmt_vbi_cap))
break;
- CLEAR_AFTER_FIELD(p, fmt.vbi);
+ CLEAR_AFTER_FIELD(p, fmt.vbi.flags);
return ops->vidioc_s_fmt_vbi_cap(file, fh, arg);
case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
if (unlikely(!ops->vidioc_s_fmt_sliced_vbi_cap))
break;
- CLEAR_AFTER_FIELD(p, fmt.sliced);
+ CLEAR_AFTER_FIELD(p, fmt.sliced.io_size);
return ops->vidioc_s_fmt_sliced_vbi_cap(file, fh, arg);
case V4L2_BUF_TYPE_VIDEO_OUTPUT:
if (unlikely(!ops->vidioc_s_fmt_vid_out))
@@ -1633,22 +1648,22 @@ static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops,
case V4L2_BUF_TYPE_VBI_OUTPUT:
if (unlikely(!ops->vidioc_s_fmt_vbi_out))
break;
- CLEAR_AFTER_FIELD(p, fmt.vbi);
+ CLEAR_AFTER_FIELD(p, fmt.vbi.flags);
return ops->vidioc_s_fmt_vbi_out(file, fh, arg);
case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
if (unlikely(!ops->vidioc_s_fmt_sliced_vbi_out))
break;
- CLEAR_AFTER_FIELD(p, fmt.sliced);
+ CLEAR_AFTER_FIELD(p, fmt.sliced.io_size);
return ops->vidioc_s_fmt_sliced_vbi_out(file, fh, arg);
case V4L2_BUF_TYPE_SDR_CAPTURE:
if (unlikely(!ops->vidioc_s_fmt_sdr_cap))
break;
- CLEAR_AFTER_FIELD(p, fmt.sdr);
+ CLEAR_AFTER_FIELD(p, fmt.sdr.buffersize);
return ops->vidioc_s_fmt_sdr_cap(file, fh, arg);
case V4L2_BUF_TYPE_SDR_OUTPUT:
if (unlikely(!ops->vidioc_s_fmt_sdr_out))
break;
- CLEAR_AFTER_FIELD(p, fmt.sdr);
+ CLEAR_AFTER_FIELD(p, fmt.sdr.buffersize);
return ops->vidioc_s_fmt_sdr_out(file, fh, arg);
case V4L2_BUF_TYPE_META_CAPTURE:
if (unlikely(!ops->vidioc_s_fmt_meta_cap))
@@ -1704,12 +1719,12 @@ static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops,
case V4L2_BUF_TYPE_VBI_CAPTURE:
if (unlikely(!ops->vidioc_try_fmt_vbi_cap))
break;
- CLEAR_AFTER_FIELD(p, fmt.vbi);
+ CLEAR_AFTER_FIELD(p, fmt.vbi.flags);
return ops->vidioc_try_fmt_vbi_cap(file, fh, arg);
case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
if (unlikely(!ops->vidioc_try_fmt_sliced_vbi_cap))
break;
- CLEAR_AFTER_FIELD(p, fmt.sliced);
+ CLEAR_AFTER_FIELD(p, fmt.sliced.io_size);
return ops->vidioc_try_fmt_sliced_vbi_cap(file, fh, arg);
case V4L2_BUF_TYPE_VIDEO_OUTPUT:
if (unlikely(!ops->vidioc_try_fmt_vid_out))
@@ -1735,22 +1750,22 @@ static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops,
case V4L2_BUF_TYPE_VBI_OUTPUT:
if (unlikely(!ops->vidioc_try_fmt_vbi_out))
break;
- CLEAR_AFTER_FIELD(p, fmt.vbi);
+ CLEAR_AFTER_FIELD(p, fmt.vbi.flags);
return ops->vidioc_try_fmt_vbi_out(file, fh, arg);
case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
if (unlikely(!ops->vidioc_try_fmt_sliced_vbi_out))
break;
- CLEAR_AFTER_FIELD(p, fmt.sliced);
+ CLEAR_AFTER_FIELD(p, fmt.sliced.io_size);
return ops->vidioc_try_fmt_sliced_vbi_out(file, fh, arg);
case V4L2_BUF_TYPE_SDR_CAPTURE:
if (unlikely(!ops->vidioc_try_fmt_sdr_cap))
break;
- CLEAR_AFTER_FIELD(p, fmt.sdr);
+ CLEAR_AFTER_FIELD(p, fmt.sdr.buffersize);
return ops->vidioc_try_fmt_sdr_cap(file, fh, arg);
case V4L2_BUF_TYPE_SDR_OUTPUT:
if (unlikely(!ops->vidioc_try_fmt_sdr_out))
break;
- CLEAR_AFTER_FIELD(p, fmt.sdr);
+ CLEAR_AFTER_FIELD(p, fmt.sdr.buffersize);
return ops->vidioc_try_fmt_sdr_out(file, fh, arg);
case V4L2_BUF_TYPE_META_CAPTURE:
if (unlikely(!ops->vidioc_try_fmt_meta_cap))
@@ -2637,7 +2652,7 @@ struct v4l2_ioctl_info {
/* Zero struct from after the field to the end */
#define INFO_FL_CLEAR(v4l2_struct, field) \
((offsetof(struct v4l2_struct, field) + \
- sizeof(((struct v4l2_struct *)0)->field)) << 16)
+ FIELD_SIZEOF(struct v4l2_struct, field)) << 16)
#define INFO_FL_CLEAR_MASK (_IOC_SIZEMASK << 16)
#define DEFINE_V4L_STUB_FUNC(_vidioc) \
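
The CLEAR_AFTER_FIELD() changes anchor the clearing on the last field the
ioctl actually consumes (e.g. fmt.vbi.flags) instead of on the union member
itself, so the unused tail of the format union is zeroed as well and cannot
carry stale data back to user space. The macro, roughly as defined earlier in
this file, zeroes everything in *p past 'field':

    #define CLEAR_AFTER_FIELD(p, field) \
    	memset((u8 *)(p) + offsetof(typeof(*(p)), field) + sizeof((p)->field), \
    	       0, sizeof(*(p)) - offsetof(typeof(*(p)), field) - sizeof((p)->field))
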
diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c
index 19937dd3c6f6..1afd9c6ad908 100644
--- a/drivers/media/v4l2-core/v4l2-mem2mem.c
+++ b/drivers/media/v4l2-core/v4l2-mem2mem.c
@@ -284,7 +284,8 @@ static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev)
static void __v4l2_m2m_try_queue(struct v4l2_m2m_dev *m2m_dev,
struct v4l2_m2m_ctx *m2m_ctx)
{
- unsigned long flags_job, flags_out, flags_cap;
+ unsigned long flags_job;
+ struct vb2_v4l2_buffer *dst, *src;
dprintk("Trying to schedule a job for m2m_ctx: %p\n", m2m_ctx);
@@ -307,20 +308,37 @@ static void __v4l2_m2m_try_queue(struct v4l2_m2m_dev *m2m_dev,
goto job_unlock;
}
- spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);
- if (list_empty(&m2m_ctx->out_q_ctx.rdy_queue)
- && !m2m_ctx->out_q_ctx.buffered) {
+ src = v4l2_m2m_next_src_buf(m2m_ctx);
+ dst = v4l2_m2m_next_dst_buf(m2m_ctx);
+ if (!src && !m2m_ctx->out_q_ctx.buffered) {
dprintk("No input buffers available\n");
- goto out_unlock;
+ goto job_unlock;
}
- spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
- if (list_empty(&m2m_ctx->cap_q_ctx.rdy_queue)
- && !m2m_ctx->cap_q_ctx.buffered) {
+ if (!dst && !m2m_ctx->cap_q_ctx.buffered) {
dprintk("No output buffers available\n");
- goto cap_unlock;
+ goto job_unlock;
}
- spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
- spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);
+
+ m2m_ctx->new_frame = true;
+
+ if (src && dst && dst->is_held &&
+ dst->vb2_buf.copied_timestamp &&
+ dst->vb2_buf.timestamp != src->vb2_buf.timestamp) {
+ dst->is_held = false;
+ v4l2_m2m_dst_buf_remove(m2m_ctx);
+ v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);
+ dst = v4l2_m2m_next_dst_buf(m2m_ctx);
+
+ if (!dst && !m2m_ctx->cap_q_ctx.buffered) {
+ dprintk("No output buffers available after returning held buffer\n");
+ goto job_unlock;
+ }
+ }
+
+ if (src && dst && (m2m_ctx->out_q_ctx.q.subsystem_flags &
+ VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF))
+ m2m_ctx->new_frame = !dst->vb2_buf.copied_timestamp ||
+ dst->vb2_buf.timestamp != src->vb2_buf.timestamp;
if (m2m_dev->m2m_ops->job_ready
&& (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) {
@@ -331,13 +349,6 @@ static void __v4l2_m2m_try_queue(struct v4l2_m2m_dev *m2m_dev,
list_add_tail(&m2m_ctx->queue, &m2m_dev->job_queue);
m2m_ctx->job_flags |= TRANS_QUEUED;
- spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
- return;
-
-cap_unlock:
- spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
-out_unlock:
- spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);
job_unlock:
spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
}
@@ -412,37 +423,97 @@ static void v4l2_m2m_cancel_job(struct v4l2_m2m_ctx *m2m_ctx)
}
}
-void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
- struct v4l2_m2m_ctx *m2m_ctx)
+/*
+ * Schedule the next job, called from v4l2_m2m_job_finish() or
+ * v4l2_m2m_buf_done_and_job_finish().
+ */
+static void v4l2_m2m_schedule_next_job(struct v4l2_m2m_dev *m2m_dev,
+ struct v4l2_m2m_ctx *m2m_ctx)
{
- unsigned long flags;
+ /*
+ * This instance might have more buffers ready, but since we do not
+ * allow more than one job on the job_queue per instance, each has
+ * to be scheduled separately after the previous one finishes.
+ */
+ __v4l2_m2m_try_queue(m2m_dev, m2m_ctx);
- spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
+ /*
+ * We might be running in atomic context,
+ * but the job must be run in non-atomic context.
+ */
+ schedule_work(&m2m_dev->job_work);
+}
+
+/*
+ * Assumes job_spinlock is held, called from v4l2_m2m_job_finish() or
+ * v4l2_m2m_buf_done_and_job_finish().
+ */
+static bool _v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
+ struct v4l2_m2m_ctx *m2m_ctx)
+{
if (!m2m_dev->curr_ctx || m2m_dev->curr_ctx != m2m_ctx) {
- spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
dprintk("Called by an instance not currently running\n");
- return;
+ return false;
}
list_del(&m2m_dev->curr_ctx->queue);
m2m_dev->curr_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
wake_up(&m2m_dev->curr_ctx->finished);
m2m_dev->curr_ctx = NULL;
+ return true;
+}
- spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
-
- /* This instance might have more buffers ready, but since we do not
- * allow more than one job on the job_queue per instance, each has
- * to be scheduled separately after the previous one finishes. */
- __v4l2_m2m_try_queue(m2m_dev, m2m_ctx);
+void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
+ struct v4l2_m2m_ctx *m2m_ctx)
+{
+ unsigned long flags;
+ bool schedule_next;
- /* We might be running in atomic context,
- * but the job must be run in non-atomic context.
+ /*
+ * This function should not be used for drivers that support
+ * holding capture buffers. Those should use
+ * v4l2_m2m_buf_done_and_job_finish() instead.
*/
- schedule_work(&m2m_dev->job_work);
+ WARN_ON(m2m_ctx->out_q_ctx.q.subsystem_flags &
+ VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF);
+ spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
+ schedule_next = _v4l2_m2m_job_finish(m2m_dev, m2m_ctx);
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+
+ if (schedule_next)
+ v4l2_m2m_schedule_next_job(m2m_dev, m2m_ctx);
}
EXPORT_SYMBOL(v4l2_m2m_job_finish);
+void v4l2_m2m_buf_done_and_job_finish(struct v4l2_m2m_dev *m2m_dev,
+ struct v4l2_m2m_ctx *m2m_ctx,
+ enum vb2_buffer_state state)
+{
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
+ bool schedule_next = false;
+ unsigned long flags;
+
+ spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
+ src_buf = v4l2_m2m_src_buf_remove(m2m_ctx);
+ dst_buf = v4l2_m2m_next_dst_buf(m2m_ctx);
+
+ if (WARN_ON(!src_buf || !dst_buf))
+ goto unlock;
+ v4l2_m2m_buf_done(src_buf, state);
+ dst_buf->is_held = src_buf->flags & V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF;
+ if (!dst_buf->is_held) {
+ v4l2_m2m_dst_buf_remove(m2m_ctx);
+ v4l2_m2m_buf_done(dst_buf, state);
+ }
+ schedule_next = _v4l2_m2m_job_finish(m2m_dev, m2m_ctx);
+unlock:
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+
+ if (schedule_next)
+ v4l2_m2m_schedule_next_job(m2m_dev, m2m_ctx);
+}
+EXPORT_SYMBOL(v4l2_m2m_buf_done_and_job_finish);
+
int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
struct v4l2_requestbuffers *reqbufs)
{
@@ -1154,6 +1225,59 @@ int v4l2_m2m_ioctl_try_decoder_cmd(struct file *file, void *fh,
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_try_decoder_cmd);
+int v4l2_m2m_ioctl_stateless_try_decoder_cmd(struct file *file, void *fh,
+ struct v4l2_decoder_cmd *dc)
+{
+ if (dc->cmd != V4L2_DEC_CMD_FLUSH)
+ return -EINVAL;
+
+ dc->flags = 0;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_stateless_try_decoder_cmd);
+
+int v4l2_m2m_ioctl_stateless_decoder_cmd(struct file *file, void *priv,
+ struct v4l2_decoder_cmd *dc)
+{
+ struct v4l2_fh *fh = file->private_data;
+ struct vb2_v4l2_buffer *out_vb, *cap_vb;
+ struct v4l2_m2m_dev *m2m_dev = fh->m2m_ctx->m2m_dev;
+ unsigned long flags;
+ int ret;
+
+ ret = v4l2_m2m_ioctl_stateless_try_decoder_cmd(file, priv, dc);
+ if (ret < 0)
+ return ret;
+
+ spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
+ out_vb = v4l2_m2m_last_src_buf(fh->m2m_ctx);
+ cap_vb = v4l2_m2m_last_dst_buf(fh->m2m_ctx);
+
+ /*
+ * If there is an out buffer pending, then clear any HOLD flag.
+ *
+ * By clearing this flag we ensure that when this output
+ * buffer is processed any held capture buffer will be released.
+ */
+ if (out_vb) {
+ out_vb->flags &= ~V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF;
+ } else if (cap_vb && cap_vb->is_held) {
+ /*
+ * If there were no output buffers, but there is a
+ * capture buffer that is held, then release that
+ * buffer.
+ */
+ cap_vb->is_held = false;
+ v4l2_m2m_dst_buf_remove(fh->m2m_ctx);
+ v4l2_m2m_buf_done(cap_vb, VB2_BUF_STATE_DONE);
+ }
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_stateless_decoder_cmd);
+
/*
* v4l2_file_operations helpers. It is assumed here same lock is used
* for the output and the capture buffer queue.
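
Drivers that opt in to hold-capture-buffer support are expected to complete
jobs with the new combined helper instead of v4l2_m2m_job_finish(). A minimal
sketch of a stateless decoder's completion path (the my_ctx type, which embeds
a struct v4l2_fh and stores the m2m_dev pointer, is illustrative):

    #include <linux/interrupt.h>
    #include <media/v4l2-mem2mem.h>

    static irqreturn_t my_decoder_irq(int irq, void *priv)
    {
    	struct my_ctx *ctx = priv;

    	/* Marks the output buffer done, and the capture buffer too
    	 * unless V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF was set on the
    	 * output buffer; then schedules the next job. */
    	v4l2_m2m_buf_done_and_job_finish(ctx->m2m_dev, ctx->fh.m2m_ctx,
    					 VB2_BUF_STATE_DONE);
    	return IRQ_HANDLED;
    }
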
diff --git a/drivers/media/v4l2-core/v4l2-subdev.c b/drivers/media/v4l2-core/v4l2-subdev.c
index f725cd9b66b9..9e987c0f840e 100644
--- a/drivers/media/v4l2-core/v4l2-subdev.c
+++ b/drivers/media/v4l2-core/v4l2-subdev.c
@@ -112,7 +112,7 @@ static int subdev_close(struct file *file)
return 0;
}
-static inline int check_which(__u32 which)
+static inline int check_which(u32 which)
{
if (which != V4L2_SUBDEV_FORMAT_TRY &&
which != V4L2_SUBDEV_FORMAT_ACTIVE)
@@ -121,7 +121,7 @@ static inline int check_which(__u32 which)
return 0;
}
-static inline int check_pad(struct v4l2_subdev *sd, __u32 pad)
+static inline int check_pad(struct v4l2_subdev *sd, u32 pad)
{
#if defined(CONFIG_MEDIA_CONTROLLER)
if (sd->entity.num_pads) {
@@ -136,7 +136,7 @@ static inline int check_pad(struct v4l2_subdev *sd, __u32 pad)
return 0;
}
-static int check_cfg(__u32 which, struct v4l2_subdev_pad_config *cfg)
+static int check_cfg(u32 which, struct v4l2_subdev_pad_config *cfg)
{
if (which == V4L2_SUBDEV_FORMAT_TRY && !cfg)
return -EINVAL;
diff --git a/drivers/memstick/core/Kconfig b/drivers/memstick/core/Kconfig
index 516f454fde14..08192fd70eb4 100644
--- a/drivers/memstick/core/Kconfig
+++ b/drivers/memstick/core/Kconfig
@@ -6,16 +6,16 @@
comment "MemoryStick drivers"
config MEMSTICK_UNSAFE_RESUME
- bool "Allow unsafe resume (DANGEROUS)"
- help
- If you say Y here, the MemoryStick layer will assume that all
- cards stayed in their respective slots during the suspend. The
- normal behaviour is to remove them at suspend and
- redetecting them at resume. Breaking this assumption will
- in most cases result in data corruption.
+ bool "Allow unsafe resume (DANGEROUS)"
+ help
+ If you say Y here, the MemoryStick layer will assume that all
+ cards stayed in their respective slots during the suspend. The
+ normal behaviour is to remove them at suspend and
+ redetect them at resume. Breaking this assumption will
+ in most cases result in data corruption.
- This option is usually just for embedded systems which use
- a MemoryStick card for rootfs. Most people should say N here.
+ This option is usually just for embedded systems which use
+ a MemoryStick card for rootfs. Most people should say N here.
config MSPRO_BLOCK
tristate "MemoryStick Pro block device driver"
diff --git a/drivers/memstick/host/Kconfig b/drivers/memstick/host/Kconfig
index 446c93ecef8f..4113343da056 100644
--- a/drivers/memstick/host/Kconfig
+++ b/drivers/memstick/host/Kconfig
@@ -18,7 +18,7 @@ config MEMSTICK_TIFM_MS
'Misc devices: TI Flash Media PCI74xx/PCI76xx host adapter support
(TIFM_7XX1)'.
- To compile this driver as a module, choose M here: the
+ To compile this driver as a module, choose M here: the
module will be called tifm_ms.
config MEMSTICK_JMICRON_38X
@@ -29,7 +29,7 @@ config MEMSTICK_JMICRON_38X
Say Y here if you want to be able to access MemoryStick cards with
the JMicron(R) JMB38X MemoryStick card reader.
- To compile this driver as a module, choose M here: the
+ To compile this driver as a module, choose M here: the
module will be called jmb38x_ms.
config MEMSTICK_R592
diff --git a/drivers/memstick/host/jmb38x_ms.c b/drivers/memstick/host/jmb38x_ms.c
index 64fff6abe60e..b36142df0295 100644
--- a/drivers/memstick/host/jmb38x_ms.c
+++ b/drivers/memstick/host/jmb38x_ms.c
@@ -433,13 +433,13 @@ static int jmb38x_ms_issue_cmd(struct memstick_host *msh)
writel(((1 << 16) & BLOCK_COUNT_MASK)
| (data_len & BLOCK_SIZE_MASK),
host->addr + BLOCK);
- t_val = readl(host->addr + INT_STATUS_ENABLE);
- t_val |= host->req->data_dir == READ
- ? INT_STATUS_FIFO_RRDY
- : INT_STATUS_FIFO_WRDY;
+ t_val = readl(host->addr + INT_STATUS_ENABLE);
+ t_val |= host->req->data_dir == READ
+ ? INT_STATUS_FIFO_RRDY
+ : INT_STATUS_FIFO_WRDY;
- writel(t_val, host->addr + INT_STATUS_ENABLE);
- writel(t_val, host->addr + INT_SIGNAL_ENABLE);
+ writel(t_val, host->addr + INT_STATUS_ENABLE);
+ writel(t_val, host->addr + INT_SIGNAL_ENABLE);
} else {
cmd &= ~(TPC_DATA_SEL | 0xf);
host->cmd_flags |= REG_DATA;
diff --git a/drivers/mfd/intel_soc_pmic_crc.c b/drivers/mfd/intel_soc_pmic_crc.c
index b6ab72fa0569..ab09b8225b76 100644
--- a/drivers/mfd/intel_soc_pmic_crc.c
+++ b/drivers/mfd/intel_soc_pmic_crc.c
@@ -75,7 +75,7 @@ static struct mfd_cell crystal_cove_byt_dev[] = {
.resources = gpio_resources,
},
{
- .name = "crystal_cove_pmic",
+ .name = "byt_crystal_cove_pmic",
},
{
.name = "crystal_cove_pwm",
diff --git a/drivers/mfd/tps6105x.c b/drivers/mfd/tps6105x.c
index 6ac3607a79c2..c906324d293e 100644
--- a/drivers/mfd/tps6105x.c
+++ b/drivers/mfd/tps6105x.c
@@ -91,6 +91,32 @@ static int tps6105x_add_device(struct tps6105x *tps6105x,
PLATFORM_DEVID_AUTO, cell, 1, NULL, 0, NULL);
}
+static struct tps6105x_platform_data *tps6105x_parse_dt(struct device *dev)
+{
+ struct device_node *np = dev->of_node;
+ struct tps6105x_platform_data *pdata;
+ struct device_node *child;
+
+ if (!np)
+ return ERR_PTR(-EINVAL);
+ if (of_get_available_child_count(np) > 1) {
+ dev_err(dev, "cannot support multiple operational modes");
+ return ERR_PTR(-EINVAL);
+ }
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return ERR_PTR(-ENOMEM);
+ pdata->mode = TPS6105X_MODE_SHUTDOWN;
+ for_each_available_child_of_node(np, child) {
+ if (child->name && !of_node_cmp(child->name, "regulator"))
+ pdata->mode = TPS6105X_MODE_VOLTAGE;
+ else if (child->name && !of_node_cmp(child->name, "led"))
+ pdata->mode = TPS6105X_MODE_TORCH;
+ }
+
+ return pdata;
+}
+
static int tps6105x_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -99,9 +125,11 @@ static int tps6105x_probe(struct i2c_client *client,
int ret;
pdata = dev_get_platdata(&client->dev);
- if (!pdata) {
- dev_err(&client->dev, "missing platform data\n");
- return -ENODEV;
+ if (!pdata)
+ pdata = tps6105x_parse_dt(&client->dev);
+ if (IS_ERR(pdata)) {
+		dev_err(&client->dev, "No platform data or DT found\n");
+ return PTR_ERR(pdata);
}
tps6105x = devm_kmalloc(&client->dev, sizeof(*tps6105x), GFP_KERNEL);
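The probe change above follows a common fallback pattern: prefer board-supplied platform data, fall back to device-tree parsing, and propagate an ERR_PTR. A minimal self-contained sketch of the same shape, with hypothetical example_* names (not part of the patch):

#include <linux/err.h>
#include <linux/i2c.h>

struct example_pdata { int mode; };

static struct example_pdata *example_parse_dt(struct device *dev)
{
	/* Hypothetical DT parsing; return ERR_PTR(-EINVAL) when absent. */
	return ERR_PTR(-EINVAL);
}

static int example_probe(struct i2c_client *client)
{
	struct example_pdata *pdata = dev_get_platdata(&client->dev);

	/* Prefer board-file platform data, fall back to the device tree. */
	if (!pdata)
		pdata = example_parse_dt(&client->dev);
	if (IS_ERR(pdata))
		return PTR_ERR(pdata);

	/* ... continue probing with valid pdata ... */
	return 0;
}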
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index c55b63750757..7f0d48f406e3 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -8,7 +8,6 @@ menu "Misc devices"
config SENSORS_LIS3LV02D
tristate
depends on INPUT
- select INPUT_POLLDEV
config AD525X_DPOT
tristate "Analog Devices Digital Potentiometers"
@@ -326,14 +325,14 @@ config SENSORS_TSL2550
will be called tsl2550.
config SENSORS_BH1770
- tristate "BH1770GLC / SFH7770 combined ALS - Proximity sensor"
- depends on I2C
- ---help---
- Say Y here if you want to build a driver for BH1770GLC (ROHM) or
+ tristate "BH1770GLC / SFH7770 combined ALS - Proximity sensor"
+ depends on I2C
+ ---help---
+ Say Y here if you want to build a driver for BH1770GLC (ROHM) or
SFH7770 (Osram) combined ambient light and proximity sensor chip.
- To compile this driver as a module, choose M here: the
- module will be called bh1770glc. If unsure, say N here.
+ To compile this driver as a module, choose M here: the
+ module will be called bh1770glc. If unsure, say N here.
config SENSORS_APDS990X
tristate "APDS990X combined als and proximity sensors"
@@ -438,8 +437,8 @@ config PCI_ENDPOINT_TEST
select CRC32
tristate "PCI Endpoint Test driver"
---help---
- Enable this configuration option to enable the host side test driver
- for PCI Endpoint.
+ Enable this configuration option to enable the host side test driver
+ for PCI Endpoint.
config XILINX_SDFEC
tristate "Xilinx SDFEC 16"
diff --git a/drivers/misc/atmel_tclib.c b/drivers/misc/atmel_tclib.c
index 08b5b639d77f..7de7840f613c 100644
--- a/drivers/misc/atmel_tclib.c
+++ b/drivers/misc/atmel_tclib.c
@@ -109,7 +109,6 @@ static int __init tc_probe(struct platform_device *pdev)
struct atmel_tc *tc;
struct clk *clk;
int irq;
- struct resource *r;
unsigned int i;
if (of_get_child_count(pdev->dev.of_node))
@@ -133,8 +132,7 @@ static int __init tc_probe(struct platform_device *pdev)
if (IS_ERR(tc->slow_clk))
return PTR_ERR(tc->slow_clk);
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- tc->regs = devm_ioremap_resource(&pdev->dev, r);
+ tc->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(tc->regs))
return PTR_ERR(tc->regs);
diff --git a/drivers/misc/cardreader/Makefile b/drivers/misc/cardreader/Makefile
index d9bff5a2217e..1f56267ed2f4 100644
--- a/drivers/misc/cardreader/Makefile
+++ b/drivers/misc/cardreader/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_MISC_ALCOR_PCI) += alcor_pci.o
obj-$(CONFIG_MISC_RTSX_PCI) += rtsx_pci.o
-rtsx_pci-objs := rtsx_pcr.o rts5209.o rts5229.o rtl8411.o rts5227.o rts5249.o rts5260.o
+rtsx_pci-objs := rtsx_pcr.o rts5209.o rts5229.o rtl8411.o rts5227.o rts5249.o rts5260.o rts5261.o
obj-$(CONFIG_MISC_RTSX_USB) += rtsx_usb.o
diff --git a/drivers/misc/cardreader/rts5260.c b/drivers/misc/cardreader/rts5260.c
index 40a6d199f2ea..4214f02a17fd 100644
--- a/drivers/misc/cardreader/rts5260.c
+++ b/drivers/misc/cardreader/rts5260.c
@@ -191,7 +191,6 @@ static int sd_set_sample_push_timing_sd30(struct rtsx_pcr *pcr)
static int rts5260_card_power_on(struct rtsx_pcr *pcr, int card)
{
- int err = 0;
struct rtsx_cr_option *option = &pcr->option;
if (option->ocp_en)
@@ -231,7 +230,7 @@ static int rts5260_card_power_on(struct rtsx_pcr *pcr, int card)
rtsx_pci_write_register(pcr, REG_PRE_RW_MODE, EN_INFINITE_MODE, 0);
- return err;
+ return 0;
}
static int rts5260_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
diff --git a/drivers/misc/cardreader/rts5261.c b/drivers/misc/cardreader/rts5261.c
new file mode 100644
index 000000000000..32dcec2e9dfd
--- /dev/null
+++ b/drivers/misc/cardreader/rts5261.c
@@ -0,0 +1,792 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Driver for Realtek PCI-Express card reader
+ *
+ * Copyright(c) 2018-2019 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * Author:
+ * Rui FENG <rui_feng@realsil.com.cn>
+ * Wei WANG <wei_wang@realsil.com.cn>
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/rtsx_pci.h>
+
+#include "rts5261.h"
+#include "rtsx_pcr.h"
+
+static u8 rts5261_get_ic_version(struct rtsx_pcr *pcr)
+{
+ u8 val;
+
+ rtsx_pci_read_register(pcr, DUMMY_REG_RESET_0, &val);
+ return val & IC_VERSION_MASK;
+}
+
+static void rts5261_fill_driving(struct rtsx_pcr *pcr, u8 voltage)
+{
+ u8 driving_3v3[4][3] = {
+ {0x13, 0x13, 0x13},
+ {0x96, 0x96, 0x96},
+ {0x7F, 0x7F, 0x7F},
+ {0x96, 0x96, 0x96},
+ };
+ u8 driving_1v8[4][3] = {
+ {0x99, 0x99, 0x99},
+ {0x3A, 0x3A, 0x3A},
+ {0xE6, 0xE6, 0xE6},
+ {0xB3, 0xB3, 0xB3},
+ };
+ u8 (*driving)[3], drive_sel;
+
+ if (voltage == OUTPUT_3V3) {
+ driving = driving_3v3;
+ drive_sel = pcr->sd30_drive_sel_3v3;
+ } else {
+ driving = driving_1v8;
+ drive_sel = pcr->sd30_drive_sel_1v8;
+ }
+
+ rtsx_pci_write_register(pcr, SD30_CLK_DRIVE_SEL,
+ 0xFF, driving[drive_sel][0]);
+
+ rtsx_pci_write_register(pcr, SD30_CMD_DRIVE_SEL,
+ 0xFF, driving[drive_sel][1]);
+
+ rtsx_pci_write_register(pcr, SD30_DAT_DRIVE_SEL,
+ 0xFF, driving[drive_sel][2]);
+}
+
+static void rtsx5261_fetch_vendor_settings(struct rtsx_pcr *pcr)
+{
+ u32 reg;
+ /* 0x814~0x817 */
+ rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG2, &reg);
+ pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, reg);
+
+ if (!rts5261_vendor_setting_valid(reg)) {
+ pcr_dbg(pcr, "skip fetch vendor setting\n");
+ return;
+ }
+
+ pcr->card_drive_sel &= 0x3F;
+ pcr->card_drive_sel |= rts5261_reg_to_card_drive_sel(reg);
+
+ if (rts5261_reg_check_reverse_socket(reg))
+ pcr->flags |= PCR_REVERSE_SOCKET;
+
+ /* 0x724~0x727 */
+ rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG1, &reg);
+ pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg);
+
+ pcr->aspm_en = rts5261_reg_to_aspm(reg);
+ pcr->sd30_drive_sel_1v8 = rts5261_reg_to_sd30_drive_sel_1v8(reg);
+ pcr->sd30_drive_sel_3v3 = rts5261_reg_to_sd30_drive_sel_3v3(reg);
+}
+
+static void rts5261_force_power_down(struct rtsx_pcr *pcr, u8 pm_state)
+{
+ /* Set relink_time to 0 */
+ rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 1, MASK_8_BIT_DEF, 0);
+ rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 2, MASK_8_BIT_DEF, 0);
+ rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 3,
+ RELINK_TIME_MASK, 0);
+
+ if (pm_state == HOST_ENTER_S3)
+ rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3,
+ D3_DELINK_MODE_EN, D3_DELINK_MODE_EN);
+
+ rtsx_pci_write_register(pcr, RTS5261_REG_FPDCTL,
+ SSC_POWER_DOWN, SSC_POWER_DOWN);
+}
+
+static int rts5261_enable_auto_blink(struct rtsx_pcr *pcr)
+{
+ return rtsx_pci_write_register(pcr, OLT_LED_CTL,
+ LED_SHINE_MASK, LED_SHINE_EN);
+}
+
+static int rts5261_disable_auto_blink(struct rtsx_pcr *pcr)
+{
+ return rtsx_pci_write_register(pcr, OLT_LED_CTL,
+ LED_SHINE_MASK, LED_SHINE_DISABLE);
+}
+
+static int rts5261_turn_on_led(struct rtsx_pcr *pcr)
+{
+ return rtsx_pci_write_register(pcr, GPIO_CTL,
+ 0x02, 0x02);
+}
+
+static int rts5261_turn_off_led(struct rtsx_pcr *pcr)
+{
+ return rtsx_pci_write_register(pcr, GPIO_CTL,
+ 0x02, 0x00);
+}
+
+/* SD Pull Control Enable:
+ * SD_DAT[3:0] ==> pull up
+ * SD_CD ==> pull up
+ * SD_WP ==> pull up
+ * SD_CMD ==> pull up
+ * SD_CLK ==> pull down
+ */
+static const u32 rts5261_sd_pull_ctl_enable_tbl[] = {
+ RTSX_REG_PAIR(CARD_PULL_CTL2, 0xAA),
+ RTSX_REG_PAIR(CARD_PULL_CTL3, 0xE9),
+ 0,
+};
+
+/* SD Pull Control Disable:
+ * SD_DAT[3:0] ==> pull down
+ * SD_CD ==> pull up
+ * SD_WP ==> pull down
+ * SD_CMD ==> pull down
+ * SD_CLK ==> pull down
+ */
+static const u32 rts5261_sd_pull_ctl_disable_tbl[] = {
+ RTSX_REG_PAIR(CARD_PULL_CTL2, 0x55),
+ RTSX_REG_PAIR(CARD_PULL_CTL3, 0xD5),
+ 0,
+};
+
+static int rts5261_sd_set_sample_push_timing_sd30(struct rtsx_pcr *pcr)
+{
+ rtsx_pci_write_register(pcr, SD_CFG1, SD_MODE_SELECT_MASK
+ | SD_ASYNC_FIFO_NOT_RST, SD_30_MODE | SD_ASYNC_FIFO_NOT_RST);
+ rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, CLK_LOW_FREQ);
+ rtsx_pci_write_register(pcr, CARD_CLK_SOURCE, 0xFF,
+ CRC_VAR_CLK0 | SD30_FIX_CLK | SAMPLE_VAR_CLK1);
+ rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, 0);
+
+ return 0;
+}
+
+static int rts5261_card_power_on(struct rtsx_pcr *pcr, int card)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+
+ if (option->ocp_en)
+ rtsx_pci_enable_ocp(pcr);
+
+ rtsx_pci_write_register(pcr, RTS5261_LDO1_CFG1,
+ RTS5261_LDO1_TUNE_MASK, RTS5261_LDO1_33);
+ rtsx_pci_write_register(pcr, RTS5261_LDO1233318_POW_CTL,
+ RTS5261_LDO1_POWERON, RTS5261_LDO1_POWERON);
+
+ rtsx_pci_write_register(pcr, RTS5261_LDO1233318_POW_CTL,
+ RTS5261_LDO3318_POWERON, RTS5261_LDO3318_POWERON);
+
+ msleep(20);
+
+ rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, SD_OUTPUT_EN);
+
+ /* Initialize SD_CFG1 register */
+ rtsx_pci_write_register(pcr, SD_CFG1, 0xFF,
+ SD_CLK_DIVIDE_128 | SD_20_MODE | SD_BUS_WIDTH_1BIT);
+
+ rtsx_pci_write_register(pcr, SD_SAMPLE_POINT_CTL,
+ 0xFF, SD20_RX_POS_EDGE);
+ rtsx_pci_write_register(pcr, SD_PUSH_POINT_CTL, 0xFF, 0);
+ rtsx_pci_write_register(pcr, CARD_STOP, SD_STOP | SD_CLR_ERR,
+ SD_STOP | SD_CLR_ERR);
+
+ /* Reset SD_CFG3 register */
+ rtsx_pci_write_register(pcr, SD_CFG3, SD30_CLK_END_EN, 0);
+ rtsx_pci_write_register(pcr, REG_SD_STOP_SDCLK_CFG,
+ SD30_CLK_STOP_CFG_EN | SD30_CLK_STOP_CFG1 |
+ SD30_CLK_STOP_CFG0, 0);
+
+ if (pcr->extra_caps & EXTRA_CAPS_SD_SDR50 ||
+ pcr->extra_caps & EXTRA_CAPS_SD_SDR104)
+ rts5261_sd_set_sample_push_timing_sd30(pcr);
+
+ return 0;
+}
+
+static int rts5261_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
+{
+ int err;
+ u16 val = 0;
+
+ rtsx_pci_write_register(pcr, RTS5261_CARD_PWR_CTL,
+ RTS5261_PUPDC, RTS5261_PUPDC);
+
+ switch (voltage) {
+ case OUTPUT_3V3:
+ rtsx_pci_read_phy_register(pcr, PHY_TUNE, &val);
+ val |= PHY_TUNE_SDBUS_33;
+ err = rtsx_pci_write_phy_register(pcr, PHY_TUNE, val);
+ if (err < 0)
+ return err;
+
+ rtsx_pci_write_register(pcr, RTS5261_DV3318_CFG,
+ RTS5261_DV3318_TUNE_MASK, RTS5261_DV3318_33);
+ rtsx_pci_write_register(pcr, SD_PAD_CTL,
+ SD_IO_USING_1V8, 0);
+ break;
+ case OUTPUT_1V8:
+ rtsx_pci_read_phy_register(pcr, PHY_TUNE, &val);
+ val &= ~PHY_TUNE_SDBUS_33;
+ err = rtsx_pci_write_phy_register(pcr, PHY_TUNE, val);
+ if (err < 0)
+ return err;
+
+ rtsx_pci_write_register(pcr, RTS5261_DV3318_CFG,
+ RTS5261_DV3318_TUNE_MASK, RTS5261_DV3318_18);
+ rtsx_pci_write_register(pcr, SD_PAD_CTL,
+ SD_IO_USING_1V8, SD_IO_USING_1V8);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* set pad drive */
+ rts5261_fill_driving(pcr, voltage);
+
+ return 0;
+}
+
+static void rts5261_stop_cmd(struct rtsx_pcr *pcr)
+{
+ rtsx_pci_writel(pcr, RTSX_HCBCTLR, STOP_CMD);
+ rtsx_pci_writel(pcr, RTSX_HDBCTLR, STOP_DMA);
+ rtsx_pci_write_register(pcr, RTS5260_DMA_RST_CTL_0,
+ RTS5260_DMA_RST | RTS5260_ADMA3_RST,
+ RTS5260_DMA_RST | RTS5260_ADMA3_RST);
+ rtsx_pci_write_register(pcr, RBCTL, RB_FLUSH, RB_FLUSH);
+}
+
+static void rts5261_card_before_power_off(struct rtsx_pcr *pcr)
+{
+ rts5261_stop_cmd(pcr);
+ rts5261_switch_output_voltage(pcr, OUTPUT_3V3);
+}
+
+static void rts5261_enable_ocp(struct rtsx_pcr *pcr)
+{
+ u8 val = 0;
+
+ val = SD_OCP_INT_EN | SD_DETECT_EN;
+ rtsx_pci_write_register(pcr, REG_OCPCTL, 0xFF, val);
+}
+
+static void rts5261_disable_ocp(struct rtsx_pcr *pcr)
+{
+ u8 mask = 0;
+
+ mask = SD_OCP_INT_EN | SD_DETECT_EN;
+ rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
+ rtsx_pci_write_register(pcr, RTS5261_LDO1_CFG0,
+ RTS5261_LDO1_OCP_EN | RTS5261_LDO1_OCP_LMT_EN, 0);
+}
+
+static int rts5261_card_power_off(struct rtsx_pcr *pcr, int card)
+{
+ int err = 0;
+
+ rts5261_card_before_power_off(pcr);
+ err = rtsx_pci_write_register(pcr, RTS5261_LDO1233318_POW_CTL,
+ RTS5261_LDO_POWERON_MASK, 0);
+
+ if (pcr->option.ocp_en)
+ rtsx_pci_disable_ocp(pcr);
+
+ return err;
+}
+
+static void rts5261_init_ocp(struct rtsx_pcr *pcr)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+
+ if (option->ocp_en) {
+ u8 mask, val;
+
+ rtsx_pci_write_register(pcr, RTS5261_LDO1_CFG0,
+ RTS5261_LDO1_OCP_EN | RTS5261_LDO1_OCP_LMT_EN,
+ RTS5261_LDO1_OCP_EN | RTS5261_LDO1_OCP_LMT_EN);
+
+ rtsx_pci_write_register(pcr, RTS5261_LDO1_CFG0,
+ RTS5261_LDO1_OCP_THD_MASK, option->sd_800mA_ocp_thd);
+
+ rtsx_pci_write_register(pcr, RTS5261_LDO1_CFG0,
+ RTS5261_LDO1_OCP_LMT_THD_MASK,
+ RTS5261_LDO1_LMT_THD_2000);
+
+ mask = SD_OCP_GLITCH_MASK;
+ val = pcr->hw_param.ocp_glitch;
+ rtsx_pci_write_register(pcr, REG_OCPGLITCH, mask, val);
+
+ rts5261_enable_ocp(pcr);
+ } else {
+ rtsx_pci_write_register(pcr, RTS5261_LDO1_CFG0,
+ RTS5261_LDO1_OCP_EN | RTS5261_LDO1_OCP_LMT_EN, 0);
+ }
+}
+
+static void rts5261_clear_ocpstat(struct rtsx_pcr *pcr)
+{
+ u8 mask = 0;
+ u8 val = 0;
+
+ mask = SD_OCP_INT_CLR | SD_OC_CLR;
+ val = SD_OCP_INT_CLR | SD_OC_CLR;
+
+ rtsx_pci_write_register(pcr, REG_OCPCTL, mask, val);
+
+ udelay(10);
+ rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
+}
+
+static void rts5261_process_ocp(struct rtsx_pcr *pcr)
+{
+ if (!pcr->option.ocp_en)
+ return;
+
+ rtsx_pci_get_ocpstat(pcr, &pcr->ocp_stat);
+
+ if (pcr->ocp_stat & (SD_OC_NOW | SD_OC_EVER)) {
+ rts5261_card_power_off(pcr, RTSX_SD_CARD);
+ rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, 0);
+ rts5261_clear_ocpstat(pcr);
+ pcr->ocp_stat = 0;
+ }
+}
+
+static int rts5261_init_from_hw(struct rtsx_pcr *pcr)
+{
+ int retval;
+ u32 lval, i;
+ u8 valid, efuse_valid, tmp;
+
+ rtsx_pci_write_register(pcr, RTS5261_REG_PME_FORCE_CTL,
+ REG_EFUSE_POR | REG_EFUSE_POWER_MASK,
+ REG_EFUSE_POR | REG_EFUSE_POWERON);
+ udelay(1);
+ rtsx_pci_write_register(pcr, RTS5261_EFUSE_ADDR,
+ RTS5261_EFUSE_ADDR_MASK, 0x00);
+ rtsx_pci_write_register(pcr, RTS5261_EFUSE_CTL,
+ RTS5261_EFUSE_ENABLE | RTS5261_EFUSE_MODE_MASK,
+ RTS5261_EFUSE_ENABLE);
+
+	/* Wait for the transfer to end */
+ for (i = 0; i < MAX_RW_REG_CNT; i++) {
+ rtsx_pci_read_register(pcr, RTS5261_EFUSE_CTL, &tmp);
+ if ((tmp & 0x80) == 0)
+ break;
+ }
+ rtsx_pci_read_register(pcr, RTS5261_EFUSE_READ_DATA, &tmp);
+ efuse_valid = ((tmp & 0x0C) >> 2);
+ pcr_dbg(pcr, "Load efuse valid: 0x%x\n", efuse_valid);
+
+ if (efuse_valid == 0) {
+ retval = rtsx_pci_read_config_dword(pcr,
+ PCR_SETTING_REG2, &lval);
+ if (retval != 0)
+ pcr_dbg(pcr, "read 0x814 DW fail\n");
+ pcr_dbg(pcr, "DW from 0x814: 0x%x\n", lval);
+ /* 0x816 */
+ valid = (u8)((lval >> 16) & 0x03);
+ pcr_dbg(pcr, "0x816: %d\n", valid);
+ }
+ rtsx_pci_write_register(pcr, RTS5261_REG_PME_FORCE_CTL,
+ REG_EFUSE_POR, 0);
+ pcr_dbg(pcr, "Disable efuse por!\n");
+
+ rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG2, &lval);
+ lval = lval & 0x00FFFFFF;
+ retval = rtsx_pci_write_config_dword(pcr, PCR_SETTING_REG2, lval);
+ if (retval != 0)
+ pcr_dbg(pcr, "write config fail\n");
+
+ return retval;
+}
+
+static void rts5261_init_from_cfg(struct rtsx_pcr *pcr)
+{
+ u32 lval;
+ struct rtsx_cr_option *option = &pcr->option;
+
+ rtsx_pci_read_config_dword(pcr, PCR_ASPM_SETTING_REG1, &lval);
+
+ if (lval & ASPM_L1_1_EN_MASK)
+ rtsx_set_dev_flag(pcr, ASPM_L1_1_EN);
+ else
+ rtsx_clear_dev_flag(pcr, ASPM_L1_1_EN);
+
+ if (lval & ASPM_L1_2_EN_MASK)
+ rtsx_set_dev_flag(pcr, ASPM_L1_2_EN);
+ else
+ rtsx_clear_dev_flag(pcr, ASPM_L1_2_EN);
+
+ if (lval & PM_L1_1_EN_MASK)
+ rtsx_set_dev_flag(pcr, PM_L1_1_EN);
+ else
+ rtsx_clear_dev_flag(pcr, PM_L1_1_EN);
+
+ if (lval & PM_L1_2_EN_MASK)
+ rtsx_set_dev_flag(pcr, PM_L1_2_EN);
+ else
+ rtsx_clear_dev_flag(pcr, PM_L1_2_EN);
+
+ rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, 0xFF, 0);
+ if (option->ltr_en) {
+ u16 val;
+
+ pcie_capability_read_word(pcr->pci, PCI_EXP_DEVCTL2, &val);
+ if (val & PCI_EXP_DEVCTL2_LTR_EN) {
+ option->ltr_enabled = true;
+ option->ltr_active = true;
+ rtsx_set_ltr_latency(pcr, option->ltr_active_latency);
+ } else {
+ option->ltr_enabled = false;
+ }
+ }
+
+ if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN
+ | PM_L1_1_EN | PM_L1_2_EN))
+ option->force_clkreq_0 = false;
+ else
+ option->force_clkreq_0 = true;
+}
+
+static int rts5261_extra_init_hw(struct rtsx_pcr *pcr)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+
+ rtsx_pci_write_register(pcr, RTS5261_AUTOLOAD_CFG1,
+ CD_RESUME_EN_MASK, CD_RESUME_EN_MASK);
+
+ rts5261_init_from_cfg(pcr);
+ rts5261_init_from_hw(pcr);
+
+ /* power off efuse */
+ rtsx_pci_write_register(pcr, RTS5261_REG_PME_FORCE_CTL,
+ REG_EFUSE_POWER_MASK, REG_EFUSE_POWEROFF);
+ rtsx_pci_write_register(pcr, L1SUB_CONFIG1,
+ AUX_CLK_ACTIVE_SEL_MASK, MAC_CKSW_DONE);
+ rtsx_pci_write_register(pcr, L1SUB_CONFIG3, 0xFF, 0);
+
+ rtsx_pci_write_register(pcr, RTS5261_AUTOLOAD_CFG4,
+ RTS5261_AUX_CLK_16M_EN, 0);
+
+ /* Release PRSNT# */
+ rtsx_pci_write_register(pcr, RTS5261_AUTOLOAD_CFG4,
+ RTS5261_FORCE_PRSNT_LOW, 0);
+ rtsx_pci_write_register(pcr, FUNC_FORCE_CTL,
+ FUNC_FORCE_UPME_XMT_DBG, FUNC_FORCE_UPME_XMT_DBG);
+
+ rtsx_pci_write_register(pcr, PCLK_CTL,
+ PCLK_MODE_SEL, PCLK_MODE_SEL);
+
+ rtsx_pci_write_register(pcr, PM_EVENT_DEBUG, PME_DEBUG_0, PME_DEBUG_0);
+ rtsx_pci_write_register(pcr, PM_CLK_FORCE_CTL, CLK_PM_EN, CLK_PM_EN);
+
+ /* LED shine disabled, set initial shine cycle period */
+ rtsx_pci_write_register(pcr, OLT_LED_CTL, 0x0F, 0x02);
+
+ /* Configure driving */
+ rts5261_fill_driving(pcr, OUTPUT_3V3);
+
+ /*
+	 * If force_clkreq_0 is set, the CLKREQ# pin will be forced to
+	 * drive low and we forcibly request the clock.
+ */
+ if (option->force_clkreq_0)
+ rtsx_pci_write_register(pcr, PETXCFG,
+ FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW);
+ else
+ rtsx_pci_write_register(pcr, PETXCFG,
+ FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH);
+
+ rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3, 0x10, 0x00);
+ rtsx_pci_write_register(pcr, RTS5261_REG_PME_FORCE_CTL,
+ FORCE_PM_CONTROL | FORCE_PM_VALUE, FORCE_PM_CONTROL);
+
+	/* Clear Enter RTD3_cold information */
+ rtsx_pci_write_register(pcr, RTS5261_FW_CTL,
+ RTS5261_INFORM_RTD3_COLD, 0);
+
+ return 0;
+}
+
+static void rts5261_enable_aspm(struct rtsx_pcr *pcr, bool enable)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+ u8 val = 0;
+
+ if (pcr->aspm_enabled == enable)
+ return;
+
+ if (option->dev_aspm_mode == DEV_ASPM_DYNAMIC) {
+ val = pcr->aspm_en;
+ rtsx_pci_update_cfg_byte(pcr, pcr->pcie_cap + PCI_EXP_LNKCTL,
+ ASPM_MASK_NEG, val);
+ } else if (option->dev_aspm_mode == DEV_ASPM_BACKDOOR) {
+ u8 mask = FORCE_ASPM_VAL_MASK | FORCE_ASPM_CTL0;
+
+ val = FORCE_ASPM_CTL0;
+ val |= (pcr->aspm_en & 0x02);
+ rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, mask, val);
+ val = pcr->aspm_en;
+ rtsx_pci_update_cfg_byte(pcr, pcr->pcie_cap + PCI_EXP_LNKCTL,
+ ASPM_MASK_NEG, val);
+ }
+ pcr->aspm_enabled = enable;
+}
+
+static void rts5261_disable_aspm(struct rtsx_pcr *pcr, bool enable)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+ u8 val = 0;
+
+ if (pcr->aspm_enabled == enable)
+ return;
+
+ if (option->dev_aspm_mode == DEV_ASPM_DYNAMIC) {
+ val = 0;
+ rtsx_pci_update_cfg_byte(pcr, pcr->pcie_cap + PCI_EXP_LNKCTL,
+ ASPM_MASK_NEG, val);
+ } else if (option->dev_aspm_mode == DEV_ASPM_BACKDOOR) {
+ u8 mask = FORCE_ASPM_VAL_MASK | FORCE_ASPM_CTL0;
+
+ val = 0;
+ rtsx_pci_update_cfg_byte(pcr, pcr->pcie_cap + PCI_EXP_LNKCTL,
+ ASPM_MASK_NEG, val);
+ val = FORCE_ASPM_CTL0;
+ rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, mask, val);
+ }
+ rtsx_pci_write_register(pcr, SD_CFG1, SD_ASYNC_FIFO_NOT_RST, 0);
+ udelay(10);
+ pcr->aspm_enabled = enable;
+}
+
+static void rts5261_set_aspm(struct rtsx_pcr *pcr, bool enable)
+{
+ if (enable)
+ rts5261_enable_aspm(pcr, true);
+ else
+ rts5261_disable_aspm(pcr, false);
+}
+
+static void rts5261_set_l1off_cfg_sub_d0(struct rtsx_pcr *pcr, int active)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+ int aspm_L1_1, aspm_L1_2;
+ u8 val = 0;
+
+ aspm_L1_1 = rtsx_check_dev_flag(pcr, ASPM_L1_1_EN);
+ aspm_L1_2 = rtsx_check_dev_flag(pcr, ASPM_L1_2_EN);
+
+ if (active) {
+ /* run, latency: 60us */
+ if (aspm_L1_1)
+ val = option->ltr_l1off_snooze_sspwrgate;
+ } else {
+ /* l1off, latency: 300us */
+ if (aspm_L1_2)
+ val = option->ltr_l1off_sspwrgate;
+ }
+
+ rtsx_set_l1off_sub(pcr, val);
+}
+
+static const struct pcr_ops rts5261_pcr_ops = {
+ .fetch_vendor_settings = rtsx5261_fetch_vendor_settings,
+ .turn_on_led = rts5261_turn_on_led,
+ .turn_off_led = rts5261_turn_off_led,
+ .extra_init_hw = rts5261_extra_init_hw,
+ .enable_auto_blink = rts5261_enable_auto_blink,
+ .disable_auto_blink = rts5261_disable_auto_blink,
+ .card_power_on = rts5261_card_power_on,
+ .card_power_off = rts5261_card_power_off,
+ .switch_output_voltage = rts5261_switch_output_voltage,
+ .force_power_down = rts5261_force_power_down,
+ .stop_cmd = rts5261_stop_cmd,
+ .set_aspm = rts5261_set_aspm,
+ .set_l1off_cfg_sub_d0 = rts5261_set_l1off_cfg_sub_d0,
+ .enable_ocp = rts5261_enable_ocp,
+ .disable_ocp = rts5261_disable_ocp,
+ .init_ocp = rts5261_init_ocp,
+ .process_ocp = rts5261_process_ocp,
+ .clear_ocpstat = rts5261_clear_ocpstat,
+};
+
+static inline u8 double_ssc_depth(u8 depth)
+{
+ return ((depth > 1) ? (depth - 1) : depth);
+}
+
+int rts5261_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
+ u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk)
+{
+ int err, clk;
+ u8 n, clk_divider, mcu_cnt, div;
+ static const u8 depth[] = {
+ [RTSX_SSC_DEPTH_4M] = RTS5261_SSC_DEPTH_4M,
+ [RTSX_SSC_DEPTH_2M] = RTS5261_SSC_DEPTH_2M,
+ [RTSX_SSC_DEPTH_1M] = RTS5261_SSC_DEPTH_1M,
+ [RTSX_SSC_DEPTH_500K] = RTS5261_SSC_DEPTH_512K,
+ };
+
+ if (initial_mode) {
+		/* We use around 250 kHz here in the initial stage */
+ clk_divider = SD_CLK_DIVIDE_128;
+ card_clock = 30000000;
+ } else {
+ clk_divider = SD_CLK_DIVIDE_0;
+ }
+ err = rtsx_pci_write_register(pcr, SD_CFG1,
+ SD_CLK_DIVIDE_MASK, clk_divider);
+ if (err < 0)
+ return err;
+
+ card_clock /= 1000000;
+ pcr_dbg(pcr, "Switch card clock to %dMHz\n", card_clock);
+
+ clk = card_clock;
+ if (!initial_mode && double_clk)
+ clk = card_clock * 2;
+ pcr_dbg(pcr, "Internal SSC clock: %dMHz (cur_clock = %d)\n",
+ clk, pcr->cur_clock);
+
+ if (clk == pcr->cur_clock)
+ return 0;
+
+ if (pcr->ops->conv_clk_and_div_n)
+ n = (u8)pcr->ops->conv_clk_and_div_n(clk, CLK_TO_DIV_N);
+ else
+ n = (u8)(clk - 4);
+ if ((clk <= 4) || (n > 396))
+ return -EINVAL;
+
+ mcu_cnt = (u8)(125/clk + 3);
+ if (mcu_cnt > 15)
+ mcu_cnt = 15;
+
+ div = CLK_DIV_1;
+ while ((n < MIN_DIV_N_PCR - 4) && (div < CLK_DIV_8)) {
+ if (pcr->ops->conv_clk_and_div_n) {
+ int dbl_clk = pcr->ops->conv_clk_and_div_n(n,
+ DIV_N_TO_CLK) * 2;
+ n = (u8)pcr->ops->conv_clk_and_div_n(dbl_clk,
+ CLK_TO_DIV_N);
+ } else {
+ n = (n + 4) * 2 - 4;
+ }
+ div++;
+ }
+
+ n = (n / 2);
+ pcr_dbg(pcr, "n = %d, div = %d\n", n, div);
+
+ ssc_depth = depth[ssc_depth];
+ if (double_clk)
+ ssc_depth = double_ssc_depth(ssc_depth);
+
+ if (ssc_depth) {
+ if (div == CLK_DIV_2) {
+ if (ssc_depth > 1)
+ ssc_depth -= 1;
+ else
+ ssc_depth = RTS5261_SSC_DEPTH_8M;
+ } else if (div == CLK_DIV_4) {
+ if (ssc_depth > 2)
+ ssc_depth -= 2;
+ else
+ ssc_depth = RTS5261_SSC_DEPTH_8M;
+ } else if (div == CLK_DIV_8) {
+ if (ssc_depth > 3)
+ ssc_depth -= 3;
+ else
+ ssc_depth = RTS5261_SSC_DEPTH_8M;
+ }
+ } else {
+ ssc_depth = 0;
+ }
+ pcr_dbg(pcr, "ssc_depth = %d\n", ssc_depth);
+
+ rtsx_pci_init_cmd(pcr);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL,
+ CLK_LOW_FREQ, CLK_LOW_FREQ);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_DIV,
+ 0xFF, (div << 4) | mcu_cnt);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, 0);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2,
+ SSC_DEPTH_MASK, ssc_depth);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_DIV_N_0, 0xFF, n);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, SSC_RSTB);
+ if (vpclk) {
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL,
+ PHASE_NOT_RESET, 0);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK1_CTL,
+ PHASE_NOT_RESET, 0);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL,
+ PHASE_NOT_RESET, PHASE_NOT_RESET);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK1_CTL,
+ PHASE_NOT_RESET, PHASE_NOT_RESET);
+ }
+
+ err = rtsx_pci_send_cmd(pcr, 2000);
+ if (err < 0)
+ return err;
+
+	/* Wait for the SSC clock to become stable */
+ udelay(SSC_CLOCK_STABLE_WAIT);
+ err = rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, 0);
+ if (err < 0)
+ return err;
+
+ pcr->cur_clock = clk;
+ return 0;
+}
+
+void rts5261_init_params(struct rtsx_pcr *pcr)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+ struct rtsx_hw_param *hw_param = &pcr->hw_param;
+
+ pcr->extra_caps = EXTRA_CAPS_SD_SDR50 | EXTRA_CAPS_SD_SDR104;
+ pcr->num_slots = 1;
+ pcr->ops = &rts5261_pcr_ops;
+
+ pcr->flags = 0;
+ pcr->card_drive_sel = RTSX_CARD_DRIVE_DEFAULT;
+ pcr->sd30_drive_sel_1v8 = CFG_DRIVER_TYPE_B;
+ pcr->sd30_drive_sel_3v3 = CFG_DRIVER_TYPE_B;
+ pcr->aspm_en = ASPM_L1_EN;
+ pcr->tx_initial_phase = SET_CLOCK_PHASE(20, 27, 16);
+ pcr->rx_initial_phase = SET_CLOCK_PHASE(24, 6, 5);
+
+ pcr->ic_version = rts5261_get_ic_version(pcr);
+ pcr->sd_pull_ctl_enable_tbl = rts5261_sd_pull_ctl_enable_tbl;
+ pcr->sd_pull_ctl_disable_tbl = rts5261_sd_pull_ctl_disable_tbl;
+
+ pcr->reg_pm_ctrl3 = RTS5261_AUTOLOAD_CFG3;
+
+ option->dev_flags = (LTR_L1SS_PWR_GATE_CHECK_CARD_EN
+ | LTR_L1SS_PWR_GATE_EN);
+ option->ltr_en = true;
+
+ /* init latency of active, idle, L1OFF to 60us, 300us, 3ms */
+ option->ltr_active_latency = LTR_ACTIVE_LATENCY_DEF;
+ option->ltr_idle_latency = LTR_IDLE_LATENCY_DEF;
+ option->ltr_l1off_latency = LTR_L1OFF_LATENCY_DEF;
+ option->l1_snooze_delay = L1_SNOOZE_DELAY_DEF;
+ option->ltr_l1off_sspwrgate = 0x7F;
+ option->ltr_l1off_snooze_sspwrgate = 0x78;
+ option->dev_aspm_mode = DEV_ASPM_DYNAMIC;
+
+ option->ocp_en = 1;
+ hw_param->interrupt_en |= SD_OC_INT_EN;
+ hw_param->ocp_glitch = SD_OCP_GLITCH_800U;
+ option->sd_800mA_ocp_thd = RTS5261_LDO1_OCP_THD_1040;
+}
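The clock-switch helper above derives the SSC divider from the requested frequency: start with n = clk - 4, double the internal clock until n is large enough, then halve n for the register write. A standalone sketch of that arithmetic (MIN_DIV_N_PCR assumed to be 80, and div modeled as a 0-based doubling count capped at three; both are assumptions, not values confirmed here). For clk = 50 MHz it yields n = 48, one division stage, mcu_cnt = 5:

#include <stdio.h>

int main(void)
{
	unsigned int clk = 50;	/* requested SSC clock in MHz */
	unsigned int n = clk - 4;
	unsigned int div = 0, mcu_cnt = 125 / clk + 3;

	if (mcu_cnt > 15)
		mcu_cnt = 15;
	while (n < 80 - 4 && div < 3) {
		n = (n + 4) * 2 - 4;	/* double the internal clock */
		div++;
	}
	printf("n=%u div=%u mcu_cnt=%u\n", n / 2, div, mcu_cnt);
	return 0;
}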
diff --git a/drivers/misc/cardreader/rts5261.h b/drivers/misc/cardreader/rts5261.h
new file mode 100644
index 000000000000..ebfdd236a553
--- /dev/null
+++ b/drivers/misc/cardreader/rts5261.h
@@ -0,0 +1,233 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Driver for Realtek PCI-Express card reader
+ *
+ * Copyright(c) 2018-2019 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * Author:
+ * Rui FENG <rui_feng@realsil.com.cn>
+ * Wei WANG <wei_wang@realsil.com.cn>
+ */
+#ifndef RTS5261_H
+#define RTS5261_H
+
+/* New additions for the RTS5261 */
+#define rts5261_vendor_setting_valid(reg) ((reg) & 0x010000)
+#define rts5261_reg_to_aspm(reg) (((reg) >> 28) ^ 0x03)
+#define rts5261_reg_check_reverse_socket(reg) ((reg) & 0x04)
+#define rts5261_reg_to_card_drive_sel(reg) ((((reg) >> 6) & 0x01) << 6)
+#define rts5261_reg_to_sd30_drive_sel_1v8(reg) (((reg) >> 22) ^ 0x03)
+#define rts5261_reg_to_sd30_drive_sel_3v3(reg) (((reg) >> 16) ^ 0x03)
+
+#define RTS5261_AUTOLOAD_CFG0 0xFF7B
+#define RTS5261_AUTOLOAD_CFG1 0xFF7C
+#define RTS5261_AUTOLOAD_CFG2 0xFF7D
+#define RTS5261_AUTOLOAD_CFG3 0xFF7E
+#define RTS5261_AUTOLOAD_CFG4 0xFF7F
+#define RTS5261_FORCE_PRSNT_LOW (1 << 6)
+#define RTS5261_AUX_CLK_16M_EN (1 << 5)
+
+#define RTS5261_REG_VREF 0xFE97
+#define RTS5261_PWD_SUSPND_EN (1 << 4)
+
+#define RTS5261_PAD_H3L1 0xFF79
+#define PAD_GPIO_H3L1 (1 << 3)
+
+/* SSC_CTL2 0xFC12 */
+#define RTS5261_SSC_DEPTH_MASK 0x07
+#define RTS5261_SSC_DEPTH_DISABLE		0x00
+#define RTS5261_SSC_DEPTH_8M 0x01
+#define RTS5261_SSC_DEPTH_4M 0x02
+#define RTS5261_SSC_DEPTH_2M 0x03
+#define RTS5261_SSC_DEPTH_1M 0x04
+#define RTS5261_SSC_DEPTH_512K 0x05
+#define RTS5261_SSC_DEPTH_256K 0x06
+#define RTS5261_SSC_DEPTH_128K 0x07
+
+/* eFuse control registers */
+#define RTS5261_EFUSE_CTL 0xFC30
+#define RTS5261_EFUSE_ENABLE 0x80
+/* EFUSE_MODE: 0=READ 1=PROGRAM */
+#define RTS5261_EFUSE_MODE_MASK 0x40
+#define RTS5261_EFUSE_PROGRAM 0x40
+
+#define RTS5261_EFUSE_ADDR 0xFC31
+#define RTS5261_EFUSE_ADDR_MASK 0x3F
+
+#define RTS5261_EFUSE_WRITE_DATA 0xFC32
+#define RTS5261_EFUSE_READ_DATA 0xFC34
+
+/* DMACTL 0xFE2C */
+#define RTS5261_DMA_PACK_SIZE_MASK 0xF0
+
+/* FW config info register */
+#define RTS5261_FW_CFG_INFO0 0xFF50
+#define RTS5261_FW_EXPRESS_TEST_MASK (0x01<<0)
+#define RTS5261_FW_EA_MODE_MASK (0x01<<5)
+
+/* FW config register */
+#define RTS5261_FW_CFG0 0xFF54
+#define RTS5261_FW_ENTER_EXPRESS (0x01<<0)
+
+#define RTS5261_FW_CFG1 0xFF55
+#define RTS5261_SYS_CLK_SEL_MCU_CLK (0x01<<7)
+#define RTS5261_CRC_CLK_SEL_MCU_CLK (0x01<<6)
+#define RTS5261_FAKE_MCU_CLOCK_GATING (0x01<<5)
+/* MCU_bus_mode_sel: 0 = real 8051, 1 = fake MCU */
+#define RTS5261_MCU_BUS_SEL_MASK (0x01<<4)
+/* MCU_clock_sel: VerA 00=aux16M 01=aux400K 1x=REFCLK100M */
+/* MCU_clock_sel: VerB 00=aux400K 01=aux16M 10=REFCLK100M */
+#define RTS5261_MCU_CLOCK_SEL_MASK (0x03<<2)
+#define RTS5261_MCU_CLOCK_SEL_16M (0x01<<2)
+#define RTS5261_MCU_CLOCK_GATING (0x01<<1)
+#define RTS5261_DRIVER_ENABLE_FW (0x01<<0)
+
+/* FW status register */
+#define RTS5261_FW_STATUS 0xFF56
+#define RTS5261_EXPRESS_LINK_FAIL_MASK (0x01<<7)
+
+/* FW control register */
+#define RTS5261_FW_CTL 0xFF5F
+#define RTS5261_INFORM_RTD3_COLD (0x01<<5)
+
+#define RTS5261_REG_FPDCTL 0xFF60
+
+#define RTS5261_REG_LDO12_CFG 0xFF6E
+#define RTS5261_LDO12_VO_TUNE_MASK (0x07<<1)
+#define RTS5261_LDO12_115 (0x03<<1)
+#define RTS5261_LDO12_120 (0x04<<1)
+#define RTS5261_LDO12_125 (0x05<<1)
+#define RTS5261_LDO12_130 (0x06<<1)
+#define RTS5261_LDO12_135 (0x07<<1)
+
+/* LDO control register */
+#define RTS5261_CARD_PWR_CTL 0xFD50
+#define RTS5261_SD_CLK_ISO (0x01<<7)
+#define RTS5261_PAD_SD_DAT_FW_CTRL (0x01<<6)
+#define RTS5261_PUPDC (0x01<<5)
+#define RTS5261_SD_CMD_ISO (0x01<<4)
+#define RTS5261_SD_DAT_ISO_MASK (0x0F<<0)
+
+#define RTS5261_LDO1233318_POW_CTL 0xFF70
+#define RTS5261_LDO3318_POWERON (0x01<<3)
+#define RTS5261_LDO3_POWERON (0x01<<2)
+#define RTS5261_LDO2_POWERON (0x01<<1)
+#define RTS5261_LDO1_POWERON (0x01<<0)
+#define RTS5261_LDO_POWERON_MASK (0x0F<<0)
+
+#define RTS5261_DV3318_CFG 0xFF71
+#define RTS5261_DV3318_TUNE_MASK (0x07<<4)
+#define RTS5261_DV3318_18 (0x02<<4)
+#define RTS5261_DV3318_19 (0x04<<4)
+#define RTS5261_DV3318_33 (0x07<<4)
+
+#define RTS5261_LDO1_CFG0 0xFF72
+#define RTS5261_LDO1_OCP_THD_MASK (0x07<<5)
+#define RTS5261_LDO1_OCP_EN (0x01<<4)
+#define RTS5261_LDO1_OCP_LMT_THD_MASK (0x03<<2)
+#define RTS5261_LDO1_OCP_LMT_EN (0x01<<1)
+
+/* CRD6603-433 190319 request changed */
+#define RTS5261_LDO1_OCP_THD_740 (0x00<<5)
+#define RTS5261_LDO1_OCP_THD_800 (0x01<<5)
+#define RTS5261_LDO1_OCP_THD_860 (0x02<<5)
+#define RTS5261_LDO1_OCP_THD_920 (0x03<<5)
+#define RTS5261_LDO1_OCP_THD_980 (0x04<<5)
+#define RTS5261_LDO1_OCP_THD_1040 (0x05<<5)
+#define RTS5261_LDO1_OCP_THD_1100 (0x06<<5)
+#define RTS5261_LDO1_OCP_THD_1160 (0x07<<5)
+
+#define RTS5261_LDO1_LMT_THD_450 (0x00<<2)
+#define RTS5261_LDO1_LMT_THD_1000 (0x01<<2)
+#define RTS5261_LDO1_LMT_THD_1500 (0x02<<2)
+#define RTS5261_LDO1_LMT_THD_2000 (0x03<<2)
+
+#define RTS5261_LDO1_CFG1 0xFF73
+#define RTS5261_LDO1_TUNE_MASK (0x07<<1)
+#define RTS5261_LDO1_18 (0x05<<1)
+#define RTS5261_LDO1_33 (0x07<<1)
+#define RTS5261_LDO1_PWD_MASK (0x01<<0)
+
+#define RTS5261_LDO2_CFG0 0xFF74
+#define RTS5261_LDO2_OCP_THD_MASK (0x07<<5)
+#define RTS5261_LDO2_OCP_EN (0x01<<4)
+#define RTS5261_LDO2_OCP_LMT_THD_MASK (0x03<<2)
+#define RTS5261_LDO2_OCP_LMT_EN (0x01<<1)
+
+#define RTS5261_LDO2_OCP_THD_620 (0x00<<5)
+#define RTS5261_LDO2_OCP_THD_650 (0x01<<5)
+#define RTS5261_LDO2_OCP_THD_680 (0x02<<5)
+#define RTS5261_LDO2_OCP_THD_720 (0x03<<5)
+#define RTS5261_LDO2_OCP_THD_750 (0x04<<5)
+#define RTS5261_LDO2_OCP_THD_780 (0x05<<5)
+#define RTS5261_LDO2_OCP_THD_810 (0x06<<5)
+#define RTS5261_LDO2_OCP_THD_840 (0x07<<5)
+
+#define RTS5261_LDO2_CFG1 0xFF75
+#define RTS5261_LDO2_TUNE_MASK (0x07<<1)
+#define RTS5261_LDO2_18 (0x05<<1)
+#define RTS5261_LDO2_33 (0x07<<1)
+#define RTS5261_LDO2_PWD_MASK (0x01<<0)
+
+#define RTS5261_LDO3_CFG0 0xFF76
+#define RTS5261_LDO3_OCP_THD_MASK (0x07<<5)
+#define RTS5261_LDO3_OCP_EN (0x01<<4)
+#define RTS5261_LDO3_OCP_LMT_THD_MASK (0x03<<2)
+#define RTS5261_LDO3_OCP_LMT_EN (0x01<<1)
+
+#define RTS5261_LDO3_OCP_THD_620 (0x00<<5)
+#define RTS5261_LDO3_OCP_THD_650 (0x01<<5)
+#define RTS5261_LDO3_OCP_THD_680 (0x02<<5)
+#define RTS5261_LDO3_OCP_THD_720 (0x03<<5)
+#define RTS5261_LDO3_OCP_THD_750 (0x04<<5)
+#define RTS5261_LDO3_OCP_THD_780 (0x05<<5)
+#define RTS5261_LDO3_OCP_THD_810 (0x06<<5)
+#define RTS5261_LDO3_OCP_THD_840 (0x07<<5)
+
+#define RTS5261_LDO3_CFG1 0xFF77
+#define RTS5261_LDO3_TUNE_MASK (0x07<<1)
+#define RTS5261_LDO3_18 (0x05<<1)
+#define RTS5261_LDO3_33 (0x07<<1)
+#define RTS5261_LDO3_PWD_MASK (0x01<<0)
+
+#define RTS5261_REG_PME_FORCE_CTL 0xFF78
+#define FORCE_PM_CONTROL 0x20
+#define FORCE_PM_VALUE 0x10
+#define REG_EFUSE_BYPASS 0x08
+#define REG_EFUSE_POR 0x04
+#define REG_EFUSE_POWER_MASK 0x03
+#define REG_EFUSE_POWERON 0x03
+#define REG_EFUSE_POWEROFF 0x00
+
+/* Single LUN, support SD/SD EXPRESS */
+#define DEFAULT_SINGLE 0
+#define SD_LUN 1
+#define SD_EXPRESS_LUN 2
+
+/* For Change_FPGA_SSCClock Function */
+#define MULTIPLY_BY_1 0x00
+#define MULTIPLY_BY_2 0x01
+#define MULTIPLY_BY_3 0x02
+#define MULTIPLY_BY_4 0x03
+#define MULTIPLY_BY_5 0x04
+#define MULTIPLY_BY_6 0x05
+#define MULTIPLY_BY_7 0x06
+#define MULTIPLY_BY_8 0x07
+#define MULTIPLY_BY_9 0x08
+#define MULTIPLY_BY_10 0x09
+
+#define DIVIDE_BY_2 0x01
+#define DIVIDE_BY_3 0x02
+#define DIVIDE_BY_4 0x03
+#define DIVIDE_BY_5 0x04
+#define DIVIDE_BY_6 0x05
+#define DIVIDE_BY_7 0x06
+#define DIVIDE_BY_8 0x07
+#define DIVIDE_BY_9 0x08
+#define DIVIDE_BY_10 0x09
+
+int rts5261_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
+ u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk);
+
+#endif /* RTS5261_H */
diff --git a/drivers/misc/cardreader/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c
index b4a66b64f742..fd7b2167103d 100644
--- a/drivers/misc/cardreader/rtsx_pcr.c
+++ b/drivers/misc/cardreader/rtsx_pcr.c
@@ -22,6 +22,7 @@
#include <asm/unaligned.h>
#include "rtsx_pcr.h"
+#include "rts5261.h"
static bool msi_en = true;
module_param(msi_en, bool, S_IRUGO | S_IWUSR);
@@ -34,9 +35,6 @@ static struct mfd_cell rtsx_pcr_cells[] = {
[RTSX_SD_CARD] = {
.name = DRV_NAME_RTSX_PCI_SDMMC,
},
- [RTSX_MS_CARD] = {
- .name = DRV_NAME_RTSX_PCI_MS,
- },
};
static const struct pci_device_id rtsx_pci_ids[] = {
@@ -51,6 +49,7 @@ static const struct pci_device_id rtsx_pci_ids[] = {
{ PCI_DEVICE(0x10EC, 0x524A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
{ PCI_DEVICE(0x10EC, 0x525A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
{ PCI_DEVICE(0x10EC, 0x5260), PCI_CLASS_OTHERS << 16, 0xFF0000 },
+ { PCI_DEVICE(0x10EC, 0x5261), PCI_CLASS_OTHERS << 16, 0xFF0000 },
{ 0, }
};
@@ -438,8 +437,16 @@ static void rtsx_pci_add_sg_tbl(struct rtsx_pcr *pcr,
if (end)
option |= RTSX_SG_END;
- val = ((u64)addr << 32) | ((u64)len << 12) | option;
+ if (PCI_PID(pcr) == PID_5261) {
+ if (len > 0xFFFF)
+ val = ((u64)addr << 32) | (((u64)len & 0xFFFF) << 16)
+ | (((u64)len >> 16) << 6) | option;
+ else
+ val = ((u64)addr << 32) | ((u64)len << 16) | option;
+ } else {
+ val = ((u64)addr << 32) | ((u64)len << 12) | option;
+ }
put_unaligned_le64(val, ptr);
pcr->sgi++;
}
@@ -684,7 +691,6 @@ int rtsx_pci_card_pull_ctl_disable(struct rtsx_pcr *pcr, int card)
else
return -EINVAL;
-
return rtsx_pci_set_pull_ctl(pcr, tbl);
}
EXPORT_SYMBOL_GPL(rtsx_pci_card_pull_ctl_disable);
@@ -735,6 +741,10 @@ int rtsx_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
[RTSX_SSC_DEPTH_250K] = SSC_DEPTH_250K,
};
+ if (PCI_PID(pcr) == PID_5261)
+ return rts5261_pci_switch_clock(pcr, card_clock,
+ ssc_depth, initial_mode, double_clk, vpclk);
+
if (initial_mode) {
 		/* We use around 250 kHz here in the initial stage */
clk_divider = SD_CLK_DIVIDE_128;
@@ -1253,7 +1263,15 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
rtsx_pci_enable_bus_int(pcr);
/* Power on SSC */
- err = rtsx_pci_write_register(pcr, FPDCTL, SSC_POWER_DOWN, 0);
+ if (PCI_PID(pcr) == PID_5261) {
+ /* Gating real mcu clock */
+ err = rtsx_pci_write_register(pcr, RTS5261_FW_CFG1,
+ RTS5261_MCU_CLOCK_GATING, 0);
+ err = rtsx_pci_write_register(pcr, RTS5261_REG_FPDCTL,
+ SSC_POWER_DOWN, 0);
+ } else {
+ err = rtsx_pci_write_register(pcr, FPDCTL, SSC_POWER_DOWN, 0);
+ }
if (err < 0)
return err;
@@ -1283,7 +1301,12 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
/* Enable SSC Clock */
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1,
0xFF, SSC_8X_EN | SSC_SEL_4M);
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF, 0x12);
+ if (PCI_PID(pcr) == PID_5261)
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF,
+ RTS5261_SSC_DEPTH_2M);
+ else
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF, 0x12);
+
/* Disable cd_pwr_save */
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CHANGE_LINK_STATE, 0x16, 0x10);
/* Clear Link Ready Interrupt */
@@ -1314,6 +1337,7 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
case PID_524A:
case PID_525A:
case PID_5260:
+ case PID_5261:
rtsx_pci_write_register(pcr, PM_CLK_FORCE_CTL, 1, 1);
break;
default:
@@ -1393,9 +1417,14 @@ static int rtsx_pci_init_chip(struct rtsx_pcr *pcr)
case 0x5286:
rtl8402_init_params(pcr);
break;
+
case 0x5260:
rts5260_init_params(pcr);
break;
+
+ case 0x5261:
+ rts5261_init_params(pcr);
+ break;
}
pcr_dbg(pcr, "PID: 0x%04x, IC version: 0x%02x\n",
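The scatter-gather change above encodes the segment length differently on the RTS5261: older readers pack the length at bit 12, while the RTS5261 packs the low 16 bits of the length at bit 16 and moves any overflow bits down to bit 6. A standalone sketch of the two layouts (illustrative only):

#include <stdint.h>
#include <stdio.h>

static uint64_t sg_val_legacy(uint64_t addr, uint64_t len, uint64_t opt)
{
	return (addr << 32) | (len << 12) | opt;
}

static uint64_t sg_val_rts5261(uint64_t addr, uint64_t len, uint64_t opt)
{
	if (len > 0xFFFF)	/* overflow bits move down to bit 6 */
		return (addr << 32) | ((len & 0xFFFF) << 16) |
		       ((len >> 16) << 6) | opt;
	return (addr << 32) | (len << 16) | opt;
}

int main(void)
{
	printf("legacy  %#llx\n",
	       (unsigned long long)sg_val_legacy(0x1000, 512, 1));
	printf("rts5261 %#llx\n",
	       (unsigned long long)sg_val_rts5261(0x1000, 512, 1));
	return 0;
}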
diff --git a/drivers/misc/cardreader/rtsx_pcr.h b/drivers/misc/cardreader/rtsx_pcr.h
index 98f729263dc1..ed391df52f4f 100644
--- a/drivers/misc/cardreader/rtsx_pcr.h
+++ b/drivers/misc/cardreader/rtsx_pcr.h
@@ -53,6 +53,7 @@ void rts524a_init_params(struct rtsx_pcr *pcr);
void rts525a_init_params(struct rtsx_pcr *pcr);
void rtl8411b_init_params(struct rtsx_pcr *pcr);
void rts5260_init_params(struct rtsx_pcr *pcr);
+void rts5261_init_params(struct rtsx_pcr *pcr);
static inline u8 map_sd_drive(int idx)
{
diff --git a/drivers/misc/eeprom/eeprom.c b/drivers/misc/eeprom/eeprom.c
index 2cfe3d4ae144..226b5efa6a77 100644
--- a/drivers/misc/eeprom/eeprom.c
+++ b/drivers/misc/eeprom/eeprom.c
@@ -175,6 +175,10 @@ static int eeprom_probe(struct i2c_client *client,
}
}
+	/* Let users know they are using a deprecated driver */
+ dev_notice(&client->dev,
+ "eeprom driver is deprecated, please use at24 instead\n");
+
/* create the sysfs eeprom file */
return sysfs_create_bin_file(&client->dev.kobj, &eeprom_attr);
}
diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
index 1b1a794d639d..ae4ee27a63c4 100644
--- a/drivers/misc/fastrpc.c
+++ b/drivers/misc/fastrpc.c
@@ -32,8 +32,9 @@
#define FASTRPC_CTX_MAX (256)
#define FASTRPC_INIT_HANDLE 1
#define FASTRPC_CTXID_MASK (0xFF0)
-#define INIT_FILELEN_MAX (64 * 1024 * 1024)
+#define INIT_FILELEN_MAX (2 * 1024 * 1024)
#define FASTRPC_DEVICE_NAME "fastrpc"
+#define ADSP_MMAP_ADD_PAGES 0x1000
 /* Retrieves the number of input buffers from the scalars parameter */
#define REMOTE_SCALARS_INBUFS(sc) (((sc) >> 16) & 0x0ff)
@@ -66,6 +67,8 @@
/* Remote Method id table */
#define FASTRPC_RMID_INIT_ATTACH 0
#define FASTRPC_RMID_INIT_RELEASE 1
+#define FASTRPC_RMID_INIT_MMAP 4
+#define FASTRPC_RMID_INIT_MUNMAP 5
#define FASTRPC_RMID_INIT_CREATE 6
#define FASTRPC_RMID_INIT_CREATE_ATTR 7
#define FASTRPC_RMID_INIT_CREATE_STATIC 8
@@ -89,6 +92,23 @@ struct fastrpc_remote_arg {
u64 len;
};
+struct fastrpc_mmap_rsp_msg {
+ u64 vaddr;
+};
+
+struct fastrpc_mmap_req_msg {
+ s32 pgid;
+ u32 flags;
+ u64 vaddr;
+ s32 num;
+};
+
+struct fastrpc_munmap_req_msg {
+ s32 pgid;
+ u64 vaddr;
+ u64 size;
+};
+
struct fastrpc_msg {
int pid; /* process group id */
int tid; /* thread id */
@@ -123,6 +143,9 @@ struct fastrpc_buf {
/* Lock for dma buf attachments */
struct mutex lock;
struct list_head attachments;
+ /* mmap support */
+ struct list_head node; /* list of user requested mmaps */
+ uintptr_t raddr;
};
struct fastrpc_dma_buf_attachment {
@@ -192,6 +215,7 @@ struct fastrpc_user {
struct list_head user;
struct list_head maps;
struct list_head pending;
+ struct list_head mmaps;
struct fastrpc_channel_ctx *cctx;
struct fastrpc_session_ctx *sctx;
@@ -269,6 +293,7 @@ static int fastrpc_buf_alloc(struct fastrpc_user *fl, struct device *dev,
return -ENOMEM;
INIT_LIST_HEAD(&buf->attachments);
+ INIT_LIST_HEAD(&buf->node);
mutex_init(&buf->lock);
buf->fl = fl;
@@ -276,6 +301,7 @@ static int fastrpc_buf_alloc(struct fastrpc_user *fl, struct device *dev,
buf->phys = 0;
buf->size = size;
buf->dev = dev;
+ buf->raddr = 0;
buf->virt = dma_alloc_coherent(dev, buf->size, (dma_addr_t *)&buf->phys,
GFP_KERNEL);
@@ -934,8 +960,13 @@ static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel,
if (err)
goto bail;
- /* Wait for remote dsp to respond or time out */
- err = wait_for_completion_interruptible(&ctx->work);
+ if (kernel) {
+ if (!wait_for_completion_timeout(&ctx->work, 10 * HZ))
+ err = -ETIMEDOUT;
+ } else {
+ err = wait_for_completion_interruptible(&ctx->work);
+ }
+
if (err)
goto bail;
@@ -954,12 +985,13 @@ static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel,
}
bail:
- /* We are done with this compute context, remove it from pending list */
- spin_lock(&fl->lock);
- list_del(&ctx->node);
- spin_unlock(&fl->lock);
- fastrpc_context_put(ctx);
-
+ if (err != -ERESTARTSYS && err != -ETIMEDOUT) {
+ /* We are done with this compute context */
+ spin_lock(&fl->lock);
+ list_del(&ctx->node);
+ spin_unlock(&fl->lock);
+ fastrpc_context_put(ctx);
+ }
if (err)
dev_dbg(fl->sctx->dev, "Error: Invoke Failed %d\n", err);
@@ -1131,6 +1163,7 @@ static int fastrpc_device_release(struct inode *inode, struct file *file)
struct fastrpc_channel_ctx *cctx = fl->cctx;
struct fastrpc_invoke_ctx *ctx, *n;
struct fastrpc_map *map, *m;
+ struct fastrpc_buf *buf, *b;
unsigned long flags;
fastrpc_release_current_dsp_process(fl);
@@ -1152,6 +1185,11 @@ static int fastrpc_device_release(struct inode *inode, struct file *file)
fastrpc_map_put(map);
}
+ list_for_each_entry_safe(buf, b, &fl->mmaps, node) {
+ list_del(&buf->node);
+ fastrpc_buf_free(buf);
+ }
+
fastrpc_session_free(cctx, fl->sctx);
fastrpc_channel_ctx_put(cctx);
@@ -1180,6 +1218,7 @@ static int fastrpc_device_open(struct inode *inode, struct file *filp)
mutex_init(&fl->mutex);
INIT_LIST_HEAD(&fl->pending);
INIT_LIST_HEAD(&fl->maps);
+ INIT_LIST_HEAD(&fl->mmaps);
INIT_LIST_HEAD(&fl->user);
fl->tgid = current->tgid;
fl->cctx = cctx;
@@ -1285,6 +1324,148 @@ static int fastrpc_invoke(struct fastrpc_user *fl, char __user *argp)
return err;
}
+static int fastrpc_req_munmap_impl(struct fastrpc_user *fl,
+ struct fastrpc_req_munmap *req)
+{
+ struct fastrpc_invoke_args args[1] = { [0] = { 0 } };
+ struct fastrpc_buf *buf, *b;
+ struct fastrpc_munmap_req_msg req_msg;
+ struct device *dev = fl->sctx->dev;
+ int err;
+ u32 sc;
+
+ spin_lock(&fl->lock);
+ list_for_each_entry_safe(buf, b, &fl->mmaps, node) {
+ if ((buf->raddr == req->vaddrout) && (buf->size == req->size))
+ break;
+ buf = NULL;
+ }
+ spin_unlock(&fl->lock);
+
+ if (!buf) {
+ dev_err(dev, "mmap not in list\n");
+ return -EINVAL;
+ }
+
+ req_msg.pgid = fl->tgid;
+ req_msg.size = buf->size;
+ req_msg.vaddr = buf->raddr;
+
+ args[0].ptr = (u64) (uintptr_t) &req_msg;
+ args[0].length = sizeof(req_msg);
+
+ sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_MUNMAP, 1, 0);
+ err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc,
+ &args[0]);
+ if (!err) {
+		dev_dbg(dev, "unmap\tpt 0x%09lx OK\n", buf->raddr);
+ spin_lock(&fl->lock);
+ list_del(&buf->node);
+ spin_unlock(&fl->lock);
+ fastrpc_buf_free(buf);
+ } else {
+		dev_err(dev, "unmap\tpt 0x%09lx ERROR\n", buf->raddr);
+ }
+
+ return err;
+}
+
+static int fastrpc_req_munmap(struct fastrpc_user *fl, char __user *argp)
+{
+ struct fastrpc_req_munmap req;
+
+ if (copy_from_user(&req, argp, sizeof(req)))
+ return -EFAULT;
+
+ return fastrpc_req_munmap_impl(fl, &req);
+}
+
+static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp)
+{
+ struct fastrpc_invoke_args args[3] = { [0 ... 2] = { 0 } };
+ struct fastrpc_buf *buf = NULL;
+ struct fastrpc_mmap_req_msg req_msg;
+ struct fastrpc_mmap_rsp_msg rsp_msg;
+ struct fastrpc_req_munmap req_unmap;
+ struct fastrpc_phy_page pages;
+ struct fastrpc_req_mmap req;
+ struct device *dev = fl->sctx->dev;
+ int err;
+ u32 sc;
+
+ if (copy_from_user(&req, argp, sizeof(req)))
+ return -EFAULT;
+
+ if (req.flags != ADSP_MMAP_ADD_PAGES) {
+ dev_err(dev, "flag not supported 0x%x\n", req.flags);
+ return -EINVAL;
+ }
+
+ if (req.vaddrin) {
+ dev_err(dev, "adding user allocated pages is not supported\n");
+ return -EINVAL;
+ }
+
+ err = fastrpc_buf_alloc(fl, fl->sctx->dev, req.size, &buf);
+ if (err) {
+ dev_err(dev, "failed to allocate buffer\n");
+ return err;
+ }
+
+ req_msg.pgid = fl->tgid;
+ req_msg.flags = req.flags;
+ req_msg.vaddr = req.vaddrin;
+ req_msg.num = sizeof(pages);
+
+ args[0].ptr = (u64) (uintptr_t) &req_msg;
+ args[0].length = sizeof(req_msg);
+
+ pages.addr = buf->phys;
+ pages.size = buf->size;
+
+ args[1].ptr = (u64) (uintptr_t) &pages;
+ args[1].length = sizeof(pages);
+
+ args[2].ptr = (u64) (uintptr_t) &rsp_msg;
+ args[2].length = sizeof(rsp_msg);
+
+ sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_MMAP, 2, 1);
+ err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc,
+ &args[0]);
+ if (err) {
+ dev_err(dev, "mmap error (len 0x%08llx)\n", buf->size);
+ goto err_invoke;
+ }
+
+	/* update the buffer so the memory can later be unmapped on the DSP */
+ buf->raddr = (uintptr_t) rsp_msg.vaddr;
+
+ /* let the client know the address to use */
+ req.vaddrout = rsp_msg.vaddr;
+
+ spin_lock(&fl->lock);
+ list_add_tail(&buf->node, &fl->mmaps);
+ spin_unlock(&fl->lock);
+
+ if (copy_to_user((void __user *)argp, &req, sizeof(req))) {
+ /* unmap the memory and release the buffer */
+ req_unmap.vaddrout = buf->raddr;
+ req_unmap.size = buf->size;
+ fastrpc_req_munmap_impl(fl, &req_unmap);
+ return -EFAULT;
+ }
+
+ dev_dbg(dev, "mmap\t\tpt 0x%09lx OK [len 0x%08llx]\n",
+ buf->raddr, buf->size);
+
+ return 0;
+
+err_invoke:
+ fastrpc_buf_free(buf);
+
+ return err;
+}
+
static long fastrpc_device_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
@@ -1305,6 +1486,12 @@ static long fastrpc_device_ioctl(struct file *file, unsigned int cmd,
case FASTRPC_IOCTL_ALLOC_DMA_BUFF:
err = fastrpc_dmabuf_alloc(fl, argp);
break;
+ case FASTRPC_IOCTL_MMAP:
+ err = fastrpc_req_mmap(fl, argp);
+ break;
+ case FASTRPC_IOCTL_MUNMAP:
+ err = fastrpc_req_munmap(fl, argp);
+ break;
default:
err = -ENOTTY;
break;
@@ -1430,8 +1617,8 @@ static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev)
return -ENOMEM;
data->miscdev.minor = MISC_DYNAMIC_MINOR;
- data->miscdev.name = kasprintf(GFP_KERNEL, "fastrpc-%s",
- domains[domain_id]);
+ data->miscdev.name = devm_kasprintf(rdev, GFP_KERNEL, "fastrpc-%s",
+ domains[domain_id]);
data->miscdev.fops = &fastrpc_fops;
err = misc_register(&data->miscdev);
if (err)
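The struct and field names below are taken from the patch itself; the uapi header path and the device node are assumptions. A sketch of how a client might exercise the new mmap/munmap ioctls, growing the remote heap by one page and unmapping it again:

#include <stdio.h>
#include <sys/ioctl.h>
#include <misc/fastrpc.h>	/* assumed uapi header location */

#define ADSP_MMAP_ADD_PAGES 0x1000	/* matches the driver definition */

static int grow_and_shrink(int fd)
{
	struct fastrpc_req_mmap mreq = {
		.flags = ADSP_MMAP_ADD_PAGES,
		.vaddrin = 0,	/* user-allocated pages are not supported */
		.size = 4096,
	};
	struct fastrpc_req_munmap ureq;

	if (ioctl(fd, FASTRPC_IOCTL_MMAP, &mreq) < 0)
		return -1;
	printf("remote vaddr 0x%llx\n", (unsigned long long)mreq.vaddrout);

	ureq.vaddrout = mreq.vaddrout;
	ureq.size = mreq.size;
	return ioctl(fd, FASTRPC_IOCTL_MUNMAP, &ureq);
}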
diff --git a/drivers/misc/habanalabs/command_submission.c b/drivers/misc/habanalabs/command_submission.c
index a9ac045dcfde..8850f475a413 100644
--- a/drivers/misc/habanalabs/command_submission.c
+++ b/drivers/misc/habanalabs/command_submission.c
@@ -65,6 +65,18 @@ static void cs_put(struct hl_cs *cs)
kref_put(&cs->refcount, cs_do_release);
}
+static bool is_cb_patched(struct hl_device *hdev, struct hl_cs_job *job)
+{
+ /*
+	 * A patched CB is created for external queue jobs, and for H/W queue
+	 * jobs if the user CB was allocated by the driver and the MMU is disabled.
+ */
+ return (job->queue_type == QUEUE_TYPE_EXT ||
+ (job->queue_type == QUEUE_TYPE_HW &&
+ job->is_kernel_allocated_cb &&
+ !hdev->mmu_enable));
+}
+
/*
* cs_parser - parse the user command submission
*
@@ -91,11 +103,13 @@ static int cs_parser(struct hl_fpriv *hpriv, struct hl_cs_job *job)
parser.patched_cb = NULL;
parser.user_cb = job->user_cb;
parser.user_cb_size = job->user_cb_size;
- parser.ext_queue = job->ext_queue;
+ parser.queue_type = job->queue_type;
+ parser.is_kernel_allocated_cb = job->is_kernel_allocated_cb;
job->patched_cb = NULL;
rc = hdev->asic_funcs->cs_parser(hdev, &parser);
- if (job->ext_queue) {
+
+ if (is_cb_patched(hdev, job)) {
if (!rc) {
job->patched_cb = parser.patched_cb;
job->job_cb_size = parser.patched_cb_size;
@@ -124,7 +138,7 @@ static void free_job(struct hl_device *hdev, struct hl_cs_job *job)
{
struct hl_cs *cs = job->cs;
- if (job->ext_queue) {
+ if (is_cb_patched(hdev, job)) {
hl_userptr_delete_list(hdev, &job->userptr_list);
/*
@@ -140,6 +154,19 @@ static void free_job(struct hl_device *hdev, struct hl_cs_job *job)
}
}
+	/* For H/W queue jobs, if a user CB was allocated by the driver and the
+	 * MMU is enabled, the user CB isn't released in cs_parser() and thus
+	 * should be released here.
+ */
+ if (job->queue_type == QUEUE_TYPE_HW &&
+ job->is_kernel_allocated_cb && hdev->mmu_enable) {
+ spin_lock(&job->user_cb->lock);
+ job->user_cb->cs_cnt--;
+ spin_unlock(&job->user_cb->lock);
+
+ hl_cb_put(job->user_cb);
+ }
+
/*
* This is the only place where there can be multiple threads
* modifying the list at the same time
@@ -150,7 +177,8 @@ static void free_job(struct hl_device *hdev, struct hl_cs_job *job)
hl_debugfs_remove_job(hdev, job);
- if (job->ext_queue)
+ if (job->queue_type == QUEUE_TYPE_EXT ||
+ job->queue_type == QUEUE_TYPE_HW)
cs_put(cs);
kfree(job);
@@ -387,18 +415,13 @@ static void job_wq_completion(struct work_struct *work)
free_job(hdev, job);
}
-static struct hl_cb *validate_queue_index(struct hl_device *hdev,
- struct hl_cb_mgr *cb_mgr,
- struct hl_cs_chunk *chunk,
- bool *ext_queue)
+static int validate_queue_index(struct hl_device *hdev,
+ struct hl_cs_chunk *chunk,
+ enum hl_queue_type *queue_type,
+ bool *is_kernel_allocated_cb)
{
struct asic_fixed_properties *asic = &hdev->asic_prop;
struct hw_queue_properties *hw_queue_prop;
- u32 cb_handle;
- struct hl_cb *cb;
-
- /* Assume external queue */
- *ext_queue = true;
hw_queue_prop = &asic->hw_queues_props[chunk->queue_index];
@@ -406,20 +429,29 @@ static struct hl_cb *validate_queue_index(struct hl_device *hdev,
(hw_queue_prop->type == QUEUE_TYPE_NA)) {
dev_err(hdev->dev, "Queue index %d is invalid\n",
chunk->queue_index);
- return NULL;
+ return -EINVAL;
}
if (hw_queue_prop->driver_only) {
dev_err(hdev->dev,
"Queue index %d is restricted for the kernel driver\n",
chunk->queue_index);
- return NULL;
- } else if (hw_queue_prop->type == QUEUE_TYPE_INT) {
- *ext_queue = false;
- return (struct hl_cb *) (uintptr_t) chunk->cb_handle;
+ return -EINVAL;
}
- /* Retrieve CB object */
+ *queue_type = hw_queue_prop->type;
+ *is_kernel_allocated_cb = !!hw_queue_prop->requires_kernel_cb;
+
+ return 0;
+}
+
+static struct hl_cb *get_cb_from_cs_chunk(struct hl_device *hdev,
+ struct hl_cb_mgr *cb_mgr,
+ struct hl_cs_chunk *chunk)
+{
+ struct hl_cb *cb;
+ u32 cb_handle;
+
cb_handle = (u32) (chunk->cb_handle >> PAGE_SHIFT);
cb = hl_cb_get(hdev, cb_mgr, cb_handle);
@@ -444,7 +476,8 @@ release_cb:
return NULL;
}
-struct hl_cs_job *hl_cs_allocate_job(struct hl_device *hdev, bool ext_queue)
+struct hl_cs_job *hl_cs_allocate_job(struct hl_device *hdev,
+ enum hl_queue_type queue_type, bool is_kernel_allocated_cb)
{
struct hl_cs_job *job;
@@ -452,12 +485,14 @@ struct hl_cs_job *hl_cs_allocate_job(struct hl_device *hdev, bool ext_queue)
if (!job)
return NULL;
- job->ext_queue = ext_queue;
+ job->queue_type = queue_type;
+ job->is_kernel_allocated_cb = is_kernel_allocated_cb;
- if (job->ext_queue) {
+ if (is_cb_patched(hdev, job))
INIT_LIST_HEAD(&job->userptr_list);
+
+ if (job->queue_type == QUEUE_TYPE_EXT)
INIT_WORK(&job->finish_work, job_wq_completion);
- }
return job;
}
@@ -470,7 +505,7 @@ static int _hl_cs_ioctl(struct hl_fpriv *hpriv, void __user *chunks,
struct hl_cs_job *job;
struct hl_cs *cs;
struct hl_cb *cb;
- bool ext_queue_present = false;
+ bool int_queues_only = true;
u32 size_to_copy;
int rc, i, parse_cnt;
@@ -514,23 +549,33 @@ static int _hl_cs_ioctl(struct hl_fpriv *hpriv, void __user *chunks,
/* Validate ALL the CS chunks before submitting the CS */
for (i = 0, parse_cnt = 0 ; i < num_chunks ; i++, parse_cnt++) {
struct hl_cs_chunk *chunk = &cs_chunk_array[i];
- bool ext_queue;
+ enum hl_queue_type queue_type;
+ bool is_kernel_allocated_cb;
+
+ rc = validate_queue_index(hdev, chunk, &queue_type,
+ &is_kernel_allocated_cb);
+ if (rc)
+ goto free_cs_object;
- cb = validate_queue_index(hdev, &hpriv->cb_mgr, chunk,
- &ext_queue);
- if (ext_queue) {
- ext_queue_present = true;
+ if (is_kernel_allocated_cb) {
+ cb = get_cb_from_cs_chunk(hdev, &hpriv->cb_mgr, chunk);
if (!cb) {
rc = -EINVAL;
goto free_cs_object;
}
+ } else {
+ cb = (struct hl_cb *) (uintptr_t) chunk->cb_handle;
}
- job = hl_cs_allocate_job(hdev, ext_queue);
+ if (queue_type == QUEUE_TYPE_EXT || queue_type == QUEUE_TYPE_HW)
+ int_queues_only = false;
+
+ job = hl_cs_allocate_job(hdev, queue_type,
+ is_kernel_allocated_cb);
if (!job) {
dev_err(hdev->dev, "Failed to allocate a new job\n");
rc = -ENOMEM;
- if (ext_queue)
+ if (is_kernel_allocated_cb)
goto release_cb;
else
goto free_cs_object;
@@ -540,7 +585,7 @@ static int _hl_cs_ioctl(struct hl_fpriv *hpriv, void __user *chunks,
job->cs = cs;
job->user_cb = cb;
job->user_cb_size = chunk->cb_size;
- if (job->ext_queue)
+ if (is_kernel_allocated_cb)
job->job_cb_size = cb->size;
else
job->job_cb_size = chunk->cb_size;
@@ -553,10 +598,11 @@ static int _hl_cs_ioctl(struct hl_fpriv *hpriv, void __user *chunks,
/*
* Increment CS reference. When CS reference is 0, CS is
* done and can be signaled to user and free all its resources
- * Only increment for JOB on external queues, because only
- * for those JOBs we get completion
+ * Only increment for JOB on external or H/W queues, because
+ * only for those JOBs we get completion
*/
- if (job->ext_queue)
+ if (job->queue_type == QUEUE_TYPE_EXT ||
+ job->queue_type == QUEUE_TYPE_HW)
cs_get(cs);
hl_debugfs_add_job(hdev, job);
@@ -570,9 +616,9 @@ static int _hl_cs_ioctl(struct hl_fpriv *hpriv, void __user *chunks,
}
}
- if (!ext_queue_present) {
+ if (int_queues_only) {
dev_err(hdev->dev,
- "Reject CS %d.%llu because no external queues jobs\n",
+ "Reject CS %d.%llu because only internal queues jobs are present\n",
cs->ctx->asid, cs->sequence);
rc = -EINVAL;
goto free_cs_object;
@@ -580,9 +626,10 @@ static int _hl_cs_ioctl(struct hl_fpriv *hpriv, void __user *chunks,
rc = hl_hw_queue_schedule_cs(cs);
if (rc) {
- dev_err(hdev->dev,
- "Failed to submit CS %d.%llu to H/W queues, error %d\n",
- cs->ctx->asid, cs->sequence, rc);
+ if (rc != -EAGAIN)
+ dev_err(hdev->dev,
+ "Failed to submit CS %d.%llu to H/W queues, error %d\n",
+ cs->ctx->asid, cs->sequence, rc);
goto free_cs_object;
}
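
The validation split above has one subtlety worth making explicit: the same
64-bit cb_handle field of a chunk carries either a kernel CB handle, shifted
left by PAGE_SHIFT (which get_cb_from_cs_chunk() undoes), or a raw device
address, depending on the queue's requires_kernel_cb property. A standalone
sketch of that dual encoding; the handle and address values are samples, not
taken from the driver:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12

    int main(void)
    {
            /* kernel-allocated CB: the handle travels page-shifted */
            uint64_t cb_handle = 0x7bULL << PAGE_SHIFT;
            uint32_t handle = (uint32_t)(cb_handle >> PAGE_SHIFT);

            printf("kernel CB handle: 0x%x\n", handle);

            /* queue without requires_kernel_cb: the same field is
             * reinterpreted as an SRAM/DRAM/host address by the parser
             */
            uint64_t raw_addr = 0x20000000ULL;

            printf("raw CB address: 0x%llx\n", (unsigned long long)raw_addr);
            return 0;
    }
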
diff --git a/drivers/misc/habanalabs/debugfs.c b/drivers/misc/habanalabs/debugfs.c
index 87f37ac31ccd..20413e350343 100644
--- a/drivers/misc/habanalabs/debugfs.c
+++ b/drivers/misc/habanalabs/debugfs.c
@@ -307,45 +307,57 @@ static inline u64 get_hop0_addr(struct hl_ctx *ctx)
(ctx->asid * ctx->hdev->asic_prop.mmu_hop_table_size);
}
-static inline u64 get_hop0_pte_addr(struct hl_ctx *ctx, u64 hop_addr,
- u64 virt_addr)
+static inline u64 get_hopN_pte_addr(struct hl_ctx *ctx, u64 hop_addr,
+ u64 virt_addr, u64 mask, u64 shift)
{
return hop_addr + ctx->hdev->asic_prop.mmu_pte_size *
- ((virt_addr & HOP0_MASK) >> HOP0_SHIFT);
+ ((virt_addr & mask) >> shift);
}
-static inline u64 get_hop1_pte_addr(struct hl_ctx *ctx, u64 hop_addr,
- u64 virt_addr)
+static inline u64 get_hop0_pte_addr(struct hl_ctx *ctx,
+ struct hl_mmu_properties *mmu_specs,
+ u64 hop_addr, u64 vaddr)
{
- return hop_addr + ctx->hdev->asic_prop.mmu_pte_size *
- ((virt_addr & HOP1_MASK) >> HOP1_SHIFT);
+ return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_specs->hop0_mask,
+ mmu_specs->hop0_shift);
}
-static inline u64 get_hop2_pte_addr(struct hl_ctx *ctx, u64 hop_addr,
- u64 virt_addr)
+static inline u64 get_hop1_pte_addr(struct hl_ctx *ctx,
+ struct hl_mmu_properties *mmu_specs,
+ u64 hop_addr, u64 vaddr)
{
- return hop_addr + ctx->hdev->asic_prop.mmu_pte_size *
- ((virt_addr & HOP2_MASK) >> HOP2_SHIFT);
+ return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_specs->hop1_mask,
+ mmu_specs->hop1_shift);
}
-static inline u64 get_hop3_pte_addr(struct hl_ctx *ctx, u64 hop_addr,
- u64 virt_addr)
+static inline u64 get_hop2_pte_addr(struct hl_ctx *ctx,
+ struct hl_mmu_properties *mmu_specs,
+ u64 hop_addr, u64 vaddr)
{
- return hop_addr + ctx->hdev->asic_prop.mmu_pte_size *
- ((virt_addr & HOP3_MASK) >> HOP3_SHIFT);
+ return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_specs->hop2_mask,
+ mmu_specs->hop2_shift);
}
-static inline u64 get_hop4_pte_addr(struct hl_ctx *ctx, u64 hop_addr,
- u64 virt_addr)
+static inline u64 get_hop3_pte_addr(struct hl_ctx *ctx,
+ struct hl_mmu_properties *mmu_specs,
+ u64 hop_addr, u64 vaddr)
{
- return hop_addr + ctx->hdev->asic_prop.mmu_pte_size *
- ((virt_addr & HOP4_MASK) >> HOP4_SHIFT);
+ return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_specs->hop3_mask,
+ mmu_specs->hop3_shift);
+}
+
+static inline u64 get_hop4_pte_addr(struct hl_ctx *ctx,
+ struct hl_mmu_properties *mmu_specs,
+ u64 hop_addr, u64 vaddr)
+{
+ return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_specs->hop4_mask,
+ mmu_specs->hop4_shift);
}
static inline u64 get_next_hop_addr(u64 curr_pte)
{
if (curr_pte & PAGE_PRESENT_MASK)
- return curr_pte & PHYS_ADDR_MASK;
+ return curr_pte & HOP_PHYS_ADDR_MASK;
else
return ULLONG_MAX;
}
@@ -355,7 +367,10 @@ static int mmu_show(struct seq_file *s, void *data)
struct hl_debugfs_entry *entry = s->private;
struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
struct hl_device *hdev = dev_entry->hdev;
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct hl_mmu_properties *mmu_prop;
struct hl_ctx *ctx;
+ bool is_dram_addr;
u64 hop0_addr = 0, hop0_pte_addr = 0, hop0_pte = 0,
hop1_addr = 0, hop1_pte_addr = 0, hop1_pte = 0,
@@ -377,33 +392,39 @@ static int mmu_show(struct seq_file *s, void *data)
return 0;
}
+ is_dram_addr = hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size,
+ prop->va_space_dram_start_address,
+ prop->va_space_dram_end_address);
+
+ mmu_prop = is_dram_addr ? &prop->dmmu : &prop->pmmu;
+
mutex_lock(&ctx->mmu_lock);
/* the following lookup is copied from unmap() in mmu.c */
hop0_addr = get_hop0_addr(ctx);
- hop0_pte_addr = get_hop0_pte_addr(ctx, hop0_addr, virt_addr);
+ hop0_pte_addr = get_hop0_pte_addr(ctx, mmu_prop, hop0_addr, virt_addr);
hop0_pte = hdev->asic_funcs->read_pte(hdev, hop0_pte_addr);
hop1_addr = get_next_hop_addr(hop0_pte);
if (hop1_addr == ULLONG_MAX)
goto not_mapped;
- hop1_pte_addr = get_hop1_pte_addr(ctx, hop1_addr, virt_addr);
+ hop1_pte_addr = get_hop1_pte_addr(ctx, mmu_prop, hop1_addr, virt_addr);
hop1_pte = hdev->asic_funcs->read_pte(hdev, hop1_pte_addr);
hop2_addr = get_next_hop_addr(hop1_pte);
if (hop2_addr == ULLONG_MAX)
goto not_mapped;
- hop2_pte_addr = get_hop2_pte_addr(ctx, hop2_addr, virt_addr);
+ hop2_pte_addr = get_hop2_pte_addr(ctx, mmu_prop, hop2_addr, virt_addr);
hop2_pte = hdev->asic_funcs->read_pte(hdev, hop2_pte_addr);
hop3_addr = get_next_hop_addr(hop2_pte);
if (hop3_addr == ULLONG_MAX)
goto not_mapped;
- hop3_pte_addr = get_hop3_pte_addr(ctx, hop3_addr, virt_addr);
+ hop3_pte_addr = get_hop3_pte_addr(ctx, mmu_prop, hop3_addr, virt_addr);
hop3_pte = hdev->asic_funcs->read_pte(hdev, hop3_pte_addr);
if (!(hop3_pte & LAST_MASK)) {
@@ -412,7 +433,8 @@ static int mmu_show(struct seq_file *s, void *data)
if (hop4_addr == ULLONG_MAX)
goto not_mapped;
- hop4_pte_addr = get_hop4_pte_addr(ctx, hop4_addr, virt_addr);
+ hop4_pte_addr = get_hop4_pte_addr(ctx, mmu_prop, hop4_addr,
+ virt_addr);
hop4_pte = hdev->asic_funcs->read_pte(hdev, hop4_pte_addr);
if (!(hop4_pte & PAGE_PRESENT_MASK))
goto not_mapped;
@@ -506,6 +528,12 @@ static int engines_show(struct seq_file *s, void *data)
struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
struct hl_device *hdev = dev_entry->hdev;
+ if (atomic_read(&hdev->in_reset)) {
+ dev_warn_ratelimited(hdev->dev,
+ "Can't check device idle during reset\n");
+ return 0;
+ }
+
hdev->asic_funcs->is_device_idle(hdev, NULL, s);
return 0;
@@ -534,41 +562,50 @@ static int device_va_to_pa(struct hl_device *hdev, u64 virt_addr,
u64 *phys_addr)
{
struct hl_ctx *ctx = hdev->compute_ctx;
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct hl_mmu_properties *mmu_prop;
u64 hop_addr, hop_pte_addr, hop_pte;
- u64 offset_mask = HOP4_MASK | OFFSET_MASK;
+ u64 offset_mask = HOP4_MASK | FLAGS_MASK;
int rc = 0;
+ bool is_dram_addr;
if (!ctx) {
dev_err(hdev->dev, "no ctx available\n");
return -EINVAL;
}
+ is_dram_addr = hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size,
+ prop->va_space_dram_start_address,
+ prop->va_space_dram_end_address);
+
+ mmu_prop = is_dram_addr ? &prop->dmmu : &prop->pmmu;
+
mutex_lock(&ctx->mmu_lock);
/* hop 0 */
hop_addr = get_hop0_addr(ctx);
- hop_pte_addr = get_hop0_pte_addr(ctx, hop_addr, virt_addr);
+ hop_pte_addr = get_hop0_pte_addr(ctx, mmu_prop, hop_addr, virt_addr);
hop_pte = hdev->asic_funcs->read_pte(hdev, hop_pte_addr);
/* hop 1 */
hop_addr = get_next_hop_addr(hop_pte);
if (hop_addr == ULLONG_MAX)
goto not_mapped;
- hop_pte_addr = get_hop1_pte_addr(ctx, hop_addr, virt_addr);
+ hop_pte_addr = get_hop1_pte_addr(ctx, mmu_prop, hop_addr, virt_addr);
hop_pte = hdev->asic_funcs->read_pte(hdev, hop_pte_addr);
/* hop 2 */
hop_addr = get_next_hop_addr(hop_pte);
if (hop_addr == ULLONG_MAX)
goto not_mapped;
- hop_pte_addr = get_hop2_pte_addr(ctx, hop_addr, virt_addr);
+ hop_pte_addr = get_hop2_pte_addr(ctx, mmu_prop, hop_addr, virt_addr);
hop_pte = hdev->asic_funcs->read_pte(hdev, hop_pte_addr);
/* hop 3 */
hop_addr = get_next_hop_addr(hop_pte);
if (hop_addr == ULLONG_MAX)
goto not_mapped;
- hop_pte_addr = get_hop3_pte_addr(ctx, hop_addr, virt_addr);
+ hop_pte_addr = get_hop3_pte_addr(ctx, mmu_prop, hop_addr, virt_addr);
hop_pte = hdev->asic_funcs->read_pte(hdev, hop_pte_addr);
if (!(hop_pte & LAST_MASK)) {
@@ -576,10 +613,11 @@ static int device_va_to_pa(struct hl_device *hdev, u64 virt_addr,
hop_addr = get_next_hop_addr(hop_pte);
if (hop_addr == ULLONG_MAX)
goto not_mapped;
- hop_pte_addr = get_hop4_pte_addr(ctx, hop_addr, virt_addr);
+ hop_pte_addr = get_hop4_pte_addr(ctx, mmu_prop, hop_addr,
+ virt_addr);
hop_pte = hdev->asic_funcs->read_pte(hdev, hop_pte_addr);
- offset_mask = OFFSET_MASK;
+ offset_mask = FLAGS_MASK;
}
if (!(hop_pte & PAGE_PRESENT_MASK))
@@ -608,6 +646,11 @@ static ssize_t hl_data_read32(struct file *f, char __user *buf,
u32 val;
ssize_t rc;
+ if (atomic_read(&hdev->in_reset)) {
+ dev_warn_ratelimited(hdev->dev, "Can't read during reset\n");
+ return 0;
+ }
+
if (*ppos)
return 0;
@@ -637,6 +680,11 @@ static ssize_t hl_data_write32(struct file *f, const char __user *buf,
u32 value;
ssize_t rc;
+ if (atomic_read(&hdev->in_reset)) {
+ dev_warn_ratelimited(hdev->dev, "Can't write during reset\n");
+ return 0;
+ }
+
rc = kstrtouint_from_user(buf, count, 16, &value);
if (rc)
return rc;
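
The get_hopN_pte_addr() refactor collapses five near-identical helpers into
one formula: the PTE of a virtual address lives at
hop_addr + mmu_pte_size * ((virt_addr & mask) >> shift), with mask and shift
now coming from the per-MMU hl_mmu_properties rather than the compile-time
HOPn constants. A standalone sketch using Goya's hop-3 values and an assumed
8-byte PTE; the sample vaddr and hop base are arbitrary:

    #include <stdint.h>
    #include <stdio.h>

    #define MMU_PTE_SIZE 8ULL /* assumed: one 64-bit PTE */

    static uint64_t hop_pte_addr(uint64_t hop_addr, uint64_t vaddr,
                                 uint64_t mask, uint64_t shift)
    {
            return hop_addr + MMU_PTE_SIZE * ((vaddr & mask) >> shift);
    }

    int main(void)
    {
            uint64_t hop3_mask = 0x000003FE00000ULL; /* bits 21..29 */
            uint64_t hop3_shift = 21;
            uint64_t vaddr = 0x123456789000ULL, hop3_base = 0x1000000ULL;

            printf("hop3 PTE at 0x%llx\n",
                   (unsigned long long)hop_pte_addr(hop3_base, vaddr,
                                                    hop3_mask, hop3_shift));
            return 0;
    }
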
diff --git a/drivers/misc/habanalabs/device.c b/drivers/misc/habanalabs/device.c
index 459fee70a597..b155e9549076 100644
--- a/drivers/misc/habanalabs/device.c
+++ b/drivers/misc/habanalabs/device.c
@@ -42,12 +42,10 @@ static void hpriv_release(struct kref *ref)
{
struct hl_fpriv *hpriv;
struct hl_device *hdev;
- struct hl_ctx *ctx;
hpriv = container_of(ref, struct hl_fpriv, refcount);
hdev = hpriv->hdev;
- ctx = hpriv->ctx;
put_pid(hpriv->taskpid);
@@ -889,13 +887,19 @@ again:
/* Go over all the queues, release all CS and their jobs */
hl_cs_rollback_all(hdev);
- /* Kill processes here after CS rollback. This is because the process
- * can't really exit until all its CSs are done, which is what we
- * do in cs rollback
- */
- if (from_hard_reset_thread)
+ if (hard_reset) {
+ /* Kill processes here after CS rollback. This is because the
+ * process can't really exit until all its CSs are done, and
+ * completing them is exactly what CS rollback does
+ */
device_kill_open_processes(hdev);
+ /* Flush the Event queue workers to make sure no other thread is
+ * reading or writing to registers during the reset
+ */
+ flush_workqueue(hdev->eq_wq);
+ }
+
/* Release kernel context */
if ((hard_reset) && (hl_ctx_put(hdev->kernel_ctx) == 1))
hdev->kernel_ctx = NULL;
diff --git a/drivers/misc/habanalabs/firmware_if.c b/drivers/misc/habanalabs/firmware_if.c
index ea2ca67fbfbf..f5bd03171dac 100644
--- a/drivers/misc/habanalabs/firmware_if.c
+++ b/drivers/misc/habanalabs/firmware_if.c
@@ -143,10 +143,7 @@ int hl_fw_test_cpu_queue(struct hl_device *hdev)
sizeof(test_pkt), HL_DEVICE_TIMEOUT_USEC, &result);
if (!rc) {
- if (result == ARMCP_PACKET_FENCE_VAL)
- dev_info(hdev->dev,
- "queue test on CPU queue succeeded\n");
- else
+ if (result != ARMCP_PACKET_FENCE_VAL)
dev_err(hdev->dev,
"CPU queue test failed (0x%08lX)\n", result);
} else {
diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c
index 6fba14b81f90..c8d16aa4382c 100644
--- a/drivers/misc/habanalabs/goya/goya.c
+++ b/drivers/misc/habanalabs/goya/goya.c
@@ -72,6 +72,9 @@
*
*/
+#define GOYA_UBOOT_FW_FILE "habanalabs/goya/goya-u-boot.bin"
+#define GOYA_LINUX_FW_FILE "habanalabs/goya/goya-fit.itb"
+
#define GOYA_MMU_REGS_NUM 63
#define GOYA_DMA_POOL_BLK_SIZE 0x100 /* 256 bytes */
@@ -337,17 +340,20 @@ void goya_get_fixed_properties(struct hl_device *hdev)
for (i = 0 ; i < NUMBER_OF_EXT_HW_QUEUES ; i++) {
prop->hw_queues_props[i].type = QUEUE_TYPE_EXT;
prop->hw_queues_props[i].driver_only = 0;
+ prop->hw_queues_props[i].requires_kernel_cb = 1;
}
for (; i < NUMBER_OF_EXT_HW_QUEUES + NUMBER_OF_CPU_HW_QUEUES ; i++) {
prop->hw_queues_props[i].type = QUEUE_TYPE_CPU;
prop->hw_queues_props[i].driver_only = 1;
+ prop->hw_queues_props[i].requires_kernel_cb = 0;
}
for (; i < NUMBER_OF_EXT_HW_QUEUES + NUMBER_OF_CPU_HW_QUEUES +
NUMBER_OF_INT_HW_QUEUES; i++) {
prop->hw_queues_props[i].type = QUEUE_TYPE_INT;
prop->hw_queues_props[i].driver_only = 0;
+ prop->hw_queues_props[i].requires_kernel_cb = 0;
}
for (; i < HL_MAX_QUEUES; i++)
@@ -377,6 +383,23 @@ void goya_get_fixed_properties(struct hl_device *hdev)
prop->mmu_hop0_tables_total_size = HOP0_TABLES_TOTAL_SIZE;
prop->dram_page_size = PAGE_SIZE_2MB;
+ prop->dmmu.hop0_shift = HOP0_SHIFT;
+ prop->dmmu.hop1_shift = HOP1_SHIFT;
+ prop->dmmu.hop2_shift = HOP2_SHIFT;
+ prop->dmmu.hop3_shift = HOP3_SHIFT;
+ prop->dmmu.hop4_shift = HOP4_SHIFT;
+ prop->dmmu.hop0_mask = HOP0_MASK;
+ prop->dmmu.hop1_mask = HOP1_MASK;
+ prop->dmmu.hop2_mask = HOP2_MASK;
+ prop->dmmu.hop3_mask = HOP3_MASK;
+ prop->dmmu.hop4_mask = HOP4_MASK;
+ prop->dmmu.huge_page_size = PAGE_SIZE_2MB;
+
+ /* No difference between PMMU and DMMU except for the page size */
+ memcpy(&prop->pmmu, &prop->dmmu, sizeof(prop->dmmu));
+ prop->dmmu.page_size = PAGE_SIZE_2MB;
+ prop->pmmu.page_size = PAGE_SIZE_4KB;
+
prop->va_space_host_start_address = VA_HOST_SPACE_START;
prop->va_space_host_end_address = VA_HOST_SPACE_END;
prop->va_space_dram_start_address = VA_DDR_SPACE_START;
@@ -393,6 +416,9 @@ void goya_get_fixed_properties(struct hl_device *hdev)
prop->tpc_enabled_mask = TPC_ENABLED_MASK;
prop->pcie_dbi_base_address = mmPCIE_DBI_BASE;
prop->pcie_aux_dbi_reg_addr = CFG_BASE + mmPCIE_AUX_DBI;
+
+ strncpy(prop->armcp_info.card_name, GOYA_DEFAULT_CARD_NAME,
+ CARD_NAME_MAX_LEN);
}
/*
@@ -1454,6 +1480,9 @@ static void goya_init_golden_registers(struct hl_device *hdev)
1 << TPC0_NRTR_SCRAMB_EN_VAL_SHIFT);
WREG32(mmTPC0_NRTR_NON_LIN_SCRAMB + offset,
1 << TPC0_NRTR_NON_LIN_SCRAMB_EN_SHIFT);
+
+ WREG32_FIELD(TPC0_CFG_MSS_CONFIG, offset,
+ ICACHE_FETCH_LINE_NUM, 2);
}
WREG32(mmDMA_NRTR_SCRAMB_EN, 1 << DMA_NRTR_SCRAMB_EN_VAL_SHIFT);
@@ -1533,7 +1562,6 @@ static void goya_init_mme_cmdq(struct hl_device *hdev)
u32 mtr_base_lo, mtr_base_hi;
u32 so_base_lo, so_base_hi;
u32 gic_base_lo, gic_base_hi;
- u64 qman_base_addr;
mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
@@ -1545,9 +1573,6 @@ static void goya_init_mme_cmdq(struct hl_device *hdev)
gic_base_hi =
upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
- qman_base_addr = hdev->asic_prop.sram_base_address +
- MME_QMAN_BASE_OFFSET;
-
WREG32(mmMME_CMDQ_CP_MSG_BASE0_ADDR_LO, mtr_base_lo);
WREG32(mmMME_CMDQ_CP_MSG_BASE0_ADDR_HI, mtr_base_hi);
WREG32(mmMME_CMDQ_CP_MSG_BASE1_ADDR_LO, so_base_lo);
@@ -2141,13 +2166,11 @@ static void goya_halt_engines(struct hl_device *hdev, bool hard_reset)
*/
static int goya_push_uboot_to_device(struct hl_device *hdev)
{
- char fw_name[200];
void __iomem *dst;
- snprintf(fw_name, sizeof(fw_name), "habanalabs/goya/goya-u-boot.bin");
dst = hdev->pcie_bar[SRAM_CFG_BAR_ID] + UBOOT_FW_OFFSET;
- return hl_fw_push_fw_to_device(hdev, fw_name, dst);
+ return hl_fw_push_fw_to_device(hdev, GOYA_UBOOT_FW_FILE, dst);
}
/*
@@ -2160,13 +2183,11 @@ static int goya_push_uboot_to_device(struct hl_device *hdev)
*/
static int goya_push_linux_to_device(struct hl_device *hdev)
{
- char fw_name[200];
void __iomem *dst;
- snprintf(fw_name, sizeof(fw_name), "habanalabs/goya/goya-fit.itb");
dst = hdev->pcie_bar[DDR_BAR_ID] + LINUX_FW_OFFSET;
- return hl_fw_push_fw_to_device(hdev, fw_name, dst);
+ return hl_fw_push_fw_to_device(hdev, GOYA_LINUX_FW_FILE, dst);
}
static int goya_pldm_init_cpu(struct hl_device *hdev)
@@ -2291,6 +2312,10 @@ static int goya_init_cpu(struct hl_device *hdev, u32 cpu_timeout)
10000,
cpu_timeout);
+ /* Read U-Boot version now in case we will later fail */
+ goya_read_device_fw_version(hdev, FW_COMP_UBOOT);
+ goya_read_device_fw_version(hdev, FW_COMP_PREBOOT);
+
if (rc) {
dev_err(hdev->dev, "Error in ARM u-boot!");
switch (status) {
@@ -2328,6 +2353,11 @@ static int goya_init_cpu(struct hl_device *hdev, u32 cpu_timeout)
"ARM status %d - u-boot stopped by user\n",
status);
break;
+ case CPU_BOOT_STATUS_TS_INIT_FAIL:
+ dev_err(hdev->dev,
+ "ARM status %d - Thermal Sensor initialization failed\n",
+ status);
+ break;
default:
dev_err(hdev->dev,
"ARM status %d - Invalid status code\n",
@@ -2337,10 +2367,6 @@ static int goya_init_cpu(struct hl_device *hdev, u32 cpu_timeout)
return -EIO;
}
- /* Read U-Boot version now in case we will later fail */
- goya_read_device_fw_version(hdev, FW_COMP_UBOOT);
- goya_read_device_fw_version(hdev, FW_COMP_PREBOOT);
-
if (!hdev->fw_loading) {
dev_info(hdev->dev, "Skip loading FW\n");
goto out;
@@ -2453,7 +2479,8 @@ int goya_mmu_init(struct hl_device *hdev)
WREG32_AND(mmSTLB_STLB_FEATURE_EN,
(~STLB_STLB_FEATURE_EN_FOLLOWER_EN_MASK));
- hdev->asic_funcs->mmu_invalidate_cache(hdev, true);
+ hdev->asic_funcs->mmu_invalidate_cache(hdev, true,
+ VM_TYPE_USERPTR | VM_TYPE_PHYS_PACK);
WREG32(mmMMU_MMU_ENABLE, 1);
WREG32(mmMMU_SPI_MASK, 0xF);
@@ -2978,9 +3005,6 @@ int goya_test_queue(struct hl_device *hdev, u32 hw_queue_id)
"H/W queue %d test failed (scratch(0x%08llX) == 0x%08X)\n",
hw_queue_id, (unsigned long long) fence_dma_addr, tmp);
rc = -EIO;
- } else {
- dev_info(hdev->dev, "queue test on H/W queue %d succeeded\n",
- hw_queue_id);
}
free_pkt:
@@ -3925,7 +3949,7 @@ static int goya_parse_cb_no_ext_queue(struct hl_device *hdev,
return 0;
dev_err(hdev->dev,
- "Internal CB address %px + 0x%x is not in SRAM nor in DRAM\n",
+ "Internal CB address 0x%px + 0x%x is not in SRAM nor in DRAM\n",
parser->user_cb, parser->user_cb_size);
return -EFAULT;
@@ -3935,7 +3959,7 @@ int goya_cs_parser(struct hl_device *hdev, struct hl_cs_parser *parser)
{
struct goya_device *goya = hdev->asic_specific;
- if (!parser->ext_queue)
+ if (parser->queue_type == QUEUE_TYPE_INT)
return goya_parse_cb_no_ext_queue(hdev, parser);
if (goya->hw_cap_initialized & HW_CAP_MMU)
@@ -4606,7 +4630,7 @@ static int goya_memset_device_memory(struct hl_device *hdev, u64 addr, u64 size,
lin_dma_pkt++;
} while (--lin_dma_pkts_cnt);
- job = hl_cs_allocate_job(hdev, true);
+ job = hl_cs_allocate_job(hdev, QUEUE_TYPE_EXT, true);
if (!job) {
dev_err(hdev->dev, "Failed to allocate a new job\n");
rc = -ENOMEM;
@@ -4835,13 +4859,15 @@ static void goya_mmu_prepare(struct hl_device *hdev, u32 asid)
goya_mmu_prepare_reg(hdev, goya_mmu_regs[i], asid);
}
-static void goya_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard)
+static void goya_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard,
+ u32 flags)
{
struct goya_device *goya = hdev->asic_specific;
u32 status, timeout_usec;
int rc;
- if (!(goya->hw_cap_initialized & HW_CAP_MMU))
+ if (!(goya->hw_cap_initialized & HW_CAP_MMU) ||
+ hdev->hard_reset_pending)
return;
/* no need in L1 only invalidation in Goya */
@@ -4880,7 +4906,8 @@ static void goya_mmu_invalidate_cache_range(struct hl_device *hdev,
u32 status, timeout_usec, inv_data, pi;
int rc;
- if (!(goya->hw_cap_initialized & HW_CAP_MMU))
+ if (!(goya->hw_cap_initialized & HW_CAP_MMU) ||
+ hdev->hard_reset_pending)
return;
/* no need in L1 only invalidation in Goya */
@@ -5137,7 +5164,8 @@ static const struct hl_asic_funcs goya_funcs = {
.init_iatu = goya_init_iatu,
.rreg = hl_rreg,
.wreg = hl_wreg,
- .halt_coresight = goya_halt_coresight
+ .halt_coresight = goya_halt_coresight,
+ .get_clk_rate = goya_get_clk_rate
};
/*
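
The PMMU/DMMU setup in goya_get_fixed_properties() uses a copy-then-override
idiom: populate the DRAM-MMU properties once, memcpy them into the host-MMU
properties, then assign only the page sizes, the single field in which the
two differ on Goya. A standalone model of the idiom; the struct below is a
stand-in for hl_mmu_properties, not the real definition:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct mmu_props {
            uint64_t hop0_shift; /* remaining shifts/masks elided */
            uint32_t page_size;
            uint32_t huge_page_size;
    };

    int main(void)
    {
            struct mmu_props dmmu = {
                    .hop0_shift = 48,
                    .huge_page_size = 1u << 21,
            };
            struct mmu_props pmmu;

            memcpy(&pmmu, &dmmu, sizeof(dmmu)); /* shared layout */
            dmmu.page_size = 1u << 21; /* DRAM: 2MB pages */
            pmmu.page_size = 1u << 12; /* host: 4KB pages */

            printf("dmmu %u, pmmu %u\n", dmmu.page_size, pmmu.page_size);
            return 0;
    }
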
diff --git a/drivers/misc/habanalabs/goya/goyaP.h b/drivers/misc/habanalabs/goya/goyaP.h
index 89b6574f8e4f..c3230cb6e25c 100644
--- a/drivers/misc/habanalabs/goya/goyaP.h
+++ b/drivers/misc/habanalabs/goya/goyaP.h
@@ -233,4 +233,6 @@ void goya_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size,
void *vaddr);
void goya_mmu_remove_device_cpu_mappings(struct hl_device *hdev);
+int goya_get_clk_rate(struct hl_device *hdev, u32 *cur_clk, u32 *max_clk);
+
#endif /* GOYAP_H_ */
diff --git a/drivers/misc/habanalabs/goya/goya_coresight.c b/drivers/misc/habanalabs/goya/goya_coresight.c
index b4d406af1bed..c1ee6e2b5dff 100644
--- a/drivers/misc/habanalabs/goya/goya_coresight.c
+++ b/drivers/misc/habanalabs/goya/goya_coresight.c
@@ -8,6 +8,7 @@
#include "goyaP.h"
#include "include/goya/goya_coresight.h"
#include "include/goya/asic_reg/goya_regs.h"
+#include "include/goya/asic_reg/goya_masks.h"
#include <uapi/misc/habanalabs.h>
@@ -377,33 +378,32 @@ static int goya_config_etr(struct hl_device *hdev,
struct hl_debug_params *params)
{
struct hl_debug_params_etr *input;
- u64 base_reg = mmPSOC_ETR_BASE - CFG_BASE;
u32 val;
int rc;
- WREG32(base_reg + 0xFB0, CORESIGHT_UNLOCK);
+ WREG32(mmPSOC_ETR_LAR, CORESIGHT_UNLOCK);
- val = RREG32(base_reg + 0x304);
+ val = RREG32(mmPSOC_ETR_FFCR);
val |= 0x1000;
- WREG32(base_reg + 0x304, val);
+ WREG32(mmPSOC_ETR_FFCR, val);
val |= 0x40;
- WREG32(base_reg + 0x304, val);
+ WREG32(mmPSOC_ETR_FFCR, val);
- rc = goya_coresight_timeout(hdev, base_reg + 0x304, 6, false);
+ rc = goya_coresight_timeout(hdev, mmPSOC_ETR_FFCR, 6, false);
if (rc) {
dev_err(hdev->dev, "Failed to %s ETR on timeout, error %d\n",
params->enable ? "enable" : "disable", rc);
return rc;
}
- rc = goya_coresight_timeout(hdev, base_reg + 0xC, 2, true);
+ rc = goya_coresight_timeout(hdev, mmPSOC_ETR_STS, 2, true);
if (rc) {
dev_err(hdev->dev, "Failed to %s ETR on timeout, error %d\n",
params->enable ? "enable" : "disable", rc);
return rc;
}
- WREG32(base_reg + 0x20, 0);
+ WREG32(mmPSOC_ETR_CTL, 0);
if (params->enable) {
input = params->input;
@@ -423,25 +423,26 @@ static int goya_config_etr(struct hl_device *hdev,
return -EINVAL;
}
- WREG32(base_reg + 0x34, 0x3FFC);
- WREG32(base_reg + 0x4, input->buffer_size);
- WREG32(base_reg + 0x28, input->sink_mode);
- WREG32(base_reg + 0x110, 0x700);
- WREG32(base_reg + 0x118,
+ WREG32(mmPSOC_ETR_BUFWM, 0x3FFC);
+ WREG32(mmPSOC_ETR_RSZ, input->buffer_size);
+ WREG32(mmPSOC_ETR_MODE, input->sink_mode);
+ WREG32(mmPSOC_ETR_AXICTL,
+ 0x700 | PSOC_ETR_AXICTL_PROTCTRLBIT1_SHIFT);
+ WREG32(mmPSOC_ETR_DBALO,
lower_32_bits(input->buffer_address));
- WREG32(base_reg + 0x11C,
+ WREG32(mmPSOC_ETR_DBAHI,
upper_32_bits(input->buffer_address));
- WREG32(base_reg + 0x304, 3);
- WREG32(base_reg + 0x308, 0xA);
- WREG32(base_reg + 0x20, 1);
+ WREG32(mmPSOC_ETR_FFCR, 3);
+ WREG32(mmPSOC_ETR_PSCR, 0xA);
+ WREG32(mmPSOC_ETR_CTL, 1);
} else {
- WREG32(base_reg + 0x34, 0);
- WREG32(base_reg + 0x4, 0x400);
- WREG32(base_reg + 0x118, 0);
- WREG32(base_reg + 0x11C, 0);
- WREG32(base_reg + 0x308, 0);
- WREG32(base_reg + 0x28, 0);
- WREG32(base_reg + 0x304, 0);
+ WREG32(mmPSOC_ETR_BUFWM, 0);
+ WREG32(mmPSOC_ETR_RSZ, 0x400);
+ WREG32(mmPSOC_ETR_DBALO, 0);
+ WREG32(mmPSOC_ETR_DBAHI, 0);
+ WREG32(mmPSOC_ETR_PSCR, 0);
+ WREG32(mmPSOC_ETR_MODE, 0);
+ WREG32(mmPSOC_ETR_FFCR, 0);
if (params->output_size >= sizeof(u64)) {
u32 rwp, rwphi;
@@ -451,8 +452,8 @@ static int goya_config_etr(struct hl_device *hdev,
* the buffer is set in the RWP register (lower 32
* bits), and in the RWPHI register (upper 8 bits).
*/
- rwp = RREG32(base_reg + 0x18);
- rwphi = RREG32(base_reg + 0x3c) & 0xff;
+ rwp = RREG32(mmPSOC_ETR_RWP);
+ rwphi = RREG32(mmPSOC_ETR_RWPHI) & 0xff;
*(u64 *) params->output = ((u64) rwphi << 32) | rwp;
}
}
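
With the magic offsets replaced by named registers, the FFCR writes at the
top of goya_config_etr() read as a two-step read-modify-write: set bit 12
(0x1000) first, then bit 6 (0x40) on top of it. On standard CoreSight TMCs
those bits are StopOnFl and FlushMan; that naming is an assumption here, not
something the driver states. A standalone model, with a plain variable
standing in for the MMIO register:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t ffcr; /* stands in for mmPSOC_ETR_FFCR */

    int main(void)
    {
            uint32_t val = ffcr;

            val |= 0x1000; /* bit 12: stop on flush completion */
            ffcr = val;    /* first write, as in the driver */
            val |= 0x40;   /* bit 6: trigger a manual flush */
            ffcr = val;    /* second write */

            printf("FFCR = 0x%x\n", ffcr);
            return 0;
    }
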
diff --git a/drivers/misc/habanalabs/goya/goya_hwmgr.c b/drivers/misc/habanalabs/goya/goya_hwmgr.c
index a2a700c3d597..b2ebc01e27f4 100644
--- a/drivers/misc/habanalabs/goya/goya_hwmgr.c
+++ b/drivers/misc/habanalabs/goya/goya_hwmgr.c
@@ -32,6 +32,37 @@ void goya_set_pll_profile(struct hl_device *hdev, enum hl_pll_frequency freq)
}
}
+int goya_get_clk_rate(struct hl_device *hdev, u32 *cur_clk, u32 *max_clk)
+{
+ long value;
+
+ if (hl_device_disabled_or_in_reset(hdev))
+ return -ENODEV;
+
+ value = hl_get_frequency(hdev, MME_PLL, false);
+
+ if (value < 0) {
+ dev_err(hdev->dev, "Failed to retrieve device max clock %ld\n",
+ value);
+ return value;
+ }
+
+ *max_clk = (value / 1000 / 1000);
+
+ value = hl_get_frequency(hdev, MME_PLL, true);
+
+ if (value < 0) {
+ dev_err(hdev->dev,
+ "Failed to retrieve device current clock %ld\n",
+ value);
+ return value;
+ }
+
+ *cur_clk = (value / 1000 / 1000);
+
+ return 0;
+}
+
static ssize_t mme_clk_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
diff --git a/drivers/misc/habanalabs/habanalabs.h b/drivers/misc/habanalabs/habanalabs.h
index 75862be53c60..00c949f4ccd1 100644
--- a/drivers/misc/habanalabs/habanalabs.h
+++ b/drivers/misc/habanalabs/habanalabs.h
@@ -40,8 +40,6 @@
#define HL_MAX_QUEUES 128
-#define HL_MAX_JOBS_PER_CS 64
-
/* MUST BE POWER OF 2 and larger than 1 */
#define HL_MAX_PENDING_CS 64
@@ -85,12 +83,15 @@ struct hl_fpriv;
* @QUEUE_TYPE_INT: internal queue that performs DMA inside the device's
* memories and/or operates the compute engines.
* @QUEUE_TYPE_CPU: S/W queue for communication with the device's CPU.
+ * @QUEUE_TYPE_HW: queue of DMA and compute engine jobs, for which completion
+ * notifications are sent by H/W.
*/
enum hl_queue_type {
QUEUE_TYPE_NA,
QUEUE_TYPE_EXT,
QUEUE_TYPE_INT,
- QUEUE_TYPE_CPU
+ QUEUE_TYPE_CPU,
+ QUEUE_TYPE_HW
};
/**
@@ -98,10 +99,13 @@ enum hl_queue_type {
* @type: queue type.
* @driver_only: true if only the driver is allowed to send a job to this queue,
* false otherwise.
+ * @requires_kernel_cb: true if a CB handle must be provided for jobs on this
+ * queue, false otherwise (in which case a raw CB address is provided).
*/
struct hw_queue_properties {
enum hl_queue_type type;
u8 driver_only;
+ u8 requires_kernel_cb;
};
/**
@@ -110,8 +114,8 @@ struct hw_queue_properties {
* @VM_TYPE_PHYS_PACK: mapping of DRAM memory to device virtual address.
*/
enum vm_type_t {
- VM_TYPE_USERPTR,
- VM_TYPE_PHYS_PACK
+ VM_TYPE_USERPTR = 0x1,
+ VM_TYPE_PHYS_PACK = 0x2
};
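
The values move from implicit 0/1 to distinct bits so callers can OR them
together; the new flags argument of mmu_invalidate_cache() depends on this,
as in goya_mmu_init() passing VM_TYPE_USERPTR | VM_TYPE_PHYS_PACK to
invalidate both mapping types at once. A standalone illustration of the
bitmask test:

    #include <stdio.h>

    enum vm_type { VM_TYPE_USERPTR = 0x1, VM_TYPE_PHYS_PACK = 0x2 };

    int main(void)
    {
            unsigned int flags = VM_TYPE_USERPTR | VM_TYPE_PHYS_PACK;

            if (flags & VM_TYPE_USERPTR)
                    printf("invalidate host (userptr) mappings\n");
            if (flags & VM_TYPE_PHYS_PACK)
                    printf("invalidate DRAM mappings\n");
            return 0;
    }
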
/**
@@ -127,12 +131,44 @@ enum hl_device_hw_state {
};
/**
+ * struct hl_mmu_properties - ASIC specific MMU address translation properties.
+ * @hop0_shift: shift of hop 0 mask.
+ * @hop1_shift: shift of hop 1 mask.
+ * @hop2_shift: shift of hop 2 mask.
+ * @hop3_shift: shift of hop 3 mask.
+ * @hop4_shift: shift of hop 4 mask.
+ * @hop0_mask: mask to get the PTE address in hop 0.
+ * @hop1_mask: mask to get the PTE address in hop 1.
+ * @hop2_mask: mask to get the PTE address in hop 2.
+ * @hop3_mask: mask to get the PTE address in hop 3.
+ * @hop4_mask: mask to get the PTE address in hop 4.
+ * @page_size: default page size used to allocate memory.
+ * @huge_page_size: page size used to allocate memory with huge pages.
+ */
+struct hl_mmu_properties {
+ u64 hop0_shift;
+ u64 hop1_shift;
+ u64 hop2_shift;
+ u64 hop3_shift;
+ u64 hop4_shift;
+ u64 hop0_mask;
+ u64 hop1_mask;
+ u64 hop2_mask;
+ u64 hop3_mask;
+ u64 hop4_mask;
+ u32 page_size;
+ u32 huge_page_size;
+};
+
+/**
* struct asic_fixed_properties - ASIC specific immutable properties.
* @hw_queues_props: H/W queues properties.
* @armcp_info: received various information from ArmCP regarding the H/W, e.g.
* available sensors.
* @uboot_ver: F/W U-boot version.
* @preboot_ver: F/W Preboot version.
+ * @dmmu: DRAM MMU address translation properties.
+ * @pmmu: PCI (host) MMU address translation properties.
* @sram_base_address: SRAM physical start address.
* @sram_end_address: SRAM physical end address.
* @sram_user_base_address - SRAM physical start address for user access.
@@ -169,53 +205,55 @@ enum hl_device_hw_state {
* @psoc_pci_pll_nf: PCI PLL NF value.
* @psoc_pci_pll_od: PCI PLL OD value.
* @psoc_pci_pll_div_factor: PCI PLL DIV FACTOR 1 value.
- * @completion_queues_count: number of completion queues.
* @high_pll: high PLL frequency used by the device.
* @cb_pool_cb_cnt: number of CBs in the CB pool.
* @cb_pool_cb_size: size of each CB in the CB pool.
* @tpc_enabled_mask: which TPCs are enabled.
+ * @completion_queues_count: number of completion queues.
*/
struct asic_fixed_properties {
struct hw_queue_properties hw_queues_props[HL_MAX_QUEUES];
- struct armcp_info armcp_info;
- char uboot_ver[VERSION_MAX_LEN];
- char preboot_ver[VERSION_MAX_LEN];
- u64 sram_base_address;
- u64 sram_end_address;
- u64 sram_user_base_address;
- u64 dram_base_address;
- u64 dram_end_address;
- u64 dram_user_base_address;
- u64 dram_size;
- u64 dram_pci_bar_size;
- u64 max_power_default;
- u64 va_space_host_start_address;
- u64 va_space_host_end_address;
- u64 va_space_dram_start_address;
- u64 va_space_dram_end_address;
- u64 dram_size_for_default_page_mapping;
- u64 pcie_dbi_base_address;
- u64 pcie_aux_dbi_reg_addr;
- u64 mmu_pgt_addr;
- u64 mmu_dram_default_page_addr;
- u32 mmu_pgt_size;
- u32 mmu_pte_size;
- u32 mmu_hop_table_size;
- u32 mmu_hop0_tables_total_size;
- u32 dram_page_size;
- u32 cfg_size;
- u32 sram_size;
- u32 max_asid;
- u32 num_of_events;
- u32 psoc_pci_pll_nr;
- u32 psoc_pci_pll_nf;
- u32 psoc_pci_pll_od;
- u32 psoc_pci_pll_div_factor;
- u32 high_pll;
- u32 cb_pool_cb_cnt;
- u32 cb_pool_cb_size;
- u8 completion_queues_count;
- u8 tpc_enabled_mask;
+ struct armcp_info armcp_info;
+ char uboot_ver[VERSION_MAX_LEN];
+ char preboot_ver[VERSION_MAX_LEN];
+ struct hl_mmu_properties dmmu;
+ struct hl_mmu_properties pmmu;
+ u64 sram_base_address;
+ u64 sram_end_address;
+ u64 sram_user_base_address;
+ u64 dram_base_address;
+ u64 dram_end_address;
+ u64 dram_user_base_address;
+ u64 dram_size;
+ u64 dram_pci_bar_size;
+ u64 max_power_default;
+ u64 va_space_host_start_address;
+ u64 va_space_host_end_address;
+ u64 va_space_dram_start_address;
+ u64 va_space_dram_end_address;
+ u64 dram_size_for_default_page_mapping;
+ u64 pcie_dbi_base_address;
+ u64 pcie_aux_dbi_reg_addr;
+ u64 mmu_pgt_addr;
+ u64 mmu_dram_default_page_addr;
+ u32 mmu_pgt_size;
+ u32 mmu_pte_size;
+ u32 mmu_hop_table_size;
+ u32 mmu_hop0_tables_total_size;
+ u32 dram_page_size;
+ u32 cfg_size;
+ u32 sram_size;
+ u32 max_asid;
+ u32 num_of_events;
+ u32 psoc_pci_pll_nr;
+ u32 psoc_pci_pll_nf;
+ u32 psoc_pci_pll_od;
+ u32 psoc_pci_pll_div_factor;
+ u32 high_pll;
+ u32 cb_pool_cb_cnt;
+ u32 cb_pool_cb_size;
+ u8 tpc_enabled_mask;
+ u8 completion_queues_count;
};
/**
@@ -236,8 +274,6 @@ struct hl_dma_fence {
* Command Buffers
*/
-#define HL_MAX_CB_SIZE 0x200000 /* 2MB */
-
/**
* struct hl_cb_mgr - describes a Command Buffer Manager.
* @cb_lock: protects cb_handles.
@@ -481,8 +517,8 @@ enum hl_pll_frequency {
* @get_events_stat: retrieve event queue entries histogram.
* @read_pte: read MMU page table entry from DRAM.
* @write_pte: write MMU page table entry to DRAM.
- * @mmu_invalidate_cache: flush MMU STLB cache, either with soft (L1 only) or
- * hard (L0 & L1) flush.
+ * @mmu_invalidate_cache: flush MMU STLB host/DRAM cache, either with soft
+ * (L1 only) or hard (L0 & L1) flush.
* @mmu_invalidate_cache_range: flush specific MMU STLB cache lines with
* ASID-VA-size mask.
* @send_heartbeat: send is-alive packet to ArmCP and verify response.
@@ -502,6 +538,7 @@ enum hl_pll_frequency {
* @rreg: Read a register. Needed for simulator support.
* @wreg: Write a register. Needed for simulator support.
* @halt_coresight: stop the ETF and ETR traces.
+ * @get_clk_rate: Retrieve the ASIC's current and maximum clock rate, in MHz.
*/
struct hl_asic_funcs {
int (*early_init)(struct hl_device *hdev);
@@ -562,7 +599,8 @@ struct hl_asic_funcs {
u32 *size);
u64 (*read_pte)(struct hl_device *hdev, u64 addr);
void (*write_pte)(struct hl_device *hdev, u64 addr, u64 val);
- void (*mmu_invalidate_cache)(struct hl_device *hdev, bool is_hard);
+ void (*mmu_invalidate_cache)(struct hl_device *hdev, bool is_hard,
+ u32 flags);
void (*mmu_invalidate_cache_range)(struct hl_device *hdev, bool is_hard,
u32 asid, u64 va, u64 size);
int (*send_heartbeat)(struct hl_device *hdev);
@@ -584,6 +622,7 @@ struct hl_asic_funcs {
u32 (*rreg)(struct hl_device *hdev, u32 reg);
void (*wreg)(struct hl_device *hdev, u32 reg, u32 val);
void (*halt_coresight)(struct hl_device *hdev);
+ int (*get_clk_rate)(struct hl_device *hdev, u32 *cur_clk, u32 *max_clk);
};
@@ -688,7 +727,7 @@ struct hl_ctx_mgr {
* @sgt: pointer to the scatter-gather table that holds the pages.
* @dir: for DMA unmapping, the direction must be supplied, so save it.
* @debugfs_list: node in debugfs list of command submissions.
- * @addr: user-space virtual pointer to the start of the memory area.
+ * @addr: user-space virtual address of the start of the memory area.
* @size: size of the memory area to pin & map.
* @dma_mapped: true if the SG was mapped to DMA addresses, false otherwise.
*/
@@ -752,11 +791,14 @@ struct hl_cs {
* @userptr_list: linked-list of userptr mappings that belong to this job and
* wait for completion.
* @debugfs_list: node in debugfs list of command submission jobs.
+ * @queue_type: the type of the H/W queue this job is submitted to.
* @id: the id of this job inside a CS.
* @hw_queue_id: the id of the H/W queue this job is submitted to.
* @user_cb_size: the actual size of the CB we got from the user.
* @job_cb_size: the actual size of the CB that we put on the queue.
- * @ext_queue: whether the job is for external queue or internal queue.
+ * @is_kernel_allocated_cb: true if the CB field we got from the user holds a
+ * handle to a kernel-allocated CB object, false if it
+ * holds a raw SRAM/DRAM/host address.
*/
struct hl_cs_job {
struct list_head cs_node;
@@ -766,39 +808,44 @@ struct hl_cs_job {
struct work_struct finish_work;
struct list_head userptr_list;
struct list_head debugfs_list;
+ enum hl_queue_type queue_type;
u32 id;
u32 hw_queue_id;
u32 user_cb_size;
u32 job_cb_size;
- u8 ext_queue;
+ u8 is_kernel_allocated_cb;
};
/**
- * struct hl_cs_parser - command submission paerser properties.
+ * struct hl_cs_parser - command submission parser properties.
* @user_cb: the CB we got from the user.
* @patched_cb: in case of patching, this is internal CB which is submitted on
* the queue instead of the CB we got from the IOCTL.
* @job_userptr_list: linked-list of userptr mappings that belong to the related
* job and wait for completion.
* @cs_sequence: the sequence number of the related CS.
+ * @queue_type: the type of the H/W queue this job is submitted to.
* @ctx_id: the ID of the context the related CS belongs to.
* @hw_queue_id: the id of the H/W queue this job is submitted to.
* @user_cb_size: the actual size of the CB we got from the user.
* @patched_cb_size: the size of the CB after parsing.
- * @ext_queue: whether the job is for external queue or internal queue.
* @job_id: the id of the related job inside the related CS.
+ * @is_kernel_allocated_cb: true if the CB field we got from the user holds a
+ * handle to a kernel-allocated CB object, false if it
+ * holds a raw SRAM/DRAM/host address.
*/
struct hl_cs_parser {
struct hl_cb *user_cb;
struct hl_cb *patched_cb;
struct list_head *job_userptr_list;
u64 cs_sequence;
+ enum hl_queue_type queue_type;
u32 ctx_id;
u32 hw_queue_id;
u32 user_cb_size;
u32 patched_cb_size;
- u8 ext_queue;
u8 job_id;
+ u8 is_kernel_allocated_cb;
};
@@ -1048,9 +1095,10 @@ void hl_wreg(struct hl_device *hdev, u32 reg, u32 val);
#define REG_FIELD_SHIFT(reg, field) reg##_##field##_SHIFT
#define REG_FIELD_MASK(reg, field) reg##_##field##_MASK
-#define WREG32_FIELD(reg, field, val) \
- WREG32(mm##reg, (RREG32(mm##reg) & ~REG_FIELD_MASK(reg, field)) | \
- (val) << REG_FIELD_SHIFT(reg, field))
+#define WREG32_FIELD(reg, offset, field, val) \
+ WREG32(mm##reg + offset, (RREG32(mm##reg + offset) & \
+ ~REG_FIELD_MASK(reg, field)) | \
+ (val) << REG_FIELD_SHIFT(reg, field))
/* Timeout should be longer when working with simulator but cap the
* increased timeout to some maximum
@@ -1501,7 +1549,8 @@ int hl_cb_pool_init(struct hl_device *hdev);
int hl_cb_pool_fini(struct hl_device *hdev);
void hl_cs_rollback_all(struct hl_device *hdev);
-struct hl_cs_job *hl_cs_allocate_job(struct hl_device *hdev, bool ext_queue);
+struct hl_cs_job *hl_cs_allocate_job(struct hl_device *hdev,
+ enum hl_queue_type queue_type, bool is_kernel_allocated_cb);
void goya_set_asic_funcs(struct hl_device *hdev);
@@ -1513,7 +1562,7 @@ void hl_vm_fini(struct hl_device *hdev);
int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size,
struct hl_userptr *userptr);
-int hl_unpin_host_memory(struct hl_device *hdev, struct hl_userptr *userptr);
+void hl_unpin_host_memory(struct hl_device *hdev, struct hl_userptr *userptr);
void hl_userptr_delete_list(struct hl_device *hdev,
struct list_head *userptr_list);
bool hl_userptr_is_pinned(struct hl_device *hdev, u64 addr, u32 size,
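
The reworked WREG32_FIELD() is a read-modify-write with a per-instance
offset: read the register, clear the field's mask, OR in the new value at
the field's shift, write back. The added offset parameter is what lets one
invocation target any of the per-engine register copies, as the TPC
ICACHE_FETCH_LINE_NUM write in goya_init_golden_registers() does. A
standalone model; the array stands in for MMIO and is indexed directly
rather than by byte offset, and this version also masks val, which the
kernel macro does not:

    #include <stdint.h>
    #include <stdio.h>

    #define FIELD_SHIFT 8
    #define FIELD_MASK  (0xfu << FIELD_SHIFT)

    static uint32_t regs[2] = { 0xdeadbeef, 0xcafef00d }; /* engine copies */

    static void wreg32_field(unsigned int idx, uint32_t val)
    {
            regs[idx] = (regs[idx] & ~FIELD_MASK) |
                        ((val << FIELD_SHIFT) & FIELD_MASK);
    }

    int main(void)
    {
            wreg32_field(1, 0x2); /* touch the field of the second copy only */
            printf("0x%08x 0x%08x\n", regs[0], regs[1]);
            return 0;
    }
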
diff --git a/drivers/misc/habanalabs/habanalabs_ioctl.c b/drivers/misc/habanalabs/habanalabs_ioctl.c
index 66d9c710073c..6474b868ef27 100644
--- a/drivers/misc/habanalabs/habanalabs_ioctl.c
+++ b/drivers/misc/habanalabs/habanalabs_ioctl.c
@@ -60,11 +60,16 @@ static int hw_ip_info(struct hl_device *hdev, struct hl_info_args *args)
hw_ip.tpc_enabled_mask = prop->tpc_enabled_mask;
hw_ip.sram_size = prop->sram_size - sram_kmd_size;
hw_ip.dram_size = prop->dram_size - dram_kmd_size;
- if (hw_ip.dram_size > 0)
+ if (hw_ip.dram_size > PAGE_SIZE)
hw_ip.dram_enabled = 1;
hw_ip.num_of_events = prop->num_of_events;
- memcpy(hw_ip.armcp_version,
- prop->armcp_info.armcp_version, VERSION_MAX_LEN);
+
+ memcpy(hw_ip.armcp_version, prop->armcp_info.armcp_version,
+ min(VERSION_MAX_LEN, HL_INFO_VERSION_MAX_LEN));
+
+ memcpy(hw_ip.card_name, prop->armcp_info.card_name,
+ min(CARD_NAME_MAX_LEN, HL_INFO_CARD_NAME_MAX_LEN));
+
hw_ip.armcp_cpld_version = le32_to_cpu(prop->armcp_info.cpld_version);
hw_ip.psoc_pci_pll_nr = prop->psoc_pci_pll_nr;
hw_ip.psoc_pci_pll_nf = prop->psoc_pci_pll_nf;
@@ -179,17 +184,14 @@ static int debug_coresight(struct hl_device *hdev, struct hl_debug_args *args)
goto out;
}
- if (output) {
- if (copy_to_user((void __user *) (uintptr_t) args->output_ptr,
- output,
- args->output_size)) {
- dev_err(hdev->dev,
- "copy to user failed in debug ioctl\n");
- rc = -EFAULT;
- goto out;
- }
+ if (output && copy_to_user((void __user *) (uintptr_t) args->output_ptr,
+ output, args->output_size)) {
+ dev_err(hdev->dev, "copy to user failed in debug ioctl\n");
+ rc = -EFAULT;
+ goto out;
}
+
out:
kfree(params);
kfree(output);
@@ -221,6 +223,41 @@ static int device_utilization(struct hl_device *hdev, struct hl_info_args *args)
min((size_t) max_size, sizeof(device_util))) ? -EFAULT : 0;
}
+static int get_clk_rate(struct hl_device *hdev, struct hl_info_args *args)
+{
+ struct hl_info_clk_rate clk_rate = {0};
+ u32 max_size = args->return_size;
+ void __user *out = (void __user *) (uintptr_t) args->return_pointer;
+ int rc;
+
+ if ((!max_size) || (!out))
+ return -EINVAL;
+
+ rc = hdev->asic_funcs->get_clk_rate(hdev, &clk_rate.cur_clk_rate_mhz,
+ &clk_rate.max_clk_rate_mhz);
+ if (rc)
+ return rc;
+
+ return copy_to_user(out, &clk_rate,
+ min((size_t) max_size, sizeof(clk_rate))) ? -EFAULT : 0;
+}
+
+static int get_reset_count(struct hl_device *hdev, struct hl_info_args *args)
+{
+ struct hl_info_reset_count reset_count = {0};
+ u32 max_size = args->return_size;
+ void __user *out = (void __user *) (uintptr_t) args->return_pointer;
+
+ if ((!max_size) || (!out))
+ return -EINVAL;
+
+ reset_count.hard_reset_cnt = hdev->hard_reset_cnt;
+ reset_count.soft_reset_cnt = hdev->soft_reset_cnt;
+
+ return copy_to_user(out, &reset_count,
+ min((size_t) max_size, sizeof(reset_count))) ? -EFAULT : 0;
+}
+
static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
struct device *dev)
{
@@ -239,6 +276,9 @@ static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
case HL_INFO_DEVICE_STATUS:
return device_status_info(hdev, args);
+ case HL_INFO_RESET_COUNT:
+ return get_reset_count(hdev, args);
+
default:
break;
}
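
Both new INFO handlers follow the same convention as device_utilization():
copy min(args->return_size, sizeof(struct)) bytes, so a binary built against
an older, smaller version of the struct still receives a well-defined
prefix. A standalone model of the clamp; memcpy stands in for copy_to_user
and the clock numbers are made up:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct clk_rate_info {
            uint32_t cur_clk_rate_mhz;
            uint32_t max_clk_rate_mhz;
    };

    int main(void)
    {
            struct clk_rate_info info = { 1000, 1575 }; /* sample values */
            uint8_t user_buf[4]; /* old caller asked for 4 bytes only */
            size_t max_size = sizeof(user_buf);
            size_t n = max_size < sizeof(info) ? max_size : sizeof(info);

            memcpy(user_buf, &info, n); /* kernel: copy_to_user(out, ...) */
            printf("copied %zu of %zu bytes\n", n, sizeof(info));
            return 0;
    }
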
@@ -271,6 +311,10 @@ static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
rc = hw_events_info(hdev, true, args);
break;
+ case HL_INFO_CLK_RATE:
+ rc = get_clk_rate(hdev, args);
+ break;
+
default:
dev_err(dev, "Invalid request %d\n", args->op);
rc = -ENOTTY;
@@ -406,9 +450,8 @@ static long _hl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg,
retcode = func(hpriv, kdata);
- if (cmd & IOC_OUT)
- if (copy_to_user((void __user *)arg, kdata, usize))
- retcode = -EFAULT;
+ if ((cmd & IOC_OUT) && copy_to_user((void __user *)arg, kdata, usize))
+ retcode = -EFAULT;
out_err:
if (retcode)
diff --git a/drivers/misc/habanalabs/hw_queue.c b/drivers/misc/habanalabs/hw_queue.c
index 55b383b2a116..91579dde9262 100644
--- a/drivers/misc/habanalabs/hw_queue.c
+++ b/drivers/misc/habanalabs/hw_queue.c
@@ -58,8 +58,8 @@ out:
}
/*
- * ext_queue_submit_bd - Submit a buffer descriptor to an external queue
- *
+ * ext_and_hw_queue_submit_bd() - Submit a buffer descriptor to an external or a
+ * H/W queue.
* @hdev: pointer to habanalabs device structure
* @q: pointer to habanalabs queue structure
* @ctl: BD's control word
@@ -73,8 +73,8 @@ out:
* This function must be called when the scheduler mutex is taken
*
*/
-static void ext_queue_submit_bd(struct hl_device *hdev, struct hl_hw_queue *q,
- u32 ctl, u32 len, u64 ptr)
+static void ext_and_hw_queue_submit_bd(struct hl_device *hdev,
+ struct hl_hw_queue *q, u32 ctl, u32 len, u64 ptr)
{
struct hl_bd *bd;
@@ -174,6 +174,45 @@ static int int_queue_sanity_checks(struct hl_device *hdev,
}
/*
+ * hw_queue_sanity_checks() - Perform some sanity checks on a H/W queue.
+ * @hdev: Pointer to hl_device structure.
+ * @q: Pointer to hl_hw_queue structure.
+ * @num_of_entries: How many entries to check for space.
+ *
+ * Perform the following:
+ * - Make sure we have enough space in the completion queue.
+ * This check also ensures that there is enough space in the H/W queue, as
+ * both queues are of the same size.
+ * - Reserve space in the completion queue (needs to be reversed if there
+ * is a failure down the road before the actual submission of work).
+ *
+ * Both operations are done using the "free_slots_cnt" field of the completion
+ * queue. The CI counters of the queue and the completion queue are not
+ * needed/used for the H/W queue type.
+ */
+static int hw_queue_sanity_checks(struct hl_device *hdev, struct hl_hw_queue *q,
+ int num_of_entries)
+{
+ atomic_t *free_slots =
+ &hdev->completion_queue[q->hw_queue_id].free_slots_cnt;
+
+ /*
+ * Check we have enough space in the completion queue.
+ * Subtract num_of_entries from the counter (i.e. reserve the slots).
+ * If the result is negative, the CQ is full and we can't submit a
+ * new CB, so add the entries back. atomic_add_negative() returns
+ * true when the updated counter is negative.
+ */
+ if (atomic_add_negative(num_of_entries * -1, free_slots)) {
+ dev_dbg(hdev->dev, "No space for %d entries on CQ %d\n",
+ num_of_entries, q->hw_queue_id);
+ atomic_add(num_of_entries, free_slots);
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
+/*
* hl_hw_queue_send_cb_no_cmpl - send a single CB (not a JOB) without completion
*
* @hdev: pointer to hl_device structure
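
hw_queue_sanity_checks() folds the capacity check and the reservation into a
single atomic: subtract the entries, and if the counter went negative, add
them back and return -EAGAIN. A portable C11 model of the same pattern,
using fetch_sub plus a sign check where the kernel uses
atomic_add_negative():

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int free_slots = 3; /* toy CQ with three free entries */

    static int reserve(int n)
    {
            /* fetch_sub returns the old value; old - n is the new one */
            if (atomic_fetch_sub(&free_slots, n) - n < 0) {
                    atomic_fetch_add(&free_slots, n); /* roll back */
                    return -1; /* the driver returns -EAGAIN here */
            }
            return 0;
    }

    int main(void)
    {
            printf("reserve 2 -> %d\n", reserve(2)); /* succeeds */
            printf("reserve 2 -> %d\n", reserve(2)); /* fails, rolled back */
            printf("free slots: %d\n", atomic_load(&free_slots));
            return 0;
    }
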
@@ -188,7 +227,7 @@ int hl_hw_queue_send_cb_no_cmpl(struct hl_device *hdev, u32 hw_queue_id,
u32 cb_size, u64 cb_ptr)
{
struct hl_hw_queue *q = &hdev->kernel_queues[hw_queue_id];
- int rc;
+ int rc = 0;
/*
* The CPU queue is a synchronous queue with an effective depth of
@@ -206,11 +245,18 @@ int hl_hw_queue_send_cb_no_cmpl(struct hl_device *hdev, u32 hw_queue_id,
goto out;
}
- rc = ext_queue_sanity_checks(hdev, q, 1, false);
- if (rc)
- goto out;
+ /*
+ * For queues of the H/W type, hl_hw_queue_send_cb_no_cmpl() is called
+ * only during the init phase, when the queues are empty and being tested,
+ * so there is no need for sanity checks.
+ */
+ if (q->queue_type != QUEUE_TYPE_HW) {
+ rc = ext_queue_sanity_checks(hdev, q, 1, false);
+ if (rc)
+ goto out;
+ }
- ext_queue_submit_bd(hdev, q, 0, cb_size, cb_ptr);
+ ext_and_hw_queue_submit_bd(hdev, q, 0, cb_size, cb_ptr);
out:
if (q->queue_type != QUEUE_TYPE_CPU)
@@ -220,14 +266,14 @@ out:
}
/*
- * ext_hw_queue_schedule_job - submit an JOB to an external queue
+ * ext_queue_schedule_job - submit a JOB to an external queue
*
* @job: pointer to the job that needs to be submitted to the queue
*
* This function must be called when the scheduler mutex is taken
*
*/
-static void ext_hw_queue_schedule_job(struct hl_cs_job *job)
+static void ext_queue_schedule_job(struct hl_cs_job *job)
{
struct hl_device *hdev = job->cs->ctx->hdev;
struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id];
@@ -260,7 +306,7 @@ static void ext_hw_queue_schedule_job(struct hl_cs_job *job)
* H/W queues is done under the scheduler mutex
*
* No need to check if CQ is full because it was already
- * checked in hl_queue_sanity_checks
+ * checked in ext_queue_sanity_checks
*/
cq = &hdev->completion_queue[q->hw_queue_id];
cq_addr = cq->bus_address + cq->pi * sizeof(struct hl_cq_entry);
@@ -274,18 +320,18 @@ static void ext_hw_queue_schedule_job(struct hl_cs_job *job)
cq->pi = hl_cq_inc_ptr(cq->pi);
- ext_queue_submit_bd(hdev, q, ctl, len, ptr);
+ ext_and_hw_queue_submit_bd(hdev, q, ctl, len, ptr);
}
/*
- * int_hw_queue_schedule_job - submit an JOB to an internal queue
+ * int_queue_schedule_job - submit a JOB to an internal queue
*
* @job: pointer to the job that needs to be submitted to the queue
*
* This function must be called when the scheduler mutex is taken
*
*/
-static void int_hw_queue_schedule_job(struct hl_cs_job *job)
+static void int_queue_schedule_job(struct hl_cs_job *job)
{
struct hl_device *hdev = job->cs->ctx->hdev;
struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id];
@@ -308,6 +354,60 @@ static void int_hw_queue_schedule_job(struct hl_cs_job *job)
}
/*
+ * hw_queue_schedule_job - submit a JOB to a H/W queue
+ *
+ * @job: pointer to the job that needs to be submitted to the queue
+ *
+ * This function must be called when the scheduler mutex is taken
+ *
+ */
+static void hw_queue_schedule_job(struct hl_cs_job *job)
+{
+ struct hl_device *hdev = job->cs->ctx->hdev;
+ struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id];
+ struct hl_cq *cq;
+ u64 ptr;
+ u32 offset, ctl, len;
+
+ /*
+ * Upon PQE completion, COMP_DATA is used as the write data to the
+ * completion queue (QMAN HBW message), and COMP_OFFSET is used as the
+ * write address offset in the SM block (QMAN LBW message).
+ * The write address offset is calculated as "COMP_OFFSET << 2".
+ */
+ offset = job->cs->sequence & (HL_MAX_PENDING_CS - 1);
+ ctl = ((offset << BD_CTL_COMP_OFFSET_SHIFT) & BD_CTL_COMP_OFFSET_MASK) |
+ ((q->pi << BD_CTL_COMP_DATA_SHIFT) & BD_CTL_COMP_DATA_MASK);
+
+ len = job->job_cb_size;
+
+ /*
+ * A patched CB is created only if the user CB was allocated by the
+ * driver and the MMU is disabled. If the MMU is enabled, the user CB
+ * should be used instead. If the user CB wasn't allocated by the
+ * driver, assume that it holds an address.
+ */
+ if (job->patched_cb)
+ ptr = job->patched_cb->bus_address;
+ else if (job->is_kernel_allocated_cb)
+ ptr = job->user_cb->bus_address;
+ else
+ ptr = (u64) (uintptr_t) job->user_cb;
+
+ /*
+ * No need to protect pi_offset because scheduling to the
+ * H/W queues is done under the scheduler mutex
+ *
+ * No need to check if CQ is full because it was already
+ * checked in hw_queue_sanity_checks
+ */
+ cq = &hdev->completion_queue[q->hw_queue_id];
+ cq->pi = hl_cq_inc_ptr(cq->pi);
+
+ ext_and_hw_queue_submit_bd(hdev, q, ctl, len, ptr);
+}
+
+/*
* hl_hw_queue_schedule_cs - schedule a command submission
*
* @job : pointer to the CS
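
The control word built in hw_queue_schedule_job() packs two fields:
COMP_OFFSET (bits 16-23) selects where in the SM block the QMAN writes on
completion, with the H/W multiplying the offset by 4, and COMP_DATA
(bits 0-15) is the value written, here the queue's PI. A standalone
computation using the qman_if.h masks; the sequence number and PI are
sample inputs:

    #include <stdint.h>
    #include <stdio.h>

    #define BD_CTL_COMP_OFFSET_SHIFT 16
    #define BD_CTL_COMP_OFFSET_MASK  0x00FF0000u
    #define BD_CTL_COMP_DATA_SHIFT   0
    #define BD_CTL_COMP_DATA_MASK    0x0000FFFFu
    #define HL_MAX_PENDING_CS        64 /* power of 2, per habanalabs.h */

    int main(void)
    {
            uint64_t cs_sequence = 130; /* sample, not from the driver */
            uint32_t pi = 7;
            uint32_t offset = cs_sequence & (HL_MAX_PENDING_CS - 1);
            uint32_t ctl =
                ((offset << BD_CTL_COMP_OFFSET_SHIFT) & BD_CTL_COMP_OFFSET_MASK) |
                ((pi << BD_CTL_COMP_DATA_SHIFT) & BD_CTL_COMP_DATA_MASK);

            printf("offset=%u ctl=0x%08x\n", offset, ctl); /* 2, 0x00020007 */
            return 0;
    }
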
@@ -330,23 +430,34 @@ int hl_hw_queue_schedule_cs(struct hl_cs *cs)
}
q = &hdev->kernel_queues[0];
- /* This loop assumes all external queues are consecutive */
for (i = 0, cq_cnt = 0 ; i < HL_MAX_QUEUES ; i++, q++) {
- if (q->queue_type == QUEUE_TYPE_EXT) {
- if (cs->jobs_in_queue_cnt[i]) {
+ if (cs->jobs_in_queue_cnt[i]) {
+ switch (q->queue_type) {
+ case QUEUE_TYPE_EXT:
rc = ext_queue_sanity_checks(hdev, q,
- cs->jobs_in_queue_cnt[i], true);
- if (rc)
- goto unroll_cq_resv;
- cq_cnt++;
- }
- } else if (q->queue_type == QUEUE_TYPE_INT) {
- if (cs->jobs_in_queue_cnt[i]) {
+ cs->jobs_in_queue_cnt[i], true);
+ break;
+ case QUEUE_TYPE_INT:
rc = int_queue_sanity_checks(hdev, q,
- cs->jobs_in_queue_cnt[i]);
- if (rc)
- goto unroll_cq_resv;
+ cs->jobs_in_queue_cnt[i]);
+ break;
+ case QUEUE_TYPE_HW:
+ rc = hw_queue_sanity_checks(hdev, q,
+ cs->jobs_in_queue_cnt[i]);
+ break;
+ default:
+ dev_err(hdev->dev, "Queue type %d is invalid\n",
+ q->queue_type);
+ rc = -EINVAL;
+ break;
}
+
+ if (rc)
+ goto unroll_cq_resv;
+
+ if (q->queue_type == QUEUE_TYPE_EXT ||
+ q->queue_type == QUEUE_TYPE_HW)
+ cq_cnt++;
}
}
@@ -373,21 +484,30 @@ int hl_hw_queue_schedule_cs(struct hl_cs *cs)
}
list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
- if (job->ext_queue)
- ext_hw_queue_schedule_job(job);
- else
- int_hw_queue_schedule_job(job);
+ switch (job->queue_type) {
+ case QUEUE_TYPE_EXT:
+ ext_queue_schedule_job(job);
+ break;
+ case QUEUE_TYPE_INT:
+ int_queue_schedule_job(job);
+ break;
+ case QUEUE_TYPE_HW:
+ hw_queue_schedule_job(job);
+ break;
+ default:
+ break;
+ }
cs->submitted = true;
goto out;
unroll_cq_resv:
- /* This loop assumes all external queues are consecutive */
q = &hdev->kernel_queues[0];
for (i = 0 ; (i < HL_MAX_QUEUES) && (cq_cnt > 0) ; i++, q++) {
- if ((q->queue_type == QUEUE_TYPE_EXT) &&
- (cs->jobs_in_queue_cnt[i])) {
+ if ((q->queue_type == QUEUE_TYPE_EXT ||
+ q->queue_type == QUEUE_TYPE_HW) &&
+ cs->jobs_in_queue_cnt[i]) {
atomic_t *free_slots =
&hdev->completion_queue[i].free_slots_cnt;
atomic_add(cs->jobs_in_queue_cnt[i], free_slots);
@@ -414,8 +534,8 @@ void hl_hw_queue_inc_ci_kernel(struct hl_device *hdev, u32 hw_queue_id)
q->ci = hl_queue_inc_ptr(q->ci);
}
-static int ext_and_cpu_hw_queue_init(struct hl_device *hdev,
- struct hl_hw_queue *q, bool is_cpu_queue)
+static int ext_and_cpu_queue_init(struct hl_device *hdev, struct hl_hw_queue *q,
+ bool is_cpu_queue)
{
void *p;
int rc;
@@ -465,7 +585,7 @@ free_queue:
return rc;
}
-static int int_hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
+static int int_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
{
void *p;
@@ -485,18 +605,38 @@ static int int_hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
return 0;
}
-static int cpu_hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
+static int cpu_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
+{
+ return ext_and_cpu_queue_init(hdev, q, true);
+}
+
+static int ext_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
{
- return ext_and_cpu_hw_queue_init(hdev, q, true);
+ return ext_and_cpu_queue_init(hdev, q, false);
}
-static int ext_hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
+static int hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
{
- return ext_and_cpu_hw_queue_init(hdev, q, false);
+ void *p;
+
+ p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev,
+ HL_QUEUE_SIZE_IN_BYTES,
+ &q->bus_address,
+ GFP_KERNEL | __GFP_ZERO);
+ if (!p)
+ return -ENOMEM;
+
+ q->kernel_address = (u64) (uintptr_t) p;
+
+ /* Make sure read/write pointers are initialized to start of queue */
+ q->ci = 0;
+ q->pi = 0;
+
+ return 0;
}
/*
- * hw_queue_init - main initialization function for H/W queue object
+ * queue_init - main initialization function for H/W queue object
*
* @hdev: pointer to hl_device device structure
* @q: pointer to hl_hw_queue queue structure
@@ -505,7 +645,7 @@ static int ext_hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
* Allocate dma-able memory for the queue and initialize fields
* Returns 0 on success
*/
-static int hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q,
+static int queue_init(struct hl_device *hdev, struct hl_hw_queue *q,
u32 hw_queue_id)
{
int rc;
@@ -516,21 +656,20 @@ static int hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q,
switch (q->queue_type) {
case QUEUE_TYPE_EXT:
- rc = ext_hw_queue_init(hdev, q);
+ rc = ext_queue_init(hdev, q);
break;
-
case QUEUE_TYPE_INT:
- rc = int_hw_queue_init(hdev, q);
+ rc = int_queue_init(hdev, q);
break;
-
case QUEUE_TYPE_CPU:
- rc = cpu_hw_queue_init(hdev, q);
+ rc = cpu_queue_init(hdev, q);
+ break;
+ case QUEUE_TYPE_HW:
+ rc = hw_queue_init(hdev, q);
break;
-
case QUEUE_TYPE_NA:
q->valid = 0;
return 0;
-
default:
dev_crit(hdev->dev, "wrong queue type %d during init\n",
q->queue_type);
@@ -554,7 +693,7 @@ static int hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q,
*
* Free the queue memory
*/
-static void hw_queue_fini(struct hl_device *hdev, struct hl_hw_queue *q)
+static void queue_fini(struct hl_device *hdev, struct hl_hw_queue *q)
{
if (!q->valid)
return;
@@ -612,7 +751,7 @@ int hl_hw_queues_create(struct hl_device *hdev)
i < HL_MAX_QUEUES ; i++, q_ready_cnt++, q++) {
q->queue_type = asic->hw_queues_props[i].type;
- rc = hw_queue_init(hdev, q, i);
+ rc = queue_init(hdev, q, i);
if (rc) {
dev_err(hdev->dev,
"failed to initialize queue %d\n", i);
@@ -624,7 +763,7 @@ int hl_hw_queues_create(struct hl_device *hdev)
release_queues:
for (i = 0, q = hdev->kernel_queues ; i < q_ready_cnt ; i++, q++)
- hw_queue_fini(hdev, q);
+ queue_fini(hdev, q);
kfree(hdev->kernel_queues);
@@ -637,7 +776,7 @@ void hl_hw_queues_destroy(struct hl_device *hdev)
int i;
for (i = 0, q = hdev->kernel_queues ; i < HL_MAX_QUEUES ; i++, q++)
- hw_queue_fini(hdev, q);
+ queue_fini(hdev, q);
kfree(hdev->kernel_queues);
}
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/goya_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/goya_masks.h
index 8618891d5afa..3c44ef3a23ed 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/goya_masks.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/goya_masks.h
@@ -260,4 +260,6 @@
#define DMA_QM_3_GLBL_CFG1_DMA_STOP_SHIFT DMA_QM_0_GLBL_CFG1_DMA_STOP_SHIFT
#define DMA_QM_4_GLBL_CFG1_DMA_STOP_SHIFT DMA_QM_0_GLBL_CFG1_DMA_STOP_SHIFT
+#define PSOC_ETR_AXICTL_PROTCTRLBIT1_SHIFT 1
+
#endif /* ASIC_REG_GOYA_MASKS_H_ */
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/goya_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/goya_regs.h
index 19b0f0ef1d0b..fce490e6a231 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/goya_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/goya_regs.h
@@ -84,6 +84,7 @@
#include "tpc6_rtr_regs.h"
#include "tpc7_nrtr_regs.h"
#include "tpc0_eml_cfg_regs.h"
+#include "psoc_etr_regs.h"
#include "psoc_global_conf_masks.h"
#include "dma_macro_masks.h"
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_etr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_etr_regs.h
new file mode 100644
index 000000000000..b7c33e025db5
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_etr_regs.h
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_PSOC_ETR_REGS_H_
+#define ASIC_REG_PSOC_ETR_REGS_H_
+
+/*
+ *****************************************
+ * PSOC_ETR (Prototype: ETR)
+ *****************************************
+ */
+
+#define mmPSOC_ETR_RSZ 0x2C43004
+
+#define mmPSOC_ETR_STS 0x2C4300C
+
+#define mmPSOC_ETR_RRD 0x2C43010
+
+#define mmPSOC_ETR_RRP 0x2C43014
+
+#define mmPSOC_ETR_RWP 0x2C43018
+
+#define mmPSOC_ETR_TRG 0x2C4301C
+
+#define mmPSOC_ETR_CTL 0x2C43020
+
+#define mmPSOC_ETR_RWD 0x2C43024
+
+#define mmPSOC_ETR_MODE 0x2C43028
+
+#define mmPSOC_ETR_LBUFLEVEL 0x2C4302C
+
+#define mmPSOC_ETR_CBUFLEVEL 0x2C43030
+
+#define mmPSOC_ETR_BUFWM 0x2C43034
+
+#define mmPSOC_ETR_RRPHI 0x2C43038
+
+#define mmPSOC_ETR_RWPHI 0x2C4303C
+
+#define mmPSOC_ETR_AXICTL 0x2C43110
+
+#define mmPSOC_ETR_DBALO 0x2C43118
+
+#define mmPSOC_ETR_DBAHI 0x2C4311C
+
+#define mmPSOC_ETR_FFSR 0x2C43300
+
+#define mmPSOC_ETR_FFCR 0x2C43304
+
+#define mmPSOC_ETR_PSCR 0x2C43308
+
+#define mmPSOC_ETR_ITMISCOP0 0x2C43EE0
+
+#define mmPSOC_ETR_ITTRFLIN 0x2C43EE8
+
+#define mmPSOC_ETR_ITATBDATA0 0x2C43EEC
+
+#define mmPSOC_ETR_ITATBCTR2 0x2C43EF0
+
+#define mmPSOC_ETR_ITATBCTR1 0x2C43EF4
+
+#define mmPSOC_ETR_ITATBCTR0 0x2C43EF8
+
+#define mmPSOC_ETR_ITCTRL 0x2C43F00
+
+#define mmPSOC_ETR_CLAIMSET 0x2C43FA0
+
+#define mmPSOC_ETR_CLAIMCLR 0x2C43FA4
+
+#define mmPSOC_ETR_LAR 0x2C43FB0
+
+#define mmPSOC_ETR_LSR 0x2C43FB4
+
+#define mmPSOC_ETR_AUTHSTATUS 0x2C43FB8
+
+#define mmPSOC_ETR_DEVID 0x2C43FC8
+
+#define mmPSOC_ETR_DEVTYPE 0x2C43FCC
+
+#define mmPSOC_ETR_PERIPHID4 0x2C43FD0
+
+#define mmPSOC_ETR_PERIPHID5 0x2C43FD4
+
+#define mmPSOC_ETR_PERIPHID6 0x2C43FD8
+
+#define mmPSOC_ETR_PERIPHID7 0x2C43FDC
+
+#define mmPSOC_ETR_PERIPHID0 0x2C43FE0
+
+#define mmPSOC_ETR_PERIPHID1 0x2C43FE4
+
+#define mmPSOC_ETR_PERIPHID2 0x2C43FE8
+
+#define mmPSOC_ETR_PERIPHID3 0x2C43FEC
+
+#define mmPSOC_ETR_COMPID0 0x2C43FF0
+
+#define mmPSOC_ETR_COMPID1 0x2C43FF4
+
+#define mmPSOC_ETR_COMPID2 0x2C43FF8
+
+#define mmPSOC_ETR_COMPID3 0x2C43FFC
+
+#endif /* ASIC_REG_PSOC_ETR_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/hl_boot_if.h b/drivers/misc/habanalabs/include/hl_boot_if.h
index 4cd04c090285..2853a2de8cf6 100644
--- a/drivers/misc/habanalabs/include/hl_boot_if.h
+++ b/drivers/misc/habanalabs/include/hl_boot_if.h
@@ -20,6 +20,8 @@ enum cpu_boot_status {
CPU_BOOT_STATUS_DRAM_INIT_FAIL,
CPU_BOOT_STATUS_FIT_CORRUPTED,
CPU_BOOT_STATUS_UBOOT_NOT_READY,
+ CPU_BOOT_STATUS_RESERVED,
+ CPU_BOOT_STATUS_TS_INIT_FAIL,
};
enum kmd_msg {
diff --git a/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h b/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h
index 71ea3c3e8ba3..a6851a9d3f03 100644
--- a/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h
+++ b/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h
@@ -12,18 +12,16 @@
#define PAGE_SHIFT_2MB 21
#define PAGE_SIZE_2MB (_AC(1, UL) << PAGE_SHIFT_2MB)
#define PAGE_SIZE_4KB (_AC(1, UL) << PAGE_SHIFT_4KB)
-#define PAGE_MASK_2MB (~(PAGE_SIZE_2MB - 1))
#define PAGE_PRESENT_MASK 0x0000000000001ull
#define SWAP_OUT_MASK 0x0000000000004ull
#define LAST_MASK 0x0000000000800ull
-#define PHYS_ADDR_MASK 0xFFFFFFFFFFFFF000ull
#define HOP0_MASK 0x3000000000000ull
#define HOP1_MASK 0x0FF8000000000ull
#define HOP2_MASK 0x0007FC0000000ull
#define HOP3_MASK 0x000003FE00000ull
#define HOP4_MASK 0x00000001FF000ull
-#define OFFSET_MASK 0x0000000000FFFull
+#define FLAGS_MASK 0x0000000000FFFull
#define HOP0_SHIFT 48
#define HOP1_SHIFT 39
@@ -31,8 +29,7 @@
#define HOP3_SHIFT 21
#define HOP4_SHIFT 12
-#define PTE_PHYS_ADDR_SHIFT 12
-#define PTE_PHYS_ADDR_MASK ~OFFSET_MASK
+#define HOP_PHYS_ADDR_MASK (~FLAGS_MASK)
#define HL_PTE_SIZE sizeof(u64)
#define HOP_TABLE_SIZE PAGE_SIZE_4KB
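The hop masks and shifts above carve a 64-bit device virtual address into a 2-bit hop0 index, four 9-bit hop indices, and a 12-bit flags field. A minimal stand-alone sketch of that decomposition, with constants copied from this header (HOP2_SHIFT is not visible in the hunk and is assumed to be 30 from the 9-bit stride):

    #include <stdint.h>
    #include <stdio.h>

    #define HOP0_MASK   0x3000000000000ull
    #define HOP1_MASK   0x0FF8000000000ull
    #define HOP2_MASK   0x0007FC0000000ull
    #define HOP3_MASK   0x000003FE00000ull
    #define HOP4_MASK   0x00000001FF000ull
    #define FLAGS_MASK  0x0000000000FFFull
    #define HOP0_SHIFT  48
    #define HOP1_SHIFT  39
    #define HOP2_SHIFT  30 /* assumed, not shown in this hunk */
    #define HOP3_SHIFT  21
    #define HOP4_SHIFT  12

    /*
     * Index into hop N's table; get_hopN_pte_addr() then scales this
     * by HL_PTE_SIZE and adds it to the hop table address.
     */
    #define HOP_IDX(va, mask, shift) (((va) & (mask)) >> (shift))

    int main(void)
    {
        uint64_t va = 0x0001234567890ull;

        printf("hop0=%llu hop1=%llu hop2=%llu hop3=%llu hop4=%llu\n",
               (unsigned long long)HOP_IDX(va, HOP0_MASK, HOP0_SHIFT),
               (unsigned long long)HOP_IDX(va, HOP1_MASK, HOP1_SHIFT),
               (unsigned long long)HOP_IDX(va, HOP2_MASK, HOP2_SHIFT),
               (unsigned long long)HOP_IDX(va, HOP3_MASK, HOP3_SHIFT),
               (unsigned long long)HOP_IDX(va, HOP4_MASK, HOP4_SHIFT));
        return 0;
    }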
diff --git a/drivers/misc/habanalabs/include/qman_if.h b/drivers/misc/habanalabs/include/qman_if.h
index bf59bbe27fdc..0fdb49188ed7 100644
--- a/drivers/misc/habanalabs/include/qman_if.h
+++ b/drivers/misc/habanalabs/include/qman_if.h
@@ -23,6 +23,8 @@ struct hl_bd {
#define HL_BD_SIZE sizeof(struct hl_bd)
/*
+ * S/W CTL FIELDS
+ *
* BD_CTL_REPEAT_VALID tells the CP whether the repeat field in the BD CTL is
* valid. 1 means the repeat field is valid, 0 means not-valid,
* i.e. repeat == 1
@@ -34,6 +36,16 @@ struct hl_bd {
#define BD_CTL_SHADOW_INDEX_MASK 0x00000FFF
/*
+ * H/W CTL FIELDS
+ */
+
+#define BD_CTL_COMP_OFFSET_SHIFT 16
+#define BD_CTL_COMP_OFFSET_MASK 0x00FF0000
+
+#define BD_CTL_COMP_DATA_SHIFT 0
+#define BD_CTL_COMP_DATA_MASK 0x0000FFFF
+
+/*
* COMPLETION QUEUE
*/
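A short sketch of how the new H/W CTL fields combine into the 32-bit BD CTL word, using the shift/mask values defined above (the helper name is illustrative, not part of the driver):

    #include <stdint.h>

    #define BD_CTL_COMP_OFFSET_SHIFT 16
    #define BD_CTL_COMP_OFFSET_MASK  0x00FF0000
    #define BD_CTL_COMP_DATA_SHIFT   0
    #define BD_CTL_COMP_DATA_MASK    0x0000FFFF

    /* pack a completion offset and completion data into one ctl word */
    static inline uint32_t bd_ctl_pack(uint32_t comp_offset,
                                       uint32_t comp_data)
    {
        return ((comp_offset << BD_CTL_COMP_OFFSET_SHIFT) &
                BD_CTL_COMP_OFFSET_MASK) |
               ((comp_data << BD_CTL_COMP_DATA_SHIFT) &
                BD_CTL_COMP_DATA_MASK);
    }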
diff --git a/drivers/misc/habanalabs/memory.c b/drivers/misc/habanalabs/memory.c
index 365fb0cb8dff..6c72cb4eff54 100644
--- a/drivers/misc/habanalabs/memory.c
+++ b/drivers/misc/habanalabs/memory.c
@@ -13,7 +13,6 @@
#include <linux/slab.h>
#include <linux/genalloc.h>
-#define PGS_IN_2MB_PAGE (PAGE_SIZE_2MB >> PAGE_SHIFT)
#define HL_MMU_DEBUG 0
/*
@@ -159,20 +158,19 @@ pages_pack_err:
}
/*
- * get_userptr_from_host_va - initialize userptr structure from given host
- * virtual address
- *
- * @hdev : habanalabs device structure
- * @args : parameters containing the virtual address and size
- * @p_userptr : pointer to result userptr structure
+ * dma_map_host_va - DMA mapping of the given host virtual address.
+ * @hdev: habanalabs device structure
+ * @addr: the host virtual address of the memory area
+ * @size: the size of the memory area
+ * @p_userptr: pointer to result userptr structure
*
* This function does the following:
* - Allocate userptr structure
* - Pin the given host memory using the userptr structure
* - Perform DMA mapping to have the DMA addresses of the pages
*/
-static int get_userptr_from_host_va(struct hl_device *hdev,
- struct hl_mem_in *args, struct hl_userptr **p_userptr)
+static int dma_map_host_va(struct hl_device *hdev, u64 addr, u64 size,
+ struct hl_userptr **p_userptr)
{
struct hl_userptr *userptr;
int rc;
@@ -183,8 +181,7 @@ static int get_userptr_from_host_va(struct hl_device *hdev,
goto userptr_err;
}
- rc = hl_pin_host_memory(hdev, args->map_host.host_virt_addr,
- args->map_host.mem_size, userptr);
+ rc = hl_pin_host_memory(hdev, addr, size, userptr);
if (rc) {
dev_err(hdev->dev, "Failed to pin host memory\n");
goto pin_err;
@@ -215,16 +212,16 @@ userptr_err:
}
/*
- * free_userptr - free userptr structure
- *
- * @hdev : habanalabs device structure
- * @userptr : userptr to free
+ * dma_unmap_host_va - DMA unmapping of the given host virtual address.
+ * @hdev: habanalabs device structure
+ * @userptr: userptr to free
*
* This function does the following:
* - Unpins the physical pages
* - Frees the userptr structure
*/
-static void free_userptr(struct hl_device *hdev, struct hl_userptr *userptr)
+static void dma_unmap_host_va(struct hl_device *hdev,
+ struct hl_userptr *userptr)
{
hl_unpin_host_memory(hdev, userptr);
kfree(userptr);
@@ -253,10 +250,9 @@ static void dram_pg_pool_do_release(struct kref *ref)
}
/*
- * free_phys_pg_pack - free physical page pack
- *
- * @hdev : habanalabs device structure
- * @phys_pg_pack : physical page pack to free
+ * free_phys_pg_pack - free physical page pack
+ * @hdev: habanalabs device structure
+ * @phys_pg_pack: physical page pack to free
*
* This function does the following:
* - For DRAM memory only, iterate over the pack and free each physical block
@@ -264,7 +260,7 @@ static void dram_pg_pool_do_release(struct kref *ref)
* - Free the hl_vm_phys_pg_pack structure
*/
static void free_phys_pg_pack(struct hl_device *hdev,
- struct hl_vm_phys_pg_pack *phys_pg_pack)
+ struct hl_vm_phys_pg_pack *phys_pg_pack)
{
struct hl_vm *vm = &hdev->vm;
u64 i;
@@ -519,8 +515,8 @@ static inline int add_va_block(struct hl_device *hdev,
* - Return the start address of the virtual block
*/
static u64 get_va_block(struct hl_device *hdev,
- struct hl_va_range *va_range, u64 size, u64 hint_addr,
- bool is_userptr)
+ struct hl_va_range *va_range, u64 size, u64 hint_addr,
+ bool is_userptr)
{
struct hl_vm_va_block *va_block, *new_va_block = NULL;
u64 valid_start, valid_size, prev_start, prev_end, page_mask,
@@ -528,18 +524,17 @@ static u64 get_va_block(struct hl_device *hdev,
u32 page_size;
bool add_prev = false;
- if (is_userptr) {
+ if (is_userptr)
/*
* We cannot know if the user allocated memory with huge pages
* or not, hence we continue with the biggest possible
* granularity.
*/
- page_size = PAGE_SIZE_2MB;
- page_mask = PAGE_MASK_2MB;
- } else {
- page_size = hdev->asic_prop.dram_page_size;
- page_mask = ~((u64)page_size - 1);
- }
+ page_size = hdev->asic_prop.pmmu.huge_page_size;
+ else
+ page_size = hdev->asic_prop.dmmu.page_size;
+
+ page_mask = ~((u64)page_size - 1);
mutex_lock(&va_range->lock);
@@ -549,7 +544,6 @@ static u64 get_va_block(struct hl_device *hdev,
/* calc the first possible aligned addr */
valid_start = va_block->start;
-
if (valid_start & (page_size - 1)) {
valid_start &= page_mask;
valid_start += page_size;
@@ -561,7 +555,6 @@ static u64 get_va_block(struct hl_device *hdev,
if (valid_size >= size &&
(!new_va_block || valid_size < res_valid_size)) {
-
new_va_block = va_block;
res_valid_start = valid_start;
res_valid_size = valid_size;
@@ -631,11 +624,10 @@ static u32 get_sg_info(struct scatterlist *sg, dma_addr_t *dma_addr)
/*
* init_phys_pg_pack_from_userptr - initialize physical page pack from host
- * memory
- *
- * @ctx : current context
- * @userptr : userptr to initialize from
- * @pphys_pg_pack : res pointer
+ * memory
+ * @ctx: current context
+ * @userptr: userptr to initialize from
+ * @pphys_pg_pack: result pointer
*
* This function does the following:
* - Pin the physical pages related to the given virtual block
@@ -643,16 +635,19 @@ static u32 get_sg_info(struct scatterlist *sg, dma_addr_t *dma_addr)
* virtual block
*/
static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
- struct hl_userptr *userptr,
- struct hl_vm_phys_pg_pack **pphys_pg_pack)
+ struct hl_userptr *userptr,
+ struct hl_vm_phys_pg_pack **pphys_pg_pack)
{
+ struct hl_mmu_properties *mmu_prop = &ctx->hdev->asic_prop.pmmu;
struct hl_vm_phys_pg_pack *phys_pg_pack;
struct scatterlist *sg;
dma_addr_t dma_addr;
u64 page_mask, total_npages;
- u32 npages, page_size = PAGE_SIZE;
+ u32 npages, page_size = PAGE_SIZE,
+ huge_page_size = mmu_prop->huge_page_size;
bool first = true, is_huge_page_opt = true;
int rc, i, j;
+ u32 pgs_in_huge_page = huge_page_size >> __ffs(page_size);
phys_pg_pack = kzalloc(sizeof(*phys_pg_pack), GFP_KERNEL);
if (!phys_pg_pack)
@@ -675,14 +670,14 @@ static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
total_npages += npages;
- if ((npages % PGS_IN_2MB_PAGE) ||
- (dma_addr & (PAGE_SIZE_2MB - 1)))
+ if ((npages % pgs_in_huge_page) ||
+ (dma_addr & (huge_page_size - 1)))
is_huge_page_opt = false;
}
if (is_huge_page_opt) {
- page_size = PAGE_SIZE_2MB;
- total_npages /= PGS_IN_2MB_PAGE;
+ page_size = huge_page_size;
+ do_div(total_npages, pgs_in_huge_page);
}
page_mask = ~(((u64) page_size) - 1);
@@ -714,7 +709,7 @@ static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
dma_addr += page_size;
if (is_huge_page_opt)
- npages -= PGS_IN_2MB_PAGE;
+ npages -= pgs_in_huge_page;
else
npages--;
}
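The hunk above replaces the hard-coded 2MB constants with the PMMU's huge_page_size: a user buffer is promoted to huge pages only if every scatter-gather chunk holds a whole number of huge pages and its bus address is huge-page aligned. The test in isolation (a sketch; pgs_in_huge_page is huge_page_size >> __ffs(page_size), e.g. 512 for 2MB over 4KB base pages):

    #include <stdbool.h>
    #include <stdint.h>

    static bool chunk_allows_huge_pages(uint64_t dma_addr, uint32_t npages,
                                        uint32_t pgs_in_huge_page,
                                        uint64_t huge_page_size)
    {
        /* whole number of huge pages, and huge-page aligned bus address */
        return !(npages % pgs_in_huge_page) &&
               !(dma_addr & (huge_page_size - 1));
    }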
@@ -731,19 +726,18 @@ page_pack_arr_mem_err:
}
/*
- * map_phys_page_pack - maps the physical page pack
- *
- * @ctx : current context
- * @vaddr : start address of the virtual area to map from
- * @phys_pg_pack : the pack of physical pages to map to
+ * map_phys_pg_pack - maps the physical page pack.
+ * @ctx: current context
+ * @vaddr: start address of the virtual area to map from
+ * @phys_pg_pack: the pack of physical pages to map to
*
* This function does the following:
* - Maps each chunk of virtual memory to matching physical chunk
* - Stores number of successful mappings in the given argument
- * - Returns 0 on success, error code otherwise.
+ * - Returns 0 on success, error code otherwise
*/
-static int map_phys_page_pack(struct hl_ctx *ctx, u64 vaddr,
- struct hl_vm_phys_pg_pack *phys_pg_pack)
+static int map_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr,
+ struct hl_vm_phys_pg_pack *phys_pg_pack)
{
struct hl_device *hdev = ctx->hdev;
u64 next_vaddr = vaddr, paddr, mapped_pg_cnt = 0, i;
@@ -783,6 +777,36 @@ err:
return rc;
}
+/*
+ * unmap_phys_pg_pack - unmaps the physical page pack
+ * @ctx: current context
+ * @vaddr: start address of the virtual area to unmap
+ * @phys_pg_pack: the pack of physical pages to unmap
+ */
+static void unmap_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr,
+ struct hl_vm_phys_pg_pack *phys_pg_pack)
+{
+ struct hl_device *hdev = ctx->hdev;
+ u64 next_vaddr, i;
+ u32 page_size;
+
+ page_size = phys_pg_pack->page_size;
+ next_vaddr = vaddr;
+
+ for (i = 0 ; i < phys_pg_pack->npages ; i++, next_vaddr += page_size) {
+ if (hl_mmu_unmap(ctx, next_vaddr, page_size))
+ dev_warn_ratelimited(hdev->dev,
+ "unmap failed for vaddr: 0x%llx\n", next_vaddr);
+
+ /*
+	 * unmapping on Palladium can take a really long time, so avoid a CPU
+	 * soft lockup by sleeping a little between unmapping pages
+ */
+ if (hdev->pldm)
+ usleep_range(500, 1000);
+ }
+}
+
static int get_paddr_from_handle(struct hl_ctx *ctx, struct hl_mem_in *args,
u64 *paddr)
{
@@ -839,7 +863,10 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
*device_addr = 0;
if (is_userptr) {
- rc = get_userptr_from_host_va(hdev, args, &userptr);
+ u64 addr = args->map_host.host_virt_addr,
+ size = args->map_host.mem_size;
+
+ rc = dma_map_host_va(hdev, addr, size, &userptr);
if (rc) {
dev_err(hdev->dev, "failed to get userptr from va\n");
return rc;
@@ -850,7 +877,7 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
if (rc) {
dev_err(hdev->dev,
"unable to init page pack for vaddr 0x%llx\n",
- args->map_host.host_virt_addr);
+ addr);
goto init_page_pack_err;
}
@@ -909,7 +936,7 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
mutex_lock(&ctx->mmu_lock);
- rc = map_phys_page_pack(ctx, ret_vaddr, phys_pg_pack);
+ rc = map_phys_pg_pack(ctx, ret_vaddr, phys_pg_pack);
if (rc) {
mutex_unlock(&ctx->mmu_lock);
dev_err(hdev->dev, "mapping page pack failed for handle %u\n",
@@ -917,7 +944,7 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
goto map_err;
}
- hdev->asic_funcs->mmu_invalidate_cache(hdev, false);
+ hdev->asic_funcs->mmu_invalidate_cache(hdev, false, *vm_type);
mutex_unlock(&ctx->mmu_lock);
@@ -955,7 +982,7 @@ shared_err:
free_phys_pg_pack(hdev, phys_pg_pack);
init_page_pack_err:
if (is_userptr)
- free_userptr(hdev, userptr);
+ dma_unmap_host_va(hdev, userptr);
return rc;
}
@@ -965,20 +992,20 @@ init_page_pack_err:
*
* @ctx : current context
* @vaddr : device virtual address to unmap
+ * @ctx_free : true if in context free flow, false otherwise.
*
* This function does the following:
* - Unmap the physical pages related to the given virtual address
* - return the device virtual block to the virtual block list
*/
-static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr)
+static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr, bool ctx_free)
{
struct hl_device *hdev = ctx->hdev;
struct hl_vm_phys_pg_pack *phys_pg_pack = NULL;
struct hl_vm_hash_node *hnode = NULL;
struct hl_userptr *userptr = NULL;
+ struct hl_va_range *va_range;
enum vm_type_t *vm_type;
- u64 next_vaddr, i;
- u32 page_size;
bool is_userptr;
int rc;
@@ -1003,9 +1030,10 @@ static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr)
if (*vm_type == VM_TYPE_USERPTR) {
is_userptr = true;
+ va_range = &ctx->host_va_range;
userptr = hnode->ptr;
rc = init_phys_pg_pack_from_userptr(ctx, userptr,
- &phys_pg_pack);
+ &phys_pg_pack);
if (rc) {
dev_err(hdev->dev,
"unable to init page pack for vaddr 0x%llx\n",
@@ -1014,6 +1042,7 @@ static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr)
}
} else if (*vm_type == VM_TYPE_PHYS_PACK) {
is_userptr = false;
+ va_range = &ctx->dram_va_range;
phys_pg_pack = hnode->ptr;
} else {
dev_warn(hdev->dev,
@@ -1029,42 +1058,41 @@ static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr)
goto mapping_cnt_err;
}
- page_size = phys_pg_pack->page_size;
- vaddr &= ~(((u64) page_size) - 1);
-
- next_vaddr = vaddr;
+ vaddr &= ~(((u64) phys_pg_pack->page_size) - 1);
mutex_lock(&ctx->mmu_lock);
- for (i = 0 ; i < phys_pg_pack->npages ; i++, next_vaddr += page_size) {
- if (hl_mmu_unmap(ctx, next_vaddr, page_size))
- dev_warn_ratelimited(hdev->dev,
- "unmap failed for vaddr: 0x%llx\n", next_vaddr);
-
- /* unmapping on Palladium can be really long, so avoid a CPU
- * soft lockup bug by sleeping a little between unmapping pages
- */
- if (hdev->pldm)
- usleep_range(500, 1000);
- }
+ unmap_phys_pg_pack(ctx, vaddr, phys_pg_pack);
- hdev->asic_funcs->mmu_invalidate_cache(hdev, true);
+ /*
+ * During context free this function is called in a loop to clean all
+ * the context mappings. Hence the cache invalidation can be called once
+ * at the loop end rather than for each iteration
+ */
+ if (!ctx_free)
+ hdev->asic_funcs->mmu_invalidate_cache(hdev, true, *vm_type);
mutex_unlock(&ctx->mmu_lock);
- if (add_va_block(hdev,
- is_userptr ? &ctx->host_va_range : &ctx->dram_va_range,
- vaddr,
- vaddr + phys_pg_pack->total_size - 1))
- dev_warn(hdev->dev, "add va block failed for vaddr: 0x%llx\n",
- vaddr);
+ /*
+ * No point in maintaining the free VA block list if the context is
+ * closing as the list will be freed anyway
+ */
+ if (!ctx_free) {
+ rc = add_va_block(hdev, va_range, vaddr,
+ vaddr + phys_pg_pack->total_size - 1);
+ if (rc)
+ dev_warn(hdev->dev,
+ "add va block failed for vaddr: 0x%llx\n",
+ vaddr);
+ }
atomic_dec(&phys_pg_pack->mapping_cnt);
kfree(hnode);
if (is_userptr) {
free_phys_pg_pack(hdev, phys_pg_pack);
- free_userptr(hdev, userptr);
+ dma_unmap_host_va(hdev, userptr);
}
return 0;
@@ -1189,8 +1217,8 @@ int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data)
break;
case HL_MEM_OP_UNMAP:
- rc = unmap_device_va(ctx,
- args->in.unmap.device_virt_addr);
+ rc = unmap_device_va(ctx, args->in.unmap.device_virt_addr,
+ false);
break;
default:
@@ -1203,20 +1231,72 @@ out:
return rc;
}
+static int get_user_memory(struct hl_device *hdev, u64 addr, u64 size,
+ u32 npages, u64 start, u32 offset,
+ struct hl_userptr *userptr)
+{
+ int rc;
+
+ if (!access_ok((void __user *) (uintptr_t) addr, size)) {
+ dev_err(hdev->dev, "user pointer is invalid - 0x%llx\n", addr);
+ return -EFAULT;
+ }
+
+ userptr->vec = frame_vector_create(npages);
+ if (!userptr->vec) {
+ dev_err(hdev->dev, "Failed to create frame vector\n");
+ return -ENOMEM;
+ }
+
+ rc = get_vaddr_frames(start, npages, FOLL_FORCE | FOLL_WRITE,
+ userptr->vec);
+
+ if (rc != npages) {
+ dev_err(hdev->dev,
+ "Failed to map host memory, user ptr probably wrong\n");
+ if (rc < 0)
+ goto destroy_framevec;
+ rc = -EFAULT;
+ goto put_framevec;
+ }
+
+ if (frame_vector_to_pages(userptr->vec) < 0) {
+ dev_err(hdev->dev,
+ "Failed to translate frame vector to pages\n");
+ rc = -EFAULT;
+ goto put_framevec;
+ }
+
+ rc = sg_alloc_table_from_pages(userptr->sgt,
+ frame_vector_pages(userptr->vec),
+ npages, offset, size, GFP_ATOMIC);
+ if (rc < 0) {
+ dev_err(hdev->dev, "failed to create SG table from pages\n");
+ goto put_framevec;
+ }
+
+ return 0;
+
+put_framevec:
+ put_vaddr_frames(userptr->vec);
+destroy_framevec:
+ frame_vector_destroy(userptr->vec);
+ return rc;
+}
+
/*
- * hl_pin_host_memory - pins a chunk of host memory
- *
- * @hdev : pointer to the habanalabs device structure
- * @addr : the user-space virtual address of the memory area
- * @size : the size of the memory area
- * @userptr : pointer to hl_userptr structure
+ * hl_pin_host_memory - pins a chunk of host memory.
+ * @hdev: pointer to the habanalabs device structure
+ * @addr: the host virtual address of the memory area
+ * @size: the size of the memory area
+ * @userptr: pointer to hl_userptr structure
*
* This function does the following:
* - Pins the physical pages
- * - Create a SG list from those pages
+ * - Create an SG list from those pages
*/
int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size,
- struct hl_userptr *userptr)
+ struct hl_userptr *userptr)
{
u64 start, end;
u32 npages, offset;
@@ -1227,11 +1307,6 @@ int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size,
return -EINVAL;
}
- if (!access_ok((void __user *) (uintptr_t) addr, size)) {
- dev_err(hdev->dev, "user pointer is invalid - 0x%llx\n", addr);
- return -EFAULT;
- }
-
/*
* If the combination of the address and size requested for this memory
* region causes an integer overflow, return error.
@@ -1244,6 +1319,14 @@ int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size,
return -EINVAL;
}
+ /*
+	 * This function can also be called from the data path, hence always
+	 * use GFP_ATOMIC as this is not a big allocation.
+ */
+ userptr->sgt = kzalloc(sizeof(*userptr->sgt), GFP_ATOMIC);
+ if (!userptr->sgt)
+ return -ENOMEM;
+
start = addr & PAGE_MASK;
offset = addr & ~PAGE_MASK;
end = PAGE_ALIGN(addr + size);
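For reference, the pinning bounds computed above behave as follows for a 4KB base page. The npages line is the driver's next step and is not visible in this hunk, so it is an assumption here:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT    12
    #define PAGE_SIZE     (1ull << PAGE_SHIFT)
    #define PAGE_MASK     (~(PAGE_SIZE - 1))
    #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

    int main(void)
    {
        uint64_t addr = 0x7f0000001234ull, size = 0x3000;
        uint64_t start = addr & PAGE_MASK;      /* 0x7f0000001000 */
        uint32_t offset = addr & ~PAGE_MASK;    /* 0x234 */
        uint64_t end = PAGE_ALIGN(addr + size); /* 0x7f0000005000 */
        uint32_t npages = (end - start) >> PAGE_SHIFT; /* 4 */

        printf("start=%#llx offset=%#x end=%#llx npages=%u\n",
               (unsigned long long)start, offset,
               (unsigned long long)end, npages);
        return 0;
    }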
@@ -1254,42 +1337,12 @@ int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size,
userptr->dma_mapped = false;
INIT_LIST_HEAD(&userptr->job_node);
- userptr->vec = frame_vector_create(npages);
- if (!userptr->vec) {
- dev_err(hdev->dev, "Failed to create frame vector\n");
- return -ENOMEM;
- }
-
- rc = get_vaddr_frames(start, npages, FOLL_FORCE | FOLL_WRITE,
- userptr->vec);
-
- if (rc != npages) {
- dev_err(hdev->dev,
- "Failed to map host memory, user ptr probably wrong\n");
- if (rc < 0)
- goto destroy_framevec;
- rc = -EFAULT;
- goto put_framevec;
- }
-
- if (frame_vector_to_pages(userptr->vec) < 0) {
+ rc = get_user_memory(hdev, addr, size, npages, start, offset,
+ userptr);
+ if (rc) {
dev_err(hdev->dev,
- "Failed to translate frame vector to pages\n");
- rc = -EFAULT;
- goto put_framevec;
- }
-
- userptr->sgt = kzalloc(sizeof(*userptr->sgt), GFP_ATOMIC);
- if (!userptr->sgt) {
- rc = -ENOMEM;
- goto put_framevec;
- }
-
- rc = sg_alloc_table_from_pages(userptr->sgt,
- frame_vector_pages(userptr->vec),
- npages, offset, size, GFP_ATOMIC);
- if (rc < 0) {
- dev_err(hdev->dev, "failed to create SG table from pages\n");
+ "failed to get user memory for address 0x%llx\n",
+ addr);
goto free_sgt;
}
@@ -1299,34 +1352,28 @@ int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size,
free_sgt:
kfree(userptr->sgt);
-put_framevec:
- put_vaddr_frames(userptr->vec);
-destroy_framevec:
- frame_vector_destroy(userptr->vec);
return rc;
}
/*
- * hl_unpin_host_memory - unpins a chunk of host memory
- *
- * @hdev : pointer to the habanalabs device structure
- * @userptr : pointer to hl_userptr structure
+ * hl_unpin_host_memory - unpins a chunk of host memory.
+ * @hdev: pointer to the habanalabs device structure
+ * @userptr: pointer to hl_userptr structure
*
* This function does the following:
* - Unpins the physical pages related to the host memory
* - Free the SG list
*/
-int hl_unpin_host_memory(struct hl_device *hdev, struct hl_userptr *userptr)
+void hl_unpin_host_memory(struct hl_device *hdev, struct hl_userptr *userptr)
{
struct page **pages;
hl_debugfs_remove_userptr(hdev, userptr);
if (userptr->dma_mapped)
- hdev->asic_funcs->hl_dma_unmap_sg(hdev,
- userptr->sgt->sgl,
- userptr->sgt->nents,
- userptr->dir);
+ hdev->asic_funcs->hl_dma_unmap_sg(hdev, userptr->sgt->sgl,
+ userptr->sgt->nents,
+ userptr->dir);
pages = frame_vector_pages(userptr->vec);
if (!IS_ERR(pages)) {
@@ -1342,8 +1389,6 @@ int hl_unpin_host_memory(struct hl_device *hdev, struct hl_userptr *userptr)
sg_free_table(userptr->sgt);
kfree(userptr->sgt);
-
- return 0;
}
/*
@@ -1542,43 +1587,16 @@ int hl_vm_ctx_init(struct hl_ctx *ctx)
* @hdev : pointer to the habanalabs structure
 * @va_range : pointer to virtual address range
*
- * This function initializes the following:
- * - Checks that the given range contains the whole initial range
+ * This function does the following:
* - Frees the virtual addresses block list and its lock
*/
static void hl_va_range_fini(struct hl_device *hdev,
struct hl_va_range *va_range)
{
- struct hl_vm_va_block *va_block;
-
- if (list_empty(&va_range->list)) {
- dev_warn(hdev->dev,
- "va list should not be empty on cleanup!\n");
- goto out;
- }
-
- if (!list_is_singular(&va_range->list)) {
- dev_warn(hdev->dev,
- "va list should not contain multiple blocks on cleanup!\n");
- goto free_va_list;
- }
-
- va_block = list_first_entry(&va_range->list, typeof(*va_block), node);
-
- if (va_block->start != va_range->start_addr ||
- va_block->end != va_range->end_addr) {
- dev_warn(hdev->dev,
- "wrong va block on cleanup, from 0x%llx to 0x%llx\n",
- va_block->start, va_block->end);
- goto free_va_list;
- }
-
-free_va_list:
mutex_lock(&va_range->lock);
clear_va_list_locked(hdev, &va_range->list);
mutex_unlock(&va_range->lock);
-out:
mutex_destroy(&va_range->lock);
}
@@ -1613,21 +1631,31 @@ void hl_vm_ctx_fini(struct hl_ctx *ctx)
hl_debugfs_remove_ctx_mem_hash(hdev, ctx);
- if (!hash_empty(ctx->mem_hash))
- dev_notice(hdev->dev, "ctx is freed while it has va in use\n");
+ /*
+	 * If a hard reset is pending, something already went wrong, so there
+	 * is no point in printing yet another side-effect error
+ */
+ if (!hdev->hard_reset_pending && !hash_empty(ctx->mem_hash))
+ dev_notice(hdev->dev,
+ "ctx %d is freed while it has va in use\n",
+ ctx->asid);
hash_for_each_safe(ctx->mem_hash, i, tmp_node, hnode, node) {
dev_dbg(hdev->dev,
"hl_mem_hash_node of vaddr 0x%llx of asid %d is still alive\n",
hnode->vaddr, ctx->asid);
- unmap_device_va(ctx, hnode->vaddr);
+ unmap_device_va(ctx, hnode->vaddr, true);
}
+ /* invalidate the cache once after the unmapping loop */
+ hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_USERPTR);
+ hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_PHYS_PACK);
+
spin_lock(&vm->idr_lock);
idr_for_each_entry(&vm->phys_pg_pack_handles, phys_pg_list, i)
if (phys_pg_list->asid == ctx->asid) {
dev_dbg(hdev->dev,
- "page list 0x%p of asid %d is still alive\n",
+ "page list 0x%px of asid %d is still alive\n",
phys_pg_list, ctx->asid);
atomic64_sub(phys_pg_list->total_size,
&hdev->dram_used_mem);
diff --git a/drivers/misc/habanalabs/mmu.c b/drivers/misc/habanalabs/mmu.c
index 176c315836f1..6262b26e2086 100644
--- a/drivers/misc/habanalabs/mmu.c
+++ b/drivers/misc/habanalabs/mmu.c
@@ -25,10 +25,9 @@ static struct pgt_info *get_pgt_info(struct hl_ctx *ctx, u64 hop_addr)
return pgt_info;
}
-static void free_hop(struct hl_ctx *ctx, u64 hop_addr)
+static void _free_hop(struct hl_ctx *ctx, struct pgt_info *pgt_info)
{
struct hl_device *hdev = ctx->hdev;
- struct pgt_info *pgt_info = get_pgt_info(ctx, hop_addr);
gen_pool_free(hdev->mmu_pgt_pool, pgt_info->phys_addr,
hdev->asic_prop.mmu_hop_table_size);
@@ -37,6 +36,13 @@ static void free_hop(struct hl_ctx *ctx, u64 hop_addr)
kfree(pgt_info);
}
+static void free_hop(struct hl_ctx *ctx, u64 hop_addr)
+{
+ struct pgt_info *pgt_info = get_pgt_info(ctx, hop_addr);
+
+ _free_hop(ctx, pgt_info);
+}
+
static u64 alloc_hop(struct hl_ctx *ctx)
{
struct hl_device *hdev = ctx->hdev;
@@ -105,8 +111,8 @@ static inline void write_pte(struct hl_ctx *ctx, u64 shadow_pte_addr, u64 val)
* clear the 12 LSBs and translate the shadow hop to its associated
* physical hop, and add back the original 12 LSBs.
*/
- u64 phys_val = get_phys_addr(ctx, val & PTE_PHYS_ADDR_MASK) |
- (val & OFFSET_MASK);
+ u64 phys_val = get_phys_addr(ctx, val & HOP_PHYS_ADDR_MASK) |
+ (val & FLAGS_MASK);
ctx->hdev->asic_funcs->write_pte(ctx->hdev,
get_phys_addr(ctx, shadow_pte_addr),
@@ -159,7 +165,7 @@ static inline int put_pte(struct hl_ctx *ctx, u64 hop_addr)
*/
num_of_ptes_left = pgt_info->num_of_ptes;
if (!num_of_ptes_left)
- free_hop(ctx, hop_addr);
+ _free_hop(ctx, pgt_info);
return num_of_ptes_left;
}
@@ -171,35 +177,50 @@ static inline u64 get_hopN_pte_addr(struct hl_ctx *ctx, u64 hop_addr,
((virt_addr & mask) >> shift);
}
-static inline u64 get_hop0_pte_addr(struct hl_ctx *ctx, u64 hop_addr, u64 vaddr)
+static inline u64 get_hop0_pte_addr(struct hl_ctx *ctx,
+ struct hl_mmu_properties *mmu_prop,
+ u64 hop_addr, u64 vaddr)
{
- return get_hopN_pte_addr(ctx, hop_addr, vaddr, HOP0_MASK, HOP0_SHIFT);
+ return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_prop->hop0_mask,
+ mmu_prop->hop0_shift);
}
-static inline u64 get_hop1_pte_addr(struct hl_ctx *ctx, u64 hop_addr, u64 vaddr)
+static inline u64 get_hop1_pte_addr(struct hl_ctx *ctx,
+ struct hl_mmu_properties *mmu_prop,
+ u64 hop_addr, u64 vaddr)
{
- return get_hopN_pte_addr(ctx, hop_addr, vaddr, HOP1_MASK, HOP1_SHIFT);
+ return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_prop->hop1_mask,
+ mmu_prop->hop1_shift);
}
-static inline u64 get_hop2_pte_addr(struct hl_ctx *ctx, u64 hop_addr, u64 vaddr)
+static inline u64 get_hop2_pte_addr(struct hl_ctx *ctx,
+ struct hl_mmu_properties *mmu_prop,
+ u64 hop_addr, u64 vaddr)
{
- return get_hopN_pte_addr(ctx, hop_addr, vaddr, HOP2_MASK, HOP2_SHIFT);
+ return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_prop->hop2_mask,
+ mmu_prop->hop2_shift);
}
-static inline u64 get_hop3_pte_addr(struct hl_ctx *ctx, u64 hop_addr, u64 vaddr)
+static inline u64 get_hop3_pte_addr(struct hl_ctx *ctx,
+ struct hl_mmu_properties *mmu_prop,
+ u64 hop_addr, u64 vaddr)
{
- return get_hopN_pte_addr(ctx, hop_addr, vaddr, HOP3_MASK, HOP3_SHIFT);
+ return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_prop->hop3_mask,
+ mmu_prop->hop3_shift);
}
-static inline u64 get_hop4_pte_addr(struct hl_ctx *ctx, u64 hop_addr, u64 vaddr)
+static inline u64 get_hop4_pte_addr(struct hl_ctx *ctx,
+ struct hl_mmu_properties *mmu_prop,
+ u64 hop_addr, u64 vaddr)
{
- return get_hopN_pte_addr(ctx, hop_addr, vaddr, HOP4_MASK, HOP4_SHIFT);
+ return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_prop->hop4_mask,
+ mmu_prop->hop4_shift);
}
static inline u64 get_next_hop_addr(struct hl_ctx *ctx, u64 curr_pte)
{
if (curr_pte & PAGE_PRESENT_MASK)
- return curr_pte & PHYS_ADDR_MASK;
+ return curr_pte & HOP_PHYS_ADDR_MASK;
else
return ULLONG_MAX;
}
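After the rename, one mask pair covers both directions: HOP_PHYS_ADDR_MASK extracts the physical hop address and FLAGS_MASK keeps the low control bits. A stand-alone sketch of PTE composition and the walk step (constants copied from mmu_general.h):

    #include <limits.h>
    #include <stdint.h>

    #define PAGE_PRESENT_MASK  0x0000000000001ull
    #define LAST_MASK          0x0000000000800ull
    #define FLAGS_MASK         0x0000000000FFFull
    #define HOP_PHYS_ADDR_MASK (~FLAGS_MASK)

    /* build a leaf PTE the way dram_default_mapping_init() does */
    static inline uint64_t make_leaf_pte(uint64_t phys_addr)
    {
        return (phys_addr & HOP_PHYS_ADDR_MASK) | LAST_MASK |
               PAGE_PRESENT_MASK;
    }

    /* mirror of get_next_hop_addr(): hop address, or ULLONG_MAX if absent */
    static inline uint64_t next_hop_addr(uint64_t pte)
    {
        return (pte & PAGE_PRESENT_MASK) ? (pte & HOP_PHYS_ADDR_MASK)
                                         : ULLONG_MAX;
    }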
@@ -288,23 +309,23 @@ static int dram_default_mapping_init(struct hl_ctx *ctx)
}
/* need only pte 0 in hops 0 and 1 */
- pte_val = (hop1_addr & PTE_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
+ pte_val = (hop1_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
write_pte(ctx, hop0_addr, pte_val);
- pte_val = (hop2_addr & PTE_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
+ pte_val = (hop2_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
write_pte(ctx, hop1_addr, pte_val);
get_pte(ctx, hop1_addr);
hop2_pte_addr = hop2_addr;
for (i = 0 ; i < num_of_hop3 ; i++) {
- pte_val = (ctx->dram_default_hops[i] & PTE_PHYS_ADDR_MASK) |
+ pte_val = (ctx->dram_default_hops[i] & HOP_PHYS_ADDR_MASK) |
PAGE_PRESENT_MASK;
write_pte(ctx, hop2_pte_addr, pte_val);
get_pte(ctx, hop2_addr);
hop2_pte_addr += HL_PTE_SIZE;
}
- pte_val = (prop->mmu_dram_default_page_addr & PTE_PHYS_ADDR_MASK) |
+ pte_val = (prop->mmu_dram_default_page_addr & HOP_PHYS_ADDR_MASK) |
LAST_MASK | PAGE_PRESENT_MASK;
for (i = 0 ; i < num_of_hop3 ; i++) {
@@ -400,8 +421,6 @@ int hl_mmu_init(struct hl_device *hdev)
if (!hdev->mmu_enable)
return 0;
- /* MMU H/W init was already done in device hw_init() */
-
hdev->mmu_pgt_pool =
gen_pool_create(__ffs(prop->mmu_hop_table_size), -1);
@@ -427,6 +446,8 @@ int hl_mmu_init(struct hl_device *hdev)
goto err_pool_add;
}
+ /* MMU H/W init will be done in device hw_init() */
+
return 0;
err_pool_add:
@@ -450,10 +471,10 @@ void hl_mmu_fini(struct hl_device *hdev)
if (!hdev->mmu_enable)
return;
+ /* MMU H/W fini was already done in device hw_fini() */
+
kvfree(hdev->mmu_shadow_hop0);
gen_pool_destroy(hdev->mmu_pgt_pool);
-
- /* MMU H/W fini will be done in device hw_fini() */
}
/**
@@ -501,36 +522,36 @@ void hl_mmu_ctx_fini(struct hl_ctx *ctx)
dram_default_mapping_fini(ctx);
if (!hash_empty(ctx->mmu_shadow_hash))
- dev_err(hdev->dev, "ctx is freed while it has pgts in use\n");
+ dev_err(hdev->dev, "ctx %d is freed while it has pgts in use\n",
+ ctx->asid);
hash_for_each_safe(ctx->mmu_shadow_hash, i, tmp, pgt_info, node) {
- dev_err(hdev->dev,
+ dev_err_ratelimited(hdev->dev,
"pgt_info of addr 0x%llx of asid %d was not destroyed, num_ptes: %d\n",
pgt_info->phys_addr, ctx->asid, pgt_info->num_of_ptes);
- free_hop(ctx, pgt_info->shadow_addr);
+ _free_hop(ctx, pgt_info);
}
mutex_destroy(&ctx->mmu_lock);
}
-static int _hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr)
+static int _hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr, bool is_dram_addr)
{
struct hl_device *hdev = ctx->hdev;
struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct hl_mmu_properties *mmu_prop;
u64 hop0_addr = 0, hop0_pte_addr = 0,
hop1_addr = 0, hop1_pte_addr = 0,
hop2_addr = 0, hop2_pte_addr = 0,
hop3_addr = 0, hop3_pte_addr = 0,
hop4_addr = 0, hop4_pte_addr = 0,
curr_pte;
- bool is_dram_addr, is_huge, clear_hop3 = true;
+ bool is_huge, clear_hop3 = true;
- is_dram_addr = hl_mem_area_inside_range(virt_addr, PAGE_SIZE_2MB,
- prop->va_space_dram_start_address,
- prop->va_space_dram_end_address);
+ mmu_prop = is_dram_addr ? &prop->dmmu : &prop->pmmu;
hop0_addr = get_hop0_addr(ctx);
- hop0_pte_addr = get_hop0_pte_addr(ctx, hop0_addr, virt_addr);
+ hop0_pte_addr = get_hop0_pte_addr(ctx, mmu_prop, hop0_addr, virt_addr);
curr_pte = *(u64 *) (uintptr_t) hop0_pte_addr;
@@ -539,7 +560,7 @@ static int _hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr)
if (hop1_addr == ULLONG_MAX)
goto not_mapped;
- hop1_pte_addr = get_hop1_pte_addr(ctx, hop1_addr, virt_addr);
+ hop1_pte_addr = get_hop1_pte_addr(ctx, mmu_prop, hop1_addr, virt_addr);
curr_pte = *(u64 *) (uintptr_t) hop1_pte_addr;
@@ -548,7 +569,7 @@ static int _hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr)
if (hop2_addr == ULLONG_MAX)
goto not_mapped;
- hop2_pte_addr = get_hop2_pte_addr(ctx, hop2_addr, virt_addr);
+ hop2_pte_addr = get_hop2_pte_addr(ctx, mmu_prop, hop2_addr, virt_addr);
curr_pte = *(u64 *) (uintptr_t) hop2_pte_addr;
@@ -557,7 +578,7 @@ static int _hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr)
if (hop3_addr == ULLONG_MAX)
goto not_mapped;
- hop3_pte_addr = get_hop3_pte_addr(ctx, hop3_addr, virt_addr);
+ hop3_pte_addr = get_hop3_pte_addr(ctx, mmu_prop, hop3_addr, virt_addr);
curr_pte = *(u64 *) (uintptr_t) hop3_pte_addr;
@@ -575,7 +596,8 @@ static int _hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr)
if (hop4_addr == ULLONG_MAX)
goto not_mapped;
- hop4_pte_addr = get_hop4_pte_addr(ctx, hop4_addr, virt_addr);
+ hop4_pte_addr = get_hop4_pte_addr(ctx, mmu_prop, hop4_addr,
+ virt_addr);
curr_pte = *(u64 *) (uintptr_t) hop4_pte_addr;
@@ -584,7 +606,7 @@ static int _hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr)
if (hdev->dram_default_page_mapping && is_dram_addr) {
u64 default_pte = (prop->mmu_dram_default_page_addr &
- PTE_PHYS_ADDR_MASK) | LAST_MASK |
+ HOP_PHYS_ADDR_MASK) | LAST_MASK |
PAGE_PRESENT_MASK;
if (curr_pte == default_pte) {
dev_err(hdev->dev,
@@ -667,25 +689,36 @@ not_mapped:
int hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr, u32 page_size)
{
struct hl_device *hdev = ctx->hdev;
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct hl_mmu_properties *mmu_prop;
u64 real_virt_addr;
u32 real_page_size, npages;
int i, rc;
+ bool is_dram_addr;
if (!hdev->mmu_enable)
return 0;
+ is_dram_addr = hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size,
+ prop->va_space_dram_start_address,
+ prop->va_space_dram_end_address);
+
+ mmu_prop = is_dram_addr ? &prop->dmmu : &prop->pmmu;
+
/*
- * The H/W handles mapping of 4KB/2MB page. Hence if the host page size
- * is bigger, we break it to sub-pages and unmap them separately.
+ * The H/W handles mapping of specific page sizes. Hence if the page
+	 * size is bigger, we break it into sub-pages and unmap them separately.
*/
- if ((page_size % PAGE_SIZE_2MB) == 0) {
- real_page_size = PAGE_SIZE_2MB;
- } else if ((page_size % PAGE_SIZE_4KB) == 0) {
- real_page_size = PAGE_SIZE_4KB;
+ if ((page_size % mmu_prop->huge_page_size) == 0) {
+ real_page_size = mmu_prop->huge_page_size;
+ } else if ((page_size % mmu_prop->page_size) == 0) {
+ real_page_size = mmu_prop->page_size;
} else {
dev_err(hdev->dev,
- "page size of %u is not 4KB nor 2MB aligned, can't unmap\n",
- page_size);
+ "page size of %u is not %uKB nor %uMB aligned, can't unmap\n",
+ page_size,
+ mmu_prop->page_size >> 10,
+ mmu_prop->huge_page_size >> 20);
return -EFAULT;
}
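The selection above generalizes the old 4KB/2MB split: prefer the MMU's huge page size, fall back to the regular page size, otherwise reject the request. The same logic in isolation (a sketch; parameter names follow struct hl_mmu_properties):

    #include <stdint.h>

    /* returns the H/W page size to walk with, or 0 when unaligned */
    static uint32_t pick_real_page_size(uint32_t page_size,
                                        uint32_t mmu_page_size,
                                        uint32_t mmu_huge_page_size)
    {
        if (!(page_size % mmu_huge_page_size))
            return mmu_huge_page_size;
        if (!(page_size % mmu_page_size))
            return mmu_page_size;
        return 0; /* caller returns -EFAULT, as hl_mmu_unmap() does */
    }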
@@ -694,7 +727,7 @@ int hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr, u32 page_size)
real_virt_addr = virt_addr;
for (i = 0 ; i < npages ; i++) {
- rc = _hl_mmu_unmap(ctx, real_virt_addr);
+ rc = _hl_mmu_unmap(ctx, real_virt_addr, is_dram_addr);
if (rc)
return rc;
@@ -705,10 +738,11 @@ int hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr, u32 page_size)
}
static int _hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
- u32 page_size)
+ u32 page_size, bool is_dram_addr)
{
struct hl_device *hdev = ctx->hdev;
struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct hl_mmu_properties *mmu_prop;
u64 hop0_addr = 0, hop0_pte_addr = 0,
hop1_addr = 0, hop1_pte_addr = 0,
hop2_addr = 0, hop2_pte_addr = 0,
@@ -716,21 +750,19 @@ static int _hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
hop4_addr = 0, hop4_pte_addr = 0,
curr_pte = 0;
bool hop1_new = false, hop2_new = false, hop3_new = false,
- hop4_new = false, is_huge, is_dram_addr;
+ hop4_new = false, is_huge;
int rc = -ENOMEM;
+ mmu_prop = is_dram_addr ? &prop->dmmu : &prop->pmmu;
+
/*
- * This mapping function can map a 4KB/2MB page. For 2MB page there are
- * only 3 hops rather than 4. Currently the DRAM allocation uses 2MB
- * pages only but user memory could have been allocated with one of the
- * two page sizes. Since this is a common code for all the three cases,
- * we need this hugs page check.
+	 * This mapping function can map a page or a huge page. For a huge page
+	 * there are only 3 hops rather than 4. Currently the DRAM allocation
+	 * uses huge pages only but user memory could have been allocated with
+	 * one of the two page sizes. Since this is common code for all three
+	 * cases, we need this huge page check.
*/
- is_huge = page_size == PAGE_SIZE_2MB;
-
- is_dram_addr = hl_mem_area_inside_range(virt_addr, page_size,
- prop->va_space_dram_start_address,
- prop->va_space_dram_end_address);
+ is_huge = page_size == mmu_prop->huge_page_size;
if (is_dram_addr && !is_huge) {
dev_err(hdev->dev, "DRAM mapping should use huge pages only\n");
@@ -738,28 +770,28 @@ static int _hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
}
hop0_addr = get_hop0_addr(ctx);
- hop0_pte_addr = get_hop0_pte_addr(ctx, hop0_addr, virt_addr);
+ hop0_pte_addr = get_hop0_pte_addr(ctx, mmu_prop, hop0_addr, virt_addr);
curr_pte = *(u64 *) (uintptr_t) hop0_pte_addr;
hop1_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop1_new);
if (hop1_addr == ULLONG_MAX)
goto err;
- hop1_pte_addr = get_hop1_pte_addr(ctx, hop1_addr, virt_addr);
+ hop1_pte_addr = get_hop1_pte_addr(ctx, mmu_prop, hop1_addr, virt_addr);
curr_pte = *(u64 *) (uintptr_t) hop1_pte_addr;
hop2_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop2_new);
if (hop2_addr == ULLONG_MAX)
goto err;
- hop2_pte_addr = get_hop2_pte_addr(ctx, hop2_addr, virt_addr);
+ hop2_pte_addr = get_hop2_pte_addr(ctx, mmu_prop, hop2_addr, virt_addr);
curr_pte = *(u64 *) (uintptr_t) hop2_pte_addr;
hop3_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop3_new);
if (hop3_addr == ULLONG_MAX)
goto err;
- hop3_pte_addr = get_hop3_pte_addr(ctx, hop3_addr, virt_addr);
+ hop3_pte_addr = get_hop3_pte_addr(ctx, mmu_prop, hop3_addr, virt_addr);
curr_pte = *(u64 *) (uintptr_t) hop3_pte_addr;
if (!is_huge) {
@@ -767,13 +799,14 @@ static int _hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
if (hop4_addr == ULLONG_MAX)
goto err;
- hop4_pte_addr = get_hop4_pte_addr(ctx, hop4_addr, virt_addr);
+ hop4_pte_addr = get_hop4_pte_addr(ctx, mmu_prop, hop4_addr,
+ virt_addr);
curr_pte = *(u64 *) (uintptr_t) hop4_pte_addr;
}
if (hdev->dram_default_page_mapping && is_dram_addr) {
u64 default_pte = (prop->mmu_dram_default_page_addr &
- PTE_PHYS_ADDR_MASK) | LAST_MASK |
+ HOP_PHYS_ADDR_MASK) | LAST_MASK |
PAGE_PRESENT_MASK;
if (curr_pte != default_pte) {
@@ -813,7 +846,7 @@ static int _hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
goto err;
}
- curr_pte = (phys_addr & PTE_PHYS_ADDR_MASK) | LAST_MASK
+ curr_pte = (phys_addr & HOP_PHYS_ADDR_MASK) | LAST_MASK
| PAGE_PRESENT_MASK;
if (is_huge)
@@ -823,25 +856,25 @@ static int _hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
if (hop1_new) {
curr_pte =
- (hop1_addr & PTE_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
+ (hop1_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
write_pte(ctx, hop0_pte_addr, curr_pte);
}
if (hop2_new) {
curr_pte =
- (hop2_addr & PTE_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
+ (hop2_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
write_pte(ctx, hop1_pte_addr, curr_pte);
get_pte(ctx, hop1_addr);
}
if (hop3_new) {
curr_pte =
- (hop3_addr & PTE_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
+ (hop3_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
write_pte(ctx, hop2_pte_addr, curr_pte);
get_pte(ctx, hop2_addr);
}
if (!is_huge) {
if (hop4_new) {
- curr_pte = (hop4_addr & PTE_PHYS_ADDR_MASK) |
+ curr_pte = (hop4_addr & HOP_PHYS_ADDR_MASK) |
PAGE_PRESENT_MASK;
write_pte(ctx, hop3_pte_addr, curr_pte);
get_pte(ctx, hop3_addr);
@@ -890,25 +923,36 @@ err:
int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size)
{
struct hl_device *hdev = ctx->hdev;
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct hl_mmu_properties *mmu_prop;
u64 real_virt_addr, real_phys_addr;
u32 real_page_size, npages;
int i, rc, mapped_cnt = 0;
+ bool is_dram_addr;
if (!hdev->mmu_enable)
return 0;
+ is_dram_addr = hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size,
+ prop->va_space_dram_start_address,
+ prop->va_space_dram_end_address);
+
+ mmu_prop = is_dram_addr ? &prop->dmmu : &prop->pmmu;
+
/*
- * The H/W handles mapping of 4KB/2MB page. Hence if the host page size
- * is bigger, we break it to sub-pages and map them separately.
+ * The H/W handles mapping of specific page sizes. Hence if the page
+	 * size is bigger, we break it into sub-pages and map them separately.
*/
- if ((page_size % PAGE_SIZE_2MB) == 0) {
- real_page_size = PAGE_SIZE_2MB;
- } else if ((page_size % PAGE_SIZE_4KB) == 0) {
- real_page_size = PAGE_SIZE_4KB;
+ if ((page_size % mmu_prop->huge_page_size) == 0) {
+ real_page_size = mmu_prop->huge_page_size;
+ } else if ((page_size % mmu_prop->page_size) == 0) {
+ real_page_size = mmu_prop->page_size;
} else {
dev_err(hdev->dev,
- "page size of %u is not 4KB nor 2MB aligned, can't map\n",
- page_size);
+ "page size of %u is not %dKB nor %dMB aligned, can't unmap\n",
+ page_size,
+ mmu_prop->page_size >> 10,
+ mmu_prop->huge_page_size >> 20);
return -EFAULT;
}
@@ -923,7 +967,7 @@ int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size)
for (i = 0 ; i < npages ; i++) {
rc = _hl_mmu_map(ctx, real_virt_addr, real_phys_addr,
- real_page_size);
+ real_page_size, is_dram_addr);
if (rc)
goto err;
@@ -937,7 +981,7 @@ int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size)
err:
real_virt_addr = virt_addr;
for (i = 0 ; i < mapped_cnt ; i++) {
- if (_hl_mmu_unmap(ctx, real_virt_addr))
+ if (_hl_mmu_unmap(ctx, real_virt_addr, is_dram_addr))
dev_warn_ratelimited(hdev->dev,
"failed to unmap va: 0x%llx\n", real_virt_addr);
diff --git a/drivers/misc/hpilo.h b/drivers/misc/hpilo.h
index 94dfb9e40e29..1aa433a7f66c 100644
--- a/drivers/misc/hpilo.h
+++ b/drivers/misc/hpilo.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* linux/drivers/char/hpilo.h
*
diff --git a/drivers/misc/ibmvmc.h b/drivers/misc/ibmvmc.h
index e140ada8fe2c..0e1756fffeae 100644
--- a/drivers/misc/ibmvmc.h
+++ b/drivers/misc/ibmvmc.h
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0+
- *
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
* linux/drivers/misc/ibmvmc.h
*
* IBM Power Systems Virtual Management Channel Support.
diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
index 057d7bbde402..dd65cedf3b12 100644
--- a/drivers/misc/lis3lv02d/lis3lv02d.c
+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
@@ -16,7 +16,7 @@
#include <linux/types.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
-#include <linux/input-polldev.h>
+#include <linux/input.h>
#include <linux/delay.h>
#include <linux/wait.h>
#include <linux/poll.h>
@@ -434,23 +434,23 @@ int lis3lv02d_poweron(struct lis3lv02d *lis3)
EXPORT_SYMBOL_GPL(lis3lv02d_poweron);
-static void lis3lv02d_joystick_poll(struct input_polled_dev *pidev)
+static void lis3lv02d_joystick_poll(struct input_dev *input)
{
- struct lis3lv02d *lis3 = pidev->private;
+ struct lis3lv02d *lis3 = input_get_drvdata(input);
int x, y, z;
mutex_lock(&lis3->mutex);
lis3lv02d_get_xyz(lis3, &x, &y, &z);
- input_report_abs(pidev->input, ABS_X, x);
- input_report_abs(pidev->input, ABS_Y, y);
- input_report_abs(pidev->input, ABS_Z, z);
- input_sync(pidev->input);
+ input_report_abs(input, ABS_X, x);
+ input_report_abs(input, ABS_Y, y);
+ input_report_abs(input, ABS_Z, z);
+ input_sync(input);
mutex_unlock(&lis3->mutex);
}
-static void lis3lv02d_joystick_open(struct input_polled_dev *pidev)
+static int lis3lv02d_joystick_open(struct input_dev *input)
{
- struct lis3lv02d *lis3 = pidev->private;
+ struct lis3lv02d *lis3 = input_get_drvdata(input);
if (lis3->pm_dev)
pm_runtime_get_sync(lis3->pm_dev);
@@ -461,12 +461,14 @@ static void lis3lv02d_joystick_open(struct input_polled_dev *pidev)
* Update coordinates for the case where poll interval is 0 and
	 * the chip is running purely under interrupt control
*/
- lis3lv02d_joystick_poll(pidev);
+ lis3lv02d_joystick_poll(input);
+
+ return 0;
}
-static void lis3lv02d_joystick_close(struct input_polled_dev *pidev)
+static void lis3lv02d_joystick_close(struct input_dev *input)
{
- struct lis3lv02d *lis3 = pidev->private;
+ struct lis3lv02d *lis3 = input_get_drvdata(input);
atomic_set(&lis3->wake_thread, 0);
if (lis3->pm_dev)
@@ -497,7 +499,7 @@ out:
static void lis302dl_interrupt_handle_click(struct lis3lv02d *lis3)
{
- struct input_dev *dev = lis3->idev->input;
+ struct input_dev *dev = lis3->idev;
u8 click_src;
mutex_lock(&lis3->mutex);
@@ -677,26 +679,19 @@ int lis3lv02d_joystick_enable(struct lis3lv02d *lis3)
if (lis3->idev)
return -EINVAL;
- lis3->idev = input_allocate_polled_device();
- if (!lis3->idev)
+ input_dev = input_allocate_device();
+ if (!input_dev)
return -ENOMEM;
- lis3->idev->poll = lis3lv02d_joystick_poll;
- lis3->idev->open = lis3lv02d_joystick_open;
- lis3->idev->close = lis3lv02d_joystick_close;
- lis3->idev->poll_interval = MDPS_POLL_INTERVAL;
- lis3->idev->poll_interval_min = MDPS_POLL_MIN;
- lis3->idev->poll_interval_max = MDPS_POLL_MAX;
- lis3->idev->private = lis3;
- input_dev = lis3->idev->input;
-
input_dev->name = "ST LIS3LV02DL Accelerometer";
input_dev->phys = DRIVER_NAME "/input0";
input_dev->id.bustype = BUS_HOST;
input_dev->id.vendor = 0;
input_dev->dev.parent = &lis3->pdev->dev;
- set_bit(EV_ABS, input_dev->evbit);
+ input_dev->open = lis3lv02d_joystick_open;
+ input_dev->close = lis3lv02d_joystick_close;
+
max_val = (lis3->mdps_max_val * lis3->scale) / LIS3_ACCURACY;
if (lis3->whoami == WAI_12B) {
fuzz = LIS3_DEFAULT_FUZZ_12B;
@@ -712,17 +707,32 @@ int lis3lv02d_joystick_enable(struct lis3lv02d *lis3)
input_set_abs_params(input_dev, ABS_Y, -max_val, max_val, fuzz, flat);
input_set_abs_params(input_dev, ABS_Z, -max_val, max_val, fuzz, flat);
+ input_set_drvdata(input_dev, lis3);
+ lis3->idev = input_dev;
+
+ err = input_setup_polling(input_dev, lis3lv02d_joystick_poll);
+ if (err)
+ goto err_free_input;
+
+ input_set_poll_interval(input_dev, MDPS_POLL_INTERVAL);
+ input_set_min_poll_interval(input_dev, MDPS_POLL_MIN);
+ input_set_max_poll_interval(input_dev, MDPS_POLL_MAX);
+
lis3->mapped_btns[0] = lis3lv02d_get_axis(abs(lis3->ac.x), btns);
lis3->mapped_btns[1] = lis3lv02d_get_axis(abs(lis3->ac.y), btns);
lis3->mapped_btns[2] = lis3lv02d_get_axis(abs(lis3->ac.z), btns);
- err = input_register_polled_device(lis3->idev);
- if (err) {
- input_free_polled_device(lis3->idev);
- lis3->idev = NULL;
- }
+ err = input_register_device(lis3->idev);
+ if (err)
+ goto err_free_input;
+ return 0;
+
+err_free_input:
+ input_free_device(input_dev);
+ lis3->idev = NULL;
return err;
}
EXPORT_SYMBOL_GPL(lis3lv02d_joystick_enable);
@@ -738,8 +748,7 @@ void lis3lv02d_joystick_disable(struct lis3lv02d *lis3)
if (lis3->irq)
misc_deregister(&lis3->miscdev);
- input_unregister_polled_device(lis3->idev);
- input_free_polled_device(lis3->idev);
+ input_unregister_device(lis3->idev);
lis3->idev = NULL;
}
EXPORT_SYMBOL_GPL(lis3lv02d_joystick_disable);
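The conversion above follows the standard replacement pattern for input-polldev. In outline, and only as a generic sketch of the input_setup_polling() flow rather than the driver's exact code:

    #include <linux/input.h>

    static int register_polled_dev(struct device *parent,
                                   void (*poll)(struct input_dev *),
                                   void *drvdata, struct input_dev **out)
    {
        struct input_dev *input = input_allocate_device();
        int err;

        if (!input)
            return -ENOMEM;

        input->dev.parent = parent;
        input_set_drvdata(input, drvdata);

        err = input_setup_polling(input, poll);
        if (err)
            goto err_free;

        input_set_poll_interval(input, 100); /* ms, example value */

        err = input_register_device(input);
        if (err)
            goto err_free;

        *out = input;
        return 0;

    err_free:
        input_free_device(input);
        return err;
    }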
@@ -895,10 +904,9 @@ static void lis3lv02d_8b_configure(struct lis3lv02d *lis3,
(p->click_thresh_y << 4));
if (lis3->idev) {
- struct input_dev *input_dev = lis3->idev->input;
- input_set_capability(input_dev, EV_KEY, BTN_X);
- input_set_capability(input_dev, EV_KEY, BTN_Y);
- input_set_capability(input_dev, EV_KEY, BTN_Z);
+ input_set_capability(lis3->idev, EV_KEY, BTN_X);
+ input_set_capability(lis3->idev, EV_KEY, BTN_Y);
+ input_set_capability(lis3->idev, EV_KEY, BTN_Z);
}
}
diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
index 1b0c99883c57..c394c0b08519 100644
--- a/drivers/misc/lis3lv02d/lis3lv02d.h
+++ b/drivers/misc/lis3lv02d/lis3lv02d.h
@@ -6,7 +6,7 @@
* Copyright (C) 2008-2009 Eric Piel
*/
#include <linux/platform_device.h>
-#include <linux/input-polldev.h>
+#include <linux/input.h>
#include <linux/regulator/consumer.h>
#include <linux/miscdevice.h>
@@ -281,7 +281,7 @@ struct lis3lv02d {
* (1/1000th of earth gravity)
*/
- struct input_polled_dev *idev; /* input device */
+ struct input_dev *idev; /* input device */
struct platform_device *pdev; /* platform device */
struct regulator_bulk_data regulators[2];
atomic_t count; /* interrupt count after last read */
diff --git a/drivers/misc/lkdtm/refcount.c b/drivers/misc/lkdtm/refcount.c
index 0a146b32da13..de7c5ab528d9 100644
--- a/drivers/misc/lkdtm/refcount.c
+++ b/drivers/misc/lkdtm/refcount.c
@@ -6,14 +6,6 @@
#include "lkdtm.h"
#include <linux/refcount.h>
-#ifdef CONFIG_REFCOUNT_FULL
-#define REFCOUNT_MAX (UINT_MAX - 1)
-#define REFCOUNT_SATURATED UINT_MAX
-#else
-#define REFCOUNT_MAX INT_MAX
-#define REFCOUNT_SATURATED (INT_MIN / 2)
-#endif
-
static void overflow_check(refcount_t *ref)
{
switch (refcount_read(ref)) {
@@ -127,7 +119,7 @@ void lkdtm_REFCOUNT_DEC_ZERO(void)
static void check_negative(refcount_t *ref, int start)
{
/*
- * CONFIG_REFCOUNT_FULL refuses to move a refcount at all on an
+ * refcount_t refuses to move a refcount at all on an
* over-sub, so we have to track our starting position instead of
* looking only at zero-pinning.
*/
@@ -210,7 +202,6 @@ static void check_from_zero(refcount_t *ref)
/*
* A refcount_inc() from zero should pin to zero or saturate and may WARN.
- * Only CONFIG_REFCOUNT_FULL provides this protection currently.
*/
void lkdtm_REFCOUNT_INC_ZERO(void)
{
diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c
index 0a2b99e1af45..9ad9c01ddf41 100644
--- a/drivers/misc/mei/bus-fixup.c
+++ b/drivers/misc/mei/bus-fixup.c
@@ -46,8 +46,6 @@ static const uuid_le mei_nfc_info_guid = MEI_UUID_NFC_INFO;
*/
static void number_of_connections(struct mei_cl_device *cldev)
{
- dev_dbg(&cldev->dev, "running hook %s\n", __func__);
-
if (cldev->me_cl->props.max_number_of_connections > 1)
cldev->do_match = 0;
}
@@ -59,8 +57,6 @@ static void number_of_connections(struct mei_cl_device *cldev)
*/
static void blacklist(struct mei_cl_device *cldev)
{
- dev_dbg(&cldev->dev, "running hook %s\n", __func__);
-
cldev->do_match = 0;
}
@@ -71,8 +67,6 @@ static void blacklist(struct mei_cl_device *cldev)
*/
static void whitelist(struct mei_cl_device *cldev)
{
- dev_dbg(&cldev->dev, "running hook %s\n", __func__);
-
cldev->do_match = 1;
}
@@ -256,7 +250,6 @@ static void mei_wd(struct mei_cl_device *cldev)
{
struct pci_dev *pdev = to_pci_dev(cldev->dev.parent);
- dev_dbg(&cldev->dev, "running hook %s\n", __func__);
if (pdev->device == MEI_DEV_ID_WPT_LP ||
pdev->device == MEI_DEV_ID_SPT ||
pdev->device == MEI_DEV_ID_SPT_H)
@@ -410,8 +403,6 @@ static void mei_nfc(struct mei_cl_device *cldev)
bus = cldev->bus;
- dev_dbg(&cldev->dev, "running hook %s\n", __func__);
-
mutex_lock(&bus->device_lock);
/* we need to connect to INFO GUID */
cl = mei_cl_alloc_linked(bus);
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index 985bd4fd3328..a0a495c95e3c 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -791,11 +791,44 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
}
static DEVICE_ATTR_RO(modalias);
+static ssize_t max_conn_show(struct device *dev, struct device_attribute *a,
+ char *buf)
+{
+ struct mei_cl_device *cldev = to_mei_cl_device(dev);
+ u8 maxconn = mei_me_cl_max_conn(cldev->me_cl);
+
+ return scnprintf(buf, PAGE_SIZE, "%d", maxconn);
+}
+static DEVICE_ATTR_RO(max_conn);
+
+static ssize_t fixed_show(struct device *dev, struct device_attribute *a,
+ char *buf)
+{
+ struct mei_cl_device *cldev = to_mei_cl_device(dev);
+ u8 fixed = mei_me_cl_fixed(cldev->me_cl);
+
+ return scnprintf(buf, PAGE_SIZE, "%d", fixed);
+}
+static DEVICE_ATTR_RO(fixed);
+
+static ssize_t max_len_show(struct device *dev, struct device_attribute *a,
+ char *buf)
+{
+ struct mei_cl_device *cldev = to_mei_cl_device(dev);
+ u32 maxlen = mei_me_cl_max_len(cldev->me_cl);
+
+ return scnprintf(buf, PAGE_SIZE, "%u", maxlen);
+}
+static DEVICE_ATTR_RO(max_len);
+
static struct attribute *mei_cldev_attrs[] = {
&dev_attr_name.attr,
&dev_attr_uuid.attr,
&dev_attr_version.attr,
&dev_attr_modalias.attr,
+ &dev_attr_max_conn.attr,
+ &dev_attr_fixed.attr,
+ &dev_attr_max_len.attr,
NULL,
};
ATTRIBUTE_GROUPS(mei_cldev);
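User space can read the new attributes like any other sysfs file. A minimal sketch, assuming the <controller>-<uuid> device name introduced by the mei_cl_bus_set_name() change further below (the exact device name is illustrative):

    #include <stdio.h>

    int main(void)
    {
        FILE *f = fopen("/sys/bus/mei/devices/"
                        "0000:00:16.0-55213584-9a29-4916-badf-0fb7ed682aeb"
                        "/max_conn", "r");
        int maxconn;

        if (f && fscanf(f, "%d", &maxconn) == 1)
            printf("max connections: %d\n", maxconn);
        if (f)
            fclose(f);
        return 0;
    }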
@@ -873,15 +906,16 @@ static const struct device_type mei_cl_device_type = {
/**
* mei_cl_bus_set_name - set device name for me client device
+ * <controller>-<client device>
+ * Example: 0000:00:16.0-55213584-9a29-4916-badf-0fb7ed682aeb
*
* @cldev: me client device
*/
static inline void mei_cl_bus_set_name(struct mei_cl_device *cldev)
{
- dev_set_name(&cldev->dev, "mei:%s:%pUl:%02X",
- cldev->name,
- mei_me_cl_uuid(cldev->me_cl),
- mei_me_cl_ver(cldev->me_cl));
+ dev_set_name(&cldev->dev, "%s-%pUl",
+ dev_name(cldev->bus->dev),
+ mei_me_cl_uuid(cldev->me_cl));
}
/**
diff --git a/drivers/misc/mei/client.h b/drivers/misc/mei/client.h
index c1f9e810cf81..2f8954def591 100644
--- a/drivers/misc/mei/client.h
+++ b/drivers/misc/mei/client.h
@@ -69,6 +69,42 @@ static inline u8 mei_me_cl_ver(const struct mei_me_client *me_cl)
return me_cl->props.protocol_version;
}
+/**
+ * mei_me_cl_max_conn - return me client max number of connections
+ *
+ * @me_cl: me client
+ *
+ * Return: me client max number of connections
+ */
+static inline u8 mei_me_cl_max_conn(const struct mei_me_client *me_cl)
+{
+ return me_cl->props.max_number_of_connections;
+}
+
+/**
+ * mei_me_cl_fixed - return me client fixed address, if any
+ *
+ * @me_cl: me client
+ *
+ * Return: me client fixed address
+ */
+static inline u8 mei_me_cl_fixed(const struct mei_me_client *me_cl)
+{
+ return me_cl->props.fixed_address;
+}
+
+/**
+ * mei_me_cl_max_len - return me client max msg length
+ *
+ * @me_cl: me client
+ *
+ * Return: me client max msg length
+ */
+static inline u32 mei_me_cl_max_len(const struct mei_me_client *me_cl)
+{
+ return me_cl->props.max_msg_length;
+}
+
/*
* MEI IO Functions
*/
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
index c09f8bb49495..7cd67fb2365d 100644
--- a/drivers/misc/mei/hw-me-regs.h
+++ b/drivers/misc/mei/hw-me-regs.h
@@ -81,6 +81,7 @@
#define MEI_DEV_ID_CMP_LP 0x02e0 /* Comet Point LP */
#define MEI_DEV_ID_CMP_LP_3 0x02e4 /* Comet Point LP 3 (iTouch) */
+#define MEI_DEV_ID_CMP_V 0xA3BA /* Comet Point Lake V */
#define MEI_DEV_ID_ICP_LP 0x34E0 /* Ice Lake Point LP */
@@ -162,7 +163,8 @@ access to ME_CBD */
#define ME_IS_HRA 0x00000002
/* ME Interrupt Enable HRA - host read only access to ME_IE */
#define ME_IE_HRA 0x00000001
-
+/* TRC control shadow register */
+#define ME_TRC 0x00000030
/* H_HPG_CSR register bits */
#define H_HPG_CSR_PGIHEXR 0x00000001
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
index c4f6991d3028..668418d7ea77 100644
--- a/drivers/misc/mei/hw-me.c
+++ b/drivers/misc/mei/hw-me.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2003-2018, Intel Corporation. All rights reserved.
+ * Copyright (c) 2003-2019, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
@@ -173,6 +173,27 @@ static inline void mei_me_d0i3c_write(struct mei_device *dev, u32 reg)
}
/**
+ * mei_me_trc_status - read trc status register
+ *
+ * @dev: mei device
+ * @trc: trc status register value
+ *
+ * Return: 0 on success, error otherwise
+ */
+static int mei_me_trc_status(struct mei_device *dev, u32 *trc)
+{
+ struct mei_me_hw *hw = to_me_hw(dev);
+
+ if (!hw->cfg->hw_trc_supported)
+ return -EOPNOTSUPP;
+
+ *trc = mei_me_reg_read(hw, ME_TRC);
+ trace_mei_reg_read(dev->dev, "ME_TRC", ME_TRC, *trc);
+
+ return 0;
+}
+
+/**
* mei_me_fw_status - read fw status register from pci config space
*
* @dev: mei device
@@ -183,20 +204,19 @@ static inline void mei_me_d0i3c_write(struct mei_device *dev, u32 reg)
static int mei_me_fw_status(struct mei_device *dev,
struct mei_fw_status *fw_status)
{
- struct pci_dev *pdev = to_pci_dev(dev->dev);
struct mei_me_hw *hw = to_me_hw(dev);
const struct mei_fw_status *fw_src = &hw->cfg->fw_status;
int ret;
int i;
- if (!fw_status)
+ if (!fw_status || !hw->read_fws)
return -EINVAL;
fw_status->count = fw_src->count;
for (i = 0; i < fw_src->count && i < MEI_FW_STATUS_MAX; i++) {
- ret = pci_read_config_dword(pdev, fw_src->status[i],
- &fw_status->status[i]);
- trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HSF_X",
+ ret = hw->read_fws(dev, fw_src->status[i],
+ &fw_status->status[i]);
+ trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HFS_X",
fw_src->status[i],
fw_status->status[i]);
if (ret)
@@ -210,19 +230,26 @@ static int mei_me_fw_status(struct mei_device *dev,
* mei_me_hw_config - configure hw dependent settings
*
* @dev: mei device
+ *
+ * Return:
+ * * -EINVAL when read_fws is not set
+ * * 0 on success
+ *
*/
-static void mei_me_hw_config(struct mei_device *dev)
+static int mei_me_hw_config(struct mei_device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev->dev);
struct mei_me_hw *hw = to_me_hw(dev);
u32 hcsr, reg;
+ if (WARN_ON(!hw->read_fws))
+ return -EINVAL;
+
/* Doesn't change in runtime */
hcsr = mei_hcsr_read(dev);
hw->hbuf_depth = (hcsr & H_CBD) >> 24;
reg = 0;
- pci_read_config_dword(pdev, PCI_CFG_HFS_1, &reg);
+ hw->read_fws(dev, PCI_CFG_HFS_1, &reg);
trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HFS_1", PCI_CFG_HFS_1, reg);
hw->d0i3_supported =
((reg & PCI_CFG_HFS_1_D0I3_MSK) == PCI_CFG_HFS_1_D0I3_MSK);
@@ -233,6 +260,8 @@ static void mei_me_hw_config(struct mei_device *dev)
if (reg & H_D0I3C_I3)
hw->pg_state = MEI_PG_ON;
}
+
+ return 0;
}
/**
@@ -269,7 +298,7 @@ static inline void me_intr_disable(struct mei_device *dev, u32 hcsr)
}
/**
- * mei_me_intr_clear - clear and stop interrupts
+ * me_intr_clear - clear and stop interrupts
*
* @dev: the device structure
* @hcsr: supplied hcsr register value
@@ -323,9 +352,9 @@ static void mei_me_intr_disable(struct mei_device *dev)
*/
static void mei_me_synchronize_irq(struct mei_device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev->dev);
+ struct mei_me_hw *hw = to_me_hw(dev);
- synchronize_irq(pdev->irq);
+ synchronize_irq(hw->irq);
}
/**
@@ -1294,6 +1323,7 @@ end:
static const struct mei_hw_ops mei_me_hw_ops = {
+ .trc_status = mei_me_trc_status,
.fw_status = mei_me_fw_status,
.pg_state = mei_me_pg_state,
@@ -1384,6 +1414,9 @@ static bool mei_me_fw_type_sps(struct pci_dev *pdev)
.dma_size[DMA_DSCR_DEVICE] = SZ_128K, \
.dma_size[DMA_DSCR_CTRL] = PAGE_SIZE
+#define MEI_CFG_TRC \
+ .hw_trc_supported = 1
+
/* ICH Legacy devices */
static const struct mei_cfg mei_me_ich_cfg = {
MEI_CFG_ICH_HFS,
@@ -1432,6 +1465,14 @@ static const struct mei_cfg mei_me_pch12_cfg = {
MEI_CFG_DMA_128,
};
+/* Tiger Lake and newer devices */
+static const struct mei_cfg mei_me_pch15_cfg = {
+ MEI_CFG_PCH8_HFS,
+ MEI_CFG_FW_VER_SUPP,
+ MEI_CFG_DMA_128,
+ MEI_CFG_TRC,
+};
+
/*
* mei_cfg_list - A list of platform-specific configurations.
* Note: has to be synchronized with enum mei_cfg_idx.
@@ -1446,6 +1487,7 @@ static const struct mei_cfg *const mei_cfg_list[] = {
[MEI_ME_PCH8_CFG] = &mei_me_pch8_cfg,
[MEI_ME_PCH8_SPS_CFG] = &mei_me_pch8_sps_cfg,
[MEI_ME_PCH12_CFG] = &mei_me_pch12_cfg,
+ [MEI_ME_PCH15_CFG] = &mei_me_pch15_cfg,
};
const struct mei_cfg *mei_me_get_cfg(kernel_ulong_t idx)
@@ -1461,19 +1503,19 @@ const struct mei_cfg *mei_me_get_cfg(kernel_ulong_t idx)
/**
* mei_me_dev_init - allocates and initializes the mei device structure
*
- * @pdev: The pci device structure
+ * @parent: device associated with physical device (pci/platform)
* @cfg: per device generation config
*
* Return: The mei_device pointer on success, NULL on failure.
*/
-struct mei_device *mei_me_dev_init(struct pci_dev *pdev,
+struct mei_device *mei_me_dev_init(struct device *parent,
const struct mei_cfg *cfg)
{
struct mei_device *dev;
struct mei_me_hw *hw;
int i;
- dev = devm_kzalloc(&pdev->dev, sizeof(struct mei_device) +
+ dev = devm_kzalloc(parent, sizeof(struct mei_device) +
sizeof(struct mei_me_hw), GFP_KERNEL);
if (!dev)
return NULL;
@@ -1483,7 +1525,7 @@ struct mei_device *mei_me_dev_init(struct pci_dev *pdev,
for (i = 0; i < DMA_DSCR_NUM; i++)
dev->dr_dscr[i].size = cfg->dma_size[i];
- mei_device_init(dev, &pdev->dev, &mei_me_hw_ops);
+ mei_device_init(dev, parent, &mei_me_hw_ops);
hw->cfg = cfg;
dev->fw_f_fw_ver_supported = cfg->fw_ver_supported;
diff --git a/drivers/misc/mei/hw-me.h b/drivers/misc/mei/hw-me.h
index 1d8794828cbc..4a8d4dcd5a91 100644
--- a/drivers/misc/mei/hw-me.h
+++ b/drivers/misc/mei/hw-me.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Copyright (c) 2012-2018, Intel Corporation. All rights reserved.
+ * Copyright (c) 2012-2019, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
@@ -21,12 +21,14 @@
* @quirk_probe: device exclusion quirk
* @dma_size: device DMA buffers size
* @fw_ver_supported: is fw version retrievable from FW
+ * @hw_trc_supported: does the hw support trc register
*/
struct mei_cfg {
const struct mei_fw_status fw_status;
bool (*quirk_probe)(struct pci_dev *pdev);
size_t dma_size[DMA_DSCR_NUM];
u32 fw_ver_supported:1;
+ u32 hw_trc_supported:1;
};
@@ -42,16 +44,20 @@ struct mei_cfg {
*
* @cfg: per device generation config and ops
* @mem_addr: io memory address
+ * @irq: irq number
* @pg_state: power gating state
* @d0i3_supported: d0i3 support
* @hbuf_depth: depth of hardware host/write buffer in slots
+ * @read_fws: read FW status register handler
*/
struct mei_me_hw {
const struct mei_cfg *cfg;
void __iomem *mem_addr;
+ int irq;
enum mei_pg_state pg_state;
bool d0i3_supported;
u8 hbuf_depth;
+ int (*read_fws)(const struct mei_device *dev, int where, u32 *val);
};
#define to_me_hw(dev) (struct mei_me_hw *)((dev)->hw)
@@ -74,6 +80,7 @@ struct mei_me_hw {
* servers platforms with quirk for
* SPS firmware exclusion.
* @MEI_ME_PCH12_CFG: Platform Controller Hub Gen12 and newer
+ * @MEI_ME_PCH15_CFG: Platform Controller Hub Gen15 and newer
* @MEI_ME_NUM_CFG: Upper Sentinel.
*/
enum mei_cfg_idx {
@@ -86,12 +93,13 @@ enum mei_cfg_idx {
MEI_ME_PCH8_CFG,
MEI_ME_PCH8_SPS_CFG,
MEI_ME_PCH12_CFG,
+ MEI_ME_PCH15_CFG,
MEI_ME_NUM_CFG,
};
const struct mei_cfg *mei_me_get_cfg(kernel_ulong_t idx);
-struct mei_device *mei_me_dev_init(struct pci_dev *pdev,
+struct mei_device *mei_me_dev_init(struct device *parent,
const struct mei_cfg *cfg);
int mei_me_pg_enter_sync(struct mei_device *dev);
diff --git a/drivers/misc/mei/hw-txe.c b/drivers/misc/mei/hw-txe.c
index 5e58656b8e19..785b260b3ae9 100644
--- a/drivers/misc/mei/hw-txe.c
+++ b/drivers/misc/mei/hw-txe.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2013-2014, Intel Corporation. All rights reserved.
+ * Copyright (c) 2013-2019, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
@@ -660,14 +660,16 @@ static int mei_txe_fw_status(struct mei_device *dev,
}
/**
- * mei_txe_hw_config - configure hardware at the start of the devices
+ * mei_txe_hw_config - configure hardware at the start of the devices
*
* @dev: the device structure
*
* Configuring the hardware at the start of the device should be done
* only once, at device probe time
+ *
+ * Return: always 0
*/
-static void mei_txe_hw_config(struct mei_device *dev)
+static int mei_txe_hw_config(struct mei_device *dev)
{
struct mei_txe_hw *hw = to_txe_hw(dev);
@@ -677,6 +679,8 @@ static void mei_txe_hw_config(struct mei_device *dev)
dev_dbg(dev->dev, "aliveness_resp = 0x%08x, readiness = 0x%08x.\n",
hw->aliveness, hw->readiness);
+
+ return 0;
}
/**
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
index b9fef773e71b..bcee77768b91 100644
--- a/drivers/misc/mei/init.c
+++ b/drivers/misc/mei/init.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2012-2018, Intel Corporation. All rights reserved.
+ * Copyright (c) 2012-2019, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
@@ -190,7 +190,9 @@ int mei_start(struct mei_device *dev)
/* acknowledge interrupt and stop interrupts */
mei_clear_interrupts(dev);
- mei_hw_config(dev);
+ ret = mei_hw_config(dev);
+ if (ret)
+ goto err;
dev_dbg(dev->dev, "reset in start the mei device.\n");
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index 7310b476323c..4ef6e37caafc 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -701,6 +701,29 @@ static int mei_fasync(int fd, struct file *file, int band)
}
/**
+ * trc_show - mei device trc attribute show method
+ *
+ * @device: device pointer
+ * @attr: attribute pointer
+ * @buf: char out buffer
+ *
+ * Return: number of bytes printed into buf or a negative error code
+ */
+static ssize_t trc_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct mei_device *dev = dev_get_drvdata(device);
+ u32 trc;
+ int ret;
+
+ ret = mei_trc_status(dev, &trc);
+ if (ret)
+ return ret;
+ return sprintf(buf, "%08X\n", trc);
+}
+static DEVICE_ATTR_RO(trc);
+
+/**
* fw_status_show - mei device fw_status attribute show method
*
* @device: device pointer
@@ -887,6 +910,7 @@ static struct attribute *mei_attrs[] = {
&dev_attr_tx_queue_limit.attr,
&dev_attr_fw_ver.attr,
&dev_attr_dev_state.attr,
+ &dev_attr_trc.attr,
NULL
};
ATTRIBUTE_GROUPS(mei);
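A minimal userspace sketch of consuming the trc attribute wired up above (the /sys/class/mei/mei0 path and the program itself are illustrative assumptions, not part of this patch; a read returns -EOPNOTSUPP when hw_trc_supported is not set for the device):

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/class/mei/mei0/trc", "r");	/* assumed device node */
	unsigned int trc;

	if (!f)
		return 1;	/* no mei class device */
	if (fscanf(f, "%x", &trc) == 1)
		printf("TRC status: %08X\n", trc);	/* matches the sprintf("%08X") format above */
	fclose(f);
	return 0;
}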
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h
index 0f2141178299..76f8ff5ff974 100644
--- a/drivers/misc/mei/mei_dev.h
+++ b/drivers/misc/mei/mei_dev.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Copyright (c) 2003-2018, Intel Corporation. All rights reserved.
+ * Copyright (c) 2003-2019, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
@@ -260,6 +260,7 @@ struct mei_cl {
* @hw_config : configure hw
*
* @fw_status : get fw status registers
+ * @trc_status : get trc status register
* @pg_state : power gating state of the device
* @pg_in_transition : is device now in pg transition
* @pg_is_enabled : is power gating enabled
@@ -287,9 +288,11 @@ struct mei_hw_ops {
bool (*hw_is_ready)(struct mei_device *dev);
int (*hw_reset)(struct mei_device *dev, bool enable);
int (*hw_start)(struct mei_device *dev);
- void (*hw_config)(struct mei_device *dev);
+ int (*hw_config)(struct mei_device *dev);
int (*fw_status)(struct mei_device *dev, struct mei_fw_status *fw_sts);
+ int (*trc_status)(struct mei_device *dev, u32 *trc);
+
enum mei_pg_state (*pg_state)(struct mei_device *dev);
bool (*pg_in_transition)(struct mei_device *dev);
bool (*pg_is_enabled)(struct mei_device *dev);
@@ -614,9 +617,9 @@ void mei_irq_compl_handler(struct mei_device *dev, struct list_head *cmpl_list);
*/
-static inline void mei_hw_config(struct mei_device *dev)
+static inline int mei_hw_config(struct mei_device *dev)
{
- dev->ops->hw_config(dev);
+ return dev->ops->hw_config(dev);
}
static inline enum mei_pg_state mei_pg_state(struct mei_device *dev)
@@ -711,6 +714,13 @@ static inline int mei_count_full_read_slots(struct mei_device *dev)
return dev->ops->rdbuf_full_slots(dev);
}
+static inline int mei_trc_status(struct mei_device *dev, u32 *trc)
+{
+ if (dev->ops->trc_status)
+ return dev->ops->trc_status(dev, trc);
+ return -EOPNOTSUPP;
+}
+
static inline int mei_fw_status(struct mei_device *dev,
struct mei_fw_status *fw_status)
{
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index 3dca63eddaa0..c845b7e40f26 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -98,12 +98,13 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
{MEI_PCI_DEVICE(MEI_DEV_ID_CMP_LP, MEI_ME_PCH12_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_CMP_LP_3, MEI_ME_PCH8_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_CMP_V, MEI_ME_PCH12_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP, MEI_ME_PCH12_CFG)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_TGP_LP, MEI_ME_PCH12_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_TGP_LP, MEI_ME_PCH15_CFG)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_MCC, MEI_ME_PCH12_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_MCC, MEI_ME_PCH15_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_MCC_4, MEI_ME_PCH8_CFG)},
/* required last entry */
@@ -120,6 +121,13 @@ static inline void mei_me_set_pm_domain(struct mei_device *dev) {}
static inline void mei_me_unset_pm_domain(struct mei_device *dev) {}
#endif /* CONFIG_PM */
+static int mei_me_read_fws(const struct mei_device *dev, int where, u32 *val)
+{
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
+
+ return pci_read_config_dword(pdev, where, val);
+}
+
/**
* mei_me_quirk_probe - probe for devices that don't have a valid ME interface
*
@@ -191,13 +199,15 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
/* allocates and initializes the mei dev structure */
- dev = mei_me_dev_init(pdev, cfg);
+ dev = mei_me_dev_init(&pdev->dev, cfg);
if (!dev) {
err = -ENOMEM;
goto end;
}
hw = to_me_hw(dev);
hw->mem_addr = pcim_iomap_table(pdev)[0];
+ hw->irq = pdev->irq;
+ hw->read_fws = mei_me_read_fws;
pci_enable_msi(pdev);
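Taken together, the read_fws() hook and the struct device parent in mei_me_dev_init() remove the last PCI assumptions from hw-me.c. A hypothetical non-PCI back-end (the function name and the MMIO layout below are assumptions for illustration only) would merely supply its own accessor:

static int mei_plat_read_fws(const struct mei_device *dev, int where, u32 *val)
{
	struct mei_me_hw *hw = to_me_hw(dev);

	/* Assumption: FW status registers are memory-mapped on this bus. */
	*val = readl(hw->mem_addr + where);
	return 0;
}

	/* probe path of the hypothetical platform driver */
	dev = mei_me_dev_init(&pdev->dev, cfg);	/* any struct device works now */
	hw = to_me_hw(dev);
	hw->read_fws = mei_plat_read_fws;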
diff --git a/drivers/misc/mic/Kconfig b/drivers/misc/mic/Kconfig
index 948f45bbf135..b6841ba6d922 100644
--- a/drivers/misc/mic/Kconfig
+++ b/drivers/misc/mic/Kconfig
@@ -1,8 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
menu "Intel MIC & related support"
-comment "Intel MIC Bus Driver"
-
config INTEL_MIC_BUS
tristate "Intel MIC Bus Driver"
depends on 64BIT && PCI && X86
@@ -18,8 +16,6 @@ config INTEL_MIC_BUS
OS and tools for MIC to use with this driver are available from
<http://software.intel.com/en-us/mic-developer>.
-comment "SCIF Bus Driver"
-
config SCIF_BUS
tristate "SCIF Bus Driver"
depends on 64BIT && PCI && X86
@@ -35,8 +31,6 @@ config SCIF_BUS
OS and tools for MIC to use with this driver are available from
<http://software.intel.com/en-us/mic-developer>.
-comment "VOP Bus Driver"
-
config VOP_BUS
tristate "VOP Bus Driver"
help
@@ -51,8 +45,6 @@ config VOP_BUS
OS and tools for MIC to use with this driver are available from
<http://software.intel.com/en-us/mic-developer>.
-comment "Intel MIC Host Driver"
-
config INTEL_MIC_HOST
tristate "Intel MIC Host Driver"
depends on 64BIT && PCI && X86
@@ -71,8 +63,6 @@ config INTEL_MIC_HOST
OS and tools for MIC to use with this driver are available from
<http://software.intel.com/en-us/mic-developer>.
-comment "Intel MIC Card Driver"
-
config INTEL_MIC_CARD
tristate "Intel MIC Card Driver"
depends on 64BIT && X86
@@ -90,8 +80,6 @@ config INTEL_MIC_CARD
For more information see
<http://software.intel.com/en-us/mic-developer>.
-comment "SCIF Driver"
-
config SCIF
tristate "SCIF Driver"
depends on 64BIT && PCI && X86 && SCIF_BUS && IOMMU_SUPPORT
@@ -110,8 +98,6 @@ config SCIF
OS and tools for MIC to use with this driver are available from
<http://software.intel.com/en-us/mic-developer>.
-comment "Intel MIC Coprocessor State Management (COSM) Drivers"
-
config MIC_COSM
tristate "Intel MIC Coprocessor State Management (COSM) Drivers"
depends on 64BIT && PCI && X86 && SCIF
@@ -128,8 +114,6 @@ config MIC_COSM
OS and tools for MIC to use with this driver are available from
<http://software.intel.com/en-us/mic-developer>.
-comment "VOP Driver"
-
config VOP
tristate "VOP Driver"
depends on VOP_BUS
diff --git a/drivers/misc/ocxl/ocxl_internal.h b/drivers/misc/ocxl/ocxl_internal.h
index 97415afd79f3..345bf843a38e 100644
--- a/drivers/misc/ocxl/ocxl_internal.h
+++ b/drivers/misc/ocxl/ocxl_internal.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
// Copyright 2017 IBM Corp.
#ifndef _OCXL_INTERNAL_H_
#define _OCXL_INTERNAL_H_
diff --git a/drivers/misc/ocxl/trace.h b/drivers/misc/ocxl/trace.h
index 024f417e7e01..17e21cb2addd 100644
--- a/drivers/misc/ocxl/trace.h
+++ b/drivers/misc/ocxl/trace.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
// Copyright 2017 IBM Corp.
#undef TRACE_SYSTEM
#define TRACE_SYSTEM ocxl
diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
index 3a8d76d1ccae..2817f4751306 100644
--- a/drivers/misc/sgi-gru/gruprocfs.c
+++ b/drivers/misc/sgi-gru/gruprocfs.c
@@ -119,7 +119,7 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
"cch_interrupt_sync", "cch_deallocate", "tfh_write_only",
"tfh_write_restart", "tgh_invalidate"};
- seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
+ seq_puts(s, "#id count aver-clks max-clks\n");
for (op = 0; op < mcsop_last; op++) {
count = atomic_long_read(&mcs_op_statistics[op].count);
total = atomic_long_read(&mcs_op_statistics[op].total);
@@ -165,8 +165,7 @@ static int cch_seq_show(struct seq_file *file, void *data)
const char *mode[] = { "??", "UPM", "INTR", "OS_POLL" };
if (gid == 0)
- seq_printf(file, "#%5s%5s%6s%7s%9s%6s%8s%8s\n", "gid", "bid",
- "ctx#", "asid", "pid", "cbrs", "dsbytes", "mode");
+ seq_puts(file, "# gid bid ctx# asid pid cbrs dsbytes mode\n");
if (gru)
for (i = 0; i < GRU_NUM_CCH; i++) {
ts = gru->gs_gts[i];
@@ -191,10 +190,8 @@ static int gru_seq_show(struct seq_file *file, void *data)
struct gru_state *gru = GID_TO_GRU(gid);
if (gid == 0) {
- seq_printf(file, "#%5s%5s%7s%6s%6s%8s%6s%6s\n", "gid", "nid",
- "ctx", "cbr", "dsr", "ctx", "cbr", "dsr");
- seq_printf(file, "#%5s%5s%7s%6s%6s%8s%6s%6s\n", "", "", "busy",
- "busy", "busy", "free", "free", "free");
+ seq_puts(file, "# gid nid ctx cbr dsr ctx cbr dsr\n");
+ seq_puts(file, "# busy busy busy free free free\n");
}
if (gru) {
ctxfree = GRU_NUM_CCH - gru->gs_active_contexts;
diff --git a/drivers/misc/ti-st/st_core.c b/drivers/misc/ti-st/st_core.c
index 7d9e23aa0b92..2ae9948a91e1 100644
--- a/drivers/misc/ti-st/st_core.c
+++ b/drivers/misc/ti-st/st_core.c
@@ -708,7 +708,6 @@ EXPORT_SYMBOL_GPL(st_unregister);
*/
static int st_tty_open(struct tty_struct *tty)
{
- int err = 0;
struct st_data_s *st_gdata;
pr_info("%s ", __func__);
@@ -731,7 +730,8 @@ static int st_tty_open(struct tty_struct *tty)
*/
st_kim_complete(st_gdata->kim_data);
pr_debug("done %s", __func__);
- return err;
+
+ return 0;
}
static void st_tty_close(struct tty_struct *tty)
diff --git a/drivers/misc/vmw_vmci/vmci_driver.c b/drivers/misc/vmw_vmci/vmci_driver.c
index 819e35995d32..cbb706dabede 100644
--- a/drivers/misc/vmw_vmci/vmci_driver.c
+++ b/drivers/misc/vmw_vmci/vmci_driver.c
@@ -28,6 +28,10 @@ MODULE_PARM_DESC(disable_guest,
static bool vmci_guest_personality_initialized;
static bool vmci_host_personality_initialized;
+static DEFINE_MUTEX(vmci_vsock_mutex); /* protects vmci_vsock_transport_cb */
+static vmci_vsock_cb vmci_vsock_transport_cb;
+static bool vmci_vsock_cb_host_called;
+
/*
* vmci_get_context_id() - Gets the current context ID.
*
@@ -45,6 +49,69 @@ u32 vmci_get_context_id(void)
}
EXPORT_SYMBOL_GPL(vmci_get_context_id);
+/*
+ * vmci_register_vsock_callback() - Register the VSOCK vmci_transport callback.
+ *
+ * The callback will be called when the host or guest personality first
+ * becomes active, or immediately if either is already active when this
+ * function is called.
+ * To unregister the callback, call this function with a NULL parameter.
+ *
+ * Returns 0 on success. -EBUSY if a callback is already registered.
+ */
+int vmci_register_vsock_callback(vmci_vsock_cb callback)
+{
+ int err = 0;
+
+ mutex_lock(&vmci_vsock_mutex);
+
+ if (vmci_vsock_transport_cb && callback) {
+ err = -EBUSY;
+ goto out;
+ }
+
+ vmci_vsock_transport_cb = callback;
+
+ if (!vmci_vsock_transport_cb) {
+ vmci_vsock_cb_host_called = false;
+ goto out;
+ }
+
+ if (vmci_guest_code_active())
+ vmci_vsock_transport_cb(false);
+
+ if (vmci_host_users() > 0) {
+ vmci_vsock_cb_host_called = true;
+ vmci_vsock_transport_cb(true);
+ }
+
+out:
+ mutex_unlock(&vmci_vsock_mutex);
+ return err;
+}
+EXPORT_SYMBOL_GPL(vmci_register_vsock_callback);
+
+void vmci_call_vsock_callback(bool is_host)
+{
+ mutex_lock(&vmci_vsock_mutex);
+
+ if (!vmci_vsock_transport_cb)
+ goto out;
+
+ /* In the host, this function could be called multiple times,
+ * but we want to invoke the callback only once.
+ */
+ if (is_host) {
+ if (vmci_vsock_cb_host_called)
+ goto out;
+
+ vmci_vsock_cb_host_called = true;
+ }
+
+ vmci_vsock_transport_cb(is_host);
+out:
+ mutex_unlock(&vmci_vsock_mutex);
+}
+
static int __init vmci_drv_init(void)
{
int vmci_err;
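A sketch of the registration API added above, from a transport module's point of view (the callback name is hypothetical; the semantics follow this hunk — the callback may fire immediately at registration time if a personality is already active, and it always runs with vmci_vsock_mutex held, so it must not block for long):

static void my_vsock_cb(bool is_host)
{
	pr_info("vmci %s personality active\n", is_host ? "host" : "guest");
}

	err = vmci_register_vsock_callback(my_vsock_cb);
	if (err == -EBUSY)
		return err;	/* some other transport registered first */
	...
	vmci_register_vsock_callback(NULL);	/* unregister on module exit */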
diff --git a/drivers/misc/vmw_vmci/vmci_driver.h b/drivers/misc/vmw_vmci/vmci_driver.h
index aab81b67670c..990682480bf6 100644
--- a/drivers/misc/vmw_vmci/vmci_driver.h
+++ b/drivers/misc/vmw_vmci/vmci_driver.h
@@ -36,10 +36,12 @@ extern struct pci_dev *vmci_pdev;
u32 vmci_get_context_id(void);
int vmci_send_datagram(struct vmci_datagram *dg);
+void vmci_call_vsock_callback(bool is_host);
int vmci_host_init(void);
void vmci_host_exit(void);
bool vmci_host_code_active(void);
+int vmci_host_users(void);
int vmci_guest_init(void);
void vmci_guest_exit(void);
diff --git a/drivers/misc/vmw_vmci/vmci_guest.c b/drivers/misc/vmw_vmci/vmci_guest.c
index 7a84a48c75da..cc8eeb361fcd 100644
--- a/drivers/misc/vmw_vmci/vmci_guest.c
+++ b/drivers/misc/vmw_vmci/vmci_guest.c
@@ -637,6 +637,8 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
vmci_dev->iobase + VMCI_CONTROL_ADDR);
pci_set_drvdata(pdev, vmci_dev);
+
+ vmci_call_vsock_callback(false);
return 0;
err_free_irq:
diff --git a/drivers/misc/vmw_vmci/vmci_host.c b/drivers/misc/vmw_vmci/vmci_host.c
index 833e2bd248a5..ff3c396146ff 100644
--- a/drivers/misc/vmw_vmci/vmci_host.c
+++ b/drivers/misc/vmw_vmci/vmci_host.c
@@ -108,6 +108,11 @@ bool vmci_host_code_active(void)
atomic_read(&vmci_host_active_users) > 0);
}
+int vmci_host_users(void)
+{
+ return atomic_read(&vmci_host_active_users);
+}
+
/*
* Called on open of /dev/vmci.
*/
@@ -338,6 +343,8 @@ static int vmci_host_do_init_context(struct vmci_host_dev *vmci_host_dev,
vmci_host_dev->ct_type = VMCIOBJ_CONTEXT;
atomic_inc(&vmci_host_active_users);
+ vmci_call_vsock_callback(true);
+
retval = 0;
out:
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 2c71a434c915..95b41c0891d0 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -408,38 +408,6 @@ static int mmc_blk_ioctl_copy_to_user(struct mmc_ioc_cmd __user *ic_ptr,
return 0;
}
-static int ioctl_rpmb_card_status_poll(struct mmc_card *card, u32 *status,
- u32 retries_max)
-{
- int err;
- u32 retry_count = 0;
-
- if (!status || !retries_max)
- return -EINVAL;
-
- do {
- err = __mmc_send_status(card, status, 5);
- if (err)
- break;
-
- if (!R1_STATUS(*status) &&
- (R1_CURRENT_STATE(*status) != R1_STATE_PRG))
- break; /* RPMB programming operation complete */
-
- /*
- * Reschedule to give the MMC device a chance to continue
- * processing the previous command without being polled too
- * frequently.
- */
- usleep_range(1000, 5000);
- } while (++retry_count < retries_max);
-
- if (retry_count == retries_max)
- err = -EPERM;
-
- return err;
-}
-
static int ioctl_do_sanitize(struct mmc_card *card)
{
int err;
@@ -468,6 +436,58 @@ out:
return err;
}
+static inline bool mmc_blk_in_tran_state(u32 status)
+{
+ /*
+ * Some cards mishandle the status bits, so make sure to check both the
+ * busy indication and the card state.
+ */
+ return status & R1_READY_FOR_DATA &&
+ (R1_CURRENT_STATE(status) == R1_STATE_TRAN);
+}
+
+static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms,
+ u32 *resp_errs)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
+ int err = 0;
+ u32 status;
+
+ do {
+ bool done = time_after(jiffies, timeout);
+
+ err = __mmc_send_status(card, &status, 5);
+ if (err) {
+ dev_err(mmc_dev(card->host),
+ "error %d requesting status\n", err);
+ return err;
+ }
+
+ /* Accumulate any response error bits seen */
+ if (resp_errs)
+ *resp_errs |= status;
+
+ /*
+ * Timeout if the device never becomes ready for data and never
+ * leaves the program state.
+ */
+ if (done) {
+ dev_err(mmc_dev(card->host),
+ "Card stuck in wrong state! %s status: %#x\n",
+ __func__, status);
+ return -ETIMEDOUT;
+ }
+
+ /*
+ * Some cards mishandle the status bits,
+ * so make sure to check both the busy
+ * indication and the card state.
+ */
+ } while (!mmc_blk_in_tran_state(status));
+
+ return err;
+}
+
static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
struct mmc_blk_ioc_data *idata)
{
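For reference, the bit definitions behind mmc_blk_in_tran_state() (from include/linux/mmc/mmc.h): R1_READY_FOR_DATA is bit 8 of the R1 card status, R1_CURRENT_STATE() extracts bits 12:9, and R1_STATE_TRAN is 4. An illustrative compile-time check, not part of the patch:

	/* 0x00000900: bit 8 set and bits 12:9 == 4 (tran), so this holds */
	BUILD_BUG_ON(!((0x00000900 & R1_READY_FOR_DATA) &&
		       (R1_CURRENT_STATE(0x00000900) == R1_STATE_TRAN)));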
@@ -477,7 +497,6 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
struct scatterlist sg;
int err;
unsigned int target_part;
- u32 status = 0;
if (!card || !md || !idata)
return -EINVAL;
@@ -611,16 +630,12 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
memcpy(&(idata->ic.response), cmd.resp, sizeof(cmd.resp));
- if (idata->rpmb) {
+ if (idata->rpmb || (cmd.flags & MMC_RSP_R1B)) {
/*
- * Ensure RPMB command has completed by polling CMD13
+ * Ensure RPMB/R1B command has completed by polling CMD13
* "Send Status".
*/
- err = ioctl_rpmb_card_status_poll(card, &status, 5);
- if (err)
- dev_err(mmc_dev(card->host),
- "%s: Card Status=0x%08X, error %d\n",
- __func__, status, err);
+ err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, NULL);
}
return err;
@@ -970,58 +985,6 @@ static unsigned int mmc_blk_data_timeout_ms(struct mmc_host *host,
return ms;
}
-static inline bool mmc_blk_in_tran_state(u32 status)
-{
- /*
- * Some cards mishandle the status bits, so make sure to check both the
- * busy indication and the card state.
- */
- return status & R1_READY_FOR_DATA &&
- (R1_CURRENT_STATE(status) == R1_STATE_TRAN);
-}
-
-static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms,
- struct request *req, u32 *resp_errs)
-{
- unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
- int err = 0;
- u32 status;
-
- do {
- bool done = time_after(jiffies, timeout);
-
- err = __mmc_send_status(card, &status, 5);
- if (err) {
- pr_err("%s: error %d requesting status\n",
- req->rq_disk->disk_name, err);
- return err;
- }
-
- /* Accumulate any response error bits seen */
- if (resp_errs)
- *resp_errs |= status;
-
- /*
- * Timeout if the device never becomes ready for data and never
- * leaves the program state.
- */
- if (done) {
- pr_err("%s: Card stuck in wrong state! %s %s status: %#x\n",
- mmc_hostname(card->host),
- req->rq_disk->disk_name, __func__, status);
- return -ETIMEDOUT;
- }
-
- /*
- * Some cards mishandle the status bits,
- * so make sure to check both the busy
- * indication and the card state.
- */
- } while (!mmc_blk_in_tran_state(status));
-
- return err;
-}
-
static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host,
int type)
{
@@ -1671,7 +1634,7 @@ static int mmc_blk_fix_state(struct mmc_card *card, struct request *req)
mmc_blk_send_stop(card, timeout);
- err = card_busy_detect(card, timeout, req, NULL);
+ err = card_busy_detect(card, timeout, NULL);
mmc_retune_release(card->host);
@@ -1895,7 +1858,7 @@ static int mmc_blk_card_busy(struct mmc_card *card, struct request *req)
if (mmc_host_is_spi(card->host) || rq_data_dir(req) == READ)
return 0;
- err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, req, &status);
+ err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, &status);
/*
* Do not assume data transferred correctly if there are any error bits
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 221127324709..abf8f5eb0a1c 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -1469,8 +1469,7 @@ void mmc_detach_bus(struct mmc_host *host)
mmc_bus_put(host);
}
-static void _mmc_detect_change(struct mmc_host *host, unsigned long delay,
- bool cd_irq)
+void _mmc_detect_change(struct mmc_host *host, unsigned long delay, bool cd_irq)
{
/*
* If the device is configured as wakeup, we prevent a new sleep for
@@ -2129,7 +2128,7 @@ int mmc_hw_reset(struct mmc_host *host)
ret = host->bus_ops->hw_reset(host);
mmc_bus_put(host);
- if (ret)
+ if (ret < 0)
pr_warn("%s: tried to HW reset card, got error %d\n",
mmc_hostname(host), ret);
@@ -2297,11 +2296,8 @@ void mmc_rescan(struct work_struct *work)
mmc_bus_get(host);
- /*
- * if there is a _removable_ card registered, check whether it is
- * still present
- */
- if (host->bus_ops && !host->bus_dead && mmc_card_is_removable(host))
+ /* Verify a registered card to be functional, else remove it. */
+ if (host->bus_ops && !host->bus_dead)
host->bus_ops->detect(host);
host->detect_change = 0;
diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
index 328c78dbee66..575ac0257af2 100644
--- a/drivers/mmc/core/core.h
+++ b/drivers/mmc/core/core.h
@@ -70,6 +70,8 @@ void mmc_rescan(struct work_struct *work);
void mmc_start_host(struct mmc_host *host);
void mmc_stop_host(struct mmc_host *host);
+void _mmc_detect_change(struct mmc_host *host, unsigned long delay,
+ bool cd_irq);
int _mmc_detect_card_removed(struct mmc_host *host);
int mmc_detect_card_removed(struct mmc_host *host);
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index c8804895595f..f6912ded652d 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -297,7 +297,7 @@ static void mmc_manage_enhanced_area(struct mmc_card *card, u8 *ext_csd)
}
}
-static void mmc_part_add(struct mmc_card *card, unsigned int size,
+static void mmc_part_add(struct mmc_card *card, u64 size,
unsigned int part_cfg, char *name, int idx, bool ro,
int area_type)
{
@@ -313,7 +313,7 @@ static void mmc_manage_gp_partitions(struct mmc_card *card, u8 *ext_csd)
{
int idx;
u8 hc_erase_grp_sz, hc_wp_grp_sz;
- unsigned int part_size;
+ u64 part_size;
/*
* General purpose partition feature support --
@@ -343,8 +343,7 @@ static void mmc_manage_gp_partitions(struct mmc_card *card, u8 *ext_csd)
(ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 1]
<< 8) +
ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3];
- part_size *= (size_t)(hc_erase_grp_sz *
- hc_wp_grp_sz);
+ part_size *= (hc_erase_grp_sz * hc_wp_grp_sz);
mmc_part_add(card, part_size << 19,
EXT_CSD_PART_CONFIG_ACC_GP0 + idx,
"gp%d", idx, false,
@@ -362,7 +361,7 @@ static void mmc_manage_gp_partitions(struct mmc_card *card, u8 *ext_csd)
static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
{
int err = 0, idx;
- unsigned int part_size;
+ u64 part_size;
struct device_node *np;
bool broken_hpi = false;
diff --git a/drivers/mmc/core/quirks.h b/drivers/mmc/core/quirks.h
index 2d2d9ea8be4f..3dba15bccce2 100644
--- a/drivers/mmc/core/quirks.h
+++ b/drivers/mmc/core/quirks.h
@@ -119,7 +119,14 @@ static const struct mmc_fixup mmc_ext_csd_fixups[] = {
END_FIXUP
};
+
static const struct mmc_fixup sdio_fixup_methods[] = {
+ SDIO_FIXUP(SDIO_VENDOR_ID_TI_WL1251, SDIO_DEVICE_ID_TI_WL1251,
+ add_quirk, MMC_QUIRK_NONSTD_FUNC_IF),
+
+ SDIO_FIXUP(SDIO_VENDOR_ID_TI_WL1251, SDIO_DEVICE_ID_TI_WL1251,
+ add_quirk, MMC_QUIRK_DISABLE_CD),
+
SDIO_FIXUP(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271,
add_quirk, MMC_QUIRK_NONSTD_FUNC_IF),
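These SDIO_FIXUP entries take effect during card initialization, when mmc_fixup_device(card, sdio_fixup_methods) walks the table and calls the handler of every vendor/device match; add_quirk() simply ORs its data argument into card->quirks. The net effect for a wl1251, sketched for illustration:

	/* equivalent of the two entries above after fixup processing */
	card->quirks |= MMC_QUIRK_NONSTD_FUNC_IF | MMC_QUIRK_DISABLE_CD;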
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index 26cabd53ddc5..ebb387aa5158 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -1048,9 +1048,35 @@ static int mmc_sdio_runtime_resume(struct mmc_host *host)
return ret;
}
+/*
+ * SDIO HW reset
+ *
+ * Returns 0 if the HW reset was executed synchronously, returns 1 if the HW
+ * reset was asynchronously scheduled, else a negative error code.
+ */
static int mmc_sdio_hw_reset(struct mmc_host *host)
{
- mmc_power_cycle(host, host->card->ocr);
+ struct mmc_card *card = host->card;
+
+ /*
+ * In case the card is shared among multiple func drivers, reset the
+ * card through a rescan work. In this way it will be removed and
+ * re-detected, thus all func drivers become informed about it.
+ */
+ if (atomic_read(&card->sdio_funcs_probed) > 1) {
+ if (mmc_card_removed(card))
+ return 1;
+ host->rescan_entered = 0;
+ mmc_card_set_removed(card);
+ _mmc_detect_change(host, 0, false);
+ return 1;
+ }
+
+ /*
+ * If only a single func driver has been probed, skip the heavy
+ * hotplug dance above and execute the reset immediately.
+ */
+ mmc_power_cycle(host, card->ocr);
return mmc_sdio_reinit_card(host);
}
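Callers of mmc_hw_reset() must now cope with the positive return; this pairs with the mmc_hw_reset() change in core.c above that relaxes its warning from `if (ret)` to `if (ret < 0)`. A hedged sketch from a func driver's perspective (the error handling is illustrative):

	ret = mmc_hw_reset(card->host);
	if (ret < 0) {
		dev_err(&func->dev, "HW reset failed: %d\n", ret);
	} else if (ret == 1) {
		/* Reset was deferred to a rescan: the card will be removed
		 * and re-probed, so stop touching it from this context.
		 */
		return;
	}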
diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c
index 2963e6542958..3cc928282af7 100644
--- a/drivers/mmc/core/sdio_bus.c
+++ b/drivers/mmc/core/sdio_bus.c
@@ -138,6 +138,8 @@ static int sdio_bus_probe(struct device *dev)
if (ret)
return ret;
+ atomic_inc(&func->card->sdio_funcs_probed);
+
/* Unbound SDIO functions are always suspended.
* During probe, the function is set active and the usage count
* is incremented. If the driver supports runtime PM,
@@ -153,7 +155,10 @@ static int sdio_bus_probe(struct device *dev)
/* Set the default block size so the driver is sure it's something
* sensible. */
sdio_claim_host(func);
- ret = sdio_set_block_size(func, 0);
+ if (mmc_card_removed(func->card))
+ ret = -ENOMEDIUM;
+ else
+ ret = sdio_set_block_size(func, 0);
sdio_release_host(func);
if (ret)
goto disable_runtimepm;
@@ -165,6 +170,7 @@ static int sdio_bus_probe(struct device *dev)
return 0;
disable_runtimepm:
+ atomic_dec(&func->card->sdio_funcs_probed);
if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD)
pm_runtime_put_noidle(dev);
dev_pm_domain_detach(dev, false);
@@ -181,6 +187,7 @@ static int sdio_bus_remove(struct device *dev)
pm_runtime_get_sync(dev);
drv->remove(func);
+ atomic_dec(&func->card->sdio_funcs_probed);
if (func->irq_handler) {
pr_warn("WARNING: driver %s did not remove its interrupt handler!\n",
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 49ea02c467bf..d06b2dfe3c95 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -159,6 +159,7 @@ config MMC_SDHCI_OF_ASPEED
tristate "SDHCI OF support for the ASPEED SDHCI controller"
depends on MMC_SDHCI_PLTFM
depends on OF && OF_ADDRESS
+ select MMC_SDHCI_IO_ACCESSORS
help
This selects the ASPEED Secure Digital Host Controller Interface.
@@ -368,6 +369,17 @@ config MMC_SDHCI_F_SDH30
If unsure, say N.
+config MMC_SDHCI_MILBEAUT
+ tristate "SDHCI support for Socionext Milbeaut Serieas using F_SDH30"
+ depends on MMC_SDHCI_PLTFM
+ depends on OF
+ help
+ This selects the Secure Digital Host Controller Interface (SDHCI)
+	  needed by the Milbeaut SoC for MMC / SD / SDIO support.
+ If you have a controller with this interface, say Y or M here.
+
+ If unsure, say N.
+
config MMC_SDHCI_IPROC
tristate "SDHCI support for the BCM2835 & iProc SD/MMC Controller"
depends on ARCH_BCM2835 || ARCH_BCM_IPROC || COMPILE_TEST
@@ -1011,6 +1023,7 @@ config MMC_SDHCI_AM654
tristate "Support for the SDHCI Controller in TI's AM654 SOCs"
depends on MMC_SDHCI_PLTFM && OF && REGMAP_MMIO
select MMC_SDHCI_IO_ACCESSORS
+ select MMC_CQHCI
help
This selects the Secure Digital Host Controller Interface (SDHCI)
support present in TI's AM654 SOCs. The controller supports
@@ -1019,3 +1032,11 @@ config MMC_SDHCI_AM654
If you have a controller with this interface, say Y or M here.
If unsure, say N.
+
+config MMC_OWL
+ tristate "Actions Semi Owl SD/MMC Host Controller support"
+ depends on HAS_DMA
+ depends on ARCH_ACTIONS || COMPILE_TEST
+ help
+ This selects support for the SD/MMC Host Controller on
+ Actions Semi Owl SoCs.
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index 11c4598e91d9..21d9089e5eda 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -21,6 +21,7 @@ obj-$(CONFIG_MMC_SDHCI_PXAV2) += sdhci-pxav2.o
obj-$(CONFIG_MMC_SDHCI_S3C) += sdhci-s3c.o
obj-$(CONFIG_MMC_SDHCI_SIRF) += sdhci-sirf.o
obj-$(CONFIG_MMC_SDHCI_F_SDH30) += sdhci_f_sdh30.o
+obj-$(CONFIG_MMC_SDHCI_MILBEAUT) += sdhci-milbeaut.o
obj-$(CONFIG_MMC_SDHCI_SPEAR) += sdhci-spear.o
obj-$(CONFIG_MMC_SDHCI_AM654) += sdhci_am654.o
obj-$(CONFIG_MMC_WBSD) += wbsd.o
@@ -73,6 +74,7 @@ obj-$(CONFIG_MMC_SUNXI) += sunxi-mmc.o
obj-$(CONFIG_MMC_USDHI6ROL0) += usdhi6rol0.o
obj-$(CONFIG_MMC_TOSHIBA_PCI) += toshsd.o
obj-$(CONFIG_MMC_BCM2835) += bcm2835.o
+obj-$(CONFIG_MMC_OWL) += owl-mmc.o
obj-$(CONFIG_MMC_REALTEK_PCI) += rtsx_pci_sdmmc.o
obj-$(CONFIG_MMC_REALTEK_USB) += rtsx_usb_sdmmc.o
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index c26fbe5f2222..581b99f9e113 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -2347,8 +2347,7 @@ static void atmci_cleanup_slot(struct atmel_mci_slot *slot,
static int atmci_configure_dma(struct atmel_mci *host)
{
- host->dma.chan = dma_request_slave_channel_reason(&host->pdev->dev,
- "rxtx");
+ host->dma.chan = dma_request_chan(&host->pdev->dev, "rxtx");
if (PTR_ERR(host->dma.chan) == -ENODEV) {
struct mci_platform_data *pdata = host->pdev->dev.platform_data;
diff --git a/drivers/mmc/host/bcm2835.c b/drivers/mmc/host/bcm2835.c
index 148414d7f0c9..99f61fd2a658 100644
--- a/drivers/mmc/host/bcm2835.c
+++ b/drivers/mmc/host/bcm2835.c
@@ -1357,7 +1357,6 @@ static int bcm2835_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct clk *clk;
- struct resource *iomem;
struct bcm2835_host *host;
struct mmc_host *mmc;
const __be32 *regaddr_p;
@@ -1373,8 +1372,7 @@ static int bcm2835_probe(struct platform_device *pdev)
host->pdev = pdev;
spin_lock_init(&host->lock);
- iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- host->ioaddr = devm_ioremap_resource(dev, iomem);
+ host->ioaddr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(host->ioaddr)) {
ret = PTR_ERR(host->ioaddr);
goto err;
diff --git a/drivers/mmc/host/cavium-octeon.c b/drivers/mmc/host/cavium-octeon.c
index 22aded1065ae..916746c6c2c7 100644
--- a/drivers/mmc/host/cavium-octeon.c
+++ b/drivers/mmc/host/cavium-octeon.c
@@ -148,7 +148,6 @@ static int octeon_mmc_probe(struct platform_device *pdev)
{
struct device_node *cn, *node = pdev->dev.of_node;
struct cvm_mmc_host *host;
- struct resource *res;
void __iomem *base;
int mmc_irq[9];
int i, ret = 0;
@@ -205,23 +204,13 @@ static int octeon_mmc_probe(struct platform_device *pdev)
host->last_slot = -1;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "Platform resource[0] is missing\n");
- return -ENXIO;
- }
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
host->base = (void __iomem *)base;
host->reg_off = 0;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (!res) {
- dev_err(&pdev->dev, "Platform resource[1] is missing\n");
- return -EINVAL;
- }
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(base))
return PTR_ERR(base);
host->dma_base = (void __iomem *)base;
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 79c55c7b4afd..bf0048ebbda3 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -3441,8 +3441,8 @@ int dw_mci_runtime_resume(struct device *dev)
* Restore the initial value at FIFOTH register
* And Invalidate the prev_blksz with zero
*/
- mci_writel(host, FIFOTH, host->fifoth_val);
- host->prev_blksz = 0;
+ mci_writel(host, FIFOTH, host->fifoth_val);
+ host->prev_blksz = 0;
/* Put in max timeout */
mci_writel(host, TMOUT, 0xFFFFFFFF);
diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c
index f816c06ef916..78383f60a3dc 100644
--- a/drivers/mmc/host/jz4740_mmc.c
+++ b/drivers/mmc/host/jz4740_mmc.c
@@ -41,6 +41,7 @@
#define JZ_REG_MMC_RESP_FIFO 0x34
#define JZ_REG_MMC_RXFIFO 0x38
#define JZ_REG_MMC_TXFIFO 0x3C
+#define JZ_REG_MMC_LPM 0x40
#define JZ_REG_MMC_DMAC 0x44
#define JZ_MMC_STRPCL_EXIT_MULTIPLE BIT(7)
@@ -77,6 +78,8 @@
#define JZ_MMC_CMDAT_IO_ABORT BIT(11)
#define JZ_MMC_CMDAT_BUS_WIDTH_4BIT BIT(10)
+#define JZ_MMC_CMDAT_BUS_WIDTH_8BIT (BIT(10) | BIT(9))
+#define JZ_MMC_CMDAT_BUS_WIDTH_MASK (BIT(10) | BIT(9))
#define JZ_MMC_CMDAT_DMA_EN BIT(8)
#define JZ_MMC_CMDAT_INIT BIT(7)
#define JZ_MMC_CMDAT_BUSY BIT(6)
@@ -98,12 +101,20 @@
#define JZ_MMC_DMAC_DMA_SEL BIT(1)
#define JZ_MMC_DMAC_DMA_EN BIT(0)
+#define JZ_MMC_LPM_DRV_RISING BIT(31)
+#define JZ_MMC_LPM_DRV_RISING_QTR_PHASE_DLY BIT(31)
+#define JZ_MMC_LPM_DRV_RISING_1NS_DLY BIT(30)
+#define JZ_MMC_LPM_SMP_RISING_QTR_OR_HALF_PHASE_DLY BIT(29)
+#define JZ_MMC_LPM_LOW_POWER_MODE_EN BIT(0)
+
#define JZ_MMC_CLK_RATE 24000000
enum jz4740_mmc_version {
JZ_MMC_JZ4740,
JZ_MMC_JZ4725B,
+ JZ_MMC_JZ4760,
JZ_MMC_JZ4780,
+ JZ_MMC_X1000,
};
enum jz4740_mmc_state {
@@ -852,6 +863,22 @@ static int jz4740_mmc_set_clock_rate(struct jz4740_mmc_host *host, int rate)
}
writew(div, host->base + JZ_REG_MMC_CLKRT);
+
+ if (real_rate > 25000000) {
+ if (host->version >= JZ_MMC_X1000) {
+ writel(JZ_MMC_LPM_DRV_RISING_QTR_PHASE_DLY |
+ JZ_MMC_LPM_SMP_RISING_QTR_OR_HALF_PHASE_DLY |
+ JZ_MMC_LPM_LOW_POWER_MODE_EN,
+ host->base + JZ_REG_MMC_LPM);
+ } else if (host->version >= JZ_MMC_JZ4760) {
+ writel(JZ_MMC_LPM_DRV_RISING |
+ JZ_MMC_LPM_LOW_POWER_MODE_EN,
+ host->base + JZ_REG_MMC_LPM);
+ } else if (host->version >= JZ_MMC_JZ4725B)
+ writel(JZ_MMC_LPM_LOW_POWER_MODE_EN,
+ host->base + JZ_REG_MMC_LPM);
+ }
+
return real_rate;
}
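Note that the version tests above rely on enum jz4740_mmc_version being declared oldest to newest (JZ4740 < JZ4725B < JZ4760 < JZ4780 < X1000), and the else-if chain ensures only the highest matching branch runs: on a JZ4780, for instance, `host->version >= JZ_MMC_JZ4760` is taken and the controller gets JZ_MMC_LPM_DRV_RISING | JZ_MMC_LPM_LOW_POWER_MODE_EN.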
@@ -895,11 +922,16 @@ static void jz4740_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
switch (ios->bus_width) {
case MMC_BUS_WIDTH_1:
- host->cmdat &= ~JZ_MMC_CMDAT_BUS_WIDTH_4BIT;
+ host->cmdat &= ~JZ_MMC_CMDAT_BUS_WIDTH_MASK;
break;
case MMC_BUS_WIDTH_4:
+ host->cmdat &= ~JZ_MMC_CMDAT_BUS_WIDTH_MASK;
host->cmdat |= JZ_MMC_CMDAT_BUS_WIDTH_4BIT;
break;
+ case MMC_BUS_WIDTH_8:
+ host->cmdat &= ~JZ_MMC_CMDAT_BUS_WIDTH_MASK;
+ host->cmdat |= JZ_MMC_CMDAT_BUS_WIDTH_8BIT;
+ break;
default:
break;
}
@@ -924,7 +956,9 @@ static const struct mmc_host_ops jz4740_mmc_ops = {
static const struct of_device_id jz4740_mmc_of_match[] = {
{ .compatible = "ingenic,jz4740-mmc", .data = (void *) JZ_MMC_JZ4740 },
{ .compatible = "ingenic,jz4725b-mmc", .data = (void *)JZ_MMC_JZ4725B },
+ { .compatible = "ingenic,jz4760-mmc", .data = (void *) JZ_MMC_JZ4760 },
{ .compatible = "ingenic,jz4780-mmc", .data = (void *) JZ_MMC_JZ4780 },
+ { .compatible = "ingenic,x1000-mmc", .data = (void *) JZ_MMC_X1000 },
{},
};
MODULE_DEVICE_TABLE(of, jz4740_mmc_of_match);
@@ -1025,11 +1059,12 @@ static int jz4740_mmc_probe(struct platform_device* pdev)
dev_err(&pdev->dev, "Failed to add mmc host: %d\n", ret);
goto err_release_dma;
}
- dev_info(&pdev->dev, "JZ SD/MMC card driver registered\n");
+ dev_info(&pdev->dev, "Ingenic SD/MMC card driver registered\n");
dev_info(&pdev->dev, "Using %s, %d-bit mode\n",
host->use_dma ? "DMA" : "PIO",
- (mmc->caps & MMC_CAP_4_BIT_DATA) ? 4 : 1);
+ (mmc->caps & MMC_CAP_8_BIT_DATA) ? 8 :
+ ((mmc->caps & MMC_CAP_4_BIT_DATA) ? 4 : 1));
return 0;
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index 66e354d51ee9..74c6cfbf9172 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -1421,7 +1421,7 @@ static int mmc_spi_probe(struct spi_device *spi)
* Index 0 is card detect
* Old boardfiles were specifying 1 ms as debounce
*/
- status = mmc_gpiod_request_cd(mmc, NULL, 0, false, 1, NULL);
+ status = mmc_gpiod_request_cd(mmc, NULL, 0, false, 1000, NULL);
if (status == -EPROBE_DEFER)
goto fail_add_host;
if (!status) {
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index c37e70dbe250..40e72c30ea84 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -44,6 +44,7 @@
#define DRIVER_NAME "mmci-pl18x"
static void mmci_variant_init(struct mmci_host *host);
+static void ux500_variant_init(struct mmci_host *host);
static void ux500v2_variant_init(struct mmci_host *host);
static unsigned int fmax = 515633;
@@ -184,7 +185,7 @@ static struct variant_data variant_ux500 = {
.irq_pio_mask = MCI_IRQ_PIO_MASK,
.start_err = MCI_STARTBITERR,
.opendrain = MCI_OD,
- .init = mmci_variant_init,
+ .init = ux500_variant_init,
};
static struct variant_data variant_ux500v2 = {
@@ -261,6 +262,10 @@ static struct variant_data variant_stm32_sdmmc = {
.datalength_bits = 25,
.datactrl_blocksz = 14,
.stm32_idmabsize_mask = GENMASK(12, 5),
+ .busy_timeout = true,
+ .busy_detect = true,
+ .busy_detect_flag = MCI_STM32_BUSYD0,
+ .busy_detect_mask = MCI_STM32_BUSYD0ENDMASK,
.init = sdmmc_variant_init,
};
@@ -419,7 +424,7 @@ static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
mmci_write_clkreg(host, clk);
}
-void mmci_dma_release(struct mmci_host *host)
+static void mmci_dma_release(struct mmci_host *host)
{
if (host->ops && host->ops->dma_release)
host->ops->dma_release(host);
@@ -427,7 +432,7 @@ void mmci_dma_release(struct mmci_host *host)
host->use_dma = false;
}
-void mmci_dma_setup(struct mmci_host *host)
+static void mmci_dma_setup(struct mmci_host *host)
{
if (!host->ops || !host->ops->dma_setup)
return;
@@ -462,7 +467,7 @@ static int mmci_validate_data(struct mmci_host *host,
return 0;
}
-int mmci_prep_data(struct mmci_host *host, struct mmc_data *data, bool next)
+static int mmci_prep_data(struct mmci_host *host, struct mmc_data *data, bool next)
{
int err;
@@ -478,7 +483,7 @@ int mmci_prep_data(struct mmci_host *host, struct mmc_data *data, bool next)
return err;
}
-void mmci_unprep_data(struct mmci_host *host, struct mmc_data *data,
+static void mmci_unprep_data(struct mmci_host *host, struct mmc_data *data,
int err)
{
if (host->ops && host->ops->unprep_data)
@@ -487,7 +492,7 @@ void mmci_unprep_data(struct mmci_host *host, struct mmc_data *data,
data->host_cookie = 0;
}
-void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
+static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
{
WARN_ON(data->host_cookie && data->host_cookie != host->next_cookie);
@@ -495,7 +500,7 @@ void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
host->ops->get_next_data(host, data);
}
-int mmci_dma_start(struct mmci_host *host, unsigned int datactrl)
+static int mmci_dma_start(struct mmci_host *host, unsigned int datactrl)
{
struct mmc_data *data = host->data;
int ret;
@@ -530,7 +535,7 @@ int mmci_dma_start(struct mmci_host *host, unsigned int datactrl)
return 0;
}
-void mmci_dma_finalize(struct mmci_host *host, struct mmc_data *data)
+static void mmci_dma_finalize(struct mmci_host *host, struct mmc_data *data)
{
if (!host->use_dma)
return;
@@ -539,7 +544,7 @@ void mmci_dma_finalize(struct mmci_host *host, struct mmc_data *data)
host->ops->dma_finalize(host, data);
}
-void mmci_dma_error(struct mmci_host *host)
+static void mmci_dma_error(struct mmci_host *host)
{
if (!host->use_dma)
return;
@@ -610,6 +615,67 @@ static u32 ux500v2_get_dctrl_cfg(struct mmci_host *host)
return MCI_DPSM_ENABLE | (host->data->blksz << 16);
}
+static bool ux500_busy_complete(struct mmci_host *host, u32 status, u32 err_msk)
+{
+ void __iomem *base = host->base;
+
+ /*
+ * Before unmasking for the busy end IRQ, confirm that the
+ * command was sent successfully. To keep track of having a
+ * command in-progress, waiting for busy signaling to end,
+ * store the status in host->busy_status.
+ *
+ * Note that, the card may need a couple of clock cycles before
+ * it starts signaling busy on DAT0, hence re-read the
+ * MMCISTATUS register here, to allow the busy bit to be set.
+ * Potentially we may even need to poll the register for a
+ * while, to allow it to be set, but tests indicate that it
+ * isn't needed.
+ */
+ if (!host->busy_status && !(status & err_msk) &&
+ (readl(base + MMCISTATUS) & host->variant->busy_detect_flag)) {
+ writel(readl(base + MMCIMASK0) |
+ host->variant->busy_detect_mask,
+ base + MMCIMASK0);
+
+ host->busy_status = status & (MCI_CMDSENT | MCI_CMDRESPEND);
+ return false;
+ }
+
+ /*
+ * If there is a command in-progress that has been successfully
+ * sent, then bail out if busy status is set and wait for the
+ * busy end IRQ.
+ *
+ * Note that the HW triggers an IRQ on both edges while
+ * monitoring DAT0 for busy completion, but there is only one
+ * status bit in MMCISTATUS for the busy state. Therefore
+ * both the start and the end interrupts needs to be cleared,
+ * one after the other. So, clear the busy start IRQ here.
+ */
+ if (host->busy_status &&
+ (status & host->variant->busy_detect_flag)) {
+ writel(host->variant->busy_detect_mask, base + MMCICLEAR);
+ return false;
+ }
+
+ /*
+ * If there is a command in-progress that has been successfully
+ * sent and the busy bit isn't set, it means we have received
+ * the busy end IRQ. Clear and mask the IRQ, then continue to
+ * process the command.
+ */
+ if (host->busy_status) {
+ writel(host->variant->busy_detect_mask, base + MMCICLEAR);
+
+ writel(readl(base + MMCIMASK0) &
+ ~host->variant->busy_detect_mask, base + MMCIMASK0);
+ host->busy_status = 0;
+ }
+
+ return true;
+}
+
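The contract of the new busy_complete hook, shared by this ux500 implementation and the sdmmc one later in the patch: return false while busy handling is still in flight (the IRQ handler bails out and waits for the next busy IRQ), true once busy signaling is finished or was never started. The call site added further down in this diff reads:

	if (busy_resp && host->variant->busy_detect)
		if (!host->ops->busy_complete(host, status, err_msk))
			return;	/* still busy: wait for another IRQ */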
/*
* All the DMA operation mode stuff goes inside this ifdef.
* This assumes that you have a generic DMA device interface,
@@ -948,14 +1014,21 @@ static struct mmci_host_ops mmci_variant_ops = {
};
#endif
-void mmci_variant_init(struct mmci_host *host)
+static void mmci_variant_init(struct mmci_host *host)
+{
+ host->ops = &mmci_variant_ops;
+}
+
+static void ux500_variant_init(struct mmci_host *host)
{
host->ops = &mmci_variant_ops;
+ host->ops->busy_complete = ux500_busy_complete;
}
-void ux500v2_variant_init(struct mmci_host *host)
+static void ux500v2_variant_init(struct mmci_host *host)
{
host->ops = &mmci_variant_ops;
+ host->ops->busy_complete = ux500_busy_complete;
host->ops->get_datactrl_cfg = ux500v2_get_dctrl_cfg;
}
@@ -1075,6 +1148,7 @@ static void
mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
{
void __iomem *base = host->base;
+ unsigned long long clks;
dev_dbg(mmc_dev(host->mmc), "op %02x arg %08x flags %08x\n",
cmd->opcode, cmd->arg, cmd->flags);
@@ -1097,6 +1171,16 @@ mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
else
c |= host->variant->cmdreg_srsp;
}
+
+ if (host->variant->busy_timeout && cmd->flags & MMC_RSP_BUSY) {
+ if (!cmd->busy_timeout)
+ cmd->busy_timeout = 10 * MSEC_PER_SEC;
+
+ clks = (unsigned long long)cmd->busy_timeout * host->cclk;
+ do_div(clks, MSEC_PER_SEC);
+ writel_relaxed(clks, host->base + MMCIDATATIMER);
+ }
+
if (/*interrupt*/0)
c |= MCI_CPSM_INTERRUPT;
@@ -1201,6 +1285,7 @@ static void
mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
unsigned int status)
{
+ u32 err_msk = MCI_CMDCRCFAIL | MCI_CMDTIMEOUT;
void __iomem *base = host->base;
bool sbc, busy_resp;
@@ -1215,74 +1300,17 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
* handling. Note that we tag on any latent IRQs postponed
* due to waiting for busy status.
*/
- if (!((status|host->busy_status) &
- (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT|MCI_CMDSENT|MCI_CMDRESPEND)))
+ if (host->variant->busy_timeout && busy_resp)
+ err_msk |= MCI_DATATIMEOUT;
+
+ if (!((status | host->busy_status) &
+ (err_msk | MCI_CMDSENT | MCI_CMDRESPEND)))
return;
/* Handle busy detection on DAT0 if the variant supports it. */
- if (busy_resp && host->variant->busy_detect) {
-
- /*
- * Before unmasking for the busy end IRQ, confirm that the
- * command was sent successfully. To keep track of having a
- * command in-progress, waiting for busy signaling to end,
- * store the status in host->busy_status.
- *
- * Note that, the card may need a couple of clock cycles before
- * it starts signaling busy on DAT0, hence re-read the
- * MMCISTATUS register here, to allow the busy bit to be set.
- * Potentially we may even need to poll the register for a
- * while, to allow it to be set, but tests indicates that it
- * isn't needed.
- */
- if (!host->busy_status &&
- !(status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT)) &&
- (readl(base + MMCISTATUS) & host->variant->busy_detect_flag)) {
-
- writel(readl(base + MMCIMASK0) |
- host->variant->busy_detect_mask,
- base + MMCIMASK0);
-
- host->busy_status =
- status & (MCI_CMDSENT|MCI_CMDRESPEND);
- return;
- }
-
- /*
- * If there is a command in-progress that has been successfully
- * sent, then bail out if busy status is set and wait for the
- * busy end IRQ.
- *
- * Note that, the HW triggers an IRQ on both edges while
- * monitoring DAT0 for busy completion, but there is only one
- * status bit in MMCISTATUS for the busy state. Therefore
- * both the start and the end interrupts needs to be cleared,
- * one after the other. So, clear the busy start IRQ here.
- */
- if (host->busy_status &&
- (status & host->variant->busy_detect_flag)) {
- writel(host->variant->busy_detect_mask,
- host->base + MMCICLEAR);
+ if (busy_resp && host->variant->busy_detect)
+ if (!host->ops->busy_complete(host, status, err_msk))
return;
- }
-
- /*
- * If there is a command in-progress that has been successfully
- * sent and the busy bit isn't set, it means we have received
- * the busy end IRQ. Clear and mask the IRQ, then continue to
- * process the command.
- */
- if (host->busy_status) {
-
- writel(host->variant->busy_detect_mask,
- host->base + MMCICLEAR);
-
- writel(readl(base + MMCIMASK0) &
- ~host->variant->busy_detect_mask,
- base + MMCIMASK0);
- host->busy_status = 0;
- }
- }
host->cmd = NULL;
@@ -1290,6 +1318,9 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
cmd->error = -ETIMEDOUT;
} else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) {
cmd->error = -EILSEQ;
+ } else if (host->variant->busy_timeout && busy_resp &&
+ status & MCI_DATATIMEOUT) {
+ cmd->error = -ETIMEDOUT;
} else {
cmd->resp[0] = readl(base + MMCIRESPONSE0);
cmd->resp[1] = readl(base + MMCIRESPONSE1);
@@ -1583,6 +1614,20 @@ static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
spin_unlock_irqrestore(&host->lock, flags);
}
+static void mmci_set_max_busy_timeout(struct mmc_host *mmc)
+{
+ struct mmci_host *host = mmc_priv(mmc);
+ u32 max_busy_timeout = 0;
+
+ if (!host->variant->busy_detect)
+ return;
+
+ if (host->variant->busy_timeout && mmc->actual_clock)
+ max_busy_timeout = ~0UL / (mmc->actual_clock / MSEC_PER_SEC);
+
+ mmc->max_busy_timeout = max_busy_timeout;
+}
+
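A worked example of the clamp above: MMCIDATATIMER is a 32-bit cycle counter, so the longest expressible busy wait is 0xffffffff card-clock cycles. At mmc->actual_clock = 52 MHz this gives max_busy_timeout = 4294967295 / (52000000 / 1000) ≈ 82595 ms, roughly 82.6 seconds. mmci_start_command() above performs the inverse conversion, clks = cmd->busy_timeout * cclk / 1000 (the do_div() call), which by construction fits the register for any timeout the core requests.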
static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct mmci_host *host = mmc_priv(mmc);
@@ -1687,6 +1732,8 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
else
mmci_set_clkreg(host, ios->clock);
+ mmci_set_max_busy_timeout(mmc);
+
if (host->ops && host->ops->set_pwrreg)
host->ops->set_pwrreg(host, pwr);
else
@@ -1957,7 +2004,6 @@ static int mmci_probe(struct amba_device *dev,
mmci_write_datactrlreg(host,
host->variant->busy_dpsm_flag);
mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
- mmc->max_busy_timeout = 0;
}
/* Prepare a CMD12 - needed to clear the DPSM on some variants. */
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h
index 833236ecb31e..158e1231aa23 100644
--- a/drivers/mmc/host/mmci.h
+++ b/drivers/mmc/host/mmci.h
@@ -164,6 +164,7 @@
#define MCI_ST_CARDBUSY (1 << 24)
/* Extended status bits for the STM32 variants */
#define MCI_STM32_BUSYD0 BIT(20)
+#define MCI_STM32_BUSYD0END BIT(21)
#define MMCICLEAR 0x038
#define MCI_CMDCRCFAILCLR (1 << 0)
@@ -287,6 +288,8 @@ struct mmci_host;
* @signal_direction: input/out direction of bus signals can be indicated
* @pwrreg_clkgate: MMCIPOWER register must be used to gate the clock
* @busy_detect: true if the variant supports busy detection on DAT0.
+ * @busy_timeout: true if the variant starts the data timer when the DPSM
+ *		   enters the Wait_R or Busy state.
* @busy_dpsm_flag: bitmask enabling busy detection in the DPSM
* @busy_detect_flag: bitmask identifying the bit in the MMCISTATUS register
* indicating that the card is busy
@@ -333,6 +336,7 @@ struct variant_data {
u8 signal_direction:1;
u8 pwrreg_clkgate:1;
u8 busy_detect:1;
+ u8 busy_timeout:1;
u32 busy_dpsm_flag;
u32 busy_detect_flag;
u32 busy_detect_mask;
@@ -366,6 +370,7 @@ struct mmci_host_ops {
void (*dma_error)(struct mmci_host *host);
void (*set_clkreg)(struct mmci_host *host, unsigned int desired);
void (*set_pwrreg)(struct mmci_host *host, unsigned int pwr);
+ bool (*busy_complete)(struct mmci_host *host, u32 status, u32 err_msk);
};
struct mmci_host {
diff --git a/drivers/mmc/host/mmci_stm32_sdmmc.c b/drivers/mmc/host/mmci_stm32_sdmmc.c
index 8e83ae6920ae..a4f7e8e689d3 100644
--- a/drivers/mmc/host/mmci_stm32_sdmmc.c
+++ b/drivers/mmc/host/mmci_stm32_sdmmc.c
@@ -25,8 +25,8 @@ struct sdmmc_priv {
void *sg_cpu;
};
-int sdmmc_idma_validate_data(struct mmci_host *host,
- struct mmc_data *data)
+static int sdmmc_idma_validate_data(struct mmci_host *host,
+ struct mmc_data *data)
{
struct scatterlist *sg;
int i;
@@ -282,6 +282,47 @@ static u32 sdmmc_get_dctrl_cfg(struct mmci_host *host)
return datactrl;
}
+static bool sdmmc_busy_complete(struct mmci_host *host, u32 status, u32 err_msk)
+{
+ void __iomem *base = host->base;
+ u32 busy_d0, busy_d0end, mask, sdmmc_status;
+
+ mask = readl_relaxed(base + MMCIMASK0);
+ sdmmc_status = readl_relaxed(base + MMCISTATUS);
+ busy_d0end = sdmmc_status & MCI_STM32_BUSYD0END;
+ busy_d0 = sdmmc_status & MCI_STM32_BUSYD0;
+
+ /* complete if there is an error or busy_d0end */
+ if ((status & err_msk) || busy_d0end)
+ goto complete;
+
+ /*
+ * On a response, busy signaling is reflected in the BUSYD0 flag.
+ * If busy_d0 is in progress, we must enable the busyd0end interrupt
+ * to wait for its completion. Otherwise this request has no busy step.
+ */
+ if (busy_d0) {
+ if (!host->busy_status) {
+ writel_relaxed(mask | host->variant->busy_detect_mask,
+ base + MMCIMASK0);
+ host->busy_status = status &
+ (MCI_CMDSENT | MCI_CMDRESPEND);
+ }
+ return false;
+ }
+
+complete:
+ if (host->busy_status) {
+ writel_relaxed(mask & ~host->variant->busy_detect_mask,
+ base + MMCIMASK0);
+ writel_relaxed(host->variant->busy_detect_mask,
+ base + MMCICLEAR);
+ host->busy_status = 0;
+ }
+
+ return true;
+}
+
static struct mmci_host_ops sdmmc_variant_ops = {
.validate_data = sdmmc_idma_validate_data,
.prep_data = sdmmc_idma_prep_data,
@@ -292,6 +333,7 @@ static struct mmci_host_ops sdmmc_variant_ops = {
.dma_finalize = sdmmc_idma_finalize,
.set_clkreg = mmci_sdmmc_set_clkreg,
.set_pwrreg = mmci_sdmmc_set_pwrreg,
+ .busy_complete = sdmmc_busy_complete,
};
void sdmmc_variant_init(struct mmci_host *host)
diff --git a/drivers/mmc/host/moxart-mmc.c b/drivers/mmc/host/moxart-mmc.c
index a0670e9cd012..fc6b9cf27d0b 100644
--- a/drivers/mmc/host/moxart-mmc.c
+++ b/drivers/mmc/host/moxart-mmc.c
@@ -608,8 +608,8 @@ static int moxart_probe(struct platform_device *pdev)
host->timeout = msecs_to_jiffies(1000);
host->sysclk = clk_get_rate(clk);
host->fifo_width = readl(host->base + REG_FEATURE) << 2;
- host->dma_chan_tx = dma_request_slave_channel_reason(dev, "tx");
- host->dma_chan_rx = dma_request_slave_channel_reason(dev, "rx");
+ host->dma_chan_tx = dma_request_chan(dev, "tx");
+ host->dma_chan_rx = dma_request_chan(dev, "rx");
spin_lock_init(&host->lock);
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 952fa4063ff8..767e964ca5a2 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -1510,8 +1510,35 @@ static void omap_hsmmc_init_card(struct mmc_host *mmc, struct mmc_card *card)
{
struct omap_hsmmc_host *host = mmc_priv(mmc);
- if (mmc_pdata(host)->init_card)
- mmc_pdata(host)->init_card(card);
+ if (card->type == MMC_TYPE_SDIO || card->type == MMC_TYPE_SD_COMBO) {
+ struct device_node *np = mmc_dev(mmc)->of_node;
+
+ /*
+ * REVISIT: should be moved to sdio core and made more
+ * general e.g. by expanding the DT bindings of child nodes
+ * to provide a mechanism to provide this information:
+ * Documentation/devicetree/bindings/mmc/mmc-card.txt
+ */
+
+ np = of_get_compatible_child(np, "ti,wl1251");
+ if (np) {
+ /*
+ * We have TI wl1251 attached to MMC3. Pass this
+ * information to the SDIO core because it can't be
+ * probed by normal methods.
+ */
+
+ dev_info(host->dev, "found wl1251\n");
+ card->quirks |= MMC_QUIRK_NONSTD_SDIO;
+ card->cccr.wide_bus = 1;
+ card->cis.vendor = 0x104c;
+ card->cis.device = 0x9066;
+ card->cis.blksize = 512;
+ card->cis.max_dtr = 24000000;
+ card->ocr = 0x80;
+ of_node_put(np);
+ }
+ }
}
static void omap_hsmmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
diff --git a/drivers/mmc/host/owl-mmc.c b/drivers/mmc/host/owl-mmc.c
new file mode 100644
index 000000000000..771e3d00f1bb
--- /dev/null
+++ b/drivers/mmc/host/owl-mmc.c
@@ -0,0 +1,696 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Actions Semi Owl SoCs SD/MMC driver
+ *
+ * Copyright (c) 2014 Actions Semi Inc.
+ * Copyright (c) 2019 Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ *
+ * TODO: SDIO support
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/slot-gpio.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/reset.h>
+#include <linux/spinlock.h>
+
+/*
+ * SDC registers
+ */
+#define OWL_REG_SD_EN 0x0000
+#define OWL_REG_SD_CTL 0x0004
+#define OWL_REG_SD_STATE 0x0008
+#define OWL_REG_SD_CMD 0x000c
+#define OWL_REG_SD_ARG 0x0010
+#define OWL_REG_SD_RSPBUF0 0x0014
+#define OWL_REG_SD_RSPBUF1 0x0018
+#define OWL_REG_SD_RSPBUF2 0x001c
+#define OWL_REG_SD_RSPBUF3 0x0020
+#define OWL_REG_SD_RSPBUF4 0x0024
+#define OWL_REG_SD_DAT 0x0028
+#define OWL_REG_SD_BLK_SIZE 0x002c
+#define OWL_REG_SD_BLK_NUM 0x0030
+#define OWL_REG_SD_BUF_SIZE 0x0034
+
+/* SD_EN Bits */
+#define OWL_SD_EN_RANE BIT(31)
+#define OWL_SD_EN_RAN_SEED(x) (((x) & 0x3f) << 24)
+#define OWL_SD_EN_S18EN BIT(12)
+#define OWL_SD_EN_RESE BIT(10)
+#define OWL_SD_EN_DAT1_S BIT(9)
+#define OWL_SD_EN_CLK_S BIT(8)
+#define OWL_SD_ENABLE BIT(7)
+#define OWL_SD_EN_BSEL BIT(6)
+#define OWL_SD_EN_SDIOEN BIT(3)
+#define OWL_SD_EN_DDREN BIT(2)
+#define OWL_SD_EN_DATAWID(x) (((x) & 0x3) << 0)
+
+/* SD_CTL Bits */
+#define OWL_SD_CTL_TOUTEN BIT(31)
+#define OWL_SD_CTL_TOUTCNT(x) (((x) & 0x7f) << 24)
+#define OWL_SD_CTL_DELAY_MSK GENMASK(23, 16)
+#define OWL_SD_CTL_RDELAY(x) (((x) & 0xf) << 20)
+#define OWL_SD_CTL_WDELAY(x) (((x) & 0xf) << 16)
+#define OWL_SD_CTL_CMDLEN BIT(13)
+#define OWL_SD_CTL_SCC BIT(12)
+#define OWL_SD_CTL_TCN(x) (((x) & 0xf) << 8)
+#define OWL_SD_CTL_TS BIT(7)
+#define OWL_SD_CTL_LBE BIT(6)
+#define OWL_SD_CTL_C7EN BIT(5)
+#define OWL_SD_CTL_TM(x) (((x) & 0xf) << 0)
+
+#define OWL_SD_DELAY_LOW_CLK 0x0f
+#define OWL_SD_DELAY_MID_CLK 0x0a
+#define OWL_SD_DELAY_HIGH_CLK 0x09
+#define OWL_SD_RDELAY_DDR50 0x0a
+#define OWL_SD_WDELAY_DDR50 0x08
+
+/* SD_STATE Bits */
+#define OWL_SD_STATE_DAT1BS BIT(18)
+#define OWL_SD_STATE_SDIOB_P BIT(17)
+#define OWL_SD_STATE_SDIOB_EN BIT(16)
+#define OWL_SD_STATE_TOUTE BIT(15)
+#define OWL_SD_STATE_BAEP BIT(14)
+#define OWL_SD_STATE_MEMRDY BIT(12)
+#define OWL_SD_STATE_CMDS BIT(11)
+#define OWL_SD_STATE_DAT1AS BIT(10)
+#define OWL_SD_STATE_SDIOA_P BIT(9)
+#define OWL_SD_STATE_SDIOA_EN BIT(8)
+#define OWL_SD_STATE_DAT0S BIT(7)
+#define OWL_SD_STATE_TEIE BIT(6)
+#define OWL_SD_STATE_TEI BIT(5)
+#define OWL_SD_STATE_CLNR BIT(4)
+#define OWL_SD_STATE_CLC BIT(3)
+#define OWL_SD_STATE_WC16ER BIT(2)
+#define OWL_SD_STATE_RC16ER BIT(1)
+#define OWL_SD_STATE_CRC7ER BIT(0)
+
+struct owl_mmc_host {
+ struct device *dev;
+ struct reset_control *reset;
+ void __iomem *base;
+ struct clk *clk;
+ struct completion sdc_complete;
+ spinlock_t lock;
+ int irq;
+ u32 clock;
+ bool ddr_50;
+
+ enum dma_data_direction dma_dir;
+ struct dma_chan *dma;
+ struct dma_async_tx_descriptor *desc;
+ struct dma_slave_config dma_cfg;
+ struct completion dma_complete;
+
+ struct mmc_host *mmc;
+ struct mmc_request *mrq;
+ struct mmc_command *cmd;
+ struct mmc_data *data;
+};
+
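+/*
+ * Read-modify-write helper: set or clear the @val bits in the register at
+ * @reg depending on @state.
+ */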
+static void owl_mmc_update_reg(void __iomem *reg, unsigned int val, bool state)
+{
+ unsigned int regval;
+
+ regval = readl(reg);
+
+ if (state)
+ regval |= val;
+ else
+ regval &= ~val;
+
+ writel(regval, reg);
+}
+
+static irqreturn_t owl_irq_handler(int irq, void *devid)
+{
+ struct owl_mmc_host *owl_host = devid;
+ unsigned long flags;
+ u32 state;
+
+ spin_lock_irqsave(&owl_host->lock, flags);
+
+ state = readl(owl_host->base + OWL_REG_SD_STATE);
+ if (state & OWL_SD_STATE_TEI) {
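+ /*
+ * TEI looks to be write-one-to-clear (an assumption from this
+ * sequence): write the flag back to acknowledge it, then wake
+ * up the waiter.
+ */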
+ state = readl(owl_host->base + OWL_REG_SD_STATE);
+ state |= OWL_SD_STATE_TEI;
+ writel(state, owl_host->base + OWL_REG_SD_STATE);
+ complete(&owl_host->sdc_complete);
+ }
+
+ spin_unlock_irqrestore(&owl_host->lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static void owl_mmc_finish_request(struct owl_mmc_host *owl_host)
+{
+ struct mmc_request *mrq = owl_host->mrq;
+ struct mmc_data *data;
+
+ /* Should never be NULL */
+ WARN_ON(!mrq);
+
+ data = mrq->data;
+
+ owl_host->mrq = NULL;
+
+ if (data)
+ dma_unmap_sg(owl_host->dma->device->dev, data->sg, data->sg_len,
+ owl_host->dma_dir);
+
+ /* Finally finish request */
+ mmc_request_done(owl_host->mmc, mrq);
+}
+
+static void owl_mmc_send_cmd(struct owl_mmc_host *owl_host,
+ struct mmc_command *cmd,
+ struct mmc_data *data)
+{
+ u32 mode, state, resp[2];
+ u32 cmd_rsp_mask = 0;
+
+ init_completion(&owl_host->sdc_complete);
+
+ switch (mmc_resp_type(cmd)) {
+ case MMC_RSP_NONE:
+ mode = OWL_SD_CTL_TM(0);
+ break;
+
+ case MMC_RSP_R1:
+ if (data) {
+ if (data->flags & MMC_DATA_READ)
+ mode = OWL_SD_CTL_TM(4);
+ else
+ mode = OWL_SD_CTL_TM(5);
+ } else {
+ mode = OWL_SD_CTL_TM(1);
+ }
+ cmd_rsp_mask = OWL_SD_STATE_CLNR | OWL_SD_STATE_CRC7ER;
+
+ break;
+
+ case MMC_RSP_R1B:
+ mode = OWL_SD_CTL_TM(3);
+ cmd_rsp_mask = OWL_SD_STATE_CLNR | OWL_SD_STATE_CRC7ER;
+ break;
+
+ case MMC_RSP_R2:
+ mode = OWL_SD_CTL_TM(2);
+ cmd_rsp_mask = OWL_SD_STATE_CLNR | OWL_SD_STATE_CRC7ER;
+ break;
+
+ case MMC_RSP_R3:
+ mode = OWL_SD_CTL_TM(1);
+ cmd_rsp_mask = OWL_SD_STATE_CLNR;
+ break;
+
+ default:
+ dev_warn(owl_host->dev, "Unknown MMC response type\n");
+ cmd->error = -EINVAL;
+ return;
+ }
+
+ /* Keep current WDELAY and RDELAY */
+ mode |= (readl(owl_host->base + OWL_REG_SD_CTL) & (0xff << 16));
+
+ /* Start to send corresponding command type */
+ writel(cmd->arg, owl_host->base + OWL_REG_SD_ARG);
+ writel(cmd->opcode, owl_host->base + OWL_REG_SD_CMD);
+
+ /* Set LBE to send clk at the end of last read block */
+ if (data) {
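+ /* 0x64000000 is OWL_SD_CTL_TOUTCNT(0x64), i.e. a timeout count of 100 */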
+ mode |= (OWL_SD_CTL_TS | OWL_SD_CTL_LBE | 0x64000000);
+ } else {
+ mode &= ~(OWL_SD_CTL_TOUTEN | OWL_SD_CTL_LBE);
+ mode |= OWL_SD_CTL_TS;
+ }
+
+ owl_host->cmd = cmd;
+
+ /* Start transfer */
+ writel(mode, owl_host->base + OWL_REG_SD_CTL);
+
+ if (data)
+ return;
+
+ if (!wait_for_completion_timeout(&owl_host->sdc_complete, 30 * HZ)) {
+ dev_err(owl_host->dev, "CMD interrupt timeout\n");
+ cmd->error = -ETIMEDOUT;
+ return;
+ }
+
+ state = readl(owl_host->base + OWL_REG_SD_STATE);
+ if (mmc_resp_type(cmd) & MMC_RSP_PRESENT) {
+ if (cmd_rsp_mask & state) {
+ if (state & OWL_SD_STATE_CLNR) {
+ dev_err(owl_host->dev, "Error CMD_NO_RSP\n");
+ cmd->error = -EILSEQ;
+ return;
+ }
+
+ if (state & OWL_SD_STATE_CRC7ER) {
+ dev_err(owl_host->dev, "Error CMD_RSP_CRC\n");
+ cmd->error = -EILSEQ;
+ return;
+ }
+ }
+
+ if (mmc_resp_type(cmd) & MMC_RSP_136) {
+ cmd->resp[3] = readl(owl_host->base + OWL_REG_SD_RSPBUF0);
+ cmd->resp[2] = readl(owl_host->base + OWL_REG_SD_RSPBUF1);
+ cmd->resp[1] = readl(owl_host->base + OWL_REG_SD_RSPBUF2);
+ cmd->resp[0] = readl(owl_host->base + OWL_REG_SD_RSPBUF3);
+ } else {
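+ /*
+ * For 48-bit responses the 32 response bits straddle
+ * RSPBUF0/1, with the end/CRC7 byte at the bottom of
+ * RSPBUF0 (an assumption inferred from these shifts).
+ */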
+ resp[0] = readl(owl_host->base + OWL_REG_SD_RSPBUF0);
+ resp[1] = readl(owl_host->base + OWL_REG_SD_RSPBUF1);
+ cmd->resp[0] = resp[1] << 24 | resp[0] >> 8;
+ cmd->resp[1] = resp[1] >> 8;
+ }
+ }
+}
+
+static void owl_mmc_dma_complete(void *param)
+{
+ struct owl_mmc_host *owl_host = param;
+ struct mmc_data *data = owl_host->data;
+
+ if (data)
+ complete(&owl_host->dma_complete);
+}
+
+static int owl_mmc_prepare_data(struct owl_mmc_host *owl_host,
+ struct mmc_data *data)
+{
+ u32 total;
+
+ owl_mmc_update_reg(owl_host->base + OWL_REG_SD_EN, OWL_SD_EN_BSEL,
+ true);
+ writel(data->blocks, owl_host->base + OWL_REG_SD_BLK_NUM);
+ writel(data->blksz, owl_host->base + OWL_REG_SD_BLK_SIZE);
+ total = data->blksz * data->blocks;
+
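+ /*
+ * The internal data buffer appears to be 512 bytes deep (an
+ * assumption from this cap), so program min(total, 512).
+ */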
+ if (total < 512)
+ writel(total, owl_host->base + OWL_REG_SD_BUF_SIZE);
+ else
+ writel(512, owl_host->base + OWL_REG_SD_BUF_SIZE);
+
+ if (data->flags & MMC_DATA_WRITE) {
+ owl_host->dma_dir = DMA_TO_DEVICE;
+ owl_host->dma_cfg.direction = DMA_MEM_TO_DEV;
+ } else {
+ owl_host->dma_dir = DMA_FROM_DEVICE;
+ owl_host->dma_cfg.direction = DMA_DEV_TO_MEM;
+ }
+
+ dma_map_sg(owl_host->dma->device->dev, data->sg,
+ data->sg_len, owl_host->dma_dir);
+
+ dmaengine_slave_config(owl_host->dma, &owl_host->dma_cfg);
+ owl_host->desc = dmaengine_prep_slave_sg(owl_host->dma, data->sg,
+ data->sg_len,
+ owl_host->dma_cfg.direction,
+ DMA_PREP_INTERRUPT |
+ DMA_CTRL_ACK);
+ if (!owl_host->desc) {
+ dev_err(owl_host->dev, "Can't prepare slave sg\n");
+ return -EBUSY;
+ }
+
+ owl_host->data = data;
+
+ owl_host->desc->callback = owl_mmc_dma_complete;
+ owl_host->desc->callback_param = (void *)owl_host;
+ data->error = 0;
+
+ return 0;
+}
+
+static void owl_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ struct owl_mmc_host *owl_host = mmc_priv(mmc);
+ struct mmc_data *data = mrq->data;
+ int ret;
+
+ owl_host->mrq = mrq;
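+ /*
+ * For data requests, map the sglist and queue the DMA descriptor
+ * first, so the transfer can start as soon as the command phase
+ * completes.
+ */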
+ if (mrq->data) {
+ ret = owl_mmc_prepare_data(owl_host, data);
+ if (ret < 0) {
+ data->error = ret;
+ goto err_out;
+ }
+
+ init_completion(&owl_host->dma_complete);
+ dmaengine_submit(owl_host->desc);
+ dma_async_issue_pending(owl_host->dma);
+ }
+
+ owl_mmc_send_cmd(owl_host, mrq->cmd, data);
+
+ if (data) {
+ if (!wait_for_completion_timeout(&owl_host->sdc_complete,
+ 10 * HZ)) {
+ dev_err(owl_host->dev, "CMD interrupt timeout\n");
+ mrq->cmd->error = -ETIMEDOUT;
+ dmaengine_terminate_all(owl_host->dma);
+ goto err_out;
+ }
+
+ if (!wait_for_completion_timeout(&owl_host->dma_complete,
+ 5 * HZ)) {
+ dev_err(owl_host->dev, "DMA interrupt timeout\n");
+ mrq->cmd->error = -ETIMEDOUT;
+ dmaengine_terminate_all(owl_host->dma);
+ goto err_out;
+ }
+
+ if (data->stop)
+ owl_mmc_send_cmd(owl_host, data->stop, NULL);
+
+ data->bytes_xfered = data->blocks * data->blksz;
+ }
+
+err_out:
+ owl_mmc_finish_request(owl_host);
+}
+
+static int owl_mmc_set_clk_rate(struct owl_mmc_host *owl_host,
+ unsigned int rate)
+{
+ unsigned long clk_rate;
+ int ret;
+ u32 reg;
+
+ reg = readl(owl_host->base + OWL_REG_SD_CTL);
+ reg &= ~OWL_SD_CTL_DELAY_MSK;
+
+ /* Set RDELAY and WDELAY based on the clock */
+ if (rate <= 1000000) {
+ writel(reg | OWL_SD_CTL_RDELAY(OWL_SD_DELAY_LOW_CLK) |
+ OWL_SD_CTL_WDELAY(OWL_SD_DELAY_LOW_CLK),
+ owl_host->base + OWL_REG_SD_CTL);
+ } else if ((rate > 1000000) && (rate <= 26000000)) {
+ writel(reg | OWL_SD_CTL_RDELAY(OWL_SD_DELAY_MID_CLK) |
+ OWL_SD_CTL_WDELAY(OWL_SD_DELAY_MID_CLK),
+ owl_host->base + OWL_REG_SD_CTL);
+ } else if ((rate > 26000000) && (rate <= 52000000) && !owl_host->ddr_50) {
+ writel(reg | OWL_SD_CTL_RDELAY(OWL_SD_DELAY_HIGH_CLK) |
+ OWL_SD_CTL_WDELAY(OWL_SD_DELAY_HIGH_CLK),
+ owl_host->base + OWL_REG_SD_CTL);
+ /* DDR50 mode has special delay chain */
+ } else if ((rate > 26000000) && (rate <= 52000000) && owl_host->ddr_50) {
+ writel(reg | OWL_SD_CTL_RDELAY(OWL_SD_RDELAY_DDR50) |
+ OWL_SD_CTL_WDELAY(OWL_SD_WDELAY_DDR50),
+ owl_host->base + OWL_REG_SD_CTL);
+ } else {
+ dev_err(owl_host->dev, "SD clock rate not supported\n");
+ return -EINVAL;
+ }
+
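+ /*
+ * The module clock appears to run at twice the SD bus clock
+ * (inferred from the rate << 1), so round to double the
+ * requested rate.
+ */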
+ clk_rate = clk_round_rate(owl_host->clk, rate << 1);
+ ret = clk_set_rate(owl_host->clk, clk_rate);
+
+ return ret;
+}
+
+static void owl_mmc_set_clk(struct owl_mmc_host *owl_host, struct mmc_ios *ios)
+{
+ if (!ios->clock)
+ return;
+
+ owl_host->clock = ios->clock;
+ owl_mmc_set_clk_rate(owl_host, ios->clock);
+}
+
+static void owl_mmc_set_bus_width(struct owl_mmc_host *owl_host,
+ struct mmc_ios *ios)
+{
+ u32 reg;
+
+ reg = readl(owl_host->base + OWL_REG_SD_EN);
+ reg &= ~0x03;
+ switch (ios->bus_width) {
+ case MMC_BUS_WIDTH_1:
+ break;
+ case MMC_BUS_WIDTH_4:
+ reg |= OWL_SD_EN_DATAWID(1);
+ break;
+ case MMC_BUS_WIDTH_8:
+ reg |= OWL_SD_EN_DATAWID(2);
+ break;
+ }
+
+ writel(reg, owl_host->base + OWL_REG_SD_EN);
+}
+
+static void owl_mmc_ctr_reset(struct owl_mmc_host *owl_host)
+{
+ reset_control_assert(owl_host->reset);
+ udelay(20);
+ reset_control_deassert(owl_host->reset);
+}
+
+static void owl_mmc_power_on(struct owl_mmc_host *owl_host)
+{
+ u32 mode;
+
+ init_completion(&owl_host->sdc_complete);
+
+ /* Enable transfer end IRQ */
+ owl_mmc_update_reg(owl_host->base + OWL_REG_SD_STATE,
+ OWL_SD_STATE_TEIE, true);
+
+ /* Send init clk */
+ mode = (readl(owl_host->base + OWL_REG_SD_CTL) & (0xff << 16));
+ mode |= OWL_SD_CTL_TS | OWL_SD_CTL_TCN(5) | OWL_SD_CTL_TM(8);
+ writel(mode, owl_host->base + OWL_REG_SD_CTL);
+
+ if (!wait_for_completion_timeout(&owl_host->sdc_complete, HZ)) {
+ dev_err(owl_host->dev, "CMD interrupt timeout\n");
+ return;
+ }
+}
+
+static void owl_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct owl_mmc_host *owl_host = mmc_priv(mmc);
+
+ switch (ios->power_mode) {
+ case MMC_POWER_UP:
+ dev_dbg(owl_host->dev, "Powering card up\n");
+
+ /* Reset the SDC controller to clear all previous states */
+ owl_mmc_ctr_reset(owl_host);
+ clk_prepare_enable(owl_host->clk);
+ writel(OWL_SD_ENABLE | OWL_SD_EN_RESE,
+ owl_host->base + OWL_REG_SD_EN);
+
+ break;
+
+ case MMC_POWER_ON:
+ dev_dbg(owl_host->dev, "Powering card on\n");
+ owl_mmc_power_on(owl_host);
+
+ break;
+
+ case MMC_POWER_OFF:
+ dev_dbg(owl_host->dev, "Powering card off\n");
+ clk_disable_unprepare(owl_host->clk);
+
+ return;
+
+ default:
+ dev_dbg(owl_host->dev, "Ignoring unknown card power state\n");
+ break;
+ }
+
+ if (ios->clock != owl_host->clock)
+ owl_mmc_set_clk(owl_host, ios);
+
+ owl_mmc_set_bus_width(owl_host, ios);
+
+ /* Enable DDR mode if requested */
+ if (ios->timing == MMC_TIMING_UHS_DDR50) {
+ owl_host->ddr_50 = 1;
+ owl_mmc_update_reg(owl_host->base + OWL_REG_SD_EN,
+ OWL_SD_EN_DDREN, true);
+ } else {
+ owl_host->ddr_50 = 0;
+ }
+}
+
+static int owl_mmc_start_signal_voltage_switch(struct mmc_host *mmc,
+ struct mmc_ios *ios)
+{
+ struct owl_mmc_host *owl_host = mmc_priv(mmc);
+
+ /* It is enough to change the pad ctrl bit for voltage switch */
+ switch (ios->signal_voltage) {
+ case MMC_SIGNAL_VOLTAGE_330:
+ owl_mmc_update_reg(owl_host->base + OWL_REG_SD_EN,
+ OWL_SD_EN_S18EN, false);
+ break;
+ case MMC_SIGNAL_VOLTAGE_180:
+ owl_mmc_update_reg(owl_host->base + OWL_REG_SD_EN,
+ OWL_SD_EN_S18EN, true);
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+static const struct mmc_host_ops owl_mmc_ops = {
+ .request = owl_mmc_request,
+ .set_ios = owl_mmc_set_ios,
+ .get_ro = mmc_gpio_get_ro,
+ .get_cd = mmc_gpio_get_cd,
+ .start_signal_voltage_switch = owl_mmc_start_signal_voltage_switch,
+};
+
+static int owl_mmc_probe(struct platform_device *pdev)
+{
+ struct owl_mmc_host *owl_host;
+ struct mmc_host *mmc;
+ struct resource *res;
+ int ret;
+
+ mmc = mmc_alloc_host(sizeof(struct owl_mmc_host), &pdev->dev);
+ if (!mmc) {
+ dev_err(&pdev->dev, "mmc alloc host failed\n");
+ return -ENOMEM;
+ }
+ platform_set_drvdata(pdev, mmc);
+
+ owl_host = mmc_priv(mmc);
+ owl_host->dev = &pdev->dev;
+ owl_host->mmc = mmc;
+ spin_lock_init(&owl_host->lock);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ owl_host->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(owl_host->base)) {
+ dev_err(&pdev->dev, "Failed to remap registers\n");
+ ret = PTR_ERR(owl_host->base);
+ goto err_free_host;
+ }
+
+ owl_host->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(owl_host->clk)) {
+ dev_err(&pdev->dev, "No clock defined\n");
+ ret = PTR_ERR(owl_host->clk);
+ goto err_free_host;
+ }
+
+ owl_host->reset = devm_reset_control_get_exclusive(&pdev->dev, NULL);
+ if (IS_ERR(owl_host->reset)) {
+ dev_err(&pdev->dev, "Could not get reset control\n");
+ ret = PTR_ERR(owl_host->reset);
+ goto err_free_host;
+ }
+
+ mmc->ops = &owl_mmc_ops;
+ mmc->max_blk_count = 512;
+ mmc->max_blk_size = 512;
+ mmc->max_segs = 256;
+ mmc->max_seg_size = 262144;
+ mmc->max_req_size = 262144;
+ /* 100kHz ~ 52MHz */
+ mmc->f_min = 100000;
+ mmc->f_max = 52000000;
+ mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
+ MMC_CAP_4_BIT_DATA;
+ mmc->caps2 = (MMC_CAP2_BOOTPART_NOACC | MMC_CAP2_NO_SDIO);
+ mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34 |
+ MMC_VDD_165_195;
+
+ ret = mmc_of_parse(mmc);
+ if (ret)
+ goto err_free_host;
+
+ pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+ owl_host->dma = dma_request_slave_channel(&pdev->dev, "mmc");
+ if (!owl_host->dma) {
+ dev_err(owl_host->dev, "Failed to get external DMA channel.\n");
+ ret = -ENXIO;
+ goto err_free_host;
+ }
+
+ dev_info(&pdev->dev, "Using %s for DMA transfers\n",
+ dma_chan_name(owl_host->dma));
+
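+ /*
+ * Both directions target the same SD_DAT data port;
+ * dma_cfg.direction selects which address field applies
+ * to a given transfer.
+ */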
+ owl_host->dma_cfg.src_addr = res->start + OWL_REG_SD_DAT;
+ owl_host->dma_cfg.dst_addr = res->start + OWL_REG_SD_DAT;
+ owl_host->dma_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ owl_host->dma_cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ owl_host->dma_cfg.device_fc = false;
+
+ owl_host->irq = platform_get_irq(pdev, 0);
+ if (owl_host->irq < 0) {
+ ret = owl_host->irq;
+ goto err_free_host;
+ }
+
+ ret = devm_request_irq(&pdev->dev, owl_host->irq, owl_irq_handler,
+ 0, dev_name(&pdev->dev), owl_host);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to request irq %d\n",
+ owl_host->irq);
+ goto err_free_host;
+ }
+
+ ret = mmc_add_host(mmc);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to add host\n");
+ goto err_free_host;
+ }
+
+ dev_dbg(&pdev->dev, "Owl MMC Controller Initialized\n");
+
+ return 0;
+
+err_free_host:
+ mmc_free_host(mmc);
+
+ return ret;
+}
+
+static int owl_mmc_remove(struct platform_device *pdev)
+{
+ struct mmc_host *mmc = platform_get_drvdata(pdev);
+ struct owl_mmc_host *owl_host = mmc_priv(mmc);
+
+ mmc_remove_host(mmc);
+ disable_irq(owl_host->irq);
+ mmc_free_host(mmc);
+
+ return 0;
+}
+
+static const struct of_device_id owl_mmc_of_match[] = {
+ {.compatible = "actions,owl-mmc",},
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, owl_mmc_of_match);
+
+static struct platform_driver owl_mmc_driver = {
+ .driver = {
+ .name = "owl_mmc",
+ .of_match_table = of_match_ptr(owl_mmc_of_match),
+ },
+ .probe = owl_mmc_probe,
+ .remove = owl_mmc_remove,
+};
+module_platform_driver(owl_mmc_driver);
+
+MODULE_DESCRIPTION("Actions Semi Owl SoCs SD/MMC Driver");
+MODULE_AUTHOR("Actions Semi");
+MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
index a66f8d6d61d1..18839a10594c 100644
--- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c
+++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
@@ -308,6 +308,7 @@ static const struct soc_device_attribute soc_whitelist[] = {
.data = (void *)BIT(SDHI_INTERNAL_DMAC_ONE_RX_ONLY) },
/* generic ones */
{ .soc_id = "r8a774a1" },
+ { .soc_id = "r8a774b1" },
{ .soc_id = "r8a774c0" },
{ .soc_id = "r8a77470" },
{ .soc_id = "r8a7795" },
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index 1604f512c7bd..105e73d4a3b9 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -61,7 +61,7 @@ struct sdhci_acpi_slot {
mmc_pm_flag_t pm_caps;
unsigned int flags;
size_t priv_size;
- int (*probe_slot)(struct platform_device *, const char *, const char *);
+ int (*probe_slot)(struct platform_device *, struct acpi_device *);
int (*remove_slot)(struct platform_device *);
int (*free_slot)(struct platform_device *pdev);
int (*setup_host)(struct platform_device *pdev);
@@ -325,12 +325,10 @@ static bool sdhci_acpi_cht_pci_wifi(unsigned int vendor, unsigned int device,
* wifi card in the expected slot with an ACPI companion node, is used to
* indicate that acpi_device_fix_up_power() should be avoided.
*/
-static inline bool sdhci_acpi_no_fixup_child_power(const char *hid,
- const char *uid)
+static inline bool sdhci_acpi_no_fixup_child_power(struct acpi_device *adev)
{
return sdhci_acpi_cht() &&
- !strcmp(hid, "80860F14") &&
- !strcmp(uid, "2") &&
+ acpi_dev_hid_uid_match(adev, "80860F14", "2") &&
sdhci_acpi_cht_pci_wifi(0x14e4, 0x43ec, 0, 28);
}
@@ -345,8 +343,7 @@ static inline bool sdhci_acpi_byt_defer(struct device *dev)
return false;
}
-static inline bool sdhci_acpi_no_fixup_child_power(const char *hid,
- const char *uid)
+static inline bool sdhci_acpi_no_fixup_child_power(struct acpi_device *adev)
{
return false;
}
@@ -375,19 +372,18 @@ out:
return ret;
}
-static int intel_probe_slot(struct platform_device *pdev, const char *hid,
- const char *uid)
+static int intel_probe_slot(struct platform_device *pdev, struct acpi_device *adev)
{
struct sdhci_acpi_host *c = platform_get_drvdata(pdev);
struct intel_host *intel_host = sdhci_acpi_priv(c);
struct sdhci_host *host = c->host;
- if (hid && uid && !strcmp(hid, "80860F14") && !strcmp(uid, "1") &&
+ if (acpi_dev_hid_uid_match(adev, "80860F14", "1") &&
sdhci_readl(host, SDHCI_CAPABILITIES) == 0x446cc8b2 &&
sdhci_readl(host, SDHCI_CAPABILITIES_1) == 0x00000807)
host->timeout_clk = 1000; /* 1000 kHz i.e. 1 MHz */
- if (hid && !strcmp(hid, "80865ACA"))
+ if (acpi_dev_hid_uid_match(adev, "80865ACA", NULL))
host->mmc_host_ops.get_cd = bxt_get_cd;
intel_dsm_init(intel_host, &pdev->dev, host->mmc);
@@ -473,8 +469,7 @@ static irqreturn_t sdhci_acpi_qcom_handler(int irq, void *ptr)
return IRQ_HANDLED;
}
-static int qcom_probe_slot(struct platform_device *pdev, const char *hid,
- const char *uid)
+static int qcom_probe_slot(struct platform_device *pdev, struct acpi_device *adev)
{
struct sdhci_acpi_host *c = platform_get_drvdata(pdev);
struct sdhci_host *host = c->host;
@@ -482,7 +477,7 @@ static int qcom_probe_slot(struct platform_device *pdev, const char *hid,
*irq = -EINVAL;
- if (strcmp(hid, "QCOM8051"))
+ if (!acpi_dev_hid_uid_match(adev, "QCOM8051", NULL))
return 0;
*irq = platform_get_irq(pdev, 1);
@@ -501,14 +496,12 @@ static int qcom_free_slot(struct platform_device *pdev)
struct sdhci_host *host = c->host;
struct acpi_device *adev;
int *irq = sdhci_acpi_priv(c);
- const char *hid;
adev = ACPI_COMPANION(dev);
if (!adev)
return -ENODEV;
- hid = acpi_device_hid(adev);
- if (strcmp(hid, "QCOM8051"))
+ if (!acpi_dev_hid_uid_match(adev, "QCOM8051", NULL))
return 0;
if (*irq < 0)
@@ -583,7 +576,7 @@ static const struct sdhci_acpi_chip sdhci_acpi_chip_amd = {
};
static int sdhci_acpi_emmc_amd_probe_slot(struct platform_device *pdev,
- const char *hid, const char *uid)
+ struct acpi_device *adev)
{
struct sdhci_acpi_host *c = platform_get_drvdata(pdev);
struct sdhci_host *host = c->host;
@@ -654,17 +647,12 @@ static const struct acpi_device_id sdhci_acpi_ids[] = {
};
MODULE_DEVICE_TABLE(acpi, sdhci_acpi_ids);
-static const struct sdhci_acpi_slot *sdhci_acpi_get_slot(const char *hid,
- const char *uid)
+static const struct sdhci_acpi_slot *sdhci_acpi_get_slot(struct acpi_device *adev)
{
const struct sdhci_acpi_uid_slot *u;
for (u = sdhci_acpi_uids; u->hid; u++) {
- if (strcmp(u->hid, hid))
- continue;
- if (!u->uid)
- return u->slot;
- if (uid && !strcmp(u->uid, uid))
+ if (acpi_dev_hid_uid_match(adev, u->hid, u->uid))
return u->slot;
}
return NULL;
@@ -680,22 +668,17 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
struct resource *iomem;
resource_size_t len;
size_t priv_size;
- const char *hid;
- const char *uid;
int err;
device = ACPI_COMPANION(dev);
if (!device)
return -ENODEV;
- hid = acpi_device_hid(device);
- uid = acpi_device_uid(device);
-
- slot = sdhci_acpi_get_slot(hid, uid);
+ slot = sdhci_acpi_get_slot(device);
/* Power on the SDHCI controller and its children */
acpi_device_fix_up_power(device);
- if (!sdhci_acpi_no_fixup_child_power(hid, uid)) {
+ if (!sdhci_acpi_no_fixup_child_power(device)) {
list_for_each_entry(child, &device->children, node)
if (child->status.present && child->status.enabled)
acpi_device_fix_up_power(child);
@@ -745,7 +728,7 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
if (c->slot) {
if (c->slot->probe_slot) {
- err = c->slot->probe_slot(pdev, hid, uid);
+ err = c->slot->probe_slot(pdev, device);
if (err)
goto err_free;
}
diff --git a/drivers/mmc/host/sdhci-esdhc.h b/drivers/mmc/host/sdhci-esdhc.h
index 57b582bf73d9..9289bb4d633e 100644
--- a/drivers/mmc/host/sdhci-esdhc.h
+++ b/drivers/mmc/host/sdhci-esdhc.h
@@ -51,6 +51,11 @@
#define ESDHC_CLOCK_HCKEN 0x00000002
#define ESDHC_CLOCK_IPGEN 0x00000001
+/* System Control 2 Register */
+#define ESDHC_SYSTEM_CONTROL_2 0x3c
+#define ESDHC_SMPCLKSEL 0x00800000
+#define ESDHC_EXTN 0x00400000
+
/* Host Controller Capabilities Register 2 */
#define ESDHC_CAPABILITIES_1 0x114
@@ -59,7 +64,16 @@
#define ESDHC_HS400_WNDW_ADJUST 0x00000040
#define ESDHC_HS400_MODE 0x00000010
#define ESDHC_TB_EN 0x00000004
+#define ESDHC_TB_MODE_MASK 0x00000003
+#define ESDHC_TB_MODE_SW 0x00000003
+#define ESDHC_TB_MODE_3 0x00000002
+
+#define ESDHC_TBSTAT 0x124
+
#define ESDHC_TBPTR 0x128
+#define ESDHC_WNDW_STRT_PTR_SHIFT 8
+#define ESDHC_WNDW_STRT_PTR_MASK (0x7f << 8)
+#define ESDHC_WNDW_END_PTR_MASK 0x7f
/* SD Clock Control Register */
#define ESDHC_SDCLKCTL 0x144
diff --git a/drivers/mmc/host/sdhci-milbeaut.c b/drivers/mmc/host/sdhci-milbeaut.c
new file mode 100644
index 000000000000..a1aa21b9ae1c
--- /dev/null
+++ b/drivers/mmc/host/sdhci-milbeaut.c
@@ -0,0 +1,362 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2013 - 2015 Fujitsu Semiconductor, Ltd
+ * Vincent Yang <vincent.yang@tw.fujitsu.com>
+ * Copyright (C) 2015 Linaro Ltd Andy Green <andy.green@linaro.org>
+ * Copyright (C) 2019 Socionext Inc.
+ * Takao Orito <orito.takao@socionext.com>
+ */
+
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/property.h>
+
+#include "sdhci-pltfm.h"
+#include "sdhci_f_sdh30.h"
+
+/* milbeaut bridge controller register */
+#define MLB_SOFT_RESET 0x0200
+#define MLB_SOFT_RESET_RSTX BIT(0)
+
+#define MLB_WP_CD_LED_SET 0x0210
+#define MLB_WP_CD_LED_SET_LED_INV BIT(2)
+
+#define MLB_CR_SET 0x0220
+#define MLB_CR_SET_CR_TOCLKUNIT BIT(24)
+#define MLB_CR_SET_CR_TOCLKFREQ_SFT (16)
+#define MLB_CR_SET_CR_TOCLKFREQ_MASK (0x3F << MLB_CR_SET_CR_TOCLKFREQ_SFT)
+#define MLB_CR_SET_CR_BCLKFREQ_SFT (8)
+#define MLB_CR_SET_CR_BCLKFREQ_MASK (0xFF << MLB_CR_SET_CR_BCLKFREQ_SFT)
+#define MLB_CR_SET_CR_RTUNTIMER_SFT (4)
+#define MLB_CR_SET_CR_RTUNTIMER_MASK (0xF << MLB_CR_SET_CR_RTUNTIMER_SFT)
+
+#define MLB_SD_TOCLK_I_DIV 16
+#define MLB_TOCLKFREQ_UNIT_THRES 16000000
+#define MLB_CAL_TOCLKFREQ_MHZ(rate) ((rate) / MLB_SD_TOCLK_I_DIV / 1000000)
+#define MLB_CAL_TOCLKFREQ_KHZ(rate) ((rate) / MLB_SD_TOCLK_I_DIV / 1000)
+#define MLB_TOCLKFREQ_MAX 63
+#define MLB_TOCLKFREQ_MIN 1
+
+#define MLB_SD_BCLK_I_DIV 4
+#define MLB_CAL_BCLKFREQ(rate) ((rate) / MLB_SD_BCLK_I_DIV / 1000000)
+#define MLB_BCLKFREQ_MAX 255
+#define MLB_BCLKFREQ_MIN 1
+
+#define MLB_CDR_SET 0x0230
+#define MLB_CDR_SET_CLK2POW16 3
+
+struct f_sdhost_priv {
+ struct clk *clk_iface;
+ struct clk *clk;
+ struct device *dev;
+ bool enable_cmd_dat_delay;
+};
+
+static void sdhci_milbeaut_soft_voltage_switch(struct sdhci_host *host)
+{
+ u32 ctrl = 0;
+
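+ /*
+ * Assumed sequence, going by the bit names: pull the outputs down
+ * (CRES_O_DN), switch the I/O macro to 1.8 V (MSEL_O_1_8), release
+ * the lines, then disable command checking while tuning settles.
+ */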
+ usleep_range(2500, 3000);
+ ctrl = sdhci_readl(host, F_SDH30_IO_CONTROL2);
+ ctrl |= F_SDH30_CRES_O_DN;
+ sdhci_writel(host, ctrl, F_SDH30_IO_CONTROL2);
+ ctrl |= F_SDH30_MSEL_O_1_8;
+ sdhci_writel(host, ctrl, F_SDH30_IO_CONTROL2);
+
+ ctrl &= ~F_SDH30_CRES_O_DN;
+ sdhci_writel(host, ctrl, F_SDH30_IO_CONTROL2);
+ usleep_range(2500, 3000);
+
+ ctrl = sdhci_readl(host, F_SDH30_TUNING_SETTING);
+ ctrl |= F_SDH30_CMD_CHK_DIS;
+ sdhci_writel(host, ctrl, F_SDH30_TUNING_SETTING);
+}
+
+static unsigned int sdhci_milbeaut_get_min_clock(struct sdhci_host *host)
+{
+ return F_SDH30_MIN_CLOCK;
+}
+
+static void sdhci_milbeaut_reset(struct sdhci_host *host, u8 mask)
+{
+ struct f_sdhost_priv *priv = sdhci_priv(host);
+ u16 clk;
+ u32 ctl;
+ ktime_t timeout;
+
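+ /*
+ * Gate the card clock (keeping the internal clock enabled) across
+ * the reset, then re-enable it and poll until it reports stable.
+ */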
+ clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+ clk = (clk & ~SDHCI_CLOCK_CARD_EN) | SDHCI_CLOCK_INT_EN;
+ sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
+
+ sdhci_reset(host, mask);
+
+ clk |= SDHCI_CLOCK_CARD_EN;
+ sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
+
+ timeout = ktime_add_ms(ktime_get(), 10);
+ while (1) {
+ bool timedout = ktime_after(ktime_get(), timeout);
+
+ clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+ if (clk & SDHCI_CLOCK_INT_STABLE)
+ break;
+ if (timedout) {
+ pr_err("%s: Internal clock never stabilised.\n",
+ mmc_hostname(host->mmc));
+ sdhci_dumpregs(host);
+ return;
+ }
+ udelay(10);
+ }
+
+ if (priv->enable_cmd_dat_delay) {
+ ctl = sdhci_readl(host, F_SDH30_ESD_CONTROL);
+ ctl |= F_SDH30_CMD_DAT_DELAY;
+ sdhci_writel(host, ctl, F_SDH30_ESD_CONTROL);
+ }
+}
+
+static void sdhci_milbeaut_set_power(struct sdhci_host *host,
+ unsigned char mode, unsigned short vdd)
+{
+ if (!IS_ERR(host->mmc->supply.vmmc)) {
+ struct mmc_host *mmc = host->mmc;
+
+ mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
+ }
+ sdhci_set_power_noreg(host, mode, vdd);
+}
+
+static const struct sdhci_ops sdhci_milbeaut_ops = {
+ .voltage_switch = sdhci_milbeaut_soft_voltage_switch,
+ .get_min_clock = sdhci_milbeaut_get_min_clock,
+ .reset = sdhci_milbeaut_reset,
+ .set_clock = sdhci_set_clock,
+ .set_bus_width = sdhci_set_bus_width,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
+ .set_power = sdhci_milbeaut_set_power,
+};
+
+static void sdhci_milbeaut_bridge_reset(struct sdhci_host *host,
+ int reset_flag)
+{
+ if (reset_flag)
+ sdhci_writel(host, 0, MLB_SOFT_RESET);
+ else
+ sdhci_writel(host, MLB_SOFT_RESET_RSTX, MLB_SOFT_RESET);
+}
+
+static void sdhci_milbeaut_bridge_init(struct sdhci_host *host,
+ int rate)
+{
+ u32 val, clk;
+
+ /* IO_SDIO_CR_SET must be programmed while the bridge is held in reset */
+ val = sdhci_readl(host, MLB_CR_SET);
+ val &= ~(MLB_CR_SET_CR_TOCLKFREQ_MASK | MLB_CR_SET_CR_TOCLKUNIT |
+ MLB_CR_SET_CR_BCLKFREQ_MASK);
+ if (rate >= MLB_TOCLKFREQ_UNIT_THRES) {
+ clk = MLB_CAL_TOCLKFREQ_MHZ(rate);
+ clk = min_t(u32, MLB_TOCLKFREQ_MAX, clk);
+ val |= MLB_CR_SET_CR_TOCLKUNIT |
+ (clk << MLB_CR_SET_CR_TOCLKFREQ_SFT);
+ } else {
+ clk = MLB_CAL_TOCLKFREQ_KHZ(rate);
+ clk = min_t(u32, MLB_TOCLKFREQ_MAX, clk);
+ clk = max_t(u32, MLB_TOCLKFREQ_MIN, clk);
+ val |= clk << MLB_CR_SET_CR_TOCLKFREQ_SFT;
+ }
+
+ clk = MLB_CAL_BCLKFREQ(rate);
+ clk = min_t(u32, MLB_BCLKFREQ_MAX, clk);
+ clk = max_t(u32, MLB_BCLKFREQ_MIN, clk);
+ val |= clk << MLB_CR_SET_CR_BCLKFREQ_SFT;
+ val &= ~MLB_CR_SET_CR_RTUNTIMER_MASK;
+ sdhci_writel(host, val, MLB_CR_SET);
+
+ sdhci_writel(host, MLB_CDR_SET_CLK2POW16, MLB_CDR_SET);
+
+ sdhci_writel(host, MLB_WP_CD_LED_SET_LED_INV, MLB_WP_CD_LED_SET);
+}
+
+static void sdhci_milbeaut_vendor_init(struct sdhci_host *host)
+{
+ struct f_sdhost_priv *priv = sdhci_priv(host);
+ u32 ctl;
+
+ ctl = sdhci_readl(host, F_SDH30_IO_CONTROL2);
+ ctl |= F_SDH30_CRES_O_DN;
+ sdhci_writel(host, ctl, F_SDH30_IO_CONTROL2);
+ ctl &= ~F_SDH30_MSEL_O_1_8;
+ sdhci_writel(host, ctl, F_SDH30_IO_CONTROL2);
+ ctl &= ~F_SDH30_CRES_O_DN;
+ sdhci_writel(host, ctl, F_SDH30_IO_CONTROL2);
+
+ ctl = sdhci_readw(host, F_SDH30_AHB_CONFIG);
+ ctl |= F_SDH30_SIN | F_SDH30_AHB_INCR_16 | F_SDH30_AHB_INCR_8 |
+ F_SDH30_AHB_INCR_4;
+ ctl &= ~(F_SDH30_AHB_BIGED | F_SDH30_BUSLOCK_EN);
+ sdhci_writew(host, ctl, F_SDH30_AHB_CONFIG);
+
+ if (priv->enable_cmd_dat_delay) {
+ ctl = sdhci_readl(host, F_SDH30_ESD_CONTROL);
+ ctl |= F_SDH30_CMD_DAT_DELAY;
+ sdhci_writel(host, ctl, F_SDH30_ESD_CONTROL);
+ }
+}
+
+static const struct of_device_id mlb_dt_ids[] = {
+ {
+ .compatible = "socionext,milbeaut-m10v-sdhci-3.0",
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mlb_dt_ids);
+
+static void sdhci_milbeaut_init(struct sdhci_host *host)
+{
+ struct f_sdhost_priv *priv = sdhci_priv(host);
+ int rate = clk_get_rate(priv->clk);
+ u16 ctl;
+
+ sdhci_milbeaut_bridge_reset(host, 0);
+
+ ctl = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+ ctl &= ~(SDHCI_CLOCK_CARD_EN | SDHCI_CLOCK_INT_EN);
+ sdhci_writew(host, ctl, SDHCI_CLOCK_CONTROL);
+
+ sdhci_milbeaut_bridge_reset(host, 1);
+
+ sdhci_milbeaut_bridge_init(host, rate);
+ sdhci_milbeaut_bridge_reset(host, 0);
+
+ sdhci_milbeaut_vendor_init(host);
+}
+
+static int sdhci_milbeaut_probe(struct platform_device *pdev)
+{
+ struct sdhci_host *host;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ int irq, ret = 0;
+ struct f_sdhost_priv *priv;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(dev, "%s: no irq specified\n", __func__);
+ return irq;
+ }
+
+ host = sdhci_alloc_host(dev, sizeof(struct f_sdhost_priv));
+ if (IS_ERR(host))
+ return PTR_ERR(host);
+
+ priv = sdhci_priv(host);
+ priv->dev = dev;
+
+ host->quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC |
+ SDHCI_QUIRK_INVERTED_WRITE_PROTECT |
+ SDHCI_QUIRK_CLOCK_BEFORE_RESET |
+ SDHCI_QUIRK_DELAY_AFTER_POWER;
+ host->quirks2 = SDHCI_QUIRK2_SUPPORT_SINGLE |
+ SDHCI_QUIRK2_TUNING_WORK_AROUND |
+ SDHCI_QUIRK2_PRESET_VALUE_BROKEN;
+
+ priv->enable_cmd_dat_delay = device_property_read_bool(dev,
+ "fujitsu,cmd-dat-delay-select");
+
+ ret = mmc_of_parse(host->mmc);
+ if (ret)
+ goto err;
+
+ platform_set_drvdata(pdev, host);
+
+ host->hw_name = "f_sdh30";
+ host->ops = &sdhci_milbeaut_ops;
+ host->irq = irq;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ host->ioaddr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(host->ioaddr)) {
+ ret = PTR_ERR(host->ioaddr);
+ goto err;
+ }
+
+ if (dev_of_node(dev)) {
+ sdhci_get_of_property(pdev);
+
+ priv->clk_iface = devm_clk_get(&pdev->dev, "iface");
+ if (IS_ERR(priv->clk_iface)) {
+ ret = PTR_ERR(priv->clk_iface);
+ goto err;
+ }
+
+ ret = clk_prepare_enable(priv->clk_iface);
+ if (ret)
+ goto err;
+
+ priv->clk = devm_clk_get(&pdev->dev, "core");
+ if (IS_ERR(priv->clk)) {
+ ret = PTR_ERR(priv->clk);
+ goto err_clk;
+ }
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ goto err_clk;
+ }
+
+ sdhci_milbeaut_init(host);
+
+ ret = sdhci_add_host(host);
+ if (ret)
+ goto err_add_host;
+
+ return 0;
+
+err_add_host:
+ clk_disable_unprepare(priv->clk);
+err_clk:
+ clk_disable_unprepare(priv->clk_iface);
+err:
+ sdhci_free_host(host);
+ return ret;
+}
+
+static int sdhci_milbeaut_remove(struct platform_device *pdev)
+{
+ struct sdhci_host *host = platform_get_drvdata(pdev);
+ struct f_sdhost_priv *priv = sdhci_priv(host);
+
+ sdhci_remove_host(host, readl(host->ioaddr + SDHCI_INT_STATUS) ==
+ 0xffffffff);
+
+ clk_disable_unprepare(priv->clk_iface);
+ clk_disable_unprepare(priv->clk);
+
+ sdhci_free_host(host);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static struct platform_driver sdhci_milbeaut_driver = {
+ .driver = {
+ .name = "sdhci-milbeaut",
+ .of_match_table = of_match_ptr(mlb_dt_ids),
+ },
+ .probe = sdhci_milbeaut_probe,
+ .remove = sdhci_milbeaut_remove,
+};
+
+module_platform_driver(sdhci_milbeaut_driver);
+
+MODULE_DESCRIPTION("MILBEAUT SD Card Controller driver");
+MODULE_AUTHOR("Takao Orito <orito.takao@socionext.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:sdhci-milbeaut");
diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c
index 7023cbec4017..e49b44b4d82e 100644
--- a/drivers/mmc/host/sdhci-of-arasan.c
+++ b/drivers/mmc/host/sdhci-of-arasan.c
@@ -22,6 +22,7 @@
#include <linux/phy/phy.h>
#include <linux/regmap.h>
#include <linux/of.h>
+#include <linux/firmware/xlnx-zynqmp.h>
#include "cqhci.h"
#include "sdhci-pltfm.h"
@@ -32,6 +33,10 @@
#define PHY_CLK_TOO_SLOW_HZ 400000
+/* Default settings for ZynqMP Clock Phases */
+#define ZYNQMP_ICLK_PHASE {0, 63, 63, 0, 63, 0, 0, 183, 54, 0, 0}
+#define ZYNQMP_OCLK_PHASE {0, 72, 60, 0, 60, 72, 135, 48, 72, 135, 0}
+
/*
* On some SoCs the syscon area has a feature where the upper 16-bits of
* each 32-bit register act as a write mask for the lower 16-bits. This allows
@@ -72,13 +77,38 @@ struct sdhci_arasan_soc_ctl_map {
};
/**
+ * struct sdhci_arasan_clk_data - Arasan controller clock data
+ * @sdcardclk_hw: Struct for the clock we might provide to a PHY.
+ * @sdcardclk: Pointer to the normal 'struct clk' for sdcardclk_hw.
+ * @sampleclk_hw: Struct for the clock we might provide to a PHY.
+ * @sampleclk: Pointer to the normal 'struct clk' for sampleclk_hw.
+ * @clk_phase_in: Array of Input Clock Phase Delays for all speed modes
+ * @clk_phase_out: Array of Output Clock Phase Delays for all speed modes
+ * @set_clk_delays: Function pointer for setting Clock Delays
+ * @clk_of_data: Platform specific runtime clock data storage pointer
+ */
+struct sdhci_arasan_clk_data {
+ struct clk_hw sdcardclk_hw;
+ struct clk *sdcardclk;
+ struct clk_hw sampleclk_hw;
+ struct clk *sampleclk;
+ int clk_phase_in[MMC_TIMING_MMC_HS400 + 1];
+ int clk_phase_out[MMC_TIMING_MMC_HS400 + 1];
+ void (*set_clk_delays)(struct sdhci_host *host);
+ void *clk_of_data;
+};
+
+struct sdhci_arasan_zynqmp_clk_data {
+ const struct zynqmp_eemi_ops *eemi_ops;
+};
+
+/**
* struct sdhci_arasan_data
* @host: Pointer to the main SDHCI host structure.
* @clk_ahb: Pointer to the AHB clock
* @phy: Pointer to the generic phy
* @is_phy_on: True if the PHY is on; false if not.
- * @sdcardclk_hw: Struct for the clock we might provide to a PHY.
- * @sdcardclk: Pointer to normal 'struct clock' for sdcardclk_hw.
+ * @clk_data: Struct for the Arasan Controller Clock Data.
* @soc_ctl_base: Pointer to regmap for syscon for soc_ctl registers.
* @soc_ctl_map: Map to get offsets into soc_ctl registers.
*/
@@ -89,8 +119,7 @@ struct sdhci_arasan_data {
bool is_phy_on;
bool has_cqe;
- struct clk_hw sdcardclk_hw;
- struct clk *sdcardclk;
+ struct sdhci_arasan_clk_data clk_data;
struct regmap *soc_ctl_base;
const struct sdhci_arasan_soc_ctl_map *soc_ctl_map;
@@ -120,6 +149,12 @@ static const struct sdhci_arasan_soc_ctl_map intel_lgm_emmc_soc_ctl_map = {
.hiword_update = false,
};
+static const struct sdhci_arasan_soc_ctl_map intel_lgm_sdxc_soc_ctl_map = {
+ .baseclkfreq = { .reg = 0x80, .width = 8, .shift = 2 },
+ .clockmultiplier = { .reg = 0, .width = -1, .shift = -1 },
+ .hiword_update = false,
+};
+
/**
* sdhci_arasan_syscon_write - Write to a field in soc_ctl registers
*
@@ -174,6 +209,7 @@ static void sdhci_arasan_set_clock(struct sdhci_host *host, unsigned int clock)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_arasan_data *sdhci_arasan = sdhci_pltfm_priv(pltfm_host);
+ struct sdhci_arasan_clk_data *clk_data = &sdhci_arasan->clk_data;
bool ctrl_phy = false;
if (!IS_ERR(sdhci_arasan->phy)) {
@@ -215,6 +251,10 @@ static void sdhci_arasan_set_clock(struct sdhci_host *host, unsigned int clock)
sdhci_arasan->is_phy_on = false;
}
+ /* Set the Input and Output Clock Phase Delays */
+ if (clk_data->set_clk_delays)
+ clk_data->set_clk_delays(host);
+
sdhci_set_clock(host, clock);
if (sdhci_arasan->quirks & SDHCI_ARASAN_QUIRK_CLOCK_UNSTABLE)
@@ -384,6 +424,11 @@ static struct sdhci_arasan_of_data intel_lgm_emmc_data = {
.pdata = &sdhci_arasan_cqe_pdata,
};
+static struct sdhci_arasan_of_data intel_lgm_sdxc_data = {
+ .soc_ctl_map = &intel_lgm_sdxc_soc_ctl_map,
+ .pdata = &sdhci_arasan_cqe_pdata,
+};
+
#ifdef CONFIG_PM_SLEEP
/**
* sdhci_arasan_suspend - Suspend method for the driver
@@ -489,6 +534,10 @@ static const struct of_device_id sdhci_arasan_of_match[] = {
.compatible = "intel,lgm-sdhci-5.1-emmc",
.data = &intel_lgm_emmc_data,
},
+ {
+ .compatible = "intel,lgm-sdhci-5.1-sdxc",
+ .data = &intel_lgm_sdxc_data,
+ },
/* Generic compatible below here */
{
.compatible = "arasan,sdhci-8.9a",
@@ -502,6 +551,10 @@ static const struct of_device_id sdhci_arasan_of_match[] = {
.compatible = "arasan,sdhci-4.9a",
.data = &sdhci_arasan_data,
},
+ {
+ .compatible = "xlnx,zynqmp-8.9a",
+ .data = &sdhci_arasan_data,
+ },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sdhci_arasan_of_match);
@@ -520,8 +573,10 @@ static unsigned long sdhci_arasan_sdcardclk_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
+ struct sdhci_arasan_clk_data *clk_data =
+ container_of(hw, struct sdhci_arasan_clk_data, sdcardclk_hw);
struct sdhci_arasan_data *sdhci_arasan =
- container_of(hw, struct sdhci_arasan_data, sdcardclk_hw);
+ container_of(clk_data, struct sdhci_arasan_data, clk_data);
struct sdhci_host *host = sdhci_arasan->host;
return host->mmc->actual_clock;
@@ -532,6 +587,177 @@ static const struct clk_ops arasan_sdcardclk_ops = {
};
/**
+ * sdhci_arasan_sampleclk_recalc_rate - Return the sampling clock rate
+ *
+ * Return the current actual rate of the sampling clock. This can be used
+ * to communicate with our PHY.
+ *
+ * @hw: Pointer to the hardware clock structure.
+ * @parent_rate: The parent rate (should be the rate of clk_xin).
+ * Returns the sample clock rate.
+ */
+static unsigned long sdhci_arasan_sampleclk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct sdhci_arasan_clk_data *clk_data =
+ container_of(hw, struct sdhci_arasan_clk_data, sampleclk_hw);
+ struct sdhci_arasan_data *sdhci_arasan =
+ container_of(clk_data, struct sdhci_arasan_data, clk_data);
+ struct sdhci_host *host = sdhci_arasan->host;
+
+ return host->mmc->actual_clock;
+}
+
+static const struct clk_ops arasan_sampleclk_ops = {
+ .recalc_rate = sdhci_arasan_sampleclk_recalc_rate,
+};
+
+/**
+ * sdhci_zynqmp_sdcardclk_set_phase - Set the SD Output Clock Tap Delays
+ *
+ * Set the SD Output Clock Tap Delays for Output path
+ *
+ * @hw: Pointer to the hardware clock structure.
+ * @degrees: The clock phase shift, between 0 and 359 degrees.
+ * Return: 0 on success and error value on error
+ */
+static int sdhci_zynqmp_sdcardclk_set_phase(struct clk_hw *hw, int degrees)
+{
+ struct sdhci_arasan_clk_data *clk_data =
+ container_of(hw, struct sdhci_arasan_clk_data, sdcardclk_hw);
+ struct sdhci_arasan_data *sdhci_arasan =
+ container_of(clk_data, struct sdhci_arasan_data, clk_data);
+ struct sdhci_host *host = sdhci_arasan->host;
+ struct sdhci_arasan_zynqmp_clk_data *zynqmp_clk_data =
+ clk_data->clk_of_data;
+ const struct zynqmp_eemi_ops *eemi_ops = zynqmp_clk_data->eemi_ops;
+ const char *clk_name = clk_hw_get_name(hw);
+ u32 node_id = !strcmp(clk_name, "clk_out_sd0") ? NODE_SD_0 : NODE_SD_1;
+ u8 tap_delay, tap_max = 0;
+ int ret;
+
+ /*
+ * Tap delays apply only to SDHCI_SPEC_300 and above, and ZynqMP
+ * does not set a phase for clocks at or below 25 MHz. If degrees
+ * is zero, there is nothing to do.
+ */
+ if (host->version < SDHCI_SPEC_300 ||
+ host->timing == MMC_TIMING_LEGACY ||
+ host->timing == MMC_TIMING_UHS_SDR12 || !degrees)
+ return 0;
+
+ switch (host->timing) {
+ case MMC_TIMING_MMC_HS:
+ case MMC_TIMING_SD_HS:
+ case MMC_TIMING_UHS_SDR25:
+ case MMC_TIMING_UHS_DDR50:
+ case MMC_TIMING_MMC_DDR52:
+ /* For 50MHz clock, 30 Taps are available */
+ tap_max = 30;
+ break;
+ case MMC_TIMING_UHS_SDR50:
+ /* For 100MHz clock, 15 Taps are available */
+ tap_max = 15;
+ break;
+ case MMC_TIMING_UHS_SDR104:
+ case MMC_TIMING_MMC_HS200:
+ /* For 200MHz clock, 8 Taps are available */
+ tap_max = 8;
+ break;
+ default:
+ break;
+ }
+
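+ /* Map the requested phase shift linearly onto the available taps */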
+ tap_delay = (degrees * tap_max) / 360;
+
+ /* Set the Clock Phase */
+ ret = eemi_ops->ioctl(node_id, IOCTL_SET_SD_TAPDELAY,
+ PM_TAPDELAY_OUTPUT, tap_delay, NULL);
+ if (ret)
+ pr_err("Error setting Output Tap Delay\n");
+
+ return ret;
+}
+
+static const struct clk_ops zynqmp_sdcardclk_ops = {
+ .recalc_rate = sdhci_arasan_sdcardclk_recalc_rate,
+ .set_phase = sdhci_zynqmp_sdcardclk_set_phase,
+};
+
+/**
+ * sdhci_zynqmp_sampleclk_set_phase - Set the SD Input Clock Tap Delays
+ *
+ * Set the SD Input Clock Tap Delays for Input path
+ *
+ * @hw: Pointer to the hardware clock structure.
+ * @degrees: The clock phase shift, between 0 and 359 degrees.
+ * Return: 0 on success and error value on error
+ */
+static int sdhci_zynqmp_sampleclk_set_phase(struct clk_hw *hw, int degrees)
+{
+ struct sdhci_arasan_clk_data *clk_data =
+ container_of(hw, struct sdhci_arasan_clk_data, sampleclk_hw);
+ struct sdhci_arasan_data *sdhci_arasan =
+ container_of(clk_data, struct sdhci_arasan_data, clk_data);
+ struct sdhci_host *host = sdhci_arasan->host;
+ struct sdhci_arasan_zynqmp_clk_data *zynqmp_clk_data =
+ clk_data->clk_of_data;
+ const struct zynqmp_eemi_ops *eemi_ops = zynqmp_clk_data->eemi_ops;
+ const char *clk_name = clk_hw_get_name(hw);
+ u32 node_id = !strcmp(clk_name, "clk_in_sd0") ? NODE_SD_0 : NODE_SD_1;
+ u8 tap_delay, tap_max = 0;
+ int ret;
+
+ /*
+ * Tap delays apply only to SDHCI_SPEC_300 and above, and ZynqMP
+ * does not set a phase for clocks at or below 25 MHz. If degrees
+ * is zero, there is nothing to do.
+ */
+ if (host->version < SDHCI_SPEC_300 ||
+ host->timing == MMC_TIMING_LEGACY ||
+ host->timing == MMC_TIMING_UHS_SDR12 || !degrees)
+ return 0;
+
+ switch (host->timing) {
+ case MMC_TIMING_MMC_HS:
+ case MMC_TIMING_SD_HS:
+ case MMC_TIMING_UHS_SDR25:
+ case MMC_TIMING_UHS_DDR50:
+ case MMC_TIMING_MMC_DDR52:
+ /* For 50MHz clock, 120 Taps are available */
+ tap_max = 120;
+ break;
+ case MMC_TIMING_UHS_SDR50:
+ /* For 100MHz clock, 60 Taps are available */
+ tap_max = 60;
+ break;
+ case MMC_TIMING_UHS_SDR104:
+ case MMC_TIMING_MMC_HS200:
+ /* For 200MHz clock, 30 Taps are available */
+ tap_max = 30;
+ break;
+ default:
+ break;
+ }
+
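+ /* Map the requested phase shift linearly onto the available taps */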
+ tap_delay = (degrees * tap_max) / 360;
+
+ /* Set the Clock Phase */
+ ret = eemi_ops->ioctl(node_id, IOCTL_SET_SD_TAPDELAY,
+ PM_TAPDELAY_INPUT, tap_delay, NULL);
+ if (ret)
+ pr_err("Error setting Input Tap Delay\n");
+
+ return ret;
+}
+
+static const struct clk_ops zynqmp_sampleclk_ops = {
+ .recalc_rate = sdhci_arasan_sampleclk_recalc_rate,
+ .set_phase = sdhci_zynqmp_sampleclk_set_phase,
+};
+
+/**
* sdhci_arasan_update_clockmultiplier - Set corecfg_clockmultiplier
*
* The corecfg_clockmultiplier is supposed to contain clock multiplier
@@ -609,39 +835,128 @@ static void sdhci_arasan_update_baseclkfreq(struct sdhci_host *host)
sdhci_arasan_syscon_write(host, &soc_ctl_map->baseclkfreq, mhz);
}
+static void sdhci_arasan_set_clk_delays(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_arasan_data *sdhci_arasan = sdhci_pltfm_priv(pltfm_host);
+ struct sdhci_arasan_clk_data *clk_data = &sdhci_arasan->clk_data;
+
+ clk_set_phase(clk_data->sampleclk,
+ clk_data->clk_phase_in[host->timing]);
+ clk_set_phase(clk_data->sdcardclk,
+ clk_data->clk_phase_out[host->timing]);
+}
+
+static void arasan_dt_read_clk_phase(struct device *dev,
+ struct sdhci_arasan_clk_data *clk_data,
+ unsigned int timing, const char *prop)
+{
+ struct device_node *np = dev->of_node;
+
+ int clk_phase[2] = {0};
+
+ /*
+ * Read Tap Delay values from DT, if the DT does not contain the
+ * Tap Values then use the pre-defined values.
+ */
+ if (of_property_read_variable_u32_array(np, prop, &clk_phase[0],
+ 2, 0)) {
+ dev_dbg(dev, "Using predefined clock phase for %s = %d %d\n",
+ prop, clk_data->clk_phase_in[timing],
+ clk_data->clk_phase_out[timing]);
+ return;
+ }
+
+ /* The values read are Input and Output Clock Delays in order */
+ clk_data->clk_phase_in[timing] = clk_phase[0];
+ clk_data->clk_phase_out[timing] = clk_phase[1];
+}
+
/**
- * sdhci_arasan_register_sdclk - Register the sdclk for a PHY to use
+ * arasan_dt_parse_clk_phases - Read Clock Delay values from DT
+ *
+ * Called at initialization to parse the values of Clock Delays.
+ *
+ * @dev: Pointer to our struct device.
+ * @clk_data: Pointer to the Clock Data structure
+ */
+static void arasan_dt_parse_clk_phases(struct device *dev,
+ struct sdhci_arasan_clk_data *clk_data)
+{
+ int *iclk_phase, *oclk_phase;
+ u32 mio_bank = 0;
+ int i;
+
+ /*
+ * This is kept as a function pointer and assigned here so that
+ * different controller variants can install their own handler.
+ */
+ clk_data->set_clk_delays = sdhci_arasan_set_clk_delays;
+
+ if (of_device_is_compatible(dev->of_node, "xlnx,zynqmp-8.9a")) {
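+ /*
+ * The compound literals give writable copies of the default
+ * tables, so the MIO bank 2 override below always starts from
+ * fresh defaults.
+ */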
+ iclk_phase = (int [MMC_TIMING_MMC_HS400 + 1]) ZYNQMP_ICLK_PHASE;
+ oclk_phase = (int [MMC_TIMING_MMC_HS400 + 1]) ZYNQMP_OCLK_PHASE;
+
+ of_property_read_u32(dev->of_node, "xlnx,mio-bank", &mio_bank);
+ if (mio_bank == 2) {
+ oclk_phase[MMC_TIMING_UHS_SDR104] = 90;
+ oclk_phase[MMC_TIMING_MMC_HS200] = 90;
+ }
+
+ for (i = 0; i <= MMC_TIMING_MMC_HS400; i++) {
+ clk_data->clk_phase_in[i] = iclk_phase[i];
+ clk_data->clk_phase_out[i] = oclk_phase[i];
+ }
+ }
+
+ arasan_dt_read_clk_phase(dev, clk_data, MMC_TIMING_LEGACY,
+ "clk-phase-legacy");
+ arasan_dt_read_clk_phase(dev, clk_data, MMC_TIMING_MMC_HS,
+ "clk-phase-mmc-hs");
+ arasan_dt_read_clk_phase(dev, clk_data, MMC_TIMING_SD_HS,
+ "clk-phase-sd-hs");
+ arasan_dt_read_clk_phase(dev, clk_data, MMC_TIMING_UHS_SDR12,
+ "clk-phase-uhs-sdr12");
+ arasan_dt_read_clk_phase(dev, clk_data, MMC_TIMING_UHS_SDR25,
+ "clk-phase-uhs-sdr25");
+ arasan_dt_read_clk_phase(dev, clk_data, MMC_TIMING_UHS_SDR50,
+ "clk-phase-uhs-sdr50");
+ arasan_dt_read_clk_phase(dev, clk_data, MMC_TIMING_UHS_SDR104,
+ "clk-phase-uhs-sdr104");
+ arasan_dt_read_clk_phase(dev, clk_data, MMC_TIMING_UHS_DDR50,
+ "clk-phase-uhs-ddr50");
+ arasan_dt_read_clk_phase(dev, clk_data, MMC_TIMING_MMC_DDR52,
+ "clk-phase-mmc-ddr52");
+ arasan_dt_read_clk_phase(dev, clk_data, MMC_TIMING_MMC_HS200,
+ "clk-phase-mmc-hs200");
+ arasan_dt_read_clk_phase(dev, clk_data, MMC_TIMING_MMC_HS400,
+ "clk-phase-mmc-hs400");
+}
+
+/**
+ * sdhci_arasan_register_sdcardclk - Register the sdcardclk for a PHY to use
*
* Some PHY devices need to know what the actual card clock is. In order for
* them to find out, we'll provide a clock through the common clock framework
* for them to query.
*
- * Note: without seriously re-architecting SDHCI's clock code and testing on
- * all platforms, there's no way to create a totally beautiful clock here
- * with all clock ops implemented. Instead, we'll just create a clock that can
- * be queried and set the CLK_GET_RATE_NOCACHE attribute to tell common clock
- * framework that we're doing things behind its back. This should be sufficient
- * to create nice clean device tree bindings and later (if needed) we can try
- * re-architecting SDHCI if we see some benefit to it.
- *
* @sdhci_arasan: Our private data structure.
* @clk_xin: Pointer to the functional clock
* @dev: Pointer to our struct device.
* Returns 0 on success and error value on error
*/
-static int sdhci_arasan_register_sdclk(struct sdhci_arasan_data *sdhci_arasan,
- struct clk *clk_xin,
- struct device *dev)
+static int
+sdhci_arasan_register_sdcardclk(struct sdhci_arasan_data *sdhci_arasan,
+ struct clk *clk_xin,
+ struct device *dev)
{
+ struct sdhci_arasan_clk_data *clk_data = &sdhci_arasan->clk_data;
struct device_node *np = dev->of_node;
struct clk_init_data sdcardclk_init;
const char *parent_clk_name;
int ret;
- /* Providing a clock to the PHY is optional; no error if missing */
- if (!of_find_property(np, "#clock-cells", NULL))
- return 0;
-
ret = of_property_read_string_index(np, "clock-output-names", 0,
&sdcardclk_init.name);
if (ret) {
@@ -653,17 +968,72 @@ static int sdhci_arasan_register_sdclk(struct sdhci_arasan_data *sdhci_arasan,
sdcardclk_init.parent_names = &parent_clk_name;
sdcardclk_init.num_parents = 1;
sdcardclk_init.flags = CLK_GET_RATE_NOCACHE;
- sdcardclk_init.ops = &arasan_sdcardclk_ops;
+ if (of_device_is_compatible(np, "xlnx,zynqmp-8.9a"))
+ sdcardclk_init.ops = &zynqmp_sdcardclk_ops;
+ else
+ sdcardclk_init.ops = &arasan_sdcardclk_ops;
+
+ clk_data->sdcardclk_hw.init = &sdcardclk_init;
+ clk_data->sdcardclk =
+ devm_clk_register(dev, &clk_data->sdcardclk_hw);
+ clk_data->sdcardclk_hw.init = NULL;
+
+ ret = of_clk_add_provider(np, of_clk_src_simple_get,
+ clk_data->sdcardclk);
+ if (ret)
+ dev_err(dev, "Failed to add sdcard clock provider\n");
+
+ return ret;
+}
+
+/**
+ * sdhci_arasan_register_sampleclk - Register the sampleclk for a PHY to use
+ *
+ * Some PHY devices need to know what the actual card clock is. In order for
+ * them to find out, we'll provide a clock through the common clock framework
+ * for them to query.
+ *
+ * @sdhci_arasan: Our private data structure.
+ * @clk_xin: Pointer to the functional clock
+ * @dev: Pointer to our struct device.
+ * Returns 0 on success and error value on error
+ */
+static int
+sdhci_arasan_register_sampleclk(struct sdhci_arasan_data *sdhci_arasan,
+ struct clk *clk_xin,
+ struct device *dev)
+{
+ struct sdhci_arasan_clk_data *clk_data = &sdhci_arasan->clk_data;
+ struct device_node *np = dev->of_node;
+ struct clk_init_data sampleclk_init;
+ const char *parent_clk_name;
+ int ret;
- sdhci_arasan->sdcardclk_hw.init = &sdcardclk_init;
- sdhci_arasan->sdcardclk =
- devm_clk_register(dev, &sdhci_arasan->sdcardclk_hw);
- sdhci_arasan->sdcardclk_hw.init = NULL;
+ ret = of_property_read_string_index(np, "clock-output-names", 1,
+ &sampleclk_init.name);
+ if (ret) {
+ dev_err(dev, "DT has #clock-cells but no clock-output-names\n");
+ return ret;
+ }
+
+ parent_clk_name = __clk_get_name(clk_xin);
+ sampleclk_init.parent_names = &parent_clk_name;
+ sampleclk_init.num_parents = 1;
+ sampleclk_init.flags = CLK_GET_RATE_NOCACHE;
+ if (of_device_is_compatible(np, "xlnx,zynqmp-8.9a"))
+ sampleclk_init.ops = &zynqmp_sampleclk_ops;
+ else
+ sampleclk_init.ops = &arasan_sampleclk_ops;
+
+ clk_data->sampleclk_hw.init = &sampleclk_init;
+ clk_data->sampleclk =
+ devm_clk_register(dev, &clk_data->sampleclk_hw);
+ clk_data->sampleclk_hw.init = NULL;
ret = of_clk_add_provider(np, of_clk_src_simple_get,
- sdhci_arasan->sdcardclk);
+ clk_data->sampleclk);
if (ret)
- dev_err(dev, "Failed to add clock provider\n");
+ dev_err(dev, "Failed to add sample clock provider\n");
return ret;
}
@@ -686,6 +1056,54 @@ static void sdhci_arasan_unregister_sdclk(struct device *dev)
of_clk_del_provider(dev->of_node);
}
+/**
+ * sdhci_arasan_register_sdclk - Register the sdcardclk for a PHY to use
+ *
+ * Some PHY devices need to know what the actual card clock is. In order for
+ * them to find out, we'll provide a clock through the common clock framework
+ * for them to query.
+ *
+ * Note: without seriously re-architecting SDHCI's clock code and testing on
+ * all platforms, there's no way to create a totally beautiful clock here
+ * with all clock ops implemented. Instead, we'll just create a clock that can
+ * be queried and set the CLK_GET_RATE_NOCACHE attribute to tell common clock
+ * framework that we're doing things behind its back. This should be sufficient
+ * to create nice clean device tree bindings and later (if needed) we can try
+ * re-architecting SDHCI if we see some benefit to it.
+ *
+ * @sdhci_arasan: Our private data structure.
+ * @clk_xin: Pointer to the functional clock
+ * @dev: Pointer to our struct device.
+ * Returns 0 on success and error value on error
+ */
+static int sdhci_arasan_register_sdclk(struct sdhci_arasan_data *sdhci_arasan,
+ struct clk *clk_xin,
+ struct device *dev)
+{
+ struct device_node *np = dev->of_node;
+ u32 num_clks = 0;
+ int ret;
+
+ /* Providing a clock to the PHY is optional; no error if missing */
+ if (of_property_read_u32(np, "#clock-cells", &num_clks) < 0)
+ return 0;
+
+ ret = sdhci_arasan_register_sdcardclk(sdhci_arasan, clk_xin, dev);
+ if (ret)
+ return ret;
+
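+ /*
+ * A non-zero #clock-cells means consumers index the clocks, so
+ * register the sample clock in addition to the sdcard clock.
+ */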
+ if (num_clks) {
+ ret = sdhci_arasan_register_sampleclk(sdhci_arasan, clk_xin,
+ dev);
+ if (ret) {
+ sdhci_arasan_unregister_sdclk(dev);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
static int sdhci_arasan_add_host(struct sdhci_arasan_data *sdhci_arasan)
{
struct sdhci_host *host = sdhci_arasan->host;
@@ -814,6 +1232,25 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
if (ret)
goto clk_disable_all;
+ if (of_device_is_compatible(np, "xlnx,zynqmp-8.9a")) {
+ struct sdhci_arasan_zynqmp_clk_data *zynqmp_clk_data;
+ const struct zynqmp_eemi_ops *eemi_ops;
+
+ zynqmp_clk_data = devm_kzalloc(&pdev->dev,
+ sizeof(*zynqmp_clk_data),
+ GFP_KERNEL);
+ eemi_ops = zynqmp_pm_get_eemi_ops();
+ if (IS_ERR(eemi_ops)) {
+ ret = PTR_ERR(eemi_ops);
+ goto unreg_clk;
+ }
+
+ zynqmp_clk_data->eemi_ops = eemi_ops;
+ sdhci_arasan->clk_data.clk_of_data = zynqmp_clk_data;
+ }
+
+ arasan_dt_parse_clk_phases(&pdev->dev, &sdhci_arasan->clk_data);
+
ret = mmc_of_parse(host->mmc);
if (ret) {
if (ret != -EPROBE_DEFER)
diff --git a/drivers/mmc/host/sdhci-of-aspeed.c b/drivers/mmc/host/sdhci-of-aspeed.c
index 8962f6664381..56912e30c47e 100644
--- a/drivers/mmc/host/sdhci-of-aspeed.c
+++ b/drivers/mmc/host/sdhci-of-aspeed.c
@@ -111,7 +111,19 @@ static void aspeed_sdhci_set_bus_width(struct sdhci_host *host, int width)
sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}
+static u32 aspeed_sdhci_readl(struct sdhci_host *host, int reg)
+{
+ u32 val = readl(host->ioaddr + reg);
+
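+ /*
+ * The SDHCI core expects SDHCI_CARD_PRESENT to read as set while a
+ * card is inserted; with an active-high card-detect line the raw
+ * bit is inverted, so flip it here.
+ */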
+ if (unlikely(reg == SDHCI_PRESENT_STATE) &&
+ (host->mmc->caps2 & MMC_CAP2_CD_ACTIVE_HIGH))
+ val ^= SDHCI_CARD_PRESENT;
+
+ return val;
+}
+
static const struct sdhci_ops aspeed_sdhci_ops = {
+ .read_l = aspeed_sdhci_readl,
.set_clock = aspeed_sdhci_set_clock,
.get_max_clock = aspeed_sdhci_get_max_clock,
.set_bus_width = aspeed_sdhci_set_bus_width,
diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c
index e7d1920729fb..5959e394b416 100644
--- a/drivers/mmc/host/sdhci-of-at91.c
+++ b/drivers/mmc/host/sdhci-of-at91.c
@@ -27,6 +27,9 @@
#define SDMMC_CACR 0x230
#define SDMMC_CACR_CAPWREN BIT(0)
#define SDMMC_CACR_KEY (0x46 << 8)
+#define SDMMC_CALCR 0x240
+#define SDMMC_CALCR_EN BIT(0)
+#define SDMMC_CALCR_ALWYSON BIT(4)
#define SDHCI_AT91_PRESET_COMMON_CONF 0x400 /* drv type B, programmable clock mode */
@@ -35,6 +38,7 @@ struct sdhci_at91_priv {
struct clk *gck;
struct clk *mainck;
bool restore_needed;
+ bool cal_always_on;
};
static void sdhci_at91_set_force_card_detect(struct sdhci_host *host)
@@ -116,10 +120,17 @@ static void sdhci_at91_set_uhs_signaling(struct sdhci_host *host,
static void sdhci_at91_reset(struct sdhci_host *host, u8 mask)
{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_at91_priv *priv = sdhci_pltfm_priv(pltfm_host);
+
sdhci_reset(host, mask);
if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
sdhci_at91_set_force_card_detect(host);
+
+ if (priv->cal_always_on && (mask & SDHCI_RESET_ALL))
+ sdhci_writel(host, SDMMC_CALCR_ALWYSON | SDMMC_CALCR_EN,
+ SDMMC_CALCR);
}
static const struct sdhci_ops sdhci_at91_sama5d2_ops = {
@@ -345,6 +356,14 @@ static int sdhci_at91_probe(struct platform_device *pdev)
priv->restore_needed = false;
+ /*
+ * If the SDCAL pin is wrongly connected, we must enable
+ * the analog calibration cell permanently.
+ */
+ priv->cal_always_on =
+ device_property_read_bool(&pdev->dev,
+ "microchip,sdcal-inverted");
+
ret = mmc_of_parse(host->mmc);
if (ret)
goto clocks_disable_unprepare;
@@ -358,7 +377,7 @@ static int sdhci_at91_probe(struct platform_device *pdev)
pm_runtime_use_autosuspend(&pdev->dev);
/* HS200 is broken at this moment */
- host->quirks2 = SDHCI_QUIRK2_BROKEN_HS200;
+ host->quirks2 |= SDHCI_QUIRK2_BROKEN_HS200;
ret = sdhci_add_host(host);
if (ret)
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index 1d1953dfc54b..5cca3fa4610b 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -77,8 +77,10 @@ struct sdhci_esdhc {
bool quirk_incorrect_hostver;
bool quirk_limited_clk_division;
bool quirk_unreliable_pulse_detection;
- bool quirk_fixup_tuning;
+ bool quirk_tuning_erratum_type1;
+ bool quirk_tuning_erratum_type2;
bool quirk_ignore_data_inhibit;
+ bool in_sw_tuning;
unsigned int peripheral_clock;
const struct esdhc_clk_fixup *clk_fixup;
u32 div_ratio;
@@ -408,6 +410,8 @@ static void esdhc_le_writel(struct sdhci_host *host, u32 val, int reg)
static void esdhc_be_writew(struct sdhci_host *host, u16 val, int reg)
{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
int base = reg & ~0x3;
u32 value;
u32 ret;
@@ -416,10 +420,24 @@ static void esdhc_be_writew(struct sdhci_host *host, u16 val, int reg)
ret = esdhc_writew_fixup(host, reg, val, value);
if (reg != SDHCI_TRANSFER_MODE)
iowrite32be(ret, host->ioaddr + base);
+
+ /* Starting SW tuning requires ESDHC_SMPCLKSEL to be set
+ * 1 us after ESDHC_EXTN is set.
+ */
+ if (base == ESDHC_SYSTEM_CONTROL_2) {
+ if (!(value & ESDHC_EXTN) && (ret & ESDHC_EXTN) &&
+ esdhc->in_sw_tuning) {
+ udelay(1);
+ ret |= ESDHC_SMPCLKSEL;
+ iowrite32be(ret, host->ioaddr + base);
+ }
+ }
}
static void esdhc_le_writew(struct sdhci_host *host, u16 val, int reg)
{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
int base = reg & ~0x3;
u32 value;
u32 ret;
@@ -428,6 +446,18 @@ static void esdhc_le_writew(struct sdhci_host *host, u16 val, int reg)
ret = esdhc_writew_fixup(host, reg, val, value);
if (reg != SDHCI_TRANSFER_MODE)
iowrite32(ret, host->ioaddr + base);
+
+ /* Starting SW tuning requires ESDHC_SMPCLKSEL to be set
+ * 1 us after ESDHC_EXTN is set.
+ */
+ if (base == ESDHC_SYSTEM_CONTROL_2) {
+ if (!(value & ESDHC_EXTN) && (ret & ESDHC_EXTN) &&
+ esdhc->in_sw_tuning) {
+ udelay(1);
+ ret |= ESDHC_SMPCLKSEL;
+ iowrite32(ret, host->ioaddr + base);
+ }
+ }
}
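
The EXTN-then-SMPCLKSEL sequencing is duplicated in the big- and little-endian writew paths above. One way the shared step could be factored out, sketched here with a hypothetical helper name and an accessor callback rather than how the patch actually structures it:

    /*
     * Hedged sketch: set SMPCLKSEL 1 us after EXTN turns on, for either
     * endianness; 'iowrite' would be iowrite32() or iowrite32be().
     */
    static void example_smpclksel_after_extn(struct sdhci_host *host,
                                             struct sdhci_esdhc *esdhc,
                                             int base, u32 old, u32 new,
                                             void (*iowrite)(u32, void __iomem *))
    {
            if (base != ESDHC_SYSTEM_CONTROL_2 || !esdhc->in_sw_tuning)
                    return;

            if (!(old & ESDHC_EXTN) && (new & ESDHC_EXTN)) {
                    udelay(1);
                    iowrite(new | ESDHC_SMPCLKSEL, host->ioaddr + base);
            }
    }
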
static void esdhc_be_writeb(struct sdhci_host *host, u8 val, int reg)
@@ -560,6 +590,32 @@ static void esdhc_clock_enable(struct sdhci_host *host, bool enable)
}
}
+static void esdhc_flush_async_fifo(struct sdhci_host *host)
+{
+ ktime_t timeout;
+ u32 val;
+
+ val = sdhci_readl(host, ESDHC_DMA_SYSCTL);
+ val |= ESDHC_FLUSH_ASYNC_FIFO;
+ sdhci_writel(host, val, ESDHC_DMA_SYSCTL);
+
+ /* Wait max 20 ms */
+ timeout = ktime_add_ms(ktime_get(), 20);
+ while (1) {
+ bool timedout = ktime_after(ktime_get(), timeout);
+
+ if (!(sdhci_readl(host, ESDHC_DMA_SYSCTL) &
+ ESDHC_FLUSH_ASYNC_FIFO))
+ break;
+ if (timedout) {
+ pr_err("%s: flushing asynchronous FIFO timeout.\n",
+ mmc_hostname(host->mmc));
+ break;
+ }
+ usleep_range(10, 20);
+ }
+}
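
The 20 ms wait above could equally be expressed with readx_poll_timeout(), assuming <linux/iopoll.h> is pulled into this file; routing the read through sdhci_readl() keeps the driver's endianness accessors in the loop. A sketch:

    static u32 example_read_dma_sysctl(struct sdhci_host *host)
    {
            return sdhci_readl(host, ESDHC_DMA_SYSCTL);
    }

    static void example_flush_async_fifo_wait(struct sdhci_host *host)
    {
            u32 val;

            /* Poll every 10 us, give up after 20 ms, like the loop above. */
            if (readx_poll_timeout(example_read_dma_sysctl, host, val,
                                   !(val & ESDHC_FLUSH_ASYNC_FIFO), 10, 20000))
                    pr_err("%s: flushing asynchronous FIFO timeout.\n",
                           mmc_hostname(host->mmc));
    }
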
+
static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -652,9 +708,7 @@ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
sdhci_writel(host, temp | ESDHC_HS400_WNDW_ADJUST, ESDHC_TBCTL);
esdhc_clock_enable(host, false);
- temp = sdhci_readl(host, ESDHC_DMA_SYSCTL);
- temp |= ESDHC_FLUSH_ASYNC_FIFO;
- sdhci_writel(host, temp, ESDHC_DMA_SYSCTL);
+ esdhc_flush_async_fifo(host);
}
/* Wait max 20 ms */
@@ -796,16 +850,21 @@ static int esdhc_signal_voltage_switch(struct mmc_host *mmc,
}
}
-static struct soc_device_attribute soc_fixup_tuning[] = {
+static struct soc_device_attribute soc_tuning_erratum_type1[] = {
+ { .family = "QorIQ T1023", .revision = "1.0", },
{ .family = "QorIQ T1040", .revision = "1.0", },
{ .family = "QorIQ T2080", .revision = "1.0", },
- { .family = "QorIQ T1023", .revision = "1.0", },
{ .family = "QorIQ LS1021A", .revision = "1.0", },
- { .family = "QorIQ LS1080A", .revision = "1.0", },
- { .family = "QorIQ LS2080A", .revision = "1.0", },
+ { },
+};
+
+static struct soc_device_attribute soc_tuning_erratum_type2[] = {
{ .family = "QorIQ LS1012A", .revision = "1.0", },
{ .family = "QorIQ LS1043A", .revision = "1.*", },
{ .family = "QorIQ LS1046A", .revision = "1.0", },
+ { .family = "QorIQ LS1080A", .revision = "1.0", },
+ { .family = "QorIQ LS2080A", .revision = "1.0", },
+ { .family = "QorIQ LA1575A", .revision = "1.0", },
{ },
};
@@ -814,10 +873,7 @@ static void esdhc_tuning_block_enable(struct sdhci_host *host, bool enable)
u32 val;
esdhc_clock_enable(host, false);
-
- val = sdhci_readl(host, ESDHC_DMA_SYSCTL);
- val |= ESDHC_FLUSH_ASYNC_FIFO;
- sdhci_writel(host, val, ESDHC_DMA_SYSCTL);
+ esdhc_flush_async_fifo(host);
val = sdhci_readl(host, ESDHC_TBCTL);
if (enable)
@@ -829,15 +885,97 @@ static void esdhc_tuning_block_enable(struct sdhci_host *host, bool enable)
esdhc_clock_enable(host, true);
}
+static void esdhc_prepare_sw_tuning(struct sdhci_host *host, u8 *window_start,
+ u8 *window_end)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
+ u8 tbstat_15_8, tbstat_7_0;
+ u32 val;
+
+ if (esdhc->quirk_tuning_erratum_type1) {
+ *window_start = 5 * esdhc->div_ratio;
+ *window_end = 3 * esdhc->div_ratio;
+ return;
+ }
+
+ /* Write TBCTL[11:8]=4'h8 */
+ val = sdhci_readl(host, ESDHC_TBCTL);
+ val &= ~(0xf << 8);
+ val |= 8 << 8;
+ sdhci_writel(host, val, ESDHC_TBCTL);
+
+ mdelay(1);
+
+ /* Read TBCTL[31:0] register and rewrite again */
+ val = sdhci_readl(host, ESDHC_TBCTL);
+ sdhci_writel(host, val, ESDHC_TBCTL);
+
+ mdelay(1);
+
+ /* Read the TBSTAT[31:0] register twice */
+ val = sdhci_readl(host, ESDHC_TBSTAT);
+ val = sdhci_readl(host, ESDHC_TBSTAT);
+
+ /* Reset data lines by setting ESDHCCTL[RSTD] */
+ sdhci_reset(host, SDHCI_RESET_DATA);
+ /* Write 32'hFFFF_FFFF to IRQSTAT register */
+ sdhci_writel(host, 0xFFFFFFFF, SDHCI_INT_STATUS);
+
+ /* If TBSTAT[15:8]-TBSTAT[7:0] > 4 * div_ratio
+ * or TBSTAT[7:0]-TBSTAT[15:8] > 4 * div_ratio,
+ * then program TBPTR[TB_WNDW_END_PTR] = 4 * div_ratio
+ * and program TBPTR[TB_WNDW_START_PTR] = 8 * div_ratio.
+ */
+ tbstat_7_0 = val & 0xff;
+ tbstat_15_8 = (val >> 8) & 0xff;
+
+ if (abs(tbstat_15_8 - tbstat_7_0) > (4 * esdhc->div_ratio)) {
+ *window_start = 8 * esdhc->div_ratio;
+ *window_end = 4 * esdhc->div_ratio;
+ } else {
+ *window_start = 5 * esdhc->div_ratio;
+ *window_end = 3 * esdhc->div_ratio;
+ }
+}
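
As a worked example of the window selection above, with illustrative values: for div_ratio = 4, a TBSTAT spread of 0x30 - 0x10 = 32 exceeds 4 * div_ratio = 16, so the wide window (start 32, end 16) is programmed; a smaller spread falls back to start 20, end 12.

    /* Illustrative numbers only; mirrors the branch in the function above. */
    static void example_window_pick(u8 *window_start, u8 *window_end)
    {
            const u8 div_ratio = 4;
            const u8 tbstat_hi = 0x30, tbstat_lo = 0x10; /* spread 32 > 16 */

            if (abs(tbstat_hi - tbstat_lo) > 4 * div_ratio) {
                    *window_start = 8 * div_ratio; /* 32 */
                    *window_end = 4 * div_ratio;   /* 16 */
            } else {
                    *window_start = 5 * div_ratio; /* 20 */
                    *window_end = 3 * div_ratio;   /* 12 */
            }
    }
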
+
+static int esdhc_execute_sw_tuning(struct mmc_host *mmc, u32 opcode,
+ u8 window_start, u8 window_end)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
+ u32 val;
+ int ret;
+
+ /* Program TBPTR[TB_WNDW_END_PTR] and TBPTR[TB_WNDW_START_PTR] */
+ val = ((u32)window_start << ESDHC_WNDW_STRT_PTR_SHIFT) &
+ ESDHC_WNDW_STRT_PTR_MASK;
+ val |= window_end & ESDHC_WNDW_END_PTR_MASK;
+ sdhci_writel(host, val, ESDHC_TBPTR);
+
+ /* Program the software tuning mode by setting TBCTL[TB_MODE]=2'h3 */
+ val = sdhci_readl(host, ESDHC_TBCTL);
+ val &= ~ESDHC_TB_MODE_MASK;
+ val |= ESDHC_TB_MODE_SW;
+ sdhci_writel(host, val, ESDHC_TBCTL);
+
+ esdhc->in_sw_tuning = true;
+ ret = sdhci_execute_tuning(mmc, opcode);
+ esdhc->in_sw_tuning = false;
+ return ret;
+}
+
static int esdhc_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
struct sdhci_host *host = mmc_priv(mmc);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
+ u8 window_start, window_end;
+ int ret, retries = 1;
bool hs400_tuning;
unsigned int clk;
u32 val;
- int ret;
/* For tuning mode, the SD clock divisor value
* must be larger than 3 according to the reference manual.
@@ -846,39 +984,73 @@ static int esdhc_execute_tuning(struct mmc_host *mmc, u32 opcode)
if (host->clock > clk)
esdhc_of_set_clock(host, clk);
- if (esdhc->quirk_limited_clk_division &&
- host->flags & SDHCI_HS400_TUNING)
- esdhc_of_set_clock(host, host->clock);
-
esdhc_tuning_block_enable(host, true);
hs400_tuning = host->flags & SDHCI_HS400_TUNING;
- ret = sdhci_execute_tuning(mmc, opcode);
- if (hs400_tuning) {
- val = sdhci_readl(host, ESDHC_SDTIMNGCTL);
- val |= ESDHC_FLW_CTL_BG;
- sdhci_writel(host, val, ESDHC_SDTIMNGCTL);
- }
+ do {
+ if (esdhc->quirk_limited_clk_division &&
+ hs400_tuning)
+ esdhc_of_set_clock(host, host->clock);
- if (host->tuning_err == -EAGAIN && esdhc->quirk_fixup_tuning) {
+ /* Do HW tuning */
+ val = sdhci_readl(host, ESDHC_TBCTL);
+ val &= ~ESDHC_TB_MODE_MASK;
+ val |= ESDHC_TB_MODE_3;
+ sdhci_writel(host, val, ESDHC_TBCTL);
- /* program TBPTR[TB_WNDW_END_PTR] = 3*DIV_RATIO and
- * program TBPTR[TB_WNDW_START_PTR] = 5*DIV_RATIO
- */
- val = sdhci_readl(host, ESDHC_TBPTR);
- val = (val & ~((0x7f << 8) | 0x7f)) |
- (3 * esdhc->div_ratio) | ((5 * esdhc->div_ratio) << 8);
- sdhci_writel(host, val, ESDHC_TBPTR);
+ ret = sdhci_execute_tuning(mmc, opcode);
+ if (ret)
+ break;
- /* program the software tuning mode by setting
- * TBCTL[TB_MODE]=2'h3
+ /* If HW tuning fails and triggers erratum,
+ * try the workaround.
*/
- val = sdhci_readl(host, ESDHC_TBCTL);
- val |= 0x3;
- sdhci_writel(host, val, ESDHC_TBCTL);
- sdhci_execute_tuning(mmc, opcode);
+ ret = host->tuning_err;
+ if (ret == -EAGAIN &&
+ (esdhc->quirk_tuning_erratum_type1 ||
+ esdhc->quirk_tuning_erratum_type2)) {
+ /* Recover HS400 tuning flag */
+ if (hs400_tuning)
+ host->flags |= SDHCI_HS400_TUNING;
+ pr_info("%s: Hold on to use fixed sampling clock. Try SW tuning!\n",
+ mmc_hostname(mmc));
+ /* Do SW tuning */
+ esdhc_prepare_sw_tuning(host, &window_start,
+ &window_end);
+ ret = esdhc_execute_sw_tuning(mmc, opcode,
+ window_start,
+ window_end);
+ if (ret)
+ break;
+
+ /* Retry both HW/SW tuning with reduced clock. */
+ ret = host->tuning_err;
+ if (ret == -EAGAIN && retries) {
+ /* Recover HS400 tuning flag */
+ if (hs400_tuning)
+ host->flags |= SDHCI_HS400_TUNING;
+
+ clk = host->max_clk / (esdhc->div_ratio + 1);
+ esdhc_of_set_clock(host, clk);
+ pr_info("%s: Hold on to use fixed sampling clock. Try tuning with reduced clock!\n",
+ mmc_hostname(mmc));
+ } else {
+ break;
+ }
+ } else {
+ break;
+ }
+ } while (retries--);
+
+ if (ret) {
+ esdhc_tuning_block_enable(host, false);
+ } else if (hs400_tuning) {
+ val = sdhci_readl(host, ESDHC_SDTIMNGCTL);
+ val |= ESDHC_FLW_CTL_BG;
+ sdhci_writel(host, val, ESDHC_SDTIMNGCTL);
}
+
return ret;
}
@@ -1114,10 +1286,15 @@ static int sdhci_esdhc_probe(struct platform_device *pdev)
pltfm_host = sdhci_priv(host);
esdhc = sdhci_pltfm_priv(pltfm_host);
- if (soc_device_match(soc_fixup_tuning))
- esdhc->quirk_fixup_tuning = true;
+ if (soc_device_match(soc_tuning_erratum_type1))
+ esdhc->quirk_tuning_erratum_type1 = true;
+ else
+ esdhc->quirk_tuning_erratum_type1 = false;
+
+ if (soc_device_match(soc_tuning_erratum_type2))
+ esdhc->quirk_tuning_erratum_type2 = true;
else
- esdhc->quirk_fixup_tuning = false;
+ esdhc->quirk_tuning_erratum_type2 = false;
if (esdhc->vendor_ver == VENDOR_V_22)
host->quirks2 |= SDHCI_QUIRK2_HOST_NO_CMD23;
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index eaffa85bc728..acefb76b4e15 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -21,6 +21,7 @@
#include <linux/mmc/mmc.h>
#include <linux/scatterlist.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/gpio.h>
#include <linux/pm_runtime.h>
#include <linux/mmc/slot-gpio.h>
@@ -1590,11 +1591,59 @@ static int amd_probe(struct sdhci_pci_chip *chip)
return 0;
}
+static u32 sdhci_read_present_state(struct sdhci_host *host)
+{
+ return sdhci_readl(host, SDHCI_PRESENT_STATE);
+}
+
+static void amd_sdhci_reset(struct sdhci_host *host, u8 mask)
+{
+ struct sdhci_pci_slot *slot = sdhci_priv(host);
+ struct pci_dev *pdev = slot->chip->pdev;
+ u32 present_state;
+
+ /*
+ * SDHC 0x7906 requires a hard reset to clear all internal state.
+ * Otherwise it can get into a bad state where the DATA lines are always
+ * read as zeros.
+ */
+ if (pdev->device == 0x7906 && (mask & SDHCI_RESET_ALL)) {
+ pci_clear_master(pdev);
+
+ pci_save_state(pdev);
+
+ pci_set_power_state(pdev, PCI_D3cold);
+ pr_debug("%s: power_state=%u\n", mmc_hostname(host->mmc),
+ pdev->current_state);
+ pci_set_power_state(pdev, PCI_D0);
+
+ pci_restore_state(pdev);
+
+ /*
+ * SDHCI_RESET_ALL says the card detect logic should not be
+ * reset, but since we need to reset the entire controller
+ * we should wait until the card detect logic has stabilized.
+ *
+ * This normally takes about 40ms.
+ */
+ readx_poll_timeout(
+ sdhci_read_present_state,
+ host,
+ present_state,
+ present_state & SDHCI_CD_STABLE,
+ 10000,
+ 100000
+ );
+ }
+
+ return sdhci_reset(host, mask);
+}
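
readx_poll_timeout(op, addr, val, cond, sleep_us, timeout_us) evaluates val = op(addr) until cond holds or timeout_us elapses, sleeping sleep_us between reads, so the 10000/100000 arguments above poll every 10 ms for up to 100 ms. An equivalent open-coded loop, for illustration only:

    static void example_wait_cd_stable(struct sdhci_host *host)
    {
            ktime_t timeout = ktime_add_us(ktime_get(), 100000); /* 100 ms */

            while (!(sdhci_read_present_state(host) & SDHCI_CD_STABLE)) {
                    if (ktime_after(ktime_get(), timeout))
                            break; /* give up, as the macro would */
                    usleep_range(10000, 11000); /* ~10 ms per poll */
            }
    }
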
+
static const struct sdhci_ops amd_sdhci_pci_ops = {
.set_clock = sdhci_set_clock,
.enable_dma = sdhci_pci_enable_dma,
.set_bus_width = sdhci_set_bus_width,
- .reset = sdhci_reset,
+ .reset = amd_sdhci_reset,
.set_uhs_signaling = sdhci_set_uhs_signaling,
};
@@ -1673,6 +1722,8 @@ static const struct pci_device_id pci_ids[] = {
SDHCI_PCI_DEVICE(INTEL, CML_EMMC, intel_glk_emmc),
SDHCI_PCI_DEVICE(INTEL, CML_SD, intel_byt_sd),
SDHCI_PCI_DEVICE(INTEL, CMLH_SD, intel_byt_sd),
+ SDHCI_PCI_DEVICE(INTEL, JSL_EMMC, intel_glk_emmc),
+ SDHCI_PCI_DEVICE(INTEL, JSL_SD, intel_byt_sd),
SDHCI_PCI_DEVICE(O2, 8120, o2),
SDHCI_PCI_DEVICE(O2, 8220, o2),
SDHCI_PCI_DEVICE(O2, 8221, o2),
diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h
index 558202fe64c6..981bbbe63aff 100644
--- a/drivers/mmc/host/sdhci-pci.h
+++ b/drivers/mmc/host/sdhci-pci.h
@@ -55,6 +55,8 @@
#define PCI_DEVICE_ID_INTEL_CML_EMMC 0x02c4
#define PCI_DEVICE_ID_INTEL_CML_SD 0x02f5
#define PCI_DEVICE_ID_INTEL_CMLH_SD 0x06f5
+#define PCI_DEVICE_ID_INTEL_JSL_EMMC 0x4dc4
+#define PCI_DEVICE_ID_INTEL_JSL_SD 0x4df8
#define PCI_DEVICE_ID_SYSKONNECT_8000 0x8000
#define PCI_DEVICE_ID_VIA_95D0 0x95d0
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index b056400e34b1..3140fe2e5dba 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -337,8 +337,19 @@ static void sdhci_init(struct sdhci_host *host, int soft)
static void sdhci_reinit(struct sdhci_host *host)
{
+ u32 cd = host->ier & (SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
+
sdhci_init(host, 0);
sdhci_enable_card_detection(host);
+
+ /*
+ * A change to the card detect bits indicates a change in present state,
+ * see sdhci_set_card_detection(). A card detect interrupt might have
+ * been missed while the host controller was being reset, so trigger a
+ * rescan to check.
+ */
+ if (cd != (host->ier & (SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT)))
+ mmc_detect_change(host->mmc, msecs_to_jiffies(200));
}
static void __sdhci_led_activate(struct sdhci_host *host)
@@ -2202,7 +2213,7 @@ int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
if (!(ctrl & SDHCI_CTRL_VDD_180))
return 0;
- pr_warn("%s: 3.3V regulator output did not became stable\n",
+ pr_warn("%s: 3.3V regulator output did not become stable\n",
mmc_hostname(mmc));
return -EAGAIN;
@@ -2234,7 +2245,7 @@ int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
if (ctrl & SDHCI_CTRL_VDD_180)
return 0;
- pr_warn("%s: 1.8V regulator output did not became stable\n",
+ pr_warn("%s: 1.8V regulator output did not become stable\n",
mmc_hostname(mmc));
return -EAGAIN;
diff --git a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c
index bb90757ecace..b8e897e31e2e 100644
--- a/drivers/mmc/host/sdhci_am654.c
+++ b/drivers/mmc/host/sdhci_am654.c
@@ -12,6 +12,7 @@
#include <linux/property.h>
#include <linux/regmap.h>
+#include "cqhci.h"
#include "sdhci-pltfm.h"
/* CTL_CFG Registers */
@@ -68,6 +69,9 @@
#define CLOCK_TOO_SLOW_HZ 400000
+/* Command Queue Host Controller Interface Base address */
+#define SDHCI_AM654_CQE_BASE_ADDR 0x200
+
static struct regmap_config sdhci_am654_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
@@ -259,6 +263,19 @@ static const struct sdhci_am654_driver_data sdhci_am654_drvdata = {
.flags = IOMUX_PRESENT | FREQSEL_2_BIT | STRBSEL_4_BIT | DLL_PRESENT,
};
+static u32 sdhci_am654_cqhci_irq(struct sdhci_host *host, u32 intmask)
+{
+ int cmd_error = 0;
+ int data_error = 0;
+
+ if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
+ return intmask;
+
+ cqhci_irq(host->mmc, intmask, cmd_error, data_error);
+
+ return 0;
+}
+
static struct sdhci_ops sdhci_j721e_8bit_ops = {
.get_max_clock = sdhci_pltfm_clk_get_max_clock,
.get_timeout_clock = sdhci_pltfm_clk_get_max_clock,
@@ -267,6 +284,7 @@ static struct sdhci_ops sdhci_j721e_8bit_ops = {
.set_power = sdhci_am654_set_power,
.set_clock = sdhci_am654_set_clock,
.write_b = sdhci_am654_write_b,
+ .irq = sdhci_am654_cqhci_irq,
.reset = sdhci_reset,
};
@@ -290,6 +308,7 @@ static struct sdhci_ops sdhci_j721e_4bit_ops = {
.set_power = sdhci_am654_set_power,
.set_clock = sdhci_j721e_4bit_set_clock,
.write_b = sdhci_am654_write_b,
+ .irq = sdhci_am654_cqhci_irq,
.reset = sdhci_reset,
};
@@ -304,6 +323,40 @@ static const struct sdhci_am654_driver_data sdhci_j721e_4bit_drvdata = {
.pdata = &sdhci_j721e_4bit_pdata,
.flags = IOMUX_PRESENT,
};
+
+static void sdhci_am654_dumpregs(struct mmc_host *mmc)
+{
+ sdhci_dumpregs(mmc_priv(mmc));
+}
+
+static const struct cqhci_host_ops sdhci_am654_cqhci_ops = {
+ .enable = sdhci_cqe_enable,
+ .disable = sdhci_cqe_disable,
+ .dumpregs = sdhci_am654_dumpregs,
+};
+
+static int sdhci_am654_cqe_add_host(struct sdhci_host *host)
+{
+ struct cqhci_host *cq_host;
+ int ret;
+
+ cq_host = devm_kzalloc(host->mmc->parent, sizeof(struct cqhci_host),
+ GFP_KERNEL);
+ if (!cq_host)
+ return -ENOMEM;
+
+ cq_host->mmio = host->ioaddr + SDHCI_AM654_CQE_BASE_ADDR;
+ cq_host->quirks |= CQHCI_QUIRK_SHORT_TXFR_DESC_SZ;
+ cq_host->caps |= CQHCI_TASK_DESC_SZ_128;
+ cq_host->ops = &sdhci_am654_cqhci_ops;
+
+ host->mmc->caps2 |= MMC_CAP2_CQE;
+
+ ret = cqhci_init(cq_host, host->mmc, 1);
+
+ return ret;
+}
+
static int sdhci_am654_init(struct sdhci_host *host)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -344,7 +397,23 @@ static int sdhci_am654_init(struct sdhci_host *host)
regmap_update_bits(sdhci_am654->base, CTL_CFG_2, SLOTTYPE_MASK,
ctl_cfg_2);
- return sdhci_add_host(host);
+ ret = sdhci_setup_host(host);
+ if (ret)
+ return ret;
+
+ ret = sdhci_am654_cqe_add_host(host);
+ if (ret)
+ goto err_cleanup_host;
+
+ ret = __sdhci_add_host(host);
+ if (ret)
+ goto err_cleanup_host;
+
+ return 0;
+
+err_cleanup_host:
+ sdhci_cleanup_host(host);
+ return ret;
}
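
The conversion above follows the usual shape for wiring a CQE into an SDHCI platform driver: sdhci_add_host() is split into sdhci_setup_host(), CQE registration, and __sdhci_add_host(), with sdhci_cleanup_host() unwinding on failure. Condensed, with an assumed CQE hook standing in for sdhci_am654_cqe_add_host():

    static int example_cqe_add_host(struct sdhci_host *host); /* assumed */

    static int example_add_host_with_cqe(struct sdhci_host *host)
    {
            int ret;

            ret = sdhci_setup_host(host);         /* allocate and parse caps */
            if (ret)
                    return ret;

            ret = example_cqe_add_host(host);     /* cqhci_init() wrapper */
            if (!ret)
                    ret = __sdhci_add_host(host); /* register the mmc host */
            if (ret)
                    sdhci_cleanup_host(host);

            return ret;
    }
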
static int sdhci_am654_get_of_property(struct platform_device *pdev,
diff --git a/drivers/mmc/host/sdhci_f_sdh30.c b/drivers/mmc/host/sdhci_f_sdh30.c
index f8b939e63e02..fa0dfc657c22 100644
--- a/drivers/mmc/host/sdhci_f_sdh30.c
+++ b/drivers/mmc/host/sdhci_f_sdh30.c
@@ -16,31 +16,7 @@
#include <linux/clk.h>
#include "sdhci-pltfm.h"
-
-/* F_SDH30 extended Controller registers */
-#define F_SDH30_AHB_CONFIG 0x100
-#define F_SDH30_AHB_BIGED 0x00000040
-#define F_SDH30_BUSLOCK_DMA 0x00000020
-#define F_SDH30_BUSLOCK_EN 0x00000010
-#define F_SDH30_SIN 0x00000008
-#define F_SDH30_AHB_INCR_16 0x00000004
-#define F_SDH30_AHB_INCR_8 0x00000002
-#define F_SDH30_AHB_INCR_4 0x00000001
-
-#define F_SDH30_TUNING_SETTING 0x108
-#define F_SDH30_CMD_CHK_DIS 0x00010000
-
-#define F_SDH30_IO_CONTROL2 0x114
-#define F_SDH30_CRES_O_DN 0x00080000
-#define F_SDH30_MSEL_O_1_8 0x00040000
-
-#define F_SDH30_ESD_CONTROL 0x124
-#define F_SDH30_EMMC_RST 0x00000002
-#define F_SDH30_EMMC_HS200 0x01000000
-
-#define F_SDH30_CMD_DAT_DELAY 0x200
-
-#define F_SDH30_MIN_CLOCK 400000
+#include "sdhci_f_sdh30.h"
struct f_sdhost_priv {
struct clk *clk_iface;
diff --git a/drivers/mmc/host/sdhci_f_sdh30.h b/drivers/mmc/host/sdhci_f_sdh30.h
new file mode 100644
index 000000000000..fc1ad28f7ca9
--- /dev/null
+++ b/drivers/mmc/host/sdhci_f_sdh30.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2013 - 2015 Fujitsu Semiconductor, Ltd
+ * Vincent Yang <vincent.yang@tw.fujitsu.com>
+ * Copyright (C) 2015 Linaro Ltd Andy Green <andy.green@linaro.org>
+ * Copyright (C) 2019 Socionext Inc.
+ *
+ */
+
+/* F_SDH30 extended Controller registers */
+#define F_SDH30_AHB_CONFIG 0x100
+#define F_SDH30_AHB_BIGED BIT(6)
+#define F_SDH30_BUSLOCK_DMA BIT(5)
+#define F_SDH30_BUSLOCK_EN BIT(4)
+#define F_SDH30_SIN BIT(3)
+#define F_SDH30_AHB_INCR_16 BIT(2)
+#define F_SDH30_AHB_INCR_8 BIT(1)
+#define F_SDH30_AHB_INCR_4 BIT(0)
+
+#define F_SDH30_TUNING_SETTING 0x108
+#define F_SDH30_CMD_CHK_DIS BIT(16)
+
+#define F_SDH30_IO_CONTROL2 0x114
+#define F_SDH30_CRES_O_DN BIT(19)
+#define F_SDH30_MSEL_O_1_8 BIT(18)
+
+#define F_SDH30_ESD_CONTROL 0x124
+#define F_SDH30_EMMC_RST BIT(1)
+#define F_SDH30_CMD_DAT_DELAY BIT(9)
+#define F_SDH30_EMMC_HS200 BIT(24)
+
+#define F_SDH30_MIN_CLOCK 400000
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
index 2f0b092d6dcc..c5ba13fae399 100644
--- a/drivers/mmc/host/tmio_mmc.h
+++ b/drivers/mmc/host/tmio_mmc.h
@@ -163,7 +163,6 @@ struct tmio_mmc_host {
unsigned long last_req_ts;
struct mutex ios_lock; /* protect set_ios() context */
bool native_hotplug;
- bool runtime_synced;
bool sdio_irq_enabled;
/* Mandatory callback */
diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c
index 9b6e1001e77c..c4a1d49fbea4 100644
--- a/drivers/mmc/host/tmio_mmc_core.c
+++ b/drivers/mmc/host/tmio_mmc_core.c
@@ -39,6 +39,7 @@
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
@@ -1184,7 +1185,7 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host)
if (ret == -EPROBE_DEFER)
return ret;
- mmc->caps |= MMC_CAP_4_BIT_DATA | pdata->capabilities;
+ mmc->caps |= MMC_CAP_ERASE | MMC_CAP_4_BIT_DATA | pdata->capabilities;
mmc->caps2 |= pdata->capabilities2;
mmc->max_segs = pdata->max_segs ? : 32;
mmc->max_blk_size = TMIO_MAX_BLK_SIZE;
@@ -1248,10 +1249,12 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host)
/* See if we also get DMA */
tmio_mmc_request_dma(_host, pdata);
+ dev_pm_domain_start(&pdev->dev);
+ pm_runtime_get_noresume(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_enable(&pdev->dev);
- pm_runtime_get_sync(&pdev->dev);
ret = mmc_add_host(mmc);
if (ret)
@@ -1333,11 +1336,6 @@ int tmio_mmc_host_runtime_resume(struct device *dev)
{
struct tmio_mmc_host *host = dev_get_drvdata(dev);
- if (!host->runtime_synced) {
- host->runtime_synced = true;
- return 0;
- }
-
tmio_mmc_clk_enable(host);
tmio_mmc_hw_reset(host->mmc);
diff --git a/drivers/mmc/host/vub300.c b/drivers/mmc/host/vub300.c
index a3680c900689..6ced1b7f642f 100644
--- a/drivers/mmc/host/vub300.c
+++ b/drivers/mmc/host/vub300.c
@@ -2070,18 +2070,11 @@ static void vub300_enable_sdio_irq(struct mmc_host *mmc, int enable)
kref_put(&vub300->kref, vub300_delete);
}
-static void vub300_init_card(struct mmc_host *mmc, struct mmc_card *card)
-{ /* NOT irq */
- struct vub300_mmc_host *vub300 = mmc_priv(mmc);
- dev_info(&vub300->udev->dev, "NO host QUIRKS for this card\n");
-}
-
static const struct mmc_host_ops vub300_mmc_ops = {
.request = vub300_mmc_request,
.set_ios = vub300_mmc_set_ios,
.get_ro = vub300_mmc_get_ro,
.enable_sdio_irq = vub300_enable_sdio_irq,
- .init_card = vub300_init_card,
};
static int vub300_probe(struct usb_interface *interface,
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index 79a53cb8507b..00a79489067c 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -1353,7 +1353,7 @@ static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t a
{
unsigned long cmd_addr;
struct cfi_private *cfi = map->fldrv_priv;
- int ret = 0;
+ int ret;
adr += chip->start;
@@ -1383,7 +1383,7 @@ static int cfi_intelext_point(struct mtd_info *mtd, loff_t from, size_t len,
struct cfi_private *cfi = map->fldrv_priv;
unsigned long ofs, last_end = 0;
int chipnum;
- int ret = 0;
+ int ret;
if (!map->virt)
return -EINVAL;
@@ -1550,7 +1550,7 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
{
struct cfi_private *cfi = map->fldrv_priv;
map_word status, write_cmd;
- int ret=0;
+ int ret;
adr += chip->start;
@@ -1624,7 +1624,7 @@ static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t le
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
- int ret = 0;
+ int ret;
int chipnum;
unsigned long ofs;
@@ -1871,7 +1871,7 @@ static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
- int ret = 0;
+ int ret;
int chipnum;
unsigned long ofs, vec_seek, i;
size_t len = 0;
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index cf8c8be40a9c..04b383bc3947 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -123,19 +123,23 @@ static int cfi_use_status_reg(struct cfi_private *cfi)
(extp->SoftwareFeatures & poll_mask) == CFI_POLL_STATUS_REG;
}
-static void cfi_check_err_status(struct map_info *map, struct flchip *chip,
- unsigned long adr)
+static int cfi_check_err_status(struct map_info *map, struct flchip *chip,
+ unsigned long adr)
{
struct cfi_private *cfi = map->fldrv_priv;
map_word status;
if (!cfi_use_status_reg(cfi))
- return;
+ return 0;
cfi_send_gen_cmd(0x70, cfi->addr_unlock1, chip->start, map, cfi,
cfi->device_type, NULL);
status = map_read(map, adr);
+ /* The error bits are invalid while the chip's busy */
+ if (!map_word_bitsset(map, status, CMD(CFI_SR_DRB)))
+ return 0;
+
if (map_word_bitsset(map, status, CMD(0x3a))) {
unsigned long chipstatus = MERGESTATUS(status);
@@ -151,7 +155,12 @@ static void cfi_check_err_status(struct map_info *map, struct flchip *chip,
if (chipstatus & CFI_SR_SLSB)
pr_err("%s sector write protected, status %lx\n",
map->name, chipstatus);
+
+ /* Erase/Program status bits are set on operation failure */
+ if (chipstatus & (CFI_SR_ESB | CFI_SR_PSB))
+ return 1;
}
+ return 0;
}
/* #define DEBUG_CFI_FEATURES */
@@ -785,7 +794,6 @@ static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
kfree(mtd->eraseregions);
kfree(mtd);
kfree(cfi->cmdset_priv);
- kfree(cfi->cfiq);
return NULL;
}
@@ -848,20 +856,16 @@ static int __xipram chip_good(struct map_info *map, struct flchip *chip,
if (cfi_use_status_reg(cfi)) {
map_word ready = CMD(CFI_SR_DRB);
- map_word err = CMD(CFI_SR_PSB | CFI_SR_ESB);
+
/*
* For chips that support status register, check device
- * ready bit and Erase/Program status bit to know if
- * operation succeeded.
+ * ready bit
*/
cfi_send_gen_cmd(0x70, cfi->addr_unlock1, chip->start, map, cfi,
cfi->device_type, NULL);
curd = map_read(map, addr);
- if (map_word_andequal(map, curd, ready, ready))
- return !map_word_bitsset(map, curd, err);
-
- return 0;
+ return map_word_andequal(map, curd, ready, ready);
}
oldd = map_read(map, addr);
@@ -1699,8 +1703,11 @@ static int __xipram do_write_oneword_once(struct map_info *map,
break;
}
- if (chip_good(map, chip, adr, datum))
+ if (chip_good(map, chip, adr, datum)) {
+ if (cfi_check_err_status(map, chip, adr))
+ ret = -EIO;
break;
+ }
/* Latency issues. Drop the lock, wait a while and retry */
UDELAY(map, chip, adr, 1);
@@ -1713,7 +1720,7 @@ static int __xipram do_write_oneword_start(struct map_info *map,
struct flchip *chip,
unsigned long adr, int mode)
{
- int ret = 0;
+ int ret;
mutex_lock(&chip->mutex);
@@ -1773,7 +1780,6 @@ static int __xipram do_write_oneword_retry(struct map_info *map,
ret = do_write_oneword_once(map, chip, adr, datum, mode, cfi);
if (ret) {
/* reset on all failures. */
- cfi_check_err_status(map, chip, adr);
map_write(map, CMD(0xF0), chip->start);
/* FIXME - should have reset delay before continuing */
@@ -1791,7 +1797,7 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
unsigned long adr, map_word datum,
int mode)
{
- int ret = 0;
+ int ret;
adr += chip->start;
@@ -1815,7 +1821,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
- int ret = 0;
+ int ret;
int chipnum;
unsigned long ofs, chipstart;
DECLARE_WAITQUEUE(wait, current);
@@ -1970,12 +1976,17 @@ static int __xipram do_write_buffer_wait(struct map_info *map,
*/
if (time_after(jiffies, timeo) &&
!chip_good(map, chip, adr, datum)) {
+ pr_err("MTD %s(): software timeout, address:0x%.8lx.\n",
+ __func__, adr);
ret = -EIO;
break;
}
- if (chip_good(map, chip, adr, datum))
+ if (chip_good(map, chip, adr, datum)) {
+ if (cfi_check_err_status(map, chip, adr))
+ ret = -EIO;
break;
+ }
/* Latency issues. Drop the lock, wait a while and retry */
UDELAY(map, chip, adr, 1);
@@ -2014,7 +2025,7 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
int len)
{
struct cfi_private *cfi = map->fldrv_priv;
- int ret = -EIO;
+ int ret;
unsigned long cmd_adr;
int z, words;
map_word datum;
@@ -2071,12 +2082,8 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
chip->word_write_time);
ret = do_write_buffer_wait(map, chip, adr, datum);
- if (ret) {
- cfi_check_err_status(map, chip, adr);
+ if (ret)
do_write_buffer_reset(map, chip, cfi);
- pr_err("MTD %s(): software timeout, address:0x%.8lx.\n",
- __func__, adr);
- }
xip_enable(map, chip, adr);
@@ -2095,7 +2102,7 @@ static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
- int ret = 0;
+ int ret;
int chipnum;
unsigned long ofs;
@@ -2232,7 +2239,7 @@ static int do_panic_write_oneword(struct map_info *map, struct flchip *chip,
struct cfi_private *cfi = map->fldrv_priv;
int retry_cnt = 0;
map_word oldd;
- int ret = 0;
+ int ret;
int i;
adr += chip->start;
@@ -2271,9 +2278,9 @@ retry:
udelay(1);
}
- if (!chip_good(map, chip, adr, datum)) {
+ if (!chip_good(map, chip, adr, datum) ||
+ cfi_check_err_status(map, chip, adr)) {
/* reset on all failures. */
- cfi_check_err_status(map, chip, adr);
map_write(map, CMD(0xF0), chip->start);
/* FIXME - should have reset delay before continuing */
@@ -2307,7 +2314,7 @@ static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
unsigned long ofs, chipstart;
- int ret = 0;
+ int ret;
int chipnum;
chipnum = to >> cfi->chipshift;
@@ -2411,7 +2418,7 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
unsigned long timeo = jiffies + HZ;
unsigned long int adr;
DECLARE_WAITQUEUE(wait, current);
- int ret = 0;
+ int ret;
int retry_cnt = 0;
adr = cfi->addr_unlock1;
@@ -2467,8 +2474,11 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
chip->erase_suspended = 0;
}
- if (chip_good(map, chip, adr, map_word_ff(map)))
+ if (chip_good(map, chip, adr, map_word_ff(map))) {
+ if (cfi_check_err_status(map, chip, adr))
+ ret = -EIO;
break;
+ }
if (time_after(jiffies, timeo)) {
printk(KERN_WARNING "MTD %s(): software timeout\n",
@@ -2483,7 +2493,6 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
/* Did we succeed? */
if (ret) {
/* reset on all failures. */
- cfi_check_err_status(map, chip, adr);
map_write(map, CMD(0xF0), chip->start);
/* FIXME - should have reset delay before continuing */
@@ -2508,7 +2517,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
struct cfi_private *cfi = map->fldrv_priv;
unsigned long timeo = jiffies + HZ;
DECLARE_WAITQUEUE(wait, current);
- int ret = 0;
+ int ret;
int retry_cnt = 0;
adr += chip->start;
@@ -2564,8 +2573,11 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
chip->erase_suspended = 0;
}
- if (chip_good(map, chip, adr, map_word_ff(map)))
+ if (chip_good(map, chip, adr, map_word_ff(map))) {
+ if (cfi_check_err_status(map, chip, adr))
+ ret = -EIO;
break;
+ }
if (time_after(jiffies, timeo)) {
printk(KERN_WARNING "MTD %s(): software timeout\n",
@@ -2580,7 +2592,6 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
/* Did we succeed? */
if (ret) {
/* reset on all failures. */
- cfi_check_err_status(map, chip, adr);
map_write(map, CMD(0xF0), chip->start);
/* FIXME - should have reset delay before continuing */
diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
index e752067526a5..54edae63b92d 100644
--- a/drivers/mtd/chips/cfi_cmdset_0020.c
+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
@@ -611,7 +611,7 @@ static int cfi_staa_write_buffers (struct mtd_info *mtd, loff_t to,
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
- int ret = 0;
+ int ret;
int chipnum;
unsigned long ofs;
@@ -895,7 +895,7 @@ static int cfi_staa_erase_varsize(struct mtd_info *mtd,
{ struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
unsigned long adr, len;
- int chipnum, ret = 0;
+ int chipnum, ret;
int i, first;
struct mtd_erase_region_info *regions = mtd->eraseregions;
@@ -1132,7 +1132,7 @@ static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
unsigned long adr;
- int chipnum, ret = 0;
+ int chipnum, ret;
#ifdef DEBUG_LOCK_BITS
int ofs_factor = cfi->interleave * cfi->device_type;
#endif
@@ -1279,7 +1279,7 @@ static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
unsigned long adr;
- int chipnum, ret = 0;
+ int chipnum, ret;
#ifdef DEBUG_LOCK_BITS
int ofs_factor = cfi->interleave * cfi->device_type;
#endif
diff --git a/drivers/mtd/chips/cfi_util.c b/drivers/mtd/chips/cfi_util.c
index e3b266ee06af..e2d4db05aeb3 100644
--- a/drivers/mtd/chips/cfi_util.c
+++ b/drivers/mtd/chips/cfi_util.c
@@ -26,7 +26,7 @@
void cfi_udelay(int us)
{
if (us >= 1000) {
- msleep((us+999)/1000);
+ msleep(DIV_ROUND_UP(us, 1000));
} else {
udelay(us);
cond_resched();
diff --git a/drivers/mtd/devices/mchp23k256.c b/drivers/mtd/devices/mchp23k256.c
index b20d02b4f830..77c872fd3d83 100644
--- a/drivers/mtd/devices/mchp23k256.c
+++ b/drivers/mtd/devices/mchp23k256.c
@@ -64,15 +64,17 @@ static int mchp23k256_write(struct mtd_info *mtd, loff_t to, size_t len,
struct spi_transfer transfer[2] = {};
struct spi_message message;
unsigned char command[MAX_CMD_SIZE];
- int ret;
+ int ret, cmd_len;
spi_message_init(&message);
+ cmd_len = mchp23k256_cmdsz(flash);
+
command[0] = MCHP23K256_CMD_WRITE;
mchp23k256_addr2cmd(flash, to, command);
transfer[0].tx_buf = command;
- transfer[0].len = mchp23k256_cmdsz(flash);
+ transfer[0].len = cmd_len;
spi_message_add_tail(&transfer[0], &message);
transfer[1].tx_buf = buf;
@@ -88,8 +90,8 @@ static int mchp23k256_write(struct mtd_info *mtd, loff_t to, size_t len,
if (ret)
return ret;
- if (retlen && message.actual_length > sizeof(command))
- *retlen += message.actual_length - sizeof(command);
+ if (retlen && message.actual_length > cmd_len)
+ *retlen += message.actual_length - cmd_len;
return 0;
}
@@ -101,16 +103,18 @@ static int mchp23k256_read(struct mtd_info *mtd, loff_t from, size_t len,
struct spi_transfer transfer[2] = {};
struct spi_message message;
unsigned char command[MAX_CMD_SIZE];
- int ret;
+ int ret, cmd_len;
spi_message_init(&message);
+ cmd_len = mchp23k256_cmdsz(flash);
+
memset(&transfer, 0, sizeof(transfer));
command[0] = MCHP23K256_CMD_READ;
mchp23k256_addr2cmd(flash, from, command);
transfer[0].tx_buf = command;
- transfer[0].len = mchp23k256_cmdsz(flash);
+ transfer[0].len = cmd_len;
spi_message_add_tail(&transfer[0], &message);
transfer[1].rx_buf = buf;
@@ -126,8 +130,8 @@ static int mchp23k256_read(struct mtd_info *mtd, loff_t from, size_t len,
if (ret)
return ret;
- if (retlen && message.actual_length > sizeof(command))
- *retlen += message.actual_length - sizeof(command);
+ if (retlen && message.actual_length > cmd_len)
+ *retlen += message.actual_length - cmd_len;
return 0;
}
diff --git a/drivers/mtd/devices/spear_smi.c b/drivers/mtd/devices/spear_smi.c
index 986f81d2f93e..79dcca16481d 100644
--- a/drivers/mtd/devices/spear_smi.c
+++ b/drivers/mtd/devices/spear_smi.c
@@ -592,6 +592,26 @@ static int spear_mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
return 0;
}
+/*
+ * The purpose of this function is to ensure a memcpy_toio() with byte writes
+ * only. Its structure is inspired by the ARM implementation of
+ * _memcpy_toio(), which also does single-byte writes but cannot be used here
+ * because it is an implementation detail rather than part of the API, not to
+ * mention the comment stating that _memcpy_toio() should be optimized.
+ */
+static void spear_smi_memcpy_toio_b(volatile void __iomem *dest,
+ const void *src, size_t len)
+{
+ const unsigned char *from = src;
+
+ while (len) {
+ len--;
+ writeb(*from, dest);
+ from++;
+ dest++;
+ }
+}
+
static inline int spear_smi_cpy_toio(struct spear_smi *dev, u32 bank,
void __iomem *dest, const void *src, size_t len)
{
@@ -614,7 +634,23 @@ static inline int spear_smi_cpy_toio(struct spear_smi *dev, u32 bank,
ctrlreg1 = readl(dev->io_base + SMI_CR1);
writel((ctrlreg1 | WB_MODE) & ~SW_MODE, dev->io_base + SMI_CR1);
- memcpy_toio(dest, src, len);
+ /*
+ * In Write Burst mode (WB_MODE), the specification states that writes must
+ * be:
+ * - incremental
+ * - of the same size
+ * The ARM implementation of memcpy_toio() will minimize the number of I/O
+ * accesses by using as many 4-byte writes as possible, surrounded by
+ * 2-byte/1-byte accesses if:
+ * - the destination is not 4-byte aligned
+ * - the length is not a multiple of 4 bytes.
+ * Avoid this alternation of write access sizes by using our own 'byte
+ * access' helper if at least one of the two conditions above is true.
+ */
+ if (IS_ALIGNED(len, sizeof(u32)) &&
+ IS_ALIGNED((uintptr_t)dest, sizeof(u32)))
+ memcpy_toio(dest, src, len);
+ else
+ spear_smi_memcpy_toio_b(dest, src, len);
writel(ctrlreg1, dev->io_base + SMI_CR1);
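
For illustration, under the test above a 64-byte write to a 4-byte-aligned destination keeps the fast memcpy_toio() path, while a 7-byte write, or any misaligned destination, drops to the byte-at-a-time helper. The predicate in isolation:

    /* Sketch: true when (dest, len) may use the multi-byte fast path. */
    static bool example_wb_fast_path_ok(const void __iomem *dest, size_t len)
    {
            return IS_ALIGNED(len, sizeof(u32)) &&
                   IS_ALIGNED((uintptr_t)dest, sizeof(u32));
    }
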
@@ -777,9 +813,6 @@ static int spear_smi_probe_config_dt(struct platform_device *pdev,
/* Fill structs for each subnode (flash device) */
while ((pp = of_get_next_child(np, pp))) {
- struct spear_smi_flash_info *flash_info;
-
- flash_info = &pdata->board_flash_info[i];
pdata->np[i] = pp;
/* Read base-addr and size from DT */
@@ -933,7 +966,6 @@ static int spear_smi_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
ret = -ENODEV;
- dev_err(&pdev->dev, "invalid smi irq\n");
goto err;
}
diff --git a/drivers/mtd/devices/st_spi_fsm.c b/drivers/mtd/devices/st_spi_fsm.c
index f4d1667daaf9..1888523d9745 100644
--- a/drivers/mtd/devices/st_spi_fsm.c
+++ b/drivers/mtd/devices/st_spi_fsm.c
@@ -255,7 +255,6 @@ struct stfsm_seq {
struct stfsm {
struct device *dev;
void __iomem *base;
- struct resource *region;
struct mtd_info mtd;
struct mutex lock;
struct flash_info *info;
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index bc82305ebb4c..b28225a7c4f3 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -96,6 +96,17 @@ config MTD_PHYSMAP_GEMINI
platforms, some detection and setting up parallel mode on the
external interface.
+config MTD_PHYSMAP_IXP4XX
+ bool "Intel IXP4xx OF-based physical memory map handling"
+ depends on MTD_PHYSMAP_OF
+ depends on ARM
+ select MTD_COMPLEX_MAPPINGS
+ select MTD_CFI_BE_BYTE_SWAP if CPU_BIG_ENDIAN
+ default ARCH_IXP4XX
+ help
+ This provides some extra DT physmap parsing for the Intel IXP4xx
+ platforms, some elaborate endianness handling in particular.
+
config MTD_PHYSMAP_GPIO_ADDR
bool "GPIO-assisted Flash Chip Support"
depends on MTD_PHYSMAP
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile
index 1146009f41df..c0da86a5d26f 100644
--- a/drivers/mtd/maps/Makefile
+++ b/drivers/mtd/maps/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_MTD_PXA2XX) += pxa2xx-flash.o
physmap-objs-y += physmap-core.o
physmap-objs-$(CONFIG_MTD_PHYSMAP_VERSATILE) += physmap-versatile.o
physmap-objs-$(CONFIG_MTD_PHYSMAP_GEMINI) += physmap-gemini.o
+physmap-objs-$(CONFIG_MTD_PHYSMAP_IXP4XX) += physmap-ixp4xx.o
physmap-objs := $(physmap-objs-y)
obj-$(CONFIG_MTD_PHYSMAP) += physmap.o
obj-$(CONFIG_MTD_PISMO) += pismo.o
diff --git a/drivers/mtd/maps/l440gx.c b/drivers/mtd/maps/l440gx.c
index 876f12f40018..0eeadfeb620d 100644
--- a/drivers/mtd/maps/l440gx.c
+++ b/drivers/mtd/maps/l440gx.c
@@ -86,7 +86,7 @@ static int __init init_l440gx(void)
return -ENOMEM;
}
simple_map_init(&l440gx_map);
- printk(KERN_NOTICE "window_addr = 0x%08lx\n", (unsigned long)l440gx_map.virt);
+ pr_debug("window_addr = %p\n", l440gx_map.virt);
/* Setup the pm iobase resource
* This code should move into some kind of generic bridge
diff --git a/drivers/mtd/maps/physmap-core.c b/drivers/mtd/maps/physmap-core.c
index 21b556afc305..a9f7964e2edb 100644
--- a/drivers/mtd/maps/physmap-core.c
+++ b/drivers/mtd/maps/physmap-core.c
@@ -41,6 +41,7 @@
#include <linux/gpio/consumer.h>
#include "physmap-gemini.h"
+#include "physmap-ixp4xx.h"
#include "physmap-versatile.h"
struct physmap_flash_info {
@@ -370,6 +371,10 @@ static int physmap_flash_of_init(struct platform_device *dev)
if (err)
return err;
+ err = of_flash_probe_ixp4xx(dev, dp, &info->maps[i]);
+ if (err)
+ return err;
+
err = of_flash_probe_versatile(dev, dp, &info->maps[i]);
if (err)
return err;
diff --git a/drivers/mtd/maps/physmap-ixp4xx.c b/drivers/mtd/maps/physmap-ixp4xx.c
new file mode 100644
index 000000000000..6a054229a8a0
--- /dev/null
+++ b/drivers/mtd/maps/physmap-ixp4xx.c
@@ -0,0 +1,132 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel IXP4xx OF physmap add-on
+ * Copyright (C) 2019 Linus Walleij <linus.walleij@linaro.org>
+ *
+ * Based on the ixp4xx.c map driver, originally written by:
+ * Intel Corporation
+ * Deepak Saxena <dsaxena@mvista.com>
+ * Copyright (C) 2002 Intel Corporation
+ * Copyright (C) 2003-2004 MontaVista Software, Inc.
+ */
+#include <linux/export.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/xip.h>
+#include "physmap-ixp4xx.h"
+
+/*
+ * Read/write a 16 bit word from flash address 'addr'.
+ *
+ * When the CPU is in little-endian mode, it swizzles the address lines
+ * ('address coherency') so we need to undo the swizzling to ensure commands
+ * and the like end up on the correct flash address.
+ *
+ * To further complicate matters, due to the way the expansion bus controller
+ * handles 32 bit reads, the byte stream ABCD is stored on the flash as:
+ * D15 D0
+ * +---+---+
+ * | A | B | 0
+ * +---+---+
+ * | C | D | 2
+ * +---+---+
+ * This means that on LE systems each 16 bit word must be swapped. Note that
+ * this requires CONFIG_MTD_CFI_BE_BYTE_SWAP to be enabled to 'unswap' the CFI
+ * data and other flash commands which are always in D7-D0.
+ */
+#ifndef CONFIG_CPU_BIG_ENDIAN
+
+static inline u16 flash_read16(void __iomem *addr)
+{
+ return be16_to_cpu(__raw_readw((void __iomem *)((unsigned long)addr ^ 0x2)));
+}
+
+static inline void flash_write16(u16 d, void __iomem *addr)
+{
+ __raw_writew(cpu_to_be16(d), (void __iomem *)((unsigned long)addr ^ 0x2));
+}
+
+#define BYTE0(h) ((h) & 0xFF)
+#define BYTE1(h) (((h) >> 8) & 0xFF)
+
+#else
+
+static inline u16 flash_read16(const void __iomem *addr)
+{
+ return __raw_readw(addr);
+}
+
+static inline void flash_write16(u16 d, void __iomem *addr)
+{
+ __raw_writew(d, addr);
+}
+
+#define BYTE0(h) (((h) >> 8) & 0xFF)
+#define BYTE1(h) ((h) & 0xFF)
+#endif
+
+static map_word ixp4xx_read16(struct map_info *map, unsigned long ofs)
+{
+ map_word val;
+
+ val.x[0] = flash_read16(map->virt + ofs);
+ return val;
+}
+
+/*
+ * The IXP4xx expansion bus only allows 16-bit wide accesses
+ * when attached to a 16-bit wide device (such as the 28F128J3A),
+ * so we can't just memcpy_fromio().
+ */
+static void ixp4xx_copy_from(struct map_info *map, void *to,
+ unsigned long from, ssize_t len)
+{
+ u8 *dest = (u8 *) to;
+ void __iomem *src = map->virt + from;
+
+ if (len <= 0)
+ return;
+
+ if (from & 1) {
+ *dest++ = BYTE1(flash_read16(src-1));
+ src++;
+ --len;
+ }
+
+ while (len >= 2) {
+ u16 data = flash_read16(src);
+ *dest++ = BYTE0(data);
+ *dest++ = BYTE1(data);
+ src += 2;
+ len -= 2;
+ }
+
+ if (len > 0)
+ *dest++ = BYTE0(flash_read16(src));
+}
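
An odd source offset with a leftover tail exercises all three phases of ixp4xx_copy_from() above: for example, from = 1 and len = 6 reads one head byte from the first 16-bit word, two full words, then one tail byte. A usage sketch, illustration only:

    /* Head byte + two aligned words + tail byte. */
    static void example_copy_odd_span(struct map_info *map, u8 *buf)
    {
            ixp4xx_copy_from(map, buf, 1, 6);
    }
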
+
+static void ixp4xx_write16(struct map_info *map, map_word d, unsigned long adr)
+{
+ flash_write16(d.x[0], map->virt + adr);
+}
+
+int of_flash_probe_ixp4xx(struct platform_device *pdev,
+ struct device_node *np,
+ struct map_info *map)
+{
+ struct device *dev = &pdev->dev;
+
+ /* Multiplatform guard */
+ if (!of_device_is_compatible(np, "intel,ixp4xx-flash"))
+ return 0;
+
+ map->read = ixp4xx_read16;
+ map->write = ixp4xx_write16;
+ map->copy_from = ixp4xx_copy_from;
+ map->copy_to = NULL;
+
+ dev_info(dev, "initialized Intel IXP4xx-specific physmap control\n");
+
+ return 0;
+}
diff --git a/drivers/mtd/maps/physmap-ixp4xx.h b/drivers/mtd/maps/physmap-ixp4xx.h
new file mode 100644
index 000000000000..b0fc49b7f3ed
--- /dev/null
+++ b/drivers/mtd/maps/physmap-ixp4xx.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/of.h>
+#include <linux/mtd/map.h>
+
+#ifdef CONFIG_MTD_PHYSMAP_IXP4XX
+int of_flash_probe_ixp4xx(struct platform_device *pdev,
+ struct device_node *np,
+ struct map_info *map);
+#else
+static inline
+int of_flash_probe_ixp4xx(struct platform_device *pdev,
+ struct device_node *np,
+ struct map_info *map)
+{
+ return 0;
+}
+#endif
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index 975aed94f06c..b841008a9eb7 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -174,7 +174,7 @@ static ssize_t mtdchar_read(struct file *file, char __user *buf, size_t count,
break;
case MTD_FILE_MODE_RAW:
{
- struct mtd_oob_ops ops;
+ struct mtd_oob_ops ops = {};
ops.mode = MTD_OPS_RAW;
ops.datbuf = kbuf;
@@ -268,7 +268,7 @@ static ssize_t mtdchar_write(struct file *file, const char __user *buf, size_t c
case MTD_FILE_MODE_RAW:
{
- struct mtd_oob_ops ops;
+ struct mtd_oob_ops ops = {};
ops.mode = MTD_OPS_RAW;
ops.datbuf = kbuf;
@@ -350,7 +350,7 @@ static int mtdchar_writeoob(struct file *file, struct mtd_info *mtd,
uint32_t __user *retp)
{
struct mtd_file_info *mfi = file->private_data;
- struct mtd_oob_ops ops;
+ struct mtd_oob_ops ops = {};
uint32_t retlen;
int ret = 0;
@@ -394,7 +394,7 @@ static int mtdchar_readoob(struct file *file, struct mtd_info *mtd,
uint32_t __user *retp)
{
struct mtd_file_info *mfi = file->private_data;
- struct mtd_oob_ops ops;
+ struct mtd_oob_ops ops = {};
int ret = 0;
if (length > 4096)
@@ -587,7 +587,7 @@ static int mtdchar_write_ioctl(struct mtd_info *mtd,
struct mtd_write_req __user *argp)
{
struct mtd_write_req req;
- struct mtd_oob_ops ops;
+ struct mtd_oob_ops ops = {};
const void __user *usr_data, *usr_oob;
int ret;
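
The repeated '= {}' change matters because an empty initializer zeroes every member of the structure, so fields these paths do not set explicitly (oobbuf, ooblen and friends) now start as NULL/0 rather than stack garbage. A minimal illustration of the C semantics relied on here:

    static void example_oob_ops_init(void)
    {
            struct mtd_oob_ops ops = {}; /* empty initializer zeroes all members */

            WARN_ON(ops.oobbuf || ops.ooblen); /* guaranteed NULL and 0 */
    }
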
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 6cc7ecb0c788..5fac4355b9c2 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -382,33 +382,21 @@ static struct dentry *dfs_dir_mtd;
static void mtd_debugfs_populate(struct mtd_info *mtd)
{
struct device *dev = &mtd->dev;
- struct dentry *root, *dent;
+ struct dentry *root;
if (IS_ERR_OR_NULL(dfs_dir_mtd))
return;
root = debugfs_create_dir(dev_name(dev), dfs_dir_mtd);
- if (IS_ERR_OR_NULL(root)) {
- dev_dbg(dev, "won't show data in debugfs\n");
- return;
- }
-
mtd->dbg.dfs_dir = root;
- if (mtd->dbg.partid) {
- dent = debugfs_create_file("partid", 0400, root, mtd,
- &mtd_partid_debug_fops);
- if (IS_ERR_OR_NULL(dent))
- dev_err(dev, "can't create debugfs entry for partid\n");
- }
+ if (mtd->dbg.partid)
+ debugfs_create_file("partid", 0400, root, mtd,
+ &mtd_partid_debug_fops);
- if (mtd->dbg.partname) {
- dent = debugfs_create_file("partname", 0400, root, mtd,
- &mtd_partname_debug_fops);
- if (IS_ERR_OR_NULL(dent))
- dev_err(dev,
- "can't create debugfs entry for partname\n");
- }
+ if (mtd->dbg.partname)
+ debugfs_create_file("partname", 0400, root, mtd,
+ &mtd_partname_debug_fops);
}
#ifndef CONFIG_MMU
diff --git a/drivers/mtd/mtdswap.c b/drivers/mtd/mtdswap.c
index f92414eb4c86..58eefa43af14 100644
--- a/drivers/mtd/mtdswap.c
+++ b/drivers/mtd/mtdswap.c
@@ -1257,7 +1257,6 @@ DEFINE_SHOW_ATTRIBUTE(mtdswap);
static int mtdswap_add_debugfs(struct mtdswap_dev *d)
{
struct dentry *root = d->mtd->dbg.dfs_dir;
- struct dentry *dent;
if (!IS_ENABLED(CONFIG_DEBUG_FS))
return 0;
@@ -1265,12 +1264,7 @@ static int mtdswap_add_debugfs(struct mtdswap_dev *d)
if (IS_ERR_OR_NULL(root))
return -1;
- dent = debugfs_create_file("mtdswap_stats", S_IRUSR, root, d,
- &mtdswap_fops);
- if (!dent) {
- dev_err(d->dev, "debugfs_create_file failed\n");
- return -1;
- }
+ debugfs_create_file("mtdswap_stats", S_IRUSR, root, d, &mtdswap_fops);
return 0;
}
diff --git a/drivers/mtd/nand/raw/Kconfig b/drivers/mtd/nand/raw/Kconfig
index e59de3f60cf6..74fb91adeb46 100644
--- a/drivers/mtd/nand/raw/Kconfig
+++ b/drivers/mtd/nand/raw/Kconfig
@@ -450,6 +450,13 @@ config MTD_NAND_PLATFORM
devices. You will need to provide platform-specific functions
via platform_data.
+config MTD_NAND_CADENCE
+ tristate "Support Cadence NAND (HPNFC) controller"
+ depends on OF || COMPILE_TEST
+ help
+ Enable the driver for NAND flash on platforms using a Cadence NAND
+ controller.
+
comment "Misc"
config MTD_SM_COMMON
diff --git a/drivers/mtd/nand/raw/Makefile b/drivers/mtd/nand/raw/Makefile
index a98721988e61..2d136b158fb7 100644
--- a/drivers/mtd/nand/raw/Makefile
+++ b/drivers/mtd/nand/raw/Makefile
@@ -57,6 +57,7 @@ obj-$(CONFIG_MTD_NAND_MXIC) += mxic_nand.o
obj-$(CONFIG_MTD_NAND_TEGRA) += tegra_nand.o
obj-$(CONFIG_MTD_NAND_STM32_FMC2) += stm32_fmc2_nand.o
obj-$(CONFIG_MTD_NAND_MESON) += meson_nand.o
+obj-$(CONFIG_MTD_NAND_CADENCE) += cadence-nand-controller.o
nand-objs := nand_base.o nand_legacy.o nand_bbt.o nand_timings.o nand_ids.o
nand-objs += nand_onfi.o
diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
index 15ef30b368a5..1a66b1cd51c0 100644
--- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c
+++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
@@ -117,6 +117,18 @@ enum flash_dma_reg {
FLASH_DMA_CURRENT_DESC_EXT,
};
+/* flash_dma registers v0*/
+static const u16 flash_dma_regs_v0[] = {
+ [FLASH_DMA_REVISION] = 0x00,
+ [FLASH_DMA_FIRST_DESC] = 0x04,
+ [FLASH_DMA_CTRL] = 0x08,
+ [FLASH_DMA_MODE] = 0x0c,
+ [FLASH_DMA_STATUS] = 0x10,
+ [FLASH_DMA_INTERRUPT_DESC] = 0x14,
+ [FLASH_DMA_ERROR_STATUS] = 0x18,
+ [FLASH_DMA_CURRENT_DESC] = 0x1c,
+};
+
/* flash_dma registers v1*/
static const u16 flash_dma_regs_v1[] = {
[FLASH_DMA_REVISION] = 0x00,
@@ -597,6 +609,8 @@ static void brcmnand_flash_dma_revision_init(struct brcmnand_controller *ctrl)
/* flash_dma register offsets */
if (ctrl->nand_version >= 0x0703)
ctrl->flash_dma_offsets = flash_dma_regs_v4;
+ else if (ctrl->nand_version == 0x0602)
+ ctrl->flash_dma_offsets = flash_dma_regs_v0;
else
ctrl->flash_dma_offsets = flash_dma_regs_v1;
}
@@ -918,7 +932,7 @@ static inline void disable_ctrl_irqs(struct brcmnand_controller *ctrl)
return;
if (has_flash_dma(ctrl)) {
- ctrl->flash_dma_base = 0;
+ ctrl->flash_dma_base = NULL;
disable_irq(ctrl->dma_irq);
}
@@ -1673,8 +1687,11 @@ static void brcmnand_dma_run(struct brcmnand_host *host, dma_addr_t desc)
flash_dma_writel(ctrl, FLASH_DMA_FIRST_DESC, lower_32_bits(desc));
(void)flash_dma_readl(ctrl, FLASH_DMA_FIRST_DESC);
- flash_dma_writel(ctrl, FLASH_DMA_FIRST_DESC_EXT, upper_32_bits(desc));
- (void)flash_dma_readl(ctrl, FLASH_DMA_FIRST_DESC_EXT);
+ if (ctrl->nand_version > 0x0602) {
+ flash_dma_writel(ctrl, FLASH_DMA_FIRST_DESC_EXT,
+ upper_32_bits(desc));
+ (void)flash_dma_readl(ctrl, FLASH_DMA_FIRST_DESC_EXT);
+ }
/* Start FLASH_DMA engine */
ctrl->dma_pending = true;
diff --git a/drivers/mtd/nand/raw/cadence-nand-controller.c b/drivers/mtd/nand/raw/cadence-nand-controller.c
new file mode 100644
index 000000000000..3a36285a8d8a
--- /dev/null
+++ b/drivers/mtd/nand/raw/cadence-nand-controller.c
@@ -0,0 +1,3030 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Cadence NAND flash controller driver
+ *
+ * Copyright (C) 2019 Cadence
+ *
+ * Author: Piotr Sroka <piotrs@cadence.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/of_device.h>
+#include <linux/iopoll.h>
+
+/*
+ * HPNFC can work in 3 modes:
+ * - PIO - can work in master or slave DMA
+ * - CDMA - needs master DMA for accessing command descriptors.
+ * - Generic mode - can use only slave DMA.
+ * CDMA and PIO modes can be used to execute only base commands.
+ * Generic mode can be used to execute any command on the NAND flash memory.
+ * The driver uses CDMA mode for block erasing, page reading and page
+ * programming; generic mode is used for the rest of the commands.
+ */
+
+#define MAX_OOB_SIZE_PER_SECTOR 32
+#define MAX_ADDRESS_CYC 6
+#define MAX_ERASE_ADDRESS_CYC 3
+#define MAX_DATA_SIZE 0xFFFC
+#define DMA_DATA_SIZE_ALIGN 8
+
+/* Register definition. */
+/*
+ * Command register 0.
+ * Writing data to this register will initiate a new transaction
+ * of the NF controller.
+ */
+#define CMD_REG0 0x0000
+/* Command type field mask. */
+#define CMD_REG0_CT GENMASK(31, 30)
+/* Command type CDMA. */
+#define CMD_REG0_CT_CDMA 0uL
+/* Command type generic. */
+#define CMD_REG0_CT_GEN 3uL
+/* Command thread number field mask. */
+#define CMD_REG0_TN GENMASK(27, 24)
+
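
CMD_REG0 packs the command type and thread number as bitfields; with <linux/bitfield.h>, included at the top of this file, a CDMA submission on a given thread would compose the register value roughly as follows (a sketch, not the driver's literal code):

    /* Sketch: build a CMD_REG0 value for a CDMA command on thread 'trd'. */
    static u32 example_cmd_reg0_cdma(u8 trd)
    {
            return FIELD_PREP(CMD_REG0_CT, CMD_REG0_CT_CDMA) |
                   FIELD_PREP(CMD_REG0_TN, trd);
    }
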
+/* Command register 2. */
+#define CMD_REG2 0x0008
+/* Command register 3. */
+#define CMD_REG3 0x000C
+/* Pointer register to select which thread status will be selected. */
+#define CMD_STATUS_PTR 0x0010
+/* Command status register for selected thread. */
+#define CMD_STATUS 0x0014
+
+/* Interrupt status register. */
+#define INTR_STATUS 0x0110
+#define INTR_STATUS_SDMA_ERR BIT(22)
+#define INTR_STATUS_SDMA_TRIGG BIT(21)
+#define INTR_STATUS_UNSUPP_CMD BIT(19)
+#define INTR_STATUS_DDMA_TERR BIT(18)
+#define INTR_STATUS_CDMA_TERR BIT(17)
+#define INTR_STATUS_CDMA_IDL BIT(16)
+
+/* Interrupt enable register. */
+#define INTR_ENABLE 0x0114
+#define INTR_ENABLE_INTR_EN BIT(31)
+#define INTR_ENABLE_SDMA_ERR_EN BIT(22)
+#define INTR_ENABLE_SDMA_TRIGG_EN BIT(21)
+#define INTR_ENABLE_UNSUPP_CMD_EN BIT(19)
+#define INTR_ENABLE_DDMA_TERR_EN BIT(18)
+#define INTR_ENABLE_CDMA_TERR_EN BIT(17)
+#define INTR_ENABLE_CDMA_IDLE_EN BIT(16)
+
+/* Controller internal state. */
+#define CTRL_STATUS 0x0118
+#define CTRL_STATUS_INIT_COMP BIT(9)
+#define CTRL_STATUS_CTRL_BUSY BIT(8)
+
+/* Command Engine threads state. */
+#define TRD_STATUS 0x0120
+
+/* Command Engine interrupt thread error status. */
+#define TRD_ERR_INT_STATUS 0x0128
+/* Command Engine interrupt thread error enable. */
+#define TRD_ERR_INT_STATUS_EN 0x0130
+/* Command Engine interrupt thread complete status. */
+#define TRD_COMP_INT_STATUS 0x0138
+
+/*
+ * Transfer config 0 register.
+ * Configures data transfer parameters.
+ */
+#define TRAN_CFG_0 0x0400
+/* Offset value from the beginning of the page. */
+#define TRAN_CFG_0_OFFSET GENMASK(31, 16)
+/* Number of sectors to transfer within a single NF device's page. */
+#define TRAN_CFG_0_SEC_CNT GENMASK(7, 0)
+
+/*
+ * Transfer config 1 register.
+ * Configures data transfer parameters.
+ */
+#define TRAN_CFG_1 0x0404
+/* Size of last data sector. */
+#define TRAN_CFG_1_LAST_SEC_SIZE GENMASK(31, 16)
+/* Size of not-last data sector. */
+#define TRAN_CFG_1_SECTOR_SIZE GENMASK(15, 0)
+
+/* ECC engine configuration register 0. */
+#define ECC_CONFIG_0 0x0428
+/* Correction strength. */
+#define ECC_CONFIG_0_CORR_STR GENMASK(10, 8)
+/* Enable erased pages detection mechanism. */
+#define ECC_CONFIG_0_ERASE_DET_EN BIT(1)
+/* Enable controller ECC check bits generation and correction. */
+#define ECC_CONFIG_0_ECC_EN BIT(0)
+
+/* ECC engine configuration register 1. */
+#define ECC_CONFIG_1 0x042C
+
+/* Multiplane settings register. */
+#define MULTIPLANE_CFG 0x0434
+/* Cache operation settings. */
+#define CACHE_CFG 0x0438
+
+/* DMA settings register. */
+#define DMA_SETINGS 0x043C
+/* Enable SDMA error report on access unprepared slave DMA interface. */
+#define DMA_SETINGS_SDMA_ERR_RSP BIT(17)
+
+/* Transferred data block size for the slave DMA module. */
+#define SDMA_SIZE 0x0440
+
+/*
+ * Thread number associated with the transferred data block
+ * for the slave DMA module.
+ */
+#define SDMA_TRD_NUM 0x0444
+/* Thread number mask. */
+#define SDMA_TRD_NUM_SDMA_TRD GENMASK(2, 0)
+
+#define CONTROL_DATA_CTRL 0x0494
+/* Control data size mask. */
+#define CONTROL_DATA_CTRL_SIZE GENMASK(15, 0)
+
+#define CTRL_VERSION 0x800
+#define CTRL_VERSION_REV GENMASK(7, 0)
+
+/* Available hardware features of the controller. */
+#define CTRL_FEATURES 0x804
+/* Support for NV-DDR2/3 work mode. */
+#define CTRL_FEATURES_NVDDR_2_3 BIT(28)
+/* Support for NV-DDR work mode. */
+#define CTRL_FEATURES_NVDDR BIT(27)
+/* Support for asynchronous work mode. */
+#define CTRL_FEATURES_ASYNC BIT(26)
+/* Number of banks supported by the hardware. */
+#define CTRL_FEATURES_N_BANKS GENMASK(25, 24)
+/* Slave and Master DMA data width. */
+#define CTRL_FEATURES_DMA_DWITH64 BIT(21)
+/* Availability of the Control Data feature. */
+#define CTRL_FEATURES_CONTROL_DATA BIT(10)
+
+/* BCH Engine identification register 0 - correction strengths. */
+#define BCH_CFG_0 0x838
+#define BCH_CFG_0_CORR_CAP_0 GENMASK(7, 0)
+#define BCH_CFG_0_CORR_CAP_1 GENMASK(15, 8)
+#define BCH_CFG_0_CORR_CAP_2 GENMASK(23, 16)
+#define BCH_CFG_0_CORR_CAP_3 GENMASK(31, 24)
+
+/* BCH Engine identification register 1 - correction strengths. */
+#define BCH_CFG_1 0x83C
+#define BCH_CFG_1_CORR_CAP_4 GENMASK(7, 0)
+#define BCH_CFG_1_CORR_CAP_5 GENMASK(15, 8)
+#define BCH_CFG_1_CORR_CAP_6 GENMASK(23, 16)
+#define BCH_CFG_1_CORR_CAP_7 GENMASK(31, 24)
+
+/* BCH Engine identification register 2 - sector sizes. */
+#define BCH_CFG_2 0x840
+#define BCH_CFG_2_SECT_0 GENMASK(15, 0)
+#define BCH_CFG_2_SECT_1 GENMASK(31, 16)
+
+/* BCH Engine identification register 3. */
+#define BCH_CFG_3 0x844
+
+/* Ready/Busy# line status. */
+#define RBN_SETINGS 0x1004
+
+/* Common settings. */
+#define COMMON_SET 0x1008
+/* 16 bit device connected to the NAND Flash interface. */
+#define COMMON_SET_DEVICE_16BIT BIT(8)
+
+/* Skip_bytes registers. */
+#define SKIP_BYTES_CONF 0x100C
+#define SKIP_BYTES_MARKER_VALUE GENMASK(31, 16)
+#define SKIP_BYTES_NUM_OF_BYTES GENMASK(7, 0)
+
+#define SKIP_BYTES_OFFSET 0x1010
+#define SKIP_BYTES_OFFSET_VALUE GENMASK(23, 0)
+
+/* Timings configuration. */
+#define ASYNC_TOGGLE_TIMINGS 0x101c
+#define ASYNC_TOGGLE_TIMINGS_TRH GENMASK(28, 24)
+#define ASYNC_TOGGLE_TIMINGS_TRP GENMASK(20, 16)
+#define ASYNC_TOGGLE_TIMINGS_TWH GENMASK(12, 8)
+#define ASYNC_TOGGLE_TIMINGS_TWP GENMASK(4, 0)
+
+#define TIMINGS0 0x1024
+#define TIMINGS0_TADL GENMASK(31, 24)
+#define TIMINGS0_TCCS GENMASK(23, 16)
+#define TIMINGS0_TWHR GENMASK(15, 8)
+#define TIMINGS0_TRHW GENMASK(7, 0)
+
+#define TIMINGS1 0x1028
+#define TIMINGS1_TRHZ GENMASK(31, 24)
+#define TIMINGS1_TWB GENMASK(23, 16)
+#define TIMINGS1_TVDLY GENMASK(7, 0)
+
+#define TIMINGS2 0x102c
+#define TIMINGS2_TFEAT GENMASK(25, 16)
+#define TIMINGS2_CS_HOLD_TIME GENMASK(13, 8)
+#define TIMINGS2_CS_SETUP_TIME GENMASK(5, 0)
+
+/* Configuration of the resynchronization of slave DLL of PHY. */
+#define DLL_PHY_CTRL 0x1034
+#define DLL_PHY_CTRL_DLL_RST_N BIT(24)
+#define DLL_PHY_CTRL_EXTENDED_WR_MODE BIT(17)
+#define DLL_PHY_CTRL_EXTENDED_RD_MODE BIT(16)
+#define DLL_PHY_CTRL_RS_HIGH_WAIT_CNT GENMASK(11, 8)
+#define DLL_PHY_CTRL_RS_IDLE_CNT GENMASK(7, 0)
+
+/* Register controlling DQ related timing. */
+#define PHY_DQ_TIMING 0x2000
+/* Register controlling DQS related timing. */
+#define PHY_DQS_TIMING 0x2004
+#define PHY_DQS_TIMING_DQS_SEL_OE_END GENMASK(3, 0)
+#define PHY_DQS_TIMING_PHONY_DQS_SEL BIT(16)
+#define PHY_DQS_TIMING_USE_PHONY_DQS BIT(20)
+
+/* Register controlling the gate and loopback control related timing. */
+#define PHY_GATE_LPBK_CTRL 0x2008
+#define PHY_GATE_LPBK_CTRL_RDS GENMASK(24, 19)
+
+/* Register holds the control for the master DLL logic. */
+#define PHY_DLL_MASTER_CTRL 0x200C
+#define PHY_DLL_MASTER_CTRL_BYPASS_MODE BIT(23)
+
+/* Register holds the control for the slave DLL logic. */
+#define PHY_DLL_SLAVE_CTRL 0x2010
+
+/* This register handles the global control settings for the PHY. */
+#define PHY_CTRL 0x2080
+#define PHY_CTRL_SDR_DQS BIT(14)
+#define PHY_CTRL_PHONY_DQS GENMASK(9, 4)
+
+/*
+ * This register handles the global control settings
+ * for the termination selects for reads.
+ */
+#define PHY_TSEL 0x2084
+
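+/*
+ * Generic commands are built as a single 64-bit word which is split
+ * into the CMD_REG2 (low) and CMD_REG3 (high) registers when issued;
+ * the GCMD_* fields below describe that layout.
+ */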
+/* Generic command layout. */
+#define GCMD_LAY_CS GENMASK_ULL(11, 8)
+/*
+ * This bit informs the minicontroller whether it has to wait for tWB
+ * after sending the last CMD/ADDR/DATA in the sequence.
+ */
+#define GCMD_LAY_TWB BIT_ULL(6)
+/* Type of generic instruction. */
+#define GCMD_LAY_INSTR GENMASK_ULL(5, 0)
+
+/* Generic CMD sequence type. */
+#define GCMD_LAY_INSTR_CMD 0
+/* Generic ADDR sequence type. */
+#define GCMD_LAY_INSTR_ADDR 1
+/* Generic data transfer sequence type. */
+#define GCMD_LAY_INSTR_DATA 2
+
+/* Input part of a generic command - the command opcode field. */
+#define GCMD_LAY_INPUT_CMD GENMASK_ULL(23, 16)
+
+/* Generic command address sequence - address fields. */
+#define GCMD_LAY_INPUT_ADDR GENMASK_ULL(63, 16)
+/* Generic command address sequence - address size. */
+#define GCMD_LAY_INPUT_ADDR_SIZE GENMASK_ULL(13, 11)
+
+/* Transfer direction field of generic command data sequence. */
+#define GCMD_DIR BIT_ULL(11)
+/* Read transfer direction of generic command data sequence. */
+#define GCMD_DIR_READ 0
+/* Write transfer direction of generic command data sequence. */
+#define GCMD_DIR_WRITE 1
+
+/* ECC enabled flag of generic command data sequence - ECC enabled. */
+#define GCMD_ECC_EN BIT_ULL(12)
+/* Generic command data sequence - sector size. */
+#define GCMD_SECT_SIZE GENMASK_ULL(31, 16)
+/* Generic command data sequence - sector count. */
+#define GCMD_SECT_CNT GENMASK_ULL(39, 32)
+/* Generic command data sequence - last sector size. */
+#define GCMD_LAST_SIZE GENMASK_ULL(55, 40)
+
+/* CDMA descriptor fields. */
+/* Erase command type of CDMA descriptor. */
+#define CDMA_CT_ERASE 0x1000
+/* Program page command type of CDMA descriptor. */
+#define CDMA_CT_WR 0x2100
+/* Read page command type of CDMA descriptor. */
+#define CDMA_CT_RD 0x2200
+
+/* Flash pointer memory shift. */
+#define CDMA_CFPTR_MEM_SHIFT 24
+/* Flash pointer memory mask. */
+#define CDMA_CFPTR_MEM GENMASK(26, 24)
+
+/*
+ * Command DMA descriptor flag. If set, an interrupt is issued
+ * after the completion of descriptor processing.
+ */
+#define CDMA_CF_INT BIT(8)
+/*
+ * Command DMA descriptor flag - the next descriptor
+ * address field is valid and descriptor processing should continue.
+ */
+#define CDMA_CF_CONT BIT(9)
+/* DMA master flag of command DMA descriptor. */
+#define CDMA_CF_DMA_MASTER BIT(10)
+
+/* Operation complete status of command descriptor. */
+#define CDMA_CS_COMP BIT(15)
+/* Command descriptor status - operation fail. */
+#define CDMA_CS_FAIL BIT(14)
+/* Command descriptor status - page erased. */
+#define CDMA_CS_ERP BIT(11)
+/* Command descriptor status - timeout occurred. */
+#define CDMA_CS_TOUT BIT(10)
+/*
+ * Maximum amount of correction applied to one ECC sector.
+ * It is part of command descriptor status.
+ */
+#define CDMA_CS_MAXERR GENMASK(9, 2)
+/* Command descriptor status - uncorrectable ECC error. */
+#define CDMA_CS_UNCE BIT(1)
+/* Command descriptor status - descriptor error. */
+#define CDMA_CS_ERR BIT(0)
+
+/* Status of operation - OK. */
+#define STAT_OK 0
+/* Status of operation - FAIL. */
+#define STAT_FAIL 2
+/* Status of operation - uncorrectable ECC error. */
+#define STAT_ECC_UNCORR 3
+/* Status of operation - page erased. */
+#define STAT_ERASED 5
+/* Status of operation - correctable ECC error. */
+#define STAT_ECC_CORR 6
+/* Status of operation - unexpected state. */
+#define STAT_UNKNOWN 7
+/* Status of operation - operation is not completed yet. */
+#define STAT_BUSY 0xFF
+
+#define BCH_MAX_NUM_CORR_CAPS 8
+#define BCH_MAX_NUM_SECTOR_SIZES 2
+
+struct cadence_nand_timings {
+ u32 async_toggle_timings;
+ u32 timings0;
+ u32 timings1;
+ u32 timings2;
+ u32 dll_phy_ctrl;
+ u32 phy_ctrl;
+ u32 phy_dqs_timing;
+ u32 phy_gate_lpbk_ctrl;
+};
+
+/* Command DMA descriptor. */
+struct cadence_nand_cdma_desc {
+ /* Next descriptor address. */
+ u64 next_pointer;
+
+ /* Flash address is a 32-bit address comprising BANK and ROW ADDR. */
+ u32 flash_pointer;
+ /* Field appears in HPNFC version 13. */
+ u16 bank;
+ u16 rsvd0;
+
+ /* Operation the controller needs to perform. */
+ u16 command_type;
+ u16 rsvd1;
+ /* Flags for operation of this command. */
+ u16 command_flags;
+ u16 rsvd2;
+
+ /* System/host memory address required for data DMA commands. */
+ u64 memory_pointer;
+
+ /* Status of operation. */
+ u32 status;
+ u32 rsvd3;
+
+ /* Address pointer to sync buffer location. */
+ u64 sync_flag_pointer;
+
+ /* Controls the buffer sync mechanism. */
+ u32 sync_arguments;
+ u32 rsvd4;
+
+ /* Control data pointer. */
+ u64 ctrl_data_ptr;
+};
+
+/* Interrupt status. */
+struct cadence_nand_irq_status {
+ /* Thread operation complete status. */
+ u32 trd_status;
+ /* Thread operation error. */
+ u32 trd_error;
+ /* Controller status. */
+ u32 status;
+};
+
+/* Cadence NAND flash controller capabilities obtained from driver data. */
+struct cadence_nand_dt_devdata {
+ /* Skew value of the output signals of the NAND Flash interface. */
+ u32 if_skew;
+ /* It informs if slave DMA interface is connected to DMA engine. */
+ unsigned int has_dma:1;
+};
+
+/* Cadence NAND flash controller capabilities read from registers. */
+struct cdns_nand_caps {
+ /* Maximum number of banks supported by hardware. */
+ u8 max_banks;
+ /* Slave and Master DMA data width in bytes (4 or 8). */
+ u8 data_dma_width;
+ /* Control Data feature supported. */
+ bool data_control_supp;
+ /* Is PHY type DLL. */
+ bool is_phy_type_dll;
+};
+
+struct cdns_nand_ctrl {
+ struct device *dev;
+ struct nand_controller controller;
+ struct cadence_nand_cdma_desc *cdma_desc;
+ /* IP capability. */
+ const struct cadence_nand_dt_devdata *caps1;
+ struct cdns_nand_caps caps2;
+ u8 ctrl_rev;
+ dma_addr_t dma_cdma_desc;
+ u8 *buf;
+ u32 buf_size;
+ u8 curr_corr_str_idx;
+
+ /* Register interface. */
+ void __iomem *reg;
+
+ struct {
+ void __iomem *virt;
+ dma_addr_t dma;
+ } io;
+
+ int irq;
+ /* Interrupts that have happened. */
+ struct cadence_nand_irq_status irq_status;
+ /* Interrupts we are waiting for. */
+ struct cadence_nand_irq_status irq_mask;
+ struct completion complete;
+ /* Protect irq_mask and irq_status. */
+ spinlock_t irq_lock;
+
+ int ecc_strengths[BCH_MAX_NUM_CORR_CAPS];
+ struct nand_ecc_step_info ecc_stepinfos[BCH_MAX_NUM_SECTOR_SIZES];
+ struct nand_ecc_caps ecc_caps;
+
+ int curr_trans_type;
+
+ struct dma_chan *dmac;
+
+ u32 nf_clk_rate;
+ /*
+ * Estimated Board delay. The value includes the total
+ * round trip delay for the signals and is used for deciding on values
+ * associated with data read capture.
+ */
+ u32 board_delay;
+
+ struct nand_chip *selected_chip;
+
+ unsigned long assigned_cs;
+ struct list_head chips;
+};
+
+struct cdns_nand_chip {
+ struct cadence_nand_timings timings;
+ struct nand_chip chip;
+ u8 nsels;
+ struct list_head node;
+
+ /*
+ * Part of the OOB area of the NAND flash memory page.
+ * This part is available for the user to read or write.
+ */
+ u32 avail_oob_size;
+
+ /* Sector size. There are a few sectors per mtd->writesize. */
+ u32 sector_size;
+ u32 sector_count;
+
+ /* Offset of BBM. */
+ u8 bbm_offs;
+ /* Number of bytes reserved for BBM. */
+ u8 bbm_len;
+ /* ECC strength index. */
+ u8 corr_str_idx;
+
+ u8 cs[];
+};
+
+struct ecc_info {
+ int (*calc_ecc_bytes)(int step_size, int strength);
+ int max_step_size;
+};
+
+static inline struct
+cdns_nand_chip *to_cdns_nand_chip(struct nand_chip *chip)
+{
+ return container_of(chip, struct cdns_nand_chip, chip);
+}
+
+static inline struct
+cdns_nand_ctrl *to_cdns_nand_ctrl(struct nand_controller *controller)
+{
+ return container_of(controller, struct cdns_nand_ctrl, controller);
+}
+
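+/*
+ * A buffer may be handed directly to the DMA engine only if it is
+ * valid kernel virtual memory, aligned to the DMA data width and
+ * its length is a multiple of DMA_DATA_SIZE_ALIGN.
+ */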
+static bool
+cadence_nand_dma_buf_ok(struct cdns_nand_ctrl *cdns_ctrl, const void *buf,
+ u32 buf_len)
+{
+ u8 data_dma_width = cdns_ctrl->caps2.data_dma_width;
+
+ return buf && virt_addr_valid(buf) &&
+ likely(IS_ALIGNED((uintptr_t)buf, data_dma_width)) &&
+ likely(IS_ALIGNED(buf_len, DMA_DATA_SIZE_ALIGN));
+}
+
+static int cadence_nand_wait_for_value(struct cdns_nand_ctrl *cdns_ctrl,
+ u32 reg_offset, u32 timeout_us,
+ u32 mask, bool is_clear)
+{
+ u32 val;
+ int ret;
+
+ ret = readl_relaxed_poll_timeout(cdns_ctrl->reg + reg_offset,
+ val, !(val & mask) == is_clear,
+ 10, timeout_us);
+
+ if (ret < 0) {
+ dev_err(cdns_ctrl->dev,
+ "Timeout while waiting for reg %x with mask %x is clear %d\n",
+ reg_offset, mask, is_clear);
+ }
+
+ return ret;
+}
+
+static int cadence_nand_set_ecc_enable(struct cdns_nand_ctrl *cdns_ctrl,
+ bool enable)
+{
+ u32 reg;
+
+ if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
+ 1000000,
+ CTRL_STATUS_CTRL_BUSY, true))
+ return -ETIMEDOUT;
+
+ reg = readl_relaxed(cdns_ctrl->reg + ECC_CONFIG_0);
+
+ if (enable)
+ reg |= ECC_CONFIG_0_ECC_EN;
+ else
+ reg &= ~ECC_CONFIG_0_ECC_EN;
+
+ writel_relaxed(reg, cdns_ctrl->reg + ECC_CONFIG_0);
+
+ return 0;
+}
+
+static void cadence_nand_set_ecc_strength(struct cdns_nand_ctrl *cdns_ctrl,
+ u8 corr_str_idx)
+{
+ u32 reg;
+
+ if (cdns_ctrl->curr_corr_str_idx == corr_str_idx)
+ return;
+
+ reg = readl_relaxed(cdns_ctrl->reg + ECC_CONFIG_0);
+ reg &= ~ECC_CONFIG_0_CORR_STR;
+ reg |= FIELD_PREP(ECC_CONFIG_0_CORR_STR, corr_str_idx);
+ writel_relaxed(reg, cdns_ctrl->reg + ECC_CONFIG_0);
+
+ cdns_ctrl->curr_corr_str_idx = corr_str_idx;
+}
+
+static int cadence_nand_get_ecc_strength_idx(struct cdns_nand_ctrl *cdns_ctrl,
+ u8 strength)
+{
+ int i, corr_str_idx = -1;
+
+ for (i = 0; i < BCH_MAX_NUM_CORR_CAPS; i++) {
+ if (cdns_ctrl->ecc_strengths[i] == strength) {
+ corr_str_idx = i;
+ break;
+ }
+ }
+
+ return corr_str_idx;
+}
+
+static int cadence_nand_set_skip_marker_val(struct cdns_nand_ctrl *cdns_ctrl,
+ u16 marker_value)
+{
+ u32 reg;
+
+ if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
+ 1000000,
+ CTRL_STATUS_CTRL_BUSY, true))
+ return -ETIMEDOUT;
+
+ reg = readl_relaxed(cdns_ctrl->reg + SKIP_BYTES_CONF);
+ reg &= ~SKIP_BYTES_MARKER_VALUE;
+ reg |= FIELD_PREP(SKIP_BYTES_MARKER_VALUE,
+ marker_value);
+
+ writel_relaxed(reg, cdns_ctrl->reg + SKIP_BYTES_CONF);
+
+ return 0;
+}
+
+static int cadence_nand_set_skip_bytes_conf(struct cdns_nand_ctrl *cdns_ctrl,
+ u8 num_of_bytes,
+ u32 offset_value,
+ int enable)
+{
+ u32 reg, skip_bytes_offset;
+
+ if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
+ 1000000,
+ CTRL_STATUS_CTRL_BUSY, true))
+ return -ETIMEDOUT;
+
+ if (!enable) {
+ num_of_bytes = 0;
+ offset_value = 0;
+ }
+
+ reg = readl_relaxed(cdns_ctrl->reg + SKIP_BYTES_CONF);
+ reg &= ~SKIP_BYTES_NUM_OF_BYTES;
+ reg |= FIELD_PREP(SKIP_BYTES_NUM_OF_BYTES,
+ num_of_bytes);
+ skip_bytes_offset = FIELD_PREP(SKIP_BYTES_OFFSET_VALUE,
+ offset_value);
+
+ writel_relaxed(reg, cdns_ctrl->reg + SKIP_BYTES_CONF);
+ writel_relaxed(skip_bytes_offset, cdns_ctrl->reg + SKIP_BYTES_OFFSET);
+
+ return 0;
+}
+
+/* Enable/disable hardware detection of erased data pages. */
+static void cadence_nand_set_erase_detection(struct cdns_nand_ctrl *cdns_ctrl,
+ bool enable,
+ u8 bitflips_threshold)
+{
+ u32 reg;
+
+ reg = readl_relaxed(cdns_ctrl->reg + ECC_CONFIG_0);
+
+ if (enable)
+ reg |= ECC_CONFIG_0_ERASE_DET_EN;
+ else
+ reg &= ~ECC_CONFIG_0_ERASE_DET_EN;
+
+ writel_relaxed(reg, cdns_ctrl->reg + ECC_CONFIG_0);
+ writel_relaxed(bitflips_threshold, cdns_ctrl->reg + ECC_CONFIG_1);
+}
+
+static int cadence_nand_set_access_width16(struct cdns_nand_ctrl *cdns_ctrl,
+ bool bit_bus16)
+{
+ u32 reg;
+
+ if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
+ 1000000,
+ CTRL_STATUS_CTRL_BUSY, true))
+ return -ETIMEDOUT;
+
+ reg = readl_relaxed(cdns_ctrl->reg + COMMON_SET);
+
+ if (!bit_bus16)
+ reg &= ~COMMON_SET_DEVICE_16BIT;
+ else
+ reg |= COMMON_SET_DEVICE_16BIT;
+ writel_relaxed(reg, cdns_ctrl->reg + COMMON_SET);
+
+ return 0;
+}
+
+static void
+cadence_nand_clear_interrupt(struct cdns_nand_ctrl *cdns_ctrl,
+ struct cadence_nand_irq_status *irq_status)
+{
+ writel_relaxed(irq_status->status, cdns_ctrl->reg + INTR_STATUS);
+ writel_relaxed(irq_status->trd_status,
+ cdns_ctrl->reg + TRD_COMP_INT_STATUS);
+ writel_relaxed(irq_status->trd_error,
+ cdns_ctrl->reg + TRD_ERR_INT_STATUS);
+}
+
+static void
+cadence_nand_read_int_status(struct cdns_nand_ctrl *cdns_ctrl,
+ struct cadence_nand_irq_status *irq_status)
+{
+ irq_status->status = readl_relaxed(cdns_ctrl->reg + INTR_STATUS);
+ irq_status->trd_status = readl_relaxed(cdns_ctrl->reg
+ + TRD_COMP_INT_STATUS);
+ irq_status->trd_error = readl_relaxed(cdns_ctrl->reg
+ + TRD_ERR_INT_STATUS);
+}
+
+static u32 irq_detected(struct cdns_nand_ctrl *cdns_ctrl,
+ struct cadence_nand_irq_status *irq_status)
+{
+ cadence_nand_read_int_status(cdns_ctrl, irq_status);
+
+ return irq_status->status || irq_status->trd_status ||
+ irq_status->trd_error;
+}
+
+static void cadence_nand_reset_irq(struct cdns_nand_ctrl *cdns_ctrl)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cdns_ctrl->irq_lock, flags);
+ memset(&cdns_ctrl->irq_status, 0, sizeof(cdns_ctrl->irq_status));
+ memset(&cdns_ctrl->irq_mask, 0, sizeof(cdns_ctrl->irq_mask));
+ spin_unlock_irqrestore(&cdns_ctrl->irq_lock, flags);
+}
+
+/*
+ * This is the interrupt service routine. It handles all interrupts
+ * sent to this device.
+ */
+static irqreturn_t cadence_nand_isr(int irq, void *dev_id)
+{
+ struct cdns_nand_ctrl *cdns_ctrl = dev_id;
+ struct cadence_nand_irq_status irq_status;
+ irqreturn_t result = IRQ_NONE;
+
+ spin_lock(&cdns_ctrl->irq_lock);
+
+ if (irq_detected(cdns_ctrl, &irq_status)) {
+ /* Handle interrupt. */
+ /* First acknowledge it. */
+ cadence_nand_clear_interrupt(cdns_ctrl, &irq_status);
+ /* Save the status in the device context for someone to read. */
+ cdns_ctrl->irq_status.status |= irq_status.status;
+ cdns_ctrl->irq_status.trd_status |= irq_status.trd_status;
+ cdns_ctrl->irq_status.trd_error |= irq_status.trd_error;
+ /* Notify anyone who cares that it happened. */
+ complete(&cdns_ctrl->complete);
+ /* Tell the OS that we've handled this. */
+ result = IRQ_HANDLED;
+ }
+ spin_unlock(&cdns_ctrl->irq_lock);
+
+ return result;
+}
+
+static void cadence_nand_set_irq_mask(struct cdns_nand_ctrl *cdns_ctrl,
+ struct cadence_nand_irq_status *irq_mask)
+{
+ writel_relaxed(INTR_ENABLE_INTR_EN | irq_mask->status,
+ cdns_ctrl->reg + INTR_ENABLE);
+
+ writel_relaxed(irq_mask->trd_error,
+ cdns_ctrl->reg + TRD_ERR_INT_STATUS_EN);
+}
+
+static void
+cadence_nand_wait_for_irq(struct cdns_nand_ctrl *cdns_ctrl,
+ struct cadence_nand_irq_status *irq_mask,
+ struct cadence_nand_irq_status *irq_status)
+{
+ unsigned long timeout = msecs_to_jiffies(10000);
+ unsigned long time_left;
+
+ time_left = wait_for_completion_timeout(&cdns_ctrl->complete,
+ timeout);
+
+ *irq_status = cdns_ctrl->irq_status;
+ if (time_left == 0) {
+ /* Timeout error. */
+ dev_err(cdns_ctrl->dev, "timeout occurred:\n");
+ dev_err(cdns_ctrl->dev, "\tstatus = 0x%x, mask = 0x%x\n",
+ irq_status->status, irq_mask->status);
+ dev_err(cdns_ctrl->dev,
+ "\ttrd_status = 0x%x, trd_status mask = 0x%x\n",
+ irq_status->trd_status, irq_mask->trd_status);
+ dev_err(cdns_ctrl->dev,
+ "\t trd_error = 0x%x, trd_error mask = 0x%x\n",
+ irq_status->trd_error, irq_mask->trd_error);
+ }
+}
+
+/* Execute generic command on NAND controller. */
+static int cadence_nand_generic_cmd_send(struct cdns_nand_ctrl *cdns_ctrl,
+ u8 chip_nr,
+ u64 mini_ctrl_cmd)
+{
+ u32 mini_ctrl_cmd_l, mini_ctrl_cmd_h, reg;
+
+ mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_CS, chip_nr);
+ mini_ctrl_cmd_l = mini_ctrl_cmd & 0xFFFFFFFF;
+ mini_ctrl_cmd_h = mini_ctrl_cmd >> 32;
+
+ if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
+ 1000000,
+ CTRL_STATUS_CTRL_BUSY, true))
+ return -ETIMEDOUT;
+
+ cadence_nand_reset_irq(cdns_ctrl);
+
+ writel_relaxed(mini_ctrl_cmd_l, cdns_ctrl->reg + CMD_REG2);
+ writel_relaxed(mini_ctrl_cmd_h, cdns_ctrl->reg + CMD_REG3);
+
+ /* Select generic command. */
+ reg = FIELD_PREP(CMD_REG0_CT, CMD_REG0_CT_GEN);
+ /* Thread number. */
+ reg |= FIELD_PREP(CMD_REG0_TN, 0);
+
+ /* Issue command. */
+ writel_relaxed(reg, cdns_ctrl->reg + CMD_REG0);
+
+ return 0;
+}
+
+/* Wait for data on slave DMA interface. */
+static int cadence_nand_wait_on_sdma(struct cdns_nand_ctrl *cdns_ctrl,
+ u8 *out_sdma_trd,
+ u32 *out_sdma_size)
+{
+ struct cadence_nand_irq_status irq_mask, irq_status;
+
+ irq_mask.trd_status = 0;
+ irq_mask.trd_error = 0;
+ irq_mask.status = INTR_STATUS_SDMA_TRIGG
+ | INTR_STATUS_SDMA_ERR
+ | INTR_STATUS_UNSUPP_CMD;
+
+ cadence_nand_set_irq_mask(cdns_ctrl, &irq_mask);
+ cadence_nand_wait_for_irq(cdns_ctrl, &irq_mask, &irq_status);
+ if (irq_status.status == 0) {
+ dev_err(cdns_ctrl->dev, "Timeout while waiting for SDMA\n");
+ return -ETIMEDOUT;
+ }
+
+ if (irq_status.status & INTR_STATUS_SDMA_TRIGG) {
+ *out_sdma_size = readl_relaxed(cdns_ctrl->reg + SDMA_SIZE);
+ *out_sdma_trd = readl_relaxed(cdns_ctrl->reg + SDMA_TRD_NUM);
+ *out_sdma_trd =
+ FIELD_GET(SDMA_TRD_NUM_SDMA_TRD, *out_sdma_trd);
+ } else {
+ dev_err(cdns_ctrl->dev, "SDMA error - irq_status %x\n",
+ irq_status.status);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void cadence_nand_get_caps(struct cdns_nand_ctrl *cdns_ctrl)
+{
+ u32 reg;
+
+ reg = readl_relaxed(cdns_ctrl->reg + CTRL_FEATURES);
+
+ cdns_ctrl->caps2.max_banks = 1 << FIELD_GET(CTRL_FEATURES_N_BANKS, reg);
+
+ if (FIELD_GET(CTRL_FEATURES_DMA_DWITH64, reg))
+ cdns_ctrl->caps2.data_dma_width = 8;
+ else
+ cdns_ctrl->caps2.data_dma_width = 4;
+
+ if (reg & CTRL_FEATURES_CONTROL_DATA)
+ cdns_ctrl->caps2.data_control_supp = true;
+
+ if (reg & (CTRL_FEATURES_NVDDR_2_3
+ | CTRL_FEATURES_NVDDR))
+ cdns_ctrl->caps2.is_phy_type_dll = true;
+}
+
+/* Prepare CDMA descriptor. */
+static void
+cadence_nand_cdma_desc_prepare(struct cdns_nand_ctrl *cdns_ctrl,
+ char nf_mem, u32 flash_ptr, char *mem_ptr,
+ char *ctrl_data_ptr, u16 ctype)
+{
+ struct cadence_nand_cdma_desc *cdma_desc = cdns_ctrl->cdma_desc;
+
+ memset(cdma_desc, 0, sizeof(struct cadence_nand_cdma_desc));
+
+ /* Set fields for one descriptor. */
+ cdma_desc->flash_pointer = flash_ptr;
+ if (cdns_ctrl->ctrl_rev >= 13)
+ cdma_desc->bank = nf_mem;
+ else
+ cdma_desc->flash_pointer |= (nf_mem << CDMA_CFPTR_MEM_SHIFT);
+
+ cdma_desc->command_flags |= CDMA_CF_DMA_MASTER;
+ cdma_desc->command_flags |= CDMA_CF_INT;
+
+ cdma_desc->memory_pointer = (uintptr_t)mem_ptr;
+ cdma_desc->status = 0;
+ cdma_desc->sync_flag_pointer = 0;
+ cdma_desc->sync_arguments = 0;
+
+ cdma_desc->command_type = ctype;
+ cdma_desc->ctrl_data_ptr = (uintptr_t)ctrl_data_ptr;
+}
+
+static u8 cadence_nand_check_desc_error(struct cdns_nand_ctrl *cdns_ctrl,
+ u32 desc_status)
+{
+ if (desc_status & CDMA_CS_ERP)
+ return STAT_ERASED;
+
+ if (desc_status & CDMA_CS_UNCE)
+ return STAT_ECC_UNCORR;
+
+ if (desc_status & CDMA_CS_ERR) {
+ dev_err(cdns_ctrl->dev, ":CDMA desc error flag detected.\n");
+ return STAT_FAIL;
+ }
+
+ if (FIELD_GET(CDMA_CS_MAXERR, desc_status))
+ return STAT_ECC_CORR;
+
+ return STAT_FAIL;
+}
+
+static int cadence_nand_cdma_finish(struct cdns_nand_ctrl *cdns_ctrl)
+{
+ struct cadence_nand_cdma_desc *desc_ptr = cdns_ctrl->cdma_desc;
+ u8 status = STAT_BUSY;
+
+ if (desc_ptr->status & CDMA_CS_FAIL) {
+ status = cadence_nand_check_desc_error(cdns_ctrl,
+ desc_ptr->status);
+ dev_err(cdns_ctrl->dev, ":CDMA error %x\n", desc_ptr->status);
+ } else if (desc_ptr->status & CDMA_CS_COMP) {
+ /* Descriptor finished with no errors. */
+ if (desc_ptr->command_flags & CDMA_CF_CONT) {
+ dev_info(cdns_ctrl->dev, "DMA unsupported flag is set");
+ status = STAT_UNKNOWN;
+ } else {
+ /* Last descriptor. */
+ status = STAT_OK;
+ }
+ }
+
+ return status;
+}
+
+static int cadence_nand_cdma_send(struct cdns_nand_ctrl *cdns_ctrl,
+ u8 thread)
+{
+ u32 reg;
+ int status;
+
+ /* Wait for thread ready. */
+ status = cadence_nand_wait_for_value(cdns_ctrl, TRD_STATUS,
+ 1000000,
+ BIT(thread), true);
+ if (status)
+ return status;
+
+ cadence_nand_reset_irq(cdns_ctrl);
+
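+ /*
+ * Only the low 32 bits of the descriptor address are programmed;
+ * CMD_REG3 is cleared, assuming the descriptor was allocated
+ * within the 32-bit DMA address range.
+ */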
+ writel_relaxed((u32)cdns_ctrl->dma_cdma_desc,
+ cdns_ctrl->reg + CMD_REG2);
+ writel_relaxed(0, cdns_ctrl->reg + CMD_REG3);
+
+ /* Select CDMA mode. */
+ reg = FIELD_PREP(CMD_REG0_CT, CMD_REG0_CT_CDMA);
+ /* Thread number. */
+ reg |= FIELD_PREP(CMD_REG0_TN, thread);
+ /* Issue command. */
+ writel_relaxed(reg, cdns_ctrl->reg + CMD_REG0);
+
+ return 0;
+}
+
+/* Send CDMA command and wait for it to finish. */
+static u32
+cadence_nand_cdma_send_and_wait(struct cdns_nand_ctrl *cdns_ctrl,
+ u8 thread)
+{
+ struct cadence_nand_irq_status irq_mask, irq_status = {0};
+ int status;
+
+ irq_mask.trd_status = BIT(thread);
+ irq_mask.trd_error = BIT(thread);
+ irq_mask.status = INTR_STATUS_CDMA_TERR;
+
+ cadence_nand_set_irq_mask(cdns_ctrl, &irq_mask);
+
+ status = cadence_nand_cdma_send(cdns_ctrl, thread);
+ if (status)
+ return status;
+
+ cadence_nand_wait_for_irq(cdns_ctrl, &irq_mask, &irq_status);
+
+ if (irq_status.status == 0 && irq_status.trd_status == 0 &&
+ irq_status.trd_error == 0) {
+ dev_err(cdns_ctrl->dev, "CDMA command timeout\n");
+ return -ETIMEDOUT;
+ }
+ if (irq_status.status & irq_mask.status) {
+ dev_err(cdns_ctrl->dev, "CDMA command failed\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/*
+ * ECC size depends on configured ECC strength and on maximum supported
+ * ECC step size.
+ */
+static int cadence_nand_calc_ecc_bytes(int max_step_size, int strength)
+{
+ int nbytes = DIV_ROUND_UP(fls(8 * max_step_size) * strength, 8);
+
+ return ALIGN(nbytes, 2);
+}
+
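+/*
+ * nand_ecc_caps expects a calc_ecc_bytes() callback taking only the step
+ * size and strength, so generate one wrapper per supported maximum step
+ * size.
+ */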
+#define CADENCE_NAND_CALC_ECC_BYTES(max_step_size) \
+ static int \
+ cadence_nand_calc_ecc_bytes_##max_step_size(int step_size, \
+ int strength)\
+ {\
+ return cadence_nand_calc_ecc_bytes(max_step_size, strength);\
+ }
+
+CADENCE_NAND_CALC_ECC_BYTES(256)
+CADENCE_NAND_CALC_ECC_BYTES(512)
+CADENCE_NAND_CALC_ECC_BYTES(1024)
+CADENCE_NAND_CALC_ECC_BYTES(2048)
+CADENCE_NAND_CALC_ECC_BYTES(4096)
+
+/* Function reads BCH capabilities. */
+static int cadence_nand_read_bch_caps(struct cdns_nand_ctrl *cdns_ctrl)
+{
+ struct nand_ecc_caps *ecc_caps = &cdns_ctrl->ecc_caps;
+ int max_step_size = 0, nstrengths, i;
+ u32 reg;
+
+ reg = readl_relaxed(cdns_ctrl->reg + BCH_CFG_0);
+ cdns_ctrl->ecc_strengths[0] = FIELD_GET(BCH_CFG_0_CORR_CAP_0, reg);
+ cdns_ctrl->ecc_strengths[1] = FIELD_GET(BCH_CFG_0_CORR_CAP_1, reg);
+ cdns_ctrl->ecc_strengths[2] = FIELD_GET(BCH_CFG_0_CORR_CAP_2, reg);
+ cdns_ctrl->ecc_strengths[3] = FIELD_GET(BCH_CFG_0_CORR_CAP_3, reg);
+
+ reg = readl_relaxed(cdns_ctrl->reg + BCH_CFG_1);
+ cdns_ctrl->ecc_strengths[4] = FIELD_GET(BCH_CFG_1_CORR_CAP_4, reg);
+ cdns_ctrl->ecc_strengths[5] = FIELD_GET(BCH_CFG_1_CORR_CAP_5, reg);
+ cdns_ctrl->ecc_strengths[6] = FIELD_GET(BCH_CFG_1_CORR_CAP_6, reg);
+ cdns_ctrl->ecc_strengths[7] = FIELD_GET(BCH_CFG_1_CORR_CAP_7, reg);
+
+ reg = readl_relaxed(cdns_ctrl->reg + BCH_CFG_2);
+ cdns_ctrl->ecc_stepinfos[0].stepsize =
+ FIELD_GET(BCH_CFG_2_SECT_0, reg);
+
+ cdns_ctrl->ecc_stepinfos[1].stepsize =
+ FIELD_GET(BCH_CFG_2_SECT_1, reg);
+
+ nstrengths = 0;
+ for (i = 0; i < BCH_MAX_NUM_CORR_CAPS; i++) {
+ if (cdns_ctrl->ecc_strengths[i] != 0)
+ nstrengths++;
+ }
+
+ ecc_caps->nstepinfos = 0;
+ for (i = 0; i < BCH_MAX_NUM_SECTOR_SIZES; i++) {
+ /* ECC strengths are common for all step infos. */
+ cdns_ctrl->ecc_stepinfos[i].nstrengths = nstrengths;
+ cdns_ctrl->ecc_stepinfos[i].strengths =
+ cdns_ctrl->ecc_strengths;
+
+ if (cdns_ctrl->ecc_stepinfos[i].stepsize != 0)
+ ecc_caps->nstepinfos++;
+
+ if (cdns_ctrl->ecc_stepinfos[i].stepsize > max_step_size)
+ max_step_size = cdns_ctrl->ecc_stepinfos[i].stepsize;
+ }
+ ecc_caps->stepinfos = &cdns_ctrl->ecc_stepinfos[0];
+
+ switch (max_step_size) {
+ case 256:
+ ecc_caps->calc_ecc_bytes = &cadence_nand_calc_ecc_bytes_256;
+ break;
+ case 512:
+ ecc_caps->calc_ecc_bytes = &cadence_nand_calc_ecc_bytes_512;
+ break;
+ case 1024:
+ ecc_caps->calc_ecc_bytes = &cadence_nand_calc_ecc_bytes_1024;
+ break;
+ case 2048:
+ ecc_caps->calc_ecc_bytes = &cadence_nand_calc_ecc_bytes_2048;
+ break;
+ case 4096:
+ ecc_caps->calc_ecc_bytes = &cadence_nand_calc_ecc_bytes_4096;
+ break;
+ default:
+ dev_err(cdns_ctrl->dev,
+ "Unsupported sector size(ecc step size) %d\n",
+ max_step_size);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/* Hardware initialization. */
+static int cadence_nand_hw_init(struct cdns_nand_ctrl *cdns_ctrl)
+{
+ int status;
+ u32 reg;
+
+ status = cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
+ 1000000,
+ CTRL_STATUS_INIT_COMP, false);
+ if (status)
+ return status;
+
+ reg = readl_relaxed(cdns_ctrl->reg + CTRL_VERSION);
+ cdns_ctrl->ctrl_rev = FIELD_GET(CTRL_VERSION_REV, reg);
+
+ dev_info(cdns_ctrl->dev,
+ "%s: cadence nand controller version reg %x\n",
+ __func__, reg);
+
+ /* Disable cache and multiplane. */
+ writel_relaxed(0, cdns_ctrl->reg + MULTIPLANE_CFG);
+ writel_relaxed(0, cdns_ctrl->reg + CACHE_CFG);
+
+ /* Clear all interrupts. */
+ writel_relaxed(0xFFFFFFFF, cdns_ctrl->reg + INTR_STATUS);
+
+ cadence_nand_get_caps(cdns_ctrl);
+ cadence_nand_read_bch_caps(cdns_ctrl);
+
+ /*
+ * Set IO width access to 8.
+ * This is because during SW device discovery the width access
+ * is expected to be 8-bit.
+ */
+ status = cadence_nand_set_access_width16(cdns_ctrl, false);
+
+ return status;
+}
+
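+/* Transfer types used by cadence_nand_prepare_data_size(). */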
+#define TT_MAIN_OOB_AREAS 2
+#define TT_RAW_PAGE 3
+#define TT_BBM 4
+#define TT_MAIN_OOB_AREA_EXT 5
+
+/* Prepare size of data to transfer. */
+static void
+cadence_nand_prepare_data_size(struct nand_chip *chip,
+ int transfer_type)
+{
+ struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+ struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ u32 sec_size = 0, offset = 0, sec_cnt = 1;
+ u32 last_sec_size = cdns_chip->sector_size;
+ u32 data_ctrl_size = 0;
+ u32 reg = 0;
+
+ if (cdns_ctrl->curr_trans_type == transfer_type)
+ return;
+
+ switch (transfer_type) {
+ case TT_MAIN_OOB_AREA_EXT:
+ sec_cnt = cdns_chip->sector_count;
+ sec_size = cdns_chip->sector_size;
+ data_ctrl_size = cdns_chip->avail_oob_size;
+ break;
+ case TT_MAIN_OOB_AREAS:
+ sec_cnt = cdns_chip->sector_count;
+ last_sec_size = cdns_chip->sector_size
+ + cdns_chip->avail_oob_size;
+ sec_size = cdns_chip->sector_size;
+ break;
+ case TT_RAW_PAGE:
+ last_sec_size = mtd->writesize + mtd->oobsize;
+ break;
+ case TT_BBM:
+ offset = mtd->writesize + cdns_chip->bbm_offs;
+ last_sec_size = 8;
+ break;
+ }
+
+ reg = 0;
+ reg |= FIELD_PREP(TRAN_CFG_0_OFFSET, offset);
+ reg |= FIELD_PREP(TRAN_CFG_0_SEC_CNT, sec_cnt);
+ writel_relaxed(reg, cdns_ctrl->reg + TRAN_CFG_0);
+
+ reg = 0;
+ reg |= FIELD_PREP(TRAN_CFG_1_LAST_SEC_SIZE, last_sec_size);
+ reg |= FIELD_PREP(TRAN_CFG_1_SECTOR_SIZE, sec_size);
+ writel_relaxed(reg, cdns_ctrl->reg + TRAN_CFG_1);
+
+ if (cdns_ctrl->caps2.data_control_supp) {
+ reg = readl_relaxed(cdns_ctrl->reg + CONTROL_DATA_CTRL);
+ reg &= ~CONTROL_DATA_CTRL_SIZE;
+ reg |= FIELD_PREP(CONTROL_DATA_CTRL_SIZE, data_ctrl_size);
+ writel_relaxed(reg, cdns_ctrl->reg + CONTROL_DATA_CTRL);
+ }
+
+ cdns_ctrl->curr_trans_type = transfer_type;
+}
+
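+/*
+ * Execute a CDMA transaction: map the data buffer (and the optional
+ * control data buffer) for DMA, run the descriptor on the thread
+ * matching the chip select and translate the resulting status.
+ */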
+static int
+cadence_nand_cdma_transfer(struct cdns_nand_ctrl *cdns_ctrl, u8 chip_nr,
+ int page, void *buf, void *ctrl_dat, u32 buf_size,
+ u32 ctrl_dat_size, enum dma_data_direction dir,
+ bool with_ecc)
+{
+ dma_addr_t dma_buf, dma_ctrl_dat = 0;
+ u8 thread_nr = chip_nr;
+ int status;
+ u16 ctype;
+
+ if (dir == DMA_FROM_DEVICE)
+ ctype = CDMA_CT_RD;
+ else
+ ctype = CDMA_CT_WR;
+
+ cadence_nand_set_ecc_enable(cdns_ctrl, with_ecc);
+
+ dma_buf = dma_map_single(cdns_ctrl->dev, buf, buf_size, dir);
+ if (dma_mapping_error(cdns_ctrl->dev, dma_buf)) {
+ dev_err(cdns_ctrl->dev, "Failed to map DMA buffer\n");
+ return -EIO;
+ }
+
+ if (ctrl_dat && ctrl_dat_size) {
+ dma_ctrl_dat = dma_map_single(cdns_ctrl->dev, ctrl_dat,
+ ctrl_dat_size, dir);
+ if (dma_mapping_error(cdns_ctrl->dev, dma_ctrl_dat)) {
+ dma_unmap_single(cdns_ctrl->dev, dma_buf,
+ buf_size, dir);
+ dev_err(cdns_ctrl->dev, "Failed to map DMA buffer\n");
+ return -EIO;
+ }
+ }
+
+ cadence_nand_cdma_desc_prepare(cdns_ctrl, chip_nr, page,
+ (void *)dma_buf, (void *)dma_ctrl_dat,
+ ctype);
+
+ status = cadence_nand_cdma_send_and_wait(cdns_ctrl, thread_nr);
+
+ dma_unmap_single(cdns_ctrl->dev, dma_buf,
+ buf_size, dir);
+
+ if (ctrl_dat && ctrl_dat_size)
+ dma_unmap_single(cdns_ctrl->dev, dma_ctrl_dat,
+ ctrl_dat_size, dir);
+ if (status)
+ return status;
+
+ return cadence_nand_cdma_finish(cdns_ctrl);
+}
+
+static void cadence_nand_set_timings(struct cdns_nand_ctrl *cdns_ctrl,
+ struct cadence_nand_timings *t)
+{
+ writel_relaxed(t->async_toggle_timings,
+ cdns_ctrl->reg + ASYNC_TOGGLE_TIMINGS);
+ writel_relaxed(t->timings0, cdns_ctrl->reg + TIMINGS0);
+ writel_relaxed(t->timings1, cdns_ctrl->reg + TIMINGS1);
+ writel_relaxed(t->timings2, cdns_ctrl->reg + TIMINGS2);
+
+ if (cdns_ctrl->caps2.is_phy_type_dll)
+ writel_relaxed(t->dll_phy_ctrl, cdns_ctrl->reg + DLL_PHY_CTRL);
+
+ writel_relaxed(t->phy_ctrl, cdns_ctrl->reg + PHY_CTRL);
+
+ if (cdns_ctrl->caps2.is_phy_type_dll) {
+ writel_relaxed(0, cdns_ctrl->reg + PHY_TSEL);
+ writel_relaxed(2, cdns_ctrl->reg + PHY_DQ_TIMING);
+ writel_relaxed(t->phy_dqs_timing,
+ cdns_ctrl->reg + PHY_DQS_TIMING);
+ writel_relaxed(t->phy_gate_lpbk_ctrl,
+ cdns_ctrl->reg + PHY_GATE_LPBK_CTRL);
+ writel_relaxed(PHY_DLL_MASTER_CTRL_BYPASS_MODE,
+ cdns_ctrl->reg + PHY_DLL_MASTER_CTRL);
+ writel_relaxed(0, cdns_ctrl->reg + PHY_DLL_SLAVE_CTRL);
+ }
+}
+
+static int cadence_nand_select_target(struct nand_chip *chip)
+{
+ struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+ struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+
+ if (chip == cdns_ctrl->selected_chip)
+ return 0;
+
+ if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
+ 1000000,
+ CTRL_STATUS_CTRL_BUSY, true))
+ return -ETIMEDOUT;
+
+ cadence_nand_set_timings(cdns_ctrl, &cdns_chip->timings);
+
+ cadence_nand_set_ecc_strength(cdns_ctrl,
+ cdns_chip->corr_str_idx);
+
+ cadence_nand_set_erase_detection(cdns_ctrl, true,
+ chip->ecc.strength);
+
+ cdns_ctrl->curr_trans_type = -1;
+ cdns_ctrl->selected_chip = chip;
+
+ return 0;
+}
+
+static int cadence_nand_erase(struct nand_chip *chip, u32 page)
+{
+ struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+ struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+ int status;
+ u8 thread_nr = cdns_chip->cs[chip->cur_cs];
+
+ cadence_nand_cdma_desc_prepare(cdns_ctrl,
+ cdns_chip->cs[chip->cur_cs],
+ page, NULL, NULL,
+ CDMA_CT_ERASE);
+ status = cadence_nand_cdma_send_and_wait(cdns_ctrl, thread_nr);
+ if (status) {
+ dev_err(cdns_ctrl->dev, "erase operation failed\n");
+ return -EIO;
+ }
+
+ status = cadence_nand_cdma_finish(cdns_ctrl);
+ if (status)
+ return status;
+
+ return 0;
+}
+
+static int cadence_nand_read_bbm(struct nand_chip *chip, int page, u8 *buf)
+{
+ int status;
+ struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+ struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ cadence_nand_prepare_data_size(chip, TT_BBM);
+
+ cadence_nand_set_skip_bytes_conf(cdns_ctrl, 0, 0, 0);
+
+ /*
+ * Read only the bad block marker from the offset
+ * defined by the memory manufacturer.
+ */
+ status = cadence_nand_cdma_transfer(cdns_ctrl,
+ cdns_chip->cs[chip->cur_cs],
+ page, cdns_ctrl->buf, NULL,
+ mtd->oobsize,
+ 0, DMA_FROM_DEVICE, false);
+ if (status) {
+ dev_err(cdns_ctrl->dev, "read BBM failed\n");
+ return -EIO;
+ }
+
+ memcpy(buf + cdns_chip->bbm_offs, cdns_ctrl->buf, cdns_chip->bbm_len);
+
+ return 0;
+}
+
+static int cadence_nand_write_page(struct nand_chip *chip,
+ const u8 *buf, int oob_required,
+ int page)
+{
+ struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+ struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int status;
+ u16 marker_val = 0xFFFF;
+
+ status = cadence_nand_select_target(chip);
+ if (status)
+ return status;
+
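+ /*
+ * Configure the skip bytes feature so the controller preserves the
+ * bad block marker area: bbm_len bytes at the BBM offset are skipped
+ * and replaced with the marker value set below.
+ */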
+ cadence_nand_set_skip_bytes_conf(cdns_ctrl, cdns_chip->bbm_len,
+ mtd->writesize
+ + cdns_chip->bbm_offs,
+ 1);
+
+ if (oob_required) {
+ marker_val = *(u16 *)(chip->oob_poi
+ + cdns_chip->bbm_offs);
+ } else {
+ /* Set oob data to 0xFF. */
+ memset(cdns_ctrl->buf + mtd->writesize, 0xFF,
+ cdns_chip->avail_oob_size);
+ }
+
+ cadence_nand_set_skip_marker_val(cdns_ctrl, marker_val);
+
+ cadence_nand_prepare_data_size(chip, TT_MAIN_OOB_AREA_EXT);
+
+ if (cadence_nand_dma_buf_ok(cdns_ctrl, buf, mtd->writesize) &&
+ cdns_ctrl->caps2.data_control_supp) {
+ u8 *oob;
+
+ if (oob_required)
+ oob = chip->oob_poi;
+ else
+ oob = cdns_ctrl->buf + mtd->writesize;
+
+ status = cadence_nand_cdma_transfer(cdns_ctrl,
+ cdns_chip->cs[chip->cur_cs],
+ page, (void *)buf, oob,
+ mtd->writesize,
+ cdns_chip->avail_oob_size,
+ DMA_TO_DEVICE, true);
+ if (status) {
+ dev_err(cdns_ctrl->dev, "write page failed\n");
+ return -EIO;
+ }
+
+ return 0;
+ }
+
+ if (oob_required) {
+ /* Transfer the data to the oob area. */
+ memcpy(cdns_ctrl->buf + mtd->writesize, chip->oob_poi,
+ cdns_chip->avail_oob_size);
+ }
+
+ memcpy(cdns_ctrl->buf, buf, mtd->writesize);
+
+ cadence_nand_prepare_data_size(chip, TT_MAIN_OOB_AREAS);
+
+ return cadence_nand_cdma_transfer(cdns_ctrl,
+ cdns_chip->cs[chip->cur_cs],
+ page, cdns_ctrl->buf, NULL,
+ mtd->writesize
+ + cdns_chip->avail_oob_size,
+ 0, DMA_TO_DEVICE, true);
+}
+
+static int cadence_nand_write_oob(struct nand_chip *chip, int page)
+{
+ struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ memset(cdns_ctrl->buf, 0xFF, mtd->writesize);
+
+ return cadence_nand_write_page(chip, cdns_ctrl->buf, 1, page);
+}
+
+static int cadence_nand_write_page_raw(struct nand_chip *chip,
+ const u8 *buf, int oob_required,
+ int page)
+{
+ struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+ struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int writesize = mtd->writesize;
+ int oobsize = mtd->oobsize;
+ int ecc_steps = chip->ecc.steps;
+ int ecc_size = chip->ecc.size;
+ int ecc_bytes = chip->ecc.bytes;
+ void *tmp_buf = cdns_ctrl->buf;
+ int oob_skip = cdns_chip->bbm_len;
+ size_t size = writesize + oobsize;
+ int i, pos, len;
+ int status = 0;
+
+ status = cadence_nand_select_target(chip);
+ if (status)
+ return status;
+
+ /*
+ * Fill the buffer with 0xff first, except for a full page transfer.
+ * This simplifies the logic.
+ */
+ if (!buf || !oob_required)
+ memset(tmp_buf, 0xff, size);
+
+ cadence_nand_set_skip_bytes_conf(cdns_ctrl, 0, 0, 0);
+
+ /* Arrange the buffer for syndrome payload/ecc layout. */
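+ /*
+ * Each ECC step stores ecc_size payload bytes followed by ecc_bytes
+ * of ECC; a step crossing the writesize boundary continues in the
+ * OOB area just after the skipped BBM bytes.
+ */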
+ if (buf) {
+ for (i = 0; i < ecc_steps; i++) {
+ pos = i * (ecc_size + ecc_bytes);
+ len = ecc_size;
+
+ if (pos >= writesize)
+ pos += oob_skip;
+ else if (pos + len > writesize)
+ len = writesize - pos;
+
+ memcpy(tmp_buf + pos, buf, len);
+ buf += len;
+ if (len < ecc_size) {
+ len = ecc_size - len;
+ memcpy(tmp_buf + writesize + oob_skip, buf,
+ len);
+ buf += len;
+ }
+ }
+ }
+
+ if (oob_required) {
+ const u8 *oob = chip->oob_poi;
+ u32 oob_data_offset = (cdns_chip->sector_count - 1) *
+ (cdns_chip->sector_size + chip->ecc.bytes)
+ + cdns_chip->sector_size + oob_skip;
+
+ /* BBM at the beginning of the OOB area. */
+ memcpy(tmp_buf + writesize, oob, oob_skip);
+
+ /* OOB free. */
+ memcpy(tmp_buf + oob_data_offset, oob,
+ cdns_chip->avail_oob_size);
+ oob += cdns_chip->avail_oob_size;
+
+ /* OOB ECC. */
+ for (i = 0; i < ecc_steps; i++) {
+ pos = ecc_size + i * (ecc_size + ecc_bytes);
+ if (i == (ecc_steps - 1))
+ pos += cdns_chip->avail_oob_size;
+
+ len = ecc_bytes;
+
+ if (pos >= writesize)
+ pos += oob_skip;
+ else if (pos + len > writesize)
+ len = writesize - pos;
+
+ memcpy(tmp_buf + pos, oob, len);
+ oob += len;
+ if (len < ecc_bytes) {
+ len = ecc_bytes - len;
+ memcpy(tmp_buf + writesize + oob_skip, oob,
+ len);
+ oob += len;
+ }
+ }
+ }
+
+ cadence_nand_prepare_data_size(chip, TT_RAW_PAGE);
+
+ return cadence_nand_cdma_transfer(cdns_ctrl,
+ cdns_chip->cs[chip->cur_cs],
+ page, cdns_ctrl->buf, NULL,
+ mtd->writesize +
+ mtd->oobsize,
+ 0, DMA_TO_DEVICE, false);
+}
+
+static int cadence_nand_write_oob_raw(struct nand_chip *chip,
+ int page)
+{
+ return cadence_nand_write_page_raw(chip, NULL, true, page);
+}
+
+static int cadence_nand_read_page(struct nand_chip *chip,
+ u8 *buf, int oob_required, int page)
+{
+ struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+ struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int status = 0;
+ int ecc_err_count = 0;
+
+ status = cadence_nand_select_target(chip);
+ if (status)
+ return status;
+
+ cadence_nand_set_skip_bytes_conf(cdns_ctrl, cdns_chip->bbm_len,
+ mtd->writesize
+ + cdns_chip->bbm_offs, 1);
+
+ /*
+ * If data buffer can be accessed by DMA and data_control feature
+ * is supported then transfer data and oob directly.
+ */
+ if (cadence_nand_dma_buf_ok(cdns_ctrl, buf, mtd->writesize) &&
+ cdns_ctrl->caps2.data_control_supp) {
+ u8 *oob;
+
+ if (oob_required)
+ oob = chip->oob_poi;
+ else
+ oob = cdns_ctrl->buf + mtd->writesize;
+
+ cadence_nand_prepare_data_size(chip, TT_MAIN_OOB_AREA_EXT);
+ status = cadence_nand_cdma_transfer(cdns_ctrl,
+ cdns_chip->cs[chip->cur_cs],
+ page, buf, oob,
+ mtd->writesize,
+ cdns_chip->avail_oob_size,
+ DMA_FROM_DEVICE, true);
+ /* Otherwise use bounce buffer. */
+ } else {
+ cadence_nand_prepare_data_size(chip, TT_MAIN_OOB_AREAS);
+ status = cadence_nand_cdma_transfer(cdns_ctrl,
+ cdns_chip->cs[chip->cur_cs],
+ page, cdns_ctrl->buf,
+ NULL, mtd->writesize
+ + cdns_chip->avail_oob_size,
+ 0, DMA_FROM_DEVICE, true);
+
+ memcpy(buf, cdns_ctrl->buf, mtd->writesize);
+ if (oob_required)
+ memcpy(chip->oob_poi,
+ cdns_ctrl->buf + mtd->writesize,
+ mtd->oobsize);
+ }
+
+ switch (status) {
+ case STAT_ECC_UNCORR:
+ mtd->ecc_stats.failed++;
+ ecc_err_count++;
+ break;
+ case STAT_ECC_CORR:
+ ecc_err_count = FIELD_GET(CDMA_CS_MAXERR,
+ cdns_ctrl->cdma_desc->status);
+ mtd->ecc_stats.corrected += ecc_err_count;
+ break;
+ case STAT_ERASED:
+ case STAT_OK:
+ break;
+ default:
+ dev_err(cdns_ctrl->dev, "read page failed\n");
+ return -EIO;
+ }
+
+ if (oob_required)
+ if (cadence_nand_read_bbm(chip, page, chip->oob_poi))
+ return -EIO;
+
+ return ecc_err_count;
+}
+
+/* Reads OOB data from the device. */
+static int cadence_nand_read_oob(struct nand_chip *chip, int page)
+{
+ struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+
+ return cadence_nand_read_page(chip, cdns_ctrl->buf, 1, page);
+}
+
+static int cadence_nand_read_page_raw(struct nand_chip *chip,
+ u8 *buf, int oob_required, int page)
+{
+ struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+ struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int oob_skip = cdns_chip->bbm_len;
+ int writesize = mtd->writesize;
+ int ecc_steps = chip->ecc.steps;
+ int ecc_size = chip->ecc.size;
+ int ecc_bytes = chip->ecc.bytes;
+ void *tmp_buf = cdns_ctrl->buf;
+ int i, pos, len;
+ int status = 0;
+
+ status = cadence_nand_select_target(chip);
+ if (status)
+ return status;
+
+ cadence_nand_set_skip_bytes_conf(cdns_ctrl, 0, 0, 0);
+
+ cadence_nand_prepare_data_size(chip, TT_RAW_PAGE);
+ status = cadence_nand_cdma_transfer(cdns_ctrl,
+ cdns_chip->cs[chip->cur_cs],
+ page, cdns_ctrl->buf, NULL,
+ mtd->writesize
+ + mtd->oobsize,
+ 0, DMA_FROM_DEVICE, false);
+
+ switch (status) {
+ case STAT_ERASED:
+ case STAT_OK:
+ break;
+ default:
+ dev_err(cdns_ctrl->dev, "read raw page failed\n");
+ return -EIO;
+ }
+
+ /* Arrange the buffer for syndrome payload/ecc layout. */
+ if (buf) {
+ for (i = 0; i < ecc_steps; i++) {
+ pos = i * (ecc_size + ecc_bytes);
+ len = ecc_size;
+
+ if (pos >= writesize)
+ pos += oob_skip;
+ else if (pos + len > writesize)
+ len = writesize - pos;
+
+ memcpy(buf, tmp_buf + pos, len);
+ buf += len;
+ if (len < ecc_size) {
+ len = ecc_size - len;
+ memcpy(buf, tmp_buf + writesize + oob_skip,
+ len);
+ buf += len;
+ }
+ }
+ }
+
+ if (oob_required) {
+ u8 *oob = chip->oob_poi;
+ u32 oob_data_offset = (cdns_chip->sector_count - 1) *
+ (cdns_chip->sector_size + chip->ecc.bytes)
+ + cdns_chip->sector_size + oob_skip;
+
+ /* OOB free. */
+ memcpy(oob, tmp_buf + oob_data_offset,
+ cdns_chip->avail_oob_size);
+
+ /* BBM at the beginning of the OOB area. */
+ memcpy(oob, tmp_buf + writesize, oob_skip);
+
+ oob += cdns_chip->avail_oob_size;
+
+ /* OOB ECC */
+ for (i = 0; i < ecc_steps; i++) {
+ pos = ecc_size + i * (ecc_size + ecc_bytes);
+ len = ecc_bytes;
+
+ if (i == (ecc_steps - 1))
+ pos += cdns_chip->avail_oob_size;
+
+ if (pos >= writesize)
+ pos += oob_skip;
+ else if (pos + len > writesize)
+ len = writesize - pos;
+
+ memcpy(oob, tmp_buf + pos, len);
+ oob += len;
+ if (len < ecc_bytes) {
+ len = ecc_bytes - len;
+ memcpy(oob, tmp_buf + writesize + oob_skip,
+ len);
+ oob += len;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int cadence_nand_read_oob_raw(struct nand_chip *chip,
+ int page)
+{
+ return cadence_nand_read_page_raw(chip, NULL, true, page);
+}
+
+static void cadence_nand_slave_dma_transfer_finished(void *data)
+{
+ struct completion *finished = data;
+
+ complete(finished);
+}
+
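+/*
+ * Move data between memory and the slave DMA aperture using the external
+ * DMA engine's memcpy capability; completion is signalled through the
+ * descriptor callback above.
+ */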
+static int cadence_nand_slave_dma_transfer(struct cdns_nand_ctrl *cdns_ctrl,
+ void *buf,
+ dma_addr_t dev_dma, size_t len,
+ enum dma_data_direction dir)
+{
+ DECLARE_COMPLETION_ONSTACK(finished);
+ struct dma_chan *chan;
+ struct dma_device *dma_dev;
+ dma_addr_t src_dma, dst_dma, buf_dma;
+ struct dma_async_tx_descriptor *tx;
+ dma_cookie_t cookie;
+
+ chan = cdns_ctrl->dmac;
+ dma_dev = chan->device;
+
+ buf_dma = dma_map_single(dma_dev->dev, buf, len, dir);
+ if (dma_mapping_error(dma_dev->dev, buf_dma)) {
+ dev_err(cdns_ctrl->dev, "Failed to map DMA buffer\n");
+ goto err;
+ }
+
+ if (dir == DMA_FROM_DEVICE) {
+ src_dma = cdns_ctrl->io.dma;
+ dst_dma = buf_dma;
+ } else {
+ src_dma = buf_dma;
+ dst_dma = cdns_ctrl->io.dma;
+ }
+
+ tx = dmaengine_prep_dma_memcpy(cdns_ctrl->dmac, dst_dma, src_dma, len,
+ DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
+ if (!tx) {
+ dev_err(cdns_ctrl->dev, "Failed to prepare DMA memcpy\n");
+ goto err_unmap;
+ }
+
+ tx->callback = cadence_nand_slave_dma_transfer_finished;
+ tx->callback_param = &finished;
+
+ cookie = dmaengine_submit(tx);
+ if (dma_submit_error(cookie)) {
+ dev_err(cdns_ctrl->dev, "Failed to do DMA tx_submit\n");
+ goto err_unmap;
+ }
+
+ dma_async_issue_pending(cdns_ctrl->dmac);
+ wait_for_completion(&finished);
+
+ dma_unmap_single(cdns_ctrl->dev, buf_dma, len, dir);
+
+ return 0;
+
+err_unmap:
+ dma_unmap_single(cdns_ctrl->dev, buf_dma, len, dir);
+
+err:
+ dev_dbg(cdns_ctrl->dev, "Fall back to CPU I/O\n");
+
+ return -EIO;
+}
+
+static int cadence_nand_read_buf(struct cdns_nand_ctrl *cdns_ctrl,
+ u8 *buf, int len)
+{
+ u8 thread_nr = 0;
+ u32 sdma_size;
+ int status;
+
+ /* Wait until the slave DMA interface is ready to transfer data. */
+ status = cadence_nand_wait_on_sdma(cdns_ctrl, &thread_nr, &sdma_size);
+ if (status)
+ return status;
+
+ if (!cdns_ctrl->caps1->has_dma) {
+ int len_in_words = len >> 2;
+
+ /* Read the 32-bit aligned part of the data. */
+ ioread32_rep(cdns_ctrl->io.virt, buf, len_in_words);
+ if (sdma_size > len) {
+ /* Read the remaining words from the slave DMA interface, if any. */
+ ioread32_rep(cdns_ctrl->io.virt, cdns_ctrl->buf,
+ sdma_size / 4 - len_in_words);
+ /* Copy the unaligned tail of the data. */
+ memcpy(buf + (len_in_words << 2), cdns_ctrl->buf,
+ len - (len_in_words << 2));
+ }
+ return 0;
+ }
+
+ if (cadence_nand_dma_buf_ok(cdns_ctrl, buf, len)) {
+ status = cadence_nand_slave_dma_transfer(cdns_ctrl, buf,
+ cdns_ctrl->io.dma,
+ len, DMA_FROM_DEVICE);
+ if (status == 0)
+ return 0;
+
+ dev_warn(cdns_ctrl->dev,
+ "Slave DMA transfer failed. Try again using bounce buffer.");
+ }
+
+ /* If DMA transfer is not possible or failed then use bounce buffer. */
+ status = cadence_nand_slave_dma_transfer(cdns_ctrl, cdns_ctrl->buf,
+ cdns_ctrl->io.dma,
+ sdma_size, DMA_FROM_DEVICE);
+
+ if (status) {
+ dev_err(cdns_ctrl->dev, "Slave DMA transfer failed");
+ return status;
+ }
+
+ memcpy(buf, cdns_ctrl->buf, len);
+
+ return 0;
+}
+
+static int cadence_nand_write_buf(struct cdns_nand_ctrl *cdns_ctrl,
+ const u8 *buf, int len)
+{
+ u8 thread_nr = 0;
+ u32 sdma_size;
+ int status;
+
+ /* Wait until the slave DMA interface is ready to transfer data. */
+ status = cadence_nand_wait_on_sdma(cdns_ctrl, &thread_nr, &sdma_size);
+ if (status)
+ return status;
+
+ if (!cdns_ctrl->caps1->has_dma) {
+ int len_in_words = len >> 2;
+
+ iowrite32_rep(cdns_ctrl->io.virt, buf, len_in_words);
+ if (sdma_size > len) {
+ /* Copy the unaligned tail of the data. */
+ memcpy(cdns_ctrl->buf, buf + (len_in_words << 2),
+ len - (len_in_words << 2));
+ /* Write all of the data expected by the NAND controller. */
+ iowrite32_rep(cdns_ctrl->io.virt, cdns_ctrl->buf,
+ sdma_size / 4 - len_in_words);
+ }
+
+ return 0;
+ }
+
+ if (cadence_nand_dma_buf_ok(cdns_ctrl, buf, len)) {
+ status = cadence_nand_slave_dma_transfer(cdns_ctrl, (void *)buf,
+ cdns_ctrl->io.dma,
+ len, DMA_TO_DEVICE);
+ if (status == 0)
+ return 0;
+
+ dev_warn(cdns_ctrl->dev,
+ "Slave DMA transfer failed. Try again using bounce buffer.");
+ }
+
+ /* If DMA transfer is not possible or failed then use bounce buffer. */
+ memcpy(cdns_ctrl->buf, buf, len);
+
+ status = cadence_nand_slave_dma_transfer(cdns_ctrl, cdns_ctrl->buf,
+ cdns_ctrl->io.dma,
+ sdma_size, DMA_TO_DEVICE);
+
+ if (status)
+ dev_err(cdns_ctrl->dev, "Slave DMA transfer failed");
+
+ return status;
+}
+
+static int cadence_nand_force_byte_access(struct nand_chip *chip,
+ bool force_8bit)
+{
+ struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+ int status;
+
+ /*
+ * Callers of this function do not verify if the NAND is using a 16-bit
+ * or an 8-bit bus for normal operations, so we need to take care of that
+ * here by leaving the configuration unchanged if the NAND does not have
+ * the NAND_BUSWIDTH_16 flag set.
+ */
+ if (!(chip->options & NAND_BUSWIDTH_16))
+ return 0;
+
+ status = cadence_nand_set_access_width16(cdns_ctrl, !force_8bit);
+
+ return status;
+}
+
+static int cadence_nand_cmd_opcode(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+ struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+ const struct nand_op_instr *instr;
+ unsigned int op_id = 0;
+ u64 mini_ctrl_cmd = 0;
+ int ret;
+
+ instr = &subop->instrs[op_id];
+
+ if (instr->delay_ns > 0)
+ mini_ctrl_cmd |= GCMD_LAY_TWB;
+
+ mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INSTR,
+ GCMD_LAY_INSTR_CMD);
+ mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INPUT_CMD,
+ instr->ctx.cmd.opcode);
+
+ ret = cadence_nand_generic_cmd_send(cdns_ctrl,
+ cdns_chip->cs[chip->cur_cs],
+ mini_ctrl_cmd);
+ if (ret)
+ dev_err(cdns_ctrl->dev, "send cmd %x failed\n",
+ instr->ctx.cmd.opcode);
+
+ return ret;
+}
+
+static int cadence_nand_cmd_address(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+ struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+ const struct nand_op_instr *instr;
+ unsigned int op_id = 0;
+ u64 mini_ctrl_cmd = 0;
+ unsigned int offset, naddrs;
+ u64 address = 0;
+ const u8 *addrs;
+ int ret;
+ int i;
+
+ instr = &subop->instrs[op_id];
+
+ if (instr->delay_ns > 0)
+ mini_ctrl_cmd |= GCMD_LAY_TWB;
+
+ mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INSTR,
+ GCMD_LAY_INSTR_ADDR);
+
+ offset = nand_subop_get_addr_start_off(subop, op_id);
+ naddrs = nand_subop_get_num_addr_cyc(subop, op_id);
+ addrs = &instr->ctx.addr.addrs[offset];
+
+ for (i = 0; i < naddrs; i++)
+ address |= (u64)addrs[i] << (8 * i);
+
+ mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INPUT_ADDR,
+ address);
+ mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INPUT_ADDR_SIZE,
+ naddrs - 1);
+
+ ret = cadence_nand_generic_cmd_send(cdns_ctrl,
+ cdns_chip->cs[chip->cur_cs],
+ mini_ctrl_cmd);
+ if (ret)
+ dev_err(cdns_ctrl->dev, "send address %llx failed\n", address);
+
+ return ret;
+}
+
+static int cadence_nand_cmd_erase(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ unsigned int op_id;
+
+ if (subop->instrs[0].ctx.cmd.opcode == NAND_CMD_ERASE1) {
+ int i;
+ const struct nand_op_instr *instr = NULL;
+ unsigned int offset, naddrs;
+ const u8 *addrs;
+ u32 page = 0;
+
+ instr = &subop->instrs[1];
+ offset = nand_subop_get_addr_start_off(subop, 1);
+ naddrs = nand_subop_get_num_addr_cyc(subop, 1);
+ addrs = &instr->ctx.addr.addrs[offset];
+
+ for (i = 0; i < naddrs; i++)
+ page |= (u32)addrs[i] << (8 * i);
+
+ return cadence_nand_erase(chip, page);
+ }
+
+ /*
+ * If it is not an erase operation then handle operation
+ * by calling exec_op function.
+ */
+ for (op_id = 0; op_id < subop->ninstrs; op_id++) {
+ int ret;
+ const struct nand_operation nand_op = {
+ .cs = chip->cur_cs,
+ .instrs = &subop->instrs[op_id],
+ .ninstrs = 1};
+ ret = chip->controller->ops->exec_op(chip, &nand_op, false);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int cadence_nand_cmd_data(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+ struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+ const struct nand_op_instr *instr;
+ unsigned int offset, op_id = 0;
+ u64 mini_ctrl_cmd = 0;
+ int len = 0;
+ int ret;
+
+ instr = &subop->instrs[op_id];
+
+ if (instr->delay_ns > 0)
+ mini_ctrl_cmd |= GCMD_LAY_TWB;
+
+ mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INSTR,
+ GCMD_LAY_INSTR_DATA);
+
+ if (instr->type == NAND_OP_DATA_OUT_INSTR)
+ mini_ctrl_cmd |= FIELD_PREP(GCMD_DIR,
+ GCMD_DIR_WRITE);
+
+ len = nand_subop_get_data_len(subop, op_id);
+ offset = nand_subop_get_data_start_off(subop, op_id);
+ mini_ctrl_cmd |= FIELD_PREP(GCMD_SECT_CNT, 1);
+ mini_ctrl_cmd |= FIELD_PREP(GCMD_LAST_SIZE, len);
+ if (instr->ctx.data.force_8bit) {
+ ret = cadence_nand_force_byte_access(chip, true);
+ if (ret) {
+ dev_err(cdns_ctrl->dev,
+ "cannot change byte access generic data cmd failed\n");
+ return ret;
+ }
+ }
+
+ ret = cadence_nand_generic_cmd_send(cdns_ctrl,
+ cdns_chip->cs[chip->cur_cs],
+ mini_ctrl_cmd);
+ if (ret) {
+ dev_err(cdns_ctrl->dev, "send generic data cmd failed\n");
+ return ret;
+ }
+
+ if (instr->type == NAND_OP_DATA_IN_INSTR) {
+ void *buf = instr->ctx.data.buf.in + offset;
+
+ ret = cadence_nand_read_buf(cdns_ctrl, buf, len);
+ } else {
+ const void *buf = instr->ctx.data.buf.out + offset;
+
+ ret = cadence_nand_write_buf(cdns_ctrl, buf, len);
+ }
+
+ if (ret) {
+ dev_err(cdns_ctrl->dev, "data transfer failed for generic command\n");
+ return ret;
+ }
+
+ if (instr->ctx.data.force_8bit) {
+ ret = cadence_nand_force_byte_access(chip, false);
+ if (ret) {
+ dev_err(cdns_ctrl->dev,
+				"cannot change byte access mode, generic data cmd failed\n");
+ }
+ }
+
+ return ret;
+}
+
+static int cadence_nand_cmd_waitrdy(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ int status;
+ unsigned int op_id = 0;
+ struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+ struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+ const struct nand_op_instr *instr = &subop->instrs[op_id];
+ u32 timeout_us = instr->ctx.waitrdy.timeout_ms * 1000;
+
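+	/* Wait until the R/B# line of this chip select reports the device as ready. */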
+ status = cadence_nand_wait_for_value(cdns_ctrl, RBN_SETINGS,
+ timeout_us,
+ BIT(cdns_chip->cs[chip->cur_cs]),
+ false);
+ return status;
+}
+
+static const struct nand_op_parser cadence_nand_op_parser = NAND_OP_PARSER(
+ NAND_OP_PARSER_PATTERN(
+ cadence_nand_cmd_erase,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ERASE_ADDRESS_CYC),
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
+ NAND_OP_PARSER_PATTERN(
+ cadence_nand_cmd_opcode,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false)),
+ NAND_OP_PARSER_PATTERN(
+ cadence_nand_cmd_address,
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ADDRESS_CYC)),
+ NAND_OP_PARSER_PATTERN(
+ cadence_nand_cmd_data,
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, MAX_DATA_SIZE)),
+ NAND_OP_PARSER_PATTERN(
+ cadence_nand_cmd_data,
+ NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, MAX_DATA_SIZE)),
+ NAND_OP_PARSER_PATTERN(
+ cadence_nand_cmd_waitrdy,
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(false))
+ );
+
+static int cadence_nand_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op,
+ bool check_only)
+{
+ int status = cadence_nand_select_target(chip);
+
+ if (status)
+ return status;
+
+ return nand_op_parser_exec_op(chip, &cadence_nand_op_parser, op,
+ check_only);
+}
+
+static int cadence_nand_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+
+ if (section)
+ return -ERANGE;
+
+ oobregion->offset = cdns_chip->bbm_len;
+ oobregion->length = cdns_chip->avail_oob_size
+ - cdns_chip->bbm_len;
+
+ return 0;
+}
+
+static int cadence_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+
+ if (section)
+ return -ERANGE;
+
+ oobregion->offset = cdns_chip->avail_oob_size;
+ oobregion->length = chip->ecc.total;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops cadence_nand_ooblayout_ops = {
+ .free = cadence_nand_ooblayout_free,
+ .ecc = cadence_nand_ooblayout_ecc,
+};
+
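+/*
+ * Convert a timing value into a clock cycle count, returned as
+ * "cycles - 1": callers add 1 back, so a result of N covers
+ * N + 1 clock periods.
+ */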
+static int calc_cycl(u32 timing, u32 clock)
+{
+ if (timing == 0 || clock == 0)
+ return 0;
+
+ if ((timing % clock) > 0)
+ return timing / clock;
+ else
+ return timing / clock - 1;
+}
+
+/* Calculate max data valid window. */
+static inline u32 calc_tdvw_max(u32 trp_cnt, u32 clk_period, u32 trhoh_min,
+ u32 board_delay_skew_min, u32 ext_mode)
+{
+ if (ext_mode == 0)
+ clk_period /= 2;
+
+ return (trp_cnt + 1) * clk_period + trhoh_min +
+ board_delay_skew_min;
+}
+
+/* Calculate data valid window. */
+static inline u32 calc_tdvw(u32 trp_cnt, u32 clk_period, u32 trhoh_min,
+ u32 trea_max, u32 ext_mode)
+{
+ if (ext_mode == 0)
+ clk_period /= 2;
+
+ return (trp_cnt + 1) * clk_period + trhoh_min - trea_max;
+}
+
+static int
+cadence_nand_setup_data_interface(struct nand_chip *chip, int chipnr,
+ const struct nand_data_interface *conf)
+{
+ const struct nand_sdr_timings *sdr;
+ struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+ struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+ struct cadence_nand_timings *t = &cdns_chip->timings;
+ u32 reg;
+ u32 board_delay = cdns_ctrl->board_delay;
+ u32 clk_period = DIV_ROUND_DOWN_ULL(1000000000000ULL,
+ cdns_ctrl->nf_clk_rate);
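+	/* clk_period is expressed in picoseconds, like the SDR timings. */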
+ u32 tceh_cnt, tcs_cnt, tadl_cnt, tccs_cnt;
+ u32 tfeat_cnt, trhz_cnt, tvdly_cnt;
+ u32 trhw_cnt, twb_cnt, twh_cnt = 0, twhr_cnt;
+ u32 twp_cnt = 0, trp_cnt = 0, trh_cnt = 0;
+ u32 if_skew = cdns_ctrl->caps1->if_skew;
+ u32 board_delay_skew_min = board_delay - if_skew;
+ u32 board_delay_skew_max = board_delay + if_skew;
+ u32 dqs_sampl_res, phony_dqs_mod;
+ u32 tdvw, tdvw_min, tdvw_max;
+ u32 ext_rd_mode, ext_wr_mode;
+ u32 dll_phy_dqs_timing = 0, phony_dqs_timing = 0, rd_del_sel = 0;
+ u32 sampling_point;
+
+ sdr = nand_get_sdr_timings(conf);
+ if (IS_ERR(sdr))
+ return PTR_ERR(sdr);
+
+ memset(t, 0, sizeof(*t));
+ /* Sampling point calculation. */
+
+ if (cdns_ctrl->caps2.is_phy_type_dll)
+ phony_dqs_mod = 2;
+ else
+ phony_dqs_mod = 1;
+
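+	/* With a DLL PHY the data can be sampled every half clock period. */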
+ dqs_sampl_res = clk_period / phony_dqs_mod;
+
+ tdvw_min = sdr->tREA_max + board_delay_skew_max;
+	/*
+	 * The idea of these calculations is to get optimum values for the
+	 * tRP and tRH timings. If it is NOT possible to sample data with
+	 * optimal tRP/tRH settings, the parameters are extended.
+	 * If clk_period is 50ns (the lowest value) this condition is met
+	 * for asynchronous timing modes 1, 2, 3, 4 and 5.
+	 * If clk_period is 20ns the condition is met only
+	 * for asynchronous timing mode 5.
+	 */
+ if (sdr->tRC_min <= clk_period &&
+ sdr->tRP_min <= (clk_period / 2) &&
+ sdr->tREH_min <= (clk_period / 2)) {
+ /* Performance mode. */
+ ext_rd_mode = 0;
+ tdvw = calc_tdvw(trp_cnt, clk_period, sdr->tRHOH_min,
+ sdr->tREA_max, ext_rd_mode);
+ tdvw_max = calc_tdvw_max(trp_cnt, clk_period, sdr->tRHOH_min,
+ board_delay_skew_min,
+ ext_rd_mode);
+		/*
+		 * Check whether a data valid window and sampling point can
+		 * be found and the point is not on the edge (i.e. we have
+		 * hold margin). If not, extend the tRP timing.
+		 */
+ if (tdvw > 0) {
+ if (tdvw_max <= tdvw_min ||
+ (tdvw_max % dqs_sampl_res) == 0) {
+				/*
+				 * No valid sampling point, so the RE pulse
+				 * needs to be widened by half a clock cycle.
+				 */
+ ext_rd_mode = 1;
+ }
+ } else {
+			/*
+			 * There is no valid window in which to sample the
+			 * data, so tRP needs to be widened. Very conservative
+			 * calculations are performed here.
+			 */
+ trp_cnt = (sdr->tREA_max + board_delay_skew_max
+ + dqs_sampl_res) / clk_period;
+ ext_rd_mode = 1;
+ }
+
+ } else {
+ /* Extended read mode. */
+ u32 trh;
+
+ ext_rd_mode = 1;
+ trp_cnt = calc_cycl(sdr->tRP_min, clk_period);
+ trh = sdr->tRC_min - ((trp_cnt + 1) * clk_period);
+ if (sdr->tREH_min >= trh)
+ trh_cnt = calc_cycl(sdr->tREH_min, clk_period);
+ else
+ trh_cnt = calc_cycl(trh, clk_period);
+
+ tdvw = calc_tdvw(trp_cnt, clk_period, sdr->tRHOH_min,
+ sdr->tREA_max, ext_rd_mode);
+		/*
+		 * Check whether a data valid window and sampling point can
+		 * be found, or, if the point is at the edge, whether the
+		 * previous one is valid; if not, extend the tRP timing.
+		 */
+ if (tdvw > 0) {
+ tdvw_max = calc_tdvw_max(trp_cnt, clk_period,
+ sdr->tRHOH_min,
+ board_delay_skew_min,
+ ext_rd_mode);
+
+ if ((((tdvw_max / dqs_sampl_res)
+ * dqs_sampl_res) <= tdvw_min) ||
+ (((tdvw_max % dqs_sampl_res) == 0) &&
+ (((tdvw_max / dqs_sampl_res - 1)
+ * dqs_sampl_res) <= tdvw_min))) {
+				/*
+				 * The data valid window is narrower than the
+				 * sampling resolution and does not hit any
+				 * sampling point. To make sure a sampling
+				 * point will be found, the RE low pulse width
+				 * is extended by one clock cycle.
+				 */
+ trp_cnt = trp_cnt + 1;
+ }
+ } else {
+			/*
+			 * There is no valid window in which to sample the
+			 * data, so tRP needs to be widened. Very conservative
+			 * calculations are performed here.
+			 */
+ trp_cnt = (sdr->tREA_max + board_delay_skew_max
+ + dqs_sampl_res) / clk_period;
+ }
+ }
+
+ tdvw_max = calc_tdvw_max(trp_cnt, clk_period,
+ sdr->tRHOH_min,
+ board_delay_skew_min, ext_rd_mode);
+
+ if (sdr->tWC_min <= clk_period &&
+ (sdr->tWP_min + if_skew) <= (clk_period / 2) &&
+ (sdr->tWH_min + if_skew) <= (clk_period / 2)) {
+ ext_wr_mode = 0;
+ } else {
+ u32 twh;
+
+ ext_wr_mode = 1;
+ twp_cnt = calc_cycl(sdr->tWP_min + if_skew, clk_period);
+ if ((twp_cnt + 1) * clk_period < (sdr->tALS_min + if_skew))
+ twp_cnt = calc_cycl(sdr->tALS_min + if_skew,
+ clk_period);
+
+ twh = (sdr->tWC_min - (twp_cnt + 1) * clk_period);
+ if (sdr->tWH_min >= twh)
+ twh = sdr->tWH_min;
+
+ twh_cnt = calc_cycl(twh + if_skew, clk_period);
+ }
+
+ reg = FIELD_PREP(ASYNC_TOGGLE_TIMINGS_TRH, trh_cnt);
+ reg |= FIELD_PREP(ASYNC_TOGGLE_TIMINGS_TRP, trp_cnt);
+ reg |= FIELD_PREP(ASYNC_TOGGLE_TIMINGS_TWH, twh_cnt);
+ reg |= FIELD_PREP(ASYNC_TOGGLE_TIMINGS_TWP, twp_cnt);
+ t->async_toggle_timings = reg;
+ dev_dbg(cdns_ctrl->dev, "ASYNC_TOGGLE_TIMINGS_SDR\t%x\n", reg);
+
+ tadl_cnt = calc_cycl((sdr->tADL_min + if_skew), clk_period);
+ tccs_cnt = calc_cycl((sdr->tCCS_min + if_skew), clk_period);
+ twhr_cnt = calc_cycl((sdr->tWHR_min + if_skew), clk_period);
+ trhw_cnt = calc_cycl((sdr->tRHW_min + if_skew), clk_period);
+ reg = FIELD_PREP(TIMINGS0_TADL, tadl_cnt);
+
+	/*
+	 * If the timing exceeds the delay field of the timing register,
+	 * use the maximum value.
+	 */
+ if (FIELD_FIT(TIMINGS0_TCCS, tccs_cnt))
+ reg |= FIELD_PREP(TIMINGS0_TCCS, tccs_cnt);
+ else
+ reg |= TIMINGS0_TCCS;
+
+ reg |= FIELD_PREP(TIMINGS0_TWHR, twhr_cnt);
+ reg |= FIELD_PREP(TIMINGS0_TRHW, trhw_cnt);
+ t->timings0 = reg;
+ dev_dbg(cdns_ctrl->dev, "TIMINGS0_SDR\t%x\n", reg);
+
+	/* The following is related to a single signal, so skew is not needed. */
+ trhz_cnt = calc_cycl(sdr->tRHZ_max, clk_period);
+ trhz_cnt = trhz_cnt + 1;
+ twb_cnt = calc_cycl((sdr->tWB_max + board_delay), clk_period);
+	/*
+	 * Because of the two-stage syncflop the value must be increased:
+	 * the first term (3) accounts for synchronization, the second (5)
+	 * for the output interface delay.
+	 */
+ twb_cnt = twb_cnt + 3 + 5;
+	/*
+	 * The following is related to the WE# edge of the random data
+	 * input sequence, so skew is not needed.
+	 */
+ tvdly_cnt = calc_cycl(500000 + if_skew, clk_period);
+ reg = FIELD_PREP(TIMINGS1_TRHZ, trhz_cnt);
+ reg |= FIELD_PREP(TIMINGS1_TWB, twb_cnt);
+ reg |= FIELD_PREP(TIMINGS1_TVDLY, tvdly_cnt);
+ t->timings1 = reg;
+ dev_dbg(cdns_ctrl->dev, "TIMINGS1_SDR\t%x\n", reg);
+
+ tfeat_cnt = calc_cycl(sdr->tFEAT_max, clk_period);
+ if (tfeat_cnt < twb_cnt)
+ tfeat_cnt = twb_cnt;
+
+ tceh_cnt = calc_cycl(sdr->tCEH_min, clk_period);
+ tcs_cnt = calc_cycl((sdr->tCS_min + if_skew), clk_period);
+
+ reg = FIELD_PREP(TIMINGS2_TFEAT, tfeat_cnt);
+ reg |= FIELD_PREP(TIMINGS2_CS_HOLD_TIME, tceh_cnt);
+ reg |= FIELD_PREP(TIMINGS2_CS_SETUP_TIME, tcs_cnt);
+ t->timings2 = reg;
+ dev_dbg(cdns_ctrl->dev, "TIMINGS2_SDR\t%x\n", reg);
+
+ if (cdns_ctrl->caps2.is_phy_type_dll) {
+ reg = DLL_PHY_CTRL_DLL_RST_N;
+ if (ext_wr_mode)
+ reg |= DLL_PHY_CTRL_EXTENDED_WR_MODE;
+ if (ext_rd_mode)
+ reg |= DLL_PHY_CTRL_EXTENDED_RD_MODE;
+
+ reg |= FIELD_PREP(DLL_PHY_CTRL_RS_HIGH_WAIT_CNT, 7);
+ reg |= FIELD_PREP(DLL_PHY_CTRL_RS_IDLE_CNT, 7);
+ t->dll_phy_ctrl = reg;
+ dev_dbg(cdns_ctrl->dev, "DLL_PHY_CTRL_SDR\t%x\n", reg);
+ }
+
+ /* Sampling point calculation. */
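+	/* Pick the last sampling point that still falls inside tdvw_max. */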
+ if ((tdvw_max % dqs_sampl_res) > 0)
+ sampling_point = tdvw_max / dqs_sampl_res;
+ else
+ sampling_point = (tdvw_max / dqs_sampl_res - 1);
+
+ if (sampling_point * dqs_sampl_res > tdvw_min) {
+ dll_phy_dqs_timing =
+ FIELD_PREP(PHY_DQS_TIMING_DQS_SEL_OE_END, 4);
+ dll_phy_dqs_timing |= PHY_DQS_TIMING_USE_PHONY_DQS;
+ phony_dqs_timing = sampling_point / phony_dqs_mod;
+
+ if ((sampling_point % 2) > 0) {
+ dll_phy_dqs_timing |= PHY_DQS_TIMING_PHONY_DQS_SEL;
+ if ((tdvw_max % dqs_sampl_res) == 0)
+			/*
+			 * Calculation for a sampling point that lies at the
+			 * edge of the data valid window and is an odd number.
+			 */
+ phony_dqs_timing = (tdvw_max / dqs_sampl_res)
+ / phony_dqs_mod - 1;
+
+ if (!cdns_ctrl->caps2.is_phy_type_dll)
+ phony_dqs_timing--;
+
+ } else {
+ phony_dqs_timing--;
+ }
+ rd_del_sel = phony_dqs_timing + 3;
+ } else {
+ dev_warn(cdns_ctrl->dev,
+			 "cannot find a valid sampling point\n");
+ }
+
+ reg = FIELD_PREP(PHY_CTRL_PHONY_DQS, phony_dqs_timing);
+ if (cdns_ctrl->caps2.is_phy_type_dll)
+ reg |= PHY_CTRL_SDR_DQS;
+ t->phy_ctrl = reg;
+ dev_dbg(cdns_ctrl->dev, "PHY_CTRL_REG_SDR\t%x\n", reg);
+
+ if (cdns_ctrl->caps2.is_phy_type_dll) {
+ dev_dbg(cdns_ctrl->dev, "PHY_TSEL_REG_SDR\t%x\n", 0);
+ dev_dbg(cdns_ctrl->dev, "PHY_DQ_TIMING_REG_SDR\t%x\n", 2);
+ dev_dbg(cdns_ctrl->dev, "PHY_DQS_TIMING_REG_SDR\t%x\n",
+ dll_phy_dqs_timing);
+ t->phy_dqs_timing = dll_phy_dqs_timing;
+
+ reg = FIELD_PREP(PHY_GATE_LPBK_CTRL_RDS, rd_del_sel);
+ dev_dbg(cdns_ctrl->dev, "PHY_GATE_LPBK_CTRL_REG_SDR\t%x\n",
+ reg);
+ t->phy_gate_lpbk_ctrl = reg;
+
+ dev_dbg(cdns_ctrl->dev, "PHY_DLL_MASTER_CTRL_REG_SDR\t%lx\n",
+ PHY_DLL_MASTER_CTRL_BYPASS_MODE);
+ dev_dbg(cdns_ctrl->dev, "PHY_DLL_SLAVE_CTRL_REG_SDR\t%x\n", 0);
+ }
+
+ return 0;
+}
+
+int cadence_nand_attach_chip(struct nand_chip *chip)
+{
+ struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+ struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+ u32 ecc_size = cdns_chip->sector_count * chip->ecc.bytes;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ u32 max_oob_data_size;
+ int ret;
+
+ if (chip->options & NAND_BUSWIDTH_16) {
+ ret = cadence_nand_set_access_width16(cdns_ctrl, true);
+ if (ret)
+ return ret;
+ }
+
+ chip->bbt_options |= NAND_BBT_USE_FLASH;
+ chip->bbt_options |= NAND_BBT_NO_OOB;
+ chip->ecc.mode = NAND_ECC_HW;
+
+ chip->options |= NAND_NO_SUBPAGE_WRITE;
+
+ cdns_chip->bbm_offs = chip->badblockpos;
+ if (chip->options & NAND_BUSWIDTH_16) {
+ cdns_chip->bbm_offs &= ~0x01;
+ cdns_chip->bbm_len = 2;
+ } else {
+ cdns_chip->bbm_len = 1;
+ }
+
+ ret = nand_ecc_choose_conf(chip,
+ &cdns_ctrl->ecc_caps,
+ mtd->oobsize - cdns_chip->bbm_len);
+ if (ret) {
+ dev_err(cdns_ctrl->dev, "ECC configuration failed\n");
+ return ret;
+ }
+
+ dev_dbg(cdns_ctrl->dev,
+ "chosen ECC settings: step=%d, strength=%d, bytes=%d\n",
+ chip->ecc.size, chip->ecc.strength, chip->ecc.bytes);
+
+ /* Error correction configuration. */
+ cdns_chip->sector_size = chip->ecc.size;
+ cdns_chip->sector_count = mtd->writesize / cdns_chip->sector_size;
+
+ cdns_chip->avail_oob_size = mtd->oobsize - ecc_size;
+
+ max_oob_data_size = MAX_OOB_SIZE_PER_SECTOR;
+
+ if (cdns_chip->avail_oob_size > max_oob_data_size)
+ cdns_chip->avail_oob_size = max_oob_data_size;
+
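+	/* Shrink the free OOB if BBM + free OOB + ECC do not fit in the OOB area. */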
+ if ((cdns_chip->avail_oob_size + cdns_chip->bbm_len + ecc_size)
+ > mtd->oobsize)
+ cdns_chip->avail_oob_size -= 4;
+
+ ret = cadence_nand_get_ecc_strength_idx(cdns_ctrl, chip->ecc.strength);
+ if (ret < 0)
+ return -EINVAL;
+
+ cdns_chip->corr_str_idx = (u8)ret;
+
+ if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
+ 1000000,
+ CTRL_STATUS_CTRL_BUSY, true))
+ return -ETIMEDOUT;
+
+ cadence_nand_set_ecc_strength(cdns_ctrl,
+ cdns_chip->corr_str_idx);
+
+ cadence_nand_set_erase_detection(cdns_ctrl, true,
+ chip->ecc.strength);
+
+ /* Override the default read operations. */
+ chip->ecc.read_page = cadence_nand_read_page;
+ chip->ecc.read_page_raw = cadence_nand_read_page_raw;
+ chip->ecc.write_page = cadence_nand_write_page;
+ chip->ecc.write_page_raw = cadence_nand_write_page_raw;
+ chip->ecc.read_oob = cadence_nand_read_oob;
+ chip->ecc.write_oob = cadence_nand_write_oob;
+ chip->ecc.read_oob_raw = cadence_nand_read_oob_raw;
+ chip->ecc.write_oob_raw = cadence_nand_write_oob_raw;
+
+ if ((mtd->writesize + mtd->oobsize) > cdns_ctrl->buf_size)
+ cdns_ctrl->buf_size = mtd->writesize + mtd->oobsize;
+
+ /* Is 32-bit DMA supported? */
+ ret = dma_set_mask(cdns_ctrl->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(cdns_ctrl->dev, "no usable DMA configuration\n");
+ return ret;
+ }
+
+ mtd_set_ooblayout(mtd, &cadence_nand_ooblayout_ops);
+
+ return 0;
+}
+
+static const struct nand_controller_ops cadence_nand_controller_ops = {
+ .attach_chip = cadence_nand_attach_chip,
+ .exec_op = cadence_nand_exec_op,
+ .setup_data_interface = cadence_nand_setup_data_interface,
+};
+
+static int cadence_nand_chip_init(struct cdns_nand_ctrl *cdns_ctrl,
+ struct device_node *np)
+{
+ struct cdns_nand_chip *cdns_chip;
+ struct mtd_info *mtd;
+ struct nand_chip *chip;
+ int nsels, ret, i;
+ u32 cs;
+
+ nsels = of_property_count_elems_of_size(np, "reg", sizeof(u32));
+ if (nsels <= 0) {
+ dev_err(cdns_ctrl->dev, "missing/invalid reg property\n");
+ return -EINVAL;
+ }
+
+ /* Allocate the nand chip structure. */
+ cdns_chip = devm_kzalloc(cdns_ctrl->dev, sizeof(*cdns_chip) +
+ (nsels * sizeof(u8)),
+ GFP_KERNEL);
+ if (!cdns_chip) {
+ dev_err(cdns_ctrl->dev, "could not allocate chip structure\n");
+ return -ENOMEM;
+ }
+
+ cdns_chip->nsels = nsels;
+
+ for (i = 0; i < nsels; i++) {
+ /* Retrieve CS id. */
+ ret = of_property_read_u32_index(np, "reg", i, &cs);
+ if (ret) {
+ dev_err(cdns_ctrl->dev,
+ "could not retrieve reg property: %d\n",
+ ret);
+ return ret;
+ }
+
+ if (cs >= cdns_ctrl->caps2.max_banks) {
+ dev_err(cdns_ctrl->dev,
+ "invalid reg value: %u (max CS = %d)\n",
+ cs, cdns_ctrl->caps2.max_banks);
+ return -EINVAL;
+ }
+
+ if (test_and_set_bit(cs, &cdns_ctrl->assigned_cs)) {
+ dev_err(cdns_ctrl->dev,
+ "CS %d already assigned\n", cs);
+ return -EINVAL;
+ }
+
+ cdns_chip->cs[i] = cs;
+ }
+
+ chip = &cdns_chip->chip;
+ chip->controller = &cdns_ctrl->controller;
+ nand_set_flash_node(chip, np);
+
+ mtd = nand_to_mtd(chip);
+ mtd->dev.parent = cdns_ctrl->dev;
+
+ /*
+ * Default to HW ECC engine mode. If the nand-ecc-mode property is given
+ * in the DT node, this entry will be overwritten in nand_scan_ident().
+ */
+ chip->ecc.mode = NAND_ECC_HW;
+
+ ret = nand_scan(chip, cdns_chip->nsels);
+ if (ret) {
+ dev_err(cdns_ctrl->dev, "could not scan the nand chip\n");
+ return ret;
+ }
+
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret) {
+ dev_err(cdns_ctrl->dev,
+ "failed to register mtd device: %d\n", ret);
+ nand_cleanup(chip);
+ return ret;
+ }
+
+ list_add_tail(&cdns_chip->node, &cdns_ctrl->chips);
+
+ return 0;
+}
+
+static void cadence_nand_chips_cleanup(struct cdns_nand_ctrl *cdns_ctrl)
+{
+ struct cdns_nand_chip *entry, *temp;
+
+ list_for_each_entry_safe(entry, temp, &cdns_ctrl->chips, node) {
+ nand_release(&entry->chip);
+ list_del(&entry->node);
+ }
+}
+
+static int cadence_nand_chips_init(struct cdns_nand_ctrl *cdns_ctrl)
+{
+ struct device_node *np = cdns_ctrl->dev->of_node;
+ struct device_node *nand_np;
+ int max_cs = cdns_ctrl->caps2.max_banks;
+ int nchips, ret;
+
+ nchips = of_get_child_count(np);
+
+ if (nchips > max_cs) {
+ dev_err(cdns_ctrl->dev,
+ "too many NAND chips: %d (max = %d CS)\n",
+ nchips, max_cs);
+ return -EINVAL;
+ }
+
+ for_each_child_of_node(np, nand_np) {
+ ret = cadence_nand_chip_init(cdns_ctrl, nand_np);
+ if (ret) {
+ of_node_put(nand_np);
+ cadence_nand_chips_cleanup(cdns_ctrl);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void
+cadence_nand_irq_cleanup(int irqnum, struct cdns_nand_ctrl *cdns_ctrl)
+{
+ /* Disable interrupts. */
+ writel_relaxed(INTR_ENABLE_INTR_EN, cdns_ctrl->reg + INTR_ENABLE);
+}
+
+static int cadence_nand_init(struct cdns_nand_ctrl *cdns_ctrl)
+{
+ dma_cap_mask_t mask;
+ int ret;
+
+ cdns_ctrl->cdma_desc = dma_alloc_coherent(cdns_ctrl->dev,
+ sizeof(*cdns_ctrl->cdma_desc),
+ &cdns_ctrl->dma_cdma_desc,
+ GFP_KERNEL);
+	if (!cdns_ctrl->cdma_desc)
+ return -ENOMEM;
+
+ cdns_ctrl->buf_size = SZ_16K;
+ cdns_ctrl->buf = kmalloc(cdns_ctrl->buf_size, GFP_KERNEL);
+ if (!cdns_ctrl->buf) {
+ ret = -ENOMEM;
+ goto free_buf_desc;
+ }
+
+ if (devm_request_irq(cdns_ctrl->dev, cdns_ctrl->irq, cadence_nand_isr,
+ IRQF_SHARED, "cadence-nand-controller",
+ cdns_ctrl)) {
+ dev_err(cdns_ctrl->dev, "Unable to allocate IRQ\n");
+ ret = -ENODEV;
+ goto free_buf;
+ }
+
+ spin_lock_init(&cdns_ctrl->irq_lock);
+ init_completion(&cdns_ctrl->complete);
+
+ ret = cadence_nand_hw_init(cdns_ctrl);
+ if (ret)
+ goto disable_irq;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_MEMCPY, mask);
+
+ if (cdns_ctrl->caps1->has_dma) {
+ cdns_ctrl->dmac = dma_request_channel(mask, NULL, NULL);
+ if (!cdns_ctrl->dmac) {
+ dev_err(cdns_ctrl->dev,
+ "Unable to get a DMA channel\n");
+ ret = -EBUSY;
+ goto disable_irq;
+ }
+ }
+
+ nand_controller_init(&cdns_ctrl->controller);
+ INIT_LIST_HEAD(&cdns_ctrl->chips);
+
+ cdns_ctrl->controller.ops = &cadence_nand_controller_ops;
+ cdns_ctrl->curr_corr_str_idx = 0xFF;
+
+ ret = cadence_nand_chips_init(cdns_ctrl);
+ if (ret) {
+ dev_err(cdns_ctrl->dev, "Failed to register MTD: %d\n",
+ ret);
+ goto dma_release_chnl;
+ }
+
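+	/*
+	 * attach_chip() may have grown buf_size to cover the largest
+	 * page + OOB, so re-allocate the bounce buffer at its final size.
+	 */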
+ kfree(cdns_ctrl->buf);
+ cdns_ctrl->buf = kzalloc(cdns_ctrl->buf_size, GFP_KERNEL);
+ if (!cdns_ctrl->buf) {
+ ret = -ENOMEM;
+ goto dma_release_chnl;
+ }
+
+ return 0;
+
+dma_release_chnl:
+ if (cdns_ctrl->dmac)
+ dma_release_channel(cdns_ctrl->dmac);
+
+disable_irq:
+ cadence_nand_irq_cleanup(cdns_ctrl->irq, cdns_ctrl);
+
+free_buf:
+ kfree(cdns_ctrl->buf);
+
+free_buf_desc:
+ dma_free_coherent(cdns_ctrl->dev, sizeof(struct cadence_nand_cdma_desc),
+ cdns_ctrl->cdma_desc, cdns_ctrl->dma_cdma_desc);
+
+ return ret;
+}
+
+/* Driver exit point. */
+static void cadence_nand_remove(struct cdns_nand_ctrl *cdns_ctrl)
+{
+ cadence_nand_chips_cleanup(cdns_ctrl);
+ cadence_nand_irq_cleanup(cdns_ctrl->irq, cdns_ctrl);
+ kfree(cdns_ctrl->buf);
+ dma_free_coherent(cdns_ctrl->dev, sizeof(struct cadence_nand_cdma_desc),
+ cdns_ctrl->cdma_desc, cdns_ctrl->dma_cdma_desc);
+
+ if (cdns_ctrl->dmac)
+ dma_release_channel(cdns_ctrl->dmac);
+}
+
+struct cadence_nand_dt {
+ struct cdns_nand_ctrl cdns_ctrl;
+ struct clk *clk;
+};
+
+static const struct cadence_nand_dt_devdata cadence_nand_default = {
+ .if_skew = 0,
+ .has_dma = 1,
+};
+
+static const struct of_device_id cadence_nand_dt_ids[] = {
+ {
+ .compatible = "cdns,hp-nfc",
+ .data = &cadence_nand_default
+ }, {}
+};
+
+MODULE_DEVICE_TABLE(of, cadence_nand_dt_ids);
+
+static int cadence_nand_dt_probe(struct platform_device *ofdev)
+{
+ struct resource *res;
+ struct cadence_nand_dt *dt;
+ struct cdns_nand_ctrl *cdns_ctrl;
+ int ret;
+ const struct of_device_id *of_id;
+ const struct cadence_nand_dt_devdata *devdata;
+ u32 val;
+
+ of_id = of_match_device(cadence_nand_dt_ids, &ofdev->dev);
+ if (of_id) {
+ ofdev->id_entry = of_id->data;
+ devdata = of_id->data;
+ } else {
+ pr_err("Failed to find the right device id.\n");
+		return -ENODEV;
+ }
+
+ dt = devm_kzalloc(&ofdev->dev, sizeof(*dt), GFP_KERNEL);
+ if (!dt)
+ return -ENOMEM;
+
+ cdns_ctrl = &dt->cdns_ctrl;
+ cdns_ctrl->caps1 = devdata;
+
+ cdns_ctrl->dev = &ofdev->dev;
+ cdns_ctrl->irq = platform_get_irq(ofdev, 0);
+ if (cdns_ctrl->irq < 0)
+ return cdns_ctrl->irq;
+
+ dev_info(cdns_ctrl->dev, "IRQ: nr %d\n", cdns_ctrl->irq);
+
+ cdns_ctrl->reg = devm_platform_ioremap_resource(ofdev, 0);
+ if (IS_ERR(cdns_ctrl->reg)) {
+ dev_err(&ofdev->dev, "devm_ioremap_resource res 0 failed\n");
+ return PTR_ERR(cdns_ctrl->reg);
+ }
+
+	res = platform_get_resource(ofdev, IORESOURCE_MEM, 1);
+	if (!res)
+		return -EINVAL;
+	cdns_ctrl->io.dma = res->start;
+ cdns_ctrl->io.virt = devm_ioremap_resource(&ofdev->dev, res);
+ if (IS_ERR(cdns_ctrl->io.virt)) {
+ dev_err(cdns_ctrl->dev, "devm_ioremap_resource res 1 failed\n");
+ return PTR_ERR(cdns_ctrl->io.virt);
+ }
+
+ dt->clk = devm_clk_get(cdns_ctrl->dev, "nf_clk");
+ if (IS_ERR(dt->clk))
+ return PTR_ERR(dt->clk);
+
+ cdns_ctrl->nf_clk_rate = clk_get_rate(dt->clk);
+
+ ret = of_property_read_u32(ofdev->dev.of_node,
+ "cdns,board-delay-ps", &val);
+ if (ret) {
+ val = 4830;
+ dev_info(cdns_ctrl->dev,
+			 "missing cdns,board-delay-ps property, using default %d ps\n",
+ val);
+ }
+ cdns_ctrl->board_delay = val;
+
+ ret = cadence_nand_init(cdns_ctrl);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(ofdev, dt);
+ return 0;
+}
+
+static int cadence_nand_dt_remove(struct platform_device *ofdev)
+{
+ struct cadence_nand_dt *dt = platform_get_drvdata(ofdev);
+
+ cadence_nand_remove(&dt->cdns_ctrl);
+
+ return 0;
+}
+
+static struct platform_driver cadence_nand_dt_driver = {
+ .probe = cadence_nand_dt_probe,
+ .remove = cadence_nand_dt_remove,
+ .driver = {
+ .name = "cadence-nand-controller",
+ .of_match_table = cadence_nand_dt_ids,
+ },
+};
+
+module_platform_driver(cadence_nand_dt_driver);
+
+MODULE_AUTHOR("Piotr Sroka <piotrs@cadence.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Driver for Cadence NAND flash controller");
+
diff --git a/drivers/mtd/nand/raw/denali_dt.c b/drivers/mtd/nand/raw/denali_dt.c
index 5e14836f6bd5..8b779a899dcf 100644
--- a/drivers/mtd/nand/raw/denali_dt.c
+++ b/drivers/mtd/nand/raw/denali_dt.c
@@ -102,47 +102,6 @@ static int denali_dt_chip_init(struct denali_controller *denali,
return denali_chip_init(denali, dchip);
}
-/* Backward compatibility for old platforms */
-static int denali_dt_legacy_chip_init(struct denali_controller *denali)
-{
- struct denali_chip *dchip;
- int nsels, i;
-
- nsels = denali->nbanks;
-
- dchip = devm_kzalloc(denali->dev, struct_size(dchip, sels, nsels),
- GFP_KERNEL);
- if (!dchip)
- return -ENOMEM;
-
- dchip->nsels = nsels;
-
- for (i = 0; i < nsels; i++)
- dchip->sels[i].bank = i;
-
- nand_set_flash_node(&dchip->chip, denali->dev->of_node);
-
- return denali_chip_init(denali, dchip);
-}
-
-/*
- * Check the DT binding.
- * The new binding expects chip subnodes in the controller node.
- * So, #address-cells = <1>; #size-cells = <0>; are required.
- * Check the #size-cells to distinguish the binding.
- */
-static bool denali_dt_is_legacy_binding(struct device_node *np)
-{
- u32 cells;
- int ret;
-
- ret = of_property_read_u32(np, "#size-cells", &cells);
- if (ret)
- return true;
-
- return cells != 0;
-}
-
static int denali_dt_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -167,10 +126,8 @@ static int denali_dt_probe(struct platform_device *pdev)
denali->dev = dev;
denali->irq = platform_get_irq(pdev, 0);
- if (denali->irq < 0) {
- dev_err(dev, "no irq defined\n");
+ if (denali->irq < 0)
return denali->irq;
- }
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "denali_reg");
denali->reg = devm_ioremap_resource(dev, res);
@@ -213,17 +170,11 @@ static int denali_dt_probe(struct platform_device *pdev)
if (ret)
goto out_disable_clk_ecc;
- if (denali_dt_is_legacy_binding(dev->of_node)) {
- ret = denali_dt_legacy_chip_init(denali);
- if (ret)
+ for_each_child_of_node(dev->of_node, np) {
+ ret = denali_dt_chip_init(denali, np);
+ if (ret) {
+ of_node_put(np);
goto out_remove_denali;
- } else {
- for_each_child_of_node(dev->of_node, np) {
- ret = denali_dt_chip_init(denali, np);
- if (ret) {
- of_node_put(np);
- goto out_remove_denali;
- }
}
}
diff --git a/drivers/mtd/nand/raw/hisi504_nand.c b/drivers/mtd/nand/raw/hisi504_nand.c
index 6a4626a8bf95..0b48be54ba6f 100644
--- a/drivers/mtd/nand/raw/hisi504_nand.c
+++ b/drivers/mtd/nand/raw/hisi504_nand.c
@@ -751,10 +751,8 @@ static int hisi_nfc_probe(struct platform_device *pdev)
mtd = nand_to_mtd(chip);
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(dev, "no IRQ resource defined\n");
+ if (irq < 0)
return -ENXIO;
- }
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
host->iobase = devm_ioremap_resource(dev, res);
diff --git a/drivers/mtd/nand/raw/lpc32xx_mlc.c b/drivers/mtd/nand/raw/lpc32xx_mlc.c
index 78b31f845c50..241b58b83240 100644
--- a/drivers/mtd/nand/raw/lpc32xx_mlc.c
+++ b/drivers/mtd/nand/raw/lpc32xx_mlc.c
@@ -773,7 +773,6 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
host->irq = platform_get_irq(pdev, 0);
if (host->irq < 0) {
- dev_err(&pdev->dev, "failed to get platform irq\n");
res = -EINVAL;
goto release_dma_chan;
}
diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c
index fc49e13d81ec..fb5abdcfb007 100644
--- a/drivers/mtd/nand/raw/marvell_nand.c
+++ b/drivers/mtd/nand/raw/marvell_nand.c
@@ -2862,10 +2862,8 @@ static int marvell_nfc_probe(struct platform_device *pdev)
return PTR_ERR(nfc->regs);
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(dev, "failed to retrieve irq\n");
+ if (irq < 0)
return irq;
- }
nfc->core_clk = devm_clk_get(&pdev->dev, "core");
diff --git a/drivers/mtd/nand/raw/meson_nand.c b/drivers/mtd/nand/raw/meson_nand.c
index 1b82b687e5a5..9f17b5b8efbf 100644
--- a/drivers/mtd/nand/raw/meson_nand.c
+++ b/drivers/mtd/nand/raw/meson_nand.c
@@ -1399,10 +1399,8 @@ static int meson_nfc_probe(struct platform_device *pdev)
}
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(dev, "no NFC IRQ resource\n");
+ if (irq < 0)
return -EINVAL;
- }
ret = meson_nfc_clk_init(nfc);
if (ret) {
diff --git a/drivers/mtd/nand/raw/mtk_ecc.c b/drivers/mtd/nand/raw/mtk_ecc.c
index 74595b644b7c..75f1fa3d4d35 100644
--- a/drivers/mtd/nand/raw/mtk_ecc.c
+++ b/drivers/mtd/nand/raw/mtk_ecc.c
@@ -527,10 +527,8 @@ static int mtk_ecc_probe(struct platform_device *pdev)
}
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(dev, "failed to get irq: %d\n", irq);
+ if (irq < 0)
return irq;
- }
ret = dma_set_mask(dev, DMA_BIT_MASK(32));
if (ret) {
diff --git a/drivers/mtd/nand/raw/mtk_nand.c b/drivers/mtd/nand/raw/mtk_nand.c
index 373d47d1ba4c..b8305e39ab51 100644
--- a/drivers/mtd/nand/raw/mtk_nand.c
+++ b/drivers/mtd/nand/raw/mtk_nand.c
@@ -1540,7 +1540,6 @@ static int mtk_nfc_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
- dev_err(dev, "no nfi irq resource\n");
ret = -EINVAL;
goto clk_disable;
}
diff --git a/drivers/mtd/nand/raw/mxic_nand.c b/drivers/mtd/nand/raw/mxic_nand.c
index 9d49e6c845e1..ed7a4e021bf5 100644
--- a/drivers/mtd/nand/raw/mxic_nand.c
+++ b/drivers/mtd/nand/raw/mxic_nand.c
@@ -524,10 +524,8 @@ static int mxic_nfc_probe(struct platform_device *pdev)
nand_chip->controller = &nfc->controller;
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "failed to retrieve irq\n");
+ if (irq < 0)
return irq;
- }
mxic_nfc_hw_init(nfc);
diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c
index 5c2c30a7dffa..f64e3b6605c6 100644
--- a/drivers/mtd/nand/raw/nand_base.c
+++ b/drivers/mtd/nand/raw/nand_base.c
@@ -292,12 +292,16 @@ int nand_bbm_get_next_page(struct nand_chip *chip, int page)
struct mtd_info *mtd = nand_to_mtd(chip);
int last_page = ((mtd->erasesize - mtd->writesize) >>
chip->page_shift) & chip->pagemask;
+ unsigned int bbm_flags = NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE
+ | NAND_BBM_LASTPAGE;
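+	/* If no BBM page flag is set, default to using the first page. */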
+ if (page == 0 && !(chip->options & bbm_flags))
+ return 0;
if (page == 0 && chip->options & NAND_BBM_FIRSTPAGE)
return 0;
- else if (page <= 1 && chip->options & NAND_BBM_SECONDPAGE)
+ if (page <= 1 && chip->options & NAND_BBM_SECONDPAGE)
return 1;
- else if (page <= last_page && chip->options & NAND_BBM_LASTPAGE)
+ if (page <= last_page && chip->options & NAND_BBM_LASTPAGE)
return last_page;
return -EINVAL;
diff --git a/drivers/mtd/nand/raw/nand_micron.c b/drivers/mtd/nand/raw/nand_micron.c
index 8ca9fad6e6ad..56654030ec7f 100644
--- a/drivers/mtd/nand/raw/nand_micron.c
+++ b/drivers/mtd/nand/raw/nand_micron.c
@@ -446,8 +446,10 @@ static int micron_nand_init(struct nand_chip *chip)
if (ret)
goto err_free_manuf_data;
+ chip->options |= NAND_BBM_FIRSTPAGE;
+
if (mtd->writesize == 2048)
- chip->options |= NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE;
+ chip->options |= NAND_BBM_SECONDPAGE;
ondie = micron_supports_on_die_ecc(chip);
diff --git a/drivers/mtd/nand/raw/omap2.c b/drivers/mtd/nand/raw/omap2.c
index 6ec65f48501c..ad77c112a78a 100644
--- a/drivers/mtd/nand/raw/omap2.c
+++ b/drivers/mtd/nand/raw/omap2.c
@@ -1967,10 +1967,8 @@ static int omap_nand_attach_chip(struct nand_chip *chip)
case NAND_OMAP_PREFETCH_IRQ:
info->gpmc_irq_fifo = platform_get_irq(info->pdev, 0);
- if (info->gpmc_irq_fifo <= 0) {
- dev_err(dev, "Error getting fifo IRQ\n");
+ if (info->gpmc_irq_fifo <= 0)
return -ENODEV;
- }
err = devm_request_irq(dev, info->gpmc_irq_fifo,
omap_nand_irq, IRQF_SHARED,
"gpmc-nand-fifo", info);
@@ -1982,10 +1980,8 @@ static int omap_nand_attach_chip(struct nand_chip *chip)
}
info->gpmc_irq_count = platform_get_irq(info->pdev, 1);
- if (info->gpmc_irq_count <= 0) {
- dev_err(dev, "Error getting IRQ count\n");
+ if (info->gpmc_irq_count <= 0)
return -ENODEV;
- }
err = devm_request_irq(dev, info->gpmc_irq_count,
omap_nand_irq, IRQF_SHARED,
"gpmc-nand-count", info);
diff --git a/drivers/mtd/nand/raw/sh_flctl.c b/drivers/mtd/nand/raw/sh_flctl.c
index e509c93737c4..058e99d0cbcf 100644
--- a/drivers/mtd/nand/raw/sh_flctl.c
+++ b/drivers/mtd/nand/raw/sh_flctl.c
@@ -1129,10 +1129,8 @@ static int flctl_probe(struct platform_device *pdev)
flctl->fifo = res->start + 0x24; /* FLDTFIFO */
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "failed to get flste irq data: %d\n", irq);
+ if (irq < 0)
return irq;
- }
ret = devm_request_irq(&pdev->dev, irq, flctl_handle_flste, IRQF_SHARED,
"flste", flctl);
diff --git a/drivers/mtd/nand/raw/stm32_fmc2_nand.c b/drivers/mtd/nand/raw/stm32_fmc2_nand.c
index 8cc852dc7d54..9e63800f768a 100644
--- a/drivers/mtd/nand/raw/stm32_fmc2_nand.c
+++ b/drivers/mtd/nand/raw/stm32_fmc2_nand.c
@@ -1880,11 +1880,8 @@ static int stm32_fmc2_probe(struct platform_device *pdev)
}
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- if (irq != -EPROBE_DEFER)
- dev_err(dev, "IRQ error missing or invalid\n");
+ if (irq < 0)
return irq;
- }
ret = devm_request_irq(dev, irq, stm32_fmc2_irq, 0,
dev_name(dev), fmc2);
diff --git a/drivers/mtd/nand/raw/sunxi_nand.c b/drivers/mtd/nand/raw/sunxi_nand.c
index 89773293c64d..37a4ac0dd85b 100644
--- a/drivers/mtd/nand/raw/sunxi_nand.c
+++ b/drivers/mtd/nand/raw/sunxi_nand.c
@@ -2071,10 +2071,8 @@ static int sunxi_nfc_probe(struct platform_device *pdev)
return PTR_ERR(nfc->regs);
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(dev, "failed to retrieve irq\n");
+ if (irq < 0)
return irq;
- }
nfc->ahb_clk = devm_clk_get(dev, "ahb");
if (IS_ERR(nfc->ahb_clk)) {
diff --git a/drivers/mtd/spi-nor/aspeed-smc.c b/drivers/mtd/spi-nor/aspeed-smc.c
index 009c1da8574c..2b7cabbb680c 100644
--- a/drivers/mtd/spi-nor/aspeed-smc.c
+++ b/drivers/mtd/spi-nor/aspeed-smc.c
@@ -320,7 +320,8 @@ static void aspeed_smc_unprep(struct spi_nor *nor, enum spi_nor_ops ops)
mutex_unlock(&chip->controller->mutex);
}
-static int aspeed_smc_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
+static int aspeed_smc_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf,
+ size_t len)
{
struct aspeed_smc_chip *chip = nor->priv;
@@ -331,8 +332,8 @@ static int aspeed_smc_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
return 0;
}
-static int aspeed_smc_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf,
- int len)
+static int aspeed_smc_write_reg(struct spi_nor *nor, u8 opcode, const u8 *buf,
+ size_t len)
{
struct aspeed_smc_chip *chip = nor->priv;
@@ -746,6 +747,15 @@ static int aspeed_smc_chip_setup_finish(struct aspeed_smc_chip *chip)
return 0;
}
+static const struct spi_nor_controller_ops aspeed_smc_controller_ops = {
+ .prepare = aspeed_smc_prep,
+ .unprepare = aspeed_smc_unprep,
+ .read_reg = aspeed_smc_read_reg,
+ .write_reg = aspeed_smc_write_reg,
+ .read = aspeed_smc_read_user,
+ .write = aspeed_smc_write_user,
+};
+
static int aspeed_smc_setup_flash(struct aspeed_smc_controller *controller,
struct device_node *np, struct resource *r)
{
@@ -805,12 +815,7 @@ static int aspeed_smc_setup_flash(struct aspeed_smc_controller *controller,
nor->dev = dev;
nor->priv = chip;
spi_nor_set_flash_node(nor, child);
- nor->read = aspeed_smc_read_user;
- nor->write = aspeed_smc_write_user;
- nor->read_reg = aspeed_smc_read_reg;
- nor->write_reg = aspeed_smc_write_reg;
- nor->prepare = aspeed_smc_prep;
- nor->unprepare = aspeed_smc_unprep;
+ nor->controller_ops = &aspeed_smc_controller_ops;
ret = aspeed_smc_chip_setup_init(chip, r);
if (ret)
diff --git a/drivers/mtd/spi-nor/cadence-quadspi.c b/drivers/mtd/spi-nor/cadence-quadspi.c
index 7bef63947b29..06f997247d0f 100644
--- a/drivers/mtd/spi-nor/cadence-quadspi.c
+++ b/drivers/mtd/spi-nor/cadence-quadspi.c
@@ -285,7 +285,7 @@ static irqreturn_t cqspi_irq_handler(int this_irq, void *dev)
return IRQ_HANDLED;
}
-static unsigned int cqspi_calc_rdreg(struct spi_nor *nor, const u8 opcode)
+static unsigned int cqspi_calc_rdreg(struct spi_nor *nor)
{
struct cqspi_flash_pdata *f_pdata = nor->priv;
u32 rdreg = 0;
@@ -354,27 +354,27 @@ static int cqspi_exec_flash_cmd(struct cqspi_st *cqspi, unsigned int reg)
return cqspi_wait_idle(cqspi);
}
-static int cqspi_command_read(struct spi_nor *nor,
- const u8 *txbuf, const unsigned n_tx,
- u8 *rxbuf, const unsigned n_rx)
+static int cqspi_command_read(struct spi_nor *nor, u8 opcode,
+ u8 *rxbuf, size_t n_rx)
{
struct cqspi_flash_pdata *f_pdata = nor->priv;
struct cqspi_st *cqspi = f_pdata->cqspi;
void __iomem *reg_base = cqspi->iobase;
unsigned int rdreg;
unsigned int reg;
- unsigned int read_len;
+ size_t read_len;
int status;
if (!n_rx || n_rx > CQSPI_STIG_DATA_LEN_MAX || !rxbuf) {
- dev_err(nor->dev, "Invalid input argument, len %d rxbuf 0x%p\n",
+ dev_err(nor->dev,
+ "Invalid input argument, len %zu rxbuf 0x%p\n",
n_rx, rxbuf);
return -EINVAL;
}
- reg = txbuf[0] << CQSPI_REG_CMDCTRL_OPCODE_LSB;
+ reg = opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB;
- rdreg = cqspi_calc_rdreg(nor, txbuf[0]);
+ rdreg = cqspi_calc_rdreg(nor);
writel(rdreg, reg_base + CQSPI_REG_RD_INSTR);
reg |= (0x1 << CQSPI_REG_CMDCTRL_RD_EN_LSB);
@@ -404,19 +404,19 @@ static int cqspi_command_read(struct spi_nor *nor,
}
static int cqspi_command_write(struct spi_nor *nor, const u8 opcode,
- const u8 *txbuf, const unsigned n_tx)
+ const u8 *txbuf, size_t n_tx)
{
struct cqspi_flash_pdata *f_pdata = nor->priv;
struct cqspi_st *cqspi = f_pdata->cqspi;
void __iomem *reg_base = cqspi->iobase;
unsigned int reg;
unsigned int data;
- u32 write_len;
+ size_t write_len;
int ret;
if (n_tx > CQSPI_STIG_DATA_LEN_MAX || (n_tx && !txbuf)) {
dev_err(nor->dev,
- "Invalid input argument, cmdlen %d txbuf 0x%p\n",
+ "Invalid input argument, cmdlen %zu txbuf 0x%p\n",
n_tx, txbuf);
return -EINVAL;
}
@@ -470,7 +470,7 @@ static int cqspi_read_setup(struct spi_nor *nor)
unsigned int reg;
reg = nor->read_opcode << CQSPI_REG_RD_INSTR_OPCODE_LSB;
- reg |= cqspi_calc_rdreg(nor, nor->read_opcode);
+ reg |= cqspi_calc_rdreg(nor);
/* Setup dummy clock cycles */
dummy_clk = nor->read_dummy;
@@ -603,7 +603,7 @@ static int cqspi_write_setup(struct spi_nor *nor)
/* Set opcode. */
reg = nor->program_opcode << CQSPI_REG_WR_INSTR_OPCODE_LSB;
writel(reg, reg_base + CQSPI_REG_WR_INSTR);
- reg = cqspi_calc_rdreg(nor, nor->program_opcode);
+ reg = cqspi_calc_rdreg(nor);
writel(reg, reg_base + CQSPI_REG_RD_INSTR);
reg = readl(reg_base + CQSPI_REG_SIZE);
@@ -1050,7 +1050,7 @@ static int cqspi_erase(struct spi_nor *nor, loff_t offs)
return ret;
/* Send write enable, then erase commands. */
- ret = nor->write_reg(nor, SPINOR_OP_WREN, NULL, 0);
+ ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WREN, NULL, 0);
if (ret)
return ret;
@@ -1080,18 +1080,19 @@ static void cqspi_unprep(struct spi_nor *nor, enum spi_nor_ops ops)
mutex_unlock(&cqspi->bus_mutex);
}
-static int cqspi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
+static int cqspi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, size_t len)
{
int ret;
ret = cqspi_set_protocol(nor, 0);
if (!ret)
- ret = cqspi_command_read(nor, &opcode, 1, buf, len);
+ ret = cqspi_command_read(nor, opcode, buf, len);
return ret;
}
-static int cqspi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
+static int cqspi_write_reg(struct spi_nor *nor, u8 opcode, const u8 *buf,
+ size_t len)
{
int ret;
@@ -1216,6 +1217,16 @@ static void cqspi_request_mmap_dma(struct cqspi_st *cqspi)
init_completion(&cqspi->rx_dma_complete);
}
+static const struct spi_nor_controller_ops cqspi_controller_ops = {
+ .prepare = cqspi_prep,
+ .unprepare = cqspi_unprep,
+ .read_reg = cqspi_read_reg,
+ .write_reg = cqspi_write_reg,
+ .read = cqspi_read,
+ .write = cqspi_write,
+ .erase = cqspi_erase,
+};
+
static int cqspi_setup_flash(struct cqspi_st *cqspi, struct device_node *np)
{
struct platform_device *pdev = cqspi->pdev;
@@ -1265,14 +1276,7 @@ static int cqspi_setup_flash(struct cqspi_st *cqspi, struct device_node *np)
nor->dev = dev;
spi_nor_set_flash_node(nor, np);
nor->priv = f_pdata;
-
- nor->read_reg = cqspi_read_reg;
- nor->write_reg = cqspi_write_reg;
- nor->read = cqspi_read;
- nor->write = cqspi_write;
- nor->erase = cqspi_erase;
- nor->prepare = cqspi_prep;
- nor->unprepare = cqspi_unprep;
+ nor->controller_ops = &cqspi_controller_ops;
mtd->name = devm_kasprintf(dev, GFP_KERNEL, "%s.%d",
dev_name(dev), cs);
@@ -1366,10 +1370,8 @@ static int cqspi_probe(struct platform_device *pdev)
/* Obtain IRQ line. */
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(dev, "Cannot obtain IRQ.\n");
+ if (irq < 0)
return -ENXIO;
- }
pm_runtime_enable(dev);
ret = pm_runtime_get_sync(dev);
diff --git a/drivers/mtd/spi-nor/hisi-sfc.c b/drivers/mtd/spi-nor/hisi-sfc.c
index 6dac9dd8bf42..a1258216f89d 100644
--- a/drivers/mtd/spi-nor/hisi-sfc.c
+++ b/drivers/mtd/spi-nor/hisi-sfc.c
@@ -177,7 +177,7 @@ static void hisi_spi_nor_unprep(struct spi_nor *nor, enum spi_nor_ops ops)
}
static int hisi_spi_nor_op_reg(struct spi_nor *nor,
- u8 opcode, int len, u8 optype)
+ u8 opcode, size_t len, u8 optype)
{
struct hifmc_priv *priv = nor->priv;
struct hifmc_host *host = priv->host;
@@ -200,7 +200,7 @@ static int hisi_spi_nor_op_reg(struct spi_nor *nor,
}
static int hisi_spi_nor_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf,
- int len)
+ size_t len)
{
struct hifmc_priv *priv = nor->priv;
struct hifmc_host *host = priv->host;
@@ -215,7 +215,7 @@ static int hisi_spi_nor_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf,
}
static int hisi_spi_nor_write_reg(struct spi_nor *nor, u8 opcode,
- u8 *buf, int len)
+ const u8 *buf, size_t len)
{
struct hifmc_priv *priv = nor->priv;
struct hifmc_host *host = priv->host;
@@ -311,6 +311,15 @@ static ssize_t hisi_spi_nor_write(struct spi_nor *nor, loff_t to,
return len;
}
+static const struct spi_nor_controller_ops hisi_controller_ops = {
+ .prepare = hisi_spi_nor_prep,
+ .unprepare = hisi_spi_nor_unprep,
+ .read_reg = hisi_spi_nor_read_reg,
+ .write_reg = hisi_spi_nor_write_reg,
+ .read = hisi_spi_nor_read,
+ .write = hisi_spi_nor_write,
+};
+
/**
* Get spi flash device information and register it as a mtd device.
*/
@@ -357,14 +366,8 @@ static int hisi_spi_nor_register(struct device_node *np,
}
priv->host = host;
nor->priv = priv;
+ nor->controller_ops = &hisi_controller_ops;
- nor->prepare = hisi_spi_nor_prep;
- nor->unprepare = hisi_spi_nor_unprep;
- nor->read_reg = hisi_spi_nor_read_reg;
- nor->write_reg = hisi_spi_nor_write_reg;
- nor->read = hisi_spi_nor_read;
- nor->write = hisi_spi_nor_write;
- nor->erase = NULL;
ret = spi_nor_scan(nor, NULL, &hwcaps);
if (ret)
return ret;
diff --git a/drivers/mtd/spi-nor/intel-spi-pci.c b/drivers/mtd/spi-nor/intel-spi-pci.c
index 3cda8e7a68f8..3d8987baea2a 100644
--- a/drivers/mtd/spi-nor/intel-spi-pci.c
+++ b/drivers/mtd/spi-nor/intel-spi-pci.c
@@ -20,6 +20,10 @@ static const struct intel_spi_boardinfo bxt_info = {
.type = INTEL_SPI_BXT,
};
+static const struct intel_spi_boardinfo cnl_info = {
+ .type = INTEL_SPI_CNL,
+};
+
static int intel_spi_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
@@ -61,6 +65,7 @@ static void intel_spi_pci_remove(struct pci_dev *pdev)
static const struct pci_device_id intel_spi_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x02a4), (unsigned long)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0x06a4), (unsigned long)&bxt_info },
{ PCI_VDEVICE(INTEL, 0x18e0), (unsigned long)&bxt_info },
{ PCI_VDEVICE(INTEL, 0x19e0), (unsigned long)&bxt_info },
{ PCI_VDEVICE(INTEL, 0x34a4), (unsigned long)&bxt_info },
@@ -68,6 +73,7 @@ static const struct pci_device_id intel_spi_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0xa0a4), (unsigned long)&bxt_info },
{ PCI_VDEVICE(INTEL, 0xa1a4), (unsigned long)&bxt_info },
{ PCI_VDEVICE(INTEL, 0xa224), (unsigned long)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0xa324), (unsigned long)&cnl_info },
{ },
};
MODULE_DEVICE_TABLE(pci, intel_spi_pci_ids);
diff --git a/drivers/mtd/spi-nor/intel-spi.c b/drivers/mtd/spi-nor/intel-spi.c
index 43e55a2e9b27..61d2a0ad2131 100644
--- a/drivers/mtd/spi-nor/intel-spi.c
+++ b/drivers/mtd/spi-nor/intel-spi.c
@@ -108,6 +108,10 @@
#define BXT_FREG_NUM 12
#define BXT_PR_NUM 6
+#define CNL_PR 0x84
+#define CNL_FREG_NUM 6
+#define CNL_PR_NUM 5
+
#define LVSCC 0xc4
#define UVSCC 0xc8
#define ERASE_OPCODE_SHIFT 8
@@ -187,12 +191,16 @@ static void intel_spi_dump_regs(struct intel_spi *ispi)
dev_dbg(ispi->dev, "PR(%d)=0x%08x\n", i,
readl(ispi->pregs + PR(i)));
- value = readl(ispi->sregs + SSFSTS_CTL);
- dev_dbg(ispi->dev, "SSFSTS_CTL=0x%08x\n", value);
- dev_dbg(ispi->dev, "PREOP_OPTYPE=0x%08x\n",
- readl(ispi->sregs + PREOP_OPTYPE));
- dev_dbg(ispi->dev, "OPMENU0=0x%08x\n", readl(ispi->sregs + OPMENU0));
- dev_dbg(ispi->dev, "OPMENU1=0x%08x\n", readl(ispi->sregs + OPMENU1));
+ if (ispi->sregs) {
+ value = readl(ispi->sregs + SSFSTS_CTL);
+ dev_dbg(ispi->dev, "SSFSTS_CTL=0x%08x\n", value);
+ dev_dbg(ispi->dev, "PREOP_OPTYPE=0x%08x\n",
+ readl(ispi->sregs + PREOP_OPTYPE));
+ dev_dbg(ispi->dev, "OPMENU0=0x%08x\n",
+ readl(ispi->sregs + OPMENU0));
+ dev_dbg(ispi->dev, "OPMENU1=0x%08x\n",
+ readl(ispi->sregs + OPMENU1));
+ }
if (ispi->info->type == INTEL_SPI_BYT)
dev_dbg(ispi->dev, "BCR=0x%08x\n", readl(ispi->base + BYT_BCR));
@@ -340,6 +348,13 @@ static int intel_spi_init(struct intel_spi *ispi)
ispi->erase_64k = true;
break;
+ case INTEL_SPI_CNL:
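+		/* Cannon Lake has no software sequencer registers. */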
+ ispi->sregs = NULL;
+ ispi->pregs = ispi->base + CNL_PR;
+ ispi->nregions = CNL_FREG_NUM;
+ ispi->pr_num = CNL_PR_NUM;
+ break;
+
default:
return -EINVAL;
}
@@ -367,6 +382,11 @@ static int intel_spi_init(struct intel_spi *ispi)
!(uvscc & ERASE_64K_OPCODE_MASK))
ispi->erase_64k = false;
+ if (ispi->sregs == NULL && (ispi->swseq_reg || ispi->swseq_erase)) {
+ dev_err(ispi->dev, "software sequencer not supported, but required\n");
+ return -EINVAL;
+ }
+
/*
* Some controllers can only do basic operations using hardware
* sequencer. All other operations are supposed to be carried out
@@ -383,7 +403,7 @@ static int intel_spi_init(struct intel_spi *ispi)
val = readl(ispi->base + HSFSTS_CTL);
ispi->locked = !!(val & HSFSTS_CTL_FLOCKDN);
- if (ispi->locked) {
+ if (ispi->locked && ispi->sregs) {
/*
* BIOS programs allowed opcodes and then locks down the
* register. So read back what opcodes it decided to support.
@@ -426,7 +446,7 @@ static int intel_spi_opcode_index(struct intel_spi *ispi, u8 opcode, int optype)
return 0;
}
-static int intel_spi_hw_cycle(struct intel_spi *ispi, u8 opcode, int len)
+static int intel_spi_hw_cycle(struct intel_spi *ispi, u8 opcode, size_t len)
{
u32 val, status;
int ret;
@@ -469,7 +489,7 @@ static int intel_spi_hw_cycle(struct intel_spi *ispi, u8 opcode, int len)
return 0;
}
-static int intel_spi_sw_cycle(struct intel_spi *ispi, u8 opcode, int len,
+static int intel_spi_sw_cycle(struct intel_spi *ispi, u8 opcode, size_t len,
int optype)
{
u32 val = 0, status;
@@ -535,7 +555,8 @@ static int intel_spi_sw_cycle(struct intel_spi *ispi, u8 opcode, int len,
return 0;
}
-static int intel_spi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
+static int intel_spi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf,
+ size_t len)
{
struct intel_spi *ispi = nor->priv;
int ret;
@@ -555,7 +576,8 @@ static int intel_spi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
return intel_spi_read_block(ispi, buf, len);
}
-static int intel_spi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
+static int intel_spi_write_reg(struct spi_nor *nor, u8 opcode, const u8 *buf,
+ size_t len)
{
struct intel_spi *ispi = nor->priv;
int ret;
@@ -864,6 +886,14 @@ static void intel_spi_fill_partition(struct intel_spi *ispi,
}
}
+static const struct spi_nor_controller_ops intel_spi_controller_ops = {
+ .read_reg = intel_spi_read_reg,
+ .write_reg = intel_spi_write_reg,
+ .read = intel_spi_read,
+ .write = intel_spi_write,
+ .erase = intel_spi_erase,
+};
+
struct intel_spi *intel_spi_probe(struct device *dev,
struct resource *mem, const struct intel_spi_boardinfo *info)
{
@@ -897,11 +927,7 @@ struct intel_spi *intel_spi_probe(struct device *dev,
ispi->nor.dev = ispi->dev;
ispi->nor.priv = ispi;
- ispi->nor.read_reg = intel_spi_read_reg;
- ispi->nor.write_reg = intel_spi_write_reg;
- ispi->nor.read = intel_spi_read;
- ispi->nor.write = intel_spi_write;
- ispi->nor.erase = intel_spi_erase;
+ ispi->nor.controller_ops = &intel_spi_controller_ops;
ret = spi_nor_scan(&ispi->nor, NULL, &hwcaps);
if (ret) {
diff --git a/drivers/mtd/spi-nor/mtk-quadspi.c b/drivers/mtd/spi-nor/mtk-quadspi.c
index 34db01ab6cab..b1691680d174 100644
--- a/drivers/mtd/spi-nor/mtk-quadspi.c
+++ b/drivers/mtd/spi-nor/mtk-quadspi.c
@@ -151,9 +151,9 @@ static int mtk_nor_execute_cmd(struct mtk_nor *mtk_nor, u8 cmdval)
}
static int mtk_nor_do_tx_rx(struct mtk_nor *mtk_nor, u8 op,
- u8 *tx, int txlen, u8 *rx, int rxlen)
+ const u8 *tx, size_t txlen, u8 *rx, size_t rxlen)
{
- int len = 1 + txlen + rxlen;
+ size_t len = 1 + txlen + rxlen;
int i, ret, idx;
if (len > MTK_NOR_MAX_SHIFT)
@@ -193,7 +193,7 @@ static int mtk_nor_do_tx_rx(struct mtk_nor *mtk_nor, u8 op,
}
/* Do a WRSR (Write Status Register) command */
-static int mtk_nor_wr_sr(struct mtk_nor *mtk_nor, u8 sr)
+static int mtk_nor_wr_sr(struct mtk_nor *mtk_nor, const u8 sr)
{
writeb(sr, mtk_nor->base + MTK_NOR_PRGDATA5_REG);
writeb(8, mtk_nor->base + MTK_NOR_CNT_REG);
@@ -354,7 +354,7 @@ static ssize_t mtk_nor_write(struct spi_nor *nor, loff_t to, size_t len,
return len;
}
-static int mtk_nor_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
+static int mtk_nor_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, size_t len)
{
int ret;
struct mtk_nor *mtk_nor = nor->priv;
@@ -376,8 +376,8 @@ static int mtk_nor_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
return ret;
}
-static int mtk_nor_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf,
- int len)
+static int mtk_nor_write_reg(struct spi_nor *nor, u8 opcode, const u8 *buf,
+ size_t len)
{
int ret;
struct mtk_nor *mtk_nor = nor->priv;
@@ -419,6 +419,13 @@ static int mtk_nor_enable_clk(struct mtk_nor *mtk_nor)
return 0;
}
+static const struct spi_nor_controller_ops mtk_controller_ops = {
+ .read_reg = mtk_nor_read_reg,
+ .write_reg = mtk_nor_write_reg,
+ .read = mtk_nor_read,
+ .write = mtk_nor_write,
+};
+
static int mtk_nor_init(struct mtk_nor *mtk_nor,
struct device_node *flash_node)
{
@@ -438,12 +445,8 @@ static int mtk_nor_init(struct mtk_nor *mtk_nor,
nor->dev = mtk_nor->dev;
nor->priv = mtk_nor;
spi_nor_set_flash_node(nor, flash_node);
+ nor->controller_ops = &mtk_controller_ops;
- /* fill the hooks to spi nor */
- nor->read = mtk_nor_read;
- nor->read_reg = mtk_nor_read_reg;
- nor->write = mtk_nor_write;
- nor->write_reg = mtk_nor_write_reg;
nor->mtd.name = "mtk_nor";
/* initialized with NULL */
ret = spi_nor_scan(nor, NULL, &hwcaps);
diff --git a/drivers/mtd/spi-nor/nxp-spifi.c b/drivers/mtd/spi-nor/nxp-spifi.c
index 4a871587392b..9a5b1a7c636a 100644
--- a/drivers/mtd/spi-nor/nxp-spifi.c
+++ b/drivers/mtd/spi-nor/nxp-spifi.c
@@ -123,7 +123,8 @@ static int nxp_spifi_set_memory_mode_on(struct nxp_spifi *spifi)
return ret;
}
-static int nxp_spifi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
+static int nxp_spifi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf,
+ size_t len)
{
struct nxp_spifi *spifi = nor->priv;
u32 cmd;
@@ -145,7 +146,8 @@ static int nxp_spifi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
return nxp_spifi_wait_for_cmd(spifi);
}
-static int nxp_spifi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
+static int nxp_spifi_write_reg(struct spi_nor *nor, u8 opcode, const u8 *buf,
+ size_t len)
{
struct nxp_spifi *spifi = nor->priv;
u32 cmd;
@@ -263,9 +265,18 @@ static int nxp_spifi_setup_memory_cmd(struct nxp_spifi *spifi)
static void nxp_spifi_dummy_id_read(struct spi_nor *nor)
{
u8 id[SPI_NOR_MAX_ID_LEN];
- nor->read_reg(nor, SPINOR_OP_RDID, id, SPI_NOR_MAX_ID_LEN);
+ nor->controller_ops->read_reg(nor, SPINOR_OP_RDID, id,
+ SPI_NOR_MAX_ID_LEN);
}
+static const struct spi_nor_controller_ops nxp_spifi_controller_ops = {
+ .read_reg = nxp_spifi_read_reg,
+ .write_reg = nxp_spifi_write_reg,
+ .read = nxp_spifi_read,
+ .write = nxp_spifi_write,
+ .erase = nxp_spifi_erase,
+};
+
static int nxp_spifi_setup_flash(struct nxp_spifi *spifi,
struct device_node *np)
{
@@ -332,11 +343,7 @@ static int nxp_spifi_setup_flash(struct nxp_spifi *spifi,
spifi->nor.dev = spifi->dev;
spi_nor_set_flash_node(&spifi->nor, np);
spifi->nor.priv = spifi;
- spifi->nor.read = nxp_spifi_read;
- spifi->nor.write = nxp_spifi_write;
- spifi->nor.erase = nxp_spifi_erase;
- spifi->nor.read_reg = nxp_spifi_read_reg;
- spifi->nor.write_reg = nxp_spifi_write_reg;
+ spifi->nor.controller_ops = &nxp_spifi_controller_ops;
/*
* The first read on a hard reset isn't reliable so do a
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
index 7acf4a93b592..f4afe123e9dc 100644
--- a/drivers/mtd/spi-nor/spi-nor.c
+++ b/drivers/mtd/spi-nor/spi-nor.c
@@ -338,7 +338,7 @@ static ssize_t spi_nor_read_data(struct spi_nor *nor, loff_t from, size_t len,
if (nor->spimem)
return spi_nor_spimem_read_data(nor, from, len, buf);
- return nor->read(nor, from, len, buf);
+ return nor->controller_ops->read(nor, from, len, buf);
}
/**
@@ -385,239 +385,172 @@ static ssize_t spi_nor_write_data(struct spi_nor *nor, loff_t to, size_t len,
if (nor->spimem)
return spi_nor_spimem_write_data(nor, to, len, buf);
- return nor->write(nor, to, len, buf);
+ return nor->controller_ops->write(nor, to, len, buf);
}
-/*
- * Read the status register, returning its value in the location
- * Return the status register value.
- * Returns negative if error occurred.
+/**
+ * spi_nor_write_enable() - Set write enable latch with Write Enable command.
+ * @nor: pointer to 'struct spi_nor'.
+ *
+ * Return: 0 on success, -errno otherwise.
*/
-static int read_sr(struct spi_nor *nor)
+static int spi_nor_write_enable(struct spi_nor *nor)
{
int ret;
if (nor->spimem) {
struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDSR, 1),
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WREN, 1),
SPI_MEM_OP_NO_ADDR,
SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_IN(1, nor->bouncebuf, 1));
+ SPI_MEM_OP_NO_DATA);
ret = spi_mem_exec_op(nor->spimem, &op);
} else {
- ret = nor->read_reg(nor, SPINOR_OP_RDSR, nor->bouncebuf, 1);
+ ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WREN,
+ NULL, 0);
}
- if (ret < 0) {
- pr_err("error %d reading SR\n", (int) ret);
- return ret;
- }
+ if (ret)
+ dev_dbg(nor->dev, "error %d on Write Enable\n", ret);
- return nor->bouncebuf[0];
+ return ret;
}
-/*
- * Read the flag status register, returning its value in the location
- * Return the status register value.
- * Returns negative if error occurred.
+/**
+ * spi_nor_write_disable() - Send Write Disable instruction to the chip.
+ * @nor: pointer to 'struct spi_nor'.
+ *
+ * Return: 0 on success, -errno otherwise.
*/
-static int read_fsr(struct spi_nor *nor)
+static int spi_nor_write_disable(struct spi_nor *nor)
{
int ret;
if (nor->spimem) {
struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDFSR, 1),
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRDI, 1),
SPI_MEM_OP_NO_ADDR,
SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_IN(1, nor->bouncebuf, 1));
+ SPI_MEM_OP_NO_DATA);
ret = spi_mem_exec_op(nor->spimem, &op);
} else {
- ret = nor->read_reg(nor, SPINOR_OP_RDFSR, nor->bouncebuf, 1);
+ ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WRDI,
+ NULL, 0);
}
- if (ret < 0) {
- pr_err("error %d reading FSR\n", ret);
- return ret;
- }
+ if (ret)
+ dev_dbg(nor->dev, "error %d on Write Disable\n", ret);
- return nor->bouncebuf[0];
+ return ret;
}
-/*
- * Read configuration register, returning its value in the
- * location. Return the configuration register value.
- * Returns negative if error occurred.
+/**
+ * spi_nor_read_sr() - Read the Status Register.
+ * @nor: pointer to 'struct spi_nor'.
+ * @sr: pointer to a DMA-able buffer where the value of the
+ * Status Register will be written.
+ *
+ * Return: 0 on success, -errno otherwise.
*/
-static int read_cr(struct spi_nor *nor)
+static int spi_nor_read_sr(struct spi_nor *nor, u8 *sr)
{
int ret;
if (nor->spimem) {
struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDCR, 1),
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDSR, 1),
SPI_MEM_OP_NO_ADDR,
SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_IN(1, nor->bouncebuf, 1));
+ SPI_MEM_OP_DATA_IN(1, sr, 1));
ret = spi_mem_exec_op(nor->spimem, &op);
} else {
- ret = nor->read_reg(nor, SPINOR_OP_RDCR, nor->bouncebuf, 1);
+ ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDSR,
+ sr, 1);
}
- if (ret < 0) {
- dev_err(nor->dev, "error %d reading CR\n", ret);
- return ret;
- }
+ if (ret)
+ dev_dbg(nor->dev, "error %d reading SR\n", ret);
- return nor->bouncebuf[0];
+ return ret;
}
-/*
- * Write status register 1 byte
- * Returns negative if error occurred.
+/**
+ * spi_nor_read_fsr() - Read the Flag Status Register.
+ * @nor: pointer to 'struct spi_nor'
+ * @fsr: pointer to a DMA-able buffer where the value of the
+ * Flag Status Register will be written.
+ *
+ * Return: 0 on success, -errno otherwise.
*/
-static int write_sr(struct spi_nor *nor, u8 val)
+static int spi_nor_read_fsr(struct spi_nor *nor, u8 *fsr)
{
- nor->bouncebuf[0] = val;
- if (nor->spimem) {
- struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR, 1),
- SPI_MEM_OP_NO_ADDR,
- SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_OUT(1, nor->bouncebuf, 1));
-
- return spi_mem_exec_op(nor->spimem, &op);
- }
-
- return nor->write_reg(nor, SPINOR_OP_WRSR, nor->bouncebuf, 1);
-}
+ int ret;
-/*
- * Set write enable latch with Write Enable command.
- * Returns negative if error occurred.
- */
-static int write_enable(struct spi_nor *nor)
-{
if (nor->spimem) {
struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WREN, 1),
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDFSR, 1),
SPI_MEM_OP_NO_ADDR,
SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_NO_DATA);
+ SPI_MEM_OP_DATA_IN(1, fsr, 1));
- return spi_mem_exec_op(nor->spimem, &op);
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDFSR,
+ fsr, 1);
}
- return nor->write_reg(nor, SPINOR_OP_WREN, NULL, 0);
+ if (ret)
+ dev_dbg(nor->dev, "error %d reading FSR\n", ret);
+
+ return ret;
}
-/*
- * Send write disable instruction to the chip.
+/**
+ * spi_nor_read_cr() - Read the Configuration Register using the
+ * SPINOR_OP_RDCR (35h) command.
+ * @nor:	pointer to 'struct spi_nor'.
+ * @cr: pointer to a DMA-able buffer where the value of the
+ * Configuration Register will be written.
+ *
+ * Return: 0 on success, -errno otherwise.
*/
-static int write_disable(struct spi_nor *nor)
+static int spi_nor_read_cr(struct spi_nor *nor, u8 *cr)
{
+ int ret;
+
if (nor->spimem) {
struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRDI, 1),
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDCR, 1),
SPI_MEM_OP_NO_ADDR,
SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_NO_DATA);
+ SPI_MEM_OP_DATA_IN(1, cr, 1));
- return spi_mem_exec_op(nor->spimem, &op);
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDCR, cr, 1);
}
- return nor->write_reg(nor, SPINOR_OP_WRDI, NULL, 0);
-}
-
-static struct spi_nor *mtd_to_spi_nor(struct mtd_info *mtd)
-{
- return mtd->priv;
-}
-
-
-static u8 spi_nor_convert_opcode(u8 opcode, const u8 table[][2], size_t size)
-{
- size_t i;
-
- for (i = 0; i < size; i++)
- if (table[i][0] == opcode)
- return table[i][1];
-
- /* No conversion found, keep input op code. */
- return opcode;
-}
-
-static u8 spi_nor_convert_3to4_read(u8 opcode)
-{
- static const u8 spi_nor_3to4_read[][2] = {
- { SPINOR_OP_READ, SPINOR_OP_READ_4B },
- { SPINOR_OP_READ_FAST, SPINOR_OP_READ_FAST_4B },
- { SPINOR_OP_READ_1_1_2, SPINOR_OP_READ_1_1_2_4B },
- { SPINOR_OP_READ_1_2_2, SPINOR_OP_READ_1_2_2_4B },
- { SPINOR_OP_READ_1_1_4, SPINOR_OP_READ_1_1_4_4B },
- { SPINOR_OP_READ_1_4_4, SPINOR_OP_READ_1_4_4_4B },
- { SPINOR_OP_READ_1_1_8, SPINOR_OP_READ_1_1_8_4B },
- { SPINOR_OP_READ_1_8_8, SPINOR_OP_READ_1_8_8_4B },
-
- { SPINOR_OP_READ_1_1_1_DTR, SPINOR_OP_READ_1_1_1_DTR_4B },
- { SPINOR_OP_READ_1_2_2_DTR, SPINOR_OP_READ_1_2_2_DTR_4B },
- { SPINOR_OP_READ_1_4_4_DTR, SPINOR_OP_READ_1_4_4_DTR_4B },
- };
-
- return spi_nor_convert_opcode(opcode, spi_nor_3to4_read,
- ARRAY_SIZE(spi_nor_3to4_read));
-}
-
-static u8 spi_nor_convert_3to4_program(u8 opcode)
-{
- static const u8 spi_nor_3to4_program[][2] = {
- { SPINOR_OP_PP, SPINOR_OP_PP_4B },
- { SPINOR_OP_PP_1_1_4, SPINOR_OP_PP_1_1_4_4B },
- { SPINOR_OP_PP_1_4_4, SPINOR_OP_PP_1_4_4_4B },
- { SPINOR_OP_PP_1_1_8, SPINOR_OP_PP_1_1_8_4B },
- { SPINOR_OP_PP_1_8_8, SPINOR_OP_PP_1_8_8_4B },
- };
-
- return spi_nor_convert_opcode(opcode, spi_nor_3to4_program,
- ARRAY_SIZE(spi_nor_3to4_program));
-}
-
-static u8 spi_nor_convert_3to4_erase(u8 opcode)
-{
- static const u8 spi_nor_3to4_erase[][2] = {
- { SPINOR_OP_BE_4K, SPINOR_OP_BE_4K_4B },
- { SPINOR_OP_BE_32K, SPINOR_OP_BE_32K_4B },
- { SPINOR_OP_SE, SPINOR_OP_SE_4B },
- };
-
- return spi_nor_convert_opcode(opcode, spi_nor_3to4_erase,
- ARRAY_SIZE(spi_nor_3to4_erase));
-}
-
-static void spi_nor_set_4byte_opcodes(struct spi_nor *nor)
-{
- nor->read_opcode = spi_nor_convert_3to4_read(nor->read_opcode);
- nor->program_opcode = spi_nor_convert_3to4_program(nor->program_opcode);
- nor->erase_opcode = spi_nor_convert_3to4_erase(nor->erase_opcode);
-
- if (!spi_nor_has_uniform_erase(nor)) {
- struct spi_nor_erase_map *map = &nor->params.erase_map;
- struct spi_nor_erase_type *erase;
- int i;
+ if (ret)
+ dev_dbg(nor->dev, "error %d reading CR\n", ret);
- for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
- erase = &map->erase_type[i];
- erase->opcode =
- spi_nor_convert_3to4_erase(erase->opcode);
- }
- }
+ return ret;
}
+/**
+ * macronix_set_4byte() - Set 4-byte address mode for Macronix flashes.
+ * @nor: pointer to 'struct spi_nor'.
+ * @enable: true to enter the 4-byte address mode, false to exit the 4-byte
+ * address mode.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
static int macronix_set_4byte(struct spi_nor *nor, bool enable)
{
+ int ret;
+
if (nor->spimem) {
struct spi_mem_op op =
SPI_MEM_OP(SPI_MEM_OP_CMD(enable ?
@@ -628,26 +561,55 @@ static int macronix_set_4byte(struct spi_nor *nor, bool enable)
SPI_MEM_OP_NO_DUMMY,
SPI_MEM_OP_NO_DATA);
- return spi_mem_exec_op(nor->spimem, &op);
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = nor->controller_ops->write_reg(nor,
+ enable ? SPINOR_OP_EN4B :
+ SPINOR_OP_EX4B,
+ NULL, 0);
}
- return nor->write_reg(nor, enable ? SPINOR_OP_EN4B : SPINOR_OP_EX4B,
- NULL, 0);
+ if (ret)
+ dev_dbg(nor->dev, "error %d setting 4-byte mode\n", ret);
+
+ return ret;
}
+/**
+ * st_micron_set_4byte() - Set 4-byte address mode for ST and Micron flashes.
+ * @nor: pointer to 'struct spi_nor'.
+ * @enable: true to enter the 4-byte address mode, false to exit the 4-byte
+ * address mode.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
static int st_micron_set_4byte(struct spi_nor *nor, bool enable)
{
int ret;
- write_enable(nor);
+ ret = spi_nor_write_enable(nor);
+ if (ret)
+ return ret;
+
ret = macronix_set_4byte(nor, enable);
- write_disable(nor);
+ if (ret)
+ return ret;
- return ret;
+ return spi_nor_write_disable(nor);
}
+/**
+ * spansion_set_4byte() - Set 4-byte address mode for Spansion flashes.
+ * @nor: pointer to 'struct spi_nor'.
+ * @enable: true to enter the 4-byte address mode, false to exit the 4-byte
+ * address mode.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
static int spansion_set_4byte(struct spi_nor *nor, bool enable)
{
+ int ret;
+
nor->bouncebuf[0] = enable << 7;
if (nor->spimem) {
@@ -657,14 +619,29 @@ static int spansion_set_4byte(struct spi_nor *nor, bool enable)
SPI_MEM_OP_NO_DUMMY,
SPI_MEM_OP_DATA_OUT(1, nor->bouncebuf, 1));
- return spi_mem_exec_op(nor->spimem, &op);
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = nor->controller_ops->write_reg(nor, SPINOR_OP_BRWR,
+ nor->bouncebuf, 1);
}
- return nor->write_reg(nor, SPINOR_OP_BRWR, nor->bouncebuf, 1);
+ if (ret)
+ dev_dbg(nor->dev, "error %d setting 4-byte mode\n", ret);
+
+ return ret;
}
+/**
+ * spi_nor_write_ear() - Write Extended Address Register.
+ * @nor: pointer to 'struct spi_nor'.
+ * @ear: value to write to the Extended Address Register.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
static int spi_nor_write_ear(struct spi_nor *nor, u8 ear)
{
+ int ret;
+
nor->bouncebuf[0] = ear;
if (nor->spimem) {
@@ -674,12 +651,26 @@ static int spi_nor_write_ear(struct spi_nor *nor, u8 ear)
SPI_MEM_OP_NO_DUMMY,
SPI_MEM_OP_DATA_OUT(1, nor->bouncebuf, 1));
- return spi_mem_exec_op(nor->spimem, &op);
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WREAR,
+ nor->bouncebuf, 1);
}
- return nor->write_reg(nor, SPINOR_OP_WREAR, nor->bouncebuf, 1);
+ if (ret)
+ dev_dbg(nor->dev, "error %d writing EAR\n", ret);
+
+ return ret;
}
+/**
+ * winbond_set_4byte() - Set 4-byte address mode for Winbond flashes.
+ * @nor: pointer to 'struct spi_nor'.
+ * @enable: true to enter the 4-byte address mode, false to exit the 4-byte
+ * address mode.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
static int winbond_set_4byte(struct spi_nor *nor, bool enable)
{
int ret;
@@ -693,15 +684,29 @@ static int winbond_set_4byte(struct spi_nor *nor, bool enable)
* Register to be set to 1, so all 3-byte-address reads come from the
* second 16M. We must clear the register to enable normal behavior.
*/
- write_enable(nor);
+ ret = spi_nor_write_enable(nor);
+ if (ret)
+ return ret;
+
ret = spi_nor_write_ear(nor, 0);
- write_disable(nor);
+ if (ret)
+ return ret;
- return ret;
+ return spi_nor_write_disable(nor);
}
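For context on the comment above: the Extended Address Register supplies the top address byte whenever 3-byte opcodes address a part larger than 16 MiB. An illustrative sketch of the assumed semantics (not code from the patch):

        /* Byte selected by a 3-byte command when EAR is non-zero. */
        u32 phys = ((u32)ear << 24) | (addr & 0xffffff);

        /* Hence spi_nor_write_ear(nor, 0) above: it guarantees that
         * 3-byte reads hit the first 16 MiB again after leaving
         * 4-byte address mode. */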
+/**
+ * spi_nor_xread_sr() - Read the Status Register on S3AN flashes.
+ * @nor: pointer to 'struct spi_nor'.
+ * @sr: pointer to a DMA-able buffer where the value of the
+ * Status Register will be written.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
static int spi_nor_xread_sr(struct spi_nor *nor, u8 *sr)
{
+ int ret;
+
if (nor->spimem) {
struct spi_mem_op op =
SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_XRDSR, 1),
@@ -709,27 +714,44 @@ static int spi_nor_xread_sr(struct spi_nor *nor, u8 *sr)
SPI_MEM_OP_NO_DUMMY,
SPI_MEM_OP_DATA_IN(1, sr, 1));
- return spi_mem_exec_op(nor->spimem, &op);
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = nor->controller_ops->read_reg(nor, SPINOR_OP_XRDSR,
+ sr, 1);
}
- return nor->read_reg(nor, SPINOR_OP_XRDSR, sr, 1);
+ if (ret)
+ dev_dbg(nor->dev, "error %d reading XRDSR\n", ret);
+
+ return ret;
}
+/**
+ * s3an_sr_ready() - Query the Status Register of the S3AN flash to see if the
+ * flash is ready for new commands.
+ * @nor: pointer to 'struct spi_nor'.
+ *
+ * Return: 1 if ready, 0 if not ready, -errno on errors.
+ */
static int s3an_sr_ready(struct spi_nor *nor)
{
int ret;
ret = spi_nor_xread_sr(nor, nor->bouncebuf);
- if (ret < 0) {
- dev_err(nor->dev, "error %d reading XRDSR\n", (int) ret);
+ if (ret)
return ret;
- }
return !!(nor->bouncebuf[0] & XSR_RDY);
}
-static int spi_nor_clear_sr(struct spi_nor *nor)
+/**
+ * spi_nor_clear_sr() - Clear the Status Register.
+ * @nor: pointer to 'struct spi_nor'.
+ */
+static void spi_nor_clear_sr(struct spi_nor *nor)
{
+ int ret;
+
if (nor->spimem) {
struct spi_mem_op op =
SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CLSR, 1),
@@ -737,20 +759,33 @@ static int spi_nor_clear_sr(struct spi_nor *nor)
SPI_MEM_OP_NO_DUMMY,
SPI_MEM_OP_NO_DATA);
- return spi_mem_exec_op(nor->spimem, &op);
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = nor->controller_ops->write_reg(nor, SPINOR_OP_CLSR,
+ NULL, 0);
}
- return nor->write_reg(nor, SPINOR_OP_CLSR, NULL, 0);
+ if (ret)
+ dev_dbg(nor->dev, "error %d clearing SR\n", ret);
}
+/**
+ * spi_nor_sr_ready() - Query the Status Register to see if the flash is ready
+ * for new commands.
+ * @nor: pointer to 'struct spi_nor'.
+ *
+ * Return: 1 if ready, 0 if not ready, -errno on errors.
+ */
static int spi_nor_sr_ready(struct spi_nor *nor)
{
- int sr = read_sr(nor);
- if (sr < 0)
- return sr;
+ int ret = spi_nor_read_sr(nor, nor->bouncebuf);
+
+ if (ret)
+ return ret;
- if (nor->flags & SNOR_F_USE_CLSR && sr & (SR_E_ERR | SR_P_ERR)) {
- if (sr & SR_E_ERR)
+ if (nor->flags & SNOR_F_USE_CLSR &&
+ nor->bouncebuf[0] & (SR_E_ERR | SR_P_ERR)) {
+ if (nor->bouncebuf[0] & SR_E_ERR)
dev_err(nor->dev, "Erase Error occurred\n");
else
dev_err(nor->dev, "Programming Error occurred\n");
@@ -759,11 +794,17 @@ static int spi_nor_sr_ready(struct spi_nor *nor)
return -EIO;
}
- return !(sr & SR_WIP);
+ return !(nor->bouncebuf[0] & SR_WIP);
}
-static int spi_nor_clear_fsr(struct spi_nor *nor)
+/**
+ * spi_nor_clear_fsr() - Clear the Flag Status Register.
+ * @nor: pointer to 'struct spi_nor'.
+ */
+static void spi_nor_clear_fsr(struct spi_nor *nor)
{
+ int ret;
+
if (nor->spimem) {
struct spi_mem_op op =
SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CLFSR, 1),
@@ -771,25 +812,37 @@ static int spi_nor_clear_fsr(struct spi_nor *nor)
SPI_MEM_OP_NO_DUMMY,
SPI_MEM_OP_NO_DATA);
- return spi_mem_exec_op(nor->spimem, &op);
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = nor->controller_ops->write_reg(nor, SPINOR_OP_CLFSR,
+ NULL, 0);
}
- return nor->write_reg(nor, SPINOR_OP_CLFSR, NULL, 0);
+ if (ret)
+ dev_dbg(nor->dev, "error %d clearing FSR\n", ret);
}
+/**
+ * spi_nor_fsr_ready() - Query the Flag Status Register to see if the flash is
+ * ready for new commands.
+ * @nor: pointer to 'struct spi_nor'.
+ *
+ * Return: nonzero if ready, 0 if not ready, -errno on errors.
+ */
static int spi_nor_fsr_ready(struct spi_nor *nor)
{
- int fsr = read_fsr(nor);
- if (fsr < 0)
- return fsr;
+ int ret = spi_nor_read_fsr(nor, nor->bouncebuf);
- if (fsr & (FSR_E_ERR | FSR_P_ERR)) {
- if (fsr & FSR_E_ERR)
+ if (ret)
+ return ret;
+
+ if (nor->bouncebuf[0] & (FSR_E_ERR | FSR_P_ERR)) {
+ if (nor->bouncebuf[0] & FSR_E_ERR)
dev_err(nor->dev, "Erase operation failed.\n");
else
dev_err(nor->dev, "Program operation failed.\n");
- if (fsr & FSR_PT_ERR)
+ if (nor->bouncebuf[0] & FSR_PT_ERR)
dev_err(nor->dev,
"Attempted to modify a protected sector.\n");
@@ -797,9 +850,15 @@ static int spi_nor_fsr_ready(struct spi_nor *nor)
return -EIO;
}
- return fsr & FSR_READY;
+ return nor->bouncebuf[0] & FSR_READY;
}
+/**
+ * spi_nor_ready() - Query the flash to see if it is ready for new commands.
+ * @nor: pointer to 'struct spi_nor'.
+ *
+ * Return: 1 if ready, 0 if not ready, -errno on errors.
+ */
static int spi_nor_ready(struct spi_nor *nor)
{
int sr, fsr;
@@ -816,9 +875,13 @@ static int spi_nor_ready(struct spi_nor *nor)
return sr && fsr;
}
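The *_ready() helpers use a tri-state convention: 1 when the flash is idle, 0 when it is busy, -errno on failure. A sketch of how the wait routine below consumes it (paraphrased under that assumption; only the loop's tail survives in the next hunk):

        unsigned long deadline = jiffies + timeout_jiffies;

        while (!time_after_eq(jiffies, deadline)) {
                int ret = spi_nor_ready(nor);

                if (ret < 0)
                        return ret;     /* transport or flash error */
                if (ret)
                        return 0;       /* flash is idle */
                cond_resched();
        }
        return -ETIMEDOUT;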
-/*
- * Service routine to read status register until ready, or timeout occurs.
- * Returns non-zero if error.
+/**
+ * spi_nor_wait_till_ready_with_timeout() - Service routine to read the
+ * Status Register until ready, or timeout occurs.
+ * @nor: pointer to "struct spi_nor".
+ * @timeout_jiffies: jiffies to wait until timeout.
+ *
+ * Return: 0 on success, -errno otherwise.
*/
static int spi_nor_wait_till_ready_with_timeout(struct spi_nor *nor,
unsigned long timeout_jiffies)
@@ -841,24 +904,305 @@ static int spi_nor_wait_till_ready_with_timeout(struct spi_nor *nor,
cond_resched();
}
- dev_err(nor->dev, "flash operation timed out\n");
+ dev_dbg(nor->dev, "flash operation timed out\n");
return -ETIMEDOUT;
}
+/**
+ * spi_nor_wait_till_ready() - Wait for a predefined amount of time for the
+ * flash to be ready, or timeout occurs.
+ * @nor: pointer to "struct spi_nor".
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
static int spi_nor_wait_till_ready(struct spi_nor *nor)
{
return spi_nor_wait_till_ready_with_timeout(nor,
DEFAULT_READY_WAIT_JIFFIES);
}
-/*
- * Erase the whole flash memory
+/**
+ * spi_nor_write_sr() - Write the Status Register.
+ * @nor: pointer to 'struct spi_nor'.
+ * @sr: pointer to DMA-able buffer to write to the Status Register.
+ * @len: number of bytes to write to the Status Register.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_write_sr(struct spi_nor *nor, const u8 *sr, size_t len)
+{
+ int ret;
+
+ ret = spi_nor_write_enable(nor);
+ if (ret)
+ return ret;
+
+ if (nor->spimem) {
+ struct spi_mem_op op =
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR, 1),
+ SPI_MEM_OP_NO_ADDR,
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_DATA_OUT(len, sr, 1));
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WRSR,
+ sr, len);
+ }
+
+ if (ret) {
+ dev_dbg(nor->dev, "error %d writing SR\n", ret);
+ return ret;
+ }
+
+ return spi_nor_wait_till_ready(nor);
+}
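spi_nor_write_sr() now folds the Write Enable latch and the readiness wait into the register write itself; compare the open-coded sequence of the old write_sr_and_check(), removed further down. A before/after caller sketch (illustrative):

        /* Old pattern: three manual steps, errors only partially checked. */
        write_enable(nor);
        write_sr(nor, val);
        spi_nor_wait_till_ready(nor);

        /* New pattern: one call, every step checked, DMA-able buffer. */
        nor->bouncebuf[0] = val;
        ret = spi_nor_write_sr(nor, nor->bouncebuf, 1);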
+
+/**
+ * spi_nor_write_sr1_and_check() - Write one byte to the Status Register 1 and
+ * ensure that the byte written matches the value read back.
+ * @nor: pointer to a 'struct spi_nor'.
+ * @sr1: byte value to be written to the Status Register.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_write_sr1_and_check(struct spi_nor *nor, u8 sr1)
+{
+ int ret;
+
+ nor->bouncebuf[0] = sr1;
+
+ ret = spi_nor_write_sr(nor, nor->bouncebuf, 1);
+ if (ret)
+ return ret;
+
+ ret = spi_nor_read_sr(nor, nor->bouncebuf);
+ if (ret)
+ return ret;
+
+ if (nor->bouncebuf[0] != sr1) {
+ dev_dbg(nor->dev, "SR1: read back test failed\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * spi_nor_write_16bit_sr_and_check() - Write the Status Register 1 and the
+ * Status Register 2 in one shot. Ensure that the byte written to Status
+ * Register 1 matches the value read back, and that the 16-bit write did not
+ * affect what was already in Status Register 2.
+ * @nor: pointer to a 'struct spi_nor'.
+ * @sr1: byte value to be written to the Status Register 1.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_write_16bit_sr_and_check(struct spi_nor *nor, u8 sr1)
+{
+ int ret;
+ u8 *sr_cr = nor->bouncebuf;
+ u8 cr_written;
+
+ /* Make sure we don't overwrite the contents of Status Register 2. */
+ if (!(nor->flags & SNOR_F_NO_READ_CR)) {
+ ret = spi_nor_read_cr(nor, &sr_cr[1]);
+ if (ret)
+ return ret;
+ } else if (nor->params.quad_enable) {
+ /*
+ * If the Status Register 2 Read command (35h) is not
+ * supported, we should at least be sure we don't
+ * change the value of the SR2 Quad Enable bit.
+ *
+ * We can safely assume that when the Quad Enable method is
+ * set, the value of the QE bit is one, as a consequence of the
+ * nor->params.quad_enable() call.
+ *
+ * We can safely assume that the Quad Enable bit is present in
+ * the Status Register 2 at BIT(1). According to the JESD216
+ * revB standard, BFPT DWORDS[15], bits 22:20, the 16-bit
+ * Write Status (01h) command is available just for the cases
+ * in which the QE bit is described in SR2 at BIT(1).
+ */
+ sr_cr[1] = SR2_QUAD_EN_BIT1;
+ } else {
+ sr_cr[1] = 0;
+ }
+
+ sr_cr[0] = sr1;
+
+ ret = spi_nor_write_sr(nor, sr_cr, 2);
+ if (ret)
+ return ret;
+
+ if (nor->flags & SNOR_F_NO_READ_CR)
+ return 0;
+
+ cr_written = sr_cr[1];
+
+ ret = spi_nor_read_cr(nor, &sr_cr[1]);
+ if (ret)
+ return ret;
+
+ if (cr_written != sr_cr[1]) {
+ dev_dbg(nor->dev, "CR: read back test failed\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * spi_nor_write_16bit_cr_and_check() - Write the Status Register 1 and the
+ * Configuration Register in one shot. Ensure that the byte written to the
+ * Configuration Register matches the value read back, and that the 16-bit
+ * write did not affect what was already in Status Register 1.
+ * @nor: pointer to a 'struct spi_nor'.
+ * @cr: byte value to be written to the Configuration Register.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_write_16bit_cr_and_check(struct spi_nor *nor, u8 cr)
+{
+ int ret;
+ u8 *sr_cr = nor->bouncebuf;
+ u8 sr_written;
+
+ /* Keep the current value of the Status Register 1. */
+ ret = spi_nor_read_sr(nor, sr_cr);
+ if (ret)
+ return ret;
+
+ sr_cr[1] = cr;
+
+ ret = spi_nor_write_sr(nor, sr_cr, 2);
+ if (ret)
+ return ret;
+
+ sr_written = sr_cr[0];
+
+ ret = spi_nor_read_sr(nor, sr_cr);
+ if (ret)
+ return ret;
+
+ if (sr_written != sr_cr[0]) {
+ dev_dbg(nor->dev, "SR: Read back test failed\n");
+ return -EIO;
+ }
+
+ if (nor->flags & SNOR_F_NO_READ_CR)
+ return 0;
+
+ ret = spi_nor_read_cr(nor, &sr_cr[1]);
+ if (ret)
+ return ret;
+
+ if (cr != sr_cr[1]) {
+ dev_dbg(nor->dev, "CR: read back test failed\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * spi_nor_write_sr_and_check() - Write the Status Register 1 and ensure that
+ * the byte written matches the value read back, without affecting other bits
+ * in Status Registers 1 and 2.
+ * @nor: pointer to a 'struct spi_nor'.
+ * @sr1: byte value to be written to the Status Register.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_write_sr_and_check(struct spi_nor *nor, u8 sr1)
+{
+ if (nor->flags & SNOR_F_HAS_16BIT_SR)
+ return spi_nor_write_16bit_sr_and_check(nor, sr1);
+
+ return spi_nor_write_sr1_and_check(nor, sr1);
+}
+
+/**
+ * spi_nor_write_sr2() - Write the Status Register 2 using the
+ * SPINOR_OP_WRSR2 (3eh) command.
+ * @nor: pointer to 'struct spi_nor'.
+ * @sr2: pointer to DMA-able buffer to write to the Status Register 2.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_write_sr2(struct spi_nor *nor, const u8 *sr2)
+{
+ int ret;
+
+ ret = spi_nor_write_enable(nor);
+ if (ret)
+ return ret;
+
+ if (nor->spimem) {
+ struct spi_mem_op op =
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR2, 1),
+ SPI_MEM_OP_NO_ADDR,
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_DATA_OUT(1, sr2, 1));
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WRSR2,
+ sr2, 1);
+ }
+
+ if (ret) {
+ dev_dbg(nor->dev, "error %d writing SR2\n", ret);
+ return ret;
+ }
+
+ return spi_nor_wait_till_ready(nor);
+}
+
+/**
+ * spi_nor_read_sr2() - Read the Status Register 2 using the
+ * SPINOR_OP_RDSR2 (3fh) command.
+ * @nor: pointer to 'struct spi_nor'.
+ * @sr2: pointer to DMA-able buffer where the value of the
+ * Status Register 2 will be written.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_read_sr2(struct spi_nor *nor, u8 *sr2)
+{
+ int ret;
+
+ if (nor->spimem) {
+ struct spi_mem_op op =
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDSR2, 1),
+ SPI_MEM_OP_NO_ADDR,
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_DATA_IN(1, sr2, 1));
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDSR2,
+ sr2, 1);
+ }
+
+ if (ret)
+ dev_dbg(nor->dev, "error %d reading SR2\n", ret);
+
+ return ret;
+}
+
+/**
+ * spi_nor_erase_chip() - Erase the entire flash memory.
+ * @nor: pointer to 'struct spi_nor'.
*
- * Returns 0 if successful, non-zero otherwise.
+ * Return: 0 on success, -errno otherwise.
*/
-static int erase_chip(struct spi_nor *nor)
+static int spi_nor_erase_chip(struct spi_nor *nor)
{
+ int ret;
+
dev_dbg(nor->dev, " %lldKiB\n", (long long)(nor->mtd.size >> 10));
if (nor->spimem) {
@@ -868,10 +1212,99 @@ static int erase_chip(struct spi_nor *nor)
SPI_MEM_OP_NO_DUMMY,
SPI_MEM_OP_NO_DATA);
- return spi_mem_exec_op(nor->spimem, &op);
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = nor->controller_ops->write_reg(nor, SPINOR_OP_CHIP_ERASE,
+ NULL, 0);
}
- return nor->write_reg(nor, SPINOR_OP_CHIP_ERASE, NULL, 0);
+ if (ret)
+ dev_dbg(nor->dev, "error %d erasing chip\n", ret);
+
+ return ret;
+}
+
+static struct spi_nor *mtd_to_spi_nor(struct mtd_info *mtd)
+{
+ return mtd->priv;
+}
+
+static u8 spi_nor_convert_opcode(u8 opcode, const u8 table[][2], size_t size)
+{
+ size_t i;
+
+ for (i = 0; i < size; i++)
+ if (table[i][0] == opcode)
+ return table[i][1];
+
+ /* No conversion found, keep input op code. */
+ return opcode;
+}
+
+static u8 spi_nor_convert_3to4_read(u8 opcode)
+{
+ static const u8 spi_nor_3to4_read[][2] = {
+ { SPINOR_OP_READ, SPINOR_OP_READ_4B },
+ { SPINOR_OP_READ_FAST, SPINOR_OP_READ_FAST_4B },
+ { SPINOR_OP_READ_1_1_2, SPINOR_OP_READ_1_1_2_4B },
+ { SPINOR_OP_READ_1_2_2, SPINOR_OP_READ_1_2_2_4B },
+ { SPINOR_OP_READ_1_1_4, SPINOR_OP_READ_1_1_4_4B },
+ { SPINOR_OP_READ_1_4_4, SPINOR_OP_READ_1_4_4_4B },
+ { SPINOR_OP_READ_1_1_8, SPINOR_OP_READ_1_1_8_4B },
+ { SPINOR_OP_READ_1_8_8, SPINOR_OP_READ_1_8_8_4B },
+
+ { SPINOR_OP_READ_1_1_1_DTR, SPINOR_OP_READ_1_1_1_DTR_4B },
+ { SPINOR_OP_READ_1_2_2_DTR, SPINOR_OP_READ_1_2_2_DTR_4B },
+ { SPINOR_OP_READ_1_4_4_DTR, SPINOR_OP_READ_1_4_4_DTR_4B },
+ };
+
+ return spi_nor_convert_opcode(opcode, spi_nor_3to4_read,
+ ARRAY_SIZE(spi_nor_3to4_read));
+}
+
+static u8 spi_nor_convert_3to4_program(u8 opcode)
+{
+ static const u8 spi_nor_3to4_program[][2] = {
+ { SPINOR_OP_PP, SPINOR_OP_PP_4B },
+ { SPINOR_OP_PP_1_1_4, SPINOR_OP_PP_1_1_4_4B },
+ { SPINOR_OP_PP_1_4_4, SPINOR_OP_PP_1_4_4_4B },
+ { SPINOR_OP_PP_1_1_8, SPINOR_OP_PP_1_1_8_4B },
+ { SPINOR_OP_PP_1_8_8, SPINOR_OP_PP_1_8_8_4B },
+ };
+
+ return spi_nor_convert_opcode(opcode, spi_nor_3to4_program,
+ ARRAY_SIZE(spi_nor_3to4_program));
+}
+
+static u8 spi_nor_convert_3to4_erase(u8 opcode)
+{
+ static const u8 spi_nor_3to4_erase[][2] = {
+ { SPINOR_OP_BE_4K, SPINOR_OP_BE_4K_4B },
+ { SPINOR_OP_BE_32K, SPINOR_OP_BE_32K_4B },
+ { SPINOR_OP_SE, SPINOR_OP_SE_4B },
+ };
+
+ return spi_nor_convert_opcode(opcode, spi_nor_3to4_erase,
+ ARRAY_SIZE(spi_nor_3to4_erase));
+}
+
+static void spi_nor_set_4byte_opcodes(struct spi_nor *nor)
+{
+ nor->read_opcode = spi_nor_convert_3to4_read(nor->read_opcode);
+ nor->program_opcode = spi_nor_convert_3to4_program(nor->program_opcode);
+ nor->erase_opcode = spi_nor_convert_3to4_erase(nor->erase_opcode);
+
+ if (!spi_nor_has_uniform_erase(nor)) {
+ struct spi_nor_erase_map *map = &nor->params.erase_map;
+ struct spi_nor_erase_type *erase;
+ int i;
+
+ for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
+ erase = &map->erase_type[i];
+ erase->opcode =
+ spi_nor_convert_3to4_erase(erase->opcode);
+ }
+ }
}
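spi_nor_convert_opcode() is a plain table lookup that falls through to the input opcode when no 4-byte variant is known. For instance (opcode values assumed from spi-nor.h):

        u8 op;

        op = spi_nor_convert_3to4_read(SPINOR_OP_READ_FAST);
        /* op is now SPINOR_OP_READ_FAST_4B (0x0b -> 0x0c). */

        op = spi_nor_convert_3to4_read(0xab);
        /* 0xab has no entry in the table: returned unchanged. */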
static int spi_nor_lock_and_prep(struct spi_nor *nor, enum spi_nor_ops ops)
@@ -880,10 +1313,9 @@ static int spi_nor_lock_and_prep(struct spi_nor *nor, enum spi_nor_ops ops)
mutex_lock(&nor->lock);
- if (nor->prepare) {
- ret = nor->prepare(nor, ops);
+ if (nor->controller_ops && nor->controller_ops->prepare) {
+ ret = nor->controller_ops->prepare(nor, ops);
if (ret) {
- dev_err(nor->dev, "failed in the preparation.\n");
mutex_unlock(&nor->lock);
return ret;
}
@@ -893,8 +1325,8 @@ static int spi_nor_lock_and_prep(struct spi_nor *nor, enum spi_nor_ops ops)
static void spi_nor_unlock_and_unprep(struct spi_nor *nor, enum spi_nor_ops ops)
{
- if (nor->unprepare)
- nor->unprepare(nor, ops);
+ if (nor->controller_ops && nor->controller_ops->unprepare)
+ nor->controller_ops->unprepare(nor, ops);
mutex_unlock(&nor->lock);
}
@@ -935,9 +1367,6 @@ static int spi_nor_erase_sector(struct spi_nor *nor, u32 addr)
addr = spi_nor_convert_addr(nor, addr);
- if (nor->erase)
- return nor->erase(nor, addr);
-
if (nor->spimem) {
struct spi_mem_op op =
SPI_MEM_OP(SPI_MEM_OP_CMD(nor->erase_opcode, 1),
@@ -946,6 +1375,8 @@ static int spi_nor_erase_sector(struct spi_nor *nor, u32 addr)
SPI_MEM_OP_NO_DATA);
return spi_mem_exec_op(nor->spimem, &op);
+ } else if (nor->controller_ops->erase) {
+ return nor->controller_ops->erase(nor, addr);
}
/*
@@ -957,8 +1388,8 @@ static int spi_nor_erase_sector(struct spi_nor *nor, u32 addr)
addr >>= 8;
}
- return nor->write_reg(nor, nor->erase_opcode, nor->bouncebuf,
- nor->addr_width);
+ return nor->controller_ops->write_reg(nor, nor->erase_opcode,
+ nor->bouncebuf, nor->addr_width);
}
/**
@@ -1208,7 +1639,9 @@ static int spi_nor_erase_multi_sectors(struct spi_nor *nor, u64 addr, u32 len)
list_for_each_entry_safe(cmd, next, &erase_list, list) {
nor->erase_opcode = cmd->opcode;
while (cmd->count) {
- write_enable(nor);
+ ret = spi_nor_write_enable(nor);
+ if (ret)
+ goto destroy_erase_cmd_list;
ret = spi_nor_erase_sector(nor, addr);
if (ret)
@@ -1263,12 +1696,13 @@ static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
if (len == mtd->size && !(nor->flags & SNOR_F_NO_OP_CHIP_ERASE)) {
unsigned long timeout;
- write_enable(nor);
+ ret = spi_nor_write_enable(nor);
+ if (ret)
+ goto erase_err;
- if (erase_chip(nor)) {
- ret = -EIO;
+ ret = spi_nor_erase_chip(nor);
+ if (ret)
goto erase_err;
- }
/*
* Scale the timeout linearly with the size of the flash, with
@@ -1291,7 +1725,9 @@ static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
/* "sector"-at-a-time erase */
} else if (spi_nor_has_uniform_erase(nor)) {
while (len) {
- write_enable(nor);
+ ret = spi_nor_write_enable(nor);
+ if (ret)
+ goto erase_err;
ret = spi_nor_erase_sector(nor, addr);
if (ret)
@@ -1312,7 +1748,7 @@ static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
goto erase_err;
}
- write_disable(nor);
+ ret = spi_nor_write_disable(nor);
erase_err:
spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_ERASE);
@@ -1320,27 +1756,6 @@ erase_err:
return ret;
}
-/* Write status register and ensure bits in mask match written values */
-static int write_sr_and_check(struct spi_nor *nor, u8 status_new, u8 mask)
-{
- int ret;
-
- write_enable(nor);
- ret = write_sr(nor, status_new);
- if (ret)
- return ret;
-
- ret = spi_nor_wait_till_ready(nor);
- if (ret)
- return ret;
-
- ret = read_sr(nor);
- if (ret < 0)
- return ret;
-
- return ((ret & mask) != (status_new & mask)) ? -EIO : 0;
-}
-
static void stm_get_locked_range(struct spi_nor *nor, u8 sr, loff_t *ofs,
uint64_t *len)
{
@@ -1433,16 +1848,18 @@ static int stm_is_unlocked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
static int stm_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
{
struct mtd_info *mtd = &nor->mtd;
- int status_old, status_new;
+ int ret, status_old, status_new;
u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
u8 shift = ffs(mask) - 1, pow, val;
loff_t lock_len;
bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
bool use_top;
- status_old = read_sr(nor);
- if (status_old < 0)
- return status_old;
+ ret = spi_nor_read_sr(nor, nor->bouncebuf);
+ if (ret)
+ return ret;
+
+ status_old = nor->bouncebuf[0];
/* If nothing in our range is unlocked, we don't need to do anything */
if (stm_is_locked_sr(nor, ofs, len, status_old))
@@ -1502,7 +1919,7 @@ static int stm_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
if ((status_new & mask) < (status_old & mask))
return -EINVAL;
- return write_sr_and_check(nor, status_new, mask);
+ return spi_nor_write_sr_and_check(nor, status_new);
}
/*
@@ -1513,16 +1930,18 @@ static int stm_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
static int stm_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
{
struct mtd_info *mtd = &nor->mtd;
- int status_old, status_new;
+ int ret, status_old, status_new;
u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
u8 shift = ffs(mask) - 1, pow, val;
loff_t lock_len;
bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
bool use_top;
- status_old = read_sr(nor);
- if (status_old < 0)
- return status_old;
+ ret = spi_nor_read_sr(nor, nor->bouncebuf);
+ if (ret)
+ return ret;
+
+ status_old = nor->bouncebuf[0];
/* If nothing in our range is locked, we don't need to do anything */
if (stm_is_unlocked_sr(nor, ofs, len, status_old))
@@ -1585,7 +2004,7 @@ static int stm_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
if ((status_new & mask) > (status_old & mask))
return -EINVAL;
- return write_sr_and_check(nor, status_new, mask);
+ return spi_nor_write_sr_and_check(nor, status_new);
}
/*
@@ -1597,13 +2016,13 @@ static int stm_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
*/
static int stm_is_locked(struct spi_nor *nor, loff_t ofs, uint64_t len)
{
- int status;
+ int ret;
- status = read_sr(nor);
- if (status < 0)
- return status;
+ ret = spi_nor_read_sr(nor, nor->bouncebuf);
+ if (ret)
+ return ret;
- return stm_is_locked_sr(nor, ofs, len, status);
+ return stm_is_locked_sr(nor, ofs, len, nor->bouncebuf[0]);
}
static const struct spi_nor_locking_ops stm_locking_ops = {
@@ -1657,242 +2076,59 @@ static int spi_nor_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
return ret;
}
-/*
- * Write status Register and configuration register with 2 bytes
- * The first byte will be written to the status register, while the
- * second byte will be written to the configuration register.
- * Return negative if error occurred.
- */
-static int write_sr_cr(struct spi_nor *nor, u8 *sr_cr)
-{
- int ret;
-
- write_enable(nor);
-
- if (nor->spimem) {
- struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR, 1),
- SPI_MEM_OP_NO_ADDR,
- SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_OUT(2, sr_cr, 1));
-
- ret = spi_mem_exec_op(nor->spimem, &op);
- } else {
- ret = nor->write_reg(nor, SPINOR_OP_WRSR, sr_cr, 2);
- }
-
- if (ret < 0) {
- dev_err(nor->dev,
- "error while writing configuration register\n");
- return -EINVAL;
- }
-
- ret = spi_nor_wait_till_ready(nor);
- if (ret) {
- dev_err(nor->dev,
- "timeout while writing configuration register\n");
- return ret;
- }
-
- return 0;
-}
-
-/**
- * macronix_quad_enable() - set QE bit in Status Register.
- * @nor: pointer to a 'struct spi_nor'
- *
- * Set the Quad Enable (QE) bit in the Status Register.
- *
- * bit 6 of the Status Register is the QE bit for Macronix like QSPI memories.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int macronix_quad_enable(struct spi_nor *nor)
-{
- int ret, val;
-
- val = read_sr(nor);
- if (val < 0)
- return val;
- if (val & SR_QUAD_EN_MX)
- return 0;
-
- write_enable(nor);
-
- write_sr(nor, val | SR_QUAD_EN_MX);
-
- ret = spi_nor_wait_till_ready(nor);
- if (ret)
- return ret;
-
- ret = read_sr(nor);
- if (!(ret > 0 && (ret & SR_QUAD_EN_MX))) {
- dev_err(nor->dev, "Macronix Quad bit not set\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
/**
- * spansion_quad_enable() - set QE bit in Configuraiton Register.
+ * spi_nor_sr1_bit6_quad_enable() - Set the Quad Enable BIT(6) in the Status
+ * Register 1.
* @nor: pointer to a 'struct spi_nor'
*
- * Set the Quad Enable (QE) bit in the Configuration Register.
- * This function is kept for legacy purpose because it has been used for a
- * long time without anybody complaining but it should be considered as
- * deprecated and maybe buggy.
- * First, this function doesn't care about the previous values of the Status
- * and Configuration Registers when it sets the QE bit (bit 1) in the
- * Configuration Register: all other bits are cleared, which may have unwanted
- * side effects like removing some block protections.
- * Secondly, it uses the Read Configuration Register (35h) instruction though
- * some very old and few memories don't support this instruction. If a pull-up
- * resistor is present on the MISO/IO1 line, we might still be able to pass the
- * "read back" test because the QSPI memory doesn't recognize the command,
- * so leaves the MISO/IO1 line state unchanged, hence read_cr() returns 0xFF.
- *
- * bit 1 of the Configuration Register is the QE bit for Spansion like QSPI
- * memories.
+ * Bit 6 of the Status Register 1 is the QE bit for Macronix-like QSPI memories.
*
* Return: 0 on success, -errno otherwise.
*/
-static int spansion_quad_enable(struct spi_nor *nor)
+static int spi_nor_sr1_bit6_quad_enable(struct spi_nor *nor)
{
- u8 *sr_cr = nor->bouncebuf;
int ret;
- sr_cr[0] = 0;
- sr_cr[1] = CR_QUAD_EN_SPAN;
- ret = write_sr_cr(nor, sr_cr);
+ ret = spi_nor_read_sr(nor, nor->bouncebuf);
if (ret)
return ret;
- /* read back and check it */
- ret = read_cr(nor);
- if (!(ret > 0 && (ret & CR_QUAD_EN_SPAN))) {
- dev_err(nor->dev, "Spansion Quad bit not set\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-/**
- * spansion_no_read_cr_quad_enable() - set QE bit in Configuration Register.
- * @nor: pointer to a 'struct spi_nor'
- *
- * Set the Quad Enable (QE) bit in the Configuration Register.
- * This function should be used with QSPI memories not supporting the Read
- * Configuration Register (35h) instruction.
- *
- * bit 1 of the Configuration Register is the QE bit for Spansion like QSPI
- * memories.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spansion_no_read_cr_quad_enable(struct spi_nor *nor)
-{
- u8 *sr_cr = nor->bouncebuf;
- int ret;
+ if (nor->bouncebuf[0] & SR1_QUAD_EN_BIT6)
+ return 0;
- /* Keep the current value of the Status Register. */
- ret = read_sr(nor);
- if (ret < 0) {
- dev_err(nor->dev, "error while reading status register\n");
- return -EINVAL;
- }
- sr_cr[0] = ret;
- sr_cr[1] = CR_QUAD_EN_SPAN;
+ nor->bouncebuf[0] |= SR1_QUAD_EN_BIT6;
- return write_sr_cr(nor, sr_cr);
+ return spi_nor_write_sr1_and_check(nor, nor->bouncebuf[0]);
}
/**
- * spansion_read_cr_quad_enable() - set QE bit in Configuration Register.
- * @nor: pointer to a 'struct spi_nor'
- *
- * Set the Quad Enable (QE) bit in the Configuration Register.
- * This function should be used with QSPI memories supporting the Read
- * Configuration Register (35h) instruction.
+ * spi_nor_sr2_bit1_quad_enable() - Set the Quad Enable BIT(1) in the Status
+ * Register 2.
+ * @nor: pointer to a 'struct spi_nor'.
*
- * bit 1 of the Configuration Register is the QE bit for Spansion like QSPI
- * memories.
+ * Bit 1 of the Status Register 2 is the QE bit for Spansion-like QSPI memories.
*
* Return: 0 on success, -errno otherwise.
*/
-static int spansion_read_cr_quad_enable(struct spi_nor *nor)
+static int spi_nor_sr2_bit1_quad_enable(struct spi_nor *nor)
{
- struct device *dev = nor->dev;
- u8 *sr_cr = nor->bouncebuf;
int ret;
- /* Check current Quad Enable bit value. */
- ret = read_cr(nor);
- if (ret < 0) {
- dev_err(dev, "error while reading configuration register\n");
- return -EINVAL;
- }
-
- if (ret & CR_QUAD_EN_SPAN)
- return 0;
+ if (nor->flags & SNOR_F_NO_READ_CR)
+ return spi_nor_write_16bit_cr_and_check(nor, SR2_QUAD_EN_BIT1);
- sr_cr[1] = ret | CR_QUAD_EN_SPAN;
-
- /* Keep the current value of the Status Register. */
- ret = read_sr(nor);
- if (ret < 0) {
- dev_err(dev, "error while reading status register\n");
- return -EINVAL;
- }
- sr_cr[0] = ret;
-
- ret = write_sr_cr(nor, sr_cr);
+ ret = spi_nor_read_cr(nor, nor->bouncebuf);
if (ret)
return ret;
- /* Read back and check it. */
- ret = read_cr(nor);
- if (!(ret > 0 && (ret & CR_QUAD_EN_SPAN))) {
- dev_err(nor->dev, "Spansion Quad bit not set\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int spi_nor_write_sr2(struct spi_nor *nor, u8 *sr2)
-{
- if (nor->spimem) {
- struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR2, 1),
- SPI_MEM_OP_NO_ADDR,
- SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_OUT(1, sr2, 1));
-
- return spi_mem_exec_op(nor->spimem, &op);
- }
-
- return nor->write_reg(nor, SPINOR_OP_WRSR2, sr2, 1);
-}
-
-static int spi_nor_read_sr2(struct spi_nor *nor, u8 *sr2)
-{
- if (nor->spimem) {
- struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDSR2, 1),
- SPI_MEM_OP_NO_ADDR,
- SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_IN(1, sr2, 1));
-
- return spi_mem_exec_op(nor->spimem, &op);
- }
+ if (nor->bouncebuf[0] & SR2_QUAD_EN_BIT1)
+ return 0;
- return nor->read_reg(nor, SPINOR_OP_RDSR2, sr2, 1);
+
+ /* Update the Quad Enable bit. */
+ nor->bouncebuf[0] |= SR2_QUAD_EN_BIT1;
+
+ return spi_nor_write_16bit_cr_and_check(nor, nor->bouncebuf[0]);
}
/**
- * sr2_bit7_quad_enable() - set QE bit in Status Register 2.
+ * spi_nor_sr2_bit7_quad_enable() - Set QE bit in Status Register 2.
* @nor: pointer to a 'struct spi_nor'
*
* Set the Quad Enable (QE) bit in the Status Register 2.
@@ -1903,10 +2139,11 @@ static int spi_nor_read_sr2(struct spi_nor *nor, u8 *sr2)
*
* Return: 0 on success, -errno otherwise.
*/
-static int sr2_bit7_quad_enable(struct spi_nor *nor)
+static int spi_nor_sr2_bit7_quad_enable(struct spi_nor *nor)
{
u8 *sr2 = nor->bouncebuf;
int ret;
+ u8 sr2_written;
/* Check current Quad Enable bit value. */
ret = spi_nor_read_sr2(nor, sr2);
@@ -1918,117 +2155,23 @@ static int sr2_bit7_quad_enable(struct spi_nor *nor)
/* Update the Quad Enable bit. */
*sr2 |= SR2_QUAD_EN_BIT7;
- write_enable(nor);
-
ret = spi_nor_write_sr2(nor, sr2);
- if (ret < 0) {
- dev_err(nor->dev, "error while writing status register 2\n");
- return -EINVAL;
- }
-
- ret = spi_nor_wait_till_ready(nor);
- if (ret < 0) {
- dev_err(nor->dev, "timeout while writing status register 2\n");
+ if (ret)
return ret;
- }
+
+ sr2_written = *sr2;
/* Read back and check it. */
ret = spi_nor_read_sr2(nor, sr2);
- if (!(ret > 0 && (*sr2 & SR2_QUAD_EN_BIT7))) {
- dev_err(nor->dev, "SR2 Quad bit not set\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-/**
- * spi_nor_clear_sr_bp() - clear the Status Register Block Protection bits.
- * @nor: pointer to a 'struct spi_nor'
- *
- * Read-modify-write function that clears the Block Protection bits from the
- * Status Register without affecting other bits.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_clear_sr_bp(struct spi_nor *nor)
-{
- int ret;
- u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
-
- ret = read_sr(nor);
- if (ret < 0) {
- dev_err(nor->dev, "error while reading status register\n");
- return ret;
- }
-
- write_enable(nor);
-
- ret = write_sr(nor, ret & ~mask);
- if (ret) {
- dev_err(nor->dev, "write to status register failed\n");
- return ret;
- }
-
- ret = spi_nor_wait_till_ready(nor);
if (ret)
- dev_err(nor->dev, "timeout while writing status register\n");
- return ret;
-}
-
-/**
- * spi_nor_spansion_clear_sr_bp() - clear the Status Register Block Protection
- * bits on spansion flashes.
- * @nor: pointer to a 'struct spi_nor'
- *
- * Read-modify-write function that clears the Block Protection bits from the
- * Status Register without affecting other bits. The function is tightly
- * coupled with the spansion_quad_enable() function. Both assume that the Write
- * Register with 16 bits, together with the Read Configuration Register (35h)
- * instructions are supported.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_spansion_clear_sr_bp(struct spi_nor *nor)
-{
- int ret;
- u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
- u8 *sr_cr = nor->bouncebuf;
-
- /* Check current Quad Enable bit value. */
- ret = read_cr(nor);
- if (ret < 0) {
- dev_err(nor->dev,
- "error while reading configuration register\n");
return ret;
- }
-
- /*
- * When the configuration register Quad Enable bit is one, only the
- * Write Status (01h) command with two data bytes may be used.
- */
- if (ret & CR_QUAD_EN_SPAN) {
- sr_cr[1] = ret;
- ret = read_sr(nor);
- if (ret < 0) {
- dev_err(nor->dev,
- "error while reading status register\n");
- return ret;
- }
- sr_cr[0] = ret & ~mask;
-
- ret = write_sr_cr(nor, sr_cr);
- if (ret)
- dev_err(nor->dev, "16-bit write register failed\n");
- return ret;
+ if (*sr2 != sr2_written) {
+ dev_dbg(nor->dev, "SR2: Read back test failed\n");
+ return -EIO;
}
- /*
- * If the Quad Enable bit is zero, use the Write Status (01h) command
- * with one data byte.
- */
- return spi_nor_clear_sr_bp(nor);
+ return 0;
}
/* Used when the "_ext_id" is two bytes at most */
@@ -2136,7 +2279,7 @@ static void gd25q256_default_init(struct spi_nor *nor)
* indicate the quad_enable method for this case, we need
* to set it in the default_init fixup hook.
*/
- nor->params.quad_enable = macronix_quad_enable;
+ nor->params.quad_enable = spi_nor_sr1_bit6_quad_enable;
}
static struct spi_nor_fixups gd25q256_fixups = {
@@ -2179,6 +2322,8 @@ static const struct flash_info spi_nor_ids[] = {
{ "en25q64", INFO(0x1c3017, 0, 64 * 1024, 128, SECT_4K) },
{ "en25q80a", INFO(0x1c3014, 0, 64 * 1024, 16,
SECT_4K | SPI_NOR_DUAL_READ) },
+ { "en25qh16", INFO(0x1c7015, 0, 64 * 1024, 32,
+ SECT_4K | SPI_NOR_DUAL_READ) },
{ "en25qh32", INFO(0x1c7016, 0, 64 * 1024, 64, 0) },
{ "en25qh64", INFO(0x1c7017, 0, 64 * 1024, 128,
SECT_4K | SPI_NOR_DUAL_READ) },
@@ -2267,6 +2412,10 @@ static const struct flash_info spi_nor_ids[] = {
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "is25wp128", INFO(0x9d7018, 0, 64 * 1024, 256,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "is25wp256", INFO(0x9d7019, 0, 64 * 1024, 512,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_4B_OPCODES)
+ .fixups = &is25lp256_fixups },
/* Macronix */
{ "mx25l512e", INFO(0xc22010, 0, 64 * 1024, 1, SECT_4K) },
@@ -2482,6 +2631,8 @@ static const struct flash_info spi_nor_ids[] = {
{ "w25q256", INFO(0xef4019, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "w25q256jvm", INFO(0xef7019, 0, 64 * 1024, 512,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "w25q256jw", INFO(0xef6019, 0, 64 * 1024, 512,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "w25m512jv", INFO(0xef7119, 0, 64 * 1024, 1024,
SECT_4K | SPI_NOR_QUAD_READ | SPI_NOR_DUAL_READ) },
@@ -2520,11 +2671,11 @@ static const struct flash_info *spi_nor_read_id(struct spi_nor *nor)
tmp = spi_mem_exec_op(nor->spimem, &op);
} else {
- tmp = nor->read_reg(nor, SPINOR_OP_RDID, id,
- SPI_NOR_MAX_ID_LEN);
+ tmp = nor->controller_ops->read_reg(nor, SPINOR_OP_RDID, id,
+ SPI_NOR_MAX_ID_LEN);
}
- if (tmp < 0) {
- dev_err(nor->dev, "error %d reading JEDEC ID\n", tmp);
+ if (tmp) {
+ dev_dbg(nor->dev, "error %d reading JEDEC ID\n", tmp);
return ERR_PTR(tmp);
}
@@ -2544,7 +2695,7 @@ static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf)
{
struct spi_nor *nor = mtd_to_spi_nor(mtd);
- int ret;
+ ssize_t ret;
dev_dbg(nor->dev, "from 0x%08x, len %zd\n", (u32)from, len);
@@ -2583,7 +2734,7 @@ static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const u_char *buf)
{
struct spi_nor *nor = mtd_to_spi_nor(mtd);
- size_t actual;
+ size_t actual = 0;
int ret;
dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);
@@ -2592,26 +2743,28 @@ static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
if (ret)
return ret;
- write_enable(nor);
+ ret = spi_nor_write_enable(nor);
+ if (ret)
+ goto out;
nor->sst_write_second = false;
- actual = to % 2;
/* Start write from odd address. */
- if (actual) {
+ if (to % 2) {
nor->program_opcode = SPINOR_OP_BP;
/* write one byte. */
ret = spi_nor_write_data(nor, to, 1, buf);
if (ret < 0)
- goto sst_write_err;
- WARN(ret != 1, "While writing 1 byte written %i bytes\n",
- (int)ret);
+ goto out;
+ WARN(ret != 1, "While writing 1 byte written %i bytes\n", ret);
ret = spi_nor_wait_till_ready(nor);
if (ret)
- goto sst_write_err;
+ goto out;
+
+ to++;
+ actual++;
}
- to += actual;
/* Write out most of the data here. */
for (; actual < len - 1; actual += 2) {
@@ -2620,39 +2773,44 @@ static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
/* write two bytes. */
ret = spi_nor_write_data(nor, to, 2, buf + actual);
if (ret < 0)
- goto sst_write_err;
- WARN(ret != 2, "While writing 2 bytes written %i bytes\n",
- (int)ret);
+ goto out;
+ WARN(ret != 2, "While writing 2 bytes written %i bytes\n", ret);
ret = spi_nor_wait_till_ready(nor);
if (ret)
- goto sst_write_err;
+ goto out;
to += 2;
nor->sst_write_second = true;
}
nor->sst_write_second = false;
- write_disable(nor);
+ ret = spi_nor_write_disable(nor);
+ if (ret)
+ goto out;
+
ret = spi_nor_wait_till_ready(nor);
if (ret)
- goto sst_write_err;
+ goto out;
/* Write out trailing byte if it exists. */
if (actual != len) {
- write_enable(nor);
+ ret = spi_nor_write_enable(nor);
+ if (ret)
+ goto out;
nor->program_opcode = SPINOR_OP_BP;
ret = spi_nor_write_data(nor, to, 1, buf + actual);
if (ret < 0)
- goto sst_write_err;
- WARN(ret != 1, "While writing 1 byte written %i bytes\n",
- (int)ret);
+ goto out;
+ WARN(ret != 1, "While writing 1 byte written %i bytes\n", ret);
ret = spi_nor_wait_till_ready(nor);
if (ret)
- goto sst_write_err;
- write_disable(nor);
+ goto out;
+
actual += 1;
+
+ ret = spi_nor_write_disable(nor);
}
-sst_write_err:
+out:
*retlen += actual;
spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_WRITE);
return ret;
@@ -2701,7 +2859,10 @@ static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
addr = spi_nor_convert_addr(nor, addr);
- write_enable(nor);
+ ret = spi_nor_write_enable(nor);
+ if (ret)
+ goto write_err;
+
ret = spi_nor_write_data(nor, addr, page_remain, buf + i);
if (ret < 0)
goto write_err;
@@ -2722,13 +2883,21 @@ write_err:
static int spi_nor_check(struct spi_nor *nor)
{
if (!nor->dev ||
- (!nor->spimem &&
- (!nor->read || !nor->write || !nor->read_reg ||
- !nor->write_reg))) {
+ (!nor->spimem && !nor->controller_ops) ||
+ (!nor->spimem && nor->controller_ops &&
+ (!nor->controller_ops->read ||
+ !nor->controller_ops->write ||
+ !nor->controller_ops->read_reg ||
+ !nor->controller_ops->write_reg))) {
pr_err("spi-nor: please fill all the necessary fields!\n");
return -EINVAL;
}
+ if (nor->spimem && nor->controller_ops) {
+ dev_err(nor->dev, "nor->spimem and nor->controller_ops are mutually exclusive, please set just one of them.\n");
+ return -EINVAL;
+ }
+
return 0;
}
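For controller drivers, the upshot of this check is that the per-device function pointers move into one const ops table. A hypothetical driver-side sketch (all foo_* names are invented; the member names match the accesses made throughout this patch):

        static const struct spi_nor_controller_ops foo_controller_ops = {
                .read_reg  = foo_read_reg,      /* was nor->read_reg */
                .write_reg = foo_write_reg,     /* was nor->write_reg */
                .read      = foo_read,          /* was nor->read */
                .write     = foo_write,         /* was nor->write */
        };

        /* At probe time, instead of filling individual pointers: */
        nor->controller_ops = &foo_controller_ops;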
@@ -2738,10 +2907,8 @@ static int s3an_nor_setup(struct spi_nor *nor,
int ret;
ret = spi_nor_xread_sr(nor, nor->bouncebuf);
- if (ret < 0) {
- dev_err(nor->dev, "error %d reading XRDSR\n", (int) ret);
+ if (ret)
return ret;
- }
nor->erase_opcode = SPINOR_OP_XSE;
nor->program_opcode = SPINOR_OP_XPP;
@@ -2865,7 +3032,7 @@ static int spi_nor_hwcaps_pp2cmd(u32 hwcaps)
*/
static int spi_nor_read_raw(struct spi_nor *nor, u32 addr, size_t len, u8 *buf)
{
- int ret;
+ ssize_t ret;
while (len) {
ret = spi_nor_read_data(nor, addr, len, buf);
@@ -3489,20 +3656,39 @@ static int spi_nor_parse_bfpt(struct spi_nor *nor,
break;
case BFPT_DWORD15_QER_SR2_BIT1_BUGGY:
+ /*
+ * Writing only one byte to the Status Register has the
+ * side-effect of clearing Status Register 2.
+ */
case BFPT_DWORD15_QER_SR2_BIT1_NO_RD:
- params->quad_enable = spansion_no_read_cr_quad_enable;
+ /*
+ * Read Configuration Register (35h) instruction is not
+ * supported.
+ */
+ nor->flags |= SNOR_F_HAS_16BIT_SR | SNOR_F_NO_READ_CR;
+ params->quad_enable = spi_nor_sr2_bit1_quad_enable;
break;
case BFPT_DWORD15_QER_SR1_BIT6:
- params->quad_enable = macronix_quad_enable;
+ nor->flags &= ~SNOR_F_HAS_16BIT_SR;
+ params->quad_enable = spi_nor_sr1_bit6_quad_enable;
break;
case BFPT_DWORD15_QER_SR2_BIT7:
- params->quad_enable = sr2_bit7_quad_enable;
+ nor->flags &= ~SNOR_F_HAS_16BIT_SR;
+ params->quad_enable = spi_nor_sr2_bit7_quad_enable;
break;
case BFPT_DWORD15_QER_SR2_BIT1:
- params->quad_enable = spansion_read_cr_quad_enable;
+ /*
+ * JESD216 rev B or later does not specify whether writing only one
+ * byte to the Status Register clears the Status Register 2, so
+ * let's be cautious and keep the default
+ * assumption of a 16-bit Write Status (01h) command.
+ */
+ nor->flags |= SNOR_F_HAS_16BIT_SR;
+
+ params->quad_enable = spi_nor_sr2_bit1_quad_enable;
break;
default:
@@ -4101,7 +4287,7 @@ static int spi_nor_parse_sfdp(struct spi_nor *nor,
err = spi_nor_read_sfdp(nor, sizeof(header),
psize, param_headers);
if (err < 0) {
- dev_err(dev, "failed to read SFDP parameter headers\n");
+ dev_dbg(dev, "failed to read SFDP parameter headers\n");
goto exit;
}
}
@@ -4348,7 +4534,7 @@ static int spi_nor_default_setup(struct spi_nor *nor,
/* Select the (Fast) Read command. */
err = spi_nor_select_read(nor, shared_mask);
if (err) {
- dev_err(nor->dev,
+ dev_dbg(nor->dev,
"can't select read settings supported by both the SPI controller and memory.\n");
return err;
}
@@ -4356,7 +4542,7 @@ static int spi_nor_default_setup(struct spi_nor *nor,
/* Select the Page Program command. */
err = spi_nor_select_pp(nor, shared_mask);
if (err) {
- dev_err(nor->dev,
+ dev_dbg(nor->dev,
"can't select write settings supported by both the SPI controller and memory.\n");
return err;
}
@@ -4364,7 +4550,7 @@ static int spi_nor_default_setup(struct spi_nor *nor,
/* Select the Sector Erase command. */
err = spi_nor_select_erase(nor);
if (err) {
- dev_err(nor->dev,
+ dev_dbg(nor->dev,
"can't select erase settings supported by both the SPI controller and memory.\n");
return err;
}
@@ -4381,12 +4567,32 @@ static int spi_nor_setup(struct spi_nor *nor,
return nor->params.setup(nor, hwcaps);
}
+static void atmel_set_default_init(struct spi_nor *nor)
+{
+ nor->flags |= SNOR_F_HAS_LOCK;
+}
+
+static void intel_set_default_init(struct spi_nor *nor)
+{
+ nor->flags |= SNOR_F_HAS_LOCK;
+}
+
+static void issi_set_default_init(struct spi_nor *nor)
+{
+ nor->params.quad_enable = spi_nor_sr1_bit6_quad_enable;
+}
+
static void macronix_set_default_init(struct spi_nor *nor)
{
- nor->params.quad_enable = macronix_quad_enable;
+ nor->params.quad_enable = spi_nor_sr1_bit6_quad_enable;
nor->params.set_4byte = macronix_set_4byte;
}
+static void sst_set_default_init(struct spi_nor *nor)
+{
+ nor->flags |= SNOR_F_HAS_LOCK;
+}
+
static void st_micron_set_default_init(struct spi_nor *nor)
{
nor->flags |= SNOR_F_HAS_LOCK;
@@ -4408,6 +4614,18 @@ static void spi_nor_manufacturer_init_params(struct spi_nor *nor)
{
/* Init flash parameters based on MFR */
switch (JEDEC_MFR(nor->info)) {
+ case SNOR_MFR_ATMEL:
+ atmel_set_default_init(nor);
+ break;
+
+ case SNOR_MFR_INTEL:
+ intel_set_default_init(nor);
+ break;
+
+ case SNOR_MFR_ISSI:
+ issi_set_default_init(nor);
+ break;
+
case SNOR_MFR_MACRONIX:
macronix_set_default_init(nor);
break;
@@ -4417,6 +4635,10 @@ static void spi_nor_manufacturer_init_params(struct spi_nor *nor)
st_micron_set_default_init(nor);
break;
+ case SNOR_MFR_SST:
+ sst_set_default_init(nor);
+ break;
+
case SNOR_MFR_WINBOND:
winbond_set_default_init(nor);
break;
@@ -4465,9 +4687,11 @@ static void spi_nor_info_init_params(struct spi_nor *nor)
u8 i, erase_mask;
/* Initialize legacy flash parameters and settings. */
- params->quad_enable = spansion_quad_enable;
+ params->quad_enable = spi_nor_sr2_bit1_quad_enable;
params->set_4byte = spansion_set_4byte;
params->setup = spi_nor_default_setup;
+ /* Default to 16-bit Write Status (01h) Command */
+ nor->flags |= SNOR_F_HAS_16BIT_SR;
/* Set SPI NOR sizes. */
params->size = (u64)info->sector_size * info->n_sectors;
@@ -4675,25 +4899,36 @@ static int spi_nor_quad_enable(struct spi_nor *nor)
return nor->params.quad_enable(nor);
}
+/**
+ * spi_nor_unlock_all() - Unlock the entire flash memory array.
+ * @nor: pointer to a 'struct spi_nor'.
+ *
+ * Some SPI NOR flashes are write protected by default after a power-on reset
+ * cycle, in order to avoid inadvertent writes during power-up. For backward
+ * compatibility, the entire flash memory array is unlocked at power-up by
+ * default.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_unlock_all(struct spi_nor *nor)
+{
+ if (nor->flags & SNOR_F_HAS_LOCK)
+ return spi_nor_unlock(&nor->mtd, 0, nor->params.size);
+
+ return 0;
+}
+
static int spi_nor_init(struct spi_nor *nor)
{
int err;
- if (nor->clear_sr_bp) {
- if (nor->params.quad_enable == spansion_quad_enable)
- nor->clear_sr_bp = spi_nor_spansion_clear_sr_bp;
-
- err = nor->clear_sr_bp(nor);
- if (err) {
- dev_err(nor->dev,
- "fail to clear block protection bits\n");
- return err;
- }
+ err = spi_nor_quad_enable(nor);
+ if (err) {
+ dev_dbg(nor->dev, "quad mode not supported\n");
+ return err;
}
- err = spi_nor_quad_enable(nor);
+ err = spi_nor_unlock_all(nor);
if (err) {
- dev_err(nor->dev, "quad mode not supported\n");
+ dev_dbg(nor->dev, "Failed to unlock the entire flash memory array\n");
return err;
}
@@ -4761,7 +4996,7 @@ static int spi_nor_set_addr_width(struct spi_nor *nor)
}
if (nor->addr_width > SPI_NOR_MAX_ADDR_WIDTH) {
- dev_err(nor->dev, "address width is too large: %u\n",
+ dev_dbg(nor->dev, "address width is too large: %u\n",
nor->addr_width);
return -EINVAL;
}
@@ -4879,16 +5114,6 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
if (info->flags & SPI_NOR_HAS_LOCK)
nor->flags |= SNOR_F_HAS_LOCK;
- /*
- * Atmel, SST, Intel/Numonyx, and others serial NOR tend to power up
- * with the software protection bits set.
- */
- if (JEDEC_MFR(nor->info) == SNOR_MFR_ATMEL ||
- JEDEC_MFR(nor->info) == SNOR_MFR_INTEL ||
- JEDEC_MFR(nor->info) == SNOR_MFR_SST ||
- nor->info->flags & SPI_NOR_HAS_LOCK)
- nor->clear_sr_bp = spi_nor_clear_sr_bp;
-
/* Init flash parameters based on flash_info struct and SFDP */
spi_nor_init_params(nor);
diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c
index a1dff92ceedf..0f847d510950 100644
--- a/drivers/mtd/ubi/debug.c
+++ b/drivers/mtd/ubi/debug.c
@@ -509,11 +509,9 @@ static const struct file_operations eraseblk_count_fops = {
*/
int ubi_debugfs_init_dev(struct ubi_device *ubi)
{
- int err, n;
unsigned long ubi_num = ubi->ubi_num;
- const char *fname;
- struct dentry *dent;
struct ubi_debug_info *d = &ubi->dbg;
+ int n;
if (!IS_ENABLED(CONFIG_DEBUG_FS))
return 0;
@@ -522,95 +520,52 @@ int ubi_debugfs_init_dev(struct ubi_device *ubi)
ubi->ubi_num);
if (n == UBI_DFS_DIR_LEN) {
/* The array size is too small */
- fname = UBI_DFS_DIR_NAME;
- dent = ERR_PTR(-EINVAL);
- goto out;
+ return -EINVAL;
}
- fname = d->dfs_dir_name;
- dent = debugfs_create_dir(fname, dfs_rootdir);
- if (IS_ERR_OR_NULL(dent))
- goto out;
- d->dfs_dir = dent;
-
- fname = "chk_gen";
- dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
- &dfs_fops);
- if (IS_ERR_OR_NULL(dent))
- goto out_remove;
- d->dfs_chk_gen = dent;
-
- fname = "chk_io";
- dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
- &dfs_fops);
- if (IS_ERR_OR_NULL(dent))
- goto out_remove;
- d->dfs_chk_io = dent;
-
- fname = "chk_fastmap";
- dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
- &dfs_fops);
- if (IS_ERR_OR_NULL(dent))
- goto out_remove;
- d->dfs_chk_fastmap = dent;
-
- fname = "tst_disable_bgt";
- dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
- &dfs_fops);
- if (IS_ERR_OR_NULL(dent))
- goto out_remove;
- d->dfs_disable_bgt = dent;
-
- fname = "tst_emulate_bitflips";
- dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
- &dfs_fops);
- if (IS_ERR_OR_NULL(dent))
- goto out_remove;
- d->dfs_emulate_bitflips = dent;
-
- fname = "tst_emulate_io_failures";
- dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
- &dfs_fops);
- if (IS_ERR_OR_NULL(dent))
- goto out_remove;
- d->dfs_emulate_io_failures = dent;
-
- fname = "tst_emulate_power_cut";
- dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
- &dfs_fops);
- if (IS_ERR_OR_NULL(dent))
- goto out_remove;
- d->dfs_emulate_power_cut = dent;
-
- fname = "tst_emulate_power_cut_min";
- dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
- &dfs_fops);
- if (IS_ERR_OR_NULL(dent))
- goto out_remove;
- d->dfs_power_cut_min = dent;
-
- fname = "tst_emulate_power_cut_max";
- dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
- &dfs_fops);
- if (IS_ERR_OR_NULL(dent))
- goto out_remove;
- d->dfs_power_cut_max = dent;
-
- fname = "detailed_erase_block_info";
- dent = debugfs_create_file(fname, S_IRUSR, d->dfs_dir, (void *)ubi_num,
- &eraseblk_count_fops);
- if (IS_ERR_OR_NULL(dent))
- goto out_remove;
+ d->dfs_dir = debugfs_create_dir(d->dfs_dir_name, dfs_rootdir);
- return 0;
+ d->dfs_chk_gen = debugfs_create_file("chk_gen", S_IWUSR, d->dfs_dir,
+ (void *)ubi_num, &dfs_fops);
-out_remove:
- debugfs_remove_recursive(d->dfs_dir);
-out:
- err = dent ? PTR_ERR(dent) : -ENODEV;
- ubi_err(ubi, "cannot create \"%s\" debugfs file or directory, error %d\n",
- fname, err);
- return err;
+ d->dfs_chk_io = debugfs_create_file("chk_io", S_IWUSR, d->dfs_dir,
+ (void *)ubi_num, &dfs_fops);
+
+ d->dfs_chk_fastmap = debugfs_create_file("chk_fastmap", S_IWUSR,
+ d->dfs_dir, (void *)ubi_num,
+ &dfs_fops);
+
+ d->dfs_disable_bgt = debugfs_create_file("tst_disable_bgt", S_IWUSR,
+ d->dfs_dir, (void *)ubi_num,
+ &dfs_fops);
+
+ d->dfs_emulate_bitflips = debugfs_create_file("tst_emulate_bitflips",
+ S_IWUSR, d->dfs_dir,
+ (void *)ubi_num,
+ &dfs_fops);
+
+ d->dfs_emulate_io_failures = debugfs_create_file("tst_emulate_io_failures",
+ S_IWUSR, d->dfs_dir,
+ (void *)ubi_num,
+ &dfs_fops);
+
+ d->dfs_emulate_power_cut = debugfs_create_file("tst_emulate_power_cut",
+ S_IWUSR, d->dfs_dir,
+ (void *)ubi_num,
+ &dfs_fops);
+
+ d->dfs_power_cut_min = debugfs_create_file("tst_emulate_power_cut_min",
+ S_IWUSR, d->dfs_dir,
+ (void *)ubi_num, &dfs_fops);
+
+ d->dfs_power_cut_max = debugfs_create_file("tst_emulate_power_cut_max",
+ S_IWUSR, d->dfs_dir,
+ (void *)ubi_num, &dfs_fops);
+
+ debugfs_create_file("detailed_erase_block_info", S_IRUSR, d->dfs_dir,
+ (void *)ubi_num, &eraseblk_count_fops);
+
+ return 0;
}
/**
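The rewrite leans on the kernel-wide convention that debugfs_create_dir()/debugfs_create_file() results need not be checked: debugfs is best-effort, and a failure there should never abort device init. The only error left is a truncated directory name, caught via snprintf()'s return value, which is the length it *wanted* to write, not the length it wrote. A stand-alone model of that check, with a hypothetical 6-byte buffer:

```c
#include <stdio.h>

#define DFS_DIR_LEN 6	/* hypothetical: "ubi" + up to 2 digits + '\0' */

static int build_dir_name(char *buf, int ubi_num)
{
	/* snprintf() returns the would-be length (excluding the NUL),
	 * so n >= buffer size means the name was truncated.
	 */
	int n = snprintf(buf, DFS_DIR_LEN, "ubi%d", ubi_num);

	return n >= DFS_DIR_LEN ? -1 : 0;
}

int main(void)
{
	char name[DFS_DIR_LEN];

	printf("ubi7:   %d\n", build_dir_name(name, 7));   /* fits: 0 */
	printf("ubi123: %d\n", build_dir_name(name, 123)); /* truncated: -1 */
	return 0;
}
```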
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index df1c7989e13d..d02f12a5254e 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -153,22 +153,22 @@ config IPVLAN_L3S
select NET_L3_MASTER_DEV
config IPVLAN
- tristate "IP-VLAN support"
- depends on INET
- depends on IPV6 || !IPV6
- ---help---
- This allows one to create virtual devices off of a main interface
- and packets will be delivered based on the dest L3 (IPv6/IPv4 addr)
- on packets. All interfaces (including the main interface) share L2
- making it transparent to the connected L2 switch.
+ tristate "IP-VLAN support"
+ depends on INET
+ depends on IPV6 || !IPV6
+ ---help---
+ This allows one to create virtual devices off of a main interface
+ and packets will be delivered based on the dest L3 (IPv6/IPv4 addr)
+ on packets. All interfaces (including the main interface) share L2
+ making it transparent to the connected L2 switch.
- Ipvlan devices can be added using the "ip" command from the
- iproute2 package starting with the iproute2-3.19 release:
+ Ipvlan devices can be added using the "ip" command from the
+ iproute2 package starting with the iproute2-3.19 release:
- "ip link add link <main-dev> [ NAME ] type ipvlan"
+ "ip link add link <main-dev> [ NAME ] type ipvlan"
- To compile this driver as a module, choose M here: the module
- will be called ipvlan.
+ To compile this driver as a module, choose M here: the module
+ will be called ipvlan.
config IPVTAP
tristate "IP-VLAN based tap driver"
@@ -185,11 +185,11 @@ config IPVTAP
will be called ipvtap.
config VXLAN
- tristate "Virtual eXtensible Local Area Network (VXLAN)"
- depends on INET
- select NET_UDP_TUNNEL
- select GRO_CELLS
- ---help---
+ tristate "Virtual eXtensible Local Area Network (VXLAN)"
+ depends on INET
+ select NET_UDP_TUNNEL
+ select GRO_CELLS
+ ---help---
This allows one to create vxlan virtual interfaces that provide
Layer 2 Networks over Layer 3 Networks. VXLAN is often used
to tunnel virtual network infrastructure in virtualized environments.
@@ -200,12 +200,12 @@ config VXLAN
will be called vxlan.
config GENEVE
- tristate "Generic Network Virtualization Encapsulation"
- depends on INET
- depends on IPV6 || !IPV6
- select NET_UDP_TUNNEL
- select GRO_CELLS
- ---help---
+ tristate "Generic Network Virtualization Encapsulation"
+ depends on INET
+ depends on IPV6 || !IPV6
+ select NET_UDP_TUNNEL
+ select GRO_CELLS
+ ---help---
This allows one to create geneve virtual interfaces that provide
Layer 2 Networks over Layer 3 Networks. GENEVE is often used
to tunnel virtual network infrastructure in virtualized environments.
@@ -244,8 +244,8 @@ config MACSEC
config NETCONSOLE
tristate "Network console logging support"
---help---
- If you want to log kernel messages over the network, enable this.
- See <file:Documentation/networking/netconsole.txt> for details.
+ If you want to log kernel messages over the network, enable this.
+ See <file:Documentation/networking/netconsole.txt> for details.
config NETCONSOLE_DYNAMIC
bool "Dynamic reconfiguration of logging targets"
@@ -362,12 +362,12 @@ config NET_VRF
support enables VRF devices.
config VSOCKMON
- tristate "Virtual vsock monitoring device"
- depends on VHOST_VSOCK
- ---help---
- This option enables a monitoring net device for vsock sockets. It is
- mostly intended for developers or support to debug vsock issues. If
- unsure, say N.
+ tristate "Virtual vsock monitoring device"
+ depends on VHOST_VSOCK
+ ---help---
+ This option enables a monitoring net device for vsock sockets. It is
+ mostly intended for developers or support to debug vsock issues. If
+ unsure, say N.
endif # NET_CORE
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 62f65573eb04..fcb7c2f7f001 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -41,6 +41,8 @@
#include <linux/in.h>
#include <net/ip.h>
#include <linux/ip.h>
+#include <linux/icmp.h>
+#include <linux/icmpv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/slab.h>
@@ -200,6 +202,51 @@ atomic_t netpoll_block_tx = ATOMIC_INIT(0);
unsigned int bond_net_id __read_mostly;
+static const struct flow_dissector_key flow_keys_bonding_keys[] = {
+ {
+ .key_id = FLOW_DISSECTOR_KEY_CONTROL,
+ .offset = offsetof(struct flow_keys, control),
+ },
+ {
+ .key_id = FLOW_DISSECTOR_KEY_BASIC,
+ .offset = offsetof(struct flow_keys, basic),
+ },
+ {
+ .key_id = FLOW_DISSECTOR_KEY_IPV4_ADDRS,
+ .offset = offsetof(struct flow_keys, addrs.v4addrs),
+ },
+ {
+ .key_id = FLOW_DISSECTOR_KEY_IPV6_ADDRS,
+ .offset = offsetof(struct flow_keys, addrs.v6addrs),
+ },
+ {
+ .key_id = FLOW_DISSECTOR_KEY_TIPC,
+ .offset = offsetof(struct flow_keys, addrs.tipckey),
+ },
+ {
+ .key_id = FLOW_DISSECTOR_KEY_PORTS,
+ .offset = offsetof(struct flow_keys, ports),
+ },
+ {
+ .key_id = FLOW_DISSECTOR_KEY_ICMP,
+ .offset = offsetof(struct flow_keys, icmp),
+ },
+ {
+ .key_id = FLOW_DISSECTOR_KEY_VLAN,
+ .offset = offsetof(struct flow_keys, vlan),
+ },
+ {
+ .key_id = FLOW_DISSECTOR_KEY_FLOW_LABEL,
+ .offset = offsetof(struct flow_keys, tags),
+ },
+ {
+ .key_id = FLOW_DISSECTOR_KEY_GRE_KEYID,
+ .offset = offsetof(struct flow_keys, keyid),
+ },
+};
+
+static struct flow_dissector flow_keys_bonding __read_mostly;
+
/*-------------------------- Forward declarations ---------------------------*/
static int bond_init(struct net_device *bond_dev);
@@ -3252,39 +3299,78 @@ static inline u32 bond_eth_hash(struct sk_buff *skb)
return 0;
}
-/* Extract the appropriate headers based on bond's xmit policy */
-static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb,
- struct flow_keys *fk)
+static bool bond_flow_ip(struct sk_buff *skb, struct flow_keys *fk,
+ int *noff, int *proto, bool l34)
{
const struct ipv6hdr *iph6;
const struct iphdr *iph;
- int noff, proto = -1;
-
- if (bond->params.xmit_policy > BOND_XMIT_POLICY_LAYER23)
- return skb_flow_dissect_flow_keys(skb, fk, 0);
- fk->ports.ports = 0;
- noff = skb_network_offset(skb);
if (skb->protocol == htons(ETH_P_IP)) {
- if (unlikely(!pskb_may_pull(skb, noff + sizeof(*iph))))
+ if (unlikely(!pskb_may_pull(skb, *noff + sizeof(*iph))))
return false;
- iph = ip_hdr(skb);
+ iph = (const struct iphdr *)(skb->data + *noff);
iph_to_flow_copy_v4addrs(fk, iph);
- noff += iph->ihl << 2;
+ *noff += iph->ihl << 2;
if (!ip_is_fragment(iph))
- proto = iph->protocol;
+ *proto = iph->protocol;
} else if (skb->protocol == htons(ETH_P_IPV6)) {
- if (unlikely(!pskb_may_pull(skb, noff + sizeof(*iph6))))
+ if (unlikely(!pskb_may_pull(skb, *noff + sizeof(*iph6))))
return false;
- iph6 = ipv6_hdr(skb);
+ iph6 = (const struct ipv6hdr *)(skb->data + *noff);
iph_to_flow_copy_v6addrs(fk, iph6);
- noff += sizeof(*iph6);
- proto = iph6->nexthdr;
+ *noff += sizeof(*iph6);
+ *proto = iph6->nexthdr;
} else {
return false;
}
- if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER34 && proto >= 0)
- fk->ports.ports = skb_flow_get_ports(skb, noff, proto);
+
+ if (l34 && *proto >= 0)
+ fk->ports.ports = skb_flow_get_ports(skb, *noff, *proto);
+
+ return true;
+}
+
+/* Extract the appropriate headers based on bond's xmit policy */
+static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb,
+ struct flow_keys *fk)
+{
+ bool l34 = bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER34;
+ int noff, proto = -1;
+
+ if (bond->params.xmit_policy > BOND_XMIT_POLICY_LAYER23) {
+ memset(fk, 0, sizeof(*fk));
+ return __skb_flow_dissect(NULL, skb, &flow_keys_bonding,
+ fk, NULL, 0, 0, 0, 0);
+ }
+
+ fk->ports.ports = 0;
+ memset(&fk->icmp, 0, sizeof(fk->icmp));
+ noff = skb_network_offset(skb);
+ if (!bond_flow_ip(skb, fk, &noff, &proto, l34))
+ return false;
+
+ /* ICMP error packets contain at least 8 bytes of the header
+ * of the packet which generated the error. Use this information
+ * to correlate ICMP error packets within the same flow which
+ * generated the error.
+ */
+ if (proto == IPPROTO_ICMP || proto == IPPROTO_ICMPV6) {
+ skb_flow_get_icmp_tci(skb, &fk->icmp, skb->data,
+ skb_transport_offset(skb),
+ skb_headlen(skb));
+ if (proto == IPPROTO_ICMP) {
+ if (!icmp_is_err(fk->icmp.type))
+ return true;
+
+ noff += sizeof(struct icmphdr);
+ } else if (proto == IPPROTO_ICMPV6) {
+ if (!icmpv6_is_err(fk->icmp.type))
+ return true;
+
+ noff += sizeof(struct icmp6hdr);
+ }
+ return bond_flow_ip(skb, fk, &noff, &proto, l34);
+ }
return true;
}
@@ -3311,10 +3397,14 @@ u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb)
return bond_eth_hash(skb);
if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER23 ||
- bond->params.xmit_policy == BOND_XMIT_POLICY_ENCAP23)
+ bond->params.xmit_policy == BOND_XMIT_POLICY_ENCAP23) {
hash = bond_eth_hash(skb);
- else
- hash = (__force u32)flow.ports.ports;
+ } else {
+ if (flow.icmp.id)
+ memcpy(&hash, &flow.icmp, sizeof(hash));
+ else
+ memcpy(&hash, &flow.ports.ports, sizeof(hash));
+ }
hash ^= (__force u32)flow_get_u32_dst(&flow) ^
(__force u32)flow_get_u32_src(&flow);
hash ^= (hash >> 16);
@@ -4891,6 +4981,10 @@ static int __init bonding_init(void)
goto err;
}
+ skb_flow_dissector_init(&flow_keys_bonding,
+ flow_keys_bonding_keys,
+ ARRAY_SIZE(flow_keys_bonding_keys));
+
register_netdevice_notifier(&bond_netdev_notifier);
out:
return res;
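Two things happen in the bonding changes above: the driver now registers its own reduced flow dissector at module init (skb_flow_dissector_init() with only the keys it hashes on), and layer-3+4 hashing learns to use the ICMP (type, code, id) tuple in place of the absent L4 ports, re-dissecting the inner header of ICMP errors via icmp_is_err()/icmpv6_is_err() so the errors land on the same slave as the offending flow. A toy, user-space model of the hash-input choice; the types and the mixing are illustrative, not the kernel's flow_keys or jhash:

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct flow {
	uint32_t ports;		/* L4 ports, 0 when absent */
	struct {
		uint8_t type, code;
		uint16_t id;	/* echo identifier, 0 when absent */
	} icmp;
	uint32_t src, dst;
};

static uint32_t flow_hash(const struct flow *fk)
{
	uint32_t hash;

	/* prefer the ICMP tuple when one was dissected */
	if (fk->icmp.id)
		memcpy(&hash, &fk->icmp, sizeof(hash));
	else
		memcpy(&hash, &fk->ports, sizeof(hash));

	hash ^= fk->src ^ fk->dst;
	hash ^= hash >> 16;
	hash ^= hash >> 8;
	return hash;
}

int main(void)
{
	struct flow echo = { .icmp = { 8, 0, 0x1234 },
			     .src = 0x0a000001, .dst = 0x0a000002 };

	printf("echo hash: %#x\n", flow_hash(&echo));
	return 0;
}
```

In the driver itself the equivalent decision sits in bond_xmit_hash(), where flow.icmp is copied into the hash seed ahead of flow.ports.ports.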
diff --git a/drivers/net/caif/Kconfig b/drivers/net/caif/Kconfig
index 2b9a2f117113..e74e2bb61236 100644
--- a/drivers/net/caif/Kconfig
+++ b/drivers/net/caif/Kconfig
@@ -3,44 +3,50 @@
# CAIF physical drivers
#
-comment "CAIF transport drivers"
+menuconfig CAIF_DRIVERS
+ bool "CAIF transport drivers"
+ depends on CAIF
+ help
+ Enable this to see CAIF physical drivers.
+
+if CAIF_DRIVERS
config CAIF_TTY
tristate "CAIF TTY transport driver"
depends on CAIF && TTY
default n
---help---
- The CAIF TTY transport driver is a Line Discipline (ldisc)
- identified as N_CAIF. When this ldisc is opened from user space
- it will redirect the TTY's traffic into the CAIF stack.
+ The CAIF TTY transport driver is a Line Discipline (ldisc)
+ identified as N_CAIF. When this ldisc is opened from user space
+ it will redirect the TTY's traffic into the CAIF stack.
config CAIF_SPI_SLAVE
tristate "CAIF SPI transport driver for slave interface"
depends on CAIF && HAS_DMA
default n
---help---
- The CAIF Link layer SPI Protocol driver for Slave SPI interface.
- This driver implements a platform driver to accommodate for a
- platform specific SPI device. A sample CAIF SPI Platform device is
- provided in Documentation/networking/caif/spi_porting.txt
+ The CAIF Link layer SPI Protocol driver for Slave SPI interface.
+ This driver implements a platform driver to accommodate a
+ platform-specific SPI device. A sample CAIF SPI Platform device is
+ provided in <file:Documentation/networking/caif/spi_porting.txt>.
config CAIF_SPI_SYNC
bool "Next command and length in start of frame"
depends on CAIF_SPI_SLAVE
default n
---help---
- Putting the next command and length in the start of the frame can
- help to synchronize to the next transfer in case of over or under-runs.
- This option also needs to be enabled on the modem.
+ Putting the next command and length in the start of the frame can
+ help to synchronize to the next transfer in case of over or under-runs.
+ This option also needs to be enabled on the modem.
config CAIF_HSI
- tristate "CAIF HSI transport driver"
- depends on CAIF
- default n
- ---help---
- The caif low level driver for CAIF over HSI.
- Be aware that if you enable this then you also need to
- enable a low-level HSI driver.
+ tristate "CAIF HSI transport driver"
+ depends on CAIF
+ default n
+ ---help---
+ The CAIF low level driver for CAIF over HSI.
+ Be aware that if you enable this then you also need to
+ enable a low-level HSI driver.
config CAIF_VIRTIO
tristate "CAIF virtio transport driver"
@@ -50,8 +56,10 @@ config CAIF_VIRTIO
select GENERIC_ALLOCATOR
default n
---help---
- The caif driver for CAIF over Virtio.
+ The CAIF driver for CAIF over Virtio.
if CAIF_VIRTIO
source "drivers/vhost/Kconfig.vringh"
endif
+
+endif # CAIF_DRIVERS
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
index b5145a7f874c..05f425ceb53a 100644
--- a/drivers/net/can/c_can/c_can_platform.c
+++ b/drivers/net/can/c_can/c_can_platform.c
@@ -39,10 +39,11 @@
#include "c_can.h"
-#define DCAN_RAM_INIT_BIT (1 << 3)
+#define DCAN_RAM_INIT_BIT BIT(3)
+
static DEFINE_SPINLOCK(raminit_lock);
-/*
- * 16-bit c_can registers can be arranged differently in the memory
+
+/* 16-bit c_can registers can be arranged differently in the memory
* architecture of different implementations. For example: 16-bit
* registers can be aligned to a 16-bit boundary or 32-bit boundary etc.
* Handle the same by providing a common read/write interface.
@@ -54,7 +55,7 @@ static u16 c_can_plat_read_reg_aligned_to_16bit(const struct c_can_priv *priv,
}
static void c_can_plat_write_reg_aligned_to_16bit(const struct c_can_priv *priv,
- enum reg index, u16 val)
+ enum reg index, u16 val)
{
writew(val, priv->base + priv->regs[index]);
}
@@ -66,7 +67,7 @@ static u16 c_can_plat_read_reg_aligned_to_32bit(const struct c_can_priv *priv,
}
static void c_can_plat_write_reg_aligned_to_32bit(const struct c_can_priv *priv,
- enum reg index, u16 val)
+ enum reg index, u16 val)
{
writew(val, priv->base + 2 * priv->regs[index]);
}
@@ -144,13 +145,13 @@ static u32 c_can_plat_read_reg32(const struct c_can_priv *priv, enum reg index)
u32 val;
val = priv->read_reg(priv, index);
- val |= ((u32) priv->read_reg(priv, index + 1)) << 16;
+ val |= ((u32)priv->read_reg(priv, index + 1)) << 16;
return val;
}
-static void c_can_plat_write_reg32(const struct c_can_priv *priv, enum reg index,
- u32 val)
+static void c_can_plat_write_reg32(const struct c_can_priv *priv,
+ enum reg index, u32 val)
{
priv->write_reg(priv, index + 1, val >> 16);
priv->write_reg(priv, index, val);
@@ -161,8 +162,8 @@ static u32 d_can_plat_read_reg32(const struct c_can_priv *priv, enum reg index)
return readl(priv->base + priv->regs[index]);
}
-static void d_can_plat_write_reg32(const struct c_can_priv *priv, enum reg index,
- u32 val)
+static void d_can_plat_write_reg32(const struct c_can_priv *priv,
+ enum reg index, u32 val)
{
writel(val, priv->base + priv->regs[index]);
}
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 1c88c361938c..6ee06a49fb4c 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -553,10 +553,9 @@ static void can_restart(struct net_device *dev)
/* send restart message upstream */
skb = alloc_can_err_skb(dev, &cf);
- if (!skb) {
- err = -ENOMEM;
+ if (!skb)
goto restart;
- }
+
cf->can_id |= CAN_ERR_RESTARTED;
netif_rx(skb);
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 57f9a2f51085..a929cdda9ab2 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -142,7 +142,7 @@
#define FLEXCAN_TX_MB_RESERVED_OFF_FIFO 8
#define FLEXCAN_TX_MB_RESERVED_OFF_TIMESTAMP 0
#define FLEXCAN_RX_MB_OFF_TIMESTAMP_FIRST (FLEXCAN_TX_MB_RESERVED_OFF_TIMESTAMP + 1)
-#define FLEXCAN_IFLAG_MB(x) BIT((x) & 0x1f)
+#define FLEXCAN_IFLAG_MB(x) BIT_ULL(x)
#define FLEXCAN_IFLAG_RX_FIFO_OVERFLOW BIT(7)
#define FLEXCAN_IFLAG_RX_FIFO_WARN BIT(6)
#define FLEXCAN_IFLAG_RX_FIFO_AVAILABLE BIT(5)
@@ -277,9 +277,9 @@ struct flexcan_priv {
u8 mb_size;
u8 clk_src; /* clock source of CAN Protocol Engine */
+ u64 rx_mask;
+ u64 tx_mask;
u32 reg_ctrl_default;
- u32 reg_imask1_default;
- u32 reg_imask2_default;
struct clk *clk_ipg;
struct clk *clk_per;
@@ -743,8 +743,6 @@ static void flexcan_irq_state(struct net_device *dev, u32 reg_esr)
u32 timestamp;
int err;
- timestamp = priv->read(&regs->timer) << 16;
-
flt = reg_esr & FLEXCAN_ESR_FLT_CONF_MASK;
if (likely(flt == FLEXCAN_ESR_FLT_CONF_ACTIVE)) {
tx_state = unlikely(reg_esr & FLEXCAN_ESR_TX_WRN) ?
@@ -764,6 +762,8 @@ static void flexcan_irq_state(struct net_device *dev, u32 reg_esr)
if (likely(new_state == priv->can.state))
return;
+ timestamp = priv->read(&regs->timer) << 16;
+
skb = alloc_can_err_skb(dev, &cf);
if (unlikely(!skb))
return;
@@ -778,21 +778,58 @@ static void flexcan_irq_state(struct net_device *dev, u32 reg_esr)
dev->stats.rx_fifo_errors++;
}
+static inline u64 flexcan_read64_mask(struct flexcan_priv *priv, void __iomem *addr, u64 mask)
+{
+ u64 reg = 0;
+
+ if (upper_32_bits(mask))
+ reg = (u64)priv->read(addr - 4) << 32;
+ if (lower_32_bits(mask))
+ reg |= priv->read(addr);
+
+ return reg & mask;
+}
+
+static inline void flexcan_write64(struct flexcan_priv *priv, u64 val, void __iomem *addr)
+{
+ if (upper_32_bits(val))
+ priv->write(upper_32_bits(val), addr - 4);
+ if (lower_32_bits(val))
+ priv->write(lower_32_bits(val), addr);
+}
+
+static inline u64 flexcan_read_reg_iflag_rx(struct flexcan_priv *priv)
+{
+ return flexcan_read64_mask(priv, &priv->regs->iflag1, priv->rx_mask);
+}
+
+static inline u64 flexcan_read_reg_iflag_tx(struct flexcan_priv *priv)
+{
+ return flexcan_read64_mask(priv, &priv->regs->iflag1, priv->tx_mask);
+}
+
static inline struct flexcan_priv *rx_offload_to_priv(struct can_rx_offload *offload)
{
return container_of(offload, struct flexcan_priv, offload);
}
-static unsigned int flexcan_mailbox_read(struct can_rx_offload *offload,
- struct can_frame *cf,
- u32 *timestamp, unsigned int n)
+static struct sk_buff *flexcan_mailbox_read(struct can_rx_offload *offload,
+ unsigned int n, u32 *timestamp,
+ bool drop)
{
struct flexcan_priv *priv = rx_offload_to_priv(offload);
struct flexcan_regs __iomem *regs = priv->regs;
struct flexcan_mb __iomem *mb;
+ struct sk_buff *skb;
+ struct can_frame *cf;
u32 reg_ctrl, reg_id, reg_iflag1;
int i;
+ if (unlikely(drop)) {
+ skb = ERR_PTR(-ENOBUFS);
+ goto mark_as_read;
+ }
+
mb = flexcan_get_mb(priv, n);
if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
@@ -806,7 +843,7 @@ static unsigned int flexcan_mailbox_read(struct can_rx_offload *offload,
code = reg_ctrl & FLEXCAN_MB_CODE_MASK;
if ((code != FLEXCAN_MB_CODE_RX_FULL) &&
(code != FLEXCAN_MB_CODE_RX_OVERRUN))
- return 0;
+ return NULL;
if (code == FLEXCAN_MB_CODE_RX_OVERRUN) {
/* This MB was overrun, we lost data */
@@ -816,11 +853,17 @@ static unsigned int flexcan_mailbox_read(struct can_rx_offload *offload,
} else {
reg_iflag1 = priv->read(&regs->iflag1);
if (!(reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_AVAILABLE))
- return 0;
+ return NULL;
reg_ctrl = priv->read(&mb->can_ctrl);
}
+ skb = alloc_can_skb(offload->dev, &cf);
+ if (!skb) {
+ skb = ERR_PTR(-ENOMEM);
+ goto mark_as_read;
+ }
+
 /* increase timestamp to full 32 bits */
*timestamp = reg_ctrl << 16;
@@ -839,16 +882,11 @@ static unsigned int flexcan_mailbox_read(struct can_rx_offload *offload,
*(__be32 *)(cf->data + i) = data;
}
- /* mark as read */
- if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
- /* Clear IRQ */
- if (n < 32)
- priv->write(BIT(n), &regs->iflag1);
- else
- priv->write(BIT(n - 32), &regs->iflag2);
- } else {
+ mark_as_read:
+ if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP)
+ flexcan_write64(priv, FLEXCAN_IFLAG_MB(n), &regs->iflag1);
+ else
priv->write(FLEXCAN_IFLAG_RX_FIFO_AVAILABLE, &regs->iflag1);
- }
/* Read the Free Running Timer. It is optional but recommended
* to unlock Mailbox as soon as possible and make it available
@@ -856,20 +894,7 @@ static unsigned int flexcan_mailbox_read(struct can_rx_offload *offload,
*/
priv->read(&regs->timer);
- return 1;
-}
-
-
-static inline u64 flexcan_read_reg_iflag_rx(struct flexcan_priv *priv)
-{
- struct flexcan_regs __iomem *regs = priv->regs;
- u32 iflag1, iflag2;
-
- iflag2 = priv->read(&regs->iflag2) & priv->reg_imask2_default &
- ~FLEXCAN_IFLAG_MB(priv->tx_mb_idx);
- iflag1 = priv->read(&regs->iflag1) & priv->reg_imask1_default;
-
- return (u64)iflag2 << 32 | iflag1;
+ return skb;
}
static irqreturn_t flexcan_irq(int irq, void *dev_id)
@@ -879,18 +904,19 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
struct flexcan_priv *priv = netdev_priv(dev);
struct flexcan_regs __iomem *regs = priv->regs;
irqreturn_t handled = IRQ_NONE;
- u32 reg_iflag2, reg_esr;
+ u64 reg_iflag_tx;
+ u32 reg_esr;
enum can_state last_state = priv->can.state;
/* reception interrupt */
if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
- u64 reg_iflag;
+ u64 reg_iflag_rx;
int ret;
- while ((reg_iflag = flexcan_read_reg_iflag_rx(priv))) {
+ while ((reg_iflag_rx = flexcan_read_reg_iflag_rx(priv))) {
handled = IRQ_HANDLED;
ret = can_rx_offload_irq_offload_timestamp(&priv->offload,
- reg_iflag);
+ reg_iflag_rx);
if (!ret)
break;
}
@@ -913,10 +939,10 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
}
}
- reg_iflag2 = priv->read(&regs->iflag2);
+ reg_iflag_tx = flexcan_read_reg_iflag_tx(priv);
/* transmission complete interrupt */
- if (reg_iflag2 & FLEXCAN_IFLAG_MB(priv->tx_mb_idx)) {
+ if (reg_iflag_tx & priv->tx_mask) {
u32 reg_ctrl = priv->read(&priv->tx_mb->can_ctrl);
handled = IRQ_HANDLED;
@@ -928,7 +954,7 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
/* after sending a RTR frame MB is in RX mode */
priv->write(FLEXCAN_MB_CODE_TX_INACTIVE,
&priv->tx_mb->can_ctrl);
- priv->write(FLEXCAN_IFLAG_MB(priv->tx_mb_idx), &regs->iflag2);
+ flexcan_write64(priv, priv->tx_mask, &regs->iflag1);
netif_wake_queue(dev);
}
@@ -1040,6 +1066,7 @@ static int flexcan_chip_start(struct net_device *dev)
struct flexcan_priv *priv = netdev_priv(dev);
struct flexcan_regs __iomem *regs = priv->regs;
u32 reg_mcr, reg_ctrl, reg_ctrl2, reg_mecr;
+ u64 reg_imask;
int err, i;
struct flexcan_mb __iomem *mb;
@@ -1214,8 +1241,9 @@ static int flexcan_chip_start(struct net_device *dev)
/* enable interrupts atomically */
disable_irq(dev->irq);
priv->write(priv->reg_ctrl_default, &regs->ctrl);
- priv->write(priv->reg_imask1_default, &regs->imask1);
- priv->write(priv->reg_imask2_default, &regs->imask2);
+ reg_imask = priv->rx_mask | priv->tx_mask;
+ priv->write(upper_32_bits(reg_imask), &regs->imask2);
+ priv->write(lower_32_bits(reg_imask), &regs->imask1);
enable_irq(dev->irq);
/* print chip status */
@@ -1283,26 +1311,19 @@ static int flexcan_open(struct net_device *dev)
flexcan_get_mb(priv, FLEXCAN_TX_MB_RESERVED_OFF_FIFO);
priv->tx_mb_idx = priv->mb_count - 1;
priv->tx_mb = flexcan_get_mb(priv, priv->tx_mb_idx);
-
- priv->reg_imask1_default = 0;
- priv->reg_imask2_default = FLEXCAN_IFLAG_MB(priv->tx_mb_idx);
+ priv->tx_mask = FLEXCAN_IFLAG_MB(priv->tx_mb_idx);
priv->offload.mailbox_read = flexcan_mailbox_read;
if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
- u64 imask;
-
priv->offload.mb_first = FLEXCAN_RX_MB_OFF_TIMESTAMP_FIRST;
priv->offload.mb_last = priv->mb_count - 2;
- imask = GENMASK_ULL(priv->offload.mb_last,
- priv->offload.mb_first);
- priv->reg_imask1_default |= imask;
- priv->reg_imask2_default |= imask >> 32;
-
+ priv->rx_mask = GENMASK_ULL(priv->offload.mb_last,
+ priv->offload.mb_first);
err = can_rx_offload_add_timestamp(dev, &priv->offload);
} else {
- priv->reg_imask1_default |= FLEXCAN_IFLAG_RX_FIFO_OVERFLOW |
+ priv->rx_mask = FLEXCAN_IFLAG_RX_FIFO_OVERFLOW |
FLEXCAN_IFLAG_RX_FIFO_AVAILABLE;
err = can_rx_offload_add_fifo(dev, &priv->offload,
FLEXCAN_NAPI_WEIGHT);
@@ -1534,7 +1555,6 @@ static int flexcan_probe(struct platform_device *pdev)
struct net_device *dev;
struct flexcan_priv *priv;
struct regulator *reg_xceiver;
- struct resource *mem;
struct clk *clk_ipg = NULL, *clk_per = NULL;
struct flexcan_regs __iomem *regs;
int err, irq;
@@ -1569,12 +1589,11 @@ static int flexcan_probe(struct platform_device *pdev)
clock_freq = clk_get_rate(clk_per);
}
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_irq(pdev, 0);
if (irq <= 0)
return -ENODEV;
- regs = devm_ioremap_resource(&pdev->dev, mem);
+ regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(regs))
return PTR_ERR(regs);
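Because FLEXCAN_IFLAG_MB() is now BIT_ULL(x), a single u64 covers all 64 mailboxes, and the new helpers treat iflag1/iflag2 (likewise imask1/imask2) as one 64-bit register pair. iflag2 sits 4 bytes below iflag1 in the register map, hence the read from addr - 4 for the upper word. A runnable model of the split-register accessors; the struct layout mimics that hardware ordering and the names are invented:

```c
#include <stdint.h>
#include <stdio.h>

struct regs {
	uint32_t iflag2;	/* lower address: bits 63..32 */
	uint32_t iflag1;	/* 4 bytes above:  bits 31..0 */
};

static uint64_t read64_mask(const struct regs *r, uint64_t mask)
{
	uint64_t val = 0;

	if (mask >> 32)		/* upper half needed? */
		val = (uint64_t)r->iflag2 << 32;
	if ((uint32_t)mask)	/* lower half needed? */
		val |= r->iflag1;

	return val & mask;
}

static void write64(struct regs *r, uint64_t val)
{
	if (val >> 32)
		r->iflag2 = (uint32_t)(val >> 32);
	if ((uint32_t)val)
		r->iflag1 = (uint32_t)val;
}

int main(void)
{
	struct regs r = { 0 };

	write64(&r, (1ULL << 40) | (1ULL << 3));  /* mailboxes 40 and 3 */
	printf("pending: %#llx\n",
	       (unsigned long long)read64_mask(&r, ~0ULL));
	return 0;
}
```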
diff --git a/drivers/net/can/grcan.c b/drivers/net/can/grcan.c
index b8f1f2b69dd3..378200b682fa 100644
--- a/drivers/net/can/grcan.c
+++ b/drivers/net/can/grcan.c
@@ -1652,7 +1652,6 @@ exit_free_candev:
static int grcan_probe(struct platform_device *ofdev)
{
struct device_node *np = ofdev->dev.of_node;
- struct resource *res;
u32 sysid, ambafreq;
int irq, err;
void __iomem *base;
@@ -1672,8 +1671,7 @@ static int grcan_probe(struct platform_device *ofdev)
goto exit_error;
}
- res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&ofdev->dev, res);
+ base = devm_platform_ioremap_resource(ofdev, 0);
if (IS_ERR(base)) {
err = PTR_ERR(base);
goto exit_error;
diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c
index fedd927ba6ed..04d59bede5ea 100644
--- a/drivers/net/can/ifi_canfd/ifi_canfd.c
+++ b/drivers/net/can/ifi_canfd/ifi_canfd.c
@@ -942,13 +942,11 @@ static int ifi_canfd_plat_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct net_device *ndev;
struct ifi_canfd_priv *priv;
- struct resource *res;
void __iomem *addr;
int irq, ret;
u32 id, rev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- addr = devm_ioremap_resource(dev, res);
+ addr = devm_platform_ioremap_resource(pdev, 0);
irq = platform_get_irq(pdev, 0);
if (IS_ERR(addr) || irq < 0)
return -EINVAL;
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index 562c8317e3aa..02c5795b7393 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -123,6 +123,7 @@ enum m_can_reg {
#define CCCR_CME_CANFD_BRS 0x2
#define CCCR_TXP BIT(14)
#define CCCR_TEST BIT(7)
+#define CCCR_DAR BIT(6)
#define CCCR_MON BIT(5)
#define CCCR_CSR BIT(4)
#define CCCR_CSA BIT(3)
@@ -777,6 +778,43 @@ static inline bool is_lec_err(u32 psr)
return psr && (psr != LEC_UNUSED);
}
+static inline bool m_can_is_protocol_err(u32 irqstatus)
+{
+ return irqstatus & IR_ERR_LEC_31X;
+}
+
+static int m_can_handle_protocol_error(struct net_device *dev, u32 irqstatus)
+{
+ struct net_device_stats *stats = &dev->stats;
+ struct m_can_classdev *cdev = netdev_priv(dev);
+ struct can_frame *cf;
+ struct sk_buff *skb;
+
+ /* propagate the error condition to the CAN stack */
+ skb = alloc_can_err_skb(dev, &cf);
+
+ /* update tx error stats since there was a protocol error */
+ stats->tx_errors++;
+
+ /* update arbitration lost status */
+ if (cdev->version >= 31 && (irqstatus & IR_PEA)) {
+ netdev_dbg(dev, "Protocol error in arbitration phase\n");
+ cdev->can.can_stats.arbitration_lost++;
+ if (skb) {
+ cf->can_id |= CAN_ERR_LOSTARB;
+ cf->data[0] |= CAN_ERR_LOSTARB_UNSPEC;
+ }
+ }
+
+ if (unlikely(!skb)) {
+ netdev_dbg(dev, "allocation of skb failed\n");
+ return 0;
+ }
+ netif_receive_skb(skb);
+
+ return 1;
+}
+
static int m_can_handle_bus_errors(struct net_device *dev, u32 irqstatus,
u32 psr)
{
@@ -791,6 +829,11 @@ static int m_can_handle_bus_errors(struct net_device *dev, u32 irqstatus,
is_lec_err(psr))
work_done += m_can_handle_lec_err(dev, psr & LEC_UNUSED);
+ /* handle protocol errors in arbitration phase */
+ if ((cdev->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) &&
+ m_can_is_protocol_err(irqstatus))
+ work_done += m_can_handle_protocol_error(dev, irqstatus);
+
 /* other unprocessed error interrupts */
m_can_handle_other_err(dev, irqstatus);
@@ -1135,7 +1178,7 @@ static void m_can_chip_config(struct net_device *dev)
if (cdev->version == 30) {
/* Version 3.0.x */
- cccr &= ~(CCCR_TEST | CCCR_MON |
+ cccr &= ~(CCCR_TEST | CCCR_MON | CCCR_DAR |
(CCCR_CMR_MASK << CCCR_CMR_SHIFT) |
(CCCR_CME_MASK << CCCR_CME_SHIFT));
@@ -1145,7 +1188,7 @@ static void m_can_chip_config(struct net_device *dev)
} else {
/* Version 3.1.x or 3.2.x */
cccr &= ~(CCCR_TEST | CCCR_MON | CCCR_BRSE | CCCR_FDOE |
- CCCR_NISO);
+ CCCR_NISO | CCCR_DAR);
/* Only 3.2.x has NISO Bit implemented */
if (cdev->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO)
@@ -1165,6 +1208,10 @@ static void m_can_chip_config(struct net_device *dev)
if (cdev->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
cccr |= CCCR_MON;
+ /* Disable Auto Retransmission (all versions) */
+ if (cdev->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
+ cccr |= CCCR_DAR;
+
/* Write config */
m_can_write(cdev, M_CAN_CCCR, cccr);
m_can_write(cdev, M_CAN_TEST, test);
@@ -1310,7 +1357,8 @@ static int m_can_dev_setup(struct m_can_classdev *m_can_dev)
m_can_dev->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
CAN_CTRLMODE_LISTENONLY |
CAN_CTRLMODE_BERR_REPORTING |
- CAN_CTRLMODE_FD;
+ CAN_CTRLMODE_FD |
+ CAN_CTRLMODE_ONE_SHOT;
/* Set properties depending on M_CAN version */
switch (m_can_dev->version) {
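CAN_CTRLMODE_ONE_SHOT is mapped onto the CCCR.DAR bit (Disable Automatic Retransmission). Note that the config path first clears DAR together with the other mode bits, so a setting left over from a previous run cannot leak through, and then sets it only on request. A toy version of that read-modify-write; the bit values are copied from the patch, the function itself is illustrative:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CCCR_DAR	(1U << 6)	/* Disable Auto Retransmission */
#define CCCR_MON	(1U << 5)	/* bus monitoring (listen-only) */

static uint32_t config_cccr(uint32_t cccr, bool listen_only, bool one_shot)
{
	cccr &= ~(CCCR_MON | CCCR_DAR);	/* clear stale mode bits first */
	if (listen_only)
		cccr |= CCCR_MON;
	if (one_shot)
		cccr |= CCCR_DAR;	/* frame is sent once, no retry */
	return cccr;
}

int main(void)
{
	printf("cccr = %#x\n", config_cccr(0xff, false, true));
	return 0;
}
```

With the mode advertised in ctrlmode_supported, one-shot can presumably be requested from user space through the usual CAN netlink interface (e.g. `ip link set can0 type can bitrate 500000 one-shot on`).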
diff --git a/drivers/net/can/m_can/m_can_platform.c b/drivers/net/can/m_can/m_can_platform.c
index 6ac4c35f247a..38ea5e600fb8 100644
--- a/drivers/net/can/m_can/m_can_platform.c
+++ b/drivers/net/can/m_can/m_can_platform.c
@@ -107,7 +107,7 @@ static int m_can_plat_probe(struct platform_device *pdev)
mcan_class->is_peripheral = false;
- platform_set_drvdata(pdev, mcan_class->dev);
+ platform_set_drvdata(pdev, mcan_class->net);
m_can_init_ram(mcan_class);
@@ -166,8 +166,6 @@ static int __maybe_unused m_can_runtime_resume(struct device *dev)
if (err)
clk_disable_unprepare(mcan_class->hclk);
- m_can_class_resume(dev);
-
return err;
}
diff --git a/drivers/net/can/peak_canfd/peak_canfd.c b/drivers/net/can/peak_canfd/peak_canfd.c
index 6b0c6a99fc8d..10aa3e457c33 100644
--- a/drivers/net/can/peak_canfd/peak_canfd.c
+++ b/drivers/net/can/peak_canfd/peak_canfd.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2007, 2011 Wolfgang Grandegger <wg@grandegger.com>
+/* Copyright (C) 2007, 2011 Wolfgang Grandegger <wg@grandegger.com>
* Copyright (C) 2012 Stephane Grosjean <s.grosjean@peak-system.com>
*
* Copyright (C) 2016 PEAK System-Technik GmbH
@@ -122,7 +121,8 @@ static int pucan_set_timing_slow(struct peak_canfd_priv *priv,
cmd = pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_TIMING_SLOW);
cmd->sjw_t = PUCAN_TSLOW_SJW_T(pbt->sjw - 1,
- priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES);
+ priv->can.ctrlmode &
+ CAN_CTRLMODE_3_SAMPLES);
cmd->tseg1 = PUCAN_TSLOW_TSEG1(pbt->prop_seg + pbt->phase_seg1 - 1);
cmd->tseg2 = PUCAN_TSLOW_TSEG2(pbt->phase_seg2 - 1);
cmd->brp = cpu_to_le16(PUCAN_TSLOW_BRP(pbt->brp - 1));
@@ -232,6 +232,20 @@ static int pucan_setup_rx_barrier(struct peak_canfd_priv *priv)
return pucan_write_cmd(priv);
}
+static int pucan_netif_rx(struct sk_buff *skb, __le32 ts_low, __le32 ts_high)
+{
+ struct skb_shared_hwtstamps *hwts = skb_hwtstamps(skb);
+ u64 ts_us;
+
+ ts_us = (u64)le32_to_cpu(ts_high) << 32;
+ ts_us |= le32_to_cpu(ts_low);
+
+ /* IP core timestamps are µs. */
+ hwts->hwtstamp = ns_to_ktime(ts_us * NSEC_PER_USEC);
+
+ return netif_rx(skb);
+}
+
/* handle the reception of one CAN frame */
static int pucan_handle_can_rx(struct peak_canfd_priv *priv,
struct pucan_rx_msg *msg)
@@ -299,7 +313,7 @@ static int pucan_handle_can_rx(struct peak_canfd_priv *priv,
stats->rx_bytes += cf->len;
stats->rx_packets++;
- netif_rx(skb);
+ pucan_netif_rx(skb, msg->ts_low, msg->ts_high);
return 0;
}
@@ -325,7 +339,6 @@ static int pucan_handle_status(struct peak_canfd_priv *priv,
/* this STATUS is the CNF of the RX_BARRIER: Tx path can be setup */
if (pucan_status_is_rx_barrier(msg)) {
-
if (priv->enable_tx_path) {
int err = priv->enable_tx_path(priv);
@@ -393,7 +406,7 @@ static int pucan_handle_status(struct peak_canfd_priv *priv,
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
- netif_rx(skb);
+ pucan_netif_rx(skb, msg->ts_low, msg->ts_high);
return 0;
}
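The new pucan_netif_rx() merges the IP core's two little-endian 32-bit timestamp words into a 64-bit microsecond count and scales it to nanoseconds before stamping the skb. The arithmetic in stand-alone form, with le32toh() standing in for the kernel's le32_to_cpu():

```c
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_USEC	1000ULL

static uint64_t ts_to_ns(uint32_t ts_low_le, uint32_t ts_high_le)
{
	uint64_t ts_us = (uint64_t)le32toh(ts_high_le) << 32;

	ts_us |= le32toh(ts_low_le);
	return ts_us * NSEC_PER_USEC;	/* us -> ns, as ns_to_ktime() takes */
}

int main(void)
{
	/* models the two words the hardware would deliver */
	printf("%llu ns\n",
	       (unsigned long long)ts_to_ns(htole32(2), htole32(1)));
	return 0;
}
```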
diff --git a/drivers/net/can/peak_canfd/peak_canfd_user.h b/drivers/net/can/peak_canfd/peak_canfd_user.h
index 95b23caa7dd6..a72719dc3b74 100644
--- a/drivers/net/can/peak_canfd/peak_canfd_user.h
+++ b/drivers/net/can/peak_canfd/peak_canfd_user.h
@@ -1,6 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * CAN driver for PEAK System micro-CAN based adapters
+/* CAN driver for PEAK System micro-CAN based adapters
*
* Copyright (C) 2003-2011 PEAK System-Technik GmbH
* Copyright (C) 2011-2013 Stephane Grosjean <s.grosjean@peak-system.com>
diff --git a/drivers/net/can/peak_canfd/peak_pciefd_main.c b/drivers/net/can/peak_canfd/peak_pciefd_main.c
index 13b10cbf236a..d08a3d559114 100644
--- a/drivers/net/can/peak_canfd/peak_pciefd_main.c
+++ b/drivers/net/can/peak_canfd/peak_pciefd_main.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2007, 2011 Wolfgang Grandegger <wg@grandegger.com>
+/* Copyright (C) 2007, 2011 Wolfgang Grandegger <wg@grandegger.com>
* Copyright (C) 2012 Stephane Grosjean <s.grosjean@peak-system.com>
*
* Derived from the PCAN project file driver/src/pcan_pci.c:
@@ -841,7 +840,8 @@ err_disable_pci:
/* pci_xxx_config_word() return positive PCIBIOS_xxx error codes while
* the probe() function must return a negative errno in case of failure
- * (err is unchanged if negative) */
+ * (err is unchanged if negative)
+ */
return pcibios_err_to_errno(err);
}
diff --git a/drivers/net/can/rcar/rcar_can.c b/drivers/net/can/rcar/rcar_can.c
index bf5adea9c0a3..48575900adb7 100644
--- a/drivers/net/can/rcar/rcar_can.c
+++ b/drivers/net/can/rcar/rcar_can.c
@@ -744,7 +744,6 @@ static int rcar_can_probe(struct platform_device *pdev)
{
struct rcar_can_priv *priv;
struct net_device *ndev;
- struct resource *mem;
void __iomem *addr;
u32 clock_select = CLKR_CLKP1;
int err = -ENODEV;
@@ -759,8 +758,7 @@ static int rcar_can_probe(struct platform_device *pdev)
goto fail;
}
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- addr = devm_ioremap_resource(&pdev->dev, mem);
+ addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(addr)) {
err = PTR_ERR(addr);
goto fail;
diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c
index edaa1ca972c1..de59dd6aad29 100644
--- a/drivers/net/can/rcar/rcar_canfd.c
+++ b/drivers/net/can/rcar/rcar_canfd.c
@@ -1630,7 +1630,6 @@ static void rcar_canfd_channel_remove(struct rcar_canfd_global *gpriv, u32 ch)
static int rcar_canfd_probe(struct platform_device *pdev)
{
- struct resource *mem;
void __iomem *addr;
u32 sts, ch, fcan_freq;
struct rcar_canfd_global *gpriv;
@@ -1704,8 +1703,7 @@ static int rcar_canfd_probe(struct platform_device *pdev)
/* CANFD clock is further divided by (1/2) within the IP */
fcan_freq /= 2;
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- addr = devm_ioremap_resource(&pdev->dev, mem);
+ addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(addr)) {
err = PTR_ERR(addr);
goto fail_dev;
diff --git a/drivers/net/can/rx-offload.c b/drivers/net/can/rx-offload.c
index 84cae167e42f..e8328910a234 100644
--- a/drivers/net/can/rx-offload.c
+++ b/drivers/net/can/rx-offload.c
@@ -1,7 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2014 David Jander, Protonic Holland
- * Copyright (C) 2014-2017 Pengutronix, Marc Kleine-Budde <kernel@pengutronix.de>
+/* Copyright (c) 2014 Protonic Holland,
+ * David Jander
+ * Copyright (C) 2014-2017 Pengutronix,
+ * Marc Kleine-Budde <kernel@pengutronix.de>
*/
#include <linux/can/dev.h>
@@ -11,14 +12,17 @@ struct can_rx_offload_cb {
u32 timestamp;
};
-static inline struct can_rx_offload_cb *can_rx_offload_get_cb(struct sk_buff *skb)
+static inline struct can_rx_offload_cb *
+can_rx_offload_get_cb(struct sk_buff *skb)
{
BUILD_BUG_ON(sizeof(struct can_rx_offload_cb) > sizeof(skb->cb));
return (struct can_rx_offload_cb *)skb->cb;
}
-static inline bool can_rx_offload_le(struct can_rx_offload *offload, unsigned int a, unsigned int b)
+static inline bool
+can_rx_offload_le(struct can_rx_offload *offload,
+ unsigned int a, unsigned int b)
{
if (offload->inc)
return a <= b;
@@ -26,7 +30,8 @@ static inline bool can_rx_offload_le(struct can_rx_offload *offload, unsigned in
return a >= b;
}
-static inline unsigned int can_rx_offload_inc(struct can_rx_offload *offload, unsigned int *val)
+static inline unsigned int
+can_rx_offload_inc(struct can_rx_offload *offload, unsigned int *val)
{
if (offload->inc)
return (*val)++;
@@ -36,7 +41,9 @@ static inline unsigned int can_rx_offload_inc(struct can_rx_offload *offload, un
static int can_rx_offload_napi_poll(struct napi_struct *napi, int quota)
{
- struct can_rx_offload *offload = container_of(napi, struct can_rx_offload, napi);
+ struct can_rx_offload *offload = container_of(napi,
+ struct can_rx_offload,
+ napi);
struct net_device *dev = offload->dev;
struct net_device_stats *stats = &dev->stats;
struct sk_buff *skb;
@@ -65,8 +72,9 @@ static int can_rx_offload_napi_poll(struct napi_struct *napi, int quota)
return work_done;
}
-static inline void __skb_queue_add_sort(struct sk_buff_head *head, struct sk_buff *new,
- int (*compare)(struct sk_buff *a, struct sk_buff *b))
+static inline void
+__skb_queue_add_sort(struct sk_buff_head *head, struct sk_buff *new,
+ int (*compare)(struct sk_buff *a, struct sk_buff *b))
{
struct sk_buff *pos, *insert = NULL;
@@ -101,7 +109,7 @@ static int can_rx_offload_compare(struct sk_buff *a, struct sk_buff *b)
cb_a = can_rx_offload_get_cb(a);
cb_b = can_rx_offload_get_cb(b);
- /* Substract two u32 and return result as int, to keep
+ /* Subtract two u32 and return result as int, to keep
* difference steady around the u32 overflow.
*/
return cb_b->timestamp - cb_a->timestamp;
@@ -131,75 +139,40 @@ static int can_rx_offload_compare(struct sk_buff *a, struct sk_buff *b)
static struct sk_buff *
can_rx_offload_offload_one(struct can_rx_offload *offload, unsigned int n)
{
- struct sk_buff *skb = NULL, *skb_error = NULL;
+ struct sk_buff *skb;
struct can_rx_offload_cb *cb;
- struct can_frame *cf;
- int ret;
-
- if (likely(skb_queue_len(&offload->skb_queue) <
- offload->skb_queue_len_max)) {
- skb = alloc_can_skb(offload->dev, &cf);
- if (unlikely(!skb))
- skb_error = ERR_PTR(-ENOMEM); /* skb alloc failed */
- } else {
- skb_error = ERR_PTR(-ENOBUFS); /* skb_queue is full */
- }
-
- /* If queue is full or skb not available, drop by reading into
- * overflow buffer.
- */
- if (unlikely(skb_error)) {
- struct can_frame cf_overflow;
- u32 timestamp;
-
- ret = offload->mailbox_read(offload, &cf_overflow,
- &timestamp, n);
-
- /* Mailbox was empty. */
- if (unlikely(!ret))
- return NULL;
-
- /* Mailbox has been read and we're dropping it or
- * there was a problem reading the mailbox.
- *
- * Increment error counters in any case.
- */
- offload->dev->stats.rx_dropped++;
- offload->dev->stats.rx_fifo_errors++;
-
- /* There was a problem reading the mailbox, propagate
- * error value.
- */
- if (unlikely(ret < 0))
- return ERR_PTR(ret);
-
- return skb_error;
- }
+ bool drop = false;
+ u32 timestamp;
- cb = can_rx_offload_get_cb(skb);
- ret = offload->mailbox_read(offload, cf, &cb->timestamp, n);
+ /* If queue is full drop frame */
+ if (unlikely(skb_queue_len(&offload->skb_queue) >
+ offload->skb_queue_len_max))
+ drop = true;
+ skb = offload->mailbox_read(offload, n, &timestamp, drop);
/* Mailbox was empty. */
- if (unlikely(!ret)) {
- kfree_skb(skb);
+ if (unlikely(!skb))
return NULL;
- }
-
- /* There was a problem reading the mailbox, propagate error value. */
- if (unlikely(ret < 0)) {
- kfree_skb(skb);
+ /* There was a problem reading the mailbox, propagate
+ * error value.
+ */
+ if (unlikely(IS_ERR(skb))) {
offload->dev->stats.rx_dropped++;
offload->dev->stats.rx_fifo_errors++;
- return ERR_PTR(ret);
+ return skb;
}
/* Mailbox was read. */
+ cb = can_rx_offload_get_cb(skb);
+ cb->timestamp = timestamp;
+
return skb;
}
-int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload, u64 pending)
+int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload,
+ u64 pending)
{
struct sk_buff_head skb_queue;
unsigned int i;
@@ -229,8 +202,8 @@ int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload, u64 pen
skb_queue_splice_tail(&skb_queue, &offload->skb_queue);
spin_unlock_irqrestore(&offload->skb_queue.lock, flags);
- if ((queue_len = skb_queue_len(&offload->skb_queue)) >
- (offload->skb_queue_len_max / 8))
+ queue_len = skb_queue_len(&offload->skb_queue);
+ if (queue_len > offload->skb_queue_len_max / 8)
netdev_dbg(offload->dev, "%s: queue_len=%d\n",
__func__, queue_len);
@@ -328,7 +301,9 @@ int can_rx_offload_queue_tail(struct can_rx_offload *offload,
}
EXPORT_SYMBOL_GPL(can_rx_offload_queue_tail);
-static int can_rx_offload_init_queue(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight)
+static int can_rx_offload_init_queue(struct net_device *dev,
+ struct can_rx_offload *offload,
+ unsigned int weight)
{
offload->dev = dev;
@@ -337,7 +312,6 @@ static int can_rx_offload_init_queue(struct net_device *dev, struct can_rx_offlo
offload->skb_queue_len_max *= 4;
skb_queue_head_init(&offload->skb_queue);
- can_rx_offload_reset(offload);
netif_napi_add(dev, &offload->napi, can_rx_offload_napi_poll, weight);
dev_dbg(dev->dev.parent, "%s: skb_queue_len_max=%d\n",
@@ -346,7 +320,8 @@ static int can_rx_offload_init_queue(struct net_device *dev, struct can_rx_offlo
return 0;
}
-int can_rx_offload_add_timestamp(struct net_device *dev, struct can_rx_offload *offload)
+int can_rx_offload_add_timestamp(struct net_device *dev,
+ struct can_rx_offload *offload)
{
unsigned int weight;
@@ -366,7 +341,8 @@ int can_rx_offload_add_timestamp(struct net_device *dev, struct can_rx_offload *
}
EXPORT_SYMBOL_GPL(can_rx_offload_add_timestamp);
-int can_rx_offload_add_fifo(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight)
+int can_rx_offload_add_fifo(struct net_device *dev,
+ struct can_rx_offload *offload, unsigned int weight)
{
if (!offload->mailbox_read)
return -EINVAL;
@@ -377,7 +353,6 @@ EXPORT_SYMBOL_GPL(can_rx_offload_add_fifo);
void can_rx_offload_enable(struct can_rx_offload *offload)
{
- can_rx_offload_reset(offload);
napi_enable(&offload->napi);
}
EXPORT_SYMBOL_GPL(can_rx_offload_enable);
@@ -388,8 +363,3 @@ void can_rx_offload_del(struct can_rx_offload *offload)
skb_queue_purge(&offload->skb_queue);
}
EXPORT_SYMBOL_GPL(can_rx_offload_del);
-
-void can_rx_offload_reset(struct can_rx_offload *offload)
-{
-}
-EXPORT_SYMBOL_GPL(can_rx_offload_reset);
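After this rework, mailbox_read() carries all three outcomes in its return value: NULL for an empty mailbox, an error pointer for a dropped or failed read, and a valid skb otherwise, so can_rx_offload_offload_one() no longer needs the overflow-buffer dance. A stand-alone model of the contract, with ERR_PTR/IS_ERR/PTR_ERR re-created locally for illustration:

```c
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)
#define PTR_ERR(ptr)	((long)(ptr))

struct frame { uint32_t id; };

static struct frame *mailbox_read(unsigned int n, bool drop)
{
	struct frame *f;

	if (drop)
		return ERR_PTR(-ENOBUFS);	/* queue full: drop frame */
	if (n % 2)
		return NULL;			/* model an empty mailbox */
	f = malloc(sizeof(*f));
	if (!f)
		return ERR_PTR(-ENOMEM);
	f->id = n;
	return f;
}

int main(void)
{
	for (unsigned int n = 0; n < 3; n++) {
		struct frame *f = mailbox_read(n, n == 2);

		if (!f) {
			printf("mbox %u: empty\n", n);
		} else if (IS_ERR(f)) {
			printf("mbox %u: dropped (%ld)\n", n, PTR_ERR(f));
		} else {
			printf("mbox %u: frame id %u\n", n, f->id);
			free(f);
		}
	}
	return 0;
}
```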
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index bb6032211043..0a9f42e5fedf 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -617,6 +617,7 @@ err_free_chan:
sl->tty = NULL;
tty->disc_data = NULL;
clear_bit(SLF_INUSE, &sl->flags);
+ free_netdev(sl->dev);
err_exit:
rtnl_unlock();
diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index bb20a9b75cc6..5009ff294941 100644
--- a/drivers/net/can/spi/mcp251x.c
+++ b/drivers/net/can/spi/mcp251x.c
@@ -22,7 +22,6 @@
#include <linux/can/core.h>
#include <linux/can/dev.h>
#include <linux/can/led.h>
-#include <linux/can/platform/mcp251x.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
@@ -321,6 +320,18 @@ static void mcp251x_write_reg(struct spi_device *spi, u8 reg, u8 val)
mcp251x_spi_trans(spi, 3);
}
+static void mcp251x_write_2regs(struct spi_device *spi, u8 reg, u8 v1, u8 v2)
+{
+ struct mcp251x_priv *priv = spi_get_drvdata(spi);
+
+ priv->spi_tx_buf[0] = INSTRUCTION_WRITE;
+ priv->spi_tx_buf[1] = reg;
+ priv->spi_tx_buf[2] = v1;
+ priv->spi_tx_buf[3] = v2;
+
+ mcp251x_spi_trans(spi, 4);
+}
+
static void mcp251x_write_bits(struct spi_device *spi, u8 reg,
u8 mask, u8 val)
{
@@ -457,6 +468,39 @@ static void mcp251x_hw_sleep(struct spi_device *spi)
mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_SLEEP);
}
+/* May only be called when device is sleeping! */
+static int mcp251x_hw_wake(struct spi_device *spi)
+{
+ unsigned long timeout;
+
+ /* Force wakeup interrupt to wake device, but don't execute IST */
+ disable_irq(spi->irq);
+ mcp251x_write_2regs(spi, CANINTE, CANINTE_WAKIE, CANINTF_WAKIF);
+
+ /* Wait for oscillator startup timer after wake up */
+ mdelay(MCP251X_OST_DELAY_MS);
+
+ /* Put device into config mode */
+ mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_CONF);
+
+ /* Wait for the device to enter config mode */
+ timeout = jiffies + HZ;
+ while ((mcp251x_read_reg(spi, CANSTAT) & CANCTRL_REQOP_MASK) !=
+ CANCTRL_REQOP_CONF) {
+ schedule();
+ if (time_after(jiffies, timeout)) {
+ dev_err(&spi->dev, "MCP251x didn't enter config mode\n");
+ return -EBUSY;
+ }
+ }
+
+ /* Disable and clear pending interrupts */
+ mcp251x_write_2regs(spi, CANINTE, 0x00, 0x00);
+ enable_irq(spi->irq);
+
+ return 0;
+}
+
static netdev_tx_t mcp251x_hard_start_xmit(struct sk_buff *skb,
struct net_device *net)
{
@@ -646,8 +690,7 @@ static int mcp251x_stop(struct net_device *net)
mutex_lock(&priv->mcp_lock);
/* Disable and clear pending interrupts */
- mcp251x_write_reg(spi, CANINTE, 0x00);
- mcp251x_write_reg(spi, CANINTF, 0x00);
+ mcp251x_write_2regs(spi, CANINTE, 0x00, 0x00);
mcp251x_write_reg(spi, TXBCTRL(0), 0);
mcp251x_clean(net);
@@ -715,8 +758,12 @@ static void mcp251x_restart_work_handler(struct work_struct *ws)
mutex_lock(&priv->mcp_lock);
if (priv->after_suspend) {
- mcp251x_hw_reset(spi);
- mcp251x_setup(net, spi);
+ if (priv->after_suspend & AFTER_SUSPEND_POWER) {
+ mcp251x_hw_reset(spi);
+ mcp251x_setup(net, spi);
+ } else {
+ mcp251x_hw_wake(spi);
+ }
priv->force_quit = 0;
if (priv->after_suspend & AFTER_SUSPEND_RESTART) {
mcp251x_set_normal_mode(spi);
@@ -913,7 +960,7 @@ static int mcp251x_open(struct net_device *net)
INIT_WORK(&priv->tx_work, mcp251x_tx_work_handler);
INIT_WORK(&priv->restart_work, mcp251x_restart_work_handler);
- ret = mcp251x_hw_reset(spi);
+ ret = mcp251x_hw_wake(spi);
if (ret)
goto out_free_wq;
ret = mcp251x_setup(net, spi);
@@ -986,19 +1033,19 @@ MODULE_DEVICE_TABLE(spi, mcp251x_id_table);
static int mcp251x_can_probe(struct spi_device *spi)
{
const void *match = device_get_match_data(&spi->dev);
- struct mcp251x_platform_data *pdata = dev_get_platdata(&spi->dev);
struct net_device *net;
struct mcp251x_priv *priv;
struct clk *clk;
- int freq, ret;
+ u32 freq;
+ int ret;
clk = devm_clk_get_optional(&spi->dev, NULL);
if (IS_ERR(clk))
return PTR_ERR(clk);
freq = clk_get_rate(clk);
- if (freq == 0 && pdata)
- freq = pdata->oscillator_frequency;
+ if (freq == 0)
+ device_property_read_u32(&spi->dev, "clock-frequency", &freq);
/* Sanity check */
if (freq < 1000000 || freq > 25000000)
@@ -1155,13 +1202,13 @@ static int __maybe_unused mcp251x_can_resume(struct device *dev)
if (priv->after_suspend & AFTER_SUSPEND_POWER)
mcp251x_power_enable(priv->power, 1);
-
- if (priv->after_suspend & AFTER_SUSPEND_UP) {
+ if (priv->after_suspend & AFTER_SUSPEND_UP)
mcp251x_power_enable(priv->transceiver, 1);
+
+ if (priv->after_suspend & (AFTER_SUSPEND_POWER | AFTER_SUSPEND_UP))
queue_work(priv->wq, &priv->restart_work);
- } else {
+ else
priv->after_suspend = 0;
- }
priv->force_quit = 0;
enable_irq(spi->irq);
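mcp251x_write_2regs() exploits the address auto-increment of the MCP251x SPI WRITE instruction to update two consecutive registers (CANINTE at 0x2b and CANINTF at 0x2c) in a single 4-byte transfer, which both the stop path and the new wake path use to disable and clear interrupts in one go. The buffer framing as a runnable sketch; the actual SPI transfer is stubbed out:

```c
#include <stdint.h>
#include <stdio.h>

#define INSTRUCTION_WRITE	0x02	/* MCP251x SPI write opcode */

static void frame_write_2regs(uint8_t buf[4], uint8_t reg,
			      uint8_t v1, uint8_t v2)
{
	buf[0] = INSTRUCTION_WRITE;
	buf[1] = reg;	/* first register address */
	buf[2] = v1;	/* written to reg */
	buf[3] = v2;	/* auto-increment: written to reg + 1 */
}

int main(void)
{
	uint8_t buf[4];

	frame_write_2regs(buf, 0x2b /* CANINTE */, 0x00, 0x00);
	printf("%02x %02x %02x %02x\n", buf[0], buf[1], buf[2], buf[3]);
	return 0;
}
```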
diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c
index f4cd88196404..e3ba8ab0cbf4 100644
--- a/drivers/net/can/sun4i_can.c
+++ b/drivers/net/can/sun4i_can.c
@@ -771,7 +771,6 @@ static int sun4ican_remove(struct platform_device *pdev)
static int sun4ican_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
- struct resource *mem;
struct clk *clk;
void __iomem *addr;
int err, irq;
@@ -791,8 +790,7 @@ static int sun4ican_probe(struct platform_device *pdev)
goto exit;
}
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- addr = devm_ioremap_resource(&pdev->dev, mem);
+ addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(addr)) {
err = -EBUSY;
goto exit;
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index 31ad364a89bb..94b1491b569f 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -535,15 +535,28 @@ struct ti_hecc_priv *rx_offload_to_priv(struct can_rx_offload *offload)
return container_of(offload, struct ti_hecc_priv, offload);
}
-static unsigned int ti_hecc_mailbox_read(struct can_rx_offload *offload,
- struct can_frame *cf,
- u32 *timestamp, unsigned int mbxno)
+static struct sk_buff *ti_hecc_mailbox_read(struct can_rx_offload *offload,
+ unsigned int mbxno, u32 *timestamp,
+ bool drop)
{
struct ti_hecc_priv *priv = rx_offload_to_priv(offload);
+ struct sk_buff *skb;
+ struct can_frame *cf;
u32 data, mbx_mask;
- int ret = 1;
mbx_mask = BIT(mbxno);
+
+ if (unlikely(drop)) {
+ skb = ERR_PTR(-ENOBUFS);
+ goto mark_as_read;
+ }
+
+ skb = alloc_can_skb(offload->dev, &cf);
+ if (unlikely(!skb)) {
+ skb = ERR_PTR(-ENOMEM);
+ goto mark_as_read;
+ }
+
data = hecc_read_mbx(priv, mbxno, HECC_CANMID);
if (data & HECC_CANMID_IDE)
cf->can_id = (data & CAN_EFF_MASK) | CAN_EFF_FLAG;
@@ -578,11 +591,12 @@ static unsigned int ti_hecc_mailbox_read(struct can_rx_offload *offload,
*/
if (unlikely(mbxno == HECC_RX_LAST_MBOX &&
hecc_read(priv, HECC_CANRML) & mbx_mask))
- ret = -ENOBUFS;
+ skb = ERR_PTR(-ENOBUFS);
+ mark_as_read:
hecc_write(priv, HECC_CANRMP, mbx_mask);
- return ret;
+ return skb;
}
static int ti_hecc_error(struct net_device *ndev, int int_status,
diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
index 7c482b2d78d2..4a96e2dd7d77 100644
--- a/drivers/net/can/xilinx_can.c
+++ b/drivers/net/can/xilinx_can.c
@@ -194,7 +194,7 @@ struct xcan_devtype_data {
*/
struct xcan_priv {
struct can_priv can;
- spinlock_t tx_lock;
+ spinlock_t tx_lock; /* Lock for synchronizing TX interrupt handling */
unsigned int tx_head;
unsigned int tx_tail;
unsigned int tx_max;
@@ -400,7 +400,7 @@ static int xcan_set_bittiming(struct net_device *ndev)
XCAN_SR_CONFIG_MASK;
if (!is_config_mode) {
netdev_alert(ndev,
- "BUG! Cannot set bittiming - CAN is not in config mode\n");
+ "BUG! Cannot set bittiming - CAN is not in config mode\n");
return -EPERM;
}
@@ -470,7 +470,13 @@ static int xcan_chip_start(struct net_device *ndev)
if (err < 0)
return err;
- /* Enable interrupts */
+ /* Enable interrupts
+ *
+ * We enable the ERROR interrupt even with
+ * CAN_CTRLMODE_BERR_REPORTING disabled as there is no
+ * dedicated interrupt for a state change to
+ * ERROR_WARNING/ERROR_PASSIVE.
+ */
ier = XCAN_IXR_TXOK_MASK | XCAN_IXR_BSOFF_MASK |
XCAN_IXR_WKUP_MASK | XCAN_IXR_SLP_MASK |
XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
@@ -482,11 +488,10 @@ static int xcan_chip_start(struct net_device *ndev)
priv->write_reg(priv, XCAN_IER_OFFSET, ier);
/* Check whether it is loopback mode or normal mode */
- if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
+ if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
reg_msr = XCAN_MSR_LBACK_MASK;
- } else {
+ else
reg_msr = 0x0;
- }
/* enable the first extended filter, if any, as cores with extended
* filtering default to non-receipt if all filters are disabled
@@ -981,12 +986,9 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
{
struct xcan_priv *priv = netdev_priv(ndev);
struct net_device_stats *stats = &ndev->stats;
- struct can_frame *cf;
- struct sk_buff *skb;
+ struct can_frame cf = { };
u32 err_status;
- skb = alloc_can_err_skb(ndev, &cf);
-
err_status = priv->read_reg(priv, XCAN_ESR_OFFSET);
priv->write_reg(priv, XCAN_ESR_OFFSET, err_status);
@@ -996,32 +998,27 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
/* Leave device in Config Mode in bus-off state */
priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
can_bus_off(ndev);
- if (skb)
- cf->can_id |= CAN_ERR_BUSOFF;
+ cf.can_id |= CAN_ERR_BUSOFF;
} else {
enum can_state new_state = xcan_current_error_state(ndev);
if (new_state != priv->can.state)
- xcan_set_error_state(ndev, new_state, skb ? cf : NULL);
+ xcan_set_error_state(ndev, new_state, &cf);
}
/* Check for Arbitration lost interrupt */
if (isr & XCAN_IXR_ARBLST_MASK) {
priv->can.can_stats.arbitration_lost++;
- if (skb) {
- cf->can_id |= CAN_ERR_LOSTARB;
- cf->data[0] = CAN_ERR_LOSTARB_UNSPEC;
- }
+ cf.can_id |= CAN_ERR_LOSTARB;
+ cf.data[0] = CAN_ERR_LOSTARB_UNSPEC;
}
/* Check for RX FIFO Overflow interrupt */
if (isr & XCAN_IXR_RXOFLW_MASK) {
stats->rx_over_errors++;
stats->rx_errors++;
- if (skb) {
- cf->can_id |= CAN_ERR_CRTL;
- cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
- }
+ cf.can_id |= CAN_ERR_CRTL;
+ cf.data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
}
/* Check for RX Match Not Finished interrupt */
@@ -1029,68 +1026,77 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
stats->rx_dropped++;
stats->rx_errors++;
netdev_err(ndev, "RX match not finished, frame discarded\n");
- if (skb) {
- cf->can_id |= CAN_ERR_CRTL;
- cf->data[1] |= CAN_ERR_CRTL_UNSPEC;
- }
+ cf.can_id |= CAN_ERR_CRTL;
+ cf.data[1] |= CAN_ERR_CRTL_UNSPEC;
}
/* Check for error interrupt */
if (isr & XCAN_IXR_ERROR_MASK) {
- if (skb)
- cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+ bool berr_reporting = false;
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) {
+ berr_reporting = true;
+ cf.can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+ }
/* Check for Ack error interrupt */
if (err_status & XCAN_ESR_ACKER_MASK) {
stats->tx_errors++;
- if (skb) {
- cf->can_id |= CAN_ERR_ACK;
- cf->data[3] = CAN_ERR_PROT_LOC_ACK;
+ if (berr_reporting) {
+ cf.can_id |= CAN_ERR_ACK;
+ cf.data[3] = CAN_ERR_PROT_LOC_ACK;
}
}
/* Check for Bit error interrupt */
if (err_status & XCAN_ESR_BERR_MASK) {
stats->tx_errors++;
- if (skb) {
- cf->can_id |= CAN_ERR_PROT;
- cf->data[2] = CAN_ERR_PROT_BIT;
+ if (berr_reporting) {
+ cf.can_id |= CAN_ERR_PROT;
+ cf.data[2] = CAN_ERR_PROT_BIT;
}
}
/* Check for Stuff error interrupt */
if (err_status & XCAN_ESR_STER_MASK) {
stats->rx_errors++;
- if (skb) {
- cf->can_id |= CAN_ERR_PROT;
- cf->data[2] = CAN_ERR_PROT_STUFF;
+ if (berr_reporting) {
+ cf.can_id |= CAN_ERR_PROT;
+ cf.data[2] = CAN_ERR_PROT_STUFF;
}
}
/* Check for Form error interrupt */
if (err_status & XCAN_ESR_FMER_MASK) {
stats->rx_errors++;
- if (skb) {
- cf->can_id |= CAN_ERR_PROT;
- cf->data[2] = CAN_ERR_PROT_FORM;
+ if (berr_reporting) {
+ cf.can_id |= CAN_ERR_PROT;
+ cf.data[2] = CAN_ERR_PROT_FORM;
}
}
/* Check for CRC error interrupt */
if (err_status & XCAN_ESR_CRCER_MASK) {
stats->rx_errors++;
- if (skb) {
- cf->can_id |= CAN_ERR_PROT;
- cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
+ if (berr_reporting) {
+ cf.can_id |= CAN_ERR_PROT;
+ cf.data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
}
}
priv->can.can_stats.bus_error++;
}
- if (skb) {
- stats->rx_packets++;
- stats->rx_bytes += cf->can_dlc;
- netif_rx(skb);
+ if (cf.can_id) {
+ struct can_frame *skb_cf;
+ struct sk_buff *skb = alloc_can_err_skb(ndev, &skb_cf);
+
+ if (skb) {
+ skb_cf->can_id |= cf.can_id;
+ memcpy(skb_cf->data, cf.data, CAN_ERR_DLC);
+ stats->rx_packets++;
+ stats->rx_bytes += CAN_ERR_DLC;
+ netif_rx(skb);
+ }
}
netdev_dbg(ndev, "%s: error status register:0x%x\n",
@@ -1651,7 +1657,6 @@ MODULE_DEVICE_TABLE(of, xcan_of_match);
*/
static int xcan_probe(struct platform_device *pdev)
{
- struct resource *res; /* IO mem resources */
struct net_device *ndev;
struct xcan_priv *priv;
const struct of_device_id *of_id;
@@ -1663,8 +1668,7 @@ static int xcan_probe(struct platform_device *pdev)
const char *hw_tx_max_property;
/* Get the virtual base address for the device */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- addr = devm_ioremap_resource(&pdev->dev, res);
+ addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(addr)) {
ret = PTR_ERR(addr);
goto err;
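The xilinx_can hunks above switch xcan_err_interrupt() to building the error frame in an on-stack struct can_frame (zero-initialized above the visible hunk): error bits are accumulated while the interrupt causes are decoded, with bus-error details only filled in when CAN_CTRLMODE_BERR_REPORTING is enabled, and the skb is allocated only once something was actually recorded. A minimal sketch of the pattern, where report_errors() is an illustrative name rather than a helper from the driver:

static void report_errors(struct net_device *ndev, const struct can_frame *cf)
{
	struct net_device_stats *stats = &ndev->stats;
	struct can_frame *skb_cf;
	struct sk_buff *skb;

	if (!cf->can_id)	/* nothing was recorded: no allocation */
		return;

	skb = alloc_can_err_skb(ndev, &skb_cf);
	if (!skb)		/* error counters were already updated */
		return;

	skb_cf->can_id |= cf->can_id;
	memcpy(skb_cf->data, cf->data, CAN_ERR_DLC);
	stats->rx_packets++;
	stats->rx_bytes += CAN_ERR_DLC;
	netif_rx(skb);
}

The probe hunk is an unrelated simplification: the platform_get_resource()/devm_ioremap_resource() pair collapses into devm_platform_ioremap_resource(), which performs exactly those two calls.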
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index f6232ce8481f..c7667645f04a 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -52,6 +52,8 @@ source "drivers/net/dsa/microchip/Kconfig"
source "drivers/net/dsa/mv88e6xxx/Kconfig"
+source "drivers/net/dsa/ocelot/Kconfig"
+
source "drivers/net/dsa/sja1105/Kconfig"
config NET_DSA_QCA8K
@@ -77,6 +79,7 @@ config NET_DSA_REALTEK_SMI
config NET_DSA_SMSC_LAN9303
tristate
select NET_DSA_TAG_LAN9303
+ select REGMAP
---help---
This enables support for the SMSC/Microchip LAN9303 3-port ethernet
switch chips.
diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile
index ae70b79628d6..9d384a32b3a2 100644
--- a/drivers/net/dsa/Makefile
+++ b/drivers/net/dsa/Makefile
@@ -20,4 +20,5 @@ obj-$(CONFIG_NET_DSA_VITESSE_VSC73XX_SPI) += vitesse-vsc73xx-spi.o
obj-y += b53/
obj-y += microchip/
obj-y += mv88e6xxx/
+obj-y += ocelot/
obj-y += sja1105/
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index cc3536315eff..36828f210030 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -524,7 +524,7 @@ int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
if (!dsa_is_user_port(ds, port))
return 0;
- cpu_port = ds->ports[port].cpu_dp->index;
+ cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
if (dev->ops->irq_enable)
ret = dev->ops->irq_enable(dev, port);
@@ -1503,11 +1503,25 @@ static int b53_arl_op(struct b53_device *dev, int op, int port,
idx = 1;
}
- memset(&ent, 0, sizeof(ent));
- ent.port = port;
- ent.is_valid = is_valid;
+ /* For a multicast address, the port is a bitmask and the validity
+ * is determined by at least one port still being active
+ */
+ if (!is_multicast_ether_addr(addr)) {
+ ent.port = port;
+ ent.is_valid = is_valid;
+ } else {
+ if (is_valid)
+ ent.port |= BIT(port);
+ else
+ ent.port &= ~BIT(port);
+
+ ent.is_valid = !!(ent.port);
+ }
+
ent.vid = vid;
ent.is_static = true;
+ ent.is_age = false;
memcpy(ent.mac, addr, ETH_ALEN);
b53_arl_from_entry(&mac_vid, &fwd_entry, &ent);
@@ -1626,10 +1640,51 @@ int b53_fdb_dump(struct dsa_switch *ds, int port,
}
EXPORT_SYMBOL(b53_fdb_dump);
+int b53_mdb_prepare(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb)
+{
+ struct b53_device *priv = ds->priv;
+
+ /* 5325 and 5365 require some more massaging, but could
+ * be supported eventually
+ */
+ if (is5325(priv) || is5365(priv))
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+EXPORT_SYMBOL(b53_mdb_prepare);
+
+void b53_mdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb)
+{
+ struct b53_device *priv = ds->priv;
+ int ret;
+
+ ret = b53_arl_op(priv, 0, port, mdb->addr, mdb->vid, true);
+ if (ret)
+ dev_err(ds->dev, "failed to add MDB entry\n");
+}
+EXPORT_SYMBOL(b53_mdb_add);
+
+int b53_mdb_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb)
+{
+ struct b53_device *priv = ds->priv;
+ int ret;
+
+ ret = b53_arl_op(priv, 0, port, mdb->addr, mdb->vid, false);
+ if (ret)
+ dev_err(ds->dev, "failed to delete MDB entry\n");
+
+ return ret;
+}
+EXPORT_SYMBOL(b53_mdb_del);
+
int b53_br_join(struct dsa_switch *ds, int port, struct net_device *br)
{
struct b53_device *dev = ds->priv;
- s8 cpu_port = ds->ports[port].cpu_dp->index;
+ s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
u16 pvlan, reg;
unsigned int i;
@@ -1675,7 +1730,7 @@ void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *br)
{
struct b53_device *dev = ds->priv;
struct b53_vlan *vl = &dev->vlans[0];
- s8 cpu_port = ds->ports[port].cpu_dp->index;
+ s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
unsigned int i;
u16 pvlan, reg, pvid;
@@ -1994,6 +2049,9 @@ static const struct dsa_switch_ops b53_switch_ops = {
.port_fdb_del = b53_fdb_del,
.port_mirror_add = b53_mirror_add,
.port_mirror_del = b53_mirror_del,
+ .port_mdb_prepare = b53_mdb_prepare,
+ .port_mdb_add = b53_mdb_add,
+ .port_mdb_del = b53_mdb_del,
};
struct b53_chip_data {
@@ -2341,10 +2399,13 @@ struct b53_device *b53_switch_alloc(struct device *base,
struct dsa_switch *ds;
struct b53_device *dev;
- ds = dsa_switch_alloc(base, DSA_MAX_PORTS);
+ ds = devm_kzalloc(base, sizeof(*ds), GFP_KERNEL);
if (!ds)
return NULL;
+ ds->dev = base;
+ ds->num_ports = DSA_MAX_PORTS;
+
dev = devm_kzalloc(base, sizeof(*dev), GFP_KERNEL);
if (!dev)
return NULL;
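The b53_mdb_{prepare,add,del} hooks added above funnel into b53_arl_op(), whose new comment defines the multicast convention: for a multicast address, .port is a bitmask of member ports and the entry stays valid while at least one member remains. A minimal sketch of just that update, with arl_mcast_update() as an illustrative name:

static void arl_mcast_update(struct b53_arl_entry *ent, int port, bool join)
{
	if (join)
		ent->port |= BIT(port);		/* add the port to the member mask */
	else
		ent->port &= ~BIT(port);	/* remove it */

	/* valid for as long as any member port is left */
	ent->is_valid = !!ent->port;
}

This is also why b53_priv.h below widens b53_arl_entry.port from u8 to u16: the field now has to hold a port bitmask rather than a single port number.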
diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
index a7dd8acc281b..1877acf05081 100644
--- a/drivers/net/dsa/b53/b53_priv.h
+++ b/drivers/net/dsa/b53/b53_priv.h
@@ -250,7 +250,7 @@ b53_build_op(write48, u64);
b53_build_op(write64, u64);
struct b53_arl_entry {
- u8 port;
+ u16 port;
u8 mac[ETH_ALEN];
u16 vid;
u8 is_valid:1;
@@ -351,6 +351,12 @@ int b53_fdb_del(struct dsa_switch *ds, int port,
const unsigned char *addr, u16 vid);
int b53_fdb_dump(struct dsa_switch *ds, int port,
dsa_fdb_dump_cb_t *cb, void *data);
+int b53_mdb_prepare(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb);
+void b53_mdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb);
+int b53_mdb_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb);
int b53_mirror_add(struct dsa_switch *ds, int port,
struct dsa_mall_mirror_tc_entry *mirror, bool ingress);
enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds, int port);
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 69fc13046ac7..e43040c9f9ee 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -350,6 +350,18 @@ static int bcm_sf2_sw_rst(struct bcm_sf2_priv *priv)
{
unsigned int timeout = 1000;
u32 reg;
+ int ret;
+
+ /* The watchdog reset does not work on 7278; we need to hit the
+ * "external" reset line through the reset controller.
+ */
+ if (priv->type == BCM7278_DEVICE_ID && !IS_ERR(priv->rcdev)) {
+ ret = reset_control_assert(priv->rcdev);
+ if (ret)
+ return ret;
+
+ return reset_control_deassert(priv->rcdev);
+ }
reg = core_readl(priv, CORE_WATCHDOG_CTRL);
reg |= SOFTWARE_RESET | EN_CHIP_RST | EN_SW_RESET;
@@ -381,8 +393,9 @@ static void bcm_sf2_identify_ports(struct bcm_sf2_priv *priv,
struct device_node *dn)
{
struct device_node *port;
- int mode;
unsigned int port_num;
+ phy_interface_t mode;
+ int err;
priv->moca_port = -1;
@@ -395,8 +408,8 @@ static void bcm_sf2_identify_ports(struct bcm_sf2_priv *priv,
* has completed, since they might be turned off at that
* time
*/
- mode = of_get_phy_mode(port);
- if (mode < 0)
+ err = of_get_phy_mode(port, &mode);
+ if (err)
continue;
if (mode == PHY_INTERFACE_MODE_INTERNAL)
@@ -668,7 +681,7 @@ static void bcm_sf2_sw_fixed_state(struct dsa_switch *ds, int port,
* state machine and make it go in PHY_FORCING state instead.
*/
if (!status->link)
- netif_carrier_off(ds->ports[port].slave);
+ netif_carrier_off(dsa_to_port(ds, port)->slave);
status->duplex = DUPLEX_FULL;
} else {
status->link = true;
@@ -734,7 +747,7 @@ static int bcm_sf2_sw_resume(struct dsa_switch *ds)
static void bcm_sf2_sw_get_wol(struct dsa_switch *ds, int port,
struct ethtool_wolinfo *wol)
{
- struct net_device *p = ds->ports[port].cpu_dp->master;
+ struct net_device *p = dsa_to_port(ds, port)->cpu_dp->master;
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
struct ethtool_wolinfo pwol = { };
@@ -758,9 +771,9 @@ static void bcm_sf2_sw_get_wol(struct dsa_switch *ds, int port,
static int bcm_sf2_sw_set_wol(struct dsa_switch *ds, int port,
struct ethtool_wolinfo *wol)
{
- struct net_device *p = ds->ports[port].cpu_dp->master;
+ struct net_device *p = dsa_to_port(ds, port)->cpu_dp->master;
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
- s8 cpu_port = ds->ports[port].cpu_dp->index;
+ s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
struct ethtool_wolinfo pwol = { };
if (p->ethtool_ops->get_wol)
@@ -974,6 +987,9 @@ static const struct dsa_switch_ops bcm_sf2_ops = {
.set_rxnfc = bcm_sf2_set_rxnfc,
.port_mirror_add = b53_mirror_add,
.port_mirror_del = b53_mirror_del,
+ .port_mdb_prepare = b53_mdb_prepare,
+ .port_mdb_add = b53_mdb_add,
+ .port_mdb_del = b53_mdb_del,
};
struct bcm_sf2_of_data {
@@ -1088,6 +1104,11 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
priv->core_reg_align = data->core_reg_align;
priv->num_cfp_rules = data->num_cfp_rules;
+ priv->rcdev = devm_reset_control_get_optional_exclusive(&pdev->dev,
+ "switch");
+ if (PTR_ERR(priv->rcdev) == -EPROBE_DEFER)
+ return PTR_ERR(priv->rcdev);
+
/* Auto-detection using standard registers will not work, so
* provide an indication of what kind of device we are for
* b53_common to work with
@@ -1220,6 +1241,8 @@ static int bcm_sf2_sw_remove(struct platform_device *pdev)
dsa_unregister_switch(priv->dev->ds);
bcm_sf2_cfp_exit(priv->dev->ds);
bcm_sf2_mdio_unregister(priv);
+ if (priv->type == BCM7278_DEVICE_ID && !IS_ERR(priv->rcdev))
+ reset_control_assert(priv->rcdev);
return 0;
}
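The reset handling above leans on the semantics of the optional reset-controller API: when the device tree carries no "switch" reset, devm_reset_control_get_optional_exclusive() returns NULL rather than an error, IS_ERR(NULL) is false, and the reset_control_*() calls are no-ops on a NULL handle. A minimal probe-side sketch under those assumptions, with sf2_get_reset() as an illustrative name:

static int sf2_get_reset(struct bcm_sf2_priv *priv, struct device *dev)
{
	priv->rcdev = devm_reset_control_get_optional_exclusive(dev, "switch");
	if (PTR_ERR(priv->rcdev) == -EPROBE_DEFER)
		return -EPROBE_DEFER;	/* reset provider not probed yet */

	/* any other ERR_PTR is left in place: the !IS_ERR(priv->rcdev)
	 * checks in bcm_sf2_sw_rst() and bcm_sf2_sw_remove() then skip
	 * the external reset line entirely
	 */
	return 0;
}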
diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h
index 1df30ccec42d..de386dd96d66 100644
--- a/drivers/net/dsa/bcm_sf2.h
+++ b/drivers/net/dsa/bcm_sf2.h
@@ -18,6 +18,7 @@
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/if_vlan.h>
+#include <linux/reset.h>
#include <net/dsa.h>
@@ -64,6 +65,8 @@ struct bcm_sf2_priv {
void __iomem *fcb;
void __iomem *acb;
+ struct reset_control *rcdev;
+
/* Register offsets indirection tables */
u32 type;
const u16 *reg_offsets;
diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c
index d264776a95a3..f3f0c3f07391 100644
--- a/drivers/net/dsa/bcm_sf2_cfp.c
+++ b/drivers/net/dsa/bcm_sf2_cfp.c
@@ -821,7 +821,7 @@ static int bcm_sf2_cfp_rule_insert(struct dsa_switch *ds, int port,
struct ethtool_rx_flow_spec *fs)
{
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
- s8 cpu_port = ds->ports[port].cpu_dp->index;
+ s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
__u64 ring_cookie = fs->ring_cookie;
unsigned int queue_num, port_num;
int ret;
@@ -1049,7 +1049,7 @@ static int bcm_sf2_cfp_rule_get_all(struct bcm_sf2_priv *priv,
int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port,
struct ethtool_rxnfc *nfc, u32 *rule_locs)
{
- struct net_device *p = ds->ports[port].cpu_dp->master;
+ struct net_device *p = dsa_to_port(ds, port)->cpu_dp->master;
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
int ret = 0;
@@ -1092,7 +1092,7 @@ int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port,
int bcm_sf2_set_rxnfc(struct dsa_switch *ds, int port,
struct ethtool_rxnfc *nfc)
{
- struct net_device *p = ds->ports[port].cpu_dp->master;
+ struct net_device *p = dsa_to_port(ds, port)->cpu_dp->master;
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
int ret = 0;
diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c
index 925ed135a4d9..c8d7ef27fd72 100644
--- a/drivers/net/dsa/dsa_loop.c
+++ b/drivers/net/dsa/dsa_loop.c
@@ -286,10 +286,13 @@ static int dsa_loop_drv_probe(struct mdio_device *mdiodev)
dev_info(&mdiodev->dev, "%s: 0x%0x\n",
pdata->name, pdata->enabled_ports);
- ds = dsa_switch_alloc(&mdiodev->dev, DSA_MAX_PORTS);
+ ds = devm_kzalloc(&mdiodev->dev, sizeof(*ds), GFP_KERNEL);
if (!ds)
return -ENOMEM;
+ ds->dev = &mdiodev->dev;
+ ds->num_ports = DSA_MAX_PORTS;
+
ps = devm_kzalloc(&mdiodev->dev, sizeof(*ps), GFP_KERNEL);
if (!ps)
return -ENOMEM;
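The dsa_loop conversion above is the shape repeated by every driver in this series: with dsa_switch_alloc() gone, each probe path allocates struct dsa_switch itself and fills in the two fields the helper used to set. A condensed sketch; alloc_dsa_switch() is not a real DSA helper:

static struct dsa_switch *alloc_dsa_switch(struct device *dev,
					   size_t num_ports)
{
	struct dsa_switch *ds;

	ds = devm_kzalloc(dev, sizeof(*ds), GFP_KERNEL);
	if (!ds)
		return NULL;

	ds->dev = dev;			/* previously set by dsa_switch_alloc() */
	ds->num_ports = num_ports;

	return ds;
}

One side effect worth noting: the mv88e6060 and mv88e6xxx conversions further down leave the pre-existing ds->dev = dev; line in place, so the field is assigned twice. Harmless, but a candidate for follow-up cleanup.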
diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c
index bbec86b9418e..e3c333a8f45d 100644
--- a/drivers/net/dsa/lan9303-core.c
+++ b/drivers/net/dsa/lan9303-core.c
@@ -1283,10 +1283,12 @@ static int lan9303_register_switch(struct lan9303 *chip)
{
int base;
- chip->ds = dsa_switch_alloc(chip->dev, LAN9303_NUM_PORTS);
+ chip->ds = devm_kzalloc(chip->dev, sizeof(*chip->ds), GFP_KERNEL);
if (!chip->ds)
return -ENOMEM;
+ chip->ds->dev = chip->dev;
+ chip->ds->num_ports = LAN9303_NUM_PORTS;
chip->ds->priv = chip;
chip->ds->ops = &lan9303_switch_ops;
base = chip->phy_addr_base;
diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c
index a69c9b9878b7..955324968b74 100644
--- a/drivers/net/dsa/lantiq_gswip.c
+++ b/drivers/net/dsa/lantiq_gswip.c
@@ -1854,10 +1854,12 @@ static int gswip_probe(struct platform_device *pdev)
if (!priv->hw_info)
return -EINVAL;
- priv->ds = dsa_switch_alloc(dev, priv->hw_info->max_ports);
+ priv->ds = devm_kzalloc(dev, sizeof(*priv->ds), GFP_KERNEL);
if (!priv->ds)
return -ENOMEM;
+ priv->ds->dev = dev;
+ priv->ds->num_ports = priv->hw_info->max_ports;
priv->ds->priv = priv;
priv->ds->ops = &gswip_switch_ops;
priv->dev = dev;
diff --git a/drivers/net/dsa/microchip/ksz9477_i2c.c b/drivers/net/dsa/microchip/ksz9477_i2c.c
index fdffd9e0c518..7d050fab0889 100644
--- a/drivers/net/dsa/microchip/ksz9477_i2c.c
+++ b/drivers/net/dsa/microchip/ksz9477_i2c.c
@@ -87,7 +87,6 @@ MODULE_DEVICE_TABLE(of, ksz9477_dt_ids);
static struct i2c_driver ksz9477_i2c_driver = {
.driver = {
.name = "ksz9477-switch",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(ksz9477_dt_ids),
},
.probe = ksz9477_i2c_probe,
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index fe47180c908b..d8fda4a02640 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -398,10 +398,13 @@ struct ksz_device *ksz_switch_alloc(struct device *base, void *priv)
struct dsa_switch *ds;
struct ksz_device *swdev;
- ds = dsa_switch_alloc(base, DSA_MAX_PORTS);
+ ds = devm_kzalloc(base, sizeof(*ds), GFP_KERNEL);
if (!ds)
return NULL;
+ ds->dev = base;
+ ds->num_ports = DSA_MAX_PORTS;
+
swdev = devm_kzalloc(base, sizeof(*swdev), GFP_KERNEL);
if (!swdev)
return NULL;
@@ -419,6 +422,7 @@ EXPORT_SYMBOL(ksz_switch_alloc);
int ksz_switch_register(struct ksz_device *dev,
const struct ksz_dev_ops *ops)
{
+ phy_interface_t interface;
int ret;
if (dev->pdata)
@@ -453,9 +457,9 @@ int ksz_switch_register(struct ksz_device *dev,
* device tree.
*/
if (dev->dev->of_node) {
- ret = of_get_phy_mode(dev->dev->of_node);
- if (ret >= 0)
- dev->interface = ret;
+ ret = of_get_phy_mode(dev->dev->of_node, &interface);
+ if (ret == 0)
+ dev->interface = interface;
dev->synclko_125 = of_property_read_bool(dev->dev->of_node,
"microchip,synclko-125");
}
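The ksz hunk above, together with the bcm_sf2 hunk earlier and the mt7530 hunks that follow, tracks the new of_get_phy_mode() calling convention: the helper now returns 0 or a negative errno and hands the mode back through a phy_interface_t out-parameter instead of multiplexing both into its return value. A minimal sketch of the new usage, with get_mode_example() as an illustrative name:

static phy_interface_t get_mode_example(struct device_node *np)
{
	phy_interface_t mode;

	/* 0 on success, negative errno otherwise */
	if (of_get_phy_mode(np, &mode))
		mode = PHY_INTERFACE_MODE_NA;	/* caller-chosen fallback */

	return mode;
}

mt7530's "ret && ret != -ENODEV" checks additionally tolerate -ENODEV, so only other errors abort the setup path.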
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 1d8d36de4d20..ed1ec10ec62b 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -862,7 +862,7 @@ mt7530_port_set_vlan_unaware(struct dsa_switch *ds, int port)
for (i = 0; i < MT7530_NUM_PORTS; i++) {
if (dsa_is_user_port(ds, i) &&
- dsa_port_is_vlan_filtering(&ds->ports[i])) {
+ dsa_port_is_vlan_filtering(dsa_to_port(ds, i))) {
all_user_ports_removed = false;
break;
}
@@ -922,7 +922,7 @@ mt7530_port_bridge_leave(struct dsa_switch *ds, int port,
* other port is still a VLAN-aware port.
*/
if (dsa_is_user_port(ds, i) && i != port &&
- !dsa_port_is_vlan_filtering(&ds->ports[i])) {
+ !dsa_port_is_vlan_filtering(dsa_to_port(ds, i))) {
if (dsa_to_port(ds, i)->bridge_dev != bridge)
continue;
if (priv->ports[i].enable)
@@ -1165,7 +1165,7 @@ mt7530_port_vlan_add(struct dsa_switch *ds, int port,
/* The port is kept VLAN-unaware if vlan_filtering is not set
 * on the bridge.
 */
- if (!dsa_port_is_vlan_filtering(&ds->ports[port]))
+ if (!dsa_port_is_vlan_filtering(dsa_to_port(ds, port)))
return;
mutex_lock(&priv->reg_mutex);
@@ -1196,7 +1196,7 @@ mt7530_port_vlan_del(struct dsa_switch *ds, int port,
/* The port is kept VLAN-unaware if vlan_filtering is not set
 * on the bridge.
 */
- if (!dsa_port_is_vlan_filtering(&ds->ports[port]))
+ if (!dsa_port_is_vlan_filtering(dsa_to_port(ds, port)))
return 0;
mutex_lock(&priv->reg_mutex);
@@ -1252,7 +1252,7 @@ mt7530_setup(struct dsa_switch *ds)
* controller is also the container for two GMAC nodes, represented
* as two netdev instances.
*/
- dn = ds->ports[MT7530_CPU_PORT].master->dev.of_node->parent;
+ dn = dsa_to_port(ds, MT7530_CPU_PORT)->master->dev.of_node->parent;
if (priv->id == ID_MT7530) {
priv->ethernet = syscon_node_to_regmap(dn);
@@ -1340,7 +1340,9 @@ mt7530_setup(struct dsa_switch *ds)
if (!dsa_is_unused_port(ds, 5)) {
priv->p5_intf_sel = P5_INTF_SEL_GMAC5;
- interface = of_get_phy_mode(ds->ports[5].dn);
+ ret = of_get_phy_mode(dsa_to_port(ds, 5)->dn, &interface);
+ if (ret && ret != -ENODEV)
+ return ret;
} else {
/* Scan the ethernet nodes. Look for GMAC1, look up the PHY in use */
for_each_child_of_node(dn, mac_np) {
@@ -1354,7 +1356,9 @@ mt7530_setup(struct dsa_switch *ds)
phy_node = of_parse_phandle(mac_np, "phy-handle", 0);
if (phy_node->parent == priv->dev->of_node->parent) {
- interface = of_get_phy_mode(mac_np);
+ ret = of_get_phy_mode(mac_np, &interface);
+ if (ret && ret != -ENODEV)
+ return ret;
id = of_mdio_parse_addr(ds->dev, phy_node);
if (id == 0)
priv->p5_intf_sel = P5_INTF_SEL_PHY_P0;
@@ -1632,10 +1636,13 @@ mt7530_probe(struct mdio_device *mdiodev)
if (!priv)
return -ENOMEM;
- priv->ds = dsa_switch_alloc(&mdiodev->dev, DSA_MAX_PORTS);
+ priv->ds = devm_kzalloc(&mdiodev->dev, sizeof(*priv->ds), GFP_KERNEL);
if (!priv->ds)
return -ENOMEM;
+ priv->ds->dev = &mdiodev->dev;
+ priv->ds->num_ports = DSA_MAX_PORTS;
+
/* Use the mediatek,mcm property to distinguish the hardware type, as
 * it causes slight differences in the power-on sequence.
 */
diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c
index 2a2489b5196d..a5a37f47b320 100644
--- a/drivers/net/dsa/mv88e6060.c
+++ b/drivers/net/dsa/mv88e6060.c
@@ -270,10 +270,12 @@ static int mv88e6060_probe(struct mdio_device *mdiodev)
dev_info(dev, "switch %s detected\n", name);
- ds = dsa_switch_alloc(dev, MV88E6060_PORTS);
+ ds = devm_kzalloc(dev, sizeof(*ds), GFP_KERNEL);
if (!ds)
return -ENOMEM;
+ ds->dev = dev;
+ ds->num_ports = MV88E6060_PORTS;
ds->priv = priv;
ds->dev = dev;
ds->ops = &mv88e6060_switch_ops;
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 6787d560e9e3..3bd988529178 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -1057,35 +1057,43 @@ static int mv88e6xxx_set_mac_eee(struct dsa_switch *ds, int port,
return 0;
}
+/* Mask of the local ports allowed to receive frames from a given fabric port */
static u16 mv88e6xxx_port_vlan(struct mv88e6xxx_chip *chip, int dev, int port)
{
- struct dsa_switch *ds = NULL;
+ struct dsa_switch *ds = chip->ds;
+ struct dsa_switch_tree *dst = ds->dst;
struct net_device *br;
+ struct dsa_port *dp;
+ bool found = false;
u16 pvlan;
- int i;
- if (dev < DSA_MAX_SWITCHES)
- ds = chip->ds->dst->ds[dev];
+ list_for_each_entry(dp, &dst->ports, list) {
+ if (dp->ds->index == dev && dp->index == port) {
+ found = true;
+ break;
+ }
+ }
/* Prevent frames from unknown switch or port */
- if (!ds || port >= ds->num_ports)
+ if (!found)
return 0;
/* Frames from DSA links and CPU ports can egress any local port */
- if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port))
+ if (dp->type == DSA_PORT_TYPE_CPU || dp->type == DSA_PORT_TYPE_DSA)
return mv88e6xxx_port_mask(chip);
- br = ds->ports[port].bridge_dev;
+ br = dp->bridge_dev;
pvlan = 0;
/* Frames from user ports can egress any local DSA links and CPU ports,
* as well as any local member of their bridge group.
*/
- for (i = 0; i < mv88e6xxx_num_ports(chip); ++i)
- if (dsa_is_cpu_port(chip->ds, i) ||
- dsa_is_dsa_port(chip->ds, i) ||
- (br && dsa_to_port(chip->ds, i)->bridge_dev == br))
- pvlan |= BIT(i);
+ list_for_each_entry(dp, &dst->ports, list)
+ if (dp->ds == ds &&
+ (dp->type == DSA_PORT_TYPE_CPU ||
+ dp->type == DSA_PORT_TYPE_DSA ||
+ (br && dp->bridge_dev == br)))
+ pvlan |= BIT(dp->index);
return pvlan;
}
@@ -1135,6 +1143,7 @@ static int mv88e6xxx_pri_setup(struct mv88e6xxx_chip *chip)
static int mv88e6xxx_devmap_setup(struct mv88e6xxx_chip *chip)
{
+ struct dsa_switch *ds = chip->ds;
int target, port;
int err;
@@ -1143,10 +1152,9 @@ static int mv88e6xxx_devmap_setup(struct mv88e6xxx_chip *chip)
/* Initialize the routing port to the 32 possible target devices */
for (target = 0; target < 32; target++) {
- port = 0x1f;
- if (target < DSA_MAX_SWITCHES)
- if (chip->ds->rtable[target] != DSA_RTABLE_NONE)
- port = chip->ds->rtable[target];
+ port = dsa_routing_port(ds, target);
+ if (port == ds->num_ports)
+ port = 0x1f;
err = mv88e6xxx_g2_device_mapping_write(chip, target, port);
if (err)
@@ -1253,7 +1261,7 @@ static int mv88e6xxx_pvt_map(struct mv88e6xxx_chip *chip, int dev, int port)
u16 pvlan = 0;
if (!mv88e6xxx_has_pvt(chip))
- return -EOPNOTSUPP;
+ return 0;
/* Skip the local source device, which uses in-chip port VLAN */
if (dev != chip->ds->index)
@@ -1370,6 +1378,22 @@ static int mv88e6xxx_atu_new(struct mv88e6xxx_chip *chip, u16 *fid)
return mv88e6xxx_g1_atu_flush(chip, *fid, true);
}
+static int mv88e6xxx_atu_get_hash(struct mv88e6xxx_chip *chip, u8 *hash)
+{
+ if (chip->info->ops->atu_get_hash)
+ return chip->info->ops->atu_get_hash(chip, hash);
+
+ return -EOPNOTSUPP;
+}
+
+static int mv88e6xxx_atu_set_hash(struct mv88e6xxx_chip *chip, u8 hash)
+{
+ if (chip->info->ops->atu_set_hash)
+ return chip->info->ops->atu_set_hash(chip, hash);
+
+ return -EOPNOTSUPP;
+}
+
static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
u16 vid_begin, u16 vid_end)
{
@@ -1402,7 +1426,7 @@ static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
if (dsa_is_dsa_port(ds, i) || dsa_is_cpu_port(ds, i))
continue;
- if (!ds->ports[i].slave)
+ if (!dsa_to_port(ds, i)->slave)
continue;
if (vlan.member[i] ==
@@ -1410,7 +1434,7 @@ static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
continue;
if (dsa_to_port(ds, i)->bridge_dev ==
- ds->ports[port].bridge_dev)
+ dsa_to_port(ds, port)->bridge_dev)
break; /* same bridge, check next VLAN */
if (!dsa_to_port(ds, i)->bridge_dev)
@@ -2035,32 +2059,26 @@ static int mv88e6xxx_port_fdb_dump(struct dsa_switch *ds, int port,
static int mv88e6xxx_bridge_map(struct mv88e6xxx_chip *chip,
struct net_device *br)
{
- struct dsa_switch *ds;
- int port;
- int dev;
+ struct dsa_switch *ds = chip->ds;
+ struct dsa_switch_tree *dst = ds->dst;
+ struct dsa_port *dp;
int err;
- /* Remap the Port VLAN of each local bridge group member */
- for (port = 0; port < mv88e6xxx_num_ports(chip); ++port) {
- if (chip->ds->ports[port].bridge_dev == br) {
- err = mv88e6xxx_port_vlan_map(chip, port);
- if (err)
- return err;
- }
- }
-
- if (!mv88e6xxx_has_pvt(chip))
- return 0;
-
- /* Remap the Port VLAN of each cross-chip bridge group member */
- for (dev = 0; dev < DSA_MAX_SWITCHES; ++dev) {
- ds = chip->ds->dst->ds[dev];
- if (!ds)
- break;
-
- for (port = 0; port < ds->num_ports; ++port) {
- if (ds->ports[port].bridge_dev == br) {
- err = mv88e6xxx_pvt_map(chip, dev, port);
+ list_for_each_entry(dp, &dst->ports, list) {
+ if (dp->bridge_dev == br) {
+ if (dp->ds == ds) {
+ /* This is a local bridge group member,
+ * remap its Port VLAN Map.
+ */
+ err = mv88e6xxx_port_vlan_map(chip, dp->index);
+ if (err)
+ return err;
+ } else {
+ /* This is an external bridge group member,
+ * remap its cross-chip Port VLAN Table entry.
+ */
+ err = mv88e6xxx_pvt_map(chip, dp->ds->index,
+ dp->index);
if (err)
return err;
}
@@ -2101,9 +2119,6 @@ static int mv88e6xxx_crosschip_bridge_join(struct dsa_switch *ds, int dev,
struct mv88e6xxx_chip *chip = ds->priv;
int err;
- if (!mv88e6xxx_has_pvt(chip))
- return 0;
-
mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_pvt_map(chip, dev, port);
mv88e6xxx_reg_unlock(chip);
@@ -2116,9 +2131,6 @@ static void mv88e6xxx_crosschip_bridge_leave(struct dsa_switch *ds, int dev,
{
struct mv88e6xxx_chip *chip = ds->priv;
- if (!mv88e6xxx_has_pvt(chip))
- return;
-
mv88e6xxx_reg_lock(chip);
if (mv88e6xxx_pvt_map(chip, dev, port))
dev_err(ds->dev, "failed to remap cross-chip Port VLAN\n");
@@ -2378,7 +2390,14 @@ static int mv88e6xxx_setup_upstream_port(struct mv88e6xxx_chip *chip, int port)
if (chip->info->ops->set_egress_port) {
err = chip->info->ops->set_egress_port(chip,
- upstream_port);
+ MV88E6XXX_EGRESS_DIR_INGRESS,
+ upstream_port);
+ if (err)
+ return err;
+
+ err = chip->info->ops->set_egress_port(chip,
+ MV88E6XXX_EGRESS_DIR_EGRESS,
+ upstream_port);
if (err)
return err;
}
@@ -2641,6 +2660,248 @@ static int mv88e6390_setup_errata(struct mv88e6xxx_chip *chip)
return mv88e6xxx_software_reset(chip);
}
+enum mv88e6xxx_devlink_param_id {
+ MV88E6XXX_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
+ MV88E6XXX_DEVLINK_PARAM_ID_ATU_HASH,
+};
+
+static int mv88e6xxx_devlink_param_get(struct dsa_switch *ds, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err;
+
+ mv88e6xxx_reg_lock(chip);
+
+ switch (id) {
+ case MV88E6XXX_DEVLINK_PARAM_ID_ATU_HASH:
+ err = mv88e6xxx_atu_get_hash(chip, &ctx->val.vu8);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ mv88e6xxx_reg_unlock(chip);
+
+ return err;
+}
+
+static int mv88e6xxx_devlink_param_set(struct dsa_switch *ds, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err;
+
+ mv88e6xxx_reg_lock(chip);
+
+ switch (id) {
+ case MV88E6XXX_DEVLINK_PARAM_ID_ATU_HASH:
+ err = mv88e6xxx_atu_set_hash(chip, ctx->val.vu8);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ mv88e6xxx_reg_unlock(chip);
+
+ return err;
+}
+
+static const struct devlink_param mv88e6xxx_devlink_params[] = {
+ DSA_DEVLINK_PARAM_DRIVER(MV88E6XXX_DEVLINK_PARAM_ID_ATU_HASH,
+ "ATU_hash", DEVLINK_PARAM_TYPE_U8,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME)),
+};
+
+static int mv88e6xxx_setup_devlink_params(struct dsa_switch *ds)
+{
+ return dsa_devlink_params_register(ds, mv88e6xxx_devlink_params,
+ ARRAY_SIZE(mv88e6xxx_devlink_params));
+}
+
+static void mv88e6xxx_teardown_devlink_params(struct dsa_switch *ds)
+{
+ dsa_devlink_params_unregister(ds, mv88e6xxx_devlink_params,
+ ARRAY_SIZE(mv88e6xxx_devlink_params));
+}
+
+enum mv88e6xxx_devlink_resource_id {
+ MV88E6XXX_RESOURCE_ID_ATU,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_0,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_1,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_2,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_3,
+};
+
+static u64 mv88e6xxx_devlink_atu_bin_get(struct mv88e6xxx_chip *chip,
+ u16 bin)
+{
+ u16 occupancy = 0;
+ int err;
+
+ mv88e6xxx_reg_lock(chip);
+
+ err = mv88e6xxx_g2_atu_stats_set(chip, MV88E6XXX_G2_ATU_STATS_MODE_ALL,
+ bin);
+ if (err) {
+ dev_err(chip->dev, "failed to set ATU stats kind/bin\n");
+ goto unlock;
+ }
+
+ err = mv88e6xxx_g1_atu_get_next(chip, 0);
+ if (err) {
+ dev_err(chip->dev, "failed to perform ATU get next\n");
+ goto unlock;
+ }
+
+ err = mv88e6xxx_g2_atu_stats_get(chip, &occupancy);
+ if (err) {
+ dev_err(chip->dev, "failed to get ATU stats\n");
+ goto unlock;
+ }
+
+unlock:
+ mv88e6xxx_reg_unlock(chip);
+
+ return occupancy & MV88E6XXX_G2_ATU_STATS_MASK;
+}
+
+static u64 mv88e6xxx_devlink_atu_bin_0_get(void *priv)
+{
+ struct mv88e6xxx_chip *chip = priv;
+
+ return mv88e6xxx_devlink_atu_bin_get(chip,
+ MV88E6XXX_G2_ATU_STATS_BIN_0);
+}
+
+static u64 mv88e6xxx_devlink_atu_bin_1_get(void *priv)
+{
+ struct mv88e6xxx_chip *chip = priv;
+
+ return mv88e6xxx_devlink_atu_bin_get(chip,
+ MV88E6XXX_G2_ATU_STATS_BIN_1);
+}
+
+static u64 mv88e6xxx_devlink_atu_bin_2_get(void *priv)
+{
+ struct mv88e6xxx_chip *chip = priv;
+
+ return mv88e6xxx_devlink_atu_bin_get(chip,
+ MV88E6XXX_G2_ATU_STATS_BIN_2);
+}
+
+static u64 mv88e6xxx_devlink_atu_bin_3_get(void *priv)
+{
+ struct mv88e6xxx_chip *chip = priv;
+
+ return mv88e6xxx_devlink_atu_bin_get(chip,
+ MV88E6XXX_G2_ATU_STATS_BIN_3);
+}
+
+static u64 mv88e6xxx_devlink_atu_get(void *priv)
+{
+ return mv88e6xxx_devlink_atu_bin_0_get(priv) +
+ mv88e6xxx_devlink_atu_bin_1_get(priv) +
+ mv88e6xxx_devlink_atu_bin_2_get(priv) +
+ mv88e6xxx_devlink_atu_bin_3_get(priv);
+}
+
+static int mv88e6xxx_setup_devlink_resources(struct dsa_switch *ds)
+{
+ struct devlink_resource_size_params size_params;
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err;
+
+ devlink_resource_size_params_init(&size_params,
+ mv88e6xxx_num_macs(chip),
+ mv88e6xxx_num_macs(chip),
+ 1, DEVLINK_RESOURCE_UNIT_ENTRY);
+
+ err = dsa_devlink_resource_register(ds, "ATU",
+ mv88e6xxx_num_macs(chip),
+ MV88E6XXX_RESOURCE_ID_ATU,
+ DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &size_params);
+ if (err)
+ goto out;
+
+ devlink_resource_size_params_init(&size_params,
+ mv88e6xxx_num_macs(chip) / 4,
+ mv88e6xxx_num_macs(chip) / 4,
+ 1, DEVLINK_RESOURCE_UNIT_ENTRY);
+
+ err = dsa_devlink_resource_register(ds, "ATU_bin_0",
+ mv88e6xxx_num_macs(chip) / 4,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_0,
+ MV88E6XXX_RESOURCE_ID_ATU,
+ &size_params);
+ if (err)
+ goto out;
+
+ err = dsa_devlink_resource_register(ds, "ATU_bin_1",
+ mv88e6xxx_num_macs(chip) / 4,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_1,
+ MV88E6XXX_RESOURCE_ID_ATU,
+ &size_params);
+ if (err)
+ goto out;
+
+ err = dsa_devlink_resource_register(ds, "ATU_bin_2",
+ mv88e6xxx_num_macs(chip) / 4,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_2,
+ MV88E6XXX_RESOURCE_ID_ATU,
+ &size_params);
+ if (err)
+ goto out;
+
+ err = dsa_devlink_resource_register(ds, "ATU_bin_3",
+ mv88e6xxx_num_macs(chip) / 4,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_3,
+ MV88E6XXX_RESOURCE_ID_ATU,
+ &size_params);
+ if (err)
+ goto out;
+
+ dsa_devlink_resource_occ_get_register(ds,
+ MV88E6XXX_RESOURCE_ID_ATU,
+ mv88e6xxx_devlink_atu_get,
+ chip);
+
+ dsa_devlink_resource_occ_get_register(ds,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_0,
+ mv88e6xxx_devlink_atu_bin_0_get,
+ chip);
+
+ dsa_devlink_resource_occ_get_register(ds,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_1,
+ mv88e6xxx_devlink_atu_bin_1_get,
+ chip);
+
+ dsa_devlink_resource_occ_get_register(ds,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_2,
+ mv88e6xxx_devlink_atu_bin_2_get,
+ chip);
+
+ dsa_devlink_resource_occ_get_register(ds,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_3,
+ mv88e6xxx_devlink_atu_bin_3_get,
+ chip);
+
+ return 0;
+
+out:
+ dsa_devlink_resources_unregister(ds);
+ return err;
+}
+
+static void mv88e6xxx_teardown(struct dsa_switch *ds)
+{
+ mv88e6xxx_teardown_devlink_params(ds);
+ dsa_devlink_resources_unregister(ds);
+}
+
static int mv88e6xxx_setup(struct dsa_switch *ds)
{
struct mv88e6xxx_chip *chip = ds->priv;
@@ -2757,6 +3018,22 @@ static int mv88e6xxx_setup(struct dsa_switch *ds)
unlock:
mv88e6xxx_reg_unlock(chip);
+ if (err)
+ return err;
+
+ /* These have to be called without holding the register lock, since
+ * they take the devlink lock, and we later take the locks in the
+ * reverse order when getting/setting parameters or reading resource
+ * occupancy.
+ */
+ err = mv88e6xxx_setup_devlink_resources(ds);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_setup_devlink_params(ds);
+ if (err)
+ dsa_devlink_resources_unregister(ds);
+
return err;
}
@@ -3117,6 +3394,8 @@ static const struct mv88e6xxx_ops mv88e6123_ops = {
.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
+ .atu_get_hash = mv88e6165_g1_atu_get_hash,
+ .atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.phylink_validate = mv88e6185_phylink_validate,
@@ -3246,6 +3525,8 @@ static const struct mv88e6xxx_ops mv88e6161_ops = {
.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
+ .atu_get_hash = mv88e6165_g1_atu_get_hash,
+ .atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.avb_ops = &mv88e6165_avb_ops,
@@ -3280,6 +3561,8 @@ static const struct mv88e6xxx_ops mv88e6165_ops = {
.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
+ .atu_get_hash = mv88e6165_g1_atu_get_hash,
+ .atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.avb_ops = &mv88e6165_avb_ops,
@@ -3322,6 +3605,8 @@ static const struct mv88e6xxx_ops mv88e6171_ops = {
.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
+ .atu_get_hash = mv88e6165_g1_atu_get_hash,
+ .atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.phylink_validate = mv88e6185_phylink_validate,
@@ -3366,6 +3651,8 @@ static const struct mv88e6xxx_ops mv88e6172_ops = {
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6352_g1_rmu_disable,
+ .atu_get_hash = mv88e6165_g1_atu_get_hash,
+ .atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.serdes_get_lane = mv88e6352_serdes_get_lane,
@@ -3409,6 +3696,8 @@ static const struct mv88e6xxx_ops mv88e6175_ops = {
.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
+ .atu_get_hash = mv88e6165_g1_atu_get_hash,
+ .atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.phylink_validate = mv88e6185_phylink_validate,
@@ -3453,6 +3742,8 @@ static const struct mv88e6xxx_ops mv88e6176_ops = {
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6352_g1_rmu_disable,
+ .atu_get_hash = mv88e6165_g1_atu_get_hash,
+ .atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.serdes_get_lane = mv88e6352_serdes_get_lane,
@@ -3538,6 +3829,8 @@ static const struct mv88e6xxx_ops mv88e6190_ops = {
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6390_g1_rmu_disable,
+ .atu_get_hash = mv88e6165_g1_atu_get_hash,
+ .atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
@@ -3587,6 +3880,8 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = {
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6390_g1_rmu_disable,
+ .atu_get_hash = mv88e6165_g1_atu_get_hash,
+ .atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
@@ -3635,6 +3930,8 @@ static const struct mv88e6xxx_ops mv88e6191_ops = {
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6390_g1_rmu_disable,
+ .atu_get_hash = mv88e6165_g1_atu_get_hash,
+ .atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
@@ -3686,6 +3983,8 @@ static const struct mv88e6xxx_ops mv88e6240_ops = {
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6352_g1_rmu_disable,
+ .atu_get_hash = mv88e6165_g1_atu_get_hash,
+ .atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.serdes_get_lane = mv88e6352_serdes_get_lane,
@@ -3777,6 +4076,8 @@ static const struct mv88e6xxx_ops mv88e6290_ops = {
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6390_g1_rmu_disable,
+ .atu_get_hash = mv88e6165_g1_atu_get_hash,
+ .atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
@@ -3963,6 +4264,8 @@ static const struct mv88e6xxx_ops mv88e6350_ops = {
.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
+ .atu_get_hash = mv88e6165_g1_atu_get_hash,
+ .atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.phylink_validate = mv88e6185_phylink_validate,
@@ -4003,6 +4306,8 @@ static const struct mv88e6xxx_ops mv88e6351_ops = {
.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
+ .atu_get_hash = mv88e6165_g1_atu_get_hash,
+ .atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.avb_ops = &mv88e6352_avb_ops,
@@ -4049,6 +4354,8 @@ static const struct mv88e6xxx_ops mv88e6352_ops = {
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6352_g1_rmu_disable,
+ .atu_get_hash = mv88e6165_g1_atu_get_hash,
+ .atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.serdes_get_lane = mv88e6352_serdes_get_lane,
@@ -4105,6 +4412,8 @@ static const struct mv88e6xxx_ops mv88e6390_ops = {
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6390_g1_rmu_disable,
+ .atu_get_hash = mv88e6165_g1_atu_get_hash,
+ .atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
@@ -4158,6 +4467,8 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = {
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6390_g1_rmu_disable,
+ .atu_get_hash = mv88e6165_g1_atu_get_hash,
+ .atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
@@ -4177,6 +4488,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6097,
.name = "Marvell 88E6085",
.num_databases = 4096,
+ .num_macs = 8192,
.num_ports = 10,
.num_internal_phys = 5,
.max_vid = 4095,
@@ -4199,6 +4511,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6095,
.name = "Marvell 88E6095/88E6095F",
.num_databases = 256,
+ .num_macs = 8192,
.num_ports = 11,
.num_internal_phys = 0,
.max_vid = 4095,
@@ -4219,6 +4532,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6097,
.name = "Marvell 88E6097/88E6097F",
.num_databases = 4096,
+ .num_macs = 8192,
.num_ports = 11,
.num_internal_phys = 8,
.max_vid = 4095,
@@ -4241,6 +4555,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6165,
.name = "Marvell 88E6123",
.num_databases = 4096,
+ .num_macs = 1024,
.num_ports = 3,
.num_internal_phys = 5,
.max_vid = 4095,
@@ -4263,6 +4578,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6185,
.name = "Marvell 88E6131",
.num_databases = 256,
+ .num_macs = 8192,
.num_ports = 8,
.num_internal_phys = 0,
.max_vid = 4095,
@@ -4283,6 +4599,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6341,
.name = "Marvell 88E6141",
.num_databases = 4096,
+ .num_macs = 2048,
.num_ports = 6,
.num_internal_phys = 5,
.num_gpio = 11,
@@ -4306,6 +4623,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6165,
.name = "Marvell 88E6161",
.num_databases = 4096,
+ .num_macs = 1024,
.num_ports = 6,
.num_internal_phys = 5,
.max_vid = 4095,
@@ -4329,6 +4647,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6165,
.name = "Marvell 88E6165",
.num_databases = 4096,
+ .num_macs = 8192,
.num_ports = 6,
.num_internal_phys = 0,
.max_vid = 4095,
@@ -4352,6 +4671,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6351,
.name = "Marvell 88E6171",
.num_databases = 4096,
+ .num_macs = 8192,
.num_ports = 7,
.num_internal_phys = 5,
.max_vid = 4095,
@@ -4374,6 +4694,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6352,
.name = "Marvell 88E6172",
.num_databases = 4096,
+ .num_macs = 8192,
.num_ports = 7,
.num_internal_phys = 5,
.num_gpio = 15,
@@ -4397,6 +4718,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6351,
.name = "Marvell 88E6175",
.num_databases = 4096,
+ .num_macs = 8192,
.num_ports = 7,
.num_internal_phys = 5,
.max_vid = 4095,
@@ -4419,6 +4741,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6352,
.name = "Marvell 88E6176",
.num_databases = 4096,
+ .num_macs = 8192,
.num_ports = 7,
.num_internal_phys = 5,
.num_gpio = 15,
@@ -4442,6 +4765,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6185,
.name = "Marvell 88E6185",
.num_databases = 256,
+ .num_macs = 8192,
.num_ports = 10,
.num_internal_phys = 0,
.max_vid = 4095,
@@ -4462,6 +4786,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6390,
.name = "Marvell 88E6190",
.num_databases = 4096,
+ .num_macs = 16384,
.num_ports = 11, /* 10 + Z80 */
.num_internal_phys = 9,
.num_gpio = 16,
@@ -4485,6 +4810,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6390,
.name = "Marvell 88E6190X",
.num_databases = 4096,
+ .num_macs = 16384,
.num_ports = 11, /* 10 + Z80 */
.num_internal_phys = 9,
.num_gpio = 16,
@@ -4508,6 +4834,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6390,
.name = "Marvell 88E6191",
.num_databases = 4096,
+ .num_macs = 16384,
.num_ports = 11, /* 10 + Z80 */
.num_internal_phys = 9,
.max_vid = 8191,
@@ -4558,6 +4885,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6352,
.name = "Marvell 88E6240",
.num_databases = 4096,
+ .num_macs = 8192,
.num_ports = 7,
.num_internal_phys = 5,
.num_gpio = 15,
@@ -4628,6 +4956,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6320,
.name = "Marvell 88E6320",
.num_databases = 4096,
+ .num_macs = 8192,
.num_ports = 7,
.num_internal_phys = 5,
.num_gpio = 15,
@@ -4652,6 +4981,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6320,
.name = "Marvell 88E6321",
.num_databases = 4096,
+ .num_macs = 8192,
.num_ports = 7,
.num_internal_phys = 5,
.num_gpio = 15,
@@ -4675,6 +5005,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6341,
.name = "Marvell 88E6341",
.num_databases = 4096,
+ .num_macs = 2048,
.num_internal_phys = 5,
.num_ports = 6,
.num_gpio = 11,
@@ -4699,6 +5030,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6351,
.name = "Marvell 88E6350",
.num_databases = 4096,
+ .num_macs = 8192,
.num_ports = 7,
.num_internal_phys = 5,
.max_vid = 4095,
@@ -4721,6 +5053,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6351,
.name = "Marvell 88E6351",
.num_databases = 4096,
+ .num_macs = 8192,
.num_ports = 7,
.num_internal_phys = 5,
.max_vid = 4095,
@@ -4743,6 +5076,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6352,
.name = "Marvell 88E6352",
.num_databases = 4096,
+ .num_macs = 8192,
.num_ports = 7,
.num_internal_phys = 5,
.num_gpio = 15,
@@ -4766,6 +5100,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6390,
.name = "Marvell 88E6390",
.num_databases = 4096,
+ .num_macs = 16384,
.num_ports = 11, /* 10 + Z80 */
.num_internal_phys = 9,
.num_gpio = 16,
@@ -4789,6 +5124,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6390,
.name = "Marvell 88E6390X",
.num_databases = 4096,
+ .num_macs = 16384,
.num_ports = 11, /* 10 + Z80 */
.num_internal_phys = 9,
.num_gpio = 16,
@@ -4914,6 +5250,80 @@ static int mv88e6xxx_port_mdb_del(struct dsa_switch *ds, int port,
return err;
}
+static int mv88e6xxx_port_mirror_add(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror,
+ bool ingress)
+{
+ enum mv88e6xxx_egress_direction direction = ingress ?
+ MV88E6XXX_EGRESS_DIR_INGRESS :
+ MV88E6XXX_EGRESS_DIR_EGRESS;
+ struct mv88e6xxx_chip *chip = ds->priv;
+ bool other_mirrors = false;
+ int i;
+ int err;
+
+ if (!chip->info->ops->set_egress_port)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&chip->reg_lock);
+ if ((ingress ? chip->ingress_dest_port : chip->egress_dest_port) !=
+ mirror->to_local_port) {
+ for (i = 0; i < mv88e6xxx_num_ports(chip); i++)
+ other_mirrors |= ingress ?
+ chip->ports[i].mirror_ingress :
+ chip->ports[i].mirror_egress;
+
+ /* Can't change egress port when other mirror is active */
+ if (other_mirrors) {
+ err = -EBUSY;
+ goto out;
+ }
+
+ err = chip->info->ops->set_egress_port(chip,
+ direction,
+ mirror->to_local_port);
+ if (err)
+ goto out;
+ }
+
+ err = mv88e6xxx_port_set_mirror(chip, port, direction, true);
+out:
+ mutex_unlock(&chip->reg_lock);
+
+ return err;
+}
+
+static void mv88e6xxx_port_mirror_del(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror)
+{
+ enum mv88e6xxx_egress_direction direction = mirror->ingress ?
+ MV88E6XXX_EGRESS_DIR_INGRESS :
+ MV88E6XXX_EGRESS_DIR_EGRESS;
+ struct mv88e6xxx_chip *chip = ds->priv;
+ bool other_mirrors = false;
+ int i;
+
+ mutex_lock(&chip->reg_lock);
+ if (mv88e6xxx_port_set_mirror(chip, port, direction, false))
+ dev_err(ds->dev, "p%d: failed to disable mirroring\n", port);
+
+ for (i = 0; i < mv88e6xxx_num_ports(chip); i++)
+ other_mirrors |= mirror->ingress ?
+ chip->ports[i].mirror_ingress :
+ chip->ports[i].mirror_egress;
+
+ /* Reset egress port when no other mirror is active */
+ if (!other_mirrors) {
+ if (chip->info->ops->set_egress_port(chip,
+ direction,
+ dsa_upstream_port(ds,
+ port)))
+ dev_err(ds->dev, "failed to set egress port\n");
+ }
+
+ mutex_unlock(&chip->reg_lock);
+}
+
static int mv88e6xxx_port_egress_floods(struct dsa_switch *ds, int port,
bool unicast, bool multicast)
{
@@ -4933,6 +5343,7 @@ static int mv88e6xxx_port_egress_floods(struct dsa_switch *ds, int port,
static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
.get_tag_protocol = mv88e6xxx_get_tag_protocol,
.setup = mv88e6xxx_setup,
+ .teardown = mv88e6xxx_teardown,
.phylink_validate = mv88e6xxx_validate,
.phylink_mac_link_state = mv88e6xxx_link_state,
.phylink_mac_config = mv88e6xxx_mac_config,
@@ -4968,6 +5379,8 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
.port_mdb_prepare = mv88e6xxx_port_mdb_prepare,
.port_mdb_add = mv88e6xxx_port_mdb_add,
.port_mdb_del = mv88e6xxx_port_mdb_del,
+ .port_mirror_add = mv88e6xxx_port_mirror_add,
+ .port_mirror_del = mv88e6xxx_port_mirror_del,
.crosschip_bridge_join = mv88e6xxx_crosschip_bridge_join,
.crosschip_bridge_leave = mv88e6xxx_crosschip_bridge_leave,
.port_hwtstamp_set = mv88e6xxx_port_hwtstamp_set,
@@ -4975,6 +5388,8 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
.port_txtstamp = mv88e6xxx_port_txtstamp,
.port_rxtstamp = mv88e6xxx_port_rxtstamp,
.get_ts_info = mv88e6xxx_get_ts_info,
+ .devlink_param_get = mv88e6xxx_devlink_param_get,
+ .devlink_param_set = mv88e6xxx_devlink_param_set,
};
static int mv88e6xxx_register_switch(struct mv88e6xxx_chip *chip)
@@ -4982,10 +5397,12 @@ static int mv88e6xxx_register_switch(struct mv88e6xxx_chip *chip)
struct device *dev = chip->dev;
struct dsa_switch *ds;
- ds = dsa_switch_alloc(dev, mv88e6xxx_num_ports(chip));
+ ds = devm_kzalloc(dev, sizeof(*ds), GFP_KERNEL);
if (!ds)
return -ENOMEM;
+ ds->dev = dev;
+ ds->num_ports = mv88e6xxx_num_ports(chip);
ds->priv = chip;
ds->dev = dev;
ds->ops = &mv88e6xxx_switch_ops;
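A recurring theme in the chip.c hunks above is replacing fixed-size arrays (ds->ports[], dst->ds[], ds->rtable[]) with a walk of the switch tree's port list; a cross-chip lookup becomes a match on the (switch index, port index) pair. Sketch of the walk, with find_port() as an illustrative helper rather than DSA API:

static struct dsa_port *find_port(struct dsa_switch_tree *dst,
				  int sw_index, int port_index)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dp->ds->index == sw_index && dp->index == port_index)
			return dp;

	return NULL;
}

The same restructuring explains why mv88e6xxx_pvt_map() now returns 0 instead of -EOPNOTSUPP on chips without a PVT: mv88e6xxx_bridge_map() funnels every remote bridge member through it, so "nothing to do" must not read as a failure.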
diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h
index e9b1a1ac9a8e..8a8e38bfb161 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.h
+++ b/drivers/net/dsa/mv88e6xxx/chip.h
@@ -33,6 +33,11 @@ enum mv88e6xxx_egress_mode {
MV88E6XXX_EGRESS_MODE_ETHERTYPE,
};
+enum mv88e6xxx_egress_direction {
+ MV88E6XXX_EGRESS_DIR_INGRESS,
+ MV88E6XXX_EGRESS_DIR_EGRESS,
+};
+
enum mv88e6xxx_frame_mode {
MV88E6XXX_FRAME_MODE_NORMAL,
MV88E6XXX_FRAME_MODE_DSA,
@@ -94,6 +99,7 @@ struct mv88e6xxx_info {
u16 prod_num;
const char *name;
unsigned int num_databases;
+ unsigned int num_macs;
unsigned int num_ports;
unsigned int num_internal_phys;
unsigned int num_gpio;
@@ -227,6 +233,8 @@ struct mv88e6xxx_port {
u64 vtu_member_violation;
u64 vtu_miss_violation;
u8 cmode;
+ bool mirror_ingress;
+ bool mirror_egress;
unsigned int serdes_irq;
};
@@ -310,6 +318,10 @@ struct mv88e6xxx_chip {
u16 evcap_config;
u16 enable_count;
+ /* Current ingress and egress monitor ports */
+ int egress_dest_port;
+ int ingress_dest_port;
+
/* Per-port timestamping resources. */
struct mv88e6xxx_port_hwtstamp port_hwtstamp[DSA_MAX_PORTS];
@@ -464,7 +476,9 @@ struct mv88e6xxx_ops {
int (*stats_get_stats)(struct mv88e6xxx_chip *chip, int port,
uint64_t *data);
int (*set_cpu_port)(struct mv88e6xxx_chip *chip, int port);
- int (*set_egress_port)(struct mv88e6xxx_chip *chip, int port);
+ int (*set_egress_port)(struct mv88e6xxx_chip *chip,
+ enum mv88e6xxx_egress_direction direction,
+ int port);
#define MV88E6XXX_CASCADE_PORT_NONE 0xe
#define MV88E6XXX_CASCADE_PORT_MULTIPLE 0xf
@@ -497,6 +511,10 @@ struct mv88e6xxx_ops {
int (*serdes_get_stats)(struct mv88e6xxx_chip *chip, int port,
uint64_t *data);
+ /* Address Translation Unit operations */
+ int (*atu_get_hash)(struct mv88e6xxx_chip *chip, u8 *hash);
+ int (*atu_set_hash)(struct mv88e6xxx_chip *chip, u8 hash);
+
/* VLAN Translation Unit operations */
int (*vtu_getnext)(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_vtu_entry *entry);
@@ -609,6 +627,11 @@ static inline unsigned int mv88e6xxx_num_databases(struct mv88e6xxx_chip *chip)
return chip->info->num_databases;
}
+static inline unsigned int mv88e6xxx_num_macs(struct mv88e6xxx_chip *chip)
+{
+ return chip->info->num_macs;
+}
+
static inline unsigned int mv88e6xxx_num_ports(struct mv88e6xxx_chip *chip)
{
return chip->info->num_ports;
diff --git a/drivers/net/dsa/mv88e6xxx/global1.c b/drivers/net/dsa/mv88e6xxx/global1.c
index 25ec4c0ac589..120a65d3e3ef 100644
--- a/drivers/net/dsa/mv88e6xxx/global1.c
+++ b/drivers/net/dsa/mv88e6xxx/global1.c
@@ -263,8 +263,11 @@ int mv88e6250_g1_ieee_pri_map(struct mv88e6xxx_chip *chip)
/* Offset 0x1a: Monitor Control */
/* Offset 0x1a: Monitor & MGMT Control on some devices */
-int mv88e6095_g1_set_egress_port(struct mv88e6xxx_chip *chip, int port)
+int mv88e6095_g1_set_egress_port(struct mv88e6xxx_chip *chip,
+ enum mv88e6xxx_egress_direction direction,
+ int port)
{
+ int *dest_port_chip;
u16 reg;
int err;
@@ -272,13 +275,28 @@ int mv88e6095_g1_set_egress_port(struct mv88e6xxx_chip *chip, int port)
if (err)
return err;
- reg &= ~(MV88E6185_G1_MONITOR_CTL_INGRESS_DEST_MASK |
- MV88E6185_G1_MONITOR_CTL_EGRESS_DEST_MASK);
+ switch (direction) {
+ case MV88E6XXX_EGRESS_DIR_INGRESS:
+ dest_port_chip = &chip->ingress_dest_port;
+ reg &= ~MV88E6185_G1_MONITOR_CTL_INGRESS_DEST_MASK;
+ reg |= port <<
+ __bf_shf(MV88E6185_G1_MONITOR_CTL_INGRESS_DEST_MASK);
+ break;
+ case MV88E6XXX_EGRESS_DIR_EGRESS:
+ dest_port_chip = &chip->egress_dest_port;
+ reg &= ~MV88E6185_G1_MONITOR_CTL_EGRESS_DEST_MASK;
+ reg |= port <<
+ __bf_shf(MV88E6185_G1_MONITOR_CTL_EGRESS_DEST_MASK);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ err = mv88e6xxx_g1_write(chip, MV88E6185_G1_MONITOR_CTL, reg);
+ if (!err)
+ *dest_port_chip = port;
- reg |= port << __bf_shf(MV88E6185_G1_MONITOR_CTL_INGRESS_DEST_MASK) |
- port << __bf_shf(MV88E6185_G1_MONITOR_CTL_EGRESS_DEST_MASK);
-
- return mv88e6xxx_g1_write(chip, MV88E6185_G1_MONITOR_CTL, reg);
+ return err;
}
/* Older generations also call this the ARP destination. It has been
@@ -310,22 +328,32 @@ static int mv88e6390_g1_monitor_write(struct mv88e6xxx_chip *chip,
return mv88e6xxx_g1_write(chip, MV88E6390_G1_MONITOR_MGMT_CTL, reg);
}
-int mv88e6390_g1_set_egress_port(struct mv88e6xxx_chip *chip, int port)
+int mv88e6390_g1_set_egress_port(struct mv88e6xxx_chip *chip,
+ enum mv88e6xxx_egress_direction direction,
+ int port)
{
+ int *dest_port_chip;
u16 ptr;
int err;
- ptr = MV88E6390_G1_MONITOR_MGMT_CTL_PTR_INGRESS_DEST;
- err = mv88e6390_g1_monitor_write(chip, ptr, port);
- if (err)
- return err;
+ switch (direction) {
+ case MV88E6XXX_EGRESS_DIR_INGRESS:
+ dest_port_chip = &chip->ingress_dest_port;
+ ptr = MV88E6390_G1_MONITOR_MGMT_CTL_PTR_INGRESS_DEST;
+ break;
+ case MV88E6XXX_EGRESS_DIR_EGRESS:
+ dest_port_chip = &chip->egress_dest_port;
+ ptr = MV88E6390_G1_MONITOR_MGMT_CTL_PTR_EGRESS_DEST;
+ break;
+ default:
+ return -EINVAL;
+ }
- ptr = MV88E6390_G1_MONITOR_MGMT_CTL_PTR_EGRESS_DEST;
err = mv88e6390_g1_monitor_write(chip, ptr, port);
- if (err)
- return err;
+ if (!err)
+ *dest_port_chip = port;
- return 0;
+ return err;
}
int mv88e6390_g1_set_cpu_port(struct mv88e6xxx_chip *chip, int port)
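Both set_egress_port variants above are read-modify-write updates of a destination field within a shared register, selected by direction. A distilled sketch, with monitor_ctl_set_dest() as an illustrative name; the field has to be cleared with the inverted mask before the new port is shifted in at the mask's offset (__bf_shf() yields the shift of the mask's lowest set bit):

static u16 monitor_ctl_set_dest(u16 reg, u16 mask, int port)
{
	reg &= ~mask;				/* clear the old destination */
	reg |= (port << __bf_shf(mask)) & mask;	/* install the new one */
	return reg;
}

On success, both variants also cache the programmed port in chip->ingress_dest_port or chip->egress_dest_port, which is what lets the mirror code in chip.c decide whether the monitor destination needs reprogramming at all.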
diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h
index 0870fcc8bfc8..bc5a6b2bb1e4 100644
--- a/drivers/net/dsa/mv88e6xxx/global1.h
+++ b/drivers/net/dsa/mv88e6xxx/global1.h
@@ -109,6 +109,7 @@
/* Offset 0x0A: ATU Control Register */
#define MV88E6XXX_G1_ATU_CTL 0x0a
#define MV88E6XXX_G1_ATU_CTL_LEARN2ALL 0x0008
+#define MV88E6161_G1_ATU_CTL_HASH_MASK 0x0003
/* Offset 0x0B: ATU Operation Register */
#define MV88E6XXX_G1_ATU_OP 0x0b
@@ -287,8 +288,12 @@ int mv88e6095_g1_stats_set_histogram(struct mv88e6xxx_chip *chip);
int mv88e6390_g1_stats_set_histogram(struct mv88e6xxx_chip *chip);
void mv88e6xxx_g1_stats_read(struct mv88e6xxx_chip *chip, int stat, u32 *val);
int mv88e6xxx_g1_stats_clear(struct mv88e6xxx_chip *chip);
-int mv88e6095_g1_set_egress_port(struct mv88e6xxx_chip *chip, int port);
-int mv88e6390_g1_set_egress_port(struct mv88e6xxx_chip *chip, int port);
+int mv88e6095_g1_set_egress_port(struct mv88e6xxx_chip *chip,
+ enum mv88e6xxx_egress_direction direction,
+ int port);
+int mv88e6390_g1_set_egress_port(struct mv88e6xxx_chip *chip,
+ enum mv88e6xxx_egress_direction direction,
+ int port);
int mv88e6095_g1_set_cpu_port(struct mv88e6xxx_chip *chip, int port);
int mv88e6390_g1_set_cpu_port(struct mv88e6xxx_chip *chip, int port);
int mv88e6390_g1_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip);
@@ -318,6 +323,8 @@ int mv88e6xxx_g1_atu_remove(struct mv88e6xxx_chip *chip, u16 fid, int port,
bool all);
int mv88e6xxx_g1_atu_prob_irq_setup(struct mv88e6xxx_chip *chip);
void mv88e6xxx_g1_atu_prob_irq_free(struct mv88e6xxx_chip *chip);
+int mv88e6165_g1_atu_get_hash(struct mv88e6xxx_chip *chip, u8 *hash);
+int mv88e6165_g1_atu_set_hash(struct mv88e6xxx_chip *chip, u8 hash);
int mv88e6185_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_vtu_entry *entry);
@@ -338,5 +345,6 @@ int mv88e6390_g1_vtu_loadpurge(struct mv88e6xxx_chip *chip,
int mv88e6xxx_g1_vtu_flush(struct mv88e6xxx_chip *chip);
int mv88e6xxx_g1_vtu_prob_irq_setup(struct mv88e6xxx_chip *chip);
void mv88e6xxx_g1_vtu_prob_irq_free(struct mv88e6xxx_chip *chip);
+int mv88e6xxx_g1_atu_get_next(struct mv88e6xxx_chip *chip, u16 fid);
#endif /* _MV88E6XXX_GLOBAL1_H */
diff --git a/drivers/net/dsa/mv88e6xxx/global1_atu.c b/drivers/net/dsa/mv88e6xxx/global1_atu.c
index 792a96ef418f..bdcd25560dd2 100644
--- a/drivers/net/dsa/mv88e6xxx/global1_atu.c
+++ b/drivers/net/dsa/mv88e6xxx/global1_atu.c
@@ -73,6 +73,38 @@ int mv88e6xxx_g1_atu_set_age_time(struct mv88e6xxx_chip *chip,
return 0;
}
+int mv88e6165_g1_atu_get_hash(struct mv88e6xxx_chip *chip, u8 *hash)
+{
+ int err;
+ u16 val;
+
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_ATU_CTL, &val);
+ if (err)
+ return err;
+
+ *hash = val & MV88E6161_G1_ATU_CTL_HASH_MASK;
+
+ return 0;
+}
+
+int mv88e6165_g1_atu_set_hash(struct mv88e6xxx_chip *chip, u8 hash)
+{
+ int err;
+ u16 val;
+
+ if (hash & ~MV88E6161_G1_ATU_CTL_HASH_MASK)
+ return -EINVAL;
+
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_ATU_CTL, &val);
+ if (err)
+ return err;
+
+ val &= ~MV88E6161_G1_ATU_CTL_HASH_MASK;
+ val |= hash;
+
+ return mv88e6xxx_g1_write(chip, MV88E6XXX_G1_ATU_CTL, val);
+}
+
/* Offset 0x0B: ATU Operation Register */
static int mv88e6xxx_g1_atu_op_wait(struct mv88e6xxx_chip *chip)
@@ -122,6 +154,11 @@ static int mv88e6xxx_g1_atu_op(struct mv88e6xxx_chip *chip, u16 fid, u16 op)
return mv88e6xxx_g1_atu_op_wait(chip);
}
+int mv88e6xxx_g1_atu_get_next(struct mv88e6xxx_chip *chip, u16 fid)
+{
+ return mv88e6xxx_g1_atu_op(chip, fid, MV88E6XXX_G1_ATU_OP_GET_NEXT_DB);
+}
+
/* Offset 0x0C: ATU Data Register */
static int mv88e6xxx_g1_atu_data_read(struct mv88e6xxx_chip *chip,
diff --git a/drivers/net/dsa/mv88e6xxx/global2.c b/drivers/net/dsa/mv88e6xxx/global2.c
index bdbb72fc20ed..87bfe7c8c9cd 100644
--- a/drivers/net/dsa/mv88e6xxx/global2.c
+++ b/drivers/net/dsa/mv88e6xxx/global2.c
@@ -280,6 +280,19 @@ int mv88e6xxx_g2_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr)
return err;
}
+/* Offset 0x0E: ATU Statistics */
+
+int mv88e6xxx_g2_atu_stats_set(struct mv88e6xxx_chip *chip, u16 kind, u16 bin)
+{
+ return mv88e6xxx_g2_write(chip, MV88E6XXX_G2_ATU_STATS,
+ kind | bin);
+}
+
+int mv88e6xxx_g2_atu_stats_get(struct mv88e6xxx_chip *chip, u16 *stats)
+{
+ return mv88e6xxx_g2_read(chip, MV88E6XXX_G2_ATU_STATS, stats);
+}
+
/* Offset 0x0F: Priority Override Table */
static int mv88e6xxx_g2_pot_write(struct mv88e6xxx_chip *chip, int pointer,
diff --git a/drivers/net/dsa/mv88e6xxx/global2.h b/drivers/net/dsa/mv88e6xxx/global2.h
index 42da4bca73e8..1f42ee656816 100644
--- a/drivers/net/dsa/mv88e6xxx/global2.h
+++ b/drivers/net/dsa/mv88e6xxx/global2.h
@@ -113,7 +113,16 @@
#define MV88E6XXX_G2_SWITCH_MAC_DATA_MASK 0x00ff
/* Offset 0x0E: ATU Stats Register */
-#define MV88E6XXX_G2_ATU_STATS 0x0e
+#define MV88E6XXX_G2_ATU_STATS 0x0e
+#define MV88E6XXX_G2_ATU_STATS_BIN_0 (0x0 << 14)
+#define MV88E6XXX_G2_ATU_STATS_BIN_1 (0x1 << 14)
+#define MV88E6XXX_G2_ATU_STATS_BIN_2 (0x2 << 14)
+#define MV88E6XXX_G2_ATU_STATS_BIN_3 (0x3 << 14)
+#define MV88E6XXX_G2_ATU_STATS_MODE_ALL (0x0 << 12)
+#define MV88E6XXX_G2_ATU_STATS_MODE_ALL_DYNAMIC (0x1 << 12)
+#define MV88E6XXX_G2_ATU_STATS_MODE_FID_ALL (0x2 << 12)
+#define MV88E6XXX_G2_ATU_STATS_MODE_FID_ALL_DYNAMIC (0x3 << 12)
+#define MV88E6XXX_G2_ATU_STATS_MASK 0x0fff
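Read together with the mask, these defines say the 16-bit ATU Stats register packs a 2-bit bin selector in bits 15:14, a 2-bit mode in bits 13:12, and a 12-bit count in bits 11:0. A hedged sketch of combining the two global2.c helpers added above to read one bin's fill level (the function name is hypothetical, and whether an ATU operation must run first to latch the count is left to the hardware documentation):

    static int example_atu_bin_count(struct mv88e6xxx_chip *chip, u16 *count)
    {
    	u16 stats;
    	int err;

    	/* Select bin 2, counting dynamic entries across all FIDs */
    	err = mv88e6xxx_g2_atu_stats_set(chip,
    					 MV88E6XXX_G2_ATU_STATS_MODE_ALL_DYNAMIC,
    					 MV88E6XXX_G2_ATU_STATS_BIN_2);
    	if (err)
    		return err;

    	err = mv88e6xxx_g2_atu_stats_get(chip, &stats);
    	if (err)
    		return err;

    	*count = stats & MV88E6XXX_G2_ATU_STATS_MASK;
    	return 0;
    }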
/* Offset 0x0F: Priority Override Table */
#define MV88E6XXX_G2_PRIO_OVERRIDE 0x0f
@@ -353,6 +362,8 @@ extern const struct mv88e6xxx_gpio_ops mv88e6352_gpio_ops;
int mv88e6xxx_g2_scratch_gpio_set_smi(struct mv88e6xxx_chip *chip,
bool external);
+int mv88e6xxx_g2_atu_stats_set(struct mv88e6xxx_chip *chip, u16 kind, u16 bin);
+int mv88e6xxx_g2_atu_stats_get(struct mv88e6xxx_chip *chip, u16 *stats);
#else /* !CONFIG_NET_DSA_MV88E6XXX_GLOBAL2 */
@@ -515,6 +526,18 @@ static inline int mv88e6xxx_g2_device_mapping_write(struct mv88e6xxx_chip *chip,
return -EOPNOTSUPP;
}
+static inline int mv88e6xxx_g2_atu_stats_set(struct mv88e6xxx_chip *chip,
+ u16 kind, u16 bin)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int mv88e6xxx_g2_atu_stats_get(struct mv88e6xxx_chip *chip,
+ u16 *stats)
+{
+ return -EOPNOTSUPP;
+}
+
#endif /* CONFIG_NET_DSA_MV88E6XXX_GLOBAL2 */
#endif /* _MV88E6XXX_GLOBAL2_H */
diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c
index 15ef81654b67..7fe256c5739d 100644
--- a/drivers/net/dsa/mv88e6xxx/port.c
+++ b/drivers/net/dsa/mv88e6xxx/port.c
@@ -1181,6 +1181,43 @@ int mv88e6095_port_set_upstream_port(struct mv88e6xxx_chip *chip, int port,
return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL2, reg);
}
+int mv88e6xxx_port_set_mirror(struct mv88e6xxx_chip *chip, int port,
+ enum mv88e6xxx_egress_direction direction,
+ bool mirror)
+{
+ bool *mirror_port;
+ u16 reg;
+ u16 bit;
+ int err;
+
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_CTL2, &reg);
+ if (err)
+ return err;
+
+ switch (direction) {
+ case MV88E6XXX_EGRESS_DIR_INGRESS:
+ bit = MV88E6XXX_PORT_CTL2_INGRESS_MONITOR;
+ mirror_port = &chip->ports[port].mirror_ingress;
+ break;
+ case MV88E6XXX_EGRESS_DIR_EGRESS:
+ bit = MV88E6XXX_PORT_CTL2_EGRESS_MONITOR;
+ mirror_port = &chip->ports[port].mirror_egress;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ reg &= ~bit;
+ if (mirror)
+ reg |= bit;
+
+ err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL2, reg);
+ if (!err)
+ *mirror_port = mirror;
+
+ return err;
+}
+
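mv88e6xxx_port_set_mirror() again follows the read-modify-write pattern on Port Control 2, and caches the new state in chip->ports[] only on a successful write. A sketch of how a DSA mirroring callback might drive it (the wrapper name is an assumption, not taken from the patch):

    static int example_port_mirror(struct mv88e6xxx_chip *chip, int port,
    			       bool ingress, bool enable)
    {
    	enum mv88e6xxx_egress_direction direction = ingress ?
    		MV88E6XXX_EGRESS_DIR_INGRESS :
    		MV88E6XXX_EGRESS_DIR_EGRESS;

    	return mv88e6xxx_port_set_mirror(chip, port, direction, enable);
    }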
int mv88e6xxx_port_set_8021q_mode(struct mv88e6xxx_chip *chip, int port,
u16 mode)
{
diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h
index 03a480cd71b9..0ec4327c2b42 100644
--- a/drivers/net/dsa/mv88e6xxx/port.h
+++ b/drivers/net/dsa/mv88e6xxx/port.h
@@ -368,6 +368,9 @@ int mv88e6352_port_link_state(struct mv88e6xxx_chip *chip, int port,
int mv88e6xxx_port_set_map_da(struct mv88e6xxx_chip *chip, int port);
int mv88e6095_port_set_upstream_port(struct mv88e6xxx_chip *chip, int port,
int upstream_port);
+int mv88e6xxx_port_set_mirror(struct mv88e6xxx_chip *chip, int port,
+ enum mv88e6xxx_egress_direction direction,
+ bool mirror);
int mv88e6xxx_port_disable_learn_limit(struct mv88e6xxx_chip *chip, int port);
int mv88e6xxx_port_disable_pri_override(struct mv88e6xxx_chip *chip, int port);
diff --git a/drivers/net/dsa/mv88e6xxx/ptp.c b/drivers/net/dsa/mv88e6xxx/ptp.c
index 073cbd0bb91b..d838c174dc0d 100644
--- a/drivers/net/dsa/mv88e6xxx/ptp.c
+++ b/drivers/net/dsa/mv88e6xxx/ptp.c
@@ -273,6 +273,19 @@ static int mv88e6352_ptp_enable_extts(struct mv88e6xxx_chip *chip,
int pin;
int err;
+ /* Reject requests with unsupported flags */
+ if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
+ PTP_RISING_EDGE |
+ PTP_FALLING_EDGE |
+ PTP_STRICT_FLAGS))
+ return -EOPNOTSUPP;
+
+ /* Reject requests to enable time stamping on both edges. */
+ if ((rq->extts.flags & PTP_STRICT_FLAGS) &&
+ (rq->extts.flags & PTP_ENABLE_FEATURE) &&
+ (rq->extts.flags & PTP_EXTTS_EDGES) == PTP_EXTTS_EDGES)
+ return -EOPNOTSUPP;
+
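To make the two checks concrete: PTP_EXTTS_EDGES is PTP_RISING_EDGE | PTP_FALLING_EDGE, so with strict checking a both-edges request is refused, while a single-edge request passes (illustrative flag values):

    /* rejected: strict + enable + both edges */
    rq->extts.flags = PTP_ENABLE_FEATURE | PTP_STRICT_FLAGS |
    		  PTP_RISING_EDGE | PTP_FALLING_EDGE;

    /* accepted: strict + enable + a single edge */
    rq->extts.flags = PTP_ENABLE_FEATURE | PTP_STRICT_FLAGS | PTP_RISING_EDGE;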
pin = ptp_find_pin(chip->ptp_clock, PTP_PF_EXTTS, rq->extts.index);
if (pin < 0)
diff --git a/drivers/net/dsa/ocelot/Kconfig b/drivers/net/dsa/ocelot/Kconfig
new file mode 100644
index 000000000000..0031ca814346
--- /dev/null
+++ b/drivers/net/dsa/ocelot/Kconfig
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config NET_DSA_MSCC_FELIX
+ tristate "Ocelot / Felix Ethernet switch support"
+ depends on NET_DSA && PCI
+ select MSCC_OCELOT_SWITCH
+ select NET_DSA_TAG_OCELOT
+ help
+ This driver supports the VSC9959 network switch, which is a member of
+ the Vitesse / Microsemi / Microchip Ocelot family of switching cores.
+ It is embedded as a PCIe function of the NXP LS1028A ENETC integrated
+ endpoint.
diff --git a/drivers/net/dsa/ocelot/Makefile b/drivers/net/dsa/ocelot/Makefile
new file mode 100644
index 000000000000..37ad403e0b2a
--- /dev/null
+++ b/drivers/net/dsa/ocelot/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_NET_DSA_MSCC_FELIX) += mscc_felix.o
+
+mscc_felix-objs := \
+ felix.o \
+ felix_vsc9959.o
diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
new file mode 100644
index 000000000000..b7f92464815d
--- /dev/null
+++ b/drivers/net/dsa/ocelot/felix.c
@@ -0,0 +1,530 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright 2019 NXP Semiconductors
+ */
+#include <uapi/linux/if_bridge.h>
+#include <soc/mscc/ocelot.h>
+#include <linux/packing.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/of.h>
+#include <net/dsa.h>
+#include "felix.h"
+
+static enum dsa_tag_protocol felix_get_tag_protocol(struct dsa_switch *ds,
+ int port)
+{
+ return DSA_TAG_PROTO_OCELOT;
+}
+
+static int felix_set_ageing_time(struct dsa_switch *ds,
+ unsigned int ageing_time)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_set_ageing_time(ocelot, ageing_time);
+
+ return 0;
+}
+
+static void felix_adjust_link(struct dsa_switch *ds, int port,
+ struct phy_device *phydev)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_adjust_link(ocelot, port, phydev);
+}
+
+static int felix_fdb_dump(struct dsa_switch *ds, int port,
+ dsa_fdb_dump_cb_t *cb, void *data)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_fdb_dump(ocelot, port, cb, data);
+}
+
+static int felix_fdb_add(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid)
+{
+ struct ocelot *ocelot = ds->priv;
+ bool vlan_aware;
+
+ vlan_aware = dsa_port_is_vlan_filtering(dsa_to_port(ds, port));
+
+ return ocelot_fdb_add(ocelot, port, addr, vid, vlan_aware);
+}
+
+static int felix_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_fdb_del(ocelot, port, addr, vid);
+}
+
+static void felix_bridge_stp_state_set(struct dsa_switch *ds, int port,
+ u8 state)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_bridge_stp_state_set(ocelot, port, state);
+}
+
+static int felix_bridge_join(struct dsa_switch *ds, int port,
+ struct net_device *br)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_port_bridge_join(ocelot, port, br);
+}
+
+static void felix_bridge_leave(struct dsa_switch *ds, int port,
+ struct net_device *br)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_port_bridge_leave(ocelot, port, br);
+}
+
+/* This callback needs to be present */
+static int felix_vlan_prepare(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ return 0;
+}
+
+static int felix_vlan_filtering(struct dsa_switch *ds, int port, bool enabled)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_port_vlan_filtering(ocelot, port, enabled);
+
+ return 0;
+}
+
+static void felix_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct ocelot *ocelot = ds->priv;
+ u16 vid;
+ int err;
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
+ err = ocelot_vlan_add(ocelot, port, vid,
+ vlan->flags & BRIDGE_VLAN_INFO_PVID,
+ vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED);
+ if (err) {
+ dev_err(ds->dev, "Failed to add VLAN %d to port %d: %d\n",
+ vid, port, err);
+ return;
+ }
+ }
+}
+
+static int felix_vlan_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct ocelot *ocelot = ds->priv;
+ u16 vid;
+ int err;
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
+ err = ocelot_vlan_del(ocelot, port, vid);
+ if (err) {
+ dev_err(ds->dev, "Failed to remove VLAN %d from port %d: %d\n",
+ vid, port, err);
+ return err;
+ }
+ }
+ return 0;
+}
+
+static int felix_port_enable(struct dsa_switch *ds, int port,
+ struct phy_device *phy)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_port_enable(ocelot, port, phy);
+
+ return 0;
+}
+
+static void felix_port_disable(struct dsa_switch *ds, int port)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_port_disable(ocelot, port);
+}
+
+static void felix_get_strings(struct dsa_switch *ds, int port,
+ u32 stringset, u8 *data)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_get_strings(ocelot, port, stringset, data);
+}
+
+static void felix_get_ethtool_stats(struct dsa_switch *ds, int port, u64 *data)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_get_ethtool_stats(ocelot, port, data);
+}
+
+static int felix_get_sset_count(struct dsa_switch *ds, int port, int sset)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_get_sset_count(ocelot, port, sset);
+}
+
+static int felix_get_ts_info(struct dsa_switch *ds, int port,
+ struct ethtool_ts_info *info)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_get_ts_info(ocelot, port, info);
+}
+
+static int felix_init_structs(struct felix *felix, int num_phys_ports)
+{
+ struct ocelot *ocelot = &felix->ocelot;
+ resource_size_t base;
+ int port, i, err;
+
+ ocelot->num_phys_ports = num_phys_ports;
+ ocelot->ports = devm_kcalloc(ocelot->dev, num_phys_ports,
+ sizeof(struct ocelot_port *), GFP_KERNEL);
+ if (!ocelot->ports)
+ return -ENOMEM;
+
+ ocelot->map = felix->info->map;
+ ocelot->stats_layout = felix->info->stats_layout;
+ ocelot->num_stats = felix->info->num_stats;
+ ocelot->shared_queue_sz = felix->info->shared_queue_sz;
+ ocelot->ops = felix->info->ops;
+
+ base = pci_resource_start(felix->pdev, felix->info->pci_bar);
+
+ for (i = 0; i < TARGET_MAX; i++) {
+ struct regmap *target;
+ struct resource *res;
+
+ if (!felix->info->target_io_res[i].name)
+ continue;
+
+ res = &felix->info->target_io_res[i];
+ res->flags = IORESOURCE_MEM;
+ res->start += base;
+ res->end += base;
+
+ target = ocelot_regmap_init(ocelot, res);
+ if (IS_ERR(target)) {
+ dev_err(ocelot->dev,
+ "Failed to map device memory space\n");
+ return PTR_ERR(target);
+ }
+
+ ocelot->targets[i] = target;
+ }
+
+ err = ocelot_regfields_init(ocelot, felix->info->regfields);
+ if (err) {
+ dev_err(ocelot->dev, "failed to init reg fields map\n");
+ return err;
+ }
+
+ for (port = 0; port < num_phys_ports; port++) {
+ struct ocelot_port *ocelot_port;
+ void __iomem *port_regs;
+ struct resource *res;
+
+ ocelot_port = devm_kzalloc(ocelot->dev,
+ sizeof(struct ocelot_port),
+ GFP_KERNEL);
+ if (!ocelot_port) {
+ dev_err(ocelot->dev,
+ "failed to allocate port memory\n");
+ return -ENOMEM;
+ }
+
+ res = &felix->info->port_io_res[port];
+ res->flags = IORESOURCE_MEM;
+ res->start += base;
+ res->end += base;
+
+ port_regs = devm_ioremap_resource(ocelot->dev, res);
+ if (IS_ERR(port_regs)) {
+ dev_err(ocelot->dev,
+ "failed to map registers for port %d\n", port);
+ return PTR_ERR(port_regs);
+ }
+
+ ocelot_port->ocelot = ocelot;
+ ocelot_port->regs = port_regs;
+ ocelot->ports[port] = ocelot_port;
+ }
+
+ return 0;
+}
+
+/* Hardware initialization is done here, so that we can allocate structures
+ * with devm without fear of dsa_register_switch returning -EPROBE_DEFER and
+ * causing us to allocate structures twice (leaking memory) and map PCI
+ * memory twice (which would not work).
+ */
+static int felix_setup(struct dsa_switch *ds)
+{
+ struct ocelot *ocelot = ds->priv;
+ struct felix *felix = ocelot_to_felix(ocelot);
+ int port, err;
+
+ err = felix_init_structs(felix, ds->num_ports);
+ if (err)
+ return err;
+
+ ocelot_init(ocelot);
+
+ for (port = 0; port < ds->num_ports; port++) {
+ ocelot_init_port(ocelot, port);
+
+ if (dsa_is_cpu_port(ds, port))
+ ocelot_set_cpu_port(ocelot, port,
+ OCELOT_TAG_PREFIX_NONE,
+ OCELOT_TAG_PREFIX_LONG);
+ }
+
+ return 0;
+}
+
+static void felix_teardown(struct dsa_switch *ds)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ /* stop workqueue thread */
+ ocelot_deinit(ocelot);
+}
+
+static int felix_hwtstamp_get(struct dsa_switch *ds, int port,
+ struct ifreq *ifr)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_hwstamp_get(ocelot, port, ifr);
+}
+
+static int felix_hwtstamp_set(struct dsa_switch *ds, int port,
+ struct ifreq *ifr)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_hwstamp_set(ocelot, port, ifr);
+}
+
+static bool felix_rxtstamp(struct dsa_switch *ds, int port,
+ struct sk_buff *skb, unsigned int type)
+{
+ struct skb_shared_hwtstamps *shhwtstamps;
+ struct ocelot *ocelot = ds->priv;
+ u8 *extraction = skb->data - ETH_HLEN - OCELOT_TAG_LEN;
+ u32 tstamp_lo, tstamp_hi;
+ struct timespec64 ts;
+ u64 tstamp, val;
+
+ ocelot_ptp_gettime64(&ocelot->ptp_info, &ts);
+ tstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
+
+ packing(extraction, &val, 116, 85, OCELOT_TAG_LEN, UNPACK, 0);
+ tstamp_lo = (u32)val;
+
+ tstamp_hi = tstamp >> 32;
+ if ((tstamp & 0xffffffff) < tstamp_lo)
+ tstamp_hi--;
+
+ tstamp = ((u64)tstamp_hi << 32) | tstamp_lo;
+
+ shhwtstamps = skb_hwtstamps(skb);
+ memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
+ shhwtstamps->hwtstamp = tstamp;
+ return false;
+}
+
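felix_rxtstamp() reconstructs a full 64-bit timestamp from the 32-bit value carried in the extraction header: it reads the current PTP time, substitutes the low word with the frame's tstamp_lo, and decrements the high word if the low word has since wrapped. Worked example with illustrative numbers: if the current time is 0x1_0000_0005 and the frame carries tstamp_lo = 0xFFFF_FFF0, then 0x0000_0005 < 0xFFFF_FFF0 indicates a wrap after the frame was stamped, so the result is 0x0_FFFF_FFF0 rather than 0x1_FFFF_FFF0.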
+static bool felix_txtstamp(struct dsa_switch *ds, int port,
+ struct sk_buff *clone, unsigned int type)
+{
+ struct ocelot *ocelot = ds->priv;
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+
+ if (!ocelot_port_add_txtstamp_skb(ocelot_port, clone))
+ return true;
+
+ return false;
+}
+
+static const struct dsa_switch_ops felix_switch_ops = {
+ .get_tag_protocol = felix_get_tag_protocol,
+ .setup = felix_setup,
+ .teardown = felix_teardown,
+ .set_ageing_time = felix_set_ageing_time,
+ .get_strings = felix_get_strings,
+ .get_ethtool_stats = felix_get_ethtool_stats,
+ .get_sset_count = felix_get_sset_count,
+ .get_ts_info = felix_get_ts_info,
+ .adjust_link = felix_adjust_link,
+ .port_enable = felix_port_enable,
+ .port_disable = felix_port_disable,
+ .port_fdb_dump = felix_fdb_dump,
+ .port_fdb_add = felix_fdb_add,
+ .port_fdb_del = felix_fdb_del,
+ .port_bridge_join = felix_bridge_join,
+ .port_bridge_leave = felix_bridge_leave,
+ .port_stp_state_set = felix_bridge_stp_state_set,
+ .port_vlan_prepare = felix_vlan_prepare,
+ .port_vlan_filtering = felix_vlan_filtering,
+ .port_vlan_add = felix_vlan_add,
+ .port_vlan_del = felix_vlan_del,
+ .port_hwtstamp_get = felix_hwtstamp_get,
+ .port_hwtstamp_set = felix_hwtstamp_set,
+ .port_rxtstamp = felix_rxtstamp,
+ .port_txtstamp = felix_txtstamp,
+};
+
+static struct felix_info *felix_instance_tbl[] = {
+ [FELIX_INSTANCE_VSC9959] = &felix_info_vsc9959,
+};
+
+static irqreturn_t felix_irq_handler(int irq, void *data)
+{
+ struct ocelot *ocelot = (struct ocelot *)data;
+
+ /* The INTB interrupt is shared between the PTP TX timestamp interrupt
+ * and the preemption status change interrupt on each port.
+ *
+ * - Retrieve the TX timestamps, if any are pending
+ * - TODO: handle preemption. Without handling it, the driver may get
+ * an interrupt storm.
+ */

+
+ ocelot_get_txtstamp(ocelot);
+
+ return IRQ_HANDLED;
+}
+
+static int felix_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ enum felix_instance instance = id->driver_data;
+ struct dsa_switch *ds;
+ struct ocelot *ocelot;
+ struct felix *felix;
+ int err;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "device enable failed\n");
+ goto err_pci_enable;
+ }
+
+ /* Set up for 64-bit DMA, falling back to 32-bit */
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err) {
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(&pdev->dev,
+ "DMA configuration failed: 0x%x\n", err);
+ goto err_dma;
+ }
+ }
+
+ felix = kzalloc(sizeof(struct felix), GFP_KERNEL);
+ if (!felix) {
+ err = -ENOMEM;
+ dev_err(&pdev->dev, "Failed to allocate driver memory\n");
+ goto err_alloc_felix;
+ }
+
+ pci_set_drvdata(pdev, felix);
+ ocelot = &felix->ocelot;
+ ocelot->dev = &pdev->dev;
+ felix->pdev = pdev;
+ felix->info = felix_instance_tbl[instance];
+
+ pci_set_master(pdev);
+
+ err = devm_request_threaded_irq(&pdev->dev, pdev->irq, NULL,
+ &felix_irq_handler, IRQF_ONESHOT,
+ "felix-intb", ocelot);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to request irq\n");
+ goto err_alloc_irq;
+ }
+
+ ocelot->ptp = 1;
+
+ ds = kzalloc(sizeof(struct dsa_switch), GFP_KERNEL);
+ if (!ds) {
+ err = -ENOMEM;
+ dev_err(&pdev->dev, "Failed to allocate DSA switch\n");
+ goto err_alloc_ds;
+ }
+
+ ds->dev = &pdev->dev;
+ ds->num_ports = felix->info->num_ports;
+ ds->ops = &felix_switch_ops;
+ ds->priv = ocelot;
+ felix->ds = ds;
+
+ err = dsa_register_switch(ds);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to register DSA switch: %d\n", err);
+ goto err_register_ds;
+ }
+
+ return 0;
+
+err_register_ds:
+ kfree(ds);
+err_alloc_ds:
+err_alloc_irq:
+err_alloc_felix:
+ kfree(felix);
+err_dma:
+ pci_disable_device(pdev);
+err_pci_enable:
+ return err;
+}
+
+static void felix_pci_remove(struct pci_dev *pdev)
+{
+ struct felix *felix;
+
+ felix = pci_get_drvdata(pdev);
+
+ dsa_unregister_switch(felix->ds);
+
+ kfree(felix->ds);
+ kfree(felix);
+
+ pci_disable_device(pdev);
+}
+
+static struct pci_device_id felix_ids[] = {
+ {
+ /* NXP LS1028A */
+ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, 0xEEF0),
+ .driver_data = FELIX_INSTANCE_VSC9959,
+ },
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, felix_ids);
+
+static struct pci_driver felix_pci_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = felix_ids,
+ .probe = felix_pci_probe,
+ .remove = felix_pci_remove,
+};
+
+module_pci_driver(felix_pci_driver);
+
+MODULE_DESCRIPTION("Felix Switch driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/dsa/ocelot/felix.h b/drivers/net/dsa/ocelot/felix.h
new file mode 100644
index 000000000000..204296e51d0c
--- /dev/null
+++ b/drivers/net/dsa/ocelot/felix.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright 2019 NXP Semiconductors
+ */
+#ifndef _MSCC_FELIX_H
+#define _MSCC_FELIX_H
+
+#define ocelot_to_felix(o) container_of((o), struct felix, ocelot)
+
+/* Platform-specific information */
+struct felix_info {
+ struct resource *target_io_res;
+ struct resource *port_io_res;
+ const struct reg_field *regfields;
+ const u32 *const *map;
+ const struct ocelot_ops *ops;
+ int shared_queue_sz;
+ const struct ocelot_stat_layout *stats_layout;
+ unsigned int num_stats;
+ int num_ports;
+ int pci_bar;
+};
+
+extern struct felix_info felix_info_vsc9959;
+
+enum felix_instance {
+ FELIX_INSTANCE_VSC9959 = 0,
+};
+
+/* DSA glue / front-end for struct ocelot */
+struct felix {
+ struct dsa_switch *ds;
+ struct pci_dev *pdev;
+ struct felix_info *info;
+ struct ocelot ocelot;
+};
+
+#endif
diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
new file mode 100644
index 000000000000..b9758b0d18c7
--- /dev/null
+++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
@@ -0,0 +1,583 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/* Copyright 2017 Microsemi Corporation
+ * Copyright 2018-2019 NXP Semiconductors
+ */
+#include <soc/mscc/ocelot_sys.h>
+#include <soc/mscc/ocelot.h>
+#include <linux/iopoll.h>
+#include <linux/pci.h>
+#include "felix.h"
+
+static const u32 vsc9959_ana_regmap[] = {
+ REG(ANA_ADVLEARN, 0x0089a0),
+ REG(ANA_VLANMASK, 0x0089a4),
+ REG_RESERVED(ANA_PORT_B_DOMAIN),
+ REG(ANA_ANAGEFIL, 0x0089ac),
+ REG(ANA_ANEVENTS, 0x0089b0),
+ REG(ANA_STORMLIMIT_BURST, 0x0089b4),
+ REG(ANA_STORMLIMIT_CFG, 0x0089b8),
+ REG(ANA_ISOLATED_PORTS, 0x0089c8),
+ REG(ANA_COMMUNITY_PORTS, 0x0089cc),
+ REG(ANA_AUTOAGE, 0x0089d0),
+ REG(ANA_MACTOPTIONS, 0x0089d4),
+ REG(ANA_LEARNDISC, 0x0089d8),
+ REG(ANA_AGENCTRL, 0x0089dc),
+ REG(ANA_MIRRORPORTS, 0x0089e0),
+ REG(ANA_EMIRRORPORTS, 0x0089e4),
+ REG(ANA_FLOODING, 0x0089e8),
+ REG(ANA_FLOODING_IPMC, 0x008a08),
+ REG(ANA_SFLOW_CFG, 0x008a0c),
+ REG(ANA_PORT_MODE, 0x008a28),
+ REG(ANA_CUT_THRU_CFG, 0x008a48),
+ REG(ANA_PGID_PGID, 0x008400),
+ REG(ANA_TABLES_ANMOVED, 0x007f1c),
+ REG(ANA_TABLES_MACHDATA, 0x007f20),
+ REG(ANA_TABLES_MACLDATA, 0x007f24),
+ REG(ANA_TABLES_STREAMDATA, 0x007f28),
+ REG(ANA_TABLES_MACACCESS, 0x007f2c),
+ REG(ANA_TABLES_MACTINDX, 0x007f30),
+ REG(ANA_TABLES_VLANACCESS, 0x007f34),
+ REG(ANA_TABLES_VLANTIDX, 0x007f38),
+ REG(ANA_TABLES_ISDXACCESS, 0x007f3c),
+ REG(ANA_TABLES_ISDXTIDX, 0x007f40),
+ REG(ANA_TABLES_ENTRYLIM, 0x007f00),
+ REG(ANA_TABLES_PTP_ID_HIGH, 0x007f44),
+ REG(ANA_TABLES_PTP_ID_LOW, 0x007f48),
+ REG(ANA_TABLES_STREAMACCESS, 0x007f4c),
+ REG(ANA_TABLES_STREAMTIDX, 0x007f50),
+ REG(ANA_TABLES_SEQ_HISTORY, 0x007f54),
+ REG(ANA_TABLES_SEQ_MASK, 0x007f58),
+ REG(ANA_TABLES_SFID_MASK, 0x007f5c),
+ REG(ANA_TABLES_SFIDACCESS, 0x007f60),
+ REG(ANA_TABLES_SFIDTIDX, 0x007f64),
+ REG(ANA_MSTI_STATE, 0x008600),
+ REG(ANA_OAM_UPM_LM_CNT, 0x008000),
+ REG(ANA_SG_ACCESS_CTRL, 0x008a64),
+ REG(ANA_SG_CONFIG_REG_1, 0x007fb0),
+ REG(ANA_SG_CONFIG_REG_2, 0x007fb4),
+ REG(ANA_SG_CONFIG_REG_3, 0x007fb8),
+ REG(ANA_SG_CONFIG_REG_4, 0x007fbc),
+ REG(ANA_SG_CONFIG_REG_5, 0x007fc0),
+ REG(ANA_SG_GCL_GS_CONFIG, 0x007f80),
+ REG(ANA_SG_GCL_TI_CONFIG, 0x007f90),
+ REG(ANA_SG_STATUS_REG_1, 0x008980),
+ REG(ANA_SG_STATUS_REG_2, 0x008984),
+ REG(ANA_SG_STATUS_REG_3, 0x008988),
+ REG(ANA_PORT_VLAN_CFG, 0x007800),
+ REG(ANA_PORT_DROP_CFG, 0x007804),
+ REG(ANA_PORT_QOS_CFG, 0x007808),
+ REG(ANA_PORT_VCAP_CFG, 0x00780c),
+ REG(ANA_PORT_VCAP_S1_KEY_CFG, 0x007810),
+ REG(ANA_PORT_VCAP_S2_CFG, 0x00781c),
+ REG(ANA_PORT_PCP_DEI_MAP, 0x007820),
+ REG(ANA_PORT_CPU_FWD_CFG, 0x007860),
+ REG(ANA_PORT_CPU_FWD_BPDU_CFG, 0x007864),
+ REG(ANA_PORT_CPU_FWD_GARP_CFG, 0x007868),
+ REG(ANA_PORT_CPU_FWD_CCM_CFG, 0x00786c),
+ REG(ANA_PORT_PORT_CFG, 0x007870),
+ REG(ANA_PORT_POL_CFG, 0x007874),
+ REG(ANA_PORT_PTP_CFG, 0x007878),
+ REG(ANA_PORT_PTP_DLY1_CFG, 0x00787c),
+ REG(ANA_PORT_PTP_DLY2_CFG, 0x007880),
+ REG(ANA_PORT_SFID_CFG, 0x007884),
+ REG(ANA_PFC_PFC_CFG, 0x008800),
+ REG_RESERVED(ANA_PFC_PFC_TIMER),
+ REG_RESERVED(ANA_IPT_OAM_MEP_CFG),
+ REG_RESERVED(ANA_IPT_IPT),
+ REG_RESERVED(ANA_PPT_PPT),
+ REG_RESERVED(ANA_FID_MAP_FID_MAP),
+ REG(ANA_AGGR_CFG, 0x008a68),
+ REG(ANA_CPUQ_CFG, 0x008a6c),
+ REG_RESERVED(ANA_CPUQ_CFG2),
+ REG(ANA_CPUQ_8021_CFG, 0x008a74),
+ REG(ANA_DSCP_CFG, 0x008ab4),
+ REG(ANA_DSCP_REWR_CFG, 0x008bb4),
+ REG(ANA_VCAP_RNG_TYPE_CFG, 0x008bf4),
+ REG(ANA_VCAP_RNG_VAL_CFG, 0x008c14),
+ REG_RESERVED(ANA_VRAP_CFG),
+ REG_RESERVED(ANA_VRAP_HDR_DATA),
+ REG_RESERVED(ANA_VRAP_HDR_MASK),
+ REG(ANA_DISCARD_CFG, 0x008c40),
+ REG(ANA_FID_CFG, 0x008c44),
+ REG(ANA_POL_PIR_CFG, 0x004000),
+ REG(ANA_POL_CIR_CFG, 0x004004),
+ REG(ANA_POL_MODE_CFG, 0x004008),
+ REG(ANA_POL_PIR_STATE, 0x00400c),
+ REG(ANA_POL_CIR_STATE, 0x004010),
+ REG_RESERVED(ANA_POL_STATE),
+ REG(ANA_POL_FLOWC, 0x008c48),
+ REG(ANA_POL_HYST, 0x008cb4),
+ REG_RESERVED(ANA_POL_MISC_CFG),
+};
+
+static const u32 vsc9959_qs_regmap[] = {
+ REG(QS_XTR_GRP_CFG, 0x000000),
+ REG(QS_XTR_RD, 0x000008),
+ REG(QS_XTR_FRM_PRUNING, 0x000010),
+ REG(QS_XTR_FLUSH, 0x000018),
+ REG(QS_XTR_DATA_PRESENT, 0x00001c),
+ REG(QS_XTR_CFG, 0x000020),
+ REG(QS_INJ_GRP_CFG, 0x000024),
+ REG(QS_INJ_WR, 0x00002c),
+ REG(QS_INJ_CTRL, 0x000034),
+ REG(QS_INJ_STATUS, 0x00003c),
+ REG(QS_INJ_ERR, 0x000040),
+ REG_RESERVED(QS_INH_DBG),
+};
+
+static const u32 vsc9959_s2_regmap[] = {
+ REG(S2_CORE_UPDATE_CTRL, 0x000000),
+ REG(S2_CORE_MV_CFG, 0x000004),
+ REG(S2_CACHE_ENTRY_DAT, 0x000008),
+ REG(S2_CACHE_MASK_DAT, 0x000108),
+ REG(S2_CACHE_ACTION_DAT, 0x000208),
+ REG(S2_CACHE_CNT_DAT, 0x000308),
+ REG(S2_CACHE_TG_DAT, 0x000388),
+};
+
+static const u32 vsc9959_qsys_regmap[] = {
+ REG(QSYS_PORT_MODE, 0x00f460),
+ REG(QSYS_SWITCH_PORT_MODE, 0x00f480),
+ REG(QSYS_STAT_CNT_CFG, 0x00f49c),
+ REG(QSYS_EEE_CFG, 0x00f4a0),
+ REG(QSYS_EEE_THRES, 0x00f4b8),
+ REG(QSYS_IGR_NO_SHARING, 0x00f4bc),
+ REG(QSYS_EGR_NO_SHARING, 0x00f4c0),
+ REG(QSYS_SW_STATUS, 0x00f4c4),
+ REG(QSYS_EXT_CPU_CFG, 0x00f4e0),
+ REG_RESERVED(QSYS_PAD_CFG),
+ REG(QSYS_CPU_GROUP_MAP, 0x00f4e8),
+ REG_RESERVED(QSYS_QMAP),
+ REG_RESERVED(QSYS_ISDX_SGRP),
+ REG_RESERVED(QSYS_TIMED_FRAME_ENTRY),
+ REG(QSYS_TFRM_MISC, 0x00f50c),
+ REG(QSYS_TFRM_PORT_DLY, 0x00f510),
+ REG(QSYS_TFRM_TIMER_CFG_1, 0x00f514),
+ REG(QSYS_TFRM_TIMER_CFG_2, 0x00f518),
+ REG(QSYS_TFRM_TIMER_CFG_3, 0x00f51c),
+ REG(QSYS_TFRM_TIMER_CFG_4, 0x00f520),
+ REG(QSYS_TFRM_TIMER_CFG_5, 0x00f524),
+ REG(QSYS_TFRM_TIMER_CFG_6, 0x00f528),
+ REG(QSYS_TFRM_TIMER_CFG_7, 0x00f52c),
+ REG(QSYS_TFRM_TIMER_CFG_8, 0x00f530),
+ REG(QSYS_RED_PROFILE, 0x00f534),
+ REG(QSYS_RES_QOS_MODE, 0x00f574),
+ REG(QSYS_RES_CFG, 0x00c000),
+ REG(QSYS_RES_STAT, 0x00c004),
+ REG(QSYS_EGR_DROP_MODE, 0x00f578),
+ REG(QSYS_EQ_CTRL, 0x00f57c),
+ REG_RESERVED(QSYS_EVENTS_CORE),
+ REG(QSYS_QMAXSDU_CFG_0, 0x00f584),
+ REG(QSYS_QMAXSDU_CFG_1, 0x00f5a0),
+ REG(QSYS_QMAXSDU_CFG_2, 0x00f5bc),
+ REG(QSYS_QMAXSDU_CFG_3, 0x00f5d8),
+ REG(QSYS_QMAXSDU_CFG_4, 0x00f5f4),
+ REG(QSYS_QMAXSDU_CFG_5, 0x00f610),
+ REG(QSYS_QMAXSDU_CFG_6, 0x00f62c),
+ REG(QSYS_QMAXSDU_CFG_7, 0x00f648),
+ REG(QSYS_PREEMPTION_CFG, 0x00f664),
+ REG_RESERVED(QSYS_CIR_CFG),
+ REG(QSYS_EIR_CFG, 0x000004),
+ REG(QSYS_SE_CFG, 0x000008),
+ REG(QSYS_SE_DWRR_CFG, 0x00000c),
+ REG_RESERVED(QSYS_SE_CONNECT),
+ REG(QSYS_SE_DLB_SENSE, 0x000040),
+ REG(QSYS_CIR_STATE, 0x000044),
+ REG(QSYS_EIR_STATE, 0x000048),
+ REG_RESERVED(QSYS_SE_STATE),
+ REG(QSYS_HSCH_MISC_CFG, 0x00f67c),
+ REG(QSYS_TAG_CONFIG, 0x00f680),
+ REG(QSYS_TAS_PARAM_CFG_CTRL, 0x00f698),
+ REG(QSYS_PORT_MAX_SDU, 0x00f69c),
+ REG(QSYS_PARAM_CFG_REG_1, 0x00f440),
+ REG(QSYS_PARAM_CFG_REG_2, 0x00f444),
+ REG(QSYS_PARAM_CFG_REG_3, 0x00f448),
+ REG(QSYS_PARAM_CFG_REG_4, 0x00f44c),
+ REG(QSYS_PARAM_CFG_REG_5, 0x00f450),
+ REG(QSYS_GCL_CFG_REG_1, 0x00f454),
+ REG(QSYS_GCL_CFG_REG_2, 0x00f458),
+ REG(QSYS_PARAM_STATUS_REG_1, 0x00f400),
+ REG(QSYS_PARAM_STATUS_REG_2, 0x00f404),
+ REG(QSYS_PARAM_STATUS_REG_3, 0x00f408),
+ REG(QSYS_PARAM_STATUS_REG_4, 0x00f40c),
+ REG(QSYS_PARAM_STATUS_REG_5, 0x00f410),
+ REG(QSYS_PARAM_STATUS_REG_6, 0x00f414),
+ REG(QSYS_PARAM_STATUS_REG_7, 0x00f418),
+ REG(QSYS_PARAM_STATUS_REG_8, 0x00f41c),
+ REG(QSYS_PARAM_STATUS_REG_9, 0x00f420),
+ REG(QSYS_GCL_STATUS_REG_1, 0x00f424),
+ REG(QSYS_GCL_STATUS_REG_2, 0x00f428),
+};
+
+static const u32 vsc9959_rew_regmap[] = {
+ REG(REW_PORT_VLAN_CFG, 0x000000),
+ REG(REW_TAG_CFG, 0x000004),
+ REG(REW_PORT_CFG, 0x000008),
+ REG(REW_DSCP_CFG, 0x00000c),
+ REG(REW_PCP_DEI_QOS_MAP_CFG, 0x000010),
+ REG(REW_PTP_CFG, 0x000050),
+ REG(REW_PTP_DLY1_CFG, 0x000054),
+ REG(REW_RED_TAG_CFG, 0x000058),
+ REG(REW_DSCP_REMAP_DP1_CFG, 0x000410),
+ REG(REW_DSCP_REMAP_CFG, 0x000510),
+ REG_RESERVED(REW_STAT_CFG),
+ REG_RESERVED(REW_REW_STICKY),
+ REG_RESERVED(REW_PPT),
+};
+
+static const u32 vsc9959_sys_regmap[] = {
+ REG(SYS_COUNT_RX_OCTETS, 0x000000),
+ REG(SYS_COUNT_RX_MULTICAST, 0x000008),
+ REG(SYS_COUNT_RX_SHORTS, 0x000010),
+ REG(SYS_COUNT_RX_FRAGMENTS, 0x000014),
+ REG(SYS_COUNT_RX_JABBERS, 0x000018),
+ REG(SYS_COUNT_RX_64, 0x000024),
+ REG(SYS_COUNT_RX_65_127, 0x000028),
+ REG(SYS_COUNT_RX_128_255, 0x00002c),
+ REG(SYS_COUNT_RX_256_1023, 0x000030),
+ REG(SYS_COUNT_RX_1024_1526, 0x000034),
+ REG(SYS_COUNT_RX_1527_MAX, 0x000038),
+ REG(SYS_COUNT_RX_LONGS, 0x000044),
+ REG(SYS_COUNT_TX_OCTETS, 0x000200),
+ REG(SYS_COUNT_TX_COLLISION, 0x000210),
+ REG(SYS_COUNT_TX_DROPS, 0x000214),
+ REG(SYS_COUNT_TX_64, 0x00021c),
+ REG(SYS_COUNT_TX_65_127, 0x000220),
+ REG(SYS_COUNT_TX_128_511, 0x000224),
+ REG(SYS_COUNT_TX_512_1023, 0x000228),
+ REG(SYS_COUNT_TX_1024_1526, 0x00022c),
+ REG(SYS_COUNT_TX_1527_MAX, 0x000230),
+ REG(SYS_COUNT_TX_AGING, 0x000278),
+ REG(SYS_RESET_CFG, 0x000e00),
+ REG(SYS_SR_ETYPE_CFG, 0x000e04),
+ REG(SYS_VLAN_ETYPE_CFG, 0x000e08),
+ REG(SYS_PORT_MODE, 0x000e0c),
+ REG(SYS_FRONT_PORT_MODE, 0x000e2c),
+ REG(SYS_FRM_AGING, 0x000e44),
+ REG(SYS_STAT_CFG, 0x000e48),
+ REG(SYS_SW_STATUS, 0x000e4c),
+ REG_RESERVED(SYS_MISC_CFG),
+ REG(SYS_REW_MAC_HIGH_CFG, 0x000e6c),
+ REG(SYS_REW_MAC_LOW_CFG, 0x000e84),
+ REG(SYS_TIMESTAMP_OFFSET, 0x000e9c),
+ REG(SYS_PAUSE_CFG, 0x000ea0),
+ REG(SYS_PAUSE_TOT_CFG, 0x000ebc),
+ REG(SYS_ATOP, 0x000ec0),
+ REG(SYS_ATOP_TOT_CFG, 0x000edc),
+ REG(SYS_MAC_FC_CFG, 0x000ee0),
+ REG(SYS_MMGT, 0x000ef8),
+ REG_RESERVED(SYS_MMGT_FAST),
+ REG_RESERVED(SYS_EVENTS_DIF),
+ REG_RESERVED(SYS_EVENTS_CORE),
+ REG_RESERVED(SYS_CNT),
+ REG(SYS_PTP_STATUS, 0x000f14),
+ REG(SYS_PTP_TXSTAMP, 0x000f18),
+ REG(SYS_PTP_NXT, 0x000f1c),
+ REG(SYS_PTP_CFG, 0x000f20),
+ REG(SYS_RAM_INIT, 0x000f24),
+ REG_RESERVED(SYS_CM_ADDR),
+ REG_RESERVED(SYS_CM_DATA_WR),
+ REG_RESERVED(SYS_CM_DATA_RD),
+ REG_RESERVED(SYS_CM_OP),
+ REG_RESERVED(SYS_CM_DATA),
+};
+
+static const u32 vsc9959_ptp_regmap[] = {
+ REG(PTP_PIN_CFG, 0x000000),
+ REG(PTP_PIN_TOD_SEC_MSB, 0x000004),
+ REG(PTP_PIN_TOD_SEC_LSB, 0x000008),
+ REG(PTP_PIN_TOD_NSEC, 0x00000c),
+ REG(PTP_CFG_MISC, 0x0000a0),
+ REG(PTP_CLK_CFG_ADJ_CFG, 0x0000a4),
+ REG(PTP_CLK_CFG_ADJ_FREQ, 0x0000a8),
+};
+
+static const u32 vsc9959_gcb_regmap[] = {
+ REG(GCB_SOFT_RST, 0x000004),
+};
+
+static const u32 *vsc9959_regmap[] = {
+ [ANA] = vsc9959_ana_regmap,
+ [QS] = vsc9959_qs_regmap,
+ [QSYS] = vsc9959_qsys_regmap,
+ [REW] = vsc9959_rew_regmap,
+ [SYS] = vsc9959_sys_regmap,
+ [S2] = vsc9959_s2_regmap,
+ [PTP] = vsc9959_ptp_regmap,
+ [GCB] = vsc9959_gcb_regmap,
+};
+
+/* Addresses are relative to the PCI device's base address and
+ * will be fixed up at ioremap time.
+ */
+static struct resource vsc9959_target_io_res[] = {
+ [ANA] = {
+ .start = 0x0280000,
+ .end = 0x028ffff,
+ .name = "ana",
+ },
+ [QS] = {
+ .start = 0x0080000,
+ .end = 0x00800ff,
+ .name = "qs",
+ },
+ [QSYS] = {
+ .start = 0x0200000,
+ .end = 0x021ffff,
+ .name = "qsys",
+ },
+ [REW] = {
+ .start = 0x0030000,
+ .end = 0x003ffff,
+ .name = "rew",
+ },
+ [SYS] = {
+ .start = 0x0010000,
+ .end = 0x001ffff,
+ .name = "sys",
+ },
+ [S2] = {
+ .start = 0x0060000,
+ .end = 0x00603ff,
+ .name = "s2",
+ },
+ [PTP] = {
+ .start = 0x0090000,
+ .end = 0x00900cb,
+ .name = "ptp",
+ },
+ [GCB] = {
+ .start = 0x0070000,
+ .end = 0x00701ff,
+ .name = "devcpu_gcb",
+ },
+};
+
+static struct resource vsc9959_port_io_res[] = {
+ {
+ .start = 0x0100000,
+ .end = 0x010ffff,
+ .name = "port0",
+ },
+ {
+ .start = 0x0110000,
+ .end = 0x011ffff,
+ .name = "port1",
+ },
+ {
+ .start = 0x0120000,
+ .end = 0x012ffff,
+ .name = "port2",
+ },
+ {
+ .start = 0x0130000,
+ .end = 0x013ffff,
+ .name = "port3",
+ },
+ {
+ .start = 0x0140000,
+ .end = 0x014ffff,
+ .name = "port4",
+ },
+ {
+ .start = 0x0150000,
+ .end = 0x015ffff,
+ .name = "port5",
+ },
+};
+
+static const struct reg_field vsc9959_regfields[] = {
+ [ANA_ADVLEARN_VLAN_CHK] = REG_FIELD(ANA_ADVLEARN, 6, 6),
+ [ANA_ADVLEARN_LEARN_MIRROR] = REG_FIELD(ANA_ADVLEARN, 0, 5),
+ [ANA_ANEVENTS_FLOOD_DISCARD] = REG_FIELD(ANA_ANEVENTS, 30, 30),
+ [ANA_ANEVENTS_AUTOAGE] = REG_FIELD(ANA_ANEVENTS, 26, 26),
+ [ANA_ANEVENTS_STORM_DROP] = REG_FIELD(ANA_ANEVENTS, 24, 24),
+ [ANA_ANEVENTS_LEARN_DROP] = REG_FIELD(ANA_ANEVENTS, 23, 23),
+ [ANA_ANEVENTS_AGED_ENTRY] = REG_FIELD(ANA_ANEVENTS, 22, 22),
+ [ANA_ANEVENTS_CPU_LEARN_FAILED] = REG_FIELD(ANA_ANEVENTS, 21, 21),
+ [ANA_ANEVENTS_AUTO_LEARN_FAILED] = REG_FIELD(ANA_ANEVENTS, 20, 20),
+ [ANA_ANEVENTS_LEARN_REMOVE] = REG_FIELD(ANA_ANEVENTS, 19, 19),
+ [ANA_ANEVENTS_AUTO_LEARNED] = REG_FIELD(ANA_ANEVENTS, 18, 18),
+ [ANA_ANEVENTS_AUTO_MOVED] = REG_FIELD(ANA_ANEVENTS, 17, 17),
+ [ANA_ANEVENTS_CLASSIFIED_DROP] = REG_FIELD(ANA_ANEVENTS, 15, 15),
+ [ANA_ANEVENTS_CLASSIFIED_COPY] = REG_FIELD(ANA_ANEVENTS, 14, 14),
+ [ANA_ANEVENTS_VLAN_DISCARD] = REG_FIELD(ANA_ANEVENTS, 13, 13),
+ [ANA_ANEVENTS_FWD_DISCARD] = REG_FIELD(ANA_ANEVENTS, 12, 12),
+ [ANA_ANEVENTS_MULTICAST_FLOOD] = REG_FIELD(ANA_ANEVENTS, 11, 11),
+ [ANA_ANEVENTS_UNICAST_FLOOD] = REG_FIELD(ANA_ANEVENTS, 10, 10),
+ [ANA_ANEVENTS_DEST_KNOWN] = REG_FIELD(ANA_ANEVENTS, 9, 9),
+ [ANA_ANEVENTS_BUCKET3_MATCH] = REG_FIELD(ANA_ANEVENTS, 8, 8),
+ [ANA_ANEVENTS_BUCKET2_MATCH] = REG_FIELD(ANA_ANEVENTS, 7, 7),
+ [ANA_ANEVENTS_BUCKET1_MATCH] = REG_FIELD(ANA_ANEVENTS, 6, 6),
+ [ANA_ANEVENTS_BUCKET0_MATCH] = REG_FIELD(ANA_ANEVENTS, 5, 5),
+ [ANA_ANEVENTS_CPU_OPERATION] = REG_FIELD(ANA_ANEVENTS, 4, 4),
+ [ANA_ANEVENTS_DMAC_LOOKUP] = REG_FIELD(ANA_ANEVENTS, 3, 3),
+ [ANA_ANEVENTS_SMAC_LOOKUP] = REG_FIELD(ANA_ANEVENTS, 2, 2),
+ [ANA_ANEVENTS_SEQ_GEN_ERR_0] = REG_FIELD(ANA_ANEVENTS, 1, 1),
+ [ANA_ANEVENTS_SEQ_GEN_ERR_1] = REG_FIELD(ANA_ANEVENTS, 0, 0),
+ [ANA_TABLES_MACACCESS_B_DOM] = REG_FIELD(ANA_TABLES_MACACCESS, 16, 16),
+ [ANA_TABLES_MACTINDX_BUCKET] = REG_FIELD(ANA_TABLES_MACTINDX, 11, 12),
+ [ANA_TABLES_MACTINDX_M_INDEX] = REG_FIELD(ANA_TABLES_MACTINDX, 0, 10),
+ [SYS_RESET_CFG_CORE_ENA] = REG_FIELD(SYS_RESET_CFG, 0, 0),
+ [GCB_SOFT_RST_SWC_RST] = REG_FIELD(GCB_SOFT_RST, 0, 0),
+};
+
+static const struct ocelot_stat_layout vsc9959_stats_layout[] = {
+ { .offset = 0x00, .name = "rx_octets", },
+ { .offset = 0x01, .name = "rx_unicast", },
+ { .offset = 0x02, .name = "rx_multicast", },
+ { .offset = 0x03, .name = "rx_broadcast", },
+ { .offset = 0x04, .name = "rx_shorts", },
+ { .offset = 0x05, .name = "rx_fragments", },
+ { .offset = 0x06, .name = "rx_jabbers", },
+ { .offset = 0x07, .name = "rx_crc_align_errs", },
+ { .offset = 0x08, .name = "rx_sym_errs", },
+ { .offset = 0x09, .name = "rx_frames_below_65_octets", },
+ { .offset = 0x0A, .name = "rx_frames_65_to_127_octets", },
+ { .offset = 0x0B, .name = "rx_frames_128_to_255_octets", },
+ { .offset = 0x0C, .name = "rx_frames_256_to_511_octets", },
+ { .offset = 0x0D, .name = "rx_frames_512_to_1023_octets", },
+ { .offset = 0x0E, .name = "rx_frames_1024_to_1526_octets", },
+ { .offset = 0x0F, .name = "rx_frames_over_1526_octets", },
+ { .offset = 0x10, .name = "rx_pause", },
+ { .offset = 0x11, .name = "rx_control", },
+ { .offset = 0x12, .name = "rx_longs", },
+ { .offset = 0x13, .name = "rx_classified_drops", },
+ { .offset = 0x14, .name = "rx_red_prio_0", },
+ { .offset = 0x15, .name = "rx_red_prio_1", },
+ { .offset = 0x16, .name = "rx_red_prio_2", },
+ { .offset = 0x17, .name = "rx_red_prio_3", },
+ { .offset = 0x18, .name = "rx_red_prio_4", },
+ { .offset = 0x19, .name = "rx_red_prio_5", },
+ { .offset = 0x1A, .name = "rx_red_prio_6", },
+ { .offset = 0x1B, .name = "rx_red_prio_7", },
+ { .offset = 0x1C, .name = "rx_yellow_prio_0", },
+ { .offset = 0x1D, .name = "rx_yellow_prio_1", },
+ { .offset = 0x1E, .name = "rx_yellow_prio_2", },
+ { .offset = 0x1F, .name = "rx_yellow_prio_3", },
+ { .offset = 0x20, .name = "rx_yellow_prio_4", },
+ { .offset = 0x21, .name = "rx_yellow_prio_5", },
+ { .offset = 0x22, .name = "rx_yellow_prio_6", },
+ { .offset = 0x23, .name = "rx_yellow_prio_7", },
+ { .offset = 0x24, .name = "rx_green_prio_0", },
+ { .offset = 0x25, .name = "rx_green_prio_1", },
+ { .offset = 0x26, .name = "rx_green_prio_2", },
+ { .offset = 0x27, .name = "rx_green_prio_3", },
+ { .offset = 0x28, .name = "rx_green_prio_4", },
+ { .offset = 0x29, .name = "rx_green_prio_5", },
+ { .offset = 0x2A, .name = "rx_green_prio_6", },
+ { .offset = 0x2B, .name = "rx_green_prio_7", },
+ { .offset = 0x80, .name = "tx_octets", },
+ { .offset = 0x81, .name = "tx_unicast", },
+ { .offset = 0x82, .name = "tx_multicast", },
+ { .offset = 0x83, .name = "tx_broadcast", },
+ { .offset = 0x84, .name = "tx_collision", },
+ { .offset = 0x85, .name = "tx_drops", },
+ { .offset = 0x86, .name = "tx_pause", },
+ { .offset = 0x87, .name = "tx_frames_below_65_octets", },
+ { .offset = 0x88, .name = "tx_frames_65_to_127_octets", },
+ { .offset = 0x89, .name = "tx_frames_128_255_octets", },
+ { .offset = 0x8B, .name = "tx_frames_256_511_octets", },
+ { .offset = 0x8C, .name = "tx_frames_1024_1526_octets", },
+ { .offset = 0x8D, .name = "tx_frames_over_1526_octets", },
+ { .offset = 0x8E, .name = "tx_yellow_prio_0", },
+ { .offset = 0x8F, .name = "tx_yellow_prio_1", },
+ { .offset = 0x90, .name = "tx_yellow_prio_2", },
+ { .offset = 0x91, .name = "tx_yellow_prio_3", },
+ { .offset = 0x92, .name = "tx_yellow_prio_4", },
+ { .offset = 0x93, .name = "tx_yellow_prio_5", },
+ { .offset = 0x94, .name = "tx_yellow_prio_6", },
+ { .offset = 0x95, .name = "tx_yellow_prio_7", },
+ { .offset = 0x96, .name = "tx_green_prio_0", },
+ { .offset = 0x97, .name = "tx_green_prio_1", },
+ { .offset = 0x98, .name = "tx_green_prio_2", },
+ { .offset = 0x99, .name = "tx_green_prio_3", },
+ { .offset = 0x9A, .name = "tx_green_prio_4", },
+ { .offset = 0x9B, .name = "tx_green_prio_5", },
+ { .offset = 0x9C, .name = "tx_green_prio_6", },
+ { .offset = 0x9D, .name = "tx_green_prio_7", },
+ { .offset = 0x9E, .name = "tx_aged", },
+ { .offset = 0x100, .name = "drop_local", },
+ { .offset = 0x101, .name = "drop_tail", },
+ { .offset = 0x102, .name = "drop_yellow_prio_0", },
+ { .offset = 0x103, .name = "drop_yellow_prio_1", },
+ { .offset = 0x104, .name = "drop_yellow_prio_2", },
+ { .offset = 0x105, .name = "drop_yellow_prio_3", },
+ { .offset = 0x106, .name = "drop_yellow_prio_4", },
+ { .offset = 0x107, .name = "drop_yellow_prio_5", },
+ { .offset = 0x108, .name = "drop_yellow_prio_6", },
+ { .offset = 0x109, .name = "drop_yellow_prio_7", },
+ { .offset = 0x10A, .name = "drop_green_prio_0", },
+ { .offset = 0x10B, .name = "drop_green_prio_1", },
+ { .offset = 0x10C, .name = "drop_green_prio_2", },
+ { .offset = 0x10D, .name = "drop_green_prio_3", },
+ { .offset = 0x10E, .name = "drop_green_prio_4", },
+ { .offset = 0x10F, .name = "drop_green_prio_5", },
+ { .offset = 0x110, .name = "drop_green_prio_6", },
+ { .offset = 0x111, .name = "drop_green_prio_7", },
+};
+
+#define VSC9959_INIT_TIMEOUT 50000
+#define VSC9959_GCB_RST_SLEEP 100
+#define VSC9959_SYS_RAMINIT_SLEEP 80
+
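All three constants are in microseconds: readx_poll_timeout() re-invokes the status callback every sleep_us and gives up with -ETIMEDOUT after timeout_us, which is what vsc9959_reset() below relies on for both the soft-reset and RAM-init polling loops.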
+static int vsc9959_gcb_soft_rst_status(struct ocelot *ocelot)
+{
+ int val;
+
+ regmap_field_read(ocelot->regfields[GCB_SOFT_RST_SWC_RST], &val);
+
+ return val;
+}
+
+static int vsc9959_sys_ram_init_status(struct ocelot *ocelot)
+{
+ return ocelot_read(ocelot, SYS_RAM_INIT);
+}
+
+static int vsc9959_reset(struct ocelot *ocelot)
+{
+ int val, err;
+
+ /* soft-reset the switch core */
+ regmap_field_write(ocelot->regfields[GCB_SOFT_RST_SWC_RST], 1);
+
+ err = readx_poll_timeout(vsc9959_gcb_soft_rst_status, ocelot, val, !val,
+ VSC9959_GCB_RST_SLEEP, VSC9959_INIT_TIMEOUT);
+ if (err) {
+ dev_err(ocelot->dev, "timeout: switch core reset\n");
+ return err;
+ }
+
+ /* initialize switch mem ~40us */
+ ocelot_write(ocelot, SYS_RAM_INIT_RAM_INIT, SYS_RAM_INIT);
+ err = readx_poll_timeout(vsc9959_sys_ram_init_status, ocelot, val, !val,
+ VSC9959_SYS_RAMINIT_SLEEP,
+ VSC9959_INIT_TIMEOUT);
+ if (err) {
+ dev_err(ocelot->dev, "timeout: switch sram init\n");
+ return err;
+ }
+
+ /* enable switch core */
+ regmap_field_write(ocelot->regfields[SYS_RESET_CFG_CORE_ENA], 1);
+
+ return 0;
+}
+
+static const struct ocelot_ops vsc9959_ops = {
+ .reset = vsc9959_reset,
+};
+
+struct felix_info felix_info_vsc9959 = {
+ .target_io_res = vsc9959_target_io_res,
+ .port_io_res = vsc9959_port_io_res,
+ .regfields = vsc9959_regfields,
+ .map = vsc9959_regmap,
+ .ops = &vsc9959_ops,
+ .stats_layout = vsc9959_stats_layout,
+ .num_stats = ARRAY_SIZE(vsc9959_stats_layout),
+ .shared_queue_sz = 128 * 1024,
+ .num_ports = 6,
+ .pci_bar = 4,
+};
diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c
index b00274caae4f..e548289df31e 100644
--- a/drivers/net/dsa/qca8k.c
+++ b/drivers/net/dsa/qca8k.c
@@ -639,7 +639,8 @@ static int
qca8k_setup(struct dsa_switch *ds)
{
struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
- int ret, i, phy_mode = -1;
+ phy_interface_t phy_mode = PHY_INTERFACE_MODE_NA;
+ int ret, i;
u32 mask;
/* Make sure that port 0 is the cpu port */
@@ -661,10 +662,10 @@ qca8k_setup(struct dsa_switch *ds)
return ret;
/* Initialize CPU port pad mode (xMII type, delays...) */
- phy_mode = of_get_phy_mode(ds->ports[QCA8K_CPU_PORT].dn);
- if (phy_mode < 0) {
+ ret = of_get_phy_mode(dsa_to_port(ds, QCA8K_CPU_PORT)->dn, &phy_mode);
+ if (ret) {
pr_err("Can't find phy-mode for master device\n");
- return phy_mode;
+ return ret;
}
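This hunk adapts to the new of_get_phy_mode() signature introduced elsewhere in this series: instead of returning the mode (or a negative errno) as an int, it now returns 0 or -errno and writes the phy_interface_t through a pointer, which is why phy_mode gains the explicit PHY_INTERFACE_MODE_NA initializer above.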
ret = qca8k_set_pad_ctrl(priv, QCA8K_CPU_PORT, phy_mode);
if (ret < 0)
@@ -1077,10 +1078,13 @@ qca8k_sw_probe(struct mdio_device *mdiodev)
if (id != QCA8K_ID_QCA8337)
return -ENODEV;
- priv->ds = dsa_switch_alloc(&mdiodev->dev, QCA8K_NUM_PORTS);
+ priv->ds = devm_kzalloc(&mdiodev->dev, sizeof(*priv->ds),
+ GFP_KERNEL);
if (!priv->ds)
return -ENOMEM;
+ priv->ds->dev = &mdiodev->dev;
+ priv->ds->num_ports = QCA8K_NUM_PORTS;
priv->ds->priv = priv;
priv->ops = qca8k_switch_ops;
priv->ds->ops = &priv->ops;
diff --git a/drivers/net/dsa/realtek-smi-core.c b/drivers/net/dsa/realtek-smi-core.c
index dc0509c02d29..fae188c60191 100644
--- a/drivers/net/dsa/realtek-smi-core.c
+++ b/drivers/net/dsa/realtek-smi-core.c
@@ -444,9 +444,12 @@ static int realtek_smi_probe(struct platform_device *pdev)
return ret;
}
- smi->ds = dsa_switch_alloc(dev, smi->num_ports);
+ smi->ds = devm_kzalloc(dev, sizeof(*smi->ds), GFP_KERNEL);
if (!smi->ds)
return -ENOMEM;
+
+ smi->ds->dev = dev;
+ smi->ds->num_ports = smi->num_ports;
smi->ds->priv = smi;
smi->ds->ops = var->ds_ops;
diff --git a/drivers/net/dsa/sja1105/Kconfig b/drivers/net/dsa/sja1105/Kconfig
index ffac0ea4e8d5..0fe1ae173aa1 100644
--- a/drivers/net/dsa/sja1105/Kconfig
+++ b/drivers/net/dsa/sja1105/Kconfig
@@ -28,6 +28,7 @@ config NET_DSA_SJA1105_TAS
bool "Support for the Time-Aware Scheduler on NXP SJA1105"
depends on NET_DSA_SJA1105 && NET_SCH_TAPRIO
depends on NET_SCH_TAPRIO=y || NET_DSA_SJA1105=m
+ depends on NET_DSA_SJA1105_PTP
help
This enables support for the TTEthernet-based egress scheduling
engine in the SJA1105 DSA driver, which is controlled using a
diff --git a/drivers/net/dsa/sja1105/sja1105.h b/drivers/net/dsa/sja1105/sja1105.h
index fbb564c3beb8..d801fc204d19 100644
--- a/drivers/net/dsa/sja1105/sja1105.h
+++ b/drivers/net/dsa/sja1105/sja1105.h
@@ -20,7 +20,13 @@
*/
#define SJA1105_AGEING_TIME_MS(ms) ((ms) / 10)
+typedef enum {
+ SPI_READ = 0,
+ SPI_WRITE = 1,
+} sja1105_spi_rw_mode_t;
+
#include "sja1105_tas.h"
+#include "sja1105_ptp.h"
/* Keeps the different addresses between E/T and P/Q/R/S */
struct sja1105_regs {
@@ -32,9 +38,10 @@ struct sja1105_regs {
u64 config;
u64 rmii_pll1;
u64 ptp_control;
- u64 ptpclk;
+ u64 ptpclkval;
u64 ptpclkrate;
- u64 ptptsclk;
+ u64 ptpclkcorp;
+ u64 ptpschtm;
u64 ptpegr_ts[SJA1105_NUM_PORTS];
u64 pad_mii_tx[SJA1105_NUM_PORTS];
u64 pad_mii_id[SJA1105_NUM_PORTS];
@@ -71,14 +78,15 @@ struct sja1105_info {
const struct sja1105_dynamic_table_ops *dyn_ops;
const struct sja1105_table_ops *static_ops;
const struct sja1105_regs *regs;
- int (*ptp_cmd)(const void *ctx, const void *data);
- int (*reset_cmd)(const void *ctx, const void *data);
+ int (*reset_cmd)(struct dsa_switch *ds);
int (*setup_rgmii_delay)(const void *ctx, int port);
/* Prototypes from include/net/dsa.h */
int (*fdb_add_cmd)(struct dsa_switch *ds, int port,
const unsigned char *addr, u16 vid);
int (*fdb_del_cmd)(struct dsa_switch *ds, int port,
const unsigned char *addr, u16 vid);
+ void (*ptp_cmd_packing)(u8 *buf, struct sja1105_ptp_cmd *cmd,
+ enum packing_op op);
const char *name;
};
@@ -91,26 +99,16 @@ struct sja1105_private {
struct spi_device *spidev;
struct dsa_switch *ds;
struct sja1105_port ports[SJA1105_NUM_PORTS];
- struct ptp_clock_info ptp_caps;
- struct ptp_clock *clock;
- /* The cycle counter translates the PTP timestamps (based on
- * a free-running counter) into a software time domain.
- */
- struct cyclecounter tstamp_cc;
- struct timecounter tstamp_tc;
- struct delayed_work refresh_work;
- /* Serializes all operations on the cycle counter */
- struct mutex ptp_lock;
/* Serializes transmission of management frames so that
* the switch doesn't confuse them with one another.
*/
struct mutex mgmt_lock;
struct sja1105_tagger_data tagger_data;
+ struct sja1105_ptp_data ptp_data;
struct sja1105_tas_data tas_data;
};
#include "sja1105_dynamic_config.h"
-#include "sja1105_ptp.h"
struct sja1105_spi_message {
u64 access;
@@ -118,24 +116,27 @@ struct sja1105_spi_message {
u64 address;
};
-typedef enum {
- SPI_READ = 0,
- SPI_WRITE = 1,
-} sja1105_spi_rw_mode_t;
-
/* From sja1105_main.c */
-int sja1105_static_config_reload(struct sja1105_private *priv);
+enum sja1105_reset_reason {
+ SJA1105_VLAN_FILTERING = 0,
+ SJA1105_RX_HWTSTAMPING,
+ SJA1105_AGEING_TIME,
+ SJA1105_SCHEDULING,
+};
+
+int sja1105_static_config_reload(struct sja1105_private *priv,
+ enum sja1105_reset_reason reason);
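Callers of sja1105_static_config_reload() now state why the static config is being rewritten, for example (hypothetical call site):

    rc = sja1105_static_config_reload(priv, SJA1105_AGEING_TIME);

The enum gives the reset path context about what triggered the reload; how each reason is acted upon lives in sja1105_main.c and is not shown in this hunk.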
/* From sja1105_spi.c */
-int sja1105_spi_send_packed_buf(const struct sja1105_private *priv,
- sja1105_spi_rw_mode_t rw, u64 reg_addr,
- void *packed_buf, size_t size_bytes);
-int sja1105_spi_send_int(const struct sja1105_private *priv,
- sja1105_spi_rw_mode_t rw, u64 reg_addr,
- u64 *value, u64 size_bytes);
-int sja1105_spi_send_long_packed_buf(const struct sja1105_private *priv,
- sja1105_spi_rw_mode_t rw, u64 base_addr,
- void *packed_buf, u64 buf_len);
+int sja1105_xfer_buf(const struct sja1105_private *priv,
+ sja1105_spi_rw_mode_t rw, u64 reg_addr,
+ u8 *buf, size_t len);
+int sja1105_xfer_u32(const struct sja1105_private *priv,
+ sja1105_spi_rw_mode_t rw, u64 reg_addr, u32 *value,
+ struct ptp_system_timestamp *ptp_sts);
+int sja1105_xfer_u64(const struct sja1105_private *priv,
+ sja1105_spi_rw_mode_t rw, u64 reg_addr, u64 *value,
+ struct ptp_system_timestamp *ptp_sts);
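A hedged sketch of the renamed SPI helpers in use, with the register name taken from the struct sja1105_regs update above:

    u32 val;
    int rc;

    rc = sja1105_xfer_u32(priv, SPI_READ, priv->info->regs->ptpclkval,
    			  &val, NULL);

The new ptp_system_timestamp argument, when non-NULL, lets a caller snapshot system time immediately around the SPI transfer, as gettimex-style PTP clock readout requires.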
int sja1105_static_config_upload(struct sja1105_private *priv);
int sja1105_inhibit_tx(const struct sja1105_private *priv,
unsigned long port_bitmap, bool tx_inhibited);
diff --git a/drivers/net/dsa/sja1105/sja1105_clocking.c b/drivers/net/dsa/sja1105/sja1105_clocking.c
index 608126a15d72..9082e52b55e9 100644
--- a/drivers/net/dsa/sja1105/sja1105_clocking.c
+++ b/drivers/net/dsa/sja1105/sja1105_clocking.c
@@ -118,9 +118,8 @@ static int sja1105_cgu_idiv_config(struct sja1105_private *priv, int port,
idiv.pd = enabled ? 0 : 1; /* Power down? */
sja1105_cgu_idiv_packing(packed_buf, &idiv, PACK);
- return sja1105_spi_send_packed_buf(priv, SPI_WRITE,
- regs->cgu_idiv[port], packed_buf,
- SJA1105_SIZE_CGU_CMD);
+ return sja1105_xfer_buf(priv, SPI_WRITE, regs->cgu_idiv[port],
+ packed_buf, SJA1105_SIZE_CGU_CMD);
}
static void
@@ -167,9 +166,8 @@ static int sja1105_cgu_mii_tx_clk_config(struct sja1105_private *priv,
mii_tx_clk.pd = 0; /* Power Down off => enabled */
sja1105_cgu_mii_control_packing(packed_buf, &mii_tx_clk, PACK);
- return sja1105_spi_send_packed_buf(priv, SPI_WRITE,
- regs->mii_tx_clk[port], packed_buf,
- SJA1105_SIZE_CGU_CMD);
+ return sja1105_xfer_buf(priv, SPI_WRITE, regs->mii_tx_clk[port],
+ packed_buf, SJA1105_SIZE_CGU_CMD);
}
static int
@@ -192,9 +190,8 @@ sja1105_cgu_mii_rx_clk_config(struct sja1105_private *priv, int port)
mii_rx_clk.pd = 0; /* Power Down off => enabled */
sja1105_cgu_mii_control_packing(packed_buf, &mii_rx_clk, PACK);
- return sja1105_spi_send_packed_buf(priv, SPI_WRITE,
- regs->mii_rx_clk[port], packed_buf,
- SJA1105_SIZE_CGU_CMD);
+ return sja1105_xfer_buf(priv, SPI_WRITE, regs->mii_rx_clk[port],
+ packed_buf, SJA1105_SIZE_CGU_CMD);
}
static int
@@ -217,9 +214,8 @@ sja1105_cgu_mii_ext_tx_clk_config(struct sja1105_private *priv, int port)
mii_ext_tx_clk.pd = 0; /* Power Down off => enabled */
sja1105_cgu_mii_control_packing(packed_buf, &mii_ext_tx_clk, PACK);
- return sja1105_spi_send_packed_buf(priv, SPI_WRITE,
- regs->mii_ext_tx_clk[port],
- packed_buf, SJA1105_SIZE_CGU_CMD);
+ return sja1105_xfer_buf(priv, SPI_WRITE, regs->mii_ext_tx_clk[port],
+ packed_buf, SJA1105_SIZE_CGU_CMD);
}
static int
@@ -242,9 +238,8 @@ sja1105_cgu_mii_ext_rx_clk_config(struct sja1105_private *priv, int port)
mii_ext_rx_clk.pd = 0; /* Power Down off => enabled */
sja1105_cgu_mii_control_packing(packed_buf, &mii_ext_rx_clk, PACK);
- return sja1105_spi_send_packed_buf(priv, SPI_WRITE,
- regs->mii_ext_rx_clk[port],
- packed_buf, SJA1105_SIZE_CGU_CMD);
+ return sja1105_xfer_buf(priv, SPI_WRITE, regs->mii_ext_rx_clk[port],
+ packed_buf, SJA1105_SIZE_CGU_CMD);
}
static int sja1105_mii_clocking_setup(struct sja1105_private *priv, int port,
@@ -337,9 +332,8 @@ static int sja1105_cgu_rgmii_tx_clk_config(struct sja1105_private *priv,
txc.pd = 0;
sja1105_cgu_mii_control_packing(packed_buf, &txc, PACK);
- return sja1105_spi_send_packed_buf(priv, SPI_WRITE,
- regs->rgmii_tx_clk[port],
- packed_buf, SJA1105_SIZE_CGU_CMD);
+ return sja1105_xfer_buf(priv, SPI_WRITE, regs->rgmii_tx_clk[port],
+ packed_buf, SJA1105_SIZE_CGU_CMD);
}
/* AGU */
@@ -383,9 +377,8 @@ static int sja1105_rgmii_cfg_pad_tx_config(struct sja1105_private *priv,
pad_mii_tx.clk_ipud = 2; /* TX_CLK input stage (default) */
sja1105_cfg_pad_mii_tx_packing(packed_buf, &pad_mii_tx, PACK);
- return sja1105_spi_send_packed_buf(priv, SPI_WRITE,
- regs->pad_mii_tx[port],
- packed_buf, SJA1105_SIZE_CGU_CMD);
+ return sja1105_xfer_buf(priv, SPI_WRITE, regs->pad_mii_tx[port],
+ packed_buf, SJA1105_SIZE_CGU_CMD);
}
static void
@@ -405,7 +398,7 @@ sja1105_cfg_pad_mii_id_packing(void *buf, struct sja1105_cfg_pad_mii_id *cmd,
}
/* The valid delay range is 73.8 to 101.7 degrees, in 0.9 degree steps */
-static inline u64 sja1105_rgmii_delay(u64 phase)
+static u64 sja1105_rgmii_delay(u64 phase)
{
/* UM11040.pdf: The delay in degree phase is 73.8 + delay_tune * 0.9.
* To avoid floating point operations we'll multiply by 10
@@ -442,9 +435,8 @@ int sja1105pqrs_setup_rgmii_delay(const void *ctx, int port)
pad_mii_id.txc_pd = 1;
sja1105_cfg_pad_mii_id_packing(packed_buf, &pad_mii_id, PACK);
- rc = sja1105_spi_send_packed_buf(priv, SPI_WRITE,
- regs->pad_mii_id[port],
- packed_buf, SJA1105_SIZE_CGU_CMD);
+ rc = sja1105_xfer_buf(priv, SPI_WRITE, regs->pad_mii_id[port],
+ packed_buf, SJA1105_SIZE_CGU_CMD);
if (rc < 0)
return rc;
@@ -459,9 +451,8 @@ int sja1105pqrs_setup_rgmii_delay(const void *ctx, int port)
}
sja1105_cfg_pad_mii_id_packing(packed_buf, &pad_mii_id, PACK);
- return sja1105_spi_send_packed_buf(priv, SPI_WRITE,
- regs->pad_mii_id[port],
- packed_buf, SJA1105_SIZE_CGU_CMD);
+ return sja1105_xfer_buf(priv, SPI_WRITE, regs->pad_mii_id[port],
+ packed_buf, SJA1105_SIZE_CGU_CMD);
}
static int sja1105_rgmii_clocking_setup(struct sja1105_private *priv, int port,
@@ -547,9 +538,8 @@ static int sja1105_cgu_rmii_ref_clk_config(struct sja1105_private *priv,
ref_clk.pd = 0; /* Power Down off => enabled */
sja1105_cgu_mii_control_packing(packed_buf, &ref_clk, PACK);
- return sja1105_spi_send_packed_buf(priv, SPI_WRITE,
- regs->rmii_ref_clk[port],
- packed_buf, SJA1105_SIZE_CGU_CMD);
+ return sja1105_xfer_buf(priv, SPI_WRITE, regs->rmii_ref_clk[port],
+ packed_buf, SJA1105_SIZE_CGU_CMD);
}
static int
@@ -565,9 +555,8 @@ sja1105_cgu_rmii_ext_tx_clk_config(struct sja1105_private *priv, int port)
ext_tx_clk.pd = 0; /* Power Down off => enabled */
sja1105_cgu_mii_control_packing(packed_buf, &ext_tx_clk, PACK);
- return sja1105_spi_send_packed_buf(priv, SPI_WRITE,
- regs->rmii_ext_tx_clk[port],
- packed_buf, SJA1105_SIZE_CGU_CMD);
+ return sja1105_xfer_buf(priv, SPI_WRITE, regs->rmii_ext_tx_clk[port],
+ packed_buf, SJA1105_SIZE_CGU_CMD);
}
static int sja1105_cgu_rmii_pll_config(struct sja1105_private *priv)
@@ -595,8 +584,8 @@ static int sja1105_cgu_rmii_pll_config(struct sja1105_private *priv)
pll.pd = 0x1;
sja1105_cgu_pll_control_packing(packed_buf, &pll, PACK);
- rc = sja1105_spi_send_packed_buf(priv, SPI_WRITE, regs->rmii_pll1,
- packed_buf, SJA1105_SIZE_CGU_CMD);
+ rc = sja1105_xfer_buf(priv, SPI_WRITE, regs->rmii_pll1, packed_buf,
+ SJA1105_SIZE_CGU_CMD);
if (rc < 0) {
dev_err(dev, "failed to configure PLL1 for 50MHz\n");
return rc;
@@ -606,8 +595,8 @@ static int sja1105_cgu_rmii_pll_config(struct sja1105_private *priv)
pll.pd = 0x0;
sja1105_cgu_pll_control_packing(packed_buf, &pll, PACK);
- rc = sja1105_spi_send_packed_buf(priv, SPI_WRITE, regs->rmii_pll1,
- packed_buf, SJA1105_SIZE_CGU_CMD);
+ rc = sja1105_xfer_buf(priv, SPI_WRITE, regs->rmii_pll1, packed_buf,
+ SJA1105_SIZE_CGU_CMD);
if (rc < 0) {
dev_err(dev, "failed to enable PLL1\n");
return rc;
diff --git a/drivers/net/dsa/sja1105/sja1105_dynamic_config.c b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
index 91da430045ff..25381bd65ed7 100644
--- a/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
+++ b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
@@ -686,8 +686,8 @@ int sja1105_dynamic_config_read(struct sja1105_private *priv,
ops->entry_packing(packed_buf, entry, PACK);
/* Send SPI write operation: read config table entry */
- rc = sja1105_spi_send_packed_buf(priv, SPI_WRITE, ops->addr,
- packed_buf, ops->packed_size);
+ rc = sja1105_xfer_buf(priv, SPI_WRITE, ops->addr, packed_buf,
+ ops->packed_size);
if (rc < 0)
return rc;
@@ -698,8 +698,8 @@ int sja1105_dynamic_config_read(struct sja1105_private *priv,
memset(packed_buf, 0, ops->packed_size);
/* Retrieve the read operation's result */
- rc = sja1105_spi_send_packed_buf(priv, SPI_READ, ops->addr,
- packed_buf, ops->packed_size);
+ rc = sja1105_xfer_buf(priv, SPI_READ, ops->addr, packed_buf,
+ ops->packed_size);
if (rc < 0)
return rc;
@@ -771,8 +771,8 @@ int sja1105_dynamic_config_write(struct sja1105_private *priv,
ops->entry_packing(packed_buf, entry, PACK);
/* Send SPI write operation: read config table entry */
- rc = sja1105_spi_send_packed_buf(priv, SPI_WRITE, ops->addr,
- packed_buf, ops->packed_size);
+ rc = sja1105_xfer_buf(priv, SPI_WRITE, ops->addr, packed_buf,
+ ops->packed_size);
if (rc < 0)
return rc;
diff --git a/drivers/net/dsa/sja1105/sja1105_ethtool.c b/drivers/net/dsa/sja1105/sja1105_ethtool.c
index ab581a28cd41..064301cc7d5b 100644
--- a/drivers/net/dsa/sja1105/sja1105_ethtool.c
+++ b/drivers/net/dsa/sja1105/sja1105_ethtool.c
@@ -167,8 +167,8 @@ static int sja1105_port_status_get_mac(struct sja1105_private *priv,
int rc;
/* MAC area */
- rc = sja1105_spi_send_packed_buf(priv, SPI_READ, regs->mac[port],
- packed_buf, SJA1105_SIZE_MAC_AREA);
+ rc = sja1105_xfer_buf(priv, SPI_READ, regs->mac[port], packed_buf,
+ SJA1105_SIZE_MAC_AREA);
if (rc < 0)
return rc;
@@ -185,8 +185,8 @@ static int sja1105_port_status_get_hl1(struct sja1105_private *priv,
u8 packed_buf[SJA1105_SIZE_HL1_AREA] = {0};
int rc;
- rc = sja1105_spi_send_packed_buf(priv, SPI_READ, regs->mac_hl1[port],
- packed_buf, SJA1105_SIZE_HL1_AREA);
+ rc = sja1105_xfer_buf(priv, SPI_READ, regs->mac_hl1[port], packed_buf,
+ SJA1105_SIZE_HL1_AREA);
if (rc < 0)
return rc;
@@ -203,8 +203,8 @@ static int sja1105_port_status_get_hl2(struct sja1105_private *priv,
u8 packed_buf[SJA1105_SIZE_QLEVEL_AREA] = {0};
int rc;
- rc = sja1105_spi_send_packed_buf(priv, SPI_READ, regs->mac_hl2[port],
- packed_buf, SJA1105_SIZE_HL2_AREA);
+ rc = sja1105_xfer_buf(priv, SPI_READ, regs->mac_hl2[port], packed_buf,
+ SJA1105_SIZE_HL2_AREA);
if (rc < 0)
return rc;
@@ -215,8 +215,8 @@ static int sja1105_port_status_get_hl2(struct sja1105_private *priv,
priv->info->device_id == SJA1105T_DEVICE_ID)
return 0;
- rc = sja1105_spi_send_packed_buf(priv, SPI_READ, regs->qlevel[port],
- packed_buf, SJA1105_SIZE_QLEVEL_AREA);
+ rc = sja1105_xfer_buf(priv, SPI_READ, regs->qlevel[port], packed_buf,
+ SJA1105_SIZE_QLEVEL_AREA);
if (rc < 0)
return rc;
diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
index 7687ddcae159..a51ac088c0bc 100644
--- a/drivers/net/dsa/sja1105/sja1105_main.c
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
@@ -382,8 +382,8 @@ static int sja1105_init_l2_forwarding_params(struct sja1105_private *priv)
static int sja1105_init_general_params(struct sja1105_private *priv)
{
struct sja1105_general_params_entry default_general_params = {
- /* Disallow dynamic changing of the mirror port */
- .mirr_ptacu = 0,
+ /* Allow dynamic changing of the mirror port */
+ .mirr_ptacu = true,
.switchid = priv->ds->index,
/* Priority queue for link-local management frames
* (both ingress to and egress from CPU - PTP, STP etc)
@@ -403,8 +403,8 @@ static int sja1105_init_general_params(struct sja1105_private *priv)
* by installing a temporary 'management route'
*/
.host_port = dsa_upstream_port(priv->ds, 0),
- /* Same as host port */
- .mirr_port = dsa_upstream_port(priv->ds, 0),
+ /* Default to an invalid value: an out-of-range port number means
+ * that mirroring is disabled until a mirror port is configured.
+ */
+ .mirr_port = SJA1105_NUM_PORTS,
/* Link-local traffic received on casc_port will be forwarded
* to host_port without embedding the source port and device ID
* info in the destination MAC address (presumably because it
@@ -458,9 +458,8 @@ static int sja1105_init_general_params(struct sja1105_private *priv)
#define SJA1105_RATE_MBPS(speed) (((speed) * 64000) / 1000)
-static inline void
-sja1105_setup_policer(struct sja1105_l2_policing_entry *policing,
- int index)
+static void sja1105_setup_policer(struct sja1105_l2_policing_entry *policing,
+ int index)
{
policing[index].sharindx = index;
policing[index].smax = 65535; /* Burst size in bytes */
@@ -507,39 +506,6 @@ static int sja1105_init_l2_policing(struct sja1105_private *priv)
return 0;
}
-static int sja1105_init_avb_params(struct sja1105_private *priv,
- bool on)
-{
- struct sja1105_avb_params_entry *avb;
- struct sja1105_table *table;
-
- table = &priv->static_config.tables[BLK_IDX_AVB_PARAMS];
-
- /* Discard previous AVB Parameters Table */
- if (table->entry_count) {
- kfree(table->entries);
- table->entry_count = 0;
- }
-
- /* Configure the reception of meta frames only if requested */
- if (!on)
- return 0;
-
- table->entries = kcalloc(SJA1105_MAX_AVB_PARAMS_COUNT,
- table->ops->unpacked_entry_size, GFP_KERNEL);
- if (!table->entries)
- return -ENOMEM;
-
- table->entry_count = SJA1105_MAX_AVB_PARAMS_COUNT;
-
- avb = table->entries;
-
- avb->destmeta = SJA1105_META_DMAC;
- avb->srcmeta = SJA1105_META_SMAC;
-
- return 0;
-}
-
static int sja1105_static_config_load(struct sja1105_private *priv,
struct sja1105_dt_port *ports)
{
@@ -580,9 +546,6 @@ static int sja1105_static_config_load(struct sja1105_private *priv,
rc = sja1105_init_general_params(priv);
if (rc < 0)
return rc;
- rc = sja1105_init_avb_params(priv, false);
- if (rc < 0)
- return rc;
/* Send initial configuration to hardware via SPI */
return sja1105_static_config_upload(priv);
@@ -594,15 +557,15 @@ static int sja1105_parse_rgmii_delays(struct sja1105_private *priv,
int i;
for (i = 0; i < SJA1105_NUM_PORTS; i++) {
- if (ports->role == XMII_MAC)
+ if (ports[i].role == XMII_MAC)
continue;
- if (ports->phy_mode == PHY_INTERFACE_MODE_RGMII_RXID ||
- ports->phy_mode == PHY_INTERFACE_MODE_RGMII_ID)
+ if (ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_RXID ||
+ ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_ID)
priv->rgmii_rx_delay[i] = true;
- if (ports->phy_mode == PHY_INTERFACE_MODE_RGMII_TXID ||
- ports->phy_mode == PHY_INTERFACE_MODE_RGMII_ID)
+ if (ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_TXID ||
+ ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_ID)
priv->rgmii_tx_delay[i] = true;
if ((priv->rgmii_rx_delay[i] || priv->rgmii_tx_delay[i]) &&
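
The hunk above is a real bug fix rather than a cleanup: the loop iterates i over all SJA1105_NUM_PORTS entries, but the old code dereferenced ports->role and ports->phy_mode, i.e. always element 0, so the RGMII delay flags of every port were decided by the first port's device tree properties. A minimal standalone sketch of this bug class, with invented data:

#include <stdio.h>

struct port { int role; };

int main(void)
{
	struct port ports[3] = { { 1 }, { 2 }, { 3 } };

	for (int i = 0; i < 3; i++)
		printf("buggy=%d fixed=%d\n",
		       ports->role,      /* always ports[0].role */
		       ports[i].role);   /* what the loop intended */
	return 0;
}
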
@@ -621,8 +584,9 @@ static int sja1105_parse_ports_node(struct sja1105_private *priv,
for_each_child_of_node(ports_node, child) {
struct device_node *phy_node;
- int phy_mode;
+ phy_interface_t phy_mode;
u32 index;
+ int err;
/* Get switch port number from DT */
if (of_property_read_u32(child, "reg", &index) < 0) {
@@ -633,8 +597,8 @@ static int sja1105_parse_ports_node(struct sja1105_private *priv,
}
/* Get PHY mode from DT */
- phy_mode = of_get_phy_mode(child);
- if (phy_mode < 0) {
+ err = of_get_phy_mode(child, &phy_mode);
+ if (err) {
dev_err(dev, "Failed to read phy-mode or "
"phy-interface-type property for port %d\n",
index);
@@ -951,7 +915,7 @@ sja1105_static_fdb_change(struct sja1105_private *priv, int port,
* For the placement of a newly learnt FDB entry, the switch selects the bin
* based on a hash function, and the way within that bin incrementally.
*/
-static inline int sja1105et_fdb_index(int bin, int way)
+static int sja1105et_fdb_index(int bin, int way)
{
return bin * SJA1105ET_FDB_BIN_SIZE + way;
}
@@ -1095,7 +1059,7 @@ int sja1105pqrs_fdb_add(struct dsa_switch *ds, int port,
l2_lookup.vlanid = vid;
l2_lookup.iotag = SJA1105_S_TAG;
l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0);
- if (dsa_port_is_vlan_filtering(&ds->ports[port])) {
+ if (dsa_port_is_vlan_filtering(dsa_to_port(ds, port))) {
l2_lookup.mask_vlanid = VLAN_VID_MASK;
l2_lookup.mask_iotag = BIT(0);
} else {
@@ -1158,7 +1122,7 @@ int sja1105pqrs_fdb_del(struct dsa_switch *ds, int port,
l2_lookup.vlanid = vid;
l2_lookup.iotag = SJA1105_S_TAG;
l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0);
- if (dsa_port_is_vlan_filtering(&ds->ports[port])) {
+ if (dsa_port_is_vlan_filtering(dsa_to_port(ds, port))) {
l2_lookup.mask_vlanid = VLAN_VID_MASK;
l2_lookup.mask_iotag = BIT(0);
} else {
@@ -1204,7 +1168,7 @@ static int sja1105_fdb_add(struct dsa_switch *ds, int port,
* for what gets printed in 'bridge fdb show'. In the case of zero,
* no VID gets printed at all.
*/
- if (!dsa_port_is_vlan_filtering(&ds->ports[port]))
+ if (!dsa_port_is_vlan_filtering(dsa_to_port(ds, port)))
vid = 0;
return priv->info->fdb_add_cmd(ds, port, addr, vid);
@@ -1215,7 +1179,7 @@ static int sja1105_fdb_del(struct dsa_switch *ds, int port,
{
struct sja1105_private *priv = ds->priv;
- if (!dsa_port_is_vlan_filtering(&ds->ports[port]))
+ if (!dsa_port_is_vlan_filtering(dsa_to_port(ds, port)))
vid = 0;
return priv->info->fdb_del_cmd(ds, port, addr, vid);
@@ -1254,7 +1218,7 @@ static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
u64_to_ether_addr(l2_lookup.macaddr, macaddr);
/* We need to hide the dsa_8021q VLANs from the user. */
- if (!dsa_port_is_vlan_filtering(&ds->ports[port]))
+ if (!dsa_port_is_vlan_filtering(dsa_to_port(ds, port)))
l2_lookup.vlanid = 0;
cb(macaddr, l2_lookup.vlanid, l2_lookup.lockeds, data);
}
@@ -1377,17 +1341,33 @@ static void sja1105_bridge_leave(struct dsa_switch *ds, int port,
sja1105_bridge_member(ds, port, br, false);
}
+static const char * const sja1105_reset_reasons[] = {
+ [SJA1105_VLAN_FILTERING] = "VLAN filtering",
+ [SJA1105_RX_HWTSTAMPING] = "RX timestamping",
+ [SJA1105_AGEING_TIME] = "Ageing time",
+ [SJA1105_SCHEDULING] = "Time-aware scheduling",
+};
+
/* For situations where we need to change a setting at runtime that is only
* available through the static configuration, resetting the switch in order
* to upload the new static config is unavoidable. Back up the settings we
* modify at runtime (currently only MAC) and restore them after uploading,
* such that this operation is relatively seamless.
*/
-int sja1105_static_config_reload(struct sja1105_private *priv)
+int sja1105_static_config_reload(struct sja1105_private *priv,
+ enum sja1105_reset_reason reason)
{
+ struct ptp_system_timestamp ptp_sts_before;
+ struct ptp_system_timestamp ptp_sts_after;
struct sja1105_mac_config_entry *mac;
int speed_mbps[SJA1105_NUM_PORTS];
+ struct dsa_switch *ds = priv->ds;
+ s64 t1, t2, t3, t4;
+ s64 t12, t34;
int rc, i;
+ s64 now;
+
+ mutex_lock(&priv->mgmt_lock);
mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
@@ -1401,10 +1381,41 @@ int sja1105_static_config_reload(struct sja1105_private *priv)
mac[i].speed = SJA1105_SPEED_AUTO;
}
+ /* No PTP operations can run right now */
+ mutex_lock(&priv->ptp_data.lock);
+
+ rc = __sja1105_ptp_gettimex(ds, &now, &ptp_sts_before);
+ if (rc < 0)
+ goto out_unlock_ptp;
+
/* Reset switch and send updated static configuration */
rc = sja1105_static_config_upload(priv);
if (rc < 0)
- goto out;
+ goto out_unlock_ptp;
+
+ rc = __sja1105_ptp_settime(ds, 0, &ptp_sts_after);
+ if (rc < 0)
+ goto out_unlock_ptp;
+
+ t1 = timespec64_to_ns(&ptp_sts_before.pre_ts);
+ t2 = timespec64_to_ns(&ptp_sts_before.post_ts);
+ t3 = timespec64_to_ns(&ptp_sts_after.pre_ts);
+ t4 = timespec64_to_ns(&ptp_sts_after.post_ts);
+ /* Mid point, corresponds to pre-reset PTPCLKVAL */
+ t12 = t1 + (t2 - t1) / 2;
+ /* Mid point, corresponds to post-reset PTPCLKVAL, aka 0 */
+ t34 = t3 + (t4 - t3) / 2;
+ /* Advance PTPCLKVAL by the time it took since its readout */
+ now += (t34 - t12);
+
+ __sja1105_ptp_adjtime(ds, now);
+
+out_unlock_ptp:
+ mutex_unlock(&priv->ptp_data.lock);
+
+ dev_info(priv->ds->dev,
+ "Reset switch and programmed static config. Reason: %s\n",
+ sja1105_reset_reasons[reason]);
/* Configure the CGU (PLLs) for MII and RMII PHYs.
* For these interfaces there is no dynamic configuration
@@ -1420,6 +1431,8 @@ int sja1105_static_config_reload(struct sja1105_private *priv)
goto out;
}
out:
+ mutex_unlock(&priv->mgmt_lock);
+
return rc;
}
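
The t1..t4 bookkeeping above compensates the PHC for the time the switch spends in reset: both PTPCLKVAL accesses are bracketed by system-time snapshots (pre_ts/post_ts), the midpoint of each bracket approximates the instant the register access actually hit the hardware, and the difference between the two midpoints is the dead time to add back. A worked sketch with invented numbers:

	/* Invented values, all in ns of system time:
	 *   pre-reset read of PTPCLKVAL returns now = 1000000,
	 *   bracketed by t1 = 5000 and t2 = 5200;
	 *   post-reset write of PTPCLKVAL = 0,
	 *   bracketed by t3 = 905000 and t4 = 905400.
	 */
	t12 = 5000 + (5200 - 5000) / 2;          /* 5100   */
	t34 = 905000 + (905400 - 905000) / 2;    /* 905200 */
	now = 1000000 + (t34 - t12);             /* 1900100: pre-reset time
						  * plus 900100 ns of reset
						  * dead time
						  */
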
@@ -1598,7 +1611,7 @@ static int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled)
l2_lookup_params = table->entries;
l2_lookup_params->shared_learn = !enabled;
- rc = sja1105_static_config_reload(priv);
+ rc = sja1105_static_config_reload(priv, SJA1105_VLAN_FILTERING);
if (rc)
dev_err(ds->dev, "Failed to change VLAN Ethertype\n");
@@ -1687,7 +1700,7 @@ static int sja1105_setup(struct dsa_switch *ds)
return rc;
}
- rc = sja1105_ptp_clock_register(priv);
+ rc = sja1105_ptp_clock_register(ds);
if (rc < 0) {
dev_err(ds->dev, "Failed to register PTP clock: %d\n", rc);
return rc;
@@ -1729,9 +1742,7 @@ static void sja1105_teardown(struct dsa_switch *ds)
struct sja1105_private *priv = ds->priv;
sja1105_tas_teardown(ds);
- cancel_work_sync(&priv->tagger_data.rxtstamp_work);
- skb_queue_purge(&priv->tagger_data.skb_rxtstamp_queue);
- sja1105_ptp_clock_unregister(priv);
+ sja1105_ptp_clock_unregister(ds);
sja1105_static_config_free(&priv->static_config);
}
@@ -1743,7 +1754,7 @@ static int sja1105_port_enable(struct dsa_switch *ds, int port,
if (!dsa_is_user_port(ds, port))
return 0;
- slave = ds->ports[port].slave;
+ slave = dsa_to_port(ds, port)->slave;
slave->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
@@ -1775,7 +1786,7 @@ static int sja1105_mgmt_xmit(struct dsa_switch *ds, int port, int slot,
}
/* Transfer skb to the host port. */
- dsa_enqueue_skb(skb, ds->ports[port].slave);
+ dsa_enqueue_skb(skb, dsa_to_port(ds, port)->slave);
/* Wait until the switch has processed the frame */
do {
@@ -1817,11 +1828,8 @@ static netdev_tx_t sja1105_port_deferred_xmit(struct dsa_switch *ds, int port,
{
struct sja1105_private *priv = ds->priv;
struct sja1105_port *sp = &priv->ports[port];
- struct skb_shared_hwtstamps shwt = {0};
int slot = sp->mgmt_slot;
struct sk_buff *clone;
- u64 now, ts;
- int rc;
/* The tragic fact about the switch having 4x2 slots for installing
* management routes is that all of them except one are actually
@@ -1847,27 +1855,8 @@ static netdev_tx_t sja1105_port_deferred_xmit(struct dsa_switch *ds, int port,
if (!clone)
goto out;
- skb_shinfo(clone)->tx_flags |= SKBTX_IN_PROGRESS;
-
- mutex_lock(&priv->ptp_lock);
+ sja1105_ptp_txtstamp_skb(ds, slot, clone);
- now = priv->tstamp_cc.read(&priv->tstamp_cc);
-
- rc = sja1105_ptpegr_ts_poll(priv, slot, &ts);
- if (rc < 0) {
- dev_err(ds->dev, "xmit: timed out polling for tstamp\n");
- kfree_skb(clone);
- goto out_unlock_ptp;
- }
-
- ts = sja1105_tstamp_reconstruct(priv, now, ts);
- ts = timecounter_cyc2time(&priv->tstamp_tc, ts);
-
- shwt.hwtstamp = ns_to_ktime(ts);
- skb_complete_tx_timestamp(clone, &shwt);
-
-out_unlock_ptp:
- mutex_unlock(&priv->ptp_lock);
out:
mutex_unlock(&priv->mgmt_lock);
return NETDEV_TX_OK;
@@ -1894,183 +1883,97 @@ static int sja1105_set_ageing_time(struct dsa_switch *ds,
l2_lookup_params->maxage = maxage;
- return sja1105_static_config_reload(priv);
+ return sja1105_static_config_reload(priv, SJA1105_AGEING_TIME);
}
-/* Must be called only with priv->tagger_data.state bit
- * SJA1105_HWTS_RX_EN cleared
+static int sja1105_port_setup_tc(struct dsa_switch *ds, int port,
+ enum tc_setup_type type,
+ void *type_data)
+{
+ switch (type) {
+ case TC_SETUP_QDISC_TAPRIO:
+ return sja1105_setup_tc_taprio(ds, port, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/* We have a single mirror (@to) port, but can configure ingress and egress
+ * mirroring on all other (@from) ports.
+ * We need to allow mirroring rules only as long as the @to port is always the
+ * same, and we need to unset the @to port from mirr_port only when there is no
+ * mirroring rule that references it.
*/
-static int sja1105_change_rxtstamping(struct sja1105_private *priv,
- bool on)
+static int sja1105_mirror_apply(struct sja1105_private *priv, int from, int to,
+ bool ingress, bool enabled)
{
struct sja1105_general_params_entry *general_params;
+ struct sja1105_mac_config_entry *mac;
struct sja1105_table *table;
+ bool already_enabled;
+ u64 new_mirr_port;
int rc;
table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
general_params = table->entries;
- general_params->send_meta1 = on;
- general_params->send_meta0 = on;
- rc = sja1105_init_avb_params(priv, on);
- if (rc < 0)
- return rc;
+ mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
- /* Initialize the meta state machine to a known state */
- if (priv->tagger_data.stampable_skb) {
- kfree_skb(priv->tagger_data.stampable_skb);
- priv->tagger_data.stampable_skb = NULL;
+ already_enabled = (general_params->mirr_port != SJA1105_NUM_PORTS);
+ if (already_enabled && enabled && general_params->mirr_port != to) {
+ dev_err(priv->ds->dev,
+ "Delete mirroring rules towards port %llu first\n",
+ general_params->mirr_port);
+ return -EBUSY;
}
- return sja1105_static_config_reload(priv);
-}
+ new_mirr_port = to;
+ if (!enabled) {
+ bool keep = false;
+ int port;
-static int sja1105_hwtstamp_set(struct dsa_switch *ds, int port,
- struct ifreq *ifr)
-{
- struct sja1105_private *priv = ds->priv;
- struct hwtstamp_config config;
- bool rx_on;
- int rc;
-
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
-
- switch (config.tx_type) {
- case HWTSTAMP_TX_OFF:
- priv->ports[port].hwts_tx_en = false;
- break;
- case HWTSTAMP_TX_ON:
- priv->ports[port].hwts_tx_en = true;
- break;
- default:
- return -ERANGE;
- }
-
- switch (config.rx_filter) {
- case HWTSTAMP_FILTER_NONE:
- rx_on = false;
- break;
- default:
- rx_on = true;
- break;
+ /* Anybody still referencing mirr_port? */
+ for (port = 0; port < SJA1105_NUM_PORTS; port++) {
+ if (mac[port].ing_mirr || mac[port].egr_mirr) {
+ keep = true;
+ break;
+ }
+ }
+ /* Unset already_enabled for next time */
+ if (!keep)
+ new_mirr_port = SJA1105_NUM_PORTS;
}
+ if (new_mirr_port != general_params->mirr_port) {
+ general_params->mirr_port = new_mirr_port;
- if (rx_on != test_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state)) {
- clear_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state);
-
- rc = sja1105_change_rxtstamping(priv, rx_on);
- if (rc < 0) {
- dev_err(ds->dev,
- "Failed to change RX timestamping: %d\n", rc);
+ rc = sja1105_dynamic_config_write(priv, BLK_IDX_GENERAL_PARAMS,
+ 0, general_params, true);
+ if (rc < 0)
return rc;
- }
- if (rx_on)
- set_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state);
}
- if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
- return -EFAULT;
- return 0;
-}
-
-static int sja1105_hwtstamp_get(struct dsa_switch *ds, int port,
- struct ifreq *ifr)
-{
- struct sja1105_private *priv = ds->priv;
- struct hwtstamp_config config;
-
- config.flags = 0;
- if (priv->ports[port].hwts_tx_en)
- config.tx_type = HWTSTAMP_TX_ON;
+ if (ingress)
+ mac[from].ing_mirr = enabled;
else
- config.tx_type = HWTSTAMP_TX_OFF;
- if (test_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state))
- config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
- else
- config.rx_filter = HWTSTAMP_FILTER_NONE;
-
- return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
-}
-
-#define to_tagger(d) \
- container_of((d), struct sja1105_tagger_data, rxtstamp_work)
-#define to_sja1105(d) \
- container_of((d), struct sja1105_private, tagger_data)
-
-static void sja1105_rxtstamp_work(struct work_struct *work)
-{
- struct sja1105_tagger_data *data = to_tagger(work);
- struct sja1105_private *priv = to_sja1105(data);
- struct sk_buff *skb;
- u64 now;
-
- mutex_lock(&priv->ptp_lock);
-
- while ((skb = skb_dequeue(&data->skb_rxtstamp_queue)) != NULL) {
- struct skb_shared_hwtstamps *shwt = skb_hwtstamps(skb);
- u64 ts;
-
- now = priv->tstamp_cc.read(&priv->tstamp_cc);
-
- *shwt = (struct skb_shared_hwtstamps) {0};
+ mac[from].egr_mirr = enabled;
- ts = SJA1105_SKB_CB(skb)->meta_tstamp;
- ts = sja1105_tstamp_reconstruct(priv, now, ts);
- ts = timecounter_cyc2time(&priv->tstamp_tc, ts);
-
- shwt->hwtstamp = ns_to_ktime(ts);
- netif_rx_ni(skb);
- }
-
- mutex_unlock(&priv->ptp_lock);
+ return sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, from,
+ &mac[from], true);
}
-/* Called from dsa_skb_defer_rx_timestamp */
-static bool sja1105_port_rxtstamp(struct dsa_switch *ds, int port,
- struct sk_buff *skb, unsigned int type)
+static int sja1105_mirror_add(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror,
+ bool ingress)
{
- struct sja1105_private *priv = ds->priv;
- struct sja1105_tagger_data *data = &priv->tagger_data;
-
- if (!test_bit(SJA1105_HWTS_RX_EN, &data->state))
- return false;
-
- /* We need to read the full PTP clock to reconstruct the Rx
- * timestamp. For that we need a sleepable context.
- */
- skb_queue_tail(&data->skb_rxtstamp_queue, skb);
- schedule_work(&data->rxtstamp_work);
- return true;
+ return sja1105_mirror_apply(ds->priv, port, mirror->to_local_port,
+ ingress, true);
}
-/* Called from dsa_skb_tx_timestamp. This callback is just to make DSA clone
- * the skb and have it available in DSA_SKB_CB in the .port_deferred_xmit
- * callback, where we will timestamp it synchronously.
- */
-static bool sja1105_port_txtstamp(struct dsa_switch *ds, int port,
- struct sk_buff *skb, unsigned int type)
+static void sja1105_mirror_del(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror)
{
- struct sja1105_private *priv = ds->priv;
- struct sja1105_port *sp = &priv->ports[port];
-
- if (!sp->hwts_tx_en)
- return false;
-
- return true;
-}
-
-static int sja1105_port_setup_tc(struct dsa_switch *ds, int port,
- enum tc_setup_type type,
- void *type_data)
-{
- switch (type) {
- case TC_SETUP_QDISC_TAPRIO:
- return sja1105_setup_tc_taprio(ds, port, type_data);
- default:
- return -EOPNOTSUPP;
- }
+ sja1105_mirror_apply(ds->priv, port, mirror->to_local_port,
+ mirror->ingress, false);
}
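
sja1105_mirror_apply() above encodes a hardware limitation: the switch has one global mirr_port, so every matchall mirror rule must target the same destination port, and the register is only released once no port has ing_mirr or egr_mirr set anymore. A condensed, simplified sketch of just that invariant, using a hypothetical pick_mirr_port() helper and, as in the driver, SJA1105_NUM_PORTS as the "unset" sentinel (the sketch assumes the rule being deleted has already been cleared from mac[] before the scan):

/* Returns the new mirr_port value, or -EBUSY. Sketch only. */
static int pick_mirr_port(const struct sja1105_mac_config_entry *mac,
			  int cur, int to, bool enabled)
{
	int port;

	if (enabled)	/* all rules must agree on the destination */
		return (cur != SJA1105_NUM_PORTS && cur != to) ? -EBUSY : to;

	for (port = 0; port < SJA1105_NUM_PORTS; port++)
		if (mac[port].ing_mirr || mac[port].egr_mirr)
			return cur;	/* someone still mirrors here */

	return SJA1105_NUM_PORTS;	/* last user gone: unset */
}
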
static const struct dsa_switch_ops sja1105_switch_ops = {
@@ -2106,6 +2009,8 @@ static const struct dsa_switch_ops sja1105_switch_ops = {
.port_rxtstamp = sja1105_port_rxtstamp,
.port_txtstamp = sja1105_port_txtstamp,
.port_setup_tc = sja1105_port_setup_tc,
+ .port_mirror_add = sja1105_mirror_add,
+ .port_mirror_del = sja1105_mirror_del,
};
static int sja1105_check_device_id(struct sja1105_private *priv)
@@ -2113,23 +2018,23 @@ static int sja1105_check_device_id(struct sja1105_private *priv)
const struct sja1105_regs *regs = priv->info->regs;
u8 prod_id[SJA1105_SIZE_DEVICE_ID] = {0};
struct device *dev = &priv->spidev->dev;
- u64 device_id;
+ u32 device_id;
u64 part_no;
int rc;
- rc = sja1105_spi_send_int(priv, SPI_READ, regs->device_id,
- &device_id, SJA1105_SIZE_DEVICE_ID);
+ rc = sja1105_xfer_u32(priv, SPI_READ, regs->device_id, &device_id,
+ NULL);
if (rc < 0)
return rc;
if (device_id != priv->info->device_id) {
- dev_err(dev, "Expected device ID 0x%llx but read 0x%llx\n",
+ dev_err(dev, "Expected device ID 0x%llx but read 0x%x\n",
priv->info->device_id, device_id);
return -ENODEV;
}
- rc = sja1105_spi_send_packed_buf(priv, SPI_READ, regs->prod_id,
- prod_id, SJA1105_SIZE_DEVICE_ID);
+ rc = sja1105_xfer_buf(priv, SPI_READ, regs->prod_id, prod_id,
+ SJA1105_SIZE_DEVICE_ID);
if (rc < 0)
return rc;
@@ -2193,32 +2098,37 @@ static int sja1105_probe(struct spi_device *spi)
dev_info(dev, "Probed switch chip: %s\n", priv->info->name);
- ds = dsa_switch_alloc(dev, SJA1105_NUM_PORTS);
+ ds = devm_kzalloc(dev, sizeof(*ds), GFP_KERNEL);
if (!ds)
return -ENOMEM;
+ ds->dev = dev;
+ ds->num_ports = SJA1105_NUM_PORTS;
ds->ops = &sja1105_switch_ops;
ds->priv = priv;
priv->ds = ds;
tagger_data = &priv->tagger_data;
- skb_queue_head_init(&tagger_data->skb_rxtstamp_queue);
- INIT_WORK(&tagger_data->rxtstamp_work, sja1105_rxtstamp_work);
- spin_lock_init(&tagger_data->meta_lock);
+
+ mutex_init(&priv->ptp_data.lock);
+ mutex_init(&priv->mgmt_lock);
+
+ sja1105_tas_setup(ds);
+
+ rc = dsa_register_switch(priv->ds);
+ if (rc)
+ return rc;
/* Connections between dsa_port and sja1105_port */
for (i = 0; i < SJA1105_NUM_PORTS; i++) {
struct sja1105_port *sp = &priv->ports[i];
- ds->ports[i].priv = sp;
- sp->dp = &ds->ports[i];
+ dsa_to_port(ds, i)->priv = sp;
+ sp->dp = dsa_to_port(ds, i);
sp->data = tagger_data;
}
- mutex_init(&priv->mgmt_lock);
-
- sja1105_tas_setup(ds);
- return dsa_register_switch(priv->ds);
+ return 0;
}
static int sja1105_remove(struct spi_device *spi)
diff --git a/drivers/net/dsa/sja1105/sja1105_ptp.c b/drivers/net/dsa/sja1105/sja1105_ptp.c
index d8e8dd59f3d1..54258a25031d 100644
--- a/drivers/net/dsa/sja1105/sja1105_ptp.c
+++ b/drivers/net/dsa/sja1105/sja1105_ptp.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019, Vladimir Oltean <olteanv@gmail.com>
*/
+#include <linux/spi/spi.h>
#include "sja1105.h"
/* The adjfine API clamps ppb between [-32,768,000, 32,768,000], and
@@ -13,24 +14,6 @@
#define SJA1105_MAX_ADJ_PPB 32000000
#define SJA1105_SIZE_PTP_CMD 4
-/* Timestamps are in units of 8 ns clock ticks (equivalent to a fixed
- * 125 MHz clock) so the scale factor (MULT / SHIFT) needs to be 8.
- * Furthermore, wisely pick SHIFT as 28 bits, which translates
- * MULT into 2^31 (0x80000000). This is the same value around which
- * the hardware PTPCLKRATE is centered, so the same ppb conversion
- * arithmetic can be reused.
- */
-#define SJA1105_CC_SHIFT 28
-#define SJA1105_CC_MULT (8 << SJA1105_CC_SHIFT)
-
-/* Having 33 bits of cycle counter left until a 64-bit overflow during delta
- * conversion, we multiply this by the 8 ns counter resolution and arrive at
- * a comfortable 68.71 second refresh interval until the delta would cause
- * an integer overflow, in absence of any other readout.
- * Approximate to 1 minute.
- */
-#define SJA1105_REFRESH_INTERVAL (HZ * 60)
-
/* This range is actually +/- SJA1105_MAX_ADJ_PPB
* divided by 1000 (ppb -> ppm) and with a 16-bit
* "fractional" part (actually fixed point).
@@ -41,7 +24,7 @@
*
* This forgoes a "ppb" numeric representation (up to NSEC_PER_SEC)
* and defines the scaling factor between scaled_ppm and the actual
- * frequency adjustments (both cycle counter and hardware).
+ * frequency adjustments of the PHC.
*
* ptpclkrate = scaled_ppm * 2^31 / (10^6 * 2^16)
* simplifies to
@@ -49,22 +32,154 @@
*/
#define SJA1105_CC_MULT_NUM (1 << 9)
#define SJA1105_CC_MULT_DEM 15625
+#define SJA1105_CC_MULT 0x80000000
-#define ptp_to_sja1105(d) container_of((d), struct sja1105_private, ptp_caps)
-#define cc_to_sja1105(d) container_of((d), struct sja1105_private, tstamp_cc)
-#define dw_to_sja1105(d) container_of((d), struct sja1105_private, refresh_work)
-
-struct sja1105_ptp_cmd {
- u64 resptp; /* reset */
+enum sja1105_ptp_clk_mode {
+ PTP_ADD_MODE = 1,
+ PTP_SET_MODE = 0,
};
+#define ptp_caps_to_data(d) \
+ container_of((d), struct sja1105_ptp_data, caps)
+#define ptp_data_to_sja1105(d) \
+ container_of((d), struct sja1105_private, ptp_data)
+
+static int sja1105_init_avb_params(struct sja1105_private *priv,
+ bool on)
+{
+ struct sja1105_avb_params_entry *avb;
+ struct sja1105_table *table;
+
+ table = &priv->static_config.tables[BLK_IDX_AVB_PARAMS];
+
+ /* Discard previous AVB Parameters Table */
+ if (table->entry_count) {
+ kfree(table->entries);
+ table->entry_count = 0;
+ }
+
+ /* Configure the reception of meta frames only if requested */
+ if (!on)
+ return 0;
+
+ table->entries = kcalloc(SJA1105_MAX_AVB_PARAMS_COUNT,
+ table->ops->unpacked_entry_size, GFP_KERNEL);
+ if (!table->entries)
+ return -ENOMEM;
+
+ table->entry_count = SJA1105_MAX_AVB_PARAMS_COUNT;
+
+ avb = table->entries;
+
+ avb->destmeta = SJA1105_META_DMAC;
+ avb->srcmeta = SJA1105_META_SMAC;
+
+ return 0;
+}
+
+/* Must be called only with priv->tagger_data.state bit
+ * SJA1105_HWTS_RX_EN cleared
+ */
+static int sja1105_change_rxtstamping(struct sja1105_private *priv,
+ bool on)
+{
+ struct sja1105_general_params_entry *general_params;
+ struct sja1105_table *table;
+ int rc;
+
+ table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
+ general_params = table->entries;
+ general_params->send_meta1 = on;
+ general_params->send_meta0 = on;
+
+ rc = sja1105_init_avb_params(priv, on);
+ if (rc < 0)
+ return rc;
+
+ /* Initialize the meta state machine to a known state */
+ if (priv->tagger_data.stampable_skb) {
+ kfree_skb(priv->tagger_data.stampable_skb);
+ priv->tagger_data.stampable_skb = NULL;
+ }
+
+ return sja1105_static_config_reload(priv, SJA1105_RX_HWTSTAMPING);
+}
+
+int sja1105_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct hwtstamp_config config;
+ bool rx_on;
+ int rc;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ priv->ports[port].hwts_tx_en = false;
+ break;
+ case HWTSTAMP_TX_ON:
+ priv->ports[port].hwts_tx_en = true;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ rx_on = false;
+ break;
+ default:
+ rx_on = true;
+ break;
+ }
+
+ if (rx_on != test_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state)) {
+ clear_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state);
+
+ rc = sja1105_change_rxtstamping(priv, rx_on);
+ if (rc < 0) {
+ dev_err(ds->dev,
+ "Failed to change RX timestamping: %d\n", rc);
+ return rc;
+ }
+ if (rx_on)
+ set_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state);
+ }
+
+ if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
+ return -EFAULT;
+ return 0;
+}
+
+int sja1105_hwtstamp_get(struct dsa_switch *ds, int port, struct ifreq *ifr)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct hwtstamp_config config;
+
+ config.flags = 0;
+ if (priv->ports[port].hwts_tx_en)
+ config.tx_type = HWTSTAMP_TX_ON;
+ else
+ config.tx_type = HWTSTAMP_TX_OFF;
+ if (test_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state))
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
+ else
+ config.rx_filter = HWTSTAMP_FILTER_NONE;
+
+ return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
+
int sja1105_get_ts_info(struct dsa_switch *ds, int port,
struct ethtool_ts_info *info)
{
struct sja1105_private *priv = ds->priv;
+ struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
/* Called during cleanup */
- if (!priv->clock)
+ if (!ptp_data->clock)
return -ENODEV;
info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
@@ -74,42 +189,58 @@ int sja1105_get_ts_info(struct dsa_switch *ds, int port,
(1 << HWTSTAMP_TX_ON);
info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
(1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT);
- info->phc_index = ptp_clock_index(priv->clock);
+ info->phc_index = ptp_clock_index(ptp_data->clock);
return 0;
}
-int sja1105et_ptp_cmd(const void *ctx, const void *data)
+void sja1105et_ptp_cmd_packing(u8 *buf, struct sja1105_ptp_cmd *cmd,
+ enum packing_op op)
{
- const struct sja1105_ptp_cmd *cmd = data;
- const struct sja1105_private *priv = ctx;
- const struct sja1105_regs *regs = priv->info->regs;
const int size = SJA1105_SIZE_PTP_CMD;
- u8 buf[SJA1105_SIZE_PTP_CMD] = {0};
/* No need to keep this as part of the structure */
u64 valid = 1;
- sja1105_pack(buf, &valid, 31, 31, size);
- sja1105_pack(buf, &cmd->resptp, 2, 2, size);
-
- return sja1105_spi_send_packed_buf(priv, SPI_WRITE, regs->ptp_control,
- buf, SJA1105_SIZE_PTP_CMD);
+ sja1105_packing(buf, &valid, 31, 31, size, op);
+ sja1105_packing(buf, &cmd->ptpstrtsch, 30, 30, size, op);
+ sja1105_packing(buf, &cmd->ptpstopsch, 29, 29, size, op);
+ sja1105_packing(buf, &cmd->resptp, 2, 2, size, op);
+ sja1105_packing(buf, &cmd->corrclk4ts, 1, 1, size, op);
+ sja1105_packing(buf, &cmd->ptpclkadd, 0, 0, size, op);
}
-int sja1105pqrs_ptp_cmd(const void *ctx, const void *data)
+void sja1105pqrs_ptp_cmd_packing(u8 *buf, struct sja1105_ptp_cmd *cmd,
+ enum packing_op op)
{
- const struct sja1105_ptp_cmd *cmd = data;
- const struct sja1105_private *priv = ctx;
- const struct sja1105_regs *regs = priv->info->regs;
const int size = SJA1105_SIZE_PTP_CMD;
- u8 buf[SJA1105_SIZE_PTP_CMD] = {0};
/* No need to keep this as part of the structure */
u64 valid = 1;
- sja1105_pack(buf, &valid, 31, 31, size);
- sja1105_pack(buf, &cmd->resptp, 3, 3, size);
+ sja1105_packing(buf, &valid, 31, 31, size, op);
+ sja1105_packing(buf, &cmd->ptpstrtsch, 30, 30, size, op);
+ sja1105_packing(buf, &cmd->ptpstopsch, 29, 29, size, op);
+ sja1105_packing(buf, &cmd->resptp, 3, 3, size, op);
+ sja1105_packing(buf, &cmd->corrclk4ts, 2, 2, size, op);
+ sja1105_packing(buf, &cmd->ptpclkadd, 0, 0, size, op);
+}
+
+int sja1105_ptp_commit(struct dsa_switch *ds, struct sja1105_ptp_cmd *cmd,
+ sja1105_spi_rw_mode_t rw)
+{
+ const struct sja1105_private *priv = ds->priv;
+ const struct sja1105_regs *regs = priv->info->regs;
+ u8 buf[SJA1105_SIZE_PTP_CMD] = {0};
+ int rc;
+
+ if (rw == SPI_WRITE)
+ priv->info->ptp_cmd_packing(buf, cmd, PACK);
+
+ rc = sja1105_xfer_buf(priv, SPI_WRITE, regs->ptp_control, buf,
+ SJA1105_SIZE_PTP_CMD);
- return sja1105_spi_send_packed_buf(priv, SPI_WRITE, regs->ptp_control,
- buf, SJA1105_SIZE_PTP_CMD);
+ if (rw == SPI_READ)
+ priv->info->ptp_cmd_packing(buf, cmd, UNPACK);
+
+ return rc;
}
/* The switch returns partial timestamps (24 bits for SJA1105 E/T, which wrap
@@ -126,9 +257,10 @@ int sja1105pqrs_ptp_cmd(const void *ctx, const void *data)
* Must be called within one wraparound period of the partial timestamp since
* it was generated by the MAC.
*/
-u64 sja1105_tstamp_reconstruct(struct sja1105_private *priv, u64 now,
- u64 ts_partial)
+static u64 sja1105_tstamp_reconstruct(struct dsa_switch *ds, u64 now,
+ u64 ts_partial)
{
+ struct sja1105_private *priv = ds->priv;
u64 partial_tstamp_mask = CYCLECOUNTER_MASK(priv->info->ptp_ts_bits);
u64 ts_reconstructed;
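
These two hunks only change the reconstruction helper from taking a sja1105_private to taking a dsa_switch (and make it static); the algorithm itself is unchanged and its body is elided here. As a hedged illustration of the technique the surrounding comment describes, with the 24 partial-timestamp bits of the E/T switches (the driver's exact bounds check may be phrased differently):

	/* now        = 0x0500000123  (full counter, read afterwards)
	 * ts_partial = 0x00FFFFFA    (24 bits latched with the frame)
	 * mask       = 0x00FFFFFF
	 *
	 * Splicing the upper bits of now onto ts_partial gives
	 * 0x0500FFFFFA, which lies ahead of now; since the partial
	 * stamp was taken first, the low 24 bits must have wrapped
	 * in between, so one wrap period (mask + 1) is subtracted:
	 */
	ts = (now & ~mask) | ts_partial;	/* 0x0500FFFFFA */
	if (ts > now)
		ts -= (mask + 1);		/* 0x04FFFFFFFA */
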
@@ -170,8 +302,9 @@ u64 sja1105_tstamp_reconstruct(struct sja1105_private *priv, u64 now,
* To have common code for E/T and P/Q/R/S for reading the timestamp,
* we need to juggle with the offset and the bit indices.
*/
-int sja1105_ptpegr_ts_poll(struct sja1105_private *priv, int port, u64 *ts)
+static int sja1105_ptpegr_ts_poll(struct dsa_switch *ds, int port, u64 *ts)
{
+ struct sja1105_private *priv = ds->priv;
const struct sja1105_regs *regs = priv->info->regs;
int tstamp_bit_start, tstamp_bit_end;
int timeout = 10;
@@ -180,10 +313,8 @@ int sja1105_ptpegr_ts_poll(struct sja1105_private *priv, int port, u64 *ts)
int rc;
do {
- rc = sja1105_spi_send_packed_buf(priv, SPI_READ,
- regs->ptpegr_ts[port],
- packed_buf,
- priv->info->ptpegr_ts_bytes);
+ rc = sja1105_xfer_buf(priv, SPI_READ, regs->ptpegr_ts[port],
+ packed_buf, priv->info->ptpegr_ts_bytes);
if (rc < 0)
return rc;
@@ -216,177 +347,350 @@ int sja1105_ptpegr_ts_poll(struct sja1105_private *priv, int port, u64 *ts)
return 0;
}
-int sja1105_ptp_reset(struct sja1105_private *priv)
+/* Caller must hold ptp_data->lock */
+static int sja1105_ptpclkval_read(struct sja1105_private *priv, u64 *ticks,
+ struct ptp_system_timestamp *ptp_sts)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+
+ return sja1105_xfer_u64(priv, SPI_READ, regs->ptpclkval, ticks,
+ ptp_sts);
+}
+
+/* Caller must hold ptp_data->lock */
+static int sja1105_ptpclkval_write(struct sja1105_private *priv, u64 ticks,
+ struct ptp_system_timestamp *ptp_sts)
{
+ const struct sja1105_regs *regs = priv->info->regs;
+
+ return sja1105_xfer_u64(priv, SPI_WRITE, regs->ptpclkval, &ticks,
+ ptp_sts);
+}
+
+#define rxtstamp_to_tagger(d) \
+ container_of((d), struct sja1105_tagger_data, rxtstamp_work)
+#define tagger_to_sja1105(d) \
+ container_of((d), struct sja1105_private, tagger_data)
+
+static void sja1105_rxtstamp_work(struct work_struct *work)
+{
+ struct sja1105_tagger_data *tagger_data = rxtstamp_to_tagger(work);
+ struct sja1105_private *priv = tagger_to_sja1105(tagger_data);
+ struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
struct dsa_switch *ds = priv->ds;
- struct sja1105_ptp_cmd cmd = {0};
+ struct sk_buff *skb;
+
+ mutex_lock(&ptp_data->lock);
+
+ while ((skb = skb_dequeue(&tagger_data->skb_rxtstamp_queue)) != NULL) {
+ struct skb_shared_hwtstamps *shwt = skb_hwtstamps(skb);
+ u64 ticks, ts;
+ int rc;
+
+ rc = sja1105_ptpclkval_read(priv, &ticks, NULL);
+ if (rc < 0) {
+ dev_err(ds->dev, "Failed to read PTP clock: %d\n", rc);
+ kfree_skb(skb);
+ continue;
+ }
+
+ *shwt = (struct skb_shared_hwtstamps) {0};
+
+ ts = SJA1105_SKB_CB(skb)->meta_tstamp;
+ ts = sja1105_tstamp_reconstruct(ds, ticks, ts);
+
+ shwt->hwtstamp = ns_to_ktime(sja1105_ticks_to_ns(ts));
+ netif_rx_ni(skb);
+ }
+
+ mutex_unlock(&ptp_data->lock);
+}
+
+/* Called from dsa_skb_defer_rx_timestamp */
+bool sja1105_port_rxtstamp(struct dsa_switch *ds, int port,
+ struct sk_buff *skb, unsigned int type)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_tagger_data *tagger_data = &priv->tagger_data;
+
+ if (!test_bit(SJA1105_HWTS_RX_EN, &tagger_data->state))
+ return false;
+
+ /* We need to read the full PTP clock to reconstruct the Rx
+ * timestamp. For that we need a sleepable context.
+ */
+ skb_queue_tail(&tagger_data->skb_rxtstamp_queue, skb);
+ schedule_work(&tagger_data->rxtstamp_work);
+ return true;
+}
+
+/* Called from dsa_skb_tx_timestamp. This callback is just to make DSA clone
+ * the skb and have it available in DSA_SKB_CB in the .port_deferred_xmit
+ * callback, where we will timestamp it synchronously.
+ */
+bool sja1105_port_txtstamp(struct dsa_switch *ds, int port,
+ struct sk_buff *skb, unsigned int type)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_port *sp = &priv->ports[port];
+
+ if (!sp->hwts_tx_en)
+ return false;
+
+ return true;
+}
+
+static int sja1105_ptp_reset(struct dsa_switch *ds)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
+ struct sja1105_ptp_cmd cmd = ptp_data->cmd;
int rc;
- mutex_lock(&priv->ptp_lock);
+ mutex_lock(&ptp_data->lock);
cmd.resptp = 1;
+
dev_dbg(ds->dev, "Resetting PTP clock\n");
- rc = priv->info->ptp_cmd(priv, &cmd);
+ rc = sja1105_ptp_commit(ds, &cmd, SPI_WRITE);
- timecounter_init(&priv->tstamp_tc, &priv->tstamp_cc,
- ktime_to_ns(ktime_get_real()));
+ sja1105_tas_clockstep(priv->ds);
- mutex_unlock(&priv->ptp_lock);
+ mutex_unlock(&ptp_data->lock);
return rc;
}
-static int sja1105_ptp_gettime(struct ptp_clock_info *ptp,
- struct timespec64 *ts)
+/* Caller must hold ptp_data->lock */
+int __sja1105_ptp_gettimex(struct dsa_switch *ds, u64 *ns,
+ struct ptp_system_timestamp *ptp_sts)
{
- struct sja1105_private *priv = ptp_to_sja1105(ptp);
- u64 ns;
+ struct sja1105_private *priv = ds->priv;
+ u64 ticks;
+ int rc;
- mutex_lock(&priv->ptp_lock);
- ns = timecounter_read(&priv->tstamp_tc);
- mutex_unlock(&priv->ptp_lock);
+ rc = sja1105_ptpclkval_read(priv, &ticks, ptp_sts);
+ if (rc < 0) {
+ dev_err(ds->dev, "Failed to read PTP clock: %d\n", rc);
+ return rc;
+ }
- *ts = ns_to_timespec64(ns);
+ *ns = sja1105_ticks_to_ns(ticks);
return 0;
}
+static int sja1105_ptp_gettimex(struct ptp_clock_info *ptp,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *ptp_sts)
+{
+ struct sja1105_ptp_data *ptp_data = ptp_caps_to_data(ptp);
+ struct sja1105_private *priv = ptp_data_to_sja1105(ptp_data);
+ u64 now = 0;
+ int rc;
+
+ mutex_lock(&ptp_data->lock);
+
+ rc = __sja1105_ptp_gettimex(priv->ds, &now, ptp_sts);
+ *ts = ns_to_timespec64(now);
+
+ mutex_unlock(&ptp_data->lock);
+
+ return rc;
+}
+
+/* Caller must hold ptp_data->lock */
+static int sja1105_ptp_mode_set(struct sja1105_private *priv,
+ enum sja1105_ptp_clk_mode mode)
+{
+ struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
+
+ if (ptp_data->cmd.ptpclkadd == mode)
+ return 0;
+
+ ptp_data->cmd.ptpclkadd = mode;
+
+ return sja1105_ptp_commit(priv->ds, &ptp_data->cmd, SPI_WRITE);
+}
+
+/* Write to PTPCLKVAL while PTPCLKADD is 0 */
+int __sja1105_ptp_settime(struct dsa_switch *ds, u64 ns,
+ struct ptp_system_timestamp *ptp_sts)
+{
+ struct sja1105_private *priv = ds->priv;
+ u64 ticks = ns_to_sja1105_ticks(ns);
+ int rc;
+
+ rc = sja1105_ptp_mode_set(priv, PTP_SET_MODE);
+ if (rc < 0) {
+ dev_err(priv->ds->dev, "Failed to put PTPCLK in set mode\n");
+ return rc;
+ }
+
+ rc = sja1105_ptpclkval_write(priv, ticks, ptp_sts);
+
+ sja1105_tas_clockstep(priv->ds);
+
+ return rc;
+}
+
static int sja1105_ptp_settime(struct ptp_clock_info *ptp,
const struct timespec64 *ts)
{
- struct sja1105_private *priv = ptp_to_sja1105(ptp);
+ struct sja1105_ptp_data *ptp_data = ptp_caps_to_data(ptp);
+ struct sja1105_private *priv = ptp_data_to_sja1105(ptp_data);
u64 ns = timespec64_to_ns(ts);
+ int rc;
- mutex_lock(&priv->ptp_lock);
- timecounter_init(&priv->tstamp_tc, &priv->tstamp_cc, ns);
- mutex_unlock(&priv->ptp_lock);
+ mutex_lock(&ptp_data->lock);
- return 0;
+ rc = __sja1105_ptp_settime(priv->ds, ns, NULL);
+
+ mutex_unlock(&ptp_data->lock);
+
+ return rc;
}
static int sja1105_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
- struct sja1105_private *priv = ptp_to_sja1105(ptp);
+ struct sja1105_ptp_data *ptp_data = ptp_caps_to_data(ptp);
+ struct sja1105_private *priv = ptp_data_to_sja1105(ptp_data);
+ const struct sja1105_regs *regs = priv->info->regs;
+ u32 clkrate32;
s64 clkrate;
+ int rc;
clkrate = (s64)scaled_ppm * SJA1105_CC_MULT_NUM;
clkrate = div_s64(clkrate, SJA1105_CC_MULT_DEM);
- mutex_lock(&priv->ptp_lock);
-
- /* Force a readout to update the timer *before* changing its frequency.
- *
- * This way, its corrected time curve can at all times be modeled
- * as a linear "A * x + B" function, where:
- *
- * - B are past frequency adjustments and offset shifts, all
- * accumulated into the cycle_last variable.
- *
- * - A is the new frequency adjustments we're just about to set.
- *
- * Reading now makes B accumulate the correct amount of time,
- * corrected at the old rate, before changing it.
- *
- * Hardware timestamps then become simple points on the curve and
- * are approximated using the above function. This is still better
- * than letting the switch take the timestamps using the hardware
- * rate-corrected clock (PTPCLKVAL) - the comparison in this case would
- * be that we're shifting the ruler at the same time as we're taking
- * measurements with it.
- *
- * The disadvantage is that it's possible to receive timestamps when
- * a frequency adjustment took place in the near past.
- * In this case they will be approximated using the new ppb value
- * instead of a compound function made of two segments (one at the old
- * and the other at the new rate) - introducing some inaccuracy.
- */
- timecounter_read(&priv->tstamp_tc);
+ /* Take a +/- value and re-center it around 2^31. */
+ clkrate = SJA1105_CC_MULT + clkrate;
+ WARN_ON(abs(clkrate) >= GENMASK_ULL(31, 0));
+ clkrate32 = clkrate;
- priv->tstamp_cc.mult = SJA1105_CC_MULT + clkrate;
+ mutex_lock(&ptp_data->lock);
- mutex_unlock(&priv->ptp_lock);
+ rc = sja1105_xfer_u32(priv, SPI_WRITE, regs->ptpclkrate, &clkrate32,
+ NULL);
- return 0;
+ sja1105_tas_adjfreq(priv->ds);
+
+ mutex_unlock(&ptp_data->lock);
+
+ return rc;
}
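
sja1105_ptp_adjfine() now programs the hardware rate directly: scaled_ppm (ppm with a 16-bit fractional part) is converted to PTPCLKRATE, a frequency ratio centered on 2^31 = 0x80000000. A worked example for a +1 ppm correction:

	/* +1 ppm  =>  scaled_ppm = 1 << 16 = 65536
	 *
	 * clkrate    = 65536 * SJA1105_CC_MULT_NUM / SJA1105_CC_MULT_DEM
	 *            = 65536 * 512 / 15625
	 *            = 2147          (~ 2^31 / 10^6, i.e. 1 ppm of 2^31)
	 *
	 * ptpclkrate = 0x80000000 + 2147 = 0x80000863
	 */
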
-static int sja1105_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+/* Write to PTPCLKVAL while PTPCLKADD is 1 */
+int __sja1105_ptp_adjtime(struct dsa_switch *ds, s64 delta)
{
- struct sja1105_private *priv = ptp_to_sja1105(ptp);
+ struct sja1105_private *priv = ds->priv;
+ s64 ticks = ns_to_sja1105_ticks(delta);
+ int rc;
- mutex_lock(&priv->ptp_lock);
- timecounter_adjtime(&priv->tstamp_tc, delta);
- mutex_unlock(&priv->ptp_lock);
+ rc = sja1105_ptp_mode_set(priv, PTP_ADD_MODE);
+ if (rc < 0) {
+ dev_err(priv->ds->dev, "Failed to put PTPCLK in add mode\n");
+ return rc;
+ }
- return 0;
+ rc = sja1105_ptpclkval_write(priv, ticks, NULL);
+
+ sja1105_tas_clockstep(priv->ds);
+
+ return rc;
}
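
Taken together, __sja1105_ptp_settime() and __sja1105_ptp_adjtime() show that PTPCLKVAL is one register with two write semantics, selected by the PTPCLKADD bit that sja1105_ptp_mode_set() caches so redundant PTP command writes are skipped. A toy model of that behavior, runnable standalone:

#include <stdio.h>

enum clk_mode { PTP_SET_MODE = 0, PTP_ADD_MODE = 1 };

static long long clk;			/* models PTPCLKVAL */
static enum clk_mode mode = PTP_SET_MODE; /* models cached PTPCLKADD */

static void ptpclkval_write(long long ticks)
{
	if (mode == PTP_ADD_MODE)
		clk += ticks;		/* adjtime path */
	else
		clk = ticks;		/* settime path */
}

int main(void)
{
	ptpclkval_write(1000);		/* set: clk = 1000 */
	mode = PTP_ADD_MODE;
	ptpclkval_write(-16);		/* add: clk = 984  */
	printf("%lld\n", clk);
	return 0;
}
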
-static u64 sja1105_ptptsclk_read(const struct cyclecounter *cc)
+static int sja1105_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
- struct sja1105_private *priv = cc_to_sja1105(cc);
- const struct sja1105_regs *regs = priv->info->regs;
- u64 ptptsclk = 0;
+ struct sja1105_ptp_data *ptp_data = ptp_caps_to_data(ptp);
+ struct sja1105_private *priv = ptp_data_to_sja1105(ptp_data);
int rc;
- rc = sja1105_spi_send_int(priv, SPI_READ, regs->ptptsclk,
- &ptptsclk, 8);
- if (rc < 0)
- dev_err_ratelimited(priv->ds->dev,
- "failed to read ptp cycle counter: %d\n",
- rc);
- return ptptsclk;
-}
+ mutex_lock(&ptp_data->lock);
-static void sja1105_ptp_overflow_check(struct work_struct *work)
-{
- struct delayed_work *dw = to_delayed_work(work);
- struct sja1105_private *priv = dw_to_sja1105(dw);
- struct timespec64 ts;
+ rc = __sja1105_ptp_adjtime(priv->ds, delta);
- sja1105_ptp_gettime(&priv->ptp_caps, &ts);
+ mutex_unlock(&ptp_data->lock);
- schedule_delayed_work(&priv->refresh_work, SJA1105_REFRESH_INTERVAL);
+ return rc;
}
-static const struct ptp_clock_info sja1105_ptp_caps = {
- .owner = THIS_MODULE,
- .name = "SJA1105 PHC",
- .adjfine = sja1105_ptp_adjfine,
- .adjtime = sja1105_ptp_adjtime,
- .gettime64 = sja1105_ptp_gettime,
- .settime64 = sja1105_ptp_settime,
- .max_adj = SJA1105_MAX_ADJ_PPB,
-};
-
-int sja1105_ptp_clock_register(struct sja1105_private *priv)
+int sja1105_ptp_clock_register(struct dsa_switch *ds)
{
- struct dsa_switch *ds = priv->ds;
-
- /* Set up the cycle counter */
- priv->tstamp_cc = (struct cyclecounter) {
- .read = sja1105_ptptsclk_read,
- .mask = CYCLECOUNTER_MASK(64),
- .shift = SJA1105_CC_SHIFT,
- .mult = SJA1105_CC_MULT,
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_tagger_data *tagger_data = &priv->tagger_data;
+ struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
+
+ ptp_data->caps = (struct ptp_clock_info) {
+ .owner = THIS_MODULE,
+ .name = "SJA1105 PHC",
+ .adjfine = sja1105_ptp_adjfine,
+ .adjtime = sja1105_ptp_adjtime,
+ .gettimex64 = sja1105_ptp_gettimex,
+ .settime64 = sja1105_ptp_settime,
+ .max_adj = SJA1105_MAX_ADJ_PPB,
};
- mutex_init(&priv->ptp_lock);
- priv->ptp_caps = sja1105_ptp_caps;
- priv->clock = ptp_clock_register(&priv->ptp_caps, ds->dev);
- if (IS_ERR_OR_NULL(priv->clock))
- return PTR_ERR(priv->clock);
+ skb_queue_head_init(&tagger_data->skb_rxtstamp_queue);
+ INIT_WORK(&tagger_data->rxtstamp_work, sja1105_rxtstamp_work);
+ spin_lock_init(&tagger_data->meta_lock);
- INIT_DELAYED_WORK(&priv->refresh_work, sja1105_ptp_overflow_check);
- schedule_delayed_work(&priv->refresh_work, SJA1105_REFRESH_INTERVAL);
+ ptp_data->clock = ptp_clock_register(&ptp_data->caps, ds->dev);
+ if (IS_ERR_OR_NULL(ptp_data->clock))
+ return PTR_ERR(ptp_data->clock);
- return sja1105_ptp_reset(priv);
+ ptp_data->cmd.corrclk4ts = true;
+ ptp_data->cmd.ptpclkadd = PTP_SET_MODE;
+
+ return sja1105_ptp_reset(ds);
}
-void sja1105_ptp_clock_unregister(struct sja1105_private *priv)
+void sja1105_ptp_clock_unregister(struct dsa_switch *ds)
{
- if (IS_ERR_OR_NULL(priv->clock))
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
+
+ if (IS_ERR_OR_NULL(ptp_data->clock))
return;
- cancel_delayed_work_sync(&priv->refresh_work);
- ptp_clock_unregister(priv->clock);
- priv->clock = NULL;
+ cancel_work_sync(&priv->tagger_data.rxtstamp_work);
+ skb_queue_purge(&priv->tagger_data.skb_rxtstamp_queue);
+ ptp_clock_unregister(ptp_data->clock);
+ ptp_data->clock = NULL;
+}
+
+void sja1105_ptp_txtstamp_skb(struct dsa_switch *ds, int slot,
+ struct sk_buff *skb)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
+ struct skb_shared_hwtstamps shwt = {0};
+ u64 ticks, ts;
+ int rc;
+
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+
+ mutex_lock(&ptp_data->lock);
+
+ rc = sja1105_ptpclkval_read(priv, &ticks, NULL);
+ if (rc < 0) {
+ dev_err(ds->dev, "Failed to read PTP clock: %d\n", rc);
+ kfree_skb(skb);
+ goto out;
+ }
+
+ rc = sja1105_ptpegr_ts_poll(ds, slot, &ts);
+ if (rc < 0) {
+ dev_err(ds->dev, "timed out polling for tstamp\n");
+ kfree_skb(skb);
+ goto out;
+ }
+
+ ts = sja1105_tstamp_reconstruct(ds, ticks, ts);
+
+ shwt.hwtstamp = ns_to_ktime(sja1105_ticks_to_ns(ts));
+ skb_complete_tx_timestamp(skb, &shwt);
+
+out:
+ mutex_unlock(&ptp_data->lock);
}
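
The switch to .gettimex64 above is what makes these PTPCLKVAL reads useful for synchronization: the caller hands in a struct ptp_system_timestamp and receives system-time snapshots taken immediately around the hardware access. Here the snapshots are filled in by the SPI core through the ptp_sts_word_pre/post fields set up in sja1105_spi.c below. A driver that brackets the readout itself would use the ptp_clock_kernel.h helpers instead; a minimal sketch, not the sja1105 code path, where foo_read_phc_ns() is hypothetical:

static int foo_gettimex64(struct ptp_clock_info *ptp,
			  struct timespec64 *ts,
			  struct ptp_system_timestamp *sts)
{
	u64 ns;

	ptp_read_system_prets(sts);	/* snapshot before HW access */
	ns = foo_read_phc_ns(ptp);	/* hypothetical register read */
	ptp_read_system_postts(sts);	/* snapshot after HW access */

	*ts = ns_to_timespec64(ns);
	return 0;
}
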
diff --git a/drivers/net/dsa/sja1105/sja1105_ptp.h b/drivers/net/dsa/sja1105/sja1105_ptp.h
index 394e12a6ad59..470f44b76318 100644
--- a/drivers/net/dsa/sja1105/sja1105_ptp.h
+++ b/drivers/net/dsa/sja1105/sja1105_ptp.h
@@ -6,59 +6,136 @@
#if IS_ENABLED(CONFIG_NET_DSA_SJA1105_PTP)
-int sja1105_ptp_clock_register(struct sja1105_private *priv);
+/* Timestamps are in units of 8 ns clock ticks (equivalent to
+ * a fixed 125 MHz clock).
+ */
+#define SJA1105_TICK_NS 8
+
+static inline s64 ns_to_sja1105_ticks(s64 ns)
+{
+ return ns / SJA1105_TICK_NS;
+}
+
+static inline s64 sja1105_ticks_to_ns(s64 ticks)
+{
+ return ticks * SJA1105_TICK_NS;
+}
-void sja1105_ptp_clock_unregister(struct sja1105_private *priv);
+struct sja1105_ptp_cmd {
+ u64 ptpstrtsch; /* start schedule */
+ u64 ptpstopsch; /* stop schedule */
+ u64 resptp; /* reset */
+ u64 corrclk4ts; /* use the corrected clock for timestamps */
+ u64 ptpclkadd; /* enum sja1105_ptp_clk_mode */
+};
-int sja1105_ptpegr_ts_poll(struct sja1105_private *priv, int port, u64 *ts);
+struct sja1105_ptp_data {
+ struct ptp_clock_info caps;
+ struct ptp_clock *clock;
+ struct sja1105_ptp_cmd cmd;
+ /* Serializes all operations on the PTP hardware clock */
+ struct mutex lock;
+};
-int sja1105et_ptp_cmd(const void *ctx, const void *data);
+int sja1105_ptp_clock_register(struct dsa_switch *ds);
-int sja1105pqrs_ptp_cmd(const void *ctx, const void *data);
+void sja1105_ptp_clock_unregister(struct dsa_switch *ds);
+
+void sja1105et_ptp_cmd_packing(u8 *buf, struct sja1105_ptp_cmd *cmd,
+ enum packing_op op);
+
+void sja1105pqrs_ptp_cmd_packing(u8 *buf, struct sja1105_ptp_cmd *cmd,
+ enum packing_op op);
int sja1105_get_ts_info(struct dsa_switch *ds, int port,
struct ethtool_ts_info *ts);
-u64 sja1105_tstamp_reconstruct(struct sja1105_private *priv, u64 now,
- u64 ts_partial);
+void sja1105_ptp_txtstamp_skb(struct dsa_switch *ds, int slot,
+ struct sk_buff *clone);
+
+bool sja1105_port_rxtstamp(struct dsa_switch *ds, int port,
+ struct sk_buff *skb, unsigned int type);
+
+bool sja1105_port_txtstamp(struct dsa_switch *ds, int port,
+ struct sk_buff *skb, unsigned int type);
+
+int sja1105_hwtstamp_get(struct dsa_switch *ds, int port, struct ifreq *ifr);
+
+int sja1105_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr);
+
+int __sja1105_ptp_gettimex(struct dsa_switch *ds, u64 *ns,
+ struct ptp_system_timestamp *sts);
-int sja1105_ptp_reset(struct sja1105_private *priv);
+int __sja1105_ptp_settime(struct dsa_switch *ds, u64 ns,
+ struct ptp_system_timestamp *ptp_sts);
+
+int __sja1105_ptp_adjtime(struct dsa_switch *ds, s64 delta);
+
+int sja1105_ptp_commit(struct dsa_switch *ds, struct sja1105_ptp_cmd *cmd,
+ sja1105_spi_rw_mode_t rw);
#else
-static inline int sja1105_ptp_clock_register(struct sja1105_private *priv)
+struct sja1105_ptp_cmd;
+
+/* Structures cannot be empty in C. Bah!
+ * Keep the mutex as the only element, which is a bit more difficult to
+ * refactor out of sja1105_main.c anyway.
+ */
+struct sja1105_ptp_data {
+ struct mutex lock;
+};
+
+static inline int sja1105_ptp_clock_register(struct dsa_switch *ds)
{
return 0;
}
-static inline void sja1105_ptp_clock_unregister(struct sja1105_private *priv)
+static inline void sja1105_ptp_clock_unregister(struct dsa_switch *ds) { }
+
+static inline void sja1105_ptp_txtstamp_skb(struct dsa_switch *ds, int slot,
+ struct sk_buff *clone)
+{
+}
+
+static inline int __sja1105_ptp_gettimex(struct dsa_switch *ds, u64 *ns,
+ struct ptp_system_timestamp *sts)
{
- return;
+ return 0;
}
-static inline int
-sja1105_ptpegr_ts_poll(struct sja1105_private *priv, int port, u64 *ts)
+static inline int __sja1105_ptp_settime(struct dsa_switch *ds, u64 ns,
+ struct ptp_system_timestamp *ptp_sts)
{
return 0;
}
-static inline u64 sja1105_tstamp_reconstruct(struct sja1105_private *priv,
- u64 now, u64 ts_partial)
+static inline int __sja1105_ptp_adjtime(struct dsa_switch *ds, s64 delta)
{
return 0;
}
-static inline int sja1105_ptp_reset(struct sja1105_private *priv)
+static inline int sja1105_ptp_commit(struct dsa_switch *ds,
+ struct sja1105_ptp_cmd *cmd,
+ sja1105_spi_rw_mode_t rw)
{
return 0;
}
-#define sja1105et_ptp_cmd NULL
+#define sja1105et_ptp_cmd_packing NULL
-#define sja1105pqrs_ptp_cmd NULL
+#define sja1105pqrs_ptp_cmd_packing NULL
#define sja1105_get_ts_info NULL
+#define sja1105_port_rxtstamp NULL
+
+#define sja1105_port_txtstamp NULL
+
+#define sja1105_hwtstamp_get NULL
+
+#define sja1105_hwtstamp_set NULL
+
#endif /* IS_ENABLED(CONFIG_NET_DSA_SJA1105_PTP) */
#endif /* _SJA1105_PTP_H */
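
The #else branch above follows the usual kernel pattern for optional features: static inline no-op stubs for functions that are called unconditionally, and NULL #defines for ones that are only ever assigned into an ops structure (DSA skips NULL callbacks). A generic sketch of the pattern, with hypothetical names:

#if IS_ENABLED(CONFIG_FOO_FEATURE)
int foo_feature_setup(struct foo *foo);		/* real implementation */
#else
static inline int foo_feature_setup(struct foo *foo)
{
	return 0;	/* callers need no #ifdef of their own */
}
#define foo_feature_ndo NULL	/* ops slot stays empty, never called */
#endif
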
diff --git a/drivers/net/dsa/sja1105/sja1105_spi.c b/drivers/net/dsa/sja1105/sja1105_spi.c
index 58dd37ecde17..29b127f3bf9c 100644
--- a/drivers/net/dsa/sja1105/sja1105_spi.c
+++ b/drivers/net/dsa/sja1105/sja1105_spi.c
@@ -7,42 +7,15 @@
#include <linux/packing.h>
#include "sja1105.h"
-#define SJA1105_SIZE_PORT_CTRL 4
#define SJA1105_SIZE_RESET_CMD 4
#define SJA1105_SIZE_SPI_MSG_HEADER 4
#define SJA1105_SIZE_SPI_MSG_MAXLEN (64 * 4)
-#define SJA1105_SIZE_SPI_TRANSFER_MAX \
- (SJA1105_SIZE_SPI_MSG_HEADER + SJA1105_SIZE_SPI_MSG_MAXLEN)
-static int sja1105_spi_transfer(const struct sja1105_private *priv,
- const void *tx, void *rx, int size)
-{
- struct spi_device *spi = priv->spidev;
- struct spi_transfer transfer = {
- .tx_buf = tx,
- .rx_buf = rx,
- .len = size,
- };
- struct spi_message msg;
- int rc;
-
- if (size > SJA1105_SIZE_SPI_TRANSFER_MAX) {
- dev_err(&spi->dev, "SPI message (%d) longer than max of %d\n",
- size, SJA1105_SIZE_SPI_TRANSFER_MAX);
- return -EMSGSIZE;
- }
-
- spi_message_init(&msg);
- spi_message_add_tail(&transfer, &msg);
-
- rc = spi_sync(spi, &msg);
- if (rc < 0) {
- dev_err(&spi->dev, "SPI transfer failed: %d\n", rc);
- return rc;
- }
-
- return rc;
-}
+struct sja1105_chunk {
+ u8 *buf;
+ size_t len;
+ u64 reg_addr;
+};
static void
sja1105_spi_message_pack(void *buf, const struct sja1105_spi_message *msg)
@@ -56,242 +29,219 @@ sja1105_spi_message_pack(void *buf, const struct sja1105_spi_message *msg)
sja1105_pack(buf, &msg->address, 24, 4, size);
}
+#define sja1105_hdr_xfer(xfers, chunk) \
+ ((xfers) + 2 * (chunk))
+#define sja1105_chunk_xfer(xfers, chunk) \
+ ((xfers) + 2 * (chunk) + 1)
+#define sja1105_hdr_buf(hdr_bufs, chunk) \
+ ((hdr_bufs) + (chunk) * SJA1105_SIZE_SPI_MSG_HEADER)
+
/* If @rw is:
* - SPI_WRITE: creates and sends an SPI write message at absolute
- * address reg_addr, taking size_bytes from *packed_buf
+ * address reg_addr, taking @len bytes from *buf
* - SPI_READ: creates and sends an SPI read message from absolute
- * address reg_addr, writing size_bytes into *packed_buf
- *
- * This function should only be called if it is priorly known that
- * @size_bytes is smaller than SIZE_SPI_MSG_MAXLEN. Larger packed buffers
- * are chunked in smaller pieces by sja1105_spi_send_long_packed_buf below.
+ * address reg_addr, writing @len bytes into *buf
*/
-int sja1105_spi_send_packed_buf(const struct sja1105_private *priv,
- sja1105_spi_rw_mode_t rw, u64 reg_addr,
- void *packed_buf, size_t size_bytes)
+static int sja1105_xfer(const struct sja1105_private *priv,
+ sja1105_spi_rw_mode_t rw, u64 reg_addr, u8 *buf,
+ size_t len, struct ptp_system_timestamp *ptp_sts)
{
- u8 tx_buf[SJA1105_SIZE_SPI_TRANSFER_MAX] = {0};
- u8 rx_buf[SJA1105_SIZE_SPI_TRANSFER_MAX] = {0};
- const int msg_len = size_bytes + SJA1105_SIZE_SPI_MSG_HEADER;
- struct sja1105_spi_message msg = {0};
- int rc;
+ struct sja1105_chunk chunk = {
+ .len = min_t(size_t, len, SJA1105_SIZE_SPI_MSG_MAXLEN),
+ .reg_addr = reg_addr,
+ .buf = buf,
+ };
+ struct spi_device *spi = priv->spidev;
+ struct spi_transfer *xfers;
+ int num_chunks;
+ int rc, i = 0;
+ u8 *hdr_bufs;
- if (msg_len > SJA1105_SIZE_SPI_TRANSFER_MAX)
- return -ERANGE;
+ num_chunks = DIV_ROUND_UP(len, SJA1105_SIZE_SPI_MSG_MAXLEN);
- msg.access = rw;
- msg.address = reg_addr;
- if (rw == SPI_READ)
- msg.read_count = size_bytes / 4;
+ /* One transfer for each message header, one for each message
+ * payload (chunk).
+ */
+ xfers = kcalloc(2 * num_chunks, sizeof(struct spi_transfer),
+ GFP_KERNEL);
+ if (!xfers)
+ return -ENOMEM;
- sja1105_spi_message_pack(tx_buf, &msg);
+ /* Packed buffers for the num_chunks SPI message headers,
+ * stored as a contiguous array
+ */
+ hdr_bufs = kcalloc(num_chunks, SJA1105_SIZE_SPI_MSG_HEADER,
+ GFP_KERNEL);
+ if (!hdr_bufs) {
+ kfree(xfers);
+ return -ENOMEM;
+ }
- if (rw == SPI_WRITE)
- memcpy(tx_buf + SJA1105_SIZE_SPI_MSG_HEADER,
- packed_buf, size_bytes);
+ for (i = 0; i < num_chunks; i++) {
+ struct spi_transfer *chunk_xfer = sja1105_chunk_xfer(xfers, i);
+ struct spi_transfer *hdr_xfer = sja1105_hdr_xfer(xfers, i);
+ u8 *hdr_buf = sja1105_hdr_buf(hdr_bufs, i);
+ struct spi_transfer *ptp_sts_xfer;
+ struct sja1105_spi_message msg;
+
+ /* Populate the transfer's header buffer */
+ msg.address = chunk.reg_addr;
+ msg.access = rw;
+ if (rw == SPI_READ)
+ msg.read_count = chunk.len / 4;
+ else
+ /* Ignored */
+ msg.read_count = 0;
+ sja1105_spi_message_pack(hdr_buf, &msg);
+ hdr_xfer->tx_buf = hdr_buf;
+ hdr_xfer->len = SJA1105_SIZE_SPI_MSG_HEADER;
+
+ /* Populate the transfer's data buffer */
+ if (rw == SPI_READ)
+ chunk_xfer->rx_buf = chunk.buf;
+ else
+ chunk_xfer->tx_buf = chunk.buf;
+ chunk_xfer->len = chunk.len;
+
+ /* Request timestamping for the transfer. Instead of letting
+ * callers specify which byte they want to timestamp, we can
+ * make certain assumptions:
+ * - A read operation will request a software timestamp when
+ * what's being read is the PTP time. That is snapshotted by
+ * the switch hardware at the end of the command portion
+ * (hdr_xfer).
+ * - A write operation will request a software timestamp on
+ * actions that modify the PTP time. Taking clock stepping as
+ * an example, the switch writes the PTP time at the end of
+ * the data portion (chunk_xfer).
+ */
+ if (rw == SPI_READ)
+ ptp_sts_xfer = hdr_xfer;
+ else
+ ptp_sts_xfer = chunk_xfer;
+ ptp_sts_xfer->ptp_sts_word_pre = ptp_sts_xfer->len - 1;
+ ptp_sts_xfer->ptp_sts_word_post = ptp_sts_xfer->len - 1;
+ ptp_sts_xfer->ptp_sts = ptp_sts;
+
+ /* Calculate next chunk */
+ chunk.buf += chunk.len;
+ chunk.reg_addr += chunk.len / 4;
+ chunk.len = min_t(size_t, (ptrdiff_t)(buf + len - chunk.buf),
+ SJA1105_SIZE_SPI_MSG_MAXLEN);
+
+ /* De-assert the chip select after each chunk. */
+ if (chunk.len)
+ chunk_xfer->cs_change = 1;
+ }
- rc = sja1105_spi_transfer(priv, tx_buf, rx_buf, msg_len);
+ rc = spi_sync_transfer(spi, xfers, 2 * num_chunks);
if (rc < 0)
- return rc;
+ dev_err(&spi->dev, "SPI transfer failed: %d\n", rc);
- if (rw == SPI_READ)
- memcpy(packed_buf, rx_buf + SJA1105_SIZE_SPI_MSG_HEADER,
- size_bytes);
+ kfree(hdr_bufs);
+ kfree(xfers);
- return 0;
+ return rc;
+}
+
+int sja1105_xfer_buf(const struct sja1105_private *priv,
+ sja1105_spi_rw_mode_t rw, u64 reg_addr,
+ u8 *buf, size_t len)
+{
+ return sja1105_xfer(priv, rw, reg_addr, buf, len, NULL);
}
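
Two details in sja1105_xfer() above are easy to miss. First, the register address of each successive chunk advances by chunk.len / 4, because SPI addresses on this switch index 32-bit words while the buffer pointer advances in bytes. Second, ptp_sts_word_pre/post ask the SPI core to snapshot system time around the last byte of the command portion (for reads) or the data portion (for writes). A worked example of the chunk arithmetic for a hypothetical 600-byte buffer at register address 0x20000, with the 256-byte payload limit above:

	/* len = 600, SJA1105_SIZE_SPI_MSG_MAXLEN = 256 -> 3 chunks:
	 *
	 *   chunk 0: buf + 0,   256 bytes, reg_addr 0x20000
	 *   chunk 1: buf + 256, 256 bytes, reg_addr 0x20040  (+256/4)
	 *   chunk 2: buf + 512,  88 bytes, reg_addr 0x20080  (+256/4)
	 *
	 * cs_change is set on chunks 0 and 1 (another chunk follows),
	 * so chip select toggles between messages; the final chunk
	 * leaves it clear and CS de-asserts when the transfer ends.
	 */
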
/* If @rw is:
* - SPI_WRITE: creates and sends an SPI write message at absolute
- * address reg_addr, taking size_bytes from *packed_buf
+ * address reg_addr
* - SPI_READ: creates and sends an SPI read message from absolute
- * address reg_addr, writing size_bytes into *packed_buf
+ * address reg_addr
*
* The u64 *value is unpacked, meaning that it's stored in the native
* CPU endianness and directly usable by software running on the core.
- *
- * This is a wrapper around sja1105_spi_send_packed_buf().
*/
-int sja1105_spi_send_int(const struct sja1105_private *priv,
- sja1105_spi_rw_mode_t rw, u64 reg_addr,
- u64 *value, u64 size_bytes)
+int sja1105_xfer_u64(const struct sja1105_private *priv,
+ sja1105_spi_rw_mode_t rw, u64 reg_addr, u64 *value,
+ struct ptp_system_timestamp *ptp_sts)
{
- u8 packed_buf[SJA1105_SIZE_SPI_MSG_MAXLEN];
+ u8 packed_buf[8];
int rc;
- if (size_bytes > SJA1105_SIZE_SPI_MSG_MAXLEN)
- return -ERANGE;
-
if (rw == SPI_WRITE)
- sja1105_pack(packed_buf, value, 8 * size_bytes - 1, 0,
- size_bytes);
+ sja1105_pack(packed_buf, value, 63, 0, 8);
- rc = sja1105_spi_send_packed_buf(priv, rw, reg_addr, packed_buf,
- size_bytes);
+ rc = sja1105_xfer(priv, rw, reg_addr, packed_buf, 8, ptp_sts);
if (rw == SPI_READ)
- sja1105_unpack(packed_buf, value, 8 * size_bytes - 1, 0,
- size_bytes);
+ sja1105_unpack(packed_buf, value, 63, 0, 8);
return rc;
}
-/* Should be used if a @packed_buf larger than SJA1105_SIZE_SPI_MSG_MAXLEN
- * must be sent/received. Splitting the buffer into chunks and assembling
- * those into SPI messages is done automatically by this function.
- */
-int sja1105_spi_send_long_packed_buf(const struct sja1105_private *priv,
- sja1105_spi_rw_mode_t rw, u64 base_addr,
- void *packed_buf, u64 buf_len)
+/* Same as above, but transfers only a 4 byte word */
+int sja1105_xfer_u32(const struct sja1105_private *priv,
+ sja1105_spi_rw_mode_t rw, u64 reg_addr, u32 *value,
+ struct ptp_system_timestamp *ptp_sts)
{
- struct chunk {
- void *buf_ptr;
- int len;
- u64 spi_address;
- } chunk;
- int distance_to_end;
+ u8 packed_buf[4];
+ u64 tmp;
int rc;
- /* Initialize chunk */
- chunk.buf_ptr = packed_buf;
- chunk.spi_address = base_addr;
- chunk.len = min_t(int, buf_len, SJA1105_SIZE_SPI_MSG_MAXLEN);
-
- while (chunk.len) {
- rc = sja1105_spi_send_packed_buf(priv, rw, chunk.spi_address,
- chunk.buf_ptr, chunk.len);
- if (rc < 0)
- return rc;
-
- chunk.buf_ptr += chunk.len;
- chunk.spi_address += chunk.len / 4;
- distance_to_end = (uintptr_t)(packed_buf + buf_len -
- chunk.buf_ptr);
- chunk.len = min(distance_to_end, SJA1105_SIZE_SPI_MSG_MAXLEN);
+ if (rw == SPI_WRITE) {
+ /* The packing API only supports u64 as CPU word size,
+ * so we need to convert.
+ */
+ tmp = *value;
+ sja1105_pack(packed_buf, &tmp, 31, 0, 4);
}
- return 0;
-}
+ rc = sja1105_xfer(priv, rw, reg_addr, packed_buf, 4, ptp_sts);
-/* Back-ported structure from UM11040 Table 112.
- * Reset control register (addr. 100440h)
- * In the SJA1105 E/T, only warm_rst and cold_rst are
- * supported (exposed in UM10944 as rst_ctrl), but the bit
- * offsets of warm_rst and cold_rst are actually reversed.
- */
-struct sja1105_reset_cmd {
- u64 switch_rst;
- u64 cfg_rst;
- u64 car_rst;
- u64 otp_rst;
- u64 warm_rst;
- u64 cold_rst;
- u64 por_rst;
-};
-
-static void
-sja1105et_reset_cmd_pack(void *buf, const struct sja1105_reset_cmd *reset)
-{
- const int size = SJA1105_SIZE_RESET_CMD;
-
- memset(buf, 0, size);
-
- sja1105_pack(buf, &reset->cold_rst, 3, 3, size);
- sja1105_pack(buf, &reset->warm_rst, 2, 2, size);
-}
-
-static void
-sja1105pqrs_reset_cmd_pack(void *buf, const struct sja1105_reset_cmd *reset)
-{
- const int size = SJA1105_SIZE_RESET_CMD;
-
- memset(buf, 0, size);
+ if (rw == SPI_READ) {
+ sja1105_unpack(packed_buf, &tmp, 31, 0, 4);
+ *value = tmp;
+ }
- sja1105_pack(buf, &reset->switch_rst, 8, 8, size);
- sja1105_pack(buf, &reset->cfg_rst, 7, 7, size);
- sja1105_pack(buf, &reset->car_rst, 5, 5, size);
- sja1105_pack(buf, &reset->otp_rst, 4, 4, size);
- sja1105_pack(buf, &reset->warm_rst, 3, 3, size);
- sja1105_pack(buf, &reset->cold_rst, 2, 2, size);
- sja1105_pack(buf, &reset->por_rst, 1, 1, size);
+ return rc;
}
-static int sja1105et_reset_cmd(const void *ctx, const void *data)
+static int sja1105et_reset_cmd(struct dsa_switch *ds)
{
- const struct sja1105_private *priv = ctx;
- const struct sja1105_reset_cmd *reset = data;
+ struct sja1105_private *priv = ds->priv;
const struct sja1105_regs *regs = priv->info->regs;
- struct device *dev = priv->ds->dev;
- u8 packed_buf[SJA1105_SIZE_RESET_CMD];
-
- if (reset->switch_rst ||
- reset->cfg_rst ||
- reset->car_rst ||
- reset->otp_rst ||
- reset->por_rst) {
- dev_err(dev, "Only warm and cold reset is supported "
- "for SJA1105 E/T!\n");
- return -EINVAL;
- }
-
- if (reset->warm_rst)
- dev_dbg(dev, "Warm reset requested\n");
- if (reset->cold_rst)
- dev_dbg(dev, "Cold reset requested\n");
+ u8 packed_buf[SJA1105_SIZE_RESET_CMD] = {0};
+ const int size = SJA1105_SIZE_RESET_CMD;
+ u64 cold_rst = 1;
- sja1105et_reset_cmd_pack(packed_buf, reset);
+ sja1105_pack(packed_buf, &cold_rst, 3, 3, size);
- return sja1105_spi_send_packed_buf(priv, SPI_WRITE, regs->rgu,
- packed_buf, SJA1105_SIZE_RESET_CMD);
+ return sja1105_xfer_buf(priv, SPI_WRITE, regs->rgu, packed_buf,
+ SJA1105_SIZE_RESET_CMD);
}
-static int sja1105pqrs_reset_cmd(const void *ctx, const void *data)
+static int sja1105pqrs_reset_cmd(struct dsa_switch *ds)
{
- const struct sja1105_private *priv = ctx;
- const struct sja1105_reset_cmd *reset = data;
+ struct sja1105_private *priv = ds->priv;
const struct sja1105_regs *regs = priv->info->regs;
- struct device *dev = priv->ds->dev;
- u8 packed_buf[SJA1105_SIZE_RESET_CMD];
-
- if (reset->switch_rst)
- dev_dbg(dev, "Main reset for all functional modules requested\n");
- if (reset->cfg_rst)
- dev_dbg(dev, "Chip configuration reset requested\n");
- if (reset->car_rst)
- dev_dbg(dev, "Clock and reset control logic reset requested\n");
- if (reset->otp_rst)
- dev_dbg(dev, "OTP read cycle for reading product "
- "config settings requested\n");
- if (reset->warm_rst)
- dev_dbg(dev, "Warm reset requested\n");
- if (reset->cold_rst)
- dev_dbg(dev, "Cold reset requested\n");
- if (reset->por_rst)
- dev_dbg(dev, "Power-on reset requested\n");
-
- sja1105pqrs_reset_cmd_pack(packed_buf, reset);
-
- return sja1105_spi_send_packed_buf(priv, SPI_WRITE, regs->rgu,
- packed_buf, SJA1105_SIZE_RESET_CMD);
-}
+ u8 packed_buf[SJA1105_SIZE_RESET_CMD] = {0};
+ const int size = SJA1105_SIZE_RESET_CMD;
+ u64 cold_rst = 1;
-static int sja1105_cold_reset(const struct sja1105_private *priv)
-{
- struct sja1105_reset_cmd reset = {0};
+ sja1105_pack(packed_buf, &cold_rst, 2, 2, size);
- reset.cold_rst = 1;
- return priv->info->reset_cmd(priv, &reset);
+ return sja1105_xfer_buf(priv, SPI_WRITE, regs->rgu, packed_buf,
+ SJA1105_SIZE_RESET_CMD);
}
int sja1105_inhibit_tx(const struct sja1105_private *priv,
unsigned long port_bitmap, bool tx_inhibited)
{
const struct sja1105_regs *regs = priv->info->regs;
- u64 inhibit_cmd;
+ u32 inhibit_cmd;
int rc;
- rc = sja1105_spi_send_int(priv, SPI_READ, regs->port_control,
- &inhibit_cmd, SJA1105_SIZE_PORT_CTRL);
+ rc = sja1105_xfer_u32(priv, SPI_READ, regs->port_control,
+ &inhibit_cmd, NULL);
if (rc < 0)
return rc;
@@ -300,8 +250,8 @@ int sja1105_inhibit_tx(const struct sja1105_private *priv,
else
inhibit_cmd &= ~port_bitmap;
- return sja1105_spi_send_int(priv, SPI_WRITE, regs->port_control,
- &inhibit_cmd, SJA1105_SIZE_PORT_CTRL);
+ return sja1105_xfer_u32(priv, SPI_WRITE, regs->port_control,
+ &inhibit_cmd, NULL);
}
struct sja1105_status {
@@ -339,9 +289,7 @@ static int sja1105_status_get(struct sja1105_private *priv,
u8 packed_buf[4];
int rc;
- rc = sja1105_spi_send_packed_buf(priv, SPI_READ,
- regs->status,
- packed_buf, 4);
+ rc = sja1105_xfer_buf(priv, SPI_READ, regs->status, packed_buf, 4);
if (rc < 0)
return rc;
@@ -429,7 +377,7 @@ int sja1105_static_config_upload(struct sja1105_private *priv)
usleep_range(500, 1000);
do {
/* Put the SJA1105 in programming mode */
- rc = sja1105_cold_reset(priv);
+ rc = priv->info->reset_cmd(priv->ds);
if (rc < 0) {
dev_err(dev, "Failed to reset switch, retrying...\n");
continue;
@@ -437,9 +385,8 @@ int sja1105_static_config_upload(struct sja1105_private *priv)
/* Wait for the switch to come out of reset */
usleep_range(1000, 5000);
/* Upload the static config to the device */
- rc = sja1105_spi_send_long_packed_buf(priv, SPI_WRITE,
- regs->config,
- config_buf, buf_len);
+ rc = sja1105_xfer_buf(priv, SPI_WRITE, regs->config,
+ config_buf, buf_len);
if (rc < 0) {
dev_err(dev, "Failed to upload config, retrying...\n");
continue;
@@ -482,12 +429,6 @@ int sja1105_static_config_upload(struct sja1105_private *priv)
 		dev_info(dev, "Succeeded after %d tries\n", RETRIES - retries);
}
- rc = sja1105_ptp_reset(priv);
- if (rc < 0)
- dev_err(dev, "Failed to reset PTP clock: %d\n", rc);
-
- dev_info(dev, "Reset switch and programmed static config\n");
-
out:
kfree(config_buf);
return rc;
@@ -516,10 +457,11 @@ static struct sja1105_regs sja1105et_regs = {
.rmii_ref_clk = {0x100015, 0x10001C, 0x100023, 0x10002A, 0x100031},
.rmii_ext_tx_clk = {0x100018, 0x10001F, 0x100026, 0x10002D, 0x100034},
.ptpegr_ts = {0xC0, 0xC2, 0xC4, 0xC6, 0xC8},
+ .ptpschtm = 0x12, /* Spans 0x12 to 0x13 */
.ptp_control = 0x17,
- .ptpclk = 0x18, /* Spans 0x18 to 0x19 */
+ .ptpclkval = 0x18, /* Spans 0x18 to 0x19 */
.ptpclkrate = 0x1A,
- .ptptsclk = 0x1B, /* Spans 0x1B to 0x1C */
+ .ptpclkcorp = 0x1D,
};
static struct sja1105_regs sja1105pqrs_regs = {
@@ -547,10 +489,11 @@ static struct sja1105_regs sja1105pqrs_regs = {
.rmii_ext_tx_clk = {0x100017, 0x10001D, 0x100023, 0x100029, 0x10002F},
.qlevel = {0x604, 0x614, 0x624, 0x634, 0x644},
.ptpegr_ts = {0xC0, 0xC4, 0xC8, 0xCC, 0xD0},
+ .ptpschtm = 0x13, /* Spans 0x13 to 0x14 */
.ptp_control = 0x18,
- .ptpclk = 0x19,
+ .ptpclkval = 0x19,
.ptpclkrate = 0x1B,
- .ptptsclk = 0x1C,
+ .ptpclkcorp = 0x1E,
};
struct sja1105_info sja1105e_info = {
@@ -563,7 +506,7 @@ struct sja1105_info sja1105e_info = {
.reset_cmd = sja1105et_reset_cmd,
.fdb_add_cmd = sja1105et_fdb_add,
.fdb_del_cmd = sja1105et_fdb_del,
- .ptp_cmd = sja1105et_ptp_cmd,
+ .ptp_cmd_packing = sja1105et_ptp_cmd_packing,
.regs = &sja1105et_regs,
.name = "SJA1105E",
};
@@ -577,7 +520,7 @@ struct sja1105_info sja1105t_info = {
.reset_cmd = sja1105et_reset_cmd,
.fdb_add_cmd = sja1105et_fdb_add,
.fdb_del_cmd = sja1105et_fdb_del,
- .ptp_cmd = sja1105et_ptp_cmd,
+ .ptp_cmd_packing = sja1105et_ptp_cmd_packing,
.regs = &sja1105et_regs,
.name = "SJA1105T",
};
@@ -592,7 +535,7 @@ struct sja1105_info sja1105p_info = {
.reset_cmd = sja1105pqrs_reset_cmd,
.fdb_add_cmd = sja1105pqrs_fdb_add,
.fdb_del_cmd = sja1105pqrs_fdb_del,
- .ptp_cmd = sja1105pqrs_ptp_cmd,
+ .ptp_cmd_packing = sja1105pqrs_ptp_cmd_packing,
.regs = &sja1105pqrs_regs,
.name = "SJA1105P",
};
@@ -607,7 +550,7 @@ struct sja1105_info sja1105q_info = {
.reset_cmd = sja1105pqrs_reset_cmd,
.fdb_add_cmd = sja1105pqrs_fdb_add,
.fdb_del_cmd = sja1105pqrs_fdb_del,
- .ptp_cmd = sja1105pqrs_ptp_cmd,
+ .ptp_cmd_packing = sja1105pqrs_ptp_cmd_packing,
.regs = &sja1105pqrs_regs,
.name = "SJA1105Q",
};
@@ -622,7 +565,7 @@ struct sja1105_info sja1105r_info = {
.reset_cmd = sja1105pqrs_reset_cmd,
.fdb_add_cmd = sja1105pqrs_fdb_add,
.fdb_del_cmd = sja1105pqrs_fdb_del,
- .ptp_cmd = sja1105pqrs_ptp_cmd,
+ .ptp_cmd_packing = sja1105pqrs_ptp_cmd_packing,
.regs = &sja1105pqrs_regs,
.name = "SJA1105R",
};
@@ -638,6 +581,6 @@ struct sja1105_info sja1105s_info = {
.reset_cmd = sja1105pqrs_reset_cmd,
.fdb_add_cmd = sja1105pqrs_fdb_add,
.fdb_del_cmd = sja1105pqrs_fdb_del,
- .ptp_cmd = sja1105pqrs_ptp_cmd,
+ .ptp_cmd_packing = sja1105pqrs_ptp_cmd_packing,
.name = "SJA1105S",
};
diff --git a/drivers/net/dsa/sja1105/sja1105_tas.c b/drivers/net/dsa/sja1105/sja1105_tas.c
index 33eca6a82ec5..26b925b5dace 100644
--- a/drivers/net/dsa/sja1105/sja1105_tas.c
+++ b/drivers/net/dsa/sja1105/sja1105_tas.c
@@ -10,6 +10,11 @@
#define SJA1105_TAS_MAX_DELTA BIT(19)
#define SJA1105_GATE_MASK GENMASK_ULL(SJA1105_NUM_TC - 1, 0)
+#define work_to_sja1105_tas(d) \
+ container_of((d), struct sja1105_tas_data, tas_work)
+#define tas_to_sja1105(d) \
+ container_of((d), struct sja1105_private, tas_data)
+
/* This is not a preprocessor macro because the "ns" argument may or may not be
* s64 at caller side. This ensures it is properly type-cast before div_s64.
*/
@@ -18,6 +23,100 @@ static s64 ns_to_sja1105_delta(s64 ns)
return div_s64(ns, 200);
}
+static s64 sja1105_delta_to_ns(s64 delta)
+{
+ return delta * 200;
+}
+
+/* Calculate the first base_time in the future that satisfies this
+ * relationship:
+ *
+ * future_base_time = base_time + N x cycle_time >= now, or
+ *
+ * now - base_time
+ * N >= ---------------
+ * cycle_time
+ *
+ * Because N is an integer, the ceiling value of the above "a / b" ratio
+ * is in fact precisely the floor value of "(a + b - 1) / b", which is
+ * easier to calculate only having integer division tools.
+ */
+static s64 future_base_time(s64 base_time, s64 cycle_time, s64 now)
+{
+ s64 a, b, n;
+
+ if (base_time >= now)
+ return base_time;
+
+ a = now - base_time;
+ b = cycle_time;
+ n = div_s64(a + b - 1, b);
+
+ return base_time + n * cycle_time;
+}
+
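A quick numeric check of the ceiling identity: with base_time = 1000, cycle_time = 300 and now = 1900, we get a = 900 and n = (900 + 299) / 300 = 3 in integer division, so the function returns 1000 + 3 * 300 = 1900, the first point on the grid that is >= now.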
+static int sja1105_tas_set_runtime_params(struct sja1105_private *priv)
+{
+ struct sja1105_tas_data *tas_data = &priv->tas_data;
+ struct dsa_switch *ds = priv->ds;
+ s64 earliest_base_time = S64_MAX;
+ s64 latest_base_time = 0;
+ s64 its_cycle_time = 0;
+ s64 max_cycle_time = 0;
+ int port;
+
+ tas_data->enabled = false;
+
+ for (port = 0; port < SJA1105_NUM_PORTS; port++) {
+ const struct tc_taprio_qopt_offload *offload;
+
+ offload = tas_data->offload[port];
+ if (!offload)
+ continue;
+
+ tas_data->enabled = true;
+
+ if (max_cycle_time < offload->cycle_time)
+ max_cycle_time = offload->cycle_time;
+ if (latest_base_time < offload->base_time)
+ latest_base_time = offload->base_time;
+ if (earliest_base_time > offload->base_time) {
+ earliest_base_time = offload->base_time;
+ its_cycle_time = offload->cycle_time;
+ }
+ }
+
+ if (!tas_data->enabled)
+ return 0;
+
+ /* Roll the earliest base time over until it is in a comparable
+ * time base with the latest, then compare their deltas.
+ * We want to enforce that all ports' base times are within
+	 * SJA1105_TAS_MAX_DELTA deltas (of 200 ns each) of one another.
+ */
+ earliest_base_time = future_base_time(earliest_base_time,
+ its_cycle_time,
+ latest_base_time);
+ while (earliest_base_time > latest_base_time)
+ earliest_base_time -= its_cycle_time;
+ if (latest_base_time - earliest_base_time >
+ sja1105_delta_to_ns(SJA1105_TAS_MAX_DELTA)) {
+ dev_err(ds->dev,
+			"Base times too far apart: min %lld max %lld\n",
+ earliest_base_time, latest_base_time);
+ return -ERANGE;
+ }
+
+ tas_data->earliest_base_time = earliest_base_time;
+ tas_data->max_cycle_time = max_cycle_time;
+
+ dev_dbg(ds->dev, "earliest base time %lld ns\n", earliest_base_time);
+ dev_dbg(ds->dev, "latest base time %lld ns\n", latest_base_time);
+ dev_dbg(ds->dev, "longest cycle time %lld ns\n", max_cycle_time);
+
+ return 0;
+}
+
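For example (hypothetical figures): take two ports, one with base time 0 and cycle time 10 ms, the other with base time 205 ms. The earliest base time rolls forward to 210 ms, is stepped back one cycle to 200 ms, and the resulting 5 ms gap is well within the ~104.86 ms that SJA1105_TAS_MAX_DELTA (2^19 deltas of 200 ns) allows.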
/* Lo and behold: the egress scheduler from hell.
*
+ * At the hardware level, the Time-Aware Shaper holds a global linear array of
@@ -99,7 +198,11 @@ static int sja1105_init_scheduling(struct sja1105_private *priv)
int num_cycles = 0;
int cycle = 0;
int i, k = 0;
- int port;
+ int port, rc;
+
+ rc = sja1105_tas_set_runtime_params(priv);
+ if (rc < 0)
+ return rc;
/* Discard previous Schedule Table */
table = &priv->static_config.tables[BLK_IDX_SCHEDULE];
@@ -184,11 +287,13 @@ static int sja1105_init_scheduling(struct sja1105_private *priv)
schedule_entry_points = table->entries;
/* Finally start populating the static config tables */
- schedule_entry_points_params->clksrc = SJA1105_TAS_CLKSRC_STANDALONE;
+ schedule_entry_points_params->clksrc = SJA1105_TAS_CLKSRC_PTP;
schedule_entry_points_params->actsubsch = num_cycles - 1;
for (port = 0; port < SJA1105_NUM_PORTS; port++) {
const struct tc_taprio_qopt_offload *offload;
+ /* Relative base time */
+ s64 rbt;
offload = tas_data->offload[port];
if (!offload)
@@ -196,15 +301,21 @@ static int sja1105_init_scheduling(struct sja1105_private *priv)
schedule_start_idx = k;
schedule_end_idx = k + offload->num_entries - 1;
- /* TODO this is the base time for the port's subschedule,
- * relative to PTPSCHTM. But as we're using the standalone
- * clock source and not PTP clock as time reference, there's
- * little point in even trying to put more logic into this,
- * like preserving the phases between the subschedules of
- * different ports. We'll get all of that when switching to the
- * PTP clock source.
+ /* This is the base time expressed as a number of TAS ticks
+ * relative to PTPSCHTM, which we'll (perhaps improperly) call
+ * the operational base time.
+ */
+ rbt = future_base_time(offload->base_time,
+ offload->cycle_time,
+ tas_data->earliest_base_time);
+ rbt -= tas_data->earliest_base_time;
+ /* UM10944.pdf 4.2.2. Schedule Entry Points table says that
+ * delta cannot be zero, which is shitty. Advance all relative
+	 * delta cannot be zero, which is unfortunate. Advance all relative
+ * time becomes 1 in relative terms. Then start the operational
+ * base time (PTPSCHTM) one TAS delta earlier than planned.
*/
- entry_point_delta = 1;
+ entry_point_delta = ns_to_sja1105_delta(rbt) + 1;
schedule_entry_points[cycle].subschindx = cycle;
schedule_entry_points[cycle].delta = entry_point_delta;
@@ -352,7 +463,7 @@ int sja1105_setup_tc_taprio(struct dsa_switch *ds, int port,
if (rc < 0)
return rc;
- return sja1105_static_config_reload(priv);
+ return sja1105_static_config_reload(priv, SJA1105_SCHEDULING);
}
/* The cycle time extension is the amount of time the last cycle from
@@ -400,11 +511,306 @@ int sja1105_setup_tc_taprio(struct dsa_switch *ds, int port,
if (rc < 0)
return rc;
- return sja1105_static_config_reload(priv);
+ return sja1105_static_config_reload(priv, SJA1105_SCHEDULING);
+}
+
+static int sja1105_tas_check_running(struct sja1105_private *priv)
+{
+ struct sja1105_tas_data *tas_data = &priv->tas_data;
+ struct dsa_switch *ds = priv->ds;
+ struct sja1105_ptp_cmd cmd = {0};
+ int rc;
+
+ rc = sja1105_ptp_commit(ds, &cmd, SPI_READ);
+ if (rc < 0)
+ return rc;
+
+ if (cmd.ptpstrtsch == 1)
+ /* Schedule successfully started */
+ tas_data->state = SJA1105_TAS_STATE_RUNNING;
+ else if (cmd.ptpstopsch == 1)
+ /* Schedule is stopped */
+ tas_data->state = SJA1105_TAS_STATE_DISABLED;
+ else
+ /* Schedule is probably not configured with PTP clock source */
+ rc = -EINVAL;
+
+ return rc;
+}
+
+/* Write to PTPCLKCORP */
+static int sja1105_tas_adjust_drift(struct sja1105_private *priv,
+ u64 correction)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ u32 ptpclkcorp = ns_to_sja1105_ticks(correction);
+
+ return sja1105_xfer_u32(priv, SPI_WRITE, regs->ptpclkcorp,
+ &ptpclkcorp, NULL);
+}
+
+/* Write to PTPSCHTM */
+static int sja1105_tas_set_base_time(struct sja1105_private *priv,
+ u64 base_time)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ u64 ptpschtm = ns_to_sja1105_ticks(base_time);
+
+ return sja1105_xfer_u64(priv, SPI_WRITE, regs->ptpschtm,
+ &ptpschtm, NULL);
+}
+
+static int sja1105_tas_start(struct sja1105_private *priv)
+{
+ struct sja1105_tas_data *tas_data = &priv->tas_data;
+ struct sja1105_ptp_cmd *cmd = &priv->ptp_data.cmd;
+ struct dsa_switch *ds = priv->ds;
+ int rc;
+
+ dev_dbg(ds->dev, "Starting the TAS\n");
+
+ if (tas_data->state == SJA1105_TAS_STATE_ENABLED_NOT_RUNNING ||
+ tas_data->state == SJA1105_TAS_STATE_RUNNING) {
+ dev_err(ds->dev, "TAS already started\n");
+ return -EINVAL;
+ }
+
+ cmd->ptpstrtsch = 1;
+ cmd->ptpstopsch = 0;
+
+ rc = sja1105_ptp_commit(ds, cmd, SPI_WRITE);
+ if (rc < 0)
+ return rc;
+
+ tas_data->state = SJA1105_TAS_STATE_ENABLED_NOT_RUNNING;
+
+ return 0;
+}
+
+static int sja1105_tas_stop(struct sja1105_private *priv)
+{
+ struct sja1105_tas_data *tas_data = &priv->tas_data;
+ struct sja1105_ptp_cmd *cmd = &priv->ptp_data.cmd;
+ struct dsa_switch *ds = priv->ds;
+ int rc;
+
+ dev_dbg(ds->dev, "Stopping the TAS\n");
+
+ if (tas_data->state == SJA1105_TAS_STATE_DISABLED) {
+ dev_err(ds->dev, "TAS already disabled\n");
+ return -EINVAL;
+ }
+
+ cmd->ptpstopsch = 1;
+ cmd->ptpstrtsch = 0;
+
+ rc = sja1105_ptp_commit(ds, cmd, SPI_WRITE);
+ if (rc < 0)
+ return rc;
+
+ tas_data->state = SJA1105_TAS_STATE_DISABLED;
+
+ return 0;
+}
+
+/* The schedule engine and the PTP clock are driven by the same oscillator, and
+ * they run in parallel. But whilst the PTP clock can keep an absolute
+ * time-of-day, the schedule engine is only running in 'ticks' (25 ticks make
+ * up a delta, which is 200ns), and wrapping around at the end of each cycle.
+ * The schedule engine is started when the PTP clock reaches the PTPSCHTM time
+ * (in PTP domain).
+ * Because the PTP clock can be rate-corrected (accelerated or slowed down) by
+ * a software servo, and the schedule engine clock runs in parallel to the PTP
+ * clock, there is logic internal to the switch that periodically keeps the
+ * schedule engine from drifting away. The frequency with which this internal
+ * syntonization happens is the PTP clock correction period (PTPCLKCORP). It is
+ * a value also in the PTP clock domain, and is also rate-corrected.
+ * To be precise, during a correction period, there is logic to determine by
+ * how many scheduler clock ticks the PTP clock has drifted. At the end of each
+ * correction period/beginning of new one, the length of a delta is shrunk or
+ * expanded with an integer number of ticks, compared with the typical 25.
+ * So a delta lasts for 200ns (or 25 ticks) only on average.
+ * Sometimes it is longer, sometimes it is shorter. The internal syntonization
+ * logic can adjust by at most 5 ticks in each 20-tick window.
+ *
+ * The first implication is that you should choose your schedule correction
+ * period to be an integer multiple of the schedule length. Preferably one.
+ * In case there are schedules of multiple ports active, then the correction
+ * period needs to be a multiple of them all. Given the restriction that the
+ * cycle times have to be multiples of one another anyway, this means the
+ * correction period can simply be the largest cycle time, hence the current
+ * choice. This way, the updates are always synchronous to the transmission
+ * cycle, and therefore predictable.
+ *
+ * The second implication is that at the beginning of a correction period, the
+ * first few deltas will be modulated in time, until the schedule engine is
+ * properly phase-aligned with the PTP clock. For this reason, you should place
+ * your best-effort traffic at the beginning of a cycle, and your
+ * time-triggered traffic afterwards.
+ *
+ * The third implication is that once the schedule engine is started, it can
+ * only adjust for so much drift within a correction period. In the servo you
+ * can only change the PTPCLKRATE, but not step the clock (PTPCLKADD). If you
+ * want to do the latter, you need to stop and restart the schedule engine,
+ * which is what the state machine handles.
+ */
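Putting numbers on this (a sketch; it assumes ns_to_sja1105_ticks() divides by 8, since 25 ticks make up one 200 ns delta): with a largest cycle time of 1 ms, sja1105_tas_adjust_drift() would program PTPCLKCORP with 1000000 / 8 = 125000 ticks, i.e. one syntonization point per transmission cycle.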
+static void sja1105_tas_state_machine(struct work_struct *work)
+{
+ struct sja1105_tas_data *tas_data = work_to_sja1105_tas(work);
+ struct sja1105_private *priv = tas_to_sja1105(tas_data);
+ struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
+ struct timespec64 base_time_ts, now_ts;
+ struct dsa_switch *ds = priv->ds;
+ struct timespec64 diff;
+ s64 base_time, now;
+ int rc = 0;
+
+ mutex_lock(&ptp_data->lock);
+
+ switch (tas_data->state) {
+ case SJA1105_TAS_STATE_DISABLED:
+ /* Can't do anything at all if clock is still being stepped */
+ if (tas_data->last_op != SJA1105_PTP_ADJUSTFREQ)
+ break;
+
+ rc = sja1105_tas_adjust_drift(priv, tas_data->max_cycle_time);
+ if (rc < 0)
+ break;
+
+ rc = __sja1105_ptp_gettimex(ds, &now, NULL);
+ if (rc < 0)
+ break;
+
+ /* Plan to start the earliest schedule first. The others
+ * will be started in hardware, by way of their respective
+ * entry points delta.
+ * Try our best to avoid fringe cases (race condition between
+ * ptpschtm and ptpstrtsch) by pushing the oper_base_time at
+ * least one second in the future from now. This is not ideal,
+ * but this only needs to buy us time until the
+ * sja1105_tas_start command below gets executed.
+ */
+ base_time = future_base_time(tas_data->earliest_base_time,
+ tas_data->max_cycle_time,
+ now + 1ull * NSEC_PER_SEC);
+ base_time -= sja1105_delta_to_ns(1);
+
+ rc = sja1105_tas_set_base_time(priv, base_time);
+ if (rc < 0)
+ break;
+
+ tas_data->oper_base_time = base_time;
+
+ rc = sja1105_tas_start(priv);
+ if (rc < 0)
+ break;
+
+ base_time_ts = ns_to_timespec64(base_time);
+ now_ts = ns_to_timespec64(now);
+
+ dev_dbg(ds->dev, "OPER base time %lld.%09ld (now %lld.%09ld)\n",
+ base_time_ts.tv_sec, base_time_ts.tv_nsec,
+ now_ts.tv_sec, now_ts.tv_nsec);
+
+ break;
+
+ case SJA1105_TAS_STATE_ENABLED_NOT_RUNNING:
+ if (tas_data->last_op != SJA1105_PTP_ADJUSTFREQ) {
+			/* Clock was stepped... bad news for TAS */
+ sja1105_tas_stop(priv);
+ break;
+ }
+
+ /* Check if TAS has actually started, by comparing the
+ * scheduled start time with the SJA1105 PTP clock
+ */
+ rc = __sja1105_ptp_gettimex(ds, &now, NULL);
+ if (rc < 0)
+ break;
+
+ if (now < tas_data->oper_base_time) {
+ /* TAS has not started yet */
+ diff = ns_to_timespec64(tas_data->oper_base_time - now);
+ dev_dbg(ds->dev, "time to start: [%lld.%09ld]",
+ diff.tv_sec, diff.tv_nsec);
+ break;
+ }
+
+ /* Time elapsed, what happened? */
+ rc = sja1105_tas_check_running(priv);
+ if (rc < 0)
+ break;
+
+ if (tas_data->state != SJA1105_TAS_STATE_RUNNING)
+			/* TAS should have started by now */
+ dev_err(ds->dev,
+ "TAS not started despite time elapsed\n");
+
+ break;
+
+ case SJA1105_TAS_STATE_RUNNING:
+		/* Clock was stepped... bad news for TAS */
+ if (tas_data->last_op != SJA1105_PTP_ADJUSTFREQ) {
+ sja1105_tas_stop(priv);
+ break;
+ }
+
+ rc = sja1105_tas_check_running(priv);
+ if (rc < 0)
+ break;
+
+ if (tas_data->state != SJA1105_TAS_STATE_RUNNING)
+ dev_err(ds->dev, "TAS surprisingly stopped\n");
+
+ break;
+
+ default:
+ if (net_ratelimit())
+ dev_err(ds->dev, "TAS in an invalid state (incorrect use of API)!\n");
+ }
+
+ if (rc && net_ratelimit())
+ dev_err(ds->dev, "An operation returned %d\n", rc);
+
+ mutex_unlock(&ptp_data->lock);
+}
+
+void sja1105_tas_clockstep(struct dsa_switch *ds)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_tas_data *tas_data = &priv->tas_data;
+
+ if (!tas_data->enabled)
+ return;
+
+ tas_data->last_op = SJA1105_PTP_CLOCKSTEP;
+ schedule_work(&tas_data->tas_work);
+}
+
+void sja1105_tas_adjfreq(struct dsa_switch *ds)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_tas_data *tas_data = &priv->tas_data;
+
+ if (!tas_data->enabled)
+ return;
+
+ /* No reason to schedule the workqueue, nothing changed */
+ if (tas_data->state == SJA1105_TAS_STATE_RUNNING)
+ return;
+
+ tas_data->last_op = SJA1105_PTP_ADJUSTFREQ;
+ schedule_work(&tas_data->tas_work);
}
void sja1105_tas_setup(struct dsa_switch *ds)
{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_tas_data *tas_data = &priv->tas_data;
+
+ INIT_WORK(&tas_data->tas_work, sja1105_tas_state_machine);
+ tas_data->state = SJA1105_TAS_STATE_DISABLED;
+ tas_data->last_op = SJA1105_PTP_NONE;
}
void sja1105_tas_teardown(struct dsa_switch *ds)
@@ -413,6 +819,8 @@ void sja1105_tas_teardown(struct dsa_switch *ds)
struct tc_taprio_qopt_offload *offload;
int port;
+ cancel_work_sync(&priv->tas_data.tas_work);
+
for (port = 0; port < SJA1105_NUM_PORTS; port++) {
offload = priv->tas_data.offload[port];
if (!offload)
diff --git a/drivers/net/dsa/sja1105/sja1105_tas.h b/drivers/net/dsa/sja1105/sja1105_tas.h
index 0aad212d88b2..b226c3dfd5b1 100644
--- a/drivers/net/dsa/sja1105/sja1105_tas.h
+++ b/drivers/net/dsa/sja1105/sja1105_tas.h
@@ -8,8 +8,27 @@
#if IS_ENABLED(CONFIG_NET_DSA_SJA1105_TAS)
+enum sja1105_tas_state {
+ SJA1105_TAS_STATE_DISABLED,
+ SJA1105_TAS_STATE_ENABLED_NOT_RUNNING,
+ SJA1105_TAS_STATE_RUNNING,
+};
+
+enum sja1105_ptp_op {
+ SJA1105_PTP_NONE,
+ SJA1105_PTP_CLOCKSTEP,
+ SJA1105_PTP_ADJUSTFREQ,
+};
+
struct sja1105_tas_data {
struct tc_taprio_qopt_offload *offload[SJA1105_NUM_PORTS];
+ enum sja1105_tas_state state;
+ enum sja1105_ptp_op last_op;
+ struct work_struct tas_work;
+ s64 earliest_base_time;
+ s64 oper_base_time;
+ u64 max_cycle_time;
+ bool enabled;
};
int sja1105_setup_tc_taprio(struct dsa_switch *ds, int port,
@@ -19,6 +38,10 @@ void sja1105_tas_setup(struct dsa_switch *ds);
void sja1105_tas_teardown(struct dsa_switch *ds);
+void sja1105_tas_clockstep(struct dsa_switch *ds);
+
+void sja1105_tas_adjfreq(struct dsa_switch *ds);
+
#else
/* C doesn't allow empty structures, bah! */
@@ -36,6 +59,10 @@ static inline void sja1105_tas_setup(struct dsa_switch *ds) { }
static inline void sja1105_tas_teardown(struct dsa_switch *ds) { }
+static inline void sja1105_tas_clockstep(struct dsa_switch *ds) { }
+
+static inline void sja1105_tas_adjfreq(struct dsa_switch *ds) { }
+
#endif /* IS_ENABLED(CONFIG_NET_DSA_SJA1105_TAS) */
#endif /* _SJA1105_TAS_H */
diff --git a/drivers/net/dsa/vitesse-vsc73xx-core.c b/drivers/net/dsa/vitesse-vsc73xx-core.c
index 614377ef7956..42c1574d45f2 100644
--- a/drivers/net/dsa/vitesse-vsc73xx-core.c
+++ b/drivers/net/dsa/vitesse-vsc73xx-core.c
@@ -1178,9 +1178,12 @@ int vsc73xx_probe(struct vsc73xx *vsc)
 	 * We allocate 8 ports and avoid access to the nonexistent
* ports.
*/
- vsc->ds = dsa_switch_alloc(dev, 8);
+ vsc->ds = devm_kzalloc(dev, sizeof(*vsc->ds), GFP_KERNEL);
if (!vsc->ds)
return -ENOMEM;
+
+ vsc->ds->dev = dev;
+ vsc->ds->num_ports = 8;
vsc->ds->priv = vsc;
vsc->ds->ops = &vsc73xx_ds_ops;
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index 54e4d8b07f0e..3031a5fc5427 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -51,41 +51,15 @@ static void set_multicast_list(struct net_device *dev)
{
}
-struct pcpu_dstats {
- u64 tx_packets;
- u64 tx_bytes;
- struct u64_stats_sync syncp;
-};
-
static void dummy_get_stats64(struct net_device *dev,
struct rtnl_link_stats64 *stats)
{
- int i;
-
- for_each_possible_cpu(i) {
- const struct pcpu_dstats *dstats;
- u64 tbytes, tpackets;
- unsigned int start;
-
- dstats = per_cpu_ptr(dev->dstats, i);
- do {
- start = u64_stats_fetch_begin_irq(&dstats->syncp);
- tbytes = dstats->tx_bytes;
- tpackets = dstats->tx_packets;
- } while (u64_stats_fetch_retry_irq(&dstats->syncp, start));
- stats->tx_bytes += tbytes;
- stats->tx_packets += tpackets;
- }
+ dev_lstats_read(dev, &stats->tx_packets, &stats->tx_bytes);
}
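The per-CPU bookkeeping that the removed code did by hand now lives in a shared helper. Roughly, its shape is the following (a sketch, not a verbatim copy of the netdevice.h helper):

static inline void example_lstats_add(struct net_device *dev,
				      unsigned int len)
{
	struct pcpu_lstats *lstats = this_cpu_ptr(dev->lstats);

	/* Same u64_stats_sync discipline as before, factored out */
	u64_stats_update_begin(&lstats->syncp);
	lstats->packets++;
	lstats->bytes += len;
	u64_stats_update_end(&lstats->syncp);
}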
static netdev_tx_t dummy_xmit(struct sk_buff *skb, struct net_device *dev)
{
- struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
-
- u64_stats_update_begin(&dstats->syncp);
- dstats->tx_packets++;
- dstats->tx_bytes += skb->len;
- u64_stats_update_end(&dstats->syncp);
+ dev_lstats_add(dev, skb->len);
skb_tx_timestamp(skb);
dev_kfree_skb(skb);
@@ -94,8 +68,8 @@ static netdev_tx_t dummy_xmit(struct sk_buff *skb, struct net_device *dev)
static int dummy_dev_init(struct net_device *dev)
{
- dev->dstats = netdev_alloc_pcpu_stats(struct pcpu_dstats);
- if (!dev->dstats)
+ dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats);
+ if (!dev->lstats)
return -ENOMEM;
return 0;
@@ -103,7 +77,7 @@ static int dummy_dev_init(struct net_device *dev)
static void dummy_dev_uninit(struct net_device *dev)
{
- free_percpu(dev->dstats);
+ free_percpu(dev->lstats);
}
static int dummy_change_carrier(struct net_device *dev, bool new_carrier)
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index e8e9c166185d..4ded81b27d0a 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -78,7 +78,6 @@ source "drivers/net/ethernet/freescale/Kconfig"
source "drivers/net/ethernet/fujitsu/Kconfig"
source "drivers/net/ethernet/google/Kconfig"
source "drivers/net/ethernet/hisilicon/Kconfig"
-source "drivers/net/ethernet/hp/Kconfig"
source "drivers/net/ethernet/huawei/Kconfig"
source "drivers/net/ethernet/i825xx/Kconfig"
source "drivers/net/ethernet/ibm/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 05abebc17804..f8f38dcb5f8a 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -41,7 +41,6 @@ obj-$(CONFIG_NET_VENDOR_FREESCALE) += freescale/
obj-$(CONFIG_NET_VENDOR_FUJITSU) += fujitsu/
obj-$(CONFIG_NET_VENDOR_GOOGLE) += google/
obj-$(CONFIG_NET_VENDOR_HISILICON) += hisilicon/
-obj-$(CONFIG_NET_VENDOR_HP) += hp/
obj-$(CONFIG_NET_VENDOR_HUAWEI) += huawei/
obj-$(CONFIG_NET_VENDOR_IBM) += ibm/
obj-$(CONFIG_NET_VENDOR_INTEL) += intel/
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index bb032be7fe31..4cd53fc338b5 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -730,12 +730,12 @@ static int altera_tse_phy_get_addr_mdio_create(struct net_device *dev)
{
struct altera_tse_private *priv = netdev_priv(dev);
struct device_node *np = priv->device->of_node;
- int ret = 0;
+ int ret;
- priv->phy_iface = of_get_phy_mode(np);
+ ret = of_get_phy_mode(np, &priv->phy_iface);
/* Avoid get phy addr and create mdio if no phy is present */
- if (!priv->phy_iface)
+ if (ret)
return 0;
/* try to get PHY address from device tree, use PHY autodetection if
diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
index 16553d92fad2..a3250dcf7d53 100644
--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
@@ -133,7 +133,7 @@ static void ena_queue_stats(struct ena_adapter *adapter, u64 **data)
u64 *ptr;
int i, j;
- for (i = 0; i < adapter->num_queues; i++) {
+ for (i = 0; i < adapter->num_io_queues; i++) {
/* Tx stats */
ring = &adapter->tx_ring[i];
@@ -205,7 +205,7 @@ int ena_get_sset_count(struct net_device *netdev, int sset)
if (sset != ETH_SS_STATS)
return -EOPNOTSUPP;
- return adapter->num_queues * (ENA_STATS_ARRAY_TX + ENA_STATS_ARRAY_RX)
+ return adapter->num_io_queues * (ENA_STATS_ARRAY_TX + ENA_STATS_ARRAY_RX)
+ ENA_STATS_ARRAY_GLOBAL + ENA_STATS_ARRAY_ENA_COM;
}
@@ -214,7 +214,7 @@ static void ena_queue_strings(struct ena_adapter *adapter, u8 **data)
const struct ena_stats *ena_stats;
int i, j;
- for (i = 0; i < adapter->num_queues; i++) {
+ for (i = 0; i < adapter->num_io_queues; i++) {
/* Tx stats */
for (j = 0; j < ENA_STATS_ARRAY_TX; j++) {
ena_stats = &ena_stats_tx_strings[j];
@@ -333,7 +333,7 @@ static void ena_update_tx_rings_intr_moderation(struct ena_adapter *adapter)
val = ena_com_get_nonadaptive_moderation_interval_tx(adapter->ena_dev);
- for (i = 0; i < adapter->num_queues; i++)
+ for (i = 0; i < adapter->num_io_queues; i++)
adapter->tx_ring[i].smoothed_interval = val;
}
@@ -344,7 +344,7 @@ static void ena_update_rx_rings_intr_moderation(struct ena_adapter *adapter)
val = ena_com_get_nonadaptive_moderation_interval_rx(adapter->ena_dev);
- for (i = 0; i < adapter->num_queues; i++)
+ for (i = 0; i < adapter->num_io_queues; i++)
adapter->rx_ring[i].smoothed_interval = val;
}
@@ -612,7 +612,7 @@ static int ena_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info,
switch (info->cmd) {
case ETHTOOL_GRXRINGS:
- info->data = adapter->num_queues;
+ info->data = adapter->num_io_queues;
rc = 0;
break;
case ETHTOOL_GRXFH:
@@ -734,14 +734,20 @@ static void ena_get_channels(struct net_device *netdev,
{
struct ena_adapter *adapter = netdev_priv(netdev);
- channels->max_rx = adapter->num_queues;
- channels->max_tx = adapter->num_queues;
- channels->max_other = 0;
- channels->max_combined = 0;
- channels->rx_count = adapter->num_queues;
- channels->tx_count = adapter->num_queues;
- channels->other_count = 0;
- channels->combined_count = 0;
+ channels->max_combined = adapter->max_num_io_queues;
+ channels->combined_count = adapter->num_io_queues;
+}
+
+static int ena_set_channels(struct net_device *netdev,
+ struct ethtool_channels *channels)
+{
+ struct ena_adapter *adapter = netdev_priv(netdev);
+ u32 count = channels->combined_count;
+ /* The check for max value is already done in ethtool */
+ if (count < ENA_MIN_NUM_IO_QUEUES)
+ return -EINVAL;
+
+ return ena_update_queue_count(adapter, count);
}
static int ena_get_tunable(struct net_device *netdev,
@@ -807,6 +813,7 @@ static const struct ethtool_ops ena_ethtool_ops = {
.get_rxfh = ena_get_rxfh,
.set_rxfh = ena_set_rxfh,
.get_channels = ena_get_channels,
+ .set_channels = ena_set_channels,
.get_tunable = ena_get_tunable,
.set_tunable = ena_set_tunable,
};
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index c487d2a7d6dd..d46a912002ff 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -101,7 +101,7 @@ static void update_rx_ring_mtu(struct ena_adapter *adapter, int mtu)
{
int i;
- for (i = 0; i < adapter->num_queues; i++)
+ for (i = 0; i < adapter->num_io_queues; i++)
adapter->rx_ring[i].mtu = mtu;
}
@@ -129,10 +129,10 @@ static int ena_init_rx_cpu_rmap(struct ena_adapter *adapter)
u32 i;
int rc;
- adapter->netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(adapter->num_queues);
+ adapter->netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(adapter->num_io_queues);
if (!adapter->netdev->rx_cpu_rmap)
return -ENOMEM;
- for (i = 0; i < adapter->num_queues; i++) {
+ for (i = 0; i < adapter->num_io_queues; i++) {
int irq_idx = ENA_IO_IRQ_IDX(i);
rc = irq_cpu_rmap_add(adapter->netdev->rx_cpu_rmap,
@@ -172,7 +172,7 @@ static void ena_init_io_rings(struct ena_adapter *adapter)
ena_dev = adapter->ena_dev;
- for (i = 0; i < adapter->num_queues; i++) {
+ for (i = 0; i < adapter->num_io_queues; i++) {
txr = &adapter->tx_ring[i];
rxr = &adapter->rx_ring[i];
@@ -294,7 +294,7 @@ static int ena_setup_all_tx_resources(struct ena_adapter *adapter)
{
int i, rc = 0;
- for (i = 0; i < adapter->num_queues; i++) {
+ for (i = 0; i < adapter->num_io_queues; i++) {
rc = ena_setup_tx_resources(adapter, i);
if (rc)
goto err_setup_tx;
@@ -322,7 +322,7 @@ static void ena_free_all_io_tx_resources(struct ena_adapter *adapter)
{
int i;
- for (i = 0; i < adapter->num_queues; i++)
+ for (i = 0; i < adapter->num_io_queues; i++)
ena_free_tx_resources(adapter, i);
}
@@ -428,7 +428,7 @@ static int ena_setup_all_rx_resources(struct ena_adapter *adapter)
{
int i, rc = 0;
- for (i = 0; i < adapter->num_queues; i++) {
+ for (i = 0; i < adapter->num_io_queues; i++) {
rc = ena_setup_rx_resources(adapter, i);
if (rc)
goto err_setup_rx;
@@ -456,7 +456,7 @@ static void ena_free_all_io_rx_resources(struct ena_adapter *adapter)
{
int i;
- for (i = 0; i < adapter->num_queues; i++)
+ for (i = 0; i < adapter->num_io_queues; i++)
ena_free_rx_resources(adapter, i);
}
@@ -600,7 +600,7 @@ static void ena_refill_all_rx_bufs(struct ena_adapter *adapter)
struct ena_ring *rx_ring;
int i, rc, bufs_num;
- for (i = 0; i < adapter->num_queues; i++) {
+ for (i = 0; i < adapter->num_io_queues; i++) {
rx_ring = &adapter->rx_ring[i];
bufs_num = rx_ring->ring_size - 1;
rc = ena_refill_rx_bufs(rx_ring, bufs_num);
@@ -616,7 +616,7 @@ static void ena_free_all_rx_bufs(struct ena_adapter *adapter)
{
int i;
- for (i = 0; i < adapter->num_queues; i++)
+ for (i = 0; i < adapter->num_io_queues; i++)
ena_free_rx_bufs(adapter, i);
}
@@ -688,7 +688,7 @@ static void ena_free_all_tx_bufs(struct ena_adapter *adapter)
struct ena_ring *tx_ring;
int i;
- for (i = 0; i < adapter->num_queues; i++) {
+ for (i = 0; i < adapter->num_io_queues; i++) {
tx_ring = &adapter->tx_ring[i];
ena_free_tx_bufs(tx_ring);
}
@@ -699,7 +699,7 @@ static void ena_destroy_all_tx_queues(struct ena_adapter *adapter)
u16 ena_qid;
int i;
- for (i = 0; i < adapter->num_queues; i++) {
+ for (i = 0; i < adapter->num_io_queues; i++) {
ena_qid = ENA_IO_TXQ_IDX(i);
ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
}
@@ -710,7 +710,7 @@ static void ena_destroy_all_rx_queues(struct ena_adapter *adapter)
u16 ena_qid;
int i;
- for (i = 0; i < adapter->num_queues; i++) {
+ for (i = 0; i < adapter->num_io_queues; i++) {
ena_qid = ENA_IO_RXQ_IDX(i);
cancel_work_sync(&adapter->ena_napi[i].dim.work);
ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
@@ -1331,7 +1331,7 @@ static irqreturn_t ena_intr_msix_io(int irq, void *data)
* the number of potential io queues is the minimum of what the device
* supports and the number of vCPUs.
*/
-static int ena_enable_msix(struct ena_adapter *adapter, int num_queues)
+static int ena_enable_msix(struct ena_adapter *adapter)
{
int msix_vecs, irq_cnt;
@@ -1342,7 +1342,7 @@ static int ena_enable_msix(struct ena_adapter *adapter, int num_queues)
}
/* Reserved the max msix vectors we might need */
- msix_vecs = ENA_MAX_MSIX_VEC(num_queues);
+ msix_vecs = ENA_MAX_MSIX_VEC(adapter->num_io_queues);
netif_dbg(adapter, probe, adapter->netdev,
"trying to enable MSI-X, vectors %d\n", msix_vecs);
@@ -1359,7 +1359,7 @@ static int ena_enable_msix(struct ena_adapter *adapter, int num_queues)
netif_notice(adapter, probe, adapter->netdev,
"enable only %d MSI-X (out of %d), reduce the number of queues\n",
irq_cnt, msix_vecs);
- adapter->num_queues = irq_cnt - ENA_ADMIN_MSIX_VEC;
+ adapter->num_io_queues = irq_cnt - ENA_ADMIN_MSIX_VEC;
}
if (ena_init_rx_cpu_rmap(adapter))
@@ -1397,7 +1397,7 @@ static void ena_setup_io_intr(struct ena_adapter *adapter)
netdev = adapter->netdev;
- for (i = 0; i < adapter->num_queues; i++) {
+ for (i = 0; i < adapter->num_io_queues; i++) {
irq_idx = ENA_IO_IRQ_IDX(i);
cpu = i % num_online_cpus();
@@ -1529,7 +1529,7 @@ static void ena_del_napi(struct ena_adapter *adapter)
{
int i;
- for (i = 0; i < adapter->num_queues; i++)
+ for (i = 0; i < adapter->num_io_queues; i++)
netif_napi_del(&adapter->ena_napi[i].napi);
}
@@ -1538,7 +1538,7 @@ static void ena_init_napi(struct ena_adapter *adapter)
struct ena_napi *napi;
int i;
- for (i = 0; i < adapter->num_queues; i++) {
+ for (i = 0; i < adapter->num_io_queues; i++) {
napi = &adapter->ena_napi[i];
netif_napi_add(adapter->netdev,
@@ -1555,7 +1555,7 @@ static void ena_napi_disable_all(struct ena_adapter *adapter)
{
int i;
- for (i = 0; i < adapter->num_queues; i++)
+ for (i = 0; i < adapter->num_io_queues; i++)
napi_disable(&adapter->ena_napi[i].napi);
}
@@ -1563,7 +1563,7 @@ static void ena_napi_enable_all(struct ena_adapter *adapter)
{
int i;
- for (i = 0; i < adapter->num_queues; i++)
+ for (i = 0; i < adapter->num_io_queues; i++)
napi_enable(&adapter->ena_napi[i].napi);
}
@@ -1673,7 +1673,7 @@ static int ena_create_all_io_tx_queues(struct ena_adapter *adapter)
struct ena_com_dev *ena_dev = adapter->ena_dev;
int rc, i;
- for (i = 0; i < adapter->num_queues; i++) {
+ for (i = 0; i < adapter->num_io_queues; i++) {
rc = ena_create_io_tx_queue(adapter, i);
if (rc)
goto create_err;
@@ -1741,7 +1741,7 @@ static int ena_create_all_io_rx_queues(struct ena_adapter *adapter)
struct ena_com_dev *ena_dev = adapter->ena_dev;
int rc, i;
- for (i = 0; i < adapter->num_queues; i++) {
+ for (i = 0; i < adapter->num_io_queues; i++) {
rc = ena_create_io_rx_queue(adapter, i);
if (rc)
goto create_err;
@@ -1764,7 +1764,7 @@ static void set_io_rings_size(struct ena_adapter *adapter,
{
int i;
- for (i = 0; i < adapter->num_queues; i++) {
+ for (i = 0; i < adapter->num_io_queues; i++) {
adapter->tx_ring[i].ring_size = new_tx_size;
adapter->rx_ring[i].ring_size = new_rx_size;
}
@@ -1902,14 +1902,14 @@ static int ena_up(struct ena_adapter *adapter)
set_bit(ENA_FLAG_DEV_UP, &adapter->flags);
/* Enable completion queues interrupt */
- for (i = 0; i < adapter->num_queues; i++)
+ for (i = 0; i < adapter->num_io_queues; i++)
ena_unmask_interrupt(&adapter->tx_ring[i],
&adapter->rx_ring[i]);
/* schedule napi in case we had pending packets
* from the last time we disable napi
*/
- for (i = 0; i < adapter->num_queues; i++)
+ for (i = 0; i < adapter->num_io_queues; i++)
napi_schedule(&adapter->ena_napi[i].napi);
return rc;
@@ -1984,13 +1984,13 @@ static int ena_open(struct net_device *netdev)
int rc;
/* Notify the stack of the actual queue counts. */
- rc = netif_set_real_num_tx_queues(netdev, adapter->num_queues);
+ rc = netif_set_real_num_tx_queues(netdev, adapter->num_io_queues);
if (rc) {
netif_err(adapter, ifup, netdev, "Can't set num tx queues\n");
return rc;
}
- rc = netif_set_real_num_rx_queues(netdev, adapter->num_queues);
+ rc = netif_set_real_num_rx_queues(netdev, adapter->num_io_queues);
if (rc) {
netif_err(adapter, ifup, netdev, "Can't set num rx queues\n");
return rc;
@@ -2043,14 +2043,30 @@ int ena_update_queue_sizes(struct ena_adapter *adapter,
u32 new_tx_size,
u32 new_rx_size)
{
- bool dev_up;
+ bool dev_was_up;
- dev_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
+ dev_was_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
ena_close(adapter->netdev);
adapter->requested_tx_ring_size = new_tx_size;
adapter->requested_rx_ring_size = new_rx_size;
ena_init_io_rings(adapter);
- return dev_up ? ena_up(adapter) : 0;
+ return dev_was_up ? ena_up(adapter) : 0;
+}
+
+int ena_update_queue_count(struct ena_adapter *adapter, u32 new_channel_count)
+{
+ struct ena_com_dev *ena_dev = adapter->ena_dev;
+ bool dev_was_up;
+
+ dev_was_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
+ ena_close(adapter->netdev);
+ adapter->num_io_queues = new_channel_count;
+ /* We need to destroy the rss table so that the indirection
+ * table will be reinitialized by ena_up()
+ */
+ ena_com_rss_destroy(ena_dev);
+ ena_init_io_rings(adapter);
+ return dev_was_up ? ena_open(adapter->netdev) : 0;
}
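In practice this path is reached from ethtool's channel configuration, e.g. "ethtool -L eth0 combined 4", which lands in ena_set_channels() and from there in ena_update_queue_count(); destroying the RSS table forces the indirection table to be respread over the new queue count when the interface comes back up.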
static void ena_tx_csum(struct ena_com_tx_ctx *ena_tx_ctx, struct sk_buff *skb)
@@ -2495,7 +2511,7 @@ static void ena_get_stats64(struct net_device *netdev,
if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
return;
- for (i = 0; i < adapter->num_queues; i++) {
+ for (i = 0; i < adapter->num_io_queues; i++) {
u64 bytes, packets;
tx_ring = &adapter->tx_ring[i];
@@ -2682,14 +2698,13 @@ err_mmio_read_less:
return rc;
}
-static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *adapter,
- int io_vectors)
+static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *adapter)
{
struct ena_com_dev *ena_dev = adapter->ena_dev;
struct device *dev = &adapter->pdev->dev;
int rc;
- rc = ena_enable_msix(adapter, io_vectors);
+ rc = ena_enable_msix(adapter);
if (rc) {
dev_err(dev, "Can not reserve msix vectors\n");
return rc;
@@ -2782,8 +2797,7 @@ static int ena_restore_device(struct ena_adapter *adapter)
goto err_device_destroy;
}
- rc = ena_enable_msix_and_set_admin_interrupts(adapter,
- adapter->num_queues);
+ rc = ena_enable_msix_and_set_admin_interrupts(adapter);
if (rc) {
dev_err(&pdev->dev, "Enable MSI-X failed\n");
goto err_device_destroy;
@@ -2948,7 +2962,7 @@ static void check_for_missing_completions(struct ena_adapter *adapter)
budget = ENA_MONITORED_TX_QUEUES;
- for (i = adapter->last_monitored_tx_qid; i < adapter->num_queues; i++) {
+ for (i = adapter->last_monitored_tx_qid; i < adapter->num_io_queues; i++) {
tx_ring = &adapter->tx_ring[i];
rx_ring = &adapter->rx_ring[i];
@@ -2965,7 +2979,7 @@ static void check_for_missing_completions(struct ena_adapter *adapter)
break;
}
- adapter->last_monitored_tx_qid = i % adapter->num_queues;
+ adapter->last_monitored_tx_qid = i % adapter->num_io_queues;
}
/* trigger napi schedule after 2 consecutive detections */
@@ -2995,7 +3009,7 @@ static void check_for_empty_rx_ring(struct ena_adapter *adapter)
if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
return;
- for (i = 0; i < adapter->num_queues; i++) {
+ for (i = 0; i < adapter->num_io_queues; i++) {
rx_ring = &adapter->rx_ring[i];
refill_required =
@@ -3137,16 +3151,16 @@ static void ena_timer_service(struct timer_list *t)
mod_timer(&adapter->timer_service, jiffies + HZ);
}
-static int ena_calc_io_queue_num(struct pci_dev *pdev,
- struct ena_com_dev *ena_dev,
- struct ena_com_dev_get_features_ctx *get_feat_ctx)
+static int ena_calc_max_io_queue_num(struct pci_dev *pdev,
+ struct ena_com_dev *ena_dev,
+ struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
- int io_tx_sq_num, io_tx_cq_num, io_rx_num, io_queue_num;
+ int io_tx_sq_num, io_tx_cq_num, io_rx_num, max_num_io_queues;
if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
struct ena_admin_queue_ext_feature_fields *max_queue_ext =
&get_feat_ctx->max_queue_ext.max_queue_ext;
- io_rx_num = min_t(int, max_queue_ext->max_rx_sq_num,
+ io_rx_num = min_t(u32, max_queue_ext->max_rx_sq_num,
max_queue_ext->max_rx_cq_num);
io_tx_sq_num = max_queue_ext->max_tx_sq_num;
@@ -3156,25 +3170,25 @@ static int ena_calc_io_queue_num(struct pci_dev *pdev,
&get_feat_ctx->max_queues;
io_tx_sq_num = max_queues->max_sq_num;
io_tx_cq_num = max_queues->max_cq_num;
- io_rx_num = min_t(int, io_tx_sq_num, io_tx_cq_num);
+ io_rx_num = min_t(u32, io_tx_sq_num, io_tx_cq_num);
}
/* In case of LLQ use the llq fields for the tx SQ/CQ */
if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
io_tx_sq_num = get_feat_ctx->llq.max_llq_num;
- io_queue_num = min_t(int, num_online_cpus(), ENA_MAX_NUM_IO_QUEUES);
- io_queue_num = min_t(int, io_queue_num, io_rx_num);
- io_queue_num = min_t(int, io_queue_num, io_tx_sq_num);
- io_queue_num = min_t(int, io_queue_num, io_tx_cq_num);
+ max_num_io_queues = min_t(u32, num_online_cpus(), ENA_MAX_NUM_IO_QUEUES);
+ max_num_io_queues = min_t(u32, max_num_io_queues, io_rx_num);
+ max_num_io_queues = min_t(u32, max_num_io_queues, io_tx_sq_num);
+ max_num_io_queues = min_t(u32, max_num_io_queues, io_tx_cq_num);
 	/* 1 IRQ for mgmt and 1 IRQ for each IO direction */
- io_queue_num = min_t(int, io_queue_num, pci_msix_vec_count(pdev) - 1);
- if (unlikely(!io_queue_num)) {
+ max_num_io_queues = min_t(u32, max_num_io_queues, pci_msix_vec_count(pdev) - 1);
+ if (unlikely(!max_num_io_queues)) {
dev_err(&pdev->dev, "The device doesn't have io queues\n");
return -EFAULT;
}
- return io_queue_num;
+ return max_num_io_queues;
}
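As a worked example (hypothetical figures): on a host with 8 online CPUs, a device reporting io_rx_num = 32, io_tx_sq_num = 16 and io_tx_cq_num = 16, and 9 MSI-X vectors, the result is min(8, 32, 16, 16, 9 - 1) = 8 IO queues.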
static int ena_set_queues_placement_policy(struct pci_dev *pdev,
@@ -3302,7 +3316,7 @@ static int ena_rss_init_default(struct ena_adapter *adapter)
}
for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) {
- val = ethtool_rxfh_indir_default(i, adapter->num_queues);
+ val = ethtool_rxfh_indir_default(i, adapter->num_io_queues);
rc = ena_com_indirect_table_fill_entry(ena_dev, i,
ENA_IO_RXQ_IDX(val));
if (unlikely(rc && (rc != -EOPNOTSUPP))) {
@@ -3349,7 +3363,7 @@ static void set_default_llq_configurations(struct ena_llq_configurations *llq_co
llq_config->llq_ring_entry_size_value = 128;
}
-static int ena_calc_queue_size(struct ena_calc_queue_size_ctx *ctx)
+static int ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx)
{
struct ena_admin_feature_llq_desc *llq = &ctx->get_feat_ctx->llq;
struct ena_com_dev *ena_dev = ctx->ena_dev;
@@ -3358,7 +3372,7 @@ static int ena_calc_queue_size(struct ena_calc_queue_size_ctx *ctx)
u32 max_tx_queue_size;
u32 max_rx_queue_size;
- if (ctx->ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
+ if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
struct ena_admin_queue_ext_feature_fields *max_queue_ext =
&ctx->get_feat_ctx->max_queue_ext.max_queue_ext;
max_rx_queue_size = min_t(u32, max_queue_ext->max_rx_cq_depth,
@@ -3432,11 +3446,12 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct ena_llq_configurations llq_config;
struct ena_com_dev *ena_dev = NULL;
struct ena_adapter *adapter;
- int io_queue_num, bars, rc;
struct net_device *netdev;
static int adapters_found;
+ u32 max_num_io_queues;
char *queue_type_str;
bool wd_state;
+ int bars, rc;
dev_dbg(&pdev->dev, "%s\n", __func__);
@@ -3497,27 +3512,20 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
calc_queue_ctx.pdev = pdev;
/* Initial Tx and RX interrupt delay. Assumes 1 usec granularity.
- * Updated during device initialization with the real granularity
- */
+ * Updated during device initialization with the real granularity
+ */
ena_dev->intr_moder_tx_interval = ENA_INTR_INITIAL_TX_INTERVAL_USECS;
ena_dev->intr_moder_rx_interval = ENA_INTR_INITIAL_RX_INTERVAL_USECS;
ena_dev->intr_delay_resolution = ENA_DEFAULT_INTR_DELAY_RESOLUTION;
- io_queue_num = ena_calc_io_queue_num(pdev, ena_dev, &get_feat_ctx);
- rc = ena_calc_queue_size(&calc_queue_ctx);
- if (rc || io_queue_num <= 0) {
+ max_num_io_queues = ena_calc_max_io_queue_num(pdev, ena_dev, &get_feat_ctx);
+ rc = ena_calc_io_queue_size(&calc_queue_ctx);
+ if (rc || !max_num_io_queues) {
rc = -EFAULT;
goto err_device_destroy;
}
- dev_info(&pdev->dev, "creating %d io queues. rx queue size: %d tx queue size. %d LLQ is %s\n",
- io_queue_num,
- calc_queue_ctx.rx_queue_size,
- calc_queue_ctx.tx_queue_size,
- (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) ?
- "ENABLED" : "DISABLED");
-
/* dev zeroed in init_etherdev */
- netdev = alloc_etherdev_mq(sizeof(struct ena_adapter), io_queue_num);
+ netdev = alloc_etherdev_mq(sizeof(struct ena_adapter), max_num_io_queues);
if (!netdev) {
dev_err(&pdev->dev, "alloc_etherdev_mq failed\n");
rc = -ENOMEM;
@@ -3545,7 +3553,9 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->max_tx_sgl_size = calc_queue_ctx.max_tx_sgl_size;
adapter->max_rx_sgl_size = calc_queue_ctx.max_rx_sgl_size;
- adapter->num_queues = io_queue_num;
+ adapter->num_io_queues = max_num_io_queues;
+ adapter->max_num_io_queues = max_num_io_queues;
+
adapter->last_monitored_tx_qid = 0;
adapter->rx_copybreak = ENA_DEFAULT_RX_COPYBREAK;
@@ -3569,7 +3579,7 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
u64_stats_init(&adapter->syncp);
- rc = ena_enable_msix_and_set_admin_interrupts(adapter, io_queue_num);
+ rc = ena_enable_msix_and_set_admin_interrupts(adapter);
if (rc) {
dev_err(&pdev->dev,
"Failed to enable and set the admin interrupts\n");
@@ -3611,9 +3621,9 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
queue_type_str = "Low Latency";
dev_info(&pdev->dev,
- "%s found at mem %lx, mac addr %pM Queues %d, Placement policy: %s\n",
+ "%s found at mem %lx, mac addr %pM, Placement policy: %s\n",
DEVICE_NAME, (long)pci_resource_start(pdev, 0),
- netdev->dev_addr, io_queue_num, queue_type_str);
+ netdev->dev_addr, queue_type_str);
set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index 72ee51a82ec7..bffd778f2ce3 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -82,6 +82,8 @@
#define ENA_DEFAULT_RING_SIZE (1024)
#define ENA_MIN_RING_SIZE (256)
+#define ENA_MIN_NUM_IO_QUEUES (1)
+
#define ENA_TX_WAKEUP_THRESH (MAX_SKB_FRAGS + 2)
#define ENA_DEFAULT_RX_COPYBREAK (256 - NET_IP_ALIGN)
@@ -161,10 +163,10 @@ struct ena_calc_queue_size_ctx {
struct ena_com_dev_get_features_ctx *get_feat_ctx;
struct ena_com_dev *ena_dev;
struct pci_dev *pdev;
- u16 tx_queue_size;
- u16 rx_queue_size;
- u16 max_tx_queue_size;
- u16 max_rx_queue_size;
+ u32 tx_queue_size;
+ u32 rx_queue_size;
+ u32 max_tx_queue_size;
+ u32 max_rx_queue_size;
u16 max_tx_sgl_size;
u16 max_rx_sgl_size;
};
@@ -324,7 +326,8 @@ struct ena_adapter {
u32 rx_copybreak;
u32 max_mtu;
- int num_queues;
+ u32 num_io_queues;
+ u32 max_num_io_queues;
int msix_vecs;
@@ -387,6 +390,7 @@ void ena_dump_stats_to_buf(struct ena_adapter *adapter, u8 *buf);
int ena_update_queue_sizes(struct ena_adapter *adapter,
u32 new_tx_size,
u32 new_rx_size);
+int ena_update_queue_count(struct ena_adapter *adapter, u32 new_channel_count);
int ena_get_sset_count(struct net_device *netdev, int sset);
diff --git a/drivers/net/ethernet/aquantia/atlantic/Makefile b/drivers/net/ethernet/aquantia/atlantic/Makefile
index 131cab855be7..6e0a6e234483 100644
--- a/drivers/net/ethernet/aquantia/atlantic/Makefile
+++ b/drivers/net/ethernet/aquantia/atlantic/Makefile
@@ -4,15 +4,8 @@
# aQuantia Ethernet Controller AQtion Linux Driver
# Copyright(c) 2014-2017 aQuantia Corporation.
#
-# Contact Information: <rdc-drv@aquantia.com>
-# aQuantia Corporation, 105 E. Tasman Dr. San Jose, CA 95134, USA
-#
################################################################################
-#
-# Makefile for the AQtion(tm) Ethernet driver
-#
-
obj-$(CONFIG_AQTION) += atlantic.o
atlantic-objs := aq_main.o \
@@ -24,8 +17,11 @@ atlantic-objs := aq_main.o \
aq_ethtool.o \
aq_drvinfo.o \
aq_filters.o \
+ aq_phy.o \
hw_atl/hw_atl_a0.o \
hw_atl/hw_atl_b0.o \
hw_atl/hw_atl_utils.o \
hw_atl/hw_atl_utils_fw2x.o \
hw_atl/hw_atl_llh.o
+
+atlantic-$(CONFIG_PTP_1588_CLOCK) += aq_ptp.o
\ No newline at end of file
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
index 02f1b70c4e25..f0c41f7408e5 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* aQuantia Corporation Network Driver
- * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
*/
/* File aq_cfg.h: Definition of configuration parameters and constants. */
@@ -27,7 +27,7 @@
#define AQ_CFG_INTERRUPT_MODERATION_USEC_MAX (0x1FF * 2)
-#define AQ_CFG_IRQ_MASK 0x1FFU
+#define AQ_CFG_IRQ_MASK 0x3FFU
#define AQ_CFG_VECS_MAX 8U
#define AQ_CFG_TCS_MAX 8U
@@ -70,14 +70,11 @@
/*#define AQ_CFG_MAC_ADDR_PERMANENT {0x30, 0x0E, 0xE3, 0x12, 0x34, 0x56}*/
-#define AQ_NIC_FC_OFF 0U
-#define AQ_NIC_FC_TX 1U
-#define AQ_NIC_FC_RX 2U
-#define AQ_NIC_FC_FULL 3U
-#define AQ_NIC_FC_AUTO 4U
-
#define AQ_CFG_FC_MODE AQ_NIC_FC_FULL
+/* Default WOL modes used on initialization */
+#define AQ_CFG_WOL_MODES WAKE_MAGIC
+
#define AQ_CFG_SPEED_MSK 0xFFFFU /* 0xFFFFU==auto_neg */
#define AQ_CFG_IS_AUTONEG_DEF 1U
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
index 24df132384fb..a1f99bef4a68 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* aQuantia Corporation Network Driver
- * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
*/
/* File aq_ethtool.c: Definition of ethertool related functions. */
@@ -9,13 +9,18 @@
#include "aq_ethtool.h"
#include "aq_nic.h"
#include "aq_vec.h"
+#include "aq_ptp.h"
#include "aq_filters.h"
+#include <linux/ptp_clock_kernel.h>
+
static void aq_ethtool_get_regs(struct net_device *ndev,
struct ethtool_regs *regs, void *p)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
- u32 regs_count = aq_nic_get_regs_count(aq_nic);
+ u32 regs_count;
+
+ regs_count = aq_nic_get_regs_count(aq_nic);
memset(p, 0, regs_count * sizeof(u32));
aq_nic_get_regs(aq_nic, regs, p);
@@ -24,7 +29,9 @@ static void aq_ethtool_get_regs(struct net_device *ndev,
static int aq_ethtool_get_regs_len(struct net_device *ndev)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
- u32 regs_count = aq_nic_get_regs_count(aq_nic);
+ u32 regs_count;
+
+ regs_count = aq_nic_get_regs_count(aq_nic);
return regs_count * sizeof(u32);
}
@@ -89,11 +96,21 @@ static const char aq_ethtool_queue_stat_names[][ETH_GSTRING_LEN] = {
"Queue[%d] InErrors",
};
+static const char aq_ethtool_priv_flag_names[][ETH_GSTRING_LEN] = {
+ "DMASystemLoopback",
+ "PKTSystemLoopback",
+ "DMANetworkLoopback",
+ "PHYInternalLoopback",
+ "PHYExternalLoopback",
+};
+
static void aq_ethtool_stats(struct net_device *ndev,
struct ethtool_stats *stats, u64 *data)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
- struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
+ struct aq_nic_cfg_s *cfg;
+
+ cfg = aq_nic_get_cfg(aq_nic);
memset(data, 0, (ARRAY_SIZE(aq_ethtool_stat_names) +
ARRAY_SIZE(aq_ethtool_queue_stat_names) *
@@ -104,11 +121,15 @@ static void aq_ethtool_stats(struct net_device *ndev,
static void aq_ethtool_get_drvinfo(struct net_device *ndev,
struct ethtool_drvinfo *drvinfo)
{
- struct aq_nic_s *aq_nic = netdev_priv(ndev);
- struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
struct pci_dev *pdev = to_pci_dev(ndev->dev.parent);
- u32 firmware_version = aq_nic_get_fw_version(aq_nic);
- u32 regs_count = aq_nic_get_regs_count(aq_nic);
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ struct aq_nic_cfg_s *cfg;
+ u32 firmware_version;
+ u32 regs_count;
+
+ cfg = aq_nic_get_cfg(aq_nic);
+ firmware_version = aq_nic_get_fw_version(aq_nic);
+ regs_count = aq_nic_get_regs_count(aq_nic);
strlcat(drvinfo->driver, AQ_CFG_DRV_NAME, sizeof(drvinfo->driver));
strlcat(drvinfo->version, AQ_CFG_DRV_VERSION, sizeof(drvinfo->version));
@@ -129,12 +150,15 @@ static void aq_ethtool_get_drvinfo(struct net_device *ndev,
static void aq_ethtool_get_strings(struct net_device *ndev,
u32 stringset, u8 *data)
{
- int i, si;
struct aq_nic_s *aq_nic = netdev_priv(ndev);
- struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
+ struct aq_nic_cfg_s *cfg;
u8 *p = data;
+ int i, si;
- if (stringset == ETH_SS_STATS) {
+ cfg = aq_nic_get_cfg(aq_nic);
+
+ switch (stringset) {
+ case ETH_SS_STATS:
memcpy(p, aq_ethtool_stat_names,
sizeof(aq_ethtool_stat_names));
p = p + sizeof(aq_ethtool_stat_names);
@@ -147,23 +171,63 @@ static void aq_ethtool_get_strings(struct net_device *ndev,
p += ETH_GSTRING_LEN;
}
}
+ break;
+ case ETH_SS_PRIV_FLAGS:
+ memcpy(p, aq_ethtool_priv_flag_names,
+ sizeof(aq_ethtool_priv_flag_names));
+ break;
}
}
-static int aq_ethtool_get_sset_count(struct net_device *ndev, int stringset)
+static int aq_ethtool_set_phys_id(struct net_device *ndev,
+ enum ethtool_phys_id_state state)
{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ struct aq_hw_s *hw = aq_nic->aq_hw;
int ret = 0;
+
+ if (!aq_nic->aq_fw_ops->led_control)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&aq_nic->fwreq_mutex);
+
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ ret = aq_nic->aq_fw_ops->led_control(hw, AQ_HW_LED_BLINK |
+ AQ_HW_LED_BLINK << 2 | AQ_HW_LED_BLINK << 4);
+ break;
+ case ETHTOOL_ID_INACTIVE:
+ ret = aq_nic->aq_fw_ops->led_control(hw, AQ_HW_LED_DEFAULT);
+ break;
+ default:
+ break;
+ }
+
+ mutex_unlock(&aq_nic->fwreq_mutex);
+
+ return ret;
+}
+
+static int aq_ethtool_get_sset_count(struct net_device *ndev, int stringset)
+{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
- struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
+ struct aq_nic_cfg_s *cfg;
+ int ret = 0;
+
+ cfg = aq_nic_get_cfg(aq_nic);
switch (stringset) {
case ETH_SS_STATS:
ret = ARRAY_SIZE(aq_ethtool_stat_names) +
cfg->vecs * ARRAY_SIZE(aq_ethtool_queue_stat_names);
break;
+ case ETH_SS_PRIV_FLAGS:
+ ret = ARRAY_SIZE(aq_ethtool_priv_flag_names);
+ break;
default:
ret = -EOPNOTSUPP;
}
+
return ret;
}
@@ -175,7 +239,9 @@ static u32 aq_ethtool_get_rss_indir_size(struct net_device *ndev)
static u32 aq_ethtool_get_rss_key_size(struct net_device *ndev)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
- struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
+ struct aq_nic_cfg_s *cfg;
+
+ cfg = aq_nic_get_cfg(aq_nic);
return sizeof(cfg->aq_rss.hash_secret_key);
}
@@ -184,9 +250,11 @@ static int aq_ethtool_get_rss(struct net_device *ndev, u32 *indir, u8 *key,
u8 *hfunc)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
- struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
+ struct aq_nic_cfg_s *cfg;
unsigned int i = 0U;
+ cfg = aq_nic_get_cfg(aq_nic);
+
if (hfunc)
*hfunc = ETH_RSS_HASH_TOP; /* Toeplitz */
if (indir) {
@@ -196,6 +264,7 @@ static int aq_ethtool_get_rss(struct net_device *ndev, u32 *indir, u8 *key,
if (key)
memcpy(key, cfg->aq_rss.hash_secret_key,
sizeof(cfg->aq_rss.hash_secret_key));
+
return 0;
}
@@ -239,9 +308,11 @@ static int aq_ethtool_get_rxnfc(struct net_device *ndev,
u32 *rule_locs)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
- struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
+ struct aq_nic_cfg_s *cfg;
int err = 0;
+ cfg = aq_nic_get_cfg(aq_nic);
+
switch (cmd->cmd) {
case ETHTOOL_GRXRINGS:
cmd->data = cfg->vecs;
@@ -266,8 +337,8 @@ static int aq_ethtool_get_rxnfc(struct net_device *ndev,
static int aq_ethtool_set_rxnfc(struct net_device *ndev,
struct ethtool_rxnfc *cmd)
{
- int err = 0;
struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ int err = 0;
switch (cmd->cmd) {
case ETHTOOL_SRXCLSRLINS:
@@ -288,7 +359,9 @@ static int aq_ethtool_get_coalesce(struct net_device *ndev,
struct ethtool_coalesce *coal)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
- struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
+ struct aq_nic_cfg_s *cfg;
+
+ cfg = aq_nic_get_cfg(aq_nic);
if (cfg->itr == AQ_CFG_INTERRUPT_MODERATION_ON ||
cfg->itr == AQ_CFG_INTERRUPT_MODERATION_AUTO) {
@@ -302,6 +375,7 @@ static int aq_ethtool_get_coalesce(struct net_device *ndev,
coal->rx_max_coalesced_frames = 1;
coal->tx_max_coalesced_frames = 1;
}
+
return 0;
}
@@ -309,7 +383,9 @@ static int aq_ethtool_set_coalesce(struct net_device *ndev,
struct ethtool_coalesce *coal)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
- struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
+ struct aq_nic_cfg_s *cfg;
+
+ cfg = aq_nic_get_cfg(aq_nic);
/* This is not yet supported
*/
@@ -351,13 +427,12 @@ static void aq_ethtool_get_wol(struct net_device *ndev,
struct ethtool_wolinfo *wol)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
- struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
+ struct aq_nic_cfg_s *cfg;
- wol->supported = WAKE_MAGIC;
- wol->wolopts = 0;
+ cfg = aq_nic_get_cfg(aq_nic);
- if (cfg->wol)
- wol->wolopts |= WAKE_MAGIC;
+ wol->supported = AQ_NIC_WOL_MODES;
+ wol->wolopts = cfg->wol;
}
static int aq_ethtool_set_wol(struct net_device *ndev,
@@ -365,18 +440,50 @@ static int aq_ethtool_set_wol(struct net_device *ndev,
{
struct pci_dev *pdev = to_pci_dev(ndev->dev.parent);
struct aq_nic_s *aq_nic = netdev_priv(ndev);
- struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
+ struct aq_nic_cfg_s *cfg;
int err = 0;
- if (wol->wolopts & WAKE_MAGIC)
- cfg->wol |= AQ_NIC_WOL_ENABLED;
- else
- cfg->wol &= ~AQ_NIC_WOL_ENABLED;
- err = device_set_wakeup_enable(&pdev->dev, wol->wolopts);
+ cfg = aq_nic_get_cfg(aq_nic);
+
+ if (wol->wolopts & ~AQ_NIC_WOL_MODES)
+ return -EOPNOTSUPP;
+
+ cfg->wol = wol->wolopts;
+
+ err = device_set_wakeup_enable(&pdev->dev, !!cfg->wol);
return err;
}
+static int aq_ethtool_get_ts_info(struct net_device *ndev,
+ struct ethtool_ts_info *info)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+
+ ethtool_op_get_ts_info(ndev, info);
+
+ if (!aq_nic->aq_ptp)
+ return 0;
+
+ info->so_timestamping |=
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ info->tx_types = BIT(HWTSTAMP_TX_OFF) |
+ BIT(HWTSTAMP_TX_ON);
+
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE);
+
+ info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
+
+ info->phc_index = ptp_clock_index(aq_ptp_get_ptp_clock(aq_nic->aq_ptp));
+
+ return 0;
+}
+
static enum hw_atl_fw2x_rate eee_mask_to_ethtool_mask(u32 speed)
{
u32 rate = 0;
@@ -481,7 +588,7 @@ static void aq_ethtool_get_pauseparam(struct net_device *ndev,
struct ethtool_pauseparam *pause)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
- u32 fc = aq_nic->aq_nic_cfg.flow_control;
+ u32 fc = aq_nic->aq_nic_cfg.fc.req;
pause->autoneg = 0;
@@ -503,14 +610,14 @@ static int aq_ethtool_set_pauseparam(struct net_device *ndev,
return -EOPNOTSUPP;
if (pause->rx_pause)
- aq_nic->aq_hw->aq_nic_cfg->flow_control |= AQ_NIC_FC_RX;
+ aq_nic->aq_hw->aq_nic_cfg->fc.req |= AQ_NIC_FC_RX;
else
- aq_nic->aq_hw->aq_nic_cfg->flow_control &= ~AQ_NIC_FC_RX;
+ aq_nic->aq_hw->aq_nic_cfg->fc.req &= ~AQ_NIC_FC_RX;
if (pause->tx_pause)
- aq_nic->aq_hw->aq_nic_cfg->flow_control |= AQ_NIC_FC_TX;
+ aq_nic->aq_hw->aq_nic_cfg->fc.req |= AQ_NIC_FC_TX;
else
- aq_nic->aq_hw->aq_nic_cfg->flow_control &= ~AQ_NIC_FC_TX;
+ aq_nic->aq_hw->aq_nic_cfg->fc.req &= ~AQ_NIC_FC_TX;
mutex_lock(&aq_nic->fwreq_mutex);
err = aq_nic->aq_fw_ops->set_flow_control(aq_nic->aq_hw);
@@ -523,23 +630,28 @@ static void aq_get_ringparam(struct net_device *ndev,
struct ethtool_ringparam *ring)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
- struct aq_nic_cfg_s *aq_nic_cfg = aq_nic_get_cfg(aq_nic);
+ struct aq_nic_cfg_s *cfg;
- ring->rx_pending = aq_nic_cfg->rxds;
- ring->tx_pending = aq_nic_cfg->txds;
+ cfg = aq_nic_get_cfg(aq_nic);
+
+ ring->rx_pending = cfg->rxds;
+ ring->tx_pending = cfg->txds;
- ring->rx_max_pending = aq_nic_cfg->aq_hw_caps->rxds_max;
- ring->tx_max_pending = aq_nic_cfg->aq_hw_caps->txds_max;
+ ring->rx_max_pending = cfg->aq_hw_caps->rxds_max;
+ ring->tx_max_pending = cfg->aq_hw_caps->txds_max;
}
static int aq_set_ringparam(struct net_device *ndev,
struct ethtool_ringparam *ring)
{
- int err = 0;
- bool ndev_running = false;
struct aq_nic_s *aq_nic = netdev_priv(ndev);
- struct aq_nic_cfg_s *aq_nic_cfg = aq_nic_get_cfg(aq_nic);
- const struct aq_hw_caps_s *hw_caps = aq_nic_cfg->aq_hw_caps;
+ const struct aq_hw_caps_s *hw_caps;
+ bool ndev_running = false;
+ struct aq_nic_cfg_s *cfg;
+ int err = 0;
+
+ cfg = aq_nic_get_cfg(aq_nic);
+ hw_caps = cfg->aq_hw_caps;
if (ring->rx_mini_pending || ring->rx_jumbo_pending) {
err = -EOPNOTSUPP;
@@ -553,18 +665,18 @@ static int aq_set_ringparam(struct net_device *ndev,
aq_nic_free_vectors(aq_nic);
- aq_nic_cfg->rxds = max(ring->rx_pending, hw_caps->rxds_min);
- aq_nic_cfg->rxds = min(aq_nic_cfg->rxds, hw_caps->rxds_max);
- aq_nic_cfg->rxds = ALIGN(aq_nic_cfg->rxds, AQ_HW_RXD_MULTIPLE);
+ cfg->rxds = max(ring->rx_pending, hw_caps->rxds_min);
+ cfg->rxds = min(cfg->rxds, hw_caps->rxds_max);
+ cfg->rxds = ALIGN(cfg->rxds, AQ_HW_RXD_MULTIPLE);
- aq_nic_cfg->txds = max(ring->tx_pending, hw_caps->txds_min);
- aq_nic_cfg->txds = min(aq_nic_cfg->txds, hw_caps->txds_max);
- aq_nic_cfg->txds = ALIGN(aq_nic_cfg->txds, AQ_HW_TXD_MULTIPLE);
+ cfg->txds = max(ring->tx_pending, hw_caps->txds_min);
+ cfg->txds = min(cfg->txds, hw_caps->txds_max);
+ cfg->txds = ALIGN(cfg->txds, AQ_HW_TXD_MULTIPLE);
- for (aq_nic->aq_vecs = 0; aq_nic->aq_vecs < aq_nic_cfg->vecs;
+ for (aq_nic->aq_vecs = 0; aq_nic->aq_vecs < cfg->vecs;
aq_nic->aq_vecs++) {
aq_nic->aq_vec[aq_nic->aq_vecs] =
- aq_vec_alloc(aq_nic, aq_nic->aq_vecs, aq_nic_cfg);
+ aq_vec_alloc(aq_nic, aq_nic->aq_vecs, cfg);
if (unlikely(!aq_nic->aq_vec[aq_nic->aq_vecs])) {
err = -ENOMEM;
goto err_exit;
@@ -577,12 +689,61 @@ err_exit:
return err;
}
+static u32 aq_get_msg_level(struct net_device *ndev)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+
+ return aq_nic->msg_enable;
+}
+
+static void aq_set_msg_level(struct net_device *ndev, u32 data)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+
+ aq_nic->msg_enable = data;
+}
+
+static u32 aq_ethtool_get_priv_flags(struct net_device *ndev)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+
+ return aq_nic->aq_nic_cfg.priv_flags;
+}
+
+static int aq_ethtool_set_priv_flags(struct net_device *ndev, u32 flags)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ struct aq_nic_cfg_s *cfg;
+ u32 priv_flags;
+
+ cfg = aq_nic_get_cfg(aq_nic);
+ priv_flags = cfg->priv_flags;
+
+ if (flags & ~AQ_PRIV_FLAGS_MASK)
+ return -EOPNOTSUPP;
+
+ cfg->priv_flags = flags;
+
+ if ((priv_flags ^ flags) & BIT(AQ_HW_LOOPBACK_DMA_NET)) {
+ if (netif_running(ndev)) {
+ dev_close(ndev);
+
+ dev_open(ndev, NULL);
+ }
+ } else if ((priv_flags ^ flags) & AQ_HW_LOOPBACK_MASK) {
+ aq_nic_set_loopback(aq_nic);
+ }
+
+ return 0;
+}
+
const struct ethtool_ops aq_ethtool_ops = {
.get_link = aq_ethtool_get_link,
.get_regs_len = aq_ethtool_get_regs_len,
.get_regs = aq_ethtool_get_regs,
.get_drvinfo = aq_ethtool_get_drvinfo,
.get_strings = aq_ethtool_get_strings,
+ .set_phys_id = aq_ethtool_set_phys_id,
.get_rxfh_indir_size = aq_ethtool_get_rss_indir_size,
.get_wol = aq_ethtool_get_wol,
.set_wol = aq_ethtool_set_wol,
@@ -598,10 +759,15 @@ const struct ethtool_ops aq_ethtool_ops = {
.set_rxfh = aq_ethtool_set_rss,
.get_rxnfc = aq_ethtool_get_rxnfc,
.set_rxnfc = aq_ethtool_set_rxnfc,
+ .get_msglevel = aq_get_msg_level,
+ .set_msglevel = aq_set_msg_level,
.get_sset_count = aq_ethtool_get_sset_count,
.get_ethtool_stats = aq_ethtool_stats,
+ .get_priv_flags = aq_ethtool_get_priv_flags,
+ .set_priv_flags = aq_ethtool_set_priv_flags,
.get_link_ksettings = aq_ethtool_get_link_ksettings,
.set_link_ksettings = aq_ethtool_set_link_ksettings,
.get_coalesce = aq_ethtool_get_coalesce,
.set_coalesce = aq_ethtool_set_coalesce,
+ .get_ts_info = aq_ethtool_get_ts_info,
};
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.h b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.h
index 632b5531db4a..6d5be5ebeb13 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.h
@@ -12,5 +12,6 @@
#include "aq_common.h"
extern const struct ethtool_ops aq_ethtool_ops;
+#define AQ_PRIV_FLAGS_MASK (AQ_HW_LOOPBACK_MASK)
#endif /* AQ_ETHTOOL_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_filters.c b/drivers/net/ethernet/aquantia/atlantic/aq_filters.c
index aee827f07c16..6102251bb909 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_filters.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_filters.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (C) 2014-2017 aQuantia Corporation. */
+/* Copyright (C) 2014-2019 aQuantia Corporation. */
/* File aq_filters.c: RX filters related functions. */
@@ -89,12 +89,14 @@ static int aq_check_approve_fl3l4(struct aq_nic_s *aq_nic,
struct aq_hw_rx_fltrs_s *rx_fltrs,
struct ethtool_rx_flow_spec *fsp)
{
+ u32 last_location = AQ_RX_LAST_LOC_FL3L4 -
+ aq_nic->aq_hw_rx_fltrs.fl3l4.reserved_count;
+
if (fsp->location < AQ_RX_FIRST_LOC_FL3L4 ||
- fsp->location > AQ_RX_LAST_LOC_FL3L4) {
+ fsp->location > last_location) {
netdev_err(aq_nic->ndev,
"ethtool: location must be in range [%d, %d]",
- AQ_RX_FIRST_LOC_FL3L4,
- AQ_RX_LAST_LOC_FL3L4);
+ AQ_RX_FIRST_LOC_FL3L4, last_location);
return -EINVAL;
}
if (rx_fltrs->fl3l4.is_ipv6 && rx_fltrs->fl3l4.active_ipv4) {
@@ -124,12 +126,15 @@ aq_check_approve_fl2(struct aq_nic_s *aq_nic,
struct aq_hw_rx_fltrs_s *rx_fltrs,
struct ethtool_rx_flow_spec *fsp)
{
+ u32 last_location = AQ_RX_LAST_LOC_FETHERT -
+ aq_nic->aq_hw_rx_fltrs.fet_reserved_count;
+
if (fsp->location < AQ_RX_FIRST_LOC_FETHERT ||
- fsp->location > AQ_RX_LAST_LOC_FETHERT) {
+ fsp->location > last_location) {
netdev_err(aq_nic->ndev,
"ethtool: location must be in range [%d, %d]",
AQ_RX_FIRST_LOC_FETHERT,
- AQ_RX_LAST_LOC_FETHERT);
+ last_location);
return -EINVAL;
}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
index 53d7478689a0..cc70c606b6ef 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* aQuantia Corporation Network Driver
- * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
*/
/* File aq_hw.h: Declaration of abstract interface for NIC hardware specific
@@ -15,6 +15,9 @@
#include "aq_rss.h"
#include "hw_atl/hw_atl_utils.h"
+#define AQ_HW_MAC_COUNTER_HZ 312500000ll
+#define AQ_HW_PHY_COUNTER_HZ 160000000ll
+
#define AQ_RX_FIRST_LOC_FVLANID 0U
#define AQ_RX_LAST_LOC_FVLANID 15U
#define AQ_RX_FIRST_LOC_FETHERT 16U
@@ -94,6 +97,7 @@ struct aq_stats_s {
#define AQ_HW_FLAG_STOPPING 0x00000008U
#define AQ_HW_FLAG_RESETTING 0x00000010U
#define AQ_HW_FLAG_CLOSING 0x00000020U
+#define AQ_HW_PTP_AVAILABLE 0x01000000U
#define AQ_HW_LINK_DOWN 0x04000000U
#define AQ_HW_FLAG_ERR_UNPLUG 0x40000000U
#define AQ_HW_FLAG_ERR_HW 0x80000000U
@@ -115,6 +119,23 @@ struct aq_stats_s {
#define AQ_HW_MULTICAST_ADDRESS_MAX 32U
+#define AQ_HW_LED_BLINK 0x2U
+#define AQ_HW_LED_DEFAULT 0x0U
+
+enum aq_priv_flags {
+ AQ_HW_LOOPBACK_DMA_SYS,
+ AQ_HW_LOOPBACK_PKT_SYS,
+ AQ_HW_LOOPBACK_DMA_NET,
+ AQ_HW_LOOPBACK_PHYINT_SYS,
+ AQ_HW_LOOPBACK_PHYEXT_SYS,
+};
+
+#define AQ_HW_LOOPBACK_MASK (BIT(AQ_HW_LOOPBACK_DMA_SYS) |\
+ BIT(AQ_HW_LOOPBACK_PKT_SYS) |\
+ BIT(AQ_HW_LOOPBACK_DMA_NET) |\
+ BIT(AQ_HW_LOOPBACK_PHYINT_SYS) |\
+ BIT(AQ_HW_LOOPBACK_PHYEXT_SYS))
+
struct aq_hw_s {
atomic_t flags;
u8 rbl_enabled:1;
@@ -133,8 +154,11 @@ struct aq_hw_s {
atomic_t dpc;
u32 mbox_addr;
u32 rpc_addr;
+ u32 settings_addr;
u32 rpc_tid;
struct hw_atl_utils_fw_rpc rpc;
+ s64 ptp_clk_offset;
+ u16 phy_id;
};
struct aq_ring_s;
@@ -235,7 +259,43 @@ struct aq_hw_ops {
int (*hw_set_offload)(struct aq_hw_s *self,
struct aq_nic_cfg_s *aq_nic_cfg);
+ int (*hw_tx_tc_mode_get)(struct aq_hw_s *self, u32 *tc_mode);
+
+ int (*hw_rx_tc_mode_get)(struct aq_hw_s *self, u32 *tc_mode);
+
+ int (*hw_ring_hwts_rx_fill)(struct aq_hw_s *self,
+ struct aq_ring_s *aq_ring);
+
+ int (*hw_ring_hwts_rx_receive)(struct aq_hw_s *self,
+ struct aq_ring_s *ring);
+
+ void (*hw_get_ptp_ts)(struct aq_hw_s *self, u64 *stamp);
+
+ int (*hw_adj_clock_freq)(struct aq_hw_s *self, s32 delta);
+
+ int (*hw_adj_sys_clock)(struct aq_hw_s *self, s64 delta);
+
+ int (*hw_set_sys_clock)(struct aq_hw_s *self, u64 time, u64 ts);
+
+ int (*hw_ts_to_sys_clock)(struct aq_hw_s *self, u64 ts, u64 *time);
+
+ int (*hw_gpio_pulse)(struct aq_hw_s *self, u32 index, u64 start,
+ u32 period);
+
+ int (*hw_extts_gpio_enable)(struct aq_hw_s *self, u32 index,
+ u32 enable);
+
+ int (*hw_get_sync_ts)(struct aq_hw_s *self, u64 *ts);
+
+ u16 (*rx_extract_ts)(struct aq_hw_s *self, u8 *p, unsigned int len,
+ u64 *timestamp);
+
+ int (*extract_hwts)(struct aq_hw_s *self, u8 *p, unsigned int len,
+ u64 *timestamp);
+
int (*hw_set_fc)(struct aq_hw_s *self, u32 fc, u32 tc);
+
+ int (*hw_set_loopback)(struct aq_hw_s *self, u32 mode, bool enable);
};
struct aq_fw_ops {
@@ -264,9 +324,19 @@ struct aq_fw_ops {
int (*set_flow_control)(struct aq_hw_s *self);
+ int (*led_control)(struct aq_hw_s *self, u32 mode);
+
+ int (*set_phyloopback)(struct aq_hw_s *self, u32 mode, bool enable);
+
int (*set_power)(struct aq_hw_s *self, unsigned int power_state,
u8 *mac);
+ int (*send_fw_request)(struct aq_hw_s *self,
+ const struct hw_fw_request_iface *fw_req,
+ size_t size);
+
+ void (*enable_ptp)(struct aq_hw_s *self, int enable);
+
int (*set_eee_rate)(struct aq_hw_s *self, u32 speed);
int (*get_eee_rate)(struct aq_hw_s *self, u32 *rate,
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c
index 9c7a226d81b6..7dbf49adcea6 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c
@@ -59,6 +59,7 @@ u64 aq_hw_read_reg64(struct aq_hw_s *hw, u32 reg)
u64 value = aq_hw_read_reg(hw, reg);
value |= (u64)aq_hw_read_reg(hw, reg + 4) << 32;
+
return value;
}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
index bb65dd39f847..538f460a3da7 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* aQuantia Corporation Network Driver
- * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
*/
/* File aq_main.c: Main file for aQuantia Linux driver. */
@@ -10,10 +10,13 @@
#include "aq_nic.h"
#include "aq_pci_func.h"
#include "aq_ethtool.h"
+#include "aq_ptp.h"
#include "aq_filters.h"
#include <linux/netdevice.h>
#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/udp.h>
MODULE_LICENSE("GPL v2");
MODULE_VERSION(AQ_CFG_DRV_VERSION);
@@ -50,8 +53,8 @@ struct net_device *aq_ndev_alloc(void)
static int aq_ndev_open(struct net_device *ndev)
{
- int err = 0;
struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ int err = 0;
err = aq_nic_init(aq_nic);
if (err < 0)
@@ -71,19 +74,20 @@ static int aq_ndev_open(struct net_device *ndev)
err_exit:
if (err < 0)
- aq_nic_deinit(aq_nic);
+ aq_nic_deinit(aq_nic, true);
+
return err;
}
static int aq_ndev_close(struct net_device *ndev)
{
- int err = 0;
struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ int err = 0;
err = aq_nic_stop(aq_nic);
if (err < 0)
goto err_exit;
- aq_nic_deinit(aq_nic);
+ aq_nic_deinit(aq_nic, true);
err_exit:
return err;
@@ -93,13 +97,33 @@ static int aq_ndev_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ if (unlikely(aq_utils_obj_test(&aq_nic->flags, AQ_NIC_PTP_DPATH_UP))) {
+ /* Hardware adds the timestamp for PTPv2 802.1AS
+ * and PTPv2 IPv4 UDP packets.
+ * We also have to push general (port 320) messages to the PTP
+ * queue explicitly. This is a limitation of the current firmware
+ * and hardware PTP design of the chip; otherwise the PTP stream
+ * will fail to sync.
+ */
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) ||
+ unlikely((ip_hdr(skb)->version == 4) &&
+ (ip_hdr(skb)->protocol == IPPROTO_UDP) &&
+ ((udp_hdr(skb)->dest == htons(319)) ||
+ (udp_hdr(skb)->dest == htons(320)))) ||
+ unlikely(eth_hdr(skb)->h_proto == htons(ETH_P_1588)))
+ return aq_ptp_xmit(aq_nic, skb);
+ }
+
+ skb_tx_timestamp(skb);
return aq_nic_xmit(aq_nic, skb);
}
static int aq_ndev_change_mtu(struct net_device *ndev, int new_mtu)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
- int err = aq_nic_set_mtu(aq_nic, new_mtu + ETH_HLEN);
+ int err;
+
+ err = aq_nic_set_mtu(aq_nic, new_mtu + ETH_HLEN);
if (err < 0)
goto err_exit;
@@ -112,8 +136,8 @@ err_exit:
static int aq_ndev_set_features(struct net_device *ndev,
netdev_features_t features)
{
- bool is_vlan_rx_strip = !!(features & NETIF_F_HW_VLAN_CTAG_RX);
bool is_vlan_tx_insert = !!(features & NETIF_F_HW_VLAN_CTAG_TX);
+ bool is_vlan_rx_strip = !!(features & NETIF_F_HW_VLAN_CTAG_RX);
struct aq_nic_s *aq_nic = netdev_priv(ndev);
bool need_ndev_restart = false;
struct aq_nic_cfg_s *aq_cfg;
@@ -197,6 +221,87 @@ static void aq_ndev_set_multicast_settings(struct net_device *ndev)
(void)aq_nic_set_multicast_list(aq_nic, ndev);
}
+static int aq_ndev_config_hwtstamp(struct aq_nic_s *aq_nic,
+ struct hwtstamp_config *config)
+{
+ if (config->flags)
+ return -EINVAL;
+
+ switch (config->tx_type) {
+ case HWTSTAMP_TX_OFF:
+ case HWTSTAMP_TX_ON:
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config->rx_filter) {
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_NONE:
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ return aq_ptp_hwtstamp_config_set(aq_nic->aq_ptp, config);
+}
+
+static int aq_ndev_hwtstamp_set(struct aq_nic_s *aq_nic, struct ifreq *ifr)
+{
+ struct hwtstamp_config config;
+ int ret_val;
+
+ if (!aq_nic->aq_ptp)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ ret_val = aq_ndev_config_hwtstamp(aq_nic, &config);
+ if (ret_val)
+ return ret_val;
+
+ return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
+
+static int aq_ndev_hwtstamp_get(struct aq_nic_s *aq_nic, struct ifreq *ifr)
+{
+ struct hwtstamp_config config;
+
+ if (!aq_nic->aq_ptp)
+ return -EOPNOTSUPP;
+
+ aq_ptp_hwtstamp_config_get(aq_nic->aq_ptp, &config);
+ return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
+
+static int aq_ndev_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(netdev);
+
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ return aq_ndev_hwtstamp_set(aq_nic, ifr);
+
+ case SIOCGHWTSTAMP:
+ return aq_ndev_hwtstamp_get(aq_nic, ifr);
+ }
+
+ return -EOPNOTSUPP;
+}
+
static int aq_ndo_vlan_rx_add_vid(struct net_device *ndev, __be16 proto,
u16 vid)
{
@@ -234,6 +339,7 @@ static const struct net_device_ops aq_ndev_ops = {
.ndo_change_mtu = aq_ndev_change_mtu,
.ndo_set_mac_address = aq_ndev_set_mac_address,
.ndo_set_features = aq_ndev_set_features,
+ .ndo_do_ioctl = aq_ndev_ioctl,
.ndo_vlan_rx_add_vid = aq_ndo_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = aq_ndo_vlan_rx_kill_vid,
};
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index 137c1de4c6ec..a17a4da7bc15 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* aQuantia Corporation Network Driver
- * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
*/
/* File aq_nic.c: Definition of common code for NIC. */
@@ -12,6 +12,9 @@
#include "aq_hw.h"
#include "aq_pci_func.h"
#include "aq_main.h"
+#include "aq_phy.h"
+#include "aq_ptp.h"
+#include "aq_filters.h"
#include <linux/moduleparam.h>
#include <linux/netdevice.h>
@@ -38,10 +41,6 @@ static void aq_nic_update_ndev_stats(struct aq_nic_s *self);
static void aq_nic_rss_init(struct aq_nic_s *self, unsigned int num_rss_queues)
{
- struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
- struct aq_rss_parameters *rss_params = &cfg->aq_rss;
- int i = 0;
-
static u8 rss_key[AQ_CFG_RSS_HASHKEY_SIZE] = {
0x1e, 0xad, 0x71, 0x87, 0x65, 0xfc, 0x26, 0x7d,
0x0d, 0x45, 0x67, 0x74, 0xcd, 0x06, 0x1a, 0x18,
@@ -49,6 +48,11 @@ static void aq_nic_rss_init(struct aq_nic_s *self, unsigned int num_rss_queues)
0x19, 0x13, 0x4b, 0xa9, 0xd0, 0x3e, 0xfe, 0x70,
0x25, 0x03, 0xab, 0x50, 0x6a, 0x8b, 0x82, 0x0c
};
+ struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
+ struct aq_rss_parameters *rss_params;
+ int i = 0;
+
+ rss_params = &cfg->aq_rss;
rss_params->hash_secret_key_size = sizeof(rss_key);
memcpy(rss_params->hash_secret_key, rss_key, sizeof(rss_key));
@@ -75,7 +79,8 @@ void aq_nic_cfg_start(struct aq_nic_s *self)
cfg->is_rss = AQ_CFG_IS_RSS_DEF;
cfg->num_rss_queues = AQ_CFG_NUM_RSS_QUEUES_DEF;
cfg->aq_rss.base_cpu_number = AQ_CFG_RSS_BASE_CPU_NUM_DEF;
- cfg->flow_control = AQ_CFG_FC_MODE;
+ cfg->fc.req = AQ_CFG_FC_MODE;
+ cfg->wol = AQ_CFG_WOL_MODES;
cfg->mtu = AQ_CFG_MTU_DEF;
cfg->link_speed_msk = AQ_CFG_SPEED_MSK;
@@ -139,18 +144,27 @@ static int aq_nic_update_link_status(struct aq_nic_s *self)
if (err)
return err;
+ if (self->aq_fw_ops->get_flow_control)
+ self->aq_fw_ops->get_flow_control(self->aq_hw, &fc);
+ self->aq_nic_cfg.fc.cur = fc;
+
if (self->link_status.mbps != self->aq_hw->aq_link_status.mbps) {
- pr_info("%s: link change old %d new %d\n",
- AQ_CFG_DRV_NAME, self->link_status.mbps,
- self->aq_hw->aq_link_status.mbps);
+ netdev_info(self->ndev, "%s: link change old %d new %d\n",
+ AQ_CFG_DRV_NAME, self->link_status.mbps,
+ self->aq_hw->aq_link_status.mbps);
aq_nic_update_interrupt_moderation_settings(self);
+ if (self->aq_ptp) {
+ aq_ptp_clock_init(self);
+ aq_ptp_tm_offset_set(self,
+ self->aq_hw->aq_link_status.mbps);
+ aq_ptp_link_change(self);
+ }
+
/* Driver has to update flow control settings on RX block
* on any link event.
* We should query FW whether it negotiated FC.
*/
- if (self->aq_fw_ops->get_flow_control)
- self->aq_fw_ops->get_flow_control(self->aq_hw, &fc);
if (self->aq_hw_ops->hw_set_fc)
self->aq_hw_ops->hw_set_fc(self->aq_hw, fc, 0);
}
@@ -169,6 +183,7 @@ static int aq_nic_update_link_status(struct aq_nic_s *self)
netif_tx_disable(self->ndev);
aq_utils_obj_set(&self->flags, AQ_NIC_LINK_DOWN);
}
+
return 0;
}
@@ -183,6 +198,7 @@ static irqreturn_t aq_linkstate_threaded_isr(int irq, void *private)
self->aq_hw_ops->hw_irq_enable(self->aq_hw,
BIT(self->aq_nic_cfg.link_irq_vec));
+
return IRQ_HANDLED;
}
@@ -192,6 +208,8 @@ static void aq_nic_service_task(struct work_struct *work)
service_task);
int err;
+ aq_ptp_service_task(self);
+
if (aq_utils_obj_test(&self->flags, AQ_NIC_FLAGS_IS_NOT_READY))
return;
@@ -211,7 +229,8 @@ static void aq_nic_service_timer_cb(struct timer_list *t)
{
struct aq_nic_s *self = from_timer(self, t, service_timer);
- mod_timer(&self->service_timer, jiffies + AQ_CFG_SERVICE_TIMER_INTERVAL);
+ mod_timer(&self->service_timer,
+ jiffies + AQ_CFG_SERVICE_TIMER_INTERVAL);
aq_ndev_schedule_work(&self->service_task);
}
@@ -290,9 +309,11 @@ void aq_nic_ndev_init(struct aq_nic_s *self)
self->ndev->vlan_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
NETIF_F_RXHASH | NETIF_F_SG |
NETIF_F_LRO | NETIF_F_TSO;
+ self->ndev->gso_partial_features = NETIF_F_GSO_UDP_L4;
self->ndev->priv_flags = aq_hw_caps->hw_priv_flags;
self->ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+ self->msg_enable = NETIF_MSG_DRV | NETIF_MSG_LINK;
self->ndev->mtu = aq_nic_cfg->mtu - ETH_HLEN;
self->ndev->max_mtu = aq_hw_caps->mtu - ETH_FCS_LEN - ETH_HLEN;
@@ -312,8 +333,8 @@ struct net_device *aq_nic_get_ndev(struct aq_nic_s *self)
int aq_nic_init(struct aq_nic_s *self)
{
struct aq_vec_s *aq_vec = NULL;
- int err = 0;
unsigned int i = 0U;
+ int err = 0;
self->power_state = AQ_HW_POWER_STATE_D0;
mutex_lock(&self->fwreq_mutex);
@@ -327,10 +348,27 @@ int aq_nic_init(struct aq_nic_s *self)
if (err < 0)
goto err_exit;
+ if (self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_TP) {
+ self->aq_hw->phy_id = HW_ATL_PHY_ID_MAX;
+ err = aq_phy_init(self->aq_hw);
+ }
+
for (i = 0U, aq_vec = self->aq_vec[0];
self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
aq_vec_init(aq_vec, self->aq_hw_ops, self->aq_hw);
+ err = aq_ptp_init(self, self->irqvecs - 1);
+ if (err < 0)
+ goto err_exit;
+
+ err = aq_ptp_ring_alloc(self);
+ if (err < 0)
+ goto err_exit;
+
+ err = aq_ptp_ring_init(self);
+ if (err < 0)
+ goto err_exit;
+
netif_carrier_off(self->ndev);
err_exit:
@@ -340,8 +378,8 @@ err_exit:
int aq_nic_start(struct aq_nic_s *self)
{
struct aq_vec_s *aq_vec = NULL;
- int err = 0;
unsigned int i = 0U;
+ int err = 0;
err = self->aq_hw_ops->hw_multicast_list_set(self->aq_hw,
self->mc_list.ar,
@@ -361,6 +399,10 @@ int aq_nic_start(struct aq_nic_s *self)
goto err_exit;
}
+ err = aq_ptp_ring_start(self);
+ if (err < 0)
+ goto err_exit;
+
err = self->aq_hw_ops->hw_start(self->aq_hw);
if (err < 0)
goto err_exit;
@@ -371,6 +413,8 @@ int aq_nic_start(struct aq_nic_s *self)
INIT_WORK(&self->service_task, aq_nic_service_task);
+ aq_nic_set_loopback(self);
+
timer_setup(&self->service_timer, aq_nic_service_timer_cb, 0);
aq_nic_service_timer_cb(&self->service_timer);
@@ -388,6 +432,10 @@ int aq_nic_start(struct aq_nic_s *self)
goto err_exit;
}
+ err = aq_ptp_irq_alloc(self);
+ if (err < 0)
+ goto err_exit;
+
if (self->aq_nic_cfg.link_irq_vec) {
int irqvec = pci_irq_vector(self->pdev,
self->aq_nic_cfg.link_irq_vec);
@@ -420,30 +468,48 @@ err_exit:
return err;
}
-static unsigned int aq_nic_map_skb(struct aq_nic_s *self,
- struct sk_buff *skb,
- struct aq_ring_s *ring)
+unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb,
+ struct aq_ring_s *ring)
{
- unsigned int ret = 0U;
unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
- unsigned int frag_count = 0U;
- unsigned int dx = ring->sw_tail;
struct aq_ring_buff_s *first = NULL;
- struct aq_ring_buff_s *dx_buff = &ring->buff_ring[dx];
+ u8 ipver = ip_hdr(skb)->version;
+ struct aq_ring_buff_s *dx_buff;
bool need_context_tag = false;
+ unsigned int frag_count = 0U;
+ unsigned int ret = 0U;
+ unsigned int dx;
+ u8 l4proto = 0;
+
+ if (ipver == 4)
+ l4proto = ip_hdr(skb)->protocol;
+ else if (ipver == 6)
+ l4proto = ipv6_hdr(skb)->nexthdr;
+ dx = ring->sw_tail;
+ dx_buff = &ring->buff_ring[dx];
dx_buff->flags = 0U;
if (unlikely(skb_is_gso(skb))) {
dx_buff->mss = skb_shinfo(skb)->gso_size;
- dx_buff->is_gso = 1U;
+ if (l4proto == IPPROTO_TCP) {
+ dx_buff->is_gso_tcp = 1U;
+ dx_buff->len_l4 = tcp_hdrlen(skb);
+ } else if (l4proto == IPPROTO_UDP) {
+ dx_buff->is_gso_udp = 1U;
+ dx_buff->len_l4 = sizeof(struct udphdr);
+ /* For UDP GSO, the hardware does not replace the packet length. */
+ udp_hdr(skb)->len = htons(dx_buff->mss +
+ dx_buff->len_l4);
+ } else {
+ WARN_ONCE(true, "Bad GSO mode");
+ goto exit;
+ }
dx_buff->len_pkt = skb->len;
dx_buff->len_l2 = ETH_HLEN;
- dx_buff->len_l3 = ip_hdrlen(skb);
- dx_buff->len_l4 = tcp_hdrlen(skb);
+ dx_buff->len_l3 = skb_network_header_len(skb);
dx_buff->eop_index = 0xffffU;
- dx_buff->is_ipv6 =
- (ip_hdr(skb)->version == 6) ? 1U : 0U;
+ dx_buff->is_ipv6 = (ipver == 6);
need_context_tag = true;
}
@@ -477,24 +543,9 @@ static unsigned int aq_nic_map_skb(struct aq_nic_s *self,
++ret;
if (skb->ip_summed == CHECKSUM_PARTIAL) {
- dx_buff->is_ip_cso = (htons(ETH_P_IP) == skb->protocol) ?
- 1U : 0U;
-
- if (ip_hdr(skb)->version == 4) {
- dx_buff->is_tcp_cso =
- (ip_hdr(skb)->protocol == IPPROTO_TCP) ?
- 1U : 0U;
- dx_buff->is_udp_cso =
- (ip_hdr(skb)->protocol == IPPROTO_UDP) ?
- 1U : 0U;
- } else if (ip_hdr(skb)->version == 6) {
- dx_buff->is_tcp_cso =
- (ipv6_hdr(skb)->nexthdr == NEXTHDR_TCP) ?
- 1U : 0U;
- dx_buff->is_udp_cso =
- (ipv6_hdr(skb)->nexthdr == NEXTHDR_UDP) ?
- 1U : 0U;
- }
+ dx_buff->is_ip_cso = (htons(ETH_P_IP) == skb->protocol);
+ dx_buff->is_tcp_cso = (l4proto == IPPROTO_TCP);
+ dx_buff->is_udp_cso = (l4proto == IPPROTO_UDP);
}
for (; nr_frags--; ++frag_count) {
@@ -549,7 +600,8 @@ mapping_error:
--ret, dx = aq_ring_next_dx(ring, dx)) {
dx_buff = &ring->buff_ring[dx];
- if (!dx_buff->is_gso && !dx_buff->is_vlan && dx_buff->pa) {
+ if (!(dx_buff->is_gso_tcp || dx_buff->is_gso_udp) &&
+ !dx_buff->is_vlan && dx_buff->pa) {
if (unlikely(dx_buff->is_sop)) {
dma_unmap_single(aq_nic_get_dev(self),
dx_buff->pa,
@@ -570,11 +622,11 @@ exit:
int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
{
+ unsigned int vec = skb->queue_mapping % self->aq_nic_cfg.vecs;
struct aq_ring_s *ring = NULL;
unsigned int frags = 0U;
- unsigned int vec = skb->queue_mapping % self->aq_nic_cfg.vecs;
- unsigned int tc = 0U;
int err = NETDEV_TX_OK;
+ unsigned int tc = 0U;
frags = skb_shinfo(skb)->nr_frags + 1;
@@ -587,6 +639,11 @@ int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
aq_ring_update_queue_state(ring);
+ if (self->aq_nic_cfg.priv_flags & BIT(AQ_HW_LOOPBACK_DMA_NET)) {
+ err = NETDEV_TX_BUSY;
+ goto err_exit;
+ }
+
/* Above status update may stop the queue. Check this. */
if (__netif_subqueue_stopped(self->ndev, ring->idx)) {
err = NETDEV_TX_BUSY;
@@ -667,6 +724,7 @@ int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev)
if (err < 0)
return err;
}
+
return aq_nic_set_packet_filter(self, packet_filter);
}
@@ -711,10 +769,10 @@ int aq_nic_get_regs_count(struct aq_nic_s *self)
void aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
{
- unsigned int i = 0U;
- unsigned int count = 0U;
struct aq_vec_s *aq_vec = NULL;
struct aq_stats_s *stats;
+ unsigned int count = 0U;
+ unsigned int i = 0U;
if (self->aq_fw_ops->update_stats) {
mutex_lock(&self->fwreq_mutex);
@@ -764,8 +822,8 @@ err_exit:;
static void aq_nic_update_ndev_stats(struct aq_nic_s *self)
{
- struct net_device *ndev = self->ndev;
struct aq_stats_s *stats = self->aq_hw_ops->hw_get_hw_stats(self->aq_hw);
+ struct net_device *ndev = self->ndev;
ndev->stats.rx_packets = stats->dma_pkt_rc;
ndev->stats.rx_bytes = stats->dma_oct_rc;
@@ -810,9 +868,12 @@ void aq_nic_get_link_ksettings(struct aq_nic_s *self,
ethtool_link_ksettings_add_link_mode(cmd, supported,
100baseT_Full);
- if (self->aq_nic_cfg.aq_hw_caps->flow_control)
+ if (self->aq_nic_cfg.aq_hw_caps->flow_control) {
ethtool_link_ksettings_add_link_mode(cmd, supported,
Pause);
+ ethtool_link_ksettings_add_link_mode(cmd, supported,
+ Asym_Pause);
+ }
ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
@@ -846,13 +907,13 @@ void aq_nic_get_link_ksettings(struct aq_nic_s *self,
ethtool_link_ksettings_add_link_mode(cmd, advertising,
100baseT_Full);
- if (self->aq_nic_cfg.flow_control & AQ_NIC_FC_RX)
+ if (self->aq_nic_cfg.fc.cur & AQ_NIC_FC_RX)
ethtool_link_ksettings_add_link_mode(cmd, advertising,
Pause);
/* Asym is when either RX or TX, but not both */
- if (!!(self->aq_nic_cfg.flow_control & AQ_NIC_FC_TX) ^
- !!(self->aq_nic_cfg.flow_control & AQ_NIC_FC_RX))
+ if (!!(self->aq_nic_cfg.fc.cur & AQ_NIC_FC_TX) ^
+ !!(self->aq_nic_cfg.fc.cur & AQ_NIC_FC_RX))
ethtool_link_ksettings_add_link_mode(cmd, advertising,
Asym_Pause);
@@ -935,6 +996,44 @@ u32 aq_nic_get_fw_version(struct aq_nic_s *self)
return fw_version;
}
+int aq_nic_set_loopback(struct aq_nic_s *self)
+{
+ struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
+
+ if (!self->aq_hw_ops->hw_set_loopback ||
+ !self->aq_fw_ops->set_phyloopback)
+ return -ENOTSUPP;
+
+ mutex_lock(&self->fwreq_mutex);
+ self->aq_hw_ops->hw_set_loopback(self->aq_hw,
+ AQ_HW_LOOPBACK_DMA_SYS,
+ !!(cfg->priv_flags &
+ BIT(AQ_HW_LOOPBACK_DMA_SYS)));
+
+ self->aq_hw_ops->hw_set_loopback(self->aq_hw,
+ AQ_HW_LOOPBACK_PKT_SYS,
+ !!(cfg->priv_flags &
+ BIT(AQ_HW_LOOPBACK_PKT_SYS)));
+
+ self->aq_hw_ops->hw_set_loopback(self->aq_hw,
+ AQ_HW_LOOPBACK_DMA_NET,
+ !!(cfg->priv_flags &
+ BIT(AQ_HW_LOOPBACK_DMA_NET)));
+
+ self->aq_fw_ops->set_phyloopback(self->aq_hw,
+ AQ_HW_LOOPBACK_PHYINT_SYS,
+ !!(cfg->priv_flags &
+ BIT(AQ_HW_LOOPBACK_PHYINT_SYS)));
+
+ self->aq_fw_ops->set_phyloopback(self->aq_hw,
+ AQ_HW_LOOPBACK_PHYEXT_SYS,
+ !!(cfg->priv_flags &
+ BIT(AQ_HW_LOOPBACK_PHYEXT_SYS)));
+ mutex_unlock(&self->fwreq_mutex);
+
+ return 0;
+}
+
int aq_nic_stop(struct aq_nic_s *self)
{
struct aq_vec_s *aq_vec = NULL;
@@ -953,14 +1052,31 @@ int aq_nic_stop(struct aq_nic_s *self)
else
aq_pci_func_free_irqs(self);
+ aq_ptp_irq_free(self);
+
for (i = 0U, aq_vec = self->aq_vec[0];
self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
aq_vec_stop(aq_vec);
+ aq_ptp_ring_stop(self);
+
return self->aq_hw_ops->hw_stop(self->aq_hw);
}
-void aq_nic_deinit(struct aq_nic_s *self)
+void aq_nic_set_power(struct aq_nic_s *self)
+{
+ if (self->power_state != AQ_HW_POWER_STATE_D0 ||
+ self->aq_hw->aq_nic_cfg->wol)
+ if (likely(self->aq_fw_ops->set_power)) {
+ mutex_lock(&self->fwreq_mutex);
+ self->aq_fw_ops->set_power(self->aq_hw,
+ self->power_state,
+ self->ndev->dev_addr);
+ mutex_unlock(&self->fwreq_mutex);
+ }
+}
+
+void aq_nic_deinit(struct aq_nic_s *self, bool link_down)
{
struct aq_vec_s *aq_vec = NULL;
unsigned int i = 0U;
@@ -972,23 +1088,17 @@ void aq_nic_deinit(struct aq_nic_s *self)
self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
aq_vec_deinit(aq_vec);
- if (likely(self->aq_fw_ops->deinit)) {
+ aq_ptp_unregister(self);
+ aq_ptp_ring_deinit(self);
+ aq_ptp_ring_free(self);
+ aq_ptp_free(self);
+
+ if (likely(self->aq_fw_ops->deinit) && link_down) {
mutex_lock(&self->fwreq_mutex);
self->aq_fw_ops->deinit(self->aq_hw);
mutex_unlock(&self->fwreq_mutex);
}
- if (self->power_state != AQ_HW_POWER_STATE_D0 ||
- self->aq_hw->aq_nic_cfg->wol)
- if (likely(self->aq_fw_ops->set_power)) {
- mutex_lock(&self->fwreq_mutex);
- self->aq_fw_ops->set_power(self->aq_hw,
- self->power_state,
- self->ndev->dev_addr);
- mutex_unlock(&self->fwreq_mutex);
- }
-
-
err_exit:;
}
@@ -1009,44 +1119,6 @@ void aq_nic_free_vectors(struct aq_nic_s *self)
err_exit:;
}
-int aq_nic_change_pm_state(struct aq_nic_s *self, pm_message_t *pm_msg)
-{
- int err = 0;
-
- if (!netif_running(self->ndev)) {
- err = 0;
- goto out;
- }
- rtnl_lock();
- if (pm_msg->event & PM_EVENT_SLEEP || pm_msg->event & PM_EVENT_FREEZE) {
- self->power_state = AQ_HW_POWER_STATE_D3;
- netif_device_detach(self->ndev);
- netif_tx_stop_all_queues(self->ndev);
-
- err = aq_nic_stop(self);
- if (err < 0)
- goto err_exit;
-
- aq_nic_deinit(self);
- } else {
- err = aq_nic_init(self);
- if (err < 0)
- goto err_exit;
-
- err = aq_nic_start(self);
- if (err < 0)
- goto err_exit;
-
- netif_device_attach(self->ndev);
- netif_tx_start_all_queues(self->ndev);
- }
-
-err_exit:
- rtnl_unlock();
-out:
- return err;
-}
-
void aq_nic_shutdown(struct aq_nic_s *self)
{
int err = 0;
@@ -1063,8 +1135,52 @@ void aq_nic_shutdown(struct aq_nic_s *self)
if (err < 0)
goto err_exit;
}
- aq_nic_deinit(self);
+ aq_nic_deinit(self, !self->aq_hw->aq_nic_cfg->wol);
+ aq_nic_set_power(self);
err_exit:
rtnl_unlock();
}
+
+u8 aq_nic_reserve_filter(struct aq_nic_s *self, enum aq_rx_filter_type type)
+{
+ u8 location = 0xFF;
+ u32 fltr_cnt;
+ u32 n_bit;
+
+ switch (type) {
+ case aq_rx_filter_ethertype:
+ location = AQ_RX_LAST_LOC_FETHERT - AQ_RX_FIRST_LOC_FETHERT -
+ self->aq_hw_rx_fltrs.fet_reserved_count;
+ self->aq_hw_rx_fltrs.fet_reserved_count++;
+ break;
+ case aq_rx_filter_l3l4:
+ fltr_cnt = AQ_RX_LAST_LOC_FL3L4 - AQ_RX_FIRST_LOC_FL3L4;
+ n_bit = fltr_cnt - self->aq_hw_rx_fltrs.fl3l4.reserved_count;
+
+ self->aq_hw_rx_fltrs.fl3l4.active_ipv4 |= BIT(n_bit);
+ self->aq_hw_rx_fltrs.fl3l4.reserved_count++;
+ location = n_bit;
+ break;
+ default:
+ break;
+ }
+
+ return location;
+}
+
+void aq_nic_release_filter(struct aq_nic_s *self, enum aq_rx_filter_type type,
+ u32 location)
+{
+ switch (type) {
+ case aq_rx_filter_ethertype:
+ self->aq_hw_rx_fltrs.fet_reserved_count--;
+ break;
+ case aq_rx_filter_l3l4:
+ self->aq_hw_rx_fltrs.fl3l4.reserved_count--;
+ self->aq_hw_rx_fltrs.fl3l4.active_ipv4 &= ~BIT(location);
+ break;
+ default:
+ break;
+ }
+}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
index 255b54a6ae07..a752f8bb4b08 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* aQuantia Corporation Network Driver
- * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
*/
/* File aq_nic.h: Declaration of common code for NIC. */
@@ -17,6 +17,20 @@ struct aq_ring_s;
struct aq_hw_ops;
struct aq_fw_s;
struct aq_vec_s;
+struct aq_ptp_s;
+enum aq_rx_filter_type;
+
+enum aq_fc_mode {
+ AQ_NIC_FC_OFF = 0,
+ AQ_NIC_FC_TX,
+ AQ_NIC_FC_RX,
+ AQ_NIC_FC_FULL,
+};
+
+struct aq_fc_info {
+ enum aq_fc_mode req;
+ enum aq_fc_mode cur;
+};
struct aq_nic_cfg_s {
const struct aq_hw_caps_s *aq_hw_caps;
@@ -32,7 +46,7 @@ struct aq_nic_cfg_s {
u32 rxpageorder;
u32 num_rss_queues;
u32 mtu;
- u32 flow_control;
+ struct aq_fc_info fc;
u32 link_speed_msk;
u32 wol;
u8 is_vlan_rx_strip;
@@ -44,6 +58,7 @@ struct aq_nic_cfg_s {
bool is_polling;
bool is_rss;
bool is_lro;
+ u32 priv_flags;
u8 tcs;
struct aq_rss_parameters aq_rss;
u32 eee_speeds;
@@ -53,11 +68,13 @@ struct aq_nic_cfg_s {
#define AQ_NIC_FLAG_STOPPING 0x00000008U
#define AQ_NIC_FLAG_RESETTING 0x00000010U
#define AQ_NIC_FLAG_CLOSING 0x00000020U
+#define AQ_NIC_PTP_DPATH_UP 0x02000000U
#define AQ_NIC_LINK_DOWN 0x04000000U
#define AQ_NIC_FLAG_ERR_UNPLUG 0x40000000U
#define AQ_NIC_FLAG_ERR_HW 0x80000000U
-#define AQ_NIC_WOL_ENABLED BIT(0)
+#define AQ_NIC_WOL_MODES (WAKE_MAGIC |\
+ WAKE_PHY)
#define AQ_NIC_TCVEC2RING(_NIC_, _TC_, _VEC_) \
((_TC_) * AQ_CFG_TCS_MAX + (_VEC_))
@@ -67,9 +84,10 @@ struct aq_hw_rx_fl2 {
};
struct aq_hw_rx_fl3l4 {
- u8 active_ipv4;
- u8 active_ipv6:2;
+ u8 active_ipv4;
+ u8 active_ipv6:2;
u8 is_ipv6;
+ u8 reserved_count;
};
struct aq_hw_rx_fltrs_s {
@@ -77,10 +95,13 @@ struct aq_hw_rx_fltrs_s {
u16 active_filters;
struct aq_hw_rx_fl2 fl2;
struct aq_hw_rx_fl3l4 fl3l4;
+ /* filter ether type */
+ u8 fet_reserved_count;
};
struct aq_nic_s {
atomic_t flags;
+ u32 msg_enable;
struct aq_vec_s *aq_vec[AQ_CFG_VECS_MAX];
struct aq_ring_s *aq_ring_tx[AQ_CFG_VECS_MAX * AQ_CFG_TCS_MAX];
struct aq_hw_s *aq_hw;
@@ -108,6 +129,8 @@ struct aq_nic_s {
u32 irqvecs;
/* mutex to serialize FW interface access operations */
struct mutex fwreq_mutex;
+ /* PTP support */
+ struct aq_ptp_s *aq_ptp;
struct aq_hw_rx_fltrs_s aq_hw_rx_fltrs;
};
@@ -126,12 +149,15 @@ void aq_nic_cfg_start(struct aq_nic_s *self);
int aq_nic_ndev_register(struct aq_nic_s *self);
void aq_nic_ndev_free(struct aq_nic_s *self);
int aq_nic_start(struct aq_nic_s *self);
+unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb,
+ struct aq_ring_s *ring);
int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb);
int aq_nic_get_regs(struct aq_nic_s *self, struct ethtool_regs *regs, void *p);
int aq_nic_get_regs_count(struct aq_nic_s *self);
void aq_nic_get_stats(struct aq_nic_s *self, u64 *data);
int aq_nic_stop(struct aq_nic_s *self);
-void aq_nic_deinit(struct aq_nic_s *self);
+void aq_nic_deinit(struct aq_nic_s *self, bool link_down);
+void aq_nic_set_power(struct aq_nic_s *self);
void aq_nic_free_hot_resources(struct aq_nic_s *self);
void aq_nic_free_vectors(struct aq_nic_s *self);
int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu);
@@ -145,8 +171,10 @@ int aq_nic_set_link_ksettings(struct aq_nic_s *self,
const struct ethtool_link_ksettings *cmd);
struct aq_nic_cfg_s *aq_nic_get_cfg(struct aq_nic_s *self);
u32 aq_nic_get_fw_version(struct aq_nic_s *self);
-int aq_nic_change_pm_state(struct aq_nic_s *self, pm_message_t *pm_msg);
+int aq_nic_set_loopback(struct aq_nic_s *self);
int aq_nic_update_interrupt_moderation_settings(struct aq_nic_s *self);
void aq_nic_shutdown(struct aq_nic_s *self);
-
+u8 aq_nic_reserve_filter(struct aq_nic_s *self, enum aq_rx_filter_type type);
+void aq_nic_release_filter(struct aq_nic_s *self, enum aq_rx_filter_type type,
+ u32 location);
#endif /* AQ_NIC_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
index 74b9f3f1da81..2bb329606794 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* aQuantia Corporation Network Driver
- * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
*/
/* File aq_pci_func.c: Definition of PCI functions. */
@@ -185,6 +185,7 @@ unsigned int aq_pci_func_get_irq_type(struct aq_nic_s *self)
return AQ_HW_IRQ_MSIX;
if (self->pdev->msi_enabled)
return AQ_HW_IRQ_MSI;
+
return AQ_HW_IRQ_LEGACY;
}
@@ -196,12 +197,12 @@ static void aq_pci_free_irq_vectors(struct aq_nic_s *self)
static int aq_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *pci_id)
{
- struct aq_nic_s *self;
- int err;
struct net_device *ndev;
resource_size_t mmio_pa;
- u32 bar;
+ struct aq_nic_s *self;
u32 numvecs;
+ u32 bar;
+ int err;
err = pci_enable_device(pdev);
if (err)
@@ -269,6 +270,9 @@ static int aq_pci_probe(struct pci_dev *pdev,
numvecs = min((u8)AQ_CFG_VECS_DEF,
aq_nic_get_cfg(self)->aq_hw_caps->msix_irqs);
numvecs = min(numvecs, num_online_cpus());
+ /* Request IRQ vector for PTP */
+ numvecs += 1;
+
numvecs += AQ_HW_SERVICE_IRQS;
/*enable interrupts */
#if !AQ_CFG_FORCE_LEGACY_INT
@@ -308,6 +312,7 @@ err_ndev:
pci_release_regions(pdev);
err_pci_func:
pci_disable_device(pdev);
+
return err;
}
@@ -344,29 +349,98 @@ static void aq_pci_shutdown(struct pci_dev *pdev)
}
}
-static int aq_pci_suspend(struct pci_dev *pdev, pm_message_t pm_msg)
+static int aq_suspend_common(struct device *dev, bool deep)
{
- struct aq_nic_s *self = pci_get_drvdata(pdev);
+ struct aq_nic_s *nic = pci_get_drvdata(to_pci_dev(dev));
+
+ rtnl_lock();
- return aq_nic_change_pm_state(self, &pm_msg);
+ nic->power_state = AQ_HW_POWER_STATE_D3;
+ netif_device_detach(nic->ndev);
+ netif_tx_stop_all_queues(nic->ndev);
+
+ aq_nic_stop(nic);
+
+ if (deep) {
+ aq_nic_deinit(nic, !nic->aq_hw->aq_nic_cfg->wol);
+ aq_nic_set_power(nic);
+ }
+
+ rtnl_unlock();
+
+ return 0;
}
-static int aq_pci_resume(struct pci_dev *pdev)
+static int atl_resume_common(struct device *dev, bool deep)
{
- struct aq_nic_s *self = pci_get_drvdata(pdev);
- pm_message_t pm_msg = PMSG_RESTORE;
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct aq_nic_s *nic;
+ int ret;
+
+ nic = pci_get_drvdata(pdev);
+
+ rtnl_lock();
- return aq_nic_change_pm_state(self, &pm_msg);
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+
+ if (deep) {
+ ret = aq_nic_init(nic);
+ if (ret)
+ goto err_exit;
+ }
+
+ ret = aq_nic_start(nic);
+ if (ret)
+ goto err_exit;
+
+ netif_device_attach(nic->ndev);
+ netif_tx_start_all_queues(nic->ndev);
+
+err_exit:
+ rtnl_unlock();
+
+ return ret;
}
+static int aq_pm_freeze(struct device *dev)
+{
+ return aq_suspend_common(dev, false);
+}
+
+static int aq_pm_suspend_poweroff(struct device *dev)
+{
+ return aq_suspend_common(dev, true);
+}
+
+static int aq_pm_thaw(struct device *dev)
+{
+ return atl_resume_common(dev, false);
+}
+
+static int aq_pm_resume_restore(struct device *dev)
+{
+ return atl_resume_common(dev, true);
+}
+
+static const struct dev_pm_ops aq_pm_ops = {
+ .suspend = aq_pm_suspend_poweroff,
+ .poweroff = aq_pm_suspend_poweroff,
+ .freeze = aq_pm_freeze,
+ .resume = aq_pm_resume_restore,
+ .restore = aq_pm_resume_restore,
+ .thaw = aq_pm_thaw,
+};
+
static struct pci_driver aq_pci_ops = {
.name = AQ_CFG_DRV_NAME,
.id_table = aq_pci_tbl,
.probe = aq_pci_probe,
.remove = aq_pci_remove,
- .suspend = aq_pci_suspend,
- .resume = aq_pci_resume,
.shutdown = aq_pci_shutdown,
+#ifdef CONFIG_PM
+ .driver.pm = &aq_pm_ops,
+#endif
};
int aq_pci_func_register_driver(void)
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_phy.c b/drivers/net/ethernet/aquantia/atlantic/aq_phy.c
new file mode 100644
index 000000000000..51ae921e3e1f
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_phy.c
@@ -0,0 +1,147 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* aQuantia Corporation Network Driver
+ * Copyright (C) 2018-2019 aQuantia Corporation. All rights reserved
+ */
+
+#include "aq_phy.h"
+
+bool aq_mdio_busy_wait(struct aq_hw_s *aq_hw)
+{
+ int err = 0;
+ u32 val;
+
+ err = readx_poll_timeout_atomic(hw_atl_mdio_busy_get, aq_hw,
+ val, val == 0U, 10U, 100000U);
+
+ if (err < 0)
+ return false;
+
+ return true;
+}
+
+u16 aq_mdio_read_word(struct aq_hw_s *aq_hw, u16 mmd, u16 addr)
+{
+ u16 phy_addr = aq_hw->phy_id << 5 | mmd;
+
+ /* Set Address register. */
+ hw_atl_glb_mdio_iface4_set(aq_hw, (addr & HW_ATL_MDIO_ADDRESS_MSK) <<
+ HW_ATL_MDIO_ADDRESS_SHIFT);
+ /* Send Address command. */
+ hw_atl_glb_mdio_iface2_set(aq_hw, HW_ATL_MDIO_EXECUTE_OPERATION_MSK |
+ (3 << HW_ATL_MDIO_OP_MODE_SHIFT) |
+ ((phy_addr & HW_ATL_MDIO_PHY_ADDRESS_MSK) <<
+ HW_ATL_MDIO_PHY_ADDRESS_SHIFT));
+
+ aq_mdio_busy_wait(aq_hw);
+
+ /* Send Read command. */
+ hw_atl_glb_mdio_iface2_set(aq_hw, HW_ATL_MDIO_EXECUTE_OPERATION_MSK |
+ (1 << HW_ATL_MDIO_OP_MODE_SHIFT) |
+ ((phy_addr & HW_ATL_MDIO_PHY_ADDRESS_MSK) <<
+ HW_ATL_MDIO_PHY_ADDRESS_SHIFT));
+ /* Read result. */
+ aq_mdio_busy_wait(aq_hw);
+
+ return (u16)hw_atl_glb_mdio_iface5_get(aq_hw);
+}
+
+void aq_mdio_write_word(struct aq_hw_s *aq_hw, u16 mmd, u16 addr, u16 data)
+{
+ u16 phy_addr = aq_hw->phy_id << 5 | mmd;
+
+ /* Set Address register. */
+ hw_atl_glb_mdio_iface4_set(aq_hw, (addr & HW_ATL_MDIO_ADDRESS_MSK) <<
+ HW_ATL_MDIO_ADDRESS_SHIFT);
+ /* Send Address command. */
+ hw_atl_glb_mdio_iface2_set(aq_hw, HW_ATL_MDIO_EXECUTE_OPERATION_MSK |
+ (3 << HW_ATL_MDIO_OP_MODE_SHIFT) |
+ ((phy_addr & HW_ATL_MDIO_PHY_ADDRESS_MSK) <<
+ HW_ATL_MDIO_PHY_ADDRESS_SHIFT));
+
+ aq_mdio_busy_wait(aq_hw);
+
+ hw_atl_glb_mdio_iface3_set(aq_hw, (data & HW_ATL_MDIO_WRITE_DATA_MSK) <<
+ HW_ATL_MDIO_WRITE_DATA_SHIFT);
+ /* Send Write command. */
+ hw_atl_glb_mdio_iface2_set(aq_hw, HW_ATL_MDIO_EXECUTE_OPERATION_MSK |
+ (2 << HW_ATL_MDIO_OP_MODE_SHIFT) |
+ ((phy_addr & HW_ATL_MDIO_PHY_ADDRESS_MSK) <<
+ HW_ATL_MDIO_PHY_ADDRESS_SHIFT));
+
+ aq_mdio_busy_wait(aq_hw);
+}
+
+u16 aq_phy_read_reg(struct aq_hw_s *aq_hw, u16 mmd, u16 address)
+{
+ int err = 0;
+ u32 val;
+
+ err = readx_poll_timeout_atomic(hw_atl_sem_mdio_get, aq_hw,
+ val, val == 1U, 10U, 100000U);
+
+ if (err < 0) {
+ err = 0xffff;
+ goto err_exit;
+ }
+
+ err = aq_mdio_read_word(aq_hw, mmd, address);
+
+ hw_atl_reg_glb_cpu_sem_set(aq_hw, 1U, HW_ATL_FW_SM_MDIO);
+
+err_exit:
+ return err;
+}
+
+void aq_phy_write_reg(struct aq_hw_s *aq_hw, u16 mmd, u16 address, u16 data)
+{
+ int err = 0;
+ u32 val;
+
+ err = readx_poll_timeout_atomic(hw_atl_sem_mdio_get, aq_hw,
+ val, val == 1U, 10U, 100000U);
+ if (err < 0)
+ return;
+
+ aq_mdio_write_word(aq_hw, mmd, address, data);
+ hw_atl_reg_glb_cpu_sem_set(aq_hw, 1U, HW_ATL_FW_SM_MDIO);
+}
+
+bool aq_phy_init_phy_id(struct aq_hw_s *aq_hw)
+{
+ u16 val;
+
+ for (aq_hw->phy_id = 0; aq_hw->phy_id < HW_ATL_PHY_ID_MAX;
+ ++aq_hw->phy_id) {
+ /* PMA Standard Device Identifier 2: Address 1.3 */
+ val = aq_phy_read_reg(aq_hw, MDIO_MMD_PMAPMD, 3);
+
+ if (val != 0xffff)
+ return true;
+ }
+
+ return false;
+}
+
+bool aq_phy_init(struct aq_hw_s *aq_hw)
+{
+ u32 dev_id;
+
+ if (aq_hw->phy_id == HW_ATL_PHY_ID_MAX)
+ if (!aq_phy_init_phy_id(aq_hw))
+ return false;
+
+ /* PMA Standard Device Identifier:
+ * Address 1.2 = MSW,
+ * Address 1.3 = LSW
+ */
+ dev_id = aq_phy_read_reg(aq_hw, MDIO_MMD_PMAPMD, 2);
+ dev_id <<= 16;
+ dev_id |= aq_phy_read_reg(aq_hw, MDIO_MMD_PMAPMD, 3);
+
+ if (dev_id == 0xffffffff) {
+ aq_hw->phy_id = HW_ATL_PHY_ID_MAX;
+ return false;
+ }
+
+ return true;
+}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_phy.h b/drivers/net/ethernet/aquantia/atlantic/aq_phy.h
new file mode 100644
index 000000000000..84b72ad04a4a
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_phy.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* aQuantia Corporation Network Driver
+ * Copyright (C) 2018-2019 aQuantia Corporation. All rights reserved
+ */
+
+#ifndef AQ_PHY_H
+#define AQ_PHY_H
+
+#include <linux/mdio.h>
+
+#include "hw_atl/hw_atl_llh.h"
+#include "hw_atl/hw_atl_llh_internal.h"
+#include "aq_hw_utils.h"
+#include "aq_hw.h"
+
+#define HW_ATL_PHY_ID_MAX 32U
+
+bool aq_mdio_busy_wait(struct aq_hw_s *aq_hw);
+
+u16 aq_mdio_read_word(struct aq_hw_s *aq_hw, u16 mmd, u16 addr);
+
+void aq_mdio_write_word(struct aq_hw_s *aq_hw, u16 mmd, u16 addr, u16 data);
+
+u16 aq_phy_read_reg(struct aq_hw_s *aq_hw, u16 mmd, u16 address);
+
+void aq_phy_write_reg(struct aq_hw_s *aq_hw, u16 mmd, u16 address, u16 data);
+
+bool aq_phy_init_phy_id(struct aq_hw_s *aq_hw);
+
+bool aq_phy_init(struct aq_hw_s *aq_hw);
+
+#endif /* AQ_PHY_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
new file mode 100644
index 000000000000..58e8c641e8b3
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
@@ -0,0 +1,1392 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Aquantia Corporation Network Driver
+ * Copyright (C) 2014-2019 Aquantia Corporation. All rights reserved
+ */
+
+/* File aq_ptp.c:
+ * Definition of functions for Linux PTP support.
+ */
+
+#include <linux/ptp_clock_kernel.h>
+#include <linux/ptp_classify.h>
+#include <linux/interrupt.h>
+#include <linux/clocksource.h>
+
+#include "aq_nic.h"
+#include "aq_ptp.h"
+#include "aq_ring.h"
+#include "aq_phy.h"
+#include "aq_filters.h"
+
+#define AQ_PTP_TX_TIMEOUT (HZ * 10)
+
+#define POLL_SYNC_TIMER_MS 15
+
+enum ptp_speed_offsets {
+ ptp_offset_idx_10 = 0,
+ ptp_offset_idx_100,
+ ptp_offset_idx_1000,
+ ptp_offset_idx_2500,
+ ptp_offset_idx_5000,
+ ptp_offset_idx_10000,
+};
+
+struct ptp_skb_ring {
+ struct sk_buff **buff;
+ spinlock_t lock;
+ unsigned int size;
+ unsigned int head;
+ unsigned int tail;
+};
+
+struct ptp_tx_timeout {
+ spinlock_t lock;
+ bool active;
+ unsigned long tx_start;
+};
+
+struct aq_ptp_s {
+ struct aq_nic_s *aq_nic;
+ struct hwtstamp_config hwtstamp_config;
+ spinlock_t ptp_lock;
+ spinlock_t ptp_ring_lock;
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info ptp_info;
+
+ atomic_t offset_egress;
+ atomic_t offset_ingress;
+
+ struct aq_ring_param_s ptp_ring_param;
+
+ struct ptp_tx_timeout ptp_tx_timeout;
+
+ unsigned int idx_vector;
+ struct napi_struct napi;
+
+ struct aq_ring_s ptp_tx;
+ struct aq_ring_s ptp_rx;
+ struct aq_ring_s hwts_rx;
+
+ struct ptp_skb_ring skb_ring;
+
+ struct aq_rx_filter_l3l4 udp_filter;
+ struct aq_rx_filter_l2 eth_type_filter;
+
+ struct delayed_work poll_sync;
+ u32 poll_timeout_ms;
+
+ bool extts_pin_enabled;
+ u64 last_sync1588_ts;
+};
+
+struct ptp_tm_offset {
+ unsigned int mbps;
+ int egress;
+ int ingress;
+};
+
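+/* Per-link-speed egress/ingress timestamp corrections, loaded from firmware
+ * at init time (see aq_ptp_offset_init_from_fw()). The pair matching the
+ * current link speed is cached in offset_egress/offset_ingress and applied
+ * to every Tx/Rx hardware timestamp.
+ */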
+static struct ptp_tm_offset ptp_offset[6];
+
+void aq_ptp_tm_offset_set(struct aq_nic_s *aq_nic, unsigned int mbps)
+{
+ struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
+ int i, egress, ingress;
+
+ if (!aq_ptp)
+ return;
+
+ egress = 0;
+ ingress = 0;
+
+ for (i = 0; i < ARRAY_SIZE(ptp_offset); i++) {
+ if (mbps == ptp_offset[i].mbps) {
+ egress = ptp_offset[i].egress;
+ ingress = ptp_offset[i].ingress;
+ break;
+ }
+ }
+
+ atomic_set(&aq_ptp->offset_egress, egress);
+ atomic_set(&aq_ptp->offset_ingress, ingress);
+}
+
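+/* Unlocked helper: the caller must hold ring->lock. Takes an extra
+ * reference on the skb (skb_get()) so it stays valid until its hardware
+ * timestamp arrives or the ring is cleaned. Advancing the head into the
+ * tail fails with -ENOMEM, i.e. one slot is kept free so that
+ * head == tail always means "empty".
+ */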
+static int __aq_ptp_skb_put(struct ptp_skb_ring *ring, struct sk_buff *skb)
+{
+ unsigned int next_head = (ring->head + 1) % ring->size;
+
+ if (next_head == ring->tail)
+ return -ENOMEM;
+
+ ring->buff[ring->head] = skb_get(skb);
+ ring->head = next_head;
+
+ return 0;
+}
+
+static int aq_ptp_skb_put(struct ptp_skb_ring *ring, struct sk_buff *skb)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&ring->lock, flags);
+ ret = __aq_ptp_skb_put(ring, skb);
+ spin_unlock_irqrestore(&ring->lock, flags);
+
+ return ret;
+}
+
+static struct sk_buff *__aq_ptp_skb_get(struct ptp_skb_ring *ring)
+{
+ struct sk_buff *skb;
+
+ if (ring->tail == ring->head)
+ return NULL;
+
+ skb = ring->buff[ring->tail];
+ ring->tail = (ring->tail + 1) % ring->size;
+
+ return skb;
+}
+
+static struct sk_buff *aq_ptp_skb_get(struct ptp_skb_ring *ring)
+{
+ unsigned long flags;
+ struct sk_buff *skb;
+
+ spin_lock_irqsave(&ring->lock, flags);
+ skb = __aq_ptp_skb_get(ring);
+ spin_unlock_irqrestore(&ring->lock, flags);
+
+ return skb;
+}
+
+static unsigned int aq_ptp_skb_buf_len(struct ptp_skb_ring *ring)
+{
+ unsigned long flags;
+ unsigned int len;
+
+ spin_lock_irqsave(&ring->lock, flags);
+ len = (ring->head >= ring->tail) ?
+ ring->head - ring->tail :
+ ring->size - ring->tail + ring->head;
+ spin_unlock_irqrestore(&ring->lock, flags);
+
+ return len;
+}
+
+static int aq_ptp_skb_ring_init(struct ptp_skb_ring *ring, unsigned int size)
+{
+ struct sk_buff **buff = kmalloc(sizeof(*buff) * size, GFP_KERNEL);
+
+ if (!buff)
+ return -ENOMEM;
+
+ spin_lock_init(&ring->lock);
+
+ ring->buff = buff;
+ ring->size = size;
+ ring->head = 0;
+ ring->tail = 0;
+
+ return 0;
+}
+
+static void aq_ptp_skb_ring_clean(struct ptp_skb_ring *ring)
+{
+ struct sk_buff *skb;
+
+ while ((skb = aq_ptp_skb_get(ring)) != NULL)
+ dev_kfree_skb_any(skb);
+}
+
+static void aq_ptp_skb_ring_release(struct ptp_skb_ring *ring)
+{
+ if (ring->buff) {
+ aq_ptp_skb_ring_clean(ring);
+ kfree(ring->buff);
+ ring->buff = NULL;
+ }
+}
+
+static void aq_ptp_tx_timeout_init(struct ptp_tx_timeout *timeout)
+{
+ spin_lock_init(&timeout->lock);
+ timeout->active = false;
+}
+
+static void aq_ptp_tx_timeout_start(struct aq_ptp_s *aq_ptp)
+{
+ struct ptp_tx_timeout *timeout = &aq_ptp->ptp_tx_timeout;
+ unsigned long flags;
+
+ spin_lock_irqsave(&timeout->lock, flags);
+ timeout->active = true;
+ timeout->tx_start = jiffies;
+ spin_unlock_irqrestore(&timeout->lock, flags);
+}
+
+static void aq_ptp_tx_timeout_update(struct aq_ptp_s *aq_ptp)
+{
+ if (!aq_ptp_skb_buf_len(&aq_ptp->skb_ring)) {
+ struct ptp_tx_timeout *timeout = &aq_ptp->ptp_tx_timeout;
+ unsigned long flags;
+
+ spin_lock_irqsave(&timeout->lock, flags);
+ timeout->active = false;
+ spin_unlock_irqrestore(&timeout->lock, flags);
+ }
+}
+
+static void aq_ptp_tx_timeout_check(struct aq_ptp_s *aq_ptp)
+{
+ struct ptp_tx_timeout *timeout = &aq_ptp->ptp_tx_timeout;
+ unsigned long flags;
+ bool timeout_flag;
+
+ timeout_flag = false;
+
+ spin_lock_irqsave(&timeout->lock, flags);
+ if (timeout->active) {
+ timeout_flag = time_is_before_jiffies(timeout->tx_start +
+ AQ_PTP_TX_TIMEOUT);
+ /* reset active flag if timeout detected */
+ if (timeout_flag)
+ timeout->active = false;
+ }
+ spin_unlock_irqrestore(&timeout->lock, flags);
+
+ if (timeout_flag) {
+ aq_ptp_skb_ring_clean(&aq_ptp->skb_ring);
+ netdev_err(aq_ptp->aq_nic->ndev,
+ "PTP Timeout. Clearing Tx Timestamp SKBs\n");
+ }
+}
+
+/* aq_ptp_adjfine
+ * @ptp: the ptp clock structure
+ * @scaled_ppm: desired frequency offset, in scaled parts per million
+ *
+ * adjust the frequency of the ptp cycle counter by the requested
+ * amount; scaled_ppm is converted to parts per billion via
+ * scaled_ppm_to_ppb() before being handed to hardware.
+ */
+static int aq_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+ struct aq_ptp_s *aq_ptp = container_of(ptp, struct aq_ptp_s, ptp_info);
+ struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
+
+ mutex_lock(&aq_nic->fwreq_mutex);
+ aq_nic->aq_hw_ops->hw_adj_clock_freq(aq_nic->aq_hw,
+ scaled_ppm_to_ppb(scaled_ppm));
+ mutex_unlock(&aq_nic->fwreq_mutex);
+
+ return 0;
+}
+
+/* aq_ptp_adjtime
+ * @ptp: the ptp clock structure
+ * @delta: offset to adjust the cycle counter by
+ *
+ * adjust the hardware clock by the requested delta (in ns).
+ */
+static int aq_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct aq_ptp_s *aq_ptp = container_of(ptp, struct aq_ptp_s, ptp_info);
+ struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
+ unsigned long flags;
+
+ spin_lock_irqsave(&aq_ptp->ptp_lock, flags);
+ aq_nic->aq_hw_ops->hw_adj_sys_clock(aq_nic->aq_hw, delta);
+ spin_unlock_irqrestore(&aq_ptp->ptp_lock, flags);
+
+ return 0;
+}
+
+/* aq_ptp_gettime
+ * @ptp: the ptp clock structure
+ * @ts: timespec structure to hold the current time value
+ *
+ * read the hardware clock and return the correct value in ns,
+ * after converting it into a struct timespec64.
+ */
+static int aq_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
+{
+ struct aq_ptp_s *aq_ptp = container_of(ptp, struct aq_ptp_s, ptp_info);
+ struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
+ unsigned long flags;
+ u64 ns;
+
+ spin_lock_irqsave(&aq_ptp->ptp_lock, flags);
+ aq_nic->aq_hw_ops->hw_get_ptp_ts(aq_nic->aq_hw, &ns);
+ spin_unlock_irqrestore(&aq_ptp->ptp_lock, flags);
+
+ *ts = ns_to_timespec64(ns);
+
+ return 0;
+}
+
+/* aq_ptp_settime
+ * @ptp: the ptp clock structure
+ * @ts: the timespec containing the new time for the cycle counter
+ *
+ * set the hardware clock to the requested time by applying the delta
+ * between it and the current hardware time.
+ */
+static int aq_ptp_settime(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct aq_ptp_s *aq_ptp = container_of(ptp, struct aq_ptp_s, ptp_info);
+ struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
+ unsigned long flags;
+ u64 ns = timespec64_to_ns(ts);
+ u64 now;
+
+ spin_lock_irqsave(&aq_ptp->ptp_lock, flags);
+ aq_nic->aq_hw_ops->hw_get_ptp_ts(aq_nic->aq_hw, &now);
+ aq_nic->aq_hw_ops->hw_adj_sys_clock(aq_nic->aq_hw, (s64)ns - (s64)now);
+
+ spin_unlock_irqrestore(&aq_ptp->ptp_lock, flags);
+
+ return 0;
+}
+
+static void aq_ptp_convert_to_hwtstamp(struct aq_ptp_s *aq_ptp,
+ struct skb_shared_hwtstamps *hwtstamp,
+ u64 timestamp)
+{
+ memset(hwtstamp, 0, sizeof(*hwtstamp));
+ hwtstamp->hwtstamp = ns_to_ktime(timestamp);
+}
+
+static int aq_ptp_hw_pin_conf(struct aq_nic_s *aq_nic, u32 pin_index, u64 start,
+ u64 period)
+{
+ if (period)
+ netdev_dbg(aq_nic->ndev,
+ "Enable GPIO %d pulsing, start time %llu, period %u\n",
+ pin_index, start, (u32)period);
+ else
+ netdev_dbg(aq_nic->ndev,
+ "Disable GPIO %d pulsing, start time %llu, period %u\n",
+ pin_index, start, (u32)period);
+
+ /* Notify hardware of the request to begin sending pulses.
+ * If period is ZERO then pulsing is disabled.
+ */
+ mutex_lock(&aq_nic->fwreq_mutex);
+ aq_nic->aq_hw_ops->hw_gpio_pulse(aq_nic->aq_hw, pin_index,
+ start, (u32)period);
+ mutex_unlock(&aq_nic->fwreq_mutex);
+
+ return 0;
+}
+
+static int aq_ptp_perout_pin_configure(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ struct aq_ptp_s *aq_ptp = container_of(ptp, struct aq_ptp_s, ptp_info);
+ struct ptp_clock_time *t = &rq->perout.period;
+ struct ptp_clock_time *s = &rq->perout.start;
+ struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
+ u64 start, period;
+ u32 pin_index = rq->perout.index;
+
+ /* verify the request channel is there */
+ if (pin_index >= ptp->n_per_out)
+ return -EINVAL;
+
+ /* we cannot support periods greater
+ * than 4 seconds due to reg limit
+ */
+ if (t->sec > 4 || t->sec < 0)
+ return -ERANGE;
+
+ /* convert to unsigned 64b ns,
+ * verify we can put it in a 32b register
+ */
+ period = on ? t->sec * NSEC_PER_SEC + t->nsec : 0;
+
+ /* verify the value is in range supported by hardware */
+ if (period > U32_MAX)
+ return -ERANGE;
+ /* convert to unsigned 64b ns */
+ /* TODO convert to AQ time */
+ start = on ? s->sec * NSEC_PER_SEC + s->nsec : 0;
+
+ aq_ptp_hw_pin_conf(aq_nic, pin_index, start, period);
+
+ return 0;
+}
+
+static int aq_ptp_pps_pin_configure(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ struct aq_ptp_s *aq_ptp = container_of(ptp, struct aq_ptp_s, ptp_info);
+ struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
+ u64 start, period;
+ u32 pin_index = 0;
+ u32 rest = 0;
+
+ /* verify the request channel is there */
+ if (pin_index >= ptp->n_per_out)
+ return -EINVAL;
+
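+ /* Align the first pulse to the next full-second boundary; if fewer
+ * than 10 ms remain before that boundary, start one second later so
+ * the hardware has time to arm the pulse.
+ */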
+ aq_nic->aq_hw_ops->hw_get_ptp_ts(aq_nic->aq_hw, &start);
+ div_u64_rem(start, NSEC_PER_SEC, &rest);
+ period = on ? NSEC_PER_SEC : 0; /* PPS - pulse per second */
+ start = on ? start - rest + NSEC_PER_SEC *
+ (rest > 990000000LL ? 2 : 1) : 0;
+
+ aq_ptp_hw_pin_conf(aq_nic, pin_index, start, period);
+
+ return 0;
+}
+
+static void aq_ptp_extts_pin_ctrl(struct aq_ptp_s *aq_ptp)
+{
+ struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
+ u32 enable = aq_ptp->extts_pin_enabled;
+
+ if (aq_nic->aq_hw_ops->hw_extts_gpio_enable)
+ aq_nic->aq_hw_ops->hw_extts_gpio_enable(aq_nic->aq_hw, 0,
+ enable);
+}
+
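+/* EXTTS is not interrupt driven here: enabling the pin starts a delayed
+ * work item that polls the SYNC1588 timestamp every POLL_SYNC_TIMER_MS
+ * and raises a PTP_CLOCK_EXTTS event when it changes (see
+ * aq_ptp_poll_sync_work_cb()).
+ */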
+static int aq_ptp_extts_pin_configure(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ struct aq_ptp_s *aq_ptp = container_of(ptp, struct aq_ptp_s, ptp_info);
+
+ u32 pin_index = rq->extts.index;
+
+ if (pin_index >= ptp->n_ext_ts)
+ return -EINVAL;
+
+ aq_ptp->extts_pin_enabled = !!on;
+ if (on) {
+ aq_ptp->poll_timeout_ms = POLL_SYNC_TIMER_MS;
+ cancel_delayed_work_sync(&aq_ptp->poll_sync);
+ schedule_delayed_work(&aq_ptp->poll_sync,
+ msecs_to_jiffies(aq_ptp->poll_timeout_ms));
+ }
+
+ aq_ptp_extts_pin_ctrl(aq_ptp);
+ return 0;
+}
+
+/* aq_ptp_gpio_feature_enable
+ * @ptp: the ptp clock structure
+ * @rq: the requested feature to change
+ * @on: whether to enable or disable the feature
+ */
+static int aq_ptp_gpio_feature_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ switch (rq->type) {
+ case PTP_CLK_REQ_EXTTS:
+ return aq_ptp_extts_pin_configure(ptp, rq, on);
+ case PTP_CLK_REQ_PEROUT:
+ return aq_ptp_perout_pin_configure(ptp, rq, on);
+ case PTP_CLK_REQ_PPS:
+ return aq_ptp_pps_pin_configure(ptp, rq, on);
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+/* aq_ptp_verify
+ * @ptp: the ptp clock structure
+ * @pin: index of the pin in question
+ * @func: the desired function to use
+ * @chan: the function channel index to use
+ */
+static int aq_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
+ enum ptp_pin_function func, unsigned int chan)
+{
+ /* verify the requested pin is there */
+ if (!ptp->pin_config || pin >= ptp->n_pins)
+ return -EINVAL;
+
+ /* enforce locked channels, no changing them */
+ if (chan != ptp->pin_config[pin].chan)
+ return -EINVAL;
+
+ /* we want to keep the functions locked as well */
+ if (func != ptp->pin_config[pin].func)
+ return -EINVAL;
+
+ return 0;
+}
+
+/* aq_ptp_tx_hwtstamp - utility function which checks for TX time stamp
+ * @aq_nic: the private nic struct
+ * @timestamp: the raw hardware Tx timestamp, in ns
+ *
+ * if the timestamp is valid, we convert it into the timecounter ns
+ * value, then store that result into the hwtstamps structure which
+ * is passed up the network stack
+ */
+void aq_ptp_tx_hwtstamp(struct aq_nic_s *aq_nic, u64 timestamp)
+{
+ struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
+ struct sk_buff *skb = aq_ptp_skb_get(&aq_ptp->skb_ring);
+ struct skb_shared_hwtstamps hwtstamp;
+
+ if (!skb) {
+ netdev_err(aq_nic->ndev, "have timestamp but tx_queues empty\n");
+ return;
+ }
+
+ timestamp += atomic_read(&aq_ptp->offset_egress);
+ aq_ptp_convert_to_hwtstamp(aq_ptp, &hwtstamp, timestamp);
+ skb_tstamp_tx(skb, &hwtstamp);
+ dev_kfree_skb_any(skb);
+
+ aq_ptp_tx_timeout_update(aq_ptp);
+}
+
+/* aq_ptp_rx_hwtstamp - utility function which checks for RX time stamp
+ * @aq_ptp: pointer to the driver's PTP private struct
+ * @skb: particular skb to send timestamp with
+ * @timestamp: the raw hardware Rx timestamp, in ns
+ *
+ * if the timestamp is valid, we convert it into the timecounter ns
+ * value, then store that result into the hwtstamps structure which
+ * is passed up the network stack
+ */
+static void aq_ptp_rx_hwtstamp(struct aq_ptp_s *aq_ptp, struct sk_buff *skb,
+ u64 timestamp)
+{
+ timestamp -= atomic_read(&aq_ptp->offset_ingress);
+ aq_ptp_convert_to_hwtstamp(aq_ptp, skb_hwtstamps(skb), timestamp);
+}
+
+void aq_ptp_hwtstamp_config_get(struct aq_ptp_s *aq_ptp,
+ struct hwtstamp_config *config)
+{
+ *config = aq_ptp->hwtstamp_config;
+}
+
+static void aq_ptp_prepare_filters(struct aq_ptp_s *aq_ptp)
+{
+ aq_ptp->udp_filter.cmd = HW_ATL_RX_ENABLE_FLTR_L3L4 |
+ HW_ATL_RX_ENABLE_CMP_PROT_L4 |
+ HW_ATL_RX_UDP |
+ HW_ATL_RX_ENABLE_CMP_DEST_PORT_L4 |
+ HW_ATL_RX_HOST << HW_ATL_RX_ACTION_FL3F4_SHIFT |
+ HW_ATL_RX_ENABLE_QUEUE_L3L4 |
+ aq_ptp->ptp_rx.idx << HW_ATL_RX_QUEUE_FL3L4_SHIFT;
+ aq_ptp->udp_filter.p_dst = PTP_EV_PORT;
+
+ aq_ptp->eth_type_filter.ethertype = ETH_P_1588;
+ aq_ptp->eth_type_filter.queue = aq_ptp->ptp_rx.idx;
+}
+
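+/* Enabling timestamping installs two Rx filters that steer PTP event
+ * traffic to the dedicated PTP ring: an L3L4 filter on UDP destination
+ * port 319 (PTP_EV_PORT) and an L2 filter on EtherType 0x88F7
+ * (ETH_P_1588). Disabling tears both down again.
+ */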
+int aq_ptp_hwtstamp_config_set(struct aq_ptp_s *aq_ptp,
+ struct hwtstamp_config *config)
+{
+ struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
+ const struct aq_hw_ops *hw_ops;
+ int err = 0;
+
+ hw_ops = aq_nic->aq_hw_ops;
+ if (config->tx_type == HWTSTAMP_TX_ON ||
+ config->rx_filter == HWTSTAMP_FILTER_PTP_V2_EVENT) {
+ aq_ptp_prepare_filters(aq_ptp);
+ if (hw_ops->hw_filter_l3l4_set) {
+ err = hw_ops->hw_filter_l3l4_set(aq_nic->aq_hw,
+ &aq_ptp->udp_filter);
+ }
+ if (!err && hw_ops->hw_filter_l2_set) {
+ err = hw_ops->hw_filter_l2_set(aq_nic->aq_hw,
+ &aq_ptp->eth_type_filter);
+ }
+ aq_utils_obj_set(&aq_nic->flags, AQ_NIC_PTP_DPATH_UP);
+ } else {
+ aq_ptp->udp_filter.cmd &= ~HW_ATL_RX_ENABLE_FLTR_L3L4;
+ if (hw_ops->hw_filter_l3l4_set) {
+ err = hw_ops->hw_filter_l3l4_set(aq_nic->aq_hw,
+ &aq_ptp->udp_filter);
+ }
+ if (!err && hw_ops->hw_filter_l2_clear) {
+ err = hw_ops->hw_filter_l2_clear(aq_nic->aq_hw,
+ &aq_ptp->eth_type_filter);
+ }
+ aq_utils_obj_clear(&aq_nic->flags, AQ_NIC_PTP_DPATH_UP);
+ }
+
+ if (err)
+ return -EREMOTEIO;
+
+ aq_ptp->hwtstamp_config = *config;
+
+ return 0;
+}
+
+bool aq_ptp_ring(struct aq_nic_s *aq_nic, struct aq_ring_s *ring)
+{
+ struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
+
+ if (!aq_ptp)
+ return false;
+
+ return &aq_ptp->ptp_tx == ring ||
+ &aq_ptp->ptp_rx == ring || &aq_ptp->hwts_rx == ring;
+}
+
+u16 aq_ptp_extract_ts(struct aq_nic_s *aq_nic, struct sk_buff *skb, u8 *p,
+ unsigned int len)
+{
+ struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
+ u64 timestamp = 0;
+ u16 ret = aq_nic->aq_hw_ops->rx_extract_ts(aq_nic->aq_hw,
+ p, len, &timestamp);
+
+ if (ret > 0)
+ aq_ptp_rx_hwtstamp(aq_ptp, skb, timestamp);
+
+ return ret;
+}
+
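+/* NAPI poll handler for the PTP vector. One pass services three rings:
+ * Tx completions on the PTP Tx ring, the hardware-timestamp Rx ring
+ * (which carries Tx timestamps written back by the MAC), and the PTP Rx
+ * ring for received event packets.
+ */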
+static int aq_ptp_poll(struct napi_struct *napi, int budget)
+{
+ struct aq_ptp_s *aq_ptp = container_of(napi, struct aq_ptp_s, napi);
+ struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
+ bool was_cleaned = false;
+ int work_done = 0;
+ int err;
+
+ /* Processing PTP TX traffic */
+ err = aq_nic->aq_hw_ops->hw_ring_tx_head_update(aq_nic->aq_hw,
+ &aq_ptp->ptp_tx);
+ if (err < 0)
+ goto err_exit;
+
+ if (aq_ptp->ptp_tx.sw_head != aq_ptp->ptp_tx.hw_head) {
+ aq_ring_tx_clean(&aq_ptp->ptp_tx);
+
+ was_cleaned = true;
+ }
+
+ /* Processing HW_TIMESTAMP RX traffic */
+ err = aq_nic->aq_hw_ops->hw_ring_hwts_rx_receive(aq_nic->aq_hw,
+ &aq_ptp->hwts_rx);
+ if (err < 0)
+ goto err_exit;
+
+ if (aq_ptp->hwts_rx.sw_head != aq_ptp->hwts_rx.hw_head) {
+ aq_ring_hwts_rx_clean(&aq_ptp->hwts_rx, aq_nic);
+
+ err = aq_nic->aq_hw_ops->hw_ring_hwts_rx_fill(aq_nic->aq_hw,
+ &aq_ptp->hwts_rx);
+ if (err < 0)
+ goto err_exit;
+
+ was_cleaned = true;
+ }
+
+ /* Processing PTP RX traffic */
+ err = aq_nic->aq_hw_ops->hw_ring_rx_receive(aq_nic->aq_hw,
+ &aq_ptp->ptp_rx);
+ if (err < 0)
+ goto err_exit;
+
+ if (aq_ptp->ptp_rx.sw_head != aq_ptp->ptp_rx.hw_head) {
+ unsigned int sw_tail_old;
+
+ err = aq_ring_rx_clean(&aq_ptp->ptp_rx, napi, &work_done, budget);
+ if (err < 0)
+ goto err_exit;
+
+ sw_tail_old = aq_ptp->ptp_rx.sw_tail;
+ err = aq_ring_rx_fill(&aq_ptp->ptp_rx);
+ if (err < 0)
+ goto err_exit;
+
+ err = aq_nic->aq_hw_ops->hw_ring_rx_fill(aq_nic->aq_hw,
+ &aq_ptp->ptp_rx,
+ sw_tail_old);
+ if (err < 0)
+ goto err_exit;
+ }
+
+ if (was_cleaned)
+ work_done = budget;
+
+ if (work_done < budget) {
+ napi_complete_done(napi, work_done);
+ aq_nic->aq_hw_ops->hw_irq_enable(aq_nic->aq_hw,
+ BIT_ULL(aq_ptp->ptp_ring_param.vec_idx));
+ }
+
+err_exit:
+ return work_done;
+}
+
+static irqreturn_t aq_ptp_isr(int irq, void *private)
+{
+ struct aq_ptp_s *aq_ptp = private;
+ int err = 0;
+
+ if (!aq_ptp) {
+ err = -EINVAL;
+ goto err_exit;
+ }
+ napi_schedule(&aq_ptp->napi);
+
+err_exit:
+ return err >= 0 ? IRQ_HANDLED : IRQ_NONE;
+}
+
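+/* Transmit a PTP packet on the dedicated PTP Tx ring. A reference to the
+ * skb is queued on skb_ring first and SKBTX_IN_PROGRESS is set; the
+ * hardware timestamp is matched back to the skb later, when it arrives
+ * via the hwts_rx ring (see aq_ptp_tx_hwtstamp()).
+ */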
+int aq_ptp_xmit(struct aq_nic_s *aq_nic, struct sk_buff *skb)
+{
+ struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
+ struct aq_ring_s *ring = &aq_ptp->ptp_tx;
+ unsigned long irq_flags;
+ int err = NETDEV_TX_OK;
+ unsigned int frags;
+
+ if (skb->len <= 0) {
+ dev_kfree_skb_any(skb);
+ goto err_exit;
+ }
+
+ frags = skb_shinfo(skb)->nr_frags + 1;
+ /* Frags cannot be bigger than 16KB
+ * because PTP usually works without
+ * jumbo frames, even in the background
+ */
+ if (frags > AQ_CFG_SKB_FRAGS_MAX || frags > aq_ring_avail_dx(ring)) {
+ /* Drop packet because it doesn't make sense to delay it */
+ dev_kfree_skb_any(skb);
+ goto err_exit;
+ }
+
+ err = aq_ptp_skb_put(&aq_ptp->skb_ring, skb);
+ if (err) {
+ netdev_err(aq_nic->ndev, "SKB Ring is overflow (%u)!\n",
+ ring->size);
+ return NETDEV_TX_BUSY;
+ }
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ aq_ptp_tx_timeout_start(aq_ptp);
+ skb_tx_timestamp(skb);
+
+ spin_lock_irqsave(&aq_nic->aq_ptp->ptp_ring_lock, irq_flags);
+ frags = aq_nic_map_skb(aq_nic, skb, ring);
+
+ if (likely(frags)) {
+ err = aq_nic->aq_hw_ops->hw_ring_tx_xmit(aq_nic->aq_hw,
+ ring, frags);
+ if (err >= 0) {
+ ++ring->stats.tx.packets;
+ ring->stats.tx.bytes += skb->len;
+ }
+ } else {
+ err = NETDEV_TX_BUSY;
+ }
+ spin_unlock_irqrestore(&aq_nic->aq_ptp->ptp_ring_lock, irq_flags);
+
+err_exit:
+ return err;
+}
+
+void aq_ptp_service_task(struct aq_nic_s *aq_nic)
+{
+ struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
+
+ if (!aq_ptp)
+ return;
+
+ aq_ptp_tx_timeout_check(aq_ptp);
+}
+
+int aq_ptp_irq_alloc(struct aq_nic_s *aq_nic)
+{
+ struct pci_dev *pdev = aq_nic->pdev;
+ struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
+ int err = 0;
+
+ if (!aq_ptp)
+ return 0;
+
+ if (pdev->msix_enabled || pdev->msi_enabled) {
+ err = request_irq(pci_irq_vector(pdev, aq_ptp->idx_vector),
+ aq_ptp_isr, 0, aq_nic->ndev->name, aq_ptp);
+ } else {
+ err = -EINVAL;
+ goto err_exit;
+ }
+
+err_exit:
+ return err;
+}
+
+void aq_ptp_irq_free(struct aq_nic_s *aq_nic)
+{
+ struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
+ struct pci_dev *pdev = aq_nic->pdev;
+
+ if (!aq_ptp)
+ return;
+
+ free_irq(pci_irq_vector(pdev, aq_ptp->idx_vector), aq_ptp);
+}
+
+int aq_ptp_ring_init(struct aq_nic_s *aq_nic)
+{
+ struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
+ int err = 0;
+
+ if (!aq_ptp)
+ return 0;
+
+ err = aq_ring_init(&aq_ptp->ptp_tx);
+ if (err < 0)
+ goto err_exit;
+ err = aq_nic->aq_hw_ops->hw_ring_tx_init(aq_nic->aq_hw,
+ &aq_ptp->ptp_tx,
+ &aq_ptp->ptp_ring_param);
+ if (err < 0)
+ goto err_exit;
+
+ err = aq_ring_init(&aq_ptp->ptp_rx);
+ if (err < 0)
+ goto err_exit;
+ err = aq_nic->aq_hw_ops->hw_ring_rx_init(aq_nic->aq_hw,
+ &aq_ptp->ptp_rx,
+ &aq_ptp->ptp_ring_param);
+ if (err < 0)
+ goto err_exit;
+
+ err = aq_ring_rx_fill(&aq_ptp->ptp_rx);
+ if (err < 0)
+ goto err_rx_free;
+ err = aq_nic->aq_hw_ops->hw_ring_rx_fill(aq_nic->aq_hw,
+ &aq_ptp->ptp_rx,
+ 0U);
+ if (err < 0)
+ goto err_rx_free;
+
+ err = aq_ring_init(&aq_ptp->hwts_rx);
+ if (err < 0)
+ goto err_rx_free;
+ err = aq_nic->aq_hw_ops->hw_ring_rx_init(aq_nic->aq_hw,
+ &aq_ptp->hwts_rx,
+ &aq_ptp->ptp_ring_param);
+ if (err < 0)
+ goto err_exit;
+ err = aq_nic->aq_hw_ops->hw_ring_hwts_rx_fill(aq_nic->aq_hw,
+ &aq_ptp->hwts_rx);
+ if (err < 0)
+ goto err_exit;
+
+ return err;
+
+err_rx_free:
+ aq_ring_rx_deinit(&aq_ptp->ptp_rx);
+err_exit:
+ return err;
+}
+
+int aq_ptp_ring_start(struct aq_nic_s *aq_nic)
+{
+ struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
+ int err = 0;
+
+ if (!aq_ptp)
+ return 0;
+
+ err = aq_nic->aq_hw_ops->hw_ring_tx_start(aq_nic->aq_hw, &aq_ptp->ptp_tx);
+ if (err < 0)
+ goto err_exit;
+
+ err = aq_nic->aq_hw_ops->hw_ring_rx_start(aq_nic->aq_hw, &aq_ptp->ptp_rx);
+ if (err < 0)
+ goto err_exit;
+
+ err = aq_nic->aq_hw_ops->hw_ring_rx_start(aq_nic->aq_hw,
+ &aq_ptp->hwts_rx);
+ if (err < 0)
+ goto err_exit;
+
+ napi_enable(&aq_ptp->napi);
+
+err_exit:
+ return err;
+}
+
+void aq_ptp_ring_stop(struct aq_nic_s *aq_nic)
+{
+ struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
+
+ if (!aq_ptp)
+ return;
+
+ aq_nic->aq_hw_ops->hw_ring_tx_stop(aq_nic->aq_hw, &aq_ptp->ptp_tx);
+ aq_nic->aq_hw_ops->hw_ring_rx_stop(aq_nic->aq_hw, &aq_ptp->ptp_rx);
+
+ aq_nic->aq_hw_ops->hw_ring_rx_stop(aq_nic->aq_hw, &aq_ptp->hwts_rx);
+
+ napi_disable(&aq_ptp->napi);
+}
+
+void aq_ptp_ring_deinit(struct aq_nic_s *aq_nic)
+{
+ struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
+
+ if (!aq_ptp || !aq_ptp->ptp_tx.aq_nic || !aq_ptp->ptp_rx.aq_nic)
+ return;
+
+ aq_ring_tx_clean(&aq_ptp->ptp_tx);
+ aq_ring_rx_deinit(&aq_ptp->ptp_rx);
+}
+
+#define PTP_8TC_RING_IDX 8
+#define PTP_4TC_RING_IDX 16
+#define PTP_HWST_RING_IDX 31
+
+int aq_ptp_ring_alloc(struct aq_nic_s *aq_nic)
+{
+ struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
+ unsigned int tx_ring_idx, rx_ring_idx;
+ struct aq_ring_s *hwts;
+ u32 tx_tc_mode, rx_tc_mode;
+ struct aq_ring_s *ring;
+ int err;
+
+ if (!aq_ptp)
+ return 0;
+
+ /* Index must be 8 (8 TCs) or 16 (4 TCs),
+ * depending on the Traffic Class mode.
+ */
+ aq_nic->aq_hw_ops->hw_tx_tc_mode_get(aq_nic->aq_hw, &tx_tc_mode);
+ if (tx_tc_mode == 0)
+ tx_ring_idx = PTP_8TC_RING_IDX;
+ else
+ tx_ring_idx = PTP_4TC_RING_IDX;
+
+ ring = aq_ring_tx_alloc(&aq_ptp->ptp_tx, aq_nic,
+ tx_ring_idx, &aq_nic->aq_nic_cfg);
+ if (!ring) {
+ err = -ENOMEM;
+ goto err_exit;
+ }
+
+ aq_nic->aq_hw_ops->hw_rx_tc_mode_get(aq_nic->aq_hw, &rx_tc_mode);
+ if (rx_tc_mode == 0)
+ rx_ring_idx = PTP_8TC_RING_IDX;
+ else
+ rx_ring_idx = PTP_4TC_RING_IDX;
+
+ ring = aq_ring_rx_alloc(&aq_ptp->ptp_rx, aq_nic,
+ rx_ring_idx, &aq_nic->aq_nic_cfg);
+ if (!ring) {
+ err = -ENOMEM;
+ goto err_exit_ptp_tx;
+ }
+
+ hwts = aq_ring_hwts_rx_alloc(&aq_ptp->hwts_rx, aq_nic, PTP_HWST_RING_IDX,
+ aq_nic->aq_nic_cfg.rxds,
+ aq_nic->aq_nic_cfg.aq_hw_caps->rxd_size);
+ if (!hwts) {
+ err = -ENOMEM;
+ goto err_exit_ptp_rx;
+ }
+
+ err = aq_ptp_skb_ring_init(&aq_ptp->skb_ring, aq_nic->aq_nic_cfg.rxds);
+ if (err != 0) {
+ err = -ENOMEM;
+ goto err_exit_hwts_rx;
+ }
+
+ aq_ptp->ptp_ring_param.vec_idx = aq_ptp->idx_vector;
+ aq_ptp->ptp_ring_param.cpu = aq_ptp->ptp_ring_param.vec_idx +
+ aq_nic_get_cfg(aq_nic)->aq_rss.base_cpu_number;
+ cpumask_set_cpu(aq_ptp->ptp_ring_param.cpu,
+ &aq_ptp->ptp_ring_param.affinity_mask);
+
+ return 0;
+
+err_exit_hwts_rx:
+ aq_ring_free(&aq_ptp->hwts_rx);
+err_exit_ptp_rx:
+ aq_ring_free(&aq_ptp->ptp_rx);
+err_exit_ptp_tx:
+ aq_ring_free(&aq_ptp->ptp_tx);
+err_exit:
+ return err;
+}
+
+void aq_ptp_ring_free(struct aq_nic_s *aq_nic)
+{
+ struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
+
+ if (!aq_ptp)
+ return;
+
+ aq_ring_free(&aq_ptp->ptp_tx);
+ aq_ring_free(&aq_ptp->ptp_rx);
+ aq_ring_free(&aq_ptp->hwts_rx);
+
+ aq_ptp_skb_ring_release(&aq_ptp->skb_ring);
+}
+
+#define MAX_PTP_GPIO_COUNT 4
+
+static struct ptp_clock_info aq_ptp_clock = {
+ .owner = THIS_MODULE,
+ .name = "atlantic ptp",
+ .max_adj = 999999999,
+ .n_ext_ts = 0,
+ .pps = 0,
+ .adjfine = aq_ptp_adjfine,
+ .adjtime = aq_ptp_adjtime,
+ .gettime64 = aq_ptp_gettime,
+ .settime64 = aq_ptp_settime,
+ .n_per_out = 0,
+ .enable = aq_ptp_gpio_feature_enable,
+ .n_pins = 0,
+ .verify = aq_ptp_verify,
+ .pin_config = NULL,
+};
+
+#define ptp_offset_init(__idx, __mbps, __egress, __ingress) do { \
+ ptp_offset[__idx].mbps = (__mbps); \
+ ptp_offset[__idx].egress = (__egress); \
+ ptp_offset[__idx].ingress = (__ingress); } \
+ while (0)
+
+static void aq_ptp_offset_init_from_fw(const struct hw_atl_ptp_offset *offsets)
+{
+ int i;
+
+ /* Load offsets for PTP */
+ for (i = 0; i < ARRAY_SIZE(ptp_offset); i++) {
+ switch (i) {
+ /* 100M */
+ case ptp_offset_idx_100:
+ ptp_offset_init(i, 100,
+ offsets->egress_100,
+ offsets->ingress_100);
+ break;
+ /* 1G */
+ case ptp_offset_idx_1000:
+ ptp_offset_init(i, 1000,
+ offsets->egress_1000,
+ offsets->ingress_1000);
+ break;
+ /* 2.5G */
+ case ptp_offset_idx_2500:
+ ptp_offset_init(i, 2500,
+ offsets->egress_2500,
+ offsets->ingress_2500);
+ break;
+ /* 5G */
+ case ptp_offset_idx_5000:
+ ptp_offset_init(i, 5000,
+ offsets->egress_5000,
+ offsets->ingress_5000);
+ break;
+ /* 10G */
+ case ptp_offset_idx_10000:
+ ptp_offset_init(i, 10000,
+ offsets->egress_10000,
+ offsets->ingress_10000);
+ break;
+ }
+ }
+}
+
+static void aq_ptp_offset_init(const struct hw_atl_ptp_offset *offsets)
+{
+ memset(ptp_offset, 0, sizeof(ptp_offset));
+
+ aq_ptp_offset_init_from_fw(offsets);
+}
+
+static void aq_ptp_gpio_init(struct ptp_clock_info *info,
+ struct hw_atl_info *hw_info)
+{
+ struct ptp_pin_desc pin_desc[MAX_PTP_GPIO_COUNT];
+ u32 extts_pin_cnt = 0;
+ u32 out_pin_cnt = 0;
+ u32 i;
+
+ memset(pin_desc, 0, sizeof(pin_desc));
+
+ for (i = 0; i < MAX_PTP_GPIO_COUNT - 1; i++) {
+ if (hw_info->gpio_pin[i] ==
+ (GPIO_PIN_FUNCTION_PTP0 + out_pin_cnt)) {
+ snprintf(pin_desc[out_pin_cnt].name,
+ sizeof(pin_desc[out_pin_cnt].name),
+ "AQ_GPIO%d", i);
+ pin_desc[out_pin_cnt].index = out_pin_cnt;
+ pin_desc[out_pin_cnt].chan = out_pin_cnt;
+ pin_desc[out_pin_cnt++].func = PTP_PF_PEROUT;
+ }
+ }
+
+ info->n_per_out = out_pin_cnt;
+
+ if (hw_info->caps_ex & BIT(CAPS_EX_PHY_CTRL_TS_PIN)) {
+ extts_pin_cnt += 1;
+
+ snprintf(pin_desc[out_pin_cnt].name,
+ sizeof(pin_desc[out_pin_cnt].name),
+ "AQ_GPIO%d", out_pin_cnt);
+ pin_desc[out_pin_cnt].index = out_pin_cnt;
+ pin_desc[out_pin_cnt].chan = 0;
+ pin_desc[out_pin_cnt].func = PTP_PF_EXTTS;
+ }
+
+ info->n_pins = out_pin_cnt + extts_pin_cnt;
+ info->n_ext_ts = extts_pin_cnt;
+
+ if (!info->n_pins)
+ return;
+
+ info->pin_config = kcalloc(info->n_pins, sizeof(struct ptp_pin_desc),
+ GFP_KERNEL);
+
+ if (!info->pin_config)
+ return;
+
+ memcpy(info->pin_config, &pin_desc,
+ sizeof(struct ptp_pin_desc) * info->n_pins);
+}
+
+void aq_ptp_clock_init(struct aq_nic_s *aq_nic)
+{
+ struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
+ struct timespec64 ts;
+
+ ktime_get_real_ts64(&ts);
+ aq_ptp_settime(&aq_ptp->ptp_info, &ts);
+}
+
+static void aq_ptp_poll_sync_work_cb(struct work_struct *w);
+
+int aq_ptp_init(struct aq_nic_s *aq_nic, unsigned int idx_vec)
+{
+ struct hw_atl_utils_mbox mbox;
+ struct ptp_clock *clock;
+ struct aq_ptp_s *aq_ptp;
+ int err = 0;
+
+ if (!aq_nic->aq_hw_ops->hw_get_ptp_ts) {
+ aq_nic->aq_ptp = NULL;
+ return 0;
+ }
+
+ if (!aq_nic->aq_fw_ops->enable_ptp) {
+ aq_nic->aq_ptp = NULL;
+ return 0;
+ }
+
+ hw_atl_utils_mpi_read_stats(aq_nic->aq_hw, &mbox);
+
+ if (!(mbox.info.caps_ex & BIT(CAPS_EX_PHY_PTP_EN))) {
+ aq_nic->aq_ptp = NULL;
+ return 0;
+ }
+
+ aq_ptp_offset_init(&mbox.info.ptp_offset);
+
+ aq_ptp = kzalloc(sizeof(*aq_ptp), GFP_KERNEL);
+ if (!aq_ptp) {
+ err = -ENOMEM;
+ goto err_exit;
+ }
+
+ aq_ptp->aq_nic = aq_nic;
+
+ spin_lock_init(&aq_ptp->ptp_lock);
+ spin_lock_init(&aq_ptp->ptp_ring_lock);
+
+ aq_ptp->ptp_info = aq_ptp_clock;
+ aq_ptp_gpio_init(&aq_ptp->ptp_info, &mbox.info);
+ clock = ptp_clock_register(&aq_ptp->ptp_info, &aq_nic->ndev->dev);
+ if (IS_ERR(clock)) {
+ netdev_err(aq_nic->ndev, "ptp_clock_register failed\n");
+ err = PTR_ERR(clock);
+ goto err_exit;
+ }
+ aq_ptp->ptp_clock = clock;
+ aq_ptp_tx_timeout_init(&aq_ptp->ptp_tx_timeout);
+
+ atomic_set(&aq_ptp->offset_egress, 0);
+ atomic_set(&aq_ptp->offset_ingress, 0);
+
+ netif_napi_add(aq_nic_get_ndev(aq_nic), &aq_ptp->napi,
+ aq_ptp_poll, AQ_CFG_NAPI_WEIGHT);
+
+ aq_ptp->idx_vector = idx_vec;
+
+ aq_nic->aq_ptp = aq_ptp;
+
+ /* enable ptp counter */
+ aq_utils_obj_set(&aq_nic->aq_hw->flags, AQ_HW_PTP_AVAILABLE);
+ mutex_lock(&aq_nic->fwreq_mutex);
+ aq_nic->aq_fw_ops->enable_ptp(aq_nic->aq_hw, 1);
+ aq_ptp_clock_init(aq_nic);
+ mutex_unlock(&aq_nic->fwreq_mutex);
+
+ INIT_DELAYED_WORK(&aq_ptp->poll_sync, &aq_ptp_poll_sync_work_cb);
+ aq_ptp->eth_type_filter.location =
+ aq_nic_reserve_filter(aq_nic, aq_rx_filter_ethertype);
+ aq_ptp->udp_filter.location =
+ aq_nic_reserve_filter(aq_nic, aq_rx_filter_l3l4);
+
+ return 0;
+
+err_exit:
+ if (aq_ptp)
+ kfree(aq_ptp->ptp_info.pin_config);
+ kfree(aq_ptp);
+ aq_nic->aq_ptp = NULL;
+ return err;
+}
+
+void aq_ptp_unregister(struct aq_nic_s *aq_nic)
+{
+ struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
+
+ if (!aq_ptp)
+ return;
+
+ ptp_clock_unregister(aq_ptp->ptp_clock);
+}
+
+void aq_ptp_free(struct aq_nic_s *aq_nic)
+{
+ struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
+
+ if (!aq_ptp)
+ return;
+
+ aq_nic_release_filter(aq_nic, aq_rx_filter_ethertype,
+ aq_ptp->eth_type_filter.location);
+ aq_nic_release_filter(aq_nic, aq_rx_filter_l3l4,
+ aq_ptp->udp_filter.location);
+ cancel_delayed_work_sync(&aq_ptp->poll_sync);
+ /* disable ptp */
+ mutex_lock(&aq_nic->fwreq_mutex);
+ aq_nic->aq_fw_ops->enable_ptp(aq_nic->aq_hw, 0);
+ mutex_unlock(&aq_nic->fwreq_mutex);
+
+ kfree(aq_ptp->ptp_info.pin_config);
+
+ netif_napi_del(&aq_ptp->napi);
+ kfree(aq_ptp);
+ aq_nic->aq_ptp = NULL;
+}
+
+struct ptp_clock *aq_ptp_get_ptp_clock(struct aq_ptp_s *aq_ptp)
+{
+ return aq_ptp->ptp_clock;
+}
+
+/* PTP external GPIO nanoseconds count */
+static uint64_t aq_ptp_get_sync1588_ts(struct aq_nic_s *aq_nic)
+{
+ u64 ts = 0;
+
+ if (aq_nic->aq_hw_ops->hw_get_sync_ts)
+ aq_nic->aq_hw_ops->hw_get_sync_ts(aq_nic->aq_hw, &ts);
+
+ return ts;
+}
+
+static void aq_ptp_start_work(struct aq_ptp_s *aq_ptp)
+{
+ if (aq_ptp->extts_pin_enabled) {
+ aq_ptp->poll_timeout_ms = POLL_SYNC_TIMER_MS;
+ aq_ptp->last_sync1588_ts =
+ aq_ptp_get_sync1588_ts(aq_ptp->aq_nic);
+ schedule_delayed_work(&aq_ptp->poll_sync,
+ msecs_to_jiffies(aq_ptp->poll_timeout_ms));
+ }
+}
+
+int aq_ptp_link_change(struct aq_nic_s *aq_nic)
+{
+ struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
+
+ if (!aq_ptp)
+ return 0;
+
+ if (aq_nic->aq_hw->aq_link_status.mbps)
+ aq_ptp_start_work(aq_ptp);
+ else
+ cancel_delayed_work_sync(&aq_ptp->poll_sync);
+
+ return 0;
+}
+
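+/* Read the SYNC1588 timestamp, re-reading up to two more times to guard
+ * against torn reads while hardware updates the multi-word register.
+ * Returns true (and the new value through @new_ts) only when the
+ * timestamp has changed since the last poll.
+ */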
+static bool aq_ptp_sync_ts_updated(struct aq_ptp_s *aq_ptp, u64 *new_ts)
+{
+ struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
+ u64 sync_ts2;
+ u64 sync_ts;
+
+ sync_ts = aq_ptp_get_sync1588_ts(aq_nic);
+
+ if (sync_ts != aq_ptp->last_sync1588_ts) {
+ sync_ts2 = aq_ptp_get_sync1588_ts(aq_nic);
+ if (sync_ts != sync_ts2) {
+ sync_ts = sync_ts2;
+ sync_ts2 = aq_ptp_get_sync1588_ts(aq_nic);
+ if (sync_ts != sync_ts2) {
+ netdev_err(aq_nic->ndev,
+ "%s: Unable to get correct GPIO TS",
+ __func__);
+ sync_ts = 0;
+ }
+ }
+
+ *new_ts = sync_ts;
+ return true;
+ }
+ return false;
+}
+
+static int aq_ptp_check_sync1588(struct aq_ptp_s *aq_ptp)
+{
+ struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
+ u64 sync_ts;
+
+ /* Sync1588 pin was triggered */
+ if (aq_ptp_sync_ts_updated(aq_ptp, &sync_ts)) {
+ if (aq_ptp->extts_pin_enabled) {
+ struct ptp_clock_event ptp_event;
+ u64 time = 0;
+
+ aq_nic->aq_hw_ops->hw_ts_to_sys_clock(aq_nic->aq_hw,
+ sync_ts, &time);
+ ptp_event.index = aq_ptp->ptp_info.n_pins - 1;
+ ptp_event.timestamp = time;
+
+ ptp_event.type = PTP_CLOCK_EXTTS;
+ ptp_clock_event(aq_ptp->ptp_clock, &ptp_event);
+ }
+
+ aq_ptp->last_sync1588_ts = sync_ts;
+ }
+
+ return 0;
+}
+
+static void aq_ptp_poll_sync_work_cb(struct work_struct *w)
+{
+ struct delayed_work *dw = to_delayed_work(w);
+ struct aq_ptp_s *aq_ptp = container_of(dw, struct aq_ptp_s, poll_sync);
+
+ aq_ptp_check_sync1588(aq_ptp);
+
+ if (aq_ptp->extts_pin_enabled) {
+ unsigned long timeout = msecs_to_jiffies(aq_ptp->poll_timeout_ms);
+
+ schedule_delayed_work(&aq_ptp->poll_sync, timeout);
+ }
+}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.h b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.h
new file mode 100644
index 000000000000..231906431a48
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.h
@@ -0,0 +1,140 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Aquantia Corporation Network Driver
+ * Copyright (C) 2014-2019 Aquantia Corporation. All rights reserved
+ */
+
+/* File aq_ptp.h: Declaration of PTP functions.
+ */
+#ifndef AQ_PTP_H
+#define AQ_PTP_H
+
+#include <linux/net_tstamp.h>
+
+#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
+
+/* Common functions */
+int aq_ptp_init(struct aq_nic_s *aq_nic, unsigned int idx_vec);
+
+void aq_ptp_unregister(struct aq_nic_s *aq_nic);
+void aq_ptp_free(struct aq_nic_s *aq_nic);
+
+int aq_ptp_irq_alloc(struct aq_nic_s *aq_nic);
+void aq_ptp_irq_free(struct aq_nic_s *aq_nic);
+
+int aq_ptp_ring_alloc(struct aq_nic_s *aq_nic);
+void aq_ptp_ring_free(struct aq_nic_s *aq_nic);
+
+int aq_ptp_ring_init(struct aq_nic_s *aq_nic);
+int aq_ptp_ring_start(struct aq_nic_s *aq_nic);
+void aq_ptp_ring_stop(struct aq_nic_s *aq_nic);
+void aq_ptp_ring_deinit(struct aq_nic_s *aq_nic);
+
+void aq_ptp_service_task(struct aq_nic_s *aq_nic);
+
+void aq_ptp_tm_offset_set(struct aq_nic_s *aq_nic, unsigned int mbps);
+
+void aq_ptp_clock_init(struct aq_nic_s *aq_nic);
+
+/* Traffic processing functions */
+int aq_ptp_xmit(struct aq_nic_s *aq_nic, struct sk_buff *skb);
+void aq_ptp_tx_hwtstamp(struct aq_nic_s *aq_nic, u64 timestamp);
+
+/* PTP availability must be checked before calling these functions */
+void aq_ptp_hwtstamp_config_get(struct aq_ptp_s *aq_ptp,
+ struct hwtstamp_config *config);
+int aq_ptp_hwtstamp_config_set(struct aq_ptp_s *aq_ptp,
+ struct hwtstamp_config *config);
+
+/* Return whether the given ring belongs to PTP */
+bool aq_ptp_ring(struct aq_nic_s *aq_nic, struct aq_ring_s *ring);
+
+u16 aq_ptp_extract_ts(struct aq_nic_s *aq_nic, struct sk_buff *skb, u8 *p,
+ unsigned int len);
+
+struct ptp_clock *aq_ptp_get_ptp_clock(struct aq_ptp_s *aq_ptp);
+
+int aq_ptp_link_change(struct aq_nic_s *aq_nic);
+
+#else
+
+static inline int aq_ptp_init(struct aq_nic_s *aq_nic, unsigned int idx_vec)
+{
+ return 0;
+}
+
+static inline void aq_ptp_unregister(struct aq_nic_s *aq_nic) {}
+
+static inline void aq_ptp_free(struct aq_nic_s *aq_nic)
+{
+}
+
+static inline int aq_ptp_irq_alloc(struct aq_nic_s *aq_nic)
+{
+ return 0;
+}
+
+static inline void aq_ptp_irq_free(struct aq_nic_s *aq_nic)
+{
+}
+
+static inline int aq_ptp_ring_alloc(struct aq_nic_s *aq_nic)
+{
+ return 0;
+}
+
+static inline void aq_ptp_ring_free(struct aq_nic_s *aq_nic) {}
+
+static inline int aq_ptp_ring_init(struct aq_nic_s *aq_nic)
+{
+ return 0;
+}
+
+static inline int aq_ptp_ring_start(struct aq_nic_s *aq_nic)
+{
+ return 0;
+}
+
+static inline void aq_ptp_ring_stop(struct aq_nic_s *aq_nic) {}
+static inline void aq_ptp_ring_deinit(struct aq_nic_s *aq_nic) {}
+static inline void aq_ptp_service_task(struct aq_nic_s *aq_nic) {}
+static inline void aq_ptp_tm_offset_set(struct aq_nic_s *aq_nic,
+ unsigned int mbps) {}
+static inline void aq_ptp_clock_init(struct aq_nic_s *aq_nic) {}
+static inline int aq_ptp_xmit(struct aq_nic_s *aq_nic, struct sk_buff *skb)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void aq_ptp_tx_hwtstamp(struct aq_nic_s *aq_nic, u64 timestamp) {}
+static inline void aq_ptp_hwtstamp_config_get(struct aq_ptp_s *aq_ptp,
+ struct hwtstamp_config *config) {}
+static inline int aq_ptp_hwtstamp_config_set(struct aq_ptp_s *aq_ptp,
+ struct hwtstamp_config *config)
+{
+ return 0;
+}
+
+static inline bool aq_ptp_ring(struct aq_nic_s *aq_nic, struct aq_ring_s *ring)
+{
+ return false;
+}
+
+static inline u16 aq_ptp_extract_ts(struct aq_nic_s *aq_nic,
+ struct sk_buff *skb, u8 *p,
+ unsigned int len)
+{
+ return 0;
+}
+
+static inline struct ptp_clock *aq_ptp_get_ptp_clock(struct aq_ptp_s *aq_ptp)
+{
+ return NULL;
+}
+
+static inline int aq_ptp_link_change(struct aq_nic_s *aq_nic)
+{
+ return 0;
+}
+#endif
+
+#endif /* AQ_PTP_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index 76bdbe1596d6..951d86f8b66e 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* aQuantia Corporation Network Driver
- * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
*/
/* File aq_ring.c: Definition of functions for Rx/Tx rings. */
@@ -10,6 +10,7 @@
#include "aq_nic.h"
#include "aq_hw.h"
#include "aq_hw_utils.h"
+#include "aq_ptp.h"
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
@@ -29,8 +30,8 @@ static int aq_get_rxpage(struct aq_rxpage *rxpage, unsigned int order,
struct device *dev)
{
struct page *page;
- dma_addr_t daddr;
int ret = -ENOMEM;
+ dma_addr_t daddr;
page = dev_alloc_pages(order);
if (unlikely(!page))
@@ -117,6 +118,7 @@ err_exit:
aq_ring_free(self);
self = NULL;
}
+
return self;
}
@@ -143,6 +145,7 @@ err_exit:
aq_ring_free(self);
self = NULL;
}
+
return self;
}
@@ -174,6 +177,31 @@ err_exit:
aq_ring_free(self);
self = NULL;
}
+
+ return self;
+}
+
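+/* Allocate the ring used for hardware Tx timestamps. The coherent buffer
+ * is sized for the descriptors plus a small trailing area
+ * (AQ_CFG_RXDS_DEF bytes); the fill routine points every descriptor's
+ * buffer address at that trailing area, since the timestamp itself is
+ * delivered in the descriptor write-back.
+ */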
+struct aq_ring_s *
+aq_ring_hwts_rx_alloc(struct aq_ring_s *self, struct aq_nic_s *aq_nic,
+ unsigned int idx, unsigned int size, unsigned int dx_size)
+{
+ struct device *dev = aq_nic_get_dev(aq_nic);
+ size_t sz = size * dx_size + AQ_CFG_RXDS_DEF;
+
+ memset(self, 0, sizeof(*self));
+
+ self->aq_nic = aq_nic;
+ self->idx = idx;
+ self->size = size;
+ self->dx_size = dx_size;
+
+ self->dx_ring = dma_alloc_coherent(dev, sz, &self->dx_ring_pa,
+ GFP_KERNEL);
+ if (!self->dx_ring) {
+ aq_ring_free(self);
+ return NULL;
+ }
+
return self;
}
@@ -182,6 +210,7 @@ int aq_ring_init(struct aq_ring_s *self)
self->hw_head = 0;
self->sw_head = 0;
self->sw_tail = 0;
+
return 0;
}
@@ -290,6 +319,7 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
self->sw_head = aq_ring_next_dx(self, self->sw_head),
--budget, ++(*work_done)) {
struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];
+ bool is_ptp_ring = aq_ptp_ring(self->aq_nic, self);
struct aq_ring_buff_s *buff_ = NULL;
struct sk_buff *skb = NULL;
unsigned int next_ = 0U;
@@ -354,6 +384,11 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
err = -ENOMEM;
goto err_exit;
}
+ if (is_ptp_ring)
+ buff->len -=
+ aq_ptp_extract_ts(self->aq_nic, skb,
+ aq_buf_vaddr(&buff->rxdata),
+ buff->len);
skb_put(skb, buff->len);
page_ref_inc(buff->rxdata.page);
} else {
@@ -362,6 +397,11 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
err = -ENOMEM;
goto err_exit;
}
+ if (is_ptp_ring)
+ buff->len -=
+ aq_ptp_extract_ts(self->aq_nic, skb,
+ aq_buf_vaddr(&buff->rxdata),
+ buff->len);
hdr_len = buff->len;
if (hdr_len > AQ_CFG_RX_HDR_SIZE)
@@ -421,8 +461,8 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
skb_set_hash(skb, buff->rss_hash,
buff->is_hash_l4 ? PKT_HASH_TYPE_L4 :
PKT_HASH_TYPE_NONE);
-
- skb_record_rx_queue(skb, self->idx);
+ /* Send all PTP traffic to queue 0 */
+ skb_record_rx_queue(skb, is_ptp_ring ? 0 : self->idx);
++self->stats.rx.packets;
self->stats.rx.bytes += skb->len;
@@ -434,6 +474,21 @@ err_exit:
return err;
}
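+/* Walk the completed hardware-timestamp descriptors, extract the 64-bit
+ * nanosecond value from each write-back and hand it to the PTP layer to
+ * be matched with its queued Tx skb.
+ */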
+void aq_ring_hwts_rx_clean(struct aq_ring_s *self, struct aq_nic_s *aq_nic)
+{
+ while (self->sw_head != self->hw_head) {
+ u64 ns;
+
+ aq_nic->aq_hw_ops->extract_hwts(aq_nic->aq_hw,
+ self->dx_ring +
+ (self->sw_head * self->dx_size),
+ self->dx_size, &ns);
+ aq_ptp_tx_hwtstamp(aq_nic, ns);
+
+ self->sw_head = aq_ring_next_dx(self, self->sw_head);
+ }
+}
+
int aq_ring_rx_fill(struct aq_ring_s *self)
{
unsigned int page_order = self->page_order;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
index 47abd09d06c2..991e4d31b094 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* aQuantia Corporation Network Driver
- * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
*/
/* File aq_ring.h: Declaration of functions for Rx/Tx rings. */
@@ -65,19 +65,20 @@ struct __packed aq_ring_buff_s {
};
union {
struct {
- u16 len;
+ u32 len:16;
u32 is_ip_cso:1;
u32 is_udp_cso:1;
u32 is_tcp_cso:1;
u32 is_cso_err:1;
u32 is_sop:1;
u32 is_eop:1;
- u32 is_gso:1;
+ u32 is_gso_tcp:1;
+ u32 is_gso_udp:1;
u32 is_mapped:1;
u32 is_cleaned:1;
u32 is_error:1;
u32 is_vlan:1;
- u32 rsvd3:5;
+ u32 rsvd3:4;
u16 eop_index;
u16 rsvd4;
};
@@ -174,4 +175,9 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
int budget);
int aq_ring_rx_fill(struct aq_ring_s *self);
+struct aq_ring_s *aq_ring_hwts_rx_alloc(struct aq_ring_s *self,
+ struct aq_nic_s *aq_nic, unsigned int idx,
+ unsigned int size, unsigned int dx_size);
+void aq_ring_hwts_rx_clean(struct aq_ring_s *self, struct aq_nic_s *aq_nic);
+
#endif /* AQ_RING_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
index a95c263a45aa..f40a427970dc 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
@@ -103,8 +103,8 @@ err_exit:
struct aq_vec_s *aq_vec_alloc(struct aq_nic_s *aq_nic, unsigned int idx,
struct aq_nic_cfg_s *aq_nic_cfg)
{
- struct aq_vec_s *self = NULL;
struct aq_ring_s *ring = NULL;
+ struct aq_vec_s *self = NULL;
unsigned int i = 0U;
int err = 0;
@@ -159,6 +159,7 @@ err_exit:
aq_vec_free(self);
self = NULL;
}
+
return self;
}
@@ -263,6 +264,7 @@ void aq_vec_deinit(struct aq_vec_s *self)
aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]);
aq_ring_rx_deinit(&ring[AQ_VEC_RX_ID]);
}
+
err_exit:;
}
@@ -361,9 +363,9 @@ void aq_vec_add_stats(struct aq_vec_s *self,
int aq_vec_get_sw_stats(struct aq_vec_s *self, u64 *data, unsigned int *p_count)
{
- unsigned int count = 0U;
struct aq_ring_stats_rx_s stats_rx;
struct aq_ring_stats_tx_s stats_tx;
+ unsigned int count = 0U;
memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s));
memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s));
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
index 359a4d387185..9b1062b8af64 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
@@ -119,10 +119,10 @@ err_exit:
static int hw_atl_a0_hw_qos_set(struct aq_hw_s *self)
{
- u32 tc = 0U;
- u32 buff_size = 0U;
- unsigned int i_priority = 0U;
bool is_rx_flow_control = false;
+ unsigned int i_priority = 0U;
+ u32 buff_size = 0U;
+ u32 tc = 0U;
/* TPS Descriptor rate init */
hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U);
@@ -155,7 +155,7 @@ static int hw_atl_a0_hw_qos_set(struct aq_hw_s *self)
/* QoS Rx buf size per TC */
tc = 0;
- is_rx_flow_control = (AQ_NIC_FC_RX & self->aq_nic_cfg->flow_control);
+ is_rx_flow_control = (AQ_NIC_FC_RX & self->aq_nic_cfg->fc.req);
buff_size = HW_ATL_A0_RXBUF_MAX;
hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc);
@@ -180,9 +180,9 @@ static int hw_atl_a0_hw_rss_hash_set(struct aq_hw_s *self,
struct aq_rss_parameters *rss_params)
{
struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
- int err = 0;
- unsigned int i = 0U;
unsigned int addr = 0U;
+ unsigned int i = 0U;
+ int err = 0;
u32 val;
for (i = 10, addr = 0U; i--; ++addr) {
@@ -207,12 +207,12 @@ err_exit:
static int hw_atl_a0_hw_rss_set(struct aq_hw_s *self,
struct aq_rss_parameters *rss_params)
{
- u8 *indirection_table = rss_params->indirection_table;
- u32 i = 0U;
u32 num_rss_queues = max(1U, self->aq_nic_cfg->num_rss_queues);
- int err = 0;
+ u8 *indirection_table = rss_params->indirection_table;
u16 bitary[1 + (HW_ATL_A0_RSS_REDIRECTION_MAX *
HW_ATL_A0_RSS_REDIRECTION_BITS / 16U)];
+ int err = 0;
+ u32 i = 0U;
u32 val;
memset(bitary, 0, sizeof(bitary));
@@ -321,9 +321,9 @@ static int hw_atl_a0_hw_init_rx_path(struct aq_hw_s *self)
static int hw_atl_a0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr)
{
- int err = 0;
unsigned int h = 0U;
unsigned int l = 0U;
+ int err = 0;
if (!mac_addr) {
err = -EINVAL;
@@ -352,10 +352,9 @@ static int hw_atl_a0_hw_init(struct aq_hw_s *self, u8 *mac_addr)
[AQ_HW_IRQ_MSI] = { 0x20000021U, 0x20000025U },
[AQ_HW_IRQ_MSIX] = { 0x20000022U, 0x20000026U },
};
-
+ struct aq_nic_cfg_s *aq_nic_cfg = self->aq_nic_cfg;
int err = 0;
- struct aq_nic_cfg_s *aq_nic_cfg = self->aq_nic_cfg;
hw_atl_a0_hw_init_tx_path(self);
hw_atl_a0_hw_init_rx_path(self);
@@ -404,6 +403,7 @@ static int hw_atl_a0_hw_ring_tx_start(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
hw_atl_tdm_tx_desc_en_set(self, 1, ring->idx);
+
return aq_hw_err_from_flags(self);
}
@@ -411,6 +411,7 @@ static int hw_atl_a0_hw_ring_rx_start(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
hw_atl_rdm_rx_desc_en_set(self, 1, ring->idx);
+
return aq_hw_err_from_flags(self);
}
@@ -418,6 +419,7 @@ static int hw_atl_a0_hw_start(struct aq_hw_s *self)
{
hw_atl_tpb_tx_buff_en_set(self, 1);
hw_atl_rpb_rx_buff_en_set(self, 1);
+
return aq_hw_err_from_flags(self);
}
@@ -425,6 +427,7 @@ static int hw_atl_a0_hw_tx_ring_tail_update(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
hw_atl_reg_tx_dma_desc_tail_ptr_set(self, ring->sw_tail, ring->idx);
+
return 0;
}
@@ -435,8 +438,8 @@ static int hw_atl_a0_hw_ring_tx_xmit(struct aq_hw_s *self,
struct aq_ring_buff_s *buff = NULL;
struct hw_atl_txd_s *txd = NULL;
unsigned int buff_pa_len = 0U;
- unsigned int pkt_len = 0U;
unsigned int frag_count = 0U;
+ unsigned int pkt_len = 0U;
bool is_gso = false;
buff = &ring->buff_ring[ring->sw_tail];
@@ -451,7 +454,7 @@ static int hw_atl_a0_hw_ring_tx_xmit(struct aq_hw_s *self,
buff = &ring->buff_ring[ring->sw_tail];
- if (buff->is_gso) {
+ if (buff->is_gso_tcp) {
txd->ctl |= (buff->len_l3 << 31) |
(buff->len_l2 << 24) |
HW_ATL_A0_TXD_CTL_CMD_TCP |
@@ -500,6 +503,7 @@ static int hw_atl_a0_hw_ring_tx_xmit(struct aq_hw_s *self,
}
hw_atl_a0_hw_tx_ring_tail_update(self, ring);
+
return aq_hw_err_from_flags(self);
}
@@ -507,8 +511,8 @@ static int hw_atl_a0_hw_ring_rx_init(struct aq_hw_s *self,
struct aq_ring_s *aq_ring,
struct aq_ring_param_s *aq_ring_param)
{
- u32 dma_desc_addr_lsw = (u32)aq_ring->dx_ring_pa;
u32 dma_desc_addr_msw = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);
+ u32 dma_desc_addr_lsw = (u32)aq_ring->dx_ring_pa;
hw_atl_rdm_rx_desc_en_set(self, false, aq_ring->idx);
@@ -549,8 +553,8 @@ static int hw_atl_a0_hw_ring_tx_init(struct aq_hw_s *self,
struct aq_ring_s *aq_ring,
struct aq_ring_param_s *aq_ring_param)
{
- u32 dma_desc_lsw_addr = (u32)aq_ring->dx_ring_pa;
u32 dma_desc_msw_addr = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);
+ u32 dma_desc_lsw_addr = (u32)aq_ring->dx_ring_pa;
hw_atl_reg_tx_dma_desc_base_addresslswset(self, dma_desc_lsw_addr,
aq_ring->idx);
@@ -599,8 +603,8 @@ static int hw_atl_a0_hw_ring_rx_fill(struct aq_hw_s *self,
static int hw_atl_a0_hw_ring_tx_head_update(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
- int err = 0;
unsigned int hw_head = hw_atl_tdm_tx_desc_head_ptr_get(self, ring->idx);
+ int err = 0;
if (aq_utils_obj_test(&self->flags, AQ_HW_FLAG_ERR_UNPLUG)) {
err = -ENXIO;
@@ -720,6 +724,7 @@ static int hw_atl_a0_hw_irq_enable(struct aq_hw_s *self, u64 mask)
{
hw_atl_itr_irq_msk_setlsw_set(self, LODWORD(mask) |
(1U << HW_ATL_A0_ERR_INT));
+
return aq_hw_err_from_flags(self);
}
@@ -737,6 +742,7 @@ static int hw_atl_a0_hw_irq_disable(struct aq_hw_s *self, u64 mask)
static int hw_atl_a0_hw_irq_read(struct aq_hw_s *self, u64 *mask)
{
*mask = hw_atl_itr_irq_statuslsw_get(self);
+
return aq_hw_err_from_flags(self);
}
@@ -859,6 +865,7 @@ static int hw_atl_a0_hw_interrupt_moderation_set(struct aq_hw_s *self)
static int hw_atl_a0_hw_stop(struct aq_hw_s *self)
{
hw_atl_a0_hw_irq_disable(self, HW_ATL_A0_INT_MASK);
+
return aq_hw_err_from_flags(self);
}
@@ -866,6 +873,7 @@ static int hw_atl_a0_hw_ring_tx_stop(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
hw_atl_tdm_tx_desc_en_set(self, 0U, ring->idx);
+
return aq_hw_err_from_flags(self);
}
@@ -873,6 +881,7 @@ static int hw_atl_a0_hw_ring_rx_stop(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
hw_atl_rdm_rx_desc_en_set(self, 0U, ring->idx);
+
return aq_hw_err_from_flags(self);
}
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index 2ad3fa6316ce..58e891af6e09 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* aQuantia Corporation Network Driver
- * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
*/
/* File hw_atl_b0.c: Definition of Atlantic hardware specific functions. */
@@ -10,6 +10,7 @@
#include "../aq_hw_utils.h"
#include "../aq_ring.h"
#include "../aq_nic.h"
+#include "../aq_phy.h"
#include "hw_atl_b0.h"
#include "hw_atl_utils.h"
#include "hw_atl_llh.h"
@@ -42,13 +43,17 @@
NETIF_F_NTUPLE | \
NETIF_F_HW_VLAN_CTAG_FILTER | \
NETIF_F_HW_VLAN_CTAG_RX | \
- NETIF_F_HW_VLAN_CTAG_TX, \
+ NETIF_F_HW_VLAN_CTAG_TX | \
+ NETIF_F_GSO_UDP_L4 | \
+ NETIF_F_GSO_PARTIAL, \
.hw_priv_flags = IFF_UNICAST_FLT, \
.flow_control = true, \
.mtu = HW_ATL_B0_MTU_JUMBO, \
.mac_regs_count = 88, \
.hw_alive_check_addr = 0x10U
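+/* 2^32 fractional units per nanosecond; the name suggests the clock
+ * adjustment logic works in a 32.32 fixed-point nanosecond format
+ * (an assumption; the users of this constant are outside this hunk).
+ */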
+#define FRAC_PER_NS 0x100000000LL
+
const struct aq_hw_caps_s hw_atl_b0_caps_aqc100 = {
DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
.media_type = AQ_HW_MEDIA_TYPE_FIBRE,
@@ -104,14 +109,15 @@ static int hw_atl_b0_hw_reset(struct aq_hw_s *self)
static int hw_atl_b0_set_fc(struct aq_hw_s *self, u32 fc, u32 tc)
{
hw_atl_rpb_rx_xoff_en_per_tc_set(self, !!(fc & AQ_NIC_FC_RX), tc);
+
return 0;
}
static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
{
- u32 tc = 0U;
- u32 buff_size = 0U;
unsigned int i_priority = 0U;
+ u32 buff_size = 0U;
+ u32 tc = 0U;
/* TPS Descriptor rate init */
hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U);
@@ -124,13 +130,16 @@ static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U);
hw_atl_tps_tx_pkt_shed_data_arb_mode_set(self, 0U);
- hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(self, 0xFFF, 0U);
- hw_atl_tps_tx_pkt_shed_tc_data_weight_set(self, 0x64, 0U);
- hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0x50, 0U);
- hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(self, 0x1E, 0U);
+ tc = 0;
+
+ /* TX Packet Scheduler Data TC0 */
+ hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(self, 0xFFF, tc);
+ hw_atl_tps_tx_pkt_shed_tc_data_weight_set(self, 0x64, tc);
+ hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0x50, tc);
+ hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(self, 0x1E, tc);
- /* Tx buf size */
- buff_size = HW_ATL_B0_TXBUF_MAX;
+ /* Tx buf size TC0 */
+ buff_size = HW_ATL_B0_TXBUF_MAX - HW_ATL_B0_PTP_TXBUF_SIZE;
hw_atl_tpb_tx_pkt_buff_size_per_tc_set(self, buff_size, tc);
hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(self,
@@ -141,10 +150,15 @@ static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
(buff_size *
(1024 / 32U) * 50U) /
100U, tc);
+ /* Init TC2 for PTP_TX */
+ tc = 2;
+
+ hw_atl_tpb_tx_pkt_buff_size_per_tc_set(self, HW_ATL_B0_PTP_TXBUF_SIZE,
+ tc);
/* QoS Rx buf size per TC */
tc = 0;
- buff_size = HW_ATL_B0_RXBUF_MAX;
+ buff_size = HW_ATL_B0_RXBUF_MAX - HW_ATL_B0_PTP_RXBUF_SIZE;
hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc);
hw_atl_rpb_rx_buff_hi_threshold_per_tc_set(self,
@@ -156,7 +170,15 @@ static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
(1024U / 32U) * 50U) /
100U, tc);
- hw_atl_b0_set_fc(self, self->aq_nic_cfg->flow_control, tc);
+ hw_atl_b0_set_fc(self, self->aq_nic_cfg->fc.req, tc);
+
+ /* Init TC2 for PTP_RX */
+ tc = 2;
+
+ hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, HW_ATL_B0_PTP_RXBUF_SIZE,
+ tc);
+ /* No flow control for PTP */
+ hw_atl_rpb_rx_xoff_en_per_tc_set(self, 0U, tc);
/* QoS 802.1p priority -> TC mapping */
for (i_priority = 8U; i_priority--;)
@@ -169,9 +191,9 @@ static int hw_atl_b0_hw_rss_hash_set(struct aq_hw_s *self,
struct aq_rss_parameters *rss_params)
{
struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
- int err = 0;
- unsigned int i = 0U;
unsigned int addr = 0U;
+ unsigned int i = 0U;
+ int err = 0;
u32 val;
for (i = 10, addr = 0U; i--; ++addr) {
@@ -196,12 +218,12 @@ err_exit:
static int hw_atl_b0_hw_rss_set(struct aq_hw_s *self,
struct aq_rss_parameters *rss_params)
{
- u8 *indirection_table = rss_params->indirection_table;
- u32 i = 0U;
u32 num_rss_queues = max(1U, self->aq_nic_cfg->num_rss_queues);
- int err = 0;
+ u8 *indirection_table = rss_params->indirection_table;
u16 bitary[1 + (HW_ATL_B0_RSS_REDIRECTION_MAX *
HW_ATL_B0_RSS_REDIRECTION_BITS / 16U)];
+ int err = 0;
+ u32 i = 0U;
u32 val;
memset(bitary, 0, sizeof(bitary));
@@ -285,6 +307,7 @@ static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self,
hw_atl_itr_rsc_delay_set(self, 1U);
}
+
return aq_hw_err_from_flags(self);
}
@@ -363,9 +386,9 @@ static int hw_atl_b0_hw_init_rx_path(struct aq_hw_s *self)
static int hw_atl_b0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr)
{
- int err = 0;
unsigned int h = 0U;
unsigned int l = 0U;
+ int err = 0;
if (!mac_addr) {
err = -EINVAL;
@@ -394,11 +417,10 @@ static int hw_atl_b0_hw_init(struct aq_hw_s *self, u8 *mac_addr)
[AQ_HW_IRQ_MSI] = { 0x20000021U, 0x20000025U },
[AQ_HW_IRQ_MSIX] = { 0x20000022U, 0x20000026U },
};
-
+ struct aq_nic_cfg_s *aq_nic_cfg = self->aq_nic_cfg;
int err = 0;
u32 val;
- struct aq_nic_cfg_s *aq_nic_cfg = self->aq_nic_cfg;
hw_atl_b0_hw_init_tx_path(self);
hw_atl_b0_hw_init_rx_path(self);
@@ -441,8 +463,10 @@ static int hw_atl_b0_hw_init(struct aq_hw_s *self, u8 *mac_addr)
/* Interrupts */
hw_atl_reg_gen_irq_map_set(self,
- ((HW_ATL_B0_ERR_INT << 0x18) | (1U << 0x1F)) |
- ((HW_ATL_B0_ERR_INT << 0x10) | (1U << 0x17)), 0U);
+ ((HW_ATL_B0_ERR_INT << 0x18) |
+ (1U << 0x1F)) |
+ ((HW_ATL_B0_ERR_INT << 0x10) |
+ (1U << 0x17)), 0U);
/* Enable link interrupt */
if (aq_nic_cfg->link_irq_vec)
@@ -459,6 +483,7 @@ static int hw_atl_b0_hw_ring_tx_start(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
hw_atl_tdm_tx_desc_en_set(self, 1, ring->idx);
+
return aq_hw_err_from_flags(self);
}
@@ -466,6 +491,7 @@ static int hw_atl_b0_hw_ring_rx_start(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
hw_atl_rdm_rx_desc_en_set(self, 1, ring->idx);
+
return aq_hw_err_from_flags(self);
}
@@ -473,6 +499,7 @@ static int hw_atl_b0_hw_start(struct aq_hw_s *self)
{
hw_atl_tpb_tx_buff_en_set(self, 1);
hw_atl_rpb_rx_buff_en_set(self, 1);
+
return aq_hw_err_from_flags(self);
}
@@ -480,6 +507,7 @@ static int hw_atl_b0_hw_tx_ring_tail_update(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
hw_atl_reg_tx_dma_desc_tail_ptr_set(self, ring->sw_tail, ring->idx);
+
return 0;
}
@@ -490,8 +518,8 @@ static int hw_atl_b0_hw_ring_tx_xmit(struct aq_hw_s *self,
struct aq_ring_buff_s *buff = NULL;
struct hw_atl_txd_s *txd = NULL;
unsigned int buff_pa_len = 0U;
- unsigned int pkt_len = 0U;
unsigned int frag_count = 0U;
+ unsigned int pkt_len = 0U;
bool is_vlan = false;
bool is_gso = false;
@@ -507,8 +535,9 @@ static int hw_atl_b0_hw_ring_tx_xmit(struct aq_hw_s *self,
buff = &ring->buff_ring[ring->sw_tail];
- if (buff->is_gso) {
- txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_TCP;
+ if (buff->is_gso_tcp || buff->is_gso_udp) {
+ if (buff->is_gso_tcp)
+ txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_TCP;
txd->ctl |= HW_ATL_B0_TXD_CTL_DESC_TYPE_TXC;
txd->ctl |= (buff->len_l3 << 31) |
(buff->len_l2 << 24);
@@ -528,7 +557,7 @@ static int hw_atl_b0_hw_ring_tx_xmit(struct aq_hw_s *self,
txd->ctl |= buff->vlan_tx_tag << 4;
is_vlan = true;
}
- if (!buff->is_gso && !buff->is_vlan) {
+ if (!buff->is_gso_tcp && !buff->is_gso_udp && !buff->is_vlan) {
buff_pa_len = buff->len;
txd->buf_addr = buff->pa;
@@ -567,6 +596,7 @@ static int hw_atl_b0_hw_ring_tx_xmit(struct aq_hw_s *self,
}
hw_atl_b0_hw_tx_ring_tail_update(self, ring);
+
return aq_hw_err_from_flags(self);
}
@@ -574,9 +604,9 @@ static int hw_atl_b0_hw_ring_rx_init(struct aq_hw_s *self,
struct aq_ring_s *aq_ring,
struct aq_ring_param_s *aq_ring_param)
{
- u32 dma_desc_addr_lsw = (u32)aq_ring->dx_ring_pa;
u32 dma_desc_addr_msw = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);
u32 vlan_rx_stripping = self->aq_nic_cfg->is_vlan_rx_strip;
+ u32 dma_desc_addr_lsw = (u32)aq_ring->dx_ring_pa;
hw_atl_rdm_rx_desc_en_set(self, false, aq_ring->idx);
@@ -617,8 +647,8 @@ static int hw_atl_b0_hw_ring_tx_init(struct aq_hw_s *self,
struct aq_ring_s *aq_ring,
struct aq_ring_param_s *aq_ring_param)
{
- u32 dma_desc_lsw_addr = (u32)aq_ring->dx_ring_pa;
u32 dma_desc_msw_addr = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);
+ u32 dma_desc_lsw_addr = (u32)aq_ring->dx_ring_pa;
hw_atl_reg_tx_dma_desc_base_addresslswset(self, dma_desc_lsw_addr,
aq_ring->idx);
@@ -664,11 +694,53 @@ static int hw_atl_b0_hw_ring_rx_fill(struct aq_hw_s *self,
return aq_hw_err_from_flags(self);
}
+static int hw_atl_b0_hw_ring_hwts_rx_fill(struct aq_hw_s *self,
+ struct aq_ring_s *ring)
+{
+ unsigned int i;
+
+ for (i = aq_ring_avail_dx(ring); i--;
+ ring->sw_tail = aq_ring_next_dx(ring, ring->sw_tail)) {
+ struct hw_atl_rxd_s *rxd =
+ (struct hw_atl_rxd_s *)
+ &ring->dx_ring[ring->sw_tail * HW_ATL_B0_RXD_SIZE];
+
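+	/* All HWTS descriptors share one dummy buffer placed just past the
+	 * descriptor ring; only the descriptor writeback is ever consumed.
+	 */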
+ rxd->buf_addr = ring->dx_ring_pa + ring->size * ring->dx_size;
+ rxd->hdr_addr = 0U;
+ }
+	/* Make sure descriptors are updated before bumping the tail */
+ wmb();
+
+ hw_atl_reg_rx_dma_desc_tail_ptr_set(self, ring->sw_tail, ring->idx);
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_b0_hw_ring_hwts_rx_receive(struct aq_hw_s *self,
+ struct aq_ring_s *ring)
+{
+ while (ring->hw_head != ring->sw_tail) {
+ struct hw_atl_rxd_hwts_wb_s *hwts_wb =
+ (struct hw_atl_rxd_hwts_wb_s *)
+ (ring->dx_ring + (ring->hw_head * HW_ATL_B0_RXD_SIZE));
+
+ /* RxD is not done */
+ if (!(hwts_wb->sec_lw0 & 0x1U))
+ break;
+
+ ring->hw_head = aq_ring_next_dx(ring, ring->hw_head);
+ }
+
+ return aq_hw_err_from_flags(self);
+}
+
static int hw_atl_b0_hw_ring_tx_head_update(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
+ unsigned int hw_head_;
int err = 0;
- unsigned int hw_head_ = hw_atl_tdm_tx_desc_head_ptr_get(self, ring->idx);
+
+ hw_head_ = hw_atl_tdm_tx_desc_head_ptr_get(self, ring->idx);
if (aq_utils_obj_test(&self->flags, AQ_HW_FLAG_ERR_UNPLUG)) {
err = -ENXIO;
@@ -784,6 +856,7 @@ static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self,
static int hw_atl_b0_hw_irq_enable(struct aq_hw_s *self, u64 mask)
{
hw_atl_itr_irq_msk_setlsw_set(self, LODWORD(mask));
+
return aq_hw_err_from_flags(self);
}
@@ -793,12 +866,14 @@ static int hw_atl_b0_hw_irq_disable(struct aq_hw_s *self, u64 mask)
hw_atl_itr_irq_status_clearlsw_set(self, LODWORD(mask));
atomic_inc(&self->dpc);
+
return aq_hw_err_from_flags(self);
}
static int hw_atl_b0_hw_irq_read(struct aq_hw_s *self, u64 *mask)
{
*mask = hw_atl_itr_irq_statuslsw_get(self);
+
return aq_hw_err_from_flags(self);
}
@@ -807,8 +882,8 @@ static int hw_atl_b0_hw_irq_read(struct aq_hw_s *self, u64 *mask)
static int hw_atl_b0_hw_packet_filter_set(struct aq_hw_s *self,
unsigned int packet_filter)
{
- unsigned int i = 0U;
struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
+ unsigned int i = 0U;
hw_atl_rpfl2promiscuous_mode_en_set(self,
IS_FILTER_ENABLED(IFF_PROMISC));
@@ -846,29 +921,30 @@ static int hw_atl_b0_hw_multicast_list_set(struct aq_hw_s *self,
u32 count)
{
int err = 0;
+ struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
if (count > (HW_ATL_B0_MAC_MAX - HW_ATL_B0_MAC_MIN)) {
err = -EBADRQC;
goto err_exit;
}
- for (self->aq_nic_cfg->mc_list_count = 0U;
- self->aq_nic_cfg->mc_list_count < count;
- ++self->aq_nic_cfg->mc_list_count) {
- u32 i = self->aq_nic_cfg->mc_list_count;
+ for (cfg->mc_list_count = 0U;
+ cfg->mc_list_count < count;
+ ++cfg->mc_list_count) {
+ u32 i = cfg->mc_list_count;
u32 h = (ar_mac[i][0] << 8) | (ar_mac[i][1]);
u32 l = (ar_mac[i][2] << 24) | (ar_mac[i][3] << 16) |
(ar_mac[i][4] << 8) | ar_mac[i][5];
hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL_B0_MAC_MIN + i);
- hw_atl_rpfl2unicast_dest_addresslsw_set(self,
- l, HW_ATL_B0_MAC_MIN + i);
+ hw_atl_rpfl2unicast_dest_addresslsw_set(self, l,
+ HW_ATL_B0_MAC_MIN + i);
- hw_atl_rpfl2unicast_dest_addressmsw_set(self,
- h, HW_ATL_B0_MAC_MIN + i);
+ hw_atl_rpfl2unicast_dest_addressmsw_set(self, h,
+ HW_ATL_B0_MAC_MIN + i);
hw_atl_rpfl2_uc_flr_en_set(self,
- (self->aq_nic_cfg->is_mc_list_enabled),
+ (cfg->is_mc_list_enabled),
HW_ATL_B0_MAC_MIN + i);
}
@@ -995,6 +1071,7 @@ static int hw_atl_b0_hw_ring_tx_stop(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
hw_atl_tdm_tx_desc_en_set(self, 0U, ring->idx);
+
return aq_hw_err_from_flags(self);
}
@@ -1002,9 +1079,231 @@ static int hw_atl_b0_hw_ring_rx_stop(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
hw_atl_rdm_rx_desc_en_set(self, 0U, ring->idx);
+
return aq_hw_err_from_flags(self);
}
+static int hw_atl_b0_tx_tc_mode_get(struct aq_hw_s *self, u32 *tc_mode)
+{
+ *tc_mode = hw_atl_rpb_tps_tx_tc_mode_get(self);
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_b0_rx_tc_mode_get(struct aq_hw_s *self, u32 *tc_mode)
+{
+ *tc_mode = hw_atl_rpb_rpf_rx_traf_class_mode_get(self);
+ return aq_hw_err_from_flags(self);
+}
+
+#define get_ptp_ts_val_u64(self, indx) \
+ ((u64)(hw_atl_pcs_ptp_clock_get(self, indx) & 0xffff))
+
+static void hw_atl_b0_get_ptp_ts(struct aq_hw_s *self, u64 *stamp)
+{
+ u64 ns;
+
+ hw_atl_pcs_ptp_clock_read_enable(self, 1);
+ hw_atl_pcs_ptp_clock_read_enable(self, 0);
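+	/* Toggling read-enable appears to latch a clock snapshot: regs 0/1
+	 * then hold the seconds halves and regs 3/4 the nanosecond halves.
+	 */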
+ ns = (get_ptp_ts_val_u64(self, 0) +
+ (get_ptp_ts_val_u64(self, 1) << 16)) * NSEC_PER_SEC +
+ (get_ptp_ts_val_u64(self, 3) +
+ (get_ptp_ts_val_u64(self, 4) << 16));
+
+ *stamp = ns + self->ptp_clk_offset;
+}
+
+static void hw_atl_b0_adj_params_get(u64 freq, s64 adj, u32 *ns, u32 *fns)
+{
+	/* For accuracy, the value is scaled up before the division */
+ s64 base_ns = ((adj + NSEC_PER_SEC) * NSEC_PER_SEC);
+ u64 nsi_frac = 0;
+ u64 nsi;
+
+ base_ns = div64_s64(base_ns, freq);
+ nsi = div64_u64(base_ns, NSEC_PER_SEC);
+
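+	/* Whole nanoseconds go to *ns; any remainder is converted into
+	 * FRAC_PER_NS fractional-ns units for *fns.
+	 */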
+ if (base_ns != nsi * NSEC_PER_SEC) {
+ s64 divisor = div64_s64((s64)NSEC_PER_SEC * NSEC_PER_SEC,
+ base_ns - nsi * NSEC_PER_SEC);
+ nsi_frac = div64_s64(FRAC_PER_NS * NSEC_PER_SEC, divisor);
+ }
+
+ *ns = (u32)nsi;
+ *fns = (u32)nsi_frac;
+}
+
+static void
+hw_atl_b0_mac_adj_param_calc(struct hw_fw_request_ptp_adj_freq *ptp_adj_freq,
+ u64 phyfreq, u64 macfreq)
+{
+ s64 adj_fns_val;
+ s64 fns_in_sec_phy = phyfreq * (ptp_adj_freq->fns_phy +
+ FRAC_PER_NS * ptp_adj_freq->ns_phy);
+ s64 fns_in_sec_mac = macfreq * (ptp_adj_freq->fns_mac +
+ FRAC_PER_NS * ptp_adj_freq->ns_mac);
+ s64 fault_in_sec_phy = FRAC_PER_NS * NSEC_PER_SEC - fns_in_sec_phy;
+ s64 fault_in_sec_mac = FRAC_PER_NS * NSEC_PER_SEC - fns_in_sec_mac;
+ /* MAC MCP counter freq is macfreq / 4 */
+ s64 diff_in_mcp_overflow = (fault_in_sec_mac - fault_in_sec_phy) *
+ 4 * FRAC_PER_NS;
+
+ diff_in_mcp_overflow = div64_s64(diff_in_mcp_overflow,
+ AQ_HW_MAC_COUNTER_HZ);
+ adj_fns_val = (ptp_adj_freq->fns_mac + FRAC_PER_NS *
+ ptp_adj_freq->ns_mac) + diff_in_mcp_overflow;
+
+ ptp_adj_freq->mac_ns_adj = div64_s64(adj_fns_val, FRAC_PER_NS);
+ ptp_adj_freq->mac_fns_adj = adj_fns_val - ptp_adj_freq->mac_ns_adj *
+ FRAC_PER_NS;
+}
+
+static int hw_atl_b0_adj_sys_clock(struct aq_hw_s *self, s64 delta)
+{
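+	/* Adjustments are accumulated in a software offset; the hardware
+	 * counter itself is left untouched.
+	 */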
+ self->ptp_clk_offset += delta;
+
+ return 0;
+}
+
+static int hw_atl_b0_set_sys_clock(struct aq_hw_s *self, u64 time, u64 ts)
+{
+ s64 delta = time - (self->ptp_clk_offset + ts);
+
+ return hw_atl_b0_adj_sys_clock(self, delta);
+}
+
+static int hw_atl_b0_ts_to_sys_clock(struct aq_hw_s *self, u64 ts, u64 *time)
+{
+ *time = self->ptp_clk_offset + ts;
+ return 0;
+}
+
+static int hw_atl_b0_adj_clock_freq(struct aq_hw_s *self, s32 ppb)
+{
+ struct hw_fw_request_iface fwreq;
+ size_t size;
+
+ memset(&fwreq, 0, sizeof(fwreq));
+
+ fwreq.msg_id = HW_AQ_FW_REQUEST_PTP_ADJ_FREQ;
+ hw_atl_b0_adj_params_get(AQ_HW_MAC_COUNTER_HZ, ppb,
+ &fwreq.ptp_adj_freq.ns_mac,
+ &fwreq.ptp_adj_freq.fns_mac);
+ hw_atl_b0_adj_params_get(AQ_HW_PHY_COUNTER_HZ, ppb,
+ &fwreq.ptp_adj_freq.ns_phy,
+ &fwreq.ptp_adj_freq.fns_phy);
+ hw_atl_b0_mac_adj_param_calc(&fwreq.ptp_adj_freq,
+ AQ_HW_PHY_COUNTER_HZ,
+ AQ_HW_MAC_COUNTER_HZ);
+
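+	/* Only msg_id plus the active union member is sent to firmware,
+	 * not the whole request interface struct.
+	 */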
+ size = sizeof(fwreq.msg_id) + sizeof(fwreq.ptp_adj_freq);
+ return self->aq_fw_ops->send_fw_request(self, &fwreq, size);
+}
+
+static int hw_atl_b0_gpio_pulse(struct aq_hw_s *self, u32 index,
+ u64 start, u32 period)
+{
+ struct hw_fw_request_iface fwreq;
+ size_t size;
+
+ memset(&fwreq, 0, sizeof(fwreq));
+
+ fwreq.msg_id = HW_AQ_FW_REQUEST_PTP_GPIO_CTRL;
+ fwreq.ptp_gpio_ctrl.index = index;
+ fwreq.ptp_gpio_ctrl.period = period;
+ /* Apply time offset */
+ fwreq.ptp_gpio_ctrl.start = start - self->ptp_clk_offset;
+
+ size = sizeof(fwreq.msg_id) + sizeof(fwreq.ptp_gpio_ctrl);
+ return self->aq_fw_ops->send_fw_request(self, &fwreq, size);
+}
+
+static int hw_atl_b0_extts_gpio_enable(struct aq_hw_s *self, u32 index,
+ u32 enable)
+{
+ /* Enable/disable Sync1588 GPIO Timestamping */
+ aq_phy_write_reg(self, MDIO_MMD_PCS, 0xc611, enable ? 0x71 : 0);
+
+ return 0;
+}
+
+static int hw_atl_b0_get_sync_ts(struct aq_hw_s *self, u64 *ts)
+{
+ u64 sec_l;
+ u64 sec_h;
+ u64 nsec_l;
+ u64 nsec_h;
+
+ if (!ts)
+ return -1;
+
+ /* PTP external GPIO clock seconds count 15:0 */
+ sec_l = aq_phy_read_reg(self, MDIO_MMD_PCS, 0xc914);
+ /* PTP external GPIO clock seconds count 31:16 */
+ sec_h = aq_phy_read_reg(self, MDIO_MMD_PCS, 0xc915);
+ /* PTP external GPIO clock nanoseconds count 15:0 */
+ nsec_l = aq_phy_read_reg(self, MDIO_MMD_PCS, 0xc916);
+ /* PTP external GPIO clock nanoseconds count 31:16 */
+ nsec_h = aq_phy_read_reg(self, MDIO_MMD_PCS, 0xc917);
+
+ *ts = (nsec_h << 16) + nsec_l + ((sec_h << 16) + sec_l) * NSEC_PER_SEC;
+
+ return 0;
+}
+
+static u16 hw_atl_b0_rx_extract_ts(struct aq_hw_s *self, u8 *p,
+ unsigned int len, u64 *timestamp)
+{
+ unsigned int offset = 14;
+ struct ethhdr *eth;
+ __be64 sec;
+ __be32 ns;
+ u8 *ptr;
+
+ if (len <= offset || !timestamp)
+ return 0;
+
+	/* The TIMESTAMP at the end of the packet has the following format:
+ * (big-endian)
+ * struct {
+ * uint64_t sec;
+ * uint32_t ns;
+ * uint16_t stream_id;
+ * };
+ */
+ ptr = p + (len - offset);
+ memcpy(&sec, ptr, sizeof(sec));
+ ptr += sizeof(sec);
+ memcpy(&ns, ptr, sizeof(ns));
+
+ *timestamp = (be64_to_cpu(sec) & 0xffffffffffffllu) * NSEC_PER_SEC +
+ be32_to_cpu(ns) + self->ptp_clk_offset;
+
+ eth = (struct ethhdr *)p;
+
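+	/* The trailer is 14 bytes; for L2 PTP (ETH_P_1588) frames only 12
+	 * are reported as strippable, leaving the stream_id in place.
+	 */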
+ return (eth->h_proto == htons(ETH_P_1588)) ? 12 : 14;
+}
+
+static int hw_atl_b0_extract_hwts(struct aq_hw_s *self, u8 *p, unsigned int len,
+ u64 *timestamp)
+{
+ struct hw_atl_rxd_hwts_wb_s *hwts_wb = (struct hw_atl_rxd_hwts_wb_s *)p;
+ u64 tmp, sec, ns;
+
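+	/* Reassemble the 48-bit seconds counter scattered across the
+	 * writeback: sec_lw0[11:2], sec_lw1[31:16], sec_hw[11:0] and
+	 * sec_hw[31:22].
+	 */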
+ sec = 0;
+ tmp = (hwts_wb->sec_lw0 >> 2) & 0x3ff;
+ sec += tmp;
+ tmp = (u64)((hwts_wb->sec_lw1 >> 16) & 0xffff) << 10;
+ sec += tmp;
+ tmp = (u64)(hwts_wb->sec_hw & 0xfff) << 26;
+ sec += tmp;
+ tmp = (u64)((hwts_wb->sec_hw >> 22) & 0x3ff) << 38;
+ sec += tmp;
+ ns = sec * NSEC_PER_SEC + hwts_wb->ns;
+ if (timestamp)
+ *timestamp = ns + self->ptp_clk_offset;
+ return 0;
+}
+
static int hw_atl_b0_hw_fl3l4_clear(struct aq_hw_s *self,
struct aq_rx_filter_l3l4 *data)
{
@@ -1038,7 +1337,8 @@ static int hw_atl_b0_hw_fl3l4_set(struct aq_hw_s *self,
hw_atl_b0_hw_fl3l4_clear(self, data);
- if (data->cmd) {
+ if (data->cmd & (HW_ATL_RX_ENABLE_CMP_DEST_ADDR_L3 |
+ HW_ATL_RX_ENABLE_CMP_SRC_ADDR_L3)) {
if (!data->is_ipv6) {
hw_atl_rpfl3l4_ipv4_dest_addr_set(self,
location,
@@ -1055,8 +1355,13 @@ static int hw_atl_b0_hw_fl3l4_set(struct aq_hw_s *self,
data->ip_src);
}
}
- hw_atl_rpf_l4_dpd_set(self, data->p_dst, location);
- hw_atl_rpf_l4_spd_set(self, data->p_src, location);
+
+ if (data->cmd & (HW_ATL_RX_ENABLE_CMP_DEST_PORT_L4 |
+ HW_ATL_RX_ENABLE_CMP_SRC_PORT_L4)) {
+ hw_atl_rpf_l4_dpd_set(self, data->p_dst, location);
+ hw_atl_rpf_l4_spd_set(self, data->p_src, location);
+ }
+
hw_atl_rpfl3l4_cmd_set(self, location, data->cmd);
return aq_hw_err_from_flags(self);
@@ -1141,6 +1446,31 @@ static int hw_atl_b0_hw_vlan_ctrl(struct aq_hw_s *self, bool enable)
return aq_hw_err_from_flags(self);
}
+static int hw_atl_b0_set_loopback(struct aq_hw_s *self, u32 mode, bool enable)
+{
+ switch (mode) {
+ case AQ_HW_LOOPBACK_DMA_SYS:
+ hw_atl_tpb_tx_dma_sys_lbk_en_set(self, enable);
+ hw_atl_rpb_dma_sys_lbk_set(self, enable);
+ break;
+ case AQ_HW_LOOPBACK_PKT_SYS:
+ hw_atl_tpo_tx_pkt_sys_lbk_en_set(self, enable);
+ hw_atl_rpf_tpo_to_rpf_sys_lbk_set(self, enable);
+ break;
+ case AQ_HW_LOOPBACK_DMA_NET:
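+		/* Net-level loopback also opens the filters and ungates the
+		 * TX clock; otherwise looped-back frames would not reach the
+		 * RX path.
+		 */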
+ hw_atl_rpf_vlan_prom_mode_en_set(self, enable);
+ hw_atl_rpfl2promiscuous_mode_en_set(self, enable);
+ hw_atl_tpb_tx_tx_clk_gate_en_set(self, !enable);
+ hw_atl_tpb_tx_dma_net_lbk_en_set(self, enable);
+ hw_atl_rpb_dma_net_lbk_set(self, enable);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
const struct aq_hw_ops hw_atl_ops_b0 = {
.hw_set_mac_address = hw_atl_b0_hw_mac_addr_set,
.hw_init = hw_atl_b0_hw_init,
@@ -1177,6 +1507,27 @@ const struct aq_hw_ops hw_atl_ops_b0 = {
.hw_get_regs = hw_atl_utils_hw_get_regs,
.hw_get_hw_stats = hw_atl_utils_get_hw_stats,
.hw_get_fw_version = hw_atl_utils_get_fw_version,
- .hw_set_offload = hw_atl_b0_hw_offload_set,
- .hw_set_fc = hw_atl_b0_set_fc,
+
+ .hw_tx_tc_mode_get = hw_atl_b0_tx_tc_mode_get,
+ .hw_rx_tc_mode_get = hw_atl_b0_rx_tc_mode_get,
+
+ .hw_ring_hwts_rx_fill = hw_atl_b0_hw_ring_hwts_rx_fill,
+ .hw_ring_hwts_rx_receive = hw_atl_b0_hw_ring_hwts_rx_receive,
+
+ .hw_get_ptp_ts = hw_atl_b0_get_ptp_ts,
+ .hw_adj_sys_clock = hw_atl_b0_adj_sys_clock,
+ .hw_set_sys_clock = hw_atl_b0_set_sys_clock,
+ .hw_ts_to_sys_clock = hw_atl_b0_ts_to_sys_clock,
+ .hw_adj_clock_freq = hw_atl_b0_adj_clock_freq,
+ .hw_gpio_pulse = hw_atl_b0_gpio_pulse,
+ .hw_extts_gpio_enable = hw_atl_b0_extts_gpio_enable,
+ .hw_get_sync_ts = hw_atl_b0_get_sync_ts,
+ .rx_extract_ts = hw_atl_b0_rx_extract_ts,
+ .extract_hwts = hw_atl_b0_extract_hwts,
+ .hw_set_offload = hw_atl_b0_hw_offload_set,
+ .hw_set_loopback = hw_atl_b0_set_loopback,
+ .hw_set_fc = hw_atl_b0_set_fc,
};
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
index 808d8cd4252a..7ab23a1751d3 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* aQuantia Corporation Network Driver
- * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
*/
/* File hw_atl_b0_internal.h: Definition of Atlantic B0 chip specific
@@ -64,8 +64,11 @@
#define HW_ATL_B0_MPI_SPEED_MSK 0xFFFFU
#define HW_ATL_B0_MPI_SPEED_SHIFT 16U
-#define HW_ATL_B0_TXBUF_MAX 160U
-#define HW_ATL_B0_RXBUF_MAX 320U
+#define HW_ATL_B0_TXBUF_MAX 160U
+#define HW_ATL_B0_PTP_TXBUF_SIZE 8U
+
+#define HW_ATL_B0_RXBUF_MAX 320U
+#define HW_ATL_B0_PTP_RXBUF_SIZE 16U
#define HW_ATL_B0_RSS_REDIRECTION_MAX 64U
#define HW_ATL_B0_RSS_REDIRECTION_BITS 3U
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
index 6f340695e6bd..d1f68fc16291 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* aQuantia Corporation Network Driver
- * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
*/
/* File hw_atl_llh.c: Definitions of bitfield and register access functions for
@@ -563,6 +563,13 @@ void hw_atl_rpb_dma_sys_lbk_set(struct aq_hw_s *aq_hw, u32 dma_sys_lbk)
HW_ATL_RPB_DMA_SYS_LBK_SHIFT, dma_sys_lbk);
}
+void hw_atl_rpb_dma_net_lbk_set(struct aq_hw_s *aq_hw, u32 dma_net_lbk)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_DMA_NET_LBK_ADR,
+ HW_ATL_RPB_DMA_NET_LBK_MSK,
+ HW_ATL_RPB_DMA_NET_LBK_SHIFT, dma_net_lbk);
+}
+
void hw_atl_rpb_rpf_rx_traf_class_mode_set(struct aq_hw_s *aq_hw,
u32 rx_traf_class_mode)
{
@@ -572,6 +579,13 @@ void hw_atl_rpb_rpf_rx_traf_class_mode_set(struct aq_hw_s *aq_hw,
rx_traf_class_mode);
}
+u32 hw_atl_rpb_rpf_rx_traf_class_mode_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg_bit(aq_hw, HW_ATL_RPB_RPF_RX_TC_MODE_ADR,
+ HW_ATL_RPB_RPF_RX_TC_MODE_MSK,
+ HW_ATL_RPB_RPF_RX_TC_MODE_SHIFT);
+}
+
void hw_atl_rpb_rx_buff_en_set(struct aq_hw_s *aq_hw, u32 rx_buff_en)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_RX_BUF_EN_ADR,
@@ -636,8 +650,8 @@ void hw_atl_rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
rx_pkt_buff_size_per_tc);
}
-void hw_atl_rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw, u32 rx_xoff_en_per_tc,
- u32 buffer)
+void hw_atl_rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 rx_xoff_en_per_tc, u32 buffer)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_RXBXOFF_EN_ADR(buffer),
HW_ATL_RPB_RXBXOFF_EN_MSK,
@@ -1290,6 +1304,13 @@ void hw_atl_tpb_tx_buff_en_set(struct aq_hw_s *aq_hw, u32 tx_buff_en)
HW_ATL_TPB_TX_BUF_EN_SHIFT, tx_buff_en);
}
+u32 hw_atl_rpb_tps_tx_tc_mode_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg_bit(aq_hw, HW_ATL_TPB_TX_TC_MODE_ADDR,
+ HW_ATL_TPB_TX_TC_MODE_MSK,
+ HW_ATL_TPB_TX_TC_MODE_SHIFT);
+}
+
void hw_atl_rpb_tps_tx_tc_mode_set(struct aq_hw_s *aq_hw,
u32 tx_traf_class_mode)
{
@@ -1327,7 +1348,26 @@ void hw_atl_tpb_tx_dma_sys_lbk_en_set(struct aq_hw_s *aq_hw, u32 tx_dma_sys_lbk_
tx_dma_sys_lbk_en);
}
+void hw_atl_tpb_tx_dma_net_lbk_en_set(struct aq_hw_s *aq_hw,
+ u32 tx_dma_net_lbk_en)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_DMA_NET_LBK_ADR,
+ HW_ATL_TPB_DMA_NET_LBK_MSK,
+ HW_ATL_TPB_DMA_NET_LBK_SHIFT,
+ tx_dma_net_lbk_en);
+}
+
+void hw_atl_tpb_tx_tx_clk_gate_en_set(struct aq_hw_s *aq_hw,
+ u32 tx_clk_gate_en)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_TX_CLK_GATE_EN_ADR,
+ HW_ATL_TPB_TX_CLK_GATE_EN_MSK,
+ HW_ATL_TPB_TX_CLK_GATE_EN_SHIFT,
+ tx_clk_gate_en);
+}
+
void hw_atl_tpb_tx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
u32 tx_pkt_buff_size_per_tc, u32 buffer)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_TXBBUF_SIZE_ADR(buffer),
@@ -1526,6 +1566,20 @@ void hw_atl_reg_glb_cpu_scratch_scp_set(struct aq_hw_s *aq_hw,
glb_cpu_scratch_scp);
}
+void hw_atl_pcs_ptp_clock_read_enable(struct aq_hw_s *aq_hw,
+ u32 ptp_clock_read_enable)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_PCS_PTP_CLOCK_READ_ENABLE_ADR,
+ HW_ATL_PCS_PTP_CLOCK_READ_ENABLE_MSK,
+ HW_ATL_PCS_PTP_CLOCK_READ_ENABLE_SHIFT,
+ ptp_clock_read_enable);
+}
+
+u32 hw_atl_pcs_ptp_clock_get(struct aq_hw_s *aq_hw, u32 index)
+{
+ return aq_hw_read_reg(aq_hw, HW_ATL_PCS_PTP_TS_VAL_ADDR(index));
+}
+
void hw_atl_mcp_up_force_intr_set(struct aq_hw_s *aq_hw, u32 up_force_intr)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_MCP_UP_FORCE_INTERRUPT_ADR,
@@ -1616,6 +1670,11 @@ u32 hw_atl_sem_ram_get(struct aq_hw_s *self)
return hw_atl_reg_glb_cpu_sem_get(self, HW_ATL_FW_SM_RAM);
}
+u32 hw_atl_sem_mdio_get(struct aq_hw_s *self)
+{
+ return hw_atl_reg_glb_cpu_sem_get(self, HW_ATL_FW_SM_MDIO);
+}
+
u32 hw_atl_scrpad_get(struct aq_hw_s *aq_hw, u32 scratch_scp)
{
return aq_hw_read_reg(aq_hw,
@@ -1631,3 +1690,60 @@ u32 hw_atl_scrpad25_get(struct aq_hw_s *self)
{
return hw_atl_scrpad_get(self, 0x18);
}
+
+void hw_atl_glb_mdio_iface1_set(struct aq_hw_s *aq_hw, u32 value)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_GLB_MDIO_IFACE_N_ADR(1), value);
+}
+
+u32 hw_atl_glb_mdio_iface1_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg(aq_hw, HW_ATL_GLB_MDIO_IFACE_N_ADR(1));
+}
+
+void hw_atl_glb_mdio_iface2_set(struct aq_hw_s *aq_hw, u32 value)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_GLB_MDIO_IFACE_N_ADR(2), value);
+}
+
+u32 hw_atl_glb_mdio_iface2_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg(aq_hw, HW_ATL_GLB_MDIO_IFACE_N_ADR(2));
+}
+
+void hw_atl_glb_mdio_iface3_set(struct aq_hw_s *aq_hw, u32 value)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_GLB_MDIO_IFACE_N_ADR(3), value);
+}
+
+u32 hw_atl_glb_mdio_iface3_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg(aq_hw, HW_ATL_GLB_MDIO_IFACE_N_ADR(3));
+}
+
+void hw_atl_glb_mdio_iface4_set(struct aq_hw_s *aq_hw, u32 value)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_GLB_MDIO_IFACE_N_ADR(4), value);
+}
+
+u32 hw_atl_glb_mdio_iface4_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg(aq_hw, HW_ATL_GLB_MDIO_IFACE_N_ADR(4));
+}
+
+void hw_atl_glb_mdio_iface5_set(struct aq_hw_s *aq_hw, u32 value)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_GLB_MDIO_IFACE_N_ADR(5), value);
+}
+
+u32 hw_atl_glb_mdio_iface5_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg(aq_hw, HW_ATL_GLB_MDIO_IFACE_N_ADR(5));
+}
+
+u32 hw_atl_mdio_busy_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg_bit(aq_hw, HW_ATL_MDIO_BUSY_ADR,
+ HW_ATL_MDIO_BUSY_MSK,
+ HW_ATL_MDIO_BUSY_SHIFT);
+}
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
index c3ee278c3747..62992b23c0e8 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* aQuantia Corporation Network Driver
- * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
*/
/* File hw_atl_llh.h: Declarations of bitfield and register access functions for
@@ -288,10 +288,16 @@ void hw_atl_reg_glb_cpu_scratch_scp_set(struct aq_hw_s *aq_hw,
/* set dma system loopback */
void hw_atl_rpb_dma_sys_lbk_set(struct aq_hw_s *aq_hw, u32 dma_sys_lbk);
+/* set dma network loopback */
+void hw_atl_rpb_dma_net_lbk_set(struct aq_hw_s *aq_hw, u32 dma_net_lbk);
+
/* set rx traffic class mode */
void hw_atl_rpb_rpf_rx_traf_class_mode_set(struct aq_hw_s *aq_hw,
u32 rx_traf_class_mode);
+/* get rx traffic class mode */
+u32 hw_atl_rpb_rpf_rx_traf_class_mode_get(struct aq_hw_s *aq_hw);
+
/* set rx buffer enable */
void hw_atl_rpb_rx_buff_en_set(struct aq_hw_s *aq_hw, u32 rx_buff_en);
@@ -306,7 +312,8 @@ void hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
u32 buffer);
/* set rx flow control mode */
-void hw_atl_rpb_rx_flow_ctl_mode_set(struct aq_hw_s *aq_hw, u32 rx_flow_ctl_mode);
+void hw_atl_rpb_rx_flow_ctl_mode_set(struct aq_hw_s *aq_hw,
+ u32 rx_flow_ctl_mode);
/* set rx packet buffer size (per tc) */
void hw_atl_rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
@@ -320,7 +327,8 @@ void hw_atl_rdm_rx_dma_desc_cache_init_tgl(struct aq_hw_s *aq_hw);
u32 hw_atl_rdm_rx_dma_desc_cache_init_done_get(struct aq_hw_s *aq_hw);
/* set rx xoff enable (per tc) */
-void hw_atl_rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw, u32 rx_xoff_en_per_tc,
+void hw_atl_rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 rx_xoff_en_per_tc,
u32 buffer);
/* rpf */
@@ -605,6 +613,9 @@ void hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(struct aq_hw_s *aq_hw,
void hw_atl_rpb_tps_tx_tc_mode_set(struct aq_hw_s *aq_hw,
u32 tx_traf_class_mode);
+/* get TX Traffic Class Mode */
+u32 hw_atl_rpb_tps_tx_tc_mode_get(struct aq_hw_s *aq_hw);
+
/* set tx buffer enable */
void hw_atl_tpb_tx_buff_en_set(struct aq_hw_s *aq_hw, u32 tx_buff_en);
@@ -621,9 +632,18 @@ void hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
/* set tx dma system loopback enable */
void hw_atl_tpb_tx_dma_sys_lbk_en_set(struct aq_hw_s *aq_hw, u32 tx_dma_sys_lbk_en);
+/* set tx dma network loopback enable */
+void hw_atl_tpb_tx_dma_net_lbk_en_set(struct aq_hw_s *aq_hw,
+ u32 tx_dma_net_lbk_en);
+
+/* set tx clock gating enable */
+void hw_atl_tpb_tx_tx_clk_gate_en_set(struct aq_hw_s *aq_hw,
+ u32 tx_clk_gate_en);
+
/* set tx packet buffer size (per tc) */
void hw_atl_tpb_tx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
- u32 tx_pkt_buff_size_per_tc, u32 buffer);
+ u32 tx_pkt_buff_size_per_tc,
+ u32 buffer);
/* set tx path pad insert enable */
void hw_atl_tpb_tx_path_scp_ins_en_set(struct aq_hw_s *aq_hw, u32 tx_path_scp_ins_en);
@@ -715,6 +735,12 @@ void hw_atl_msm_reg_wr_strobe_set(struct aq_hw_s *aq_hw, u32 reg_wr_strobe);
/* set pci register reset disable */
void hw_atl_pci_pci_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 pci_reg_res_dis);
+/* pcs */
+void hw_atl_pcs_ptp_clock_read_enable(struct aq_hw_s *aq_hw,
+ u32 ptp_clock_read_enable);
+
+u32 hw_atl_pcs_ptp_clock_get(struct aq_hw_s *aq_hw, u32 index);
+
/* set uP Force Interrupt */
void hw_atl_mcp_up_force_intr_set(struct aq_hw_s *aq_hw, u32 up_force_intr);
@@ -752,9 +778,44 @@ void hw_atl_rpfl3l4_ipv6_src_addr_set(struct aq_hw_s *aq_hw, u8 location,
void hw_atl_rpfl3l4_ipv6_dest_addr_set(struct aq_hw_s *aq_hw, u8 location,
u32 *ipv6_dest);
+/* set Global MDIO Interface 1 */
+void hw_atl_glb_mdio_iface1_set(struct aq_hw_s *hw, u32 value);
+
+/* get Global MDIO Interface 1 */
+u32 hw_atl_glb_mdio_iface1_get(struct aq_hw_s *hw);
+
+/* set Global MDIO Interface 2 */
+void hw_atl_glb_mdio_iface2_set(struct aq_hw_s *hw, u32 value);
+
+/* get Global MDIO Interface 2 */
+u32 hw_atl_glb_mdio_iface2_get(struct aq_hw_s *hw);
+
+/* set Global MDIO Interface 3 */
+void hw_atl_glb_mdio_iface3_set(struct aq_hw_s *hw, u32 value);
+
+/* get Global MDIO Interface 3 */
+u32 hw_atl_glb_mdio_iface3_get(struct aq_hw_s *hw);
+
+/* set Global MDIO Interface 4 */
+void hw_atl_glb_mdio_iface4_set(struct aq_hw_s *hw, u32 value);
+
+/* get Global MDIO Interface 4 */
+u32 hw_atl_glb_mdio_iface4_get(struct aq_hw_s *hw);
+
+/* set Global MDIO Interface 5 */
+void hw_atl_glb_mdio_iface5_set(struct aq_hw_s *hw, u32 value);
+
+/* get Global MDIO Interface 5 */
+u32 hw_atl_glb_mdio_iface5_get(struct aq_hw_s *hw);
+
+u32 hw_atl_mdio_busy_get(struct aq_hw_s *aq_hw);
+
/* get global microprocessor ram semaphore */
u32 hw_atl_sem_ram_get(struct aq_hw_s *self);
+/* get global microprocessor mdio semaphore */
+u32 hw_atl_sem_mdio_get(struct aq_hw_s *self);
+
/* get global microprocessor scratch pad register */
u32 hw_atl_scrpad_get(struct aq_hw_s *aq_hw, u32 scratch_scp);
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
index 35887ad89025..18de2f7b8959 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* aQuantia Corporation Network Driver
- * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
*/
/* File hw_atl_llh_internal.h: Preprocessor definitions
@@ -554,6 +554,24 @@
/* default value of bitfield dma_sys_loopback */
#define HW_ATL_RPB_DMA_SYS_LBK_DEFAULT 0x0
+/* rx dma_net_loopback bitfield definitions
+ * preprocessor definitions for the bitfield "dma_net_loopback".
+ * port="pif_rpb_dma_net_lbk_i"
+ */
+
+/* register address for bitfield dma_net_loopback */
+#define HW_ATL_RPB_DMA_NET_LBK_ADR 0x00005000
+/* bitmask for bitfield dma_net_loopback */
+#define HW_ATL_RPB_DMA_NET_LBK_MSK 0x00000010
+/* inverted bitmask for bitfield dma_net_loopback */
+#define HW_ATL_RPB_DMA_NET_LBK_MSKN 0xffffffef
+/* lower bit position of bitfield dma_net_loopback */
+#define HW_ATL_RPB_DMA_NET_LBK_SHIFT 4
+/* width of bitfield dma_net_loopback */
+#define HW_ATL_RPB_DMA_NET_LBK_WIDTH 1
+/* default value of bitfield dma_net_loopback */
+#define HW_ATL_RPB_DMA_NET_LBK_DEFAULT 0x0
+
/* rx rx_tc_mode bitfield definitions
* preprocessor definitions for the bitfield "rx_tc_mode".
* port="pif_rpb_rx_tc_mode_i,pif_rpf_rx_tc_mode_i"
@@ -1308,6 +1326,52 @@
/* default value of bitfield et_val{f}[f:0] */
#define HW_ATL_RPF_ET_VALF_DEFAULT 0x0
+/* RX l3_l4_en{F} Bitfield Definitions
+ * Preprocessor definitions for the bitfield "l3_l4_en{F}".
+ * Parameter: filter {F} | stride size 0x4 | range [0, 7]
+ * PORT="pif_rpf_l3_l4_en_i[0]"
+ */
+
+#define HW_ATL_RPF_L3_REG_CTRL_ADR(filter) (0x00005380 + (filter) * 0x4)
+
+/* RX rpf_l3_sa{D}[1F:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "l3_sa{D}[1F:0]".
+ * Parameter: location {D} | stride size 0x4 | range [0, 7]
+ * PORT="pif_rpf_l3_sa0_i[31:0]"
+ */
+
+/* Register address for bitfield pif_rpf_l3_sa0_i[31:0] */
+#define HW_ATL_RPF_L3_SRCA_ADR(filter) (0x000053B0 + (filter) * 0x4)
+/* Bitmask for bitfield l3_sa0[1F:0] */
+#define HW_ATL_RPF_L3_SRCA_MSK 0xFFFFFFFFu
+/* Inverted bitmask for bitfield l3_sa0[1F:0] */
+#define HW_ATL_RPF_L3_SRCA_MSKN 0xFFFFFFFFu
+/* Lower bit position of bitfield l3_sa0[1F:0] */
+#define HW_ATL_RPF_L3_SRCA_SHIFT 0
+/* Width of bitfield l3_sa0[1F:0] */
+#define HW_ATL_RPF_L3_SRCA_WIDTH 32
+/* Default value of bitfield l3_sa0[1F:0] */
+#define HW_ATL_RPF_L3_SRCA_DEFAULT 0x0
+
+/* RX rpf_l3_da{D}[1F:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "l3_da{D}[1F:0]".
+ * Parameter: location {D} | stride size 0x4 | range [0, 7]
+ * PORT="pif_rpf_l3_da0_i[31:0]"
+ */
+
+ /* Register address for bitfield pif_rpf_l3_da0_i[31:0] */
+#define HW_ATL_RPF_L3_DSTA_ADR(filter) (0x000053B0 + (filter) * 0x4)
+/* Bitmask for bitfield l3_da0[1F:0] */
+#define HW_ATL_RPF_L3_DSTA_MSK 0xFFFFFFFFu
+/* Inverted bitmask for bitfield l3_da0[1F:0] */
+#define HW_ATL_RPF_L3_DSTA_MSKN 0xFFFFFFFFu
+/* Lower bit position of bitfield l3_da0[1F:0] */
+#define HW_ATL_RPF_L3_DSTA_SHIFT 0
+/* Width of bitfield l3_da0[1F:0] */
+#define HW_ATL_RPF_L3_DSTA_WIDTH 32
+/* Default value of bitfield l3_da0[1F:0] */
+#define HW_ATL_RPF_L3_DSTA_DEFAULT 0x0
+
/* RX l4_sp{D}[F:0] Bitfield Definitions
* Preprocessor definitions for the bitfield "l4_sp{D}[F:0]".
* Parameter: srcport {D} | stride size 0x4 | range [0, 7]
@@ -2061,6 +2125,24 @@
/* default value of bitfield dma_sys_loopback */
#define HW_ATL_TPB_DMA_SYS_LBK_DEFAULT 0x0
+/* tx dma_net_loopback bitfield definitions
+ * preprocessor definitions for the bitfield "dma_net_loopback".
+ * port="pif_tpb_dma_net_lbk_i"
+ */
+
+/* register address for bitfield dma_net_loopback */
+#define HW_ATL_TPB_DMA_NET_LBK_ADR 0x00007000
+/* bitmask for bitfield dma_net_loopback */
+#define HW_ATL_TPB_DMA_NET_LBK_MSK 0x00000010
+/* inverted bitmask for bitfield dma_net_loopback */
+#define HW_ATL_TPB_DMA_NET_LBK_MSKN 0xffffffef
+/* lower bit position of bitfield dma_net_loopback */
+#define HW_ATL_TPB_DMA_NET_LBK_SHIFT 4
+/* width of bitfield dma_net_loopback */
+#define HW_ATL_TPB_DMA_NET_LBK_WIDTH 1
+/* default value of bitfield dma_net_loopback */
+#define HW_ATL_TPB_DMA_NET_LBK_DEFAULT 0x0
+
/* tx tx{b}_buf_size[7:0] bitfield definitions
* preprocessor definitions for the bitfield "tx{b}_buf_size[7:0]".
* parameter: buffer {b} | stride size 0x10 | range [0, 7]
@@ -2098,6 +2180,24 @@
/* default value of bitfield tx_scp_ins_en */
#define HW_ATL_TPB_TX_SCP_INS_EN_DEFAULT 0x0
+/* tx tx_clk_gate_en bitfield definitions
+ * preprocessor definitions for the bitfield "tx_clk_gate_en".
+ * port="pif_tpb_clk_gate_en_i"
+ */
+
+/* register address for bitfield tx_clk_gate_en */
+#define HW_ATL_TPB_TX_CLK_GATE_EN_ADR 0x00007900
+/* bitmask for bitfield tx_clk_gate_en */
+#define HW_ATL_TPB_TX_CLK_GATE_EN_MSK 0x00000010
+/* inverted bitmask for bitfield tx_clk_gate_en */
+#define HW_ATL_TPB_TX_CLK_GATE_EN_MSKN 0xffffffef
+/* lower bit position of bitfield tx_clk_gate_en */
+#define HW_ATL_TPB_TX_CLK_GATE_EN_SHIFT 4
+/* width of bitfield tx_clk_gate_en */
+#define HW_ATL_TPB_TX_CLK_GATE_EN_WIDTH 1
+/* default value of bitfield tx_clk_gate_en */
+#define HW_ATL_TPB_TX_CLK_GATE_EN_DEFAULT 0x1
+
/* tx ipv4_chk_en bitfield definitions
* preprocessor definitions for the bitfield "ipv4_chk_en".
* port="pif_tpo_ipv4_chk_en_i"
@@ -2440,6 +2540,22 @@
/* default value of bitfield register write strobe */
#define HW_ATL_MSM_REG_WR_STROBE_DEFAULT 0x0
+/* register address for bitfield PTP Digital Clock Read Enable */
+#define HW_ATL_PCS_PTP_CLOCK_READ_ENABLE_ADR 0x00004628
+/* bitmask for bitfield PTP Digital Clock Read Enable */
+#define HW_ATL_PCS_PTP_CLOCK_READ_ENABLE_MSK 0x00000010
+/* inverted bitmask for bitfield PTP Digital Clock Read Enable */
+#define HW_ATL_PCS_PTP_CLOCK_READ_ENABLE_MSKN 0xFFFFFFEF
+/* lower bit position of bitfield PTP Digital Clock Read Enable */
+#define HW_ATL_PCS_PTP_CLOCK_READ_ENABLE_SHIFT 4
+/* width of bitfield PTP Digital Clock Read Enable */
+#define HW_ATL_PCS_PTP_CLOCK_READ_ENABLE_WIDTH 1
+/* default value of bitfield PTP Digital Clock Read Enable */
+#define HW_ATL_PCS_PTP_CLOCK_READ_ENABLE_DEFAULT 0x0
+
+/* register address for ptp counter reading */
+#define HW_ATL_PCS_PTP_TS_VAL_ADDR(index) (0x00004900 + (index) * 0x4)
+
/* mif soft reset bitfield definitions
* preprocessor definitions for the bitfield "soft reset".
* port="pif_glb_res_i"
@@ -2532,50 +2648,121 @@
/* default value of bitfield uP Force Interrupt */
#define HW_ATL_MCP_UP_FORCE_INTERRUPT_DEFAULT 0x0
-#define HW_ATL_RX_CTRL_ADDR_BEGIN_FL3L4 0x00005380
-#define HW_ATL_RX_SRCA_ADDR_BEGIN_FL3L4 0x000053B0
-#define HW_ATL_RX_DESTA_ADDR_BEGIN_FL3L4 0x000053D0
-
-#define HW_ATL_RPF_L3_REG_CTRL_ADR(location) (0x00005380 + (location) * 0x4)
-
-/* RX rpf_l3_sa{D}[1F:0] Bitfield Definitions
- * Preprocessor definitions for the bitfield "l3_sa{D}[1F:0]".
- * Parameter: location {D} | stride size 0x4 | range [0, 7]
- * PORT="pif_rpf_l3_sa0_i[31:0]"
- */
-
-/* Register address for bitfield pif_rpf_l3_sa0_i[31:0] */
-#define HW_ATL_RPF_L3_SRCA_ADR(location) (0x000053B0 + (location) * 0x4)
-/* Bitmask for bitfield l3_sa0[1F:0] */
-#define HW_ATL_RPF_L3_SRCA_MSK 0xFFFFFFFFu
-/* Inverted bitmask for bitfield l3_sa0[1F:0] */
-#define HW_ATL_RPF_L3_SRCA_MSKN 0xFFFFFFFFu
-/* Lower bit position of bitfield l3_sa0[1F:0] */
-#define HW_ATL_RPF_L3_SRCA_SHIFT 0
-/* Width of bitfield l3_sa0[1F:0] */
-#define HW_ATL_RPF_L3_SRCA_WIDTH 32
-/* Default value of bitfield l3_sa0[1F:0] */
-#define HW_ATL_RPF_L3_SRCA_DEFAULT 0x0
-
-/* RX rpf_l3_da{D}[1F:0] Bitfield Definitions
- * Preprocessor definitions for the bitfield "l3_da{D}[1F:0]".
- * Parameter: location {D} | stride size 0x4 | range [0, 7]
- * PORT="pif_rpf_l3_da0_i[31:0]"
- */
-
- /* Register address for bitfield pif_rpf_l3_da0_i[31:0] */
-#define HW_ATL_RPF_L3_DSTA_ADR(location) (0x000053B0 + (location) * 0x4)
-/* Bitmask for bitfield l3_da0[1F:0] */
-#define HW_ATL_RPF_L3_DSTA_MSK 0xFFFFFFFFu
-/* Inverted bitmask for bitfield l3_da0[1F:0] */
-#define HW_ATL_RPF_L3_DSTA_MSKN 0xFFFFFFFFu
-/* Lower bit position of bitfield l3_da0[1F:0] */
-#define HW_ATL_RPF_L3_DSTA_SHIFT 0
-/* Width of bitfield l3_da0[1F:0] */
-#define HW_ATL_RPF_L3_DSTA_WIDTH 32
-/* Default value of bitfield l3_da0[1F:0] */
-#define HW_ATL_RPF_L3_DSTA_DEFAULT 0x0
-
+/* Preprocessor definitions for Global MDIO Interfaces
+ * Address: 0x00000280 + 0x4 * (interface number - 1)
+ */
+#define HW_ATL_GLB_MDIO_IFACE_ADDR_BEGIN 0x00000280u
+
+#define HW_ATL_GLB_MDIO_IFACE_N_ADR(number) \
+ (HW_ATL_GLB_MDIO_IFACE_ADDR_BEGIN + (((number) - 1) * 0x4))
+
+/* MIF MDIO Busy Bitfield Definitions
+ * Preprocessor definitions for the bitfield "MDIO Busy".
+ * PORT="mdio_pif_busy_o"
+ */
+
+/* Register address for bitfield MDIO Busy */
+#define HW_ATL_MDIO_BUSY_ADR 0x00000284
+/* Bitmask for bitfield MDIO Busy */
+#define HW_ATL_MDIO_BUSY_MSK 0x80000000
+/* Inverted bitmask for bitfield MDIO Busy */
+#define HW_ATL_MDIO_BUSY_MSKN 0x7FFFFFFF
+/* Lower bit position of bitfield MDIO Busy */
+#define HW_ATL_MDIO_BUSY_SHIFT 31
+/* Width of bitfield MDIO Busy */
+#define HW_ATL_MDIO_BUSY_WIDTH 1
+
+/* MIF MDIO Execute Operation Bitfield Definitions
+ * Preprocessor definitions for the bitfield "MDIO Execute Operation".
+ * PORT="pif_mdio_op_start_i"
+ */
+
+/* Register address for bitfield MDIO Execute Operation */
+#define HW_ATL_MDIO_EXECUTE_OPERATION_ADR 0x00000284
+/* Bitmask for bitfield MDIO Execute Operation */
+#define HW_ATL_MDIO_EXECUTE_OPERATION_MSK 0x00008000
+/* Inverted bitmask for bitfield MDIO Execute Operation */
+#define HW_ATL_MDIO_EXECUTE_OPERATION_MSKN 0xFFFF7FFF
+/* Lower bit position of bitfield MDIO Execute Operation */
+#define HW_ATL_MDIO_EXECUTE_OPERATION_SHIFT 15
+/* Width of bitfield MDIO Execute Operation */
+#define HW_ATL_MDIO_EXECUTE_OPERATION_WIDTH 1
+/* Default value of bitfield MDIO Execute Operation */
+#define HW_ATL_MDIO_EXECUTE_OPERATION_DEFAULT 0x0
+
+/* MIF Op Mode [1:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "Op Mode [1:0]".
+ * PORT="pif_mdio_mode_i[1:0]"
+ */
+
+/* Register address for bitfield Op Mode [1:0] */
+#define HW_ATL_MDIO_OP_MODE_ADR 0x00000284
+/* Bitmask for bitfield Op Mode [1:0] */
+#define HW_ATL_MDIO_OP_MODE_MSK 0x00003000
+/* Inverted bitmask for bitfield Op Mode [1:0] */
+#define HW_ATL_MDIO_OP_MODE_MSKN 0xFFFFCFFF
+/* Lower bit position of bitfield Op Mode [1:0] */
+#define HW_ATL_MDIO_OP_MODE_SHIFT 12
+/* Width of bitfield Op Mode [1:0] */
+#define HW_ATL_MDIO_OP_MODE_WIDTH 2
+/* Default value of bitfield Op Mode [1:0] */
+#define HW_ATL_MDIO_OP_MODE_DEFAULT 0x0
+
+/* MIF PHY address Bitfield Definitions
+ * Preprocessor definitions for the bitfield "PHY address".
+ * PORT="pif_mdio_phy_addr_i[9:0]"
+ */
+
+/* Register address for bitfield PHY address */
+#define HW_ATL_MDIO_PHY_ADDRESS_ADR 0x00000284
+/* Bitmask for bitfield PHY address */
+#define HW_ATL_MDIO_PHY_ADDRESS_MSK 0x000003FF
+/* Inverted bitmask for bitfield PHY address */
+#define HW_ATL_MDIO_PHY_ADDRESS_MSKN 0xFFFFFC00
+/* Lower bit position of bitfield PHY address */
+#define HW_ATL_MDIO_PHY_ADDRESS_SHIFT 0
+/* Width of bitfield PHY address */
+#define HW_ATL_MDIO_PHY_ADDRESS_WIDTH 10
+/* Default value of bitfield PHY address */
+#define HW_ATL_MDIO_PHY_ADDRESS_DEFAULT 0x0
+
+/* MIF MDIO WriteData [F:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "MDIO WriteData [F:0]".
+ * PORT="pif_mdio_wdata_i[15:0]"
+ */
+
+/* Register address for bitfield MDIO WriteData [F:0] */
+#define HW_ATL_MDIO_WRITE_DATA_ADR 0x00000288
+/* Bitmask for bitfield MDIO WriteData [F:0] */
+#define HW_ATL_MDIO_WRITE_DATA_MSK 0x0000FFFF
+/* Inverted bitmask for bitfield MDIO WriteData [F:0] */
+#define HW_ATL_MDIO_WRITE_DATA_MSKN 0xFFFF0000
+/* Lower bit position of bitfield MDIO WriteData [F:0] */
+#define HW_ATL_MDIO_WRITE_DATA_SHIFT 0
+/* Width of bitfield MDIO WriteData [F:0] */
+#define HW_ATL_MDIO_WRITE_DATA_WIDTH 16
+/* Default value of bitfield MDIO WriteData [F:0] */
+#define HW_ATL_MDIO_WRITE_DATA_DEFAULT 0x0
+
+/* MIF MDIO Address [F:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "MDIO Address [F:0]".
+ * PORT="pif_mdio_addr_i[15:0]"
+ */
+
+/* Register address for bitfield MDIO Address [F:0] */
+#define HW_ATL_MDIO_ADDRESS_ADR 0x0000028C
+/* Bitmask for bitfield MDIO Address [F:0] */
+#define HW_ATL_MDIO_ADDRESS_MSK 0x0000FFFF
+/* Inverted bitmask for bitfield MDIO Address [F:0] */
+#define HW_ATL_MDIO_ADDRESS_MSKN 0xFFFF0000
+/* Lower bit position of bitfield MDIO Address [F:0] */
+#define HW_ATL_MDIO_ADDRESS_SHIFT 0
+/* Width of bitfield MDIO Address [F:0] */
+#define HW_ATL_MDIO_ADDRESS_WIDTH 16
+/* Default value of bitfield MDIO Address [F:0] */
+#define HW_ATL_MDIO_ADDRESS_DEFAULT 0x0
+
+#define HW_ATL_FW_SM_MDIO 0x0U
#define HW_ATL_FW_SM_RAM 0x2U
#endif /* HW_ATL_LLH_INTERNAL_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
index 52646855495e..8910b62e67ed 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* aQuantia Corporation Network Driver
- * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
*/
/* File hw_atl_utils.c: Definition of common functions for Atlantic hardware
@@ -47,6 +47,11 @@
#define FORCE_FLASHLESS 0
+enum mcp_area {
+ MCP_AREA_CONFIG = 0x80000000,
+ MCP_AREA_SETTINGS = 0x20000000,
+};
+
static int hw_atl_utils_ver_match(u32 ver_expected, u32 ver_actual);
static int hw_atl_utils_mpi_set_state(struct aq_hw_s *self,
@@ -87,6 +92,7 @@ int hw_atl_utils_initfw(struct aq_hw_s *self, const struct aq_fw_ops **fw_ops)
}
self->aq_fw_ops = *fw_ops;
err = self->aq_fw_ops->init(self);
+
return err;
}
@@ -237,9 +243,9 @@ static int hw_atl_utils_soft_reset_rbl(struct aq_hw_s *self)
int hw_atl_utils_soft_reset(struct aq_hw_s *self)
{
- int k;
u32 boot_exit_code = 0;
u32 val;
+ int k;
for (k = 0; k < 1000; ++k) {
u32 flb_status = aq_hw_read_reg(self,
@@ -327,11 +333,75 @@ err_exit:
return err;
}
-static int hw_atl_utils_fw_upload_dwords(struct aq_hw_s *self, u32 a, u32 *p,
- u32 cnt)
+static int hw_atl_utils_write_b1_mbox(struct aq_hw_s *self, u32 addr,
+ u32 *p, u32 cnt, enum mcp_area area)
{
+ u32 data_offset = 0;
+ u32 offset = addr;
+ int err = 0;
u32 val;
+
+ switch (area) {
+ case MCP_AREA_CONFIG:
+ offset -= self->rpc_addr;
+ break;
+
+ case MCP_AREA_SETTINGS:
+ offset -= self->settings_addr;
+ break;
+ }
+
+ offset = offset / sizeof(u32);
+
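+	/* B1 mailbox: data word -> 0x328, (area | byte offset) -> 0x32C,
+	 * kick the MCP, then poll scratchpad 12 until the area bits change.
+	 */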
+ for (; data_offset < cnt; ++data_offset, ++offset) {
+ aq_hw_write_reg(self, 0x328, p[data_offset]);
+ aq_hw_write_reg(self, 0x32C,
+ (area | (0xFFFF & (offset * 4))));
+ hw_atl_mcp_up_force_intr_set(self, 1);
+		/* poll up to 1000 times at 10 us intervals (10 ms total) */
+ err = readx_poll_timeout_atomic(hw_atl_scrpad12_get,
+ self, val,
+ (val & 0xF0000000) !=
+ area,
+ 10U, 10000U);
+
+ if (err < 0)
+ break;
+ }
+
+ return err;
+}
+
+static int hw_atl_utils_write_b0_mbox(struct aq_hw_s *self, u32 addr,
+ u32 *p, u32 cnt)
+{
+ u32 offset = 0;
int err = 0;
+ u32 val;
+
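+	/* B0 mailbox: set the target address via 0x208, write each word via
+	 * 0x20C, and start the write with 0xC000 -> 0x200; poll the MIF
+	 * command register until bit 8 clears.
+	 */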
+ aq_hw_write_reg(self, 0x208, addr);
+
+ for (; offset < cnt; ++offset) {
+ aq_hw_write_reg(self, 0x20C, p[offset]);
+ aq_hw_write_reg(self, 0x200, 0xC000);
+
+ err = readx_poll_timeout_atomic(hw_atl_utils_mif_cmd_get,
+ self, val,
+ (val & 0x100) == 0U,
+ 10U, 10000U);
+
+ if (err < 0)
+ break;
+ }
+
+ return err;
+}
+
+static int hw_atl_utils_fw_upload_dwords(struct aq_hw_s *self, u32 addr, u32 *p,
+ u32 cnt, enum mcp_area area)
+{
+ int err = 0;
+ u32 val;
err = readx_poll_timeout_atomic(hw_atl_sem_ram_get, self,
val, val == 1U,
@@ -339,54 +409,47 @@ static int hw_atl_utils_fw_upload_dwords(struct aq_hw_s *self, u32 a, u32 *p,
if (err < 0)
goto err_exit;
- if (IS_CHIP_FEATURE(REVISION_B1)) {
- u32 offset = 0;
-
- for (; offset < cnt; ++offset) {
- aq_hw_write_reg(self, 0x328, p[offset]);
- aq_hw_write_reg(self, 0x32C,
- (0x80000000 | (0xFFFF & (offset * 4))));
- hw_atl_mcp_up_force_intr_set(self, 1);
- /* 1000 times by 10us = 10ms */
- err = readx_poll_timeout_atomic(hw_atl_scrpad12_get,
- self, val,
- (val & 0xF0000000) !=
- 0x80000000,
- 10U, 10000U);
- }
- } else {
- u32 offset = 0;
-
- aq_hw_write_reg(self, 0x208, a);
+ if (IS_CHIP_FEATURE(REVISION_B1))
+ err = hw_atl_utils_write_b1_mbox(self, addr, p, cnt, area);
+ else
+ err = hw_atl_utils_write_b0_mbox(self, addr, p, cnt);
- for (; offset < cnt; ++offset) {
- aq_hw_write_reg(self, 0x20C, p[offset]);
- aq_hw_write_reg(self, 0x200, 0xC000);
+ hw_atl_reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM);
- err = readx_poll_timeout_atomic(hw_atl_utils_mif_cmd_get,
- self, val,
- (val & 0x100) == 0,
- 1000U, 10000U);
- }
- }
+ if (err < 0)
+ goto err_exit;
- hw_atl_reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM);
+ err = aq_hw_err_from_flags(self);
err_exit:
return err;
}
+int hw_atl_write_fwcfg_dwords(struct aq_hw_s *self, u32 *p, u32 cnt)
+{
+ return hw_atl_utils_fw_upload_dwords(self, self->rpc_addr, p,
+ cnt, MCP_AREA_CONFIG);
+}
+
+int hw_atl_write_fwsettings_dwords(struct aq_hw_s *self, u32 offset, u32 *p,
+ u32 cnt)
+{
+ return hw_atl_utils_fw_upload_dwords(self, self->settings_addr + offset,
+ p, cnt, MCP_AREA_SETTINGS);
+}
+
static int hw_atl_utils_ver_match(u32 ver_expected, u32 ver_actual)
{
- int err = 0;
const u32 dw_major_mask = 0xff000000U;
const u32 dw_minor_mask = 0x00ffffffU;
+ int err = 0;
err = (dw_major_mask & (ver_expected ^ ver_actual)) ? -EOPNOTSUPP : 0;
if (err < 0)
goto err_exit;
err = ((dw_minor_mask & ver_expected) > (dw_minor_mask & ver_actual)) ?
-EOPNOTSUPP : 0;
+
err_exit:
return err;
}
@@ -431,17 +494,16 @@ struct aq_hw_atl_utils_fw_rpc_tid_s {
int hw_atl_utils_fw_rpc_call(struct aq_hw_s *self, unsigned int rpc_size)
{
- int err = 0;
struct aq_hw_atl_utils_fw_rpc_tid_s sw;
+ int err = 0;
if (!IS_CHIP_FEATURE(MIPS)) {
err = -1;
goto err_exit;
}
- err = hw_atl_utils_fw_upload_dwords(self, self->rpc_addr,
- (u32 *)(void *)&self->rpc,
- (rpc_size + sizeof(u32) -
- sizeof(u8)) / sizeof(u32));
+ err = hw_atl_write_fwcfg_dwords(self, (u32 *)(void *)&self->rpc,
+ (rpc_size + sizeof(u32) -
+ sizeof(u8)) / sizeof(u32));
if (err < 0)
goto err_exit;
@@ -456,9 +518,9 @@ err_exit:
int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self,
struct hw_atl_utils_fw_rpc **rpc)
{
- int err = 0;
struct aq_hw_atl_utils_fw_rpc_tid_s sw;
struct aq_hw_atl_utils_fw_rpc_tid_s fw;
+ int err = 0;
do {
sw.val = aq_hw_read_reg(self, HW_ATL_RPC_CONTROL_ADR);
@@ -562,10 +624,10 @@ static int hw_atl_utils_mpi_set_speed(struct aq_hw_s *self, u32 speed)
static int hw_atl_utils_mpi_set_state(struct aq_hw_s *self,
enum hal_atl_utils_fw_state_e state)
{
- int err = 0;
- u32 transaction_id = 0;
- struct hw_atl_utils_mbox_header mbox;
u32 val = aq_hw_read_reg(self, HW_ATL_MPI_CONTROL_ADR);
+ struct hw_atl_utils_mbox_header mbox;
+ u32 transaction_id = 0;
+ int err = 0;
if (state == MPI_RESET) {
hw_atl_utils_mpi_read_mbox(self, &mbox);
@@ -593,20 +655,26 @@ static int hw_atl_utils_mpi_set_state(struct aq_hw_s *self,
val |= state & HW_ATL_MPI_STATE_MSK;
aq_hw_write_reg(self, HW_ATL_MPI_CONTROL_ADR, val);
+
err_exit:
return err;
}
int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self)
{
- u32 cp0x036C = hw_atl_utils_mpi_get_state(self);
- u32 link_speed_mask = cp0x036C >> HW_ATL_MPI_SPEED_SHIFT;
struct aq_hw_link_status_s *link_status = &self->aq_link_status;
+ u32 mpi_state;
+ u32 speed;
- if (!link_speed_mask) {
+ mpi_state = hw_atl_utils_mpi_get_state(self);
+ speed = mpi_state & (FW2X_RATE_100M | FW2X_RATE_1G |
+ FW2X_RATE_2G5 | FW2X_RATE_5G |
+ FW2X_RATE_10G);
+
+ if (!speed) {
link_status->mbps = 0U;
} else {
- switch (link_speed_mask) {
+ switch (speed) {
case HAL_ATLANTIC_RATE_10G:
link_status->mbps = 10000U;
break;
@@ -639,14 +707,15 @@ int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self)
int hw_atl_utils_get_mac_permanent(struct aq_hw_s *self,
u8 *mac)
{
+ u32 mac_addr[2];
+ u32 efuse_addr;
int err = 0;
u32 h = 0U;
u32 l = 0U;
- u32 mac_addr[2];
if (!aq_hw_read_reg(self, HW_ATL_UCP_0X370_REG)) {
- unsigned int rnd = 0;
unsigned int ucp_0x370 = 0;
+ unsigned int rnd = 0;
get_random_bytes(&rnd, sizeof(unsigned int));
@@ -654,11 +723,10 @@ int hw_atl_utils_get_mac_permanent(struct aq_hw_s *self,
aq_hw_write_reg(self, HW_ATL_UCP_0X370_REG, ucp_0x370);
}
- err = hw_atl_utils_fw_downld_dwords(self,
- aq_hw_read_reg(self, 0x00000374U) +
- (40U * 4U),
- mac_addr,
- ARRAY_SIZE(mac_addr));
+ efuse_addr = aq_hw_read_reg(self, 0x00000374U);
+
+ err = hw_atl_utils_fw_downld_dwords(self, efuse_addr + (40U * 4U),
+ mac_addr, ARRAY_SIZE(mac_addr));
if (err < 0) {
mac_addr[0] = 0U;
mac_addr[1] = 0U;
@@ -720,14 +788,15 @@ unsigned int hw_atl_utils_mbps_2_speed_index(unsigned int mbps)
default:
break;
}
+
return ret;
}
void hw_atl_utils_hw_chip_features_init(struct aq_hw_s *self, u32 *p)
{
- u32 chip_features = 0U;
u32 val = hw_atl_reg_glb_mif_id_get(self);
u32 mif_rev = val & 0xFFU;
+ u32 chip_features = 0U;
if ((0xFU & mif_rev) == 1U) {
chip_features |= HAL_ATLANTIC_UTILS_CHIP_REVISION_A0 |
@@ -754,13 +823,14 @@ static int hw_atl_fw1x_deinit(struct aq_hw_s *self)
{
hw_atl_utils_mpi_set_speed(self, 0);
hw_atl_utils_mpi_set_state(self, MPI_DEINIT);
+
return 0;
}
int hw_atl_utils_update_stats(struct aq_hw_s *self)
{
- struct hw_atl_utils_mbox mbox;
struct aq_stats_s *cs = &self->curr_stats;
+ struct hw_atl_utils_mbox mbox;
hw_atl_utils_mpi_read_stats(self, &mbox);
@@ -837,16 +907,19 @@ int hw_atl_utils_hw_get_regs(struct aq_hw_s *self,
for (i = 0; i < aq_hw_caps->mac_regs_count; i++)
regs_buff[i] = aq_hw_read_reg(self,
hw_atl_utils_hw_mac_regs[i]);
+
return 0;
}
int hw_atl_utils_get_fw_version(struct aq_hw_s *self, u32 *fw_version)
{
*fw_version = aq_hw_read_reg(self, 0x18U);
+
return 0;
}
-static int aq_fw1x_set_wol(struct aq_hw_s *self, bool wol_enabled, u8 *mac)
+static int aq_fw1x_set_wake_magic(struct aq_hw_s *self, bool wol_enabled,
+ u8 *mac)
{
struct hw_atl_utils_fw_rpc *prpc = NULL;
unsigned int rpc_size = 0U;
@@ -859,22 +932,26 @@ static int aq_fw1x_set_wol(struct aq_hw_s *self, bool wol_enabled, u8 *mac)
memset(prpc, 0, sizeof(*prpc));
if (wol_enabled) {
- rpc_size = sizeof(prpc->msg_id) + sizeof(prpc->msg_wol);
+ rpc_size = offsetof(struct hw_atl_utils_fw_rpc, msg_wol_add) +
+ sizeof(prpc->msg_wol_add);
+
prpc->msg_id = HAL_ATLANTIC_UTILS_FW_MSG_WOL_ADD;
- prpc->msg_wol.priority =
+ prpc->msg_wol_add.priority =
HAL_ATLANTIC_UTILS_FW_MSG_WOL_PRIOR;
- prpc->msg_wol.pattern_id =
+ prpc->msg_wol_add.pattern_id =
HAL_ATLANTIC_UTILS_FW_MSG_WOL_PATTERN;
- prpc->msg_wol.wol_packet_type =
+ prpc->msg_wol_add.packet_type =
HAL_ATLANTIC_UTILS_FW_MSG_WOL_MAG_PKT;
- ether_addr_copy((u8 *)&prpc->msg_wol.wol_pattern, mac);
+ ether_addr_copy((u8 *)&prpc->msg_wol_add.magic_packet_pattern,
+ mac);
} else {
- rpc_size = sizeof(prpc->msg_id) + sizeof(prpc->msg_del_id);
+ rpc_size = sizeof(prpc->msg_wol_remove) +
+ offsetof(struct hw_atl_utils_fw_rpc, msg_wol_remove);
prpc->msg_id = HAL_ATLANTIC_UTILS_FW_MSG_WOL_DEL;
- prpc->msg_wol.pattern_id =
+ prpc->msg_wol_add.pattern_id =
HAL_ATLANTIC_UTILS_FW_MSG_WOL_PATTERN;
}
@@ -891,8 +968,8 @@ static int aq_fw1x_set_power(struct aq_hw_s *self, unsigned int power_state,
unsigned int rpc_size = 0U;
int err = 0;
- if (self->aq_nic_cfg->wol & AQ_NIC_WOL_ENABLED) {
- err = aq_fw1x_set_wol(self, 1, mac);
+ if (self->aq_nic_cfg->wol & WAKE_MAGIC) {
+ err = aq_fw1x_set_wake_magic(self, 1, mac);
if (err < 0)
goto err_exit;
@@ -964,4 +1041,7 @@ const struct aq_fw_ops aq_fw_1x_ops = {
.set_eee_rate = NULL,
.get_eee_rate = NULL,
.set_flow_control = NULL,
+ .send_fw_request = NULL,
+ .enable_ptp = NULL,
+ .led_control = NULL,
};
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
index 692bed70e104..42f0c5c6ec2d 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* aQuantia Corporation Network Driver
- * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
*/
/* File hw_atl_utils.h: Declaration of common functions for Atlantic hardware
@@ -41,7 +41,15 @@ struct __packed hw_atl_rxd_wb_s {
u16 status;
u16 pkt_len;
u16 next_desc_ptr;
- u16 vlan;
+ __le16 vlan;
+};
+
+/* Hardware rx HW TIMESTAMP writeback */
+struct __packed hw_atl_rxd_hwts_wb_s {
+ u32 sec_hw;
+ u32 ns;
+ u32 sec_lw0;
+ u32 sec_lw1;
};
struct __packed hw_atl_stats_s {
@@ -62,104 +70,41 @@ struct __packed hw_atl_stats_s {
u32 dpc;
};
-union __packed ip_addr {
- struct {
- u8 addr[16];
- } v6;
- struct {
- u8 padding[12];
- u8 addr[4];
- } v4;
-};
-
-struct __packed hw_atl_utils_fw_rpc {
- u32 msg_id;
-
+struct __packed drv_msg_enable_wakeup {
union {
- struct {
- u32 pong;
- } msg_ping;
+ u32 pattern_mask;
struct {
- u8 mac_addr[6];
- u32 ip_addr_cnt;
+ u32 reason_arp_v4_pkt : 1;
+ u32 reason_ipv4_ping_pkt : 1;
+ u32 reason_ipv6_ns_pkt : 1;
+ u32 reason_ipv6_ping_pkt : 1;
+ u32 reason_link_up : 1;
+ u32 reason_link_down : 1;
+ u32 reason_maximum : 1;
+ };
+ };
- struct {
- union ip_addr addr;
- union ip_addr mask;
- } ip[1];
- } msg_arp;
+ union {
+ u32 offload_mask;
+ };
+};
- struct {
- u32 len;
- u8 packet[1514U];
- } msg_inject;
+struct __packed magic_packet_pattern_s {
+ u8 mac_addr[ETH_ALEN];
+};
- struct {
- u32 priority;
- u32 wol_packet_type;
- u32 pattern_id;
- u32 next_wol_pattern_offset;
-
- union {
- struct {
- u32 flags;
- u8 ipv4_source_address[4];
- u8 ipv4_dest_address[4];
- u16 tcp_source_port_number;
- u16 tcp_dest_port_number;
- } ipv4_tcp_syn_parameters;
-
- struct {
- u32 flags;
- u8 ipv6_source_address[16];
- u8 ipv6_dest_address[16];
- u16 tcp_source_port_number;
- u16 tcp_dest_port_number;
- } ipv6_tcp_syn_parameters;
-
- struct {
- u32 flags;
- } eapol_request_id_message_parameters;
-
- struct {
- u32 flags;
- u32 mask_offset;
- u32 mask_size;
- u32 pattern_offset;
- u32 pattern_size;
- } wol_bit_map_pattern;
-
- struct {
- u8 mac_addr[ETH_ALEN];
- } wol_magic_packet_patter;
- } wol_pattern;
- } msg_wol;
+struct __packed drv_msg_wol_add {
+ u32 priority;
+ u32 packet_type;
+ u32 pattern_id;
+ u32 next_pattern_offset;
- struct {
- union {
- u32 pattern_mask;
-
- struct {
- u32 reason_arp_v4_pkt : 1;
- u32 reason_ipv4_ping_pkt : 1;
- u32 reason_ipv6_ns_pkt : 1;
- u32 reason_ipv6_ping_pkt : 1;
- u32 reason_link_up : 1;
- u32 reason_link_down : 1;
- u32 reason_maximum : 1;
- };
- };
-
- union {
- u32 offload_mask;
- };
- } msg_enable_wakeup;
+ struct magic_packet_pattern_s magic_packet_pattern;
+};
- struct {
- u32 id;
- } msg_del_id;
- };
+struct __packed drv_msg_wol_remove {
+ u32 id;
};
struct __packed hw_atl_utils_mbox_header {
@@ -168,43 +113,89 @@ struct __packed hw_atl_utils_mbox_header {
u32 error;
};
-struct __packed hw_aq_info {
+struct __packed hw_atl_ptp_offset {
+ u16 ingress_100;
+ u16 egress_100;
+ u16 ingress_1000;
+ u16 egress_1000;
+ u16 ingress_2500;
+ u16 egress_2500;
+ u16 ingress_5000;
+ u16 egress_5000;
+ u16 ingress_10000;
+ u16 egress_10000;
+};
+
+struct __packed hw_atl_cable_diag {
+ u8 fault;
+ u8 distance;
+ u8 far_distance;
+ u8 reserved;
+};
+
+enum gpio_pin_function {
+ GPIO_PIN_FUNCTION_NC,
+ GPIO_PIN_FUNCTION_VAUX_ENABLE,
+ GPIO_PIN_FUNCTION_EFUSE_BURN_ENABLE,
+ GPIO_PIN_FUNCTION_SFP_PLUS_DETECT,
+ GPIO_PIN_FUNCTION_TX_DISABLE,
+ GPIO_PIN_FUNCTION_RATE_SEL_0,
+ GPIO_PIN_FUNCTION_RATE_SEL_1,
+ GPIO_PIN_FUNCTION_TX_FAULT,
+ GPIO_PIN_FUNCTION_PTP0,
+ GPIO_PIN_FUNCTION_PTP1,
+ GPIO_PIN_FUNCTION_PTP2,
+ GPIO_PIN_FUNCTION_SIZE
+};
+
+struct __packed hw_atl_info {
u8 reserved[6];
u16 phy_fault_code;
u16 phy_temperature;
u8 cable_len;
u8 reserved1;
- u32 cable_diag_data[4];
- u8 reserved2[32];
+ struct hw_atl_cable_diag cable_diag_data[4];
+ struct hw_atl_ptp_offset ptp_offset;
+ u8 reserved2[12];
u32 caps_lo;
u32 caps_hi;
+ u32 reserved_datapath;
+ u32 reserved3[7];
+ u32 reserved_simpleresp[3];
+ u32 reserved_linkstat[7];
+ u32 reserved_wakes_count;
+ u32 reserved_eee_stat[12];
+ u32 tx_stuck_cnt;
+ u32 setting_address;
+ u32 setting_length;
+ u32 caps_ex;
+ enum gpio_pin_function gpio_pin[3];
+ u32 pcie_aer_dump[18];
+ u16 snr_margin[4];
};
struct __packed hw_atl_utils_mbox {
struct hw_atl_utils_mbox_header header;
struct hw_atl_stats_s stats;
- struct hw_aq_info info;
+ struct hw_atl_info info;
};
-/* fw2x */
-typedef u32 fw_offset_t;
-
struct __packed offload_ip_info {
u8 v4_local_addr_count;
u8 v4_addr_count;
u8 v6_local_addr_count;
u8 v6_addr_count;
- fw_offset_t v4_addr;
- fw_offset_t v4_prefix;
- fw_offset_t v6_addr;
- fw_offset_t v6_prefix;
+ u32 v4_addr;
+ u32 v4_prefix;
+ u32 v6_addr;
+ u32 v6_prefix;
};
struct __packed offload_port_info {
u16 udp_port_count;
u16 tcp_port_count;
- fw_offset_t udp_port;
- fw_offset_t tcp_port;
+ u32 udp_port;
+ u32 tcp_port;
};
struct __packed offload_ka_info {
@@ -212,15 +203,15 @@ struct __packed offload_ka_info {
u16 v6_ka_count;
u32 retry_count;
u32 retry_interval;
- fw_offset_t v4_ka;
- fw_offset_t v6_ka;
+ u32 v4_ka;
+ u32 v6_ka;
};
struct __packed offload_rr_info {
u32 rr_count;
u32 rr_buf_len;
- fw_offset_t rr_id_x;
- fw_offset_t rr_buf;
+ u32 rr_id_x;
+ u32 rr_buf;
};
struct __packed offload_info {
@@ -237,9 +228,103 @@ struct __packed offload_info {
u8 buf[0];
};
+struct __packed hw_atl_utils_fw_rpc {
+ u32 msg_id;
+
+ union {
+ /* fw1x structures */
+ struct drv_msg_wol_add msg_wol_add;
+ struct drv_msg_wol_remove msg_wol_remove;
+ struct drv_msg_enable_wakeup msg_enable_wakeup;
+ /* fw2x structures */
+ struct offload_info fw2x_offloads;
+ };
+};
+
+/* Mailbox FW Request interface */
+struct __packed hw_fw_request_ptp_gpio_ctrl {
+ u32 index;
+ u32 period;
+ u64 start;
+};
+
+struct __packed hw_fw_request_ptp_adj_freq {
+ u32 ns_mac;
+ u32 fns_mac;
+ u32 ns_phy;
+ u32 fns_phy;
+ u32 mac_ns_adj;
+ u32 mac_fns_adj;
+};
+
+struct __packed hw_fw_request_ptp_adj_clock {
+ u32 ns;
+ u32 sec;
+ int sign;
+};
+
+#define HW_AQ_FW_REQUEST_PTP_GPIO_CTRL 0x11
+#define HW_AQ_FW_REQUEST_PTP_ADJ_FREQ 0x12
+#define HW_AQ_FW_REQUEST_PTP_ADJ_CLOCK 0x13
+
+struct __packed hw_fw_request_iface {
+ u32 msg_id;
+ union {
+ /* PTP FW Request */
+ struct hw_fw_request_ptp_gpio_ctrl ptp_gpio_ctrl;
+ struct hw_fw_request_ptp_adj_freq ptp_adj_freq;
+ struct hw_fw_request_ptp_adj_clock ptp_adj_clock;
+ };
+};
+
+struct __packed hw_atl_utils_settings {
+ u32 mtu;
+ u32 downshift_retry_count;
+ u32 link_pause_frame_quanta_100m;
+ u32 link_pause_frame_threshold_100m;
+ u32 link_pause_frame_quanta_1g;
+ u32 link_pause_frame_threshold_1g;
+ u32 link_pause_frame_quanta_2p5g;
+ u32 link_pause_frame_threshold_2p5g;
+ u32 link_pause_frame_quanta_5g;
+ u32 link_pause_frame_threshold_5g;
+ u32 link_pause_frame_quanta_10g;
+ u32 link_pause_frame_threshold_10g;
+ u32 pfc_quanta_class_0;
+ u32 pfc_threshold_class_0;
+ u32 pfc_quanta_class_1;
+ u32 pfc_threshold_class_1;
+ u32 pfc_quanta_class_2;
+ u32 pfc_threshold_class_2;
+ u32 pfc_quanta_class_3;
+ u32 pfc_threshold_class_3;
+ u32 pfc_quanta_class_4;
+ u32 pfc_threshold_class_4;
+ u32 pfc_quanta_class_5;
+ u32 pfc_threshold_class_5;
+ u32 pfc_quanta_class_6;
+ u32 pfc_threshold_class_6;
+ u32 pfc_quanta_class_7;
+ u32 pfc_threshold_class_7;
+ u32 eee_link_down_timeout;
+ u32 eee_link_up_timeout;
+ u32 eee_max_link_drops;
+ u32 eee_rates_mask;
+ u32 wake_timer;
+ u32 thermal_shutdown_off_temp;
+ u32 thermal_shutdown_warning_temp;
+ u32 thermal_shutdown_cold_temp;
+ u32 msm_options;
+ u32 dac_cable_serdes_modes;
+ u32 media_detect;
+};
+
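A hypothetical helper (the name, and the assumption that the offset argument is in bytes, are mine, not the patch's) showing how this layout pairs with hw_atl_write_fwsettings_dwords(), declared further below: a field is addressed by its offsetof() into the settings area that aq_fw2x_settings_get() locates.

static int atl_settings_set_media_detect(struct aq_hw_s *self, u32 on)
{
	u32 val = on;

	/* assumes a byte offset from the start of the settings area */
	return hw_atl_write_fwsettings_dwords(self,
			offsetof(struct hw_atl_utils_settings, media_detect),
			&val, 1);
}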
enum hw_atl_rx_action_with_traffic {
HW_ATL_RX_DISCARD,
HW_ATL_RX_HOST,
+ HW_ATL_RX_MNGMNT,
+ HW_ATL_RX_HOST_AND_MNGMNT,
+ HW_ATL_RX_WOL
};
struct aq_rx_filter_vlan {
@@ -321,20 +406,12 @@ enum hal_atl_utils_fw_state_e {
#define HAL_ATLANTIC_RATE_100M BIT(5)
#define HAL_ATLANTIC_RATE_INVALID BIT(6)
-#define HAL_ATLANTIC_UTILS_FW_MSG_PING 0x1U
-#define HAL_ATLANTIC_UTILS_FW_MSG_ARP 0x2U
-#define HAL_ATLANTIC_UTILS_FW_MSG_INJECT 0x3U
#define HAL_ATLANTIC_UTILS_FW_MSG_WOL_ADD 0x4U
#define HAL_ATLANTIC_UTILS_FW_MSG_WOL_PRIOR 0x10000000U
#define HAL_ATLANTIC_UTILS_FW_MSG_WOL_PATTERN 0x1U
#define HAL_ATLANTIC_UTILS_FW_MSG_WOL_MAG_PKT 0x2U
#define HAL_ATLANTIC_UTILS_FW_MSG_WOL_DEL 0x5U
#define HAL_ATLANTIC_UTILS_FW_MSG_ENABLE_WAKEUP 0x6U
-#define HAL_ATLANTIC_UTILS_FW_MSG_MSM_PFC 0x7U
-#define HAL_ATLANTIC_UTILS_FW_MSG_PROVISIONING 0x8U
-#define HAL_ATLANTIC_UTILS_FW_MSG_OFFLOAD_ADD 0x9U
-#define HAL_ATLANTIC_UTILS_FW_MSG_OFFLOAD_DEL 0xAU
-#define HAL_ATLANTIC_UTILS_FW_MSG_CABLE_DIAG 0xDU
enum hw_atl_fw2x_rate {
FW2X_RATE_100M = 0x20,
@@ -344,91 +421,135 @@ enum hw_atl_fw2x_rate {
FW2X_RATE_10G = 0x800,
};
+/* 0x370
+ * Link capabilities resolution register
+ */
enum hw_atl_fw2x_caps_lo {
- CAPS_LO_10BASET_HD = 0x00,
+ CAPS_LO_10BASET_HD = 0,
CAPS_LO_10BASET_FD,
CAPS_LO_100BASETX_HD,
CAPS_LO_100BASET4_HD,
CAPS_LO_100BASET2_HD,
- CAPS_LO_100BASETX_FD,
+ CAPS_LO_100BASETX_FD = 5,
CAPS_LO_100BASET2_FD,
CAPS_LO_1000BASET_HD,
CAPS_LO_1000BASET_FD,
CAPS_LO_2P5GBASET_FD,
- CAPS_LO_5GBASET_FD,
+ CAPS_LO_5GBASET_FD = 10,
CAPS_LO_10GBASET_FD,
};
+/* 0x374
+ * Status register
+ */
enum hw_atl_fw2x_caps_hi {
- CAPS_HI_RESERVED1 = 0x00,
+ CAPS_HI_RESERVED1 = 0,
CAPS_HI_10BASET_EEE,
CAPS_HI_RESERVED2,
CAPS_HI_PAUSE,
CAPS_HI_ASYMMETRIC_PAUSE,
- CAPS_HI_100BASETX_EEE,
+ CAPS_HI_100BASETX_EEE = 5,
CAPS_HI_RESERVED3,
CAPS_HI_RESERVED4,
CAPS_HI_1000BASET_FD_EEE,
CAPS_HI_2P5GBASET_FD_EEE,
- CAPS_HI_5GBASET_FD_EEE,
+ CAPS_HI_5GBASET_FD_EEE = 10,
CAPS_HI_10GBASET_FD_EEE,
- CAPS_HI_RESERVED5,
+ CAPS_HI_FW_REQUEST,
CAPS_HI_RESERVED6,
CAPS_HI_RESERVED7,
- CAPS_HI_RESERVED8,
+ CAPS_HI_RESERVED8 = 15,
CAPS_HI_RESERVED9,
CAPS_HI_CABLE_DIAG,
CAPS_HI_TEMPERATURE,
CAPS_HI_DOWNSHIFT,
- CAPS_HI_PTP_AVB_EN,
+ CAPS_HI_PTP_AVB_EN_FW2X = 20,
CAPS_HI_MEDIA_DETECT,
CAPS_HI_LINK_DROP,
CAPS_HI_SLEEP_PROXY,
CAPS_HI_WOL,
- CAPS_HI_MAC_STOP,
+ CAPS_HI_MAC_STOP = 25,
CAPS_HI_EXT_LOOPBACK,
CAPS_HI_INT_LOOPBACK,
CAPS_HI_EFUSE_AGENT,
CAPS_HI_WOL_TIMER,
- CAPS_HI_STATISTICS,
+ CAPS_HI_STATISTICS = 30,
CAPS_HI_TRANSACTION_ID,
};
+/* 0x36C
+ * Control register
+ */
enum hw_atl_fw2x_ctrl {
- CTRL_RESERVED1 = 0x00,
+ CTRL_RESERVED1 = 0,
CTRL_RESERVED2,
CTRL_RESERVED3,
CTRL_PAUSE,
CTRL_ASYMMETRIC_PAUSE,
- CTRL_RESERVED4,
+ CTRL_RESERVED4 = 5,
CTRL_RESERVED5,
CTRL_RESERVED6,
CTRL_1GBASET_FD_EEE,
CTRL_2P5GBASET_FD_EEE,
- CTRL_5GBASET_FD_EEE,
+ CTRL_5GBASET_FD_EEE = 10,
CTRL_10GBASET_FD_EEE,
CTRL_THERMAL_SHUTDOWN,
CTRL_PHY_LOGS,
CTRL_EEE_AUTO_DISABLE,
- CTRL_PFC,
+ CTRL_PFC = 15,
CTRL_WAKE_ON_LINK,
CTRL_CABLE_DIAG,
CTRL_TEMPERATURE,
CTRL_DOWNSHIFT,
- CTRL_PTP_AVB,
+ CTRL_PTP_AVB = 20,
CTRL_RESERVED7,
CTRL_LINK_DROP,
CTRL_SLEEP_PROXY,
CTRL_WOL,
- CTRL_MAC_STOP,
+ CTRL_MAC_STOP = 25,
CTRL_EXT_LOOPBACK,
CTRL_INT_LOOPBACK,
CTRL_RESERVED8,
CTRL_WOL_TIMER,
- CTRL_STATISTICS,
+ CTRL_STATISTICS = 30,
CTRL_FORCE_RECONNECT,
};
+enum hw_atl_caps_ex {
+ CAPS_EX_LED_CONTROL = 0,
+ CAPS_EX_LED0_MODE_LO,
+ CAPS_EX_LED0_MODE_HI,
+ CAPS_EX_LED1_MODE_LO,
+ CAPS_EX_LED1_MODE_HI,
+ CAPS_EX_LED2_MODE_LO = 5,
+ CAPS_EX_LED2_MODE_HI,
+ CAPS_EX_RESERVED07,
+ CAPS_EX_RESERVED08,
+ CAPS_EX_RESERVED09,
+ CAPS_EX_RESERVED10 = 10,
+ CAPS_EX_RESERVED11,
+ CAPS_EX_RESERVED12,
+ CAPS_EX_RESERVED13,
+ CAPS_EX_RESERVED14,
+ CAPS_EX_RESERVED15 = 15,
+ CAPS_EX_PHY_PTP_EN,
+ CAPS_EX_MAC_PTP_EN,
+ CAPS_EX_EXT_CLK_EN,
+ CAPS_EX_SCHED_DMA_EN,
+ CAPS_EX_PTP_GPIO_EN = 20,
+ CAPS_EX_UPDATE_SETTINGS,
+ CAPS_EX_PHY_CTRL_TS_PIN,
+ CAPS_EX_SNR_OPERATING_MARGIN,
+ CAPS_EX_RESERVED24,
+ CAPS_EX_RESERVED25 = 25,
+ CAPS_EX_RESERVED26,
+ CAPS_EX_RESERVED27,
+ CAPS_EX_RESERVED28,
+ CAPS_EX_RESERVED29,
+ CAPS_EX_RESERVED30 = 30,
+ CAPS_EX_RESERVED31
+};
+
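These enum values are bit positions, consumed through BIT() the same way the CAPS_HI_*/CTRL_* values are wrapped into HW_ATL_FW2X_* masks below. A hedged sketch generalizing aq_fw3x_enable_ptp() (defined later in hw_atl_utils_fw2x.c; the helper name is hypothetical):

static void atl_fw3x_set_ext_bits(struct aq_hw_s *self, u32 bits, bool on)
{
	u32 ctrl = aq_hw_read_reg(self, HW_ATL_FW3X_EXT_STATE_ADDR);

	if (on)
		ctrl |= bits;
	else
		ctrl &= ~bits;

	aq_hw_write_reg(self, HW_ATL_FW3X_EXT_CONTROL_ADDR, ctrl);
}

Enabling PTP, for example, sets BIT(CAPS_EX_PHY_PTP_EN) | BIT(CAPS_EX_PTP_GPIO_EN).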
struct aq_hw_s;
struct aq_fw_ops;
struct aq_hw_caps_s;
@@ -475,6 +596,11 @@ struct aq_stats_s *hw_atl_utils_get_hw_stats(struct aq_hw_s *self);
int hw_atl_utils_fw_downld_dwords(struct aq_hw_s *self, u32 a,
u32 *p, u32 cnt);
+int hw_atl_write_fwcfg_dwords(struct aq_hw_s *self, u32 *p, u32 cnt);
+
+int hw_atl_write_fwsettings_dwords(struct aq_hw_s *self, u32 offset, u32 *p,
+ u32 cnt);
+
int hw_atl_utils_fw_set_wol(struct aq_hw_s *self, bool wol_enabled, u8 *mac);
int hw_atl_utils_fw_rpc_call(struct aq_hw_s *self, unsigned int rpc_size);
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
index 7bc51f8d6f2f..97ebf849695f 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* aQuantia Corporation Network Driver
- * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
*/
/* File hw_atl_utils_fw2x.c: Definition of firmware 2.x functions for
@@ -17,26 +17,34 @@
#include "hw_atl_utils.h"
#include "hw_atl_llh.h"
-#define HW_ATL_FW2X_MPI_RPC_ADDR 0x334
+#define HW_ATL_FW2X_MPI_LED_ADDR 0x31c
+#define HW_ATL_FW2X_MPI_RPC_ADDR 0x334
-#define HW_ATL_FW2X_MPI_MBOX_ADDR 0x360
-#define HW_ATL_FW2X_MPI_EFUSE_ADDR 0x364
-#define HW_ATL_FW2X_MPI_CONTROL_ADDR 0x368
-#define HW_ATL_FW2X_MPI_CONTROL2_ADDR 0x36C
-#define HW_ATL_FW2X_MPI_STATE_ADDR 0x370
-#define HW_ATL_FW2X_MPI_STATE2_ADDR 0x374
+#define HW_ATL_FW2X_MPI_MBOX_ADDR 0x360
+#define HW_ATL_FW2X_MPI_EFUSE_ADDR 0x364
+#define HW_ATL_FW2X_MPI_CONTROL_ADDR 0x368
+#define HW_ATL_FW2X_MPI_CONTROL2_ADDR 0x36C
+#define HW_ATL_FW2X_MPI_STATE_ADDR 0x370
+#define HW_ATL_FW2X_MPI_STATE2_ADDR 0x374
+
+#define HW_ATL_FW3X_EXT_CONTROL_ADDR 0x378
+#define HW_ATL_FW3X_EXT_STATE_ADDR 0x37c
#define HW_ATL_FW2X_CAP_PAUSE BIT(CAPS_HI_PAUSE)
#define HW_ATL_FW2X_CAP_ASYM_PAUSE BIT(CAPS_HI_ASYMMETRIC_PAUSE)
#define HW_ATL_FW2X_CAP_SLEEP_PROXY BIT(CAPS_HI_SLEEP_PROXY)
#define HW_ATL_FW2X_CAP_WOL BIT(CAPS_HI_WOL)
+#define HW_ATL_FW2X_CTRL_WAKE_ON_LINK BIT(CTRL_WAKE_ON_LINK)
#define HW_ATL_FW2X_CTRL_SLEEP_PROXY BIT(CTRL_SLEEP_PROXY)
#define HW_ATL_FW2X_CTRL_WOL BIT(CTRL_WOL)
#define HW_ATL_FW2X_CTRL_LINK_DROP BIT(CTRL_LINK_DROP)
#define HW_ATL_FW2X_CTRL_PAUSE BIT(CTRL_PAUSE)
#define HW_ATL_FW2X_CTRL_TEMPERATURE BIT(CTRL_TEMPERATURE)
#define HW_ATL_FW2X_CTRL_ASYMMETRIC_PAUSE BIT(CTRL_ASYMMETRIC_PAUSE)
+#define HW_ATL_FW2X_CTRL_INT_LOOPBACK BIT(CTRL_INT_LOOPBACK)
+#define HW_ATL_FW2X_CTRL_EXT_LOOPBACK BIT(CTRL_EXT_LOOPBACK)
+#define HW_ATL_FW2X_CTRL_DOWNSHIFT BIT(CTRL_DOWNSHIFT)
#define HW_ATL_FW2X_CTRL_FORCE_RECONNECT BIT(CTRL_FORCE_RECONNECT)
#define HW_ATL_FW2X_CAP_EEE_1G_MASK BIT(CAPS_HI_1000BASET_FD_EEE)
@@ -47,6 +55,9 @@
#define HAL_ATLANTIC_WOL_FILTERS_COUNT 8
#define HAL_ATLANTIC_UTILS_FW2X_MSG_WOL 0x0E
+#define HW_ATL_FW_VER_LED 0x03010026U
+#define HW_ATL_FW_VER_MEDIA_CONTROL 0x0301005aU
+
struct __packed fw2x_msg_wol_pattern {
u8 mask[16];
u32 crc;
@@ -71,6 +82,7 @@ static int aq_fw2x_set_state(struct aq_hw_s *self,
static u32 aq_fw2x_mbox_get(struct aq_hw_s *self);
static u32 aq_fw2x_rpc_get(struct aq_hw_s *self);
+static int aq_fw2x_settings_get(struct aq_hw_s *self, u32 *addr);
static u32 aq_fw2x_state2_get(struct aq_hw_s *self);
static int aq_fw2x_init(struct aq_hw_s *self)
@@ -88,6 +100,8 @@ static int aq_fw2x_init(struct aq_hw_s *self)
self->rpc_addr != 0U,
1000U, 100000U);
+ err = aq_fw2x_settings_get(self, &self->settings_addr);
+
return err;
}
@@ -167,17 +181,26 @@ static int aq_fw2x_set_link_speed(struct aq_hw_s *self, u32 speed)
return 0;
}
-static void aq_fw2x_set_mpi_flow_control(struct aq_hw_s *self, u32 *mpi_state)
+static void aq_fw2x_upd_flow_control_bits(struct aq_hw_s *self,
+ u32 *mpi_state, u32 fc)
{
- if (self->aq_nic_cfg->flow_control & AQ_NIC_FC_RX)
- *mpi_state |= BIT(CAPS_HI_PAUSE);
- else
- *mpi_state &= ~BIT(CAPS_HI_PAUSE);
+ *mpi_state &= ~(HW_ATL_FW2X_CTRL_PAUSE |
+ HW_ATL_FW2X_CTRL_ASYMMETRIC_PAUSE);
- if (self->aq_nic_cfg->flow_control & AQ_NIC_FC_TX)
- *mpi_state |= BIT(CAPS_HI_ASYMMETRIC_PAUSE);
- else
- *mpi_state &= ~BIT(CAPS_HI_ASYMMETRIC_PAUSE);
+ switch (fc) {
+ /* There is no explicit RX-only pause mode, so RX-only requests
+  * are joined with FC full. Full flow control covers Rx, Tx,
+  * or both.
+  */
+ case AQ_NIC_FC_FULL:
+ case AQ_NIC_FC_RX:
+ *mpi_state |= HW_ATL_FW2X_CTRL_PAUSE |
+ HW_ATL_FW2X_CTRL_ASYMMETRIC_PAUSE;
+ break;
+ case AQ_NIC_FC_TX:
+ *mpi_state |= HW_ATL_FW2X_CTRL_ASYMMETRIC_PAUSE;
+ break;
+ }
}
static void aq_fw2x_upd_eee_rate_bits(struct aq_hw_s *self, u32 *mpi_opts,
@@ -201,7 +224,8 @@ static int aq_fw2x_set_state(struct aq_hw_s *self,
case MPI_INIT:
mpi_state &= ~BIT(CAPS_HI_LINK_DROP);
aq_fw2x_upd_eee_rate_bits(self, &mpi_state, cfg->eee_speeds);
- aq_fw2x_set_mpi_flow_control(self, &mpi_state);
+ aq_fw2x_upd_flow_control_bits(self, &mpi_state,
+ self->aq_nic_cfg->fc.req);
break;
case MPI_DEINIT:
mpi_state |= BIT(CAPS_HI_LINK_DROP);
@@ -212,15 +236,20 @@ static int aq_fw2x_set_state(struct aq_hw_s *self,
break;
}
aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_state);
+
return 0;
}
static int aq_fw2x_update_link_status(struct aq_hw_s *self)
{
- u32 mpi_state = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE_ADDR);
- u32 speed = mpi_state & (FW2X_RATE_100M | FW2X_RATE_1G |
- FW2X_RATE_2G5 | FW2X_RATE_5G | FW2X_RATE_10G);
struct aq_hw_link_status_s *link_status = &self->aq_link_status;
+ u32 mpi_state;
+ u32 speed;
+
+ mpi_state = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE_ADDR);
+ speed = mpi_state & (FW2X_RATE_100M | FW2X_RATE_1G |
+ FW2X_RATE_2G5 | FW2X_RATE_5G |
+ FW2X_RATE_10G);
if (speed) {
if (speed & FW2X_RATE_10G)
@@ -244,11 +273,11 @@ static int aq_fw2x_update_link_status(struct aq_hw_s *self)
static int aq_fw2x_get_mac_permanent(struct aq_hw_s *self, u8 *mac)
{
+ u32 efuse_addr = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_EFUSE_ADDR);
+ u32 mac_addr[2] = { 0 };
int err = 0;
u32 h = 0U;
u32 l = 0U;
- u32 mac_addr[2] = { 0 };
- u32 efuse_addr = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_EFUSE_ADDR);
if (efuse_addr != 0) {
err = hw_atl_utils_fw_downld_dwords(self,
@@ -282,15 +311,16 @@ static int aq_fw2x_get_mac_permanent(struct aq_hw_s *self, u8 *mac)
h >>= 8;
mac[0] = (u8)(0xFFU & h);
}
+
return err;
}
static int aq_fw2x_update_stats(struct aq_hw_s *self)
{
- int err = 0;
u32 mpi_opts = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR);
u32 orig_stats_val = mpi_opts & BIT(CAPS_HI_STATISTICS);
u32 stats_val;
+ int err = 0;
/* Toggle statistics bit for FW to update */
mpi_opts = mpi_opts ^ BIT(CAPS_HI_STATISTICS);
@@ -317,9 +347,9 @@ static int aq_fw2x_get_phy_temp(struct aq_hw_s *self, int *temp)
int err = 0;
u32 val;
- phy_temp_offset = self->mbox_addr +
- offsetof(struct hw_atl_utils_mbox, info) +
- offsetof(struct hw_aq_info, phy_temperature);
+ phy_temp_offset = self->mbox_addr + offsetof(struct hw_atl_utils_mbox,
+ info.phy_temperature);
+
 /* Toggle 0x36C.18 (CTRL_TEMPERATURE) for FW to update the temperature */
mpi_opts = mpi_opts ^ HW_ATL_FW2X_CTRL_TEMPERATURE;
aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts);
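The change above folds the old two-step offset sum into a single nested-member offsetof(). A stand-alone illustration (hypothetical struct names) that the two forms are identical:

#include <stddef.h>

struct inner { int a; int b; };
struct outer { int pad; struct inner in; };

_Static_assert(offsetof(struct outer, in.b) ==
	       offsetof(struct outer, in) + offsetof(struct inner, b),
	       "nested offsetof equals the sum of the parts");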
@@ -342,106 +372,117 @@ static int aq_fw2x_get_phy_temp(struct aq_hw_s *self, int *temp)
return 0;
}
-static int aq_fw2x_set_sleep_proxy(struct aq_hw_s *self, u8 *mac)
+static int aq_fw2x_set_wol(struct aq_hw_s *self, u8 *mac)
{
struct hw_atl_utils_fw_rpc *rpc = NULL;
- struct offload_info *cfg = NULL;
- unsigned int rpc_size = 0U;
- u32 mpi_opts;
+ struct offload_info *info = NULL;
+ u32 wol_bits = 0;
+ u32 rpc_size;
int err = 0;
u32 val;
- rpc_size = sizeof(rpc->msg_id) + sizeof(*cfg);
-
- err = hw_atl_utils_fw_rpc_wait(self, &rpc);
- if (err < 0)
- goto err_exit;
-
- memset(rpc, 0, rpc_size);
- cfg = (struct offload_info *)(&rpc->msg_id + 1);
+ if (self->aq_nic_cfg->wol & WAKE_PHY) {
+ aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR,
+ HW_ATL_FW2X_CTRL_LINK_DROP);
+ readx_poll_timeout_atomic(aq_fw2x_state2_get, self, val,
+ (val &
+ HW_ATL_FW2X_CTRL_LINK_DROP) != 0,
+ 1000, 100000);
+ wol_bits |= HW_ATL_FW2X_CTRL_WAKE_ON_LINK;
+ }
- memcpy(cfg->mac_addr, mac, ETH_ALEN);
- cfg->len = sizeof(*cfg);
+ if (self->aq_nic_cfg->wol & WAKE_MAGIC) {
+ wol_bits |= HW_ATL_FW2X_CTRL_SLEEP_PROXY |
+ HW_ATL_FW2X_CTRL_WOL;
- /* Clear bit 0x36C.23 and 0x36C.22 */
- mpi_opts = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR);
- mpi_opts &= ~HW_ATL_FW2X_CTRL_SLEEP_PROXY;
- mpi_opts &= ~HW_ATL_FW2X_CTRL_LINK_DROP;
+ err = hw_atl_utils_fw_rpc_wait(self, &rpc);
+ if (err < 0)
+ goto err_exit;
- aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts);
+ rpc_size = sizeof(*info) +
+ offsetof(struct hw_atl_utils_fw_rpc, fw2x_offloads);
+ memset(rpc, 0, rpc_size);
+ info = &rpc->fw2x_offloads;
+ memcpy(info->mac_addr, mac, ETH_ALEN);
+ info->len = sizeof(*info);
- err = hw_atl_utils_fw_rpc_call(self, rpc_size);
- if (err < 0)
- goto err_exit;
+ err = hw_atl_utils_fw_rpc_call(self, rpc_size);
+ if (err < 0)
+ goto err_exit;
+ }
- /* Set bit 0x36C.23 */
- mpi_opts |= HW_ATL_FW2X_CTRL_SLEEP_PROXY;
- aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts);
-
- err = readx_poll_timeout_atomic(aq_fw2x_state2_get,
- self, val,
- val & HW_ATL_FW2X_CTRL_SLEEP_PROXY,
- 1U, 100000U);
+ aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, wol_bits);
err_exit:
return err;
}
-static int aq_fw2x_set_wol_params(struct aq_hw_s *self, u8 *mac)
+static int aq_fw2x_set_power(struct aq_hw_s *self, unsigned int power_state,
+ u8 *mac)
{
- struct hw_atl_utils_fw_rpc *rpc = NULL;
- struct fw2x_msg_wol *msg = NULL;
- u32 mpi_opts;
int err = 0;
- u32 val;
-
- err = hw_atl_utils_fw_rpc_wait(self, &rpc);
- if (err < 0)
- goto err_exit;
-
- msg = (struct fw2x_msg_wol *)rpc;
-
- memset(msg, 0, sizeof(*msg));
- msg->msg_id = HAL_ATLANTIC_UTILS_FW2X_MSG_WOL;
- msg->magic_packet_enabled = true;
- memcpy(msg->hw_addr, mac, ETH_ALEN);
+ if (self->aq_nic_cfg->wol)
+ err = aq_fw2x_set_wol(self, mac);
- mpi_opts = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR);
- mpi_opts &= ~(HW_ATL_FW2X_CTRL_SLEEP_PROXY | HW_ATL_FW2X_CTRL_WOL);
+ return err;
+}
- aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts);
+static int aq_fw2x_send_fw_request(struct aq_hw_s *self,
+ const struct hw_fw_request_iface *fw_req,
+ size_t size)
+{
+ u32 ctrl2, orig_ctrl2;
+ u32 dword_cnt;
+ int err = 0;
+ u32 val;
- err = hw_atl_utils_fw_rpc_call(self, sizeof(*msg));
+ /* Write data to drvIface Mailbox */
+ dword_cnt = size / sizeof(u32);
+ if (size % sizeof(u32))
+ dword_cnt++;
+ err = hw_atl_write_fwcfg_dwords(self, (void *)fw_req, dword_cnt);
if (err < 0)
goto err_exit;
- /* Set bit 0x36C.24 */
- mpi_opts |= HW_ATL_FW2X_CTRL_WOL;
- aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts);
+ /* Toggle the FW request bit so firmware picks up the new request */
+ ctrl2 = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR);
+ orig_ctrl2 = ctrl2 & BIT(CAPS_HI_FW_REQUEST);
+ ctrl2 = ctrl2 ^ BIT(CAPS_HI_FW_REQUEST);
+ aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, ctrl2);
- err = readx_poll_timeout_atomic(aq_fw2x_state2_get,
- self, val, val & HW_ATL_FW2X_CTRL_WOL,
+ /* Wait for FW to report back */
+ err = readx_poll_timeout_atomic(aq_fw2x_state2_get, self, val,
+ orig_ctrl2 != (val &
+ BIT(CAPS_HI_FW_REQUEST)),
1U, 10000U);
err_exit:
return err;
}
-static int aq_fw2x_set_power(struct aq_hw_s *self, unsigned int power_state,
- u8 *mac)
+static void aq_fw3x_enable_ptp(struct aq_hw_s *self, int enable)
{
- int err = 0;
+ u32 ptp_opts = aq_hw_read_reg(self, HW_ATL_FW3X_EXT_STATE_ADDR);
+ u32 all_ptp_features = BIT(CAPS_EX_PHY_PTP_EN) |
+ BIT(CAPS_EX_PTP_GPIO_EN);
- if (self->aq_nic_cfg->wol & AQ_NIC_WOL_ENABLED) {
- err = aq_fw2x_set_sleep_proxy(self, mac);
- if (err < 0)
- goto err_exit;
- err = aq_fw2x_set_wol_params(self, mac);
- }
+ if (enable)
+ ptp_opts |= all_ptp_features;
+ else
+ ptp_opts &= ~all_ptp_features;
-err_exit:
- return err;
+ aq_hw_write_reg(self, HW_ATL_FW3X_EXT_CONTROL_ADDR, ptp_opts);
+}
+
+static int aq_fw2x_led_control(struct aq_hw_s *self, u32 mode)
+{
+ if (self->fw_ver_actual < HW_ATL_FW_VER_LED)
+ return -EOPNOTSUPP;
+
+ aq_hw_write_reg(self, HW_ATL_FW2X_MPI_LED_ADDR, mode);
+
+ return 0;
}
static int aq_fw2x_set_eee_rate(struct aq_hw_s *self, u32 speed)
@@ -461,11 +502,12 @@ static int aq_fw2x_get_eee_rate(struct aq_hw_s *self, u32 *rate,
u32 mpi_state;
u32 caps_hi;
int err = 0;
- u32 addr = self->mbox_addr + offsetof(struct hw_atl_utils_mbox, info) +
- offsetof(struct hw_aq_info, caps_hi);
+ u32 offset;
+
+ offset = self->mbox_addr + offsetof(struct hw_atl_utils_mbox,
+ info.caps_hi);
- err = hw_atl_utils_fw_downld_dwords(self, addr, &caps_hi,
- sizeof(caps_hi) / sizeof(u32));
+ err = hw_atl_utils_fw_downld_dwords(self, offset, &caps_hi, 1);
if (err)
return err;
@@ -493,7 +535,8 @@ static int aq_fw2x_set_flow_control(struct aq_hw_s *self)
{
u32 mpi_state = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR);
- aq_fw2x_set_mpi_flow_control(self, &mpi_state);
+ aq_fw2x_upd_flow_control_bits(self, &mpi_state,
+ self->aq_nic_cfg->fc.req);
aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_state);
@@ -503,17 +546,41 @@ static int aq_fw2x_set_flow_control(struct aq_hw_s *self)
static u32 aq_fw2x_get_flow_control(struct aq_hw_s *self, u32 *fcmode)
{
u32 mpi_state = aq_fw2x_state2_get(self);
+ *fcmode = 0;
if (mpi_state & HW_ATL_FW2X_CAP_PAUSE)
- if (mpi_state & HW_ATL_FW2X_CAP_ASYM_PAUSE)
- *fcmode = AQ_NIC_FC_RX;
+ *fcmode |= AQ_NIC_FC_RX;
+
+ if (mpi_state & HW_ATL_FW2X_CAP_ASYM_PAUSE)
+ *fcmode |= AQ_NIC_FC_TX;
+
+ return 0;
+}
+
+static int aq_fw2x_set_phyloopback(struct aq_hw_s *self, u32 mode, bool enable)
+{
+ u32 mpi_opts;
+
+ switch (mode) {
+ case AQ_HW_LOOPBACK_PHYINT_SYS:
+ mpi_opts = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR);
+ if (enable)
+ mpi_opts |= HW_ATL_FW2X_CTRL_INT_LOOPBACK;
else
- *fcmode = AQ_NIC_FC_RX | AQ_NIC_FC_TX;
- else
- if (mpi_state & HW_ATL_FW2X_CAP_ASYM_PAUSE)
- *fcmode = AQ_NIC_FC_TX;
+ mpi_opts &= ~HW_ATL_FW2X_CTRL_INT_LOOPBACK;
+ aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts);
+ break;
+ case AQ_HW_LOOPBACK_PHYEXT_SYS:
+ mpi_opts = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR);
+ if (enable)
+ mpi_opts |= HW_ATL_FW2X_CTRL_EXT_LOOPBACK;
else
- *fcmode = 0;
+ mpi_opts &= ~HW_ATL_FW2X_CTRL_EXT_LOOPBACK;
+ aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts);
+ break;
+ default:
+ return -EINVAL;
+ }
return 0;
}
@@ -528,25 +595,42 @@ static u32 aq_fw2x_rpc_get(struct aq_hw_s *self)
return aq_hw_read_reg(self, HW_ATL_FW2X_MPI_RPC_ADDR);
}
+static int aq_fw2x_settings_get(struct aq_hw_s *self, u32 *addr)
+{
+ int err = 0;
+ u32 offset;
+
+ offset = self->mbox_addr + offsetof(struct hw_atl_utils_mbox,
+ info.setting_address);
+
+ err = hw_atl_utils_fw_downld_dwords(self, offset, addr, 1);
+
+ return err;
+}
+
static u32 aq_fw2x_state2_get(struct aq_hw_s *self)
{
return aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE2_ADDR);
}
const struct aq_fw_ops aq_fw_2x_ops = {
- .init = aq_fw2x_init,
- .deinit = aq_fw2x_deinit,
- .reset = NULL,
- .renegotiate = aq_fw2x_renegotiate,
- .get_mac_permanent = aq_fw2x_get_mac_permanent,
- .set_link_speed = aq_fw2x_set_link_speed,
- .set_state = aq_fw2x_set_state,
+ .init = aq_fw2x_init,
+ .deinit = aq_fw2x_deinit,
+ .reset = NULL,
+ .renegotiate = aq_fw2x_renegotiate,
+ .get_mac_permanent = aq_fw2x_get_mac_permanent,
+ .set_link_speed = aq_fw2x_set_link_speed,
+ .set_state = aq_fw2x_set_state,
.update_link_status = aq_fw2x_update_link_status,
- .update_stats = aq_fw2x_update_stats,
- .get_phy_temp = aq_fw2x_get_phy_temp,
- .set_power = aq_fw2x_set_power,
- .set_eee_rate = aq_fw2x_set_eee_rate,
- .get_eee_rate = aq_fw2x_get_eee_rate,
- .set_flow_control = aq_fw2x_set_flow_control,
- .get_flow_control = aq_fw2x_get_flow_control
+ .update_stats = aq_fw2x_update_stats,
+ .get_phy_temp = aq_fw2x_get_phy_temp,
+ .set_power = aq_fw2x_set_power,
+ .set_eee_rate = aq_fw2x_set_eee_rate,
+ .get_eee_rate = aq_fw2x_get_eee_rate,
+ .set_flow_control = aq_fw2x_set_flow_control,
+ .get_flow_control = aq_fw2x_get_flow_control,
+ .send_fw_request = aq_fw2x_send_fw_request,
+ .enable_ptp = aq_fw3x_enable_ptp,
+ .led_control = aq_fw2x_led_control,
+ .set_phyloopback = aq_fw2x_set_phyloopback,
};
diff --git a/drivers/net/ethernet/arc/emac_arc.c b/drivers/net/ethernet/arc/emac_arc.c
index 78e52d217e56..539166112993 100644
--- a/drivers/net/ethernet/arc/emac_arc.c
+++ b/drivers/net/ethernet/arc/emac_arc.c
@@ -20,9 +20,10 @@
static int emac_arc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct net_device *ndev;
struct arc_emac_priv *priv;
- int interface, err;
+ phy_interface_t interface;
+ struct net_device *ndev;
+ int err;
if (!dev->of_node)
return -ENODEV;
@@ -37,9 +38,13 @@ static int emac_arc_probe(struct platform_device *pdev)
priv->drv_name = DRV_NAME;
priv->drv_version = DRV_VERSION;
- interface = of_get_phy_mode(dev->of_node);
- if (interface < 0)
- interface = PHY_INTERFACE_MODE_MII;
+ err = of_get_phy_mode(dev->of_node, &interface);
+ if (err) {
+ if (err == -ENODEV)
+ interface = PHY_INTERFACE_MODE_MII;
+ else
+ goto out_netdev;
+ }
priv->clk = devm_clk_get(dev, "hclk");
if (IS_ERR(priv->clk)) {
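The same of_get_phy_mode() conversion repeats across the drivers below: the API now returns an errno and writes the mode through a pointer. A condensed restatement of the pattern (variable names illustrative):

	phy_interface_t mode;
	int err;

	err = of_get_phy_mode(np, &mode);
	if (err == -ENODEV)
		mode = PHY_INTERFACE_MODE_MII;	/* driver-specific default */
	else if (err)
		return err;	/* property present but unusable */

Only -ENODEV (property absent) is safe to paper over with a default; other errors are propagated, as emac_arc does above.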
diff --git a/drivers/net/ethernet/arc/emac_rockchip.c b/drivers/net/ethernet/arc/emac_rockchip.c
index 664d664e0925..aae231c5224f 100644
--- a/drivers/net/ethernet/arc/emac_rockchip.c
+++ b/drivers/net/ethernet/arc/emac_rockchip.c
@@ -97,8 +97,9 @@ static int emac_rockchip_probe(struct platform_device *pdev)
struct net_device *ndev;
struct rockchip_priv_data *priv;
const struct of_device_id *match;
+ phy_interface_t interface;
u32 data;
- int err, interface;
+ int err;
if (!pdev->dev.of_node)
return -ENODEV;
@@ -114,7 +115,9 @@ static int emac_rockchip_probe(struct platform_device *pdev)
priv->emac.drv_version = DRV_VERSION;
priv->emac.set_mac_speed = emac_rockchip_set_mac_speed;
- interface = of_get_phy_mode(dev->of_node);
+ err = of_get_phy_mode(dev->of_node, &interface);
+ if (err)
+ goto out_netdev;
/* RK3036/RK3066/RK3188 SoCs only support RMII */
if (interface != PHY_INTERFACE_MODE_RMII) {
diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c
index 1b1a09095c0d..8f5021091eee 100644
--- a/drivers/net/ethernet/atheros/ag71xx.c
+++ b/drivers/net/ethernet/atheros/ag71xx.c
@@ -1744,10 +1744,9 @@ static int ag71xx_probe(struct platform_device *pdev)
eth_random_addr(ndev->dev_addr);
}
- ag->phy_if_mode = of_get_phy_mode(np);
- if (ag->phy_if_mode < 0) {
+	err = of_get_phy_mode(np, &ag->phy_if_mode);
+ if (err) {
netif_err(ag, probe, ndev, "missing phy-mode property in DT\n");
- err = ag->phy_if_mode;
goto err_free;
}
diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c
index 37752d9514e7..30b455013bf3 100644
--- a/drivers/net/ethernet/aurora/nb8800.c
+++ b/drivers/net/ethernet/aurora/nb8800.c
@@ -1371,8 +1371,8 @@ static int nb8800_probe(struct platform_device *pdev)
priv = netdev_priv(dev);
priv->base = base;
- priv->phy_mode = of_get_phy_mode(pdev->dev.of_node);
- if (priv->phy_mode < 0)
+ ret = of_get_phy_mode(pdev->dev.of_node, &priv->phy_mode);
+ if (ret)
priv->phy_mode = PHY_INTERFACE_MODE_RGMII;
priv->clk = devm_clk_get(&pdev->dev, NULL);
diff --git a/drivers/net/ethernet/aurora/nb8800.h b/drivers/net/ethernet/aurora/nb8800.h
index aacc3cce2cc0..40941fb6065b 100644
--- a/drivers/net/ethernet/aurora/nb8800.h
+++ b/drivers/net/ethernet/aurora/nb8800.h
@@ -287,7 +287,7 @@ struct nb8800_priv {
struct device_node *phy_node;
/* PHY connection type from DT */
- int phy_mode;
+ phy_interface_t phy_mode;
/* Current link status */
int speed;
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 97ab0dd25552..035dbb1b2c98 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -511,9 +511,6 @@ static void b44_stats_update(struct b44 *bp)
*val++ += br32(bp, reg);
}
- /* Pad */
- reg += 8*4UL;
-
for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) {
*val++ += br32(bp, reg);
}
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index a977a459bd20..825af709708e 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -2479,9 +2479,9 @@ static int bcm_sysport_probe(struct platform_device *pdev)
priv->netdev = dev;
priv->pdev = pdev;
- priv->phy_interface = of_get_phy_mode(dn);
+ ret = of_get_phy_mode(dn, &priv->phy_interface);
/* Default to GMII interface mode */
- if ((int)priv->phy_interface < 0)
+ if (ret)
priv->phy_interface = PHY_INTERFACE_MODE_GMII;
/* In the case of a fixed PHY, the DT node associated
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index d10b421ed1f1..5e037a305b83 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -1934,7 +1934,8 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
}
/* select a non-FCoE queue */
- return netdev_pick_tx(dev, skb, NULL) % (BNX2X_NUM_ETH_QUEUES(bp));
+ return netdev_pick_tx(dev, skb, NULL) %
+ (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos);
}
void bnx2x_set_num_queues(struct bnx2x *bp)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
index 226ab29f4cb6..3f8435208bf4 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
@@ -32,31 +32,31 @@
* IRO[142].m2) + ((sbId) * IRO[142].m3))
#define CSTORM_IGU_MODE_OFFSET (IRO[161].base)
#define CSTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \
- (IRO[323].base + ((pfId) * IRO[323].m1))
-#define CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
(IRO[324].base + ((pfId) * IRO[324].m1))
+#define CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
+ (IRO[325].base + ((pfId) * IRO[325].m1))
#define CSTORM_ISCSI_EQ_CONS_OFFSET(pfId, iscsiEqId) \
- (IRO[316].base + ((pfId) * IRO[316].m1) + ((iscsiEqId) * IRO[316].m2))
+ (IRO[317].base + ((pfId) * IRO[317].m1) + ((iscsiEqId) * IRO[317].m2))
#define CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfId, iscsiEqId) \
- (IRO[318].base + ((pfId) * IRO[318].m1) + ((iscsiEqId) * IRO[318].m2))
+ (IRO[319].base + ((pfId) * IRO[319].m1) + ((iscsiEqId) * IRO[319].m2))
#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfId, iscsiEqId) \
- (IRO[317].base + ((pfId) * IRO[317].m1) + ((iscsiEqId) * IRO[317].m2))
+ (IRO[318].base + ((pfId) * IRO[318].m1) + ((iscsiEqId) * IRO[318].m2))
#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfId, iscsiEqId) \
- (IRO[319].base + ((pfId) * IRO[319].m1) + ((iscsiEqId) * IRO[319].m2))
+ (IRO[320].base + ((pfId) * IRO[320].m1) + ((iscsiEqId) * IRO[320].m2))
#define CSTORM_ISCSI_EQ_PROD_OFFSET(pfId, iscsiEqId) \
- (IRO[315].base + ((pfId) * IRO[315].m1) + ((iscsiEqId) * IRO[315].m2))
+ (IRO[316].base + ((pfId) * IRO[316].m1) + ((iscsiEqId) * IRO[316].m2))
#define CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfId, iscsiEqId) \
- (IRO[321].base + ((pfId) * IRO[321].m1) + ((iscsiEqId) * IRO[321].m2))
+ (IRO[322].base + ((pfId) * IRO[322].m1) + ((iscsiEqId) * IRO[322].m2))
#define CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfId, iscsiEqId) \
- (IRO[320].base + ((pfId) * IRO[320].m1) + ((iscsiEqId) * IRO[320].m2))
+ (IRO[321].base + ((pfId) * IRO[321].m1) + ((iscsiEqId) * IRO[321].m2))
#define CSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \
- (IRO[322].base + ((pfId) * IRO[322].m1))
+ (IRO[323].base + ((pfId) * IRO[323].m1))
#define CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
- (IRO[314].base + ((pfId) * IRO[314].m1))
+ (IRO[315].base + ((pfId) * IRO[315].m1))
#define CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
- (IRO[313].base + ((pfId) * IRO[313].m1))
+ (IRO[314].base + ((pfId) * IRO[314].m1))
#define CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
- (IRO[312].base + ((pfId) * IRO[312].m1))
+ (IRO[313].base + ((pfId) * IRO[313].m1))
#define CSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
(IRO[155].base + ((funcId) * IRO[155].m1))
#define CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(pfId) \
@@ -99,81 +99,81 @@
#define TSTORM_FUNC_EN_OFFSET(funcId) \
(IRO[107].base + ((funcId) * IRO[107].m1))
#define TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \
- (IRO[278].base + ((pfId) * IRO[278].m1))
-#define TSTORM_ISCSI_L2_ISCSI_OOO_CID_TABLE_OFFSET(pfId) \
(IRO[279].base + ((pfId) * IRO[279].m1))
-#define TSTORM_ISCSI_L2_ISCSI_OOO_CLIENT_ID_TABLE_OFFSET(pfId) \
+#define TSTORM_ISCSI_L2_ISCSI_OOO_CID_TABLE_OFFSET(pfId) \
(IRO[280].base + ((pfId) * IRO[280].m1))
-#define TSTORM_ISCSI_L2_ISCSI_OOO_PROD_OFFSET(pfId) \
+#define TSTORM_ISCSI_L2_ISCSI_OOO_CLIENT_ID_TABLE_OFFSET(pfId) \
(IRO[281].base + ((pfId) * IRO[281].m1))
+#define TSTORM_ISCSI_L2_ISCSI_OOO_PROD_OFFSET(pfId) \
+ (IRO[282].base + ((pfId) * IRO[282].m1))
#define TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
- (IRO[277].base + ((pfId) * IRO[277].m1))
+ (IRO[278].base + ((pfId) * IRO[278].m1))
#define TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
- (IRO[276].base + ((pfId) * IRO[276].m1))
+ (IRO[277].base + ((pfId) * IRO[277].m1))
#define TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
- (IRO[275].base + ((pfId) * IRO[275].m1))
+ (IRO[276].base + ((pfId) * IRO[276].m1))
#define TSTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
- (IRO[274].base + ((pfId) * IRO[274].m1))
+ (IRO[275].base + ((pfId) * IRO[275].m1))
#define TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfId) \
- (IRO[284].base + ((pfId) * IRO[284].m1))
+ (IRO[285].base + ((pfId) * IRO[285].m1))
#define TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \
- (IRO[270].base + ((pfId) * IRO[270].m1))
-#define TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
(IRO[271].base + ((pfId) * IRO[271].m1))
-#define TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfId) \
+#define TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
(IRO[272].base + ((pfId) * IRO[272].m1))
-#define TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
+#define TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfId) \
(IRO[273].base + ((pfId) * IRO[273].m1))
+#define TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
+ (IRO[274].base + ((pfId) * IRO[274].m1))
#define TSTORM_MAC_FILTER_CONFIG_OFFSET(pfId) \
(IRO[206].base + ((pfId) * IRO[206].m1))
#define TSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
(IRO[109].base + ((funcId) * IRO[109].m1))
#define TSTORM_TCP_MAX_CWND_OFFSET(pfId) \
- (IRO[223].base + ((pfId) * IRO[223].m1))
+ (IRO[224].base + ((pfId) * IRO[224].m1))
#define TSTORM_VF_TO_PF_OFFSET(funcId) \
(IRO[108].base + ((funcId) * IRO[108].m1))
-#define USTORM_AGG_DATA_OFFSET (IRO[212].base)
-#define USTORM_AGG_DATA_SIZE (IRO[212].size)
+#define USTORM_AGG_DATA_OFFSET (IRO[213].base)
+#define USTORM_AGG_DATA_SIZE (IRO[213].size)
#define USTORM_ASSERT_LIST_INDEX_OFFSET (IRO[181].base)
#define USTORM_ASSERT_LIST_OFFSET(assertListEntry) \
(IRO[180].base + ((assertListEntry) * IRO[180].m1))
#define USTORM_ETH_PAUSE_ENABLED_OFFSET(portId) \
(IRO[187].base + ((portId) * IRO[187].m1))
#define USTORM_FCOE_EQ_PROD_OFFSET(pfId) \
- (IRO[325].base + ((pfId) * IRO[325].m1))
+ (IRO[326].base + ((pfId) * IRO[326].m1))
#define USTORM_FUNC_EN_OFFSET(funcId) \
(IRO[182].base + ((funcId) * IRO[182].m1))
#define USTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \
- (IRO[289].base + ((pfId) * IRO[289].m1))
-#define USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
(IRO[290].base + ((pfId) * IRO[290].m1))
+#define USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
+ (IRO[291].base + ((pfId) * IRO[291].m1))
#define USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \
- (IRO[294].base + ((pfId) * IRO[294].m1))
+ (IRO[295].base + ((pfId) * IRO[295].m1))
#define USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfId) \
- (IRO[291].base + ((pfId) * IRO[291].m1))
+ (IRO[292].base + ((pfId) * IRO[292].m1))
#define USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
- (IRO[287].base + ((pfId) * IRO[287].m1))
+ (IRO[288].base + ((pfId) * IRO[288].m1))
#define USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
- (IRO[286].base + ((pfId) * IRO[286].m1))
+ (IRO[287].base + ((pfId) * IRO[287].m1))
#define USTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
- (IRO[285].base + ((pfId) * IRO[285].m1))
+ (IRO[286].base + ((pfId) * IRO[286].m1))
#define USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \
- (IRO[288].base + ((pfId) * IRO[288].m1))
+ (IRO[289].base + ((pfId) * IRO[289].m1))
#define USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfId) \
- (IRO[292].base + ((pfId) * IRO[292].m1))
-#define USTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
(IRO[293].base + ((pfId) * IRO[293].m1))
+#define USTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
+ (IRO[294].base + ((pfId) * IRO[294].m1))
#define USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(pfId) \
(IRO[186].base + ((pfId) * IRO[186].m1))
#define USTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
(IRO[184].base + ((funcId) * IRO[184].m1))
#define USTORM_RX_PRODS_E1X_OFFSET(portId, clientId) \
- (IRO[215].base + ((portId) * IRO[215].m1) + ((clientId) * \
- IRO[215].m2))
+ (IRO[216].base + ((portId) * IRO[216].m1) + ((clientId) * \
+ IRO[216].m2))
#define USTORM_RX_PRODS_E2_OFFSET(qzoneId) \
- (IRO[216].base + ((qzoneId) * IRO[216].m1))
-#define USTORM_TPA_BTR_OFFSET (IRO[213].base)
-#define USTORM_TPA_BTR_SIZE (IRO[213].size)
+ (IRO[217].base + ((qzoneId) * IRO[217].m1))
+#define USTORM_TPA_BTR_OFFSET (IRO[214].base)
+#define USTORM_TPA_BTR_SIZE (IRO[214].size)
#define USTORM_VF_TO_PF_OFFSET(funcId) \
(IRO[183].base + ((funcId) * IRO[183].m1))
#define XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE (IRO[67].base)
@@ -188,39 +188,39 @@
#define XSTORM_FUNC_EN_OFFSET(funcId) \
(IRO[47].base + ((funcId) * IRO[47].m1))
#define XSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \
- (IRO[302].base + ((pfId) * IRO[302].m1))
+ (IRO[303].base + ((pfId) * IRO[303].m1))
#define XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfId) \
- (IRO[305].base + ((pfId) * IRO[305].m1))
-#define XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfId) \
(IRO[306].base + ((pfId) * IRO[306].m1))
-#define XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfId) \
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfId) \
(IRO[307].base + ((pfId) * IRO[307].m1))
-#define XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfId) \
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfId) \
(IRO[308].base + ((pfId) * IRO[308].m1))
-#define XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfId) \
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfId) \
(IRO[309].base + ((pfId) * IRO[309].m1))
-#define XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfId) \
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfId) \
(IRO[310].base + ((pfId) * IRO[310].m1))
-#define XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfId) \
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfId) \
(IRO[311].base + ((pfId) * IRO[311].m1))
+#define XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfId) \
+ (IRO[312].base + ((pfId) * IRO[312].m1))
#define XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
- (IRO[301].base + ((pfId) * IRO[301].m1))
+ (IRO[302].base + ((pfId) * IRO[302].m1))
#define XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
- (IRO[300].base + ((pfId) * IRO[300].m1))
+ (IRO[301].base + ((pfId) * IRO[301].m1))
#define XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
- (IRO[299].base + ((pfId) * IRO[299].m1))
+ (IRO[300].base + ((pfId) * IRO[300].m1))
#define XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \
- (IRO[304].base + ((pfId) * IRO[304].m1))
+ (IRO[305].base + ((pfId) * IRO[305].m1))
#define XSTORM_ISCSI_SQ_SIZE_OFFSET(pfId) \
- (IRO[303].base + ((pfId) * IRO[303].m1))
+ (IRO[304].base + ((pfId) * IRO[304].m1))
#define XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfId) \
- (IRO[298].base + ((pfId) * IRO[298].m1))
+ (IRO[299].base + ((pfId) * IRO[299].m1))
#define XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \
- (IRO[297].base + ((pfId) * IRO[297].m1))
+ (IRO[298].base + ((pfId) * IRO[298].m1))
#define XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfId) \
- (IRO[296].base + ((pfId) * IRO[296].m1))
+ (IRO[297].base + ((pfId) * IRO[297].m1))
#define XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfId) \
- (IRO[295].base + ((pfId) * IRO[295].m1))
+ (IRO[296].base + ((pfId) * IRO[296].m1))
#define XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(pfId) \
(IRO[44].base + ((pfId) * IRO[44].m1))
#define XSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
@@ -233,12 +233,12 @@
#define XSTORM_SPQ_PROD_OFFSET(funcId) \
(IRO[31].base + ((funcId) * IRO[31].m1))
#define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(portId) \
- (IRO[217].base + ((portId) * IRO[217].m1))
-#define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(portId) \
(IRO[218].base + ((portId) * IRO[218].m1))
+#define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(portId) \
+ (IRO[219].base + ((portId) * IRO[219].m1))
#define XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(pfId) \
- (IRO[220].base + (((pfId)>>1) * IRO[220].m1) + (((pfId)&1) * \
- IRO[220].m2))
+ (IRO[221].base + (((pfId)>>1) * IRO[221].m1) + (((pfId)&1) * \
+ IRO[221].m2))
#define XSTORM_VF_TO_PF_OFFSET(funcId) \
(IRO[48].base + ((funcId) * IRO[48].m1))
#define COMMON_ASM_INVALID_ASSERT_OPCODE 0x0
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index 78326a6c0aba..622fadc50316 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -3024,7 +3024,7 @@ struct afex_stats {
#define BCM_5710_FW_MAJOR_VERSION 7
#define BCM_5710_FW_MINOR_VERSION 13
-#define BCM_5710_FW_REVISION_VERSION 11
+#define BCM_5710_FW_REVISION_VERSION 15
#define BCM_5710_FW_ENGINEERING_VERSION 0
#define BCM_5710_FW_COMPILE_FLAGS 1
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index d581d0ae6584..9638d65d8261 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -5611,9 +5611,9 @@ static int bnx2x_get_link_speed_duplex(struct bnx2x_phy *phy,
return 0;
}
-static int bnx2x_link_settings_status(struct bnx2x_phy *phy,
- struct link_params *params,
- struct link_vars *vars)
+static u8 bnx2x_link_settings_status(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
@@ -5685,7 +5685,7 @@ static int bnx2x_link_settings_status(struct bnx2x_phy *phy,
return rc;
}
-static int bnx2x_warpcore_read_status(struct bnx2x_phy *phy,
+static u8 bnx2x_warpcore_read_status(struct bnx2x_phy *phy,
struct link_params *params,
struct link_vars *vars)
{
@@ -7364,9 +7364,9 @@ static void bnx2x_8073_specific_func(struct bnx2x_phy *phy,
}
}
-static int bnx2x_8073_config_init(struct bnx2x_phy *phy,
- struct link_params *params,
- struct link_vars *vars)
+static void bnx2x_8073_config_init(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
u16 val = 0, tmp1;
@@ -7427,7 +7427,7 @@ static int bnx2x_8073_config_init(struct bnx2x_phy *phy,
if (params->loopback_mode == LOOPBACK_EXT) {
bnx2x_807x_force_10G(bp, phy);
DP(NETIF_MSG_LINK, "Forced speed 10G on 807X\n");
- return 0;
+ return;
} else {
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_BCM_CTRL, 0x0002);
@@ -7509,7 +7509,6 @@ static int bnx2x_8073_config_init(struct bnx2x_phy *phy,
bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200);
DP(NETIF_MSG_LINK, "807x Autoneg Restart: Advertise 1G=%x, 10G=%x\n",
((val & (1<<5)) > 0), ((val & (1<<7)) > 0));
- return 0;
}
static u8 bnx2x_8073_read_status(struct bnx2x_phy *phy,
@@ -7676,9 +7675,9 @@ static void bnx2x_8073_link_reset(struct bnx2x_phy *phy,
/******************************************************************/
/* BCM8705 PHY SECTION */
/******************************************************************/
-static int bnx2x_8705_config_init(struct bnx2x_phy *phy,
- struct link_params *params,
- struct link_vars *vars)
+static void bnx2x_8705_config_init(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
DP(NETIF_MSG_LINK, "init 8705\n");
@@ -7700,7 +7699,6 @@ static int bnx2x_8705_config_init(struct bnx2x_phy *phy,
MDIO_WIS_DEVAD, MDIO_WIS_REG_LASI_CNTL, 0x1);
/* BCM8705 doesn't have microcode, hence the 0 */
bnx2x_save_spirom_version(bp, params->port, params->shmem_base, 0);
- return 0;
}
static u8 bnx2x_8705_read_status(struct bnx2x_phy *phy,
@@ -8887,9 +8885,9 @@ static u8 bnx2x_8706_8726_read_status(struct bnx2x_phy *phy,
/******************************************************************/
/* BCM8706 PHY SECTION */
/******************************************************************/
-static u8 bnx2x_8706_config_init(struct bnx2x_phy *phy,
- struct link_params *params,
- struct link_vars *vars)
+static void bnx2x_8706_config_init(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
{
u32 tx_en_mode;
u16 cnt, val, tmp1;
@@ -8989,13 +8987,11 @@ static u8 bnx2x_8706_config_init(struct bnx2x_phy *phy,
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_DIGITAL_CTRL, tmp1);
}
-
- return 0;
}
-static int bnx2x_8706_read_status(struct bnx2x_phy *phy,
- struct link_params *params,
- struct link_vars *vars)
+static u8 bnx2x_8706_read_status(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
{
return bnx2x_8706_8726_read_status(phy, params, vars);
}
@@ -9070,9 +9066,9 @@ static u8 bnx2x_8726_read_status(struct bnx2x_phy *phy,
}
-static int bnx2x_8726_config_init(struct bnx2x_phy *phy,
- struct link_params *params,
- struct link_vars *vars)
+static void bnx2x_8726_config_init(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
DP(NETIF_MSG_LINK, "Initializing BCM8726\n");
@@ -9150,9 +9146,6 @@ static int bnx2x_8726_config_init(struct bnx2x_phy *phy,
MDIO_PMA_REG_8726_TX_CTRL2,
phy->tx_preemphasis[1]);
}
-
- return 0;
-
}
static void bnx2x_8726_link_reset(struct bnx2x_phy *phy,
@@ -9288,9 +9281,9 @@ static void bnx2x_8727_config_speed(struct bnx2x_phy *phy,
}
}
-static int bnx2x_8727_config_init(struct bnx2x_phy *phy,
- struct link_params *params,
- struct link_vars *vars)
+static void bnx2x_8727_config_init(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
{
u32 tx_en_mode;
u16 tmp1, mod_abs, tmp2;
@@ -9370,8 +9363,6 @@ static int bnx2x_8727_config_init(struct bnx2x_phy *phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER,
(tmp2 & 0x7fff));
}
-
- return 0;
}
static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy,
@@ -9946,9 +9937,9 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
return 0;
}
-static int bnx2x_8481_config_init(struct bnx2x_phy *phy,
- struct link_params *params,
- struct link_vars *vars)
+static void bnx2x_8481_config_init(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
/* Restore normal power mode*/
@@ -9960,7 +9951,7 @@ static int bnx2x_8481_config_init(struct bnx2x_phy *phy,
bnx2x_wait_reset_complete(bp, phy, params);
bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15);
- return bnx2x_848xx_cmn_config_init(phy, params, vars);
+ bnx2x_848xx_cmn_config_init(phy, params, vars);
}
#define PHY848xx_CMDHDLR_WAIT 300
@@ -10210,8 +10201,8 @@ static u8 bnx2x_84833_get_reset_gpios(struct bnx2x *bp,
return reset_gpios;
}
-static int bnx2x_84833_hw_reset_phy(struct bnx2x_phy *phy,
- struct link_params *params)
+static void bnx2x_84833_hw_reset_phy(struct bnx2x_phy *phy,
+ struct link_params *params)
{
struct bnx2x *bp = params->bp;
u8 reset_gpios;
@@ -10239,8 +10230,6 @@ static int bnx2x_84833_hw_reset_phy(struct bnx2x_phy *phy,
udelay(10);
DP(NETIF_MSG_LINK, "84833 hw reset on pin values 0x%x\n",
reset_gpios);
-
- return 0;
}
static int bnx2x_8483x_disable_eee(struct bnx2x_phy *phy,
@@ -10283,9 +10272,9 @@ static int bnx2x_8483x_enable_eee(struct bnx2x_phy *phy,
}
#define PHY84833_CONSTANT_LATENCY 1193
-static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
- struct link_params *params,
- struct link_vars *vars)
+static void bnx2x_848x3_config_init(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
u8 port, initialize = 1;
@@ -10430,7 +10419,7 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
if (rc) {
DP(NETIF_MSG_LINK, "Failed to configure EEE timers\n");
bnx2x_8483x_disable_eee(phy, params, vars);
- return rc;
+ return;
}
if ((phy->req_duplex == DUPLEX_FULL) &&
@@ -10442,7 +10431,7 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
rc = bnx2x_8483x_disable_eee(phy, params, vars);
if (rc) {
DP(NETIF_MSG_LINK, "Failed to set EEE advertisement\n");
- return rc;
+ return;
}
} else {
vars->eee_status &= ~SHMEM_EEE_SUPPORTED_MASK;
@@ -10481,7 +10470,6 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
MDIO_84833_TOP_CFG_XGPHY_STRAP1,
(u16)~MDIO_84833_SUPER_ISOLATE);
}
- return rc;
}
static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy,
@@ -11038,9 +11026,9 @@ static void bnx2x_54618se_specific_func(struct bnx2x_phy *phy,
}
}
-static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
- struct link_params *params,
- struct link_vars *vars)
+static void bnx2x_54618se_config_init(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
u8 port;
@@ -11240,8 +11228,6 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
bnx2x_cl22_write(bp, phy,
MDIO_PMA_REG_CTRL, autoneg_val);
-
- return 0;
}
@@ -11465,9 +11451,9 @@ static void bnx2x_7101_config_loopback(struct bnx2x_phy *phy,
MDIO_XS_DEVAD, MDIO_XS_SFX7101_XGXS_TEST1, 0x100);
}
-static int bnx2x_7101_config_init(struct bnx2x_phy *phy,
- struct link_params *params,
- struct link_vars *vars)
+static void bnx2x_7101_config_init(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
{
u16 fw_ver1, fw_ver2, val;
struct bnx2x *bp = params->bp;
@@ -11502,7 +11488,6 @@ static int bnx2x_7101_config_init(struct bnx2x_phy *phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_7101_VER2, &fw_ver2);
bnx2x_save_spirom_version(bp, params->port,
(u32)(fw_ver1<<16 | fw_ver2), phy->ver_addr);
- return 0;
}
static u8 bnx2x_7101_read_status(struct bnx2x_phy *phy,
@@ -11636,14 +11621,14 @@ static const struct bnx2x_phy phy_null = {
.speed_cap_mask = 0,
.req_duplex = 0,
.rsrv = 0,
- .config_init = (config_init_t)NULL,
- .read_status = (read_status_t)NULL,
- .link_reset = (link_reset_t)NULL,
- .config_loopback = (config_loopback_t)NULL,
- .format_fw_ver = (format_fw_ver_t)NULL,
- .hw_reset = (hw_reset_t)NULL,
- .set_link_led = (set_link_led_t)NULL,
- .phy_specific_func = (phy_specific_func_t)NULL
+ .config_init = NULL,
+ .read_status = NULL,
+ .link_reset = NULL,
+ .config_loopback = NULL,
+ .format_fw_ver = NULL,
+ .hw_reset = NULL,
+ .set_link_led = NULL,
+ .phy_specific_func = NULL
};
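The casts removed above silenced mismatched function-pointer types rather than fixing them; calling through a pointer of the wrong type is undefined behaviour (and is likely what -Wcast-function-type flagged). With the signatures corrected earlier in this patch, the ops tables assign directly. An illustrative fragment (function name hypothetical):

static u8 example_read_status(struct bnx2x_phy *phy,
			      struct link_params *params,
			      struct link_vars *vars)
{
	return 0;	/* link down; signature matches read_status_t */
}

	/* .read_status = example_read_status,   -- no cast required */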
static const struct bnx2x_phy phy_serdes = {
@@ -11671,14 +11656,14 @@ static const struct bnx2x_phy phy_serdes = {
.speed_cap_mask = 0,
.req_duplex = 0,
.rsrv = 0,
- .config_init = (config_init_t)bnx2x_xgxs_config_init,
- .read_status = (read_status_t)bnx2x_link_settings_status,
- .link_reset = (link_reset_t)bnx2x_int_link_reset,
- .config_loopback = (config_loopback_t)NULL,
- .format_fw_ver = (format_fw_ver_t)NULL,
- .hw_reset = (hw_reset_t)NULL,
- .set_link_led = (set_link_led_t)NULL,
- .phy_specific_func = (phy_specific_func_t)NULL
+ .config_init = bnx2x_xgxs_config_init,
+ .read_status = bnx2x_link_settings_status,
+ .link_reset = bnx2x_int_link_reset,
+ .config_loopback = NULL,
+ .format_fw_ver = NULL,
+ .hw_reset = NULL,
+ .set_link_led = NULL,
+ .phy_specific_func = NULL
};
static const struct bnx2x_phy phy_xgxs = {
@@ -11707,14 +11692,14 @@ static const struct bnx2x_phy phy_xgxs = {
.speed_cap_mask = 0,
.req_duplex = 0,
.rsrv = 0,
- .config_init = (config_init_t)bnx2x_xgxs_config_init,
- .read_status = (read_status_t)bnx2x_link_settings_status,
- .link_reset = (link_reset_t)bnx2x_int_link_reset,
- .config_loopback = (config_loopback_t)bnx2x_set_xgxs_loopback,
- .format_fw_ver = (format_fw_ver_t)NULL,
- .hw_reset = (hw_reset_t)NULL,
- .set_link_led = (set_link_led_t)NULL,
- .phy_specific_func = (phy_specific_func_t)bnx2x_xgxs_specific_func
+ .config_init = bnx2x_xgxs_config_init,
+ .read_status = bnx2x_link_settings_status,
+ .link_reset = bnx2x_int_link_reset,
+ .config_loopback = bnx2x_set_xgxs_loopback,
+ .format_fw_ver = NULL,
+ .hw_reset = NULL,
+ .set_link_led = NULL,
+ .phy_specific_func = bnx2x_xgxs_specific_func
};
static const struct bnx2x_phy phy_warpcore = {
.type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT,
@@ -11745,14 +11730,14 @@ static const struct bnx2x_phy phy_warpcore = {
.speed_cap_mask = 0,
/* req_duplex = */0,
/* rsrv = */0,
- .config_init = (config_init_t)bnx2x_warpcore_config_init,
- .read_status = (read_status_t)bnx2x_warpcore_read_status,
- .link_reset = (link_reset_t)bnx2x_warpcore_link_reset,
- .config_loopback = (config_loopback_t)bnx2x_set_warpcore_loopback,
- .format_fw_ver = (format_fw_ver_t)NULL,
- .hw_reset = (hw_reset_t)bnx2x_warpcore_hw_reset,
- .set_link_led = (set_link_led_t)NULL,
- .phy_specific_func = (phy_specific_func_t)NULL
+ .config_init = bnx2x_warpcore_config_init,
+ .read_status = bnx2x_warpcore_read_status,
+ .link_reset = bnx2x_warpcore_link_reset,
+ .config_loopback = bnx2x_set_warpcore_loopback,
+ .format_fw_ver = NULL,
+ .hw_reset = bnx2x_warpcore_hw_reset,
+ .set_link_led = NULL,
+ .phy_specific_func = NULL
};
@@ -11776,14 +11761,14 @@ static const struct bnx2x_phy phy_7101 = {
.speed_cap_mask = 0,
.req_duplex = 0,
.rsrv = 0,
- .config_init = (config_init_t)bnx2x_7101_config_init,
- .read_status = (read_status_t)bnx2x_7101_read_status,
- .link_reset = (link_reset_t)bnx2x_common_ext_link_reset,
- .config_loopback = (config_loopback_t)bnx2x_7101_config_loopback,
- .format_fw_ver = (format_fw_ver_t)bnx2x_7101_format_ver,
- .hw_reset = (hw_reset_t)bnx2x_7101_hw_reset,
- .set_link_led = (set_link_led_t)bnx2x_7101_set_link_led,
- .phy_specific_func = (phy_specific_func_t)NULL
+ .config_init = bnx2x_7101_config_init,
+ .read_status = bnx2x_7101_read_status,
+ .link_reset = bnx2x_common_ext_link_reset,
+ .config_loopback = bnx2x_7101_config_loopback,
+ .format_fw_ver = bnx2x_7101_format_ver,
+ .hw_reset = bnx2x_7101_hw_reset,
+ .set_link_led = bnx2x_7101_set_link_led,
+ .phy_specific_func = NULL
};
static const struct bnx2x_phy phy_8073 = {
.type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
@@ -11807,14 +11792,14 @@ static const struct bnx2x_phy phy_8073 = {
.speed_cap_mask = 0,
.req_duplex = 0,
.rsrv = 0,
- .config_init = (config_init_t)bnx2x_8073_config_init,
- .read_status = (read_status_t)bnx2x_8073_read_status,
- .link_reset = (link_reset_t)bnx2x_8073_link_reset,
- .config_loopback = (config_loopback_t)NULL,
- .format_fw_ver = (format_fw_ver_t)bnx2x_format_ver,
- .hw_reset = (hw_reset_t)NULL,
- .set_link_led = (set_link_led_t)NULL,
- .phy_specific_func = (phy_specific_func_t)bnx2x_8073_specific_func
+ .config_init = bnx2x_8073_config_init,
+ .read_status = bnx2x_8073_read_status,
+ .link_reset = bnx2x_8073_link_reset,
+ .config_loopback = NULL,
+ .format_fw_ver = bnx2x_format_ver,
+ .hw_reset = NULL,
+ .set_link_led = NULL,
+ .phy_specific_func = bnx2x_8073_specific_func
};
static const struct bnx2x_phy phy_8705 = {
.type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705,
@@ -11835,14 +11820,14 @@ static const struct bnx2x_phy phy_8705 = {
.speed_cap_mask = 0,
.req_duplex = 0,
.rsrv = 0,
- .config_init = (config_init_t)bnx2x_8705_config_init,
- .read_status = (read_status_t)bnx2x_8705_read_status,
- .link_reset = (link_reset_t)bnx2x_common_ext_link_reset,
- .config_loopback = (config_loopback_t)NULL,
- .format_fw_ver = (format_fw_ver_t)bnx2x_null_format_ver,
- .hw_reset = (hw_reset_t)NULL,
- .set_link_led = (set_link_led_t)NULL,
- .phy_specific_func = (phy_specific_func_t)NULL
+ .config_init = bnx2x_8705_config_init,
+ .read_status = bnx2x_8705_read_status,
+ .link_reset = bnx2x_common_ext_link_reset,
+ .config_loopback = NULL,
+ .format_fw_ver = bnx2x_null_format_ver,
+ .hw_reset = NULL,
+ .set_link_led = NULL,
+ .phy_specific_func = NULL
};
static const struct bnx2x_phy phy_8706 = {
.type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706,
@@ -11864,14 +11849,14 @@ static const struct bnx2x_phy phy_8706 = {
.speed_cap_mask = 0,
.req_duplex = 0,
.rsrv = 0,
- .config_init = (config_init_t)bnx2x_8706_config_init,
- .read_status = (read_status_t)bnx2x_8706_read_status,
- .link_reset = (link_reset_t)bnx2x_common_ext_link_reset,
- .config_loopback = (config_loopback_t)NULL,
- .format_fw_ver = (format_fw_ver_t)bnx2x_format_ver,
- .hw_reset = (hw_reset_t)NULL,
- .set_link_led = (set_link_led_t)NULL,
- .phy_specific_func = (phy_specific_func_t)NULL
+ .config_init = bnx2x_8706_config_init,
+ .read_status = bnx2x_8706_read_status,
+ .link_reset = bnx2x_common_ext_link_reset,
+ .config_loopback = NULL,
+ .format_fw_ver = bnx2x_format_ver,
+ .hw_reset = NULL,
+ .set_link_led = NULL,
+ .phy_specific_func = NULL
};
static const struct bnx2x_phy phy_8726 = {
@@ -11896,14 +11881,14 @@ static const struct bnx2x_phy phy_8726 = {
.speed_cap_mask = 0,
.req_duplex = 0,
.rsrv = 0,
- .config_init = (config_init_t)bnx2x_8726_config_init,
- .read_status = (read_status_t)bnx2x_8726_read_status,
- .link_reset = (link_reset_t)bnx2x_8726_link_reset,
- .config_loopback = (config_loopback_t)bnx2x_8726_config_loopback,
- .format_fw_ver = (format_fw_ver_t)bnx2x_format_ver,
- .hw_reset = (hw_reset_t)NULL,
- .set_link_led = (set_link_led_t)NULL,
- .phy_specific_func = (phy_specific_func_t)NULL
+ .config_init = bnx2x_8726_config_init,
+ .read_status = bnx2x_8726_read_status,
+ .link_reset = bnx2x_8726_link_reset,
+ .config_loopback = bnx2x_8726_config_loopback,
+ .format_fw_ver = bnx2x_format_ver,
+ .hw_reset = NULL,
+ .set_link_led = NULL,
+ .phy_specific_func = NULL
};
static const struct bnx2x_phy phy_8727 = {
@@ -11927,14 +11912,14 @@ static const struct bnx2x_phy phy_8727 = {
.speed_cap_mask = 0,
.req_duplex = 0,
.rsrv = 0,
- .config_init = (config_init_t)bnx2x_8727_config_init,
- .read_status = (read_status_t)bnx2x_8727_read_status,
- .link_reset = (link_reset_t)bnx2x_8727_link_reset,
- .config_loopback = (config_loopback_t)NULL,
- .format_fw_ver = (format_fw_ver_t)bnx2x_format_ver,
- .hw_reset = (hw_reset_t)bnx2x_8727_hw_reset,
- .set_link_led = (set_link_led_t)bnx2x_8727_set_link_led,
- .phy_specific_func = (phy_specific_func_t)bnx2x_8727_specific_func
+ .config_init = bnx2x_8727_config_init,
+ .read_status = bnx2x_8727_read_status,
+ .link_reset = bnx2x_8727_link_reset,
+ .config_loopback = NULL,
+ .format_fw_ver = bnx2x_format_ver,
+ .hw_reset = bnx2x_8727_hw_reset,
+ .set_link_led = bnx2x_8727_set_link_led,
+ .phy_specific_func = bnx2x_8727_specific_func
};
static const struct bnx2x_phy phy_8481 = {
.type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
@@ -11962,14 +11947,14 @@ static const struct bnx2x_phy phy_8481 = {
.speed_cap_mask = 0,
.req_duplex = 0,
.rsrv = 0,
- .config_init = (config_init_t)bnx2x_8481_config_init,
- .read_status = (read_status_t)bnx2x_848xx_read_status,
- .link_reset = (link_reset_t)bnx2x_8481_link_reset,
- .config_loopback = (config_loopback_t)NULL,
- .format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver,
- .hw_reset = (hw_reset_t)bnx2x_8481_hw_reset,
- .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led,
- .phy_specific_func = (phy_specific_func_t)NULL
+ .config_init = bnx2x_8481_config_init,
+ .read_status = bnx2x_848xx_read_status,
+ .link_reset = bnx2x_8481_link_reset,
+ .config_loopback = NULL,
+ .format_fw_ver = bnx2x_848xx_format_ver,
+ .hw_reset = bnx2x_8481_hw_reset,
+ .set_link_led = bnx2x_848xx_set_link_led,
+ .phy_specific_func = NULL
};
static const struct bnx2x_phy phy_84823 = {
@@ -11999,14 +11984,14 @@ static const struct bnx2x_phy phy_84823 = {
.speed_cap_mask = 0,
.req_duplex = 0,
.rsrv = 0,
- .config_init = (config_init_t)bnx2x_848x3_config_init,
- .read_status = (read_status_t)bnx2x_848xx_read_status,
- .link_reset = (link_reset_t)bnx2x_848x3_link_reset,
- .config_loopback = (config_loopback_t)NULL,
- .format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver,
- .hw_reset = (hw_reset_t)NULL,
- .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led,
- .phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func
+ .config_init = bnx2x_848x3_config_init,
+ .read_status = bnx2x_848xx_read_status,
+ .link_reset = bnx2x_848x3_link_reset,
+ .config_loopback = NULL,
+ .format_fw_ver = bnx2x_848xx_format_ver,
+ .hw_reset = NULL,
+ .set_link_led = bnx2x_848xx_set_link_led,
+ .phy_specific_func = bnx2x_848xx_specific_func
};
static const struct bnx2x_phy phy_84833 = {
@@ -12034,14 +12019,14 @@ static const struct bnx2x_phy phy_84833 = {
.speed_cap_mask = 0,
.req_duplex = 0,
.rsrv = 0,
- .config_init = (config_init_t)bnx2x_848x3_config_init,
- .read_status = (read_status_t)bnx2x_848xx_read_status,
- .link_reset = (link_reset_t)bnx2x_848x3_link_reset,
- .config_loopback = (config_loopback_t)NULL,
- .format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver,
- .hw_reset = (hw_reset_t)bnx2x_84833_hw_reset_phy,
- .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led,
- .phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func
+ .config_init = bnx2x_848x3_config_init,
+ .read_status = bnx2x_848xx_read_status,
+ .link_reset = bnx2x_848x3_link_reset,
+ .config_loopback = NULL,
+ .format_fw_ver = bnx2x_848xx_format_ver,
+ .hw_reset = bnx2x_84833_hw_reset_phy,
+ .set_link_led = bnx2x_848xx_set_link_led,
+ .phy_specific_func = bnx2x_848xx_specific_func
};
static const struct bnx2x_phy phy_84834 = {
@@ -12068,14 +12053,14 @@ static const struct bnx2x_phy phy_84834 = {
.speed_cap_mask = 0,
.req_duplex = 0,
.rsrv = 0,
- .config_init = (config_init_t)bnx2x_848x3_config_init,
- .read_status = (read_status_t)bnx2x_848xx_read_status,
- .link_reset = (link_reset_t)bnx2x_848x3_link_reset,
- .config_loopback = (config_loopback_t)NULL,
- .format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver,
- .hw_reset = (hw_reset_t)bnx2x_84833_hw_reset_phy,
- .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led,
- .phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func
+ .config_init = bnx2x_848x3_config_init,
+ .read_status = bnx2x_848xx_read_status,
+ .link_reset = bnx2x_848x3_link_reset,
+ .config_loopback = NULL,
+ .format_fw_ver = bnx2x_848xx_format_ver,
+ .hw_reset = bnx2x_84833_hw_reset_phy,
+ .set_link_led = bnx2x_848xx_set_link_led,
+ .phy_specific_func = bnx2x_848xx_specific_func
};
static const struct bnx2x_phy phy_84858 = {
@@ -12102,14 +12087,14 @@ static const struct bnx2x_phy phy_84858 = {
.speed_cap_mask = 0,
.req_duplex = 0,
.rsrv = 0,
- .config_init = (config_init_t)bnx2x_848x3_config_init,
- .read_status = (read_status_t)bnx2x_848xx_read_status,
- .link_reset = (link_reset_t)bnx2x_848x3_link_reset,
- .config_loopback = (config_loopback_t)NULL,
- .format_fw_ver = (format_fw_ver_t)bnx2x_8485x_format_ver,
- .hw_reset = (hw_reset_t)bnx2x_84833_hw_reset_phy,
- .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led,
- .phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func
+ .config_init = bnx2x_848x3_config_init,
+ .read_status = bnx2x_848xx_read_status,
+ .link_reset = bnx2x_848x3_link_reset,
+ .config_loopback = NULL,
+ .format_fw_ver = bnx2x_8485x_format_ver,
+ .hw_reset = bnx2x_84833_hw_reset_phy,
+ .set_link_led = bnx2x_848xx_set_link_led,
+ .phy_specific_func = bnx2x_848xx_specific_func
};
static const struct bnx2x_phy phy_54618se = {
@@ -12136,14 +12121,14 @@ static const struct bnx2x_phy phy_54618se = {
.speed_cap_mask = 0,
/* req_duplex = */0,
/* rsrv = */0,
- .config_init = (config_init_t)bnx2x_54618se_config_init,
- .read_status = (read_status_t)bnx2x_54618se_read_status,
- .link_reset = (link_reset_t)bnx2x_54618se_link_reset,
- .config_loopback = (config_loopback_t)bnx2x_54618se_config_loopback,
- .format_fw_ver = (format_fw_ver_t)NULL,
- .hw_reset = (hw_reset_t)NULL,
- .set_link_led = (set_link_led_t)bnx2x_5461x_set_link_led,
- .phy_specific_func = (phy_specific_func_t)bnx2x_54618se_specific_func
+ .config_init = bnx2x_54618se_config_init,
+ .read_status = bnx2x_54618se_read_status,
+ .link_reset = bnx2x_54618se_link_reset,
+ .config_loopback = bnx2x_54618se_config_loopback,
+ .format_fw_ver = NULL,
+ .hw_reset = NULL,
+ .set_link_led = bnx2x_5461x_set_link_led,
+ .phy_specific_func = bnx2x_54618se_specific_func
};
/*****************************************************************/
/* */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
index 7115f5025664..cae03c89dc73 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
@@ -127,15 +127,15 @@ struct link_vars;
struct link_params;
struct bnx2x_phy;
-typedef u8 (*config_init_t)(struct bnx2x_phy *phy, struct link_params *params,
- struct link_vars *vars);
+typedef void (*config_init_t)(struct bnx2x_phy *phy, struct link_params *params,
+ struct link_vars *vars);
typedef u8 (*read_status_t)(struct bnx2x_phy *phy, struct link_params *params,
struct link_vars *vars);
typedef void (*link_reset_t)(struct bnx2x_phy *phy,
struct link_params *params);
typedef void (*config_loopback_t)(struct bnx2x_phy *phy,
struct link_params *params);
-typedef u8 (*format_fw_ver_t)(u32 raw, u8 *str, u16 *len);
+typedef int (*format_fw_ver_t)(u32 raw, u8 *str, u16 *len);
typedef void (*hw_reset_t)(struct bnx2x_phy *phy, struct link_params *params);
typedef void (*set_link_led_t)(struct bnx2x_phy *phy,
struct link_params *params, u8 mode);
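The two typedef changes above bring the callback prototypes in line with what the PHY routines actually return, which is what lets the explicit casts in the phy_* tables earlier in this patch be dropped. Calling a function through a pointer of an incompatible type is undefined behavior in C and defeats Control-Flow Integrity checking, which validates the target prototype at every indirect call. A minimal sketch of the corrected pattern (struct and function names here are illustrative, not taken from the driver):

    struct phy;
    struct params;

    /* Callback type and callee share the exact same prototype. */
    typedef void (*link_reset_fn)(struct phy *phy, struct params *p);

    struct phy_ops {
            link_reset_fn link_reset;
    };

    static void my_link_reset(struct phy *phy, struct params *p)
    {
            /* device-specific reset would go here */
    }

    static const struct phy_ops ops = {
            .link_reset = my_link_reset, /* types match: no cast needed */
    };

Before the fix, a mismatched return type forced initializers such as ".link_reset = (link_reset_fn)fn", which compile but leave the indirect call undefined.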
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 0edbb0a76847..5097a44686b3 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -2397,15 +2397,21 @@ static int bnx2x_set_pf_tx_switching(struct bnx2x *bp, bool enable)
/* send the ramrod on all the queues of the PF */
for_each_eth_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
+ int tx_idx;
/* Set the appropriate Queue object */
q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
- /* Update the Queue state */
- rc = bnx2x_queue_state_change(bp, &q_params);
- if (rc) {
- BNX2X_ERR("Failed to configure Tx switching\n");
- return rc;
+ for (tx_idx = FIRST_TX_COS_INDEX;
+ tx_idx < fp->max_cos; tx_idx++) {
+ q_params.params.update.cid_index = tx_idx;
+
+ /* Update the Queue state */
+ rc = bnx2x_queue_state_change(bp, &q_params);
+ if (rc) {
+ BNX2X_ERR("Failed to configure Tx switching\n");
+ return rc;
+ }
}
}
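The Tx-switching fix above replaces a single queue state change with one ramrod per traffic class. Stripped of diff markers, the added loop is:

    /* Replay the same update for every Tx class-of-service ring of
     * the queue, selecting the ring via cid_index each time, so all
     * Tx rings pick up the new switching configuration.
     */
    for (tx_idx = FIRST_TX_COS_INDEX; tx_idx < fp->max_cos; tx_idx++) {
            q_params.params.update.cid_index = tx_idx;
            rc = bnx2x_queue_state_change(bp, &q_params);
            if (rc)
                    return rc;
    }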
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 04ec909e06df..85983f0e3134 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -250,10 +250,12 @@ static const u16 bnxt_vf_req_snif[] = {
static const u16 bnxt_async_events_arr[] = {
ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
+ ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE,
ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
+ ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE,
ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY,
ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY,
};
@@ -1767,8 +1769,12 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
rc = -EIO;
if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
- netdev_warn(bp->dev, "RX buffer error %x\n", rx_err);
- bnxt_sched_reset(bp, rxr);
+ bnapi->cp_ring.rx_buf_errors++;
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5)) {
+ netdev_warn(bp->dev, "RX buffer error %x\n",
+ rx_err);
+ bnxt_sched_reset(bp, rxr);
+ }
}
goto next_rx_no_len;
}
@@ -1964,6 +1970,10 @@ static int bnxt_async_event_process(struct bnxt *bp,
set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
}
/* fall through */
+ case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE:
+ case ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE:
+ set_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, &bp->sp_event);
+ /* fall through */
case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
break;
@@ -2684,6 +2694,9 @@ static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
if (!rmem->pg_arr[i])
return -ENOMEM;
+ if (rmem->init_val)
+ memset(rmem->pg_arr[i], rmem->init_val,
+ rmem->page_size);
if (rmem->nr_pages > 1 || rmem->depth > 0) {
if (i == rmem->nr_pages - 2 &&
(rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
@@ -3171,13 +3184,8 @@ static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
bnxt_init_rxbd_pages(ring, type);
if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
- rxr->xdp_prog = bpf_prog_add(bp->xdp_prog, 1);
- if (IS_ERR(rxr->xdp_prog)) {
- int rc = PTR_ERR(rxr->xdp_prog);
-
- rxr->xdp_prog = NULL;
- return rc;
- }
+ bpf_prog_add(bp->xdp_prog, 1);
+ rxr->xdp_prog = bp->xdp_prog;
}
prod = rxr->rx_prod;
for (i = 0; i < bp->rx_ring_size; i++) {
@@ -4274,6 +4282,11 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
/* Wait until hwrm response cmpl interrupt is processed */
while (bp->hwrm_intr_seq_id != (u16)~seq_id &&
i++ < tmo_count) {
+ /* Abort the wait for completion if the FW health
+ * check has failed.
+ */
+ if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
+ return -EBUSY;
/* on first few passes, just barely sleep */
if (i < HWRM_SHORT_TIMEOUT_COUNTER)
usleep_range(HWRM_SHORT_MIN_TIMEOUT,
@@ -4297,6 +4310,11 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
/* Check if response len is updated */
for (i = 0; i < tmo_count; i++) {
+ /* Abort the wait for completion if the FW health
+ * check has failed.
+ */
+ if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
+ return -EBUSY;
len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
HWRM_RESP_LEN_SFT;
if (len)
@@ -4385,59 +4403,29 @@ int hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
return rc;
}
-int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap,
- int bmap_size)
+int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size,
+ bool async_only)
{
+ struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
struct hwrm_func_drv_rgtr_input req = {0};
DECLARE_BITMAP(async_events_bmap, 256);
u32 *events = (u32 *)async_events_bmap;
- int i;
-
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
-
- req.enables =
- cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
-
- memset(async_events_bmap, 0, sizeof(async_events_bmap));
- for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) {
- u16 event_id = bnxt_async_events_arr[i];
-
- if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY &&
- !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
- continue;
- __set_bit(bnxt_async_events_arr[i], async_events_bmap);
- }
- if (bmap && bmap_size) {
- for (i = 0; i < bmap_size; i++) {
- if (test_bit(i, bmap))
- __set_bit(i, async_events_bmap);
- }
- }
-
- for (i = 0; i < 8; i++)
- req.async_event_fwd[i] |= cpu_to_le32(events[i]);
-
- return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
-}
-
-static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
-{
- struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
- struct hwrm_func_drv_rgtr_input req = {0};
u32 flags;
- int rc;
+ int rc, i;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
req.enables =
cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
- FUNC_DRV_RGTR_REQ_ENABLES_VER);
+ FUNC_DRV_RGTR_REQ_ENABLES_VER |
+ FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE |
FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT;
if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
- flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT;
+ flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT |
+ FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT;
req.flags = cpu_to_le32(flags);
req.ver_maj_8b = DRV_VER_MAJ;
req.ver_min_8b = DRV_VER_MIN;
@@ -4471,11 +4459,36 @@ static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
req.flags |= cpu_to_le32(
FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE);
+ memset(async_events_bmap, 0, sizeof(async_events_bmap));
+ for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) {
+ u16 event_id = bnxt_async_events_arr[i];
+
+ if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY &&
+ !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
+ continue;
+ __set_bit(bnxt_async_events_arr[i], async_events_bmap);
+ }
+ if (bmap && bmap_size) {
+ for (i = 0; i < bmap_size; i++) {
+ if (test_bit(i, bmap))
+ __set_bit(i, async_events_bmap);
+ }
+ }
+ for (i = 0; i < 8; i++)
+ req.async_event_fwd[i] |= cpu_to_le32(events[i]);
+
+ if (async_only)
+ req.enables =
+ cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
+
mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- if (!rc && (resp->flags &
- cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED)))
- bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
+ if (!rc) {
+ set_bit(BNXT_STATE_DRV_REGISTERED, &bp->state);
+ if (resp->flags &
+ cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED))
+ bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
+ }
mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
}
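The merged registration builds a 256-bit bitmap of async event IDs and forwards it to firmware as eight little-endian 32-bit words. The conversion, reduced to its core (same helpers as in the hunk above):

    DECLARE_BITMAP(async_events_bmap, 256);
    u32 *events = (u32 *)async_events_bmap;
    int i;

    memset(async_events_bmap, 0, sizeof(async_events_bmap));
    __set_bit(event_id, async_events_bmap); /* one bit per event ID */

    /* 256 bits == 8 x u32; firmware expects each word little-endian */
    for (i = 0; i < 8; i++)
            req.async_event_fwd[i] |= cpu_to_le32(events[i]);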
@@ -4484,6 +4497,9 @@ static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
{
struct hwrm_func_drv_unrgtr_input req = {0};
+ if (!test_and_clear_bit(BNXT_STATE_DRV_REGISTERED, &bp->state))
+ return 0;
+
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_UNRGTR, -1, -1);
return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
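Guarding unregister with BNXT_STATE_DRV_REGISTERED makes the teardown idempotent: with suspend, error cleanup, and remove all able to reach this function, the atomic test-and-clear guarantees HWRM_FUNC_DRV_UNRGTR is sent at most once per registration. The idiom in isolation:

    /* First caller clears the bit and proceeds; any later caller
     * sees it already clear and returns without touching firmware.
     */
    if (!test_and_clear_bit(BNXT_STATE_DRV_REGISTERED, &bp->state))
            return 0;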
@@ -4601,21 +4617,21 @@ static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
struct hwrm_cfa_ntuple_filter_alloc_output *resp;
struct flow_keys *keys = &fltr->fkeys;
struct bnxt_vnic_info *vnic;
- u32 dst_ena = 0;
+ u32 flags = 0;
int rc = 0;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1);
req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx];
- if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX) {
- dst_ena = CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_RFS_RING_TBL_IDX;
- req.rfs_ring_tbl_idx = cpu_to_le16(fltr->rxq);
- vnic = &bp->vnic_info[0];
+ if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) {
+ flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX;
+ req.dst_id = cpu_to_le16(fltr->rxq);
} else {
vnic = &bp->vnic_info[fltr->rxq + 1];
+ req.dst_id = cpu_to_le16(vnic->fw_vnic_id);
}
- req.dst_id = cpu_to_le16(vnic->fw_vnic_id);
- req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS | dst_ena);
+ req.flags = cpu_to_le32(flags);
+ req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
req.ethertype = htons(ETH_P_IP);
memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN);
@@ -6480,6 +6496,7 @@ static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
le16_to_cpu(resp->mrav_num_entries_units);
ctx->tim_entry_size = le16_to_cpu(resp->tim_entry_size);
ctx->tim_max_entries = le32_to_cpu(resp->tim_max_entries);
+ ctx->ctx_kind_initializer = resp->ctx_kind_initializer;
} else {
rc = 0;
}
@@ -6634,7 +6651,7 @@ static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size,
- u8 depth)
+ u8 depth, bool use_init_val)
{
struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
int rc;
@@ -6672,6 +6689,8 @@ static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i];
rmem->depth = 1;
rmem->nr_pages = MAX_CTX_PAGES;
+ if (use_init_val)
+ rmem->init_val = bp->ctx->ctx_kind_initializer;
if (i == (nr_tbls - 1)) {
int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;
@@ -6686,6 +6705,8 @@ static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
if (rmem->nr_pages > 1 || depth)
rmem->depth = 1;
+ if (use_init_val)
+ rmem->init_val = bp->ctx->ctx_kind_initializer;
rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
}
return rc;
@@ -6776,21 +6797,21 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries +
extra_qps;
mem_size = ctx->qp_entry_size * ctx_pg->entries;
- rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl);
+ rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, true);
if (rc)
return rc;
ctx_pg = &ctx->srq_mem;
ctx_pg->entries = ctx->srq_max_l2_entries + extra_srqs;
mem_size = ctx->srq_entry_size * ctx_pg->entries;
- rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl);
+ rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, true);
if (rc)
return rc;
ctx_pg = &ctx->cq_mem;
ctx_pg->entries = ctx->cq_max_l2_entries + extra_qps * 2;
mem_size = ctx->cq_entry_size * ctx_pg->entries;
- rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl);
+ rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, true);
if (rc)
return rc;
@@ -6798,14 +6819,14 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
ctx_pg->entries = ctx->vnic_max_vnic_entries +
ctx->vnic_max_ring_table_entries;
mem_size = ctx->vnic_entry_size * ctx_pg->entries;
- rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1);
+ rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, true);
if (rc)
return rc;
ctx_pg = &ctx->stat_mem;
ctx_pg->entries = ctx->stat_max_entries;
mem_size = ctx->stat_entry_size * ctx_pg->entries;
- rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1);
+ rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, true);
if (rc)
return rc;
@@ -6821,7 +6842,7 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
num_ah = 1024 * 128;
ctx_pg->entries = num_mr + num_ah;
mem_size = ctx->mrav_entry_size * ctx_pg->entries;
- rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2);
+ rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2, true);
if (rc)
return rc;
ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
@@ -6833,7 +6854,7 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
ctx_pg = &ctx->tim_mem;
ctx_pg->entries = ctx->qp_mem.entries;
mem_size = ctx->tim_entry_size * ctx_pg->entries;
- rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1);
+ rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, false);
if (rc)
return rc;
ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM;
@@ -6847,7 +6868,7 @@ skip_rdma:
ctx_pg = ctx->tqm_mem[i];
ctx_pg->entries = entries;
mem_size = ctx->tqm_entry_size * entries;
- rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1);
+ rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, false);
if (rc)
return rc;
ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
@@ -6943,6 +6964,8 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
bp->flags |= BNXT_FLAG_ROCEV2_CAP;
if (flags & FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED)
bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED;
+ if (flags & FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE)
+ bp->fw_cap |= BNXT_FW_CAP_HOT_RESET;
if (flags & FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED)
bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED;
if (flags & FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE)
@@ -7042,8 +7065,8 @@ static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp)
flags = le32_to_cpu(resp->flags);
if (flags &
- CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_SUPPORTED)
- bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX;
+ CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED)
+ bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2;
hwrm_cfa_adv_qcaps_exit:
mutex_unlock(&bp->hwrm_cmd_lock);
@@ -8402,7 +8425,8 @@ static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
bp->flags &= ~BNXT_FLAG_EEE_CAP;
if (bp->test_info)
- bp->test_info->flags &= ~BNXT_TEST_FL_EXT_LPBK;
+ bp->test_info->flags &= ~(BNXT_TEST_FL_EXT_LPBK |
+ BNXT_TEST_FL_AN_PHY_LPBK);
if (bp->hwrm_spec_code < 0x10201)
return 0;
@@ -8428,6 +8452,14 @@ static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
if (bp->test_info)
bp->test_info->flags |= BNXT_TEST_FL_EXT_LPBK;
}
+ if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_AUTONEG_LPBK_SUPPORTED) {
+ if (bp->test_info)
+ bp->test_info->flags |= BNXT_TEST_FL_AN_PHY_LPBK;
+ }
+ if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_SHARED_PHY_CFG_SUPPORTED) {
+ if (BNXT_PF(bp))
+ bp->fw_cap |= BNXT_FW_CAP_SHARED_PORT_CFG;
+ }
if (resp->supported_speeds_auto_mode)
link_info->support_auto_speeds =
le16_to_cpu(resp->supported_speeds_auto_mode);
@@ -8542,7 +8574,7 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
}
mutex_unlock(&bp->hwrm_cmd_lock);
- if (!BNXT_SINGLE_PF(bp))
+ if (!BNXT_PHY_CFG_ABLE(bp))
return 0;
diff = link_info->support_auto_speeds ^ link_info->advertising;
@@ -8762,6 +8794,8 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
}
if (resc_reinit || fw_reset) {
if (fw_reset) {
+ if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
+ bnxt_ulp_stop(bp);
rc = bnxt_fw_init_one(bp);
if (rc) {
set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
@@ -9224,13 +9258,16 @@ static int bnxt_open(struct net_device *dev)
if (rc) {
bnxt_hwrm_if_change(bp, false);
} else {
- if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state) &&
- BNXT_PF(bp)) {
- struct bnxt_pf_info *pf = &bp->pf;
- int n = pf->active_vfs;
+ if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) {
+ if (BNXT_PF(bp)) {
+ struct bnxt_pf_info *pf = &bp->pf;
+ int n = pf->active_vfs;
- if (n)
- bnxt_cfg_hw_sriov(bp, &n, true);
+ if (n)
+ bnxt_cfg_hw_sriov(bp, &n, true);
+ }
+ if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
+ bnxt_ulp_start(bp, 0);
}
bnxt_hwmon_open(bp);
}
@@ -9688,7 +9725,7 @@ static bool bnxt_can_reserve_rings(struct bnxt *bp)
static bool bnxt_rfs_supported(struct bnxt *bp)
{
if (bp->flags & BNXT_FLAG_CHIP_P5) {
- if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX)
+ if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2)
return true;
return false;
}
@@ -9927,12 +9964,15 @@ static void bnxt_reset_task(struct bnxt *bp, bool silent)
if (netif_running(bp->dev)) {
int rc;
- if (!silent)
+ if (silent) {
+ bnxt_close_nic(bp, false, false);
+ bnxt_open_nic(bp, false, false);
+ } else {
bnxt_ulp_stop(bp);
- bnxt_close_nic(bp, false, false);
- rc = bnxt_open_nic(bp, false, false);
- if (!silent && !rc)
- bnxt_ulp_start(bp);
+ bnxt_close_nic(bp, true, false);
+ rc = bnxt_open_nic(bp, true, false);
+ bnxt_ulp_start(bp, rc);
+ }
}
}
@@ -10004,7 +10044,7 @@ static void bnxt_timer(struct timer_list *t)
if (bp->link_info.phy_retry) {
if (time_after(jiffies, bp->link_info.phy_retry_expires)) {
- bp->link_info.phy_retry = 0;
+ bp->link_info.phy_retry = false;
netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n");
} else {
set_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event);
@@ -10048,8 +10088,8 @@ static void bnxt_reset(struct bnxt *bp, bool silent)
static void bnxt_fw_reset_close(struct bnxt *bp)
{
+ bnxt_ulp_stop(bp);
__bnxt_close_nic(bp, true, false);
- bnxt_ulp_irq_stop(bp);
bnxt_clear_int_mode(bp);
bnxt_hwrm_func_drv_unrgtr(bp);
bnxt_free_ctx_mem(bp);
@@ -10107,6 +10147,7 @@ static void bnxt_force_fw_reset(struct bnxt *bp)
void bnxt_fw_exception(struct bnxt *bp)
{
+ netdev_warn(bp->dev, "Detected firmware fatal condition, initiating reset\n");
set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
bnxt_rtnl_lock_sp(bp);
bnxt_force_fw_reset(bp);
@@ -10218,6 +10259,31 @@ static void bnxt_chk_missed_irq(struct bnxt *bp)
static void bnxt_cfg_ntp_filters(struct bnxt *);
+static void bnxt_init_ethtool_link_settings(struct bnxt *bp)
+{
+ struct bnxt_link_info *link_info = &bp->link_info;
+
+ if (BNXT_AUTO_MODE(link_info->auto_mode)) {
+ link_info->autoneg = BNXT_AUTONEG_SPEED;
+ if (bp->hwrm_spec_code >= 0x10201) {
+ if (link_info->auto_pause_setting &
+ PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE)
+ link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
+ } else {
+ link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
+ }
+ link_info->advertising = link_info->auto_link_speeds;
+ } else {
+ link_info->req_link_speed = link_info->force_link_speed;
+ link_info->req_duplex = link_info->duplex_setting;
+ }
+ if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
+ link_info->req_flow_ctrl =
+ link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
+ else
+ link_info->req_flow_ctrl = link_info->force_pause_setting;
+}
+
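Factoring the NVM-default handling into bnxt_init_ethtool_link_settings() lets the driver rebuild its ethtool view of the link configuration not just at probe time but also when firmware reports a link configuration change. The new consumer appears in the sp_task hunk below:

    /* Re-derive the ethtool settings after a LINK_SPEED_CHANGE or
     * PORT_PHY_CFG_CHANGE async event (under bp->link_lock).
     */
    if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, &bp->sp_event))
            bnxt_init_ethtool_link_settings(bp);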
static void bnxt_sp_task(struct work_struct *work)
{
struct bnxt *bp = container_of(work, struct bnxt, sp_task);
@@ -10268,6 +10334,10 @@ static void bnxt_sp_task(struct work_struct *work)
&bp->sp_event))
bnxt_hwrm_phy_qcaps(bp);
+ if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT,
+ &bp->sp_event))
+ bnxt_init_ethtool_link_settings(bp);
+
rc = bnxt_update_link(bp, true);
mutex_unlock(&bp->link_lock);
if (rc)
@@ -10463,11 +10533,7 @@ static int bnxt_fw_init_one_p2(struct bnxt *bp)
netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n",
rc);
- rc = bnxt_hwrm_func_drv_rgtr(bp);
- if (rc)
- return -ENODEV;
-
- rc = bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0);
+ rc = bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false);
if (rc)
return -ENODEV;
@@ -10582,14 +10648,23 @@ static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx)
static void bnxt_reset_all(struct bnxt *bp)
{
struct bnxt_fw_health *fw_health = bp->fw_health;
- int i;
+ int i, rc;
+
+ if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
+#ifdef CONFIG_TEE_BNXT_FW
+ rc = tee_bnxt_fw_load();
+ if (rc)
+ netdev_err(bp->dev, "Unable to reset FW rc=%d\n", rc);
+ bp->fw_reset_timestamp = jiffies;
+#endif
+ return;
+ }
if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST) {
for (i = 0; i < fw_health->fw_reset_seq_cnt; i++)
bnxt_fw_reset_writel(bp, i);
} else if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) {
struct hwrm_fw_reset_input req = {0};
- int rc;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET, -1, -1);
req.resp_addr = cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr);
@@ -10720,19 +10795,22 @@ static void bnxt_fw_reset_task(struct work_struct *work)
clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
dev_close(bp->dev);
}
- bnxt_ulp_irq_restart(bp, rc);
- rtnl_unlock();
bp->fw_reset_state = 0;
/* Make sure fw_reset_state is 0 before clearing the flag */
smp_mb__before_atomic();
clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
+ bnxt_ulp_start(bp, rc);
+ bnxt_dl_health_status_update(bp, true);
+ rtnl_unlock();
break;
}
return;
fw_reset_abort:
clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
+ if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF)
+ bnxt_dl_health_status_update(bp, false);
bp->fw_reset_state = 0;
rtnl_lock();
dev_close(bp->dev);
@@ -10934,7 +11012,7 @@ static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
}
}
-static LIST_HEAD(bnxt_block_cb_list);
+LIST_HEAD(bnxt_block_cb_list);
static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data)
@@ -11372,26 +11450,7 @@ static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt)
if (!fw_dflt)
return 0;
- /*initialize the ethool setting copy with NVM settings */
- if (BNXT_AUTO_MODE(link_info->auto_mode)) {
- link_info->autoneg = BNXT_AUTONEG_SPEED;
- if (bp->hwrm_spec_code >= 0x10201) {
- if (link_info->auto_pause_setting &
- PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE)
- link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
- } else {
- link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
- }
- link_info->advertising = link_info->auto_link_speeds;
- } else {
- link_info->req_link_speed = link_info->force_link_speed;
- link_info->req_duplex = link_info->duplex_setting;
- }
- if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
- link_info->req_flow_ctrl =
- link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
- else
- link_info->req_flow_ctrl = link_info->force_pause_setting;
+ bnxt_init_ethtool_link_settings(bp);
return 0;
}
@@ -11831,6 +11890,7 @@ init_err_cleanup_tc:
bnxt_clear_int_mode(bp);
init_err_pci_clean:
+ bnxt_hwrm_func_drv_unrgtr(bp);
bnxt_free_hwrm_short_cmd_req(bp);
bnxt_free_hwrm_resources(bp);
bnxt_free_ctx_mem(bp);
@@ -11882,11 +11942,16 @@ static int bnxt_suspend(struct device *device)
int rc = 0;
rtnl_lock();
+ bnxt_ulp_stop(bp);
if (netif_running(dev)) {
netif_device_detach(dev);
rc = bnxt_close(dev);
}
bnxt_hwrm_func_drv_unrgtr(bp);
+ pci_disable_device(bp->pdev);
+ bnxt_free_ctx_mem(bp);
+ kfree(bp->ctx);
+ bp->ctx = NULL;
rtnl_unlock();
return rc;
}
@@ -11898,7 +11963,14 @@ static int bnxt_resume(struct device *device)
int rc = 0;
rtnl_lock();
- if (bnxt_hwrm_ver_get(bp) || bnxt_hwrm_func_drv_rgtr(bp)) {
+ rc = pci_enable_device(bp->pdev);
+ if (rc) {
+ netdev_err(dev, "Cannot re-enable PCI device during resume, err = %d\n",
+ rc);
+ goto resume_exit;
+ }
+ pci_set_master(bp->pdev);
+ if (bnxt_hwrm_ver_get(bp)) {
rc = -ENODEV;
goto resume_exit;
}
@@ -11907,6 +11979,26 @@ static int bnxt_resume(struct device *device)
rc = -EBUSY;
goto resume_exit;
}
+
+ if (bnxt_hwrm_queue_qportcfg(bp)) {
+ rc = -ENODEV;
+ goto resume_exit;
+ }
+
+ if (bp->hwrm_spec_code >= 0x10803) {
+ if (bnxt_alloc_ctx_mem(bp)) {
+ rc = -ENODEV;
+ goto resume_exit;
+ }
+ }
+ if (BNXT_NEW_RM(bp))
+ bnxt_hwrm_func_resc_qcaps(bp, false);
+
+ if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) {
+ rc = -ENODEV;
+ goto resume_exit;
+ }
+
bnxt_get_wol_settings(bp);
if (netif_running(dev)) {
rc = bnxt_open(dev);
@@ -11915,6 +12007,7 @@ static int bnxt_resume(struct device *device)
}
resume_exit:
+ bnxt_ulp_start(bp, rc);
rtnl_unlock();
return rc;
}
@@ -11994,10 +12087,9 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
if (!err && netif_running(netdev))
err = bnxt_open(netdev);
- if (!err) {
+ if (!err)
result = PCI_ERS_RESULT_RECOVERED;
- bnxt_ulp_start(bp);
- }
+ bnxt_ulp_start(bp, err);
}
if (result != PCI_ERS_RESULT_RECOVERED && netif_running(netdev))
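Throughout this patch bnxt_ulp_start() gains an error argument, so every recovery path (reset task, firmware reset, resume, and the AER slot reset above) restarts the upper-layer protocol driver, or informs it of failure, with a single call. The resulting pattern is uniform across those paths:

    rc = bnxt_open_nic(bp, true, false);
    bnxt_ulp_start(bp, rc); /* rc != 0 tells the ULP the open failed */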
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index d333589811a5..505af5cfb1bd 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -12,11 +12,11 @@
#define BNXT_H
#define DRV_MODULE_NAME "bnxt_en"
-#define DRV_MODULE_VERSION "1.10.0"
+#define DRV_MODULE_VERSION "1.10.1"
#define DRV_VER_MAJ 1
#define DRV_VER_MIN 10
-#define DRV_VER_UPD 0
+#define DRV_VER_UPD 1
#include <linux/interrupt.h>
#include <linux/rhashtable.h>
@@ -25,6 +25,11 @@
#include <net/dst_metadata.h>
#include <net/xdp.h>
#include <linux/dim.h>
+#ifdef CONFIG_TEE_BNXT_FW
+#include <linux/firmware/broadcom/tee_bnxt_fw.h>
+#endif
+
+extern struct list_head bnxt_block_cb_list;
struct page_pool;
@@ -716,6 +721,7 @@ struct bnxt_ring_mem_info {
#define BNXT_RMEM_USE_FULL_PAGE_FLAG 4
u16 depth;
+ u8 init_val;
void **pg_arr;
dma_addr_t *dma_arr;
@@ -927,6 +933,7 @@ struct bnxt_cp_ring_info {
dma_addr_t hw_stats_map;
u32 hw_stats_ctx_id;
u64 rx_l4_csum_errors;
+ u64 rx_buf_errors;
u64 missed_irqs;
struct bnxt_ring_struct cp_ring_struct;
@@ -1219,7 +1226,8 @@ struct bnxt_led_info {
struct bnxt_test_info {
u8 offline_mask;
u8 flags;
-#define BNXT_TEST_FL_EXT_LPBK 0x1
+#define BNXT_TEST_FL_EXT_LPBK 0x1
+#define BNXT_TEST_FL_AN_PHY_LPBK 0x2
u16 timeout;
char string[BNXT_MAX_TEST][ETH_GSTRING_LEN];
};
@@ -1241,6 +1249,14 @@ struct bnxt_tc_flow_stats {
u64 bytes;
};
+#ifdef CONFIG_BNXT_FLOWER_OFFLOAD
+struct bnxt_flower_indr_block_cb_priv {
+ struct net_device *tunnel_netdev;
+ struct bnxt *bp;
+ struct list_head list;
+};
+#endif
+
struct bnxt_tc_info {
bool enabled;
@@ -1338,6 +1354,7 @@ struct bnxt_ctx_mem_info {
u32 tim_max_entries;
u16 mrav_num_entries_units;
u8 tqm_entries_multiple;
+ u8 ctx_kind_initializer;
u32 flags;
#define BNXT_CTX_FLAG_INITED 0x01
@@ -1370,6 +1387,7 @@ struct bnxt_fw_health {
u32 last_fw_reset_cnt;
u8 enabled:1;
u8 master:1;
+ u8 fatal:1;
u8 tmr_multiplier;
u8 tmr_counter;
u8 fw_reset_seq_cnt;
@@ -1428,6 +1446,8 @@ struct bnxt {
#define CHIP_NUM_57414L 0x16db
#define CHIP_NUM_5745X 0xd730
+#define CHIP_NUM_57452 0xc452
+#define CHIP_NUM_57454 0xc454
#define CHIP_NUM_57508 0x1750
#define CHIP_NUM_57504 0x1751
@@ -1460,7 +1480,10 @@ struct bnxt {
((chip_num) == CHIP_NUM_58700)
#define BNXT_CHIP_NUM_5745X(chip_num) \
- ((chip_num) == CHIP_NUM_5745X)
+ ((chip_num) == CHIP_NUM_5745X || \
+ (chip_num) == CHIP_NUM_57452 || \
+ (chip_num) == CHIP_NUM_57454)
+
#define BNXT_CHIP_NUM_57X0X(chip_num) \
(BNXT_CHIP_NUM_5730X(chip_num) || BNXT_CHIP_NUM_5740X(chip_num))
@@ -1525,6 +1548,8 @@ struct bnxt {
#define BNXT_NPAR(bp) ((bp)->port_partition_type)
#define BNXT_MH(bp) ((bp)->flags & BNXT_FLAG_MULTI_HOST)
#define BNXT_SINGLE_PF(bp) (BNXT_PF(bp) && !BNXT_NPAR(bp) && !BNXT_MH(bp))
+#define BNXT_PHY_CFG_ABLE(bp) (BNXT_SINGLE_PF(bp) || \
+ ((bp)->fw_cap & BNXT_FW_CAP_SHARED_PORT_CFG))
#define BNXT_CHIP_TYPE_NITRO_A0(bp) ((bp)->flags & BNXT_FLAG_CHIP_NITRO_A0)
#define BNXT_RX_PAGE_MODE(bp) ((bp)->flags & BNXT_FLAG_RX_PAGE_MODE)
#define BNXT_SUPPORTS_TPA(bp) (!BNXT_CHIP_TYPE_NITRO_A0(bp) && \
@@ -1626,6 +1651,7 @@ struct bnxt {
#define BNXT_STATE_IN_FW_RESET 4
#define BNXT_STATE_ABORT_ERR 5
#define BNXT_STATE_FW_FATAL_COND 6
+#define BNXT_STATE_DRV_REGISTERED 7
struct bnxt_irq *irq_tbl;
int total_irqs;
@@ -1653,10 +1679,12 @@ struct bnxt {
#define BNXT_FW_CAP_ERROR_RECOVERY 0x00002000
#define BNXT_FW_CAP_PKG_VER 0x00004000
#define BNXT_FW_CAP_CFA_ADV_FLOW 0x00008000
- #define BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX 0x00010000
+ #define BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2 0x00010000
#define BNXT_FW_CAP_PCIE_STATS_SUPPORTED 0x00020000
#define BNXT_FW_CAP_EXT_STATS_SUPPORTED 0x00040000
#define BNXT_FW_CAP_ERR_RECOVER_RELOAD 0x00100000
+ #define BNXT_FW_CAP_HOT_RESET 0x00200000
+ #define BNXT_FW_CAP_SHARED_PORT_CFG 0x00400000
#define BNXT_NEW_RM(bp) ((bp)->fw_cap & BNXT_FW_CAP_NEW_RM)
u32 hwrm_spec_code;
@@ -1738,6 +1766,7 @@ struct bnxt {
#define BNXT_RING_COAL_NOW_SP_EVENT 17
#define BNXT_FW_RESET_NOTIFY_SP_EVENT 18
#define BNXT_FW_EXCEPTION_SP_EVENT 19
+#define BNXT_LINK_CFG_CHANGE_SP_EVENT 21
struct delayed_work fw_reset_task;
int fw_reset_state;
@@ -1804,6 +1833,9 @@ struct bnxt {
u8 num_leds;
struct bnxt_led_info leds[BNXT_MAX_LED];
+ u16 dump_flag;
+#define BNXT_DUMP_LIVE 0
+#define BNXT_DUMP_CRASH 1
struct bpf_prog *xdp_prog;
@@ -1815,6 +1847,8 @@ struct bnxt {
u16 *cfa_code_map; /* cfa_code -> vf_idx map */
u8 switch_id[8];
struct bnxt_tc_info *tc_info;
+ struct list_head tc_indr_block_list;
+ struct notifier_block tc_netdev_nb;
struct dentry *debugfs_pdev;
struct device *hwmon_dev;
};
@@ -1969,8 +2003,8 @@ int _hwrm_send_message(struct bnxt *, void *, u32, int);
int _hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 len, int timeout);
int hwrm_send_message(struct bnxt *, void *, u32, int);
int hwrm_send_message_silent(struct bnxt *, void *, u32, int);
-int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap,
- int bmap_size);
+int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap,
+ int bmap_size, bool async_only);
int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id);
int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings);
int bnxt_nq_rings_in_use(struct bnxt *bp);
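With the registration helpers merged, callers choose between a full driver registration and an async-event-mask-only update via the new async_only flag. A hedged usage sketch (the bitmap name below is illustrative, not from the driver):

    /* Probe/resume: full registration plus default async events */
    rc = bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false);

    /* e.g. a ULP adding extra events: update only the event mask */
    rc = bnxt_hwrm_func_drv_rgtr(bp, ulp_event_bmap, bmap_size, true);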
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index 7151244f8c7d..acb2dd64c023 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -14,9 +14,29 @@
#include "bnxt.h"
#include "bnxt_vfr.h"
#include "bnxt_devlink.h"
+#include "bnxt_ethtool.h"
+
+static int
+bnxt_dl_flash_update(struct devlink *dl, const char *filename,
+ const char *region, struct netlink_ext_ack *extack)
+{
+ struct bnxt *bp = bnxt_get_bp_from_dl(dl);
+
+ if (region)
+ return -EOPNOTSUPP;
+
+ if (!BNXT_PF(bp)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "flash update not supported from a VF");
+ return -EPERM;
+ }
+
+ return bnxt_flash_package_from_file(bp->dev, filename, 0);
+}
static int bnxt_fw_reporter_diagnose(struct devlink_health_reporter *reporter,
- struct devlink_fmsg *fmsg)
+ struct devlink_fmsg *fmsg,
+ struct netlink_ext_ack *extack)
{
struct bnxt *bp = devlink_health_reporter_priv(reporter);
struct bnxt_fw_health *health = bp->fw_health;
@@ -61,7 +81,8 @@ static const struct devlink_health_reporter_ops bnxt_dl_fw_reporter_ops = {
};
static int bnxt_fw_reset_recover(struct devlink_health_reporter *reporter,
- void *priv_ctx)
+ void *priv_ctx,
+ struct netlink_ext_ack *extack)
{
struct bnxt *bp = devlink_health_reporter_priv(reporter);
@@ -79,7 +100,8 @@ struct devlink_health_reporter_ops bnxt_dl_fw_reset_reporter_ops = {
};
static int bnxt_fw_fatal_recover(struct devlink_health_reporter *reporter,
- void *priv_ctx)
+ void *priv_ctx,
+ struct netlink_ext_ack *extack)
{
struct bnxt *bp = devlink_health_reporter_priv(reporter);
struct bnxt_fw_reporter_ctx *fw_reporter_ctx = priv_ctx;
@@ -88,6 +110,7 @@ static int bnxt_fw_fatal_recover(struct devlink_health_reporter *reporter,
if (!priv_ctx)
return -EOPNOTSUPP;
+ bp->fw_health->fatal = true;
event = fw_reporter_ctx->sp_event;
if (event == BNXT_FW_RESET_NOTIFY_SP_EVENT)
bnxt_fw_reset(bp);
@@ -196,11 +219,32 @@ void bnxt_devlink_health_report(struct bnxt *bp, unsigned long event)
}
}
+void bnxt_dl_health_status_update(struct bnxt *bp, bool healthy)
+{
+ struct bnxt_fw_health *health = bp->fw_health;
+ u8 state;
+
+ if (healthy)
+ state = DEVLINK_HEALTH_REPORTER_STATE_HEALTHY;
+ else
+ state = DEVLINK_HEALTH_REPORTER_STATE_ERROR;
+
+ if (health->fatal)
+ devlink_health_reporter_state_update(health->fw_fatal_reporter,
+ state);
+ else
+ devlink_health_reporter_state_update(health->fw_reset_reporter,
+ state);
+
+ health->fatal = false;
+}
+
static const struct devlink_ops bnxt_dl_ops = {
#ifdef CONFIG_BNXT_SRIOV
.eswitch_mode_set = bnxt_dl_eswitch_mode_set,
.eswitch_mode_get = bnxt_dl_eswitch_mode_get,
#endif /* CONFIG_BNXT_SRIOV */
+ .flash_update = bnxt_dl_flash_update,
};
enum bnxt_dl_param_id {
@@ -311,10 +355,17 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
} else {
rc = hwrm_send_message_silent(bp, msg, msg_len,
HWRM_CMD_TIMEOUT);
- if (!rc)
+ if (!rc) {
bnxt_copy_from_nvm_data(val, data,
nvm_param.nvm_num_bits,
nvm_param.dl_num_bytes);
+ } else {
+ struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
+
+ if (resp->cmd_err ==
+ NVM_GET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST)
+ rc = -EOPNOTSUPP;
+ }
}
dma_free_coherent(&bp->pdev->dev, sizeof(*data), data, data_dma_addr);
if (rc == -EACCES)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
index 2f4fd0a7d04b..665d4bdcd8c0 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
@@ -57,6 +57,7 @@ struct bnxt_dl_nvm_param {
};
void bnxt_devlink_health_report(struct bnxt *bp, unsigned long event);
+void bnxt_dl_health_status_update(struct bnxt *bp, bool healthy);
int bnxt_dl_register(struct bnxt *bp);
void bnxt_dl_unregister(struct bnxt *bp);
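With .flash_update wired into bnxt_dl_ops (PF only, whole-package updates, no region support), firmware can now be flashed through the devlink interface as well as ethtool. A usage example, with a placeholder device address and file name:

    # devlink dev flash pci/0000:af:00.0 file bnxt_fw_package.pkg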
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 51c140476717..2ccf79cdcb1e 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -173,6 +173,7 @@ static const char * const bnxt_ring_tpa2_stats_str[] = {
static const char * const bnxt_ring_sw_stats_str[] = {
"rx_l4_csum_errors",
+ "rx_buf_errors",
"missed_irqs",
};
@@ -552,6 +553,7 @@ static void bnxt_get_ethtool_stats(struct net_device *dev,
for (k = 0; k < stat_fields; j++, k++)
buf[j] = le64_to_cpu(hw_stats[k]);
buf[j++] = cpr->rx_l4_csum_errors;
+ buf[j++] = cpr->rx_buf_errors;
buf[j++] = cpr->missed_irqs;
bnxt_sw_func_stats[RX_TOTAL_DISCARDS].counter +=
@@ -1588,7 +1590,7 @@ static int bnxt_set_link_ksettings(struct net_device *dev,
u32 speed;
int rc = 0;
- if (!BNXT_SINGLE_PF(bp))
+ if (!BNXT_PHY_CFG_ABLE(bp))
return -EOPNOTSUPP;
mutex_lock(&bp->link_lock);
@@ -1660,7 +1662,7 @@ static int bnxt_set_pauseparam(struct net_device *dev,
struct bnxt *bp = netdev_priv(dev);
struct bnxt_link_info *link_info = &bp->link_info;
- if (!BNXT_SINGLE_PF(bp))
+ if (!BNXT_PHY_CFG_ABLE(bp))
return -EOPNOTSUPP;
if (epause->autoneg) {
@@ -1785,6 +1787,8 @@ static int bnxt_firmware_reset(struct net_device *dev,
case BNXT_FW_RESET_CHIP:
req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP;
req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP;
+ if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
+ req.flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;
break;
case BNXT_FW_RESET_AP:
req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP;
@@ -1996,8 +2000,8 @@ static int bnxt_flash_firmware_from_file(struct net_device *dev,
return rc;
}
-static int bnxt_flash_package_from_file(struct net_device *dev,
- char *filename, u32 install_type)
+int bnxt_flash_package_from_file(struct net_device *dev, const char *filename,
+ u32 install_type)
{
struct bnxt *bp = netdev_priv(dev);
struct hwrm_nvm_install_update_output *resp = bp->hwrm_cmd_resp_addr;
@@ -2395,7 +2399,7 @@ static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata)
_bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
int rc = 0;
- if (!BNXT_SINGLE_PF(bp))
+ if (!BNXT_PHY_CFG_ABLE(bp))
return -EOPNOTSUPP;
if (!(bp->flags & BNXT_FLAG_EEE_CAP))
@@ -2582,7 +2586,7 @@ static int bnxt_nway_reset(struct net_device *dev)
struct bnxt *bp = netdev_priv(dev);
struct bnxt_link_info *link_info = &bp->link_info;
- if (!BNXT_SINGLE_PF(bp))
+ if (!BNXT_PHY_CFG_ABLE(bp))
return -EOPNOTSUPP;
if (!(link_info->autoneg & BNXT_AUTONEG_SPEED))
@@ -2694,7 +2698,8 @@ static int bnxt_disable_an_for_lpbk(struct bnxt *bp,
u16 fw_speed;
int rc;
- if (!link_info->autoneg)
+ if (!link_info->autoneg ||
+ (bp->test_info->flags & BNXT_TEST_FL_AN_PHY_LPBK))
return 0;
rc = bnxt_query_force_speeds(bp, &fw_advertising);
@@ -2981,7 +2986,8 @@ static int bnxt_reset(struct net_device *dev, u32 *flags)
return -EOPNOTSUPP;
}
- if (pci_vfs_assigned(bp->pdev)) {
+ if (pci_vfs_assigned(bp->pdev) &&
+ !(bp->fw_cap & BNXT_FW_CAP_HOT_RESET)) {
netdev_err(dev,
"Reset not allowed when VFs are assigned to VMs\n");
return -EBUSY;
@@ -2994,7 +3000,9 @@ static int bnxt_reset(struct net_device *dev, u32 *flags)
rc = bnxt_firmware_reset(dev, BNXT_FW_RESET_CHIP);
if (!rc) {
- netdev_info(dev, "Reset request successful. Reload driver to complete reset\n");
+ netdev_info(dev, "Reset request successful.\n");
+ if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET))
+ netdev_info(dev, "Reload driver to complete reset\n");
*flags = 0;
}
} else if (*flags == ETH_RESET_AP) {
@@ -3038,7 +3046,8 @@ static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg, int msg_len,
mutex_lock(&bp->hwrm_cmd_lock);
while (1) {
*seq_ptr = cpu_to_le16(seq);
- rc = _hwrm_send_message(bp, msg, msg_len, HWRM_CMD_TIMEOUT);
+ rc = _hwrm_send_message(bp, msg, msg_len,
+ HWRM_COREDUMP_TIMEOUT);
if (rc)
break;
@@ -3311,6 +3320,24 @@ err:
return rc;
}
+static int bnxt_set_dump(struct net_device *dev, struct ethtool_dump *dump)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ if (dump->flag > BNXT_DUMP_CRASH) {
+ netdev_info(dev, "Supports only Live(0) and Crash(1) dumps.\n");
+ return -EINVAL;
+ }
+
+ if (!IS_ENABLED(CONFIG_TEE_BNXT_FW) && dump->flag == BNXT_DUMP_CRASH) {
+ netdev_info(dev, "Cannot collect crash dump as TEE_BNXT_FW config option is not enabled.\n");
+ return -EOPNOTSUPP;
+ }
+
+ bp->dump_flag = dump->flag;
+ return 0;
+}
+
static int bnxt_get_dump_flag(struct net_device *dev, struct ethtool_dump *dump)
{
struct bnxt *bp = netdev_priv(dev);
@@ -3323,7 +3350,12 @@ static int bnxt_get_dump_flag(struct net_device *dev, struct ethtool_dump *dump)
bp->ver_resp.hwrm_fw_bld_8b << 8 |
bp->ver_resp.hwrm_fw_rsvd_8b;
- return bnxt_get_coredump(bp, NULL, &dump->len);
+ dump->flag = bp->dump_flag;
+ if (bp->dump_flag == BNXT_DUMP_CRASH)
+ dump->len = BNXT_CRASH_DUMP_LEN;
+ else
+ bnxt_get_coredump(bp, NULL, &dump->len);
+ return 0;
}
static int bnxt_get_dump_data(struct net_device *dev, struct ethtool_dump *dump,
@@ -3336,7 +3368,16 @@ static int bnxt_get_dump_data(struct net_device *dev, struct ethtool_dump *dump,
memset(buf, 0, dump->len);
- return bnxt_get_coredump(bp, buf, &dump->len);
+ dump->flag = bp->dump_flag;
+ if (dump->flag == BNXT_DUMP_CRASH) {
+#ifdef CONFIG_TEE_BNXT_FW
+ return tee_bnxt_copy_coredump(buf, 0, dump->len);
+#endif
+ } else {
+ return bnxt_get_coredump(bp, buf, &dump->len);
+ }
+
+ return 0;
}
void bnxt_ethtool_init(struct bnxt *bp)
@@ -3446,6 +3487,7 @@ const struct ethtool_ops bnxt_ethtool_ops = {
.set_phys_id = bnxt_set_phys_id,
.self_test = bnxt_self_test,
.reset = bnxt_reset,
+ .set_dump = bnxt_set_dump,
.get_dump_flag = bnxt_get_dump_flag,
.get_dump_data = bnxt_get_dump_data,
};
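The new set_dump hook lets userspace pick the dump type before fetching it; crash dumps (flag 1) require the TEE-resident firmware support selected by CONFIG_TEE_BNXT_FW and report a fixed 8 MB length (BNXT_CRASH_DUMP_LEN). Example ethtool usage, assuming an interface named eth0:

    # ethtool -W eth0 1              # select crash dump (BNXT_DUMP_CRASH)
    # ethtool -w eth0 data crash.bin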
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
index b5b65b3f8534..4428d0abcbc1 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
@@ -59,6 +59,8 @@ struct hwrm_dbg_cmn_output {
#define HWRM_DBG_CMN_FLAGS_MORE 1
};
+#define BNXT_CRASH_DUMP_LEN (8 << 20)
+
#define BNXT_LED_DFLT_ENA \
(PORT_LED_CFG_REQ_ENABLES_LED0_ID | \
PORT_LED_CFG_REQ_ENABLES_LED0_STATE | \
@@ -79,6 +81,8 @@ extern const struct ethtool_ops bnxt_ethtool_ops;
u32 _bnxt_fw_to_ethtool_adv_spds(u16, u8);
u32 bnxt_fw_to_ethtool_speed(u16);
u16 bnxt_get_fw_auto_link_speeds(u32);
+int bnxt_flash_package_from_file(struct net_device *dev, const char *filename,
+ u32 install_type);
void bnxt_ethtool_init(struct bnxt *bp);
void bnxt_ethtool_free(struct bnxt *bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index 03b197eb793b..7cf27dffadb5 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -176,6 +176,9 @@ struct cmd_nums {
#define HWRM_RESERVED6 0x65UL
#define HWRM_VNIC_RSS_COS_LB_CTX_ALLOC 0x70UL
#define HWRM_VNIC_RSS_COS_LB_CTX_FREE 0x71UL
+ #define HWRM_QUEUE_MPLS_QCAPS 0x80UL
+ #define HWRM_QUEUE_MPLSTC2PRI_QCFG 0x81UL
+ #define HWRM_QUEUE_MPLSTC2PRI_CFG 0x82UL
#define HWRM_CFA_L2_FILTER_ALLOC 0x90UL
#define HWRM_CFA_L2_FILTER_FREE 0x91UL
#define HWRM_CFA_L2_FILTER_CFG 0x92UL
@@ -208,7 +211,7 @@ struct cmd_nums {
#define HWRM_FW_QSTATUS 0xc1UL
#define HWRM_FW_HEALTH_CHECK 0xc2UL
#define HWRM_FW_SYNC 0xc3UL
- #define HWRM_FW_STATE_BUFFER_QCAPS 0xc4UL
+ #define HWRM_FW_STATE_QCAPS 0xc4UL
#define HWRM_FW_STATE_QUIESCE 0xc5UL
#define HWRM_FW_STATE_BACKUP 0xc6UL
#define HWRM_FW_STATE_RESTORE 0xc7UL
@@ -225,8 +228,11 @@ struct cmd_nums {
#define HWRM_PORT_PRBS_TEST 0xd5UL
#define HWRM_PORT_SFP_SIDEBAND_CFG 0xd6UL
#define HWRM_PORT_SFP_SIDEBAND_QCFG 0xd7UL
+ #define HWRM_FW_STATE_UNQUIESCE 0xd8UL
+ #define HWRM_PORT_DSC_DUMP 0xd9UL
#define HWRM_TEMP_MONITOR_QUERY 0xe0UL
#define HWRM_REG_POWER_QUERY 0xe1UL
+ #define HWRM_CORE_FREQUENCY_QUERY 0xe2UL
#define HWRM_WOL_FILTER_ALLOC 0xf0UL
#define HWRM_WOL_FILTER_FREE 0xf1UL
#define HWRM_WOL_FILTER_QCFG 0xf2UL
@@ -308,6 +314,7 @@ struct cmd_nums {
#define HWRM_ENGINE_STATS_CONFIG 0x155UL
#define HWRM_ENGINE_STATS_CLEAR 0x156UL
#define HWRM_ENGINE_STATS_QUERY 0x157UL
+ #define HWRM_ENGINE_STATS_QUERY_CONTINUOUS_ERROR 0x158UL
#define HWRM_ENGINE_RQ_ALLOC 0x15eUL
#define HWRM_ENGINE_RQ_FREE 0x15fUL
#define HWRM_ENGINE_CQ_ALLOC 0x160UL
@@ -390,6 +397,7 @@ struct ret_codes {
#define HWRM_ERR_CODE_KEY_HASH_COLLISION 0xdUL
#define HWRM_ERR_CODE_KEY_ALREADY_EXISTS 0xeUL
#define HWRM_ERR_CODE_HWRM_ERROR 0xfUL
+ #define HWRM_ERR_CODE_BUSY 0x10UL
#define HWRM_ERR_CODE_TLV_ENCAPSULATED_RESPONSE 0x8000UL
#define HWRM_ERR_CODE_UNKNOWN_ERR 0xfffeUL
#define HWRM_ERR_CODE_CMD_NOT_SUPPORTED 0xffffUL
@@ -420,9 +428,9 @@ struct hwrm_err_output {
#define HWRM_TARGET_ID_TOOLS 0xFFFD
#define HWRM_VERSION_MAJOR 1
#define HWRM_VERSION_MINOR 10
-#define HWRM_VERSION_UPDATE 0
-#define HWRM_VERSION_RSVD 100
-#define HWRM_VERSION_STR "1.10.0.100"
+#define HWRM_VERSION_UPDATE 1
+#define HWRM_VERSION_RSVD 12
+#define HWRM_VERSION_STR "1.10.1.12"
/* hwrm_ver_get_input (size:192b/24B) */
struct hwrm_ver_get_input {
@@ -637,6 +645,8 @@ struct hwrm_async_event_cmpl {
#define ASYNC_EVENT_CMPL_EVENT_ID_EEM_CFG_CHANGE 0x3cUL
#define ASYNC_EVENT_CMPL_EVENT_ID_TFLIB_DEFAULT_VNIC_CHANGE 0x3dUL
#define ASYNC_EVENT_CMPL_EVENT_ID_TFLIB_LINK_STATUS_CHANGE 0x3eUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_QUIESCE_DONE 0x3fUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE 0x40UL
#define ASYNC_EVENT_CMPL_EVENT_ID_FW_TRACE_MSG 0xfeUL
#define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL
#define ASYNC_EVENT_CMPL_EVENT_ID_LAST ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR
@@ -1115,6 +1125,7 @@ struct hwrm_func_qcaps_output {
#define FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED 0x1000000UL
#define FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD 0x2000000UL
#define FUNC_QCAPS_RESP_FLAGS_NOTIFY_VF_DEF_VNIC_CHNG_SUPPORTED 0x4000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED 0x8000000UL
u8 mac_address[6];
__le16 max_rsscos_ctx;
__le16 max_cmpl_rings;
@@ -1255,7 +1266,8 @@ struct hwrm_func_qcfg_output {
u8 unused_1;
u8 always_1;
__le32 reset_addr_poll;
- u8 unused_2[3];
+ __le16 legacy_l2_db_size_kb;
+ u8 unused_2[1];
u8 valid;
};
@@ -1500,6 +1512,7 @@ struct hwrm_func_drv_rgtr_input {
#define FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE 0x8UL
#define FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT 0x10UL
#define FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT 0x20UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT 0x40UL
__le32 enables;
#define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE 0x1UL
#define FUNC_DRV_RGTR_REQ_ENABLES_VER 0x2UL
@@ -1762,7 +1775,7 @@ struct hwrm_func_backing_store_qcaps_input {
__le64 resp_addr;
};
-/* hwrm_func_backing_store_qcaps_output (size:576b/72B) */
+/* hwrm_func_backing_store_qcaps_output (size:640b/80B) */
struct hwrm_func_backing_store_qcaps_output {
__le16 error_code;
__le16 req_type;
@@ -1792,6 +1805,10 @@ struct hwrm_func_backing_store_qcaps_output {
__le32 tim_max_entries;
__le16 mrav_num_entries_units;
u8 tqm_entries_multiple;
+ u8 ctx_kind_initializer;
+ __le32 rsvd;
+ __le16 rsvd1;
+ u8 rsvd2;
u8 valid;
};
@@ -2524,6 +2541,7 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG 0x2UL
#define PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN 0x3UL
#define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTINSERTED 0x4UL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_CURRENTFAULT 0x5UL
#define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTAPPLICABLE 0xffUL
#define PORT_PHY_QCFG_RESP_MODULE_STATUS_LAST PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTAPPLICABLE
__le32 preemphasis;
@@ -2761,8 +2779,8 @@ struct hwrm_port_mac_ptp_qcfg_output {
__le16 resp_len;
u8 flags;
#define PORT_MAC_PTP_QCFG_RESP_FLAGS_DIRECT_ACCESS 0x1UL
- #define PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS 0x2UL
#define PORT_MAC_PTP_QCFG_RESP_FLAGS_ONE_STEP_TX_TS 0x4UL
+ #define PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS 0x8UL
u8 unused_0[3];
__le32 rx_ts_reg_off_lower;
__le32 rx_ts_reg_off_upper;
@@ -3177,10 +3195,12 @@ struct hwrm_port_phy_qcaps_output {
__le16 seq_id;
__le16 resp_len;
u8 flags;
- #define PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED 0x1UL
- #define PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED 0x2UL
- #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_MASK 0xfcUL
- #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_SFT 2
+ #define PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED 0x1UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED 0x2UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_AUTONEG_LPBK_SUPPORTED 0x4UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_SHARED_PHY_CFG_SUPPORTED 0x8UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_MASK 0xf0UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_SFT 4
u8 port_cnt;
#define PORT_PHY_QCAPS_RESP_PORT_CNT_UNKNOWN 0x0UL
#define PORT_PHY_QCAPS_RESP_PORT_CNT_1 0x1UL
@@ -4980,6 +5000,15 @@ struct hwrm_vnic_rss_cfg_output {
u8 valid;
};
+/* hwrm_vnic_rss_cfg_cmd_err (size:64b/8B) */
+struct hwrm_vnic_rss_cfg_cmd_err {
+ u8 code;
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_INTERFACE_NOT_READY 0x1UL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_LAST VNIC_RSS_CFG_CMD_ERR_CODE_INTERFACE_NOT_READY
+ u8 unused_0[7];
+};
+
/* hwrm_vnic_plcmodes_cfg_input (size:320b/40B) */
struct hwrm_vnic_plcmodes_cfg_input {
__le16 req_type;
@@ -5807,7 +5836,7 @@ struct hwrm_cfa_encap_record_free_output {
u8 valid;
};
-/* hwrm_cfa_ntuple_filter_alloc_input (size:1088b/136B) */
+/* hwrm_cfa_ntuple_filter_alloc_input (size:1024b/128B) */
struct hwrm_cfa_ntuple_filter_alloc_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -5815,10 +5844,12 @@ struct hwrm_cfa_ntuple_filter_alloc_input {
__le16 target_id;
__le64 resp_addr;
__le32 flags;
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP 0x2UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_METER 0x4UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_FID 0x8UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP 0x2UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_METER 0x4UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_FID 0x8UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_ARP_REPLY 0x10UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX 0x20UL
__le32 enables;
#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID 0x1UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE 0x2UL
@@ -5887,8 +5918,6 @@ struct hwrm_cfa_ntuple_filter_alloc_input {
__be16 dst_port;
__be16 dst_port_mask;
__le64 ntuple_filter_id_hint;
- __le16 rfs_ring_tbl_idx;
- u8 unused_0[6];
};
/* hwrm_cfa_ntuple_filter_alloc_output (size:192b/24B) */
@@ -5954,7 +5983,8 @@ struct hwrm_cfa_ntuple_filter_cfg_input {
#define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID 0x2UL
#define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_METER_INSTANCE_ID 0x4UL
__le32 flags;
- #define CFA_NTUPLE_FILTER_CFG_REQ_FLAGS_DEST_FID 0x1UL
+ #define CFA_NTUPLE_FILTER_CFG_REQ_FLAGS_DEST_FID 0x1UL
+ #define CFA_NTUPLE_FILTER_CFG_REQ_FLAGS_DEST_RFS_RING_IDX 0x2UL
__le64 ntuple_filter_id;
__le32 new_dst_id;
__le32 new_mirror_vnic_id;
@@ -6534,18 +6564,21 @@ struct hwrm_cfa_adv_flow_mgnt_qcaps_output {
__le16 seq_id;
__le16 resp_len;
__le32 flags;
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_HND_16BIT_SUPPORTED 0x1UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_HND_64BIT_SUPPORTED 0x2UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_BATCH_DELETE_SUPPORTED 0x4UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_RESET_ALL_SUPPORTED 0x8UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_DEST_FUNC_SUPPORTED 0x10UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_TX_EEM_FLOW_SUPPORTED 0x20UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RX_EEM_FLOW_SUPPORTED 0x40UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_COUNTER_ALLOC_SUPPORTED 0x80UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_SUPPORTED 0x100UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_UNTAGGED_VLAN_SUPPORTED 0x200UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_XDP_SUPPORTED 0x400UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_L2_HEADER_SOURCE_FIELDS_SUPPORTED 0x800UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_HND_16BIT_SUPPORTED 0x1UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_HND_64BIT_SUPPORTED 0x2UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_BATCH_DELETE_SUPPORTED 0x4UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_RESET_ALL_SUPPORTED 0x8UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_DEST_FUNC_SUPPORTED 0x10UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_TX_EEM_FLOW_SUPPORTED 0x20UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RX_EEM_FLOW_SUPPORTED 0x40UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_COUNTER_ALLOC_SUPPORTED 0x80UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_SUPPORTED 0x100UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_UNTAGGED_VLAN_SUPPORTED 0x200UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_XDP_SUPPORTED 0x400UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_L2_HEADER_SOURCE_FIELDS_SUPPORTED 0x800UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_ARP_SUPPORTED 0x1000UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED 0x2000UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_ETHERTYPE_IP_SUPPORTED 0x4000UL
u8 unused_0[3];
u8 valid;
};
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index f6f3454d6059..2aba1e02a8f4 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -515,6 +515,7 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
struct bnxt_pf_info *pf = &bp->pf;
int i, rc = 0, min = 1;
u16 vf_msix = 0;
+ u16 vf_rss;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESOURCE_CFG, -1, -1);
@@ -533,9 +534,9 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
vf_tx_rings = hw_resc->max_tx_rings - bp->tx_nr_rings;
vf_vnics = hw_resc->max_vnics - bp->nr_vnics;
vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);
+ vf_rss = hw_resc->max_rsscos_ctxs - bp->rsscos_nr_ctxs;
req.min_rsscos_ctx = cpu_to_le16(BNXT_VF_MIN_RSS_CTX);
- req.max_rsscos_ctx = cpu_to_le16(BNXT_VF_MAX_RSS_CTX);
if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
min = 0;
req.min_rsscos_ctx = cpu_to_le16(min);
@@ -557,6 +558,7 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
vf_vnics /= num_vfs;
vf_stat_ctx /= num_vfs;
vf_ring_grps /= num_vfs;
+ vf_rss /= num_vfs;
req.min_cmpl_rings = cpu_to_le16(vf_cp_rings);
req.min_tx_rings = cpu_to_le16(vf_tx_rings);
@@ -565,6 +567,7 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
req.min_vnics = cpu_to_le16(vf_vnics);
req.min_stat_ctx = cpu_to_le16(vf_stat_ctx);
req.min_hw_ring_grps = cpu_to_le16(vf_ring_grps);
+ req.min_rsscos_ctx = cpu_to_le16(vf_rss);
}
req.max_cmpl_rings = cpu_to_le16(vf_cp_rings);
req.max_tx_rings = cpu_to_le16(vf_tx_rings);
@@ -573,6 +576,7 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
req.max_vnics = cpu_to_le16(vf_vnics);
req.max_stat_ctx = cpu_to_le16(vf_stat_ctx);
req.max_hw_ring_grps = cpu_to_le16(vf_ring_grps);
+ req.max_rsscos_ctx = cpu_to_le16(vf_rss);
if (bp->flags & BNXT_FLAG_CHIP_P5)
req.max_msix = cpu_to_le16(vf_msix / num_vfs);
@@ -598,7 +602,7 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
hw_resc->max_hw_ring_grps -= le16_to_cpu(req.min_hw_ring_grps) *
n;
hw_resc->max_cp_rings -= le16_to_cpu(req.min_cmpl_rings) * n;
- hw_resc->max_rsscos_ctxs -= pf->active_vfs;
+ hw_resc->max_rsscos_ctxs -= le16_to_cpu(req.min_rsscos_ctx) * n;
hw_resc->max_stat_ctxs -= le16_to_cpu(req.min_stat_ctx) * n;
hw_resc->max_vnics -= le16_to_cpu(req.min_vnics) * n;
if (bp->flags & BNXT_FLAG_CHIP_P5)
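The bnxt_sriov.c hunks above replace the fixed per-VF RSS-context cap with a dynamic share: contexts the PF is not using are divided evenly across the VFs, and the PF pool is afterwards reduced by what was actually reserved. A standalone sketch of the arithmetic, with illustrative numbers:

    #include <stdio.h>

    int main(void)
    {
        unsigned int max_rsscos_ctxs = 128;  /* device total (assumed) */
        unsigned int pf_rsscos_ctxs = 8;     /* contexts the PF keeps  */
        unsigned int num_vfs = 4;

        /* Same split as vf_rss /= num_vfs in the patch. */
        unsigned int vf_rss = (max_rsscos_ctxs - pf_rsscos_ctxs) / num_vfs;

        /* After provisioning, the PF pool shrinks by min * n, mirroring
         * hw_resc->max_rsscos_ctxs -= le16_to_cpu(req.min_rsscos_ctx) * n.
         */
        max_rsscos_ctxs -= vf_rss * num_vfs;

        printf("per-VF RSS contexts: %u, PF pool left: %u\n",
               vf_rss, max_rsscos_ctxs);
        return 0;
    }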
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index c8062d020d1e..0cc6ec51f45f 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -16,7 +16,9 @@
#include <net/tc_act/tc_skbedit.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
+#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_tunnel_key.h>
+#include <net/vxlan.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
@@ -36,6 +38,8 @@
#define is_vid_exactmatch(vlan_tci_mask) \
((ntohs(vlan_tci_mask) & VLAN_VID_MASK) == VLAN_VID_MASK)
+static bool is_wildcard(void *mask, int len);
+static bool is_exactmatch(void *mask, int len);
/* Return the dst fid of the func for flow forwarding
* For PFs: src_fid is the fid of the PF
* For VF-reps: src_fid the fid of the VF
@@ -111,10 +115,182 @@ static int bnxt_tc_parse_tunnel_set(struct bnxt *bp,
return 0;
}
+/* Key & Mask from the stack comes unaligned in multiple iterations of 4 bytes
+ * each(u32).
+ * This routine consolidates such multiple unaligned values into one
+ * field each for Key & Mask (for src and dst macs separately)
+ * For example,
+ * Mask/Key Offset Iteration
+ * ========== ====== =========
+ * dst mac 0xffffffff 0 1
+ * dst mac 0x0000ffff 4 2
+ *
+ * src mac 0xffff0000 4 1
+ * src mac 0xffffffff 8 2
+ *
+ * The above combination coming from the stack will be consolidated as
+ * Mask/Key
+ * ==============
+ * src mac: 0xffffffffffff
+ * dst mac: 0xffffffffffff
+ */
+static void bnxt_set_l2_key_mask(u32 part_key, u32 part_mask,
+ u8 *actual_key, u8 *actual_mask)
+{
+ u32 key = get_unaligned((u32 *)actual_key);
+ u32 mask = get_unaligned((u32 *)actual_mask);
+
+ part_key &= part_mask;
+ part_key |= key & ~part_mask;
+
+ put_unaligned(mask | part_mask, (u32 *)actual_mask);
+ put_unaligned(part_key, (u32 *)actual_key);
+}
+
+static int
+bnxt_fill_l2_rewrite_fields(struct bnxt_tc_actions *actions,
+ u16 *eth_addr, u16 *eth_addr_mask)
+{
+ u16 *p;
+ int j;
+
+ if (unlikely(bnxt_eth_addr_key_mask_invalid(eth_addr, eth_addr_mask)))
+ return -EINVAL;
+
+ if (!is_wildcard(&eth_addr_mask[0], ETH_ALEN)) {
+ if (!is_exactmatch(&eth_addr_mask[0], ETH_ALEN))
+ return -EINVAL;
+ /* FW expects dmac to be in u16 array format */
+ p = eth_addr;
+ for (j = 0; j < 3; j++)
+ actions->l2_rewrite_dmac[j] = cpu_to_be16(*(p + j));
+ }
+
+ if (!is_wildcard(&eth_addr_mask[ETH_ALEN / 2], ETH_ALEN)) {
+ if (!is_exactmatch(&eth_addr_mask[ETH_ALEN / 2], ETH_ALEN))
+ return -EINVAL;
+ /* FW expects smac to be in u16 array format */
+ p = &eth_addr[ETH_ALEN / 2];
+ for (j = 0; j < 3; j++)
+ actions->l2_rewrite_smac[j] = cpu_to_be16(*(p + j));
+ }
+
+ return 0;
+}
+
+static int
+bnxt_tc_parse_pedit(struct bnxt *bp, struct bnxt_tc_actions *actions,
+ struct flow_action_entry *act, int act_idx, u8 *eth_addr,
+ u8 *eth_addr_mask)
+{
+ size_t offset_of_ip6_daddr = offsetof(struct ipv6hdr, daddr);
+ size_t offset_of_ip6_saddr = offsetof(struct ipv6hdr, saddr);
+ u32 mask, val, offset, idx;
+ u8 htype;
+
+ offset = act->mangle.offset;
+ htype = act->mangle.htype;
+ mask = ~act->mangle.mask;
+ val = act->mangle.val;
+
+ switch (htype) {
+ case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
+ if (offset > PEDIT_OFFSET_SMAC_LAST_4_BYTES) {
+ netdev_err(bp->dev,
+ "%s: eth_hdr: Invalid pedit field\n",
+ __func__);
+ return -EINVAL;
+ }
+ actions->flags |= BNXT_TC_ACTION_FLAG_L2_REWRITE;
+
+ bnxt_set_l2_key_mask(val, mask, &eth_addr[offset],
+ &eth_addr_mask[offset]);
+ break;
+ case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
+ actions->flags |= BNXT_TC_ACTION_FLAG_NAT_XLATE;
+ actions->nat.l3_is_ipv4 = true;
+ if (offset == offsetof(struct iphdr, saddr)) {
+ actions->nat.src_xlate = true;
+ actions->nat.l3.ipv4.saddr.s_addr = htonl(val);
+ } else if (offset == offsetof(struct iphdr, daddr)) {
+ actions->nat.src_xlate = false;
+ actions->nat.l3.ipv4.daddr.s_addr = htonl(val);
+ } else {
+ netdev_err(bp->dev,
+ "%s: IPv4_hdr: Invalid pedit field\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ netdev_dbg(bp->dev, "nat.src_xlate = %d src IP: %pI4 dst ip : %pI4\n",
+ actions->nat.src_xlate, &actions->nat.l3.ipv4.saddr,
+ &actions->nat.l3.ipv4.daddr);
+ break;
+
+ case FLOW_ACT_MANGLE_HDR_TYPE_IP6:
+ actions->flags |= BNXT_TC_ACTION_FLAG_NAT_XLATE;
+ actions->nat.l3_is_ipv4 = false;
+ if (offset >= offsetof(struct ipv6hdr, saddr) &&
+ offset < offset_of_ip6_daddr) {
+ /* 16 byte IPv6 address comes in 4 iterations of
+ * 4byte chunks each
+ */
+ actions->nat.src_xlate = true;
+ idx = (offset - offset_of_ip6_saddr) / 4;
+ /* First 4bytes will be copied to idx 0 and so on */
+ actions->nat.l3.ipv6.saddr.s6_addr32[idx] = htonl(val);
+ } else if (offset >= offset_of_ip6_daddr &&
+ offset < offset_of_ip6_daddr + 16) {
+ actions->nat.src_xlate = false;
+ idx = (offset - offset_of_ip6_daddr) / 4;
+ actions->nat.l3.ipv6.daddr.s6_addr32[idx] = htonl(val);
+ } else {
+ netdev_err(bp->dev,
+ "%s: IPv6_hdr: Invalid pedit field\n",
+ __func__);
+ return -EINVAL;
+ }
+ break;
+ case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
+ case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
+ /* HW does not support L4 rewrite alone without L3
+ * rewrite
+ */
+ if (!(actions->flags & BNXT_TC_ACTION_FLAG_NAT_XLATE)) {
+ netdev_err(bp->dev,
+ "Need to specify L3 rewrite as well\n");
+ return -EINVAL;
+ }
+ if (actions->nat.src_xlate)
+ actions->nat.l4.ports.sport = htons(val);
+ else
+ actions->nat.l4.ports.dport = htons(val);
+ netdev_dbg(bp->dev, "actions->nat.sport = %d dport = %d\n",
+ actions->nat.l4.ports.sport,
+ actions->nat.l4.ports.dport);
+ break;
+ default:
+ netdev_err(bp->dev, "%s: Unsupported pedit hdr type\n",
+ __func__);
+ return -EINVAL;
+ }
+ return 0;
+}
+
static int bnxt_tc_parse_actions(struct bnxt *bp,
struct bnxt_tc_actions *actions,
struct flow_action *flow_action)
{
+ /* Used to store the L2 rewrite mask for dmac (6 bytes) followed by
+ * smac (6 bytes) if rewrite of both is specified, otherwise either
+ * dmac or smac
+ */
+ u16 eth_addr_mask[ETH_ALEN] = { 0 };
+ /* Used to store the L2 rewrite key for dmac (6 bytes) followed by
+ * smac (6 bytes) if rewrite of both is specified, otherwise either
+ * dmac or smac
+ */
+ u16 eth_addr[ETH_ALEN] = { 0 };
struct flow_action_entry *act;
int i, rc;
@@ -148,11 +324,26 @@ static int bnxt_tc_parse_actions(struct bnxt *bp,
case FLOW_ACTION_TUNNEL_DECAP:
actions->flags |= BNXT_TC_ACTION_FLAG_TUNNEL_DECAP;
break;
+ /* Packet edit: L2 rewrite, NAT, NAPT */
+ case FLOW_ACTION_MANGLE:
+ rc = bnxt_tc_parse_pedit(bp, actions, act, i,
+ (u8 *)eth_addr,
+ (u8 *)eth_addr_mask);
+ if (rc)
+ return rc;
+ break;
default:
break;
}
}
+ if (actions->flags & BNXT_TC_ACTION_FLAG_L2_REWRITE) {
+ rc = bnxt_fill_l2_rewrite_fields(actions, eth_addr,
+ eth_addr_mask);
+ if (rc)
+ return rc;
+ }
+
if (actions->flags & BNXT_TC_ACTION_FLAG_FWD) {
if (actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP) {
/* dst_fid is PF's fid */
@@ -401,6 +592,76 @@ static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow,
req.src_fid = cpu_to_le16(flow->src_fid);
req.ref_flow_handle = ref_flow_handle;
+ if (actions->flags & BNXT_TC_ACTION_FLAG_L2_REWRITE) {
+ memcpy(req.l2_rewrite_dmac, actions->l2_rewrite_dmac,
+ ETH_ALEN);
+ memcpy(req.l2_rewrite_smac, actions->l2_rewrite_smac,
+ ETH_ALEN);
+ action_flags |=
+ CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE;
+ }
+
+ if (actions->flags & BNXT_TC_ACTION_FLAG_NAT_XLATE) {
+ if (actions->nat.l3_is_ipv4) {
+ action_flags |=
+ CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_IPV4_ADDRESS;
+
+ if (actions->nat.src_xlate) {
+ action_flags |=
+ CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_SRC;
+ /* L3 source rewrite */
+ req.nat_ip_address[0] =
+ actions->nat.l3.ipv4.saddr.s_addr;
+ /* L4 source port */
+ if (actions->nat.l4.ports.sport)
+ req.nat_port =
+ actions->nat.l4.ports.sport;
+ } else {
+ action_flags |=
+ CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_DEST;
+ /* L3 destination rewrite */
+ req.nat_ip_address[0] =
+ actions->nat.l3.ipv4.daddr.s_addr;
+ /* L4 destination port */
+ if (actions->nat.l4.ports.dport)
+ req.nat_port =
+ actions->nat.l4.ports.dport;
+ }
+ netdev_dbg(bp->dev,
+ "req.nat_ip_address: %pI4 src_xlate: %d req.nat_port: %x\n",
+ req.nat_ip_address, actions->nat.src_xlate,
+ req.nat_port);
+ } else {
+ if (actions->nat.src_xlate) {
+ action_flags |=
+ CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_SRC;
+ /* L3 source rewrite */
+ memcpy(req.nat_ip_address,
+ actions->nat.l3.ipv6.saddr.s6_addr32,
+ sizeof(req.nat_ip_address));
+ /* L4 source port */
+ if (actions->nat.l4.ports.sport)
+ req.nat_port =
+ actions->nat.l4.ports.sport;
+ } else {
+ action_flags |=
+ CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_DEST;
+ /* L3 destination rewrite */
+ memcpy(req.nat_ip_address,
+ actions->nat.l3.ipv6.daddr.s6_addr32,
+ sizeof(req.nat_ip_address));
+ /* L4 destination port */
+ if (actions->nat.l4.ports.dport)
+ req.nat_port =
+ actions->nat.l4.ports.dport;
+ }
+ netdev_dbg(bp->dev,
+ "req.nat_ip_address: %pI6 src_xlate: %d req.nat_port: %x\n",
+ req.nat_ip_address, actions->nat.src_xlate,
+ req.nat_port);
+ }
+ }
+
if (actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP ||
actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP) {
req.tunnel_handle = tunnel_handle;
@@ -1274,7 +1535,8 @@ static int bnxt_tc_add_flow(struct bnxt *bp, u16 src_fid,
if (!bnxt_tc_can_offload(bp, flow)) {
rc = -EOPNOTSUPP;
- goto free_node;
+ kfree_rcu(new_node, rcu);
+ return rc;
}
/* If a flow exists with the same cookie, delete it */
@@ -1580,6 +1842,147 @@ int bnxt_tc_setup_flower(struct bnxt *bp, u16 src_fid,
}
}
+static int bnxt_tc_setup_indr_block_cb(enum tc_setup_type type,
+ void *type_data, void *cb_priv)
+{
+ struct bnxt_flower_indr_block_cb_priv *priv = cb_priv;
+ struct flow_cls_offload *flower = type_data;
+ struct bnxt *bp = priv->bp;
+
+ if (flower->common.chain_index)
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, flower);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static struct bnxt_flower_indr_block_cb_priv *
+bnxt_tc_indr_block_cb_lookup(struct bnxt *bp, struct net_device *netdev)
+{
+ struct bnxt_flower_indr_block_cb_priv *cb_priv;
+
+ /* All callback list access should be protected by RTNL. */
+ ASSERT_RTNL();
+
+ list_for_each_entry(cb_priv, &bp->tc_indr_block_list, list)
+ if (cb_priv->tunnel_netdev == netdev)
+ return cb_priv;
+
+ return NULL;
+}
+
+static void bnxt_tc_setup_indr_rel(void *cb_priv)
+{
+ struct bnxt_flower_indr_block_cb_priv *priv = cb_priv;
+
+ list_del(&priv->list);
+ kfree(priv);
+}
+
+static int bnxt_tc_setup_indr_block(struct net_device *netdev, struct bnxt *bp,
+ struct flow_block_offload *f)
+{
+ struct bnxt_flower_indr_block_cb_priv *cb_priv;
+ struct flow_block_cb *block_cb;
+
+ if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+ return -EOPNOTSUPP;
+
+ switch (f->command) {
+ case FLOW_BLOCK_BIND:
+ cb_priv = kmalloc(sizeof(*cb_priv), GFP_KERNEL);
+ if (!cb_priv)
+ return -ENOMEM;
+
+ cb_priv->tunnel_netdev = netdev;
+ cb_priv->bp = bp;
+ list_add(&cb_priv->list, &bp->tc_indr_block_list);
+
+ block_cb = flow_block_cb_alloc(bnxt_tc_setup_indr_block_cb,
+ cb_priv, cb_priv,
+ bnxt_tc_setup_indr_rel);
+ if (IS_ERR(block_cb)) {
+ list_del(&cb_priv->list);
+ kfree(cb_priv);
+ return PTR_ERR(block_cb);
+ }
+
+ flow_block_cb_add(block_cb, f);
+ list_add_tail(&block_cb->driver_list, &bnxt_block_cb_list);
+ break;
+ case FLOW_BLOCK_UNBIND:
+ cb_priv = bnxt_tc_indr_block_cb_lookup(bp, netdev);
+ if (!cb_priv)
+ return -ENOENT;
+
+ block_cb = flow_block_cb_lookup(f->block,
+ bnxt_tc_setup_indr_block_cb,
+ cb_priv);
+ if (!block_cb)
+ return -ENOENT;
+
+ flow_block_cb_remove(block_cb, f);
+ list_del(&block_cb->driver_list);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+static int bnxt_tc_setup_indr_cb(struct net_device *netdev, void *cb_priv,
+ enum tc_setup_type type, void *type_data)
+{
+ switch (type) {
+ case TC_SETUP_BLOCK:
+ return bnxt_tc_setup_indr_block(netdev, cb_priv, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static bool bnxt_is_netdev_indr_offload(struct net_device *netdev)
+{
+ return netif_is_vxlan(netdev);
+}
+
+static int bnxt_tc_indr_block_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct net_device *netdev;
+ struct bnxt *bp;
+ int rc;
+
+ netdev = netdev_notifier_info_to_dev(ptr);
+ if (!bnxt_is_netdev_indr_offload(netdev))
+ return NOTIFY_OK;
+
+ bp = container_of(nb, struct bnxt, tc_netdev_nb);
+
+ switch (event) {
+ case NETDEV_REGISTER:
+ rc = __flow_indr_block_cb_register(netdev, bp,
+ bnxt_tc_setup_indr_cb,
+ bp);
+ if (rc)
+ netdev_info(bp->dev,
+ "Failed to register indirect blk: dev: %s",
+ netdev->name);
+ break;
+ case NETDEV_UNREGISTER:
+ __flow_indr_block_cb_unregister(netdev,
+ bnxt_tc_setup_indr_cb,
+ bp);
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
static const struct rhashtable_params bnxt_tc_flow_ht_params = {
.head_offset = offsetof(struct bnxt_tc_flow_node, node),
.key_offset = offsetof(struct bnxt_tc_flow_node, cookie),
@@ -1663,7 +2066,15 @@ int bnxt_init_tc(struct bnxt *bp)
bp->dev->hw_features |= NETIF_F_HW_TC;
bp->dev->features |= NETIF_F_HW_TC;
bp->tc_info = tc_info;
- return 0;
+
+ /* init indirect block notifications */
+ INIT_LIST_HEAD(&bp->tc_indr_block_list);
+ bp->tc_netdev_nb.notifier_call = bnxt_tc_indr_block_event;
+ rc = register_netdevice_notifier(&bp->tc_netdev_nb);
+ if (!rc)
+ return 0;
+
+ rhashtable_destroy(&tc_info->encap_table);
destroy_decap_table:
rhashtable_destroy(&tc_info->decap_table);
@@ -1685,6 +2096,7 @@ void bnxt_shutdown_tc(struct bnxt *bp)
if (!bnxt_tc_flower_enabled(bp))
return;
+ unregister_netdevice_notifier(&bp->tc_netdev_nb);
rhashtable_destroy(&tc_info->flow_table);
rhashtable_destroy(&tc_info->l2_table);
rhashtable_destroy(&tc_info->decap_l2_table);
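The comment above bnxt_set_l2_key_mask() describes how pedit delivers a 6-byte MAC as two unaligned 4-byte chunks. Below is a userspace re-creation of that consolidation, assuming a little-endian host and using memcpy() in place of the kernel's get_unaligned()/put_unaligned() helpers; the values are illustrative:

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    static void set_key_mask(uint32_t part_key, uint32_t part_mask,
                             uint8_t *actual_key, uint8_t *actual_mask)
    {
        uint32_t key, mask;

        memcpy(&key, actual_key, 4);
        memcpy(&mask, actual_mask, 4);

        part_key &= part_mask;         /* keep only the bytes this    */
        part_key |= key & ~part_mask;  /* iteration actually rewrites */

        mask |= part_mask;
        memcpy(actual_mask, &mask, 4);
        memcpy(actual_key, &part_key, 4);
    }

    int main(void)
    {
        uint8_t key[8] = { 0 }, mask[8] = { 0 };

        /* Iteration 1: dmac bytes 0-3; iteration 2: dmac bytes 4-5. */
        set_key_mask(0x44332211, 0xffffffff, &key[0], &mask[0]);
        set_key_mask(0x00006655, 0x0000ffff, &key[4], &mask[4]);

        for (int i = 0; i < 6; i++)
            printf("%02x ", key[i]);   /* 11 22 33 44 55 66 */
        printf("\n");
        return 0;
    }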
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h
index 4f05305052f2..10c62b094914 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h
@@ -62,6 +62,12 @@ struct bnxt_tc_tunnel_key {
__be32 id;
};
+#define bnxt_eth_addr_key_mask_invalid(eth_addr, eth_addr_mask) \
+ ((is_wildcard(&(eth_addr)[0], ETH_ALEN) && \
+ is_wildcard(&(eth_addr)[ETH_ALEN / 2], ETH_ALEN)) || \
+ (is_wildcard(&(eth_addr_mask)[0], ETH_ALEN) && \
+ is_wildcard(&(eth_addr_mask)[ETH_ALEN / 2], ETH_ALEN)))
+
struct bnxt_tc_actions {
u32 flags;
#define BNXT_TC_ACTION_FLAG_FWD BIT(0)
@@ -71,6 +77,8 @@ struct bnxt_tc_actions {
#define BNXT_TC_ACTION_FLAG_DROP BIT(5)
#define BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP BIT(6)
#define BNXT_TC_ACTION_FLAG_TUNNEL_DECAP BIT(7)
+#define BNXT_TC_ACTION_FLAG_L2_REWRITE BIT(8)
+#define BNXT_TC_ACTION_FLAG_NAT_XLATE BIT(9)
u16 dst_fid;
struct net_device *dst_dev;
@@ -79,6 +87,18 @@ struct bnxt_tc_actions {
/* tunnel encap */
struct ip_tunnel_key tun_encap_key;
+#define PEDIT_OFFSET_SMAC_LAST_4_BYTES 0x8
+ __be16 l2_rewrite_dmac[3];
+ __be16 l2_rewrite_smac[3];
+ struct {
+ bool src_xlate; /* true => translate src,
+ * false => translate dst
+ * Mutually exclusive, i.e cannot set both
+ */
+ bool l3_is_ipv4; /* false means L3 is ipv6 */
+ struct bnxt_tc_l3_key l3;
+ struct bnxt_tc_l4_key l4;
+ } nat;
};
struct bnxt_tc_flow {
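The IPv6 branch of bnxt_tc_parse_pedit() maps each 4-byte mangle offset to one s6_addr32[] slot via idx = (offset - offsetof(struct ipv6hdr, saddr)) / 4. A tiny standalone check of that mapping; the layout struct is a stand-in for the kernel's struct ipv6hdr (whose saddr really does start at byte 8):

    #include <stdio.h>
    #include <stddef.h>

    struct ipv6hdr_layout { unsigned char skip[8], saddr[16], daddr[16]; };

    int main(void)
    {
        size_t saddr_off = offsetof(struct ipv6hdr_layout, saddr);

        for (size_t off = saddr_off; off < saddr_off + 16; off += 4)
            printf("mangle offset %zu -> s6_addr32[%zu]\n",
                   off, (off - saddr_off) / 4);
        return 0;
    }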
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index b2c160947fc8..c601ff7b8f61 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -81,7 +81,7 @@ static int bnxt_unregister_dev(struct bnxt_en_dev *edev, int ulp_id)
edev->en_ops->bnxt_free_msix(edev, ulp_id);
if (ulp->max_async_event_id)
- bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0);
+ bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, true);
RCU_INIT_POINTER(ulp->ulp_ops, NULL);
synchronize_rcu();
@@ -182,7 +182,7 @@ static int bnxt_free_msix_vecs(struct bnxt_en_dev *edev, int ulp_id)
edev->ulp_tbl[ulp_id].msix_requested = 0;
edev->flags &= ~BNXT_EN_FLAG_MSIX_REQUESTED;
- if (netif_running(dev)) {
+ if (netif_running(dev) && !(edev->flags & BNXT_EN_FLAG_ULP_STOPPED)) {
bnxt_close_nic(bp, true, false);
bnxt_open_nic(bp, true, false);
}
@@ -266,6 +266,7 @@ void bnxt_ulp_stop(struct bnxt *bp)
if (!edev)
return;
+ edev->flags |= BNXT_EN_FLAG_ULP_STOPPED;
for (i = 0; i < BNXT_MAX_ULP; i++) {
struct bnxt_ulp *ulp = &edev->ulp_tbl[i];
@@ -276,7 +277,7 @@ void bnxt_ulp_stop(struct bnxt *bp)
}
}
-void bnxt_ulp_start(struct bnxt *bp)
+void bnxt_ulp_start(struct bnxt *bp, int err)
{
struct bnxt_en_dev *edev = bp->edev;
struct bnxt_ulp_ops *ops;
@@ -285,6 +286,11 @@ void bnxt_ulp_start(struct bnxt *bp)
if (!edev)
return;
+ edev->flags &= ~BNXT_EN_FLAG_ULP_STOPPED;
+
+ if (err)
+ return;
+
for (i = 0; i < BNXT_MAX_ULP; i++) {
struct bnxt_ulp *ulp = &edev->ulp_tbl[i];
@@ -435,7 +441,7 @@ static int bnxt_register_async_events(struct bnxt_en_dev *edev, int ulp_id,
/* Make sure bnxt_ulp_async_events() sees this order */
smp_wmb();
ulp->max_async_event_id = max_id;
- bnxt_hwrm_func_rgtr_async_events(bp, events_bmap, max_id + 1);
+ bnxt_hwrm_func_drv_rgtr(bp, events_bmap, max_id + 1, true);
return 0;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
index cd78453d0bf0..9895406b9830 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
@@ -64,6 +64,7 @@ struct bnxt_en_dev {
#define BNXT_EN_FLAG_ROCE_CAP (BNXT_EN_FLAG_ROCEV1_CAP | \
BNXT_EN_FLAG_ROCEV2_CAP)
#define BNXT_EN_FLAG_MSIX_REQUESTED 0x4
+ #define BNXT_EN_FLAG_ULP_STOPPED 0x8
const struct bnxt_en_ops *en_ops;
struct bnxt_ulp ulp_tbl[BNXT_MAX_ULP];
};
@@ -92,7 +93,7 @@ int bnxt_get_ulp_msix_num(struct bnxt *bp);
int bnxt_get_ulp_msix_base(struct bnxt *bp);
int bnxt_get_ulp_stat_ctxs(struct bnxt *bp);
void bnxt_ulp_stop(struct bnxt *bp);
-void bnxt_ulp_start(struct bnxt *bp);
+void bnxt_ulp_start(struct bnxt *bp, int err);
void bnxt_ulp_sriov_cfg(struct bnxt *bp, int num_vfs);
void bnxt_ulp_shutdown(struct bnxt *bp);
void bnxt_ulp_irq_stop(struct bnxt *bp);
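Together, BNXT_EN_FLAG_ULP_STOPPED and the new err argument give bnxt_ulp_start() enough context to leave the ULPs down after a failed reset. A sketch of the intended calling convention; the reset helper name is illustrative, not an actual bnxt function:

    bnxt_ulp_stop(bp);               /* sets BNXT_EN_FLAG_ULP_STOPPED */
    rc = do_reset_or_recovery(bp);   /* illustrative helper name      */
    bnxt_ulp_start(bp, rc);          /* clears the flag; if rc != 0,
                                      * ULP ops are not restarted     */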
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 155599dcee76..61ab7d21f6bd 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -5208,6 +5208,8 @@ static void cnic_init_rings(struct cnic_dev *dev)
cnic_init_bnx2x_tx_ring(dev, data);
cnic_init_bnx2x_rx_ring(dev, data);
+ data->general.fp_hsi_ver = ETH_FP_HSI_VERSION;
+
l5_data.phy_address.lo = udev->l2_buf_map & 0xffffffff;
l5_data.phy_address.hi = (u64) udev->l2_buf_map >> 32;
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 1de51811fcb4..120fa05a39ff 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -2576,7 +2576,8 @@ static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
}
/* Init rDma */
- bcmgenet_rdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE);
+ bcmgenet_rdma_writel(priv, priv->dma_max_burst_length,
+ DMA_SCB_BURST_SIZE);
/* Initialize Rx queues */
ret = bcmgenet_init_rx_queues(priv->dev);
@@ -2589,7 +2590,8 @@ static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
}
/* Init tDma */
- bcmgenet_tdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE);
+ bcmgenet_tdma_writel(priv, priv->dma_max_burst_length,
+ DMA_SCB_BURST_SIZE);
/* Initialize Tx queues */
bcmgenet_init_tx_queues(priv->dev);
@@ -3420,12 +3422,48 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
params->words_per_bd);
}
+struct bcmgenet_plat_data {
+ enum bcmgenet_version version;
+ u32 dma_max_burst_length;
+};
+
+static const struct bcmgenet_plat_data v1_plat_data = {
+ .version = GENET_V1,
+ .dma_max_burst_length = DMA_MAX_BURST_LENGTH,
+};
+
+static const struct bcmgenet_plat_data v2_plat_data = {
+ .version = GENET_V2,
+ .dma_max_burst_length = DMA_MAX_BURST_LENGTH,
+};
+
+static const struct bcmgenet_plat_data v3_plat_data = {
+ .version = GENET_V3,
+ .dma_max_burst_length = DMA_MAX_BURST_LENGTH,
+};
+
+static const struct bcmgenet_plat_data v4_plat_data = {
+ .version = GENET_V4,
+ .dma_max_burst_length = DMA_MAX_BURST_LENGTH,
+};
+
+static const struct bcmgenet_plat_data v5_plat_data = {
+ .version = GENET_V5,
+ .dma_max_burst_length = DMA_MAX_BURST_LENGTH,
+};
+
+static const struct bcmgenet_plat_data bcm2711_plat_data = {
+ .version = GENET_V5,
+ .dma_max_burst_length = 0x08,
+};
+
static const struct of_device_id bcmgenet_match[] = {
- { .compatible = "brcm,genet-v1", .data = (void *)GENET_V1 },
- { .compatible = "brcm,genet-v2", .data = (void *)GENET_V2 },
- { .compatible = "brcm,genet-v3", .data = (void *)GENET_V3 },
- { .compatible = "brcm,genet-v4", .data = (void *)GENET_V4 },
- { .compatible = "brcm,genet-v5", .data = (void *)GENET_V5 },
+ { .compatible = "brcm,genet-v1", .data = &v1_plat_data },
+ { .compatible = "brcm,genet-v2", .data = &v2_plat_data },
+ { .compatible = "brcm,genet-v3", .data = &v3_plat_data },
+ { .compatible = "brcm,genet-v4", .data = &v4_plat_data },
+ { .compatible = "brcm,genet-v5", .data = &v5_plat_data },
+ { .compatible = "brcm,bcm2711-genet-v5", .data = &bcm2711_plat_data },
{ },
};
MODULE_DEVICE_TABLE(of, bcmgenet_match);
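Replacing the casted enum with a pointer to struct bcmgenet_plat_data lets new per-SoC fields ride along with the version. The probe below reads it through of_id->data; the generic idiom for the same lookup is of_device_get_match_data(), sketched here:

    const struct bcmgenet_plat_data *pdata;

    pdata = of_device_get_match_data(&pdev->dev);  /* NULL if no DT match */
    if (pdata) {
        priv->version = pdata->version;
        priv->dma_max_burst_length = pdata->dma_max_burst_length;
    }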
@@ -3435,6 +3473,7 @@ static int bcmgenet_probe(struct platform_device *pdev)
struct bcmgenet_platform_data *pd = pdev->dev.platform_data;
struct device_node *dn = pdev->dev.of_node;
const struct of_device_id *of_id = NULL;
+ const struct bcmgenet_plat_data *pdata;
struct bcmgenet_priv *priv;
struct net_device *dev;
const void *macaddr;
@@ -3458,24 +3497,21 @@ static int bcmgenet_probe(struct platform_device *pdev)
priv = netdev_priv(dev);
priv->irq0 = platform_get_irq(pdev, 0);
+ if (priv->irq0 < 0) {
+ err = priv->irq0;
+ goto err;
+ }
priv->irq1 = platform_get_irq(pdev, 1);
- priv->wol_irq = platform_get_irq(pdev, 2);
- if (!priv->irq0 || !priv->irq1) {
- dev_err(&pdev->dev, "can't find IRQs\n");
- err = -EINVAL;
+ if (priv->irq1 < 0) {
+ err = priv->irq1;
goto err;
}
+ priv->wol_irq = platform_get_irq_optional(pdev, 2);
- if (dn) {
+ if (dn)
macaddr = of_get_mac_address(dn);
- if (IS_ERR(macaddr)) {
- dev_err(&pdev->dev, "can't find MAC address\n");
- err = -EINVAL;
- goto err;
- }
- } else {
+ else
macaddr = pd->mac_address;
- }
priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base)) {
@@ -3487,7 +3523,12 @@ static int bcmgenet_probe(struct platform_device *pdev)
SET_NETDEV_DEV(dev, &pdev->dev);
dev_set_drvdata(&pdev->dev, dev);
- ether_addr_copy(dev->dev_addr, macaddr);
+ if (IS_ERR_OR_NULL(macaddr) || !is_valid_ether_addr(macaddr)) {
+ dev_warn(&pdev->dev, "using random Ethernet MAC\n");
+ eth_hw_addr_random(dev);
+ } else {
+ ether_addr_copy(dev->dev_addr, macaddr);
+ }
dev->watchdog_timeo = 2 * HZ;
dev->ethtool_ops = &bcmgenet_ethtool_ops;
dev->netdev_ops = &bcmgenet_netdev_ops;
@@ -3514,10 +3555,14 @@ static int bcmgenet_probe(struct platform_device *pdev)
priv->dev = dev;
priv->pdev = pdev;
- if (of_id)
- priv->version = (enum bcmgenet_version)of_id->data;
- else
+ if (of_id) {
+ pdata = of_id->data;
+ priv->version = pdata->version;
+ priv->dma_max_burst_length = pdata->dma_max_burst_length;
+ } else {
priv->version = pd->genet_version;
+ priv->dma_max_burst_length = DMA_MAX_BURST_LENGTH;
+ }
priv->clk = devm_clk_get(&priv->pdev->dev, "enet");
if (IS_ERR(priv->clk)) {
@@ -3602,6 +3647,11 @@ static int bcmgenet_remove(struct platform_device *pdev)
return 0;
}
+static void bcmgenet_shutdown(struct platform_device *pdev)
+{
+ bcmgenet_remove(pdev);
+}
+
#ifdef CONFIG_PM_SLEEP
static int bcmgenet_resume(struct device *d)
{
@@ -3721,6 +3771,7 @@ static SIMPLE_DEV_PM_OPS(bcmgenet_pm_ops, bcmgenet_suspend, bcmgenet_resume);
static struct platform_driver bcmgenet_driver = {
.probe = bcmgenet_probe,
.remove = bcmgenet_remove,
+ .shutdown = bcmgenet_shutdown,
.driver = {
.name = "bcmgenet",
.of_match_table = bcmgenet_match,
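Note the probe rework above: the old !priv->irq0 || !priv->irq1 test could never fire, because platform_get_irq() reports failure as a negative errno, not zero. The new code propagates each error and fetches the WoL line with platform_get_irq_optional() so its absence is non-fatal. The pattern in isolation:

    int irq = platform_get_irq(pdev, 0);
    if (irq < 0)
        return irq;                /* propagates -EPROBE_DEFER etc. */

    int wol_irq = platform_get_irq_optional(pdev, 2);
    /* wol_irq < 0 just means no wake-up interrupt is wired up. */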
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index dbc69d8fa05f..a5659197598f 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -664,6 +664,7 @@ struct bcmgenet_priv {
bool crc_fwd_en;
unsigned int dma_rx_chk_bit;
+ u32 dma_max_burst_length;
u32 msg_enable;
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index dbe18cdf6c1b..6392a2530183 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -213,11 +213,10 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
udelay(2);
}
- priv->ext_phy = !priv->internal_phy &&
- (priv->phy_interface != PHY_INTERFACE_MODE_MOCA);
-
switch (priv->phy_interface) {
case PHY_INTERFACE_MODE_INTERNAL:
+ phy_name = "internal PHY";
+ /* fall through */
case PHY_INTERFACE_MODE_MOCA:
/* Irrespective of the actually configured PHY speed (100 or
* 1000) GENETv4 only has an internal GPHY so we will just end
@@ -229,11 +228,7 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
else
port_ctrl = PORT_MODE_INT_EPHY;
- bcmgenet_sys_writel(priv, port_ctrl, SYS_PORT_CTRL);
-
- if (priv->internal_phy) {
- phy_name = "internal PHY";
- } else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
+ if (!phy_name) {
phy_name = "MoCA";
bcmgenet_moca_phy_setup(priv);
}
@@ -242,11 +237,7 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
case PHY_INTERFACE_MODE_MII:
phy_name = "external MII";
phy_set_max_speed(phydev, SPEED_100);
- bcmgenet_sys_writel(priv,
- PORT_MODE_EXT_EPHY, SYS_PORT_CTRL);
- /* Restore the MII PHY after isolation */
- if (bmcr >= 0)
- phy_write(phydev, MII_BMCR, bmcr);
+ port_ctrl = PORT_MODE_EXT_EPHY;
break;
case PHY_INTERFACE_MODE_REVMII:
@@ -261,31 +252,43 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
port_ctrl = PORT_MODE_EXT_RVMII_50;
else
port_ctrl = PORT_MODE_EXT_RVMII_25;
- bcmgenet_sys_writel(priv, port_ctrl, SYS_PORT_CTRL);
break;
case PHY_INTERFACE_MODE_RGMII:
/* RGMII_NO_ID: TXC transitions at the same time as TXD
* (requires PCB or receiver-side delay)
- * RGMII: Add 2ns delay on TXC (90 degree shift)
*
* ID is implicitly disabled for 100Mbps (RG)MII operation.
*/
+ phy_name = "external RGMII (no delay)";
id_mode_dis = BIT(16);
- /* fall through */
+ port_ctrl = PORT_MODE_EXT_GPHY;
+ break;
+
case PHY_INTERFACE_MODE_RGMII_TXID:
- if (id_mode_dis)
- phy_name = "external RGMII (no delay)";
- else
- phy_name = "external RGMII (TX delay)";
- bcmgenet_sys_writel(priv,
- PORT_MODE_EXT_GPHY, SYS_PORT_CTRL);
+ /* RGMII_TXID: Add 2ns delay on TXC (90 degree shift) */
+ phy_name = "external RGMII (TX delay)";
+ port_ctrl = PORT_MODE_EXT_GPHY;
+ break;
+
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ phy_name = "external RGMII (RX delay)";
+ port_ctrl = PORT_MODE_EXT_GPHY;
break;
default:
dev_err(kdev, "unknown phy mode: %d\n", priv->phy_interface);
return -EINVAL;
}
+ bcmgenet_sys_writel(priv, port_ctrl, SYS_PORT_CTRL);
+
+ /* Restore the MII PHY after isolation */
+ if (bmcr >= 0)
+ phy_write(phydev, MII_BMCR, bmcr);
+
+ priv->ext_phy = !priv->internal_phy &&
+ (priv->phy_interface != PHY_INTERFACE_MODE_MOCA);
+
/* This is an external PHY (xMII), so we need to enable the RGMII
* block for the interface to work
*/
@@ -479,7 +482,7 @@ static int bcmgenet_mii_of_init(struct bcmgenet_priv *priv)
struct device_node *dn = priv->pdev->dev.of_node;
struct device *kdev = &priv->pdev->dev;
struct phy_device *phydev;
- int phy_mode;
+ phy_interface_t phy_mode;
int ret;
/* Fetch the PHY phandle */
@@ -497,10 +500,10 @@ static int bcmgenet_mii_of_init(struct bcmgenet_priv *priv)
}
/* Get the link mode */
- phy_mode = of_get_phy_mode(dn);
- if (phy_mode < 0) {
+ ret = of_get_phy_mode(dn, &phy_mode);
+ if (ret) {
dev_err(kdev, "invalid PHY mode property\n");
- return phy_mode;
+ return ret;
}
priv->phy_interface = phy_mode;
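Both the genet and macb hunks switch to the two-argument of_get_phy_mode(), which now returns 0 or a negative errno and writes the mode through a phy_interface_t out-parameter instead of overloading the return value. Usage in isolation:

    phy_interface_t interface;
    int ret = of_get_phy_mode(np, &interface);
    if (ret)
        interface = PHY_INTERFACE_MODE_MII;  /* fallback, as macb_probe does */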
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 77f3511b97de..ca3aa1250dd1 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -6280,6 +6280,10 @@ static int tg3_ptp_enable(struct ptp_clock_info *ptp,
switch (rq->type) {
case PTP_CLK_REQ_PEROUT:
+ /* Reject requests with unsupported flags */
+ if (rq->perout.flags)
+ return -EOPNOTSUPP;
+
if (rq->perout.index != 0)
return -EINVAL;
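The tg3 hunk adds the standard guard for ptp_clock_info .enable handlers: any PTP_PEROUT_* flag the hardware does not implement must be rejected up front rather than silently ignored. In isolation:

    if (rq->perout.flags)      /* e.g. PTP_PEROUT_ONE_SHOT set by userspace */
        return -EOPNOTSUPP;    /* this hardware implements none of them     */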
diff --git a/drivers/net/ethernet/cadence/Kconfig b/drivers/net/ethernet/cadence/Kconfig
index f4b3bd85dfe3..53b50c24d9c9 100644
--- a/drivers/net/ethernet/cadence/Kconfig
+++ b/drivers/net/ethernet/cadence/Kconfig
@@ -22,7 +22,7 @@ if NET_VENDOR_CADENCE
config MACB
tristate "Cadence MACB/GEM support"
depends on HAS_DMA && COMMON_CLK
- select PHYLIB
+ select PHYLINK
---help---
The Cadence MACB ethernet interface is found on many Atmel AT32 and
AT91 parts. This driver also supports the Cadence GEM (Gigabit
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 03983bd46eef..19fe4f4867c7 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -7,7 +7,7 @@
#ifndef _MACB_H
#define _MACB_H
-#include <linux/phy.h>
+#include <linux/phylink.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/net_tstamp.h>
#include <linux/interrupt.h>
@@ -1185,15 +1185,14 @@ struct macb {
struct macb_or_gem_ops macbgem_ops;
struct mii_bus *mii_bus;
- struct device_node *phy_node;
- int link;
- int speed;
- int duplex;
+ struct phylink *phylink;
+ struct phylink_config phylink_config;
u32 caps;
unsigned int dma_burst_length;
phy_interface_t phy_interface;
+ int speed;
/* AT91RM9200 transmit */
struct sk_buff *skb; /* holds skb until xmit interrupt completes */
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 1e1b774e1953..d5ae2e1e0b0e 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -25,7 +25,7 @@
#include <linux/dma-mapping.h>
#include <linux/platform_data/macb.h>
#include <linux/platform_device.h>
-#include <linux/phy.h>
+#include <linux/phylink.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
@@ -388,6 +388,27 @@ mdio_pm_exit:
return status;
}
+static void macb_init_buffers(struct macb *bp)
+{
+ struct macb_queue *queue;
+ unsigned int q;
+
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+ queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma));
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ if (bp->hw_dma_cap & HW_DMA_CAP_64B)
+ queue_writel(queue, RBQPH,
+ upper_32_bits(queue->rx_ring_dma));
+#endif
+ queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ if (bp->hw_dma_cap & HW_DMA_CAP_64B)
+ queue_writel(queue, TBQPH,
+ upper_32_bits(queue->tx_ring_dma));
+#endif
+ }
+}
+
/**
* macb_set_tx_clk() - Set a clock to a new frequency
* @clk Pointer to the clock to change
@@ -432,114 +453,178 @@ static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev)
netdev_err(dev, "adjusting tx_clk failed.\n");
}
-static void macb_handle_link_change(struct net_device *dev)
+static void macb_validate(struct phylink_config *config,
+ unsigned long *supported,
+ struct phylink_link_state *state)
{
- struct macb *bp = netdev_priv(dev);
- struct phy_device *phydev = dev->phydev;
+ struct net_device *ndev = to_net_dev(config->dev);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+ struct macb *bp = netdev_priv(ndev);
+
+ /* We only support MII, RMII, GMII, RGMII & SGMII. */
+ if (state->interface != PHY_INTERFACE_MODE_NA &&
+ state->interface != PHY_INTERFACE_MODE_MII &&
+ state->interface != PHY_INTERFACE_MODE_RMII &&
+ state->interface != PHY_INTERFACE_MODE_GMII &&
+ state->interface != PHY_INTERFACE_MODE_SGMII &&
+ !phy_interface_mode_is_rgmii(state->interface)) {
+ bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ return;
+ }
+
+ if (!macb_is_gem(bp) &&
+ (state->interface == PHY_INTERFACE_MODE_GMII ||
+ phy_interface_mode_is_rgmii(state->interface))) {
+ bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ return;
+ }
+
+ phylink_set_port_modes(mask);
+ phylink_set(mask, Autoneg);
+ phylink_set(mask, Asym_Pause);
+
+ phylink_set(mask, 10baseT_Half);
+ phylink_set(mask, 10baseT_Full);
+ phylink_set(mask, 100baseT_Half);
+ phylink_set(mask, 100baseT_Full);
+
+ if (bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE &&
+ (state->interface == PHY_INTERFACE_MODE_NA ||
+ state->interface == PHY_INTERFACE_MODE_GMII ||
+ state->interface == PHY_INTERFACE_MODE_SGMII ||
+ phy_interface_mode_is_rgmii(state->interface))) {
+ phylink_set(mask, 1000baseT_Full);
+ phylink_set(mask, 1000baseX_Full);
+
+ if (!(bp->caps & MACB_CAPS_NO_GIGABIT_HALF))
+ phylink_set(mask, 1000baseT_Half);
+ }
+
+ bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ bitmap_and(state->advertising, state->advertising, mask,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
+static void macb_mac_pcs_get_state(struct phylink_config *config,
+ struct phylink_link_state *state)
+{
+ state->link = 0;
+}
+
+static void macb_mac_an_restart(struct phylink_config *config)
+{
+ /* Not supported */
+}
+
+static void macb_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct macb *bp = netdev_priv(ndev);
unsigned long flags;
- int status_change = 0;
+ u32 old_ctrl, ctrl;
spin_lock_irqsave(&bp->lock, flags);
- if (phydev->link) {
- if ((bp->speed != phydev->speed) ||
- (bp->duplex != phydev->duplex)) {
- u32 reg;
+ old_ctrl = ctrl = macb_or_gem_readl(bp, NCFGR);
- reg = macb_readl(bp, NCFGR);
- reg &= ~(MACB_BIT(SPD) | MACB_BIT(FD));
- if (macb_is_gem(bp))
- reg &= ~GEM_BIT(GBE);
+ /* Clear all the bits we might set later */
+ ctrl &= ~(GEM_BIT(GBE) | MACB_BIT(SPD) | MACB_BIT(FD) | MACB_BIT(PAE) |
+ GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL));
- if (phydev->duplex)
- reg |= MACB_BIT(FD);
- if (phydev->speed == SPEED_100)
- reg |= MACB_BIT(SPD);
- if (phydev->speed == SPEED_1000 &&
- bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)
- reg |= GEM_BIT(GBE);
+ if (state->speed == SPEED_1000)
+ ctrl |= GEM_BIT(GBE);
+ else if (state->speed == SPEED_100)
+ ctrl |= MACB_BIT(SPD);
- macb_or_gem_writel(bp, NCFGR, reg);
+ if (state->duplex)
+ ctrl |= MACB_BIT(FD);
- bp->speed = phydev->speed;
- bp->duplex = phydev->duplex;
- status_change = 1;
- }
- }
+ /* We do not support MLO_PAUSE_RX yet */
+ if (state->pause & MLO_PAUSE_TX)
+ ctrl |= MACB_BIT(PAE);
- if (phydev->link != bp->link) {
- if (!phydev->link) {
- bp->speed = 0;
- bp->duplex = -1;
- }
- bp->link = phydev->link;
+ if (state->interface == PHY_INTERFACE_MODE_SGMII)
+ ctrl |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
- status_change = 1;
- }
+ /* Apply the new configuration, if any */
+ if (old_ctrl ^ ctrl)
+ macb_or_gem_writel(bp, NCFGR, ctrl);
+
+ bp->speed = state->speed;
spin_unlock_irqrestore(&bp->lock, flags);
+}
- if (status_change) {
- if (phydev->link) {
- /* Update the TX clock rate if and only if the link is
- * up and there has been a link change.
- */
- macb_set_tx_clk(bp->tx_clk, phydev->speed, dev);
+static void macb_mac_link_down(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface)
+{
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct macb *bp = netdev_priv(ndev);
+ struct macb_queue *queue;
+ unsigned int q;
+ u32 ctrl;
- netif_carrier_on(dev);
- netdev_info(dev, "link up (%d/%s)\n",
- phydev->speed,
- phydev->duplex == DUPLEX_FULL ?
- "Full" : "Half");
- } else {
- netif_carrier_off(dev);
- netdev_info(dev, "link down\n");
- }
- }
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
+ queue_writel(queue, IDR,
+ bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP));
+
+ /* Disable Rx and Tx */
+ ctrl = macb_readl(bp, NCR) & ~(MACB_BIT(RE) | MACB_BIT(TE));
+ macb_writel(bp, NCR, ctrl);
+
+ netif_tx_stop_all_queues(ndev);
}
-/* based on au1000_eth. c*/
-static int macb_mii_probe(struct net_device *dev)
+static void macb_mac_link_up(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface, struct phy_device *phy)
{
- struct macb *bp = netdev_priv(dev);
- struct phy_device *phydev;
- struct device_node *np;
- int ret, i;
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct macb *bp = netdev_priv(ndev);
+ struct macb_queue *queue;
+ unsigned int q;
- np = bp->pdev->dev.of_node;
- ret = 0;
+ macb_set_tx_clk(bp->tx_clk, bp->speed, ndev);
- if (np) {
- if (of_phy_is_fixed_link(np)) {
- bp->phy_node = of_node_get(np);
- } else {
- bp->phy_node = of_parse_phandle(np, "phy-handle", 0);
- /* fallback to standard phy registration if no
- * phy-handle was found nor any phy found during
- * dt phy registration
- */
- if (!bp->phy_node && !phy_find_first(bp->mii_bus)) {
- for (i = 0; i < PHY_MAX_ADDR; i++) {
- phydev = mdiobus_scan(bp->mii_bus, i);
- if (IS_ERR(phydev) &&
- PTR_ERR(phydev) != -ENODEV) {
- ret = PTR_ERR(phydev);
- break;
- }
- }
+ /* Initialize rings & buffers as clearing MACB_BIT(TE) in link down
+ * cleared the pipeline and control registers.
+ */
+ bp->macbgem_ops.mog_init_rings(bp);
+ macb_init_buffers(bp);
- if (ret)
- return -ENODEV;
- }
- }
- }
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
+ queue_writel(queue, IER,
+ bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP));
+
+ /* Enable Rx and Tx */
+ macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(RE) | MACB_BIT(TE));
+
+ netif_tx_wake_all_queues(ndev);
+}
+
+static const struct phylink_mac_ops macb_phylink_ops = {
+ .validate = macb_validate,
+ .mac_pcs_get_state = macb_mac_pcs_get_state,
+ .mac_an_restart = macb_mac_an_restart,
+ .mac_config = macb_mac_config,
+ .mac_link_down = macb_mac_link_down,
+ .mac_link_up = macb_mac_link_up,
+};
- if (bp->phy_node) {
- phydev = of_phy_connect(dev, bp->phy_node,
- &macb_handle_link_change, 0,
- bp->phy_interface);
- if (!phydev)
- return -ENODEV;
+static int macb_phylink_connect(struct macb *bp)
+{
+ struct net_device *dev = bp->dev;
+ struct phy_device *phydev;
+ int ret;
+
+ if (bp->pdev->dev.of_node &&
+ of_parse_phandle(bp->pdev->dev.of_node, "phy-handle", 0)) {
+ ret = phylink_of_phy_connect(bp->phylink, bp->pdev->dev.of_node,
+ 0);
+ if (ret) {
+ netdev_err(dev, "Could not attach PHY (%d)\n", ret);
+ return ret;
+ }
} else {
phydev = phy_find_first(bp->mii_bus);
if (!phydev) {
@@ -548,27 +633,33 @@ static int macb_mii_probe(struct net_device *dev)
}
/* attach the mac to the phy */
- ret = phy_connect_direct(dev, phydev, &macb_handle_link_change,
- bp->phy_interface);
+ ret = phylink_connect_phy(bp->phylink, phydev);
if (ret) {
- netdev_err(dev, "Could not attach to PHY\n");
+ netdev_err(dev, "Could not attach to PHY (%d)\n", ret);
return ret;
}
}
- /* mask with MAC supported features */
- if (macb_is_gem(bp) && bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)
- phy_set_max_speed(phydev, SPEED_1000);
- else
- phy_set_max_speed(phydev, SPEED_100);
+ phylink_start(bp->phylink);
- if (bp->caps & MACB_CAPS_NO_GIGABIT_HALF)
- phy_remove_link_mode(phydev,
- ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
+ return 0;
+}
- bp->link = 0;
- bp->speed = 0;
- bp->duplex = -1;
+/* based on au1000_eth. c*/
+static int macb_mii_probe(struct net_device *dev)
+{
+ struct macb *bp = netdev_priv(dev);
+
+ bp->phylink_config.dev = &dev->dev;
+ bp->phylink_config.type = PHYLINK_NETDEV;
+
+ bp->phylink = phylink_create(&bp->phylink_config, bp->pdev->dev.fwnode,
+ bp->phy_interface, &macb_phylink_ops);
+ if (IS_ERR(bp->phylink)) {
+ netdev_err(dev, "Could not create a phylink instance (%ld)\n",
+ PTR_ERR(bp->phylink));
+ return PTR_ERR(bp->phylink);
+ }
return 0;
}
@@ -598,20 +689,10 @@ static int macb_mii_init(struct macb *bp)
dev_set_drvdata(&bp->dev->dev, bp->mii_bus);
np = bp->pdev->dev.of_node;
- if (np && of_phy_is_fixed_link(np)) {
- if (of_phy_register_fixed_link(np) < 0) {
- dev_err(&bp->pdev->dev,
- "broken fixed-link specification %pOF\n", np);
- goto err_out_free_mdiobus;
- }
-
- err = mdiobus_register(bp->mii_bus);
- } else {
- err = of_mdiobus_register(bp->mii_bus, np);
- }
+ err = of_mdiobus_register(bp->mii_bus, np);
if (err)
- goto err_out_free_fixed_link;
+ goto err_out_free_mdiobus;
err = macb_mii_probe(bp->dev);
if (err)
@@ -621,11 +702,7 @@ static int macb_mii_init(struct macb *bp)
err_out_unregister_bus:
mdiobus_unregister(bp->mii_bus);
-err_out_free_fixed_link:
- if (np && of_phy_is_fixed_link(np))
- of_phy_deregister_fixed_link(np);
err_out_free_mdiobus:
- of_node_put(bp->phy_node);
mdiobus_free(bp->mii_bus);
err_out:
return err;
@@ -1314,26 +1391,14 @@ static void macb_hresp_error_task(unsigned long data)
bp->macbgem_ops.mog_init_rings(bp);
/* Initialize TX and RX buffers */
- for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
- queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma));
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- if (bp->hw_dma_cap & HW_DMA_CAP_64B)
- queue_writel(queue, RBQPH,
- upper_32_bits(queue->rx_ring_dma));
-#endif
- queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- if (bp->hw_dma_cap & HW_DMA_CAP_64B)
- queue_writel(queue, TBQPH,
- upper_32_bits(queue->tx_ring_dma));
-#endif
+ macb_init_buffers(bp);
- /* Enable interrupts */
+ /* Enable interrupts */
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
queue_writel(queue, IER,
bp->rx_intr_mask |
MACB_TX_INT_FLAGS |
MACB_BIT(HRESP));
- }
ctrl |= MACB_BIT(RE) | MACB_BIT(TE);
macb_writel(bp, NCR, ctrl);
@@ -2221,19 +2286,13 @@ static void macb_configure_dma(struct macb *bp)
static void macb_init_hw(struct macb *bp)
{
- struct macb_queue *queue;
- unsigned int q;
-
u32 config;
macb_reset_hw(bp);
macb_set_hwaddr(bp);
config = macb_mdc_clk_div(bp);
- if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII)
- config |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
config |= MACB_BF(RBOF, NET_IP_ALIGN); /* Make eth data aligned */
- config |= MACB_BIT(PAE); /* PAuse Enable */
config |= MACB_BIT(DRFCS); /* Discard Rx FCS */
if (bp->caps & MACB_CAPS_JUMBO)
config |= MACB_BIT(JFRAME); /* Enable jumbo frames */
@@ -2249,36 +2308,11 @@ static void macb_init_hw(struct macb *bp)
macb_writel(bp, NCFGR, config);
if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len)
gem_writel(bp, JML, bp->jumbo_max_len);
- bp->speed = SPEED_10;
- bp->duplex = DUPLEX_HALF;
bp->rx_frm_len_mask = MACB_RX_FRMLEN_MASK;
if (bp->caps & MACB_CAPS_JUMBO)
bp->rx_frm_len_mask = MACB_RX_JFRMLEN_MASK;
macb_configure_dma(bp);
-
- /* Initialize TX and RX buffers */
- for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
- queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma));
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- if (bp->hw_dma_cap & HW_DMA_CAP_64B)
- queue_writel(queue, RBQPH, upper_32_bits(queue->rx_ring_dma));
-#endif
- queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- if (bp->hw_dma_cap & HW_DMA_CAP_64B)
- queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
-#endif
-
- /* Enable interrupts */
- queue_writel(queue, IER,
- bp->rx_intr_mask |
- MACB_TX_INT_FLAGS |
- MACB_BIT(HRESP));
- }
-
- /* Enable TX and RX */
- macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(RE) | MACB_BIT(TE));
}
/* The hash address register is 64 bits long and takes up two
@@ -2402,8 +2436,8 @@ static void macb_set_rx_mode(struct net_device *dev)
static int macb_open(struct net_device *dev)
{
- struct macb *bp = netdev_priv(dev);
size_t bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN;
+ struct macb *bp = netdev_priv(dev);
struct macb_queue *queue;
unsigned int q;
int err;
@@ -2414,15 +2448,6 @@ static int macb_open(struct net_device *dev)
if (err < 0)
goto pm_exit;
- /* carrier starts down */
- netif_carrier_off(dev);
-
- /* if the phy is not yet register, retry later*/
- if (!dev->phydev) {
- err = -EAGAIN;
- goto pm_exit;
- }
-
/* RX buffers initialization */
macb_init_rx_buffer_size(bp, bufsz);
@@ -2436,11 +2461,11 @@ static int macb_open(struct net_device *dev)
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
napi_enable(&queue->napi);
- bp->macbgem_ops.mog_init_rings(bp);
macb_init_hw(bp);
- /* schedule a link state check */
- phy_start(dev->phydev);
+ err = macb_phylink_connect(bp);
+ if (err)
+ goto pm_exit;
netif_tx_start_all_queues(dev);
@@ -2467,8 +2492,8 @@ static int macb_close(struct net_device *dev)
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
napi_disable(&queue->napi);
- if (dev->phydev)
- phy_stop(dev->phydev);
+ phylink_stop(bp->phylink);
+ phylink_disconnect_phy(bp->phylink);
spin_lock_irqsave(&bp->lock, flags);
macb_reset_hw(bp);
@@ -2702,17 +2727,18 @@ static void macb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
wol->supported = 0;
wol->wolopts = 0;
- if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET) {
- wol->supported = WAKE_MAGIC;
-
- if (bp->wol & MACB_WOL_ENABLED)
- wol->wolopts |= WAKE_MAGIC;
- }
+ if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET)
+ phylink_ethtool_get_wol(bp->phylink, wol);
}
static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
struct macb *bp = netdev_priv(netdev);
+ int ret;
+
+ ret = phylink_ethtool_set_wol(bp->phylink, wol);
+ if (!ret)
+ return 0;
if (!(bp->wol & MACB_WOL_HAS_MAGIC_PACKET) ||
(wol->wolopts & ~WAKE_MAGIC))
@@ -2728,6 +2754,22 @@ static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
return 0;
}
+static int macb_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *kset)
+{
+ struct macb *bp = netdev_priv(netdev);
+
+ return phylink_ethtool_ksettings_get(bp->phylink, kset);
+}
+
+static int macb_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *kset)
+{
+ struct macb *bp = netdev_priv(netdev);
+
+ return phylink_ethtool_ksettings_set(bp->phylink, kset);
+}
+
static void macb_get_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring)
{
@@ -3164,8 +3206,8 @@ static const struct ethtool_ops macb_ethtool_ops = {
.get_ts_info = ethtool_op_get_ts_info,
.get_wol = macb_get_wol,
.set_wol = macb_set_wol,
- .get_link_ksettings = phy_ethtool_get_link_ksettings,
- .set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .get_link_ksettings = macb_get_link_ksettings,
+ .set_link_ksettings = macb_set_link_ksettings,
.get_ringparam = macb_get_ringparam,
.set_ringparam = macb_set_ringparam,
};
@@ -3178,8 +3220,8 @@ static const struct ethtool_ops gem_ethtool_ops = {
.get_ethtool_stats = gem_get_ethtool_stats,
.get_strings = gem_get_ethtool_strings,
.get_sset_count = gem_get_sset_count,
- .get_link_ksettings = phy_ethtool_get_link_ksettings,
- .set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .get_link_ksettings = macb_get_link_ksettings,
+ .set_link_ksettings = macb_set_link_ksettings,
.get_ringparam = macb_get_ringparam,
.set_ringparam = macb_set_ringparam,
.get_rxnfc = gem_get_rxnfc,
@@ -3188,26 +3230,21 @@ static const struct ethtool_ops gem_ethtool_ops = {
static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
- struct phy_device *phydev = dev->phydev;
struct macb *bp = netdev_priv(dev);
if (!netif_running(dev))
return -EINVAL;
- if (!phydev)
- return -ENODEV;
-
- if (!bp->ptp_info)
- return phy_mii_ioctl(phydev, rq, cmd);
-
- switch (cmd) {
- case SIOCSHWTSTAMP:
- return bp->ptp_info->set_hwtst(dev, rq, cmd);
- case SIOCGHWTSTAMP:
- return bp->ptp_info->get_hwtst(dev, rq);
- default:
- return phy_mii_ioctl(phydev, rq, cmd);
+ if (bp->ptp_info) {
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ return bp->ptp_info->set_hwtst(dev, rq, cmd);
+ case SIOCGHWTSTAMP:
+ return bp->ptp_info->get_hwtst(dev, rq);
+ }
}
+
+ return phylink_mii_ioctl(bp->phylink, rq, cmd);
}
static inline void macb_set_txcsum_feature(struct macb *bp,
@@ -3330,7 +3367,8 @@ static void macb_configure_caps(struct macb *bp,
#ifdef CONFIG_MACB_USE_HWSTAMP
if (gem_has_ptp(bp)) {
if (!GEM_BFEXT(TSU, gem_readl(bp, DCFG5)))
- pr_err("GEM doesn't support hardware ptp.\n");
+ dev_err(&bp->pdev->dev,
+ "GEM doesn't support hardware ptp.\n");
else {
bp->hw_dma_cap |= HW_DMA_CAP_PTP;
bp->ptp_info = &gem_ptp_info;
@@ -3707,8 +3745,9 @@ static int at91ether_open(struct net_device *dev)
MACB_BIT(ISR_ROVR) |
MACB_BIT(HRESP));
- /* schedule a link state check */
- phy_start(dev->phydev);
+ ret = macb_phylink_connect(lp);
+ if (ret)
+ return ret;
netif_start_queue(dev);
@@ -3737,6 +3776,9 @@ static int at91ether_close(struct net_device *dev)
netif_stop_queue(dev);
+ phylink_stop(lp->phylink);
+ phylink_disconnect_phy(lp->phylink);
+
dma_free_coherent(&lp->pdev->dev,
AT91ETHER_MAX_RX_DESCR *
macb_dma_desc_get_size(lp),
@@ -4181,7 +4223,7 @@ static int macb_probe(struct platform_device *pdev)
struct clk *tsu_clk = NULL;
unsigned int queue_mask, num_queues;
bool native_io;
- struct phy_device *phydev;
+ phy_interface_t interface;
struct net_device *dev;
struct resource *regs;
void __iomem *mem;
@@ -4308,12 +4350,14 @@ static int macb_probe(struct platform_device *pdev)
macb_get_hwaddr(bp);
}
- err = of_get_phy_mode(np);
- if (err < 0)
+ err = of_get_phy_mode(np, &interface);
+ if (err)
/* not found in DT, MII by default */
bp->phy_interface = PHY_INTERFACE_MODE_MII;
else
- bp->phy_interface = err;
+ bp->phy_interface = interface;
+
+ bp->speed = SPEED_UNKNOWN;
/* IP specific init */
err = init(pdev);
@@ -4324,8 +4368,6 @@ static int macb_probe(struct platform_device *pdev)
if (err)
goto err_out_free_netdev;
- phydev = dev->phydev;
-
netif_carrier_off(dev);
err = register_netdev(dev);
@@ -4337,8 +4379,6 @@ static int macb_probe(struct platform_device *pdev)
tasklet_init(&bp->hresp_err_tasklet, macb_hresp_error_task,
(unsigned long)bp);
- phy_attached_info(phydev);
-
netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n",
macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID),
dev->base_addr, dev->irq, dev->dev_addr);
@@ -4349,11 +4389,7 @@ static int macb_probe(struct platform_device *pdev)
return 0;
err_out_unregister_mdio:
- phy_disconnect(dev->phydev);
mdiobus_unregister(bp->mii_bus);
- of_node_put(bp->phy_node);
- if (np && of_phy_is_fixed_link(np))
- of_phy_deregister_fixed_link(np);
mdiobus_free(bp->mii_bus);
err_out_free_netdev:
@@ -4377,18 +4413,12 @@ static int macb_remove(struct platform_device *pdev)
{
struct net_device *dev;
struct macb *bp;
- struct device_node *np = pdev->dev.of_node;
dev = platform_get_drvdata(pdev);
if (dev) {
bp = netdev_priv(dev);
- if (dev->phydev)
- phy_disconnect(dev->phydev);
mdiobus_unregister(bp->mii_bus);
- if (np && of_phy_is_fixed_link(np))
- of_phy_deregister_fixed_link(np);
- dev->phydev = NULL;
mdiobus_free(bp->mii_bus);
unregister_netdev(dev);
@@ -4403,7 +4433,7 @@ static int macb_remove(struct platform_device *pdev)
clk_disable_unprepare(bp->tsu_clk);
pm_runtime_set_suspended(&pdev->dev);
}
- of_node_put(bp->phy_node);
+ phylink_destroy(bp->phylink);
free_netdev(dev);
}
@@ -4421,7 +4451,6 @@ static int __maybe_unused macb_suspend(struct device *dev)
if (!netif_running(netdev))
return 0;
-
if (bp->wol & MACB_WOL_ENABLED) {
macb_writel(bp, IER, MACB_BIT(WOL));
macb_writel(bp, WOL, MACB_BIT(MAG));
@@ -4432,8 +4461,9 @@ static int __maybe_unused macb_suspend(struct device *dev)
for (q = 0, queue = bp->queues; q < bp->num_queues;
++q, ++queue)
napi_disable(&queue->napi);
- phy_stop(netdev->phydev);
- phy_suspend(netdev->phydev);
+ rtnl_lock();
+ phylink_stop(bp->phylink);
+ rtnl_unlock();
spin_lock_irqsave(&bp->lock, flags);
macb_reset_hw(bp);
spin_unlock_irqrestore(&bp->lock, flags);
@@ -4481,12 +4511,11 @@ static int __maybe_unused macb_resume(struct device *dev)
for (q = 0, queue = bp->queues; q < bp->num_queues;
++q, ++queue)
napi_enable(&queue->napi);
- phy_resume(netdev->phydev);
- phy_init_hw(netdev->phydev);
- phy_start(netdev->phydev);
+ rtnl_lock();
+ phylink_start(bp->phylink);
+ rtnl_unlock();
}
- bp->macbgem_ops.mog_init_rings(bp);
macb_init_hw(bp);
macb_set_rx_mode(netdev);
macb_restore_features(bp);
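Unlike the phy_stop()/phy_start() calls they replace, phylink_stop() and phylink_start() must run under RTNL, and PM callbacks are not invoked with it held, hence the explicit locking added above:

    rtnl_lock();
    phylink_stop(bp->phylink);    /* phylink_start() on the resume side */
    rtnl_unlock();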
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index f96a42af1014..af04a2c81adb 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -1914,10 +1914,10 @@ static struct platform_driver xgmac_driver = {
.driver = {
.name = "calxedaxgmac",
.of_match_table = xgmac_of_match,
+ .pm = &xgmac_pm_ops,
},
.probe = xgmac_probe,
.remove = xgmac_remove,
- .driver.pm = &xgmac_pm_ops,
};
module_platform_driver(xgmac_driver);
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 40a44dcb3d9b..f28409279ea4 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -1876,13 +1876,8 @@ static int nicvf_xdp_setup(struct nicvf *nic, struct bpf_prog *prog)
if (nic->xdp_prog) {
/* Attach BPF program */
- nic->xdp_prog = bpf_prog_add(nic->xdp_prog, nic->rx_queues - 1);
- if (!IS_ERR(nic->xdp_prog)) {
- bpf_attached = true;
- } else {
- ret = PTR_ERR(nic->xdp_prog);
- nic->xdp_prog = NULL;
- }
+ bpf_prog_add(nic->xdp_prog, nic->rx_queues - 1);
+ bpf_attached = true;
}
/* Calculate Tx queues needed for XDP and network stack */
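This hunk follows the bpf core change that made prog refcounts 64-bit atomics: bpf_prog_add() can no longer fail and returns void, so the IS_ERR() handling is dead code. Signature sketch for reference:

    /* void bpf_prog_add(struct bpf_prog *prog, int i);
     *
     * Takes i extra references (here, one per additional RX queue);
     * each is released later with bpf_prog_put(prog).
     */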
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index acb016834f04..1e09fdb63c4f 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -1007,14 +1007,14 @@ static void bgx_poll_for_link(struct work_struct *work)
if ((spu_link & SPU_STATUS1_RCV_LNK) &&
!(smu_link & SMU_RX_CTL_STATUS)) {
- lmac->link_up = 1;
+ lmac->link_up = true;
if (lmac->lmac_type == BGX_MODE_XLAUI)
lmac->last_speed = SPEED_40000;
else
lmac->last_speed = SPEED_10000;
lmac->last_duplex = DUPLEX_FULL;
} else {
- lmac->link_up = 0;
+ lmac->link_up = false;
lmac->last_speed = SPEED_UNKNOWN;
lmac->last_duplex = DUPLEX_UNKNOWN;
}
@@ -1023,7 +1023,7 @@ static void bgx_poll_for_link(struct work_struct *work)
if (lmac->link_up) {
if (bgx_xaui_check_link(lmac)) {
/* Errors, clear link_up state */
- lmac->link_up = 0;
+ lmac->link_up = false;
lmac->last_speed = SPEED_UNKNOWN;
lmac->last_duplex = DUPLEX_UNKNOWN;
}
@@ -1055,11 +1055,11 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
if ((lmac->lmac_type == BGX_MODE_SGMII) ||
(lmac->lmac_type == BGX_MODE_QSGMII) ||
(lmac->lmac_type == BGX_MODE_RGMII)) {
- lmac->is_sgmii = 1;
+ lmac->is_sgmii = true;
if (bgx_lmac_sgmii_init(bgx, lmac))
return -1;
} else {
- lmac->is_sgmii = 0;
+ lmac->is_sgmii = false;
if (bgx_lmac_xaui_init(bgx, lmac))
return -1;
}
@@ -1304,7 +1304,7 @@ static void lmac_set_training(struct bgx *bgx, struct lmac *lmac, int lmacid)
{
if ((lmac->lmac_type != BGX_MODE_10G_KR) &&
(lmac->lmac_type != BGX_MODE_40G_KR)) {
- lmac->use_training = 0;
+ lmac->use_training = false;
return;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/Makefile b/drivers/net/ethernet/chelsio/cxgb4/Makefile
index 20390f6afbb4..a4b4d475abf8 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/Makefile
+++ b/drivers/net/ethernet/chelsio/cxgb4/Makefile
@@ -8,7 +8,8 @@ obj-$(CONFIG_CHELSIO_T4) += cxgb4.o
cxgb4-objs := cxgb4_main.o l2t.o smt.o t4_hw.o sge.o clip_tbl.o cxgb4_ethtool.o \
cxgb4_uld.o srq.o sched.o cxgb4_filter.o cxgb4_tc_u32.o \
cxgb4_ptp.o cxgb4_tc_flower.o cxgb4_cudbg.o cxgb4_mps.o \
- cudbg_common.o cudbg_lib.o cudbg_zlib.o
+ cudbg_common.o cudbg_lib.o cudbg_zlib.o cxgb4_tc_mqprio.o \
+ cxgb4_tc_matchall.o
cxgb4-$(CONFIG_CHELSIO_T4_DCB) += cxgb4_dcb.o
cxgb4-$(CONFIG_CHELSIO_T4_FCOE) += cxgb4_fcoe.o
cxgb4-$(CONFIG_DEBUG_FS) += cxgb4_debugfs.o
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
index 69746696a929..f5be3ee1bdb4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
@@ -325,6 +325,9 @@ enum cudbg_qdesc_qtype {
CUDBG_QTYPE_CRYPTO_FLQ,
CUDBG_QTYPE_TLS_RXQ,
CUDBG_QTYPE_TLS_FLQ,
+ CUDBG_QTYPE_ETHOFLD_TXQ,
+ CUDBG_QTYPE_ETHOFLD_RXQ,
+ CUDBG_QTYPE_ETHOFLD_FLQ,
CUDBG_QTYPE_MAX,
};
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
index c2e92786608b..19c11568113a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
@@ -4,6 +4,7 @@
*/
#include <linux/sort.h>
+#include <linux/string.h>
#include "t4_regs.h"
#include "cxgb4.h"
@@ -776,24 +777,18 @@ static int cudbg_get_mem_region(struct adapter *padap,
struct cudbg_mem_desc *mem_desc)
{
u8 mc, found = 0;
- u32 i, idx = 0;
- int rc;
+ u32 idx = 0;
+ int rc, i;
rc = cudbg_meminfo_get_mem_index(padap, meminfo, mem_type, &mc);
if (rc)
return rc;
- for (i = 0; i < ARRAY_SIZE(cudbg_region); i++) {
- if (!strcmp(cudbg_region[i], region_name)) {
- found = 1;
- idx = i;
- break;
- }
- }
- if (!found)
+ i = match_string(cudbg_region, ARRAY_SIZE(cudbg_region), region_name);
+ if (i < 0)
return -EINVAL;
- found = 0;
+ idx = i;
for (i = 0; i < meminfo->mem_c; i++) {
if (meminfo->mem[i].idx >= ARRAY_SIZE(cudbg_region))
continue; /* Skip holes */
@@ -2930,6 +2925,10 @@ void cudbg_fill_qdesc_num_and_size(const struct adapter *padap,
tot_size += CXGB4_ULD_MAX * MAX_ULD_QSETS * SGE_MAX_IQ_SIZE *
MAX_RXQ_DESC_SIZE;
+ /* ETHOFLD TXQ, RXQ, and FLQ */
+ tot_entries += MAX_OFLD_QSETS * 3;
+ tot_size += MAX_OFLD_QSETS * MAX_TXQ_ENTRIES * MAX_TXQ_DESC_SIZE;
+
tot_size += sizeof(struct cudbg_ver_hdr) +
sizeof(struct cudbg_qdesc_info) +
sizeof(struct cudbg_qdesc_entry) * tot_entries;
@@ -3087,6 +3086,23 @@ int cudbg_collect_qdesc(struct cudbg_init *pdbg_init,
}
}
+ /* ETHOFLD TXQ */
+ if (s->eohw_txq)
+ for (i = 0; i < s->eoqsets; i++)
+ QDESC_GET_TXQ(&s->eohw_txq[i].q,
+ CUDBG_QTYPE_ETHOFLD_TXQ, out);
+
+ /* ETHOFLD RXQ and FLQ */
+ if (s->eohw_rxq) {
+ for (i = 0; i < s->eoqsets; i++)
+ QDESC_GET_RXQ(&s->eohw_rxq[i].rspq,
+ CUDBG_QTYPE_ETHOFLD_RXQ, out);
+
+ for (i = 0; i < s->eoqsets; i++)
+ QDESC_GET_FLQ(&s->eohw_rxq[i].fl,
+ CUDBG_QTYPE_ETHOFLD_FLQ, out);
+ }
+
out_unlock:
mutex_unlock(&uld_mutex);
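
The cudbg change above replaces an open-coded scan with match_string() from <linux/string.h>, which returns the index of the matching entry or a negative errno. A minimal usage sketch with a hypothetical table:

	#include <linux/kernel.h>
	#include <linux/string.h>

	static const char * const regions[] = { "edc0", "edc1", "mc0" };

	static int lookup_region(const char *name)
	{
		int i = match_string(regions, ARRAY_SIZE(regions), name);

		if (i < 0)
			return -EINVAL;	/* no match */
		return i;		/* index of the matching entry */
	}
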
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 1fbb640e896a..a70ac2097892 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -392,6 +392,7 @@ struct adapter_params {
struct arch_specific_params arch; /* chip specific params */
unsigned char offload;
unsigned char crypto; /* HW capability for crypto */
+ unsigned char ethofld; /* QoS support */
unsigned char bypass;
unsigned char hash_filter;
@@ -602,6 +603,8 @@ struct port_info {
u8 vivld;
u8 smt_idx;
u8 rx_cchan;
+
+ bool tc_block_shared;
};
struct dentry;
@@ -711,6 +714,7 @@ struct sge_eth_rxq { /* SW Ethernet Rx queue */
struct sge_rspq rspq;
struct sge_fl fl;
struct sge_eth_stats stats;
+ struct msix_info *msix;
} ____cacheline_aligned_in_smp;
struct sge_ofld_stats { /* offload queue statistics */
@@ -724,13 +728,19 @@ struct sge_ofld_rxq { /* SW offload Rx queue */
struct sge_rspq rspq;
struct sge_fl fl;
struct sge_ofld_stats stats;
+ struct msix_info *msix;
} ____cacheline_aligned_in_smp;
struct tx_desc {
__be64 flit[8];
};
-struct tx_sw_desc;
+struct ulptx_sgl;
+
+struct tx_sw_desc {
+ struct sk_buff *skb; /* SKB to free after getting completion */
+ dma_addr_t addr[MAX_SKB_FRAGS + 1]; /* DMA mapped addresses */
+};
struct sge_txq {
unsigned int in_use; /* # of in-use Tx descriptors */
@@ -762,6 +772,7 @@ struct sge_eth_txq { /* state for an SGE Ethernet Tx queue */
u8 dbqt; /* SGE Doorbell Queue Timer in use */
unsigned int dbqtimerix; /* SGE Doorbell Queue Timer Index */
unsigned long tso; /* # of TSO requests */
+ unsigned long uso; /* # of USO requests */
unsigned long tx_cso; /* # of Tx checksum offloads */
unsigned long vlan_ins; /* # of Tx VLAN insertions */
unsigned long mapping_err; /* # of I/O MMU packet mapping errors */
@@ -788,7 +799,6 @@ struct sge_ctrl_txq { /* state for an SGE control Tx queue */
struct sge_uld_rxq_info {
char name[IFNAMSIZ]; /* name of ULD driver */
struct sge_ofld_rxq *uldrxq; /* Rxq's for ULD */
- u16 *msix_tbl; /* msix_tbl for uld */
u16 *rspq_id; /* response queue id's of rxq */
u16 nrxq; /* # of ingress uld queues */
u16 nciq; /* # of completion queues */
@@ -801,6 +811,51 @@ struct sge_uld_txq_info {
u16 ntxq; /* # of egress uld queues */
};
+enum sge_eosw_state {
+ CXGB4_EO_STATE_CLOSED = 0, /* Not ready to accept traffic */
+ CXGB4_EO_STATE_FLOWC_OPEN_SEND, /* Send FLOWC open request */
+ CXGB4_EO_STATE_FLOWC_OPEN_REPLY, /* Waiting for FLOWC open reply */
+ CXGB4_EO_STATE_ACTIVE, /* Ready to accept traffic */
+ CXGB4_EO_STATE_FLOWC_CLOSE_SEND, /* Send FLOWC close request */
+ CXGB4_EO_STATE_FLOWC_CLOSE_REPLY, /* Waiting for FLOWC close reply */
+};
+
+struct sge_eosw_txq {
+ spinlock_t lock; /* Per queue lock to synchronize completions */
+ enum sge_eosw_state state; /* Current ETHOFLD State */
+ struct tx_sw_desc *desc; /* Descriptor ring to hold packets */
+ u32 ndesc; /* Number of descriptors */
+ u32 pidx; /* Current Producer Index */
+ u32 last_pidx; /* Last successfully transmitted Producer Index */
+ u32 cidx; /* Current Consumer Index */
+ u32 last_cidx; /* Last successfully reclaimed Consumer Index */
+ u32 flowc_idx; /* Descriptor containing a FLOWC request */
+ u32 inuse; /* Number of packets held in ring */
+
+ u32 cred; /* Current available credits */
+ u32 ncompl; /* # of completions posted */
+ u32 last_compl; /* # of credits consumed since last completion req */
+
+ u32 eotid; /* Index into EOTID table in software */
+ u32 hwtid; /* Hardware EOTID index */
+
+ u32 hwqid; /* Underlying hardware queue index */
+ struct net_device *netdev; /* Pointer to netdevice */
+ struct tasklet_struct qresume_tsk; /* Restarts the queue */
+ struct completion completion; /* completion for FLOWC rendezvous */
+};
+
+struct sge_eohw_txq {
+ spinlock_t lock; /* Per queue lock */
+ struct sge_txq q; /* HW Txq */
+ struct adapter *adap; /* Backpointer to adapter */
+ unsigned long tso; /* # of TSO requests */
+ unsigned long uso; /* # of USO requests */
+ unsigned long tx_cso; /* # of Tx checksum offloads */
+ unsigned long vlan_ins; /* # of Tx VLAN insertions */
+ unsigned long mapping_err; /* # of I/O MMU packet mapping errors */
+};
+
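
A note on the software ring above: pidx and cidx index a ring of ndesc descriptors, so every advance has to wrap. A hedged sketch of the usual bookkeeping (this helper is illustrative, not part of the driver):

	static inline void eosw_txq_advance(struct sge_eosw_txq *q, u32 n)
	{
		q->pidx += n;
		if (q->pidx >= q->ndesc)
			q->pidx -= q->ndesc;	/* wrap around the ring */
		q->inuse += n;	/* compared against ndesc to stop the queue */
	}
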
struct sge {
struct sge_eth_txq ethtxq[MAX_ETH_QSETS];
struct sge_eth_txq ptptxq;
@@ -814,11 +869,16 @@ struct sge {
struct sge_rspq intrq ____cacheline_aligned_in_smp;
spinlock_t intrq_lock;
+ struct sge_eohw_txq *eohw_txq;
+ struct sge_ofld_rxq *eohw_rxq;
+
u16 max_ethqsets; /* # of available Ethernet queue sets */
u16 ethqsets; /* # of active Ethernet queue sets */
u16 ethtxq_rover; /* Tx queue to clean up next */
u16 ofldqsets; /* # of active ofld queue sets */
u16 nqs_per_uld; /* # of Rx queues per ULD */
+ u16 eoqsets; /* # of ETHOFLD queues */
+
u16 timer_val[SGE_NTIMERS];
u8 counter_val[SGE_NCOUNTERS];
u16 dbqtimer_tick;
@@ -841,6 +901,9 @@ struct sge {
unsigned long *blocked_fl;
struct timer_list rx_timer; /* refills starving FLs */
struct timer_list tx_timer; /* checks Tx queues */
+
+ int fwevtq_msix_idx; /* Index to firmware event queue MSI-X info */
+ int nd_msix_idx; /* Index to non-data interrupts MSI-X info */
};
#define for_each_ethrxq(sge, i) for (i = 0; i < (sge)->ethqsets; i++)
@@ -870,13 +933,13 @@ struct hash_mac_addr {
unsigned int iface_mac;
};
-struct uld_msix_bmap {
+struct msix_bmap {
unsigned long *msix_bmap;
unsigned int mapsize;
spinlock_t lock; /* lock for acquiring bitmap */
};
-struct uld_msix_info {
+struct msix_info {
unsigned short vec;
char desc[IFNAMSIZ + 10];
unsigned int idx;
@@ -945,14 +1008,9 @@ struct adapter {
struct cxgb4_virt_res vres;
unsigned int swintr;
- struct msix_info {
- unsigned short vec;
- char desc[IFNAMSIZ + 10];
- cpumask_var_t aff_mask;
- } msix_info[MAX_INGQ + 1];
- struct uld_msix_info *msix_info_ulds; /* msix info for uld's */
- struct uld_msix_bmap msix_bmap_ulds; /* msix bitmap for all uld */
- int msi_idx;
+ /* MSI-X Info for NIC and OFLD queues */
+ struct msix_info *msix_info;
+ struct msix_bmap msix_bmap;
struct doorbell_stats db_stats;
struct sge sge;
@@ -1044,6 +1102,12 @@ struct adapter {
#if IS_ENABLED(CONFIG_THERMAL)
struct ch_thermal ch_thermal;
#endif
+
+ /* TC MQPRIO offload */
+ struct cxgb4_tc_mqprio *tc_mqprio;
+
+ /* TC MATCHALL classifier offload */
+ struct cxgb4_tc_matchall *tc_matchall;
};
/* Support for "sched-class" command to allow a TX Scheduling Class to be
@@ -1073,10 +1137,12 @@ enum {
enum {
SCHED_CLASS_LEVEL_CL_RL = 0, /* class rate limiter */
+ SCHED_CLASS_LEVEL_CH_RL = 2, /* channel rate limiter */
};
enum {
SCHED_CLASS_MODE_CLASS = 0, /* per-class scheduling */
+ SCHED_CLASS_MODE_FLOW, /* per-flow scheduling */
};
enum {
@@ -1087,11 +1153,6 @@ enum {
SCHED_CLASS_RATEMODE_ABS = 1, /* Kb/s */
};
-struct tx_sw_desc { /* SW state per Tx descriptor */
- struct sk_buff *skb;
- struct ulptx_sgl *sgl;
-};
-
/* Support for "sched_queue" command to allow one or more NIC TX Queues
* to be bound to a TX Scheduling Class.
*/
@@ -1100,6 +1161,14 @@ struct ch_sched_queue {
s8 class; /* class index */
};
+/* Support for "sched_flowc" command to allow one or more FLOWC
+ * to be bound to a TX Scheduling Class.
+ */
+struct ch_sched_flowc {
+ s32 tid; /* TID to bind */
+ s8 class; /* class index */
+};
+
/* Defined bit width of user definable filter tuples
*/
#define ETHTYPE_BITWIDTH 16
@@ -1214,8 +1283,11 @@ struct ch_filter_specification {
u16 nat_lport; /* local port to use after NAT'ing */
u16 nat_fport; /* foreign port to use after NAT'ing */
+ u32 tc_prio; /* TC's filter priority index */
+ u64 tc_cookie; /* Unique cookie identifying TC rules */
+
/* reservation for future additions */
- u8 rsvd[24];
+ u8 rsvd[12];
/* Filter rule value/mask pairs.
*/
@@ -1293,6 +1365,11 @@ static inline int is_uld(const struct adapter *adap)
return (adap->params.offload || adap->params.crypto);
}
+static inline int is_ethofld(const struct adapter *adap)
+{
+ return adap->params.ethofld;
+}
+
static inline u32 t4_read_reg(struct adapter *adap, u32 reg_addr)
{
return readl(adap->regs + reg_addr);
@@ -1426,6 +1503,9 @@ int t4_sge_mod_ctrl_txq(struct adapter *adap, unsigned int eqid,
int t4_sge_alloc_uld_txq(struct adapter *adap, struct sge_uld_txq *txq,
struct net_device *dev, unsigned int iqid,
unsigned int uld_type);
+int t4_sge_alloc_ethofld_txq(struct adapter *adap, struct sge_eohw_txq *txq,
+ struct net_device *dev, u32 iqid);
+void t4_sge_free_ethofld_txq(struct adapter *adap, struct sge_eohw_txq *txq);
irqreturn_t t4_sge_intr_msix(int irq, void *cookie);
int t4_sge_init(struct adapter *adap);
void t4_sge_start(struct adapter *adap);
@@ -1890,6 +1970,12 @@ int t4_i2c_rd(struct adapter *adap, unsigned int mbox, int port,
void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq, struct sge_fl *fl);
void free_tx_desc(struct adapter *adap, struct sge_txq *q,
unsigned int n, bool unmap);
+void cxgb4_eosw_txq_free_desc(struct adapter *adap, struct sge_eosw_txq *txq,
+ u32 ndesc);
+int cxgb4_ethofld_send_flowc(struct net_device *dev, u32 eotid, u32 tc);
+void cxgb4_ethofld_restart(unsigned long data);
+int cxgb4_ethofld_rx_handler(struct sge_rspq *q, const __be64 *rsp,
+ const struct pkt_gl *si);
void free_txq(struct adapter *adap, struct sge_txq *q);
void cxgb4_reclaim_completed_tx(struct adapter *adap,
struct sge_txq *q, bool unmap);
@@ -1948,5 +2034,10 @@ int cxgb4_alloc_raw_mac_filt(struct adapter *adap,
int cxgb4_update_mac_filt(struct port_info *pi, unsigned int viid,
int *tcam_idx, const u8 *addr,
bool persistent, u8 *smt_idx);
-
+int cxgb4_get_msix_idx_from_bmap(struct adapter *adap);
+void cxgb4_free_msix_idx_in_bmap(struct adapter *adap, u32 msix_idx);
+int cxgb_open(struct net_device *dev);
+int cxgb_close(struct net_device *dev);
+void cxgb4_enable_rx(struct adapter *adap, struct sge_rspq *q);
+void cxgb4_quiesce_rx(struct sge_rspq *q);
#endif /* __CXGB4_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index ae6a47dd7dc9..93868dca186a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -2658,6 +2658,7 @@ static int sge_qinfo_uld_ciq_entries(const struct adapter *adap, int uld)
static int sge_qinfo_show(struct seq_file *seq, void *v)
{
+ int eth_entries, ctrl_entries, eo_entries = 0;
int uld_rxq_entries[CXGB4_ULD_MAX] = { 0 };
int uld_ciq_entries[CXGB4_ULD_MAX] = { 0 };
int uld_txq_entries[CXGB4_TX_MAX] = { 0 };
@@ -2665,11 +2666,12 @@ static int sge_qinfo_show(struct seq_file *seq, void *v)
const struct sge_uld_rxq_info *urxq_info;
struct adapter *adap = seq->private;
int i, n, r = (uintptr_t)v - 1;
- int eth_entries, ctrl_entries;
struct sge *s = &adap->sge;
eth_entries = DIV_ROUND_UP(adap->sge.ethqsets, 4);
ctrl_entries = DIV_ROUND_UP(MAX_CTRL_QUEUES, 4);
+ if (adap->sge.eohw_txq)
+ eo_entries = DIV_ROUND_UP(adap->sge.eoqsets, 4);
mutex_lock(&uld_mutex);
if (s->uld_txq_info)
@@ -2746,6 +2748,7 @@ do { \
RL("RxDrops:", stats.rx_drops);
RL("RxBadPkts:", stats.bad_rx_pkts);
TL("TSO:", tso);
+ TL("USO:", uso);
TL("TxCSO:", tx_cso);
TL("VLANins:", vlan_ins);
TL("TxQFull:", q.stops);
@@ -2761,6 +2764,55 @@ do { \
}
r -= eth_entries;
+ if (r < eo_entries) {
+ int base_qset = r * 4;
+ const struct sge_ofld_rxq *rx = &s->eohw_rxq[base_qset];
+ const struct sge_eohw_txq *tx = &s->eohw_txq[base_qset];
+
+ n = min(4, s->eoqsets - 4 * r);
+
+ S("QType:", "ETHOFLD");
+ S("Interface:",
+ rx[i].rspq.netdev ? rx[i].rspq.netdev->name : "N/A");
+ T("TxQ ID:", q.cntxt_id);
+ T("TxQ size:", q.size);
+ T("TxQ inuse:", q.in_use);
+ T("TxQ CIDX:", q.cidx);
+ T("TxQ PIDX:", q.pidx);
+ R("RspQ ID:", rspq.abs_id);
+ R("RspQ size:", rspq.size);
+ R("RspQE size:", rspq.iqe_len);
+ R("RspQ CIDX:", rspq.cidx);
+ R("RspQ Gen:", rspq.gen);
+ S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
+ S3("u", "Intr pktcnt:", s->counter_val[rx[i].rspq.pktcnt_idx]);
+ R("FL ID:", fl.cntxt_id);
+ S3("u", "FL size:", rx->fl.size ? rx->fl.size - 8 : 0);
+ R("FL pend:", fl.pend_cred);
+ R("FL avail:", fl.avail);
+ R("FL PIDX:", fl.pidx);
+ R("FL CIDX:", fl.cidx);
+ RL("RxPackets:", stats.pkts);
+ RL("RxImm:", stats.imm);
+ RL("RxAN", stats.an);
+ RL("RxNoMem", stats.nomem);
+ TL("TSO:", tso);
+ TL("USO:", uso);
+ TL("TxCSO:", tx_cso);
+ TL("VLANins:", vlan_ins);
+ TL("TxQFull:", q.stops);
+ TL("TxQRestarts:", q.restarts);
+ TL("TxMapErr:", mapping_err);
+ RL("FLAllocErr:", fl.alloc_failed);
+ RL("FLLrgAlcErr:", fl.large_alloc_failed);
+ RL("FLMapErr:", fl.mapping_err);
+ RL("FLLow:", fl.low);
+ RL("FLStarving:", fl.starving);
+
+ goto unlock;
+ }
+
+ r -= eo_entries;
if (r < uld_txq_entries[CXGB4_TX_OFLD]) {
const struct sge_uld_txq *tx;
@@ -3007,6 +3059,7 @@ static int sge_queue_entries(const struct adapter *adap)
mutex_unlock(&uld_mutex);
return DIV_ROUND_UP(adap->sge.ethqsets, 4) +
+ (adap->sge.eohw_txq ? DIV_ROUND_UP(adap->sge.eoqsets, 4) : 0) +
tot_uld_entries +
DIV_ROUND_UP(MAX_CTRL_QUEUES, 4) + 1;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
index 76538f4cd595..20ab3b6285a2 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
@@ -91,6 +91,7 @@ static const char stats_strings[][ETH_GSTRING_LEN] = {
"rx_bg3_frames_trunc ",
"tso ",
+ "uso ",
"tx_csum_offload ",
"rx_csum_good ",
"vlan_extractions ",
@@ -220,6 +221,7 @@ static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
*/
struct queue_port_stats {
u64 tso;
+ u64 uso;
u64 tx_csum;
u64 rx_csum;
u64 vlan_ex;
@@ -240,13 +242,15 @@ static void collect_sge_port_stats(const struct adapter *adap,
const struct port_info *p,
struct queue_port_stats *s)
{
- int i;
const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];
+ struct sge_eohw_txq *eohw_tx;
+ unsigned int i;
memset(s, 0, sizeof(*s));
for (i = 0; i < p->nqsets; i++, rx++, tx++) {
s->tso += tx->tso;
+ s->uso += tx->uso;
s->tx_csum += tx->tx_cso;
s->rx_csum += rx->stats.rx_cso;
s->vlan_ex += rx->stats.vlan_ex;
@@ -254,6 +258,16 @@ static void collect_sge_port_stats(const struct adapter *adap,
s->gro_pkts += rx->stats.lro_pkts;
s->gro_merged += rx->stats.lro_merged;
}
+
+ if (adap->sge.eohw_txq) {
+ eohw_tx = &adap->sge.eohw_txq[p->first_qset];
+ for (i = 0; i < p->nqsets; i++, eohw_tx++) {
+ s->tso += eohw_tx->tso;
+ s->uso += eohw_tx->uso;
+ s->tx_csum += eohw_tx->tx_cso;
+ s->vlan_ins += eohw_tx->vlan_ins;
+ }
+ }
}
static void collect_adapter_stats(struct adapter *adap, struct adapter_stats *s)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
index 43b0f8c57da7..1d39fca11810 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
@@ -440,36 +440,48 @@ int cxgb4_get_free_ftid(struct net_device *dev, int family)
{
struct adapter *adap = netdev2adap(dev);
struct tid_info *t = &adap->tids;
+ bool found = false;
+ u8 i, n, cnt;
int ftid;
- spin_lock_bh(&t->ftid_lock);
- if (family == PF_INET) {
- ftid = find_first_zero_bit(t->ftid_bmap, t->nftids);
- if (ftid >= t->nftids)
- ftid = -1;
- } else {
- if (is_t6(adap->params.chip)) {
- ftid = bitmap_find_free_region(t->ftid_bmap,
- t->nftids, 1);
- if (ftid < 0)
- goto out_unlock;
-
- /* this is only a lookup, keep the found region
- * unallocated
- */
- bitmap_release_region(t->ftid_bmap, ftid, 1);
- } else {
- ftid = bitmap_find_free_region(t->ftid_bmap,
- t->nftids, 2);
- if (ftid < 0)
- goto out_unlock;
+ /* An IPv4 filter occupies 1 slot. An IPv6 filter occupies 2 slots
+ * on T6 and 4 slots on T5.
+ */
+ n = 1;
+ if (family == PF_INET6) {
+ n++;
+ if (CHELSIO_CHIP_VERSION(adap->params.chip) < CHELSIO_T6)
+ n += 2;
+ }
+
+ if (n > t->nftids)
+ return -ENOMEM;
- bitmap_release_region(t->ftid_bmap, ftid, 2);
+ /* Find free filter slots from the end of the TCAM. Appropriate
+ * checks must be done by the caller later to ensure that the prio
+ * passed by TC doesn't conflict with the prio saved by existing
+ * rules in the TCAM.
+ */
+ spin_lock_bh(&t->ftid_lock);
+ ftid = t->nftids - 1;
+ while (ftid >= n - 1) {
+ cnt = 0;
+ for (i = 0; i < n; i++) {
+ if (test_bit(ftid - i, t->ftid_bmap))
+ break;
+ cnt++;
}
+ if (cnt == n) {
+ ftid &= ~(n - 1);
+ found = true;
+ break;
+ }
+
+ ftid -= n;
}
-out_unlock:
spin_unlock_bh(&t->ftid_lock);
- return ftid;
+
+ return found ? ftid : -ENOMEM;
}
static int cxgb4_set_ftid(struct tid_info *t, int fidx, int family,
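
Worked example for the new scan in cxgb4_get_free_ftid() above: an IPv6 rule on T5 needs n = 4 contiguous slots. With nftids = 16 the scan starts at ftid = 15 and tests bits 15..12; if all four are clear, cnt == n and the mask step aligns the base down:

	u32 ftid = 15, n = 4;

	ftid &= ~(n - 1);	/* 15 & ~3 == 12: the n-slot aligned base */

If the run is broken by a set bit, the scan retries from ftid -= n, i.e. 11.
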
@@ -510,6 +522,60 @@ static void cxgb4_clear_ftid(struct tid_info *t, int fidx, int family,
spin_unlock_bh(&t->ftid_lock);
}
+bool cxgb4_filter_prio_in_range(struct net_device *dev, u32 idx, u32 prio)
+{
+ struct adapter *adap = netdev2adap(dev);
+ struct filter_entry *prev_fe, *next_fe;
+ struct tid_info *t = &adap->tids;
+ u32 prev_ftid, next_ftid;
+ bool valid = true;
+
+ /* Only insert the rule if both of the following conditions
+ * are met:
+ * 1. The immediate previous rule has priority <= @prio.
+ * 2. The immediate next rule has priority >= @prio.
+ */
+ spin_lock_bh(&t->ftid_lock);
+ /* Don't insert if there's a rule already present at @idx. */
+ if (test_bit(idx, t->ftid_bmap)) {
+ valid = false;
+ goto out_unlock;
+ }
+
+ next_ftid = find_next_bit(t->ftid_bmap, t->nftids, idx);
+ if (next_ftid >= t->nftids)
+ next_ftid = idx;
+
+ next_fe = &adap->tids.ftid_tab[next_ftid];
+
+ prev_ftid = find_last_bit(t->ftid_bmap, idx);
+ if (prev_ftid >= idx)
+ prev_ftid = idx;
+
+ /* See if the filter entry belongs to an IPv6 rule, which
+ * occupies 4 slots on T5 and 2 slots on T6. Adjust the
+ * reference to the previously inserted filter entry
+ * accordingly.
+ */
+ if (CHELSIO_CHIP_VERSION(adap->params.chip) < CHELSIO_T6) {
+ prev_fe = &adap->tids.ftid_tab[prev_ftid & ~0x3];
+ if (!prev_fe->fs.type)
+ prev_fe = &adap->tids.ftid_tab[prev_ftid];
+ } else {
+ prev_fe = &adap->tids.ftid_tab[prev_ftid & ~0x1];
+ if (!prev_fe->fs.type)
+ prev_fe = &adap->tids.ftid_tab[prev_ftid];
+ }
+
+ if ((prev_fe->valid && prio < prev_fe->fs.tc_prio) ||
+ (next_fe->valid && prio > next_fe->fs.tc_prio))
+ valid = false;
+
+out_unlock:
+ spin_unlock_bh(&t->ftid_lock);
+ return valid;
+}
+
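
Worked example for cxgb4_filter_prio_in_range() above: LETCAM rules must remain sorted by prio in index order. If idx 2 holds prio 10 and idx 7 holds prio 30, then inserting at idx 5 is accepted for prio 20 (10 <= 20 and 20 <= 30) but rejected for prio 40, because the next rule's prio (30) would then be lower than the new rule's.
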
/* Delete the filter at a specified index. */
static int del_filter_wr(struct adapter *adapter, int fidx)
{
@@ -806,6 +872,12 @@ static void fill_default_mask(struct ch_filter_specification *fs)
fs->mask.tos |= ~0;
if (fs->val.proto && !fs->mask.proto)
fs->mask.proto |= ~0;
+ if (fs->val.pfvf_vld && !fs->mask.pfvf_vld)
+ fs->mask.pfvf_vld |= ~0;
+ if (fs->val.pf && !fs->mask.pf)
+ fs->mask.pf |= ~0;
+ if (fs->val.vf && !fs->mask.vf)
+ fs->mask.vf |= ~0;
for (i = 0; i < ARRAY_SIZE(fs->val.lip); i++) {
lip |= fs->val.lip[i];
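
The fill_default_mask() additions above follow the driver's value/mask convention: a field given a value but no explicit mask becomes an exact-match field. Illustration of the effect (fill_default_mask() is static to this file, so the call is only a sketch):

	struct ch_filter_specification fs = { };

	fs.val.pf = 4;		/* match PF 4 ... */
				/* ... but the user left fs.mask.pf at 0 */
	fill_default_mask(&fs);
	/* now fs.mask.pf is all-ones: the PF field is an exact match */
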
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.h
index b0751c0611ec..b3e4a645043d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.h
@@ -53,4 +53,5 @@ void clear_all_filters(struct adapter *adapter);
void init_hash_filter(struct adapter *adap);
bool is_filter_exact_match(struct adapter *adap,
struct ch_filter_specification *fs);
+bool cxgb4_filter_prio_in_range(struct net_device *dev, u32 idx, u32 prio);
#endif /* __CXGB4_FILTER_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 38024877751c..12ff69b3ba91 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -65,6 +65,7 @@
#include <linux/uaccess.h>
#include <linux/crash_dump.h>
#include <net/udp_tunnel.h>
+#include <net/xfrm.h>
#include "cxgb4.h"
#include "cxgb4_filter.h"
@@ -82,6 +83,8 @@
#include "sched.h"
#include "cxgb4_tc_u32.h"
#include "cxgb4_tc_flower.h"
+#include "cxgb4_tc_mqprio.h"
+#include "cxgb4_tc_matchall.h"
#include "cxgb4_ptp.h"
#include "cxgb4_cudbg.h"
@@ -184,6 +187,8 @@ static struct dentry *cxgb4_debugfs_root;
LIST_HEAD(adapter_list);
DEFINE_MUTEX(uld_mutex);
+static int cfg_queues(struct adapter *adap);
+
static void link_report(struct net_device *dev)
{
if (!netif_carrier_ok(dev))
@@ -683,31 +688,6 @@ static irqreturn_t t4_nondata_intr(int irq, void *cookie)
return IRQ_HANDLED;
}
-/*
- * Name the MSI-X interrupts.
- */
-static void name_msix_vecs(struct adapter *adap)
-{
- int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);
-
- /* non-data interrupts */
- snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name);
-
- /* FW events */
- snprintf(adap->msix_info[1].desc, n, "%s-FWeventq",
- adap->port[0]->name);
-
- /* Ethernet queues */
- for_each_port(adap, j) {
- struct net_device *d = adap->port[j];
- const struct port_info *pi = netdev_priv(d);
-
- for (i = 0; i < pi->nqsets; i++, msi_idx++)
- snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
- d->name, i);
- }
-}
-
int cxgb4_set_msix_aff(struct adapter *adap, unsigned short vec,
cpumask_var_t *aff_mask, int idx)
{
@@ -741,15 +721,19 @@ static int request_msix_queue_irqs(struct adapter *adap)
struct sge *s = &adap->sge;
struct msix_info *minfo;
int err, ethqidx;
- int msi_index = 2;
- err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
- adap->msix_info[1].desc, &s->fw_evtq);
+ if (s->fwevtq_msix_idx < 0)
+ return -ENOMEM;
+
+ err = request_irq(adap->msix_info[s->fwevtq_msix_idx].vec,
+ t4_sge_intr_msix, 0,
+ adap->msix_info[s->fwevtq_msix_idx].desc,
+ &s->fw_evtq);
if (err)
return err;
for_each_ethrxq(s, ethqidx) {
- minfo = &adap->msix_info[msi_index];
+ minfo = s->ethrxq[ethqidx].msix;
err = request_irq(minfo->vec,
t4_sge_intr_msix, 0,
minfo->desc,
@@ -759,18 +743,16 @@ static int request_msix_queue_irqs(struct adapter *adap)
cxgb4_set_msix_aff(adap, minfo->vec,
&minfo->aff_mask, ethqidx);
- msi_index++;
}
return 0;
unwind:
while (--ethqidx >= 0) {
- msi_index--;
- minfo = &adap->msix_info[msi_index];
+ minfo = s->ethrxq[ethqidx].msix;
cxgb4_clear_msix_aff(minfo->vec, minfo->aff_mask);
free_irq(minfo->vec, &s->ethrxq[ethqidx].rspq);
}
- free_irq(adap->msix_info[1].vec, &s->fw_evtq);
+ free_irq(adap->msix_info[s->fwevtq_msix_idx].vec, &s->fw_evtq);
return err;
}
@@ -778,11 +760,11 @@ static void free_msix_queue_irqs(struct adapter *adap)
{
struct sge *s = &adap->sge;
struct msix_info *minfo;
- int i, msi_index = 2;
+ int i;
- free_irq(adap->msix_info[1].vec, &s->fw_evtq);
+ free_irq(adap->msix_info[s->fwevtq_msix_idx].vec, &s->fw_evtq);
for_each_ethrxq(s, i) {
- minfo = &adap->msix_info[msi_index++];
+ minfo = s->ethrxq[i].msix;
cxgb4_clear_msix_aff(minfo->vec, minfo->aff_mask);
free_irq(minfo->vec, &s->ethrxq[i].rspq);
}
@@ -899,6 +881,12 @@ static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
}
+void cxgb4_quiesce_rx(struct sge_rspq *q)
+{
+ if (q->handler)
+ napi_disable(&q->napi);
+}
+
/*
* Wait until all NAPI handlers are descheduled.
*/
@@ -909,19 +897,24 @@ static void quiesce_rx(struct adapter *adap)
for (i = 0; i < adap->sge.ingr_sz; i++) {
struct sge_rspq *q = adap->sge.ingr_map[i];
- if (q && q->handler)
- napi_disable(&q->napi);
+ if (!q)
+ continue;
+
+ cxgb4_quiesce_rx(q);
}
}
/* Disable interrupt and napi handler */
static void disable_interrupts(struct adapter *adap)
{
+ struct sge *s = &adap->sge;
+
if (adap->flags & CXGB4_FULL_INIT_DONE) {
t4_intr_disable(adap);
if (adap->flags & CXGB4_USING_MSIX) {
free_msix_queue_irqs(adap);
- free_irq(adap->msix_info[0].vec, adap);
+ free_irq(adap->msix_info[s->nd_msix_idx].vec,
+ adap);
} else {
free_irq(adap->pdev->irq, adap);
}
@@ -929,6 +922,17 @@ static void disable_interrupts(struct adapter *adap)
}
}
+void cxgb4_enable_rx(struct adapter *adap, struct sge_rspq *q)
+{
+ if (q->handler)
+ napi_enable(&q->napi);
+
+ /* 0-increment GTS to start the timer and enable interrupts */
+ t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
+ SEINTARM_V(q->intr_params) |
+ INGRESSQID_V(q->cntxt_id));
+}
+
/*
* Enable NAPI scheduling and interrupt generation for all Rx queues.
*/
@@ -941,37 +945,63 @@ static void enable_rx(struct adapter *adap)
if (!q)
continue;
- if (q->handler)
- napi_enable(&q->napi);
- /* 0-increment GTS to start the timer and enable interrupts */
- t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
- SEINTARM_V(q->intr_params) |
- INGRESSQID_V(q->cntxt_id));
+ cxgb4_enable_rx(adap, q);
}
}
+static int setup_non_data_intr(struct adapter *adap)
+{
+ int msix;
+
+ adap->sge.nd_msix_idx = -1;
+ if (!(adap->flags & CXGB4_USING_MSIX))
+ return 0;
+
+ /* Request MSI-X vector for non-data interrupt */
+ msix = cxgb4_get_msix_idx_from_bmap(adap);
+ if (msix < 0)
+ return -ENOMEM;
+
+ snprintf(adap->msix_info[msix].desc,
+ sizeof(adap->msix_info[msix].desc),
+ "%s", adap->port[0]->name);
+
+ adap->sge.nd_msix_idx = msix;
+ return 0;
+}
static int setup_fw_sge_queues(struct adapter *adap)
{
struct sge *s = &adap->sge;
- int err = 0;
+ int msix, err = 0;
bitmap_zero(s->starving_fl, s->egr_sz);
bitmap_zero(s->txq_maperr, s->egr_sz);
- if (adap->flags & CXGB4_USING_MSIX)
- adap->msi_idx = 1; /* vector 0 is for non-queue interrupts */
- else {
+ if (adap->flags & CXGB4_USING_MSIX) {
+ s->fwevtq_msix_idx = -1;
+ msix = cxgb4_get_msix_idx_from_bmap(adap);
+ if (msix < 0)
+ return -ENOMEM;
+
+ snprintf(adap->msix_info[msix].desc,
+ sizeof(adap->msix_info[msix].desc),
+ "%s-FWeventq", adap->port[0]->name);
+ } else {
err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
NULL, NULL, NULL, -1);
if (err)
return err;
- adap->msi_idx = -((int)s->intrq.abs_id + 1);
+ msix = -((int)s->intrq.abs_id + 1);
}
err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
- adap->msi_idx, NULL, fwevtq_handler, NULL, -1);
+ msix, NULL, fwevtq_handler, NULL, -1);
+ if (err && msix >= 0)
+ cxgb4_free_msix_idx_in_bmap(adap, msix);
+
+ s->fwevtq_msix_idx = msix;
return err;
}
@@ -985,14 +1015,17 @@ static int setup_fw_sge_queues(struct adapter *adap)
*/
static int setup_sge_queues(struct adapter *adap)
{
- int err, i, j;
- struct sge *s = &adap->sge;
struct sge_uld_rxq_info *rxq_info = NULL;
+ struct sge *s = &adap->sge;
unsigned int cmplqid = 0;
+ int err, i, j, msix = 0;
if (is_uld(adap))
rxq_info = s->uld_rxq_info[CXGB4_ULD_RDMA];
+ if (!(adap->flags & CXGB4_USING_MSIX))
+ msix = -((int)s->intrq.abs_id + 1);
+
for_each_port(adap, i) {
struct net_device *dev = adap->port[i];
struct port_info *pi = netdev_priv(dev);
@@ -1000,10 +1033,21 @@ static int setup_sge_queues(struct adapter *adap)
struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];
for (j = 0; j < pi->nqsets; j++, q++) {
- if (adap->msi_idx > 0)
- adap->msi_idx++;
+ if (msix >= 0) {
+ msix = cxgb4_get_msix_idx_from_bmap(adap);
+ if (msix < 0) {
+ err = msix;
+ goto freeout;
+ }
+
+ snprintf(adap->msix_info[msix].desc,
+ sizeof(adap->msix_info[msix].desc),
+ "%s-Rx%d", dev->name, j);
+ q->msix = &adap->msix_info[msix];
+ }
+
err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
- adap->msi_idx, &q->fl,
+ msix, &q->fl,
t4_ethrx_handler,
NULL,
t4_get_tp_ch_map(adap,
@@ -1090,6 +1134,24 @@ static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
}
#endif /* CONFIG_CHELSIO_T4_DCB */
+ if (dev->num_tc) {
+ struct port_info *pi = netdev2pinfo(dev);
+ u8 ver, proto;
+
+ ver = ip_hdr(skb)->version;
+ proto = (ver == 6) ? ipv6_hdr(skb)->nexthdr :
+ ip_hdr(skb)->protocol;
+
+ /* Send unsupported traffic pattern to normal NIC queues. */
+ txq = netdev_pick_tx(dev, skb, sb_dev);
+ if (xfrm_offload(skb) || is_ptp_enabled(skb, dev) ||
+ skb->encapsulation ||
+ (proto != IPPROTO_TCP && proto != IPPROTO_UDP))
+ txq = txq % pi->nqsets;
+
+ return txq;
+ }
+
if (select_queue) {
txq = (skb_rx_queue_recorded(skb)
? skb_get_rx_queue(skb)
@@ -1456,19 +1518,23 @@ static int tid_init(struct tid_info *t)
struct adapter *adap = container_of(t, struct adapter, tids);
unsigned int max_ftids = t->nftids + t->nsftids;
unsigned int natids = t->natids;
+ unsigned int eotid_bmap_size;
unsigned int stid_bmap_size;
unsigned int ftid_bmap_size;
size_t size;
stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
ftid_bmap_size = BITS_TO_LONGS(t->nftids);
+ eotid_bmap_size = BITS_TO_LONGS(t->neotids);
size = t->ntids * sizeof(*t->tid_tab) +
natids * sizeof(*t->atid_tab) +
t->nstids * sizeof(*t->stid_tab) +
t->nsftids * sizeof(*t->stid_tab) +
stid_bmap_size * sizeof(long) +
max_ftids * sizeof(*t->ftid_tab) +
- ftid_bmap_size * sizeof(long);
+ ftid_bmap_size * sizeof(long) +
+ t->neotids * sizeof(*t->eotid_tab) +
+ eotid_bmap_size * sizeof(long);
t->tid_tab = kvzalloc(size, GFP_KERNEL);
if (!t->tid_tab)
@@ -1479,6 +1545,8 @@ static int tid_init(struct tid_info *t)
t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
t->ftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
t->ftid_bmap = (unsigned long *)&t->ftid_tab[max_ftids];
+ t->eotid_tab = (struct eotid_entry *)&t->ftid_bmap[ftid_bmap_size];
+ t->eotid_bmap = (unsigned long *)&t->eotid_tab[t->neotids];
spin_lock_init(&t->stid_lock);
spin_lock_init(&t->atid_lock);
spin_lock_init(&t->ftid_lock);
@@ -1505,6 +1573,9 @@ static int tid_init(struct tid_info *t)
if (!t->stid_base &&
CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
__set_bit(0, t->stid_bmap);
+
+ if (t->neotids)
+ bitmap_zero(t->eotid_bmap, t->neotids);
}
bitmap_zero(t->ftid_bmap, t->nftids);
@@ -2361,6 +2432,7 @@ static void update_clip(const struct adapter *adap)
*/
static int cxgb_up(struct adapter *adap)
{
+ struct sge *s = &adap->sge;
int err;
mutex_lock(&uld_mutex);
@@ -2372,16 +2444,20 @@ static int cxgb_up(struct adapter *adap)
goto freeq;
if (adap->flags & CXGB4_USING_MSIX) {
- name_msix_vecs(adap);
- err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
- adap->msix_info[0].desc, adap);
+ if (s->nd_msix_idx < 0) {
+ err = -ENOMEM;
+ goto irq_err;
+ }
+
+ err = request_irq(adap->msix_info[s->nd_msix_idx].vec,
+ t4_nondata_intr, 0,
+ adap->msix_info[s->nd_msix_idx].desc, adap);
if (err)
goto irq_err;
+
err = request_msix_queue_irqs(adap);
- if (err) {
- free_irq(adap->msix_info[0].vec, adap);
- goto irq_err;
- }
+ if (err)
+ goto irq_err_free_nd_msix;
} else {
err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
(adap->flags & CXGB4_USING_MSI) ? 0
@@ -2403,11 +2479,13 @@ static int cxgb_up(struct adapter *adap)
#endif
return err;
- irq_err:
+irq_err_free_nd_msix:
+ free_irq(adap->msix_info[s->nd_msix_idx].vec, adap);
+irq_err:
dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
- freeq:
+freeq:
t4_free_sge_resources(adap);
- rel_lock:
+rel_lock:
mutex_unlock(&uld_mutex);
return err;
}
@@ -2429,11 +2507,11 @@ static void cxgb_down(struct adapter *adapter)
/*
* net_device operations
*/
-static int cxgb_open(struct net_device *dev)
+int cxgb_open(struct net_device *dev)
{
- int err;
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
+ int err;
netif_carrier_off(dev);
@@ -2456,7 +2534,7 @@ static int cxgb_open(struct net_device *dev)
return err;
}
-static int cxgb_close(struct net_device *dev)
+int cxgb_close(struct net_device *dev)
{
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
@@ -3163,8 +3241,33 @@ static int cxgb_setup_tc_cls_u32(struct net_device *dev,
}
}
-static int cxgb_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
- void *cb_priv)
+static int cxgb_setup_tc_matchall(struct net_device *dev,
+ struct tc_cls_matchall_offload *cls_matchall,
+ bool ingress)
+{
+ struct adapter *adap = netdev2adap(dev);
+
+ if (!adap->tc_matchall)
+ return -ENOMEM;
+
+ switch (cls_matchall->command) {
+ case TC_CLSMATCHALL_REPLACE:
+ return cxgb4_tc_matchall_replace(dev, cls_matchall, ingress);
+ case TC_CLSMATCHALL_DESTROY:
+ return cxgb4_tc_matchall_destroy(dev, cls_matchall, ingress);
+ case TC_CLSMATCHALL_STATS:
+ if (ingress)
+ return cxgb4_tc_matchall_stats(dev, cls_matchall);
+ break;
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int cxgb_setup_tc_block_ingress_cb(enum tc_setup_type type,
+ void *type_data, void *cb_priv)
{
struct net_device *dev = cb_priv;
struct port_info *pi = netdev2pinfo(dev);
@@ -3185,24 +3288,81 @@ static int cxgb_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
return cxgb_setup_tc_cls_u32(dev, type_data);
case TC_SETUP_CLSFLOWER:
return cxgb_setup_tc_flower(dev, type_data);
+ case TC_SETUP_CLSMATCHALL:
+ return cxgb_setup_tc_matchall(dev, type_data, true);
default:
return -EOPNOTSUPP;
}
}
+static int cxgb_setup_tc_block_egress_cb(enum tc_setup_type type,
+ void *type_data, void *cb_priv)
+{
+ struct net_device *dev = cb_priv;
+ struct port_info *pi = netdev2pinfo(dev);
+ struct adapter *adap = netdev2adap(dev);
+
+ if (!(adap->flags & CXGB4_FULL_INIT_DONE)) {
+ dev_err(adap->pdev_dev,
+ "Failed to setup tc on port %d. Link Down?\n",
+ pi->port_id);
+ return -EINVAL;
+ }
+
+ if (!tc_cls_can_offload_and_chain0(dev, type_data))
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case TC_SETUP_CLSMATCHALL:
+ return cxgb_setup_tc_matchall(dev, type_data, false);
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int cxgb_setup_tc_mqprio(struct net_device *dev,
+ struct tc_mqprio_qopt_offload *mqprio)
+{
+ struct adapter *adap = netdev2adap(dev);
+
+ if (!is_ethofld(adap) || !adap->tc_mqprio)
+ return -ENOMEM;
+
+ return cxgb4_setup_tc_mqprio(dev, mqprio);
+}
+
static LIST_HEAD(cxgb_block_cb_list);
+static int cxgb_setup_tc_block(struct net_device *dev,
+ struct flow_block_offload *f)
+{
+ struct port_info *pi = netdev_priv(dev);
+ flow_setup_cb_t *cb;
+ bool ingress_only;
+
+ pi->tc_block_shared = f->block_shared;
+ if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
+ cb = cxgb_setup_tc_block_egress_cb;
+ ingress_only = false;
+ } else {
+ cb = cxgb_setup_tc_block_ingress_cb;
+ ingress_only = true;
+ }
+
+ return flow_block_cb_setup_simple(f, &cxgb_block_cb_list,
+ cb, pi, dev, ingress_only);
+}
+
static int cxgb_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data)
{
- struct port_info *pi = netdev2pinfo(dev);
-
switch (type) {
+ case TC_SETUP_QDISC_MQPRIO:
+ return cxgb_setup_tc_mqprio(dev, type_data);
case TC_SETUP_BLOCK:
- return flow_block_cb_setup_simple(type_data,
- &cxgb_block_cb_list,
- cxgb_setup_tc_block_cb,
- pi, dev, true);
+ return cxgb_setup_tc_block(dev, type_data);
default:
return -EOPNOTSUPP;
}
@@ -4286,14 +4446,14 @@ static struct fw_info *find_fw_info(int chip)
/*
* Phase 0 of initialization: contact FW, obtain config, perform basic init.
*/
-static int adap_init0(struct adapter *adap)
+static int adap_init0(struct adapter *adap, int vpd_skip)
{
- int ret;
- u32 v, port_vec;
- enum dev_state state;
- u32 params[7], val[7];
struct fw_caps_config_cmd caps_cmd;
+ u32 params[7], val[7];
+ enum dev_state state;
+ u32 v, port_vec;
int reset = 1;
+ int ret;
/* Grab Firmware Device Log parameters as early as possible so we have
* access to it for debugging, etc.
@@ -4448,9 +4608,11 @@ static int adap_init0(struct adapter *adap)
* could have FLASHed a new VPD which won't be read by the firmware
* until we do the RESET ...
*/
- ret = t4_get_vpd_params(adap, &adap->params.vpd);
- if (ret < 0)
- goto bye;
+ if (!vpd_skip) {
+ ret = t4_get_vpd_params(adap, &adap->params.vpd);
+ if (ret < 0)
+ goto bye;
+ }
/* Find out what ports are available to us. Note that we need to do
* this before calling adap_init0_no_config() since it needs nports
@@ -4600,11 +4762,18 @@ static int adap_init0(struct adapter *adap)
adap->clipt_start = val[0];
adap->clipt_end = val[1];
- /* We don't yet have a PARAMs calls to retrieve the number of Traffic
- * Classes supported by the hardware/firmware so we hard code it here
- * for now.
- */
- adap->params.nsched_cls = is_t4(adap->params.chip) ? 15 : 16;
+ /* Get the supported number of traffic classes */
+ params[0] = FW_PARAM_DEV(NUM_TM_CLASS);
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, params, val);
+ if (ret < 0) {
+ /* We couldn't retrieve the number of Traffic Classes
+ * supported by the hardware/firmware. So we hard
+ * code it here.
+ */
+ adap->params.nsched_cls = is_t4(adap->params.chip) ? 15 : 16;
+ } else {
+ adap->params.nsched_cls = val[0];
+ }
/* query params related to active filter region */
params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
@@ -4689,7 +4858,8 @@ static int adap_init0(struct adapter *adap)
adap->params.offload = 1;
if (caps_cmd.ofldcaps ||
- (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_HASHFILTER))) {
+ (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_HASHFILTER)) ||
+ (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_ETHOFLD))) {
/* query offload-related parameters */
params[0] = FW_PARAM_DEV(NTID);
params[1] = FW_PARAM_PFVF(SERVER_START);
@@ -4731,6 +4901,19 @@ static int adap_init0(struct adapter *adap)
} else {
adap->num_ofld_uld += 1;
}
+
+ if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_ETHOFLD)) {
+ params[0] = FW_PARAM_PFVF(ETHOFLD_START);
+ params[1] = FW_PARAM_PFVF(ETHOFLD_END);
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
+ params, val);
+ if (!ret) {
+ adap->tids.eotid_base = val[0];
+ adap->tids.neotids = min_t(u32, MAX_ATIDS,
+ val[1] - val[0] + 1);
+ adap->params.ethofld = 1;
+ }
+ }
}
if (caps_cmd.rdmacaps) {
params[0] = FW_PARAM_PFVF(STAG_START);
@@ -5050,10 +5233,93 @@ static void eeh_resume(struct pci_dev *pdev)
rtnl_unlock();
}
+static void eeh_reset_prepare(struct pci_dev *pdev)
+{
+ struct adapter *adapter = pci_get_drvdata(pdev);
+ int i;
+
+ if (adapter->pf != 4)
+ return;
+
+ adapter->flags &= ~CXGB4_FW_OK;
+
+ notify_ulds(adapter, CXGB4_STATE_DOWN);
+
+ for_each_port(adapter, i)
+ if (adapter->port[i]->reg_state == NETREG_REGISTERED)
+ cxgb_close(adapter->port[i]);
+
+ disable_interrupts(adapter);
+ cxgb4_free_mps_ref_entries(adapter);
+
+ adap_free_hma_mem(adapter);
+
+ if (adapter->flags & CXGB4_FULL_INIT_DONE)
+ cxgb_down(adapter);
+}
+
+static void eeh_reset_done(struct pci_dev *pdev)
+{
+ struct adapter *adapter = pci_get_drvdata(pdev);
+ int err, i;
+
+ if (adapter->pf != 4)
+ return;
+
+ err = t4_wait_dev_ready(adapter->regs);
+ if (err < 0) {
+ dev_err(adapter->pdev_dev,
+ "Device not ready, err %d", err);
+ return;
+ }
+
+ setup_memwin(adapter);
+
+ err = adap_init0(adapter, 1);
+ if (err) {
+ dev_err(adapter->pdev_dev,
+ "Adapter init failed, err %d", err);
+ return;
+ }
+
+ setup_memwin_rdma(adapter);
+
+ if (adapter->flags & CXGB4_FW_OK) {
+ err = t4_port_init(adapter, adapter->pf, adapter->pf, 0);
+ if (err) {
+ dev_err(adapter->pdev_dev,
+ "Port init failed, err %d", err);
+ return;
+ }
+ }
+
+ err = cfg_queues(adapter);
+ if (err) {
+ dev_err(adapter->pdev_dev,
+ "Config queues failed, err %d", err);
+ return;
+ }
+
+ cxgb4_init_mps_ref_entries(adapter);
+
+ err = setup_fw_sge_queues(adapter);
+ if (err) {
+ dev_err(adapter->pdev_dev,
+ "FW sge queue allocation failed, err %d", err);
+ return;
+ }
+
+ for_each_port(adapter, i)
+ if (adapter->port[i]->reg_state == NETREG_REGISTERED)
+ cxgb_open(adapter->port[i]);
+}
+
static const struct pci_error_handlers cxgb4_eeh = {
.error_detected = eeh_err_detected,
.slot_reset = eeh_slot_reset,
.resume = eeh_resume,
+ .reset_prepare = eeh_reset_prepare,
+ .reset_done = eeh_reset_done,
};
/* Return true if the Link Configuration supports "High Speeds" (those greater
@@ -5070,26 +5336,25 @@ static inline bool is_x_10g_port(const struct link_config *lc)
return high_speeds != 0;
}
-/*
- * Perform default configuration of DMA queues depending on the number and type
+/* Perform default configuration of DMA queues depending on the number and type
* of ports we found and the number of available CPUs. Most settings can be
* modified by the admin prior to actual use.
*/
static int cfg_queues(struct adapter *adap)
{
+ u32 avail_qsets, avail_eth_qsets, avail_uld_qsets;
+ u32 niqflint, neq, num_ulds;
struct sge *s = &adap->sge;
- int i, n10g = 0, qidx = 0;
- int niqflint, neq, avail_eth_qsets;
- int max_eth_qsets = 32;
+ u32 i, n10g = 0, qidx = 0;
#ifndef CONFIG_CHELSIO_T4_DCB
int q10g = 0;
#endif
- /* Reduce memory usage in kdump environment, disable all offload.
- */
+ /* Reduce memory usage in kdump environment, disable all offload. */
if (is_kdump_kernel() || (is_uld(adap) && t4_uld_mem_alloc(adap))) {
adap->params.offload = 0;
adap->params.crypto = 0;
+ adap->params.ethofld = 0;
}
/* Calculate the number of Ethernet Queue Sets available based on
@@ -5108,14 +5373,11 @@ static int cfg_queues(struct adapter *adap)
if (!(adap->flags & CXGB4_USING_MSIX))
niqflint--;
neq = adap->params.pfres.neq / 2;
- avail_eth_qsets = min(niqflint, neq);
+ avail_qsets = min(niqflint, neq);
- if (avail_eth_qsets > max_eth_qsets)
- avail_eth_qsets = max_eth_qsets;
-
- if (avail_eth_qsets < adap->params.nports) {
+ if (avail_qsets < adap->params.nports) {
dev_err(adap->pdev_dev, "avail_eth_qsets=%d < nports=%d\n",
- avail_eth_qsets, adap->params.nports);
+ avail_qsets, adap->params.nports);
return -ENOMEM;
}
@@ -5123,6 +5385,7 @@ static int cfg_queues(struct adapter *adap)
for_each_port(adap, i)
n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
+ avail_eth_qsets = min_t(u32, avail_qsets, MAX_ETH_QSETS);
#ifdef CONFIG_CHELSIO_T4_DCB
/* For Data Center Bridging support we need to be able to support up
* to 8 Traffic Priorities; each of which will be assigned to its
@@ -5142,8 +5405,7 @@ static int cfg_queues(struct adapter *adap)
qidx += pi->nqsets;
}
#else /* !CONFIG_CHELSIO_T4_DCB */
- /*
- * We default to 1 queue per non-10G port and up to # of cores queues
+ /* We default to 1 queue per non-10G port and up to # of cores queues
* per 10G port.
*/
if (n10g)
@@ -5165,19 +5427,40 @@ static int cfg_queues(struct adapter *adap)
s->ethqsets = qidx;
s->max_ethqsets = qidx; /* MSI-X may lower it later */
+ avail_qsets -= qidx;
if (is_uld(adap)) {
- /*
- * For offload we use 1 queue/channel if all ports are up to 1G,
+ /* For offload we use 1 queue/channel if all ports are up to 1G,
* otherwise we divide all available queues amongst the channels
* capped by the number of available cores.
*/
- if (n10g) {
- i = min_t(int, MAX_OFLD_QSETS, num_online_cpus());
- s->ofldqsets = roundup(i, adap->params.nports);
- } else {
+ num_ulds = adap->num_uld + adap->num_ofld_uld;
+ i = min_t(u32, MAX_OFLD_QSETS, num_online_cpus());
+ avail_uld_qsets = roundup(i, adap->params.nports);
+ if (avail_qsets < num_ulds * adap->params.nports) {
+ adap->params.offload = 0;
+ adap->params.crypto = 0;
+ s->ofldqsets = 0;
+ } else if (avail_qsets < num_ulds * avail_uld_qsets || !n10g) {
s->ofldqsets = adap->params.nports;
+ } else {
+ s->ofldqsets = avail_uld_qsets;
+ }
+
+ avail_qsets -= num_ulds * s->ofldqsets;
+ }
+
+ /* ETHOFLD queues used for QoS offload should follow the same
+ * allocation scheme as normal Ethernet queues.
+ */
+ if (is_ethofld(adap)) {
+ if (avail_qsets < s->max_ethqsets) {
+ adap->params.ethofld = 0;
+ s->eoqsets = 0;
+ } else {
+ s->eoqsets = s->max_ethqsets;
}
+ avail_qsets -= s->eoqsets;
}
for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
@@ -5230,42 +5513,62 @@ static void reduce_ethqs(struct adapter *adap, int n)
}
}
-static int get_msix_info(struct adapter *adap)
+static int alloc_msix_info(struct adapter *adap, u32 num_vec)
{
- struct uld_msix_info *msix_info;
- unsigned int max_ingq = 0;
-
- if (is_offload(adap))
- max_ingq += MAX_OFLD_QSETS * adap->num_ofld_uld;
- if (is_pci_uld(adap))
- max_ingq += MAX_OFLD_QSETS * adap->num_uld;
-
- if (!max_ingq)
- goto out;
+ struct msix_info *msix_info;
- msix_info = kcalloc(max_ingq, sizeof(*msix_info), GFP_KERNEL);
+ msix_info = kcalloc(num_vec, sizeof(*msix_info), GFP_KERNEL);
if (!msix_info)
return -ENOMEM;
- adap->msix_bmap_ulds.msix_bmap = kcalloc(BITS_TO_LONGS(max_ingq),
- sizeof(long), GFP_KERNEL);
- if (!adap->msix_bmap_ulds.msix_bmap) {
+ adap->msix_bmap.msix_bmap = kcalloc(BITS_TO_LONGS(num_vec),
+ sizeof(long), GFP_KERNEL);
+ if (!adap->msix_bmap.msix_bmap) {
kfree(msix_info);
return -ENOMEM;
}
- spin_lock_init(&adap->msix_bmap_ulds.lock);
- adap->msix_info_ulds = msix_info;
-out:
+
+ spin_lock_init(&adap->msix_bmap.lock);
+ adap->msix_bmap.mapsize = num_vec;
+
+ adap->msix_info = msix_info;
return 0;
}
static void free_msix_info(struct adapter *adap)
{
- if (!(adap->num_uld && adap->num_ofld_uld))
- return;
+ kfree(adap->msix_bmap.msix_bmap);
+ kfree(adap->msix_info);
+}
- kfree(adap->msix_info_ulds);
- kfree(adap->msix_bmap_ulds.msix_bmap);
+int cxgb4_get_msix_idx_from_bmap(struct adapter *adap)
+{
+ struct msix_bmap *bmap = &adap->msix_bmap;
+ unsigned int msix_idx;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bmap->lock, flags);
+ msix_idx = find_first_zero_bit(bmap->msix_bmap, bmap->mapsize);
+ if (msix_idx < bmap->mapsize) {
+ __set_bit(msix_idx, bmap->msix_bmap);
+ } else {
+ spin_unlock_irqrestore(&bmap->lock, flags);
+ return -ENOSPC;
+ }
+
+ spin_unlock_irqrestore(&bmap->lock, flags);
+ return msix_idx;
+}
+
+void cxgb4_free_msix_idx_in_bmap(struct adapter *adap,
+ unsigned int msix_idx)
+{
+ struct msix_bmap *bmap = &adap->msix_bmap;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bmap->lock, flags);
+ __clear_bit(msix_idx, bmap->msix_bmap);
+ spin_unlock_irqrestore(&bmap->lock, flags);
}
/* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
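
Typical use of the new shared MSI-X bitmap allocator, mirroring the queue-setup paths above (the helper below is hypothetical; handler and cookie come from the caller):

	static int example_request_queue_irq(struct adapter *adap,
					     struct net_device *dev,
					     irq_handler_t handler,
					     void *cookie, int qidx)
	{
		int msix, err;

		msix = cxgb4_get_msix_idx_from_bmap(adap);
		if (msix < 0)
			return -ENOMEM;

		snprintf(adap->msix_info[msix].desc,
			 sizeof(adap->msix_info[msix].desc),
			 "%s-Rx%d", dev->name, qidx);

		err = request_irq(adap->msix_info[msix].vec, handler, 0,
				  adap->msix_info[msix].desc, cookie);
		if (err)
			cxgb4_free_msix_idx_in_bmap(adap, msix);
		return err;
	}
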
@@ -5273,88 +5576,161 @@ static void free_msix_info(struct adapter *adap)
static int enable_msix(struct adapter *adap)
{
- int ofld_need = 0, uld_need = 0;
- int i, j, want, need, allocated;
+ u32 eth_need, uld_need = 0, ethofld_need = 0;
+ u32 ethqsets = 0, ofldqsets = 0, eoqsets = 0;
+ u8 num_uld = 0, nchan = adap->params.nports;
+ u32 i, want, need, num_vec;
struct sge *s = &adap->sge;
- unsigned int nchan = adap->params.nports;
struct msix_entry *entries;
- int max_ingq = MAX_INGQ;
-
- if (is_pci_uld(adap))
- max_ingq += (MAX_OFLD_QSETS * adap->num_uld);
- if (is_offload(adap))
- max_ingq += (MAX_OFLD_QSETS * adap->num_ofld_uld);
- entries = kmalloc_array(max_ingq + 1, sizeof(*entries),
- GFP_KERNEL);
- if (!entries)
- return -ENOMEM;
-
- /* map for msix */
- if (get_msix_info(adap)) {
- adap->params.offload = 0;
- adap->params.crypto = 0;
- }
-
- for (i = 0; i < max_ingq + 1; ++i)
- entries[i].entry = i;
+ struct port_info *pi;
+ int allocated, ret;
- want = s->max_ethqsets + EXTRA_VECS;
- if (is_offload(adap)) {
- want += adap->num_ofld_uld * s->ofldqsets;
- ofld_need = adap->num_ofld_uld * nchan;
- }
- if (is_pci_uld(adap)) {
- want += adap->num_uld * s->ofldqsets;
- uld_need = adap->num_uld * nchan;
- }
+ want = s->max_ethqsets;
#ifdef CONFIG_CHELSIO_T4_DCB
/* For Data Center Bridging we need 8 Ethernet TX Priority Queues for
* each port.
*/
- need = 8 * adap->params.nports + EXTRA_VECS + ofld_need + uld_need;
+ need = 8 * nchan;
#else
- need = adap->params.nports + EXTRA_VECS + ofld_need + uld_need;
+ need = nchan;
#endif
+ eth_need = need;
+ if (is_uld(adap)) {
+ num_uld = adap->num_ofld_uld + adap->num_uld;
+ want += num_uld * s->ofldqsets;
+ uld_need = num_uld * nchan;
+ need += uld_need;
+ }
+
+ if (is_ethofld(adap)) {
+ want += s->eoqsets;
+ ethofld_need = eth_need;
+ need += ethofld_need;
+ }
+
+ want += EXTRA_VECS;
+ need += EXTRA_VECS;
+
+ entries = kmalloc_array(want, sizeof(*entries), GFP_KERNEL);
+ if (!entries)
+ return -ENOMEM;
+
+ for (i = 0; i < want; i++)
+ entries[i].entry = i;
+
allocated = pci_enable_msix_range(adap->pdev, entries, need, want);
if (allocated < 0) {
- dev_info(adap->pdev_dev, "not enough MSI-X vectors left,"
- " not using MSI-X\n");
- kfree(entries);
- return allocated;
+ /* Disable offload and attempt to get vectors for
+ * NIC-only mode.
+ */
+ want = s->max_ethqsets + EXTRA_VECS;
+ need = eth_need + EXTRA_VECS;
+ allocated = pci_enable_msix_range(adap->pdev, entries,
+ need, want);
+ if (allocated < 0) {
+ dev_info(adap->pdev_dev,
+ "Disabling MSI-X due to insufficient MSI-X vectors\n");
+ ret = allocated;
+ goto out_free;
+ }
+
+ dev_info(adap->pdev_dev,
+ "Disabling offload due to insufficient MSI-X vectors\n");
+ adap->params.offload = 0;
+ adap->params.crypto = 0;
+ adap->params.ethofld = 0;
+ s->ofldqsets = 0;
+ s->eoqsets = 0;
+ uld_need = 0;
+ ethofld_need = 0;
+ }
+
+ num_vec = allocated;
+ if (num_vec < want) {
+ /* Distribute available vectors to the various queue groups.
+ * Every group gets its minimum requirement and NIC gets top
+ * priority for leftovers.
+ */
+ ethqsets = eth_need;
+ if (is_uld(adap))
+ ofldqsets = nchan;
+ if (is_ethofld(adap))
+ eoqsets = ethofld_need;
+
+ num_vec -= need;
+ while (num_vec) {
+ if (num_vec < eth_need + ethofld_need ||
+ ethqsets > s->max_ethqsets)
+ break;
+
+ for_each_port(adap, i) {
+ pi = adap2pinfo(adap, i);
+ if (pi->nqsets < 2)
+ continue;
+
+ ethqsets++;
+ num_vec--;
+ if (ethofld_need) {
+ eoqsets++;
+ num_vec--;
+ }
+ }
+ }
+
+ if (is_uld(adap)) {
+ while (num_vec) {
+ if (num_vec < uld_need ||
+ ofldqsets > s->ofldqsets)
+ break;
+
+ ofldqsets++;
+ num_vec -= uld_need;
+ }
+ }
+ } else {
+ ethqsets = s->max_ethqsets;
+ if (is_uld(adap))
+ ofldqsets = s->ofldqsets;
+ if (is_ethofld(adap))
+ eoqsets = s->eoqsets;
}
- /* Distribute available vectors to the various queue groups.
- * Every group gets its minimum requirement and NIC gets top
- * priority for leftovers.
- */
- i = allocated - EXTRA_VECS - ofld_need - uld_need;
- if (i < s->max_ethqsets) {
- s->max_ethqsets = i;
- if (i < s->ethqsets)
- reduce_ethqs(adap, i);
+ if (ethqsets < s->max_ethqsets) {
+ s->max_ethqsets = ethqsets;
+ reduce_ethqs(adap, ethqsets);
}
+
if (is_uld(adap)) {
- if (allocated < want)
- s->nqs_per_uld = nchan;
- else
- s->nqs_per_uld = s->ofldqsets;
+ s->ofldqsets = ofldqsets;
+ s->nqs_per_uld = s->ofldqsets;
}
- for (i = 0; i < (s->max_ethqsets + EXTRA_VECS); ++i)
+ if (is_ethofld(adap))
+ s->eoqsets = eoqsets;
+
+ /* map for msix */
+ ret = alloc_msix_info(adap, allocated);
+ if (ret)
+ goto out_disable_msix;
+
+ for (i = 0; i < allocated; i++) {
adap->msix_info[i].vec = entries[i].vector;
- if (is_uld(adap)) {
- for (j = 0 ; i < allocated; ++i, j++) {
- adap->msix_info_ulds[j].vec = entries[i].vector;
- adap->msix_info_ulds[j].idx = i;
- }
- adap->msix_bmap_ulds.mapsize = j;
+ adap->msix_info[i].idx = i;
}
- dev_info(adap->pdev_dev, "%d MSI-X vectors allocated, "
- "nic %d per uld %d\n",
- allocated, s->max_ethqsets, s->nqs_per_uld);
+
+ dev_info(adap->pdev_dev,
+ "%d MSI-X vectors allocated, nic %d eoqsets %d per uld %d\n",
+ allocated, s->max_ethqsets, s->eoqsets, s->nqs_per_uld);
kfree(entries);
return 0;
+
+out_disable_msix:
+ pci_disable_msix(adap->pdev);
+
+out_free:
+ kfree(entries);
+ return ret;
}
#undef EXTRA_VECS
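
Worked example of the new vector budget in enable_msix() above, assuming a 2-port non-DCB adapter with a single ULD class: eth_need = 2 (one per port), uld_need = 2, ethofld_need = eth_need = 2, plus EXTRA_VECS = 2 for the firmware event queue and the non-data interrupt, giving need = 8. want is the full complement (s->max_ethqsets, plus one set of s->ofldqsets per ULD, plus s->eoqsets, plus EXTRA_VECS). When pci_enable_msix_range() returns something in between, the leftover loop grows the NIC (and ETHOFLD) groups by one vector per multi-queue port per pass, while the ULD group consumes uld_need vectors per extra queue set.
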
@@ -5441,6 +5817,8 @@ static void free_some_resources(struct adapter *adapter)
kvfree(adapter->srq);
t4_cleanup_sched(adapter);
kvfree(adapter->tids.tid_tab);
+ cxgb4_cleanup_tc_matchall(adapter);
+ cxgb4_cleanup_tc_mqprio(adapter);
cxgb4_cleanup_tc_flower(adapter);
cxgb4_cleanup_tc_u32(adapter);
kfree(adapter->sge.egr_map);
@@ -5466,7 +5844,8 @@ static void free_some_resources(struct adapter *adapter)
t4_fw_bye(adapter, adapter->pf);
}
-#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
+#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN | \
+ NETIF_F_GSO_UDP_L4)
#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
NETIF_F_GRO | NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
#define SEGMENT_SIZE 128
@@ -5837,7 +6216,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
setup_memwin(adapter);
- err = adap_init0(adapter);
+ err = adap_init0(adapter, 0);
#ifdef CONFIG_DEBUG_FS
bitmap_zero(adapter->sge.blocked_fl, adapter->sge.egr_sz);
#endif
@@ -5855,8 +6234,14 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
INIT_LIST_HEAD(&adapter->mac_hlist);
for_each_port(adapter, i) {
+ /* To support MQPRIO offload, some extra queues are
+ * needed for the ETHOFLD TIDs. Keep it equal to
+ * MAX_ATIDS for now. Once we connect to the firmware
+ * later and query the EOTID params, we'll know the
+ * actual number of EOTIDs supported.
+ */
netdev = alloc_etherdev_mq(sizeof(struct port_info),
- MAX_ETH_QSETS);
+ MAX_ETH_QSETS + MAX_ATIDS);
if (!netdev) {
err = -ENOMEM;
goto out_free_dev;
@@ -6004,6 +6389,14 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (cxgb4_init_tc_flower(adapter))
dev_warn(&pdev->dev,
"could not offload tc flower, continuing\n");
+
+ if (cxgb4_init_tc_mqprio(adapter))
+ dev_warn(&pdev->dev,
+ "could not offload tc mqprio, continuing\n");
+
+ if (cxgb4_init_tc_matchall(adapter))
+ dev_warn(&pdev->dev,
+ "could not offload tc matchall, continuing\n");
}
if (is_offload(adapter) || is_hashfilter(adapter)) {
@@ -6040,6 +6433,13 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto out_free_dev;
+ err = setup_non_data_intr(adapter);
+ if (err) {
+ dev_err(adapter->pdev_dev,
+ "Non Data interrupt allocation failed, err: %d\n", err);
+ goto out_free_dev;
+ }
+
err = setup_fw_sge_queues(adapter);
if (err) {
dev_err(adapter->pdev_dev,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
index e447976bdd3e..0fa80bef575d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
@@ -378,15 +378,14 @@ static void process_pedit_field(struct ch_filter_specification *fs, u32 val,
}
}
-static void cxgb4_process_flow_actions(struct net_device *in,
- struct flow_cls_offload *cls,
- struct ch_filter_specification *fs)
+void cxgb4_process_flow_actions(struct net_device *in,
+ struct flow_action *actions,
+ struct ch_filter_specification *fs)
{
- struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
struct flow_action_entry *act;
int i;
- flow_action_for_each(i, act, &rule->action) {
+ flow_action_for_each(i, act, actions) {
switch (act->id) {
case FLOW_ACTION_ACCEPT:
fs->action = FILTER_PASS;
@@ -544,17 +543,16 @@ static bool valid_pedit_action(struct net_device *dev,
return true;
}
-static int cxgb4_validate_flow_actions(struct net_device *dev,
- struct flow_cls_offload *cls)
+int cxgb4_validate_flow_actions(struct net_device *dev,
+ struct flow_action *actions)
{
- struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
struct flow_action_entry *act;
bool act_redir = false;
bool act_pedit = false;
bool act_vlan = false;
int i;
- flow_action_for_each(i, act, &rule->action) {
+ flow_action_for_each(i, act, actions) {
switch (act->id) {
case FLOW_ACTION_ACCEPT:
case FLOW_ACTION_DROP:
@@ -636,14 +634,15 @@ static int cxgb4_validate_flow_actions(struct net_device *dev,
int cxgb4_tc_flower_replace(struct net_device *dev,
struct flow_cls_offload *cls)
{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
+ struct netlink_ext_ack *extack = cls->common.extack;
struct adapter *adap = netdev2adap(dev);
struct ch_tc_flower_entry *ch_flower;
struct ch_filter_specification *fs;
struct filter_ctx ctx;
- int fidx;
- int ret;
+ int fidx, ret;
- if (cxgb4_validate_flow_actions(dev, cls))
+ if (cxgb4_validate_flow_actions(dev, &rule->action))
return -EOPNOTSUPP;
if (cxgb4_validate_flow_match(dev, cls))
@@ -658,20 +657,41 @@ int cxgb4_tc_flower_replace(struct net_device *dev,
fs = &ch_flower->fs;
fs->hitcnts = 1;
cxgb4_process_flow_match(dev, cls, fs);
- cxgb4_process_flow_actions(dev, cls, fs);
+ cxgb4_process_flow_actions(dev, &rule->action, fs);
fs->hash = is_filter_exact_match(adap, fs);
if (fs->hash) {
fidx = 0;
} else {
- fidx = cxgb4_get_free_ftid(dev, fs->type ? PF_INET6 : PF_INET);
- if (fidx < 0) {
- netdev_err(dev, "%s: No fidx for offload.\n", __func__);
+ u8 inet_family;
+
+ inet_family = fs->type ? PF_INET6 : PF_INET;
+
+ /* Note that TC reserves prio 0 to mean that the stack
+ * should generate an automatic prio, so the driver never
+ * sees prio 0. However, the hardware TCAM index
+ * starts at 0. Hence, the -1 here.
+ */
+ if (cls->common.prio <= adap->tids.nftids)
+ fidx = cls->common.prio - 1;
+ else
+ fidx = cxgb4_get_free_ftid(dev, inet_family);
+
+ /* Only insert FLOWER rule if its priority doesn't
+ * conflict with existing rules in the LETCAM.
+ */
+ if (fidx < 0 ||
+ !cxgb4_filter_prio_in_range(dev, fidx, cls->common.prio)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "No free LETCAM index available");
ret = -ENOMEM;
goto free_entry;
}
}
+ fs->tc_prio = cls->common.prio;
+ fs->tc_cookie = cls->cookie;
+
init_completion(&ctx.completion);
ret = __cxgb4_set_filter(dev, fidx, fs, &ctx);
if (ret) {
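
The replace path above now maps the 1-based TC priority straight onto a 0-based LETCAM slot when it fits, falling back to the first free slot, and rejects inserts that would break priority ordering. A standalone sketch of that selection; get_free_index() and prio_in_range() are illustrative stand-ins for cxgb4_get_free_ftid() and cxgb4_filter_prio_in_range(), and the ordering rule encoded here is an assumption:

    #include <stdbool.h>
    #include <stdio.h>

    #define NFTIDS 496                      /* illustrative table size */

    static bool used[NFTIDS];               /* slot occupancy */
    static unsigned int slot_prio[NFTIDS];  /* prio programmed per slot */

    static int get_free_index(void)
    {
        for (int i = 0; i < NFTIDS; i++)
            if (!used[i])
                return i;
        return -1;
    }

    /* Assumed rule: lower index must mean lower-or-equal prio. */
    static bool prio_in_range(int idx, unsigned int prio)
    {
        for (int i = 0; i < NFTIDS; i++) {
            if (!used[i])
                continue;
            if ((i < idx && slot_prio[i] > prio) ||
                (i > idx && slot_prio[i] < prio))
                return false;
        }
        return true;
    }

    static int select_letcam_index(unsigned int prio)
    {
        /* TC priorities are 1-based; TCAM indices are 0-based. */
        int fidx = (prio <= NFTIDS) ? (int)prio - 1 : get_free_index();

        if (fidx < 0 || !prio_in_range(fidx, prio))
            return -1;                      /* ordering conflict */
        return fidx;
    }

    int main(void)
    {
        printf("prio 5 -> index %d\n", select_letcam_index(5));
        return 0;
    }
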
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h
index eb4c95248baf..e132516e9868 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h
@@ -108,6 +108,12 @@ struct ch_tc_pedit_fields {
#define PEDIT_TCP_SPORT_DPORT 0x0
#define PEDIT_UDP_SPORT_DPORT 0x0
+void cxgb4_process_flow_actions(struct net_device *in,
+ struct flow_action *actions,
+ struct ch_filter_specification *fs);
+int cxgb4_validate_flow_actions(struct net_device *dev,
+ struct flow_action *actions);
+
int cxgb4_tc_flower_replace(struct net_device *dev,
struct flow_cls_offload *cls);
int cxgb4_tc_flower_destroy(struct net_device *dev,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c
new file mode 100644
index 000000000000..102b370fbd3e
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c
@@ -0,0 +1,354 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2019 Chelsio Communications. All rights reserved. */
+
+#include "cxgb4.h"
+#include "cxgb4_tc_matchall.h"
+#include "sched.h"
+#include "cxgb4_uld.h"
+#include "cxgb4_filter.h"
+#include "cxgb4_tc_flower.h"
+
+static int cxgb4_matchall_egress_validate(struct net_device *dev,
+ struct tc_cls_matchall_offload *cls)
+{
+ struct netlink_ext_ack *extack = cls->common.extack;
+ struct flow_action *actions = &cls->rule->action;
+ struct port_info *pi = netdev2pinfo(dev);
+ struct flow_action_entry *entry;
+ u64 max_link_rate;
+ u32 i, speed;
+ int ret;
+
+ if (!flow_action_has_entries(actions)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Egress MATCHALL offload needs at least 1 policing action");
+ return -EINVAL;
+ } else if (!flow_offload_has_one_action(actions)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Egress MATCHALL offload only supports 1 policing action");
+ return -EINVAL;
+ } else if (pi->tc_block_shared) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Egress MATCHALL offload not supported with shared blocks");
+ return -EINVAL;
+ }
+
+ ret = t4_get_link_params(pi, NULL, &speed, NULL);
+ if (ret) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to get max speed supported by the link");
+ return -EINVAL;
+ }
+
+ /* Convert from Mbps to bps */
+ max_link_rate = (u64)speed * 1000 * 1000;
+
+ flow_action_for_each(i, entry, actions) {
+ switch (entry->id) {
+ case FLOW_ACTION_POLICE:
+ /* Convert bytes per second to bits per second */
+ if (entry->police.rate_bytes_ps * 8 > max_link_rate) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Specified policing max rate is larger than underlying link speed");
+ return -ERANGE;
+ }
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only policing action supported with Egress MATCHALL offload");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ return 0;
+}
+
+static int cxgb4_matchall_alloc_tc(struct net_device *dev,
+ struct tc_cls_matchall_offload *cls)
+{
+ struct ch_sched_params p = {
+ .type = SCHED_CLASS_TYPE_PACKET,
+ .u.params.level = SCHED_CLASS_LEVEL_CH_RL,
+ .u.params.mode = SCHED_CLASS_MODE_CLASS,
+ .u.params.rateunit = SCHED_CLASS_RATEUNIT_BITS,
+ .u.params.ratemode = SCHED_CLASS_RATEMODE_ABS,
+ .u.params.class = SCHED_CLS_NONE,
+ .u.params.minrate = 0,
+ .u.params.weight = 0,
+ .u.params.pktsize = dev->mtu,
+ };
+ struct netlink_ext_ack *extack = cls->common.extack;
+ struct cxgb4_tc_port_matchall *tc_port_matchall;
+ struct port_info *pi = netdev2pinfo(dev);
+ struct adapter *adap = netdev2adap(dev);
+ struct flow_action_entry *entry;
+ struct sched_class *e;
+ u32 i;
+
+ tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
+
+ flow_action_for_each(i, entry, &cls->rule->action)
+ if (entry->id == FLOW_ACTION_POLICE)
+ break;
+
+ /* Convert from bytes per second to Kbps */
+ p.u.params.maxrate = div_u64(entry->police.rate_bytes_ps * 8, 1000);
+ p.u.params.channel = pi->tx_chan;
+ e = cxgb4_sched_class_alloc(dev, &p);
+ if (!e) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "No free traffic class available for policing action");
+ return -ENOMEM;
+ }
+
+ tc_port_matchall->egress.hwtc = e->idx;
+ tc_port_matchall->egress.cookie = cls->cookie;
+ tc_port_matchall->egress.state = CXGB4_MATCHALL_STATE_ENABLED;
+ return 0;
+}
+
+static void cxgb4_matchall_free_tc(struct net_device *dev)
+{
+ struct cxgb4_tc_port_matchall *tc_port_matchall;
+ struct port_info *pi = netdev2pinfo(dev);
+ struct adapter *adap = netdev2adap(dev);
+
+ tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
+ cxgb4_sched_class_free(dev, tc_port_matchall->egress.hwtc);
+
+ tc_port_matchall->egress.hwtc = SCHED_CLS_NONE;
+ tc_port_matchall->egress.cookie = 0;
+ tc_port_matchall->egress.state = CXGB4_MATCHALL_STATE_DISABLED;
+}
+
+static int cxgb4_matchall_alloc_filter(struct net_device *dev,
+ struct tc_cls_matchall_offload *cls)
+{
+ struct netlink_ext_ack *extack = cls->common.extack;
+ struct cxgb4_tc_port_matchall *tc_port_matchall;
+ struct port_info *pi = netdev2pinfo(dev);
+ struct adapter *adap = netdev2adap(dev);
+ struct ch_filter_specification *fs;
+ int ret, fidx;
+
+ /* Note that TC reserves prio 0 to mean that the stack should
+ * generate an automatic prio, so the driver never sees prio 0.
+ * However, the hardware TCAM index starts at 0. Hence, the
+ * -1 here. One slot is enough to create a wildcard matchall
+ * VIID rule.
+ */
+ if (cls->common.prio <= adap->tids.nftids)
+ fidx = cls->common.prio - 1;
+ else
+ fidx = cxgb4_get_free_ftid(dev, PF_INET);
+
+ /* Only insert MATCHALL rule if its priority doesn't conflict
+ * with existing rules in the LETCAM.
+ */
+ if (fidx < 0 ||
+ !cxgb4_filter_prio_in_range(dev, fidx, cls->common.prio)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "No free LETCAM index available");
+ return -ENOMEM;
+ }
+
+ tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
+ fs = &tc_port_matchall->ingress.fs;
+ memset(fs, 0, sizeof(*fs));
+
+ fs->tc_prio = cls->common.prio;
+ fs->tc_cookie = cls->cookie;
+ fs->hitcnts = 1;
+
+ fs->val.pfvf_vld = 1;
+ fs->val.pf = adap->pf;
+ fs->val.vf = pi->vin;
+
+ cxgb4_process_flow_actions(dev, &cls->rule->action, fs);
+
+ ret = cxgb4_set_filter(dev, fidx, fs);
+ if (ret)
+ return ret;
+
+ tc_port_matchall->ingress.tid = fidx;
+ tc_port_matchall->ingress.state = CXGB4_MATCHALL_STATE_ENABLED;
+ return 0;
+}
+
+static int cxgb4_matchall_free_filter(struct net_device *dev)
+{
+ struct cxgb4_tc_port_matchall *tc_port_matchall;
+ struct port_info *pi = netdev2pinfo(dev);
+ struct adapter *adap = netdev2adap(dev);
+ int ret;
+
+ tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
+
+ ret = cxgb4_del_filter(dev, tc_port_matchall->ingress.tid,
+ &tc_port_matchall->ingress.fs);
+ if (ret)
+ return ret;
+
+ tc_port_matchall->ingress.packets = 0;
+ tc_port_matchall->ingress.bytes = 0;
+ tc_port_matchall->ingress.last_used = 0;
+ tc_port_matchall->ingress.tid = 0;
+ tc_port_matchall->ingress.state = CXGB4_MATCHALL_STATE_DISABLED;
+ return 0;
+}
+
+int cxgb4_tc_matchall_replace(struct net_device *dev,
+ struct tc_cls_matchall_offload *cls_matchall,
+ bool ingress)
+{
+ struct netlink_ext_ack *extack = cls_matchall->common.extack;
+ struct cxgb4_tc_port_matchall *tc_port_matchall;
+ struct port_info *pi = netdev2pinfo(dev);
+ struct adapter *adap = netdev2adap(dev);
+ int ret;
+
+ tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
+ if (ingress) {
+ if (tc_port_matchall->ingress.state ==
+ CXGB4_MATCHALL_STATE_ENABLED) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only 1 Ingress MATCHALL can be offloaded");
+ return -ENOMEM;
+ }
+
+ ret = cxgb4_validate_flow_actions(dev,
+ &cls_matchall->rule->action);
+ if (ret)
+ return ret;
+
+ return cxgb4_matchall_alloc_filter(dev, cls_matchall);
+ }
+
+ if (tc_port_matchall->egress.state == CXGB4_MATCHALL_STATE_ENABLED) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only 1 Egress MATCHALL can be offloaded");
+ return -ENOMEM;
+ }
+
+ ret = cxgb4_matchall_egress_validate(dev, cls_matchall);
+ if (ret)
+ return ret;
+
+ return cxgb4_matchall_alloc_tc(dev, cls_matchall);
+}
+
+int cxgb4_tc_matchall_destroy(struct net_device *dev,
+ struct tc_cls_matchall_offload *cls_matchall,
+ bool ingress)
+{
+ struct cxgb4_tc_port_matchall *tc_port_matchall;
+ struct port_info *pi = netdev2pinfo(dev);
+ struct adapter *adap = netdev2adap(dev);
+
+ tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
+ if (ingress) {
+ if (cls_matchall->cookie !=
+ tc_port_matchall->ingress.fs.tc_cookie)
+ return -ENOENT;
+
+ return cxgb4_matchall_free_filter(dev);
+ }
+
+ if (cls_matchall->cookie != tc_port_matchall->egress.cookie)
+ return -ENOENT;
+
+ cxgb4_matchall_free_tc(dev);
+ return 0;
+}
+
+int cxgb4_tc_matchall_stats(struct net_device *dev,
+ struct tc_cls_matchall_offload *cls_matchall)
+{
+ struct cxgb4_tc_port_matchall *tc_port_matchall;
+ struct port_info *pi = netdev2pinfo(dev);
+ struct adapter *adap = netdev2adap(dev);
+ u64 packets, bytes;
+ int ret;
+
+ tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
+ if (tc_port_matchall->ingress.state == CXGB4_MATCHALL_STATE_DISABLED)
+ return -ENOENT;
+
+ ret = cxgb4_get_filter_counters(dev, tc_port_matchall->ingress.tid,
+ &packets, &bytes,
+ tc_port_matchall->ingress.fs.hash);
+ if (ret)
+ return ret;
+
+ if (tc_port_matchall->ingress.packets != packets) {
+ flow_stats_update(&cls_matchall->stats,
+ bytes - tc_port_matchall->ingress.bytes,
+ packets - tc_port_matchall->ingress.packets,
+ tc_port_matchall->ingress.last_used);
+
+ tc_port_matchall->ingress.packets = packets;
+ tc_port_matchall->ingress.bytes = bytes;
+ tc_port_matchall->ingress.last_used = jiffies;
+ }
+
+ return 0;
+}
+
+static void cxgb4_matchall_disable_offload(struct net_device *dev)
+{
+ struct cxgb4_tc_port_matchall *tc_port_matchall;
+ struct port_info *pi = netdev2pinfo(dev);
+ struct adapter *adap = netdev2adap(dev);
+
+ tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
+ if (tc_port_matchall->egress.state == CXGB4_MATCHALL_STATE_ENABLED)
+ cxgb4_matchall_free_tc(dev);
+
+ if (tc_port_matchall->ingress.state == CXGB4_MATCHALL_STATE_ENABLED)
+ cxgb4_matchall_free_filter(dev);
+}
+
+int cxgb4_init_tc_matchall(struct adapter *adap)
+{
+ struct cxgb4_tc_port_matchall *tc_port_matchall;
+ struct cxgb4_tc_matchall *tc_matchall;
+ int ret;
+
+ tc_matchall = kzalloc(sizeof(*tc_matchall), GFP_KERNEL);
+ if (!tc_matchall)
+ return -ENOMEM;
+
+ tc_port_matchall = kcalloc(adap->params.nports,
+ sizeof(*tc_port_matchall),
+ GFP_KERNEL);
+ if (!tc_port_matchall) {
+ ret = -ENOMEM;
+ goto out_free_matchall;
+ }
+
+ tc_matchall->port_matchall = tc_port_matchall;
+ adap->tc_matchall = tc_matchall;
+ return 0;
+
+out_free_matchall:
+ kfree(tc_matchall);
+ return ret;
+}
+
+void cxgb4_cleanup_tc_matchall(struct adapter *adap)
+{
+ u8 i;
+
+ if (adap->tc_matchall) {
+ if (adap->tc_matchall->port_matchall) {
+ for (i = 0; i < adap->params.nports; i++) {
+ struct net_device *dev = adap->port[i];
+
+ if (dev)
+ cxgb4_matchall_disable_offload(dev);
+ }
+ kfree(adap->tc_matchall->port_matchall);
+ }
+ kfree(adap->tc_matchall);
+ }
+}
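
cxgb4_tc_matchall_stats() above reads absolute hardware counters but reports only the increment since the previous poll, caching the new absolutes afterwards. A minimal userspace sketch of that delta-reporting pattern; report() is a hypothetical stand-in for flow_stats_update():

    #include <stdint.h>
    #include <stdio.h>

    struct cached_stats {
        uint64_t packets, bytes, last_used;
    };

    /* Stand-in for flow_stats_update(); prints the delta instead. */
    static void report(uint64_t dbytes, uint64_t dpkts, uint64_t lastused)
    {
        printf("delta: %llu bytes, %llu pkts (last used %llu)\n",
               (unsigned long long)dbytes, (unsigned long long)dpkts,
               (unsigned long long)lastused);
    }

    static void update_stats(struct cached_stats *c, uint64_t packets,
                             uint64_t bytes, uint64_t now)
    {
        if (c->packets == packets)      /* nothing new since last poll */
            return;

        /* Report only the increment, then cache the absolutes. */
        report(bytes - c->bytes, packets - c->packets, c->last_used);
        c->packets = packets;
        c->bytes = bytes;
        c->last_used = now;
    }

    int main(void)
    {
        struct cached_stats c = {0};

        update_stats(&c, 10, 1500, 1);
        update_stats(&c, 10, 1500, 2);  /* no change: suppressed */
        update_stats(&c, 25, 4000, 3);
        return 0;
    }
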
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.h
new file mode 100644
index 000000000000..ab6b5683dfd3
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2019 Chelsio Communications. All rights reserved. */
+
+#ifndef __CXGB4_TC_MATCHALL_H__
+#define __CXGB4_TC_MATCHALL_H__
+
+#include <net/pkt_cls.h>
+
+enum cxgb4_matchall_state {
+ CXGB4_MATCHALL_STATE_DISABLED = 0,
+ CXGB4_MATCHALL_STATE_ENABLED,
+};
+
+struct cxgb4_matchall_egress_entry {
+ enum cxgb4_matchall_state state; /* Current MATCHALL offload state */
+ u8 hwtc; /* Traffic class bound to port */
+ u64 cookie; /* Used to identify the MATCHALL rule offloaded */
+};
+
+struct cxgb4_matchall_ingress_entry {
+ enum cxgb4_matchall_state state; /* Current MATCHALL offload state */
+ u32 tid; /* Index to hardware filter entry */
+ struct ch_filter_specification fs; /* Filter entry */
+ u64 bytes; /* # of bytes hitting the filter */
+ u64 packets; /* # of packets hitting the filter */
+ u64 last_used; /* Last updated jiffies time */
+};
+
+struct cxgb4_tc_port_matchall {
+ struct cxgb4_matchall_egress_entry egress; /* Egress offload info */
+ struct cxgb4_matchall_ingress_entry ingress; /* Ingress offload info */
+};
+
+struct cxgb4_tc_matchall {
+ struct cxgb4_tc_port_matchall *port_matchall; /* Per port entry */
+};
+
+int cxgb4_tc_matchall_replace(struct net_device *dev,
+ struct tc_cls_matchall_offload *cls_matchall,
+ bool ingress);
+int cxgb4_tc_matchall_destroy(struct net_device *dev,
+ struct tc_cls_matchall_offload *cls_matchall,
+ bool ingress);
+int cxgb4_tc_matchall_stats(struct net_device *dev,
+ struct tc_cls_matchall_offload *cls_matchall);
+
+int cxgb4_init_tc_matchall(struct adapter *adap);
+void cxgb4_cleanup_tc_matchall(struct adapter *adap);
+#endif /* __CXGB4_TC_MATCHALL_H__ */
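
For the egress validation in cxgb4_matchall_egress_validate() above, both sides of the comparison are normalized to bits per second: the firmware-reported link speed is in Mbps and the TC policer rate is in bytes per second. A worked example of the conversion, assuming a 100G port and a 5 GB/s policer:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t speed_mbps = 100000;               /* 100G link */
        uint64_t max_link_rate = (uint64_t)speed_mbps * 1000 * 1000;

        uint64_t rate_bytes_ps = 5000000000ULL;     /* 5 GB/s policer */
        uint64_t rate_bits_ps = rate_bytes_ps * 8;  /* 40 Gbit/s */

        printf("%s\n", rate_bits_ps > max_link_rate ?
               "rejected: exceeds link speed" : "accepted");
        return 0;
    }
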
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
new file mode 100644
index 000000000000..477973d2e341
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
@@ -0,0 +1,650 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2019 Chelsio Communications. All rights reserved. */
+
+#include "cxgb4.h"
+#include "cxgb4_tc_mqprio.h"
+#include "sched.h"
+
+static int cxgb4_mqprio_validate(struct net_device *dev,
+ struct tc_mqprio_qopt_offload *mqprio)
+{
+ u64 min_rate = 0, max_rate = 0, max_link_rate;
+ struct port_info *pi = netdev2pinfo(dev);
+ struct adapter *adap = netdev2adap(dev);
+ u32 speed, qcount = 0, qoffset = 0;
+ int ret;
+ u8 i;
+
+ if (!mqprio->qopt.num_tc)
+ return 0;
+
+ if (mqprio->qopt.hw != TC_MQPRIO_HW_OFFLOAD_TCS) {
+ netdev_err(dev, "Only full TC hardware offload is supported\n");
+ return -EINVAL;
+ } else if (mqprio->mode != TC_MQPRIO_MODE_CHANNEL) {
+ netdev_err(dev, "Only channel mode offload is supported\n");
+ return -EINVAL;
+ } else if (mqprio->shaper != TC_MQPRIO_SHAPER_BW_RATE) {
+ netdev_err(dev, "Only bandwidth rate shaper supported\n");
+ return -EINVAL;
+ } else if (mqprio->qopt.num_tc > adap->params.nsched_cls) {
+ netdev_err(dev,
+ "Only %u traffic classes supported by hardware\n",
+ adap->params.nsched_cls);
+ return -ERANGE;
+ }
+
+ ret = t4_get_link_params(pi, NULL, &speed, NULL);
+ if (ret) {
+ netdev_err(dev, "Failed to get link speed, ret: %d\n", ret);
+ return -EINVAL;
+ }
+
+ /* Convert from Mbps to bps */
+ max_link_rate = (u64)speed * 1000 * 1000;
+
+ for (i = 0; i < mqprio->qopt.num_tc; i++) {
+ qoffset = max_t(u16, mqprio->qopt.offset[i], qoffset);
+ qcount += mqprio->qopt.count[i];
+
+ /* Convert bytes per second to bits per second */
+ min_rate += (mqprio->min_rate[i] * 8);
+ max_rate += (mqprio->max_rate[i] * 8);
+ }
+
+ if (qoffset >= adap->tids.neotids || qcount > adap->tids.neotids)
+ return -ENOMEM;
+
+ if (min_rate > max_link_rate || max_rate > max_link_rate) {
+ netdev_err(dev,
+ "Total Min/Max (%llu/%llu) Rate > supported (%llu)\n",
+ min_rate, max_rate, max_link_rate);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int cxgb4_init_eosw_txq(struct net_device *dev,
+ struct sge_eosw_txq *eosw_txq,
+ u32 eotid, u32 hwqid)
+{
+ struct adapter *adap = netdev2adap(dev);
+ struct tx_sw_desc *ring;
+
+ memset(eosw_txq, 0, sizeof(*eosw_txq));
+
+ ring = kcalloc(CXGB4_EOSW_TXQ_DEFAULT_DESC_NUM,
+ sizeof(*ring), GFP_KERNEL);
+ if (!ring)
+ return -ENOMEM;
+
+ eosw_txq->desc = ring;
+ eosw_txq->ndesc = CXGB4_EOSW_TXQ_DEFAULT_DESC_NUM;
+ spin_lock_init(&eosw_txq->lock);
+ eosw_txq->state = CXGB4_EO_STATE_CLOSED;
+ eosw_txq->eotid = eotid;
+ eosw_txq->hwtid = adap->tids.eotid_base + eosw_txq->eotid;
+ eosw_txq->cred = adap->params.ofldq_wr_cred;
+ eosw_txq->hwqid = hwqid;
+ eosw_txq->netdev = dev;
+ tasklet_init(&eosw_txq->qresume_tsk, cxgb4_ethofld_restart,
+ (unsigned long)eosw_txq);
+ return 0;
+}
+
+static void cxgb4_clean_eosw_txq(struct net_device *dev,
+ struct sge_eosw_txq *eosw_txq)
+{
+ struct adapter *adap = netdev2adap(dev);
+
+ cxgb4_eosw_txq_free_desc(adap, eosw_txq, eosw_txq->ndesc);
+ eosw_txq->pidx = 0;
+ eosw_txq->last_pidx = 0;
+ eosw_txq->cidx = 0;
+ eosw_txq->last_cidx = 0;
+ eosw_txq->flowc_idx = 0;
+ eosw_txq->inuse = 0;
+ eosw_txq->cred = adap->params.ofldq_wr_cred;
+ eosw_txq->ncompl = 0;
+ eosw_txq->last_compl = 0;
+ eosw_txq->state = CXGB4_EO_STATE_CLOSED;
+}
+
+static void cxgb4_free_eosw_txq(struct net_device *dev,
+ struct sge_eosw_txq *eosw_txq)
+{
+ spin_lock_bh(&eosw_txq->lock);
+ cxgb4_clean_eosw_txq(dev, eosw_txq);
+ kfree(eosw_txq->desc);
+ spin_unlock_bh(&eosw_txq->lock);
+ tasklet_kill(&eosw_txq->qresume_tsk);
+}
+
+static int cxgb4_mqprio_alloc_hw_resources(struct net_device *dev)
+{
+ struct port_info *pi = netdev2pinfo(dev);
+ struct adapter *adap = netdev2adap(dev);
+ struct sge_ofld_rxq *eorxq;
+ struct sge_eohw_txq *eotxq;
+ int ret, msix = 0;
+ u32 i;
+
+ /* Allocate ETHOFLD hardware queue structures if not done already */
+ if (!refcount_read(&adap->tc_mqprio->refcnt)) {
+ adap->sge.eohw_rxq = kcalloc(adap->sge.eoqsets,
+ sizeof(struct sge_ofld_rxq),
+ GFP_KERNEL);
+ if (!adap->sge.eohw_rxq)
+ return -ENOMEM;
+
+ adap->sge.eohw_txq = kcalloc(adap->sge.eoqsets,
+ sizeof(struct sge_eohw_txq),
+ GFP_KERNEL);
+ if (!adap->sge.eohw_txq) {
+ kfree(adap->sge.eohw_rxq);
+ return -ENOMEM;
+ }
+ }
+
+ if (!(adap->flags & CXGB4_USING_MSIX))
+ msix = -((int)adap->sge.intrq.abs_id + 1);
+
+ for (i = 0; i < pi->nqsets; i++) {
+ eorxq = &adap->sge.eohw_rxq[pi->first_qset + i];
+ eotxq = &adap->sge.eohw_txq[pi->first_qset + i];
+
+ /* Allocate Rxqs for receiving ETHOFLD Tx completions */
+ if (msix >= 0) {
+ msix = cxgb4_get_msix_idx_from_bmap(adap);
+ if (msix < 0) {
+ ret = msix;
+ goto out_free_queues;
+ }
+
+ eorxq->msix = &adap->msix_info[msix];
+ snprintf(eorxq->msix->desc,
+ sizeof(eorxq->msix->desc),
+ "%s-eorxq%d", dev->name, i);
+ }
+
+ init_rspq(adap, &eorxq->rspq,
+ CXGB4_EOHW_RXQ_DEFAULT_INTR_USEC,
+ CXGB4_EOHW_RXQ_DEFAULT_PKT_CNT,
+ CXGB4_EOHW_RXQ_DEFAULT_DESC_NUM,
+ CXGB4_EOHW_RXQ_DEFAULT_DESC_SIZE);
+
+ eorxq->fl.size = CXGB4_EOHW_FLQ_DEFAULT_DESC_NUM;
+
+ ret = t4_sge_alloc_rxq(adap, &eorxq->rspq, false,
+ dev, msix, &eorxq->fl,
+ cxgb4_ethofld_rx_handler,
+ NULL, 0);
+ if (ret)
+ goto out_free_queues;
+
+ /* Allocate ETHOFLD hardware Txqs */
+ eotxq->q.size = CXGB4_EOHW_TXQ_DEFAULT_DESC_NUM;
+ ret = t4_sge_alloc_ethofld_txq(adap, eotxq, dev,
+ eorxq->rspq.cntxt_id);
+ if (ret)
+ goto out_free_queues;
+
+ /* Allocate IRQs, set IRQ affinity, and start Rx */
+ if (adap->flags & CXGB4_USING_MSIX) {
+ ret = request_irq(eorxq->msix->vec, t4_sge_intr_msix, 0,
+ eorxq->msix->desc, &eorxq->rspq);
+ if (ret)
+ goto out_free_msix;
+
+ cxgb4_set_msix_aff(adap, eorxq->msix->vec,
+ &eorxq->msix->aff_mask, i);
+ }
+
+ if (adap->flags & CXGB4_FULL_INIT_DONE)
+ cxgb4_enable_rx(adap, &eorxq->rspq);
+ }
+
+ refcount_inc(&adap->tc_mqprio->refcnt);
+ return 0;
+
+out_free_msix:
+ while (i-- > 0) {
+ eorxq = &adap->sge.eohw_rxq[pi->first_qset + i];
+
+ if (adap->flags & CXGB4_FULL_INIT_DONE)
+ cxgb4_quiesce_rx(&eorxq->rspq);
+
+ if (adap->flags & CXGB4_USING_MSIX) {
+ cxgb4_clear_msix_aff(eorxq->msix->vec,
+ eorxq->msix->aff_mask);
+ free_irq(eorxq->msix->vec, &eorxq->rspq);
+ }
+ }
+
+out_free_queues:
+ for (i = 0; i < pi->nqsets; i++) {
+ eorxq = &adap->sge.eohw_rxq[pi->first_qset + i];
+ eotxq = &adap->sge.eohw_txq[pi->first_qset + i];
+
+ if (eorxq->rspq.desc)
+ free_rspq_fl(adap, &eorxq->rspq, &eorxq->fl);
+ if (eorxq->msix)
+ cxgb4_free_msix_idx_in_bmap(adap, eorxq->msix->idx);
+ t4_sge_free_ethofld_txq(adap, eotxq);
+ }
+
+ kfree(adap->sge.eohw_txq);
+ kfree(adap->sge.eohw_rxq);
+
+ return ret;
+}
+
+static void cxgb4_mqprio_free_hw_resources(struct net_device *dev)
+{
+ struct port_info *pi = netdev2pinfo(dev);
+ struct adapter *adap = netdev2adap(dev);
+ struct sge_ofld_rxq *eorxq;
+ struct sge_eohw_txq *eotxq;
+ u32 i;
+
+ /* Return if no ETHOFLD structures have been allocated yet */
+ if (!refcount_read(&adap->tc_mqprio->refcnt))
+ return;
+
+ /* Return if no hardware queues have been allocated */
+ if (!adap->sge.eohw_rxq[pi->first_qset].rspq.desc)
+ return;
+
+ for (i = 0; i < pi->nqsets; i++) {
+ eorxq = &adap->sge.eohw_rxq[pi->first_qset + i];
+ eotxq = &adap->sge.eohw_txq[pi->first_qset + i];
+
+ /* Device removal path will already disable NAPI
+ * before unregistering netdevice. So, only disable
+ * NAPI if we're not in device removal path
+ */
+ if (!(adap->flags & CXGB4_SHUTTING_DOWN))
+ cxgb4_quiesce_rx(&eorxq->rspq);
+
+ if (adap->flags & CXGB4_USING_MSIX) {
+ cxgb4_clear_msix_aff(eorxq->msix->vec,
+ eorxq->msix->aff_mask);
+ free_irq(eorxq->msix->vec, &eorxq->rspq);
+ }
+
+ free_rspq_fl(adap, &eorxq->rspq, &eorxq->fl);
+ t4_sge_free_ethofld_txq(adap, eotxq);
+ }
+
+ /* Free up ETHOFLD structures if there are no users */
+ if (refcount_dec_and_test(&adap->tc_mqprio->refcnt)) {
+ kfree(adap->sge.eohw_txq);
+ kfree(adap->sge.eohw_rxq);
+ }
+}
+
+static int cxgb4_mqprio_alloc_tc(struct net_device *dev,
+ struct tc_mqprio_qopt_offload *mqprio)
+{
+ struct ch_sched_params p = {
+ .type = SCHED_CLASS_TYPE_PACKET,
+ .u.params.level = SCHED_CLASS_LEVEL_CL_RL,
+ .u.params.mode = SCHED_CLASS_MODE_FLOW,
+ .u.params.rateunit = SCHED_CLASS_RATEUNIT_BITS,
+ .u.params.ratemode = SCHED_CLASS_RATEMODE_ABS,
+ .u.params.class = SCHED_CLS_NONE,
+ .u.params.weight = 0,
+ .u.params.pktsize = dev->mtu,
+ };
+ struct cxgb4_tc_port_mqprio *tc_port_mqprio;
+ struct port_info *pi = netdev2pinfo(dev);
+ struct adapter *adap = netdev2adap(dev);
+ struct sched_class *e;
+ int ret;
+ u8 i;
+
+ tc_port_mqprio = &adap->tc_mqprio->port_mqprio[pi->port_id];
+ p.u.params.channel = pi->tx_chan;
+ for (i = 0; i < mqprio->qopt.num_tc; i++) {
+ /* Convert from bytes per second to Kbps */
+ p.u.params.minrate = div_u64(mqprio->min_rate[i] * 8, 1000);
+ p.u.params.maxrate = div_u64(mqprio->max_rate[i] * 8, 1000);
+
+ e = cxgb4_sched_class_alloc(dev, &p);
+ if (!e) {
+ ret = -ENOMEM;
+ goto out_err;
+ }
+
+ tc_port_mqprio->tc_hwtc_map[i] = e->idx;
+ }
+
+ return 0;
+
+out_err:
+ while (i--)
+ cxgb4_sched_class_free(dev, tc_port_mqprio->tc_hwtc_map[i]);
+
+ return ret;
+}
+
+static void cxgb4_mqprio_free_tc(struct net_device *dev)
+{
+ struct cxgb4_tc_port_mqprio *tc_port_mqprio;
+ struct port_info *pi = netdev2pinfo(dev);
+ struct adapter *adap = netdev2adap(dev);
+ u8 i;
+
+ tc_port_mqprio = &adap->tc_mqprio->port_mqprio[pi->port_id];
+ for (i = 0; i < tc_port_mqprio->mqprio.qopt.num_tc; i++)
+ cxgb4_sched_class_free(dev, tc_port_mqprio->tc_hwtc_map[i]);
+}
+
+static int cxgb4_mqprio_class_bind(struct net_device *dev,
+ struct sge_eosw_txq *eosw_txq,
+ u8 tc)
+{
+ struct ch_sched_flowc fe;
+ int ret;
+
+ init_completion(&eosw_txq->completion);
+
+ fe.tid = eosw_txq->eotid;
+ fe.class = tc;
+
+ ret = cxgb4_sched_class_bind(dev, &fe, SCHED_FLOWC);
+ if (ret)
+ return ret;
+
+ ret = wait_for_completion_timeout(&eosw_txq->completion,
+ CXGB4_FLOWC_WAIT_TIMEOUT);
+ if (!ret)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static void cxgb4_mqprio_class_unbind(struct net_device *dev,
+ struct sge_eosw_txq *eosw_txq,
+ u8 tc)
+{
+ struct adapter *adap = netdev2adap(dev);
+ struct ch_sched_flowc fe;
+
+ /* If we're shutting down, interrupts are disabled and no completions
+ * come back. So, skip waiting for completions in this scenario.
+ */
+ if (!(adap->flags & CXGB4_SHUTTING_DOWN))
+ init_completion(&eosw_txq->completion);
+
+ fe.tid = eosw_txq->eotid;
+ fe.class = tc;
+ cxgb4_sched_class_unbind(dev, &fe, SCHED_FLOWC);
+
+ if (!(adap->flags & CXGB4_SHUTTING_DOWN))
+ wait_for_completion_timeout(&eosw_txq->completion,
+ CXGB4_FLOWC_WAIT_TIMEOUT);
+}
+
+static int cxgb4_mqprio_enable_offload(struct net_device *dev,
+ struct tc_mqprio_qopt_offload *mqprio)
+{
+ struct cxgb4_tc_port_mqprio *tc_port_mqprio;
+ u32 qoffset, qcount, tot_qcount, qid, hwqid;
+ struct port_info *pi = netdev2pinfo(dev);
+ struct adapter *adap = netdev2adap(dev);
+ struct sge_eosw_txq *eosw_txq;
+ int eotid, ret;
+ u16 i, j;
+ u8 hwtc;
+
+ ret = cxgb4_mqprio_alloc_hw_resources(dev);
+ if (ret)
+ return -ENOMEM;
+
+ tc_port_mqprio = &adap->tc_mqprio->port_mqprio[pi->port_id];
+ for (i = 0; i < mqprio->qopt.num_tc; i++) {
+ qoffset = mqprio->qopt.offset[i];
+ qcount = mqprio->qopt.count[i];
+ for (j = 0; j < qcount; j++) {
+ eotid = cxgb4_get_free_eotid(&adap->tids);
+ if (eotid < 0) {
+ ret = -ENOMEM;
+ goto out_free_eotids;
+ }
+
+ qid = qoffset + j;
+ hwqid = pi->first_qset + (eotid % pi->nqsets);
+ eosw_txq = &tc_port_mqprio->eosw_txq[qid];
+ ret = cxgb4_init_eosw_txq(dev, eosw_txq,
+ eotid, hwqid);
+ if (ret)
+ goto out_free_eotids;
+
+ cxgb4_alloc_eotid(&adap->tids, eotid, eosw_txq);
+
+ hwtc = tc_port_mqprio->tc_hwtc_map[i];
+ ret = cxgb4_mqprio_class_bind(dev, eosw_txq, hwtc);
+ if (ret)
+ goto out_free_eotids;
+ }
+ }
+
+ memcpy(&tc_port_mqprio->mqprio, mqprio,
+ sizeof(struct tc_mqprio_qopt_offload));
+
+ /* Inform the stack about the configured tc params.
+ *
+ * Set the correct queue map. If no queue count has been
+ * specified, then send the traffic through the default NIC
+ * queues instead of the ETHOFLD queues.
+ */
+ ret = netdev_set_num_tc(dev, mqprio->qopt.num_tc);
+ if (ret)
+ goto out_free_eotids;
+
+ tot_qcount = pi->nqsets;
+ for (i = 0; i < mqprio->qopt.num_tc; i++) {
+ qcount = mqprio->qopt.count[i];
+ if (qcount) {
+ qoffset = mqprio->qopt.offset[i] + pi->nqsets;
+ } else {
+ qcount = pi->nqsets;
+ qoffset = 0;
+ }
+
+ ret = netdev_set_tc_queue(dev, i, qcount, qoffset);
+ if (ret)
+ goto out_reset_tc;
+
+ tot_qcount += mqprio->qopt.count[i];
+ }
+
+ ret = netif_set_real_num_tx_queues(dev, tot_qcount);
+ if (ret)
+ goto out_reset_tc;
+
+ tc_port_mqprio->state = CXGB4_MQPRIO_STATE_ACTIVE;
+ return 0;
+
+out_reset_tc:
+ netdev_reset_tc(dev);
+ i = mqprio->qopt.num_tc;
+
+out_free_eotids:
+ while (i-- > 0) {
+ qoffset = mqprio->qopt.offset[i];
+ qcount = mqprio->qopt.count[i];
+ for (j = 0; j < qcount; j++) {
+ eosw_txq = &tc_port_mqprio->eosw_txq[qoffset + j];
+
+ hwtc = tc_port_mqprio->tc_hwtc_map[i];
+ cxgb4_mqprio_class_unbind(dev, eosw_txq, hwtc);
+
+ cxgb4_free_eotid(&adap->tids, eosw_txq->eotid);
+ cxgb4_free_eosw_txq(dev, eosw_txq);
+ }
+ }
+
+ cxgb4_mqprio_free_hw_resources(dev);
+ return ret;
+}
+
+static void cxgb4_mqprio_disable_offload(struct net_device *dev)
+{
+ struct cxgb4_tc_port_mqprio *tc_port_mqprio;
+ struct port_info *pi = netdev2pinfo(dev);
+ struct adapter *adap = netdev2adap(dev);
+ struct sge_eosw_txq *eosw_txq;
+ u32 qoffset, qcount;
+ u16 i, j;
+ u8 hwtc;
+
+ tc_port_mqprio = &adap->tc_mqprio->port_mqprio[pi->port_id];
+ if (tc_port_mqprio->state != CXGB4_MQPRIO_STATE_ACTIVE)
+ return;
+
+ netdev_reset_tc(dev);
+ netif_set_real_num_tx_queues(dev, pi->nqsets);
+
+ for (i = 0; i < tc_port_mqprio->mqprio.qopt.num_tc; i++) {
+ qoffset = tc_port_mqprio->mqprio.qopt.offset[i];
+ qcount = tc_port_mqprio->mqprio.qopt.count[i];
+ for (j = 0; j < qcount; j++) {
+ eosw_txq = &tc_port_mqprio->eosw_txq[qoffset + j];
+
+ hwtc = tc_port_mqprio->tc_hwtc_map[i];
+ cxgb4_mqprio_class_unbind(dev, eosw_txq, hwtc);
+
+ cxgb4_free_eotid(&adap->tids, eosw_txq->eotid);
+ cxgb4_free_eosw_txq(dev, eosw_txq);
+ }
+ }
+
+ cxgb4_mqprio_free_hw_resources(dev);
+
+ /* Free up the traffic classes */
+ cxgb4_mqprio_free_tc(dev);
+
+ memset(&tc_port_mqprio->mqprio, 0,
+ sizeof(struct tc_mqprio_qopt_offload));
+
+ tc_port_mqprio->state = CXGB4_MQPRIO_STATE_DISABLED;
+}
+
+int cxgb4_setup_tc_mqprio(struct net_device *dev,
+ struct tc_mqprio_qopt_offload *mqprio)
+{
+ bool needs_bring_up = false;
+ int ret;
+
+ ret = cxgb4_mqprio_validate(dev, mqprio);
+ if (ret)
+ return ret;
+
+ /* To configure tc params, the currently allocated EOTIDs must
+ * be freed up. However, they can't be freed up if there's
+ * traffic running on the interface. So, ensure the interface is
+ * down before configuring tc params.
+ */
+ if (netif_running(dev)) {
+ cxgb_close(dev);
+ needs_bring_up = true;
+ }
+
+ cxgb4_mqprio_disable_offload(dev);
+
+ /* If this is a request to clear, just return, since the
+ * resources have already been freed by now.
+ */
+ if (!mqprio->qopt.num_tc)
+ goto out;
+
+ /* Allocate available traffic classes and configure their
+ * rate parameters.
+ */
+ ret = cxgb4_mqprio_alloc_tc(dev, mqprio);
+ if (ret)
+ goto out;
+
+ ret = cxgb4_mqprio_enable_offload(dev, mqprio);
+ if (ret) {
+ cxgb4_mqprio_free_tc(dev);
+ goto out;
+ }
+
+out:
+ if (needs_bring_up)
+ cxgb_open(dev);
+
+ return ret;
+}
+
+int cxgb4_init_tc_mqprio(struct adapter *adap)
+{
+ struct cxgb4_tc_port_mqprio *tc_port_mqprio, *port_mqprio;
+ struct cxgb4_tc_mqprio *tc_mqprio;
+ struct sge_eosw_txq *eosw_txq;
+ int ret = 0;
+ u8 i;
+
+ tc_mqprio = kzalloc(sizeof(*tc_mqprio), GFP_KERNEL);
+ if (!tc_mqprio)
+ return -ENOMEM;
+
+ tc_port_mqprio = kcalloc(adap->params.nports, sizeof(*tc_port_mqprio),
+ GFP_KERNEL);
+ if (!tc_port_mqprio) {
+ ret = -ENOMEM;
+ goto out_free_mqprio;
+ }
+
+ tc_mqprio->port_mqprio = tc_port_mqprio;
+ for (i = 0; i < adap->params.nports; i++) {
+ port_mqprio = &tc_mqprio->port_mqprio[i];
+ eosw_txq = kcalloc(adap->tids.neotids, sizeof(*eosw_txq),
+ GFP_KERNEL);
+ if (!eosw_txq) {
+ ret = -ENOMEM;
+ goto out_free_ports;
+ }
+ port_mqprio->eosw_txq = eosw_txq;
+ }
+
+ adap->tc_mqprio = tc_mqprio;
+ refcount_set(&adap->tc_mqprio->refcnt, 0);
+ return 0;
+
+out_free_ports:
+ for (i = 0; i < adap->params.nports; i++) {
+ port_mqprio = &tc_mqprio->port_mqprio[i];
+ kfree(port_mqprio->eosw_txq);
+ }
+ kfree(tc_port_mqprio);
+
+out_free_mqprio:
+ kfree(tc_mqprio);
+ return ret;
+}
+
+void cxgb4_cleanup_tc_mqprio(struct adapter *adap)
+{
+ struct cxgb4_tc_port_mqprio *port_mqprio;
+ u8 i;
+
+ if (adap->tc_mqprio) {
+ if (adap->tc_mqprio->port_mqprio) {
+ for (i = 0; i < adap->params.nports; i++) {
+ struct net_device *dev = adap->port[i];
+
+ if (dev)
+ cxgb4_mqprio_disable_offload(dev);
+ port_mqprio = &adap->tc_mqprio->port_mqprio[i];
+ kfree(port_mqprio->eosw_txq);
+ }
+ kfree(adap->tc_mqprio->port_mqprio);
+ }
+ kfree(adap->tc_mqprio);
+ }
+}
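
The queue-map setup in cxgb4_mqprio_enable_offload() above shifts each class's queue offset by pi->nqsets so the dedicated ETHOFLD queues land after the default NIC queues, while a class with no queue count falls back to the default queues. A simplified sketch of that mapping with illustrative values:

    #include <stdio.h>

    int main(void)
    {
        unsigned int nqsets = 8;        /* default NIC queues per port */
        unsigned int num_tc = 2;
        unsigned int count[]  = {4, 0}; /* TC1 has no dedicated queues */
        unsigned int offset[] = {0, 4};

        unsigned int tot = nqsets;      /* default queues come first */
        for (unsigned int tc = 0; tc < num_tc; tc++) {
            unsigned int qcount = count[tc], qoffset;

            if (qcount) {
                /* Dedicated ETHOFLD queues sit after the defaults. */
                qoffset = offset[tc] + nqsets;
            } else {
                /* No count given: reuse the default NIC queues. */
                qcount = nqsets;
                qoffset = 0;
            }
            printf("tc %u -> queues [%u, %u)\n",
                   tc, qoffset, qoffset + qcount);
            tot += count[tc];
        }
        printf("real_num_tx_queues = %u\n", tot);
        return 0;
    }
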
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.h
new file mode 100644
index 000000000000..c532f1ef8451
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2019 Chelsio Communications. All rights reserved. */
+
+#ifndef __CXGB4_TC_MQPRIO_H__
+#define __CXGB4_TC_MQPRIO_H__
+
+#include <net/pkt_cls.h>
+
+#define CXGB4_EOSW_TXQ_DEFAULT_DESC_NUM 128
+
+#define CXGB4_EOHW_TXQ_DEFAULT_DESC_NUM 1024
+
+#define CXGB4_EOHW_RXQ_DEFAULT_DESC_NUM 1024
+#define CXGB4_EOHW_RXQ_DEFAULT_DESC_SIZE 64
+#define CXGB4_EOHW_RXQ_DEFAULT_INTR_USEC 5
+#define CXGB4_EOHW_RXQ_DEFAULT_PKT_CNT 8
+
+#define CXGB4_EOHW_FLQ_DEFAULT_DESC_NUM 72
+
+#define CXGB4_FLOWC_WAIT_TIMEOUT (5 * HZ)
+
+enum cxgb4_mqprio_state {
+ CXGB4_MQPRIO_STATE_DISABLED = 0,
+ CXGB4_MQPRIO_STATE_ACTIVE,
+};
+
+struct cxgb4_tc_port_mqprio {
+ enum cxgb4_mqprio_state state; /* Current MQPRIO offload state */
+ struct tc_mqprio_qopt_offload mqprio; /* MQPRIO offload params */
+ struct sge_eosw_txq *eosw_txq; /* Netdev SW Tx queue array */
+ u8 tc_hwtc_map[TC_QOPT_MAX_QUEUE]; /* MQPRIO tc to hardware tc map */
+};
+
+struct cxgb4_tc_mqprio {
+ refcount_t refcnt; /* Refcount for adapter-wide resources */
+ struct cxgb4_tc_port_mqprio *port_mqprio; /* Per port MQPRIO info */
+};
+
+int cxgb4_setup_tc_mqprio(struct net_device *dev,
+ struct tc_mqprio_qopt_offload *mqprio);
+int cxgb4_init_tc_mqprio(struct adapter *adap);
+void cxgb4_cleanup_tc_mqprio(struct adapter *adap);
+#endif /* __CXGB4_TC_MQPRIO_H__ */
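
The refcnt field above guards adapter-wide state: the ETHOFLD hardware queue arrays are allocated by the first port that enables MQPRIO and freed by the last one that disables it. A minimal, unsynchronized userspace sketch of that first-alloc/last-free pattern (the driver uses refcount_t; locking is omitted here):

    #include <stdlib.h>

    static unsigned int refcnt;     /* stands in for refcount_t */
    static void *shared;            /* stands in for eohw_rxq/eohw_txq */

    static int get_shared(void)
    {
        if (refcnt == 0) {          /* first user allocates */
            shared = calloc(1, 4096);
            if (!shared)
                return -1;
        }
        refcnt++;
        return 0;
    }

    static void put_shared(void)
    {
        if (refcnt && --refcnt == 0) {  /* last user frees */
            free(shared);
            shared = NULL;
        }
    }

    int main(void)
    {
        if (get_shared() == 0 && get_shared() == 0) {
            put_shared();           /* one user left: kept */
            put_shared();           /* last user: freed */
        }
        return 0;
    }
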
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
index 02fc63fa7f25..133f8623ba86 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
@@ -36,6 +36,7 @@
#include <net/tc_act/tc_mirred.h>
#include "cxgb4.h"
+#include "cxgb4_filter.h"
#include "cxgb4_tc_u32_parse.h"
#include "cxgb4_tc_u32.h"
@@ -148,6 +149,7 @@ static int fill_action_fields(struct adapter *adap,
int cxgb4_config_knode(struct net_device *dev, struct tc_cls_u32_offload *cls)
{
const struct cxgb4_match_field *start, *link_start = NULL;
+ struct netlink_ext_ack *extack = cls->common.extack;
struct adapter *adapter = netdev2adap(dev);
__be16 protocol = cls->common.protocol;
struct ch_filter_specification fs;
@@ -164,14 +166,21 @@ int cxgb4_config_knode(struct net_device *dev, struct tc_cls_u32_offload *cls)
if (protocol != htons(ETH_P_IP) && protocol != htons(ETH_P_IPV6))
return -EOPNOTSUPP;
- /* Fetch the location to insert the filter. */
- filter_id = cls->knode.handle & 0xFFFFF;
+ /* Note that TC reserves prio 0 to mean that the stack should
+ * generate an automatic prio, so the driver never sees prio 0.
+ * However, the hardware TCAM index starts at 0. Hence, the
+ * -1 here.
+ */
+ filter_id = TC_U32_NODE(cls->knode.handle) - 1;
- if (filter_id > adapter->tids.nftids) {
- dev_err(adapter->pdev_dev,
- "Location %d out of range for insertion. Max: %d\n",
- filter_id, adapter->tids.nftids);
- return -ERANGE;
+ /* Only insert U32 rule if its priority doesn't conflict with
+ * existing rules in the LETCAM.
+ */
+ if (filter_id >= adapter->tids.nftids ||
+ !cxgb4_filter_prio_in_range(dev, filter_id, cls->common.prio)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "No free LETCAM index available");
+ return -ENOMEM;
}
t = adapter->tc_u32;
@@ -190,6 +199,9 @@ int cxgb4_config_knode(struct net_device *dev, struct tc_cls_u32_offload *cls)
memset(&fs, 0, sizeof(fs));
+ fs.tc_prio = cls->common.prio;
+ fs.tc_cookie = cls->knode.handle;
+
if (protocol == htons(ETH_P_IPV6)) {
start = cxgb4_ipv6_fields;
is_ipv6 = true;
@@ -350,14 +362,10 @@ int cxgb4_delete_knode(struct net_device *dev, struct tc_cls_u32_offload *cls)
return -EOPNOTSUPP;
/* Fetch the location to delete the filter. */
- filter_id = cls->knode.handle & 0xFFFFF;
-
- if (filter_id > adapter->tids.nftids) {
- dev_err(adapter->pdev_dev,
- "Location %d out of range for deletion. Max: %d\n",
- filter_id, adapter->tids.nftids);
+ filter_id = TC_U32_NODE(cls->knode.handle) - 1;
+ if (filter_id >= adapter->tids.nftids ||
+ cls->knode.handle != adapter->tids.ftid_tab[filter_id].fs.tc_cookie)
return -ERANGE;
- }
t = adapter->tc_u32;
handle = cls->knode.handle;
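
The u32 change above stops treating the whole 32-bit handle as a filter location and instead decodes the node id with TC_U32_NODE() (the low 12 bits), shifting the stack's 1-based node numbering to the hardware's 0-based index. A tiny sketch of the decoding:

    #include <stdint.h>
    #include <stdio.h>

    /* Same mask as TC_U32_NODE() in include/uapi/linux/pkt_cls.h. */
    #define TC_U32_NODE(h)  ((h) & 0xFFF)

    int main(void)
    {
        uint32_t handle = 0x5;      /* node 5 in hash table 0 */

        /* Node ids are 1-based; TCAM indices are 0-based. */
        int filter_id = (int)TC_U32_NODE(handle) - 1;

        printf("node %u -> filter index %d\n",
               TC_U32_NODE(handle), filter_id);
        return 0;
    }
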
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
index 86b528d8364c..cce33d279094 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
@@ -53,35 +53,6 @@
#define for_each_uldrxq(m, i) for (i = 0; i < ((m)->nrxq + (m)->nciq); i++)
-static int get_msix_idx_from_bmap(struct adapter *adap)
-{
- struct uld_msix_bmap *bmap = &adap->msix_bmap_ulds;
- unsigned long flags;
- unsigned int msix_idx;
-
- spin_lock_irqsave(&bmap->lock, flags);
- msix_idx = find_first_zero_bit(bmap->msix_bmap, bmap->mapsize);
- if (msix_idx < bmap->mapsize) {
- __set_bit(msix_idx, bmap->msix_bmap);
- } else {
- spin_unlock_irqrestore(&bmap->lock, flags);
- return -ENOSPC;
- }
-
- spin_unlock_irqrestore(&bmap->lock, flags);
- return msix_idx;
-}
-
-static void free_msix_idx_in_bmap(struct adapter *adap, unsigned int msix_idx)
-{
- struct uld_msix_bmap *bmap = &adap->msix_bmap_ulds;
- unsigned long flags;
-
- spin_lock_irqsave(&bmap->lock, flags);
- __clear_bit(msix_idx, bmap->msix_bmap);
- spin_unlock_irqrestore(&bmap->lock, flags);
-}
-
/* Flush the aggregated lro sessions */
static void uldrx_flush_handler(struct sge_rspq *q)
{
@@ -138,9 +109,9 @@ static int alloc_uld_rxqs(struct adapter *adap,
struct sge_uld_rxq_info *rxq_info, bool lro)
{
unsigned int nq = rxq_info->nrxq + rxq_info->nciq;
- int i, err, msi_idx, que_idx = 0, bmap_idx = 0;
struct sge_ofld_rxq *q = rxq_info->uldrxq;
unsigned short *ids = rxq_info->rspq_id;
+ int i, err, msi_idx, que_idx = 0;
struct sge *s = &adap->sge;
unsigned int per_chan;
@@ -159,12 +130,18 @@ static int alloc_uld_rxqs(struct adapter *adap,
}
if (msi_idx >= 0) {
- bmap_idx = get_msix_idx_from_bmap(adap);
- if (bmap_idx < 0) {
+ msi_idx = cxgb4_get_msix_idx_from_bmap(adap);
+ if (msi_idx < 0) {
err = -ENOSPC;
goto freeout;
}
- msi_idx = adap->msix_info_ulds[bmap_idx].idx;
+
+ snprintf(adap->msix_info[msi_idx].desc,
+ sizeof(adap->msix_info[msi_idx].desc),
+ "%s-%s%d",
+ adap->port[0]->name, rxq_info->name, i);
+
+ q->msix = &adap->msix_info[msi_idx];
}
err = t4_sge_alloc_rxq(adap, &q->rspq, false,
adap->port[que_idx++ / per_chan],
@@ -175,8 +152,7 @@ static int alloc_uld_rxqs(struct adapter *adap,
0);
if (err)
goto freeout;
- if (msi_idx >= 0)
- rxq_info->msix_tbl[i] = bmap_idx;
+
memset(&q->stats, 0, sizeof(q->stats));
if (ids)
ids[i] = q->rspq.abs_id;
@@ -188,6 +164,8 @@ freeout:
if (q->rspq.desc)
free_rspq_fl(adap, &q->rspq,
q->fl.size ? &q->fl : NULL);
+ if (q->msix)
+ cxgb4_free_msix_idx_in_bmap(adap, q->msix->idx);
}
return err;
}
@@ -198,14 +176,6 @@ setup_sge_queues_uld(struct adapter *adap, unsigned int uld_type, bool lro)
struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
int i, ret = 0;
- if (adap->flags & CXGB4_USING_MSIX) {
- rxq_info->msix_tbl = kcalloc((rxq_info->nrxq + rxq_info->nciq),
- sizeof(unsigned short),
- GFP_KERNEL);
- if (!rxq_info->msix_tbl)
- return -ENOMEM;
- }
-
ret = !(!alloc_uld_rxqs(adap, rxq_info, lro));
/* Tell uP to route control queue completions to rdma rspq */
@@ -261,8 +231,6 @@ static void free_sge_queues_uld(struct adapter *adap, unsigned int uld_type)
t4_free_uld_rxqs(adap, rxq_info->nciq,
rxq_info->uldrxq + rxq_info->nrxq);
t4_free_uld_rxqs(adap, rxq_info->nrxq, rxq_info->uldrxq);
- if (adap->flags & CXGB4_USING_MSIX)
- kfree(rxq_info->msix_tbl);
}
static int cfg_queues_uld(struct adapter *adap, unsigned int uld_type,
@@ -355,13 +323,12 @@ static int
request_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
{
struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
- struct uld_msix_info *minfo;
+ struct msix_info *minfo;
+ unsigned int idx;
int err = 0;
- unsigned int idx, bmap_idx;
for_each_uldrxq(rxq_info, idx) {
- bmap_idx = rxq_info->msix_tbl[idx];
- minfo = &adap->msix_info_ulds[bmap_idx];
+ minfo = rxq_info->uldrxq[idx].msix;
err = request_irq(minfo->vec,
t4_sge_intr_msix, 0,
minfo->desc,
@@ -376,10 +343,9 @@ request_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
unwind:
while (idx-- > 0) {
- bmap_idx = rxq_info->msix_tbl[idx];
- minfo = &adap->msix_info_ulds[bmap_idx];
+ minfo = rxq_info->uldrxq[idx].msix;
cxgb4_clear_msix_aff(minfo->vec, minfo->aff_mask);
- free_msix_idx_in_bmap(adap, bmap_idx);
+ cxgb4_free_msix_idx_in_bmap(adap, minfo->idx);
free_irq(minfo->vec, &rxq_info->uldrxq[idx].rspq);
}
return err;
@@ -389,69 +355,45 @@ static void
free_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
{
struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
- struct uld_msix_info *minfo;
- unsigned int idx, bmap_idx;
+ struct msix_info *minfo;
+ unsigned int idx;
for_each_uldrxq(rxq_info, idx) {
- bmap_idx = rxq_info->msix_tbl[idx];
- minfo = &adap->msix_info_ulds[bmap_idx];
-
+ minfo = rxq_info->uldrxq[idx].msix;
cxgb4_clear_msix_aff(minfo->vec, minfo->aff_mask);
- free_msix_idx_in_bmap(adap, bmap_idx);
+ cxgb4_free_msix_idx_in_bmap(adap, minfo->idx);
free_irq(minfo->vec, &rxq_info->uldrxq[idx].rspq);
}
}
-static void name_msix_vecs_uld(struct adapter *adap, unsigned int uld_type)
+static void enable_rx_uld(struct adapter *adap, unsigned int uld_type)
{
struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
- int n = sizeof(adap->msix_info_ulds[0].desc);
- unsigned int idx, bmap_idx;
+ int idx;
for_each_uldrxq(rxq_info, idx) {
- bmap_idx = rxq_info->msix_tbl[idx];
-
- snprintf(adap->msix_info_ulds[bmap_idx].desc, n, "%s-%s%d",
- adap->port[0]->name, rxq_info->name, idx);
- }
-}
-
-static void enable_rx(struct adapter *adap, struct sge_rspq *q)
-{
- if (!q)
- return;
+ struct sge_rspq *q = &rxq_info->uldrxq[idx].rspq;
- if (q->handler)
- napi_enable(&q->napi);
-
- /* 0-increment GTS to start the timer and enable interrupts */
- t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
- SEINTARM_V(q->intr_params) |
- INGRESSQID_V(q->cntxt_id));
-}
+ if (!q)
+ continue;
-static void quiesce_rx(struct adapter *adap, struct sge_rspq *q)
-{
- if (q && q->handler)
- napi_disable(&q->napi);
+ cxgb4_enable_rx(adap, q);
+ }
}
-static void enable_rx_uld(struct adapter *adap, unsigned int uld_type)
+static void quiesce_rx_uld(struct adapter *adap, unsigned int uld_type)
{
struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
int idx;
- for_each_uldrxq(rxq_info, idx)
- enable_rx(adap, &rxq_info->uldrxq[idx].rspq);
-}
+ for_each_uldrxq(rxq_info, idx) {
+ struct sge_rspq *q = &rxq_info->uldrxq[idx].rspq;
-static void quiesce_rx_uld(struct adapter *adap, unsigned int uld_type)
-{
- struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
- int idx;
+ if (!q)
+ continue;
- for_each_uldrxq(rxq_info, idx)
- quiesce_rx(adap, &rxq_info->uldrxq[idx].rspq);
+ cxgb4_quiesce_rx(q);
+ }
}
static void
@@ -750,7 +692,6 @@ void cxgb4_register_uld(enum cxgb4_uld type,
if (ret)
goto free_queues;
if (adap->flags & CXGB4_USING_MSIX) {
- name_msix_vecs_uld(adap, type);
ret = request_msix_queue_irqs_uld(adap, type);
if (ret)
goto free_rxq;
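
The ULD rework above replaces the per-queue msix_tbl[] side table with a direct pointer from each Rx queue to its adapter-wide msix_info entry, so unwind and free paths recover the bitmap index from the queue itself. A condensed userspace sketch of that pointer-based bookkeeping; types and sizes are simplified:

    #include <stdbool.h>
    #include <stdio.h>

    #define NVEC 32

    struct msix_info {
        int idx;                    /* slot in the shared bitmap */
        int vec;                    /* interrupt vector number */
    };

    struct rxq {
        struct msix_info *msix;     /* NULL when not using MSI-X */
    };

    static bool bmap[NVEC];         /* shared allocation bitmap */
    static struct msix_info vectors[NVEC];

    static struct msix_info *get_msix(void)
    {
        for (int i = 0; i < NVEC; i++) {
            if (!bmap[i]) {
                bmap[i] = true;
                vectors[i].idx = i;
                return &vectors[i];
            }
        }
        return NULL;
    }

    /* The queue carries its own mapping, so no side table is needed. */
    static void put_msix(struct rxq *q)
    {
        if (q->msix) {
            bmap[q->msix->idx] = false;
            q->msix = NULL;
        }
    }

    int main(void)
    {
        struct rxq q = { .msix = get_msix() };

        printf("got idx %d\n", q.msix ? q.msix->idx : -1);
        put_msix(&q);
        return 0;
    }
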
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index cee582e36134..861b25d28ed6 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -89,6 +89,10 @@ union aopen_entry {
union aopen_entry *next;
};
+struct eotid_entry {
+ void *data;
+};
+
/*
* Holds the size, base address, free list start, etc of the TID, server TID,
* and active-open TID tables. The tables themselves are allocated dynamically.
@@ -126,6 +130,12 @@ struct tid_info {
unsigned int v6_stids_in_use;
unsigned int sftids_in_use;
+ /* ETHOFLD range */
+ struct eotid_entry *eotid_tab;
+ unsigned long *eotid_bmap;
+ unsigned int eotid_base;
+ unsigned int neotids;
+
/* TIDs in the TCAM */
atomic_t tids_in_use;
/* TIDs in the HASH */
@@ -176,6 +186,35 @@ static inline void cxgb4_insert_tid(struct tid_info *t, void *data,
atomic_inc(&t->conns_in_use);
}
+static inline struct eotid_entry *cxgb4_lookup_eotid(struct tid_info *t,
+ u32 eotid)
+{
+ return eotid < t->neotids ? &t->eotid_tab[eotid] : NULL;
+}
+
+static inline int cxgb4_get_free_eotid(struct tid_info *t)
+{
+ int eotid;
+
+ eotid = find_first_zero_bit(t->eotid_bmap, t->neotids);
+ if (eotid >= t->neotids)
+ eotid = -1;
+
+ return eotid;
+}
+
+static inline void cxgb4_alloc_eotid(struct tid_info *t, u32 eotid, void *data)
+{
+ set_bit(eotid, t->eotid_bmap);
+ t->eotid_tab[eotid].data = data;
+}
+
+static inline void cxgb4_free_eotid(struct tid_info *t, u32 eotid)
+{
+ clear_bit(eotid, t->eotid_bmap);
+ t->eotid_tab[eotid].data = NULL;
+}
+
int cxgb4_alloc_atid(struct tid_info *t, void *data);
int cxgb4_alloc_stid(struct tid_info *t, int family, void *data);
int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data);
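
The new EOTID helpers above are a straightforward bitmap allocator: find the first clear bit, set it and stash the caller's data on alloc, clear both on free. A self-contained userspace equivalent, with find_first_zero_bit() replaced by a plain scan:

    #include <stdbool.h>
    #include <stdio.h>

    #define NEOTIDS 256

    static bool eotid_bmap[NEOTIDS];    /* kernel: unsigned long bitmap */
    static void *eotid_tab[NEOTIDS];    /* per-tid private data */

    static int get_free_eotid(void)
    {
        for (int i = 0; i < NEOTIDS; i++)   /* find_first_zero_bit() */
            if (!eotid_bmap[i])
                return i;
        return -1;
    }

    static void alloc_eotid(int eotid, void *data)
    {
        eotid_bmap[eotid] = true;
        eotid_tab[eotid] = data;
    }

    static void free_eotid(int eotid)
    {
        eotid_bmap[eotid] = false;
        eotid_tab[eotid] = NULL;
    }

    int main(void)
    {
        int priv = 42;
        int tid = get_free_eotid();

        if (tid >= 0) {
            alloc_eotid(tid, &priv);
            printf("allocated eotid %d\n", tid);
            free_eotid(tid);
        }
        return 0;
    }
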
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
index 1a407d3c1d67..e9e45006632d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
@@ -351,15 +351,13 @@ exists:
static void _t4_l2e_free(struct l2t_entry *e)
{
struct l2t_data *d;
- struct sk_buff *skb;
if (atomic_read(&e->refcnt) == 0) { /* hasn't been recycled */
if (e->neigh) {
neigh_release(e->neigh);
e->neigh = NULL;
}
- while ((skb = __skb_dequeue(&e->arpq)) != NULL)
- kfree_skb(skb);
+ __skb_queue_purge(&e->arpq);
}
d = container_of(e, struct l2t_data, l2tab[e->idx]);
@@ -370,7 +368,6 @@ static void _t4_l2e_free(struct l2t_entry *e)
static void t4_l2e_free(struct l2t_entry *e)
{
struct l2t_data *d;
- struct sk_buff *skb;
spin_lock_bh(&e->lock);
if (atomic_read(&e->refcnt) == 0) { /* hasn't been recycled */
@@ -378,8 +375,7 @@ static void t4_l2e_free(struct l2t_entry *e)
neigh_release(e->neigh);
e->neigh = NULL;
}
- while ((skb = __skb_dequeue(&e->arpq)) != NULL)
- kfree_skb(skb);
+ __skb_queue_purge(&e->arpq);
}
spin_unlock_bh(&e->lock);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.c b/drivers/net/ethernet/chelsio/cxgb4/sched.c
index 60218dc676a8..3e61bd5d0c29 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sched.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sched.c
@@ -50,6 +50,7 @@ static int t4_sched_class_fw_cmd(struct port_info *pi,
e = &s->tab[p->u.params.class];
switch (op) {
case SCHED_FW_OP_ADD:
+ case SCHED_FW_OP_DEL:
err = t4_sched_params(adap, p->type,
p->u.params.level, p->u.params.mode,
p->u.params.rateunit,
@@ -92,45 +93,69 @@ static int t4_sched_bind_unbind_op(struct port_info *pi, void *arg,
pf = adap->pf;
vf = 0;
+
+ err = t4_set_params(adap, adap->mbox, pf, vf, 1,
+ &fw_param, &fw_class);
+ break;
+ }
+ case SCHED_FLOWC: {
+ struct sched_flowc_entry *fe;
+
+ fe = (struct sched_flowc_entry *)arg;
+
+ fw_class = bind ? fe->param.class : FW_SCHED_CLS_NONE;
+ err = cxgb4_ethofld_send_flowc(adap->port[pi->port_id],
+ fe->param.tid, fw_class);
break;
}
default:
err = -ENOTSUPP;
- goto out;
+ break;
}
- err = t4_set_params(adap, adap->mbox, pf, vf, 1, &fw_param, &fw_class);
-
-out:
return err;
}
-static struct sched_class *t4_sched_queue_lookup(struct port_info *pi,
- const unsigned int qid,
- int *index)
+static void *t4_sched_entry_lookup(struct port_info *pi,
+ enum sched_bind_type type,
+ const u32 val)
{
struct sched_table *s = pi->sched_tbl;
struct sched_class *e, *end;
- struct sched_class *found = NULL;
- int i;
+ void *found = NULL;
- /* Look for a class with matching bound queue parameters */
+ /* Look for an entry with matching @val */
end = &s->tab[s->sched_size];
for (e = &s->tab[0]; e != end; ++e) {
- struct sched_queue_entry *qe;
-
- i = 0;
- if (e->state == SCHED_STATE_UNUSED)
+ if (e->state == SCHED_STATE_UNUSED ||
+ e->bind_type != type)
continue;
- list_for_each_entry(qe, &e->queue_list, list) {
- if (qe->cntxt_id == qid) {
- found = e;
- if (index)
- *index = i;
- break;
+ switch (type) {
+ case SCHED_QUEUE: {
+ struct sched_queue_entry *qe;
+
+ list_for_each_entry(qe, &e->entry_list, list) {
+ if (qe->cntxt_id == val) {
+ found = qe;
+ break;
+ }
+ }
+ break;
+ }
+ case SCHED_FLOWC: {
+ struct sched_flowc_entry *fe;
+
+ list_for_each_entry(fe, &e->entry_list, list) {
+ if (fe->param.tid == val) {
+ found = fe;
+ break;
+ }
}
- i++;
+ break;
+ }
+ default:
+ return NULL;
}
if (found)
@@ -142,52 +167,41 @@ static struct sched_class *t4_sched_queue_lookup(struct port_info *pi,
static int t4_sched_queue_unbind(struct port_info *pi, struct ch_sched_queue *p)
{
- struct adapter *adap = pi->adapter;
- struct sched_class *e;
struct sched_queue_entry *qe = NULL;
+ struct adapter *adap = pi->adapter;
struct sge_eth_txq *txq;
- unsigned int qid;
- int index = -1;
+ struct sched_class *e;
int err = 0;
if (p->queue < 0 || p->queue >= pi->nqsets)
return -ERANGE;
txq = &adap->sge.ethtxq[pi->first_qset + p->queue];
- qid = txq->q.cntxt_id;
- /* Find the existing class that the queue is bound to */
- e = t4_sched_queue_lookup(pi, qid, &index);
- if (e && index >= 0) {
- int i = 0;
-
- list_for_each_entry(qe, &e->queue_list, list) {
- if (i == index)
- break;
- i++;
- }
+ /* Find the existing entry that the queue is bound to */
+ qe = t4_sched_entry_lookup(pi, SCHED_QUEUE, txq->q.cntxt_id);
+ if (qe) {
err = t4_sched_bind_unbind_op(pi, (void *)qe, SCHED_QUEUE,
false);
if (err)
return err;
+ e = &pi->sched_tbl->tab[qe->param.class];
list_del(&qe->list);
kvfree(qe);
- if (atomic_dec_and_test(&e->refcnt)) {
- e->state = SCHED_STATE_UNUSED;
- memset(&e->info, 0, sizeof(e->info));
- }
+ if (atomic_dec_and_test(&e->refcnt))
+ cxgb4_sched_class_free(adap->port[pi->port_id], e->idx);
}
return err;
}
static int t4_sched_queue_bind(struct port_info *pi, struct ch_sched_queue *p)
{
- struct adapter *adap = pi->adapter;
struct sched_table *s = pi->sched_tbl;
- struct sched_class *e;
struct sched_queue_entry *qe = NULL;
+ struct adapter *adap = pi->adapter;
struct sge_eth_txq *txq;
+ struct sched_class *e;
unsigned int qid;
int err = 0;
@@ -215,7 +229,8 @@ static int t4_sched_queue_bind(struct port_info *pi, struct ch_sched_queue *p)
if (err)
goto out_err;
- list_add_tail(&qe->list, &e->queue_list);
+ list_add_tail(&qe->list, &e->entry_list);
+ e->bind_type = SCHED_QUEUE;
atomic_inc(&e->refcnt);
return err;
@@ -224,6 +239,71 @@ out_err:
return err;
}
+static int t4_sched_flowc_unbind(struct port_info *pi, struct ch_sched_flowc *p)
+{
+ struct sched_flowc_entry *fe = NULL;
+ struct adapter *adap = pi->adapter;
+ struct sched_class *e;
+ int err = 0;
+
+ if (p->tid < 0 || p->tid >= adap->tids.neotids)
+ return -ERANGE;
+
+ /* Find the existing entry that the flowc is bound to */
+ fe = t4_sched_entry_lookup(pi, SCHED_FLOWC, p->tid);
+ if (fe) {
+ err = t4_sched_bind_unbind_op(pi, (void *)fe, SCHED_FLOWC,
+ false);
+ if (err)
+ return err;
+
+ e = &pi->sched_tbl->tab[fe->param.class];
+ list_del(&fe->list);
+ kvfree(fe);
+ if (atomic_dec_and_test(&e->refcnt))
+ cxgb4_sched_class_free(adap->port[pi->port_id], e->idx);
+ }
+ return err;
+}
+
+static int t4_sched_flowc_bind(struct port_info *pi, struct ch_sched_flowc *p)
+{
+ struct sched_table *s = pi->sched_tbl;
+ struct sched_flowc_entry *fe = NULL;
+ struct adapter *adap = pi->adapter;
+ struct sched_class *e;
+ int err = 0;
+
+ if (p->tid < 0 || p->tid >= adap->tids.neotids)
+ return -ERANGE;
+
+ fe = kvzalloc(sizeof(*fe), GFP_KERNEL);
+ if (!fe)
+ return -ENOMEM;
+
+ /* Unbind flowc from any existing class */
+ err = t4_sched_flowc_unbind(pi, p);
+ if (err)
+ goto out_err;
+
+ /* Bind flowc to specified class */
+ memcpy(&fe->param, p, sizeof(fe->param));
+
+ e = &s->tab[fe->param.class];
+ err = t4_sched_bind_unbind_op(pi, (void *)fe, SCHED_FLOWC, true);
+ if (err)
+ goto out_err;
+
+ list_add_tail(&fe->list, &e->entry_list);
+ e->bind_type = SCHED_FLOWC;
+ atomic_inc(&e->refcnt);
+ return err;
+
+out_err:
+ kvfree(fe);
+ return err;
+}
+
static void t4_sched_class_unbind_all(struct port_info *pi,
struct sched_class *e,
enum sched_bind_type type)
@@ -235,10 +315,17 @@ static void t4_sched_class_unbind_all(struct port_info *pi,
case SCHED_QUEUE: {
struct sched_queue_entry *qe;
- list_for_each_entry(qe, &e->queue_list, list)
+ list_for_each_entry(qe, &e->entry_list, list)
t4_sched_queue_unbind(pi, &qe->param);
break;
}
+ case SCHED_FLOWC: {
+ struct sched_flowc_entry *fe;
+
+ list_for_each_entry(fe, &e->entry_list, list)
+ t4_sched_flowc_unbind(pi, &fe->param);
+ break;
+ }
default:
break;
}
@@ -262,6 +349,15 @@ static int t4_sched_class_bind_unbind_op(struct port_info *pi, void *arg,
err = t4_sched_queue_unbind(pi, qe);
break;
}
+ case SCHED_FLOWC: {
+ struct ch_sched_flowc *fe = (struct ch_sched_flowc *)arg;
+
+ if (bind)
+ err = t4_sched_flowc_bind(pi, fe);
+ else
+ err = t4_sched_flowc_unbind(pi, fe);
+ break;
+ }
default:
err = -ENOTSUPP;
break;
@@ -299,6 +395,12 @@ int cxgb4_sched_class_bind(struct net_device *dev, void *arg,
class_id = qe->class;
break;
}
+ case SCHED_FLOWC: {
+ struct ch_sched_flowc *fe = (struct ch_sched_flowc *)arg;
+
+ class_id = fe->class;
+ break;
+ }
default:
return -ENOTSUPP;
}
@@ -340,6 +442,12 @@ int cxgb4_sched_class_unbind(struct net_device *dev, void *arg,
class_id = qe->class;
break;
}
+ case SCHED_FLOWC: {
+ struct ch_sched_flowc *fe = (struct ch_sched_flowc *)arg;
+
+ class_id = fe->class;
+ break;
+ }
default:
return -ENOTSUPP;
}
@@ -355,8 +463,8 @@ static struct sched_class *t4_sched_class_lookup(struct port_info *pi,
const struct ch_sched_params *p)
{
struct sched_table *s = pi->sched_tbl;
- struct sched_class *e, *end;
struct sched_class *found = NULL;
+ struct sched_class *e, *end;
if (!p) {
/* Get any available unused class */
@@ -400,7 +508,7 @@ static struct sched_class *t4_sched_class_lookup(struct port_info *pi,
static struct sched_class *t4_sched_class_alloc(struct port_info *pi,
struct ch_sched_params *p)
{
- struct sched_class *e;
+ struct sched_class *e = NULL;
u8 class_id;
int err;
@@ -415,10 +523,13 @@ static struct sched_class *t4_sched_class_alloc(struct port_info *pi,
if (class_id != SCHED_CLS_NONE)
return NULL;
- /* See if there's an exisiting class with same
- * requested sched params
+ /* See if there's an existing class with the same requested
+ * sched params. Classes can only be shared among FLOWC
+ * types. For other types, always request a new class.
*/
- e = t4_sched_class_lookup(pi, p);
+ if (p->u.params.mode == SCHED_CLASS_MODE_FLOW)
+ e = t4_sched_class_lookup(pi, p);
+
if (!e) {
struct ch_sched_params np;
@@ -467,9 +578,57 @@ struct sched_class *cxgb4_sched_class_alloc(struct net_device *dev,
return t4_sched_class_alloc(pi, p);
}
-static void t4_sched_class_free(struct port_info *pi, struct sched_class *e)
+/**
+ * cxgb4_sched_class_free - free a scheduling class
+ * @dev: net_device pointer
+ * @classid: scheduling class id to free
+ *
+ * Frees a scheduling class if there are no users.
+ */
+void cxgb4_sched_class_free(struct net_device *dev, u8 classid)
{
- t4_sched_class_unbind_all(pi, e, SCHED_QUEUE);
+ struct port_info *pi = netdev2pinfo(dev);
+ struct sched_table *s = pi->sched_tbl;
+ struct ch_sched_params p;
+ struct sched_class *e;
+ u32 speed;
+ int ret;
+
+ e = &s->tab[classid];
+ if (!atomic_read(&e->refcnt) && e->state != SCHED_STATE_UNUSED) {
+ /* Port-based rate limiting needs an explicit reset back
+ * to max rate. But we'll do the explicit reset for all
+ * types, instead of just the port-based type, to be on
+ * the safe side.
+ */
+ memcpy(&p, &e->info, sizeof(p));
+ /* Always reset mode to 0. Otherwise, FLOWC mode will
+ * still be enabled even after resetting the traffic
+ * class.
+ */
+ p.u.params.mode = 0;
+ p.u.params.minrate = 0;
+ p.u.params.pktsize = 0;
+
+ ret = t4_get_link_params(pi, NULL, &speed, NULL);
+ if (!ret)
+ p.u.params.maxrate = speed * 1000; /* Mbps to Kbps */
+ else
+ p.u.params.maxrate = SCHED_MAX_RATE_KBPS;
+
+ t4_sched_class_fw_cmd(pi, &p, SCHED_FW_OP_DEL);
+
+ e->state = SCHED_STATE_UNUSED;
+ memset(&e->info, 0, sizeof(e->info));
+ }
+}
+
+static void t4_sched_class_free(struct net_device *dev, struct sched_class *e)
+{
+ struct port_info *pi = netdev2pinfo(dev);
+
+ t4_sched_class_unbind_all(pi, e, e->bind_type);
+ cxgb4_sched_class_free(dev, e->idx);
}
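For context, a minimal caller sketch (hypothetical, not part of this patch) showing how the new alloc/free pairing is intended to be used; the bind step and error handling are elided:

	/* Hypothetical usage sketch: allocate a traffic class, bind/unbind
	 * entries against e->idx, then release it. The free is a no-op
	 * while the refcnt is still held by bound entries.
	 */
	static int example_tc_lifetime(struct net_device *dev,
				       struct ch_sched_params *p)
	{
		struct sched_class *e;

		e = cxgb4_sched_class_alloc(dev, p); /* may share FLOWC classes */
		if (!e)
			return -ENOMEM;

		/* ... cxgb4_sched_class_bind()/unbind() against e->idx ... */

		cxgb4_sched_class_free(dev, e->idx);
		return 0;
	}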
struct sched_table *t4_init_sched(unsigned int sched_size)
@@ -487,7 +646,7 @@ struct sched_table *t4_init_sched(unsigned int sched_size)
memset(&s->tab[i], 0, sizeof(struct sched_class));
s->tab[i].idx = i;
s->tab[i].state = SCHED_STATE_UNUSED;
- INIT_LIST_HEAD(&s->tab[i].queue_list);
+ INIT_LIST_HEAD(&s->tab[i].entry_list);
atomic_set(&s->tab[i].refcnt, 0);
}
return s;
@@ -510,7 +669,7 @@ void t4_cleanup_sched(struct adapter *adap)
e = &s->tab[i];
if (e->state == SCHED_STATE_ACTIVE)
- t4_sched_class_free(pi, e);
+ t4_sched_class_free(adap->port[j], e);
}
kvfree(s);
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.h b/drivers/net/ethernet/chelsio/cxgb4/sched.h
index 168fb4ce3759..e92ff68bdd0a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sched.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/sched.h
@@ -52,10 +52,12 @@ enum {
enum sched_fw_ops {
SCHED_FW_OP_ADD,
+ SCHED_FW_OP_DEL,
};
enum sched_bind_type {
SCHED_QUEUE,
+ SCHED_FLOWC,
};
struct sched_queue_entry {
@@ -64,11 +66,17 @@ struct sched_queue_entry {
struct ch_sched_queue param;
};
+struct sched_flowc_entry {
+ struct list_head list;
+ struct ch_sched_flowc param;
+};
+
struct sched_class {
u8 state;
u8 idx;
struct ch_sched_params info;
- struct list_head queue_list;
+ enum sched_bind_type bind_type;
+ struct list_head entry_list;
atomic_t refcnt;
};
@@ -102,6 +110,7 @@ int cxgb4_sched_class_unbind(struct net_device *dev, void *arg,
struct sched_class *cxgb4_sched_class_alloc(struct net_device *dev,
struct ch_sched_params *p);
+void cxgb4_sched_class_free(struct net_device *dev, u8 classid);
struct sched_table *t4_init_sched(unsigned int size);
void t4_cleanup_sched(struct adapter *adap);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 928bfea5457b..97cda501e7e8 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -55,6 +55,8 @@
#include "t4fw_api.h"
#include "cxgb4_ptp.h"
#include "cxgb4_uld.h"
+#include "cxgb4_tc_mqprio.h"
+#include "sched.h"
/*
* Rx buffer size. We use largish buffers if possible but settle for single
@@ -269,7 +271,6 @@ out_err:
}
EXPORT_SYMBOL(cxgb4_map_skb);
-#ifdef CONFIG_NEED_DMA_MAP_STATE
static void unmap_skb(struct device *dev, const struct sk_buff *skb,
const dma_addr_t *addr)
{
@@ -284,6 +285,7 @@ static void unmap_skb(struct device *dev, const struct sk_buff *skb,
dma_unmap_page(dev, *addr++, skb_frag_size(fp), DMA_TO_DEVICE);
}
+#ifdef CONFIG_NEED_DMA_MAP_STATE
/**
* deferred_unmap_destructor - unmap a packet when it is freed
* @skb: the packet
@@ -298,65 +300,6 @@ static void deferred_unmap_destructor(struct sk_buff *skb)
}
#endif
-static void unmap_sgl(struct device *dev, const struct sk_buff *skb,
- const struct ulptx_sgl *sgl, const struct sge_txq *q)
-{
- const struct ulptx_sge_pair *p;
- unsigned int nfrags = skb_shinfo(skb)->nr_frags;
-
- if (likely(skb_headlen(skb)))
- dma_unmap_single(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0),
- DMA_TO_DEVICE);
- else {
- dma_unmap_page(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0),
- DMA_TO_DEVICE);
- nfrags--;
- }
-
- /*
- * the complexity below is because of the possibility of a wrap-around
- * in the middle of an SGL
- */
- for (p = sgl->sge; nfrags >= 2; nfrags -= 2) {
- if (likely((u8 *)(p + 1) <= (u8 *)q->stat)) {
-unmap: dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
- ntohl(p->len[0]), DMA_TO_DEVICE);
- dma_unmap_page(dev, be64_to_cpu(p->addr[1]),
- ntohl(p->len[1]), DMA_TO_DEVICE);
- p++;
- } else if ((u8 *)p == (u8 *)q->stat) {
- p = (const struct ulptx_sge_pair *)q->desc;
- goto unmap;
- } else if ((u8 *)p + 8 == (u8 *)q->stat) {
- const __be64 *addr = (const __be64 *)q->desc;
-
- dma_unmap_page(dev, be64_to_cpu(addr[0]),
- ntohl(p->len[0]), DMA_TO_DEVICE);
- dma_unmap_page(dev, be64_to_cpu(addr[1]),
- ntohl(p->len[1]), DMA_TO_DEVICE);
- p = (const struct ulptx_sge_pair *)&addr[2];
- } else {
- const __be64 *addr = (const __be64 *)q->desc;
-
- dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
- ntohl(p->len[0]), DMA_TO_DEVICE);
- dma_unmap_page(dev, be64_to_cpu(addr[0]),
- ntohl(p->len[1]), DMA_TO_DEVICE);
- p = (const struct ulptx_sge_pair *)&addr[1];
- }
- }
- if (nfrags) {
- __be64 addr;
-
- if ((u8 *)p == (u8 *)q->stat)
- p = (const struct ulptx_sge_pair *)q->desc;
- addr = (u8 *)p + 16 <= (u8 *)q->stat ? p->addr[0] :
- *(const __be64 *)q->desc;
- dma_unmap_page(dev, be64_to_cpu(addr), ntohl(p->len[0]),
- DMA_TO_DEVICE);
- }
-}
-
/**
* free_tx_desc - reclaims Tx descriptors and their buffers
* @adapter: the adapter
@@ -370,15 +313,16 @@ unmap: dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
void free_tx_desc(struct adapter *adap, struct sge_txq *q,
unsigned int n, bool unmap)
{
- struct tx_sw_desc *d;
unsigned int cidx = q->cidx;
- struct device *dev = adap->pdev_dev;
+ struct tx_sw_desc *d;
d = &q->sdesc[cidx];
while (n--) {
if (d->skb) { /* an SGL is present */
- if (unmap)
- unmap_sgl(dev, d->skb, d->sgl, q);
+ if (unmap && d->addr[0]) {
+ unmap_skb(adap->pdev_dev, d->skb, d->addr);
+ memset(d->addr, 0, sizeof(d->addr));
+ }
dev_consume_skb_any(d->skb);
d->skb = NULL;
}
@@ -790,6 +734,8 @@ static inline int is_eth_imm(const struct sk_buff *skb, unsigned int chip_ver)
chip_ver > CHELSIO_T5) {
hdrlen = sizeof(struct cpl_tx_tnl_lso);
hdrlen += sizeof(struct cpl_tx_pkt_core);
+ } else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
+ return 0;
} else {
hdrlen = skb_shinfo(skb)->gso_size ?
sizeof(struct cpl_tx_pkt_lso_core) : 0;
@@ -831,12 +777,20 @@ static inline unsigned int calc_tx_flits(const struct sk_buff *skb,
*/
flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
if (skb_shinfo(skb)->gso_size) {
- if (skb->encapsulation && chip_ver > CHELSIO_T5)
+ if (skb->encapsulation && chip_ver > CHELSIO_T5) {
hdrlen = sizeof(struct fw_eth_tx_pkt_wr) +
sizeof(struct cpl_tx_tnl_lso);
- else
+ } else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
+ u32 pkt_hdrlen;
+
+ pkt_hdrlen = eth_get_headlen(skb->dev, skb->data,
+ skb_headlen(skb));
+ hdrlen = sizeof(struct fw_eth_tx_eo_wr) +
+ round_up(pkt_hdrlen, 16);
+ } else {
hdrlen = sizeof(struct fw_eth_tx_pkt_wr) +
sizeof(struct cpl_tx_pkt_lso_core);
+ }
hdrlen += sizeof(struct cpl_tx_pkt_core);
flits += (hdrlen / sizeof(__be64));
@@ -1309,6 +1263,35 @@ static inline void t6_fill_tnl_lso(struct sk_buff *skb,
tnl_lso->EthLenOffset_Size = htonl(CPL_TX_TNL_LSO_SIZE_V(skb->len));
}
+static inline void *write_tso_wr(struct adapter *adap, struct sk_buff *skb,
+ struct cpl_tx_pkt_lso_core *lso)
+{
+ int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
+ int l3hdr_len = skb_network_header_len(skb);
+ const struct skb_shared_info *ssi;
+ bool ipv6 = false;
+
+ ssi = skb_shinfo(skb);
+ if (ssi->gso_type & SKB_GSO_TCPV6)
+ ipv6 = true;
+
+ lso->lso_ctrl = htonl(LSO_OPCODE_V(CPL_TX_PKT_LSO) |
+ LSO_FIRST_SLICE_F | LSO_LAST_SLICE_F |
+ LSO_IPV6_V(ipv6) |
+ LSO_ETHHDR_LEN_V(eth_xtra_len / 4) |
+ LSO_IPHDR_LEN_V(l3hdr_len / 4) |
+ LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff));
+ lso->ipid_ofst = htons(0);
+ lso->mss = htons(ssi->gso_size);
+ lso->seqno_offset = htonl(0);
+ if (is_t4(adap->params.chip))
+ lso->len = htonl(skb->len);
+ else
+ lso->len = htonl(LSO_T5_XFER_SIZE_V(skb->len));
+
+ return (void *)(lso + 1);
+}
+
/**
* t4_sge_eth_txq_egress_update - handle Ethernet TX Queue update
* @adap: the adapter
@@ -1347,6 +1330,50 @@ int t4_sge_eth_txq_egress_update(struct adapter *adap, struct sge_eth_txq *eq,
return reclaimed;
}
+static inline int cxgb4_validate_skb(struct sk_buff *skb,
+ struct net_device *dev,
+ u32 min_pkt_len)
+{
+ u32 max_pkt_len;
+
+ /* The chip min packet length is 10 octets but some firmware
+ * commands have a minimum packet length requirement. So, play
+ * safe and reject anything shorter than @min_pkt_len.
+ */
+ if (unlikely(skb->len < min_pkt_len))
+ return -EINVAL;
+
+ /* Discard the packet if the length is greater than mtu */
+ max_pkt_len = ETH_HLEN + dev->mtu;
+
+ if (skb_vlan_tagged(skb))
+ max_pkt_len += VLAN_HLEN;
+
+ if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len)))
+ return -EINVAL;
+
+ return 0;
+}
+
+static void *write_eo_udp_wr(struct sk_buff *skb, struct fw_eth_tx_eo_wr *wr,
+ u32 hdr_len)
+{
+ wr->u.udpseg.type = FW_ETH_TX_EO_TYPE_UDPSEG;
+ wr->u.udpseg.ethlen = skb_network_offset(skb);
+ wr->u.udpseg.iplen = cpu_to_be16(skb_network_header_len(skb));
+ wr->u.udpseg.udplen = sizeof(struct udphdr);
+ wr->u.udpseg.rtplen = 0;
+ wr->u.udpseg.r4 = 0;
+ if (skb_shinfo(skb)->gso_size)
+ wr->u.udpseg.mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
+ else
+ wr->u.udpseg.mss = cpu_to_be16(skb->len - hdr_len);
+ wr->u.udpseg.schedpktsize = wr->u.udpseg.mss;
+ wr->u.udpseg.plen = cpu_to_be32(skb->len - hdr_len);
+
+ return (void *)(wr + 1);
+}
+
/**
* cxgb4_eth_xmit - add a packet to an Ethernet Tx queue
* @skb: the packet
@@ -1356,41 +1383,25 @@ int t4_sge_eth_txq_egress_update(struct adapter *adap, struct sge_eth_txq *eq,
*/
static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
- u32 wr_mid, ctrl0, op;
- u64 cntrl, *end, *sgl;
- int qidx, credits;
- unsigned int flits, ndesc;
- struct adapter *adap;
- struct sge_eth_txq *q;
- const struct port_info *pi;
+ enum cpl_tx_tnl_lso_type tnl_type = TX_TNL_TYPE_OPAQUE;
+ bool ptp_enabled = is_ptp_enabled(skb, dev);
+ unsigned int last_desc, flits, ndesc;
+ u32 wr_mid, ctrl0, op, sgl_off = 0;
+ const struct skb_shared_info *ssi;
+ int len, qidx, credits, ret, left;
+ struct tx_sw_desc *sgl_sdesc;
+ struct fw_eth_tx_eo_wr *eowr;
struct fw_eth_tx_pkt_wr *wr;
struct cpl_tx_pkt_core *cpl;
- const struct skb_shared_info *ssi;
- dma_addr_t addr[MAX_SKB_FRAGS + 1];
+ const struct port_info *pi;
bool immediate = false;
- int len, max_pkt_len;
- bool ptp_enabled = is_ptp_enabled(skb, dev);
+ u64 cntrl, *end, *sgl;
+ struct sge_eth_txq *q;
unsigned int chip_ver;
- enum cpl_tx_tnl_lso_type tnl_type = TX_TNL_TYPE_OPAQUE;
-
-#ifdef CONFIG_CHELSIO_T4_FCOE
- int err;
-#endif /* CONFIG_CHELSIO_T4_FCOE */
-
- /*
- * The chip min packet length is 10 octets but play safe and reject
- * anything shorter than an Ethernet header.
- */
- if (unlikely(skb->len < ETH_HLEN)) {
-out_free: dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
+ struct adapter *adap;
- /* Discard the packet if the length is greater than mtu */
- max_pkt_len = ETH_HLEN + dev->mtu;
- if (skb_vlan_tagged(skb))
- max_pkt_len += VLAN_HLEN;
- if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len)))
+ ret = cxgb4_validate_skb(skb, dev, ETH_HLEN);
+ if (ret)
goto out_free;
pi = netdev_priv(dev);
@@ -1421,8 +1432,8 @@ out_free: dev_kfree_skb_any(skb);
cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
#ifdef CONFIG_CHELSIO_T4_FCOE
- err = cxgb_fcoe_offload(skb, adap, pi, &cntrl);
- if (unlikely(err == -ENOTSUPP)) {
+ ret = cxgb_fcoe_offload(skb, adap, pi, &cntrl);
+ if (unlikely(ret == -ENOTSUPP)) {
if (ptp_enabled)
spin_unlock(&adap->ptp_lock);
goto out_free;
@@ -1450,8 +1461,14 @@ out_free: dev_kfree_skb_any(skb);
if (skb->encapsulation && chip_ver > CHELSIO_T5)
tnl_type = cxgb_encap_offload_supported(skb);
+ last_desc = q->q.pidx + ndesc - 1;
+ if (last_desc >= q->q.size)
+ last_desc -= q->q.size;
+ sgl_sdesc = &q->q.sdesc[last_desc];
+
if (!immediate &&
- unlikely(cxgb4_map_skb(adap->pdev_dev, skb, addr) < 0)) {
+ unlikely(cxgb4_map_skb(adap->pdev_dev, skb, sgl_sdesc->addr) < 0)) {
+ memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr));
q->mapping_err++;
if (ptp_enabled)
spin_unlock(&adap->ptp_lock);
@@ -1482,17 +1499,18 @@ out_free: dev_kfree_skb_any(skb);
}
wr = (void *)&q->q.desc[q->q.pidx];
+ eowr = (void *)&q->q.desc[q->q.pidx];
wr->equiq_to_len16 = htonl(wr_mid);
wr->r3 = cpu_to_be64(0);
- end = (u64 *)wr + flits;
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
+ end = (u64 *)eowr + flits;
+ else
+ end = (u64 *)wr + flits;
len = immediate ? skb->len : 0;
len += sizeof(*cpl);
- if (ssi->gso_size) {
+ if (ssi->gso_size && !(ssi->gso_type & SKB_GSO_UDP_L4)) {
struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);
- bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0;
- int l3hdr_len = skb_network_header_len(skb);
- int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
struct cpl_tx_tnl_lso *tnl_lso = (void *)(wr + 1);
if (tnl_type)
@@ -1519,46 +1537,33 @@ out_free: dev_kfree_skb_any(skb);
if (skb->ip_summed == CHECKSUM_PARTIAL)
cntrl = hwcsum(adap->params.chip, skb);
} else {
- lso->lso_ctrl = htonl(LSO_OPCODE_V(CPL_TX_PKT_LSO) |
- LSO_FIRST_SLICE_F | LSO_LAST_SLICE_F |
- LSO_IPV6_V(v6) |
- LSO_ETHHDR_LEN_V(eth_xtra_len / 4) |
- LSO_IPHDR_LEN_V(l3hdr_len / 4) |
- LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff));
- lso->ipid_ofst = htons(0);
- lso->mss = htons(ssi->gso_size);
- lso->seqno_offset = htonl(0);
- if (is_t4(adap->params.chip))
- lso->len = htonl(skb->len);
- else
- lso->len = htonl(LSO_T5_XFER_SIZE_V(skb->len));
- cpl = (void *)(lso + 1);
-
- if (CHELSIO_CHIP_VERSION(adap->params.chip)
- <= CHELSIO_T5)
- cntrl = TXPKT_ETHHDR_LEN_V(eth_xtra_len);
- else
- cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len);
-
- cntrl |= TXPKT_CSUM_TYPE_V(v6 ?
- TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
- TXPKT_IPHDR_LEN_V(l3hdr_len);
+ cpl = write_tso_wr(adap, skb, lso);
+ cntrl = hwcsum(adap->params.chip, skb);
}
sgl = (u64 *)(cpl + 1); /* sgl start here */
- if (unlikely((u8 *)sgl >= (u8 *)q->q.stat)) {
- /* If current position is already at the end of the
- * txq, reset the current to point to start of the queue
- * and update the end ptr as well.
- */
- if (sgl == (u64 *)q->q.stat) {
- int left = (u8 *)end - (u8 *)q->q.stat;
-
- end = (void *)q->q.desc + left;
- sgl = (void *)q->q.desc;
- }
- }
q->tso++;
q->tx_cso += ssi->gso_segs;
+ } else if (ssi->gso_size) {
+ u64 *start;
+ u32 hdrlen;
+
+ hdrlen = eth_get_headlen(dev, skb->data, skb_headlen(skb));
+ len += hdrlen;
+ wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_EO_WR) |
+ FW_ETH_TX_EO_WR_IMMDLEN_V(len));
+ cpl = write_eo_udp_wr(skb, eowr, hdrlen);
+ cntrl = hwcsum(adap->params.chip, skb);
+
+ start = (u64 *)(cpl + 1);
+ sgl = (u64 *)inline_tx_skb_header(skb, &q->q, (void *)start,
+ hdrlen);
+ if (unlikely(start > sgl)) {
+ left = (u8 *)end - (u8 *)q->q.stat;
+ end = (void *)q->q.desc + left;
+ }
+ sgl_off = hdrlen;
+ q->uso++;
+ q->tx_cso += ssi->gso_segs;
} else {
if (ptp_enabled)
op = FW_PTP_TX_PKT_WR;
@@ -1575,6 +1580,16 @@ out_free: dev_kfree_skb_any(skb);
}
}
+ if (unlikely((u8 *)sgl >= (u8 *)q->q.stat)) {
+ /* If current position is already at the end of the
+ * txq, reset the current to point to start of the queue
+ * and update the end ptr as well.
+ */
+ left = (u8 *)end - (u8 *)q->q.stat;
+ end = (void *)q->q.desc + left;
+ sgl = (void *)q->q.desc;
+ }
+
if (skb_vlan_tag_present(skb)) {
q->vlan_ins++;
cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
@@ -1604,16 +1619,10 @@ out_free: dev_kfree_skb_any(skb);
cxgb4_inline_tx_skb(skb, &q->q, sgl);
dev_consume_skb_any(skb);
} else {
- int last_desc;
-
- cxgb4_write_sgl(skb, &q->q, (void *)sgl, end, 0, addr);
+ cxgb4_write_sgl(skb, &q->q, (void *)sgl, end, sgl_off,
+ sgl_sdesc->addr);
skb_orphan(skb);
-
- last_desc = q->q.pidx + ndesc - 1;
- if (last_desc >= q->q.size)
- last_desc -= q->q.size;
- q->q.sdesc[last_desc].skb = skb;
- q->q.sdesc[last_desc].sgl = (struct ulptx_sgl *)sgl;
+ sgl_sdesc->skb = skb;
}
txq_advance(&q->q, ndesc);
@@ -1622,6 +1631,10 @@ out_free: dev_kfree_skb_any(skb);
if (ptp_enabled)
spin_unlock(&adap->ptp_lock);
return NETDEV_TX_OK;
+
+out_free:
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
}
/* Constants ... */
@@ -1707,35 +1720,28 @@ static inline unsigned int t4vf_calc_tx_flits(const struct sk_buff *skb)
static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb,
struct net_device *dev)
{
- dma_addr_t addr[MAX_SKB_FRAGS + 1];
+ unsigned int last_desc, flits, ndesc;
const struct skb_shared_info *ssi;
struct fw_eth_tx_pkt_vm_wr *wr;
- int qidx, credits, max_pkt_len;
+ struct tx_sw_desc *sgl_sdesc;
struct cpl_tx_pkt_core *cpl;
const struct port_info *pi;
- unsigned int flits, ndesc;
struct sge_eth_txq *txq;
struct adapter *adapter;
+ int qidx, credits, ret;
+ size_t fw_hdr_copy_len;
u64 cntrl, *end;
u32 wr_mid;
- const size_t fw_hdr_copy_len = sizeof(wr->ethmacdst) +
- sizeof(wr->ethmacsrc) +
- sizeof(wr->ethtype) +
- sizeof(wr->vlantci);
/* The chip minimum packet length is 10 octets but the firmware
* command that we are using requires that we copy the Ethernet header
* (including the VLAN tag) into the header so we reject anything
* smaller than that ...
*/
- if (unlikely(skb->len < fw_hdr_copy_len))
- goto out_free;
-
- /* Discard the packet if the length is greater than mtu */
- max_pkt_len = ETH_HLEN + dev->mtu;
- if (skb_vlan_tag_present(skb))
- max_pkt_len += VLAN_HLEN;
- if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len)))
+ fw_hdr_copy_len = sizeof(wr->ethmacdst) + sizeof(wr->ethmacsrc) +
+ sizeof(wr->ethtype) + sizeof(wr->vlantci);
+ ret = cxgb4_validate_skb(skb, dev, fw_hdr_copy_len);
+ if (ret)
goto out_free;
/* Figure out which TX Queue we're going to use. */
@@ -1771,12 +1777,19 @@ static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb,
return NETDEV_TX_BUSY;
}
+ last_desc = txq->q.pidx + ndesc - 1;
+ if (last_desc >= txq->q.size)
+ last_desc -= txq->q.size;
+ sgl_sdesc = &txq->q.sdesc[last_desc];
+
if (!t4vf_is_eth_imm(skb) &&
- unlikely(cxgb4_map_skb(adapter->pdev_dev, skb, addr) < 0)) {
+ unlikely(cxgb4_map_skb(adapter->pdev_dev, skb,
+ sgl_sdesc->addr) < 0)) {
/* We need to map the skb into PCI DMA space (because it can't
* be in-lined directly into the Work Request) and the mapping
* operation failed. Record the error and drop the packet.
*/
+ memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr));
txq->mapping_err++;
goto out_free;
}
@@ -1951,7 +1964,6 @@ static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb,
*/
struct ulptx_sgl *sgl = (struct ulptx_sgl *)(cpl + 1);
struct sge_txq *tq = &txq->q;
- int last_desc;
/* If the Work Request header was an exact multiple of our TX
* Descriptor length, then it's possible that the starting SGL
@@ -1965,14 +1977,9 @@ static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb,
((void *)end - (void *)tq->stat));
}
- cxgb4_write_sgl(skb, tq, sgl, end, 0, addr);
+ cxgb4_write_sgl(skb, tq, sgl, end, 0, sgl_sdesc->addr);
skb_orphan(skb);
-
- last_desc = tq->pidx + ndesc - 1;
- if (last_desc >= tq->size)
- last_desc -= tq->size;
- tq->sdesc[last_desc].skb = skb;
- tq->sdesc[last_desc].sgl = sgl;
+ sgl_sdesc->skb = skb;
}
/* Advance our internal TX Queue state, tell the hardware about
@@ -1991,34 +1998,473 @@ out_free:
return NETDEV_TX_OK;
}
+/**
+ * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
+ * @q: the SGE control Tx queue
+ *
+ * This is a variant of cxgb4_reclaim_completed_tx() that is used
+ * for Tx queues that send only immediate data (presently just
+ * the control queues) and thus do not have any sk_buffs to release.
+ */
+static inline void reclaim_completed_tx_imm(struct sge_txq *q)
+{
+ int hw_cidx = ntohs(READ_ONCE(q->stat->cidx));
+ int reclaim = hw_cidx - q->cidx;
+
+ if (reclaim < 0)
+ reclaim += q->size;
+
+ q->in_use -= reclaim;
+ q->cidx = hw_cidx;
+}
+
+static inline void eosw_txq_advance_index(u32 *idx, u32 n, u32 max)
+{
+ u32 val = *idx + n;
+
+ if (val >= max)
+ val -= max;
+
+ *idx = val;
+}
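A quick sanity check of the wraparound helper (illustrative values); the same modular arithmetic, without a division, underlies the cidx reclaim above:

	u32 idx = 30;

	eosw_txq_advance_index(&idx, 5, 32);	/* idx is now 3: 30 + 5 - 32 */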
+
+void cxgb4_eosw_txq_free_desc(struct adapter *adap,
+ struct sge_eosw_txq *eosw_txq, u32 ndesc)
+{
+ struct tx_sw_desc *d;
+
+ d = &eosw_txq->desc[eosw_txq->last_cidx];
+ while (ndesc--) {
+ if (d->skb) {
+ if (d->addr[0]) {
+ unmap_skb(adap->pdev_dev, d->skb, d->addr);
+ memset(d->addr, 0, sizeof(d->addr));
+ }
+ dev_consume_skb_any(d->skb);
+ d->skb = NULL;
+ }
+ eosw_txq_advance_index(&eosw_txq->last_cidx, 1,
+ eosw_txq->ndesc);
+ d = &eosw_txq->desc[eosw_txq->last_cidx];
+ }
+}
+
+static inline void eosw_txq_advance(struct sge_eosw_txq *eosw_txq, u32 n)
+{
+ eosw_txq_advance_index(&eosw_txq->pidx, n, eosw_txq->ndesc);
+ eosw_txq->inuse += n;
+}
+
+static inline int eosw_txq_enqueue(struct sge_eosw_txq *eosw_txq,
+ struct sk_buff *skb)
+{
+ if (eosw_txq->inuse == eosw_txq->ndesc)
+ return -ENOMEM;
+
+ eosw_txq->desc[eosw_txq->pidx].skb = skb;
+ return 0;
+}
+
+static inline struct sk_buff *eosw_txq_peek(struct sge_eosw_txq *eosw_txq)
+{
+ return eosw_txq->desc[eosw_txq->last_pidx].skb;
+}
+
+static inline u8 ethofld_calc_tx_flits(struct adapter *adap,
+ struct sk_buff *skb, u32 hdr_len)
+{
+ u8 flits, nsgl = 0;
+ u32 wrlen;
+
+ wrlen = sizeof(struct fw_eth_tx_eo_wr) + sizeof(struct cpl_tx_pkt_core);
+ if (skb_shinfo(skb)->gso_size &&
+ !(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4))
+ wrlen += sizeof(struct cpl_tx_pkt_lso_core);
+
+ wrlen += roundup(hdr_len, 16);
+
+ /* Packet headers + WR + CPLs */
+ flits = DIV_ROUND_UP(wrlen, 8);
+
+ if (skb_shinfo(skb)->nr_frags > 0) {
+ if (skb_headlen(skb) - hdr_len)
+ nsgl = sgl_len(skb_shinfo(skb)->nr_frags + 1);
+ else
+ nsgl = sgl_len(skb_shinfo(skb)->nr_frags);
+ } else if (skb->len - hdr_len) {
+ nsgl = sgl_len(1);
+ }
+
+ return flits + nsgl;
+}
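A worked reading of the flit math above, with struct sizes left symbolic since they vary by firmware interface version:

	/* Illustrative: for a TSO skb, wrlen = sizeof(WR) + sizeof(CPL) +
	 * sizeof(LSO) + roundup(hdr_len, 16); flits = DIV_ROUND_UP(wrlen, 8)
	 * covers the immediate headers, and sgl_len() adds the SGL flits
	 * for the payload (one extra entry when linear data extends past
	 * hdr_len).
	 */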
+
+static inline void *write_eo_wr(struct adapter *adap,
+ struct sge_eosw_txq *eosw_txq,
+ struct sk_buff *skb, struct fw_eth_tx_eo_wr *wr,
+ u32 hdr_len, u32 wrlen)
+{
+ const struct skb_shared_info *ssi = skb_shinfo(skb);
+ struct cpl_tx_pkt_core *cpl;
+ u32 immd_len, wrlen16;
+ bool compl = false;
+ u8 ver, proto;
+
+ ver = ip_hdr(skb)->version;
+ proto = (ver == 6) ? ipv6_hdr(skb)->nexthdr : ip_hdr(skb)->protocol;
+
+ wrlen16 = DIV_ROUND_UP(wrlen, 16);
+ immd_len = sizeof(struct cpl_tx_pkt_core);
+ if (skb_shinfo(skb)->gso_size &&
+ !(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4))
+ immd_len += sizeof(struct cpl_tx_pkt_lso_core);
+ immd_len += hdr_len;
+
+ if (!eosw_txq->ncompl ||
+ eosw_txq->last_compl >= adap->params.ofldq_wr_cred / 2) {
+ compl = true;
+ eosw_txq->ncompl++;
+ eosw_txq->last_compl = 0;
+ }
+
+ wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_EO_WR) |
+ FW_ETH_TX_EO_WR_IMMDLEN_V(immd_len) |
+ FW_WR_COMPL_V(compl));
+ wr->equiq_to_len16 = cpu_to_be32(FW_WR_LEN16_V(wrlen16) |
+ FW_WR_FLOWID_V(eosw_txq->hwtid));
+ wr->r3 = 0;
+ if (proto == IPPROTO_UDP) {
+ cpl = write_eo_udp_wr(skb, wr, hdr_len);
+ } else {
+ wr->u.tcpseg.type = FW_ETH_TX_EO_TYPE_TCPSEG;
+ wr->u.tcpseg.ethlen = skb_network_offset(skb);
+ wr->u.tcpseg.iplen = cpu_to_be16(skb_network_header_len(skb));
+ wr->u.tcpseg.tcplen = tcp_hdrlen(skb);
+ wr->u.tcpseg.tsclk_tsoff = 0;
+ wr->u.tcpseg.r4 = 0;
+ wr->u.tcpseg.r5 = 0;
+ wr->u.tcpseg.plen = cpu_to_be32(skb->len - hdr_len);
+
+ if (ssi->gso_size) {
+ struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);
+
+ wr->u.tcpseg.mss = cpu_to_be16(ssi->gso_size);
+ cpl = write_tso_wr(adap, skb, lso);
+ } else {
+ wr->u.tcpseg.mss = cpu_to_be16(0xffff);
+ cpl = (void *)(wr + 1);
+ }
+ }
+
+ eosw_txq->cred -= wrlen16;
+ eosw_txq->last_compl += wrlen16;
+ return cpl;
+}
+
+static void ethofld_hard_xmit(struct net_device *dev,
+ struct sge_eosw_txq *eosw_txq)
+{
+ struct port_info *pi = netdev2pinfo(dev);
+ struct adapter *adap = netdev2adap(dev);
+ u32 wrlen, wrlen16, hdr_len, data_len;
+ enum sge_eosw_state next_state;
+ u64 cntrl, *start, *end, *sgl;
+ struct sge_eohw_txq *eohw_txq;
+ struct cpl_tx_pkt_core *cpl;
+ struct fw_eth_tx_eo_wr *wr;
+ bool skip_eotx_wr = false;
+ struct tx_sw_desc *d;
+ struct sk_buff *skb;
+ u8 flits, ndesc;
+ int left;
+
+ eohw_txq = &adap->sge.eohw_txq[eosw_txq->hwqid];
+ spin_lock(&eohw_txq->lock);
+ reclaim_completed_tx_imm(&eohw_txq->q);
+
+ d = &eosw_txq->desc[eosw_txq->last_pidx];
+ skb = d->skb;
+ skb_tx_timestamp(skb);
+
+ wr = (struct fw_eth_tx_eo_wr *)&eohw_txq->q.desc[eohw_txq->q.pidx];
+ if (unlikely(eosw_txq->state != CXGB4_EO_STATE_ACTIVE &&
+ eosw_txq->last_pidx == eosw_txq->flowc_idx)) {
+ hdr_len = skb->len;
+ data_len = 0;
+ flits = DIV_ROUND_UP(hdr_len, 8);
+ if (eosw_txq->state == CXGB4_EO_STATE_FLOWC_OPEN_SEND)
+ next_state = CXGB4_EO_STATE_FLOWC_OPEN_REPLY;
+ else
+ next_state = CXGB4_EO_STATE_FLOWC_CLOSE_REPLY;
+ skip_eotx_wr = true;
+ } else {
+ hdr_len = eth_get_headlen(dev, skb->data, skb_headlen(skb));
+ data_len = skb->len - hdr_len;
+ flits = ethofld_calc_tx_flits(adap, skb, hdr_len);
+ }
+ ndesc = flits_to_desc(flits);
+ wrlen = flits * 8;
+ wrlen16 = DIV_ROUND_UP(wrlen, 16);
+
+ /* If there aren't enough CPL credits, then wait for
+ * credits to come back and retry
+ */
+ if (unlikely(wrlen16 > eosw_txq->cred))
+ goto out_unlock;
+
+ if (unlikely(skip_eotx_wr)) {
+ start = (u64 *)wr;
+ eosw_txq->state = next_state;
+ goto write_wr_headers;
+ }
+
+ cpl = write_eo_wr(adap, eosw_txq, skb, wr, hdr_len, wrlen);
+ cntrl = hwcsum(adap->params.chip, skb);
+ if (skb_vlan_tag_present(skb))
+ cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
+
+ cpl->ctrl0 = cpu_to_be32(TXPKT_OPCODE_V(CPL_TX_PKT_XT) |
+ TXPKT_INTF_V(pi->tx_chan) |
+ TXPKT_PF_V(adap->pf));
+ cpl->pack = 0;
+ cpl->len = cpu_to_be16(skb->len);
+ cpl->ctrl1 = cpu_to_be64(cntrl);
+
+ start = (u64 *)(cpl + 1);
+
+write_wr_headers:
+ sgl = (u64 *)inline_tx_skb_header(skb, &eohw_txq->q, (void *)start,
+ hdr_len);
+ if (data_len) {
+ if (unlikely(cxgb4_map_skb(adap->pdev_dev, skb, d->addr))) {
+ memset(d->addr, 0, sizeof(d->addr));
+ eohw_txq->mapping_err++;
+ goto out_unlock;
+ }
+
+ end = (u64 *)wr + flits;
+ if (unlikely(start > sgl)) {
+ left = (u8 *)end - (u8 *)eohw_txq->q.stat;
+ end = (void *)eohw_txq->q.desc + left;
+ }
+
+ if (unlikely((u8 *)sgl >= (u8 *)eohw_txq->q.stat)) {
+ /* If current position is already at the end of the
+ * txq, reset the current to point to start of the queue
+ * and update the end ptr as well.
+ */
+ left = (u8 *)end - (u8 *)eohw_txq->q.stat;
+
+ end = (void *)eohw_txq->q.desc + left;
+ sgl = (void *)eohw_txq->q.desc;
+ }
+
+ cxgb4_write_sgl(skb, &eohw_txq->q, (void *)sgl, end, hdr_len,
+ d->addr);
+ }
+
+ if (skb_shinfo(skb)->gso_size) {
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
+ eohw_txq->uso++;
+ else
+ eohw_txq->tso++;
+ eohw_txq->tx_cso += skb_shinfo(skb)->gso_segs;
+ } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ eohw_txq->tx_cso++;
+ }
+
+ if (skb_vlan_tag_present(skb))
+ eohw_txq->vlan_ins++;
+
+ txq_advance(&eohw_txq->q, ndesc);
+ cxgb4_ring_tx_db(adap, &eohw_txq->q, ndesc);
+ eosw_txq_advance_index(&eosw_txq->last_pidx, 1, eosw_txq->ndesc);
+
+out_unlock:
+ spin_unlock(&eohw_txq->lock);
+}
+
+static void ethofld_xmit(struct net_device *dev, struct sge_eosw_txq *eosw_txq)
+{
+ struct sk_buff *skb;
+ int pktcount;
+
+ switch (eosw_txq->state) {
+ case CXGB4_EO_STATE_ACTIVE:
+ case CXGB4_EO_STATE_FLOWC_OPEN_SEND:
+ case CXGB4_EO_STATE_FLOWC_CLOSE_SEND:
+ pktcount = eosw_txq->pidx - eosw_txq->last_pidx;
+ if (pktcount < 0)
+ pktcount += eosw_txq->ndesc;
+ break;
+ case CXGB4_EO_STATE_FLOWC_OPEN_REPLY:
+ case CXGB4_EO_STATE_FLOWC_CLOSE_REPLY:
+ case CXGB4_EO_STATE_CLOSED:
+ default:
+ return;
+ }
+
+ while (pktcount--) {
+ skb = eosw_txq_peek(eosw_txq);
+ if (!skb) {
+ eosw_txq_advance_index(&eosw_txq->last_pidx, 1,
+ eosw_txq->ndesc);
+ continue;
+ }
+
+ ethofld_hard_xmit(dev, eosw_txq);
+ }
+}
+
+static netdev_tx_t cxgb4_ethofld_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct cxgb4_tc_port_mqprio *tc_port_mqprio;
+ struct port_info *pi = netdev2pinfo(dev);
+ struct adapter *adap = netdev2adap(dev);
+ struct sge_eosw_txq *eosw_txq;
+ u32 qid;
+ int ret;
+
+ ret = cxgb4_validate_skb(skb, dev, ETH_HLEN);
+ if (ret)
+ goto out_free;
+
+ tc_port_mqprio = &adap->tc_mqprio->port_mqprio[pi->port_id];
+ qid = skb_get_queue_mapping(skb) - pi->nqsets;
+ eosw_txq = &tc_port_mqprio->eosw_txq[qid];
+ spin_lock_bh(&eosw_txq->lock);
+ if (eosw_txq->state != CXGB4_EO_STATE_ACTIVE)
+ goto out_unlock;
+
+ ret = eosw_txq_enqueue(eosw_txq, skb);
+ if (ret)
+ goto out_unlock;
+
+ /* SKB is queued for processing until credits are available.
+ * So, call the destructor now and we'll free the skb later
+ * after it has been successfully transmitted.
+ */
+ skb_orphan(skb);
+
+ eosw_txq_advance(eosw_txq, 1);
+ ethofld_xmit(dev, eosw_txq);
+ spin_unlock_bh(&eosw_txq->lock);
+ return NETDEV_TX_OK;
+
+out_unlock:
+ spin_unlock_bh(&eosw_txq->lock);
+out_free:
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+}
+
netdev_tx_t t4_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct port_info *pi = netdev_priv(dev);
+ u16 qid = skb_get_queue_mapping(skb);
if (unlikely(pi->eth_flags & PRIV_FLAG_PORT_TX_VM))
return cxgb4_vf_eth_xmit(skb, dev);
+ if (unlikely(qid >= pi->nqsets))
+ return cxgb4_ethofld_xmit(skb, dev);
+
return cxgb4_eth_xmit(skb, dev);
}
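The dispatch above can be read as follows (illustrative, assuming pi->nqsets = 8):

	/* Skbs mapped to queues 0..7 take the regular cxgb4_eth_xmit()
	 * path, while higher queue ids (the extra mqprio traffic classes)
	 * are steered to cxgb4_ethofld_xmit(); PRIV_FLAG_PORT_TX_VM
	 * overrides both.
	 */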
/**
- * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
- * @q: the SGE control Tx queue
+ * cxgb4_ethofld_send_flowc - Send ETHOFLD flowc request to bind eotid to tc.
+ * @dev: netdevice
+ * @eotid: ETHOFLD tid to bind/unbind
+ * @tc: traffic class. If set to FW_SCHED_CLS_NONE, then unbinds the @eotid
*
- * This is a variant of cxgb4_reclaim_completed_tx() that is used
- * for Tx queues that send only immediate data (presently just
- * the control queues) and thus do not have any sk_buffs to release.
+ * Send a FLOWC work request to bind an ETHOFLD TID to a traffic class.
+ * If @tc is set to FW_SCHED_CLS_NONE, then the @eotid is unbound from
+ * a traffic class.
*/
-static inline void reclaim_completed_tx_imm(struct sge_txq *q)
+int cxgb4_ethofld_send_flowc(struct net_device *dev, u32 eotid, u32 tc)
{
- int hw_cidx = ntohs(READ_ONCE(q->stat->cidx));
- int reclaim = hw_cidx - q->cidx;
+ struct port_info *pi = netdev2pinfo(dev);
+ struct adapter *adap = netdev2adap(dev);
+ enum sge_eosw_state next_state;
+ struct sge_eosw_txq *eosw_txq;
+ u32 len, len16, nparams = 6;
+ struct fw_flowc_wr *flowc;
+ struct eotid_entry *entry;
+ struct sge_ofld_rxq *rxq;
+ struct sk_buff *skb;
+ int ret = 0;
- if (reclaim < 0)
- reclaim += q->size;
+ len = sizeof(*flowc) + sizeof(struct fw_flowc_mnemval) * nparams;
+ len16 = DIV_ROUND_UP(len, 16);
- q->in_use -= reclaim;
- q->cidx = hw_cidx;
+ entry = cxgb4_lookup_eotid(&adap->tids, eotid);
+ if (!entry)
+ return -ENOMEM;
+
+ eosw_txq = (struct sge_eosw_txq *)entry->data;
+ if (!eosw_txq)
+ return -ENOMEM;
+
+ skb = alloc_skb(len, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ spin_lock_bh(&eosw_txq->lock);
+ if (tc != FW_SCHED_CLS_NONE) {
+ if (eosw_txq->state != CXGB4_EO_STATE_CLOSED)
+ goto out_unlock;
+
+ next_state = CXGB4_EO_STATE_FLOWC_OPEN_SEND;
+ } else {
+ if (eosw_txq->state != CXGB4_EO_STATE_ACTIVE)
+ goto out_unlock;
+
+ next_state = CXGB4_EO_STATE_FLOWC_CLOSE_SEND;
+ }
+
+ flowc = __skb_put(skb, len);
+ memset(flowc, 0, len);
+
+ rxq = &adap->sge.eohw_rxq[eosw_txq->hwqid];
+ flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(len16) |
+ FW_WR_FLOWID_V(eosw_txq->hwtid));
+ flowc->op_to_nparams = cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) |
+ FW_FLOWC_WR_NPARAMS_V(nparams) |
+ FW_WR_COMPL_V(1));
+ flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
+ flowc->mnemval[0].val = cpu_to_be32(FW_PFVF_CMD_PFN_V(adap->pf));
+ flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
+ flowc->mnemval[1].val = cpu_to_be32(pi->tx_chan);
+ flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
+ flowc->mnemval[2].val = cpu_to_be32(pi->tx_chan);
+ flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
+ flowc->mnemval[3].val = cpu_to_be32(rxq->rspq.abs_id);
+ flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SCHEDCLASS;
+ flowc->mnemval[4].val = cpu_to_be32(tc);
+ flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_EOSTATE;
+ flowc->mnemval[5].val = cpu_to_be32(tc == FW_SCHED_CLS_NONE ?
+ FW_FLOWC_MNEM_EOSTATE_CLOSING :
+ FW_FLOWC_MNEM_EOSTATE_ESTABLISHED);
+
+ eosw_txq->cred -= len16;
+ eosw_txq->ncompl++;
+ eosw_txq->last_compl = 0;
+
+ ret = eosw_txq_enqueue(eosw_txq, skb);
+ if (ret) {
+ dev_consume_skb_any(skb);
+ goto out_unlock;
+ }
+
+ eosw_txq->state = next_state;
+ eosw_txq->flowc_idx = eosw_txq->pidx;
+ eosw_txq_advance(eosw_txq, 1);
+ ethofld_xmit(dev, eosw_txq);
+
+out_unlock:
+ spin_unlock_bh(&eosw_txq->lock);
+ return ret;
}
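A hypothetical caller sketch for the new request (not part of this patch); note the call is asynchronous, with completion signalled via the CPL_FW4_ACK handler below:

	static int example_flowc_bind_unbind(struct net_device *dev, u32 eotid)
	{
		int ret;

		ret = cxgb4_ethofld_send_flowc(dev, eotid, 3); /* bind to TC 3 */
		if (ret)
			return ret;

		/* ... transmit on the bound queue, wait for completion ... */

		return cxgb4_ethofld_send_flowc(dev, eotid, FW_SCHED_CLS_NONE);
	}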
/**
@@ -3311,6 +3757,112 @@ static int napi_rx_handler(struct napi_struct *napi, int budget)
return work_done;
}
+void cxgb4_ethofld_restart(unsigned long data)
+{
+ struct sge_eosw_txq *eosw_txq = (struct sge_eosw_txq *)data;
+ int pktcount;
+
+ spin_lock(&eosw_txq->lock);
+ pktcount = eosw_txq->cidx - eosw_txq->last_cidx;
+ if (pktcount < 0)
+ pktcount += eosw_txq->ndesc;
+
+ if (pktcount) {
+ cxgb4_eosw_txq_free_desc(netdev2adap(eosw_txq->netdev),
+ eosw_txq, pktcount);
+ eosw_txq->inuse -= pktcount;
+ }
+
+ /* There may be some packets waiting for completions. So,
+ * attempt to send these packets now.
+ */
+ ethofld_xmit(eosw_txq->netdev, eosw_txq);
+ spin_unlock(&eosw_txq->lock);
+}
+
+/**
+ * cxgb4_ethofld_rx_handler - Process ETHOFLD Tx completions
+ * @q: the response queue that received the packet
+ * @rsp: the response queue descriptor holding the CPL message
+ * @si: the gather list of packet fragments
+ *
+ * Process an ETHOFLD Tx completion. Increment the cidx here, but
+ * free up the descriptors in a tasklet later.
+ */
+int cxgb4_ethofld_rx_handler(struct sge_rspq *q, const __be64 *rsp,
+ const struct pkt_gl *si)
+{
+ u8 opcode = ((const struct rss_header *)rsp)->opcode;
+
+ /* skip RSS header */
+ rsp++;
+
+ if (opcode == CPL_FW4_ACK) {
+ const struct cpl_fw4_ack *cpl;
+ struct sge_eosw_txq *eosw_txq;
+ struct eotid_entry *entry;
+ struct sk_buff *skb;
+ u32 hdr_len, eotid;
+ u8 flits, wrlen16;
+ int credits;
+
+ cpl = (const struct cpl_fw4_ack *)rsp;
+ eotid = CPL_FW4_ACK_FLOWID_G(ntohl(OPCODE_TID(cpl))) -
+ q->adap->tids.eotid_base;
+ entry = cxgb4_lookup_eotid(&q->adap->tids, eotid);
+ if (!entry)
+ goto out_done;
+
+ eosw_txq = (struct sge_eosw_txq *)entry->data;
+ if (!eosw_txq)
+ goto out_done;
+
+ spin_lock(&eosw_txq->lock);
+ credits = cpl->credits;
+ while (credits > 0) {
+ skb = eosw_txq->desc[eosw_txq->cidx].skb;
+ if (!skb)
+ break;
+
+ if (unlikely((eosw_txq->state ==
+ CXGB4_EO_STATE_FLOWC_OPEN_REPLY ||
+ eosw_txq->state ==
+ CXGB4_EO_STATE_FLOWC_CLOSE_REPLY) &&
+ eosw_txq->cidx == eosw_txq->flowc_idx)) {
+ flits = DIV_ROUND_UP(skb->len, 8);
+ if (eosw_txq->state ==
+ CXGB4_EO_STATE_FLOWC_OPEN_REPLY)
+ eosw_txq->state = CXGB4_EO_STATE_ACTIVE;
+ else
+ eosw_txq->state = CXGB4_EO_STATE_CLOSED;
+ complete(&eosw_txq->completion);
+ } else {
+ hdr_len = eth_get_headlen(eosw_txq->netdev,
+ skb->data,
+ skb_headlen(skb));
+ flits = ethofld_calc_tx_flits(q->adap, skb,
+ hdr_len);
+ }
+ eosw_txq_advance_index(&eosw_txq->cidx, 1,
+ eosw_txq->ndesc);
+ wrlen16 = DIV_ROUND_UP(flits * 8, 16);
+ credits -= wrlen16;
+ }
+
+ eosw_txq->cred += cpl->credits;
+ eosw_txq->ncompl--;
+
+ spin_unlock(&eosw_txq->lock);
+
+ /* Schedule a tasklet to reclaim SKBs and restart ETHOFLD Tx,
+ * if there were packets waiting for completion.
+ */
+ tasklet_schedule(&eosw_txq->qresume_tsk);
+ }
+
+out_done:
+ return 0;
+}
+
/*
* The MSI-X interrupt handler for an SGE response queue.
*/
@@ -3835,7 +4387,10 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
txq->q.q_type = CXGB4_TXQ_ETH;
init_txq(adap, &txq->q, FW_EQ_ETH_CMD_EQID_G(ntohl(c.eqid_pkd)));
txq->txq = netdevq;
- txq->tso = txq->tx_cso = txq->vlan_ins = 0;
+ txq->tso = 0;
+ txq->uso = 0;
+ txq->tx_cso = 0;
+ txq->vlan_ins = 0;
txq->mapping_err = 0;
txq->dbqt = dbqt;
@@ -3912,30 +4467,30 @@ int t4_sge_mod_ctrl_txq(struct adapter *adap, unsigned int eqid,
return t4_set_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);
}
-int t4_sge_alloc_uld_txq(struct adapter *adap, struct sge_uld_txq *txq,
- struct net_device *dev, unsigned int iqid,
- unsigned int uld_type)
+static int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_txq *q,
+ struct net_device *dev, u32 cmd, u32 iqid)
{
unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
- int ret, nentries;
- struct fw_eq_ofld_cmd c;
- struct sge *s = &adap->sge;
struct port_info *pi = netdev_priv(dev);
- int cmd = FW_EQ_OFLD_CMD;
+ struct sge *s = &adap->sge;
+ struct fw_eq_ofld_cmd c;
+ u32 fb_min, nentries;
+ int ret;
/* Add status entries */
- nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
-
- txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
- sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
- &txq->q.phys_addr, &txq->q.sdesc, s->stat_len,
- NUMA_NO_NODE);
- if (!txq->q.desc)
+ nentries = q->size + s->stat_len / sizeof(struct tx_desc);
+ q->desc = alloc_ring(adap->pdev_dev, q->size, sizeof(struct tx_desc),
+ sizeof(struct tx_sw_desc), &q->phys_addr,
+ &q->sdesc, s->stat_len, NUMA_NO_NODE);
+ if (!q->desc)
return -ENOMEM;
+ if (chip_ver <= CHELSIO_T5)
+ fb_min = FETCHBURSTMIN_64B_X;
+ else
+ fb_min = FETCHBURSTMIN_64B_T6_X;
+
memset(&c, 0, sizeof(c));
- if (unlikely(uld_type == CXGB4_TX_CRYPTO))
- cmd = FW_EQ_CTRL_CMD;
c.op_to_vfn = htonl(FW_CMD_OP_V(cmd) | FW_CMD_REQUEST_F |
FW_CMD_WRITE_F | FW_CMD_EXEC_F |
FW_EQ_OFLD_CMD_PFN_V(adap->pf) |
@@ -3947,27 +4502,42 @@ int t4_sge_alloc_uld_txq(struct adapter *adap, struct sge_uld_txq *txq,
FW_EQ_OFLD_CMD_PCIECHN_V(pi->tx_chan) |
FW_EQ_OFLD_CMD_FETCHRO_F | FW_EQ_OFLD_CMD_IQID_V(iqid));
c.dcaen_to_eqsize =
- htonl(FW_EQ_OFLD_CMD_FBMIN_V(chip_ver <= CHELSIO_T5
- ? FETCHBURSTMIN_64B_X
- : FETCHBURSTMIN_64B_T6_X) |
+ htonl(FW_EQ_OFLD_CMD_FBMIN_V(fb_min) |
FW_EQ_OFLD_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
FW_EQ_OFLD_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) |
FW_EQ_OFLD_CMD_EQSIZE_V(nentries));
- c.eqaddr = cpu_to_be64(txq->q.phys_addr);
+ c.eqaddr = cpu_to_be64(q->phys_addr);
ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
if (ret) {
- kfree(txq->q.sdesc);
- txq->q.sdesc = NULL;
+ kfree(q->sdesc);
+ q->sdesc = NULL;
dma_free_coherent(adap->pdev_dev,
nentries * sizeof(struct tx_desc),
- txq->q.desc, txq->q.phys_addr);
- txq->q.desc = NULL;
+ q->desc, q->phys_addr);
+ q->desc = NULL;
return ret;
}
+ init_txq(adap, q, FW_EQ_OFLD_CMD_EQID_G(ntohl(c.eqid_pkd)));
+ return 0;
+}
+
+int t4_sge_alloc_uld_txq(struct adapter *adap, struct sge_uld_txq *txq,
+ struct net_device *dev, unsigned int iqid,
+ unsigned int uld_type)
+{
+ u32 cmd = FW_EQ_OFLD_CMD;
+ int ret;
+
+ if (unlikely(uld_type == CXGB4_TX_CRYPTO))
+ cmd = FW_EQ_CTRL_CMD;
+
+ ret = t4_sge_alloc_ofld_txq(adap, &txq->q, dev, cmd, iqid);
+ if (ret)
+ return ret;
+
txq->q.q_type = CXGB4_TXQ_ULD;
- init_txq(adap, &txq->q, FW_EQ_OFLD_CMD_EQID_G(ntohl(c.eqid_pkd)));
txq->adap = adap;
skb_queue_head_init(&txq->sendq);
tasklet_init(&txq->qresume_tsk, restart_ofldq, (unsigned long)txq);
@@ -3976,6 +4546,26 @@ int t4_sge_alloc_uld_txq(struct adapter *adap, struct sge_uld_txq *txq,
return 0;
}
+int t4_sge_alloc_ethofld_txq(struct adapter *adap, struct sge_eohw_txq *txq,
+ struct net_device *dev, u32 iqid)
+{
+ int ret;
+
+ ret = t4_sge_alloc_ofld_txq(adap, &txq->q, dev, FW_EQ_OFLD_CMD, iqid);
+ if (ret)
+ return ret;
+
+ txq->q.q_type = CXGB4_TXQ_ULD;
+ spin_lock_init(&txq->lock);
+ txq->adap = adap;
+ txq->tso = 0;
+ txq->uso = 0;
+ txq->tx_cso = 0;
+ txq->vlan_ins = 0;
+ txq->mapping_err = 0;
+ return 0;
+}
+
void free_txq(struct adapter *adap, struct sge_txq *q)
{
struct sge *s = &adap->sge;
@@ -4031,6 +4621,17 @@ void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q)
q->fl.size ? &q->fl : NULL);
}
+void t4_sge_free_ethofld_txq(struct adapter *adap, struct sge_eohw_txq *txq)
+{
+ if (txq->q.desc) {
+ t4_ofld_eq_free(adap, adap->mbox, adap->pf, 0,
+ txq->q.cntxt_id);
+ free_tx_desc(adap, &txq->q, txq->q.in_use, false);
+ kfree(txq->q.sdesc);
+ free_txq(adap, &txq->q);
+ }
+}
+
/**
* t4_free_sge_resources - free SGE resources
* @adap: the adapter
@@ -4060,6 +4661,10 @@ void t4_free_sge_resources(struct adapter *adap)
if (eq->rspq.desc)
free_rspq_fl(adap, &eq->rspq,
eq->fl.size ? &eq->fl : NULL);
+ if (eq->msix) {
+ cxgb4_free_msix_idx_in_bmap(adap, eq->msix->idx);
+ eq->msix = NULL;
+ }
etq = &adap->sge.ethtxq[i];
if (etq->q.desc) {
@@ -4086,8 +4691,15 @@ void t4_free_sge_resources(struct adapter *adap)
}
}
- if (adap->sge.fw_evtq.desc)
+ if (adap->sge.fw_evtq.desc) {
free_rspq_fl(adap, &adap->sge.fw_evtq, NULL);
+ if (adap->sge.fwevtq_msix_idx >= 0)
+ cxgb4_free_msix_idx_in_bmap(adap,
+ adap->sge.fwevtq_msix_idx);
+ }
+
+ if (adap->sge.nd_msix_idx >= 0)
+ cxgb4_free_msix_idx_in_bmap(adap, adap->sge.nd_msix_idx);
if (adap->sge.intrq.desc)
free_rspq_fl(adap, &adap->sge.intrq, NULL);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index f2a7824da42b..19d18acfc9a6 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -8777,8 +8777,8 @@ int t4_get_link_params(struct port_info *pi, unsigned int *link_okp,
unsigned int *speedp, unsigned int *mtup)
{
unsigned int fw_caps = pi->adapter->params.fw_caps_support;
- struct fw_port_cmd port_cmd;
unsigned int action, link_ok, mtu;
+ struct fw_port_cmd port_cmd;
fw_port_cap32_t linkattr;
int ret;
@@ -8813,9 +8813,12 @@ int t4_get_link_params(struct port_info *pi, unsigned int *link_okp,
be32_to_cpu(port_cmd.u.info32.auxlinfo32_mtu32));
}
- *link_okp = link_ok;
- *speedp = fwcap_to_speed(linkattr);
- *mtup = mtu;
+ if (link_okp)
+ *link_okp = link_ok;
+ if (speedp)
+ *speedp = fwcap_to_speed(linkattr);
+ if (mtup)
+ *mtup = mtu;
return 0;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index 38dd41eb959e..575c6abcdae7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -1421,6 +1421,11 @@ enum {
CPL_FW4_ACK_FLAGS_FLOWC = 0x4, /* fw_flowc_wr complete */
};
+#define CPL_FW4_ACK_FLOWID_S 0
+#define CPL_FW4_ACK_FLOWID_M 0xffffff
+#define CPL_FW4_ACK_FLOWID_G(x) \
+ (((x) >> CPL_FW4_ACK_FLOWID_S) & CPL_FW4_ACK_FLOWID_M)
+
struct cpl_fw6_msg {
u8 opcode;
u8 type;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index 65313f6b5704..ac4fb43bdec6 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -87,6 +87,7 @@ enum fw_wr_opcodes {
FW_ULPTX_WR = 0x04,
FW_TP_WR = 0x05,
FW_ETH_TX_PKT_WR = 0x08,
+ FW_ETH_TX_EO_WR = 0x1c,
FW_OFLD_CONNECTION_WR = 0x2f,
FW_FLOWC_WR = 0x0a,
FW_OFLD_TX_DATA_WR = 0x0b,
@@ -534,6 +535,47 @@ struct fw_eth_tx_pkt_wr {
__be64 r3;
};
+enum fw_eth_tx_eo_type {
+ FW_ETH_TX_EO_TYPE_UDPSEG = 0,
+ FW_ETH_TX_EO_TYPE_TCPSEG,
+};
+
+struct fw_eth_tx_eo_wr {
+ __be32 op_immdlen;
+ __be32 equiq_to_len16;
+ __be64 r3;
+ union fw_eth_tx_eo {
+ struct fw_eth_tx_eo_udpseg {
+ __u8 type;
+ __u8 ethlen;
+ __be16 iplen;
+ __u8 udplen;
+ __u8 rtplen;
+ __be16 r4;
+ __be16 mss;
+ __be16 schedpktsize;
+ __be32 plen;
+ } udpseg;
+ struct fw_eth_tx_eo_tcpseg {
+ __u8 type;
+ __u8 ethlen;
+ __be16 iplen;
+ __u8 tcplen;
+ __u8 tsclk_tsoff;
+ __be16 r4;
+ __be16 mss;
+ __be16 r5;
+ __be32 plen;
+ } tcpseg;
+ } u;
+};
+
+#define FW_ETH_TX_EO_WR_IMMDLEN_S 0
+#define FW_ETH_TX_EO_WR_IMMDLEN_M 0x1ff
+#define FW_ETH_TX_EO_WR_IMMDLEN_V(x) ((x) << FW_ETH_TX_EO_WR_IMMDLEN_S)
+#define FW_ETH_TX_EO_WR_IMMDLEN_G(x) \
+ (((x) >> FW_ETH_TX_EO_WR_IMMDLEN_S) & FW_ETH_TX_EO_WR_IMMDLEN_M)
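The new _V/_G macros follow the driver's usual bitfield convention; a minimal round trip with a hypothetical value:

	u32 word = FW_ETH_TX_EO_WR_IMMDLEN_V(0x123);	/* pack into bits 8:0 */
	u32 len  = FW_ETH_TX_EO_WR_IMMDLEN_G(word);	/* len == 0x123 & 0x1ff */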
+
struct fw_ofld_connection_wr {
__be32 op_compl;
__be32 len16_pkd;
@@ -660,6 +702,12 @@ enum fw_flowc_mnem_tcpstate {
FW_FLOWC_MNEM_TCPSTATE_TIMEWAIT = 10, /* not expected */
};
+enum fw_flowc_mnem_eostate {
+ FW_FLOWC_MNEM_EOSTATE_ESTABLISHED = 1, /* default */
+ /* graceful close, after sending outstanding payload */
+ FW_FLOWC_MNEM_EOSTATE_CLOSING = 2,
+};
+
enum fw_flowc_mnem {
FW_FLOWC_MNEM_PFNVFN, /* PFN [15:8] VFN [7:0] */
FW_FLOWC_MNEM_CH,
@@ -1134,6 +1182,7 @@ enum fw_caps_config_nic {
FW_CAPS_CONFIG_NIC = 0x00000001,
FW_CAPS_CONFIG_NIC_VM = 0x00000002,
FW_CAPS_CONFIG_NIC_HASHFILTER = 0x00000020,
+ FW_CAPS_CONFIG_NIC_ETHOFLD = 0x00000040,
};
enum fw_caps_config_ofld {
@@ -1276,6 +1325,7 @@ enum fw_params_param_dev {
FW_PARAMS_PARAM_DEV_HASHFILTER_WITH_OFLD = 0x28,
FW_PARAMS_PARAM_DEV_DBQ_TIMER = 0x29,
FW_PARAMS_PARAM_DEV_DBQ_TIMERTICK = 0x2A,
+ FW_PARAMS_PARAM_DEV_NUM_TM_CLASS = 0x2B,
FW_PARAMS_PARAM_DEV_FILTER = 0x2E,
};
diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c
index f1a0c4dceda0..f37c9a08c4cf 100644
--- a/drivers/net/ethernet/cirrus/ep93xx_eth.c
+++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c
@@ -763,6 +763,7 @@ static int ep93xx_eth_remove(struct platform_device *pdev)
{
struct net_device *dev;
struct ep93xx_priv *ep;
+ struct resource *mem;
dev = platform_get_drvdata(pdev);
if (dev == NULL)
@@ -778,8 +779,8 @@ static int ep93xx_eth_remove(struct platform_device *pdev)
iounmap(ep->base_addr);
if (ep->res != NULL) {
- release_resource(ep->res);
- kfree(ep->res);
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(mem->start, resource_size(mem));
}
free_netdev(dev);
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
index e736ce2c58ca..a8f4c69252ff 100644
--- a/drivers/net/ethernet/cortina/gemini.c
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -2524,6 +2524,7 @@ static int gemini_ethernet_port_remove(struct platform_device *pdev)
struct gemini_ethernet_port *port = platform_get_drvdata(pdev);
gemini_port_remove(port);
+ free_netdev(port->netdev);
return 0;
}
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 96e9565f1e08..a6f2063f1475 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -90,6 +90,9 @@ struct ftgmac100 {
struct mii_bus *mii_bus;
struct clk *clk;
+ /* AST2500/AST2600 RMII ref clock gate */
+ struct clk *rclk;
+
/* Link management */
int cur_speed;
int cur_duplex;
@@ -1609,7 +1612,7 @@ static int ftgmac100_setup_mdio(struct net_device *netdev)
{
struct ftgmac100 *priv = netdev_priv(netdev);
struct platform_device *pdev = to_platform_device(priv->dev);
- int phy_intf = PHY_INTERFACE_MODE_RGMII;
+ phy_interface_t phy_intf = PHY_INTERFACE_MODE_RGMII;
struct device_node *np = pdev->dev.of_node;
int i, err = 0;
u32 reg;
@@ -1634,8 +1637,8 @@ static int ftgmac100_setup_mdio(struct net_device *netdev)
/* Get PHY mode from device-tree */
if (np) {
/* Default to RGMII. It's a gigabit part after all */
- phy_intf = of_get_phy_mode(np);
- if (phy_intf < 0)
+ err = of_get_phy_mode(np, &phy_intf);
+ if (err)
phy_intf = PHY_INTERFACE_MODE_RGMII;
/* Aspeed only supports these. I don't know about other IP
@@ -1717,20 +1720,41 @@ static void ftgmac100_ncsi_handler(struct ncsi_dev *nd)
nd->link_up ? "up" : "down");
}
-static void ftgmac100_setup_clk(struct ftgmac100 *priv)
+static int ftgmac100_setup_clk(struct ftgmac100 *priv)
{
- priv->clk = devm_clk_get(priv->dev, NULL);
- if (IS_ERR(priv->clk))
- return;
+ struct clk *clk;
+ int rc;
- clk_prepare_enable(priv->clk);
+ clk = devm_clk_get(priv->dev, NULL /* MACCLK */);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+ priv->clk = clk;
+ rc = clk_prepare_enable(priv->clk);
+ if (rc)
+ return rc;
/* Aspeed specifies a 100MHz clock is required for up to
* 1000Mbit link speeds. As NCSI is limited to 100Mbit, 25MHz
* is sufficient
*/
- clk_set_rate(priv->clk, priv->use_ncsi ? FTGMAC_25MHZ :
- FTGMAC_100MHZ);
+ rc = clk_set_rate(priv->clk, priv->use_ncsi ? FTGMAC_25MHZ :
+ FTGMAC_100MHZ);
+ if (rc)
+ goto cleanup_clk;
+
+ /* RCLK is for RMII, typically used for NCSI. Optional because it's not
+ * necessary if it's the AST2400 MAC, or the MAC is configured for
+ * RGMII, or the controller is not an ASPEED-based controller.
+ */
+ priv->rclk = devm_clk_get_optional(priv->dev, "RCLK");
+ rc = clk_prepare_enable(priv->rclk);
+ if (!rc)
+ return 0;
+
+cleanup_clk:
+ clk_disable_unprepare(priv->clk);
+
+ return rc;
}
static int ftgmac100_probe(struct platform_device *pdev)
@@ -1852,8 +1876,11 @@ static int ftgmac100_probe(struct platform_device *pdev)
goto err_setup_mdio;
}
- if (priv->is_aspeed)
- ftgmac100_setup_clk(priv);
+ if (priv->is_aspeed) {
+ err = ftgmac100_setup_clk(priv);
+ if (err)
+ goto err_ncsi_dev;
+ }
/* Default ring sizes */
priv->rx_q_entries = priv->new_rx_q_entries = DEF_RX_QUEUE_ENTRIES;
@@ -1885,8 +1912,10 @@ static int ftgmac100_probe(struct platform_device *pdev)
return 0;
-err_ncsi_dev:
err_register_netdev:
+ clk_disable_unprepare(priv->rclk);
+ clk_disable_unprepare(priv->clk);
+err_ncsi_dev:
ftgmac100_destroy_mdio(netdev);
err_setup_mdio:
iounmap(priv->base);
@@ -1908,6 +1937,7 @@ static int ftgmac100_remove(struct platform_device *pdev)
unregister_netdev(netdev);
+ clk_disable_unprepare(priv->rclk);
clk_disable_unprepare(priv->clk);
/* There's a small chance the reset task will have been re-queued,
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index b4b82b9c5cd6..6a9d12dad5d9 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -178,31 +178,9 @@ struct fm_port_fqs {
/* All the dpa bps in use at any moment */
static struct dpaa_bp *dpaa_bp_array[BM_MAX_NUM_OF_POOLS];
-/* The raw buffer size must be cacheline aligned */
#define DPAA_BP_RAW_SIZE 4096
-/* When using more than one buffer pool, the raw sizes are as follows:
- * 1 bp: 4KB
- * 2 bp: 2KB, 4KB
- * 3 bp: 1KB, 2KB, 4KB
- * 4 bp: 1KB, 2KB, 4KB, 8KB
- */
-static inline size_t bpool_buffer_raw_size(u8 index, u8 cnt)
-{
- size_t res = DPAA_BP_RAW_SIZE / 4;
- u8 i;
-
- for (i = (cnt < 3) ? cnt : 3; i < 3 + index; i++)
- res *= 2;
- return res;
-}
-/* FMan-DMA requires 16-byte alignment for Rx buffers, but SKB_DATA_ALIGN is
- * even stronger (SMP_CACHE_BYTES-aligned), so we just get away with that,
- * via SKB_WITH_OVERHEAD(). We can't rely on netdev_alloc_frag() giving us
- * half-page-aligned buffers, so we reserve some more space for start-of-buffer
- * alignment.
- */
-#define dpaa_bp_size(raw_size) SKB_WITH_OVERHEAD((raw_size) - SMP_CACHE_BYTES)
+#define dpaa_bp_size(raw_size) SKB_WITH_OVERHEAD(raw_size)
static int dpaa_max_frm;
@@ -288,7 +266,7 @@ static int dpaa_stop(struct net_device *net_dev)
/* Allow the Fman (Tx) port to process in-flight frames before we
* try switching it off.
*/
- usleep_range(5000, 10000);
+ msleep(200);
err = mac_dev->stop(mac_dev);
if (err < 0)
@@ -305,6 +283,8 @@ static int dpaa_stop(struct net_device *net_dev)
phy_disconnect(net_dev->phydev);
net_dev->phydev = NULL;
+ msleep(200);
+
return err;
}
@@ -596,10 +576,7 @@ static void dpaa_bp_free(struct dpaa_bp *dpaa_bp)
static void dpaa_bps_free(struct dpaa_priv *priv)
{
- int i;
-
- for (i = 0; i < DPAA_BPS_NUM; i++)
- dpaa_bp_free(priv->dpaa_bps[i]);
+ dpaa_bp_free(priv->dpaa_bp);
}
/* Use multiple WQs for FQ assignment:
@@ -773,7 +750,7 @@ static void dpaa_release_channel(void)
qman_release_pool(rx_pool_channel);
}
-static void dpaa_eth_add_channel(u16 channel)
+static void dpaa_eth_add_channel(u16 channel, struct device *dev)
{
u32 pool = QM_SDQCR_CHANNELS_POOL_CONV(channel);
const cpumask_t *cpus = qman_affine_cpus();
@@ -783,6 +760,7 @@ static void dpaa_eth_add_channel(u16 channel)
for_each_cpu_and(cpu, cpus, cpu_online_mask) {
portal = qman_get_affine_portal(cpu);
qman_p_static_dequeue_add(portal, pool);
+ qman_start_using_portal(portal, dev);
}
}
@@ -901,7 +879,7 @@ static void dpaa_fq_setup(struct dpaa_priv *priv,
if (num_portals == 0)
dev_err(priv->net_dev->dev.parent,
- "No Qman software (affine) channels found");
+ "No Qman software (affine) channels found\n");
/* Initialize each FQ in the list */
list_for_each_entry(fq, &priv->dpaa_fq_list, list) {
@@ -1197,15 +1175,15 @@ static int dpaa_eth_init_tx_port(struct fman_port *port, struct dpaa_fq *errq,
return err;
}
-static int dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp **bps,
- size_t count, struct dpaa_fq *errq,
+static int dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp *bp,
+ struct dpaa_fq *errq,
struct dpaa_fq *defq, struct dpaa_fq *pcdq,
struct dpaa_buffer_layout *buf_layout)
{
struct fman_buffer_prefix_content buf_prefix_content;
struct fman_port_rx_params *rx_p;
struct fman_port_params params;
- int i, err;
+ int err;
memset(&params, 0, sizeof(params));
memset(&buf_prefix_content, 0, sizeof(buf_prefix_content));
@@ -1224,12 +1202,9 @@ static int dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp **bps,
rx_p->pcd_fqs_count = DPAA_ETH_PCD_RXQ_NUM;
}
- count = min(ARRAY_SIZE(rx_p->ext_buf_pools.ext_buf_pool), count);
- rx_p->ext_buf_pools.num_of_pools_used = (u8)count;
- for (i = 0; i < count; i++) {
- rx_p->ext_buf_pools.ext_buf_pool[i].id = bps[i]->bpid;
- rx_p->ext_buf_pools.ext_buf_pool[i].size = (u16)bps[i]->size;
- }
+ rx_p->ext_buf_pools.num_of_pools_used = 1;
+ rx_p->ext_buf_pools.ext_buf_pool[0].id = bp->bpid;
+ rx_p->ext_buf_pools.ext_buf_pool[0].size = (u16)bp->size;
err = fman_port_config(port, &params);
if (err) {
@@ -1252,7 +1227,7 @@ static int dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp **bps,
}
static int dpaa_eth_init_ports(struct mac_device *mac_dev,
- struct dpaa_bp **bps, size_t count,
+ struct dpaa_bp *bp,
struct fm_port_fqs *port_fqs,
struct dpaa_buffer_layout *buf_layout,
struct device *dev)
@@ -1266,7 +1241,7 @@ static int dpaa_eth_init_ports(struct mac_device *mac_dev,
if (err)
return err;
- err = dpaa_eth_init_rx_port(rxport, bps, count, port_fqs->rx_errq,
+ err = dpaa_eth_init_rx_port(rxport, bp, port_fqs->rx_errq,
port_fqs->rx_defq, port_fqs->rx_pcdq,
&buf_layout[RX]);
@@ -1335,15 +1310,16 @@ static void dpaa_fd_release(const struct net_device *net_dev,
vaddr = phys_to_virt(qm_fd_addr(fd));
sgt = vaddr + qm_fd_get_offset(fd);
- dma_unmap_single(dpaa_bp->dev, qm_fd_addr(fd), dpaa_bp->size,
- DMA_FROM_DEVICE);
+ dma_unmap_page(dpaa_bp->priv->rx_dma_dev, qm_fd_addr(fd),
+ DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE);
dpaa_release_sgt_members(sgt);
- addr = dma_map_single(dpaa_bp->dev, vaddr, dpaa_bp->size,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(dpaa_bp->dev, addr)) {
- dev_err(dpaa_bp->dev, "DMA mapping failed");
+ addr = dma_map_page(dpaa_bp->priv->rx_dma_dev,
+ virt_to_page(vaddr), 0, DPAA_BP_RAW_SIZE,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(dpaa_bp->priv->rx_dma_dev, addr)) {
+ netdev_err(net_dev, "DMA mapping failed\n");
return;
}
bm_buffer_set64(&bmb, addr);
@@ -1396,7 +1372,7 @@ static void count_ern(struct dpaa_percpu_priv *percpu_priv,
static int dpaa_enable_tx_csum(struct dpaa_priv *priv,
struct sk_buff *skb,
struct qm_fd *fd,
- char *parse_results)
+ void *parse_results)
{
struct fman_prs_result *parse_result;
u16 ethertype = ntohs(skb->protocol);
@@ -1488,25 +1464,24 @@ return_error:
static int dpaa_bp_add_8_bufs(const struct dpaa_bp *dpaa_bp)
{
- struct device *dev = dpaa_bp->dev;
+ struct net_device *net_dev = dpaa_bp->priv->net_dev;
struct bm_buffer bmb[8];
dma_addr_t addr;
- void *new_buf;
+ struct page *p;
u8 i;
for (i = 0; i < 8; i++) {
- new_buf = netdev_alloc_frag(dpaa_bp->raw_size);
- if (unlikely(!new_buf)) {
- dev_err(dev, "netdev_alloc_frag() failed, size %zu\n",
- dpaa_bp->raw_size);
+ p = dev_alloc_pages(0);
+ if (unlikely(!p)) {
+ netdev_err(net_dev, "dev_alloc_pages() failed\n");
goto release_previous_buffs;
}
- new_buf = PTR_ALIGN(new_buf, SMP_CACHE_BYTES);
- addr = dma_map_single(dev, new_buf,
- dpaa_bp->size, DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(dev, addr))) {
- dev_err(dpaa_bp->dev, "DMA map failed");
+ addr = dma_map_page(dpaa_bp->priv->rx_dma_dev, p, 0,
+ DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(dpaa_bp->priv->rx_dma_dev,
+ addr))) {
+ netdev_err(net_dev, "DMA map failed\n");
goto release_previous_buffs;
}
@@ -1581,17 +1556,16 @@ static int dpaa_eth_refill_bpools(struct dpaa_priv *priv)
{
struct dpaa_bp *dpaa_bp;
int *countptr;
- int res, i;
+ int res;
+
+ dpaa_bp = priv->dpaa_bp;
+ if (!dpaa_bp)
+ return -EINVAL;
+ countptr = this_cpu_ptr(dpaa_bp->percpu_count);
+ res = dpaa_eth_refill_bpool(dpaa_bp, countptr);
+ if (res)
+ return res;
- for (i = 0; i < DPAA_BPS_NUM; i++) {
- dpaa_bp = priv->dpaa_bps[i];
- if (!dpaa_bp)
- return -EINVAL;
- countptr = this_cpu_ptr(dpaa_bp->percpu_count);
- res = dpaa_eth_refill_bpool(dpaa_bp, countptr);
- if (res)
- return res;
- }
return 0;
}
@@ -1600,68 +1574,74 @@ static int dpaa_eth_refill_bpools(struct dpaa_priv *priv)
* Skb freeing is not handled here.
*
* This function may be called on error paths in the Tx function, so guard
- * against cases when not all fd relevant fields were filled in.
+ * against cases when not all fd relevant fields were filled in. To avoid
+ * reading the invalid transmission timestamp for the error paths set ts to
+ * false.
*
* Return the skb backpointer, since for S/G frames the buffer containing it
* gets freed here.
*/
static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
- const struct qm_fd *fd)
+ const struct qm_fd *fd, bool ts)
{
const enum dma_data_direction dma_dir = DMA_TO_DEVICE;
struct device *dev = priv->net_dev->dev.parent;
struct skb_shared_hwtstamps shhwtstamps;
dma_addr_t addr = qm_fd_addr(fd);
+ void *vaddr = phys_to_virt(addr);
const struct qm_sg_entry *sgt;
- struct sk_buff **skbh, *skb;
- int nr_frags, i;
+ struct sk_buff *skb;
u64 ns;
-
- skbh = (struct sk_buff **)phys_to_virt(addr);
- skb = *skbh;
-
- if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
- memset(&shhwtstamps, 0, sizeof(shhwtstamps));
-
- if (!fman_port_get_tstamp(priv->mac_dev->port[TX], (void *)skbh,
- &ns)) {
- shhwtstamps.hwtstamp = ns_to_ktime(ns);
- skb_tstamp_tx(skb, &shhwtstamps);
- } else {
- dev_warn(dev, "fman_port_get_tstamp failed!\n");
- }
- }
+ int i;
if (unlikely(qm_fd_get_format(fd) == qm_fd_sg)) {
- nr_frags = skb_shinfo(skb)->nr_frags;
- dma_unmap_single(dev, addr,
- qm_fd_get_offset(fd) + DPAA_SGT_SIZE,
- dma_dir);
+ dma_unmap_page(priv->tx_dma_dev, addr,
+ qm_fd_get_offset(fd) + DPAA_SGT_SIZE,
+ dma_dir);
/* The sgt buffer has been allocated with netdev_alloc_frag(),
* it's from lowmem.
*/
- sgt = phys_to_virt(addr + qm_fd_get_offset(fd));
+ sgt = vaddr + qm_fd_get_offset(fd);
/* sgt[0] is from lowmem, was dma_map_single()-ed */
- dma_unmap_single(dev, qm_sg_addr(&sgt[0]),
+ dma_unmap_single(priv->tx_dma_dev, qm_sg_addr(&sgt[0]),
qm_sg_entry_get_len(&sgt[0]), dma_dir);
/* remaining pages were mapped with skb_frag_dma_map() */
- for (i = 1; i <= nr_frags; i++) {
+ for (i = 1; (i < DPAA_SGT_MAX_ENTRIES) &&
+ !qm_sg_entry_is_final(&sgt[i - 1]); i++) {
WARN_ON(qm_sg_entry_is_ext(&sgt[i]));
- dma_unmap_page(dev, qm_sg_addr(&sgt[i]),
+ dma_unmap_page(priv->tx_dma_dev, qm_sg_addr(&sgt[i]),
qm_sg_entry_get_len(&sgt[i]), dma_dir);
}
-
- /* Free the page frag that we allocated on Tx */
- skb_free_frag(phys_to_virt(addr));
} else {
- dma_unmap_single(dev, addr,
- skb_tail_pointer(skb) - (u8 *)skbh, dma_dir);
+ dma_unmap_single(priv->tx_dma_dev, addr,
+ priv->tx_headroom + qm_fd_get_length(fd),
+ dma_dir);
+ }
+
+ skb = *(struct sk_buff **)vaddr;
+
+ /* DMA unmapping is required before accessing the HW provided info */
+ if (ts && priv->tx_tstamp &&
+ skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+
+ if (!fman_port_get_tstamp(priv->mac_dev->port[TX], vaddr,
+ &ns)) {
+ shhwtstamps.hwtstamp = ns_to_ktime(ns);
+ skb_tstamp_tx(skb, &shhwtstamps);
+ } else {
+ dev_warn(dev, "fman_port_get_tstamp failed!\n");
+ }
}
+ if (qm_fd_get_format(fd) == qm_fd_sg)
+ /* Free the page that we allocated on Tx for the SGT */
+ free_pages((unsigned long)vaddr, 0);
+
return skb;
}
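
Both Tx paths rely on the same backpointer convention: the skb pointer is stored at the very start of the buffer headroom, so dpaa_cleanup_tx_fd() can recover it from the FD's buffer address alone. A minimal sketch of the two halves, assuming the layout used in this patch (helper names are illustrative):

	/* Tx side: stash the skb pointer at the start of the headroom. */
	static void stash_skb(void *buff_start, struct sk_buff *skb)
	{
		*(struct sk_buff **)buff_start = skb;
	}

	/* Confirmation side: recover it from the frame descriptor alone. */
	static struct sk_buff *unstash_skb(const struct qm_fd *fd)
	{
		return *(struct sk_buff **)phys_to_virt(qm_fd_addr(fd));
	}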
@@ -1715,7 +1695,7 @@ static struct sk_buff *contig_fd_to_skb(const struct dpaa_priv *priv,
return skb;
free_buffer:
- skb_free_frag(vaddr);
+ free_pages((unsigned long)vaddr, 0);
return NULL;
}
@@ -1762,8 +1742,8 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
goto free_buffers;
count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
- dma_unmap_single(dpaa_bp->dev, sg_addr, dpaa_bp->size,
- DMA_FROM_DEVICE);
+ dma_unmap_page(priv->rx_dma_dev, sg_addr,
+ DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE);
if (!skb) {
sz = dpaa_bp->size +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
@@ -1815,7 +1795,7 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
WARN_ONCE(i == DPAA_SGT_MAX_ENTRIES, "No final bit on SGT\n");
/* free the SG table buffer */
- skb_free_frag(vaddr);
+ free_pages((unsigned long)vaddr, 0);
return skb;
@@ -1832,7 +1812,7 @@ free_buffers:
for (i = 0; i < DPAA_SGT_MAX_ENTRIES ; i++) {
sg_addr = qm_sg_addr(&sgt[i]);
sg_vaddr = phys_to_virt(sg_addr);
- skb_free_frag(sg_vaddr);
+ free_pages((unsigned long)sg_vaddr, 0);
dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
if (dpaa_bp) {
count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
@@ -1843,7 +1823,7 @@ free_buffers:
break;
}
/* free the SGT fragment */
- skb_free_frag(vaddr);
+ free_pages((unsigned long)vaddr, 0);
return NULL;
}
@@ -1853,9 +1833,8 @@ static int skb_to_contig_fd(struct dpaa_priv *priv,
int *offset)
{
struct net_device *net_dev = priv->net_dev;
- struct device *dev = net_dev->dev.parent;
enum dma_data_direction dma_dir;
- unsigned char *buffer_start;
+ unsigned char *buff_start;
struct sk_buff **skbh;
dma_addr_t addr;
int err;
@@ -1864,10 +1843,10 @@ static int skb_to_contig_fd(struct dpaa_priv *priv,
* available, so just use that for offset.
*/
fd->bpid = FSL_DPAA_BPID_INV;
- buffer_start = skb->data - priv->tx_headroom;
+ buff_start = skb->data - priv->tx_headroom;
dma_dir = DMA_TO_DEVICE;
- skbh = (struct sk_buff **)buffer_start;
+ skbh = (struct sk_buff **)buff_start;
*skbh = skb;
/* Enable L3/L4 hardware checksum computation.
@@ -1876,7 +1855,7 @@ static int skb_to_contig_fd(struct dpaa_priv *priv,
* need to write into the skb.
*/
err = dpaa_enable_tx_csum(priv, skb, fd,
- ((char *)skbh) + DPAA_TX_PRIV_DATA_SIZE);
+ buff_start + DPAA_TX_PRIV_DATA_SIZE);
if (unlikely(err < 0)) {
if (net_ratelimit())
netif_err(priv, tx_err, net_dev, "HW csum error: %d\n",
@@ -1889,9 +1868,9 @@ static int skb_to_contig_fd(struct dpaa_priv *priv,
fd->cmd |= cpu_to_be32(FM_FD_CMD_FCO);
/* Map the entire buffer size that may be seen by FMan, but no more */
- addr = dma_map_single(dev, skbh,
- skb_tail_pointer(skb) - buffer_start, dma_dir);
- if (unlikely(dma_mapping_error(dev, addr))) {
+ addr = dma_map_single(priv->tx_dma_dev, buff_start,
+ priv->tx_headroom + skb->len, dma_dir);
+ if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) {
if (net_ratelimit())
netif_err(priv, tx_err, net_dev, "dma_map_single() failed\n");
return -EINVAL;
@@ -1907,24 +1886,22 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
const enum dma_data_direction dma_dir = DMA_TO_DEVICE;
const int nr_frags = skb_shinfo(skb)->nr_frags;
struct net_device *net_dev = priv->net_dev;
- struct device *dev = net_dev->dev.parent;
struct qm_sg_entry *sgt;
struct sk_buff **skbh;
- int i, j, err, sz;
- void *buffer_start;
+ void *buff_start;
skb_frag_t *frag;
dma_addr_t addr;
size_t frag_len;
- void *sgt_buf;
-
- /* get a page frag to store the SGTable */
- sz = SKB_DATA_ALIGN(priv->tx_headroom + DPAA_SGT_SIZE);
- sgt_buf = netdev_alloc_frag(sz);
- if (unlikely(!sgt_buf)) {
- netdev_err(net_dev, "netdev_alloc_frag() failed for size %d\n",
- sz);
+ struct page *p;
+ int i, j, err;
+
+ /* get a page to store the SGTable */
+ p = dev_alloc_pages(0);
+ if (unlikely(!p)) {
+ netdev_err(net_dev, "dev_alloc_pages() failed\n");
return -ENOMEM;
}
+ buff_start = page_address(p);
/* Enable L3/L4 hardware checksum computation.
*
@@ -1932,7 +1909,7 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
* need to write into the skb.
*/
err = dpaa_enable_tx_csum(priv, skb, fd,
- sgt_buf + DPAA_TX_PRIV_DATA_SIZE);
+ buff_start + DPAA_TX_PRIV_DATA_SIZE);
if (unlikely(err < 0)) {
if (net_ratelimit())
netif_err(priv, tx_err, net_dev, "HW csum error: %d\n",
@@ -1941,15 +1918,15 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
}
/* SGT[0] is used by the linear part */
- sgt = (struct qm_sg_entry *)(sgt_buf + priv->tx_headroom);
+ sgt = (struct qm_sg_entry *)(buff_start + priv->tx_headroom);
frag_len = skb_headlen(skb);
qm_sg_entry_set_len(&sgt[0], frag_len);
sgt[0].bpid = FSL_DPAA_BPID_INV;
sgt[0].offset = 0;
- addr = dma_map_single(dev, skb->data,
+ addr = dma_map_single(priv->tx_dma_dev, skb->data,
skb_headlen(skb), dma_dir);
- if (unlikely(dma_mapping_error(dev, addr))) {
- dev_err(dev, "DMA mapping failed");
+ if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) {
+ netdev_err(priv->net_dev, "DMA mapping failed\n");
err = -EINVAL;
goto sg0_map_failed;
}
@@ -1960,10 +1937,10 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
frag = &skb_shinfo(skb)->frags[i];
frag_len = skb_frag_size(frag);
WARN_ON(!skb_frag_page(frag));
- addr = skb_frag_dma_map(dev, frag, 0,
+ addr = skb_frag_dma_map(priv->tx_dma_dev, frag, 0,
frag_len, dma_dir);
- if (unlikely(dma_mapping_error(dev, addr))) {
- dev_err(dev, "DMA mapping failed");
+ if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) {
+ netdev_err(priv->net_dev, "DMA mapping failed\n");
err = -EINVAL;
goto sg_map_failed;
}
@@ -1979,17 +1956,17 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
/* Set the final bit in the last used entry of the SGT */
qm_sg_entry_set_f(&sgt[nr_frags], frag_len);
+ /* set fd offset to priv->tx_headroom */
qm_fd_set_sg(fd, priv->tx_headroom, skb->len);
/* DMA map the SGT page */
- buffer_start = (void *)sgt - priv->tx_headroom;
- skbh = (struct sk_buff **)buffer_start;
+ skbh = (struct sk_buff **)buff_start;
*skbh = skb;
- addr = dma_map_single(dev, buffer_start,
- priv->tx_headroom + DPAA_SGT_SIZE, dma_dir);
- if (unlikely(dma_mapping_error(dev, addr))) {
- dev_err(dev, "DMA mapping failed");
+ addr = dma_map_page(priv->tx_dma_dev, p, 0,
+ priv->tx_headroom + DPAA_SGT_SIZE, dma_dir);
+ if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) {
+ netdev_err(priv->net_dev, "DMA mapping failed\n");
err = -EINVAL;
goto sgt_map_failed;
}
@@ -2003,11 +1980,11 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
sgt_map_failed:
sg_map_failed:
for (j = 0; j < i; j++)
- dma_unmap_page(dev, qm_sg_addr(&sgt[j]),
+ dma_unmap_page(priv->tx_dma_dev, qm_sg_addr(&sgt[j]),
qm_sg_entry_get_len(&sgt[j]), dma_dir);
sg0_map_failed:
csum_failed:
- skb_free_frag(sgt_buf);
+ free_pages((unsigned long)buff_start, 0);
return err;
}
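
After this change skb_to_sg_fd() builds everything on a single order-0 page; the layout it assumes, with offsets taken from priv->tx_headroom, is roughly:

	/*
	 *  page_address(p)                  + tx_headroom
	 *  |<- skb backpointer, priv data ->|<- SGT (DPAA_SGT_SIZE) ->| unused
	 *
	 * Only tx_headroom + DPAA_SGT_SIZE bytes of the page are DMA-mapped.
	 */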
@@ -2114,7 +2091,7 @@ dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
if (likely(dpaa_xmit(priv, percpu_stats, queue_mapping, &fd) == 0))
return NETDEV_TX_OK;
- dpaa_cleanup_tx_fd(priv, &fd);
+ dpaa_cleanup_tx_fd(priv, &fd, false);
skb_to_fd_failed:
enomem:
percpu_stats->tx_errors++;
@@ -2160,7 +2137,7 @@ static void dpaa_tx_error(struct net_device *net_dev,
percpu_priv->stats.tx_errors++;
- skb = dpaa_cleanup_tx_fd(priv, fd);
+ skb = dpaa_cleanup_tx_fd(priv, fd, false);
dev_kfree_skb(skb);
}
@@ -2200,7 +2177,7 @@ static void dpaa_tx_conf(struct net_device *net_dev,
percpu_priv->tx_confirm++;
- skb = dpaa_cleanup_tx_fd(priv, fd);
+ skb = dpaa_cleanup_tx_fd(priv, fd, true);
consume_skb(skb);
}
@@ -2304,11 +2281,8 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
return qman_cb_dqrr_consume;
}
- dpaa_bp = dpaa_bpid2pool(fd->bpid);
- if (!dpaa_bp)
- return qman_cb_dqrr_consume;
-
- dma_unmap_single(dpaa_bp->dev, addr, dpaa_bp->size, DMA_FROM_DEVICE);
+ dma_unmap_page(dpaa_bp->priv->rx_dma_dev, addr, DPAA_BP_RAW_SIZE,
+ DMA_FROM_DEVICE);
/* prefetch the first 64 bytes of the frame or the SGT start */
vaddr = phys_to_virt(addr);
@@ -2430,7 +2404,7 @@ static void egress_ern(struct qman_portal *portal,
percpu_priv->stats.tx_fifo_errors++;
count_ern(percpu_priv, msg);
- skb = dpaa_cleanup_tx_fd(priv, fd);
+ skb = dpaa_cleanup_tx_fd(priv, fd, false);
dev_kfree_skb_any(skb);
}
@@ -2663,7 +2637,8 @@ static inline void dpaa_bp_free_pf(const struct dpaa_bp *bp,
{
dma_addr_t addr = bm_buf_addr(bmb);
- dma_unmap_single(bp->dev, addr, bp->size, DMA_FROM_DEVICE);
+ dma_unmap_page(bp->priv->rx_dma_dev, addr, DPAA_BP_RAW_SIZE,
+ DMA_FROM_DEVICE);
skb_free_frag(phys_to_virt(addr));
}
@@ -2764,21 +2739,46 @@ static inline u16 dpaa_get_headroom(struct dpaa_buffer_layout *bl)
static int dpaa_eth_probe(struct platform_device *pdev)
{
- struct dpaa_bp *dpaa_bps[DPAA_BPS_NUM] = {NULL};
struct net_device *net_dev = NULL;
+ struct dpaa_bp *dpaa_bp = NULL;
struct dpaa_fq *dpaa_fq, *tmp;
struct dpaa_priv *priv = NULL;
struct fm_port_fqs port_fqs;
struct mac_device *mac_dev;
- int err = 0, i, channel;
+ int err = 0, channel;
struct device *dev;
- /* device used for DMA mapping */
- dev = pdev->dev.parent;
- err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(40));
- if (err) {
- dev_err(dev, "dma_coerce_mask_and_coherent() failed\n");
- return err;
+ dev = &pdev->dev;
+
+ err = bman_is_probed();
+ if (!err)
+ return -EPROBE_DEFER;
+ if (err < 0) {
+ dev_err(dev, "failing probe due to bman probe error\n");
+ return -ENODEV;
+ }
+ err = qman_is_probed();
+ if (!err)
+ return -EPROBE_DEFER;
+ if (err < 0) {
+ dev_err(dev, "failing probe due to qman probe error\n");
+ return -ENODEV;
+ }
+ err = bman_portals_probed();
+ if (!err)
+ return -EPROBE_DEFER;
+ if (err < 0) {
+ dev_err(dev,
+ "failing probe due to bman portals probe error\n");
+ return -ENODEV;
+ }
+ err = qman_portals_probed();
+ if (!err)
+ return -EPROBE_DEFER;
+ if (err < 0) {
+ dev_err(dev,
+ "failing probe due to qman portals probe error\n");
+ return -ENODEV;
}
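	/* The four checks above share one tri-state idiom: a positive
	 * return means the dependency has probed, 0 means not yet (so
	 * defer), and a negative value means it failed. A hypothetical
	 * helper capturing the pattern, for illustration only:
	 *
	 *	static int dpaa_check_dep(struct device *dev, int state,
	 *				  const char *name)
	 *	{
	 *		if (!state)
	 *			return -EPROBE_DEFER;
	 *		if (state < 0) {
	 *			dev_err(dev, "failing probe due to %s probe error\n",
	 *				name);
	 *			return -ENODEV;
	 *		}
	 *		return 0;
	 *	}
	 */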
/* Allocate this early, so we can store relevant information in
@@ -2801,11 +2801,23 @@ static int dpaa_eth_probe(struct platform_device *pdev)
mac_dev = dpaa_mac_dev_get(pdev);
if (IS_ERR(mac_dev)) {
- dev_err(dev, "dpaa_mac_dev_get() failed\n");
+ netdev_err(net_dev, "dpaa_mac_dev_get() failed\n");
err = PTR_ERR(mac_dev);
goto free_netdev;
}
+ /* Devices used for DMA mapping */
+ priv->rx_dma_dev = fman_port_get_device(mac_dev->port[RX]);
+ priv->tx_dma_dev = fman_port_get_device(mac_dev->port[TX]);
+ err = dma_coerce_mask_and_coherent(priv->rx_dma_dev, DMA_BIT_MASK(40));
+ if (!err)
+ err = dma_coerce_mask_and_coherent(priv->tx_dma_dev,
+ DMA_BIT_MASK(40));
+ if (err) {
+ netdev_err(net_dev, "dma_coerce_mask_and_coherent() failed\n");
+ return err;
+ }
+
/* If fsl_fm_max_frm is set to a higher value than the all-common 1500,
* we choose conservatively and let the user explicitly set a higher
* MTU via ifconfig. Otherwise, the user may end up with different MTUs
@@ -2822,23 +2834,21 @@ static int dpaa_eth_probe(struct platform_device *pdev)
priv->buf_layout[TX].priv_data_size = DPAA_TX_PRIV_DATA_SIZE; /* Tx */
/* bp init */
- for (i = 0; i < DPAA_BPS_NUM; i++) {
- dpaa_bps[i] = dpaa_bp_alloc(dev);
- if (IS_ERR(dpaa_bps[i])) {
- err = PTR_ERR(dpaa_bps[i]);
- goto free_dpaa_bps;
- }
- /* the raw size of the buffers used for reception */
- dpaa_bps[i]->raw_size = bpool_buffer_raw_size(i, DPAA_BPS_NUM);
- /* avoid runtime computations by keeping the usable size here */
- dpaa_bps[i]->size = dpaa_bp_size(dpaa_bps[i]->raw_size);
- dpaa_bps[i]->dev = dev;
-
- err = dpaa_bp_alloc_pool(dpaa_bps[i]);
- if (err < 0)
- goto free_dpaa_bps;
- priv->dpaa_bps[i] = dpaa_bps[i];
+ dpaa_bp = dpaa_bp_alloc(dev);
+ if (IS_ERR(dpaa_bp)) {
+ err = PTR_ERR(dpaa_bp);
+ goto free_dpaa_bps;
}
+ /* the raw size of the buffers used for reception */
+ dpaa_bp->raw_size = DPAA_BP_RAW_SIZE;
+ /* avoid runtime computations by keeping the usable size here */
+ dpaa_bp->size = dpaa_bp_size(dpaa_bp->raw_size);
+ dpaa_bp->priv = priv;
+
+ err = dpaa_bp_alloc_pool(dpaa_bp);
+ if (err < 0)
+ goto free_dpaa_bps;
+ priv->dpaa_bp = dpaa_bp;
INIT_LIST_HEAD(&priv->dpaa_fq_list);
@@ -2864,7 +2874,7 @@ static int dpaa_eth_probe(struct platform_device *pdev)
/* Walk the CPUs with affine portals
* and add this pool channel to each's dequeue mask.
*/
- dpaa_eth_add_channel(priv->channel);
+ dpaa_eth_add_channel(priv->channel, &pdev->dev);
dpaa_fq_setup(priv, &dpaa_fq_cbs, priv->mac_dev->port[TX]);
@@ -2896,7 +2906,7 @@ static int dpaa_eth_probe(struct platform_device *pdev)
priv->rx_headroom = dpaa_get_headroom(&priv->buf_layout[RX]);
/* All real interfaces need their ports initialized */
- err = dpaa_eth_init_ports(mac_dev, dpaa_bps, DPAA_BPS_NUM, &port_fqs,
+ err = dpaa_eth_init_ports(mac_dev, dpaa_bp, &port_fqs,
&priv->buf_layout[0], dev);
if (err)
goto free_dpaa_fqs;
@@ -2955,7 +2965,7 @@ static int dpaa_remove(struct platform_device *pdev)
struct device *dev;
int err;
- dev = pdev->dev.parent;
+ dev = &pdev->dev;
net_dev = dev_get_drvdata(dev);
priv = netdev_priv(net_dev);
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
index f7e59e8db075..fc2cc4c48e06 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
@@ -47,8 +47,6 @@
/* Total number of Tx queues */
#define DPAA_ETH_TXQ_NUM (DPAA_TC_NUM * DPAA_TC_TXQ_NUM)
-#define DPAA_BPS_NUM 3 /* number of bpools per interface */
-
/* More detailed FQ types - used for fine-grained WQ assignments */
enum dpaa_fq_type {
FQ_TYPE_RX_DEFAULT = 1, /* Rx Default FQs */
@@ -80,9 +78,11 @@ struct dpaa_fq_cbs {
struct qman_fq egress_ern;
};
+struct dpaa_priv;
+
struct dpaa_bp {
- /* device used in the DMA mapping operations */
- struct device *dev;
+ /* used in the DMA mapping operations */
+ struct dpaa_priv *priv;
/* current number of buffers in the buffer pool alloted to each CPU */
int __percpu *percpu_count;
/* all buffers allocated for this pool have this raw size */
@@ -146,13 +146,15 @@ struct dpaa_buffer_layout {
struct dpaa_priv {
struct dpaa_percpu_priv __percpu *percpu_priv;
- struct dpaa_bp *dpaa_bps[DPAA_BPS_NUM];
+ struct dpaa_bp *dpaa_bp;
/* Store here the needed Tx headroom for convenience and speed
* (even though it can be computed based on the fields of buf_layout)
*/
u16 tx_headroom;
struct net_device *net_dev;
struct mac_device *mac_dev;
+ struct device *rx_dma_dev;
+ struct device *tx_dma_dev;
struct qman_fq *egress_fqs[DPAA_ETH_TXQ_NUM];
struct qman_fq *conf_fqs[DPAA_ETH_TXQ_NUM];
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c
index 0d9b185e317f..ee62d25cac81 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c
@@ -131,11 +131,9 @@ static ssize_t dpaa_eth_show_bpids(struct device *dev,
{
struct dpaa_priv *priv = netdev_priv(to_net_dev(dev));
ssize_t bytes = 0;
- int i = 0;
- for (i = 0; i < DPAA_BPS_NUM; i++)
- bytes += snprintf(buf + bytes, PAGE_SIZE - bytes, "%u\n",
- priv->dpaa_bps[i]->bpid);
+ bytes += snprintf(buf + bytes, PAGE_SIZE - bytes, "%u\n",
+ priv->dpaa_bp->bpid);
return bytes;
}
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
index 7ce2e99b594d..66d150872d48 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
@@ -47,6 +47,8 @@ static const char dpaa_stats_percpu[][ETH_GSTRING_LEN] = {
"tx S/G",
"tx error",
"rx error",
+ "rx dropped",
+ "tx dropped",
};
static char dpaa_stats_global[][ETH_GSTRING_LEN] = {
@@ -78,10 +80,8 @@ static char dpaa_stats_global[][ETH_GSTRING_LEN] = {
static int dpaa_get_link_ksettings(struct net_device *net_dev,
struct ethtool_link_ksettings *cmd)
{
- if (!net_dev->phydev) {
- netdev_dbg(net_dev, "phy device not initialized\n");
+ if (!net_dev->phydev)
return 0;
- }
phy_ethtool_ksettings_get(net_dev->phydev, cmd);
@@ -93,10 +93,8 @@ static int dpaa_set_link_ksettings(struct net_device *net_dev,
{
int err;
- if (!net_dev->phydev) {
- netdev_err(net_dev, "phy device not initialized\n");
+ if (!net_dev->phydev)
return -ENODEV;
- }
err = phy_ethtool_ksettings_set(net_dev->phydev, cmd);
if (err < 0)
@@ -140,10 +138,8 @@ static int dpaa_nway_reset(struct net_device *net_dev)
{
int err;
- if (!net_dev->phydev) {
- netdev_err(net_dev, "phy device not initialized\n");
+ if (!net_dev->phydev)
return -ENODEV;
- }
err = 0;
if (net_dev->phydev->autoneg) {
@@ -165,10 +161,8 @@ static void dpaa_get_pauseparam(struct net_device *net_dev,
priv = netdev_priv(net_dev);
mac_dev = priv->mac_dev;
- if (!net_dev->phydev) {
- netdev_err(net_dev, "phy device not initialized\n");
+ if (!net_dev->phydev)
return;
- }
epause->autoneg = mac_dev->autoneg_pause;
epause->rx_pause = mac_dev->rx_pause_active;
@@ -223,7 +217,7 @@ static int dpaa_get_sset_count(struct net_device *net_dev, int type)
unsigned int total_stats, num_stats;
num_stats = num_online_cpus() + 1;
- total_stats = num_stats * (DPAA_STATS_PERCPU_LEN + DPAA_BPS_NUM) +
+ total_stats = num_stats * (DPAA_STATS_PERCPU_LEN + 1) +
DPAA_STATS_GLOBAL_LEN;
switch (type) {
@@ -235,10 +229,10 @@ static int dpaa_get_sset_count(struct net_device *net_dev, int type)
}
static void copy_stats(struct dpaa_percpu_priv *percpu_priv, int num_cpus,
- int crr_cpu, u64 *bp_count, u64 *data)
+ int crr_cpu, u64 bp_count, u64 *data)
{
int num_values = num_cpus + 1;
- int crr = 0, j;
+ int crr = 0;
/* update current CPU's stats and also add them to the total values */
data[crr * num_values + crr_cpu] = percpu_priv->in_interrupt;
@@ -262,23 +256,27 @@ static void copy_stats(struct dpaa_percpu_priv *percpu_priv, int num_cpus,
data[crr * num_values + crr_cpu] = percpu_priv->stats.rx_errors;
data[crr++ * num_values + num_cpus] += percpu_priv->stats.rx_errors;
- for (j = 0; j < DPAA_BPS_NUM; j++) {
- data[crr * num_values + crr_cpu] = bp_count[j];
- data[crr++ * num_values + num_cpus] += bp_count[j];
- }
+ data[crr * num_values + crr_cpu] = percpu_priv->stats.rx_dropped;
+ data[crr++ * num_values + num_cpus] += percpu_priv->stats.rx_dropped;
+
+ data[crr * num_values + crr_cpu] = percpu_priv->stats.tx_dropped;
+ data[crr++ * num_values + num_cpus] += percpu_priv->stats.tx_dropped;
+
+ data[crr * num_values + crr_cpu] = bp_count;
+ data[crr++ * num_values + num_cpus] += bp_count;
}
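	/* Worked example of the indexing above with two online CPUs
	 * (num_values == 3): statistic row 'crr' occupies
	 *   data[crr * 3 + 0]  -> CPU 0's value
	 *   data[crr * 3 + 1]  -> CPU 1's value
	 *   data[crr * 3 + 2]  -> running total across CPUs
	 * so the single bpool count adds exactly one such row.
	 */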
static void dpaa_get_ethtool_stats(struct net_device *net_dev,
struct ethtool_stats *stats, u64 *data)
{
- u64 bp_count[DPAA_BPS_NUM], cg_time, cg_num;
struct dpaa_percpu_priv *percpu_priv;
struct dpaa_rx_errors rx_errors;
unsigned int num_cpus, offset;
+ u64 bp_count, cg_time, cg_num;
struct dpaa_ern_cnt ern_cnt;
struct dpaa_bp *dpaa_bp;
struct dpaa_priv *priv;
- int total_stats, i, j;
+ int total_stats, i;
bool cg_status;
total_stats = dpaa_get_sset_count(net_dev, ETH_SS_STATS);
@@ -292,12 +290,10 @@ static void dpaa_get_ethtool_stats(struct net_device *net_dev,
for_each_online_cpu(i) {
percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
- for (j = 0; j < DPAA_BPS_NUM; j++) {
- dpaa_bp = priv->dpaa_bps[j];
- if (!dpaa_bp->percpu_count)
- continue;
- bp_count[j] = *(per_cpu_ptr(dpaa_bp->percpu_count, i));
- }
+ dpaa_bp = priv->dpaa_bp;
+ if (!dpaa_bp->percpu_count)
+ continue;
+ bp_count = *(per_cpu_ptr(dpaa_bp->percpu_count, i));
rx_errors.dme += percpu_priv->rx_errors.dme;
rx_errors.fpe += percpu_priv->rx_errors.fpe;
rx_errors.fse += percpu_priv->rx_errors.fse;
@@ -315,7 +311,7 @@ static void dpaa_get_ethtool_stats(struct net_device *net_dev,
copy_stats(percpu_priv, num_cpus, i, bp_count, data);
}
- offset = (num_cpus + 1) * (DPAA_STATS_PERCPU_LEN + DPAA_BPS_NUM);
+ offset = (num_cpus + 1) * (DPAA_STATS_PERCPU_LEN + 1);
memcpy(data + offset, &rx_errors, sizeof(struct dpaa_rx_errors));
offset += sizeof(struct dpaa_rx_errors) / sizeof(u64);
@@ -363,18 +359,16 @@ static void dpaa_get_strings(struct net_device *net_dev, u32 stringset,
memcpy(strings, string_cpu, ETH_GSTRING_LEN);
strings += ETH_GSTRING_LEN;
}
- for (i = 0; i < DPAA_BPS_NUM; i++) {
- for (j = 0; j < num_cpus; j++) {
- snprintf(string_cpu, ETH_GSTRING_LEN,
- "bpool %c [CPU %d]", 'a' + i, j);
- memcpy(strings, string_cpu, ETH_GSTRING_LEN);
- strings += ETH_GSTRING_LEN;
- }
- snprintf(string_cpu, ETH_GSTRING_LEN, "bpool %c [TOTAL]",
- 'a' + i);
+ for (j = 0; j < num_cpus; j++) {
+ snprintf(string_cpu, ETH_GSTRING_LEN,
+ "bpool [CPU %d]", j);
memcpy(strings, string_cpu, ETH_GSTRING_LEN);
strings += ETH_GSTRING_LEN;
}
+ snprintf(string_cpu, ETH_GSTRING_LEN, "bpool [TOTAL]");
+ memcpy(strings, string_cpu, ETH_GSTRING_LEN);
+ strings += ETH_GSTRING_LEN;
+
memcpy(strings, dpaa_stats_global, size);
}
diff --git a/drivers/net/ethernet/freescale/dpaa2/Kconfig b/drivers/net/ethernet/freescale/dpaa2/Kconfig
index fbef2829f3de..c6fb8e4021ac 100644
--- a/drivers/net/ethernet/freescale/dpaa2/Kconfig
+++ b/drivers/net/ethernet/freescale/dpaa2/Kconfig
@@ -2,6 +2,7 @@
config FSL_DPAA2_ETH
tristate "Freescale DPAA2 Ethernet"
depends on FSL_MC_BUS && FSL_MC_DPIO
+ select PHYLINK
help
This is the DPAA2 Ethernet driver supporting Freescale SoCs
with DPAA2 (DataPath Acceleration Architecture v2).
diff --git a/drivers/net/ethernet/freescale/dpaa2/Makefile b/drivers/net/ethernet/freescale/dpaa2/Makefile
index d1e78cdd512f..69184ca3b7b9 100644
--- a/drivers/net/ethernet/freescale/dpaa2/Makefile
+++ b/drivers/net/ethernet/freescale/dpaa2/Makefile
@@ -6,7 +6,7 @@
obj-$(CONFIG_FSL_DPAA2_ETH) += fsl-dpaa2-eth.o
obj-$(CONFIG_FSL_DPAA2_PTP_CLOCK) += fsl-dpaa2-ptp.o
-fsl-dpaa2-eth-objs := dpaa2-eth.o dpaa2-ethtool.o dpni.o
+fsl-dpaa2-eth-objs := dpaa2-eth.o dpaa2-ethtool.o dpni.o dpaa2-mac.o dpmac.o
fsl-dpaa2-eth-${CONFIG_DEBUG_FS} += dpaa2-eth-debugfs.o
fsl-dpaa2-ptp-objs := dpaa2-ptp.o dprtc.o
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index 19379bae0144..7ff147e89426 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2014-2016 Freescale Semiconductor Inc.
- * Copyright 2016-2017 NXP
+ * Copyright 2016-2019 NXP
*/
#include <linux/init.h>
#include <linux/module.h>
@@ -221,6 +221,7 @@ static void xdp_release_buf(struct dpaa2_eth_priv *priv,
struct dpaa2_eth_channel *ch,
dma_addr_t addr)
{
+ int retries = 0;
int err;
ch->xdp.drop_bufs[ch->xdp.drop_cnt++] = addr;
@@ -229,8 +230,11 @@ static void xdp_release_buf(struct dpaa2_eth_priv *priv,
while ((err = dpaa2_io_service_release(ch->dpio, priv->bpid,
ch->xdp.drop_bufs,
- ch->xdp.drop_cnt)) == -EBUSY)
+ ch->xdp.drop_cnt)) == -EBUSY) {
+ if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES)
+ break;
cpu_relax();
+ }
if (err) {
free_bufs(priv, ch->xdp.drop_bufs, ch->xdp.drop_cnt);
@@ -458,7 +462,7 @@ static int consume_frames(struct dpaa2_eth_channel *ch,
struct dpaa2_eth_fq *fq = NULL;
struct dpaa2_dq *dq;
const struct dpaa2_fd *fd;
- int cleaned = 0;
+ int cleaned = 0, retries = 0;
int is_last;
do {
@@ -469,6 +473,11 @@ static int consume_frames(struct dpaa2_eth_channel *ch,
* the store until we get some sort of valid response
* token (either a valid frame or an "empty dequeue")
*/
+ if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES) {
+ netdev_err_once(priv->net_dev,
+ "Unable to read a valid dequeue response\n");
+ return -ETIMEDOUT;
+ }
continue;
}
@@ -477,6 +486,7 @@ static int consume_frames(struct dpaa2_eth_channel *ch,
fq->consume(priv, ch, fd, fq);
cleaned++;
+ retries = 0;
} while (!is_last);
if (!cleaned)
@@ -949,6 +959,7 @@ static int add_bufs(struct dpaa2_eth_priv *priv,
u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
struct page *page;
dma_addr_t addr;
+ int retries = 0;
int i, err;
for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) {
@@ -980,8 +991,11 @@ static int add_bufs(struct dpaa2_eth_priv *priv,
release_bufs:
/* In case the portal is busy, retry until successful */
while ((err = dpaa2_io_service_release(ch->dpio, bpid,
- buf_array, i)) == -EBUSY)
+ buf_array, i)) == -EBUSY) {
+ if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES)
+ break;
cpu_relax();
+ }
/* If release command failed, clean up and bail out;
* not much else we can do about it
@@ -1032,16 +1046,21 @@ static int seed_pool(struct dpaa2_eth_priv *priv, u16 bpid)
static void drain_bufs(struct dpaa2_eth_priv *priv, int count)
{
u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
+ int retries = 0;
int ret;
do {
ret = dpaa2_io_service_acquire(NULL, priv->bpid,
buf_array, count);
if (ret < 0) {
+ if (ret == -EBUSY &&
+ retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES)
+ continue;
netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n");
return;
}
free_bufs(priv, buf_array, ret);
+ retries = 0;
} while (ret);
}
@@ -1094,7 +1113,7 @@ static int pull_channel(struct dpaa2_eth_channel *ch)
ch->store);
dequeues++;
cpu_relax();
- } while (err == -EBUSY);
+ } while (err == -EBUSY && dequeues < DPAA2_ETH_SWP_BUSY_RETRIES);
ch->stats.dequeue_portal_busy += dequeues;
if (unlikely(err))
@@ -1118,6 +1137,7 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
struct netdev_queue *nq;
int store_cleaned, work_done;
struct list_head rx_list;
+ int retries = 0;
int err;
ch = container_of(napi, struct dpaa2_eth_channel, napi);
@@ -1136,7 +1156,7 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
refill_pool(priv, ch, priv->bpid);
store_cleaned = consume_frames(ch, &fq);
- if (!store_cleaned)
+ if (store_cleaned <= 0)
break;
if (fq->type == DPAA2_RX_FQ) {
rx_cleaned += store_cleaned;
@@ -1163,7 +1183,7 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
do {
err = dpaa2_io_service_rearm(ch->dpio, &ch->nctx);
cpu_relax();
- } while (err == -EBUSY);
+ } while (err == -EBUSY && retries++ < DPAA2_ETH_SWP_BUSY_RETRIES);
WARN_ONCE(err, "CDAN notifications rearm failed on core %d",
ch->nctx.desired_cpu);
@@ -1235,8 +1255,6 @@ static void dpaa2_eth_set_rx_taildrop(struct dpaa2_eth_priv *priv, bool enable)
priv->rx_td_enabled = enable;
}
-static void update_tx_fqids(struct dpaa2_eth_priv *priv);
-
static int link_state_update(struct dpaa2_eth_priv *priv)
{
struct dpni_link_state state = {0};
@@ -1258,12 +1276,17 @@ static int link_state_update(struct dpaa2_eth_priv *priv)
!!(state.options & DPNI_LINK_OPT_ASYM_PAUSE);
dpaa2_eth_set_rx_taildrop(priv, !tx_pause);
+ /* When we manage the MAC/PHY using phylink, there is no need
+ * to manually update the netif_carrier state.
+ */
+ if (priv->mac)
+ goto out;
+
/* Check link state; speed / duplex changes are not treated yet */
if (priv->link_state.up == state.up)
goto out;
if (state.up) {
- update_tx_fqids(priv);
netif_carrier_on(priv->net_dev);
netif_tx_start_all_queues(priv->net_dev);
} else {
@@ -1295,17 +1318,21 @@ static int dpaa2_eth_open(struct net_device *net_dev)
priv->dpbp_dev->obj_desc.id, priv->bpid);
}
- /* We'll only start the txqs when the link is actually ready; make sure
- * we don't race against the link up notification, which may come
- * immediately after dpni_enable();
- */
- netif_tx_stop_all_queues(net_dev);
+ if (!priv->mac) {
+ /* We'll only start the txqs when the link is actually ready;
+ * make sure we don't race against the link up notification,
+ * which may come immediately after dpni_enable();
+ */
+ netif_tx_stop_all_queues(net_dev);
+
+ /* Also, explicitly set carrier off, otherwise
+ * netif_carrier_ok() will return true and cause 'ip link show'
+ * to report the LOWER_UP flag, even though the link
+ * notification wasn't even received.
+ */
+ netif_carrier_off(net_dev);
+ }
enable_ch_napi(priv);
- /* Also, explicitly set carrier off, otherwise netif_carrier_ok() will
- * return true and cause 'ip link show' to report the LOWER_UP flag,
- * even though the link notification wasn't even received.
- */
- netif_carrier_off(net_dev);
err = dpni_enable(priv->mc_io, 0, priv->mc_token);
if (err < 0) {
@@ -1313,13 +1340,17 @@ static int dpaa2_eth_open(struct net_device *net_dev)
goto enable_err;
}
- /* If the DPMAC object has already processed the link up interrupt,
- * we have to learn the link state ourselves.
- */
- err = link_state_update(priv);
- if (err < 0) {
- netdev_err(net_dev, "Can't update link state\n");
- goto link_state_err;
+ if (!priv->mac) {
+ /* If the DPMAC object has already processed the link up
+ * interrupt, we have to learn the link state ourselves.
+ */
+ err = link_state_update(priv);
+ if (err < 0) {
+ netdev_err(net_dev, "Can't update link state\n");
+ goto link_state_err;
+ }
+ } else {
+ phylink_start(priv->mac->phylink);
}
return 0;
@@ -1394,8 +1425,12 @@ static int dpaa2_eth_stop(struct net_device *net_dev)
int dpni_enabled = 0;
int retries = 10;
- netif_tx_stop_all_queues(net_dev);
- netif_carrier_off(net_dev);
+ if (!priv->mac) {
+ netif_tx_stop_all_queues(net_dev);
+ netif_carrier_off(net_dev);
+ } else {
+ phylink_stop(priv->mac->phylink);
+ }
/* On dpni_disable(), the MC firmware will:
* - stop MAC Rx and wait for all Rx frames to be enqueued to software
@@ -1772,11 +1807,8 @@ static int setup_xdp(struct net_device *dev, struct bpf_prog *prog)
if (prog && !xdp_mtu_valid(priv, dev->mtu))
return -EINVAL;
- if (prog) {
- prog = bpf_prog_add(prog, priv->num_channels);
- if (IS_ERR(prog))
- return PTR_ERR(prog);
- }
+ if (prog)
+ bpf_prog_add(prog, priv->num_channels);
up = netif_running(dev);
need_update = (!!priv->xdp_prog != !!prog);
@@ -2046,7 +2078,6 @@ static struct fsl_mc_device *setup_dpcon(struct dpaa2_eth_priv *priv)
{
struct fsl_mc_device *dpcon;
struct device *dev = priv->net_dev->dev.parent;
- struct dpcon_attr attrs;
int err;
err = fsl_mc_object_allocate(to_fsl_mc_device(dev),
@@ -2071,12 +2102,6 @@ static struct fsl_mc_device *setup_dpcon(struct dpaa2_eth_priv *priv)
goto close;
}
- err = dpcon_get_attributes(priv->mc_io, 0, dpcon->mc_handle, &attrs);
- if (err) {
- dev_err(dev, "dpcon_get_attributes() failed\n");
- goto close;
- }
-
err = dpcon_enable(priv->mc_io, 0, dpcon->mc_handle);
if (err) {
dev_err(dev, "dpcon_enable() failed\n");
@@ -2232,8 +2257,16 @@ err_set_cdan:
err_service_reg:
free_channel(priv, channel);
err_alloc_ch:
- if (err == -EPROBE_DEFER)
+ if (err == -EPROBE_DEFER) {
+ for (i = 0; i < priv->num_channels; i++) {
+ channel = priv->channel[i];
+ nctx = &channel->nctx;
+ dpaa2_io_service_deregister(channel->dpio, nctx, dev);
+ free_channel(priv, channel);
+ }
+ priv->num_channels = 0;
return err;
+ }
if (cpumask_empty(&priv->dpio_cpumask)) {
dev_err(dev, "No cpu with an affine DPIO/DPCON\n");
@@ -3332,12 +3365,56 @@ static int poll_link_state(void *arg)
return 0;
}
+static int dpaa2_eth_connect_mac(struct dpaa2_eth_priv *priv)
+{
+ struct fsl_mc_device *dpni_dev, *dpmac_dev;
+ struct dpaa2_mac *mac;
+ int err;
+
+ dpni_dev = to_fsl_mc_device(priv->net_dev->dev.parent);
+ dpmac_dev = fsl_mc_get_endpoint(dpni_dev);
+ if (IS_ERR(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type)
+ return 0;
+
+ if (dpaa2_mac_is_type_fixed(dpmac_dev, priv->mc_io))
+ return 0;
+
+ mac = kzalloc(sizeof(struct dpaa2_mac), GFP_KERNEL);
+ if (!mac)
+ return -ENOMEM;
+
+ mac->mc_dev = dpmac_dev;
+ mac->mc_io = priv->mc_io;
+ mac->net_dev = priv->net_dev;
+
+ err = dpaa2_mac_connect(mac);
+ if (err) {
+ netdev_err(priv->net_dev, "Error connecting to the MAC endpoint\n");
+ kfree(mac);
+ return err;
+ }
+ priv->mac = mac;
+
+ return 0;
+}
+
+static void dpaa2_eth_disconnect_mac(struct dpaa2_eth_priv *priv)
+{
+ if (!priv->mac)
+ return;
+
+ dpaa2_mac_disconnect(priv->mac);
+ kfree(priv->mac);
+ priv->mac = NULL;
+}
+
static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
{
u32 status = ~0;
struct device *dev = (struct device *)arg;
struct fsl_mc_device *dpni_dev = to_fsl_mc_device(dev);
struct net_device *net_dev = dev_get_drvdata(dev);
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
int err;
err = dpni_get_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle,
@@ -3350,8 +3427,17 @@ static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
if (status & DPNI_IRQ_EVENT_LINK_CHANGED)
link_state_update(netdev_priv(net_dev));
- if (status & DPNI_IRQ_EVENT_ENDPOINT_CHANGED)
+ if (status & DPNI_IRQ_EVENT_ENDPOINT_CHANGED) {
set_mac_addr(netdev_priv(net_dev));
+ update_tx_fqids(priv);
+
+ rtnl_lock();
+ if (priv->mac)
+ dpaa2_eth_disconnect_mac(priv);
+ else
+ dpaa2_eth_connect_mac(priv);
+ rtnl_unlock();
+ }
return IRQ_HANDLED;
}
@@ -3527,6 +3613,10 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
priv->do_link_poll = true;
}
+ err = dpaa2_eth_connect_mac(priv);
+ if (err)
+ goto err_connect_mac;
+
err = register_netdev(net_dev);
if (err < 0) {
dev_err(dev, "register_netdev() failed\n");
@@ -3541,6 +3631,8 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
return 0;
err_netdev_reg:
+ dpaa2_eth_disconnect_mac(priv);
+err_connect_mac:
if (priv->do_link_poll)
kthread_stop(priv->poll_thread);
else
@@ -3583,6 +3675,10 @@ static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
#ifdef CONFIG_DEBUG_FS
dpaa2_dbg_remove(priv);
#endif
+ rtnl_lock();
+ dpaa2_eth_disconnect_mac(priv);
+ rtnl_unlock();
+
unregister_netdev(net_dev);
if (priv->do_link_poll)
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
index 8a0e65b3267f..7635db3ef903 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
@@ -17,6 +17,7 @@
#include "dpaa2-eth-trace.h"
#include "dpaa2-eth-debugfs.h"
+#include "dpaa2-mac.h"
#define DPAA2_WRIOP_VERSION(x, y, z) ((x) << 10 | (y) << 5 | (z) << 0)
@@ -245,6 +246,14 @@ static inline struct dpaa2_faead *dpaa2_get_faead(void *buf_addr, bool swa)
*/
#define DPAA2_ETH_ENQUEUE_RETRIES 10
+/* Number of times to retry DPIO portal operations while waiting
+ * for the portal to finish executing the current command and become
+ * available. We want to avoid being stuck in a while loop in case the
+ * hardware becomes unresponsive, but not to give up too easily if the
+ * portal really is busy for valid reasons.
+ */
+#define DPAA2_ETH_SWP_BUSY_RETRIES 1000
+
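/* Illustration only, not part of this patch: the bounded busy-wait idiom
 * that DPAA2_ETH_SWP_BUSY_RETRIES enables. do_portal_op() is a
 * hypothetical stand-in for the dpaa2_io_service_* calls that return
 * -EBUSY while the software portal is busy.
 */
static inline int dpaa2_eth_swp_retry(int (*do_portal_op)(void *arg),
				      void *arg)
{
	int retries = 0;
	int err;

	while ((err = do_portal_op(arg)) == -EBUSY) {
		if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES)
			break;
		cpu_relax();
	}
	return err;
}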
/* Driver statistics, other than those in struct rtnl_link_stats64.
* These are usually collected per-CPU and aggregated by ethtool.
*/
@@ -407,6 +416,8 @@ struct dpaa2_eth_priv {
#ifdef CONFIG_DEBUG_FS
struct dpaa2_debugfs dbg;
#endif
+
+ struct dpaa2_mac *mac;
};
#define DPAA2_RXH_SUPPORTED (RXH_L2DA | RXH_VLAN | RXH_L3_PROTO \
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
index 0aa1c34019bb..96676abcebd5 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
@@ -85,6 +85,10 @@ dpaa2_eth_get_link_ksettings(struct net_device *net_dev,
{
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ if (priv->mac)
+ return phylink_ethtool_ksettings_get(priv->mac->phylink,
+ link_settings);
+
link_settings->base.autoneg = AUTONEG_DISABLE;
if (!(priv->link_state.options & DPNI_LINK_OPT_HALF_DUPLEX))
link_settings->base.duplex = DUPLEX_FULL;
@@ -93,12 +97,29 @@ dpaa2_eth_get_link_ksettings(struct net_device *net_dev,
return 0;
}
+static int
+dpaa2_eth_set_link_ksettings(struct net_device *net_dev,
+ const struct ethtool_link_ksettings *link_settings)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+
+ if (!priv->mac)
+ return -ENOTSUPP;
+
+ return phylink_ethtool_ksettings_set(priv->mac->phylink, link_settings);
+}
+
static void dpaa2_eth_get_pauseparam(struct net_device *net_dev,
struct ethtool_pauseparam *pause)
{
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
u64 link_options = priv->link_state.options;
+ if (priv->mac) {
+ phylink_ethtool_get_pauseparam(priv->mac->phylink, pause);
+ return;
+ }
+
pause->rx_pause = !!(link_options & DPNI_LINK_OPT_PAUSE);
pause->tx_pause = pause->rx_pause ^
!!(link_options & DPNI_LINK_OPT_ASYM_PAUSE);
@@ -118,6 +139,9 @@ static int dpaa2_eth_set_pauseparam(struct net_device *net_dev,
return -EOPNOTSUPP;
}
+ if (priv->mac)
+ return phylink_ethtool_set_pauseparam(priv->mac->phylink,
+ pause);
if (pause->autoneg)
return -EOPNOTSUPP;
@@ -149,6 +173,7 @@ static int dpaa2_eth_set_pauseparam(struct net_device *net_dev,
static void dpaa2_eth_get_strings(struct net_device *netdev, u32 stringset,
u8 *data)
{
+ struct dpaa2_eth_priv *priv = netdev_priv(netdev);
u8 *p = data;
int i;
@@ -162,15 +187,22 @@ static void dpaa2_eth_get_strings(struct net_device *netdev, u32 stringset,
strlcpy(p, dpaa2_ethtool_extras[i], ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
+ if (priv->mac)
+ dpaa2_mac_get_strings(p);
break;
}
}
static int dpaa2_eth_get_sset_count(struct net_device *net_dev, int sset)
{
+ int num_ss_stats = DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS;
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+
switch (sset) {
case ETH_SS_STATS: /* ethtool_get_stats(), ethtool_get_drvinfo() */
- return DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS;
+ if (priv->mac)
+ num_ss_stats += dpaa2_mac_get_sset_count();
+ return num_ss_stats;
default:
return -EOPNOTSUPP;
}
@@ -216,7 +248,7 @@ static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
if (err == -EINVAL)
/* Older firmware versions don't support all pages */
memset(&dpni_stats, 0, sizeof(dpni_stats));
- else
+ else if (err)
netdev_warn(net_dev, "dpni_get_stats(%d) failed\n", j);
num_cnt = dpni_stats_page_size[j] / sizeof(u64);
@@ -269,6 +301,9 @@ static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
return;
}
*(data + i++) = buf_cnt;
+
+ if (priv->mac)
+ dpaa2_mac_get_ethtool_stats(priv->mac, data + i);
}
static int prep_eth_rule(struct ethhdr *eth_value, struct ethhdr *eth_mask,
@@ -728,6 +763,7 @@ const struct ethtool_ops dpaa2_ethtool_ops = {
.get_drvinfo = dpaa2_eth_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_link_ksettings = dpaa2_eth_get_link_ksettings,
+ .set_link_ksettings = dpaa2_eth_set_link_ksettings,
.get_pauseparam = dpaa2_eth_get_pauseparam,
.set_pauseparam = dpaa2_eth_set_pauseparam,
.get_sset_count = dpaa2_eth_get_sset_count,
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
new file mode 100644
index 000000000000..84233e467ed1
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
@@ -0,0 +1,375 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2019 NXP */
+
+#include "dpaa2-eth.h"
+#include "dpaa2-mac.h"
+
+#define phylink_to_dpaa2_mac(config) \
+ container_of((config), struct dpaa2_mac, phylink_config)
+
+static int phy_mode(enum dpmac_eth_if eth_if, phy_interface_t *if_mode)
+{
+ *if_mode = PHY_INTERFACE_MODE_NA;
+
+ switch (eth_if) {
+ case DPMAC_ETH_IF_RGMII:
+ *if_mode = PHY_INTERFACE_MODE_RGMII;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Caller must call of_node_put on the returned value */
+static struct device_node *dpaa2_mac_get_node(u16 dpmac_id)
+{
+ struct device_node *dpmacs, *dpmac = NULL;
+ u32 id;
+ int err;
+
+ dpmacs = of_find_node_by_name(NULL, "dpmacs");
+ if (!dpmacs)
+ return NULL;
+
+ while ((dpmac = of_get_next_child(dpmacs, dpmac)) != NULL) {
+ err = of_property_read_u32(dpmac, "reg", &id);
+ if (err)
+ continue;
+ if (id == dpmac_id)
+ break;
+ }
+
+ of_node_put(dpmacs);
+
+ return dpmac;
+}
+
+static int dpaa2_mac_get_if_mode(struct device_node *node,
+ struct dpmac_attr attr)
+{
+ phy_interface_t if_mode;
+ int err;
+
+ err = of_get_phy_mode(node, &if_mode);
+ if (!err)
+ return if_mode;
+
+ err = phy_mode(attr.eth_if, &if_mode);
+ if (!err)
+ return if_mode;
+
+ return err;
+}
+
+static bool dpaa2_mac_phy_mode_mismatch(struct dpaa2_mac *mac,
+ phy_interface_t interface)
+{
+ switch (interface) {
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ return (interface != mac->if_mode);
+ default:
+ return true;
+ }
+}
+
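/* Example of the check above: with mac->if_mode == PHY_INTERFACE_MODE_RGMII_ID,
 * only state->interface == PHY_INTERFACE_MODE_RGMII_ID is accepted; the other
 * RGMII variants compare unequal, and any non-RGMII interface is always
 * reported as a mismatch via the default case.
 */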
+static void dpaa2_mac_validate(struct phylink_config *config,
+ unsigned long *supported,
+ struct phylink_link_state *state)
+{
+ struct dpaa2_mac *mac = phylink_to_dpaa2_mac(config);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+
+ if (state->interface != PHY_INTERFACE_MODE_NA &&
+ dpaa2_mac_phy_mode_mismatch(mac, state->interface)) {
+ goto empty_set;
+ }
+
+ phylink_set_port_modes(mask);
+ phylink_set(mask, Autoneg);
+ phylink_set(mask, Pause);
+ phylink_set(mask, Asym_Pause);
+
+ switch (state->interface) {
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ phylink_set(mask, 10baseT_Full);
+ phylink_set(mask, 100baseT_Full);
+ phylink_set(mask, 1000baseT_Full);
+ break;
+ default:
+ goto empty_set;
+ }
+
+ linkmode_and(supported, supported, mask);
+ linkmode_and(state->advertising, state->advertising, mask);
+
+ return;
+
+empty_set:
+ linkmode_zero(supported);
+}
+
+static void dpaa2_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct dpaa2_mac *mac = phylink_to_dpaa2_mac(config);
+ struct dpmac_link_state *dpmac_state = &mac->state;
+ int err;
+
+ if (state->speed != SPEED_UNKNOWN)
+ dpmac_state->rate = state->speed;
+
+ if (state->duplex != DUPLEX_UNKNOWN) {
+ if (!state->duplex)
+ dpmac_state->options |= DPMAC_LINK_OPT_HALF_DUPLEX;
+ else
+ dpmac_state->options &= ~DPMAC_LINK_OPT_HALF_DUPLEX;
+ }
+
+ if (state->an_enabled)
+ dpmac_state->options |= DPMAC_LINK_OPT_AUTONEG;
+ else
+ dpmac_state->options &= ~DPMAC_LINK_OPT_AUTONEG;
+
+ if (state->pause & MLO_PAUSE_RX)
+ dpmac_state->options |= DPMAC_LINK_OPT_PAUSE;
+ else
+ dpmac_state->options &= ~DPMAC_LINK_OPT_PAUSE;
+
+ if (!!(state->pause & MLO_PAUSE_RX) ^ !!(state->pause & MLO_PAUSE_TX))
+ dpmac_state->options |= DPMAC_LINK_OPT_ASYM_PAUSE;
+ else
+ dpmac_state->options &= ~DPMAC_LINK_OPT_ASYM_PAUSE;
+
+ err = dpmac_set_link_state(mac->mc_io, 0,
+ mac->mc_dev->mc_handle, dpmac_state);
+ if (err)
+ netdev_err(mac->net_dev, "dpmac_set_link_state() = %d\n", err);
+}
+
+static void dpaa2_mac_link_up(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface, struct phy_device *phy)
+{
+ struct dpaa2_mac *mac = phylink_to_dpaa2_mac(config);
+ struct dpmac_link_state *dpmac_state = &mac->state;
+ int err;
+
+ dpmac_state->up = 1;
+ err = dpmac_set_link_state(mac->mc_io, 0,
+ mac->mc_dev->mc_handle, dpmac_state);
+ if (err)
+ netdev_err(mac->net_dev, "dpmac_set_link_state() = %d\n", err);
+}
+
+static void dpaa2_mac_link_down(struct phylink_config *config,
+ unsigned int mode,
+ phy_interface_t interface)
+{
+ struct dpaa2_mac *mac = phylink_to_dpaa2_mac(config);
+ struct dpmac_link_state *dpmac_state = &mac->state;
+ int err;
+
+ dpmac_state->up = 0;
+ err = dpmac_set_link_state(mac->mc_io, 0,
+ mac->mc_dev->mc_handle, dpmac_state);
+ if (err)
+ netdev_err(mac->net_dev, "dpmac_set_link_state() = %d\n", err);
+}
+
+static const struct phylink_mac_ops dpaa2_mac_phylink_ops = {
+ .validate = dpaa2_mac_validate,
+ .mac_config = dpaa2_mac_config,
+ .mac_link_up = dpaa2_mac_link_up,
+ .mac_link_down = dpaa2_mac_link_down,
+};
+
+bool dpaa2_mac_is_type_fixed(struct fsl_mc_device *dpmac_dev,
+ struct fsl_mc_io *mc_io)
+{
+ struct dpmac_attr attr;
+ bool fixed = false;
+ u16 mc_handle = 0;
+ int err;
+
+ err = dpmac_open(mc_io, 0, dpmac_dev->obj_desc.id,
+ &mc_handle);
+ if (err || !mc_handle)
+ return false;
+
+ err = dpmac_get_attributes(mc_io, 0, mc_handle, &attr);
+ if (err)
+ goto out;
+
+ if (attr.link_type == DPMAC_LINK_TYPE_FIXED)
+ fixed = true;
+
+out:
+ dpmac_close(mc_io, 0, mc_handle);
+
+ return fixed;
+}
+
+int dpaa2_mac_connect(struct dpaa2_mac *mac)
+{
+ struct fsl_mc_device *dpmac_dev = mac->mc_dev;
+ struct net_device *net_dev = mac->net_dev;
+ struct device_node *dpmac_node;
+ struct phylink *phylink;
+ struct dpmac_attr attr;
+ int err;
+
+ err = dpmac_open(mac->mc_io, 0, dpmac_dev->obj_desc.id,
+ &dpmac_dev->mc_handle);
+ if (err || !dpmac_dev->mc_handle) {
+ netdev_err(net_dev, "dpmac_open() = %d\n", err);
+ return -ENODEV;
+ }
+
+ err = dpmac_get_attributes(mac->mc_io, 0, dpmac_dev->mc_handle, &attr);
+ if (err) {
+ netdev_err(net_dev, "dpmac_get_attributes() = %d\n", err);
+ goto err_close_dpmac;
+ }
+
+ dpmac_node = dpaa2_mac_get_node(attr.id);
+ if (!dpmac_node) {
+ netdev_err(net_dev, "No dpmac@%d node found.\n", attr.id);
+ err = -ENODEV;
+ goto err_close_dpmac;
+ }
+
+ err = dpaa2_mac_get_if_mode(dpmac_node, attr);
+ if (err < 0) {
+ err = -EINVAL;
+ goto err_put_node;
+ }
+ mac->if_mode = err;
+
+ /* The MAC does not have the capability to add RGMII delays, so
+ * error out if the interface mode requests them and there is no PHY
+ * to act upon them.
+ */
+ if (of_phy_is_fixed_link(dpmac_node) &&
+ (mac->if_mode == PHY_INTERFACE_MODE_RGMII_ID ||
+ mac->if_mode == PHY_INTERFACE_MODE_RGMII_RXID ||
+ mac->if_mode == PHY_INTERFACE_MODE_RGMII_TXID)) {
+ netdev_err(net_dev, "RGMII delay not supported\n");
+ err = -EINVAL;
+ goto err_put_node;
+ }
+
+ mac->phylink_config.dev = &net_dev->dev;
+ mac->phylink_config.type = PHYLINK_NETDEV;
+
+ phylink = phylink_create(&mac->phylink_config,
+ of_fwnode_handle(dpmac_node), mac->if_mode,
+ &dpaa2_mac_phylink_ops);
+ if (IS_ERR(phylink)) {
+ err = PTR_ERR(phylink);
+ goto err_put_node;
+ }
+ mac->phylink = phylink;
+
+ err = phylink_of_phy_connect(mac->phylink, dpmac_node, 0);
+ if (err) {
+ netdev_err(net_dev, "phylink_of_phy_connect() = %d\n", err);
+ goto err_phylink_destroy;
+ }
+
+ of_node_put(dpmac_node);
+
+ return 0;
+
+err_phylink_destroy:
+ phylink_destroy(mac->phylink);
+err_put_node:
+ of_node_put(dpmac_node);
+err_close_dpmac:
+ dpmac_close(mac->mc_io, 0, dpmac_dev->mc_handle);
+ return err;
+}
+
+void dpaa2_mac_disconnect(struct dpaa2_mac *mac)
+{
+ if (!mac->phylink)
+ return;
+
+ phylink_disconnect_phy(mac->phylink);
+ phylink_destroy(mac->phylink);
+ dpmac_close(mac->mc_io, 0, mac->mc_dev->mc_handle);
+}
+
+static char dpaa2_mac_ethtool_stats[][ETH_GSTRING_LEN] = {
+ [DPMAC_CNT_ING_ALL_FRAME] = "[mac] rx all frames",
+ [DPMAC_CNT_ING_GOOD_FRAME] = "[mac] rx frames ok",
+ [DPMAC_CNT_ING_ERR_FRAME] = "[mac] rx frame errors",
+ [DPMAC_CNT_ING_FRAME_DISCARD] = "[mac] rx frame discards",
+ [DPMAC_CNT_ING_UCAST_FRAME] = "[mac] rx u-cast",
+ [DPMAC_CNT_ING_BCAST_FRAME] = "[mac] rx b-cast",
+ [DPMAC_CNT_ING_MCAST_FRAME] = "[mac] rx m-cast",
+ [DPMAC_CNT_ING_FRAME_64] = "[mac] rx 64 bytes",
+ [DPMAC_CNT_ING_FRAME_127] = "[mac] rx 65-127 bytes",
+ [DPMAC_CNT_ING_FRAME_255] = "[mac] rx 128-255 bytes",
+ [DPMAC_CNT_ING_FRAME_511] = "[mac] rx 256-511 bytes",
+ [DPMAC_CNT_ING_FRAME_1023] = "[mac] rx 512-1023 bytes",
+ [DPMAC_CNT_ING_FRAME_1518] = "[mac] rx 1024-1518 bytes",
+ [DPMAC_CNT_ING_FRAME_1519_MAX] = "[mac] rx 1519-max bytes",
+ [DPMAC_CNT_ING_FRAG] = "[mac] rx frags",
+ [DPMAC_CNT_ING_JABBER] = "[mac] rx jabber",
+ [DPMAC_CNT_ING_ALIGN_ERR] = "[mac] rx align errors",
+ [DPMAC_CNT_ING_OVERSIZED] = "[mac] rx oversized",
+ [DPMAC_CNT_ING_VALID_PAUSE_FRAME] = "[mac] rx pause",
+ [DPMAC_CNT_ING_BYTE] = "[mac] rx bytes",
+ [DPMAC_CNT_EGR_GOOD_FRAME] = "[mac] tx frames ok",
+ [DPMAC_CNT_EGR_UCAST_FRAME] = "[mac] tx u-cast",
+ [DPMAC_CNT_EGR_MCAST_FRAME] = "[mac] tx m-cast",
+ [DPMAC_CNT_EGR_BCAST_FRAME] = "[mac] tx b-cast",
+ [DPMAC_CNT_EGR_ERR_FRAME] = "[mac] tx frame errors",
+ [DPMAC_CNT_EGR_UNDERSIZED] = "[mac] tx undersized",
+ [DPMAC_CNT_EGR_VALID_PAUSE_FRAME] = "[mac] tx b-pause",
+ [DPMAC_CNT_EGR_BYTE] = "[mac] tx bytes",
+};
+
+#define DPAA2_MAC_NUM_STATS ARRAY_SIZE(dpaa2_mac_ethtool_stats)
+
+int dpaa2_mac_get_sset_count(void)
+{
+ return DPAA2_MAC_NUM_STATS;
+}
+
+void dpaa2_mac_get_strings(u8 *data)
+{
+ u8 *p = data;
+ int i;
+
+ for (i = 0; i < DPAA2_MAC_NUM_STATS; i++) {
+ strlcpy(p, dpaa2_mac_ethtool_stats[i], ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+}
+
+void dpaa2_mac_get_ethtool_stats(struct dpaa2_mac *mac, u64 *data)
+{
+ struct fsl_mc_device *dpmac_dev = mac->mc_dev;
+ int i, err;
+ u64 value;
+
+ for (i = 0; i < DPAA2_MAC_NUM_STATS; i++) {
+ err = dpmac_get_counter(mac->mc_io, 0, dpmac_dev->mc_handle,
+ i, &value);
+ if (err) {
+ netdev_err_once(mac->net_dev,
+ "dpmac_get_counter error %d\n", err);
+ *(data + i) = U64_MAX;
+ continue;
+ }
+ *(data + i) = value;
+ }
+}
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h
new file mode 100644
index 000000000000..4da8079b9155
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* Copyright 2019 NXP */
+#ifndef DPAA2_MAC_H
+#define DPAA2_MAC_H
+
+#include <linux/of.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/phylink.h>
+
+#include "dpmac.h"
+#include "dpmac-cmd.h"
+
+struct dpaa2_mac {
+ struct fsl_mc_device *mc_dev;
+ struct dpmac_link_state state;
+ struct net_device *net_dev;
+ struct fsl_mc_io *mc_io;
+
+ struct phylink_config phylink_config;
+ struct phylink *phylink;
+ phy_interface_t if_mode;
+};
+
+bool dpaa2_mac_is_type_fixed(struct fsl_mc_device *dpmac_dev,
+ struct fsl_mc_io *mc_io);
+
+int dpaa2_mac_connect(struct dpaa2_mac *mac);
+
+void dpaa2_mac_disconnect(struct dpaa2_mac *mac);
+
+int dpaa2_mac_get_sset_count(void);
+
+void dpaa2_mac_get_strings(u8 *data);
+
+void dpaa2_mac_get_ethtool_stats(struct dpaa2_mac *mac, u64 *data);
+
+#endif /* DPAA2_MAC_H */
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpmac-cmd.h b/drivers/net/ethernet/freescale/dpaa2/dpmac-cmd.h
new file mode 100644
index 000000000000..3ea51dd9374b
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpmac-cmd.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2019 NXP
+ */
+#ifndef _FSL_DPMAC_CMD_H
+#define _FSL_DPMAC_CMD_H
+
+/* DPMAC Version */
+#define DPMAC_VER_MAJOR 4
+#define DPMAC_VER_MINOR 4
+#define DPMAC_CMD_BASE_VERSION 1
+#define DPMAC_CMD_2ND_VERSION 2
+#define DPMAC_CMD_ID_OFFSET 4
+
+#define DPMAC_CMD(id) (((id) << DPMAC_CMD_ID_OFFSET) | DPMAC_CMD_BASE_VERSION)
+#define DPMAC_CMD_V2(id) (((id) << DPMAC_CMD_ID_OFFSET) | DPMAC_CMD_2ND_VERSION)
+
+/* Command IDs */
+#define DPMAC_CMDID_CLOSE DPMAC_CMD(0x800)
+#define DPMAC_CMDID_OPEN DPMAC_CMD(0x80c)
+
+#define DPMAC_CMDID_GET_ATTR DPMAC_CMD(0x004)
+#define DPMAC_CMDID_SET_LINK_STATE DPMAC_CMD_V2(0x0c3)
+
+#define DPMAC_CMDID_GET_COUNTER DPMAC_CMD(0x0c4)
+
+/* Macros for accessing command fields smaller than 1byte */
+#define DPMAC_MASK(field) \
+ GENMASK(DPMAC_##field##_SHIFT + DPMAC_##field##_SIZE - 1, \
+ DPMAC_##field##_SHIFT)
+
+#define dpmac_set_field(var, field, val) \
+ ((var) |= (((val) << DPMAC_##field##_SHIFT) & DPMAC_MASK(field)))
+#define dpmac_get_field(var, field) \
+ (((var) & DPMAC_MASK(field)) >> DPMAC_##field##_SHIFT)
+
+struct dpmac_cmd_open {
+ __le32 dpmac_id;
+};
+
+struct dpmac_rsp_get_attributes {
+ u8 eth_if;
+ u8 link_type;
+ __le16 id;
+ __le32 max_rate;
+};
+
+#define DPMAC_STATE_SIZE 1
+#define DPMAC_STATE_SHIFT 0
+#define DPMAC_STATE_VALID_SIZE 1
+#define DPMAC_STATE_VALID_SHIFT 1
+
+struct dpmac_cmd_set_link_state {
+ __le64 options;
+ __le32 rate;
+ __le32 pad0;
+ /* from lsb: up:1, state_valid:1 */
+ u8 state;
+ u8 pad1[7];
+ __le64 supported;
+ __le64 advertising;
+};
+
+struct dpmac_cmd_get_counter {
+ u8 id;
+};
+
+struct dpmac_rsp_get_counter {
+ u64 pad;
+ u64 counter;
+};
+
+#endif /* _FSL_DPMAC_CMD_H */
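
A worked example of the bitfield helpers above, packing the state byte of struct dpmac_cmd_set_link_state (values chosen for illustration):

	u8 state = 0;

	dpmac_set_field(state, STATE, 1);	/* bit 0: link up */
	dpmac_set_field(state, STATE_VALID, 1);	/* bit 1: state is valid */
	/* state == 0x03; dpmac_get_field(state, STATE) == 1 */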
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpmac.c b/drivers/net/ethernet/freescale/dpaa2/dpmac.c
new file mode 100644
index 000000000000..d5997b654562
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpmac.c
@@ -0,0 +1,183 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2019 NXP
+ */
+#include <linux/fsl/mc.h>
+#include "dpmac.h"
+#include "dpmac-cmd.h"
+
+/**
+ * dpmac_open() - Open a control session for the specified object.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @dpmac_id: DPMAC unique ID
+ * @token: Returned token; use in subsequent API calls
+ *
+ * This function can be used to open a control session for an
+ * already created object; an object may have been declared in
+ * the DPL or by calling the dpmac_create function.
+ * This function returns a unique authentication token,
+ * associated with the specific object ID and the specific MC
+ * portal; this token must be used in all subsequent commands for
+ * this specific object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpmac_open(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int dpmac_id,
+ u16 *token)
+{
+ struct dpmac_cmd_open *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_OPEN,
+ cmd_flags,
+ 0);
+ cmd_params = (struct dpmac_cmd_open *)cmd.params;
+ cmd_params->dpmac_id = cpu_to_le32(dpmac_id);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *token = mc_cmd_hdr_read_token(&cmd);
+
+ return err;
+}
+
+/**
+ * dpmac_close() - Close the control session of the object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPMAC object
+ *
+ * After this function is called, no further operations are
+ * allowed on the object without opening a new control session.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpmac_close(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_CLOSE, cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpmac_get_attributes() - Retrieve DPMAC attributes.
+ *
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPMAC object
+ * @attr: Returned object's attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpmac_get_attributes(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpmac_attr *attr)
+{
+ struct dpmac_rsp_get_attributes *rsp_params;
+ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_ATTR,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpmac_rsp_get_attributes *)cmd.params;
+ attr->eth_if = rsp_params->eth_if;
+ attr->link_type = rsp_params->link_type;
+ attr->id = le16_to_cpu(rsp_params->id);
+ attr->max_rate = le32_to_cpu(rsp_params->max_rate);
+
+ return 0;
+}
+
+/**
+ * dpmac_set_link_state() - Set the Ethernet link status
+ * @mc_io: Pointer to opaque I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPMAC object
+ * @link_state: Link state configuration
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpmac_set_link_state(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpmac_link_state *link_state)
+{
+ struct dpmac_cmd_set_link_state *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_LINK_STATE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpmac_cmd_set_link_state *)cmd.params;
+ cmd_params->options = cpu_to_le64(link_state->options);
+ cmd_params->rate = cpu_to_le32(link_state->rate);
+ dpmac_set_field(cmd_params->state, STATE, link_state->up);
+ dpmac_set_field(cmd_params->state, STATE_VALID,
+ link_state->state_valid);
+ cmd_params->supported = cpu_to_le64(link_state->supported);
+ cmd_params->advertising = cpu_to_le64(link_state->advertising);
+
+ /* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpmac_get_counter() - Read a specific DPMAC counter
+ * @mc_io: Pointer to opaque I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPMAC object
+ * @id: The requested counter ID
+ * @value: Returned counter value
+ *
+ * Return: '0' on Success; Error code otherwise. The counter itself is
+ * returned through @value.
+ */
+int dpmac_get_counter(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ enum dpmac_counter_id id, u64 *value)
+{
+ struct dpmac_cmd_get_counter *dpmac_cmd;
+ struct dpmac_rsp_get_counter *dpmac_rsp;
+ struct fsl_mc_command cmd = { 0 };
+ int err = 0;
+
+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_COUNTER,
+ cmd_flags,
+ token);
+ dpmac_cmd = (struct dpmac_cmd_get_counter *)cmd.params;
+ dpmac_cmd->id = id;
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ dpmac_rsp = (struct dpmac_rsp_get_counter *)cmd.params;
+ *value = le64_to_cpu(dpmac_rsp->counter);
+
+ return 0;
+}
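The functions above follow the usual MC object pattern: open the object to obtain a token, issue token-based commands, then close. A minimal usage sketch (error handling trimmed; the wrapper name and the zero cmd_flags are illustrative):

	static int dpmac_read_rx_bytes(struct fsl_mc_io *mc_io, int dpmac_id,
				       u64 *bytes)
	{
		u16 token;
		int err;

		err = dpmac_open(mc_io, 0, dpmac_id, &token);
		if (err)
			return err;

		/* DPMAC_CNT_ING_BYTE: bytes received, see dpmac.h below */
		err = dpmac_get_counter(mc_io, 0, token, DPMAC_CNT_ING_BYTE,
					bytes);

		dpmac_close(mc_io, 0, token);
		return err;
	}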
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpmac.h b/drivers/net/ethernet/freescale/dpaa2/dpmac.h
new file mode 100644
index 000000000000..135f143097a5
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpmac.h
@@ -0,0 +1,226 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2019 NXP
+ */
+#ifndef __FSL_DPMAC_H
+#define __FSL_DPMAC_H
+
+/* Data Path MAC API
+ * Contains initialization APIs and runtime control APIs for DPMAC
+ */
+
+struct fsl_mc_io;
+
+int dpmac_open(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int dpmac_id,
+ u16 *token);
+
+int dpmac_close(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+/**
+ * enum dpmac_link_type - DPMAC link type
+ * @DPMAC_LINK_TYPE_NONE: No link
+ * @DPMAC_LINK_TYPE_FIXED: Link is fixed type
+ * @DPMAC_LINK_TYPE_PHY: Link by PHY ID
+ * @DPMAC_LINK_TYPE_BACKPLANE: Backplane link type
+ */
+enum dpmac_link_type {
+ DPMAC_LINK_TYPE_NONE,
+ DPMAC_LINK_TYPE_FIXED,
+ DPMAC_LINK_TYPE_PHY,
+ DPMAC_LINK_TYPE_BACKPLANE
+};
+
+/**
+ * enum dpmac_eth_if - DPMAC Ethernet interface
+ * @DPMAC_ETH_IF_MII: MII interface
+ * @DPMAC_ETH_IF_RMII: RMII interface
+ * @DPMAC_ETH_IF_SMII: SMII interface
+ * @DPMAC_ETH_IF_GMII: GMII interface
+ * @DPMAC_ETH_IF_RGMII: RGMII interface
+ * @DPMAC_ETH_IF_SGMII: SGMII interface
+ * @DPMAC_ETH_IF_QSGMII: QSGMII interface
+ * @DPMAC_ETH_IF_XAUI: XAUI interface
+ * @DPMAC_ETH_IF_XFI: XFI interface
+ * @DPMAC_ETH_IF_CAUI: CAUI interface
+ * @DPMAC_ETH_IF_1000BASEX: 1000BASEX interface
+ * @DPMAC_ETH_IF_USXGMII: USXGMII interface
+ */
+enum dpmac_eth_if {
+ DPMAC_ETH_IF_MII,
+ DPMAC_ETH_IF_RMII,
+ DPMAC_ETH_IF_SMII,
+ DPMAC_ETH_IF_GMII,
+ DPMAC_ETH_IF_RGMII,
+ DPMAC_ETH_IF_SGMII,
+ DPMAC_ETH_IF_QSGMII,
+ DPMAC_ETH_IF_XAUI,
+ DPMAC_ETH_IF_XFI,
+ DPMAC_ETH_IF_CAUI,
+ DPMAC_ETH_IF_1000BASEX,
+ DPMAC_ETH_IF_USXGMII,
+};
+
+/**
+ * struct dpmac_attr - Structure representing DPMAC attributes
+ * @id: DPMAC object ID
+ * @max_rate: Maximum supported rate - in Mbps
+ * @eth_if: Ethernet interface
+ * @link_type: link type
+ */
+struct dpmac_attr {
+ u16 id;
+ u32 max_rate;
+ enum dpmac_eth_if eth_if;
+ enum dpmac_link_type link_type;
+};
+
+int dpmac_get_attributes(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpmac_attr *attr);
+
+/**
+ * DPMAC link configuration/state options
+ */
+
+/**
+ * Enable auto-negotiation
+ */
+#define DPMAC_LINK_OPT_AUTONEG BIT_ULL(0)
+/**
+ * Enable half-duplex mode
+ */
+#define DPMAC_LINK_OPT_HALF_DUPLEX BIT_ULL(1)
+/**
+ * Enable pause frames
+ */
+#define DPMAC_LINK_OPT_PAUSE BIT_ULL(2)
+/**
+ * Enable asymmetric pause frames
+ */
+#define DPMAC_LINK_OPT_ASYM_PAUSE BIT_ULL(3)
+
+/**
+ * Advertised link speeds
+ */
+#define DPMAC_ADVERTISED_10BASET_FULL BIT_ULL(0)
+#define DPMAC_ADVERTISED_100BASET_FULL BIT_ULL(1)
+#define DPMAC_ADVERTISED_1000BASET_FULL BIT_ULL(2)
+#define DPMAC_ADVERTISED_10000BASET_FULL BIT_ULL(4)
+#define DPMAC_ADVERTISED_2500BASEX_FULL BIT_ULL(5)
+
+/**
+ * Advertise auto-negotiation enable
+ */
+#define DPMAC_ADVERTISED_AUTONEG BIT_ULL(3)
+
+/**
+ * struct dpmac_link_state - DPMAC link configuration request
+ * @rate: Rate in Mbps
+ * @options: Enable/Disable DPMAC link cfg features (bitmap)
+ * @up: Link state
+ * @state_valid: Ignore/Update the state of the link
+ * @supported: Speeds capability of the phy (bitmap)
+ * @advertising: Speeds that are advertised for autoneg (bitmap)
+ */
+struct dpmac_link_state {
+ u32 rate;
+ u64 options;
+ int up;
+ int state_valid;
+ u64 supported;
+ u64 advertising;
+};
+
+int dpmac_set_link_state(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpmac_link_state *link_state);
+
+/**
+ * enum dpmac_counter_id - DPMAC counter types
+ *
+ * @DPMAC_CNT_ING_FRAME_64: counts 64-byte frames, good or bad.
+ * @DPMAC_CNT_ING_FRAME_127: counts 65- to 127-byte frames, good or bad.
+ * @DPMAC_CNT_ING_FRAME_255: counts 128- to 255-byte frames, good or bad.
+ * @DPMAC_CNT_ING_FRAME_511: counts 256- to 511-byte frames, good or bad.
+ * @DPMAC_CNT_ING_FRAME_1023: counts 512- to 1023-byte frames, good or bad.
+ * @DPMAC_CNT_ING_FRAME_1518: counts 1024- to 1518-byte frames, good or bad.
+ * @DPMAC_CNT_ING_FRAME_1519_MAX: counts frames of 1519 bytes and larger
+ * (up to the maximum frame length specified),
+ * good or bad.
+ * @DPMAC_CNT_ING_FRAG: counts received frames shorter than 64 bytes with a
+ * wrong CRC.
+ * @DPMAC_CNT_ING_JABBER: counts frames longer than the maximum frame length
+ * specified, with a bad frame check sequence.
+ * @DPMAC_CNT_ING_FRAME_DISCARD: counts frames dropped due to internal errors,
+ * which occur when a receive FIFO overflows.
+ * Also includes frames truncated as a result
+ * of the receive FIFO overflow.
+ * @DPMAC_CNT_ING_ALIGN_ERR: counts frames with an alignment error
+ * (optionally used for a wrong SFD).
+ * @DPMAC_CNT_EGR_UNDERSIZED: counts transmitted frames that were less than 64
+ * bytes long with a good CRC.
+ * @DPMAC_CNT_ING_OVERSIZED: counts frames longer than the maximum frame length
+ * specified, with a good frame check sequence.
+ * @DPMAC_CNT_ING_VALID_PAUSE_FRAME: counts valid pause frames received
+ * (regular and PFC).
+ * @DPMAC_CNT_EGR_VALID_PAUSE_FRAME: counts valid pause frames transmitted
+ * (regular and PFC).
+ * @DPMAC_CNT_ING_BYTE: counts bytes received, excluding the preamble, for all
+ * valid frames and valid pause frames.
+ * @DPMAC_CNT_ING_MCAST_FRAME: counts received multicast frames.
+ * @DPMAC_CNT_ING_BCAST_FRAME: counts received broadcast frames.
+ * @DPMAC_CNT_ING_ALL_FRAME: counts all frames received, good or bad.
+ * @DPMAC_CNT_ING_UCAST_FRAME: counts received unicast frames.
+ * @DPMAC_CNT_ING_ERR_FRAME: counts frames received with an error
+ * (except undersized/fragment frames).
+ * @DPMAC_CNT_EGR_BYTE: counts bytes transmitted, excluding the preamble, for
+ * all valid frames and valid pause frames transmitted.
+ * @DPMAC_CNT_EGR_MCAST_FRAME: counts transmitted multicast frames.
+ * @DPMAC_CNT_EGR_BCAST_FRAME: counts transmitted broadcast frames.
+ * @DPMAC_CNT_EGR_UCAST_FRAME: counts transmitted unicast frames.
+ * @DPMAC_CNT_EGR_ERR_FRAME: counts frames transmitted with an error.
+ * @DPMAC_CNT_ING_GOOD_FRAME: counts frames received without error, including
+ * pause frames.
+ * @DPMAC_CNT_EGR_GOOD_FRAME: counts frames transmitted without error,
+ * including pause frames.
+ */
+enum dpmac_counter_id {
+ DPMAC_CNT_ING_FRAME_64,
+ DPMAC_CNT_ING_FRAME_127,
+ DPMAC_CNT_ING_FRAME_255,
+ DPMAC_CNT_ING_FRAME_511,
+ DPMAC_CNT_ING_FRAME_1023,
+ DPMAC_CNT_ING_FRAME_1518,
+ DPMAC_CNT_ING_FRAME_1519_MAX,
+ DPMAC_CNT_ING_FRAG,
+ DPMAC_CNT_ING_JABBER,
+ DPMAC_CNT_ING_FRAME_DISCARD,
+ DPMAC_CNT_ING_ALIGN_ERR,
+ DPMAC_CNT_EGR_UNDERSIZED,
+ DPMAC_CNT_ING_OVERSIZED,
+ DPMAC_CNT_ING_VALID_PAUSE_FRAME,
+ DPMAC_CNT_EGR_VALID_PAUSE_FRAME,
+ DPMAC_CNT_ING_BYTE,
+ DPMAC_CNT_ING_MCAST_FRAME,
+ DPMAC_CNT_ING_BCAST_FRAME,
+ DPMAC_CNT_ING_ALL_FRAME,
+ DPMAC_CNT_ING_UCAST_FRAME,
+ DPMAC_CNT_ING_ERR_FRAME,
+ DPMAC_CNT_EGR_BYTE,
+ DPMAC_CNT_EGR_MCAST_FRAME,
+ DPMAC_CNT_EGR_BCAST_FRAME,
+ DPMAC_CNT_EGR_UCAST_FRAME,
+ DPMAC_CNT_EGR_ERR_FRAME,
+ DPMAC_CNT_ING_GOOD_FRAME,
+ DPMAC_CNT_EGR_GOOD_FRAME
+};
+
+int dpmac_get_counter(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ enum dpmac_counter_id id, u64 *value);
+
+#endif /* __FSL_DPMAC_H */
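As a usage sketch for the bitmaps defined above, reporting a 1 Gbps autonegotiated link with pause support could look like the following (values are illustrative; mc_io and token come from a prior dpmac_open()):

	struct dpmac_link_state state = {
		.rate        = 1000, /* Mbps */
		.options     = DPMAC_LINK_OPT_AUTONEG | DPMAC_LINK_OPT_PAUSE,
		.up          = 1,
		.state_valid = 1,
		.supported   = DPMAC_ADVERTISED_1000BASET_FULL |
			       DPMAC_ADVERTISED_AUTONEG,
		.advertising = DPMAC_ADVERTISED_1000BASET_FULL |
			       DPMAC_ADVERTISED_AUTONEG,
	};
	int err = dpmac_set_link_state(mc_io, 0, token, &state);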
diff --git a/drivers/net/ethernet/freescale/enetc/Kconfig b/drivers/net/ethernet/freescale/enetc/Kconfig
index c219587bd334..edad4ca46327 100644
--- a/drivers/net/ethernet/freescale/enetc/Kconfig
+++ b/drivers/net/ethernet/freescale/enetc/Kconfig
@@ -50,3 +50,13 @@ config FSL_ENETC_HW_TIMESTAMPING
allocation has not been supported and it is too expensive to use
extended RX BDs if timestamping is not used, this option enables
extended RX BDs in order to support hardware timestamping.
+
+config FSL_ENETC_QOS
+ bool "ENETC hardware Time-sensitive Network support"
+ depends on (FSL_ENETC || FSL_ENETC_VF) && (NET_SCH_TAPRIO || NET_SCH_CBS)
+ help
+ The ENETC hardware supports Time-Sensitive Networking (TSN)
+ capabilities such as 802.1Qbv, 802.1Qci and 802.1Qbu. They can be
+ enabled or disabled from user space via tc (QoS) commands, and are
+ handled on the kernel side by the QoS driver. Currently only taprio
+ (802.1Qbv) and the Credit Based Shaper (802.1Qav) are supported.
diff --git a/drivers/net/ethernet/freescale/enetc/Makefile b/drivers/net/ethernet/freescale/enetc/Makefile
index d200c27c3bf6..d0db33e5b6b7 100644
--- a/drivers/net/ethernet/freescale/enetc/Makefile
+++ b/drivers/net/ethernet/freescale/enetc/Makefile
@@ -5,9 +5,11 @@ common-objs := enetc.o enetc_cbdr.o enetc_ethtool.o
obj-$(CONFIG_FSL_ENETC) += fsl-enetc.o
fsl-enetc-y := enetc_pf.o enetc_mdio.o $(common-objs)
fsl-enetc-$(CONFIG_PCI_IOV) += enetc_msg.o
+fsl-enetc-$(CONFIG_FSL_ENETC_QOS) += enetc_qos.o
obj-$(CONFIG_FSL_ENETC_VF) += fsl-enetc-vf.o
fsl-enetc-vf-y := enetc_vf.o $(common-objs)
+fsl-enetc-vf-$(CONFIG_FSL_ENETC_QOS) += enetc_qos.o
obj-$(CONFIG_FSL_ENETC_MDIO) += fsl-enetc-mdio.o
fsl-enetc-mdio-y := enetc_pci_mdio.o enetc_mdio.o
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index b6ff89307409..9db1b96ed9b9 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -742,9 +742,14 @@ void enetc_get_si_caps(struct enetc_si *si)
si->num_rss = 0;
val = enetc_rd(hw, ENETC_SIPCAPR0);
if (val & ENETC_SIPCAPR0_RSS) {
- val = enetc_rd(hw, ENETC_SIRSSCAPR);
- si->num_rss = ENETC_SIRSSCAPR_GET_NUM_RSS(val);
+ u32 rss;
+
+ rss = enetc_rd(hw, ENETC_SIRSSCAPR);
+ si->num_rss = ENETC_SIRSSCAPR_GET_NUM_RSS(rss);
}
+
+ if (val & ENETC_SIPCAPR0_QBV)
+ si->hw_features |= ENETC_SI_F_QBV;
}
static int enetc_dma_alloc_bdr(struct enetc_bdr *r, size_t bd_size)
@@ -1314,8 +1319,12 @@ static void enetc_disable_interrupts(struct enetc_ndev_priv *priv)
static void adjust_link(struct net_device *ndev)
{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct phy_device *phydev = ndev->phydev;
+ if (priv->active_offloads & ENETC_F_QBV)
+ enetc_sched_speed_set(ndev);
+
phy_print_status(phydev);
}
@@ -1427,8 +1436,7 @@ int enetc_close(struct net_device *ndev)
return 0;
}
-int enetc_setup_tc(struct net_device *ndev, enum tc_setup_type type,
- void *type_data)
+static int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct tc_mqprio_qopt *mqprio = type_data;
@@ -1436,9 +1444,6 @@ int enetc_setup_tc(struct net_device *ndev, enum tc_setup_type type,
u8 num_tc;
int i;
- if (type != TC_SETUP_QDISC_MQPRIO)
- return -EOPNOTSUPP;
-
mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
num_tc = mqprio->num_tc;
@@ -1483,6 +1488,21 @@ int enetc_setup_tc(struct net_device *ndev, enum tc_setup_type type,
return 0;
}
+int enetc_setup_tc(struct net_device *ndev, enum tc_setup_type type,
+ void *type_data)
+{
+ switch (type) {
+ case TC_SETUP_QDISC_MQPRIO:
+ return enetc_setup_tc_mqprio(ndev, type_data);
+ case TC_SETUP_QDISC_TAPRIO:
+ return enetc_setup_tc_taprio(ndev, type_data);
+ case TC_SETUP_QDISC_CBS:
+ return enetc_setup_tc_cbs(ndev, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
struct net_device_stats *enetc_get_stats(struct net_device *ndev)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
@@ -1599,7 +1619,10 @@ int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
if (cmd == SIOCGHWTSTAMP)
return enetc_hwtstamp_get(ndev, rq);
#endif
- return -EINVAL;
+
+ if (!ndev->phydev)
+ return -EOPNOTSUPP;
+ return phy_mii_ioctl(ndev->phydev, rq, cmd);
}
int enetc_alloc_msix(struct enetc_ndev_priv *priv)
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h
index 541b4e2073fe..7ee0da6d0015 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc.h
@@ -118,6 +118,8 @@ enum enetc_errata {
ENETC_ERR_UCMCSWP = BIT(2),
};
+#define ENETC_SI_F_QBV BIT(0)
+
/* PCI IEP device data */
struct enetc_si {
struct pci_dev *pdev;
@@ -133,6 +135,7 @@ struct enetc_si {
int num_fs_entries;
int num_rss; /* number of RSS buckets */
unsigned short pad;
+ int hw_features;
};
#define ENETC_SI_ALIGN 32
@@ -173,6 +176,7 @@ struct enetc_cls_rule {
enum enetc_active_offloads {
ENETC_F_RX_TSTAMP = BIT(0),
ENETC_F_TX_TSTAMP = BIT(1),
+ ENETC_F_QBV = BIT(2),
};
struct enetc_ndev_priv {
@@ -188,6 +192,8 @@ struct enetc_ndev_priv {
u16 msg_enable;
int active_offloads;
+ u32 speed; /* store speed for compare update pspeed */
+
struct enetc_bdr *tx_ring[16];
struct enetc_bdr *rx_ring[16];
@@ -244,3 +250,14 @@ int enetc_set_fs_entry(struct enetc_si *si, struct enetc_cmd_rfse *rfse,
void enetc_set_rss_key(struct enetc_hw *hw, const u8 *bytes);
int enetc_get_rss_table(struct enetc_si *si, u32 *table, int count);
int enetc_set_rss_table(struct enetc_si *si, const u32 *table, int count);
+int enetc_send_cmd(struct enetc_si *si, struct enetc_cbd *cbd);
+
+#ifdef CONFIG_FSL_ENETC_QOS
+int enetc_setup_tc_taprio(struct net_device *ndev, void *type_data);
+void enetc_sched_speed_set(struct net_device *ndev);
+int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data);
+#else
+#define enetc_setup_tc_taprio(ndev, type_data) -EOPNOTSUPP
+#define enetc_sched_speed_set(ndev) (void)0
+#define enetc_setup_tc_cbs(ndev, type_data) -EOPNOTSUPP
+#endif
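With CONFIG_FSL_ENETC_QOS disabled, the stub macros above make the offload paths compile away: in enetc_setup_tc() the taprio and cbs cases reduce to constants, e.g. (expansion sketch, not new code):

	/* return enetc_setup_tc_taprio(ndev, type_data);
	 *   expands to:  return -EOPNOTSUPP;
	 *
	 * enetc_sched_speed_set(ndev);
	 *   expands to:  (void)0;
	 */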
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c b/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c
index de466b71bf8f..201cbc362e33 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c
@@ -32,7 +32,7 @@ static int enetc_cbd_unused(struct enetc_cbdr *r)
r->bd_count;
}
-static int enetc_send_cmd(struct enetc_si *si, struct enetc_cbd *cbd)
+int enetc_send_cmd(struct enetc_si *si, struct enetc_cbd *cbd)
{
struct enetc_cbdr *ring = &si->cbd_ring;
int timeout = ENETC_CBDR_TIMEOUT;
@@ -66,6 +66,9 @@ static int enetc_send_cmd(struct enetc_si *si, struct enetc_cbd *cbd)
if (!timeout)
return -EBUSY;
+ /* the CBD may write back data; propagate it to the caller */
+ *cbd = *dest_cbd;
+
enetc_clean_cbdr(si);
return 0;
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
index fcb52efec075..880a8ed8bb47 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
@@ -584,6 +584,31 @@ static int enetc_get_ts_info(struct net_device *ndev,
return 0;
}
+static void enetc_get_wol(struct net_device *dev,
+ struct ethtool_wolinfo *wol)
+{
+ wol->supported = 0;
+ wol->wolopts = 0;
+
+ if (dev->phydev)
+ phy_ethtool_get_wol(dev->phydev, wol);
+}
+
+static int enetc_set_wol(struct net_device *dev,
+ struct ethtool_wolinfo *wol)
+{
+ int ret;
+
+ if (!dev->phydev)
+ return -EOPNOTSUPP;
+
+ ret = phy_ethtool_set_wol(dev->phydev, wol);
+ if (!ret)
+ device_set_wakeup_enable(&dev->dev, wol->wolopts);
+
+ return ret;
+}
+
static const struct ethtool_ops enetc_pf_ethtool_ops = {
.get_regs_len = enetc_get_reglen,
.get_regs = enetc_get_regs,
@@ -601,6 +626,8 @@ static const struct ethtool_ops enetc_pf_ethtool_ops = {
.set_link_ksettings = phy_ethtool_set_link_ksettings,
.get_link = ethtool_op_get_link,
.get_ts_info = enetc_get_ts_info,
+ .get_wol = enetc_get_wol,
+ .set_wol = enetc_set_wol,
};
static const struct ethtool_ops enetc_vf_ethtool_ops = {
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
index 88276299f447..51f543ef37a8 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
@@ -18,6 +18,7 @@
#define ENETC_SICTR0 0x18
#define ENETC_SICTR1 0x1c
#define ENETC_SIPCAPR0 0x20
+#define ENETC_SIPCAPR0_QBV BIT(4)
#define ENETC_SIPCAPR0_RSS BIT(8)
#define ENETC_SIPCAPR1 0x24
#define ENETC_SITGTGR 0x30
@@ -148,6 +149,11 @@ enum enetc_bdr_type {TX, RX};
#define ENETC_PORT_BASE 0x10000
#define ENETC_PMR 0x0000
#define ENETC_PMR_EN GENMASK(18, 16)
+#define ENETC_PMR_PSPEED_MASK GENMASK(11, 8)
+#define ENETC_PMR_PSPEED_10M 0
+#define ENETC_PMR_PSPEED_100M BIT(8)
+#define ENETC_PMR_PSPEED_1000M BIT(9)
+#define ENETC_PMR_PSPEED_2500M BIT(10)
#define ENETC_PSR 0x0004 /* RO */
#define ENETC_PSIPMR 0x0018
#define ENETC_PSIPMR_SET_UP(n) BIT(n) /* n = SI index */
@@ -179,6 +185,8 @@ enum enetc_bdr_type {TX, RX};
#define ENETC_PSICFGR0_SIVC(bmp) (((bmp) & 0xff) << 24) /* VLAN_TYPE */
#define ENETC_PTCCBSR0(n) (0x1110 + (n) * 8) /* n = 0 to 7*/
+#define ENETC_CBSE BIT(31)
+#define ENETC_CBS_BW_MASK GENMASK(6, 0)
#define ENETC_PTCCBSR1(n) (0x1114 + (n) * 8) /* n = 0 to 7*/
#define ENETC_RSSHASH_KEY_SIZE 40
#define ENETC_PRSSK(n) (0x1410 + (n) * 4) /* n = [0..9] */
@@ -440,22 +448,6 @@ union enetc_rx_bd {
#define EMETC_MAC_ADDR_FILT_RES 3 /* # of reserved entries at the beginning */
#define ENETC_MAX_NUM_VFS 2
-struct enetc_cbd {
- union {
- struct {
- __le32 addr[2];
- __le32 opt[4];
- };
- __le32 data[6];
- };
- __le16 index;
- __le16 length;
- u8 cmd;
- u8 cls;
- u8 _res;
- u8 status_flags;
-};
-
#define ENETC_CBD_FLAGS_SF BIT(7) /* short format */
#define ENETC_CBD_STATUS_MASK 0xf
@@ -554,3 +546,72 @@ static inline void enetc_set_bdr_prio(struct enetc_hw *hw, int bdr_idx,
val |= ENETC_TBMR_SET_PRIO(prio);
enetc_txbdr_wr(hw, bdr_idx, ENETC_TBMR, val);
}
+
+enum bdcr_cmd_class {
+ BDCR_CMD_UNSPEC = 0,
+ BDCR_CMD_MAC_FILTER,
+ BDCR_CMD_VLAN_FILTER,
+ BDCR_CMD_RSS,
+ BDCR_CMD_RFS,
+ BDCR_CMD_PORT_GCL,
+ BDCR_CMD_RECV_CLASSIFIER,
+ __BDCR_CMD_MAX_LEN,
+ BDCR_CMD_MAX_LEN = __BDCR_CMD_MAX_LEN - 1,
+};
+
+/* class 5, command 0 */
+struct tgs_gcl_conf {
+ u8 atc; /* init gate value */
+ u8 res[7];
+ struct {
+ u8 res1[4];
+ __le16 acl_len;
+ u8 res2[2];
+ };
+};
+
+/* gate control list entry */
+struct gce {
+ __le32 period;
+ u8 gate;
+ u8 res[3];
+};
+
+/* tgs_gcl_conf address point to this data space */
+struct tgs_gcl_data {
+ __le32 btl;
+ __le32 bth;
+ __le32 ct;
+ __le32 cte;
+ struct gce entry[0];
+};
+
+struct enetc_cbd {
+ union {
+ struct {
+ __le32 addr[2];
+ union {
+ __le32 opt[4];
+ struct tgs_gcl_conf gcl_conf;
+ };
+ }; /* Long format */
+ __le32 data[6];
+ };
+ __le16 index;
+ __le16 length;
+ u8 cmd;
+ u8 cls;
+ u8 _res;
+ u8 status_flags;
+};
+
+#define ENETC_CLK 400000000ULL
+
+/* port time gating control register */
+#define ENETC_QBV_PTGCR_OFFSET 0x11a00
+#define ENETC_QBV_TGE BIT(31)
+#define ENETC_QBV_TGPE BIT(30)
+
+/* Port time gating capability register */
+#define ENETC_QBV_PTGCAPR_OFFSET 0x11a08
+#define ENETC_QBV_MAX_GCL_LEN_MASK GENMASK(15, 0)
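In DMA memory the gate control list is laid out as a struct tgs_gcl_data header immediately followed by acl_len struct gce entries, which is why enetc_qos.c (below) sizes the buffer with struct_size() and addresses the entries as (struct gce *)(gcl_data + 1). A sketch of how a single tc-taprio schedule entry would land in a gce, assuming the interval is carried in nanoseconds as in tc_taprio_sched_entry::interval:

	/* gates of TC7 and TC0 open for 300 us */
	struct gce entry = {
		.period = cpu_to_le32(300000), /* interval in ns */
		.gate   = 0x81,                /* gate bitmap: BIT(7) | BIT(0) */
	};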
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
index b73421c3e25b..e7482d483b28 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
@@ -742,6 +742,9 @@ static void enetc_pf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
ndev->priv_flags |= IFF_UNICAST_FLT;
+ if (si->hw_features & ENETC_SI_F_QBV)
+ priv->active_offloads |= ENETC_F_QBV;
+
/* pick up primary MAC address from SI */
enetc_get_primary_mac_addr(&si->hw, ndev->dev_addr);
}
@@ -784,8 +787,8 @@ static int enetc_of_get_phy(struct enetc_ndev_priv *priv)
}
}
- priv->if_mode = of_get_phy_mode(np);
- if ((int)priv->if_mode < 0) {
+ err = of_get_phy_mode(np, &priv->if_mode);
+ if (err) {
dev_err(priv->dev, "missing phy type\n");
of_node_put(priv->phy_node);
if (of_phy_is_fixed_link(np))
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
new file mode 100644
index 000000000000..2e99438cb1bf
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
@@ -0,0 +1,300 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2019 NXP */
+
+#include "enetc.h"
+
+#include <net/pkt_sched.h>
+#include <linux/math64.h>
+
+static u16 enetc_get_max_gcl_len(struct enetc_hw *hw)
+{
+ return enetc_rd(hw, ENETC_QBV_PTGCAPR_OFFSET)
+ & ENETC_QBV_MAX_GCL_LEN_MASK;
+}
+
+void enetc_sched_speed_set(struct net_device *ndev)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct phy_device *phydev = ndev->phydev;
+ u32 old_speed = priv->speed;
+ u32 speed, pspeed;
+
+ if (phydev->speed == old_speed)
+ return;
+
+ speed = phydev->speed;
+ switch (speed) {
+ case SPEED_1000:
+ pspeed = ENETC_PMR_PSPEED_1000M;
+ break;
+ case SPEED_2500:
+ pspeed = ENETC_PMR_PSPEED_2500M;
+ break;
+ case SPEED_100:
+ pspeed = ENETC_PMR_PSPEED_100M;
+ break;
+ case SPEED_10:
+ default:
+ pspeed = ENETC_PMR_PSPEED_10M;
+ netdev_err(ndev, "Qbv PSPEED set: link speed unknown, defaulting to 10M\n");
+ }
+
+ priv->speed = speed;
+ enetc_port_wr(&priv->si->hw, ENETC_PMR,
+ (enetc_port_rd(&priv->si->hw, ENETC_PMR)
+ & (~ENETC_PMR_PSPEED_MASK))
+ | pspeed);
+}
+
+static int enetc_setup_taprio(struct net_device *ndev,
+ struct tc_taprio_qopt_offload *admin_conf)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_cbd cbd = {.cmd = 0};
+ struct tgs_gcl_conf *gcl_config;
+ struct tgs_gcl_data *gcl_data;
+ struct gce *gce;
+ dma_addr_t dma;
+ u16 data_size;
+ u16 gcl_len;
+ u32 tge;
+ int err;
+ int i;
+
+ if (admin_conf->num_entries > enetc_get_max_gcl_len(&priv->si->hw))
+ return -EINVAL;
+ gcl_len = admin_conf->num_entries;
+
+ tge = enetc_rd(&priv->si->hw, ENETC_QBV_PTGCR_OFFSET);
+ if (!admin_conf->enable) {
+ enetc_wr(&priv->si->hw,
+ ENETC_QBV_PTGCR_OFFSET,
+ tge & (~ENETC_QBV_TGE));
+ return 0;
+ }
+
+ if (admin_conf->cycle_time > U32_MAX ||
+ admin_conf->cycle_time_extension > U32_MAX)
+ return -EINVAL;
+
+ /* Configure the (administrative) gate control list using a
+ * control buffer descriptor (BD).
+ */
+ gcl_config = &cbd.gcl_conf;
+
+ data_size = struct_size(gcl_data, entry, gcl_len);
+ gcl_data = kzalloc(data_size, __GFP_DMA | GFP_KERNEL);
+ if (!gcl_data)
+ return -ENOMEM;
+
+ gce = (struct gce *)(gcl_data + 1);
+
+ /* Set all gates open as default */
+ gcl_config->atc = 0xff;
+ gcl_config->acl_len = cpu_to_le16(gcl_len);
+
+ if (!admin_conf->base_time) {
+ gcl_data->btl =
+ cpu_to_le32(enetc_rd(&priv->si->hw, ENETC_SICTR0));
+ gcl_data->bth =
+ cpu_to_le32(enetc_rd(&priv->si->hw, ENETC_SICTR1));
+ } else {
+ gcl_data->btl =
+ cpu_to_le32(lower_32_bits(admin_conf->base_time));
+ gcl_data->bth =
+ cpu_to_le32(upper_32_bits(admin_conf->base_time));
+ }
+
+ gcl_data->ct = cpu_to_le32(admin_conf->cycle_time);
+ gcl_data->cte = cpu_to_le32(admin_conf->cycle_time_extension);
+
+ for (i = 0; i < gcl_len; i++) {
+ struct tc_taprio_sched_entry *temp_entry;
+ struct gce *temp_gce = gce + i;
+
+ temp_entry = &admin_conf->entries[i];
+
+ temp_gce->gate = (u8)temp_entry->gate_mask;
+ temp_gce->period = cpu_to_le32(temp_entry->interval);
+ }
+
+ cbd.length = cpu_to_le16(data_size);
+ cbd.status_flags = 0;
+
+ dma = dma_map_single(&priv->si->pdev->dev, gcl_data,
+ data_size, DMA_TO_DEVICE);
+ if (dma_mapping_error(&priv->si->pdev->dev, dma)) {
+ netdev_err(priv->si->ndev, "DMA mapping failed!\n");
+ kfree(gcl_data);
+ return -ENOMEM;
+ }
+
+ cbd.addr[0] = lower_32_bits(dma);
+ cbd.addr[1] = upper_32_bits(dma);
+ cbd.cls = BDCR_CMD_PORT_GCL;
+ cbd.status_flags = 0;
+
+ enetc_wr(&priv->si->hw, ENETC_QBV_PTGCR_OFFSET,
+ tge | ENETC_QBV_TGE);
+
+ err = enetc_send_cmd(priv->si, &cbd);
+ if (err)
+ enetc_wr(&priv->si->hw,
+ ENETC_QBV_PTGCR_OFFSET,
+ tge & (~ENETC_QBV_TGE));
+
+ dma_unmap_single(&priv->si->pdev->dev, dma, data_size, DMA_TO_DEVICE);
+ kfree(gcl_data);
+
+ return err;
+}
+
+int enetc_setup_tc_taprio(struct net_device *ndev, void *type_data)
+{
+ struct tc_taprio_qopt_offload *taprio = type_data;
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ int err;
+ int i;
+
+ for (i = 0; i < priv->num_tx_rings; i++)
+ enetc_set_bdr_prio(&priv->si->hw,
+ priv->tx_ring[i]->index,
+ taprio->enable ? i : 0);
+
+ err = enetc_setup_taprio(ndev, taprio);
+
+ if (err)
+ for (i = 0; i < priv->num_tx_rings; i++)
+ enetc_set_bdr_prio(&priv->si->hw,
+ priv->tx_ring[i]->index,
+ taprio->enable ? 0 : i);
+
+ return err;
+}
+
+static u32 enetc_get_cbs_enable(struct enetc_hw *hw, u8 tc)
+{
+ return enetc_port_rd(hw, ENETC_PTCCBSR0(tc)) & ENETC_CBSE;
+}
+
+static u8 enetc_get_cbs_bw(struct enetc_hw *hw, u8 tc)
+{
+ return enetc_port_rd(hw, ENETC_PTCCBSR0(tc)) & ENETC_CBS_BW_MASK;
+}
+
+int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct tc_cbs_qopt_offload *cbs = type_data;
+ u32 port_transmit_rate = priv->speed;
+ u8 tc_nums = netdev_get_num_tc(ndev);
+ struct enetc_si *si = priv->si;
+ u32 hi_credit_bit, hi_credit_reg;
+ u32 max_interference_size;
+ u32 port_frame_max_size;
+ u32 tc_max_sized_frame;
+ u8 tc = cbs->queue;
+ u8 prio_top, prio_next;
+ int bw_sum = 0;
+ u8 bw;
+
+ prio_top = netdev_get_prio_tc_map(ndev, tc_nums - 1);
+ prio_next = netdev_get_prio_tc_map(ndev, tc_nums - 2);
+
+ /* Only the two highest-priority TCs are supported in cbs mode */
+ if (tc != prio_top && tc != prio_next)
+ return -EOPNOTSUPP;
+
+ if (!cbs->enable) {
+ /* Make sure the other TCs that are numerically
+ * lower than this TC have been disabled.
+ */
+ if (tc == prio_top &&
+ enetc_get_cbs_enable(&si->hw, prio_next)) {
+ dev_err(&ndev->dev,
+ "Disable TC%d before disable TC%d\n",
+ prio_next, tc);
+ return -EINVAL;
+ }
+
+ enetc_port_wr(&si->hw, ENETC_PTCCBSR1(tc), 0);
+ enetc_port_wr(&si->hw, ENETC_PTCCBSR0(tc), 0);
+
+ return 0;
+ }
+
+ if (cbs->idleslope - cbs->sendslope != port_transmit_rate * 1000L ||
+ cbs->idleslope < 0 || cbs->sendslope > 0)
+ return -EOPNOTSUPP;
+
+ port_frame_max_size = ndev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
+
+ bw = cbs->idleslope / (port_transmit_rate * 10UL);
+
+ /* Make sure the other TCs that are numerically
+ * higher than this TC have been enabled.
+ */
+ if (tc == prio_next) {
+ if (!enetc_get_cbs_enable(&si->hw, prio_top)) {
+ dev_err(&ndev->dev,
+ "Enable TC%d first before enable TC%d\n",
+ prio_top, prio_next);
+ return -EINVAL;
+ }
+ bw_sum += enetc_get_cbs_bw(&si->hw, prio_top);
+ }
+
+ if (bw_sum + bw >= 100) {
+ dev_err(&ndev->dev,
+ "The sum of all CBS Bandwidth can't exceed 100\n");
+ return -EINVAL;
+ }
+
+ tc_max_sized_frame = enetc_port_rd(&si->hw, ENETC_PTCMSDUR(tc));
+
+ /* For the top-priority TC, max_interference_size is maxSizedFrame.
+ *
+ * For the next-priority TC, max_interference_size is calculated as:
+ *
+ * max_interference_size = M0 + Ma + Ra * M0 / (R0 - Ra)
+ *
+ * - Ra: idleSlope for AVB Class A
+ * - R0: port transmit rate
+ * - M0: maximum sized frame for the port
+ * - Ma: maximum sized frame for AVB Class A
+ */
+
+ if (tc == prio_top) {
+ max_interference_size = port_frame_max_size * 8;
+ } else {
+ u32 m0, ma, r0, ra;
+
+ m0 = port_frame_max_size * 8;
+ ma = enetc_port_rd(&si->hw, ENETC_PTCMSDUR(prio_top)) * 8;
+ ra = enetc_get_cbs_bw(&si->hw, prio_top) *
+ port_transmit_rate * 10000ULL;
+ r0 = port_transmit_rate * 1000000ULL;
+ max_interference_size = m0 + ma +
+ (u32)div_u64((u64)ra * m0, r0 - ra);
+ }
+
+ /* hiCredit bits are calculated as:
+ *
+ * maxSizedFrame * (idleSlope / portTxRate)
+ */
+ hi_credit_bit = max_interference_size * bw / 100;
+
+ /* The hiCredit register value is derived from the hiCredit bits by
+ * scaling with:
+ *
+ * (enetClockFrequency / portTransmitRate) * 100
+ */
+ hi_credit_reg = (u32)div_u64((ENETC_CLK * 100ULL) * hi_credit_bit,
+ port_transmit_rate * 1000000ULL);
+
+ enetc_port_wr(&si->hw, ENETC_PTCCBSR1(tc), hi_credit_reg);
+
+ /* Set bw register and enable this traffic class */
+ enetc_port_wr(&si->hw, ENETC_PTCCBSR0(tc), bw | ENETC_CBSE);
+
+ return 0;
+}
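A worked pass through the credit math in enetc_setup_tc_cbs() above, under assumed inputs (1 Gbps port, idleSlope of 200000 kbps so bw = 20, MTU 1500 so port_frame_max_size = 1522 bytes, top-priority TC):

	/*
	 * bw                    = 200000 / (1000 * 10)           = 20
	 * m0                    = 1522 * 8                       = 12176 bits
	 * max_interference_size = m0 (top-priority TC)           = 12176 bits
	 * hi_credit_bit         = 12176 * 20 / 100               = 2435 bits
	 * hi_credit_reg         = (400000000 * 100 * 2435) /
	 *                         (1000 * 1000000)               = 97400
	 */

The final value, 97400, is what would be written to ENETC_PTCCBSR1(tc).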
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index a9c386b63581..05c1899f6628 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -2706,7 +2706,6 @@ static void fec_enet_free_buffers(struct net_device *ndev)
for (q = 0; q < fep->num_tx_queues; q++) {
txq = fep->tx_queue[q];
- bdp = txq->bd.base;
for (i = 0; i < txq->bd.ring_size; i++) {
kfree(txq->tx_bounce[i]);
txq->tx_bounce[i] = NULL;
@@ -3394,6 +3393,7 @@ fec_probe(struct platform_device *pdev)
{
struct fec_enet_private *fep;
struct fec_platform_data *pdata;
+ phy_interface_t interface;
struct net_device *ndev;
int i, irq, ret = 0;
const struct of_device_id *of_id;
@@ -3466,15 +3466,15 @@ fec_probe(struct platform_device *pdev)
}
fep->phy_node = phy_node;
- ret = of_get_phy_mode(pdev->dev.of_node);
- if (ret < 0) {
+ ret = of_get_phy_mode(pdev->dev.of_node, &interface);
+ if (ret) {
pdata = dev_get_platdata(&pdev->dev);
if (pdata)
fep->phy_interface = pdata->phy;
else
fep->phy_interface = PHY_INTERFACE_MODE_MII;
} else {
- fep->phy_interface = ret;
+ fep->phy_interface = interface;
}
fep->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
@@ -3636,6 +3636,11 @@ fec_drv_remove(struct platform_device *pdev)
struct net_device *ndev = platform_get_drvdata(pdev);
struct fec_enet_private *fep = netdev_priv(ndev);
struct device_node *np = pdev->dev.of_node;
+ int ret;
+
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0)
+ return ret;
cancel_work_sync(&fep->tx_timeout_work);
fec_ptp_stop(pdev);
@@ -3643,15 +3648,17 @@ fec_drv_remove(struct platform_device *pdev)
fec_enet_mii_remove(fep);
if (fep->reg_phy)
regulator_disable(fep->reg_phy);
- pm_runtime_put(&pdev->dev);
- pm_runtime_disable(&pdev->dev);
- clk_disable_unprepare(fep->clk_ahb);
- clk_disable_unprepare(fep->clk_ipg);
+
if (of_phy_is_fixed_link(np))
of_phy_deregister_fixed_link(np);
of_node_put(fep->phy_node);
free_netdev(ndev);
+ clk_disable_unprepare(fep->clk_ahb);
+ clk_disable_unprepare(fep->clk_ipg);
+ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
return 0;
}
diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c
index 210749bf1eac..934111def0be 100644
--- a/drivers/net/ethernet/freescale/fman/fman.c
+++ b/drivers/net/ethernet/freescale/fman/fman.c
@@ -634,6 +634,9 @@ static void set_port_liodn(struct fman *fman, u8 port_id,
{
u32 tmp;
+ iowrite32be(liodn_ofst, &fman->bmi_regs->fmbm_spliodn[port_id - 1]);
+ if (!IS_ENABLED(CONFIG_FSL_PAMU))
+ return;
/* set LIODN base for this port */
tmp = ioread32be(&fman->dma_regs->fmdmplr[port_id / 2]);
if (port_id % 2) {
@@ -644,7 +647,6 @@ static void set_port_liodn(struct fman *fman, u8 port_id,
tmp |= liodn_base << DMA_LIODN_SHIFT;
}
iowrite32be(tmp, &fman->dma_regs->fmdmplr[port_id / 2]);
- iowrite32be(liodn_ofst, &fman->bmi_regs->fmbm_spliodn[port_id - 1]);
}
static void enable_rams_ecc(struct fman_fpm_regs __iomem *fpm_rg)
@@ -1942,6 +1944,8 @@ static int fman_init(struct fman *fman)
fman->liodn_offset[i] =
ioread32be(&fman->bmi_regs->fmbm_spliodn[i - 1]);
+ if (!IS_ENABLED(CONFIG_FSL_PAMU))
+ continue;
liodn_base = ioread32be(&fman->dma_regs->fmdmplr[i / 2]);
if (i % 2) {
/* FMDM_PLR LSB holds LIODN base for odd ports */
diff --git a/drivers/net/ethernet/freescale/fman/fman_port.c b/drivers/net/ethernet/freescale/fman/fman_port.c
index ee82ee1384eb..87b26f063cc8 100644
--- a/drivers/net/ethernet/freescale/fman/fman_port.c
+++ b/drivers/net/ethernet/freescale/fman/fman_port.c
@@ -435,7 +435,6 @@ struct fman_port_cfg {
struct fman_port_rx_pools_params {
u8 num_of_pools;
- u16 second_largest_buf_size;
u16 largest_buf_size;
};
@@ -946,8 +945,6 @@ static int set_ext_buffer_pools(struct fman_port *port)
port->rx_pools_params.num_of_pools = ext_buf_pools->num_of_pools_used;
port->rx_pools_params.largest_buf_size =
sizes_array[ordered_array[ext_buf_pools->num_of_pools_used - 1]];
- port->rx_pools_params.second_largest_buf_size =
- sizes_array[ordered_array[ext_buf_pools->num_of_pools_used - 2]];
/* FMBM_RMPD reg. - pool depletion */
if (buf_pool_depletion->pools_grp_mode_enable) {
@@ -1728,6 +1725,20 @@ u32 fman_port_get_qman_channel_id(struct fman_port *port)
}
EXPORT_SYMBOL(fman_port_get_qman_channel_id);
+/**
+ * fman_port_get_device() - Get the device associated with an FMan port
+ * @port: Pointer to the FMan port device
+ *
+ * Get the 'struct device' associated with the specified FMan port device.
+ *
+ * Return: pointer to the associated 'struct device'
+ */
+struct device *fman_port_get_device(struct fman_port *port)
+{
+ return port->dev;
+}
+EXPORT_SYMBOL(fman_port_get_device);
+
int fman_port_get_hash_result_offset(struct fman_port *port, u32 *offset)
{
if (port->buffer_offsets.hash_result_offset == ILLEGAL_BASE)
diff --git a/drivers/net/ethernet/freescale/fman/fman_port.h b/drivers/net/ethernet/freescale/fman/fman_port.h
index 9dbb69f40121..82f12661a46d 100644
--- a/drivers/net/ethernet/freescale/fman/fman_port.h
+++ b/drivers/net/ethernet/freescale/fman/fman_port.h
@@ -157,4 +157,6 @@ int fman_port_get_tstamp(struct fman_port *port, const void *data, u64 *tstamp);
struct fman_port *fman_port_bind(struct device *dev);
+struct device *fman_port_get_device(struct fman_port *port);
+
#endif /* __FMAN_PORT_H */
diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c
index 7ab8095db192..f0806ace1ae2 100644
--- a/drivers/net/ethernet/freescale/fman/mac.c
+++ b/drivers/net/ethernet/freescale/fman/mac.c
@@ -608,7 +608,7 @@ static int mac_probe(struct platform_device *_of_dev)
const u8 *mac_addr;
u32 val;
u8 fman_id;
- int phy_if;
+ phy_interface_t phy_if;
dev = &_of_dev->dev;
mac_node = dev->of_node;
@@ -776,8 +776,8 @@ static int mac_probe(struct platform_device *_of_dev)
}
/* Get the PHY connection type */
- phy_if = of_get_phy_mode(mac_node);
- if (phy_if < 0) {
+ err = of_get_phy_mode(mac_node, &phy_if);
+ if (err) {
dev_warn(dev,
"of_get_phy_mode() for %pOF failed. Defaulting to SGMII\n",
mac_node);
diff --git a/drivers/net/ethernet/freescale/fs_enet/Kconfig b/drivers/net/ethernet/freescale/fs_enet/Kconfig
index 245d9a68a71f..7f20840fde07 100644
--- a/drivers/net/ethernet/freescale/fs_enet/Kconfig
+++ b/drivers/net/ethernet/freescale/fs_enet/Kconfig
@@ -1,9 +1,9 @@
# SPDX-License-Identifier: GPL-2.0-only
config FS_ENET
- tristate "Freescale Ethernet Driver"
- depends on NET_VENDOR_FREESCALE && (CPM1 || CPM2 || PPC_MPC512x)
- select MII
- select PHYLIB
+ tristate "Freescale Ethernet Driver"
+ depends on NET_VENDOR_FREESCALE && (CPM1 || CPM2 || PPC_MPC512x)
+ select MII
+ select PHYLIB
config FS_ENET_MPC5121_FEC
def_bool y if (FS_ENET && PPC_MPC512x)
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 51ad86417cb1..72868a28b621 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -641,6 +641,7 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
const char *model;
const void *mac_addr;
int err = 0, i;
+ phy_interface_t interface;
struct net_device *dev = NULL;
struct gfar_private *priv = NULL;
struct device_node *np = ofdev->dev.of_node;
@@ -805,9 +806,9 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
* rgmii-id really needs to be specified. Other types can be
* detected by hardware
*/
- err = of_get_phy_mode(np);
- if (err >= 0)
- priv->interface = err;
+ err = of_get_phy_mode(np, &interface);
+ if (!err)
+ priv->interface = interface;
else
priv->interface = gfar_get_interface(dev);
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index f472a6dbbe6f..432c6a818ae5 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -90,11 +90,11 @@ extern const char gfar_driver_version[];
#define DEFAULT_RX_LFC_THR 16
#define DEFAULT_LFC_PTVVAL 4
-/* prevent fragmenation by HW in DSA environments */
-#define GFAR_RXB_SIZE roundup(1536 + 8, 64)
-#define GFAR_SKBFRAG_SIZE (RXBUF_ALIGNMENT + GFAR_RXB_SIZE \
- + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define GFAR_RXB_TRUESIZE 2048
+#define GFAR_SKBFRAG_OVR (RXBUF_ALIGNMENT \
+ + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+#define GFAR_RXB_SIZE rounddown(GFAR_RXB_TRUESIZE - GFAR_SKBFRAG_OVR, 64)
+#define GFAR_SKBFRAG_SIZE (GFAR_RXB_SIZE + GFAR_SKBFRAG_OVR)
#define TX_RING_MOD_MASK(size) (size-1)
#define RX_RING_MOD_MASK(size) (size-1)
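The reworked macros derive the usable buffer size from the fixed 2048-byte truesize rather than deriving the truesize from the buffer size, so a fragment is guaranteed to fit in one truesize unit. With illustrative values (RXBUF_ALIGNMENT of 64 and SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) of 320 bytes, both build-dependent):

	/*
	 * GFAR_SKBFRAG_OVR  = 64 + 320                  = 384
	 * GFAR_RXB_SIZE     = rounddown(2048 - 384, 64) = 1664
	 * GFAR_SKBFRAG_SIZE = 1664 + 384                = 2048
	 */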
diff --git a/drivers/net/ethernet/google/gve/gve_tx.c b/drivers/net/ethernet/google/gve/gve_tx.c
index 0a9a7ee2a866..f4889431f9b7 100644
--- a/drivers/net/ethernet/google/gve/gve_tx.c
+++ b/drivers/net/ethernet/google/gve/gve_tx.c
@@ -393,12 +393,13 @@ static void gve_tx_fill_seg_desc(union gve_tx_desc *seg_desc,
static void gve_dma_sync_for_device(struct device *dev, dma_addr_t *page_buses,
u64 iov_offset, u64 iov_len)
{
+ u64 last_page = (iov_offset + iov_len - 1) / PAGE_SIZE;
+ u64 first_page = iov_offset / PAGE_SIZE;
dma_addr_t dma;
- u64 addr;
+ u64 page;
- for (addr = iov_offset; addr < iov_offset + iov_len;
- addr += PAGE_SIZE) {
- dma = page_buses[addr / PAGE_SIZE];
+ for (page = first_page; page <= last_page; page++) {
+ dma = page_buses[page];
dma_sync_single_for_device(dev, dma, PAGE_SIZE, DMA_TO_DEVICE);
}
}
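The rewritten loop iterates over page indices instead of stepping a byte offset, which guarantees the page holding the last byte is synced. A worked case with 4 KiB pages (offset and length chosen for illustration):

	/*
	 * iov_offset = 100, iov_len = 4000  ->  bytes 100..4099
	 * old loop:  addr = 100 only        ->  syncs page 0, misses page 1
	 * new loop:  first_page = 100 / 4096              = 0
	 *            last_page  = (100 + 4000 - 1) / 4096 = 1
	 *                                   ->  syncs pages 0 and 1
	 */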
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
index 4606a7e4a6d1..3e9b6d543c77 100644
--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -211,7 +211,7 @@ struct hip04_priv {
#if defined(CONFIG_HI13X1_GMAC)
void __iomem *sysctrl_base;
#endif
- int phy_mode;
+ phy_interface_t phy_mode;
int chan;
unsigned int port;
unsigned int group;
@@ -961,10 +961,9 @@ static int hip04_mac_probe(struct platform_device *pdev)
goto init_fail;
}
- priv->phy_mode = of_get_phy_mode(node);
- if (priv->phy_mode < 0) {
+ ret = of_get_phy_mode(node, &priv->phy_mode);
+ if (ret) {
dev_warn(d, "not find phy-mode\n");
- ret = -EINVAL;
goto init_fail;
}
diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
index c41b19c760f8..247de9105d10 100644
--- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
@@ -1193,10 +1193,9 @@ static int hix5hd2_dev_probe(struct platform_device *pdev)
if (ret)
goto err_free_mdio;
- priv->phy_mode = of_get_phy_mode(node);
- if ((int)priv->phy_mode < 0) {
+ ret = of_get_phy_mode(node, &priv->phy_mode);
+ if (ret) {
netdev_err(ndev, "not find phy-mode\n");
- ret = -EINVAL;
goto err_mdiobus;
}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
index 3a14bbc26ea2..1c5243cc1dc6 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -3049,7 +3049,7 @@ int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset)
u32 sl;
u32 credit;
int i;
- const u32 port_map[DSAF_ROCE_CREDIT_CHN][DSAF_ROCE_CHAN_MODE_NUM] = {
+ static const u32 port_map[DSAF_ROCE_CREDIT_CHN][DSAF_ROCE_CHAN_MODE_NUM] = {
{DSAF_ROCE_PORT_0, DSAF_ROCE_PORT_0, DSAF_ROCE_PORT_0},
{DSAF_ROCE_PORT_1, DSAF_ROCE_PORT_0, DSAF_ROCE_PORT_0},
{DSAF_ROCE_PORT_2, DSAF_ROCE_PORT_1, DSAF_ROCE_PORT_0},
@@ -3059,7 +3059,7 @@ int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset)
{DSAF_ROCE_PORT_5, DSAF_ROCE_PORT_3, DSAF_ROCE_PORT_1},
{DSAF_ROCE_PORT_5, DSAF_ROCE_PORT_3, DSAF_ROCE_PORT_1},
};
- const u32 sl_map[DSAF_ROCE_CREDIT_CHN][DSAF_ROCE_CHAN_MODE_NUM] = {
+ static const u32 sl_map[DSAF_ROCE_CREDIT_CHN][DSAF_ROCE_CHAN_MODE_NUM] = {
{DSAF_ROCE_SL_0, DSAF_ROCE_SL_0, DSAF_ROCE_SL_0},
{DSAF_ROCE_SL_0, DSAF_ROCE_SL_1, DSAF_ROCE_SL_1},
{DSAF_ROCE_SL_0, DSAF_ROCE_SL_0, DSAF_ROCE_SL_2},
diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
index f8a87f8ca983..1b0313900f98 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
@@ -45,8 +45,9 @@ enum HCLGE_MBX_OPCODE {
HCLGE_MBX_GET_LINK_MODE, /* (VF -> PF) get the link mode of pf */
HCLGE_MBX_PUSH_VLAN_INFO, /* (PF -> VF) push port base vlan */
HCLGE_MBX_GET_MEDIA_TYPE, /* (VF -> PF) get media type */
+ HCLGE_MBX_PUSH_PROMISC_INFO, /* (PF -> VF) push vf promisc info */
- HCLGE_MBX_GET_VF_FLR_STATUS = 200, /* (M7 -> PF) get vf reset status */
+ HCLGE_MBX_GET_VF_FLR_STATUS = 200, /* (M7 -> PF) get vf flr status */
HCLGE_MBX_PUSH_LINK_STATUS, /* (M7 -> PF) get port link status */
HCLGE_MBX_NCSI_ERROR, /* (M7 -> PF) receive a NCSI error */
};
@@ -71,7 +72,7 @@ enum hclge_mbx_vlan_cfg_subcode {
};
#define HCLGE_MBX_MAX_MSG_SIZE 16
-#define HCLGE_MBX_MAX_RESP_DATA_SIZE 8
+#define HCLGE_MBX_MAX_RESP_DATA_SIZE 8U
#define HCLGE_MBX_RING_MAP_BASIC_MSG_NUM 3
#define HCLGE_MBX_RING_NODE_VARIABLE_NUM 3
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
index 03ca7d925e8e..eef1b2764d34 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
@@ -146,7 +146,7 @@ void hnae3_unregister_client(struct hnae3_client *client)
return;
mutex_lock(&hnae3_common_lock);
-
+ /* a system may only have one registered client of each type */
list_for_each_entry(client_tmp, &hnae3_client_list, node) {
if (client_tmp->type == client->type) {
existed = true;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index a0998937727d..3b5e2d7251e7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -130,7 +130,6 @@ enum hnae3_module_type {
HNAE3_MODULE_TYPE_CR = 0x04,
HNAE3_MODULE_TYPE_KR = 0x05,
HNAE3_MODULE_TYPE_TP = 0x06,
-
};
enum hnae3_fec_mode {
@@ -366,6 +365,19 @@ struct hnae3_ae_dev {
* Enable/disable HW GRO
* add_arfs_entry
* Check the 5-tuples of flow, and create flow director rule
+ * get_vf_config
+ * Get the VF configuration setting by the host
+ * set_vf_link_state
+ * Set VF link status
+ * set_vf_spoofchk
+ * Enable/disable spoof check for specified vf
+ * set_vf_trust
+ * Enable/disable trust for the specified vf; once the vf is trusted,
+ * it can enable promisc mode
+ * set_vf_rate
+ * Set the max tx rate of specified vf.
+ * set_vf_mac
+ * Configure the default MAC for specified VF
*/
struct hnae3_ae_ops {
int (*init_ae_dev)(struct hnae3_ae_dev *ae_dev);
@@ -531,6 +543,16 @@ struct hnae3_ae_ops {
int (*mac_connect_phy)(struct hnae3_handle *handle);
void (*mac_disconnect_phy)(struct hnae3_handle *handle);
void (*restore_vlan_table)(struct hnae3_handle *handle);
+ int (*get_vf_config)(struct hnae3_handle *handle, int vf,
+ struct ifla_vf_info *ivf);
+ int (*set_vf_link_state)(struct hnae3_handle *handle, int vf,
+ int link_state);
+ int (*set_vf_spoofchk)(struct hnae3_handle *handle, int vf,
+ bool enable);
+ int (*set_vf_trust)(struct hnae3_handle *handle, int vf, bool enable);
+ int (*set_vf_rate)(struct hnae3_handle *handle, int vf,
+ int min_tx_rate, int max_tx_rate, bool force);
+ int (*set_vf_mac)(struct hnae3_handle *handle, int vf, u8 *p);
};
struct hnae3_dcb_ops {
@@ -553,7 +575,8 @@ struct hnae3_ae_algo {
const struct pci_device_id *pdev_id_table;
};
-#define HNAE3_INT_NAME_LEN (IFNAMSIZ + 16)
+#define HNAE3_INT_NAME_EXT_LEN 32 /* Max extra information length */
+#define HNAE3_INT_NAME_LEN (IFNAMSIZ + HNAE3_INT_NAME_EXT_LEN)
#define HNAE3_ITR_COUNTDOWN_START 100
struct hnae3_tc_info {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
index 28961a68e333..6b328a259efc 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -16,15 +16,14 @@ static int hns3_dbg_queue_info(struct hnae3_handle *h,
const char *cmd_buf)
{
struct hns3_nic_priv *priv = h->priv;
- struct hns3_nic_ring_data *ring_data;
struct hns3_enet_ring *ring;
u32 base_add_l, base_add_h;
u32 queue_num, queue_max;
u32 value, i = 0;
int cnt;
- if (!priv->ring_data) {
- dev_err(&h->pdev->dev, "ring_data is NULL\n");
+ if (!priv->ring) {
+ dev_err(&h->pdev->dev, "priv->ring is NULL\n");
return -EFAULT;
}
@@ -44,7 +43,6 @@ static int hns3_dbg_queue_info(struct hnae3_handle *h,
return -EINVAL;
}
- ring_data = priv->ring_data;
for (i = queue_num; i < queue_max; i++) {
/* Each cycle needs to determine whether the instance is reset,
* to prevent reference to invalid memory. And need to ensure
@@ -54,73 +52,73 @@ static int hns3_dbg_queue_info(struct hnae3_handle *h,
test_bit(HNS3_NIC_STATE_RESETTING, &priv->state))
return -EPERM;
- ring = ring_data[(u32)(i + h->kinfo.num_tqps)].ring;
+ ring = &priv->ring[(u32)(i + h->kinfo.num_tqps)];
base_add_h = readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_RING_BASEADDR_H_REG);
base_add_l = readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_RING_BASEADDR_L_REG);
- dev_info(&h->pdev->dev, "RX(%d) BASE ADD: 0x%08x%08x\n", i,
+ dev_info(&h->pdev->dev, "RX(%u) BASE ADD: 0x%08x%08x\n", i,
base_add_h, base_add_l);
value = readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_RING_BD_NUM_REG);
- dev_info(&h->pdev->dev, "RX(%d) RING BD NUM: %u\n", i, value);
+ dev_info(&h->pdev->dev, "RX(%u) RING BD NUM: %u\n", i, value);
value = readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_RING_BD_LEN_REG);
- dev_info(&h->pdev->dev, "RX(%d) RING BD LEN: %u\n", i, value);
+ dev_info(&h->pdev->dev, "RX(%u) RING BD LEN: %u\n", i, value);
value = readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_RING_TAIL_REG);
- dev_info(&h->pdev->dev, "RX(%d) RING TAIL: %u\n", i, value);
+ dev_info(&h->pdev->dev, "RX(%u) RING TAIL: %u\n", i, value);
value = readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_RING_HEAD_REG);
- dev_info(&h->pdev->dev, "RX(%d) RING HEAD: %u\n", i, value);
+ dev_info(&h->pdev->dev, "RX(%u) RING HEAD: %u\n", i, value);
value = readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_RING_FBDNUM_REG);
- dev_info(&h->pdev->dev, "RX(%d) RING FBDNUM: %u\n", i, value);
+ dev_info(&h->pdev->dev, "RX(%u) RING FBDNUM: %u\n", i, value);
value = readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_RING_PKTNUM_RECORD_REG);
- dev_info(&h->pdev->dev, "RX(%d) RING PKTNUM: %u\n", i, value);
+ dev_info(&h->pdev->dev, "RX(%u) RING PKTNUM: %u\n", i, value);
- ring = ring_data[i].ring;
+ ring = &priv->ring[i];
base_add_h = readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_BASEADDR_H_REG);
base_add_l = readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_BASEADDR_L_REG);
- dev_info(&h->pdev->dev, "TX(%d) BASE ADD: 0x%08x%08x\n", i,
+ dev_info(&h->pdev->dev, "TX(%u) BASE ADD: 0x%08x%08x\n", i,
base_add_h, base_add_l);
value = readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_BD_NUM_REG);
- dev_info(&h->pdev->dev, "TX(%d) RING BD NUM: %u\n", i, value);
+ dev_info(&h->pdev->dev, "TX(%u) RING BD NUM: %u\n", i, value);
value = readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_TC_REG);
- dev_info(&h->pdev->dev, "TX(%d) RING TC: %u\n", i, value);
+ dev_info(&h->pdev->dev, "TX(%u) RING TC: %u\n", i, value);
value = readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_TAIL_REG);
- dev_info(&h->pdev->dev, "TX(%d) RING TAIL: %u\n", i, value);
+ dev_info(&h->pdev->dev, "TX(%u) RING TAIL: %u\n", i, value);
value = readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_HEAD_REG);
- dev_info(&h->pdev->dev, "TX(%d) RING HEAD: %u\n", i, value);
+ dev_info(&h->pdev->dev, "TX(%u) RING HEAD: %u\n", i, value);
value = readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_FBDNUM_REG);
- dev_info(&h->pdev->dev, "TX(%d) RING FBDNUM: %u\n", i, value);
+ dev_info(&h->pdev->dev, "TX(%u) RING FBDNUM: %u\n", i, value);
value = readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_OFFSET_REG);
- dev_info(&h->pdev->dev, "TX(%d) RING OFFSET: %u\n", i, value);
+ dev_info(&h->pdev->dev, "TX(%u) RING OFFSET: %u\n", i, value);
value = readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_PKTNUM_RECORD_REG);
- dev_info(&h->pdev->dev, "TX(%d) RING PKTNUM: %u\n\n", i,
+ dev_info(&h->pdev->dev, "TX(%u) RING PKTNUM: %u\n\n", i,
value);
}
@@ -130,7 +128,6 @@ static int hns3_dbg_queue_info(struct hnae3_handle *h,
static int hns3_dbg_queue_map(struct hnae3_handle *h)
{
struct hns3_nic_priv *priv = h->priv;
- struct hns3_nic_ring_data *ring_data;
int i;
if (!h->ae_algo->ops->get_global_queue_id)
@@ -143,15 +140,12 @@ static int hns3_dbg_queue_map(struct hnae3_handle *h)
u16 global_qid;
global_qid = h->ae_algo->ops->get_global_queue_id(h, i);
- ring_data = &priv->ring_data[i];
- if (!ring_data || !ring_data->ring ||
- !ring_data->ring->tqp_vector)
+ if (!priv->ring || !priv->ring[i].tqp_vector)
continue;
dev_info(&h->pdev->dev,
" %4d %4d %4d\n",
- i, global_qid,
- ring_data->ring->tqp_vector->vector_irq);
+ i, global_qid, priv->ring[i].tqp_vector->vector_irq);
}
return 0;
@@ -160,7 +154,6 @@ static int hns3_dbg_queue_map(struct hnae3_handle *h)
static int hns3_dbg_bd_info(struct hnae3_handle *h, const char *cmd_buf)
{
struct hns3_nic_priv *priv = h->priv;
- struct hns3_nic_ring_data *ring_data;
struct hns3_desc *rx_desc, *tx_desc;
struct device *dev = &h->pdev->dev;
struct hns3_enet_ring *ring;
@@ -183,8 +176,7 @@ static int hns3_dbg_bd_info(struct hnae3_handle *h, const char *cmd_buf)
return -EINVAL;
}
- ring_data = priv->ring_data;
- ring = ring_data[q_num].ring;
+ ring = &priv->ring[q_num];
value = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_TAIL_REG);
tx_index = (cnt == 1) ? value : tx_index;
@@ -198,23 +190,26 @@ static int hns3_dbg_bd_info(struct hnae3_handle *h, const char *cmd_buf)
addr = le64_to_cpu(tx_desc->addr);
dev_info(dev, "TX Queue Num: %u, BD Index: %u\n", q_num, tx_index);
dev_info(dev, "(TX)addr: %pad\n", &addr);
- dev_info(dev, "(TX)vlan_tag: %u\n", tx_desc->tx.vlan_tag);
- dev_info(dev, "(TX)send_size: %u\n", tx_desc->tx.send_size);
+ dev_info(dev, "(TX)vlan_tag: %u\n", le16_to_cpu(tx_desc->tx.vlan_tag));
+ dev_info(dev, "(TX)send_size: %u\n",
+ le16_to_cpu(tx_desc->tx.send_size));
dev_info(dev, "(TX)vlan_tso: %u\n", tx_desc->tx.type_cs_vlan_tso);
dev_info(dev, "(TX)l2_len: %u\n", tx_desc->tx.l2_len);
dev_info(dev, "(TX)l3_len: %u\n", tx_desc->tx.l3_len);
dev_info(dev, "(TX)l4_len: %u\n", tx_desc->tx.l4_len);
- dev_info(dev, "(TX)vlan_tag: %u\n", tx_desc->tx.outer_vlan_tag);
- dev_info(dev, "(TX)tv: %u\n", tx_desc->tx.tv);
+ dev_info(dev, "(TX)vlan_tag: %u\n",
+ le16_to_cpu(tx_desc->tx.outer_vlan_tag));
+ dev_info(dev, "(TX)tv: %u\n", le16_to_cpu(tx_desc->tx.tv));
dev_info(dev, "(TX)vlan_msec: %u\n", tx_desc->tx.ol_type_vlan_msec);
dev_info(dev, "(TX)ol2_len: %u\n", tx_desc->tx.ol2_len);
dev_info(dev, "(TX)ol3_len: %u\n", tx_desc->tx.ol3_len);
dev_info(dev, "(TX)ol4_len: %u\n", tx_desc->tx.ol4_len);
- dev_info(dev, "(TX)paylen: %u\n", tx_desc->tx.paylen);
- dev_info(dev, "(TX)vld_ra_ri: %u\n", tx_desc->tx.bdtp_fe_sc_vld_ra_ri);
- dev_info(dev, "(TX)mss: %u\n", tx_desc->tx.mss);
+ dev_info(dev, "(TX)paylen: %u\n", le32_to_cpu(tx_desc->tx.paylen));
+ dev_info(dev, "(TX)vld_ra_ri: %u\n",
+ le16_to_cpu(tx_desc->tx.bdtp_fe_sc_vld_ra_ri));
+ dev_info(dev, "(TX)mss: %u\n", le16_to_cpu(tx_desc->tx.mss));
- ring = ring_data[q_num + h->kinfo.num_tqps].ring;
+ ring = &priv->ring[q_num + h->kinfo.num_tqps];
value = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_TAIL_REG);
rx_index = (cnt == 1) ? value : tx_index;
rx_desc = &ring->desc[rx_index];
@@ -222,15 +217,19 @@ static int hns3_dbg_bd_info(struct hnae3_handle *h, const char *cmd_buf)
addr = le64_to_cpu(rx_desc->addr);
dev_info(dev, "RX Queue Num: %u, BD Index: %u\n", q_num, rx_index);
dev_info(dev, "(RX)addr: %pad\n", &addr);
- dev_info(dev, "(RX)l234_info: %u\n", rx_desc->rx.l234_info);
- dev_info(dev, "(RX)pkt_len: %u\n", rx_desc->rx.pkt_len);
- dev_info(dev, "(RX)size: %u\n", rx_desc->rx.size);
- dev_info(dev, "(RX)rss_hash: %u\n", rx_desc->rx.rss_hash);
- dev_info(dev, "(RX)fd_id: %u\n", rx_desc->rx.fd_id);
- dev_info(dev, "(RX)vlan_tag: %u\n", rx_desc->rx.vlan_tag);
- dev_info(dev, "(RX)o_dm_vlan_id_fb: %u\n", rx_desc->rx.o_dm_vlan_id_fb);
- dev_info(dev, "(RX)ot_vlan_tag: %u\n", rx_desc->rx.ot_vlan_tag);
- dev_info(dev, "(RX)bd_base_info: %u\n", rx_desc->rx.bd_base_info);
+ dev_info(dev, "(RX)l234_info: %u\n",
+ le32_to_cpu(rx_desc->rx.l234_info));
+ dev_info(dev, "(RX)pkt_len: %u\n", le16_to_cpu(rx_desc->rx.pkt_len));
+ dev_info(dev, "(RX)size: %u\n", le16_to_cpu(rx_desc->rx.size));
+ dev_info(dev, "(RX)rss_hash: %u\n", le32_to_cpu(rx_desc->rx.rss_hash));
+ dev_info(dev, "(RX)fd_id: %u\n", le16_to_cpu(rx_desc->rx.fd_id));
+ dev_info(dev, "(RX)vlan_tag: %u\n", le16_to_cpu(rx_desc->rx.vlan_tag));
+ dev_info(dev, "(RX)o_dm_vlan_id_fb: %u\n",
+ le16_to_cpu(rx_desc->rx.o_dm_vlan_id_fb));
+ dev_info(dev, "(RX)ot_vlan_tag: %u\n",
+ le16_to_cpu(rx_desc->rx.ot_vlan_tag));
+ dev_info(dev, "(RX)bd_base_info: %u\n",
+ le32_to_cpu(rx_desc->rx.bd_base_info));
return 0;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 616cad0faa21..ba0536802b13 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -483,7 +483,7 @@ static void hns3_reset_tx_queue(struct hnae3_handle *h)
for (i = 0; i < h->kinfo.num_tqps; i++) {
dev_queue = netdev_get_tx_queue(ndev,
- priv->ring_data[i].queue_index);
+ priv->ring[i].queue_index);
netdev_tx_reset_queue(dev_queue);
}
}
@@ -681,7 +681,7 @@ static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
return 0;
ret = skb_cow_head(skb, 0);
- if (unlikely(ret))
+ if (unlikely(ret < 0))
return ret;
l3.hdr = skb_network_header(skb);
@@ -962,14 +962,6 @@ static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
return 0;
}
-static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end)
-{
- /* Config bd buffer end */
- if (!!frag_end)
- hns3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, 1U);
- hns3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1U);
-}
-
static int hns3_handle_vtags(struct hns3_enet_ring *tx_ring,
struct sk_buff *skb)
{
@@ -1062,7 +1054,7 @@ static int hns3_fill_skb_desc(struct hns3_enet_ring *ring,
skb_reset_mac_len(skb);
ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
- if (unlikely(ret)) {
+ if (unlikely(ret < 0)) {
u64_stats_update_begin(&ring->syncp);
ring->stats.tx_l4_proto_err++;
u64_stats_update_end(&ring->syncp);
@@ -1072,7 +1064,7 @@ static int hns3_fill_skb_desc(struct hns3_enet_ring *ring,
ret = hns3_set_l2l3l4(skb, ol4_proto, il4_proto,
&type_cs_vlan_tso,
&ol_type_vlan_len_msec);
- if (unlikely(ret)) {
+ if (unlikely(ret < 0)) {
u64_stats_update_begin(&ring->syncp);
ring->stats.tx_l2l3l4_err++;
u64_stats_update_end(&ring->syncp);
@@ -1081,7 +1073,7 @@ static int hns3_fill_skb_desc(struct hns3_enet_ring *ring,
ret = hns3_set_tso(skb, &paylen, &mss,
&type_cs_vlan_tso);
- if (unlikely(ret)) {
+ if (unlikely(ret < 0)) {
u64_stats_update_begin(&ring->syncp);
ring->stats.tx_tso_err++;
u64_stats_update_end(&ring->syncp);
@@ -1102,9 +1094,10 @@ static int hns3_fill_skb_desc(struct hns3_enet_ring *ring,
}
static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
- unsigned int size, int frag_end,
- enum hns_desc_type type)
+ unsigned int size, enum hns_desc_type type)
{
+#define HNS3_LIKELY_BD_NUM 1
+
struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
struct hns3_desc *desc = &ring->desc[ring->next_to_use];
struct device *dev = ring_to_dev(ring);
@@ -1118,7 +1111,7 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
int ret;
ret = hns3_fill_skb_desc(ring, skb, desc);
- if (unlikely(ret))
+ if (unlikely(ret < 0))
return ret;
dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
@@ -1137,19 +1130,16 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
desc_cb->length = size;
if (likely(size <= HNS3_MAX_BD_SIZE)) {
- u16 bdtp_fe_sc_vld_ra_ri = 0;
-
desc_cb->priv = priv;
desc_cb->dma = dma;
desc_cb->type = type;
desc->addr = cpu_to_le64(dma);
desc->tx.send_size = cpu_to_le16(size);
- hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri, frag_end);
desc->tx.bdtp_fe_sc_vld_ra_ri =
- cpu_to_le16(bdtp_fe_sc_vld_ra_ri);
+ cpu_to_le16(BIT(HNS3_TXD_VLD_B));
ring_ptr_move_fw(ring, next_to_use);
- return 0;
+ return HNS3_LIKELY_BD_NUM;
}
frag_buf_num = hns3_tx_bd_count(size);
@@ -1158,8 +1148,6 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
/* When frag size is bigger than hardware limit, split this frag */
for (k = 0; k < frag_buf_num; k++) {
- u16 bdtp_fe_sc_vld_ra_ri = 0;
-
/* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */
desc_cb->priv = priv;
desc_cb->dma = dma + HNS3_MAX_BD_SIZE * k;
@@ -1170,11 +1158,8 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
desc->addr = cpu_to_le64(dma + HNS3_MAX_BD_SIZE * k);
desc->tx.send_size = cpu_to_le16((k == frag_buf_num - 1) ?
(u16)sizeoflast : (u16)HNS3_MAX_BD_SIZE);
- hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri,
- frag_end && (k == frag_buf_num - 1) ?
- 1 : 0);
desc->tx.bdtp_fe_sc_vld_ra_ri =
- cpu_to_le16(bdtp_fe_sc_vld_ra_ri);
+ cpu_to_le16(BIT(HNS3_TXD_VLD_B));
/* move ring pointer to next */
ring_ptr_move_fw(ring, next_to_use);
@@ -1183,23 +1168,78 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
desc = &ring->desc[ring->next_to_use];
}
- return 0;
+ return frag_buf_num;
}
-static unsigned int hns3_nic_bd_num(struct sk_buff *skb)
+static unsigned int hns3_skb_bd_num(struct sk_buff *skb, unsigned int *bd_size,
+ unsigned int bd_num)
{
- unsigned int bd_num;
+ unsigned int size;
int i;
- /* if the total len is within the max bd limit */
- if (likely(skb->len <= HNS3_MAX_BD_SIZE))
- return skb_shinfo(skb)->nr_frags + 1;
+ size = skb_headlen(skb);
+ while (size > HNS3_MAX_BD_SIZE) {
+ bd_size[bd_num++] = HNS3_MAX_BD_SIZE;
+ size -= HNS3_MAX_BD_SIZE;
- bd_num = hns3_tx_bd_count(skb_headlen(skb));
+ if (bd_num > HNS3_MAX_TSO_BD_NUM)
+ return bd_num;
+ }
+
+ if (size) {
+ bd_size[bd_num++] = size;
+ if (bd_num > HNS3_MAX_TSO_BD_NUM)
+ return bd_num;
+ }
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- bd_num += hns3_tx_bd_count(skb_frag_size(frag));
+ size = skb_frag_size(frag);
+ if (!size)
+ continue;
+
+ while (size > HNS3_MAX_BD_SIZE) {
+ bd_size[bd_num++] = HNS3_MAX_BD_SIZE;
+ size -= HNS3_MAX_BD_SIZE;
+
+ if (bd_num > HNS3_MAX_TSO_BD_NUM)
+ return bd_num;
+ }
+
+ bd_size[bd_num++] = size;
+ if (bd_num > HNS3_MAX_TSO_BD_NUM)
+ return bd_num;
+ }
+
+ return bd_num;
+}
+
+static unsigned int hns3_tx_bd_num(struct sk_buff *skb, unsigned int *bd_size)
+{
+ struct sk_buff *frag_skb;
+ unsigned int bd_num = 0;
+
+ /* If the total len is within the max bd limit */
+ if (likely(skb->len <= HNS3_MAX_BD_SIZE && !skb_has_frag_list(skb) &&
+ skb_shinfo(skb)->nr_frags < HNS3_MAX_NON_TSO_BD_NUM))
+ return skb_shinfo(skb)->nr_frags + 1U;
+
+ /* The below case will always be linearized, return
+ * HNS3_MAX_TSO_BD_NUM + 1U to make sure it is linearized.
+ */
+ if (unlikely(skb->len > HNS3_MAX_TSO_SIZE ||
+ (!skb_is_gso(skb) && skb->len > HNS3_MAX_NON_TSO_SIZE)))
+ return HNS3_MAX_TSO_BD_NUM + 1U;
+
+ bd_num = hns3_skb_bd_num(skb, bd_size, bd_num);
+
+ if (!skb_has_frag_list(skb) || bd_num > HNS3_MAX_TSO_BD_NUM)
+ return bd_num;
+
+ skb_walk_frags(skb, frag_skb) {
+ bd_num = hns3_skb_bd_num(frag_skb, bd_size, bd_num);
+ if (bd_num > HNS3_MAX_TSO_BD_NUM)
+ return bd_num;
}
return bd_num;
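
hns3_skb_bd_num() above walks the linear part and each fragment, emitting one BD per HNS3_MAX_BD_SIZE chunk and bailing out early once the TSO limit is exceeded. The per-buffer count is just a ceiling division, which is also what hns3_tx_bd_count() computes for the linearized fallback below. A small sketch, assuming the same 65535-byte BD limit (bd_count() is a hypothetical stand-in):

#include <stdio.h>

#define MAX_BD_SIZE 65535U

/* Number of buffer descriptors needed for a buffer of @size bytes when
 * each BD can carry at most MAX_BD_SIZE bytes (ceiling division).
 */
static unsigned int bd_count(unsigned int size)
{
    return (size + MAX_BD_SIZE - 1) / MAX_BD_SIZE;
}

int main(void)
{
    printf("%u\n", bd_count(1500));   /* 1 */
    printf("%u\n", bd_count(65535));  /* 1 */
    printf("%u\n", bd_count(65536));  /* 2 */
    printf("%u\n", bd_count(200000)); /* 4 */
    return 0;
}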
@@ -1218,26 +1258,26 @@ static unsigned int hns3_gso_hdr_len(struct sk_buff *skb)
* 7 frags to be larger than gso header len + mss, and the remaining
* continuous 7 frags to be larger than MSS except for the last 7 frags.
*/
-static bool hns3_skb_need_linearized(struct sk_buff *skb)
+static bool hns3_skb_need_linearized(struct sk_buff *skb, unsigned int *bd_size,
+ unsigned int bd_num)
{
- int bd_limit = HNS3_MAX_BD_NUM_NORMAL - 1;
unsigned int tot_len = 0;
int i;
- for (i = 0; i < bd_limit; i++)
- tot_len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
+ for (i = 0; i < HNS3_MAX_NON_TSO_BD_NUM - 1U; i++)
+ tot_len += bd_size[i];
- /* ensure headlen + the first 7 frags is greater than mss + header
- * and the first 7 frags is greater than mss.
- */
- if (((tot_len + skb_headlen(skb)) < (skb_shinfo(skb)->gso_size +
- hns3_gso_hdr_len(skb))) || (tot_len < skb_shinfo(skb)->gso_size))
+ /* ensure the sum of the first 8 BD sizes is greater than mss + header */
+ if (tot_len + bd_size[HNS3_MAX_NON_TSO_BD_NUM - 1U] <
+ skb_shinfo(skb)->gso_size + hns3_gso_hdr_len(skb))
return true;
- /* ensure the remaining continuous 7 buffer is greater than mss */
- for (i = 0; i < (skb_shinfo(skb)->nr_frags - bd_limit - 1); i++) {
- tot_len -= skb_frag_size(&skb_shinfo(skb)->frags[i]);
- tot_len += skb_frag_size(&skb_shinfo(skb)->frags[i + bd_limit]);
+ /* ensure the sum of every 7 continuous buffers is greater than
+ * mss, except for the last window.
+ */
+ for (i = 0; i < bd_num - HNS3_MAX_NON_TSO_BD_NUM; i++) {
+ tot_len -= bd_size[i];
+ tot_len += bd_size[i + HNS3_MAX_NON_TSO_BD_NUM - 1U];
if (tot_len < skb_shinfo(skb)->gso_size)
return true;
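
The rewritten check above is a sliding-window test over the per-BD sizes: the first window of HNS3_MAX_NON_TSO_BD_NUM buffers must cover the gso header plus one MSS, and every later window of HNS3_MAX_NON_TSO_BD_NUM - 1 buffers must cover one MSS, except the last. A simplified user-space sketch using a single window size (the split window sizes and the header allowance are omitted for brevity):

#include <stdio.h>

#define WIN 8U  /* single window size used in this simplified sketch */

/* Returns 1 when some window of WIN consecutive buffer sizes sums to
 * less than @mss, i.e. the skb would have to be linearized. bd_size and
 * bd_num play the same role as in hns3_skb_need_linearized() above.
 */
static int need_linearize(const unsigned int *bd_size, unsigned int bd_num,
                          unsigned int mss)
{
    unsigned int tot = 0;
    unsigned int i;

    if (bd_num < WIN)
        return 0;

    for (i = 0; i < WIN; i++)
        tot += bd_size[i];
    if (tot < mss)
        return 1;

    /* slide the window one buffer at a time */
    for (i = 0; i + WIN < bd_num; i++) {
        tot -= bd_size[i];
        tot += bd_size[i + WIN];
        if (tot < mss)
            return 1;
    }
    return 0;
}

int main(void)
{
    unsigned int ok[9]  = { 400, 400, 400, 400, 400, 400, 400, 400, 400 };
    unsigned int bad[9] = { 100, 100, 100, 100, 100, 100, 100, 100, 4000 };

    printf("%d\n", need_linearize(ok, 9, 1460));  /* 0: 8 * 400 >= 1460 */
    printf("%d\n", need_linearize(bad, 9, 1460)); /* 1: first 8 sum to 800 */
    return 0;
}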
@@ -1249,15 +1289,16 @@ static bool hns3_skb_need_linearized(struct sk_buff *skb)
static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
struct sk_buff **out_skb)
{
+ unsigned int bd_size[HNS3_MAX_TSO_BD_NUM + 1U];
struct sk_buff *skb = *out_skb;
unsigned int bd_num;
- bd_num = hns3_nic_bd_num(skb);
- if (unlikely(bd_num > HNS3_MAX_BD_NUM_NORMAL)) {
+ bd_num = hns3_tx_bd_num(skb, bd_size);
+ if (unlikely(bd_num > HNS3_MAX_NON_TSO_BD_NUM)) {
struct sk_buff *new_skb;
- if (skb_is_gso(skb) && bd_num <= HNS3_MAX_BD_NUM_TSO &&
- !hns3_skb_need_linearized(skb))
+ if (bd_num <= HNS3_MAX_TSO_BD_NUM && skb_is_gso(skb) &&
+ !hns3_skb_need_linearized(skb, bd_size, bd_num))
goto out;
/* manually split the send packet */
@@ -1267,9 +1308,10 @@ static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
dev_kfree_skb_any(skb);
*out_skb = new_skb;
- bd_num = hns3_nic_bd_num(new_skb);
- if ((skb_is_gso(new_skb) && bd_num > HNS3_MAX_BD_NUM_TSO) ||
- (!skb_is_gso(new_skb) && bd_num > HNS3_MAX_BD_NUM_NORMAL))
+ bd_num = hns3_tx_bd_count(new_skb->len);
+ if ((skb_is_gso(new_skb) && bd_num > HNS3_MAX_TSO_BD_NUM) ||
+ (!skb_is_gso(new_skb) &&
+ bd_num > HNS3_MAX_NON_TSO_BD_NUM))
return -ENOMEM;
u64_stats_update_begin(&ring->syncp);
@@ -1314,73 +1356,98 @@ static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig)
}
}
+static int hns3_fill_skb_to_desc(struct hns3_enet_ring *ring,
+ struct sk_buff *skb, enum hns_desc_type type)
+{
+ unsigned int size = skb_headlen(skb);
+ int i, ret, bd_num = 0;
+
+ if (size) {
+ ret = hns3_fill_desc(ring, skb, size, type);
+ if (unlikely(ret < 0))
+ return ret;
+
+ bd_num += ret;
+ }
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ size = skb_frag_size(frag);
+ if (!size)
+ continue;
+
+ ret = hns3_fill_desc(ring, frag, size, DESC_TYPE_PAGE);
+ if (unlikely(ret < 0))
+ return ret;
+
+ bd_num += ret;
+ }
+
+ return bd_num;
+}
+
netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
- struct hns3_nic_ring_data *ring_data =
- &tx_ring_data(priv, skb->queue_mapping);
- struct hns3_enet_ring *ring = ring_data->ring;
+ struct hns3_enet_ring *ring = &priv->ring[skb->queue_mapping];
struct netdev_queue *dev_queue;
- skb_frag_t *frag;
- int next_to_use_head;
- int buf_num;
- int seg_num;
- int size;
+ int pre_ntu, next_to_use_head;
+ struct sk_buff *frag_skb;
+ int bd_num = 0;
int ret;
- int i;
/* Prefetch the data used later */
prefetch(skb->data);
- buf_num = hns3_nic_maybe_stop_tx(ring, &skb);
- if (unlikely(buf_num <= 0)) {
- if (buf_num == -EBUSY) {
+ ret = hns3_nic_maybe_stop_tx(ring, &skb);
+ if (unlikely(ret <= 0)) {
+ if (ret == -EBUSY) {
u64_stats_update_begin(&ring->syncp);
ring->stats.tx_busy++;
u64_stats_update_end(&ring->syncp);
goto out_net_tx_busy;
- } else if (buf_num == -ENOMEM) {
+ } else if (ret == -ENOMEM) {
u64_stats_update_begin(&ring->syncp);
ring->stats.sw_err_cnt++;
u64_stats_update_end(&ring->syncp);
}
- hns3_rl_err(netdev, "xmit error: %d!\n", buf_num);
+ hns3_rl_err(netdev, "xmit error: %d!\n", ret);
goto out_err_tx_ok;
}
- /* No. of segments (plus a header) */
- seg_num = skb_shinfo(skb)->nr_frags + 1;
- /* Fill the first part */
- size = skb_headlen(skb);
-
next_to_use_head = ring->next_to_use;
- ret = hns3_fill_desc(ring, skb, size, seg_num == 1 ? 1 : 0,
- DESC_TYPE_SKB);
- if (unlikely(ret))
+ ret = hns3_fill_skb_to_desc(ring, skb, DESC_TYPE_SKB);
+ if (unlikely(ret < 0))
goto fill_err;
- /* Fill the fragments */
- for (i = 1; i < seg_num; i++) {
- frag = &skb_shinfo(skb)->frags[i - 1];
- size = skb_frag_size(frag);
+ bd_num += ret;
- ret = hns3_fill_desc(ring, frag, size,
- seg_num - 1 == i ? 1 : 0,
- DESC_TYPE_PAGE);
+ if (!skb_has_frag_list(skb))
+ goto out;
- if (unlikely(ret))
+ skb_walk_frags(skb, frag_skb) {
+ ret = hns3_fill_skb_to_desc(ring, frag_skb, DESC_TYPE_PAGE);
+ if (unlikely(ret < 0))
goto fill_err;
+
+ bd_num += ret;
}
+out:
+ pre_ntu = ring->next_to_use ? (ring->next_to_use - 1) :
+ (ring->desc_num - 1);
+ ring->desc[pre_ntu].tx.bdtp_fe_sc_vld_ra_ri |=
+ cpu_to_le16(BIT(HNS3_TXD_FE_B));
/* Complete translating all packets */
- dev_queue = netdev_get_tx_queue(netdev, ring_data->queue_index);
+ dev_queue = netdev_get_tx_queue(netdev, ring->queue_index);
netdev_tx_sent_queue(dev_queue, skb->len);
wmb(); /* Commit all data before submit */
- hnae3_queue_xmit(ring->tqp, buf_num);
+ hnae3_queue_xmit(ring->tqp, bd_num);
return NETDEV_TX_OK;
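
Note the new descriptor-flag strategy in this function: hns3_fill_desc() now sets only the VLD bit on every BD, and the frame-end (FE) bit is OR-ed once into the last used BD via pre_ntu, with the usual wrap at the ring edge. A host-order sketch of that sequence (the real field is __le16, and the bit positions here are illustrative):

#include <stdint.h>
#include <stdio.h>

#define TXD_VLD_B 0 /* descriptor valid bit, set on every BD */
#define TXD_FE_B  1 /* frame-end bit, set only on the last BD */

struct tx_desc {
    uint16_t flags; /* stands in for bdtp_fe_sc_vld_ra_ri (host order) */
};

int main(void)
{
    struct tx_desc ring[8] = { 0 };
    unsigned int next_to_use = 0, desc_num = 8, bd_num = 3, i, pre_ntu;

    /* Fill three BDs for one packet: each gets only VLD. */
    for (i = 0; i < bd_num; i++) {
        ring[next_to_use].flags = 1U << TXD_VLD_B;
        next_to_use = (next_to_use + 1) % desc_num;
    }

    /* OR FE into the previously used BD, wrapping at the ring edge the
     * same way the driver computes pre_ntu.
     */
    pre_ntu = next_to_use ? next_to_use - 1 : desc_num - 1;
    ring[pre_ntu].flags |= 1U << TXD_FE_B;

    for (i = 0; i < bd_num; i++)
        printf("bd %u flags 0x%x\n", i, ring[i].flags); /* 0x1 0x1 0x3 */
    return 0;
}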
@@ -1392,7 +1459,7 @@ out_err_tx_ok:
return NETDEV_TX_OK;
out_net_tx_busy:
- netif_stop_subqueue(netdev, ring_data->queue_index);
+ netif_stop_subqueue(netdev, ring->queue_index);
smp_mb(); /* Commit all data before submit */
return NETDEV_TX_BUSY;
@@ -1413,6 +1480,16 @@ static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p)
return 0;
}
+ /* For a VF device, if there is a perm_addr, then the user will not
+ * be allowed to change the address.
+ */
+ if (!hns3_is_phys_func(h->pdev) &&
+ !is_zero_ether_addr(netdev->perm_addr)) {
+ netdev_err(netdev, "has permanent MAC %pM, user MAC %pM is not allowed\n",
+ netdev->perm_addr, mac_addr->sa_data);
+ return -EPERM;
+ }
+
ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data, false);
if (ret) {
netdev_err(netdev, "set_mac_address fail, ret=%d!\n", ret);
@@ -1505,7 +1582,7 @@ static void hns3_nic_get_stats64(struct net_device *netdev,
for (idx = 0; idx < queue_num; idx++) {
/* fetch the tx stats */
- ring = priv->ring_data[idx].ring;
+ ring = &priv->ring[idx];
do {
start = u64_stats_fetch_begin_irq(&ring->syncp);
tx_bytes += ring->stats.tx_bytes;
@@ -1523,7 +1600,7 @@ static void hns3_nic_get_stats64(struct net_device *netdev,
} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
/* fetch the rx stats */
- ring = priv->ring_data[idx + queue_num].ring;
+ ring = &priv->ring[idx + queue_num];
do {
start = u64_stats_fetch_begin_irq(&ring->syncp);
rx_bytes += ring->stats.rx_bytes;
@@ -1633,8 +1710,8 @@ static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
int ret = -EIO;
netif_dbg(h, drv, netdev,
- "set vf vlan: vf=%d, vlan=%u, qos=%u, vlan_proto=%u\n",
- vf, vlan, qos, vlan_proto);
+ "set vf vlan: vf=%d, vlan=%u, qos=%u, vlan_proto=0x%x\n",
+ vf, vlan, qos, ntohs(vlan_proto));
if (h->ae_algo->ops->set_vf_vlan_filter)
ret = h->ae_algo->ops->set_vf_vlan_filter(h, vf, vlan,
@@ -1643,6 +1720,29 @@ static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
return ret;
}
+static int hns3_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable)
+{
+ struct hnae3_handle *handle = hns3_get_handle(netdev);
+
+ if (hns3_nic_resetting(netdev))
+ return -EBUSY;
+
+ if (!handle->ae_algo->ops->set_vf_spoofchk)
+ return -EOPNOTSUPP;
+
+ return handle->ae_algo->ops->set_vf_spoofchk(handle, vf, enable);
+}
+
+static int hns3_set_vf_trust(struct net_device *netdev, int vf, bool enable)
+{
+ struct hnae3_handle *handle = hns3_get_handle(netdev);
+
+ if (!handle->ae_algo->ops->set_vf_trust)
+ return -EOPNOTSUPP;
+
+ return handle->ae_algo->ops->set_vf_trust(handle, vf, enable);
+}
+
static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
@@ -1671,7 +1771,7 @@ static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
{
struct hns3_nic_priv *priv = netdev_priv(ndev);
struct hnae3_handle *h = hns3_get_handle(ndev);
- struct hns3_enet_ring *tx_ring = NULL;
+ struct hns3_enet_ring *tx_ring;
struct napi_struct *napi;
int timeout_queue = 0;
int hw_head, hw_tail;
@@ -1692,6 +1792,9 @@ static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
time_after(jiffies,
(trans_start + ndev->watchdog_timeo))) {
timeout_queue = i;
+ netdev_info(ndev, "queue state: 0x%lx, delta msecs: %u\n",
+ q->state,
+ jiffies_to_msecs(jiffies - trans_start));
break;
}
}
@@ -1705,7 +1808,7 @@ static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
priv->tx_timeout_count++;
- tx_ring = priv->ring_data[timeout_queue].ring;
+ tx_ring = &priv->ring[timeout_queue];
napi = &tx_ring->tqp_vector->napi;
netdev_info(ndev,
@@ -1805,6 +1908,57 @@ static int hns3_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
}
#endif
+static int hns3_nic_get_vf_config(struct net_device *ndev, int vf,
+ struct ifla_vf_info *ivf)
+{
+ struct hnae3_handle *h = hns3_get_handle(ndev);
+
+ if (!h->ae_algo->ops->get_vf_config)
+ return -EOPNOTSUPP;
+
+ return h->ae_algo->ops->get_vf_config(h, vf, ivf);
+}
+
+static int hns3_nic_set_vf_link_state(struct net_device *ndev, int vf,
+ int link_state)
+{
+ struct hnae3_handle *h = hns3_get_handle(ndev);
+
+ if (!h->ae_algo->ops->set_vf_link_state)
+ return -EOPNOTSUPP;
+
+ return h->ae_algo->ops->set_vf_link_state(h, vf, link_state);
+}
+
+static int hns3_nic_set_vf_rate(struct net_device *ndev, int vf,
+ int min_tx_rate, int max_tx_rate)
+{
+ struct hnae3_handle *h = hns3_get_handle(ndev);
+
+ if (!h->ae_algo->ops->set_vf_rate)
+ return -EOPNOTSUPP;
+
+ return h->ae_algo->ops->set_vf_rate(h, vf, min_tx_rate, max_tx_rate,
+ false);
+}
+
+static int hns3_nic_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ if (!h->ae_algo->ops->set_vf_mac)
+ return -EOPNOTSUPP;
+
+ if (is_multicast_ether_addr(mac)) {
+ netdev_err(netdev,
+ "Invalid MAC:%pM specified. Could not set MAC\n",
+ mac);
+ return -EINVAL;
+ }
+
+ return h->ae_algo->ops->set_vf_mac(h, vf_id, mac);
+}
+
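
All four new callbacks follow the same guard pattern: probe the optional hook in ae_algo->ops and return -EOPNOTSUPP when the backing device does not provide it, so the ndo can be wired up unconditionally. A minimal user-space sketch of the pattern (names are illustrative):

#include <errno.h>
#include <stdio.h>

struct ae_ops {
    int (*set_vf_trust)(int vf, int enable); /* may be NULL */
};

/* Probe the optional backend hook and report -EOPNOTSUPP when the
 * underlying device does not implement it.
 */
static int set_vf_trust(const struct ae_ops *ops, int vf, int enable)
{
    if (!ops->set_vf_trust)
        return -EOPNOTSUPP;

    return ops->set_vf_trust(vf, enable);
}

int main(void)
{
    struct ae_ops ops = { 0 };

    printf("%d\n", set_vf_trust(&ops, 0, 1)); /* -EOPNOTSUPP (-95) */
    return 0;
}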
static const struct net_device_ops hns3_nic_netdev_ops = {
.ndo_open = hns3_nic_net_open,
.ndo_stop = hns3_nic_net_stop,
@@ -1820,10 +1974,15 @@ static const struct net_device_ops hns3_nic_netdev_ops = {
.ndo_vlan_rx_add_vid = hns3_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = hns3_vlan_rx_kill_vid,
.ndo_set_vf_vlan = hns3_ndo_set_vf_vlan,
+ .ndo_set_vf_spoofchk = hns3_set_vf_spoofchk,
+ .ndo_set_vf_trust = hns3_set_vf_trust,
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = hns3_rx_flow_steer,
#endif
-
+ .ndo_get_vf_config = hns3_nic_get_vf_config,
+ .ndo_set_vf_link_state = hns3_nic_set_vf_link_state,
+ .ndo_set_vf_rate = hns3_nic_set_vf_rate,
+ .ndo_set_vf_mac = hns3_nic_set_vf_mac,
};
bool hns3_is_phys_func(struct pci_dev *pdev)
@@ -1843,7 +2002,7 @@ bool hns3_is_phys_func(struct pci_dev *pdev)
case HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF:
return false;
default:
- dev_warn(&pdev->dev, "un-recognized pci device-id %d",
+ dev_warn(&pdev->dev, "un-recognized pci device-id %u",
dev_id);
}
@@ -2069,9 +2228,8 @@ static void hns3_set_default_feature(struct net_device *netdev)
NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
- NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;
-
- netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
+ NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC |
+ NETIF_F_TSO_MANGLEID | NETIF_F_FRAGLIST;
netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
@@ -2081,21 +2239,24 @@ static void hns3_set_default_feature(struct net_device *netdev)
NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
- NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;
+ NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC |
+ NETIF_F_FRAGLIST;
netdev->vlan_features |=
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO |
NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
- NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;
+ NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC |
+ NETIF_F_FRAGLIST;
netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
- NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;
+ NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC |
+ NETIF_F_FRAGLIST;
if (pdev->revision >= 0x21) {
netdev->hw_features |= NETIF_F_GRO_HW;
@@ -2320,18 +2481,19 @@ static int is_valid_clean_head(struct hns3_enet_ring *ring, int h)
void hns3_clean_tx_ring(struct hns3_enet_ring *ring)
{
- struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
+ struct net_device *netdev = ring_to_netdev(ring);
struct hns3_nic_priv *priv = netdev_priv(netdev);
struct netdev_queue *dev_queue;
int bytes, pkts;
int head;
head = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_HEAD_REG);
- rmb(); /* Make sure head is ready before touch any data */
if (is_ring_empty(ring) || head == ring->next_to_clean)
return; /* no data to poll */
+ rmb(); /* Make sure head is ready before touch any data */
+
if (unlikely(!is_valid_clean_head(ring, head))) {
netdev_err(netdev, "wrong head (%d, %d-%d)\n", head,
ring->next_to_use, ring->next_to_clean);
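
Moving the rmb() after the emptiness test is safe because the barrier only has to order the head read against the subsequent reads of descriptor data; when the early return fires, no such data is touched, so the common idle path no longer pays for the barrier. A rough C11-atomics analogue of the pattern (the acquire fence stands in for rmb()):

#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned int head_reg;
static unsigned int ring_data[64];

static int clean_ring(unsigned int next_to_clean)
{
    unsigned int head = atomic_load_explicit(&head_reg,
                                             memory_order_relaxed);

    if (head == next_to_clean)
        return 0;                 /* no data to poll, fence skipped */

    atomic_thread_fence(memory_order_acquire); /* rmb() analogue */

    return (int)ring_data[head % 64]; /* now safe to touch the data */
}

int main(void)
{
    atomic_store_explicit(&head_reg, 0, memory_order_relaxed);
    printf("%d\n", clean_ring(0)); /* 0: empty, fast path taken */
    return 0;
}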
@@ -2358,7 +2520,7 @@ void hns3_clean_tx_ring(struct hns3_enet_ring *ring)
netdev_tx_completed_queue(dev_queue, pkts, bytes);
if (unlikely(pkts && netif_carrier_ok(netdev) &&
- (ring_space(ring) > HNS3_MAX_BD_PER_PKT))) {
+ ring_space(ring) > HNS3_MAX_TSO_BD_NUM)) {
/* Make sure that anybody stopping the queue after this
* sees the new next_to_clean.
*/
@@ -2401,7 +2563,7 @@ static void hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
ring->stats.sw_err_cnt++;
u64_stats_update_end(&ring->syncp);
- hns3_rl_err(ring->tqp_vector->napi.dev,
+ hns3_rl_err(ring_to_netdev(ring),
"alloc rx buffer failed: %d\n",
ret);
break;
@@ -2510,7 +2672,7 @@ static int hns3_gro_complete(struct sk_buff *skb, u32 l234info)
static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
u32 l234info, u32 bd_base_info, u32 ol_info)
{
- struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
+ struct net_device *netdev = ring_to_netdev(ring);
int l3_type, l4_type;
int ol4_type;
@@ -2626,7 +2788,7 @@ static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length,
{
#define HNS3_NEED_ADD_FRAG 1
struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];
- struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
+ struct net_device *netdev = ring_to_netdev(ring);
struct sk_buff *skb;
ring->skb = napi_alloc_skb(&ring->tqp_vector->napi, HNS3_RX_HEAD_SIZE);
@@ -2672,10 +2834,10 @@ static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length,
}
static int hns3_add_frag(struct hns3_enet_ring *ring, struct hns3_desc *desc,
- struct sk_buff **out_skb, bool pending)
+ bool pending)
{
- struct sk_buff *skb = *out_skb;
- struct sk_buff *head_skb = *out_skb;
+ struct sk_buff *skb = ring->skb;
+ struct sk_buff *head_skb = skb;
struct sk_buff *new_skb;
struct hns3_desc_cb *desc_cb;
struct hns3_desc *pre_desc;
@@ -2704,10 +2866,9 @@ static int hns3_add_frag(struct hns3_enet_ring *ring, struct hns3_desc *desc,
return -ENXIO;
if (unlikely(ring->frag_num >= MAX_SKB_FRAGS)) {
- new_skb = napi_alloc_skb(&ring->tqp_vector->napi,
- HNS3_RX_HEAD_SIZE);
+ new_skb = napi_alloc_skb(&ring->tqp_vector->napi, 0);
if (unlikely(!new_skb)) {
- hns3_rl_err(ring->tqp_vector->napi.dev,
+ hns3_rl_err(ring_to_netdev(ring),
"alloc rx fraglist skb fail\n");
return -ENXIO;
}
@@ -2783,7 +2944,7 @@ static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring,
static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb)
{
- struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
+ struct net_device *netdev = ring_to_netdev(ring);
enum hns3_pkt_l2t_type l2_frame_type;
u32 bd_base_info, l234info, ol_info;
struct hns3_desc *desc;
@@ -2858,8 +3019,7 @@ static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb)
return 0;
}
-static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
- struct sk_buff **out_skb)
+static int hns3_handle_rx_bd(struct hns3_enet_ring *ring)
{
struct sk_buff *skb = ring->skb;
struct hns3_desc_cb *desc_cb;
@@ -2897,12 +3057,12 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
if (!skb) {
ret = hns3_alloc_skb(ring, length, ring->va);
- *out_skb = skb = ring->skb;
+ skb = ring->skb;
if (ret < 0) /* alloc buffer fail */
return ret;
if (ret > 0) { /* need add frag */
- ret = hns3_add_frag(ring, desc, &skb, false);
+ ret = hns3_add_frag(ring, desc, false);
if (ret)
return ret;
@@ -2913,7 +3073,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
ALIGN(ring->pull_len, sizeof(long)));
}
} else {
- ret = hns3_add_frag(ring, desc, &skb, true);
+ ret = hns3_add_frag(ring, desc, true);
if (ret)
return ret;
@@ -2931,8 +3091,6 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
}
skb_record_rx_queue(skb, ring->tqp->tqp_index);
- *out_skb = skb;
-
return 0;
}
@@ -2941,17 +3099,19 @@ int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
{
#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
int unused_count = hns3_desc_unused(ring);
- struct sk_buff *skb = ring->skb;
int recv_pkts = 0;
int recv_bds = 0;
int err, num;
num = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_FBDNUM_REG);
- rmb(); /* Make sure num taken effect before the other data is touched */
-
num -= unused_count;
unused_count -= ring->pending_buf;
+ if (num <= 0)
+ goto out;
+
+ rmb(); /* Make sure num taken effect before the other data is touched */
+
while (recv_pkts < budget && recv_bds < num) {
/* Reuse or realloc buffers */
if (unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
@@ -2961,27 +3121,19 @@ int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
}
/* Poll one pkt */
- err = hns3_handle_rx_bd(ring, &skb);
- if (unlikely(!skb)) /* This fault cannot be repaired */
- goto out;
-
- if (err == -ENXIO) { /* Do not get FE for the packet */
+ err = hns3_handle_rx_bd(ring);
+ /* Did not get FE for the packet, or failed to alloc skb */
+ if (unlikely(!ring->skb || err == -ENXIO)) {
goto out;
- } else if (unlikely(err)) { /* Do jump the err */
- recv_bds += ring->pending_buf;
- unused_count += ring->pending_buf;
- ring->skb = NULL;
- ring->pending_buf = 0;
- continue;
+ } else if (likely(!err)) {
+ rx_fn(ring, ring->skb);
+ recv_pkts++;
}
- rx_fn(ring, skb);
recv_bds += ring->pending_buf;
unused_count += ring->pending_buf;
ring->skb = NULL;
ring->pending_buf = 0;
-
- recv_pkts++;
}
out:
@@ -3324,13 +3476,13 @@ static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
tqp_vector = &priv->tqp_vector[vector_i];
hns3_add_ring_to_group(&tqp_vector->tx_group,
- priv->ring_data[i].ring);
+ &priv->ring[i]);
hns3_add_ring_to_group(&tqp_vector->rx_group,
- priv->ring_data[i + tqp_num].ring);
+ &priv->ring[i + tqp_num]);
- priv->ring_data[i].ring->tqp_vector = tqp_vector;
- priv->ring_data[i + tqp_num].ring->tqp_vector = tqp_vector;
+ priv->ring[i].tqp_vector = tqp_vector;
+ priv->ring[i + tqp_num].tqp_vector = tqp_vector;
tqp_vector->num_tqps++;
}
@@ -3474,28 +3626,22 @@ static int hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv)
return 0;
}
-static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
- unsigned int ring_type)
+static void hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
+ unsigned int ring_type)
{
- struct hns3_nic_ring_data *ring_data = priv->ring_data;
int queue_num = priv->ae_handle->kinfo.num_tqps;
- struct pci_dev *pdev = priv->ae_handle->pdev;
struct hns3_enet_ring *ring;
int desc_num;
- ring = devm_kzalloc(&pdev->dev, sizeof(*ring), GFP_KERNEL);
- if (!ring)
- return -ENOMEM;
-
if (ring_type == HNAE3_RING_TYPE_TX) {
+ ring = &priv->ring[q->tqp_index];
desc_num = priv->ae_handle->kinfo.num_tx_desc;
- ring_data[q->tqp_index].ring = ring;
- ring_data[q->tqp_index].queue_index = q->tqp_index;
+ ring->queue_index = q->tqp_index;
ring->io_base = (u8 __iomem *)q->io_base + HNS3_TX_REG_OFFSET;
} else {
+ ring = &priv->ring[q->tqp_index + queue_num];
desc_num = priv->ae_handle->kinfo.num_rx_desc;
- ring_data[q->tqp_index + queue_num].ring = ring;
- ring_data[q->tqp_index + queue_num].queue_index = q->tqp_index;
+ ring->queue_index = q->tqp_index;
ring->io_base = q->io_base;
}
@@ -3510,76 +3656,41 @@ static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
ring->desc_num = desc_num;
ring->next_to_use = 0;
ring->next_to_clean = 0;
-
- return 0;
}
-static int hns3_queue_to_ring(struct hnae3_queue *tqp,
- struct hns3_nic_priv *priv)
+static void hns3_queue_to_ring(struct hnae3_queue *tqp,
+ struct hns3_nic_priv *priv)
{
- int ret;
-
- ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_TX);
- if (ret)
- return ret;
-
- ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX);
- if (ret) {
- devm_kfree(priv->dev, priv->ring_data[tqp->tqp_index].ring);
- return ret;
- }
-
- return 0;
+ hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_TX);
+ hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX);
}
static int hns3_get_ring_config(struct hns3_nic_priv *priv)
{
struct hnae3_handle *h = priv->ae_handle;
struct pci_dev *pdev = h->pdev;
- int i, ret;
+ int i;
- priv->ring_data = devm_kzalloc(&pdev->dev,
- array3_size(h->kinfo.num_tqps,
- sizeof(*priv->ring_data),
- 2),
- GFP_KERNEL);
- if (!priv->ring_data)
+ priv->ring = devm_kzalloc(&pdev->dev,
+ array3_size(h->kinfo.num_tqps,
+ sizeof(*priv->ring), 2),
+ GFP_KERNEL);
+ if (!priv->ring)
return -ENOMEM;
- for (i = 0; i < h->kinfo.num_tqps; i++) {
- ret = hns3_queue_to_ring(h->kinfo.tqp[i], priv);
- if (ret)
- goto err;
- }
+ for (i = 0; i < h->kinfo.num_tqps; i++)
+ hns3_queue_to_ring(h->kinfo.tqp[i], priv);
return 0;
-err:
- while (i--) {
- devm_kfree(priv->dev, priv->ring_data[i].ring);
- devm_kfree(priv->dev,
- priv->ring_data[i + h->kinfo.num_tqps].ring);
- }
-
- devm_kfree(&pdev->dev, priv->ring_data);
- priv->ring_data = NULL;
- return ret;
}
static void hns3_put_ring_config(struct hns3_nic_priv *priv)
{
- struct hnae3_handle *h = priv->ae_handle;
- int i;
-
- if (!priv->ring_data)
+ if (!priv->ring)
return;
- for (i = 0; i < h->kinfo.num_tqps; i++) {
- devm_kfree(priv->dev, priv->ring_data[i].ring);
- devm_kfree(priv->dev,
- priv->ring_data[i + h->kinfo.num_tqps].ring);
- }
- devm_kfree(priv->dev, priv->ring_data);
- priv->ring_data = NULL;
+ devm_kfree(priv->dev, priv->ring);
+ priv->ring = NULL;
}
static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
@@ -3696,7 +3807,7 @@ static void hns3_init_tx_ring_tc(struct hns3_nic_priv *priv)
for (j = 0; j < tc_info->tqp_count; j++) {
struct hnae3_queue *q;
- q = priv->ring_data[tc_info->tqp_offset + j].ring->tqp;
+ q = priv->ring[tc_info->tqp_offset + j].tqp;
hns3_write_dev(q, HNS3_RING_TX_RING_TC_REG,
tc_info->tc);
}
@@ -3711,21 +3822,21 @@ int hns3_init_all_ring(struct hns3_nic_priv *priv)
int ret;
for (i = 0; i < ring_num; i++) {
- ret = hns3_alloc_ring_memory(priv->ring_data[i].ring);
+ ret = hns3_alloc_ring_memory(&priv->ring[i]);
if (ret) {
dev_err(priv->dev,
"Alloc ring memory fail! ret=%d\n", ret);
goto out_when_alloc_ring_memory;
}
- u64_stats_init(&priv->ring_data[i].ring->syncp);
+ u64_stats_init(&priv->ring[i].syncp);
}
return 0;
out_when_alloc_ring_memory:
for (j = i - 1; j >= 0; j--)
- hns3_fini_ring(priv->ring_data[j].ring);
+ hns3_fini_ring(&priv->ring[j]);
return -ENOMEM;
}
@@ -3736,30 +3847,31 @@ int hns3_uninit_all_ring(struct hns3_nic_priv *priv)
int i;
for (i = 0; i < h->kinfo.num_tqps; i++) {
- hns3_fini_ring(priv->ring_data[i].ring);
- hns3_fini_ring(priv->ring_data[i + h->kinfo.num_tqps].ring);
+ hns3_fini_ring(&priv->ring[i]);
+ hns3_fini_ring(&priv->ring[i + h->kinfo.num_tqps]);
}
return 0;
}
/* Set the mac addr if it is configured, or leave it to the AE driver */
-static int hns3_init_mac_addr(struct net_device *netdev, bool init)
+static int hns3_init_mac_addr(struct net_device *netdev)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
struct hnae3_handle *h = priv->ae_handle;
u8 mac_addr_temp[ETH_ALEN];
int ret = 0;
- if (h->ae_algo->ops->get_mac_addr && init) {
+ if (h->ae_algo->ops->get_mac_addr)
h->ae_algo->ops->get_mac_addr(h, mac_addr_temp);
- ether_addr_copy(netdev->dev_addr, mac_addr_temp);
- }
/* Check if the MAC address is valid, if not get a random one */
- if (!is_valid_ether_addr(netdev->dev_addr)) {
+ if (!is_valid_ether_addr(mac_addr_temp)) {
eth_hw_addr_random(netdev);
dev_warn(priv->dev, "using random MAC address %pM\n",
netdev->dev_addr);
+ } else {
+ ether_addr_copy(netdev->dev_addr, mac_addr_temp);
+ ether_addr_copy(netdev->perm_addr, mac_addr_temp);
}
if (h->ae_algo->ops->set_mac_addr)
@@ -3827,14 +3939,14 @@ static void hns3_info_show(struct hns3_nic_priv *priv)
struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo;
dev_info(priv->dev, "MAC address: %pM\n", priv->netdev->dev_addr);
- dev_info(priv->dev, "Task queue pairs numbers: %d\n", kinfo->num_tqps);
- dev_info(priv->dev, "RSS size: %d\n", kinfo->rss_size);
- dev_info(priv->dev, "Allocated RSS size: %d\n", kinfo->req_rss_size);
- dev_info(priv->dev, "RX buffer length: %d\n", kinfo->rx_buf_len);
- dev_info(priv->dev, "Desc num per TX queue: %d\n", kinfo->num_tx_desc);
- dev_info(priv->dev, "Desc num per RX queue: %d\n", kinfo->num_rx_desc);
- dev_info(priv->dev, "Total number of enabled TCs: %d\n", kinfo->num_tc);
- dev_info(priv->dev, "Max mtu size: %d\n", priv->netdev->max_mtu);
+ dev_info(priv->dev, "Task queue pairs numbers: %u\n", kinfo->num_tqps);
+ dev_info(priv->dev, "RSS size: %u\n", kinfo->rss_size);
+ dev_info(priv->dev, "Allocated RSS size: %u\n", kinfo->req_rss_size);
+ dev_info(priv->dev, "RX buffer length: %u\n", kinfo->rx_buf_len);
+ dev_info(priv->dev, "Desc num per TX queue: %u\n", kinfo->num_tx_desc);
+ dev_info(priv->dev, "Desc num per RX queue: %u\n", kinfo->num_rx_desc);
+ dev_info(priv->dev, "Total number of enabled TCs: %u\n", kinfo->num_tc);
+ dev_info(priv->dev, "Max mtu size: %u\n", priv->netdev->max_mtu);
}
static int hns3_client_init(struct hnae3_handle *handle)
@@ -3863,7 +3975,7 @@ static int hns3_client_init(struct hnae3_handle *handle)
handle->kinfo.netdev = netdev;
handle->priv = (void *)priv;
- hns3_init_mac_addr(netdev, true);
+ hns3_init_mac_addr(netdev);
hns3_set_default_feature(netdev);
@@ -3897,7 +4009,7 @@ static int hns3_client_init(struct hnae3_handle *handle)
ret = hns3_init_all_ring(priv);
if (ret) {
ret = -ENOMEM;
- goto out_init_ring_data;
+ goto out_init_ring;
}
ret = hns3_init_phy(netdev);
@@ -3936,12 +4048,12 @@ out_reg_netdev_fail:
hns3_uninit_phy(netdev);
out_init_phy:
hns3_uninit_all_ring(priv);
-out_init_ring_data:
+out_init_ring:
hns3_nic_uninit_vector_data(priv);
out_init_vector_data:
hns3_nic_dealloc_vector_data(priv);
out_alloc_vector_data:
- priv->ring_data = NULL;
+ priv->ring = NULL;
out_get_ring_cfg:
priv->ae_handle = NULL;
free_netdev(netdev);
@@ -4102,7 +4214,7 @@ static int hns3_clear_rx_ring(struct hns3_enet_ring *ring)
/* if alloc new buffer fail, exit directly
* and reclear in up flow.
*/
- netdev_warn(ring->tqp->handle->kinfo.netdev,
+ netdev_warn(ring_to_netdev(ring),
"reserve buffer map failed, ret = %d\n",
ret);
return ret;
@@ -4148,10 +4260,10 @@ static void hns3_clear_all_ring(struct hnae3_handle *h, bool force)
for (i = 0; i < h->kinfo.num_tqps; i++) {
struct hns3_enet_ring *ring;
- ring = priv->ring_data[i].ring;
+ ring = &priv->ring[i];
hns3_clear_tx_ring(ring);
- ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
+ ring = &priv->ring[i + h->kinfo.num_tqps];
/* Continue to clear other rings even if clearing some
* rings failed.
*/
@@ -4175,16 +4287,16 @@ int hns3_nic_reset_all_ring(struct hnae3_handle *h)
if (ret)
return ret;
- hns3_init_ring_hw(priv->ring_data[i].ring);
+ hns3_init_ring_hw(&priv->ring[i]);
/* We need to clear tx ring here because self test will
* use the ring and will not run down before up
*/
- hns3_clear_tx_ring(priv->ring_data[i].ring);
- priv->ring_data[i].ring->next_to_clean = 0;
- priv->ring_data[i].ring->next_to_use = 0;
+ hns3_clear_tx_ring(&priv->ring[i]);
+ priv->ring[i].next_to_clean = 0;
+ priv->ring[i].next_to_use = 0;
- rx_ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
+ rx_ring = &priv->ring[i + h->kinfo.num_tqps];
hns3_init_ring_hw(rx_ring);
ret = hns3_clear_rx_ring(rx_ring);
if (ret)
@@ -4331,7 +4443,7 @@ static int hns3_reset_notify_restore_enet(struct hnae3_handle *handle)
bool vlan_filter_enable;
int ret;
- ret = hns3_init_mac_addr(netdev, false);
+ ret = hns3_init_mac_addr(netdev);
if (ret)
return ret;
@@ -4454,7 +4566,7 @@ int hns3_set_channels(struct net_device *netdev,
if (new_tqp_num > hns3_get_max_available_channels(h) ||
new_tqp_num < 1) {
dev_err(&netdev->dev,
- "Change tqps fail, the tqp range is from 1 to %d",
+ "Change tqps fail, the tqp range is from 1 to %u",
hns3_get_max_available_channels(h));
return -EINVAL;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index 5d468ed404a6..9d47abd5c37c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -76,7 +76,7 @@ enum hns3_nic_state {
#define HNS3_RING_NAME_LEN 16
#define HNS3_BUFFER_SIZE_2048 2048
#define HNS3_RING_MAX_PENDING 32760
-#define HNS3_RING_MIN_PENDING 24
+#define HNS3_RING_MIN_PENDING 72
#define HNS3_RING_BD_MULTIPLE 8
/* max frame size of mac */
#define HNS3_MAC_MAX_FRAME 9728
@@ -186,7 +186,7 @@ enum hns3_nic_state {
#define HNS3_TXD_MSS_S 0
#define HNS3_TXD_MSS_M (0x3fff << HNS3_TXD_MSS_S)
-#define HNS3_TX_LAST_SIZE_M 0xffff
+#define HNS3_TX_LAST_SIZE_M 0xffff
#define HNS3_VECTOR_TX_IRQ BIT_ULL(0)
#define HNS3_VECTOR_RX_IRQ BIT_ULL(1)
@@ -195,9 +195,13 @@ enum hns3_nic_state {
#define HNS3_VECTOR_INITED 1
#define HNS3_MAX_BD_SIZE 65535
-#define HNS3_MAX_BD_NUM_NORMAL 8
-#define HNS3_MAX_BD_NUM_TSO 63
-#define HNS3_MAX_BD_PER_PKT MAX_SKB_FRAGS
+#define HNS3_MAX_NON_TSO_BD_NUM 8U
+#define HNS3_MAX_TSO_BD_NUM 63U
+#define HNS3_MAX_TSO_SIZE \
+ (HNS3_MAX_BD_SIZE * HNS3_MAX_TSO_BD_NUM)
+
+#define HNS3_MAX_NON_TSO_SIZE \
+ (HNS3_MAX_BD_SIZE * HNS3_MAX_NON_TSO_BD_NUM)
#define HNS3_VECTOR_GL0_OFFSET 0x100
#define HNS3_VECTOR_GL1_OFFSET 0x200
@@ -309,7 +313,7 @@ struct hns3_desc_cb {
u16 reuse_flag;
- /* desc type, used by the ring user to mark the type of the priv data */
+ /* desc type, used by the ring user to mark the type of the priv data */
u16 type;
};
@@ -405,6 +409,7 @@ struct hns3_enet_ring {
struct hns3_enet_ring *next;
struct hns3_enet_tqp_vector *tqp_vector;
struct hnae3_queue *tqp;
+ int queue_index;
struct device *dev; /* will be used for DMA mapping of descriptors */
/* statistic */
@@ -430,18 +435,7 @@ struct hns3_enet_ring {
int pending_buf;
struct sk_buff *skb;
struct sk_buff *tail_skb;
-};
-
-struct hns_queue;
-
-struct hns3_nic_ring_data {
- struct hns3_enet_ring *ring;
- struct napi_struct napi;
- int queue_index;
- int (*poll_one)(struct hns3_nic_ring_data *, int, void *);
- void (*ex_process)(struct hns3_nic_ring_data *, struct sk_buff *);
- void (*fini_process)(struct hns3_nic_ring_data *);
-};
+} ____cacheline_internodealigned_in_smp;
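
With hns3_nic_ring_data gone, the rings live in one contiguous array, so each ring is now annotated ____cacheline_internodealigned_in_smp to keep neighbouring rings, which are hammered by different CPUs, from sharing a cache line. A user-space sketch of the effect using a plain alignment attribute (the 64-byte line size is an assumption):

#include <stdio.h>

#define CACHELINE 64

/* Aligning each ring to its own cache line prevents false sharing
 * between adjacent array entries.
 */
struct ring {
    unsigned long next_to_use;
    unsigned long next_to_clean;
} __attribute__((aligned(CACHELINE)));

int main(void)
{
    struct ring rings[4];

    printf("sizeof(struct ring) = %zu\n", sizeof(struct ring)); /* 64 */
    printf("stride = %zu\n",
           (size_t)((char *)&rings[1] - (char *)&rings[0]));    /* 64 */
    return 0;
}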
enum hns3_flow_level_range {
HNS3_FLOW_LOW = 0,
@@ -518,7 +512,7 @@ struct hns3_nic_priv {
* the cb for nic to manage the ring buffer, the first half of the
* array is for tx_ring and vice versa for the second half
*/
- struct hns3_nic_ring_data *ring_data;
+ struct hns3_enet_ring *ring;
struct hns3_enet_tqp_vector *tqp_vector;
u16 vector_num;
@@ -613,11 +607,11 @@ static inline bool hns3_nic_resetting(struct net_device *netdev)
#define ring_to_dev(ring) ((ring)->dev)
+#define ring_to_netdev(ring) ((ring)->tqp_vector->napi.dev)
+
#define ring_to_dma_dir(ring) (HNAE3_IS_TX_RING(ring) ? \
DMA_TO_DEVICE : DMA_FROM_DEVICE)
-#define tx_ring_data(priv, idx) ((priv)->ring_data[idx])
-
#define hns3_buf_size(_ring) ((_ring)->buf_size)
static inline unsigned int hns3_page_order(struct hns3_enet_ring *ring)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index 680c3508876d..6e0212b79438 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -70,11 +70,6 @@ static const struct hns3_stats hns3_rxq_stats[] = {
#define HNS3_NIC_LB_TEST_TX_CNT_ERR 2
#define HNS3_NIC_LB_TEST_RX_CNT_ERR 3
-struct hns3_link_mode_mapping {
- u32 hns3_link_mode;
- u32 ethtool_link_mode;
-};
-
static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en)
{
struct hnae3_handle *h = hns3_get_handle(ndev);
@@ -203,7 +198,7 @@ static u32 hns3_lb_check_rx_ring(struct hns3_nic_priv *priv, u32 budget)
kinfo = &h->kinfo;
for (i = kinfo->num_tqps; i < kinfo->num_tqps * 2; i++) {
- struct hns3_enet_ring *ring = priv->ring_data[i].ring;
+ struct hns3_enet_ring *ring = &priv->ring[i];
struct hns3_enet_ring_group *rx_group;
u64 pre_rx_pkt;
@@ -226,7 +221,7 @@ static void hns3_lb_clear_tx_ring(struct hns3_nic_priv *priv, u32 start_ringid,
u32 i;
for (i = start_ringid; i <= end_ringid; i++) {
- struct hns3_enet_ring *ring = priv->ring_data[i].ring;
+ struct hns3_enet_ring *ring = &priv->ring[i];
hns3_clean_tx_ring(ring);
}
@@ -491,7 +486,7 @@ static u64 *hns3_get_stats_tqps(struct hnae3_handle *handle, u64 *data)
/* get stats for Tx */
for (i = 0; i < kinfo->num_tqps; i++) {
- ring = nic_priv->ring_data[i].ring;
+ ring = &nic_priv->ring[i];
for (j = 0; j < HNS3_TXQ_STATS_COUNT; j++) {
stat = (u8 *)ring + hns3_txq_stats[j].stats_offset;
*data++ = *(u64 *)stat;
@@ -500,7 +495,7 @@ static u64 *hns3_get_stats_tqps(struct hnae3_handle *handle, u64 *data)
/* get stats for Rx */
for (i = 0; i < kinfo->num_tqps; i++) {
- ring = nic_priv->ring_data[i + kinfo->num_tqps].ring;
+ ring = &nic_priv->ring[i + kinfo->num_tqps];
for (j = 0; j < HNS3_RXQ_STATS_COUNT; j++) {
stat = (u8 *)ring + hns3_rxq_stats[j].stats_offset;
*data++ = *(u64 *)stat;
@@ -603,8 +598,8 @@ static void hns3_get_ringparam(struct net_device *netdev,
param->tx_max_pending = HNS3_RING_MAX_PENDING;
param->rx_max_pending = HNS3_RING_MAX_PENDING;
- param->tx_pending = priv->ring_data[0].ring->desc_num;
- param->rx_pending = priv->ring_data[queue_num].ring->desc_num;
+ param->tx_pending = priv->ring[0].desc_num;
+ param->rx_pending = priv->ring[queue_num].desc_num;
}
static void hns3_get_pauseparam(struct net_device *netdev,
@@ -906,9 +901,8 @@ static void hns3_change_all_ring_bd_num(struct hns3_nic_priv *priv,
h->kinfo.num_rx_desc = rx_desc_num;
for (i = 0; i < h->kinfo.num_tqps; i++) {
- priv->ring_data[i].ring->desc_num = tx_desc_num;
- priv->ring_data[i + h->kinfo.num_tqps].ring->desc_num =
- rx_desc_num;
+ priv->ring[i].desc_num = tx_desc_num;
+ priv->ring[i + h->kinfo.num_tqps].desc_num = rx_desc_num;
}
}
@@ -924,7 +918,7 @@ static struct hns3_enet_ring *hns3_backup_ringparam(struct hns3_nic_priv *priv)
return NULL;
for (i = 0; i < handle->kinfo.num_tqps * 2; i++) {
- memcpy(&tmp_rings[i], priv->ring_data[i].ring,
+ memcpy(&tmp_rings[i], &priv->ring[i],
sizeof(struct hns3_enet_ring));
tmp_rings[i].skb = NULL;
}
@@ -972,8 +966,8 @@ static int hns3_set_ringparam(struct net_device *ndev,
/* Hardware requires that its descriptors must be multiple of eight */
new_tx_desc_num = ALIGN(param->tx_pending, HNS3_RING_BD_MULTIPLE);
new_rx_desc_num = ALIGN(param->rx_pending, HNS3_RING_BD_MULTIPLE);
- old_tx_desc_num = priv->ring_data[0].ring->desc_num;
- old_rx_desc_num = priv->ring_data[queue_num].ring->desc_num;
+ old_tx_desc_num = priv->ring[0].desc_num;
+ old_rx_desc_num = priv->ring[queue_num].desc_num;
if (old_tx_desc_num == new_tx_desc_num &&
old_rx_desc_num == new_rx_desc_num)
return 0;
@@ -986,7 +980,7 @@ static int hns3_set_ringparam(struct net_device *ndev,
}
netdev_info(ndev,
- "Changing Tx/Rx ring depth from %d/%d to %d/%d\n",
+ "Changing Tx/Rx ring depth from %u/%u to %u/%u\n",
old_tx_desc_num, old_rx_desc_num,
new_tx_desc_num, new_rx_desc_num);
@@ -1002,7 +996,7 @@ static int hns3_set_ringparam(struct net_device *ndev,
hns3_change_all_ring_bd_num(priv, old_tx_desc_num,
old_rx_desc_num);
for (i = 0; i < h->kinfo.num_tqps * 2; i++)
- memcpy(priv->ring_data[i].ring, &tmp_rings[i],
+ memcpy(&priv->ring[i], &tmp_rings[i],
sizeof(struct hns3_enet_ring));
} else {
for (i = 0; i < h->kinfo.num_tqps * 2; i++)
@@ -1098,13 +1092,13 @@ static int hns3_get_coalesce_per_queue(struct net_device *netdev, u32 queue,
if (queue >= queue_num) {
netdev_err(netdev,
- "Invalid queue value %d! Queue max id=%d\n",
+ "Invalid queue value %u! Queue max id=%u\n",
queue, queue_num - 1);
return -EINVAL;
}
- tx_vector = priv->ring_data[queue].ring->tqp_vector;
- rx_vector = priv->ring_data[queue_num + queue].ring->tqp_vector;
+ tx_vector = priv->ring[queue].tqp_vector;
+ rx_vector = priv->ring[queue_num + queue].tqp_vector;
cmd->use_adaptive_tx_coalesce =
tx_vector->tx_group.coal.gl_adapt_enable;
@@ -1148,14 +1142,14 @@ static int hns3_check_gl_coalesce_para(struct net_device *netdev,
rx_gl = hns3_gl_round_down(cmd->rx_coalesce_usecs);
if (rx_gl != cmd->rx_coalesce_usecs) {
netdev_info(netdev,
- "rx_usecs(%d) rounded down to %d, because it must be multiple of 2.\n",
+ "rx_usecs(%u) rounded down to %u, because it must be multiple of 2.\n",
cmd->rx_coalesce_usecs, rx_gl);
}
tx_gl = hns3_gl_round_down(cmd->tx_coalesce_usecs);
if (tx_gl != cmd->tx_coalesce_usecs) {
netdev_info(netdev,
- "tx_usecs(%d) rounded down to %d, because it must be multiple of 2.\n",
+ "tx_usecs(%u) rounded down to %u, because it must be multiple of 2.\n",
cmd->tx_coalesce_usecs, tx_gl);
}
@@ -1183,7 +1177,7 @@ static int hns3_check_rl_coalesce_para(struct net_device *netdev,
rl = hns3_rl_round_down(cmd->rx_coalesce_usecs_high);
if (rl != cmd->rx_coalesce_usecs_high) {
netdev_info(netdev,
- "usecs_high(%d) rounded down to %d, because it must be multiple of 4.\n",
+ "usecs_high(%u) rounded down to %u, because it must be multiple of 4.\n",
cmd->rx_coalesce_usecs_high, rl);
}
@@ -1212,7 +1206,7 @@ static int hns3_check_coalesce_para(struct net_device *netdev,
if (cmd->use_adaptive_tx_coalesce == 1 ||
cmd->use_adaptive_rx_coalesce == 1) {
netdev_info(netdev,
- "adaptive-tx=%d and adaptive-rx=%d, tx_usecs or rx_usecs will changed dynamically.\n",
+ "adaptive-tx=%u and adaptive-rx=%u, tx_usecs or rx_usecs will changed dynamically.\n",
cmd->use_adaptive_tx_coalesce,
cmd->use_adaptive_rx_coalesce);
}
@@ -1229,8 +1223,8 @@ static void hns3_set_coalesce_per_queue(struct net_device *netdev,
struct hnae3_handle *h = priv->ae_handle;
int queue_num = h->kinfo.num_tqps;
- tx_vector = priv->ring_data[queue].ring->tqp_vector;
- rx_vector = priv->ring_data[queue_num + queue].ring->tqp_vector;
+ tx_vector = priv->ring[queue].tqp_vector;
+ rx_vector = priv->ring[queue_num + queue].tqp_vector;
tx_vector->tx_group.coal.gl_adapt_enable =
cmd->use_adaptive_tx_coalesce;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
index ecf58cfd253d..940ead3970d1 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
@@ -145,7 +145,7 @@ static int hclge_cmd_csq_clean(struct hclge_hw *hw)
rmb(); /* Make sure head is ready before touch any data */
if (!is_valid_csq_clean_head(csq, head)) {
- dev_warn(&hdev->pdev->dev, "wrong cmd head (%d, %d-%d)\n", head,
+ dev_warn(&hdev->pdev->dev, "wrong cmd head (%u, %d-%d)\n", head,
csq->next_to_use, csq->next_to_clean);
dev_warn(&hdev->pdev->dev,
"Disabling any further commands to IMP firmware\n");
@@ -314,11 +314,10 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
} while (timeout < hw->cmq.tx_timeout);
}
- if (!complete) {
+ if (!complete)
retval = -EBADE;
- } else {
+ else
retval = hclge_cmd_check_retval(hw, desc, num, ntc);
- }
/* Clean the command send queue */
handle = hclge_cmd_csq_clean(hw);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index 1426eb5ddf3d..d97da67f07a1 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -5,8 +5,10 @@
#define __HCLGE_CMD_H
#include <linux/types.h>
#include <linux/io.h>
+#include <linux/etherdevice.h>
#define HCLGE_CMDQ_TX_TIMEOUT 30000
+#define HCLGE_DESC_DATA_LEN 6
struct hclge_dev;
struct hclge_desc {
@@ -18,7 +20,7 @@ struct hclge_desc {
__le16 flag;
__le16 retval;
__le16 rsv;
- __le32 data[6];
+ __le32 data[HCLGE_DESC_DATA_LEN];
};
struct hclge_cmq_ring {
@@ -244,7 +246,7 @@ enum hclge_opcode_type {
/* QCN commands */
HCLGE_OPC_QCN_MOD_CFG = 0x1A01,
HCLGE_OPC_QCN_GRP_TMPLT_CFG = 0x1A02,
- HCLGE_OPC_QCN_SHAPPING_IR_CFG = 0x1A03,
+ HCLGE_OPC_QCN_SHAPPING_CFG = 0x1A03,
HCLGE_OPC_QCN_SHAPPING_BS_CFG = 0x1A04,
HCLGE_OPC_QCN_QSET_LINK_CFG = 0x1A05,
HCLGE_OPC_QCN_RP_STATUS_GET = 0x1A06,
@@ -259,6 +261,7 @@ enum hclge_opcode_type {
/* NCL config command */
HCLGE_OPC_QUERY_NCL_CONFIG = 0x7011,
+
/* M7 stats command */
HCLGE_OPC_M7_STATS_BD = 0x7012,
HCLGE_OPC_M7_STATS_INFO = 0x7013,
@@ -428,8 +431,10 @@ struct hclge_rx_pkt_buf_cmd {
#define HCLGE_PF_MAC_NUM_MASK 0x3
#define HCLGE_PF_STATE_MAIN BIT(HCLGE_PF_STATE_MAIN_B)
#define HCLGE_PF_STATE_DONE BIT(HCLGE_PF_STATE_DONE_B)
+#define HCLGE_VF_RST_STATUS_CMD 4
+
struct hclge_func_status_cmd {
- __le32 vf_rst_state[4];
+ __le32 vf_rst_state[HCLGE_VF_RST_STATUS_CMD];
u8 pf_state;
u8 mac_id;
u8 rsv1;
@@ -485,10 +490,12 @@ struct hclge_pf_res_cmd {
#define HCLGE_CFG_UMV_TBL_SPACE_S 16
#define HCLGE_CFG_UMV_TBL_SPACE_M GENMASK(31, 16)
+#define HCLGE_CFG_CMD_CNT 4
+
struct hclge_cfg_param_cmd {
__le32 offset;
__le32 rsv;
- __le32 param[4];
+ __le32 param[HCLGE_CFG_CMD_CNT];
};
#define HCLGE_MAC_MODE 0x0
@@ -712,8 +719,7 @@ struct hclge_mac_mgr_tbl_entry_cmd {
u8 flags;
u8 resp_code;
__le16 vlan_tag;
- __le32 mac_addr_hi32;
- __le16 mac_addr_lo16;
+ u8 mac_addr[ETH_ALEN];
__le16 rsv1;
__le16 ethter_type;
__le16 egress_port;
@@ -758,20 +764,27 @@ struct hclge_vlan_filter_ctrl_cmd {
u8 rsv2[19];
};
+#define HCLGE_VLAN_ID_OFFSET_STEP 160
+#define HCLGE_VLAN_BYTE_SIZE 8
+#define HCLGE_VLAN_OFFSET_BITMAP \
+ (HCLGE_VLAN_ID_OFFSET_STEP / HCLGE_VLAN_BYTE_SIZE)
+
struct hclge_vlan_filter_pf_cfg_cmd {
u8 vlan_offset;
u8 vlan_cfg;
u8 rsv[2];
- u8 vlan_offset_bitmap[20];
+ u8 vlan_offset_bitmap[HCLGE_VLAN_OFFSET_BITMAP];
};
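
The new macros replace bare magic numbers with constants derived from the quantities they encode (160 VLAN IDs per offset step at 8 bits per byte gives a 20-byte bitmap), so a future change to the step size cannot leave a stale array length behind. The same derivation in a standalone sketch:

#include <stdio.h>

#define VLAN_ID_OFFSET_STEP 160
#define VLAN_BYTE_SIZE      8
#define VLAN_OFFSET_BITMAP  (VLAN_ID_OFFSET_STEP / VLAN_BYTE_SIZE)

int main(void)
{
    unsigned char bitmap[VLAN_OFFSET_BITMAP];

    printf("bitmap bytes: %zu\n", sizeof(bitmap)); /* 20, as before */
    return 0;
}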
+#define HCLGE_MAX_VF_BYTES 16
+
struct hclge_vlan_filter_vf_cfg_cmd {
__le16 vlan_id;
u8 resp_code;
u8 rsv;
u8 vlan_cfg;
u8 rsv1[3];
- u8 vf_bitmap[16];
+ u8 vf_bitmap[HCLGE_MAX_VF_BYTES];
};
#define HCLGE_SWITCH_ANTI_SPOOF_B 0U
@@ -806,6 +819,7 @@ enum hclge_mac_vlan_cfg_sel {
#define HCLGE_CFG_NIC_ROCE_SEL_B 4
#define HCLGE_ACCEPT_TAG2_B 5
#define HCLGE_ACCEPT_UNTAG2_B 6
+#define HCLGE_VF_NUM_PER_BYTE 8
struct hclge_vport_vtag_tx_cfg_cmd {
u8 vport_vlan_cfg;
@@ -813,7 +827,7 @@ struct hclge_vport_vtag_tx_cfg_cmd {
u8 rsv1[2];
__le16 def_vlan_tag1;
__le16 def_vlan_tag2;
- u8 vf_bitmap[8];
+ u8 vf_bitmap[HCLGE_VF_NUM_PER_BYTE];
u8 rsv2[8];
};
@@ -825,7 +839,7 @@ struct hclge_vport_vtag_rx_cfg_cmd {
u8 vport_vlan_cfg;
u8 vf_offset;
u8 rsv1[6];
- u8 vf_bitmap[8];
+ u8 vf_bitmap[HCLGE_VF_NUM_PER_BYTE];
u8 rsv2[8];
};
@@ -864,7 +878,7 @@ struct hclge_mac_ethertype_idx_rd_cmd {
u8 flags;
u8 resp_code;
__le16 vlan_tag;
- u8 mac_addr[6];
+ u8 mac_addr[ETH_ALEN];
__le16 index;
__le16 ethter_type;
__le16 egress_port;
@@ -1090,9 +1104,6 @@ void hclge_cmd_setup_basic_desc(struct hclge_desc *desc,
enum hclge_opcode_type opcode, bool is_read);
void hclge_cmd_reuse_desc(struct hclge_desc *desc, bool is_read);
-int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
- struct hclge_promisc_param *param);
-
enum hclge_cmd_status hclge_cmd_mdio_write(struct hclge_hw *hw,
struct hclge_desc *desc);
enum hclge_cmd_status hclge_cmd_mdio_read(struct hclge_hw *hw,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
index c063301d6060..d6c3952aba04 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
@@ -87,7 +87,7 @@ static int hclge_dcb_common_validate(struct hclge_dev *hdev, u8 num_tc,
for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
if (prio_tc[i] >= num_tc) {
dev_err(&hdev->pdev->dev,
- "prio_tc[%u] checking failed, %u >= num_tc(%u)\n",
+ "prio_tc[%d] checking failed, %u >= num_tc(%u)\n",
i, prio_tc[i], num_tc);
return -EINVAL;
}
@@ -124,7 +124,7 @@ static int hclge_ets_validate(struct hclge_dev *hdev, struct ieee_ets *ets,
if (ret)
return ret;
- for (i = 0; i < HNAE3_MAX_TC; i++) {
+ for (i = 0; i < hdev->tc_max; i++) {
switch (ets->tc_tsa[i]) {
case IEEE_8021QAZ_TSA_STRICT:
if (hdev->tm_info.tc_info[i].tc_sch_mode !=
@@ -318,6 +318,7 @@ static int hclge_ieee_setpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
struct net_device *netdev = h->kinfo.netdev;
struct hclge_dev *hdev = vport->back;
u8 i, j, pfc_map, *prio_tc;
+ int ret;
if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE)
@@ -347,7 +348,21 @@ static int hclge_ieee_setpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
hclge_tm_pfc_info_update(hdev);
- return hclge_pause_setup_hw(hdev, false);
+ ret = hclge_pause_setup_hw(hdev, false);
+ if (ret)
+ return ret;
+
+ ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
+ if (ret)
+ return ret;
+
+ ret = hclge_buffer_alloc(hdev);
+ if (ret) {
+ hclge_notify_client(hdev, HNAE3_UP_CLIENT);
+ return ret;
+ }
+
+ return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
}
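
The reworked hclge_ieee_setpfc() above now pauses the client, reallocates the packet buffers for the new PFC map, and restarts the client, taking care to bring it back up even when the reallocation fails. A sketch of that control flow with illustrative stand-in functions:

#include <stdio.h>

static int notify_down(void)      { puts("down"); return 0; }
static int notify_up(void)        { puts("up");   return 0; }
static int buffer_alloc(int fail) { return fail ? -1 : 0; }

/* Mirror of the new error flow: restart the client on the failure path
 * so the port is not left down.
 */
static int setup_pfc(int alloc_fails)
{
    int ret = notify_down();

    if (ret)
        return ret;

    ret = buffer_alloc(alloc_fails);
    if (ret) {
        notify_up(); /* best-effort restart on failure */
        return ret;
    }

    return notify_up();
}

int main(void)
{
    printf("ok:   %d\n", setup_pfc(0));
    printf("fail: %d\n", setup_pfc(1));
    return 0;
}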
/* DCBX configuration */
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
index d0128d792717..112df34b3869 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
@@ -145,7 +145,7 @@ static void hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
return;
}
- buf_len = sizeof(struct hclge_desc) * bd_num;
+ buf_len = sizeof(struct hclge_desc) * bd_num;
desc_src = kzalloc(buf_len, GFP_KERNEL);
if (!desc_src) {
dev_err(&hdev->pdev->dev, "call kzalloc failed\n");
@@ -153,7 +153,7 @@ static void hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
}
desc = desc_src;
- ret = hclge_dbg_cmd_send(hdev, desc, index, bd_num, reg_msg->cmd);
+ ret = hclge_dbg_cmd_send(hdev, desc, index, bd_num, reg_msg->cmd);
if (ret) {
kfree(desc_src);
return;
@@ -169,7 +169,7 @@ static void hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
if (dfx_message->flag)
dev_info(&hdev->pdev->dev, "%s: 0x%x\n",
dfx_message->message,
- desc->data[i % entries_per_desc]);
+ le32_to_cpu(desc->data[i % entries_per_desc]));
dfx_message++;
}
@@ -237,44 +237,48 @@ static void hclge_dbg_dump_dcb(struct hclge_dev *hdev, const char *cmd_buf)
if (ret)
return;
- dev_info(dev, "sch_nq_cnt: 0x%x\n", desc[0].data[1]);
+ dev_info(dev, "sch_nq_cnt: 0x%x\n", le32_to_cpu(desc[0].data[1]));
ret = hclge_dbg_cmd_send(hdev, desc, nq_id, 1, HCLGE_OPC_SCH_RQ_CNT);
if (ret)
return;
- dev_info(dev, "sch_rq_cnt: 0x%x\n", desc[0].data[1]);
+ dev_info(dev, "sch_rq_cnt: 0x%x\n", le32_to_cpu(desc[0].data[1]));
ret = hclge_dbg_cmd_send(hdev, desc, 0, 2, HCLGE_OPC_TM_INTERNAL_STS);
if (ret)
return;
- dev_info(dev, "pri_bp: 0x%x\n", desc[0].data[1]);
- dev_info(dev, "fifo_dfx_info: 0x%x\n", desc[0].data[2]);
- dev_info(dev, "sch_roce_fifo_afull_gap: 0x%x\n", desc[0].data[3]);
- dev_info(dev, "tx_private_waterline: 0x%x\n", desc[0].data[4]);
- dev_info(dev, "tm_bypass_en: 0x%x\n", desc[0].data[5]);
- dev_info(dev, "SSU_TM_BYPASS_EN: 0x%x\n", desc[1].data[0]);
- dev_info(dev, "SSU_RESERVE_CFG: 0x%x\n", desc[1].data[1]);
+ dev_info(dev, "pri_bp: 0x%x\n", le32_to_cpu(desc[0].data[1]));
+ dev_info(dev, "fifo_dfx_info: 0x%x\n", le32_to_cpu(desc[0].data[2]));
+ dev_info(dev, "sch_roce_fifo_afull_gap: 0x%x\n",
+ le32_to_cpu(desc[0].data[3]));
+ dev_info(dev, "tx_private_waterline: 0x%x\n",
+ le32_to_cpu(desc[0].data[4]));
+ dev_info(dev, "tm_bypass_en: 0x%x\n", le32_to_cpu(desc[0].data[5]));
+ dev_info(dev, "SSU_TM_BYPASS_EN: 0x%x\n", le32_to_cpu(desc[1].data[0]));
+ dev_info(dev, "SSU_RESERVE_CFG: 0x%x\n", le32_to_cpu(desc[1].data[1]));
ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1,
HCLGE_OPC_TM_INTERNAL_CNT);
if (ret)
return;
- dev_info(dev, "SCH_NIC_NUM: 0x%x\n", desc[0].data[1]);
- dev_info(dev, "SCH_ROCE_NUM: 0x%x\n", desc[0].data[2]);
+ dev_info(dev, "SCH_NIC_NUM: 0x%x\n", le32_to_cpu(desc[0].data[1]));
+ dev_info(dev, "SCH_ROCE_NUM: 0x%x\n", le32_to_cpu(desc[0].data[2]));
ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1,
HCLGE_OPC_TM_INTERNAL_STS_1);
if (ret)
return;
- dev_info(dev, "TC_MAP_SEL: 0x%x\n", desc[0].data[1]);
- dev_info(dev, "IGU_PFC_PRI_EN: 0x%x\n", desc[0].data[2]);
- dev_info(dev, "MAC_PFC_PRI_EN: 0x%x\n", desc[0].data[3]);
- dev_info(dev, "IGU_PRI_MAP_TC_CFG: 0x%x\n", desc[0].data[4]);
- dev_info(dev, "IGU_TX_PRI_MAP_TC_CFG: 0x%x\n", desc[0].data[5]);
+ dev_info(dev, "TC_MAP_SEL: 0x%x\n", le32_to_cpu(desc[0].data[1]));
+ dev_info(dev, "IGU_PFC_PRI_EN: 0x%x\n", le32_to_cpu(desc[0].data[2]));
+ dev_info(dev, "MAC_PFC_PRI_EN: 0x%x\n", le32_to_cpu(desc[0].data[3]));
+ dev_info(dev, "IGU_PRI_MAP_TC_CFG: 0x%x\n",
+ le32_to_cpu(desc[0].data[4]));
+ dev_info(dev, "IGU_TX_PRI_MAP_TC_CFG: 0x%x\n",
+ le32_to_cpu(desc[0].data[5]));
}
static void hclge_dbg_dump_reg_cmd(struct hclge_dev *hdev, const char *cmd_buf)
@@ -364,7 +368,7 @@ static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev)
pg_shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;
dev_info(&hdev->pdev->dev, "PG_C pg_id: %u\n", pg_shap_cfg_cmd->pg_id);
dev_info(&hdev->pdev->dev, "PG_C pg_shapping: 0x%x\n",
- pg_shap_cfg_cmd->pg_shapping_para);
+ le32_to_cpu(pg_shap_cfg_cmd->pg_shapping_para));
cmd = HCLGE_OPC_TM_PG_P_SHAPPING;
hclge_cmd_setup_basic_desc(&desc, cmd, true);
@@ -375,7 +379,7 @@ static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev)
pg_shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;
dev_info(&hdev->pdev->dev, "PG_P pg_id: %u\n", pg_shap_cfg_cmd->pg_id);
dev_info(&hdev->pdev->dev, "PG_P pg_shapping: 0x%x\n",
- pg_shap_cfg_cmd->pg_shapping_para);
+ le32_to_cpu(pg_shap_cfg_cmd->pg_shapping_para));
cmd = HCLGE_OPC_TM_PORT_SHAPPING;
hclge_cmd_setup_basic_desc(&desc, cmd, true);
@@ -385,7 +389,7 @@ static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev)
port_shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data;
dev_info(&hdev->pdev->dev, "PORT port_shapping: 0x%x\n",
- port_shap_cfg_cmd->port_shapping_para);
+ le32_to_cpu(port_shap_cfg_cmd->port_shapping_para));
cmd = HCLGE_OPC_TM_PG_SCH_MODE_CFG;
hclge_cmd_setup_basic_desc(&desc, cmd, true);
@@ -393,7 +397,8 @@ static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev)
if (ret)
goto err_tm_pg_cmd_send;
- dev_info(&hdev->pdev->dev, "PG_SCH pg_id: %u\n", desc.data[0]);
+ dev_info(&hdev->pdev->dev, "PG_SCH pg_id: %u\n",
+ le32_to_cpu(desc.data[0]));
cmd = HCLGE_OPC_TM_PRI_SCH_MODE_CFG;
hclge_cmd_setup_basic_desc(&desc, cmd, true);
@@ -401,7 +406,8 @@ static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev)
if (ret)
goto err_tm_pg_cmd_send;
- dev_info(&hdev->pdev->dev, "PRI_SCH pri_id: %u\n", desc.data[0]);
+ dev_info(&hdev->pdev->dev, "PRI_SCH pri_id: %u\n",
+ le32_to_cpu(desc.data[0]));
cmd = HCLGE_OPC_TM_QS_SCH_MODE_CFG;
hclge_cmd_setup_basic_desc(&desc, cmd, true);
@@ -409,7 +415,8 @@ static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev)
if (ret)
goto err_tm_pg_cmd_send;
- dev_info(&hdev->pdev->dev, "QS_SCH qs_id: %u\n", desc.data[0]);
+ dev_info(&hdev->pdev->dev, "QS_SCH qs_id: %u\n",
+ le32_to_cpu(desc.data[0]));
if (!hnae3_dev_dcb_supported(hdev)) {
dev_info(&hdev->pdev->dev,
@@ -429,7 +436,7 @@ static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev)
dev_info(&hdev->pdev->dev, "BP_TO_QSET qs_group_id: 0x%x\n",
bp_to_qs_map_cmd->qs_group_id);
dev_info(&hdev->pdev->dev, "BP_TO_QSET qs_bit_map: 0x%x\n",
- bp_to_qs_map_cmd->qs_bit_map);
+ le32_to_cpu(bp_to_qs_map_cmd->qs_bit_map));
return;
err_tm_pg_cmd_send:
@@ -471,7 +478,7 @@ static void hclge_dbg_dump_tm(struct hclge_dev *hdev)
qs_to_pri_map = (struct hclge_qs_to_pri_link_cmd *)desc.data;
dev_info(&hdev->pdev->dev, "QS_TO_PRI qs_id: %u\n",
- qs_to_pri_map->qs_id);
+ le16_to_cpu(qs_to_pri_map->qs_id));
dev_info(&hdev->pdev->dev, "QS_TO_PRI priority: %u\n",
qs_to_pri_map->priority);
dev_info(&hdev->pdev->dev, "QS_TO_PRI link_vld: %u\n",
@@ -484,9 +491,10 @@ static void hclge_dbg_dump_tm(struct hclge_dev *hdev)
goto err_tm_cmd_send;
nq_to_qs_map = (struct hclge_nq_to_qs_link_cmd *)desc.data;
- dev_info(&hdev->pdev->dev, "NQ_TO_QS nq_id: %u\n", nq_to_qs_map->nq_id);
+ dev_info(&hdev->pdev->dev, "NQ_TO_QS nq_id: %u\n",
+ le16_to_cpu(nq_to_qs_map->nq_id));
dev_info(&hdev->pdev->dev, "NQ_TO_QS qset_id: 0x%x\n",
- nq_to_qs_map->qset_id);
+ le16_to_cpu(nq_to_qs_map->qset_id));
cmd = HCLGE_OPC_TM_PG_WEIGHT;
hclge_cmd_setup_basic_desc(&desc, cmd, true);
@@ -505,7 +513,8 @@ static void hclge_dbg_dump_tm(struct hclge_dev *hdev)
goto err_tm_cmd_send;
qs_weight = (struct hclge_qs_weight_cmd *)desc.data;
- dev_info(&hdev->pdev->dev, "QS qs_id: %u\n", qs_weight->qs_id);
+ dev_info(&hdev->pdev->dev, "QS qs_id: %u\n",
+ le16_to_cpu(qs_weight->qs_id));
dev_info(&hdev->pdev->dev, "QS dwrr: %u\n", qs_weight->dwrr);
cmd = HCLGE_OPC_TM_PRI_WEIGHT;
@@ -527,7 +536,7 @@ static void hclge_dbg_dump_tm(struct hclge_dev *hdev)
shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;
dev_info(&hdev->pdev->dev, "PRI_C pri_id: %u\n", shap_cfg_cmd->pri_id);
dev_info(&hdev->pdev->dev, "PRI_C pri_shapping: 0x%x\n",
- shap_cfg_cmd->pri_shapping_para);
+ le32_to_cpu(shap_cfg_cmd->pri_shapping_para));
cmd = HCLGE_OPC_TM_PRI_P_SHAPPING;
hclge_cmd_setup_basic_desc(&desc, cmd, true);
@@ -538,7 +547,7 @@ static void hclge_dbg_dump_tm(struct hclge_dev *hdev)
shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;
dev_info(&hdev->pdev->dev, "PRI_P pri_id: %u\n", shap_cfg_cmd->pri_id);
dev_info(&hdev->pdev->dev, "PRI_P pri_shapping: 0x%x\n",
- shap_cfg_cmd->pri_shapping_para);
+ le32_to_cpu(shap_cfg_cmd->pri_shapping_para));
hclge_dbg_dump_tm_pg(hdev);
@@ -658,7 +667,7 @@ static void hclge_dbg_dump_qos_pause_cfg(struct hclge_dev *hdev)
dev_info(&hdev->pdev->dev, "pause_trans_gap: 0x%x\n",
pause_param->pause_trans_gap);
dev_info(&hdev->pdev->dev, "pause_trans_time: 0x%x\n",
- pause_param->pause_trans_time);
+ le16_to_cpu(pause_param->pause_trans_time));
}
static void hclge_dbg_dump_qos_pri_map(struct hclge_dev *hdev)
@@ -712,7 +721,7 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
tx_buf_cmd = (struct hclge_tx_buff_alloc_cmd *)desc[0].data;
for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
dev_info(&hdev->pdev->dev, "tx_packet_buf_tc_%d: 0x%x\n", i,
- tx_buf_cmd->tx_pkt_buff[i]);
+ le16_to_cpu(tx_buf_cmd->tx_pkt_buff[i]));
cmd = HCLGE_OPC_RX_PRIV_BUFF_ALLOC;
hclge_cmd_setup_basic_desc(desc, cmd, true);
@@ -724,10 +733,10 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
rx_buf_cmd = (struct hclge_rx_priv_buff_cmd *)desc[0].data;
for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
dev_info(&hdev->pdev->dev, "rx_packet_buf_tc_%d: 0x%x\n", i,
- rx_buf_cmd->buf_num[i]);
+ le16_to_cpu(rx_buf_cmd->buf_num[i]));
dev_info(&hdev->pdev->dev, "rx_share_buf: 0x%x\n",
- rx_buf_cmd->shared_buf);
+ le16_to_cpu(rx_buf_cmd->shared_buf));
cmd = HCLGE_OPC_RX_COM_WL_ALLOC;
hclge_cmd_setup_basic_desc(desc, cmd, true);
@@ -738,7 +747,8 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
rx_com_wl = (struct hclge_rx_com_wl *)desc[0].data;
dev_info(&hdev->pdev->dev, "\n");
dev_info(&hdev->pdev->dev, "rx_com_wl: high: 0x%x, low: 0x%x\n",
- rx_com_wl->com_wl.high, rx_com_wl->com_wl.low);
+ le16_to_cpu(rx_com_wl->com_wl.high),
+ le16_to_cpu(rx_com_wl->com_wl.low));
cmd = HCLGE_OPC_RX_GBL_PKT_CNT;
hclge_cmd_setup_basic_desc(desc, cmd, true);
@@ -749,7 +759,8 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
rx_packet_cnt = (struct hclge_rx_com_wl *)desc[0].data;
dev_info(&hdev->pdev->dev,
"rx_global_packet_cnt: high: 0x%x, low: 0x%x\n",
- rx_packet_cnt->com_wl.high, rx_packet_cnt->com_wl.low);
+ le16_to_cpu(rx_packet_cnt->com_wl.high),
+ le16_to_cpu(rx_packet_cnt->com_wl.low));
dev_info(&hdev->pdev->dev, "\n");
if (!hnae3_dev_dcb_supported(hdev)) {
@@ -769,14 +780,16 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
dev_info(&hdev->pdev->dev,
"rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n", i,
- rx_priv_wl->tc_wl[i].high, rx_priv_wl->tc_wl[i].low);
+ le16_to_cpu(rx_priv_wl->tc_wl[i].high),
+ le16_to_cpu(rx_priv_wl->tc_wl[i].low));
rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[1].data;
for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
dev_info(&hdev->pdev->dev,
"rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n",
i + HCLGE_TC_NUM_ONE_DESC,
- rx_priv_wl->tc_wl[i].high, rx_priv_wl->tc_wl[i].low);
+ le16_to_cpu(rx_priv_wl->tc_wl[i].high),
+ le16_to_cpu(rx_priv_wl->tc_wl[i].low));
cmd = HCLGE_OPC_RX_COM_THRD_ALLOC;
hclge_cmd_setup_basic_desc(&desc[0], cmd, true);
@@ -791,16 +804,16 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
dev_info(&hdev->pdev->dev,
"rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n", i,
- rx_com_thrd->com_thrd[i].high,
- rx_com_thrd->com_thrd[i].low);
+ le16_to_cpu(rx_com_thrd->com_thrd[i].high),
+ le16_to_cpu(rx_com_thrd->com_thrd[i].low));
rx_com_thrd = (struct hclge_rx_com_thrd *)desc[1].data;
for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
dev_info(&hdev->pdev->dev,
"rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n",
i + HCLGE_TC_NUM_ONE_DESC,
- rx_com_thrd->com_thrd[i].high,
- rx_com_thrd->com_thrd[i].low);
+ le16_to_cpu(rx_com_thrd->com_thrd[i].high),
+ le16_to_cpu(rx_com_thrd->com_thrd[i].low));
return;
err_qos_cmd_send:
@@ -845,7 +858,8 @@ static void hclge_dbg_dump_mng_table(struct hclge_dev *hdev)
memset(printf_buf, 0, HCLGE_DBG_BUF_LEN);
snprintf(printf_buf, HCLGE_DBG_BUF_LEN,
"%02u |%02x:%02x:%02x:%02x:%02x:%02x|",
- req0->index, req0->mac_addr[0], req0->mac_addr[1],
+ le16_to_cpu(req0->index),
+ req0->mac_addr[0], req0->mac_addr[1],
req0->mac_addr[2], req0->mac_addr[3],
req0->mac_addr[4], req0->mac_addr[5]);
@@ -929,7 +943,7 @@ static void hclge_dbg_fd_tcam(struct hclge_dev *hdev)
}
}
-static void hclge_dbg_dump_rst_info(struct hclge_dev *hdev)
+void hclge_dbg_dump_rst_info(struct hclge_dev *hdev)
{
dev_info(&hdev->pdev->dev, "PF reset count: %u\n",
hdev->rst_stats.pf_rst_cnt);
@@ -945,8 +959,6 @@ static void hclge_dbg_dump_rst_info(struct hclge_dev *hdev)
hdev->rst_stats.hw_reset_done_cnt);
dev_info(&hdev->pdev->dev, "reset count: %u\n",
hdev->rst_stats.reset_cnt);
- dev_info(&hdev->pdev->dev, "reset count: %u\n",
- hdev->rst_stats.reset_cnt);
dev_info(&hdev->pdev->dev, "reset fail count: %u\n",
hdev->rst_stats.reset_fail_cnt);
dev_info(&hdev->pdev->dev, "vector0 interrupt enable status: 0x%x\n",
@@ -961,6 +973,7 @@ static void hclge_dbg_dump_rst_info(struct hclge_dev *hdev)
hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG));
dev_info(&hdev->pdev->dev, "function reset status: 0x%x\n",
hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING));
+ dev_info(&hdev->pdev->dev, "hdev state: 0x%lx\n", hdev->state);
}
static void hclge_dbg_get_m7_stats_info(struct hclge_dev *hdev)
@@ -1110,6 +1123,82 @@ static void hclge_dbg_dump_mac_tnl_status(struct hclge_dev *hdev)
}
}
+static void hclge_dbg_dump_qs_shaper_single(struct hclge_dev *hdev, u16 qsid)
+{
+ struct hclge_qs_shapping_cmd *shap_cfg_cmd;
+ u8 ir_u, ir_b, ir_s, bs_b, bs_s;
+ struct hclge_desc desc;
+ u32 shapping_para;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QCN_SHAPPING_CFG, true);
+
+ shap_cfg_cmd = (struct hclge_qs_shapping_cmd *)desc.data;
+ shap_cfg_cmd->qs_id = cpu_to_le16(qsid);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "qs%u failed to get tx_rate, ret=%d\n",
+ qsid, ret);
+ return;
+ }
+
+ shapping_para = le32_to_cpu(shap_cfg_cmd->qs_shapping_para);
+ ir_b = hclge_tm_get_field(shapping_para, IR_B);
+ ir_u = hclge_tm_get_field(shapping_para, IR_U);
+ ir_s = hclge_tm_get_field(shapping_para, IR_S);
+ bs_b = hclge_tm_get_field(shapping_para, BS_B);
+ bs_s = hclge_tm_get_field(shapping_para, BS_S);
+
+ dev_info(&hdev->pdev->dev,
+ "qs%u ir_b:%u, ir_u:%u, ir_s:%u, bs_b:%u, bs_s:%u\n",
+ qsid, ir_b, ir_u, ir_s, bs_b, bs_s);
+}
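
hclge_tm_get_field() (defined in hclge_tm.h, not shown in this diff) extracts one sub-field from the packed 32-bit shaper word. It presumably expands to the usual mask-and-shift shape sketched below; the field layout here is an illustrative assumption, not the driver's real definition:

/* Illustrative field position only. */
#define EX_SHAPER_IR_B_MASK	GENMASK(7, 0)
#define EX_SHAPER_IR_B_SHIFT	0

static inline u8 ex_get_ir_b(u32 shapping_para)
{
	return (shapping_para & EX_SHAPER_IR_B_MASK) >> EX_SHAPER_IR_B_SHIFT;
}
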
+
+static void hclge_dbg_dump_qs_shaper_all(struct hclge_dev *hdev)
+{
+ struct hnae3_knic_private_info *kinfo;
+ struct hclge_vport *vport;
+ int vport_id, i;
+
+ for (vport_id = 0; vport_id <= pci_num_vf(hdev->pdev); vport_id++) {
+ vport = &hdev->vport[vport_id];
+ kinfo = &vport->nic.kinfo;
+
+ dev_info(&hdev->pdev->dev, "qs cfg of vport%d:\n", vport_id);
+
+ for (i = 0; i < kinfo->num_tc; i++) {
+ u16 qsid = vport->qs_offset + i;
+
+ hclge_dbg_dump_qs_shaper_single(hdev, qsid);
+ }
+ }
+}
+
+static void hclge_dbg_dump_qs_shaper(struct hclge_dev *hdev,
+ const char *cmd_buf)
+{
+#define HCLGE_MAX_QSET_NUM 1024
+
+ u16 qsid;
+ int ret;
+
+ ret = kstrtou16(cmd_buf, 0, &qsid);
+ if (ret) {
+ hclge_dbg_dump_qs_shaper_all(hdev);
+ return;
+ }
+
+ if (qsid >= HCLGE_MAX_QSET_NUM) {
+ dev_err(&hdev->pdev->dev, "qsid(%u) out of range [0-1023]\n",
+ qsid);
+ return;
+ }
+
+ hclge_dbg_dump_qs_shaper_single(hdev, qsid);
+}
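
The parse-or-dump-all shape used here is worth calling out: a kstrtou16() failure is treated as "no qsid argument was given", not as an error. Reduced to its essence (dump_all()/dump_one() are hypothetical stand-ins):

u16 id;

if (kstrtou16(arg, 0, &id))	/* returns -EINVAL/-ERANGE on bad input */
	dump_all();		/* no valid argument: dump everything */
else if (id < MAX_ID)		/* bounds check before touching hardware */
	dump_one(id);
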
+
int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf)
{
#define DUMP_REG "dump reg"
@@ -1145,6 +1234,9 @@ int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf)
&cmd_buf[sizeof("dump ncl_config")]);
} else if (strncmp(cmd_buf, "dump mac tnl status", 19) == 0) {
hclge_dbg_dump_mac_tnl_status(hdev);
+ } else if (strncmp(cmd_buf, "dump qs shaper", 14) == 0) {
+ hclge_dbg_dump_qs_shaper(hdev,
+ &cmd_buf[sizeof("dump qs shaper")]);
} else {
dev_info(&hdev->pdev->dev, "unknown command\n");
return -EINVAL;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
index 87dece0e745d..dc66b4e13377 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
@@ -1747,7 +1747,7 @@ static void hclge_handle_over_8bd_err(struct hclge_dev *hdev,
if (vf_id) {
if (vf_id >= hdev->num_alloc_vport) {
- dev_err(dev, "invalid vf id(%d)\n", vf_id);
+ dev_err(dev, "invalid vf id(%u)\n", vf_id);
return;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 16f7d0e15b4f..7c7038676d6d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -55,6 +55,8 @@
#define HCLGE_LINK_STATUS_MS 10
+#define HCLGE_VF_VPORT_START_NUM 1
+
static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
@@ -323,8 +325,7 @@ static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
{
.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
.ethter_type = cpu_to_le16(ETH_P_LLDP),
- .mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
- .mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
+ .mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
.i_port_bitmap = 0x1,
},
};
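
Replacing the cpu_to_le32(htonl())/cpu_to_le16(htons()) pair with a plain byte array sidesteps endianness entirely: a MAC address is a byte sequence, and mac_addr[0] is the first octet on the wire on every host. The same LLDP multicast address written out directly:

/* Endian-neutral: no byte swapping needed on any architecture. */
static const u8 lldp_mcast_mac[ETH_ALEN] = {
	0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e
};
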
@@ -1194,6 +1195,35 @@ static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
hclge_parse_backplane_link_mode(hdev, speed_ability);
}
+static u32 hclge_get_max_speed(u8 speed_ability)
+{
+ if (speed_ability & HCLGE_SUPPORT_100G_BIT)
+ return HCLGE_MAC_SPEED_100G;
+
+ if (speed_ability & HCLGE_SUPPORT_50G_BIT)
+ return HCLGE_MAC_SPEED_50G;
+
+ if (speed_ability & HCLGE_SUPPORT_40G_BIT)
+ return HCLGE_MAC_SPEED_40G;
+
+ if (speed_ability & HCLGE_SUPPORT_25G_BIT)
+ return HCLGE_MAC_SPEED_25G;
+
+ if (speed_ability & HCLGE_SUPPORT_10G_BIT)
+ return HCLGE_MAC_SPEED_10G;
+
+ if (speed_ability & HCLGE_SUPPORT_1G_BIT)
+ return HCLGE_MAC_SPEED_1G;
+
+ if (speed_ability & HCLGE_SUPPORT_100M_BIT)
+ return HCLGE_MAC_SPEED_100M;
+
+ if (speed_ability & HCLGE_SUPPORT_10M_BIT)
+ return HCLGE_MAC_SPEED_10M;
+
+ return HCLGE_MAC_SPEED_1G;
+}
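
Because the checks run from the highest speed bit down, the first match is the maximum supported speed, with 1G as the fallback. A quick usage sketch (the HCLGE_SUPPORT_* bit macros are the driver's own, from hclge_main.h):

u8 ability = HCLGE_SUPPORT_25G_BIT | HCLGE_SUPPORT_10G_BIT;
u32 max_speed = hclge_get_max_speed(ability);	/* HCLGE_MAC_SPEED_25G */
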
+
static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
struct hclge_cfg_param_cmd *req;
@@ -1364,9 +1394,11 @@ static int hclge_configure(struct hclge_dev *hdev)
hclge_parse_link_mode(hdev, cfg.speed_ability);
+ hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);
+
if ((hdev->tc_max > HNAE3_MAX_TC) ||
(hdev->tc_max < 1)) {
- dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
+ dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
hdev->tc_max);
hdev->tc_max = 1;
}
@@ -1626,7 +1658,7 @@ static int hclge_alloc_vport(struct hclge_dev *hdev)
num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
if (hdev->num_tqps < num_vport) {
- dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)",
+ dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
hdev->num_tqps, num_vport);
return -EINVAL;
}
@@ -1649,6 +1681,7 @@ static int hclge_alloc_vport(struct hclge_dev *hdev)
for (i = 0; i < num_vport; i++) {
vport->back = hdev;
vport->vport_id = i;
+ vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
vport->mps = HCLGE_MAC_DEFAULT_FRAME;
vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
vport->rxvlan_cfg.rx_vlan_offload_en = true;
@@ -2312,7 +2345,7 @@ static int hclge_init_msi(struct hclge_dev *hdev)
}
if (vectors < hdev->num_msi)
dev_warn(&hdev->pdev->dev,
- "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
+ "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
hdev->num_msi, vectors);
hdev->num_msi = vectors;
@@ -2744,7 +2777,7 @@ static void hclge_update_port_capability(struct hclge_mac *mac)
else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
mac->module_type = HNAE3_MODULE_TYPE_TP;
- if (mac->support_autoneg == true) {
+ if (mac->support_autoneg) {
linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
linkmode_copy(mac->advertising, mac->supported);
} else {
@@ -2871,6 +2904,62 @@ static int hclge_get_status(struct hnae3_handle *handle)
return hdev->hw.mac.link;
}
+static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
+{
+ if (pci_num_vf(hdev->pdev) == 0) {
+ dev_err(&hdev->pdev->dev,
+ "SRIOV is disabled, can not get vport(%d) info.\n", vf);
+ return NULL;
+ }
+
+ if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
+ dev_err(&hdev->pdev->dev,
+ "vf id(%d) is out of range(0 <= vfid < %d)\n",
+ vf, pci_num_vf(hdev->pdev));
+ return NULL;
+ }
+
+ /* VFs start from index 1 in the vport array; index 0 is the PF */
+ vf += HCLGE_VF_VPORT_START_NUM;
+ return &hdev->vport[vf];
+}
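
The helper encodes the driver's index convention: vport[0] belongs to the PF, so VF n (0-based, as the ndo callbacks pass it in) maps to vport[n + 1]. Usage sketch:

/* VF 0 lives at vport[1]; out-of-range ids (or SR-IOV disabled)
 * come back as NULL with the reason already logged. */
struct hclge_vport *vport = hclge_get_vf_vport(hdev, 0);

if (!vport)
	return -EINVAL;
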
+
+static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
+ struct ifla_vf_info *ivf)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ vport = hclge_get_vf_vport(hdev, vf);
+ if (!vport)
+ return -EINVAL;
+
+ ivf->vf = vf;
+ ivf->linkstate = vport->vf_info.link_state;
+ ivf->spoofchk = vport->vf_info.spoofchk;
+ ivf->trusted = vport->vf_info.trusted;
+ ivf->min_tx_rate = 0;
+ ivf->max_tx_rate = vport->vf_info.max_tx_rate;
+ ether_addr_copy(ivf->mac, vport->vf_info.mac);
+
+ return 0;
+}
+
+static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
+ int link_state)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ vport = hclge_get_vf_vport(hdev, vf);
+ if (!vport)
+ return -EINVAL;
+
+ vport->vf_info.link_state = link_state;
+
+ return 0;
+}
+
static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
{
u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
@@ -3191,7 +3280,7 @@ static int hclge_reset_wait(struct hclge_dev *hdev)
if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
dev_err(&hdev->pdev->dev,
- "flr wait timeout: %d\n", cnt);
+ "flr wait timeout: %u\n", cnt);
return -EBUSY;
}
@@ -3241,7 +3330,7 @@ static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
if (ret) {
dev_err(&hdev->pdev->dev,
- "set vf(%d) rst failed %d!\n",
+ "set vf(%u) rst failed %d!\n",
vport->vport_id, ret);
return ret;
}
@@ -3256,7 +3345,7 @@ static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
ret = hclge_inform_reset_assert_to_vf(vport);
if (ret)
dev_warn(&hdev->pdev->dev,
- "inform reset to vf(%d) failed %d!\n",
+ "inform reset to vf(%u) failed %d!\n",
vport->vport_id, ret);
}
@@ -3569,7 +3658,7 @@ static bool hclge_reset_err_handle(struct hclge_dev *hdev)
hdev->rst_stats.reset_fail_cnt++;
set_bit(hdev->reset_type, &hdev->reset_pending);
dev_info(&hdev->pdev->dev,
- "re-schedule reset task(%d)\n",
+ "re-schedule reset task(%u)\n",
hdev->rst_stats.reset_fail_cnt);
return true;
}
@@ -3580,6 +3669,9 @@ static bool hclge_reset_err_handle(struct hclge_dev *hdev)
hclge_reset_handshake(hdev, true);
dev_err(&hdev->pdev->dev, "Reset fail!\n");
+
+ hclge_dbg_dump_rst_info(hdev);
+
return false;
}
@@ -3779,12 +3871,13 @@ static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
HCLGE_RESET_INTERVAL))) {
mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
return;
- } else if (hdev->default_reset_request)
+ } else if (hdev->default_reset_request) {
hdev->reset_level =
hclge_get_reset_level(ae_dev,
&hdev->default_reset_request);
- else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ)))
+ } else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
hdev->reset_level = HNAE3_FUNC_RESET;
+ }
dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
hdev->reset_level);
@@ -3909,6 +4002,7 @@ static void hclge_service_task(struct work_struct *work)
hclge_update_link_status(hdev);
hclge_update_vport_alive(hdev);
hclge_sync_vlan_filter(hdev);
+
if (hdev->fd_arfs_expire_timer >= HCLGE_FD_ARFS_EXPIRE_TIMER_INTERVAL) {
hclge_rfs_filter_expire(hdev);
hdev->fd_arfs_expire_timer = 0;
@@ -4415,7 +4509,7 @@ int hclge_rss_init_hw(struct hclge_dev *hdev)
*/
if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
dev_err(&hdev->pdev->dev,
- "Configure rss tc size failed, invalid TC_SIZE = %d\n",
+ "Configure rss tc size failed, invalid TC_SIZE = %u\n",
rss_size);
return -EINVAL;
}
@@ -4593,8 +4687,8 @@ static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
return ret;
}
-int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
- struct hclge_promisc_param *param)
+static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
+ struct hclge_promisc_param *param)
{
struct hclge_promisc_cfg_cmd *req;
struct hclge_desc desc;
@@ -4621,8 +4715,9 @@ int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
return ret;
}
-void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
- bool en_mc, bool en_bc, int vport_id)
+static void hclge_promisc_param_init(struct hclge_promisc_param *param,
+ bool en_uc, bool en_mc, bool en_bc,
+ int vport_id)
{
if (!param)
return;
@@ -4637,12 +4732,21 @@ void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
param->vf_id = vport_id;
}
+int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
+ bool en_mc_pmc, bool en_bc_pmc)
+{
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_promisc_param param;
+
+ hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
+ vport->vport_id);
+ return hclge_cmd_set_promisc_mode(hdev, &param);
+}
+
static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
bool en_mc_pmc)
{
struct hclge_vport *vport = hclge_get_vport(handle);
- struct hclge_dev *hdev = vport->back;
- struct hclge_promisc_param param;
bool en_bc_pmc = true;
/* For revision 0x20, if broadcast promisc enabled, vlan filter is
@@ -4652,9 +4756,8 @@ static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
if (handle->pdev->revision == 0x20)
en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
- hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
- vport->vport_id);
- return hclge_cmd_set_promisc_mode(hdev, &param);
+ return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
+ en_bc_pmc);
}
static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
@@ -4756,7 +4859,7 @@ static int hclge_init_fd_config(struct hclge_dev *hdev)
break;
default:
dev_err(&hdev->pdev->dev,
- "Unsupported flow director mode %d\n",
+ "Unsupported flow director mode %u\n",
hdev->fd_cfg.fd_mode);
return -EOPNOTSUPP;
}
@@ -5086,7 +5189,7 @@ static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
true);
if (ret) {
dev_err(&hdev->pdev->dev,
- "fd key_y config fail, loc=%d, ret=%d\n",
+ "fd key_y config fail, loc=%u, ret=%d\n",
rule->queue_id, ret);
return ret;
}
@@ -5095,7 +5198,7 @@ static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
true);
if (ret)
dev_err(&hdev->pdev->dev,
- "fd key_x config fail, loc=%d, ret=%d\n",
+ "fd key_x config fail, loc=%u, ret=%d\n",
rule->queue_id, ret);
return ret;
}
@@ -5344,7 +5447,7 @@ static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
}
} else if (!is_add) {
dev_err(&hdev->pdev->dev,
- "delete fail, rule %d is inexistent\n",
+ "delete fail, rule %u is inexistent\n",
location);
return -EINVAL;
}
@@ -5584,7 +5687,7 @@ static int hclge_add_fd_entry(struct hnae3_handle *handle,
if (vf > hdev->num_req_vfs) {
dev_err(&hdev->pdev->dev,
- "Error: vf id (%d) > max vf num (%d)\n",
+ "Error: vf id (%u) > max vf num (%u)\n",
vf, hdev->num_req_vfs);
return -EINVAL;
}
@@ -5594,7 +5697,7 @@ static int hclge_add_fd_entry(struct hnae3_handle *handle,
if (ring >= tqps) {
dev_err(&hdev->pdev->dev,
- "Error: queue id (%d) > max tqp num (%d)\n",
+ "Error: queue id (%u) > max tqp num (%u)\n",
ring, tqps - 1);
return -EINVAL;
}
@@ -5653,7 +5756,7 @@ static int hclge_del_fd_entry(struct hnae3_handle *handle,
if (!hclge_fd_rule_exist(hdev, fs->location)) {
dev_err(&hdev->pdev->dev,
- "Delete fail, rule %d is inexistent\n", fs->location);
+ "Delete fail, rule %u is inexistent\n", fs->location);
return -ENOENT;
}
@@ -5730,7 +5833,7 @@ static int hclge_restore_fd_entries(struct hnae3_handle *handle)
if (ret) {
dev_warn(&hdev->pdev->dev,
- "Restore rule %d failed, remove it\n",
+ "Restore rule %u failed, remove it\n",
rule->location);
clear_bit(rule->location, hdev->fd_bmap);
hlist_del(&rule->rule_node);
@@ -6263,11 +6366,23 @@ static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
+
+ /* read current config parameter */
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
- false);
+ true);
req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
req->func_id = cpu_to_le32(func_id);
- req->switch_param = switch_param;
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "read mac vlan switch parameter fail, ret = %d\n", ret);
+ return ret;
+ }
+
+ /* modify and write new config parameter */
+ hclge_cmd_reuse_desc(&desc, false);
+ req->switch_param = (req->switch_param & param_mask) | switch_param;
req->param_mask = param_mask;
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
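
This hunk turns a blind write into a read-modify-write so switch parameters outside param_mask survive the update (the firmware is assumed to apply only the bits selected by param_mask). The conventional form of the pattern, reduced to a one-liner:

static u8 rmw_field(u8 old_val, u8 mask, u8 update)
{
	/* Keep unrelated bits, replace only the masked field. */
	return (old_val & ~mask) | (update & mask);
}
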
@@ -6723,7 +6838,7 @@ static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
if (cmdq_resp) {
dev_err(&hdev->pdev->dev,
- "cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n",
+ "cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n",
cmdq_resp);
return -EIO;
}
@@ -6975,7 +7090,7 @@ static int hclge_init_umv_space(struct hclge_dev *hdev)
if (allocated_size < hdev->wanted_umv_size)
dev_warn(&hdev->pdev->dev,
- "Alloc umv space failed, want %d, get %d\n",
+ "Alloc umv space failed, want %u, get %u\n",
hdev->wanted_umv_size, allocated_size);
mutex_init(&hdev->umv_mutex);
@@ -7143,7 +7258,7 @@ int hclge_add_uc_addr_common(struct hclge_vport *vport,
/* check if we just hit the duplicate */
if (!ret) {
- dev_warn(&hdev->pdev->dev, "VF %d mac(%pM) exists\n",
+ dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n",
vport->vport_id, addr);
return 0;
}
@@ -7324,7 +7439,7 @@ void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;
list_for_each_entry_safe(mac_cfg, tmp, list, node) {
- if (strncmp(mac_cfg->mac_addr, mac_addr, ETH_ALEN) == 0) {
+ if (ether_addr_equal(mac_cfg->mac_addr, mac_addr)) {
if (uc_flag && mac_cfg->hd_tbl_status)
hclge_rm_uc_addr_common(vport, mac_addr);
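
ether_addr_equal() replaces strncmp() because strncmp() stops at the first 0x00 byte, and 0x00 is a perfectly valid MAC octet. A worked failure case:

u8 a[ETH_ALEN] = { 0x02, 0x00, 0x11, 0x22, 0x33, 0x44 };
u8 b[ETH_ALEN] = { 0x02, 0x00, 0xff, 0xff, 0xff, 0xff };

strncmp((char *)a, (char *)b, ETH_ALEN);	/* 0: stops at a[1] == 0x00 */
ether_addr_equal(a, b);				/* false: all six bytes compared */
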
@@ -7396,7 +7511,7 @@ static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
if (cmdq_resp) {
dev_err(&hdev->pdev->dev,
- "cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
+ "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
cmdq_resp);
return -EIO;
}
@@ -7418,7 +7533,7 @@ static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
break;
default:
dev_err(&hdev->pdev->dev,
- "add mac ethertype failed for undefined, code=%d.\n",
+ "add mac ethertype failed for undefined, code=%u.\n",
resp_code);
return_status = -EIO;
}
@@ -7426,6 +7541,67 @@ static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
return return_status;
}
+static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
+ u8 *mac_addr)
+{
+ struct hclge_mac_vlan_tbl_entry_cmd req;
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_desc desc;
+ u16 egress_port = 0;
+ int i;
+
+ if (is_zero_ether_addr(mac_addr))
+ return false;
+
+ memset(&req, 0, sizeof(req));
+ hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
+ HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
+ req.egress_port = cpu_to_le16(egress_port);
+ hclge_prepare_mac_addr(&req, mac_addr, false);
+
+ if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
+ return true;
+
+ vf_idx += HCLGE_VF_VPORT_START_NUM;
+ for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++)
+ if (i != vf_idx &&
+ ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
+ return true;
+
+ return false;
+}
+
+static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
+ u8 *mac_addr)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ vport = hclge_get_vf_vport(hdev, vf);
+ if (!vport)
+ return -EINVAL;
+
+ if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
+ dev_info(&hdev->pdev->dev,
+ "Specified MAC(=%pM) is same as before, no change committed!\n",
+ mac_addr);
+ return 0;
+ }
+
+ if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
+ dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
+ mac_addr);
+ return -EEXIST;
+ }
+
+ ether_addr_copy(vport->vf_info.mac, mac_addr);
+ dev_info(&hdev->pdev->dev,
+ "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
+ vf, mac_addr);
+
+ return hclge_inform_reset_assert_to_vf(vport);
+}
+
static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
const struct hclge_mac_mgr_tbl_entry_cmd *req)
{
@@ -7598,7 +7774,7 @@ static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
bool is_kill, u16 vlan,
__be16 proto)
{
-#define HCLGE_MAX_VF_BYTES 16
+ struct hclge_vport *vport = &hdev->vport[vfid];
struct hclge_vlan_filter_vf_cfg_cmd *req0;
struct hclge_vlan_filter_vf_cfg_cmd *req1;
struct hclge_desc desc[2];
@@ -7607,10 +7783,18 @@ static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
int ret;
/* if vf vlan table is full, firmware will close vf vlan filter, it
- * is unable and unnecessary to add new vlan id to vf vlan filter
+ * is neither possible nor necessary to add a new vlan id to the
+ * vf vlan filter. If spoof check is enabled and the vf vlan table
+ * is full, no new vlan should be added, because tx packets carrying
+ * these vlan ids would be dropped.
*/
- if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill)
+ if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
+ if (vport->vf_info.spoofchk && vlan) {
+ dev_err(&hdev->pdev->dev,
+ "Can't add vlan due to spoof check is on and vf vlan table is full\n");
+ return -EPERM;
+ }
return 0;
+ }
hclge_cmd_setup_basic_desc(&desc[0],
HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
@@ -7654,7 +7838,7 @@ static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
}
dev_err(&hdev->pdev->dev,
- "Add vf vlan filter fail, ret =%d.\n",
+ "Add vf vlan filter fail, ret =%u.\n",
req0->resp_code);
} else {
#define HCLGE_VF_VLAN_DEL_NO_FOUND 1
@@ -7670,7 +7854,7 @@ static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
return 0;
dev_err(&hdev->pdev->dev,
- "Kill vf vlan filter fail, ret =%d.\n",
+ "Kill vf vlan filter fail, ret =%u.\n",
req0->resp_code);
}
@@ -7689,9 +7873,10 @@ static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
- vlan_offset_160 = vlan_id / 160;
- vlan_offset_byte = (vlan_id % 160) / 8;
- vlan_offset_byte_val = 1 << (vlan_id % 8);
+ vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
+ vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
+ HCLGE_VLAN_BYTE_SIZE;
+ vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
req->vlan_offset = vlan_offset_160;
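
Worked example of the offset math, assuming the new macros keep the magic numbers they replace (HCLGE_VLAN_ID_OFFSET_STEP == 160, HCLGE_VLAN_BYTE_SIZE == 8). For vlan_id 1000:

vlan_offset_160      = 1000 / 160;		/* 160-entry block 6 */
vlan_offset_byte     = (1000 % 160) / 8;	/* 40 / 8 = byte 5 */
vlan_offset_byte_val = 1 << (1000 % 8);		/* 1 << 0 = 0x01 */
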
@@ -7719,7 +7904,7 @@ static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
proto);
if (ret) {
dev_err(&hdev->pdev->dev,
- "Set %d vport vlan filter config fail, ret =%d.\n",
+ "Set %u vport vlan filter config fail, ret =%d.\n",
vport_id, ret);
return ret;
}
@@ -7731,7 +7916,7 @@ static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
dev_err(&hdev->pdev->dev,
- "Add port vlan failed, vport %d is already in vlan %d\n",
+ "Add port vlan failed, vport %u is already in vlan %u\n",
vport_id, vlan_id);
return -EINVAL;
}
@@ -7739,7 +7924,7 @@ static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
if (is_kill &&
!test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
dev_err(&hdev->pdev->dev,
- "Delete port vlan failed, vport %d is not in vlan %d\n",
+ "Delete port vlan failed, vport %u is not in vlan %u\n",
vport_id, vlan_id);
return -EINVAL;
}
@@ -8107,12 +8292,15 @@ static void hclge_restore_vlan_table(struct hnae3_handle *handle)
}
list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
- if (vlan->hd_tbl_status)
- hclge_set_vlan_filter_hw(hdev,
- htons(ETH_P_8021Q),
- vport->vport_id,
- vlan->vlan_id,
- false);
+ int ret;
+
+ if (!vlan->hd_tbl_status)
+ continue;
+ ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
+ vport->vport_id,
+ vlan->vlan_id, false);
+ if (ret)
+ break;
}
}
@@ -8392,6 +8580,7 @@ int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
struct hclge_dev *hdev = vport->back;
int i, max_frm_size, ret;
+ /* HW supports 2 layers of vlan tags */
max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
max_frm_size > HCLGE_MAC_MAX_FRAME)
@@ -8807,16 +8996,16 @@ static void hclge_info_show(struct hclge_dev *hdev)
dev_info(dev, "PF info begin:\n");
- dev_info(dev, "Task queue pairs numbers: %d\n", hdev->num_tqps);
- dev_info(dev, "Desc num per TX queue: %d\n", hdev->num_tx_desc);
- dev_info(dev, "Desc num per RX queue: %d\n", hdev->num_rx_desc);
- dev_info(dev, "Numbers of vports: %d\n", hdev->num_alloc_vport);
- dev_info(dev, "Numbers of vmdp vports: %d\n", hdev->num_vmdq_vport);
- dev_info(dev, "Numbers of VF for this PF: %d\n", hdev->num_req_vfs);
- dev_info(dev, "HW tc map: %d\n", hdev->hw_tc_map);
- dev_info(dev, "Total buffer size for TX/RX: %d\n", hdev->pkt_buf_size);
- dev_info(dev, "TX buffer size for each TC: %d\n", hdev->tx_buf_size);
- dev_info(dev, "DV buffer size for each TC: %d\n", hdev->dv_buf_size);
+ dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
+ dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
+ dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
+ dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
+ dev_info(dev, "Numbers of vmdp vports: %u\n", hdev->num_vmdq_vport);
+ dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
+ dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
+ dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
+ dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
+ dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
dev_info(dev, "This is %s PF\n",
hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
dev_info(dev, "DCB %s\n",
@@ -8832,10 +9021,9 @@ static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
{
struct hnae3_client *client = vport->nic.client;
struct hclge_dev *hdev = ae_dev->priv;
- int rst_cnt;
+ int rst_cnt = hdev->rst_stats.reset_cnt;
int ret;
- rst_cnt = hdev->rst_stats.reset_cnt;
ret = client->ops->init_instance(&vport->nic);
if (ret)
return ret;
@@ -8935,7 +9123,6 @@ static int hclge_init_client_instance(struct hnae3_client *client,
switch (client->type) {
case HNAE3_CLIENT_KNIC:
-
hdev->nic_client = client;
vport->nic.client = client;
ret = hclge_init_nic_client_instance(ae_dev, vport);
@@ -9134,7 +9321,7 @@ static void hclge_clear_resetting_state(struct hclge_dev *hdev)
ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
if (ret)
dev_warn(&hdev->pdev->dev,
- "clear vf(%d) rst failed %d!\n",
+ "clear vf(%u) rst failed %d!\n",
vport->vport_id, ret);
}
}
@@ -9156,6 +9343,8 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
hdev->reset_type = HNAE3_NONE_RESET;
hdev->reset_level = HNAE3_FUNC_RESET;
ae_dev->priv = hdev;
+
+ /* HW supports 2 layers of vlan tags */
hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
mutex_init(&hdev->vport_lock);
@@ -9354,6 +9543,219 @@ static void hclge_stats_clear(struct hclge_dev *hdev)
memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
}
+static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
+{
+ return hclge_config_switch_param(hdev, vf, enable,
+ HCLGE_SWITCH_ANTI_SPOOF_MASK);
+}
+
+static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
+{
+ return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
+ HCLGE_FILTER_FE_NIC_INGRESS_B,
+ enable, vf);
+}
+
+static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
+{
+ int ret;
+
+ ret = hclge_set_mac_spoofchk(hdev, vf, enable);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Set vf %d mac spoof check %s failed, ret=%d\n",
+ vf, enable ? "on" : "off", ret);
+ return ret;
+ }
+
+ ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "Set vf %d vlan spoof check %s failed, ret=%d\n",
+ vf, enable ? "on" : "off", ret);
+
+ return ret;
+}
+
+static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
+ bool enable)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ u32 new_spoofchk = enable ? 1 : 0;
+ int ret;
+
+ if (hdev->pdev->revision == 0x20)
+ return -EOPNOTSUPP;
+
+ vport = hclge_get_vf_vport(hdev, vf);
+ if (!vport)
+ return -EINVAL;
+
+ if (vport->vf_info.spoofchk == new_spoofchk)
+ return 0;
+
+ if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
+ dev_warn(&hdev->pdev->dev,
+ "vf %d vlan table is full, enable spoof check may cause its packet send fail\n",
+ vf);
+ else if (enable && hclge_is_umv_space_full(vport))
+ dev_warn(&hdev->pdev->dev,
+ "vf %d mac table is full, enable spoof check may cause its packet send fail\n",
+ vf);
+
+ ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
+ if (ret)
+ return ret;
+
+ vport->vf_info.spoofchk = new_spoofchk;
+ return 0;
+}
+
+static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
+{
+ struct hclge_vport *vport = hdev->vport;
+ int ret;
+ int i;
+
+ if (hdev->pdev->revision == 0x20)
+ return 0;
+
+ /* resume the vf spoof check state after reset */
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+ ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
+ vport->vf_info.spoofchk);
+ if (ret)
+ return ret;
+
+ vport++;
+ }
+
+ return 0;
+}
+
+static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ u32 new_trusted = enable ? 1 : 0;
+ bool en_bc_pmc;
+ int ret;
+
+ vport = hclge_get_vf_vport(hdev, vf);
+ if (!vport)
+ return -EINVAL;
+
+ if (vport->vf_info.trusted == new_trusted)
+ return 0;
+
+ /* Disable promisc mode for VF if it is not trusted any more. */
+ if (!enable && vport->vf_info.promisc_enable) {
+ en_bc_pmc = hdev->pdev->revision != 0x20;
+ ret = hclge_set_vport_promisc_mode(vport, false, false,
+ en_bc_pmc);
+ if (ret)
+ return ret;
+ vport->vf_info.promisc_enable = 0;
+ hclge_inform_vf_promisc_info(vport);
+ }
+
+ vport->vf_info.trusted = new_trusted;
+
+ return 0;
+}
+
+static void hclge_reset_vf_rate(struct hclge_dev *hdev)
+{
+ int ret;
+ int vf;
+
+ /* reset vf rate to default value */
+ for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
+ struct hclge_vport *vport = &hdev->vport[vf];
+
+ vport->vf_info.max_tx_rate = 0;
+ ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "vf%d failed to reset to default, ret=%d\n",
+ vf - HCLGE_VF_VPORT_START_NUM, ret);
+ }
+}
+
+static int hclge_vf_rate_param_check(struct hclge_dev *hdev, int vf,
+ int min_tx_rate, int max_tx_rate)
+{
+ if (min_tx_rate != 0 ||
+ max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
+ dev_err(&hdev->pdev->dev,
+ "min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
+ min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
+ int min_tx_rate, int max_tx_rate, bool force)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ int ret;
+
+ ret = hclge_vf_rate_param_check(hdev, vf, min_tx_rate, max_tx_rate);
+ if (ret)
+ return ret;
+
+ vport = hclge_get_vf_vport(hdev, vf);
+ if (!vport)
+ return -EINVAL;
+
+ if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
+ return 0;
+
+ ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
+ if (ret)
+ return ret;
+
+ vport->vf_info.max_tx_rate = max_tx_rate;
+
+ return 0;
+}
+
+static int hclge_resume_vf_rate(struct hclge_dev *hdev)
+{
+ struct hnae3_handle *handle = &hdev->vport->nic;
+ struct hclge_vport *vport;
+ int ret;
+ int vf;
+
+ /* resume the vf max_tx_rate after reset */
+ for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
+ vport = hclge_get_vf_vport(hdev, vf);
+ if (!vport)
+ return -EINVAL;
+
+ /* zero means max rate; after a reset the firmware has already
+ * restored it to max rate, so just continue.
+ */
+ if (!vport->vf_info.max_tx_rate)
+ continue;
+
+ ret = hclge_set_vf_rate(handle, vf, 0,
+ vport->vf_info.max_tx_rate, true);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "vf%d failed to resume tx_rate:%u, ret=%d\n",
+ vf, vport->vf_info.max_tx_rate, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
static void hclge_reset_vport_state(struct hclge_dev *hdev)
{
struct hclge_vport *vport = hdev->vport;
@@ -9431,6 +9833,9 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
return ret;
}
+ /* Log and clear the hw errors that have already occurred */
+ hclge_handle_all_hns_hw_errors(ae_dev);
+
/* Re-enable the hw error interrupts because
* the interrupts get disabled on global reset.
*/
@@ -9453,6 +9858,13 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
}
hclge_reset_vport_state(hdev);
+ ret = hclge_reset_vport_spoofchk(hdev);
+ if (ret)
+ return ret;
+
+ ret = hclge_resume_vf_rate(hdev);
+ if (ret)
+ return ret;
dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
HCLGE_DRIVER_NAME);
@@ -9465,6 +9877,7 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
struct hclge_dev *hdev = ae_dev->priv;
struct hclge_mac *mac = &hdev->hw.mac;
+ hclge_reset_vf_rate(hdev);
hclge_misc_affinity_teardown(hdev);
hclge_state_uninit(hdev);
@@ -9529,8 +9942,8 @@ static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
struct hclge_dev *hdev = vport->back;
u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
- int cur_rss_size = kinfo->rss_size;
- int cur_tqps = kinfo->num_tqps;
+ u16 cur_rss_size = kinfo->rss_size;
+ u16 cur_tqps = kinfo->num_tqps;
u16 tc_valid[HCLGE_MAX_TC_NUM];
u16 roundup_size;
u32 *rss_indir;
@@ -9584,7 +9997,7 @@ static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
out:
if (!ret)
dev_info(&hdev->pdev->dev,
- "Channels changed, rss_size from %d to %d, tqps from %d to %d",
+ "Channels changed, rss_size from %u to %u, tqps from %u to %u",
cur_rss_size, kinfo->rss_size,
cur_tqps, kinfo->rss_size * kinfo->num_tc);
@@ -10187,6 +10600,12 @@ static const struct hnae3_ae_ops hclge_ops = {
.mac_connect_phy = hclge_mac_connect_phy,
.mac_disconnect_phy = hclge_mac_disconnect_phy,
.restore_vlan_table = hclge_restore_vlan_table,
+ .get_vf_config = hclge_get_vf_config,
+ .set_vf_link_state = hclge_set_vf_link_state,
+ .set_vf_spoofchk = hclge_set_vf_spoofchk,
+ .set_vf_trust = hclge_set_vf_trust,
+ .set_vf_rate = hclge_set_vf_rate,
+ .set_vf_mac = hclge_set_vf_mac,
};
static struct hnae3_ae_algo ae_algo = {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index 59b824347ba4..ebb4c6e9aed3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -141,7 +141,6 @@
/* Factor used to calculate offset and bitmap of VF num */
#define HCLGE_VF_NUM_PER_CMD 64
-#define HCLGE_VF_NUM_PER_BYTE 8
enum HLCGE_PORT_TYPE {
HOST_PORT,
@@ -166,7 +165,7 @@ enum HLCGE_PORT_TYPE {
#define HCLGE_GLOBAL_RESET_BIT 0
#define HCLGE_CORE_RESET_BIT 1
#define HCLGE_IMP_RESET_BIT 2
-#define HCLGE_RESET_INT_M GENMASK(2, 0)
+#define HCLGE_RESET_INT_M GENMASK(7, 5)
#define HCLGE_FUN_RST_ING 0x20C00
#define HCLGE_FUN_RST_ING_B 0
@@ -226,8 +225,6 @@ enum hclge_evt_cause {
HCLGE_VECTOR0_EVENT_OTHER,
};
-#define HCLGE_MPF_ENBALE 1
-
enum HCLGE_MAC_SPEED {
HCLGE_MAC_SPEED_UNKNOWN = 0, /* unknown */
HCLGE_MAC_SPEED_10M = 10, /* 10 Mbps */
@@ -258,6 +255,7 @@ struct hclge_mac {
u8 support_autoneg;
u8 speed_type; /* 0: sfp speed, 1: active speed */
u32 speed;
+ u32 max_speed;
u32 speed_ability; /* speed ability supported by current media */
u32 module_type; /* sub media type, e.g. kr/cr/sr/lr */
u32 fec_mode; /* active fec mode */
@@ -655,7 +653,6 @@ struct hclge_rst_stats {
u32 hw_reset_done_cnt; /* the number of HW reset has completed */
u32 pf_rst_cnt; /* the number of PF reset */
u32 flr_rst_cnt; /* the number of FLR */
- u32 core_rst_cnt; /* the number of CORE reset */
u32 global_rst_cnt; /* the number of GLOBAL */
u32 imp_rst_cnt; /* the number of IMP reset */
u32 reset_cnt; /* the number of reset */
@@ -886,6 +883,15 @@ struct hclge_port_base_vlan_config {
struct hclge_vlan_info vlan_info;
};
+struct hclge_vf_info {
+ int link_state;
+ u8 mac[ETH_ALEN];
+ u32 spoofchk;
+ u32 max_tx_rate;
+ u32 trusted;
+ u16 promisc_enable;
+};
+
struct hclge_vport {
u16 alloc_tqps; /* Allocated Tx/Rx queues */
@@ -917,15 +923,15 @@ struct hclge_vport {
unsigned long state;
unsigned long last_active_jiffies;
u32 mps; /* Max packet size */
+ struct hclge_vf_info vf_info;
struct list_head uc_mac_list; /* Store VF unicast table */
struct list_head mc_mac_list; /* Store VF multicast table */
struct list_head vlan_list; /* Store VF vlan table */
};
-void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
- bool en_mc, bool en_bc, int vport_id);
-
+int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
+ bool en_mc_pmc, bool en_bc_pmc);
int hclge_add_uc_addr_common(struct hclge_vport *vport,
const unsigned char *addr);
int hclge_rm_uc_addr_common(struct hclge_vport *vport,
@@ -994,4 +1000,6 @@ int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev,
struct hclge_desc *desc);
void hclge_report_hw_error(struct hclge_dev *hdev,
enum hnae3_hw_error_type type);
+void hclge_inform_vf_promisc_info(struct hclge_vport *vport);
+void hclge_dbg_dump_rst_info(struct hclge_dev *hdev);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index f5da28a60d00..0b433ebe6a2d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -26,7 +26,7 @@ static int hclge_gen_resp_to_vf(struct hclge_vport *vport,
if (resp_data_len > HCLGE_MBX_MAX_RESP_DATA_SIZE) {
dev_err(&hdev->pdev->dev,
- "PF fail to gen resp to VF len %d exceeds max len %d\n",
+ "PF fail to gen resp to VF len %u exceeds max len %u\n",
resp_data_len,
HCLGE_MBX_MAX_RESP_DATA_SIZE);
/* If resp_data_len is too long, set the value to max length
@@ -205,12 +205,38 @@ static int hclge_map_unmap_ring_to_vf_vector(struct hclge_vport *vport, bool en,
static int hclge_set_vf_promisc_mode(struct hclge_vport *vport,
struct hclge_mbx_vf_to_pf_cmd *req)
{
- bool en_bc = req->msg[1] ? true : false;
- struct hclge_promisc_param param;
+#define HCLGE_MBX_BC_INDEX 1
+#define HCLGE_MBX_UC_INDEX 2
+#define HCLGE_MBX_MC_INDEX 3
- /* vf is not allowed to enable unicast/multicast broadcast */
- hclge_promisc_param_init(&param, false, false, en_bc, vport->vport_id);
- return hclge_cmd_set_promisc_mode(vport->back, &param);
+ bool en_bc = req->msg[HCLGE_MBX_BC_INDEX] ? true : false;
+ bool en_uc = req->msg[HCLGE_MBX_UC_INDEX] ? true : false;
+ bool en_mc = req->msg[HCLGE_MBX_MC_INDEX] ? true : false;
+ int ret;
+
+ if (!vport->vf_info.trusted) {
+ en_uc = false;
+ en_mc = false;
+ }
+
+ ret = hclge_set_vport_promisc_mode(vport, en_uc, en_mc, en_bc);
+ if (req->mbx_need_resp)
+ hclge_gen_resp_to_vf(vport, req, ret, NULL, 0);
+
+ vport->vf_info.promisc_enable = (en_uc || en_mc) ? 1 : 0;
+
+ return ret;
+}
+
+void hclge_inform_vf_promisc_info(struct hclge_vport *vport)
+{
+ u8 dest_vfid = (u8)vport->vport_id;
+ u8 msg_data[2];
+
+ memcpy(&msg_data[0], &vport->vf_info.promisc_enable, sizeof(u16));
+
+ hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
+ HCLGE_MBX_PUSH_PROMISC_INFO, dest_vfid);
}
static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport,
@@ -223,6 +249,20 @@ static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport,
if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_UC_MODIFY) {
const u8 *old_addr = (const u8 *)(&mbx_req->msg[8]);
+ /* If VF MAC has been configured by the host then it
+ * cannot be overridden by the MAC specified by the VM.
+ */
+ if (!is_zero_ether_addr(vport->vf_info.mac) &&
+ !ether_addr_equal(mac_addr, vport->vf_info.mac)) {
+ status = -EPERM;
+ goto out;
+ }
+
+ if (!is_valid_ether_addr(mac_addr)) {
+ status = -EINVAL;
+ goto out;
+ }
+
hclge_rm_uc_addr_common(vport, old_addr);
status = hclge_add_uc_addr_common(vport, mac_addr);
if (status) {
@@ -245,11 +285,12 @@ static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport,
false, HCLGE_MAC_ADDR_UC);
} else {
dev_err(&hdev->pdev->dev,
- "failed to set unicast mac addr, unknown subcode %d\n",
+ "failed to set unicast mac addr, unknown subcode %u\n",
mbx_req->msg[1]);
return -EIO;
}
+out:
if (mbx_req->mbx_need_resp & HCLGE_MBX_NEED_RESP_BIT)
hclge_gen_resp_to_vf(vport, mbx_req, status, NULL, 0);
@@ -278,7 +319,7 @@ static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport,
false, HCLGE_MAC_ADDR_MC);
} else {
dev_err(&hdev->pdev->dev,
- "failed to set mcast mac addr, unknown subcode %d\n",
+ "failed to set mcast mac addr, unknown subcode %u\n",
mbx_req->msg[1]);
return -EIO;
}
@@ -324,6 +365,9 @@ static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport,
proto = msg_cmd->proto;
status = hclge_set_vlan_filter(handle, cpu_to_be16(proto),
vlan, is_kill);
+ if (mbx_req->mbx_need_resp)
+ return hclge_gen_resp_to_vf(vport, mbx_req, status,
+ NULL, 0);
} else if (msg_cmd->subcode == HCLGE_MBX_VLAN_RX_OFF_CFG) {
struct hnae3_handle *handle = &vport->nic;
bool en = msg_cmd->is_kill ? true : false;
@@ -398,6 +442,13 @@ static int hclge_get_vf_queue_info(struct hclge_vport *vport,
HCLGE_TQPS_RSS_INFO_LEN);
}
+static int hclge_get_vf_mac_addr(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req)
+{
+ return hclge_gen_resp_to_vf(vport, mbx_req, 0, vport->vf_info.mac,
+ ETH_ALEN);
+}
+
static int hclge_get_vf_queue_depth(struct hclge_vport *vport,
struct hclge_mbx_vf_to_pf_cmd *mbx_req,
bool gen_resp)
@@ -428,6 +479,9 @@ static int hclge_get_vf_media_type(struct hclge_vport *vport,
static int hclge_get_link_info(struct hclge_vport *vport,
struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
+#define HCLGE_VF_LINK_STATE_UP 1U
+#define HCLGE_VF_LINK_STATE_DOWN 0U
+
struct hclge_dev *hdev = vport->back;
u16 link_status;
u8 msg_data[8];
@@ -435,7 +489,19 @@ static int hclge_get_link_info(struct hclge_vport *vport,
u16 duplex;
/* mac.link can only be 0 or 1 */
- link_status = (u16)hdev->hw.mac.link;
+ switch (vport->vf_info.link_state) {
+ case IFLA_VF_LINK_STATE_ENABLE:
+ link_status = HCLGE_VF_LINK_STATE_UP;
+ break;
+ case IFLA_VF_LINK_STATE_DISABLE:
+ link_status = HCLGE_VF_LINK_STATE_DOWN;
+ break;
+ case IFLA_VF_LINK_STATE_AUTO:
+ default:
+ link_status = (u16)hdev->hw.mac.link;
+ break;
+ }
+
duplex = hdev->hw.mac.duplex;
memcpy(&msg_data[0], &link_status, sizeof(u16));
memcpy(&msg_data[2], &hdev->hw.mac.speed, sizeof(u32));
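
The switch maps the administrator's per-VF link-state policy onto what the mailbox reports back to the VF; from the host side this is driven by the standard iproute2 knob, e.g. "ip link set dev <pf> vf 0 state auto|enable|disable". In table form:

/* Policy -> reported link_status:
 *   IFLA_VF_LINK_STATE_ENABLE  -> HCLGE_VF_LINK_STATE_UP (always up)
 *   IFLA_VF_LINK_STATE_DISABLE -> HCLGE_VF_LINK_STATE_DOWN (always down)
 *   IFLA_VF_LINK_STATE_AUTO    -> hdev->hw.mac.link (mirror the PF MAC)
 */
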
@@ -489,7 +555,7 @@ static void hclge_reset_vf(struct hclge_vport *vport,
struct hclge_dev *hdev = vport->back;
int ret;
- dev_warn(&hdev->pdev->dev, "PF received VF reset request from VF %d!",
+ dev_warn(&hdev->pdev->dev, "PF received VF reset request from VF %u!",
vport->vport_id);
ret = hclge_func_reset_cmd(hdev, vport->vport_id);
@@ -524,7 +590,8 @@ static int hclge_get_queue_id_in_pf(struct hclge_vport *vport,
qid_in_pf = hclge_covert_handle_qid_global(&vport->nic, queue_id);
memcpy(resp_data, &qid_in_pf, sizeof(qid_in_pf));
- return hclge_gen_resp_to_vf(vport, mbx_req, 0, resp_data, 2);
+ return hclge_gen_resp_to_vf(vport, mbx_req, 0, resp_data,
+ sizeof(resp_data));
}
static int hclge_get_rss_key(struct hclge_vport *vport,
@@ -614,7 +681,7 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
flag = le16_to_cpu(crq->desc[crq->next_to_use].flag);
if (unlikely(!hnae3_get_bit(flag, HCLGE_CMDQ_RX_OUTVLD_B))) {
dev_warn(&hdev->pdev->dev,
- "dropped invalid mailbox message, code = %d\n",
+ "dropped invalid mailbox message, code = %u\n",
req->msg[0]);
/* dropping/not processing this invalid message */
@@ -749,12 +816,19 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
case HCLGE_MBX_PUSH_LINK_STATUS:
hclge_handle_link_change_event(hdev, req);
break;
+ case HCLGE_MBX_GET_MAC_ADDR:
+ ret = hclge_get_vf_mac_addr(vport, req);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "PF failed(%d) to get MAC for VF\n",
+ ret);
+ break;
case HCLGE_MBX_NCSI_ERROR:
hclge_handle_ncsi_error(hdev);
break;
default:
dev_err(&hdev->pdev->dev,
- "un-supported mailbox message, code = %d\n",
+ "un-supported mailbox message, code = %u\n",
req->msg[0]);
break;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
index dc4dfd4602ab..696c5ae922e3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
@@ -134,7 +134,7 @@ int hclge_mac_mdio_config(struct hclge_dev *hdev)
"no phy device is connected to mdio bus\n");
return 0;
} else if (hdev->hw.mac.phy_addr >= PHY_MAX_ADDR) {
- dev_err(&hdev->pdev->dev, "phy_addr(%d) is too large.\n",
+ dev_err(&hdev->pdev->dev, "phy_addr(%u) is too large.\n",
hdev->hw.mac.phy_addr);
return -EINVAL;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
index 62399cc1c5a6..fbc39a2480d0 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -46,7 +46,7 @@ static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
#define DIVISOR_CLK (1000 * 8)
#define DIVISOR_IR_B_126 (126 * DIVISOR_CLK)
- const u16 tick_array[HCLGE_SHAPER_LVL_CNT] = {
+ static const u16 tick_array[HCLGE_SHAPER_LVL_CNT] = {
6 * 256, /* Priority level */
6 * 32, /* Priority group level */
6 * 8, /* Port level */
@@ -511,6 +511,49 @@ static int hclge_tm_qs_bp_cfg(struct hclge_dev *hdev, u8 tc, u8 grp_id,
return hclge_cmd_send(&hdev->hw, &desc, 1);
}
+int hclge_tm_qs_shaper_cfg(struct hclge_vport *vport, int max_tx_rate)
+{
+ struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
+ struct hclge_qs_shapping_cmd *shap_cfg_cmd;
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_desc desc;
+ u8 ir_b, ir_u, ir_s;
+ u32 shaper_para;
+ int ret, i;
+
+ if (!max_tx_rate)
+ max_tx_rate = HCLGE_ETHER_MAX_RATE;
+
+ ret = hclge_shaper_para_calc(max_tx_rate, HCLGE_SHAPER_LVL_QSET,
+ &ir_b, &ir_u, &ir_s);
+ if (ret)
+ return ret;
+
+ shaper_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s,
+ HCLGE_SHAPER_BS_U_DEF,
+ HCLGE_SHAPER_BS_S_DEF);
+
+ for (i = 0; i < kinfo->num_tc; i++) {
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QCN_SHAPPING_CFG,
+ false);
+
+ shap_cfg_cmd = (struct hclge_qs_shapping_cmd *)desc.data;
+ shap_cfg_cmd->qs_id = cpu_to_le16(vport->qs_offset + i);
+ shap_cfg_cmd->qs_shapping_para = cpu_to_le32(shaper_para);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "vf%u, qs%u failed to set tx_rate:%d, ret=%d\n",
+ vport->vport_id, shap_cfg_cmd->qs_id,
+ max_tx_rate, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
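
A note on the new helper: a max_tx_rate of 0 is normalized to HCLGE_ETHER_MAX_RATE, i.e. "no cap", so callers can hand the ifla_vf_info value through unchanged. Usage sketch (rates here are assumed to be in Mbit/s, matching the HCLGE_MAC_SPEED_* constants):

ret = hclge_tm_qs_shaper_cfg(vport, 0);		/* reset to line rate */
if (!ret)
	ret = hclge_tm_qs_shaper_cfg(vport, 1000);	/* cap at ~1 Gbit/s */
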
static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
{
struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
@@ -532,7 +575,7 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
/* Set to user value, no larger than max_rss_size. */
if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size &&
kinfo->req_rss_size <= max_rss_size) {
- dev_info(&hdev->pdev->dev, "rss changes from %d to %d\n",
+ dev_info(&hdev->pdev->dev, "rss changes from %u to %u\n",
kinfo->rss_size, kinfo->req_rss_size);
kinfo->rss_size = kinfo->req_rss_size;
} else if (kinfo->rss_size > max_rss_size ||
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
index 260f22d19d81..45bcb67f90fd 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
@@ -96,6 +96,12 @@ struct hclge_pg_shapping_cmd {
__le32 pg_shapping_para;
};
+struct hclge_qs_shapping_cmd {
+ __le16 qs_id;
+ u8 rsvd[2];
+ __le32 qs_shapping_para;
+};
+
#define HCLGE_BP_GRP_NUM 32
#define HCLGE_BP_SUB_GRP_ID_S 0
#define HCLGE_BP_SUB_GRP_ID_M GENMASK(4, 0)
@@ -154,4 +160,6 @@ int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx);
int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr);
int hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats);
int hclge_pfc_tx_stats_get(struct hclge_dev *hdev, u64 *stats);
+int hclge_tm_qs_shaper_cfg(struct hclge_vport *vport, int max_tx_rate);
+
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
index d5d1cc5d1b6e..af2245e3bb95 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
@@ -50,7 +50,7 @@ static int hclgevf_cmd_csq_clean(struct hclgevf_hw *hw)
rmb(); /* Make sure head is ready before touch any data */
if (!hclgevf_is_valid_csq_clean_head(csq, head)) {
- dev_warn(&hdev->pdev->dev, "wrong cmd head (%d, %d-%d)\n", head,
+ dev_warn(&hdev->pdev->dev, "wrong cmd head (%u, %d-%d)\n", head,
csq->next_to_use, csq->next_to_clean);
dev_warn(&hdev->pdev->dev,
"Disabling any further commands to IMP firmware\n");
@@ -92,9 +92,9 @@ static void hclgevf_cmd_config_regs(struct hclgevf_cmq_ring *ring)
u32 reg_val;
if (ring->flag == HCLGEVF_TYPE_CSQ) {
- reg_val = (u32)ring->desc_dma_addr;
+ reg_val = lower_32_bits(ring->desc_dma_addr);
hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_L_REG, reg_val);
- reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1);
+ reg_val = upper_32_bits(ring->desc_dma_addr);
hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_H_REG, reg_val);
reg_val = hclgevf_read_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG);
@@ -105,9 +105,9 @@ static void hclgevf_cmd_config_regs(struct hclgevf_cmq_ring *ring)
hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG, 0);
hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG, 0);
} else {
- reg_val = (u32)ring->desc_dma_addr;
+ reg_val = lower_32_bits(ring->desc_dma_addr);
hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_L_REG, reg_val);
- reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1);
+ reg_val = upper_32_bits(ring->desc_dma_addr);
hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_H_REG, reg_val);
reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
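The lower_32_bits()/upper_32_bits() helpers replace the open-coded shifts above; as a sketch of their standard kernel semantics (the example_ names are illustrative):

/* Split a 64-bit DMA address into the two 32-bit halves the BASEADDR_L/_H
 * registers expect. The double 16-bit shift avoids a shift count equal to
 * the type width when dma_addr_t is only 32 bits wide.
 */
#define example_lower_32_bits(n) ((u32)((n) & 0xffffffff))
#define example_upper_32_bits(n) ((u32)(((n) >> 16) >> 16))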
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 7d7e712691b9..25d78a5aaa34 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -1113,6 +1113,7 @@ static int hclgevf_put_vector(struct hnae3_handle *handle, int vector)
}
static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev,
+ bool en_uc_pmc, bool en_mc_pmc,
bool en_bc_pmc)
{
struct hclge_mbx_vf_to_pf_cmd *req;
@@ -1120,10 +1121,11 @@ static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev,
int ret;
req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;
-
hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false);
req->msg[0] = HCLGE_MBX_SET_PROMISC_MODE;
req->msg[1] = en_bc_pmc ? 1 : 0;
+ req->msg[2] = en_uc_pmc ? 1 : 0;
+ req->msg[3] = en_mc_pmc ? 1 : 0;
ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
if (ret)
@@ -1133,9 +1135,17 @@ static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev,
return ret;
}
-static int hclgevf_set_promisc_mode(struct hclgevf_dev *hdev, bool en_bc_pmc)
+static int hclgevf_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
+ bool en_mc_pmc)
{
- return hclgevf_cmd_set_promisc_mode(hdev, en_bc_pmc);
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct pci_dev *pdev = hdev->pdev;
+ bool en_bc_pmc;
+
+ en_bc_pmc = pdev->revision != 0x20;
+
+ return hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc,
+ en_bc_pmc);
}
static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, unsigned int tqp_id,
@@ -1174,11 +1184,37 @@ static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle)
}
}
+static int hclgevf_get_host_mac_addr(struct hclgevf_dev *hdev, u8 *p)
+{
+ u8 host_mac[ETH_ALEN];
+ int status;
+
+ status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_MAC_ADDR, 0, NULL, 0,
+ true, host_mac, ETH_ALEN);
+ if (status) {
+ dev_err(&hdev->pdev->dev,
+ "fail to get VF MAC from host %d", status);
+ return status;
+ }
+
+ ether_addr_copy(p, host_mac);
+
+ return 0;
+}
+
static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ u8 host_mac_addr[ETH_ALEN];
- ether_addr_copy(p, hdev->hw.mac.mac_addr);
+ if (hclgevf_get_host_mac_addr(hdev, host_mac_addr))
+ return;
+
+ hdev->has_pf_mac = !is_zero_ether_addr(host_mac_addr);
+ if (hdev->has_pf_mac)
+ ether_addr_copy(p, host_mac_addr);
+ else
+ ether_addr_copy(p, hdev->hw.mac.mac_addr);
}
static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p,
@@ -1275,7 +1311,7 @@ static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
memcpy(&msg_data[3], &proto, sizeof(proto));
ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
HCLGE_MBX_VLAN_FILTER, msg_data,
- HCLGEVF_VLAN_MBX_MSG_LEN, false, NULL, 0);
+ HCLGEVF_VLAN_MBX_MSG_LEN, true, NULL, 0);
/* when removing the hw vlan filter fails, record the vlan id,
* and try to remove it from hw later, to be consistent
@@ -1513,12 +1549,39 @@ static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
return ret;
}
+static void hclgevf_dump_rst_info(struct hclgevf_dev *hdev)
+{
+ dev_info(&hdev->pdev->dev, "VF function reset count: %u\n",
+ hdev->rst_stats.vf_func_rst_cnt);
+ dev_info(&hdev->pdev->dev, "FLR reset count: %u\n",
+ hdev->rst_stats.flr_rst_cnt);
+ dev_info(&hdev->pdev->dev, "VF reset count: %u\n",
+ hdev->rst_stats.vf_rst_cnt);
+ dev_info(&hdev->pdev->dev, "reset done count: %u\n",
+ hdev->rst_stats.rst_done_cnt);
+ dev_info(&hdev->pdev->dev, "HW reset done count: %u\n",
+ hdev->rst_stats.hw_rst_done_cnt);
+ dev_info(&hdev->pdev->dev, "reset count: %u\n",
+ hdev->rst_stats.rst_cnt);
+ dev_info(&hdev->pdev->dev, "reset fail count: %u\n",
+ hdev->rst_stats.rst_fail_cnt);
+ dev_info(&hdev->pdev->dev, "vector0 interrupt enable status: 0x%x\n",
+ hclgevf_read_dev(&hdev->hw, HCLGEVF_MISC_VECTOR_REG_BASE));
+ dev_info(&hdev->pdev->dev, "vector0 interrupt status: 0x%x\n",
+ hclgevf_read_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_STAT_REG));
+ dev_info(&hdev->pdev->dev, "handshake status: 0x%x\n",
+ hclgevf_read_dev(&hdev->hw, HCLGEVF_CMDQ_TX_DEPTH_REG));
+ dev_info(&hdev->pdev->dev, "function reset status: 0x%x\n",
+ hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING));
+ dev_info(&hdev->pdev->dev, "hdev state: 0x%lx\n", hdev->state);
+}
+
static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev)
{
/* recover handshake status with IMP when reset fail */
hclgevf_reset_handshake(hdev, true);
hdev->rst_stats.rst_fail_cnt++;
- dev_err(&hdev->pdev->dev, "failed to reset VF(%d)\n",
+ dev_err(&hdev->pdev->dev, "failed to reset VF(%u)\n",
hdev->rst_stats.rst_fail_cnt);
if (hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT)
@@ -1527,6 +1590,8 @@ static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev)
if (hclgevf_is_reset_pending(hdev)) {
set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
hclgevf_reset_task_schedule(hdev);
+ } else {
+ hclgevf_dump_rst_info(hdev);
}
}
@@ -1748,6 +1813,8 @@ static void hclgevf_service_timer(struct timer_list *t)
static void hclgevf_reset_service_task(struct work_struct *work)
{
+#define HCLGEVF_MAX_RESET_ATTEMPTS_CNT 3
+
struct hclgevf_dev *hdev =
container_of(work, struct hclgevf_dev, rst_service_task);
int ret;
@@ -1800,7 +1867,7 @@ static void hclgevf_reset_service_task(struct work_struct *work)
* We cannot do much for 2. but to check first we can try reset
* our PCIe + stack and see if it alleviates the problem.
*/
- if (hdev->reset_attempts > 3) {
+ if (hdev->reset_attempts > HCLGEVF_MAX_RESET_ATTEMPTS_CNT) {
/* prepare for full reset of stack + pcie interface */
set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending);
@@ -2103,7 +2170,6 @@ static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
ret = hclgevf_set_rss_input_tuple(hdev, rss_cfg);
if (ret)
return ret;
-
}
/* Initialize RSS indirect table */
@@ -2272,7 +2338,7 @@ static int hclgevf_init_msi(struct hclgevf_dev *hdev)
}
if (vectors < hdev->num_msi)
dev_warn(&hdev->pdev->dev,
- "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
+ "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
hdev->num_msi, vectors);
hdev->num_msi = vectors;
@@ -2348,12 +2414,12 @@ static void hclgevf_info_show(struct hclgevf_dev *hdev)
dev_info(dev, "VF info begin:\n");
- dev_info(dev, "Task queue pairs numbers: %d\n", hdev->num_tqps);
- dev_info(dev, "Desc num per TX queue: %d\n", hdev->num_tx_desc);
- dev_info(dev, "Desc num per RX queue: %d\n", hdev->num_rx_desc);
- dev_info(dev, "Numbers of vports: %d\n", hdev->num_alloc_vport);
- dev_info(dev, "HW tc map: %d\n", hdev->hw_tc_map);
- dev_info(dev, "PF media type of this VF: %d\n",
+ dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
+ dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
+ dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
+ dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
+ dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
+ dev_info(dev, "PF media type of this VF: %u\n",
hdev->hw.mac.media_type);
dev_info(dev, "VF info end.\n");
@@ -2648,12 +2714,6 @@ static int hclgevf_reset_hdev(struct hclgevf_dev *hdev)
return ret;
}
- if (pdev->revision >= 0x21) {
- ret = hclgevf_set_promisc_mode(hdev, true);
- if (ret)
- return ret;
- }
-
dev_info(&hdev->pdev->dev, "Reset done\n");
return 0;
@@ -2728,17 +2788,6 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
if (ret)
goto err_config;
- /* vf is not allowed to enable unicast/multicast promisc mode.
- * For revision 0x20, default to disable broadcast promisc mode,
- * firmware makes sure broadcast packets can be accepted.
- * For revision 0x21, default to enable broadcast promisc mode.
- */
- if (pdev->revision >= 0x21) {
- ret = hclgevf_set_promisc_mode(hdev, true);
- if (ret)
- goto err_config;
- }
-
/* Initialize RSS for this VF */
ret = hclgevf_rss_init_hw(hdev);
if (ret) {
@@ -3152,6 +3201,7 @@ static const struct hnae3_ae_ops hclgevf_ops = {
.get_global_queue_id = hclgevf_get_qid_global,
.set_timer_task = hclgevf_set_timer_task,
.get_link_mode = hclgevf_get_link_mode,
+ .set_promisc_mode = hclgevf_set_promisc_mode,
};
static struct hnae3_ae_algo ae_algovf = {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
index 2b8d6bc6d224..2f4c81bf4169 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -150,8 +150,6 @@ enum hclgevf_states {
HCLGEVF_STATE_CMD_DISABLE,
};
-#define HCLGEVF_MPF_ENBALE 1
-
struct hclgevf_mac {
u8 media_type;
u8 module_type;
@@ -266,6 +264,7 @@ struct hclgevf_dev {
u16 num_tx_desc; /* desc num of per tx queue */
u16 num_rx_desc; /* desc num of per rx queue */
u8 hw_tc_map;
+ u8 has_pf_mac;
u16 num_msi;
u16 num_msi_left;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
index a108191c9e50..7cbd715d5e7a 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
@@ -33,7 +33,7 @@ static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1,
if (resp_len > HCLGE_MBX_MAX_RESP_DATA_SIZE) {
dev_err(&hdev->pdev->dev,
- "VF mbx response len(=%d) exceeds maximum(=%d)\n",
+ "VF mbx response len(=%u) exceeds maximum(=%u)\n",
resp_len,
HCLGE_MBX_MAX_RESP_DATA_SIZE);
return -EINVAL;
@@ -49,7 +49,7 @@ static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1,
if (i >= HCLGEVF_MAX_TRY_TIMES) {
dev_err(&hdev->pdev->dev,
- "VF could not get mbx(%d,%d) resp(=%d) from PF in %d tries\n",
+ "VF could not get mbx(%u,%u) resp(=%d) from PF in %d tries\n",
code0, code1, hdev->mbx_resp.received_resp, i);
return -EIO;
}
@@ -68,10 +68,10 @@ static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1,
if (!(r_code0 == code0 && r_code1 == code1 && !mbx_resp->resp_status)) {
dev_err(&hdev->pdev->dev,
- "VF could not match resp code(code0=%d,code1=%d), %d\n",
+ "VF could not match resp code(code0=%u,code1=%u), %d\n",
code0, code1, mbx_resp->resp_status);
dev_err(&hdev->pdev->dev,
- "VF could not match resp r_code(r_code0=%d,r_code1=%d)\n",
+ "VF could not match resp r_code(r_code0=%u,r_code1=%u)\n",
r_code0, r_code1);
return -EIO;
}
@@ -168,7 +168,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
flag = le16_to_cpu(crq->desc[crq->next_to_use].flag);
if (unlikely(!hnae3_get_bit(flag, HCLGEVF_CMDQ_RX_OUTVLD_B))) {
dev_warn(&hdev->pdev->dev,
- "dropped invalid mailbox message, code = %d\n",
+ "dropped invalid mailbox message, code = %u\n",
req->msg[0]);
/* dropping/not processing this invalid message */
@@ -187,7 +187,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
case HCLGE_MBX_PF_VF_RESP:
if (resp->received_resp)
dev_warn(&hdev->pdev->dev,
- "VF mbx resp flag not clear(%d)\n",
+ "VF mbx resp flag not clear(%u)\n",
req->msg[1]);
resp->received_resp = true;
@@ -205,6 +205,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
case HCLGE_MBX_ASSERTING_RESET:
case HCLGE_MBX_LINK_STAT_MODE:
case HCLGE_MBX_PUSH_VLAN_INFO:
+ case HCLGE_MBX_PUSH_PROMISC_INFO:
/* set this mbx event as pending. This is required as we
* might lose an interrupt event when the mbx task is busy
* handling. This shall be cleared when mbx task just
@@ -218,7 +219,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
if (atomic_read(&hdev->arq.count) >=
HCLGE_MBX_MAX_ARQ_MSG_NUM) {
dev_warn(&hdev->pdev->dev,
- "Async Q full, dropping msg(%d)\n",
+ "Async Q full, dropping msg(%u)\n",
req->msg[1]);
break;
}
@@ -235,7 +236,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
break;
default:
dev_err(&hdev->pdev->dev,
- "VF received unsupported(%d) mbx msg from PF\n",
+ "VF received unsupported(%u) mbx msg from PF\n",
req->msg[0]);
break;
}
@@ -248,6 +249,14 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
crq->next_to_use);
}
+static void hclgevf_parse_promisc_info(struct hclgevf_dev *hdev,
+ u16 promisc_info)
+{
+ if (!promisc_info)
+ dev_info(&hdev->pdev->dev,
+ "Promisc mode is closed by host for being untrusted.\n");
+}
+
void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
{
enum hnae3_reset_type reset_type;
@@ -313,9 +322,12 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
hclgevf_update_port_base_vlan_info(hdev, state,
(u8 *)vlan_info, 8);
break;
+ case HCLGE_MBX_PUSH_PROMISC_INFO:
+ hclgevf_parse_promisc_info(hdev, msg_q[1]);
+ break;
default:
dev_err(&hdev->pdev->dev,
- "fetched unsupported(%d) message from arq\n",
+ "fetched unsupported(%u) message from arq\n",
msg_q[0]);
break;
}
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_qmr.c b/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
index 6e70658d50c4..db45373ea31c 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
@@ -670,13 +670,10 @@ int ehea_rem_sect_bmap(unsigned long pfn, unsigned long nr_pages)
static int ehea_is_hugepage(unsigned long pfn)
{
- int page_order;
-
if (pfn & EHEA_HUGEPAGE_PFN_MASK)
return 0;
- page_order = compound_order(pfn_to_page(pfn));
- if (page_order + PAGE_SHIFT != EHEA_HUGEPAGESHIFT)
+ if (page_shift(pfn_to_page(pfn)) != EHEA_HUGEPAGESHIFT)
return 0;
return 1;
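page_shift() folds the old compound_order() + PAGE_SHIFT arithmetic into one call; roughly, per its kernel definition:

/* Shift needed to address the whole compound page: the base page shift
 * plus the page's compound order.
 */
static inline unsigned int example_page_shift(struct page *page)
{
	return PAGE_SHIFT + compound_order(page);
}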
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 9e43c9ace9c2..2e40425d8a34 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -2849,6 +2849,7 @@ static int emac_init_config(struct emac_instance *dev)
{
struct device_node *np = dev->ofdev->dev.of_node;
const void *p;
+ int err;
/* Read config from device-tree */
if (emac_read_uint_prop(np, "mal-device", &dev->mal_ph, 1))
@@ -2897,8 +2898,8 @@ static int emac_init_config(struct emac_instance *dev)
dev->mal_burst_size = 256;
/* PHY mode needs some decoding */
- dev->phy_mode = of_get_phy_mode(np);
- if (dev->phy_mode < 0)
+ err = of_get_phy_mode(np, &dev->phy_mode);
+ if (err)
dev->phy_mode = PHY_INTERFACE_MODE_NA;
/* Check EMAC version */
diff --git a/drivers/net/ethernet/ibm/emac/core.h b/drivers/net/ethernet/ibm/emac/core.h
index e9cda024cbf6..89a1b0fea158 100644
--- a/drivers/net/ethernet/ibm/emac/core.h
+++ b/drivers/net/ethernet/ibm/emac/core.h
@@ -171,7 +171,7 @@ struct emac_instance {
struct mal_commac commac;
/* PHY infos */
- int phy_mode;
+ phy_interface_t phy_mode;
u32 phy_map;
u32 phy_address;
u32 phy_feat_exc;
diff --git a/drivers/net/ethernet/ibm/emac/zmii.c b/drivers/net/ethernet/ibm/emac/zmii.c
index b9e821de2ac6..57a25c7a9e70 100644
--- a/drivers/net/ethernet/ibm/emac/zmii.c
+++ b/drivers/net/ethernet/ibm/emac/zmii.c
@@ -78,7 +78,8 @@ static inline u32 zmii_mode_mask(int mode, int input)
}
}
-int zmii_attach(struct platform_device *ofdev, int input, int *mode)
+int zmii_attach(struct platform_device *ofdev, int input,
+ phy_interface_t *mode)
{
struct zmii_instance *dev = platform_get_drvdata(ofdev);
struct zmii_regs __iomem *p = dev->base;
diff --git a/drivers/net/ethernet/ibm/emac/zmii.h b/drivers/net/ethernet/ibm/emac/zmii.h
index 41d46e9b87ba..65daedc78594 100644
--- a/drivers/net/ethernet/ibm/emac/zmii.h
+++ b/drivers/net/ethernet/ibm/emac/zmii.h
@@ -50,7 +50,8 @@ struct zmii_instance {
int zmii_init(void);
void zmii_exit(void);
-int zmii_attach(struct platform_device *ofdev, int input, int *mode);
+int zmii_attach(struct platform_device *ofdev, int input,
+ phy_interface_t *mode);
void zmii_detach(struct platform_device *ofdev, int input);
void zmii_get_mdio(struct platform_device *ofdev, int input);
void zmii_put_mdio(struct platform_device *ofdev, int input);
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index c5be4ebd8437..84121aab7ff1 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1011,6 +1011,29 @@ static int ibmveth_send(struct ibmveth_adapter *adapter,
return 0;
}
+static int ibmveth_is_packet_unsupported(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct ethhdr *ether_header;
+ int ret = 0;
+
+ ether_header = eth_hdr(skb);
+
+ if (ether_addr_equal(ether_header->h_dest, netdev->dev_addr)) {
+ netdev_dbg(netdev, "veth doesn't support loopback packets, dropping packet.\n");
+ netdev->stats.tx_dropped++;
+ ret = -EOPNOTSUPP;
+ }
+
+ if (!ether_addr_equal(ether_header->h_source, netdev->dev_addr)) {
+ netdev_dbg(netdev, "source packet MAC address does not match veth device's, dropping packet.\n");
+ netdev->stats.tx_dropped++;
+ ret = -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
struct net_device *netdev)
{
@@ -1022,6 +1045,9 @@ static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
dma_addr_t dma_addr;
unsigned long mss = 0;
+ if (ibmveth_is_packet_unsupported(skb, netdev))
+ goto out;
+
/* veth doesn't handle frag_list, so linearize the skb.
* When GRO is enabled SKB's can have frag_list.
*/
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index f59d9a8e35e2..0686ded7ad3a 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -2878,10 +2878,15 @@ static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
if (test_bit(0, &adapter->resetting) &&
adapter->reset_reason == VNIC_RESET_MOBILITY) {
- struct irq_desc *desc = irq_to_desc(scrq->irq);
- struct irq_chip *chip = irq_desc_get_chip(desc);
+ u64 val = (0xff000000) | scrq->hw_irq;
- chip->irq_eoi(&desc->irq_data);
+ rc = plpar_hcall_norets(H_EOI, val);
+ /* H_EOI fails with rc = H_FUNCTION when running in
+ * XIVE mode; this is expected and not an error.
+ */
+ if (rc && (rc != H_FUNCTION))
+ dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n",
+ val, rc);
}
rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 86493fea56e4..416da9619928 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -3565,8 +3565,8 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
(max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
- pr_info("%s changing MTU from %d to %d\n",
- netdev->name, netdev->mtu, new_mtu);
+ netdev_dbg(netdev, "changing MTU from %d to %d\n",
+ netdev->mtu, new_mtu);
netdev->mtu = new_mtu;
if (netif_running(netdev))
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index de8c5818a305..adce7e319b9e 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -894,8 +894,9 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
case e1000_pch2lan:
case e1000_pch_lpt:
case e1000_pch_spt:
- /* fall through */
case e1000_pch_cnp:
+ /* fall through */
+ case e1000_pch_tgp:
mask |= BIT(18);
break;
default:
@@ -1559,6 +1560,7 @@ static void e1000_loopback_cleanup(struct e1000_adapter *adapter)
switch (hw->mac.type) {
case e1000_pch_spt:
case e1000_pch_cnp:
+ case e1000_pch_tgp:
fext_nvm11 = er32(FEXTNVM11);
fext_nvm11 &= ~E1000_FEXTNVM11_DISABLE_MULR_FIX;
ew32(FEXTNVM11, fext_nvm11);
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
index eff75bd8a8f0..f556163481cb 100644
--- a/drivers/net/ethernet/intel/e1000e/hw.h
+++ b/drivers/net/ethernet/intel/e1000e/hw.h
@@ -86,6 +86,17 @@ struct e1000_hw;
#define E1000_DEV_ID_PCH_ICP_I219_V8 0x15E0
#define E1000_DEV_ID_PCH_ICP_I219_LM9 0x15E1
#define E1000_DEV_ID_PCH_ICP_I219_V9 0x15E2
+#define E1000_DEV_ID_PCH_CMP_I219_LM10 0x0D4E
+#define E1000_DEV_ID_PCH_CMP_I219_V10 0x0D4F
+#define E1000_DEV_ID_PCH_CMP_I219_LM11 0x0D4C
+#define E1000_DEV_ID_PCH_CMP_I219_V11 0x0D4D
+#define E1000_DEV_ID_PCH_CMP_I219_LM12 0x0D53
+#define E1000_DEV_ID_PCH_CMP_I219_V12 0x0D55
+#define E1000_DEV_ID_PCH_TGP_I219_LM13 0x15FB
+#define E1000_DEV_ID_PCH_TGP_I219_V13 0x15FC
+#define E1000_DEV_ID_PCH_TGP_I219_LM14 0x15F9
+#define E1000_DEV_ID_PCH_TGP_I219_V14 0x15FA
+#define E1000_DEV_ID_PCH_TGP_I219_LM15 0x15F4
#define E1000_REVISION_4 4
@@ -109,6 +120,7 @@ enum e1000_mac_type {
e1000_pch_lpt,
e1000_pch_spt,
e1000_pch_cnp,
+ e1000_pch_tgp,
};
enum e1000_media_type {
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index a1fab77b2096..b4135c50e905 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -316,6 +316,7 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
case e1000_pch_lpt:
case e1000_pch_spt:
case e1000_pch_cnp:
+ case e1000_pch_tgp:
if (e1000_phy_is_accessible_pchlan(hw))
break;
@@ -458,6 +459,7 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
case e1000_pch_lpt:
case e1000_pch_spt:
case e1000_pch_cnp:
+ case e1000_pch_tgp:
/* In case the PHY needs to be in mdio slow mode,
* set slow mode and try to get the PHY id again.
*/
@@ -700,6 +702,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
case e1000_pch_lpt:
case e1000_pch_spt:
case e1000_pch_cnp:
+ case e1000_pch_tgp:
case e1000_pchlan:
/* check management mode */
mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
@@ -1638,6 +1641,7 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
case e1000_pch_lpt:
case e1000_pch_spt:
case e1000_pch_cnp:
+ case e1000_pch_tgp:
rc = e1000_init_phy_params_pchlan(hw);
break;
default:
@@ -2090,6 +2094,7 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
case e1000_pch_lpt:
case e1000_pch_spt:
case e1000_pch_cnp:
+ case e1000_pch_tgp:
sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
break;
default:
@@ -3127,6 +3132,7 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
switch (hw->mac.type) {
case e1000_pch_spt:
case e1000_pch_cnp:
+ case e1000_pch_tgp:
bank1_offset = nvm->flash_bank_size;
act_offset = E1000_ICH_NVM_SIG_WORD;
@@ -4070,6 +4076,7 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
case e1000_pch_lpt:
case e1000_pch_spt:
case e1000_pch_cnp:
+ case e1000_pch_tgp:
word = NVM_COMPAT;
valid_csum_mask = NVM_COMPAT_VALID_CSUM;
break;
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index d7d56e42a6aa..fe7997c18a10 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -3538,6 +3538,7 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca)
adapter->cc.shift = shift;
break;
case e1000_pch_cnp:
+ case e1000_pch_tgp:
if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) {
/* Stable 24MHz frequency */
incperiod = INCPERIOD_24MHZ;
@@ -4049,6 +4050,8 @@ void e1000e_reset(struct e1000_adapter *adapter)
case e1000_pch_lpt:
case e1000_pch_spt:
case e1000_pch_cnp:
+ /* fall-through */
+ case e1000_pch_tgp:
fc->refresh_time = 0xFFFF;
fc->pause_time = 0xFFFF;
@@ -4715,12 +4718,12 @@ int e1000e_close(struct net_device *netdev)
pm_runtime_get_sync(&pdev->dev);
- if (!test_bit(__E1000_DOWN, &adapter->state)) {
+ if (netif_device_present(netdev)) {
e1000e_down(adapter, true);
e1000_free_irq(adapter);
/* Link status message must follow this format */
- pr_info("%s NIC Link is Down\n", adapter->netdev->name);
+ pr_info("%s NIC Link is Down\n", netdev->name);
}
napi_disable(&adapter->napi);
@@ -6028,7 +6031,8 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
usleep_range(1000, 1100);
/* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */
adapter->max_frame_size = max_frame;
- e_info("changing MTU from %d to %d\n", netdev->mtu, new_mtu);
+ netdev_dbg(netdev, "changing MTU from %d to %d\n",
+ netdev->mtu, new_mtu);
netdev->mtu = new_mtu;
pm_runtime_get_sync(netdev->dev.parent);
@@ -6294,14 +6298,188 @@ fl_out:
pm_runtime_put_sync(netdev->dev.parent);
}
+#ifdef CONFIG_PM_SLEEP
+/* S0ix implementation */
+static void e1000e_s0ix_entry_flow(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 mac_data;
+ u16 phy_data;
+
+ /* Disable the periodic inband message,
+ * don't request PCIe clock in K1 page770_17[10:9] = 10b
+ */
+ e1e_rphy(hw, HV_PM_CTRL, &phy_data);
+ phy_data &= ~HV_PM_CTRL_K1_CLK_REQ;
+ phy_data |= BIT(10);
+ e1e_wphy(hw, HV_PM_CTRL, phy_data);
+
+ /* Make sure we don't exit K1 every time a new packet arrives
+ * 772_29[5] = 1 CS_Mode_Stay_In_K1
+ */
+ e1e_rphy(hw, I217_CGFREG, &phy_data);
+ phy_data |= BIT(5);
+ e1e_wphy(hw, I217_CGFREG, phy_data);
+
+ /* Change the MAC/PHY interface to SMBus
+ * Force the SMBus in PHY page769_23[0] = 1
+ * Force the SMBus in MAC CTRL_EXT[11] = 1
+ */
+ e1e_rphy(hw, CV_SMB_CTRL, &phy_data);
+ phy_data |= CV_SMB_CTRL_FORCE_SMBUS;
+ e1e_wphy(hw, CV_SMB_CTRL, phy_data);
+ mac_data = er32(CTRL_EXT);
+ mac_data |= E1000_CTRL_EXT_FORCE_SMBUS;
+ ew32(CTRL_EXT, mac_data);
+
+ /* DFT control: PHY bit: page769_20[0] = 1
+ * Gate PPW via EXTCNF_CTRL - set 0x0F00[7] = 1
+ */
+ e1e_rphy(hw, I82579_DFT_CTRL, &phy_data);
+ phy_data |= BIT(0);
+ e1e_wphy(hw, I82579_DFT_CTRL, phy_data);
+
+ mac_data = er32(EXTCNF_CTRL);
+ mac_data |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
+ ew32(EXTCNF_CTRL, mac_data);
+
+ /* Check MAC Tx/Rx packet buffer pointers.
+ * Reset MAC Tx/Rx packet buffer pointers to suppress any
+ * pending traffic indication that would prevent power gating.
+ */
+ mac_data = er32(TDFH);
+ if (mac_data)
+ ew32(TDFH, 0);
+ mac_data = er32(TDFT);
+ if (mac_data)
+ ew32(TDFT, 0);
+ mac_data = er32(TDFHS);
+ if (mac_data)
+ ew32(TDFHS, 0);
+ mac_data = er32(TDFTS);
+ if (mac_data)
+ ew32(TDFTS, 0);
+ mac_data = er32(TDFPC);
+ if (mac_data)
+ ew32(TDFPC, 0);
+ mac_data = er32(RDFH);
+ if (mac_data)
+ ew32(RDFH, 0);
+ mac_data = er32(RDFT);
+ if (mac_data)
+ ew32(RDFT, 0);
+ mac_data = er32(RDFHS);
+ if (mac_data)
+ ew32(RDFHS, 0);
+ mac_data = er32(RDFTS);
+ if (mac_data)
+ ew32(RDFTS, 0);
+ mac_data = er32(RDFPC);
+ if (mac_data)
+ ew32(RDFPC, 0);
+
+ /* Enable the Dynamic Power Gating in the MAC */
+ mac_data = er32(FEXTNVM7);
+ mac_data |= BIT(22);
+ ew32(FEXTNVM7, mac_data);
+
+ /* Disable the time synchronization clock */
+ mac_data = er32(FEXTNVM7);
+ mac_data |= BIT(31);
+ mac_data &= ~BIT(0);
+ ew32(FEXTNVM7, mac_data);
+
+ /* Dynamic Power Gating Enable */
+ mac_data = er32(CTRL_EXT);
+ mac_data |= BIT(3);
+ ew32(CTRL_EXT, mac_data);
+
+ /* Enable the Dynamic Clock Gating in the DMA and MAC */
+ mac_data = er32(CTRL_EXT);
+ mac_data |= E1000_CTRL_EXT_DMA_DYN_CLK_EN;
+ ew32(CTRL_EXT, mac_data);
+
+ /* No MAC DPG gating SLP_S0 in modern standby
+ * Switch the logic of the lanphypc to use PMC counter
+ */
+ mac_data = er32(FEXTNVM5);
+ mac_data |= BIT(7);
+ ew32(FEXTNVM5, mac_data);
+}
+
+static void e1000e_s0ix_exit_flow(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 mac_data;
+ u16 phy_data;
+
+ /* Disable the Dynamic Power Gating in the MAC */
+ mac_data = er32(FEXTNVM7);
+ mac_data &= 0xFFBFFFFF;
+ ew32(FEXTNVM7, mac_data);
+
+ /* Enable the time synchronization clock */
+ mac_data = er32(FEXTNVM7);
+ mac_data |= BIT(0);
+ ew32(FEXTNVM7, mac_data);
+
+ /* Disable Dynamic Power Gating */
+ mac_data = er32(CTRL_EXT);
+ mac_data &= 0xFFFFFFF7;
+ ew32(CTRL_EXT, mac_data);
+
+ /* Disable the Dynamic Clock Gating in the DMA and MAC */
+ mac_data = er32(CTRL_EXT);
+ mac_data &= 0xFFF7FFFF;
+ ew32(CTRL_EXT, mac_data);
+
+ /* Revert the lanphypc logic to use the internal Gbe counter
+ * and not the PMC counter
+ */
+ mac_data = er32(FEXTNVM5);
+ mac_data &= 0xFFFFFF7F;
+ ew32(FEXTNVM5, mac_data);
+
+ /* Enable the periodic inband message,
+ * request PCIe clock in K1 page770_17[10:9] = 01b
+ */
+ e1e_rphy(hw, HV_PM_CTRL, &phy_data);
+ phy_data &= 0xFBFF;
+ phy_data |= HV_PM_CTRL_K1_CLK_REQ;
+ e1e_wphy(hw, HV_PM_CTRL, phy_data);
+
+ /* Restore the original configuration
+ * 772_29[5] = 0 CS_Mode_Stay_In_K1
+ */
+ e1e_rphy(hw, I217_CGFREG, &phy_data);
+ phy_data &= 0xFFDF;
+ e1e_wphy(hw, I217_CGFREG, phy_data);
+
+ /* Change the MAC/PHY interface to Kumeran
+ * Unforce the SMBus in PHY page769_23[0] = 0
+ * Unforce the SMBus in MAC CTRL_EXT[11] = 0
+ */
+ e1e_rphy(hw, CV_SMB_CTRL, &phy_data);
+ phy_data &= ~CV_SMB_CTRL_FORCE_SMBUS;
+ e1e_wphy(hw, CV_SMB_CTRL, phy_data);
+ mac_data = er32(CTRL_EXT);
+ mac_data &= ~E1000_CTRL_EXT_FORCE_SMBUS;
+ ew32(CTRL_EXT, mac_data);
+}
+#endif /* CONFIG_PM_SLEEP */
+
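The run of er32()/ew32() pairs in e1000e_s0ix_entry_flow() clears ten Tx/Rx packet-buffer pointer registers one by one; the same effect could be expressed table-driven (a sketch only, assuming the register defines used above and the driver's __er32()/__ew32() accessors):

/* Illustrative rewrite: zero any non-zero packet-buffer pointer so no
 * pending-traffic indication blocks power gating.
 */
static const unsigned long example_pbuf_regs[] = {
	E1000_TDFH, E1000_TDFT, E1000_TDFHS, E1000_TDFTS, E1000_TDFPC,
	E1000_RDFH, E1000_RDFT, E1000_RDFHS, E1000_RDFTS, E1000_RDFPC,
};

static void example_clear_pbuf_pointers(struct e1000_hw *hw)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(example_pbuf_regs); i++)
		if (__er32(hw, example_pbuf_regs[i]))
			__ew32(hw, example_pbuf_regs[i], 0);
}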
static int e1000e_pm_freeze(struct device *dev)
{
struct net_device *netdev = dev_get_drvdata(dev);
struct e1000_adapter *adapter = netdev_priv(netdev);
+ bool present;
+
+ rtnl_lock();
+ present = netif_device_present(netdev);
netif_device_detach(netdev);
- if (netif_running(netdev)) {
+ if (present && netif_running(netdev)) {
int count = E1000_CHECK_RESET_COUNT;
while (test_bit(__E1000_RESETTING, &adapter->state) && count--)
@@ -6313,6 +6491,8 @@ static int e1000e_pm_freeze(struct device *dev)
e1000e_down(adapter, false);
e1000_free_irq(adapter);
}
+ rtnl_unlock();
+
e1000e_reset_interrupt_capability(adapter);
/* Allow time for pending master requests to run */
@@ -6560,6 +6740,30 @@ static void e1000e_disable_aspm_locked(struct pci_dev *pdev, u16 state)
__e1000e_disable_aspm(pdev, state, 1);
}
+static int e1000e_pm_thaw(struct device *dev)
+{
+ struct net_device *netdev = dev_get_drvdata(dev);
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ int rc = 0;
+
+ e1000e_set_interrupt_capability(adapter);
+
+ rtnl_lock();
+ if (netif_running(netdev)) {
+ rc = e1000_request_irq(adapter);
+ if (rc)
+ goto err_irq;
+
+ e1000e_up(adapter);
+ }
+
+ netif_device_attach(netdev);
+err_irq:
+ rtnl_unlock();
+
+ return rc;
+}
+
#ifdef CONFIG_PM
static int __e1000_resume(struct pci_dev *pdev)
{
@@ -6627,29 +6831,12 @@ static int __e1000_resume(struct pci_dev *pdev)
}
#ifdef CONFIG_PM_SLEEP
-static int e1000e_pm_thaw(struct device *dev)
-{
- struct net_device *netdev = dev_get_drvdata(dev);
- struct e1000_adapter *adapter = netdev_priv(netdev);
-
- e1000e_set_interrupt_capability(adapter);
- if (netif_running(netdev)) {
- u32 err = e1000_request_irq(adapter);
-
- if (err)
- return err;
-
- e1000e_up(adapter);
- }
-
- netif_device_attach(netdev);
-
- return 0;
-}
-
static int e1000e_pm_suspend(struct device *dev)
{
+ struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
+ struct e1000_adapter *adapter = netdev_priv(netdev);
struct pci_dev *pdev = to_pci_dev(dev);
+ struct e1000_hw *hw = &adapter->hw;
int rc;
e1000e_flush_lpic(pdev);
@@ -6660,14 +6847,25 @@ static int e1000e_pm_suspend(struct device *dev)
if (rc)
e1000e_pm_thaw(dev);
+ /* Enter the S0ix flow on CNP and newer parts */
+ if (hw->mac.type >= e1000_pch_cnp)
+ e1000e_s0ix_entry_flow(adapter);
+
return rc;
}
static int e1000e_pm_resume(struct device *dev)
{
+ struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
+ struct e1000_adapter *adapter = netdev_priv(netdev);
struct pci_dev *pdev = to_pci_dev(dev);
+ struct e1000_hw *hw = &adapter->hw;
int rc;
+ /* Exit the S0ix flow on CNP and newer parts */
+ if (hw->mac.type >= e1000_pch_cnp)
+ e1000e_s0ix_exit_flow(adapter);
+
rc = __e1000_resume(pdev);
if (rc)
return rc;
@@ -6818,16 +7016,11 @@ static void e1000_netpoll(struct net_device *netdev)
static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
pci_channel_state_t state)
{
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct e1000_adapter *adapter = netdev_priv(netdev);
-
- netif_device_detach(netdev);
+ e1000e_pm_freeze(&pdev->dev);
if (state == pci_channel_io_perm_failure)
return PCI_ERS_RESULT_DISCONNECT;
- if (netif_running(netdev))
- e1000e_down(adapter, true);
pci_disable_device(pdev);
/* Request a slot reset. */
@@ -6893,10 +7086,7 @@ static void e1000_io_resume(struct pci_dev *pdev)
e1000_init_manageability_pt(adapter);
- if (netif_running(netdev))
- e1000e_up(adapter);
-
- netif_device_attach(netdev);
+ e1000e_pm_thaw(&pdev->dev);
/* If the controller has AMT, do not set DRV_LOAD until the interface
* is up. For all other cases, let the f/w know that the h/w is now
@@ -7407,15 +7597,13 @@ static void e1000_remove(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev_priv(netdev);
- bool down = test_bit(__E1000_DOWN, &adapter->state);
e1000e_ptp_remove(adapter);
/* The timers may be rescheduled, so explicitly disable them
* from being rescheduled.
*/
- if (!down)
- set_bit(__E1000_DOWN, &adapter->state);
+ set_bit(__E1000_DOWN, &adapter->state);
del_timer_sync(&adapter->phy_info_timer);
cancel_work_sync(&adapter->reset_task);
@@ -7435,9 +7623,6 @@ static void e1000_remove(struct pci_dev *pdev)
}
}
- /* Don't lie to e1000_close() down the road. */
- if (!down)
- clear_bit(__E1000_DOWN, &adapter->state);
unregister_netdev(netdev);
if (pci_dev_run_wake(pdev))
@@ -7567,6 +7752,17 @@ static const struct pci_device_id e1000_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ICP_I219_V8), board_pch_cnp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ICP_I219_LM9), board_pch_cnp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ICP_I219_V9), board_pch_cnp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_LM10), board_pch_cnp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_V10), board_pch_cnp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_LM11), board_pch_cnp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_V11), board_pch_cnp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_LM12), board_pch_spt },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_V12), board_pch_spt },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM13), board_pch_cnp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V13), board_pch_cnp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM14), board_pch_cnp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V14), board_pch_cnp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM15), board_pch_cnp },
{ 0, 0, 0, 0, 0, 0, 0 } /* terminate list */
};
diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c
index 1a4c65d9feb4..eaa5a0fb99f0 100644
--- a/drivers/net/ethernet/intel/e1000e/ptp.c
+++ b/drivers/net/ethernet/intel/e1000e/ptp.c
@@ -295,6 +295,8 @@ void e1000e_ptp_init(struct e1000_adapter *adapter)
case e1000_pch_lpt:
case e1000_pch_spt:
case e1000_pch_cnp:
+ /* fall-through */
+ case e1000_pch_tgp:
if ((hw->mac.type < e1000_pch_lpt) ||
(er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)) {
adapter->ptp_clock_info.max_adj = 24000000 - 1;
diff --git a/drivers/net/ethernet/intel/e1000e/regs.h b/drivers/net/ethernet/intel/e1000e/regs.h
index 47f5ca793970..df59fd1d660c 100644
--- a/drivers/net/ethernet/intel/e1000e/regs.h
+++ b/drivers/net/ethernet/intel/e1000e/regs.h
@@ -18,6 +18,7 @@
#define E1000_FEXTNVM 0x00028 /* Future Extended NVM - RW */
#define E1000_FEXTNVM3 0x0003C /* Future Extended NVM 3 - RW */
#define E1000_FEXTNVM4 0x00024 /* Future Extended NVM 4 - RW */
+#define E1000_FEXTNVM5 0x00014 /* Future Extended NVM 5 - RW */
#define E1000_FEXTNVM6 0x00010 /* Future Extended NVM 6 - RW */
#define E1000_FEXTNVM7 0x000E4 /* Future Extended NVM 7 - RW */
#define E1000_FEXTNVM9 0x5BB4 /* Future Extended NVM 9 - RW */
@@ -234,4 +235,7 @@
#define E1000_RXMTRL 0x0B634 /* Time sync Rx EtherType and Msg Type - RW */
#define E1000_RXUDP 0x0B638 /* Time Sync Rx UDP Port - RW */
+/* PHY registers */
+#define I82579_DFT_CTRL PHY_REG(769, 20)
+
#endif
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h
index b14441944b4b..f306084ca12c 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k.h
@@ -534,6 +534,7 @@ void fm10k_iov_suspend(struct pci_dev *pdev);
int fm10k_iov_resume(struct pci_dev *pdev);
void fm10k_iov_disable(struct pci_dev *pdev);
int fm10k_iov_configure(struct pci_dev *pdev, int num_vfs);
+void fm10k_iov_update_stats(struct fm10k_intfc *interface);
s32 fm10k_iov_update_pvid(struct fm10k_intfc *interface, u16 glort, u16 pvid);
int fm10k_ndo_set_vf_mac(struct net_device *netdev, int vf_idx, u8 *mac);
int fm10k_ndo_set_vf_vlan(struct net_device *netdev,
@@ -542,6 +543,8 @@ int fm10k_ndo_set_vf_bw(struct net_device *netdev, int vf_idx,
int __always_unused min_rate, int max_rate);
int fm10k_ndo_get_vf_config(struct net_device *netdev,
int vf_idx, struct ifla_vf_info *ivi);
+int fm10k_ndo_get_vf_stats(struct net_device *netdev,
+ int vf_idx, struct ifla_vf_stats *stats);
/* DebugFS */
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
index afe1fafd2447..8c50a128df29 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
@@ -520,6 +520,27 @@ int fm10k_iov_configure(struct pci_dev *pdev, int num_vfs)
return num_vfs;
}
+/**
+ * fm10k_iov_update_stats - Update stats for all VFs
+ * @interface: device private structure
+ *
+ * Updates the VF statistics for all enabled VFs. Expects to be called by
+ * fm10k_update_stats and assumes that locking via the __FM10K_UPDATING_STATS
+ * bit is already handled.
+ */
+void fm10k_iov_update_stats(struct fm10k_intfc *interface)
+{
+ struct fm10k_iov_data *iov_data = interface->iov_data;
+ struct fm10k_hw *hw = &interface->hw;
+ int i;
+
+ if (!iov_data)
+ return;
+
+ for (i = 0; i < iov_data->num_vfs; i++)
+ hw->iov.ops.update_stats(hw, iov_data->vf_info[i].stats, i);
+}
+
static inline void fm10k_reset_vf_info(struct fm10k_intfc *interface,
struct fm10k_vf_info *vf_info)
{
@@ -650,3 +671,30 @@ int fm10k_ndo_get_vf_config(struct net_device *netdev,
return 0;
}
+
+int fm10k_ndo_get_vf_stats(struct net_device *netdev,
+ int vf_idx, struct ifla_vf_stats *stats)
+{
+ struct fm10k_intfc *interface = netdev_priv(netdev);
+ struct fm10k_iov_data *iov_data = interface->iov_data;
+ struct fm10k_hw *hw = &interface->hw;
+ struct fm10k_hw_stats_q *hw_stats;
+ u32 idx, qpp;
+
+ /* verify SR-IOV is active and that vf idx is valid */
+ if (!iov_data || vf_idx >= iov_data->num_vfs)
+ return -EINVAL;
+
+ qpp = fm10k_queues_per_pool(hw);
+ hw_stats = iov_data->vf_info[vf_idx].stats;
+
+ for (idx = 0; idx < qpp; idx++) {
+ stats->rx_packets += hw_stats[idx].rx_packets.count;
+ stats->tx_packets += hw_stats[idx].tx_packets.count;
+ stats->rx_bytes += hw_stats[idx].rx_bytes.count;
+ stats->tx_bytes += hw_stats[idx].tx_bytes.count;
+ stats->rx_dropped += hw_stats[idx].rx_drops.count;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index 2be9222510e7..17738b0a9873 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -11,7 +11,7 @@
#include "fm10k.h"
-#define DRV_VERSION "0.26.1-k"
+#define DRV_VERSION "0.27.1-k"
#define DRV_SUMMARY "Intel(R) Ethernet Switch Host Interface Driver"
const char fm10k_driver_version[] = DRV_VERSION;
char fm10k_driver_name[] = "fm10k";
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
index 09f7a246e134..68baee04dc58 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
@@ -1643,6 +1643,7 @@ static const struct net_device_ops fm10k_netdev_ops = {
.ndo_set_vf_vlan = fm10k_ndo_set_vf_vlan,
.ndo_set_vf_rate = fm10k_ndo_set_vf_bw,
.ndo_get_vf_config = fm10k_ndo_get_vf_config,
+ .ndo_get_vf_stats = fm10k_ndo_get_vf_stats,
.ndo_udp_tunnel_add = fm10k_udp_tunnel_add,
.ndo_udp_tunnel_del = fm10k_udp_tunnel_del,
.ndo_dfwd_add_station = fm10k_dfwd_add_station,
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
index bb236fa44048..d122d0087191 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
@@ -630,6 +630,9 @@ void fm10k_update_stats(struct fm10k_intfc *interface)
net_stats->rx_errors = rx_errors;
net_stats->rx_dropped = interface->stats.nodesc_drop.count;
+ /* Update VF statistics */
+ fm10k_iov_update_stats(interface);
+
clear_bit(__FM10K_UPDATING_STATS, interface->state);
}
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.h b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.h
index 160bc5b78f99..ceb9b791f799 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
+/* Copyright(c) 2013 - 2019 Intel Corporation. */
#ifndef _FM10K_TLV_H_
#define _FM10K_TLV_H_
@@ -76,8 +76,8 @@ struct fm10k_tlv_attr {
#define FM10K_TLV_ATTR_S32(id) { id, FM10K_TLV_SIGNED, 4 }
#define FM10K_TLV_ATTR_S64(id) { id, FM10K_TLV_SIGNED, 8 }
#define FM10K_TLV_ATTR_LE_STRUCT(id, len) { id, FM10K_TLV_LE_STRUCT, len }
-#define FM10K_TLV_ATTR_NESTED(id) { id, FM10K_TLV_NESTED }
-#define FM10K_TLV_ATTR_LAST { FM10K_TLV_ERROR }
+#define FM10K_TLV_ATTR_NESTED(id) { id, FM10K_TLV_NESTED, 0 }
+#define FM10K_TLV_ATTR_LAST { FM10K_TLV_ERROR, 0, 0 }
struct fm10k_msg_data {
unsigned int id;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_type.h b/drivers/net/ethernet/intel/fm10k/fm10k_type.h
index 15ac1c7885bc..63968c5d7c5d 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_type.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_type.h
@@ -581,6 +581,7 @@ struct fm10k_vf_info {
* at the same offset as the mailbox
*/
struct fm10k_mbx_info mbx; /* PF side of VF mailbox */
+ struct fm10k_hw_stats_q stats[FM10K_MAX_QUEUES_POOL];
int rate; /* Tx BW cap as defined by OS */
u16 glort; /* resource tag for this VF */
u16 sw_vid; /* Switch API assigned VLAN */
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 2af9f6308f84..cb6367334ca7 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -1118,6 +1118,7 @@ struct i40e_mac_filter *i40e_add_mac_filter(struct i40e_vsi *vsi,
const u8 *macaddr);
int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr);
bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi);
+int i40e_count_filters(struct i40e_vsi *vsi);
struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr);
void i40e_vlan_stripping_enable(struct i40e_vsi *vsi);
#ifdef CONFIG_I40E_DCB
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index 72c04881d290..9f0a4e92a231 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -508,6 +508,59 @@ shutdown_arq_out:
}
/**
+ * i40e_set_hw_flags - set HW flags
+ * @hw: pointer to the hardware structure
+ **/
+static void i40e_set_hw_flags(struct i40e_hw *hw)
+{
+ struct i40e_adminq_info *aq = &hw->aq;
+
+ hw->flags = 0;
+
+ switch (hw->mac.type) {
+ case I40E_MAC_XL710:
+ if (aq->api_maj_ver > 1 ||
+ (aq->api_maj_ver == 1 &&
+ aq->api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710)) {
+ hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;
+ hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;
+ /* The ability to RX (not drop) 802.1ad frames */
+ hw->flags |= I40E_HW_FLAG_802_1AD_CAPABLE;
+ }
+ break;
+ case I40E_MAC_X722:
+ hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE |
+ I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;
+
+ if (aq->api_maj_ver > 1 ||
+ (aq->api_maj_ver == 1 &&
+ aq->api_min_ver >= I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722))
+ hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;
+ /* fall through */
+ default:
+ break;
+ }
+
+ /* Newer versions of firmware require lock when reading the NVM */
+ if (aq->api_maj_ver > 1 ||
+ (aq->api_maj_ver == 1 &&
+ aq->api_min_ver >= 5))
+ hw->flags |= I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;
+
+ if (aq->api_maj_ver > 1 ||
+ (aq->api_maj_ver == 1 &&
+ aq->api_min_ver >= 8)) {
+ hw->flags |= I40E_HW_FLAG_FW_LLDP_PERSISTENT;
+ hw->flags |= I40E_HW_FLAG_DROP_MODE;
+ }
+
+ if (aq->api_maj_ver > 1 ||
+ (aq->api_maj_ver == 1 &&
+ aq->api_min_ver >= 9))
+ hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_EXTENDED;
+}
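The repeated "maj > X || (maj == X && min >= Y)" comparisons above could be captured in a single predicate; a hypothetical helper, not part of this patch:

/* True if the admin queue API version is at least maj.min. */
static inline bool example_aq_api_ge(const struct i40e_adminq_info *aq,
				     u16 maj, u16 min)
{
	return aq->api_maj_ver > maj ||
	       (aq->api_maj_ver == maj && aq->api_min_ver >= min);
}

/* e.g. the NVM-lock check becomes:
 *	if (example_aq_api_ge(aq, 1, 5))
 *		hw->flags |= I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;
 */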
+
+/**
* i40e_init_adminq - main initialization routine for Admin Queue
* @hw: pointer to the hardware structure
*
@@ -571,6 +624,11 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw)
if (ret_code != I40E_SUCCESS)
goto init_adminq_free_arq;
+ /* Some features were introduced in different FW API versions
+ * for different MAC types.
+ */
+ i40e_set_hw_flags(hw);
+
/* get the NVM version info */
i40e_read_nvm_word(hw, I40E_SR_NVM_DEV_STARTER_VERSION,
&hw->nvm.version);
@@ -596,25 +654,12 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw)
hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;
}
- /* Newer versions of firmware require lock when reading the NVM */
- if (hw->aq.api_maj_ver > 1 ||
- (hw->aq.api_maj_ver == 1 &&
- hw->aq.api_min_ver >= 5))
- hw->flags |= I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;
-
/* The ability to RX (not drop) 802.1ad frames was added in API 1.7 */
if (hw->aq.api_maj_ver > 1 ||
(hw->aq.api_maj_ver == 1 &&
hw->aq.api_min_ver >= 7))
hw->flags |= I40E_HW_FLAG_802_1AD_CAPABLE;
- if (hw->aq.api_maj_ver > 1 ||
- (hw->aq.api_maj_ver == 1 &&
- hw->aq.api_min_ver >= 8)) {
- hw->flags |= I40E_HW_FLAG_FW_LLDP_PERSISTENT;
- hw->flags |= I40E_HW_FLAG_DROP_MODE;
- }
-
if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
ret_code = I40E_ERR_FIRMWARE_API_VERSION;
goto init_adminq_free_arq;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index 69a2daaca5c5..aa5f1c0aa721 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -2251,7 +2251,13 @@ struct i40e_aqc_phy_register_access {
#define I40E_AQ_PHY_REG_ACCESS_EXTERNAL 1
#define I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE 2
u8 dev_address;
- u8 reserved1[2];
+ u8 cmd_flags;
+#define I40E_AQ_PHY_REG_ACCESS_DONT_CHANGE_QSFP_PAGE 0x01
+#define I40E_AQ_PHY_REG_ACCESS_SET_MDIO_IF_NUMBER 0x02
+#define I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_SHIFT 2
+#define I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_MASK (0x3 << \
+ I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_SHIFT)
+ u8 reserved1;
__le32 reg_address;
__le32 reg_value;
u8 reserved2[4];
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 7560f06768e0..d4055037af89 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -29,6 +29,7 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw)
case I40E_DEV_ID_QSFP_C:
case I40E_DEV_ID_10G_BASE_T:
case I40E_DEV_ID_10G_BASE_T4:
+ case I40E_DEV_ID_10G_BASE_T_BC:
case I40E_DEV_ID_10G_B:
case I40E_DEV_ID_10G_SFP:
case I40E_DEV_ID_20G_KR2:
@@ -933,10 +934,6 @@ i40e_status i40e_init_shared_code(struct i40e_hw *hw)
else
hw->pf_id = (u8)(func_rid & 0x7);
- if (hw->mac.type == I40E_MAC_X722)
- hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE |
- I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;
-
status = i40e_init_nvm(hw);
return status;
}
@@ -1441,9 +1438,9 @@ static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx)
u32 gpio_val = 0;
u32 port;
- if (!hw->func_caps.led[idx])
+ if (!I40E_IS_X710TL_DEVICE(hw->device_id) &&
+ !hw->func_caps.led[idx])
return 0;
-
gpio_val = rd32(hw, I40E_GLGEN_GPIO_CTL(idx));
port = (gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK) >>
I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT;
@@ -1462,8 +1459,15 @@ static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx)
#define I40E_FILTER_ACTIVITY 0xE
#define I40E_LINK_ACTIVITY 0xC
#define I40E_MAC_ACTIVITY 0xD
+#define I40E_FW_LED BIT(4)
+#define I40E_LED_MODE_VALID (I40E_GLGEN_GPIO_CTL_LED_MODE_MASK >> \
+ I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT)
+
#define I40E_LED0 22
+#define I40E_PIN_FUNC_SDP 0x0
+#define I40E_PIN_FUNC_LED 0x1
+
/**
* i40e_led_get - return current on/off mode
* @hw: pointer to the hw struct
@@ -1508,8 +1512,10 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
{
int i;
- if (mode & 0xfffffff0)
+ if (mode & ~I40E_LED_MODE_VALID) {
hw_dbg(hw, "invalid mode passed in %X\n", mode);
+ return;
+ }
/* as per the documentation GPIO 22-29 are the LED
* GPIO pins named LED0..LED7
@@ -1519,6 +1525,20 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
if (!gpio_val)
continue;
+
+ if (I40E_IS_X710TL_DEVICE(hw->device_id)) {
+ u32 pin_func = 0;
+
+ if (mode & I40E_FW_LED)
+ pin_func = I40E_PIN_FUNC_SDP;
+ else
+ pin_func = I40E_PIN_FUNC_LED;
+
+ gpio_val &= ~I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK;
+ gpio_val |= ((pin_func <<
+ I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT) &
+ I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK);
+ }
gpio_val &= ~I40E_GLGEN_GPIO_CTL_LED_MODE_MASK;
/* this & is a bit of paranoia, but serves as a range check */
gpio_val |= ((mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) &
@@ -2571,9 +2591,16 @@ noinline_for_stack i40e_status i40e_update_link_info(struct i40e_hw *hw)
if (status)
return status;
- hw->phy.link_info.req_fec_info =
- abilities.fec_cfg_curr_mod_ext_info &
- (I40E_AQ_REQUEST_FEC_KR | I40E_AQ_REQUEST_FEC_RS);
+ if (abilities.fec_cfg_curr_mod_ext_info &
+ I40E_AQ_ENABLE_FEC_AUTO)
+ hw->phy.link_info.req_fec_info =
+ (I40E_AQ_REQUEST_FEC_KR |
+ I40E_AQ_REQUEST_FEC_RS);
+ else
+ hw->phy.link_info.req_fec_info =
+ abilities.fec_cfg_curr_mod_ext_info &
+ (I40E_AQ_REQUEST_FEC_KR |
+ I40E_AQ_REQUEST_FEC_RS);
memcpy(hw->phy.link_info.module_type, &abilities.module_type,
sizeof(hw->phy.link_info.module_type));
@@ -4885,6 +4912,7 @@ i40e_status i40e_write_phy_register(struct i40e_hw *hw,
break;
case I40E_DEV_ID_10G_BASE_T:
case I40E_DEV_ID_10G_BASE_T4:
+ case I40E_DEV_ID_10G_BASE_T_BC:
case I40E_DEV_ID_10G_BASE_T_X722:
case I40E_DEV_ID_25G_B:
case I40E_DEV_ID_25G_SFP28:
@@ -5044,7 +5072,7 @@ static enum i40e_status_code i40e_led_get_reg(struct i40e_hw *hw, u16 led_addr,
status =
i40e_aq_get_phy_register(hw,
I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
- I40E_PHY_COM_REG_PAGE,
+ I40E_PHY_COM_REG_PAGE, true,
I40E_PHY_LED_PROV_REG_1,
reg_val, NULL);
} else {
@@ -5077,7 +5105,7 @@ static enum i40e_status_code i40e_led_set_reg(struct i40e_hw *hw, u16 led_addr,
status =
i40e_aq_set_phy_register(hw,
I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
- I40E_PHY_COM_REG_PAGE,
+ I40E_PHY_COM_REG_PAGE, true,
I40E_PHY_LED_PROV_REG_1,
reg_val, NULL);
} else {
@@ -5116,7 +5144,7 @@ i40e_status i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
status =
i40e_aq_get_phy_register(hw,
I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
- I40E_PHY_COM_REG_PAGE,
+ I40E_PHY_COM_REG_PAGE, true,
I40E_PHY_LED_PROV_REG_1,
&reg_val_aq, NULL);
if (status == I40E_SUCCESS)
@@ -5321,20 +5349,49 @@ do_retry:
}
/**
- * i40e_aq_set_phy_register
+ * i40e_mdio_if_number_selection - MDIO I/F number selection
+ * @hw: pointer to the hw struct
+ * @set_mdio: use MDIO I/F number specified by mdio_num
+ * @mdio_num: MDIO I/F number
+ * @cmd: pointer to PHY Register command structure
+ **/
+static void i40e_mdio_if_number_selection(struct i40e_hw *hw, bool set_mdio,
+ u8 mdio_num,
+ struct i40e_aqc_phy_register_access *cmd)
+{
+ if (set_mdio && cmd->phy_interface == I40E_AQ_PHY_REG_ACCESS_EXTERNAL) {
+ if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_EXTENDED)
+ cmd->cmd_flags |=
+ I40E_AQ_PHY_REG_ACCESS_SET_MDIO_IF_NUMBER |
+ ((mdio_num <<
+ I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_SHIFT) &
+ I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_MASK);
+ else
+ i40e_debug(hw, I40E_DEBUG_PHY,
+ "MDIO I/F number selection not supported by current FW version.\n");
+ }
+}
+
+/**
+ * i40e_aq_set_phy_register_ext
* @hw: pointer to the hw struct
* @phy_select: select which phy should be accessed
* @dev_addr: PHY device address
+ * @set_mdio: use MDIO I/F number specified by mdio_num
+ * @mdio_num: MDIO I/F number
* @reg_addr: PHY register address
* @reg_val: new register value
* @cmd_details: pointer to command details structure or NULL
*
* Write the external PHY register.
+ * NOTE: In common cases the MDIO I/F number should not be changed; that's
+ * why you may use the simple wrapper i40e_aq_set_phy_register.
**/
-i40e_status i40e_aq_set_phy_register(struct i40e_hw *hw,
- u8 phy_select, u8 dev_addr,
- u32 reg_addr, u32 reg_val,
- struct i40e_asq_cmd_details *cmd_details)
+enum i40e_status_code i40e_aq_set_phy_register_ext(struct i40e_hw *hw,
+ u8 phy_select, u8 dev_addr, bool page_change,
+ bool set_mdio, u8 mdio_num,
+ u32 reg_addr, u32 reg_val,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_phy_register_access *cmd =
@@ -5349,26 +5406,36 @@ i40e_status i40e_aq_set_phy_register(struct i40e_hw *hw,
cmd->reg_address = cpu_to_le32(reg_addr);
cmd->reg_value = cpu_to_le32(reg_val);
+ i40e_mdio_if_number_selection(hw, set_mdio, mdio_num, cmd);
+
+ if (!page_change)
+ cmd->cmd_flags = I40E_AQ_PHY_REG_ACCESS_DONT_CHANGE_QSFP_PAGE;
+
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
return status;
}
/**
- * i40e_aq_get_phy_register
+ * i40e_aq_get_phy_register_ext
* @hw: pointer to the hw struct
* @phy_select: select which phy should be accessed
* @dev_addr: PHY device address
+ * @set_mdio: use MDIO I/F number specified by mdio_num
+ * @mdio_num: MDIO I/F number
* @reg_addr: PHY register address
* @reg_val: read register value
* @cmd_details: pointer to command details structure or NULL
*
* Read the external PHY register.
+ * NOTE: In common cases the MDIO I/F number should not be changed; that's
+ * why you may use the simple wrapper i40e_aq_get_phy_register.
**/
-i40e_status i40e_aq_get_phy_register(struct i40e_hw *hw,
- u8 phy_select, u8 dev_addr,
- u32 reg_addr, u32 *reg_val,
- struct i40e_asq_cmd_details *cmd_details)
+enum i40e_status_code i40e_aq_get_phy_register_ext(struct i40e_hw *hw,
+ u8 phy_select, u8 dev_addr, bool page_change,
+ bool set_mdio, u8 mdio_num,
+ u32 reg_addr, u32 *reg_val,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_phy_register_access *cmd =
@@ -5382,6 +5449,11 @@ i40e_status i40e_aq_get_phy_register(struct i40e_hw *hw,
cmd->dev_address = dev_addr;
cmd->reg_address = cpu_to_le32(reg_addr);
+ i40e_mdio_if_number_selection(hw, set_mdio, mdio_num, cmd);
+
+ if (!page_change)
+ cmd->cmd_flags = I40E_AQ_PHY_REG_ACCESS_DONT_CHANGE_QSFP_PAGE;
+
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
if (!status)
*reg_val = le32_to_cpu(cmd->reg_value);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
index 200a1cb3b536..9de503c5f99b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
@@ -889,7 +889,9 @@ i40e_status i40e_init_dcb(struct i40e_hw *hw, bool enable_mib_change)
ret = i40e_read_nvm_module_data(hw,
I40E_SR_EMP_SR_SETTINGS_PTR,
- offset, 1,
+ offset,
+ I40E_LLDP_CURRENT_STATUS_OFFSET,
+ I40E_LLDP_CURRENT_STATUS_SIZE,
&lldp_cfg.adminstatus);
} else {
ret = i40e_read_lldp_cfg(hw, &lldp_cfg);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.h b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
index 2a80c5daa376..ba86ad833bee 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
@@ -32,6 +32,9 @@
#define I40E_CEE_MAX_FEAT_TYPE 3
#define I40E_LLDP_CURRENT_STATUS_XL710_OFFSET 0x2B
#define I40E_LLDP_CURRENT_STATUS_X722_OFFSET 0x31
+#define I40E_LLDP_CURRENT_STATUS_OFFSET 1
+#define I40E_LLDP_CURRENT_STATUS_SIZE 1
+
/* Defines for LLDP TLV header */
#define I40E_LLDP_TLV_LEN_SHIFT 0
#define I40E_LLDP_TLV_LEN_MASK (0x01FF << I40E_LLDP_TLV_LEN_SHIFT)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_devids.h b/drivers/net/ethernet/intel/i40e/i40e_devids.h
index bac4da031f9b..bf15a868292f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_devids.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_devids.h
@@ -23,6 +23,8 @@
#define I40E_DEV_ID_10G_BASE_T_BC 0x15FF
#define I40E_DEV_ID_10G_B 0x104F
#define I40E_DEV_ID_10G_SFP 0x104E
+#define I40E_IS_X710TL_DEVICE(d) \
+ ((d) == I40E_DEV_ID_10G_BASE_T_BC)
#define I40E_DEV_ID_KX_X722 0x37CE
#define I40E_DEV_ID_QSFP_X722 0x37CF
#define I40E_DEV_ID_SFP_X722 0x37D0
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 41e1240acaea..d24d8731bef0 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -722,7 +722,14 @@ static void i40e_get_settings_link_up_fec(u8 req_fec_info,
ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS);
ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER);
- if (I40E_AQ_SET_FEC_REQUEST_RS & req_fec_info) {
+ if ((I40E_AQ_SET_FEC_REQUEST_RS & req_fec_info) &&
+ (I40E_AQ_SET_FEC_REQUEST_KR & req_fec_info)) {
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ FEC_NONE);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ FEC_BASER);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS);
+ } else if (I40E_AQ_SET_FEC_REQUEST_RS & req_fec_info) {
ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS);
} else if (I40E_AQ_SET_FEC_REQUEST_KR & req_fec_info) {
ethtool_link_ksettings_add_link_mode(ks, advertising,
@@ -730,12 +737,6 @@ static void i40e_get_settings_link_up_fec(u8 req_fec_info,
} else {
ethtool_link_ksettings_add_link_mode(ks, advertising,
FEC_NONE);
- if (I40E_AQ_SET_FEC_AUTO & req_fec_info) {
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- FEC_RS);
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- FEC_BASER);
- }
}
}
@@ -1437,6 +1438,7 @@ static int i40e_get_fec_param(struct net_device *netdev,
struct i40e_hw *hw = &pf->hw;
i40e_status status = 0;
int err = 0;
+ u8 fec_cfg;
/* Get the current phy config */
memset(&abilities, 0, sizeof(abilities));
@@ -1448,18 +1450,16 @@ static int i40e_get_fec_param(struct net_device *netdev,
}
fecparam->fec = 0;
- if (abilities.fec_cfg_curr_mod_ext_info & I40E_AQ_SET_FEC_AUTO)
+ fec_cfg = abilities.fec_cfg_curr_mod_ext_info;
+ if (fec_cfg & I40E_AQ_SET_FEC_AUTO)
fecparam->fec |= ETHTOOL_FEC_AUTO;
- if ((abilities.fec_cfg_curr_mod_ext_info &
- I40E_AQ_SET_FEC_REQUEST_RS) ||
- (abilities.fec_cfg_curr_mod_ext_info &
- I40E_AQ_SET_FEC_ABILITY_RS))
+ else if (fec_cfg & (I40E_AQ_SET_FEC_REQUEST_RS |
+ I40E_AQ_SET_FEC_ABILITY_RS))
fecparam->fec |= ETHTOOL_FEC_RS;
- if ((abilities.fec_cfg_curr_mod_ext_info &
- I40E_AQ_SET_FEC_REQUEST_KR) ||
- (abilities.fec_cfg_curr_mod_ext_info & I40E_AQ_SET_FEC_ABILITY_KR))
+ else if (fec_cfg & (I40E_AQ_SET_FEC_REQUEST_KR |
+ I40E_AQ_SET_FEC_ABILITY_KR))
fecparam->fec |= ETHTOOL_FEC_BASER;
- if (abilities.fec_cfg_curr_mod_ext_info == 0)
+ if (fec_cfg == 0)
fecparam->fec |= ETHTOOL_FEC_OFF;
if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_KR_ENA)
@@ -5112,7 +5112,7 @@ static int i40e_get_module_info(struct net_device *netdev,
case I40E_MODULE_TYPE_SFP:
status = i40e_aq_get_phy_register(hw,
I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
- I40E_I2C_EEPROM_DEV_ADDR,
+ I40E_I2C_EEPROM_DEV_ADDR, true,
I40E_MODULE_SFF_8472_COMP,
&sff8472_comp, NULL);
if (status)
@@ -5120,7 +5120,7 @@ static int i40e_get_module_info(struct net_device *netdev,
status = i40e_aq_get_phy_register(hw,
I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
- I40E_I2C_EEPROM_DEV_ADDR,
+ I40E_I2C_EEPROM_DEV_ADDR, true,
I40E_MODULE_SFF_8472_SWAP,
&sff8472_swap, NULL);
if (status)
@@ -5152,7 +5152,7 @@ static int i40e_get_module_info(struct net_device *netdev,
/* Read from memory page 0. */
status = i40e_aq_get_phy_register(hw,
I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
- 0,
+ 0, true,
I40E_MODULE_REVISION_ADDR,
&sff8636_rev, NULL);
if (status)
@@ -5223,7 +5223,7 @@ static int i40e_get_module_eeprom(struct net_device *netdev,
status = i40e_aq_get_phy_register(hw,
I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
- addr, offset, &value, NULL);
+ true, addr, offset, &value, NULL);
if (status)
return -EIO;
data[i] = value;
@@ -5242,6 +5242,7 @@ static int i40e_set_eee(struct net_device *netdev, struct ethtool_eee *edata)
}
static const struct ethtool_ops i40e_ethtool_recovery_mode_ops = {
+ .get_drvinfo = i40e_get_drvinfo,
.set_eeprom = i40e_set_eeprom,
.get_eeprom_len = i40e_get_eeprom_len,
.get_eeprom = i40e_get_eeprom,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 6031223eafab..1ccabeafa44c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -1110,6 +1110,25 @@ void i40e_update_stats(struct i40e_vsi *vsi)
}
/**
+ * i40e_count_filters - count VSI MAC filters
+ * @vsi: the VSI to be searched
+ *
+ * Returns the count of MAC filters
+ **/
+int i40e_count_filters(struct i40e_vsi *vsi)
+{
+ struct i40e_mac_filter *f;
+ struct hlist_node *h;
+ int bkt;
+ int cnt = 0;
+
+ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
+ ++cnt;
+
+ return cnt;
+}
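A hedged usage sketch of the new helper, mirroring the permission check reworked later in this patch; the untrusted-VF condition shown is illustrative, not part of the change itself:

	/* illustrative only: stop an untrusted VF that has already
	 * reached its MAC filter budget
	 */
	if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
	    i40e_count_filters(vsi) >= I40E_VC_MAX_MAC_ADDR_PER_VF)
		return -EPERM;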
+
+/**
* i40e_find_filter - Search VSI filter list for specific mac/vlan filter
* @vsi: the VSI to be searched
* @macaddr: the MAC address
@@ -2645,8 +2664,8 @@ static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
return -EINVAL;
}
- netdev_info(netdev, "changing MTU from %d to %d\n",
- netdev->mtu, new_mtu);
+ netdev_dbg(netdev, "changing MTU from %d to %d\n",
+ netdev->mtu, new_mtu);
netdev->mtu = new_mtu;
if (netif_running(netdev))
i40e_vsi_reinit_locked(vsi);
@@ -3534,14 +3553,14 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
q_vector->rx.target_itr =
ITR_TO_REG(vsi->rx_rings[i]->itr_setting);
wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
- q_vector->rx.target_itr);
+ q_vector->rx.target_itr >> 1);
q_vector->rx.current_itr = q_vector->rx.target_itr;
q_vector->tx.next_update = jiffies + 1;
q_vector->tx.target_itr =
ITR_TO_REG(vsi->tx_rings[i]->itr_setting);
wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
- q_vector->tx.target_itr);
+ q_vector->tx.target_itr >> 1);
q_vector->tx.current_itr = q_vector->tx.target_itr;
wr32(hw, I40E_PFINT_RATEN(vector - 1),
@@ -3646,11 +3665,11 @@ static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
/* set the ITR configuration */
q_vector->rx.next_update = jiffies + 1;
q_vector->rx.target_itr = ITR_TO_REG(vsi->rx_rings[0]->itr_setting);
- wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.target_itr);
+ wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.target_itr >> 1);
q_vector->rx.current_itr = q_vector->rx.target_itr;
q_vector->tx.next_update = jiffies + 1;
q_vector->tx.target_itr = ITR_TO_REG(vsi->tx_rings[0]->itr_setting);
- wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.target_itr);
+ wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.target_itr >> 1);
q_vector->tx.current_itr = q_vector->tx.target_itr;
i40e_enable_misc_int_causes(pf);
@@ -7168,6 +7187,7 @@ static int i40e_setup_macvlans(struct i40e_vsi *vsi, u16 macvlan_cnt, u16 qcnt,
ch->num_queue_pairs = qcnt;
if (!i40e_setup_channel(pf, vsi, ch)) {
ret = -EINVAL;
+ kfree(ch);
goto err_free;
}
ch->parent_vsi = vsi;
@@ -11396,7 +11416,7 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf)
/* associate no queues to the misc vector */
wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
- wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K);
+ wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K >> 1);
i40e_flush(hw);
@@ -12850,6 +12870,7 @@ static const struct net_device_ops i40e_netdev_ops = {
.ndo_set_features = i40e_set_features,
.ndo_set_vf_mac = i40e_ndo_set_vf_mac,
.ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan,
+ .ndo_get_vf_stats = i40e_get_vf_stats,
.ndo_set_vf_rate = i40e_ndo_set_vf_bw,
.ndo_get_vf_config = i40e_ndo_get_vf_config,
.ndo_set_vf_link_state = i40e_ndo_set_vf_link_state,
@@ -12911,6 +12932,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
NETIF_F_GSO_IPXIP6 |
NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_GSO_UDP_L4 |
NETIF_F_SCTP_CRC |
NETIF_F_RXHASH |
NETIF_F_RXCSUM |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index e4d8d20baf3b..7164f4ad8120 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -323,20 +323,24 @@ i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
/**
* i40e_read_nvm_module_data - Reads NVM Buffer to specified memory location
- * @hw: pointer to the HW structure
+ * @hw: Pointer to the HW structure
* @module_ptr: Pointer to module in words with respect to NVM beginning
- * @offset: offset in words from module start
+ * @module_offset: Offset in words from module start
+ * @data_offset: Offset in words from reading data area start
* @words_data_size: Words to read from NVM
* @data_ptr: Pointer to memory location where resulting buffer will be stored
**/
-i40e_status i40e_read_nvm_module_data(struct i40e_hw *hw,
- u8 module_ptr, u16 offset,
- u16 words_data_size,
- u16 *data_ptr)
+enum i40e_status_code i40e_read_nvm_module_data(struct i40e_hw *hw,
+ u8 module_ptr,
+ u16 module_offset,
+ u16 data_offset,
+ u16 words_data_size,
+ u16 *data_ptr)
{
i40e_status status;
+ u16 specific_ptr = 0;
u16 ptr_value = 0;
- u32 flat_offset;
+ u32 offset = 0;
if (module_ptr != 0) {
status = i40e_read_nvm_word(hw, module_ptr, &ptr_value);
@@ -352,36 +356,35 @@ i40e_status i40e_read_nvm_module_data(struct i40e_hw *hw,
/* Pointer not initialized */
if (ptr_value == I40E_NVM_INVALID_PTR_VAL ||
- ptr_value == I40E_NVM_INVALID_VAL)
+ ptr_value == I40E_NVM_INVALID_VAL) {
+ i40e_debug(hw, I40E_DEBUG_ALL, "Pointer not initialized.\n");
return I40E_ERR_BAD_PTR;
+ }
/* Check whether the module is in SR mapped area or outside */
if (ptr_value & I40E_PTR_TYPE) {
/* Pointer points outside of the Shared RAM mapped area */
- ptr_value &= ~I40E_PTR_TYPE;
+ i40e_debug(hw, I40E_DEBUG_ALL,
+ "Reading nvm data failed. Pointer points outside of the Shared RAM mapped area.\n");
- /* PtrValue in 4kB units, need to convert to words */
- ptr_value /= 2;
- flat_offset = ((u32)ptr_value * 0x1000) + (u32)offset;
- status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
- if (!status) {
- status = i40e_aq_read_nvm(hw, 0, 2 * flat_offset,
- 2 * words_data_size,
- data_ptr, true, NULL);
- i40e_release_nvm(hw);
- if (status) {
- i40e_debug(hw, I40E_DEBUG_ALL,
- "Reading nvm aq failed.Error code: %d.\n",
- status);
- return I40E_ERR_NVM;
- }
- } else {
- return I40E_ERR_NVM;
- }
+ return I40E_ERR_PARAM;
} else {
/* Read from the Shadow RAM */
- status = i40e_read_nvm_buffer(hw, ptr_value + offset,
- &words_data_size, data_ptr);
+
+ status = i40e_read_nvm_word(hw, ptr_value + module_offset,
+ &specific_ptr);
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_ALL,
+ "Reading nvm word failed.Error code: %d.\n",
+ status);
+ return I40E_ERR_NVM;
+ }
+
+ offset = ptr_value + module_offset + specific_ptr +
+ data_offset;
+
+ status = i40e_read_nvm_buffer(hw, offset, &words_data_size,
+ data_ptr);
if (status) {
i40e_debug(hw, I40E_DEBUG_ALL,
"Reading nvm buffer failed.Error code: %d.\n",
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index 5250441bf75b..bbb478f09093 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -315,10 +315,12 @@ i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
void i40e_release_nvm(struct i40e_hw *hw);
i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
u16 *data);
-i40e_status i40e_read_nvm_module_data(struct i40e_hw *hw,
- u8 module_ptr, u16 offset,
- u16 words_data_size,
- u16 *data_ptr);
+enum i40e_status_code i40e_read_nvm_module_data(struct i40e_hw *hw,
+ u8 module_ptr,
+ u16 module_offset,
+ u16 data_offset,
+ u16 words_data_size,
+ u16 *data_ptr);
i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
u16 *words, u16 *data);
i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw);
@@ -409,14 +411,24 @@ i40e_status i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
u32 reg_addr, u32 reg_val,
struct i40e_asq_cmd_details *cmd_details);
void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val);
-i40e_status i40e_aq_set_phy_register(struct i40e_hw *hw,
- u8 phy_select, u8 dev_addr,
- u32 reg_addr, u32 reg_val,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_get_phy_register(struct i40e_hw *hw,
- u8 phy_select, u8 dev_addr,
- u32 reg_addr, u32 *reg_val,
- struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code
+i40e_aq_set_phy_register_ext(struct i40e_hw *hw,
+ u8 phy_select, u8 dev_addr, bool page_change,
+ bool set_mdio, u8 mdio_num,
+ u32 reg_addr, u32 reg_val,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code
+i40e_aq_get_phy_register_ext(struct i40e_hw *hw,
+ u8 phy_select, u8 dev_addr, bool page_change,
+ bool set_mdio, u8 mdio_num,
+ u32 reg_addr, u32 *reg_val,
+ struct i40e_asq_cmd_details *cmd_details);
+
+/* Convenience wrappers for most common use case */
+#define i40e_aq_set_phy_register(hw, ps, da, pc, ra, rv, cd) \
+ i40e_aq_set_phy_register_ext(hw, ps, da, pc, false, 0, ra, rv, cd)
+#define i40e_aq_get_phy_register(hw, ps, da, pc, ra, rv, cd) \
+ i40e_aq_get_phy_register_ext(hw, ps, da, pc, false, 0, ra, rv, cd)
i40e_status i40e_read_phy_register_clause22(struct i40e_hw *hw,
u16 reg, u8 phy_addr, u16 *value);
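A minimal sketch of a read through the compatibility wrapper above; the constants are the ones used by the ethtool module-info path in this patch, and the surrounding variables are assumed:

	u32 sff8472_comp = 0;
	i40e_status status;

	/* page_change = true lets firmware switch the QSFP page;
	 * the wrapper leaves set_mdio/mdio_num at their defaults
	 */
	status = i40e_aq_get_phy_register(hw,
					  I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
					  I40E_I2C_EEPROM_DEV_ADDR, true,
					  I40E_MODULE_SFF_8472_COMP,
					  &sff8472_comp, NULL);
	if (status)
		return -EIO;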
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index e3f29dc8b290..b8496037ef7f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -2960,10 +2960,16 @@ static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len,
/* remove payload length from inner checksum */
paylen = skb->len - l4_offset;
- csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));
- /* compute length of segmentation header */
- *hdr_len = (l4.tcp->doff * 4) + l4_offset;
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
+ csum_replace_by_diff(&l4.udp->check, (__force __wsum)htonl(paylen));
+ /* compute length of segmentation header */
+ *hdr_len = sizeof(*l4.udp) + l4_offset;
+ } else {
+ csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));
+ /* compute length of segmentation header */
+ *hdr_len = (l4.tcp->doff * 4) + l4_offset;
+ }
/* pull values out of skb_shinfo */
gso_size = skb_shinfo(skb)->gso_size;
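As a worked example of the branch above (header sizes assumed): for an untagged IPv4 packet, l4_offset = 14 (Ethernet) + 20 (IPv4) = 34; UDP segmentation then uses the fixed 8-byte UDP header, giving *hdr_len = 8 + 34 = 42, while a TCP header with doff = 5 gives *hdr_len = 20 + 34 = 54.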
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index b43ec94a0f29..6ea2867ff60f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -624,6 +624,7 @@ struct i40e_hw {
#define I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK BIT_ULL(3)
#define I40E_HW_FLAG_FW_LLDP_STOPPABLE BIT_ULL(4)
#define I40E_HW_FLAG_FW_LLDP_PERSISTENT BIT_ULL(5)
+#define I40E_HW_FLAG_AQ_PHY_ACCESS_EXTENDED BIT_ULL(6)
#define I40E_HW_FLAG_DROP_MODE BIT_ULL(7)
u64 flags;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 3d2440838822..6a3f0fc56c3b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -955,7 +955,6 @@ static void i40e_free_vf_res(struct i40e_vf *vf)
i40e_vsi_release(pf->vsi[vf->lan_vsi_idx]);
vf->lan_vsi_idx = 0;
vf->lan_vsi_id = 0;
- vf->num_mac = 0;
}
/* do the accounting and remove additional ADq VSI's */
@@ -2548,20 +2547,12 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf,
struct virtchnl_ether_addr_list *al)
{
struct i40e_pf *pf = vf->pf;
+ struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx];
+ int mac2add_cnt = 0;
int i;
- /* If this VF is not privileged, then we can't add more than a limited
- * number of addresses. Check to make sure that the additions do not
- * push us over the limit.
- */
- if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
- (vf->num_mac + al->num_elements) > I40E_VC_MAX_MAC_ADDR_PER_VF) {
- dev_err(&pf->pdev->dev,
- "Cannot add more MAC addresses, VF is not trusted, switch the VF to trusted to add more functionality\n");
- return -EPERM;
- }
-
for (i = 0; i < al->num_elements; i++) {
+ struct i40e_mac_filter *f;
u8 *addr = al->list[i].addr;
if (is_broadcast_ether_addr(addr) ||
@@ -2585,8 +2576,24 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf,
"VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
return -EPERM;
}
+
+ /* count filters that will actually be added */
+ f = i40e_find_mac(vsi, addr);
+ if (!f)
+ ++mac2add_cnt;
}
+ /* If this VF is not privileged, then we can't add more than a limited
+ * number of addresses. Check to make sure that the additions do not
+ * push us over the limit.
+ */
+ if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
+ (i40e_count_filters(vsi) + mac2add_cnt) >
+ I40E_VC_MAX_MAC_ADDR_PER_VF) {
+ dev_err(&pf->pdev->dev,
+ "Cannot add more MAC addresses, VF is not trusted, switch the VF to trusted to add more functionality\n");
+ return -EPERM;
+ }
return 0;
}
@@ -2640,8 +2647,6 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
ret = I40E_ERR_PARAM;
spin_unlock_bh(&vsi->mac_filter_hash_lock);
goto error_param;
- } else {
- vf->num_mac++;
}
}
}
@@ -2689,16 +2694,6 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
ret = I40E_ERR_INVALID_MAC_ADDR;
goto error_param;
}
-
- if (vf->pf_set_mac &&
- ether_addr_equal(al->list[i].addr,
- vf->default_lan_addr.addr)) {
- dev_err(&pf->pdev->dev,
- "MAC addr %pM has been set by PF, cannot delete it for VF %d, reset VF to change MAC addr\n",
- vf->default_lan_addr.addr, vf->vf_id);
- ret = I40E_ERR_PARAM;
- goto error_param;
- }
}
vsi = pf->vsi[vf->lan_vsi_idx];
@@ -2709,8 +2704,6 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
ret = I40E_ERR_INVALID_MAC_ADDR;
spin_unlock_bh(&vsi->mac_filter_hash_lock);
goto error_param;
- } else {
- vf->num_mac--;
}
spin_unlock_bh(&vsi->mac_filter_hash_lock);
@@ -4531,3 +4524,51 @@ out:
clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
return ret;
}
+
+/**
+ * i40e_get_vf_stats - populate some stats for the VF
+ * @netdev: the netdev of the PF
+ * @vf_id: the host OS identifier (0-127)
+ * @vf_stats: pointer to the OS memory to be initialized
+ */
+int i40e_get_vf_stats(struct net_device *netdev, int vf_id,
+ struct ifla_vf_stats *vf_stats)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+ struct i40e_eth_stats *stats;
+ struct i40e_vsi *vsi;
+ struct i40e_vf *vf;
+
+ /* validate the request */
+ if (i40e_validate_vf(pf, vf_id))
+ return -EINVAL;
+
+ vf = &pf->vf[vf_id];
+ if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
+ dev_err(&pf->pdev->dev, "VF %d in reset. Try again.\n", vf_id);
+ return -EBUSY;
+ }
+
+ vsi = pf->vsi[vf->lan_vsi_idx];
+ if (!vsi)
+ return -EINVAL;
+
+ i40e_update_eth_stats(vsi);
+ stats = &vsi->eth_stats;
+
+ memset(vf_stats, 0, sizeof(*vf_stats));
+
+ vf_stats->rx_packets = stats->rx_unicast + stats->rx_broadcast +
+ stats->rx_multicast;
+ vf_stats->tx_packets = stats->tx_unicast + stats->tx_broadcast +
+ stats->tx_multicast;
+ vf_stats->rx_bytes = stats->rx_bytes;
+ vf_stats->tx_bytes = stats->tx_bytes;
+ vf_stats->broadcast = stats->rx_broadcast;
+ vf_stats->multicast = stats->rx_multicast;
+ vf_stats->rx_dropped = stats->rx_discards;
+ vf_stats->tx_dropped = stats->tx_discards;
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index 7164b9bb294f..631248c0981a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -101,7 +101,6 @@ struct i40e_vf {
bool link_up; /* only valid if VF link is forced */
bool queues_enabled; /* true if the VF queues are enabled */
bool spoofchk;
- u16 num_mac;
u16 num_vlan;
/* ADq related variables */
@@ -139,5 +138,7 @@ int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable);
void i40e_vc_notify_link_state(struct i40e_pf *pf);
void i40e_vc_notify_reset(struct i40e_pf *pf);
+int i40e_get_vf_stats(struct net_device *netdev, int vf_id,
+ struct ifla_vf_stats *vf_stats);
#endif /* _I40E_VIRTCHNL_PF_H_ */
diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile
index 9edde960b4f2..7cb829132d28 100644
--- a/drivers/net/ethernet/intel/ice/Makefile
+++ b/drivers/net/ethernet/intel/ice/Makefile
@@ -13,9 +13,12 @@ ice-y := ice_main.o \
ice_nvm.o \
ice_switch.o \
ice_sched.o \
+ ice_base.o \
ice_lib.o \
+ ice_txrx_lib.o \
ice_txrx.o \
ice_flex_pipe.o \
ice_ethtool.o
ice-$(CONFIG_PCI_IOV) += ice_virtchnl_pf.o ice_sriov.o
-ice-$(CONFIG_DCB) += ice_dcb.o ice_dcb_lib.o
+ice-$(CONFIG_DCB) += ice_dcb.o ice_dcb_nl.o ice_dcb_lib.o
+ice-$(CONFIG_XDP_SOCKETS) += ice_xsk.o
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 45e100666049..f972dce8aebb 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -29,10 +29,13 @@
#include <linux/ip.h>
#include <linux/sctp.h>
#include <linux/ipv6.h>
+#include <linux/pkt_sched.h>
#include <linux/if_bridge.h>
#include <linux/ctype.h>
+#include <linux/bpf.h>
#include <linux/avf/virtchnl.h>
#include <net/ipv6.h>
+#include <net/xdp_sock.h>
#include "ice_devids.h"
#include "ice_type.h"
#include "ice_txrx.h"
@@ -42,6 +45,7 @@
#include "ice_sched.h"
#include "ice_virtchnl_pf.h"
#include "ice_sriov.h"
+#include "ice_xsk.h"
extern const char ice_drv_ver[];
#define ICE_BAR0 0
@@ -78,8 +82,7 @@ extern const char ice_drv_ver[];
#define ICE_DFLT_NETIF_M (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
-#define ICE_MAX_MTU (ICE_AQ_SET_MAC_FRAME_SIZE_MAX - \
- (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2)))
+#define ICE_MAX_MTU (ICE_AQ_SET_MAC_FRAME_SIZE_MAX - ICE_ETH_PKT_HDR_PAD)
#define ICE_UP_TABLE_TRANSLATE(val, i) \
(((val) << ICE_AQ_VSI_UP_TABLE_UP##i##_S) & \
@@ -127,6 +130,16 @@ extern const char ice_drv_ver[];
ICE_PROMISC_VLAN_TX | \
ICE_PROMISC_VLAN_RX)
+#define ice_pf_to_dev(pf) (&((pf)->pdev->dev))
+
+struct ice_txq_meta {
+ u32 q_teid; /* Tx-scheduler element identifier */
+ u16 q_id; /* Entry in VSI's txq_map bitmap */
+ u16 q_handle; /* Relative index of Tx queue within TC */
+ u16 vsi_idx; /* VSI index that Tx queue belongs to */
+ u8 tc; /* TC number that Tx queue belongs to */
+};
+
struct ice_tc_info {
u16 qoffset;
u16 qcount_tx;
@@ -169,6 +182,7 @@ enum ice_state {
__ICE_NEEDS_RESTART,
__ICE_PREPARED_FOR_RESET, /* set by driver when prepared */
__ICE_RESET_OICR_RECV, /* set by driver after rcv reset OICR */
+ __ICE_DCBNL_DEVRESET, /* set by dcbnl devreset */
__ICE_PFR_REQ, /* set by driver and peers */
__ICE_CORER_REQ, /* set by driver and peers */
__ICE_GLOBR_REQ, /* set by driver and peers */
@@ -271,9 +285,18 @@ struct ice_vsi {
u16 num_txq; /* Used Tx queues */
u16 alloc_rxq; /* Allocated Rx queues */
u16 num_rxq; /* Used Rx queues */
+ u16 req_txq; /* User requested Tx queues */
+ u16 req_rxq; /* User requested Rx queues */
u16 num_rx_desc;
u16 num_tx_desc;
struct ice_tc_cfg tc_cfg;
+ struct bpf_prog *xdp_prog;
+ struct ice_ring **xdp_rings; /* XDP ring array */
+ u16 num_xdp_txq; /* Used XDP queues */
+ u8 xdp_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */
+ struct xdp_umem **xsk_umems;
+ u16 num_xsk_umems_used;
+ u16 num_xsk_umems;
} ____cacheline_internodealigned_in_smp;
/* struct that defines an interrupt vector */
@@ -313,6 +336,7 @@ enum ice_pf_flags {
ICE_FLAG_NO_MEDIA,
ICE_FLAG_FW_LLDP_AGENT,
ICE_FLAG_ETHTOOL_CTXT, /* set when ethtool holds RTNL lock */
+ ICE_FLAG_LEGACY_RX,
ICE_PF_FLAGS_NBITS /* must be last */
};
@@ -346,6 +370,7 @@ struct ice_pf {
struct work_struct serv_task;
struct mutex avail_q_mutex; /* protects access to avail_[rx|tx]qs */
struct mutex sw_mutex; /* lock for protecting VSI alloc flow */
+ struct mutex tc_mutex; /* lock to protect TC changes */
u32 msg_enable;
u32 hw_csum_rx_error;
u32 oicr_idx; /* Other interrupt cause MSIX vector index */
@@ -417,6 +442,37 @@ static inline struct ice_pf *ice_netdev_to_pf(struct net_device *netdev)
return np->vsi->back;
}
+static inline bool ice_is_xdp_ena_vsi(struct ice_vsi *vsi)
+{
+ return !!vsi->xdp_prog;
+}
+
+static inline void ice_set_ring_xdp(struct ice_ring *ring)
+{
+ ring->flags |= ICE_TX_FLAGS_RING_XDP;
+}
+
+/**
+ * ice_xsk_umem - get XDP UMEM bound to a ring
+ * @ring: ring to use
+ *
+ * Returns a pointer to the xdp_umem structure if a UMEM is present,
+ * NULL otherwise.
+ */
+static inline struct xdp_umem *ice_xsk_umem(struct ice_ring *ring)
+{
+ struct xdp_umem **umems = ring->vsi->xsk_umems;
+ int qid = ring->q_index;
+
+ if (ice_ring_is_xdp(ring))
+ qid -= ring->vsi->num_xdp_txq;
+
+ if (!umems || !umems[qid] || !ice_is_xdp_ena_vsi(ring->vsi))
+ return NULL;
+
+ return umems[qid];
+}
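Worked example for the qid adjustment above (values illustrative): XDP Tx rings are indexed after the regular Tx queues, so with num_xdp_txq = 8 an XDP ring with q_index = 10 shares UMEM slot qid = 10 - 8 = 2 with Rx/Tx queue pair 2.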
+
/**
* ice_get_main_vsi - Get the PF VSI
* @pf: PF instance
@@ -437,20 +493,23 @@ void ice_set_ethtool_ops(struct net_device *netdev);
void ice_set_ethtool_safe_mode_ops(struct net_device *netdev);
u16 ice_get_avail_txq_count(struct ice_pf *pf);
u16 ice_get_avail_rxq_count(struct ice_pf *pf);
+int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx);
void ice_update_vsi_stats(struct ice_vsi *vsi);
void ice_update_pf_stats(struct ice_pf *pf);
int ice_up(struct ice_vsi *vsi);
int ice_down(struct ice_vsi *vsi);
int ice_vsi_cfg(struct ice_vsi *vsi);
struct ice_vsi *ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi);
+int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog);
+int ice_destroy_xdp_rings(struct ice_vsi *vsi);
+int
+ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
+ u32 flags);
int ice_set_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);
int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);
void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size);
+int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset);
void ice_print_link_msg(struct ice_vsi *vsi, bool isup);
-#ifdef CONFIG_DCB
-int ice_pf_ena_all_vsi(struct ice_pf *pf, bool locked);
-void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked);
-#endif /* CONFIG_DCB */
int ice_open(struct net_device *netdev);
int ice_stop(struct net_device *netdev);
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 023e3d2fee5f..5421fc413f94 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -742,6 +742,10 @@ struct ice_aqc_add_elem {
struct ice_aqc_txsched_elem_data generic[1];
};
+struct ice_aqc_conf_elem {
+ struct ice_aqc_txsched_elem_data generic[1];
+};
+
struct ice_aqc_get_elem {
struct ice_aqc_txsched_elem_data generic[1];
};
@@ -783,6 +787,44 @@ struct ice_aqc_port_ets_elem {
__le32 tc_node_teid[8]; /* Used for response, reserved in command */
};
+/* Rate limiting profile for
+ * Add RL profile (indirect 0x0410)
+ * Query RL profile (indirect 0x0411)
+ * Remove RL profile (indirect 0x0415)
+ * These indirect commands act on one or more
+ * RL profiles with the specified data.
+ */
+struct ice_aqc_rl_profile {
+ __le16 num_profiles;
+ __le16 num_processed; /* Only for response. Reserved in Command. */
+ u8 reserved[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+struct ice_aqc_rl_profile_elem {
+ u8 level;
+ u8 flags;
+#define ICE_AQC_RL_PROFILE_TYPE_S 0x0
+#define ICE_AQC_RL_PROFILE_TYPE_M (0x3 << ICE_AQC_RL_PROFILE_TYPE_S)
+#define ICE_AQC_RL_PROFILE_TYPE_CIR 0
+#define ICE_AQC_RL_PROFILE_TYPE_EIR 1
+#define ICE_AQC_RL_PROFILE_TYPE_SRL 2
+/* The following flag is used for Query RL Profile Data */
+#define ICE_AQC_RL_PROFILE_INVAL_S 0x7
+#define ICE_AQC_RL_PROFILE_INVAL_M (0x1 << ICE_AQC_RL_PROFILE_INVAL_S)
+
+ __le16 profile_id;
+ __le16 max_burst_size;
+ __le16 rl_multiply;
+ __le16 wake_up_calc;
+ __le16 rl_encode;
+};
+
+struct ice_aqc_rl_profile_generic_elem {
+ struct ice_aqc_rl_profile_elem generic[1];
+};
+
/* Query Scheduler Resource Allocation (indirect 0x0412)
* This indirect command retrieves the scheduler resources allocated by
* EMP Firmware to the given PF.
@@ -1044,6 +1086,10 @@ struct ice_aqc_get_link_status_data {
#define ICE_AQ_LINK_TOPO_CONFLICT BIT(0)
#define ICE_AQ_LINK_MEDIA_CONFLICT BIT(1)
#define ICE_AQ_LINK_TOPO_CORRUPT BIT(2)
+#define ICE_AQ_LINK_TOPO_UNREACH_PRT BIT(4)
+#define ICE_AQ_LINK_TOPO_UNDRUTIL_PRT BIT(5)
+#define ICE_AQ_LINK_TOPO_UNDRUTIL_MEDIA BIT(6)
+#define ICE_AQ_LINK_TOPO_UNSUPP_MEDIA BIT(7)
u8 reserved1;
u8 link_info;
#define ICE_AQ_LINK_UP BIT(0) /* Link Status */
@@ -1147,6 +1193,33 @@ struct ice_aqc_set_port_id_led {
u8 rsvd[13];
};
+/* Read/Write SFF EEPROM command (indirect 0x06EE) */
+struct ice_aqc_sff_eeprom {
+ u8 lport_num;
+ u8 lport_num_valid;
+#define ICE_AQC_SFF_PORT_NUM_VALID BIT(0)
+ __le16 i2c_bus_addr;
+#define ICE_AQC_SFF_I2CBUS_7BIT_M 0x7F
+#define ICE_AQC_SFF_I2CBUS_10BIT_M 0x3FF
+#define ICE_AQC_SFF_I2CBUS_TYPE_M BIT(10)
+#define ICE_AQC_SFF_I2CBUS_TYPE_7BIT 0
+#define ICE_AQC_SFF_I2CBUS_TYPE_10BIT ICE_AQC_SFF_I2CBUS_TYPE_M
+#define ICE_AQC_SFF_SET_EEPROM_PAGE_S 11
+#define ICE_AQC_SFF_SET_EEPROM_PAGE_M (0x3 << ICE_AQC_SFF_SET_EEPROM_PAGE_S)
+#define ICE_AQC_SFF_NO_PAGE_CHANGE 0
+#define ICE_AQC_SFF_SET_23_ON_MISMATCH 1
+#define ICE_AQC_SFF_SET_22_ON_MISMATCH 2
+#define ICE_AQC_SFF_IS_WRITE BIT(15)
+ __le16 i2c_mem_addr;
+ __le16 eeprom_page;
+#define ICE_AQC_SFF_EEPROM_BANK_S 0
+#define ICE_AQC_SFF_EEPROM_BANK_M (0xFF << ICE_AQC_SFF_EEPROM_BANK_S)
+#define ICE_AQC_SFF_EEPROM_PAGE_S 8
+#define ICE_AQC_SFF_EEPROM_PAGE_M (0xFF << ICE_AQC_SFF_EEPROM_PAGE_S)
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
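A hedged sketch of filling this descriptor for a single-page SFP read; ice_fill_dflt_direct_cmd_desc and the read_write_sff_param member are real (the member is added to struct ice_aq_desc below), but the routine as a whole is illustrative, not this patch's implementation:

	struct ice_aqc_sff_eeprom *cmd;
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom);
	cmd = &desc.params.read_write_sff_param;
	cmd->lport_num = lport;		/* assumed local */
	cmd->lport_num_valid = ICE_AQC_SFF_PORT_NUM_VALID;
	cmd->i2c_bus_addr = cpu_to_le16(bus_addr &	/* e.g. 0x50 */
					ICE_AQC_SFF_I2CBUS_7BIT_M);
	cmd->i2c_mem_addr = cpu_to_le16(mem_addr);
	cmd->eeprom_page = cpu_to_le16(page << ICE_AQC_SFF_EEPROM_PAGE_S);
	/* ICE_AQC_SFF_IS_WRITE stays clear for a read */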
/* NVM Read command (indirect 0x0701)
* NVM Erase commands (direct 0x0702)
* NVM Update commands (indirect 0x0703)
@@ -1618,6 +1691,7 @@ struct ice_aq_desc {
struct ice_aqc_get_phy_caps get_phy;
struct ice_aqc_set_phy_cfg set_phy;
struct ice_aqc_restart_an restart_an;
+ struct ice_aqc_sff_eeprom read_write_sff_param;
struct ice_aqc_set_port_id_led set_port_id_led;
struct ice_aqc_get_sw_cfg get_sw_conf;
struct ice_aqc_sw_rules sw_rules;
@@ -1625,6 +1699,7 @@ struct ice_aq_desc {
struct ice_aqc_sched_elem_cmd sched_elem_cmd;
struct ice_aqc_query_txsched_res query_sched_res;
struct ice_aqc_query_port_ets port_ets;
+ struct ice_aqc_rl_profile rl_profile;
struct ice_aqc_nvm nvm;
struct ice_aqc_nvm_checksum nvm_checksum;
struct ice_aqc_pf_vf_msg virt;
@@ -1726,12 +1801,15 @@ enum ice_adminq_opc {
/* transmit scheduler commands */
ice_aqc_opc_get_dflt_topo = 0x0400,
ice_aqc_opc_add_sched_elems = 0x0401,
+ ice_aqc_opc_cfg_sched_elems = 0x0403,
ice_aqc_opc_get_sched_elems = 0x0404,
ice_aqc_opc_suspend_sched_elems = 0x0409,
ice_aqc_opc_resume_sched_elems = 0x040A,
ice_aqc_opc_query_port_ets = 0x040E,
ice_aqc_opc_delete_sched_elems = 0x040F,
+ ice_aqc_opc_add_rl_profiles = 0x0410,
ice_aqc_opc_query_sched_res = 0x0412,
+ ice_aqc_opc_remove_rl_profiles = 0x0415,
/* PHY commands */
ice_aqc_opc_get_phy_caps = 0x0600,
@@ -1741,6 +1819,7 @@ enum ice_adminq_opc {
ice_aqc_opc_set_event_mask = 0x0613,
ice_aqc_opc_set_mac_lb = 0x0620,
ice_aqc_opc_set_port_id_led = 0x06E9,
+ ice_aqc_opc_sff_eeprom = 0x06EE,
/* NVM commands */
ice_aqc_opc_nvm_read = 0x0701,
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
new file mode 100644
index 000000000000..77d6a0291e97
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -0,0 +1,859 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019, Intel Corporation. */
+
+#include "ice_base.h"
+#include "ice_dcb_lib.h"
+
+/**
+ * __ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI
+ * @qs_cfg: gathered variables needed for PF->VSI queues assignment
+ *
+ * Return 0 on success and -ENOMEM if there is no space left in the PF queue bitmap
+ */
+static int __ice_vsi_get_qs_contig(struct ice_qs_cfg *qs_cfg)
+{
+ int offset, i;
+
+ mutex_lock(qs_cfg->qs_mutex);
+ offset = bitmap_find_next_zero_area(qs_cfg->pf_map, qs_cfg->pf_map_size,
+ 0, qs_cfg->q_count, 0);
+ if (offset >= qs_cfg->pf_map_size) {
+ mutex_unlock(qs_cfg->qs_mutex);
+ return -ENOMEM;
+ }
+
+ bitmap_set(qs_cfg->pf_map, offset, qs_cfg->q_count);
+ for (i = 0; i < qs_cfg->q_count; i++)
+ qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = i + offset;
+ mutex_unlock(qs_cfg->qs_mutex);
+
+ return 0;
+}
+
+/**
+ * __ice_vsi_get_qs_sc - Assign scattered queues from PF to VSI
+ * @qs_cfg: gathered variables needed for pf->vsi queues assignment
+ *
+ * Return 0 on success and -ENOMEM if there is no space left in the PF queue bitmap
+ */
+static int __ice_vsi_get_qs_sc(struct ice_qs_cfg *qs_cfg)
+{
+ int i, index = 0;
+
+ mutex_lock(qs_cfg->qs_mutex);
+ for (i = 0; i < qs_cfg->q_count; i++) {
+ index = find_next_zero_bit(qs_cfg->pf_map,
+ qs_cfg->pf_map_size, index);
+ if (index >= qs_cfg->pf_map_size)
+ goto err_scatter;
+ set_bit(index, qs_cfg->pf_map);
+ qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = index;
+ }
+ mutex_unlock(qs_cfg->qs_mutex);
+
+ return 0;
+err_scatter:
+ for (index = 0; index < i; index++) {
+ clear_bit(qs_cfg->vsi_map[index], qs_cfg->pf_map);
+ qs_cfg->vsi_map[index + qs_cfg->vsi_map_offset] = 0;
+ }
+ mutex_unlock(qs_cfg->qs_mutex);
+
+ return -ENOMEM;
+}
+
+/**
+ * ice_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
+ * @pf: the PF being configured
+ * @pf_q: the PF queue
+ * @ena: enable or disable state of the queue
+ *
+ * This routine will wait for the given Rx queue of the PF to reach the
+ * enabled or disabled state.
+ * Returns -ETIMEDOUT if the queue fails to reach the requested state after
+ * multiple retries; otherwise returns 0.
+ */
+static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena)
+{
+ int i;
+
+ for (i = 0; i < ICE_Q_WAIT_MAX_RETRY; i++) {
+ if (ena == !!(rd32(&pf->hw, QRX_CTRL(pf_q)) &
+ QRX_CTRL_QENA_STAT_M))
+ return 0;
+
+ usleep_range(20, 40);
+ }
+
+ return -ETIMEDOUT;
+}
+
+/**
+ * ice_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
+ * @vsi: the VSI being configured
+ * @v_idx: index of the vector in the VSI struct
+ *
+ * We allocate one q_vector. If allocation fails we return -ENOMEM.
+ */
+static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, int v_idx)
+{
+ struct ice_pf *pf = vsi->back;
+ struct ice_q_vector *q_vector;
+
+ /* allocate q_vector */
+ q_vector = devm_kzalloc(ice_pf_to_dev(pf), sizeof(*q_vector),
+ GFP_KERNEL);
+ if (!q_vector)
+ return -ENOMEM;
+
+ q_vector->vsi = vsi;
+ q_vector->v_idx = v_idx;
+ if (vsi->type == ICE_VSI_VF)
+ goto out;
+ /* only set affinity_mask if the CPU is online */
+ if (cpu_online(v_idx))
+ cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
+
+ /* This will not be called in the driver load path because the netdev
+ * will not be created yet. All other cases will register the NAPI
+ * handler here (i.e. resume, reset/rebuild, etc.)
+ */
+ if (vsi->netdev)
+ netif_napi_add(vsi->netdev, &q_vector->napi, ice_napi_poll,
+ NAPI_POLL_WEIGHT);
+
+out:
+ /* tie q_vector and VSI together */
+ vsi->q_vectors[v_idx] = q_vector;
+
+ return 0;
+}
+
+/**
+ * ice_free_q_vector - Free memory allocated for a specific interrupt vector
+ * @vsi: VSI having the memory freed
+ * @v_idx: index of the vector to be freed
+ */
+static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx)
+{
+ struct ice_q_vector *q_vector;
+ struct ice_pf *pf = vsi->back;
+ struct ice_ring *ring;
+ struct device *dev;
+
+ dev = ice_pf_to_dev(pf);
+ if (!vsi->q_vectors[v_idx]) {
+ dev_dbg(dev, "Queue vector at index %d not found\n", v_idx);
+ return;
+ }
+ q_vector = vsi->q_vectors[v_idx];
+
+ ice_for_each_ring(ring, q_vector->tx)
+ ring->q_vector = NULL;
+ ice_for_each_ring(ring, q_vector->rx)
+ ring->q_vector = NULL;
+
+ /* only VSI with an associated netdev is set up with NAPI */
+ if (vsi->netdev)
+ netif_napi_del(&q_vector->napi);
+
+ devm_kfree(dev, q_vector);
+ vsi->q_vectors[v_idx] = NULL;
+}
+
+/**
+ * ice_cfg_itr_gran - set the ITR granularity to 2 usecs if not already set
+ * @hw: board specific structure
+ */
+static void ice_cfg_itr_gran(struct ice_hw *hw)
+{
+ u32 regval = rd32(hw, GLINT_CTL);
+
+ /* no need to update global register if ITR gran is already set */
+ if (!(regval & GLINT_CTL_DIS_AUTOMASK_M) &&
+ (((regval & GLINT_CTL_ITR_GRAN_200_M) >>
+ GLINT_CTL_ITR_GRAN_200_S) == ICE_ITR_GRAN_US) &&
+ (((regval & GLINT_CTL_ITR_GRAN_100_M) >>
+ GLINT_CTL_ITR_GRAN_100_S) == ICE_ITR_GRAN_US) &&
+ (((regval & GLINT_CTL_ITR_GRAN_50_M) >>
+ GLINT_CTL_ITR_GRAN_50_S) == ICE_ITR_GRAN_US) &&
+ (((regval & GLINT_CTL_ITR_GRAN_25_M) >>
+ GLINT_CTL_ITR_GRAN_25_S) == ICE_ITR_GRAN_US))
+ return;
+
+ regval = ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_200_S) &
+ GLINT_CTL_ITR_GRAN_200_M) |
+ ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_100_S) &
+ GLINT_CTL_ITR_GRAN_100_M) |
+ ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_50_S) &
+ GLINT_CTL_ITR_GRAN_50_M) |
+ ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_25_S) &
+ GLINT_CTL_ITR_GRAN_25_M);
+ wr32(hw, GLINT_CTL, regval);
+}
+
+/**
+ * ice_calc_q_handle - calculate the queue handle
+ * @vsi: VSI that ring belongs to
+ * @ring: ring to get the absolute queue index
+ * @tc: traffic class number
+ */
+static u16 ice_calc_q_handle(struct ice_vsi *vsi, struct ice_ring *ring, u8 tc)
+{
+ WARN_ONCE(ice_ring_is_xdp(ring) && tc,
+ "XDP ring can't belong to TC other than 0");
+
+ /* The idea behind the calculation is to subtract the queue offset of
+ * the TC that the ring belongs to from the ring's absolute queue
+ * index; the result is the queue's index within that TC.
+ */
+ return ring->q_index - vsi->tc_cfg.tc_info[tc].qoffset;
+}
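For example (illustrative numbers): if TC 1 starts at qoffset = 8 and the ring's absolute q_index is 11, the handle returned is 11 - 8 = 3, i.e. the fourth queue within that TC.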
+
+/**
+ * ice_setup_tx_ctx - setup a struct ice_tlan_ctx instance
+ * @ring: The Tx ring to configure
+ * @tlan_ctx: Pointer to the Tx LAN queue context structure to be initialized
+ * @pf_q: queue index in the PF space
+ *
+ * Configure the Tx descriptor ring in TLAN context.
+ */
+static void
+ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
+{
+ struct ice_vsi *vsi = ring->vsi;
+ struct ice_hw *hw = &vsi->back->hw;
+
+ tlan_ctx->base = ring->dma >> ICE_TLAN_CTX_BASE_S;
+
+ tlan_ctx->port_num = vsi->port_info->lport;
+
+ /* Transmit Queue Length */
+ tlan_ctx->qlen = ring->count;
+
+ ice_set_cgd_num(tlan_ctx, ring);
+
+ /* PF number */
+ tlan_ctx->pf_num = hw->pf_id;
+
+ /* queue belongs to a specific VSI type
+ * VF / VM index should be programmed per vmvf_type setting:
+ * for vmvf_type = VF, it is VF number between 0-256
+ * for vmvf_type = VM, it is VM number between 0-767
+ * for PF or EMP this field should be set to zero
+ */
+ switch (vsi->type) {
+ case ICE_VSI_LB:
+ /* fall through */
+ case ICE_VSI_PF:
+ tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
+ break;
+ case ICE_VSI_VF:
+ /* Firmware expects vmvf_num to be absolute VF ID */
+ tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf_id;
+ tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF;
+ break;
+ default:
+ return;
+ }
+
+ /* make sure the context is associated with the right VSI */
+ tlan_ctx->src_vsi = ice_get_hw_vsi_num(hw, vsi->idx);
+
+ tlan_ctx->tso_ena = ICE_TX_LEGACY;
+ tlan_ctx->tso_qnum = pf_q;
+
+ /* Legacy or Advanced Host Interface:
+ * 0: Advanced Host Interface
+ * 1: Legacy Host Interface
+ */
+ tlan_ctx->legacy_int = ICE_TX_LEGACY;
+}
+
+/**
+ * ice_setup_rx_ctx - Configure a receive ring context
+ * @ring: The Rx ring to configure
+ *
+ * Configure the Rx descriptor ring in RLAN context.
+ */
+int ice_setup_rx_ctx(struct ice_ring *ring)
+{
+ int chain_len = ICE_MAX_CHAINED_RX_BUFS;
+ struct ice_vsi *vsi = ring->vsi;
+ u32 rxdid = ICE_RXDID_FLEX_NIC;
+ struct ice_rlan_ctx rlan_ctx;
+ struct ice_hw *hw;
+ u32 regval;
+ u16 pf_q;
+ int err;
+
+ hw = &vsi->back->hw;
+
+ /* which Rx queue is this within the global space of 2K Rx queues */
+ pf_q = vsi->rxq_map[ring->q_index];
+
+ /* clear the context structure first */
+ memset(&rlan_ctx, 0, sizeof(rlan_ctx));
+
+ ring->rx_buf_len = vsi->rx_buf_len;
+
+ if (ring->vsi->type == ICE_VSI_PF) {
+ if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
+ xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
+ ring->q_index);
+
+ ring->xsk_umem = ice_xsk_umem(ring);
+ if (ring->xsk_umem) {
+ xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
+
+ ring->rx_buf_len = ring->xsk_umem->chunk_size_nohr -
+ XDP_PACKET_HEADROOM;
+ /* For AF_XDP ZC, we disallow packets to span on
+ * multiple buffers, thus letting us skip that
+ * handling in the fast-path.
+ */
+ chain_len = 1;
+ ring->zca.free = ice_zca_free;
+ err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+ MEM_TYPE_ZERO_COPY,
+ &ring->zca);
+ if (err)
+ return err;
+
+ dev_info(&vsi->back->pdev->dev, "Registered XDP mem model MEM_TYPE_ZERO_COPY on Rx ring %d\n",
+ ring->q_index);
+ } else {
+ if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
+ xdp_rxq_info_reg(&ring->xdp_rxq,
+ ring->netdev,
+ ring->q_index);
+
+ err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+ MEM_TYPE_PAGE_SHARED,
+ NULL);
+ if (err)
+ return err;
+ }
+ }
+ /* Receive Queue Base Address.
+ * Indicates the starting address of the descriptor queue defined in
+ * 128 Byte units.
+ */
+ rlan_ctx.base = ring->dma >> 7;
+
+ rlan_ctx.qlen = ring->count;
+
+ /* Receive Packet Data Buffer Size.
+ * The Packet Data Buffer Size is defined in 128 byte units.
+ */
+ rlan_ctx.dbuf = ring->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
+
+ /* use 32 byte descriptors */
+ rlan_ctx.dsize = 1;
+
+ /* Strip the Ethernet CRC bytes before the packet is posted to host
+ * memory.
+ */
+ rlan_ctx.crcstrip = 1;
+
+ /* L2TSEL flag defines the reported L2 Tags in the receive descriptor */
+ rlan_ctx.l2tsel = 1;
+
+ rlan_ctx.dtype = ICE_RX_DTYPE_NO_SPLIT;
+ rlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_NO_SPLIT;
+ rlan_ctx.hsplit_1 = ICE_RLAN_RX_HSPLIT_1_NO_SPLIT;
+
+ /* This controls whether VLAN is stripped from inner headers
+ * The VLAN in the inner L2 header is stripped to the receive
+ * descriptor if enabled by this flag.
+ */
+ rlan_ctx.showiv = 0;
+
+ /* Max packet size for this queue - must not be set to a larger value
+ * than 5 x DBUF
+ */
+ rlan_ctx.rxmax = min_t(u16, vsi->max_frame,
+ chain_len * ring->rx_buf_len);
+
+ /* Rx queue threshold in units of 64 */
+ rlan_ctx.lrxqthresh = 1;
+
+ /* Enable Flexible Descriptors in the queue context which
+ * allows this driver to select a specific receive descriptor format
+ */
+ if (vsi->type != ICE_VSI_VF) {
+ regval = rd32(hw, QRXFLXP_CNTXT(pf_q));
+ regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
+ QRXFLXP_CNTXT_RXDID_IDX_M;
+
+ /* increasing context priority to pick up profile ID;
+ * default is 0x01; setting to 0x03 to ensure the profile
+ * is programmed if the previous context is of the same priority
+ */
+ regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
+ QRXFLXP_CNTXT_RXDID_PRIO_M;
+
+ wr32(hw, QRXFLXP_CNTXT(pf_q), regval);
+ }
+
+ /* Absolute queue number out of 2K needs to be passed */
+ err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);
+ if (err) {
+ dev_err(&vsi->back->pdev->dev,
+ "Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n",
+ pf_q, err);
+ return -EIO;
+ }
+
+ if (vsi->type == ICE_VSI_VF)
+ return 0;
+
+ /* configure Rx buffer alignment */
+ if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags))
+ ice_clear_ring_build_skb_ena(ring);
+ else
+ ice_set_ring_build_skb_ena(ring);
+
+ /* init queue specific tail register */
+ ring->tail = hw->hw_addr + QRX_TAIL(pf_q);
+ writel(0, ring->tail);
+
+ err = ring->xsk_umem ?
+ ice_alloc_rx_bufs_slow_zc(ring, ICE_DESC_UNUSED(ring)) :
+ ice_alloc_rx_bufs(ring, ICE_DESC_UNUSED(ring));
+ if (err)
+ dev_info(&vsi->back->pdev->dev,
+ "Failed allocate some buffers on %sRx ring %d (pf_q %d)\n",
+ ring->xsk_umem ? "UMEM enabled " : "",
+ ring->q_index, pf_q);
+
+ return 0;
+}
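A quick worked example of the rxmax cap above (sizes illustrative): with max_frame = 9728, rx_buf_len = 2048 and chain_len = ICE_MAX_CHAINED_RX_BUFS (5), rxmax = min(9728, 10240) = 9728; on the AF_XDP zero-copy path chain_len = 1, so the cap drops to a single UMEM chunk.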
+
+/**
+ * __ice_vsi_get_qs - helper function for assigning queues from PF to VSI
+ * @qs_cfg: gathered variables needed for pf->vsi queues assignment
+ *
+ * This function first tries to find contiguous space. If that is not
+ * successful, it falls back to the scatter approach.
+ *
+ * Return 0 on success and -ENOMEM if there is no space left in the PF queue bitmap
+ */
+int __ice_vsi_get_qs(struct ice_qs_cfg *qs_cfg)
+{
+ int ret = 0;
+
+ ret = __ice_vsi_get_qs_contig(qs_cfg);
+ if (ret) {
+ /* contig failed, so try with scatter approach */
+ qs_cfg->mapping_mode = ICE_VSI_MAP_SCATTER;
+ qs_cfg->q_count = min_t(u16, qs_cfg->q_count,
+ qs_cfg->scatter_count);
+ ret = __ice_vsi_get_qs_sc(qs_cfg);
+ }
+ return ret;
+}
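A hedged standalone sketch of the two bitmap strategies the helpers above combine (pf_map, map_size and q_count are assumed locals; locking and VSI-map bookkeeping omitted):

	/* contiguous: claim one zero run of q_count bits, or fail */
	offset = bitmap_find_next_zero_area(pf_map, map_size, 0, q_count, 0);
	if (offset < map_size)
		bitmap_set(pf_map, offset, q_count);

	/* scatter: claim any q_count free bits, one at a time */
	for (i = 0; i < q_count; i++) {
		index = find_next_zero_bit(pf_map, map_size, index);
		if (index >= map_size)
			break;	/* caller must roll back bits already taken */
		set_bit(index, pf_map);
	}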
+
+/**
+ * ice_vsi_ctrl_rx_ring - Start or stop a VSI's Rx ring
+ * @vsi: the VSI being configured
+ * @ena: start or stop the Rx rings
+ * @rxq_idx: Rx queue index
+ */
+int ice_vsi_ctrl_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx)
+{
+ int pf_q = vsi->rxq_map[rxq_idx];
+ struct ice_pf *pf = vsi->back;
+ struct ice_hw *hw = &pf->hw;
+ int ret = 0;
+ u32 rx_reg;
+
+ rx_reg = rd32(hw, QRX_CTRL(pf_q));
+
+ /* Skip if the queue is already in the requested state */
+ if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M))
+ return 0;
+
+ /* turn on/off the queue */
+ if (ena)
+ rx_reg |= QRX_CTRL_QENA_REQ_M;
+ else
+ rx_reg &= ~QRX_CTRL_QENA_REQ_M;
+ wr32(hw, QRX_CTRL(pf_q), rx_reg);
+
+ /* wait for the change to finish */
+ ret = ice_pf_rxq_wait(pf, pf_q, ena);
+ if (ret)
+ dev_err(ice_pf_to_dev(pf),
+ "VSI idx %d Rx ring %d %sable timeout\n",
+ vsi->idx, pf_q, (ena ? "en" : "dis"));
+
+ return ret;
+}
+
+/**
+ * ice_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
+ * @vsi: the VSI being configured
+ *
+ * We allocate one q_vector per queue interrupt. If allocation fails we
+ * return -ENOMEM.
+ */
+int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi)
+{
+ struct ice_pf *pf = vsi->back;
+ int v_idx = 0, num_q_vectors;
+ struct device *dev;
+ int err;
+
+ dev = ice_pf_to_dev(pf);
+ if (vsi->q_vectors[0]) {
+ dev_dbg(dev, "VSI %d has existing q_vectors\n", vsi->vsi_num);
+ return -EEXIST;
+ }
+
+ num_q_vectors = vsi->num_q_vectors;
+
+ for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
+ err = ice_vsi_alloc_q_vector(vsi, v_idx);
+ if (err)
+ goto err_out;
+ }
+
+ return 0;
+
+err_out:
+ while (v_idx--)
+ ice_free_q_vector(vsi, v_idx);
+
+ dev_err(dev, "Failed to allocate %d q_vector for VSI %d, ret=%d\n",
+ vsi->num_q_vectors, vsi->vsi_num, err);
+ vsi->num_q_vectors = 0;
+ return err;
+}
+
+/**
+ * ice_vsi_map_rings_to_vectors - Map VSI rings to interrupt vectors
+ * @vsi: the VSI being configured
+ *
+ * This function maps descriptor rings to the queue-specific vectors allotted
+ * through the MSI-X enabling code. On a constrained vector budget, we map Tx
+ * and Rx rings to the vector as "efficiently" as possible.
+ */
+void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
+{
+ int q_vectors = vsi->num_q_vectors;
+ int tx_rings_rem, rx_rings_rem;
+ int v_id;
+
+ /* initialize the remaining ring counts to the VSI's queue totals */
+ tx_rings_rem = vsi->num_txq;
+ rx_rings_rem = vsi->num_rxq;
+
+ for (v_id = 0; v_id < q_vectors; v_id++) {
+ struct ice_q_vector *q_vector = vsi->q_vectors[v_id];
+ int tx_rings_per_v, rx_rings_per_v, q_id, q_base;
+
+ /* Tx rings mapping to vector */
+ tx_rings_per_v = DIV_ROUND_UP(tx_rings_rem, q_vectors - v_id);
+ q_vector->num_ring_tx = tx_rings_per_v;
+ q_vector->tx.ring = NULL;
+ q_vector->tx.itr_idx = ICE_TX_ITR;
+ q_base = vsi->num_txq - tx_rings_rem;
+
+ for (q_id = q_base; q_id < (q_base + tx_rings_per_v); q_id++) {
+ struct ice_ring *tx_ring = vsi->tx_rings[q_id];
+
+ tx_ring->q_vector = q_vector;
+ tx_ring->next = q_vector->tx.ring;
+ q_vector->tx.ring = tx_ring;
+ }
+ tx_rings_rem -= tx_rings_per_v;
+
+ /* Rx rings mapping to vector */
+ rx_rings_per_v = DIV_ROUND_UP(rx_rings_rem, q_vectors - v_id);
+ q_vector->num_ring_rx = rx_rings_per_v;
+ q_vector->rx.ring = NULL;
+ q_vector->rx.itr_idx = ICE_RX_ITR;
+ q_base = vsi->num_rxq - rx_rings_rem;
+
+ for (q_id = q_base; q_id < (q_base + rx_rings_per_v); q_id++) {
+ struct ice_ring *rx_ring = vsi->rx_rings[q_id];
+
+ rx_ring->q_vector = q_vector;
+ rx_ring->next = q_vector->rx.ring;
+ q_vector->rx.ring = rx_ring;
+ }
+ rx_rings_rem -= rx_rings_per_v;
+ }
+}
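Worked example for the DIV_ROUND_UP distribution above (counts illustrative): with num_txq = 10 and 4 vectors, vector 0 takes ceil(10/4) = 3 rings, leaving 7 for 3 vectors; the rest take ceil(7/3) = 3, ceil(4/2) = 2 and ceil(2/1) = 2, so the split is 3/3/2/2.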
+
+/**
+ * ice_vsi_free_q_vectors - Free memory allocated for interrupt vectors
+ * @vsi: the VSI having memory freed
+ */
+void ice_vsi_free_q_vectors(struct ice_vsi *vsi)
+{
+ int v_idx;
+
+ ice_for_each_q_vector(vsi, v_idx)
+ ice_free_q_vector(vsi, v_idx);
+}
+
+/**
+ * ice_vsi_cfg_txq - Configure single Tx queue
+ * @vsi: the VSI that queue belongs to
+ * @ring: Tx ring to be configured
+ * @qg_buf: queue group buffer
+ */
+int
+ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring,
+ struct ice_aqc_add_tx_qgrp *qg_buf)
+{
+ struct ice_tlan_ctx tlan_ctx = { 0 };
+ struct ice_aqc_add_txqs_perq *txq;
+ struct ice_pf *pf = vsi->back;
+ u8 buf_len = sizeof(*qg_buf);
+ enum ice_status status;
+ u16 pf_q;
+ u8 tc;
+
+ pf_q = ring->reg_idx;
+ ice_setup_tx_ctx(ring, &tlan_ctx, pf_q);
+ /* copy context contents into the qg_buf */
+ qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
+ ice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
+ ice_tlan_ctx_info);
+
+ /* init queue specific tail reg. It is referred to as the
+ * transmit comm scheduler queue doorbell.
+ */
+ ring->tail = pf->hw.hw_addr + QTX_COMM_DBELL(pf_q);
+
+ if (IS_ENABLED(CONFIG_DCB))
+ tc = ring->dcb_tc;
+ else
+ tc = 0;
+
+ /* Add unique software queue handle of the Tx queue per
+ * TC into the VSI Tx ring
+ */
+ ring->q_handle = ice_calc_q_handle(vsi, ring, tc);
+
+ status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc, ring->q_handle,
+ 1, qg_buf, buf_len, NULL);
+ if (status) {
+ dev_err(ice_pf_to_dev(pf),
+ "Failed to set LAN Tx queue context, error: %d\n",
+ status);
+ return -ENODEV;
+ }
+
+ /* Add Tx Queue TEID into the VSI Tx ring from the
+ * response. This will complete configuring and
+ * enabling the queue.
+ */
+ txq = &qg_buf->txqs[0];
+ if (pf_q == le16_to_cpu(txq->txq_id))
+ ring->txq_teid = le32_to_cpu(txq->q_teid);
+
+ return 0;
+}
+
+/**
+ * ice_cfg_itr - configure the initial interrupt throttle values
+ * @hw: pointer to the HW structure
+ * @q_vector: interrupt vector that's being configured
+ *
+ * Configure interrupt throttling values for the ring containers that are
+ * associated with the interrupt vector passed in.
+ */
+void ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector)
+{
+ ice_cfg_itr_gran(hw);
+
+ if (q_vector->num_ring_rx) {
+ struct ice_ring_container *rc = &q_vector->rx;
+
+ /* if this value is set then don't overwrite with default */
+ if (!rc->itr_setting)
+ rc->itr_setting = ICE_DFLT_RX_ITR;
+
+ rc->target_itr = ITR_TO_REG(rc->itr_setting);
+ rc->next_update = jiffies + 1;
+ rc->current_itr = rc->target_itr;
+ wr32(hw, GLINT_ITR(rc->itr_idx, q_vector->reg_idx),
+ ITR_REG_ALIGN(rc->current_itr) >> ICE_ITR_GRAN_S);
+ }
+
+ if (q_vector->num_ring_tx) {
+ struct ice_ring_container *rc = &q_vector->tx;
+
+ /* if this value is set then don't overwrite with default */
+ if (!rc->itr_setting)
+ rc->itr_setting = ICE_DFLT_TX_ITR;
+
+ rc->target_itr = ITR_TO_REG(rc->itr_setting);
+ rc->next_update = jiffies + 1;
+ rc->current_itr = rc->target_itr;
+ wr32(hw, GLINT_ITR(rc->itr_idx, q_vector->reg_idx),
+ ITR_REG_ALIGN(rc->current_itr) >> ICE_ITR_GRAN_S);
+ }
+}
+
+/**
+ * ice_cfg_txq_interrupt - configure interrupt on Tx queue
+ * @vsi: the VSI being configured
+ * @txq: Tx queue being mapped to MSI-X vector
+ * @msix_idx: MSI-X vector index within the function
+ * @itr_idx: ITR index of the interrupt cause
+ *
+ * Configure interrupt on Tx queue by associating Tx queue to MSI-X vector
+ * within the function space.
+ */
+void
+ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx)
+{
+ struct ice_pf *pf = vsi->back;
+ struct ice_hw *hw = &pf->hw;
+ u32 val;
+
+ itr_idx = (itr_idx << QINT_TQCTL_ITR_INDX_S) & QINT_TQCTL_ITR_INDX_M;
+
+ val = QINT_TQCTL_CAUSE_ENA_M | itr_idx |
+ ((msix_idx << QINT_TQCTL_MSIX_INDX_S) & QINT_TQCTL_MSIX_INDX_M);
+
+ wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val);
+ if (ice_is_xdp_ena_vsi(vsi)) {
+ u32 xdp_txq = txq + vsi->num_xdp_txq;
+
+ wr32(hw, QINT_TQCTL(vsi->txq_map[xdp_txq]),
+ val);
+ }
+ ice_flush(hw);
+}
+
+/**
+ * ice_cfg_rxq_interrupt - configure interrupt on Rx queue
+ * @vsi: the VSI being configured
+ * @rxq: Rx queue being mapped to MSI-X vector
+ * @msix_idx: MSI-X vector index within the function
+ * @itr_idx: ITR index of the interrupt cause
+ *
+ * Configure interrupt on Rx queue by associating Rx queue to MSI-X vector
+ * within the function space.
+ */
+void
+ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx)
+{
+ struct ice_pf *pf = vsi->back;
+ struct ice_hw *hw = &pf->hw;
+ u32 val;
+
+ itr_idx = (itr_idx << QINT_RQCTL_ITR_INDX_S) & QINT_RQCTL_ITR_INDX_M;
+
+ val = QINT_RQCTL_CAUSE_ENA_M | itr_idx |
+ ((msix_idx << QINT_RQCTL_MSIX_INDX_S) & QINT_RQCTL_MSIX_INDX_M);
+
+ wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val);
+
+ ice_flush(hw);
+}
+
+/**
+ * ice_trigger_sw_intr - trigger a software interrupt
+ * @hw: pointer to the HW structure
+ * @q_vector: interrupt vector to trigger the software interrupt for
+ */
+void ice_trigger_sw_intr(struct ice_hw *hw, struct ice_q_vector *q_vector)
+{
+ wr32(hw, GLINT_DYN_CTL(q_vector->reg_idx),
+ (ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S) |
+ GLINT_DYN_CTL_SWINT_TRIG_M |
+ GLINT_DYN_CTL_INTENA_M);
+}
+
+/**
+ * ice_vsi_stop_tx_ring - Disable single Tx ring
+ * @vsi: the VSI being configured
+ * @rst_src: reset source
+ * @rel_vmvf_num: Relative ID of VF/VM
+ * @ring: Tx ring to be stopped
+ * @txq_meta: Metadata of the Tx ring to be stopped
+ */
+int
+ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
+ u16 rel_vmvf_num, struct ice_ring *ring,
+ struct ice_txq_meta *txq_meta)
+{
+ struct ice_pf *pf = vsi->back;
+ struct ice_q_vector *q_vector;
+ struct ice_hw *hw = &pf->hw;
+ enum ice_status status;
+ u32 val;
+
+ /* clear cause_ena bit for disabled queues */
+ val = rd32(hw, QINT_TQCTL(ring->reg_idx));
+ val &= ~QINT_TQCTL_CAUSE_ENA_M;
+ wr32(hw, QINT_TQCTL(ring->reg_idx), val);
+
+ /* software is expected to wait for 100 ns */
+ ndelay(100);
+
+ /* trigger a software interrupt for the vector
+ * associated to the queue to schedule NAPI handler
+ */
+ q_vector = ring->q_vector;
+ if (q_vector)
+ ice_trigger_sw_intr(hw, q_vector);
+
+ status = ice_dis_vsi_txq(vsi->port_info, txq_meta->vsi_idx,
+ txq_meta->tc, 1, &txq_meta->q_handle,
+ &txq_meta->q_id, &txq_meta->q_teid, rst_src,
+ rel_vmvf_num, NULL);
+
+ /* if the disable queue command was exercised during an
+ * active reset flow, ICE_ERR_RESET_ONGOING is returned.
+ * This is not an error as the reset operation disables
+ * queues at the hardware level anyway.
+ */
+ if (status == ICE_ERR_RESET_ONGOING) {
+ dev_dbg(&vsi->back->pdev->dev,
+ "Reset in progress. LAN Tx queues already disabled\n");
+ } else if (status == ICE_ERR_DOES_NOT_EXIST) {
+ dev_dbg(&vsi->back->pdev->dev,
+ "LAN Tx queues do not exist, nothing to disable\n");
+ } else if (status) {
+ dev_err(&vsi->back->pdev->dev,
+ "Failed to disable LAN Tx queues, error: %d\n", status);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_fill_txq_meta - Prepare the Tx queue's meta data
+ * @vsi: VSI that ring belongs to
+ * @ring: ring that txq_meta will be based on
+ * @txq_meta: a helper struct that wraps Tx queue's information
+ *
+ * Set up a helper struct that will contain all the necessary fields
+ * needed for stopping the Tx queue
+ */
+void
+ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_ring *ring,
+ struct ice_txq_meta *txq_meta)
+{
+ u8 tc;
+
+ if (IS_ENABLED(CONFIG_DCB))
+ tc = ring->dcb_tc;
+ else
+ tc = 0;
+
+ txq_meta->q_id = ring->reg_idx;
+ txq_meta->q_teid = ring->txq_teid;
+ txq_meta->q_handle = ring->q_handle;
+ txq_meta->vsi_idx = vsi->idx;
+ txq_meta->tc = tc;
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_base.h b/drivers/net/ethernet/intel/ice/ice_base.h
new file mode 100644
index 000000000000..407995e8e944
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_base.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019, Intel Corporation. */
+
+#ifndef _ICE_BASE_H_
+#define _ICE_BASE_H_
+
+#include "ice.h"
+
+int ice_setup_rx_ctx(struct ice_ring *ring);
+int __ice_vsi_get_qs(struct ice_qs_cfg *qs_cfg);
+int ice_vsi_ctrl_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx);
+int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi);
+void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi);
+void ice_vsi_free_q_vectors(struct ice_vsi *vsi);
+int
+ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring,
+ struct ice_aqc_add_tx_qgrp *qg_buf);
+void ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector);
+void
+ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx);
+void
+ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx);
+void ice_trigger_sw_intr(struct ice_hw *hw, struct ice_q_vector *q_vector);
+int
+ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
+ u16 rel_vmvf_num, struct ice_ring *ring,
+ struct ice_txq_meta *txq_meta);
+void
+ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_ring *ring,
+ struct ice_txq_meta *txq_meta);
+#endif /* _ICE_BASE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 3a6b3950eb0e..fb1d930470c7 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -855,6 +855,9 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
goto err_unroll_sched;
}
INIT_LIST_HEAD(&hw->agg_list);
+ /* Initialize max burst size */
+ if (!hw->max_burst_size)
+ ice_cfg_rl_burst_size(hw, ICE_SCHED_DFLT_BURST_SIZE);
status = ice_init_fltr_mgmt_struct(hw);
if (status)
@@ -1067,6 +1070,72 @@ enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
}
/**
+ * ice_get_pfa_module_tlv - Reads sub module TLV from NVM PFA
+ * @hw: pointer to hardware structure
+ * @module_tlv: pointer to module TLV to return
+ * @module_tlv_len: pointer to module TLV length to return
+ * @module_type: module type requested
+ *
+ * Finds the requested sub module TLV type from the Preserved Field
+ * Area (PFA) and returns the TLV pointer and length. The caller can
+ * use these to read the variable length TLV value.
+ */
+enum ice_status
+ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
+ u16 module_type)
+{
+ enum ice_status status;
+ u16 pfa_len, pfa_ptr;
+ u16 next_tlv;
+
+ status = ice_read_sr_word(hw, ICE_SR_PFA_PTR, &pfa_ptr);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT, "Failed to read Preserved Field Array pointer.\n");
+ return status;
+ }
+ status = ice_read_sr_word(hw, pfa_ptr, &pfa_len);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT, "Failed to read PFA length.\n");
+ return status;
+ }
+ /* Starting with first TLV after PFA length, iterate through the list
+ * of TLVs to find the requested one.
+ */
+ next_tlv = pfa_ptr + 1;
+ while (next_tlv < pfa_ptr + pfa_len) {
+ u16 tlv_sub_module_type;
+ u16 tlv_len;
+
+ /* Read TLV type */
+ status = ice_read_sr_word(hw, next_tlv, &tlv_sub_module_type);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT, "Failed to read TLV type.\n");
+ break;
+ }
+ /* Read TLV length */
+ status = ice_read_sr_word(hw, next_tlv + 1, &tlv_len);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT, "Failed to read TLV length.\n");
+ break;
+ }
+ if (tlv_sub_module_type == module_type) {
+ if (tlv_len) {
+ *module_tlv = next_tlv;
+ *module_tlv_len = tlv_len;
+ return 0;
+ }
+ return ICE_ERR_INVAL_SIZE;
+ }
+ /* Check next TLV, i.e. current TLV pointer + length + 2 words
+ * (for current TLV's type and length)
+ */
+ next_tlv = next_tlv + tlv_len + 2;
+ }
+ /* Module does not exist */
+ return ICE_ERR_DOES_NOT_EXIST;
+}
+
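The walk above assumes each TLV occupies two header words plus tlv_len value words, all counted in 16-bit NVM words. A sketch of the layout and stride it relies on (ice_next_tlv() is a hypothetical helper, shown only to make the arithmetic explicit):

    /*
     *  pfa_ptr + 0 : PFA length (in words)
     *  pfa_ptr + 1 : TLV 0 type            <- initial next_tlv
     *  pfa_ptr + 2 : TLV 0 length (words)
     *  pfa_ptr + 3 : TLV 0 value ...
     */
    static u16 ice_next_tlv(u16 cur_tlv, u16 tlv_len)
    {
        return cur_tlv + tlv_len + 2;   /* skip type, length, value */
    }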
+/**
* ice_copy_rxq_ctx_to_hw
* @hw: pointer to the hardware structure
* @ice_rxq_ctx: pointer to the rxq context
@@ -1182,56 +1251,6 @@ const struct ice_ctx_ele ice_tlan_ctx_info[] = {
{ 0 }
};
-/**
- * ice_debug_cq
- * @hw: pointer to the hardware structure
- * @mask: debug mask
- * @desc: pointer to control queue descriptor
- * @buf: pointer to command buffer
- * @buf_len: max length of buf
- *
- * Dumps debug log about control command with descriptor contents.
- */
-void
-ice_debug_cq(struct ice_hw *hw, u32 __maybe_unused mask, void *desc, void *buf,
- u16 buf_len)
-{
- struct ice_aq_desc *cq_desc = (struct ice_aq_desc *)desc;
- u16 len;
-
-#ifndef CONFIG_DYNAMIC_DEBUG
- if (!(mask & hw->debug_mask))
- return;
-#endif
-
- if (!desc)
- return;
-
- len = le16_to_cpu(cq_desc->datalen);
-
- ice_debug(hw, mask,
- "CQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
- le16_to_cpu(cq_desc->opcode),
- le16_to_cpu(cq_desc->flags),
- le16_to_cpu(cq_desc->datalen), le16_to_cpu(cq_desc->retval));
- ice_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n",
- le32_to_cpu(cq_desc->cookie_high),
- le32_to_cpu(cq_desc->cookie_low));
- ice_debug(hw, mask, "\tparam (0,1) 0x%08X 0x%08X\n",
- le32_to_cpu(cq_desc->params.generic.param0),
- le32_to_cpu(cq_desc->params.generic.param1));
- ice_debug(hw, mask, "\taddr (h,l) 0x%08X 0x%08X\n",
- le32_to_cpu(cq_desc->params.generic.addr_high),
- le32_to_cpu(cq_desc->params.generic.addr_low));
- if (buf && cq_desc->datalen != 0) {
- ice_debug(hw, mask, "Buffer:\n");
- if (buf_len < len)
- len = buf_len;
-
- ice_debug_array(hw, mask, 16, 1, (u8 *)buf, len);
- }
-}
-
/* FW Admin Queue command wrappers */
/* Software lock/mutex that is meant to be held while the Global Config Lock
@@ -1654,6 +1673,10 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
ice_debug(hw, ICE_DBG_INIT,
"%s: valid_functions (bitmap) = %d\n", prefix,
caps->valid_functions);
+
+ /* store func count for resource management purposes */
+ if (dev_p)
+ dev_p->num_funcs = hweight32(number);
break;
case ICE_AQC_CAPS_SRIOV:
caps->sr_iov_1_1 = (number == 1);
@@ -1760,6 +1783,18 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
break;
}
}
+
+ /* Re-calculate capabilities that are dependent on the number of
+ * physical ports; i.e. some features are not supported or function
+ * differently on devices with more than 4 ports.
+ */
+ if (hw->dev_caps.num_funcs > 4) {
+ /* Max 4 TCs per port */
+ caps->maxtc = 4;
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: maxtc = %d (based on #ports)\n", prefix,
+ caps->maxtc);
+ }
}
/**
@@ -1856,8 +1891,7 @@ void ice_set_safe_mode_caps(struct ice_hw *hw)
struct ice_hw_dev_caps *dev_caps = &hw->dev_caps;
u32 valid_func, rxq_first_id, txq_first_id;
u32 msix_vector_first_id, max_mtu;
- u32 num_func = 0;
- u8 i;
+ u32 num_funcs;
/* cache some func_caps values that should be restored after memset */
valid_func = func_caps->common_cap.valid_functions;
@@ -1890,6 +1924,7 @@ void ice_set_safe_mode_caps(struct ice_hw *hw)
rxq_first_id = dev_caps->common_cap.rxq_first_id;
msix_vector_first_id = dev_caps->common_cap.msix_vector_first_id;
max_mtu = dev_caps->common_cap.max_mtu;
+ num_funcs = dev_caps->num_funcs;
/* unset dev capabilities */
memset(dev_caps, 0, sizeof(*dev_caps));
@@ -1900,19 +1935,14 @@ void ice_set_safe_mode_caps(struct ice_hw *hw)
dev_caps->common_cap.rxq_first_id = rxq_first_id;
dev_caps->common_cap.msix_vector_first_id = msix_vector_first_id;
dev_caps->common_cap.max_mtu = max_mtu;
-
- /* valid_func is a bitmap. get number of functions */
-#define ICE_MAX_FUNCS 8
- for (i = 0; i < ICE_MAX_FUNCS; i++)
- if (valid_func & BIT(i))
- num_func++;
+ dev_caps->num_funcs = num_funcs;
/* one Tx and one Rx queue per function in safe mode */
- dev_caps->common_cap.num_rxq = num_func;
- dev_caps->common_cap.num_txq = num_func;
+ dev_caps->common_cap.num_rxq = num_funcs;
+ dev_caps->common_cap.num_txq = num_funcs;
/* two MSIX vectors per function */
- dev_caps->common_cap.num_msix_vectors = 2 * num_func;
+ dev_caps->common_cap.num_msix_vectors = 2 * num_funcs;
}
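The bit-count loop deleted above was a population count of the valid-function bitmap; the driver now caches hweight32(number) once as dev_caps->num_funcs in ice_parse_caps(). An equivalent standalone sketch of that count (popcount32() is a stand-in for the kernel's hweight32()):

    #include <stdint.h>

    static unsigned int popcount32(uint32_t v)
    {
        unsigned int n = 0;

        for (; v; v &= v - 1)   /* clear the lowest set bit each pass */
            n++;
        return n;
    }

    /* e.g. valid_func = 0x3C -> 4 PFs -> 4 Rx/Tx queues and
     * 8 MSI-X vectors in safe mode (one queue pair, two vectors each)
     */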
/**
@@ -2556,6 +2586,52 @@ ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
}
/**
+ * ice_aq_sff_eeprom
+ * @hw: pointer to the HW struct
+ * @lport: bits [7:0] = logical port, bit [8] = logical port valid
+ * @bus_addr: I2C bus address of the eeprom (typically 0xA0, 0=topo default)
+ * @mem_addr: I2C offset. Lower 8 bits hold the address; upper 8 bits must be zero.
+ * @page: QSFP page
+ * @set_page: set or ignore the page
+ * @data: pointer to data buffer to be read/written to the I2C device.
+ * @length: 1-16 for read, 1 for write.
+ * @write: 0 for read, 1 for write.
+ * @cd: pointer to command details structure or NULL
+ *
+ * Read/Write SFF EEPROM (0x06EE)
+ */
+enum ice_status
+ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
+ u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
+ bool write, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_sff_eeprom *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ if (!data || (mem_addr & 0xff00))
+ return ICE_ERR_PARAM;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom);
+ cmd = &desc.params.read_write_sff_param;
+ desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD | ICE_AQ_FLAG_BUF);
+ cmd->lport_num = (u8)(lport & 0xff);
+ cmd->lport_num_valid = (u8)((lport >> 8) & 0x01);
+ cmd->i2c_bus_addr = cpu_to_le16(((bus_addr >> 1) &
+ ICE_AQC_SFF_I2CBUS_7BIT_M) |
+ ((set_page <<
+ ICE_AQC_SFF_SET_EEPROM_PAGE_S) &
+ ICE_AQC_SFF_SET_EEPROM_PAGE_M));
+ cmd->i2c_mem_addr = cpu_to_le16(mem_addr & 0xff);
+ cmd->eeprom_page = cpu_to_le16((u16)page << ICE_AQC_SFF_EEPROM_PAGE_S);
+ if (write)
+ cmd->i2c_bus_addr |= cpu_to_le16(ICE_AQC_SFF_IS_WRITE);
+
+ status = ice_aq_send_cmd(hw, &desc, data, length, cd);
+ return status;
+}
+
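A sketch of reading the start of a module EEPROM with the new command (addresses follow the parameter notes above; illustrative only, error handling trimmed):

    u8 buf[16] = { 0 };
    enum ice_status status;

    /* lport 0 with bit 8 clear -> topology-default port; 0xA0 is the
     * conventional SFF EEPROM address; page 0, set_page ignored
     */
    status = ice_aq_sff_eeprom(hw, 0, 0xA0, 0x000, 0, 0, buf,
                               sizeof(buf), false, NULL);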
+/**
* __ice_aq_get_set_rss_lut
* @hw: pointer to the hardware structure
* @vsi_id: VSI FW index
@@ -3148,7 +3224,7 @@ ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
* @tc: TC number
* @q_handle: software queue handle
*/
-static struct ice_q_ctx *
+struct ice_q_ctx *
ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle)
{
struct ice_vsi_ctx *vsi;
@@ -3245,9 +3321,12 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
node.node_teid = buf->txqs[0].q_teid;
node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
q_ctx->q_handle = q_handle;
+ q_ctx->q_teid = le32_to_cpu(node.node_teid);
- /* add a leaf node into schduler tree queue layer */
+ /* add a leaf node into scheduler tree queue layer */
status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node);
+ if (!status)
+ status = ice_sched_replay_q_bw(pi, q_ctx);
ena_txq_exit:
mutex_unlock(&pi->sched_lock);
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index c3df92f57777..b22aa561e253 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -6,16 +6,18 @@
#include "ice.h"
#include "ice_type.h"
+#include "ice_nvm.h"
#include "ice_flex_pipe.h"
#include "ice_switch.h"
#include <linux/avf/virtchnl.h>
enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw);
-void
-ice_debug_cq(struct ice_hw *hw, u32 mask, void *desc, void *buf, u16 buf_len);
enum ice_status ice_init_hw(struct ice_hw *hw);
void ice_deinit_hw(struct ice_hw *hw);
+enum ice_status
+ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
+ u16 module_type);
enum ice_status ice_check_reset(struct ice_hw *hw);
enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req);
enum ice_status ice_create_all_ctrlq(struct ice_hw *hw);
@@ -117,6 +119,10 @@ ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd);
enum ice_status
ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
struct ice_sq_cd *cd);
+enum ice_status
+ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
+ u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
+ bool write, struct ice_sq_cd *cd);
enum ice_status
ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
@@ -133,6 +139,8 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle);
void ice_replay_post(struct ice_hw *hw);
void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf);
+struct ice_q_ctx *
+ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle);
void
ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
u64 *prev_stat, u64 *cur_stat);
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c
index 2353166c654e..dd946866d7b8 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.c
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.c
@@ -810,6 +810,52 @@ static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
}
/**
+ * ice_debug_cq
+ * @hw: pointer to the hardware structure
+ * @desc: pointer to control queue descriptor
+ * @buf: pointer to command buffer
+ * @buf_len: max length of buf
+ *
+ * Dumps debug log about control command with descriptor contents.
+ */
+static void ice_debug_cq(struct ice_hw *hw, void *desc, void *buf, u16 buf_len)
+{
+ struct ice_aq_desc *cq_desc = (struct ice_aq_desc *)desc;
+ u16 len;
+
+ if (!IS_ENABLED(CONFIG_DYNAMIC_DEBUG) &&
+ !((ICE_DBG_AQ_DESC | ICE_DBG_AQ_DESC_BUF) & hw->debug_mask))
+ return;
+
+ if (!desc)
+ return;
+
+ len = le16_to_cpu(cq_desc->datalen);
+
+ ice_debug(hw, ICE_DBG_AQ_DESC,
+ "CQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
+ le16_to_cpu(cq_desc->opcode),
+ le16_to_cpu(cq_desc->flags),
+ le16_to_cpu(cq_desc->datalen), le16_to_cpu(cq_desc->retval));
+ ice_debug(hw, ICE_DBG_AQ_DESC, "\tcookie (h,l) 0x%08X 0x%08X\n",
+ le32_to_cpu(cq_desc->cookie_high),
+ le32_to_cpu(cq_desc->cookie_low));
+ ice_debug(hw, ICE_DBG_AQ_DESC, "\tparam (0,1) 0x%08X 0x%08X\n",
+ le32_to_cpu(cq_desc->params.generic.param0),
+ le32_to_cpu(cq_desc->params.generic.param1));
+ ice_debug(hw, ICE_DBG_AQ_DESC, "\taddr (h,l) 0x%08X 0x%08X\n",
+ le32_to_cpu(cq_desc->params.generic.addr_high),
+ le32_to_cpu(cq_desc->params.generic.addr_low));
+ if (buf && cq_desc->datalen != 0) {
+ ice_debug(hw, ICE_DBG_AQ_DESC_BUF, "Buffer:\n");
+ if (buf_len < len)
+ len = buf_len;
+
+ ice_debug_array(hw, ICE_DBG_AQ_DESC_BUF, 16, 1, (u8 *)buf, len);
+ }
+}
+
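Note the old #ifndef CONFIG_DYNAMIC_DEBUG guard became a plain IS_ENABLED() test: with dynamic debug enabled the condition is constant-false, the early return compiles away, and each ice_debug() call site is controlled individually; without it, hw->debug_mask still gates output at run time. Roughly:

    /* !IS_ENABLED(CONFIG_DYNAMIC_DEBUG) && !(mask & hw->debug_mask)
     *   dynamic debug on  -> test is always false -> never early-return
     *   dynamic debug off -> early-return unless a mask bit matches
     */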
+/**
* ice_sq_done - check if FW has processed the Admin Send Queue (ATQ)
* @hw: pointer to the HW struct
* @cq: pointer to the specific Control queue
@@ -934,10 +980,10 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
}
/* Debug desc and buffer */
- ice_debug(hw, ICE_DBG_AQ_MSG,
+ ice_debug(hw, ICE_DBG_AQ_DESC,
"ATQ: Control Send queue desc and buffer:\n");
- ice_debug_cq(hw, ICE_DBG_AQ_CMD, (void *)desc_on_ring, buf, buf_size);
+ ice_debug_cq(hw, (void *)desc_on_ring, buf, buf_size);
(cq->sq.next_to_use)++;
if (cq->sq.next_to_use == cq->sq.count)
@@ -948,7 +994,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
if (ice_sq_done(hw, cq))
break;
- mdelay(1);
+ udelay(ICE_CTL_Q_SQ_CMD_USEC);
total_delay++;
} while (total_delay < cq->sq_cmd_timeout);
@@ -971,7 +1017,8 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
retval = le16_to_cpu(desc->retval);
if (retval) {
ice_debug(hw, ICE_DBG_AQ_MSG,
- "Control Send Queue command completed with error 0x%x\n",
+ "Control Send Queue command 0x%04X completed with error 0x%X\n",
+ le16_to_cpu(desc->opcode),
retval);
/* strip off FW internal code */
@@ -986,7 +1033,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
ice_debug(hw, ICE_DBG_AQ_MSG,
"ATQ: desc and buffer writeback:\n");
- ice_debug_cq(hw, ICE_DBG_AQ_CMD, (void *)desc, buf, buf_size);
+ ice_debug_cq(hw, (void *)desc, buf, buf_size);
/* save writeback AQ if requested */
if (details->wb_desc)
@@ -1075,7 +1122,8 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
if (flags & ICE_AQ_FLAG_ERR) {
ret_code = ICE_ERR_AQ_ERROR;
ice_debug(hw, ICE_DBG_AQ_MSG,
- "Control Receive Queue Event received with error 0x%x\n",
+ "Control Receive Queue Event 0x%04X received with error 0x%X\n",
+ le16_to_cpu(desc->opcode),
cq->rq_last_status);
}
memcpy(&e->desc, desc, sizeof(e->desc));
@@ -1084,10 +1132,9 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
if (e->msg_buf && e->msg_len)
memcpy(e->msg_buf, cq->rq.r.rq_bi[desc_idx].va, e->msg_len);
- ice_debug(hw, ICE_DBG_AQ_MSG, "ARQ: desc and buffer:\n");
+ ice_debug(hw, ICE_DBG_AQ_DESC, "ARQ: desc and buffer:\n");
- ice_debug_cq(hw, ICE_DBG_AQ_CMD, (void *)desc, e->msg_buf,
- cq->rq_buf_size);
+ ice_debug_cq(hw, (void *)desc, e->msg_buf, cq->rq_buf_size);
/* Restore the original datalen and buffer address in the desc,
* FW updates datalen to indicate the event message size
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.h b/drivers/net/ethernet/intel/ice/ice_controlq.h
index 44945c2165d8..bf0ebe6149e8 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.h
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.h
@@ -22,7 +22,7 @@
*/
#define EXP_FW_API_VER_BRANCH 0x00
#define EXP_FW_API_VER_MAJOR 0x01
-#define EXP_FW_API_VER_MINOR 0x03
+#define EXP_FW_API_VER_MINOR 0x05
/* Different control queue types: These are mainly for SW consumption. */
enum ice_ctl_q {
@@ -31,8 +31,9 @@ enum ice_ctl_q {
ICE_CTL_Q_MAILBOX,
};
-/* Control Queue default settings */
-#define ICE_CTL_Q_SQ_CMD_TIMEOUT 250 /* msecs */
+/* Control Queue timeout settings - max delay 250ms */
+#define ICE_CTL_Q_SQ_CMD_TIMEOUT 2500 /* Count 2500 times */
+#define ICE_CTL_Q_SQ_CMD_USEC 100 /* Check every 100usec */
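The worst-case wait is unchanged; only the polling granularity shrinks. The arithmetic behind the two constants:

    /* old: 250 polls x 1 ms (mdelay)    = 250 ms timeout, 1 ms steps
     * new: 2500 polls x 100 us (udelay) = 250 ms timeout, 100 us steps
     * -> a completed descriptor is noticed up to ~900 us sooner
     */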
struct ice_ctl_q_ring {
void *dma_head; /* Virtual address to DMA head */
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.c b/drivers/net/ethernet/intel/ice/ice_dcb.c
index dd7efff121bd..713e8a892e14 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb.c
@@ -965,9 +965,9 @@ enum ice_status ice_init_dcb(struct ice_hw *hw, bool enable_mib_change)
pi->dcbx_status == ICE_DCBX_STATUS_NOT_STARTED) {
/* Get current DCBX configuration */
ret = ice_get_dcb_cfg(pi);
- pi->is_sw_lldp = (hw->adminq.sq_last_status == ICE_AQ_RC_EPERM);
if (ret)
return ret;
+ pi->is_sw_lldp = false;
} else if (pi->dcbx_status == ICE_DCBX_STATUS_DIS) {
return ICE_ERR_NOT_READY;
}
@@ -975,8 +975,8 @@ enum ice_status ice_init_dcb(struct ice_hw *hw, bool enable_mib_change)
/* Configure the LLDP MIB change event */
if (enable_mib_change) {
ret = ice_aq_cfg_lldp_mib_change(hw, true, NULL);
- if (!ret)
- pi->is_sw_lldp = false;
+ if (ret)
+ pi->is_sw_lldp = true;
}
return ret;
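Read together, the two hunks invert the bookkeeping: is_sw_lldp now defaults to false once the FW DCBX configuration is read successfully, and flips to true only if arming the LLDP MIB-change event fails, i.e. the FW agent turns out to be unusable. A condensed view of the resulting states (annotation, not patch content):

    /* get_dcb_cfg ok, mib-change ok   -> is_sw_lldp = false (FW LLDP)
     * get_dcb_cfg ok, mib-change fail -> is_sw_lldp = true  (SW LLDP)
     */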
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
index dd47869c4ad4..d3d3ec29def9 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2019, Intel Corporation. */
#include "ice_dcb_lib.h"
+#include "ice_dcb_nl.h"
/**
* ice_vsi_cfg_netdev_tc - Setup the netdev TC configuration
@@ -100,6 +101,16 @@ u8 ice_dcb_get_num_tc(struct ice_dcbx_cfg *dcbcfg)
}
/**
+ * ice_dcb_get_tc - Get the TC associated with the queue
+ * @vsi: ptr to the VSI
+ * @queue_index: queue number associated with VSI
+ */
+u8 ice_dcb_get_tc(struct ice_vsi *vsi, int queue_index)
+{
+ return vsi->tx_rings[queue_index]->dcb_tc;
+}
+
+/**
* ice_vsi_cfg_dcb_rings - Update rings to reflect DCB TC
* @vsi: VSI owner of rings being updated
*/
@@ -138,83 +149,62 @@ void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi)
}
/**
- * ice_pf_dcb_recfg - Reconfigure all VEBs and VSIs
- * @pf: pointer to the PF struct
- *
- * Assumed caller has already disabled all VSIs before
- * calling this function. Reconfiguring DCB based on
- * local_dcbx_cfg.
- */
-static void ice_pf_dcb_recfg(struct ice_pf *pf)
-{
- struct ice_dcbx_cfg *dcbcfg = &pf->hw.port_info->local_dcbx_cfg;
- u8 tc_map = 0;
- int v, ret;
-
- /* Update each VSI */
- ice_for_each_vsi(pf, v) {
- if (!pf->vsi[v])
- continue;
-
- if (pf->vsi[v]->type == ICE_VSI_PF)
- tc_map = ice_dcb_get_ena_tc(dcbcfg);
- else
- tc_map = ICE_DFLT_TRAFFIC_CLASS;
-
- ret = ice_vsi_cfg_tc(pf->vsi[v], tc_map);
- if (ret) {
- dev_err(&pf->pdev->dev,
- "Failed to config TC for VSI index: %d\n",
- pf->vsi[v]->idx);
- continue;
- }
-
- ice_vsi_map_rings_to_vectors(pf->vsi[v]);
- }
-}
-
-/**
* ice_pf_dcb_cfg - Apply new DCB configuration
* @pf: pointer to the PF struct
* @new_cfg: DCBX config to apply
* @locked: is the RTNL held
*/
-static
int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked)
{
- struct ice_dcbx_cfg *old_cfg, *curr_cfg;
struct ice_aqc_port_ets_elem buf = { 0 };
- int ret = 0;
+ struct ice_dcbx_cfg *old_cfg, *curr_cfg;
+ struct device *dev = ice_pf_to_dev(pf);
+ int ret = ICE_DCB_NO_HW_CHG;
+ struct ice_vsi *pf_vsi;
curr_cfg = &pf->hw.port_info->local_dcbx_cfg;
+ /* FW does not care if change happened */
+ if (!pf->hw.port_info->is_sw_lldp)
+ ret = ICE_DCB_HW_CHG_RST;
+
/* Enable DCB tagging only when more than one TC */
if (ice_dcb_get_num_tc(new_cfg) > 1) {
- dev_dbg(&pf->pdev->dev, "DCB tagging enabled (num TC > 1)\n");
+ dev_dbg(dev, "DCB tagging enabled (num TC > 1)\n");
set_bit(ICE_FLAG_DCB_ENA, pf->flags);
} else {
- dev_dbg(&pf->pdev->dev, "DCB tagging disabled (num TC = 1)\n");
+ dev_dbg(dev, "DCB tagging disabled (num TC = 1)\n");
clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
}
if (!memcmp(new_cfg, curr_cfg, sizeof(*new_cfg))) {
- dev_dbg(&pf->pdev->dev, "No change in DCB config required\n");
+ dev_dbg(dev, "No change in DCB config required\n");
return ret;
}
/* Store old config in case FW config fails */
- old_cfg = devm_kzalloc(&pf->pdev->dev, sizeof(*old_cfg), GFP_KERNEL);
- memcpy(old_cfg, curr_cfg, sizeof(*old_cfg));
+ old_cfg = kmemdup(curr_cfg, sizeof(*old_cfg), GFP_KERNEL);
+ if (!old_cfg)
+ return -ENOMEM;
+
+ dev_info(dev, "Commit DCB Configuration to the hardware\n");
+ pf_vsi = ice_get_main_vsi(pf);
+ if (!pf_vsi) {
+ dev_dbg(dev, "PF VSI doesn't exist\n");
+ ret = -EINVAL;
+ goto free_cfg;
+ }
/* avoid race conditions by holding the lock while disabling and
* re-enabling the VSI
*/
if (!locked)
rtnl_lock();
- ice_pf_dis_all_vsi(pf, true);
+ ice_dis_vsi(pf_vsi, true);
memcpy(curr_cfg, new_cfg, sizeof(*curr_cfg));
memcpy(&curr_cfg->etsrec, &curr_cfg->etscfg, sizeof(curr_cfg->etsrec));
+ memcpy(&new_cfg->etsrec, &curr_cfg->etscfg, sizeof(curr_cfg->etsrec));
/* Only send new config to HW if we are in SW LLDP mode. Otherwise,
* the new config came from the HW in the first place.
@@ -222,7 +212,7 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked)
if (pf->hw.port_info->is_sw_lldp) {
ret = ice_set_dcb_cfg(pf->hw.port_info);
if (ret) {
- dev_err(&pf->pdev->dev, "Set DCB Config failed\n");
+ dev_err(dev, "Set DCB Config failed\n");
/* Restore previous settings to local config */
memcpy(curr_cfg, old_cfg, sizeof(*curr_cfg));
goto out;
@@ -231,17 +221,18 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked)
ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL);
if (ret) {
- dev_err(&pf->pdev->dev, "Query Port ETS failed\n");
+ dev_err(dev, "Query Port ETS failed\n");
goto out;
}
ice_pf_dcb_recfg(pf);
out:
- ice_pf_ena_all_vsi(pf, true);
+ ice_ena_vsi(pf_vsi, true);
if (!locked)
rtnl_unlock();
- devm_kfree(&pf->pdev->dev, old_cfg);
+free_cfg:
+ kfree(old_cfg);
return ret;
}
@@ -277,6 +268,7 @@ static bool
ice_dcb_need_recfg(struct ice_pf *pf, struct ice_dcbx_cfg *old_cfg,
struct ice_dcbx_cfg *new_cfg)
{
+ struct device *dev = ice_pf_to_dev(pf);
bool need_reconfig = false;
/* Check if ETS configuration has changed */
@@ -287,33 +279,33 @@ ice_dcb_need_recfg(struct ice_pf *pf, struct ice_dcbx_cfg *old_cfg,
&old_cfg->etscfg.prio_table,
sizeof(new_cfg->etscfg.prio_table))) {
need_reconfig = true;
- dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n");
+ dev_dbg(dev, "ETS UP2TC changed.\n");
}
if (memcmp(&new_cfg->etscfg.tcbwtable,
&old_cfg->etscfg.tcbwtable,
sizeof(new_cfg->etscfg.tcbwtable)))
- dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n");
+ dev_dbg(dev, "ETS TC BW Table changed.\n");
if (memcmp(&new_cfg->etscfg.tsatable,
&old_cfg->etscfg.tsatable,
sizeof(new_cfg->etscfg.tsatable)))
- dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n");
+ dev_dbg(dev, "ETS TSA Table changed.\n");
}
/* Check if PFC configuration has changed */
if (memcmp(&new_cfg->pfc, &old_cfg->pfc, sizeof(new_cfg->pfc))) {
need_reconfig = true;
- dev_dbg(&pf->pdev->dev, "PFC config change detected.\n");
+ dev_dbg(dev, "PFC config change detected.\n");
}
/* Check if APP Table has changed */
if (memcmp(&new_cfg->app, &old_cfg->app, sizeof(new_cfg->app))) {
need_reconfig = true;
- dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
+ dev_dbg(dev, "APP Table change detected.\n");
}
- dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig);
+ dev_dbg(dev, "dcb need_reconfig=%d\n", need_reconfig);
return need_reconfig;
}
@@ -325,11 +317,12 @@ void ice_dcb_rebuild(struct ice_pf *pf)
{
struct ice_dcbx_cfg *local_dcbx_cfg, *desired_dcbx_cfg, *prev_cfg;
struct ice_aqc_port_ets_elem buf = { 0 };
+ struct device *dev = ice_pf_to_dev(pf);
enum ice_status ret;
ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL);
if (ret) {
- dev_err(&pf->pdev->dev, "Query Port ETS failed\n");
+ dev_err(dev, "Query Port ETS failed\n");
goto dcb_error;
}
@@ -348,17 +341,14 @@ void ice_dcb_rebuild(struct ice_pf *pf)
ice_cfg_etsrec_defaults(pf->hw.port_info);
ret = ice_set_dcb_cfg(pf->hw.port_info);
if (ret) {
- dev_err(&pf->pdev->dev, "Failed to set DCB to unwilling\n");
+ dev_err(dev, "Failed to set DCB to unwilling\n");
goto dcb_error;
}
/* Retrieve DCB config and ensure same as current in SW */
- prev_cfg = devm_kmemdup(&pf->pdev->dev, local_dcbx_cfg,
- sizeof(*prev_cfg), GFP_KERNEL);
- if (!prev_cfg) {
- dev_err(&pf->pdev->dev, "Failed to alloc space for DCB cfg\n");
+ prev_cfg = kmemdup(local_dcbx_cfg, sizeof(*prev_cfg), GFP_KERNEL);
+ if (!prev_cfg)
goto dcb_error;
- }
ice_init_dcb(&pf->hw, true);
if (pf->hw.port_info->dcbx_status == ICE_DCBX_STATUS_DIS)
@@ -368,12 +358,13 @@ void ice_dcb_rebuild(struct ice_pf *pf)
if (ice_dcb_need_recfg(pf, prev_cfg, local_dcbx_cfg)) {
/* difference in cfg detected - disable DCB till next MIB */
- dev_err(&pf->pdev->dev, "Set local MIB not accurate\n");
+ dev_err(dev, "Set local MIB not accurate\n");
+ kfree(prev_cfg);
goto dcb_error;
}
/* fetched config congruent to previous configuration */
- devm_kfree(&pf->pdev->dev, prev_cfg);
+ kfree(prev_cfg);
/* Set the local desired config */
if (local_dcbx_cfg->dcbx_mode == ICE_DCBX_MODE_CEE)
@@ -383,27 +374,30 @@ void ice_dcb_rebuild(struct ice_pf *pf)
ice_cfg_etsrec_defaults(pf->hw.port_info);
ret = ice_set_dcb_cfg(pf->hw.port_info);
if (ret) {
- dev_err(&pf->pdev->dev, "Failed to set desired config\n");
+ dev_err(dev, "Failed to set desired config\n");
goto dcb_error;
}
- dev_info(&pf->pdev->dev, "DCB restored after reset\n");
+ dev_info(dev, "DCB restored after reset\n");
ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL);
if (ret) {
- dev_err(&pf->pdev->dev, "Query Port ETS failed\n");
+ dev_err(dev, "Query Port ETS failed\n");
goto dcb_error;
}
return;
dcb_error:
- dev_err(&pf->pdev->dev, "Disabling DCB until new settings occur\n");
- prev_cfg = devm_kzalloc(&pf->pdev->dev, sizeof(*prev_cfg), GFP_KERNEL);
+ dev_err(dev, "Disabling DCB until new settings occur\n");
+ prev_cfg = kzalloc(sizeof(*prev_cfg), GFP_KERNEL);
+ if (!prev_cfg)
+ return;
+
prev_cfg->etscfg.willing = true;
prev_cfg->etscfg.tcbwtable[0] = ICE_TC_MAX_BW;
prev_cfg->etscfg.tsatable[0] = ICE_IEEE_TSA_ETS;
memcpy(&prev_cfg->etsrec, &prev_cfg->etscfg, sizeof(prev_cfg->etsrec));
ice_pf_dcb_cfg(pf, prev_cfg, false);
- devm_kfree(&pf->pdev->dev, prev_cfg);
+ kfree(prev_cfg);
}
/**
@@ -418,18 +412,17 @@ static int ice_dcb_init_cfg(struct ice_pf *pf, bool locked)
int ret = 0;
pi = pf->hw.port_info;
- newcfg = devm_kzalloc(&pf->pdev->dev, sizeof(*newcfg), GFP_KERNEL);
+ newcfg = kmemdup(&pi->local_dcbx_cfg, sizeof(*newcfg), GFP_KERNEL);
if (!newcfg)
return -ENOMEM;
- memcpy(newcfg, &pi->local_dcbx_cfg, sizeof(*newcfg));
memset(&pi->local_dcbx_cfg, 0, sizeof(*newcfg));
- dev_info(&pf->pdev->dev, "Configuring initial DCB values\n");
+ dev_info(ice_pf_to_dev(pf), "Configuring initial DCB values\n");
if (ice_pf_dcb_cfg(pf, newcfg, locked))
ret = -EINVAL;
- devm_kfree(&pf->pdev->dev, newcfg);
+ kfree(newcfg);
return ret;
}
@@ -437,9 +430,10 @@ static int ice_dcb_init_cfg(struct ice_pf *pf, bool locked)
/**
* ice_dcb_sw_default_config - Apply a default DCB config
* @pf: PF to apply config to
+ * @ets_willing: whether to configure ETS as willing
* @locked: was this function called with RTNL held
*/
-static int ice_dcb_sw_dflt_cfg(struct ice_pf *pf, bool locked)
+static int ice_dcb_sw_dflt_cfg(struct ice_pf *pf, bool ets_willing, bool locked)
{
struct ice_aqc_port_ets_elem buf = { 0 };
struct ice_dcbx_cfg *dcbcfg;
@@ -449,12 +443,13 @@ static int ice_dcb_sw_dflt_cfg(struct ice_pf *pf, bool locked)
hw = &pf->hw;
pi = hw->port_info;
- dcbcfg = devm_kzalloc(&pf->pdev->dev, sizeof(*dcbcfg), GFP_KERNEL);
+ dcbcfg = kzalloc(sizeof(*dcbcfg), GFP_KERNEL);
+ if (!dcbcfg)
+ return -ENOMEM;
- memset(dcbcfg, 0, sizeof(*dcbcfg));
memset(&pi->local_dcbx_cfg, 0, sizeof(*dcbcfg));
- dcbcfg->etscfg.willing = 1;
+ dcbcfg->etscfg.willing = ets_willing ? 1 : 0;
dcbcfg->etscfg.maxtcs = hw->func_caps.common_cap.maxtc;
dcbcfg->etscfg.tcbwtable[0] = 100;
dcbcfg->etscfg.tsatable[0] = ICE_IEEE_TSA_ETS;
@@ -472,7 +467,7 @@ static int ice_dcb_sw_dflt_cfg(struct ice_pf *pf, bool locked)
dcbcfg->app[0].prot_id = ICE_APP_PROT_ID_FCOE;
ret = ice_pf_dcb_cfg(pf, dcbcfg, locked);
- devm_kfree(&pf->pdev->dev, dcbcfg);
+ kfree(dcbcfg);
if (ret)
return ret;
@@ -480,13 +475,112 @@ static int ice_dcb_sw_dflt_cfg(struct ice_pf *pf, bool locked)
}
/**
+ * ice_dcb_tc_contig - Check that TCs are contiguous
+ * @prio_table: pointer to priority table
+ *
+ * Check if TCs begin with TC0 and are contiguous
+ */
+static bool ice_dcb_tc_contig(u8 *prio_table)
+{
+ u8 max_tc = 0;
+ int i;
+
+ for (i = 0; i < CEE_DCBX_MAX_PRIO; i++) {
+ u8 cur_tc = prio_table[i];
+
+ if (cur_tc > max_tc)
+ return false;
+ else if (cur_tc == max_tc)
+ max_tc++;
+ }
+
+ return true;
+}
+
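Two illustrative priority tables for the contiguity check above:

    /* {0, 0, 1, 1, 2, 2, 3, 3}: every TC equals max_tc when first
     * seen, so max_tc steps 0 -> 4 and the function returns true.
     *
     * {0, 0, 2, 2, 0, 0, 0, 0}: at index 2, cur_tc (2) > max_tc (1)
     * because TC1 never appeared, so the function returns false.
     */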
+/**
+ * ice_dcb_noncontig_cfg - Configure DCB for non-contiguous TCs
+ * @pf: pointer to the PF struct
+ *
+ * If the TCs are non-contiguous, configure SW DCB with TC0 only and ETS non-willing
+ */
+static int ice_dcb_noncontig_cfg(struct ice_pf *pf)
+{
+ struct ice_dcbx_cfg *dcbcfg = &pf->hw.port_info->local_dcbx_cfg;
+ struct device *dev = ice_pf_to_dev(pf);
+ int ret;
+
+ /* Configure SW DCB default with ETS non-willing */
+ ret = ice_dcb_sw_dflt_cfg(pf, false, true);
+ if (ret) {
+ dev_err(dev, "Failed to set local DCB config %d\n", ret);
+ return ret;
+ }
+
+ /* Reconfigure with ETS willing so that FW will send LLDP MIB event */
+ dcbcfg->etscfg.willing = 1;
+ ret = ice_set_dcb_cfg(pf->hw.port_info);
+ if (ret)
+ dev_err(dev, "Failed to set DCB to unwilling\n");
+
+ return ret;
+}
+
+/**
+ * ice_pf_dcb_recfg - Reconfigure all VEBs and VSIs
+ * @pf: pointer to the PF struct
+ *
+ * Assumed caller has already disabled all VSIs before
+ * calling this function. Reconfiguring DCB based on
+ * local_dcbx_cfg.
+ */
+void ice_pf_dcb_recfg(struct ice_pf *pf)
+{
+ struct ice_dcbx_cfg *dcbcfg = &pf->hw.port_info->local_dcbx_cfg;
+ u8 tc_map = 0;
+ int v, ret;
+
+ /* Update each VSI */
+ ice_for_each_vsi(pf, v) {
+ struct ice_vsi *vsi = pf->vsi[v];
+
+ if (!vsi)
+ continue;
+
+ if (vsi->type == ICE_VSI_PF) {
+ tc_map = ice_dcb_get_ena_tc(dcbcfg);
+
+ /* If DCBX requests non-contiguous TCs, then configure
+ * the default TC
+ */
+ if (!ice_dcb_tc_contig(dcbcfg->etscfg.prio_table)) {
+ tc_map = ICE_DFLT_TRAFFIC_CLASS;
+ ice_dcb_noncontig_cfg(pf);
+ }
+ } else {
+ tc_map = ICE_DFLT_TRAFFIC_CLASS;
+ }
+
+ ret = ice_vsi_cfg_tc(vsi, tc_map);
+ if (ret) {
+ dev_err(ice_pf_to_dev(pf), "Failed to config TC for VSI index: %d\n",
+ vsi->idx);
+ continue;
+ }
+
+ ice_vsi_map_rings_to_vectors(vsi);
+ if (vsi->type == ICE_VSI_PF)
+ ice_dcbnl_set_all(vsi);
+ }
+}
+
+/**
* ice_init_pf_dcb - initialize DCB for a PF
* @pf: PF to initialize DCB for
* @locked: Was function called with RTNL held
*/
int ice_init_pf_dcb(struct ice_pf *pf, bool locked)
{
- struct device *dev = &pf->pdev->dev;
+ struct device *dev = ice_pf_to_dev(pf);
struct ice_port_info *port_info;
struct ice_hw *hw = &pf->hw;
int err;
@@ -495,26 +589,39 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked)
err = ice_init_dcb(hw, false);
if (err && !port_info->is_sw_lldp) {
- dev_err(&pf->pdev->dev, "Error initializing DCB %d\n", err);
+ dev_err(dev, "Error initializing DCB %d\n", err);
goto dcb_init_err;
}
- dev_info(&pf->pdev->dev,
+ dev_info(dev,
"DCB is enabled in the hardware, max number of TCs supported on this port are %d\n",
pf->hw.func_caps.common_cap.maxtc);
if (err) {
+ struct ice_vsi *pf_vsi;
+
/* FW LLDP is disabled, activate SW DCBX/LLDP mode */
- dev_info(&pf->pdev->dev,
- "FW LLDP is disabled, DCBx/LLDP in SW mode.\n");
+ dev_info(dev, "FW LLDP is disabled, DCBx/LLDP in SW mode.\n");
clear_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags);
- err = ice_dcb_sw_dflt_cfg(pf, locked);
+ err = ice_dcb_sw_dflt_cfg(pf, true, locked);
if (err) {
- dev_err(&pf->pdev->dev,
+ dev_err(dev,
"Failed to set local DCB config %d\n", err);
err = -EIO;
goto dcb_init_err;
}
+ /* If the FW DCBX engine is not running then Rx LLDP packets
+ * need to be redirected up the stack.
+ */
+ pf_vsi = ice_get_main_vsi(pf);
+ if (!pf_vsi) {
+ dev_err(dev, "Failed to set local DCB config\n");
+ err = -EIO;
+ goto dcb_init_err;
+ }
+
+ ice_cfg_sw_lldp(pf_vsi, false, true);
+
pf->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
return 0;
}
@@ -623,10 +730,12 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
struct ice_rq_event_info *event)
{
struct ice_aqc_port_ets_elem buf = { 0 };
+ struct device *dev = ice_pf_to_dev(pf);
struct ice_aqc_lldp_get_mib *mib;
struct ice_dcbx_cfg tmp_dcbx_cfg;
bool need_reconfig = false;
struct ice_port_info *pi;
+ struct ice_vsi *pf_vsi;
u8 type;
int ret;
@@ -635,8 +744,7 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
return;
if (pf->dcbx_cap & DCB_CAP_DCBX_HOST) {
- dev_dbg(&pf->pdev->dev,
- "MIB Change Event in HOST mode\n");
+ dev_dbg(dev, "MIB Change Event in HOST mode\n");
return;
}
@@ -645,21 +753,20 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
/* Ignore if event is not for Nearest Bridge */
type = ((mib->type >> ICE_AQ_LLDP_BRID_TYPE_S) &
ICE_AQ_LLDP_BRID_TYPE_M);
- dev_dbg(&pf->pdev->dev, "LLDP event MIB bridge type 0x%x\n", type);
+ dev_dbg(dev, "LLDP event MIB bridge type 0x%x\n", type);
if (type != ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID)
return;
/* Check MIB Type and return if event for Remote MIB update */
type = mib->type & ICE_AQ_LLDP_MIB_TYPE_M;
- dev_dbg(&pf->pdev->dev,
- "LLDP event mib type %s\n", type ? "remote" : "local");
+ dev_dbg(dev, "LLDP event mib type %s\n", type ? "remote" : "local");
if (type == ICE_AQ_LLDP_MIB_REMOTE) {
/* Update the remote cached instance and return */
ret = ice_aq_get_dcb_cfg(pi->hw, ICE_AQ_LLDP_MIB_REMOTE,
ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID,
&pi->remote_dcbx_cfg);
if (ret) {
- dev_err(&pf->pdev->dev, "Failed to get remote DCB config\n");
+ dev_err(dev, "Failed to get remote DCB config\n");
return;
}
}
@@ -673,37 +780,43 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
/* Get updated DCBX data from firmware */
ret = ice_get_dcb_cfg(pf->hw.port_info);
if (ret) {
- dev_err(&pf->pdev->dev, "Failed to get DCB config\n");
+ dev_err(dev, "Failed to get DCB config\n");
return;
}
/* No change detected in DCBX configs */
if (!memcmp(&tmp_dcbx_cfg, &pi->local_dcbx_cfg, sizeof(tmp_dcbx_cfg))) {
- dev_dbg(&pf->pdev->dev,
- "No change detected in DCBX configuration.\n");
+ dev_dbg(dev, "No change detected in DCBX configuration.\n");
return;
}
need_reconfig = ice_dcb_need_recfg(pf, &tmp_dcbx_cfg,
&pi->local_dcbx_cfg);
+ ice_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &pi->local_dcbx_cfg);
if (!need_reconfig)
return;
/* Enable DCB tagging only when more than one TC */
if (ice_dcb_get_num_tc(&pi->local_dcbx_cfg) > 1) {
- dev_dbg(&pf->pdev->dev, "DCB tagging enabled (num TC > 1)\n");
+ dev_dbg(dev, "DCB tagging enabled (num TC > 1)\n");
set_bit(ICE_FLAG_DCB_ENA, pf->flags);
} else {
- dev_dbg(&pf->pdev->dev, "DCB tagging disabled (num TC = 1)\n");
+ dev_dbg(dev, "DCB tagging disabled (num TC = 1)\n");
clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
}
+ pf_vsi = ice_get_main_vsi(pf);
+ if (!pf_vsi) {
+ dev_dbg(dev, "PF VSI doesn't exist\n");
+ return;
+ }
+
rtnl_lock();
- ice_pf_dis_all_vsi(pf, true);
+ ice_dis_vsi(pf_vsi, true);
ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL);
if (ret) {
- dev_err(&pf->pdev->dev, "Query Port ETS failed\n");
+ dev_err(dev, "Query Port ETS failed\n");
rtnl_unlock();
return;
}
@@ -711,6 +824,6 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
/* changes in configuration update VSI */
ice_pf_dcb_recfg(pf);
- ice_pf_ena_all_vsi(pf, true);
+ ice_ena_vsi(pf_vsi, true);
rtnl_unlock();
}
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
index 661a6f7bca64..f15e5776f287 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
@@ -5,14 +5,22 @@
#define _ICE_DCB_LIB_H_
#include "ice.h"
+#include "ice_base.h"
#include "ice_lib.h"
#ifdef CONFIG_DCB
-#define ICE_TC_MAX_BW 100 /* Default Max BW percentage */
+#define ICE_TC_MAX_BW 100 /* Default Max BW percentage */
+#define ICE_DCB_HW_CHG_RST 0 /* DCB configuration changed with reset */
+#define ICE_DCB_NO_HW_CHG 1 /* DCB configuration did not change */
+#define ICE_DCB_HW_CHG 2 /* DCB configuration changed, no reset */
void ice_dcb_rebuild(struct ice_pf *pf);
u8 ice_dcb_get_ena_tc(struct ice_dcbx_cfg *dcbcfg);
u8 ice_dcb_get_num_tc(struct ice_dcbx_cfg *dcbcfg);
+u8 ice_dcb_get_tc(struct ice_vsi *vsi, int queue_index);
+int
+ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked);
+void ice_pf_dcb_recfg(struct ice_pf *pf);
void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi);
int ice_init_pf_dcb(struct ice_pf *pf, bool locked);
void ice_update_dcb_stats(struct ice_pf *pf);
@@ -41,10 +49,25 @@ static inline u8 ice_dcb_get_num_tc(struct ice_dcbx_cfg __always_unused *dcbcfg)
return 1;
}
+static inline u8
+ice_dcb_get_tc(struct ice_vsi __always_unused *vsi,
+ int __always_unused queue_index)
+{
+ return 0;
+}
+
static inline int
ice_init_pf_dcb(struct ice_pf *pf, bool __always_unused locked)
{
- dev_dbg(&pf->pdev->dev, "DCB not supported\n");
+ dev_dbg(ice_pf_to_dev(pf), "DCB not supported\n");
+ return -EOPNOTSUPP;
+}
+
+static inline int
+ice_pf_dcb_cfg(struct ice_pf __always_unused *pf,
+ struct ice_dcbx_cfg __always_unused *new_cfg,
+ bool __always_unused locked)
+{
return -EOPNOTSUPP;
}
@@ -56,6 +79,7 @@ ice_tx_prepare_vlan_flags_dcb(struct ice_ring __always_unused *tx_ring,
}
#define ice_update_dcb_stats(pf) do {} while (0)
+#define ice_pf_dcb_recfg(pf) do {} while (0)
#define ice_vsi_cfg_dcb_rings(vsi) do {} while (0)
#define ice_dcb_process_lldp_set_mib_change(pf, event) do {} while (0)
#define ice_set_cgd_num(tlan_ctx, ring) do {} while (0)
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
new file mode 100644
index 000000000000..d870c1aedc17
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
@@ -0,0 +1,933 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019, Intel Corporation. */
+
+#include "ice.h"
+#include "ice_dcb.h"
+#include "ice_dcb_lib.h"
+#include "ice_dcb_nl.h"
+#include <net/dcbnl.h>
+
+#define ICE_APP_PROT_ID_ROCE 0x8915
+
+/**
+ * ice_dcbnl_devreset - perform enough of an ifdown/ifup to sync DCBNL info
+ * @netdev: device associated with interface that needs reset
+ */
+static void ice_dcbnl_devreset(struct net_device *netdev)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+
+ while (ice_is_reset_in_progress(pf->state))
+ usleep_range(1000, 2000);
+
+ set_bit(__ICE_DCBNL_DEVRESET, pf->state);
+ dev_close(netdev);
+ netdev_state_change(netdev);
+ dev_open(netdev, NULL);
+ netdev_state_change(netdev);
+ clear_bit(__ICE_DCBNL_DEVRESET, pf->state);
+}
+
+/**
+ * ice_dcbnl_getets - retrieve local ETS configuration
+ * @netdev: the relevant netdev
+ * @ets: struct to hold ETS configuration
+ */
+static int ice_dcbnl_getets(struct net_device *netdev, struct ieee_ets *ets)
+{
+ struct ice_dcbx_cfg *dcbxcfg;
+ struct ice_port_info *pi;
+ struct ice_pf *pf;
+
+ pf = ice_netdev_to_pf(netdev);
+ pi = pf->hw.port_info;
+ dcbxcfg = &pi->local_dcbx_cfg;
+
+ ets->willing = dcbxcfg->etscfg.willing;
+ ets->ets_cap = dcbxcfg->etscfg.maxtcs;
+ ets->cbs = dcbxcfg->etscfg.cbs;
+ memcpy(ets->tc_tx_bw, dcbxcfg->etscfg.tcbwtable, sizeof(ets->tc_tx_bw));
+ memcpy(ets->tc_rx_bw, dcbxcfg->etscfg.tcbwtable, sizeof(ets->tc_rx_bw));
+ memcpy(ets->tc_tsa, dcbxcfg->etscfg.tsatable, sizeof(ets->tc_tsa));
+ memcpy(ets->prio_tc, dcbxcfg->etscfg.prio_table, sizeof(ets->prio_tc));
+ memcpy(ets->tc_reco_bw, dcbxcfg->etsrec.tcbwtable,
+ sizeof(ets->tc_reco_bw));
+ memcpy(ets->tc_reco_tsa, dcbxcfg->etsrec.tsatable,
+ sizeof(ets->tc_reco_tsa));
+ memcpy(ets->reco_prio_tc, dcbxcfg->etscfg.prio_table,
+ sizeof(ets->reco_prio_tc));
+
+ return 0;
+}
+
+/**
+ * ice_dcbnl_setets - set IEEE ETS configuration
+ * @netdev: pointer to relevant netdev
+ * @ets: struct to hold ETS configuration
+ */
+static int ice_dcbnl_setets(struct net_device *netdev, struct ieee_ets *ets)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_dcbx_cfg *new_cfg;
+ int bwcfg = 0, bwrec = 0;
+ int err, i, max_tc = 0;
+
+ if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
+ !(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
+ return -EINVAL;
+
+ new_cfg = &pf->hw.port_info->desired_dcbx_cfg;
+
+ mutex_lock(&pf->tc_mutex);
+
+ new_cfg->etscfg.willing = ets->willing;
+ new_cfg->etscfg.cbs = ets->cbs;
+ ice_for_each_traffic_class(i) {
+ new_cfg->etscfg.tcbwtable[i] = ets->tc_tx_bw[i];
+ bwcfg += ets->tc_tx_bw[i];
+ new_cfg->etscfg.tsatable[i] = ets->tc_tsa[i];
+ new_cfg->etscfg.prio_table[i] = ets->prio_tc[i];
+ if (ets->prio_tc[i] > max_tc)
+ max_tc = ets->prio_tc[i];
+ new_cfg->etsrec.tcbwtable[i] = ets->tc_reco_bw[i];
+ bwrec += ets->tc_reco_bw[i];
+ new_cfg->etsrec.tsatable[i] = ets->tc_reco_tsa[i];
+ new_cfg->etsrec.prio_table[i] = ets->reco_prio_tc[i];
+ }
+
+ /* max_tc is a count of TCs in the 1-8 range, not a 0-7 TC index.
+ * Add one to the highest index seen if it is nonzero; if it is
+ * zero, fall back to the FW's default value.
+ */
+ if (max_tc)
+ max_tc++;
+ else
+ max_tc = IEEE_8021QAZ_MAX_TCS;
+
+ new_cfg->etscfg.maxtcs = max_tc;
+
+ if (!bwcfg)
+ new_cfg->etscfg.tcbwtable[0] = 100;
+
+ if (!bwrec)
+ new_cfg->etsrec.tcbwtable[0] = 100;
+
+ err = ice_pf_dcb_cfg(pf, new_cfg, true);
+ /* return of zero indicates new cfg applied */
+ if (err == ICE_DCB_HW_CHG_RST)
+ ice_dcbnl_devreset(netdev);
+ if (err == ICE_DCB_NO_HW_CHG)
+ err = ICE_DCB_HW_CHG_RST;
+
+ mutex_unlock(&pf->tc_mutex);
+ return err;
+}
+
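A worked example of the maxtcs derivation in ice_dcbnl_setets() above:

    /* prio_tc = {0, 0, 1, 1, 2, 2, 3, 3}: the highest TC index is 3,
     * so maxtcs = 3 + 1 = 4 traffic classes.
     *
     * prio_tc = all zero: max_tc stays 0, so maxtcs falls back to
     * IEEE_8021QAZ_MAX_TCS (8), the FW default.
     */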
+/**
+ * ice_dcbnl_getnumtcs - Get max number of traffic classes supported
+ * @dev: pointer to netdev struct
+ * @tcid: TC ID
+ * @num: total number of TCs supported by the adapter
+ *
+ * Return the total number of TCs supported
+ */
+static int
+ice_dcbnl_getnumtcs(struct net_device *dev, int __always_unused tcid, u8 *num)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(dev);
+
+ if (!test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags))
+ return -EINVAL;
+
+ *num = IEEE_8021QAZ_MAX_TCS;
+ return 0;
+}
+
+/**
+ * ice_dcbnl_getdcbx - retrieve current DCBX capability
+ * @netdev: pointer to the netdev struct
+ */
+static u8 ice_dcbnl_getdcbx(struct net_device *netdev)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+
+ return pf->dcbx_cap;
+}
+
+/**
+ * ice_dcbnl_setdcbx - set required DCBX capability
+ * @netdev: the corresponding netdev
+ * @mode: required mode
+ */
+static u8 ice_dcbnl_setdcbx(struct net_device *netdev, u8 mode)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+
+ /* No support for LLD_MANAGED modes or CEE+IEEE */
+ if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
+ ((mode & DCB_CAP_DCBX_VER_IEEE) && (mode & DCB_CAP_DCBX_VER_CEE)) ||
+ !(mode & DCB_CAP_DCBX_HOST))
+ return ICE_DCB_NO_HW_CHG;
+
+ /* Already set to the given mode, no change */
+ if (mode == pf->dcbx_cap)
+ return ICE_DCB_NO_HW_CHG;
+
+ pf->dcbx_cap = mode;
+ if (mode & DCB_CAP_DCBX_VER_CEE)
+ pf->hw.port_info->local_dcbx_cfg.dcbx_mode = ICE_DCBX_MODE_CEE;
+ else
+ pf->hw.port_info->local_dcbx_cfg.dcbx_mode = ICE_DCBX_MODE_IEEE;
+
+ dev_info(ice_pf_to_dev(pf), "DCBx mode = 0x%x\n", mode);
+ return ICE_DCB_HW_CHG_RST;
+}
+
+/**
+ * ice_dcbnl_get_perm_hw_addr - MAC address used by DCBX
+ * @netdev: pointer to netdev struct
+ * @perm_addr: buffer to return permanent MAC address
+ */
+static void ice_dcbnl_get_perm_hw_addr(struct net_device *netdev, u8 *perm_addr)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_port_info *pi = pf->hw.port_info;
+ int i, j;
+
+ memset(perm_addr, 0xff, MAX_ADDR_LEN);
+
+ for (i = 0; i < netdev->addr_len; i++)
+ perm_addr[i] = pi->mac.perm_addr[i];
+
+ for (j = 0; j < netdev->addr_len; j++, i++)
+ perm_addr[i] = pi->mac.perm_addr[j];
+}
+
+/**
+ * ice_get_pfc_delay - Retrieve PFC Link Delay
+ * @hw: pointer to HW struct
+ * @delay: holds the PFC Link Delay value
+ */
+static void ice_get_pfc_delay(struct ice_hw *hw, u16 *delay)
+{
+ u32 val;
+
+ val = rd32(hw, PRTDCB_GENC);
+ *delay = (u16)((val & PRTDCB_GENC_PFCLDA_M) >> PRTDCB_GENC_PFCLDA_S);
+}
+
+/**
+ * ice_dcbnl_getpfc - retrieve local IEEE PFC config
+ * @netdev: pointer to netdev struct
+ * @pfc: struct to hold PFC info
+ */
+static int ice_dcbnl_getpfc(struct net_device *netdev, struct ieee_pfc *pfc)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_port_info *pi = pf->hw.port_info;
+ struct ice_dcbx_cfg *dcbxcfg;
+ int i;
+
+ dcbxcfg = &pi->local_dcbx_cfg;
+ pfc->pfc_cap = dcbxcfg->pfc.pfccap;
+ pfc->pfc_en = dcbxcfg->pfc.pfcena;
+ pfc->mbc = dcbxcfg->pfc.mbc;
+ ice_get_pfc_delay(&pf->hw, &pfc->delay);
+
+ ice_for_each_traffic_class(i) {
+ pfc->requests[i] = pf->stats.priority_xoff_tx[i];
+ pfc->indications[i] = pf->stats.priority_xoff_rx[i];
+ }
+
+ return 0;
+}
+
+/**
+ * ice_dcbnl_setpfc - set local IEEE PFC config
+ * @netdev: pointer to relevant netdev
+ * @pfc: pointer to struct holding PFC config
+ */
+static int ice_dcbnl_setpfc(struct net_device *netdev, struct ieee_pfc *pfc)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_dcbx_cfg *new_cfg;
+ int err;
+
+ if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
+ !(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
+ return -EINVAL;
+
+ mutex_lock(&pf->tc_mutex);
+
+ new_cfg = &pf->hw.port_info->desired_dcbx_cfg;
+
+ if (pfc->pfc_cap)
+ new_cfg->pfc.pfccap = pfc->pfc_cap;
+ else
+ new_cfg->pfc.pfccap = pf->hw.func_caps.common_cap.maxtc;
+
+ new_cfg->pfc.pfcena = pfc->pfc_en;
+
+ err = ice_pf_dcb_cfg(pf, new_cfg, true);
+ if (err == ICE_DCB_HW_CHG_RST)
+ ice_dcbnl_devreset(netdev);
+ if (err == ICE_DCB_NO_HW_CHG)
+ err = ICE_DCB_HW_CHG_RST;
+ mutex_unlock(&pf->tc_mutex);
+ return err;
+}
+
+/**
+ * ice_dcbnl_get_pfc_cfg - Get CEE PFC config
+ * @netdev: pointer to netdev struct
+ * @prio: corresponding user priority
+ * @setting: the PFC setting for given priority
+ */
+static void
+ice_dcbnl_get_pfc_cfg(struct net_device *netdev, int prio, u8 *setting)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_port_info *pi = pf->hw.port_info;
+
+ if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
+ !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
+ return;
+
+ if (prio >= ICE_MAX_USER_PRIORITY)
+ return;
+
+ *setting = (pi->local_dcbx_cfg.pfc.pfcena >> prio) & 0x1;
+ dev_dbg(ice_pf_to_dev(pf),
+ "Get PFC Config up=%d, setting=%d, pfcenable=0x%x\n",
+ prio, *setting, pi->local_dcbx_cfg.pfc.pfcena);
+}
+
+/**
+ * ice_dcbnl_set_pfc_cfg - Set CEE PFC config
+ * @netdev: the corresponding netdev
+ * @prio: User Priority
+ * @set: PFC setting to apply
+ */
+static void ice_dcbnl_set_pfc_cfg(struct net_device *netdev, int prio, u8 set)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_dcbx_cfg *new_cfg;
+
+ if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
+ !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
+ return;
+
+ if (prio >= ICE_MAX_USER_PRIORITY)
+ return;
+
+ new_cfg = &pf->hw.port_info->desired_dcbx_cfg;
+
+ new_cfg->pfc.pfccap = pf->hw.func_caps.common_cap.maxtc;
+ if (set)
+ new_cfg->pfc.pfcena |= BIT(prio);
+ else
+ new_cfg->pfc.pfcena &= ~BIT(prio);
+
+ dev_dbg(ice_pf_to_dev(pf), "Set PFC config UP:%d set:%d pfcena:0x%x\n",
+ prio, set, new_cfg->pfc.pfcena);
+}
+
+/**
+ * ice_dcbnl_getpfcstate - get CEE PFC mode
+ * @netdev: pointer to netdev struct
+ */
+static u8 ice_dcbnl_getpfcstate(struct net_device *netdev)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_port_info *pi = pf->hw.port_info;
+
+ /* Return enabled if any UP enabled for PFC */
+ if (pi->local_dcbx_cfg.pfc.pfcena)
+ return 1;
+
+ return 0;
+}
+
+/**
+ * ice_dcbnl_getstate - get DCB enabled state
+ * @netdev: pointer to netdev struct
+ */
+static u8 ice_dcbnl_getstate(struct net_device *netdev)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ u8 state = 0;
+
+ state = test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
+
+ dev_dbg(ice_pf_to_dev(pf), "DCB enabled state = %d\n", state);
+ return state;
+}
+
+/**
+ * ice_dcbnl_setstate - Set CEE DCB state
+ * @netdev: pointer to relevant netdev
+ * @state: state value to set
+ */
+static u8 ice_dcbnl_setstate(struct net_device *netdev, u8 state)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+
+ if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
+ !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
+ return ICE_DCB_NO_HW_CHG;
+
+ /* Nothing to do */
+ if (!!state == test_bit(ICE_FLAG_DCB_ENA, pf->flags))
+ return ICE_DCB_NO_HW_CHG;
+
+ if (state) {
+ set_bit(ICE_FLAG_DCB_ENA, pf->flags);
+ memcpy(&pf->hw.port_info->desired_dcbx_cfg,
+ &pf->hw.port_info->local_dcbx_cfg,
+ sizeof(struct ice_dcbx_cfg));
+ } else {
+ clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
+ }
+
+ return ICE_DCB_HW_CHG;
+}
+
+/**
+ * ice_dcbnl_get_pg_tc_cfg_tx - get CEE PG Tx config
+ * @netdev: pointer to netdev struct
+ * @prio: the corresponding user priority
+ * @prio_type: traffic priority type
+ * @pgid: the BW group ID the traffic class belongs to
+ * @bw_pct: BW percentage for the corresponding BWG
+ * @up_map: prio mapped to corresponding TC
+ */
+static void
+ice_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int prio,
+ u8 __always_unused *prio_type, u8 *pgid,
+ u8 __always_unused *bw_pct,
+ u8 __always_unused *up_map)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_port_info *pi = pf->hw.port_info;
+
+ if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
+ !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
+ return;
+
+ if (prio >= ICE_MAX_USER_PRIORITY)
+ return;
+
+ *pgid = pi->local_dcbx_cfg.etscfg.prio_table[prio];
+ dev_dbg(ice_pf_to_dev(pf),
+ "Get PG config prio=%d tc=%d\n", prio, *pgid);
+}
+
+/**
+ * ice_dcbnl_set_pg_tc_cfg_tx - set CEE PG Tx config
+ * @netdev: pointer to relevant netdev
+ * @tc: the corresponding traffic class
+ * @prio_type: the traffic priority type
+ * @bwg_id: the BW group ID the TC belongs to
+ * @bw_pct: the BW percentage for the BWG
+ * @up_map: prio mapped to corresponding TC
+ */
+static void
+ice_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc,
+ u8 __always_unused prio_type,
+ u8 __always_unused bwg_id,
+ u8 __always_unused bw_pct, u8 up_map)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_dcbx_cfg *new_cfg;
+ int i;
+
+ if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
+ !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
+ return;
+
+ if (tc >= ICE_MAX_TRAFFIC_CLASS)
+ return;
+
+ new_cfg = &pf->hw.port_info->desired_dcbx_cfg;
+
+ /* prio_type, bwg_id and bw_pct per UP are not supported */
+
+ ice_for_each_traffic_class(i) {
+ if (up_map & BIT(i))
+ new_cfg->etscfg.prio_table[i] = tc;
+ }
+ new_cfg->etscfg.tsatable[tc] = ICE_IEEE_TSA_ETS;
+}
+
+/**
+ * ice_dcbnl_get_pg_bwg_cfg_tx - Get CEE PGBW config
+ * @netdev: pointer to the netdev struct
+ * @pgid: corresponding traffic class
+ * @bw_pct: the BW percentage for the corresponding TC
+ */
+static void
+ice_dcbnl_get_pg_bwg_cfg_tx(struct net_device *netdev, int pgid, u8 *bw_pct)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_port_info *pi = pf->hw.port_info;
+
+ if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
+ !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
+ return;
+
+ if (pgid >= ICE_MAX_TRAFFIC_CLASS)
+ return;
+
+ *bw_pct = pi->local_dcbx_cfg.etscfg.tcbwtable[pgid];
+ dev_dbg(ice_pf_to_dev(pf), "Get PG BW config tc=%d bw_pct=%d\n",
+ pgid, *bw_pct);
+}
+
+/**
+ * ice_dcbnl_set_pg_bwg_cfg_tx - set CEE PG Tx BW config
+ * @netdev: the corresponding netdev
+ * @pgid: Corresponding traffic class
+ * @bw_pct: the BW percentage for the specified TC
+ */
+static void
+ice_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int pgid, u8 bw_pct)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_dcbx_cfg *new_cfg;
+
+ if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
+ !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
+ return;
+
+ if (pgid >= ICE_MAX_TRAFFIC_CLASS)
+ return;
+
+ new_cfg = &pf->hw.port_info->desired_dcbx_cfg;
+
+ new_cfg->etscfg.tcbwtable[pgid] = bw_pct;
+}
+
+/**
+ * ice_dcbnl_get_pg_tc_cfg_rx - Get CEE PG Rx config
+ * @netdev: pointer to netdev struct
+ * @prio: the corresponding user priority
+ * @prio_type: the traffic priority type
+ * @pgid: the PG ID
+ * @bw_pct: the BW percentage for the corresponding BWG
+ * @up_map: prio mapped to corresponding TC
+ */
+static void
+ice_dcbnl_get_pg_tc_cfg_rx(struct net_device *netdev, int prio,
+ u8 __always_unused *prio_type, u8 *pgid,
+ u8 __always_unused *bw_pct,
+ u8 __always_unused *up_map)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_port_info *pi = pf->hw.port_info;
+
+ if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
+ !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
+ return;
+
+ if (prio >= ICE_MAX_USER_PRIORITY)
+ return;
+
+ *pgid = pi->local_dcbx_cfg.etscfg.prio_table[prio];
+}
+
+/**
+ * ice_dcbnl_get_pg_bwg_cfg_rx - Get CEE PG BW Rx config
+ * @netdev: pointer to netdev struct
+ * @pgid: the corresponding traffic class
+ * @bw_pct: the BW percentage for the corresponding TC
+ */
+static void
+ice_dcbnl_get_pg_bwg_cfg_rx(struct net_device *netdev, int __always_unused pgid,
+ u8 *bw_pct)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+
+ if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
+ !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
+ return;
+
+ *bw_pct = 0;
+}
+
+/**
+ * ice_dcbnl_get_cap - Get DCBX capabilities of adapter
+ * @netdev: pointer to netdev struct
+ * @capid: the capability type
+ * @cap: the capability value
+ */
+static u8 ice_dcbnl_get_cap(struct net_device *netdev, int capid, u8 *cap)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+
+ if (!(test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags)))
+ return ICE_DCB_NO_HW_CHG;
+
+ switch (capid) {
+ case DCB_CAP_ATTR_PG:
+ *cap = true;
+ break;
+ case DCB_CAP_ATTR_PFC:
+ *cap = true;
+ break;
+ case DCB_CAP_ATTR_UP2TC:
+ *cap = false;
+ break;
+ case DCB_CAP_ATTR_PG_TCS:
+ *cap = 0x80;
+ break;
+ case DCB_CAP_ATTR_PFC_TCS:
+ *cap = 0x80;
+ break;
+ case DCB_CAP_ATTR_GSP:
+ *cap = false;
+ break;
+ case DCB_CAP_ATTR_BCN:
+ *cap = false;
+ break;
+ case DCB_CAP_ATTR_DCBX:
+ *cap = pf->dcbx_cap;
+ break;
+ default:
+ *cap = false;
+ break;
+ }
+
+ dev_dbg(ice_pf_to_dev(pf), "DCBX Get Capability cap=%d capval=0x%x\n",
+ capid, *cap);
+ return 0;
+}
+
+/**
+ * ice_dcbnl_getapp - get CEE APP
+ * @netdev: pointer to netdev struct
+ * @idtype: the App selector
+ * @id: the App ethtype or port number
+ */
+static int ice_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct dcb_app app = {
+ .selector = idtype,
+ .protocol = id,
+ };
+
+ if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
+ !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
+ return -EINVAL;
+
+ return dcb_getapp(netdev, &app);
+}
+
+/**
+ * ice_dcbnl_find_app - Search for APP in given DCB config
+ * @cfg: struct to hold DCBX config
+ * @app: struct to hold app data to look for
+ */
+static bool
+ice_dcbnl_find_app(struct ice_dcbx_cfg *cfg,
+ struct ice_dcb_app_priority_table *app)
+{
+ int i;
+
+ for (i = 0; i < cfg->numapps; i++) {
+ if (app->selector == cfg->app[i].selector &&
+ app->prot_id == cfg->app[i].prot_id &&
+ app->priority == cfg->app[i].priority)
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * ice_dcbnl_setapp - set local IEEE App config
+ * @netdev: relevant netdev struct
+ * @app: struct to hold app config info
+ */
+static int ice_dcbnl_setapp(struct net_device *netdev, struct dcb_app *app)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_dcb_app_priority_table new_app;
+ struct ice_dcbx_cfg *old_cfg, *new_cfg;
+ int ret;
+
+ if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
+ !(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
+ return -EINVAL;
+
+ mutex_lock(&pf->tc_mutex);
+
+ new_cfg = &pf->hw.port_info->desired_dcbx_cfg;
+
+ old_cfg = &pf->hw.port_info->local_dcbx_cfg;
+
+ if (old_cfg->numapps == ICE_DCBX_MAX_APPS) {
+ ret = -EINVAL;
+ goto setapp_out;
+ }
+
+ ret = dcb_ieee_setapp(netdev, app);
+ if (ret)
+ goto setapp_out;
+
+ new_app.selector = app->selector;
+ new_app.prot_id = app->protocol;
+ new_app.priority = app->priority;
+ if (ice_dcbnl_find_app(old_cfg, &new_app)) {
+ ret = 0;
+ goto setapp_out;
+ }
+
+ new_cfg->app[new_cfg->numapps++] = new_app;
+ ret = ice_pf_dcb_cfg(pf, new_cfg, true);
+ /* return of zero indicates new cfg applied */
+ if (ret == ICE_DCB_HW_CHG_RST)
+ ice_dcbnl_devreset(netdev);
+ if (ret == ICE_DCB_NO_HW_CHG)
+ ret = ICE_DCB_HW_CHG_RST;
+
+setapp_out:
+ mutex_unlock(&pf->tc_mutex);
+ return ret;
+}
+
+/**
+ * ice_dcbnl_delapp - Delete local IEEE App config
+ * @netdev: relevant netdev
+ * @app: struct to hold app to delete
+ *
+ * Will not delete the first application, which is required by the FW
+ */
+static int ice_dcbnl_delapp(struct net_device *netdev, struct dcb_app *app)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_dcbx_cfg *old_cfg, *new_cfg;
+ int i, j, ret = 0;
+
+ if (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED)
+ return -EINVAL;
+
+ mutex_lock(&pf->tc_mutex);
+ ret = dcb_ieee_delapp(netdev, app);
+ if (ret)
+ goto delapp_out;
+
+ old_cfg = &pf->hw.port_info->local_dcbx_cfg;
+
+ if (old_cfg->numapps == 1)
+ goto delapp_out;
+
+ new_cfg = &pf->hw.port_info->desired_dcbx_cfg;
+
+ for (i = 1; i < new_cfg->numapps; i++) {
+ if (app->selector == new_cfg->app[i].selector &&
+ app->protocol == new_cfg->app[i].prot_id &&
+ app->priority == new_cfg->app[i].priority) {
+ new_cfg->app[i].selector = 0;
+ new_cfg->app[i].prot_id = 0;
+ new_cfg->app[i].priority = 0;
+ break;
+ }
+ }
+
+ /* Did not find DCB App */
+ if (i == new_cfg->numapps) {
+ ret = -EINVAL;
+ goto delapp_out;
+ }
+
+ new_cfg->numapps--;
+
+ for (j = i; j < new_cfg->numapps; j++) {
+ new_cfg->app[j].selector = old_cfg->app[j + 1].selector;
+ new_cfg->app[j].prot_id = old_cfg->app[j + 1].prot_id;
+ new_cfg->app[j].priority = old_cfg->app[j + 1].priority;
+ }
+
+ ret = ice_pf_dcb_cfg(pf, new_cfg, true);
+ /* return of zero indicates new cfg applied */
+ if (ret == ICE_DCB_HW_CHG_RST)
+ ice_dcbnl_devreset(netdev);
+ if (ret == ICE_DCB_NO_HW_CHG)
+ ret = ICE_DCB_HW_CHG_RST;
+
+delapp_out:
+ mutex_unlock(&pf->tc_mutex);
+ return ret;
+}
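
The shift-down loop above compacts the app table after a deletion; an equivalent sketch using memmove() (illustrative only, not the driver code):

	/* After new_cfg->numapps has been decremented, move entries
	 * i+1..numapps of the old table down one slot to close the gap.
	 */
	memmove(&new_cfg->app[i], &old_cfg->app[i + 1],
		(new_cfg->numapps - i) * sizeof(new_cfg->app[0]));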
+
+/**
+ * ice_dcbnl_cee_set_all - Commit CEE DCB settings to HW
+ * @netdev: the corresponding netdev
+ */
+static u8 ice_dcbnl_cee_set_all(struct net_device *netdev)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_dcbx_cfg *new_cfg;
+ int err;
+
+ if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
+ !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
+ return ICE_DCB_NO_HW_CHG;
+
+ new_cfg = &pf->hw.port_info->desired_dcbx_cfg;
+
+ mutex_lock(&pf->tc_mutex);
+
+ err = ice_pf_dcb_cfg(pf, new_cfg, true);
+
+ mutex_unlock(&pf->tc_mutex);
+ return (err != ICE_DCB_HW_CHG_RST) ? ICE_DCB_NO_HW_CHG : err;
+}
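
Each CEE handler above opens with the same ownership test: requests are rejected when LLDP is firmware-managed or CEE DCBX has not been negotiated. A hypothetical helper expressing that test once (not part of this patch) would read:

	static bool ice_dcbx_cee_host_owned(struct ice_pf *pf)
	{
		return !(pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) &&
		       (pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE);
	}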
+
+static const struct dcbnl_rtnl_ops dcbnl_ops = {
+ /* IEEE 802.1Qaz std */
+ .ieee_getets = ice_dcbnl_getets,
+ .ieee_setets = ice_dcbnl_setets,
+ .ieee_getpfc = ice_dcbnl_getpfc,
+ .ieee_setpfc = ice_dcbnl_setpfc,
+ .ieee_setapp = ice_dcbnl_setapp,
+ .ieee_delapp = ice_dcbnl_delapp,
+
+ /* CEE std */
+ .getstate = ice_dcbnl_getstate,
+ .setstate = ice_dcbnl_setstate,
+ .getpermhwaddr = ice_dcbnl_get_perm_hw_addr,
+ .setpgtccfgtx = ice_dcbnl_set_pg_tc_cfg_tx,
+ .setpgbwgcfgtx = ice_dcbnl_set_pg_bwg_cfg_tx,
+ .getpgtccfgtx = ice_dcbnl_get_pg_tc_cfg_tx,
+ .getpgbwgcfgtx = ice_dcbnl_get_pg_bwg_cfg_tx,
+ .getpgtccfgrx = ice_dcbnl_get_pg_tc_cfg_rx,
+ .getpgbwgcfgrx = ice_dcbnl_get_pg_bwg_cfg_rx,
+ .setpfccfg = ice_dcbnl_set_pfc_cfg,
+ .getpfccfg = ice_dcbnl_get_pfc_cfg,
+ .setall = ice_dcbnl_cee_set_all,
+ .getcap = ice_dcbnl_get_cap,
+ .getnumtcs = ice_dcbnl_getnumtcs,
+ .getpfcstate = ice_dcbnl_getpfcstate,
+ .getapp = ice_dcbnl_getapp,
+
+ /* DCBX configuration */
+ .getdcbx = ice_dcbnl_getdcbx,
+ .setdcbx = ice_dcbnl_setdcbx,
+};
+
+/**
+ * ice_dcbnl_set_all - set all the apps and IEEE data from DCBX config
+ * @vsi: pointer to VSI struct
+ */
+void ice_dcbnl_set_all(struct ice_vsi *vsi)
+{
+ struct net_device *netdev = vsi->netdev;
+ struct ice_dcbx_cfg *dcbxcfg;
+ struct ice_port_info *pi;
+ struct dcb_app sapp;
+ struct ice_pf *pf;
+ int i;
+
+ if (!netdev)
+ return;
+
+ pf = ice_netdev_to_pf(netdev);
+ pi = pf->hw.port_info;
+
+ /* SW DCB taken care of by SW Default Config */
+ if (pf->dcbx_cap & DCB_CAP_DCBX_HOST)
+ return;
+
+ /* DCB not enabled */
+ if (!test_bit(ICE_FLAG_DCB_ENA, pf->flags))
+ return;
+
+ dcbxcfg = &pi->local_dcbx_cfg;
+
+ for (i = 0; i < dcbxcfg->numapps; i++) {
+ u8 prio, tc_map;
+
+ prio = dcbxcfg->app[i].priority;
+ tc_map = BIT(dcbxcfg->etscfg.prio_table[prio]);
+
+ /* Add APP only if the TC is enabled for this VSI */
+ if (tc_map & vsi->tc_cfg.ena_tc) {
+ sapp.selector = dcbxcfg->app[i].selector;
+ sapp.protocol = dcbxcfg->app[i].prot_id;
+ sapp.priority = prio;
+ dcb_ieee_setapp(netdev, &sapp);
+ }
+ }
+ /* Notify user-space of the changes */
+ dcbnl_ieee_notify(netdev, RTM_SETDCB, DCB_CMD_IEEE_SET, 0, 0);
+}
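
A worked example of the tc_map test above, under an assumed ETS configuration:

	/* Assume dcbxcfg->etscfg.prio_table[3] == 1, i.e. user priority 3
	 * maps to TC 1. Then tc_map == BIT(1) == 0x2, and the app entry is
	 * registered only if bit 1 is set in vsi->tc_cfg.ena_tc.
	 */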
+
+/**
+ * ice_dcbnl_vsi_del_app - Delete APP on all VSIs
+ * @vsi: pointer to the main VSI
+ * @app: APP to delete
+ *
+ * Delete given APP from all the VSIs for given PF
+ */
+static void
+ice_dcbnl_vsi_del_app(struct ice_vsi *vsi,
+ struct ice_dcb_app_priority_table *app)
+{
+ struct dcb_app sapp;
+ int err;
+
+ sapp.selector = app->selector;
+ sapp.protocol = app->prot_id;
+ sapp.priority = app->priority;
+ err = ice_dcbnl_delapp(vsi->netdev, &sapp);
+ dev_dbg(&vsi->back->pdev->dev,
+ "Deleting app for VSI idx=%d err=%d sel=%d proto=0x%x, prio=%d\n",
+ vsi->idx, err, app->selector, app->prot_id, app->priority);
+}
+
+/**
+ * ice_dcbnl_flush_apps - Delete all removed APPs
+ * @pf: the corresponding PF
+ * @old_cfg: old DCBX configuration data
+ * @new_cfg: new DCBX configuration data
+ *
+ * Find and delete all APPS that are not present in the passed
+ * DCB configuration
+ */
+void
+ice_dcbnl_flush_apps(struct ice_pf *pf, struct ice_dcbx_cfg *old_cfg,
+ struct ice_dcbx_cfg *new_cfg)
+{
+ struct ice_vsi *main_vsi = ice_get_main_vsi(pf);
+ int i;
+
+ if (!main_vsi)
+ return;
+
+ for (i = 0; i < old_cfg->numapps; i++) {
+ struct ice_dcb_app_priority_table app = old_cfg->app[i];
+
+ /* The APP is not available anymore, delete it */
+ if (!ice_dcbnl_find_app(new_cfg, &app))
+ ice_dcbnl_vsi_del_app(main_vsi, &app);
+ }
+}
+
+/**
+ * ice_dcbnl_setup - setup DCBNL
+ * @vsi: VSI to get associated netdev from
+ */
+void ice_dcbnl_setup(struct ice_vsi *vsi)
+{
+ struct net_device *netdev = vsi->netdev;
+ struct ice_pf *pf;
+
+ pf = ice_netdev_to_pf(netdev);
+ if (!test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags))
+ return;
+
+ netdev->dcbnl_ops = &dcbnl_ops;
+ ice_dcbnl_set_all(vsi);
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_nl.h b/drivers/net/ethernet/intel/ice/ice_dcb_nl.h
new file mode 100644
index 000000000000..6c630a362293
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_nl.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019, Intel Corporation. */
+
+#ifndef _ICE_DCB_NL_H_
+#define _ICE_DCB_NL_H_
+
+#ifdef CONFIG_DCB
+void ice_dcbnl_setup(struct ice_vsi *vsi);
+void ice_dcbnl_set_all(struct ice_vsi *vsi);
+void
+ice_dcbnl_flush_apps(struct ice_pf *pf, struct ice_dcbx_cfg *old_cfg,
+ struct ice_dcbx_cfg *new_cfg);
+#else
+#define ice_dcbnl_setup(vsi) do {} while (0)
+#define ice_dcbnl_set_all(vsi) do {} while (0)
+#define ice_dcbnl_flush_apps(pf, old_cfg, new_cfg) do {} while (0)
+#endif /* CONFIG_DCB */
+
+#endif /* _ICE_DCB_NL_H_ */
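
When CONFIG_DCB is disabled, the header stubs the API out with empty macros. The more common kernel idiom is static inline stubs, which keep argument type checking; a sketch of that alternative for the same declarations:

	static inline void ice_dcbnl_setup(struct ice_vsi *vsi) { }
	static inline void ice_dcbnl_set_all(struct ice_vsi *vsi) { }
	static inline void
	ice_dcbnl_flush_apps(struct ice_pf *pf, struct ice_dcbx_cfg *old_cfg,
			     struct ice_dcbx_cfg *new_cfg) { }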
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 7e23034df955..aec3c6c379df 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -156,6 +156,7 @@ struct ice_priv_flag {
static const struct ice_priv_flag ice_gstrings_priv_flags[] = {
ICE_PRIV_FLAG("link-down-on-close", ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA),
ICE_PRIV_FLAG("fw-lldp-agent", ICE_FLAG_FW_LLDP_AGENT),
+ ICE_PRIV_FLAG("legacy-rx", ICE_FLAG_LEGACY_RX),
};
#define ICE_PRIV_FLAG_ARRAY_SIZE ARRAY_SIZE(ice_gstrings_priv_flags)
@@ -247,7 +248,7 @@ ice_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
int ret = 0;
u16 *buf;
- dev = &pf->pdev->dev;
+ dev = ice_pf_to_dev(pf);
eeprom->magic = hw->vendor_id | (hw->device_id << 16);
@@ -342,6 +343,7 @@ static u64 ice_eeprom_test(struct net_device *netdev)
static int ice_reg_pattern_test(struct ice_hw *hw, u32 reg, u32 mask)
{
struct ice_pf *pf = (struct ice_pf *)hw->back;
+ struct device *dev = ice_pf_to_dev(pf);
static const u32 patterns[] = {
0x5A5A5A5A, 0xA5A5A5A5,
0x00000000, 0xFFFFFFFF
@@ -357,7 +359,7 @@ static int ice_reg_pattern_test(struct ice_hw *hw, u32 reg, u32 mask)
val = rd32(hw, reg);
if (val == pattern)
continue;
- dev_err(&pf->pdev->dev,
+ dev_err(dev,
"%s: reg pattern test failed - reg 0x%08x pat 0x%08x val 0x%08x\n"
, __func__, reg, pattern, val);
return 1;
@@ -366,7 +368,7 @@ static int ice_reg_pattern_test(struct ice_hw *hw, u32 reg, u32 mask)
wr32(hw, reg, orig_val);
val = rd32(hw, reg);
if (val != orig_val) {
- dev_err(&pf->pdev->dev,
+ dev_err(dev,
"%s: reg restore test failed - reg 0x%08x orig 0x%08x val 0x%08x\n"
, __func__, reg, orig_val, val);
return 1;
@@ -506,7 +508,7 @@ static int ice_lbtest_create_frame(struct ice_pf *pf, u8 **ret_data, u16 size)
if (!pf)
return -EINVAL;
- data = devm_kzalloc(&pf->pdev->dev, size, GFP_KERNEL);
+ data = devm_kzalloc(ice_pf_to_dev(pf), size, GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -623,7 +625,7 @@ static int ice_lbtest_receive_frames(struct ice_ring *rx_ring)
continue;
rx_buf = &rx_ring->rx_buf[i];
- received_buf = page_address(rx_buf->page);
+ received_buf = page_address(rx_buf->page) + rx_buf->page_offset;
if (ice_lbtest_check_frame(received_buf))
valid_frames++;
@@ -648,9 +650,11 @@ static u64 ice_loopback_test(struct net_device *netdev)
u8 broadcast[ETH_ALEN], ret = 0;
int num_frames, valid_frames;
LIST_HEAD(tmp_list);
+ struct device *dev;
u8 *tx_frame;
int i;
+ dev = ice_pf_to_dev(pf);
netdev_info(netdev, "loopback test\n");
test_vsi = ice_lb_vsi_setup(pf, pf->hw.port_info);
@@ -711,12 +715,12 @@ static u64 ice_loopback_test(struct net_device *netdev)
ret = 10;
lbtest_free_frame:
- devm_kfree(&pf->pdev->dev, tx_frame);
+ devm_kfree(dev, tx_frame);
remove_mac_filters:
if (ice_remove_mac(&pf->hw, &tmp_list))
netdev_err(netdev, "Could not remove MAC filter for the test VSI");
free_mac_list:
- ice_free_fltr_list(&pf->pdev->dev, &tmp_list);
+ ice_free_fltr_list(dev, &tmp_list);
lbtest_mac_dis:
/* Disable MAC loopback after the test is completed. */
if (ice_aq_set_mac_loopback(&pf->hw, false, NULL))
@@ -773,6 +777,9 @@ ice_self_test(struct net_device *netdev, struct ethtool_test *eth_test,
struct ice_netdev_priv *np = netdev_priv(netdev);
bool if_running = netif_running(netdev);
struct ice_pf *pf = np->vsi->back;
+ struct device *dev;
+
+ dev = ice_pf_to_dev(pf);
if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
netdev_info(netdev, "offline testing starting\n");
@@ -780,7 +787,7 @@ ice_self_test(struct net_device *netdev, struct ethtool_test *eth_test,
set_bit(__ICE_TESTING, pf->state);
if (ice_active_vfs(pf)) {
- dev_warn(&pf->pdev->dev,
+ dev_warn(dev,
"Please take active VFs and Netqueues offline and restart the adapter before running NIC diagnostics\n");
data[ICE_ETH_TEST_REG] = 1;
data[ICE_ETH_TEST_EEPROM] = 1;
@@ -815,8 +822,7 @@ ice_self_test(struct net_device *netdev, struct ethtool_test *eth_test,
int status = ice_open(netdev);
if (status) {
- dev_err(&pf->pdev->dev,
- "Could not open device %s, err %d",
+ dev_err(dev, "Could not open device %s, err %d",
pf->int_name, status);
}
}
@@ -961,7 +967,7 @@ static int ice_set_fec_cfg(struct net_device *netdev, enum ice_fec_mode req_fec)
}
/* Get last SW configuration */
- caps = devm_kzalloc(&vsi->back->pdev->dev, sizeof(*caps), GFP_KERNEL);
+ caps = kzalloc(sizeof(*caps), GFP_KERNEL);
if (!caps)
return -ENOMEM;
@@ -1006,7 +1012,7 @@ static int ice_set_fec_cfg(struct net_device *netdev, enum ice_fec_mode req_fec)
}
done:
- devm_kfree(&vsi->back->pdev->dev, caps);
+ kfree(caps);
return err;
}
@@ -1082,7 +1088,7 @@ ice_get_fecparam(struct net_device *netdev, struct ethtool_fecparam *fecparam)
break;
}
- caps = devm_kzalloc(&vsi->back->pdev->dev, sizeof(*caps), GFP_KERNEL);
+ caps = kzalloc(sizeof(*caps), GFP_KERNEL);
if (!caps)
return -ENOMEM;
@@ -1109,7 +1115,7 @@ ice_get_fecparam(struct net_device *netdev, struct ethtool_fecparam *fecparam)
fecparam->fec |= ETHTOOL_FEC_OFF;
done:
- devm_kfree(&vsi->back->pdev->dev, caps);
+ kfree(caps);
return err;
}
@@ -1154,12 +1160,14 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
DECLARE_BITMAP(orig_flags, ICE_PF_FLAGS_NBITS);
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
+ struct device *dev;
int ret = 0;
u32 i;
if (flags > BIT(ICE_PRIV_FLAG_ARRAY_SIZE))
return -EINVAL;
+ dev = ice_pf_to_dev(pf);
set_bit(ICE_FLAG_ETHTOOL_CTXT, pf->flags);
bitmap_copy(orig_flags, pf->flags, ICE_PF_FLAGS_NBITS);
@@ -1188,7 +1196,7 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
* events to respond to.
*/
if (status)
- dev_info(&pf->pdev->dev,
+ dev_info(dev,
"Failed to unreg for LLDP events\n");
/* The AQ call to stop the FW LLDP agent will generate
@@ -1196,20 +1204,14 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
*/
status = ice_aq_stop_lldp(&pf->hw, true, true, NULL);
if (status)
- dev_warn(&pf->pdev->dev,
- "Fail to stop LLDP agent\n");
+ dev_warn(dev, "Fail to stop LLDP agent\n");
/* Use case for having the FW LLDP agent stopped
* will likely not need DCB, so failure to init is
* not a concern of ethtool
*/
status = ice_init_pf_dcb(pf, true);
if (status)
- dev_warn(&pf->pdev->dev, "Fail to init DCB\n");
-
- /* Forward LLDP packets to default VSI so that they
- * are passed up the stack
- */
- ice_cfg_sw_lldp(vsi, false, true);
+ dev_warn(dev, "Fail to init DCB\n");
} else {
enum ice_status status;
bool dcbx_agent_status;
@@ -1219,8 +1221,7 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
*/
status = ice_aq_start_lldp(&pf->hw, true, NULL);
if (status)
- dev_warn(&pf->pdev->dev,
- "Fail to start LLDP Agent\n");
+ dev_warn(dev, "Fail to start LLDP Agent\n");
/* AQ command to start FW DCBX agent will fail if
* the agent is already started
@@ -1229,10 +1230,9 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
&dcbx_agent_status,
NULL);
if (status)
- dev_dbg(&pf->pdev->dev,
- "Failed to start FW DCBX\n");
+ dev_dbg(dev, "Failed to start FW DCBX\n");
- dev_info(&pf->pdev->dev, "FW DCBX agent is %s\n",
+ dev_info(dev, "FW DCBX agent is %s\n",
dcbx_agent_status ? "ACTIVE" : "DISABLED");
/* Failure to configure MIB change or init DCB is not
@@ -1242,7 +1242,7 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
*/
status = ice_init_pf_dcb(pf, true);
if (status)
- dev_dbg(&pf->pdev->dev, "Fail to init DCB\n");
+ dev_dbg(dev, "Fail to init DCB\n");
/* Remove rule to direct LLDP packets to default VSI.
* The FW LLDP engine will now be consuming them.
@@ -1252,10 +1252,15 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
/* Register for MIB change events */
status = ice_cfg_lldp_mib_change(&pf->hw, true);
if (status)
- dev_dbg(&pf->pdev->dev,
+ dev_dbg(dev,
"Fail to enable MIB change events\n");
}
}
+ if (test_bit(ICE_FLAG_LEGACY_RX, change_flags)) {
+ /* down and up VSI so that changes of Rx cfg are reflected. */
+ ice_down(vsi);
+ ice_up(vsi);
+ }
clear_bit(ICE_FLAG_ETHTOOL_CTXT, pf->flags);
return ret;
}
@@ -2140,7 +2145,7 @@ ice_get_link_ksettings(struct net_device *netdev,
/* flow control is symmetric and always supported */
ethtool_link_ksettings_add_link_mode(ks, supported, Pause);
- caps = devm_kzalloc(&vsi->back->pdev->dev, sizeof(*caps), GFP_KERNEL);
+ caps = kzalloc(sizeof(*caps), GFP_KERNEL);
if (!caps)
return -ENOMEM;
@@ -2198,7 +2203,7 @@ ice_get_link_ksettings(struct net_device *netdev,
ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS);
done:
- devm_kfree(&vsi->back->pdev->dev, caps);
+ kfree(caps);
return err;
}
@@ -2427,8 +2432,7 @@ ice_set_link_ksettings(struct net_device *netdev,
usleep_range(TEST_SET_BITS_SLEEP_MIN, TEST_SET_BITS_SLEEP_MAX);
}
- abilities = devm_kzalloc(&pf->pdev->dev, sizeof(*abilities),
- GFP_KERNEL);
+ abilities = kzalloc(sizeof(*abilities), GFP_KERNEL);
if (!abilities)
return -ENOMEM;
@@ -2520,7 +2524,7 @@ ice_set_link_ksettings(struct net_device *netdev,
}
done:
- devm_kfree(&pf->pdev->dev, abilities);
+ kfree(abilities);
clear_bit(__ICE_CFG_BUSY, pf->state);
return err;
@@ -2577,6 +2581,7 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
{
struct ice_ring *tx_rings = NULL, *rx_rings = NULL;
struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_ring *xdp_rings = NULL;
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
int i, timeout = 50, err = 0;
@@ -2611,6 +2616,13 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
return 0;
}
+ /* If there is an AF_XDP UMEM attached to any of the Rx rings,
+ * disallow changing the number of descriptors -- regardless of
+ * whether the netdev is running.
+ */
+ if (ice_xsk_any_rx_ring_ena(vsi))
+ return -EBUSY;
+
while (test_and_set_bit(__ICE_CFG_BUSY, pf->state)) {
timeout--;
if (!timeout)
@@ -2624,6 +2636,11 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
vsi->tx_rings[i]->count = new_tx_cnt;
for (i = 0; i < vsi->alloc_rxq; i++)
vsi->rx_rings[i]->count = new_rx_cnt;
+ if (ice_is_xdp_ena_vsi(vsi))
+ for (i = 0; i < vsi->num_xdp_txq; i++)
+ vsi->xdp_rings[i]->count = new_tx_cnt;
+ vsi->num_tx_desc = new_tx_cnt;
+ vsi->num_rx_desc = new_rx_cnt;
netdev_dbg(netdev, "Link is down, descriptor count change happens when link is brought up\n");
goto done;
}
@@ -2635,14 +2652,13 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
netdev_info(netdev, "Changing Tx descriptor count from %d to %d\n",
vsi->tx_rings[0]->count, new_tx_cnt);
- tx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq,
- sizeof(*tx_rings), GFP_KERNEL);
+ tx_rings = kcalloc(vsi->num_txq, sizeof(*tx_rings), GFP_KERNEL);
if (!tx_rings) {
err = -ENOMEM;
goto done;
}
- for (i = 0; i < vsi->alloc_txq; i++) {
+ ice_for_each_txq(vsi, i) {
/* clone ring and setup updated count */
tx_rings[i] = *vsi->tx_rings[i];
tx_rings[i].count = new_tx_cnt;
@@ -2650,15 +2666,42 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
tx_rings[i].tx_buf = NULL;
err = ice_setup_tx_ring(&tx_rings[i]);
if (err) {
- while (i) {
- i--;
+ while (i--)
ice_clean_tx_ring(&tx_rings[i]);
- }
- devm_kfree(&pf->pdev->dev, tx_rings);
+ kfree(tx_rings);
goto done;
}
}
+ if (!ice_is_xdp_ena_vsi(vsi))
+ goto process_rx;
+
+ /* alloc updated XDP resources */
+ netdev_info(netdev, "Changing XDP descriptor count from %d to %d\n",
+ vsi->xdp_rings[0]->count, new_tx_cnt);
+
+ xdp_rings = kcalloc(vsi->num_xdp_txq, sizeof(*xdp_rings), GFP_KERNEL);
+ if (!xdp_rings) {
+ err = -ENOMEM;
+ goto free_tx;
+ }
+
+ for (i = 0; i < vsi->num_xdp_txq; i++) {
+ /* clone ring and setup updated count */
+ xdp_rings[i] = *vsi->xdp_rings[i];
+ xdp_rings[i].count = new_tx_cnt;
+ xdp_rings[i].desc = NULL;
+ xdp_rings[i].tx_buf = NULL;
+ err = ice_setup_tx_ring(&xdp_rings[i]);
+ if (err) {
+ while (i--)
+ ice_clean_tx_ring(&xdp_rings[i]);
+ kfree(xdp_rings);
+ goto free_tx;
+ }
+ ice_set_ring_xdp(&xdp_rings[i]);
+ }
+
process_rx:
if (new_rx_cnt == vsi->rx_rings[0]->count)
goto process_link;
@@ -2667,14 +2710,13 @@ process_rx:
netdev_info(netdev, "Changing Rx descriptor count from %d to %d\n",
vsi->rx_rings[0]->count, new_rx_cnt);
- rx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_rxq,
- sizeof(*rx_rings), GFP_KERNEL);
+ rx_rings = kcalloc(vsi->num_rxq, sizeof(*rx_rings), GFP_KERNEL);
if (!rx_rings) {
err = -ENOMEM;
goto done;
}
- for (i = 0; i < vsi->alloc_rxq; i++) {
+ ice_for_each_rxq(vsi, i) {
/* clone ring and setup updated count */
rx_rings[i] = *vsi->rx_rings[i];
rx_rings[i].count = new_rx_cnt;
@@ -2698,7 +2740,7 @@ rx_unwind:
i--;
ice_free_rx_ring(&rx_rings[i]);
}
- devm_kfree(&pf->pdev->dev, rx_rings);
+ kfree(rx_rings);
err = -ENOMEM;
goto free_tx;
}
@@ -2712,15 +2754,15 @@ process_link:
ice_down(vsi);
if (tx_rings) {
- for (i = 0; i < vsi->alloc_txq; i++) {
+ ice_for_each_txq(vsi, i) {
ice_free_tx_ring(vsi->tx_rings[i]);
*vsi->tx_rings[i] = tx_rings[i];
}
- devm_kfree(&pf->pdev->dev, tx_rings);
+ kfree(tx_rings);
}
if (rx_rings) {
- for (i = 0; i < vsi->alloc_rxq; i++) {
+ ice_for_each_rxq(vsi, i) {
ice_free_rx_ring(vsi->rx_rings[i]);
/* copy the real tail offset */
rx_rings[i].tail = vsi->rx_rings[i]->tail;
@@ -2734,9 +2776,19 @@ process_link:
rx_rings[i].next_to_alloc = 0;
*vsi->rx_rings[i] = rx_rings[i];
}
- devm_kfree(&pf->pdev->dev, rx_rings);
+ kfree(rx_rings);
+ }
+
+ if (xdp_rings) {
+ for (i = 0; i < vsi->num_xdp_txq; i++) {
+ ice_free_tx_ring(vsi->xdp_rings[i]);
+ *vsi->xdp_rings[i] = xdp_rings[i];
+ }
+ kfree(xdp_rings);
}
+ vsi->num_tx_desc = new_tx_cnt;
+ vsi->num_rx_desc = new_rx_cnt;
ice_up(vsi);
}
goto done;
@@ -2744,9 +2796,9 @@ process_link:
free_tx:
/* error cleanup if the Rx allocations failed after getting Tx */
if (tx_rings) {
- for (i = 0; i < vsi->alloc_txq; i++)
+ ice_for_each_txq(vsi, i)
ice_free_tx_ring(&tx_rings[i]);
- devm_kfree(&pf->pdev->dev, tx_rings);
+ kfree(tx_rings);
}
done:
@@ -2794,7 +2846,6 @@ ice_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_port_info *pi = np->vsi->port_info;
struct ice_aqc_get_phy_caps_data *pcaps;
- struct ice_vsi *vsi = np->vsi;
struct ice_dcbx_cfg *dcbx_cfg;
enum ice_status status;
@@ -2804,8 +2855,7 @@ ice_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
dcbx_cfg = &pi->local_dcbx_cfg;
- pcaps = devm_kzalloc(&vsi->back->pdev->dev, sizeof(*pcaps),
- GFP_KERNEL);
+ pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
if (!pcaps)
return;
@@ -2828,7 +2878,7 @@ ice_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
pause->rx_pause = 1;
out:
- devm_kfree(&vsi->back->pdev->dev, pcaps);
+ kfree(pcaps);
}
/**
@@ -3009,7 +3059,7 @@ ice_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc)
return -EIO;
}
- lut = devm_kzalloc(&pf->pdev->dev, vsi->rss_table_size, GFP_KERNEL);
+ lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
if (!lut)
return -ENOMEM;
@@ -3022,7 +3072,7 @@ ice_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc)
indir[i] = (u32)(lut[i]);
out:
- devm_kfree(&pf->pdev->dev, lut);
+ kfree(lut);
return ret;
}
@@ -3043,8 +3093,10 @@ ice_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key,
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
+ struct device *dev;
u8 *seed = NULL;
+ dev = ice_pf_to_dev(pf);
if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
@@ -3057,8 +3109,7 @@ ice_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key,
if (key) {
if (!vsi->rss_hkey_user) {
vsi->rss_hkey_user =
- devm_kzalloc(&pf->pdev->dev,
- ICE_VSIQF_HKEY_ARRAY_SIZE,
+ devm_kzalloc(dev, ICE_VSIQF_HKEY_ARRAY_SIZE,
GFP_KERNEL);
if (!vsi->rss_hkey_user)
return -ENOMEM;
@@ -3068,8 +3119,7 @@ ice_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key,
}
if (!vsi->rss_lut_user) {
- vsi->rss_lut_user = devm_kzalloc(&pf->pdev->dev,
- vsi->rss_table_size,
+ vsi->rss_lut_user = devm_kzalloc(dev, vsi->rss_table_size,
GFP_KERNEL);
if (!vsi->rss_lut_user)
return -ENOMEM;
@@ -3092,6 +3142,188 @@ ice_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key,
return 0;
}
+/**
+ * ice_get_max_txq - return the maximum number of Tx queues in a PF
+ * @pf: PF structure
+ */
+static int ice_get_max_txq(struct ice_pf *pf)
+{
+ return min_t(int, num_online_cpus(),
+ pf->hw.func_caps.common_cap.num_txq);
+}
+
+/**
+ * ice_get_max_rxq - return the maximum number of Rx queues in a PF
+ * @pf: PF structure
+ */
+static int ice_get_max_rxq(struct ice_pf *pf)
+{
+ return min_t(int, num_online_cpus(),
+ pf->hw.func_caps.common_cap.num_rxq);
+}
+
+/**
+ * ice_get_combined_cnt - return the current number of combined channels
+ * @vsi: PF VSI pointer
+ *
+ * Go through all queue vectors and count the ones that have both an Rx
+ * and a Tx ring attached
+ */
+static u32 ice_get_combined_cnt(struct ice_vsi *vsi)
+{
+ u32 combined = 0;
+ int q_idx;
+
+ ice_for_each_q_vector(vsi, q_idx) {
+ struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
+
+ if (q_vector->rx.ring && q_vector->tx.ring)
+ combined++;
+ }
+
+ return combined;
+}
+
+/**
+ * ice_get_channels - get the current and max supported channels
+ * @dev: network interface device structure
+ * @ch: ethtool channel data structure
+ */
+static void
+ice_get_channels(struct net_device *dev, struct ethtool_channels *ch)
+{
+ struct ice_netdev_priv *np = netdev_priv(dev);
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_pf *pf = vsi->back;
+
+ /* check to see if VSI is active */
+ if (test_bit(__ICE_DOWN, vsi->state))
+ return;
+
+ /* report maximum channels */
+ ch->max_rx = ice_get_max_rxq(pf);
+ ch->max_tx = ice_get_max_txq(pf);
+ ch->max_combined = min_t(int, ch->max_rx, ch->max_tx);
+
+ /* report current channels */
+ ch->combined_count = ice_get_combined_cnt(vsi);
+ ch->rx_count = vsi->num_rxq - ch->combined_count;
+ ch->tx_count = vsi->num_txq - ch->combined_count;
+}
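
On a default PF VSI each queue vector normally carries one Rx and one Tx ring, so the whole queue set is reported as combined. A worked example under that assumption:

	/* Assume num_rxq == num_txq == 8 with every vector paired. Then
	 * ice_get_combined_cnt() returns 8 and ethtool is told:
	 *   combined_count = 8, rx_count = 8 - 8 = 0, tx_count = 8 - 8 = 0
	 */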
+
+/**
+ * ice_vsi_set_dflt_rss_lut - set default RSS LUT with requested RSS size
+ * @vsi: VSI to reconfigure RSS LUT on
+ * @req_rss_size: requested range of queue numbers for hashing
+ *
+ * Set the VSI's RSS parameters, configure the RSS LUT based on these.
+ */
+static int ice_vsi_set_dflt_rss_lut(struct ice_vsi *vsi, int req_rss_size)
+{
+ struct ice_pf *pf = vsi->back;
+ enum ice_status status;
+ struct device *dev;
+ struct ice_hw *hw;
+ int err = 0;
+ u8 *lut;
+
+ dev = ice_pf_to_dev(pf);
+ hw = &pf->hw;
+
+ if (!req_rss_size)
+ return -EINVAL;
+
+ lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
+ if (!lut)
+ return -ENOMEM;
+
+ /* set RSS LUT parameters */
+ if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
+ vsi->rss_size = 1;
+ } else {
+ struct ice_hw_common_caps *caps = &hw->func_caps.common_cap;
+
+ vsi->rss_size = min_t(int, req_rss_size,
+ BIT(caps->rss_table_entry_width));
+ }
+
+ /* create/set RSS LUT */
+ ice_fill_rss_lut(lut, vsi->rss_table_size, vsi->rss_size);
+ status = ice_aq_set_rss_lut(hw, vsi->idx, vsi->rss_lut_type, lut,
+ vsi->rss_table_size);
+ if (status) {
+ dev_err(dev, "Cannot set RSS lut, err %d aq_err %d\n",
+ status, hw->adminq.rq_last_status);
+ err = -EIO;
+ }
+
+ kfree(lut);
+ return err;
+}
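
ice_fill_rss_lut() spreads the LUT entries round-robin over the active queues; a minimal sketch of that fill, assuming the usual semantics of the helper:

	static void fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size)
	{
		u16 i;

		for (i = 0; i < rss_table_size; i++)
			lut[i] = i % rss_size;
	}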
+
+/**
+ * ice_set_channels - set the number of channels
+ * @dev: network interface device structure
+ * @ch: ethtool channel data structure
+ */
+static int ice_set_channels(struct net_device *dev, struct ethtool_channels *ch)
+{
+ struct ice_netdev_priv *np = netdev_priv(dev);
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_pf *pf = vsi->back;
+ int new_rx = 0, new_tx = 0;
+ u32 curr_combined;
+
+ /* do not support changing channels in Safe Mode */
+ if (ice_is_safe_mode(pf)) {
+ netdev_err(dev, "Changing channel in Safe Mode is not supported\n");
+ return -EOPNOTSUPP;
+ }
+ /* do not support changing other_count */
+ if (ch->other_count)
+ return -EINVAL;
+
+ curr_combined = ice_get_combined_cnt(vsi);
+
+ /* These checks are for cases where the user didn't specify a
+ * particular value on the cmd line but we get a non-zero value
+ * anyway via get_channels(); see ethtool.c in the ethtool
+ * repository (the user-space part), particularly do_schannels()
+ */
+ if (ch->rx_count == vsi->num_rxq - curr_combined)
+ ch->rx_count = 0;
+ if (ch->tx_count == vsi->num_txq - curr_combined)
+ ch->tx_count = 0;
+ if (ch->combined_count == curr_combined)
+ ch->combined_count = 0;
+
+ if (!(ch->combined_count || (ch->rx_count && ch->tx_count))) {
+ netdev_err(dev, "Please specify at least 1 Rx and 1 Tx channel\n");
+ return -EINVAL;
+ }
+
+ new_rx = ch->combined_count + ch->rx_count;
+ new_tx = ch->combined_count + ch->tx_count;
+
+ if (new_rx > ice_get_max_rxq(pf)) {
+ netdev_err(dev, "Maximum allowed Rx channels is %d\n",
+ ice_get_max_rxq(pf));
+ return -EINVAL;
+ }
+ if (new_tx > ice_get_max_txq(pf)) {
+ netdev_err(dev, "Maximum allowed Tx channels is %d\n",
+ ice_get_max_txq(pf));
+ return -EINVAL;
+ }
+
+ ice_vsi_recfg_qs(vsi, new_rx, new_tx);
+
+ if (new_rx && !netif_is_rxfh_configured(dev))
+ return ice_vsi_set_dflt_rss_lut(vsi, new_rx);
+
+ return 0;
+}
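
From userspace this maps onto the standard ethtool channel commands, for example (interface name hypothetical):

	# query current and maximum channel counts
	ethtool -l enp1s0f0
	# resize to 8 combined channels
	ethtool -L enp1s0f0 combined 8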
+
enum ice_container_type {
ICE_RX_CONTAINER,
ICE_TX_CONTAINER,
@@ -3131,7 +3363,7 @@ ice_get_rc_coalesce(struct ethtool_coalesce *ec, enum ice_container_type c_type,
ec->tx_coalesce_usecs = rc->itr_setting & ~ICE_ITR_DYNAMIC;
break;
default:
- dev_dbg(&pf->pdev->dev, "Invalid c_type %d\n", c_type);
+ dev_dbg(ice_pf_to_dev(pf), "Invalid c_type %d\n", c_type);
return -EINVAL;
}
@@ -3271,7 +3503,8 @@ ice_set_rc_coalesce(enum ice_container_type c_type, struct ethtool_coalesce *ec,
break;
default:
- dev_dbg(&pf->pdev->dev, "Invalid container type %d\n", c_type);
+ dev_dbg(ice_pf_to_dev(pf), "Invalid container type %d\n",
+ c_type);
return -EINVAL;
}
@@ -3368,10 +3601,17 @@ __ice_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec,
struct ice_vsi *vsi = np->vsi;
if (q_num < 0) {
- int i;
+ int v_idx;
+
+ ice_for_each_q_vector(vsi, v_idx) {
+ /* In some cases, if DCB is configured, the num_[rx|tx]q
+ * can be less than vsi->num_q_vectors. This check
+ * accounts for that so we don't report a false failure
+ */
+ if (v_idx >= vsi->num_rxq && v_idx >= vsi->num_txq)
+ goto set_complete;
- ice_for_each_q_vector(vsi, i) {
- if (ice_set_q_coalesce(vsi, ec, i))
+ if (ice_set_q_coalesce(vsi, ec, v_idx))
return -EINVAL;
}
goto set_complete;
@@ -3398,6 +3638,151 @@ ice_set_per_q_coalesce(struct net_device *netdev, u32 q_num,
return __ice_set_coalesce(netdev, ec, q_num);
}
+#define ICE_I2C_EEPROM_DEV_ADDR 0xA0
+#define ICE_I2C_EEPROM_DEV_ADDR2 0xA2
+#define ICE_MODULE_TYPE_SFP 0x03
+#define ICE_MODULE_TYPE_QSFP_PLUS 0x0D
+#define ICE_MODULE_TYPE_QSFP28 0x11
+#define ICE_MODULE_SFF_ADDR_MODE 0x04
+#define ICE_MODULE_SFF_DIAG_CAPAB 0x40
+#define ICE_MODULE_REVISION_ADDR 0x01
+#define ICE_MODULE_SFF_8472_COMP 0x5E
+#define ICE_MODULE_SFF_8472_SWAP 0x5C
+#define ICE_MODULE_QSFP_MAX_LEN 640
+
+/**
+ * ice_get_module_info - get SFF module type and revision information
+ * @netdev: network interface device structure
+ * @modinfo: module EEPROM size and layout information structure
+ */
+static int
+ice_get_module_info(struct net_device *netdev,
+ struct ethtool_modinfo *modinfo)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_pf *pf = vsi->back;
+ struct ice_hw *hw = &pf->hw;
+ enum ice_status status;
+ u8 sff8472_comp = 0;
+ u8 sff8472_swap = 0;
+ u8 sff8636_rev = 0;
+ u8 value = 0;
+
+ status = ice_aq_sff_eeprom(hw, 0, ICE_I2C_EEPROM_DEV_ADDR, 0x00, 0x00,
+ 0, &value, 1, 0, NULL);
+ if (status)
+ return -EIO;
+
+ switch (value) {
+ case ICE_MODULE_TYPE_SFP:
+ status = ice_aq_sff_eeprom(hw, 0, ICE_I2C_EEPROM_DEV_ADDR,
+ ICE_MODULE_SFF_8472_COMP, 0x00, 0,
+ &sff8472_comp, 1, 0, NULL);
+ if (status)
+ return -EIO;
+ status = ice_aq_sff_eeprom(hw, 0, ICE_I2C_EEPROM_DEV_ADDR,
+ ICE_MODULE_SFF_8472_SWAP, 0x00, 0,
+ &sff8472_swap, 1, 0, NULL);
+ if (status)
+ return -EIO;
+
+ if (sff8472_swap & ICE_MODULE_SFF_ADDR_MODE) {
+ modinfo->type = ETH_MODULE_SFF_8079;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
+ } else if (sff8472_comp &&
+ (sff8472_swap & ICE_MODULE_SFF_DIAG_CAPAB)) {
+ modinfo->type = ETH_MODULE_SFF_8472;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+ } else {
+ modinfo->type = ETH_MODULE_SFF_8079;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
+ }
+ break;
+ case ICE_MODULE_TYPE_QSFP_PLUS:
+ case ICE_MODULE_TYPE_QSFP28:
+ status = ice_aq_sff_eeprom(hw, 0, ICE_I2C_EEPROM_DEV_ADDR,
+ ICE_MODULE_REVISION_ADDR, 0x00, 0,
+ &sff8636_rev, 1, 0, NULL);
+ if (status)
+ return -EIO;
+ /* Check revision compliance */
+ if (sff8636_rev > 0x02) {
+ /* Module is SFF-8636 compliant */
+ modinfo->type = ETH_MODULE_SFF_8636;
+ modinfo->eeprom_len = ICE_MODULE_QSFP_MAX_LEN;
+ } else {
+ modinfo->type = ETH_MODULE_SFF_8436;
+ modinfo->eeprom_len = ICE_MODULE_QSFP_MAX_LEN;
+ }
+ break;
+ default:
+ netdev_warn(netdev,
+ "SFF Module Type not recognized.\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/**
+ * ice_get_module_eeprom - fill buffer with SFF EEPROM contents
+ * @netdev: network interface device structure
+ * @ee: EEPROM dump request structure
+ * @data: buffer to be filled with EEPROM contents
+ */
+static int
+ice_get_module_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *ee, u8 *data)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ u8 addr = ICE_I2C_EEPROM_DEV_ADDR;
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_pf *pf = vsi->back;
+ struct ice_hw *hw = &pf->hw;
+ enum ice_status status;
+ bool is_sfp = false;
+ u16 offset = 0;
+ u8 value = 0;
+ u8 page = 0;
+ int i;
+
+ status = ice_aq_sff_eeprom(hw, 0, addr, offset, page, 0,
+ &value, 1, 0, NULL);
+ if (status)
+ return -EIO;
+
+ if (!ee || !ee->len || !data)
+ return -EINVAL;
+
+ if (value == ICE_MODULE_TYPE_SFP)
+ is_sfp = true;
+
+ for (i = 0; i < ee->len; i++) {
+ offset = i + ee->offset;
+
+ /* Check if we need to access the other memory page */
+ if (is_sfp) {
+ if (offset >= ETH_MODULE_SFF_8079_LEN) {
+ offset -= ETH_MODULE_SFF_8079_LEN;
+ addr = ICE_I2C_EEPROM_DEV_ADDR2;
+ }
+ } else {
+ while (offset >= ETH_MODULE_SFF_8436_LEN) {
+ /* Compute memory page number and offset. */
+ offset -= ETH_MODULE_SFF_8436_LEN / 2;
+ page++;
+ }
+ }
+
+ status = ice_aq_sff_eeprom(hw, 0, addr, offset, page, !is_sfp,
+ &value, 1, 0, NULL);
+ if (status)
+ value = 0;
+ data[i] = value;
+ }
+ return 0;
+}
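
The QSFP branch above folds a flat request offset into a (page, offset) pair: the first 256 bytes address lower memory plus upper page 0, and each further 128 bytes advance one upper page. A worked example:

	/* A request for byte 300: 300 >= 256, so one loop iteration gives
	 * offset = 300 - 128 = 172 and page = 1, i.e. the upper 128-byte
	 * window of page 1 in SFF-8636 style addressing.
	 */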
+
static const struct ethtool_ops ice_ethtool_ops = {
.get_link_ksettings = ice_get_link_ksettings,
.set_link_ksettings = ice_set_link_ksettings,
@@ -3428,11 +3813,15 @@ static const struct ethtool_ops ice_ethtool_ops = {
.get_rxfh_indir_size = ice_get_rxfh_indir_size,
.get_rxfh = ice_get_rxfh,
.set_rxfh = ice_set_rxfh,
+ .get_channels = ice_get_channels,
+ .set_channels = ice_set_channels,
.get_ts_info = ethtool_op_get_ts_info,
.get_per_queue_coalesce = ice_get_per_q_coalesce,
.set_per_queue_coalesce = ice_set_per_q_coalesce,
.get_fecparam = ice_get_fecparam,
.set_fecparam = ice_set_fecparam,
+ .get_module_info = ice_get_module_info,
+ .get_module_eeprom = ice_get_module_eeprom,
};
static const struct ethtool_ops ice_ethtool_safe_mode_ops = {
@@ -3451,6 +3840,7 @@ static const struct ethtool_ops ice_ethtool_safe_mode_ops = {
.get_ringparam = ice_get_ringparam,
.set_ringparam = ice_set_ringparam,
.nway_reset = ice_nway_reset,
+ .get_channels = ice_get_channels,
};
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index 152fbd556e9b..e8f32350fed2 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -52,6 +52,9 @@
#define PF_MBX_ATQLEN_ATQLEN_M ICE_M(0x3FF, 0)
#define PF_MBX_ATQLEN_ATQENABLE_M BIT(31)
#define PF_MBX_ATQT 0x0022E300
+#define PRTDCB_GENC 0x00083000
+#define PRTDCB_GENC_PFCLDA_S 16
+#define PRTDCB_GENC_PFCLDA_M ICE_M(0xFFFF, 16)
#define PRTDCB_GENS 0x00083020
#define PRTDCB_GENS_DCBX_STATUS_S 0
#define PRTDCB_GENS_DCBX_STATUS_M ICE_M(0x7, 0)
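
The new PRTDCB_GENC macros follow the driver's shift/mask convention; a sketch of a read site (placement hypothetical) extracting the PFC link delay allowance:

	u32 genc = rd32(hw, PRTDCB_GENC);
	u16 pfc_delay = (u16)((genc & PRTDCB_GENC_PFCLDA_M) >>
			      PRTDCB_GENC_PFCLDA_S);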
diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
index 2aac8f13daeb..ad34f22d44ef 100644
--- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
+++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
@@ -211,7 +211,7 @@ enum ice_flex_rx_mdid {
/* Rx/Tx Flag64 packet flag bits */
enum ice_flg64_bits {
ICE_FLG_PKT_DSI = 0,
- ICE_FLG_EVLAN_x8100 = 15,
+ ICE_FLG_EVLAN_x8100 = 14,
ICE_FLG_EVLAN_x9100,
ICE_FLG_VLAN_x8100,
ICE_FLG_TNL_MAC = 22,
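
Because the enum members that follow are implicitly numbered, correcting the first value renumbers the whole run; the resulting values are:

	/* ICE_FLG_EVLAN_x8100 = 14, ICE_FLG_EVLAN_x9100 = 15,
	 * ICE_FLG_VLAN_x8100 = 16, ..., ICE_FLG_TNL_MAC = 22 (explicit)
	 */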
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index cc755382df25..e7449248fab4 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -2,232 +2,26 @@
/* Copyright (c) 2018, Intel Corporation. */
#include "ice.h"
+#include "ice_base.h"
#include "ice_lib.h"
#include "ice_dcb_lib.h"
/**
- * ice_setup_rx_ctx - Configure a receive ring context
- * @ring: The Rx ring to configure
- *
- * Configure the Rx descriptor ring in RLAN context.
+ * ice_vsi_type_str - maps VSI type enum to string equivalents
+ * @type: VSI type enum
*/
-static int ice_setup_rx_ctx(struct ice_ring *ring)
+const char *ice_vsi_type_str(enum ice_vsi_type type)
{
- struct ice_vsi *vsi = ring->vsi;
- struct ice_hw *hw = &vsi->back->hw;
- u32 rxdid = ICE_RXDID_FLEX_NIC;
- struct ice_rlan_ctx rlan_ctx;
- u32 regval;
- u16 pf_q;
- int err;
-
- /* what is Rx queue number in global space of 2K Rx queues */
- pf_q = vsi->rxq_map[ring->q_index];
-
- /* clear the context structure first */
- memset(&rlan_ctx, 0, sizeof(rlan_ctx));
-
- rlan_ctx.base = ring->dma >> 7;
-
- rlan_ctx.qlen = ring->count;
-
- /* Receive Packet Data Buffer Size.
- * The Packet Data Buffer Size is defined in 128 byte units.
- */
- rlan_ctx.dbuf = vsi->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
-
- /* use 32 byte descriptors */
- rlan_ctx.dsize = 1;
-
- /* Strip the Ethernet CRC bytes before the packet is posted to host
- * memory.
- */
- rlan_ctx.crcstrip = 1;
-
- /* L2TSEL flag defines the reported L2 Tags in the receive descriptor */
- rlan_ctx.l2tsel = 1;
-
- rlan_ctx.dtype = ICE_RX_DTYPE_NO_SPLIT;
- rlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_NO_SPLIT;
- rlan_ctx.hsplit_1 = ICE_RLAN_RX_HSPLIT_1_NO_SPLIT;
-
- /* This controls whether VLAN is stripped from inner headers
- * The VLAN in the inner L2 header is stripped to the receive
- * descriptor if enabled by this flag.
- */
- rlan_ctx.showiv = 0;
-
- /* Max packet size for this queue - must not be set to a larger value
- * than 5 x DBUF
- */
- rlan_ctx.rxmax = min_t(u16, vsi->max_frame,
- ICE_MAX_CHAINED_RX_BUFS * vsi->rx_buf_len);
-
- /* Rx queue threshold in units of 64 */
- rlan_ctx.lrxqthresh = 1;
-
- /* Enable Flexible Descriptors in the queue context which
- * allows this driver to select a specific receive descriptor format
- */
- if (vsi->type != ICE_VSI_VF) {
- regval = rd32(hw, QRXFLXP_CNTXT(pf_q));
- regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
- QRXFLXP_CNTXT_RXDID_IDX_M;
-
- /* increasing context priority to pick up profile ID;
- * default is 0x01; setting to 0x03 to ensure profile
- * is programming if prev context is of same priority
- */
- regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
- QRXFLXP_CNTXT_RXDID_PRIO_M;
-
- wr32(hw, QRXFLXP_CNTXT(pf_q), regval);
- }
-
- /* Absolute queue number out of 2K needs to be passed */
- err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);
- if (err) {
- dev_err(&vsi->back->pdev->dev,
- "Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n",
- pf_q, err);
- return -EIO;
- }
-
- if (vsi->type == ICE_VSI_VF)
- return 0;
-
- /* init queue specific tail register */
- ring->tail = hw->hw_addr + QRX_TAIL(pf_q);
- writel(0, ring->tail);
- ice_alloc_rx_bufs(ring, ICE_DESC_UNUSED(ring));
-
- return 0;
-}
-
-/**
- * ice_setup_tx_ctx - setup a struct ice_tlan_ctx instance
- * @ring: The Tx ring to configure
- * @tlan_ctx: Pointer to the Tx LAN queue context structure to be initialized
- * @pf_q: queue index in the PF space
- *
- * Configure the Tx descriptor ring in TLAN context.
- */
-static void
-ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
-{
- struct ice_vsi *vsi = ring->vsi;
- struct ice_hw *hw = &vsi->back->hw;
-
- tlan_ctx->base = ring->dma >> ICE_TLAN_CTX_BASE_S;
-
- tlan_ctx->port_num = vsi->port_info->lport;
-
- /* Transmit Queue Length */
- tlan_ctx->qlen = ring->count;
-
- ice_set_cgd_num(tlan_ctx, ring);
-
- /* PF number */
- tlan_ctx->pf_num = hw->pf_id;
-
- /* queue belongs to a specific VSI type
- * VF / VM index should be programmed per vmvf_type setting:
- * for vmvf_type = VF, it is VF number between 0-256
- * for vmvf_type = VM, it is VM number between 0-767
- * for PF or EMP this field should be set to zero
- */
- switch (vsi->type) {
- case ICE_VSI_LB:
- /* fall through */
+ switch (type) {
case ICE_VSI_PF:
- tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
- break;
+ return "ICE_VSI_PF";
case ICE_VSI_VF:
- /* Firmware expects vmvf_num to be absolute VF ID */
- tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf_id;
- tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF;
- break;
+ return "ICE_VSI_VF";
+ case ICE_VSI_LB:
+ return "ICE_VSI_LB";
default:
- return;
- }
-
- /* make sure the context is associated with the right VSI */
- tlan_ctx->src_vsi = ice_get_hw_vsi_num(hw, vsi->idx);
-
- tlan_ctx->tso_ena = ICE_TX_LEGACY;
- tlan_ctx->tso_qnum = pf_q;
-
- /* Legacy or Advanced Host Interface:
- * 0: Advanced Host Interface
- * 1: Legacy Host Interface
- */
- tlan_ctx->legacy_int = ICE_TX_LEGACY;
-}
-
-/**
- * ice_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
- * @pf: the PF being configured
- * @pf_q: the PF queue
- * @ena: enable or disable state of the queue
- *
- * This routine will wait for the given Rx queue of the PF to reach the
- * enabled or disabled state.
- * Returns -ETIMEDOUT in case of failing to reach the requested state after
- * multiple retries; else will return 0 in case of success.
- */
-static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena)
-{
- int i;
-
- for (i = 0; i < ICE_Q_WAIT_MAX_RETRY; i++) {
- if (ena == !!(rd32(&pf->hw, QRX_CTRL(pf_q)) &
- QRX_CTRL_QENA_STAT_M))
- return 0;
-
- usleep_range(20, 40);
+ return "unknown";
}
-
- return -ETIMEDOUT;
-}
-
-/**
- * ice_vsi_ctrl_rx_ring - Start or stop a VSI's Rx ring
- * @vsi: the VSI being configured
- * @ena: start or stop the Rx rings
- * @rxq_idx: Rx queue index
- */
-#ifndef CONFIG_PCI_IOV
-static
-#endif /* !CONFIG_PCI_IOV */
-int ice_vsi_ctrl_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx)
-{
- int pf_q = vsi->rxq_map[rxq_idx];
- struct ice_pf *pf = vsi->back;
- struct ice_hw *hw = &pf->hw;
- int ret = 0;
- u32 rx_reg;
-
- rx_reg = rd32(hw, QRX_CTRL(pf_q));
-
- /* Skip if the queue is already in the requested state */
- if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M))
- return 0;
-
- /* turn on/off the queue */
- if (ena)
- rx_reg |= QRX_CTRL_QENA_REQ_M;
- else
- rx_reg &= ~QRX_CTRL_QENA_REQ_M;
- wr32(hw, QRX_CTRL(pf_q), rx_reg);
-
- /* wait for the change to finish */
- ret = ice_pf_rxq_wait(pf, pf_q, ena);
- if (ret)
- dev_err(&pf->pdev->dev,
- "VSI idx %d Rx ring %d %sable timeout\n",
- vsi->idx, pf_q, (ena ? "en" : "dis"));
-
- return ret;
}
/**
@@ -258,36 +52,39 @@ static int ice_vsi_ctrl_rx_rings(struct ice_vsi *vsi, bool ena)
static int ice_vsi_alloc_arrays(struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
+ struct device *dev;
+
+ dev = ice_pf_to_dev(pf);
/* allocate memory for both Tx and Rx ring pointers */
- vsi->tx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq,
+ vsi->tx_rings = devm_kcalloc(dev, vsi->alloc_txq,
sizeof(*vsi->tx_rings), GFP_KERNEL);
if (!vsi->tx_rings)
return -ENOMEM;
- vsi->rx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_rxq,
+ vsi->rx_rings = devm_kcalloc(dev, vsi->alloc_rxq,
sizeof(*vsi->rx_rings), GFP_KERNEL);
if (!vsi->rx_rings)
goto err_rings;
- vsi->txq_map = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq,
+ /* XDP will have vsi->alloc_txq Tx queues as well, so double the size */
+ vsi->txq_map = devm_kcalloc(dev, (2 * vsi->alloc_txq),
sizeof(*vsi->txq_map), GFP_KERNEL);
if (!vsi->txq_map)
goto err_txq_map;
- vsi->rxq_map = devm_kcalloc(&pf->pdev->dev, vsi->alloc_rxq,
+ vsi->rxq_map = devm_kcalloc(dev, vsi->alloc_rxq,
sizeof(*vsi->rxq_map), GFP_KERNEL);
if (!vsi->rxq_map)
goto err_rxq_map;
-
/* There is no need to allocate q_vectors for a loopback VSI. */
if (vsi->type == ICE_VSI_LB)
return 0;
/* allocate memory for q_vector pointers */
- vsi->q_vectors = devm_kcalloc(&pf->pdev->dev, vsi->num_q_vectors,
+ vsi->q_vectors = devm_kcalloc(dev, vsi->num_q_vectors,
sizeof(*vsi->q_vectors), GFP_KERNEL);
if (!vsi->q_vectors)
goto err_vectors;
@@ -295,13 +92,13 @@ static int ice_vsi_alloc_arrays(struct ice_vsi *vsi)
return 0;
err_vectors:
- devm_kfree(&pf->pdev->dev, vsi->rxq_map);
+ devm_kfree(dev, vsi->rxq_map);
err_rxq_map:
- devm_kfree(&pf->pdev->dev, vsi->txq_map);
+ devm_kfree(dev, vsi->txq_map);
err_txq_map:
- devm_kfree(&pf->pdev->dev, vsi->rx_rings);
+ devm_kfree(dev, vsi->rx_rings);
err_rings:
- devm_kfree(&pf->pdev->dev, vsi->tx_rings);
+ devm_kfree(dev, vsi->tx_rings);
return -ENOMEM;
}
@@ -345,15 +142,24 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
case ICE_VSI_PF:
vsi->alloc_txq = min_t(int, ice_get_avail_txq_count(pf),
num_online_cpus());
+ if (vsi->req_txq) {
+ vsi->alloc_txq = vsi->req_txq;
+ vsi->num_txq = vsi->req_txq;
+ }
pf->num_lan_tx = vsi->alloc_txq;
/* only 1 Rx queue unless RSS is enabled */
- if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags))
+ if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
vsi->alloc_rxq = 1;
- else
+ } else {
vsi->alloc_rxq = min_t(int, ice_get_avail_rxq_count(pf),
num_online_cpus());
+ if (vsi->req_rxq) {
+ vsi->alloc_rxq = vsi->req_rxq;
+ vsi->num_rxq = vsi->req_rxq;
+ }
+ }
pf->num_lan_rx = vsi->alloc_rxq;
@@ -375,7 +181,7 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
vsi->alloc_rxq = 1;
break;
default:
- dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type);
+ dev_warn(ice_pf_to_dev(pf), "Unknown VSI type %d\n", vsi->type);
break;
}
@@ -421,7 +227,7 @@ void ice_vsi_delete(struct ice_vsi *vsi)
struct ice_vsi_ctx *ctxt;
enum ice_status status;
- ctxt = devm_kzalloc(&pf->pdev->dev, sizeof(*ctxt), GFP_KERNEL);
+ ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
if (!ctxt)
return;
@@ -433,10 +239,10 @@ void ice_vsi_delete(struct ice_vsi *vsi)
status = ice_free_vsi(&pf->hw, vsi->idx, ctxt, false, NULL);
if (status)
- dev_err(&pf->pdev->dev, "Failed to delete VSI %i in FW\n",
- vsi->vsi_num);
+ dev_err(ice_pf_to_dev(pf), "Failed to delete VSI %i in FW - error: %d\n",
+ vsi->vsi_num, status);
- devm_kfree(&pf->pdev->dev, ctxt);
+ kfree(ctxt);
}
/**
@@ -446,26 +252,29 @@ void ice_vsi_delete(struct ice_vsi *vsi)
static void ice_vsi_free_arrays(struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
+ struct device *dev;
+
+ dev = ice_pf_to_dev(pf);
/* free the ring and vector containers */
if (vsi->q_vectors) {
- devm_kfree(&pf->pdev->dev, vsi->q_vectors);
+ devm_kfree(dev, vsi->q_vectors);
vsi->q_vectors = NULL;
}
if (vsi->tx_rings) {
- devm_kfree(&pf->pdev->dev, vsi->tx_rings);
+ devm_kfree(dev, vsi->tx_rings);
vsi->tx_rings = NULL;
}
if (vsi->rx_rings) {
- devm_kfree(&pf->pdev->dev, vsi->rx_rings);
+ devm_kfree(dev, vsi->rx_rings);
vsi->rx_rings = NULL;
}
if (vsi->txq_map) {
- devm_kfree(&pf->pdev->dev, vsi->txq_map);
+ devm_kfree(dev, vsi->txq_map);
vsi->txq_map = NULL;
}
if (vsi->rxq_map) {
- devm_kfree(&pf->pdev->dev, vsi->rxq_map);
+ devm_kfree(dev, vsi->rxq_map);
vsi->rxq_map = NULL;
}
}
@@ -482,6 +291,7 @@ static void ice_vsi_free_arrays(struct ice_vsi *vsi)
int ice_vsi_clear(struct ice_vsi *vsi)
{
struct ice_pf *pf = NULL;
+ struct device *dev;
if (!vsi)
return 0;
@@ -490,10 +300,10 @@ int ice_vsi_clear(struct ice_vsi *vsi)
return -EINVAL;
pf = vsi->back;
+ dev = ice_pf_to_dev(pf);
if (!pf->vsi[vsi->idx] || pf->vsi[vsi->idx] != vsi) {
- dev_dbg(&pf->pdev->dev, "vsi does not exist at pf->vsi[%d]\n",
- vsi->idx);
+ dev_dbg(dev, "vsi does not exist at pf->vsi[%d]\n", vsi->idx);
return -EINVAL;
}
@@ -506,7 +316,7 @@ int ice_vsi_clear(struct ice_vsi *vsi)
ice_vsi_free_arrays(vsi);
mutex_unlock(&pf->sw_mutex);
- devm_kfree(&pf->pdev->dev, vsi);
+ devm_kfree(dev, vsi);
return 0;
}
@@ -539,6 +349,7 @@ static irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data)
static struct ice_vsi *
ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type, u16 vf_id)
{
+ struct device *dev = ice_pf_to_dev(pf);
struct ice_vsi *vsi = NULL;
/* Need to protect the allocation of the VSIs at the PF level */
@@ -549,11 +360,11 @@ ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type, u16 vf_id)
* is available to be populated
*/
if (pf->next_vsi == ICE_NO_VSI) {
- dev_dbg(&pf->pdev->dev, "out of VSI slots!\n");
+ dev_dbg(dev, "out of VSI slots!\n");
goto unlock_pf;
}
- vsi = devm_kzalloc(&pf->pdev->dev, sizeof(*vsi), GFP_KERNEL);
+ vsi = devm_kzalloc(dev, sizeof(*vsi), GFP_KERNEL);
if (!vsi)
goto unlock_pf;
@@ -585,7 +396,7 @@ ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type, u16 vf_id)
goto err_rings;
break;
default:
- dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type);
+ dev_warn(dev, "Unknown VSI type %d\n", vsi->type);
goto unlock_pf;
}
@@ -598,7 +409,7 @@ ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type, u16 vf_id)
goto unlock_pf;
err_rings:
- devm_kfree(&pf->pdev->dev, vsi);
+ devm_kfree(dev, vsi);
vsi = NULL;
unlock_pf:
mutex_unlock(&pf->sw_mutex);
@@ -606,88 +417,6 @@ unlock_pf:
}
/**
- * __ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI
- * @qs_cfg: gathered variables needed for PF->VSI queues assignment
- *
- * Return 0 on success and -ENOMEM in case of no left space in PF queue bitmap
- */
-static int __ice_vsi_get_qs_contig(struct ice_qs_cfg *qs_cfg)
-{
- int offset, i;
-
- mutex_lock(qs_cfg->qs_mutex);
- offset = bitmap_find_next_zero_area(qs_cfg->pf_map, qs_cfg->pf_map_size,
- 0, qs_cfg->q_count, 0);
- if (offset >= qs_cfg->pf_map_size) {
- mutex_unlock(qs_cfg->qs_mutex);
- return -ENOMEM;
- }
-
- bitmap_set(qs_cfg->pf_map, offset, qs_cfg->q_count);
- for (i = 0; i < qs_cfg->q_count; i++)
- qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = i + offset;
- mutex_unlock(qs_cfg->qs_mutex);
-
- return 0;
-}
-
-/**
- * __ice_vsi_get_qs_sc - Assign a scattered queues from PF to VSI
- * @qs_cfg: gathered variables needed for pf->vsi queues assignment
- *
- * Return 0 on success and -ENOMEM in case of no left space in PF queue bitmap
- */
-static int __ice_vsi_get_qs_sc(struct ice_qs_cfg *qs_cfg)
-{
- int i, index = 0;
-
- mutex_lock(qs_cfg->qs_mutex);
- for (i = 0; i < qs_cfg->q_count; i++) {
- index = find_next_zero_bit(qs_cfg->pf_map,
- qs_cfg->pf_map_size, index);
- if (index >= qs_cfg->pf_map_size)
- goto err_scatter;
- set_bit(index, qs_cfg->pf_map);
- qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = index;
- }
- mutex_unlock(qs_cfg->qs_mutex);
-
- return 0;
-err_scatter:
- for (index = 0; index < i; index++) {
- clear_bit(qs_cfg->vsi_map[index], qs_cfg->pf_map);
- qs_cfg->vsi_map[index + qs_cfg->vsi_map_offset] = 0;
- }
- mutex_unlock(qs_cfg->qs_mutex);
-
- return -ENOMEM;
-}
-
-/**
- * __ice_vsi_get_qs - helper function for assigning queues from PF to VSI
- * @qs_cfg: gathered variables needed for pf->vsi queues assignment
- *
- * This function first tries to find contiguous space. If it is not successful,
- * it tries with the scatter approach.
- *
- * Return 0 on success and -ENOMEM in case of no left space in PF queue bitmap
- */
-static int __ice_vsi_get_qs(struct ice_qs_cfg *qs_cfg)
-{
- int ret = 0;
-
- ret = __ice_vsi_get_qs_contig(qs_cfg);
- if (ret) {
- /* contig failed, so try with scatter approach */
- qs_cfg->mapping_mode = ICE_VSI_MAP_SCATTER;
- qs_cfg->q_count = min_t(u16, qs_cfg->q_count,
- qs_cfg->scatter_count);
- ret = __ice_vsi_get_qs_sc(qs_cfg);
- }
- return ret;
-}
-
-/**
* ice_vsi_get_qs - Assign queues from PF to VSI
* @vsi: the VSI to assign queues to
*
@@ -769,14 +498,15 @@ bool ice_is_safe_mode(struct ice_pf *pf)
*/
static void ice_rss_clean(struct ice_vsi *vsi)
{
- struct ice_pf *pf;
+ struct ice_pf *pf = vsi->back;
+ struct device *dev;
- pf = vsi->back;
+ dev = ice_pf_to_dev(pf);
if (vsi->rss_hkey_user)
- devm_kfree(&pf->pdev->dev, vsi->rss_hkey_user);
+ devm_kfree(dev, vsi->rss_hkey_user);
if (vsi->rss_lut_user)
- devm_kfree(&pf->pdev->dev, vsi->rss_lut_user);
+ devm_kfree(dev, vsi->rss_lut_user);
}
/**
@@ -814,7 +544,7 @@ static void ice_vsi_set_rss_params(struct ice_vsi *vsi)
case ICE_VSI_LB:
break;
default:
- dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n",
+ dev_warn(ice_pf_to_dev(pf), "Unknown VSI type %d\n",
vsi->type);
break;
}
@@ -918,7 +648,9 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
else
max_rss = ICE_MAX_SMALL_RSS_QS;
qcount_rx = min_t(int, rx_numq_tc, max_rss);
- qcount_rx = min_t(int, qcount_rx, vsi->rss_size);
+ if (!vsi->req_rxq)
+ qcount_rx = min_t(int, qcount_rx,
+ vsi->rss_size);
}
}
@@ -990,9 +722,11 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
{
u8 lut_type, hash_type;
+ struct device *dev;
struct ice_pf *pf;
pf = vsi->back;
+ dev = ice_pf_to_dev(pf);
switch (vsi->type) {
case ICE_VSI_PF:
@@ -1006,10 +740,11 @@ static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
break;
case ICE_VSI_LB:
- dev_dbg(&pf->pdev->dev, "Unsupported VSI type %d\n", vsi->type);
+ dev_dbg(dev, "Unsupported VSI type %s\n",
+ ice_vsi_type_str(vsi->type));
return;
default:
- dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type);
+ dev_warn(dev, "Unknown VSI type %d\n", vsi->type);
return;
}
@@ -1022,18 +757,21 @@ static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
/**
* ice_vsi_init - Create and initialize a VSI
* @vsi: the VSI being configured
+ * @init_vsi: is this call creating a VSI
*
* This initializes a VSI context depending on the VSI type to be added and
* passes it down to the add_vsi aq command to create a new VSI.
*/
-static int ice_vsi_init(struct ice_vsi *vsi)
+static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi)
{
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
struct ice_vsi_ctx *ctxt;
+ struct device *dev;
int ret = 0;
- ctxt = devm_kzalloc(&pf->pdev->dev, sizeof(*ctxt), GFP_KERNEL);
+ dev = ice_pf_to_dev(pf);
+ ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
if (!ctxt)
return -ENOMEM;
@@ -1050,7 +788,8 @@ static int ice_vsi_init(struct ice_vsi *vsi)
ctxt->vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
break;
default:
- return -ENODEV;
+ ret = -ENODEV;
+ goto out;
}
ice_set_dflt_vsi_ctx(ctxt);
@@ -1059,11 +798,24 @@ static int ice_vsi_init(struct ice_vsi *vsi)
ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
/* Set LUT type and HASH type if RSS is enabled */
- if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
+ if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
ice_set_rss_vsi_ctx(ctxt, vsi);
+ /* if updating the VSI context, make sure to set valid_sections
+ * to indicate which section of the VSI context is being updated
+ */
+ if (!init_vsi)
+ ctxt->info.valid_sections |=
+ cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID);
+ }
ctxt->info.sw_id = vsi->port_info->sw_id;
ice_vsi_setup_q_map(vsi, ctxt);
+ if (!init_vsi) /* means the VSI is being updated */
+ /* must indicate which sections of the VSI context are
+ * being modified
+ */
+ ctxt->info.valid_sections |=
+ cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
/* Enable MAC Antispoof with new VSI being initialized or updated */
if (vsi->type == ICE_VSI_VF && pf->vf[vsi->vf_id].spoofchk) {
@@ -1080,11 +832,20 @@ static int ice_vsi_init(struct ice_vsi *vsi)
cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
}
- ret = ice_add_vsi(hw, vsi->idx, ctxt, NULL);
- if (ret) {
- dev_err(&pf->pdev->dev,
- "Add VSI failed, err %d\n", ret);
- return -EIO;
+ if (init_vsi) {
+ ret = ice_add_vsi(hw, vsi->idx, ctxt, NULL);
+ if (ret) {
+ dev_err(dev, "Add VSI failed, err %d\n", ret);
+ ret = -EIO;
+ goto out;
+ }
+ } else {
+ ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
+ if (ret) {
+ dev_err(dev, "Update VSI failed, err %d\n", ret);
+ ret = -EIO;
+ goto out;
+ }
}
/* keep context for update VSI operations */
@@ -1093,131 +854,9 @@ static int ice_vsi_init(struct ice_vsi *vsi)
/* record VSI number returned */
vsi->vsi_num = ctxt->vsi_num;
- devm_kfree(&pf->pdev->dev, ctxt);
- return ret;
-}
-
-/**
- * ice_free_q_vector - Free memory allocated for a specific interrupt vector
- * @vsi: VSI having the memory freed
- * @v_idx: index of the vector to be freed
- */
-static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx)
-{
- struct ice_q_vector *q_vector;
- struct ice_pf *pf = vsi->back;
- struct ice_ring *ring;
-
- if (!vsi->q_vectors[v_idx]) {
- dev_dbg(&pf->pdev->dev, "Queue vector at index %d not found\n",
- v_idx);
- return;
- }
- q_vector = vsi->q_vectors[v_idx];
-
- ice_for_each_ring(ring, q_vector->tx)
- ring->q_vector = NULL;
- ice_for_each_ring(ring, q_vector->rx)
- ring->q_vector = NULL;
-
- /* only VSI with an associated netdev is set up with NAPI */
- if (vsi->netdev)
- netif_napi_del(&q_vector->napi);
-
- devm_kfree(&pf->pdev->dev, q_vector);
- vsi->q_vectors[v_idx] = NULL;
-}
-
-/**
- * ice_vsi_free_q_vectors - Free memory allocated for interrupt vectors
- * @vsi: the VSI having memory freed
- */
-void ice_vsi_free_q_vectors(struct ice_vsi *vsi)
-{
- int v_idx;
-
- ice_for_each_q_vector(vsi, v_idx)
- ice_free_q_vector(vsi, v_idx);
-}
-
-/**
- * ice_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
- * @vsi: the VSI being configured
- * @v_idx: index of the vector in the VSI struct
- *
- * We allocate one q_vector. If allocation fails we return -ENOMEM.
- */
-static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, int v_idx)
-{
- struct ice_pf *pf = vsi->back;
- struct ice_q_vector *q_vector;
-
- /* allocate q_vector */
- q_vector = devm_kzalloc(&pf->pdev->dev, sizeof(*q_vector), GFP_KERNEL);
- if (!q_vector)
- return -ENOMEM;
-
- q_vector->vsi = vsi;
- q_vector->v_idx = v_idx;
- if (vsi->type == ICE_VSI_VF)
- goto out;
- /* only set affinity_mask if the CPU is online */
- if (cpu_online(v_idx))
- cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
-
- /* This will not be called in the driver load path because the netdev
- * will not be created yet. All other cases with register the NAPI
- * handler here (i.e. resume, reset/rebuild, etc.)
- */
- if (vsi->netdev)
- netif_napi_add(vsi->netdev, &q_vector->napi, ice_napi_poll,
- NAPI_POLL_WEIGHT);
-
out:
- /* tie q_vector and VSI together */
- vsi->q_vectors[v_idx] = q_vector;
-
- return 0;
-}
-
-/**
- * ice_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
- * @vsi: the VSI being configured
- *
- * We allocate one q_vector per queue interrupt. If allocation fails we
- * return -ENOMEM.
- */
-static int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi)
-{
- struct ice_pf *pf = vsi->back;
- int v_idx = 0, num_q_vectors;
- int err;
-
- if (vsi->q_vectors[0]) {
- dev_dbg(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
- vsi->vsi_num);
- return -EEXIST;
- }
-
- num_q_vectors = vsi->num_q_vectors;
-
- for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
- err = ice_vsi_alloc_q_vector(vsi, v_idx);
- if (err)
- goto err_out;
- }
-
- return 0;
-
-err_out:
- while (v_idx--)
- ice_free_q_vector(vsi, v_idx);
-
- dev_err(&pf->pdev->dev,
- "Failed to allocate %d q_vector for VSI %d, ret=%d\n",
- vsi->num_q_vectors, vsi->vsi_num, err);
- vsi->num_q_vectors = 0;
- return err;
+ kfree(ctxt);
+ return ret;
}
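
A minimal caller sketch of the new init_vsi flag (illustrative only, names taken from this patch): true selects the ice_add_vsi() creation path, while false reuses the saved context and issues ice_update_vsi(), where firmware applies only the sections flagged in valid_sections.

	ret = ice_vsi_init(vsi, true);  /* first-time creation in FW */

	/* later, e.g. during rebuild: reconfigure the existing VSI;
	 * FW honors only the sections set in ctxt->info.valid_sections
	 */
	ret = ice_vsi_init(vsi, false);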
/**
@@ -1233,14 +872,16 @@ err_out:
static int ice_vsi_setup_vector_base(struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
+ struct device *dev;
u16 num_q_vectors;
+ dev = ice_pf_to_dev(pf);
/* SRIOV doesn't grab irq_tracker entries for each VSI */
if (vsi->type == ICE_VSI_VF)
return 0;
if (vsi->base_vector) {
- dev_dbg(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",
+ dev_dbg(dev, "VSI %d has non-zero base vector %d\n",
vsi->vsi_num, vsi->base_vector);
return -EEXIST;
}
@@ -1250,7 +891,7 @@ static int ice_vsi_setup_vector_base(struct ice_vsi *vsi)
vsi->base_vector = ice_get_res(pf, pf->irq_tracker, num_q_vectors,
vsi->idx);
if (vsi->base_vector < 0) {
- dev_err(&pf->pdev->dev,
+ dev_err(dev,
"Failed to get tracking for %d vectors for VSI %d, err=%d\n",
num_q_vectors, vsi->vsi_num, vsi->base_vector);
return -ENOENT;
@@ -1293,8 +934,10 @@ static void ice_vsi_clear_rings(struct ice_vsi *vsi)
static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
+ struct device *dev;
int i;
+ dev = ice_pf_to_dev(pf);
/* Allocate Tx rings */
for (i = 0; i < vsi->alloc_txq; i++) {
struct ice_ring *ring;
@@ -1309,7 +952,7 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
ring->reg_idx = vsi->txq_map[i];
ring->ring_active = false;
ring->vsi = vsi;
- ring->dev = &pf->pdev->dev;
+ ring->dev = dev;
ring->count = vsi->num_tx_desc;
vsi->tx_rings[i] = ring;
}
@@ -1328,7 +971,7 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
ring->ring_active = false;
ring->vsi = vsi;
ring->netdev = vsi->netdev;
- ring->dev = &pf->pdev->dev;
+ ring->dev = dev;
ring->count = vsi->num_rx_desc;
vsi->rx_rings[i] = ring;
}
@@ -1341,66 +984,6 @@ err_out:
}
/**
- * ice_vsi_map_rings_to_vectors - Map VSI rings to interrupt vectors
- * @vsi: the VSI being configured
- *
- * This function maps descriptor rings to the queue-specific vectors allotted
- * through the MSI-X enabling code. On a constrained vector budget, we map Tx
- * and Rx rings to the vector as "efficiently" as possible.
- */
-#ifdef CONFIG_DCB
-void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
-#else
-static void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
-#endif /* CONFIG_DCB */
-{
- int q_vectors = vsi->num_q_vectors;
- int tx_rings_rem, rx_rings_rem;
- int v_id;
-
- /* initially assigning remaining rings count to VSIs num queue value */
- tx_rings_rem = vsi->num_txq;
- rx_rings_rem = vsi->num_rxq;
-
- for (v_id = 0; v_id < q_vectors; v_id++) {
- struct ice_q_vector *q_vector = vsi->q_vectors[v_id];
- int tx_rings_per_v, rx_rings_per_v, q_id, q_base;
-
- /* Tx rings mapping to vector */
- tx_rings_per_v = DIV_ROUND_UP(tx_rings_rem, q_vectors - v_id);
- q_vector->num_ring_tx = tx_rings_per_v;
- q_vector->tx.ring = NULL;
- q_vector->tx.itr_idx = ICE_TX_ITR;
- q_base = vsi->num_txq - tx_rings_rem;
-
- for (q_id = q_base; q_id < (q_base + tx_rings_per_v); q_id++) {
- struct ice_ring *tx_ring = vsi->tx_rings[q_id];
-
- tx_ring->q_vector = q_vector;
- tx_ring->next = q_vector->tx.ring;
- q_vector->tx.ring = tx_ring;
- }
- tx_rings_rem -= tx_rings_per_v;
-
- /* Rx rings mapping to vector */
- rx_rings_per_v = DIV_ROUND_UP(rx_rings_rem, q_vectors - v_id);
- q_vector->num_ring_rx = rx_rings_per_v;
- q_vector->rx.ring = NULL;
- q_vector->rx.itr_idx = ICE_RX_ITR;
- q_base = vsi->num_rxq - rx_rings_rem;
-
- for (q_id = q_base; q_id < (q_base + rx_rings_per_v); q_id++) {
- struct ice_ring *rx_ring = vsi->rx_rings[q_id];
-
- rx_ring->q_vector = q_vector;
- rx_ring->next = q_vector->rx.ring;
- q_vector->rx.ring = rx_ring;
- }
- rx_rings_rem -= rx_rings_per_v;
- }
-}
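
For reference, the DIV_ROUND_UP spread used above (the same scheme reappears in the XDP ring mapping added later in this patch) hands out rings as evenly as possible. A standalone sketch with hypothetical counts:

	int rings_rem = 5, q_vectors = 3, v_id;

	for (v_id = 0; v_id < q_vectors; v_id++) {
		int per_v = DIV_ROUND_UP(rings_rem, q_vectors - v_id);

		/* v0: ceil(5/3) = 2, v1: ceil(3/2) = 2, v2: ceil(1/1) = 1 */
		rings_rem -= per_v;
	}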
-
-/**
* ice_vsi_manage_rss_lut - disable/enable RSS
* @vsi: the VSI being changed
* @ena: boolean value indicating if this is an enable or disable request
@@ -1414,8 +997,7 @@ int ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena)
int err = 0;
u8 *lut;
- lut = devm_kzalloc(&vsi->back->pdev->dev, vsi->rss_table_size,
- GFP_KERNEL);
+ lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
if (!lut)
return -ENOMEM;
@@ -1428,7 +1010,7 @@ int ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena)
}
err = ice_set_rss(vsi, NULL, lut, vsi->rss_table_size);
- devm_kfree(&vsi->back->pdev->dev, lut);
+ kfree(lut);
return err;
}
@@ -1441,12 +1023,14 @@ static int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi)
struct ice_aqc_get_set_rss_keys *key;
struct ice_pf *pf = vsi->back;
enum ice_status status;
+ struct device *dev;
int err = 0;
u8 *lut;
+ dev = ice_pf_to_dev(pf);
vsi->rss_size = min_t(int, vsi->rss_size, vsi->num_rxq);
- lut = devm_kzalloc(&pf->pdev->dev, vsi->rss_table_size, GFP_KERNEL);
+ lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
if (!lut)
return -ENOMEM;
@@ -1459,13 +1043,12 @@ static int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi)
vsi->rss_table_size);
if (status) {
- dev_err(&pf->pdev->dev,
- "set_rss_lut failed, error %d\n", status);
+ dev_err(dev, "set_rss_lut failed, error %d\n", status);
err = -EIO;
goto ice_vsi_cfg_rss_exit;
}
- key = devm_kzalloc(&pf->pdev->dev, sizeof(*key), GFP_KERNEL);
+ key = kzalloc(sizeof(*key), GFP_KERNEL);
if (!key) {
err = -ENOMEM;
goto ice_vsi_cfg_rss_exit;
@@ -1482,14 +1065,13 @@ static int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi)
status = ice_aq_set_rss_key(&pf->hw, vsi->idx, key);
if (status) {
- dev_err(&pf->pdev->dev, "set_rss_key failed, error %d\n",
- status);
+ dev_err(dev, "set_rss_key failed, error %d\n", status);
err = -EIO;
}
- devm_kfree(&pf->pdev->dev, key);
+ kfree(key);
ice_vsi_cfg_rss_exit:
- devm_kfree(&pf->pdev->dev, lut);
+ kfree(lut);
return err;
}
@@ -1509,7 +1091,7 @@ int ice_add_mac_to_list(struct ice_vsi *vsi, struct list_head *add_list,
struct ice_fltr_list_entry *tmp;
struct ice_pf *pf = vsi->back;
- tmp = devm_kzalloc(&pf->pdev->dev, sizeof(*tmp), GFP_ATOMIC);
+ tmp = devm_kzalloc(ice_pf_to_dev(pf), sizeof(*tmp), GFP_ATOMIC);
if (!tmp)
return -ENOMEM;
@@ -1601,9 +1183,11 @@ int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid)
struct ice_pf *pf = vsi->back;
LIST_HEAD(tmp_add_list);
enum ice_status status;
+ struct device *dev;
int err = 0;
- tmp = devm_kzalloc(&pf->pdev->dev, sizeof(*tmp), GFP_KERNEL);
+ dev = ice_pf_to_dev(pf);
+ tmp = devm_kzalloc(dev, sizeof(*tmp), GFP_KERNEL);
if (!tmp)
return -ENOMEM;
@@ -1620,11 +1204,11 @@ int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid)
status = ice_add_vlan(&pf->hw, &tmp_add_list);
if (status) {
err = -ENODEV;
- dev_err(&pf->pdev->dev, "Failure Adding VLAN %d on VSI %i\n",
- vid, vsi->vsi_num);
+ dev_err(dev, "Failure Adding VLAN %d on VSI %i\n", vid,
+ vsi->vsi_num);
}
- ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
+ ice_free_fltr_list(dev, &tmp_add_list);
return err;
}
@@ -1641,9 +1225,11 @@ int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid)
struct ice_pf *pf = vsi->back;
LIST_HEAD(tmp_add_list);
enum ice_status status;
+ struct device *dev;
int err = 0;
- list = devm_kzalloc(&pf->pdev->dev, sizeof(*list), GFP_KERNEL);
+ dev = ice_pf_to_dev(pf);
+ list = devm_kzalloc(dev, sizeof(*list), GFP_KERNEL);
if (!list)
return -ENOMEM;
@@ -1659,21 +1245,46 @@ int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid)
status = ice_remove_vlan(&pf->hw, &tmp_add_list);
if (status == ICE_ERR_DOES_NOT_EXIST) {
- dev_dbg(&pf->pdev->dev,
+ dev_dbg(dev,
"Failed to remove VLAN %d on VSI %i, it does not exist, status: %d\n",
vid, vsi->vsi_num, status);
} else if (status) {
- dev_err(&pf->pdev->dev,
+ dev_err(dev,
"Error removing VLAN %d on vsi %i error: %d\n",
vid, vsi->vsi_num, status);
err = -EIO;
}
- ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
+ ice_free_fltr_list(dev, &tmp_add_list);
return err;
}
/**
+ * ice_vsi_cfg_frame_size - setup max frame size and Rx buffer length
+ * @vsi: VSI
+ */
+void ice_vsi_cfg_frame_size(struct ice_vsi *vsi)
+{
+ if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) {
+ vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX;
+ vsi->rx_buf_len = ICE_RXBUF_2048;
+#if (PAGE_SIZE < 8192)
+ } else if (!ICE_2K_TOO_SMALL_WITH_PADDING &&
+ (vsi->netdev->mtu <= ETH_DATA_LEN)) {
+ vsi->max_frame = ICE_RXBUF_1536 - NET_IP_ALIGN;
+ vsi->rx_buf_len = ICE_RXBUF_1536 - NET_IP_ALIGN;
+#endif
+ } else {
+ vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX;
+#if (PAGE_SIZE < 8192)
+ vsi->rx_buf_len = ICE_RXBUF_3072;
+#else
+ vsi->rx_buf_len = ICE_RXBUF_2048;
+#endif
+ }
+}
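
The resulting buffer policy, summarized as a decision table (the ICE_RXBUF_* and frame-size constants are existing driver macros whose values are not shown in this hunk):

	no netdev, or legacy-rx flag set        -> rx_buf_len = ICE_RXBUF_2048
	PAGE_SIZE < 8192, MTU <= ETH_DATA_LEN,
	  !ICE_2K_TOO_SMALL_WITH_PADDING        -> max_frame = rx_buf_len = ICE_RXBUF_1536 - NET_IP_ALIGN
	PAGE_SIZE < 8192, otherwise             -> rx_buf_len = ICE_RXBUF_3072
	PAGE_SIZE >= 8192, otherwise            -> rx_buf_len = ICE_RXBUF_2048

In all but the second row, max_frame is ICE_AQ_SET_MAC_FRAME_SIZE_MAX.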
+
+/**
* ice_vsi_cfg_rxqs - Configure the VSI for Rx
* @vsi: the VSI being configured
*
@@ -1687,13 +1298,7 @@ int ice_vsi_cfg_rxqs(struct ice_vsi *vsi)
if (vsi->type == ICE_VSI_VF)
goto setup_rings;
- if (vsi->netdev && vsi->netdev->mtu > ETH_DATA_LEN)
- vsi->max_frame = vsi->netdev->mtu +
- ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
- else
- vsi->max_frame = ICE_RXBUF_2048;
-
- vsi->rx_buf_len = ICE_RXBUF_2048;
+ ice_vsi_cfg_frame_size(vsi);
setup_rings:
/* set up individual rings */
for (i = 0; i < vsi->num_rxq; i++) {
@@ -1712,101 +1317,34 @@ setup_rings:
}
/**
- * ice_vsi_cfg_txq - Configure single Tx queue
- * @vsi: the VSI that queue belongs to
- * @ring: Tx ring to be configured
- * @tc_q_idx: queue index within given TC
- * @qg_buf: queue group buffer
- * @tc: TC that Tx ring belongs to
- */
-static int
-ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring, u16 tc_q_idx,
- struct ice_aqc_add_tx_qgrp *qg_buf, u8 tc)
-{
- struct ice_tlan_ctx tlan_ctx = { 0 };
- struct ice_aqc_add_txqs_perq *txq;
- struct ice_pf *pf = vsi->back;
- u8 buf_len = sizeof(*qg_buf);
- enum ice_status status;
- u16 pf_q;
-
- pf_q = ring->reg_idx;
- ice_setup_tx_ctx(ring, &tlan_ctx, pf_q);
- /* copy context contents into the qg_buf */
- qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
- ice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
- ice_tlan_ctx_info);
-
- /* init queue specific tail reg. It is referred as
- * transmit comm scheduler queue doorbell.
- */
- ring->tail = pf->hw.hw_addr + QTX_COMM_DBELL(pf_q);
-
- /* Add unique software queue handle of the Tx queue per
- * TC into the VSI Tx ring
- */
- ring->q_handle = tc_q_idx;
-
- status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc, ring->q_handle,
- 1, qg_buf, buf_len, NULL);
- if (status) {
- dev_err(&pf->pdev->dev,
- "Failed to set LAN Tx queue context, error: %d\n",
- status);
- return -ENODEV;
- }
-
- /* Add Tx Queue TEID into the VSI Tx ring from the
- * response. This will complete configuring and
- * enabling the queue.
- */
- txq = &qg_buf->txqs[0];
- if (pf_q == le16_to_cpu(txq->txq_id))
- ring->txq_teid = le32_to_cpu(txq->q_teid);
-
- return 0;
-}
-
-/**
* ice_vsi_cfg_txqs - Configure the VSI for Tx
* @vsi: the VSI being configured
* @rings: Tx ring array to be configured
- * @offset: offset within vsi->txq_map
*
* Return 0 on success and a negative value on error
* Configure the Tx VSI for operation.
*/
static int
-ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings, int offset)
+ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings)
{
struct ice_aqc_add_tx_qgrp *qg_buf;
- struct ice_pf *pf = vsi->back;
- u16 q_idx = 0, i;
+ u16 q_idx = 0;
int err = 0;
- u8 tc;
- qg_buf = devm_kzalloc(&pf->pdev->dev, sizeof(*qg_buf), GFP_KERNEL);
+ qg_buf = kzalloc(sizeof(*qg_buf), GFP_KERNEL);
if (!qg_buf)
return -ENOMEM;
qg_buf->num_txqs = 1;
- /* set up and configure the Tx queues for each enabled TC */
- ice_for_each_traffic_class(tc) {
- if (!(vsi->tc_cfg.ena_tc & BIT(tc)))
- break;
-
- for (i = 0; i < vsi->tc_cfg.tc_info[tc].qcount_tx; i++) {
- err = ice_vsi_cfg_txq(vsi, rings[q_idx], i + offset,
- qg_buf, tc);
- if (err)
- goto err_cfg_txqs;
-
- q_idx++;
- }
+ for (q_idx = 0; q_idx < vsi->num_txq; q_idx++) {
+ err = ice_vsi_cfg_txq(vsi, rings[q_idx], qg_buf);
+ if (err)
+ goto err_cfg_txqs;
}
+
err_cfg_txqs:
- devm_kfree(&pf->pdev->dev, qg_buf);
+ kfree(qg_buf);
return err;
}
@@ -1819,159 +1357,46 @@ err_cfg_txqs:
*/
int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi)
{
- return ice_vsi_cfg_txqs(vsi, vsi->tx_rings, 0);
-}
-
-/**
- * ice_intrl_usec_to_reg - convert interrupt rate limit to register value
- * @intrl: interrupt rate limit in usecs
- * @gran: interrupt rate limit granularity in usecs
- *
- * This function converts a decimal interrupt rate limit in usecs to the format
- * expected by firmware.
- */
-u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran)
-{
- u32 val = intrl / gran;
-
- if (val)
- return val | GLINT_RATE_INTRL_ENA_M;
- return 0;
-}
-
-/**
- * ice_cfg_itr_gran - set the ITR granularity to 2 usecs if not already set
- * @hw: board specific structure
- */
-static void ice_cfg_itr_gran(struct ice_hw *hw)
-{
- u32 regval = rd32(hw, GLINT_CTL);
-
- /* no need to update global register if ITR gran is already set */
- if (!(regval & GLINT_CTL_DIS_AUTOMASK_M) &&
- (((regval & GLINT_CTL_ITR_GRAN_200_M) >>
- GLINT_CTL_ITR_GRAN_200_S) == ICE_ITR_GRAN_US) &&
- (((regval & GLINT_CTL_ITR_GRAN_100_M) >>
- GLINT_CTL_ITR_GRAN_100_S) == ICE_ITR_GRAN_US) &&
- (((regval & GLINT_CTL_ITR_GRAN_50_M) >>
- GLINT_CTL_ITR_GRAN_50_S) == ICE_ITR_GRAN_US) &&
- (((regval & GLINT_CTL_ITR_GRAN_25_M) >>
- GLINT_CTL_ITR_GRAN_25_S) == ICE_ITR_GRAN_US))
- return;
-
- regval = ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_200_S) &
- GLINT_CTL_ITR_GRAN_200_M) |
- ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_100_S) &
- GLINT_CTL_ITR_GRAN_100_M) |
- ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_50_S) &
- GLINT_CTL_ITR_GRAN_50_M) |
- ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_25_S) &
- GLINT_CTL_ITR_GRAN_25_M);
- wr32(hw, GLINT_CTL, regval);
-}
-
-/**
- * ice_cfg_itr - configure the initial interrupt throttle values
- * @hw: pointer to the HW structure
- * @q_vector: interrupt vector that's being configured
- *
- * Configure interrupt throttling values for the ring containers that are
- * associated with the interrupt vector passed in.
- */
-static void
-ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector)
-{
- ice_cfg_itr_gran(hw);
-
- if (q_vector->num_ring_rx) {
- struct ice_ring_container *rc = &q_vector->rx;
-
- /* if this value is set then don't overwrite with default */
- if (!rc->itr_setting)
- rc->itr_setting = ICE_DFLT_RX_ITR;
-
- rc->target_itr = ITR_TO_REG(rc->itr_setting);
- rc->next_update = jiffies + 1;
- rc->current_itr = rc->target_itr;
- wr32(hw, GLINT_ITR(rc->itr_idx, q_vector->reg_idx),
- ITR_REG_ALIGN(rc->current_itr) >> ICE_ITR_GRAN_S);
- }
-
- if (q_vector->num_ring_tx) {
- struct ice_ring_container *rc = &q_vector->tx;
-
- /* if this value is set then don't overwrite with default */
- if (!rc->itr_setting)
- rc->itr_setting = ICE_DFLT_TX_ITR;
-
- rc->target_itr = ITR_TO_REG(rc->itr_setting);
- rc->next_update = jiffies + 1;
- rc->current_itr = rc->target_itr;
- wr32(hw, GLINT_ITR(rc->itr_idx, q_vector->reg_idx),
- ITR_REG_ALIGN(rc->current_itr) >> ICE_ITR_GRAN_S);
- }
+ return ice_vsi_cfg_txqs(vsi, vsi->tx_rings);
}
/**
- * ice_cfg_txq_interrupt - configure interrupt on Tx queue
+ * ice_vsi_cfg_xdp_txqs - Configure Tx queues dedicated for XDP in the given VSI
* @vsi: the VSI being configured
- * @txq: Tx queue being mapped to MSI-X vector
- * @msix_idx: MSI-X vector index within the function
- * @itr_idx: ITR index of the interrupt cause
*
- * Configure interrupt on Tx queue by associating Tx queue to MSI-X vector
- * within the function space.
+ * Return 0 on success and a negative value on error
+ * Configure the Tx queues dedicated for XDP in the given VSI for operation.
*/
-#ifdef CONFIG_PCI_IOV
-void
-ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx)
-#else
-static void
-ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx)
-#endif /* CONFIG_PCI_IOV */
+int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi)
{
- struct ice_pf *pf = vsi->back;
- struct ice_hw *hw = &pf->hw;
- u32 val;
+ int ret;
+ int i;
- itr_idx = (itr_idx << QINT_TQCTL_ITR_INDX_S) & QINT_TQCTL_ITR_INDX_M;
+ ret = ice_vsi_cfg_txqs(vsi, vsi->xdp_rings);
+ if (ret)
+ return ret;
- val = QINT_TQCTL_CAUSE_ENA_M | itr_idx |
- ((msix_idx << QINT_TQCTL_MSIX_INDX_S) & QINT_TQCTL_MSIX_INDX_M);
+ for (i = 0; i < vsi->num_xdp_txq; i++)
+ vsi->xdp_rings[i]->xsk_umem = ice_xsk_umem(vsi->xdp_rings[i]);
- wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val);
+ return ret;
}
/**
- * ice_cfg_rxq_interrupt - configure interrupt on Rx queue
- * @vsi: the VSI being configured
- * @rxq: Rx queue being mapped to MSI-X vector
- * @msix_idx: MSI-X vector index within the function
- * @itr_idx: ITR index of the interrupt cause
+ * ice_intrl_usec_to_reg - convert interrupt rate limit to register value
+ * @intrl: interrupt rate limit in usecs
+ * @gran: interrupt rate limit granularity in usecs
*
- * Configure interrupt on Rx queue by associating Rx queue to MSI-X vector
- * within the function space.
+ * This function converts a decimal interrupt rate limit in usecs to the format
+ * expected by firmware.
*/
-#ifdef CONFIG_PCI_IOV
-void
-ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx)
-#else
-static void
-ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx)
-#endif /* CONFIG_PCI_IOV */
+u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran)
{
- struct ice_pf *pf = vsi->back;
- struct ice_hw *hw = &pf->hw;
- u32 val;
-
- itr_idx = (itr_idx << QINT_RQCTL_ITR_INDX_S) & QINT_RQCTL_ITR_INDX_M;
-
- val = QINT_RQCTL_CAUSE_ENA_M | itr_idx |
- ((msix_idx << QINT_RQCTL_MSIX_INDX_S) & QINT_RQCTL_MSIX_INDX_M);
-
- wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val);
+ u32 val = intrl / gran;
- ice_flush(hw);
+ if (val)
+ return val | GLINT_RATE_INTRL_ENA_M;
+ return 0;
}
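
Worked examples for the conversion (the granularity value here is hypothetical):

	ice_intrl_usec_to_reg(20, 4); /* 20 / 4 = 5, returns 5 | GLINT_RATE_INTRL_ENA_M */
	ice_intrl_usec_to_reg(2, 4);  /* truncates to 0: rate limiting disabled */
	ice_intrl_usec_to_reg(0, 4);  /* returns 0: rate limiting disabled */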
/**
@@ -2028,13 +1453,12 @@ void ice_vsi_cfg_msix(struct ice_vsi *vsi)
*/
int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi)
{
- struct device *dev = &vsi->back->pdev->dev;
struct ice_hw *hw = &vsi->back->hw;
struct ice_vsi_ctx *ctxt;
enum ice_status status;
int ret = 0;
- ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL);
+ ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
if (!ctxt)
return -ENOMEM;
@@ -2052,7 +1476,7 @@ int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi)
status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (status) {
- dev_err(dev, "update VSI for VLAN insert failed, err %d aq_err %d\n",
+ dev_err(&vsi->back->pdev->dev, "update VSI for VLAN insert failed, err %d aq_err %d\n",
status, hw->adminq.sq_last_status);
ret = -EIO;
goto out;
@@ -2060,7 +1484,7 @@ int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi)
vsi->info.vlan_flags = ctxt->info.vlan_flags;
out:
- devm_kfree(dev, ctxt);
+ kfree(ctxt);
return ret;
}
@@ -2071,13 +1495,12 @@ out:
*/
int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
{
- struct device *dev = &vsi->back->pdev->dev;
struct ice_hw *hw = &vsi->back->hw;
struct ice_vsi_ctx *ctxt;
enum ice_status status;
int ret = 0;
- ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL);
+ ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
if (!ctxt)
return -ENOMEM;
@@ -2099,7 +1522,7 @@ int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (status) {
- dev_err(dev, "update VSI for VLAN strip failed, ena = %d err %d aq_err %d\n",
+ dev_err(&vsi->back->pdev->dev, "update VSI for VLAN strip failed, ena = %d err %d aq_err %d\n",
ena, status, hw->adminq.sq_last_status);
ret = -EIO;
goto out;
@@ -2107,7 +1530,7 @@ int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
vsi->info.vlan_flags = ctxt->info.vlan_flags;
out:
- devm_kfree(dev, ctxt);
+ kfree(ctxt);
return ret;
}
@@ -2134,109 +1557,6 @@ int ice_vsi_stop_rx_rings(struct ice_vsi *vsi)
}
/**
- * ice_trigger_sw_intr - trigger a software interrupt
- * @hw: pointer to the HW structure
- * @q_vector: interrupt vector to trigger the software interrupt for
- */
-void ice_trigger_sw_intr(struct ice_hw *hw, struct ice_q_vector *q_vector)
-{
- wr32(hw, GLINT_DYN_CTL(q_vector->reg_idx),
- (ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S) |
- GLINT_DYN_CTL_SWINT_TRIG_M |
- GLINT_DYN_CTL_INTENA_M);
-}
-
-/**
- * ice_vsi_stop_tx_ring - Disable single Tx ring
- * @vsi: the VSI being configured
- * @rst_src: reset source
- * @rel_vmvf_num: Relative ID of VF/VM
- * @ring: Tx ring to be stopped
- * @txq_meta: Meta data of Tx ring to be stopped
- */
-#ifndef CONFIG_PCI_IOV
-static
-#endif /* !CONFIG_PCI_IOV */
-int
-ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
- u16 rel_vmvf_num, struct ice_ring *ring,
- struct ice_txq_meta *txq_meta)
-{
- struct ice_pf *pf = vsi->back;
- struct ice_q_vector *q_vector;
- struct ice_hw *hw = &pf->hw;
- enum ice_status status;
- u32 val;
-
- /* clear cause_ena bit for disabled queues */
- val = rd32(hw, QINT_TQCTL(ring->reg_idx));
- val &= ~QINT_TQCTL_CAUSE_ENA_M;
- wr32(hw, QINT_TQCTL(ring->reg_idx), val);
-
- /* software is expected to wait for 100 ns */
- ndelay(100);
-
- /* trigger a software interrupt for the vector
- * associated to the queue to schedule NAPI handler
- */
- q_vector = ring->q_vector;
- if (q_vector)
- ice_trigger_sw_intr(hw, q_vector);
-
- status = ice_dis_vsi_txq(vsi->port_info, txq_meta->vsi_idx,
- txq_meta->tc, 1, &txq_meta->q_handle,
- &txq_meta->q_id, &txq_meta->q_teid, rst_src,
- rel_vmvf_num, NULL);
-
- /* if the disable queue command was exercised during an
- * active reset flow, ICE_ERR_RESET_ONGOING is returned.
- * This is not an error as the reset operation disables
- * queues at the hardware level anyway.
- */
- if (status == ICE_ERR_RESET_ONGOING) {
- dev_dbg(&vsi->back->pdev->dev,
- "Reset in progress. LAN Tx queues already disabled\n");
- } else if (status == ICE_ERR_DOES_NOT_EXIST) {
- dev_dbg(&vsi->back->pdev->dev,
- "LAN Tx queues do not exist, nothing to disable\n");
- } else if (status) {
- dev_err(&vsi->back->pdev->dev,
- "Failed to disable LAN Tx queues, error: %d\n", status);
- return -ENODEV;
- }
-
- return 0;
-}
-
-/**
- * ice_fill_txq_meta - Prepare the Tx queue's meta data
- * @vsi: VSI that ring belongs to
- * @ring: ring that txq_meta will be based on
- * @txq_meta: a helper struct that wraps Tx queue's information
- *
- * Set up a helper struct that will contain all the necessary fields that
- * are needed for stopping Tx queue
- */
-#ifndef CONFIG_PCI_IOV
-static
-#endif /* !CONFIG_PCI_IOV */
-void
-ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_ring *ring,
- struct ice_txq_meta *txq_meta)
-{
- u8 tc = 0;
-
-#ifdef CONFIG_DCB
- tc = ring->dcb_tc;
-#endif /* CONFIG_DCB */
- txq_meta->q_id = ring->reg_idx;
- txq_meta->q_teid = ring->txq_teid;
- txq_meta->q_handle = ring->q_handle;
- txq_meta->vsi_idx = vsi->idx;
- txq_meta->tc = tc;
-}
-
-/**
* ice_vsi_stop_tx_rings - Disable Tx rings
* @vsi: the VSI being configured
* @rst_src: reset source
@@ -2247,34 +1567,24 @@ static int
ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
u16 rel_vmvf_num, struct ice_ring **rings)
{
- u16 i, q_idx = 0;
- int status;
- u8 tc;
+ u16 q_idx;
if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS)
return -EINVAL;
- /* set up the Tx queue list to be disabled for each enabled TC */
- ice_for_each_traffic_class(tc) {
- if (!(vsi->tc_cfg.ena_tc & BIT(tc)))
- break;
-
- for (i = 0; i < vsi->tc_cfg.tc_info[tc].qcount_tx; i++) {
- struct ice_txq_meta txq_meta = { };
-
- if (!rings || !rings[q_idx])
- return -EINVAL;
+ for (q_idx = 0; q_idx < vsi->num_txq; q_idx++) {
+ struct ice_txq_meta txq_meta = { };
+ int status;
- ice_fill_txq_meta(vsi, rings[q_idx], &txq_meta);
- status = ice_vsi_stop_tx_ring(vsi, rst_src,
- rel_vmvf_num,
- rings[q_idx], &txq_meta);
+ if (!rings || !rings[q_idx])
+ return -EINVAL;
- if (status)
- return status;
+ ice_fill_txq_meta(vsi, rings[q_idx], &txq_meta);
+ status = ice_vsi_stop_tx_ring(vsi, rst_src, rel_vmvf_num,
+ rings[q_idx], &txq_meta);
- q_idx++;
- }
+ if (status)
+ return status;
}
return 0;
@@ -2294,6 +1604,15 @@ ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
}
/**
+ * ice_vsi_stop_xdp_tx_rings - Disable XDP Tx rings
+ * @vsi: the VSI being configured
+ */
+int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi)
+{
+ return ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, 0, vsi->xdp_rings);
+}
+
+/**
* ice_cfg_vlan_pruning - enable or disable VLAN pruning on the VSI
* @vsi: VSI to enable or disable VLAN pruning on
* @ena: set to true to enable VLAN pruning and false to disable it
@@ -2304,7 +1623,6 @@ ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc)
{
struct ice_vsi_ctx *ctxt;
- struct device *dev;
struct ice_pf *pf;
int status;
@@ -2312,8 +1630,7 @@ int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc)
return -EINVAL;
pf = vsi->back;
- dev = &pf->pdev->dev;
- ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL);
+ ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
if (!ctxt)
return -ENOMEM;
@@ -2347,11 +1664,11 @@ int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc)
vsi->info.sec_flags = ctxt->info.sec_flags;
vsi->info.sw_flags2 = ctxt->info.sw_flags2;
- devm_kfree(dev, ctxt);
+ kfree(ctxt);
return 0;
err_out:
- devm_kfree(dev, ctxt);
+ kfree(ctxt);
return -EIO;
}
@@ -2420,8 +1737,10 @@ ice_vsi_add_rem_eth_mac(struct ice_vsi *vsi, bool add_rule)
struct ice_pf *pf = vsi->back;
LIST_HEAD(tmp_add_list);
enum ice_status status;
+ struct device *dev;
- list = devm_kzalloc(&pf->pdev->dev, sizeof(*list), GFP_KERNEL);
+ dev = ice_pf_to_dev(pf);
+ list = devm_kzalloc(dev, sizeof(*list), GFP_KERNEL);
if (!list)
return;
@@ -2441,11 +1760,11 @@ ice_vsi_add_rem_eth_mac(struct ice_vsi *vsi, bool add_rule)
status = ice_remove_eth_mac(&pf->hw, &tmp_add_list);
if (status)
- dev_err(&pf->pdev->dev,
+ dev_err(dev,
"Failure Adding or Removing Ethertype on VSI %i error: %d\n",
vsi->vsi_num, status);
- ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
+ ice_free_fltr_list(dev, &tmp_add_list);
}
/**
@@ -2460,8 +1779,10 @@ void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create)
struct ice_pf *pf = vsi->back;
LIST_HEAD(tmp_add_list);
enum ice_status status;
+ struct device *dev;
- list = devm_kzalloc(&pf->pdev->dev, sizeof(*list), GFP_KERNEL);
+ dev = ice_pf_to_dev(pf);
+ list = devm_kzalloc(dev, sizeof(*list), GFP_KERNEL);
if (!list)
return;
@@ -2488,12 +1809,11 @@ void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create)
status = ice_remove_eth_mac(&pf->hw, &tmp_add_list);
if (status)
- dev_err(&pf->pdev->dev,
- "Fail %s %s LLDP rule on VSI %i error: %d\n",
+ dev_err(dev, "Fail %s %s LLDP rule on VSI %i error: %d\n",
create ? "adding" : "removing", tx ? "TX" : "RX",
vsi->vsi_num, status);
- ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
+ ice_free_fltr_list(dev, &tmp_add_list);
}
/**
@@ -2515,7 +1835,7 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
enum ice_vsi_type type, u16 vf_id)
{
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
- struct device *dev = &pf->pdev->dev;
+ struct device *dev = ice_pf_to_dev(pf);
enum ice_status status;
struct ice_vsi *vsi;
int ret, i;
@@ -2551,7 +1871,7 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
ice_vsi_set_tc_cfg(vsi);
/* create the VSI */
- ret = ice_vsi_init(vsi);
+ ret = ice_vsi_init(vsi, true);
if (ret)
goto unroll_get_qs;
@@ -2624,8 +1944,7 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
max_txqs);
if (status) {
- dev_err(&pf->pdev->dev,
- "VSI %d failed lan queue config, error %d\n",
+ dev_err(dev, "VSI %d failed lan queue config, error %d\n",
vsi->vsi_num, status);
goto unroll_vector_base;
}
@@ -2635,23 +1954,17 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
* out PAUSE or PFC frames. If enabled, FW can still send FC frames.
* The rule is added once for PF VSI in order to create appropriate
* recipe, since VSI/VSI list is ignored with drop action...
- * Also add rules to handle LLDP Tx and Rx packets. Tx LLDP packets
- * need to be dropped so that VFs cannot send LLDP packets to reconfig
- * DCB settings in the HW. Also, if the FW DCBX engine is not running
- * then Rx LLDP packets need to be redirected up the stack.
+ * Also add rules to handle LLDP Tx packets. Tx LLDP packets need to
+ * be dropped so that VFs cannot send LLDP packets to reconfig DCB
+ * settings in the HW.
*/
- if (!ice_is_safe_mode(pf)) {
+ if (!ice_is_safe_mode(pf))
if (vsi->type == ICE_VSI_PF) {
ice_vsi_add_rem_eth_mac(vsi, true);
/* Tx LLDP packets */
ice_cfg_sw_lldp(vsi, true, true);
-
- /* Rx LLDP packets */
- if (!test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
- ice_cfg_sw_lldp(vsi, false, true);
}
- }
return vsi;
@@ -2690,6 +2003,11 @@ static void ice_vsi_release_msix(struct ice_vsi *vsi)
wr32(hw, GLINT_ITR(ICE_IDX_ITR1, reg_idx), 0);
for (q = 0; q < q_vector->num_ring_tx; q++) {
wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), 0);
+ if (ice_is_xdp_ena_vsi(vsi)) {
+ u32 xdp_txq = txq + vsi->num_xdp_txq;
+
+ wr32(hw, QINT_TQCTL(vsi->txq_map[xdp_txq]), 0);
+ }
txq++;
}
@@ -2738,8 +2056,7 @@ void ice_vsi_free_irq(struct ice_vsi *vsi)
/* clear the affinity_mask in the IRQ descriptor */
irq_set_affinity_hint(irq_num, NULL);
synchronize_irq(irq_num);
- devm_free_irq(&pf->pdev->dev, irq_num,
- vsi->q_vectors[i]);
+ devm_free_irq(ice_pf_to_dev(pf), irq_num, vsi->q_vectors[i]);
}
}
@@ -2790,6 +2107,62 @@ void ice_vsi_close(struct ice_vsi *vsi)
}
/**
+ * ice_ena_vsi - resume a VSI
+ * @vsi: the VSI being resumed
+ * @locked: is the rtnl_lock already held
+ */
+int ice_ena_vsi(struct ice_vsi *vsi, bool locked)
+{
+ int err = 0;
+
+ if (!test_bit(__ICE_NEEDS_RESTART, vsi->state))
+ return 0;
+
+ clear_bit(__ICE_NEEDS_RESTART, vsi->state);
+
+ if (vsi->netdev && vsi->type == ICE_VSI_PF) {
+ if (netif_running(vsi->netdev)) {
+ if (!locked)
+ rtnl_lock();
+
+ err = ice_open(vsi->netdev);
+
+ if (!locked)
+ rtnl_unlock();
+ }
+ }
+
+ return err;
+}
+
+/**
+ * ice_dis_vsi - pause a VSI
+ * @vsi: the VSI being paused
+ * @locked: is the rtnl_lock already held
+ */
+void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
+{
+ if (test_bit(__ICE_DOWN, vsi->state))
+ return;
+
+ set_bit(__ICE_NEEDS_RESTART, vsi->state);
+
+ if (vsi->type == ICE_VSI_PF && vsi->netdev) {
+ if (netif_running(vsi->netdev)) {
+ if (!locked)
+ rtnl_lock();
+
+ ice_stop(vsi->netdev);
+
+ if (!locked)
+ rtnl_unlock();
+ } else {
+ ice_vsi_close(vsi);
+ }
+ }
+}
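
A usage sketch for the locked flag (illustrative): a caller that already holds the rtnl lock passes true so the pause/resume pair does not try to re-acquire it.

	rtnl_lock();
	ice_dis_vsi(vsi, true);
	/* reconfigure the VSI while it is paused */
	err = ice_ena_vsi(vsi, true);
	rtnl_unlock();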
+
+/**
* ice_free_res - free a block of resources
* @res: pointer to the resource
* @index: starting index previously returned by ice_get_res
@@ -2869,7 +2242,7 @@ ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id)
return -EINVAL;
if (!needed || needed > res->num_entries || id >= ICE_RES_VALID_BIT) {
- dev_err(&pf->pdev->dev,
+ dev_err(ice_pf_to_dev(pf),
"param err: needed=%d, num_entries = %d id=0x%04x\n",
needed, res->num_entries, id);
return -EINVAL;
@@ -3031,10 +2404,11 @@ int ice_vsi_release(struct ice_vsi *vsi)
/**
* ice_vsi_rebuild - Rebuild VSI after reset
* @vsi: VSI to be rebuild
+ * @init_vsi: true for an initialization, false for a reconfigure of the VSI
*
* Returns 0 on success and negative value on failure
*/
-int ice_vsi_rebuild(struct ice_vsi *vsi)
+int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
{
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
struct ice_vf *vf = NULL;
@@ -3064,6 +2438,11 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
vsi->base_vector = 0;
}
+ if (ice_is_xdp_ena_vsi(vsi))
+ /* the return value check can be skipped here: it always
+ * returns 0 when a reset is in progress
+ */
+ ice_destroy_xdp_rings(vsi);
ice_vsi_put_qs(vsi);
ice_vsi_clear_rings(vsi);
ice_vsi_free_arrays(vsi);
@@ -3081,11 +2460,10 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
ice_vsi_set_tc_cfg(vsi);
/* Initialize VSI struct elements and create VSI in FW */
- ret = ice_vsi_init(vsi);
+ ret = ice_vsi_init(vsi, init_vsi);
if (ret < 0)
goto err_vsi;
-
switch (vsi->type) {
case ICE_VSI_PF:
ret = ice_vsi_alloc_q_vectors(vsi);
@@ -3105,6 +2483,12 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
goto err_vectors;
ice_vsi_map_rings_to_vectors(vsi);
+ if (ice_is_xdp_ena_vsi(vsi)) {
+ vsi->num_xdp_txq = vsi->alloc_txq;
+ ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog);
+ if (ret)
+ goto err_vectors;
+ }
/* Do not exit if configuring RSS had an issue, at least
* receive traffic on first queue. Hence no need to capture
* return value
@@ -3131,16 +2515,25 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
}
/* configure VSI nodes based on number of queues and TC's */
- for (i = 0; i < vsi->tc_cfg.numtc; i++)
+ for (i = 0; i < vsi->tc_cfg.numtc; i++) {
max_txqs[i] = vsi->alloc_txq;
+ if (ice_is_xdp_ena_vsi(vsi))
+ max_txqs[i] += vsi->num_xdp_txq;
+ }
+
status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
max_txqs);
if (status) {
- dev_err(&pf->pdev->dev,
+ dev_err(ice_pf_to_dev(pf),
"VSI %d failed lan queue config, error %d\n",
vsi->vsi_num, status);
- goto err_vectors;
+ if (init_vsi) {
+ ret = -EIO;
+ goto err_vectors;
+ } else {
+ return ice_schedule_reset(pf, ICE_RESET_PFR);
+ }
}
return 0;
@@ -3166,6 +2559,7 @@ err_vsi:
bool ice_is_reset_in_progress(unsigned long *state)
{
return test_bit(__ICE_RESET_OICR_RECV, state) ||
+ test_bit(__ICE_DCBNL_DEVRESET, state) ||
test_bit(__ICE_PFR_REQ, state) ||
test_bit(__ICE_CORER_REQ, state) ||
test_bit(__ICE_GLOBR_REQ, state);
@@ -3199,9 +2593,12 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
struct ice_vsi_ctx *ctx;
struct ice_pf *pf = vsi->back;
enum ice_status status;
+ struct device *dev;
int i, ret = 0;
u8 num_tc = 0;
+ dev = ice_pf_to_dev(pf);
+
ice_for_each_traffic_class(i) {
/* build bitmap of enabled TCs */
if (ena_tc & BIT(i))
@@ -3213,7 +2610,7 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
vsi->tc_cfg.ena_tc = ena_tc;
vsi->tc_cfg.numtc = num_tc;
- ctx = devm_kzalloc(&pf->pdev->dev, sizeof(*ctx), GFP_KERNEL);
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
@@ -3226,7 +2623,7 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
status = ice_update_vsi(&pf->hw, vsi->idx, ctx, NULL);
if (status) {
- dev_info(&pf->pdev->dev, "Failed VSI Update\n");
+ dev_info(dev, "Failed VSI Update\n");
ret = -EIO;
goto out;
}
@@ -3235,8 +2632,7 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
max_txqs);
if (status) {
- dev_err(&pf->pdev->dev,
- "VSI %d failed TC config, error %d\n",
+ dev_err(dev, "VSI %d failed TC config, error %d\n",
vsi->vsi_num, status);
ret = -EIO;
goto out;
@@ -3246,7 +2642,7 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
ice_vsi_cfg_netdev_tc(vsi, ena_tc);
out:
- devm_kfree(&pf->pdev->dev, ctx);
+ kfree(ctx);
return ret;
}
#endif /* CONFIG_DCB */
@@ -3271,6 +2667,51 @@ char *ice_nvm_version_str(struct ice_hw *hw)
}
/**
+ * ice_update_ring_stats - Update ring statistics
+ * @ring: ring to update
+ * @cont: used to increment per-vector counters
+ * @pkts: number of processed packets
+ * @bytes: number of processed bytes
+ *
+ * This function assumes that caller has acquired a u64_stats_sync lock.
+ */
+static void
+ice_update_ring_stats(struct ice_ring *ring, struct ice_ring_container *cont,
+ u64 pkts, u64 bytes)
+{
+ ring->stats.bytes += bytes;
+ ring->stats.pkts += pkts;
+ cont->total_bytes += bytes;
+ cont->total_pkts += pkts;
+}
+
+/**
+ * ice_update_tx_ring_stats - Update Tx ring specific counters
+ * @tx_ring: ring to update
+ * @pkts: number of processed packets
+ * @bytes: number of processed bytes
+ */
+void ice_update_tx_ring_stats(struct ice_ring *tx_ring, u64 pkts, u64 bytes)
+{
+ u64_stats_update_begin(&tx_ring->syncp);
+ ice_update_ring_stats(tx_ring, &tx_ring->q_vector->tx, pkts, bytes);
+ u64_stats_update_end(&tx_ring->syncp);
+}
+
+/**
+ * ice_update_rx_ring_stats - Update Rx ring specific counters
+ * @rx_ring: ring to update
+ * @pkts: number of processed packets
+ * @bytes: number of processed bytes
+ */
+void ice_update_rx_ring_stats(struct ice_ring *rx_ring, u64 pkts, u64 bytes)
+{
+ u64_stats_update_begin(&rx_ring->syncp);
+ ice_update_ring_stats(rx_ring, &rx_ring->q_vector->rx, pkts, bytes);
+ u64_stats_update_end(&rx_ring->syncp);
+}
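
These helpers cover the writer side; a reader takes a consistent snapshot under the same syncp seqcount. A minimal sketch using the standard u64_stats API of this kernel generation:

	unsigned int start;
	u64 pkts, bytes;

	do {
		start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
		pkts = tx_ring->stats.pkts;
		bytes = tx_ring->stats.bytes;
	} while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));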
+
+/**
* ice_vsi_cfg_mac_fltr - Add or remove a MAC address filter for a VSI
* @vsi: the VSI being configured MAC filter
* @macaddr: the MAC address to be added.
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index 47bc033fff20..6e31e30aba39 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -6,18 +6,7 @@
#include "ice.h"
-struct ice_txq_meta {
- /* Tx-scheduler element identifier */
- u32 q_teid;
- /* Entry in VSI's txq_map bitmap */
- u16 q_id;
- /* Relative index of Tx queue within TC */
- u16 q_handle;
- /* VSI index that Tx queue belongs to */
- u16 vsi_idx;
- /* TC number that Tx queue belongs to */
- u8 tc;
-};
+const char *ice_vsi_type_str(enum ice_vsi_type type);
int
ice_add_mac_to_list(struct ice_vsi *vsi, struct list_head *add_list,
@@ -33,24 +22,6 @@ int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi);
void ice_vsi_cfg_msix(struct ice_vsi *vsi);
-#ifdef CONFIG_PCI_IOV
-void
-ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx);
-
-void
-ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx);
-
-int
-ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
- u16 rel_vmvf_num, struct ice_ring *ring,
- struct ice_txq_meta *txq_meta);
-
-void ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_ring *ring,
- struct ice_txq_meta *txq_meta);
-
-int ice_vsi_ctrl_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx);
-#endif /* CONFIG_PCI_IOV */
-
int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid);
int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid);
@@ -67,6 +38,10 @@ int
ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
u16 rel_vmvf_num);
+int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi);
+
+int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi);
+
int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc);
void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create);
@@ -89,25 +64,21 @@ int ice_vsi_release(struct ice_vsi *vsi);
void ice_vsi_close(struct ice_vsi *vsi);
+int ice_ena_vsi(struct ice_vsi *vsi, bool locked);
+
+void ice_dis_vsi(struct ice_vsi *vsi, bool locked);
+
int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id);
int
ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id);
-int ice_vsi_rebuild(struct ice_vsi *vsi);
+int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi);
bool ice_is_reset_in_progress(unsigned long *state);
-void ice_vsi_free_q_vectors(struct ice_vsi *vsi);
-
-void ice_trigger_sw_intr(struct ice_hw *hw, struct ice_q_vector *q_vector);
-
void ice_vsi_put_qs(struct ice_vsi *vsi);
-#ifdef CONFIG_DCB
-void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi);
-#endif /* CONFIG_DCB */
-
void ice_vsi_dis_irq(struct ice_vsi *vsi);
void ice_vsi_free_irq(struct ice_vsi *vsi);
@@ -118,6 +89,12 @@ void ice_vsi_free_tx_rings(struct ice_vsi *vsi);
int ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena);
+void ice_update_tx_ring_stats(struct ice_ring *ring, u64 pkts, u64 bytes);
+
+void ice_update_rx_ring_stats(struct ice_ring *ring, u64 pkts, u64 bytes);
+
+void ice_vsi_cfg_frame_size(struct ice_vsi *vsi);
+
u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran);
char *ice_nvm_version_str(struct ice_hw *hw);
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 214cd6eca405..69bff085acf7 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -6,8 +6,10 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include "ice.h"
+#include "ice_base.h"
#include "ice_lib.h"
#include "ice_dcb_lib.h"
+#include "ice_dcb_nl.h"
#define DRV_VERSION_MAJOR 0
#define DRV_VERSION_MINOR 8
@@ -42,6 +44,7 @@ MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
static struct workqueue_struct *ice_wq;
static const struct net_device_ops ice_netdev_safe_mode_ops;
static const struct net_device_ops ice_netdev_ops;
+static int ice_vsi_open(struct ice_vsi *vsi);
static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type);
@@ -159,7 +162,7 @@ unregister:
* had an error
*/
if (status && vsi->netdev->reg_state == NETREG_REGISTERED) {
- dev_err(&pf->pdev->dev,
+ dev_err(ice_pf_to_dev(pf),
"Could not add MAC filters error %d. Unregistering device\n",
status);
unregister_netdev(vsi->netdev);
@@ -435,42 +438,11 @@ static void ice_sync_fltr_subtask(struct ice_pf *pf)
}
/**
- * ice_dis_vsi - pause a VSI
- * @vsi: the VSI being paused
- * @locked: is the rtnl_lock already held
- */
-static void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
-{
- if (test_bit(__ICE_DOWN, vsi->state))
- return;
-
- set_bit(__ICE_NEEDS_RESTART, vsi->state);
-
- if (vsi->type == ICE_VSI_PF && vsi->netdev) {
- if (netif_running(vsi->netdev)) {
- if (!locked)
- rtnl_lock();
-
- ice_stop(vsi->netdev);
-
- if (!locked)
- rtnl_unlock();
- } else {
- ice_vsi_close(vsi);
- }
- }
-}
-
-/**
* ice_pf_dis_all_vsi - Pause all VSIs on a PF
* @pf: the PF
* @locked: is the rtnl_lock already held
*/
-#ifdef CONFIG_DCB
-void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
-#else
static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
-#endif /* CONFIG_DCB */
{
int v;
@@ -524,7 +496,7 @@ ice_prepare_for_reset(struct ice_pf *pf)
*/
static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
{
- struct device *dev = &pf->pdev->dev;
+ struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
dev_dbg(dev, "reset_type 0x%x requested\n", reset_type);
@@ -636,8 +608,14 @@ static void ice_print_topo_conflict(struct ice_vsi *vsi)
switch (vsi->port_info->phy.link_info.topo_media_conflict) {
case ICE_AQ_LINK_TOPO_CONFLICT:
case ICE_AQ_LINK_MEDIA_CONFLICT:
+ case ICE_AQ_LINK_TOPO_UNREACH_PRT:
+ case ICE_AQ_LINK_TOPO_UNDRUTIL_PRT:
+ case ICE_AQ_LINK_TOPO_UNDRUTIL_MEDIA:
netdev_info(vsi->netdev, "Possible mis-configuration of the Ethernet port detected, please use the Intel(R) Ethernet Port Configuration Tool application to address the issue.\n");
break;
+ case ICE_AQ_LINK_TOPO_UNSUPP_MEDIA:
+ netdev_info(vsi->netdev, "Rx/Tx is disabled on this device because an unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
+ break;
default:
break;
}
@@ -747,7 +725,7 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
an = "False";
/* Get FEC mode requested based on PHY caps last SW configuration */
- caps = devm_kzalloc(&vsi->back->pdev->dev, sizeof(*caps), GFP_KERNEL);
+ caps = kzalloc(sizeof(*caps), GFP_KERNEL);
if (!caps) {
fec_req = "Unknown";
goto done;
@@ -767,7 +745,7 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
else
fec_req = "NONE";
- devm_kfree(&vsi->back->pdev->dev, caps);
+ kfree(caps);
done:
netdev_info(vsi->netdev, "NIC Link is up %sbps, Requested FEC: %s, FEC: %s, Autoneg: %s, Flow Control: %s\n",
@@ -815,6 +793,7 @@ static int
ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
u16 link_speed)
{
+ struct device *dev = ice_pf_to_dev(pf);
struct ice_phy_info *phy_info;
struct ice_vsi *vsi;
u16 old_link_speed;
@@ -832,7 +811,7 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
*/
result = ice_update_link_info(pi);
if (result)
- dev_dbg(&pf->pdev->dev,
+ dev_dbg(dev,
"Failed to update link status and re-enable link events for port %d\n",
pi->lport);
@@ -851,7 +830,7 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
result = ice_aq_set_link_restart_an(pi, false, NULL);
if (result) {
- dev_dbg(&pf->pdev->dev,
+ dev_dbg(dev,
"Failed to set link down, VSI %d error %d\n",
vsi->vsi_num, result);
return result;
@@ -947,7 +926,7 @@ ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event)
!!(link_data->link_info & ICE_AQ_LINK_UP),
le16_to_cpu(link_data->link_speed));
if (status)
- dev_dbg(&pf->pdev->dev,
+ dev_dbg(ice_pf_to_dev(pf),
"Could not process link event, error %d\n", status);
return status;
@@ -960,6 +939,7 @@ ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event)
*/
static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
{
+ struct device *dev = ice_pf_to_dev(pf);
struct ice_rq_event_info event;
struct ice_hw *hw = &pf->hw;
struct ice_ctl_q_info *cq;
@@ -981,8 +961,7 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
qtype = "Mailbox";
break;
default:
- dev_warn(&pf->pdev->dev, "Unknown control queue type 0x%x\n",
- q_type);
+ dev_warn(dev, "Unknown control queue type 0x%x\n", q_type);
return 0;
}
@@ -994,15 +973,15 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
PF_FW_ARQLEN_ARQCRIT_M)) {
oldval = val;
if (val & PF_FW_ARQLEN_ARQVFE_M)
- dev_dbg(&pf->pdev->dev,
- "%s Receive Queue VF Error detected\n", qtype);
+ dev_dbg(dev, "%s Receive Queue VF Error detected\n",
+ qtype);
if (val & PF_FW_ARQLEN_ARQOVFL_M) {
- dev_dbg(&pf->pdev->dev,
+ dev_dbg(dev,
"%s Receive Queue Overflow Error detected\n",
qtype);
}
if (val & PF_FW_ARQLEN_ARQCRIT_M)
- dev_dbg(&pf->pdev->dev,
+ dev_dbg(dev,
"%s Receive Queue Critical Error detected\n",
qtype);
val &= ~(PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
@@ -1016,16 +995,14 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
PF_FW_ATQLEN_ATQCRIT_M)) {
oldval = val;
if (val & PF_FW_ATQLEN_ATQVFE_M)
- dev_dbg(&pf->pdev->dev,
+ dev_dbg(dev,
"%s Send Queue VF Error detected\n", qtype);
if (val & PF_FW_ATQLEN_ATQOVFL_M) {
- dev_dbg(&pf->pdev->dev,
- "%s Send Queue Overflow Error detected\n",
+ dev_dbg(dev, "%s Send Queue Overflow Error detected\n",
qtype);
}
if (val & PF_FW_ATQLEN_ATQCRIT_M)
- dev_dbg(&pf->pdev->dev,
- "%s Send Queue Critical Error detected\n",
+ dev_dbg(dev, "%s Send Queue Critical Error detected\n",
qtype);
val &= ~(PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
PF_FW_ATQLEN_ATQCRIT_M);
@@ -1034,8 +1011,7 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
}
event.buf_len = cq->rq_buf_size;
- event.msg_buf = devm_kzalloc(&pf->pdev->dev, event.buf_len,
- GFP_KERNEL);
+ event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
if (!event.msg_buf)
return 0;
@@ -1047,8 +1023,7 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
if (ret == ICE_ERR_AQ_NO_WORK)
break;
if (ret) {
- dev_err(&pf->pdev->dev,
- "%s Receive Queue event error %d\n", qtype,
+ dev_err(dev, "%s Receive Queue event error %d\n", qtype,
ret);
break;
}
@@ -1058,8 +1033,7 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
switch (opcode) {
case ice_aqc_opc_get_link_status:
if (ice_handle_link_event(pf, &event))
- dev_err(&pf->pdev->dev,
- "Could not handle link event\n");
+ dev_err(dev, "Could not handle link event\n");
break;
case ice_mbx_opc_send_msg_to_pf:
ice_vc_process_vf_msg(pf, &event);
@@ -1071,14 +1045,14 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
ice_dcb_process_lldp_set_mib_change(pf, &event);
break;
default:
- dev_dbg(&pf->pdev->dev,
+ dev_dbg(dev,
"%s Receive Queue unknown event 0x%04x ignored\n",
qtype, opcode);
break;
}
} while (pending && (i++ < ICE_DFLT_IRQ_WORK));
- devm_kfree(&pf->pdev->dev, event.msg_buf);
+ kfree(event.msg_buf);
return pending && (i == ICE_DFLT_IRQ_WORK);
}
@@ -1222,6 +1196,7 @@ static void ice_service_timer(struct timer_list *t)
*/
static void ice_handle_mdd_event(struct ice_pf *pf)
{
+ struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
bool mdd_detected = false;
u32 reg;
@@ -1243,7 +1218,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
GL_MDET_TX_PQM_QNUM_S);
if (netif_msg_tx_err(pf))
- dev_info(&pf->pdev->dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
+ dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
event, queue, pf_num, vf_num);
wr32(hw, GL_MDET_TX_PQM, 0xffffffff);
mdd_detected = true;
@@ -1261,7 +1236,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
GL_MDET_TX_TCLAN_QNUM_S);
if (netif_msg_rx_err(pf))
- dev_info(&pf->pdev->dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
+ dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
event, queue, pf_num, vf_num);
wr32(hw, GL_MDET_TX_TCLAN, 0xffffffff);
mdd_detected = true;
@@ -1279,7 +1254,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
GL_MDET_RX_QNUM_S);
if (netif_msg_rx_err(pf))
- dev_info(&pf->pdev->dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n",
+ dev_info(dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n",
event, queue, pf_num, vf_num);
wr32(hw, GL_MDET_RX, 0xffffffff);
mdd_detected = true;
@@ -1291,21 +1266,21 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
reg = rd32(hw, PF_MDET_TX_PQM);
if (reg & PF_MDET_TX_PQM_VALID_M) {
wr32(hw, PF_MDET_TX_PQM, 0xFFFF);
- dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n");
+ dev_info(dev, "TX driver issue detected, PF reset issued\n");
pf_mdd_detected = true;
}
reg = rd32(hw, PF_MDET_TX_TCLAN);
if (reg & PF_MDET_TX_TCLAN_VALID_M) {
wr32(hw, PF_MDET_TX_TCLAN, 0xFFFF);
- dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n");
+ dev_info(dev, "TX driver issue detected, PF reset issued\n");
pf_mdd_detected = true;
}
reg = rd32(hw, PF_MDET_RX);
if (reg & PF_MDET_RX_VALID_M) {
wr32(hw, PF_MDET_RX, 0xFFFF);
- dev_info(&pf->pdev->dev, "RX driver issue detected, PF reset issued\n");
+ dev_info(dev, "RX driver issue detected, PF reset issued\n");
pf_mdd_detected = true;
}
/* Queue belongs to the PF initiate a reset */
@@ -1325,7 +1300,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
if (reg & VP_MDET_TX_PQM_VALID_M) {
wr32(hw, VP_MDET_TX_PQM(i), 0xFFFF);
vf_mdd_detected = true;
- dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
+ dev_info(dev, "TX driver issue detected on VF %d\n",
i);
}
@@ -1333,7 +1308,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
if (reg & VP_MDET_TX_TCLAN_VALID_M) {
wr32(hw, VP_MDET_TX_TCLAN(i), 0xFFFF);
vf_mdd_detected = true;
- dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
+ dev_info(dev, "TX driver issue detected on VF %d\n",
i);
}
@@ -1341,7 +1316,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
if (reg & VP_MDET_TX_TDPU_VALID_M) {
wr32(hw, VP_MDET_TX_TDPU(i), 0xFFFF);
vf_mdd_detected = true;
- dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
+ dev_info(dev, "TX driver issue detected on VF %d\n",
i);
}
@@ -1349,7 +1324,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
if (reg & VP_MDET_RX_VALID_M) {
wr32(hw, VP_MDET_RX(i), 0xFFFF);
vf_mdd_detected = true;
- dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
+ dev_info(dev, "RX driver issue detected on VF %d\n",
i);
}
@@ -1357,7 +1332,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
vf->num_mdd_events++;
if (vf->num_mdd_events &&
vf->num_mdd_events <= ICE_MDD_EVENTS_THRESHOLD)
- dev_info(&pf->pdev->dev,
+ dev_info(dev,
"VF %d has had %llu MDD events since last boot, Admin might need to reload AVF driver with this number of events\n",
i, vf->num_mdd_events);
}
@@ -1393,7 +1368,7 @@ static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up)
pi = vsi->port_info;
- pcaps = devm_kzalloc(dev, sizeof(*pcaps), GFP_KERNEL);
+ pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
if (!pcaps)
return -ENOMEM;
@@ -1412,7 +1387,7 @@ static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up)
link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
goto out;
- cfg = devm_kzalloc(dev, sizeof(*cfg), GFP_KERNEL);
+ cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
if (!cfg) {
retcode = -ENOMEM;
goto out;
@@ -1437,9 +1412,9 @@ static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up)
retcode = -EIO;
}
- devm_kfree(dev, cfg);
+ kfree(cfg);
out:
- devm_kfree(dev, pcaps);
+ kfree(pcaps);
return retcode;
}
@@ -1551,6 +1526,44 @@ static void ice_set_ctrlq_len(struct ice_hw *hw)
}
/**
+ * ice_schedule_reset - schedule a reset
+ * @pf: board private structure
+ * @reset: reset being requested
+ */
+int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+
+ /* bail out if earlier reset has failed */
+ if (test_bit(__ICE_RESET_FAILED, pf->state)) {
+ dev_dbg(dev, "earlier reset has failed\n");
+ return -EIO;
+ }
+ /* bail if reset/recovery already in progress */
+ if (ice_is_reset_in_progress(pf->state)) {
+ dev_dbg(dev, "Reset already in progress\n");
+ return -EBUSY;
+ }
+
+ switch (reset) {
+ case ICE_RESET_PFR:
+ set_bit(__ICE_PFR_REQ, pf->state);
+ break;
+ case ICE_RESET_CORER:
+ set_bit(__ICE_CORER_REQ, pf->state);
+ break;
+ case ICE_RESET_GLOBR:
+ set_bit(__ICE_GLOBR_REQ, pf->state);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ice_service_task_schedule(pf);
+ return 0;
+}
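
The helper only records the request and kicks the service task; the reset itself runs asynchronously. A caller sketch:

	/* -EBUSY if a reset is already pending, -EIO if one failed earlier */
	if (ice_schedule_reset(pf, ICE_RESET_PFR))
		dev_dbg(ice_pf_to_dev(pf), "could not schedule PF reset\n");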
+
+/**
* ice_irq_affinity_notify - Callback for affinity changes
* @notify: context as to what irq was changed
* @mask: the new affinity mask
@@ -1604,11 +1617,13 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
int q_vectors = vsi->num_q_vectors;
struct ice_pf *pf = vsi->back;
int base = vsi->base_vector;
+ struct device *dev;
int rx_int_idx = 0;
int tx_int_idx = 0;
int vector, err;
int irq_num;
+ dev = ice_pf_to_dev(pf);
for (vector = 0; vector < q_vectors; vector++) {
struct ice_q_vector *q_vector = vsi->q_vectors[vector];
@@ -1628,8 +1643,7 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
/* skip this unused q_vector */
continue;
}
- err = devm_request_irq(&pf->pdev->dev, irq_num,
- vsi->irq_handler, 0,
+ err = devm_request_irq(dev, irq_num, vsi->irq_handler, 0,
q_vector->name, q_vector);
if (err) {
netdev_err(vsi->netdev,
@@ -1655,12 +1669,331 @@ free_q_irqs:
irq_num = pf->msix_entries[base + vector].vector,
irq_set_affinity_notifier(irq_num, NULL);
irq_set_affinity_hint(irq_num, NULL);
- devm_free_irq(&pf->pdev->dev, irq_num, &vsi->q_vectors[vector]);
+ devm_free_irq(dev, irq_num, &vsi->q_vectors[vector]);
}
return err;
}
/**
+ * ice_xdp_alloc_setup_rings - Allocate and setup Tx rings for XDP
+ * @vsi: VSI to setup Tx rings used by XDP
+ *
+ * Return 0 on success and negative value on error
+ */
+static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
+{
+ struct device *dev = &vsi->back->pdev->dev;
+ int i;
+
+ for (i = 0; i < vsi->num_xdp_txq; i++) {
+ u16 xdp_q_idx = vsi->alloc_txq + i;
+ struct ice_ring *xdp_ring;
+
+ xdp_ring = kzalloc(sizeof(*xdp_ring), GFP_KERNEL);
+
+ if (!xdp_ring)
+ goto free_xdp_rings;
+
+ xdp_ring->q_index = xdp_q_idx;
+ xdp_ring->reg_idx = vsi->txq_map[xdp_q_idx];
+ xdp_ring->ring_active = false;
+ xdp_ring->vsi = vsi;
+ xdp_ring->netdev = NULL;
+ xdp_ring->dev = dev;
+ xdp_ring->count = vsi->num_tx_desc;
+ vsi->xdp_rings[i] = xdp_ring;
+ if (ice_setup_tx_ring(xdp_ring))
+ goto free_xdp_rings;
+ ice_set_ring_xdp(xdp_ring);
+ xdp_ring->xsk_umem = ice_xsk_umem(xdp_ring);
+ }
+
+ return 0;
+
+free_xdp_rings:
+ for (; i >= 0; i--)
+ if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc)
+ ice_free_tx_ring(vsi->xdp_rings[i]);
+ return -ENOMEM;
+}
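
Note how the XDP queues are laid out after the LAN queues in txq_map; with hypothetical counts:

	/* e.g. alloc_txq = 16, num_xdp_txq = 16: XDP ring i takes
	 * xdp_q_idx = 16 + i and reg_idx = vsi->txq_map[16 + i],
	 * so LAN and XDP queues never share a hardware queue
	 */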
+
+/**
+ * ice_vsi_assign_bpf_prog - set or clear bpf prog pointer on VSI
+ * @vsi: VSI to set the bpf prog on
+ * @prog: the bpf prog pointer
+ */
+static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog)
+{
+ struct bpf_prog *old_prog;
+ int i;
+
+ old_prog = xchg(&vsi->xdp_prog, prog);
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+ ice_for_each_rxq(vsi, i)
+ WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
+}
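
The xchg() above publishes the new program atomically before the old one is released, so readers of vsi->xdp_prog never observe a torn update, and the per-ring pointers are then propagated with WRITE_ONCE(). A small C11 sketch of the same publish-then-release pattern (prog_put() is a hypothetical stand-in for bpf_prog_put()-style refcounting):

    #include <stdatomic.h>
    #include <stdlib.h>

    struct prog { int refcnt; };

    static void prog_put(struct prog *p)    /* hypothetical release helper */
    {
        if (p && --p->refcnt == 0)
            free(p);
    }

    static _Atomic(struct prog *) active_prog;

    /* Publish @next first, then drop the reference on what it replaced */
    static void assign_prog(struct prog *next)
    {
        prog_put(atomic_exchange(&active_prog, next));
    }

    int main(void)
    {
        struct prog *p = malloc(sizeof(*p));

        if (!p)
            return 1;
        p->refcnt = 1;
        assign_prog(p);     /* install */
        assign_prog(NULL);  /* detach; drops the last reference, frees p */
        return 0;
    }
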
+
+/**
+ * ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP
+ * @vsi: VSI to bring up Tx rings used by XDP
+ * @prog: bpf program that will be assigned to VSI
+ *
+ * Return 0 on success and negative value on error
+ */
+int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
+{
+ u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
+ int xdp_rings_rem = vsi->num_xdp_txq;
+ struct ice_pf *pf = vsi->back;
+ struct ice_qs_cfg xdp_qs_cfg = {
+ .qs_mutex = &pf->avail_q_mutex,
+ .pf_map = pf->avail_txqs,
+ .pf_map_size = pf->max_pf_txqs,
+ .q_count = vsi->num_xdp_txq,
+ .scatter_count = ICE_MAX_SCATTER_TXQS,
+ .vsi_map = vsi->txq_map,
+ .vsi_map_offset = vsi->alloc_txq,
+ .mapping_mode = ICE_VSI_MAP_CONTIG
+ };
+ enum ice_status status;
+ struct device *dev;
+ int i, v_idx;
+
+ dev = ice_pf_to_dev(pf);
+ vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq,
+ sizeof(*vsi->xdp_rings), GFP_KERNEL);
+ if (!vsi->xdp_rings)
+ return -ENOMEM;
+
+ vsi->xdp_mapping_mode = xdp_qs_cfg.mapping_mode;
+ if (__ice_vsi_get_qs(&xdp_qs_cfg))
+ goto err_map_xdp;
+
+ if (ice_xdp_alloc_setup_rings(vsi))
+ goto clear_xdp_rings;
+
+ /* follow the logic from ice_vsi_map_rings_to_vectors */
+ ice_for_each_q_vector(vsi, v_idx) {
+ struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
+ int xdp_rings_per_v, q_id, q_base;
+
+ xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem,
+ vsi->num_q_vectors - v_idx);
+ q_base = vsi->num_xdp_txq - xdp_rings_rem;
+
+ for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) {
+ struct ice_ring *xdp_ring = vsi->xdp_rings[q_id];
+
+ xdp_ring->q_vector = q_vector;
+ xdp_ring->next = q_vector->tx.ring;
+ q_vector->tx.ring = xdp_ring;
+ }
+ xdp_rings_rem -= xdp_rings_per_v;
+ }
+
+ /* omit the scheduler update if in reset path; XDP queues will be
+ * taken into account at the end of ice_vsi_rebuild, where
+ * ice_cfg_vsi_lan is called
+ */
+ if (ice_is_reset_in_progress(pf->state))
+ return 0;
+
+ /* tell the Tx scheduler that we now have additional queues */
+ for (i = 0; i < vsi->tc_cfg.numtc; i++)
+ max_txqs[i] = vsi->num_txq + vsi->num_xdp_txq;
+
+ status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
+ max_txqs);
+ if (status) {
+ dev_err(dev, "Failed VSI LAN queue config for XDP, error:%d\n",
+ status);
+ goto clear_xdp_rings;
+ }
+ ice_vsi_assign_bpf_prog(vsi, prog);
+
+ return 0;
+clear_xdp_rings:
+ for (i = 0; i < vsi->num_xdp_txq; i++)
+ if (vsi->xdp_rings[i]) {
+ kfree_rcu(vsi->xdp_rings[i], rcu);
+ vsi->xdp_rings[i] = NULL;
+ }
+
+err_map_xdp:
+ mutex_lock(&pf->avail_q_mutex);
+ for (i = 0; i < vsi->num_xdp_txq; i++) {
+ clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
+ vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
+ }
+ mutex_unlock(&pf->avail_q_mutex);
+
+ devm_kfree(dev, vsi->xdp_rings);
+ return -ENOMEM;
+}
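
The vector-mapping loop in ice_prepare_xdp_rings() spreads the XDP rings over the q_vectors as evenly as possible: each vector takes DIV_ROUND_UP(remaining rings, remaining vectors), which yields, for example, a 3/3/2/2 split for 10 rings over 4 vectors. A stand-alone sketch of just that arithmetic (illustrative counts only):

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        int num_xdp_txq = 10, num_q_vectors = 4;
        int rings_rem = num_xdp_txq;
        int v_idx;

        for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
            int per_v = DIV_ROUND_UP(rings_rem, num_q_vectors - v_idx);
            int q_base = num_xdp_txq - rings_rem;

            printf("vector %d gets rings [%d..%d]\n",
                   v_idx, q_base, q_base + per_v - 1);
            rings_rem -= per_v;
        }
        return 0;   /* prints a 3/3/2/2 split for 10 rings over 4 vectors */
    }
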
+
+/**
+ * ice_destroy_xdp_rings - undo the configuration made by ice_prepare_xdp_rings
+ * @vsi: VSI to remove XDP rings
+ *
+ * Detach XDP rings from irq vectors, clean up the PF bitmap and free
+ * resources
+ */
+int ice_destroy_xdp_rings(struct ice_vsi *vsi)
+{
+ u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
+ struct ice_pf *pf = vsi->back;
+ int i, v_idx;
+
+ /* q_vectors are freed in the reset path, so there's no point in
+ * detaching rings; if the rebuild was triggered outside of a reset, the
+ * reset bits in pf->state won't be set, so additionally check the first
+ * q_vector against NULL
+ */
+ if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
+ goto free_qmap;
+
+ ice_for_each_q_vector(vsi, v_idx) {
+ struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
+ struct ice_ring *ring;
+
+ ice_for_each_ring(ring, q_vector->tx)
+ if (!ring->tx_buf || !ice_ring_is_xdp(ring))
+ break;
+
+ /* restore the value of last node prior to XDP setup */
+ q_vector->tx.ring = ring;
+ }
+
+free_qmap:
+ mutex_lock(&pf->avail_q_mutex);
+ for (i = 0; i < vsi->num_xdp_txq; i++) {
+ clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
+ vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
+ }
+ mutex_unlock(&pf->avail_q_mutex);
+
+ for (i = 0; i < vsi->num_xdp_txq; i++)
+ if (vsi->xdp_rings[i]) {
+ if (vsi->xdp_rings[i]->desc)
+ ice_free_tx_ring(vsi->xdp_rings[i]);
+ kfree_rcu(vsi->xdp_rings[i], rcu);
+ vsi->xdp_rings[i] = NULL;
+ }
+
+ devm_kfree(ice_pf_to_dev(pf), vsi->xdp_rings);
+ vsi->xdp_rings = NULL;
+
+ if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
+ return 0;
+
+ ice_vsi_assign_bpf_prog(vsi, NULL);
+
+ /* notify Tx scheduler that we destroyed XDP queues and bring
+ * back the old number of child nodes
+ */
+ for (i = 0; i < vsi->tc_cfg.numtc; i++)
+ max_txqs[i] = vsi->num_txq;
+
+ return ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
+ max_txqs);
+}
+
+/**
+ * ice_xdp_setup_prog - Add or remove XDP eBPF program
+ * @vsi: VSI to setup XDP for
+ * @prog: XDP program
+ * @extack: netlink extended ack
+ */
+static int
+ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
+ struct netlink_ext_ack *extack)
+{
+ int frame_size = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD;
+ bool if_running = netif_running(vsi->netdev);
+ int ret = 0, xdp_ring_err = 0;
+
+ if (frame_size > vsi->rx_buf_len) {
+ NL_SET_ERR_MSG_MOD(extack, "MTU too large for loading XDP");
+ return -EOPNOTSUPP;
+ }
+
+ /* need to stop netdev while setting up the program for Rx rings */
+ if (if_running && !test_and_set_bit(__ICE_DOWN, vsi->state)) {
+ ret = ice_down(vsi);
+ if (ret) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Preparing device for XDP attach failed");
+ return ret;
+ }
+ }
+
+ if (!ice_is_xdp_ena_vsi(vsi) && prog) {
+ vsi->num_xdp_txq = vsi->alloc_txq;
+ xdp_ring_err = ice_prepare_xdp_rings(vsi, prog);
+ if (xdp_ring_err)
+ NL_SET_ERR_MSG_MOD(extack,
+ "Setting up XDP Tx resources failed");
+ } else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
+ xdp_ring_err = ice_destroy_xdp_rings(vsi);
+ if (xdp_ring_err)
+ NL_SET_ERR_MSG_MOD(extack,
+ "Freeing XDP Tx resources failed");
+ } else {
+ ice_vsi_assign_bpf_prog(vsi, prog);
+ }
+
+ if (if_running)
+ ret = ice_up(vsi);
+
+ if (!ret && prog && vsi->xsk_umems) {
+ int i;
+
+ ice_for_each_rxq(vsi, i) {
+ struct ice_ring *rx_ring = vsi->rx_rings[i];
+
+ if (rx_ring->xsk_umem)
+ napi_schedule(&rx_ring->q_vector->napi);
+ }
+ }
+
+ return (ret || xdp_ring_err) ? -ENOMEM : 0;
+}
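
ice_xdp_setup_prog() reduces to a three-way decision on the (XDP already enabled, new program present) pair: first attach builds the XDP Tx rings, detach destroys them, and anything else is a plain program swap. A tiny sketch of that decision table (enum names invented for illustration):

    #include <stdbool.h>
    #include <stdio.h>

    enum xdp_action { XDP_BUILD_RINGS, XDP_DESTROY_RINGS, XDP_SWAP_PROG };

    static enum xdp_action pick_action(bool xdp_enabled, bool have_new_prog)
    {
        if (!xdp_enabled && have_new_prog)
            return XDP_BUILD_RINGS;     /* first attach: allocate XDP rings */
        if (xdp_enabled && !have_new_prog)
            return XDP_DESTROY_RINGS;   /* detach: free XDP Tx rings */
        return XDP_SWAP_PROG;           /* replace or clear: just swap */
    }

    int main(void)
    {
        printf("%d %d %d\n",
               pick_action(false, true),    /* build */
               pick_action(true, false),    /* destroy */
               pick_action(true, true));    /* swap */
        return 0;
    }
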
+
+/**
+ * ice_xdp - implements XDP handler
+ * @dev: netdevice
+ * @xdp: XDP command
+ */
+static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp)
+{
+ struct ice_netdev_priv *np = netdev_priv(dev);
+ struct ice_vsi *vsi = np->vsi;
+
+ if (vsi->type != ICE_VSI_PF) {
+ NL_SET_ERR_MSG_MOD(xdp->extack,
+ "XDP can be loaded only on PF VSI");
+ return -EINVAL;
+ }
+
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ return ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack);
+ case XDP_QUERY_PROG:
+ xdp->prog_id = vsi->xdp_prog ? vsi->xdp_prog->aux->id : 0;
+ return 0;
+ case XDP_SETUP_XSK_UMEM:
+ return ice_xsk_umem_setup(vsi, xdp->xsk.umem,
+ xdp->xsk.queue_id);
+ default:
+ return -EINVAL;
+ }
+}
+
+/**
* ice_ena_misc_vector - enable the non-queue interrupts
* @pf: board private structure
*/
@@ -1698,8 +2031,10 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
struct ice_pf *pf = (struct ice_pf *)data;
struct ice_hw *hw = &pf->hw;
irqreturn_t ret = IRQ_NONE;
+ struct device *dev;
u32 oicr, ena_mask;
+ dev = ice_pf_to_dev(pf);
set_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state);
set_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state);
@@ -1735,8 +2070,7 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
else if (reset == ICE_RESET_EMPR)
pf->empr_count++;
else
- dev_dbg(&pf->pdev->dev, "Invalid reset type %d\n",
- reset);
+ dev_dbg(dev, "Invalid reset type %d\n", reset);
/* If a reset cycle isn't already in progress, we set a bit in
* pf->state so that the service task can start a reset/rebuild.
@@ -1770,8 +2104,7 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
if (oicr & PFINT_OICR_HMC_ERR_M) {
ena_mask &= ~PFINT_OICR_HMC_ERR_M;
- dev_dbg(&pf->pdev->dev,
- "HMC Error interrupt - info 0x%x, data 0x%x\n",
+ dev_dbg(dev, "HMC Error interrupt - info 0x%x, data 0x%x\n",
rd32(hw, PFHMC_ERRORINFO),
rd32(hw, PFHMC_ERRORDATA));
}
@@ -1779,8 +2112,7 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
/* Report any remaining unexpected interrupts */
oicr &= ena_mask;
if (oicr) {
- dev_dbg(&pf->pdev->dev, "unhandled interrupt oicr=0x%08x\n",
- oicr);
+ dev_dbg(dev, "unhandled interrupt oicr=0x%08x\n", oicr);
/* If a critical error is pending there is no choice but to
* reset the device.
*/
@@ -1838,7 +2170,7 @@ static void ice_free_irq_msix_misc(struct ice_pf *pf)
if (pf->msix_entries) {
synchronize_irq(pf->msix_entries[pf->oicr_idx].vector);
- devm_free_irq(&pf->pdev->dev,
+ devm_free_irq(ice_pf_to_dev(pf),
pf->msix_entries[pf->oicr_idx].vector, pf);
}
@@ -1882,13 +2214,13 @@ static void ice_ena_ctrlq_interrupts(struct ice_hw *hw, u16 reg_idx)
*/
static int ice_req_irq_msix_misc(struct ice_pf *pf)
{
+ struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
int oicr_idx, err = 0;
if (!pf->int_name[0])
snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc",
- dev_driver_string(&pf->pdev->dev),
- dev_name(&pf->pdev->dev));
+ dev_driver_string(dev), dev_name(dev));
/* Do not request IRQ but do enable OICR interrupt since settings are
* lost during reset. Note that this function is called only during
@@ -1905,12 +2237,10 @@ static int ice_req_irq_msix_misc(struct ice_pf *pf)
pf->num_avail_sw_msix -= 1;
pf->oicr_idx = oicr_idx;
- err = devm_request_irq(&pf->pdev->dev,
- pf->msix_entries[pf->oicr_idx].vector,
+ err = devm_request_irq(dev, pf->msix_entries[pf->oicr_idx].vector,
ice_misc_intr, 0, pf->int_name, pf);
if (err) {
- dev_err(&pf->pdev->dev,
- "devm_request_irq for %s failed: %d\n",
+ dev_err(dev, "devm_request_irq for %s failed: %d\n",
pf->int_name, err);
ice_free_res(pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID);
pf->num_avail_sw_msix += 1;
@@ -2043,7 +2373,7 @@ static int ice_cfg_netdev(struct ice_vsi *vsi)
ice_set_ops(netdev);
if (vsi->type == ICE_VSI_PF) {
- SET_NETDEV_DEV(netdev, &pf->pdev->dev);
+ SET_NETDEV_DEV(netdev, ice_pf_to_dev(pf));
ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
ether_addr_copy(netdev->dev_addr, mac_addr);
ether_addr_copy(netdev->perm_addr, mac_addr);
@@ -2219,6 +2549,11 @@ static int ice_setup_pf_sw(struct ice_pf *pf)
status = -ENODEV;
goto unroll_vsi_setup;
}
+ /* netdev has to be configured before setting frame size */
+ ice_vsi_cfg_frame_size(vsi);
+
+ /* Setup DCB netlink interface */
+ ice_dcbnl_setup(vsi);
/* registering the NAPI handler requires both the queues and
* netdev to be created, which are done in ice_pf_vsi_setup()
@@ -2300,6 +2635,7 @@ static void ice_deinit_pf(struct ice_pf *pf)
{
ice_service_task_stop(pf);
mutex_destroy(&pf->sw_mutex);
+ mutex_destroy(&pf->tc_mutex);
mutex_destroy(&pf->avail_q_mutex);
if (pf->avail_txqs) {
@@ -2349,6 +2685,7 @@ static int ice_init_pf(struct ice_pf *pf)
ice_set_pf_caps(pf);
mutex_init(&pf->sw_mutex);
+ mutex_init(&pf->tc_mutex);
/* setup service timer and periodic service task */
timer_setup(&pf->serv_tmr, ice_service_timer, 0);
@@ -2363,7 +2700,7 @@ static int ice_init_pf(struct ice_pf *pf)
pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL);
if (!pf->avail_rxqs) {
- devm_kfree(&pf->pdev->dev, pf->avail_txqs);
+ devm_kfree(ice_pf_to_dev(pf), pf->avail_txqs);
pf->avail_txqs = NULL;
return -ENOMEM;
}
@@ -2380,6 +2717,7 @@ static int ice_init_pf(struct ice_pf *pf)
*/
static int ice_ena_msix_range(struct ice_pf *pf)
{
+ struct device *dev = ice_pf_to_dev(pf);
int v_left, v_actual, v_budget = 0;
int needed, err, i;
@@ -2400,7 +2738,7 @@ static int ice_ena_msix_range(struct ice_pf *pf)
v_budget += needed;
v_left -= needed;
- pf->msix_entries = devm_kcalloc(&pf->pdev->dev, v_budget,
+ pf->msix_entries = devm_kcalloc(dev, v_budget,
sizeof(*pf->msix_entries), GFP_KERNEL);
if (!pf->msix_entries) {
@@ -2416,13 +2754,13 @@ static int ice_ena_msix_range(struct ice_pf *pf)
ICE_MIN_MSIX, v_budget);
if (v_actual < 0) {
- dev_err(&pf->pdev->dev, "unable to reserve MSI-X vectors\n");
+ dev_err(dev, "unable to reserve MSI-X vectors\n");
err = v_actual;
goto msix_err;
}
if (v_actual < v_budget) {
- dev_warn(&pf->pdev->dev,
+ dev_warn(dev,
"not enough OS MSI-X vectors. requested = %d, obtained = %d\n",
v_budget, v_actual);
/* 2 vectors for LAN (traffic + OICR) */
@@ -2441,11 +2779,11 @@ static int ice_ena_msix_range(struct ice_pf *pf)
return v_actual;
msix_err:
- devm_kfree(&pf->pdev->dev, pf->msix_entries);
+ devm_kfree(dev, pf->msix_entries);
goto exit_err;
no_hw_vecs_left_err:
- dev_err(&pf->pdev->dev,
+ dev_err(dev,
"not enough device MSI-X vectors. requested = %d, available = %d\n",
needed, v_left);
err = -ERANGE;
@@ -2461,7 +2799,7 @@ exit_err:
static void ice_dis_msix(struct ice_pf *pf)
{
pci_disable_msix(pf->pdev);
- devm_kfree(&pf->pdev->dev, pf->msix_entries);
+ devm_kfree(ice_pf_to_dev(pf), pf->msix_entries);
pf->msix_entries = NULL;
}
@@ -2474,7 +2812,7 @@ static void ice_clear_interrupt_scheme(struct ice_pf *pf)
ice_dis_msix(pf);
if (pf->irq_tracker) {
- devm_kfree(&pf->pdev->dev, pf->irq_tracker);
+ devm_kfree(ice_pf_to_dev(pf), pf->irq_tracker);
pf->irq_tracker = NULL;
}
}
@@ -2494,7 +2832,7 @@ static int ice_init_interrupt_scheme(struct ice_pf *pf)
/* set up vector assignment tracking */
pf->irq_tracker =
- devm_kzalloc(&pf->pdev->dev, sizeof(*pf->irq_tracker) +
+ devm_kzalloc(ice_pf_to_dev(pf), sizeof(*pf->irq_tracker) +
(sizeof(u16) * vectors), GFP_KERNEL);
if (!pf->irq_tracker) {
ice_dis_msix(pf);
@@ -2510,6 +2848,52 @@ static int ice_init_interrupt_scheme(struct ice_pf *pf)
}
/**
+ * ice_vsi_recfg_qs - Change the number of queues on a VSI
+ * @vsi: VSI being changed
+ * @new_rx: new number of Rx queues
+ * @new_tx: new number of Tx queues
+ *
+ * Only change the number of queues if new_tx or new_rx is non-zero.
+ *
+ * Returns 0 on success.
+ */
+int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx)
+{
+ struct ice_pf *pf = vsi->back;
+ int err = 0, timeout = 50;
+
+ if (!new_rx && !new_tx)
+ return -EINVAL;
+
+ while (test_and_set_bit(__ICE_CFG_BUSY, pf->state)) {
+ timeout--;
+ if (!timeout)
+ return -EBUSY;
+ usleep_range(1000, 2000);
+ }
+
+ if (new_tx)
+ vsi->req_txq = new_tx;
+ if (new_rx)
+ vsi->req_rxq = new_rx;
+
+ /* set for the next time the netdev is started */
+ if (!netif_running(vsi->netdev)) {
+ ice_vsi_rebuild(vsi, false);
+ dev_dbg(ice_pf_to_dev(pf), "Link is down, queue count change happens when link is brought up\n");
+ goto done;
+ }
+
+ ice_vsi_close(vsi);
+ ice_vsi_rebuild(vsi, false);
+ ice_pf_dcb_recfg(pf);
+ ice_vsi_open(vsi);
+done:
+ clear_bit(__ICE_CFG_BUSY, pf->state);
+ return err;
+}
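
The entry gate of ice_vsi_recfg_qs() spins on test_and_set_bit(__ICE_CFG_BUSY, ...) for up to 50 iterations of a 1-2 ms sleep before bailing out with -EBUSY. A user-space approximation of the same acquire-with-timeout pattern (C11 atomic_flag standing in for the state bit, usleep() for usleep_range()):

    #include <stdatomic.h>
    #include <unistd.h>

    static atomic_flag cfg_busy = ATOMIC_FLAG_INIT;

    /* Take the busy flag, sleeping ~1 ms between attempts, 50 tries max */
    static int acquire_cfg_busy(void)
    {
        int timeout = 50;

        while (atomic_flag_test_and_set(&cfg_busy)) {
            if (!--timeout)
                return -16;     /* -EBUSY in the driver */
            usleep(1000);       /* usleep_range(1000, 2000) in the driver */
        }
        return 0;
    }

    int main(void)
    {
        if (!acquire_cfg_busy()) {
            /* ... reconfigure queue counts here ... */
            atomic_flag_clear(&cfg_busy);   /* clear_bit(__ICE_CFG_BUSY) */
        }
        return 0;
    }
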
+
+/**
* ice_log_pkg_init - log result of DDP package load
* @hw: pointer to hardware info
* @status: status of package load
@@ -2518,7 +2902,7 @@ static void
ice_log_pkg_init(struct ice_hw *hw, enum ice_status *status)
{
struct ice_pf *pf = (struct ice_pf *)hw->back;
- struct device *dev = &pf->pdev->dev;
+ struct device *dev = ice_pf_to_dev(pf);
switch (*status) {
case ICE_SUCCESS:
@@ -2598,7 +2982,7 @@ ice_log_pkg_init(struct ice_hw *hw, enum ice_status *status)
ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
break;
case ICE_ERR_AQ_ERROR:
- switch (hw->adminq.sq_last_status) {
+ switch (hw->pkg_dwnld_status) {
case ICE_AQ_RC_ENOSEC:
case ICE_AQ_RC_EBADSIG:
dev_err(dev,
@@ -2637,7 +3021,7 @@ static void
ice_load_pkg(const struct firmware *firmware, struct ice_pf *pf)
{
enum ice_status status = ICE_ERR_PARAM;
- struct device *dev = &pf->pdev->dev;
+ struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
/* Load DDP Package */
@@ -2677,7 +3061,7 @@ ice_load_pkg(const struct firmware *firmware, struct ice_pf *pf)
static void ice_verify_cacheline_size(struct ice_pf *pf)
{
if (rd32(&pf->hw, GLPCI_CNF2) & GLPCI_CNF2_CACHELINE_SIZE_M)
- dev_warn(&pf->pdev->dev,
+ dev_warn(ice_pf_to_dev(pf),
"%d Byte cache line assumption is invalid, driver may have Tx timeouts!\n",
ICE_CACHE_LINE_BYTES);
}
@@ -2747,7 +3131,7 @@ static void ice_request_fw(struct ice_pf *pf)
{
char *opt_fw_filename = ice_get_opt_fw_name(pf);
const struct firmware *firmware = NULL;
- struct device *dev = &pf->pdev->dev;
+ struct device *dev = ice_pf_to_dev(pf);
int err = 0;
/* optional device-specific DDP (if present) overrides the default DDP
@@ -2831,6 +3215,8 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
hw = &pf->hw;
hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0];
+ pci_save_state(pdev);
+
hw->back = pf;
hw->vendor_id = pdev->vendor;
hw->device_id = pdev->device;
@@ -2936,7 +3322,7 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
err = ice_setup_pf_sw(pf);
if (err) {
- dev_err(dev, "probe failed due to setup PF switch:%d\n", err);
+ dev_err(dev, "probe failed due to setup PF switch: %d\n", err);
goto err_alloc_sw_unroll;
}
@@ -2976,12 +3362,15 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
ice_cfg_lldp_mib_change(&pf->hw, true);
}
+ /* print PCI link speed and width */
+ pcie_print_link_status(pf->pdev);
+
return 0;
err_alloc_sw_unroll:
set_bit(__ICE_SERVICE_DIS, pf->state);
set_bit(__ICE_DOWN, pf->state);
- devm_kfree(&pf->pdev->dev, pf->first_sw);
+ devm_kfree(dev, pf->first_sw);
err_msix_misc_unroll:
ice_free_irq_msix_misc(pf);
err_init_interrupt_unroll:
@@ -3027,12 +3416,13 @@ static void ice_remove(struct pci_dev *pdev)
}
ice_deinit_pf(pf);
ice_deinit_hw(&pf->hw);
- ice_clear_interrupt_scheme(pf);
/* Issue a PFR as part of the prescribed driver unload flow. Do not
* do it via ice_schedule_reset() since there is no need to rebuild
* and the service task is already stopped.
*/
ice_reset(&pf->hw, ICE_RESET_PFR);
+ pci_wait_for_pending_transaction(pdev);
+ ice_clear_interrupt_scheme(pf);
pci_disable_pcie_error_reporting(pdev);
}
@@ -3347,6 +3737,48 @@ static void ice_set_rx_mode(struct net_device *netdev)
}
/**
+ * ice_set_tx_maxrate - NDO callback to set the maximum per-queue bitrate
+ * @netdev: network interface device structure
+ * @queue_index: Queue ID
+ * @maxrate: maximum bandwidth in Mbps
+ */
+static int
+ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *vsi = np->vsi;
+ enum ice_status status;
+ u16 q_handle;
+ u8 tc;
+
+ /* Validate that the requested maxrate is within the permitted range */
+ if (maxrate && (maxrate > (ICE_SCHED_MAX_BW / 1000))) {
+ netdev_err(netdev,
+ "Invalid max rate %d specified for the queue %d\n",
+ maxrate, queue_index);
+ return -EINVAL;
+ }
+
+ q_handle = vsi->tx_rings[queue_index]->q_handle;
+ tc = ice_dcb_get_tc(vsi, queue_index);
+
+ /* Set BW back to default when the user sets maxrate to 0 */
+ if (!maxrate)
+ status = ice_cfg_q_bw_dflt_lmt(vsi->port_info, vsi->idx, tc,
+ q_handle, ICE_MAX_BW);
+ else
+ status = ice_cfg_q_bw_lmt(vsi->port_info, vsi->idx, tc,
+ q_handle, ICE_MAX_BW, maxrate * 1000);
+ if (status) {
+ netdev_err(netdev,
+ "Unable to set Tx max rate, error %d\n", status);
+ return -EIO;
+ }
+
+ return 0;
+}
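
ndo_set_tx_maxrate hands the driver a rate in Mbps while the scheduler works in Kbps, hence the maxrate * 1000 conversion and the ICE_SCHED_MAX_BW / 1000 upper bound above. A small sketch of that validation and conversion, assuming ICE_SCHED_MAX_BW is 100 Gbps expressed in Kbps (consistent with the 0.5 Mbps to 100 Gbps range noted later in this patch):

    #include <stdio.h>

    #define ICE_SCHED_MAX_BW 100000000U     /* assumed: 100 Gbps, in Kbps */

    /* Returns the rate in Kbps, 0 for "restore default", -1 if out of range */
    static long maxrate_mbps_to_kbps(unsigned int maxrate)
    {
        if (!maxrate)
            return 0;       /* 0 means: drop the limit, back to default BW */
        if (maxrate > ICE_SCHED_MAX_BW / 1000)
            return -1;      /* -EINVAL in the driver */
        return (long)maxrate * 1000;
    }

    int main(void)
    {
        printf("%ld %ld %ld\n",
               maxrate_mbps_to_kbps(0),         /* 0: default */
               maxrate_mbps_to_kbps(2500),      /* 2500000 Kbps */
               maxrate_mbps_to_kbps(200000));   /* -1: above 100 Gbps */
        return 0;
    }
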
+
+/**
* ice_fdb_add - add an entry to the hardware database
* @ndm: the input from the stack
* @tb: pointer to array of nladdr (unused)
@@ -3426,6 +3858,7 @@ ice_set_features(struct net_device *netdev, netdev_features_t features)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
+ struct ice_pf *pf = vsi->back;
int ret = 0;
/* Don't set any netdev advanced features with device in Safe Mode */
@@ -3435,6 +3868,13 @@ ice_set_features(struct net_device *netdev, netdev_features_t features)
return ret;
}
+ /* Do not change setting during reset */
+ if (ice_is_reset_in_progress(pf->state)) {
+ dev_err(&vsi->back->pdev->dev,
+ "Device is resetting, changing advanced netdev features temporarily unavailable.\n");
+ return -EBUSY;
+ }
+
/* Multiple features can be changed in one call so keep features in
* separate if/else statements to guarantee each feature is checked
*/
@@ -3505,6 +3945,8 @@ int ice_vsi_cfg(struct ice_vsi *vsi)
ice_vsi_cfg_dcb_rings(vsi);
err = ice_vsi_cfg_lan_txqs(vsi);
+ if (!err && ice_is_xdp_ena_vsi(vsi))
+ err = ice_vsi_cfg_xdp_txqs(vsi);
if (!err)
err = ice_vsi_cfg_rxqs(vsi);
@@ -3920,6 +4362,13 @@ int ice_down(struct ice_vsi *vsi)
netdev_err(vsi->netdev,
"Failed stop Tx rings, VSI %d error %d\n",
vsi->vsi_num, tx_err);
+ if (!tx_err && ice_is_xdp_ena_vsi(vsi)) {
+ tx_err = ice_vsi_stop_xdp_tx_rings(vsi);
+ if (tx_err)
+ netdev_err(vsi->netdev,
+ "Failed stop XDP rings, VSI %d error %d\n",
+ vsi->vsi_num, tx_err);
+ }
rx_err = ice_vsi_stop_rx_rings(vsi);
if (rx_err)
@@ -3970,8 +4419,13 @@ int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
}
ice_for_each_txq(vsi, i) {
- vsi->tx_rings[i]->netdev = vsi->netdev;
- err = ice_setup_tx_ring(vsi->tx_rings[i]);
+ struct ice_ring *ring = vsi->tx_rings[i];
+
+ if (!ring)
+ return -EINVAL;
+
+ ring->netdev = vsi->netdev;
+ err = ice_setup_tx_ring(ring);
if (err)
break;
}
@@ -3996,8 +4450,13 @@ int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)
}
ice_for_each_rxq(vsi, i) {
- vsi->rx_rings[i]->netdev = vsi->netdev;
- err = ice_setup_rx_ring(vsi->rx_rings[i]);
+ struct ice_ring *ring = vsi->rx_rings[i];
+
+ if (!ring)
+ return -EINVAL;
+
+ ring->netdev = vsi->netdev;
+ err = ice_setup_rx_ring(ring);
if (err)
break;
}
@@ -4033,7 +4492,7 @@ static int ice_vsi_open(struct ice_vsi *vsi)
goto err_setup_rx;
snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
- dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
+ dev_driver_string(ice_pf_to_dev(pf)), vsi->netdev->name);
err = ice_vsi_req_irq_msix(vsi, int_name);
if (err)
goto err_setup_rx;
@@ -4082,61 +4541,13 @@ static void ice_vsi_release_all(struct ice_pf *pf)
err = ice_vsi_release(pf->vsi[i]);
if (err)
- dev_dbg(&pf->pdev->dev,
+ dev_dbg(ice_pf_to_dev(pf),
"Failed to release pf->vsi[%d], err %d, vsi_num = %d\n",
i, err, pf->vsi[i]->vsi_num);
}
}
/**
- * ice_ena_vsi - resume a VSI
- * @vsi: the VSI being resume
- * @locked: is the rtnl_lock already held
- */
-static int ice_ena_vsi(struct ice_vsi *vsi, bool locked)
-{
- int err = 0;
-
- if (!test_bit(__ICE_NEEDS_RESTART, vsi->state))
- return 0;
-
- clear_bit(__ICE_NEEDS_RESTART, vsi->state);
-
- if (vsi->netdev && vsi->type == ICE_VSI_PF) {
- if (netif_running(vsi->netdev)) {
- if (!locked)
- rtnl_lock();
-
- err = ice_open(vsi->netdev);
-
- if (!locked)
- rtnl_unlock();
- }
- }
-
- return err;
-}
-
-/**
- * ice_pf_ena_all_vsi - Resume all VSIs on a PF
- * @pf: the PF
- * @locked: is the rtnl_lock already held
- */
-#ifdef CONFIG_DCB
-int ice_pf_ena_all_vsi(struct ice_pf *pf, bool locked)
-{
- int v;
-
- ice_for_each_vsi(pf, v)
- if (pf->vsi[v])
- if (ice_ena_vsi(pf->vsi[v], locked))
- return -EIO;
-
- return 0;
-}
-#endif /* CONFIG_DCB */
-
-/**
* ice_vsi_rebuild_by_type - Rebuild VSI of a given type
* @pf: pointer to the PF instance
* @type: VSI type to rebuild
@@ -4145,6 +4556,7 @@ int ice_pf_ena_all_vsi(struct ice_pf *pf, bool locked)
*/
static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type)
{
+ struct device *dev = ice_pf_to_dev(pf);
enum ice_status status;
int i, err;
@@ -4155,20 +4567,20 @@ static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type)
continue;
/* rebuild the VSI */
- err = ice_vsi_rebuild(vsi);
+ err = ice_vsi_rebuild(vsi, true);
if (err) {
- dev_err(&pf->pdev->dev,
- "rebuild VSI failed, err %d, VSI index %d, type %d\n",
- err, vsi->idx, type);
+ dev_err(dev,
+ "rebuild VSI failed, err %d, VSI index %d, type %s\n",
+ err, vsi->idx, ice_vsi_type_str(type));
return err;
}
/* replay filters for the VSI */
status = ice_replay_vsi(&pf->hw, vsi->idx);
if (status) {
- dev_err(&pf->pdev->dev,
- "replay VSI failed, status %d, VSI index %d, type %d\n",
- status, vsi->idx, type);
+ dev_err(dev,
+ "replay VSI failed, status %d, VSI index %d, type %s\n",
+ status, vsi->idx, ice_vsi_type_str(type));
return -EIO;
}
@@ -4180,14 +4592,14 @@ static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type)
/* enable the VSI */
err = ice_ena_vsi(vsi, false);
if (err) {
- dev_err(&pf->pdev->dev,
- "enable VSI failed, err %d, VSI index %d, type %d\n",
- err, vsi->idx, type);
+ dev_err(dev,
+ "enable VSI failed, err %d, VSI index %d, type %s\n",
+ err, vsi->idx, ice_vsi_type_str(type));
return err;
}
- dev_info(&pf->pdev->dev, "VSI rebuilt. VSI index %d, type %d\n",
- vsi->idx, type);
+ dev_info(dev, "VSI rebuilt. VSI index %d, type %s\n", vsi->idx,
+ ice_vsi_type_str(type));
}
return 0;
@@ -4226,7 +4638,7 @@ static void ice_update_pf_netdev_link(struct ice_pf *pf)
*/
static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
{
- struct device *dev = &pf->pdev->dev;
+ struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
enum ice_status ret;
int err;
@@ -4272,7 +4684,7 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
err = ice_update_link_info(hw->port_info);
if (err)
- dev_err(&pf->pdev->dev, "Get link status error %d\n", err);
+ dev_err(dev, "Get link status error %d\n", err);
/* start misc vector */
err = ice_req_irq_msix_misc(pf);
@@ -4329,6 +4741,18 @@ clear_recovery:
}
/**
+ * ice_max_xdp_frame_size - returns the maximum allowed frame size for XDP
+ * @vsi: Pointer to VSI structure
+ */
+static int ice_max_xdp_frame_size(struct ice_vsi *vsi)
+{
+ if (PAGE_SIZE >= 8192 || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags))
+ return ICE_RXBUF_2048 - XDP_PACKET_HEADROOM;
+ else
+ return ICE_RXBUF_3072;
+}
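
Combined with the MTU check added to ice_change_mtu() below, this caps the XDP MTU at frame_size - ICE_ETH_PKT_HDR_PAD. A quick numeric sketch, assuming the customary values ICE_RXBUF_2048 = 2048, ICE_RXBUF_3072 = 3072, XDP_PACKET_HEADROOM = 256 and ICE_ETH_PKT_HDR_PAD = 26 (Ethernet header + FCS + two VLAN tags; the exact constants live elsewhere in the driver):

    #include <stdbool.h>
    #include <stdio.h>

    #define ICE_RXBUF_2048      2048
    #define ICE_RXBUF_3072      3072
    #define XDP_PACKET_HEADROOM 256
    #define ICE_ETH_PKT_HDR_PAD 26  /* assumed: ETH_HLEN + FCS + 2 * VLAN */

    static int max_xdp_frame_size(bool big_pages_or_legacy_rx)
    {
        if (big_pages_or_legacy_rx)
            return ICE_RXBUF_2048 - XDP_PACKET_HEADROOM;
        return ICE_RXBUF_3072;
    }

    int main(void)
    {
        /* max MTU = frame size minus L2 header/FCS padding */
        printf("legacy/8K pages: MTU <= %d\n",
               max_xdp_frame_size(true) - ICE_ETH_PKT_HDR_PAD);  /* 1766 */
        printf("4K pages:        MTU <= %d\n",
               max_xdp_frame_size(false) - ICE_ETH_PKT_HDR_PAD); /* 3046 */
        return 0;
    }
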
+
+/**
* ice_change_mtu - NDO callback to change the MTU
* @netdev: network interface device structure
* @new_mtu: new value for maximum frame size
@@ -4347,6 +4771,16 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
return 0;
}
+ if (ice_is_xdp_ena_vsi(vsi)) {
+ int frame_size = ice_max_xdp_frame_size(vsi);
+
+ if (new_mtu + ICE_ETH_PKT_HDR_PAD > frame_size) {
+ netdev_err(netdev, "max MTU for XDP usage is %d\n",
+ frame_size - ICE_ETH_PKT_HDR_PAD);
+ return -EINVAL;
+ }
+ }
+
if (new_mtu < netdev->min_mtu) {
netdev_err(netdev, "new MTU invalid. min_mtu is %d\n",
netdev->min_mtu);
@@ -4409,7 +4843,9 @@ int ice_set_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
enum ice_status status;
+ struct device *dev;
+ dev = ice_pf_to_dev(pf);
if (seed) {
struct ice_aqc_get_set_rss_keys *buf =
(struct ice_aqc_get_set_rss_keys *)seed;
@@ -4417,8 +4853,7 @@ int ice_set_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
status = ice_aq_set_rss_key(hw, vsi->idx, buf);
if (status) {
- dev_err(&pf->pdev->dev,
- "Cannot set RSS key, err %d aq_err %d\n",
+ dev_err(dev, "Cannot set RSS key, err %d aq_err %d\n",
status, hw->adminq.rq_last_status);
return -EIO;
}
@@ -4428,8 +4863,7 @@ int ice_set_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
status = ice_aq_set_rss_lut(hw, vsi->idx, vsi->rss_lut_type,
lut, lut_size);
if (status) {
- dev_err(&pf->pdev->dev,
- "Cannot set RSS lut, err %d aq_err %d\n",
+ dev_err(dev, "Cannot set RSS lut, err %d aq_err %d\n",
status, hw->adminq.rq_last_status);
return -EIO;
}
@@ -4452,15 +4886,16 @@ int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
enum ice_status status;
+ struct device *dev;
+ dev = ice_pf_to_dev(pf);
if (seed) {
struct ice_aqc_get_set_rss_keys *buf =
(struct ice_aqc_get_set_rss_keys *)seed;
status = ice_aq_get_rss_key(hw, vsi->idx, buf);
if (status) {
- dev_err(&pf->pdev->dev,
- "Cannot get RSS key, err %d aq_err %d\n",
+ dev_err(dev, "Cannot get RSS key, err %d aq_err %d\n",
status, hw->adminq.rq_last_status);
return -EIO;
}
@@ -4470,8 +4905,7 @@ int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
status = ice_aq_get_rss_lut(hw, vsi->idx, vsi->rss_lut_type,
lut, lut_size);
if (status) {
- dev_err(&pf->pdev->dev,
- "Cannot get RSS lut, err %d aq_err %d\n",
+ dev_err(dev, "Cannot get RSS lut, err %d aq_err %d\n",
status, hw->adminq.rq_last_status);
return -EIO;
}
@@ -4515,7 +4949,6 @@ ice_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
*/
static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
{
- struct device *dev = &vsi->back->pdev->dev;
struct ice_aqc_vsi_props *vsi_props;
struct ice_hw *hw = &vsi->back->hw;
struct ice_vsi_ctx *ctxt;
@@ -4524,7 +4957,7 @@ static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
vsi_props = &vsi->info;
- ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL);
+ ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
if (!ctxt)
return -ENOMEM;
@@ -4540,7 +4973,7 @@ static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (status) {
- dev_err(dev, "update VSI for bridge mode failed, bmode = %d err %d aq_err %d\n",
+ dev_err(&vsi->back->pdev->dev, "update VSI for bridge mode failed, bmode = %d err %d aq_err %d\n",
bmode, status, hw->adminq.sq_last_status);
ret = -EIO;
goto out;
@@ -4549,7 +4982,7 @@ static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
vsi_props->sw_flags = ctxt->info.sw_flags;
out:
- devm_kfree(dev, ctxt);
+ kfree(ctxt);
return ret;
}
@@ -4864,12 +5297,14 @@ static const struct net_device_ops ice_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = ice_change_mtu,
.ndo_get_stats64 = ice_get_stats64,
+ .ndo_set_tx_maxrate = ice_set_tx_maxrate,
.ndo_set_vf_spoofchk = ice_set_vf_spoofchk,
.ndo_set_vf_mac = ice_set_vf_mac,
.ndo_get_vf_config = ice_get_vf_cfg,
.ndo_set_vf_trust = ice_set_vf_trust,
.ndo_set_vf_vlan = ice_set_vf_port_vlan,
.ndo_set_vf_link_state = ice_set_vf_link_state,
+ .ndo_get_vf_stats = ice_get_vf_stats,
.ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid,
.ndo_set_features = ice_set_features,
@@ -4878,4 +5313,7 @@ static const struct net_device_ops ice_netdev_ops = {
.ndo_fdb_add = ice_fdb_add,
.ndo_fdb_del = ice_fdb_del,
.ndo_tx_timeout = ice_tx_timeout,
+ .ndo_bpf = ice_xdp,
+ .ndo_xdp_xmit = ice_xdp_xmit,
+ .ndo_xsk_wakeup = ice_xsk_wakeup,
};
diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c
index bcb431f1bd92..57c73f613f32 100644
--- a/drivers/net/ethernet/intel/ice/ice_nvm.c
+++ b/drivers/net/ethernet/intel/ice/ice_nvm.c
@@ -219,8 +219,7 @@ static void ice_release_nvm(struct ice_hw *hw)
*
* Reads one 16 bit word from the Shadow RAM using the ice_read_sr_word_aq.
*/
-static enum ice_status
-ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data)
+enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data)
{
enum ice_status status;
@@ -242,9 +241,10 @@ ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data)
*/
enum ice_status ice_init_nvm(struct ice_hw *hw)
{
+ u16 oem_hi, oem_lo, boot_cfg_tlv, boot_cfg_tlv_len;
struct ice_nvm_info *nvm = &hw->nvm;
u16 eetrack_lo, eetrack_hi;
- enum ice_status status = 0;
+ enum ice_status status;
u32 fla, gens_stat;
u8 sr_size;
@@ -261,15 +261,15 @@ enum ice_status ice_init_nvm(struct ice_hw *hw)
fla = rd32(hw, GLNVM_FLA);
if (fla & GLNVM_FLA_LOCKED_M) { /* Normal programming mode */
nvm->blank_nvm_mode = false;
- } else { /* Blank programming mode */
+ } else {
+ /* Blank programming mode */
nvm->blank_nvm_mode = true;
- status = ICE_ERR_NVM_BLANK_MODE;
ice_debug(hw, ICE_DBG_NVM,
"NVM init error: unsupported blank mode.\n");
- return status;
+ return ICE_ERR_NVM_BLANK_MODE;
}
- status = ice_read_sr_word(hw, ICE_SR_NVM_DEV_STARTER_VER, &hw->nvm.ver);
+ status = ice_read_sr_word(hw, ICE_SR_NVM_DEV_STARTER_VER, &nvm->ver);
if (status) {
ice_debug(hw, ICE_DBG_INIT,
"Failed to read DEV starter version.\n");
@@ -287,9 +287,42 @@ enum ice_status ice_init_nvm(struct ice_hw *hw)
return status;
}
- hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;
+ nvm->eetrack = (eetrack_hi << 16) | eetrack_lo;
- return status;
+ status = ice_get_pfa_module_tlv(hw, &boot_cfg_tlv, &boot_cfg_tlv_len,
+ ICE_SR_BOOT_CFG_PTR);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT,
+ "Failed to read Boot Configuration Block TLV.\n");
+ return status;
+ }
+
+ /* Boot Configuration Block must be at least 2 words long
+ * (Combo Image Version High and Combo Image Version Low)
+ */
+ if (boot_cfg_tlv_len < 2) {
+ ice_debug(hw, ICE_DBG_INIT,
+ "Invalid Boot Configuration Block TLV size.\n");
+ return ICE_ERR_INVAL_SIZE;
+ }
+
+ status = ice_read_sr_word(hw, (boot_cfg_tlv + ICE_NVM_OEM_VER_OFF),
+ &oem_hi);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT, "Failed to read OEM_VER hi.\n");
+ return status;
+ }
+
+ status = ice_read_sr_word(hw, (boot_cfg_tlv + ICE_NVM_OEM_VER_OFF + 1),
+ &oem_lo);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT, "Failed to read OEM_VER lo.\n");
+ return status;
+ }
+
+ nvm->oem_ver = ((u32)oem_hi << 16) | oem_lo;
+
+ return 0;
}
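
Both the EETRACK ID and the new OEM version are read as two 16-bit Shadow RAM words and packed into a single u32, high word first. The packing in isolation:

    #include <stdint.h>
    #include <stdio.h>

    /* Pack two 16-bit Shadow RAM words into one 32-bit value, high word first */
    static uint32_t pack_sr_words(uint16_t hi, uint16_t lo)
    {
        return ((uint32_t)hi << 16) | lo;
    }

    int main(void)
    {
        uint16_t oem_hi = 0x0102, oem_lo = 0x0304;

        printf("oem_ver = 0x%08x\n", pack_sr_words(oem_hi, oem_lo));
        return 0;   /* prints 0x01020304 */
    }
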
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.h b/drivers/net/ethernet/intel/ice/ice_nvm.h
new file mode 100644
index 000000000000..a9fa011c22c6
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_nvm.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019, Intel Corporation. */
+
+#ifndef _ICE_NVM_H_
+#define _ICE_NVM_H_
+
+enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data);
+#endif /* _ICE_NVM_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index 2fde9653a608..eae707ddf8e8 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -411,6 +411,27 @@ ice_aq_add_sched_elems(struct ice_hw *hw, u16 grps_req,
}
/**
+ * ice_aq_cfg_sched_elems - configures scheduler elements
+ * @hw: pointer to the HW struct
+ * @elems_req: number of elements to configure
+ * @buf: pointer to buffer
+ * @buf_size: buffer size in bytes
+ * @elems_cfgd: returns total number of elements configured
+ * @cd: pointer to command details structure or NULL
+ *
+ * Configure scheduling elements (0x0403)
+ */
+static enum ice_status
+ice_aq_cfg_sched_elems(struct ice_hw *hw, u16 elems_req,
+ struct ice_aqc_conf_elem *buf, u16 buf_size,
+ u16 *elems_cfgd, struct ice_sq_cd *cd)
+{
+ return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_cfg_sched_elems,
+ elems_req, (void *)buf, buf_size,
+ elems_cfgd, cd);
+}
+
+/**
* ice_aq_suspend_sched_elems - suspend scheduler elements
* @hw: pointer to the HW struct
* @elems_req: number of elements to suspend
@@ -557,6 +578,149 @@ ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
}
/**
+ * ice_aq_rl_profile - performs a rate limiting task
+ * @hw: pointer to the HW struct
+ * @opcode: opcode for add, query, or remove profile(s)
+ * @num_profiles: the number of profiles
+ * @buf: pointer to buffer
+ * @buf_size: buffer size in bytes
+ * @num_processed: number of processed add or remove profile(s) to return
+ * @cd: pointer to command details structure
+ *
+ * RL profile function to add, query, or remove profile(s)
+ */
+static enum ice_status
+ice_aq_rl_profile(struct ice_hw *hw, enum ice_adminq_opc opcode,
+ u16 num_profiles, struct ice_aqc_rl_profile_generic_elem *buf,
+ u16 buf_size, u16 *num_processed, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_rl_profile *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ cmd = &desc.params.rl_profile;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, opcode);
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ cmd->num_profiles = cpu_to_le16(num_profiles);
+ status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
+ if (!status && num_processed)
+ *num_processed = le16_to_cpu(cmd->num_processed);
+ return status;
+}
+
+/**
+ * ice_aq_add_rl_profile - adds rate limiting profile(s)
+ * @hw: pointer to the HW struct
+ * @num_profiles: the number of profile(s) to be added
+ * @buf: pointer to buffer
+ * @buf_size: buffer size in bytes
+ * @num_profiles_added: total number of profiles added to return
+ * @cd: pointer to command details structure
+ *
+ * Add RL profile (0x0410)
+ */
+static enum ice_status
+ice_aq_add_rl_profile(struct ice_hw *hw, u16 num_profiles,
+ struct ice_aqc_rl_profile_generic_elem *buf,
+ u16 buf_size, u16 *num_profiles_added,
+ struct ice_sq_cd *cd)
+{
+ return ice_aq_rl_profile(hw, ice_aqc_opc_add_rl_profiles,
+ num_profiles, buf,
+ buf_size, num_profiles_added, cd);
+}
+
+/**
+ * ice_aq_remove_rl_profile - removes RL profile(s)
+ * @hw: pointer to the HW struct
+ * @num_profiles: the number of profile(s) to remove
+ * @buf: pointer to buffer
+ * @buf_size: buffer size in bytes
+ * @num_profiles_removed: total number of profiles removed to return
+ * @cd: pointer to command details structure or NULL
+ *
+ * Remove RL profile (0x0415)
+ */
+static enum ice_status
+ice_aq_remove_rl_profile(struct ice_hw *hw, u16 num_profiles,
+ struct ice_aqc_rl_profile_generic_elem *buf,
+ u16 buf_size, u16 *num_profiles_removed,
+ struct ice_sq_cd *cd)
+{
+ return ice_aq_rl_profile(hw, ice_aqc_opc_remove_rl_profiles,
+ num_profiles, buf,
+ buf_size, num_profiles_removed, cd);
+}
+
+/**
+ * ice_sched_del_rl_profile - remove RL profile
+ * @hw: pointer to the HW struct
+ * @rl_info: rate limit profile information
+ *
+ * If the profile ID is no longer referenced, this removes the profile ID
+ * and its associated parameters from the HW DB and locally. The caller
+ * needs to hold the scheduler lock.
+ */
+static enum ice_status
+ice_sched_del_rl_profile(struct ice_hw *hw,
+ struct ice_aqc_rl_profile_info *rl_info)
+{
+ struct ice_aqc_rl_profile_generic_elem *buf;
+ u16 num_profiles_removed;
+ enum ice_status status;
+ u16 num_profiles = 1;
+
+ if (rl_info->prof_id_ref != 0)
+ return ICE_ERR_IN_USE;
+
+ /* Safe to remove profile ID */
+ buf = (struct ice_aqc_rl_profile_generic_elem *)
+ &rl_info->profile;
+ status = ice_aq_remove_rl_profile(hw, num_profiles, buf, sizeof(*buf),
+ &num_profiles_removed, NULL);
+ if (status || num_profiles_removed != num_profiles)
+ return ICE_ERR_CFG;
+
+ /* Delete stale entry now */
+ list_del(&rl_info->list_entry);
+ devm_kfree(ice_hw_to_dev(hw), rl_info);
+ return status;
+}
+
+/**
+ * ice_sched_clear_rl_prof - clears RL prof entries
+ * @pi: port information structure
+ *
+ * This function removes all RL profiles from the HW as well as the SW DB.
+ */
+static void ice_sched_clear_rl_prof(struct ice_port_info *pi)
+{
+ u16 ln;
+
+ for (ln = 0; ln < pi->hw->num_tx_sched_layers; ln++) {
+ struct ice_aqc_rl_profile_info *rl_prof_elem;
+ struct ice_aqc_rl_profile_info *rl_prof_tmp;
+
+ list_for_each_entry_safe(rl_prof_elem, rl_prof_tmp,
+ &pi->rl_prof_list[ln], list_entry) {
+ struct ice_hw *hw = pi->hw;
+ enum ice_status status;
+
+ rl_prof_elem->prof_id_ref = 0;
+ status = ice_sched_del_rl_profile(hw, rl_prof_elem);
+ if (status) {
+ ice_debug(hw, ICE_DBG_SCHED,
+ "Remove rl profile failed\n");
+ /* On error, free mem required */
+ list_del(&rl_prof_elem->list_entry);
+ devm_kfree(ice_hw_to_dev(hw), rl_prof_elem);
+ }
+ }
+ }
+}
+
+/**
* ice_sched_clear_agg - clears the aggregator related information
* @hw: pointer to the hardware structure
*
@@ -592,6 +756,8 @@ static void ice_sched_clear_tx_topo(struct ice_port_info *pi)
{
if (!pi)
return;
+ /* remove RL profiles related lists */
+ ice_sched_clear_rl_prof(pi);
if (pi->root) {
ice_free_sched_node(pi, pi->root);
pi->root = NULL;
@@ -632,8 +798,7 @@ void ice_sched_cleanup_all(struct ice_hw *hw)
hw->layer_info = NULL;
}
- if (hw->port_info)
- ice_sched_clear_port(hw->port_info);
+ ice_sched_clear_port(hw->port_info);
hw->num_tx_sched_layers = 0;
hw->num_tx_sched_phys_layers = 0;
@@ -1014,6 +1179,8 @@ enum ice_status ice_sched_init_port(struct ice_port_info *pi)
/* initialize the port for handling the scheduler tree */
pi->port_state = ICE_SCHED_PORT_STATE_READY;
mutex_init(&pi->sched_lock);
+ for (i = 0; i < ICE_AQC_TOPO_MAX_LEVEL_NUM; i++)
+ INIT_LIST_HEAD(&pi->rl_prof_list[i]);
err_init_port:
if (status && pi->root) {
@@ -1062,8 +1229,8 @@ enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw)
* and so on. This array will be populated from root (index 0) to
* qgroup layer 7. Leaf node has no children.
*/
- for (i = 0; i < hw->num_tx_sched_layers; i++) {
- max_sibl = buf->layer_props[i].max_sibl_grp_sz;
+ for (i = 0; i < hw->num_tx_sched_layers - 1; i++) {
+ max_sibl = buf->layer_props[i + 1].max_sibl_grp_sz;
hw->max_children[i] = le16_to_cpu(max_sibl);
}
@@ -1670,3 +1837,1095 @@ enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle)
{
return ice_sched_rm_vsi_cfg(pi, vsi_handle, ICE_SCHED_NODE_OWNER_LAN);
}
+
+/**
+ * ice_sched_rm_unused_rl_prof - remove unused RL profile
+ * @pi: port information structure
+ *
+ * This function removes unused rate limit profiles from the HW and
+ * SW DB. The caller needs to hold scheduler lock.
+ */
+static void ice_sched_rm_unused_rl_prof(struct ice_port_info *pi)
+{
+ u16 ln;
+
+ for (ln = 0; ln < pi->hw->num_tx_sched_layers; ln++) {
+ struct ice_aqc_rl_profile_info *rl_prof_elem;
+ struct ice_aqc_rl_profile_info *rl_prof_tmp;
+
+ list_for_each_entry_safe(rl_prof_elem, rl_prof_tmp,
+ &pi->rl_prof_list[ln], list_entry) {
+ if (!ice_sched_del_rl_profile(pi->hw, rl_prof_elem))
+ ice_debug(pi->hw, ICE_DBG_SCHED,
+ "Removed rl profile\n");
+ }
+ }
+}
+
+/**
+ * ice_sched_update_elem - update element
+ * @hw: pointer to the HW struct
+ * @node: pointer to node
+ * @info: node info to update
+ *
+ * It updates the HW DB, and local SW DB of node. It updates the scheduling
+ * parameters of node from argument info data buffer (Info->data buf) and
+ * returns success or error on config sched element failure. The caller
+ * needs to hold scheduler lock.
+ */
+static enum ice_status
+ice_sched_update_elem(struct ice_hw *hw, struct ice_sched_node *node,
+ struct ice_aqc_txsched_elem_data *info)
+{
+ struct ice_aqc_conf_elem buf;
+ enum ice_status status;
+ u16 elem_cfgd = 0;
+ u16 num_elems = 1;
+
+ buf.generic[0] = *info;
+ /* Parent TEID is reserved field in this aq call */
+ buf.generic[0].parent_teid = 0;
+ /* Element type is reserved field in this aq call */
+ buf.generic[0].data.elem_type = 0;
+ /* Flags is reserved field in this aq call */
+ buf.generic[0].data.flags = 0;
+
+ /* Update HW DB */
+ /* Configure element node */
+ status = ice_aq_cfg_sched_elems(hw, num_elems, &buf, sizeof(buf),
+ &elem_cfgd, NULL);
+ if (status || elem_cfgd != num_elems) {
+ ice_debug(hw, ICE_DBG_SCHED, "Config sched elem error\n");
+ return ICE_ERR_CFG;
+ }
+
+ /* Config success case */
+ /* Now update local SW DB */
+ /* Only copy the data portion of info buffer */
+ node->info.data = info->data;
+ return status;
+}
+
+/**
+ * ice_sched_cfg_node_bw_alloc - configure node BW weight/alloc params
+ * @hw: pointer to the HW struct
+ * @node: sched node to configure
+ * @rl_type: rate limit type CIR, EIR, or shared
+ * @bw_alloc: BW weight/allocation
+ *
+ * This function configures node element's BW allocation.
+ */
+static enum ice_status
+ice_sched_cfg_node_bw_alloc(struct ice_hw *hw, struct ice_sched_node *node,
+ enum ice_rl_type rl_type, u8 bw_alloc)
+{
+ struct ice_aqc_txsched_elem_data buf;
+ struct ice_aqc_txsched_elem *data;
+ enum ice_status status;
+
+ buf = node->info;
+ data = &buf.data;
+ if (rl_type == ICE_MIN_BW) {
+ data->valid_sections |= ICE_AQC_ELEM_VALID_CIR;
+ data->cir_bw.bw_alloc = cpu_to_le16(bw_alloc);
+ } else if (rl_type == ICE_MAX_BW) {
+ data->valid_sections |= ICE_AQC_ELEM_VALID_EIR;
+ data->eir_bw.bw_alloc = cpu_to_le16(bw_alloc);
+ } else {
+ return ICE_ERR_PARAM;
+ }
+
+ /* Configure element */
+ status = ice_sched_update_elem(hw, node, &buf);
+ return status;
+}
+
+/**
+ * ice_set_clear_cir_bw - set or clear CIR BW
+ * @bw_t_info: bandwidth type information structure
+ * @bw: bandwidth in Kbps - Kilo bits per sec
+ *
+ * Save or clear CIR bandwidth (BW) in the passed param bw_t_info.
+ */
+static void
+ice_set_clear_cir_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
+{
+ if (bw == ICE_SCHED_DFLT_BW) {
+ clear_bit(ICE_BW_TYPE_CIR, bw_t_info->bw_t_bitmap);
+ bw_t_info->cir_bw.bw = 0;
+ } else {
+ /* Save type of BW information */
+ set_bit(ICE_BW_TYPE_CIR, bw_t_info->bw_t_bitmap);
+ bw_t_info->cir_bw.bw = bw;
+ }
+}
+
+/**
+ * ice_set_clear_eir_bw - set or clear EIR BW
+ * @bw_t_info: bandwidth type information structure
+ * @bw: bandwidth in Kbps - Kilo bits per sec
+ *
+ * Save or clear EIR bandwidth (BW) in the passed param bw_t_info.
+ */
+static void
+ice_set_clear_eir_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
+{
+ if (bw == ICE_SCHED_DFLT_BW) {
+ clear_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap);
+ bw_t_info->eir_bw.bw = 0;
+ } else {
+ /* EIR BW and Shared BW profiles are mutually exclusive and
+ * hence only one of them may be set for any given element.
+ * First clear earlier saved shared BW information.
+ */
+ clear_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap);
+ bw_t_info->shared_bw = 0;
+ /* save EIR BW information */
+ set_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap);
+ bw_t_info->eir_bw.bw = bw;
+ }
+}
+
+/**
+ * ice_set_clear_shared_bw - set or clear shared BW
+ * @bw_t_info: bandwidth type information structure
+ * @bw: bandwidth in Kbps - Kilo bits per sec
+ *
+ * Save or clear shared bandwidth (BW) in the passed param bw_t_info.
+ */
+static void
+ice_set_clear_shared_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
+{
+ if (bw == ICE_SCHED_DFLT_BW) {
+ clear_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap);
+ bw_t_info->shared_bw = 0;
+ } else {
+ /* EIR BW and Shared BW profiles are mutually exclusive and
+ * hence only one of them may be set for any given element.
+ * First clear earlier saved EIR BW information.
+ */
+ clear_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap);
+ bw_t_info->eir_bw.bw = 0;
+ /* save shared BW information */
+ set_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap);
+ bw_t_info->shared_bw = bw;
+ }
+}
+
+/**
+ * ice_sched_calc_wakeup - calculate RL profile wakeup parameter
+ * @bw: bandwidth in Kbps
+ *
+ * This function calculates the wakeup parameter of RL profile.
+ */
+static u16 ice_sched_calc_wakeup(s32 bw)
+{
+ s64 bytes_per_sec, wakeup_int, wakeup_a, wakeup_b, wakeup_f;
+ s32 wakeup_f_int;
+ u16 wakeup = 0;
+
+ /* Get the wakeup integer value */
+ bytes_per_sec = div64_long(((s64)bw * 1000), BITS_PER_BYTE);
+ wakeup_int = div64_long(ICE_RL_PROF_FREQUENCY, bytes_per_sec);
+ if (wakeup_int > 63) {
+ wakeup = (u16)((1 << 15) | wakeup_int);
+ } else {
+ /* Calculate the fraction value up to 4 decimal places;
+ * first convert the integer value to a constant multiplier
+ */
+ wakeup_b = (s64)ICE_RL_PROF_MULTIPLIER * wakeup_int;
+ wakeup_a = div64_long((s64)ICE_RL_PROF_MULTIPLIER *
+ ICE_RL_PROF_FREQUENCY,
+ bytes_per_sec);
+
+ /* Get Fraction value */
+ wakeup_f = wakeup_a - wakeup_b;
+
+ /* Round up the Fractional value via Ceil(Fractional value) */
+ if (wakeup_f > div64_long(ICE_RL_PROF_MULTIPLIER, 2))
+ wakeup_f += 1;
+
+ wakeup_f_int = (s32)div64_long(wakeup_f * ICE_RL_PROF_FRACTION,
+ ICE_RL_PROF_MULTIPLIER);
+ wakeup |= (u16)(wakeup_int << 9);
+ wakeup |= (u16)(0x1ff & wakeup_f_int);
+ }
+
+ return wakeup;
+}
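
The wakeup value is a small fixed-point encoding: when the integer part exceeds 63 it is stored raw with bit 15 set, otherwise the integer goes in bits 9-14 with a 9-bit rounded fraction below it. A stand-alone replica of the arithmetic, assuming ICE_RL_PROF_FREQUENCY = 446000000, ICE_RL_PROF_MULTIPLIER = 10000 and ICE_RL_PROF_FRACTION = 512 (values this patch is expected to define in ice_sched.h):

    #include <stdint.h>
    #include <stdio.h>

    #define BITS_PER_BYTE          8
    #define ICE_RL_PROF_FREQUENCY  446000000LL  /* assumed, see ice_sched.h */
    #define ICE_RL_PROF_MULTIPLIER 10000LL
    #define ICE_RL_PROF_FRACTION   512LL

    /* User-space replica of ice_sched_calc_wakeup(); bw is in Kbps */
    static uint16_t calc_wakeup(int64_t bw)
    {
        int64_t bytes_per_sec = bw * 1000 / BITS_PER_BYTE;
        int64_t wakeup_int = ICE_RL_PROF_FREQUENCY / bytes_per_sec;
        int64_t wakeup_a, wakeup_b, wakeup_f;
        uint16_t wakeup;

        if (wakeup_int > 63)
            return (uint16_t)((1 << 15) | wakeup_int);

        /* integer part in bits 9-14, rounded fraction in bits 0-8 */
        wakeup_b = ICE_RL_PROF_MULTIPLIER * wakeup_int;
        wakeup_a = ICE_RL_PROF_MULTIPLIER * ICE_RL_PROF_FREQUENCY /
                   bytes_per_sec;
        wakeup_f = wakeup_a - wakeup_b;
        if (wakeup_f > ICE_RL_PROF_MULTIPLIER / 2)
            wakeup_f += 1;

        wakeup = (uint16_t)(wakeup_int << 9);
        wakeup |= (uint16_t)(0x1ff & (wakeup_f * ICE_RL_PROF_FRACTION /
                                      ICE_RL_PROF_MULTIPLIER));
        return wakeup;
    }

    int main(void)
    {
        printf("wakeup(1 Gbps) = 0x%04x\n", calc_wakeup(1000000));
        return 0;   /* integer part 3, fraction ~0.568 -> 0x0722 */
    }
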
+
+/**
+ * ice_sched_bw_to_rl_profile - convert BW to profile parameters
+ * @bw: bandwidth in Kbps
+ * @profile: profile parameters to return
+ *
+ * This function converts the BW to profile structure format.
+ */
+static enum ice_status
+ice_sched_bw_to_rl_profile(u32 bw, struct ice_aqc_rl_profile_elem *profile)
+{
+ enum ice_status status = ICE_ERR_PARAM;
+ s64 bytes_per_sec, ts_rate, mv_tmp;
+ bool found = false;
+ s32 encode = 0;
+ s64 mv = 0;
+ s32 i;
+
+ /* BW settings range from 0.5 Mbps to 100 Gbps */
+ if (bw < ICE_SCHED_MIN_BW || bw > ICE_SCHED_MAX_BW)
+ return status;
+
+ /* Bytes per second from Kbps */
+ bytes_per_sec = div64_long(((s64)bw * 1000), BITS_PER_BYTE);
+
+ /* the encode field is 6 bits, but only 5 of them are actually useful */
+ for (i = 0; i < 64; i++) {
+ u64 pow_result = BIT_ULL(i);
+
+ ts_rate = div64_long((s64)ICE_RL_PROF_FREQUENCY,
+ pow_result * ICE_RL_PROF_TS_MULTIPLIER);
+ if (ts_rate <= 0)
+ continue;
+
+ /* Multiplier value */
+ mv_tmp = div64_long(bytes_per_sec * ICE_RL_PROF_MULTIPLIER,
+ ts_rate);
+
+ /* Round to the nearest ICE_RL_PROF_MULTIPLIER */
+ mv = round_up_64bit(mv_tmp, ICE_RL_PROF_MULTIPLIER);
+
+ /* Stop at the first multiplier value greater than the
+ * given accuracy in bytes
+ */
+ if (mv > ICE_RL_PROF_ACCURACY_BYTES) {
+ encode = i;
+ found = true;
+ break;
+ }
+ }
+ if (found) {
+ u16 wm;
+
+ wm = ice_sched_calc_wakeup(bw);
+ profile->rl_multiply = cpu_to_le16(mv);
+ profile->wake_up_calc = cpu_to_le16(wm);
+ profile->rl_encode = cpu_to_le16(encode);
+ status = 0;
+ } else {
+ status = ICE_ERR_DOES_NOT_EXIST;
+ }
+
+ return status;
+}
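
The search above walks power-of-two timeslice divisors and stops at the first multiplier that clears the accuracy threshold, which becomes the rl_encode/rl_multiply pair. A condensed sketch under the same assumed constants, plus ICE_RL_PROF_TS_MULTIPLIER = 32 and ICE_RL_PROF_ACCURACY_BYTES = 128:

    #include <stdint.h>
    #include <stdio.h>

    #define ICE_RL_PROF_FREQUENCY      446000000LL  /* assumed constants */
    #define ICE_RL_PROF_MULTIPLIER     10000LL
    #define ICE_RL_PROF_TS_MULTIPLIER  32LL
    #define ICE_RL_PROF_ACCURACY_BYTES 128LL

    /* Return the encode exponent for @bw (in Kbps), or -1 if none fits */
    static int find_rl_encode(int64_t bw, int64_t *mult_out)
    {
        int64_t bytes_per_sec = bw * 1000 / 8;
        int i;

        for (i = 0; i < 64; i++) {
            int64_t ts_rate = ICE_RL_PROF_FREQUENCY /
                              ((1LL << i) * ICE_RL_PROF_TS_MULTIPLIER);
            int64_t mv;

            if (ts_rate <= 0)
                break;  /* divisor exceeded the clock frequency */
            /* multiplier, rounded to the nearest ICE_RL_PROF_MULTIPLIER */
            mv = (bytes_per_sec * ICE_RL_PROF_MULTIPLIER / ts_rate +
                  ICE_RL_PROF_MULTIPLIER / 2) / ICE_RL_PROF_MULTIPLIER;
            if (mv > ICE_RL_PROF_ACCURACY_BYTES) {
                *mult_out = mv;
                return i;   /* smallest exponent that fits */
            }
        }
        return -1;
    }

    int main(void)
    {
        int64_t mv = 0;
        int enc = find_rl_encode(1000000, &mv); /* 1 Gbps */

        printf("rl_encode=%d rl_multiply=%lld\n", enc, (long long)mv);
        return 0;
    }
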
+
+/**
+ * ice_sched_add_rl_profile - add RL profile
+ * @pi: port information structure
+ * @rl_type: type of rate limit BW - min, max, or shared
+ * @bw: bandwidth in Kbps - Kilo bits per sec
+ * @layer_num: specifies in which layer to create profile
+ *
+ * This function first checks the existing list for a matching BW
+ * parameter. If one exists, it returns the associated profile; otherwise
+ * it creates a new rate limit profile for the requested BW, adds it to
+ * the HW DB and the local list, and returns it, or NULL on error.
+ * The caller needs to hold the scheduler lock.
+ */
+static struct ice_aqc_rl_profile_info *
+ice_sched_add_rl_profile(struct ice_port_info *pi,
+ enum ice_rl_type rl_type, u32 bw, u8 layer_num)
+{
+ struct ice_aqc_rl_profile_generic_elem *buf;
+ struct ice_aqc_rl_profile_info *rl_prof_elem;
+ u16 profiles_added = 0, num_profiles = 1;
+ enum ice_status status;
+ struct ice_hw *hw;
+ u8 profile_type;
+
+ if (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM)
+ return NULL;
+ switch (rl_type) {
+ case ICE_MIN_BW:
+ profile_type = ICE_AQC_RL_PROFILE_TYPE_CIR;
+ break;
+ case ICE_MAX_BW:
+ profile_type = ICE_AQC_RL_PROFILE_TYPE_EIR;
+ break;
+ case ICE_SHARED_BW:
+ profile_type = ICE_AQC_RL_PROFILE_TYPE_SRL;
+ break;
+ default:
+ return NULL;
+ }
+
+ if (!pi)
+ return NULL;
+ hw = pi->hw;
+ list_for_each_entry(rl_prof_elem, &pi->rl_prof_list[layer_num],
+ list_entry)
+ if (rl_prof_elem->profile.flags == profile_type &&
+ rl_prof_elem->bw == bw)
+ /* Return existing profile ID info */
+ return rl_prof_elem;
+
+ /* Create new profile ID */
+ rl_prof_elem = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rl_prof_elem),
+ GFP_KERNEL);
+
+ if (!rl_prof_elem)
+ return NULL;
+
+ status = ice_sched_bw_to_rl_profile(bw, &rl_prof_elem->profile);
+ if (status)
+ goto exit_add_rl_prof;
+
+ rl_prof_elem->bw = bw;
+ /* layer_num is zero-based, and FW expects levels from 1 to 9 */
+ rl_prof_elem->profile.level = layer_num + 1;
+ rl_prof_elem->profile.flags = profile_type;
+ rl_prof_elem->profile.max_burst_size = cpu_to_le16(hw->max_burst_size);
+
+ /* Create new entry in HW DB */
+ buf = (struct ice_aqc_rl_profile_generic_elem *)
+ &rl_prof_elem->profile;
+ status = ice_aq_add_rl_profile(hw, num_profiles, buf, sizeof(*buf),
+ &profiles_added, NULL);
+ if (status || profiles_added != num_profiles)
+ goto exit_add_rl_prof;
+
+ /* Good entry - add in the list */
+ rl_prof_elem->prof_id_ref = 0;
+ list_add(&rl_prof_elem->list_entry, &pi->rl_prof_list[layer_num]);
+ return rl_prof_elem;
+
+exit_add_rl_prof:
+ devm_kfree(ice_hw_to_dev(hw), rl_prof_elem);
+ return NULL;
+}
+
+/**
+ * ice_sched_cfg_node_bw_lmt - configure node sched params
+ * @hw: pointer to the HW struct
+ * @node: sched node to configure
+ * @rl_type: rate limit type CIR, EIR, or shared
+ * @rl_prof_id: rate limit profile ID
+ *
+ * This function configures node element's BW limit.
+ */
+static enum ice_status
+ice_sched_cfg_node_bw_lmt(struct ice_hw *hw, struct ice_sched_node *node,
+ enum ice_rl_type rl_type, u16 rl_prof_id)
+{
+ struct ice_aqc_txsched_elem_data buf;
+ struct ice_aqc_txsched_elem *data;
+
+ buf = node->info;
+ data = &buf.data;
+ switch (rl_type) {
+ case ICE_MIN_BW:
+ data->valid_sections |= ICE_AQC_ELEM_VALID_CIR;
+ data->cir_bw.bw_profile_idx = cpu_to_le16(rl_prof_id);
+ break;
+ case ICE_MAX_BW:
+ /* EIR BW and Shared BW profiles are mutually exclusive and
+ * hence only one of them may be set for any given element
+ */
+ if (data->valid_sections & ICE_AQC_ELEM_VALID_SHARED)
+ return ICE_ERR_CFG;
+ data->valid_sections |= ICE_AQC_ELEM_VALID_EIR;
+ data->eir_bw.bw_profile_idx = cpu_to_le16(rl_prof_id);
+ break;
+ case ICE_SHARED_BW:
+ /* Check for removing shared BW */
+ if (rl_prof_id == ICE_SCHED_NO_SHARED_RL_PROF_ID) {
+ /* remove shared profile */
+ data->valid_sections &= ~ICE_AQC_ELEM_VALID_SHARED;
+ data->srl_id = 0; /* clear SRL field */
+
+ /* enable back EIR to default profile */
+ data->valid_sections |= ICE_AQC_ELEM_VALID_EIR;
+ data->eir_bw.bw_profile_idx =
+ cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
+ break;
+ }
+ /* EIR BW and Shared BW profiles are mutually exclusive and
+ * hence only one of them may be set for any given element
+ */
+ if ((data->valid_sections & ICE_AQC_ELEM_VALID_EIR) &&
+ (le16_to_cpu(data->eir_bw.bw_profile_idx) !=
+ ICE_SCHED_DFLT_RL_PROF_ID))
+ return ICE_ERR_CFG;
+ /* EIR BW is set to default, disable it */
+ data->valid_sections &= ~ICE_AQC_ELEM_VALID_EIR;
+ /* Okay to enable shared BW now */
+ data->valid_sections |= ICE_AQC_ELEM_VALID_SHARED;
+ data->srl_id = cpu_to_le16(rl_prof_id);
+ break;
+ default:
+ /* Unknown rate limit type */
+ return ICE_ERR_PARAM;
+ }
+
+ /* Configure element */
+ return ice_sched_update_elem(hw, node, &buf);
+}
+
+/**
+ * ice_sched_get_node_rl_prof_id - get node's rate limit profile ID
+ * @node: sched node
+ * @rl_type: rate limit type
+ *
+ * If an existing profile matches, this returns the corresponding rate
+ * limit profile ID; otherwise it returns an invalid ID as an error.
+ */
+static u16
+ice_sched_get_node_rl_prof_id(struct ice_sched_node *node,
+ enum ice_rl_type rl_type)
+{
+ u16 rl_prof_id = ICE_SCHED_INVAL_PROF_ID;
+ struct ice_aqc_txsched_elem *data;
+
+ data = &node->info.data;
+ switch (rl_type) {
+ case ICE_MIN_BW:
+ if (data->valid_sections & ICE_AQC_ELEM_VALID_CIR)
+ rl_prof_id = le16_to_cpu(data->cir_bw.bw_profile_idx);
+ break;
+ case ICE_MAX_BW:
+ if (data->valid_sections & ICE_AQC_ELEM_VALID_EIR)
+ rl_prof_id = le16_to_cpu(data->eir_bw.bw_profile_idx);
+ break;
+ case ICE_SHARED_BW:
+ if (data->valid_sections & ICE_AQC_ELEM_VALID_SHARED)
+ rl_prof_id = le16_to_cpu(data->srl_id);
+ break;
+ default:
+ break;
+ }
+
+ return rl_prof_id;
+}
+
+/**
+ * ice_sched_get_rl_prof_layer - selects rate limit profile creation layer
+ * @pi: port information structure
+ * @rl_type: type of rate limit BW - min, max, or shared
+ * @layer_index: layer index
+ *
+ * This function returns the requested profile creation layer.
+ */
+static u8
+ice_sched_get_rl_prof_layer(struct ice_port_info *pi, enum ice_rl_type rl_type,
+ u8 layer_index)
+{
+ struct ice_hw *hw = pi->hw;
+
+ if (layer_index >= hw->num_tx_sched_layers)
+ return ICE_SCHED_INVAL_LAYER_NUM;
+ switch (rl_type) {
+ case ICE_MIN_BW:
+ if (hw->layer_info[layer_index].max_cir_rl_profiles)
+ return layer_index;
+ break;
+ case ICE_MAX_BW:
+ if (hw->layer_info[layer_index].max_eir_rl_profiles)
+ return layer_index;
+ break;
+ case ICE_SHARED_BW:
+ /* if current layer doesn't support SRL profile creation
+ * then try a layer up or down.
+ */
+ if (hw->layer_info[layer_index].max_srl_profiles)
+ return layer_index;
+ else if (layer_index < hw->num_tx_sched_layers - 1 &&
+ hw->layer_info[layer_index + 1].max_srl_profiles)
+ return layer_index + 1;
+ else if (layer_index > 0 &&
+ hw->layer_info[layer_index - 1].max_srl_profiles)
+ return layer_index - 1;
+ break;
+ default:
+ break;
+ }
+ return ICE_SCHED_INVAL_LAYER_NUM;
+}
+
+/**
+ * ice_sched_get_srl_node - get shared rate limit node
+ * @node: tree node
+ * @srl_layer: shared rate limit layer
+ *
+ * This function returns SRL node to be used for shared rate limit purpose.
+ * The caller needs to hold scheduler lock.
+ */
+static struct ice_sched_node *
+ice_sched_get_srl_node(struct ice_sched_node *node, u8 srl_layer)
+{
+ if (srl_layer > node->tx_sched_layer)
+ return node->children[0];
+ else if (srl_layer < node->tx_sched_layer)
+ /* A node can't be created without a parent, so every node
+ * except the root always has a valid parent.
+ */
+ return node->parent;
+ else
+ return node;
+}
+
+/**
+ * ice_sched_rm_rl_profile - remove RL profile ID
+ * @pi: port information structure
+ * @layer_num: layer number where profiles are saved
+ * @profile_type: profile type like EIR, CIR, or SRL
+ * @profile_id: profile ID to remove
+ *
+ * This function removes the rate limit profile of type 'profile_type'
+ * with ID 'profile_id' from layer 'layer_num'. The caller needs to hold
+ * the scheduler lock.
+ */
+static enum ice_status
+ice_sched_rm_rl_profile(struct ice_port_info *pi, u8 layer_num, u8 profile_type,
+ u16 profile_id)
+{
+ struct ice_aqc_rl_profile_info *rl_prof_elem;
+ enum ice_status status = 0;
+
+ if (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM)
+ return ICE_ERR_PARAM;
+ /* Check the existing list for RL profile */
+ list_for_each_entry(rl_prof_elem, &pi->rl_prof_list[layer_num],
+ list_entry)
+ if (rl_prof_elem->profile.flags == profile_type &&
+ le16_to_cpu(rl_prof_elem->profile.profile_id) ==
+ profile_id) {
+ if (rl_prof_elem->prof_id_ref)
+ rl_prof_elem->prof_id_ref--;
+
+ /* Remove old profile ID from database */
+ status = ice_sched_del_rl_profile(pi->hw, rl_prof_elem);
+ if (status && status != ICE_ERR_IN_USE)
+ ice_debug(pi->hw, ICE_DBG_SCHED,
+ "Remove rl profile failed\n");
+ break;
+ }
+ if (status == ICE_ERR_IN_USE)
+ status = 0;
+ return status;
+}
+
+/**
+ * ice_sched_set_node_bw_dflt - set node's bandwidth limit to default
+ * @pi: port information structure
+ * @node: pointer to node structure
+ * @rl_type: rate limit type min, max, or shared
+ * @layer_num: layer number where RL profiles are saved
+ *
+ * This function configures the node element's BW rate limit profile ID of
+ * type CIR, EIR, or SRL to the default. This function needs to be called
+ * with the scheduler lock held.
+ */
+static enum ice_status
+ice_sched_set_node_bw_dflt(struct ice_port_info *pi,
+ struct ice_sched_node *node,
+ enum ice_rl_type rl_type, u8 layer_num)
+{
+ enum ice_status status;
+ struct ice_hw *hw;
+ u8 profile_type;
+ u16 rl_prof_id;
+ u16 old_id;
+
+ hw = pi->hw;
+ switch (rl_type) {
+ case ICE_MIN_BW:
+ profile_type = ICE_AQC_RL_PROFILE_TYPE_CIR;
+ rl_prof_id = ICE_SCHED_DFLT_RL_PROF_ID;
+ break;
+ case ICE_MAX_BW:
+ profile_type = ICE_AQC_RL_PROFILE_TYPE_EIR;
+ rl_prof_id = ICE_SCHED_DFLT_RL_PROF_ID;
+ break;
+ case ICE_SHARED_BW:
+ profile_type = ICE_AQC_RL_PROFILE_TYPE_SRL;
+ /* No SRL is configured for default case */
+ rl_prof_id = ICE_SCHED_NO_SHARED_RL_PROF_ID;
+ break;
+ default:
+ return ICE_ERR_PARAM;
+ }
+ /* Save existing RL prof ID for later clean up */
+ old_id = ice_sched_get_node_rl_prof_id(node, rl_type);
+ /* Configure BW scheduling parameters */
+ status = ice_sched_cfg_node_bw_lmt(hw, node, rl_type, rl_prof_id);
+ if (status)
+ return status;
+
+ /* Remove stale RL profile ID */
+ if (old_id == ICE_SCHED_DFLT_RL_PROF_ID ||
+ old_id == ICE_SCHED_INVAL_PROF_ID)
+ return 0;
+
+ return ice_sched_rm_rl_profile(pi, layer_num, profile_type, old_id);
+}
+
+/**
+ * ice_sched_set_eir_srl_excl - set EIR/SRL exclusiveness
+ * @pi: port information structure
+ * @node: pointer to node structure
+ * @layer_num: layer number where rate limit profiles are saved
+ * @rl_type: rate limit type min, max, or shared
+ * @bw: bandwidth value
+ *
+ * This function configures the node element's bandwidth as SRL or EIR
+ * exclusively. EIR BW and Shared BW profiles are mutually exclusive, hence
+ * only one of them may be set for any given element. This function needs to
+ * be called with the scheduler lock held.
+ */
+static enum ice_status
+ice_sched_set_eir_srl_excl(struct ice_port_info *pi,
+ struct ice_sched_node *node,
+ u8 layer_num, enum ice_rl_type rl_type, u32 bw)
+{
+ if (rl_type == ICE_SHARED_BW) {
+ /* an SRL node is passed in this case; it may be a different node */
+ if (bw == ICE_SCHED_DFLT_BW)
+ /* The SRL is being removed; ice_sched_cfg_node_bw_lmt()
+ * resets EIR to its default. EIR is not set in this
+ * case, so no additional action is required.
+ */
+ return 0;
+
+ /* The SRL is being configured, so set EIR to its default here;
+ * ice_sched_cfg_node_bw_lmt() disables EIR when it configures
+ * the SRL
+ */
+ return ice_sched_set_node_bw_dflt(pi, node, ICE_MAX_BW,
+ layer_num);
+ } else if (rl_type == ICE_MAX_BW &&
+ node->info.data.valid_sections & ICE_AQC_ELEM_VALID_SHARED) {
+ /* Remove the shared profile; setting the default shared BW
+ * removes the shared profile for a node.
+ */
+ return ice_sched_set_node_bw_dflt(pi, node,
+ ICE_SHARED_BW,
+ layer_num);
+ }
+ return 0;
+}
+
+/**
+ * ice_sched_set_node_bw - set node's bandwidth
+ * @pi: port information structure
+ * @node: tree node
+ * @rl_type: rate limit type min, max, or shared
+ * @bw: bandwidth in Kbps - Kilo bits per sec
+ * @layer_num: layer number
+ *
+ * This function adds a new profile corresponding to the requested BW,
+ * configures the node's RL profile ID of type CIR, EIR, or SRL, and removes
+ * the old profile ID from the local database. The caller needs to hold the
+ * scheduler lock.
+ */
+static enum ice_status
+ice_sched_set_node_bw(struct ice_port_info *pi, struct ice_sched_node *node,
+ enum ice_rl_type rl_type, u32 bw, u8 layer_num)
+{
+ struct ice_aqc_rl_profile_info *rl_prof_info;
+ enum ice_status status = ICE_ERR_PARAM;
+ struct ice_hw *hw = pi->hw;
+ u16 old_id, rl_prof_id;
+
+ rl_prof_info = ice_sched_add_rl_profile(pi, rl_type, bw, layer_num);
+ if (!rl_prof_info)
+ return status;
+
+ rl_prof_id = le16_to_cpu(rl_prof_info->profile.profile_id);
+
+ /* Save existing RL prof ID for later clean up */
+ old_id = ice_sched_get_node_rl_prof_id(node, rl_type);
+ /* Configure BW scheduling parameters */
+ status = ice_sched_cfg_node_bw_lmt(hw, node, rl_type, rl_prof_id);
+ if (status)
+ return status;
+
+ /* The new changes have been applied; increment the profile ID
+ * reference count
+ */
+ rl_prof_info->prof_id_ref++;
+
+ /* Check for old ID removal */
+ if ((old_id == ICE_SCHED_DFLT_RL_PROF_ID && rl_type != ICE_SHARED_BW) ||
+ old_id == ICE_SCHED_INVAL_PROF_ID || old_id == rl_prof_id)
+ return 0;
+
+ return ice_sched_rm_rl_profile(pi, layer_num,
+ rl_prof_info->profile.flags,
+ old_id);
+}
+
+/**
+ * ice_sched_set_node_bw_lmt - set node's BW limit
+ * @pi: port information structure
+ * @node: tree node
+ * @rl_type: rate limit type min, max, or shared
+ * @bw: bandwidth in Kbps - Kilo bits per sec
+ *
+ * It updates the node's BW limit parameters, such as the RL profile ID of
+ * type CIR, EIR, or SRL. The caller needs to hold the scheduler lock.
+ */
+static enum ice_status
+ice_sched_set_node_bw_lmt(struct ice_port_info *pi, struct ice_sched_node *node,
+ enum ice_rl_type rl_type, u32 bw)
+{
+ struct ice_sched_node *cfg_node = node;
+ enum ice_status status;
+ struct ice_hw *hw;
+ u8 layer_num;
+
+ if (!pi)
+ return ICE_ERR_PARAM;
+ hw = pi->hw;
+ /* Remove unused RL profile IDs from HW and SW DB */
+ ice_sched_rm_unused_rl_prof(pi);
+ layer_num = ice_sched_get_rl_prof_layer(pi, rl_type,
+ node->tx_sched_layer);
+ if (layer_num >= hw->num_tx_sched_layers)
+ return ICE_ERR_PARAM;
+
+ if (rl_type == ICE_SHARED_BW) {
+ /* SRL node may be different */
+ cfg_node = ice_sched_get_srl_node(node, layer_num);
+ if (!cfg_node)
+ return ICE_ERR_CFG;
+ }
+ /* EIR BW and Shared BW profiles are mutually exclusive and
+ * hence only one of them may be set for any given element
+ */
+ status = ice_sched_set_eir_srl_excl(pi, cfg_node, layer_num, rl_type,
+ bw);
+ if (status)
+ return status;
+ if (bw == ICE_SCHED_DFLT_BW)
+ return ice_sched_set_node_bw_dflt(pi, cfg_node, rl_type,
+ layer_num);
+ return ice_sched_set_node_bw(pi, cfg_node, rl_type, bw, layer_num);
+}
+
+/**
+ * ice_sched_set_node_bw_dflt_lmt - set node's BW limit to default
+ * @pi: port information structure
+ * @node: pointer to node structure
+ * @rl_type: rate limit type min, max, or shared
+ *
+ * This function configures the node element's BW rate limit profile ID of
+ * type CIR, EIR, or SRL to the default. This function needs to be called
+ * with the scheduler lock held.
+ */
+static enum ice_status
+ice_sched_set_node_bw_dflt_lmt(struct ice_port_info *pi,
+ struct ice_sched_node *node,
+ enum ice_rl_type rl_type)
+{
+ return ice_sched_set_node_bw_lmt(pi, node, rl_type,
+ ICE_SCHED_DFLT_BW);
+}
+
+/**
+ * ice_sched_validate_srl_node - Check node for SRL applicability
+ * @node: sched node to configure
+ * @sel_layer: selected SRL layer
+ *
+ * This function checks if the SRL can be applied to a selected layer node on
+ * behalf of the requested node (first argument). This function needs to be
+ * called with scheduler lock held.
+ */
+static enum ice_status
+ice_sched_validate_srl_node(struct ice_sched_node *node, u8 sel_layer)
+{
+ /* SRL profiles are not available on all layers. Check if the
+ * SRL profile can be applied to a node above or below the
+ * requested node. SRL configuration is possible only if the
+ * selected layer's node has a single child.
+ */
+ if (sel_layer == node->tx_sched_layer ||
+ ((sel_layer == node->tx_sched_layer + 1) &&
+ node->num_children == 1) ||
+ ((sel_layer == node->tx_sched_layer - 1) &&
+ (node->parent && node->parent->num_children == 1)))
+ return 0;
+
+ return ICE_ERR_CFG;
+}
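For orientation, these are the shapes the check above accepts, with N the requested node and S the node the SRL profile actually lands on (an illustrative summary, not part of the patch):

    sel_layer == layer(N):      S == N              always valid
    sel_layer == layer(N) + 1:  S = N's only child  valid iff N has exactly one child
    sel_layer == layer(N) - 1:  S = N's parent      valid iff N is the parent's only child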
+
+/**
+ * ice_sched_save_q_bw - save queue node's BW information
+ * @q_ctx: queue context structure
+ * @rl_type: rate limit type min, max, or shared
+ * @bw: bandwidth in Kbps - Kilo bits per sec
+ *
+ * Save the BW information of a queue type node for post-replay use.
+ */
+static enum ice_status
+ice_sched_save_q_bw(struct ice_q_ctx *q_ctx, enum ice_rl_type rl_type, u32 bw)
+{
+ switch (rl_type) {
+ case ICE_MIN_BW:
+ ice_set_clear_cir_bw(&q_ctx->bw_t_info, bw);
+ break;
+ case ICE_MAX_BW:
+ ice_set_clear_eir_bw(&q_ctx->bw_t_info, bw);
+ break;
+ case ICE_SHARED_BW:
+ ice_set_clear_shared_bw(&q_ctx->bw_t_info, bw);
+ break;
+ default:
+ return ICE_ERR_PARAM;
+ }
+ return 0;
+}
+
+/**
+ * ice_sched_set_q_bw_lmt - sets queue BW limit
+ * @pi: port information structure
+ * @vsi_handle: sw VSI handle
+ * @tc: traffic class
+ * @q_handle: software queue handle
+ * @rl_type: min, max, or shared
+ * @bw: bandwidth in Kbps
+ *
+ * This function sets the BW limit of a queue scheduling node.
+ */
+static enum ice_status
+ice_sched_set_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
+ u16 q_handle, enum ice_rl_type rl_type, u32 bw)
+{
+ enum ice_status status = ICE_ERR_PARAM;
+ struct ice_sched_node *node;
+ struct ice_q_ctx *q_ctx;
+
+ if (!ice_is_vsi_valid(pi->hw, vsi_handle))
+ return ICE_ERR_PARAM;
+ mutex_lock(&pi->sched_lock);
+ q_ctx = ice_get_lan_q_ctx(pi->hw, vsi_handle, tc, q_handle);
+ if (!q_ctx)
+ goto exit_q_bw_lmt;
+ node = ice_sched_find_node_by_teid(pi->root, q_ctx->q_teid);
+ if (!node) {
+ ice_debug(pi->hw, ICE_DBG_SCHED, "Wrong q_teid\n");
+ goto exit_q_bw_lmt;
+ }
+
+ /* Return error if it is not a leaf node */
+ if (node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF)
+ goto exit_q_bw_lmt;
+
+ /* SRL bandwidth layer selection */
+ if (rl_type == ICE_SHARED_BW) {
+ u8 sel_layer; /* selected layer */
+
+ sel_layer = ice_sched_get_rl_prof_layer(pi, rl_type,
+ node->tx_sched_layer);
+ if (sel_layer >= pi->hw->num_tx_sched_layers) {
+ status = ICE_ERR_PARAM;
+ goto exit_q_bw_lmt;
+ }
+ status = ice_sched_validate_srl_node(node, sel_layer);
+ if (status)
+ goto exit_q_bw_lmt;
+ }
+
+ if (bw == ICE_SCHED_DFLT_BW)
+ status = ice_sched_set_node_bw_dflt_lmt(pi, node, rl_type);
+ else
+ status = ice_sched_set_node_bw_lmt(pi, node, rl_type, bw);
+
+ if (!status)
+ status = ice_sched_save_q_bw(q_ctx, rl_type, bw);
+
+exit_q_bw_lmt:
+ mutex_unlock(&pi->sched_lock);
+ return status;
+}
+
+/**
+ * ice_cfg_q_bw_lmt - configure queue BW limit
+ * @pi: port information structure
+ * @vsi_handle: sw VSI handle
+ * @tc: traffic class
+ * @q_handle: software queue handle
+ * @rl_type: min, max, or shared
+ * @bw: bandwidth in Kbps
+ *
+ * This function configures the BW limit of a queue scheduling node.
+ */
+enum ice_status
+ice_cfg_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
+ u16 q_handle, enum ice_rl_type rl_type, u32 bw)
+{
+ return ice_sched_set_q_bw_lmt(pi, vsi_handle, tc, q_handle, rl_type,
+ bw);
+}
+
+/**
+ * ice_cfg_q_bw_dflt_lmt - configure queue BW default limit
+ * @pi: port information structure
+ * @vsi_handle: sw VSI handle
+ * @tc: traffic class
+ * @q_handle: software queue handle
+ * @rl_type: min, max, or shared
+ *
+ * This function configures the default BW limit of a queue scheduling node.
+ */
+enum ice_status
+ice_cfg_q_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
+ u16 q_handle, enum ice_rl_type rl_type)
+{
+ return ice_sched_set_q_bw_lmt(pi, vsi_handle, tc, q_handle, rl_type,
+ ICE_SCHED_DFLT_BW);
+}
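A usage sketch of the two exported helpers above; the VSI handle, TC, queue handle, and error mapping are illustrative assumptions (a hypothetical caller, not part of this series). ICE_MAX_BW programs the EIR (peak rate) profile, with bw given in Kbps:

    /* Hypothetical caller: cap Tx queue 0 of TC 0 at 100 Mbps, then
     * restore the default later.
     */
    static int ice_example_cap_queue(struct ice_port_info *pi, u16 vsi_handle)
    {
    	enum ice_status status;

    	status = ice_cfg_q_bw_lmt(pi, vsi_handle, 0, 0, ICE_MAX_BW,
    				  100000 /* Kbps */);
    	if (status)
    		return -EIO;

    	/* ... later: drop the cap again */
    	status = ice_cfg_q_bw_dflt_lmt(pi, vsi_handle, 0, 0, ICE_MAX_BW);
    	return status ? -EIO : 0;
    }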
+
+/**
+ * ice_cfg_rl_burst_size - Set burst size value
+ * @hw: pointer to the HW struct
+ * @bytes: burst size in bytes
+ *
+ * This function configures/sets the burst size to the requested new value.
+ * The new burst size value is used for future rate limit calls. It doesn't
+ * change the existing or previously created RL profiles.
+ */
+enum ice_status ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes)
+{
+ u16 burst_size_to_prog;
+
+ if (bytes < ICE_MIN_BURST_SIZE_ALLOWED ||
+ bytes > ICE_MAX_BURST_SIZE_ALLOWED)
+ return ICE_ERR_PARAM;
+ if (ice_round_to_num(bytes, 64) <=
+ ICE_MAX_BURST_SIZE_64_BYTE_GRANULARITY) {
+ /* 64 byte granularity case */
+ /* Disable MSB granularity bit */
+ burst_size_to_prog = ICE_64_BYTE_GRANULARITY;
+ /* round number to nearest 64 byte granularity */
+ bytes = ice_round_to_num(bytes, 64);
+ /* The value is in 64 byte chunks */
+ burst_size_to_prog |= (u16)(bytes / 64);
+ } else {
+ /* k bytes granularity case */
+ /* Enable MSB granularity bit */
+ burst_size_to_prog = ICE_KBYTE_GRANULARITY;
+ /* round number to nearest 1024 granularity */
+ bytes = ice_round_to_num(bytes, 1024);
+ /* check rounding doesn't go beyond allowed */
+ if (bytes > ICE_MAX_BURST_SIZE_KBYTE_GRANULARITY)
+ bytes = ICE_MAX_BURST_SIZE_KBYTE_GRANULARITY;
+ /* The value is in k bytes */
+ burst_size_to_prog |= (u16)(bytes / 1024);
+ }
+ hw->max_burst_size = burst_size_to_prog;
+ return 0;
+}
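As a worked example of the encoding, a standalone sketch (BIT() and the rounding helper are local stand-ins for the kernel versions, and the driver's range clamping is omitted for brevity):

    #include <stdint.h>
    #include <stdio.h>

    #define BIT(n)			(1u << (n))
    #define KBYTE_GRANULARITY	BIT(11)			/* MSB of the 12-bit field */
    #define MAX_64B_BURST		((BIT(11) - 1) * 64)	/* 131008 bytes */

    /* stand-in for ice_round_to_num(): round val to the nearest multiple */
    static uint32_t round_to_num(uint32_t val, uint32_t mult)
    {
    	return ((val + mult / 2) / mult) * mult;
    }

    static uint16_t encode_burst(uint32_t bytes)
    {
    	if (round_to_num(bytes, 64) <= MAX_64B_BURST)
    		return (uint16_t)(round_to_num(bytes, 64) / 64);	/* MSB clear */
    	return KBYTE_GRANULARITY | (uint16_t)(round_to_num(bytes, 1024) / 1024);
    }

    int main(void)
    {
    	printf("0x%03x\n", encode_burst(9600));		/* 9600 / 64 = 150 -> 0x096 */
    	printf("0x%03x\n", encode_burst(1024 * 1024));	/* 1024 KB -> 0x800 | 0x400 = 0xc00 */
    	return 0;
    }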
+
+/**
+ * ice_sched_replay_node_prio - re-configure node priority
+ * @hw: pointer to the HW struct
+ * @node: sched node to configure
+ * @priority: priority value
+ *
+ * This function configures the node element's priority value. It
+ * needs to be called with scheduler lock held.
+ */
+static enum ice_status
+ice_sched_replay_node_prio(struct ice_hw *hw, struct ice_sched_node *node,
+ u8 priority)
+{
+ struct ice_aqc_txsched_elem_data buf;
+ struct ice_aqc_txsched_elem *data;
+
+ buf = node->info;
+ data = &buf.data;
+ data->valid_sections |= ICE_AQC_ELEM_VALID_GENERIC;
+ data->generic = priority;
+
+ /* Configure element */
+ return ice_sched_update_elem(hw, node, &buf);
+}
+
+/**
+ * ice_sched_replay_node_bw - replay node(s) BW
+ * @hw: pointer to the HW struct
+ * @node: sched node to configure
+ * @bw_t_info: BW type information
+ *
+ * This function restores the node's BW from bw_t_info. The caller needs
+ * to hold the scheduler lock.
+ */
+static enum ice_status
+ice_sched_replay_node_bw(struct ice_hw *hw, struct ice_sched_node *node,
+ struct ice_bw_type_info *bw_t_info)
+{
+ struct ice_port_info *pi = hw->port_info;
+ enum ice_status status = ICE_ERR_PARAM;
+ u16 bw_alloc;
+
+ if (!node)
+ return status;
+ if (bitmap_empty(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_CNT))
+ return 0;
+ if (test_bit(ICE_BW_TYPE_PRIO, bw_t_info->bw_t_bitmap)) {
+ status = ice_sched_replay_node_prio(hw, node,
+ bw_t_info->generic);
+ if (status)
+ return status;
+ }
+ if (test_bit(ICE_BW_TYPE_CIR, bw_t_info->bw_t_bitmap)) {
+ status = ice_sched_set_node_bw_lmt(pi, node, ICE_MIN_BW,
+ bw_t_info->cir_bw.bw);
+ if (status)
+ return status;
+ }
+ if (test_bit(ICE_BW_TYPE_CIR_WT, bw_t_info->bw_t_bitmap)) {
+ bw_alloc = bw_t_info->cir_bw.bw_alloc;
+ status = ice_sched_cfg_node_bw_alloc(hw, node, ICE_MIN_BW,
+ bw_alloc);
+ if (status)
+ return status;
+ }
+ if (test_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap)) {
+ status = ice_sched_set_node_bw_lmt(pi, node, ICE_MAX_BW,
+ bw_t_info->eir_bw.bw);
+ if (status)
+ return status;
+ }
+ if (test_bit(ICE_BW_TYPE_EIR_WT, bw_t_info->bw_t_bitmap)) {
+ bw_alloc = bw_t_info->eir_bw.bw_alloc;
+ status = ice_sched_cfg_node_bw_alloc(hw, node, ICE_MAX_BW,
+ bw_alloc);
+ if (status)
+ return status;
+ }
+ if (test_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap))
+ status = ice_sched_set_node_bw_lmt(pi, node, ICE_SHARED_BW,
+ bw_t_info->shared_bw);
+ return status;
+}
+
+/**
+ * ice_sched_replay_q_bw - replay queue type node BW
+ * @pi: port information structure
+ * @q_ctx: queue context structure
+ *
+ * This function replays the bandwidth of a queue type node. It needs to be
+ * called with the scheduler lock held.
+ */
+enum ice_status
+ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx)
+{
+ struct ice_sched_node *q_node;
+
+ /* Following also checks the presence of node in tree */
+ q_node = ice_sched_find_node_by_teid(pi->root, q_ctx->q_teid);
+ if (!q_node)
+ return ICE_ERR_PARAM;
+ return ice_sched_replay_node_bw(pi->hw, q_node, &q_ctx->bw_t_info);
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.h b/drivers/net/ethernet/intel/ice/ice_sched.h
index 3902a8ad3025..f0593cfb6521 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.h
+++ b/drivers/net/ethernet/intel/ice/ice_sched.h
@@ -8,6 +8,36 @@
#define ICE_QGRP_LAYER_OFFSET 2
#define ICE_VSI_LAYER_OFFSET 4
+#define ICE_SCHED_INVAL_LAYER_NUM 0xFF
+/* Burst size is a 12-bit register value that is configured while creating
+ * the RL profile(s). The MSB is a granularity bit that selects the
+ * granularity type:
+ * 0 - the LSB bits are in units of 64 bytes
+ * 1 - the LSB bits are in units of 1K bytes
+ */
+#define ICE_64_BYTE_GRANULARITY 0
+#define ICE_KBYTE_GRANULARITY BIT(11)
+#define ICE_MIN_BURST_SIZE_ALLOWED 64 /* In Bytes */
+#define ICE_MAX_BURST_SIZE_ALLOWED \
+ ((BIT(11) - 1) * 1024) /* In Bytes */
+#define ICE_MAX_BURST_SIZE_64_BYTE_GRANULARITY \
+ ((BIT(11) - 1) * 64) /* In Bytes */
+#define ICE_MAX_BURST_SIZE_KBYTE_GRANULARITY ICE_MAX_BURST_SIZE_ALLOWED
+
+#define ICE_RL_PROF_FREQUENCY 446000000
+#define ICE_RL_PROF_ACCURACY_BYTES 128
+#define ICE_RL_PROF_MULTIPLIER 10000
+#define ICE_RL_PROF_TS_MULTIPLIER 32
+#define ICE_RL_PROF_FRACTION 512
+
+/* BW rate limit profile parameters list entry, along with the
+ * bandwidth maintained per layer in the port info
+ */
+struct ice_aqc_rl_profile_info {
+ struct ice_aqc_rl_profile_elem profile;
+ struct list_head list_entry;
+ u32 bw; /* requested */
+ u16 prof_id_ref; /* profile ID to node association ref count */
+};
struct ice_sched_agg_vsi_info {
struct list_head list_entry;
@@ -48,4 +78,13 @@ enum ice_status
ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
u8 owner, bool enable);
enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle);
+enum ice_status
+ice_cfg_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
+ u16 q_handle, enum ice_rl_type rl_type, u32 bw);
+enum ice_status
+ice_cfg_q_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
+ u16 q_handle, enum ice_rl_type rl_type);
+enum ice_status ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes);
+enum ice_status
+ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx);
#endif /* _ICE_SCHED_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 1acdd43a2edd..b5a53f862a83 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -416,8 +416,7 @@ ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
} else {
/* update with new HW VSI num */
- if (tmp_vsi_ctx->vsi_num != vsi_ctx->vsi_num)
- tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
+ tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
}
return 0;
@@ -2429,7 +2428,7 @@ ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
if (!ice_is_vsi_valid(hw, vsi_handle))
return ICE_ERR_PARAM;
- if (vid)
+ if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
else
recipe_id = ICE_SW_LKUP_PROMISC;
@@ -2441,13 +2440,18 @@ ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
mutex_lock(rule_lock);
list_for_each_entry(itr, rule_head, list_entry) {
+ struct ice_fltr_info *fltr_info;
u8 fltr_promisc_mask = 0;
if (!ice_vsi_uses_fltr(itr, vsi_handle))
continue;
+ fltr_info = &itr->fltr_info;
+
+ if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN &&
+ vid != fltr_info->l_data.mac_vlan.vlan_id)
+ continue;
- fltr_promisc_mask |=
- ice_determine_promisc_mask(&itr->fltr_info);
+ fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);
/* Skip if filter is not completely specified by given mask */
if (fltr_promisc_mask & ~promisc_mask)
@@ -2455,7 +2459,7 @@ ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
&remove_list_head,
- &itr->fltr_info);
+ fltr_info);
if (status) {
mutex_unlock(rule_lock);
goto free_fltr_list;
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h
index cb123fbe30be..fa14b9545dab 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.h
+++ b/drivers/net/ethernet/intel/ice/ice_switch.h
@@ -14,11 +14,6 @@
#define ICE_VSI_INVAL_ID 0xffff
#define ICE_INVAL_Q_HANDLE 0xFFFF
-/* VSI queue context structure */
-struct ice_q_ctx {
- u16 q_handle;
-};
-
/* VSI context structure for add/get/update/free operations */
struct ice_vsi_ctx {
u16 vsi_num;
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 33dd103035dc..2c212f64d99f 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -5,8 +5,13 @@
#include <linux/prefetch.h>
#include <linux/mm.h>
+#include <linux/bpf_trace.h>
+#include <net/xdp.h>
+#include "ice_txrx_lib.h"
+#include "ice_lib.h"
#include "ice.h"
#include "ice_dcb_lib.h"
+#include "ice_xsk.h"
#define ICE_RX_HDR_SIZE 256
@@ -19,7 +24,10 @@ static void
ice_unmap_and_free_tx_buf(struct ice_ring *ring, struct ice_tx_buf *tx_buf)
{
if (tx_buf->skb) {
- dev_kfree_skb_any(tx_buf->skb);
+ if (ice_ring_is_xdp(ring))
+ page_frag_free(tx_buf->raw_buf);
+ else
+ dev_kfree_skb_any(tx_buf->skb);
if (dma_unmap_len(tx_buf, len))
dma_unmap_single(ring->dev,
dma_unmap_addr(tx_buf, dma),
@@ -51,6 +59,11 @@ void ice_clean_tx_ring(struct ice_ring *tx_ring)
{
u16 i;
+ if (ice_ring_is_xdp(tx_ring) && tx_ring->xsk_umem) {
+ ice_xsk_clean_xdp_ring(tx_ring);
+ goto tx_skip_free;
+ }
+
/* ring already cleared, nothing to do */
if (!tx_ring->tx_buf)
return;
@@ -59,6 +72,7 @@ void ice_clean_tx_ring(struct ice_ring *tx_ring)
for (i = 0; i < tx_ring->count; i++)
ice_unmap_and_free_tx_buf(tx_ring, &tx_ring->tx_buf[i]);
+tx_skip_free:
memset(tx_ring->tx_buf, 0, sizeof(*tx_ring->tx_buf) * tx_ring->count);
/* Zero out the descriptor ring */
@@ -136,8 +150,11 @@ static bool ice_clean_tx_irq(struct ice_ring *tx_ring, int napi_budget)
total_bytes += tx_buf->bytecount;
total_pkts += tx_buf->gso_segs;
- /* free the skb */
- napi_consume_skb(tx_buf->skb, napi_budget);
+ if (ice_ring_is_xdp(tx_ring))
+ page_frag_free(tx_buf->raw_buf);
+ else
+ /* free the skb */
+ napi_consume_skb(tx_buf->skb, napi_budget);
/* unmap skb header data */
dma_unmap_single(tx_ring->dev,
@@ -188,12 +205,11 @@ static bool ice_clean_tx_irq(struct ice_ring *tx_ring, int napi_budget)
i += tx_ring->count;
tx_ring->next_to_clean = i;
- u64_stats_update_begin(&tx_ring->syncp);
- tx_ring->stats.bytes += total_bytes;
- tx_ring->stats.pkts += total_pkts;
- u64_stats_update_end(&tx_ring->syncp);
- tx_ring->q_vector->tx.total_bytes += total_bytes;
- tx_ring->q_vector->tx.total_pkts += total_pkts;
+
+ ice_update_tx_ring_stats(tx_ring, total_pkts, total_bytes);
+
+ if (ice_ring_is_xdp(tx_ring))
+ return !!budget;
netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts,
total_bytes);
@@ -273,6 +289,11 @@ void ice_clean_rx_ring(struct ice_ring *rx_ring)
if (!rx_ring->rx_buf)
return;
+ if (rx_ring->xsk_umem) {
+ ice_xsk_clean_rx_ring(rx_ring);
+ goto rx_skip_free;
+ }
+
/* Free all the Rx ring sk_buffs */
for (i = 0; i < rx_ring->count; i++) {
struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];
@@ -289,10 +310,11 @@ void ice_clean_rx_ring(struct ice_ring *rx_ring)
*/
dma_sync_single_range_for_cpu(dev, rx_buf->dma,
rx_buf->page_offset,
- ICE_RXBUF_2048, DMA_FROM_DEVICE);
+ rx_ring->rx_buf_len,
+ DMA_FROM_DEVICE);
/* free resources associated with mapping */
- dma_unmap_page_attrs(dev, rx_buf->dma, PAGE_SIZE,
+ dma_unmap_page_attrs(dev, rx_buf->dma, ice_rx_pg_size(rx_ring),
DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
__page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
@@ -300,6 +322,7 @@ void ice_clean_rx_ring(struct ice_ring *rx_ring)
rx_buf->page_offset = 0;
}
+rx_skip_free:
memset(rx_ring->rx_buf, 0, sizeof(*rx_ring->rx_buf) * rx_ring->count);
/* Zero out the descriptor ring */
@@ -319,6 +342,10 @@ void ice_clean_rx_ring(struct ice_ring *rx_ring)
void ice_free_rx_ring(struct ice_ring *rx_ring)
{
ice_clean_rx_ring(rx_ring);
+ if (rx_ring->vsi->type == ICE_VSI_PF)
+ if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
+ xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
+ rx_ring->xdp_prog = NULL;
devm_kfree(rx_ring->dev, rx_ring->rx_buf);
rx_ring->rx_buf = NULL;
@@ -363,6 +390,15 @@ int ice_setup_rx_ring(struct ice_ring *rx_ring)
rx_ring->next_to_use = 0;
rx_ring->next_to_clean = 0;
+
+ if (ice_is_xdp_ena_vsi(rx_ring->vsi))
+ WRITE_ONCE(rx_ring->xdp_prog, rx_ring->vsi->xdp_prog);
+
+ if (rx_ring->vsi->type == ICE_VSI_PF &&
+ !xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
+ if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
+ rx_ring->q_index))
+ goto err;
return 0;
err:
@@ -372,34 +408,110 @@ err:
}
/**
- * ice_release_rx_desc - Store the new tail and head values
- * @rx_ring: ring to bump
- * @val: new head index
+ * ice_rx_offset - Return expected offset into page to access data
+ * @rx_ring: Ring we are requesting offset of
+ *
+ * Returns the offset into the page at which the ring stores packet data.
*/
-static void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val)
+static unsigned int ice_rx_offset(struct ice_ring *rx_ring)
{
- u16 prev_ntu = rx_ring->next_to_use;
+ if (ice_ring_uses_build_skb(rx_ring))
+ return ICE_SKB_PAD;
+ else if (ice_is_xdp_ena_vsi(rx_ring->vsi))
+ return XDP_PACKET_HEADROOM;
- rx_ring->next_to_use = val;
+ return 0;
+}
- /* update next to alloc since we have filled the ring */
- rx_ring->next_to_alloc = val;
+/**
+ * ice_run_xdp - Executes an XDP program on an initialized xdp_buff
+ * @rx_ring: Rx ring
+ * @xdp: xdp_buff used as input to the XDP program
+ * @xdp_prog: XDP program to run
+ *
+ * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
+ */
+static int
+ice_run_xdp(struct ice_ring *rx_ring, struct xdp_buff *xdp,
+ struct bpf_prog *xdp_prog)
+{
+ int err, result = ICE_XDP_PASS;
+ struct ice_ring *xdp_ring;
+ u32 act;
- /* QRX_TAIL will be updated with any tail value, but hardware ignores
- * the lower 3 bits. This makes it so we only bump tail on meaningful
- * boundaries. Also, this allows us to bump tail on intervals of 8 up to
- * the budget depending on the current traffic load.
- */
- val &= ~0x7;
- if (prev_ntu != val) {
- /* Force memory writes to complete before letting h/w
- * know there are new descriptors to fetch. (Only
- * applicable for weak-ordered memory model archs,
- * such as IA-64).
- */
- wmb();
- writel(val, rx_ring->tail);
+ act = bpf_prog_run_xdp(xdp_prog, xdp);
+ switch (act) {
+ case XDP_PASS:
+ break;
+ case XDP_TX:
+ xdp_ring = rx_ring->vsi->xdp_rings[smp_processor_id()];
+ result = ice_xmit_xdp_buff(xdp, xdp_ring);
+ break;
+ case XDP_REDIRECT:
+ err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
+ result = !err ? ICE_XDP_REDIR : ICE_XDP_CONSUMED;
+ break;
+ default:
+ bpf_warn_invalid_xdp_action(act);
+ /* fallthrough -- not supported action */
+ case XDP_ABORTED:
+ trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
+ /* fallthrough -- handle aborts by dropping frame */
+ case XDP_DROP:
+ result = ICE_XDP_CONSUMED;
+ break;
}
+
+ return result;
+}
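For context, a minimal XDP program whose verdicts this dispatcher services might look as follows; this is an illustrative sketch built on the standard libbpf helpers, not part of this series:

    // SPDX-License-Identifier: GPL-2.0
    #include <linux/bpf.h>
    #include <linux/if_ether.h>
    #include <linux/ip.h>
    #include <linux/in.h>
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_endian.h>

    SEC("xdp")
    int xdp_drop_udp(struct xdp_md *ctx)
    {
    	void *data = (void *)(long)ctx->data;
    	void *data_end = (void *)(long)ctx->data_end;
    	struct ethhdr *eth = data;
    	struct iphdr *iph;

    	if ((void *)(eth + 1) > data_end)
    		return XDP_PASS;
    	if (eth->h_proto != bpf_htons(ETH_P_IP))
    		return XDP_PASS;

    	iph = (void *)(eth + 1);
    	if ((void *)(iph + 1) > data_end)
    		return XDP_PASS;

    	/* XDP_DROP maps to the ICE_XDP_CONSUMED path above */
    	if (iph->protocol == IPPROTO_UDP)
    		return XDP_DROP;
    	return XDP_PASS;
    }

    char _license[] SEC("license") = "GPL";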
+
+/**
+ * ice_xdp_xmit - submit packets to XDP ring for transmission
+ * @dev: netdev
+ * @n: number of XDP frames to be transmitted
+ * @frames: XDP frames to be transmitted
+ * @flags: transmit flags
+ *
+ * Returns the number of frames successfully sent. Frames that fail are
+ * freed via the XDP return API.
+ * For error cases, a negative errno code is returned and no frames
+ * are transmitted (the caller must handle freeing the frames).
+ */
+int
+ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
+ u32 flags)
+{
+ struct ice_netdev_priv *np = netdev_priv(dev);
+ unsigned int queue_index = smp_processor_id();
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_ring *xdp_ring;
+ int drops = 0, i;
+
+ if (test_bit(__ICE_DOWN, vsi->state))
+ return -ENETDOWN;
+
+ if (!ice_is_xdp_ena_vsi(vsi) || queue_index >= vsi->num_xdp_txq)
+ return -ENXIO;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+ xdp_ring = vsi->xdp_rings[queue_index];
+ for (i = 0; i < n; i++) {
+ struct xdp_frame *xdpf = frames[i];
+ int err;
+
+ err = ice_xmit_xdp_ring(xdpf->data, xdpf->len, xdp_ring);
+ if (err != ICE_XDP_TX) {
+ xdp_return_frame_rx_napi(xdpf);
+ drops++;
+ }
+ }
+
+ if (unlikely(flags & XDP_XMIT_FLUSH))
+ ice_xdp_ring_update_tail(xdp_ring);
+
+ return n - drops;
}
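The signature matches the .ndo_xdp_xmit hook; presumably the function is wired up elsewhere in this series, roughly along these lines (an assumed sketch, not shown in this hunk):

    static const struct net_device_ops ice_netdev_ops = {
    	/* ... existing callbacks ... */
    	.ndo_bpf	= ice_xdp,	/* assumed name of the XDP setup entry */
    	.ndo_xdp_xmit	= ice_xdp_xmit,
    };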
/**
@@ -423,28 +535,28 @@ ice_alloc_mapped_page(struct ice_ring *rx_ring, struct ice_rx_buf *bi)
}
/* alloc new page for storage */
- page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
+ page = dev_alloc_pages(ice_rx_pg_order(rx_ring));
if (unlikely(!page)) {
rx_ring->rx_stats.alloc_page_failed++;
return false;
}
/* map page for use */
- dma = dma_map_page_attrs(rx_ring->dev, page, 0, PAGE_SIZE,
+ dma = dma_map_page_attrs(rx_ring->dev, page, 0, ice_rx_pg_size(rx_ring),
DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
/* if mapping failed free memory back to system since
* there isn't much point in holding memory we can't use
*/
if (dma_mapping_error(rx_ring->dev, dma)) {
- __free_pages(page, 0);
+ __free_pages(page, ice_rx_pg_order(rx_ring));
rx_ring->rx_stats.alloc_page_failed++;
return false;
}
bi->dma = dma;
bi->page = page;
- bi->page_offset = 0;
+ bi->page_offset = ice_rx_offset(rx_ring);
page_ref_add(page, USHRT_MAX - 1);
bi->pagecnt_bias = USHRT_MAX;
@@ -486,7 +598,7 @@ bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count)
/* sync the buffer for use by the device */
dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
bi->page_offset,
- ICE_RXBUF_2048,
+ rx_ring->rx_buf_len,
DMA_FROM_DEVICE);
/* Refresh the desc even if buffer_addrs didn't change
@@ -557,9 +669,6 @@ ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)
*/
static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)
{
-#if (PAGE_SIZE >= 8192)
- unsigned int last_offset = PAGE_SIZE - ICE_RXBUF_2048;
-#endif
unsigned int pagecnt_bias = rx_buf->pagecnt_bias;
struct page *page = rx_buf->page;
@@ -572,7 +681,9 @@ static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)
if (unlikely((page_count(page) - pagecnt_bias) > 1))
return false;
#else
- if (rx_buf->page_offset > last_offset)
+#define ICE_LAST_OFFSET \
+ (SKB_WITH_OVERHEAD(PAGE_SIZE) - ICE_RXBUF_2048)
+ if (rx_buf->page_offset > ICE_LAST_OFFSET)
return false;
#endif /* PAGE_SIZE < 8192) */
@@ -590,6 +701,7 @@ static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)
/**
* ice_add_rx_frag - Add contents of Rx buffer to sk_buff as a frag
+ * @rx_ring: Rx descriptor ring to transact packets on
* @rx_buf: buffer containing page to add
* @skb: sk_buff to place the data into
* @size: packet length from rx_desc
@@ -599,13 +711,13 @@ static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)
* The function will then update the page offset.
*/
static void
-ice_add_rx_frag(struct ice_rx_buf *rx_buf, struct sk_buff *skb,
- unsigned int size)
+ice_add_rx_frag(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
+ struct sk_buff *skb, unsigned int size)
{
#if (PAGE_SIZE >= 8192)
- unsigned int truesize = SKB_DATA_ALIGN(size);
+ unsigned int truesize = SKB_DATA_ALIGN(size + ice_rx_offset(rx_ring));
#else
- unsigned int truesize = ICE_RXBUF_2048;
+ unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
#endif
if (!size)
@@ -679,10 +791,64 @@ ice_get_rx_buf(struct ice_ring *rx_ring, struct sk_buff **skb,
}
/**
+ * ice_build_skb - Build skb around an existing buffer
+ * @rx_ring: Rx descriptor ring to transact packets on
+ * @rx_buf: Rx buffer to pull data from
+ * @xdp: xdp_buff pointing to the data
+ *
+ * This function builds an skb around an existing Rx buffer, taking care
+ * to set up the skb correctly and avoid any memcpy overhead.
+ */
+static struct sk_buff *
+ice_build_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
+ struct xdp_buff *xdp)
+{
+ unsigned int metasize = xdp->data - xdp->data_meta;
+#if (PAGE_SIZE < 8192)
+ unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
+#else
+ unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
+ SKB_DATA_ALIGN(xdp->data_end -
+ xdp->data_hard_start);
+#endif
+ struct sk_buff *skb;
+
+ /* Prefetch the first cache line of the first page. If xdp->data_meta
+ * is unused, this points exactly at xdp->data; otherwise we
+ * likely have a consumer accessing the first few bytes of meta
+ * data, and then the actual data.
+ */
+ prefetch(xdp->data_meta);
+#if L1_CACHE_BYTES < 128
+ prefetch((void *)(xdp->data + L1_CACHE_BYTES));
+#endif
+ /* build an skb around the page buffer */
+ skb = build_skb(xdp->data_hard_start, truesize);
+ if (unlikely(!skb))
+ return NULL;
+
+ /* must record the Rx queue, otherwise OS features such as
+ * symmetric queues won't work
+ */
+ skb_record_rx_queue(skb, rx_ring->q_index);
+
+ /* update pointers within the skb to store the data */
+ skb_reserve(skb, xdp->data - xdp->data_hard_start);
+ __skb_put(skb, xdp->data_end - xdp->data);
+ if (metasize)
+ skb_metadata_set(skb, metasize);
+
+ /* buffer is used by skb, update page_offset */
+ ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
+
+ return skb;
+}
+
+/**
* ice_construct_skb - Allocate skb and populate it
* @rx_ring: Rx descriptor ring to transact packets on
* @rx_buf: Rx buffer to pull data from
- * @size: the length of the packet
+ * @xdp: xdp_buff pointing to the data
*
* This function allocates an skb. It then populates it with the page
* data from the current receive descriptor, taking care to set up the
@@ -690,16 +856,16 @@ ice_get_rx_buf(struct ice_ring *rx_ring, struct sk_buff **skb,
*/
static struct sk_buff *
ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
- unsigned int size)
+ struct xdp_buff *xdp)
{
- void *va = page_address(rx_buf->page) + rx_buf->page_offset;
+ unsigned int size = xdp->data_end - xdp->data;
unsigned int headlen;
struct sk_buff *skb;
/* prefetch first cache line of first page */
- prefetch(va);
+ prefetch(xdp->data);
#if L1_CACHE_BYTES < 128
- prefetch((u8 *)va + L1_CACHE_BYTES);
+ prefetch((void *)(xdp->data + L1_CACHE_BYTES));
#endif /* L1_CACHE_BYTES */
/* allocate a skb to store the frags */
@@ -712,10 +878,11 @@ ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
/* Determine available headroom for copy */
headlen = size;
if (headlen > ICE_RX_HDR_SIZE)
- headlen = eth_get_headlen(skb->dev, va, ICE_RX_HDR_SIZE);
+ headlen = eth_get_headlen(skb->dev, xdp->data, ICE_RX_HDR_SIZE);
/* align pull length to size of long to optimize memcpy performance */
- memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
+ memcpy(__skb_put(skb, headlen), xdp->data, ALIGN(headlen,
+ sizeof(long)));
/* if we exhaust the linear part then add what is left as a frag */
size -= headlen;
@@ -723,7 +890,7 @@ ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
#if (PAGE_SIZE >= 8192)
unsigned int truesize = SKB_DATA_ALIGN(size);
#else
- unsigned int truesize = ICE_RXBUF_2048;
+ unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
#endif
skb_add_rx_frag(skb, 0, rx_buf->page,
rx_buf->page_offset + headlen, size, truesize);
@@ -745,11 +912,18 @@ ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
* @rx_ring: Rx descriptor ring to transact packets on
* @rx_buf: Rx buffer to pull data from
*
- * This function will clean up the contents of the rx_buf. It will
- * either recycle the buffer or unmap it and free the associated resources.
+ * This function will update next_to_clean and then clean up the contents
+ * of the rx_buf. It will either recycle the buffer or unmap it and free
+ * the associated resources.
*/
static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
{
+ u32 ntc = rx_ring->next_to_clean + 1;
+
+ /* fetch, update, and store next to clean */
+ ntc = (ntc < rx_ring->count) ? ntc : 0;
+ rx_ring->next_to_clean = ntc;
+
if (!rx_buf)
return;
@@ -759,8 +933,9 @@ static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
rx_ring->rx_stats.page_reuse_count++;
} else {
/* we are not reusing the buffer so unmap it */
- dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma, PAGE_SIZE,
- DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
+ dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma,
+ ice_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
+ ICE_RX_DMA_ATTR);
__page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
}
@@ -770,227 +945,31 @@ static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
}
/**
- * ice_cleanup_headers - Correct empty headers
- * @skb: pointer to current skb being fixed
- *
- * Also address the case where we are pulling data in on pages only
- * and as such no data is present in the skb header.
- *
- * In addition if skb is not at least 60 bytes we need to pad it so that
- * it is large enough to qualify as a valid Ethernet frame.
- *
- * Returns true if an error was encountered and skb was freed.
- */
-static bool ice_cleanup_headers(struct sk_buff *skb)
-{
- /* if eth_skb_pad returns an error the skb was freed */
- if (eth_skb_pad(skb))
- return true;
-
- return false;
-}
-
-/**
- * ice_test_staterr - tests bits in Rx descriptor status and error fields
- * @rx_desc: pointer to receive descriptor (in le64 format)
- * @stat_err_bits: value to mask
- *
- * This function does some fast chicanery in order to return the
- * value of the mask which is really only used for boolean tests.
- * The status_error_len doesn't need to be shifted because it begins
- * at offset zero.
- */
-static bool
-ice_test_staterr(union ice_32b_rx_flex_desc *rx_desc, const u16 stat_err_bits)
-{
- return !!(rx_desc->wb.status_error0 &
- cpu_to_le16(stat_err_bits));
-}
-
-/**
* ice_is_non_eop - process handling of non-EOP buffers
* @rx_ring: Rx ring being processed
* @rx_desc: Rx descriptor for current buffer
* @skb: Current socket buffer containing buffer in progress
*
- * This function updates next to clean. If the buffer is an EOP buffer
- * this function exits returning false, otherwise it will place the
- * sk_buff in the next buffer to be chained and return true indicating
- * that this is in fact a non-EOP buffer.
+ * If the buffer is an EOP buffer, this function exits returning false,
+ * otherwise it returns true, indicating that this is in fact a non-EOP buffer.
*/
static bool
ice_is_non_eop(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
struct sk_buff *skb)
{
- u32 ntc = rx_ring->next_to_clean + 1;
-
- /* fetch, update, and store next to clean */
- ntc = (ntc < rx_ring->count) ? ntc : 0;
- rx_ring->next_to_clean = ntc;
-
- prefetch(ICE_RX_DESC(rx_ring, ntc));
-
/* if we are the last buffer then there is nothing else to do */
#define ICE_RXD_EOF BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)
if (likely(ice_test_staterr(rx_desc, ICE_RXD_EOF)))
return false;
/* place skb in next buffer to be received */
- rx_ring->rx_buf[ntc].skb = skb;
+ rx_ring->rx_buf[rx_ring->next_to_clean].skb = skb;
rx_ring->rx_stats.non_eop_descs++;
return true;
}
/**
- * ice_ptype_to_htype - get a hash type
- * @ptype: the ptype value from the descriptor
- *
- * Returns a hash type to be used by skb_set_hash
- */
-static enum pkt_hash_types ice_ptype_to_htype(u8 __always_unused ptype)
-{
- return PKT_HASH_TYPE_NONE;
-}
-
-/**
- * ice_rx_hash - set the hash value in the skb
- * @rx_ring: descriptor ring
- * @rx_desc: specific descriptor
- * @skb: pointer to current skb
- * @rx_ptype: the ptype value from the descriptor
- */
-static void
-ice_rx_hash(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
- struct sk_buff *skb, u8 rx_ptype)
-{
- struct ice_32b_rx_flex_desc_nic *nic_mdid;
- u32 hash;
-
- if (!(rx_ring->netdev->features & NETIF_F_RXHASH))
- return;
-
- if (rx_desc->wb.rxdid != ICE_RXDID_FLEX_NIC)
- return;
-
- nic_mdid = (struct ice_32b_rx_flex_desc_nic *)rx_desc;
- hash = le32_to_cpu(nic_mdid->rss_hash);
- skb_set_hash(skb, hash, ice_ptype_to_htype(rx_ptype));
-}
-
-/**
- * ice_rx_csum - Indicate in skb if checksum is good
- * @ring: the ring we care about
- * @skb: skb currently being received and modified
- * @rx_desc: the receive descriptor
- * @ptype: the packet type decoded by hardware
- *
- * skb->protocol must be set before this function is called
- */
-static void
-ice_rx_csum(struct ice_ring *ring, struct sk_buff *skb,
- union ice_32b_rx_flex_desc *rx_desc, u8 ptype)
-{
- struct ice_rx_ptype_decoded decoded;
- u32 rx_error, rx_status;
- bool ipv4, ipv6;
-
- rx_status = le16_to_cpu(rx_desc->wb.status_error0);
- rx_error = rx_status;
-
- decoded = ice_decode_rx_desc_ptype(ptype);
-
- /* Start with CHECKSUM_NONE and by default csum_level = 0 */
- skb->ip_summed = CHECKSUM_NONE;
- skb_checksum_none_assert(skb);
-
- /* check if Rx checksum is enabled */
- if (!(ring->netdev->features & NETIF_F_RXCSUM))
- return;
-
- /* check if HW has decoded the packet and checksum */
- if (!(rx_status & BIT(ICE_RX_FLEX_DESC_STATUS0_L3L4P_S)))
- return;
-
- if (!(decoded.known && decoded.outer_ip))
- return;
-
- ipv4 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
- (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV4);
- ipv6 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
- (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV6);
-
- if (ipv4 && (rx_error & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) |
- BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S))))
- goto checksum_fail;
- else if (ipv6 && (rx_status &
- (BIT(ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S))))
- goto checksum_fail;
-
- /* check for L4 errors and handle packets that were not able to be
- * checksummed due to arrival speed
- */
- if (rx_error & BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S))
- goto checksum_fail;
-
- /* Only report checksum unnecessary for TCP, UDP, or SCTP */
- switch (decoded.inner_prot) {
- case ICE_RX_PTYPE_INNER_PROT_TCP:
- case ICE_RX_PTYPE_INNER_PROT_UDP:
- case ICE_RX_PTYPE_INNER_PROT_SCTP:
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- default:
- break;
- }
- return;
-
-checksum_fail:
- ring->vsi->back->hw_csum_rx_error++;
-}
-
-/**
- * ice_process_skb_fields - Populate skb header fields from Rx descriptor
- * @rx_ring: Rx descriptor ring packet is being transacted on
- * @rx_desc: pointer to the EOP Rx descriptor
- * @skb: pointer to current skb being populated
- * @ptype: the packet type decoded by hardware
- *
- * This function checks the ring, descriptor, and packet information in
- * order to populate the hash, checksum, VLAN, protocol, and
- * other fields within the skb.
- */
-static void
-ice_process_skb_fields(struct ice_ring *rx_ring,
- union ice_32b_rx_flex_desc *rx_desc,
- struct sk_buff *skb, u8 ptype)
-{
- ice_rx_hash(rx_ring, rx_desc, skb, ptype);
-
- /* modifies the skb - consumes the enet header */
- skb->protocol = eth_type_trans(skb, rx_ring->netdev);
-
- ice_rx_csum(rx_ring, skb, rx_desc, ptype);
-}
-
-/**
- * ice_receive_skb - Send a completed packet up the stack
- * @rx_ring: Rx ring in play
- * @skb: packet to send up
- * @vlan_tag: VLAN tag for packet
- *
- * This function sends the completed packet (via. skb) up the stack using
- * gro receive functions (with/without VLAN tag)
- */
-static void
-ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag)
-{
- if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
- (vlan_tag & VLAN_VID_MASK))
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
- napi_gro_receive(&rx_ring->q_vector->napi, skb);
-}
-
-/**
* ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
* @rx_ring: Rx descriptor ring to transact packets on
* @budget: Total limit on number of packets to process
@@ -1006,8 +985,13 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
{
unsigned int total_rx_bytes = 0, total_rx_pkts = 0;
u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
+ unsigned int xdp_res, xdp_xmit = 0;
+ struct bpf_prog *xdp_prog = NULL;
+ struct xdp_buff xdp;
bool failure;
+ xdp.rxq = &rx_ring->xdp_rxq;
+
/* start the loop to process Rx packets bounded by 'budget' */
while (likely(total_rx_pkts < (unsigned int)budget)) {
union ice_32b_rx_flex_desc *rx_desc;
@@ -1042,10 +1026,57 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
/* retrieve a buffer from the ring */
rx_buf = ice_get_rx_buf(rx_ring, &skb, size);
+ if (!size) {
+ xdp.data = NULL;
+ xdp.data_end = NULL;
+ xdp.data_hard_start = NULL;
+ xdp.data_meta = NULL;
+ goto construct_skb;
+ }
+
+ xdp.data = page_address(rx_buf->page) + rx_buf->page_offset;
+ xdp.data_hard_start = xdp.data - ice_rx_offset(rx_ring);
+ xdp.data_meta = xdp.data;
+ xdp.data_end = xdp.data + size;
+
+ rcu_read_lock();
+ xdp_prog = READ_ONCE(rx_ring->xdp_prog);
+ if (!xdp_prog) {
+ rcu_read_unlock();
+ goto construct_skb;
+ }
+
+ xdp_res = ice_run_xdp(rx_ring, &xdp, xdp_prog);
+ rcu_read_unlock();
+ if (!xdp_res)
+ goto construct_skb;
+ if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR)) {
+ unsigned int truesize;
+
+#if (PAGE_SIZE < 8192)
+ truesize = ice_rx_pg_size(rx_ring) / 2;
+#else
+ truesize = SKB_DATA_ALIGN(ice_rx_offset(rx_ring) +
+ size);
+#endif
+ xdp_xmit |= xdp_res;
+ ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
+ } else {
+ rx_buf->pagecnt_bias++;
+ }
+ total_rx_bytes += size;
+ total_rx_pkts++;
+
+ cleaned_count++;
+ ice_put_rx_buf(rx_ring, rx_buf);
+ continue;
+construct_skb:
if (skb)
- ice_add_rx_frag(rx_buf, skb, size);
+ ice_add_rx_frag(rx_ring, rx_buf, skb, size);
+ else if (ice_ring_uses_build_skb(rx_ring))
+ skb = ice_build_skb(rx_ring, rx_buf, &xdp);
else
- skb = ice_construct_skb(rx_ring, rx_buf, size);
+ skb = ice_construct_skb(rx_ring, rx_buf, &xdp);
/* exit if we failed to retrieve a buffer */
if (!skb) {
@@ -1072,10 +1103,8 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
if (ice_test_staterr(rx_desc, stat_err_bits))
vlan_tag = le16_to_cpu(rx_desc->wb.l2tag1);
- /* correct empty headers and pad skb if needed (to make valid
- * ethernet frame
- */
- if (ice_cleanup_headers(skb)) {
+ /* pad the skb if needed, to make a valid ethernet frame */
+ if (eth_skb_pad(skb)) {
skb = NULL;
continue;
}
@@ -1099,13 +1128,10 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
/* return up to cleaned_count buffers to hardware */
failure = ice_alloc_rx_bufs(rx_ring, cleaned_count);
- /* update queue and vector specific stats */
- u64_stats_update_begin(&rx_ring->syncp);
- rx_ring->stats.pkts += total_rx_pkts;
- rx_ring->stats.bytes += total_rx_bytes;
- u64_stats_update_end(&rx_ring->syncp);
- rx_ring->q_vector->rx.total_pkts += total_rx_pkts;
- rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
+ if (xdp_prog)
+ ice_finalize_xdp_rx(rx_ring, xdp_xmit);
+
+ ice_update_rx_ring_stats(rx_ring, total_rx_pkts, total_rx_bytes);
/* guarantee a trip back through this routine if there was a failure */
return failure ? budget : (int)total_rx_pkts;
@@ -1483,9 +1509,14 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
/* Since the actual Tx work is minimal, we can give the Tx a larger
* budget and be more aggressive about cleaning up the Tx descriptors.
*/
- ice_for_each_ring(ring, q_vector->tx)
- if (!ice_clean_tx_irq(ring, budget))
+ ice_for_each_ring(ring, q_vector->tx) {
+ bool wd = ring->xsk_umem ?
+ ice_clean_tx_irq_zc(ring, budget) :
+ ice_clean_tx_irq(ring, budget);
+
+ if (!wd)
clean_complete = false;
+ }
/* Handle case where we are called by netpoll with a budget of 0 */
if (unlikely(budget <= 0))
@@ -1505,7 +1536,13 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
ice_for_each_ring(ring, q_vector->rx) {
int cleaned;
- cleaned = ice_clean_rx_irq(ring, budget_per_ring);
+ /* A dedicated path for zero-copy allows making a single
+ * comparison in the irq context instead of many inside the
+ * ice_clean_rx_irq function and makes the codebase cleaner.
+ */
+ cleaned = ring->xsk_umem ?
+ ice_clean_rx_irq_zc(ring, budget_per_ring) :
+ ice_clean_rx_irq(ring, budget_per_ring);
work_done += cleaned;
/* if we clean as many as budgeted, we must not be done */
if (cleaned >= budget_per_ring)
@@ -1527,17 +1564,6 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
return min_t(int, work_done, budget - 1);
}
-/* helper function for building cmd/type/offset */
-static __le64
-build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag)
-{
- return cpu_to_le64(ICE_TX_DESC_DTYPE_DATA |
- (td_cmd << ICE_TXD_QW1_CMD_S) |
- (td_offset << ICE_TXD_QW1_OFFSET_S) |
- ((u64)size << ICE_TXD_QW1_TX_BUF_SZ_S) |
- (td_tag << ICE_TXD_QW1_L2TAG1_S));
-}
-
/**
* __ice_maybe_stop_tx - 2nd level check for Tx stop conditions
* @tx_ring: the ring to be checked
@@ -1689,9 +1715,9 @@ ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first,
i = 0;
/* write last descriptor with RS and EOP bits */
- td_cmd |= (u64)(ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS);
- tx_desc->cmd_type_offset_bsz =
- build_ctob(td_cmd, td_offset, size, td_tag);
+ td_cmd |= (u64)ICE_TXD_LAST_DESC_CMD;
+ tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset, size,
+ td_tag);
/* Force memory writes to complete before letting h/w know there
* are new descriptors to fetch.
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index 94a9280193e2..a84cc0e6dd27 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -4,8 +4,12 @@
#ifndef _ICE_TXRX_H_
#define _ICE_TXRX_H_
+#include "ice_type.h"
+
#define ICE_DFLT_IRQ_WORK 256
+#define ICE_RXBUF_3072 3072
#define ICE_RXBUF_2048 2048
+#define ICE_RXBUF_1536 1536
#define ICE_MAX_CHAINED_RX_BUFS 5
#define ICE_MAX_BUF_TXD 8
#define ICE_MIN_TX_LEN 17
@@ -22,6 +26,71 @@
#define ICE_RX_BUF_WRITE 16 /* Must be power of 2 */
#define ICE_MAX_TXQ_PER_TXQG 128
+/* Attempt to maximize the headroom available for incoming frames. We use a 2K
+ * buffer for MTUs <= 1500 and need 1536/1534 to store the data for the frame.
+ * This leaves us with 512 bytes of room. From that we need to deduct the
+ * space needed for the shared info and the padding needed to IP align the
+ * frame.
+ *
+ * Note: For cache line sizes 256 or larger this value is going to end
+ * up negative. In these cases we should fall back to the legacy
+ * receive path.
+ */
+#if (PAGE_SIZE < 8192)
+#define ICE_2K_TOO_SMALL_WITH_PADDING \
+((NET_SKB_PAD + ICE_RXBUF_1536) > SKB_WITH_OVERHEAD(ICE_RXBUF_2048))
+
+/**
+ * ice_compute_pad - compute the padding
+ * @rx_buf_len: buffer length
+ *
+ * Round the given buffer length up to half a page, then subtract the
+ * skb_shared_info overhead and the buffer length itself; what remains
+ * is the space left for padding usage
+ */
+static inline int ice_compute_pad(int rx_buf_len)
+{
+ int half_page_size;
+
+ half_page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
+ return SKB_WITH_OVERHEAD(half_page_size) - rx_buf_len;
+}
+
+/**
+ * ice_skb_pad - determine the padding that we can supply
+ *
+ * Figure out the right Rx buffer size and, based on that, calculate the
+ * padding
+ */
+static inline int ice_skb_pad(void)
+{
+ int rx_buf_len;
+
+ /* If a 2K buffer cannot handle a standard Ethernet frame then
+ * optimize padding for a 3K buffer instead of a 1.5K buffer.
+ *
+ * For a 3K buffer we need to add enough padding to allow for
+ * tailroom due to NET_IP_ALIGN possibly shifting us out of
+ * cache-line alignment.
+ */
+ if (ICE_2K_TOO_SMALL_WITH_PADDING)
+ rx_buf_len = ICE_RXBUF_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN);
+ else
+ rx_buf_len = ICE_RXBUF_1536;
+
+ /* if needed make room for NET_IP_ALIGN */
+ rx_buf_len -= NET_IP_ALIGN;
+
+ return ice_compute_pad(rx_buf_len);
+}
+
+#define ICE_SKB_PAD ice_skb_pad()
+#else
+#define ICE_2K_TOO_SMALL_WITH_PADDING false
+#define ICE_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
+#endif
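A worked instance of the math above, under assumed x86-64 build parameters (4 KB pages, 64 B cache lines, NET_SKB_PAD == 64, NET_IP_ALIGN == 0, and SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) == 320 — all of these are assumptions, so check your configuration):

    ICE_2K_TOO_SMALL_WITH_PADDING: 64 + 1536 = 1600 <= 2048 - 320 = 1728  ->  false
    rx_buf_len    = ICE_RXBUF_1536 - 0       = 1536
    half_page     = ALIGN(1536, 2048)        = 2048
    ice_skb_pad() = (2048 - 320) - 1536      = 192 bytes of headroom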
+
/* We are assuming that the cache line is always 64 Bytes here for ice.
* In order to make sure that is a correct assumption there is a check in probe
* to print a warning if the read from GLPCI_CNF2 tells us that the cache line
@@ -49,12 +118,24 @@
#define ICE_TX_FLAGS_VLAN_PR_S 29
#define ICE_TX_FLAGS_VLAN_S 16
+#define ICE_XDP_PASS 0
+#define ICE_XDP_CONSUMED BIT(0)
+#define ICE_XDP_TX BIT(1)
+#define ICE_XDP_REDIR BIT(2)
+
#define ICE_RX_DMA_ATTR \
(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
+#define ICE_ETH_PKT_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))
+
+#define ICE_TXD_LAST_DESC_CMD (ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS)
+
struct ice_tx_buf {
struct ice_tx_desc *next_to_watch;
- struct sk_buff *skb;
+ union {
+ struct sk_buff *skb;
+ void *raw_buf; /* used for XDP */
+ };
unsigned int bytecount;
unsigned short gso_segs;
u32 tx_flags;
@@ -76,9 +157,17 @@ struct ice_tx_offload_params {
struct ice_rx_buf {
struct sk_buff *skb;
dma_addr_t dma;
- struct page *page;
- unsigned int page_offset;
- u16 pagecnt_bias;
+ union {
+ struct {
+ struct page *page;
+ unsigned int page_offset;
+ u16 pagecnt_bias;
+ };
+ struct {
+ void *addr;
+ u64 handle;
+ };
+ };
};
struct ice_q_stats {
@@ -198,18 +287,44 @@ struct ice_ring {
};
struct rcu_head rcu; /* to avoid race on free */
+ struct bpf_prog *xdp_prog;
+ struct xdp_umem *xsk_umem;
+ struct zero_copy_allocator zca;
+ /* CL3 - 3rd cacheline starts here */
+ struct xdp_rxq_info xdp_rxq;
/* CLX - the below items are only accessed infrequently and should be
* in their own cache line if possible
*/
+#define ICE_TX_FLAGS_RING_XDP BIT(0)
+#define ICE_RX_FLAGS_RING_BUILD_SKB BIT(1)
+ u8 flags;
dma_addr_t dma; /* physical address of ring */
unsigned int size; /* length of descriptor ring in bytes */
u32 txq_teid; /* Added Tx queue TEID */
u16 rx_buf_len;
-#ifdef CONFIG_DCB
u8 dcb_tc; /* Traffic class of ring */
-#endif /* CONFIG_DCB */
} ____cacheline_internodealigned_in_smp;
+static inline bool ice_ring_uses_build_skb(struct ice_ring *ring)
+{
+ return !!(ring->flags & ICE_RX_FLAGS_RING_BUILD_SKB);
+}
+
+static inline void ice_set_ring_build_skb_ena(struct ice_ring *ring)
+{
+ ring->flags |= ICE_RX_FLAGS_RING_BUILD_SKB;
+}
+
+static inline void ice_clear_ring_build_skb_ena(struct ice_ring *ring)
+{
+ ring->flags &= ~ICE_RX_FLAGS_RING_BUILD_SKB;
+}
+
+static inline bool ice_ring_is_xdp(struct ice_ring *ring)
+{
+ return !!(ring->flags & ICE_TX_FLAGS_RING_XDP);
+}
+
struct ice_ring_container {
/* head of linked-list of rings */
struct ice_ring *ring;
@@ -230,6 +345,19 @@ struct ice_ring_container {
#define ice_for_each_ring(pos, head) \
for (pos = (head).ring; pos; pos = pos->next)
+static inline unsigned int ice_rx_pg_order(struct ice_ring *ring)
+{
+#if (PAGE_SIZE < 8192)
+ if (ring->rx_buf_len > (PAGE_SIZE / 2))
+ return 1;
+#endif
+ return 0;
+}
+
+#define ice_rx_pg_size(_ring) (PAGE_SIZE << ice_rx_pg_order(_ring))
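For example, with 4 KB pages and rx_buf_len == ICE_RXBUF_3072 the buffer no longer fits in half a page, so ice_rx_pg_order() returns 1 and ice_rx_pg_size() yields 8192; the half-page split used in the Rx path then gives each buffer a 4096-byte truesize.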
+
+union ice_32b_rx_flex_desc;
+
bool ice_alloc_rx_bufs(struct ice_ring *rxr, u16 cleaned_count);
netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev);
void ice_clean_tx_ring(struct ice_ring *tx_ring);
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
new file mode 100644
index 000000000000..35bbc4ff603c
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
@@ -0,0 +1,273 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019, Intel Corporation. */
+
+#include "ice_txrx_lib.h"
+
+/**
+ * ice_release_rx_desc - Store the new tail and head values
+ * @rx_ring: ring to bump
+ * @val: new head index
+ */
+void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val)
+{
+ u16 prev_ntu = rx_ring->next_to_use;
+
+ rx_ring->next_to_use = val;
+
+ /* update next to alloc since we have filled the ring */
+ rx_ring->next_to_alloc = val;
+
+ /* QRX_TAIL will be updated with any tail value, but hardware ignores
+ * the lower 3 bits. This makes it so we only bump tail on meaningful
+ * boundaries. Also, this allows us to bump tail on intervals of 8 up to
+ * the budget depending on the current traffic load.
+ */
+ val &= ~0x7;
+ if (prev_ntu != val) {
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+ writel(val, rx_ring->tail);
+ }
+}
+
+/**
+ * ice_ptype_to_htype - get a hash type
+ * @ptype: the ptype value from the descriptor
+ *
+ * Returns a hash type to be used by skb_set_hash
+ */
+static enum pkt_hash_types ice_ptype_to_htype(u8 __always_unused ptype)
+{
+ return PKT_HASH_TYPE_NONE;
+}
+
+/**
+ * ice_rx_hash - set the hash value in the skb
+ * @rx_ring: descriptor ring
+ * @rx_desc: specific descriptor
+ * @skb: pointer to current skb
+ * @rx_ptype: the ptype value from the descriptor
+ */
+static void
+ice_rx_hash(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
+ struct sk_buff *skb, u8 rx_ptype)
+{
+ struct ice_32b_rx_flex_desc_nic *nic_mdid;
+ u32 hash;
+
+ if (!(rx_ring->netdev->features & NETIF_F_RXHASH))
+ return;
+
+ if (rx_desc->wb.rxdid != ICE_RXDID_FLEX_NIC)
+ return;
+
+ nic_mdid = (struct ice_32b_rx_flex_desc_nic *)rx_desc;
+ hash = le32_to_cpu(nic_mdid->rss_hash);
+ skb_set_hash(skb, hash, ice_ptype_to_htype(rx_ptype));
+}
+
+/**
+ * ice_rx_csum - Indicate in skb if checksum is good
+ * @ring: the ring we care about
+ * @skb: skb currently being received and modified
+ * @rx_desc: the receive descriptor
+ * @ptype: the packet type decoded by hardware
+ *
+ * skb->protocol must be set before this function is called
+ */
+static void
+ice_rx_csum(struct ice_ring *ring, struct sk_buff *skb,
+ union ice_32b_rx_flex_desc *rx_desc, u8 ptype)
+{
+ struct ice_rx_ptype_decoded decoded;
+ u32 rx_error, rx_status;
+ bool ipv4, ipv6;
+
+ rx_status = le16_to_cpu(rx_desc->wb.status_error0);
+ rx_error = rx_status;
+
+ decoded = ice_decode_rx_desc_ptype(ptype);
+
+ /* Start with CHECKSUM_NONE and by default csum_level = 0 */
+ skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
+
+ /* check if Rx checksum is enabled */
+ if (!(ring->netdev->features & NETIF_F_RXCSUM))
+ return;
+
+ /* check if HW has decoded the packet and checksum */
+ if (!(rx_status & BIT(ICE_RX_FLEX_DESC_STATUS0_L3L4P_S)))
+ return;
+
+ if (!(decoded.known && decoded.outer_ip))
+ return;
+
+ ipv4 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
+ (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV4);
+ ipv6 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
+ (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV6);
+
+ if (ipv4 && (rx_error & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) |
+ BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S))))
+ goto checksum_fail;
+ else if (ipv6 && (rx_status &
+ (BIT(ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S))))
+ goto checksum_fail;
+
+ /* check for L4 errors and handle packets that were not able to be
+ * checksummed due to arrival speed
+ */
+ if (rx_error & BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S))
+ goto checksum_fail;
+
+ /* Only report checksum unnecessary for TCP, UDP, or SCTP */
+ switch (decoded.inner_prot) {
+ case ICE_RX_PTYPE_INNER_PROT_TCP:
+ case ICE_RX_PTYPE_INNER_PROT_UDP:
+ case ICE_RX_PTYPE_INNER_PROT_SCTP:
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ break;
+ default:
+ break;
+ }
+ return;
+
+checksum_fail:
+ ring->vsi->back->hw_csum_rx_error++;
+}
+
+/**
+ * ice_process_skb_fields - Populate skb header fields from Rx descriptor
+ * @rx_ring: Rx descriptor ring packet is being transacted on
+ * @rx_desc: pointer to the EOP Rx descriptor
+ * @skb: pointer to current skb being populated
+ * @ptype: the packet type decoded by hardware
+ *
+ * This function checks the ring, descriptor, and packet information in
+ * order to populate the hash, checksum, VLAN, protocol, and
+ * other fields within the skb.
+ */
+void
+ice_process_skb_fields(struct ice_ring *rx_ring,
+ union ice_32b_rx_flex_desc *rx_desc,
+ struct sk_buff *skb, u8 ptype)
+{
+ ice_rx_hash(rx_ring, rx_desc, skb, ptype);
+
+ /* modifies the skb - consumes the enet header */
+ skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+
+ ice_rx_csum(rx_ring, skb, rx_desc, ptype);
+}
+
+/**
+ * ice_receive_skb - Send a completed packet up the stack
+ * @rx_ring: Rx ring in play
+ * @skb: packet to send up
+ * @vlan_tag: VLAN tag for packet
+ *
+ * This function sends the completed packet (via skb) up the stack using
+ * the GRO receive functions (with or without a VLAN tag)
+ */
+void
+ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag)
+{
+ if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
+ (vlan_tag & VLAN_VID_MASK))
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
+ napi_gro_receive(&rx_ring->q_vector->napi, skb);
+}
+
+/**
+ * ice_xmit_xdp_ring - submit single packet to XDP ring for transmission
+ * @data: packet data pointer
+ * @size: packet data size
+ * @xdp_ring: XDP ring for transmission
+ */
+int ice_xmit_xdp_ring(void *data, u16 size, struct ice_ring *xdp_ring)
+{
+ u16 i = xdp_ring->next_to_use;
+ struct ice_tx_desc *tx_desc;
+ struct ice_tx_buf *tx_buf;
+ dma_addr_t dma;
+
+ if (unlikely(!ICE_DESC_UNUSED(xdp_ring))) {
+ xdp_ring->tx_stats.tx_busy++;
+ return ICE_XDP_CONSUMED;
+ }
+
+ dma = dma_map_single(xdp_ring->dev, data, size, DMA_TO_DEVICE);
+ if (dma_mapping_error(xdp_ring->dev, dma))
+ return ICE_XDP_CONSUMED;
+
+ tx_buf = &xdp_ring->tx_buf[i];
+ tx_buf->bytecount = size;
+ tx_buf->gso_segs = 1;
+ tx_buf->raw_buf = data;
+
+ /* record length, and DMA address */
+ dma_unmap_len_set(tx_buf, len, size);
+ dma_unmap_addr_set(tx_buf, dma, dma);
+
+ tx_desc = ICE_TX_DESC(xdp_ring, i);
+ tx_desc->buf_addr = cpu_to_le64(dma);
+ tx_desc->cmd_type_offset_bsz = build_ctob(ICE_TXD_LAST_DESC_CMD, 0,
+ size, 0);
+
+ /* Make certain all of the status bits have been updated
+ * before next_to_watch is written.
+ */
+ smp_wmb();
+
+ i++;
+ if (i == xdp_ring->count)
+ i = 0;
+
+ tx_buf->next_to_watch = tx_desc;
+ xdp_ring->next_to_use = i;
+
+ return ICE_XDP_TX;
+}
+
+/**
+ * ice_xmit_xdp_buff - convert an XDP buffer to an XDP frame and send it
+ * @xdp: XDP buffer
+ * @xdp_ring: XDP Tx ring
+ *
+ * Returns negative on failure, 0 on success.
+ */
+int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_ring *xdp_ring)
+{
+ struct xdp_frame *xdpf = convert_to_xdp_frame(xdp);
+
+ if (unlikely(!xdpf))
+ return ICE_XDP_CONSUMED;
+
+ return ice_xmit_xdp_ring(xdpf->data, xdpf->len, xdp_ring);
+}
+
+/**
+ * ice_finalize_xdp_rx - Bump XDP Tx tail and/or flush redirect map
+ * @rx_ring: Rx ring
+ * @xdp_res: Result of the receive batch
+ *
+ * This function bumps the XDP Tx tail and/or flushes the redirect map; it
+ * should be called when a batch of packets has been processed in the
+ * NAPI loop.
+ */
+void ice_finalize_xdp_rx(struct ice_ring *rx_ring, unsigned int xdp_res)
+{
+ if (xdp_res & ICE_XDP_REDIR)
+ xdp_do_flush_map();
+
+ if (xdp_res & ICE_XDP_TX) {
+ struct ice_ring *xdp_ring =
+ rx_ring->vsi->xdp_rings[rx_ring->q_index];
+
+ ice_xdp_ring_update_tail(xdp_ring);
+ }
+}
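ice_finalize_xdp_rx() is intended to run once per NAPI poll, after the per-packet XDP verdicts have been OR-ed together. A schematic of the expected caller — the loop body and helper name are illustrative, not the driver's actual Rx routine:

    unsigned int xdp_xmit = 0;

    while (packets_left) {
        /* per packet: ICE_XDP_PASS, ICE_XDP_TX, ICE_XDP_REDIR, ... */
        xdp_xmit |= ice_run_xdp(rx_ring, &xdp);  /* hypothetical helper */
    }
    ice_finalize_xdp_rx(rx_ring, xdp_xmit);  /* one bump/flush per batch */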
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
new file mode 100644
index 000000000000..ba9164dad9ae
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019, Intel Corporation. */
+
+#ifndef _ICE_TXRX_LIB_H_
+#define _ICE_TXRX_LIB_H_
+#include "ice.h"
+
+/**
+ * ice_test_staterr - tests bits in Rx descriptor status and error fields
+ * @rx_desc: pointer to receive descriptor (little-endian format)
+ * @stat_err_bits: value to mask
+ *
+ * This function does some fast chicanery in order to return the
+ * value of the mask which is really only used for boolean tests.
+ * The status_error0 field doesn't need to be shifted because it begins
+ * at offset zero.
+ */
+static inline bool
+ice_test_staterr(union ice_32b_rx_flex_desc *rx_desc, const u16 stat_err_bits)
+{
+ return !!(rx_desc->wb.status_error0 & cpu_to_le16(stat_err_bits));
+}
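A typical use is testing a single descriptor status bit without byte-swapping the descriptor on every check, e.g. the end-of-frame bit (the bit name follows the driver's flex descriptor definitions and is assumed here):

    u16 stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S);

    if (ice_test_staterr(rx_desc, stat_err_bits))
        frame_complete = true;  /* last fragment has landed */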
+
+static inline __le64
+build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag)
+{
+ return cpu_to_le64(ICE_TX_DESC_DTYPE_DATA |
+ (td_cmd << ICE_TXD_QW1_CMD_S) |
+ (td_offset << ICE_TXD_QW1_OFFSET_S) |
+ ((u64)size << ICE_TXD_QW1_TX_BUF_SZ_S) |
+ (td_tag << ICE_TXD_QW1_L2TAG1_S));
+}
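build_ctob() packs the command, header offset, buffer size and L2 tag around the DATA descriptor type in the descriptor's second quadword. For a plain final descriptor over a 128-byte buffer the call reduces to the following (the shift constants live in ice_lan_tx_rx.h; their exact values are not quoted here):

    /* qw1 = DTYPE_DATA | (last-desc cmd << CMD_S) | (128 << TX_BUF_SZ_S) */
    tx_desc->cmd_type_offset_bsz =
        build_ctob(ICE_TXD_LAST_DESC_CMD, 0, 128, 0);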
+
+/**
+ * ice_xdp_ring_update_tail - Updates the XDP Tx ring tail register
+ * @xdp_ring: XDP Tx ring
+ *
+ * This function updates the XDP Tx ring tail register.
+ */
+static inline void ice_xdp_ring_update_tail(struct ice_ring *xdp_ring)
+{
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch.
+ */
+ wmb();
+ writel_relaxed(xdp_ring->next_to_use, xdp_ring->tail);
+}
+
+void ice_finalize_xdp_rx(struct ice_ring *rx_ring, unsigned int xdp_res);
+int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_ring *xdp_ring);
+int ice_xmit_xdp_ring(void *data, u16 size, struct ice_ring *xdp_ring);
+void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val);
+void
+ice_process_skb_fields(struct ice_ring *rx_ring,
+ union ice_32b_rx_flex_desc *rx_desc,
+ struct sk_buff *skb, u8 ptype);
+void
+ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag);
+#endif /* !_ICE_TXRX_LIB_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index 6667d17a4206..c4854a987130 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -19,6 +19,17 @@ static inline bool ice_is_tc_ena(unsigned long bitmap, u8 tc)
return test_bit(tc, &bitmap);
}
+static inline u64 round_up_64bit(u64 a, u32 b)
+{
+ return div64_long(a + b / 2, b);
+}
+
+static inline u32 ice_round_to_num(u32 N, u32 R)
+{
+ return ((N % R) < (R / 2)) ? ((N / R) * R) :
+ (((N + R - 1) / R) * R);
+}
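Note that, despite its name, round_up_64bit() performs nearest-integer division (it biases by b/2 before dividing), and ice_round_to_num() snaps N to the nearer multiple of R, rounding up only from the halfway point:

    /* round_up_64bit(1250, 500)   -> (1250 + 250) / 500 = 3 (a quotient)  */
    /* ice_round_to_num(1449, 100) -> 1400 (remainder 49 < 50, round down) */
    /* ice_round_to_num(1450, 100) -> 1500 (remainder 50, round up)        */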
+
/* Driver always calls main vsi_handle first */
#define ICE_MAIN_VSI_HANDLE 0
@@ -35,6 +46,8 @@ static inline bool ice_is_tc_ena(unsigned long bitmap, u8 tc)
#define ICE_DBG_PKG BIT_ULL(16)
#define ICE_DBG_RES BIT_ULL(17)
#define ICE_DBG_AQ_MSG BIT_ULL(24)
+#define ICE_DBG_AQ_DESC BIT_ULL(25)
+#define ICE_DBG_AQ_DESC_BUF BIT_ULL(26)
#define ICE_DBG_AQ_CMD BIT_ULL(27)
#define ICE_DBG_USER BIT_ULL(31)
@@ -189,6 +202,7 @@ struct ice_hw_dev_caps {
struct ice_hw_common_caps common_cap;
u32 num_vfs_exposed; /* Total number of VFs exposed */
u32 num_vsi_allocd_to_host; /* Excluding EMP VSI */
+ u32 num_funcs;
};
/* MAC info */
@@ -272,10 +286,56 @@ enum ice_agg_type {
ICE_AGG_TYPE_QG
};
+/* Rate limit types */
+enum ice_rl_type {
+ ICE_UNKNOWN_BW = 0,
+ ICE_MIN_BW, /* for CIR profile */
+ ICE_MAX_BW, /* for EIR profile */
+ ICE_SHARED_BW /* for shared profile */
+};
+
+#define ICE_SCHED_MIN_BW 500 /* in Kbps */
+#define ICE_SCHED_MAX_BW 100000000 /* in Kbps */
+#define ICE_SCHED_DFLT_BW 0xFFFFFFFF /* unlimited */
#define ICE_SCHED_DFLT_RL_PROF_ID 0
+#define ICE_SCHED_NO_SHARED_RL_PROF_ID 0xFFFF
#define ICE_SCHED_DFLT_BW_WT 1
+#define ICE_SCHED_INVAL_PROF_ID 0xFFFF
+#define ICE_SCHED_DFLT_BURST_SIZE (15 * 1024) /* in bytes (15k) */
-/* VSI type list entry to locate corresponding VSI/ag nodes */
+/* Data structure for saving BW information */
+enum ice_bw_type {
+ ICE_BW_TYPE_PRIO,
+ ICE_BW_TYPE_CIR,
+ ICE_BW_TYPE_CIR_WT,
+ ICE_BW_TYPE_EIR,
+ ICE_BW_TYPE_EIR_WT,
+ ICE_BW_TYPE_SHARED,
+ ICE_BW_TYPE_CNT /* This must be last */
+};
+
+struct ice_bw {
+ u32 bw;
+ u16 bw_alloc;
+};
+
+struct ice_bw_type_info {
+ DECLARE_BITMAP(bw_t_bitmap, ICE_BW_TYPE_CNT);
+ u8 generic;
+ struct ice_bw cir_bw;
+ struct ice_bw eir_bw;
+ u32 shared_bw;
+};
+
+/* VSI queue context structure for given TC */
+struct ice_q_ctx {
+ u16 q_handle;
+ u32 q_teid;
+ /* bw_t_info saves queue BW information */
+ struct ice_bw_type_info bw_t_info;
+};
+
+/* VSI type list entry to locate corresponding VSI/aggregator nodes */
struct ice_sched_vsi_info {
struct ice_sched_node *vsi_node[ICE_MAX_TRAFFIC_CLASS];
struct ice_sched_node *ag_node[ICE_MAX_TRAFFIC_CLASS];
@@ -364,6 +424,8 @@ struct ice_port_info {
struct mutex sched_lock; /* protect access to TXSched tree */
struct ice_sched_node *
sib_head[ICE_MAX_TRAFFIC_CLASS][ICE_AQC_TOPO_MAX_LEVEL_NUM];
+ /* List containing profile ID(s) and other params per layer */
+ struct list_head rl_prof_list[ICE_AQC_TOPO_MAX_LEVEL_NUM];
struct ice_dcbx_cfg local_dcbx_cfg; /* Oper/Local Cfg */
/* DCBX info */
struct ice_dcbx_cfg remote_dcbx_cfg; /* Peer Cfg */
@@ -415,6 +477,8 @@ struct ice_hw {
u8 pf_id; /* device profile info */
+ u16 max_burst_size; /* driver sets this value */
+
/* Tx Scheduler values */
u16 num_tx_sched_layers;
u16 num_tx_sched_phys_layers;
@@ -555,6 +619,8 @@ struct ice_hw_port_stats {
};
/* Checksum and Shadow RAM pointers */
+#define ICE_SR_BOOT_CFG_PTR 0x132
+#define ICE_NVM_OEM_VER_OFF 0x02
#define ICE_SR_NVM_DEV_STARTER_VER 0x18
#define ICE_SR_NVM_EETRACK_LO 0x2D
#define ICE_SR_NVM_EETRACK_HI 0x2E
@@ -568,6 +634,7 @@ struct ice_hw_port_stats {
#define ICE_OEM_VER_BUILD_MASK (0xffff << ICE_OEM_VER_BUILD_SHIFT)
#define ICE_OEM_VER_SHIFT 24
#define ICE_OEM_VER_MASK (0xff << ICE_OEM_VER_SHIFT)
+#define ICE_SR_PFA_PTR 0x40
#define ICE_SR_SECTOR_SIZE_IN_WORDS 0x800
#define ICE_SR_WORDS_IN_1KB 512
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
index b45797f39b2f..edb374296d1f 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
@@ -2,9 +2,39 @@
/* Copyright (c) 2018, Intel Corporation. */
#include "ice.h"
+#include "ice_base.h"
#include "ice_lib.h"
/**
+ * ice_validate_vf_id - helper to check if VF ID is valid
+ * @pf: pointer to the PF structure
+ * @vf_id: the ID of the VF to check
+ */
+static int ice_validate_vf_id(struct ice_pf *pf, int vf_id)
+{
+ if (vf_id >= pf->num_alloc_vfs) {
+ dev_err(ice_pf_to_dev(pf), "Invalid VF ID: %d\n", vf_id);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/**
+ * ice_check_vf_init - helper to check if VF init is complete
+ * @pf: pointer to the PF structure
+ * @vf: the pointer to the VF to check
+ */
+static int ice_check_vf_init(struct ice_pf *pf, struct ice_vf *vf)
+{
+ if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
+ dev_err(ice_pf_to_dev(pf), "VF ID: %d in reset. Try again.\n",
+ vf->vf_id);
+ return -EBUSY;
+ }
+ return 0;
+}
+
+/**
* ice_err_to_virt_err - translate errors for VF return code
* @ice_err: error return code
*/
@@ -184,12 +214,14 @@ static void ice_dis_vf_mappings(struct ice_vf *vf)
{
struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
+ struct device *dev;
int first, last, v;
struct ice_hw *hw;
hw = &pf->hw;
vsi = pf->vsi[vf->lan_vsi_idx];
+ dev = ice_pf_to_dev(pf);
wr32(hw, VPINT_ALLOC(vf->vf_id), 0);
wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), 0);
@@ -208,13 +240,12 @@ static void ice_dis_vf_mappings(struct ice_vf *vf)
if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG)
wr32(hw, VPLAN_TX_QBASE(vf->vf_id), 0);
else
- dev_err(&pf->pdev->dev,
- "Scattered mode for VF Tx queues is not yet implemented\n");
+ dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");
if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG)
wr32(hw, VPLAN_RX_QBASE(vf->vf_id), 0);
else
- dev_err(&pf->pdev->dev,
+ dev_err(dev,
"Scattered mode for VF Rx queues is not yet implemented\n");
}
@@ -289,6 +320,7 @@ static void ice_dis_vf_qs(struct ice_vf *vf)
*/
void ice_free_vfs(struct ice_pf *pf)
{
+ struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
int tmp, i;
@@ -310,24 +342,24 @@ void ice_free_vfs(struct ice_pf *pf)
if (!pci_vfs_assigned(pf->pdev))
pci_disable_sriov(pf->pdev);
else
- dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");
+ dev_warn(dev, "VFs are assigned - not disabling SR-IOV\n");
tmp = pf->num_alloc_vfs;
pf->num_vf_qps = 0;
pf->num_alloc_vfs = 0;
for (i = 0; i < tmp; i++) {
if (test_bit(ICE_VF_STATE_INIT, pf->vf[i].vf_states)) {
- /* disable VF qp mappings */
+ /* disable VF qp mappings and set VF disable state */
ice_dis_vf_mappings(&pf->vf[i]);
+ set_bit(ICE_VF_STATE_DIS, pf->vf[i].vf_states);
ice_free_vf_res(&pf->vf[i]);
}
}
if (ice_sriov_free_msix_res(pf))
- dev_err(&pf->pdev->dev,
- "Failed to free MSIX resources used by SR-IOV\n");
+ dev_err(dev, "Failed to free MSIX resources used by SR-IOV\n");
- devm_kfree(&pf->pdev->dev, pf->vf);
+ devm_kfree(dev, pf->vf);
pf->vf = NULL;
/* This check is for when the driver is unloaded while VFs are
@@ -366,9 +398,11 @@ static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr)
{
struct ice_pf *pf = vf->pf;
u32 reg, reg_idx, bit_idx;
+ struct device *dev;
struct ice_hw *hw;
int vf_abs_id, i;
+ dev = ice_pf_to_dev(pf);
hw = &pf->hw;
vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;
@@ -389,7 +423,7 @@ static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr)
* by the time we get here.
*/
if (!is_pfr)
- wr32(hw, VF_MBX_ARQLEN(vf_abs_id), 0);
+ wr32(hw, VF_MBX_ARQLEN(vf->vf_id), 0);
/* In the case of a VFLR, the HW has already reset the VF and we
* just need to clean up, so don't hit the VFRTRIG register.
@@ -414,7 +448,7 @@ static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr)
if ((reg & VF_TRANS_PENDING_M) == 0)
break;
- dev_err(&pf->pdev->dev,
+ dev_err(dev,
"VF %d PCI transactions stuck\n", vf->vf_id);
udelay(ICE_PCI_CIAD_WAIT_DELAY_US);
}
@@ -457,13 +491,12 @@ static void ice_vsi_kill_pvid_fill_ctxt(struct ice_vsi_ctx *ctxt)
*/
static int ice_vsi_manage_pvid(struct ice_vsi *vsi, u16 vid, bool enable)
{
- struct device *dev = &vsi->back->pdev->dev;
struct ice_hw *hw = &vsi->back->hw;
struct ice_vsi_ctx *ctxt;
enum ice_status status;
int ret = 0;
- ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL);
+ ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
if (!ctxt)
return -ENOMEM;
@@ -475,7 +508,7 @@ static int ice_vsi_manage_pvid(struct ice_vsi *vsi, u16 vid, bool enable)
status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (status) {
- dev_info(dev, "update VSI for port VLAN failed, err %d aq_err %d\n",
+ dev_info(&vsi->back->pdev->dev, "update VSI for port VLAN failed, err %d aq_err %d\n",
status, hw->adminq.sq_last_status);
ret = -EIO;
goto out;
@@ -483,7 +516,7 @@ static int ice_vsi_manage_pvid(struct ice_vsi *vsi, u16 vid, bool enable)
vsi->info = ctxt->info;
out:
- devm_kfree(dev, ctxt);
+ kfree(ctxt);
return ret;
}
@@ -531,14 +564,16 @@ static int ice_alloc_vsi_res(struct ice_vf *vf)
LIST_HEAD(tmp_add_list);
u8 broadcast[ETH_ALEN];
struct ice_vsi *vsi;
+ struct device *dev;
int status = 0;
+ dev = ice_pf_to_dev(pf);
/* first vector index is the VFs OICR index */
vf->first_vector_idx = ice_calc_vf_first_vector_idx(pf, vf);
vsi = ice_vf_vsi_setup(pf, pf->hw.port_info, vf->vf_id);
if (!vsi) {
- dev_err(&pf->pdev->dev, "Failed to create VF VSI\n");
+ dev_err(dev, "Failed to create VF VSI\n");
return -ENOMEM;
}
@@ -566,8 +601,7 @@ static int ice_alloc_vsi_res(struct ice_vf *vf)
status = ice_add_mac(&pf->hw, &tmp_add_list);
if (status)
- dev_err(&pf->pdev->dev,
- "could not add mac filters error %d\n", status);
+ dev_err(dev, "could not add mac filters error %d\n", status);
else
vf->num_mac = 1;
@@ -578,7 +612,7 @@ static int ice_alloc_vsi_res(struct ice_vf *vf)
* more vectors.
*/
ice_alloc_vsi_res_exit:
- ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
+ ice_free_fltr_list(dev, &tmp_add_list);
return status;
}
@@ -634,10 +668,12 @@ static void ice_ena_vf_mappings(struct ice_vf *vf)
int abs_vf_id, abs_first, abs_last;
struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
+ struct device *dev;
int first, last, v;
struct ice_hw *hw;
u32 reg;
+ dev = ice_pf_to_dev(pf);
hw = &pf->hw;
vsi = pf->vsi[vf->lan_vsi_idx];
first = vf->first_vector_idx;
@@ -685,8 +721,7 @@ static void ice_ena_vf_mappings(struct ice_vf *vf)
VPLAN_TX_QBASE_VFNUMQ_M));
wr32(hw, VPLAN_TX_QBASE(vf->vf_id), reg);
} else {
- dev_err(&pf->pdev->dev,
- "Scattered mode for VF Tx queues is not yet implemented\n");
+ dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");
}
/* set regardless of mapping mode */
@@ -704,8 +739,7 @@ static void ice_ena_vf_mappings(struct ice_vf *vf)
VPLAN_RX_QBASE_VFNUMQ_M));
wr32(hw, VPLAN_RX_QBASE(vf->vf_id), reg);
} else {
- dev_err(&pf->pdev->dev,
- "Scattered mode for VF Rx queues is not yet implemented\n");
+ dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
}
}
@@ -851,6 +885,7 @@ static int ice_check_avail_res(struct ice_pf *pf)
{
int max_valid_res_idx = ice_get_max_valid_res_idx(pf->irq_tracker);
u16 num_msix, num_txq, num_rxq, num_avail_msix;
+ struct device *dev = ice_pf_to_dev(pf);
if (!pf->num_alloc_vfs || max_valid_res_idx < 0)
return -EINVAL;
@@ -883,8 +918,7 @@ static int ice_check_avail_res(struct ice_pf *pf)
ICE_DFLT_INTR_PER_VF,
ICE_MIN_INTR_PER_VF);
} else {
- dev_err(&pf->pdev->dev,
- "Number of VFs %d exceeds max VF count %d\n",
+ dev_err(dev, "Number of VFs %d exceeds max VF count %d\n",
pf->num_alloc_vfs, ICE_MAX_VF_COUNT);
return -EIO;
}
@@ -1022,12 +1056,12 @@ ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m,
*/
static bool ice_config_res_vfs(struct ice_pf *pf)
{
+ struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
int v;
if (ice_check_avail_res(pf)) {
- dev_err(&pf->pdev->dev,
- "Cannot allocate VF resources, try with fewer number of VFs\n");
+ dev_err(dev, "Cannot allocate VF resources, try with fewer number of VFs\n");
return false;
}
@@ -1040,9 +1074,8 @@ static bool ice_config_res_vfs(struct ice_pf *pf)
struct ice_vf *vf = &pf->vf[v];
vf->num_vf_qs = pf->num_vf_qps;
- dev_dbg(&pf->pdev->dev,
- "VF-id %d has %d queues configured\n",
- vf->vf_id, vf->num_vf_qs);
+ dev_dbg(dev, "VF-id %d has %d queues configured\n", vf->vf_id,
+ vf->num_vf_qs);
ice_cleanup_and_realloc_vf(vf);
}
@@ -1066,6 +1099,7 @@ static bool ice_config_res_vfs(struct ice_pf *pf)
*/
bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
{
+ struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
struct ice_vf *vf;
int v, i;
@@ -1124,7 +1158,7 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
* time, but continue on with the operation.
*/
if (v < pf->num_alloc_vfs)
- dev_warn(&pf->pdev->dev, "VF reset check timeout\n");
+ dev_warn(dev, "VF reset check timeout\n");
/* free VF resources to begin resetting the VSI state */
for (v = 0; v < pf->num_alloc_vfs; v++) {
@@ -1141,8 +1175,7 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
}
if (ice_sriov_free_msix_res(pf))
- dev_err(&pf->pdev->dev,
- "Failed to free MSIX resources used by SR-IOV\n");
+ dev_err(dev, "Failed to free MSIX resources used by SR-IOV\n");
if (!ice_config_res_vfs(pf))
return false;
@@ -1151,6 +1184,25 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
}
/**
+ * ice_is_vf_disabled
+ * @vf: pointer to the VF info
+ *
+ * Returns true if the PF or VF is disabled, false otherwise.
+ */
+static bool ice_is_vf_disabled(struct ice_vf *vf)
+{
+ struct ice_pf *pf = vf->pf;
+
+ /* If the PF has been disabled, there is no need to reset the VF until
+ * the PF is active again. Similarly, if the VF has been disabled, this
+ * means something else is resetting the VF, so we shouldn't continue.
+ * Otherwise, the caller sets the VF disable state bit and proceeds with
+ * the actual reset.
+ */
+ return (test_bit(__ICE_VF_DIS, pf->state) ||
+ test_bit(ICE_VF_STATE_DIS, vf->vf_states));
+}
+
+/**
* ice_reset_vf - Reset a particular VF
* @vf: pointer to the VF structure
* @is_vflr: true if VFLR was issued, false if not
@@ -1161,25 +1213,23 @@ static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
{
struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
+ struct device *dev;
struct ice_hw *hw;
bool rsd = false;
u8 promisc_m;
u32 reg;
int i;
- /* If the PF has been disabled, there is no need resetting VF until
- * PF is active again.
- */
- if (test_bit(__ICE_VF_DIS, pf->state))
- return false;
+ dev = ice_pf_to_dev(pf);
- /* If the VF has been disabled, this means something else is
- * resetting the VF, so we shouldn't continue. Otherwise, set
- * disable VF state bit for actual reset, and continue.
- */
- if (test_and_set_bit(ICE_VF_STATE_DIS, vf->vf_states))
- return false;
+ if (ice_is_vf_disabled(vf)) {
+ dev_dbg(dev, "VF %d is already disabled; skipping reset and reporting success\n",
+ vf->vf_id);
+ return true;
+ }
+ /* Set VF disable bit state here, before triggering reset */
+ set_bit(ICE_VF_STATE_DIS, vf->vf_states);
ice_trigger_vf_reset(vf, is_vflr, false);
vsi = pf->vsi[vf->lan_vsi_idx];
@@ -1216,8 +1266,7 @@ static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
* continue on with the operation.
*/
if (!rsd)
- dev_warn(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
- vf->vf_id);
+ dev_warn(dev, "VF reset check timeout on VF %d\n", vf->vf_id);
/* disable promiscuous modes in case they were enabled
* ignore any error if disabling process failed
@@ -1231,7 +1280,7 @@ static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
vsi = pf->vsi[vf->lan_vsi_idx];
if (ice_vf_set_vsi_promisc(vf, vsi, promisc_m, true))
- dev_err(&pf->pdev->dev, "disabling promiscuous mode failed\n");
+ dev_err(dev, "disabling promiscuous mode failed\n");
}
/* free VF resources to begin resetting the VSI state */
@@ -1282,19 +1331,26 @@ void ice_vc_notify_reset(struct ice_pf *pf)
static void ice_vc_notify_vf_reset(struct ice_vf *vf)
{
struct virtchnl_pf_event pfe;
+ struct ice_pf *pf;
+
+ if (!vf)
+ return;
- /* validate the request */
- if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
+ pf = vf->pf;
+ if (ice_validate_vf_id(pf, vf->vf_id))
return;
- /* verify if the VF is in either init or active before proceeding */
- if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
- !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
+ /* Bail out if the VF is disabled, or is neither initialized nor
+ * active; otherwise proceed with the notification
+ */
+ if ((!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
+ !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) ||
+ test_bit(ICE_VF_STATE_DIS, vf->vf_states))
return;
pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
- ice_aq_send_msg_to_vf(&vf->pf->hw, vf->vf_id, VIRTCHNL_OP_EVENT,
+ ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, VIRTCHNL_OP_EVENT,
VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe, sizeof(pfe),
NULL);
}
@@ -1306,6 +1362,7 @@ static void ice_vc_notify_vf_reset(struct ice_vf *vf)
*/
static int ice_alloc_vfs(struct ice_pf *pf, u16 num_alloc_vfs)
{
+ struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
struct ice_vf *vfs;
int i, ret;
@@ -1322,8 +1379,7 @@ static int ice_alloc_vfs(struct ice_pf *pf, u16 num_alloc_vfs)
goto err_unroll_intr;
}
/* allocate memory */
- vfs = devm_kcalloc(&pf->pdev->dev, num_alloc_vfs, sizeof(*vfs),
- GFP_KERNEL);
+ vfs = devm_kcalloc(dev, num_alloc_vfs, sizeof(*vfs), GFP_KERNEL);
if (!vfs) {
ret = -ENOMEM;
goto err_pci_disable_sriov;
@@ -1352,7 +1408,7 @@ static int ice_alloc_vfs(struct ice_pf *pf, u16 num_alloc_vfs)
err_unroll_sriov:
pf->vf = NULL;
- devm_kfree(&pf->pdev->dev, vfs);
+ devm_kfree(dev, vfs);
vfs = NULL;
pf->num_alloc_vfs = 0;
err_pci_disable_sriov:
@@ -1397,7 +1453,7 @@ static bool ice_pf_state_is_nominal(struct ice_pf *pf)
static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs)
{
int pre_existing_vfs = pci_num_vf(pf->pdev);
- struct device *dev = &pf->pdev->dev;
+ struct device *dev = ice_pf_to_dev(pf);
int err;
if (!ice_pf_state_is_nominal(pf)) {
@@ -1407,7 +1463,7 @@ static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs)
if (!test_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags)) {
dev_err(dev, "This device is not capable of SR-IOV\n");
- return -ENODEV;
+ return -EOPNOTSUPP;
}
if (pre_existing_vfs && pre_existing_vfs != num_vfs)
@@ -1442,10 +1498,10 @@ static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs)
int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
struct ice_pf *pf = pci_get_drvdata(pdev);
+ struct device *dev = ice_pf_to_dev(pf);
if (ice_is_safe_mode(pf)) {
- dev_err(&pf->pdev->dev,
- "SR-IOV cannot be configured - Device is in Safe Mode\n");
+ dev_err(dev, "SR-IOV cannot be configured - Device is in Safe Mode\n");
return -EOPNOTSUPP;
}
@@ -1455,8 +1511,7 @@ int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
if (!pci_vfs_assigned(pdev)) {
ice_free_vfs(pf);
} else {
- dev_err(&pf->pdev->dev,
- "can't free VFs because some are assigned to VMs.\n");
+ dev_err(dev, "can't free VFs because some are assigned to VMs.\n");
return -EBUSY;
}
@@ -1495,12 +1550,10 @@ void ice_process_vflr_event(struct ice_pf *pf)
}
/**
- * ice_vc_dis_vf - Disable a given VF via SW reset
+ * ice_vc_reset_vf - Perform software reset on the VF after informing the AVF
* @vf: pointer to the VF info
- *
- * Disable the VF through a SW reset
*/
-static void ice_vc_dis_vf(struct ice_vf *vf)
+static void ice_vc_reset_vf(struct ice_vf *vf)
{
ice_vc_notify_vf_reset(vf);
ice_reset_vf(vf, false);
@@ -1521,24 +1574,28 @@ ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
{
enum ice_status aq_ret;
+ struct device *dev;
struct ice_pf *pf;
- /* validate the request */
- if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
+ if (!vf)
return -EINVAL;
pf = vf->pf;
+ if (ice_validate_vf_id(pf, vf->vf_id))
+ return -EINVAL;
+
+ dev = ice_pf_to_dev(pf);
/* single place to detect unsuccessful return values */
if (v_retval) {
vf->num_inval_msgs++;
- dev_info(&pf->pdev->dev, "VF %d failed opcode %d, retval: %d\n",
- vf->vf_id, v_opcode, v_retval);
+ dev_info(dev, "VF %d failed opcode %d, retval: %d\n", vf->vf_id,
+ v_opcode, v_retval);
if (vf->num_inval_msgs > ICE_DFLT_NUM_INVAL_MSGS_ALLOWED) {
- dev_err(&pf->pdev->dev,
+ dev_err(dev,
"Number of invalid messages exceeded for VF %d\n",
vf->vf_id);
- dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n");
+ dev_err(dev, "Use PF Control I/F to enable the VF\n");
set_bit(ICE_VF_STATE_DIS, vf->vf_states);
return -EIO;
}
@@ -1551,7 +1608,7 @@ ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval,
msg, msglen, NULL);
if (aq_ret && pf->hw.mailboxq.sq_last_status != ICE_AQ_RC_ENOSYS) {
- dev_info(&pf->pdev->dev,
+ dev_info(dev,
"Unable to send the message to VF %d ret %d aq_err %d\n",
vf->vf_id, aq_ret, pf->hw.mailboxq.sq_last_status);
return -EIO;
@@ -1599,14 +1656,14 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
int len = 0;
int ret;
- if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
+ if (ice_check_vf_init(pf, vf)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto err;
}
len = sizeof(struct virtchnl_vf_resource);
- vfres = devm_kzalloc(&pf->pdev->dev, len, GFP_KERNEL);
+ vfres = kzalloc(len, GFP_KERNEL);
if (!vfres) {
v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
len = 0;
@@ -1672,6 +1729,9 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
vf->dflt_lan_addr.addr);
+ /* match guest capabilities */
+ vf->driver_caps = vfres->vf_cap_flags;
+
set_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
err:
@@ -1679,7 +1739,7 @@ err:
ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES, v_ret,
(u8 *)vfres, len);
- devm_kfree(&pf->pdev->dev, vfres);
+ kfree(vfres);
return ret;
}
@@ -1775,7 +1835,7 @@ static int ice_vc_config_rss_key(struct ice_vf *vf, u8 *msg)
struct virtchnl_rss_key *vrk =
(struct virtchnl_rss_key *)msg;
struct ice_pf *pf = vf->pf;
- struct ice_vsi *vsi = NULL;
+ struct ice_vsi *vsi;
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
@@ -1822,7 +1882,7 @@ static int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg)
struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;
enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
struct ice_pf *pf = vf->pf;
- struct ice_vsi *vsi = NULL;
+ struct ice_vsi *vsi;
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
@@ -1869,8 +1929,8 @@ static int ice_vc_get_stats_msg(struct ice_vf *vf, u8 *msg)
enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
struct virtchnl_queue_select *vqs =
(struct virtchnl_queue_select *)msg;
+ struct ice_eth_stats stats = { 0 };
struct ice_pf *pf = vf->pf;
- struct ice_eth_stats stats;
struct ice_vsi *vsi;
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
@@ -1889,7 +1949,6 @@ static int ice_vc_get_stats_msg(struct ice_vf *vf, u8 *msg)
goto error_param;
}
- memset(&stats, 0, sizeof(struct ice_eth_stats));
ice_update_eth_stats(vsi);
stats = vsi->eth_stats;
@@ -2159,9 +2218,11 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
vector_id = map->vector_id;
vsi_id = map->vsi_id;
- /* validate msg params */
- if (!(vector_id < pf->hw.func_caps.common_cap
- .num_msix_vectors) || !ice_vc_isvalid_vsi_id(vf, vsi_id) ||
+ /* vector_id is always 0-based for each VF, and can never be
+ * larger than or equal to the max allowed interrupts per VF
+ */
+ if (!(vector_id < ICE_MAX_INTR_PER_VF) ||
+ !ice_vc_isvalid_vsi_id(vf, vsi_id) ||
(!vector_id && (map->rxq_map || map->txq_map))) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
@@ -2252,7 +2313,7 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
if (qci->num_queue_pairs > ICE_MAX_BASE_QS_PER_VF ||
qci->num_queue_pairs > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
- dev_err(&pf->pdev->dev,
+ dev_err(ice_pf_to_dev(pf),
"VF-%d requesting more than supported number of queues: %d\n",
vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
@@ -2365,9 +2426,12 @@ ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
enum virtchnl_ops vc_op;
enum ice_status status;
struct ice_vsi *vsi;
+ struct device *dev;
int mac_count = 0;
int i;
+ dev = ice_pf_to_dev(pf);
+
if (set)
vc_op = VIRTCHNL_OP_ADD_ETH_ADDR;
else
@@ -2381,7 +2445,7 @@ ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
if (set && !ice_is_vf_trusted(vf) &&
(vf->num_mac + al->num_elements) > ICE_MAX_MACADDR_PER_VF) {
- dev_err(&pf->pdev->dev,
+ dev_err(dev,
"Can't add more MAC addresses, because VF-%d is not trusted, switch the VF to trusted mode in order to add more functionalities\n",
vf->vf_id);
/* There is no need to let VF know about not being trusted
@@ -2406,13 +2470,13 @@ ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
/* VF is trying to add filters that the PF
* already added. Just continue.
*/
- dev_info(&pf->pdev->dev,
+ dev_info(dev,
"MAC %pM already set for VF %d\n",
maddr, vf->vf_id);
continue;
} else {
/* VF can't remove dflt_lan_addr/bcast MAC */
- dev_err(&pf->pdev->dev,
+ dev_err(dev,
"VF can't remove default MAC address or MAC %pM programmed by PF for VF %d\n",
maddr, vf->vf_id);
continue;
@@ -2421,7 +2485,7 @@ ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
/* check for the invalid cases and bail if necessary */
if (is_zero_ether_addr(maddr)) {
- dev_err(&pf->pdev->dev,
+ dev_err(dev,
"invalid MAC %pM provided for VF %d\n",
maddr, vf->vf_id);
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
@@ -2430,7 +2494,7 @@ ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
if (is_unicast_ether_addr(maddr) &&
!ice_can_vf_change_mac(vf)) {
- dev_err(&pf->pdev->dev,
+ dev_err(dev,
"can't change unicast MAC for untrusted VF %d\n",
vf->vf_id);
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
@@ -2441,12 +2505,12 @@ ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
status = ice_vsi_cfg_mac_fltr(vsi, maddr, set);
if (status == ICE_ERR_DOES_NOT_EXIST ||
status == ICE_ERR_ALREADY_EXISTS) {
- dev_info(&pf->pdev->dev,
+ dev_info(dev,
"can't %s MAC filters %pM for VF %d, error %d\n",
set ? "add" : "remove", maddr, vf->vf_id,
status);
} else if (status) {
- dev_err(&pf->pdev->dev,
+ dev_err(dev,
"can't %s MAC filters for VF %d, error %d\n",
set ? "add" : "remove", vf->vf_id, status);
v_ret = ice_err_to_virt_err(status);
@@ -2511,7 +2575,9 @@ static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
u16 max_allowed_vf_queues;
u16 tx_rx_queue_left;
u16 cur_queues;
+ struct device *dev;
+ dev = ice_pf_to_dev(pf);
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
@@ -2522,17 +2588,15 @@ static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
ice_get_avail_rxq_count(pf));
max_allowed_vf_queues = tx_rx_queue_left + cur_queues;
if (!req_queues) {
- dev_err(&pf->pdev->dev,
- "VF %d tried to request 0 queues. Ignoring.\n",
+ dev_err(dev, "VF %d tried to request 0 queues. Ignoring.\n",
vf->vf_id);
} else if (req_queues > ICE_MAX_BASE_QS_PER_VF) {
- dev_err(&pf->pdev->dev,
- "VF %d tried to request more than %d queues.\n",
+ dev_err(dev, "VF %d tried to request more than %d queues.\n",
vf->vf_id, ICE_MAX_BASE_QS_PER_VF);
vfres->num_queue_pairs = ICE_MAX_BASE_QS_PER_VF;
} else if (req_queues > cur_queues &&
req_queues - cur_queues > tx_rx_queue_left) {
- dev_warn(&pf->pdev->dev,
+ dev_warn(dev,
"VF %d requested %u more queues, but only %u left.\n",
vf->vf_id, req_queues - cur_queues, tx_rx_queue_left);
vfres->num_queue_pairs = min_t(u16, max_allowed_vf_queues,
@@ -2540,9 +2604,8 @@ static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
} else {
/* request is successful, then reset VF */
vf->num_req_qs = req_queues;
- ice_vc_dis_vf(vf);
- dev_info(&pf->pdev->dev,
- "VF %d granted request of %u queues.\n",
+ ice_vc_reset_vf(vf);
+ dev_info(dev, "VF %d granted request of %u queues.\n",
vf->vf_id, req_queues);
return 0;
}
@@ -2568,39 +2631,34 @@ ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
__be16 vlan_proto)
{
u16 vlanprio = vlan_id | (qos << ICE_VLAN_PRIORITY_S);
- struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_pf *pf = np->vsi->back;
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
struct ice_vsi *vsi;
+ struct device *dev;
struct ice_vf *vf;
int ret = 0;
- /* validate the request */
- if (vf_id >= pf->num_alloc_vfs) {
- dev_err(&pf->pdev->dev, "invalid VF id: %d\n", vf_id);
+ dev = ice_pf_to_dev(pf);
+ if (ice_validate_vf_id(pf, vf_id))
return -EINVAL;
- }
if (vlan_id > ICE_MAX_VLANID || qos > 7) {
- dev_err(&pf->pdev->dev, "Invalid VF Parameters\n");
+ dev_err(dev, "Invalid VF Parameters\n");
return -EINVAL;
}
if (vlan_proto != htons(ETH_P_8021Q)) {
- dev_err(&pf->pdev->dev, "VF VLAN protocol is not supported\n");
+ dev_err(dev, "VF VLAN protocol is not supported\n");
return -EPROTONOSUPPORT;
}
vf = &pf->vf[vf_id];
vsi = pf->vsi[vf->lan_vsi_idx];
- if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
- dev_err(&pf->pdev->dev, "VF %d in reset. Try again.\n", vf_id);
+ if (ice_check_vf_init(pf, vf))
return -EBUSY;
- }
if (le16_to_cpu(vsi->info.pvid) == vlanprio) {
/* duplicate request, so just return success */
- dev_info(&pf->pdev->dev,
- "Duplicate pvid %d request\n", vlanprio);
+ dev_dbg(dev, "Duplicate pvid %d request\n", vlanprio);
return ret;
}
@@ -2619,7 +2677,7 @@ ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
}
if (vlan_id) {
- dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
+ dev_info(dev, "Setting VLAN %d, QoS 0x%x on VF %d\n",
vlan_id, qos, vf_id);
/* add new VLAN filter for each MAC */
@@ -2638,6 +2696,17 @@ error_set_pvid:
}
/**
+ * ice_vf_vlan_offload_ena - determine if capabilities support VLAN offloads
+ * @caps: VF driver negotiated capabilities
+ *
+ * Return true if VIRTCHNL_VF_OFFLOAD_VLAN capability is set, else return false
+ */
+static bool ice_vf_vlan_offload_ena(u32 caps)
+{
+ return !!(caps & VIRTCHNL_VF_OFFLOAD_VLAN);
+}
+
+/**
* ice_vc_process_vlan_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
@@ -2653,16 +2722,23 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
struct ice_pf *pf = vf->pf;
bool vlan_promisc = false;
struct ice_vsi *vsi;
+ struct device *dev;
struct ice_hw *hw;
int status = 0;
u8 promisc_m;
int i;
+ dev = ice_pf_to_dev(pf);
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
+ if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
if (!ice_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
@@ -2670,7 +2746,7 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
if (add_v && !ice_is_vf_trusted(vf) &&
vf->num_vlan >= ICE_MAX_VLAN_PER_VF) {
- dev_info(&pf->pdev->dev,
+ dev_info(dev,
"VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
vf->vf_id);
/* There is no need to let VF know about being not trusted,
@@ -2682,7 +2758,7 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
for (i = 0; i < vfl->num_elements; i++) {
if (vfl->vlan_id[i] > ICE_MAX_VLANID) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- dev_err(&pf->pdev->dev,
+ dev_err(dev,
"invalid VF VLAN id %d\n", vfl->vlan_id[i]);
goto error_param;
}
@@ -2700,14 +2776,6 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
goto error_param;
}
- if (ice_vsi_manage_vlan_stripping(vsi, add_v)) {
- dev_err(&pf->pdev->dev,
- "%sable VLAN stripping failed for VSI %i\n",
- add_v ? "en" : "dis", vsi->vsi_num);
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
vlan_promisc = true;
@@ -2718,7 +2786,7 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
if (!ice_is_vf_trusted(vf) &&
vf->num_vlan >= ICE_MAX_VLAN_PER_VF) {
- dev_info(&pf->pdev->dev,
+ dev_info(dev,
"VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
vf->vf_id);
/* There is no need to let VF know about being
@@ -2739,7 +2807,7 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
status = ice_cfg_vlan_pruning(vsi, true, false);
if (status) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- dev_err(&pf->pdev->dev,
+ dev_err(dev,
"Enable VLAN pruning on VLAN ID: %d failed error-%d\n",
vid, status);
goto error_param;
@@ -2753,7 +2821,7 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
promisc_m, vid);
if (status) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- dev_err(&pf->pdev->dev,
+ dev_err(dev,
"Enable Unicast/multicast promiscuous mode on VLAN ID:%d failed error-%d\n",
vid, status);
}
@@ -2848,6 +2916,11 @@ static int ice_vc_ena_vlan_stripping(struct ice_vf *vf)
goto error_param;
}
+ if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
vsi = pf->vsi[vf->lan_vsi_idx];
if (ice_vsi_manage_vlan_stripping(vsi, true))
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
@@ -2874,6 +2947,11 @@ static int ice_vc_dis_vlan_stripping(struct ice_vf *vf)
goto error_param;
}
+ if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
vsi = pf->vsi[vf->lan_vsi_idx];
if (!vsi) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
@@ -2889,6 +2967,33 @@ error_param:
}
/**
+ * ice_vf_init_vlan_stripping - enable/disable VLAN stripping on initialization
+ * @vf: VF to enable/disable VLAN stripping for on initialization
+ *
+ * If the VIRTCHNL_VF_OFFLOAD_VLAN flag is set, enable VLAN stripping;
+ * otherwise disable it. For example, the flag will be cleared when port
+ * VLANs are configured by the administrator before passing the VF to the
+ * guest, or when the AVF driver doesn't support VLAN offloads.
+ */
+static int ice_vf_init_vlan_stripping(struct ice_vf *vf)
+{
+ struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
+
+ if (!vsi)
+ return -EINVAL;
+
+ /* don't modify stripping if port VLAN is configured */
+ if (vsi->info.pvid)
+ return 0;
+
+ if (ice_vf_vlan_offload_ena(vf->driver_caps))
+ return ice_vsi_manage_vlan_stripping(vsi, true);
+ else
+ return ice_vsi_manage_vlan_stripping(vsi, false);
+}
+
+/**
* ice_vc_process_vf_msg - Process request from VF
* @pf: pointer to the PF structure
* @event: pointer to the AQ event
@@ -2903,9 +3008,11 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
u16 msglen = event->msg_len;
u8 *msg = event->msg_buf;
struct ice_vf *vf = NULL;
+ struct device *dev;
int err = 0;
- if (vf_id >= pf->num_alloc_vfs) {
+ dev = ice_pf_to_dev(pf);
+ if (ice_validate_vf_id(pf, vf_id)) {
err = -EINVAL;
goto error_handler;
}
@@ -2931,7 +3038,7 @@ error_handler:
if (err) {
ice_vc_send_msg_to_vf(vf, v_opcode, VIRTCHNL_STATUS_ERR_PARAM,
NULL, 0);
- dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d, error %d\n",
+ dev_err(dev, "Invalid message from VF %d, opcode %d, len %d, error %d\n",
vf_id, v_opcode, msglen, err);
return;
}
@@ -2942,6 +3049,10 @@ error_handler:
break;
case VIRTCHNL_OP_GET_VF_RESOURCES:
err = ice_vc_get_vf_res_msg(vf, msg);
+ if (ice_vf_init_vlan_stripping(vf))
+ dev_err(dev,
+ "Failed to initialize VLAN stripping for VF %d\n",
+ vf->vf_id);
ice_vc_notify_vf_link_state(vf);
break;
case VIRTCHNL_OP_RESET_VF:
@@ -2992,8 +3103,8 @@ error_handler:
break;
case VIRTCHNL_OP_UNKNOWN:
default:
- dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n",
- v_opcode, vf_id);
+ dev_err(dev, "Unsupported opcode %d from VF %d\n", v_opcode,
+ vf_id);
err = ice_vc_send_msg_to_vf(vf, v_opcode,
VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
NULL, 0);
@@ -3003,8 +3114,7 @@ error_handler:
/* Helper function cares less about error return values here
* as it is busy with pending work.
*/
- dev_info(&pf->pdev->dev,
- "PF failed to honor VF %d, opcode %d, error %d\n",
+ dev_info(dev, "PF failed to honor VF %d, opcode %d, error %d\n",
vf_id, v_opcode, err);
}
}
@@ -3020,24 +3130,18 @@ error_handler:
int
ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi)
{
- struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_vsi *vsi = np->vsi;
- struct ice_pf *pf = vsi->back;
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_vsi *vsi;
struct ice_vf *vf;
- /* validate the request */
- if (vf_id >= pf->num_alloc_vfs) {
- netdev_err(netdev, "invalid VF id: %d\n", vf_id);
+ if (ice_validate_vf_id(pf, vf_id))
return -EINVAL;
- }
vf = &pf->vf[vf_id];
vsi = pf->vsi[vf->lan_vsi_idx];
- if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
- netdev_err(netdev, "VF %d in reset. Try again.\n", vf_id);
+ if (ice_check_vf_init(pf, vf))
return -EBUSY;
- }
ivi->vf = vf_id;
ether_addr_copy(ivi->mac, vf->dflt_lan_addr.addr);
@@ -3070,33 +3174,29 @@ ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi)
*/
int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
{
- struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_vsi *vsi = np->vsi;
- struct ice_pf *pf = vsi->back;
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_vsi *vsi = pf->vsi[0];
struct ice_vsi_ctx *ctx;
enum ice_status status;
+ struct device *dev;
struct ice_vf *vf;
int ret = 0;
- /* validate the request */
- if (vf_id >= pf->num_alloc_vfs) {
- netdev_err(netdev, "invalid VF id: %d\n", vf_id);
+ dev = ice_pf_to_dev(pf);
+ if (ice_validate_vf_id(pf, vf_id))
return -EINVAL;
- }
vf = &pf->vf[vf_id];
- if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
- netdev_err(netdev, "VF %d in reset. Try again.\n", vf_id);
+ if (ice_check_vf_init(pf, vf))
return -EBUSY;
- }
if (ena == vf->spoofchk) {
- dev_dbg(&pf->pdev->dev, "VF spoofchk already %s\n",
+ dev_dbg(dev, "VF spoofchk already %s\n",
ena ? "ON" : "OFF");
return 0;
}
- ctx = devm_kzalloc(&pf->pdev->dev, sizeof(*ctx), GFP_KERNEL);
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
@@ -3109,7 +3209,7 @@ int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
status = ice_update_vsi(&pf->hw, vsi->idx, ctx, NULL);
if (status) {
- dev_dbg(&pf->pdev->dev,
+ dev_dbg(dev,
"Error %d, failed to update VSI* parameters\n", status);
ret = -EIO;
goto out;
@@ -3119,11 +3219,28 @@ int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
vsi->info.sec_flags = ctx->info.sec_flags;
vsi->info.sw_flags2 = ctx->info.sw_flags2;
out:
- devm_kfree(&pf->pdev->dev, ctx);
+ kfree(ctx);
return ret;
}
/**
+ * ice_wait_on_vf_reset
+ * @vf: the VF being reset
+ *
+ * Poll to make sure a given VF is ready after reset
+ */
+static void ice_wait_on_vf_reset(struct ice_vf *vf)
+{
+ int i;
+
+ for (i = 0; i < ICE_MAX_VF_RESET_WAIT; i++) {
+ if (test_bit(ICE_VF_STATE_INIT, vf->vf_states))
+ break;
+ msleep(20);
+ }
+}
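The loop above polls ICE_MAX_VF_RESET_WAIT times with a 20 ms sleep; with the value of 15 defined later in this patch, the worst-case wait is:

    /* 15 iterations * 20 ms = 300 ms before giving up on INIT */

which lines up with the "~250 ms" figure quoted by the callers.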
+
+/**
* ice_set_vf_mac
* @netdev: network interface device structure
* @vf_id: VF identifier
@@ -3133,23 +3250,25 @@ out:
*/
int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
{
- struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_vsi *vsi = np->vsi;
- struct ice_pf *pf = vsi->back;
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
struct ice_vf *vf;
int ret = 0;
- /* validate the request */
- if (vf_id >= pf->num_alloc_vfs) {
- netdev_err(netdev, "invalid VF id: %d\n", vf_id);
+ if (ice_validate_vf_id(pf, vf_id))
return -EINVAL;
- }
vf = &pf->vf[vf_id];
- if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
- netdev_err(netdev, "VF %d in reset. Try again.\n", vf_id);
+ /* Don't set MAC on disabled VF */
+ if (ice_is_vf_disabled(vf))
+ return -EINVAL;
+
+ /* In case the VF is in reset mode, wait until it is completed. Depending
+ * on factors such as the queue disabling routine, this can take ~250 ms
+ */
+ ice_wait_on_vf_reset(vf);
+
+ if (ice_check_vf_init(pf, vf))
return -EBUSY;
- }
if (is_zero_ether_addr(mac) || is_multicast_ether_addr(mac)) {
netdev_err(netdev, "%pM not a valid unicast address\n", mac);
@@ -3167,7 +3286,7 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
"MAC on VF %d set to %pM. VF driver will be reinitialized\n",
vf_id, mac);
- ice_vc_dis_vf(vf);
+ ice_vc_reset_vf(vf);
return ret;
}
@@ -3181,30 +3300,34 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
*/
int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
{
- struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_vsi *vsi = np->vsi;
- struct ice_pf *pf = vsi->back;
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct device *dev;
struct ice_vf *vf;
- /* validate the request */
- if (vf_id >= pf->num_alloc_vfs) {
- dev_err(&pf->pdev->dev, "invalid VF id: %d\n", vf_id);
+ dev = ice_pf_to_dev(pf);
+ if (ice_validate_vf_id(pf, vf_id))
return -EINVAL;
- }
vf = &pf->vf[vf_id];
- if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
- dev_err(&pf->pdev->dev, "VF %d in reset. Try again.\n", vf_id);
+ /* Don't set Trusted Mode on disabled VF */
+ if (ice_is_vf_disabled(vf))
+ return -EINVAL;
+
+ /* In case the VF is in reset mode, wait until it is completed. Depending
+ * on factors such as the queue disabling routine, this can take ~250 ms
+ */
+ ice_wait_on_vf_reset(vf);
+
+ if (ice_check_vf_init(pf, vf))
return -EBUSY;
- }
/* Check if already trusted */
if (trusted == vf->trusted)
return 0;
vf->trusted = trusted;
- ice_vc_dis_vf(vf);
- dev_info(&pf->pdev->dev, "VF %u is now %strusted\n",
+ ice_vc_reset_vf(vf);
+ dev_info(dev, "VF %u is now %strusted\n",
vf_id, trusted ? "" : "un");
return 0;
@@ -3220,26 +3343,21 @@ int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
*/
int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state)
{
- struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_pf *pf = np->vsi->back;
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
struct virtchnl_pf_event pfe = { 0 };
struct ice_link_status *ls;
struct ice_vf *vf;
struct ice_hw *hw;
- if (vf_id >= pf->num_alloc_vfs) {
- dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
+ if (ice_validate_vf_id(pf, vf_id))
return -EINVAL;
- }
vf = &pf->vf[vf_id];
hw = &pf->hw;
ls = &pf->hw.port_info->phy.link_info;
- if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
- dev_err(&pf->pdev->dev, "vf %d in reset. Try again.\n", vf_id);
+ if (ice_check_vf_init(pf, vf))
return -EBUSY;
- }
pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
pfe.severity = PF_EVENT_SEVERITY_INFO;
@@ -3273,3 +3391,48 @@ int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state)
return 0;
}
+
+/**
+ * ice_get_vf_stats - populate some stats for the VF
+ * @netdev: the netdev of the PF
+ * @vf_id: the host OS identifier (0-255)
+ * @vf_stats: pointer to the OS memory to be initialized
+ */
+int ice_get_vf_stats(struct net_device *netdev, int vf_id,
+ struct ifla_vf_stats *vf_stats)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_eth_stats *stats;
+ struct ice_vsi *vsi;
+ struct ice_vf *vf;
+
+ if (ice_validate_vf_id(pf, vf_id))
+ return -EINVAL;
+
+ vf = &pf->vf[vf_id];
+
+ if (ice_check_vf_init(pf, vf))
+ return -EBUSY;
+
+ vsi = pf->vsi[vf->lan_vsi_idx];
+ if (!vsi)
+ return -EINVAL;
+
+ ice_update_eth_stats(vsi);
+ stats = &vsi->eth_stats;
+
+ memset(vf_stats, 0, sizeof(*vf_stats));
+
+ vf_stats->rx_packets = stats->rx_unicast + stats->rx_broadcast +
+ stats->rx_multicast;
+ vf_stats->tx_packets = stats->tx_unicast + stats->tx_broadcast +
+ stats->tx_multicast;
+ vf_stats->rx_bytes = stats->rx_bytes;
+ vf_stats->tx_bytes = stats->tx_bytes;
+ vf_stats->broadcast = stats->rx_broadcast;
+ vf_stats->multicast = stats->rx_multicast;
+ vf_stats->rx_dropped = stats->rx_discards;
+ vf_stats->tx_dropped = stats->tx_discards;
+
+ return 0;
+}
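ice_get_vf_stats() fills the standard ifla_vf_stats structure, so it is presumably exposed through the PF's net_device_ops as the .ndo_get_vf_stats callback (the hookup itself lives in ice_main.c, outside this excerpt), after which per-VF counters become visible to userspace tools such as ip -s link:

    /* sketch of the assumed wiring; the real ops table is in ice_main.c */
    static const struct net_device_ops ice_netdev_ops = {
        /* ... */
        .ndo_get_vf_stats = ice_get_vf_stats,
    };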
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
index 0d9880c8bba3..88aa65d5cb31 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
@@ -38,6 +38,7 @@
#define ICE_MAX_POLICY_INTR_PER_VF 33
#define ICE_MIN_INTR_PER_VF (ICE_MIN_QS_PER_VF + 1)
#define ICE_DFLT_INTR_PER_VF (ICE_DFLT_QS_PER_VF + 1)
+#define ICE_MAX_VF_RESET_WAIT 15
/* Specific VF states */
enum ice_vf_states {
@@ -121,6 +122,9 @@ int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena);
int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector);
void ice_set_vf_state_qs_dis(struct ice_vf *vf);
+int
+ice_get_vf_stats(struct net_device *netdev, int vf_id,
+ struct ifla_vf_stats *vf_stats);
#else /* CONFIG_PCI_IOV */
#define ice_process_vflr_event(pf) do {} while (0)
#define ice_free_vfs(pf) do {} while (0)
@@ -193,5 +197,13 @@ ice_calc_vf_reg_idx(struct ice_vf __always_unused *vf,
{
return 0;
}
+
+static inline int
+ice_get_vf_stats(struct net_device __always_unused *netdev,
+ int __always_unused vf_id,
+ struct ifla_vf_stats __always_unused *vf_stats)
+{
+ return -EOPNOTSUPP;
+}
#endif /* CONFIG_PCI_IOV */
#endif /* _ICE_VIRTCHNL_PF_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
new file mode 100644
index 000000000000..cf9b8b22d24f
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -0,0 +1,1181 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019, Intel Corporation. */
+
+#include <linux/bpf_trace.h>
+#include <net/xdp_sock.h>
+#include <net/xdp.h>
+#include "ice.h"
+#include "ice_base.h"
+#include "ice_type.h"
+#include "ice_xsk.h"
+#include "ice_txrx.h"
+#include "ice_txrx_lib.h"
+#include "ice_lib.h"
+
+/**
+ * ice_qp_reset_stats - Resets all stats for rings of given index
+ * @vsi: VSI that contains rings of interest
+ * @q_idx: ring index in array
+ */
+static void ice_qp_reset_stats(struct ice_vsi *vsi, u16 q_idx)
+{
+ memset(&vsi->rx_rings[q_idx]->rx_stats, 0,
+ sizeof(vsi->rx_rings[q_idx]->rx_stats));
+ memset(&vsi->tx_rings[q_idx]->stats, 0,
+ sizeof(vsi->tx_rings[q_idx]->stats));
+ if (ice_is_xdp_ena_vsi(vsi))
+ memset(&vsi->xdp_rings[q_idx]->stats, 0,
+ sizeof(vsi->xdp_rings[q_idx]->stats));
+}
+
+/**
+ * ice_qp_clean_rings - Cleans all the rings of a given index
+ * @vsi: VSI that contains rings of interest
+ * @q_idx: ring index in array
+ */
+static void ice_qp_clean_rings(struct ice_vsi *vsi, u16 q_idx)
+{
+ ice_clean_tx_ring(vsi->tx_rings[q_idx]);
+ if (ice_is_xdp_ena_vsi(vsi))
+ ice_clean_tx_ring(vsi->xdp_rings[q_idx]);
+ ice_clean_rx_ring(vsi->rx_rings[q_idx]);
+}
+
+/**
+ * ice_qvec_toggle_napi - Enables/disables NAPI for a given q_vector
+ * @vsi: VSI that has netdev
+ * @q_vector: q_vector that has NAPI context
+ * @enable: true for enable, false for disable
+ */
+static void
+ice_qvec_toggle_napi(struct ice_vsi *vsi, struct ice_q_vector *q_vector,
+ bool enable)
+{
+ if (!vsi->netdev || !q_vector)
+ return;
+
+ if (enable)
+ napi_enable(&q_vector->napi);
+ else
+ napi_disable(&q_vector->napi);
+}
+
+/**
+ * ice_qvec_dis_irq - Mask off queue interrupt generation on given ring
+ * @vsi: the VSI that contains the queue vector being un-configured
+ * @rx_ring: Rx ring that will have its IRQ disabled
+ * @q_vector: queue vector
+ */
+static void
+ice_qvec_dis_irq(struct ice_vsi *vsi, struct ice_ring *rx_ring,
+ struct ice_q_vector *q_vector)
+{
+ struct ice_pf *pf = vsi->back;
+ struct ice_hw *hw = &pf->hw;
+ int base = vsi->base_vector;
+ u16 reg;
+ u32 val;
+
+ /* QINT_TQCTL is cleared in ice_vsi_stop_tx_ring, so only QINT_RQCTL
+ * needs to be handled here
+ */
+ reg = rx_ring->reg_idx;
+ val = rd32(hw, QINT_RQCTL(reg));
+ val &= ~QINT_RQCTL_CAUSE_ENA_M;
+ wr32(hw, QINT_RQCTL(reg), val);
+
+ if (q_vector) {
+ u16 v_idx = q_vector->v_idx;
+
+ wr32(hw, GLINT_DYN_CTL(q_vector->reg_idx), 0);
+ ice_flush(hw);
+ synchronize_irq(pf->msix_entries[v_idx + base].vector);
+ }
+}
+
+/**
+ * ice_qvec_cfg_msix - Enable IRQ for given queue vector
+ * @vsi: the VSI that contains queue vector
+ * @q_vector: queue vector
+ */
+static void
+ice_qvec_cfg_msix(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
+{
+ u16 reg_idx = q_vector->reg_idx;
+ struct ice_pf *pf = vsi->back;
+ struct ice_hw *hw = &pf->hw;
+ struct ice_ring *ring;
+
+ ice_cfg_itr(hw, q_vector);
+
+ wr32(hw, GLINT_RATE(reg_idx),
+ ice_intrl_usec_to_reg(q_vector->intrl, hw->intrl_gran));
+
+ ice_for_each_ring(ring, q_vector->tx)
+ ice_cfg_txq_interrupt(vsi, ring->reg_idx, reg_idx,
+ q_vector->tx.itr_idx);
+
+ ice_for_each_ring(ring, q_vector->rx)
+ ice_cfg_rxq_interrupt(vsi, ring->reg_idx, reg_idx,
+ q_vector->rx.itr_idx);
+
+ ice_flush(hw);
+}
+
+/**
+ * ice_qvec_ena_irq - Enable IRQ for given queue vector
+ * @vsi: the VSI that contains queue vector
+ * @q_vector: queue vector
+ */
+static void ice_qvec_ena_irq(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
+{
+ struct ice_pf *pf = vsi->back;
+ struct ice_hw *hw = &pf->hw;
+
+ ice_irq_dynamic_ena(hw, vsi, q_vector);
+
+ ice_flush(hw);
+}
+
+/**
+ * ice_qp_dis - Disables a queue pair
+ * @vsi: VSI of interest
+ * @q_idx: ring index in array
+ *
+ * Returns 0 on success, negative on failure.
+ */
+static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
+{
+ struct ice_txq_meta txq_meta = { };
+ struct ice_ring *tx_ring, *rx_ring;
+ struct ice_q_vector *q_vector;
+ int timeout = 50;
+ int err;
+
+ if (q_idx >= vsi->num_rxq || q_idx >= vsi->num_txq)
+ return -EINVAL;
+
+ tx_ring = vsi->tx_rings[q_idx];
+ rx_ring = vsi->rx_rings[q_idx];
+ q_vector = rx_ring->q_vector;
+
+ while (test_and_set_bit(__ICE_CFG_BUSY, vsi->state)) {
+ timeout--;
+ if (!timeout)
+ return -EBUSY;
+ usleep_range(1000, 2000);
+ }
+ netif_tx_stop_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
+
+ ice_qvec_dis_irq(vsi, rx_ring, q_vector);
+
+ ice_fill_txq_meta(vsi, tx_ring, &txq_meta);
+ err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, tx_ring, &txq_meta);
+ if (err)
+ return err;
+ if (ice_is_xdp_ena_vsi(vsi)) {
+ struct ice_ring *xdp_ring = vsi->xdp_rings[q_idx];
+
+ memset(&txq_meta, 0, sizeof(txq_meta));
+ ice_fill_txq_meta(vsi, xdp_ring, &txq_meta);
+ err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, xdp_ring,
+ &txq_meta);
+ if (err)
+ return err;
+ }
+ err = ice_vsi_ctrl_rx_ring(vsi, false, q_idx);
+ if (err)
+ return err;
+
+ ice_qvec_toggle_napi(vsi, q_vector, false);
+ ice_qp_clean_rings(vsi, q_idx);
+ ice_qp_reset_stats(vsi, q_idx);
+
+ return 0;
+}
+
+/**
+ * ice_qp_ena - Enables a queue pair
+ * @vsi: VSI of interest
+ * @q_idx: ring index in array
+ *
+ * Returns 0 on success, negative on failure.
+ */
+static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
+{
+ struct ice_aqc_add_tx_qgrp *qg_buf;
+ struct ice_ring *tx_ring, *rx_ring;
+ struct ice_q_vector *q_vector;
+ int err;
+
+ if (q_idx >= vsi->num_rxq || q_idx >= vsi->num_txq)
+ return -EINVAL;
+
+ qg_buf = kzalloc(sizeof(*qg_buf), GFP_KERNEL);
+ if (!qg_buf)
+ return -ENOMEM;
+
+ qg_buf->num_txqs = 1;
+
+ tx_ring = vsi->tx_rings[q_idx];
+ rx_ring = vsi->rx_rings[q_idx];
+ q_vector = rx_ring->q_vector;
+
+ err = ice_vsi_cfg_txq(vsi, tx_ring, qg_buf);
+ if (err)
+ goto free_buf;
+
+ if (ice_is_xdp_ena_vsi(vsi)) {
+ struct ice_ring *xdp_ring = vsi->xdp_rings[q_idx];
+
+ memset(qg_buf, 0, sizeof(*qg_buf));
+ qg_buf->num_txqs = 1;
+ err = ice_vsi_cfg_txq(vsi, xdp_ring, qg_buf);
+ if (err)
+ goto free_buf;
+ ice_set_ring_xdp(xdp_ring);
+ xdp_ring->xsk_umem = ice_xsk_umem(xdp_ring);
+ }
+
+ err = ice_setup_rx_ctx(rx_ring);
+ if (err)
+ goto free_buf;
+
+ ice_qvec_cfg_msix(vsi, q_vector);
+
+ err = ice_vsi_ctrl_rx_ring(vsi, true, q_idx);
+ if (err)
+ goto free_buf;
+
+ clear_bit(__ICE_CFG_BUSY, vsi->state);
+ ice_qvec_toggle_napi(vsi, q_vector, true);
+ ice_qvec_ena_irq(vsi, q_vector);
+
+ netif_tx_start_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
+free_buf:
+ kfree(qg_buf);
+ return err;
+}
+
+/**
+ * ice_xsk_alloc_umems - allocate the array of UMEM pointers for a VSI
+ * @vsi: VSI to allocate the UMEM pointer array on
+ *
+ * Returns 0 on success, negative on error
+ */
+static int ice_xsk_alloc_umems(struct ice_vsi *vsi)
+{
+ if (vsi->xsk_umems)
+ return 0;
+
+ vsi->xsk_umems = kcalloc(vsi->num_xsk_umems, sizeof(*vsi->xsk_umems),
+ GFP_KERNEL);
+
+ if (!vsi->xsk_umems) {
+ vsi->num_xsk_umems = 0;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_xsk_add_umem - add a UMEM region for XDP sockets
+ * @vsi: VSI to which the UMEM will be added
+ * @umem: pointer to a requested UMEM region
+ * @qid: queue ID
+ *
+ * Returns 0 on success, negative on error
+ */
+static int ice_xsk_add_umem(struct ice_vsi *vsi, struct xdp_umem *umem, u16 qid)
+{
+ int err;
+
+ err = ice_xsk_alloc_umems(vsi);
+ if (err)
+ return err;
+
+ vsi->xsk_umems[qid] = umem;
+ vsi->num_xsk_umems_used++;
+
+ return 0;
+}
+
+/**
+ * ice_xsk_remove_umem - Remove a UMEM for a certain ring/qid
+ * @vsi: VSI from which the UMEM will be removed
+ * @qid: Ring/qid associated with the UMEM
+ */
+static void ice_xsk_remove_umem(struct ice_vsi *vsi, u16 qid)
+{
+ vsi->xsk_umems[qid] = NULL;
+ vsi->num_xsk_umems_used--;
+
+ if (vsi->num_xsk_umems_used == 0) {
+ kfree(vsi->xsk_umems);
+ vsi->xsk_umems = NULL;
+ vsi->num_xsk_umems = 0;
+ }
+}
+
+/**
+ * ice_xsk_umem_dma_map - DMA map UMEM region for XDP sockets
+ * @vsi: VSI to map the UMEM region
+ * @umem: UMEM to map
+ *
+ * Returns 0 on success, negative on error
+ */
+static int ice_xsk_umem_dma_map(struct ice_vsi *vsi, struct xdp_umem *umem)
+{
+ struct ice_pf *pf = vsi->back;
+ struct device *dev;
+ unsigned int i;
+
+ dev = ice_pf_to_dev(pf);
+ for (i = 0; i < umem->npgs; i++) {
+ dma_addr_t dma = dma_map_page_attrs(dev, umem->pgs[i], 0,
+ PAGE_SIZE,
+ DMA_BIDIRECTIONAL,
+ ICE_RX_DMA_ATTR);
+ if (dma_mapping_error(dev, dma)) {
+ dev_dbg(dev,
+ "XSK UMEM DMA mapping error on page num %d", i);
+ goto out_unmap;
+ }
+
+ umem->pages[i].dma = dma;
+ }
+
+ return 0;
+
+out_unmap:
+	/* unwind only the pages that were actually mapped: [0, i) */
+	while (i--) {
+		dma_unmap_page_attrs(dev, umem->pages[i].dma, PAGE_SIZE,
+				     DMA_BIDIRECTIONAL, ICE_RX_DMA_ATTR);
+		umem->pages[i].dma = 0;
+	}
+
+ return -EFAULT;
+}
+
+/**
+ * ice_xsk_umem_dma_unmap - DMA unmap UMEM region for XDP sockets
+ * @vsi: VSI from which the UMEM will be unmapped
+ * @umem: UMEM to unmap
+ */
+static void ice_xsk_umem_dma_unmap(struct ice_vsi *vsi, struct xdp_umem *umem)
+{
+ struct ice_pf *pf = vsi->back;
+ struct device *dev;
+ unsigned int i;
+
+ dev = ice_pf_to_dev(pf);
+ for (i = 0; i < umem->npgs; i++) {
+ dma_unmap_page_attrs(dev, umem->pages[i].dma, PAGE_SIZE,
+ DMA_BIDIRECTIONAL, ICE_RX_DMA_ATTR);
+
+ umem->pages[i].dma = 0;
+ }
+}
+
+/**
+ * ice_xsk_umem_disable - disable a UMEM region
+ * @vsi: Current VSI
+ * @qid: queue ID
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int ice_xsk_umem_disable(struct ice_vsi *vsi, u16 qid)
+{
+ if (!vsi->xsk_umems || qid >= vsi->num_xsk_umems ||
+ !vsi->xsk_umems[qid])
+ return -EINVAL;
+
+ ice_xsk_umem_dma_unmap(vsi, vsi->xsk_umems[qid]);
+ ice_xsk_remove_umem(vsi, qid);
+
+ return 0;
+}
+
+/**
+ * ice_xsk_umem_enable - enable a UMEM region
+ * @vsi: Current VSI
+ * @umem: pointer to a requested UMEM region
+ * @qid: queue ID
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int
+ice_xsk_umem_enable(struct ice_vsi *vsi, struct xdp_umem *umem, u16 qid)
+{
+ struct xdp_umem_fq_reuse *reuseq;
+ int err;
+
+ if (vsi->type != ICE_VSI_PF)
+ return -EINVAL;
+
+ vsi->num_xsk_umems = min_t(u16, vsi->num_rxq, vsi->num_txq);
+ if (qid >= vsi->num_xsk_umems)
+ return -EINVAL;
+
+ if (vsi->xsk_umems && vsi->xsk_umems[qid])
+ return -EBUSY;
+
+ reuseq = xsk_reuseq_prepare(vsi->rx_rings[0]->count);
+ if (!reuseq)
+ return -ENOMEM;
+
+ xsk_reuseq_free(xsk_reuseq_swap(umem, reuseq));
+
+ err = ice_xsk_umem_dma_map(vsi, umem);
+ if (err)
+ return err;
+
+ err = ice_xsk_add_umem(vsi, umem, qid);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+/**
+ * ice_xsk_umem_setup - enable/disable a UMEM region depending on its state
+ * @vsi: Current VSI
+ * @umem: UMEM to enable/associate to a ring, NULL to disable
+ * @qid: queue ID
+ *
+ * Returns 0 on success, negative on failure
+ */
+int ice_xsk_umem_setup(struct ice_vsi *vsi, struct xdp_umem *umem, u16 qid)
+{
+ bool if_running, umem_present = !!umem;
+ int ret = 0, umem_failure = 0;
+
+ if_running = netif_running(vsi->netdev) && ice_is_xdp_ena_vsi(vsi);
+
+ if (if_running) {
+ ret = ice_qp_dis(vsi, qid);
+ if (ret) {
+ netdev_err(vsi->netdev, "ice_qp_dis error = %d", ret);
+ goto xsk_umem_if_up;
+ }
+ }
+
+ umem_failure = umem_present ? ice_xsk_umem_enable(vsi, umem, qid) :
+ ice_xsk_umem_disable(vsi, qid);
+
+xsk_umem_if_up:
+ if (if_running) {
+ ret = ice_qp_ena(vsi, qid);
+ if (!ret && umem_present)
+ napi_schedule(&vsi->xdp_rings[qid]->q_vector->napi);
+ else if (ret)
+ netdev_err(vsi->netdev, "ice_qp_ena error = %d", ret);
+ }
+
+ if (umem_failure) {
+ netdev_err(vsi->netdev, "Could not %sable UMEM, error = %d",
+ umem_present ? "en" : "dis", umem_failure);
+ return umem_failure;
+ }
+
+ return ret;
+}
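
A minimal sketch of how this setup entry point is reached from the stack; the handler name below is hypothetical and the ndo_bpf wiring is assumed rather than shown by this patch. XDP_SETUP_XSK_UMEM carries either a UMEM pointer (enable) or NULL (disable), which maps directly onto ice_xsk_umem_setup():

	/* Hypothetical ndo_bpf dispatch; only the XSK command is shown. */
	static int ice_xdp_sketch(struct net_device *dev, struct netdev_bpf *xdp)
	{
		struct ice_netdev_priv *np = netdev_priv(dev);
		struct ice_vsi *vsi = np->vsi;

		switch (xdp->command) {
		case XDP_SETUP_XSK_UMEM:
			/* a NULL umem tears zero-copy down on that queue */
			return ice_xsk_umem_setup(vsi, xdp->xsk.umem,
						  xdp->xsk.queue_id);
		default:
			return -EINVAL;
		}
	}
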
+
+/**
+ * ice_zca_free - Callback for MEM_TYPE_ZERO_COPY allocations
+ * @zca: zero-copy allocator
+ * @handle: Buffer handle
+ */
+void ice_zca_free(struct zero_copy_allocator *zca, unsigned long handle)
+{
+ struct ice_rx_buf *rx_buf;
+ struct ice_ring *rx_ring;
+ struct xdp_umem *umem;
+ u64 hr, mask;
+ u16 nta;
+
+ rx_ring = container_of(zca, struct ice_ring, zca);
+ umem = rx_ring->xsk_umem;
+ hr = umem->headroom + XDP_PACKET_HEADROOM;
+
+ mask = umem->chunk_mask;
+
+ nta = rx_ring->next_to_alloc;
+ rx_buf = &rx_ring->rx_buf[nta];
+
+ nta++;
+ rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
+
+ handle &= mask;
+
+ rx_buf->dma = xdp_umem_get_dma(umem, handle);
+ rx_buf->dma += hr;
+
+ rx_buf->addr = xdp_umem_get_data(umem, handle);
+ rx_buf->addr += hr;
+
+ rx_buf->handle = (u64)handle + umem->headroom;
+}
+
+/**
+ * ice_alloc_buf_fast_zc - Retrieve buffer address from XDP umem
+ * @rx_ring: ring with an xdp_umem bound to it
+ * @rx_buf: buffer to which xsk page address will be assigned
+ *
+ * This function allocates an Rx buffer in the hot path.
+ * The buffer can come from fill queue or recycle queue.
+ *
+ * Returns true if an assignment was successful, false if not.
+ */
+static __always_inline bool
+ice_alloc_buf_fast_zc(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
+{
+ struct xdp_umem *umem = rx_ring->xsk_umem;
+ void *addr = rx_buf->addr;
+ u64 handle, hr;
+
+ if (addr) {
+ rx_ring->rx_stats.page_reuse_count++;
+ return true;
+ }
+
+ if (!xsk_umem_peek_addr(umem, &handle)) {
+ rx_ring->rx_stats.alloc_page_failed++;
+ return false;
+ }
+
+ hr = umem->headroom + XDP_PACKET_HEADROOM;
+
+ rx_buf->dma = xdp_umem_get_dma(umem, handle);
+ rx_buf->dma += hr;
+
+ rx_buf->addr = xdp_umem_get_data(umem, handle);
+ rx_buf->addr += hr;
+
+ rx_buf->handle = handle + umem->headroom;
+
+ xsk_umem_discard_addr(umem);
+ return true;
+}
+
+/**
+ * ice_alloc_buf_slow_zc - Retrieve buffer address from XDP umem
+ * @rx_ring: ring with an xdp_umem bound to it
+ * @rx_buf: buffer to which xsk page address will be assigned
+ *
+ * This function allocates an Rx buffer in the slow path.
+ * The buffer can come from fill queue or recycle queue.
+ *
+ * Returns true if an assignment was successful, false if not.
+ */
+static __always_inline bool
+ice_alloc_buf_slow_zc(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
+{
+ struct xdp_umem *umem = rx_ring->xsk_umem;
+ u64 handle, headroom;
+
+ if (!xsk_umem_peek_addr_rq(umem, &handle)) {
+ rx_ring->rx_stats.alloc_page_failed++;
+ return false;
+ }
+
+ handle &= umem->chunk_mask;
+ headroom = umem->headroom + XDP_PACKET_HEADROOM;
+
+ rx_buf->dma = xdp_umem_get_dma(umem, handle);
+ rx_buf->dma += headroom;
+
+ rx_buf->addr = xdp_umem_get_data(umem, handle);
+ rx_buf->addr += headroom;
+
+ rx_buf->handle = handle + umem->headroom;
+
+ xsk_umem_discard_addr_rq(umem);
+ return true;
+}
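
For orientation, the handle arithmetic both allocators lean on: a UMEM handle is a byte offset into the UMEM, which the page array translates into kernel-virtual and DMA addresses, and the driver then adds its headroom on top. A sketch of the accessors, mirroring the contemporaneous xdp_sock headers (treat the exact field layout as an assumption):

	/* Sketch of the era's xdp_umem_get_dma()/xdp_umem_get_data(). */
	static inline dma_addr_t umem_get_dma_sketch(struct xdp_umem *umem, u64 addr)
	{
		return umem->pages[addr >> PAGE_SHIFT].dma +
		       (addr & (PAGE_SIZE - 1));
	}

	static inline char *umem_get_data_sketch(struct xdp_umem *umem, u64 addr)
	{
		return umem->pages[addr >> PAGE_SHIFT].addr +
		       (addr & (PAGE_SIZE - 1));
	}
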
+
+/**
+ * ice_alloc_rx_bufs_zc - allocate a number of Rx buffers
+ * @rx_ring: Rx ring
+ * @count: The number of buffers to allocate
+ * @alloc: the function pointer to call for allocation
+ *
+ * This function allocates a number of Rx buffers from the fill ring
+ * or the internal recycle mechanism and places them on the Rx ring.
+ *
+ * Returns false if all allocations were successful, true if any failed.
+ */
+static bool
+ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, int count,
+ bool alloc(struct ice_ring *, struct ice_rx_buf *))
+{
+ union ice_32b_rx_flex_desc *rx_desc;
+ u16 ntu = rx_ring->next_to_use;
+ struct ice_rx_buf *rx_buf;
+ bool ret = false;
+
+ if (!count)
+ return false;
+
+ rx_desc = ICE_RX_DESC(rx_ring, ntu);
+ rx_buf = &rx_ring->rx_buf[ntu];
+
+ do {
+ if (!alloc(rx_ring, rx_buf)) {
+ ret = true;
+ break;
+ }
+
+ dma_sync_single_range_for_device(rx_ring->dev, rx_buf->dma, 0,
+ rx_ring->rx_buf_len,
+ DMA_BIDIRECTIONAL);
+
+ rx_desc->read.pkt_addr = cpu_to_le64(rx_buf->dma);
+ rx_desc->wb.status_error0 = 0;
+
+ rx_desc++;
+ rx_buf++;
+ ntu++;
+
+ if (unlikely(ntu == rx_ring->count)) {
+ rx_desc = ICE_RX_DESC(rx_ring, 0);
+ rx_buf = rx_ring->rx_buf;
+ ntu = 0;
+ }
+ } while (--count);
+
+ if (rx_ring->next_to_use != ntu)
+ ice_release_rx_desc(rx_ring, ntu);
+
+ return ret;
+}
+
+/**
+ * ice_alloc_rx_bufs_fast_zc - allocate zero copy bufs in the hot path
+ * @rx_ring: Rx ring
+ * @count: number of bufs to allocate
+ *
+ * Returns false on success, true on failure.
+ */
+static bool ice_alloc_rx_bufs_fast_zc(struct ice_ring *rx_ring, u16 count)
+{
+ return ice_alloc_rx_bufs_zc(rx_ring, count,
+ ice_alloc_buf_fast_zc);
+}
+
+/**
+ * ice_alloc_rx_bufs_slow_zc - allocate zero copy bufs in the slow path
+ * @rx_ring: Rx ring
+ * @count: number of bufs to allocate
+ *
+ * Returns false on success, true on failure.
+ */
+bool ice_alloc_rx_bufs_slow_zc(struct ice_ring *rx_ring, u16 count)
+{
+ return ice_alloc_rx_bufs_zc(rx_ring, count,
+ ice_alloc_buf_slow_zc);
+}
+
+/**
+ * ice_bump_ntc - Bump the next_to_clean counter of an Rx ring
+ * @rx_ring: Rx ring
+ */
+static void ice_bump_ntc(struct ice_ring *rx_ring)
+{
+ int ntc = rx_ring->next_to_clean + 1;
+
+ ntc = (ntc < rx_ring->count) ? ntc : 0;
+ rx_ring->next_to_clean = ntc;
+ prefetch(ICE_RX_DESC(rx_ring, ntc));
+}
+
+/**
+ * ice_get_rx_buf_zc - Fetch the current Rx buffer
+ * @rx_ring: Rx ring
+ * @size: size of a buffer
+ *
+ * This function returns the current received Rx buffer and performs
+ * DMA synchronization on it.
+ *
+ * Returns a pointer to the received Rx buffer.
+ */
+static struct ice_rx_buf *ice_get_rx_buf_zc(struct ice_ring *rx_ring, int size)
+{
+ struct ice_rx_buf *rx_buf;
+
+ rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
+
+ dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma, 0,
+ size, DMA_BIDIRECTIONAL);
+
+ return rx_buf;
+}
+
+/**
+ * ice_reuse_rx_buf_zc - reuse an Rx buffer
+ * @rx_ring: Rx ring
+ * @old_buf: The buffer to recycle
+ *
+ * This function recycles a finished Rx buffer, and places it on the recycle
+ * queue (next_to_alloc).
+ */
+static void
+ice_reuse_rx_buf_zc(struct ice_ring *rx_ring, struct ice_rx_buf *old_buf)
+{
+ unsigned long mask = (unsigned long)rx_ring->xsk_umem->chunk_mask;
+ u64 hr = rx_ring->xsk_umem->headroom + XDP_PACKET_HEADROOM;
+ u16 nta = rx_ring->next_to_alloc;
+ struct ice_rx_buf *new_buf;
+
+ new_buf = &rx_ring->rx_buf[nta++];
+ rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
+
+ new_buf->dma = old_buf->dma & mask;
+ new_buf->dma += hr;
+
+ new_buf->addr = (void *)((unsigned long)old_buf->addr & mask);
+ new_buf->addr += hr;
+
+ new_buf->handle = old_buf->handle & mask;
+ new_buf->handle += rx_ring->xsk_umem->headroom;
+
+ old_buf->addr = NULL;
+}
+
+/**
+ * ice_construct_skb_zc - Create an sk_buff from zero-copy buffer
+ * @rx_ring: Rx ring
+ * @rx_buf: zero-copy Rx buffer
+ * @xdp: XDP buffer
+ *
+ * This function allocates a new skb from a zero-copy Rx buffer.
+ *
+ * Returns the skb on success, NULL on failure.
+ */
+static struct sk_buff *
+ice_construct_skb_zc(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
+ struct xdp_buff *xdp)
+{
+ unsigned int metasize = xdp->data - xdp->data_meta;
+ unsigned int datasize = xdp->data_end - xdp->data;
+ unsigned int datasize_hard = xdp->data_end -
+ xdp->data_hard_start;
+ struct sk_buff *skb;
+
+ skb = __napi_alloc_skb(&rx_ring->q_vector->napi, datasize_hard,
+ GFP_ATOMIC | __GFP_NOWARN);
+ if (unlikely(!skb))
+ return NULL;
+
+ skb_reserve(skb, xdp->data - xdp->data_hard_start);
+ memcpy(__skb_put(skb, datasize), xdp->data, datasize);
+ if (metasize)
+ skb_metadata_set(skb, metasize);
+
+ ice_reuse_rx_buf_zc(rx_ring, rx_buf);
+
+ return skb;
+}
+
+/**
+ * ice_run_xdp_zc - Executes an XDP program in zero-copy path
+ * @rx_ring: Rx ring
+ * @xdp: xdp_buff used as input to the XDP program
+ *
+ * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
+ */
+static int
+ice_run_xdp_zc(struct ice_ring *rx_ring, struct xdp_buff *xdp)
+{
+ int err, result = ICE_XDP_PASS;
+ struct bpf_prog *xdp_prog;
+ struct ice_ring *xdp_ring;
+ u32 act;
+
+ rcu_read_lock();
+ xdp_prog = READ_ONCE(rx_ring->xdp_prog);
+ if (!xdp_prog) {
+ rcu_read_unlock();
+ return ICE_XDP_PASS;
+ }
+
+ act = bpf_prog_run_xdp(xdp_prog, xdp);
+ xdp->handle += xdp->data - xdp->data_hard_start;
+ switch (act) {
+ case XDP_PASS:
+ break;
+ case XDP_TX:
+ xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->q_index];
+ result = ice_xmit_xdp_buff(xdp, xdp_ring);
+ break;
+ case XDP_REDIRECT:
+ err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
+ result = !err ? ICE_XDP_REDIR : ICE_XDP_CONSUMED;
+ break;
+ default:
+ bpf_warn_invalid_xdp_action(act);
+ /* fallthrough -- not supported action */
+ case XDP_ABORTED:
+ trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
+ /* fallthrough -- handle aborts by dropping frame */
+ case XDP_DROP:
+ result = ICE_XDP_CONSUMED;
+ break;
+ }
+
+ rcu_read_unlock();
+ return result;
+}
+
+/**
+ * ice_clean_rx_irq_zc - consumes packets from the hardware ring
+ * @rx_ring: AF_XDP Rx ring
+ * @budget: NAPI budget
+ *
+ * Returns number of processed packets on success, remaining budget on failure.
+ */
+int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget)
+{
+ unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+ u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
+ unsigned int xdp_xmit = 0;
+ struct xdp_buff xdp;
+	bool failure = false;
+
+ xdp.rxq = &rx_ring->xdp_rxq;
+
+ while (likely(total_rx_packets < (unsigned int)budget)) {
+ union ice_32b_rx_flex_desc *rx_desc;
+ unsigned int size, xdp_res = 0;
+ struct ice_rx_buf *rx_buf;
+ struct sk_buff *skb;
+ u16 stat_err_bits;
+ u16 vlan_tag = 0;
+ u8 rx_ptype;
+
+ if (cleaned_count >= ICE_RX_BUF_WRITE) {
+ failure |= ice_alloc_rx_bufs_fast_zc(rx_ring,
+ cleaned_count);
+ cleaned_count = 0;
+ }
+
+ rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean);
+
+ stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
+ if (!ice_test_staterr(rx_desc, stat_err_bits))
+ break;
+
+ /* This memory barrier is needed to keep us from reading
+ * any other fields out of the rx_desc until we have
+ * verified the descriptor has been written back.
+ */
+ dma_rmb();
+
+ size = le16_to_cpu(rx_desc->wb.pkt_len) &
+ ICE_RX_FLX_DESC_PKT_LEN_M;
+ if (!size)
+ break;
+
+ rx_buf = ice_get_rx_buf_zc(rx_ring, size);
+ if (!rx_buf->addr)
+ break;
+
+ xdp.data = rx_buf->addr;
+ xdp.data_meta = xdp.data;
+ xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
+ xdp.data_end = xdp.data + size;
+ xdp.handle = rx_buf->handle;
+
+ xdp_res = ice_run_xdp_zc(rx_ring, &xdp);
+ if (xdp_res) {
+ if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR)) {
+ xdp_xmit |= xdp_res;
+ rx_buf->addr = NULL;
+ } else {
+ ice_reuse_rx_buf_zc(rx_ring, rx_buf);
+ }
+
+ total_rx_bytes += size;
+ total_rx_packets++;
+ cleaned_count++;
+
+ ice_bump_ntc(rx_ring);
+ continue;
+ }
+
+ /* XDP_PASS path */
+ skb = ice_construct_skb_zc(rx_ring, rx_buf, &xdp);
+ if (!skb) {
+ rx_ring->rx_stats.alloc_buf_failed++;
+ break;
+ }
+
+ cleaned_count++;
+ ice_bump_ntc(rx_ring);
+
+ if (eth_skb_pad(skb)) {
+ skb = NULL;
+ continue;
+ }
+
+ total_rx_bytes += skb->len;
+ total_rx_packets++;
+
+ stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S);
+ if (ice_test_staterr(rx_desc, stat_err_bits))
+ vlan_tag = le16_to_cpu(rx_desc->wb.l2tag1);
+
+ rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) &
+ ICE_RX_FLEX_DESC_PTYPE_M;
+
+ ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
+ ice_receive_skb(rx_ring, skb, vlan_tag);
+ }
+
+ ice_finalize_xdp_rx(rx_ring, xdp_xmit);
+ ice_update_rx_ring_stats(rx_ring, total_rx_packets, total_rx_bytes);
+
+ return failure ? budget : (int)total_rx_packets;
+}
+
+/**
+ * ice_xmit_zc - Transmit frames pulled from the AF_XDP Tx queue
+ * @xdp_ring: XDP Tx ring
+ * @budget: max number of frames to xmit
+ *
+ * Returns true if cleanup/transmission is done.
+ */
+static bool ice_xmit_zc(struct ice_ring *xdp_ring, int budget)
+{
+ struct ice_tx_desc *tx_desc = NULL;
+ bool work_done = true;
+ struct xdp_desc desc;
+ dma_addr_t dma;
+
+ while (likely(budget-- > 0)) {
+ struct ice_tx_buf *tx_buf;
+
+ if (unlikely(!ICE_DESC_UNUSED(xdp_ring))) {
+ xdp_ring->tx_stats.tx_busy++;
+ work_done = false;
+ break;
+ }
+
+ tx_buf = &xdp_ring->tx_buf[xdp_ring->next_to_use];
+
+ if (!xsk_umem_consume_tx(xdp_ring->xsk_umem, &desc))
+ break;
+
+ dma = xdp_umem_get_dma(xdp_ring->xsk_umem, desc.addr);
+
+ dma_sync_single_for_device(xdp_ring->dev, dma, desc.len,
+ DMA_BIDIRECTIONAL);
+
+ tx_buf->bytecount = desc.len;
+
+ tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_to_use);
+ tx_desc->buf_addr = cpu_to_le64(dma);
+ tx_desc->cmd_type_offset_bsz = build_ctob(ICE_TXD_LAST_DESC_CMD,
+ 0, desc.len, 0);
+
+ xdp_ring->next_to_use++;
+ if (xdp_ring->next_to_use == xdp_ring->count)
+ xdp_ring->next_to_use = 0;
+ }
+
+ if (tx_desc) {
+ ice_xdp_ring_update_tail(xdp_ring);
+ xsk_umem_consume_tx_done(xdp_ring->xsk_umem);
+ }
+
+ return budget > 0 && work_done;
+}
+
+/**
+ * ice_clean_xdp_tx_buf - Free and unmap XDP Tx buffer
+ * @xdp_ring: XDP Tx ring
+ * @tx_buf: Tx buffer to clean
+ */
+static void
+ice_clean_xdp_tx_buf(struct ice_ring *xdp_ring, struct ice_tx_buf *tx_buf)
+{
+ xdp_return_frame((struct xdp_frame *)tx_buf->raw_buf);
+ dma_unmap_single(xdp_ring->dev, dma_unmap_addr(tx_buf, dma),
+ dma_unmap_len(tx_buf, len), DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_buf, len, 0);
+}
+
+/**
+ * ice_clean_tx_irq_zc - Completes AF_XDP entries, and cleans XDP entries
+ * @xdp_ring: XDP Tx ring
+ * @budget: NAPI budget
+ *
+ * Returns true if cleanup/transmission is done.
+ */
+bool ice_clean_tx_irq_zc(struct ice_ring *xdp_ring, int budget)
+{
+ int total_packets = 0, total_bytes = 0;
+ s16 ntc = xdp_ring->next_to_clean;
+ struct ice_tx_desc *tx_desc;
+ struct ice_tx_buf *tx_buf;
+ bool xmit_done = true;
+ u32 xsk_frames = 0;
+
+ tx_desc = ICE_TX_DESC(xdp_ring, ntc);
+ tx_buf = &xdp_ring->tx_buf[ntc];
+ ntc -= xdp_ring->count;
+
+ do {
+ if (!(tx_desc->cmd_type_offset_bsz &
+ cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
+ break;
+
+ total_bytes += tx_buf->bytecount;
+ total_packets++;
+
+ if (tx_buf->raw_buf) {
+ ice_clean_xdp_tx_buf(xdp_ring, tx_buf);
+ tx_buf->raw_buf = NULL;
+ } else {
+ xsk_frames++;
+ }
+
+ tx_desc->cmd_type_offset_bsz = 0;
+ tx_buf++;
+ tx_desc++;
+ ntc++;
+
+ if (unlikely(!ntc)) {
+ ntc -= xdp_ring->count;
+ tx_buf = xdp_ring->tx_buf;
+ tx_desc = ICE_TX_DESC(xdp_ring, 0);
+ }
+
+ prefetch(tx_desc);
+
+ } while (likely(--budget));
+
+ ntc += xdp_ring->count;
+ xdp_ring->next_to_clean = ntc;
+
+ if (xsk_frames)
+ xsk_umem_complete_tx(xdp_ring->xsk_umem, xsk_frames);
+
+ ice_update_tx_ring_stats(xdp_ring, total_packets, total_bytes);
+ xmit_done = ice_xmit_zc(xdp_ring, ICE_DFLT_IRQ_WORK);
+
+ return budget > 0 && xmit_done;
+}
+
+/**
+ * ice_xsk_wakeup - Implements ndo_xsk_wakeup
+ * @netdev: net_device
+ * @queue_id: queue to wake up
+ * @flags: ignored in our case, since we have Rx and Tx in the same NAPI
+ *
+ * Returns negative on error, zero otherwise.
+ */
+int
+ice_xsk_wakeup(struct net_device *netdev, u32 queue_id,
+ u32 __always_unused flags)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_q_vector *q_vector;
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_ring *ring;
+
+ if (test_bit(__ICE_DOWN, vsi->state))
+ return -ENETDOWN;
+
+ if (!ice_is_xdp_ena_vsi(vsi))
+ return -ENXIO;
+
+ if (queue_id >= vsi->num_txq)
+ return -ENXIO;
+
+ if (!vsi->xdp_rings[queue_id]->xsk_umem)
+ return -ENXIO;
+
+ ring = vsi->xdp_rings[queue_id];
+
+ /* The idea here is that if NAPI is running, mark a miss, so
+ * it will run again. If not, trigger an interrupt and
+ * schedule the NAPI from interrupt context. If NAPI would be
+ * scheduled here, the interrupt affinity would not be
+ * honored.
+ */
+ q_vector = ring->q_vector;
+ if (!napi_if_scheduled_mark_missed(&q_vector->napi))
+ ice_trigger_sw_intr(&vsi->back->hw, q_vector);
+
+ return 0;
+}
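
For context, the user-space side that lands in this handler; a minimal sketch assuming an already-bound AF_XDP socket (setup elided) using the XDP_USE_NEED_WAKEUP bind flag:

	#include <sys/socket.h>

	/* Kick the kernel to run the queue's NAPI: an empty sendto() on
	 * the AF_XDP socket requests Tx processing; poll() on the same fd
	 * serves the Rx/fill-queue case.
	 */
	static void xsk_kick_tx(int xsk_fd)
	{
		sendto(xsk_fd, NULL, 0, MSG_DONTWAIT, NULL, 0);
	}
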
+
+/**
+ * ice_xsk_any_rx_ring_ena - Checks if Rx rings have AF_XDP UMEM attached
+ * @vsi: VSI to be checked
+ *
+ * Returns true if any of the Rx rings has an AF_XDP UMEM attached
+ */
+bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi)
+{
+ int i;
+
+ if (!vsi->xsk_umems)
+ return false;
+
+ for (i = 0; i < vsi->num_xsk_umems; i++) {
+ if (vsi->xsk_umems[i])
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * ice_xsk_clean_rx_ring - clean UMEM queues connected to a given Rx ring
+ * @rx_ring: ring to be cleaned
+ */
+void ice_xsk_clean_rx_ring(struct ice_ring *rx_ring)
+{
+ u16 i;
+
+ for (i = 0; i < rx_ring->count; i++) {
+ struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];
+
+ if (!rx_buf->addr)
+ continue;
+
+ xsk_umem_fq_reuse(rx_ring->xsk_umem, rx_buf->handle);
+ rx_buf->addr = NULL;
+ }
+}
+
+/**
+ * ice_xsk_clean_xdp_ring - Clean the XDP Tx ring and its UMEM queues
+ * @xdp_ring: XDP Tx ring
+ */
+void ice_xsk_clean_xdp_ring(struct ice_ring *xdp_ring)
+{
+ u16 ntc = xdp_ring->next_to_clean, ntu = xdp_ring->next_to_use;
+ u32 xsk_frames = 0;
+
+ while (ntc != ntu) {
+ struct ice_tx_buf *tx_buf = &xdp_ring->tx_buf[ntc];
+
+ if (tx_buf->raw_buf)
+ ice_clean_xdp_tx_buf(xdp_ring, tx_buf);
+ else
+ xsk_frames++;
+
+ tx_buf->raw_buf = NULL;
+
+ ntc++;
+ if (ntc >= xdp_ring->count)
+ ntc = 0;
+ }
+
+ if (xsk_frames)
+ xsk_umem_complete_tx(xdp_ring->xsk_umem, xsk_frames);
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.h b/drivers/net/ethernet/intel/ice/ice_xsk.h
new file mode 100644
index 000000000000..3479e1de98fe
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019, Intel Corporation. */
+
+#ifndef _ICE_XSK_H_
+#define _ICE_XSK_H_
+#include "ice_txrx.h"
+#include "ice.h"
+
+struct ice_vsi;
+
+#ifdef CONFIG_XDP_SOCKETS
+int ice_xsk_umem_setup(struct ice_vsi *vsi, struct xdp_umem *umem, u16 qid);
+void ice_zca_free(struct zero_copy_allocator *zca, unsigned long handle);
+int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget);
+bool ice_clean_tx_irq_zc(struct ice_ring *xdp_ring, int budget);
+int ice_xsk_wakeup(struct net_device *netdev, u32 queue_id, u32 flags);
+bool ice_alloc_rx_bufs_slow_zc(struct ice_ring *rx_ring, u16 count);
+bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi);
+void ice_xsk_clean_rx_ring(struct ice_ring *rx_ring);
+void ice_xsk_clean_xdp_ring(struct ice_ring *xdp_ring);
+#else
+static inline int
+ice_xsk_umem_setup(struct ice_vsi __always_unused *vsi,
+ struct xdp_umem __always_unused *umem,
+ u16 __always_unused qid)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline void
+ice_zca_free(struct zero_copy_allocator __always_unused *zca,
+ unsigned long __always_unused handle)
+{
+}
+
+static inline int
+ice_clean_rx_irq_zc(struct ice_ring __always_unused *rx_ring,
+ int __always_unused budget)
+{
+ return 0;
+}
+
+static inline bool
+ice_clean_tx_irq_zc(struct ice_ring __always_unused *xdp_ring,
+ int __always_unused budget)
+{
+ return false;
+}
+
+static inline bool
+ice_alloc_rx_bufs_slow_zc(struct ice_ring __always_unused *rx_ring,
+ u16 __always_unused count)
+{
+ return false;
+}
+
+static inline bool ice_xsk_any_rx_ring_ena(struct ice_vsi __always_unused *vsi)
+{
+ return false;
+}
+
+static inline int
+ice_xsk_wakeup(struct net_device __always_unused *netdev,
+ u32 __always_unused queue_id, u32 __always_unused flags)
+{
+	return -EOPNOTSUPP;
+}
+
+#define ice_xsk_clean_rx_ring(rx_ring) do {} while (0)
+#define ice_xsk_clean_xdp_ring(xdp_ring) do {} while (0)
+#endif /* CONFIG_XDP_SOCKETS */
+#endif /* !_ICE_XSK_H_ */
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.h b/drivers/net/ethernet/intel/igb/e1000_82575.h
index 6ad775b1a4c5..63ec253ac788 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.h
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.h
@@ -127,6 +127,7 @@ struct e1000_adv_tx_context_desc {
};
#define E1000_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */
+#define E1000_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */
#define E1000_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */
#define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */
#define E1000_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 packet TYPE of SCTP */
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index ed7e667d7eb2..98346eb064d5 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -2518,6 +2518,7 @@ igb_features_check(struct sk_buff *skb, struct net_device *dev,
if (unlikely(mac_hdr_len > IGB_MAX_MAC_HDR_LEN))
return features & ~(NETIF_F_HW_CSUM |
NETIF_F_SCTP_CRC |
+ NETIF_F_GSO_UDP_L4 |
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_TSO |
NETIF_F_TSO6);
@@ -2526,6 +2527,7 @@ igb_features_check(struct sk_buff *skb, struct net_device *dev,
if (unlikely(network_hdr_len > IGB_MAX_NETWORK_HDR_LEN))
return features & ~(NETIF_F_HW_CSUM |
NETIF_F_SCTP_CRC |
+ NETIF_F_GSO_UDP_L4 |
NETIF_F_TSO |
NETIF_F_TSO6);
@@ -3122,7 +3124,7 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
NETIF_F_HW_CSUM;
if (hw->mac.type >= e1000_82576)
- netdev->features |= NETIF_F_SCTP_CRC;
+ netdev->features |= NETIF_F_SCTP_CRC | NETIF_F_GSO_UDP_L4;
if (hw->mac.type >= e1000_i350)
netdev->features |= NETIF_F_HW_TC;
@@ -5696,6 +5698,7 @@ static int igb_tso(struct igb_ring *tx_ring,
} ip;
union {
struct tcphdr *tcp;
+ struct udphdr *udp;
unsigned char *hdr;
} l4;
u32 paylen, l4_offset;
@@ -5715,7 +5718,8 @@ static int igb_tso(struct igb_ring *tx_ring,
l4.hdr = skb_checksum_start(skb);
/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
- type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;
+ type_tucmd = (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) ?
+ E1000_ADVTXD_TUCMD_L4T_UDP : E1000_ADVTXD_TUCMD_L4T_TCP;
/* initialize outer IP header fields */
if (ip.v4->version == 4) {
@@ -5743,12 +5747,19 @@ static int igb_tso(struct igb_ring *tx_ring,
/* determine offset of inner transport header */
l4_offset = l4.hdr - skb->data;
- /* compute length of segmentation header */
- *hdr_len = (l4.tcp->doff * 4) + l4_offset;
-
/* remove payload length from inner checksum */
paylen = skb->len - l4_offset;
- csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
+ if (type_tucmd & E1000_ADVTXD_TUCMD_L4T_TCP) {
+ /* compute length of segmentation header */
+ *hdr_len = (l4.tcp->doff * 4) + l4_offset;
+ csum_replace_by_diff(&l4.tcp->check,
+ (__force __wsum)htonl(paylen));
+ } else {
+ /* compute length of segmentation header */
+ *hdr_len = sizeof(*l4.udp) + l4_offset;
+ csum_replace_by_diff(&l4.udp->check,
+ (__force __wsum)htonl(paylen));
+ }
/* update gso size and bytecount with header size */
first->gso_segs = skb_shinfo(skb)->gso_segs;
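
In isolation, the header-length split this hunk introduces (the function name here is illustrative, not driver code): TCP's header length is variable and read from doff, while UDP's is the fixed 8-byte struct udphdr. The checksum adjustment itself stays with csum_replace_by_diff(), which strips the payload length the stack folded into the pseudo-header checksum so the hardware can re-add each segment's own length.

	#include <linux/types.h>
	#include <linux/tcp.h>
	#include <linux/udp.h>

	/* Bytes the hardware replays in front of every segment. */
	static u32 tso_hdr_len_sketch(bool is_udp, const struct tcphdr *tcp,
				      u32 l4_offset)
	{
		return l4_offset +
		       (is_udp ? sizeof(struct udphdr) : tcp->doff * 4);
	}
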
@@ -6225,7 +6236,6 @@ static void igb_get_stats64(struct net_device *netdev,
static int igb_change_mtu(struct net_device *netdev, int new_mtu)
{
struct igb_adapter *adapter = netdev_priv(netdev);
- struct pci_dev *pdev = adapter->pdev;
int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
/* adjust max frame to be at least the size of a standard frame */
@@ -6241,8 +6251,8 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
if (netif_running(netdev))
igb_down(adapter);
- dev_info(&pdev->dev, "changing MTU from %d to %d\n",
- netdev->mtu, new_mtu);
+ netdev_dbg(netdev, "changing MTU from %d to %d\n",
+ netdev->mtu, new_mtu);
netdev->mtu = new_mtu;
if (netif_running(netdev))
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index fd3071f55bd3..c39e921757ba 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -521,6 +521,19 @@ static int igb_ptp_feature_enable_i210(struct ptp_clock_info *ptp,
switch (rq->type) {
case PTP_CLK_REQ_EXTTS:
+ /* Reject requests with unsupported flags */
+ if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
+ PTP_RISING_EDGE |
+ PTP_FALLING_EDGE |
+ PTP_STRICT_FLAGS))
+ return -EOPNOTSUPP;
+
+ /* Reject requests failing to enable both edges. */
+ if ((rq->extts.flags & PTP_STRICT_FLAGS) &&
+ (rq->extts.flags & PTP_ENABLE_FEATURE) &&
+ (rq->extts.flags & PTP_EXTTS_EDGES) != PTP_EXTTS_EDGES)
+ return -EOPNOTSUPP;
+
if (on) {
pin = ptp_find_pin(igb->ptp_clock, PTP_PF_EXTTS,
rq->extts.index);
@@ -551,6 +564,10 @@ static int igb_ptp_feature_enable_i210(struct ptp_clock_info *ptp,
return 0;
case PTP_CLK_REQ_PEROUT:
+ /* Reject requests with unsupported flags */
+ if (rq->perout.flags)
+ return -EOPNOTSUPP;
+
if (on) {
pin = ptp_find_pin(igb->ptp_clock, PTP_PF_PEROUT,
rq->perout.index);
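
A sketch of the user-space request this validation now screens; the device path and error handling are illustrative:

	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <linux/ptp_clock.h>

	static int request_extts_sketch(const char *dev_path)
	{
		struct ptp_extts_request req = {
			.index = 0,
			.flags = PTP_ENABLE_FEATURE | PTP_RISING_EDGE,
		};
		int fd = open(dev_path, O_RDWR);

		if (fd < 0)
			return -1;
		/* With the checks above, unsupported flag bits now fail
		 * with EOPNOTSUPP, and a PTP_STRICT_FLAGS request is only
		 * accepted when both edges are selected.
		 */
		return ioctl(fd, PTP_EXTTS_REQUEST, &req);
	}
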
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 0f2b68f4bb0f..6003dc3ff5fd 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -2437,8 +2437,8 @@ static int igbvf_change_mtu(struct net_device *netdev, int new_mtu)
adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN +
ETH_FCS_LEN;
- dev_info(&adapter->pdev->dev, "changing MTU from %d to %d\n",
- netdev->mtu, new_mtu);
+ netdev_dbg(netdev, "changing MTU from %d to %d\n",
+ netdev->mtu, new_mtu);
netdev->mtu = new_mtu;
if (netif_running(netdev))
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index 7e16345d836e..0868677d43ed 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -411,7 +411,6 @@ struct igc_adapter {
u32 tx_hwtstamp_timeouts;
u32 tx_hwtstamp_skipped;
u32 rx_hwtstamp_cleared;
- u32 *shadow_vfta;
u32 rss_queues;
u32 rss_indir_tbl_init;
diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h
index f3f2325fe567..f3788f0b95b4 100644
--- a/drivers/net/ethernet/intel/igc/igc_defines.h
+++ b/drivers/net/ethernet/intel/igc/igc_defines.h
@@ -282,7 +282,10 @@
#define IGC_RCTL_BAM 0x00008000 /* broadcast enable */
/* Receive Descriptor bit definitions */
-#define IGC_RXD_STAT_EOP 0x02 /* End of Packet */
+#define IGC_RXD_STAT_EOP 0x02 /* End of Packet */
+#define IGC_RXD_STAT_IXSM 0x04 /* Ignore checksum */
+#define IGC_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */
+#define IGC_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */
#define IGC_RXDEXT_STATERR_CE 0x01000000
#define IGC_RXDEXT_STATERR_SE 0x02000000
@@ -402,4 +405,7 @@
#define IGC_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet Type of TCP */
#define IGC_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 packet TYPE of SCTP */
+/* Maximum size of the MTA register table in all supported adapters */
+#define MAX_MTA_REG 128
+
#endif /* _IGC_DEFINES_H_ */
diff --git a/drivers/net/ethernet/intel/igc/igc_hw.h b/drivers/net/ethernet/intel/igc/igc_hw.h
index abb2d72911ff..20f710645746 100644
--- a/drivers/net/ethernet/intel/igc/igc_hw.h
+++ b/drivers/net/ethernet/intel/igc/igc_hw.h
@@ -91,6 +91,7 @@ struct igc_mac_info {
u16 mta_reg_count;
u16 uta_reg_count;
+ u32 mta_shadow[MAX_MTA_REG];
u16 rar_entry_count;
u8 forced_speed_duplex;
diff --git a/drivers/net/ethernet/intel/igc/igc_mac.c b/drivers/net/ethernet/intel/igc/igc_mac.c
index 5eeb4c8caf4a..12aa6b5fcb5d 100644
--- a/drivers/net/ethernet/intel/igc/igc_mac.c
+++ b/drivers/net/ethernet/intel/igc/igc_mac.c
@@ -784,3 +784,107 @@ bool igc_enable_mng_pass_thru(struct igc_hw *hw)
out:
return ret_val;
}
+
+/**
+ * igc_hash_mc_addr - Generate a multicast hash value
+ * @hw: pointer to the HW structure
+ * @mc_addr: pointer to a multicast address
+ *
+ * Generates a multicast address hash value which is used to determine
+ * the multicast filter table array address and new table value. See
+ * igc_mta_set()
+ **/
+static u32 igc_hash_mc_addr(struct igc_hw *hw, u8 *mc_addr)
+{
+ u32 hash_value, hash_mask;
+ u8 bit_shift = 0;
+
+ /* Register count multiplied by bits per register */
+ hash_mask = (hw->mac.mta_reg_count * 32) - 1;
+
+ /* For a mc_filter_type of 0, bit_shift is the number of left-shifts
+ * where 0xFF would still fall within the hash mask.
+ */
+ while (hash_mask >> bit_shift != 0xFF)
+ bit_shift++;
+
+ /* The portion of the address that is used for the hash table
+ * is determined by the mc_filter_type setting.
+ * The algorithm is such that there is a total of 8 bits of shifting.
+ * The bit_shift for a mc_filter_type of 0 represents the number of
+ * left-shifts where the MSB of mc_addr[5] would still fall within
+ * the hash_mask. Case 0 does this exactly. Since there are a total
+ * of 8 bits of shifting, then mc_addr[4] will shift right the
+ * remaining number of bits. Thus 8 - bit_shift. The rest of the
+ * cases are a variation of this algorithm...essentially raising the
+ * number of bits to shift mc_addr[5] left, while still keeping the
+ * 8-bit shifting total.
+ *
+ * For example, given the following Destination MAC Address and an
+ * MTA register count of 128 (thus a 4096-bit vector and 0xFFF mask),
+ * we can see that the bit_shift for case 0 is 4. These are the hash
+ * values resulting from each mc_filter_type...
+ * [0] [1] [2] [3] [4] [5]
+ * 01 AA 00 12 34 56
+ * LSB MSB
+ *
+ * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563
+ * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6
+ * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x163
+ * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634
+ */
+ switch (hw->mac.mc_filter_type) {
+ default:
+ case 0:
+ break;
+ case 1:
+ bit_shift += 1;
+ break;
+ case 2:
+ bit_shift += 2;
+ break;
+ case 3:
+ bit_shift += 4;
+ break;
+ }
+
+ hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) |
+ (((u16)mc_addr[5]) << bit_shift)));
+
+ return hash_value;
+}
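
The worked example in the comment above can be checked standalone; the sketch below mirrors case 0 only, and the names are hypothetical. The result then feeds the register/bit split that igc_update_mc_addr_list() applies: 0x563 >> 5 selects register 43, 0x563 & 0x1F selects bit 3.

	#include <assert.h>
	#include <stdint.h>

	/* Case-0 hash from the comment above: 128 MTA registers give a
	 * 0xFFF mask and a bit_shift of 4.
	 */
	static uint32_t mc_hash_sketch(const uint8_t *mc, uint32_t mta_reg_count)
	{
		uint32_t hash_mask = mta_reg_count * 32 - 1;
		uint8_t bit_shift = 0;

		while ((hash_mask >> bit_shift) != 0xFF)
			bit_shift++;
		return hash_mask & ((mc[4] >> (8 - bit_shift)) |
				    ((uint16_t)mc[5] << bit_shift));
	}

	int main(void)
	{
		const uint8_t mc[6] = { 0x01, 0xAA, 0x00, 0x12, 0x34, 0x56 };

		assert(mc_hash_sketch(mc, 128) == 0x563);
		return 0;
	}
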
+
+/**
+ * igc_update_mc_addr_list - Update Multicast addresses
+ * @hw: pointer to the HW structure
+ * @mc_addr_list: array of multicast addresses to program
+ * @mc_addr_count: number of multicast addresses to program
+ *
+ * Updates entire Multicast Table Array.
+ * The caller must have a packed mc_addr_list of multicast addresses.
+ **/
+void igc_update_mc_addr_list(struct igc_hw *hw,
+ u8 *mc_addr_list, u32 mc_addr_count)
+{
+ u32 hash_value, hash_bit, hash_reg;
+ int i;
+
+ /* clear mta_shadow */
+ memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
+
+ /* update mta_shadow from mc_addr_list */
+ for (i = 0; (u32)i < mc_addr_count; i++) {
+ hash_value = igc_hash_mc_addr(hw, mc_addr_list);
+
+ hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
+ hash_bit = hash_value & 0x1F;
+
+ hw->mac.mta_shadow[hash_reg] |= BIT(hash_bit);
+ mc_addr_list += ETH_ALEN;
+ }
+
+ /* replace the entire MTA table */
+ for (i = hw->mac.mta_reg_count - 1; i >= 0; i--)
+ array_wr32(IGC_MTA, i, hw->mac.mta_shadow[i]);
+ wrfl();
+}
diff --git a/drivers/net/ethernet/intel/igc/igc_mac.h b/drivers/net/ethernet/intel/igc/igc_mac.h
index 782bc995badc..832cccec87cd 100644
--- a/drivers/net/ethernet/intel/igc/igc_mac.h
+++ b/drivers/net/ethernet/intel/igc/igc_mac.h
@@ -29,6 +29,8 @@ s32 igc_get_speed_and_duplex_copper(struct igc_hw *hw, u16 *speed,
u16 *duplex);
bool igc_enable_mng_pass_thru(struct igc_hw *hw);
+void igc_update_mc_addr_list(struct igc_hw *hw,
+ u8 *mc_addr_list, u32 mc_addr_count);
enum igc_mng_mode {
igc_mng_mode_none = 0,
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 24888676f69b..9700527dd797 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -795,6 +795,44 @@ static int igc_set_mac(struct net_device *netdev, void *p)
return 0;
}
+/**
+ * igc_write_mc_addr_list - write multicast addresses to MTA
+ * @netdev: network interface device structure
+ *
+ * Writes multicast address list to the MTA hash table.
+ * Returns: -ENOMEM on failure
+ * 0 on no addresses written
+ * X on writing X addresses to MTA
+ **/
+static int igc_write_mc_addr_list(struct net_device *netdev)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ struct igc_hw *hw = &adapter->hw;
+ struct netdev_hw_addr *ha;
+ u8 *mta_list;
+ int i;
+
+ if (netdev_mc_empty(netdev)) {
+ /* nothing to program, so clear mc list */
+ igc_update_mc_addr_list(hw, NULL, 0);
+ return 0;
+ }
+
+	mta_list = kcalloc(netdev_mc_count(netdev), ETH_ALEN, GFP_ATOMIC);
+ if (!mta_list)
+ return -ENOMEM;
+
+ /* The shared function expects a packed array of only addresses. */
+ i = 0;
+ netdev_for_each_mc_addr(ha, netdev)
+ memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
+
+ igc_update_mc_addr_list(hw, mta_list, i);
+ kfree(mta_list);
+
+ return netdev_mc_count(netdev);
+}
+
static void igc_tx_ctxtdesc(struct igc_ring *tx_ring,
struct igc_tx_buffer *first,
u32 vlan_macip_lens, u32 type_tucmd,
@@ -1163,6 +1201,46 @@ static netdev_tx_t igc_xmit_frame(struct sk_buff *skb,
return igc_xmit_frame_ring(skb, igc_tx_queue_mapping(adapter, skb));
}
+static void igc_rx_checksum(struct igc_ring *ring,
+ union igc_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ skb_checksum_none_assert(skb);
+
+ /* Ignore Checksum bit is set */
+ if (igc_test_staterr(rx_desc, IGC_RXD_STAT_IXSM))
+ return;
+
+ /* Rx checksum disabled via ethtool */
+ if (!(ring->netdev->features & NETIF_F_RXCSUM))
+ return;
+
+ /* TCP/UDP checksum error bit is set */
+ if (igc_test_staterr(rx_desc,
+ IGC_RXDEXT_STATERR_TCPE |
+ IGC_RXDEXT_STATERR_IPE)) {
+ /* work around errata with sctp packets where the TCPE aka
+ * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
+ * packets (aka let the stack check the crc32c)
+ */
+ if (!(skb->len == 60 &&
+ test_bit(IGC_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) {
+ u64_stats_update_begin(&ring->rx_syncp);
+ ring->rx_stats.csum_err++;
+ u64_stats_update_end(&ring->rx_syncp);
+ }
+ /* let the stack verify checksum errors */
+ return;
+ }
+ /* It must be a TCP or UDP packet with a valid checksum */
+ if (igc_test_staterr(rx_desc, IGC_RXD_STAT_TCPCS |
+ IGC_RXD_STAT_UDPCS))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ dev_dbg(ring->dev, "cksum success: bits %08X\n",
+ le32_to_cpu(rx_desc->wb.upper.status_error));
+}
+
static inline void igc_rx_hash(struct igc_ring *ring,
union igc_adv_rx_desc *rx_desc,
struct sk_buff *skb)
@@ -1189,6 +1267,8 @@ static void igc_process_skb_fields(struct igc_ring *rx_ring,
{
igc_rx_hash(rx_ring, rx_desc, skb);
+ igc_rx_checksum(rx_ring, rx_desc, skb);
+
skb_record_rx_queue(skb, rx_ring->queue_index);
skb->protocol = eth_type_trans(skb, rx_ring->netdev);
@@ -2192,7 +2272,6 @@ static int igc_change_mtu(struct net_device *netdev, int new_mtu)
{
int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
struct igc_adapter *adapter = netdev_priv(netdev);
- struct pci_dev *pdev = adapter->pdev;
/* adjust max frame to be at least the size of a standard frame */
if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
@@ -2207,8 +2286,8 @@ static int igc_change_mtu(struct net_device *netdev, int new_mtu)
if (netif_running(netdev))
igc_down(adapter);
- dev_info(&pdev->dev, "changing MTU from %d to %d\n",
- netdev->mtu, new_mtu);
+ netdev_dbg(netdev, "changing MTU from %d to %d\n",
+ netdev->mtu, new_mtu);
netdev->mtu = new_mtu;
if (netif_running(netdev))
@@ -2518,6 +2597,110 @@ int igc_del_mac_steering_filter(struct igc_adapter *adapter,
IGC_MAC_STATE_QUEUE_STEERING | flags);
}
+/* Add a MAC filter for 'addr' directing matching traffic to 'queue'.
+ * The match is made on the destination address of incoming frames.
+ */
+static int igc_add_mac_filter(struct igc_adapter *adapter,
+ const u8 *addr, const u8 queue)
+{
+ struct igc_hw *hw = &adapter->hw;
+ int rar_entries = hw->mac.rar_entry_count;
+ int i;
+
+ if (is_zero_ether_addr(addr))
+ return -EINVAL;
+
+ /* Search for the first empty entry in the MAC table.
+ * Do not touch entries at the end of the table reserved for the VF MAC
+ * addresses.
+ */
+ for (i = 0; i < rar_entries; i++) {
+ if (!igc_mac_entry_can_be_used(&adapter->mac_table[i],
+ addr, 0))
+ continue;
+
+ ether_addr_copy(adapter->mac_table[i].addr, addr);
+ adapter->mac_table[i].queue = queue;
+ adapter->mac_table[i].state |= IGC_MAC_STATE_IN_USE;
+
+ igc_rar_set_index(adapter, i);
+ return i;
+ }
+
+ return -ENOSPC;
+}
+
+/* Remove the MAC filter for 'addr' directing matching traffic to
+ * 'queue'. The match is made on the destination address.
+ */
+static int igc_del_mac_filter(struct igc_adapter *adapter,
+ const u8 *addr, const u8 queue)
+{
+ struct igc_hw *hw = &adapter->hw;
+ int rar_entries = hw->mac.rar_entry_count;
+ int i;
+
+ if (is_zero_ether_addr(addr))
+ return -EINVAL;
+
+ /* Search for matching entry in the MAC table based on given address
+ * and queue. Do not touch entries at the end of the table reserved
+ * for the VF MAC addresses.
+ */
+ for (i = 0; i < rar_entries; i++) {
+ if (!(adapter->mac_table[i].state & IGC_MAC_STATE_IN_USE))
+ continue;
+ if (adapter->mac_table[i].queue != queue)
+ continue;
+ if (!ether_addr_equal(adapter->mac_table[i].addr, addr))
+ continue;
+
+ /* When a filter for the default address is "deleted",
+ * we return it to its initial configuration
+ */
+ if (adapter->mac_table[i].state & IGC_MAC_STATE_DEFAULT) {
+ adapter->mac_table[i].state =
+ IGC_MAC_STATE_DEFAULT | IGC_MAC_STATE_IN_USE;
+ adapter->mac_table[i].queue = 0;
+ } else {
+ adapter->mac_table[i].state = 0;
+ adapter->mac_table[i].queue = 0;
+ memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
+ }
+
+ igc_rar_set_index(adapter, i);
+ return 0;
+ }
+
+ return -ENOENT;
+}
+
+static int igc_uc_sync(struct net_device *netdev, const unsigned char *addr)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ int ret;
+
+ ret = igc_add_mac_filter(adapter, addr, adapter->num_rx_queues);
+
+ return min_t(int, ret, 0);
+}
+
+static int igc_uc_unsync(struct net_device *netdev, const unsigned char *addr)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+
+ igc_del_mac_filter(adapter, addr, adapter->num_rx_queues);
+
+ return 0;
+}
+
/**
* igc_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
* @netdev: network interface device structure
@@ -2529,6 +2712,44 @@ int igc_del_mac_steering_filter(struct igc_adapter *adapter,
*/
static void igc_set_rx_mode(struct net_device *netdev)
{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ struct igc_hw *hw = &adapter->hw;
+ u32 rctl = 0, rlpml = MAX_JUMBO_FRAME_SIZE;
+ int count;
+
+ /* Check for Promiscuous and All Multicast modes */
+ if (netdev->flags & IFF_PROMISC) {
+ rctl |= IGC_RCTL_UPE | IGC_RCTL_MPE;
+ } else {
+ if (netdev->flags & IFF_ALLMULTI) {
+ rctl |= IGC_RCTL_MPE;
+ } else {
+ /* Write addresses to the MTA, if the attempt fails
+ * then we should just turn on promiscuous mode so
+ * that we can at least receive multicast traffic
+ */
+ count = igc_write_mc_addr_list(netdev);
+ if (count < 0)
+ rctl |= IGC_RCTL_MPE;
+ }
+ }
+
+ /* Write addresses to available RAR registers, if there is not
+ * sufficient space to store all the addresses then enable
+ * unicast promiscuous mode
+ */
+ if (__dev_uc_sync(netdev, igc_uc_sync, igc_uc_unsync))
+ rctl |= IGC_RCTL_UPE;
+
+ /* update state of unicast and multicast */
+ rctl |= rd32(IGC_RCTL) & ~(IGC_RCTL_UPE | IGC_RCTL_MPE);
+ wr32(IGC_RCTL, rctl);
+
+#if (PAGE_SIZE < 8192)
+ if (adapter->max_frame_size <= IGC_MAX_FRAME_BUILD_SKB)
+ rlpml = IGC_MAX_FRAME_BUILD_SKB;
+#endif
+ wr32(IGC_RLPML, rlpml);
}
/**
@@ -3982,6 +4203,7 @@ static const struct net_device_ops igc_netdev_ops = {
.ndo_open = igc_open,
.ndo_stop = igc_close,
.ndo_start_xmit = igc_xmit_frame,
+ .ndo_set_rx_mode = igc_set_rx_mode,
.ndo_set_mac_address = igc_set_mac,
.ndo_change_mtu = igc_change_mtu,
.ndo_get_stats = igc_get_stats,
@@ -4211,7 +4433,9 @@ static int igc_probe(struct pci_dev *pdev,
goto err_sw_init;
/* Add supported features to the features list*/
+ netdev->features |= NETIF_F_RXCSUM;
netdev->features |= NETIF_F_HW_CSUM;
+ netdev->features |= NETIF_F_SCTP_CRC;
/* setup the private structure */
err = igc_sw_init(adapter);
@@ -4349,7 +4573,6 @@ static void igc_remove(struct pci_dev *pdev)
pci_release_mem_regions(pdev);
kfree(adapter->mac_table);
- kfree(adapter->shadow_vfta);
free_netdev(netdev);
pci_disable_pcie_error_reporting(pdev);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index cc3196ae5aea..fd9f5d41b594 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -832,9 +832,9 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
int xdp_count, int xdp_idx,
int rxr_count, int rxr_idx)
{
+ int node = dev_to_node(&adapter->pdev->dev);
struct ixgbe_q_vector *q_vector;
struct ixgbe_ring *ring;
- int node = NUMA_NO_NODE;
int cpu = -1;
int ring_count;
u8 tcs = adapter->hw_tcs;
@@ -845,10 +845,8 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
if ((tcs <= 1) && !(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) {
u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
if (rss_i > 1 && adapter->atr_sample_rate) {
- if (cpu_online(v_idx)) {
- cpu = v_idx;
- node = cpu_to_node(cpu);
- }
+ cpu = cpumask_local_spread(v_idx, node);
+ node = cpu_to_node(cpu);
}
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 91b3780ddb04..25c097cd8100 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -6725,7 +6725,8 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
(new_mtu > ETH_DATA_LEN))
e_warn(probe, "Setting MTU > 1500 will disable legacy VFs\n");
- e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);
+ netdev_dbg(netdev, "changing MTU from %d to %d\n",
+ netdev->mtu, new_mtu);
/* must set new MTU before calling down or up */
netdev->mtu = new_mtu;
@@ -7945,6 +7946,7 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
} ip;
union {
struct tcphdr *tcp;
+ struct udphdr *udp;
unsigned char *hdr;
} l4;
u32 paylen, l4_offset;
@@ -7968,7 +7970,8 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
l4.hdr = skb_checksum_start(skb);
/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
- type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
+ type_tucmd = (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) ?
+ IXGBE_ADVTXD_TUCMD_L4T_UDP : IXGBE_ADVTXD_TUCMD_L4T_TCP;
/* initialize outer IP header fields */
if (ip.v4->version == 4) {
@@ -7998,12 +8001,20 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
/* determine offset of inner transport header */
l4_offset = l4.hdr - skb->data;
- /* compute length of segmentation header */
- *hdr_len = (l4.tcp->doff * 4) + l4_offset;
-
/* remove payload length from inner checksum */
paylen = skb->len - l4_offset;
- csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));
+
+ if (type_tucmd & IXGBE_ADVTXD_TUCMD_L4T_TCP) {
+ /* compute length of segmentation header */
+ *hdr_len = (l4.tcp->doff * 4) + l4_offset;
+ csum_replace_by_diff(&l4.tcp->check,
+ (__force __wsum)htonl(paylen));
+ } else {
+ /* compute length of segmentation header */
+ *hdr_len = sizeof(*l4.udp) + l4_offset;
+ csum_replace_by_diff(&l4.udp->check,
+ (__force __wsum)htonl(paylen));
+ }
/* update gso size and bytecount with header size */
first->gso_segs = skb_shinfo(skb)->gso_segs;
@@ -8639,7 +8650,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
adapter->ptp_clock) {
- if (!test_and_set_bit_lock(__IXGBE_PTP_TX_IN_PROGRESS,
+ if (adapter->tstamp_config.tx_type == HWTSTAMP_TX_ON &&
+ !test_and_set_bit_lock(__IXGBE_PTP_TX_IN_PROGRESS,
&adapter->state)) {
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
tx_flags |= IXGBE_TX_FLAGS_TSTAMP;
@@ -10189,6 +10201,7 @@ ixgbe_features_check(struct sk_buff *skb, struct net_device *dev,
if (unlikely(mac_hdr_len > IXGBE_MAX_MAC_HDR_LEN))
return features & ~(NETIF_F_HW_CSUM |
NETIF_F_SCTP_CRC |
+ NETIF_F_GSO_UDP_L4 |
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_TSO |
NETIF_F_TSO6);
@@ -10197,6 +10210,7 @@ ixgbe_features_check(struct sk_buff *skb, struct net_device *dev,
if (unlikely(network_hdr_len > IXGBE_MAX_NETWORK_HDR_LEN))
return features & ~(NETIF_F_HW_CSUM |
NETIF_F_SCTP_CRC |
+ NETIF_F_GSO_UDP_L4 |
NETIF_F_TSO |
NETIF_F_TSO6);
@@ -10906,7 +10920,7 @@ skip_sriov:
IXGBE_GSO_PARTIAL_FEATURES;
if (hw->mac.type >= ixgbe_mac_82599EB)
- netdev->features |= NETIF_F_SCTP_CRC;
+ netdev->features |= NETIF_F_SCTP_CRC | NETIF_F_GSO_UDP_L4;
#ifdef CONFIG_IXGBE_IPSEC
#define IXGBE_ESP_FEATURES (NETIF_F_HW_ESP | \
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
index fb942167ee54..3d5caea096fb 100644
--- a/drivers/net/ethernet/marvell/Kconfig
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -61,6 +61,7 @@ config MVNETA
depends on ARCH_MVEBU || COMPILE_TEST
select MVMDIO
select PHYLINK
+ select PAGE_POOL
---help---
This driver supports the network interface units in the
Marvell ARMADA XP, ARMADA 370, ARMADA 38x and
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 82ea55ae5053..d5b644131cff 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -2959,15 +2959,16 @@ static void set_params(struct mv643xx_eth_private *mp,
static int get_phy_mode(struct mv643xx_eth_private *mp)
{
struct device *dev = mp->dev->dev.parent;
- int iface = -1;
+ phy_interface_t iface;
+ int err;
if (dev->of_node)
- iface = of_get_phy_mode(dev->of_node);
+ err = of_get_phy_mode(dev->of_node, &iface);
/* Historical default if unspecified. We could also read/write
* the interface state in the PSC1
*/
- if (iface < 0)
+ if (!dev->of_node || err)
iface = PHY_INTERFACE_MODE_GMII;
return iface;
}
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index e49820675c8c..71a872d46bc4 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -37,6 +37,8 @@
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/tso.h>
+#include <net/page_pool.h>
+#include <linux/bpf_trace.h>
/* Registers */
#define MVNETA_RXQ_CONFIG_REG(q) (0x1400 + ((q) << 2))
@@ -322,6 +324,13 @@
ETH_HLEN + ETH_FCS_LEN, \
cache_line_size())
+#define MVNETA_SKB_HEADROOM (max(XDP_PACKET_HEADROOM, NET_SKB_PAD) + \
+ NET_IP_ALIGN)
+#define MVNETA_SKB_PAD (SKB_DATA_ALIGN(sizeof(struct skb_shared_info) + \
+ MVNETA_SKB_HEADROOM))
+#define MVNETA_SKB_SIZE(len) (SKB_DATA_ALIGN(len) + MVNETA_SKB_PAD)
+#define MVNETA_MAX_RX_BUF_SIZE (PAGE_SIZE - MVNETA_SKB_PAD)
+
#define IS_TSO_HEADER(txq, addr) \
((addr >= txq->tso_hdrs_phys) && \
(addr < txq->tso_hdrs_phys + txq->size * TSO_HEADER_SIZE))
@@ -346,6 +355,11 @@ struct mvneta_statistic {
#define T_REG_64 64
#define T_SW 1
+#define MVNETA_XDP_PASS BIT(0)
+#define MVNETA_XDP_DROPPED BIT(1)
+#define MVNETA_XDP_TX BIT(2)
+#define MVNETA_XDP_REDIR BIT(3)
+
static const struct mvneta_statistic mvneta_statistics[] = {
{ 0x3000, T_REG_64, "good_octets_received", },
{ 0x3010, T_REG_32, "good_frames_received", },
@@ -425,6 +439,8 @@ struct mvneta_port {
u32 cause_rx_tx;
struct napi_struct napi;
+ struct bpf_prog *xdp_prog;
+
/* Core clock */
struct clk *clk;
/* AXI clock */
@@ -545,6 +561,20 @@ struct mvneta_rx_desc {
};
#endif
+enum mvneta_tx_buf_type {
+ MVNETA_TYPE_SKB,
+ MVNETA_TYPE_XDP_TX,
+ MVNETA_TYPE_XDP_NDO,
+};
+
+struct mvneta_tx_buf {
+ enum mvneta_tx_buf_type type;
+ union {
+ struct xdp_frame *xdpf;
+ struct sk_buff *skb;
+ };
+};
+
struct mvneta_tx_queue {
/* Number of this TX queue, in the range 0-7 */
u8 id;
@@ -560,8 +590,8 @@ struct mvneta_tx_queue {
int tx_stop_threshold;
int tx_wake_threshold;
- /* Array of transmitted skb */
- struct sk_buff **tx_skb;
+ /* Array of transmitted buffers */
+ struct mvneta_tx_buf *buf;
/* Index of last TX DMA descriptor that was inserted */
int txq_put_index;
@@ -603,6 +633,10 @@ struct mvneta_rx_queue {
u32 pkts_coal;
u32 time_coal;
+ /* page_pool */
+ struct page_pool *page_pool;
+ struct xdp_rxq_info xdp_rxq;
+
/* Virtual address of the RX buffer */
void **buf_virt_addr;
@@ -641,7 +675,6 @@ static int txq_number = 8;
static int rxq_def;
static int rx_copybreak __read_mostly = 256;
-static int rx_header_size __read_mostly = 128;
/* HW BM needs each port to be identified by a unique ID */
static int global_port_id;
@@ -1761,24 +1794,25 @@ static void mvneta_txq_bufs_free(struct mvneta_port *pp,
int i;
for (i = 0; i < num; i++) {
+ struct mvneta_tx_buf *buf = &txq->buf[txq->txq_get_index];
struct mvneta_tx_desc *tx_desc = txq->descs +
txq->txq_get_index;
- struct sk_buff *skb = txq->tx_skb[txq->txq_get_index];
-
- if (skb) {
- bytes_compl += skb->len;
- pkts_compl++;
- }
mvneta_txq_inc_get(txq);
- if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
+ if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr) &&
+ buf->type != MVNETA_TYPE_XDP_TX)
dma_unmap_single(pp->dev->dev.parent,
tx_desc->buf_phys_addr,
tx_desc->data_size, DMA_TO_DEVICE);
- if (!skb)
- continue;
- dev_kfree_skb_any(skb);
+ if (buf->type == MVNETA_TYPE_SKB && buf->skb) {
+ bytes_compl += buf->skb->len;
+ pkts_compl++;
+ dev_kfree_skb_any(buf->skb);
+ } else if (buf->type == MVNETA_TYPE_XDP_TX ||
+ buf->type == MVNETA_TYPE_XDP_NDO) {
+ xdp_return_frame(buf->xdpf);
+ }
}
netdev_tx_completed_queue(nq, pkts_compl, bytes_compl);
@@ -1815,20 +1849,14 @@ static int mvneta_rx_refill(struct mvneta_port *pp,
dma_addr_t phys_addr;
struct page *page;
- page = __dev_alloc_page(gfp_mask);
+ page = page_pool_alloc_pages(rxq->page_pool,
+ gfp_mask | __GFP_NOWARN);
if (!page)
return -ENOMEM;
- /* map page for use */
- phys_addr = dma_map_page(pp->dev->dev.parent, page, 0, PAGE_SIZE,
- DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(pp->dev->dev.parent, phys_addr))) {
- __free_page(page);
- return -ENOMEM;
- }
-
- phys_addr += pp->rx_offset_correction;
+ phys_addr = page_pool_get_dma_addr(page) + pp->rx_offset_correction;
mvneta_rx_desc_fill(rx_desc, phys_addr, page, rxq);
+
return 0;
}
@@ -1894,10 +1922,29 @@ static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
if (!data || !(rx_desc->buf_phys_addr))
continue;
- dma_unmap_page(pp->dev->dev.parent, rx_desc->buf_phys_addr,
- PAGE_SIZE, DMA_FROM_DEVICE);
- __free_page(data);
+ page_pool_put_page(rxq->page_pool, data, false);
+ }
+ if (xdp_rxq_info_is_reg(&rxq->xdp_rxq))
+ xdp_rxq_info_unreg(&rxq->xdp_rxq);
+ page_pool_destroy(rxq->page_pool);
+ rxq->page_pool = NULL;
+}
+
+static void
+mvneta_update_stats(struct mvneta_port *pp, u32 pkts,
+ u32 len, bool tx)
+{
+ struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
+
+ u64_stats_update_begin(&stats->syncp);
+ if (tx) {
+ stats->tx_packets += pkts;
+ stats->tx_bytes += len;
+ } else {
+ stats->rx_packets += pkts;
+ stats->rx_bytes += len;
}
+ u64_stats_update_end(&stats->syncp);
}
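mvneta_update_stats() is the writer side of the usual u64_stats seqcount pattern; the matching reader, sketched below, is not part of this patch (the function name is hypothetical) but shows how a get_stats path would snapshot these counters consistently:

/* Reader side of the u64_stats pattern (illustrative sketch only). */
static void example_read_rx_stats(struct mvneta_pcpu_stats *stats,
				  u64 *rx_pkts, u64 *rx_bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(&stats->syncp);
		*rx_pkts  = stats->rx_packets;
		*rx_bytes = stats->rx_bytes;
	} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
}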
static inline
@@ -1925,43 +1972,300 @@ int mvneta_rx_refill_queue(struct mvneta_port *pp, struct mvneta_rx_queue *rxq)
return i;
}
+static int
+mvneta_xdp_submit_frame(struct mvneta_port *pp, struct mvneta_tx_queue *txq,
+ struct xdp_frame *xdpf, bool dma_map)
+{
+ struct mvneta_tx_desc *tx_desc;
+ struct mvneta_tx_buf *buf;
+ dma_addr_t dma_addr;
+
+ if (txq->count >= txq->tx_stop_threshold)
+ return MVNETA_XDP_DROPPED;
+
+ tx_desc = mvneta_txq_next_desc_get(txq);
+
+ buf = &txq->buf[txq->txq_put_index];
+ if (dma_map) {
+ /* ndo_xdp_xmit */
+ dma_addr = dma_map_single(pp->dev->dev.parent, xdpf->data,
+ xdpf->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(pp->dev->dev.parent, dma_addr)) {
+ mvneta_txq_desc_put(txq);
+ return MVNETA_XDP_DROPPED;
+ }
+ buf->type = MVNETA_TYPE_XDP_NDO;
+ } else {
+ struct page *page = virt_to_page(xdpf->data);
+
+ dma_addr = page_pool_get_dma_addr(page) +
+ sizeof(*xdpf) + xdpf->headroom;
+ dma_sync_single_for_device(pp->dev->dev.parent, dma_addr,
+ xdpf->len, DMA_BIDIRECTIONAL);
+ buf->type = MVNETA_TYPE_XDP_TX;
+ }
+ buf->xdpf = xdpf;
+
+ tx_desc->command = MVNETA_TXD_FLZ_DESC;
+ tx_desc->buf_phys_addr = dma_addr;
+ tx_desc->data_size = xdpf->len;
+
+ mvneta_update_stats(pp, 1, xdpf->len, true);
+ mvneta_txq_inc_put(txq);
+ txq->pending++;
+ txq->count++;
+
+ return MVNETA_XDP_TX;
+}
+
+static int
+mvneta_xdp_xmit_back(struct mvneta_port *pp, struct xdp_buff *xdp)
+{
+ struct mvneta_tx_queue *txq;
+ struct netdev_queue *nq;
+ struct xdp_frame *xdpf;
+ int cpu;
+ u32 ret;
+
+ xdpf = convert_to_xdp_frame(xdp);
+ if (unlikely(!xdpf))
+ return MVNETA_XDP_DROPPED;
+
+ cpu = smp_processor_id();
+ txq = &pp->txqs[cpu % txq_number];
+ nq = netdev_get_tx_queue(pp->dev, txq->id);
+
+ __netif_tx_lock(nq, cpu);
+ ret = mvneta_xdp_submit_frame(pp, txq, xdpf, false);
+ if (ret == MVNETA_XDP_TX)
+ mvneta_txq_pend_desc_add(pp, txq, 0);
+ __netif_tx_unlock(nq);
+
+ return ret;
+}
+
+static int
+mvneta_xdp_xmit(struct net_device *dev, int num_frame,
+ struct xdp_frame **frames, u32 flags)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
+ int cpu = smp_processor_id();
+ struct mvneta_tx_queue *txq;
+ struct netdev_queue *nq;
+ int i, drops = 0;
+ u32 ret;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+ txq = &pp->txqs[cpu % txq_number];
+ nq = netdev_get_tx_queue(pp->dev, txq->id);
+
+ __netif_tx_lock(nq, cpu);
+ for (i = 0; i < num_frame; i++) {
+ ret = mvneta_xdp_submit_frame(pp, txq, frames[i], true);
+ if (ret != MVNETA_XDP_TX) {
+ xdp_return_frame_rx_napi(frames[i]);
+ drops++;
+ }
+ }
+
+ if (unlikely(flags & XDP_XMIT_FLUSH))
+ mvneta_txq_pend_desc_add(pp, txq, 0);
+ __netif_tx_unlock(nq);
+
+ return num_frame - drops;
+}
+
+static int
+mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
+ struct bpf_prog *prog, struct xdp_buff *xdp)
+{
+ u32 ret, act = bpf_prog_run_xdp(prog, xdp);
+
+ switch (act) {
+ case XDP_PASS:
+ ret = MVNETA_XDP_PASS;
+ break;
+ case XDP_REDIRECT: {
+ int err;
+
+ err = xdp_do_redirect(pp->dev, xdp, prog);
+ if (err) {
+ ret = MVNETA_XDP_DROPPED;
+ __page_pool_put_page(rxq->page_pool,
+ virt_to_head_page(xdp->data),
+ xdp->data_end - xdp->data_hard_start,
+ true);
+ } else {
+ ret = MVNETA_XDP_REDIR;
+ }
+ break;
+ }
+ case XDP_TX:
+ ret = mvneta_xdp_xmit_back(pp, xdp);
+ if (ret != MVNETA_XDP_TX)
+ __page_pool_put_page(rxq->page_pool,
+ virt_to_head_page(xdp->data),
+ xdp->data_end - xdp->data_hard_start,
+ true);
+ break;
+ default:
+ bpf_warn_invalid_xdp_action(act);
+ /* fall through */
+ case XDP_ABORTED:
+ trace_xdp_exception(pp->dev, prog, act);
+ /* fall through */
+ case XDP_DROP:
+ __page_pool_put_page(rxq->page_pool,
+ virt_to_head_page(xdp->data),
+ xdp->data_end - xdp->data_hard_start,
+ true);
+ ret = MVNETA_XDP_DROPPED;
+ break;
+ }
+
+ return ret;
+}
+
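The MVNETA_XDP_* verdicts above correspond one-to-one to the return codes of the attached BPF program. A minimal program exercising this path might look like the following sketch (not part of the patch):

// SPDX-License-Identifier: GPL-2.0
/* Drop runt frames, pass everything else (illustrative only). */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_drop_runts(struct xdp_md *ctx)
{
	void *data     = (void *)(long)ctx->data;
	void *data_end = (void *)(long)ctx->data_end;

	if (data + 14 > data_end)	/* shorter than an Ethernet header */
		return XDP_DROP;
	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";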
+static int
+mvneta_swbm_rx_frame(struct mvneta_port *pp,
+ struct mvneta_rx_desc *rx_desc,
+ struct mvneta_rx_queue *rxq,
+ struct xdp_buff *xdp,
+ struct bpf_prog *xdp_prog,
+ struct page *page, u32 *xdp_ret)
+{
+ unsigned char *data = page_address(page);
+ int data_len = -MVNETA_MH_SIZE, len;
+ struct net_device *dev = pp->dev;
+ enum dma_data_direction dma_dir;
+
+ if (MVNETA_SKB_SIZE(rx_desc->data_size) > PAGE_SIZE) {
+ len = MVNETA_MAX_RX_BUF_SIZE;
+ data_len += len;
+ } else {
+ len = rx_desc->data_size;
+ data_len += len - ETH_FCS_LEN;
+ }
+
+ dma_dir = page_pool_get_dma_dir(rxq->page_pool);
+ dma_sync_single_for_cpu(dev->dev.parent,
+ rx_desc->buf_phys_addr,
+ len, dma_dir);
+
+ /* Prefetch header */
+ prefetch(data);
+
+ xdp->data_hard_start = data;
+ xdp->data = data + pp->rx_offset_correction + MVNETA_MH_SIZE;
+ xdp->data_end = xdp->data + data_len;
+ xdp_set_data_meta_invalid(xdp);
+
+ if (xdp_prog) {
+ u32 ret;
+
+ ret = mvneta_run_xdp(pp, rxq, xdp_prog, xdp);
+ if (ret != MVNETA_XDP_PASS) {
+ mvneta_update_stats(pp, 1,
+ xdp->data_end - xdp->data,
+ false);
+ rx_desc->buf_phys_addr = 0;
+ *xdp_ret |= ret;
+ return ret;
+ }
+ }
+
+ rxq->skb = build_skb(xdp->data_hard_start, PAGE_SIZE);
+ if (unlikely(!rxq->skb)) {
+ netdev_err(dev,
+ "Can't allocate skb on queue %d\n",
+ rxq->id);
+ dev->stats.rx_dropped++;
+ rxq->skb_alloc_err++;
+ return -ENOMEM;
+ }
+ page_pool_release_page(rxq->page_pool, page);
+
+ skb_reserve(rxq->skb,
+ xdp->data - xdp->data_hard_start);
+ skb_put(rxq->skb, xdp->data_end - xdp->data);
+ mvneta_rx_csum(pp, rx_desc->status, rxq->skb);
+
+ rxq->left_size = rx_desc->data_size - len;
+ rx_desc->buf_phys_addr = 0;
+
+ return 0;
+}
+
+static void
+mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,
+ struct mvneta_rx_desc *rx_desc,
+ struct mvneta_rx_queue *rxq,
+ struct page *page)
+{
+ struct net_device *dev = pp->dev;
+ enum dma_data_direction dma_dir;
+ int data_len, len;
+
+ if (rxq->left_size > MVNETA_MAX_RX_BUF_SIZE) {
+ len = MVNETA_MAX_RX_BUF_SIZE;
+ data_len = len;
+ } else {
+ len = rxq->left_size;
+ data_len = len - ETH_FCS_LEN;
+ }
+ dma_dir = page_pool_get_dma_dir(rxq->page_pool);
+ dma_sync_single_for_cpu(dev->dev.parent,
+ rx_desc->buf_phys_addr,
+ len, dma_dir);
+ if (data_len > 0) {
+ /* refill descriptor with new buffer later */
+ skb_add_rx_frag(rxq->skb,
+ skb_shinfo(rxq->skb)->nr_frags,
+ page, pp->rx_offset_correction, data_len,
+ PAGE_SIZE);
+ }
+ page_pool_release_page(rxq->page_pool, page);
+ rx_desc->buf_phys_addr = 0;
+ rxq->left_size -= len;
+}
+
/* Main rx processing when using software buffer management */
static int mvneta_rx_swbm(struct napi_struct *napi,
struct mvneta_port *pp, int budget,
struct mvneta_rx_queue *rxq)
{
+ int rcvd_pkts = 0, rcvd_bytes = 0, rx_proc = 0;
struct net_device *dev = pp->dev;
- int rx_todo, rx_proc;
- int refill = 0;
- u32 rcvd_pkts = 0;
- u32 rcvd_bytes = 0;
+ struct bpf_prog *xdp_prog;
+ struct xdp_buff xdp_buf;
+ int rx_todo, refill;
+ u32 xdp_ret = 0;
/* Get number of received packets */
rx_todo = mvneta_rxq_busy_desc_num_get(pp, rxq);
- rx_proc = 0;
+
+ rcu_read_lock();
+ xdp_prog = READ_ONCE(pp->xdp_prog);
+ xdp_buf.rxq = &rxq->xdp_rxq;
/* Fairness NAPI loop */
- while ((rcvd_pkts < budget) && (rx_proc < rx_todo)) {
+ while (rx_proc < budget && rx_proc < rx_todo) {
struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
- unsigned char *data;
- struct page *page;
- dma_addr_t phys_addr;
u32 rx_status, index;
- int rx_bytes, skb_size, copy_size;
- int frag_num, frag_size, frag_offset;
+ struct page *page;
index = rx_desc - rxq->descs;
page = (struct page *)rxq->buf_virt_addr[index];
- data = page_address(page);
- /* Prefetch header */
- prefetch(data);
- phys_addr = rx_desc->buf_phys_addr;
rx_status = rx_desc->status;
rx_proc++;
rxq->refill_num++;
if (rx_status & MVNETA_RXD_FIRST_DESC) {
+ int err;
+
/* Check errors only for FIRST descriptor */
if (rx_status & MVNETA_RXD_ERR_SUMMARY) {
mvneta_rx_error(pp, rx_desc);
@@ -1969,85 +2273,18 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
/* leave the descriptor untouched */
continue;
}
- rx_bytes = rx_desc->data_size -
- (ETH_FCS_LEN + MVNETA_MH_SIZE);
- /* Allocate small skb for each new packet */
- skb_size = max(rx_copybreak, rx_header_size);
- rxq->skb = netdev_alloc_skb_ip_align(dev, skb_size);
- if (unlikely(!rxq->skb)) {
- netdev_err(dev,
- "Can't allocate skb on queue %d\n",
- rxq->id);
- dev->stats.rx_dropped++;
- rxq->skb_alloc_err++;
+ err = mvneta_swbm_rx_frame(pp, rx_desc, rxq, &xdp_buf,
+ xdp_prog, page, &xdp_ret);
+ if (err)
continue;
- }
- copy_size = min(skb_size, rx_bytes);
-
- /* Copy data from buffer to SKB, skip Marvell header */
- memcpy(rxq->skb->data, data + MVNETA_MH_SIZE,
- copy_size);
- skb_put(rxq->skb, copy_size);
- rxq->left_size = rx_bytes - copy_size;
-
- mvneta_rx_csum(pp, rx_status, rxq->skb);
- if (rxq->left_size == 0) {
- int size = copy_size + MVNETA_MH_SIZE;
-
- dma_sync_single_range_for_cpu(dev->dev.parent,
- phys_addr, 0,
- size,
- DMA_FROM_DEVICE);
-
- /* leave the descriptor and buffer untouched */
- } else {
- /* refill descriptor with new buffer later */
- rx_desc->buf_phys_addr = 0;
-
- frag_num = 0;
- frag_offset = copy_size + MVNETA_MH_SIZE;
- frag_size = min(rxq->left_size,
- (int)(PAGE_SIZE - frag_offset));
- skb_add_rx_frag(rxq->skb, frag_num, page,
- frag_offset, frag_size,
- PAGE_SIZE);
- dma_unmap_page(dev->dev.parent, phys_addr,
- PAGE_SIZE, DMA_FROM_DEVICE);
- rxq->left_size -= frag_size;
- }
} else {
- /* Middle or Last descriptor */
if (unlikely(!rxq->skb)) {
pr_debug("no skb for rx_status 0x%x\n",
rx_status);
continue;
}
- if (!rxq->left_size) {
- /* last descriptor has only FCS */
- /* and can be discarded */
- dma_sync_single_range_for_cpu(dev->dev.parent,
- phys_addr, 0,
- ETH_FCS_LEN,
- DMA_FROM_DEVICE);
- /* leave the descriptor and buffer untouched */
- } else {
- /* refill descriptor with new buffer later */
- rx_desc->buf_phys_addr = 0;
-
- frag_num = skb_shinfo(rxq->skb)->nr_frags;
- frag_offset = 0;
- frag_size = min(rxq->left_size,
- (int)(PAGE_SIZE - frag_offset));
- skb_add_rx_frag(rxq->skb, frag_num, page,
- frag_offset, frag_size,
- PAGE_SIZE);
-
- dma_unmap_page(dev->dev.parent, phys_addr,
- PAGE_SIZE, DMA_FROM_DEVICE);
-
- rxq->left_size -= frag_size;
- }
+ mvneta_swbm_add_rx_fragment(pp, rx_desc, rxq, page);
} /* Middle or Last descriptor */
if (!(rx_status & MVNETA_RXD_LAST_DESC))
@@ -2072,17 +2309,14 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
/* clean incomplete skb pointer in queue */
rxq->skb = NULL;
- rxq->left_size = 0;
}
+ rcu_read_unlock();
- if (rcvd_pkts) {
- struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
+ if (xdp_ret & MVNETA_XDP_REDIR)
+ xdp_do_flush_map();
- u64_stats_update_begin(&stats->syncp);
- stats->rx_packets += rcvd_pkts;
- stats->rx_bytes += rcvd_bytes;
- u64_stats_update_end(&stats->syncp);
- }
+ if (rcvd_pkts)
+ mvneta_update_stats(pp, rcvd_pkts, rcvd_bytes, false);
/* return some buffers to hardware queue, one at a time is too slow */
refill = mvneta_rx_refill_queue(pp, rxq);
@@ -2206,14 +2440,8 @@ err_drop_frame:
napi_gro_receive(napi, skb);
}
- if (rcvd_pkts) {
- struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
-
- u64_stats_update_begin(&stats->syncp);
- stats->rx_packets += rcvd_pkts;
- stats->rx_bytes += rcvd_bytes;
- u64_stats_update_end(&stats->syncp);
- }
+ if (rcvd_pkts)
+ mvneta_update_stats(pp, rcvd_pkts, rcvd_bytes, false);
/* Update rxq management counters */
mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
@@ -2225,16 +2453,19 @@ static inline void
mvneta_tso_put_hdr(struct sk_buff *skb,
struct mvneta_port *pp, struct mvneta_tx_queue *txq)
{
- struct mvneta_tx_desc *tx_desc;
int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
+ struct mvneta_tx_desc *tx_desc;
- txq->tx_skb[txq->txq_put_index] = NULL;
tx_desc = mvneta_txq_next_desc_get(txq);
tx_desc->data_size = hdr_len;
tx_desc->command = mvneta_skb_tx_csum(pp, skb);
tx_desc->command |= MVNETA_TXD_F_DESC;
tx_desc->buf_phys_addr = txq->tso_hdrs_phys +
txq->txq_put_index * TSO_HEADER_SIZE;
+ buf->type = MVNETA_TYPE_SKB;
+ buf->skb = NULL;
+
mvneta_txq_inc_put(txq);
}
@@ -2243,6 +2474,7 @@ mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq,
struct sk_buff *skb, char *data, int size,
bool last_tcp, bool is_last)
{
+ struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
struct mvneta_tx_desc *tx_desc;
tx_desc = mvneta_txq_next_desc_get(txq);
@@ -2256,7 +2488,8 @@ mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq,
}
tx_desc->command = 0;
- txq->tx_skb[txq->txq_put_index] = NULL;
+ buf->type = MVNETA_TYPE_SKB;
+ buf->skb = NULL;
if (last_tcp) {
/* last descriptor in the TCP packet */
@@ -2264,7 +2497,7 @@ mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq,
/* last descriptor in SKB */
if (is_last)
- txq->tx_skb[txq->txq_put_index] = skb;
+ buf->skb = skb;
}
mvneta_txq_inc_put(txq);
return 0;
@@ -2349,6 +2582,7 @@ static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
int i, nr_frags = skb_shinfo(skb)->nr_frags;
for (i = 0; i < nr_frags; i++) {
+ struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
void *addr = skb_frag_address(frag);
@@ -2368,12 +2602,13 @@ static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
if (i == nr_frags - 1) {
/* Last descriptor */
tx_desc->command = MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD;
- txq->tx_skb[txq->txq_put_index] = skb;
+ buf->skb = skb;
} else {
/* Descriptor in the middle: Not First, Not Last */
tx_desc->command = 0;
- txq->tx_skb[txq->txq_put_index] = NULL;
+ buf->skb = NULL;
}
+ buf->type = MVNETA_TYPE_SKB;
mvneta_txq_inc_put(txq);
}
@@ -2401,6 +2636,7 @@ static netdev_tx_t mvneta_tx(struct sk_buff *skb, struct net_device *dev)
struct mvneta_port *pp = netdev_priv(dev);
u16 txq_id = skb_get_queue_mapping(skb);
struct mvneta_tx_queue *txq = &pp->txqs[txq_id];
+ struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
struct mvneta_tx_desc *tx_desc;
int len = skb->len;
int frags = 0;
@@ -2433,16 +2669,17 @@ static netdev_tx_t mvneta_tx(struct sk_buff *skb, struct net_device *dev)
goto out;
}
+ buf->type = MVNETA_TYPE_SKB;
if (frags == 1) {
/* First and Last descriptor */
tx_cmd |= MVNETA_TXD_FLZ_DESC;
tx_desc->command = tx_cmd;
- txq->tx_skb[txq->txq_put_index] = skb;
+ buf->skb = skb;
mvneta_txq_inc_put(txq);
} else {
/* First but not Last */
tx_cmd |= MVNETA_TXD_F_DESC;
- txq->tx_skb[txq->txq_put_index] = NULL;
+ buf->skb = NULL;
mvneta_txq_inc_put(txq);
tx_desc->command = tx_cmd;
/* Continue with other skb fragments */
@@ -2459,7 +2696,6 @@ static netdev_tx_t mvneta_tx(struct sk_buff *skb, struct net_device *dev)
out:
if (frags > 0) {
- struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
netdev_tx_sent_queue(nq, len);
@@ -2474,10 +2710,7 @@ out:
else
txq->pending += frags;
- u64_stats_update_begin(&stats->syncp);
- stats->tx_packets++;
- stats->tx_bytes += len;
- u64_stats_update_end(&stats->syncp);
+ mvneta_update_stats(pp, 1, len, true);
} else {
dev->stats.tx_dropped++;
dev_kfree_skb_any(skb);
@@ -2830,11 +3063,57 @@ static int mvneta_poll(struct napi_struct *napi, int budget)
return rx_done;
}
+static int mvneta_create_page_pool(struct mvneta_port *pp,
+ struct mvneta_rx_queue *rxq, int size)
+{
+ struct bpf_prog *xdp_prog = READ_ONCE(pp->xdp_prog);
+ struct page_pool_params pp_params = {
+ .order = 0,
+ .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
+ .pool_size = size,
+ .nid = cpu_to_node(0),
+ .dev = pp->dev->dev.parent,
+ .dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE,
+ .offset = pp->rx_offset_correction,
+ .max_len = MVNETA_MAX_RX_BUF_SIZE,
+ };
+ int err;
+
+ rxq->page_pool = page_pool_create(&pp_params);
+ if (IS_ERR(rxq->page_pool)) {
+ err = PTR_ERR(rxq->page_pool);
+ rxq->page_pool = NULL;
+ return err;
+ }
+
+ err = xdp_rxq_info_reg(&rxq->xdp_rxq, pp->dev, rxq->id);
+ if (err < 0)
+ goto err_free_pp;
+
+ err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL,
+ rxq->page_pool);
+ if (err)
+ goto err_unregister_rxq;
+
+ return 0;
+
+err_unregister_rxq:
+ xdp_rxq_info_unreg(&rxq->xdp_rxq);
+err_free_pp:
+ page_pool_destroy(rxq->page_pool);
+ rxq->page_pool = NULL;
+ return err;
+}
+
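The pool created here owns allocation, DMA mapping (PP_FLAG_DMA_MAP) and device-side syncing (PP_FLAG_DMA_SYNC_DEV) for RX buffers. The per-buffer lifecycle the rest of the patch relies on reduces to roughly this sketch:

/* Illustrative page_pool buffer lifecycle, mirroring the calls used in
 * mvneta_rx_refill() and mvneta_rxq_drop_pkts() above.
 */
struct page *page;
dma_addr_t dma;

page = page_pool_alloc_pages(rxq->page_pool, GFP_ATOMIC | __GFP_NOWARN);
if (!page)
	return -ENOMEM;
dma = page_pool_get_dma_addr(page);	/* the pool already mapped the page */
/* ... program 'dma' into an RX descriptor, receive into it ... */
page_pool_put_page(rxq->page_pool, page, false);	/* recycle on drop/teardown */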
/* Handle rxq fill: allocates rxq skbs; called when initializing a port */
static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
int num)
{
- int i;
+ int i, err;
+
+ err = mvneta_create_page_pool(pp, rxq, num);
+ if (err < 0)
+ return err;
for (i = 0; i < num; i++) {
memset(rxq->descs + i, 0, sizeof(struct mvneta_rx_desc));
@@ -2908,7 +3187,7 @@ static void mvneta_rxq_hw_init(struct mvneta_port *pp,
/* Set Offset */
mvneta_rxq_offset_set(pp, rxq, 0);
mvneta_rxq_buf_size_set(pp, rxq, PAGE_SIZE < SZ_64K ?
- PAGE_SIZE :
+ MVNETA_MAX_RX_BUF_SIZE :
MVNETA_RX_BUF_SIZE(pp->pkt_size));
mvneta_rxq_bm_disable(pp, rxq);
mvneta_rxq_fill(pp, rxq, rxq->size);
@@ -2989,9 +3268,8 @@ static int mvneta_txq_sw_init(struct mvneta_port *pp,
txq->last_desc = txq->size - 1;
- txq->tx_skb = kmalloc_array(txq->size, sizeof(*txq->tx_skb),
- GFP_KERNEL);
- if (!txq->tx_skb) {
+ txq->buf = kmalloc_array(txq->size, sizeof(*txq->buf), GFP_KERNEL);
+ if (!txq->buf) {
dma_free_coherent(pp->dev->dev.parent,
txq->size * MVNETA_DESC_ALIGNED_SIZE,
txq->descs, txq->descs_phys);
@@ -3003,7 +3281,7 @@ static int mvneta_txq_sw_init(struct mvneta_port *pp,
txq->size * TSO_HEADER_SIZE,
&txq->tso_hdrs_phys, GFP_KERNEL);
if (!txq->tso_hdrs) {
- kfree(txq->tx_skb);
+ kfree(txq->buf);
dma_free_coherent(pp->dev->dev.parent,
txq->size * MVNETA_DESC_ALIGNED_SIZE,
txq->descs, txq->descs_phys);
@@ -3056,7 +3334,7 @@ static void mvneta_txq_sw_deinit(struct mvneta_port *pp,
{
struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
- kfree(txq->tx_skb);
+ kfree(txq->buf);
if (txq->tso_hdrs)
dma_free_coherent(pp->dev->dev.parent,
@@ -3263,6 +3541,11 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu)
mtu = ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8);
}
+ if (pp->xdp_prog && mtu > MVNETA_MAX_RX_BUF_SIZE) {
+ netdev_info(dev, "Illegal MTU value %d for XDP mode\n", mtu);
+ return -EINVAL;
+ }
+
dev->mtu = mtu;
if (!netif_running(dev)) {
@@ -3411,8 +3694,8 @@ static void mvneta_validate(struct phylink_config *config,
phylink_helper_basex_speed(state);
}
-static int mvneta_mac_link_state(struct phylink_config *config,
- struct phylink_link_state *state)
+static void mvneta_mac_pcs_get_state(struct phylink_config *config,
+ struct phylink_link_state *state)
{
struct net_device *ndev = to_net_dev(config->dev);
struct mvneta_port *pp = netdev_priv(ndev);
@@ -3438,8 +3721,6 @@ static int mvneta_mac_link_state(struct phylink_config *config,
state->pause |= MLO_PAUSE_RX;
if (gmac_stat & MVNETA_GMAC_TX_FLOW_CTRL_ENABLE)
state->pause |= MLO_PAUSE_TX;
-
- return 1;
}
static void mvneta_mac_an_restart(struct phylink_config *config)
@@ -3632,7 +3913,7 @@ static void mvneta_mac_link_up(struct phylink_config *config, unsigned int mode,
static const struct phylink_mac_ops mvneta_phylink_ops = {
.validate = mvneta_validate,
- .mac_link_state = mvneta_mac_link_state,
+ .mac_pcs_get_state = mvneta_mac_pcs_get_state,
.mac_an_restart = mvneta_mac_an_restart,
.mac_config = mvneta_mac_config,
.mac_link_down = mvneta_mac_link_down,
@@ -3932,6 +4213,47 @@ static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return phylink_mii_ioctl(pp->phylink, ifr, cmd);
}
+static int mvneta_xdp_setup(struct net_device *dev, struct bpf_prog *prog,
+ struct netlink_ext_ack *extack)
+{
+ bool need_update, running = netif_running(dev);
+ struct mvneta_port *pp = netdev_priv(dev);
+ struct bpf_prog *old_prog;
+
+ if (prog && dev->mtu > MVNETA_MAX_RX_BUF_SIZE) {
+ NL_SET_ERR_MSG_MOD(extack, "Jumbo frames not supported on XDP");
+ return -EOPNOTSUPP;
+ }
+
+ need_update = !!pp->xdp_prog != !!prog;
+ if (running && need_update)
+ mvneta_stop(dev);
+
+ old_prog = xchg(&pp->xdp_prog, prog);
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+ if (running && need_update)
+ return mvneta_open(dev);
+
+ return 0;
+}
+
+static int mvneta_xdp(struct net_device *dev, struct netdev_bpf *xdp)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
+
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ return mvneta_xdp_setup(dev, xdp->prog, xdp->extack);
+ case XDP_QUERY_PROG:
+ xdp->prog_id = pp->xdp_prog ? pp->xdp_prog->aux->id : 0;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
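With .ndo_bpf wired up, a program can be attached from user space through the normal netlink path; a hedged sketch using the libbpf API of this era (interface name and error handling are placeholders):

/* User-space attach sketch (illustrative only). */
#include <net/if.h>
#include <linux/if_link.h>
#include <bpf/libbpf.h>

int attach_xdp(const char *ifname, int prog_fd)
{
	int ifindex = if_nametoindex(ifname);

	if (!ifindex)
		return -1;
	/* XDP_FLAGS_DRV_MODE requests the native path added by this patch */
	return bpf_set_link_xdp_fd(ifindex, prog_fd, XDP_FLAGS_DRV_MODE);
}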
/* Ethtool methods */
/* Set link ksettings (phy address, speed) for ethtools */
@@ -4328,6 +4650,8 @@ static const struct net_device_ops mvneta_netdev_ops = {
.ndo_fix_features = mvneta_fix_features,
.ndo_get_stats64 = mvneta_get_stats64,
.ndo_do_ioctl = mvneta_ioctl,
+ .ndo_bpf = mvneta_xdp,
+ .ndo_xdp_xmit = mvneta_xdp_xmit,
};
static const struct ethtool_ops mvneta_eth_tool_ops = {
@@ -4477,9 +4801,9 @@ static int mvneta_probe(struct platform_device *pdev)
struct phy *comphy;
const char *dt_mac_addr;
char hw_mac_addr[ETH_ALEN];
+ phy_interface_t phy_mode;
const char *mac_from;
int tx_csum_limit;
- int phy_mode;
int err;
int cpu;
@@ -4492,10 +4816,9 @@ static int mvneta_probe(struct platform_device *pdev)
if (dev->irq == 0)
return -EINVAL;
- phy_mode = of_get_phy_mode(dn);
- if (phy_mode < 0) {
+ err = of_get_phy_mode(dn, &phy_mode);
+ if (err) {
dev_err(&pdev->dev, "incorrect phy-mode\n");
- err = -EINVAL;
goto err_free_irq;
}
@@ -4618,7 +4941,7 @@ static int mvneta_probe(struct platform_device *pdev)
SET_NETDEV_DEV(dev, &pdev->dev);
pp->id = global_port_id++;
- pp->rx_offset_correction = 0; /* not relevant for SW BM */
+ pp->rx_offset_correction = MVNETA_SKB_HEADROOM;
/* Obtain access to BM resources if enabled and already initialized */
bm_node = of_parse_phandle(dn, "buffer-manager", 0);
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 111b3b8239e1..62dc2f362a16 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -2863,7 +2863,7 @@ static void mvpp2_rx_csum(struct mvpp2_port *port, u32 status,
skb->ip_summed = CHECKSUM_NONE;
}
-/* Reuse skb if possible, or allocate a new skb and add it to BM pool */
+/* Allocate a new skb and add it to BM pool */
static int mvpp2_rx_refill(struct mvpp2_port *port,
struct mvpp2_bm_pool *bm_pool, int pool)
{
@@ -2871,7 +2871,6 @@ static int mvpp2_rx_refill(struct mvpp2_port *port,
phys_addr_t phys_addr;
void *buf;
- /* No recycle or too many buffers are in use, so allocate a new skb */
buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr, &phys_addr,
GFP_ATOMIC);
if (!buf)
@@ -2957,14 +2956,13 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
* by the hardware, and the information about the buffer is
* described by the RX descriptor.
*/
- if (rx_status & MVPP2_RXD_ERR_SUMMARY) {
-err_drop_frame:
- dev->stats.rx_errors++;
- mvpp2_rx_error(port, rx_desc);
- /* Return the buffer to the pool */
- mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
- continue;
- }
+ if (rx_status & MVPP2_RXD_ERR_SUMMARY)
+ goto err_drop_frame;
+
+ dma_sync_single_for_cpu(dev->dev.parent, dma_addr,
+ rx_bytes + MVPP2_MH_SIZE,
+ DMA_FROM_DEVICE);
+ prefetch(data);
if (bm_pool->frag_size > PAGE_SIZE)
frag_size = 0;
@@ -2983,8 +2981,9 @@ err_drop_frame:
goto err_drop_frame;
}
- dma_unmap_single(dev->dev.parent, dma_addr,
- bm_pool->buf_size, DMA_FROM_DEVICE);
+ dma_unmap_single_attrs(dev->dev.parent, dma_addr,
+ bm_pool->buf_size, DMA_FROM_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
rcvd_pkts++;
rcvd_bytes += rx_bytes;
@@ -2995,6 +2994,13 @@ err_drop_frame:
mvpp2_rx_csum(port, rx_status, skb);
napi_gro_receive(napi, skb);
+ continue;
+
+err_drop_frame:
+ dev->stats.rx_errors++;
+ mvpp2_rx_error(port, rx_desc);
+ /* Return the buffer to the pool */
+ mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
}
if (rcvd_pkts) {
@@ -4817,8 +4823,8 @@ empty_set:
bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
}
-static void mvpp22_xlg_link_state(struct mvpp2_port *port,
- struct phylink_link_state *state)
+static void mvpp22_xlg_pcs_get_state(struct mvpp2_port *port,
+ struct phylink_link_state *state)
{
u32 val;
@@ -4837,8 +4843,8 @@ static void mvpp22_xlg_link_state(struct mvpp2_port *port,
state->pause |= MLO_PAUSE_RX;
}
-static void mvpp2_gmac_link_state(struct mvpp2_port *port,
- struct phylink_link_state *state)
+static void mvpp2_gmac_pcs_get_state(struct mvpp2_port *port,
+ struct phylink_link_state *state)
{
u32 val;
@@ -4871,8 +4877,8 @@ static void mvpp2_gmac_link_state(struct mvpp2_port *port,
state->pause |= MLO_PAUSE_TX;
}
-static int mvpp2_phylink_mac_link_state(struct phylink_config *config,
- struct phylink_link_state *state)
+static void mvpp2_phylink_mac_pcs_get_state(struct phylink_config *config,
+ struct phylink_link_state *state)
{
struct mvpp2_port *port = container_of(config, struct mvpp2_port,
phylink_config);
@@ -4882,13 +4888,12 @@ static int mvpp2_phylink_mac_link_state(struct phylink_config *config,
mode &= MVPP22_XLG_CTRL3_MACMODESELECT_MASK;
if (mode == MVPP22_XLG_CTRL3_MACMODESELECT_10G) {
- mvpp22_xlg_link_state(port, state);
- return 1;
+ mvpp22_xlg_pcs_get_state(port, state);
+ return;
}
}
- mvpp2_gmac_link_state(port, state);
- return 1;
+ mvpp2_gmac_pcs_get_state(port, state);
}
static void mvpp2_mac_an_restart(struct phylink_config *config)
@@ -5180,7 +5185,7 @@ static void mvpp2_mac_link_down(struct phylink_config *config,
static const struct phylink_mac_ops mvpp2_phylink_ops = {
.validate = mvpp2_phylink_validate,
- .mac_link_state = mvpp2_phylink_mac_link_state,
+ .mac_pcs_get_state = mvpp2_phylink_mac_pcs_get_state,
.mac_an_restart = mvpp2_mac_an_restart,
.mac_config = mvpp2_mac_config,
.mac_link_up = mvpp2_mac_link_up,
diff --git a/drivers/net/ethernet/marvell/octeontx2/Kconfig b/drivers/net/ethernet/marvell/octeontx2/Kconfig
index 711ada7139d3..fb34fbd62088 100644
--- a/drivers/net/ethernet/marvell/octeontx2/Kconfig
+++ b/drivers/net/ethernet/marvell/octeontx2/Kconfig
@@ -16,3 +16,12 @@ config OCTEONTX2_AF
Unit's admin function manager which manages all RVU HW resources
and provides a medium to other PF/VFs to configure HW. Should be
enabled for other RVU device drivers to work.
+
+config NDC_DIS_DYNAMIC_CACHING
+ bool "Disable caching of dynamic entries in NDC"
+ depends on OCTEONTX2_AF
+ default n
+ ---help---
+	  This config option disables caching of dynamic entries such as NIX SQEs,
+	  NPA stack pages, etc. in NDC. It also locks down NIX SQ/CQ/RQ/RSS and
+	  NPA Aura/Pool contexts.
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/Makefile b/drivers/net/ethernet/marvell/octeontx2/af/Makefile
index 06329acf9c2c..1b25948c662b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/Makefile
+++ b/drivers/net/ethernet/marvell/octeontx2/af/Makefile
@@ -8,4 +8,4 @@ obj-$(CONFIG_OCTEONTX2_AF) += octeontx2_af.o
octeontx2_mbox-y := mbox.o
octeontx2_af-y := cgx.o rvu.o rvu_cgx.o rvu_npa.o rvu_nix.o \
- rvu_reg.o rvu_npc.o
+ rvu_reg.o rvu_npc.o rvu_debugfs.o
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index 6d55e3d0b7ea..5ca788691911 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -138,6 +138,16 @@ void *cgx_get_pdata(int cgx_id)
}
EXPORT_SYMBOL(cgx_get_pdata);
+int cgx_get_cgxid(void *cgxd)
+{
+ struct cgx *cgx = cgxd;
+
+ if (!cgx)
+ return -EINVAL;
+
+ return cgx->cgx_id;
+}
+
/* Ensure the required lock for the event queue (where asynchronous events are
* posted) is acquired before calling this API. Otherwise an asynchronous event (with
* the latest link status) can reach the destination before this function returns
@@ -281,6 +291,35 @@ void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable)
}
EXPORT_SYMBOL(cgx_lmac_promisc_config);
+/* Enable or disable forwarding received pause frames to Tx block */
+void cgx_lmac_enadis_rx_pause_fwding(void *cgxd, int lmac_id, bool enable)
+{
+ struct cgx *cgx = cgxd;
+ u64 cfg;
+
+ if (!cgx)
+ return;
+
+ if (enable) {
+ cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
+ cfg |= CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
+ cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
+ cfg |= CGX_SMUX_RX_FRM_CTL_CTL_BCK;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
+ } else {
+ cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
+ cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
+ cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
+ cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
+ }
+}
+EXPORT_SYMBOL(cgx_lmac_enadis_rx_pause_fwding);
+
int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat)
{
struct cgx *cgx = cgxd;
@@ -321,6 +360,27 @@ int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable)
}
EXPORT_SYMBOL(cgx_lmac_rx_tx_enable);
+int cgx_lmac_tx_enable(void *cgxd, int lmac_id, bool enable)
+{
+ struct cgx *cgx = cgxd;
+ u64 cfg, last;
+
+ if (!cgx || lmac_id >= cgx->lmac_count)
+ return -ENODEV;
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
+ last = cfg;
+ if (enable)
+ cfg |= DATA_PKT_TX_EN;
+ else
+ cfg &= ~DATA_PKT_TX_EN;
+
+ if (cfg != last)
+ cgx_write(cgx, lmac_id, CGXX_CMRX_CFG, cfg);
+ return !!(last & DATA_PKT_TX_EN);
+}
+EXPORT_SYMBOL(cgx_lmac_tx_enable);
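Because cgx_lmac_tx_enable() returns the previous state of DATA_PKT_TX_EN, callers can use a save/restore idiom around operations that need TX quiesced; a sketch (the surrounding code is hypothetical):

int prev = cgx_lmac_tx_enable(cgxd, lmac_id, false);

if (prev >= 0) {
	/* ... reconfigure while TX is disabled ... */
	cgx_lmac_tx_enable(cgxd, lmac_id, prev);	/* restore old state */
}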
+
/* CGX Firmware interface low level support */
static int cgx_fwi_cmd_send(u64 req, u64 *resp, struct lmac *lmac)
{
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
index 206dc5dc1df8..9343bf39cfac 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- * Marvell OcteonTx2 CGX driver
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell OcteonTx2 CGX driver
*
* Copyright (C) 2018 Marvell International Ltd.
*
@@ -56,6 +56,11 @@
#define CGXX_GMP_PCS_MRX_CTL 0x30000
#define CGXX_GMP_PCS_MRX_CTL_LBK BIT_ULL(14)
+#define CGXX_SMUX_RX_FRM_CTL 0x20020
+#define CGX_SMUX_RX_FRM_CTL_CTL_BCK BIT_ULL(3)
+#define CGXX_GMP_GMI_RXX_FRM_CTL 0x38028
+#define CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK BIT_ULL(3)
+
#define CGX_COMMAND_REG CGXX_SCRATCH1_REG
#define CGX_EVENT_REG CGXX_SCRATCH0_REG
#define CGX_CMD_TIMEOUT 2200 /* msecs */
@@ -63,6 +68,11 @@
#define CGX_NVEC 37
#define CGX_LMAC_FWI 0
+enum cgx_nix_stat_type {
+ NIX_STATS_RX,
+ NIX_STATS_TX,
+};
+
enum LMAC_TYPE {
LMAC_MODE_SGMII = 0,
LMAC_MODE_XAUI = 1,
@@ -96,6 +106,7 @@ struct cgx_event_cb {
extern struct pci_driver cgx_driver;
int cgx_get_cgxcnt_max(void);
+int cgx_get_cgxid(void *cgxd);
int cgx_get_lmac_cnt(void *cgxd);
void *cgx_get_pdata(int cgx_id);
int cgx_set_pkind(void *cgxd, u8 lmac_id, int pkind);
@@ -104,9 +115,11 @@ int cgx_lmac_evh_unregister(void *cgxd, int lmac_id);
int cgx_get_tx_stats(void *cgxd, int lmac_id, int idx, u64 *tx_stat);
int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat);
int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable);
+int cgx_lmac_tx_enable(void *cgxd, int lmac_id, bool enable);
int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr);
u64 cgx_lmac_addr_get(u8 cgx_id, u8 lmac_id);
void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable);
+void cgx_lmac_enadis_rx_pause_fwding(void *cgxd, int lmac_id, bool enable);
int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable);
int cgx_get_link_info(void *cgxd, int lmac_id,
struct cgx_link_user_info *linfo);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h
index fb3ba4968a9b..473d9751601f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- * Marvell OcteonTx2 CGX driver
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell OcteonTx2 CGX driver
*
* Copyright (C) 2018 Marvell International Ltd.
*
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/common.h b/drivers/net/ethernet/marvell/octeontx2/af/common.h
index e332e82fc066..784207bae5f8 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/common.h
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- * Marvell OcteonTx2 RVU Admin Function driver
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell OcteonTx2 RVU Admin Function driver
*
* Copyright (C) 2018 Marvell International Ltd.
*
@@ -196,4 +196,20 @@ enum nix_scheduler {
#define DEFAULT_RSS_CONTEXT_GROUP 0
#define MAX_RSS_INDIR_TBL_SIZE 256 /* 1 << Max adder bits */
+/* NDC info */
+enum ndc_idx_e {
+ NIX0_RX = 0x0,
+ NIX0_TX = 0x1,
+ NPA0_U = 0x2,
+};
+
+enum ndc_ctype_e {
+ CACHING = 0x0,
+ BYPASS = 0x1,
+};
+
+#define NDC_MAX_PORT 6
+#define NDC_READ_TRANS 0
+#define NDC_WRITE_TRANS 1
+
#endif /* COMMON_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
index d6f9ed8ea966..387e33fa417a 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
@@ -19,17 +19,20 @@ static const u16 msgs_offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
void otx2_mbox_reset(struct otx2_mbox *mbox, int devid)
{
+ void *hw_mbase = mbox->hwbase + (devid * MBOX_SIZE);
struct otx2_mbox_dev *mdev = &mbox->dev[devid];
struct mbox_hdr *tx_hdr, *rx_hdr;
- tx_hdr = mdev->mbase + mbox->tx_start;
- rx_hdr = mdev->mbase + mbox->rx_start;
+ tx_hdr = hw_mbase + mbox->tx_start;
+ rx_hdr = hw_mbase + mbox->rx_start;
spin_lock(&mdev->mbox_lock);
mdev->msg_size = 0;
mdev->rsp_size = 0;
tx_hdr->num_msgs = 0;
+ tx_hdr->msg_size = 0;
rx_hdr->num_msgs = 0;
+ rx_hdr->msg_size = 0;
spin_unlock(&mdev->mbox_lock);
}
EXPORT_SYMBOL(otx2_mbox_reset);
@@ -133,16 +136,17 @@ EXPORT_SYMBOL(otx2_mbox_init);
int otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid)
{
+ unsigned long timeout = jiffies + msecs_to_jiffies(MBOX_RSP_TIMEOUT);
struct otx2_mbox_dev *mdev = &mbox->dev[devid];
- int timeout = 0, sleep = 1;
+ struct device *sender = &mbox->pdev->dev;
- while (mdev->num_msgs != mdev->msgs_acked) {
- msleep(sleep);
- timeout += sleep;
- if (timeout >= MBOX_RSP_TIMEOUT)
- return -EIO;
+ while (!time_after(jiffies, timeout)) {
+ if (mdev->num_msgs == mdev->msgs_acked)
+ return 0;
+ usleep_range(800, 1000);
}
- return 0;
+ dev_dbg(sender, "timed out while waiting for rsp\n");
+ return -EIO;
}
EXPORT_SYMBOL(otx2_mbox_wait_for_rsp);
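The rewrite replaces an iteration-counting loop with the standard wall-clock pattern, which stays accurate even when individual sleeps overshoot. In its general form (condition_met() and TIMEOUT_MS are placeholders):

unsigned long timeout = jiffies + msecs_to_jiffies(TIMEOUT_MS);

while (!time_after(jiffies, timeout)) {
	if (condition_met())
		return 0;
	usleep_range(800, 1000);	/* requires a sleepable context */
}
return -ETIMEDOUT;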
@@ -162,13 +166,25 @@ EXPORT_SYMBOL(otx2_mbox_busy_poll_for_rsp);
void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid)
{
+ void *hw_mbase = mbox->hwbase + (devid * MBOX_SIZE);
struct otx2_mbox_dev *mdev = &mbox->dev[devid];
struct mbox_hdr *tx_hdr, *rx_hdr;
- tx_hdr = mdev->mbase + mbox->tx_start;
- rx_hdr = mdev->mbase + mbox->rx_start;
+ tx_hdr = hw_mbase + mbox->tx_start;
+ rx_hdr = hw_mbase + mbox->rx_start;
+
+	/* If a bounce buffer is implemented, copy mbox messages from
+	 * the bounce buffer to hw mbox memory.
+ */
+ if (mdev->mbase != hw_mbase)
+ memcpy(hw_mbase + mbox->tx_start + msgs_offset,
+ mdev->mbase + mbox->tx_start + msgs_offset,
+ mdev->msg_size);
spin_lock(&mdev->mbox_lock);
+
+ tx_hdr->msg_size = mdev->msg_size;
+
/* Reset header for next messages */
mdev->msg_size = 0;
mdev->rsp_size = 0;
@@ -215,7 +231,7 @@ struct mbox_msghdr *otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid,
msghdr = mdev->mbase + mbox->tx_start + msgs_offset + mdev->msg_size;
/* Clear the whole msg region */
- memset(msghdr, 0, sizeof(*msghdr) + size);
+ memset(msghdr, 0, size);
/* Init message header with reset values */
msghdr->ver = OTX2_MBOX_VERSION;
mdev->msg_size += size;
@@ -236,8 +252,10 @@ struct mbox_msghdr *otx2_mbox_get_rsp(struct otx2_mbox *mbox, int devid,
struct otx2_mbox_dev *mdev = &mbox->dev[devid];
u16 msgs;
+ spin_lock(&mdev->mbox_lock);
+
if (mdev->num_msgs != mdev->msgs_acked)
- return ERR_PTR(-ENODEV);
+ goto error;
for (msgs = 0; msgs < mdev->msgs_acked; msgs++) {
struct mbox_msghdr *pmsg = mdev->mbase + imsg;
@@ -245,18 +263,55 @@ struct mbox_msghdr *otx2_mbox_get_rsp(struct otx2_mbox *mbox, int devid,
if (msg == pmsg) {
if (pmsg->id != prsp->id)
- return ERR_PTR(-ENODEV);
+ goto error;
+ spin_unlock(&mdev->mbox_lock);
return prsp;
}
- imsg = pmsg->next_msgoff;
- irsp = prsp->next_msgoff;
+ imsg = mbox->tx_start + pmsg->next_msgoff;
+ irsp = mbox->rx_start + prsp->next_msgoff;
}
+error:
+ spin_unlock(&mdev->mbox_lock);
return ERR_PTR(-ENODEV);
}
EXPORT_SYMBOL(otx2_mbox_get_rsp);
+int otx2_mbox_check_rsp_msgs(struct otx2_mbox *mbox, int devid)
+{
+ unsigned long ireq = mbox->tx_start + msgs_offset;
+ unsigned long irsp = mbox->rx_start + msgs_offset;
+ struct otx2_mbox_dev *mdev = &mbox->dev[devid];
+ int rc = -ENODEV;
+ u16 msgs;
+
+ spin_lock(&mdev->mbox_lock);
+
+ if (mdev->num_msgs != mdev->msgs_acked)
+ goto exit;
+
+ for (msgs = 0; msgs < mdev->msgs_acked; msgs++) {
+ struct mbox_msghdr *preq = mdev->mbase + ireq;
+ struct mbox_msghdr *prsp = mdev->mbase + irsp;
+
+ if (preq->id != prsp->id)
+ goto exit;
+ if (prsp->rc) {
+ rc = prsp->rc;
+ goto exit;
+ }
+
+ ireq = mbox->tx_start + preq->next_msgoff;
+ irsp = mbox->rx_start + prsp->next_msgoff;
+ }
+ rc = 0;
+exit:
+ spin_unlock(&mdev->mbox_lock);
+ return rc;
+}
+EXPORT_SYMBOL(otx2_mbox_check_rsp_msgs);
+
int
otx2_reply_invalid_msg(struct otx2_mbox *mbox, int devid, u16 pcifunc, u16 id)
{
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index 76a4575d18ff..a589748f1240 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- * Marvell OcteonTx2 RVU Admin Function driver
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell OcteonTx2 RVU Admin Function driver
*
* Copyright (C) 2018 Marvell International Ltd.
*
@@ -36,7 +36,7 @@
#define INTR_MASK(pfvfs) ((pfvfs < 64) ? (BIT_ULL(pfvfs) - 1) : (~0ull))
-#define MBOX_RSP_TIMEOUT 1000 /* in ms, Time to wait for mbox response */
+#define MBOX_RSP_TIMEOUT 2000 /* Time(ms) to wait for mbox response */
#define MBOX_MSG_ALIGN 16 /* Align mbox msg start to 16bytes */
@@ -75,6 +75,7 @@ struct otx2_mbox {
/* Header which precedes all mbox messages */
struct mbox_hdr {
+ u64 msg_size; /* Total msgs size embedded */
u16 num_msgs; /* No of msgs embedded */
};
@@ -103,6 +104,7 @@ struct mbox_msghdr *otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid,
int size, int size_rsp);
struct mbox_msghdr *otx2_mbox_get_rsp(struct otx2_mbox *mbox, int devid,
struct mbox_msghdr *msg);
+int otx2_mbox_check_rsp_msgs(struct otx2_mbox *mbox, int devid);
int otx2_reply_invalid_msg(struct otx2_mbox *mbox, int devid,
u16 pcifunc, u16 id);
bool otx2_mbox_nonempty(struct otx2_mbox *mbox, int devid);
@@ -125,6 +127,7 @@ M(ATTACH_RESOURCES, 0x002, attach_resources, rsrc_attach, msg_rsp) \
M(DETACH_RESOURCES, 0x003, detach_resources, rsrc_detach, msg_rsp) \
M(MSIX_OFFSET, 0x004, msix_offset, msg_req, msix_offset_rsp) \
M(VF_FLR, 0x006, vf_flr, msg_req, msg_rsp) \
+M(GET_HW_CAP, 0x008, get_hw_cap, msg_req, get_hw_cap_rsp) \
/* CGX mbox IDs (range 0x200 - 0x3FF) */ \
M(CGX_START_RXTX, 0x200, cgx_start_rxtx, msg_req, msg_rsp) \
M(CGX_STOP_RXTX, 0x201, cgx_stop_rxtx, msg_req, msg_rsp) \
@@ -300,6 +303,12 @@ struct msix_offset_rsp {
u16 cptlf_msixoff[MAX_RVU_BLKLF_CNT];
};
+struct get_hw_cap_rsp {
+ struct mbox_msghdr hdr;
+ u8 nix_fixed_txschq_mapping; /* Schq mapping fixed or flexible */
+ u8 nix_shaping; /* Is shaping and coloring supported */
+};
+
/* CGX mbox message formats */
struct cgx_stats_rsp {
@@ -352,6 +361,7 @@ struct npa_lf_alloc_req {
int node;
int aura_sz; /* No of auras */
u32 nr_pools; /* No of pools */
+ u64 way_mask;
};
struct npa_lf_alloc_rsp {
@@ -442,6 +452,7 @@ struct nix_lf_alloc_req {
u16 npa_func;
u16 sso_func;
u64 rx_cfg; /* See NIX_AF_LF(0..127)_RX_CFG */
+ u64 way_mask;
};
struct nix_lf_alloc_rsp {
@@ -512,6 +523,9 @@ struct nix_txsch_alloc_rsp {
/* Scheduler queue list allocated at each level */
u16 schq_contig_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
u16 schq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
+ u8 aggr_level; /* Traffic aggregation scheduler level */
+ u8 aggr_lvl_rr_prio; /* Aggregation lvl's RR_PRIO config */
+	u8 link_cfg_lvl;	/* LINKX_CFG CSRs mapped to TL3 or TL2's index? */
};
struct nix_txsch_free_req {
@@ -578,6 +592,18 @@ struct nix_rss_flowkey_cfg {
#define NIX_FLOW_KEY_TYPE_TCP BIT(3)
#define NIX_FLOW_KEY_TYPE_UDP BIT(4)
#define NIX_FLOW_KEY_TYPE_SCTP BIT(5)
+#define NIX_FLOW_KEY_TYPE_NVGRE BIT(6)
+#define NIX_FLOW_KEY_TYPE_VXLAN BIT(7)
+#define NIX_FLOW_KEY_TYPE_GENEVE BIT(8)
+#define NIX_FLOW_KEY_TYPE_ETH_DMAC BIT(9)
+#define NIX_FLOW_KEY_TYPE_IPV6_EXT BIT(10)
+#define NIX_FLOW_KEY_TYPE_GTPU BIT(11)
+#define NIX_FLOW_KEY_TYPE_INNR_IPV4 BIT(12)
+#define NIX_FLOW_KEY_TYPE_INNR_IPV6 BIT(13)
+#define NIX_FLOW_KEY_TYPE_INNR_TCP BIT(14)
+#define NIX_FLOW_KEY_TYPE_INNR_UDP BIT(15)
+#define NIX_FLOW_KEY_TYPE_INNR_SCTP BIT(16)
+#define NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC BIT(17)
u32 flowkey_cfg; /* Flowkey types selected */
u8 group; /* RSS context or group */
};
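The new NIX_FLOW_KEY_TYPE_* bits are simply OR-ed into flowkey_cfg when building the mbox request; for example (a sketch, with 'req' standing for an already-allocated struct nix_rss_flowkey_cfg message):

/* Hash outer TCP/UDP tuples plus the VXLAN VNI into the RSS flow key. */
req->flowkey_cfg = NIX_FLOW_KEY_TYPE_TCP |
		   NIX_FLOW_KEY_TYPE_UDP |
		   NIX_FLOW_KEY_TYPE_VXLAN;
req->group = DEFAULT_RSS_CONTEXT_GROUP;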
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
index 8d6d90fdfb73..3803af9231c6 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/npc.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- * Marvell OcteonTx2 RVU Admin Function driver
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell OcteonTx2 RVU Admin Function driver
*
* Copyright (C) 2018 Marvell International Ltd.
*
@@ -27,26 +27,45 @@ enum NPC_LID_E {
enum npc_kpu_la_ltype {
NPC_LT_LA_8023 = 1,
NPC_LT_LA_ETHER,
+ NPC_LT_LA_IH_NIX_ETHER,
+ NPC_LT_LA_IH_8_ETHER,
+ NPC_LT_LA_IH_4_ETHER,
+ NPC_LT_LA_IH_2_ETHER,
+ NPC_LT_LA_HIGIG2_ETHER,
+ NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_LT_LA_CUSTOM0 = 0xE,
+ NPC_LT_LA_CUSTOM1 = 0xF,
};
enum npc_kpu_lb_ltype {
NPC_LT_LB_ETAG = 1,
NPC_LT_LB_CTAG,
- NPC_LT_LB_STAG,
+ NPC_LT_LB_STAG_QINQ,
NPC_LT_LB_BTAG,
- NPC_LT_LB_QINQ,
NPC_LT_LB_ITAG,
+ NPC_LT_LB_DSA,
+ NPC_LT_LB_DSA_VLAN,
+ NPC_LT_LB_EDSA,
+ NPC_LT_LB_EDSA_VLAN,
+ NPC_LT_LB_EXDSA,
+ NPC_LT_LB_EXDSA_VLAN,
+ NPC_LT_LB_CUSTOM0 = 0xE,
+ NPC_LT_LB_CUSTOM1 = 0xF,
};
enum npc_kpu_lc_ltype {
NPC_LT_LC_IP = 1,
+ NPC_LT_LC_IP_OPT,
NPC_LT_LC_IP6,
+ NPC_LT_LC_IP6_EXT,
NPC_LT_LC_ARP,
NPC_LT_LC_RARP,
NPC_LT_LC_MPLS,
NPC_LT_LC_NSH,
NPC_LT_LC_PTP,
NPC_LT_LC_FCOE,
+ NPC_LT_LC_CUSTOM0 = 0xE,
+ NPC_LT_LC_CUSTOM1 = 0xF,
};
/* Don't modify Ltypes upto SCTP, otherwise it will
@@ -57,49 +76,67 @@ enum npc_kpu_ld_ltype {
NPC_LT_LD_UDP,
NPC_LT_LD_ICMP,
NPC_LT_LD_SCTP,
- NPC_LT_LD_IGMP,
NPC_LT_LD_ICMP6,
+ NPC_LT_LD_IGMP = 8,
NPC_LT_LD_ESP,
NPC_LT_LD_AH,
NPC_LT_LD_GRE,
- NPC_LT_LD_GRE_MPLS,
- NPC_LT_LD_GRE_NSH,
- NPC_LT_LD_TU_MPLS,
+ NPC_LT_LD_NVGRE,
+ NPC_LT_LD_NSH,
+ NPC_LT_LD_TU_MPLS_IN_NSH,
+ NPC_LT_LD_TU_MPLS_IN_IP,
+ NPC_LT_LD_CUSTOM0 = 0xE,
+ NPC_LT_LD_CUSTOM1 = 0xF,
};
enum npc_kpu_le_ltype {
- NPC_LT_LE_TU_ETHER = 1,
- NPC_LT_LE_TU_PPP,
- NPC_LT_LE_TU_MPLS_IN_NSH,
- NPC_LT_LE_TU_3RD_NSH,
+ NPC_LT_LE_VXLAN = 1,
+ NPC_LT_LE_GENEVE,
+ NPC_LT_LE_GTPU = 4,
+ NPC_LT_LE_VXLANGPE,
+ NPC_LT_LE_GTPC,
+ NPC_LT_LE_NSH,
+ NPC_LT_LE_TU_MPLS_IN_GRE,
+ NPC_LT_LE_TU_NSH_IN_GRE,
+ NPC_LT_LE_TU_MPLS_IN_UDP,
+ NPC_LT_LE_CUSTOM0 = 0xE,
+ NPC_LT_LE_CUSTOM1 = 0xF,
};
enum npc_kpu_lf_ltype {
- NPC_LT_LF_TU_IP = 1,
- NPC_LT_LF_TU_IP6,
- NPC_LT_LF_TU_ARP,
- NPC_LT_LF_TU_MPLS_IP,
- NPC_LT_LF_TU_MPLS_IP6,
- NPC_LT_LF_TU_MPLS_ETHER,
+ NPC_LT_LF_TU_ETHER = 1,
+ NPC_LT_LF_TU_PPP,
+ NPC_LT_LF_TU_MPLS_IN_VXLANGPE,
+ NPC_LT_LF_TU_NSH_IN_VXLANGPE,
+ NPC_LT_LF_TU_MPLS_IN_NSH,
+ NPC_LT_LF_TU_3RD_NSH,
+ NPC_LT_LF_CUSTOM0 = 0xE,
+ NPC_LT_LF_CUSTOM1 = 0xF,
};
enum npc_kpu_lg_ltype {
- NPC_LT_LG_TU_TCP = 1,
- NPC_LT_LG_TU_UDP,
- NPC_LT_LG_TU_SCTP,
- NPC_LT_LG_TU_ICMP,
- NPC_LT_LG_TU_IGMP,
- NPC_LT_LG_TU_ICMP6,
- NPC_LT_LG_TU_ESP,
- NPC_LT_LG_TU_AH,
+ NPC_LT_LG_TU_IP = 1,
+ NPC_LT_LG_TU_IP6,
+ NPC_LT_LG_TU_ARP,
+ NPC_LT_LG_TU_ETHER_IN_NSH,
+ NPC_LT_LG_CUSTOM0 = 0xE,
+ NPC_LT_LG_CUSTOM1 = 0xF,
};
+/* Don't modify Ltypes up to SCTP, otherwise it will
+ * affect flow tag calculation and thus RSS.
+ */
enum npc_kpu_lh_ltype {
- NPC_LT_LH_TCP_DATA = 1,
- NPC_LT_LH_HTTP_DATA,
- NPC_LT_LH_HTTPS_DATA,
- NPC_LT_LH_PPTP_DATA,
- NPC_LT_LH_UDP_DATA,
+ NPC_LT_LH_TU_TCP = 1,
+ NPC_LT_LH_TU_UDP,
+ NPC_LT_LH_TU_ICMP,
+ NPC_LT_LH_TU_SCTP,
+ NPC_LT_LH_TU_ICMP6,
+ NPC_LT_LH_TU_IGMP = 8,
+ NPC_LT_LH_TU_ESP,
+ NPC_LT_LH_TU_AH,
+ NPC_LT_LH_CUSTOM0 = 0xE,
+ NPC_LT_LH_CUSTOM1 = 0xF,
};
struct npc_kpu_profile_cam {
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
index b2ce957605bb..aa2727e6211a 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- * Marvell OcteonTx2 RVU Admin Function driver
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell OcteonTx2 RVU Admin Function driver
*
* Copyright (C) 2018 Marvell International Ltd.
*
@@ -11,6 +11,11 @@
#ifndef NPC_PROFILE_H
#define NPC_PROFILE_H
+#define NPC_KPU_PROFILE_VER 0x0000000100050000
+
+#define NPC_IH_W 0x8000
+#define NPC_IH_UTAG 0x2000
+
#define NPC_ETYPE_IP 0x0800
#define NPC_ETYPE_IP6 0x86dd
#define NPC_ETYPE_ARP 0x0806
@@ -27,6 +32,7 @@
#define NPC_ETYPE_TRANS_ETH_BR 0x6558
#define NPC_ETYPE_PPP 0x880b
#define NPC_ETYPE_NSH 0x894f
+#define NPC_ETYPE_DSA 0xdada
#define NPC_IPNH_HOP 0
#define NPC_IPNH_ICMP 1
@@ -44,13 +50,19 @@
#define NPC_IPNH_NONH 59
#define NPC_IPNH_DEST 60
#define NPC_IPNH_SCTP 132
+#define NPC_IPNH_MOBILITY 135
#define NPC_IPNH_MPLS 137
+#define NPC_IPNH_HOSTID 139
+#define NPC_IPNH_SHIM6 140
+#define NPC_UDP_PORT_PTP_E 319
+#define NPC_UDP_PORT_PTP_G 320
#define NPC_UDP_PORT_GTPC 2123
#define NPC_UDP_PORT_GTPU 2152
#define NPC_UDP_PORT_VXLAN 4789
#define NPC_UDP_PORT_VXLANGPE 4790
#define NPC_UDP_PORT_GENEVE 6081
+#define NPC_UDP_PORT_MPLS 6635
#define NPC_VXLANGPE_NP_IP 0x1
#define NPC_VXLANGPE_NP_IP6 0x2
@@ -72,11 +84,17 @@
#define NPC_MPLS_S 0x0100
+#define NPC_IP_TTL_MASK 0xff00
#define NPC_IP_VER_4 0x4000
#define NPC_IP_VER_6 0x6000
#define NPC_IP_VER_MASK 0xf000
#define NPC_IP_HDR_LEN_5 0x0500
#define NPC_IP_HDR_LEN_MASK 0x0f00
+#define NPC_IP_HDR_MF 0x2000
+#define NPC_IP_HDR_FRAGOFF 0x1fff
+
+#define NPC_IP6_HOP_MASK 0x00ff
+#define NPC_IP6_FRAG_FRAGOFF 0xfff8
#define NPC_GRE_F_CSUM (0x1 << 15)
#define NPC_GRE_F_ROUTE (0x1 << 14)
@@ -108,22 +126,44 @@
#define NPC_GTP_MT_G_PDU 0xff
#define NPC_GTP_MT_MASK 0xff
+#define NPC_TCP_FLAGS_FIN 0x0001
+#define NPC_TCP_FLAGS_SYN 0x0002
+#define NPC_TCP_FLAGS_RST 0x0004
+#define NPC_TCP_FLAGS_PSH 0x0008
+#define NPC_TCP_FLAGS_ACK 0x0010
+#define NPC_TCP_FLAGS_URG 0x0020
+#define NPC_TCP_FLAGS_MASK 0x003f
+
#define NPC_TCP_DATA_OFFSET_5 0x5000
#define NPC_TCP_DATA_OFFSET_MASK 0xf000
+#define NPC_DSA_EXTEND 0x1000
+#define NPC_DSA_EDSA 0x8000
+
enum npc_kpu_parser_state {
NPC_S_NA = 0,
NPC_S_KPU1_ETHER,
- NPC_S_KPU1_PKI,
+ NPC_S_KPU1_IH_NIX,
+ NPC_S_KPU1_IH,
+ NPC_S_KPU1_EXDSA,
+ NPC_S_KPU1_HIGIG2,
+ NPC_S_KPU1_IH_NIX_HIGIG2,
NPC_S_KPU2_CTAG,
+ NPC_S_KPU2_CTAG2,
NPC_S_KPU2_SBTAG,
NPC_S_KPU2_QINQ,
NPC_S_KPU2_ETAG,
NPC_S_KPU2_ITAG,
+ NPC_S_KPU2_PREHEADER,
+ NPC_S_KPU2_EXDSA,
NPC_S_KPU3_CTAG,
NPC_S_KPU3_STAG,
NPC_S_KPU3_QINQ,
NPC_S_KPU3_ITAG,
+ NPC_S_KPU3_CTAG_C,
+ NPC_S_KPU3_STAG_C,
+ NPC_S_KPU3_QINQ_C,
+ NPC_S_KPU3_DSA,
NPC_S_KPU4_MPLS,
NPC_S_KPU4_NSH,
NPC_S_KPU5_IP,
@@ -136,7 +176,12 @@ enum npc_kpu_parser_state {
NPC_S_KPU5_MPLS_PL,
NPC_S_KPU5_NSH,
NPC_S_KPU6_IP6_EXT,
+ NPC_S_KPU6_IP6_HOP_DEST,
+ NPC_S_KPU6_IP6_ROUT,
+ NPC_S_KPU6_IP6_FRAG,
NPC_S_KPU7_IP6_EXT,
+ NPC_S_KPU7_IP6_ROUT,
+ NPC_S_KPU7_IP6_FRAG,
NPC_S_KPU8_TCP,
NPC_S_KPU8_UDP,
NPC_S_KPU8_SCTP,
@@ -146,16 +191,26 @@ enum npc_kpu_parser_state {
NPC_S_KPU8_GRE,
NPC_S_KPU8_ESP,
NPC_S_KPU8_AH,
- NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN,
- NPC_S_KPU9_TU_MPLS,
- NPC_S_KPU9_TU_NSH,
+ NPC_S_KPU9_TU_MPLS_IN_GRE,
+ NPC_S_KPU9_TU_MPLS_IN_NSH,
+ NPC_S_KPU9_TU_MPLS_IN_IP,
+ NPC_S_KPU9_TU_MPLS_IN_UDP,
+ NPC_S_KPU9_TU_NSH_IN_GRE,
+ NPC_S_KPU9_VXLAN,
+ NPC_S_KPU9_VXLANGPE,
+ NPC_S_KPU9_GENEVE,
+ NPC_S_KPU9_GTPC,
+ NPC_S_KPU9_GTPU,
+ NPC_S_KPU10_TU_MPLS_IN_VXLANGPE,
NPC_S_KPU10_TU_MPLS_PL,
NPC_S_KPU10_TU_MPLS,
- NPC_S_KPU10_TU_NSH,
+ NPC_S_KPU10_TU_NSH_IN_VXLANGPE,
NPC_S_KPU11_TU_ETHER,
NPC_S_KPU11_TU_PPP,
NPC_S_KPU11_TU_MPLS_IN_NSH,
- NPC_S_KPU11_TU_3RD_NSH,
+ NPC_S_KPU11_TU_MPLS_PL,
+ NPC_S_KPU11_TU_MPLS,
+ NPC_S_KPU11_TU_ETHER_IN_NSH,
NPC_S_KPU12_TU_IP,
NPC_S_KPU12_TU_IP6,
NPC_S_KPU12_TU_ARP,
@@ -174,135 +229,172 @@ enum npc_kpu_parser_state {
NPC_S_KPU16_PPTP_DATA,
NPC_S_KPU16_TCP_DATA,
NPC_S_KPU16_UDP_DATA,
+ NPC_S_KPU16_UDP_PTP,
NPC_S_LAST /* has to be the last item */
};
-enum npc_kpu_parser_flag {
- NPC_F_NA = 0,
- NPC_F_PKI,
- NPC_F_PKI_VLAN,
- NPC_F_PKI_ETAG,
- NPC_F_PKI_ITAG,
- NPC_F_PKI_MPLS,
- NPC_F_PKI_NSH,
- NPC_F_ETYPE_UNK,
- NPC_F_ETHER_VLAN,
- NPC_F_ETHER_ETAG,
- NPC_F_ETHER_ITAG,
- NPC_F_ETHER_MPLS,
- NPC_F_ETHER_NSH,
- NPC_F_STAG_CTAG,
- NPC_F_STAG_CTAG_UNK,
- NPC_F_STAG_STAG_CTAG,
- NPC_F_STAG_STAG_STAG,
- NPC_F_QINQ_CTAG,
- NPC_F_QINQ_CTAG_UNK,
- NPC_F_QINQ_QINQ_CTAG,
- NPC_F_QINQ_QINQ_QINQ,
- NPC_F_BTAG_ITAG,
- NPC_F_BTAG_ITAG_STAG,
- NPC_F_BTAG_ITAG_CTAG,
- NPC_F_BTAG_ITAG_UNK,
- NPC_F_ETAG_CTAG,
- NPC_F_ETAG_BTAG_ITAG,
- NPC_F_ETAG_STAG,
- NPC_F_ETAG_QINQ,
- NPC_F_ETAG_ITAG,
- NPC_F_ETAG_ITAG_STAG,
- NPC_F_ETAG_ITAG_CTAG,
- NPC_F_ETAG_ITAG_UNK,
- NPC_F_ITAG_STAG_CTAG,
- NPC_F_ITAG_STAG,
- NPC_F_ITAG_CTAG,
- NPC_F_MPLS_4_LABELS,
- NPC_F_MPLS_3_LABELS,
- NPC_F_MPLS_2_LABELS,
- NPC_F_IP_HAS_OPTIONS,
- NPC_F_IP_IP_IN_IP,
- NPC_F_IP_6TO4,
- NPC_F_IP_MPLS_IN_IP,
- NPC_F_IP_UNK_PROTO,
- NPC_F_IP_IP_IN_IP_HAS_OPTIONS,
- NPC_F_IP_6TO4_HAS_OPTIONS,
- NPC_F_IP_MPLS_IN_IP_HAS_OPTIONS,
- NPC_F_IP_UNK_PROTO_HAS_OPTIONS,
- NPC_F_IP6_HAS_EXT,
- NPC_F_IP6_TUN_IP6,
- NPC_F_IP6_MPLS_IN_IP,
- NPC_F_TCP_HAS_OPTIONS,
- NPC_F_TCP_HTTP,
- NPC_F_TCP_HTTPS,
- NPC_F_TCP_PPTP,
- NPC_F_TCP_UNK_PORT,
- NPC_F_TCP_HTTP_HAS_OPTIONS,
- NPC_F_TCP_HTTPS_HAS_OPTIONS,
- NPC_F_TCP_PPTP_HAS_OPTIONS,
- NPC_F_TCP_UNK_PORT_HAS_OPTIONS,
- NPC_F_UDP_VXLAN,
- NPC_F_UDP_VXLAN_NOVNI,
- NPC_F_UDP_VXLAN_NOVNI_NSH,
- NPC_F_UDP_VXLANGPE,
- NPC_F_UDP_VXLANGPE_NSH,
- NPC_F_UDP_VXLANGPE_MPLS,
- NPC_F_UDP_VXLANGPE_NOVNI,
- NPC_F_UDP_VXLANGPE_NOVNI_NSH,
- NPC_F_UDP_VXLANGPE_NOVNI_MPLS,
- NPC_F_UDP_VXLANGPE_UNK,
- NPC_F_UDP_VXLANGPE_NONP,
- NPC_F_UDP_GTP_GTPC,
- NPC_F_UDP_GTP_GTPU_G_PDU,
- NPC_F_UDP_GTP_GTPU_UNK,
- NPC_F_UDP_UNK_PORT,
- NPC_F_UDP_GENEVE,
- NPC_F_UDP_GENEVE_OAM,
- NPC_F_UDP_GENEVE_CRI_OPT,
- NPC_F_UDP_GENEVE_OAM_CRI_OPT,
- NPC_F_GRE_NVGRE,
- NPC_F_GRE_HAS_SRE,
- NPC_F_GRE_HAS_CSUM,
- NPC_F_GRE_HAS_KEY,
- NPC_F_GRE_HAS_SEQ,
- NPC_F_GRE_HAS_CSUM_KEY,
- NPC_F_GRE_HAS_CSUM_SEQ,
- NPC_F_GRE_HAS_KEY_SEQ,
- NPC_F_GRE_HAS_CSUM_KEY_SEQ,
- NPC_F_GRE_HAS_ROUTE,
- NPC_F_GRE_UNK_PROTO,
- NPC_F_GRE_VER1,
- NPC_F_GRE_VER1_HAS_SEQ,
- NPC_F_GRE_VER1_HAS_ACK,
- NPC_F_GRE_VER1_HAS_SEQ_ACK,
- NPC_F_GRE_VER1_UNK_PROTO,
- NPC_F_TU_ETHER_UNK,
- NPC_F_TU_ETHER_CTAG,
- NPC_F_TU_ETHER_CTAG_UNK,
- NPC_F_TU_ETHER_STAG_CTAG,
- NPC_F_TU_ETHER_STAG_CTAG_UNK,
- NPC_F_TU_ETHER_STAG,
- NPC_F_TU_ETHER_STAG_UNK,
- NPC_F_TU_ETHER_QINQ_CTAG,
- NPC_F_TU_ETHER_QINQ_CTAG_UNK,
- NPC_F_TU_ETHER_QINQ,
- NPC_F_TU_ETHER_QINQ_UNK,
- NPC_F_LAST /* has to be the last item */
+enum npc_kpu_la_uflag {
+ NPC_F_LA_U_HAS_TAG = 0x10,
+ NPC_F_LA_U_HAS_IH_NIX = 0x20,
+ NPC_F_LA_U_HAS_HIGIG2 = 0x40,
+};
+enum npc_kpu_la_lflag {
+ NPC_F_LA_L_UNK_ETYPE = 1,
+ NPC_F_LA_L_WITH_VLAN,
+ NPC_F_LA_L_WITH_ETAG,
+ NPC_F_LA_L_WITH_ITAG,
+ NPC_F_LA_L_WITH_MPLS,
+ NPC_F_LA_L_WITH_NSH,
+};
+
+enum npc_kpu_lb_uflag {
+ NPC_F_LB_U_UNK_ETYPE = 0x80,
+ NPC_F_LB_U_MORE_TAG = 0x40,
+};
+enum npc_kpu_lb_lflag {
+ NPC_F_LB_L_WITH_CTAG = 1,
+ NPC_F_LB_L_WITH_CTAG_UNK,
+ NPC_F_LB_L_WITH_STAG_CTAG,
+ NPC_F_LB_L_WITH_STAG_STAG,
+ NPC_F_LB_L_WITH_QINQ_CTAG,
+ NPC_F_LB_L_WITH_QINQ_QINQ,
+ NPC_F_LB_L_WITH_ITAG,
+ NPC_F_LB_L_WITH_ITAG_STAG,
+ NPC_F_LB_L_WITH_ITAG_CTAG,
+ NPC_F_LB_L_WITH_ITAG_UNK,
+ NPC_F_LB_L_WITH_BTAG_ITAG,
+ NPC_F_LB_L_WITH_STAG,
+ NPC_F_LB_L_WITH_QINQ,
+ NPC_F_LB_L_DSA,
+ NPC_F_LB_L_DSA_VLAN,
+ NPC_F_LB_L_EDSA,
+ NPC_F_LB_L_EDSA_VLAN,
+ NPC_F_LB_L_EXDSA,
+ NPC_F_LB_L_EXDSA_VLAN,
+};
+
+enum npc_kpu_lc_uflag {
+ NPC_F_LC_U_UNK_PROTO = 0x10,
+ NPC_F_LC_U_IP_FRAG = 0x20,
+ NPC_F_LC_U_IP6_FRAG = 0x40,
+};
+enum npc_kpu_lc_lflag {
+ NPC_F_LC_L_IP_IN_IP = 1,
+ NPC_F_LC_L_6TO4,
+ NPC_F_LC_L_MPLS_IN_IP,
+ NPC_F_LC_L_IP6_TUN_IP6,
+ NPC_F_LC_L_IP6_MPLS_IN_IP,
+ NPC_F_LC_L_MPLS_4_LABELS,
+ NPC_F_LC_L_MPLS_3_LABELS,
+ NPC_F_LC_L_MPLS_2_LABELS,
+ NPC_F_LC_L_EXT_HOP,
+ NPC_F_LC_L_EXT_DEST,
+ NPC_F_LC_L_EXT_ROUT,
+ NPC_F_LC_L_EXT_MOBILITY,
+ NPC_F_LC_L_EXT_HOSTID,
+ NPC_F_LC_L_EXT_SHIM6,
+};
+
+enum npc_kpu_ld_lflag {
+ NPC_F_LD_L_TCP_UNK_PORT = 1,
+ NPC_F_LD_L_TCP_HAS_OPTIONS,
+ NPC_F_LD_L_TCP_UNK_PORT_HAS_OPTIONS,
+ NPC_F_LD_L_UDP_UNK_PORT,
+ NPC_F_LD_L_GRE_NVGRE,
+ NPC_F_LD_L_GRE_HAS_SRE,
+ NPC_F_LD_L_GRE_HAS_CSUM,
+ NPC_F_LD_L_GRE_HAS_KEY,
+ NPC_F_LD_L_GRE_HAS_SEQ,
+ NPC_F_LD_L_GRE_HAS_CSUM_KEY,
+ NPC_F_LD_L_GRE_HAS_CSUM_SEQ,
+ NPC_F_LD_L_GRE_HAS_KEY_SEQ,
+ NPC_F_LD_L_GRE_HAS_CSUM_KEY_SEQ,
+ NPC_F_LD_L_GRE_HAS_ROUTE,
+ NPC_F_LD_L_GRE_UNK_PROTO,
+ NPC_F_LD_L_GRE_VER1,
+ NPC_F_LD_L_GRE_VER1_HAS_SEQ,
+ NPC_F_LD_L_GRE_VER1_HAS_ACK,
+ NPC_F_LD_L_GRE_VER1_HAS_SEQ_ACK,
+ NPC_F_LD_L_GRE_VER1_UNK_PROTO,
+ NPC_F_LD_L_MPLS_4_LABELS,
+ NPC_F_LD_L_MPLS_3_LABELS,
+ NPC_F_LD_L_MPLS_2_LABELS,
+};
+
+enum npc_kpu_le_lflag {
+ NPC_F_LE_L_VXLAN_NOVNI,
+ NPC_F_LE_L_VXLANGPE_NOVNI,
+ NPC_F_LE_L_VXLANGPE_UNK,
+ NPC_F_LE_L_VXLANGPE_NONP,
+ NPC_F_LE_L_GENEVE_OAM,
+ NPC_F_LE_L_GENEVE_CRI_OPT,
+ NPC_F_LE_L_GENEVE_OAM_CRI_OPT,
+ NPC_F_LE_L_GTPU_G_PDU,
+ NPC_F_LE_L_GTPU_UNK,
+};
+
+enum npc_kpu_lf_uflag {
+ NPC_F_LF_U_UNK_ETYPE = 0x10,
+ NPC_F_LF_U_HAS_TAG = 0x20,
+};
+
+enum npc_kpu_lf_lflag {
+ NPC_F_LF_L_WITH_CTAG = 1,
+ NPC_F_LF_L_WITH_STAG_CTAG,
+ NPC_F_LF_L_WITH_STAG,
+ NPC_F_LF_L_WITH_QINQ_CTAG,
+ NPC_F_LF_L_WITH_QINQ,
+};
+
+enum npc_kpu_lg_uflag {
+ NPC_F_LG_U_UNK_IP_PROTO = 0x10,
+ NPC_F_LG_U_IP_HAS_OPTIONS = 0x20,
+ NPC_F_LG_U_IP6_HAS_EXT = 0x40,
+};
+
+enum npc_kpu_lh_uflag {
+ NPC_F_LH_U_TCP_HAS_OPTIONS = 0x80,
+};
+
+enum npc_kpu_lh_lflag {
+ NPC_F_LH_L_TCP_HTTP = 1,
+ NPC_F_LH_L_TCP_HTTPS,
+ NPC_F_LH_L_TCP_PPTP,
+ NPC_F_LH_L_TCP_UNK_PORT,
+ NPC_F_LH_L_UDP_UNK_PORT,
};
enum npc_kpu_err_code {
NPC_EC_NOERR = 0, /* has to be zero */
NPC_EC_UNK,
+ NPC_EC_IH_LENGTH,
+ NPC_EC_EDSA_UNK,
NPC_EC_L2_K1,
NPC_EC_L2_K2,
NPC_EC_L2_K3,
NPC_EC_L2_K3_ETYPE_UNK,
- NPC_EC_L2_MPLS_2MANY,
NPC_EC_L2_K4,
+ NPC_EC_MPLS_2MANY,
+ NPC_EC_MPLS_UNK,
+ NPC_EC_NSH_UNK,
+ NPC_EC_IP_TTL_0,
+ NPC_EC_IP_FRAG_OFFSET_1,
NPC_EC_IP_VER,
+ NPC_EC_IP6_HOP_0,
NPC_EC_IP6_VER,
+ NPC_EC_TCP_FLAGS_FIN_ONLY,
+ NPC_EC_TCP_FLAGS_ZERO,
+ NPC_EC_TCP_FLAGS_RST_FIN,
+ NPC_EC_TCP_FLAGS_URG_SYN,
+ NPC_EC_TCP_FLAGS_RST_SYN,
+ NPC_EC_TCP_FLAGS_SYN_FIN,
NPC_EC_VXLAN,
NPC_EC_NVGRE,
NPC_EC_GRE,
NPC_EC_GRE_VER1,
NPC_EC_L4,
+ NPC_EC_OIP4_CSUM,
+ NPC_EC_IIP4_CSUM,
NPC_EC_LAST /* has to be the last item */
};
@@ -328,5282 +420,12598 @@ enum NPC_ERRLEV_E {
static struct npc_kpu_profile_action ikpu_action_entries[] = {
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 36, 40, 44, 0, 0,
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 28, 32, 36, 0, 0,
+ NPC_S_KPU1_HIGIG2, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_EXDSA, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
- 0, 0, NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 1, 0xff,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 20, 24, 28, 0, 0,
+ NPC_S_KPU1_IH_NIX, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+
},
};
static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
{
- NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_IP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_IP6, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_ARP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_RARP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_PTP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_FCOE, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_CTAG, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_SBTAG, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_QINQ, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_ETAG, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_ITAG, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_MPLSU, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_MPLSM, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_NSH, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU1_ETHER, 0xff, 0x0000, 0xfc00,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU1_ETHER, 0xff, 0x0400, 0xfe00,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU1_ETHER, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_IP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_IP6, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_ARP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_RARP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_PTP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_FCOE, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_CTAG, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_SBTAG, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_QINQ, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_ETAG, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_ITAG, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_MPLSU, 0xffff,
- 0x0010, 0x0010, 0x0000, 0xffff,
- },
- {
- NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_MPLSM, 0xffff,
- 0x0010, 0x0010, 0x0000, 0xffff,
+ NPC_S_KPU1_ETHER, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff,
+ NPC_ETYPE_ETAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff,
+ NPC_ETYPE_DSA,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff,
+ 0x0000,
+ 0xfc00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff,
+ 0x0400,
+ 0xfe00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX, 0xff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX, 0xff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX, 0xff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX, 0xff,
+ NPC_ETYPE_ETAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH, 0xff,
+ NPC_IH_W|NPC_IH_UTAG,
+ NPC_IH_W|NPC_IH_UTAG,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH, 0xff,
+ NPC_IH_W,
+ NPC_IH_W|NPC_IH_UTAG,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH, 0xff,
+ 0x0000,
+ NPC_IH_W|NPC_IH_UTAG,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_EXDSA, 0xff,
+ NPC_DSA_EXTEND,
+ NPC_DSA_EXTEND,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_EXDSA, 0xff,
+ 0x0000,
+ NPC_DSA_EXTEND,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_HIGIG2, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_HIGIG2, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_HIGIG2, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_HIGIG2, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_HIGIG2, 0xff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_HIGIG2, 0xff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_HIGIG2, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_HIGIG2, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_HIGIG2, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_HIGIG2, 0xff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_HIGIG2, 0xff,
+ NPC_ETYPE_ETAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_HIGIG2, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_HIGIG2, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_HIGIG2, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_HIGIG2, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_HIGIG2, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ NPC_ETYPE_ETAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_NA, 0x00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
},
+};
+
+static struct npc_kpu_profile_cam kpu2_cam_entries[] = {
{
- NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_NSH, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_S_KPU2_CTAG, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_IP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_IP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG2, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG2, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG2, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG2, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG2, 0xff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG2, 0xff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG2, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG2, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG2, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG2, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG2, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_PREHEADER, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_PREHEADER, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_PREHEADER, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_PREHEADER, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_PREHEADER, 0xff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_PREHEADER, 0xff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_PREHEADER, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_PREHEADER, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_PREHEADER, 0xff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_PREHEADER, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_PREHEADER, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_PREHEADER, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_EXDSA, 0xff,
+ NPC_DSA_EDSA,
+ NPC_DSA_EDSA,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_IP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_EXDSA, 0xff,
+ NPC_DSA_EDSA,
+ NPC_DSA_EDSA,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_EXDSA, 0xff,
+ NPC_DSA_EDSA,
+ NPC_DSA_EDSA,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_EXDSA, 0xff,
+ NPC_DSA_EDSA,
+ NPC_DSA_EDSA,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_EXDSA, 0xff,
+ NPC_DSA_EDSA,
+ NPC_DSA_EDSA,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_EXDSA, 0xff,
+ NPC_DSA_EDSA,
+ NPC_DSA_EDSA,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_EXDSA, 0xff,
+ NPC_DSA_EDSA,
+ NPC_DSA_EDSA,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_EXDSA, 0xff,
+ NPC_DSA_EDSA,
+ NPC_DSA_EDSA,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_EXDSA, 0xff,
+ 0x0000,
+ NPC_DSA_EDSA,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_EXDSA, 0xff,
+ 0x0000,
+ NPC_DSA_EDSA,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_EXDSA, 0xff,
+ 0x0000,
+ NPC_DSA_EDSA,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_EXDSA, 0xff,
+ 0x0000,
+ NPC_DSA_EDSA,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_EXDSA, 0xff,
+ 0x0000,
+ NPC_DSA_EDSA,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_EXDSA, 0xff,
+ 0x0000,
+ NPC_DSA_EDSA,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_EXDSA, 0xff,
+ 0x0000,
+ NPC_DSA_EDSA,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_EXDSA, 0xff,
+ 0x0000,
+ NPC_DSA_EDSA,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_NA, 0x00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
},
+};
+
+static struct npc_kpu_profile_cam kpu3_cam_entries[] = {
{
- NPC_S_KPU1_PKI, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_S_KPU3_CTAG, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG, 0xff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG, 0xff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG_C, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG_C, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG_C, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG_C, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG_C, 0xff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG_C, 0xff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG_C, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG_C, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG_C, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG_C, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG_C, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG_C, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG_C, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG_C, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG_C, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG_C, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG_C, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG_C, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG_C, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG_C, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG_C, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG_C, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG_C, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG_C, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG_C, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG_C, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG_C, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ_C, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ_C, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ_C, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ_C, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ_C, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ_C, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ_C, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ_C, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ_C, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ_C, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ_C, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ_C, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ_C, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ_C, 0xff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ_C, 0xff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ_C, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ_C, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ_C, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ_C, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_DSA, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_DSA, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_DSA, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_DSA, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_DSA, 0xff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_DSA, 0xff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_DSA, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_DSA, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_DSA, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_DSA, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_DSA, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_DSA, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_DSA, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU3_DSA, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_NA, 0x00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
},
+};
+
+static struct npc_kpu_profile_cam kpu4_cam_entries[] = {
{
- NPC_S_NA, 0X00, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_S_KPU4_MPLS, 0xff,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_MPLS, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_MPLS, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ },
+ {
+ NPC_S_KPU4_MPLS, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ NPC_MPLS_S,
+ },
+ {
+ NPC_S_KPU4_NSH, 0xff,
+ NPC_NSH_NP_IP,
+ NPC_NSH_NP_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_NSH, 0xff,
+ NPC_NSH_NP_IP6,
+ NPC_NSH_NP_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_NSH, 0xff,
+ NPC_NSH_NP_ETH,
+ NPC_NSH_NP_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_NSH, 0xff,
+ NPC_NSH_NP_MPLS,
+ NPC_NSH_NP_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_NSH, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_NA, 0x00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
},
};
-static struct npc_kpu_profile_cam kpu2_cam_entries[] = {
+static struct npc_kpu_profile_cam kpu5_cam_entries[] = {
{
- NPC_S_KPU2_CTAG, 0xff, NPC_ETYPE_IP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_S_KPU5_IP, 0xff,
+ 0x0000,
+ NPC_IP_TTL_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0001,
+ NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_TCP,
+ 0x00ff,
+ NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_UDP,
+ 0x00ff,
+ NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_SCTP,
+ 0x00ff,
+ NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_ICMP,
+ 0x00ff,
+ NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_IGMP,
+ 0x00ff,
+ NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_ESP,
+ 0x00ff,
+ NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_AH,
+ 0x00ff,
+ NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_GRE,
+ 0x00ff,
+ NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_IP,
+ 0x00ff,
+ NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_IP6,
+ 0x00ff,
+ NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_MPLS,
+ 0x00ff,
+ NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_TCP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_UDP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_SCTP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_ICMP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_IGMP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_ESP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_AH,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_GRE,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_IP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_IP6,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ NPC_IPNH_MPLS,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_ARP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_RARP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_PTP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_FCOE, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ 0x0000,
+ NPC_IP6_HOP_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ NPC_IPNH_TCP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ NPC_IPNH_UDP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ NPC_IPNH_SCTP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ NPC_IPNH_ICMP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ NPC_IPNH_ICMP6 << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ NPC_IPNH_GRE << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ NPC_IPNH_IP6 << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ NPC_IPNH_MPLS << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ NPC_IPNH_HOP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ NPC_IPNH_DEST << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ NPC_IPNH_ROUT << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ NPC_IPNH_FRAG << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ NPC_IPNH_ESP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ NPC_IPNH_AH << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ NPC_IPNH_MOBILITY << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ NPC_IPNH_HOSTID << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ NPC_IPNH_SHIM6 << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_MPLS, 0xff,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_MPLS, 0xff,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_MPLS, 0xff,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ 0x0000,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_MPLS, 0xff,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_MPLS, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_MPLS_PL, 0xff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_MPLS_PL, 0xff,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_MPLS_PL, 0xff,
+ 0x0000,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_MPLS_PL, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_NA, 0x00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
},
+};
+
+static struct npc_kpu_profile_cam kpu6_cam_entries[] = {
{
- NPC_S_KPU2_CTAG, 0xff, NPC_ETYPE_IP6, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_S_KPU6_IP6_EXT, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_FRAG, 0xff,
+ NPC_IPNH_TCP << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_FRAG, 0xff,
+ NPC_IPNH_UDP << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_FRAG, 0xff,
+ NPC_IPNH_SCTP << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_FRAG, 0xff,
+ NPC_IPNH_ICMP << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_FRAG, 0xff,
+ NPC_IPNH_ICMP6 << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_FRAG, 0xff,
+ NPC_IPNH_ESP << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_FRAG, 0xff,
+ NPC_IPNH_AH << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_FRAG, 0xff,
+ NPC_IPNH_GRE << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_FRAG, 0xff,
+ NPC_IPNH_IP6 << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_FRAG, 0xff,
+ NPC_IPNH_MPLS << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_FRAG, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_HOP_DEST, 0xff,
+ NPC_IPNH_TCP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_HOP_DEST, 0xff,
+ NPC_IPNH_UDP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_HOP_DEST, 0xff,
+ NPC_IPNH_SCTP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_HOP_DEST, 0xff,
+ NPC_IPNH_ICMP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_HOP_DEST, 0xff,
+ NPC_IPNH_ICMP6 << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_HOP_DEST, 0xff,
+ NPC_IPNH_ESP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_HOP_DEST, 0xff,
+ NPC_IPNH_AH << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_HOP_DEST, 0xff,
+ NPC_IPNH_GRE << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_HOP_DEST, 0xff,
+ NPC_IPNH_IP6 << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_HOP_DEST, 0xff,
+ NPC_IPNH_MPLS << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_HOP_DEST, 0xff,
+ NPC_IPNH_ROUT << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_HOP_DEST, 0xff,
+ NPC_IPNH_FRAG << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_HOP_DEST, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_ROUT, 0xff,
+ NPC_IPNH_TCP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_ROUT, 0xff,
+ NPC_IPNH_UDP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_ROUT, 0xff,
+ NPC_IPNH_SCTP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_ROUT, 0xff,
+ NPC_IPNH_ICMP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_ROUT, 0xff,
+ NPC_IPNH_ICMP6 << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_ROUT, 0xff,
+ NPC_IPNH_ESP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_ROUT, 0xff,
+ NPC_IPNH_AH << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_ROUT, 0xff,
+ NPC_IPNH_GRE << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_ROUT, 0xff,
+ NPC_IPNH_IP6 << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_ROUT, 0xff,
+ NPC_IPNH_MPLS << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_ROUT, 0xff,
+ NPC_IPNH_FRAG << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_ROUT, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_NA, 0x00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
},
+};
+
+static struct npc_kpu_profile_cam kpu7_cam_entries[] = {
{
- NPC_S_KPU2_CTAG, 0xff, NPC_ETYPE_ARP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_S_KPU7_IP6_EXT, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_ROUT, 0xff,
+ NPC_IPNH_TCP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_ROUT, 0xff,
+ NPC_IPNH_UDP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_ROUT, 0xff,
+ NPC_IPNH_SCTP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_ROUT, 0xff,
+ NPC_IPNH_ICMP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_ROUT, 0xff,
+ NPC_IPNH_ICMP6 << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_ROUT, 0xff,
+ NPC_IPNH_ESP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_ROUT, 0xff,
+ NPC_IPNH_AH << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_ROUT, 0xff,
+ NPC_IPNH_GRE << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_ROUT, 0xff,
+ NPC_IPNH_IP6 << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_ROUT, 0xff,
+ NPC_IPNH_MPLS << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_ROUT, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_FRAG, 0xff,
+ NPC_IPNH_TCP << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_FRAG, 0xff,
+ NPC_IPNH_UDP << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_FRAG, 0xff,
+ NPC_IPNH_SCTP << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_FRAG, 0xff,
+ NPC_IPNH_ICMP << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_FRAG, 0xff,
+ NPC_IPNH_ICMP6 << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_FRAG, 0xff,
+ NPC_IPNH_ESP << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_FRAG, 0xff,
+ NPC_IPNH_AH << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_FRAG, 0xff,
+ NPC_IPNH_GRE << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_FRAG, 0xff,
+ NPC_IPNH_IP6 << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_FRAG, 0xff,
+ NPC_IPNH_MPLS << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_FRAG, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_NA, 0x00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
},
+};
+
+static struct npc_kpu_profile_cam kpu8_cam_entries[] = {
{
- NPC_S_KPU2_CTAG, 0xff, NPC_ETYPE_RARP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_S_KPU8_TCP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_TCP_FLAGS_FIN,
+ NPC_TCP_FLAGS_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_TCP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ NPC_TCP_FLAGS_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_TCP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_TCP_FLAGS_RST|NPC_TCP_FLAGS_FIN,
+ NPC_TCP_FLAGS_RST|NPC_TCP_FLAGS_FIN,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_TCP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_TCP_FLAGS_URG|NPC_TCP_FLAGS_SYN,
+ NPC_TCP_FLAGS_URG|NPC_TCP_FLAGS_SYN,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_TCP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_TCP_FLAGS_RST|NPC_TCP_FLAGS_SYN,
+ NPC_TCP_FLAGS_RST|NPC_TCP_FLAGS_SYN,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_TCP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_TCP_FLAGS_SYN|NPC_TCP_FLAGS_FIN,
+ NPC_TCP_FLAGS_SYN|NPC_TCP_FLAGS_FIN,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_TCP, 0xff,
+ NPC_TCP_PORT_HTTP,
+ 0xffff,
+ NPC_TCP_DATA_OFFSET_5,
+ NPC_TCP_DATA_OFFSET_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_TCP, 0xff,
+ NPC_TCP_PORT_HTTPS,
+ 0xffff,
+ NPC_TCP_DATA_OFFSET_5,
+ NPC_TCP_DATA_OFFSET_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_TCP, 0xff,
+ NPC_TCP_PORT_PPTP,
+ 0xffff,
+ NPC_TCP_DATA_OFFSET_5,
+ NPC_TCP_DATA_OFFSET_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_TCP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_TCP_DATA_OFFSET_5,
+ NPC_TCP_DATA_OFFSET_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_TCP, 0xff,
+ NPC_TCP_PORT_HTTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_TCP, 0xff,
+ NPC_TCP_PORT_HTTPS,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_TCP, 0xff,
+ NPC_TCP_PORT_PPTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_TCP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff,
+ NPC_UDP_PORT_VXLAN,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff,
+ NPC_UDP_PORT_VXLANGPE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff,
+ NPC_UDP_PORT_GENEVE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff,
+ NPC_UDP_PORT_GTPC,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff,
+ NPC_UDP_PORT_GTPU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff,
+ NPC_UDP_PORT_PTP_E,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff,
+ NPC_UDP_PORT_PTP_G,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff,
+ NPC_UDP_PORT_MPLS,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_SCTP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_ICMP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_IGMP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_ICMP6, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_ESP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_AH, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_TRANS_ETH_BR,
+ 0xffff,
+ NPC_GRE_F_KEY,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_TRANS_ETH_BR,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ NPC_GRE_F_CSUM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ NPC_GRE_F_KEY,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ NPC_GRE_F_CSUM|NPC_GRE_F_KEY,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ NPC_GRE_F_CSUM|NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ NPC_GRE_F_KEY|NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ NPC_GRE_F_CSUM|NPC_GRE_F_KEY|NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ NPC_GRE_F_CSUM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ NPC_GRE_F_KEY,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ NPC_GRE_F_CSUM|NPC_GRE_F_KEY,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ NPC_GRE_F_CSUM|NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ NPC_GRE_F_KEY|NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ NPC_GRE_F_CSUM|NPC_GRE_F_KEY|NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ NPC_GRE_F_CSUM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ NPC_GRE_F_KEY,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ NPC_GRE_F_CSUM|NPC_GRE_F_KEY,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ NPC_GRE_F_CSUM|NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ NPC_GRE_F_KEY|NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ NPC_GRE_F_CSUM|NPC_GRE_F_KEY|NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ NPC_GRE_F_CSUM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ NPC_GRE_F_KEY,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ NPC_GRE_F_CSUM|NPC_GRE_F_KEY,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ NPC_GRE_F_CSUM|NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ NPC_GRE_F_KEY|NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ NPC_GRE_F_CSUM|NPC_GRE_F_KEY|NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ NPC_GRE_F_CSUM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ NPC_GRE_F_KEY,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ NPC_GRE_F_CSUM|NPC_GRE_F_KEY,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ NPC_GRE_F_CSUM|NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ NPC_GRE_F_KEY|NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ NPC_GRE_F_CSUM|NPC_GRE_F_KEY|NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ 0x0000,
+ 0xffff,
+ NPC_GRE_F_ROUTE,
+ 0x4fff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ 0x0000,
+ 0xffff,
+ 0x0000,
+ 0x4fff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ 0x0000,
+ 0xffff,
+ 0x0000,
+ 0x0003,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_PPP,
+ 0xffff,
+ NPC_GRE_F_KEY|NPC_GRE_VER_1,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_PPP,
+ 0xffff,
+ NPC_GRE_F_KEY|NPC_GRE_F_SEQ|NPC_GRE_VER_1,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_PPP,
+ 0xffff,
+ NPC_GRE_F_KEY|NPC_GRE_F_ACK|NPC_GRE_VER_1,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_PPP,
+ 0xffff,
+ NPC_GRE_F_KEY|NPC_GRE_F_SEQ|NPC_GRE_F_ACK|NPC_GRE_VER_1,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ 0x0000,
+ 0xffff,
+ 0x2001,
+ 0xef7f,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ 0x0000,
+ 0xffff,
+ 0x0001,
+ 0x0003,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_NA, 0x00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
},
+};
+
+static struct npc_kpu_profile_cam kpu9_cam_entries[] = {
{
- NPC_S_KPU2_CTAG, 0xff, NPC_ETYPE_PTP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 0xff,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ NPC_MPLS_S,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS_IN_NSH, 0xff,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS_IN_NSH, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS_IN_NSH, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS_IN_NSH, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ NPC_MPLS_S,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS_IN_IP, 0xff,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS_IN_IP, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS_IN_IP, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS_IN_IP, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ NPC_MPLS_S,
+ },
+ {
+ NPC_S_KPU9_TU_NSH_IN_GRE, 0xff,
+ NPC_NSH_NP_IP,
+ NPC_NSH_NP_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU9_TU_NSH_IN_GRE, 0xff,
+ NPC_NSH_NP_IP6,
+ NPC_NSH_NP_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU9_TU_NSH_IN_GRE, 0xff,
+ NPC_NSH_NP_ETH,
+ NPC_NSH_NP_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU9_TU_NSH_IN_GRE, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU9_VXLAN, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_VXLAN_I,
+ NPC_VXLAN_I,
+ 0x0000,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU9_VXLAN, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0xffff,
+ 0x0000,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU9_VXLAN, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU9_VXLANGPE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_NP_IP,
+ NPC_VXLANGPE_NP_MASK,
},
{
- NPC_S_KPU2_CTAG, 0xff, NPC_ETYPE_FCOE, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_S_KPU9_VXLANGPE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_NP_IP6,
+ NPC_VXLANGPE_NP_MASK,
},
{
- NPC_S_KPU2_CTAG, 0xff, NPC_ETYPE_MPLSU, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_S_KPU9_VXLANGPE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_NP_ETH,
+ NPC_VXLANGPE_NP_MASK,
},
{
- NPC_S_KPU2_CTAG, 0xff, NPC_ETYPE_MPLSM, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_S_KPU9_VXLANGPE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_NP_NSH,
+ NPC_VXLANGPE_NP_MASK,
},
{
- NPC_S_KPU2_CTAG, 0xff, NPC_ETYPE_NSH, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_S_KPU9_VXLANGPE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_NP_MPLS,
+ NPC_VXLANGPE_NP_MASK,
},
{
- NPC_S_KPU2_CTAG, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_S_KPU9_VXLANGPE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_VXLANGPE_P,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_NP_IP,
+ NPC_VXLANGPE_NP_MASK,
},
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000,
+ NPC_S_KPU9_VXLANGPE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_VXLANGPE_P,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_NP_IP6,
+ NPC_VXLANGPE_NP_MASK,
},
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000,
+ NPC_S_KPU9_VXLANGPE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_VXLANGPE_P,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_NP_ETH,
+ NPC_VXLANGPE_NP_MASK,
},
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000,
+ NPC_S_KPU9_VXLANGPE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_VXLANGPE_P,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_NP_NSH,
+ NPC_VXLANGPE_NP_MASK,
},
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_RARP, 0xffff, 0x0000, 0x0000,
+ NPC_S_KPU9_VXLANGPE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_VXLANGPE_P,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_NP_MPLS,
+ NPC_VXLANGPE_NP_MASK,
},
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_PTP, 0xffff, 0x0000, 0x0000,
+ NPC_S_KPU9_VXLANGPE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_VXLANGPE_P,
+ NPC_VXLANGPE_P,
+ 0x0000,
+ 0x0000,
},
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_FCOE, 0xffff, 0x0000, 0x0000,
+ NPC_S_KPU9_VXLANGPE, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ NPC_VXLANGPE_P,
+ 0x0000,
+ 0x0000,
},
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_MPLSU, 0xffff, 0x0000, 0x0000,
+ NPC_S_KPU9_GENEVE, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_ETYPE_TRANS_ETH_BR,
+ 0xffff,
},
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_MPLSM, 0xffff, 0x0000, 0x0000,
+ NPC_S_KPU9_GENEVE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_GENEVE_F_OAM,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_ETYPE_TRANS_ETH_BR,
+ 0xffff,
},
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_NSH, 0xffff, 0x0000, 0x0000,
+ NPC_S_KPU9_GENEVE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_GENEVE_F_CRI_OPT,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_ETYPE_TRANS_ETH_BR,
+ 0xffff,
},
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_S_KPU9_GENEVE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_ETYPE_TRANS_ETH_BR,
+ 0xffff,
},
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
- NPC_ETYPE_CTAG, 0xffff, 0x0000, 0x0000,
+ NPC_S_KPU9_GENEVE, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_ETYPE_IP,
+ 0xffff,
},
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
- NPC_ETYPE_SBTAG, 0xffff, 0x0000, 0x0000,
+ NPC_S_KPU9_GENEVE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_GENEVE_F_OAM,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_ETYPE_IP,
+ 0xffff,
},
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
- 0x0000, 0x0000, NPC_ETYPE_IP, 0xffff,
+ NPC_S_KPU9_GENEVE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_GENEVE_F_CRI_OPT,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_ETYPE_IP,
+ 0xffff,
},
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
- 0x0000, 0x0000, NPC_ETYPE_IP6, 0xffff,
+ NPC_S_KPU9_GENEVE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_ETYPE_IP,
+ 0xffff,
},
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
- 0x0000, 0x0000, NPC_ETYPE_ARP, 0xffff,
+ NPC_S_KPU9_GENEVE, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_ETYPE_IP6,
+ 0xffff,
},
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
- 0x0000, 0x0000, NPC_ETYPE_RARP, 0xffff,
+ NPC_S_KPU9_GENEVE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_GENEVE_F_OAM,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_ETYPE_IP6,
+ 0xffff,
},
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
- 0x0000, 0x0000, NPC_ETYPE_PTP, 0xffff,
+ NPC_S_KPU9_GENEVE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_GENEVE_F_CRI_OPT,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_ETYPE_IP6,
+ 0xffff,
},
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
- 0x0000, 0x0000, NPC_ETYPE_FCOE, 0xffff,
+ NPC_S_KPU9_GENEVE, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_ETYPE_IP6,
+ 0xffff,
},
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
- 0x0000, 0x0000, NPC_ETYPE_MPLSU, 0xffff,
+ NPC_S_KPU9_GTPC, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
},
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
- 0x0000, 0x0000, NPC_ETYPE_MPLSM, 0xffff,
+ NPC_S_KPU9_GTPU, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_GTP_PT_GTP | NPC_GTP_VER1 | NPC_GTP_MT_G_PDU,
+ NPC_GTP_PT_MASK | NPC_GTP_VER_MASK | NPC_GTP_MT_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU9_GTPU, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS_IN_UDP, 0xff,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS_IN_UDP, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS_IN_UDP, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS_IN_UDP, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ NPC_MPLS_S,
+ },
+ {
+ NPC_S_NA, 0x00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
},
+};
+
+static struct npc_kpu_profile_cam kpu10_cam_entries[] = {
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
- 0x0000, 0x0000, NPC_ETYPE_NSH, 0xffff,
+ NPC_S_KPU10_TU_MPLS, 0xff,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_MPLS, 0xff,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_MPLS, 0xff,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ 0x0000,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_MPLS, 0xff,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_MPLS, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_MPLS_PL, 0xff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_MPLS_PL, 0xff,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_MPLS_PL, 0xff,
+ 0x0000,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_MPLS_PL, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_MPLS_IN_VXLANGPE, 0xff,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_MPLS_IN_VXLANGPE, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_MPLS_IN_VXLANGPE, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ },
+ {
+ NPC_S_KPU10_TU_MPLS_IN_VXLANGPE, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ NPC_MPLS_S,
+ },
+ {
+ NPC_S_KPU10_TU_NSH_IN_VXLANGPE, 0xff,
+ NPC_NSH_NP_IP,
+ NPC_NSH_NP_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_NSH_IN_VXLANGPE, 0xff,
+ NPC_NSH_NP_IP6,
+ NPC_NSH_NP_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_NSH_IN_VXLANGPE, 0xff,
+ NPC_NSH_NP_ETH,
+ NPC_NSH_NP_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_NSH_IN_VXLANGPE, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_NA, 0x00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
},
+};
+
+static struct npc_kpu_profile_cam kpu11_cam_entries[] = {
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
- 0x0000, 0x0000, NPC_ETYPE_SBTAG, 0xffff,
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_PPP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_MPLS, 0xff,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_MPLS, 0xff,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_MPLS, 0xff,
+ NPC_MPLS_S,
+ NPC_MPLS_S,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_MPLS, 0xff,
+ 0x0000,
+ NPC_MPLS_S,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_MPLS_PL, 0xff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_MPLS_PL, 0xff,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_MPLS_PL, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER_IN_NSH, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_NA, 0x00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
},
+};
+
+static struct npc_kpu_profile_cam kpu12_cam_entries[] = {
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
- 0x0000, 0x0000, NPC_ETYPE_CTAG, 0xffff,
+ NPC_S_KPU12_TU_IP, 0xff,
+ NPC_IPNH_TCP,
+ 0x00ff,
+ NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff,
+ NPC_IPNH_UDP,
+ 0x00ff,
+ NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff,
+ NPC_IPNH_SCTP,
+ 0x00ff,
+ NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff,
+ NPC_IPNH_ICMP,
+ 0x00ff,
+ NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff,
+ NPC_IPNH_IGMP,
+ 0x00ff,
+ NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff,
+ NPC_IPNH_ESP,
+ 0x00ff,
+ NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff,
+ NPC_IPNH_AH,
+ 0x00ff,
+ NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff,
+ NPC_IPNH_TCP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff,
+ NPC_IPNH_UDP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff,
+ NPC_IPNH_SCTP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff,
+ NPC_IPNH_ICMP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff,
+ NPC_IPNH_IGMP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff,
+ NPC_IPNH_ESP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff,
+ NPC_IPNH_AH,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_ARP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP6, 0xff,
+ NPC_IPNH_TCP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP6, 0xff,
+ NPC_IPNH_UDP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP6, 0xff,
+ NPC_IPNH_SCTP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP6, 0xff,
+ NPC_IPNH_ICMP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP6, 0xff,
+ NPC_IPNH_ICMP6 << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP6, 0xff,
+ NPC_IPNH_ESP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP6, 0xff,
+ NPC_IPNH_AH << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP6, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP6, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_NA, 0x00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
},
+};
+
+static struct npc_kpu_profile_cam kpu13_cam_entries[] = {
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_S_KPU13_TU_IP6_EXT, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
},
+};
+
+static struct npc_kpu_profile_cam kpu14_cam_entries[] = {
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_IP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_S_KPU14_TU_IP6_EXT, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
},
+};
+
+static struct npc_kpu_profile_cam kpu15_cam_entries[] = {
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_IP6, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_S_KPU15_TU_TCP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_TCP_FLAGS_FIN,
+ NPC_TCP_FLAGS_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_TCP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ NPC_TCP_FLAGS_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_TCP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_TCP_FLAGS_RST|NPC_TCP_FLAGS_FIN,
+ NPC_TCP_FLAGS_RST|NPC_TCP_FLAGS_FIN,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_TCP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_TCP_FLAGS_URG|NPC_TCP_FLAGS_SYN,
+ NPC_TCP_FLAGS_URG|NPC_TCP_FLAGS_SYN,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_TCP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_TCP_FLAGS_RST|NPC_TCP_FLAGS_SYN,
+ NPC_TCP_FLAGS_RST|NPC_TCP_FLAGS_SYN,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_TCP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_TCP_FLAGS_SYN|NPC_TCP_FLAGS_FIN,
+ NPC_TCP_FLAGS_SYN|NPC_TCP_FLAGS_FIN,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_TCP, 0xff,
+ NPC_TCP_PORT_HTTP,
+ 0xffff,
+ NPC_TCP_DATA_OFFSET_5,
+ NPC_TCP_DATA_OFFSET_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_TCP, 0xff,
+ NPC_TCP_PORT_HTTPS,
+ 0xffff,
+ NPC_TCP_DATA_OFFSET_5,
+ NPC_TCP_DATA_OFFSET_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_TCP, 0xff,
+ NPC_TCP_PORT_PPTP,
+ 0xffff,
+ NPC_TCP_DATA_OFFSET_5,
+ NPC_TCP_DATA_OFFSET_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_TCP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_TCP_DATA_OFFSET_5,
+ NPC_TCP_DATA_OFFSET_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_TCP, 0xff,
+ NPC_TCP_PORT_HTTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_TCP, 0xff,
+ NPC_TCP_PORT_HTTPS,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_TCP, 0xff,
+ NPC_TCP_PORT_PPTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_TCP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_UDP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_SCTP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_ICMP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_IGMP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_ICMP6, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_ESP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_AH, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_NA, 0x00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
},
+};
+
+static struct npc_kpu_profile_cam kpu16_cam_entries[] = {
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ARP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_S_KPU16_TCP_DATA, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU16_HTTP_DATA, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU16_HTTPS_DATA, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU16_PPTP_DATA, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU16_UDP_DATA, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU16_UDP_PTP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
},
+};
+
+static struct npc_kpu_profile_action kpu1_action_entries[] = {
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_RARP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 3, 0,
+ NPC_S_KPU5_IP, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_PTP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 3, 0,
+ NPC_S_KPU5_IP6, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_FCOE, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_ARP, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_MPLSU, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_RARP, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_MPLSM, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_PTP, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_NSH, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_FCOE, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_SBTAG, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 12, 0, 0, 0,
+ NPC_S_KPU2_CTAG2, 12, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 0, 0, 0,
+ NPC_S_KPU2_CTAG, 12, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 22, 0, 0,
+ NPC_S_KPU2_SBTAG, 12, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 0, 0, 0,
+ NPC_S_KPU2_QINQ, 12, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_RARP, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 12, 26, 0, 0,
+ NPC_S_KPU2_ETAG, 12, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_ETAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_PTP, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 18, 22, 26, 0, 0,
+ NPC_S_KPU2_ITAG, 12, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_ITAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_FCOE, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU4_MPLS, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ NPC_F_LA_L_WITH_MPLS,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_MPLSU, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU4_MPLS, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ NPC_F_LA_L_WITH_MPLS,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_MPLSM, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 2, 0,
+ NPC_S_KPU4_NSH, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ NPC_F_LA_L_WITH_NSH,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_NSH, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 12, 0, 1, 0,
+ NPC_S_KPU3_DSA, 12, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LA, NPC_LT_LA_8023,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_QINQ, 0xffff,
- NPC_ETYPE_CTAG, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LA, NPC_LT_LA_8023,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_QINQ, 0xffff,
- NPC_ETYPE_QINQ, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ NPC_F_LA_L_UNK_ETYPE,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_IP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 3, 0,
+ NPC_S_KPU5_IP, 22, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_IP6, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 3, 0,
+ NPC_S_KPU5_IP6, 22, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_ARP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_ARP, 22, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_RARP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_RARP, 22, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_PTP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_PTP, 22, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_FCOE, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_FCOE, 22, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_MPLSU, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 12, 0, 0, 0,
+ NPC_S_KPU2_CTAG2, 20, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_TAG
+ | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_MPLSM, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 0, 0, 0,
+ NPC_S_KPU2_CTAG, 20, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_TAG
+ | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_NSH, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 22, 0, 0,
+ NPC_S_KPU2_SBTAG, 20, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_TAG
+ | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_QINQ, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 0, 0, 0,
+ NPC_S_KPU2_QINQ, 20, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_TAG
+ | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_IP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 12, 26, 0, 0,
+ NPC_S_KPU2_ETAG, 20, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_TAG
+ | NPC_F_LA_L_WITH_ETAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_IP6, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 18, 22, 26, 0, 0,
+ NPC_S_KPU2_ITAG, 20, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_TAG
+ | NPC_F_LA_L_WITH_ITAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_ARP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU4_MPLS, 22, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_L_WITH_MPLS,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_RARP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU4_MPLS, 22, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_L_WITH_MPLS,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_PTP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 2, 0,
+ NPC_S_KPU4_NSH, 22, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_L_WITH_NSH,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_FCOE, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_L_UNK_ETYPE,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_MPLSU, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 14, 16, 0, 0,
+ NPC_S_KPU2_PREHEADER, 8, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_8_ETHER,
+ 0,
+ 1, 0xff, 0, 0,
},
{
- NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_MPLSM, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 14, 16, 0, 0,
+ NPC_S_KPU2_PREHEADER, 4, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_4_ETHER,
+ 0,
+ 1, 0xff, 0, 0,
},
{
- NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_NSH, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 14, 16, 0, 0,
+ NPC_S_KPU2_PREHEADER, 2, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_2_ETHER,
+ 0,
+ 1, 0xff, 0, 0,
},
{
- NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_LA, NPC_EC_IH_LENGTH,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ NPC_F_LA_L_UNK_ETYPE,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
- NPC_ETYPE_ITAG, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 16, 0, 0,
+ NPC_S_KPU2_EXDSA, 12, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_LA, NPC_EC_EDSA_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_QINQ, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 3, 0,
+ NPC_S_KPU5_IP, 30, 1,
+ NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_HIGIG2,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
- 0x0000, 0x0000, NPC_ETYPE_IP, 0xffff,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 3, 0,
+ NPC_S_KPU5_IP6, 30, 1,
+ NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_HIGIG2,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
- 0x0000, 0x0000, NPC_ETYPE_IP6, 0xffff,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_ARP, 30, 1,
+ NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_HIGIG2,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
- 0x0000, 0x0000, NPC_ETYPE_ARP, 0xffff,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_RARP, 30, 1,
+ NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_HIGIG2,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
- 0x0000, 0x0000, NPC_ETYPE_SBTAG, 0xffff,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_PTP, 30, 1,
+ NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_HIGIG2,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
- 0x0000, 0x0000, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_FCOE, 30, 1,
+ NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_HIGIG2,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 12, 0, 0, 0,
+ NPC_S_KPU2_CTAG2, 28, 1,
+ NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_HIGIG2 | NPC_F_LA_U_HAS_TAG
+ | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ETAG, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 0, 0, 0,
+ NPC_S_KPU2_CTAG, 28, 1,
+ NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_HIGIG2 | NPC_F_LA_U_HAS_TAG
+ | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_IP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 22, 0, 0,
+ NPC_S_KPU2_SBTAG, 28, 1,
+ NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_HIGIG2 | NPC_F_LA_U_HAS_TAG
+ | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_IP6, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 0, 0, 0,
+ NPC_S_KPU2_QINQ, 28, 1,
+ NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_HIGIG2 | NPC_F_LA_U_HAS_TAG
+ | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_ARP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 12, 26, 0, 0,
+ NPC_S_KPU2_ETAG, 28, 1,
+ NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_HIGIG2 | NPC_F_LA_U_HAS_TAG
+ | NPC_F_LA_L_WITH_ETAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_RARP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 18, 22, 26, 0, 0,
+ NPC_S_KPU2_ITAG, 28, 1,
+ NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_HIGIG2 | NPC_F_LA_U_HAS_TAG
+ | NPC_F_LA_L_WITH_ITAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
- NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_IP, 0xffff,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU4_MPLS, 30, 1,
+ NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_HIGIG2 | NPC_F_LA_L_WITH_MPLS,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
- NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_IP6, 0xffff,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU4_MPLS, 30, 1,
+ NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_HIGIG2 | NPC_F_LA_L_WITH_MPLS,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
- NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_ARP, 0xffff,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 2, 0,
+ NPC_S_KPU4_NSH, 30, 1,
+ NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_HIGIG2 | NPC_F_LA_L_WITH_NSH,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
- NPC_ETYPE_CTAG, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_HIGIG2 | NPC_F_LA_L_UNK_ETYPE,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
- NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 3, 0,
+ NPC_S_KPU5_IP, 38, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
- NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 3, 0,
+ NPC_S_KPU5_IP6, 38, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
- NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_ARP, 38, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_RARP, 38, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_PTP, 38, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_FCOE, 38, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 12, 0, 0, 0,
+ NPC_S_KPU2_CTAG2, 36, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2
+ | NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 0, 0, 0,
+ NPC_S_KPU2_CTAG, 36, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2
+ | NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU2_ITAG, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 22, 0, 0,
+ NPC_S_KPU2_SBTAG, 36, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2
+ | NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
},
{
- NPC_S_NA, 0X00, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 0, 0, 0,
+ NPC_S_KPU2_QINQ, 36, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2
+ | NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
},
-};
-
-static struct npc_kpu_profile_cam kpu3_cam_entries[] = {
{
- NPC_S_KPU3_CTAG, 0xff, NPC_ETYPE_IP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 12, 26, 0, 0,
+ NPC_S_KPU2_ETAG, 36, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2
+ | NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_ETAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_CTAG, 0xff, NPC_ETYPE_IP6, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 18, 22, 26, 0, 0,
+ NPC_S_KPU2_ITAG, 36, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2
+ | NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_ITAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_CTAG, 0xff, NPC_ETYPE_ARP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU4_MPLS, 38, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2
+ | NPC_F_LA_L_WITH_MPLS,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_CTAG, 0xff, NPC_ETYPE_RARP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU4_MPLS, 38, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2
+ | NPC_F_LA_L_WITH_MPLS,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_CTAG, 0xff, NPC_ETYPE_PTP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 2, 0,
+ NPC_S_KPU4_NSH, 38, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2
+ | NPC_F_LA_L_WITH_NSH,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_CTAG, 0xff, NPC_ETYPE_FCOE, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2
+ | NPC_F_LA_L_UNK_ETYPE,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_CTAG, 0xff, NPC_ETYPE_MPLSU, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_LA, NPC_EC_L2_K1,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
+};
+
+static struct npc_kpu_profile_action kpu2_action_entries[] = {
{
- NPC_S_KPU3_CTAG, 0xff, NPC_ETYPE_MPLSM, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_IP, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_CTAG, 0xff, NPC_ETYPE_NSH, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_IP6, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_CTAG, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_ARP, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_RARP, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_PTP, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_FCOE, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_RARP, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 1, 0,
+ NPC_S_KPU4_MPLS, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_PTP, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 1, 0,
+ NPC_S_KPU4_MPLS, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_FCOE, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 1, 0,
+ NPC_S_KPU4_NSH, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_MPLSU, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ NPC_F_LB_U_UNK_ETYPE,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_MPLSM, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_IP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_NSH, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_IP6, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_IP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_ARP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_IP6, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_RARP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_ARP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_PTP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_RARP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_FCOE, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_MPLSU, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 1, 0,
+ NPC_S_KPU4_MPLS, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_MPLSM, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 1, 0,
+ NPC_S_KPU4_MPLS, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_NSH, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 1, 0,
+ NPC_S_KPU4_NSH, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_STAG, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG_UNK,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 0, 0, 0,
+ NPC_S_KPU3_CTAG, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_STAG_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 0, 0, 0,
+ NPC_S_KPU3_STAG, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_STAG_STAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_IP, 24, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_RARP, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_IP6, 24, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_PTP, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_ARP, 24, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_FCOE, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_RARP, 24, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_MPLSU, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_PTP, 24, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_MPLSM, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_FCOE, 24, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_NSH, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 1, 0,
+ NPC_S_KPU4_MPLS, 24, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_IP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 1, 0,
+ NPC_S_KPU4_MPLS, 24, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_IP6, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 1, 0,
+ NPC_S_KPU4_NSH, 24, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_ARP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU3_STAG, 24, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG_STAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_RARP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU3_CTAG, 24, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_PTP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG_UNK,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_FCOE, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_UNK_ETYPE,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_MPLSU, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_IP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_MPLSM, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_IP6, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_NSH, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_ARP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_QINQ, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_RARP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_IP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_PTP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_IP6, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_FCOE, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_ARP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 1, 0,
+ NPC_S_KPU4_MPLS, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_RARP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 1, 0,
+ NPC_S_KPU4_MPLS, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
- NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_IP, 0xffff,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 1, 0,
+ NPC_S_KPU4_NSH, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
- NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_IP6, 0xffff,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG_UNK,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
- NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_ARP, 0xffff,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 0, 0, 0,
+ NPC_S_KPU3_CTAG, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_QINQ_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
- NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 0, 0, 0,
+ NPC_S_KPU3_QINQ, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_QINQ_QINQ,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
- NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_UNK_ETYPE,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
- NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_IP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
- NPC_ETYPE_CTAG, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_IP6, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_ARP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_RARP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_PTP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_FCOE, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 1, 0,
+ NPC_S_KPU4_MPLS, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ 1,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU3_ITAG, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 1, 0,
+ NPC_S_KPU4_MPLS, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ 2,
+ 0, 0, 0, 0,
},
{
- NPC_S_NA, 0X00, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 1, 0,
+ NPC_S_KPU4_NSH, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ 2,
+ 0, 0, 0, 0,
},
-};
-
-static struct npc_kpu_profile_cam kpu4_cam_entries[] = {
{
- NPC_S_KPU4_MPLS, 0xff, NPC_MPLS_S, NPC_MPLS_S,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU3_CTAG, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU4_MPLS, 0xff, 0x0000, NPC_MPLS_S,
- NPC_MPLS_S, NPC_MPLS_S, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 16, 20, 24, 0, 0,
+ NPC_S_KPU3_ITAG, 14, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_BTAG_ITAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU4_MPLS, 0xff, 0x0000, NPC_MPLS_S,
- 0x0000, NPC_MPLS_S, NPC_MPLS_S, NPC_MPLS_S,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 0, 0, 0,
+ NPC_S_KPU3_STAG, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_STAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU4_MPLS, 0xff, 0x0000, NPC_MPLS_S,
- 0x0000, NPC_MPLS_S, 0x0000, NPC_MPLS_S,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 0, 0, 0,
+ NPC_S_KPU3_QINQ, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_QINQ,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU4_NSH, 0xff, NPC_NSH_NP_IP, NPC_NSH_NP_MASK,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_IP, 28, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU4_NSH, 0xff, NPC_NSH_NP_IP6, NPC_NSH_NP_MASK,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_IP6, 28, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU4_NSH, 0xff, NPC_NSH_NP_ETH, NPC_NSH_NP_MASK,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_ARP, 28, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU4_NSH, 0xff, NPC_NSH_NP_NSH, NPC_NSH_NP_MASK,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU3_STAG, 28, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG_STAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU4_NSH, 0xff, NPC_NSH_NP_MPLS, NPC_NSH_NP_MASK,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU3_CTAG, 28, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_NA, 0X00, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG_UNK,
+ 0, 0, 0, 0,
},
-};
-
-static struct npc_kpu_profile_cam kpu5_cam_entries[] = {
{
- NPC_S_KPU5_IP, 0xff, NPC_IPNH_TCP, 0x00ff,
- NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG,
+ NPC_F_LB_U_UNK_ETYPE,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP, 0xff, NPC_IPNH_UDP, 0x00ff,
- NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_IP, 20, 1,
+ NPC_LID_LB, NPC_LT_LB_ITAG,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP, 0xff, NPC_IPNH_SCTP, 0x00ff,
- NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_IP6, 20, 1,
+ NPC_LID_LB, NPC_LT_LB_ITAG,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP, 0xff, NPC_IPNH_ICMP, 0x00ff,
- NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_ARP, 20, 1,
+ NPC_LID_LB, NPC_LT_LB_ITAG,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP, 0xff, NPC_IPNH_IGMP, 0x00ff,
- NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_RARP, 20, 1,
+ NPC_LID_LB, NPC_LT_LB_ITAG,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP, 0xff, NPC_IPNH_ESP, 0x00ff,
- NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_IP, 28, 1,
+ NPC_LID_LB, NPC_LT_LB_ITAG,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_STAG_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP, 0xff, NPC_IPNH_AH, 0x00ff,
- NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_IP6, 28, 1,
+ NPC_LID_LB, NPC_LT_LB_ITAG,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_STAG_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP, 0xff, NPC_IPNH_GRE, 0x00ff,
- NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_ARP, 28, 1,
+ NPC_LID_LB, NPC_LT_LB_ITAG,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_STAG_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP, 0xff, NPC_IPNH_IP, 0x00ff,
- NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP, 0xff, NPC_IPNH_IP6, 0x00ff,
- NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_IP, 24, 1,
+ NPC_LID_LB, NPC_LT_LB_ITAG,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_STAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP, 0xff, NPC_IPNH_MPLS, 0x00ff,
- NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_IP6, 24, 1,
+ NPC_LID_LB, NPC_LT_LB_ITAG,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_STAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP, 0xff, 0x0000, 0x0000,
- NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_ARP, 24, 1,
+ NPC_LID_LB, NPC_LT_LB_ITAG,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_STAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP, 0xff, NPC_IPNH_TCP, 0x00ff,
- NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP, 0xff, NPC_IPNH_UDP, 0x00ff,
- NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_IP, 24, 1,
+ NPC_LID_LB, NPC_LT_LB_ITAG,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP, 0xff, NPC_IPNH_SCTP, 0x00ff,
- NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_IP6, 24, 1,
+ NPC_LID_LB, NPC_LT_LB_ITAG,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP, 0xff, NPC_IPNH_ICMP, 0x00ff,
- NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_ARP, 24, 1,
+ NPC_LID_LB, NPC_LT_LB_ITAG,
+ NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP, 0xff, NPC_IPNH_IGMP, 0x00ff,
- NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP, 0xff, NPC_IPNH_ESP, 0x00ff,
- NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP, 0xff, NPC_IPNH_AH, 0x00ff,
- NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_IP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP, 0xff, NPC_IPNH_GRE, 0x00ff,
- NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_IP6, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP, 0xff, NPC_IPNH_IP, 0x00ff,
- NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_ARP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP, 0xff, NPC_IPNH_IP6, 0x00ff,
- NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_RARP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP, 0xff, NPC_IPNH_MPLS, 0x00ff,
- NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_PTP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP, 0xff, 0x0000, 0x0000,
- NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_FCOE, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 1, 0,
+ NPC_S_KPU4_MPLS, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_ARP, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 1, 0,
+ NPC_S_KPU4_MPLS, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_RARP, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 1, 0,
+ NPC_S_KPU4_NSH, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_PTP, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 0, 0, 0,
+ NPC_S_KPU3_QINQ, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_QINQ_QINQ,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_FCOE, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_UNK_ETYPE,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP6, 0xff, NPC_IPNH_TCP << 8, 0xff00,
- NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_IP, 14, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP6, 0xff, NPC_IPNH_UDP << 8, 0xff00,
- NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_IP6, 14, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP6, 0xff, NPC_IPNH_SCTP << 8, 0xff00,
- NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_ARP, 14, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP6, 0xff, NPC_IPNH_ICMP << 8, 0xff00,
- NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_RARP, 14, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP6, 0xff, NPC_IPNH_ICMP6 << 8, 0xff00,
- NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_PTP, 14, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP6, 0xff, NPC_IPNH_ESP << 8, 0xff00,
- NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_FCOE, 14, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP6, 0xff, NPC_IPNH_AH << 8, 0xff00,
- NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 0, 0, 0,
+ NPC_S_KPU3_CTAG_C, 14, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP6, 0xff, NPC_IPNH_GRE << 8, 0xff00,
- NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 20, 0, 0,
+ NPC_S_KPU3_STAG_C, 14, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP6, 0xff, NPC_IPNH_IP6 << 8, 0xff00,
- NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 0, 0, 0,
+ NPC_S_KPU3_QINQ_C, 14, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP6, 0xff, NPC_IPNH_MPLS << 8, 0xff00,
- NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 1, 0,
+ NPC_S_KPU4_MPLS, 14, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP6, 0xff, 0x0000, 0x0000,
- NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 1, 0,
+ NPC_S_KPU4_MPLS, 14, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_IP6, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 1, 0,
+ NPC_S_KPU4_NSH, 14, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_MPLS, 0xff, NPC_MPLS_S, NPC_MPLS_S,
- NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_IP, 18, 1,
+ NPC_LID_LB, NPC_LT_LB_EDSA,
+ NPC_F_LB_L_EDSA,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_MPLS, 0xff, NPC_MPLS_S, NPC_MPLS_S,
- NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_IP6, 18, 1,
+ NPC_LID_LB, NPC_LT_LB_EDSA,
+ NPC_F_LB_L_EDSA,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_MPLS, 0xff, NPC_MPLS_S, NPC_MPLS_S,
- 0x0000, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_ARP, 18, 1,
+ NPC_LID_LB, NPC_LT_LB_EDSA,
+ NPC_F_LB_L_EDSA,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_MPLS, 0xff, NPC_MPLS_S, NPC_MPLS_S,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_RARP, 18, 1,
+ NPC_LID_LB, NPC_LT_LB_EDSA,
+ NPC_F_LB_L_EDSA,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_MPLS, 0xff, 0x0000, NPC_MPLS_S,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_PTP, 18, 1,
+ NPC_LID_LB, NPC_LT_LB_EDSA,
+ NPC_F_LB_L_EDSA,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_MPLS_PL, 0xff, NPC_IP_VER_4, NPC_IP_VER_MASK,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_FCOE, 18, 1,
+ NPC_LID_LB, NPC_LT_LB_EDSA,
+ NPC_F_LB_L_EDSA,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_MPLS_PL, 0xff, NPC_IP_VER_6, NPC_IP_VER_MASK,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 0, 0, 0,
+ NPC_S_KPU3_CTAG, 16, 1,
+ NPC_LID_LB, NPC_LT_LB_EDSA_VLAN,
+ NPC_F_LB_L_EDSA_VLAN,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_MPLS_PL, 0xff, 0x0000, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_EDSA,
+ NPC_F_LB_U_UNK_ETYPE | NPC_F_LB_L_EDSA,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_MPLS_PL, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_IP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_EXDSA,
+ NPC_F_LB_L_EXDSA,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_NSH, 0xff, NPC_NSH_NP_IP, NPC_NSH_NP_MASK,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_IP6, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_EXDSA,
+ NPC_F_LB_L_EXDSA,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_NSH, 0xff, NPC_NSH_NP_IP6, NPC_NSH_NP_MASK,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_ARP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_EXDSA,
+ NPC_F_LB_L_EXDSA,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_NSH, 0xff, NPC_NSH_NP_ETH, NPC_NSH_NP_MASK,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_RARP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_EXDSA,
+ NPC_F_LB_L_EXDSA,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_NSH, 0xff, NPC_NSH_NP_NSH, NPC_NSH_NP_MASK,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_PTP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_EXDSA,
+ NPC_F_LB_L_EXDSA,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU5_NSH, 0xff, NPC_NSH_NP_MPLS, NPC_NSH_NP_MASK,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_FCOE, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_EXDSA,
+ NPC_F_LB_L_EXDSA,
+ 0, 0, 0, 0,
},
{
- NPC_S_NA, 0X00, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 0, 0, 0,
+ NPC_S_KPU3_CTAG, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_EXDSA_VLAN,
+ NPC_F_LB_L_EXDSA_VLAN,
+ 0, 0, 0, 0,
},
-};
-
-static struct npc_kpu_profile_cam kpu6_cam_entries[] = {
{
- NPC_S_KPU6_IP6_EXT, 0xff, 0x0000, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_EXDSA,
+ NPC_F_LB_U_UNK_ETYPE | NPC_F_LB_L_EXDSA,
+ 0, 0, 0, 0,
},
-};
-
-static struct npc_kpu_profile_cam kpu7_cam_entries[] = {
{
- NPC_S_KPU7_IP6_EXT, 0xff, 0x0000, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_LB, NPC_EC_L2_K3,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
};

-static struct npc_kpu_profile_cam kpu8_cam_entries[] = {
- {
- NPC_S_KPU8_TCP, 0xff, NPC_TCP_PORT_HTTP, 0xffff,
- NPC_TCP_DATA_OFFSET_5, NPC_TCP_DATA_OFFSET_MASK, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU8_TCP, 0xff, NPC_TCP_PORT_HTTPS, 0xffff,
- NPC_TCP_DATA_OFFSET_5, NPC_TCP_DATA_OFFSET_MASK, 0x0000, 0x0000,
- },
+static struct npc_kpu_profile_action kpu3_action_entries[] = {
{
- NPC_S_KPU8_TCP, 0xff, NPC_TCP_PORT_PPTP, 0xffff,
- NPC_TCP_DATA_OFFSET_5, NPC_TCP_DATA_OFFSET_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 1, 0,
+ NPC_S_KPU5_IP, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_TCP, 0xff, 0x0000, 0x0000,
- NPC_TCP_DATA_OFFSET_5, NPC_TCP_DATA_OFFSET_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 1, 0,
+ NPC_S_KPU5_IP6, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_TCP, 0xff, NPC_TCP_PORT_HTTP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_ARP, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_TCP, 0xff, NPC_TCP_PORT_HTTPS, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_RARP, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_TCP, 0xff, NPC_TCP_PORT_PPTP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_PTP, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_TCP, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_FCOE, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLAN, 0xffff,
- NPC_VXLAN_I, NPC_VXLAN_I, 0x0000, 0xffff,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLAN, 0xffff,
- 0x0000, 0xffff, 0x0000, 0xffff,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLAN, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU4_NSH, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff,
- NPC_VXLANGPE_P | NPC_VXLANGPE_I,
- NPC_VXLANGPE_P | NPC_VXLANGPE_I,
- NPC_VXLANGPE_NP_IP, NPC_VXLANGPE_NP_MASK,
+ NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff,
- NPC_VXLANGPE_P | NPC_VXLANGPE_I,
- NPC_VXLANGPE_P | NPC_VXLANGPE_I,
- NPC_VXLANGPE_NP_IP6, NPC_VXLANGPE_NP_MASK,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 1, 0,
+ NPC_S_KPU5_IP, 8, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff,
- NPC_VXLANGPE_P | NPC_VXLANGPE_I,
- NPC_VXLANGPE_P | NPC_VXLANGPE_I,
- NPC_VXLANGPE_NP_ETH, NPC_VXLANGPE_NP_MASK,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 1, 0,
+ NPC_S_KPU5_IP6, 8, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff,
- NPC_VXLANGPE_P | NPC_VXLANGPE_I,
- NPC_VXLANGPE_P | NPC_VXLANGPE_I,
- NPC_VXLANGPE_NP_NSH, NPC_VXLANGPE_NP_MASK,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_ARP, 8, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff,
- NPC_VXLANGPE_P | NPC_VXLANGPE_I,
- NPC_VXLANGPE_P | NPC_VXLANGPE_I,
- NPC_VXLANGPE_NP_MPLS, NPC_VXLANGPE_NP_MASK,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_RARP, 8, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff,
- NPC_VXLANGPE_P, NPC_VXLANGPE_P | NPC_VXLANGPE_I,
- NPC_VXLANGPE_NP_IP, NPC_VXLANGPE_NP_MASK,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_PTP, 8, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff,
- NPC_VXLANGPE_P, NPC_VXLANGPE_P | NPC_VXLANGPE_I,
- NPC_VXLANGPE_NP_IP6, NPC_VXLANGPE_NP_MASK,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_FCOE, 8, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff,
- NPC_VXLANGPE_P, NPC_VXLANGPE_P | NPC_VXLANGPE_I,
- NPC_VXLANGPE_NP_ETH, NPC_VXLANGPE_NP_MASK,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 8, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff,
- NPC_VXLANGPE_P, NPC_VXLANGPE_P | NPC_VXLANGPE_I,
- NPC_VXLANGPE_NP_NSH, NPC_VXLANGPE_NP_MASK,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 8, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff,
- NPC_VXLANGPE_P, NPC_VXLANGPE_P | NPC_VXLANGPE_I,
- NPC_VXLANGPE_NP_MPLS, NPC_VXLANGPE_NP_MASK,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU4_NSH, 8, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff,
- NPC_VXLANGPE_P, NPC_VXLANGPE_P, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 1, 0,
+ NPC_S_KPU5_IP, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff,
- 0x0000, NPC_VXLANGPE_P, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 1, 0,
+ NPC_S_KPU5_IP6, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff,
- 0x0000, NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
- NPC_ETYPE_TRANS_ETH_BR, 0xffff,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_ARP, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff,
- NPC_GENEVE_F_OAM, NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
- NPC_ETYPE_TRANS_ETH_BR, 0xffff,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_RARP, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff,
- NPC_GENEVE_F_CRI_OPT, NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
- NPC_ETYPE_TRANS_ETH_BR, 0xffff,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff,
- NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
- NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
- NPC_ETYPE_TRANS_ETH_BR, 0xffff,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff,
- 0x0000, NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
- NPC_ETYPE_IP, 0xffff,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU4_NSH, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff,
- NPC_GENEVE_F_OAM, NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
- NPC_ETYPE_IP, 0xffff,
+ NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff,
- NPC_GENEVE_F_CRI_OPT, NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
- NPC_ETYPE_IP, 0xffff,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 1, 0,
+ NPC_S_KPU5_IP, 8, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff,
- NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
- NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT, NPC_ETYPE_IP, 0xffff,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 1, 0,
+ NPC_S_KPU5_IP6, 8, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff,
- 0x0000, NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
- NPC_ETYPE_IP6, 0xffff,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_ARP, 8, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff,
- NPC_GENEVE_F_OAM, NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
- NPC_ETYPE_IP6, 0xffff,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_RARP, 8, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff,
- NPC_GENEVE_F_CRI_OPT,
- NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT, NPC_ETYPE_IP6, 0xffff,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_PTP, 8, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff,
- NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
- NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT, NPC_ETYPE_IP6, 0xffff,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_FCOE, 8, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GTPC, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 8, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GTPU, 0xffff,
- NPC_GTP_PT_GTP | NPC_GTP_VER1 | NPC_GTP_MT_G_PDU,
- NPC_GTP_PT_MASK | NPC_GTP_VER_MASK | NPC_GTP_MT_MASK,
- 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 8, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GTPU, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU4_NSH, 8, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_UDP, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 1, 0,
+ NPC_S_KPU5_IP, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_SCTP, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 1, 0,
+ NPC_S_KPU5_IP6, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_ICMP, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_ARP, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_IGMP, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_RARP, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_ICMP6, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_PTP, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_ESP, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_FCOE, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_AH, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_TRANS_ETH_BR, 0xffff,
- NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_TRANS_ETH_BR, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU4_NSH, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSU, 0xffff,
- 0x0000, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSU, 0xffff,
- NPC_GRE_F_CSUM, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_IP, 18, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSU, 0xffff,
- NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_IP6, 18, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSU, 0xffff,
- NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_ARP, 18, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSU, 0xffff,
- NPC_GRE_F_CSUM | NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_RARP, 18, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSU, 0xffff,
- NPC_GRE_F_CSUM | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 1, 0,
+ NPC_S_KPU5_IP, 26, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSU, 0xffff,
- NPC_GRE_F_KEY | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 1, 0,
+ NPC_S_KPU5_IP6, 26, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSU, 0xffff,
- NPC_GRE_F_CSUM | NPC_GRE_F_KEY | NPC_GRE_F_SEQ,
- 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_ARP, 26, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSM, 0xffff,
- 0x0000, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 1, 0,
+ NPC_S_KPU5_IP, 22, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSM, 0xffff,
- NPC_GRE_F_CSUM, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 1, 0,
+ NPC_S_KPU5_IP6, 22, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSM, 0xffff,
- NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_ARP, 22, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSM, 0xffff,
- NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSM, 0xffff,
- NPC_GRE_F_CSUM | NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSM, 0xffff,
- NPC_GRE_F_CSUM | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 1, 0,
+ NPC_S_KPU5_IP, 22, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSM, 0xffff,
- NPC_GRE_F_KEY | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 1, 0,
+ NPC_S_KPU5_IP6, 22, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSM, 0xffff,
- NPC_GRE_F_CSUM | NPC_GRE_F_KEY | NPC_GRE_F_SEQ,
- 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_ARP, 22, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_NSH, 0xffff,
- 0x0000, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_NSH, 0xffff,
- NPC_GRE_F_CSUM, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_NSH, 0xffff,
- NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 1, 0,
+ NPC_S_KPU5_IP, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_NSH, 0xffff,
- NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 1, 0,
+ NPC_S_KPU5_IP6, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_NSH, 0xffff,
- NPC_GRE_F_CSUM | NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_ARP, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_NSH, 0xffff,
- NPC_GRE_F_CSUM | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_RARP, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_NSH, 0xffff,
- NPC_GRE_F_KEY | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_PTP, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_NSH, 0xffff,
- NPC_GRE_F_CSUM | NPC_GRE_F_KEY | NPC_GRE_F_SEQ,
- 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_FCOE, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP, 0xffff,
- 0x0000, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP, 0xffff,
- NPC_GRE_F_CSUM, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP, 0xffff,
- NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU4_NSH, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP, 0xffff,
- NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP, 0xffff,
- NPC_GRE_F_CSUM | NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 1, 0,
+ NPC_S_KPU5_IP, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP, 0xffff,
- NPC_GRE_F_CSUM | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 1, 0,
+ NPC_S_KPU5_IP6, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP, 0xffff,
- NPC_GRE_F_KEY | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_ARP, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP, 0xffff,
- NPC_GRE_F_CSUM | NPC_GRE_F_KEY | NPC_GRE_F_SEQ,
- 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_RARP, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP6, 0xffff,
- 0x0000, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_PTP, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP6, 0xffff,
- NPC_GRE_F_CSUM, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_FCOE, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP6, 0xffff,
- NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP6, 0xffff,
- NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP6, 0xffff,
- NPC_GRE_F_CSUM | NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU4_NSH, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP6, 0xffff,
- NPC_GRE_F_CSUM | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 1, 0,
+ NPC_S_KPU5_IP, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP6, 0xffff,
- NPC_GRE_F_KEY | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 1, 0,
+ NPC_S_KPU5_IP6, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP6, 0xffff,
- NPC_GRE_F_CSUM | NPC_GRE_F_KEY | NPC_GRE_F_SEQ,
- 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_ARP, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, 0x0000, 0xffff,
- NPC_GRE_F_ROUTE, 0x4fff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_RARP, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, 0x0000, 0xffff,
- 0x0000, 0x4fff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, 0x0000, 0xffff,
- 0x0000, 0x0003, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_PPP, 0xffff,
- NPC_GRE_F_KEY | NPC_GRE_VER_1, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU4_NSH, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_PPP, 0xffff,
- NPC_GRE_F_KEY | NPC_GRE_F_SEQ | NPC_GRE_VER_1,
- 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_PPP, 0xffff,
- NPC_GRE_F_KEY | NPC_GRE_F_ACK | NPC_GRE_VER_1,
- 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 1, 0,
+ NPC_S_KPU5_IP, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_PPP, 0xffff,
- NPC_GRE_F_KEY | NPC_GRE_F_SEQ | NPC_GRE_F_ACK | NPC_GRE_VER_1,
- 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 1, 0,
+ NPC_S_KPU5_IP6, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, 0x0000, 0xffff,
- 0x2001, 0xef7f, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_ARP, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU8_GRE, 0xff, 0x0000, 0xffff,
- 0x0001, 0x0003, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_RARP, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_NA, 0X00, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_PTP, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
-};
-
-static struct npc_kpu_profile_cam kpu9_cam_entries[] = {
{
- NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 0xff, NPC_MPLS_S, NPC_MPLS_S,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_FCOE, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 0xff, 0x0000, NPC_MPLS_S,
- NPC_MPLS_S, NPC_MPLS_S, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 0xff, 0x0000, NPC_MPLS_S,
- 0x0000, NPC_MPLS_S, NPC_MPLS_S, NPC_MPLS_S,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 0xff, 0x0000, NPC_MPLS_S,
- 0x0000, NPC_MPLS_S, 0x0000, NPC_MPLS_S,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU4_NSH, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU9_TU_MPLS, 0xff, NPC_MPLS_S, NPC_MPLS_S,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 1, 0,
+ NPC_S_KPU5_IP, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU9_TU_MPLS, 0xff, 0x0000, NPC_MPLS_S,
- NPC_MPLS_S, NPC_MPLS_S, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 1, 0,
+ NPC_S_KPU5_IP6, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU9_TU_MPLS, 0xff, 0x0000, NPC_MPLS_S,
- 0x0000, NPC_MPLS_S, NPC_MPLS_S, NPC_MPLS_S,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_ARP, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU9_TU_MPLS, 0xff, 0x0000, NPC_MPLS_S,
- 0x0000, NPC_MPLS_S, 0x0000, NPC_MPLS_S,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_RARP, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU9_TU_NSH, 0xff, NPC_NSH_NP_IP, NPC_NSH_NP_MASK,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_PTP, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU9_TU_NSH, 0xff, NPC_NSH_NP_IP6, NPC_NSH_NP_MASK,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_FCOE, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU9_TU_NSH, 0xff, NPC_NSH_NP_ETH, NPC_NSH_NP_MASK,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU9_TU_NSH, 0xff, NPC_NSH_NP_NSH, NPC_NSH_NP_MASK,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU4_MPLS, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU9_TU_NSH, 0xff, NPC_NSH_NP_MPLS, NPC_NSH_NP_MASK,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU4_NSH, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_NA, 0X00, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ 0,
+ 0, 0, 0, 0,
},
-};
-
-static struct npc_kpu_profile_cam kpu10_cam_entries[] = {
{
- NPC_S_KPU10_TU_MPLS, 0xff, NPC_MPLS_S, NPC_MPLS_S,
- NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 1, 0,
+ NPC_S_KPU5_IP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_DSA,
+ NPC_F_LB_L_DSA,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU10_TU_MPLS, 0xff, NPC_MPLS_S, NPC_MPLS_S,
- NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 1, 0,
+ NPC_S_KPU5_IP6, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_DSA,
+ NPC_F_LB_L_DSA,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU10_TU_MPLS, 0xff, NPC_MPLS_S, NPC_MPLS_S,
- 0x0000, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_ARP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_DSA,
+ NPC_F_LB_L_DSA,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU10_TU_MPLS, 0xff, NPC_MPLS_S, NPC_MPLS_S,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_RARP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_DSA,
+ NPC_F_LB_L_DSA,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU10_TU_MPLS, 0xff, 0x0000, NPC_MPLS_S,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_PTP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_DSA,
+ NPC_F_LB_L_DSA,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU10_TU_MPLS_PL, 0xff, NPC_IP_VER_4, NPC_IP_VER_MASK,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_FCOE, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_DSA,
+ NPC_F_LB_L_DSA,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU10_TU_MPLS_PL, 0xff, NPC_IP_VER_6, NPC_IP_VER_MASK,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 1, 0,
+ NPC_S_KPU5_IP, 14, 1,
+ NPC_LID_LB, NPC_LT_LB_DSA_VLAN,
+ NPC_F_LB_L_DSA_VLAN,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU10_TU_MPLS_PL, 0xff, 0x0000, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 1, 0,
+ NPC_S_KPU5_IP6, 14, 1,
+ NPC_LID_LB, NPC_LT_LB_DSA_VLAN,
+ NPC_F_LB_L_DSA_VLAN,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU10_TU_MPLS_PL, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_ARP, 14, 1,
+ NPC_LID_LB, NPC_LT_LB_DSA_VLAN,
+ NPC_F_LB_L_DSA_VLAN,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU10_TU_NSH, 0xff, NPC_NSH_NP_IP, NPC_NSH_NP_MASK,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_RARP, 14, 1,
+ NPC_LID_LB, NPC_LT_LB_DSA_VLAN,
+ NPC_F_LB_L_DSA_VLAN,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU10_TU_NSH, 0xff, NPC_NSH_NP_IP6, NPC_NSH_NP_MASK,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_PTP, 14, 1,
+ NPC_LID_LB, NPC_LT_LB_DSA_VLAN,
+ NPC_F_LB_L_DSA_VLAN,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU10_TU_NSH, 0xff, NPC_NSH_NP_ETH, NPC_NSH_NP_MASK,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU5_FCOE, 14, 1,
+ NPC_LID_LB, NPC_LT_LB_DSA_VLAN,
+ NPC_F_LB_L_DSA_VLAN,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU10_TU_NSH, 0xff, NPC_NSH_NP_NSH, NPC_NSH_NP_MASK,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_DSA_VLAN,
+ NPC_F_LB_U_UNK_ETYPE | NPC_F_LB_L_DSA,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU10_TU_NSH, 0xff, NPC_NSH_NP_MPLS, NPC_NSH_NP_MASK,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_DSA,
+ NPC_F_LB_U_UNK_ETYPE | NPC_F_LB_L_DSA_VLAN,
+ 0, 0, 0, 0,
},
{
- NPC_S_NA, 0X00, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_LB, NPC_EC_L2_K3,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
};
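Reading aid for the kpu*_cam_entries[] rows removed above: each CAM row is eight positional values. Below is a minimal sketch of the assumed field order, with names recalled from the driver's npc.h (not shown in this excerpt); the worked initializer mirrors the removed NPC_S_KPU8_GRE row at the top of this hunk. Treat the field names as an assumption, not something this diff establishes.

	/* Hedged sketch, not part of the patch: assumed layout of a CAM row. */
	struct npc_kpu_profile_cam {
		u8 state;	/* parser state this row matches (NPC_S_*) */
		u8 state_mask;	/* 0xff in every live row: exact state match */
		u16 dp0;	/* first extracted data word to compare */
		u16 dp0_mask;	/* compare mask for dp0 */
		u16 dp1;	/* second data word */
		u16 dp1_mask;
		u16 dp2;	/* third data word */
		u16 dp2_mask;
	};

	/* Under this reading, the removed GRE row expands to: */
	static const struct npc_kpu_profile_cam example = {
		.state = NPC_S_KPU8_GRE, .state_mask = 0xff,
		.dp0 = NPC_ETYPE_MPLSM, .dp0_mask = 0xffff,
		.dp1 = NPC_GRE_F_CSUM | NPC_GRE_F_SEQ, .dp1_mask = 0xffff,
		.dp2 = 0x0000, .dp2_mask = 0x0000,
	};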
-static struct npc_kpu_profile_cam kpu11_cam_entries[] = {
- {
- NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_IP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_IP6, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_ARP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_CTAG, 0xffff,
- NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_CTAG, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_SBTAG, 0xffff,
- NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_IP, 0xffff,
- },
- {
- NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_SBTAG, 0xffff,
- NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_IP6, 0xffff,
- },
- {
- NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_SBTAG, 0xffff,
- NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_ARP, 0xffff,
- },
- {
- NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_SBTAG, 0xffff,
- NPC_ETYPE_CTAG, 0xffff, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_SBTAG, 0xffff,
- NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_SBTAG, 0xffff,
- NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_SBTAG, 0xffff,
- NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_SBTAG, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_QINQ, 0xffff,
- NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_IP, 0xffff,
- },
- {
- NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_QINQ, 0xffff,
- NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_IP6, 0xffff,
- },
- {
- NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_QINQ, 0xffff,
- NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_ARP, 0xffff,
- },
+static struct npc_kpu_profile_action kpu4_action_entries[] = {
{
- NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_QINQ, 0xffff,
- NPC_ETYPE_CTAG, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU5_MPLS_PL, 4, 1,
+ NPC_LID_LC, NPC_LT_LC_MPLS,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_QINQ, 0xffff,
- NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU5_MPLS_PL, 8, 1,
+ NPC_LID_LC, NPC_LT_LC_MPLS,
+ NPC_F_LC_L_MPLS_2_LABELS,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_QINQ, 0xffff,
- NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU5_MPLS_PL, 12, 1,
+ NPC_LID_LC, NPC_LT_LC_MPLS,
+ NPC_F_LC_L_MPLS_3_LABELS,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_QINQ, 0xffff,
- NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 4, 0, 0, 0,
+ NPC_S_KPU5_MPLS, 12, 1,
+ NPC_LID_LC, NPC_LT_LC_MPLS,
+ NPC_F_LC_L_MPLS_4_LABELS,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_QINQ, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 7, 0,
+ NPC_S_KPU12_TU_IP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_NSH,
+ 0,
+ 1, 0x3f, 0, 2,
},
{
- NPC_S_KPU11_TU_ETHER, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 7, 0,
+ NPC_S_KPU12_TU_IP6, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_NSH,
+ 0,
+ 1, 0x3f, 0, 2,
},
{
- NPC_S_KPU11_TU_PPP, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 6, 0,
+ NPC_S_KPU11_TU_ETHER, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_NSH,
+ 0,
+ 1, 0x3f, 0, 2,
},
{
- NPC_S_KPU11_TU_MPLS_IN_NSH, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 4, 0,
+ NPC_S_KPU9_TU_MPLS_IN_NSH, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_NSH,
+ 0,
+ 1, 0x3f, 0, 2,
},
{
- NPC_S_KPU11_TU_3RD_NSH, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_LC, NPC_EC_NSH_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_NSH,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_NA, 0X00, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_LB, NPC_EC_L2_K4,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
};
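The added kpu*_action_entries[] rows are seventeen positional values split across six lines. As a reading aid, here is a sketch of the assumed field order (names recalled from the driver's npc.h, not visible in this diff), annotated with the values of kpu4_action_entries[0] above:

	/* Hedged sketch, not part of the patch: assumed layout of an action
	 * row, annotated with kpu4_action_entries[0] from above.
	 */
	struct npc_kpu_profile_action {
		u8 errlev;		/* NPC_ERRLEV_RE: level an error is reported at */
		u8 errcode;		/* NPC_EC_NOERR: error code, if any */
		u8 dp0_offset;		/* 0: byte offsets of the three data words */
		u8 dp1_offset;		/* 0:   extracted for the next KPU's */
		u8 dp2_offset;		/* 0:   CAM comparison */
		u8 bypass_count;	/* 0: how many of the following KPUs to skip */
		u8 parse_done;		/* 0: 1 ends parsing (the NPC_S_NA rows) */
		u8 next_state;		/* NPC_S_KPU5_MPLS_PL: state for the next KPU */
		u8 ptr_advance;		/* 4: bytes to advance the parse pointer */
		u8 cap_ena;		/* 1: record this layer in the parse result */
		u8 lid;			/* NPC_LID_LC: layer id recorded */
		u8 ltype;		/* NPC_LT_LC_MPLS: layer type recorded */
		u8 flags;		/* 0: NPC_F_* layer flags */
		u8 offset;		/* 0: variable-length header support: */
		u8 mask;		/* 0:   length byte offset and mask, */
		u8 right;		/* 0:   shift direction, */
		u8 shift;		/* 0:   and shift amount */
	};

Under this reading, the trailing four values the new format spells out on their own line (for example the "1, 0x3f, 0, 2" in the NSH rows) parameterize pointer advance over variable-length headers.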
-static struct npc_kpu_profile_cam kpu12_cam_entries[] = {
- {
- NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_TCP, 0x00ff,
- NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_UDP, 0x00ff,
- NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_SCTP, 0x00ff,
- NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_ICMP, 0x00ff,
- NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_IGMP, 0x00ff,
- NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_ESP, 0x00ff,
- NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_AH, 0x00ff,
- NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU12_TU_IP, 0xff, 0x0000, 0x0000,
- NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_TCP, 0x00ff,
- NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_UDP, 0x00ff,
- NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_SCTP, 0x00ff,
- NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_ICMP, 0x00ff,
- NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_IGMP, 0x00ff,
- NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_ESP, 0x00ff,
- NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_AH, 0x00ff,
- NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU12_TU_IP, 0xff, 0x0000, 0x0000,
- NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU12_TU_IP, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU12_TU_ARP, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU12_TU_IP6, 0xff, NPC_IPNH_TCP << 8, 0xff00,
- NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU12_TU_IP6, 0xff, NPC_IPNH_UDP << 8, 0xff00,
- NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
- },
- {
- NPC_S_KPU12_TU_IP6, 0xff, NPC_IPNH_SCTP << 8, 0xff00,
- NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
- },
+static struct npc_kpu_profile_action kpu5_action_entries[] = {
{
- NPC_S_KPU12_TU_IP6, 0xff, NPC_IPNH_ICMP << 8, 0xff00,
- NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_LC, NPC_EC_IP_TTL_0,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU12_TU_IP6, 0xff, NPC_IPNH_ICMP6 << 8, 0xff00,
- NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_LC, NPC_EC_IP_FRAG_OFFSET_1,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ NPC_F_LC_U_IP_FRAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU12_TU_IP6, 0xff, NPC_IPNH_ESP << 8, 0xff00,
- NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 2, 0,
+ NPC_S_KPU8_TCP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU12_TU_IP6, 0xff, NPC_IPNH_AH << 8, 0xff00,
- NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 2, 0,
+ NPC_S_KPU8_UDP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU12_TU_IP6, 0xff, 0x0000, 0x0000,
- NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_SCTP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU12_TU_IP6, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_ICMP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_NA, 0X00, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_IGMP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
},
-};
-
-static struct npc_kpu_profile_cam kpu13_cam_entries[] = {
{
- NPC_S_KPU13_TU_IP6_EXT, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_ESP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
},
-};
-
-static struct npc_kpu_profile_cam kpu14_cam_entries[] = {
{
- NPC_S_KPU14_TU_IP6_EXT, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_AH, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
},
-};
-
-static struct npc_kpu_profile_cam kpu15_cam_entries[] = {
{
- NPC_S_KPU15_TU_TCP, 0xff, NPC_TCP_PORT_HTTP, 0xffff,
- NPC_TCP_DATA_OFFSET_5, NPC_TCP_DATA_OFFSET_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 2, 0,
+ NPC_S_KPU8_GRE, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU15_TU_TCP, 0xff, NPC_TCP_PORT_HTTPS, 0xffff,
- NPC_TCP_DATA_OFFSET_5, NPC_TCP_DATA_OFFSET_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 6, 0,
+ NPC_S_KPU12_TU_IP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ NPC_F_LC_L_IP_IN_IP,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU15_TU_TCP, 0xff, NPC_TCP_PORT_PPTP, 0xffff,
- NPC_TCP_DATA_OFFSET_5, NPC_TCP_DATA_OFFSET_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 6, 0,
+ NPC_S_KPU12_TU_IP6, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ NPC_F_LC_L_6TO4,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU15_TU_TCP, 0xff, 0x0000, 0x0000,
- NPC_TCP_DATA_OFFSET_5, NPC_TCP_DATA_OFFSET_MASK, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 3, 0,
+ NPC_S_KPU9_TU_MPLS_IN_IP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ NPC_F_LC_L_MPLS_IN_IP,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU15_TU_TCP, 0xff, NPC_TCP_PORT_HTTP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ NPC_F_LC_U_UNK_PROTO,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU15_TU_TCP, 0xff, NPC_TCP_PORT_HTTPS, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ NPC_F_LC_U_IP_FRAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU15_TU_TCP, 0xff, NPC_TCP_PORT_PPTP, 0xffff,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 2, 0,
+ NPC_S_KPU8_TCP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
},
{
- NPC_S_KPU15_TU_TCP, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 8, 10, 2, 0,
+ NPC_S_KPU8_UDP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
},
{
- NPC_S_KPU15_TU_UDP, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_SCTP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
},
{
- NPC_S_KPU15_TU_SCTP, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_ICMP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
},
{
- NPC_S_KPU15_TU_ICMP, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_IGMP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
},
{
- NPC_S_KPU15_TU_IGMP, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_ESP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
},
{
- NPC_S_KPU15_TU_ICMP6, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_AH, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
},
{
- NPC_S_KPU15_TU_ESP, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 2, 0,
+ NPC_S_KPU8_GRE, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
},
{
- NPC_S_KPU15_TU_AH, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 6, 0,
+ NPC_S_KPU12_TU_IP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ NPC_F_LC_L_IP_IN_IP,
+ 0, 0xf, 0, 2,
},
{
- NPC_S_NA, 0X00, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 6, 0,
+ NPC_S_KPU12_TU_IP6, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ NPC_F_LC_L_6TO4,
+ 0, 0xf, 0, 2,
},
-};
-
-static struct npc_kpu_profile_cam kpu16_cam_entries[] = {
{
- NPC_S_KPU16_TCP_DATA, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 3, 0,
+ NPC_S_KPU9_TU_MPLS_IN_IP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ NPC_F_LC_L_MPLS_IN_IP,
+ 0, 0xf, 0, 2,
},
{
- NPC_S_KPU16_HTTP_DATA, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ NPC_F_LC_U_UNK_PROTO,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU16_HTTPS_DATA, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ NPC_F_LC_U_IP_FRAG,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU16_PPTP_DATA, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_LC, NPC_EC_IP_VER,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_S_KPU16_UDP_DATA, 0xff, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000, 0x0000,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_ARP,
+ 0,
+ 0, 0, 0, 0,
},
-};
-
-static struct npc_kpu_profile_action kpu1_action_entries[] = {
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 3, 0, NPC_S_KPU5_IP, 14, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_RARP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 3, 0, NPC_S_KPU5_IP6, 14, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_PTP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 3, 0, NPC_S_KPU5_ARP, 14, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_FCOE,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 3, 0, NPC_S_KPU5_RARP, 14, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LC, NPC_EC_IP6_HOP_0,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 3, 0, NPC_S_KPU5_PTP, 14, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 2, 0,
+ NPC_S_KPU8_TCP, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 3, 0, NPC_S_KPU5_FCOE, 14, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 2, 0,
+ NPC_S_KPU8_UDP, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 0,
- 0, 0, NPC_S_KPU2_CTAG, 14, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_ETHER_VLAN, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_SCTP, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 20,
- 0, 0, NPC_S_KPU2_SBTAG, 14, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_ETHER_VLAN, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_ICMP, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 0,
- 0, 0, NPC_S_KPU2_QINQ, 14, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_ETHER_VLAN, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_ICMP6, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 10, 24,
- 0, 0, NPC_S_KPU2_ETAG, 14, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_ETHER_ETAG, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_GRE, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 16, 20, 24,
- 0, 0, NPC_S_KPU2_ITAG, 14, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_ETHER_ITAG, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 6, 0,
+ NPC_S_KPU12_TU_IP6, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ NPC_F_LC_L_IP6_TUN_IP6,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 2, 0, NPC_S_KPU4_MPLS, 14, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_ETHER_MPLS, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 3, 0,
+ NPC_S_KPU9_TU_MPLS_IN_IP, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ NPC_F_LC_L_IP6_MPLS_IN_IP,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 2, 0, NPC_S_KPU4_MPLS, 14, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_ETHER_MPLS, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU6_IP6_HOP_DEST, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ NPC_F_LC_L_EXT_HOP,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 2, 0, NPC_S_KPU4_NSH, 14, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_ETHER_NSH, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU6_IP6_HOP_DEST, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ NPC_F_LC_L_EXT_DEST,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LA, NPC_LT_LA_8023, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU6_IP6_ROUT, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ NPC_F_LC_L_EXT_ROUT,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LA, NPC_LT_LA_8023, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 2, 0, 0, 0,
+ NPC_S_KPU6_IP6_FRAG, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ NPC_F_LC_U_IP6_FRAG,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_ETYPE_UNK, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_ESP, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 3, 0, NPC_S_KPU5_IP, 14, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_AH, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 3, 0, NPC_S_KPU5_IP6, 14, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ NPC_F_LC_L_EXT_MOBILITY,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 3, 0, NPC_S_KPU5_ARP, 14, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ NPC_F_LC_L_EXT_HOSTID,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 3, 0, NPC_S_KPU5_RARP, 14, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ NPC_F_LC_L_EXT_SHIM6,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 3, 0, NPC_S_KPU5_PTP, 14, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ NPC_F_LC_U_UNK_PROTO,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 3, 0, NPC_S_KPU5_FCOE, 14, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LC, NPC_EC_IP6_VER,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 0,
- 0, 0, NPC_S_KPU2_CTAG, 14, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI_VLAN, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 6, 0,
+ NPC_S_KPU12_TU_IP, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 20,
- 0, 0, NPC_S_KPU2_SBTAG, 14, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI_VLAN, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 6, 0,
+ NPC_S_KPU12_TU_IP6, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 0,
- 0, 0, NPC_S_KPU2_QINQ, 14, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI_VLAN, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 5, 0,
+ NPC_S_KPU11_TU_ETHER, 8, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 10, 24,
- 0, 0, NPC_S_KPU2_ETAG, 14, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI_ETAG, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 5, 0,
+ NPC_S_KPU11_TU_ETHER, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 16, 20, 24,
- 0, 0, NPC_S_KPU2_ITAG, 14, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI_ITAG, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LB, NPC_EC_MPLS_2MANY,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 2, 0, NPC_S_KPU4_MPLS, 14, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI_MPLS, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 6, 0,
+ NPC_S_KPU12_TU_IP, 0, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 2, 0, NPC_S_KPU4_MPLS, 14, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI_MPLS, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 6, 0,
+ NPC_S_KPU12_TU_IP6, 0, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 2, 0, NPC_S_KPU4_NSH, 14, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI_NSH, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 5, 0,
+ NPC_S_KPU11_TU_ETHER, 4, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_ETYPE_UNK, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 5, 0,
+ NPC_S_KPU11_TU_ETHER, 0, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_LA, NPC_EC_L2_K1, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LC, NPC_EC_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
};
-static struct npc_kpu_profile_action kpu2_action_entries[] = {
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 2, 0, NPC_S_KPU5_IP, 4, 1,
- NPC_LID_LB, NPC_LT_LB_CTAG, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 2, 0, NPC_S_KPU5_IP6, 4, 1,
- NPC_LID_LB, NPC_LT_LB_CTAG, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_ARP, 4, 1,
- NPC_LID_LB, NPC_LT_LB_CTAG, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_RARP, 4, 1,
- NPC_LID_LB, NPC_LT_LB_CTAG, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_PTP, 4, 1,
- NPC_LID_LB, NPC_LT_LB_CTAG, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_FCOE, 4, 1,
- NPC_LID_LB, NPC_LT_LB_CTAG, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 1, 0, NPC_S_KPU4_MPLS, 4, 1,
- NPC_LID_LB, NPC_LT_LB_CTAG, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 1, 0, NPC_S_KPU4_MPLS, 4, 1,
- NPC_LID_LB, NPC_LT_LB_CTAG, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 1, 0, NPC_S_KPU4_NSH, 4, 1,
- NPC_LID_LB, NPC_LT_LB_CTAG, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LB, NPC_LT_LB_CTAG, NPC_F_ETYPE_UNK, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 2, 0, NPC_S_KPU5_IP, 8, 1,
- NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_CTAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 2, 0, NPC_S_KPU5_IP6, 8, 1,
- NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_CTAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_ARP, 8, 1,
- NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_CTAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_RARP, 8, 1,
- NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_CTAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_PTP, 8, 1,
- NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_CTAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_FCOE, 8, 1,
- NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_CTAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 1, 0, NPC_S_KPU4_MPLS, 8, 1,
- NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_CTAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 1, 0, NPC_S_KPU4_MPLS, 8, 1,
- NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_CTAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 1, 0, NPC_S_KPU4_NSH, 8, 1,
- NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_CTAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_CTAG_UNK, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 0,
- 0, 0, NPC_S_KPU3_CTAG, 8, 1,
- NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_STAG_CTAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 0,
- 0, 0, NPC_S_KPU3_STAG, 8, 1,
- NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_STAG_STAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 2, 0, NPC_S_KPU5_IP, 22, 1,
- NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 2, 0, NPC_S_KPU5_IP6, 22, 1,
- NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_ARP, 22, 1,
- NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_RARP, 22, 1,
- NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_PTP, 22, 1,
- NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_FCOE, 22, 1,
- NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 1, 0, NPC_S_KPU4_MPLS, 22, 1,
- NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 1, 0, NPC_S_KPU4_MPLS, 22, 1,
- NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 1, 0, NPC_S_KPU4_NSH, 22, 1,
- NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 0, 0, NPC_S_KPU3_STAG, 22, 1,
- NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG_STAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 0, 0, NPC_S_KPU3_CTAG, 22, 1,
- NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG_CTAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG_UNK, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 2, 0, NPC_S_KPU5_IP, 4, 1,
- NPC_LID_LB, NPC_LT_LB_STAG, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 2, 0, NPC_S_KPU5_IP6, 4, 1,
- NPC_LID_LB, NPC_LT_LB_STAG, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_ARP, 4, 1,
- NPC_LID_LB, NPC_LT_LB_STAG, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_RARP, 4, 1,
- NPC_LID_LB, NPC_LT_LB_STAG, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_PTP, 4, 1,
- NPC_LID_LB, NPC_LT_LB_STAG, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_FCOE, 4, 1,
- NPC_LID_LB, NPC_LT_LB_STAG, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 1, 0, NPC_S_KPU4_MPLS, 4, 1,
- NPC_LID_LB, NPC_LT_LB_STAG, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 1, 0, NPC_S_KPU4_MPLS, 4, 1,
- NPC_LID_LB, NPC_LT_LB_STAG, 0, 0, 0,
- 0, 0,
- },
+static struct npc_kpu_profile_action kpu6_action_entries[] = {
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 1, 0, NPC_S_KPU4_NSH, 4, 1,
- NPC_LID_LB, NPC_LT_LB_STAG, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_ETYPE_UNK, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 1, 0,
+ NPC_S_KPU8_TCP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 2, 0, NPC_S_KPU5_IP, 8, 1,
- NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_CTAG, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 8, 10, 1, 0,
+ NPC_S_KPU8_UDP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 2, 0, NPC_S_KPU5_IP6, 8, 1,
- NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_CTAG, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_SCTP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_ARP, 8, 1,
- NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_CTAG, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_ICMP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_RARP, 8, 1,
- NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_CTAG, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_ICMP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_PTP, 8, 1,
- NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_CTAG, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_ESP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_FCOE, 8, 1,
- NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_CTAG, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_AH, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 1, 0, NPC_S_KPU4_MPLS, 8, 1,
- NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_CTAG, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_GRE, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 1, 0, NPC_S_KPU4_MPLS, 8, 1,
- NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_CTAG, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 5, 0,
+ NPC_S_KPU12_TU_IP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 1, 0, NPC_S_KPU4_NSH, 8, 1,
- NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_CTAG, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU9_TU_MPLS_IN_IP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_CTAG_UNK, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 0,
- 0, 0, NPC_S_KPU3_CTAG, 8, 1,
- NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_QINQ_CTAG, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 1, 0,
+ NPC_S_KPU8_TCP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 0,
- 0, 0, NPC_S_KPU3_QINQ, 8, 1,
- NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_QINQ_QINQ, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 8, 10, 1, 0,
+ NPC_S_KPU8_UDP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 2, 0, NPC_S_KPU5_IP, 4, 1,
- NPC_LID_LB, NPC_LT_LB_QINQ, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_SCTP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 2, 0, NPC_S_KPU5_IP6, 4, 1,
- NPC_LID_LB, NPC_LT_LB_QINQ, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_ICMP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_ARP, 4, 1,
- NPC_LID_LB, NPC_LT_LB_QINQ, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_ICMP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_RARP, 4, 1,
- NPC_LID_LB, NPC_LT_LB_QINQ, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_ESP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_PTP, 4, 1,
- NPC_LID_LB, NPC_LT_LB_QINQ, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_AH, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_FCOE, 4, 1,
- NPC_LID_LB, NPC_LT_LB_QINQ, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_GRE, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 1, 0, NPC_S_KPU4_MPLS, 4, 1,
- NPC_LID_LB, NPC_LT_LB_QINQ, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 5, 0,
+ NPC_S_KPU12_TU_IP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 1, 0, NPC_S_KPU4_MPLS, 4, 1,
- NPC_LID_LB, NPC_LT_LB_QINQ, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU9_TU_MPLS_IN_IP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 1, 0, NPC_S_KPU4_NSH, 4, 1,
- NPC_LID_LB, NPC_LT_LB_QINQ, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU7_IP6_ROUT, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_ETYPE_UNK, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 2, 0, 0, 0,
+ NPC_S_KPU7_IP6_FRAG, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 2, 0, NPC_S_KPU5_IP, 8, 1,
- NPC_LID_LB, NPC_LT_LB_ETAG, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 2, 0, NPC_S_KPU5_IP6, 8, 1,
- NPC_LID_LB, NPC_LT_LB_ETAG, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 1, 0,
+ NPC_S_KPU8_TCP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_ARP, 8, 1,
- NPC_LID_LB, NPC_LT_LB_ETAG, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 8, 10, 1, 0,
+ NPC_S_KPU8_UDP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_RARP, 8, 1,
- NPC_LID_LB, NPC_LT_LB_ETAG, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_SCTP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_PTP, 8, 1,
- NPC_LID_LB, NPC_LT_LB_ETAG, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_ICMP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_FCOE, 8, 1,
- NPC_LID_LB, NPC_LT_LB_ETAG, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_ICMP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 1, 0, NPC_S_KPU4_MPLS, 8, 1,
- NPC_LID_LB, NPC_LT_LB_ETAG, 1, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_ESP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 1, 0, NPC_S_KPU4_MPLS, 8, 1,
- NPC_LID_LB, NPC_LT_LB_ETAG, 2, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_AH, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 1, 0, NPC_S_KPU4_NSH, 8, 1,
- NPC_LID_LB, NPC_LT_LB_ETAG, 2, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_GRE, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 0, 0, NPC_S_KPU3_CTAG, 8, 1,
- NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETAG_CTAG, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 5, 0,
+ NPC_S_KPU12_TU_IP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 16, 20, 24,
- 0, 0, NPC_S_KPU3_ITAG, 12, 1,
- NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETAG_BTAG_ITAG, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU9_TU_MPLS_IN_IP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 0,
- 0, 0, NPC_S_KPU3_STAG, 8, 1,
- NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETAG_STAG, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 2, 0, 0, 0,
+ NPC_S_KPU7_IP6_FRAG, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 0,
- 0, 0, NPC_S_KPU3_QINQ, 8, 1,
- NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETAG_QINQ, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 2, 0, NPC_S_KPU5_IP, 26, 1,
- NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETAG_ITAG, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LC, NPC_EC_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
+};
+
+static struct npc_kpu_profile_action kpu7_action_entries[] = {
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 2, 0, NPC_S_KPU5_IP6, 26, 1,
- NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETAG_ITAG, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_ARP, 26, 1,
- NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETAG_ITAG, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 0, 0,
+ NPC_S_KPU8_TCP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 0, 0, NPC_S_KPU3_STAG, 26, 1,
- NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETAG_ITAG_STAG, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 8, 10, 0, 0,
+ NPC_S_KPU8_UDP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 0, 0, NPC_S_KPU3_CTAG, 26, 1,
- NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETAG_ITAG_CTAG, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_SCTP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETAG_ITAG_UNK, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_ICMP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETYPE_UNK, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_ICMP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 2, 0, NPC_S_KPU5_IP, 18, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_ESP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 2, 0, NPC_S_KPU5_IP6, 18, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_AH, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_ARP, 18, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_GRE, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_RARP, 18, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 4, 0,
+ NPC_S_KPU12_TU_IP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 2, 0, NPC_S_KPU5_IP, 26, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG, NPC_F_ITAG_STAG_CTAG, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 1, 0,
+ NPC_S_KPU9_TU_MPLS_IN_IP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 2, 0, NPC_S_KPU5_IP6, 26, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG, NPC_F_ITAG_STAG_CTAG, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_ARP, 26, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG, NPC_F_ITAG_STAG_CTAG, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 0, 0,
+ NPC_S_KPU8_TCP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 8, 10, 0, 0,
+ NPC_S_KPU8_UDP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 2, 0, NPC_S_KPU5_IP, 22, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG, NPC_F_ITAG_STAG, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_SCTP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 2, 0, NPC_S_KPU5_IP6, 22, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG, NPC_F_ITAG_STAG, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_ICMP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_ARP, 22, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG, NPC_F_ITAG_STAG, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_ICMP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_ESP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 2, 0, NPC_S_KPU5_IP, 22, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG, NPC_F_ITAG_CTAG, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_AH, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 2, 0, NPC_S_KPU5_IP6, 22, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG, NPC_F_ITAG_CTAG, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_GRE, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_ARP, 22, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG, NPC_F_ITAG_CTAG, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 4, 0,
+ NPC_S_KPU12_TU_IP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 1, 0,
+ NPC_S_KPU9_TU_MPLS_IN_IP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_LB, NPC_EC_L2_K3, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LC, NPC_EC_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
};
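The CAM and action tables appear to pair index-for-index: when kpuN_cam_entries[i] matches, the parser applies kpuN_action_entries[i], which is why each CAM block has an action block of the same row count. A minimal sketch of the assumed per-KPU aggregation (struct and array names recalled from the driver, not shown in this excerpt):

	/* Hedged sketch, not part of the patch: assumed aggregation of the
	 * per-KPU tables; cam[i] matching selects action[i].
	 */
	struct npc_kpu_profile {
		int cam_entries;			/* rows in *cam */
		int action_entries;			/* rows in *action */
		struct npc_kpu_profile_cam *cam;	/* match side */
		struct npc_kpu_profile_action *action;	/* action side */
	};

	static struct npc_kpu_profile npc_kpu_profiles[] = {
		{
			ARRAY_SIZE(kpu4_cam_entries),
			ARRAY_SIZE(kpu4_action_entries),
			kpu4_cam_entries,
			kpu4_action_entries,
		},
		/* ... one element per KPU ... */
	};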
-static struct npc_kpu_profile_action kpu3_action_entries[] = {
+static struct npc_kpu_profile_action kpu8_action_entries[] = {
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 1, 0, NPC_S_KPU5_IP, 4, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LD, NPC_EC_TCP_FLAGS_FIN_ONLY,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_TCP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 1, 0, NPC_S_KPU5_IP6, 4, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LD, NPC_EC_TCP_FLAGS_ZERO,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_TCP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 1, 0, NPC_S_KPU5_ARP, 4, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LD, NPC_EC_TCP_FLAGS_RST_FIN,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_TCP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 1, 0, NPC_S_KPU5_RARP, 4, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LD, NPC_EC_TCP_FLAGS_URG_SYN,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_TCP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 1, 0, NPC_S_KPU5_PTP, 4, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LD, NPC_EC_TCP_FLAGS_RST_SYN,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_TCP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 1, 0, NPC_S_KPU5_FCOE, 4, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LD, NPC_EC_TCP_FLAGS_SYN_FIN,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_TCP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 0, 0, NPC_S_KPU4_MPLS, 4, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 7, 0,
+ NPC_S_KPU16_HTTP_DATA, 20, 1,
+ NPC_LID_LD, NPC_LT_LD_TCP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 0, 0, NPC_S_KPU4_MPLS, 4, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 7, 0,
+ NPC_S_KPU16_HTTPS_DATA, 20, 1,
+ NPC_LID_LD, NPC_LT_LD_TCP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 0, 0, NPC_S_KPU4_NSH, 4, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 7, 0,
+ NPC_S_KPU16_PPTP_DATA, 20, 1,
+ NPC_LID_LD, NPC_LT_LD_TCP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 7, 0,
+ NPC_S_KPU16_TCP_DATA, 20, 1,
+ NPC_LID_LD, NPC_LT_LD_TCP,
+ NPC_F_LD_L_TCP_UNK_PORT,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 1, 0, NPC_S_KPU5_IP, 8, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 7, 0,
+ NPC_S_KPU16_HTTP_DATA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_TCP,
+ NPC_F_LD_L_TCP_HAS_OPTIONS,
+ 12, 0xf0, 1, 2,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 1, 0, NPC_S_KPU5_IP6, 8, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 7, 0,
+ NPC_S_KPU16_HTTPS_DATA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_TCP,
+ NPC_F_LD_L_TCP_HAS_OPTIONS,
+ 12, 0xf0, 1, 2,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 1, 0, NPC_S_KPU5_ARP, 8, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 7, 0,
+ NPC_S_KPU16_PPTP_DATA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_TCP,
+ NPC_F_LD_L_TCP_HAS_OPTIONS,
+ 12, 0xf0, 1, 2,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 1, 0, NPC_S_KPU5_RARP, 8, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 7, 0,
+ NPC_S_KPU16_TCP_DATA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_TCP,
+ NPC_F_LD_L_TCP_UNK_PORT_HAS_OPTIONS,
+ 12, 0xf0, 1, 2,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 1, 0, NPC_S_KPU5_PTP, 8, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 2, 0, 0,
+ NPC_S_KPU9_VXLAN, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 1, 0, NPC_S_KPU5_FCOE, 8, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 2, 0, 0,
+ NPC_S_KPU9_VXLANGPE, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 0, 0, NPC_S_KPU4_MPLS, 8, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 2, 0, 0,
+ NPC_S_KPU9_GENEVE, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 0, 0, NPC_S_KPU4_MPLS, 8, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 2, 0, 0,
+ NPC_S_KPU9_GTPC, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 0, 0, NPC_S_KPU4_NSH, 8, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 2, 0, 0,
+ NPC_S_KPU9_GTPU, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 1, 0, NPC_S_KPU5_IP, 4, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_KPU16_UDP_PTP, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 1, 0, NPC_S_KPU5_IP6, 4, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_KPU16_UDP_PTP, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 1, 0, NPC_S_KPU5_ARP, 4, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU9_TU_MPLS_IN_UDP, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 1, 0, NPC_S_KPU5_RARP, 4, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 7, 0,
+ NPC_S_KPU16_UDP_DATA, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 0, 0, NPC_S_KPU4_MPLS, 4, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_SCTP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 0, 0, NPC_S_KPU4_MPLS, 4, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_ICMP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 0, 0, NPC_S_KPU4_NSH, 4, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_IGMP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_ICMP6,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 1, 0, NPC_S_KPU5_IP, 8, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_ESP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 1, 0, NPC_S_KPU5_IP6, 8, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_AH,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 1, 0, NPC_S_KPU5_ARP, 8, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 2, 0,
+ NPC_S_KPU11_TU_ETHER, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_NVGRE,
+ NPC_F_LD_L_GRE_NVGRE,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 1, 0, NPC_S_KPU5_RARP, 8, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LD, NPC_EC_NVGRE,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LD, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 1, 0, NPC_S_KPU5_PTP, 8, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 4, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 1, 0, NPC_S_KPU5_FCOE, 8, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 0, 0, NPC_S_KPU4_MPLS, 8, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_KEY,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 0, 0, NPC_S_KPU4_MPLS, 8, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_SEQ,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 0, 0, NPC_S_KPU4_NSH, 8, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM_KEY,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 1, 0, NPC_S_KPU5_IP, 4, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM_SEQ,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 1, 0, NPC_S_KPU5_IP6, 4, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_KEY_SEQ,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 1, 0, NPC_S_KPU5_ARP, 4, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM_KEY_SEQ,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 1, 0, NPC_S_KPU5_RARP, 4, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 4, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 1, 0, NPC_S_KPU5_PTP, 4, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 1, 0, NPC_S_KPU5_FCOE, 4, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_KEY,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 0, 0, NPC_S_KPU4_MPLS, 4, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_SEQ,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 0, 0, NPC_S_KPU4_MPLS, 4, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM_KEY,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 0, 0, NPC_S_KPU4_NSH, 4, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM_SEQ,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_KEY_SEQ,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 2, 0, NPC_S_KPU5_IP, 18, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU9_TU_MPLS_IN_GRE, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM_KEY_SEQ,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 2, 0, NPC_S_KPU5_IP6, 18, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU9_TU_NSH_IN_GRE, 4, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_ARP, 18, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU9_TU_NSH_IN_GRE, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU5_RARP, 18, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU9_TU_NSH_IN_GRE, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_KEY,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 1, 0, NPC_S_KPU5_IP, 26, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU9_TU_NSH_IN_GRE, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_SEQ,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 1, 0, NPC_S_KPU5_IP6, 26, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU9_TU_NSH_IN_GRE, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM_KEY,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 1, 0, NPC_S_KPU5_ARP, 26, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU9_TU_NSH_IN_GRE, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM_SEQ,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 1, 0, NPC_S_KPU5_IP, 22, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU9_TU_NSH_IN_GRE, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_KEY_SEQ,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 1, 0, NPC_S_KPU5_IP6, 22, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU9_TU_NSH_IN_GRE, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM_KEY_SEQ,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 1, 0, NPC_S_KPU5_ARP, 22, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 3, 0,
+ NPC_S_KPU12_TU_IP, 4, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 3, 0,
+ NPC_S_KPU12_TU_IP, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 3, 0,
+ NPC_S_KPU12_TU_IP, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_KEY,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 1, 0, NPC_S_KPU5_IP, 22, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 3, 0,
+ NPC_S_KPU12_TU_IP, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_SEQ,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 1, 0, NPC_S_KPU5_IP6, 22, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 3, 0,
+ NPC_S_KPU12_TU_IP, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM_KEY,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 1, 0, NPC_S_KPU5_ARP, 22, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 3, 0,
+ NPC_S_KPU12_TU_IP, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM_SEQ,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 3, 0,
+ NPC_S_KPU12_TU_IP, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_KEY_SEQ,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 3, 0,
+ NPC_S_KPU12_TU_IP, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM_KEY_SEQ,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_LB, NPC_EC_L2_K3, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 3, 0,
+ NPC_S_KPU12_TU_IP6, 4, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ 0,
+ 0, 0, 0, 0,
},
-};
-
-static struct npc_kpu_profile_action kpu4_action_entries[] = {
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 0, NPC_S_KPU5_MPLS_PL, 4, 1,
- NPC_LID_LC, NPC_LT_LC_MPLS, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 3, 0,
+ NPC_S_KPU12_TU_IP6, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 0, NPC_S_KPU5_MPLS_PL, 8, 1,
- NPC_LID_LC, NPC_LT_LC_MPLS, NPC_F_MPLS_2_LABELS, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 3, 0,
+ NPC_S_KPU12_TU_IP6, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_KEY,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 0, NPC_S_KPU5_MPLS_PL, 12, 1,
- NPC_LID_LC, NPC_LT_LC_MPLS, NPC_F_MPLS_3_LABELS, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 3, 0,
+ NPC_S_KPU12_TU_IP6, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_SEQ,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 4, 0,
- 0, 0, NPC_S_KPU5_MPLS, 12, 1,
- NPC_LID_LC, NPC_LT_LC_MPLS, NPC_F_MPLS_4_LABELS, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 3, 0,
+ NPC_S_KPU12_TU_IP6, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM_KEY,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 7, 0, NPC_S_KPU12_TU_IP, 0, 1,
- NPC_LID_LC, NPC_LT_LC_NSH, 0, 1, 0x3f,
- 0, 2,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 3, 0,
+ NPC_S_KPU12_TU_IP6, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM_SEQ,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 7, 0, NPC_S_KPU12_TU_IP6, 0, 1,
- NPC_LID_LC, NPC_LT_LC_NSH, 0, 1, 0x3f,
- 0, 2,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 3, 0,
+ NPC_S_KPU12_TU_IP6, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_KEY_SEQ,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
- 6, 0, NPC_S_KPU11_TU_ETHER, 0, 1,
- NPC_LID_LC, NPC_LT_LC_NSH, 0, 1, 0x3f,
- 0, 2,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 3, 0,
+ NPC_S_KPU12_TU_IP6, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM_KEY_SEQ,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 0, 0, NPC_S_KPU5_NSH, 0, 1,
- NPC_LID_LC, NPC_LT_LC_NSH, 0, 1, 0x3f,
- 0, 2,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_ROUTE,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 4, 0, NPC_S_KPU9_TU_MPLS, 0, 1,
- NPC_LID_LC, NPC_LT_LC_NSH, 0, 1, 0x3f,
- 0, 2,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_UNK_PROTO,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_LB, NPC_EC_L2_K4, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LC, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LD, NPC_EC_GRE,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LD, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
-};
-
-static struct npc_kpu_profile_action kpu5_action_entries[] = {
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 12, 0,
- 2, 0, NPC_S_KPU8_TCP, 20, 1,
- NPC_LID_LC, NPC_LT_LC_IP, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU11_TU_PPP, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_VER1,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 8, 10,
- 2, 0, NPC_S_KPU8_UDP, 20, 1,
- NPC_LID_LC, NPC_LT_LC_IP, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU11_TU_PPP, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_VER1_HAS_SEQ,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU8_SCTP, 20, 1,
- NPC_LID_LC, NPC_LT_LC_IP, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU11_TU_PPP, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_VER1_HAS_ACK,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU8_ICMP, 20, 1,
- NPC_LID_LC, NPC_LT_LC_IP, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU11_TU_PPP, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_VER1_HAS_SEQ_ACK,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU8_IGMP, 20, 1,
- NPC_LID_LC, NPC_LT_LC_IP, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_VER1_UNK_PROTO,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 0, NPC_S_KPU8_ESP, 20, 1,
- NPC_LID_LC, NPC_LT_LC_IP, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LD, NPC_EC_GRE_VER1,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LD, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 0, NPC_S_KPU8_AH, 20, 1,
- NPC_LID_LC, NPC_LT_LC_IP, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LD, NPC_EC_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LD, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
+};
+
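One pattern worth calling out in the kpu8 GRE rows above: the ptr_advance values 4, 8, 12 and 16 track the GRE header size, where the RFC 2784/2890 base header is 4 bytes and each of the C (checksum), K (key) and S (sequence) optional words adds 4 more, matching the NPC_F_LD_L_GRE_HAS_* flag combinations row by row. A minimal sketch of that arithmetic (gre_hdr_len is a hypothetical helper, not a driver function):

static inline unsigned int gre_hdr_len(int has_csum, int has_key, int has_seq)
{
	/* 4-byte base header plus one 4-byte word per C/K/S option */
	return 4 + 4 * (!!has_csum + !!has_key + !!has_seq);
}

The version-1 (PPTP) rows follow the same rule from an 8-byte base, since the key word is mandatory there: 8 plain, 12 with sequence or acknowledgment, 16 with both.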
+static struct npc_kpu_profile_action kpu9_action_entries[] = {
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 2, 0, NPC_S_KPU8_GRE, 20, 1,
- NPC_LID_LC, NPC_LT_LC_IP, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU10_TU_MPLS_PL, 4, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_MPLS_IN_GRE,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 6, 0, NPC_S_KPU12_TU_IP, 20, 1,
- NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_IP_IN_IP, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU10_TU_MPLS_PL, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_MPLS_IN_GRE,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 6, 0, NPC_S_KPU12_TU_IP6, 20, 1,
- NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_6TO4, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU10_TU_MPLS_PL, 12, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_MPLS_IN_GRE,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 3, 0, NPC_S_KPU9_TU_MPLS, 20, 1,
- NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_MPLS_IN_IP, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 4, 0, 0, 0,
+ NPC_S_KPU10_TU_MPLS, 12, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_MPLS_IN_GRE,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_UNK_PROTO, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU10_TU_MPLS_PL, 4, 1,
+ NPC_LID_LD, NPC_LT_LD_TU_MPLS_IN_NSH,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 12, 0,
- 2, 0, NPC_S_KPU8_TCP, 0, 1,
- NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
- 0, 2,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU10_TU_MPLS_PL, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_TU_MPLS_IN_NSH,
+ NPC_F_LD_L_MPLS_2_LABELS,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 8, 10,
- 2, 0, NPC_S_KPU8_UDP, 0, 1,
- NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
- 0, 2,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU10_TU_MPLS_PL, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_TU_MPLS_IN_NSH,
+ NPC_F_LD_L_MPLS_3_LABELS,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU8_SCTP, 0, 1,
- NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
- 0, 2,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 4, 0, 0, 0,
+ NPC_S_KPU10_TU_MPLS, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_TU_MPLS_IN_NSH,
+ NPC_F_LD_L_MPLS_4_LABELS,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU8_ICMP, 0, 1,
- NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
- 0, 2,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU10_TU_MPLS_PL, 4, 1,
+ NPC_LID_LD, NPC_LT_LD_TU_MPLS_IN_IP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU8_IGMP, 0, 1,
- NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
- 0, 2,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU10_TU_MPLS_PL, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_TU_MPLS_IN_IP,
+ NPC_F_LD_L_MPLS_2_LABELS,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 0, NPC_S_KPU8_ESP, 0, 1,
- NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
- 0, 2,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU10_TU_MPLS_PL, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_TU_MPLS_IN_IP,
+ NPC_F_LD_L_MPLS_3_LABELS,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 0, NPC_S_KPU8_AH, 0, 1,
- NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
- 0, 2,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 4, 0, 0, 0,
+ NPC_S_KPU10_TU_MPLS, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_TU_MPLS_IN_IP,
+ NPC_F_LD_L_MPLS_4_LABELS,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 2, 0, NPC_S_KPU8_GRE, 0, 1,
- NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
- 0, 2,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU12_TU_IP, 0, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_NSH_IN_GRE,
+ 0,
+ 1, 0x3f, 0, 2,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 6, 0, NPC_S_KPU12_TU_IP, 0, 1,
- NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_IP_IN_IP_HAS_OPTIONS, 0, 0xf,
- 0, 2,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU12_TU_IP6, 0, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_NSH_IN_GRE,
+ 0,
+ 1, 0x3f, 0, 2,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 6, 0, NPC_S_KPU12_TU_IP6, 0, 1,
- NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_6TO4_HAS_OPTIONS, 0, 0xf,
- 0, 2,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 1, 0,
+ NPC_S_KPU11_TU_ETHER, 0, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_NSH_IN_GRE,
+ 0,
+ 1, 0x3f, 0, 2,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 3, 0, NPC_S_KPU9_TU_MPLS, 20, 1,
- NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_MPLS_IN_IP_HAS_OPTIONS,
- 0, 0xf, 0, 2,
+ NPC_ERRLEV_LE, NPC_EC_NSH_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_NSH_IN_GRE,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_UNK_PROTO_HAS_OPTIONS, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 1, 0,
+ NPC_S_KPU11_TU_ETHER, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_VXLAN,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_LC, NPC_EC_IP_VER, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LC, NPC_LT_LC_IP, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 1, 0,
+ NPC_S_KPU11_TU_ETHER, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_VXLAN,
+ NPC_F_LE_L_VXLAN_NOVNI,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LC, NPC_LT_LC_ARP, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LE, NPC_EC_VXLAN,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LE, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LC, NPC_LT_LC_RARP, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU12_TU_IP, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_VXLANGPE,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LC, NPC_LT_LC_PTP, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU12_TU_IP6, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_VXLANGPE,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LC, NPC_LT_LC_FCOE, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 1, 0,
+ NPC_S_KPU11_TU_ETHER, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_VXLANGPE,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 12, 0,
- 2, 0, NPC_S_KPU8_TCP, 40, 1,
- NPC_LID_LC, NPC_LT_LC_IP6, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU10_TU_NSH_IN_VXLANGPE, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_VXLANGPE,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 8, 10,
- 2, 0, NPC_S_KPU8_UDP, 40, 1,
- NPC_LID_LC, NPC_LT_LC_IP6, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU10_TU_MPLS_IN_VXLANGPE, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_VXLANGPE,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU8_SCTP, 40, 1,
- NPC_LID_LC, NPC_LT_LC_IP6, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU12_TU_IP, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_VXLANGPE,
+ NPC_F_LE_L_VXLANGPE_NOVNI,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU8_ICMP, 40, 1,
- NPC_LID_LC, NPC_LT_LC_IP6, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU12_TU_IP6, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_VXLANGPE,
+ NPC_F_LE_L_VXLANGPE_NOVNI,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU8_ICMP6, 40, 1,
- NPC_LID_LC, NPC_LT_LC_IP6, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 1, 0,
+ NPC_S_KPU11_TU_ETHER, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_VXLANGPE,
+ NPC_F_LE_L_VXLANGPE_NOVNI,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU8_ESP, 40, 1,
- NPC_LID_LC, NPC_LT_LC_IP6, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU10_TU_NSH_IN_VXLANGPE, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_VXLANGPE,
+ NPC_F_LE_L_VXLANGPE_NOVNI,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU8_AH, 40, 1,
- NPC_LID_LC, NPC_LT_LC_IP6, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 0, 0,
+ NPC_S_KPU10_TU_MPLS_IN_VXLANGPE, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_VXLANGPE,
+ NPC_F_LE_L_VXLANGPE_NOVNI,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU8_GRE, 40, 1,
- NPC_LID_LC, NPC_LT_LC_IP6, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LE, NPC_LT_LE_VXLANGPE,
+ NPC_F_LE_L_VXLANGPE_UNK,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 6, 0, NPC_S_KPU12_TU_IP6, 40, 1,
- NPC_LID_LC, NPC_LT_LC_IP6, NPC_F_IP6_TUN_IP6, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LE, NPC_LT_LE_VXLANGPE,
+ NPC_F_LE_L_VXLANGPE_NONP,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 3, 0, NPC_S_KPU9_TU_MPLS, 40, 1,
- NPC_LID_LC, NPC_LT_LC_IP6, NPC_F_IP6_MPLS_IN_IP, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 1, 0,
+ NPC_S_KPU11_TU_ETHER, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_GENEVE,
+ 0,
+ 0, 0x3f, 0, 2,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 0, 0, NPC_S_KPU6_IP6_EXT, 0, 1,
- NPC_LID_LC, NPC_LT_LC_IP6, NPC_F_IP6_HAS_EXT, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 1, 0,
+ NPC_S_KPU11_TU_ETHER, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_GENEVE,
+ NPC_F_LE_L_GENEVE_OAM,
+ 0, 0x3f, 0, 2,
},
{
- NPC_ERRLEV_LC, NPC_EC_IP6_VER, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LC, NPC_LT_LC_IP6, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 1, 0,
+ NPC_S_KPU11_TU_ETHER, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_GENEVE,
+ NPC_F_LE_L_GENEVE_CRI_OPT,
+ 0, 0x3f, 0, 2,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 6, 0, NPC_S_KPU12_TU_IP, 4, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 1, 0,
+ NPC_S_KPU11_TU_ETHER, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_GENEVE,
+ NPC_F_LE_L_GENEVE_OAM_CRI_OPT,
+ 0, 0x3f, 0, 2,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 6, 0, NPC_S_KPU12_TU_IP6, 4, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU12_TU_IP, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_GENEVE,
+ 0,
+ 0, 0x3f, 0, 2,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
- 5, 0, NPC_S_KPU11_TU_ETHER, 8, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU12_TU_IP, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_GENEVE,
+ NPC_F_LE_L_GENEVE_OAM,
+ 0, 0x3f, 0, 2,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
- 5, 0, NPC_S_KPU11_TU_ETHER, 4, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU12_TU_IP, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_GENEVE,
+ NPC_F_LE_L_GENEVE_CRI_OPT,
+ 0, 0x3f, 0, 2,
},
{
- NPC_ERRLEV_LB, NPC_EC_L2_MPLS_2MANY, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU12_TU_IP, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_GENEVE,
+ NPC_F_LE_L_GENEVE_OAM_CRI_OPT,
+ 0, 0x3f, 0, 2,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 6, 0, NPC_S_KPU12_TU_IP, 0, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU12_TU_IP6, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_GENEVE,
+ 0,
+ 0, 0x3f, 0, 2,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 6, 0, NPC_S_KPU12_TU_IP6, 0, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU12_TU_IP6, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_GENEVE,
+ NPC_F_LE_L_GENEVE_OAM,
+ 0, 0x3f, 0, 2,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
- 5, 0, NPC_S_KPU11_TU_ETHER, 4, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU12_TU_IP6, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_GENEVE,
+ NPC_F_LE_L_GENEVE_CRI_OPT,
+ 0, 0x3f, 0, 2,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
- 5, 0, NPC_S_KPU11_TU_ETHER, 0, 0,
- NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU12_TU_IP6, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_GENEVE,
+ NPC_F_LE_L_GENEVE_OAM_CRI_OPT,
+ 0, 0x3f, 0, 2,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 6, 0, NPC_S_KPU12_TU_IP, 0, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
- 0, 2,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LE, NPC_LT_LE_GTPC,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 6, 0, NPC_S_KPU12_TU_IP6, 0, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
- 0, 2,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU12_TU_IP, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_GTPU,
+ NPC_F_LE_L_GTPU_G_PDU,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
- 5, 0, NPC_S_KPU11_TU_ETHER, 0, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
- 0, 2,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LE, NPC_LT_LE_GTPU,
+ NPC_F_LE_L_GTPU_UNK,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 5, 0, NPC_S_KPU11_TU_3RD_NSH, 0, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
- 0, 2,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU10_TU_MPLS_PL, 4, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_MPLS_IN_UDP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 3, 0, NPC_S_KPU9_TU_MPLS, 0, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
- 0, 2,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU10_TU_MPLS_PL, 8, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_MPLS_IN_UDP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_LC, NPC_EC_UNK, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LC, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU10_TU_MPLS_PL, 12, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_MPLS_IN_UDP,
+ 0,
+ 0, 0, 0, 0,
},
-};
-
-static struct npc_kpu_profile_action kpu6_action_entries[] = {
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LC, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 4, 0, 0, 0,
+ NPC_S_KPU10_TU_MPLS, 12, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_MPLS_IN_UDP,
+ 0,
+ 0, 0, 0, 0,
},
-};
-
-static struct npc_kpu_profile_action kpu7_action_entries[] = {
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LC, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LE, NPC_EC_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LE, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
};
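The trailing four values of each entry (offset, mask, right and shift in the sketch near the top of this hunk) drive the variable-length rows: byte offset of the current header is masked with mask, shifted right (right = 1) or left (right = 0) by shift, and the result is added to ptr_advance. This reading is inferred from the rows themselves rather than from documentation, but it checks out against the protocols involved; a sketch, with hypothetical names:

static unsigned int npc_total_advance(const unsigned char *hdr,
				      unsigned int ptr_advance,
				      unsigned int offset, unsigned int mask,
				      unsigned int right, unsigned int shift)
{
	unsigned int v = hdr[offset] & mask;	/* pick out the length field */

	return ptr_advance + (right ? v >> shift : v << shift);
}

Examples from the tables: the TCP option rows pair ptr_advance 0 with {12, 0xf0, 1, 2}, and (byte 12 & 0xf0) >> 2 is exactly the TCP header length in bytes, since the data-offset nibble counts 32-bit words. The NSH rows use {1, 0x3f, 0, 2} (length in the low 6 bits of byte 1, in 4-byte words), the Geneve rows combine a fixed 8-byte base with {0, 0x3f, 0, 2} for the option length, and the kpu12 IPv4 option rows further below use {0, 0xf, 0, 2}, i.e. IHL << 2.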
-static struct npc_kpu_profile_action kpu8_action_entries[] = {
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 7, 0, NPC_S_KPU16_HTTP_DATA, 20, 1,
- NPC_LID_LD, NPC_LT_LD_TCP, NPC_F_TCP_HTTP, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 7, 0, NPC_S_KPU16_HTTPS_DATA, 20, 1,
- NPC_LID_LD, NPC_LT_LD_TCP, NPC_F_TCP_HTTPS, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 7, 0, NPC_S_KPU16_PPTP_DATA, 20, 1,
- NPC_LID_LD, NPC_LT_LD_TCP, NPC_F_TCP_PPTP, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 7, 0, NPC_S_KPU16_TCP_DATA, 20, 1,
- NPC_LID_LD, NPC_LT_LD_TCP, NPC_F_TCP_UNK_PORT, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 7, 0, NPC_S_KPU16_HTTP_DATA, 0, 1,
- NPC_LID_LD, NPC_LT_LD_TCP, NPC_F_TCP_HTTP_HAS_OPTIONS,
- 12, 0xf0, 1, 2,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 7, 0, NPC_S_KPU16_HTTPS_DATA, 0, 1,
- NPC_LID_LD, NPC_LT_LD_TCP, NPC_F_TCP_HTTPS_HAS_OPTIONS,
- 12, 0xf0, 1, 2,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 7, 0, NPC_S_KPU16_PPTP_DATA, 0, 1,
- NPC_LID_LD, NPC_LT_LD_TCP, NPC_F_TCP_PPTP_HAS_OPTIONS,
- 12, 0xf0, 1, 2,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 7, 0, NPC_S_KPU16_TCP_DATA, 0, 1,
- NPC_LID_LD, NPC_LT_LD_TCP, NPC_F_TCP_UNK_PORT_HAS_OPTIONS,
- 12, 0xf0, 1, 2,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
- 2, 0, NPC_S_KPU11_TU_ETHER, 16, 1,
- NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLAN, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
- 2, 0, NPC_S_KPU11_TU_ETHER, 16, 1,
- NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLAN_NOVNI, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_LD, NPC_EC_VXLAN, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 3, 0, NPC_S_KPU12_TU_IP, 16, 1,
- NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 3, 0, NPC_S_KPU12_TU_IP6, 16, 1,
- NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
- 2, 0, NPC_S_KPU11_TU_ETHER, 16, 1,
- NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 0, 0, NPC_S_KPU9_TU_NSH, 16, 1,
- NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE_NSH, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 16, 1,
- NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE_MPLS, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 3, 0, NPC_S_KPU12_TU_IP, 16, 1,
- NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE_NOVNI, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 3, 0, NPC_S_KPU12_TU_IP6, 16, 1,
- NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE_NOVNI, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
- 2, 0, NPC_S_KPU11_TU_ETHER, 16, 1,
- NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE_NOVNI, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 0, 0, NPC_S_KPU9_TU_NSH, 16, 1,
- NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE_NOVNI_NSH, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 16, 1,
- NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE_NOVNI_MPLS, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE_UNK, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE_NONP, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
- 2, 0, NPC_S_KPU11_TU_ETHER, 16, 1,
- NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE, 8, 0x3f,
- 0, 2,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
- 2, 0, NPC_S_KPU11_TU_ETHER, 16, 1,
- NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE_OAM, 8, 0x3f,
- 0, 2,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
- 2, 0, NPC_S_KPU11_TU_ETHER, 16, 1,
- NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE_CRI_OPT, 8, 0x3f,
- 0, 2,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
- 2, 0, NPC_S_KPU11_TU_ETHER, 16, 1,
- NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE_OAM_CRI_OPT,
- 8, 0x3f, 0, 2,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 3, 0, NPC_S_KPU12_TU_IP, 16, 1,
- NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE, 8, 0x3f,
- 0, 2,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 3, 0, NPC_S_KPU12_TU_IP, 16, 1,
- NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE_OAM,
- 8, 0x3f, 0, 2,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 3, 0, NPC_S_KPU12_TU_IP, 16, 1,
- NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE_CRI_OPT,
- 8, 0x3f, 0, 2,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 3, 0, NPC_S_KPU12_TU_IP, 16, 1,
- NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE_OAM_CRI_OPT,
- 8, 0x3f, 0, 2,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 3, 0, NPC_S_KPU12_TU_IP6, 16, 1,
- NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE, 8, 0x3f,
- 0, 2,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 3, 0, NPC_S_KPU12_TU_IP6, 16, 1,
- NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE_OAM, 8, 0x3f,
- 0, 2,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 3, 0, NPC_S_KPU12_TU_IP6, 16, 1,
- NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE_CRI_OPT,
- 8, 0x3f, 0, 2,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 3, 0, NPC_S_KPU12_TU_IP6, 16, 1,
- NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE_OAM_CRI_OPT,
- 8, 0x3f, 0, 2,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GTP_GTPC, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 3, 0, NPC_S_KPU12_TU_IP, 16, 1,
- NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GTP_GTPU_G_PDU, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GTP_GTPU_UNK, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 7, 0, NPC_S_KPU16_UDP_DATA, 8, 1,
- NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_UNK_PORT, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LD, NPC_LT_LD_SCTP, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LD, NPC_LT_LD_ICMP, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LD, NPC_LT_LD_IGMP, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LD, NPC_LT_LD_ICMP6, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LD, NPC_LT_LD_ESP, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LD, NPC_LT_LD_AH, 0, 0, 0,
- 0, 0,
- },
+static struct npc_kpu_profile_action kpu10_action_entries[] = {
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
- 2, 0, NPC_S_KPU11_TU_ETHER, 8, 1,
- NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_NVGRE, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 1, 0,
+ NPC_S_KPU12_TU_IP, 4, 0,
+ NPC_LID_LF, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_LD, NPC_EC_NVGRE, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 1, 0,
+ NPC_S_KPU12_TU_IP6, 4, 0,
+ NPC_LID_LF, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 4, 1,
- NPC_LID_LD, NPC_LT_LD_GRE_MPLS, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU11_TU_ETHER, 8, 0,
+ NPC_LID_LF, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 8, 1,
- NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_CSUM, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU11_TU_ETHER, 4, 0,
+ NPC_LID_LF, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 8, 1,
- NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_KEY, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LE, NPC_EC_MPLS_2MANY,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LF, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 8, 1,
- NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_SEQ, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 1, 0,
+ NPC_S_KPU12_TU_IP, 0, 0,
+ NPC_LID_LF, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 12, 1,
- NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_CSUM_KEY, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 1, 0,
+ NPC_S_KPU12_TU_IP6, 0, 0,
+ NPC_LID_LF, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 12, 1,
- NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_CSUM_SEQ, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU11_TU_ETHER, 4, 0,
+ NPC_LID_LF, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 12, 1,
- NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_KEY_SEQ, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU11_TU_ETHER, 0, 0,
+ NPC_LID_LF, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 16, 1,
- NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_CSUM_KEY_SEQ,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU11_TU_MPLS_PL, 4, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_MPLS_IN_VXLANGPE,
+ 0,
0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 4, 1,
- NPC_LID_LD, NPC_LT_LD_GRE_MPLS, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU11_TU_MPLS_PL, 8, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_MPLS_IN_VXLANGPE,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 8, 1,
- NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_CSUM, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU11_TU_MPLS_PL, 12, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_MPLS_IN_VXLANGPE,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 8, 1,
- NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_KEY, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 4, 0, 0, 0,
+ NPC_S_KPU11_TU_MPLS, 12, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_MPLS_IN_VXLANGPE,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 8, 1,
- NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_SEQ, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 1, 0,
+ NPC_S_KPU12_TU_IP, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_NSH_IN_VXLANGPE,
+ 0,
+ 1, 0x3f, 0, 2,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 12, 1,
- NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_CSUM_KEY, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 1, 0,
+ NPC_S_KPU12_TU_IP6, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_NSH_IN_VXLANGPE,
+ 0,
+ 1, 0x3f, 0, 2,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 12, 1,
- NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_CSUM_SEQ, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU11_TU_ETHER_IN_NSH, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_NSH_IN_VXLANGPE,
+ 0,
+ 1, 0x3f, 0, 2,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 12, 1,
- NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_KEY_SEQ, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LF, NPC_EC_NSH_UNK,
+ 6, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_NSH_IN_VXLANGPE,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
- 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 16, 1,
- NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_CSUM_KEY_SEQ,
+ NPC_ERRLEV_LE, NPC_EC_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LF, NPC_LT_NA,
+ 0,
0, 0, 0, 0,
},
+};
+
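Also visible across these tables: bypass_count is simply the number of KPUs skipped when an action chains to a non-adjacent parser stage. kpu8 rows that jump to NPC_S_KPU16_* carry 7 (KPUs 9 through 15 are bypassed), kpu8 jumps to KPU12 carry 3, kpu10 jumps to KPU12 carry 1, and adjacent hops carry 0. A one-line statement of that relationship (a hypothetical helper, shown only to make the tables easier to audit):

static inline int npc_bypass_count(int cur_kpu, int next_kpu)
{
	/* KPUs strictly between the current and the next stage */
	return next_kpu - cur_kpu - 1;
}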
+static struct npc_kpu_profile_action kpu11_action_entries[] = {
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 0, 0, NPC_S_KPU9_TU_NSH, 4, 1,
- NPC_LID_LD, NPC_LT_LD_GRE_NSH, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 0, 0,
+ NPC_S_KPU12_TU_IP, 14, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 0, 0, NPC_S_KPU9_TU_NSH, 8, 1,
- NPC_LID_LD, NPC_LT_LD_GRE_NSH, NPC_F_GRE_HAS_CSUM, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU12_TU_IP6, 14, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 0, 0, NPC_S_KPU9_TU_NSH, 8, 1,
- NPC_LID_LD, NPC_LT_LD_GRE_NSH, NPC_F_GRE_HAS_KEY, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU12_TU_ARP, 14, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 0, 0, NPC_S_KPU9_TU_NSH, 8, 1,
- NPC_LID_LD, NPC_LT_LD_GRE_NSH, NPC_F_GRE_HAS_SEQ, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 0, 0,
+ NPC_S_KPU12_TU_IP, 18, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_L_WITH_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 0, 0, NPC_S_KPU9_TU_NSH, 12, 1,
- NPC_LID_LD, NPC_LT_LD_GRE_NSH, NPC_F_GRE_HAS_CSUM_KEY, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU12_TU_IP6, 18, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_L_WITH_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 0, 0, NPC_S_KPU9_TU_NSH, 12, 1,
- NPC_LID_LD, NPC_LT_LD_GRE_NSH, NPC_F_GRE_HAS_CSUM_SEQ, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU12_TU_ARP, 18, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_L_WITH_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 0, 0, NPC_S_KPU9_TU_NSH, 12, 1,
- NPC_LID_LD, NPC_LT_LD_GRE_NSH, NPC_F_GRE_HAS_KEY_SEQ, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_U_UNK_ETYPE | NPC_F_LF_L_WITH_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 0, 0, NPC_S_KPU9_TU_NSH, 16, 1,
- NPC_LID_LD, NPC_LT_LD_GRE_NSH, NPC_F_GRE_HAS_CSUM_KEY_SEQ, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 0, 0,
+ NPC_S_KPU12_TU_IP, 22, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_L_WITH_STAG_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 3, 0, NPC_S_KPU12_TU_IP, 4, 1,
- NPC_LID_LD, NPC_LT_LD_GRE, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU12_TU_IP6, 22, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_L_WITH_STAG_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 3, 0, NPC_S_KPU12_TU_IP, 8, 1,
- NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_CSUM, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU12_TU_ARP, 22, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_L_WITH_STAG_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 3, 0, NPC_S_KPU12_TU_IP, 8, 1,
- NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_KEY, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_U_UNK_ETYPE | NPC_F_LF_L_WITH_STAG_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 3, 0, NPC_S_KPU12_TU_IP, 8, 1,
- NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_SEQ, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 0, 0,
+ NPC_S_KPU12_TU_IP, 18, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_L_WITH_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 3, 0, NPC_S_KPU12_TU_IP, 12, 1,
- NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_CSUM_KEY, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU12_TU_IP6, 18, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_L_WITH_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 3, 0, NPC_S_KPU12_TU_IP, 12, 1,
- NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_CSUM_SEQ, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU12_TU_ARP, 18, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_L_WITH_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 3, 0, NPC_S_KPU12_TU_IP, 12, 1,
- NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_KEY_SEQ, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_U_UNK_ETYPE | NPC_F_LF_L_WITH_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 3, 0, NPC_S_KPU12_TU_IP, 16, 1,
- NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_CSUM_KEY_SEQ, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 0, 0,
+ NPC_S_KPU12_TU_IP, 22, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_L_WITH_QINQ_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 3, 0, NPC_S_KPU12_TU_IP6, 4, 1,
- NPC_LID_LD, NPC_LT_LD_GRE, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU12_TU_IP6, 22, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_L_WITH_QINQ_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 3, 0, NPC_S_KPU12_TU_IP6, 8, 1,
- NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_CSUM, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU12_TU_ARP, 22, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_L_WITH_QINQ_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 3, 0, NPC_S_KPU12_TU_IP6, 8, 1,
- NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_KEY, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_U_UNK_ETYPE | NPC_F_LF_L_WITH_QINQ_CTAG,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 3, 0, NPC_S_KPU12_TU_IP6, 8, 1,
- NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_SEQ, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 0, 0,
+ NPC_S_KPU12_TU_IP, 18, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_L_WITH_QINQ,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 3, 0, NPC_S_KPU12_TU_IP6, 12, 1,
- NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_CSUM_KEY, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU12_TU_IP6, 18, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_L_WITH_QINQ,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 3, 0, NPC_S_KPU12_TU_IP6, 12, 1,
- NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_CSUM_SEQ, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU12_TU_ARP, 18, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_L_WITH_QINQ,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 3, 0, NPC_S_KPU12_TU_IP6, 12, 1,
- NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_KEY_SEQ, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_U_UNK_ETYPE | NPC_F_LF_L_WITH_QINQ,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 3, 0, NPC_S_KPU12_TU_IP6, 16, 1,
- NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_CSUM_KEY_SEQ, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ETHER,
+ NPC_F_LF_U_UNK_ETYPE,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_ROUTE, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_PPP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_UNK_PROTO, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 0, 0,
+ NPC_S_KPU12_TU_IP, 4, 0,
+ NPC_LID_LF, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_LD, NPC_EC_GRE, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU12_TU_IP6, 4, 0,
+ NPC_LID_LF, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU11_TU_PPP, 8, 1,
- NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_VER1, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LF, NPC_EC_MPLS_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LF, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU11_TU_PPP, 12, 1,
- NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_VER1_HAS_SEQ, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LF, NPC_EC_MPLS_2MANY,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LF, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU11_TU_PPP, 12, 1,
- NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_VER1_HAS_ACK, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 0, 0,
+ NPC_S_KPU12_TU_IP, 0, 0,
+ NPC_LID_LF, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU11_TU_PPP, 16, 1,
- NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_VER1_HAS_SEQ_ACK, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU12_TU_IP6, 0, 0,
+ NPC_LID_LF, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_VER1_UNK_PROTO, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LF, NPC_EC_MPLS_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LF, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_LD, NPC_EC_GRE_VER1, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_ETHER_IN_NSH,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_LD, NPC_EC_UNK, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LF, NPC_EC_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LF, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
};
-static struct npc_kpu_profile_action kpu9_action_entries[] = {
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 0, NPC_S_KPU10_TU_MPLS_PL, 4, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
- 0, 0,
- },
+static struct npc_kpu_profile_action kpu12_action_entries[] = {
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 0, NPC_S_KPU10_TU_MPLS_PL, 8, 0,
- NPC_LID_LD, NPC_LT_NA, NPC_F_MPLS_2_LABELS, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 2, 0,
+ NPC_S_KPU15_TU_TCP, 20, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 0, NPC_S_KPU10_TU_MPLS_PL, 12, 0,
- NPC_LID_LD, NPC_LT_NA, NPC_F_MPLS_3_LABELS, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 2, 0,
+ NPC_S_KPU15_TU_UDP, 20, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 4, 0,
- 0, 0, NPC_S_KPU10_TU_MPLS, 12, 0,
- NPC_LID_LD, NPC_LT_NA, NPC_F_MPLS_4_LABELS, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU15_TU_SCTP, 20, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 0, NPC_S_KPU10_TU_MPLS_PL, 4, 1,
- NPC_LID_LD, NPC_LT_LD_TU_MPLS, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU15_TU_ICMP, 20, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 0, NPC_S_KPU10_TU_MPLS_PL, 8, 1,
- NPC_LID_LD, NPC_LT_LD_TU_MPLS, NPC_F_MPLS_2_LABELS, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU15_TU_IGMP, 20, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 0, NPC_S_KPU10_TU_MPLS_PL, 12, 1,
- NPC_LID_LD, NPC_LT_LD_TU_MPLS, NPC_F_MPLS_3_LABELS, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU15_TU_ESP, 20, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 4, 0,
- 0, 0, NPC_S_KPU10_TU_MPLS, 12, 1,
- NPC_LID_LD, NPC_LT_LD_TU_MPLS, NPC_F_MPLS_4_LABELS, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU15_TU_AH, 20, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 2, 0, NPC_S_KPU12_TU_IP, 0, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
- 0, 2,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP,
+ NPC_F_LG_U_UNK_IP_PROTO,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 2, 0, NPC_S_KPU12_TU_IP6, 0, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
- 0, 2,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 2, 0,
+ NPC_S_KPU15_TU_TCP, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP,
+ NPC_F_LG_U_IP_HAS_OPTIONS,
+ 0, 0xf, 0, 2,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
- 1, 0, NPC_S_KPU11_TU_ETHER, 0, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
- 0, 2,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 2, 0,
+ NPC_S_KPU15_TU_UDP, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP,
+ NPC_F_LG_U_IP_HAS_OPTIONS,
+ 0, 0xf, 0, 2,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 0, 0, NPC_S_KPU10_TU_NSH, 0, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
- 0, 2,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU15_TU_SCTP, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP,
+ NPC_F_LG_U_IP_HAS_OPTIONS,
+ 0, 0xf, 0, 2,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 1, 0, NPC_S_KPU11_TU_MPLS_IN_NSH, 0, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
- 0, 2,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU15_TU_ICMP, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP,
+ NPC_F_LG_U_IP_HAS_OPTIONS,
+ 0, 0xf, 0, 2,
},
{
- NPC_ERRLEV_LE, NPC_EC_UNK, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU15_TU_IGMP, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP,
+ NPC_F_LG_U_IP_HAS_OPTIONS,
+ 0, 0xf, 0, 2,
},
-};
-
-static struct npc_kpu_profile_action kpu10_action_entries[] = {
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 1, 0, NPC_S_KPU12_TU_IP, 4, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU15_TU_ESP, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP,
+ NPC_F_LG_U_IP_HAS_OPTIONS,
+ 0, 0xf, 0, 2,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 1, 0, NPC_S_KPU12_TU_IP6, 4, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU15_TU_AH, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP,
+ NPC_F_LG_U_IP_HAS_OPTIONS,
+ 0, 0xf, 0, 2,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
- 0, 0, NPC_S_KPU11_TU_ETHER, 8, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP,
+ NPC_F_LG_U_IP_HAS_OPTIONS | NPC_F_LG_U_UNK_IP_PROTO,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
- 0, 0, NPC_S_KPU11_TU_ETHER, 4, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LF, NPC_EC_IP_VER,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_LB, NPC_EC_L2_MPLS_2MANY, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_ARP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 1, 0, NPC_S_KPU12_TU_IP, 0, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 2, 0,
+ NPC_S_KPU15_TU_TCP, 40, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP6,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 1, 0, NPC_S_KPU12_TU_IP6, 0, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 2, 0,
+ NPC_S_KPU15_TU_UDP, 40, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP6,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
- 0, 0, NPC_S_KPU11_TU_ETHER, 4, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU15_TU_SCTP, 40, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP6,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
- 0, 0, NPC_S_KPU11_TU_ETHER, 0, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU15_TU_ICMP, 40, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP6,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 1, 0, NPC_S_KPU12_TU_IP, 0, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
- 0, 2,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU15_TU_ICMP6, 40, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP6,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 1, 0, NPC_S_KPU12_TU_IP6, 0, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
- 0, 2,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU15_TU_ESP, 40, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP6,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
- 0, 0, NPC_S_KPU11_TU_ETHER, 0, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
- 0, 2,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU15_TU_AH, 40, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP6,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 0, 0, NPC_S_KPU11_TU_3RD_NSH, 0, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
- 0, 2,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 0, 0,
+ NPC_S_KPU13_TU_IP6_EXT, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP6,
+ NPC_F_LG_U_IP6_HAS_EXT,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 0, NPC_S_KPU11_TU_MPLS_IN_NSH, 0, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
- 0, 2,
+ NPC_ERRLEV_LF, NPC_EC_IP6_VER,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IP6,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_LE, NPC_EC_UNK, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LF, NPC_EC_UNK,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LG, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
};
-static struct npc_kpu_profile_action kpu11_action_entries[] = {
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 0, 0, NPC_S_KPU12_TU_IP, 14, 1,
- NPC_LID_LE, NPC_LT_LE_TU_ETHER, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 0, 0, NPC_S_KPU12_TU_IP6, 14, 1,
- NPC_LID_LE, NPC_LT_LE_TU_ETHER, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 0, NPC_S_KPU12_TU_ARP, 14, 1,
- NPC_LID_LE, NPC_LT_LE_TU_ETHER, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 0, 0, NPC_S_KPU12_TU_IP, 18, 1,
- NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_CTAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 0, 0, NPC_S_KPU12_TU_IP6, 18, 1,
- NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_CTAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 0, NPC_S_KPU12_TU_ARP, 18, 1,
- NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_CTAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_CTAG_UNK, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 0, 0, NPC_S_KPU12_TU_IP, 22, 1,
- NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_STAG_CTAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 0, 0, NPC_S_KPU12_TU_IP6, 22, 1,
- NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_STAG_CTAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 0, NPC_S_KPU12_TU_ARP, 22, 1,
- NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_STAG_CTAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LE, NPC_LT_LE_TU_ETHER,
- NPC_F_TU_ETHER_STAG_CTAG_UNK, 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 0, 0, NPC_S_KPU12_TU_IP, 18, 1,
- NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_STAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 0, 0, NPC_S_KPU12_TU_IP6, 18, 1,
- NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_STAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 0, NPC_S_KPU12_TU_ARP, 18, 1,
- NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_STAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_STAG_UNK, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 0, 0, NPC_S_KPU12_TU_IP, 22, 1,
- NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_QINQ_CTAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 0, 0, NPC_S_KPU12_TU_IP6, 22, 1,
- NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_QINQ_CTAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 0, NPC_S_KPU12_TU_ARP, 22, 1,
- NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_QINQ_CTAG, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LE, NPC_LT_LE_TU_ETHER,
- NPC_F_TU_ETHER_QINQ_CTAG_UNK, 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
- 0, 0, NPC_S_KPU12_TU_IP, 18, 1,
- NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_QINQ, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
- 0, 0, NPC_S_KPU12_TU_IP6, 18, 1,
- NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_QINQ, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 0, NPC_S_KPU12_TU_ARP, 18, 1,
- NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_QINQ, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_QINQ_UNK, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_UNK, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LE, NPC_LT_LE_TU_PPP, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LE, NPC_LT_LE_TU_MPLS_IN_NSH, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LE, NPC_LT_LE_TU_3RD_NSH, 0, 0, 0,
- 0, 0,
- },
+static struct npc_kpu_profile_action kpu13_action_entries[] = {
{
- NPC_ERRLEV_LE, NPC_EC_UNK, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LE, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
};
-static struct npc_kpu_profile_action kpu12_action_entries[] = {
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 12, 0,
- 2, 0, NPC_S_KPU15_TU_TCP, 20, 1,
- NPC_LID_LF, NPC_LT_LF_TU_IP, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 2, 0, NPC_S_KPU15_TU_UDP, 20, 1,
- NPC_LID_LF, NPC_LT_LF_TU_IP, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU15_TU_SCTP, 20, 1,
- NPC_LID_LF, NPC_LT_LF_TU_IP, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU15_TU_ICMP, 20, 1,
- NPC_LID_LF, NPC_LT_LF_TU_IP, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU15_TU_IGMP, 20, 1,
- NPC_LID_LF, NPC_LT_LF_TU_IP, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU15_TU_ESP, 20, 1,
- NPC_LID_LF, NPC_LT_LF_TU_IP, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU15_TU_AH, 20, 1,
- NPC_LID_LF, NPC_LT_LF_TU_IP, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LF, NPC_LT_LF_TU_IP, NPC_F_IP_UNK_PROTO, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 12, 0,
- 2, 0, NPC_S_KPU15_TU_TCP, 0, 1,
- NPC_LID_LF, NPC_LT_LF_TU_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
- 0, 2,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 2, 0, NPC_S_KPU15_TU_UDP, 0, 1,
- NPC_LID_LF, NPC_LT_LF_TU_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
- 0, 2,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU15_TU_SCTP, 0, 1,
- NPC_LID_LF, NPC_LT_LF_TU_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
- 0, 2,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU15_TU_ICMP, 0, 1,
- NPC_LID_LF, NPC_LT_LF_TU_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
- 0, 2,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU15_TU_IGMP, 0, 1,
- NPC_LID_LF, NPC_LT_LF_TU_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
- 0, 2,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU15_TU_ESP, 0, 1,
- NPC_LID_LF, NPC_LT_LF_TU_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
- 0, 2,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU15_TU_AH, 0, 1,
- NPC_LID_LF, NPC_LT_LF_TU_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
- 0, 2,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LF, NPC_LT_LF_TU_IP,
- NPC_F_IP_UNK_PROTO_HAS_OPTIONS, 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_LF, NPC_EC_IP_VER, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LF, NPC_LT_LF_TU_IP, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LF, NPC_LT_LF_TU_ARP, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 12, 0,
- 2, 0, NPC_S_KPU15_TU_TCP, 40, 1,
- NPC_LID_LF, NPC_LT_LF_TU_IP6, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 2, 0, NPC_S_KPU15_TU_UDP, 40, 1,
- NPC_LID_LF, NPC_LT_LF_TU_IP6, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU15_TU_SCTP, 40, 1,
- NPC_LID_LF, NPC_LT_LF_TU_IP6, 0, 0, 0,
- 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU15_TU_ICMP, 40, 1,
- NPC_LID_LF, NPC_LT_LF_TU_IP6, 0, 0, 0,
- 0, 0,
- },
+static struct npc_kpu_profile_action kpu14_action_entries[] = {
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU15_TU_ICMP6, 40, 1,
- NPC_LID_LF, NPC_LT_LF_TU_IP6, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
+};
+
+static struct npc_kpu_profile_action kpu15_action_entries[] = {
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU15_TU_ESP, 40, 1,
- NPC_LID_LC, NPC_LT_LF_TU_IP6, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LG, NPC_EC_TCP_FLAGS_FIN_ONLY,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_TCP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 2, 0, NPC_S_KPU15_TU_AH, 40, 1,
- NPC_LID_LC, NPC_LT_LF_TU_IP6, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LG, NPC_EC_TCP_FLAGS_ZERO,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_TCP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
- 0, 0, NPC_S_KPU13_TU_IP6_EXT, 0, 1,
- NPC_LID_LF, NPC_LT_LF_TU_IP6, NPC_F_IP6_HAS_EXT, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LG, NPC_EC_TCP_FLAGS_RST_FIN,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_TCP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_LF, NPC_EC_IP6_VER, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LF, NPC_LT_LF_TU_IP6, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LG, NPC_EC_TCP_FLAGS_URG_SYN,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_TCP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_LF, NPC_EC_UNK, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LF, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LG, NPC_EC_TCP_FLAGS_RST_SYN,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_TCP,
+ 0,
+ 0, 0, 0, 0,
},
-};
-
-static struct npc_kpu_profile_action kpu13_action_entries[] = {
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LC, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LG, NPC_EC_TCP_FLAGS_SYN_FIN,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_TCP,
+ 0,
+ 0, 0, 0, 0,
},
-};
-
-static struct npc_kpu_profile_action kpu14_action_entries[] = {
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LC, NPC_LT_NA, 0, 0, 0,
- 0, 0,
- },
-};
-
-static struct npc_kpu_profile_action kpu15_action_entries[] = {
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 0, NPC_S_KPU16_HTTP_DATA, 20, 1,
- NPC_LID_LG, NPC_LT_LG_TU_TCP, NPC_F_TCP_HTTP, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU16_HTTP_DATA, 20, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_TCP,
+ NPC_F_LH_L_TCP_HTTP,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 0, NPC_S_KPU16_HTTPS_DATA, 20, 1,
- NPC_LID_LG, NPC_LT_LG_TU_TCP, NPC_F_TCP_HTTPS, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU16_HTTPS_DATA, 20, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_TCP,
+ NPC_F_LH_L_TCP_HTTPS,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 0, NPC_S_KPU16_PPTP_DATA, 20, 1,
- NPC_LID_LD, NPC_LT_LG_TU_TCP, NPC_F_TCP_PPTP, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU16_PPTP_DATA, 20, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_TCP,
+ NPC_F_LH_L_TCP_PPTP,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 0, NPC_S_KPU16_TCP_DATA, 20, 1,
- NPC_LID_LG, NPC_LT_LG_TU_TCP, NPC_F_TCP_UNK_PORT, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU16_TCP_DATA, 20, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_TCP,
+ NPC_F_LH_L_TCP_UNK_PORT,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 0, NPC_S_KPU16_HTTP_DATA, 0, 1,
- NPC_LID_LG, NPC_LT_LG_TU_TCP, NPC_F_TCP_HTTP_HAS_OPTIONS,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU16_HTTP_DATA, 0, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_TCP,
+ NPC_F_LH_U_TCP_HAS_OPTIONS | NPC_F_LH_L_TCP_HTTP,
12, 0xf0, 1, 2,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 0, NPC_S_KPU16_HTTPS_DATA, 0, 1,
- NPC_LID_LG, NPC_LT_LG_TU_TCP, NPC_F_TCP_HTTPS_HAS_OPTIONS,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU16_HTTPS_DATA, 0, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_TCP,
+ NPC_F_LH_U_TCP_HAS_OPTIONS | NPC_F_LH_L_TCP_HTTPS,
12, 0xf0, 1, 2,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 0, NPC_S_KPU16_PPTP_DATA, 0, 1,
- NPC_LID_LG, NPC_LT_LG_TU_TCP, NPC_F_TCP_PPTP_HAS_OPTIONS,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU16_PPTP_DATA, 0, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_TCP,
+ NPC_F_LH_U_TCP_HAS_OPTIONS | NPC_F_LH_L_TCP_PPTP,
12, 0xf0, 1, 2,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 0, NPC_S_KPU16_TCP_DATA, 0, 1,
- NPC_LID_LG, NPC_LT_LG_TU_TCP, NPC_F_TCP_UNK_PORT_HAS_OPTIONS,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU16_TCP_DATA, 0, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_TCP,
+ NPC_F_LH_U_TCP_HAS_OPTIONS | NPC_F_LH_L_TCP_UNK_PORT,
12, 0xf0, 1, 2,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 0, NPC_S_KPU16_UDP_DATA, 8, 1,
- NPC_LID_LG, NPC_LT_LG_TU_UDP, NPC_F_UDP_UNK_PORT, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU16_UDP_DATA, 8, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_UDP,
+ NPC_F_LH_L_UDP_UNK_PORT,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LG, NPC_LT_LG_TU_SCTP, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_SCTP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LG, NPC_LT_LG_TU_ICMP, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_ICMP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LG, NPC_LT_LG_TU_IGMP, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_IGMP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LG, NPC_LT_LG_TU_ICMP6, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_ICMP6,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LG, NPC_LT_LG_TU_ESP, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_ESP,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LG, NPC_LT_LG_TU_AH, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LH, NPC_LT_LH_TU_AH,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_LG, NPC_EC_L4, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 0,
- NPC_LID_LG, NPC_LT_NA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_LG, NPC_EC_L4,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LH, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
};
static struct npc_kpu_profile_action kpu16_action_entries[] = {
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LH, NPC_LT_LH_TCP_DATA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LH, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LH, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LH, NPC_LT_LH_HTTP_DATA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LH, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LH, NPC_LT_LH_HTTPS_DATA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LH, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LH, NPC_LT_LH_PPTP_DATA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LH, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
- 0, 1, NPC_S_NA, 0, 1,
- NPC_LID_LH, NPC_LT_LH_UDP_DATA, 0, 0, 0,
- 0, 0,
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LH, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
},
};
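For readers decoding the positional initializers in these tables: each brace-delimited group is one struct npc_kpu_profile_action, and the reflowed layout groups related fields onto one line each. A sketch of the field order the entries follow, based on the struct definition in npc.h (names and per-field comments here are an interpretation, not quoted from this revision):

struct npc_kpu_profile_action {
	u8 errlev;		/* NPC_ERRLEV_*: error level to report */
	u8 errcode;		/* NPC_EC_*: error code to report */
	u8 dp0_offset;		/* offsets of up to three data pointers */
	u8 dp1_offset;		/*   extracted from the parsed header */
	u8 dp2_offset;
	u8 bypass_count;	/* number of subsequent KPUs to skip */
	u8 parse_done;		/* 1: parsing stops at this entry */
	u8 next_state;		/* NPC_S_*: state for the next KPU */
	u8 ptr_advance;		/* bytes to advance the parse pointer */
	u8 cap_ena;		/* capture lid/ltype/flags below */
	u8 lid;			/* NPC_LID_*: layer id */
	u8 ltype;		/* NPC_LT_*: layer type */
	u8 flags;		/* NPC_F_*: layer flags */
	u8 offset;		/* variable-length header support: */
	u8 mask;		/*   offset, mask, right flag and */
	u8 right;		/*   shift amount */
	u8 shift;
};

So in the first kpu12 entry above, "2, 12, 0, 2, 0" reads as dp0/dp1/dp2 offsets 2, 12, 0, a bypass_count of 2 (skipping KPU13 and KPU14, which is why the next state can be the KPU15 TCP state) and parse_done 0, followed by next state NPC_S_KPU15_TU_TCP, pointer advance 20 and capture enabled.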
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index e581091c09c4..5c190c3ce898 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -56,12 +56,34 @@ static char *mkex_profile; /* MKEX profile name */
module_param(mkex_profile, charp, 0000);
MODULE_PARM_DESC(mkex_profile, "MKEX profile name string");
+static void rvu_setup_hw_capabilities(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+
+ hw->cap.nix_tx_aggr_lvl = NIX_TXSCH_LVL_TL1;
+ hw->cap.nix_fixed_txschq_mapping = false;
+ hw->cap.nix_shaping = true;
+ hw->cap.nix_tx_link_bp = true;
+ hw->cap.nix_rx_multicast = true;
+
+ if (is_rvu_96xx_B0(rvu)) {
+ hw->cap.nix_fixed_txschq_mapping = true;
+ hw->cap.nix_txsch_per_cgx_lmac = 4;
+ hw->cap.nix_txsch_per_lbk_lmac = 132;
+ hw->cap.nix_txsch_per_sdp_lmac = 76;
+ hw->cap.nix_shaping = false;
+ hw->cap.nix_tx_link_bp = false;
+ if (is_rvu_96xx_A0(rvu))
+ hw->cap.nix_rx_multicast = false;
+ }
+}
+
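rvu_setup_hw_capabilities() follows a defaults-then-quirks pattern: it assumes fully featured silicon, then degrades the capability set for 96xx B0 and, further, for A0. A minimal, hypothetical consumer sketch (the call site is invented; only the capability field comes from this patch):

	/* e.g. in a shaper-configuration path */
	if (!rvu->hw->cap.nix_shaping)
		return 0;	/* no shaping/coloring on this silicon: skip */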
/* Poll a RVU block's register 'offset', for a 'zero'
* or 'nonzero' at bits specified by 'mask'
*/
int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero)
{
- unsigned long timeout = jiffies + usecs_to_jiffies(100);
+ unsigned long timeout = jiffies + usecs_to_jiffies(10000);
void __iomem *reg;
u64 reg_val;
@@ -73,7 +95,6 @@ int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero)
if (!zero && (reg_val & mask))
return 0;
usleep_range(1, 5);
- timeout--;
}
return -EBUSY;
}
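Two fixes land in this hunk: the poll budget grows from 100us to 10ms, and the stray "timeout--" (which decremented a jiffies timestamp, effectively shortening the deadline) is dropped. A sketch of the resulting loop, assuming the loop head outside this hunk tests time_before(jiffies, timeout):

	unsigned long timeout = jiffies + usecs_to_jiffies(10000);

	while (time_before(jiffies, timeout)) {
		u64 reg_val = readq(reg);

		if (zero && !(reg_val & mask))
			return 0;
		if (!zero && (reg_val & mask))
			return 0;
		usleep_range(1, 5);
	}
	return -EBUSY;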
@@ -433,9 +454,9 @@ static void rvu_reset_all_blocks(struct rvu *rvu)
rvu_block_reset(rvu, BLKADDR_SSO, SSO_AF_BLK_RST);
rvu_block_reset(rvu, BLKADDR_TIM, TIM_AF_BLK_RST);
rvu_block_reset(rvu, BLKADDR_CPT0, CPT_AF_BLK_RST);
- rvu_block_reset(rvu, BLKADDR_NDC0, NDC_AF_BLK_RST);
- rvu_block_reset(rvu, BLKADDR_NDC1, NDC_AF_BLK_RST);
- rvu_block_reset(rvu, BLKADDR_NDC2, NDC_AF_BLK_RST);
+ rvu_block_reset(rvu, BLKADDR_NDC_NIX0_RX, NDC_AF_BLK_RST);
+ rvu_block_reset(rvu, BLKADDR_NDC_NIX0_TX, NDC_AF_BLK_RST);
+ rvu_block_reset(rvu, BLKADDR_NDC_NPA0, NDC_AF_BLK_RST);
}
static void rvu_scan_block(struct rvu *rvu, struct rvu_block *block)
@@ -877,8 +898,8 @@ int rvu_aq_alloc(struct rvu *rvu, struct admin_queue **ad_queue,
return 0;
}
-static int rvu_mbox_handler_ready(struct rvu *rvu, struct msg_req *req,
- struct ready_msg_rsp *rsp)
+int rvu_mbox_handler_ready(struct rvu *rvu, struct msg_req *req,
+ struct ready_msg_rsp *rsp)
{
return 0;
}
@@ -1023,9 +1044,9 @@ static int rvu_detach_rsrcs(struct rvu *rvu, struct rsrc_detach *detach,
return 0;
}
-static int rvu_mbox_handler_detach_resources(struct rvu *rvu,
- struct rsrc_detach *detach,
- struct msg_rsp *rsp)
+int rvu_mbox_handler_detach_resources(struct rvu *rvu,
+ struct rsrc_detach *detach,
+ struct msg_rsp *rsp)
{
return rvu_detach_rsrcs(rvu, detach, detach->hdr.pcifunc);
}
@@ -1171,9 +1192,9 @@ fail:
return -ENOSPC;
}
-static int rvu_mbox_handler_attach_resources(struct rvu *rvu,
- struct rsrc_attach *attach,
- struct msg_rsp *rsp)
+int rvu_mbox_handler_attach_resources(struct rvu *rvu,
+ struct rsrc_attach *attach,
+ struct msg_rsp *rsp)
{
u16 pcifunc = attach->hdr.pcifunc;
int err;
@@ -1294,8 +1315,8 @@ static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
rvu_free_rsrc_contig(&pfvf->msix, nvecs, offset);
}
-static int rvu_mbox_handler_msix_offset(struct rvu *rvu, struct msg_req *req,
- struct msix_offset_rsp *rsp)
+int rvu_mbox_handler_msix_offset(struct rvu *rvu, struct msg_req *req,
+ struct msix_offset_rsp *rsp)
{
struct rvu_hwinfo *hw = rvu->hw;
u16 pcifunc = req->hdr.pcifunc;
@@ -1343,8 +1364,8 @@ static int rvu_mbox_handler_msix_offset(struct rvu *rvu, struct msg_req *req,
return 0;
}
-static int rvu_mbox_handler_vf_flr(struct rvu *rvu, struct msg_req *req,
- struct msg_rsp *rsp)
+int rvu_mbox_handler_vf_flr(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
{
u16 pcifunc = req->hdr.pcifunc;
u16 vf, numvfs;
@@ -1363,6 +1384,17 @@ static int rvu_mbox_handler_vf_flr(struct rvu *rvu, struct msg_req *req,
return 0;
}
+int rvu_mbox_handler_get_hw_cap(struct rvu *rvu, struct msg_req *req,
+ struct get_hw_cap_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+
+ rsp->nix_fixed_txschq_mapping = hw->cap.nix_fixed_txschq_mapping;
+ rsp->nix_shaping = hw->cap.nix_shaping;
+
+ return 0;
+}
+
static int rvu_process_mbox_msg(struct otx2_mbox *mbox, int devid,
struct mbox_msghdr *req)
{
@@ -1440,12 +1472,12 @@ static void __rvu_mbox_handler(struct rvu_work *mwork, int type)
/* Process received mbox messages */
req_hdr = mdev->mbase + mbox->rx_start;
- if (req_hdr->num_msgs == 0)
+ if (mw->mbox_wrk[devid].num_msgs == 0)
return;
offset = mbox->rx_start + ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
- for (id = 0; id < req_hdr->num_msgs; id++) {
+ for (id = 0; id < mw->mbox_wrk[devid].num_msgs; id++) {
msg = mdev->mbase + offset;
/* Set which PF/VF sent this message based on mbox IRQ */
@@ -1471,13 +1503,14 @@ static void __rvu_mbox_handler(struct rvu_work *mwork, int type)
if (msg->pcifunc & RVU_PFVF_FUNC_MASK)
dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d:VF%d\n",
err, otx2_mbox_id2name(msg->id),
- msg->id, devid,
+ msg->id, rvu_get_pf(msg->pcifunc),
(msg->pcifunc & RVU_PFVF_FUNC_MASK) - 1);
else
dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d\n",
err, otx2_mbox_id2name(msg->id),
msg->id, devid);
}
+ mw->mbox_wrk[devid].num_msgs = 0;
/* Send mbox responses to VF/PF */
otx2_mbox_msg_send(mbox, devid);
@@ -1523,14 +1556,14 @@ static void __rvu_mbox_up_handler(struct rvu_work *mwork, int type)
mdev = &mbox->dev[devid];
rsp_hdr = mdev->mbase + mbox->rx_start;
- if (rsp_hdr->num_msgs == 0) {
+ if (mw->mbox_wrk_up[devid].up_num_msgs == 0) {
dev_warn(rvu->dev, "mbox up handler: num_msgs = 0\n");
return;
}
offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
- for (id = 0; id < rsp_hdr->num_msgs; id++) {
+ for (id = 0; id < mw->mbox_wrk_up[devid].up_num_msgs; id++) {
msg = mdev->mbase + offset;
if (msg->id >= MBOX_MSG_MAX) {
@@ -1560,6 +1593,7 @@ end:
offset = mbox->rx_start + msg->next_msgoff;
mdev->msgs_acked++;
}
+ mw->mbox_wrk_up[devid].up_num_msgs = 0;
otx2_mbox_reset(mbox, devid);
}
@@ -1697,14 +1731,28 @@ static void rvu_queue_work(struct mbox_wq_info *mw, int first,
mbox = &mw->mbox;
mdev = &mbox->dev[i];
hdr = mdev->mbase + mbox->rx_start;
- if (hdr->num_msgs)
- queue_work(mw->mbox_wq, &mw->mbox_wrk[i].work);
+ /* hdr->num_msgs is set to zero immediately in the interrupt
+ * handler so that it holds a correct value the next time the
+ * interrupt handler is called.
+ * pf->mbox.num_msgs holds the data for use in pfaf_mbox_handler.
+ * pf->mbox.up_num_msgs holds the data for use in
+ * pfaf_mbox_up_handler.
+ */
+
+ if (hdr->num_msgs) {
+ mw->mbox_wrk[i].num_msgs = hdr->num_msgs;
+ hdr->num_msgs = 0;
+ queue_work(mw->mbox_wq, &mw->mbox_wrk[i].work);
+ }
mbox = &mw->mbox_up;
mdev = &mbox->dev[i];
hdr = mdev->mbase + mbox->rx_start;
- if (hdr->num_msgs)
+ if (hdr->num_msgs) {
+ mw->mbox_wrk_up[i].up_num_msgs = hdr->num_msgs;
+ hdr->num_msgs = 0;
queue_work(mw->mbox_wq, &mw->mbox_wrk_up[i].work);
+ }
}
}
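The producer/consumer handshake introduced here: the IRQ-path rvu_queue_work() latches hdr->num_msgs into the work struct and clears the shared mailbox header before queueing work, and the worker (__rvu_mbox_handler(), earlier in this patch) consumes mw->mbox_wrk[devid].num_msgs and zeroes it once done. Condensed sketch, with names taken from the patch:

	/* IRQ path (producer) */
	if (hdr->num_msgs) {
		mw->mbox_wrk[i].num_msgs = hdr->num_msgs;	/* latch */
		hdr->num_msgs = 0;				/* rearm shared hdr */
		queue_work(mw->mbox_wq, &mw->mbox_wrk[i].work);
	}

	/* worker (consumer), after all latched messages are processed */
	mw->mbox_wrk[devid].num_msgs = 0;
	otx2_mbox_msg_send(mbox, devid);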
@@ -2316,18 +2364,6 @@ static int rvu_enable_sriov(struct rvu *rvu)
if (vfs > chans)
vfs = chans;
- /* AF's VFs work in pairs and talk over consecutive loopback channels.
- * Thus we want to enable maximum even number of VFs. In case
- * odd number of VFs are available then the last VF on the list
- * remains disabled.
- */
- if (vfs & 0x1) {
- dev_warn(&pdev->dev,
- "Number of VFs should be even. Enabling %d out of %d.\n",
- vfs - 1, vfs);
- vfs--;
- }
-
if (!vfs)
return 0;
@@ -2432,6 +2468,8 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
rvu_reset_all_blocks(rvu);
+ rvu_setup_hw_capabilities(rvu);
+
err = rvu_setup_hw_resources(rvu);
if (err)
goto err_release_regions;
@@ -2456,6 +2494,9 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (err)
goto err_irq;
+ /* Initialize debugfs */
+ rvu_dbg_init(rvu);
+
return 0;
err_irq:
rvu_unregister_interrupts(rvu);
@@ -2482,6 +2523,7 @@ static void rvu_remove(struct pci_dev *pdev)
{
struct rvu *rvu = pci_get_drvdata(pdev);
+ rvu_dbg_exit(rvu);
rvu_unregister_interrupts(rvu);
rvu_flr_wq_destroy(rvu);
rvu_cgx_exit(rvu);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index c9d60b0554c0..51c206f4fe6f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- * Marvell OcteonTx2 RVU Admin Function driver
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell OcteonTx2 RVU Admin Function driver
*
* Copyright (C) 2018 Marvell International Ltd.
*
@@ -35,9 +35,36 @@
#define RVU_PFVF_FUNC_SHIFT 0
#define RVU_PFVF_FUNC_MASK 0x3FF
+#ifdef CONFIG_DEBUG_FS
+struct dump_ctx {
+ int lf;
+ int id;
+ bool all;
+};
+
+struct rvu_debugfs {
+ struct dentry *root;
+ struct dentry *cgx_root;
+ struct dentry *cgx;
+ struct dentry *lmac;
+ struct dentry *npa;
+ struct dentry *nix;
+ struct dentry *npc;
+ struct dump_ctx npa_aura_ctx;
+ struct dump_ctx npa_pool_ctx;
+ struct dump_ctx nix_cq_ctx;
+ struct dump_ctx nix_rq_ctx;
+ struct dump_ctx nix_sq_ctx;
+ int npa_qsize_id;
+ int nix_qsize_id;
+};
+#endif
+
struct rvu_work {
struct work_struct work;
struct rvu *rvu;
+ int num_msgs;
+ int up_num_msgs;
};
struct rsrc_bmap {
@@ -99,6 +126,7 @@ struct npc_mcam {
u16 lprio_start;
u16 hprio_count;
u16 hprio_end;
+ u16 rx_miss_act_cntr; /* Counter for RX MISS action */
};
/* Structure for per RVU func info ie PF/VF */
@@ -151,15 +179,20 @@ struct rvu_pfvf {
struct mcam_entry entry;
int rxvlan_index;
bool rxvlan;
+
+ bool cgx_in_use; /* this PF/VF using CGX? */
+ int cgx_users; /* number of cgx users - used only by PFs */
};
struct nix_txsch {
struct rsrc_bmap schq;
u8 lvl;
-#define NIX_TXSCHQ_TL1_CFG_DONE BIT_ULL(0)
+#define NIX_TXSCHQ_FREE BIT_ULL(1)
+#define NIX_TXSCHQ_CFG_DONE BIT_ULL(0)
#define TXSCH_MAP_FUNC(__pfvf_map) ((__pfvf_map) & 0xFFFF)
#define TXSCH_MAP_FLAGS(__pfvf_map) ((__pfvf_map) >> 16)
#define TXSCH_MAP(__func, __flags) (((__func) & 0xFFFF) | ((__flags) << 16))
+#define TXSCH_SET_FLAG(__pfvf_map, flag) ((__pfvf_map) | ((flag) << 16))
u32 *pfvf_map;
};
@@ -193,6 +226,21 @@ struct nix_hw {
struct nix_lso lso;
};
+/* RVU block's capabilities or functionality,
+ * which vary by silicon version/SKU.
+ */
+struct hw_cap {
+ /* Transmit side supported functionality */
+ u8 nix_tx_aggr_lvl; /* Tx link's traffic aggregation level */
+ u16 nix_txsch_per_cgx_lmac; /* Max Q's transmitting to CGX LMAC */
+ u16 nix_txsch_per_lbk_lmac; /* Max Q's transmitting to LBK LMAC */
+ u16 nix_txsch_per_sdp_lmac; /* Max Q's transmitting to SDP LMAC */
+ bool nix_fixed_txschq_mapping; /* Schq mapping fixed or flexible */
+ bool nix_shaping; /* Is shaping and coloring supported */
+ bool nix_tx_link_bp; /* Can link backpressure TL queues? */
+ bool nix_rx_multicast; /* Rx packet replication support */
+};
+
struct rvu_hwinfo {
u8 total_pfs; /* MAX RVU PFs HW supports */
u16 total_vfs; /* Max RVU VFs HW supports */
@@ -204,7 +252,7 @@ struct rvu_hwinfo {
u8 sdp_links;
u8 npc_kpus; /* No of parser units */
-
+ struct hw_cap cap;
struct rvu_block block[BLK_COUNT]; /* Block info */
struct nix_hw *nix0;
struct npc_pkind pkind;
@@ -261,8 +309,13 @@ struct rvu {
struct workqueue_struct *cgx_evh_wq;
spinlock_t cgx_evq_lock; /* cgx event queue lock */
struct list_head cgx_evq_head; /* cgx event queue head */
+ struct mutex cgx_cfg_lock; /* serialize cgx configuration */
char mkex_pfl_name[MKEX_NAME_LEN]; /* Configured MKEX profile name */
+
+#ifdef CONFIG_DEBUG_FS
+ struct rvu_debugfs rvu_dbg;
+#endif
};
static inline void rvu_write64(struct rvu *rvu, u64 block, u64 offset, u64 val)
@@ -285,7 +338,8 @@ static inline u64 rvupf_read64(struct rvu *rvu, u64 offset)
return readq(rvu->pfreg_base + offset);
}
-static inline bool is_rvu_9xxx_A0(struct rvu *rvu)
+/* Silicon revisions */
+static inline bool is_rvu_96xx_A0(struct rvu *rvu)
{
struct pci_dev *pdev = rvu->pdev;
@@ -293,6 +347,14 @@ static inline bool is_rvu_9xxx_A0(struct rvu *rvu)
(pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX);
}
+static inline bool is_rvu_96xx_B0(struct rvu *rvu)
+{
+ struct pci_dev *pdev = rvu->pdev;
+
+ return ((pdev->revision == 0x00) || (pdev->revision == 0x01)) &&
+ (pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX);
+}
+
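Note that the two revision predicates overlap by design. Assuming is_rvu_96xx_A0() tests pdev->revision == 0x00 (its body is outside this hunk), every A0 part also satisfies the B0 check, which accepts revisions 0x00 and 0x01:

	if (is_rvu_96xx_B0(rvu)) {		/* rev 0x00 or 0x01 */
		/* B0-and-earlier quirks ... */
		if (is_rvu_96xx_A0(rvu))	/* rev 0x00 only */
			hw->cap.nix_rx_multicast = false;
	}

This nesting is exactly the pattern rvu_setup_hw_capabilities() uses above.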
/* Function Prototypes
* RVU
*/
@@ -342,52 +404,25 @@ static inline void rvu_get_cgx_lmac_id(u8 map, u8 *cgx_id, u8 *lmac_id)
*lmac_id = (map & 0xF);
}
+#define M(_name, _id, fn_name, req, rsp) \
+int rvu_mbox_handler_ ## fn_name(struct rvu *, struct req *, struct rsp *);
+MBOX_MESSAGES
+#undef M
+
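The three lines above are an X-macro: MBOX_MESSAGES (defined in mbox.h) is a list of M(...) invocations, and redefining M before expanding the list stamps out one handler prototype per mailbox message, replacing the long hand-written prototype list deleted from rvu.h below. As an illustration, if the list contained the entry M(READY, 0x001, ready, msg_req, ready_msg_rsp) (the exact entry is an assumption about mbox.h), the block would expand to:

	int rvu_mbox_handler_ready(struct rvu *, struct msg_req *,
				   struct ready_msg_rsp *);

which matches the handler made non-static in rvu.c earlier in this patch.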
int rvu_cgx_init(struct rvu *rvu);
int rvu_cgx_exit(struct rvu *rvu);
void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu);
int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start);
-int rvu_mbox_handler_cgx_start_rxtx(struct rvu *rvu, struct msg_req *req,
- struct msg_rsp *rsp);
-int rvu_mbox_handler_cgx_stop_rxtx(struct rvu *rvu, struct msg_req *req,
- struct msg_rsp *rsp);
-int rvu_mbox_handler_cgx_stats(struct rvu *rvu, struct msg_req *req,
- struct cgx_stats_rsp *rsp);
-int rvu_mbox_handler_cgx_mac_addr_set(struct rvu *rvu,
- struct cgx_mac_addr_set_or_get *req,
- struct cgx_mac_addr_set_or_get *rsp);
-int rvu_mbox_handler_cgx_mac_addr_get(struct rvu *rvu,
- struct cgx_mac_addr_set_or_get *req,
- struct cgx_mac_addr_set_or_get *rsp);
-int rvu_mbox_handler_cgx_promisc_enable(struct rvu *rvu, struct msg_req *req,
- struct msg_rsp *rsp);
-int rvu_mbox_handler_cgx_promisc_disable(struct rvu *rvu, struct msg_req *req,
- struct msg_rsp *rsp);
-int rvu_mbox_handler_cgx_start_linkevents(struct rvu *rvu, struct msg_req *req,
- struct msg_rsp *rsp);
-int rvu_mbox_handler_cgx_stop_linkevents(struct rvu *rvu, struct msg_req *req,
- struct msg_rsp *rsp);
-int rvu_mbox_handler_cgx_get_linkinfo(struct rvu *rvu, struct msg_req *req,
- struct cgx_link_info_msg *rsp);
-int rvu_mbox_handler_cgx_intlbk_enable(struct rvu *rvu, struct msg_req *req,
- struct msg_rsp *rsp);
-int rvu_mbox_handler_cgx_intlbk_disable(struct rvu *rvu, struct msg_req *req,
- struct msg_rsp *rsp);
-
+void rvu_cgx_enadis_rx_bp(struct rvu *rvu, int pf, bool enable);
+int rvu_cgx_start_stop_io(struct rvu *rvu, u16 pcifunc, bool start);
+int rvu_cgx_nix_cuml_stats(struct rvu *rvu, void *cgxd, int lmac_id, int index,
+ int rxtxflag, u64 *stat);
/* NPA APIs */
int rvu_npa_init(struct rvu *rvu);
void rvu_npa_freemem(struct rvu *rvu);
void rvu_npa_lf_teardown(struct rvu *rvu, u16 pcifunc, int npalf);
-int rvu_mbox_handler_npa_aq_enq(struct rvu *rvu,
- struct npa_aq_enq_req *req,
- struct npa_aq_enq_rsp *rsp);
-int rvu_mbox_handler_npa_hwctx_disable(struct rvu *rvu,
- struct hwctx_disable_req *req,
- struct msg_rsp *rsp);
-int rvu_mbox_handler_npa_lf_alloc(struct rvu *rvu,
- struct npa_lf_alloc_req *req,
- struct npa_lf_alloc_rsp *rsp);
-int rvu_mbox_handler_npa_lf_free(struct rvu *rvu, struct msg_req *req,
- struct msg_rsp *rsp);
+int rvu_npa_aq_enq_inst(struct rvu *rvu, struct npa_aq_enq_req *req,
+ struct npa_aq_enq_rsp *rsp);
/* NIX APIs */
bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc);
@@ -397,55 +432,7 @@ int rvu_nix_reserve_mark_format(struct rvu *rvu, struct nix_hw *nix_hw,
void rvu_nix_freemem(struct rvu *rvu);
int rvu_get_nixlf_count(struct rvu *rvu);
void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int npalf);
-int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
- struct nix_lf_alloc_req *req,
- struct nix_lf_alloc_rsp *rsp);
-int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct msg_req *req,
- struct msg_rsp *rsp);
-int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
- struct nix_aq_enq_req *req,
- struct nix_aq_enq_rsp *rsp);
-int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu,
- struct hwctx_disable_req *req,
- struct msg_rsp *rsp);
-int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
- struct nix_txsch_alloc_req *req,
- struct nix_txsch_alloc_rsp *rsp);
-int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu,
- struct nix_txsch_free_req *req,
- struct msg_rsp *rsp);
-int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
- struct nix_txschq_config *req,
- struct msg_rsp *rsp);
-int rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req,
- struct msg_rsp *rsp);
-int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu,
- struct nix_vtag_config *req,
- struct msg_rsp *rsp);
-int rvu_mbox_handler_nix_rxvlan_alloc(struct rvu *rvu, struct msg_req *req,
- struct msg_rsp *rsp);
-int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu,
- struct nix_rss_flowkey_cfg *req,
- struct nix_rss_flowkey_cfg_rsp *rsp);
-int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
- struct nix_set_mac_addr *req,
- struct msg_rsp *rsp);
-int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req,
- struct msg_rsp *rsp);
-int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
- struct msg_rsp *rsp);
-int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
- struct msg_rsp *rsp);
-int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
- struct msg_rsp *rsp);
-int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu,
- struct nix_mark_format_cfg *req,
- struct nix_mark_format_cfg_rsp *rsp);
-int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req,
- struct msg_rsp *rsp);
-int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu,
- struct nix_lso_format_cfg *req,
- struct nix_lso_format_cfg_rsp *rsp);
+int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf);
/* NPC APIs */
int rvu_npc_init(struct rvu *rvu);
@@ -460,45 +447,25 @@ void rvu_npc_disable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_enable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
int nixlf, u64 chan);
+void rvu_npc_disable_bcast_entry(struct rvu *rvu, u16 pcifunc);
int rvu_npc_update_rxvlan(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_disable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_enable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
int group, int alg_idx, int mcam_index);
-int rvu_mbox_handler_npc_mcam_alloc_entry(struct rvu *rvu,
- struct npc_mcam_alloc_entry_req *req,
- struct npc_mcam_alloc_entry_rsp *rsp);
-int rvu_mbox_handler_npc_mcam_free_entry(struct rvu *rvu,
- struct npc_mcam_free_entry_req *req,
- struct msg_rsp *rsp);
-int rvu_mbox_handler_npc_mcam_write_entry(struct rvu *rvu,
- struct npc_mcam_write_entry_req *req,
- struct msg_rsp *rsp);
-int rvu_mbox_handler_npc_mcam_ena_entry(struct rvu *rvu,
- struct npc_mcam_ena_dis_entry_req *req,
- struct msg_rsp *rsp);
-int rvu_mbox_handler_npc_mcam_dis_entry(struct rvu *rvu,
- struct npc_mcam_ena_dis_entry_req *req,
- struct msg_rsp *rsp);
-int rvu_mbox_handler_npc_mcam_shift_entry(struct rvu *rvu,
- struct npc_mcam_shift_entry_req *req,
- struct npc_mcam_shift_entry_rsp *rsp);
-int rvu_mbox_handler_npc_mcam_alloc_counter(struct rvu *rvu,
- struct npc_mcam_alloc_counter_req *req,
- struct npc_mcam_alloc_counter_rsp *rsp);
-int rvu_mbox_handler_npc_mcam_free_counter(struct rvu *rvu,
- struct npc_mcam_oper_counter_req *req, struct msg_rsp *rsp);
-int rvu_mbox_handler_npc_mcam_clear_counter(struct rvu *rvu,
- struct npc_mcam_oper_counter_req *req, struct msg_rsp *rsp);
-int rvu_mbox_handler_npc_mcam_unmap_counter(struct rvu *rvu,
- struct npc_mcam_unmap_counter_req *req, struct msg_rsp *rsp);
-int rvu_mbox_handler_npc_mcam_counter_stats(struct rvu *rvu,
- struct npc_mcam_oper_counter_req *req,
- struct npc_mcam_oper_counter_rsp *rsp);
-int rvu_mbox_handler_npc_mcam_alloc_and_write_entry(struct rvu *rvu,
- struct npc_mcam_alloc_and_write_entry_req *req,
- struct npc_mcam_alloc_and_write_entry_rsp *rsp);
-int rvu_mbox_handler_npc_get_kex_cfg(struct rvu *rvu, struct msg_req *req,
- struct npc_get_kex_cfg_rsp *rsp);
+void rvu_npc_get_mcam_entry_alloc_info(struct rvu *rvu, u16 pcifunc,
+ int blkaddr, int *alloc_cnt,
+ int *enable_cnt);
+void rvu_npc_get_mcam_counter_alloc_info(struct rvu *rvu, u16 pcifunc,
+ int blkaddr, int *alloc_cnt,
+ int *enable_cnt);
+
+#ifdef CONFIG_DEBUG_FS
+void rvu_dbg_init(struct rvu *rvu);
+void rvu_dbg_exit(struct rvu *rvu);
+#else
+static inline void rvu_dbg_init(struct rvu *rvu) {}
+static inline void rvu_dbg_exit(struct rvu *rvu) {}
+#endif
#endif /* RVU_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
index 7d7133c5f799..11e5921c55b9 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -14,6 +14,7 @@
#include "rvu.h"
#include "cgx.h"
+#include "rvu_reg.h"
struct cgx_evq_entry {
struct list_head evq_node;
@@ -40,12 +41,25 @@ MBOX_UP_CGX_MESSAGES
#undef M
/* Returns bitmap of mapped PFs */
-static inline u16 cgxlmac_to_pfmap(struct rvu *rvu, u8 cgx_id, u8 lmac_id)
+static u16 cgxlmac_to_pfmap(struct rvu *rvu, u8 cgx_id, u8 lmac_id)
{
return rvu->cgxlmac2pf_map[CGX_OFFSET(cgx_id) + lmac_id];
}
-static inline u8 cgxlmac_id_to_bmap(u8 cgx_id, u8 lmac_id)
+static int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id)
+{
+ unsigned long pfmap;
+
+ pfmap = cgxlmac_to_pfmap(rvu, cgx_id, lmac_id);
+
+ /* Assumes only one pf mapped to a cgx lmac port */
+ if (!pfmap)
+ return -ENODEV;
+ else
+ return find_first_bit(&pfmap, 16);
+}
+
+static u8 cgxlmac_id_to_bmap(u8 cgx_id, u8 lmac_id)
{
return ((cgx_id & 0xF) << 4) | (lmac_id & 0xF);
}
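cgxlmac_id_to_bmap() is the inverse of rvu_get_cgx_lmac_id() shown in rvu.h above: the cgx and lmac ids are packed as the two nibbles of one u8. Round-trip sketch:

	u8 map = cgxlmac_id_to_bmap(cgx_id, lmac_id);	/* (cgx << 4) | lmac */
	u8 c, l;

	rvu_get_cgx_lmac_id(map, &c, &l);	/* c == cgx_id, l == lmac_id */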
@@ -294,6 +308,8 @@ int rvu_cgx_init(struct rvu *rvu)
if (err)
return err;
+ mutex_init(&rvu->cgx_cfg_lock);
+
/* Ensure event handler registration is completed, before
* we turn on the links
*/
@@ -334,6 +350,24 @@ int rvu_cgx_exit(struct rvu *rvu)
return 0;
}
+void rvu_cgx_enadis_rx_bp(struct rvu *rvu, int pf, bool enable)
+{
+ u8 cgx_id, lmac_id;
+ void *cgxd;
+
+ if (!is_pf_cgxmapped(rvu, pf))
+ return;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ cgxd = rvu_cgx_pdata(cgx_id, rvu);
+
+ /* Set / clear CTL_BCK to control pause frame forwarding to NIX */
+ if (enable)
+ cgx_lmac_enadis_rx_pause_fwding(cgxd, lmac_id, true);
+ else
+ cgx_lmac_enadis_rx_pause_fwding(cgxd, lmac_id, false);
+}
+
int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start)
{
int pf = rvu_get_pf(pcifunc);
@@ -562,3 +596,95 @@ int rvu_mbox_handler_cgx_intlbk_disable(struct rvu *rvu, struct msg_req *req,
rvu_cgx_config_intlbk(rvu, req->hdr.pcifunc, false);
return 0;
}
+
+/* Finds the cumulative status of NIX rx/tx counters from the LF of a PF and
+ * those from its VFs as well, i.e. NIX rx/tx counters at the CGX port level.
+ */
+int rvu_cgx_nix_cuml_stats(struct rvu *rvu, void *cgxd, int lmac_id,
+ int index, int rxtxflag, u64 *stat)
+{
+ struct rvu_block *block;
+ int blkaddr;
+ u16 pcifunc;
+ int pf, lf;
+
+ *stat = 0;
+
+ if (!cgxd || !rvu)
+ return -EINVAL;
+
+ pf = cgxlmac_to_pf(rvu, cgx_get_cgxid(cgxd), lmac_id);
+ if (pf < 0)
+ return pf;
+
+ /* Assumes the LF of a PF and all of its VFs belong to the same
+ * NIX block
+ */
+ pcifunc = pf << RVU_PFVF_PF_SHIFT;
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (blkaddr < 0)
+ return 0;
+ block = &rvu->hw->block[blkaddr];
+
+ for (lf = 0; lf < block->lf.max; lf++) {
+ /* Check if a lf is attached to this PF or one of its VFs */
+ if (!((block->fn_map[lf] & ~RVU_PFVF_FUNC_MASK) == (pcifunc &
+ ~RVU_PFVF_FUNC_MASK)))
+ continue;
+ if (rxtxflag == NIX_STATS_RX)
+ *stat += rvu_read64(rvu, blkaddr,
+ NIX_AF_LFX_RX_STATX(lf, index));
+ else
+ *stat += rvu_read64(rvu, blkaddr,
+ NIX_AF_LFX_TX_STATX(lf, index));
+ }
+
+ return 0;
+}
+
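The fn_map filter above works by comparing only the PF bits of each LF owner: masking a pcifunc with ~RVU_PFVF_FUNC_MASK drops the 10-bit function index and keeps the PF number, so a PF and all of its VFs compare equal. A worked sketch, assuming the PF field starts at bit 10 (consistent with the "pf << 10 | vf" in rvu_debugfs.c below) and that a VF's func field is its index plus one:

	u16 pf3     = 3 << 10;		/* PF3 itself: func bits are 0 */
	u16 pf3_vf0 = (3 << 10) | 1;	/* PF3:VF0: func = VF index + 1 */

	/* both reduce to PF3 once the 10 func bits are masked off */
	WARN_ON((pf3 & ~RVU_PFVF_FUNC_MASK) !=
		(pf3_vf0 & ~RVU_PFVF_FUNC_MASK));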
+int rvu_cgx_start_stop_io(struct rvu *rvu, u16 pcifunc, bool start)
+{
+ struct rvu_pfvf *parent_pf, *pfvf;
+ int cgx_users, err = 0;
+
+ if (!is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc)))
+ return 0;
+
+ parent_pf = &rvu->pf[rvu_get_pf(pcifunc)];
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+
+ mutex_lock(&rvu->cgx_cfg_lock);
+
+ if (start && pfvf->cgx_in_use)
+ goto exit; /* CGX is already started hence nothing to do */
+ if (!start && !pfvf->cgx_in_use)
+ goto exit; /* CGX is already stopped hence nothing to do */
+
+ if (start) {
+ cgx_users = parent_pf->cgx_users;
+ parent_pf->cgx_users++;
+ } else {
+ parent_pf->cgx_users--;
+ cgx_users = parent_pf->cgx_users;
+ }
+
+ /* Start CGX when the first of all NIXLFs is started.
+ * Stop CGX when the last of all NIXLFs is stopped.
+ */
+ if (!cgx_users) {
+ err = rvu_cgx_config_rxtx(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK,
+ start);
+ if (err) {
+ dev_err(rvu->dev, "Unable to %s CGX\n",
+ start ? "start" : "stop");
+ /* Revert the usage count in case of error */
+ parent_pf->cgx_users = start ? parent_pf->cgx_users - 1
+ : parent_pf->cgx_users + 1;
+ goto exit;
+ }
+ }
+ pfvf->cgx_in_use = start;
+exit:
+ mutex_unlock(&rvu->cgx_cfg_lock);
+ return err;
+}
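rvu_cgx_start_stop_io() gives the VFs sharing a PF's CGX LMAC a reference-counted on/off switch: the LMAC is reconfigured only on the 0 -> 1 and 1 -> 0 transitions of parent_pf->cgx_users, and the per-pfvf cgx_in_use flag makes repeated calls idempotent. A hypothetical call-site sketch (the NIXLF start/stop hooks are an assumption; only the semantics come from the function above):

	/* on NIXLF start: the first user powers the LMAC on */
	err = rvu_cgx_start_stop_io(rvu, pcifunc, true);
	if (err)
		return err;

	/* ... traffic flows ... */

	/* on NIXLF stop: the last user powers the LMAC off */
	rvu_cgx_start_stop_io(rvu, pcifunc, false);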
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
new file mode 100644
index 000000000000..77adad4adb1b
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
@@ -0,0 +1,1711 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 RVU Admin Function driver
+ *
+ * Copyright (C) 2019 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifdef CONFIG_DEBUG_FS
+
+#include <linux/fs.h>
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "rvu_struct.h"
+#include "rvu_reg.h"
+#include "rvu.h"
+#include "cgx.h"
+#include "npc.h"
+
+#define DEBUGFS_DIR_NAME "octeontx2"
+
+enum {
+ CGX_STAT0,
+ CGX_STAT1,
+ CGX_STAT2,
+ CGX_STAT3,
+ CGX_STAT4,
+ CGX_STAT5,
+ CGX_STAT6,
+ CGX_STAT7,
+ CGX_STAT8,
+ CGX_STAT9,
+ CGX_STAT10,
+ CGX_STAT11,
+ CGX_STAT12,
+ CGX_STAT13,
+ CGX_STAT14,
+ CGX_STAT15,
+ CGX_STAT16,
+ CGX_STAT17,
+ CGX_STAT18,
+};
+
+/* NIX TX stats */
+enum nix_stat_lf_tx {
+ TX_UCAST = 0x0,
+ TX_BCAST = 0x1,
+ TX_MCAST = 0x2,
+ TX_DROP = 0x3,
+ TX_OCTS = 0x4,
+ TX_STATS_ENUM_LAST,
+};
+
+/* NIX RX stats */
+enum nix_stat_lf_rx {
+ RX_OCTS = 0x0,
+ RX_UCAST = 0x1,
+ RX_BCAST = 0x2,
+ RX_MCAST = 0x3,
+ RX_DROP = 0x4,
+ RX_DROP_OCTS = 0x5,
+ RX_FCS = 0x6,
+ RX_ERR = 0x7,
+ RX_DRP_BCAST = 0x8,
+ RX_DRP_MCAST = 0x9,
+ RX_DRP_L3BCAST = 0xa,
+ RX_DRP_L3MCAST = 0xb,
+ RX_STATS_ENUM_LAST,
+};
+
+static char *cgx_rx_stats_fields[] = {
+ [CGX_STAT0] = "Received packets",
+ [CGX_STAT1] = "Octets of received packets",
+ [CGX_STAT2] = "Received PAUSE packets",
+ [CGX_STAT3] = "Received PAUSE and control packets",
+ [CGX_STAT4] = "Filtered DMAC0 (NIX-bound) packets",
+ [CGX_STAT5] = "Filtered DMAC0 (NIX-bound) octets",
+ [CGX_STAT6] = "Packets dropped due to RX FIFO full",
+ [CGX_STAT7] = "Octets dropped due to RX FIFO full",
+ [CGX_STAT8] = "Error packets",
+ [CGX_STAT9] = "Filtered DMAC1 (NCSI-bound) packets",
+ [CGX_STAT10] = "Filtered DMAC1 (NCSI-bound) octets",
+ [CGX_STAT11] = "NCSI-bound packets dropped",
+ [CGX_STAT12] = "NCSI-bound octets dropped",
+};
+
+static char *cgx_tx_stats_fields[] = {
+ [CGX_STAT0] = "Packets dropped due to excessive collisions",
+ [CGX_STAT1] = "Packets dropped due to excessive deferral",
+ [CGX_STAT2] = "Multiple collisions before successful transmission",
+ [CGX_STAT3] = "Single collisions before successful transmission",
+ [CGX_STAT4] = "Total octets sent on the interface",
+ [CGX_STAT5] = "Total frames sent on the interface",
+ [CGX_STAT6] = "Packets sent with an octet count < 64",
+ [CGX_STAT7] = "Packets sent with an octet count == 64",
+ [CGX_STAT8] = "Packets sent with an octet count of 65-127",
+ [CGX_STAT9] = "Packets sent with an octet count of 128-255",
+ [CGX_STAT10] = "Packets sent with an octet count of 256-511",
+ [CGX_STAT11] = "Packets sent with an octet count of 512-1023",
+ [CGX_STAT12] = "Packets sent with an octet count of 1024-1518",
+ [CGX_STAT13] = "Packets sent with an octet count of > 1518",
+ [CGX_STAT14] = "Packets sent to a broadcast DMAC",
+ [CGX_STAT15] = "Packets sent to the multicast DMAC",
+ [CGX_STAT16] = "Packets truncated due to transmit underflow",
+ [CGX_STAT17] = "Control/PAUSE packets sent",
+};
+
+#define NDC_MAX_BANK(rvu, blk_addr) (rvu_read64(rvu, \
+ blk_addr, NDC_AF_CONST) & 0xFF)
+
+#define rvu_dbg_NULL NULL
+#define rvu_dbg_open_NULL NULL
+
+#define RVU_DEBUG_SEQ_FOPS(name, read_op, write_op) \
+static int rvu_dbg_open_##name(struct inode *inode, struct file *file) \
+{ \
+ return single_open(file, rvu_dbg_##read_op, inode->i_private); \
+} \
+static const struct file_operations rvu_dbg_##name##_fops = { \
+ .owner = THIS_MODULE, \
+ .open = rvu_dbg_open_##name, \
+ .read = seq_read, \
+ .write = rvu_dbg_##write_op, \
+ .llseek = seq_lseek, \
+ .release = single_release, \
+}
+
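RVU_DEBUG_SEQ_FOPS(name, read_op, write_op) stamps out a single_open() wrapper plus a file_operations table for each seq_file-backed debugfs node. For instance, the later invocation RVU_DEBUG_SEQ_FOPS(npa_qsize, npa_qsize_display, npa_qsize_write) expands to (sketch):

	static int rvu_dbg_open_npa_qsize(struct inode *inode, struct file *file)
	{
		return single_open(file, rvu_dbg_npa_qsize_display,
				   inode->i_private);
	}

	static const struct file_operations rvu_dbg_npa_qsize_fops = {
		.owner	 = THIS_MODULE,
		.open	 = rvu_dbg_open_npa_qsize,
		.read	 = seq_read,
		.write	 = rvu_dbg_npa_qsize_write,
		.llseek	 = seq_lseek,
		.release = single_release,
	};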
+#define RVU_DEBUG_FOPS(name, read_op, write_op) \
+static const struct file_operations rvu_dbg_##name##_fops = { \
+ .owner = THIS_MODULE, \
+ .open = simple_open, \
+ .read = rvu_dbg_##read_op, \
+ .write = rvu_dbg_##write_op \
+}
+
+static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf);
+
+/* Dumps current provisioning status of all RVU block LFs */
+static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
+ char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ int index, off = 0, flag = 0, go_back = 0, off_prev;
+ struct rvu *rvu = filp->private_data;
+ int lf, pf, vf, pcifunc;
+ struct rvu_block block;
+ int bytes_not_copied;
+ int buf_size = 2048;
+ char *buf;
+
+ /* don't allow partial reads */
+ if (*ppos != 0)
+ return 0;
+
+ buf = kzalloc(buf_size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ off += scnprintf(&buf[off], buf_size - 1 - off, "\npcifunc\t\t");
+ for (index = 0; index < BLK_COUNT; index++)
+ if (strlen(rvu->hw->block[index].name))
+ off += scnprintf(&buf[off], buf_size - 1 - off,
+ "%*s\t", (index - 1) * 2,
+ rvu->hw->block[index].name);
+ off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
+ for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
+ for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
+ pcifunc = pf << 10 | vf;
+ if (!pcifunc)
+ continue;
+
+ if (vf) {
+ go_back = scnprintf(&buf[off],
+ buf_size - 1 - off,
+ "PF%d:VF%d\t\t", pf,
+ vf - 1);
+ } else {
+ go_back = scnprintf(&buf[off],
+ buf_size - 1 - off,
+ "PF%d\t\t", pf);
+ }
+
+ off += go_back;
+ for (index = 0; index < BLKTYPE_MAX; index++) {
+ block = rvu->hw->block[index];
+ if (!strlen(block.name))
+ continue;
+ off_prev = off;
+ for (lf = 0; lf < block.lf.max; lf++) {
+ if (block.fn_map[lf] != pcifunc)
+ continue;
+ flag = 1;
+ off += scnprintf(&buf[off], buf_size - 1
+ - off, "%3d,", lf);
+ }
+ if (flag && off_prev != off)
+ off--;
+ else
+ go_back++;
+ off += scnprintf(&buf[off], buf_size - 1 - off,
+ "\t");
+ }
+ if (!flag)
+ off -= go_back;
+ else
+ flag = 0;
+ off--;
+ off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
+ }
+ }
+
+ bytes_not_copied = copy_to_user(buffer, buf, off);
+ kfree(buf);
+
+ if (bytes_not_copied)
+ return -EFAULT;
+
+ *ppos = off;
+ return off;
+}
+
+RVU_DEBUG_FOPS(rsrc_status, rsrc_attach_status, NULL);
+
+static bool rvu_dbg_is_valid_lf(struct rvu *rvu, int blktype, int lf,
+ u16 *pcifunc)
+{
+ struct rvu_block *block;
+ struct rvu_hwinfo *hw;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, blktype, 0);
+ if (blkaddr < 0) {
+ dev_warn(rvu->dev, "Invalid blktype\n");
+ return false;
+ }
+
+ hw = rvu->hw;
+ block = &hw->block[blkaddr];
+
+ if (lf < 0 || lf >= block->lf.max) {
+ dev_warn(rvu->dev, "Invalid LF: valid range: 0-%d\n",
+ block->lf.max - 1);
+ return false;
+ }
+
+ *pcifunc = block->fn_map[lf];
+ if (!*pcifunc) {
+ dev_warn(rvu->dev,
+ "This LF is not attached to any RVU PFFUNC\n");
+ return false;
+ }
+ return true;
+}
+
+static void print_npa_qsize(struct seq_file *m, struct rvu_pfvf *pfvf)
+{
+ char *buf;
+
+ buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!buf)
+ return;
+
+ if (!pfvf->aura_ctx) {
+ seq_puts(m, "Aura context is not initialized\n");
+ } else {
+ bitmap_print_to_pagebuf(false, buf, pfvf->aura_bmap,
+ pfvf->aura_ctx->qsize);
+ seq_printf(m, "Aura count : %d\n", pfvf->aura_ctx->qsize);
+ seq_printf(m, "Aura context ena/dis bitmap : %s\n", buf);
+ }
+
+ if (!pfvf->pool_ctx) {
+ seq_puts(m, "Pool context is not initialized\n");
+ } else {
+ bitmap_print_to_pagebuf(false, buf, pfvf->pool_bmap,
+ pfvf->pool_ctx->qsize);
+ seq_printf(m, "Pool count : %d\n", pfvf->pool_ctx->qsize);
+ seq_printf(m, "Pool context ena/dis bitmap : %s\n", buf);
+ }
+ kfree(buf);
+}
+
+/* The 'qsize' entry dumps current Aura/Pool context Qsize
+ * and each context's current enable/disable status in a bitmap.
+ */
+static int rvu_dbg_qsize_display(struct seq_file *filp, void *unused,
+ int blktype)
+{
+ void (*print_qsize)(struct seq_file *filp,
+ struct rvu_pfvf *pfvf) = NULL;
+ struct rvu_pfvf *pfvf;
+ struct rvu *rvu;
+ int qsize_id;
+ u16 pcifunc;
+
+ rvu = filp->private;
+ switch (blktype) {
+ case BLKTYPE_NPA:
+ qsize_id = rvu->rvu_dbg.npa_qsize_id;
+ print_qsize = print_npa_qsize;
+ break;
+
+ case BLKTYPE_NIX:
+ qsize_id = rvu->rvu_dbg.nix_qsize_id;
+ print_qsize = print_nix_qsize;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (!rvu_dbg_is_valid_lf(rvu, blktype, qsize_id, &pcifunc))
+ return -EINVAL;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ print_qsize(filp, pfvf);
+
+ return 0;
+}
+
+static ssize_t rvu_dbg_qsize_write(struct file *filp,
+ const char __user *buffer, size_t count,
+ loff_t *ppos, int blktype)
+{
+ char *blk_string = (blktype == BLKTYPE_NPA) ? "npa" : "nix";
+ struct seq_file *seqfile = filp->private_data;
+ char *cmd_buf, *cmd_buf_tmp, *subtoken;
+ struct rvu *rvu = seqfile->private;
+ u16 pcifunc;
+ int ret, lf;
+
+ cmd_buf = memdup_user_nul(buffer, count);
+ if (IS_ERR(cmd_buf))
+ return PTR_ERR(cmd_buf);
+
+ cmd_buf_tmp = strchr(cmd_buf, '\n');
+ if (cmd_buf_tmp) {
+ *cmd_buf_tmp = '\0';
+ count = cmd_buf_tmp - cmd_buf + 1;
+ }
+
+ cmd_buf_tmp = cmd_buf;
+ subtoken = strsep(&cmd_buf, " ");
+ ret = subtoken ? kstrtoint(subtoken, 10, &lf) : -EINVAL;
+ if (cmd_buf)
+ ret = -EINVAL;
+
+ if (!strncmp(subtoken, "help", 4) || ret < 0) {
+ dev_info(rvu->dev, "Use 'echo <%s-lf> > qsize'\n", blk_string);
+ goto qsize_write_done;
+ }
+
+ if (!rvu_dbg_is_valid_lf(rvu, blktype, lf, &pcifunc)) {
+ ret = -EINVAL;
+ goto qsize_write_done;
+ }
+ if (blktype == BLKTYPE_NPA)
+ rvu->rvu_dbg.npa_qsize_id = lf;
+ else
+ rvu->rvu_dbg.nix_qsize_id = lf;
+
+qsize_write_done:
+ kfree(cmd_buf_tmp);
+ return ret ? ret : count;
+}
+
+static ssize_t rvu_dbg_npa_qsize_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return rvu_dbg_qsize_write(filp, buffer, count, ppos,
+ BLKTYPE_NPA);
+}
+
+static int rvu_dbg_npa_qsize_display(struct seq_file *filp, void *unused)
+{
+ return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NPA);
+}
+
+RVU_DEBUG_SEQ_FOPS(npa_qsize, npa_qsize_display, npa_qsize_write);
+
+/* Dumps given NPA Aura's context */
+static void print_npa_aura_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
+{
+ struct npa_aura_s *aura = &rsp->aura;
+
+ seq_printf(m, "W0: Pool addr\t\t%llx\n", aura->pool_addr);
+
+ seq_printf(m, "W1: ena\t\t\t%d\nW1: pool caching\t%d\n",
+ aura->ena, aura->pool_caching);
+ seq_printf(m, "W1: pool way mask\t%d\nW1: avg con\t\t%d\n",
+ aura->pool_way_mask, aura->avg_con);
+ seq_printf(m, "W1: pool drop ena\t%d\nW1: aura drop ena\t%d\n",
+ aura->pool_drop_ena, aura->aura_drop_ena);
+ seq_printf(m, "W1: bp_ena\t\t%d\nW1: aura drop\t\t%d\n",
+ aura->bp_ena, aura->aura_drop);
+ seq_printf(m, "W1: aura shift\t\t%d\nW1: avg_level\t\t%d\n",
+ aura->shift, aura->avg_level);
+
+ seq_printf(m, "W2: count\t\t%llu\nW2: nix0_bpid\t\t%d\nW2: nix1_bpid\t\t%d\n",
+ (u64)aura->count, aura->nix0_bpid, aura->nix1_bpid);
+
+ seq_printf(m, "W3: limit\t\t%llu\nW3: bp\t\t\t%d\nW3: fc_ena\t\t%d\n",
+ (u64)aura->limit, aura->bp, aura->fc_ena);
+ seq_printf(m, "W3: fc_up_crossing\t%d\nW3: fc_stype\t\t%d\n",
+ aura->fc_up_crossing, aura->fc_stype);
+ seq_printf(m, "W3: fc_hyst_bits\t%d\n", aura->fc_hyst_bits);
+
+ seq_printf(m, "W4: fc_addr\t\t%llx\n", aura->fc_addr);
+
+ seq_printf(m, "W5: pool_drop\t\t%d\nW5: update_time\t\t%d\n",
+ aura->pool_drop, aura->update_time);
+ seq_printf(m, "W5: err_int \t\t%d\nW5: err_int_ena\t\t%d\n",
+ aura->err_int, aura->err_int_ena);
+ seq_printf(m, "W5: thresh_int\t\t%d\nW5: thresh_int_ena \t%d\n",
+ aura->thresh_int, aura->thresh_int_ena);
+ seq_printf(m, "W5: thresh_up\t\t%d\nW5: thresh_qint_idx\t%d\n",
+ aura->thresh_up, aura->thresh_qint_idx);
+ seq_printf(m, "W5: err_qint_idx \t%d\n", aura->err_qint_idx);
+
+ seq_printf(m, "W6: thresh\t\t%llu\n", (u64)aura->thresh);
+}
+
+/* Dumps given NPA Pool's context */
+static void print_npa_pool_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
+{
+ struct npa_pool_s *pool = &rsp->pool;
+
+ seq_printf(m, "W0: Stack base\t\t%llx\n", pool->stack_base);
+
+ seq_printf(m, "W1: ena \t\t%d\nW1: nat_align \t\t%d\n",
+ pool->ena, pool->nat_align);
+ seq_printf(m, "W1: stack_caching\t%d\nW1: stack_way_mask\t%d\n",
+ pool->stack_caching, pool->stack_way_mask);
+ seq_printf(m, "W1: buf_offset\t\t%d\nW1: buf_size\t\t%d\n",
+ pool->buf_offset, pool->buf_size);
+
+ seq_printf(m, "W2: stack_max_pages \t%d\nW2: stack_pages\t\t%d\n",
+ pool->stack_max_pages, pool->stack_pages);
+
+ seq_printf(m, "W3: op_pc \t\t%llu\n", (u64)pool->op_pc);
+
+ seq_printf(m, "W4: stack_offset\t%d\nW4: shift\t\t%d\nW4: avg_level\t\t%d\n",
+ pool->stack_offset, pool->shift, pool->avg_level);
+ seq_printf(m, "W4: avg_con \t\t%d\nW4: fc_ena\t\t%d\nW4: fc_stype\t\t%d\n",
+ pool->avg_con, pool->fc_ena, pool->fc_stype);
+ seq_printf(m, "W4: fc_hyst_bits\t%d\nW4: fc_up_crossing\t%d\n",
+ pool->fc_hyst_bits, pool->fc_up_crossing);
+ seq_printf(m, "W4: update_time\t\t%d\n", pool->update_time);
+
+ seq_printf(m, "W5: fc_addr\t\t%llx\n", pool->fc_addr);
+
+ seq_printf(m, "W6: ptr_start\t\t%llx\n", pool->ptr_start);
+
+ seq_printf(m, "W7: ptr_end\t\t%llx\n", pool->ptr_end);
+
+ seq_printf(m, "W8: err_int\t\t%d\nW8: err_int_ena\t\t%d\n",
+ pool->err_int, pool->err_int_ena);
+ seq_printf(m, "W8: thresh_int\t\t%d\n", pool->thresh_int);
+ seq_printf(m, "W8: thresh_int_ena\t%d\nW8: thresh_up\t\t%d\n",
+ pool->thresh_int_ena, pool->thresh_up);
+ seq_printf(m, "W8: thresh_qint_idx\t%d\nW8: err_qint_idx\t\t%d\n",
+ pool->thresh_qint_idx, pool->err_qint_idx);
+}
+
+/* Reads aura/pool's ctx from admin queue */
+static int rvu_dbg_npa_ctx_display(struct seq_file *m, void *unused, int ctype)
+{
+ void (*print_npa_ctx)(struct seq_file *m, struct npa_aq_enq_rsp *rsp);
+ struct npa_aq_enq_req aq_req;
+ struct npa_aq_enq_rsp rsp;
+ struct rvu_pfvf *pfvf;
+ int aura, rc, max_id;
+ int npalf, id, all;
+ struct rvu *rvu;
+ u16 pcifunc;
+
+ rvu = m->private;
+
+ switch (ctype) {
+ case NPA_AQ_CTYPE_AURA:
+ npalf = rvu->rvu_dbg.npa_aura_ctx.lf;
+ id = rvu->rvu_dbg.npa_aura_ctx.id;
+ all = rvu->rvu_dbg.npa_aura_ctx.all;
+ break;
+
+ case NPA_AQ_CTYPE_POOL:
+ npalf = rvu->rvu_dbg.npa_pool_ctx.lf;
+ id = rvu->rvu_dbg.npa_pool_ctx.id;
+ all = rvu->rvu_dbg.npa_pool_ctx.all;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (!rvu_dbg_is_valid_lf(rvu, BLKTYPE_NPA, npalf, &pcifunc))
+ return -EINVAL;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ if (ctype == NPA_AQ_CTYPE_AURA && !pfvf->aura_ctx) {
+ seq_puts(m, "Aura context is not initialized\n");
+ return -EINVAL;
+ } else if (ctype == NPA_AQ_CTYPE_POOL && !pfvf->pool_ctx) {
+ seq_puts(m, "Pool context is not initialized\n");
+ return -EINVAL;
+ }
+
+ memset(&aq_req, 0, sizeof(struct npa_aq_enq_req));
+ aq_req.hdr.pcifunc = pcifunc;
+ aq_req.ctype = ctype;
+ aq_req.op = NPA_AQ_INSTOP_READ;
+ if (ctype == NPA_AQ_CTYPE_AURA) {
+ max_id = pfvf->aura_ctx->qsize;
+ print_npa_ctx = print_npa_aura_ctx;
+ } else {
+ max_id = pfvf->pool_ctx->qsize;
+ print_npa_ctx = print_npa_pool_ctx;
+ }
+
+ if (id < 0 || id >= max_id) {
+ seq_printf(m, "Invalid %s, valid range is 0-%d\n",
+ (ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool",
+ max_id - 1);
+ return -EINVAL;
+ }
+
+ if (all)
+ id = 0;
+ else
+ max_id = id + 1;
+
+ for (aura = id; aura < max_id; aura++) {
+ aq_req.aura_id = aura;
+ seq_printf(m, "======%s : %d=======\n",
+ (ctype == NPA_AQ_CTYPE_AURA) ? "AURA" : "POOL",
+ aq_req.aura_id);
+ rc = rvu_npa_aq_enq_inst(rvu, &aq_req, &rsp);
+ if (rc) {
+ seq_puts(m, "Failed to read context\n");
+ return -EINVAL;
+ }
+ print_npa_ctx(m, &rsp);
+ }
+ return 0;
+}
+
+static int write_npa_ctx(struct rvu *rvu, bool all,
+ int npalf, int id, int ctype)
+{
+ struct rvu_pfvf *pfvf;
+ int max_id = 0;
+ u16 pcifunc;
+
+ if (!rvu_dbg_is_valid_lf(rvu, BLKTYPE_NPA, npalf, &pcifunc))
+ return -EINVAL;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+
+ if (ctype == NPA_AQ_CTYPE_AURA) {
+ if (!pfvf->aura_ctx) {
+ dev_warn(rvu->dev, "Aura context is not initialized\n");
+ return -EINVAL;
+ }
+ max_id = pfvf->aura_ctx->qsize;
+ } else if (ctype == NPA_AQ_CTYPE_POOL) {
+ if (!pfvf->pool_ctx) {
+ dev_warn(rvu->dev, "Pool context is not initialized\n");
+ return -EINVAL;
+ }
+ max_id = pfvf->pool_ctx->qsize;
+ }
+
+ if (id < 0 || id >= max_id) {
+ dev_warn(rvu->dev, "Invalid %s, valid range is 0-%d\n",
+ (ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool",
+ max_id - 1);
+ return -EINVAL;
+ }
+
+ switch (ctype) {
+ case NPA_AQ_CTYPE_AURA:
+ rvu->rvu_dbg.npa_aura_ctx.lf = npalf;
+ rvu->rvu_dbg.npa_aura_ctx.id = id;
+ rvu->rvu_dbg.npa_aura_ctx.all = all;
+ break;
+
+ case NPA_AQ_CTYPE_POOL:
+ rvu->rvu_dbg.npa_pool_ctx.lf = npalf;
+ rvu->rvu_dbg.npa_pool_ctx.id = id;
+ rvu->rvu_dbg.npa_pool_ctx.all = all;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int parse_cmd_buffer_ctx(char *cmd_buf, size_t *count,
+ const char __user *buffer, int *npalf,
+ int *id, bool *all)
+{
+ int bytes_not_copied;
+ char *cmd_buf_tmp;
+ char *subtoken;
+ int ret;
+
+ bytes_not_copied = copy_from_user(cmd_buf, buffer, *count);
+ if (bytes_not_copied)
+ return -EFAULT;
+
+ cmd_buf[*count] = '\0';
+ cmd_buf_tmp = strchr(cmd_buf, '\n');
+
+ if (cmd_buf_tmp) {
+ *cmd_buf_tmp = '\0';
+ *count = cmd_buf_tmp - cmd_buf + 1;
+ }
+
+ subtoken = strsep(&cmd_buf, " ");
+ ret = subtoken ? kstrtoint(subtoken, 10, npalf) : -EINVAL;
+ if (ret < 0)
+ return ret;
+ subtoken = strsep(&cmd_buf, " ");
+ if (subtoken && strcmp(subtoken, "all") == 0) {
+ *all = true;
+ } else {
+ ret = subtoken ? kstrtoint(subtoken, 10, id) : -EINVAL;
+ if (ret < 0)
+ return ret;
+ }
+ if (cmd_buf)
+ return -EINVAL;
+ return ret;
+}
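+
+/* Input grammar for the *_ctx write handlers (per the usage strings they
+ * print on error):
+ *   echo "<lf> <index>" > aura_ctx    # select one context to dump on read
+ *   echo "<lf> all"     > aura_ctx    # select every context of that LF
+ */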
+
+static ssize_t rvu_dbg_npa_ctx_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos, int ctype)
+{
+ char *cmd_buf, *ctype_string = (ctype == NPA_AQ_CTYPE_AURA) ?
+ "aura" : "pool";
+ struct seq_file *seqfp = filp->private_data;
+ struct rvu *rvu = seqfp->private;
+ int npalf, id = 0, ret;
+ bool all = false;
+
+ if ((*ppos != 0) || !count)
+ return -EINVAL;
+
+ cmd_buf = kzalloc(count + 1, GFP_KERNEL);
+ if (!cmd_buf)
+ return -ENOMEM;
+ ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer,
+ &npalf, &id, &all);
+ if (ret < 0) {
+ dev_info(rvu->dev,
+ "Usage: echo <npalf> [%s number/all] > %s_ctx\n",
+ ctype_string, ctype_string);
+ goto done;
+ } else {
+ ret = write_npa_ctx(rvu, all, npalf, id, ctype);
+ }
+done:
+ kfree(cmd_buf);
+ return ret ? ret : count;
+}
+
+static ssize_t rvu_dbg_npa_aura_ctx_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return rvu_dbg_npa_ctx_write(filp, buffer, count, ppos,
+ NPA_AQ_CTYPE_AURA);
+}
+
+static int rvu_dbg_npa_aura_ctx_display(struct seq_file *filp, void *unused)
+{
+ return rvu_dbg_npa_ctx_display(filp, unused, NPA_AQ_CTYPE_AURA);
+}
+
+RVU_DEBUG_SEQ_FOPS(npa_aura_ctx, npa_aura_ctx_display, npa_aura_ctx_write);
+
+static ssize_t rvu_dbg_npa_pool_ctx_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return rvu_dbg_npa_ctx_write(filp, buffer, count, ppos,
+ NPA_AQ_CTYPE_POOL);
+}
+
+static int rvu_dbg_npa_pool_ctx_display(struct seq_file *filp, void *unused)
+{
+ return rvu_dbg_npa_ctx_display(filp, unused, NPA_AQ_CTYPE_POOL);
+}
+
+RVU_DEBUG_SEQ_FOPS(npa_pool_ctx, npa_pool_ctx_display, npa_pool_ctx_write);
+
+static void ndc_cache_stats(struct seq_file *s, int blk_addr,
+ int ctype, int transaction)
+{
+ u64 req, out_req, lat, cant_alloc;
+ struct rvu *rvu = s->private;
+ int port;
+
+ for (port = 0; port < NDC_MAX_PORT; port++) {
+ req = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_REQ_PC
+ (port, ctype, transaction));
+ lat = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_LAT_PC
+ (port, ctype, transaction));
+ out_req = rvu_read64(rvu, blk_addr,
+ NDC_AF_PORTX_RTX_RWX_OSTDN_PC
+ (port, ctype, transaction));
+ cant_alloc = rvu_read64(rvu, blk_addr,
+ NDC_AF_PORTX_RTX_CANT_ALLOC_PC
+ (port, transaction));
+ seq_printf(s, "\nPort:%d\n", port);
+ seq_printf(s, "\tTotal Requests:\t\t%lld\n", req);
+ seq_printf(s, "\tTotal Time Taken:\t%lld cycles\n", lat);
+ seq_printf(s, "\tAvg Latency:\t\t%lld cycles\n", lat / req);
+ seq_printf(s, "\tOutstanding Requests:\t%lld\n", out_req);
+ seq_printf(s, "\tCant Alloc Requests:\t%lld\n", cant_alloc);
+ }
+}
+
+static int ndc_blk_cache_stats(struct seq_file *s, int idx, int blk_addr)
+{
+ seq_puts(s, "\n***** CACHE mode read stats *****\n");
+ ndc_cache_stats(s, blk_addr, CACHING, NDC_READ_TRANS);
+ seq_puts(s, "\n***** CACHE mode write stats *****\n");
+ ndc_cache_stats(s, blk_addr, CACHING, NDC_WRITE_TRANS);
+ seq_puts(s, "\n***** BY-PASS mode read stats *****\n");
+ ndc_cache_stats(s, blk_addr, BYPASS, NDC_READ_TRANS);
+ seq_puts(s, "\n***** BY-PASS mode write stats *****\n");
+ ndc_cache_stats(s, blk_addr, BYPASS, NDC_WRITE_TRANS);
+ return 0;
+}
+
+static int rvu_dbg_npa_ndc_cache_display(struct seq_file *filp, void *unused)
+{
+ return ndc_blk_cache_stats(filp, NPA0_U, BLKADDR_NDC_NPA0);
+}
+
+RVU_DEBUG_SEQ_FOPS(npa_ndc_cache, npa_ndc_cache_display, NULL);
+
+static int ndc_blk_hits_miss_stats(struct seq_file *s, int idx, int blk_addr)
+{
+ struct rvu *rvu = s->private;
+ int bank, max_bank;
+
+ max_bank = NDC_MAX_BANK(rvu, blk_addr);
+ for (bank = 0; bank < max_bank; bank++) {
+ seq_printf(s, "BANK:%d\n", bank);
+ seq_printf(s, "\tHits:\t%lld\n",
+ (u64)rvu_read64(rvu, blk_addr,
+ NDC_AF_BANKX_HIT_PC(bank)));
+ seq_printf(s, "\tMiss:\t%lld\n",
+ (u64)rvu_read64(rvu, blk_addr,
+ NDC_AF_BANKX_MISS_PC(bank)));
+ }
+ return 0;
+}
+
+static int rvu_dbg_nix_ndc_rx_cache_display(struct seq_file *filp, void *unused)
+{
+ return ndc_blk_cache_stats(filp, NIX0_RX,
+ BLKADDR_NDC_NIX0_RX);
+}
+
+RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_cache, nix_ndc_rx_cache_display, NULL);
+
+static int rvu_dbg_nix_ndc_tx_cache_display(struct seq_file *filp, void *unused)
+{
+ return ndc_blk_cache_stats(filp, NIX0_TX,
+ BLKADDR_NDC_NIX0_TX);
+}
+
+RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_cache, nix_ndc_tx_cache_display, NULL);
+
+static int rvu_dbg_npa_ndc_hits_miss_display(struct seq_file *filp,
+ void *unused)
+{
+ return ndc_blk_hits_miss_stats(filp, NPA0_U, BLKADDR_NDC_NPA0);
+}
+
+RVU_DEBUG_SEQ_FOPS(npa_ndc_hits_miss, npa_ndc_hits_miss_display, NULL);
+
+static int rvu_dbg_nix_ndc_rx_hits_miss_display(struct seq_file *filp,
+ void *unused)
+{
+ return ndc_blk_hits_miss_stats(filp,
+ NIX0_RX, BLKADDR_NDC_NIX0_RX);
+}
+
+RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_hits_miss, nix_ndc_rx_hits_miss_display, NULL);
+
+static int rvu_dbg_nix_ndc_tx_hits_miss_display(struct seq_file *filp,
+ void *unused)
+{
+ return ndc_blk_hits_miss_stats(filp,
+ NIX0_TX, BLKADDR_NDC_NIX0_TX);
+}
+
+RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_hits_miss, nix_ndc_tx_hits_miss_display, NULL);
+
+/* Dumps given nix_sq's context */
+static void print_nix_sq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
+{
+ struct nix_sq_ctx_s *sq_ctx = &rsp->sq;
+
+ seq_printf(m, "W0: sqe_way_mask \t\t%d\nW0: cq \t\t\t\t%d\n",
+ sq_ctx->sqe_way_mask, sq_ctx->cq);
+ seq_printf(m, "W0: sdp_mcast \t\t\t%d\nW0: substream \t\t\t0x%03x\n",
+ sq_ctx->sdp_mcast, sq_ctx->substream);
+ seq_printf(m, "W0: qint_idx \t\t\t%d\nW0: ena \t\t\t%d\n\n",
+ sq_ctx->qint_idx, sq_ctx->ena);
+
+ seq_printf(m, "W1: sqb_count \t\t\t%d\nW1: default_chan \t\t%d\n",
+ sq_ctx->sqb_count, sq_ctx->default_chan);
+ seq_printf(m, "W1: smq_rr_quantum \t\t%d\nW1: sso_ena \t\t\t%d\n",
+ sq_ctx->smq_rr_quantum, sq_ctx->sso_ena);
+ seq_printf(m, "W1: xoff \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: smq\t\t\t\t%d\n\n",
+ sq_ctx->xoff, sq_ctx->cq_ena, sq_ctx->smq);
+
+ seq_printf(m, "W2: sqe_stype \t\t\t%d\nW2: sq_int_ena \t\t\t%d\n",
+ sq_ctx->sqe_stype, sq_ctx->sq_int_ena);
+ seq_printf(m, "W2: sq_int \t\t\t%d\nW2: sqb_aura \t\t\t%d\n",
+ sq_ctx->sq_int, sq_ctx->sqb_aura);
+ seq_printf(m, "W2: smq_rr_count \t\t%d\n\n", sq_ctx->smq_rr_count);
+
+ seq_printf(m, "W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d\n",
+ sq_ctx->smq_next_sq_vld, sq_ctx->smq_pend);
+ seq_printf(m, "W3: smenq_next_sqb_vld \t\t%d\nW3: head_offset\t\t\t%d\n",
+ sq_ctx->smenq_next_sqb_vld, sq_ctx->head_offset);
+ seq_printf(m, "W3: smenq_offset\t\t%d\nW3: tail_offset\t\t\t%d\n",
+ sq_ctx->smenq_offset, sq_ctx->tail_offset);
+ seq_printf(m, "W3: smq_lso_segnum \t\t%d\nW3: smq_next_sq\t\t\t%d\n",
+ sq_ctx->smq_lso_segnum, sq_ctx->smq_next_sq);
+ seq_printf(m, "W3: mnq_dis \t\t\t%d\nW3: lmt_dis \t\t\t%d\n",
+ sq_ctx->mnq_dis, sq_ctx->lmt_dis);
+ seq_printf(m, "W3: cq_limit\t\t\t%d\nW3: max_sqe_size\t\t%d\n\n",
+ sq_ctx->cq_limit, sq_ctx->max_sqe_size);
+
+ seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb);
+ seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb);
+ seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb);
+ seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n",
+ sq_ctx->smenq_next_sqb);
+
+ seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb);
+
+ seq_printf(m, "W9: vfi_lso_vld\t\t\t%d\nW9: vfi_lso_vlan1_ins_ena\t%d\n",
+ sq_ctx->vfi_lso_vld, sq_ctx->vfi_lso_vlan1_ins_ena);
+ seq_printf(m, "W9: vfi_lso_vlan0_ins_ena\t%d\nW9: vfi_lso_mps\t\t\t%d\n",
+ sq_ctx->vfi_lso_vlan0_ins_ena, sq_ctx->vfi_lso_mps);
+ seq_printf(m, "W9: vfi_lso_sb\t\t\t%d\nW9: vfi_lso_sizem1\t\t%d\n",
+ sq_ctx->vfi_lso_sb, sq_ctx->vfi_lso_sizem1);
+ seq_printf(m, "W9: vfi_lso_total\t\t%d\n\n", sq_ctx->vfi_lso_total);
+
+ seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n",
+ (u64)sq_ctx->scm_lso_rem);
+ seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs);
+ seq_printf(m, "W12: pkts \t\t\t%llu\n\n", (u64)sq_ctx->pkts);
+ seq_printf(m, "W14: dropped_octs \t\t%llu\n\n",
+ (u64)sq_ctx->dropped_octs);
+ seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n",
+ (u64)sq_ctx->dropped_pkts);
+}
+
+/* Dumps given nix_rq's context */
+static void print_nix_rq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
+{
+ struct nix_rq_ctx_s *rq_ctx = &rsp->rq;
+
+ seq_printf(m, "W0: wqe_aura \t\t\t%d\nW0: substream \t\t\t0x%03x\n",
+ rq_ctx->wqe_aura, rq_ctx->substream);
+ seq_printf(m, "W0: cq \t\t\t\t%d\nW0: ena_wqwd \t\t\t%d\n",
+ rq_ctx->cq, rq_ctx->ena_wqwd);
+ seq_printf(m, "W0: ipsech_ena \t\t\t%d\nW0: sso_ena \t\t\t%d\n",
+ rq_ctx->ipsech_ena, rq_ctx->sso_ena);
+ seq_printf(m, "W0: ena \t\t\t%d\n\n", rq_ctx->ena);
+
+ seq_printf(m, "W1: lpb_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d\n",
+ rq_ctx->lpb_drop_ena, rq_ctx->spb_drop_ena);
+ seq_printf(m, "W1: xqe_drop_ena \t\t%d\nW1: wqe_caching \t\t%d\n",
+ rq_ctx->xqe_drop_ena, rq_ctx->wqe_caching);
+ seq_printf(m, "W1: pb_caching \t\t\t%d\nW1: sso_tt \t\t\t%d\n",
+ rq_ctx->pb_caching, rq_ctx->sso_tt);
+ seq_printf(m, "W1: sso_grp \t\t\t%d\nW1: lpb_aura \t\t\t%d\n",
+ rq_ctx->sso_grp, rq_ctx->lpb_aura);
+ seq_printf(m, "W1: spb_aura \t\t\t%d\n\n", rq_ctx->spb_aura);
+
+ seq_printf(m, "W2: xqe_hdr_split \t\t%d\nW2: xqe_imm_copy \t\t%d\n",
+ rq_ctx->xqe_hdr_split, rq_ctx->xqe_imm_copy);
+ seq_printf(m, "W2: xqe_imm_size \t\t%d\nW2: later_skip \t\t\t%d\n",
+ rq_ctx->xqe_imm_size, rq_ctx->later_skip);
+ seq_printf(m, "W2: first_skip \t\t\t%d\nW2: lpb_sizem1 \t\t\t%d\n",
+ rq_ctx->first_skip, rq_ctx->lpb_sizem1);
+ seq_printf(m, "W2: spb_ena \t\t\t%d\nW2: wqe_skip \t\t\t%d\n",
+ rq_ctx->spb_ena, rq_ctx->wqe_skip);
+ seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n\n", rq_ctx->spb_sizem1);
+
+ seq_printf(m, "W3: spb_pool_pass \t\t%d\nW3: spb_pool_drop \t\t%d\n",
+ rq_ctx->spb_pool_pass, rq_ctx->spb_pool_drop);
+ seq_printf(m, "W3: spb_aura_pass \t\t%d\nW3: spb_aura_drop \t\t%d\n",
+ rq_ctx->spb_aura_pass, rq_ctx->spb_aura_drop);
+ seq_printf(m, "W3: wqe_pool_pass \t\t%d\nW3: wqe_pool_drop \t\t%d\n",
+ rq_ctx->wqe_pool_pass, rq_ctx->wqe_pool_drop);
+ seq_printf(m, "W3: xqe_pass \t\t\t%d\nW3: xqe_drop \t\t\t%d\n\n",
+ rq_ctx->xqe_pass, rq_ctx->xqe_drop);
+
+ seq_printf(m, "W4: qint_idx \t\t\t%d\nW4: rq_int_ena \t\t\t%d\n",
+ rq_ctx->qint_idx, rq_ctx->rq_int_ena);
+ seq_printf(m, "W4: rq_int \t\t\t%d\nW4: lpb_pool_pass \t\t%d\n",
+ rq_ctx->rq_int, rq_ctx->lpb_pool_pass);
+ seq_printf(m, "W4: lpb_pool_drop \t\t%d\nW4: lpb_aura_pass \t\t%d\n",
+ rq_ctx->lpb_pool_drop, rq_ctx->lpb_aura_pass);
+ seq_printf(m, "W4: lpb_aura_drop \t\t%d\n\n", rq_ctx->lpb_aura_drop);
+
+ seq_printf(m, "W5: flow_tagw \t\t\t%d\nW5: bad_utag \t\t\t%d\n",
+ rq_ctx->flow_tagw, rq_ctx->bad_utag);
+ seq_printf(m, "W5: good_utag \t\t\t%d\nW5: ltag \t\t\t%d\n\n",
+ rq_ctx->good_utag, rq_ctx->ltag);
+
+ seq_printf(m, "W6: octs \t\t\t%llu\n\n", (u64)rq_ctx->octs);
+ seq_printf(m, "W7: pkts \t\t\t%llu\n\n", (u64)rq_ctx->pkts);
+ seq_printf(m, "W8: drop_octs \t\t\t%llu\n\n", (u64)rq_ctx->drop_octs);
+ seq_printf(m, "W9: drop_pkts \t\t\t%llu\n\n", (u64)rq_ctx->drop_pkts);
+ seq_printf(m, "W10: re_pkts \t\t\t%llu\n", (u64)rq_ctx->re_pkts);
+}
+
+/* Dumps given nix_cq's context */
+static void print_nix_cq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
+{
+ struct nix_cq_ctx_s *cq_ctx = &rsp->cq;
+
+ seq_printf(m, "W0: base \t\t\t%llx\n\n", cq_ctx->base);
+
+ seq_printf(m, "W1: wrptr \t\t\t%llx\n", (u64)cq_ctx->wrptr);
+ seq_printf(m, "W1: avg_con \t\t\t%d\nW1: cint_idx \t\t\t%d\n",
+ cq_ctx->avg_con, cq_ctx->cint_idx);
+ seq_printf(m, "W1: cq_err \t\t\t%d\nW1: qint_idx \t\t\t%d\n",
+ cq_ctx->cq_err, cq_ctx->qint_idx);
+ seq_printf(m, "W1: bpid \t\t\t%d\nW1: bp_ena \t\t\t%d\n\n",
+ cq_ctx->bpid, cq_ctx->bp_ena);
+
+ seq_printf(m, "W2: update_time \t\t%d\nW2:avg_level \t\t\t%d\n",
+ cq_ctx->update_time, cq_ctx->avg_level);
+ seq_printf(m, "W2: head \t\t\t%d\nW2:tail \t\t\t%d\n\n",
+ cq_ctx->head, cq_ctx->tail);
+
+ seq_printf(m, "W3: cq_err_int_ena \t\t%d\nW3:cq_err_int \t\t\t%d\n",
+ cq_ctx->cq_err_int_ena, cq_ctx->cq_err_int);
+ seq_printf(m, "W3: qsize \t\t\t%d\nW3:caching \t\t\t%d\n",
+ cq_ctx->qsize, cq_ctx->caching);
+ seq_printf(m, "W3: substream \t\t\t0x%03x\nW3: ena \t\t\t%d\n",
+ cq_ctx->substream, cq_ctx->ena);
+ seq_printf(m, "W3: drop_ena \t\t\t%d\nW3: drop \t\t\t%d\n",
+ cq_ctx->drop_ena, cq_ctx->drop);
+ seq_printf(m, "W3: bp \t\t\t\t%d\n\n", cq_ctx->bp);
+}
+
+static int rvu_dbg_nix_queue_ctx_display(struct seq_file *filp,
+ void *unused, int ctype)
+{
+ void (*print_nix_ctx)(struct seq_file *filp,
+ struct nix_aq_enq_rsp *rsp) = NULL;
+ struct rvu *rvu = filp->private;
+ struct nix_aq_enq_req aq_req;
+ struct nix_aq_enq_rsp rsp;
+ char *ctype_string = NULL;
+ int qidx, rc, max_id = 0;
+ struct rvu_pfvf *pfvf;
+ int nixlf, id, all;
+ u16 pcifunc;
+
+ switch (ctype) {
+ case NIX_AQ_CTYPE_CQ:
+ nixlf = rvu->rvu_dbg.nix_cq_ctx.lf;
+ id = rvu->rvu_dbg.nix_cq_ctx.id;
+ all = rvu->rvu_dbg.nix_cq_ctx.all;
+ break;
+
+ case NIX_AQ_CTYPE_SQ:
+ nixlf = rvu->rvu_dbg.nix_sq_ctx.lf;
+ id = rvu->rvu_dbg.nix_sq_ctx.id;
+ all = rvu->rvu_dbg.nix_sq_ctx.all;
+ break;
+
+ case NIX_AQ_CTYPE_RQ:
+ nixlf = rvu->rvu_dbg.nix_rq_ctx.lf;
+ id = rvu->rvu_dbg.nix_rq_ctx.id;
+ all = rvu->rvu_dbg.nix_rq_ctx.all;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (!rvu_dbg_is_valid_lf(rvu, BLKTYPE_NIX, nixlf, &pcifunc))
+ return -EINVAL;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ if (ctype == NIX_AQ_CTYPE_SQ && !pfvf->sq_ctx) {
+ seq_puts(filp, "SQ context is not initialized\n");
+ return -EINVAL;
+ } else if (ctype == NIX_AQ_CTYPE_RQ && !pfvf->rq_ctx) {
+ seq_puts(filp, "RQ context is not initialized\n");
+ return -EINVAL;
+ } else if (ctype == NIX_AQ_CTYPE_CQ && !pfvf->cq_ctx) {
+ seq_puts(filp, "CQ context is not initialized\n");
+ return -EINVAL;
+ }
+
+ if (ctype == NIX_AQ_CTYPE_SQ) {
+ max_id = pfvf->sq_ctx->qsize;
+ ctype_string = "sq";
+ print_nix_ctx = print_nix_sq_ctx;
+ } else if (ctype == NIX_AQ_CTYPE_RQ) {
+ max_id = pfvf->rq_ctx->qsize;
+ ctype_string = "rq";
+ print_nix_ctx = print_nix_rq_ctx;
+ } else if (ctype == NIX_AQ_CTYPE_CQ) {
+ max_id = pfvf->cq_ctx->qsize;
+ ctype_string = "cq";
+ print_nix_ctx = print_nix_cq_ctx;
+ }
+
+ memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
+ aq_req.hdr.pcifunc = pcifunc;
+ aq_req.ctype = ctype;
+ aq_req.op = NIX_AQ_INSTOP_READ;
+ if (all)
+ id = 0;
+ else
+ max_id = id + 1;
+ for (qidx = id; qidx < max_id; qidx++) {
+ aq_req.qidx = qidx;
+ seq_printf(filp, "=====%s_ctx for nixlf:%d and qidx:%d is=====\n",
+ ctype_string, nixlf, aq_req.qidx);
+ rc = rvu_mbox_handler_nix_aq_enq(rvu, &aq_req, &rsp);
+ if (rc) {
+ seq_puts(filp, "Failed to read the context\n");
+ return -EINVAL;
+ }
+ print_nix_ctx(filp, &rsp);
+ }
+ return 0;
+}
+
+static int write_nix_queue_ctx(struct rvu *rvu, bool all, int nixlf,
+ int id, int ctype, char *ctype_string)
+{
+ struct rvu_pfvf *pfvf;
+ int max_id = 0;
+ u16 pcifunc;
+
+ if (!rvu_dbg_is_valid_lf(rvu, BLKTYPE_NIX, nixlf, &pcifunc))
+ return -EINVAL;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+
+ if (ctype == NIX_AQ_CTYPE_SQ) {
+ if (!pfvf->sq_ctx) {
+ dev_warn(rvu->dev, "SQ context is not initialized\n");
+ return -EINVAL;
+ }
+ max_id = pfvf->sq_ctx->qsize;
+ } else if (ctype == NIX_AQ_CTYPE_RQ) {
+ if (!pfvf->rq_ctx) {
+ dev_warn(rvu->dev, "RQ context is not initialized\n");
+ return -EINVAL;
+ }
+ max_id = pfvf->rq_ctx->qsize;
+ } else if (ctype == NIX_AQ_CTYPE_CQ) {
+ if (!pfvf->cq_ctx) {
+ dev_warn(rvu->dev, "CQ context is not initialized\n");
+ return -EINVAL;
+ }
+ max_id = pfvf->cq_ctx->qsize;
+ }
+
+ if (id < 0 || id >= max_id) {
+ dev_warn(rvu->dev, "Invalid %s_ctx valid range 0-%d\n",
+ ctype_string, max_id - 1);
+ return -EINVAL;
+ }
+ switch (ctype) {
+ case NIX_AQ_CTYPE_CQ:
+ rvu->rvu_dbg.nix_cq_ctx.lf = nixlf;
+ rvu->rvu_dbg.nix_cq_ctx.id = id;
+ rvu->rvu_dbg.nix_cq_ctx.all = all;
+ break;
+
+ case NIX_AQ_CTYPE_SQ:
+ rvu->rvu_dbg.nix_sq_ctx.lf = nixlf;
+ rvu->rvu_dbg.nix_sq_ctx.id = id;
+ rvu->rvu_dbg.nix_sq_ctx.all = all;
+ break;
+
+ case NIX_AQ_CTYPE_RQ:
+ rvu->rvu_dbg.nix_rq_ctx.lf = nixlf;
+ rvu->rvu_dbg.nix_rq_ctx.id = id;
+ rvu->rvu_dbg.nix_rq_ctx.all = all;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static ssize_t rvu_dbg_nix_queue_ctx_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos,
+ int ctype)
+{
+ struct seq_file *m = filp->private_data;
+ struct rvu *rvu = m->private;
+ char *cmd_buf, *ctype_string;
+ int nixlf, id = 0, ret;
+ bool all = false;
+
+ if ((*ppos != 0) || !count)
+ return -EINVAL;
+
+ switch (ctype) {
+ case NIX_AQ_CTYPE_SQ:
+ ctype_string = "sq";
+ break;
+ case NIX_AQ_CTYPE_RQ:
+ ctype_string = "rq";
+ break;
+ case NIX_AQ_CTYPE_CQ:
+ ctype_string = "cq";
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ cmd_buf = kzalloc(count + 1, GFP_KERNEL);
+ if (!cmd_buf)
+ return -ENOMEM;
+
+ ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer,
+ &nixlf, &id, &all);
+ if (ret < 0) {
+ dev_info(rvu->dev,
+ "Usage: echo <nixlf> [%s number/all] > %s_ctx\n",
+ ctype_string, ctype_string);
+ goto done;
+ } else {
+ ret = write_nix_queue_ctx(rvu, all, nixlf, id, ctype,
+ ctype_string);
+ }
+done:
+ kfree(cmd_buf);
+ return ret ? ret : count;
+}
+
+static ssize_t rvu_dbg_nix_sq_ctx_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
+ NIX_AQ_CTYPE_SQ);
+}
+
+static int rvu_dbg_nix_sq_ctx_display(struct seq_file *filp, void *unused)
+{
+ return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_SQ);
+}
+
+RVU_DEBUG_SEQ_FOPS(nix_sq_ctx, nix_sq_ctx_display, nix_sq_ctx_write);
+
+static ssize_t rvu_dbg_nix_rq_ctx_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
+ NIX_AQ_CTYPE_RQ);
+}
+
+static int rvu_dbg_nix_rq_ctx_display(struct seq_file *filp, void *unused)
+{
+ return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_RQ);
+}
+
+RVU_DEBUG_SEQ_FOPS(nix_rq_ctx, nix_rq_ctx_display, nix_rq_ctx_write);
+
+static ssize_t rvu_dbg_nix_cq_ctx_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
+ NIX_AQ_CTYPE_CQ);
+}
+
+static int rvu_dbg_nix_cq_ctx_display(struct seq_file *filp, void *unused)
+{
+ return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_CQ);
+}
+
+RVU_DEBUG_SEQ_FOPS(nix_cq_ctx, nix_cq_ctx_display, nix_cq_ctx_write);
+
+static void print_nix_qctx_qsize(struct seq_file *filp, int qsize,
+ unsigned long *bmap, char *qtype)
+{
+ char *buf;
+
+ buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!buf)
+ return;
+
+ bitmap_print_to_pagebuf(false, buf, bmap, qsize);
+ seq_printf(filp, "%s context count : %d\n", qtype, qsize);
+ seq_printf(filp, "%s context ena/dis bitmap : %s\n",
+ qtype, buf);
+ kfree(buf);
+}
+
+static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf)
+{
+ if (!pfvf->cq_ctx)
+ seq_puts(filp, "cq context is not initialized\n");
+ else
+ print_nix_qctx_qsize(filp, pfvf->cq_ctx->qsize, pfvf->cq_bmap,
+ "cq");
+
+ if (!pfvf->rq_ctx)
+ seq_puts(filp, "rq context is not initialized\n");
+ else
+ print_nix_qctx_qsize(filp, pfvf->rq_ctx->qsize, pfvf->rq_bmap,
+ "rq");
+
+ if (!pfvf->sq_ctx)
+ seq_puts(filp, "sq context is not initialized\n");
+ else
+ print_nix_qctx_qsize(filp, pfvf->sq_ctx->qsize, pfvf->sq_bmap,
+ "sq");
+}
+
+static ssize_t rvu_dbg_nix_qsize_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return rvu_dbg_qsize_write(filp, buffer, count, ppos,
+ BLKTYPE_NIX);
+}
+
+static int rvu_dbg_nix_qsize_display(struct seq_file *filp, void *unused)
+{
+ return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NIX);
+}
+
+RVU_DEBUG_SEQ_FOPS(nix_qsize, nix_qsize_display, nix_qsize_write);
+
+static void rvu_dbg_nix_init(struct rvu *rvu)
+{
+ const struct device *dev = &rvu->pdev->dev;
+ struct dentry *pfile;
+
+ rvu->rvu_dbg.nix = debugfs_create_dir("nix", rvu->rvu_dbg.root);
+ if (!rvu->rvu_dbg.nix) {
+ dev_err(rvu->dev, "create debugfs dir failed for nix\n");
+ return;
+ }
+
+ pfile = debugfs_create_file("sq_ctx", 0600, rvu->rvu_dbg.nix, rvu,
+ &rvu_dbg_nix_sq_ctx_fops);
+ if (!pfile)
+ goto create_failed;
+
+ pfile = debugfs_create_file("rq_ctx", 0600, rvu->rvu_dbg.nix, rvu,
+ &rvu_dbg_nix_rq_ctx_fops);
+ if (!pfile)
+ goto create_failed;
+
+ pfile = debugfs_create_file("cq_ctx", 0600, rvu->rvu_dbg.nix, rvu,
+ &rvu_dbg_nix_cq_ctx_fops);
+ if (!pfile)
+ goto create_failed;
+
+ pfile = debugfs_create_file("ndc_tx_cache", 0600, rvu->rvu_dbg.nix, rvu,
+ &rvu_dbg_nix_ndc_tx_cache_fops);
+ if (!pfile)
+ goto create_failed;
+
+ pfile = debugfs_create_file("ndc_rx_cache", 0600, rvu->rvu_dbg.nix, rvu,
+ &rvu_dbg_nix_ndc_rx_cache_fops);
+ if (!pfile)
+ goto create_failed;
+
+ pfile = debugfs_create_file("ndc_tx_hits_miss", 0600, rvu->rvu_dbg.nix,
+ rvu, &rvu_dbg_nix_ndc_tx_hits_miss_fops);
+ if (!pfile)
+ goto create_failed;
+
+ pfile = debugfs_create_file("ndc_rx_hits_miss", 0600, rvu->rvu_dbg.nix,
+ rvu, &rvu_dbg_nix_ndc_rx_hits_miss_fops);
+ if (!pfile)
+ goto create_failed;
+
+ pfile = debugfs_create_file("qsize", 0600, rvu->rvu_dbg.nix, rvu,
+ &rvu_dbg_nix_qsize_fops);
+ if (!pfile)
+ goto create_failed;
+
+ return;
+create_failed:
+ dev_err(dev, "Failed to create debugfs dir/file for NIX\n");
+ debugfs_remove_recursive(rvu->rvu_dbg.nix);
+}
+
+static void rvu_dbg_npa_init(struct rvu *rvu)
+{
+ const struct device *dev = &rvu->pdev->dev;
+ struct dentry *pfile;
+
+ rvu->rvu_dbg.npa = debugfs_create_dir("npa", rvu->rvu_dbg.root);
+ if (!rvu->rvu_dbg.npa)
+ return;
+
+ pfile = debugfs_create_file("qsize", 0600, rvu->rvu_dbg.npa, rvu,
+ &rvu_dbg_npa_qsize_fops);
+ if (!pfile)
+ goto create_failed;
+
+ pfile = debugfs_create_file("aura_ctx", 0600, rvu->rvu_dbg.npa, rvu,
+ &rvu_dbg_npa_aura_ctx_fops);
+ if (!pfile)
+ goto create_failed;
+
+ pfile = debugfs_create_file("pool_ctx", 0600, rvu->rvu_dbg.npa, rvu,
+ &rvu_dbg_npa_pool_ctx_fops);
+ if (!pfile)
+ goto create_failed;
+
+ pfile = debugfs_create_file("ndc_cache", 0600, rvu->rvu_dbg.npa, rvu,
+ &rvu_dbg_npa_ndc_cache_fops);
+ if (!pfile)
+ goto create_failed;
+
+ pfile = debugfs_create_file("ndc_hits_miss", 0600, rvu->rvu_dbg.npa,
+ rvu, &rvu_dbg_npa_ndc_hits_miss_fops);
+ if (!pfile)
+ goto create_failed;
+
+ return;
+
+create_failed:
+ dev_err(dev, "Failed to create debugfs dir/file for NPA\n");
+ debugfs_remove_recursive(rvu->rvu_dbg.npa);
+}
+
+#define PRINT_CGX_CUML_NIXRX_STATUS(idx, name) \
+ ({ \
+ u64 cnt; \
+ err = rvu_cgx_nix_cuml_stats(rvu, cgxd, lmac_id, (idx), \
+ NIX_STATS_RX, &(cnt)); \
+ if (!err) \
+ seq_printf(s, "%s: %llu\n", name, cnt); \
+ cnt; \
+ })
+
+#define PRINT_CGX_CUML_NIXTX_STATUS(idx, name) \
+ ({ \
+ u64 cnt; \
+ err = rvu_cgx_nix_cuml_stats(rvu, cgxd, lmac_id, (idx), \
+ NIX_STATS_TX, &(cnt)); \
+ if (!err) \
+ seq_printf(s, "%s: %llu\n", name, cnt); \
+ cnt; \
+ })
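+
+/* Both macros are GNU statement expressions that evaluate to 'cnt', so a
+ * caller can print a counter and capture its value in one step (used below
+ * to compute rx_frames/tx_frames as ucast + mcast + bcast). They rely on
+ * 'err', 'rvu', 'cgxd', 'lmac_id' and 's' being in scope at the call site.
+ */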
+
+static int cgx_print_stats(struct seq_file *s, int lmac_id)
+{
+ struct cgx_link_user_info linfo;
+ void *cgxd = s->private;
+ u64 ucast, mcast, bcast;
+ int stat = 0, err = 0;
+ u64 tx_stat, rx_stat;
+ struct rvu *rvu;
+
+ rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVID_OCTEONTX2_RVU_AF, NULL));
+ if (!rvu)
+ return -ENODEV;
+
+ /* Link status */
+ seq_puts(s, "\n=======Link Status======\n\n");
+ err = cgx_get_link_info(cgxd, lmac_id, &linfo);
+ if (err)
+ seq_puts(s, "Failed to read link status\n");
+ else
+ seq_printf(s, "\nLink is %s %d Mbps\n\n",
+ linfo.link_up ? "UP" : "DOWN", linfo.speed);
+
+ /* Rx stats */
+ seq_puts(s, "\n=======NIX RX_STATS(CGX port level)======\n\n");
+ ucast = PRINT_CGX_CUML_NIXRX_STATUS(RX_UCAST, "rx_ucast_frames");
+ if (err)
+ return err;
+ mcast = PRINT_CGX_CUML_NIXRX_STATUS(RX_MCAST, "rx_mcast_frames");
+ if (err)
+ return err;
+ bcast = PRINT_CGX_CUML_NIXRX_STATUS(RX_BCAST, "rx_bcast_frames");
+ if (err)
+ return err;
+ seq_printf(s, "rx_frames: %llu\n", ucast + mcast + bcast);
+ PRINT_CGX_CUML_NIXRX_STATUS(RX_OCTS, "rx_bytes");
+ if (err)
+ return err;
+ PRINT_CGX_CUML_NIXRX_STATUS(RX_DROP, "rx_drops");
+ if (err)
+ return err;
+ PRINT_CGX_CUML_NIXRX_STATUS(RX_ERR, "rx_errors");
+ if (err)
+ return err;
+
+ /* Tx stats */
+ seq_puts(s, "\n=======NIX TX_STATS(CGX port level)======\n\n");
+ ucast = PRINT_CGX_CUML_NIXTX_STATUS(TX_UCAST, "tx_ucast_frames");
+ if (err)
+ return err;
+ mcast = PRINT_CGX_CUML_NIXTX_STATUS(TX_MCAST, "tx_mcast_frames");
+ if (err)
+ return err;
+ bcast = PRINT_CGX_CUML_NIXTX_STATUS(TX_BCAST, "tx_bcast_frames");
+ if (err)
+ return err;
+ seq_printf(s, "tx_frames: %llu\n", ucast + mcast + bcast);
+ PRINT_CGX_CUML_NIXTX_STATUS(TX_OCTS, "tx_bytes");
+ if (err)
+ return err;
+ PRINT_CGX_CUML_NIXTX_STATUS(TX_DROP, "tx_drops");
+ if (err)
+ return err;
+
+ /* Rx stats */
+ seq_puts(s, "\n=======CGX RX_STATS======\n\n");
+ while (stat < CGX_RX_STATS_COUNT) {
+ err = cgx_get_rx_stats(cgxd, lmac_id, stat, &rx_stat);
+ if (err)
+ return err;
+ seq_printf(s, "%s: %llu\n", cgx_rx_stats_fields[stat], rx_stat);
+ stat++;
+ }
+
+ /* Tx stats */
+ stat = 0;
+ seq_puts(s, "\n=======CGX TX_STATS======\n\n");
+ while (stat < CGX_TX_STATS_COUNT) {
+ err = cgx_get_tx_stats(cgxd, lmac_id, stat, &tx_stat);
+ if (err)
+ return err;
+ seq_printf(s, "%s: %llu\n", cgx_tx_stats_fields[stat], tx_stat);
+ stat++;
+ }
+
+ return err;
+}
+
+static int rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused)
+{
+ struct dentry *current_dir;
+ int err, lmac_id;
+ char *buf;
+
+ current_dir = filp->file->f_path.dentry->d_parent;
+ buf = strrchr(current_dir->d_name.name, 'c');
+ if (!buf)
+ return -EINVAL;
+
+ err = kstrtoint(buf + 1, 10, &lmac_id);
+ if (!err) {
+ err = cgx_print_stats(filp, lmac_id);
+ if (err)
+ return err;
+ }
+ return err;
+}
+
+RVU_DEBUG_SEQ_FOPS(cgx_stat, cgx_stat_display, NULL);
+
+static void rvu_dbg_cgx_init(struct rvu *rvu)
+{
+ const struct device *dev = &rvu->pdev->dev;
+ struct dentry *pfile;
+ int i, lmac_id;
+ char dname[20];
+ void *cgx;
+
+ rvu->rvu_dbg.cgx_root = debugfs_create_dir("cgx", rvu->rvu_dbg.root);
+
+ for (i = 0; i < cgx_get_cgxcnt_max(); i++) {
+ cgx = rvu_cgx_pdata(i, rvu);
+ if (!cgx)
+ continue;
+ /* cgx debugfs dir */
+ sprintf(dname, "cgx%d", i);
+ rvu->rvu_dbg.cgx = debugfs_create_dir(dname,
+ rvu->rvu_dbg.cgx_root);
+ for (lmac_id = 0; lmac_id < cgx_get_lmac_cnt(cgx); lmac_id++) {
+ /* lmac debugfs dir */
+ sprintf(dname, "lmac%d", lmac_id);
+ rvu->rvu_dbg.lmac =
+ debugfs_create_dir(dname, rvu->rvu_dbg.cgx);
+
+ pfile = debugfs_create_file("stats", 0600,
+ rvu->rvu_dbg.lmac, cgx,
+ &rvu_dbg_cgx_stat_fops);
+ if (!pfile)
+ goto create_failed;
+ }
+ }
+ return;
+
+create_failed:
+ dev_err(dev, "Failed to create debugfs dir/file for CGX\n");
+ debugfs_remove_recursive(rvu->rvu_dbg.cgx_root);
+}
+
+/* NPC debugfs APIs */
+static void rvu_print_npc_mcam_info(struct seq_file *s,
+ u16 pcifunc, int blkaddr)
+{
+ struct rvu *rvu = s->private;
+ int entry_acnt, entry_ecnt;
+ int cntr_acnt, cntr_ecnt;
+
+ /* Skip PF0 */
+ if (!pcifunc)
+ return;
+ rvu_npc_get_mcam_entry_alloc_info(rvu, pcifunc, blkaddr,
+ &entry_acnt, &entry_ecnt);
+ rvu_npc_get_mcam_counter_alloc_info(rvu, pcifunc, blkaddr,
+ &cntr_acnt, &cntr_ecnt);
+ if (!entry_acnt && !cntr_acnt)
+ return;
+
+ if (!(pcifunc & RVU_PFVF_FUNC_MASK))
+ seq_printf(s, "\n\t\t Device \t\t: PF%d\n",
+ rvu_get_pf(pcifunc));
+ else
+ seq_printf(s, "\n\t\t Device \t\t: PF%d VF%d\n",
+ rvu_get_pf(pcifunc),
+ (pcifunc & RVU_PFVF_FUNC_MASK) - 1);
+
+ if (entry_acnt) {
+ seq_printf(s, "\t\t Entries allocated \t: %d\n", entry_acnt);
+ seq_printf(s, "\t\t Entries enabled \t: %d\n", entry_ecnt);
+ }
+ if (cntr_acnt) {
+ seq_printf(s, "\t\t Counters allocated \t: %d\n", cntr_acnt);
+ seq_printf(s, "\t\t Counters enabled \t: %d\n", cntr_ecnt);
+ }
+}
+
+static int rvu_dbg_npc_mcam_info_display(struct seq_file *filp, void *unused)
+{
+ struct rvu *rvu = filp->private;
+ int pf, vf, numvfs, blkaddr;
+ struct npc_mcam *mcam;
+ u16 pcifunc;
+ u64 cfg;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return -ENODEV;
+
+ mcam = &rvu->hw->mcam;
+
+ seq_puts(filp, "\nNPC MCAM info:\n");
+ /* MCAM keywidth on receive and transmit sides */
+ cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX));
+ cfg = (cfg >> 32) & 0x07;
+ seq_printf(filp, "\t\t RX keywidth \t: %s\n", (cfg == NPC_MCAM_KEY_X1) ?
+ "112bits" : ((cfg == NPC_MCAM_KEY_X2) ?
+ "224bits" : "448bits"));
+ cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_TX));
+ cfg = (cfg >> 32) & 0x07;
+ seq_printf(filp, "\t\t TX keywidth \t: %s\n", (cfg == NPC_MCAM_KEY_X1) ?
+ "112bits" : ((cfg == NPC_MCAM_KEY_X2) ?
+ "224bits" : "448bits"));
+
+ mutex_lock(&mcam->lock);
+ /* MCAM entries */
+ seq_printf(filp, "\n\t\t MCAM entries \t: %d\n", mcam->total_entries);
+ seq_printf(filp, "\t\t Reserved \t: %d\n",
+ mcam->total_entries - mcam->bmap_entries);
+ seq_printf(filp, "\t\t Available \t: %d\n", mcam->bmap_fcnt);
+
+ /* MCAM counters */
+ cfg = rvu_read64(rvu, blkaddr, NPC_AF_CONST);
+ cfg = (cfg >> 48) & 0xFFFF;
+ seq_printf(filp, "\n\t\t MCAM counters \t: %lld\n", cfg);
+ seq_printf(filp, "\t\t Reserved \t: %lld\n", cfg - mcam->counters.max);
+ seq_printf(filp, "\t\t Available \t: %d\n",
+ rvu_rsrc_free_count(&mcam->counters));
+
+ if (mcam->bmap_entries == mcam->bmap_fcnt) {
+ mutex_unlock(&mcam->lock);
+ return 0;
+ }
+
+ seq_puts(filp, "\n\t\t Current allocation\n");
+ seq_puts(filp, "\t\t====================\n");
+ for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
+ pcifunc = (pf << RVU_PFVF_PF_SHIFT);
+ rvu_print_npc_mcam_info(filp, pcifunc, blkaddr);
+
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
+ numvfs = (cfg >> 12) & 0xFF;
+ for (vf = 0; vf < numvfs; vf++) {
+ pcifunc = (pf << RVU_PFVF_PF_SHIFT) | (vf + 1);
+ rvu_print_npc_mcam_info(filp, pcifunc, blkaddr);
+ }
+ }
+
+ mutex_unlock(&mcam->lock);
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(npc_mcam_info, npc_mcam_info_display, NULL);
+
+static int rvu_dbg_npc_rx_miss_stats_display(struct seq_file *filp,
+ void *unused)
+{
+ struct rvu *rvu = filp->private;
+ struct npc_mcam *mcam;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return -ENODEV;
+
+ mcam = &rvu->hw->mcam;
+
+ seq_puts(filp, "\nNPC MCAM RX miss action stats\n");
+ seq_printf(filp, "\t\tStat %d: \t%lld\n", mcam->rx_miss_act_cntr,
+ rvu_read64(rvu, blkaddr,
+ NPC_AF_MATCH_STATX(mcam->rx_miss_act_cntr)));
+
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(npc_rx_miss_act, npc_rx_miss_stats_display, NULL);
+
+static void rvu_dbg_npc_init(struct rvu *rvu)
+{
+ const struct device *dev = &rvu->pdev->dev;
+ struct dentry *pfile;
+
+ rvu->rvu_dbg.npc = debugfs_create_dir("npc", rvu->rvu_dbg.root);
+ if (!rvu->rvu_dbg.npc)
+ return;
+
+ pfile = debugfs_create_file("mcam_info", 0444, rvu->rvu_dbg.npc,
+ rvu, &rvu_dbg_npc_mcam_info_fops);
+ if (!pfile)
+ goto create_failed;
+
+ pfile = debugfs_create_file("rx_miss_act_stats", 0444, rvu->rvu_dbg.npc,
+ rvu, &rvu_dbg_npc_rx_miss_act_fops);
+ if (!pfile)
+ goto create_failed;
+
+ return;
+
+create_failed:
+ dev_err(dev, "Failed to create debugfs dir/file for NPC\n");
+ debugfs_remove_recursive(rvu->rvu_dbg.npc);
+}
+
+void rvu_dbg_init(struct rvu *rvu)
+{
+ struct device *dev = &rvu->pdev->dev;
+ struct dentry *pfile;
+
+ rvu->rvu_dbg.root = debugfs_create_dir(DEBUGFS_DIR_NAME, NULL);
+ if (!rvu->rvu_dbg.root) {
+ dev_err(rvu->dev, "%s failed\n", __func__);
+ return;
+ }
+ pfile = debugfs_create_file("rsrc_alloc", 0444, rvu->rvu_dbg.root, rvu,
+ &rvu_dbg_rsrc_status_fops);
+ if (!pfile)
+ goto create_failed;
+
+ rvu_dbg_npa_init(rvu);
+ rvu_dbg_nix_init(rvu);
+ rvu_dbg_cgx_init(rvu);
+ rvu_dbg_npc_init(rvu);
+
+ return;
+
+create_failed:
+ dev_err(dev, "Failed to create debugfs dir\n");
+ debugfs_remove_recursive(rvu->rvu_dbg.root);
+}
+
+void rvu_dbg_exit(struct rvu *rvu)
+{
+ debugfs_remove_recursive(rvu->rvu_dbg.root);
+}
+
+#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index 4a7609fd6dd0..8a59f7d53fbf 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -64,7 +64,6 @@ enum nix_makr_fmt_indexes {
struct mce {
struct hlist_node node;
- u16 idx;
u16 pcifunc;
};
@@ -127,17 +126,12 @@ static void nix_rx_sync(struct rvu *rvu, int blkaddr)
err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
if (err)
dev_err(rvu->dev, "NIX RX software sync failed\n");
-
- /* As per a HW errata in 9xxx A0 silicon, HW may clear SW_SYNC[ENA]
- * bit too early. Hence wait for 50us more.
- */
- if (is_rvu_9xxx_A0(rvu))
- usleep_range(50, 60);
}
static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
int lvl, u16 pcifunc, u16 schq)
{
+ struct rvu_hwinfo *hw = rvu->hw;
struct nix_txsch *txsch;
struct nix_hw *nix_hw;
u16 map_func;
@@ -155,13 +149,15 @@ static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]);
mutex_unlock(&rvu->rsrc_lock);
- /* For TL1 schq, sharing across VF's of same PF is ok */
- if (lvl == NIX_TXSCH_LVL_TL1 &&
- rvu_get_pf(map_func) != rvu_get_pf(pcifunc))
- return false;
+ /* TLs aggregating traffic are shared across a PF and its VFs */
+ if (lvl >= hw->cap.nix_tx_aggr_lvl) {
+ if (rvu_get_pf(map_func) != rvu_get_pf(pcifunc))
+ return false;
+ else
+ return true;
+ }
- if (lvl != NIX_TXSCH_LVL_TL1 &&
- map_func != pcifunc)
+ if (map_func != pcifunc)
return false;
return true;
@@ -198,6 +194,11 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
break;
case NIX_INTF_TYPE_LBK:
vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
+
+ /* Note that AF's VFs work in pairs and talk over consecutive
+ * loopback channels: an even-numbered VF transmits on its odd
+ * partner's channel and vice versa. Therefore, if an odd number
+ * of AF VFs is enabled, the last VF remains without a pair.
+ */
pfvf->rx_chan_base = NIX_CHAN_LBK_CHX(0, vf);
pfvf->tx_chan_base = vf & 0x1 ? NIX_CHAN_LBK_CHX(0, vf - 1) :
NIX_CHAN_LBK_CHX(0, vf + 1);
@@ -382,7 +383,8 @@ static void nix_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr,
struct rvu_pfvf *pfvf, int nixlf,
- int rss_sz, int rss_grps, int hwctx_size)
+ int rss_sz, int rss_grps, int hwctx_size,
+ u64 way_mask)
{
int err, grp, num_indices;
@@ -402,7 +404,8 @@ static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr,
/* Config full RSS table size, enable RSS and caching */
rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf),
BIT_ULL(36) | BIT_ULL(4) |
- ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE));
+ ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE) |
+ way_mask << 20);
/* Config RSS group offset and sizes */
for (grp = 0; grp < rss_grps; grp++)
rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_GRPX(nixlf, grp),
@@ -663,6 +666,21 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
return 0;
}
+static const char *nix_get_ctx_name(int ctype)
+{
+ switch (ctype) {
+ case NIX_AQ_CTYPE_CQ:
+ return "CQ";
+ case NIX_AQ_CTYPE_SQ:
+ return "SQ";
+ case NIX_AQ_CTYPE_RQ:
+ return "RQ";
+ case NIX_AQ_CTYPE_RSS:
+ return "RSS";
+ }
+ return "";
+}
+
static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
{
struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
@@ -707,21 +725,60 @@ static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
if (rc) {
err = rc;
dev_err(rvu->dev, "Failed to disable %s:%d context\n",
- (req->ctype == NIX_AQ_CTYPE_CQ) ?
- "CQ" : ((req->ctype == NIX_AQ_CTYPE_RQ) ?
- "RQ" : "SQ"), qidx);
+ nix_get_ctx_name(req->ctype), qidx);
}
}
return err;
}
+#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
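+/* When dynamic NDC caching is disabled, a freshly initialized context is
+ * explicitly pinned into NDC with an AQ LOCK op so subsequent lookups
+ * still hit the cache (rationale inferred from the config symbol and the
+ * NIX_AQ_INSTOP_LOCK op used below).
+ */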
+static int nix_lf_hwctx_lockdown(struct rvu *rvu, struct nix_aq_enq_req *req)
+{
+ struct nix_aq_enq_req lock_ctx_req;
+ int err;
+
+ if (req->op != NIX_AQ_INSTOP_INIT)
+ return 0;
+
+ if (req->ctype == NIX_AQ_CTYPE_MCE ||
+ req->ctype == NIX_AQ_CTYPE_DYNO)
+ return 0;
+
+ memset(&lock_ctx_req, 0, sizeof(struct nix_aq_enq_req));
+ lock_ctx_req.hdr.pcifunc = req->hdr.pcifunc;
+ lock_ctx_req.ctype = req->ctype;
+ lock_ctx_req.op = NIX_AQ_INSTOP_LOCK;
+ lock_ctx_req.qidx = req->qidx;
+ err = rvu_nix_aq_enq_inst(rvu, &lock_ctx_req, NULL);
+ if (err)
+ dev_err(rvu->dev,
+ "PFUNC 0x%x: Failed to lock NIX %s:%d context\n",
+ req->hdr.pcifunc,
+ nix_get_ctx_name(req->ctype), req->qidx);
+ return err;
+}
+
+int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
+ struct nix_aq_enq_req *req,
+ struct nix_aq_enq_rsp *rsp)
+{
+ int err;
+
+ err = rvu_nix_aq_enq_inst(rvu, req, rsp);
+ if (!err)
+ err = nix_lf_hwctx_lockdown(rvu, req);
+ return err;
+}
+#else
+
int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
struct nix_aq_enq_req *req,
struct nix_aq_enq_rsp *rsp)
{
return rvu_nix_aq_enq_inst(rvu, req, rsp);
}
+#endif
int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu,
struct hwctx_disable_req *req,
@@ -745,6 +802,9 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
if (!req->rq_cnt || !req->sq_cnt || !req->cq_cnt)
return NIX_AF_ERR_PARAM;
+ if (req->way_mask)
+ req->way_mask &= 0xFFFF;
+
pfvf = rvu_get_pfvf(rvu, pcifunc);
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
if (!pfvf->nixlf || blkaddr < 0)
@@ -810,7 +870,7 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
(u64)pfvf->rq_ctx->iova);
/* Set caching and queue count in HW */
- cfg = BIT_ULL(36) | (req->rq_cnt - 1);
+ cfg = BIT_ULL(36) | (req->rq_cnt - 1) | req->way_mask << 20;
rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_CFG(nixlf), cfg);
/* Alloc NIX SQ HW context memory and config the base */
@@ -825,7 +885,8 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_BASE(nixlf),
(u64)pfvf->sq_ctx->iova);
- cfg = BIT_ULL(36) | (req->sq_cnt - 1);
+
+ cfg = BIT_ULL(36) | (req->sq_cnt - 1) | req->way_mask << 20;
rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_CFG(nixlf), cfg);
/* Alloc NIX CQ HW context memory and config the base */
@@ -840,13 +901,14 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_BASE(nixlf),
(u64)pfvf->cq_ctx->iova);
- cfg = BIT_ULL(36) | (req->cq_cnt - 1);
+
+ cfg = BIT_ULL(36) | (req->cq_cnt - 1) | req->way_mask << 20;
rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_CFG(nixlf), cfg);
/* Initialize receive side scaling (RSS) */
hwctx_size = 1UL << ((ctx_cfg >> 12) & 0xF);
- err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf,
- req->rss_sz, req->rss_grps, hwctx_size);
+ err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf, req->rss_sz,
+ req->rss_grps, hwctx_size, req->way_mask);
if (err)
goto free_mem;
@@ -860,7 +922,9 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_BASE(nixlf),
(u64)pfvf->cq_ints_ctx->iova);
- rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_CFG(nixlf), BIT_ULL(36));
+
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_CFG(nixlf),
+ BIT_ULL(36) | req->way_mask << 20);
/* Alloc memory for QINT's HW contexts */
cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
@@ -872,7 +936,8 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_BASE(nixlf),
(u64)pfvf->nix_qints_ctx->iova);
- rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_CFG(nixlf), BIT_ULL(36));
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_CFG(nixlf),
+ BIT_ULL(36) | req->way_mask << 20);
/* Setup VLANX TPID's.
* Use VLAN1 for 802.1Q
@@ -1048,6 +1113,9 @@ static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr,
struct rvu_hwinfo *hw = rvu->hw;
int link;
+ if (lvl >= hw->cap.nix_tx_aggr_lvl)
+ return;
+
/* Reset TL4's SDP link config */
if (lvl == NIX_TXSCH_LVL_TL4)
rvu_write64(rvu, blkaddr, NIX_AF_TL4X_SDP_LINK_CFG(schq), 0x00);
@@ -1061,83 +1129,185 @@ static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr,
NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00);
}
-static int
-rvu_get_tl1_schqs(struct rvu *rvu, int blkaddr, u16 pcifunc,
- u16 *schq_list, u16 *schq_cnt)
+static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc)
{
- struct nix_txsch *txsch;
- struct nix_hw *nix_hw;
- struct rvu_pfvf *pfvf;
- u8 cgx_id, lmac_id;
- u16 schq_base;
- u32 *pfvf_map;
- int pf, intf;
+ struct rvu_hwinfo *hw = rvu->hw;
+ int pf = rvu_get_pf(pcifunc);
+ u8 cgx_id = 0, lmac_id = 0;
- nix_hw = get_nix_hw(rvu->hw, blkaddr);
- if (!nix_hw)
- return -ENODEV;
+ if (is_afvf(pcifunc)) { /* LBK links */
+ return hw->cgx_links;
+ } else if (is_pf_cgxmapped(rvu, pf)) {
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ return (cgx_id * hw->lmac_per_cgx) + lmac_id;
+ }
- pfvf = rvu_get_pfvf(rvu, pcifunc);
- txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL1];
- pfvf_map = txsch->pfvf_map;
- pf = rvu_get_pf(pcifunc);
+ /* SDP link */
+ return hw->cgx_links + hw->lbk_links;
+}
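+
+/* Worked example with hypothetical counts: if hw->cgx_links were 8,
+ * hw->lbk_links 1 and hw->lmac_per_cgx 4, a PF mapped to cgx1/lmac2
+ * gets link 1 * 4 + 2 = 6, an AF VF (LBK) gets link 8, and an SDP
+ * function gets link 8 + 1 = 9.
+ */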
- /* static allocation as two TL1's per link */
- intf = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
+static void nix_get_txschq_range(struct rvu *rvu, u16 pcifunc,
+ int link, int *start, int *end)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ int pf = rvu_get_pf(pcifunc);
- switch (intf) {
- case NIX_INTF_TYPE_CGX:
- rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
- schq_base = (cgx_id * MAX_LMAC_PER_CGX + lmac_id) * 2;
- break;
- case NIX_INTF_TYPE_LBK:
- schq_base = rvu->cgx_cnt_max * MAX_LMAC_PER_CGX * 2;
- break;
- default:
- return -ENODEV;
+ if (is_afvf(pcifunc)) { /* LBK links */
+ *start = hw->cap.nix_txsch_per_cgx_lmac * link;
+ *end = *start + hw->cap.nix_txsch_per_lbk_lmac;
+ } else if (is_pf_cgxmapped(rvu, pf)) { /* CGX links */
+ *start = hw->cap.nix_txsch_per_cgx_lmac * link;
+ *end = *start + hw->cap.nix_txsch_per_cgx_lmac;
+ } else { /* SDP link */
+ *start = (hw->cap.nix_txsch_per_cgx_lmac * hw->cgx_links) +
+ (hw->cap.nix_txsch_per_lbk_lmac * hw->lbk_links);
+ *end = *start + hw->cap.nix_txsch_per_sdp_lmac;
}
+}
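+
+/* Continuing the hypothetical numbers above: with
+ * nix_txsch_per_cgx_lmac = 8 and nix_txsch_per_lbk_lmac = 2, CGX link 6
+ * covers schqs [48, 56), LBK link 8 covers [64, 66), and the SDP range
+ * starts at 8 * 8 + 2 * 1 = 66.
+ */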
- if (schq_base + 1 > txsch->schq.max)
- return -ENODEV;
+static int nix_check_txschq_alloc_req(struct rvu *rvu, int lvl, u16 pcifunc,
+ struct nix_hw *nix_hw,
+ struct nix_txsch_alloc_req *req)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ int schq, req_schq, free_cnt;
+ struct nix_txsch *txsch;
+ int link, start, end;
- /* init pfvf_map as we store flags */
- if (pfvf_map[schq_base] == U32_MAX) {
- pfvf_map[schq_base] =
- TXSCH_MAP((pf << RVU_PFVF_PF_SHIFT), 0);
- pfvf_map[schq_base + 1] =
- TXSCH_MAP((pf << RVU_PFVF_PF_SHIFT), 0);
+ txsch = &nix_hw->txsch[lvl];
+ req_schq = req->schq_contig[lvl] + req->schq[lvl];
- /* Onetime reset for TL1 */
- nix_reset_tx_linkcfg(rvu, blkaddr,
- NIX_TXSCH_LVL_TL1, schq_base);
- nix_reset_tx_shaping(rvu, blkaddr,
- NIX_TXSCH_LVL_TL1, schq_base);
+ if (!req_schq)
+ return 0;
- nix_reset_tx_linkcfg(rvu, blkaddr,
- NIX_TXSCH_LVL_TL1, schq_base + 1);
- nix_reset_tx_shaping(rvu, blkaddr,
- NIX_TXSCH_LVL_TL1, schq_base + 1);
+ link = nix_get_tx_link(rvu, pcifunc);
+
+ /* For a traffic aggregating scheduler level, one queue is enough */
+ if (lvl >= hw->cap.nix_tx_aggr_lvl) {
+ if (req_schq != 1)
+ return NIX_AF_ERR_TLX_ALLOC_FAIL;
+ return 0;
}
- if (schq_list && schq_cnt) {
- schq_list[0] = schq_base;
- schq_list[1] = schq_base + 1;
- *schq_cnt = 2;
+ /* Get free SCHQ count and check if the request can be accommodated */
+ if (hw->cap.nix_fixed_txschq_mapping) {
+ nix_get_txschq_range(rvu, pcifunc, link, &start, &end);
+ schq = start + (pcifunc & RVU_PFVF_FUNC_MASK);
+ if (end <= txsch->schq.max && schq < end &&
+ !test_bit(schq, txsch->schq.bmap))
+ free_cnt = 1;
+ else
+ free_cnt = 0;
+ } else {
+ free_cnt = rvu_rsrc_free_count(&txsch->schq);
}
+ if (free_cnt < req_schq || req_schq > MAX_TXSCHQ_PER_FUNC)
+ return NIX_AF_ERR_TLX_ALLOC_FAIL;
+
+ /* If contiguous queues are needed, check for availability */
+ if (!hw->cap.nix_fixed_txschq_mapping && req->schq_contig[lvl] &&
+ !rvu_rsrc_check_contig(&txsch->schq, req->schq_contig[lvl]))
+ return NIX_AF_ERR_TLX_ALLOC_FAIL;
+
return 0;
}
+static void nix_txsch_alloc(struct rvu *rvu, struct nix_txsch *txsch,
+ struct nix_txsch_alloc_rsp *rsp,
+ int lvl, int start, int end)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = rsp->hdr.pcifunc;
+ int idx, schq;
+
+ /* For traffic aggregating levels, queue alloc is based
+ * on the transmit link to which the PF_FUNC is mapped.
+ */
+ if (lvl >= hw->cap.nix_tx_aggr_lvl) {
+ /* A single TL queue is allocated */
+ if (rsp->schq_contig[lvl]) {
+ rsp->schq_contig[lvl] = 1;
+ rsp->schq_contig_list[lvl][0] = start;
+ }
+
+ /* Both contig and non-contig reqs don't make sense here */
+ if (rsp->schq_contig[lvl])
+ rsp->schq[lvl] = 0;
+
+ if (rsp->schq[lvl]) {
+ rsp->schq[lvl] = 1;
+ rsp->schq_list[lvl][0] = start;
+ }
+ return;
+ }
+
+ /* Adjust the queue request count if HW supports
+ * only one queue per level (fixed txschq mapping).
+ */
+ if (hw->cap.nix_fixed_txschq_mapping) {
+ idx = pcifunc & RVU_PFVF_FUNC_MASK;
+ schq = start + idx;
+ if (idx >= (end - start) || test_bit(schq, txsch->schq.bmap)) {
+ rsp->schq_contig[lvl] = 0;
+ rsp->schq[lvl] = 0;
+ return;
+ }
+
+ if (rsp->schq_contig[lvl]) {
+ rsp->schq_contig[lvl] = 1;
+ set_bit(schq, txsch->schq.bmap);
+ rsp->schq_contig_list[lvl][0] = schq;
+ rsp->schq[lvl] = 0;
+ } else if (rsp->schq[lvl]) {
+ rsp->schq[lvl] = 1;
+ set_bit(schq, txsch->schq.bmap);
+ rsp->schq_list[lvl][0] = schq;
+ }
+ return;
+ }
+
+ /* Allocate the requested contiguous queue indices first */
+ if (rsp->schq_contig[lvl]) {
+ schq = bitmap_find_next_zero_area(txsch->schq.bmap,
+ txsch->schq.max, start,
+ rsp->schq_contig[lvl], 0);
+ if (schq >= end)
+ rsp->schq_contig[lvl] = 0;
+ for (idx = 0; idx < rsp->schq_contig[lvl]; idx++) {
+ set_bit(schq, txsch->schq.bmap);
+ rsp->schq_contig_list[lvl][idx] = schq;
+ schq++;
+ }
+ }
+
+ /* Allocate non-contiguous queue indices */
+ if (rsp->schq[lvl]) {
+ idx = 0;
+ for (schq = start; schq < end; schq++) {
+ if (!test_bit(schq, txsch->schq.bmap)) {
+ set_bit(schq, txsch->schq.bmap);
+ rsp->schq_list[lvl][idx++] = schq;
+ }
+ if (idx == rsp->schq[lvl])
+ break;
+ }
+ /* Update how many were allocated */
+ rsp->schq[lvl] = idx;
+ }
+}
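+
+/* Allocation strategy, in brief: at or above nix_tx_aggr_lvl a single
+ * queue (the link index passed in as 'start') is handed out; with fixed
+ * txschq mapping the queue is start + (pcifunc & RVU_PFVF_FUNC_MASK);
+ * otherwise indices come from the level's bitmap, contiguous first.
+ */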
+
int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
struct nix_txsch_alloc_req *req,
struct nix_txsch_alloc_rsp *rsp)
{
+ struct rvu_hwinfo *hw = rvu->hw;
u16 pcifunc = req->hdr.pcifunc;
+ int link, blkaddr, rc = 0;
+ int lvl, idx, start, end;
struct nix_txsch *txsch;
- int lvl, idx, req_schq;
struct rvu_pfvf *pfvf;
struct nix_hw *nix_hw;
- int blkaddr, rc = 0;
u32 *pfvf_map;
u16 schq;
@@ -1151,83 +1321,66 @@ int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
return -EINVAL;
mutex_lock(&rvu->rsrc_lock);
- for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
- txsch = &nix_hw->txsch[lvl];
- req_schq = req->schq_contig[lvl] + req->schq[lvl];
- pfvf_map = txsch->pfvf_map;
-
- if (!req_schq)
- continue;
- /* There are only 28 TL1s */
- if (lvl == NIX_TXSCH_LVL_TL1) {
- if (req->schq_contig[lvl] ||
- req->schq[lvl] > 2 ||
- rvu_get_tl1_schqs(rvu, blkaddr,
- pcifunc, NULL, NULL))
- goto err;
- continue;
- }
-
- /* Check if request is valid */
- if (req_schq > MAX_TXSCHQ_PER_FUNC)
- goto err;
-
- /* If contiguous queues are needed, check for availability */
- if (req->schq_contig[lvl] &&
- !rvu_rsrc_check_contig(&txsch->schq, req->schq_contig[lvl]))
- goto err;
-
- /* Check if full request can be accommodated */
- if (req_schq >= rvu_rsrc_free_count(&txsch->schq))
+ /* Check if the request is valid as per HW capabilities
+ * and can be accommodated.
+ */
+ for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
+ rc = nix_check_txschq_alloc_req(rvu, lvl, pcifunc, nix_hw, req);
+ if (rc)
goto err;
}
+ /* Allocate requested Tx scheduler queues */
for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
txsch = &nix_hw->txsch[lvl];
- rsp->schq_contig[lvl] = req->schq_contig[lvl];
pfvf_map = txsch->pfvf_map;
- rsp->schq[lvl] = req->schq[lvl];
if (!req->schq[lvl] && !req->schq_contig[lvl])
continue;
- /* Handle TL1 specially as it is
- * allocation is restricted to 2 TL1's
- * per link
- */
+ rsp->schq[lvl] = req->schq[lvl];
+ rsp->schq_contig[lvl] = req->schq_contig[lvl];
- if (lvl == NIX_TXSCH_LVL_TL1) {
- rsp->schq_contig[lvl] = 0;
- rvu_get_tl1_schqs(rvu, blkaddr, pcifunc,
- &rsp->schq_list[lvl][0],
- &rsp->schq[lvl]);
- continue;
+ link = nix_get_tx_link(rvu, pcifunc);
+
+ if (lvl >= hw->cap.nix_tx_aggr_lvl) {
+ start = link;
+ end = link;
+ } else if (hw->cap.nix_fixed_txschq_mapping) {
+ nix_get_txschq_range(rvu, pcifunc, link, &start, &end);
+ } else {
+ start = 0;
+ end = txsch->schq.max;
}
- /* Alloc contiguous queues first */
- if (req->schq_contig[lvl]) {
- schq = rvu_alloc_rsrc_contig(&txsch->schq,
- req->schq_contig[lvl]);
+ nix_txsch_alloc(rvu, txsch, rsp, lvl, start, end);
- for (idx = 0; idx < req->schq_contig[lvl]; idx++) {
+ /* Reset queue config */
+ for (idx = 0; idx < req->schq_contig[lvl]; idx++) {
+ schq = rsp->schq_contig_list[lvl][idx];
+ if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) &
+ NIX_TXSCHQ_CFG_DONE))
pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
- nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
- nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
- rsp->schq_contig_list[lvl][idx] = schq;
- schq++;
- }
+ nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
+ nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
}
- /* Alloc non-contiguous queues */
for (idx = 0; idx < req->schq[lvl]; idx++) {
- schq = rvu_alloc_rsrc(&txsch->schq);
- pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
+ schq = rsp->schq_list[lvl][idx];
+ if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) &
+ NIX_TXSCHQ_CFG_DONE))
+ pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
- rsp->schq_list[lvl][idx] = schq;
}
}
+
+ rsp->aggr_level = hw->cap.nix_tx_aggr_lvl;
+ rsp->aggr_lvl_rr_prio = TXSCH_TL1_DFLT_RR_PRIO;
+ rsp->link_cfg_lvl = rvu_read64(rvu, blkaddr,
+ NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ?
+ NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
goto exit;
err:
rc = NIX_AF_ERR_TLX_ALLOC_FAIL;
@@ -1236,13 +1389,50 @@ exit:
return rc;
}
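Editorial note on the link_cfg_lvl advertised above, for context:

/* NIX_AF_PSE_CHANNEL_LEVEL bit 0 selects whether the shared
 * NIX_AF_TL3_TL2X_LINKX_CFG registers take effect at TL3 (bit set,
 * as rvu_nix_init programs on 96xx B0 later in this patch) or at
 * TL2 (bit clear); rsp->link_cfg_lvl forwards that choice to the
 * PF/VF driver so it configures links at the right level.
 */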
+static void nix_smq_flush(struct rvu *rvu, int blkaddr,
+ int smq, u16 pcifunc, int nixlf)
+{
+ int pf = rvu_get_pf(pcifunc);
+ u8 cgx_id = 0, lmac_id = 0;
+ int err, restore_tx_en = 0;
+ u64 cfg;
+
+ /* Enable CGX TX if disabled */
+ if (is_pf_cgxmapped(rvu, pf)) {
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ restore_tx_en = !cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu),
+ lmac_id, true);
+ }
+
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
+ /* Do SMQ flush and set enqueue xoff */
+ cfg |= BIT_ULL(50) | BIT_ULL(49);
+ rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg);
+
+ /* Disable backpressure from physical link,
+ * otherwise SMQ flush may stall.
+ */
+ rvu_cgx_enadis_rx_bp(rvu, pf, false);
+
+ /* Wait for flush to complete */
+ err = rvu_poll_reg(rvu, blkaddr,
+ NIX_AF_SMQX_CFG(smq), BIT_ULL(49), true);
+ if (err)
+ dev_err(rvu->dev,
+ "NIXLF%d: SMQ%d flush failed\n", nixlf, smq);
+
+ rvu_cgx_enadis_rx_bp(rvu, pf, true);
+ /* restore cgx tx state */
+ if (restore_tx_en)
+ cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);
+}
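The completion wait above uses the AF's generic poll helper; a self-contained stand-in showing its assumed semantics (the real rvu_poll_reg lives in rvu.c; the timeout below is illustrative):

/* Stand-in for rvu_poll_reg(..., zero = true): spin until HW clears
 * the self-clearing FLUSH bit (bit 49) or a timeout expires.
 */
static int example_poll_clear(struct rvu *rvu, int blkaddr, u64 reg, u64 mask)
{
	unsigned long timeout = jiffies + usecs_to_jiffies(10000);

	while (time_before(jiffies, timeout)) {
		if (!(rvu_read64(rvu, blkaddr, reg) & mask))
			return 0;	/* flush completed */
		usleep_range(1, 5);
	}
	return -EBUSY;
}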
+
static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
{
int blkaddr, nixlf, lvl, schq, err;
struct rvu_hwinfo *hw = rvu->hw;
struct nix_txsch *txsch;
struct nix_hw *nix_hw;
- u64 cfg;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
if (blkaddr < 0)
@@ -1275,26 +1465,15 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
for (schq = 0; schq < txsch->schq.max; schq++) {
if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
continue;
- cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq));
- /* Do SMQ flush and set enqueue xoff */
- cfg |= BIT_ULL(50) | BIT_ULL(49);
- rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg);
-
- /* Wait for flush to complete */
- err = rvu_poll_reg(rvu, blkaddr,
- NIX_AF_SMQX_CFG(schq), BIT_ULL(49), true);
- if (err) {
- dev_err(rvu->dev,
- "NIXLF%d: SMQ%d flush failed\n", nixlf, schq);
- }
+ nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
}
/* Now free scheduler queues to free pool */
for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
- /* Free all SCHQ's except TL1 as
- * TL1 is shared across all VF's for a RVU PF
- */
- if (lvl == NIX_TXSCH_LVL_TL1)
+ /* TLs above the aggregation level are shared across a PF
+ * and its VFs, hence skip freeing them.
+ */
+ if (lvl >= hw->cap.nix_tx_aggr_lvl)
continue;
txsch = &nix_hw->txsch[lvl];
@@ -1302,7 +1481,7 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
continue;
rvu_free_rsrc(&txsch->schq, schq);
- txsch->pfvf_map[schq] = 0;
+ txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
}
}
mutex_unlock(&rvu->rsrc_lock);
@@ -1319,13 +1498,12 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
static int nix_txschq_free_one(struct rvu *rvu,
struct nix_txsch_free_req *req)
{
- int lvl, schq, nixlf, blkaddr, rc;
struct rvu_hwinfo *hw = rvu->hw;
u16 pcifunc = req->hdr.pcifunc;
+ int lvl, schq, nixlf, blkaddr;
struct nix_txsch *txsch;
struct nix_hw *nix_hw;
u32 *pfvf_map;
- u64 cfg;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
if (blkaddr < 0)
@@ -1343,10 +1521,8 @@ static int nix_txschq_free_one(struct rvu *rvu,
schq = req->schq;
txsch = &nix_hw->txsch[lvl];
- /* Don't allow freeing TL1 */
- if (lvl > NIX_TXSCH_LVL_TL2 ||
- schq >= txsch->schq.max)
- goto err;
+ if (lvl >= hw->cap.nix_tx_aggr_lvl || schq >= txsch->schq.max)
+ return 0;
pfvf_map = txsch->pfvf_map;
mutex_lock(&rvu->rsrc_lock);
@@ -1359,24 +1535,12 @@ static int nix_txschq_free_one(struct rvu *rvu,
/* Flush if it is a SMQ. Onus of disabling
* TL2/3 queue links before SMQ flush is on user
*/
- if (lvl == NIX_TXSCH_LVL_SMQ) {
- cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq));
- /* Do SMQ flush and set enqueue xoff */
- cfg |= BIT_ULL(50) | BIT_ULL(49);
- rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg);
-
- /* Wait for flush to complete */
- rc = rvu_poll_reg(rvu, blkaddr,
- NIX_AF_SMQX_CFG(schq), BIT_ULL(49), true);
- if (rc) {
- dev_err(rvu->dev,
- "NIXLF%d: SMQ%d flush failed\n", nixlf, schq);
- }
- }
+ if (lvl == NIX_TXSCH_LVL_SMQ)
+ nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
/* Free the resource */
rvu_free_rsrc(&txsch->schq, schq);
- txsch->pfvf_map[schq] = 0;
+ txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
mutex_unlock(&rvu->rsrc_lock);
return 0;
err:
@@ -1393,8 +1557,8 @@ int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu,
return nix_txschq_free_one(rvu, req);
}
-static bool is_txschq_config_valid(struct rvu *rvu, u16 pcifunc, int blkaddr,
- int lvl, u64 reg, u64 regval)
+static bool is_txschq_hierarchy_valid(struct rvu *rvu, u16 pcifunc, int blkaddr,
+ int lvl, u64 reg, u64 regval)
{
u64 regbase = reg & 0xFFFF;
u16 schq, parent;
@@ -1431,79 +1595,82 @@ static bool is_txschq_config_valid(struct rvu *rvu, u16 pcifunc, int blkaddr,
return true;
}
-static int
-nix_tl1_default_cfg(struct rvu *rvu, u16 pcifunc)
+static bool is_txschq_shaping_valid(struct rvu_hwinfo *hw, int lvl, u64 reg)
{
- u16 schq_list[2], schq_cnt, schq;
- int blkaddr, idx, err = 0;
- u16 map_func, map_flags;
- struct nix_hw *nix_hw;
- u64 reg, regval;
- u32 *pfvf_map;
-
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
- if (blkaddr < 0)
- return NIX_AF_ERR_AF_LF_INVALID;
+ u64 regbase;
- nix_hw = get_nix_hw(rvu->hw, blkaddr);
- if (!nix_hw)
- return -EINVAL;
-
- pfvf_map = nix_hw->txsch[NIX_TXSCH_LVL_TL1].pfvf_map;
-
- mutex_lock(&rvu->rsrc_lock);
-
- err = rvu_get_tl1_schqs(rvu, blkaddr,
- pcifunc, schq_list, &schq_cnt);
- if (err)
- goto unlock;
+ if (hw->cap.nix_shaping)
+ return true;
- for (idx = 0; idx < schq_cnt; idx++) {
- schq = schq_list[idx];
- map_func = TXSCH_MAP_FUNC(pfvf_map[schq]);
- map_flags = TXSCH_MAP_FLAGS(pfvf_map[schq]);
+ /* If shaping and coloring are not supported, then
+ * *_CIR and *_PIR registers should not be configured.
+ */
+ regbase = reg & 0xFFFF;
- /* check if config is already done or this is pf */
- if (map_flags & NIX_TXSCHQ_TL1_CFG_DONE)
- continue;
+ switch (lvl) {
+ case NIX_TXSCH_LVL_TL1:
+ if (regbase == NIX_AF_TL1X_CIR(0))
+ return false;
+ break;
+ case NIX_TXSCH_LVL_TL2:
+ if (regbase == NIX_AF_TL2X_CIR(0) ||
+ regbase == NIX_AF_TL2X_PIR(0))
+ return false;
+ break;
+ case NIX_TXSCH_LVL_TL3:
+ if (regbase == NIX_AF_TL3X_CIR(0) ||
+ regbase == NIX_AF_TL3X_PIR(0))
+ return false;
+ break;
+ case NIX_TXSCH_LVL_TL4:
+ if (regbase == NIX_AF_TL4X_CIR(0) ||
+ regbase == NIX_AF_TL4X_PIR(0))
+ return false;
+ break;
+ }
+ return true;
+}
- /* default configuration */
- reg = NIX_AF_TL1X_TOPOLOGY(schq);
- regval = (TXSCH_TL1_DFLT_RR_PRIO << 1);
- rvu_write64(rvu, blkaddr, reg, regval);
- reg = NIX_AF_TL1X_SCHEDULE(schq);
- regval = TXSCH_TL1_DFLT_RR_QTM;
- rvu_write64(rvu, blkaddr, reg, regval);
- reg = NIX_AF_TL1X_CIR(schq);
- regval = 0;
- rvu_write64(rvu, blkaddr, reg, regval);
+static void nix_tl1_default_cfg(struct rvu *rvu, struct nix_hw *nix_hw,
+ u16 pcifunc, int blkaddr)
+{
+ u32 *pfvf_map;
+ int schq;
- map_flags |= NIX_TXSCHQ_TL1_CFG_DONE;
- pfvf_map[schq] = TXSCH_MAP(map_func, map_flags);
- }
-unlock:
- mutex_unlock(&rvu->rsrc_lock);
- return err;
+ schq = nix_get_tx_link(rvu, pcifunc);
+ pfvf_map = nix_hw->txsch[NIX_TXSCH_LVL_TL1].pfvf_map;
+ /* Skip if PF has already done the config */
+ if (TXSCH_MAP_FLAGS(pfvf_map[schq]) & NIX_TXSCHQ_CFG_DONE)
+ return;
+ rvu_write64(rvu, blkaddr, NIX_AF_TL1X_TOPOLOGY(schq),
+ (TXSCH_TL1_DFLT_RR_PRIO << 1));
+ rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq),
+ TXSCH_TL1_DFLT_RR_QTM);
+ rvu_write64(rvu, blkaddr, NIX_AF_TL1X_CIR(schq), 0x00);
+ pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], NIX_TXSCHQ_CFG_DONE);
}
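The defaults written above come from two rvu.h constants; their values as of this series (quoted for context, treat as illustrative):

/* Default TL1 scheduling config (rvu.h):
 *   TXSCH_TL1_DFLT_RR_QTM  = GENMASK(23, 0)  -- max DWRR quantum
 *   TXSCH_TL1_DFLT_RR_PRIO = 0x1             -- RR priority, shifted
 *                                               into TOPOLOGY by 1
 */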
int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
struct nix_txschq_config *req,
struct msg_rsp *rsp)
{
- u16 schq, pcifunc = req->hdr.pcifunc;
struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
u64 reg, regval, schq_regbase;
struct nix_txsch *txsch;
- u16 map_func, map_flags;
struct nix_hw *nix_hw;
int blkaddr, idx, err;
+ int nixlf, schq;
u32 *pfvf_map;
- int nixlf;
if (req->lvl >= NIX_TXSCH_LVL_CNT ||
req->num_regs > MAX_REGS_PER_MBOX_MSG)
return NIX_AF_INVAL_TXSCHQ_CFG;
+ err = nix_get_nixlf(rvu, pcifunc, &nixlf);
+ if (err)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
if (blkaddr < 0)
return NIX_AF_ERR_AF_LF_INVALID;
@@ -1512,19 +1679,16 @@ int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
if (!nix_hw)
return -EINVAL;
- nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
- if (nixlf < 0)
- return NIX_AF_ERR_AF_LF_INVALID;
-
txsch = &nix_hw->txsch[req->lvl];
pfvf_map = txsch->pfvf_map;
- /* VF is only allowed to trigger
- * setting default cfg on TL1
- */
- if (pcifunc & RVU_PFVF_FUNC_MASK &&
- req->lvl == NIX_TXSCH_LVL_TL1) {
- return nix_tl1_default_cfg(rvu, pcifunc);
+ if (req->lvl >= hw->cap.nix_tx_aggr_lvl &&
+ pcifunc & RVU_PFVF_FUNC_MASK) {
+ mutex_lock(&rvu->rsrc_lock);
+ if (req->lvl == NIX_TXSCH_LVL_TL1)
+ nix_tl1_default_cfg(rvu, nix_hw, pcifunc, blkaddr);
+ mutex_unlock(&rvu->rsrc_lock);
+ return 0;
}
for (idx = 0; idx < req->num_regs; idx++) {
@@ -1532,10 +1696,14 @@ int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
regval = req->regval[idx];
schq_regbase = reg & 0xFFFF;
- if (!is_txschq_config_valid(rvu, pcifunc, blkaddr,
- txsch->lvl, reg, regval))
+ if (!is_txschq_hierarchy_valid(rvu, pcifunc, blkaddr,
+ txsch->lvl, reg, regval))
return NIX_AF_INVAL_TXSCHQ_CFG;
+ /* Check if shaping and coloring are supported */
+ if (!is_txschq_shaping_valid(hw, req->lvl, reg))
+ continue;
+
/* Replace PF/VF visible NIXLF slot with HW NIXLF id */
if (schq_regbase == NIX_AF_SMQX_CFG(0)) {
nixlf = rvu_get_lf(rvu, &hw->block[blkaddr],
@@ -1544,32 +1712,36 @@ int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
regval |= ((u64)nixlf << 24);
}
+ /* Clear 'BP_ENA' config, if it's not allowed */
+ if (!hw->cap.nix_tx_link_bp) {
+ if (schq_regbase == NIX_AF_TL4X_SDP_LINK_CFG(0) ||
+ (schq_regbase & 0xFF00) ==
+ NIX_AF_TL3_TL2X_LINKX_CFG(0, 0))
+ regval &= ~BIT_ULL(13);
+ }
+
/* Mark config as done for TL1 by PF */
if (schq_regbase >= NIX_AF_TL1X_SCHEDULE(0) &&
schq_regbase <= NIX_AF_TL1X_GREEN_BYTES(0)) {
schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
-
mutex_lock(&rvu->rsrc_lock);
-
- map_func = TXSCH_MAP_FUNC(pfvf_map[schq]);
- map_flags = TXSCH_MAP_FLAGS(pfvf_map[schq]);
-
- map_flags |= NIX_TXSCHQ_TL1_CFG_DONE;
- pfvf_map[schq] = TXSCH_MAP(map_func, map_flags);
+ pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq],
+ NIX_TXSCHQ_CFG_DONE);
mutex_unlock(&rvu->rsrc_lock);
}
- rvu_write64(rvu, blkaddr, reg, regval);
-
- /* Check for SMQ flush, if so, poll for its completion */
+ /* SMQ flush is special, hence split the register write such
+ * that the flush is done first and the rest of the bits are
+ * written later.
+ */
if (schq_regbase == NIX_AF_SMQX_CFG(0) &&
(regval & BIT_ULL(49))) {
- err = rvu_poll_reg(rvu, blkaddr,
- reg, BIT_ULL(49), true);
- if (err)
- return NIX_AF_SMQ_FLUSH_FAILED;
+ schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
+ nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
+ regval &= ~BIT_ULL(49);
}
+ rvu_write64(rvu, blkaddr, reg, regval);
}
+
return 0;
}
@@ -1650,7 +1822,7 @@ static int nix_setup_mce(struct rvu *rvu, int mce, u8 op,
}
static int nix_update_mce_list(struct nix_mce_list *mce_list,
- u16 pcifunc, int idx, bool add)
+ u16 pcifunc, bool add)
{
struct mce *mce, *tail = NULL;
bool delete = false;
@@ -1679,7 +1851,6 @@ static int nix_update_mce_list(struct nix_mce_list *mce_list,
mce = kzalloc(sizeof(*mce), GFP_KERNEL);
if (!mce)
return -ENOMEM;
- mce->idx = idx;
mce->pcifunc = pcifunc;
if (!tail)
hlist_add_head(&mce->node, &mce_list->head);
@@ -1691,12 +1862,12 @@ static int nix_update_mce_list(struct nix_mce_list *mce_list,
static int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add)
{
- int err = 0, idx, next_idx, count;
+ int err = 0, idx, next_idx, last_idx;
struct nix_mce_list *mce_list;
- struct mce *mce, *next_mce;
struct nix_mcast *mcast;
struct nix_hw *nix_hw;
struct rvu_pfvf *pfvf;
+ struct mce *mce;
int blkaddr;
/* Broadcast pkt replication is not needed for AF's VFs, hence skip */
@@ -1728,31 +1899,31 @@ static int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add)
mutex_lock(&mcast->mce_lock);
- err = nix_update_mce_list(mce_list, pcifunc, idx, add);
+ err = nix_update_mce_list(mce_list, pcifunc, add);
if (err)
goto end;
/* Disable MCAM entry in NPC */
-
- if (!mce_list->count)
+ if (!mce_list->count) {
+ rvu_npc_disable_bcast_entry(rvu, pcifunc);
goto end;
- count = mce_list->count;
+ }
/* Dump the updated list to HW */
+ idx = pfvf->bcast_mce_idx;
+ last_idx = idx + mce_list->count - 1;
hlist_for_each_entry(mce, &mce_list->head, node) {
- next_idx = 0;
- count--;
- if (count) {
- next_mce = hlist_entry(mce->node.next,
- struct mce, node);
- next_idx = next_mce->idx;
- }
+ if (idx > last_idx)
+ break;
+
+ next_idx = idx + 1;
/* EOL should be set in last MCE */
- err = nix_setup_mce(rvu, mce->idx,
- NIX_AQ_INSTOP_WRITE, mce->pcifunc,
- next_idx, count ? false : true);
+ err = nix_setup_mce(rvu, idx, NIX_AQ_INSTOP_WRITE,
+ mce->pcifunc, next_idx,
+ (next_idx > last_idx) ? true : false);
if (err)
goto end;
+ idx++;
}
end:
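The rewrite above assumes each PF's broadcast MCEs occupy a contiguous index range starting at pfvf->bcast_mce_idx, with every hardware MCE pointing at the next index and EOL set on the last. A worked example with a hypothetical base index of 8 and three list members:

/* Hypothetical 3-entry bcast MCE chain based at index 8:
 *   nix_setup_mce(rvu, 8,  NIX_AQ_INSTOP_WRITE, func_a, 9,  false);
 *   nix_setup_mce(rvu, 9,  NIX_AQ_INSTOP_WRITE, func_b, 10, false);
 *   nix_setup_mce(rvu, 10, NIX_AQ_INSTOP_WRITE, func_c, 11, true);
 * The last call sets EOL, so the replication walk stops at index 10.
 */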
@@ -1849,8 +2020,8 @@ static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
{
struct nix_txsch *txsch;
+ int err, lvl, schq;
u64 cfg, reg;
- int err, lvl;
/* Get scheduler queue count of each type and alloc
* bitmap for each for alloc/free/attach operations.
@@ -1888,7 +2059,8 @@ static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
sizeof(u32), GFP_KERNEL);
if (!txsch->pfvf_map)
return -ENOMEM;
- memset(txsch->pfvf_map, U8_MAX, txsch->schq.max * sizeof(u32));
+ for (schq = 0; schq < txsch->schq.max; schq++)
+ txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
}
return 0;
}
@@ -2032,51 +2204,82 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
if (field_marker)
memset(&tmp, 0, sizeof(tmp));
+ field_marker = true;
+ keyoff_marker = true;
switch (key_type) {
case NIX_FLOW_KEY_TYPE_PORT:
field->sel_chan = true;
/* This should be set to 1, when SEL_CHAN is set */
field->bytesm1 = 1;
- field_marker = true;
- keyoff_marker = true;
break;
case NIX_FLOW_KEY_TYPE_IPV4:
+ case NIX_FLOW_KEY_TYPE_INNR_IPV4:
field->lid = NPC_LID_LC;
field->ltype_match = NPC_LT_LC_IP;
+ if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV4) {
+ field->lid = NPC_LID_LG;
+ field->ltype_match = NPC_LT_LG_TU_IP;
+ }
field->hdr_offset = 12; /* SIP offset */
field->bytesm1 = 7; /* SIP + DIP, 8 bytes */
field->ltype_mask = 0xF; /* Match only IPv4 */
- field_marker = true;
keyoff_marker = false;
break;
case NIX_FLOW_KEY_TYPE_IPV6:
+ case NIX_FLOW_KEY_TYPE_INNR_IPV6:
field->lid = NPC_LID_LC;
field->ltype_match = NPC_LT_LC_IP6;
+ if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV6) {
+ field->lid = NPC_LID_LG;
+ field->ltype_match = NPC_LT_LG_TU_IP6;
+ }
field->hdr_offset = 8; /* SIP offset */
field->bytesm1 = 31; /* SIP + DIP, 32 bytes */
field->ltype_mask = 0xF; /* Match only IPv6 */
- field_marker = true;
- keyoff_marker = true;
break;
case NIX_FLOW_KEY_TYPE_TCP:
case NIX_FLOW_KEY_TYPE_UDP:
case NIX_FLOW_KEY_TYPE_SCTP:
+ case NIX_FLOW_KEY_TYPE_INNR_TCP:
+ case NIX_FLOW_KEY_TYPE_INNR_UDP:
+ case NIX_FLOW_KEY_TYPE_INNR_SCTP:
field->lid = NPC_LID_LD;
+ if (key_type == NIX_FLOW_KEY_TYPE_INNR_TCP ||
+ key_type == NIX_FLOW_KEY_TYPE_INNR_UDP ||
+ key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP)
+ field->lid = NPC_LID_LH;
field->bytesm1 = 3; /* Sport + Dport, 4 bytes */
- if (key_type == NIX_FLOW_KEY_TYPE_TCP && valid_key) {
+
+ /* The ltype enum values under NPC_LID_LD and NPC_LID_LH are the
+ * same (verified by the BUILD_BUG_ONs below), so there is no need
+ * to change ltype_match, just change the lid for inner protocols.
+ */
+ BUILD_BUG_ON((int)NPC_LT_LD_TCP !=
+ (int)NPC_LT_LH_TU_TCP);
+ BUILD_BUG_ON((int)NPC_LT_LD_UDP !=
+ (int)NPC_LT_LH_TU_UDP);
+ BUILD_BUG_ON((int)NPC_LT_LD_SCTP !=
+ (int)NPC_LT_LH_TU_SCTP);
+
+ if ((key_type == NIX_FLOW_KEY_TYPE_TCP ||
+ key_type == NIX_FLOW_KEY_TYPE_INNR_TCP) &&
+ valid_key) {
field->ltype_match |= NPC_LT_LD_TCP;
group_member = true;
- } else if (key_type == NIX_FLOW_KEY_TYPE_UDP &&
+ } else if ((key_type == NIX_FLOW_KEY_TYPE_UDP ||
+ key_type == NIX_FLOW_KEY_TYPE_INNR_UDP) &&
valid_key) {
field->ltype_match |= NPC_LT_LD_UDP;
group_member = true;
- } else if (key_type == NIX_FLOW_KEY_TYPE_SCTP &&
+ } else if ((key_type == NIX_FLOW_KEY_TYPE_SCTP ||
+ key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) &&
valid_key) {
field->ltype_match |= NPC_LT_LD_SCTP;
group_member = true;
}
field->ltype_mask = ~field->ltype_match;
- if (key_type == NIX_FLOW_KEY_TYPE_SCTP) {
+ if (key_type == NIX_FLOW_KEY_TYPE_SCTP ||
+ key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) {
/* Handle the case where any of the group item
* is enabled in the group but not the final one
*/
@@ -2084,13 +2287,73 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
valid_key = true;
group_member = false;
}
- field_marker = true;
- keyoff_marker = true;
} else {
field_marker = false;
keyoff_marker = false;
}
break;
+ case NIX_FLOW_KEY_TYPE_NVGRE:
+ field->lid = NPC_LID_LD;
+ field->hdr_offset = 4; /* VSID offset */
+ field->bytesm1 = 2;
+ field->ltype_match = NPC_LT_LD_NVGRE;
+ field->ltype_mask = 0xF;
+ break;
+ case NIX_FLOW_KEY_TYPE_VXLAN:
+ case NIX_FLOW_KEY_TYPE_GENEVE:
+ field->lid = NPC_LID_LE;
+ field->bytesm1 = 2;
+ field->hdr_offset = 4;
+ field->ltype_mask = 0xF;
+ field_marker = false;
+ keyoff_marker = false;
+
+ if (key_type == NIX_FLOW_KEY_TYPE_VXLAN && valid_key) {
+ field->ltype_match |= NPC_LT_LE_VXLAN;
+ group_member = true;
+ }
+
+ if (key_type == NIX_FLOW_KEY_TYPE_GENEVE && valid_key) {
+ field->ltype_match |= NPC_LT_LE_GENEVE;
+ group_member = true;
+ }
+
+ if (key_type == NIX_FLOW_KEY_TYPE_GENEVE) {
+ if (group_member) {
+ field->ltype_mask = ~field->ltype_match;
+ field_marker = true;
+ keyoff_marker = true;
+ valid_key = true;
+ group_member = false;
+ }
+ }
+ break;
+ case NIX_FLOW_KEY_TYPE_ETH_DMAC:
+ case NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC:
+ field->lid = NPC_LID_LA;
+ field->ltype_match = NPC_LT_LA_ETHER;
+ if (key_type == NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC) {
+ field->lid = NPC_LID_LF;
+ field->ltype_match = NPC_LT_LF_TU_ETHER;
+ }
+ field->hdr_offset = 0;
+ field->bytesm1 = 5; /* DMAC 6 Byte */
+ field->ltype_mask = 0xF;
+ break;
+ case NIX_FLOW_KEY_TYPE_IPV6_EXT:
+ field->lid = NPC_LID_LC;
+ field->hdr_offset = 40; /* past the fixed IPv6 hdr */
+ field->bytesm1 = 0; /* 1 byte ext hdr */
+ field->ltype_match = NPC_LT_LC_IP6_EXT;
+ field->ltype_mask = 0xF;
+ break;
+ case NIX_FLOW_KEY_TYPE_GTPU:
+ field->lid = NPC_LID_LE;
+ field->hdr_offset = 4;
+ field->bytesm1 = 3; /* 4 bytes TEID */
+ field->ltype_match = NPC_LT_LE_GTPU;
+ field->ltype_mask = 0xF;
+ break;
}
field->ena = 1;
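Each case above only describes a byte window inside a matched layer; NPC copies bytes hdr_offset through hdr_offset + bytesm1 of that layer into the RSS flow key. A worked reading of the GTPU case (interpretation hedged):

/* NIX_FLOW_KEY_TYPE_GTPU: lid/ltype select the GTP-U header (LE),
 * hdr_offset = 4 with bytesm1 = 3 extracts bytes 4..7 of it, i.e.
 * the 4-byte TEID, into the flow key used for RSS hashing.
 */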
@@ -2449,8 +2712,6 @@ linkcfg:
cfg &= ~(0xFFFFFULL << 12);
cfg |= ((lmac_fifo_len - req->maxlen) / 16) << 12;
rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg);
- rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_EXPR_CREDIT(link), cfg);
-
return 0;
}
@@ -2591,9 +2852,6 @@ static void nix_link_config(struct rvu *rvu, int blkaddr)
rvu_write64(rvu, blkaddr,
NIX_AF_TX_LINKX_NORM_CREDIT(link),
tx_credits);
- rvu_write64(rvu, blkaddr,
- NIX_AF_TX_LINKX_EXPR_CREDIT(link),
- tx_credits);
}
}
@@ -2605,8 +2863,6 @@ static void nix_link_config(struct rvu *rvu, int blkaddr)
tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
rvu_write64(rvu, blkaddr,
NIX_AF_TX_LINKX_NORM_CREDIT(link), tx_credits);
- rvu_write64(rvu, blkaddr,
- NIX_AF_TX_LINKX_EXPR_CREDIT(link), tx_credits);
}
}
@@ -2674,6 +2930,10 @@ static int nix_aq_init(struct rvu *rvu, struct rvu_block *block)
/* Do not bypass NDC cache */
cfg = rvu_read64(rvu, block->addr, NIX_AF_NDC_CFG);
cfg &= ~0x3FFEULL;
+#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
+ /* Disable caching of SQB aka SQEs */
+ cfg |= 0x04ULL;
+#endif
rvu_write64(rvu, block->addr, NIX_AF_NDC_CFG, cfg);
/* Result structure can be followed by RQ/SQ/CQ context at
@@ -2704,13 +2964,25 @@ int rvu_nix_init(struct rvu *rvu)
return 0;
block = &hw->block[blkaddr];
- /* As per a HW errata in 9xxx A0 silicon, NIX may corrupt
- * internal state when conditional clocks are turned off.
- * Hence enable them.
- */
- if (is_rvu_9xxx_A0(rvu))
+ if (is_rvu_96xx_B0(rvu)) {
+ /* As per a HW erratum in 96xx A0/B0 silicon, NIX may corrupt
+ * internal state when conditional clocks are turned off.
+ * Hence enable them.
+ */
rvu_write64(rvu, blkaddr, NIX_AF_CFG,
- rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x5EULL);
+ rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x40ULL);
+
+ /* Set chan/link to backpressure TL3 instead of TL2 */
+ rvu_write64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, 0x01);
+
+ /* Disable SQ manager's sticky mode operation (set TM6 = 0).
+ * This sticky mode is known to cause SQ stalls when multiple
+ * SQs are mapped to the same SMQ and are transmitting pkts
+ * at the same time.
+ */
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS);
+ cfg &= ~BIT_ULL(15);
+ rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg);
+ }
/* Calibrate X2P bus to check if CGX/LBK links are fine */
err = nix_calibrate_x2p(rvu, blkaddr);
@@ -2763,23 +3035,23 @@ int rvu_nix_init(struct rvu *rvu)
rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4,
(NPC_LID_LC << 8) | (NPC_LT_LC_IP << 4) | 0x0F);
rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4,
- (NPC_LID_LF << 8) | (NPC_LT_LF_TU_IP << 4) | 0x0F);
+ (NPC_LID_LG << 8) | (NPC_LT_LG_TU_IP << 4) | 0x0F);
rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6,
(NPC_LID_LC << 8) | (NPC_LT_LC_IP6 << 4) | 0x0F);
rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6,
- (NPC_LID_LF << 8) | (NPC_LT_LF_TU_IP6 << 4) | 0x0F);
+ (NPC_LID_LG << 8) | (NPC_LT_LG_TU_IP6 << 4) | 0x0F);
rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OTCP,
(NPC_LID_LD << 8) | (NPC_LT_LD_TCP << 4) | 0x0F);
rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ITCP,
- (NPC_LID_LG << 8) | (NPC_LT_LG_TU_TCP << 4) | 0x0F);
+ (NPC_LID_LH << 8) | (NPC_LT_LH_TU_TCP << 4) | 0x0F);
rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OUDP,
(NPC_LID_LD << 8) | (NPC_LT_LD_UDP << 4) | 0x0F);
rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IUDP,
- (NPC_LID_LG << 8) | (NPC_LT_LG_TU_UDP << 4) | 0x0F);
+ (NPC_LID_LH << 8) | (NPC_LT_LH_TU_UDP << 4) | 0x0F);
rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OSCTP,
(NPC_LID_LD << 8) | (NPC_LT_LD_SCTP << 4) | 0x0F);
rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ISCTP,
- (NPC_LID_LG << 8) | (NPC_LT_LG_TU_SCTP << 4) |
+ (NPC_LID_LH << 8) | (NPC_LT_LH_TU_SCTP << 4) |
0x0F);
err = nix_rx_flowkey_alg_cfg(rvu, blkaddr);
@@ -2825,7 +3097,7 @@ void rvu_nix_freemem(struct rvu *rvu)
}
}
-static int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf)
+int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf)
{
struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
struct rvu_hwinfo *hw = rvu->hw;
@@ -2853,7 +3125,8 @@ int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
return err;
rvu_npc_enable_default_entries(rvu, pcifunc, nixlf);
- return 0;
+
+ return rvu_cgx_start_stop_io(rvu, pcifunc, true);
}
int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
@@ -2867,7 +3140,8 @@ int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
return err;
rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
- return 0;
+
+ return rvu_cgx_start_stop_io(rvu, pcifunc, false);
}
void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
@@ -2883,6 +3157,8 @@ void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
nix_rx_sync(rvu, blkaddr);
nix_txschq_free(rvu, pcifunc);
+ rvu_cgx_start_stop_io(rvu, pcifunc, false);
+
if (pfvf->sq_ctx) {
ctx_req.ctype = NIX_AQ_CTYPE_SQ;
err = nix_lf_hwctx_disable(rvu, &ctx_req);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
index c0e165dfc403..6e7c7f459f74 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
@@ -52,8 +52,8 @@ static int npa_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
return 0;
}
-static int rvu_npa_aq_enq_inst(struct rvu *rvu, struct npa_aq_enq_req *req,
- struct npa_aq_enq_rsp *rsp)
+int rvu_npa_aq_enq_inst(struct rvu *rvu, struct npa_aq_enq_req *req,
+ struct npa_aq_enq_rsp *rsp)
{
struct rvu_hwinfo *hw = rvu->hw;
u16 pcifunc = req->hdr.pcifunc;
@@ -241,12 +241,50 @@ static int npa_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
return err;
}
+#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
+static int npa_lf_hwctx_lockdown(struct rvu *rvu, struct npa_aq_enq_req *req)
+{
+ struct npa_aq_enq_req lock_ctx_req;
+ int err;
+
+ if (req->op != NPA_AQ_INSTOP_INIT)
+ return 0;
+
+ memset(&lock_ctx_req, 0, sizeof(struct npa_aq_enq_req));
+ lock_ctx_req.hdr.pcifunc = req->hdr.pcifunc;
+ lock_ctx_req.ctype = req->ctype;
+ lock_ctx_req.op = NPA_AQ_INSTOP_LOCK;
+ lock_ctx_req.aura_id = req->aura_id;
+ err = rvu_npa_aq_enq_inst(rvu, &lock_ctx_req, NULL);
+ if (err)
+ dev_err(rvu->dev,
+ "PFUNC 0x%x: Failed to lock NPA context %s:%d\n",
+ req->hdr.pcifunc,
+ (req->ctype == NPA_AQ_CTYPE_AURA) ?
+ "Aura" : "Pool", req->aura_id);
+ return err;
+}
+
+int rvu_mbox_handler_npa_aq_enq(struct rvu *rvu,
+ struct npa_aq_enq_req *req,
+ struct npa_aq_enq_rsp *rsp)
+{
+ int err;
+
+ err = rvu_npa_aq_enq_inst(rvu, req, rsp);
+ if (!err)
+ err = npa_lf_hwctx_lockdown(rvu, req);
+ return err;
+}
+#else
+
int rvu_mbox_handler_npa_aq_enq(struct rvu *rvu,
struct npa_aq_enq_req *req,
struct npa_aq_enq_rsp *rsp)
{
return rvu_npa_aq_enq_inst(rvu, req, rsp);
}
+#endif
int rvu_mbox_handler_npa_hwctx_disable(struct rvu *rvu,
struct hwctx_disable_req *req,
@@ -289,6 +327,9 @@ int rvu_mbox_handler_npa_lf_alloc(struct rvu *rvu,
req->aura_sz == NPA_AURA_SZ_0 || !req->nr_pools)
return NPA_AF_ERR_PARAM;
+ if (req->way_mask)
+ req->way_mask &= 0xFFFF;
+
pfvf = rvu_get_pfvf(rvu, pcifunc);
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, pcifunc);
if (!pfvf->npalf || blkaddr < 0)
@@ -345,7 +386,8 @@ int rvu_mbox_handler_npa_lf_alloc(struct rvu *rvu,
/* Clear way partition mask and set aura offset to '0' */
cfg &= ~(BIT_ULL(34) - 1);
/* Set aura size & enable caching of contexts */
- cfg |= (req->aura_sz << 16) | BIT_ULL(34);
+ cfg |= (req->aura_sz << 16) | BIT_ULL(34) | req->way_mask;
+
rvu_write64(rvu, blkaddr, NPA_AF_LFX_AURAS_CFG(npalf), cfg);
/* Configure aura HW context's base */
@@ -353,7 +395,8 @@ int rvu_mbox_handler_npa_lf_alloc(struct rvu *rvu,
(u64)pfvf->aura_ctx->iova);
/* Enable caching of qints hw context */
- rvu_write64(rvu, blkaddr, NPA_AF_LFX_QINTS_CFG(npalf), BIT_ULL(36));
+ rvu_write64(rvu, blkaddr, NPA_AF_LFX_QINTS_CFG(npalf),
+ BIT_ULL(36) | req->way_mask << 20);
rvu_write64(rvu, blkaddr, NPA_AF_LFX_QINTS_BASE(npalf),
(u64)pfvf->npa_qints_ctx->iova);
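The new way_mask plumbing lets an LF restrict which NDC cache ways its contexts occupy; the 0xFFFF clamp above suggests a 16-way cache, with the mask OR'd into the low bits of AURAS_CFG and shifted by 20 for QINTS_CFG. A hedged example request:

/* Hypothetical: confine this LF's aura/pool contexts to NDC ways 0-7,
 * leaving ways 8-15 to other LFs (16-way width assumed from the mask).
 */
req->way_mask = 0x00FF;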
@@ -422,6 +465,10 @@ static int npa_aq_init(struct rvu *rvu, struct rvu_block *block)
/* Do not bypass NDC cache */
cfg = rvu_read64(rvu, block->addr, NPA_AF_NDC_CFG);
cfg &= ~0x03DULL;
+#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
+ /* Disable caching of stack pages */
+ cfg |= 0x10ULL;
+#endif
rvu_write64(rvu, block->addr, NPA_AF_NDC_CFG, cfg);
/* Result structure can be followed by Aura/Pool context at
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
index 15f70273e29c..40e431debbe9 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
@@ -120,6 +120,31 @@ static void npc_enable_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
}
}
+static void npc_clear_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, int index)
+{
+ int bank = npc_get_bank(mcam, index);
+ int actbank = bank;
+
+ index &= (mcam->banksize - 1);
+ for (; bank < (actbank + mcam->banks_per_entry); bank++) {
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_INTF(index, bank, 1), 0);
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_INTF(index, bank, 0), 0);
+
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_W0(index, bank, 1), 0);
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_W0(index, bank, 0), 0);
+
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_W1(index, bank, 1), 0);
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_W1(index, bank, 0), 0);
+ }
+}
+
static void npc_get_keyword(struct mcam_entry *entry, int idx,
u64 *cam0, u64 *cam1)
{
@@ -211,6 +236,12 @@ static void npc_config_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
actindex = index;
index &= (mcam->banksize - 1);
+ /* Disable before mcam entry update */
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, actindex, false);
+
+ /* Clear mcam entry to avoid writes being suppressed by NPC */
+ npc_clear_mcam_entry(rvu, mcam, blkaddr, actindex);
+
/* CAM1 takes the comparison value and
* CAM0 specifies match for a bit in key being '0' or '1' or 'dontcare'.
* CAM1<n> = 0 & CAM0<n> = 1 => match if key<n> = 0
@@ -251,8 +282,6 @@ static void npc_config_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
/* Enable the entry */
if (enable)
npc_enable_mcam_entry(rvu, mcam, blkaddr, actindex, true);
- else
- npc_enable_mcam_entry(rvu, mcam, blkaddr, actindex, false);
}
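With the hunks above, every MCAM entry write now follows one fixed sequence; in outline (editorial summary of the code above, not new driver logic):

/* MCAM safe-update sequence:
 *   1. npc_enable_mcam_entry(..., false) -- stop the entry matching
 *   2. npc_clear_mcam_entry(...)         -- zero CAM words so the new
 *                                           write isn't suppressed
 *   3. program INTF/W0/W1 CAM words and the action
 *   4. npc_enable_mcam_entry(..., true)  -- only if 'enable' was set
 */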
static void npc_copy_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
@@ -354,8 +383,8 @@ void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
NIX_INTF_RX, &entry, true);
/* add VLAN matching, setup action and save entry back for later */
- entry.kw[0] |= (NPC_LT_LB_STAG | NPC_LT_LB_CTAG) << 20;
- entry.kw_mask[0] |= (NPC_LT_LB_STAG & NPC_LT_LB_CTAG) << 20;
+ entry.kw[0] |= (NPC_LT_LB_STAG_QINQ | NPC_LT_LB_CTAG) << 20;
+ entry.kw_mask[0] |= (NPC_LT_LB_STAG_QINQ & NPC_LT_LB_CTAG) << 20;
entry.vtag_action = VTAG0_VALID_BIT |
FIELD_PREP(VTAG0_TYPE_MASK, 0) |
@@ -448,68 +477,75 @@ void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
{
struct npc_mcam *mcam = &rvu->hw->mcam;
struct mcam_entry entry = { {0} };
+ struct rvu_hwinfo *hw = rvu->hw;
struct nix_rx_action action;
-#ifdef MCAST_MCE
struct rvu_pfvf *pfvf;
-#endif
int blkaddr, index;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
if (blkaddr < 0)
return;
- /* Only PF can add a bcast match entry */
- if (pcifunc & RVU_PFVF_FUNC_MASK)
+ /* Skip LBK VFs */
+ if (is_afvf(pcifunc))
return;
-#ifdef MCAST_MCE
- pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
-#endif
+ /* If pkt replication is not supported,
+ * then only PF is allowed to add a bcast match entry.
+ */
+ if (!hw->cap.nix_rx_multicast && pcifunc & RVU_PFVF_FUNC_MASK)
+ return;
+
+ /* Get 'pcifunc' of PF device */
+ pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK;
index = npc_get_nixlf_mcam_index(mcam, pcifunc,
nixlf, NIXLF_BCAST_ENTRY);
- /* Check for L2B bit and LMAC channel
- * NOTE: Since MKEX default profile(a reduced version intended to
- * accommodate more capability but igoring few bits) a stap-gap
- * approach.
- * Since we care for L2B which by HRM NPC_PARSE_KEX_S at BIT_POS[25], So
- * moved to BIT_POS[13], ignoring ERRCODE, ERRLEV as we'll loose out
- * on capability features needed for CoS (/from ODP PoV) e.g: VLAN,
- * DSCP.
- *
- * Reduced layout of MKEX default profile -
- * Includes following are (i.e.CHAN, L2/3{B/M}, LA, LB, LC, LD):
- *
- * BIT_POS[31:28] : LD
- * BIT_POS[27:24] : LC
- * BIT_POS[23:20] : LB
- * BIT_POS[19:16] : LA
- * BIT_POS[15:12] : L3B, L3M, L2B, L2M
- * BIT_POS[11:00] : CHAN
- *
+ /* Match ingress channel */
+ entry.kw[0] = chan;
+ entry.kw_mask[0] = 0xfffull;
+
+ /* Match broadcast MAC address.
+ * DMAC is extracted at 0th bit of PARSE_KEX::KW1
*/
- entry.kw[0] = BIT_ULL(13) | chan;
- entry.kw_mask[0] = BIT_ULL(13) | 0xFFFULL;
+ entry.kw[1] = 0xffffffffffffull;
+ entry.kw_mask[1] = 0xffffffffffffull;
*(u64 *)&action = 0x00;
-#ifdef MCAST_MCE
- /* Early silicon doesn't support pkt replication,
- * so install entry with UCAST action, so that PF
- * receives all broadcast packets.
- */
- action.op = NIX_RX_ACTIONOP_MCAST;
- action.pf_func = pcifunc;
- action.index = pfvf->bcast_mce_idx;
-#else
- action.op = NIX_RX_ACTIONOP_UCAST;
- action.pf_func = pcifunc;
-#endif
+ if (!hw->cap.nix_rx_multicast) {
+ /* Early silicon doesn't support pkt replication,
+ * so install the entry with a UCAST action so that
+ * the PF receives all broadcast packets.
+ */
+ action.op = NIX_RX_ACTIONOP_UCAST;
+ action.pf_func = pcifunc;
+ } else {
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ action.index = pfvf->bcast_mce_idx;
+ action.op = NIX_RX_ACTIONOP_MCAST;
+ }
entry.action = *(u64 *)&action;
npc_config_mcam_entry(rvu, mcam, blkaddr, index,
NIX_INTF_RX, &entry, true);
}
+void rvu_npc_disable_bcast_entry(struct rvu *rvu, u16 pcifunc)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ int blkaddr, index;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return;
+
+ /* Get 'pcifunc' of PF device */
+ pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK;
+
+ index = npc_get_nixlf_mcam_index(mcam, pcifunc, 0, NIXLF_BCAST_ENTRY);
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, index, false);
+}
+
void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
int group, int alg_idx, int mcam_index)
{
@@ -704,8 +740,7 @@ static void npc_config_ldata_extract(struct rvu *rvu, int blkaddr)
/* Layer B: Stacked VLAN (STAG|QinQ) */
/* CTAG VLAN[2..3] + Ethertype, 4 bytes, KW0[63:32] */
cfg = KEX_LD_CFG(0x03, 0x4, 0x1, 0x0, 0x4);
- SET_KEX_LD(NIX_INTF_RX, NPC_LID_LB, NPC_LT_LB_STAG, 0, cfg);
- SET_KEX_LD(NIX_INTF_RX, NPC_LID_LB, NPC_LT_LB_QINQ, 0, cfg);
+ SET_KEX_LD(NIX_INTF_RX, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 0, cfg);
/* Layer C: IPv4 */
/* SIP+DIP: 8 bytes, KW2[63:0] */
@@ -806,11 +841,11 @@ static void npc_load_mkex_profile(struct rvu *rvu, int blkaddr)
/* Compare with mkex mod_param name string */
if (mcam_kex->mkex_sign == MKEX_SIGN &&
!strncmp(mcam_kex->name, mkex_profile, MKEX_NAME_LEN)) {
- /* Due to an errata (35786) in A0 pass silicon,
+ /* Due to an erratum (35786) in A0/B0 pass silicon,
* parse nibble enable configuration has to be
* identical for both Rx and Tx interfaces.
*/
- if (is_rvu_9xxx_A0(rvu) &&
+ if (is_rvu_96xx_B0(rvu) &&
mcam_kex->keyx_cfg[NIX_INTF_RX] !=
mcam_kex->keyx_cfg[NIX_INTF_TX])
goto load_default;
@@ -1064,6 +1099,13 @@ static int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr)
mcam->hprio_count = mcam->lprio_count;
mcam->hprio_end = mcam->hprio_count;
+ /* Reserve last counter for MCAM RX miss action which is set to
+ * drop pkt. This way we will know how many pkts didn't match
+ * any MCAM entry.
+ */
+ mcam->counters.max--;
+ mcam->rx_miss_act_cntr = mcam->counters.max;
+
/* Allocate bitmap for managing MCAM counters and memory
* for saving counter to RVU PFFUNC allocation mapping.
*/
@@ -1101,6 +1143,7 @@ free_mem:
int rvu_npc_init(struct rvu *rvu)
{
struct npc_pkind *pkind = &rvu->hw->pkind;
+ struct npc_mcam *mcam = &rvu->hw->mcam;
u64 keyz = NPC_MCAM_KEY_X2;
int blkaddr, entry, bank, err;
u64 cfg, nibble_ena;
@@ -1143,7 +1186,7 @@ int rvu_npc_init(struct rvu *rvu)
/* Config Inner IPV4 NPC layer info */
rvu_write64(rvu, blkaddr, NPC_AF_PCK_DEF_IIP4,
- (NPC_LID_LF << 8) | (NPC_LT_LF_TU_IP << 4) | 0x0F);
+ (NPC_LID_LG << 8) | (NPC_LT_LG_TU_IP << 4) | 0x0F);
/* Enable below for Rx pkts.
* - Outer IPv4 header checksum validation.
@@ -1165,7 +1208,7 @@ int rvu_npc_init(struct rvu *rvu)
/* Due to an errata (35786) in A0 pass silicon, parse nibble enable
* configuration has to be identical for both Rx and Tx interfaces.
*/
- if (!is_rvu_9xxx_A0(rvu))
+ if (!is_rvu_96xx_B0(rvu))
nibble_ena = (1ULL << 19) - 1;
rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_TX),
((keyz & 0x3) << 32) | nibble_ena);
@@ -1183,9 +1226,13 @@ int rvu_npc_init(struct rvu *rvu)
rvu_write64(rvu, blkaddr, NPC_AF_INTFX_MISS_ACT(NIX_INTF_TX),
NIX_TX_ACTIONOP_UCAST_DEFAULT);
- /* If MCAM lookup doesn't result in a match, drop the received packet */
+ /* If MCAM lookup doesn't result in a match, drop the received packet.
+ * Also map this action to a counter to count dropped pkts.
+ */
rvu_write64(rvu, blkaddr, NPC_AF_INTFX_MISS_ACT(NIX_INTF_RX),
NIX_RX_ACTIONOP_DROP);
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_MISS_STAT_ACT(NIX_INTF_RX),
+ BIT_ULL(9) | mcam->rx_miss_act_cntr);
return 0;
}
@@ -1200,6 +1247,44 @@ void rvu_npc_freemem(struct rvu *rvu)
mutex_destroy(&mcam->lock);
}
+void rvu_npc_get_mcam_entry_alloc_info(struct rvu *rvu, u16 pcifunc,
+ int blkaddr, int *alloc_cnt,
+ int *enable_cnt)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ int entry;
+
+ *alloc_cnt = 0;
+ *enable_cnt = 0;
+
+ for (entry = 0; entry < mcam->bmap_entries; entry++) {
+ if (mcam->entry2pfvf_map[entry] == pcifunc) {
+ (*alloc_cnt)++;
+ if (is_mcam_entry_enabled(rvu, mcam, blkaddr, entry))
+ (*enable_cnt)++;
+ }
+ }
+}
+
+void rvu_npc_get_mcam_counter_alloc_info(struct rvu *rvu, u16 pcifunc,
+ int blkaddr, int *alloc_cnt,
+ int *enable_cnt)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ int cntr;
+
+ *alloc_cnt = 0;
+ *enable_cnt = 0;
+
+ for (cntr = 0; cntr < mcam->counters.max; cntr++) {
+ if (mcam->cntr2pfvf_map[cntr] == pcifunc) {
+ (*alloc_cnt)++;
+ if (mcam->cntr_refcnt[cntr])
+ (*enable_cnt)++;
+ }
+ }
+}
+
static int npc_mcam_verify_entry(struct npc_mcam *mcam,
u16 pcifunc, int entry)
{
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
index 09a8d61f3144..7ca599b973c0 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- * Marvell OcteonTx2 RVU Admin Function driver
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell OcteonTx2 RVU Admin Function driver
*
* Copyright (C) 2018 Marvell International Ltd.
*
@@ -246,6 +246,7 @@
#define NIX_AF_DEBUG_NPC_RESP_DATAX(a) (0x680 | (a) << 3)
#define NIX_AF_SMQX_CFG(a) (0x700 | (a) << 16)
+#define NIX_AF_SQM_DBG_CTL_STATUS (0x750)
#define NIX_AF_PSE_CHANNEL_LEVEL (0x800)
#define NIX_AF_PSE_SHAPER_CFG (0x810)
#define NIX_AF_TX_EXPR_CREDIT (0x830)
@@ -435,7 +436,6 @@
#define CPT_AF_LF_RST (0x44000)
#define CPT_AF_BLK_RST (0x46000)
-#define NDC_AF_BLK_RST (0x002F0)
#define NPC_AF_BLK_RST (0x00040)
/* NPC */
@@ -499,4 +499,30 @@
#define NPC_AF_DBG_DATAX(a) (0x3001400 | (a) << 4)
#define NPC_AF_DBG_RESULTX(a) (0x3001800 | (a) << 4)
+/* NDC */
+#define NDC_AF_CONST (0x00000)
+#define NDC_AF_CLK_EN (0x00020)
+#define NDC_AF_CTL (0x00030)
+#define NDC_AF_BANK_CTL (0x00040)
+#define NDC_AF_BANK_CTL_DONE (0x00048)
+#define NDC_AF_INTR (0x00058)
+#define NDC_AF_INTR_W1S (0x00060)
+#define NDC_AF_INTR_ENA_W1S (0x00068)
+#define NDC_AF_INTR_ENA_W1C (0x00070)
+#define NDC_AF_ACTIVE_PC (0x00078)
+#define NDC_AF_BP_TEST_ENABLE (0x001F8)
+#define NDC_AF_BP_TEST(a) (0x00200 | (a) << 3)
+#define NDC_AF_BLK_RST (0x002F0)
+#define NDC_PRIV_AF_INT_CFG (0x002F8)
+#define NDC_AF_HASHX(a) (0x00300 | (a) << 3)
+#define NDC_AF_PORTX_RTX_RWX_REQ_PC(a, b, c) \
+ (0x00C00 | (a) << 5 | (b) << 4 | (c) << 3)
+#define NDC_AF_PORTX_RTX_RWX_OSTDN_PC(a, b, c) \
+ (0x00D00 | (a) << 5 | (b) << 4 | (c) << 3)
+#define NDC_AF_PORTX_RTX_RWX_LAT_PC(a, b, c) \
+ (0x00E00 | (a) << 5 | (b) << 4 | (c) << 3)
+#define NDC_AF_PORTX_RTX_CANT_ALLOC_PC(a, b) \
+ (0x00F00 | (a) << 5 | (b) << 4)
+#define NDC_AF_BANKX_HIT_PC(a) (0x01000 | (a) << 3)
+#define NDC_AF_BANKX_MISS_PC(a) (0x01100 | (a) << 3)
#endif /* RVU_REG_H */
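The parameterised NDC counter macros fold the port and the two selector bits into the register offset; a worked expansion (pure arithmetic on the macro above; the selector semantics are not spelled out by this header):

/* NDC_AF_PORTX_RTX_RWX_REQ_PC(1, 0, 1)
 *   = 0x00C00 | (1 << 5) | (0 << 4) | (1 << 3)
 *   = 0x00C28
 */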
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
index f920dac74e6c..9d8942acc232 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- * Marvell OcteonTx2 RVU Admin Function driver
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell OcteonTx2 RVU Admin Function driver
*
* Copyright (C) 2018 Marvell International Ltd.
*
@@ -13,22 +13,22 @@
/* RVU Block Address Enumeration */
enum rvu_block_addr_e {
- BLKADDR_RVUM = 0x0ULL,
- BLKADDR_LMT = 0x1ULL,
- BLKADDR_MSIX = 0x2ULL,
- BLKADDR_NPA = 0x3ULL,
- BLKADDR_NIX0 = 0x4ULL,
- BLKADDR_NIX1 = 0x5ULL,
- BLKADDR_NPC = 0x6ULL,
- BLKADDR_SSO = 0x7ULL,
- BLKADDR_SSOW = 0x8ULL,
- BLKADDR_TIM = 0x9ULL,
- BLKADDR_CPT0 = 0xaULL,
- BLKADDR_CPT1 = 0xbULL,
- BLKADDR_NDC0 = 0xcULL,
- BLKADDR_NDC1 = 0xdULL,
- BLKADDR_NDC2 = 0xeULL,
- BLK_COUNT = 0xfULL,
+ BLKADDR_RVUM = 0x0ULL,
+ BLKADDR_LMT = 0x1ULL,
+ BLKADDR_MSIX = 0x2ULL,
+ BLKADDR_NPA = 0x3ULL,
+ BLKADDR_NIX0 = 0x4ULL,
+ BLKADDR_NIX1 = 0x5ULL,
+ BLKADDR_NPC = 0x6ULL,
+ BLKADDR_SSO = 0x7ULL,
+ BLKADDR_SSOW = 0x8ULL,
+ BLKADDR_TIM = 0x9ULL,
+ BLKADDR_CPT0 = 0xaULL,
+ BLKADDR_CPT1 = 0xbULL,
+ BLKADDR_NDC_NIX0_RX = 0xcULL,
+ BLKADDR_NDC_NIX0_TX = 0xdULL,
+ BLKADDR_NDC_NPA0 = 0xeULL,
+ BLK_COUNT = 0xfULL,
};
/* RVU Block Type Enumeration */
@@ -474,9 +474,9 @@ struct nix_cq_ctx_s {
u64 ena : 1;
u64 drop_ena : 1;
u64 drop : 8;
- u64 dp : 8;
+ u64 bp : 8;
#else
- u64 dp : 8;
+ u64 bp : 8;
u64 drop : 8;
u64 drop_ena : 1;
u64 ena : 1;
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index 51b77c2de400..3fb7ee3d4d13 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -1489,8 +1489,10 @@ static int pxa168_eth_probe(struct platform_device *pdev)
goto err_netdev;
}
of_property_read_u32(np, "reg", &pep->phy_addr);
- pep->phy_intf = of_get_phy_mode(pdev->dev.of_node);
of_node_put(np);
+ err = of_get_phy_mode(pdev->dev.of_node, &pep->phy_intf);
+ if (err && err != -ENODEV)
+ goto err_netdev;
}
/* Hardware supports only 3 ports */
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_path.c b/drivers/net/ethernet/mediatek/mtk_eth_path.c
index ef11cf3d1ccc..0fe97155dd8f 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_path.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_path.c
@@ -57,7 +57,7 @@ static int set_mux_gdm1_to_gmac1_esw(struct mtk_eth *eth, int path)
default:
updated = false;
break;
- };
+ }
if (updated) {
val = mtk_r32(eth, MTK_MAC_MISC);
@@ -143,7 +143,7 @@ static int set_mux_gmac1_gmac2_to_sgmii_rgmii(struct mtk_eth *eth, int path)
default:
updated = false;
break;
- };
+ }
if (updated)
regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
@@ -174,7 +174,7 @@ static int set_mux_gmac12_to_gephy_sgmii(struct mtk_eth *eth, int path)
break;
default:
updated = false;
- };
+ }
if (updated)
regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 703adb96429e..527ad2aadcca 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -361,8 +361,8 @@ init_err:
mac->id, phy_modes(state->interface), err);
}
-static int mtk_mac_link_state(struct phylink_config *config,
- struct phylink_link_state *state)
+static void mtk_mac_pcs_get_state(struct phylink_config *config,
+ struct phylink_link_state *state)
{
struct mtk_mac *mac = container_of(config, struct mtk_mac,
phylink_config);
@@ -391,8 +391,6 @@ static int mtk_mac_link_state(struct phylink_config *config,
state->pause |= MLO_PAUSE_RX;
if (pmsr & MAC_MSR_TX_FC)
state->pause |= MLO_PAUSE_TX;
-
- return 1;
}
static void mtk_mac_an_restart(struct phylink_config *config)
@@ -514,7 +512,7 @@ static void mtk_validate(struct phylink_config *config,
static const struct phylink_mac_ops mtk_phylink_ops = {
.validate = mtk_validate,
- .mac_link_state = mtk_mac_link_state,
+ .mac_pcs_get_state = mtk_mac_pcs_get_state,
.mac_an_restart = mtk_mac_an_restart,
.mac_config = mtk_mac_config,
.mac_link_down = mtk_mac_link_down,
@@ -2180,6 +2178,31 @@ static int mtk_start_dma(struct mtk_eth *eth)
return 0;
}
+static void mtk_gdm_config(struct mtk_eth *eth, u32 config)
+{
+ int i;
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
+ return;
+
+ for (i = 0; i < MTK_MAC_COUNT; i++) {
+ u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));
+
+ /* By default, set up the forward port to send frames to PDMA */
+ val &= ~0xffff;
+
+ /* Enable RX checksum */
+ val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;
+
+ val |= config;
+
+ mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i));
+ }
+ /* Reset and enable PSE */
+ mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
+ mtk_w32(eth, 0, MTK_RST_GL);
+}
+
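The config argument fills the per-frame-class forward-port fields that the function first clears with val &= ~0xffff. The two values defined for it in mtk_eth_soc.h are MTK_GDMA_TO_PDMA (0x0, everything forwarded to the PDMA, i.e. the CPU) and MTK_GDMA_DROP_ALL (0x7777); usage follows below, with mtk_open() routing traffic to the PDMA and mtk_stop() dropping all traffic before DMA teardown. A hedged reading of the drop value:

/* 0x7777: four 4-bit destination-port fields (one per frame class;
 * exact class naming assumed), each set to port 7, which the GDMA
 * treats as discard.
 */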
static int mtk_open(struct net_device *dev)
{
struct mtk_mac *mac = netdev_priv(dev);
@@ -2200,6 +2223,8 @@ static int mtk_open(struct net_device *dev)
if (err)
return err;
+ mtk_gdm_config(eth, MTK_GDMA_TO_PDMA);
+
napi_enable(&eth->tx_napi);
napi_enable(&eth->rx_napi);
mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
@@ -2252,6 +2277,8 @@ static int mtk_stop(struct net_device *dev)
if (!refcount_dec_and_test(&eth->dma_refcnt))
return 0;
+ mtk_gdm_config(eth, MTK_GDMA_DROP_ALL);
+
mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
napi_disable(&eth->tx_napi);
@@ -2375,8 +2402,6 @@ static int mtk_hw_init(struct mtk_eth *eth)
mtk_w32(eth, 0, MTK_QDMA_DELAY_INT);
mtk_tx_irq_disable(eth, ~0);
mtk_rx_irq_disable(eth, ~0);
- mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
- mtk_w32(eth, 0, MTK_RST_GL);
/* FE int grouping */
mtk_w32(eth, MTK_TX_DONE_INT, MTK_PDMA_INT_GRP1);
@@ -2385,19 +2410,6 @@ static int mtk_hw_init(struct mtk_eth *eth)
mtk_w32(eth, MTK_RX_DONE_INT, MTK_QDMA_INT_GRP2);
mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
- for (i = 0; i < MTK_MAC_COUNT; i++) {
- u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));
-
- /* setup the forward port to send frame to PDMA */
- val &= ~0xffff;
-
- /* Enable RX checksum */
- val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;
-
- /* setup the mac dma */
- mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i));
- }
-
return 0;
err_disable_pm:
@@ -2758,9 +2770,10 @@ static const struct net_device_ops mtk_netdev_ops = {
static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
{
const __be32 *_id = of_get_property(np, "reg", NULL);
+ phy_interface_t phy_mode;
struct phylink *phylink;
- int phy_mode, id, err;
struct mtk_mac *mac;
+ int id, err;
if (!_id) {
dev_err(eth->dev, "missing mac id\n");
@@ -2805,10 +2818,9 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;
/* phylink create */
- phy_mode = of_get_phy_mode(np);
- if (phy_mode < 0) {
+ err = of_get_phy_mode(np, &phy_mode);
+ if (err) {
dev_err(eth->dev, "incorrect phy-mode\n");
- err = -EINVAL;
goto free_netdev;
}
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index 76bd12cb8150..85830fe14a1b 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -84,6 +84,8 @@
#define MTK_GDMA_ICS_EN BIT(22)
#define MTK_GDMA_TCS_EN BIT(21)
#define MTK_GDMA_UCS_EN BIT(20)
+#define MTK_GDMA_TO_PDMA 0x0
+#define MTK_GDMA_DROP_ALL 0x7777
/* Unicast Filter MAC Address Register - Low */
#define MTK_GDMA_MAC_ADRL(x) (0x508 + (x * 0x1000))
diff --git a/drivers/net/ethernet/mediatek/mtk_sgmii.c b/drivers/net/ethernet/mediatek/mtk_sgmii.c
index 4db27dfc7ec1..32d83421226a 100644
--- a/drivers/net/ethernet/mediatek/mtk_sgmii.c
+++ b/drivers/net/ethernet/mediatek/mtk_sgmii.c
@@ -93,7 +93,7 @@ int mtk_sgmii_setup_mode_force(struct mtk_sgmii *ss, int id,
case SPEED_1000:
val |= SGMII_SPEED_1000;
break;
- };
+ }
if (state->duplex == DUPLEX_FULL)
val |= SGMII_DUPLEX_FULL;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index d8313e2ee600..a1202e53710c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -1745,6 +1745,7 @@ static int mlx4_en_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
err = mlx4_en_get_flow(dev, cmd, cmd->fs.location);
break;
case ETHTOOL_GRXCLSRLALL:
+ cmd->data = MAX_NUM_OF_FS_RULES;
while ((!err || err == -ENOENT) && priority < cmd->rule_cnt) {
err = mlx4_en_get_flow(dev, cmd, i);
if (!err)
@@ -1811,6 +1812,7 @@ static int mlx4_en_set_channels(struct net_device *dev,
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_port_profile new_prof;
struct mlx4_en_priv *tmp;
+ int total_tx_count;
int port_up = 0;
int xdp_count;
int err = 0;
@@ -1825,13 +1827,12 @@ static int mlx4_en_set_channels(struct net_device *dev,
mutex_lock(&mdev->state_lock);
xdp_count = priv->tx_ring_num[TX_XDP] ? channel->rx_count : 0;
- if (channel->tx_count * priv->prof->num_up + xdp_count >
- priv->mdev->profile.max_num_tx_rings_p_up * priv->prof->num_up) {
+ total_tx_count = channel->tx_count * priv->prof->num_up + xdp_count;
+ if (total_tx_count > MAX_TX_RINGS) {
err = -EINVAL;
en_err(priv,
"Total number of TX and XDP rings (%d) exceeds the maximum supported (%d)\n",
- channel->tx_count * priv->prof->num_up + xdp_count,
- MAX_TX_RINGS);
+ total_tx_count, MAX_TX_RINGS);
goto out;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 40ec5acf79c0..7af75b63245f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -91,6 +91,7 @@ int mlx4_en_alloc_tx_queue_per_tc(struct net_device *dev, u8 tc)
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_port_profile new_prof;
struct mlx4_en_priv *tmp;
+ int total_count;
int port_up = 0;
int err = 0;
@@ -104,6 +105,14 @@ int mlx4_en_alloc_tx_queue_per_tc(struct net_device *dev, u8 tc)
MLX4_EN_NUM_UP_HIGH;
new_prof.tx_ring_num[TX] = new_prof.num_tx_rings_p_up *
new_prof.num_up;
+ total_count = new_prof.tx_ring_num[TX] + new_prof.tx_ring_num[TX_XDP];
+ if (total_count > MAX_TX_RINGS) {
+ err = -EINVAL;
+ en_err(priv,
+ "Total number of TX and XDP rings (%d) exceeds the maximum supported (%d)\n",
+ total_count, MAX_TX_RINGS);
+ goto out;
+ }
err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, true);
if (err)
goto out;
@@ -2286,11 +2295,7 @@ int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv,
lockdep_is_held(&priv->mdev->state_lock));
if (xdp_prog && carry_xdp_prog) {
- xdp_prog = bpf_prog_add(xdp_prog, tmp->rx_ring_num);
- if (IS_ERR(xdp_prog)) {
- mlx4_en_free_resources(tmp);
- return PTR_ERR(xdp_prog);
- }
+ bpf_prog_add(xdp_prog, tmp->rx_ring_num);
for (i = 0; i < tmp->rx_ring_num; i++)
rcu_assign_pointer(tmp->rx_ring[i]->xdp_prog,
xdp_prog);
@@ -2782,11 +2787,9 @@ static int mlx4_xdp_set(struct net_device *dev, struct bpf_prog *prog)
* program for a new one.
*/
if (priv->tx_ring_num[TX_XDP] == xdp_ring_num) {
- if (prog) {
- prog = bpf_prog_add(prog, priv->rx_ring_num - 1);
- if (IS_ERR(prog))
- return PTR_ERR(prog);
- }
+ if (prog)
+ bpf_prog_add(prog, priv->rx_ring_num - 1);
+
mutex_lock(&mdev->state_lock);
for (i = 0; i < priv->rx_ring_num; i++) {
old_prog = rcu_dereference_protected(
@@ -2807,13 +2810,8 @@ static int mlx4_xdp_set(struct net_device *dev, struct bpf_prog *prog)
if (!tmp)
return -ENOMEM;
- if (prog) {
- prog = bpf_prog_add(prog, priv->rx_ring_num - 1);
- if (IS_ERR(prog)) {
- err = PTR_ERR(prog);
- goto out;
- }
- }
+ if (prog)
+ bpf_prog_add(prog, priv->rx_ring_num - 1);
mutex_lock(&mdev->state_lock);
memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
@@ -2862,7 +2860,6 @@ static int mlx4_xdp_set(struct net_device *dev, struct bpf_prog *prog)
unlock_out:
mutex_unlock(&mdev->state_lock);
-out:
kfree(tmp);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 69bb6bb06e76..5716c3d2bb86 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -3934,13 +3934,17 @@ static void mlx4_restart_one_down(struct pci_dev *pdev);
static int mlx4_restart_one_up(struct pci_dev *pdev, bool reload,
struct devlink *devlink);
-static int mlx4_devlink_reload_down(struct devlink *devlink,
+static int mlx4_devlink_reload_down(struct devlink *devlink, bool netns_change,
struct netlink_ext_ack *extack)
{
struct mlx4_priv *priv = devlink_priv(devlink);
struct mlx4_dev *dev = &priv->dev;
struct mlx4_dev_persistent *persist = dev->persist;
+ if (netns_change) {
+ NL_SET_ERR_MSG_MOD(extack, "Namespace change is not supported");
+ return -EOPNOTSUPP;
+ }
if (persist->num_vfs)
mlx4_warn(persist->dev, "Reload performed on PF, will cause reset on operating Virtual Functions\n");
mlx4_restart_one_down(persist->pdev);
@@ -4010,6 +4014,7 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_params_unregister;
devlink_params_publish(devlink);
+ devlink_reload_enable(devlink);
pci_save_state(pdev);
return 0;
@@ -4121,6 +4126,8 @@ static void mlx4_remove_one(struct pci_dev *pdev)
struct devlink *devlink = priv_to_devlink(priv);
int active_vfs = 0;
+ devlink_reload_disable(devlink);
+
if (mlx4_is_slave(dev))
persist->interface_state |= MLX4_INTERFACE_STATE_NOWAIT;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 5708fcc079ca..a6f390fdb971 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -70,7 +70,7 @@ mlx5_core-$(CONFIG_MLX5_EN_TLS) += en_accel/tls.o en_accel/tls_rxtx.o en_accel/t
mlx5_core-$(CONFIG_MLX5_SW_STEERING) += steering/dr_domain.o steering/dr_table.o \
steering/dr_matcher.o steering/dr_rule.o \
- steering/dr_icm_pool.o steering/dr_crc32.o \
+ steering/dr_icm_pool.o \
steering/dr_ste.o steering/dr_send.o \
steering/dr_cmd.o steering/dr_fw.o \
steering/dr_action.o steering/fs_dr.o
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index ea934cd02448..34cba97f7bf4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -866,7 +866,7 @@ static void cmd_work_handler(struct work_struct *work)
if (!ent->page_queue) {
alloc_ret = alloc_ent(cmd);
if (alloc_ret < 0) {
- mlx5_core_err(dev, "failed to allocate command entry\n");
+ mlx5_core_err_rl(dev, "failed to allocate command entry\n");
if (ent->callback) {
ent->callback(-EAGAIN, ent->context);
mlx5_free_cmd_msg(dev, ent->out);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
index 381925c90d94..ac108f1e5bd6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
@@ -85,6 +85,22 @@ mlx5_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req,
return 0;
}
+static int mlx5_devlink_reload_down(struct devlink *devlink, bool netns_change,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+
+ return mlx5_unload_one(dev, false);
+}
+
+static int mlx5_devlink_reload_up(struct devlink *devlink,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+
+ return mlx5_load_one(dev, false);
+}
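These two callbacks back the generic devlink reload flow: the core invokes reload_down, then reload_up, so mlx5 simply unloads and reloads the device (passing false to skip the full-cleanup path used only on driver removal -- an inference from the unload/load pairing). From userspace:

/* Userspace trigger via the devlink tool (device address is an
 * example):  devlink dev reload pci/0000:03:00.0
 */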
+
static const struct devlink_ops mlx5_devlink_ops = {
#ifdef CONFIG_MLX5_ESWITCH
.eswitch_mode_set = mlx5_devlink_eswitch_mode_set,
@@ -96,6 +112,8 @@ static const struct devlink_ops mlx5_devlink_ops = {
#endif
.flash_update = mlx5_devlink_flash_update,
.info_get = mlx5_devlink_info_get,
+ .reload_down = mlx5_devlink_reload_down,
+ .reload_up = mlx5_devlink_reload_up,
};
struct devlink *mlx5_devlink_alloc(void)
@@ -177,12 +195,29 @@ enum mlx5_devlink_param_id {
MLX5_DEVLINK_PARAM_FLOW_STEERING_MODE,
};
+static int mlx5_devlink_enable_roce_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ bool new_state = val.vbool;
+
+ if (new_state && !MLX5_CAP_GEN(dev, roce)) {
+ NL_SET_ERR_MSG_MOD(extack, "Device doesn't support RoCE");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
static const struct devlink_param mlx5_devlink_params[] = {
DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_FLOW_STEERING_MODE,
"flow_steering_mode", DEVLINK_PARAM_TYPE_STRING,
BIT(DEVLINK_PARAM_CMODE_RUNTIME),
mlx5_devlink_fs_mode_get, mlx5_devlink_fs_mode_set,
mlx5_devlink_fs_mode_validate),
+ DEVLINK_PARAM_GENERIC(ENABLE_ROCE, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
+ NULL, NULL, mlx5_devlink_enable_roce_validate),
};
static void mlx5_devlink_set_params_init_values(struct devlink *devlink)
@@ -197,6 +232,11 @@ static void mlx5_devlink_set_params_init_values(struct devlink *devlink)
devlink_param_driverinit_value_set(devlink,
MLX5_DEVLINK_PARAM_FLOW_STEERING_MODE,
value);
+
+ value.vbool = MLX5_CAP_GEN(dev, roce);
+ devlink_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE,
+ value);
}
int mlx5_devlink_register(struct devlink *devlink, struct device *dev)
@@ -213,6 +253,7 @@ int mlx5_devlink_register(struct devlink *devlink, struct device *dev)
goto params_reg_err;
mlx5_devlink_set_params_init_values(devlink);
devlink_params_publish(devlink);
+ devlink_reload_enable(devlink);
return 0;
params_reg_err:
@@ -222,6 +263,7 @@ params_reg_err:
void mlx5_devlink_unregister(struct devlink *devlink)
{
+ devlink_reload_disable(devlink);
devlink_params_unregister(devlink, mlx5_devlink_params,
ARRAY_SIZE(mlx5_devlink_params));
devlink_unregister(devlink);
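The devlink.c hunks above add ->reload_down/->reload_up callbacks and bracket the registered lifetime with devlink_reload_enable()/devlink_reload_disable(), so userspace can only trigger a reload while the driver is ready to service one. A minimal sketch of the same shape for a hypothetical driver (my_dev, my_load and my_unload stand in for the mlx5 equivalents):

#include <net/devlink.h>

struct my_dev { bool loaded; };		/* hypothetical driver state */

static int my_unload(struct my_dev *dev) { dev->loaded = false; return 0; }
static int my_load(struct my_dev *dev) { dev->loaded = true; return 0; }

static int my_reload_down(struct devlink *devlink, bool netns_change,
			  struct netlink_ext_ack *extack)
{
	/* Quiesce and tear down; the devlink instance stays registered. */
	return my_unload(devlink_priv(devlink));
}

static int my_reload_up(struct devlink *devlink,
			struct netlink_ext_ack *extack)
{
	return my_load(devlink_priv(devlink));
}

static const struct devlink_ops my_devlink_ops = {
	.reload_down = my_reload_down,
	.reload_up = my_reload_up,
};

The enable call belongs at the end of registration and the disable call at the start of unregistration, exactly as the hunks above do for mlx5 (and as the mlx4 hunk does on its remove path).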
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
index 633b117eb13e..7b672ada63a3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
@@ -175,7 +175,7 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
* @port_buffer: <output> port receive buffer configuration
* @change: <output>
*
- * Update buffer configuration based on pfc configuraiton and
+ * Update buffer configuration based on pfc configuration and
* priority to buffer mapping.
* Buffer's lossy bit is changed to:
* lossless if there is at least one PFC enabled priority
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
index b860569d4247..6c72b592315b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
@@ -222,7 +222,8 @@ static int mlx5e_rx_reporter_recover_from_ctx(struct mlx5e_err_ctx *err_ctx)
}
static int mlx5e_rx_reporter_recover(struct devlink_health_reporter *reporter,
- void *context)
+ void *context,
+ struct netlink_ext_ack *extack)
{
struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
struct mlx5e_err_ctx *err_ctx = context;
@@ -301,7 +302,8 @@ static int mlx5e_rx_reporter_build_diagnose_output(struct mlx5e_rq *rq,
}
static int mlx5e_rx_reporter_diagnose(struct devlink_health_reporter *reporter,
- struct devlink_fmsg *fmsg)
+ struct devlink_fmsg *fmsg,
+ struct netlink_ext_ack *extack)
{
struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
struct mlx5e_params *params = &priv->channels.params;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
index bfed558637c2..b468549e96ff 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
@@ -135,7 +135,8 @@ static int mlx5e_tx_reporter_recover_from_ctx(struct mlx5e_err_ctx *err_ctx)
}
static int mlx5e_tx_reporter_recover(struct devlink_health_reporter *reporter,
- void *context)
+ void *context,
+ struct netlink_ext_ack *extack)
{
struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
struct mlx5e_err_ctx *err_ctx = context;
@@ -205,7 +206,8 @@ mlx5e_tx_reporter_build_diagnose_output(struct devlink_fmsg *fmsg,
}
static int mlx5e_tx_reporter_diagnose(struct devlink_health_reporter *reporter,
- struct devlink_fmsg *fmsg)
+ struct devlink_fmsg *fmsg,
+ struct netlink_ext_ack *extack)
{
struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
struct mlx5e_txqsq *generic_sq = priv->txq2sq[0];
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
index 13af72556987..784b1e26f414 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
@@ -77,8 +77,8 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
struct neighbour **out_n,
u8 *out_ttl)
{
+ struct neighbour *n;
struct rtable *rt;
- struct neighbour *n = NULL;
#if IS_ENABLED(CONFIG_INET)
struct mlx5_core_dev *mdev = priv->mdev;
@@ -138,10 +138,9 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
struct neighbour **out_n,
u8 *out_ttl)
{
- struct neighbour *n = NULL;
struct dst_entry *dst;
+ struct neighbour *n;
-#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
int ret;
ret = ipv6_stub->ipv6_dst_lookup(dev_net(mirred_dev), NULL, &dst,
@@ -157,9 +156,6 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
dst_release(dst);
return ret;
}
-#else
- return -EOPNOTSUPP;
-#endif
n = dst_neigh_lookup(dst, &fl6->daddr);
dst_release(dst);
@@ -212,8 +208,8 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
const struct ip_tunnel_key *tun_key = &e->tun_info->key;
struct net_device *out_dev, *route_dev;
- struct neighbour *n = NULL;
struct flowi4 fl4 = {};
+ struct neighbour *n;
int ipv4_encap_size;
char *encap_header;
u8 nud_state, ttl;
@@ -239,12 +235,15 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
if (max_encap_size < ipv4_encap_size) {
mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
ipv4_encap_size, max_encap_size);
- return -EOPNOTSUPP;
+ err = -EOPNOTSUPP;
+ goto release_neigh;
}
encap_header = kzalloc(ipv4_encap_size, GFP_KERNEL);
- if (!encap_header)
- return -ENOMEM;
+ if (!encap_header) {
+ err = -ENOMEM;
+ goto release_neigh;
+ }
/* used by mlx5e_detach_encap to lookup a neigh hash table
* entry in the neigh hash table when a user deletes a rule
@@ -295,7 +294,7 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
/* the encap entry will be made valid on neigh update event
* and not used before that.
*/
- goto out;
+ goto release_neigh;
}
e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev,
e->reformat_type,
@@ -315,9 +314,8 @@ destroy_neigh_entry:
mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
free_encap:
kfree(encap_header);
-out:
- if (n)
- neigh_release(n);
+release_neigh:
+ neigh_release(n);
return err;
}
@@ -328,9 +326,9 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
const struct ip_tunnel_key *tun_key = &e->tun_info->key;
struct net_device *out_dev, *route_dev;
- struct neighbour *n = NULL;
struct flowi6 fl6 = {};
struct ipv6hdr *ip6h;
+ struct neighbour *n;
int ipv6_encap_size;
char *encap_header;
u8 nud_state, ttl;
@@ -355,12 +353,15 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
if (max_encap_size < ipv6_encap_size) {
mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
ipv6_encap_size, max_encap_size);
- return -EOPNOTSUPP;
+ err = -EOPNOTSUPP;
+ goto release_neigh;
}
encap_header = kzalloc(ipv6_encap_size, GFP_KERNEL);
- if (!encap_header)
- return -ENOMEM;
+ if (!encap_header) {
+ err = -ENOMEM;
+ goto release_neigh;
+ }
/* used by mlx5e_detach_encap to lookup a neigh hash table
* entry in the neigh hash table when a user deletes a rule
@@ -410,7 +411,7 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
/* the encap entry will be made valid on neigh update event
* and not used before that.
*/
- goto out;
+ goto release_neigh;
}
e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev,
@@ -431,9 +432,8 @@ destroy_neigh_entry:
mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
free_encap:
kfree(encap_header);
-out:
- if (n)
- neigh_release(n);
+release_neigh:
+ neigh_release(n);
return err;
}
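The reworked error paths above can release the neighbour unconditionally because every jump to release_neigh happens after the lookup succeeded; the old out: label needed an if (n) check since n started out NULL. A sketch of the label-per-resource unwind pattern, with demo_neigh_lookup() and demo_install() as hypothetical helpers (demo_install() is assumed to copy hdr):

#include <linux/slab.h>
#include <net/neighbour.h>

static int demo_build(struct net_device *dev)
{
	struct neighbour *n;
	char *hdr;
	int err;

	n = demo_neigh_lookup(dev);	/* hypothetical; returns a held ref */
	if (!n)
		return -ENOENT;

	hdr = kzalloc(64, GFP_KERNEL);
	if (!hdr) {
		err = -ENOMEM;
		goto release_neigh;	/* n is always held here */
	}

	err = demo_install(hdr);	/* hypothetical; copies hdr */
	if (err)
		goto free_hdr;
	err = 0;

free_hdr:
	kfree(hdr);
release_neigh:
	neigh_release(n);
	return err;
}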
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
index c362b9225dc2..6f9a78c85ffd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
@@ -58,9 +58,16 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
struct net_device *mirred_dev,
struct mlx5e_encap_entry *e);
+#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
struct net_device *mirred_dev,
struct mlx5e_encap_entry *e);
+#else
+static inline int
+mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
+ struct net_device *mirred_dev,
+ struct mlx5e_encap_entry *e) { return -EOPNOTSUPP; }
+#endif
bool mlx5e_tc_tun_device_to_offload(struct mlx5e_priv *priv,
struct net_device *netdev);
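Moving the CONFIG_IPV6 guard out of tc_tun.c and into the header, with a static inline stub that returns -EOPNOTSUPP, keeps the call sites free of #ifdefs and lets the compiler type-check both configurations. The same pattern in miniature (do_ipv6_thing() is a hypothetical function):

#include <linux/errno.h>
#include <linux/kconfig.h>

struct net_device;

#if IS_ENABLED(CONFIG_IPV6)
int do_ipv6_thing(struct net_device *dev);
#else
/* Stub compiled when IPv6 is off: callers need no #ifdef of their own. */
static inline int do_ipv6_thing(struct net_device *dev)
{
	return -EOPNOTSUPP;
}
#endif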
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 327c93a7bd55..95601269fa2e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -708,9 +708,9 @@ static int get_fec_supported_advertised(struct mlx5_core_dev *dev,
static void ptys2ethtool_supported_advertised_port(struct ethtool_link_ksettings *link_ksettings,
u32 eth_proto_cap,
- u8 connector_type)
+ u8 connector_type, bool ext)
{
- if (!connector_type || connector_type >= MLX5E_CONNECTOR_TYPE_NUMBER) {
+ if ((!connector_type && !ext) || connector_type >= MLX5E_CONNECTOR_TYPE_NUMBER) {
if (eth_proto_cap & (MLX5E_PROT_MASK(MLX5E_10GBASE_CR)
| MLX5E_PROT_MASK(MLX5E_10GBASE_SR)
| MLX5E_PROT_MASK(MLX5E_40GBASE_CR4)
@@ -842,9 +842,9 @@ static int ptys2connector_type[MLX5E_CONNECTOR_TYPE_NUMBER] = {
[MLX5E_PORT_OTHER] = PORT_OTHER,
};
-static u8 get_connector_port(u32 eth_proto, u8 connector_type)
+static u8 get_connector_port(u32 eth_proto, u8 connector_type, bool ext)
{
- if (connector_type && connector_type < MLX5E_CONNECTOR_TYPE_NUMBER)
+ if ((connector_type || ext) && connector_type < MLX5E_CONNECTOR_TYPE_NUMBER)
return ptys2connector_type[connector_type];
if (eth_proto &
@@ -945,9 +945,9 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
link_ksettings->base.port = get_connector_port(eth_proto_oper,
- connector_type);
+ connector_type, ext);
ptys2ethtool_supported_advertised_port(link_ksettings, eth_proto_admin,
- connector_type);
+ connector_type, ext);
get_lp_advertising(mdev, eth_proto_lp, link_ksettings);
if (an_status == MLX5_AN_COMPLETE)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 772bfdbdeb9c..09ed7f5f688b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -63,6 +63,7 @@
#include "en/xsk/rx.h"
#include "en/xsk/tx.h"
#include "en/hv_vhca_stats.h"
+#include "lib/mlx5.h"
bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
@@ -408,12 +409,9 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
rq->stats = &c->priv->channel_stats[c->ix].rq;
INIT_WORK(&rq->recover_work, mlx5e_rq_err_cqe_work);
- rq->xdp_prog = params->xdp_prog ? bpf_prog_inc(params->xdp_prog) : NULL;
- if (IS_ERR(rq->xdp_prog)) {
- err = PTR_ERR(rq->xdp_prog);
- rq->xdp_prog = NULL;
- goto err_rq_wq_destroy;
- }
+ if (params->xdp_prog)
+ bpf_prog_inc(params->xdp_prog);
+ rq->xdp_prog = params->xdp_prog;
rq_xdp_ix = rq->ix;
if (xsk)
@@ -4252,9 +4250,12 @@ static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
switch (proto) {
case IPPROTO_GRE:
+ return features;
case IPPROTO_IPIP:
case IPPROTO_IPV6:
- return features;
+ if (mlx5e_tunnel_proto_supported(priv->mdev, IPPROTO_IPIP))
+ return features;
+ break;
case IPPROTO_UDP:
udph = udp_hdr(skb);
port = be16_to_cpu(udph->dest);
@@ -4406,16 +4407,11 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
/* no need for full reset when exchanging programs */
reset = (!priv->channels.params.xdp_prog || !prog);
- if (was_opened && !reset) {
+ if (was_opened && !reset)
/* num_channels is invariant here, so we can take the
* batched reference right upfront.
*/
- prog = bpf_prog_add(prog, priv->channels.num);
- if (IS_ERR(prog)) {
- err = PTR_ERR(prog);
- goto unlock;
- }
- }
+ bpf_prog_add(prog, priv->channels.num);
if (was_opened && reset) {
struct mlx5e_channels new_channels = {};
@@ -5427,6 +5423,7 @@ static void *mlx5e_add(struct mlx5_core_dev *mdev)
return NULL;
}
+ dev_net_set(netdev, mlx5_core_net(mdev));
priv = netdev_priv(netdev);
err = mlx5e_attach(mdev, priv);
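The en_main.c hunks drop the IS_ERR() handling around bpf_prog_inc() and bpf_prog_add(), reflecting the conversion of the program refcount to a 64-bit atomic in this kernel generation, after which taking a reference cannot fail and the helpers return void. A before/after sketch (nr is just the batched reference count):

#include <linux/bpf.h>

static void demo_take_prog_refs(struct bpf_prog *prog, int nr)
{
	/* Old style: bpf_prog_add() returned an ERR_PTR on refcount
	 * overflow and callers had to unwind:
	 *
	 *	prog = bpf_prog_add(prog, nr);
	 *	if (IS_ERR(prog))
	 *		return PTR_ERR(prog);
	 *
	 * New style: overflow of an atomic64 is not a practical concern,
	 * so the reference is simply taken.
	 */
	if (prog)
		bpf_prog_add(prog, nr);
}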
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index cd9bb7c7b341..f175cb24bb67 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -47,6 +47,7 @@
#include "en/tc_tun.h"
#include "fs_core.h"
#include "lib/port_tun.h"
+#include "lib/mlx5.h"
#define CREATE_TRACE_POINTS
#include "diag/en_rep_tracepoint.h"
@@ -1243,21 +1244,60 @@ static int mlx5e_rep_setup_tc_cb(enum tc_setup_type type, void *type_data,
}
}
-static LIST_HEAD(mlx5e_rep_block_cb_list);
+static int mlx5e_rep_setup_ft_cb(enum tc_setup_type type, void *type_data,
+ void *cb_priv)
+{
+ struct flow_cls_offload *f = type_data;
+ struct flow_cls_offload cls_flower;
+ struct mlx5e_priv *priv = cb_priv;
+ struct mlx5_eswitch *esw;
+ unsigned long flags;
+ int err;
+
+ flags = MLX5_TC_FLAG(INGRESS) |
+ MLX5_TC_FLAG(ESW_OFFLOAD) |
+ MLX5_TC_FLAG(FT_OFFLOAD);
+ esw = priv->mdev->priv.eswitch;
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ if (!mlx5_eswitch_prios_supported(esw) || f->common.chain_index)
+ return -EOPNOTSUPP;
+
+ /* Re-use tc offload path by moving the ft flow to the
+ * reserved ft chain.
+ */
+ memcpy(&cls_flower, f, sizeof(*f));
+ cls_flower.common.chain_index = FDB_FT_CHAIN;
+ err = mlx5e_rep_setup_tc_cls_flower(priv, &cls_flower, flags);
+ memcpy(&f->stats, &cls_flower.stats, sizeof(f->stats));
+ return err;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static LIST_HEAD(mlx5e_rep_block_tc_cb_list);
+static LIST_HEAD(mlx5e_rep_block_ft_cb_list);
static int mlx5e_rep_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data)
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct flow_block_offload *f = type_data;
+ f->unlocked_driver_cb = true;
+
switch (type) {
case TC_SETUP_BLOCK:
- f->unlocked_driver_cb = true;
return flow_block_cb_setup_simple(type_data,
- &mlx5e_rep_block_cb_list,
+ &mlx5e_rep_block_tc_cb_list,
mlx5e_rep_setup_tc_cb,
priv, priv, true);
+ case TC_SETUP_FT:
+ return flow_block_cb_setup_simple(type_data,
+ &mlx5e_rep_block_ft_cb_list,
+ mlx5e_rep_setup_ft_cb,
+ priv, priv, true);
default:
return -EOPNOTSUPP;
}
@@ -1877,6 +1917,7 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
return -EINVAL;
}
+ dev_net_set(netdev, mlx5_core_net(dev));
rpriv->netdev = netdev;
rep->rep_data[REP_ETH].priv = rpriv;
INIT_LIST_HEAD(&rpriv->vport_sqs_list);
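mlx5e_rep_setup_tc() now keeps two driver block lists, one per block type, because flow_block_cb_setup_simple() uses the list to detect a callback being bound twice; mixing TC_SETUP_BLOCK (clsact) and TC_SETUP_FT (netfilter flowtable) binds on one list would conflate the two. A sketch of the dispatch, with demo_* names hypothetical:

#include <net/flow_offload.h>
#include <net/pkt_cls.h>

static int demo_tc_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
{
	return -EOPNOTSUPP;	/* would handle clsact flower rules */
}

static int demo_ft_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
{
	return -EOPNOTSUPP;	/* would handle flowtable rules */
}

static LIST_HEAD(demo_block_tc_cb_list);
static LIST_HEAD(demo_block_ft_cb_list);

static int demo_setup_tc(struct net_device *dev, enum tc_setup_type type,
			 void *type_data)
{
	void *priv = netdev_priv(dev);

	switch (type) {
	case TC_SETUP_BLOCK:
		return flow_block_cb_setup_simple(type_data,
						  &demo_block_tc_cb_list,
						  demo_tc_cb, priv, priv, true);
	case TC_SETUP_FT:
		return flow_block_cb_setup_simple(type_data,
						  &demo_block_ft_cb_list,
						  demo_ft_cb, priv, priv, true);
	default:
		return -EOPNOTSUPP;
	}
}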
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 82cffb3a9964..9e9960146e5b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -1386,6 +1386,9 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
return 0;
+ if (rq->page_pool)
+ page_pool_nid_changed(rq->page_pool, numa_mem_id());
+
if (rq->cqd.left) {
work_done += mlx5e_decompress_cqes_cont(rq, cqwq, 0, budget);
if (rq->cqd.left || work_done >= budget)
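The en_rx.c hunk gives the page pool a NUMA hint at the top of every poll cycle; page_pool_nid_changed() is a cheap no-op while the node stays the same and re-targets page recycling when the queue's NAPI context migrates. A sketch of the same call in a generic poll routine (struct demo_rq is hypothetical):

#include <linux/topology.h>
#include <net/page_pool.h>

struct demo_rq {
	struct napi_struct napi;
	struct page_pool *page_pool;
};

static int demo_napi_poll(struct napi_struct *napi, int budget)
{
	struct demo_rq *rq = container_of(napi, struct demo_rq, napi);

	/* Keep page recycling on the node this queue currently runs on. */
	if (rq->page_pool)
		page_pool_nid_changed(rq->page_pool, numa_mem_id());

	/* ... process up to budget completions ... */
	return 0;
}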
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index fda0b37075e8..0d5d84b5fa23 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -74,6 +74,7 @@ enum {
MLX5E_TC_FLOW_FLAG_INGRESS = MLX5E_TC_FLAG_INGRESS_BIT,
MLX5E_TC_FLOW_FLAG_EGRESS = MLX5E_TC_FLAG_EGRESS_BIT,
MLX5E_TC_FLOW_FLAG_ESWITCH = MLX5E_TC_FLAG_ESW_OFFLOAD_BIT,
+ MLX5E_TC_FLOW_FLAG_FT = MLX5E_TC_FLAG_FT_OFFLOAD_BIT,
MLX5E_TC_FLOW_FLAG_NIC = MLX5E_TC_FLAG_NIC_OFFLOAD_BIT,
MLX5E_TC_FLOW_FLAG_OFFLOADED = MLX5E_TC_FLOW_BASE,
MLX5E_TC_FLOW_FLAG_HAIRPIN = MLX5E_TC_FLOW_BASE + 1,
@@ -276,6 +277,11 @@ static bool mlx5e_is_eswitch_flow(struct mlx5e_tc_flow *flow)
return flow_flag_test(flow, ESWITCH);
}
+static bool mlx5e_is_ft_flow(struct mlx5e_tc_flow *flow)
+{
+ return flow_flag_test(flow, FT);
+}
+
static bool mlx5e_is_offloaded_flow(struct mlx5e_tc_flow *flow)
{
return flow_flag_test(flow, OFFLOADED);
@@ -1074,7 +1080,7 @@ mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
memcpy(slow_attr, flow->esw_attr, sizeof(*slow_attr));
slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
slow_attr->split_count = 0;
- slow_attr->dest_chain = FDB_SLOW_PATH_CHAIN;
+ slow_attr->dest_chain = FDB_TC_SLOW_PATH_CHAIN;
rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, slow_attr);
if (!IS_ERR(rule))
@@ -1091,7 +1097,7 @@ mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
memcpy(slow_attr, flow->esw_attr, sizeof(*slow_attr));
slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
slow_attr->split_count = 0;
- slow_attr->dest_chain = FDB_SLOW_PATH_CHAIN;
+ slow_attr->dest_chain = FDB_TC_SLOW_PATH_CHAIN;
mlx5e_tc_unoffload_fdb_rules(esw, flow, slow_attr);
flow_flag_clear(flow, SLOW);
}
@@ -1168,7 +1174,12 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
return -EOPNOTSUPP;
}
- if (attr->chain > max_chain) {
+ /* We check chain range only for tc flows.
+ * For ft flows, we checked that attr->chain was originally 0 and set
+ * it to FDB_FT_CHAIN, which is outside the tc range.
+ * See mlx5e_rep_setup_ft_cb().
+ */
+ if (!mlx5e_is_ft_flow(flow) && attr->chain > max_chain) {
NL_SET_ERR_MSG(extack, "Requested chain is out of supported range");
return -EOPNOTSUPP;
}
@@ -2241,13 +2252,14 @@ out_err:
struct mlx5_fields {
u8 field;
- u8 size;
+ u8 field_bsize;
+ u32 field_mask;
u32 offset;
u32 match_offset;
};
-#define OFFLOAD(fw_field, size, field, off, match_field) \
- {MLX5_ACTION_IN_FIELD_OUT_ ## fw_field, size, \
+#define OFFLOAD(fw_field, field_bsize, field_mask, field, off, match_field) \
+ {MLX5_ACTION_IN_FIELD_OUT_ ## fw_field, field_bsize, field_mask, \
offsetof(struct pedit_headers, field) + (off), \
MLX5_BYTE_OFF(fte_match_set_lyr_2_4, match_field)}
@@ -2265,18 +2277,18 @@ struct mlx5_fields {
})
static bool cmp_val_mask(void *valp, void *maskp, void *matchvalp,
- void *matchmaskp, int size)
+ void *matchmaskp, u8 bsize)
{
bool same = false;
- switch (size) {
- case sizeof(u8):
+ switch (bsize) {
+ case 8:
same = SAME_VAL_MASK(u8, valp, maskp, matchvalp, matchmaskp);
break;
- case sizeof(u16):
+ case 16:
same = SAME_VAL_MASK(u16, valp, maskp, matchvalp, matchmaskp);
break;
- case sizeof(u32):
+ case 32:
same = SAME_VAL_MASK(u32, valp, maskp, matchvalp, matchmaskp);
break;
}
@@ -2285,41 +2297,43 @@ static bool cmp_val_mask(void *valp, void *maskp, void *matchvalp,
}
static struct mlx5_fields fields[] = {
- OFFLOAD(DMAC_47_16, 4, eth.h_dest[0], 0, dmac_47_16),
- OFFLOAD(DMAC_15_0, 2, eth.h_dest[4], 0, dmac_15_0),
- OFFLOAD(SMAC_47_16, 4, eth.h_source[0], 0, smac_47_16),
- OFFLOAD(SMAC_15_0, 2, eth.h_source[4], 0, smac_15_0),
- OFFLOAD(ETHERTYPE, 2, eth.h_proto, 0, ethertype),
- OFFLOAD(FIRST_VID, 2, vlan.h_vlan_TCI, 0, first_vid),
-
- OFFLOAD(IP_TTL, 1, ip4.ttl, 0, ttl_hoplimit),
- OFFLOAD(SIPV4, 4, ip4.saddr, 0, src_ipv4_src_ipv6.ipv4_layout.ipv4),
- OFFLOAD(DIPV4, 4, ip4.daddr, 0, dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
-
- OFFLOAD(SIPV6_127_96, 4, ip6.saddr.s6_addr32[0], 0,
+ OFFLOAD(DMAC_47_16, 32, U32_MAX, eth.h_dest[0], 0, dmac_47_16),
+ OFFLOAD(DMAC_15_0, 16, U16_MAX, eth.h_dest[4], 0, dmac_15_0),
+ OFFLOAD(SMAC_47_16, 32, U32_MAX, eth.h_source[0], 0, smac_47_16),
+ OFFLOAD(SMAC_15_0, 16, U16_MAX, eth.h_source[4], 0, smac_15_0),
+ OFFLOAD(ETHERTYPE, 16, U16_MAX, eth.h_proto, 0, ethertype),
+ OFFLOAD(FIRST_VID, 16, U16_MAX, vlan.h_vlan_TCI, 0, first_vid),
+
+ OFFLOAD(IP_DSCP, 8, 0xfc, ip4.tos, 0, ip_dscp),
+ OFFLOAD(IP_TTL, 8, U8_MAX, ip4.ttl, 0, ttl_hoplimit),
+ OFFLOAD(SIPV4, 32, U32_MAX, ip4.saddr, 0, src_ipv4_src_ipv6.ipv4_layout.ipv4),
+ OFFLOAD(DIPV4, 32, U32_MAX, ip4.daddr, 0, dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
+
+ OFFLOAD(SIPV6_127_96, 32, U32_MAX, ip6.saddr.s6_addr32[0], 0,
src_ipv4_src_ipv6.ipv6_layout.ipv6[0]),
- OFFLOAD(SIPV6_95_64, 4, ip6.saddr.s6_addr32[1], 0,
+ OFFLOAD(SIPV6_95_64, 32, U32_MAX, ip6.saddr.s6_addr32[1], 0,
src_ipv4_src_ipv6.ipv6_layout.ipv6[4]),
- OFFLOAD(SIPV6_63_32, 4, ip6.saddr.s6_addr32[2], 0,
+ OFFLOAD(SIPV6_63_32, 32, U32_MAX, ip6.saddr.s6_addr32[2], 0,
src_ipv4_src_ipv6.ipv6_layout.ipv6[8]),
- OFFLOAD(SIPV6_31_0, 4, ip6.saddr.s6_addr32[3], 0,
+ OFFLOAD(SIPV6_31_0, 32, U32_MAX, ip6.saddr.s6_addr32[3], 0,
src_ipv4_src_ipv6.ipv6_layout.ipv6[12]),
- OFFLOAD(DIPV6_127_96, 4, ip6.daddr.s6_addr32[0], 0,
+ OFFLOAD(DIPV6_127_96, 32, U32_MAX, ip6.daddr.s6_addr32[0], 0,
dst_ipv4_dst_ipv6.ipv6_layout.ipv6[0]),
- OFFLOAD(DIPV6_95_64, 4, ip6.daddr.s6_addr32[1], 0,
+ OFFLOAD(DIPV6_95_64, 32, U32_MAX, ip6.daddr.s6_addr32[1], 0,
dst_ipv4_dst_ipv6.ipv6_layout.ipv6[4]),
- OFFLOAD(DIPV6_63_32, 4, ip6.daddr.s6_addr32[2], 0,
+ OFFLOAD(DIPV6_63_32, 32, U32_MAX, ip6.daddr.s6_addr32[2], 0,
dst_ipv4_dst_ipv6.ipv6_layout.ipv6[8]),
- OFFLOAD(DIPV6_31_0, 4, ip6.daddr.s6_addr32[3], 0,
+ OFFLOAD(DIPV6_31_0, 32, U32_MAX, ip6.daddr.s6_addr32[3], 0,
dst_ipv4_dst_ipv6.ipv6_layout.ipv6[12]),
- OFFLOAD(IPV6_HOPLIMIT, 1, ip6.hop_limit, 0, ttl_hoplimit),
+ OFFLOAD(IPV6_HOPLIMIT, 8, U8_MAX, ip6.hop_limit, 0, ttl_hoplimit),
- OFFLOAD(TCP_SPORT, 2, tcp.source, 0, tcp_sport),
- OFFLOAD(TCP_DPORT, 2, tcp.dest, 0, tcp_dport),
- OFFLOAD(TCP_FLAGS, 1, tcp.ack_seq, 5, tcp_flags),
+ OFFLOAD(TCP_SPORT, 16, U16_MAX, tcp.source, 0, tcp_sport),
+ OFFLOAD(TCP_DPORT, 16, U16_MAX, tcp.dest, 0, tcp_dport),
+ /* in the linux tcphdr, tcp_flags is 8 bits long */
+ OFFLOAD(TCP_FLAGS, 8, U8_MAX, tcp.ack_seq, 5, tcp_flags),
- OFFLOAD(UDP_SPORT, 2, udp.source, 0, udp_sport),
- OFFLOAD(UDP_DPORT, 2, udp.dest, 0, udp_dport),
+ OFFLOAD(UDP_SPORT, 16, U16_MAX, udp.source, 0, udp_sport),
+ OFFLOAD(UDP_DPORT, 16, U16_MAX, udp.dest, 0, udp_dport),
};
/* On input attr->max_mod_hdr_actions tells how many HW actions can be parsed at
@@ -2332,19 +2346,17 @@ static int offload_pedit_fields(struct pedit_headers_action *hdrs,
struct netlink_ext_ack *extack)
{
struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
- void *headers_c = get_match_headers_criteria(*action_flags,
- &parse_attr->spec);
- void *headers_v = get_match_headers_value(*action_flags,
- &parse_attr->spec);
int i, action_size, nactions, max_actions, first, last, next_z;
- void *s_masks_p, *a_masks_p, *vals_p;
+ void *headers_c, *headers_v, *action, *vals_p;
+ u32 *s_masks_p, *a_masks_p, s_mask, a_mask;
struct mlx5_fields *f;
- u8 cmd, field_bsize;
- u32 s_mask, a_mask;
unsigned long mask;
__be32 mask_be32;
__be16 mask_be16;
- void *action;
+ u8 cmd;
+
+ headers_c = get_match_headers_criteria(*action_flags, &parse_attr->spec);
+ headers_v = get_match_headers_value(*action_flags, &parse_attr->spec);
set_masks = &hdrs[0].masks;
add_masks = &hdrs[1].masks;
@@ -2369,8 +2381,8 @@ static int offload_pedit_fields(struct pedit_headers_action *hdrs,
s_masks_p = (void *)set_masks + f->offset;
a_masks_p = (void *)add_masks + f->offset;
- memcpy(&s_mask, s_masks_p, f->size);
- memcpy(&a_mask, a_masks_p, f->size);
+ s_mask = *s_masks_p & f->field_mask;
+ a_mask = *a_masks_p & f->field_mask;
if (!s_mask && !a_mask) /* nothing to offload here */
continue;
@@ -2399,38 +2411,34 @@ static int offload_pedit_fields(struct pedit_headers_action *hdrs,
vals_p = (void *)set_vals + f->offset;
/* don't rewrite if we have a match on the same value */
if (cmp_val_mask(vals_p, s_masks_p, match_val,
- match_mask, f->size))
+ match_mask, f->field_bsize))
skip = true;
/* clear to denote we consumed this field */
- memset(s_masks_p, 0, f->size);
+ *s_masks_p &= ~f->field_mask;
} else {
- u32 zero = 0;
-
cmd = MLX5_ACTION_TYPE_ADD;
mask = a_mask;
vals_p = (void *)add_vals + f->offset;
/* add 0 is no change */
- if (!memcmp(vals_p, &zero, f->size))
+ if ((*(u32 *)vals_p & f->field_mask) == 0)
skip = true;
/* clear to denote we consumed this field */
- memset(a_masks_p, 0, f->size);
+ *a_masks_p &= ~f->field_mask;
}
if (skip)
continue;
- field_bsize = f->size * BITS_PER_BYTE;
-
- if (field_bsize == 32) {
+ if (f->field_bsize == 32) {
mask_be32 = *(__be32 *)&mask;
mask = (__force unsigned long)cpu_to_le32(be32_to_cpu(mask_be32));
- } else if (field_bsize == 16) {
+ } else if (f->field_bsize == 16) {
mask_be16 = *(__be16 *)&mask;
mask = (__force unsigned long)cpu_to_le16(be16_to_cpu(mask_be16));
}
- first = find_first_bit(&mask, field_bsize);
- next_z = find_next_zero_bit(&mask, field_bsize, first);
- last = find_last_bit(&mask, field_bsize);
+ first = find_first_bit(&mask, f->field_bsize);
+ next_z = find_next_zero_bit(&mask, f->field_bsize, first);
+ last = find_last_bit(&mask, f->field_bsize);
if (first < next_z && next_z < last) {
NL_SET_ERR_MSG_MOD(extack,
"rewrite of few sub-fields isn't supported");
@@ -2443,16 +2451,22 @@ static int offload_pedit_fields(struct pedit_headers_action *hdrs,
MLX5_SET(set_action_in, action, field, f->field);
if (cmd == MLX5_ACTION_TYPE_SET) {
- MLX5_SET(set_action_in, action, offset, first);
+ int start;
+
+ /* a bit-sized field may not start at the first bit */
+ start = find_first_bit((unsigned long *)&f->field_mask,
+ f->field_bsize);
+
+ MLX5_SET(set_action_in, action, offset, first - start);
/* length is num of bits to be written, zero means length of 32 */
MLX5_SET(set_action_in, action, length, (last - first + 1));
}
- if (field_bsize == 32)
+ if (f->field_bsize == 32)
MLX5_SET(set_action_in, action, data, ntohl(*(__be32 *)vals_p) >> first);
- else if (field_bsize == 16)
+ else if (f->field_bsize == 16)
MLX5_SET(set_action_in, action, data, ntohs(*(__be16 *)vals_p) >> first);
- else if (field_bsize == 8)
+ else if (f->field_bsize == 8)
MLX5_SET(set_action_in, action, data, *(u8 *)vals_p >> first);
action += action_size;
@@ -3214,6 +3228,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr;
struct mlx5e_rep_priv *rpriv = priv->ppriv;
const struct ip_tunnel_info *info = NULL;
+ bool ft_flow = mlx5e_is_ft_flow(flow);
const struct flow_action_entry *act;
bool encap = false;
u32 action = 0;
@@ -3258,6 +3273,14 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
return -EINVAL;
}
+ if (ft_flow && out_dev == priv->netdev) {
+ /* Ignore forward-to-self rules generated
+ * by adding both mlx5 devs to the flow table
+ * block on a normal nft offload setup.
+ */
+ return -EOPNOTSUPP;
+ }
+
if (attr->out_count >= MLX5_MAX_FLOW_FWD_VPORTS) {
NL_SET_ERR_MSG_MOD(extack,
"can't support more output ports, can't offload forwarding");
@@ -3268,7 +3291,20 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
- if (netdev_port_same_parent_id(priv->netdev, out_dev)) {
+ if (encap) {
+ parse_attr->mirred_ifindex[attr->out_count] =
+ out_dev->ifindex;
+ parse_attr->tun_info[attr->out_count] = dup_tun_info(info);
+ if (!parse_attr->tun_info[attr->out_count])
+ return -ENOMEM;
+ encap = false;
+ attr->dests[attr->out_count].flags |=
+ MLX5_ESW_DEST_ENCAP;
+ attr->out_count++;
+ /* attr->dests[].rep is resolved when we
+ * handle encap
+ */
+ } else if (netdev_port_same_parent_id(priv->netdev, out_dev)) {
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct net_device *uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
struct net_device *uplink_upper;
@@ -3310,19 +3346,6 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
attr->dests[attr->out_count].rep = rpriv->rep;
attr->dests[attr->out_count].mdev = out_priv->mdev;
attr->out_count++;
- } else if (encap) {
- parse_attr->mirred_ifindex[attr->out_count] =
- out_dev->ifindex;
- parse_attr->tun_info[attr->out_count] = dup_tun_info(info);
- if (!parse_attr->tun_info[attr->out_count])
- return -ENOMEM;
- encap = false;
- attr->dests[attr->out_count].flags |=
- MLX5_ESW_DEST_ENCAP;
- attr->out_count++;
- /* attr->dests[].rep is resolved when we
- * handle encap
- */
} else if (parse_attr->filter_dev != priv->netdev) {
/* All mlx5 devices are called to configure
* high level device filters. Therefore, the
@@ -3382,6 +3405,10 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
u32 dest_chain = act->chain_index;
u32 max_chain = mlx5_eswitch_get_chain_range(esw);
+ if (ft_flow) {
+ NL_SET_ERR_MSG_MOD(extack, "Goto action is not supported");
+ return -EOPNOTSUPP;
+ }
if (dest_chain <= attr->chain) {
NL_SET_ERR_MSG(extack, "Goto earlier chain isn't supported");
return -EOPNOTSUPP;
@@ -3443,6 +3470,12 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
}
+ if (!(attr->action &
+ (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) {
+ NL_SET_ERR_MSG(extack, "Rule must have at least one forward/drop action");
+ return -EOPNOTSUPP;
+ }
+
if (attr->split_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
NL_SET_ERR_MSG_MOD(extack,
"current firmware doesn't support split rule for port mirroring");
@@ -3466,6 +3499,8 @@ static void get_flags(int flags, unsigned long *flow_flags)
__flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_ESWITCH);
if (flags & MLX5_TC_FLAG(NIC_OFFLOAD))
__flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_NIC);
+ if (flags & MLX5_TC_FLAG(FT_OFFLOAD))
+ __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_FT);
*flow_flags = __flow_flags;
}
@@ -3841,7 +3876,7 @@ int mlx5e_delete_flower(struct net_device *dev, struct mlx5e_priv *priv,
int err;
rcu_read_lock();
- flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params);
+ flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params);
if (!flow || !same_flow_direction(flow, flags)) {
err = -EINVAL;
goto errout;
@@ -4000,9 +4035,8 @@ int mlx5e_tc_configure_matchall(struct mlx5e_priv *priv,
struct tc_cls_matchall_offload *ma)
{
struct netlink_ext_ack *extack = ma->common.extack;
- int prio = TC_H_MAJ(ma->common.prio) >> 16;
- if (prio != 1) {
+ if (ma->common.prio != 1) {
NL_SET_ERR_MSG_MOD(extack, "only priority 1 is supported");
return -EINVAL;
}
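The widened OFFLOAD() table earlier in this file is what makes bit-sized rewrites possible: field_mask marks which bits of the containing byte or word belong to the field, and the set action's offset becomes first - start, where start is the field's first mask bit. A worked example for the new IP_DSCP entry (mask 0xfc, the top six bits of the ToS byte):

#include <linux/bitops.h>
#include <linux/printk.h>

static void demo_dscp_offsets(void)
{
	unsigned long mask = 0xfc;	 /* pedit rewrites the full DSCP */
	unsigned long field_mask = 0xfc; /* from the IP_DSCP table entry */
	unsigned int first, last, start;

	first = find_first_bit(&mask, 8);	/* 2 */
	last = find_last_bit(&mask, 8);		/* 7 */
	start = find_first_bit(&field_mask, 8);	/* 2 */

	/* offset = 0, length = 6: the action rewrites DSCP in place */
	pr_info("offset=%u length=%u\n", first - start, last - first + 1);
}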
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
index 924c6ef86a14..262cdb7b69b1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
@@ -44,7 +44,8 @@ enum {
MLX5E_TC_FLAG_EGRESS_BIT,
MLX5E_TC_FLAG_NIC_OFFLOAD_BIT,
MLX5E_TC_FLAG_ESW_OFFLOAD_BIT,
- MLX5E_TC_FLAG_LAST_EXPORTED_BIT = MLX5E_TC_FLAG_ESW_OFFLOAD_BIT,
+ MLX5E_TC_FLAG_FT_OFFLOAD_BIT,
+ MLX5E_TC_FLAG_LAST_EXPORTED_BIT = MLX5E_TC_FLAG_FT_OFFLOAD_BIT,
};
#define MLX5_TC_FLAG(flag) BIT(MLX5E_TC_FLAG_##flag##_BIT)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 67dc4f0921b6..66951ff975f4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -461,8 +461,14 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
if (unlikely(get_cqe_opcode(cqe) == MLX5_CQE_REQ_ERR)) {
if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING,
&sq->state)) {
+ struct mlx5e_tx_wqe_info *wi;
+ u16 ci;
+
+ ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
+ wi = &sq->db.wqe_info[ci];
mlx5e_dump_error_cqe(sq,
(struct mlx5_err_cqe *)cqe);
+ mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs);
queue_work(cq->channel->priv->wq,
&sq->recover_work);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 30aae76b6a1d..2c965ad0d744 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -111,42 +111,32 @@ static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
}
/* E-Switch vport context HW commands */
-static int modify_esw_vport_context_cmd(struct mlx5_core_dev *dev, u16 vport,
- void *in, int inlen)
+int mlx5_eswitch_modify_esw_vport_context(struct mlx5_core_dev *dev, u16 vport,
+ bool other_vport,
+ void *in, int inlen)
{
u32 out[MLX5_ST_SZ_DW(modify_esw_vport_context_out)] = {0};
MLX5_SET(modify_esw_vport_context_in, in, opcode,
MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);
MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
- MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);
+ MLX5_SET(modify_esw_vport_context_in, in, other_vport, other_vport);
return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
}
-int mlx5_eswitch_modify_esw_vport_context(struct mlx5_eswitch *esw, u16 vport,
- void *in, int inlen)
-{
- return modify_esw_vport_context_cmd(esw->dev, vport, in, inlen);
-}
-
-static int query_esw_vport_context_cmd(struct mlx5_core_dev *dev, u16 vport,
- void *out, int outlen)
+int mlx5_eswitch_query_esw_vport_context(struct mlx5_core_dev *dev, u16 vport,
+ bool other_vport,
+ void *out, int outlen)
{
u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)] = {};
MLX5_SET(query_esw_vport_context_in, in, opcode,
MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT);
MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
- MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);
+ MLX5_SET(modify_esw_vport_context_in, in, other_vport, other_vport);
return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}
-int mlx5_eswitch_query_esw_vport_context(struct mlx5_eswitch *esw, u16 vport,
- void *out, int outlen)
-{
- return query_esw_vport_context_cmd(esw->dev, vport, out, outlen);
-}
-
static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u16 vport,
u16 vlan, u8 qos, u8 set_flags)
{
@@ -179,7 +169,8 @@ static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u16 vport,
MLX5_SET(modify_esw_vport_context_in, in,
field_select.vport_cvlan_insert, 1);
- return modify_esw_vport_context_cmd(dev, vport, in, sizeof(in));
+ return mlx5_eswitch_modify_esw_vport_context(dev, vport, true,
+ in, sizeof(in));
}
/* E-Switch FDB */
@@ -452,6 +443,13 @@ static int esw_create_legacy_table(struct mlx5_eswitch *esw)
return err;
}
+static void esw_destroy_legacy_table(struct mlx5_eswitch *esw)
+{
+ esw_cleanup_vepa_rules(esw);
+ esw_destroy_legacy_fdb_table(esw);
+ esw_destroy_legacy_vepa_table(esw);
+}
+
#define MLX5_LEGACY_SRIOV_VPORT_EVENTS (MLX5_VPORT_UC_ADDR_CHANGE | \
MLX5_VPORT_MC_ADDR_CHANGE | \
MLX5_VPORT_PROMISC_CHANGE)
@@ -464,15 +462,10 @@ static int esw_legacy_enable(struct mlx5_eswitch *esw)
if (ret)
return ret;
- mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_LEGACY_SRIOV_VPORT_EVENTS);
- return 0;
-}
-
-static void esw_destroy_legacy_table(struct mlx5_eswitch *esw)
-{
- esw_cleanup_vepa_rules(esw);
- esw_destroy_legacy_fdb_table(esw);
- esw_destroy_legacy_vepa_table(esw);
+ ret = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_LEGACY_SRIOV_VPORT_EVENTS);
+ if (ret)
+ esw_destroy_legacy_table(esw);
+ return ret;
}
static void esw_legacy_disable(struct mlx5_eswitch *esw)
@@ -501,7 +494,7 @@ static int esw_add_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
/* Skip mlx5_mpfs_add_mac for eswitch_managers,
* it is already done by its netdev in mlx5e_execute_l2_action
*/
- if (esw->manager_vport == vport)
+ if (mlx5_esw_is_manager_vport(esw, vport))
goto fdb_add;
err = mlx5_mpfs_add_mac(esw->dev, mac);
@@ -530,10 +523,10 @@ static int esw_del_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
u16 vport = vaddr->vport;
int err = 0;
- /* Skip mlx5_mpfs_del_mac for eswitch managerss,
+ /* Skip mlx5_mpfs_del_mac for eswitch managers,
* it is already done by its netdev in mlx5e_execute_l2_action
*/
- if (!vaddr->mpfs || esw->manager_vport == vport)
+ if (!vaddr->mpfs || mlx5_esw_is_manager_vport(esw, vport))
goto fdb_del;
err = mlx5_mpfs_del_mac(esw->dev, mac);
@@ -1040,14 +1033,15 @@ out:
void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
- if (!IS_ERR_OR_NULL(vport->egress.allowed_vlan))
+ if (!IS_ERR_OR_NULL(vport->egress.allowed_vlan)) {
mlx5_del_flow_rules(vport->egress.allowed_vlan);
+ vport->egress.allowed_vlan = NULL;
+ }
- if (!IS_ERR_OR_NULL(vport->egress.drop_rule))
- mlx5_del_flow_rules(vport->egress.drop_rule);
-
- vport->egress.allowed_vlan = NULL;
- vport->egress.drop_rule = NULL;
+ if (!IS_ERR_OR_NULL(vport->egress.legacy.drop_rule)) {
+ mlx5_del_flow_rules(vport->egress.legacy.drop_rule);
+ vport->egress.legacy.drop_rule = NULL;
+ }
}
void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw,
@@ -1067,57 +1061,21 @@ void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw,
vport->egress.acl = NULL;
}
-int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport)
+static int
+esw_vport_create_legacy_ingress_acl_groups(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_core_dev *dev = esw->dev;
- struct mlx5_flow_namespace *root_ns;
- struct mlx5_flow_table *acl;
struct mlx5_flow_group *g;
void *match_criteria;
u32 *flow_group_in;
- /* The ingress acl table contains 4 groups
- * (2 active rules at the same time -
- * 1 allow rule from one of the first 3 groups.
- * 1 drop rule from the last group):
- * 1)Allow untagged traffic with smac=original mac.
- * 2)Allow untagged traffic.
- * 3)Allow traffic with smac=original mac.
- * 4)Drop all other traffic.
- */
- int table_size = 4;
- int err = 0;
-
- if (!MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support))
- return -EOPNOTSUPP;
-
- if (!IS_ERR_OR_NULL(vport->ingress.acl))
- return 0;
-
- esw_debug(dev, "Create vport[%d] ingress ACL log_max_size(%d)\n",
- vport->vport, MLX5_CAP_ESW_INGRESS_ACL(dev, log_max_ft_size));
-
- root_ns = mlx5_get_flow_vport_acl_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS,
- mlx5_eswitch_vport_num_to_index(esw, vport->vport));
- if (!root_ns) {
- esw_warn(dev, "Failed to get E-Switch ingress flow namespace for vport (%d)\n", vport->vport);
- return -EOPNOTSUPP;
- }
+ int err;
flow_group_in = kvzalloc(inlen, GFP_KERNEL);
if (!flow_group_in)
return -ENOMEM;
- acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
- if (IS_ERR(acl)) {
- err = PTR_ERR(acl);
- esw_warn(dev, "Failed to create E-Switch vport[%d] ingress flow Table, err(%d)\n",
- vport->vport, err);
- goto out;
- }
- vport->ingress.acl = acl;
-
match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
@@ -1127,14 +1085,14 @@ int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
- g = mlx5_create_flow_group(acl, flow_group_in);
+ g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
if (IS_ERR(g)) {
err = PTR_ERR(g);
- esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged spoofchk flow group, err(%d)\n",
+ esw_warn(dev, "vport[%d] ingress create untagged spoofchk flow group, err(%d)\n",
vport->vport, err);
- goto out;
+ goto spoof_err;
}
- vport->ingress.allow_untagged_spoofchk_grp = g;
+ vport->ingress.legacy.allow_untagged_spoofchk_grp = g;
memset(flow_group_in, 0, inlen);
MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
@@ -1142,14 +1100,14 @@ int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
- g = mlx5_create_flow_group(acl, flow_group_in);
+ g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
if (IS_ERR(g)) {
err = PTR_ERR(g);
- esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged flow group, err(%d)\n",
+ esw_warn(dev, "vport[%d] ingress create untagged flow group, err(%d)\n",
vport->vport, err);
- goto out;
+ goto untagged_err;
}
- vport->ingress.allow_untagged_only_grp = g;
+ vport->ingress.legacy.allow_untagged_only_grp = g;
memset(flow_group_in, 0, inlen);
MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
@@ -1158,108 +1116,178 @@ int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 2);
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 2);
- g = mlx5_create_flow_group(acl, flow_group_in);
+ g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
if (IS_ERR(g)) {
err = PTR_ERR(g);
- esw_warn(dev, "Failed to create E-Switch vport[%d] ingress spoofchk flow group, err(%d)\n",
+ esw_warn(dev, "vport[%d] ingress create spoofchk flow group, err(%d)\n",
vport->vport, err);
- goto out;
+ goto allow_spoof_err;
}
- vport->ingress.allow_spoofchk_only_grp = g;
+ vport->ingress.legacy.allow_spoofchk_only_grp = g;
memset(flow_group_in, 0, inlen);
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 3);
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 3);
- g = mlx5_create_flow_group(acl, flow_group_in);
+ g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
if (IS_ERR(g)) {
err = PTR_ERR(g);
- esw_warn(dev, "Failed to create E-Switch vport[%d] ingress drop flow group, err(%d)\n",
+ esw_warn(dev, "vport[%d] ingress create drop flow group, err(%d)\n",
vport->vport, err);
- goto out;
+ goto drop_err;
}
- vport->ingress.drop_grp = g;
+ vport->ingress.legacy.drop_grp = g;
+ kvfree(flow_group_in);
+ return 0;
-out:
- if (err) {
- if (!IS_ERR_OR_NULL(vport->ingress.allow_spoofchk_only_grp))
- mlx5_destroy_flow_group(
- vport->ingress.allow_spoofchk_only_grp);
- if (!IS_ERR_OR_NULL(vport->ingress.allow_untagged_only_grp))
- mlx5_destroy_flow_group(
- vport->ingress.allow_untagged_only_grp);
- if (!IS_ERR_OR_NULL(vport->ingress.allow_untagged_spoofchk_grp))
- mlx5_destroy_flow_group(
- vport->ingress.allow_untagged_spoofchk_grp);
- if (!IS_ERR_OR_NULL(vport->ingress.acl))
- mlx5_destroy_flow_table(vport->ingress.acl);
+drop_err:
+ if (!IS_ERR_OR_NULL(vport->ingress.legacy.allow_spoofchk_only_grp)) {
+ mlx5_destroy_flow_group(vport->ingress.legacy.allow_spoofchk_only_grp);
+ vport->ingress.legacy.allow_spoofchk_only_grp = NULL;
}
-
+allow_spoof_err:
+ if (!IS_ERR_OR_NULL(vport->ingress.legacy.allow_untagged_only_grp)) {
+ mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_only_grp);
+ vport->ingress.legacy.allow_untagged_only_grp = NULL;
+ }
+untagged_err:
+ if (!IS_ERR_OR_NULL(vport->ingress.legacy.allow_untagged_spoofchk_grp)) {
+ mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_spoofchk_grp);
+ vport->ingress.legacy.allow_untagged_spoofchk_grp = NULL;
+ }
+spoof_err:
kvfree(flow_group_in);
return err;
}
+int esw_vport_create_ingress_acl_table(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport, int table_size)
+{
+ struct mlx5_core_dev *dev = esw->dev;
+ struct mlx5_flow_namespace *root_ns;
+ struct mlx5_flow_table *acl;
+ int vport_index;
+ int err;
+
+ if (!MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support))
+ return -EOPNOTSUPP;
+
+ esw_debug(dev, "Create vport[%d] ingress ACL log_max_size(%d)\n",
+ vport->vport, MLX5_CAP_ESW_INGRESS_ACL(dev, log_max_ft_size));
+
+ vport_index = mlx5_eswitch_vport_num_to_index(esw, vport->vport);
+ root_ns = mlx5_get_flow_vport_acl_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS,
+ vport_index);
+ if (!root_ns) {
+ esw_warn(dev, "Failed to get E-Switch ingress flow namespace for vport (%d)\n",
+ vport->vport);
+ return -EOPNOTSUPP;
+ }
+
+ acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
+ if (IS_ERR(acl)) {
+ err = PTR_ERR(acl);
+ esw_warn(dev, "vport[%d] ingress create flow Table, err(%d)\n",
+ vport->vport, err);
+ return err;
+ }
+ vport->ingress.acl = acl;
+ return 0;
+}
+
+void esw_vport_destroy_ingress_acl_table(struct mlx5_vport *vport)
+{
+ if (!vport->ingress.acl)
+ return;
+
+ mlx5_destroy_flow_table(vport->ingress.acl);
+ vport->ingress.acl = NULL;
+}
+
void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
- if (!IS_ERR_OR_NULL(vport->ingress.drop_rule))
- mlx5_del_flow_rules(vport->ingress.drop_rule);
+ if (vport->ingress.legacy.drop_rule) {
+ mlx5_del_flow_rules(vport->ingress.legacy.drop_rule);
+ vport->ingress.legacy.drop_rule = NULL;
+ }
- if (!IS_ERR_OR_NULL(vport->ingress.allow_rule))
+ if (vport->ingress.allow_rule) {
mlx5_del_flow_rules(vport->ingress.allow_rule);
-
- vport->ingress.drop_rule = NULL;
- vport->ingress.allow_rule = NULL;
-
- esw_vport_del_ingress_acl_modify_metadata(esw, vport);
+ vport->ingress.allow_rule = NULL;
+ }
}
-void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport)
+static void esw_vport_disable_legacy_ingress_acl(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
{
- if (IS_ERR_OR_NULL(vport->ingress.acl))
+ if (!vport->ingress.acl)
return;
esw_debug(esw->dev, "Destroy vport[%d] E-Switch ingress ACL\n", vport->vport);
esw_vport_cleanup_ingress_rules(esw, vport);
- mlx5_destroy_flow_group(vport->ingress.allow_spoofchk_only_grp);
- mlx5_destroy_flow_group(vport->ingress.allow_untagged_only_grp);
- mlx5_destroy_flow_group(vport->ingress.allow_untagged_spoofchk_grp);
- mlx5_destroy_flow_group(vport->ingress.drop_grp);
- mlx5_destroy_flow_table(vport->ingress.acl);
- vport->ingress.acl = NULL;
- vport->ingress.drop_grp = NULL;
- vport->ingress.allow_spoofchk_only_grp = NULL;
- vport->ingress.allow_untagged_only_grp = NULL;
- vport->ingress.allow_untagged_spoofchk_grp = NULL;
+ if (vport->ingress.legacy.allow_spoofchk_only_grp) {
+ mlx5_destroy_flow_group(vport->ingress.legacy.allow_spoofchk_only_grp);
+ vport->ingress.legacy.allow_spoofchk_only_grp = NULL;
+ }
+ if (vport->ingress.legacy.allow_untagged_only_grp) {
+ mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_only_grp);
+ vport->ingress.legacy.allow_untagged_only_grp = NULL;
+ }
+ if (vport->ingress.legacy.allow_untagged_spoofchk_grp) {
+ mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_spoofchk_grp);
+ vport->ingress.legacy.allow_untagged_spoofchk_grp = NULL;
+ }
+ if (vport->ingress.legacy.drop_grp) {
+ mlx5_destroy_flow_group(vport->ingress.legacy.drop_grp);
+ vport->ingress.legacy.drop_grp = NULL;
+ }
+ esw_vport_destroy_ingress_acl_table(vport);
}
static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
- struct mlx5_fc *counter = vport->ingress.drop_counter;
+ struct mlx5_fc *counter = vport->ingress.legacy.drop_counter;
struct mlx5_flow_destination drop_ctr_dst = {0};
struct mlx5_flow_destination *dst = NULL;
struct mlx5_flow_act flow_act = {0};
- struct mlx5_flow_spec *spec;
+ struct mlx5_flow_spec *spec = NULL;
int dest_num = 0;
int err = 0;
u8 *smac_v;
+ /* The ingress acl table contains 4 groups
+ * (2 active rules at the same time -
+ * 1 allow rule from one of the first 3 groups.
+ * 1 drop rule from the last group):
+ * 1)Allow untagged traffic with smac=original mac.
+ * 2)Allow untagged traffic.
+ * 3)Allow traffic with smac=original mac.
+ * 4)Drop all other traffic.
+ */
+ int table_size = 4;
+
esw_vport_cleanup_ingress_rules(esw, vport);
if (!vport->info.vlan && !vport->info.qos && !vport->info.spoofchk) {
- esw_vport_disable_ingress_acl(esw, vport);
+ esw_vport_disable_legacy_ingress_acl(esw, vport);
return 0;
}
- err = esw_vport_enable_ingress_acl(esw, vport);
- if (err) {
- mlx5_core_warn(esw->dev,
- "failed to enable ingress acl (%d) on vport[%d]\n",
- err, vport->vport);
- return err;
+ if (!vport->ingress.acl) {
+ err = esw_vport_create_ingress_acl_table(esw, vport, table_size);
+ if (err) {
+ esw_warn(esw->dev,
+ "vport[%d] enable ingress acl err (%d)\n",
+ vport->vport, err);
+ return err;
+ }
+
+ err = esw_vport_create_legacy_ingress_acl_groups(esw, vport);
+ if (err)
+ goto out;
}
esw_debug(esw->dev,
@@ -1309,21 +1337,59 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
dst = &drop_ctr_dst;
dest_num++;
}
- vport->ingress.drop_rule =
+ vport->ingress.legacy.drop_rule =
mlx5_add_flow_rules(vport->ingress.acl, spec,
&flow_act, dst, dest_num);
- if (IS_ERR(vport->ingress.drop_rule)) {
- err = PTR_ERR(vport->ingress.drop_rule);
+ if (IS_ERR(vport->ingress.legacy.drop_rule)) {
+ err = PTR_ERR(vport->ingress.legacy.drop_rule);
esw_warn(esw->dev,
"vport[%d] configure ingress drop rule, err(%d)\n",
vport->vport, err);
- vport->ingress.drop_rule = NULL;
+ vport->ingress.legacy.drop_rule = NULL;
goto out;
}
+ kvfree(spec);
+ return 0;
out:
- if (err)
- esw_vport_cleanup_ingress_rules(esw, vport);
+ esw_vport_disable_legacy_ingress_acl(esw, vport);
+ kvfree(spec);
+ return err;
+}
+
+int mlx5_esw_create_vport_egress_acl_vlan(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport,
+ u16 vlan_id, u32 flow_action)
+{
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_spec *spec;
+ int err = 0;
+
+ if (vport->egress.allowed_vlan)
+ return -EEXIST;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.cvlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.first_vid);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, vlan_id);
+
+ spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ flow_act.action = flow_action;
+ vport->egress.allowed_vlan =
+ mlx5_add_flow_rules(vport->egress.acl, spec,
+ &flow_act, NULL, 0);
+ if (IS_ERR(vport->egress.allowed_vlan)) {
+ err = PTR_ERR(vport->egress.allowed_vlan);
+ esw_warn(esw->dev,
+ "vport[%d] configure egress vlan rule failed, err(%d)\n",
+ vport->vport, err);
+ vport->egress.allowed_vlan = NULL;
+ }
+
kvfree(spec);
return err;
}
@@ -1331,7 +1397,7 @@ out:
static int esw_vport_egress_config(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
- struct mlx5_fc *counter = vport->egress.drop_counter;
+ struct mlx5_fc *counter = vport->egress.legacy.drop_counter;
struct mlx5_flow_destination drop_ctr_dst = {0};
struct mlx5_flow_destination *dst = NULL;
struct mlx5_flow_act flow_act = {0};
@@ -1358,34 +1424,17 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
"vport[%d] configure egress rules, vlan(%d) qos(%d)\n",
vport->vport, vport->info.vlan, vport->info.qos);
- spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
- if (!spec) {
- err = -ENOMEM;
- goto out;
- }
-
/* Allowed vlan rule */
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
- MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.cvlan_tag);
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.first_vid);
- MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, vport->info.vlan);
+ err = mlx5_esw_create_vport_egress_acl_vlan(esw, vport, vport->info.vlan,
+ MLX5_FLOW_CONTEXT_ACTION_ALLOW);
+ if (err)
+ return err;
- spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
- flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
- vport->egress.allowed_vlan =
- mlx5_add_flow_rules(vport->egress.acl, spec,
- &flow_act, NULL, 0);
- if (IS_ERR(vport->egress.allowed_vlan)) {
- err = PTR_ERR(vport->egress.allowed_vlan);
- esw_warn(esw->dev,
- "vport[%d] configure egress allowed vlan rule failed, err(%d)\n",
- vport->vport, err);
- vport->egress.allowed_vlan = NULL;
+ /* Drop others rule (star rule) */
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec) {
+ err = -ENOMEM;
goto out;
}
- /* Drop others rule (star rule) */
- memset(spec, 0, sizeof(*spec));
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
/* Attach egress drop flow counter */
@@ -1396,15 +1445,15 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
dst = &drop_ctr_dst;
dest_num++;
}
- vport->egress.drop_rule =
+ vport->egress.legacy.drop_rule =
mlx5_add_flow_rules(vport->egress.acl, spec,
&flow_act, dst, dest_num);
- if (IS_ERR(vport->egress.drop_rule)) {
- err = PTR_ERR(vport->egress.drop_rule);
+ if (IS_ERR(vport->egress.legacy.drop_rule)) {
+ err = PTR_ERR(vport->egress.legacy.drop_rule);
esw_warn(esw->dev,
"vport[%d] configure egress drop rule failed, err(%d)\n",
vport->vport, err);
- vport->egress.drop_rule = NULL;
+ vport->egress.legacy.drop_rule = NULL;
}
out:
kvfree(spec);
@@ -1619,7 +1668,7 @@ static void esw_apply_vport_conf(struct mlx5_eswitch *esw,
u16 vport_num = vport->vport;
int flags;
- if (esw->manager_vport == vport_num)
+ if (mlx5_esw_is_manager_vport(esw, vport_num))
return;
mlx5_modify_vport_admin_state(esw->dev,
@@ -1639,66 +1688,112 @@ static void esw_apply_vport_conf(struct mlx5_eswitch *esw,
SET_VLAN_STRIP | SET_VLAN_INSERT : 0;
modify_esw_vport_cvlan(esw->dev, vport_num, vport->info.vlan, vport->info.qos,
flags);
-
- /* Only legacy mode needs ACLs */
- if (esw->mode == MLX5_ESWITCH_LEGACY) {
- esw_vport_ingress_config(esw, vport);
- esw_vport_egress_config(esw, vport);
- }
}
-static void esw_vport_create_drop_counters(struct mlx5_vport *vport)
+static int esw_vport_create_legacy_acl_tables(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
{
- struct mlx5_core_dev *dev = vport->dev;
+ int ret;
- if (MLX5_CAP_ESW_INGRESS_ACL(dev, flow_counter)) {
- vport->ingress.drop_counter = mlx5_fc_create(dev, false);
- if (IS_ERR(vport->ingress.drop_counter)) {
- esw_warn(dev,
+ /* Only non-manager vports need ACLs in legacy mode */
+ if (mlx5_esw_is_manager_vport(esw, vport->vport))
+ return 0;
+
+ if (!mlx5_esw_is_manager_vport(esw, vport->vport) &&
+ MLX5_CAP_ESW_INGRESS_ACL(esw->dev, flow_counter)) {
+ vport->ingress.legacy.drop_counter = mlx5_fc_create(esw->dev, false);
+ if (IS_ERR(vport->ingress.legacy.drop_counter)) {
+ esw_warn(esw->dev,
"vport[%d] configure ingress drop rule counter failed\n",
vport->vport);
- vport->ingress.drop_counter = NULL;
+ vport->ingress.legacy.drop_counter = NULL;
}
}
- if (MLX5_CAP_ESW_EGRESS_ACL(dev, flow_counter)) {
- vport->egress.drop_counter = mlx5_fc_create(dev, false);
- if (IS_ERR(vport->egress.drop_counter)) {
- esw_warn(dev,
+ ret = esw_vport_ingress_config(esw, vport);
+ if (ret)
+ goto ingress_err;
+
+ if (!mlx5_esw_is_manager_vport(esw, vport->vport) &&
+ MLX5_CAP_ESW_EGRESS_ACL(esw->dev, flow_counter)) {
+ vport->egress.legacy.drop_counter = mlx5_fc_create(esw->dev, false);
+ if (IS_ERR(vport->egress.legacy.drop_counter)) {
+ esw_warn(esw->dev,
"vport[%d] configure egress drop rule counter failed\n",
vport->vport);
- vport->egress.drop_counter = NULL;
+ vport->egress.legacy.drop_counter = NULL;
}
}
+
+ ret = esw_vport_egress_config(esw, vport);
+ if (ret)
+ goto egress_err;
+
+ return 0;
+
+egress_err:
+ esw_vport_disable_legacy_ingress_acl(esw, vport);
+ mlx5_fc_destroy(esw->dev, vport->egress.legacy.drop_counter);
+ vport->egress.legacy.drop_counter = NULL;
+
+ingress_err:
+ mlx5_fc_destroy(esw->dev, vport->ingress.legacy.drop_counter);
+ vport->ingress.legacy.drop_counter = NULL;
+ return ret;
}
-static void esw_vport_destroy_drop_counters(struct mlx5_vport *vport)
+static int esw_vport_setup_acl(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
{
- struct mlx5_core_dev *dev = vport->dev;
+ if (esw->mode == MLX5_ESWITCH_LEGACY)
+ return esw_vport_create_legacy_acl_tables(esw, vport);
+ else
+ return esw_vport_create_offloads_acl_tables(esw, vport);
+}
- if (vport->ingress.drop_counter)
- mlx5_fc_destroy(dev, vport->ingress.drop_counter);
- if (vport->egress.drop_counter)
- mlx5_fc_destroy(dev, vport->egress.drop_counter);
+static void esw_vport_destroy_legacy_acl_tables(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+
+{
+ if (mlx5_esw_is_manager_vport(esw, vport->vport))
+ return;
+
+ esw_vport_disable_egress_acl(esw, vport);
+ mlx5_fc_destroy(esw->dev, vport->egress.legacy.drop_counter);
+ vport->egress.legacy.drop_counter = NULL;
+
+ esw_vport_disable_legacy_ingress_acl(esw, vport);
+ mlx5_fc_destroy(esw->dev, vport->ingress.legacy.drop_counter);
+ vport->ingress.legacy.drop_counter = NULL;
}
-static void esw_enable_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
- enum mlx5_eswitch_vport_event enabled_events)
+static void esw_vport_cleanup_acl(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ if (esw->mode == MLX5_ESWITCH_LEGACY)
+ esw_vport_destroy_legacy_acl_tables(esw, vport);
+ else
+ esw_vport_destroy_offloads_acl_tables(esw, vport);
+}
+
+static int esw_enable_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
+ enum mlx5_eswitch_vport_event enabled_events)
{
u16 vport_num = vport->vport;
+ int ret;
mutex_lock(&esw->state_lock);
WARN_ON(vport->enabled);
esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num);
- /* Create steering drop counters for ingress and egress ACLs */
- if (vport_num && esw->mode == MLX5_ESWITCH_LEGACY)
- esw_vport_create_drop_counters(vport);
-
/* Restore old vport configuration */
esw_apply_vport_conf(esw, vport);
+ ret = esw_vport_setup_acl(esw, vport);
+ if (ret)
+ goto done;
+
/* Attach vport to the eswitch rate limiter */
if (esw_vport_enable_qos(esw, vport, vport->info.max_rate,
vport->qos.bw_share))
@@ -1711,7 +1806,7 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
/* Esw manager is trusted by default. Host PF (vport 0) is trusted as well
* in smartNIC as it's a vport group manager.
*/
- if (esw->manager_vport == vport_num ||
+ if (mlx5_esw_is_manager_vport(esw, vport_num) ||
(!vport_num && mlx5_core_is_ecpf(esw->dev)))
vport->info.trusted = true;
@@ -1719,7 +1814,9 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
esw->enabled_vports++;
esw_debug(esw->dev, "Enabled VPORT(%d)\n", vport_num);
+done:
mutex_unlock(&esw->state_lock);
+ return ret;
}
static void esw_disable_vport(struct mlx5_eswitch *esw,
@@ -1727,18 +1824,16 @@ static void esw_disable_vport(struct mlx5_eswitch *esw,
{
u16 vport_num = vport->vport;
+ mutex_lock(&esw->state_lock);
if (!vport->enabled)
- return;
+ goto done;
esw_debug(esw->dev, "Disabling vport(%d)\n", vport_num);
/* Mark this vport as disabled to discard new events */
vport->enabled = false;
- /* Wait for current already scheduled events to complete */
- flush_workqueue(esw->work_queue);
/* Disable events from this vport */
arm_vport_context_events_cmd(esw->dev, vport->vport, 0);
- mutex_lock(&esw->state_lock);
/* We don't assume VFs will cleanup after themselves.
* Calling vport change handler while vport is disabled will cleanup
* the vport resources.
@@ -1746,17 +1841,18 @@ static void esw_disable_vport(struct mlx5_eswitch *esw,
esw_vport_change_handle_locked(vport);
vport->enabled_events = 0;
esw_vport_disable_qos(esw, vport);
- if (esw->manager_vport != vport_num &&
- esw->mode == MLX5_ESWITCH_LEGACY) {
+
+ if (!mlx5_esw_is_manager_vport(esw, vport->vport) &&
+ esw->mode == MLX5_ESWITCH_LEGACY)
mlx5_modify_vport_admin_state(esw->dev,
MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
vport_num, 1,
MLX5_VPORT_ADMIN_STATE_DOWN);
- esw_vport_disable_egress_acl(esw, vport);
- esw_vport_disable_ingress_acl(esw, vport);
- esw_vport_destroy_drop_counters(vport);
- }
+
+ esw_vport_cleanup_acl(esw, vport);
esw->enabled_vports--;
+
+done:
mutex_unlock(&esw->state_lock);
}
@@ -1770,12 +1866,8 @@ static int eswitch_vport_event(struct notifier_block *nb,
vport_num = be16_to_cpu(eqe->data.vport_change.vport_num);
vport = mlx5_eswitch_get_vport(esw, vport_num);
- if (IS_ERR(vport))
- return NOTIFY_OK;
-
- if (vport->enabled)
+ if (!IS_ERR(vport))
queue_work(esw->work_queue, &vport->vport_change_handler);
-
return NOTIFY_OK;
}
@@ -1831,32 +1923,66 @@ static void mlx5_eswitch_event_handlers_unregister(struct mlx5_eswitch *esw)
flush_workqueue(esw->work_queue);
}
+static void mlx5_eswitch_clear_vf_vports_info(struct mlx5_eswitch *esw)
+{
+ struct mlx5_vport *vport;
+ int i;
+
+ mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
+ memset(&vport->info, 0, sizeof(vport->info));
+}
+
/* Public E-Switch API */
#define ESW_ALLOWED(esw) ((esw) && MLX5_ESWITCH_MANAGER((esw)->dev))
/* mlx5_eswitch_enable_pf_vf_vports() enables vports of PF, ECPF and VFs
* whichever are present on the eswitch.
*/
-void
+int
mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
enum mlx5_eswitch_vport_event enabled_events)
{
struct mlx5_vport *vport;
+ int num_vfs;
+ int ret;
int i;
/* Enable PF vport */
vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
- esw_enable_vport(esw, vport, enabled_events);
+ ret = esw_enable_vport(esw, vport, enabled_events);
+ if (ret)
+ return ret;
- /* Enable ECPF vports */
+ /* Enable ECPF vport */
if (mlx5_ecpf_vport_exists(esw->dev)) {
vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
- esw_enable_vport(esw, vport, enabled_events);
+ ret = esw_enable_vport(esw, vport, enabled_events);
+ if (ret)
+ goto ecpf_err;
}
/* Enable VF vports */
- mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
- esw_enable_vport(esw, vport, enabled_events);
+ mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
+ ret = esw_enable_vport(esw, vport, enabled_events);
+ if (ret)
+ goto vf_err;
+ }
+ return 0;
+
+vf_err:
+ num_vfs = i - 1;
+ mlx5_esw_for_each_vf_vport_reverse(esw, i, vport, num_vfs)
+ esw_disable_vport(esw, vport);
+
+ if (mlx5_ecpf_vport_exists(esw->dev)) {
+ vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
+ esw_disable_vport(esw, vport);
+ }
+
+ecpf_err:
+ vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
+ esw_disable_vport(esw, vport);
+ return ret;
}
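
mlx5_eswitch_enable_pf_vf_vports() now propagates failure and rolls back only what it enabled: if VF i fails, the reverse iterator walks VFs 0..i-1, then the ECPF and PF are disabled. A condensed sketch of the idiom, with hypothetical enable_one()/disable_one() helpers:

struct item;
static int enable_one(struct item *it);
static void disable_one(struct item *it);

static int enable_all(struct item *v, int n)
{
	int err, i;

	for (i = 0; i < n; i++) {
		err = enable_one(&v[i]);
		if (err)
			goto rollback;
	}
	return 0;

rollback:
	while (--i >= 0)		/* only entries 0..i-1 were enabled */
		disable_one(&v[i]);
	return err;
}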
/* mlx5_eswitch_disable_pf_vf_vports() disables vports of PF, ECPF and VFs
@@ -1923,7 +2049,7 @@ abort:
return err;
}
-void mlx5_eswitch_disable(struct mlx5_eswitch *esw)
+void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf)
{
int old_mode;
@@ -1952,6 +2078,8 @@ void mlx5_eswitch_disable(struct mlx5_eswitch *esw)
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
}
+ if (clear_vf)
+ mlx5_eswitch_clear_vf_vports_info(esw);
}
int mlx5_eswitch_init(struct mlx5_core_dev *dev)
@@ -2117,7 +2245,7 @@ int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
unlock:
mutex_unlock(&esw->state_lock);
- return 0;
+ return err;
}
int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
@@ -2474,12 +2602,12 @@ static int mlx5_eswitch_query_vport_drop_stats(struct mlx5_core_dev *dev,
if (!vport->enabled || esw->mode != MLX5_ESWITCH_LEGACY)
return 0;
- if (vport->egress.drop_counter)
- mlx5_fc_query(dev, vport->egress.drop_counter,
+ if (vport->egress.legacy.drop_counter)
+ mlx5_fc_query(dev, vport->egress.legacy.drop_counter,
&stats->rx_dropped, &bytes);
- if (vport->ingress.drop_counter)
- mlx5_fc_query(dev, vport->ingress.drop_counter,
+ if (vport->ingress.legacy.drop_counter)
+ mlx5_fc_query(dev, vport->ingress.legacy.drop_counter,
&stats->tx_dropped, &bytes);
if (!MLX5_CAP_GEN(dev, receive_discard_vport_down) &&
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 6bd6f5895244..962888a7c3c9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -43,6 +43,16 @@
#include <linux/mlx5/fs.h>
#include "lib/mpfs.h"
+#define FDB_TC_MAX_CHAIN 3
+#define FDB_FT_CHAIN (FDB_TC_MAX_CHAIN + 1)
+#define FDB_TC_SLOW_PATH_CHAIN (FDB_FT_CHAIN + 1)
+
+/* The index of the last real chain (FT) + 1, since chain zero is valid as well */
+#define FDB_NUM_CHAINS (FDB_FT_CHAIN + 1)
+
+#define FDB_TC_MAX_PRIO 16
+#define FDB_TC_LEVELS_PER_PRIO 2
+
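
Evaluated, the new chain constants expand as follows (a worked expansion, not part of the patch):

FDB_TC_MAX_CHAIN       = 3	/* TC chains 0..3 */
FDB_FT_CHAIN           = 4	/* the FT chain sits right after the last TC chain */
FDB_TC_SLOW_PATH_CHAIN = 5
FDB_NUM_CHAINS         = 5	/* chains 0..4, i.e. last real chain + 1 */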
#ifdef CONFIG_MLX5_ESWITCH
#define MLX5_MAX_UC_PER_VPORT(dev) \
@@ -59,21 +69,22 @@
#define mlx5_esw_has_fwd_fdb(dev) \
MLX5_CAP_ESW_FLOWTABLE(dev, fdb_multi_path_to_table)
-#define FDB_MAX_CHAIN 3
-#define FDB_SLOW_PATH_CHAIN (FDB_MAX_CHAIN + 1)
-#define FDB_MAX_PRIO 16
-
struct vport_ingress {
struct mlx5_flow_table *acl;
- struct mlx5_flow_group *allow_untagged_spoofchk_grp;
- struct mlx5_flow_group *allow_spoofchk_only_grp;
- struct mlx5_flow_group *allow_untagged_only_grp;
- struct mlx5_flow_group *drop_grp;
- struct mlx5_modify_hdr *modify_metadata;
- struct mlx5_flow_handle *modify_metadata_rule;
- struct mlx5_flow_handle *allow_rule;
- struct mlx5_flow_handle *drop_rule;
- struct mlx5_fc *drop_counter;
+ struct mlx5_flow_handle *allow_rule;
+ struct {
+ struct mlx5_flow_group *allow_spoofchk_only_grp;
+ struct mlx5_flow_group *allow_untagged_spoofchk_grp;
+ struct mlx5_flow_group *allow_untagged_only_grp;
+ struct mlx5_flow_group *drop_grp;
+ struct mlx5_flow_handle *drop_rule;
+ struct mlx5_fc *drop_counter;
+ } legacy;
+ struct {
+ struct mlx5_flow_group *metadata_grp;
+ struct mlx5_modify_hdr *modify_metadata;
+ struct mlx5_flow_handle *modify_metadata_rule;
+ } offloads;
};
struct vport_egress {
@@ -81,8 +92,10 @@ struct vport_egress {
struct mlx5_flow_group *allowed_vlans_grp;
struct mlx5_flow_group *drop_grp;
struct mlx5_flow_handle *allowed_vlan;
- struct mlx5_flow_handle *drop_rule;
- struct mlx5_fc *drop_counter;
+ struct {
+ struct mlx5_flow_handle *drop_rule;
+ struct mlx5_fc *drop_counter;
+ } legacy;
};
struct mlx5_vport_drop_stats {
@@ -139,7 +152,6 @@ enum offloads_fdb_flags {
extern const unsigned int ESW_POOLS[4];
-#define PRIO_LEVELS 2
struct mlx5_eswitch_fdb {
union {
struct legacy_fdb {
@@ -166,7 +178,7 @@ struct mlx5_eswitch_fdb {
struct {
struct mlx5_flow_table *fdb;
u32 num_rules;
- } fdb_prio[FDB_MAX_CHAIN + 1][FDB_MAX_PRIO + 1][PRIO_LEVELS];
+ } fdb_prio[FDB_NUM_CHAINS][FDB_TC_MAX_PRIO + 1][FDB_TC_LEVELS_PER_PRIO];
/* Protects fdb_prio table */
struct mutex fdb_prio_lock;
@@ -217,8 +229,8 @@ enum {
struct mlx5_eswitch {
struct mlx5_core_dev *dev;
struct mlx5_nb nb;
- /* legacy data structures */
struct mlx5_eswitch_fdb fdb_table;
+ /* legacy data structures */
struct hlist_head mc_table[MLX5_L2_ADDR_HASH_SIZE];
struct esw_mc_addr mc_promisc;
/* end of legacy */
@@ -251,18 +263,16 @@ void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw);
int esw_offloads_init_reps(struct mlx5_eswitch *esw);
void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
struct mlx5_vport *vport);
-int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport);
+int esw_vport_create_ingress_acl_table(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport,
+ int table_size);
+void esw_vport_destroy_ingress_acl_table(struct mlx5_vport *vport);
void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw,
struct mlx5_vport *vport);
int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
struct mlx5_vport *vport);
void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw,
struct mlx5_vport *vport);
-void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport);
-void esw_vport_del_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport);
int mlx5_esw_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num,
u32 rate_mbps);
@@ -270,7 +280,7 @@ int mlx5_esw_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num,
int mlx5_eswitch_init(struct mlx5_core_dev *dev);
void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw);
int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode);
-void mlx5_eswitch_disable(struct mlx5_eswitch *esw);
+void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf);
int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
u16 vport, u8 mac[ETH_ALEN]);
int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
@@ -292,9 +302,11 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
struct ifla_vf_stats *vf_stats);
void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule);
-int mlx5_eswitch_modify_esw_vport_context(struct mlx5_eswitch *esw, u16 vport,
+int mlx5_eswitch_modify_esw_vport_context(struct mlx5_core_dev *dev, u16 vport,
+ bool other_vport,
void *in, int inlen);
-int mlx5_eswitch_query_esw_vport_context(struct mlx5_eswitch *esw, u16 vport,
+int mlx5_eswitch_query_esw_vport_context(struct mlx5_core_dev *dev, u16 vport,
+ bool other_vport,
void *out, int outlen);
struct mlx5_flow_spec;
@@ -421,6 +433,10 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
u16 vport, u16 vlan, u8 qos, u8 set_flags);
+int mlx5_esw_create_vport_egress_acl_vlan(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport,
+ u16 vlan_id, u32 flow_action);
+
static inline bool mlx5_eswitch_vlan_actions_supported(struct mlx5_core_dev *dev,
u8 vlan_depth)
{
@@ -459,6 +475,12 @@ static inline u16 mlx5_eswitch_manager_vport(struct mlx5_core_dev *dev)
MLX5_VPORT_ECPF : MLX5_VPORT_PF;
}
+static inline bool
+mlx5_esw_is_manager_vport(const struct mlx5_eswitch *esw, u16 vport_num)
+{
+ return esw->manager_vport == vport_num;
+}
+
static inline u16 mlx5_eswitch_first_host_vport_num(struct mlx5_core_dev *dev)
{
return mlx5_core_is_ecpf_esw_manager(dev) ?
@@ -593,17 +615,24 @@ bool mlx5_eswitch_is_vf_vport(const struct mlx5_eswitch *esw, u16 vport_num);
void mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, const int num_vfs);
int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data);
-void
+int
mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
enum mlx5_eswitch_vport_event enabled_events);
void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw);
+int
+esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport);
+void
+esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport);
+
#else /* CONFIG_MLX5_ESWITCH */
/* eswitch API stubs */
static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
static inline void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) {}
static inline int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode) { return 0; }
-static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw) {}
+static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf) {}
static inline bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1) { return true; }
static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) { return false; }
static inline const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
@@ -613,10 +642,6 @@ static inline const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
static inline void mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, const int num_vfs) {}
-#define FDB_MAX_CHAIN 1
-#define FDB_SLOW_PATH_CHAIN (FDB_MAX_CHAIN + 1)
-#define FDB_MAX_PRIO 1
-
#endif /* CONFIG_MLX5_ESWITCH */
#endif /* __MLX5_ESWITCH_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 9004a07e457a..8ba59a21a163 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -75,7 +75,7 @@ bool mlx5_eswitch_prios_supported(struct mlx5_eswitch *esw)
u32 mlx5_eswitch_get_chain_range(struct mlx5_eswitch *esw)
{
if (esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)
- return FDB_MAX_CHAIN;
+ return FDB_TC_MAX_CHAIN;
return 0;
}
@@ -83,7 +83,7 @@ u32 mlx5_eswitch_get_chain_range(struct mlx5_eswitch *esw)
u16 mlx5_eswitch_get_prio_range(struct mlx5_eswitch *esw)
{
if (esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)
- return FDB_MAX_PRIO;
+ return FDB_TC_MAX_PRIO;
return 1;
}
@@ -599,7 +599,7 @@ static int esw_set_passing_vport_metadata(struct mlx5_eswitch *esw, bool enable)
if (!mlx5_eswitch_vport_match_metadata_enabled(esw))
return 0;
- err = mlx5_eswitch_query_esw_vport_context(esw, esw->manager_vport,
+ err = mlx5_eswitch_query_esw_vport_context(esw->dev, 0, false,
out, sizeof(out));
if (err)
return err;
@@ -618,7 +618,7 @@ static int esw_set_passing_vport_metadata(struct mlx5_eswitch *esw, bool enable)
MLX5_SET(modify_esw_vport_context_in, in,
field_select.fdb_to_vport_reg_c_id, 1);
- return mlx5_eswitch_modify_esw_vport_context(esw, esw->manager_vport,
+ return mlx5_eswitch_modify_esw_vport_context(esw->dev, 0, false,
in, sizeof(in));
}
@@ -927,7 +927,7 @@ esw_get_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level)
int table_prio, l = 0;
u32 flags = 0;
- if (chain == FDB_SLOW_PATH_CHAIN)
+ if (chain == FDB_TC_SLOW_PATH_CHAIN)
return esw->fdb_table.offloads.slow_fdb;
mutex_lock(&esw->fdb_table.offloads.fdb_prio_lock);
@@ -952,7 +952,7 @@ esw_get_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level)
flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
- table_prio = (chain * FDB_MAX_PRIO) + prio - 1;
+ table_prio = prio - 1;
/* create earlier levels for correct fs_core lookup when
* connecting tables
@@ -989,7 +989,7 @@ esw_put_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level)
{
int l;
- if (chain == FDB_SLOW_PATH_CHAIN)
+ if (chain == FDB_TC_SLOW_PATH_CHAIN)
return;
mutex_lock(&esw->fdb_table.offloads.fdb_prio_lock);
@@ -1369,7 +1369,7 @@ static int esw_offloads_start(struct mlx5_eswitch *esw,
return -EINVAL;
}
- mlx5_eswitch_disable(esw);
+ mlx5_eswitch_disable(esw, false);
mlx5_eswitch_update_num_of_vfs(esw, esw->dev->priv.sriov.num_vfs);
err = mlx5_eswitch_enable(esw, MLX5_ESWITCH_OFFLOADS);
if (err) {
@@ -1777,9 +1777,9 @@ static int esw_vport_ingress_prio_tag_config(struct mlx5_eswitch *esw,
flow_act.vlan[0].vid = 0;
flow_act.vlan[0].prio = 0;
- if (vport->ingress.modify_metadata_rule) {
+ if (vport->ingress.offloads.modify_metadata_rule) {
flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
- flow_act.modify_hdr = vport->ingress.modify_metadata;
+ flow_act.modify_hdr = vport->ingress.offloads.modify_metadata;
}
vport->ingress.allow_rule =
@@ -1815,11 +1815,11 @@ static int esw_vport_add_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
MLX5_SET(set_action_in, action, data,
mlx5_eswitch_get_vport_metadata_for_match(esw, vport->vport));
- vport->ingress.modify_metadata =
+ vport->ingress.offloads.modify_metadata =
mlx5_modify_header_alloc(esw->dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS,
1, action);
- if (IS_ERR(vport->ingress.modify_metadata)) {
- err = PTR_ERR(vport->ingress.modify_metadata);
+ if (IS_ERR(vport->ingress.offloads.modify_metadata)) {
+ err = PTR_ERR(vport->ingress.offloads.modify_metadata);
esw_warn(esw->dev,
"failed to alloc modify header for vport %d ingress acl (%d)\n",
vport->vport, err);
@@ -1827,100 +1827,76 @@ static int esw_vport_add_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
}
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR | MLX5_FLOW_CONTEXT_ACTION_ALLOW;
- flow_act.modify_hdr = vport->ingress.modify_metadata;
- vport->ingress.modify_metadata_rule = mlx5_add_flow_rules(vport->ingress.acl,
- &spec, &flow_act, NULL, 0);
- if (IS_ERR(vport->ingress.modify_metadata_rule)) {
- err = PTR_ERR(vport->ingress.modify_metadata_rule);
+ flow_act.modify_hdr = vport->ingress.offloads.modify_metadata;
+ vport->ingress.offloads.modify_metadata_rule =
+ mlx5_add_flow_rules(vport->ingress.acl,
+ &spec, &flow_act, NULL, 0);
+ if (IS_ERR(vport->ingress.offloads.modify_metadata_rule)) {
+ err = PTR_ERR(vport->ingress.offloads.modify_metadata_rule);
esw_warn(esw->dev,
"failed to add setting metadata rule for vport %d ingress acl, err(%d)\n",
vport->vport, err);
- vport->ingress.modify_metadata_rule = NULL;
+ vport->ingress.offloads.modify_metadata_rule = NULL;
goto out;
}
out:
if (err)
- mlx5_modify_header_dealloc(esw->dev, vport->ingress.modify_metadata);
+ mlx5_modify_header_dealloc(esw->dev, vport->ingress.offloads.modify_metadata);
return err;
}
-void esw_vport_del_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport)
+static void esw_vport_del_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
{
- if (vport->ingress.modify_metadata_rule) {
- mlx5_del_flow_rules(vport->ingress.modify_metadata_rule);
- mlx5_modify_header_dealloc(esw->dev, vport->ingress.modify_metadata);
+ if (vport->ingress.offloads.modify_metadata_rule) {
+ mlx5_del_flow_rules(vport->ingress.offloads.modify_metadata_rule);
+ mlx5_modify_header_dealloc(esw->dev, vport->ingress.offloads.modify_metadata);
- vport->ingress.modify_metadata_rule = NULL;
+ vport->ingress.offloads.modify_metadata_rule = NULL;
}
}
-static int esw_vport_egress_prio_tag_config(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport)
+static int esw_vport_create_ingress_acl_group(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
{
- struct mlx5_flow_act flow_act = {0};
- struct mlx5_flow_spec *spec;
- int err = 0;
-
- if (!MLX5_CAP_GEN(esw->dev, prio_tag_required))
- return 0;
-
- /* For prio tag mode, there is only 1 FTEs:
- * 1) prio tag packets - pop the prio tag VLAN, allow
- * Unmatched traffic is allowed by default
- */
-
- esw_vport_cleanup_egress_rules(esw, vport);
-
- err = esw_vport_enable_egress_acl(esw, vport);
- if (err) {
- mlx5_core_warn(esw->dev,
- "failed to enable egress acl (%d) on vport[%d]\n",
- err, vport->vport);
- return err;
- }
-
- esw_debug(esw->dev,
- "vport[%d] configure prio tag egress rules\n", vport->vport);
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_group *g;
+ u32 *flow_group_in;
+ int ret = 0;
- spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
- if (!spec) {
- err = -ENOMEM;
- goto out_no_mem;
- }
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+ if (!flow_group_in)
+ return -ENOMEM;
- /* prio tag vlan rule - pop it so VF receives untagged packets */
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
- MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.cvlan_tag);
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.first_vid);
- MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, 0);
+ memset(flow_group_in, 0, inlen);
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
- spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
- flow_act.action = MLX5_FLOW_CONTEXT_ACTION_VLAN_POP |
- MLX5_FLOW_CONTEXT_ACTION_ALLOW;
- vport->egress.allowed_vlan =
- mlx5_add_flow_rules(vport->egress.acl, spec,
- &flow_act, NULL, 0);
- if (IS_ERR(vport->egress.allowed_vlan)) {
- err = PTR_ERR(vport->egress.allowed_vlan);
+ g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
+ if (IS_ERR(g)) {
+ ret = PTR_ERR(g);
esw_warn(esw->dev,
- "vport[%d] configure egress pop prio tag vlan rule failed, err(%d)\n",
- vport->vport, err);
- vport->egress.allowed_vlan = NULL;
- goto out;
+ "Failed to create vport[%d] ingress metadata group, err(%d)\n",
+ vport->vport, ret);
+ goto grp_err;
}
+ vport->ingress.offloads.metadata_grp = g;
+grp_err:
+ kvfree(flow_group_in);
+ return ret;
+}
-out:
- kvfree(spec);
-out_no_mem:
- if (err)
- esw_vport_cleanup_egress_rules(esw, vport);
- return err;
+static void esw_vport_destroy_ingress_acl_group(struct mlx5_vport *vport)
+{
+ if (vport->ingress.offloads.metadata_grp) {
+ mlx5_destroy_flow_group(vport->ingress.offloads.metadata_grp);
+ vport->ingress.offloads.metadata_grp = NULL;
+ }
}
-static int esw_vport_ingress_common_config(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport)
+static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
{
int err;
@@ -1929,8 +1905,7 @@ static int esw_vport_ingress_common_config(struct mlx5_eswitch *esw,
return 0;
esw_vport_cleanup_ingress_rules(esw, vport);
-
- err = esw_vport_enable_ingress_acl(esw, vport);
+ err = esw_vport_create_ingress_acl_table(esw, vport, 1);
if (err) {
esw_warn(esw->dev,
"failed to enable ingress acl (%d) on vport[%d]\n",
@@ -1938,25 +1913,65 @@ static int esw_vport_ingress_common_config(struct mlx5_eswitch *esw,
return err;
}
+ err = esw_vport_create_ingress_acl_group(esw, vport);
+ if (err)
+ goto group_err;
+
esw_debug(esw->dev,
"vport[%d] configure ingress rules\n", vport->vport);
if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
err = esw_vport_add_ingress_acl_modify_metadata(esw, vport);
if (err)
- goto out;
+ goto metadata_err;
}
if (MLX5_CAP_GEN(esw->dev, prio_tag_required) &&
mlx5_eswitch_is_vf_vport(esw, vport->vport)) {
err = esw_vport_ingress_prio_tag_config(esw, vport);
if (err)
- goto out;
+ goto prio_tag_err;
}
+ return 0;
-out:
+prio_tag_err:
+ esw_vport_del_ingress_acl_modify_metadata(esw, vport);
+metadata_err:
+ esw_vport_cleanup_ingress_rules(esw, vport);
+ esw_vport_destroy_ingress_acl_group(vport);
+group_err:
+ esw_vport_destroy_ingress_acl_table(vport);
+ return err;
+}
+
+static int esw_vport_egress_config(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ int err;
+
+ if (!MLX5_CAP_GEN(esw->dev, prio_tag_required))
+ return 0;
+
+ esw_vport_cleanup_egress_rules(esw, vport);
+
+ err = esw_vport_enable_egress_acl(esw, vport);
+ if (err)
+ return err;
+
+ /* For prio tag mode, there is only 1 FTE:
+ * 1) prio tag packets - pop the prio tag VLAN, allow
+ * Unmatched traffic is allowed by default
+ */
+ esw_debug(esw->dev,
+ "vport[%d] configure prio tag egress rules\n", vport->vport);
+
+ /* prio tag vlan rule - pop it so VF receives untagged packets */
+ err = mlx5_esw_create_vport_egress_acl_vlan(esw, vport, 0,
+ MLX5_FLOW_CONTEXT_ACTION_VLAN_POP |
+ MLX5_FLOW_CONTEXT_ACTION_ALLOW);
if (err)
- esw_vport_disable_ingress_acl(esw, vport);
+ esw_vport_disable_egress_acl(esw, vport);
+
return err;
}
@@ -1980,54 +1995,59 @@ esw_check_vport_match_metadata_supported(const struct mlx5_eswitch *esw)
return true;
}
-static int esw_create_offloads_acl_tables(struct mlx5_eswitch *esw)
+int
+esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
{
- struct mlx5_vport *vport;
- int i, j;
int err;
- if (esw_check_vport_match_metadata_supported(esw))
- esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;
-
- mlx5_esw_for_all_vports(esw, i, vport) {
- err = esw_vport_ingress_common_config(esw, vport);
- if (err)
- goto err_ingress;
+ err = esw_vport_ingress_config(esw, vport);
+ if (err)
+ return err;
- if (mlx5_eswitch_is_vf_vport(esw, vport->vport)) {
- err = esw_vport_egress_prio_tag_config(esw, vport);
- if (err)
- goto err_egress;
+ if (mlx5_eswitch_is_vf_vport(esw, vport->vport)) {
+ err = esw_vport_egress_config(esw, vport);
+ if (err) {
+ esw_vport_del_ingress_acl_modify_metadata(esw, vport);
+ esw_vport_cleanup_ingress_rules(esw, vport);
+ esw_vport_destroy_ingress_acl_table(vport);
}
}
+ return err;
+}
- if (mlx5_eswitch_vport_match_metadata_enabled(esw))
- esw_info(esw->dev, "Use metadata reg_c as source vport to match\n");
+void
+esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ esw_vport_disable_egress_acl(esw, vport);
+ esw_vport_del_ingress_acl_modify_metadata(esw, vport);
+ esw_vport_cleanup_ingress_rules(esw, vport);
+ esw_vport_destroy_ingress_acl_group(vport);
+ esw_vport_destroy_ingress_acl_table(vport);
+}
- return 0;
+static int esw_create_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
+{
+ struct mlx5_vport *vport;
+ int err;
-err_egress:
- esw_vport_disable_ingress_acl(esw, vport);
-err_ingress:
- for (j = MLX5_VPORT_PF; j < i; j++) {
- vport = &esw->vports[j];
- esw_vport_disable_egress_acl(esw, vport);
- esw_vport_disable_ingress_acl(esw, vport);
- }
+ if (esw_check_vport_match_metadata_supported(esw))
+ esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;
+ vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
+ err = esw_vport_create_offloads_acl_tables(esw, vport);
+ if (err)
+ esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
return err;
}
-static void esw_destroy_offloads_acl_tables(struct mlx5_eswitch *esw)
+static void esw_destroy_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
{
struct mlx5_vport *vport;
- int i;
-
- mlx5_esw_for_all_vports(esw, i, vport) {
- esw_vport_disable_egress_acl(esw, vport);
- esw_vport_disable_ingress_acl(esw, vport);
- }
+ vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
+ esw_vport_destroy_offloads_acl_tables(esw, vport);
esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
}
@@ -2045,7 +2065,7 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb));
mutex_init(&esw->fdb_table.offloads.fdb_prio_lock);
- err = esw_create_offloads_acl_tables(esw);
+ err = esw_create_uplink_offloads_acl_tables(esw);
if (err)
return err;
@@ -2070,7 +2090,7 @@ create_ft_err:
esw_destroy_offloads_fdb_tables(esw);
create_fdb_err:
- esw_destroy_offloads_acl_tables(esw);
+ esw_destroy_uplink_offloads_acl_tables(esw);
return err;
}
@@ -2080,7 +2100,7 @@ static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
esw_destroy_vport_rx_group(esw);
esw_destroy_offloads_table(esw);
esw_destroy_offloads_fdb_tables(esw);
- esw_destroy_offloads_acl_tables(esw);
+ esw_destroy_uplink_offloads_acl_tables(esw);
}
static void
@@ -2169,7 +2189,9 @@ int esw_offloads_enable(struct mlx5_eswitch *esw)
if (err)
goto err_vport_metadata;
- mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE);
+ err = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE);
+ if (err)
+ goto err_vports;
err = esw_offloads_load_all_reps(esw);
if (err)
@@ -2182,6 +2204,7 @@ int esw_offloads_enable(struct mlx5_eswitch *esw)
err_reps:
mlx5_eswitch_disable_pf_vf_vports(esw);
+err_vports:
esw_set_passing_vport_metadata(esw, false);
err_vport_metadata:
esw_offloads_steering_cleanup(esw);
@@ -2195,7 +2218,7 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw,
{
int err, err1;
- mlx5_eswitch_disable(esw);
+ mlx5_eswitch_disable(esw, false);
err = mlx5_eswitch_enable(esw, MLX5_ESWITCH_LEGACY);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy");
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h
index eb8b0fe0b4e1..11621d265d7e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h
@@ -35,11 +35,11 @@
#include <linux/mlx5/driver.h>
-enum mlx5_fpga_device_id {
- MLX5_FPGA_DEVICE_UNKNOWN = 0,
- MLX5_FPGA_DEVICE_KU040 = 1,
- MLX5_FPGA_DEVICE_KU060 = 2,
- MLX5_FPGA_DEVICE_KU060_2 = 3,
+enum mlx5_fpga_id {
+ MLX5_FPGA_NEWTON = 0,
+ MLX5_FPGA_EDISON = 1,
+ MLX5_FPGA_MORSE = 2,
+ MLX5_FPGA_MORSEQ = 3,
};
enum mlx5_fpga_image {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c
index d046d1ec2a86..2ce4241459ce 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c
@@ -81,19 +81,28 @@ static const char *mlx5_fpga_image_name(enum mlx5_fpga_image image)
}
}
-static const char *mlx5_fpga_device_name(u32 device)
+static const char *mlx5_fpga_name(u32 fpga_id)
{
- switch (device) {
- case MLX5_FPGA_DEVICE_KU040:
- return "ku040";
- case MLX5_FPGA_DEVICE_KU060:
- return "ku060";
- case MLX5_FPGA_DEVICE_KU060_2:
- return "ku060_2";
- case MLX5_FPGA_DEVICE_UNKNOWN:
- default:
- return "unknown";
+ static char ret[32];
+
+ switch (fpga_id) {
+ case MLX5_FPGA_NEWTON:
+ return "Newton";
+ case MLX5_FPGA_EDISON:
+ return "Edison";
+ case MLX5_FPGA_MORSE:
+ return "Morse";
+ case MLX5_FPGA_MORSEQ:
+ return "MorseQ";
}
+
+ snprintf(ret, sizeof(ret), "Unknown %d", fpga_id);
+ return ret;
+}
+
+static int mlx5_is_fpga_lookaside(u32 fpga_id)
+{
+ return fpga_id != MLX5_FPGA_NEWTON && fpga_id != MLX5_FPGA_EDISON;
}
static int mlx5_fpga_device_load_check(struct mlx5_fpga_device *fdev)
@@ -110,8 +119,12 @@ static int mlx5_fpga_device_load_check(struct mlx5_fpga_device *fdev)
fdev->last_admin_image = query.admin_image;
fdev->last_oper_image = query.oper_image;
- mlx5_fpga_dbg(fdev, "Status %u; Admin image %u; Oper image %u\n",
- query.status, query.admin_image, query.oper_image);
+ mlx5_fpga_info(fdev, "Status %u; Admin image %u; Oper image %u\n",
+ query.status, query.admin_image, query.oper_image);
+
+ /* For FPGA lookaside projects, the FPGA load status is not important */
+ if (mlx5_is_fpga_lookaside(MLX5_CAP_FPGA(fdev->mdev, fpga_id)))
+ return 0;
if (query.status != MLX5_FPGA_STATUS_SUCCESS) {
mlx5_fpga_err(fdev, "%s image failed to load; status %u\n",
@@ -167,25 +180,30 @@ int mlx5_fpga_device_start(struct mlx5_core_dev *mdev)
struct mlx5_fpga_device *fdev = mdev->fpga;
unsigned int max_num_qps;
unsigned long flags;
- u32 fpga_device_id;
+ u32 fpga_id;
int err;
if (!fdev)
return 0;
- err = mlx5_fpga_device_load_check(fdev);
+ err = mlx5_fpga_caps(fdev->mdev);
if (err)
goto out;
- err = mlx5_fpga_caps(fdev->mdev);
+ err = mlx5_fpga_device_load_check(fdev);
if (err)
goto out;
- fpga_device_id = MLX5_CAP_FPGA(fdev->mdev, fpga_device);
- mlx5_fpga_info(fdev, "%s:%u; %s image, version %u; SBU %06x:%04x version %d\n",
- mlx5_fpga_device_name(fpga_device_id),
- fpga_device_id,
+ fpga_id = MLX5_CAP_FPGA(fdev->mdev, fpga_id);
+ mlx5_fpga_info(fdev, "FPGA card %s:%u\n", mlx5_fpga_name(fpga_id), fpga_id);
+
+ /* No QPs if FPGA does not participate in net processing */
+ if (mlx5_is_fpga_lookaside(fpga_id))
+ goto out;
+
+ mlx5_fpga_info(fdev, "%s(%d): image, version %u; SBU %06x:%04x version %d\n",
mlx5_fpga_image_name(fdev->last_oper_image),
+ fdev->last_oper_image,
MLX5_CAP_FPGA(fdev->mdev, image_version),
MLX5_CAP_FPGA(fdev->mdev, ieee_vendor_id),
MLX5_CAP_FPGA(fdev->mdev, sandbox_product_id),
@@ -264,6 +282,9 @@ void mlx5_fpga_device_stop(struct mlx5_core_dev *mdev)
if (!fdev)
return;
+ if (mlx5_is_fpga_lookaside(MLX5_CAP_FPGA(fdev->mdev, fpga_id)))
+ return;
+
spin_lock_irqsave(&fdev->state_lock, flags);
if (fdev->state != MLX5_FPGA_STATUS_SUCCESS) {
spin_unlock_irqrestore(&fdev->state_lock, flags);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 3bbb49354829..d60577484567 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -531,9 +531,16 @@ static void del_hw_fte(struct fs_node *node)
}
}
+static void del_sw_fte_rcu(struct rcu_head *head)
+{
+ struct fs_fte *fte = container_of(head, struct fs_fte, rcu);
+ struct mlx5_flow_steering *steering = get_steering(&fte->node);
+
+ kmem_cache_free(steering->ftes_cache, fte);
+}
+
static void del_sw_fte(struct fs_node *node)
{
- struct mlx5_flow_steering *steering = get_steering(node);
struct mlx5_flow_group *fg;
struct fs_fte *fte;
int err;
@@ -546,7 +553,8 @@ static void del_sw_fte(struct fs_node *node)
rhash_fte);
WARN_ON(err);
ida_simple_remove(&fg->fte_allocator, fte->index - fg->start_index);
- kmem_cache_free(steering->ftes_cache, fte);
+
+ call_rcu(&fte->rcu, del_sw_fte_rcu);
}
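
del_sw_fte() now defers freeing the FTE until after an RCU grace period, so the lock-free readers added below in lookup_fte_for_read_locked() can never dereference a freed entry. A minimal sketch of the call_rcu() pattern, using a hypothetical struct obj:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct obj {
	int data;
	struct rcu_head rcu;
};

static void obj_free_rcu(struct rcu_head *head)
{
	struct obj *o = container_of(head, struct obj, rcu);

	kfree(o);	/* runs only after all pre-existing readers finish */
}

static void obj_delete(struct obj *o)
{
	/* Unlink from the lookup structure first, then defer the free so
	 * concurrent rcu_read_lock() readers never see freed memory.
	 */
	call_rcu(&o->rcu, obj_free_rcu);
}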
static void del_hw_flow_group(struct fs_node *node)
@@ -579,7 +587,7 @@ static void del_sw_flow_group(struct fs_node *node)
rhashtable_destroy(&fg->ftes_hash);
ida_destroy(&fg->fte_allocator);
- if (ft->autogroup.active)
+ if (ft->autogroup.active && fg->max_ftes == ft->autogroup.group_size)
ft->autogroup.num_groups--;
err = rhltable_remove(&ft->fgs_hash,
&fg->hash,
@@ -1126,6 +1134,8 @@ mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
ft->autogroup.active = true;
ft->autogroup.required_groups = max_num_groups;
+ /* Reserve space for flow groups in addition to the max types */
+ ft->autogroup.group_size = ft->max_fte / (max_num_groups + 1);
return ft;
}
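
A worked example of the reservation noted in the comment: with max_fte = 100 and max_num_groups = 4, group_size = 100 / (4 + 1) = 20, so each autogroup holds 20 FTEs and one group's worth of entries stays in reserve; the later alloc_auto_flow_group() hunk only counts a group against num_groups when it was created with this default size.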
@@ -1328,8 +1338,7 @@ static struct mlx5_flow_group *alloc_auto_flow_group(struct mlx5_flow_table *ft
return ERR_PTR(-ENOENT);
if (ft->autogroup.num_groups < ft->autogroup.required_groups)
- /* We save place for flow groups in addition to max types */
- group_size = ft->max_fte / (ft->autogroup.required_groups + 1);
+ group_size = ft->autogroup.group_size;
/* ft->max_fte == ft->autogroup.max_types */
if (group_size == 0)
@@ -1356,7 +1365,8 @@ static struct mlx5_flow_group *alloc_auto_flow_group(struct mlx5_flow_table *ft
if (IS_ERR(fg))
goto out;
- ft->autogroup.num_groups++;
+ if (group_size == ft->autogroup.group_size)
+ ft->autogroup.num_groups++;
out:
return fg;
@@ -1623,22 +1633,47 @@ static u64 matched_fgs_get_version(struct list_head *match_head)
}
static struct fs_fte *
-lookup_fte_locked(struct mlx5_flow_group *g,
- const u32 *match_value,
- bool take_write)
+lookup_fte_for_write_locked(struct mlx5_flow_group *g, const u32 *match_value)
{
struct fs_fte *fte_tmp;
- if (take_write)
- nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
- else
- nested_down_read_ref_node(&g->node, FS_LOCK_PARENT);
- fte_tmp = rhashtable_lookup_fast(&g->ftes_hash, match_value,
- rhash_fte);
+ nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
+
+ fte_tmp = rhashtable_lookup_fast(&g->ftes_hash, match_value, rhash_fte);
if (!fte_tmp || !tree_get_node(&fte_tmp->node)) {
fte_tmp = NULL;
goto out;
}
+
+ if (!fte_tmp->node.active) {
+ tree_put_node(&fte_tmp->node, false);
+ fte_tmp = NULL;
+ goto out;
+ }
+ nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD);
+
+out:
+ up_write_ref_node(&g->node, false);
+ return fte_tmp;
+}
+
+static struct fs_fte *
+lookup_fte_for_read_locked(struct mlx5_flow_group *g, const u32 *match_value)
+{
+ struct fs_fte *fte_tmp;
+
+ if (!tree_get_node(&g->node))
+ return NULL;
+
+ rcu_read_lock();
+ fte_tmp = rhashtable_lookup(&g->ftes_hash, match_value, rhash_fte);
+ if (!fte_tmp || !tree_get_node(&fte_tmp->node)) {
+ rcu_read_unlock();
+ fte_tmp = NULL;
+ goto out;
+ }
+ rcu_read_unlock();
+
if (!fte_tmp->node.active) {
tree_put_node(&fte_tmp->node, false);
fte_tmp = NULL;
@@ -1646,14 +1681,21 @@ lookup_fte_locked(struct mlx5_flow_group *g,
}
nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD);
+
out:
- if (take_write)
- up_write_ref_node(&g->node, false);
- else
- up_read_ref_node(&g->node);
+ tree_put_node(&g->node, false);
return fte_tmp;
}
+static struct fs_fte *
+lookup_fte_locked(struct mlx5_flow_group *g, const u32 *match_value, bool write)
+{
+ if (write)
+ return lookup_fte_for_write_locked(g, match_value);
+ else
+ return lookup_fte_for_read_locked(g, match_value);
+}
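
The read-side path above swaps the parent group's read lock for RCU: look up under rcu_read_lock(), take a reference before leaving the critical section, then re-check liveness. A condensed sketch of that pattern, with a hypothetical obj_put() release helper and an assumed obj_rht_params:

#include <linux/refcount.h>
#include <linux/rhashtable.h>

struct obj {
	struct rhash_head hash;
	bool active;
	refcount_t ref;
};

static const struct rhashtable_params obj_rht_params;	/* assumed defined elsewhere */

static void obj_put(struct obj *o);	/* hypothetical release helper */

static struct obj *obj_lookup(struct rhashtable *ht, const u32 *key)
{
	struct obj *o;

	rcu_read_lock();
	o = rhashtable_lookup(ht, key, obj_rht_params);
	if (o && !refcount_inc_not_zero(&o->ref))	/* lost a race with deletion */
		o = NULL;
	rcu_read_unlock();

	if (o && !o->active) {		/* found a dying entry: treat as a miss */
		obj_put(o);
		o = NULL;
	}
	return o;
}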
+
static struct mlx5_flow_handle *
try_add_to_existing_fg(struct mlx5_flow_table *ft,
struct list_head *match_head,
@@ -1814,6 +1856,13 @@ search_again_locked:
return rule;
}
+ fte = alloc_fte(ft, spec, flow_act);
+ if (IS_ERR(fte)) {
+ up_write_ref_node(&ft->node, false);
+ err = PTR_ERR(fte);
+ goto err_alloc_fte;
+ }
+
nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
up_write_ref_node(&ft->node, false);
@@ -1821,17 +1870,9 @@ search_again_locked:
if (err)
goto err_release_fg;
- fte = alloc_fte(ft, spec, flow_act);
- if (IS_ERR(fte)) {
- err = PTR_ERR(fte);
- goto err_release_fg;
- }
-
err = insert_fte(g, fte);
- if (err) {
- kmem_cache_free(steering->ftes_cache, fte);
+ if (err)
goto err_release_fg;
- }
nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
up_write_ref_node(&g->node, false);
@@ -1843,6 +1884,8 @@ search_again_locked:
err_release_fg:
up_write_ref_node(&g->node, false);
+ kmem_cache_free(steering->ftes_cache, fte);
+err_alloc_fte:
tree_put_node(&g->node, false);
return ERR_PTR(err);
}
@@ -2359,9 +2402,17 @@ static void set_prio_attrs_in_prio(struct fs_prio *prio, int acc_level)
int acc_level_ns = acc_level;
prio->start_level = acc_level;
- fs_for_each_ns(ns, prio)
+ fs_for_each_ns(ns, prio) {
/* This updates start_level and num_levels of ns's priority descendants */
acc_level_ns = set_prio_attrs_in_ns(ns, acc_level);
+
+ /* If this is a prio with chains, we can jump from one chain
+ * (namespace) to another, so we accumulate the levels
+ */
+ if (prio->node.type == FS_TYPE_PRIO_CHAINS)
+ acc_level = acc_level_ns;
+ }
+
if (!prio->num_levels)
prio->num_levels = acc_level_ns - prio->start_level;
WARN_ON(prio->num_levels < acc_level_ns - prio->start_level);
@@ -2550,58 +2601,109 @@ out_err:
steering->rdma_rx_root_ns = NULL;
return err;
}
-static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
+
+/* FT and tc chains are stored in the same array so we can reuse the
+ * mlx5_get_fdb_sub_ns() and tc API for FT chains.
+ * When creating a new ns for each chain, store it in the first available slot.
+ * Assume tc chains are created and stored first, and only then the FT chain.
+ */
+static void store_fdb_sub_ns_prio_chain(struct mlx5_flow_steering *steering,
+ struct mlx5_flow_namespace *ns)
+{
+ int chain = 0;
+
+ while (steering->fdb_sub_ns[chain])
+ ++chain;
+
+ steering->fdb_sub_ns[chain] = ns;
+}
+
+static int create_fdb_sub_ns_prio_chain(struct mlx5_flow_steering *steering,
+ struct fs_prio *maj_prio)
{
struct mlx5_flow_namespace *ns;
- struct fs_prio *maj_prio;
struct fs_prio *min_prio;
+ int prio;
+
+ ns = fs_create_namespace(maj_prio, MLX5_FLOW_TABLE_MISS_ACTION_DEF);
+ if (IS_ERR(ns))
+ return PTR_ERR(ns);
+
+ for (prio = 0; prio < FDB_TC_MAX_PRIO; prio++) {
+ min_prio = fs_create_prio(ns, prio, FDB_TC_LEVELS_PER_PRIO);
+ if (IS_ERR(min_prio))
+ return PTR_ERR(min_prio);
+ }
+
+ store_fdb_sub_ns_prio_chain(steering, ns);
+
+ return 0;
+}
+
+static int create_fdb_chains(struct mlx5_flow_steering *steering,
+ int fs_prio,
+ int chains)
+{
+ struct fs_prio *maj_prio;
int levels;
int chain;
- int prio;
int err;
- steering->fdb_root_ns = create_root_ns(steering, FS_FT_FDB);
- if (!steering->fdb_root_ns)
- return -ENOMEM;
+ levels = FDB_TC_LEVELS_PER_PRIO * FDB_TC_MAX_PRIO * chains;
+ maj_prio = fs_create_prio_chained(&steering->fdb_root_ns->ns,
+ fs_prio,
+ levels);
+ if (IS_ERR(maj_prio))
+ return PTR_ERR(maj_prio);
+
+ for (chain = 0; chain < chains; chain++) {
+ err = create_fdb_sub_ns_prio_chain(steering, maj_prio);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int create_fdb_fast_path(struct mlx5_flow_steering *steering)
+{
+ int err;
- steering->fdb_sub_ns = kzalloc(sizeof(steering->fdb_sub_ns) *
- (FDB_MAX_CHAIN + 1), GFP_KERNEL);
+ steering->fdb_sub_ns = kcalloc(FDB_NUM_CHAINS,
+ sizeof(*steering->fdb_sub_ns),
+ GFP_KERNEL);
if (!steering->fdb_sub_ns)
return -ENOMEM;
+ err = create_fdb_chains(steering, FDB_TC_OFFLOAD, FDB_TC_MAX_CHAIN + 1);
+ if (err)
+ return err;
+
+ err = create_fdb_chains(steering, FDB_FT_OFFLOAD, 1);
+ if (err)
+ return err;
+
+ return 0;
+}
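
Plugging in the constants, the two create_fdb_chains() calls above size their chained prios as levels = FDB_TC_LEVELS_PER_PRIO * FDB_TC_MAX_PRIO * chains: 2 * 16 * (3 + 1) = 128 levels for the TC chains, and 2 * 16 * 1 = 32 levels for the single FT chain.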
+
+static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
+{
+ struct fs_prio *maj_prio;
+ int err;
+
+ steering->fdb_root_ns = create_root_ns(steering, FS_FT_FDB);
+ if (!steering->fdb_root_ns)
+ return -ENOMEM;
+
maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_BYPASS_PATH,
1);
if (IS_ERR(maj_prio)) {
err = PTR_ERR(maj_prio);
goto out_err;
}
-
- levels = 2 * FDB_MAX_PRIO * (FDB_MAX_CHAIN + 1);
- maj_prio = fs_create_prio_chained(&steering->fdb_root_ns->ns,
- FDB_FAST_PATH,
- levels);
- if (IS_ERR(maj_prio)) {
- err = PTR_ERR(maj_prio);
+ err = create_fdb_fast_path(steering);
+ if (err)
goto out_err;
- }
-
- for (chain = 0; chain <= FDB_MAX_CHAIN; chain++) {
- ns = fs_create_namespace(maj_prio, MLX5_FLOW_TABLE_MISS_ACTION_DEF);
- if (IS_ERR(ns)) {
- err = PTR_ERR(ns);
- goto out_err;
- }
-
- for (prio = 0; prio < FDB_MAX_PRIO * (chain + 1); prio++) {
- min_prio = fs_create_prio(ns, prio, 2);
- if (IS_ERR(min_prio)) {
- err = PTR_ERR(min_prio);
- goto out_err;
- }
- }
-
- steering->fdb_sub_ns[chain] = ns;
- }
maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_SLOW_PATH, 1);
if (IS_ERR(maj_prio)) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index 00717eba2256..e8cd997f413e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -162,6 +162,7 @@ struct mlx5_flow_table {
struct {
bool active;
unsigned int required_groups;
+ unsigned int group_size;
unsigned int num_groups;
} autogroup;
/* Protect fwd_rules */
@@ -202,6 +203,7 @@ struct fs_fte {
enum fs_fte_status status;
struct mlx5_fc *counter;
struct rhash_head hash;
+ struct rcu_head rcu;
int modify_mask;
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index c07f3154437c..d9f4e8c59c1f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -390,7 +390,8 @@ static void print_health_info(struct mlx5_core_dev *dev)
static int
mlx5_fw_reporter_diagnose(struct devlink_health_reporter *reporter,
- struct devlink_fmsg *fmsg)
+ struct devlink_fmsg *fmsg,
+ struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_health_reporter_priv(reporter);
struct mlx5_core_health *health = &dev->priv.health;
@@ -491,7 +492,8 @@ mlx5_fw_reporter_heath_buffer_data_put(struct mlx5_core_dev *dev,
static int
mlx5_fw_reporter_dump(struct devlink_health_reporter *reporter,
- struct devlink_fmsg *fmsg, void *priv_ctx)
+ struct devlink_fmsg *fmsg, void *priv_ctx,
+ struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_health_reporter_priv(reporter);
int err;
@@ -545,23 +547,22 @@ static const struct devlink_health_reporter_ops mlx5_fw_reporter_ops = {
static int
mlx5_fw_fatal_reporter_recover(struct devlink_health_reporter *reporter,
- void *priv_ctx)
+ void *priv_ctx,
+ struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_health_reporter_priv(reporter);
return mlx5_health_try_recover(dev);
}
-#define MLX5_CR_DUMP_CHUNK_SIZE 256
static int
mlx5_fw_fatal_reporter_dump(struct devlink_health_reporter *reporter,
- struct devlink_fmsg *fmsg, void *priv_ctx)
+ struct devlink_fmsg *fmsg, void *priv_ctx,
+ struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_health_reporter_priv(reporter);
u32 crdump_size = dev->priv.health.crdump_size;
u32 *cr_data;
- u32 data_size;
- u32 offset;
int err;
if (!mlx5_core_is_pf(dev))
@@ -582,20 +583,7 @@ mlx5_fw_fatal_reporter_dump(struct devlink_health_reporter *reporter,
goto free_data;
}
- err = devlink_fmsg_arr_pair_nest_start(fmsg, "crdump_data");
- if (err)
- goto free_data;
- for (offset = 0; offset < crdump_size; offset += data_size) {
- if (crdump_size - offset < MLX5_CR_DUMP_CHUNK_SIZE)
- data_size = crdump_size - offset;
- else
- data_size = MLX5_CR_DUMP_CHUNK_SIZE;
- err = devlink_fmsg_binary_put(fmsg, (char *)cr_data + offset,
- data_size);
- if (err)
- goto free_data;
- }
- err = devlink_fmsg_arr_pair_nest_end(fmsg);
+ err = devlink_fmsg_binary_pair_put(fmsg, "crdump_data", cr_data, crdump_size);
free_data:
kvfree(cr_data);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
index c5ef2ff26465..fc0d9583475d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
@@ -145,34 +145,35 @@ static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker,
{
*port1 = 1;
*port2 = 2;
- if (!tracker->netdev_state[0].tx_enabled ||
- !tracker->netdev_state[0].link_up) {
+ if (!tracker->netdev_state[MLX5_LAG_P1].tx_enabled ||
+ !tracker->netdev_state[MLX5_LAG_P1].link_up) {
*port1 = 2;
return;
}
- if (!tracker->netdev_state[1].tx_enabled ||
- !tracker->netdev_state[1].link_up)
+ if (!tracker->netdev_state[MLX5_LAG_P2].tx_enabled ||
+ !tracker->netdev_state[MLX5_LAG_P2].link_up)
*port2 = 1;
}
void mlx5_modify_lag(struct mlx5_lag *ldev,
struct lag_tracker *tracker)
{
- struct mlx5_core_dev *dev0 = ldev->pf[0].dev;
+ struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
u8 v2p_port1, v2p_port2;
int err;
mlx5_infer_tx_affinity_mapping(tracker, &v2p_port1,
&v2p_port2);
- if (v2p_port1 != ldev->v2p_map[0] ||
- v2p_port2 != ldev->v2p_map[1]) {
- ldev->v2p_map[0] = v2p_port1;
- ldev->v2p_map[1] = v2p_port2;
+ if (v2p_port1 != ldev->v2p_map[MLX5_LAG_P1] ||
+ v2p_port2 != ldev->v2p_map[MLX5_LAG_P2]) {
+ ldev->v2p_map[MLX5_LAG_P1] = v2p_port1;
+ ldev->v2p_map[MLX5_LAG_P2] = v2p_port2;
mlx5_core_info(dev0, "modify lag map port 1:%d port 2:%d",
- ldev->v2p_map[0], ldev->v2p_map[1]);
+ ldev->v2p_map[MLX5_LAG_P1],
+ ldev->v2p_map[MLX5_LAG_P2]);
err = mlx5_cmd_modify_lag(dev0, v2p_port1, v2p_port2);
if (err)
@@ -185,16 +186,17 @@ void mlx5_modify_lag(struct mlx5_lag *ldev,
static int mlx5_create_lag(struct mlx5_lag *ldev,
struct lag_tracker *tracker)
{
- struct mlx5_core_dev *dev0 = ldev->pf[0].dev;
+ struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
int err;
- mlx5_infer_tx_affinity_mapping(tracker, &ldev->v2p_map[0],
- &ldev->v2p_map[1]);
+ mlx5_infer_tx_affinity_mapping(tracker, &ldev->v2p_map[MLX5_LAG_P1],
+ &ldev->v2p_map[MLX5_LAG_P2]);
mlx5_core_info(dev0, "lag map port 1:%d port 2:%d",
- ldev->v2p_map[0], ldev->v2p_map[1]);
+ ldev->v2p_map[MLX5_LAG_P1], ldev->v2p_map[MLX5_LAG_P2]);
- err = mlx5_cmd_create_lag(dev0, ldev->v2p_map[0], ldev->v2p_map[1]);
+ err = mlx5_cmd_create_lag(dev0, ldev->v2p_map[MLX5_LAG_P1],
+ ldev->v2p_map[MLX5_LAG_P2]);
if (err)
mlx5_core_err(dev0,
"Failed to create LAG (%d)\n",
@@ -207,7 +209,7 @@ int mlx5_activate_lag(struct mlx5_lag *ldev,
u8 flags)
{
bool roce_lag = !!(flags & MLX5_LAG_FLAG_ROCE);
- struct mlx5_core_dev *dev0 = ldev->pf[0].dev;
+ struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
int err;
err = mlx5_create_lag(ldev, tracker);
@@ -229,7 +231,7 @@ int mlx5_activate_lag(struct mlx5_lag *ldev,
static int mlx5_deactivate_lag(struct mlx5_lag *ldev)
{
- struct mlx5_core_dev *dev0 = ldev->pf[0].dev;
+ struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
bool roce_lag = __mlx5_lag_is_roce(ldev);
int err;
@@ -252,14 +254,15 @@ static int mlx5_deactivate_lag(struct mlx5_lag *ldev)
static bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
{
- if (!ldev->pf[0].dev || !ldev->pf[1].dev)
+ if (!ldev->pf[MLX5_LAG_P1].dev || !ldev->pf[MLX5_LAG_P2].dev)
return false;
#ifdef CONFIG_MLX5_ESWITCH
- return mlx5_esw_lag_prereq(ldev->pf[0].dev, ldev->pf[1].dev);
+ return mlx5_esw_lag_prereq(ldev->pf[MLX5_LAG_P1].dev,
+ ldev->pf[MLX5_LAG_P2].dev);
#else
- return (!mlx5_sriov_is_enabled(ldev->pf[0].dev) &&
- !mlx5_sriov_is_enabled(ldev->pf[1].dev));
+ return (!mlx5_sriov_is_enabled(ldev->pf[MLX5_LAG_P1].dev) &&
+ !mlx5_sriov_is_enabled(ldev->pf[MLX5_LAG_P2].dev));
#endif
}
@@ -285,8 +288,8 @@ static void mlx5_lag_remove_ib_devices(struct mlx5_lag *ldev)
static void mlx5_do_bond(struct mlx5_lag *ldev)
{
- struct mlx5_core_dev *dev0 = ldev->pf[0].dev;
- struct mlx5_core_dev *dev1 = ldev->pf[1].dev;
+ struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
+ struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
struct lag_tracker tracker;
bool do_bond, roce_lag;
int err;
@@ -692,10 +695,11 @@ struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev)
goto unlock;
if (ldev->tracker.tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) {
- ndev = ldev->tracker.netdev_state[0].tx_enabled ?
- ldev->pf[0].netdev : ldev->pf[1].netdev;
+ ndev = ldev->tracker.netdev_state[MLX5_LAG_P1].tx_enabled ?
+ ldev->pf[MLX5_LAG_P1].netdev :
+ ldev->pf[MLX5_LAG_P2].netdev;
} else {
- ndev = ldev->pf[0].netdev;
+ ndev = ldev->pf[MLX5_LAG_P1].netdev;
}
if (ndev)
dev_hold(ndev);
@@ -717,7 +721,8 @@ bool mlx5_lag_intf_add(struct mlx5_interface *intf, struct mlx5_priv *priv)
return true;
ldev = mlx5_lag_dev_get(dev);
- if (!ldev || !__mlx5_lag_is_roce(ldev) || ldev->pf[0].dev == dev)
+ if (!ldev || !__mlx5_lag_is_roce(ldev) ||
+ ldev->pf[MLX5_LAG_P1].dev == dev)
return true;
/* If bonded, we do not add an IB device for PF1. */
@@ -746,11 +751,11 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
ldev = mlx5_lag_dev_get(dev);
if (ldev && __mlx5_lag_is_roce(ldev)) {
num_ports = MLX5_MAX_PORTS;
- mdev[0] = ldev->pf[0].dev;
- mdev[1] = ldev->pf[1].dev;
+ mdev[MLX5_LAG_P1] = ldev->pf[MLX5_LAG_P1].dev;
+ mdev[MLX5_LAG_P2] = ldev->pf[MLX5_LAG_P2].dev;
} else {
num_ports = 1;
- mdev[0] = dev;
+ mdev[MLX5_LAG_P1] = dev;
}
for (i = 0; i < num_ports; ++i) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.h b/drivers/net/ethernet/mellanox/mlx5/core/lag.h
index 1dea0b1c9826..f1068aac6406 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.h
@@ -8,6 +8,11 @@
#include "lag_mp.h"
enum {
+ MLX5_LAG_P1,
+ MLX5_LAG_P2,
+};
+
+enum {
MLX5_LAG_FLAG_ROCE = 1 << 0,
MLX5_LAG_FLAG_SRIOV = 1 << 1,
MLX5_LAG_FLAG_MULTIPATH = 1 << 2,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
index 5d20d615663e..b70afa310ad2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
@@ -11,10 +11,11 @@
static bool mlx5_lag_multipath_check_prereq(struct mlx5_lag *ldev)
{
- if (!ldev->pf[0].dev || !ldev->pf[1].dev)
+ if (!ldev->pf[MLX5_LAG_P1].dev || !ldev->pf[MLX5_LAG_P2].dev)
return false;
- return mlx5_esw_multipath_prereq(ldev->pf[0].dev, ldev->pf[1].dev);
+ return mlx5_esw_multipath_prereq(ldev->pf[MLX5_LAG_P1].dev,
+ ldev->pf[MLX5_LAG_P2].dev);
}
static bool __mlx5_lag_is_multipath(struct mlx5_lag *ldev)
@@ -43,7 +44,8 @@ bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev)
* 2 - set affinity to port 2.
*
**/
-static void mlx5_lag_set_port_affinity(struct mlx5_lag *ldev, int port)
+static void mlx5_lag_set_port_affinity(struct mlx5_lag *ldev,
+ enum mlx5_lag_port_affinity port)
{
struct lag_tracker tracker;
@@ -51,37 +53,37 @@ static void mlx5_lag_set_port_affinity(struct mlx5_lag *ldev, int port)
return;
switch (port) {
- case 0:
- tracker.netdev_state[0].tx_enabled = true;
- tracker.netdev_state[1].tx_enabled = true;
- tracker.netdev_state[0].link_up = true;
- tracker.netdev_state[1].link_up = true;
+ case MLX5_LAG_NORMAL_AFFINITY:
+ tracker.netdev_state[MLX5_LAG_P1].tx_enabled = true;
+ tracker.netdev_state[MLX5_LAG_P2].tx_enabled = true;
+ tracker.netdev_state[MLX5_LAG_P1].link_up = true;
+ tracker.netdev_state[MLX5_LAG_P2].link_up = true;
break;
- case 1:
- tracker.netdev_state[0].tx_enabled = true;
- tracker.netdev_state[0].link_up = true;
- tracker.netdev_state[1].tx_enabled = false;
- tracker.netdev_state[1].link_up = false;
+ case MLX5_LAG_P1_AFFINITY:
+ tracker.netdev_state[MLX5_LAG_P1].tx_enabled = true;
+ tracker.netdev_state[MLX5_LAG_P1].link_up = true;
+ tracker.netdev_state[MLX5_LAG_P2].tx_enabled = false;
+ tracker.netdev_state[MLX5_LAG_P2].link_up = false;
break;
- case 2:
- tracker.netdev_state[0].tx_enabled = false;
- tracker.netdev_state[0].link_up = false;
- tracker.netdev_state[1].tx_enabled = true;
- tracker.netdev_state[1].link_up = true;
+ case MLX5_LAG_P2_AFFINITY:
+ tracker.netdev_state[MLX5_LAG_P1].tx_enabled = false;
+ tracker.netdev_state[MLX5_LAG_P1].link_up = false;
+ tracker.netdev_state[MLX5_LAG_P2].tx_enabled = true;
+ tracker.netdev_state[MLX5_LAG_P2].link_up = true;
break;
default:
- mlx5_core_warn(ldev->pf[0].dev, "Invalid affinity port %d",
- port);
+ mlx5_core_warn(ldev->pf[MLX5_LAG_P1].dev,
+ "Invalid affinity port %d", port);
return;
}
- if (tracker.netdev_state[0].tx_enabled)
- mlx5_notifier_call_chain(ldev->pf[0].dev->priv.events,
+ if (tracker.netdev_state[MLX5_LAG_P1].tx_enabled)
+ mlx5_notifier_call_chain(ldev->pf[MLX5_LAG_P1].dev->priv.events,
MLX5_DEV_EVENT_PORT_AFFINITY,
(void *)0);
- if (tracker.netdev_state[1].tx_enabled)
- mlx5_notifier_call_chain(ldev->pf[1].dev->priv.events,
+ if (tracker.netdev_state[MLX5_LAG_P2].tx_enabled)
+ mlx5_notifier_call_chain(ldev->pf[MLX5_LAG_P2].dev->priv.events,
MLX5_DEV_EVENT_PORT_AFFINITY,
(void *)0);
@@ -141,11 +143,12 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev,
/* Verify next hops are ports of the same hca */
fib_nh0 = fib_info_nh(fi, 0);
fib_nh1 = fib_info_nh(fi, 1);
- if (!(fib_nh0->fib_nh_dev == ldev->pf[0].netdev &&
- fib_nh1->fib_nh_dev == ldev->pf[1].netdev) &&
- !(fib_nh0->fib_nh_dev == ldev->pf[1].netdev &&
- fib_nh1->fib_nh_dev == ldev->pf[0].netdev)) {
- mlx5_core_warn(ldev->pf[0].dev, "Multipath offload require two ports of the same HCA\n");
+ if (!(fib_nh0->fib_nh_dev == ldev->pf[MLX5_LAG_P1].netdev &&
+ fib_nh1->fib_nh_dev == ldev->pf[MLX5_LAG_P2].netdev) &&
+ !(fib_nh0->fib_nh_dev == ldev->pf[MLX5_LAG_P2].netdev &&
+ fib_nh1->fib_nh_dev == ldev->pf[MLX5_LAG_P1].netdev)) {
+ mlx5_core_warn(ldev->pf[MLX5_LAG_P1].dev,
+ "Multipath offload require two ports of the same HCA\n");
return;
}
@@ -157,7 +160,7 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev,
mlx5_activate_lag(ldev, &tracker, MLX5_LAG_FLAG_MULTIPATH);
}
- mlx5_lag_set_port_affinity(ldev, 0);
+ mlx5_lag_set_port_affinity(ldev, MLX5_LAG_NORMAL_AFFINITY);
mp->mfi = fi;
}
@@ -182,7 +185,7 @@ static void mlx5_lag_fib_nexthop_event(struct mlx5_lag *ldev,
}
} else if (event == FIB_EVENT_NH_ADD &&
fib_info_num_path(fi) == 2) {
- mlx5_lag_set_port_affinity(ldev, 0);
+ mlx5_lag_set_port_affinity(ldev, MLX5_LAG_NORMAL_AFFINITY);
}
}
@@ -248,9 +251,6 @@ static int mlx5_lag_fib_event(struct notifier_block *nb,
struct net_device *fib_dev;
struct fib_info *fi;
- if (!net_eq(info->net, &init_net))
- return NOTIFY_DONE;
-
if (info->family != AF_INET)
return NOTIFY_DONE;
@@ -270,8 +270,8 @@ static int mlx5_lag_fib_event(struct notifier_block *nb,
return notifier_from_errno(-EINVAL);
}
fib_dev = fib_info_nh(fen_info->fi, 0)->fib_nh_dev;
- if (fib_dev != ldev->pf[0].netdev &&
- fib_dev != ldev->pf[1].netdev) {
+ if (fib_dev != ldev->pf[MLX5_LAG_P1].netdev &&
+ fib_dev != ldev->pf[MLX5_LAG_P2].netdev) {
return NOTIFY_DONE;
}
fib_work = mlx5_lag_init_fib_work(ldev, event);
@@ -311,8 +311,8 @@ int mlx5_lag_mp_init(struct mlx5_lag *ldev)
return 0;
mp->fib_nb.notifier_call = mlx5_lag_fib_event;
- err = register_fib_notifier(&mp->fib_nb,
- mlx5_lag_fib_event_flush);
+ err = register_fib_notifier(&init_net, &mp->fib_nb,
+ mlx5_lag_fib_event_flush, NULL);
if (err)
mp->fib_nb.notifier_call = NULL;
@@ -326,6 +326,6 @@ void mlx5_lag_mp_cleanup(struct mlx5_lag *ldev)
if (!mp->fib_nb.notifier_call)
return;
- unregister_fib_notifier(&mp->fib_nb);
+ unregister_fib_notifier(&init_net, &mp->fib_nb);
mp->fib_nb.notifier_call = NULL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.h b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.h
index 6d14b1100be9..79be89e9c7a4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.h
@@ -7,6 +7,12 @@
#include "lag.h"
#include "mlx5_core.h"
+enum mlx5_lag_port_affinity {
+ MLX5_LAG_NORMAL_AFFINITY,
+ MLX5_LAG_P1_AFFINITY,
+ MLX5_LAG_P2_AFFINITY,
+};
+
struct lag_mp {
struct notifier_block fib_nb;
struct fib_info *mfi; /* used in tracking fib events */
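The new enum gives names to the bare affinity numbers used above:
MLX5_LAG_NORMAL_AFFINITY (0) leaves both ports active, while the P1/P2
values pin traffic to a single port, matching the switch at the top of
mlx5_lag_set_port_affinity(). A minimal caller sketch (illustration
only, not part of the patch):

    /* Pin traffic to port 2 while port 1's next hop is down, then
     * restore normal multipath affinity once it recovers.
     */
    mlx5_lag_set_port_affinity(ldev, MLX5_LAG_P2_AFFINITY);
    /* ... FIB event reports port 1's next hop alive again ... */
    mlx5_lag_set_port_affinity(ldev, MLX5_LAG_NORMAL_AFFINITY);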
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
index 0059b290e095..43f97601b500 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
@@ -236,6 +236,19 @@ static int mlx5_extts_configure(struct ptp_clock_info *ptp,
if (!MLX5_PPS_CAP(mdev))
return -EOPNOTSUPP;
+ /* Reject requests with unsupported flags */
+ if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
+ PTP_RISING_EDGE |
+ PTP_FALLING_EDGE |
+ PTP_STRICT_FLAGS))
+ return -EOPNOTSUPP;
+
+ /* Reject requests to enable time stamping on both edges. */
+ if ((rq->extts.flags & PTP_STRICT_FLAGS) &&
+ (rq->extts.flags & PTP_ENABLE_FEATURE) &&
+ (rq->extts.flags & PTP_EXTTS_EDGES) == PTP_EXTTS_EDGES)
+ return -EOPNOTSUPP;
+
if (rq->extts.index >= clock->ptp_info.n_pins)
return -EINVAL;
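The two checks added above follow the validation pattern applied across
PTP drivers in this cycle: unknown request flags are rejected outright,
and with PTP_STRICT_FLAGS a request to timestamp both edges is refused
because the hardware can only timestamp one. A standalone sketch of the
same test, with flag values copied from include/uapi/linux/ptp_clock.h:

    #include <stdio.h>

    /* Values as in include/uapi/linux/ptp_clock.h */
    #define PTP_ENABLE_FEATURE (1 << 0)
    #define PTP_RISING_EDGE    (1 << 1)
    #define PTP_FALLING_EDGE   (1 << 2)
    #define PTP_STRICT_FLAGS   (1 << 3)
    #define PTP_EXTTS_EDGES    (PTP_RISING_EDGE | PTP_FALLING_EDGE)

    static int extts_flags_ok(unsigned int flags)
    {
            /* Any bit outside the known set is unsupported. */
            if (flags & ~(PTP_ENABLE_FEATURE | PTP_RISING_EDGE |
                          PTP_FALLING_EDGE | PTP_STRICT_FLAGS))
                    return 0;
            /* Strict mode: both edges at once is not supported. */
            if ((flags & PTP_STRICT_FLAGS) &&
                (flags & PTP_ENABLE_FEATURE) &&
                (flags & PTP_EXTTS_EDGES) == PTP_EXTTS_EDGES)
                    return 0;
            return 1;
    }

    int main(void)
    {
            printf("%d\n", extts_flags_ok(PTP_ENABLE_FEATURE |
                                          PTP_RISING_EDGE));  /* 1 */
            printf("%d\n", extts_flags_ok(PTP_ENABLE_FEATURE |
                                          PTP_STRICT_FLAGS |
                                          PTP_EXTTS_EDGES));  /* 0 */
            return 0;
    }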
@@ -290,6 +303,10 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
if (!MLX5_PPS_CAP(mdev))
return -EOPNOTSUPP;
+ /* Reject requests with unsupported flags */
+ if (rq->perout.flags)
+ return -EOPNOTSUPP;
+
if (rq->perout.index >= clock->ptp_info.n_pins)
return -EINVAL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
index b99d469e4e64..249539247e2e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
@@ -84,4 +84,9 @@ int mlx5_create_encryption_key(struct mlx5_core_dev *mdev,
void *key, u32 sz_bytes, u32 *p_key_id);
void mlx5_destroy_encryption_key(struct mlx5_core_dev *mdev, u32 key_id);
+static inline struct net *mlx5_core_net(struct mlx5_core_dev *dev)
+{
+ return devlink_net(priv_to_devlink(dev));
+}
+
#endif
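mlx5_core_net() resolves the net namespace a device currently lives in
through its devlink instance (devlink_net() and priv_to_devlink() are
existing helpers). A hedged sketch of the intended use, replacing the
hard-coded &init_net still passed by mlx5_lag_mp_init() above; the
caller shown is hypothetical:

    struct net *net = mlx5_core_net(ldev->pf[MLX5_LAG_P1].dev);

    /* Register in the device's own namespace instead of init_net. */
    err = register_fib_notifier(net, &mp->fib_nb,
                                mlx5_lag_fib_event_flush, NULL);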
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index e47dd7c1b909..173e2c12e1c7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -837,8 +837,6 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
mlx5_init_qp_table(dev);
- mlx5_init_mkey_table(dev);
-
mlx5_init_reserved_gids(dev);
mlx5_init_clock(dev);
@@ -896,7 +894,6 @@ err_rl_cleanup:
err_tables_cleanup:
mlx5_geneve_destroy(dev->geneve);
mlx5_vxlan_destroy(dev->vxlan);
- mlx5_cleanup_mkey_table(dev);
mlx5_cleanup_qp_table(dev);
mlx5_cq_debugfs_cleanup(dev);
mlx5_events_cleanup(dev);
@@ -924,7 +921,6 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
mlx5_vxlan_destroy(dev->vxlan);
mlx5_cleanup_clock(dev);
mlx5_cleanup_reserved_gids(dev);
- mlx5_cleanup_mkey_table(dev);
mlx5_cleanup_qp_table(dev);
mlx5_cq_debugfs_cleanup(dev);
mlx5_events_cleanup(dev);
@@ -1168,7 +1164,7 @@ static void mlx5_unload(struct mlx5_core_dev *dev)
mlx5_put_uars_page(dev, dev->priv.uar);
}
-static int mlx5_load_one(struct mlx5_core_dev *dev, bool boot)
+int mlx5_load_one(struct mlx5_core_dev *dev, bool boot)
{
int err = 0;
@@ -1226,10 +1222,8 @@ function_teardown:
return err;
}
-static int mlx5_unload_one(struct mlx5_core_dev *dev, bool cleanup)
+int mlx5_unload_one(struct mlx5_core_dev *dev, bool cleanup)
{
- int err = 0;
-
if (cleanup) {
mlx5_unregister_device(dev);
mlx5_drain_health_wq(dev);
@@ -1257,7 +1251,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, bool cleanup)
mlx5_function_teardown(dev, cleanup);
out:
mutex_unlock(&dev->intf_state_mutex);
- return err;
+ return 0;
}
static int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
@@ -1566,6 +1560,7 @@ static const struct pci_device_id mlx5_core_pci_table[] = {
{ PCI_VDEVICE(MELLANOX, 0x101c), MLX5_PCI_DEV_IS_VF}, /* ConnectX-6 VF */
{ PCI_VDEVICE(MELLANOX, 0x101d) }, /* ConnectX-6 Dx */
{ PCI_VDEVICE(MELLANOX, 0x101e), MLX5_PCI_DEV_IS_VF}, /* ConnectX Family mlx5Gen Virtual Function */
+ { PCI_VDEVICE(MELLANOX, 0x101f) }, /* ConnectX-6 LX */
{ PCI_VDEVICE(MELLANOX, 0xa2d2) }, /* BlueField integrated ConnectX-5 network controller */
{ PCI_VDEVICE(MELLANOX, 0xa2d3), MLX5_PCI_DEV_IS_VF}, /* BlueField integrated ConnectX-5 network controller VF */
{ PCI_VDEVICE(MELLANOX, 0xa2d6) }, /* BlueField-2 integrated ConnectX-6 Dx network controller */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index b100489dc85c..da67b28d6e23 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -243,4 +243,7 @@ enum {
u8 mlx5_get_nic_state(struct mlx5_core_dev *dev);
void mlx5_set_nic_state(struct mlx5_core_dev *dev, u8 state);
+
+int mlx5_unload_one(struct mlx5_core_dev *dev, bool cleanup);
+int mlx5_load_one(struct mlx5_core_dev *dev, bool boot);
#endif /* __MLX5_CORE_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mr.c b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
index c501bf2a0252..42cc3c7ac5b6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
@@ -36,16 +36,6 @@
#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"
-void mlx5_init_mkey_table(struct mlx5_core_dev *dev)
-{
- xa_init_flags(&dev->priv.mkey_table, XA_FLAGS_LOCK_IRQ);
-}
-
-void mlx5_cleanup_mkey_table(struct mlx5_core_dev *dev)
-{
- WARN_ON(!xa_empty(&dev->priv.mkey_table));
-}
-
int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev,
struct mlx5_core_mkey *mkey,
struct mlx5_async_ctx *async_ctx, u32 *in,
@@ -54,7 +44,6 @@ int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev,
struct mlx5_async_work *context)
{
u32 lout[MLX5_ST_SZ_DW(create_mkey_out)] = {0};
- struct xarray *mkeys = &dev->priv.mkey_table;
u32 mkey_index;
void *mkc;
int err;
@@ -84,16 +73,7 @@ int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev,
mlx5_core_dbg(dev, "out 0x%x, key 0x%x, mkey 0x%x\n",
mkey_index, key, mkey->key);
-
- err = xa_err(xa_store_irq(mkeys, mlx5_base_mkey(mkey->key), mkey,
- GFP_KERNEL));
- if (err) {
- mlx5_core_warn(dev, "failed xarray insert of mkey 0x%x, %d\n",
- mlx5_base_mkey(mkey->key), err);
- mlx5_core_destroy_mkey(dev, mkey);
- }
-
- return err;
+ return 0;
}
EXPORT_SYMBOL(mlx5_core_create_mkey_cb);
@@ -111,12 +91,6 @@ int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev,
{
u32 out[MLX5_ST_SZ_DW(destroy_mkey_out)] = {0};
u32 in[MLX5_ST_SZ_DW(destroy_mkey_in)] = {0};
- struct xarray *mkeys = &dev->priv.mkey_table;
- unsigned long flags;
-
- xa_lock_irqsave(mkeys, flags);
- __xa_erase(mkeys, mlx5_base_mkey(mkey->key));
- xa_unlock_irqrestore(mkeys, flags);
MLX5_SET(destroy_mkey_in, in, opcode, MLX5_CMD_OP_DESTROY_MKEY);
MLX5_SET(destroy_mkey_in, in, mkey_index, mlx5_mkey_to_idx(mkey->key));
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
index 61fcfd8b39b4..03f037811f1d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
@@ -108,10 +108,10 @@ enable_vfs_hca:
return 0;
}
-static void mlx5_device_disable_sriov(struct mlx5_core_dev *dev)
+static void
+mlx5_device_disable_sriov(struct mlx5_core_dev *dev, int num_vfs, bool clear_vf)
{
struct mlx5_core_sriov *sriov = &dev->priv.sriov;
- int num_vfs = pci_num_vf(dev->pdev);
int err;
int vf;
@@ -127,7 +127,7 @@ static void mlx5_device_disable_sriov(struct mlx5_core_dev *dev)
}
if (MLX5_ESWITCH_MANAGER(dev))
- mlx5_eswitch_disable(dev->priv.eswitch);
+ mlx5_eswitch_disable(dev->priv.eswitch, clear_vf);
if (mlx5_wait_for_pages(dev, &dev->priv.vfs_pages))
mlx5_core_warn(dev, "timeout reclaiming VFs pages\n");
@@ -147,7 +147,7 @@ static int mlx5_sriov_enable(struct pci_dev *pdev, int num_vfs)
err = pci_enable_sriov(pdev, num_vfs);
if (err) {
mlx5_core_warn(dev, "pci_enable_sriov failed : %d\n", err);
- mlx5_device_disable_sriov(dev);
+ mlx5_device_disable_sriov(dev, num_vfs, true);
}
return err;
}
@@ -155,9 +155,10 @@ static int mlx5_sriov_enable(struct pci_dev *pdev, int num_vfs)
static void mlx5_sriov_disable(struct pci_dev *pdev)
{
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
+ int num_vfs = pci_num_vf(dev->pdev);
pci_disable_sriov(pdev);
- mlx5_device_disable_sriov(dev);
+ mlx5_device_disable_sriov(dev, num_vfs, true);
}
int mlx5_core_sriov_configure(struct pci_dev *pdev, int num_vfs)
@@ -192,7 +193,7 @@ void mlx5_sriov_detach(struct mlx5_core_dev *dev)
if (!mlx5_core_is_pf(dev))
return;
- mlx5_device_disable_sriov(dev);
+ mlx5_device_disable_sriov(dev, pci_num_vf(dev->pdev), false);
}
static u16 mlx5_get_max_vfs(struct mlx5_core_dev *dev)
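The reworked mlx5_device_disable_sriov() takes num_vfs from its callers
because pci_num_vf() tracks live PCI state: once pci_disable_sriov()
returns it reads zero, and the per-VF teardown loop would never run if
the count were sampled afterwards. The clear_vf flag plausibly
distinguishes an administrative disable (true: wipe VF configuration)
from a detach on unload/reload (false: keep it for re-attach). The
ordering constraint, restated as a commented sketch:

    /* Sample the VF count while SR-IOV is still enabled. */
    int num_vfs = pci_num_vf(dev->pdev);  /* still non-zero here */

    pci_disable_sriov(pdev);              /* PCI core now reports 0 VFs */
    mlx5_device_disable_sriov(dev, num_vfs, true);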
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_crc32.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_crc32.c
deleted file mode 100644
index 9e2eccbb1eb8..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_crc32.c
+++ /dev/null
@@ -1,98 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
-/* Copyright (c) 2019 Mellanox Technologies. */
-
-/* Copyright (c) 2011-2015 Stephan Brumme. All rights reserved.
- * Slicing-by-16 contributed by Bulat Ziganshin
- *
- * This software is provided 'as-is', without any express or implied warranty.
- * In no event will the author be held liable for any damages arising from the
- * use of this software.
- *
- * Permission is granted to anyone to use this software for any purpose,
- * including commercial applications, and to alter it and redistribute it
- * freely, subject to the following restrictions:
- *
- * 1. The origin of this software must not be misrepresented; you must not
- * claim that you wrote the original software.
- * 2. If you use this software in a product, an acknowledgment in the product
- * documentation would be appreciated but is not required.
- * 3. Altered source versions must be plainly marked as such, and must not be
- * misrepresented as being the original software.
- *
- * Taken from http://create.stephan-brumme.com/crc32/ and adapted.
- */
-
-#include "dr_types.h"
-
-#define DR_STE_CRC_POLY 0xEDB88320L
-
-static u32 dr_ste_crc_tab32[8][256];
-
-static void dr_crc32_calc_lookup_entry(u32 (*tbl)[256], u8 i, u8 j)
-{
- tbl[i][j] = (tbl[i - 1][j] >> 8) ^ tbl[0][tbl[i - 1][j] & 0xff];
-}
-
-void mlx5dr_crc32_init_table(void)
-{
- u32 crc, i, j;
-
- for (i = 0; i < 256; i++) {
- crc = i;
- for (j = 0; j < 8; j++) {
- if (crc & 0x00000001L)
- crc = (crc >> 1) ^ DR_STE_CRC_POLY;
- else
- crc = crc >> 1;
- }
- dr_ste_crc_tab32[0][i] = crc;
- }
-
- /* Init CRC lookup tables according to crc_slice_8 algorithm */
- for (i = 0; i < 256; i++) {
- dr_crc32_calc_lookup_entry(dr_ste_crc_tab32, 1, i);
- dr_crc32_calc_lookup_entry(dr_ste_crc_tab32, 2, i);
- dr_crc32_calc_lookup_entry(dr_ste_crc_tab32, 3, i);
- dr_crc32_calc_lookup_entry(dr_ste_crc_tab32, 4, i);
- dr_crc32_calc_lookup_entry(dr_ste_crc_tab32, 5, i);
- dr_crc32_calc_lookup_entry(dr_ste_crc_tab32, 6, i);
- dr_crc32_calc_lookup_entry(dr_ste_crc_tab32, 7, i);
- }
-}
-
-/* Compute CRC32 (Slicing-by-8 algorithm) */
-u32 mlx5dr_crc32_slice8_calc(const void *input_data, size_t length)
-{
- const u32 *curr = (const u32 *)input_data;
- const u8 *curr_char;
- u32 crc = 0, one, two;
-
- if (!input_data)
- return 0;
-
- /* Process eight bytes at once (Slicing-by-8) */
- while (length >= 8) {
- one = *curr++ ^ crc;
- two = *curr++;
-
- crc = dr_ste_crc_tab32[0][(two >> 24) & 0xff]
- ^ dr_ste_crc_tab32[1][(two >> 16) & 0xff]
- ^ dr_ste_crc_tab32[2][(two >> 8) & 0xff]
- ^ dr_ste_crc_tab32[3][two & 0xff]
- ^ dr_ste_crc_tab32[4][(one >> 24) & 0xff]
- ^ dr_ste_crc_tab32[5][(one >> 16) & 0xff]
- ^ dr_ste_crc_tab32[6][(one >> 8) & 0xff]
- ^ dr_ste_crc_tab32[7][one & 0xff];
-
- length -= 8;
- }
-
- curr_char = (const u8 *)curr;
- /* Remaining 1 to 7 bytes (standard algorithm) */
- while (length-- != 0)
- crc = (crc >> 8) ^ dr_ste_crc_tab32[0][(crc & 0xff)
- ^ *curr_char++];
-
- return ((crc >> 24) & 0xff) | ((crc << 8) & 0xff0000) |
- ((crc >> 8) & 0xff00) | ((crc << 24) & 0xff000000);
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
index 5b24732b18c0..a9da961d4d2f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
@@ -326,9 +326,6 @@ mlx5dr_domain_create(struct mlx5_core_dev *mdev, enum mlx5dr_domain_type type)
goto uninit_resourses;
}
- /* Init CRC table for htbl CRC calculation */
- mlx5dr_crc32_init_table();
-
return dmn;
uninit_resourses:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
index 67dea7698fc9..c6dbd856df94 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
@@ -102,13 +102,52 @@ static bool dr_mask_is_gre_set(struct mlx5dr_match_misc *misc)
DR_MASK_IS_OUTER_MPLS_OVER_GRE_UDP_SET((_misc2), gre) || \
DR_MASK_IS_OUTER_MPLS_OVER_GRE_UDP_SET((_misc2), udp))
-static bool dr_mask_is_flex_parser_tnl_set(struct mlx5dr_match_misc3 *misc3)
+static bool
+dr_mask_is_misc3_vxlan_gpe_set(struct mlx5dr_match_misc3 *misc3)
{
return (misc3->outer_vxlan_gpe_vni ||
misc3->outer_vxlan_gpe_next_protocol ||
misc3->outer_vxlan_gpe_flags);
}
+static bool
+dr_matcher_supp_flex_parser_vxlan_gpe(struct mlx5dr_cmd_caps *caps)
+{
+ return caps->flex_protocols &
+ MLX5_FLEX_PARSER_VXLAN_GPE_ENABLED;
+}
+
+static bool
+dr_mask_is_flex_parser_tnl_vxlan_gpe_set(struct mlx5dr_match_param *mask,
+ struct mlx5dr_domain *dmn)
+{
+ return dr_mask_is_misc3_vxlan_gpe_set(&mask->misc3) &&
+ dr_matcher_supp_flex_parser_vxlan_gpe(&dmn->info.caps);
+}
+
+static bool dr_mask_is_misc_geneve_set(struct mlx5dr_match_misc *misc)
+{
+ return misc->geneve_vni ||
+ misc->geneve_oam ||
+ misc->geneve_protocol_type ||
+ misc->geneve_opt_len;
+}
+
+static bool
+dr_matcher_supp_flex_parser_geneve(struct mlx5dr_cmd_caps *caps)
+{
+ return caps->flex_protocols &
+ MLX5_FLEX_PARSER_GENEVE_ENABLED;
+}
+
+static bool
+dr_mask_is_flex_parser_tnl_geneve_set(struct mlx5dr_match_param *mask,
+ struct mlx5dr_domain *dmn)
+{
+ return dr_mask_is_misc_geneve_set(&mask->misc) &&
+ dr_matcher_supp_flex_parser_geneve(&dmn->info.caps);
+}
+
static bool dr_mask_is_flex_parser_icmpv6_set(struct mlx5dr_match_misc3 *misc3)
{
return (misc3->icmpv6_type || misc3->icmpv6_code ||
@@ -137,24 +176,15 @@ static bool dr_mask_is_gvmi_or_qpn_set(struct mlx5dr_match_misc *misc)
return (misc->source_sqn || misc->source_port);
}
-static bool
-dr_matcher_supp_flex_parser_vxlan_gpe(struct mlx5dr_domain *dmn)
-{
- return dmn->info.caps.flex_protocols &
- MLX5_FLEX_PARSER_VXLAN_GPE_ENABLED;
-}
-
int mlx5dr_matcher_select_builders(struct mlx5dr_matcher *matcher,
struct mlx5dr_matcher_rx_tx *nic_matcher,
- bool ipv6)
+ enum mlx5dr_ipv outer_ipv,
+ enum mlx5dr_ipv inner_ipv)
{
- if (ipv6) {
- nic_matcher->ste_builder = nic_matcher->ste_builder6;
- nic_matcher->num_of_builders = nic_matcher->num_of_builders6;
- } else {
- nic_matcher->ste_builder = nic_matcher->ste_builder4;
- nic_matcher->num_of_builders = nic_matcher->num_of_builders4;
- }
+ nic_matcher->ste_builder =
+ nic_matcher->ste_builder_arr[outer_ipv][inner_ipv];
+ nic_matcher->num_of_builders =
+ nic_matcher->num_of_builders_arr[outer_ipv][inner_ipv];
if (!nic_matcher->num_of_builders) {
mlx5dr_dbg(matcher->tbl->dmn,
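With the bool ipv6 flag replaced by an (outer, inner) pair of
enum mlx5dr_ipv indices, each matcher keeps a 2x2 table of STE builder
arrays covering every IP-version combination. A standalone sketch of
the lookup shape (builder counts are made up for illustration):

    #include <stdio.h>

    enum dr_ipv { IPV4, IPV6, IPV_MAX };

    /* Mirrors num_of_builders_arr[DR_RULE_IPV_MAX][DR_RULE_IPV_MAX] */
    static unsigned char num_builders[IPV_MAX][IPV_MAX] = {
            [IPV4][IPV4] = 5, [IPV4][IPV6] = 6,
            [IPV6][IPV4] = 6, [IPV6][IPV6] = 7,
    };

    int main(void)
    {
            /* Rule with an IPv6 outer and an IPv4 inner header */
            printf("%u\n", num_builders[IPV6][IPV4]); /* 6 */
            return 0;
    }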
@@ -167,26 +197,19 @@ int mlx5dr_matcher_select_builders(struct mlx5dr_matcher *matcher,
static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
struct mlx5dr_matcher_rx_tx *nic_matcher,
- bool ipv6)
+ enum mlx5dr_ipv outer_ipv,
+ enum mlx5dr_ipv inner_ipv)
{
struct mlx5dr_domain_rx_tx *nic_dmn = nic_matcher->nic_tbl->nic_dmn;
struct mlx5dr_domain *dmn = matcher->tbl->dmn;
struct mlx5dr_match_param mask = {};
struct mlx5dr_match_misc3 *misc3;
struct mlx5dr_ste_build *sb;
- u8 *num_of_builders;
bool inner, rx;
int idx = 0;
int ret, i;
- if (ipv6) {
- sb = nic_matcher->ste_builder6;
- num_of_builders = &nic_matcher->num_of_builders6;
- } else {
- sb = nic_matcher->ste_builder4;
- num_of_builders = &nic_matcher->num_of_builders4;
- }
-
+ sb = nic_matcher->ste_builder_arr[outer_ipv][inner_ipv];
rx = nic_dmn->ste_type == MLX5DR_STE_TYPE_RX;
/* Create a temporary mask to track and clear used mask fields */
@@ -249,7 +272,7 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
if (DR_MASK_IS_L2_DST(mask.outer, mask.misc, outer))
mlx5dr_ste_build_eth_l2_dst(&sb[idx++], &mask, inner, rx);
- if (ipv6) {
+ if (outer_ipv == DR_RULE_IPV6) {
if (dr_mask_is_dst_addr_set(&mask.outer))
mlx5dr_ste_build_eth_l3_ipv6_dst(&sb[idx++], &mask,
inner, rx);
@@ -271,10 +294,14 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
inner, rx);
}
- if (dr_mask_is_flex_parser_tnl_set(&mask.misc3) &&
- dr_matcher_supp_flex_parser_vxlan_gpe(dmn))
- mlx5dr_ste_build_flex_parser_tnl(&sb[idx++], &mask,
- inner, rx);
+ if (dr_mask_is_flex_parser_tnl_vxlan_gpe_set(&mask, dmn))
+ mlx5dr_ste_build_flex_parser_tnl_vxlan_gpe(&sb[idx++],
+ &mask,
+ inner, rx);
+ else if (dr_mask_is_flex_parser_tnl_geneve_set(&mask, dmn))
+ mlx5dr_ste_build_flex_parser_tnl_geneve(&sb[idx++],
+ &mask,
+ inner, rx);
if (DR_MASK_IS_ETH_L4_MISC_SET(mask.misc3, outer))
mlx5dr_ste_build_eth_l4_misc(&sb[idx++], &mask, inner, rx);
@@ -325,7 +352,7 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
if (DR_MASK_IS_L2_DST(mask.inner, mask.misc, inner))
mlx5dr_ste_build_eth_l2_dst(&sb[idx++], &mask, inner, rx);
- if (ipv6) {
+ if (inner_ipv == DR_RULE_IPV6) {
if (dr_mask_is_dst_addr_set(&mask.inner))
mlx5dr_ste_build_eth_l3_ipv6_dst(&sb[idx++], &mask,
inner, rx);
@@ -373,7 +400,8 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
}
}
- *num_of_builders = idx;
+ nic_matcher->ste_builder = sb;
+ nic_matcher->num_of_builders_arr[outer_ipv][inner_ipv] = idx;
return 0;
}
@@ -524,24 +552,33 @@ static void dr_matcher_uninit(struct mlx5dr_matcher *matcher)
}
}
-static int dr_matcher_init_nic(struct mlx5dr_matcher *matcher,
- struct mlx5dr_matcher_rx_tx *nic_matcher)
+static int dr_matcher_set_all_ste_builders(struct mlx5dr_matcher *matcher,
+ struct mlx5dr_matcher_rx_tx *nic_matcher)
{
struct mlx5dr_domain *dmn = matcher->tbl->dmn;
- int ret, ret_v4, ret_v6;
- ret_v4 = dr_matcher_set_ste_builders(matcher, nic_matcher, false);
- ret_v6 = dr_matcher_set_ste_builders(matcher, nic_matcher, true);
+ dr_matcher_set_ste_builders(matcher, nic_matcher, DR_RULE_IPV4, DR_RULE_IPV4);
+ dr_matcher_set_ste_builders(matcher, nic_matcher, DR_RULE_IPV4, DR_RULE_IPV6);
+ dr_matcher_set_ste_builders(matcher, nic_matcher, DR_RULE_IPV6, DR_RULE_IPV4);
+ dr_matcher_set_ste_builders(matcher, nic_matcher, DR_RULE_IPV6, DR_RULE_IPV6);
- if (ret_v4 && ret_v6) {
+ if (!nic_matcher->ste_builder) {
mlx5dr_dbg(dmn, "Cannot generate IPv4 or IPv6 rules with given mask\n");
return -EINVAL;
}
- if (!ret_v4)
- nic_matcher->ste_builder = nic_matcher->ste_builder4;
- else
- nic_matcher->ste_builder = nic_matcher->ste_builder6;
+ return 0;
+}
+
+static int dr_matcher_init_nic(struct mlx5dr_matcher *matcher,
+ struct mlx5dr_matcher_rx_tx *nic_matcher)
+{
+ struct mlx5dr_domain *dmn = matcher->tbl->dmn;
+ int ret;
+
+ ret = dr_matcher_set_all_ste_builders(matcher, nic_matcher);
+ if (ret)
+ return ret;
nic_matcher->e_anchor = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
DR_CHUNK_SIZE_1,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
index 5dcb8baf491a..32e94d2ee5e4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
@@ -595,6 +595,18 @@ static void dr_rule_clean_rule_members(struct mlx5dr_rule *rule,
}
}
+static u16 dr_get_bits_per_mask(u16 byte_mask)
+{
+ u16 bits = 0;
+
+ while (byte_mask) {
+ byte_mask = byte_mask & (byte_mask - 1);
+ bits++;
+ }
+
+ return bits;
+}
+
static bool dr_rule_need_enlarge_hash(struct mlx5dr_ste_htbl *htbl,
struct mlx5dr_domain *dmn,
struct mlx5dr_domain_rx_tx *nic_dmn)
@@ -607,6 +619,9 @@ static bool dr_rule_need_enlarge_hash(struct mlx5dr_ste_htbl *htbl,
if (!ctrl->may_grow)
return false;
+ if (dr_get_bits_per_mask(htbl->byte_mask) * BITS_PER_BYTE <= htbl->chunk_size)
+ return false;
+
if (ctrl->num_of_collisions >= ctrl->increase_threshold &&
(ctrl->num_of_valid_entries - ctrl->num_of_collisions) >= ctrl->increase_threshold)
return true;
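dr_get_bits_per_mask() is the classic Kernighan population count:
byte_mask &= byte_mask - 1 clears the lowest set bit, so the loop runs
once per set bit. Since byte_mask holds one bit per masked byte of the
STE tag, the masked tag can take at most 2^(bits * BITS_PER_BYTE)
distinct values, and the new guard stops growing a hash table whose
size already covers that. A standalone demo of the count:

    #include <stdio.h>

    static unsigned int bits_per_mask(unsigned short byte_mask)
    {
            unsigned int bits = 0;

            while (byte_mask) {
                    byte_mask &= byte_mask - 1; /* clear lowest set bit */
                    bits++;
            }
            return bits;
    }

    int main(void)
    {
            printf("%u\n", bits_per_mask(0x0f00)); /* 4 */
            return 0;
    }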
@@ -954,12 +969,12 @@ static int dr_rule_destroy_rule(struct mlx5dr_rule *rule)
return 0;
}
-static bool dr_rule_is_ipv6(struct mlx5dr_match_param *param)
+static enum mlx5dr_ipv dr_rule_get_ipv(struct mlx5dr_match_spec *spec)
{
- return (param->outer.ip_version == 6 ||
- param->inner.ip_version == 6 ||
- param->outer.ethertype == ETH_P_IPV6 ||
- param->inner.ethertype == ETH_P_IPV6);
+ if (spec->ip_version == 6 || spec->ethertype == ETH_P_IPV6)
+ return DR_RULE_IPV6;
+
+ return DR_RULE_IPV4;
}
static bool dr_rule_skip(enum mlx5dr_domain_type domain,
@@ -1023,7 +1038,8 @@ dr_rule_create_rule_nic(struct mlx5dr_rule *rule,
ret = mlx5dr_matcher_select_builders(matcher,
nic_matcher,
- dr_rule_is_ipv6(param));
+ dr_rule_get_ipv(&param->outer),
+ dr_rule_get_ipv(&param->inner));
if (ret)
goto out_err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
index 5df8436b2ae3..51803eef13dd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
@@ -700,6 +700,7 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
unsigned int irqn;
void *cqc, *in;
__be64 *pas;
+ int vector;
u32 i;
cq = kzalloc(sizeof(*cq), GFP_KERNEL);
@@ -728,7 +729,8 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
if (!in)
goto err_cqwq;
- err = mlx5_vector2eqn(mdev, smp_processor_id(), &eqn, &irqn);
+ vector = smp_processor_id() % mlx5_comp_vectors_count(mdev);
+ err = mlx5_vector2eqn(mdev, vector, &eqn, &irqn);
if (err) {
kvfree(in);
goto err_cqwq;
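The fix above folds the CPU id into the range of completion vectors the
device actually allocated; on a system with more CPUs than vectors,
smp_processor_id() alone could pass mlx5_vector2eqn() an out-of-range
vector. For example, with 4 completion vectors, CPU 9 now selects
vector 9 % 4 = 1.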
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
index 4efe1b0be4a8..a5a266983dd3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2019 Mellanox Technologies. */
#include <linux/types.h>
+#include <linux/crc32.h>
#include "dr_types.h"
#define DR_STE_CRC_POLY 0xEDB88320L
@@ -107,6 +108,13 @@ struct dr_hw_ste_format {
u8 mask[DR_STE_SIZE_MASK];
};
+static u32 dr_ste_crc32_calc(const void *input_data, size_t length)
+{
+ u32 crc = crc32(0, input_data, length);
+
+ return htonl(crc);
+}
+
u32 mlx5dr_ste_calc_hash_index(u8 *hw_ste_p, struct mlx5dr_ste_htbl *htbl)
{
struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
@@ -128,7 +136,7 @@ u32 mlx5dr_ste_calc_hash_index(u8 *hw_ste_p, struct mlx5dr_ste_htbl *htbl)
bit = bit >> 1;
}
- crc32 = mlx5dr_crc32_slice8_calc(masked, DR_STE_SIZE_TAG);
+ crc32 = dr_ste_crc32_calc(masked, DR_STE_SIZE_TAG);
index = crc32 & (htbl->chunk->num_of_entries - 1);
return index;
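dr_ste_crc32_calc() replaces the hand-rolled slicing-by-8 code (the
deleted dr_crc32.c above) with the kernel's generic crc32(), which
computes the same little-endian CRC-32 over polynomial 0xEDB88320; the
htonl() preserves the byte order the old byte-swapped return statement
produced. The index computation relies on num_of_entries being a power
of two, making the AND a cheap modulo. A standalone illustration of the
masking step:

    #include <stdio.h>

    int main(void)
    {
            unsigned int crc = 0x9ae16a3bu;   /* any hash value */
            unsigned int num_of_entries = 64; /* power of two */

            /* crc % num_of_entries, valid when n is a power of 2 */
            printf("%u\n", crc & (num_of_entries - 1)); /* 59 */
            return 0;
    }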
@@ -560,18 +568,6 @@ bool mlx5dr_ste_not_used_ste(struct mlx5dr_ste *ste)
return !refcount_read(&ste->refcount);
}
-static u16 get_bits_per_mask(u16 byte_mask)
-{
- u16 bits = 0;
-
- while (byte_mask) {
- byte_mask = byte_mask & (byte_mask - 1);
- bits++;
- }
-
- return bits;
-}
-
/* Init one ste as a pattern for ste data array */
void mlx5dr_ste_set_formatted_ste(u16 gvmi,
struct mlx5dr_domain_rx_tx *nic_dmn,
@@ -620,20 +616,12 @@ int mlx5dr_ste_create_next_htbl(struct mlx5dr_matcher *matcher,
struct mlx5dr_ste_htbl *next_htbl;
if (!mlx5dr_ste_is_last_in_rule(nic_matcher, ste->ste_chain_location)) {
- u32 bits_in_mask;
u8 next_lu_type;
u16 byte_mask;
next_lu_type = MLX5_GET(ste_general, hw_ste, next_lu_type);
byte_mask = MLX5_GET(ste_general, hw_ste, byte_mask);
- /* Don't allocate table more than required,
- * the size of the table defined via the byte_mask, so no need
- * to allocate more than that.
- */
- bits_in_mask = get_bits_per_mask(byte_mask) * BITS_PER_BYTE;
- log_table_size = min(log_table_size, bits_in_mask);
-
next_htbl = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
log_table_size,
next_lu_type,
@@ -671,7 +659,7 @@ static void dr_ste_set_ctrl(struct mlx5dr_ste_htbl *htbl)
htbl->ctrl.may_grow = true;
- if (htbl->chunk_size == DR_CHUNK_SIZE_MAX - 1)
+ if (htbl->chunk_size == DR_CHUNK_SIZE_MAX - 1 || !htbl->byte_mask)
htbl->ctrl.may_grow = false;
/* Threshold is 50%, one is added to table of size 1 */
@@ -2095,68 +2083,110 @@ void mlx5dr_ste_build_eth_l4_misc(struct mlx5dr_ste_build *sb,
sb->ste_build_tag_func = &dr_ste_build_eth_l4_misc_tag;
}
-static void dr_ste_build_flex_parser_tnl_bit_mask(struct mlx5dr_match_param *value,
- bool inner, u8 *bit_mask)
+static void
+dr_ste_build_flex_parser_tnl_vxlan_gpe_bit_mask(struct mlx5dr_match_param *value,
+ bool inner, u8 *bit_mask)
{
struct mlx5dr_match_misc3 *misc_3_mask = &value->misc3;
- if (misc_3_mask->outer_vxlan_gpe_flags ||
- misc_3_mask->outer_vxlan_gpe_next_protocol) {
- MLX5_SET(ste_flex_parser_tnl, bit_mask,
- flex_parser_tunneling_header_63_32,
- (misc_3_mask->outer_vxlan_gpe_flags << 24) |
- (misc_3_mask->outer_vxlan_gpe_next_protocol));
- misc_3_mask->outer_vxlan_gpe_flags = 0;
- misc_3_mask->outer_vxlan_gpe_next_protocol = 0;
- }
-
- if (misc_3_mask->outer_vxlan_gpe_vni) {
- MLX5_SET(ste_flex_parser_tnl, bit_mask,
- flex_parser_tunneling_header_31_0,
- misc_3_mask->outer_vxlan_gpe_vni << 8);
- misc_3_mask->outer_vxlan_gpe_vni = 0;
- }
+ DR_STE_SET_MASK_V(flex_parser_tnl_vxlan_gpe, bit_mask,
+ outer_vxlan_gpe_flags,
+ misc_3_mask, outer_vxlan_gpe_flags);
+ DR_STE_SET_MASK_V(flex_parser_tnl_vxlan_gpe, bit_mask,
+ outer_vxlan_gpe_next_protocol,
+ misc_3_mask, outer_vxlan_gpe_next_protocol);
+ DR_STE_SET_MASK_V(flex_parser_tnl_vxlan_gpe, bit_mask,
+ outer_vxlan_gpe_vni,
+ misc_3_mask, outer_vxlan_gpe_vni);
}
-static int dr_ste_build_flex_parser_tnl_tag(struct mlx5dr_match_param *value,
- struct mlx5dr_ste_build *sb,
- u8 *hw_ste_p)
+static int
+dr_ste_build_flex_parser_tnl_vxlan_gpe_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *hw_ste_p)
{
struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
struct mlx5dr_match_misc3 *misc3 = &value->misc3;
u8 *tag = hw_ste->tag;
- if (misc3->outer_vxlan_gpe_flags ||
- misc3->outer_vxlan_gpe_next_protocol) {
- MLX5_SET(ste_flex_parser_tnl, tag,
- flex_parser_tunneling_header_63_32,
- (misc3->outer_vxlan_gpe_flags << 24) |
- (misc3->outer_vxlan_gpe_next_protocol));
- misc3->outer_vxlan_gpe_flags = 0;
- misc3->outer_vxlan_gpe_next_protocol = 0;
- }
-
- if (misc3->outer_vxlan_gpe_vni) {
- MLX5_SET(ste_flex_parser_tnl, tag,
- flex_parser_tunneling_header_31_0,
- misc3->outer_vxlan_gpe_vni << 8);
- misc3->outer_vxlan_gpe_vni = 0;
- }
+ DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
+ outer_vxlan_gpe_flags, misc3,
+ outer_vxlan_gpe_flags);
+ DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
+ outer_vxlan_gpe_next_protocol, misc3,
+ outer_vxlan_gpe_next_protocol);
+ DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
+ outer_vxlan_gpe_vni, misc3,
+ outer_vxlan_gpe_vni);
return 0;
}
-void mlx5dr_ste_build_flex_parser_tnl(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask,
- bool inner, bool rx)
+void mlx5dr_ste_build_flex_parser_tnl_vxlan_gpe(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx)
+{
+ dr_ste_build_flex_parser_tnl_vxlan_gpe_bit_mask(mask, inner,
+ sb->bit_mask);
+
+ sb->rx = rx;
+ sb->inner = inner;
+ sb->lu_type = MLX5DR_STE_LU_TYPE_FLEX_PARSER_TNL_HEADER;
+ sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_build_flex_parser_tnl_vxlan_gpe_tag;
+}
+
+static void
+dr_ste_build_flex_parser_tnl_geneve_bit_mask(struct mlx5dr_match_param *value,
+ u8 *bit_mask)
+{
+ struct mlx5dr_match_misc *misc_mask = &value->misc;
+
+ DR_STE_SET_MASK_V(flex_parser_tnl_geneve, bit_mask,
+ geneve_protocol_type,
+ misc_mask, geneve_protocol_type);
+ DR_STE_SET_MASK_V(flex_parser_tnl_geneve, bit_mask,
+ geneve_oam,
+ misc_mask, geneve_oam);
+ DR_STE_SET_MASK_V(flex_parser_tnl_geneve, bit_mask,
+ geneve_opt_len,
+ misc_mask, geneve_opt_len);
+ DR_STE_SET_MASK_V(flex_parser_tnl_geneve, bit_mask,
+ geneve_vni,
+ misc_mask, geneve_vni);
+}
+
+static int
+dr_ste_build_flex_parser_tnl_geneve_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *hw_ste_p)
{
- dr_ste_build_flex_parser_tnl_bit_mask(mask, inner, sb->bit_mask);
+ struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
+ struct mlx5dr_match_misc *misc = &value->misc;
+ u8 *tag = hw_ste->tag;
+
+ DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
+ geneve_protocol_type, misc, geneve_protocol_type);
+ DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
+ geneve_oam, misc, geneve_oam);
+ DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
+ geneve_opt_len, misc, geneve_opt_len);
+ DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
+ geneve_vni, misc, geneve_vni);
+
+ return 0;
+}
+
+void mlx5dr_ste_build_flex_parser_tnl_geneve(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx)
+{
+ dr_ste_build_flex_parser_tnl_geneve_bit_mask(mask, sb->bit_mask);
sb->rx = rx;
sb->inner = inner;
sb->lu_type = MLX5DR_STE_LU_TYPE_FLEX_PARSER_TNL_HEADER;
sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
- sb->ste_build_tag_func = &dr_ste_build_flex_parser_tnl_tag;
+ sb->ste_build_tag_func = &dr_ste_build_flex_parser_tnl_geneve_tag;
}
static void dr_ste_build_register_0_bit_mask(struct mlx5dr_match_param *value,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
index 1cb3769d4e3c..290fe61c33d0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
@@ -106,6 +106,12 @@ enum mlx5dr_action_type {
DR_ACTION_TYP_MAX,
};
+enum mlx5dr_ipv {
+ DR_RULE_IPV4,
+ DR_RULE_IPV6,
+ DR_RULE_IPV_MAX,
+};
+
struct mlx5dr_icm_pool;
struct mlx5dr_icm_chunk;
struct mlx5dr_icm_bucket;
@@ -319,9 +325,12 @@ int mlx5dr_ste_build_flex_parser_1(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
struct mlx5dr_cmd_caps *caps,
bool inner, bool rx);
-void mlx5dr_ste_build_flex_parser_tnl(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask,
- bool inner, bool rx);
+void mlx5dr_ste_build_flex_parser_tnl_vxlan_gpe(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx);
+void mlx5dr_ste_build_flex_parser_tnl_geneve(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx);
void mlx5dr_ste_build_general_purpose(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
@@ -679,11 +688,11 @@ struct mlx5dr_matcher_rx_tx {
struct mlx5dr_ste_htbl *s_htbl;
struct mlx5dr_ste_htbl *e_anchor;
struct mlx5dr_ste_build *ste_builder;
- struct mlx5dr_ste_build ste_builder4[DR_RULE_MAX_STES];
- struct mlx5dr_ste_build ste_builder6[DR_RULE_MAX_STES];
+ struct mlx5dr_ste_build ste_builder_arr[DR_RULE_IPV_MAX]
+ [DR_RULE_IPV_MAX]
+ [DR_RULE_MAX_STES];
u8 num_of_builders;
- u8 num_of_builders4;
- u8 num_of_builders6;
+ u8 num_of_builders_arr[DR_RULE_IPV_MAX][DR_RULE_IPV_MAX];
u64 default_icm_addr;
struct mlx5dr_table_rx_tx *nic_tbl;
};
@@ -812,7 +821,8 @@ mlx5dr_matcher_supp_flex_parser_icmp_v6(struct mlx5dr_cmd_caps *caps)
int mlx5dr_matcher_select_builders(struct mlx5dr_matcher *matcher,
struct mlx5dr_matcher_rx_tx *nic_matcher,
- bool ipv6);
+ enum mlx5dr_ipv outer_ipv,
+ enum mlx5dr_ipv inner_ipv);
static inline u32
mlx5dr_icm_pool_chunk_size_to_entries(enum mlx5dr_icm_chunk_size chunk_size)
@@ -962,9 +972,6 @@ void mlx5dr_ste_copy_param(u8 match_criteria,
struct mlx5dr_match_param *set_param,
struct mlx5dr_match_parameters *mask);
-void mlx5dr_crc32_init_table(void);
-u32 mlx5dr_crc32_slice8_calc(const void *input_data, size_t length);
-
struct mlx5dr_qp {
struct mlx5_core_dev *mdev;
struct mlx5_wq_qp wq;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h
index 596c927220d9..1722f4668269 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h
@@ -548,6 +548,30 @@ struct mlx5_ifc_ste_flex_parser_tnl_bits {
u8 reserved_at_40[0x40];
};
+struct mlx5_ifc_ste_flex_parser_tnl_vxlan_gpe_bits {
+ u8 outer_vxlan_gpe_flags[0x8];
+ u8 reserved_at_8[0x10];
+ u8 outer_vxlan_gpe_next_protocol[0x8];
+
+ u8 outer_vxlan_gpe_vni[0x18];
+ u8 reserved_at_38[0x8];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_ste_flex_parser_tnl_geneve_bits {
+ u8 reserved_at_0[0x2];
+ u8 geneve_opt_len[0x6];
+ u8 geneve_oam[0x1];
+ u8 reserved_at_9[0x7];
+ u8 geneve_protocol_type[0x10];
+
+ u8 geneve_vni[0x18];
+ u8 reserved_at_38[0x8];
+
+ u8 reserved_at_40[0x40];
+};
+
struct mlx5_ifc_ste_general_purpose_bits {
u8 general_purpose_lookup_field[0x20];
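In mlx5_ifc-style headers, u8 name[0xN] declares an N-bit field at the
running bit offset, accessed with MLX5_SET()/MLX5_GET(); the geneve
struct above thus packs opt_len (6 bits), oam (1 bit) and protocol_type
(16 bits) into the first dword and the 24-bit VNI into the second. A
rough sketch of what each DR_STE_SET_TAG() invocation in dr_ste.c
expands to (simplified; the real macro lives in the DR headers):

    if (misc->geneve_vni) {
            MLX5_SET(ste_flex_parser_tnl_geneve, tag, geneve_vni,
                     misc->geneve_vni);
            misc->geneve_vni = 0; /* consumed bits are cleared so
                                   * leftover masks can be detected */
    }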
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index 30f7848a6f88..1faac31f74d0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -1064,26 +1064,13 @@ int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev,
ctx = MLX5_ADDR_OF(modify_hca_vport_context_in, in, hca_vport_context);
MLX5_SET(hca_vport_context, ctx, field_select, req->field_select);
- MLX5_SET(hca_vport_context, ctx, sm_virt_aware, req->sm_virt_aware);
- MLX5_SET(hca_vport_context, ctx, has_smi, req->has_smi);
- MLX5_SET(hca_vport_context, ctx, has_raw, req->has_raw);
- MLX5_SET(hca_vport_context, ctx, vport_state_policy, req->policy);
- MLX5_SET(hca_vport_context, ctx, port_physical_state, req->phys_state);
- MLX5_SET(hca_vport_context, ctx, vport_state, req->vport_state);
- MLX5_SET64(hca_vport_context, ctx, port_guid, req->port_guid);
- MLX5_SET64(hca_vport_context, ctx, node_guid, req->node_guid);
- MLX5_SET(hca_vport_context, ctx, cap_mask1, req->cap_mask1);
- MLX5_SET(hca_vport_context, ctx, cap_mask1_field_select, req->cap_mask1_perm);
- MLX5_SET(hca_vport_context, ctx, cap_mask2, req->cap_mask2);
- MLX5_SET(hca_vport_context, ctx, cap_mask2_field_select, req->cap_mask2_perm);
- MLX5_SET(hca_vport_context, ctx, lid, req->lid);
- MLX5_SET(hca_vport_context, ctx, init_type_reply, req->init_type_reply);
- MLX5_SET(hca_vport_context, ctx, lmc, req->lmc);
- MLX5_SET(hca_vport_context, ctx, subnet_timeout, req->subnet_timeout);
- MLX5_SET(hca_vport_context, ctx, sm_lid, req->sm_lid);
- MLX5_SET(hca_vport_context, ctx, sm_sl, req->sm_sl);
- MLX5_SET(hca_vport_context, ctx, qkey_violation_counter, req->qkey_violation_counter);
- MLX5_SET(hca_vport_context, ctx, pkey_violation_counter, req->pkey_violation_counter);
+ if (req->field_select & MLX5_HCA_VPORT_SEL_STATE_POLICY)
+ MLX5_SET(hca_vport_context, ctx, vport_state_policy,
+ req->policy);
+ if (req->field_select & MLX5_HCA_VPORT_SEL_PORT_GUID)
+ MLX5_SET64(hca_vport_context, ctx, port_guid, req->port_guid);
+ if (req->field_select & MLX5_HCA_VPORT_SEL_NODE_GUID)
+ MLX5_SET64(hca_vport_context, ctx, node_guid, req->node_guid);
err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
ex:
kfree(in);
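Gating each MLX5_SET() on the corresponding field_select bit means a
caller that modifies, say, only the node GUID no longer overwrites the
other vport attributes with whatever stale values sit in req. A hedged
caller sketch (argument values are illustrative only):

    struct mlx5_hca_vport_context req = {
            .field_select = MLX5_HCA_VPORT_SEL_NODE_GUID,
            .node_guid    = new_guid, /* hypothetical value */
    };

    /* State policy and port GUID stay untouched: their bits are clear */
    err = mlx5_core_modify_hca_vport_context(dev, other_vport, port_num,
                                             vf_num, &req);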
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
index dd2315ce4441..f2a0e72285ba 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
@@ -34,26 +34,6 @@
#include "wq.h"
#include "mlx5_core.h"
-u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq)
-{
- return (u32)wq->fbc.sz_m1 + 1;
-}
-
-u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq)
-{
- return wq->fbc.sz_m1 + 1;
-}
-
-u8 mlx5_cqwq_get_log_stride_size(struct mlx5_cqwq *wq)
-{
- return wq->fbc.log_stride;
-}
-
-u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq)
-{
- return (u32)wq->fbc.sz_m1 + 1;
-}
-
static u32 wq_get_byte_sz(u8 log_sz, u8 log_stride)
{
return ((u32)1 << log_sz) << log_stride;
@@ -96,6 +76,24 @@ err_db_free:
return err;
}
+void mlx5_wq_cyc_wqe_dump(struct mlx5_wq_cyc *wq, u16 ix, u8 nstrides)
+{
+ size_t len;
+ void *wqe;
+
+ if (!net_ratelimit())
+ return;
+
+ nstrides = max_t(u8, nstrides, 1);
+
+ len = nstrides << wq->fbc.log_stride;
+ wqe = mlx5_wq_cyc_get_wqe(wq, ix);
+
+ pr_info("WQE DUMP: WQ size %d WQ cur size %d, WQE index 0x%x, len: %ld\n",
+ mlx5_wq_cyc_get_size(wq), wq->cur_sz, ix, len);
+ print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, wqe, len, false);
+}
+
int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
void *qpc, struct mlx5_wq_qp *wq,
struct mlx5_wq_ctrl *wq_ctrl)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
index 55791f71a778..d9a94bc223c0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
@@ -79,7 +79,7 @@ struct mlx5_wq_ll {
int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
void *wqc, struct mlx5_wq_cyc *wq,
struct mlx5_wq_ctrl *wq_ctrl);
-u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq);
+void mlx5_wq_cyc_wqe_dump(struct mlx5_wq_cyc *wq, u16 ix, u8 nstrides);
int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
void *qpc, struct mlx5_wq_qp *wq,
@@ -88,16 +88,18 @@ int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
void *cqc, struct mlx5_cqwq *wq,
struct mlx5_wq_ctrl *wq_ctrl);
-u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq);
-u8 mlx5_cqwq_get_log_stride_size(struct mlx5_cqwq *wq);
int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
void *wqc, struct mlx5_wq_ll *wq,
struct mlx5_wq_ctrl *wq_ctrl);
-u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq);
void mlx5_wq_destroy(struct mlx5_wq_ctrl *wq_ctrl);
+static inline u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq)
+{
+ return (u32)wq->fbc.sz_m1 + 1;
+}
+
static inline int mlx5_wq_cyc_is_full(struct mlx5_wq_cyc *wq)
{
return wq->cur_sz == wq->sz;
@@ -168,6 +170,16 @@ static inline int mlx5_wq_cyc_cc_bigger(u16 cc1, u16 cc2)
return !equal && !smaller;
}
+static inline u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq)
+{
+ return wq->fbc.sz_m1 + 1;
+}
+
+static inline u8 mlx5_cqwq_get_log_stride_size(struct mlx5_cqwq *wq)
+{
+ return wq->fbc.log_stride;
+}
+
static inline u32 mlx5_cqwq_ctr2ix(struct mlx5_cqwq *wq, u32 ctr)
{
return ctr & wq->fbc.sz_m1;
@@ -224,6 +236,11 @@ static inline struct mlx5_cqe64 *mlx5_cqwq_get_cqe(struct mlx5_cqwq *wq)
return cqe;
}
+static inline u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq)
+{
+ return (u32)wq->fbc.sz_m1 + 1;
+}
+
static inline int mlx5_wq_ll_is_full(struct mlx5_wq_ll *wq)
{
return wq->cur_sz == wq->fbc.sz_m1;
diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c
index 67990406cba2..29e95d0a6ad1 100644
--- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c
+++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c
@@ -66,6 +66,8 @@ retry:
return err;
if (fsm_state_err != MLXFW_FSM_STATE_ERR_OK) {
+ fsm_state_err = min_t(enum mlxfw_fsm_state_err,
+ fsm_state_err, MLXFW_FSM_STATE_ERR_MAX);
pr_err("Firmware flash failed: %s\n",
mlxfw_fsm_state_err_str[fsm_state_err]);
NL_SET_ERR_MSG_MOD(extack, "Firmware flash failed");
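The min_t() clamp keeps the index inside mlxfw_fsm_state_err_str[] even
when newer firmware reports an error code this driver does not know;
MLXFW_FSM_STATE_ERR_MAX presumably maps to a generic string. The same
defensive pattern as a standalone demo:

    #include <stdio.h>

    enum err { ERR_OK, ERR_A, ERR_B, ERR_MAX };

    static const char * const err_str[] = {
            [ERR_OK] = "ok", [ERR_A] = "a", [ERR_B] = "b",
            [ERR_MAX] = "unknown error",
    };

    int main(void)
    {
            int fw_err = 7; /* firmware newer than this driver */
            int idx = fw_err < ERR_MAX ? fw_err : ERR_MAX;

            printf("%s\n", err_str[idx]); /* "unknown error" */
            return 0;
    }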
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index 4421ab22182f..e9f791c43f20 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -71,6 +71,7 @@ struct mlxsw_core {
struct list_head trans_list;
spinlock_t trans_list_lock; /* protects trans_list writes */
bool use_emad;
+ bool enable_string_tlv;
} emad;
struct {
u8 *mapping; /* lag_id+port_index to local_port mapping */
@@ -127,6 +128,16 @@ bool mlxsw_core_res_query_enabled(const struct mlxsw_core *mlxsw_core)
}
EXPORT_SYMBOL(mlxsw_core_res_query_enabled);
+bool
+mlxsw_core_fw_rev_minor_subminor_validate(const struct mlxsw_fw_rev *rev,
+ const struct mlxsw_fw_rev *req_rev)
+{
+ return rev->minor > req_rev->minor ||
+ (rev->minor == req_rev->minor &&
+ rev->subminor >= req_rev->subminor);
+}
+EXPORT_SYMBOL(mlxsw_core_fw_rev_minor_subminor_validate);
+
struct mlxsw_rx_listener_item {
struct list_head list;
struct mlxsw_rx_listener rxl;
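mlxsw_core_fw_rev_minor_subminor_validate() accepts a firmware revision
whose (minor, subminor) pair compares lexicographically greater than or
equal to the required pair; the major number is left to the callers,
which typically demand an exact match. A standalone check (revision
numbers are made up):

    #include <stdio.h>

    struct rev { int minor, subminor; };

    static int minor_subminor_ok(struct rev r, struct rev req)
    {
            return r.minor > req.minor ||
                   (r.minor == req.minor && r.subminor >= req.subminor);
    }

    int main(void)
    {
            struct rev req = { 2000, 1886 };

            printf("%d\n", minor_subminor_ok((struct rev){ 2001, 0 },
                                             req)); /* 1 */
            printf("%d\n", minor_subminor_ok((struct rev){ 2000, 1700 },
                                             req)); /* 0 */
            return 0;
    }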
@@ -239,6 +250,25 @@ MLXSW_ITEM32(emad, op_tlv, class, 0x04, 0, 8);
*/
MLXSW_ITEM64(emad, op_tlv, tid, 0x08, 0, 64);
+/* emad_string_tlv_type
+ * Type of the TLV.
+ * Must be set to 0x2 (string TLV).
+ */
+MLXSW_ITEM32(emad, string_tlv, type, 0x00, 27, 5);
+
+/* emad_string_tlv_len
+ * Length of the string TLV in u32.
+ */
+MLXSW_ITEM32(emad, string_tlv, len, 0x00, 16, 11);
+
+#define MLXSW_EMAD_STRING_TLV_STRING_LEN 128
+
+/* emad_string_tlv_string
+ * String provided by the device's firmware in case of erroneous register access.
+ */
+MLXSW_ITEM_BUF(emad, string_tlv, string, 0x04,
+ MLXSW_EMAD_STRING_TLV_STRING_LEN);
+
/* emad_reg_tlv_type
* Type of the TLV.
* Must be set to 0x3 (register TLV).
@@ -294,6 +324,12 @@ static void mlxsw_emad_pack_reg_tlv(char *reg_tlv,
memcpy(reg_tlv + sizeof(u32), payload, reg->len);
}
+static void mlxsw_emad_pack_string_tlv(char *string_tlv)
+{
+ mlxsw_emad_string_tlv_type_set(string_tlv, MLXSW_EMAD_TLV_TYPE_STRING);
+ mlxsw_emad_string_tlv_len_set(string_tlv, MLXSW_EMAD_STRING_TLV_LEN);
+}
+
static void mlxsw_emad_pack_op_tlv(char *op_tlv,
const struct mlxsw_reg_info *reg,
enum mlxsw_core_reg_access_type type,
@@ -335,7 +371,7 @@ static void mlxsw_emad_construct(struct sk_buff *skb,
const struct mlxsw_reg_info *reg,
char *payload,
enum mlxsw_core_reg_access_type type,
- u64 tid)
+ u64 tid, bool enable_string_tlv)
{
char *buf;
@@ -345,26 +381,82 @@ static void mlxsw_emad_construct(struct sk_buff *skb,
buf = skb_push(skb, reg->len + sizeof(u32));
mlxsw_emad_pack_reg_tlv(buf, reg, payload);
+ if (enable_string_tlv) {
+ buf = skb_push(skb, MLXSW_EMAD_STRING_TLV_LEN * sizeof(u32));
+ mlxsw_emad_pack_string_tlv(buf);
+ }
+
buf = skb_push(skb, MLXSW_EMAD_OP_TLV_LEN * sizeof(u32));
mlxsw_emad_pack_op_tlv(buf, reg, type, tid);
mlxsw_emad_construct_eth_hdr(skb);
}
+struct mlxsw_emad_tlv_offsets {
+ u16 op_tlv;
+ u16 string_tlv;
+ u16 reg_tlv;
+};
+
+static bool mlxsw_emad_tlv_is_string_tlv(const char *tlv)
+{
+ u8 tlv_type = mlxsw_emad_string_tlv_type_get(tlv);
+
+ return tlv_type == MLXSW_EMAD_TLV_TYPE_STRING;
+}
+
+static void mlxsw_emad_tlv_parse(struct sk_buff *skb)
+{
+ struct mlxsw_emad_tlv_offsets *offsets =
+ (struct mlxsw_emad_tlv_offsets *) skb->cb;
+
+ offsets->op_tlv = MLXSW_EMAD_ETH_HDR_LEN;
+ offsets->string_tlv = 0;
+ offsets->reg_tlv = MLXSW_EMAD_ETH_HDR_LEN +
+ MLXSW_EMAD_OP_TLV_LEN * sizeof(u32);
+
+ /* If string TLV is present, it must come after the operation TLV. */
+ if (mlxsw_emad_tlv_is_string_tlv(skb->data + offsets->reg_tlv)) {
+ offsets->string_tlv = offsets->reg_tlv;
+ offsets->reg_tlv += MLXSW_EMAD_STRING_TLV_LEN * sizeof(u32);
+ }
+}
+
static char *mlxsw_emad_op_tlv(const struct sk_buff *skb)
{
- return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN));
+ struct mlxsw_emad_tlv_offsets *offsets =
+ (struct mlxsw_emad_tlv_offsets *) skb->cb;
+
+ return ((char *) (skb->data + offsets->op_tlv));
+}
+
+static char *mlxsw_emad_string_tlv(const struct sk_buff *skb)
+{
+ struct mlxsw_emad_tlv_offsets *offsets =
+ (struct mlxsw_emad_tlv_offsets *) skb->cb;
+
+ if (!offsets->string_tlv)
+ return NULL;
+
+ return ((char *) (skb->data + offsets->string_tlv));
}
static char *mlxsw_emad_reg_tlv(const struct sk_buff *skb)
{
- return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN +
- MLXSW_EMAD_OP_TLV_LEN * sizeof(u32)));
+ struct mlxsw_emad_tlv_offsets *offsets =
+ (struct mlxsw_emad_tlv_offsets *) skb->cb;
+
+ return ((char *) (skb->data + offsets->reg_tlv));
}
-static char *mlxsw_emad_reg_payload(const char *op_tlv)
+static char *mlxsw_emad_reg_payload(const char *reg_tlv)
{
- return ((char *) (op_tlv + (MLXSW_EMAD_OP_TLV_LEN + 1) * sizeof(u32)));
+ return ((char *) (reg_tlv + sizeof(u32)));
+}
+
+static char *mlxsw_emad_reg_payload_cmd(const char *mbox)
+{
+ return ((char *) (mbox + (MLXSW_EMAD_OP_TLV_LEN + 1) * sizeof(u32)));
}
static u64 mlxsw_emad_get_tid(const struct sk_buff *skb)
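After this change an EMAD frame is: Ethernet header, operation TLV,
optional string TLV, register TLV. mlxsw_emad_tlv_parse() walks the
frame once and caches byte offsets in skb->cb (48 bytes, comfortably
holding three u16s), so the accessors above no longer hard-code the
layout. A summary sketch of the assumed layout:

    /* offset 0 .................. Ethernet header
     * + MLXSW_EMAD_ETH_HDR_LEN .. op TLV (MLXSW_EMAD_OP_TLV_LEN u32s)
     * (optional) ................ string TLV, type 0x2, fw error text
     * ........................... reg TLV, type 0x3, register payload
     *
     * offsets->string_tlv == 0 doubles as "not present"; a real string
     * TLV can never start at byte 0.
     */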
@@ -430,10 +522,31 @@ struct mlxsw_reg_trans {
const struct mlxsw_reg_info *reg;
enum mlxsw_core_reg_access_type type;
int err;
+ char *emad_err_string;
enum mlxsw_emad_op_tlv_status emad_status;
struct rcu_head rcu;
};
+static void mlxsw_emad_process_string_tlv(const struct sk_buff *skb,
+ struct mlxsw_reg_trans *trans)
+{
+ char *string_tlv;
+ char *string;
+
+ string_tlv = mlxsw_emad_string_tlv(skb);
+ if (!string_tlv)
+ return;
+
+ trans->emad_err_string = kzalloc(MLXSW_EMAD_STRING_TLV_STRING_LEN,
+ GFP_ATOMIC);
+ if (!trans->emad_err_string)
+ return;
+
+ string = mlxsw_emad_string_tlv_string_data(string_tlv);
+ strlcpy(trans->emad_err_string, string,
+ MLXSW_EMAD_STRING_TLV_STRING_LEN);
+}
+
#define MLXSW_EMAD_TIMEOUT_DURING_FW_FLASH_MS 3000
#define MLXSW_EMAD_TIMEOUT_MS 200
@@ -525,12 +638,14 @@ static void mlxsw_emad_process_response(struct mlxsw_core *mlxsw_core,
mlxsw_emad_transmit_retry(mlxsw_core, trans);
} else {
if (err == 0) {
- char *op_tlv = mlxsw_emad_op_tlv(skb);
+ char *reg_tlv = mlxsw_emad_reg_tlv(skb);
if (trans->cb)
trans->cb(mlxsw_core,
- mlxsw_emad_reg_payload(op_tlv),
+ mlxsw_emad_reg_payload(reg_tlv),
trans->reg->len, trans->cb_priv);
+ } else {
+ mlxsw_emad_process_string_tlv(skb, trans);
}
mlxsw_emad_trans_finish(trans, err);
}
@@ -546,6 +661,8 @@ static void mlxsw_emad_rx_listener_func(struct sk_buff *skb, u8 local_port,
trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), true, 0,
skb->data, skb->len);
+ mlxsw_emad_tlv_parse(skb);
+
if (!mlxsw_emad_is_resp(skb))
goto free_skb;
@@ -621,7 +738,7 @@ static void mlxsw_emad_fini(struct mlxsw_core *mlxsw_core)
}
static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core,
- u16 reg_len)
+ u16 reg_len, bool enable_string_tlv)
{
struct sk_buff *skb;
u16 emad_len;
@@ -629,6 +746,8 @@ static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core,
emad_len = (reg_len + sizeof(u32) + MLXSW_EMAD_ETH_HDR_LEN +
(MLXSW_EMAD_OP_TLV_LEN + MLXSW_EMAD_END_TLV_LEN) *
sizeof(u32) + mlxsw_core->driver->txhdr_len);
+ if (enable_string_tlv)
+ emad_len += MLXSW_EMAD_STRING_TLV_LEN * sizeof(u32);
if (emad_len > MLXSW_EMAD_MAX_FRAME_LEN)
return NULL;
@@ -650,6 +769,7 @@ static int mlxsw_emad_reg_access(struct mlxsw_core *mlxsw_core,
mlxsw_reg_trans_cb_t *cb,
unsigned long cb_priv, u64 tid)
{
+ bool enable_string_tlv;
struct sk_buff *skb;
int err;
@@ -657,7 +777,12 @@ static int mlxsw_emad_reg_access(struct mlxsw_core *mlxsw_core,
tid, reg->id, mlxsw_reg_id_str(reg->id),
mlxsw_core_reg_access_type_str(type));
- skb = mlxsw_emad_alloc(mlxsw_core, reg->len);
+ /* Since this can change while emad_reg_access() runs, read it once
+ * and use that value throughout.
+ */
+ enable_string_tlv = mlxsw_core->emad.enable_string_tlv;
+
+ skb = mlxsw_emad_alloc(mlxsw_core, reg->len, enable_string_tlv);
if (!skb)
return -ENOMEM;
@@ -674,7 +799,8 @@ static int mlxsw_emad_reg_access(struct mlxsw_core *mlxsw_core,
trans->reg = reg;
trans->type = type;
- mlxsw_emad_construct(skb, reg, payload, type, trans->tid);
+ mlxsw_emad_construct(skb, reg, payload, type, trans->tid,
+ enable_string_tlv);
mlxsw_core->driver->txhdr_construct(skb, &trans->tx_info);
spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
@@ -985,6 +1111,7 @@ mlxsw_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req,
static int
mlxsw_devlink_core_bus_device_reload_down(struct devlink *devlink,
+ bool netns_change,
struct netlink_ext_ack *extack)
{
struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
@@ -1005,7 +1132,7 @@ mlxsw_devlink_core_bus_device_reload_up(struct devlink *devlink,
return mlxsw_core_bus_device_register(mlxsw_core->bus_info,
mlxsw_core->bus,
mlxsw_core->bus_priv, true,
- devlink);
+ devlink, extack);
}
static int mlxsw_devlink_flash_update(struct devlink *devlink,
@@ -1098,7 +1225,8 @@ static int
__mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
const struct mlxsw_bus *mlxsw_bus,
void *bus_priv, bool reload,
- struct devlink *devlink)
+ struct devlink *devlink,
+ struct netlink_ext_ack *extack)
{
const char *device_kind = mlxsw_bus_info->device_kind;
struct mlxsw_core *mlxsw_core;
@@ -1172,7 +1300,7 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
}
if (mlxsw_driver->init) {
- err = mlxsw_driver->init(mlxsw_core, mlxsw_bus_info);
+ err = mlxsw_driver->init(mlxsw_core, mlxsw_bus_info, extack);
if (err)
goto err_driver_init;
}
@@ -1189,6 +1317,9 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
if (mlxsw_driver->params_register)
devlink_params_publish(devlink);
+ if (!reload)
+ devlink_reload_enable(devlink);
+
return 0;
err_thermal_init:
@@ -1223,14 +1354,16 @@ err_devlink_alloc:
int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
const struct mlxsw_bus *mlxsw_bus,
void *bus_priv, bool reload,
- struct devlink *devlink)
+ struct devlink *devlink,
+ struct netlink_ext_ack *extack)
{
bool called_again = false;
int err;
again:
err = __mlxsw_core_bus_device_register(mlxsw_bus_info, mlxsw_bus,
- bus_priv, reload, devlink);
+ bus_priv, reload,
+ devlink, extack);
/* -EAGAIN is returned in case the FW was updated. FW needs
* a reset, so let's try to call __mlxsw_core_bus_device_register()
* again.
@@ -1249,6 +1382,8 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
{
struct devlink *devlink = priv_to_devlink(mlxsw_core);
+ if (!reload)
+ devlink_reload_disable(devlink);
if (devlink_is_reload_failed(devlink)) {
if (!reload)
/* Only the parts that were not de-initialized in the
@@ -1376,12 +1511,16 @@ static void mlxsw_core_event_listener_func(struct sk_buff *skb, u8 local_port,
struct mlxsw_event_listener_item *event_listener_item = priv;
struct mlxsw_reg_info reg;
char *payload;
- char *op_tlv = mlxsw_emad_op_tlv(skb);
- char *reg_tlv = mlxsw_emad_reg_tlv(skb);
+ char *reg_tlv;
+ char *op_tlv;
+
+ mlxsw_emad_tlv_parse(skb);
+ op_tlv = mlxsw_emad_op_tlv(skb);
+ reg_tlv = mlxsw_emad_reg_tlv(skb);
reg.id = mlxsw_emad_op_tlv_register_id_get(op_tlv);
reg.len = (mlxsw_emad_reg_tlv_len_get(reg_tlv) - 1) * sizeof(u32);
- payload = mlxsw_emad_reg_payload(op_tlv);
+ payload = mlxsw_emad_reg_payload(reg_tlv);
event_listener_item->el.func(&reg, payload, event_listener_item->priv);
dev_kfree_skb(skb);
}
@@ -1599,8 +1738,11 @@ int mlxsw_reg_trans_write(struct mlxsw_core *mlxsw_core,
}
EXPORT_SYMBOL(mlxsw_reg_trans_write);
+#define MLXSW_REG_TRANS_ERR_STRING_SIZE 256
+
static int mlxsw_reg_trans_wait(struct mlxsw_reg_trans *trans)
{
+ char err_string[MLXSW_REG_TRANS_ERR_STRING_SIZE];
struct mlxsw_core *mlxsw_core = trans->core;
int err;
@@ -1618,9 +1760,17 @@ static int mlxsw_reg_trans_wait(struct mlxsw_reg_trans *trans)
mlxsw_core_reg_access_type_str(trans->type),
trans->emad_status,
mlxsw_emad_op_tlv_status_str(trans->emad_status));
+
+ snprintf(err_string, MLXSW_REG_TRANS_ERR_STRING_SIZE,
+ "(tid=%llx,reg_id=%x(%s)) %s (%s)\n", trans->tid,
+ trans->reg->id, mlxsw_reg_id_str(trans->reg->id),
+ mlxsw_emad_op_tlv_status_str(trans->emad_status),
+ trans->emad_err_string ? trans->emad_err_string : "");
+
trace_devlink_hwerr(priv_to_devlink(mlxsw_core),
- trans->emad_status,
- mlxsw_emad_op_tlv_status_str(trans->emad_status));
+ trans->emad_status, err_string);
+
+ kfree(trans->emad_err_string);
}
list_del(&trans->bulk_list);
@@ -1694,7 +1844,7 @@ retry:
}
if (!err)
- memcpy(payload, mlxsw_emad_reg_payload(out_mbox),
+ memcpy(payload, mlxsw_emad_reg_payload_cmd(out_mbox),
reg->len);
mlxsw_cmd_mbox_free(out_mbox);
@@ -2003,6 +2153,35 @@ mlxsw_core_port_devlink_port_get(struct mlxsw_core *mlxsw_core,
}
EXPORT_SYMBOL(mlxsw_core_port_devlink_port_get);
+int mlxsw_core_module_max_width(struct mlxsw_core *mlxsw_core, u8 module)
+{
+ enum mlxsw_reg_pmtm_module_type module_type;
+ char pmtm_pl[MLXSW_REG_PMTM_LEN];
+ int err;
+
+ mlxsw_reg_pmtm_pack(pmtm_pl, module);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtm), pmtm_pl);
+ if (err)
+ return err;
+ mlxsw_reg_pmtm_unpack(pmtm_pl, &module_type);
+
+ /* Here we need to get the module width according to the module type. */
+
+ switch (module_type) {
+ case MLXSW_REG_PMTM_MODULE_TYPE_BP_4X: /* fall through */
+ case MLXSW_REG_PMTM_MODULE_TYPE_BP_QSFP:
+ return 4;
+ case MLXSW_REG_PMTM_MODULE_TYPE_BP_2X:
+ return 2;
+ case MLXSW_REG_PMTM_MODULE_TYPE_BP_SFP: /* fall through */
+ case MLXSW_REG_PMTM_MODULE_TYPE_BP_1X:
+ return 1;
+ default:
+ return -EINVAL;
+ }
+}
+EXPORT_SYMBOL(mlxsw_core_module_max_width);
+
static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core,
const char *buf, size_t size)
{
@@ -2162,6 +2341,12 @@ u32 mlxsw_core_read_frc_l(struct mlxsw_core *mlxsw_core)
}
EXPORT_SYMBOL(mlxsw_core_read_frc_l);
+void mlxsw_core_emad_string_tlv_enable(struct mlxsw_core *mlxsw_core)
+{
+ mlxsw_core->emad.enable_string_tlv = true;
+}
+EXPORT_SYMBOL(mlxsw_core_emad_string_tlv_enable);
+
static int __init mlxsw_core_module_init(void)
{
int err;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index 5d7d2ab6d155..543476a2e503 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -11,6 +11,7 @@
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
+#include <linux/net_namespace.h>
#include <net/devlink.h>
#include "trap.h"
@@ -23,6 +24,7 @@ struct mlxsw_core_port;
struct mlxsw_driver;
struct mlxsw_bus;
struct mlxsw_bus_info;
+struct mlxsw_fw_rev;
unsigned int mlxsw_core_max_ports(const struct mlxsw_core *mlxsw_core);
@@ -30,13 +32,18 @@ void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core);
bool mlxsw_core_res_query_enabled(const struct mlxsw_core *mlxsw_core);
+bool
+mlxsw_core_fw_rev_minor_subminor_validate(const struct mlxsw_fw_rev *rev,
+ const struct mlxsw_fw_rev *req_rev);
+
int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver);
void mlxsw_core_driver_unregister(struct mlxsw_driver *mlxsw_driver);
int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
const struct mlxsw_bus *mlxsw_bus,
void *bus_priv, bool reload,
- struct devlink *devlink);
+ struct devlink *devlink,
+ struct netlink_ext_ack *extack);
void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core, bool reload);
struct mlxsw_tx_info {
@@ -193,6 +200,7 @@ enum devlink_port_type mlxsw_core_port_type_get(struct mlxsw_core *mlxsw_core,
struct devlink_port *
mlxsw_core_port_devlink_port_get(struct mlxsw_core *mlxsw_core,
u8 local_port);
+int mlxsw_core_module_max_width(struct mlxsw_core *mlxsw_core, u8 module);
int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay);
bool mlxsw_core_schedule_work(struct work_struct *work);
@@ -252,7 +260,8 @@ struct mlxsw_driver {
const char *kind;
size_t priv_size;
int (*init)(struct mlxsw_core *mlxsw_core,
- const struct mlxsw_bus_info *mlxsw_bus_info);
+ const struct mlxsw_bus_info *mlxsw_bus_info,
+ struct netlink_ext_ack *extack);
void (*fini)(struct mlxsw_core *mlxsw_core);
int (*basic_trap_groups_set)(struct mlxsw_core *mlxsw_core);
int (*port_type_set)(struct mlxsw_core *mlxsw_core, u8 local_port,
@@ -338,6 +347,8 @@ void mlxsw_core_fw_flash_end(struct mlxsw_core *mlxsw_core);
u32 mlxsw_core_read_frc_h(struct mlxsw_core *mlxsw_core);
u32 mlxsw_core_read_frc_l(struct mlxsw_core *mlxsw_core);
+void mlxsw_core_emad_string_tlv_enable(struct mlxsw_core *mlxsw_core);
+
bool mlxsw_core_res_valid(struct mlxsw_core *mlxsw_core,
enum mlxsw_res_id res_id);
@@ -350,6 +361,11 @@ u64 mlxsw_core_res_get(struct mlxsw_core *mlxsw_core,
#define MLXSW_CORE_RES_GET(mlxsw_core, short_res_id) \
mlxsw_core_res_get(mlxsw_core, MLXSW_RES_ID_##short_res_id)
+static inline struct net *mlxsw_core_net(struct mlxsw_core *mlxsw_core)
+{
+ return devlink_net(priv_to_devlink(mlxsw_core));
+}
+
#define MLXSW_BUS_F_TXRX BIT(0)
#define MLXSW_BUS_F_RESET BIT(1)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.c b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
index d2c7ce67c300..08215fed193d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_env.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
@@ -50,6 +50,7 @@ mlxsw_env_query_module_eeprom(struct mlxsw_core *mlxsw_core, int module,
char eeprom_tmp[MLXSW_REG_MCIA_EEPROM_SIZE];
char mcia_pl[MLXSW_REG_MCIA_LEN];
u16 i2c_addr;
+ u8 page = 0;
int status;
int err;
@@ -62,11 +63,21 @@ mlxsw_env_query_module_eeprom(struct mlxsw_core *mlxsw_core, int module,
i2c_addr = MLXSW_REG_MCIA_I2C_ADDR_LOW;
if (offset >= MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH) {
- i2c_addr = MLXSW_REG_MCIA_I2C_ADDR_HIGH;
- offset -= MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH;
+ page = MLXSW_REG_MCIA_PAGE_GET(offset);
+ offset -= MLXSW_REG_MCIA_EEPROM_UP_PAGE_LENGTH * page;
+ /* When reading upper pages 1, 2 and 3 the offset starts at
+ * 128. Please refer to the "QSFP+ Memory Map" figure in the
+ * SFF-8436 specification for a graphical depiction.
+ * The MCIA register accepts buffer sizes <= 48, so a page of
+ * size 128 is read in chunks of 48, 48 and 32 bytes. Trim the
+ * size of the last chunk to avoid reading past the end of the
+ * page.
+ */
+ if (offset + size > MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH)
+ size = MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH - offset;
}
- mlxsw_reg_mcia_pack(mcia_pl, module, 0, 0, offset, size, i2c_addr);
+ mlxsw_reg_mcia_pack(mcia_pl, module, 0, page, offset, size, i2c_addr);
err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mcia), mcia_pl);
if (err)
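A worked example of the page arithmetic above, using hypothetical offsets
(MLXSW_REG_MCIA_PAGE_GET and the upper-page length are defined in the reg.h
hunk below):

/* Linear offset 400, size 48:
 *   page   = (400 - 256) / 128 + 1 = 2
 *   offset = 400 - 128 * 2 = 144      (inside the 128..255 window)
 *   144 + 48 = 192 <= 256, so the size is unchanged.
 * Linear offset 480, size 48:
 *   page   = (480 - 256) / 128 + 1 = 2
 *   offset = 480 - 128 * 2 = 224
 *   224 + 48 = 272 > 256, so size = 256 - 224 = 32 -- the final
 *   48/48/32 chunk of the 128-byte page.
 */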
@@ -168,7 +179,7 @@ int mlxsw_env_get_module_info(struct mlxsw_core *mlxsw_core, int module,
switch (module_id) {
case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP:
modinfo->type = ETH_MODULE_SFF_8436;
- modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN;
break;
case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP_PLUS: /* fall-through */
case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP28:
@@ -176,10 +187,10 @@ int mlxsw_env_get_module_info(struct mlxsw_core *mlxsw_core, int module,
module_rev_id >=
MLXSW_REG_MCIA_EEPROM_MODULE_INFO_REV_ID_8636) {
modinfo->type = ETH_MODULE_SFF_8636;
- modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN;
} else {
modinfo->type = ETH_MODULE_SFF_8436;
- modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN;
}
break;
case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_SFP:
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
index 5b00726c4346..9bf8da5f6daf 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
@@ -41,7 +41,7 @@ struct mlxsw_hwmon {
struct mlxsw_hwmon_attr hwmon_attrs[MLXSW_HWMON_ATTR_COUNT];
unsigned int attrs_count;
u8 sensor_count;
- u8 module_sensor_count;
+ u8 module_sensor_max;
};
static ssize_t mlxsw_hwmon_temp_show(struct device *dev,
@@ -56,7 +56,7 @@ static ssize_t mlxsw_hwmon_temp_show(struct device *dev,
int err;
index = mlxsw_hwmon_get_attr_index(mlwsw_hwmon_attr->type_index,
- mlxsw_hwmon->module_sensor_count);
+ mlxsw_hwmon->module_sensor_max);
mlxsw_reg_mtmp_pack(mtmp_pl, index, false, false);
err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtmp), mtmp_pl);
if (err) {
@@ -79,7 +79,7 @@ static ssize_t mlxsw_hwmon_temp_max_show(struct device *dev,
int err;
index = mlxsw_hwmon_get_attr_index(mlwsw_hwmon_attr->type_index,
- mlxsw_hwmon->module_sensor_count);
+ mlxsw_hwmon->module_sensor_max);
mlxsw_reg_mtmp_pack(mtmp_pl, index, false, false);
err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtmp), mtmp_pl);
if (err) {
@@ -109,7 +109,7 @@ static ssize_t mlxsw_hwmon_temp_rst_store(struct device *dev,
return -EINVAL;
index = mlxsw_hwmon_get_attr_index(mlwsw_hwmon_attr->type_index,
- mlxsw_hwmon->module_sensor_count);
+ mlxsw_hwmon->module_sensor_max);
mlxsw_reg_mtmp_pack(mtmp_pl, index, true, true);
err = mlxsw_reg_write(mlxsw_hwmon->core, MLXSW_REG(mtmp), mtmp_pl);
if (err) {
@@ -336,7 +336,7 @@ mlxsw_hwmon_gbox_temp_label_show(struct device *dev,
container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon;
int index = mlwsw_hwmon_attr->type_index -
- mlxsw_hwmon->module_sensor_count + 1;
+ mlxsw_hwmon->module_sensor_max + 1;
return sprintf(buf, "gearbox %03u\n", index);
}
@@ -528,51 +528,45 @@ static int mlxsw_hwmon_fans_init(struct mlxsw_hwmon *mlxsw_hwmon)
static int mlxsw_hwmon_module_init(struct mlxsw_hwmon *mlxsw_hwmon)
{
- unsigned int module_count = mlxsw_core_max_ports(mlxsw_hwmon->core);
- char pmlp_pl[MLXSW_REG_PMLP_LEN] = {0};
- int i, index;
- u8 width;
- int err;
+ char mgpir_pl[MLXSW_REG_MGPIR_LEN];
+ u8 module_sensor_max;
+ int i, err;
if (!mlxsw_core_res_query_enabled(mlxsw_hwmon->core))
return 0;
+ mlxsw_reg_mgpir_pack(mgpir_pl);
+ err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mgpir), mgpir_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_mgpir_unpack(mgpir_pl, NULL, NULL, NULL,
+ &module_sensor_max);
+
/* Add extra attributes for module temperature. Module sensor
* indices start at sensor_count, since all indices below
* sensor_count are already taken by the sensors connected through
* the mtmp register by mlxsw_hwmon_temp_init().
*/
- index = mlxsw_hwmon->sensor_count;
- for (i = 1; i < module_count; i++) {
- mlxsw_reg_pmlp_pack(pmlp_pl, i);
- err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(pmlp),
- pmlp_pl);
- if (err) {
- dev_err(mlxsw_hwmon->bus_info->dev, "Failed to read module index %d\n",
- i);
- return err;
- }
- width = mlxsw_reg_pmlp_width_get(pmlp_pl);
- if (!width)
- continue;
+ mlxsw_hwmon->module_sensor_max = mlxsw_hwmon->sensor_count +
+ module_sensor_max;
+ for (i = mlxsw_hwmon->sensor_count;
+ i < mlxsw_hwmon->module_sensor_max; i++) {
mlxsw_hwmon_attr_add(mlxsw_hwmon,
- MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE, index,
- index);
+ MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE, i, i);
mlxsw_hwmon_attr_add(mlxsw_hwmon,
MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_FAULT,
- index, index);
+ i, i);
mlxsw_hwmon_attr_add(mlxsw_hwmon,
- MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_CRIT,
- index, index);
+ MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_CRIT, i,
+ i);
mlxsw_hwmon_attr_add(mlxsw_hwmon,
MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_EMERG,
- index, index);
+ i, i);
mlxsw_hwmon_attr_add(mlxsw_hwmon,
MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_LABEL,
- index, index);
- index++;
+ i, i);
}
- mlxsw_hwmon->module_sensor_count = index;
return 0;
}
@@ -590,14 +584,14 @@ static int mlxsw_hwmon_gearbox_init(struct mlxsw_hwmon *mlxsw_hwmon)
if (err)
return err;
- mlxsw_reg_mgpir_unpack(mgpir_pl, &gbox_num, NULL, NULL);
+ mlxsw_reg_mgpir_unpack(mgpir_pl, &gbox_num, NULL, NULL, NULL);
if (!gbox_num)
return 0;
- index = mlxsw_hwmon->module_sensor_count;
- max_index = mlxsw_hwmon->module_sensor_count + gbox_num;
+ index = mlxsw_hwmon->module_sensor_max;
+ max_index = mlxsw_hwmon->module_sensor_max + gbox_num;
while (index < max_index) {
- sensor_index = index % mlxsw_hwmon->module_sensor_count +
+ sensor_index = index % mlxsw_hwmon->module_sensor_max +
MLXSW_REG_MTMP_GBOX_INDEX_MIN;
mlxsw_reg_mtmp_pack(mtmp_pl, sensor_index, true, true);
err = mlxsw_reg_write(mlxsw_hwmon->core,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
index 35a1dc89c28a..c721b171bd8d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
@@ -112,6 +112,7 @@ struct mlxsw_thermal {
struct mlxsw_thermal_trip trips[MLXSW_THERMAL_NUM_TRIPS];
enum thermal_device_mode mode;
struct mlxsw_thermal_module *tz_module_arr;
+ u8 tz_module_num;
struct mlxsw_thermal_module *tz_gearbox_arr;
u8 tz_gearbox_num;
unsigned int tz_highest_score;
@@ -775,23 +776,10 @@ static void mlxsw_thermal_module_tz_fini(struct thermal_zone_device *tzdev)
static int
mlxsw_thermal_module_init(struct device *dev, struct mlxsw_core *core,
- struct mlxsw_thermal *thermal, u8 local_port)
+ struct mlxsw_thermal *thermal, u8 module)
{
struct mlxsw_thermal_module *module_tz;
- char pmlp_pl[MLXSW_REG_PMLP_LEN];
- u8 width, module;
- int err;
-
- mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
- err = mlxsw_reg_query(core, MLXSW_REG(pmlp), pmlp_pl);
- if (err)
- return err;
- width = mlxsw_reg_pmlp_width_get(pmlp_pl);
- if (!width)
- return 0;
-
- module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
module_tz = &thermal->tz_module_arr[module];
/* Skip if parent is already set (case of port split). */
if (module_tz->parent)
@@ -819,26 +807,34 @@ static int
mlxsw_thermal_modules_init(struct device *dev, struct mlxsw_core *core,
struct mlxsw_thermal *thermal)
{
- unsigned int module_count = mlxsw_core_max_ports(core);
struct mlxsw_thermal_module *module_tz;
+ char mgpir_pl[MLXSW_REG_MGPIR_LEN];
int i, err;
if (!mlxsw_core_res_query_enabled(core))
return 0;
- thermal->tz_module_arr = kcalloc(module_count,
+ mlxsw_reg_mgpir_pack(mgpir_pl);
+ err = mlxsw_reg_query(core, MLXSW_REG(mgpir), mgpir_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_mgpir_unpack(mgpir_pl, NULL, NULL, NULL,
+ &thermal->tz_module_num);
+
+ thermal->tz_module_arr = kcalloc(thermal->tz_module_num,
sizeof(*thermal->tz_module_arr),
GFP_KERNEL);
if (!thermal->tz_module_arr)
return -ENOMEM;
- for (i = 1; i < module_count; i++) {
+ for (i = 0; i < thermal->tz_module_num; i++) {
err = mlxsw_thermal_module_init(dev, core, thermal, i);
if (err)
goto err_unreg_tz_module_arr;
}
- for (i = 0; i < module_count - 1; i++) {
+ for (i = 0; i < thermal->tz_module_num; i++) {
module_tz = &thermal->tz_module_arr[i];
if (!module_tz->parent)
continue;
@@ -850,7 +846,7 @@ mlxsw_thermal_modules_init(struct device *dev, struct mlxsw_core *core,
return 0;
err_unreg_tz_module_arr:
- for (i = module_count - 1; i >= 0; i--)
+ for (i = thermal->tz_module_num - 1; i >= 0; i--)
mlxsw_thermal_module_fini(&thermal->tz_module_arr[i]);
kfree(thermal->tz_module_arr);
return err;
@@ -859,13 +855,12 @@ err_unreg_tz_module_arr:
static void
mlxsw_thermal_modules_fini(struct mlxsw_thermal *thermal)
{
- unsigned int module_count = mlxsw_core_max_ports(thermal->core);
int i;
if (!mlxsw_core_res_query_enabled(thermal->core))
return;
- for (i = module_count - 1; i >= 0; i--)
+ for (i = thermal->tz_module_num - 1; i >= 0; i--)
mlxsw_thermal_module_fini(&thermal->tz_module_arr[i]);
kfree(thermal->tz_module_arr);
}
@@ -913,7 +908,8 @@ mlxsw_thermal_gearboxes_init(struct device *dev, struct mlxsw_core *core,
if (err)
return err;
- mlxsw_reg_mgpir_unpack(mgpir_pl, &thermal->tz_gearbox_num, NULL, NULL);
+ mlxsw_reg_mgpir_unpack(mgpir_pl, &thermal->tz_gearbox_num, NULL, NULL,
+ NULL);
if (!thermal->tz_gearbox_num)
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/emad.h b/drivers/net/ethernet/mellanox/mlxsw/emad.h
index a33b896f4bb8..acfbbec52424 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/emad.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/emad.h
@@ -19,10 +19,8 @@
enum {
MLXSW_EMAD_TLV_TYPE_END,
MLXSW_EMAD_TLV_TYPE_OP,
- MLXSW_EMAD_TLV_TYPE_DR,
+ MLXSW_EMAD_TLV_TYPE_STRING,
MLXSW_EMAD_TLV_TYPE_REG,
- MLXSW_EMAD_TLV_TYPE_USERDATA,
- MLXSW_EMAD_TLV_TYPE_OOBETH,
};
/* OP TLV */
@@ -89,6 +87,9 @@ enum {
MLXSW_EMAD_OP_TLV_METHOD_EVENT = 5,
};
+/* STRING TLV */
+#define MLXSW_EMAD_STRING_TLV_LEN 33 /* Length in u32 */
+
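The length works out to 33 u32s = 132 bytes; assuming the usual EMAD TLV
layout, that is a 4-byte TLV header plus a 128-byte string buffer for the
firmware's textual error description.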
/* END TLV */
#define MLXSW_EMAD_END_TLV_LEN 1 /* Length in u32 */
diff --git a/drivers/net/ethernet/mellanox/mlxsw/i2c.c b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
index 95f408d0e103..34566eb62c47 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/i2c.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
@@ -640,7 +640,7 @@ static int mlxsw_i2c_probe(struct i2c_client *client,
err = mlxsw_core_bus_device_register(&mlxsw_i2c->bus_info,
&mlxsw_i2c_bus, mlxsw_i2c, false,
- NULL);
+ NULL, NULL);
if (err) {
dev_err(&client->dev, "Fail to register core bus\n");
return err;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/minimal.c b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
index 471b0ca6d69a..2b543911ae00 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/minimal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
@@ -16,6 +16,14 @@
static const char mlxsw_m_driver_name[] = "mlxsw_minimal";
+#define MLXSW_M_FWREV_MINOR 2000
+#define MLXSW_M_FWREV_SUBMINOR 1886
+
+static const struct mlxsw_fw_rev mlxsw_m_fw_rev = {
+ .minor = MLXSW_M_FWREV_MINOR,
+ .subminor = MLXSW_M_FWREV_SUBMINOR,
+};
+
struct mlxsw_m_port;
struct mlxsw_m {
@@ -172,6 +180,7 @@ mlxsw_m_port_create(struct mlxsw_m *mlxsw_m, u8 local_port, u8 module)
}
SET_NETDEV_DEV(dev, mlxsw_m->bus_info->dev);
+ dev_net_set(dev, mlxsw_core_net(mlxsw_m->core));
mlxsw_m_port = netdev_priv(dev);
mlxsw_m_port->dev = dev;
mlxsw_m_port->mlxsw_m = mlxsw_m;
@@ -325,8 +334,27 @@ static void mlxsw_m_ports_remove(struct mlxsw_m *mlxsw_m)
kfree(mlxsw_m->ports);
}
+static int mlxsw_m_fw_rev_validate(struct mlxsw_m *mlxsw_m)
+{
+ const struct mlxsw_fw_rev *rev = &mlxsw_m->bus_info->fw_rev;
+
+ /* Validate driver and FW are compatible.
+ * Do not check major version, since it defines chip type, while
+ * driver is supposed to support any type.
+ */
+ if (mlxsw_core_fw_rev_minor_subminor_validate(rev, &mlxsw_m_fw_rev))
+ return 0;
+
+ dev_err(mlxsw_m->bus_info->dev, "The firmware version %d.%d.%d is incompatible with the driver (required >= %d.%d.%d)\n",
+ rev->major, rev->minor, rev->subminor, rev->major,
+ mlxsw_m_fw_rev.minor, mlxsw_m_fw_rev.subminor);
+
+ return -EINVAL;
+}
+
static int mlxsw_m_init(struct mlxsw_core *mlxsw_core,
- const struct mlxsw_bus_info *mlxsw_bus_info)
+ const struct mlxsw_bus_info *mlxsw_bus_info,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_m *mlxsw_m = mlxsw_core_driver_priv(mlxsw_core);
int err;
@@ -334,6 +362,10 @@ static int mlxsw_m_init(struct mlxsw_core *mlxsw_core,
mlxsw_m->core = mlxsw_core;
mlxsw_m->bus_info = mlxsw_bus_info;
+ err = mlxsw_m_fw_rev_validate(mlxsw_m);
+ if (err)
+ return err;
+
err = mlxsw_m_base_mac_get(mlxsw_m);
if (err) {
dev_err(mlxsw_m->bus_info->dev, "Failed to get base mac\n");
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index 615455a21567..914c33e46fb4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -284,15 +284,18 @@ static dma_addr_t __mlxsw_pci_queue_page_get(struct mlxsw_pci_queue *q,
static int mlxsw_pci_sdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
struct mlxsw_pci_queue *q)
{
+ int tclass;
int i;
int err;
q->producer_counter = 0;
q->consumer_counter = 0;
+ tclass = q->num == MLXSW_PCI_SDQ_EMAD_INDEX ? MLXSW_PCI_SDQ_EMAD_TC :
+ MLXSW_PCI_SDQ_CTL_TC;
/* Set the CQ to the same number as this SDQ. */
mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, q->num);
- mlxsw_cmd_mbox_sw2hw_dq_sdq_tclass_set(mbox, 3);
+ mlxsw_cmd_mbox_sw2hw_dq_sdq_tclass_set(mbox, tclass);
mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */
for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);
@@ -963,6 +966,7 @@ static int mlxsw_pci_aqs_init(struct mlxsw_pci *mlxsw_pci, char *mbox)
eq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_eq_sz_get(mbox);
if (num_sdqs + num_rdqs > num_cqs ||
+ num_sdqs < MLXSW_PCI_SDQS_MIN ||
num_cqs > MLXSW_PCI_CQS_MAX || num_eqs != MLXSW_PCI_EQS_COUNT) {
dev_err(&pdev->dev, "Unsupported number of queues\n");
return -EINVAL;
@@ -1520,7 +1524,15 @@ static struct mlxsw_pci_queue *
mlxsw_pci_sdq_pick(struct mlxsw_pci *mlxsw_pci,
const struct mlxsw_tx_info *tx_info)
{
- u8 sdqn = tx_info->local_port % mlxsw_pci_sdq_count(mlxsw_pci);
+ u8 ctl_sdq_count = mlxsw_pci_sdq_count(mlxsw_pci) - 1;
+ u8 sdqn;
+
+ if (tx_info->is_emad) {
+ sdqn = MLXSW_PCI_SDQ_EMAD_INDEX;
+ } else {
+ BUILD_BUG_ON(MLXSW_PCI_SDQ_EMAD_INDEX != 0);
+ sdqn = 1 + (tx_info->local_port % ctl_sdq_count);
+ }
return mlxsw_pci_sdq_get(mlxsw_pci, sdqn);
}
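A quick check of the queue selection above, assuming the minimum of two SDQs
enforced by MLXSW_PCI_SDQS_MIN in the pci_hw.h hunk below:

/* With 2 SDQs: ctl_sdq_count = 1, EMADs use SDQ 0 and every data
 * packet maps to SDQ 1 (local_port % 1 == 0, so sdqn = 1 + 0).
 * With 4 SDQs: ctl_sdq_count = 3, so e.g. local_port 5 maps to
 * sdqn = 1 + (5 % 3) = 3.
 */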
@@ -1790,7 +1802,7 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err = mlxsw_core_bus_device_register(&mlxsw_pci->bus_info,
&mlxsw_pci_bus, mlxsw_pci, false,
- NULL);
+ NULL, NULL);
if (err) {
dev_err(&pdev->dev, "cannot register bus device\n");
goto err_bus_device_register;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
index e57e42e2d2b2..e0d7d2d9a0c8 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
@@ -27,7 +27,7 @@
#define MLXSW_PCI_SW_RESET 0xF0010
#define MLXSW_PCI_SW_RESET_RST_BIT BIT(0)
-#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 20000
+#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 900000
#define MLXSW_PCI_SW_RESET_WAIT_MSECS 100
#define MLXSW_PCI_FW_READY 0xA1844
#define MLXSW_PCI_FW_READY_MASK 0xFFFF
@@ -51,6 +51,11 @@
#define MLXSW_PCI_EQ_ASYNC_NUM 0
#define MLXSW_PCI_EQ_COMP_NUM 1
+#define MLXSW_PCI_SDQS_MIN 2 /* EMAD and control traffic */
+#define MLXSW_PCI_SDQ_EMAD_INDEX 0
+#define MLXSW_PCI_SDQ_EMAD_TC 0
+#define MLXSW_PCI_SDQ_CTL_TC 3
+
#define MLXSW_PCI_AQ_PAGES 8
#define MLXSW_PCI_AQ_SIZE (MLXSW_PCI_PAGE_SIZE * MLXSW_PCI_AQ_PAGES)
#define MLXSW_PCI_WQE_SIZE 32 /* 32 bytes per element */
diff --git a/drivers/net/ethernet/mellanox/mlxsw/port.h b/drivers/net/ethernet/mellanox/mlxsw/port.h
index a33eeef0b00c..741fd2989d12 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/port.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/port.h
@@ -24,8 +24,6 @@
#define MLXSW_PORT_DONT_CARE 0xFF
-#define MLXSW_PORT_MODULE_MAX_WIDTH 4
-
enum mlxsw_port_admin_status {
MLXSW_PORT_ADMIN_STATUS_UP = 1,
MLXSW_PORT_ADMIN_STATUS_DOWN = 2,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 5494cf93f34c..5294a1622643 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -3969,6 +3969,7 @@ MLXSW_ITEM32(reg, pmlp, local_port, 0x00, 16, 8);
* 1 - Lane 0 is used.
* 2 - Lanes 0 and 1 are used.
* 4 - Lanes 0, 1, 2 and 3 are used.
+ * 8 - Lanes 0-7 are used.
* Access: RW
*/
MLXSW_ITEM32(reg, pmlp, width, 0x00, 0, 8);
@@ -3983,14 +3984,14 @@ MLXSW_ITEM32_INDEXED(reg, pmlp, module, 0x04, 0, 8, 0x04, 0x00, false);
* Tx Lane. When rxtx field is cleared, this field is used for Rx as well.
* Access: RW
*/
-MLXSW_ITEM32_INDEXED(reg, pmlp, tx_lane, 0x04, 16, 2, 0x04, 0x00, false);
+MLXSW_ITEM32_INDEXED(reg, pmlp, tx_lane, 0x04, 16, 4, 0x04, 0x00, false);
/* reg_pmlp_rx_lane
* Rx Lane. When rxtx field is cleared, this field is ignored and Rx lane is
* equal to Tx lane.
* Access: RW
*/
-MLXSW_ITEM32_INDEXED(reg, pmlp, rx_lane, 0x04, 24, 2, 0x04, 0x00, false);
+MLXSW_ITEM32_INDEXED(reg, pmlp, rx_lane, 0x04, 24, 4, 0x04, 0x00, false);
static inline void mlxsw_reg_pmlp_pack(char *payload, u8 local_port)
{
@@ -4111,6 +4112,7 @@ MLXSW_ITEM32(reg, ptys, an_status, 0x04, 28, 4);
#define MLXSW_REG_PTYS_EXT_ETH_SPEED_CAUI_4_100GBASE_CR4_KR4 BIT(9)
#define MLXSW_REG_PTYS_EXT_ETH_SPEED_100GAUI_2_100GBASE_CR2_KR2 BIT(10)
#define MLXSW_REG_PTYS_EXT_ETH_SPEED_200GAUI_4_200GBASE_CR4_KR4 BIT(12)
+#define MLXSW_REG_PTYS_EXT_ETH_SPEED_400GAUI_8 BIT(15)
/* reg_ptys_ext_eth_proto_cap
* Extended Ethernet port supported speeds and protocols.
@@ -5373,6 +5375,55 @@ static inline void mlxsw_reg_pplr_pack(char *payload, u8 local_port,
MLXSW_REG_PPLR_LB_TYPE_BIT_PHY_LOCAL : 0);
}
+/* PMTM - Port Module Type Mapping Register
+ * ----------------------------------------
+ * The PMTM allows query or configuration of module types.
+ */
+#define MLXSW_REG_PMTM_ID 0x5067
+#define MLXSW_REG_PMTM_LEN 0x10
+
+MLXSW_REG_DEFINE(pmtm, MLXSW_REG_PMTM_ID, MLXSW_REG_PMTM_LEN);
+
+/* reg_pmtm_module
+ * Module number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pmtm, module, 0x00, 16, 8);
+
+enum mlxsw_reg_pmtm_module_type {
+ /* Backplane with 4 lanes */
+ MLXSW_REG_PMTM_MODULE_TYPE_BP_4X,
+ /* QSFP */
+ MLXSW_REG_PMTM_MODULE_TYPE_BP_QSFP,
+ /* SFP */
+ MLXSW_REG_PMTM_MODULE_TYPE_BP_SFP,
+ /* Backplane with single lane */
+ MLXSW_REG_PMTM_MODULE_TYPE_BP_1X = 4,
+ /* Backplane with two lanes */
+ MLXSW_REG_PMTM_MODULE_TYPE_BP_2X = 8,
+ /* Chip2Chip */
+ MLXSW_REG_PMTM_MODULE_TYPE_C2C = 10,
+};
+
+/* reg_pmtm_module_type
+ * Module type.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pmtm, module_type, 0x04, 0, 4);
+
+static inline void mlxsw_reg_pmtm_pack(char *payload, u8 module)
+{
+ MLXSW_REG_ZERO(pmtm, payload);
+ mlxsw_reg_pmtm_module_set(payload, module);
+}
+
+static inline void
+mlxsw_reg_pmtm_unpack(char *payload,
+ enum mlxsw_reg_pmtm_module_type *module_type)
+{
+ *module_type = mlxsw_reg_pmtm_module_type_get(payload);
+}
+
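The mlxsw_core_module_max_width() helper exported at the top of the core.c
hunk presumably queries PMTM and translates the module type into a lane
count. A sketch under that assumption, with the mapping inferred from the
lane counts the type names imply (the hypothetical _sketch suffix marks it
as illustrative):

static int mlxsw_core_module_max_width_sketch(struct mlxsw_core *mlxsw_core,
					      u8 module)
{
	enum mlxsw_reg_pmtm_module_type module_type;
	char pmtm_pl[MLXSW_REG_PMTM_LEN];
	int err;

	mlxsw_reg_pmtm_pack(pmtm_pl, module);
	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtm), pmtm_pl);
	if (err)
		return err;
	mlxsw_reg_pmtm_unpack(pmtm_pl, &module_type);

	switch (module_type) {
	case MLXSW_REG_PMTM_MODULE_TYPE_BP_1X:
		return 1;
	case MLXSW_REG_PMTM_MODULE_TYPE_BP_2X:
		return 2;
	case MLXSW_REG_PMTM_MODULE_TYPE_BP_4X:
	case MLXSW_REG_PMTM_MODULE_TYPE_BP_QSFP:
		return 4;
	default:
		return -EINVAL;
	}
}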
/* HTGT - Host Trap Group Table
* ----------------------------
* Configures the properties for forwarding to CPU.
@@ -5429,6 +5480,7 @@ enum mlxsw_reg_htgt_trap_group {
enum mlxsw_reg_htgt_discard_trap_group {
MLXSW_REG_HTGT_DISCARD_TRAP_GROUP_BASE = MLXSW_REG_HTGT_TRAP_GROUP_MAX,
MLXSW_REG_HTGT_TRAP_GROUP_SP_L2_DISCARDS,
+ MLXSW_REG_HTGT_TRAP_GROUP_SP_L3_DISCARDS,
};
/* reg_htgt_trap_group
@@ -8411,6 +8463,7 @@ MLXSW_ITEM32(reg, mcia, device_address, 0x04, 0, 16);
MLXSW_ITEM32(reg, mcia, size, 0x08, 0, 16);
#define MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH 256
+#define MLXSW_REG_MCIA_EEPROM_UP_PAGE_LENGTH 128
#define MLXSW_REG_MCIA_EEPROM_SIZE 48
#define MLXSW_REG_MCIA_I2C_ADDR_LOW 0x50
#define MLXSW_REG_MCIA_I2C_ADDR_HIGH 0x51
@@ -8446,6 +8499,14 @@ enum mlxsw_reg_mcia_eeprom_module_info {
*/
MLXSW_ITEM_BUF(reg, mcia, eeprom, 0x10, MLXSW_REG_MCIA_EEPROM_SIZE);
+/* This is used to access the optional upper pages (1-3) in the QSFP+
+ * memory map. Page 1 covers offsets 256 through 383, page 2 covers
+ * offsets 384 through 511 and page 3 covers offsets 512 through 639.
+ */
+#define MLXSW_REG_MCIA_PAGE_GET(off) (((off) - \
+ MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH) / \
+ MLXSW_REG_MCIA_EEPROM_UP_PAGE_LENGTH + 1)
+
static inline void mlxsw_reg_mcia_pack(char *payload, u8 module, u8 lock,
u8 page_number, u16 device_addr,
u8 size, u8 i2c_device_addr)
@@ -8670,7 +8731,7 @@ mlxsw_reg_mpat_eth_rspan_l3_ipv6_pack(char *payload, u8 ttl,
* properties.
*/
#define MLXSW_REG_MPAR_ID 0x901B
-#define MLXSW_REG_MPAR_LEN 0x08
+#define MLXSW_REG_MPAR_LEN 0x0C
MLXSW_REG_DEFINE(mpar, MLXSW_REG_MPAR_ID, MLXSW_REG_MPAR_LEN);
@@ -9531,6 +9592,12 @@ MLXSW_ITEM32(reg, mgpir, devices_per_flash, 0x00, 16, 8);
*/
MLXSW_ITEM32(reg, mgpir, num_of_devices, 0x00, 0, 8);
+/* num_of_modules
+ * Number of modules.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mgpir, num_of_modules, 0x04, 0, 8);
+
static inline void mlxsw_reg_mgpir_pack(char *payload)
{
MLXSW_REG_ZERO(mgpir, payload);
@@ -9539,7 +9606,7 @@ static inline void mlxsw_reg_mgpir_pack(char *payload)
static inline void
mlxsw_reg_mgpir_unpack(char *payload, u8 *num_of_devices,
enum mlxsw_reg_mgpir_device_type *device_type,
- u8 *devices_per_flash)
+ u8 *devices_per_flash, u8 *num_of_modules)
{
if (num_of_devices)
*num_of_devices = mlxsw_reg_mgpir_num_of_devices_get(payload);
@@ -9548,6 +9615,8 @@ mlxsw_reg_mgpir_unpack(char *payload, u8 *num_of_devices,
if (devices_per_flash)
*devices_per_flash =
mlxsw_reg_mgpir_devices_per_flash_get(payload);
+ if (num_of_modules)
+ *num_of_modules = mlxsw_reg_mgpir_num_of_modules_get(payload);
}
/* TNGCR - Tunneling NVE General Configuration Register
@@ -10526,6 +10595,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(pbmc),
MLXSW_REG(pspa),
MLXSW_REG(pplr),
+ MLXSW_REG(pmtm),
MLXSW_REG(htgt),
MLXSW_REG(hpkt),
MLXSW_REG(rgcr),
diff --git a/drivers/net/ethernet/mellanox/mlxsw/resources.h b/drivers/net/ethernet/mellanox/mlxsw/resources.h
index 33a9fc9ef6a4..6534184cb942 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/resources.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/resources.h
@@ -26,7 +26,8 @@ enum mlxsw_res_id {
MLXSW_RES_ID_MAX_LAG_MEMBERS,
MLXSW_RES_ID_LOCAL_PORTS_IN_1X,
MLXSW_RES_ID_LOCAL_PORTS_IN_2X,
- MLXSW_RES_ID_MAX_BUFFER_SIZE,
+ MLXSW_RES_ID_LOCAL_PORTS_IN_4X,
+ MLXSW_RES_ID_GUARANTEED_SHARED_BUFFER,
MLXSW_RES_ID_CELL_SIZE,
MLXSW_RES_ID_MAX_HEADROOM_SIZE,
MLXSW_RES_ID_ACL_MAX_TCAM_REGIONS,
@@ -82,7 +83,8 @@ static u16 mlxsw_res_ids[] = {
[MLXSW_RES_ID_MAX_LAG_MEMBERS] = 0x2521,
[MLXSW_RES_ID_LOCAL_PORTS_IN_1X] = 0x2610,
[MLXSW_RES_ID_LOCAL_PORTS_IN_2X] = 0x2611,
- [MLXSW_RES_ID_MAX_BUFFER_SIZE] = 0x2802, /* Bytes */
+ [MLXSW_RES_ID_LOCAL_PORTS_IN_4X] = 0x2612,
+ [MLXSW_RES_ID_GUARANTEED_SHARED_BUFFER] = 0x2805, /* Bytes */
[MLXSW_RES_ID_CELL_SIZE] = 0x2803, /* Bytes */
[MLXSW_RES_ID_MAX_HEADROOM_SIZE] = 0x2811, /* Bytes */
[MLXSW_RES_ID_ACL_MAX_TCAM_REGIONS] = 0x2901,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index dcf9562bce8a..556dca328bb5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -22,6 +22,7 @@
#include <linux/inetdevice.h>
#include <linux/netlink.h>
#include <linux/jhash.h>
+#include <linux/log2.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_mirred.h>
@@ -48,7 +49,7 @@
#define MLXSW_SP1_FWREV_MAJOR 13
#define MLXSW_SP1_FWREV_MINOR 2000
-#define MLXSW_SP1_FWREV_SUBMINOR 1886
+#define MLXSW_SP1_FWREV_SUBMINOR 2308
#define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702
static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
@@ -63,6 +64,21 @@ static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
"." __stringify(MLXSW_SP1_FWREV_MINOR) \
"." __stringify(MLXSW_SP1_FWREV_SUBMINOR) ".mfa2"
+#define MLXSW_SP2_FWREV_MAJOR 29
+#define MLXSW_SP2_FWREV_MINOR 2000
+#define MLXSW_SP2_FWREV_SUBMINOR 2308
+
+static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = {
+ .major = MLXSW_SP2_FWREV_MAJOR,
+ .minor = MLXSW_SP2_FWREV_MINOR,
+ .subminor = MLXSW_SP2_FWREV_SUBMINOR,
+};
+
+#define MLXSW_SP2_FW_FILENAME \
+ "mellanox/mlxsw_spectrum2-" __stringify(MLXSW_SP2_FWREV_MAJOR) \
+ "." __stringify(MLXSW_SP2_FWREV_MINOR) \
+ "." __stringify(MLXSW_SP2_FWREV_SUBMINOR) ".mfa2"
+
static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2";
static const char mlxsw_sp3_driver_name[] = "mlxsw_spectrum3";
@@ -409,9 +425,7 @@ static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp)
}
if (MLXSW_SP_FWREV_MINOR_TO_BRANCH(rev->minor) ==
MLXSW_SP_FWREV_MINOR_TO_BRANCH(req_rev->minor) &&
- (rev->minor > req_rev->minor ||
- (rev->minor == req_rev->minor &&
- rev->subminor >= req_rev->subminor)))
+ mlxsw_core_fw_rev_minor_subminor_validate(rev, req_rev))
return 0;
dev_info(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d is incompatible with the driver\n",
@@ -735,35 +749,69 @@ mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
}
-static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
- u8 local_port, u8 *p_module,
- u8 *p_width, u8 *p_lane)
+static int
+mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+ struct mlxsw_sp_port_mapping *port_mapping)
{
char pmlp_pl[MLXSW_REG_PMLP_LEN];
+ bool separate_rxtx;
+ u8 module;
+ u8 width;
int err;
+ int i;
mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
if (err)
return err;
- *p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
- *p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
- *p_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
+ module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
+ width = mlxsw_reg_pmlp_width_get(pmlp_pl);
+ separate_rxtx = mlxsw_reg_pmlp_rxtx_get(pmlp_pl);
+
+ if (width && !is_power_of_2(width)) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: width value is not power of 2\n",
+ local_port);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < width; i++) {
+ if (mlxsw_reg_pmlp_module_get(pmlp_pl, i) != module) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: contains multiple modules\n",
+ local_port);
+ return -EINVAL;
+ }
+ if (separate_rxtx &&
+ mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) !=
+ mlxsw_reg_pmlp_rx_lane_get(pmlp_pl, i)) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are different\n",
+ local_port);
+ return -EINVAL;
+ }
+ if (mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) != i) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are not sequential\n",
+ local_port);
+ return -EINVAL;
+ }
+ }
+
+ port_mapping->module = module;
+ port_mapping->width = width;
+ port_mapping->lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
return 0;
}
-static int mlxsw_sp_port_module_map(struct mlxsw_sp_port *mlxsw_sp_port,
- u8 module, u8 width, u8 lane)
+static int mlxsw_sp_port_module_map(struct mlxsw_sp_port *mlxsw_sp_port)
{
+ struct mlxsw_sp_port_mapping *port_mapping = &mlxsw_sp_port->mapping;
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
char pmlp_pl[MLXSW_REG_PMLP_LEN];
int i;
mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
- mlxsw_reg_pmlp_width_set(pmlp_pl, width);
- for (i = 0; i < width; i++) {
- mlxsw_reg_pmlp_module_set(pmlp_pl, i, module);
- mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, lane + i); /* Rx & Tx */
+ mlxsw_reg_pmlp_width_set(pmlp_pl, port_mapping->width);
+ for (i = 0; i < port_mapping->width; i++) {
+ mlxsw_reg_pmlp_module_set(pmlp_pl, i, port_mapping->module);
+ mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, port_mapping->lane + i); /* Rx & Tx */
}
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
@@ -2914,9 +2962,22 @@ mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4[] = {
#define MLXSW_SP2_MASK_ETHTOOL_200GAUI_4_200GBASE_CR4_KR4_LEN \
ARRAY_SIZE(mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4)
+static const enum ethtool_link_mode_bit_indices
+mlxsw_sp2_mask_ethtool_400gaui_8[] = {
+ ETHTOOL_LINK_MODE_400000baseKR8_Full_BIT,
+ ETHTOOL_LINK_MODE_400000baseSR8_Full_BIT,
+ ETHTOOL_LINK_MODE_400000baseLR8_ER8_FR8_Full_BIT,
+ ETHTOOL_LINK_MODE_400000baseDR8_Full_BIT,
+ ETHTOOL_LINK_MODE_400000baseCR8_Full_BIT,
+};
+
+#define MLXSW_SP2_MASK_ETHTOOL_400GAUI_8_LEN \
+ ARRAY_SIZE(mlxsw_sp2_mask_ethtool_400gaui_8)
+
#define MLXSW_SP_PORT_MASK_WIDTH_1X BIT(0)
#define MLXSW_SP_PORT_MASK_WIDTH_2X BIT(1)
#define MLXSW_SP_PORT_MASK_WIDTH_4X BIT(2)
+#define MLXSW_SP_PORT_MASK_WIDTH_8X BIT(3)
static u8 mlxsw_sp_port_mask_width_get(u8 width)
{
@@ -2927,6 +2988,8 @@ static u8 mlxsw_sp_port_mask_width_get(u8 width)
return MLXSW_SP_PORT_MASK_WIDTH_2X;
case 4:
return MLXSW_SP_PORT_MASK_WIDTH_4X;
+ case 8:
+ return MLXSW_SP_PORT_MASK_WIDTH_8X;
default:
WARN_ON_ONCE(1);
return 0;
@@ -2948,7 +3011,8 @@ static const struct mlxsw_sp2_port_link_mode mlxsw_sp2_port_link_mode[] = {
.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_SGMII_100M_LEN,
.mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
MLXSW_SP_PORT_MASK_WIDTH_2X |
- MLXSW_SP_PORT_MASK_WIDTH_4X,
+ MLXSW_SP_PORT_MASK_WIDTH_4X |
+ MLXSW_SP_PORT_MASK_WIDTH_8X,
.speed = SPEED_100,
},
{
@@ -2957,7 +3021,8 @@ static const struct mlxsw_sp2_port_link_mode mlxsw_sp2_port_link_mode[] = {
.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_1000BASE_X_SGMII_LEN,
.mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
MLXSW_SP_PORT_MASK_WIDTH_2X |
- MLXSW_SP_PORT_MASK_WIDTH_4X,
+ MLXSW_SP_PORT_MASK_WIDTH_4X |
+ MLXSW_SP_PORT_MASK_WIDTH_8X,
.speed = SPEED_1000,
},
{
@@ -2966,7 +3031,8 @@ static const struct mlxsw_sp2_port_link_mode mlxsw_sp2_port_link_mode[] = {
.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_2_5GBASE_X_2_5GMII_LEN,
.mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
MLXSW_SP_PORT_MASK_WIDTH_2X |
- MLXSW_SP_PORT_MASK_WIDTH_4X,
+ MLXSW_SP_PORT_MASK_WIDTH_4X |
+ MLXSW_SP_PORT_MASK_WIDTH_8X,
.speed = SPEED_2500,
},
{
@@ -2975,7 +3041,8 @@ static const struct mlxsw_sp2_port_link_mode mlxsw_sp2_port_link_mode[] = {
.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_5GBASE_R_LEN,
.mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
MLXSW_SP_PORT_MASK_WIDTH_2X |
- MLXSW_SP_PORT_MASK_WIDTH_4X,
+ MLXSW_SP_PORT_MASK_WIDTH_4X |
+ MLXSW_SP_PORT_MASK_WIDTH_8X,
.speed = SPEED_5000,
},
{
@@ -2984,14 +3051,16 @@ static const struct mlxsw_sp2_port_link_mode mlxsw_sp2_port_link_mode[] = {
.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_XFI_XAUI_1_10G_LEN,
.mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
MLXSW_SP_PORT_MASK_WIDTH_2X |
- MLXSW_SP_PORT_MASK_WIDTH_4X,
+ MLXSW_SP_PORT_MASK_WIDTH_4X |
+ MLXSW_SP_PORT_MASK_WIDTH_8X,
.speed = SPEED_10000,
},
{
.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_XLAUI_4_XLPPI_4_40G,
.mask_ethtool = mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g,
.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_XLAUI_4_XLPPI_4_40G_LEN,
- .mask_width = MLXSW_SP_PORT_MASK_WIDTH_4X,
+ .mask_width = MLXSW_SP_PORT_MASK_WIDTH_4X |
+ MLXSW_SP_PORT_MASK_WIDTH_8X,
.speed = SPEED_40000,
},
{
@@ -3000,7 +3069,8 @@ static const struct mlxsw_sp2_port_link_mode mlxsw_sp2_port_link_mode[] = {
.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_25GAUI_1_25GBASE_CR_KR_LEN,
.mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
MLXSW_SP_PORT_MASK_WIDTH_2X |
- MLXSW_SP_PORT_MASK_WIDTH_4X,
+ MLXSW_SP_PORT_MASK_WIDTH_4X |
+ MLXSW_SP_PORT_MASK_WIDTH_8X,
.speed = SPEED_25000,
},
{
@@ -3008,7 +3078,8 @@ static const struct mlxsw_sp2_port_link_mode mlxsw_sp2_port_link_mode[] = {
.mask_ethtool = mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2,
.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_50GAUI_2_LAUI_2_50GBASE_CR2_KR2_LEN,
.mask_width = MLXSW_SP_PORT_MASK_WIDTH_2X |
- MLXSW_SP_PORT_MASK_WIDTH_4X,
+ MLXSW_SP_PORT_MASK_WIDTH_4X |
+ MLXSW_SP_PORT_MASK_WIDTH_8X,
.speed = SPEED_50000,
},
{
@@ -3022,7 +3093,8 @@ static const struct mlxsw_sp2_port_link_mode mlxsw_sp2_port_link_mode[] = {
.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_CAUI_4_100GBASE_CR4_KR4,
.mask_ethtool = mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4,
.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_CAUI_4_100GBASE_CR4_KR4_LEN,
- .mask_width = MLXSW_SP_PORT_MASK_WIDTH_4X,
+ .mask_width = MLXSW_SP_PORT_MASK_WIDTH_4X |
+ MLXSW_SP_PORT_MASK_WIDTH_8X,
.speed = SPEED_100000,
},
{
@@ -3036,9 +3108,17 @@ static const struct mlxsw_sp2_port_link_mode mlxsw_sp2_port_link_mode[] = {
.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_200GAUI_4_200GBASE_CR4_KR4,
.mask_ethtool = mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4,
.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_200GAUI_4_200GBASE_CR4_KR4_LEN,
- .mask_width = MLXSW_SP_PORT_MASK_WIDTH_4X,
+ .mask_width = MLXSW_SP_PORT_MASK_WIDTH_4X |
+ MLXSW_SP_PORT_MASK_WIDTH_8X,
.speed = SPEED_200000,
},
+ {
+ .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_400GAUI_8,
+ .mask_ethtool = mlxsw_sp2_mask_ethtool_400gaui_8,
+ .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_400GAUI_8_LEN,
+ .mask_width = MLXSW_SP_PORT_MASK_WIDTH_8X,
+ .speed = SPEED_400000,
+ },
};
#define MLXSW_SP2_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp2_port_link_mode)
@@ -3435,7 +3515,7 @@ static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
};
static int
-mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width)
+mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
const struct mlxsw_sp_port_type_speed_ops *ops;
@@ -3451,7 +3531,7 @@ mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width)
&base_speed);
if (err)
return err;
- upper_speed = base_speed * width;
+ upper_speed = base_speed * mlxsw_sp_port->mapping.width;
eth_proto_admin = ops->to_ptys_upper_speed(mlxsw_sp, upper_speed);
ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
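For example, with the 50G-per-lane base speed defined in spectrum.h
(MLXSW_SP_PORT_BASE_SPEED_50G = 50000 Mb/s), an 8-lane port would advertise
upper_speed = 50000 * 8 = 400000 Mb/s, matching the 400GAUI-8 link mode
added below.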
@@ -3612,15 +3692,18 @@ static int mlxsw_sp_port_tc_mc_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
}
static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
- bool split, u8 module, u8 width, u8 lane)
+ u8 split_base_local_port,
+ struct mlxsw_sp_port_mapping *port_mapping)
{
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
+ bool split = !!split_base_local_port;
struct mlxsw_sp_port *mlxsw_sp_port;
struct net_device *dev;
int err;
err = mlxsw_core_port_init(mlxsw_sp->core, local_port,
- module + 1, split, lane / width,
+ port_mapping->module + 1, split,
+ port_mapping->lane / port_mapping->width,
mlxsw_sp->base_mac,
sizeof(mlxsw_sp->base_mac));
if (err) {
@@ -3635,15 +3718,15 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
goto err_alloc_etherdev;
}
SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev);
+ dev_net_set(dev, mlxsw_sp_net(mlxsw_sp));
mlxsw_sp_port = netdev_priv(dev);
mlxsw_sp_port->dev = dev;
mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
mlxsw_sp_port->local_port = local_port;
mlxsw_sp_port->pvid = MLXSW_SP_DEFAULT_VID;
mlxsw_sp_port->split = split;
- mlxsw_sp_port->mapping.module = module;
- mlxsw_sp_port->mapping.width = width;
- mlxsw_sp_port->mapping.lane = lane;
+ mlxsw_sp_port->split_base_local_port = split_base_local_port;
+ mlxsw_sp_port->mapping = *port_mapping;
mlxsw_sp_port->link.autoneg = 1;
INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list);
INIT_LIST_HEAD(&mlxsw_sp_port->mall_tc_list);
@@ -3668,7 +3751,7 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;
- err = mlxsw_sp_port_module_map(mlxsw_sp_port, module, width, lane);
+ err = mlxsw_sp_port_module_map(mlxsw_sp_port);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n",
mlxsw_sp_port->local_port);
@@ -3710,7 +3793,7 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
goto err_port_system_port_mapping_set;
}
- err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width);
+ err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
mlxsw_sp_port->local_port);
@@ -3933,14 +4016,13 @@ static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
if (mlxsw_sp_port_created(mlxsw_sp, i))
mlxsw_sp_port_remove(mlxsw_sp, i);
mlxsw_sp_cpu_port_remove(mlxsw_sp);
- kfree(mlxsw_sp->port_to_module);
kfree(mlxsw_sp->ports);
}
static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
{
unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
- u8 module, width, lane;
+ struct mlxsw_sp_port_mapping *port_mapping;
size_t alloc_size;
int i;
int err;
@@ -3950,66 +4032,100 @@ static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
if (!mlxsw_sp->ports)
return -ENOMEM;
- mlxsw_sp->port_to_module = kmalloc_array(max_ports, sizeof(int),
- GFP_KERNEL);
- if (!mlxsw_sp->port_to_module) {
- err = -ENOMEM;
- goto err_port_to_module_alloc;
- }
-
err = mlxsw_sp_cpu_port_create(mlxsw_sp);
if (err)
goto err_cpu_port_create;
for (i = 1; i < max_ports; i++) {
- /* Mark as invalid */
- mlxsw_sp->port_to_module[i] = -1;
-
- err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module,
- &width, &lane);
- if (err)
- goto err_port_module_info_get;
- if (!width)
+ port_mapping = mlxsw_sp->port_mapping[i];
+ if (!port_mapping)
continue;
- mlxsw_sp->port_to_module[i] = module;
- err = mlxsw_sp_port_create(mlxsw_sp, i, false,
- module, width, lane);
+ err = mlxsw_sp_port_create(mlxsw_sp, i, 0, port_mapping);
if (err)
goto err_port_create;
}
return 0;
err_port_create:
-err_port_module_info_get:
for (i--; i >= 1; i--)
if (mlxsw_sp_port_created(mlxsw_sp, i))
mlxsw_sp_port_remove(mlxsw_sp, i);
mlxsw_sp_cpu_port_remove(mlxsw_sp);
err_cpu_port_create:
- kfree(mlxsw_sp->port_to_module);
-err_port_to_module_alloc:
kfree(mlxsw_sp->ports);
return err;
}
-static u8 mlxsw_sp_cluster_base_port_get(u8 local_port)
+static int mlxsw_sp_port_module_info_init(struct mlxsw_sp *mlxsw_sp)
+{
+ unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
+ struct mlxsw_sp_port_mapping port_mapping;
+ int i;
+ int err;
+
+ mlxsw_sp->port_mapping = kcalloc(max_ports,
+ sizeof(struct mlxsw_sp_port_mapping *),
+ GFP_KERNEL);
+ if (!mlxsw_sp->port_mapping)
+ return -ENOMEM;
+
+ for (i = 1; i < max_ports; i++) {
+ err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &port_mapping);
+ if (err)
+ goto err_port_module_info_get;
+ if (!port_mapping.width)
+ continue;
+
+ mlxsw_sp->port_mapping[i] = kmemdup(&port_mapping,
+ sizeof(port_mapping),
+ GFP_KERNEL);
+ if (!mlxsw_sp->port_mapping[i]) {
+ err = -ENOMEM;
+ goto err_port_module_info_dup;
+ }
+ }
+ return 0;
+
+err_port_module_info_get:
+err_port_module_info_dup:
+ for (i--; i >= 1; i--)
+ kfree(mlxsw_sp->port_mapping[i]);
+ kfree(mlxsw_sp->port_mapping);
+ return err;
+}
+
+static void mlxsw_sp_port_module_info_fini(struct mlxsw_sp *mlxsw_sp)
{
- u8 offset = (local_port - 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX;
+ int i;
+
+ for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
+ kfree(mlxsw_sp->port_mapping[i]);
+ kfree(mlxsw_sp->port_mapping);
+}
+
+static u8 mlxsw_sp_cluster_base_port_get(u8 local_port, unsigned int max_width)
+{
+ u8 offset = (local_port - 1) % max_width;
return local_port - offset;
}
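A quick check of the arithmetic above: with max_width = 4, local port 7
gives offset = (7 - 1) % 4 = 2, so the cluster base port is 7 - 2 = 5;
ports thus cluster as {1..4}, {5..8} and so on.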
-static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
- u8 module, unsigned int count, u8 offset)
+static int
+mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
+ struct mlxsw_sp_port_mapping *port_mapping,
+ unsigned int count, u8 offset)
{
- u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
+ struct mlxsw_sp_port_mapping split_port_mapping;
int err, i;
+ split_port_mapping = *port_mapping;
+ split_port_mapping.width /= count;
for (i = 0; i < count; i++) {
err = mlxsw_sp_port_create(mlxsw_sp, base_port + i * offset,
- true, module, width, i * width);
+ base_port, &split_port_mapping);
if (err)
goto err_port_create;
+ split_port_mapping.lane += split_port_mapping.width;
}
return 0;
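Concretely, splitting a width-4 port with count = 2 yields
split_port_mapping.width = 2, and the loop creates the two new ports with
lanes 0 and 2, the lane field advancing by the split width on each
iteration.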
@@ -4022,45 +4138,55 @@ err_port_create:
}
static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
- u8 base_port, unsigned int count)
+ u8 base_port,
+ unsigned int count, u8 offset)
{
- u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH;
+ struct mlxsw_sp_port_mapping *port_mapping;
int i;
- /* Split by four means we need to re-create two ports, otherwise
- * only one.
- */
- count = count / 2;
-
- for (i = 0; i < count; i++) {
- local_port = base_port + i * 2;
- if (mlxsw_sp->port_to_module[local_port] < 0)
+ /* Go over original unsplit ports in the gap and recreate them. */
+ for (i = 0; i < count * offset; i++) {
+ port_mapping = mlxsw_sp->port_mapping[base_port + i];
+ if (!port_mapping)
continue;
- module = mlxsw_sp->port_to_module[local_port];
-
- mlxsw_sp_port_create(mlxsw_sp, local_port, false, module,
- width, 0);
+ mlxsw_sp_port_create(mlxsw_sp, base_port + i, 0, port_mapping);
}
}
+static int mlxsw_sp_local_ports_offset(struct mlxsw_core *mlxsw_core,
+ unsigned int count,
+ unsigned int max_width)
+{
+ enum mlxsw_res_id local_ports_in_x_res_id;
+ int split_width = max_width / count;
+
+ if (split_width == 1)
+ local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_1X;
+ else if (split_width == 2)
+ local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_2X;
+ else if (split_width == 4)
+ local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_4X;
+ else
+ return -EINVAL;
+
+ if (!mlxsw_core_res_valid(mlxsw_core, local_ports_in_x_res_id))
+ return -EINVAL;
+ return mlxsw_core_res_get(mlxsw_core, local_ports_in_x_res_id);
+}
+
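For instance, a 4-way split of an 8-lane module gives split_width = 2, so
the function returns the LOCAL_PORTS_IN_2X resource -- the stride, in local
port numbers, between consecutive 2-lane ports.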
static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
unsigned int count,
struct netlink_ext_ack *extack)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
- u8 local_ports_in_1x, local_ports_in_2x, offset;
+ struct mlxsw_sp_port_mapping port_mapping;
struct mlxsw_sp_port *mlxsw_sp_port;
- u8 module, cur_width, base_port;
+ int max_width;
+ u8 base_port;
+ int offset;
int i;
int err;
- if (!MLXSW_CORE_RES_VALID(mlxsw_core, LOCAL_PORTS_IN_1X) ||
- !MLXSW_CORE_RES_VALID(mlxsw_core, LOCAL_PORTS_IN_2X))
- return -EIO;
-
- local_ports_in_1x = MLXSW_CORE_RES_GET(mlxsw_core, LOCAL_PORTS_IN_1X);
- local_ports_in_2x = MLXSW_CORE_RES_GET(mlxsw_core, LOCAL_PORTS_IN_2X);
-
mlxsw_sp_port = mlxsw_sp->ports[local_port];
if (!mlxsw_sp_port) {
dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
@@ -4069,47 +4195,70 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
return -EINVAL;
}
- module = mlxsw_sp_port->mapping.module;
- cur_width = mlxsw_sp_port->mapping.width;
+ /* Split ports cannot be split. */
+ if (mlxsw_sp_port->split) {
+ netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
+ NL_SET_ERR_MSG_MOD(extack, "Port cannot be split further");
+ return -EINVAL;
+ }
+
+ max_width = mlxsw_core_module_max_width(mlxsw_core,
+ mlxsw_sp_port->mapping.module);
+ if (max_width < 0) {
+ netdev_err(mlxsw_sp_port->dev, "Cannot get max width of port module\n");
+ NL_SET_ERR_MSG_MOD(extack, "Cannot get max width of port module");
+ return max_width;
+ }
- if (count != 2 && count != 4) {
- netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n");
- NL_SET_ERR_MSG_MOD(extack, "Port can only be split into 2 or 4 ports");
+ /* A port not using its module's maximum width, or a single-lane module, cannot be split. */
+ if (mlxsw_sp_port->mapping.width != max_width || max_width == 1) {
+ netdev_err(mlxsw_sp_port->dev, "Port cannot be split\n");
+ NL_SET_ERR_MSG_MOD(extack, "Port cannot be split");
return -EINVAL;
}
- if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) {
- netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
- NL_SET_ERR_MSG_MOD(extack, "Port cannot be split further");
+ if (count == 1 || !is_power_of_2(count) || count > max_width) {
+ netdev_err(mlxsw_sp_port->dev, "Invalid split count\n");
+ NL_SET_ERR_MSG_MOD(extack, "Invalid split count");
return -EINVAL;
}
- /* Make sure we have enough slave (even) ports for the split. */
- if (count == 2) {
- offset = local_ports_in_2x;
- base_port = local_port;
- if (mlxsw_sp->ports[base_port + local_ports_in_2x]) {
- netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
- NL_SET_ERR_MSG_MOD(extack, "Invalid split configuration");
- return -EINVAL;
- }
- } else {
- offset = local_ports_in_1x;
- base_port = mlxsw_sp_cluster_base_port_get(local_port);
- if (mlxsw_sp->ports[base_port + 1] ||
- mlxsw_sp->ports[base_port + 3]) {
+ offset = mlxsw_sp_local_ports_offset(mlxsw_core, count, max_width);
+ if (offset < 0) {
+ netdev_err(mlxsw_sp_port->dev, "Cannot obtain local port offset\n");
+ NL_SET_ERR_MSG_MOD(extack, "Cannot obtain local port offset");
+ return -EINVAL;
+ }
+
+ /* Only when the maximal split is requested may the base port
+ * differ from the local port.
+ */
+ base_port = count == max_width ?
+ mlxsw_sp_cluster_base_port_get(local_port, max_width) :
+ local_port;
+
+ for (i = 0; i < count * offset; i++) {
+ /* The base port is expected to exist, as is the middle port in
+ * case of the maximal split count.
+ */
+ if (i == 0 || (count == max_width && i == count / 2))
+ continue;
+
+ if (mlxsw_sp_port_created(mlxsw_sp, base_port + i)) {
netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
NL_SET_ERR_MSG_MOD(extack, "Invalid split configuration");
return -EINVAL;
}
}
+ port_mapping = mlxsw_sp_port->mapping;
+
for (i = 0; i < count; i++)
if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset))
mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset);
- err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count,
- offset);
+ err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, &port_mapping,
+ count, offset);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
goto err_port_split_create;
@@ -4118,7 +4267,7 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
return 0;
err_port_split_create:
- mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
+ mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count, offset);
return err;
}
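Tracing a maximal split (count == max_width == 4) of local port 3, assuming
a hypothetical LOCAL_PORTS_IN_1X offset of 1: the base port is
mlxsw_sp_cluster_base_port_get(3, 4) = 3 - (3 - 1) % 4 = 1, the gap check
skips i == 0 and i == count / 2 == 2 (the base and middle ports, which are
expected to exist), the existing ports are removed, and four width-1 split
ports are created at local ports 1, 2, 3 and 4.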
@@ -4126,19 +4275,13 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port,
struct netlink_ext_ack *extack)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
- u8 local_ports_in_1x, local_ports_in_2x, offset;
struct mlxsw_sp_port *mlxsw_sp_port;
- u8 cur_width, base_port;
unsigned int count;
+ int max_width;
+ u8 base_port;
+ int offset;
int i;
- if (!MLXSW_CORE_RES_VALID(mlxsw_core, LOCAL_PORTS_IN_1X) ||
- !MLXSW_CORE_RES_VALID(mlxsw_core, LOCAL_PORTS_IN_2X))
- return -EIO;
-
- local_ports_in_1x = MLXSW_CORE_RES_GET(mlxsw_core, LOCAL_PORTS_IN_1X);
- local_ports_in_2x = MLXSW_CORE_RES_GET(mlxsw_core, LOCAL_PORTS_IN_2X);
-
mlxsw_sp_port = mlxsw_sp->ports[local_port];
if (!mlxsw_sp_port) {
dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
@@ -4153,25 +4296,30 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port,
return -EINVAL;
}
- cur_width = mlxsw_sp_port->mapping.width;
- count = cur_width == 1 ? 4 : 2;
+ max_width = mlxsw_core_module_max_width(mlxsw_core,
+ mlxsw_sp_port->mapping.module);
+ if (max_width < 0) {
+ netdev_err(mlxsw_sp_port->dev, "Cannot get max width of port module\n");
+ NL_SET_ERR_MSG_MOD(extack, "Cannot get max width of port module");
+ return max_width;
+ }
- if (count == 2)
- offset = local_ports_in_2x;
- else
- offset = local_ports_in_1x;
+ count = max_width / mlxsw_sp_port->mapping.width;
- base_port = mlxsw_sp_cluster_base_port_get(local_port);
+ offset = mlxsw_sp_local_ports_offset(mlxsw_core, count, max_width);
+ if (WARN_ON(offset < 0)) {
+ netdev_err(mlxsw_sp_port->dev, "Cannot obtain local port offset\n");
+ NL_SET_ERR_MSG_MOD(extack, "Cannot obtain local port offset");
+ return -EINVAL;
+ }
- /* Determine which ports to remove. */
- if (count == 2 && local_port >= base_port + 2)
- base_port = base_port + 2;
+ base_port = mlxsw_sp_port->split_base_local_port;
for (i = 0; i < count; i++)
if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset))
mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset);
- mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
+ mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count, offset);
return 0;
}
@@ -4364,8 +4512,6 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = {
MLXSW_SP_RXL_NO_MARK(IPV6_MLDV2_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD,
false),
/* L3 traps */
- MLXSW_SP_RXL_MARK(MTUERROR, TRAP_TO_CPU, ROUTER_EXP, false),
- MLXSW_SP_RXL_MARK(TTLERROR, TRAP_TO_CPU, ROUTER_EXP, false),
MLXSW_SP_RXL_L3_MARK(LBERROR, MIRROR_TO_CPU, LBERROR, false),
MLXSW_SP_RXL_MARK(IP2ME, TRAP_TO_CPU, IP2ME, false),
MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP,
@@ -4392,8 +4538,6 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = {
MLXSW_SP_RXL_MARK(L3_IPV6_REDIRECTION, TRAP_TO_CPU, IPV6_ND, false),
MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP,
false),
- MLXSW_SP_RXL_MARK(HOST_MISS_IPV4, TRAP_TO_CPU, HOST_MISS, false),
- MLXSW_SP_RXL_MARK(HOST_MISS_IPV6, TRAP_TO_CPU, HOST_MISS, false),
MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV4, TRAP_TO_CPU, ROUTER_EXP, false),
MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV6, TRAP_TO_CPU, ROUTER_EXP, false),
MLXSW_SP_RXL_MARK(IPIP_DECAP_ERROR, TRAP_TO_CPU, ROUTER_EXP, false),
@@ -4408,7 +4552,6 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = {
/* Multicast Router Traps */
MLXSW_SP_RXL_MARK(IPV4_PIM, TRAP_TO_CPU, PIM, false),
MLXSW_SP_RXL_MARK(IPV6_PIM, TRAP_TO_CPU, PIM, false),
- MLXSW_SP_RXL_MARK(RPF, TRAP_TO_CPU, RPF, false),
MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false),
MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false),
/* NVE traps */
@@ -4738,7 +4881,8 @@ static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
unsigned long event, void *ptr);
static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
- const struct mlxsw_bus_info *mlxsw_bus_info)
+ const struct mlxsw_bus_info *mlxsw_bus_info,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
int err;
@@ -4750,6 +4894,8 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
if (err)
return err;
+ mlxsw_core_emad_string_tlv_enable(mlxsw_core);
+
err = mlxsw_sp_base_mac_get(mlxsw_sp);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
@@ -4831,7 +4977,7 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
goto err_acl_init;
}
- err = mlxsw_sp_router_init(mlxsw_sp);
+ err = mlxsw_sp_router_init(mlxsw_sp, extack);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
goto err_router_init;
@@ -4864,7 +5010,8 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
* respin.
*/
mlxsw_sp->netdevice_nb.notifier_call = mlxsw_sp_netdevice_event;
- err = register_netdevice_notifier(&mlxsw_sp->netdevice_nb);
+ err = register_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
+ &mlxsw_sp->netdevice_nb);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Failed to register netdev notifier\n");
goto err_netdev_notifier;
@@ -4876,6 +5023,12 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
goto err_dpipe_init;
}
+ err = mlxsw_sp_port_module_info_init(mlxsw_sp);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to init port module info\n");
+ goto err_port_module_info_init;
+ }
+
err = mlxsw_sp_ports_create(mlxsw_sp);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
@@ -4885,9 +5038,12 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
return 0;
err_ports_create:
+ mlxsw_sp_port_module_info_fini(mlxsw_sp);
+err_port_module_info_init:
mlxsw_sp_dpipe_fini(mlxsw_sp);
err_dpipe_init:
- unregister_netdevice_notifier(&mlxsw_sp->netdevice_nb);
+ unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
+ &mlxsw_sp->netdevice_nb);
err_netdev_notifier:
if (mlxsw_sp->clock)
mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state);
@@ -4924,7 +5080,8 @@ err_fids_init:
}
static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
- const struct mlxsw_bus_info *mlxsw_bus_info)
+ const struct mlxsw_bus_info *mlxsw_bus_info,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
@@ -4944,14 +5101,17 @@ static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->listeners = mlxsw_sp1_listener;
mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp1_listener);
- return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info);
+ return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}
static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
- const struct mlxsw_bus_info *mlxsw_bus_info)
+ const struct mlxsw_bus_info *mlxsw_bus_info,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ mlxsw_sp->req_rev = &mlxsw_sp2_fw_rev;
+ mlxsw_sp->fw_filename = MLXSW_SP2_FW_FILENAME;
mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
@@ -4964,7 +5124,7 @@ static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
- return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info);
+ return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}
static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
@@ -4972,8 +5132,10 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
mlxsw_sp_ports_remove(mlxsw_sp);
+ mlxsw_sp_port_module_info_fini(mlxsw_sp);
mlxsw_sp_dpipe_fini(mlxsw_sp);
- unregister_netdevice_notifier(&mlxsw_sp->netdevice_nb);
+ unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
+ &mlxsw_sp->netdevice_nb);
if (mlxsw_sp->clock) {
mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state);
mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock);
@@ -5165,14 +5327,61 @@ static int mlxsw_sp2_resources_kvd_register(struct mlxsw_core *mlxsw_core)
&kvd_size_params);
}
+static int mlxsw_sp_resources_span_register(struct mlxsw_core *mlxsw_core)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_core);
+ struct devlink_resource_size_params span_size_params;
+ u32 max_span;
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_SPAN))
+ return -EIO;
+
+ max_span = MLXSW_CORE_RES_GET(mlxsw_core, MAX_SPAN);
+ devlink_resource_size_params_init(&span_size_params, max_span, max_span,
+ 1, DEVLINK_RESOURCE_UNIT_ENTRY);
+
+ return devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_SPAN,
+ max_span, MLXSW_SP_RESOURCE_SPAN,
+ DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &span_size_params);
+}
+
static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core)
{
- return mlxsw_sp1_resources_kvd_register(mlxsw_core);
+ int err;
+
+ err = mlxsw_sp1_resources_kvd_register(mlxsw_core);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_resources_span_register(mlxsw_core);
+ if (err)
+ goto err_resources_span_register;
+
+ return 0;
+
+err_resources_span_register:
+ devlink_resources_unregister(priv_to_devlink(mlxsw_core), NULL);
+ return err;
}
static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core)
{
- return mlxsw_sp2_resources_kvd_register(mlxsw_core);
+ int err;
+
+ err = mlxsw_sp2_resources_kvd_register(mlxsw_core);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_resources_span_register(mlxsw_core);
+ if (err)
+ goto err_resources_span_register;
+
+ return 0;
+
+err_resources_span_register:
+ devlink_resources_unregister(priv_to_devlink(mlxsw_core), NULL);
+ return err;
}
static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
@@ -6565,3 +6774,4 @@ MODULE_DEVICE_TABLE(pci, mlxsw_sp1_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp2_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp3_pci_id_table);
MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME);
+MODULE_FIRMWARE(MLXSW_SP2_FW_FILENAME);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index b2a0028b1694..347bec9d1ecf 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -14,6 +14,7 @@
#include <linux/dcbnl.h>
#include <linux/in6.h>
#include <linux/notifier.h>
+#include <linux/net_namespace.h>
#include <net/psample.h>
#include <net/pkt_cls.h>
#include <net/red.h>
@@ -31,8 +32,6 @@
#define MLXSW_SP_MID_MAX 7000
-#define MLXSW_SP_PORTS_PER_CLUSTER_MAX 4
-
#define MLXSW_SP_PORT_BASE_SPEED_25G 25000 /* Mb/s */
#define MLXSW_SP_PORT_BASE_SPEED_50G 50000 /* Mb/s */
@@ -47,6 +46,8 @@
#define MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_CHUNKS "chunks"
#define MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_LARGE_CHUNKS "large_chunks"
+#define MLXSW_SP_RESOURCE_NAME_SPAN "span_agents"
+
enum mlxsw_sp_resource_id {
MLXSW_SP_RESOURCE_KVD = 1,
MLXSW_SP_RESOURCE_KVD_LINEAR,
@@ -55,6 +56,7 @@ enum mlxsw_sp_resource_id {
MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE,
MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS,
MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS,
+ MLXSW_SP_RESOURCE_SPAN,
};
struct mlxsw_sp_port;
@@ -139,6 +141,12 @@ struct mlxsw_sp_port_type_speed_ops;
struct mlxsw_sp_ptp_state;
struct mlxsw_sp_ptp_ops;
+struct mlxsw_sp_port_mapping {
+ u8 module;
+ u8 width;
+ u8 lane;
+};
+
struct mlxsw_sp {
struct mlxsw_sp_port **ports;
struct mlxsw_core *core;
@@ -146,7 +154,7 @@ struct mlxsw_sp {
unsigned char base_mac[ETH_ALEN];
const unsigned char *mac_mask;
struct mlxsw_sp_upper *lags;
- int *port_to_module;
+ struct mlxsw_sp_port_mapping **port_mapping;
struct mlxsw_sp_sb *sb;
struct mlxsw_sp_bridge *bridge;
struct mlxsw_sp_router *router;
@@ -255,11 +263,11 @@ struct mlxsw_sp_port {
struct ieee_pfc *pfc;
enum mlxsw_reg_qpts_trust_state trust_state;
} dcb;
- struct {
- u8 module;
- u8 width;
- u8 lane;
- } mapping;
+	struct mlxsw_sp_port_mapping mapping; /* mapping is constant during the
+						 * mlxsw_sp_port lifetime; however,
+						 * the same local port can have
+						 * a different mapping.
+						 */
/* TC handles */
struct list_head mall_tc_list;
struct {
@@ -283,6 +291,7 @@ struct mlxsw_sp_port {
u16 egr_types;
struct mlxsw_sp_ptp_port_stats stats;
} ptp;
+ u8 split_base_local_port;
};
struct mlxsw_sp_port_type_speed_ops {
@@ -524,7 +533,8 @@ union mlxsw_sp_l3addr {
struct in6_addr addr6;
};
-int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp);
+int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
+ struct netlink_ext_ack *extack);
void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_netdevice_router_port_event(struct net_device *dev,
unsigned long event, void *ptr);
@@ -982,4 +992,9 @@ int mlxsw_sp_trap_action_set(struct mlxsw_core *mlxsw_core,
int mlxsw_sp_trap_group_init(struct mlxsw_core *mlxsw_core,
const struct devlink_trap_group *group);
+static inline struct net *mlxsw_sp_net(struct mlxsw_sp *mlxsw_sp)
+{
+ return mlxsw_core_net(mlxsw_sp->core);
+}
+
#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
index b9eeae37a4dc..968f0902e4fe 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
@@ -35,6 +35,7 @@ struct mlxsw_sp_sb_cm {
};
#define MLXSW_SP_SB_INFI -1U
+#define MLXSW_SP_SB_REST -2U
struct mlxsw_sp_sb_pm {
u32 min_buff;
@@ -421,19 +422,16 @@ static void mlxsw_sp_sb_ports_fini(struct mlxsw_sp *mlxsw_sp)
.freeze_size = _freeze_size, \
}
-#define MLXSW_SP1_SB_PR_INGRESS_SIZE 12440000
-#define MLXSW_SP1_SB_PR_EGRESS_SIZE 13232000
#define MLXSW_SP1_SB_PR_CPU_SIZE (256 * 1000)
/* Order according to mlxsw_sp1_sb_pool_dess */
static const struct mlxsw_sp_sb_pr mlxsw_sp1_sb_prs[] = {
- MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
- MLXSW_SP1_SB_PR_INGRESS_SIZE),
+ MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, MLXSW_SP_SB_REST),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
- MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC,
- MLXSW_SP1_SB_PR_EGRESS_SIZE, true, false),
+ MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC, MLXSW_SP_SB_REST,
+ true, false),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
@@ -445,19 +443,16 @@ static const struct mlxsw_sp_sb_pr mlxsw_sp1_sb_prs[] = {
MLXSW_SP1_SB_PR_CPU_SIZE, true, false),
};
-#define MLXSW_SP2_SB_PR_INGRESS_SIZE 35297568
-#define MLXSW_SP2_SB_PR_EGRESS_SIZE 35297568
#define MLXSW_SP2_SB_PR_CPU_SIZE (256 * 1000)
/* Order according to mlxsw_sp2_sb_pool_dess */
static const struct mlxsw_sp_sb_pr mlxsw_sp2_sb_prs[] = {
- MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
- MLXSW_SP2_SB_PR_INGRESS_SIZE),
+ MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, MLXSW_SP_SB_REST),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
- MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC,
- MLXSW_SP2_SB_PR_EGRESS_SIZE, true, false),
+ MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC, MLXSW_SP_SB_REST,
+ true, false),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
@@ -471,11 +466,33 @@ static const struct mlxsw_sp_sb_pr mlxsw_sp2_sb_prs[] = {
static int mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_sb_pr *prs,
+ const struct mlxsw_sp_sb_pool_des *pool_dess,
size_t prs_len)
{
+ /* Round down, unlike mlxsw_sp_bytes_cells(). */
+ u32 sb_cells = div_u64(mlxsw_sp->sb->sb_size, mlxsw_sp->sb->cell_size);
+ u32 rest_cells[2] = {sb_cells, sb_cells};
int i;
int err;
+ /* Calculate how much space to give to the "REST" pools in either
+ * direction.
+ */
+ for (i = 0; i < prs_len; i++) {
+ enum mlxsw_reg_sbxx_dir dir = pool_dess[i].dir;
+ u32 size = prs[i].size;
+ u32 size_cells;
+
+ if (size == MLXSW_SP_SB_INFI || size == MLXSW_SP_SB_REST)
+ continue;
+
+ size_cells = mlxsw_sp_bytes_cells(mlxsw_sp, size);
+ if (WARN_ON_ONCE(size_cells > rest_cells[dir]))
+ continue;
+
+ rest_cells[dir] -= size_cells;
+ }
+
for (i = 0; i < prs_len; i++) {
u32 size = prs[i].size;
u32 size_cells;
@@ -483,6 +500,10 @@ static int mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp,
if (size == MLXSW_SP_SB_INFI) {
err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, prs[i].mode,
0, true);
+ } else if (size == MLXSW_SP_SB_REST) {
+ size_cells = rest_cells[pool_dess[i].dir];
+ err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, prs[i].mode,
+ size_cells, false);
} else {
size_cells = mlxsw_sp_bytes_cells(mlxsw_sp, size);
err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, prs[i].mode,
@@ -904,7 +925,7 @@ int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp)
if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, CELL_SIZE))
return -EIO;
- if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_BUFFER_SIZE))
+ if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, GUARANTEED_SHARED_BUFFER))
return -EIO;
if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_HEADROOM_SIZE))
@@ -915,7 +936,7 @@ int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp)
return -ENOMEM;
mlxsw_sp->sb->cell_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, CELL_SIZE);
mlxsw_sp->sb->sb_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
- MAX_BUFFER_SIZE);
+ GUARANTEED_SHARED_BUFFER);
max_headroom_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
MAX_HEADROOM_SIZE);
/* Round down, because this limit must not be overstepped. */
@@ -926,6 +947,7 @@ int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp)
if (err)
goto err_sb_ports_init;
err = mlxsw_sp_sb_prs_init(mlxsw_sp, mlxsw_sp->sb_vals->prs,
+ mlxsw_sp->sb_vals->pool_dess,
mlxsw_sp->sb_vals->pool_count);
if (err)
goto err_sb_prs_init;
@@ -1013,7 +1035,8 @@ int mlxsw_sp_sb_pool_set(struct mlxsw_core *mlxsw_core,
mode = (enum mlxsw_reg_sbpr_mode) threshold_type;
pr = &mlxsw_sp->sb_vals->prs[pool_index];
- if (size > MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_BUFFER_SIZE)) {
+ if (size > MLXSW_CORE_RES_GET(mlxsw_sp->core,
+ GUARANTEED_SHARED_BUFFER)) {
NL_SET_ERR_MSG_MOD(extack, "Exceeded shared buffer size");
return -EINVAL;
}
@@ -1021,12 +1044,12 @@ int mlxsw_sp_sb_pool_set(struct mlxsw_core *mlxsw_core,
if (pr->freeze_mode && pr->mode != mode) {
NL_SET_ERR_MSG_MOD(extack, "Changing this pool's threshold type is forbidden");
return -EINVAL;
- };
+ }
if (pr->freeze_size && pr->size != size) {
NL_SET_ERR_MSG_MOD(extack, "Changing this pool's size is forbidden");
return -EINVAL;
- };
+ }
return mlxsw_sp_sb_pr_write(mlxsw_sp, pool_index, mode,
pool_size, false);
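The "REST" sizing in mlxsw_sp_sb_prs_init() above works in two passes: fixed pool sizes are first deducted from each direction's cell budget, and whatever remains is then written to the pools marked MLXSW_SP_SB_REST. Below is a minimal standalone sketch of that arithmetic, with invented pool tables, budgets and stand-in sentinel values (the real driver derives the budget from the GUARANTEED_SHARED_BUFFER resource and WARNs on overflow):

#include <stdio.h>
#include <stdint.h>

#define SB_INFI UINT32_MAX       /* stand-in for MLXSW_SP_SB_INFI */
#define SB_REST (UINT32_MAX - 1) /* stand-in for MLXSW_SP_SB_REST */

enum dir { DIR_INGRESS, DIR_EGRESS };

struct pool { enum dir dir; uint32_t size_cells; };

int main(void)
{
	/* hypothetical shared-buffer budget of 1000 cells per direction */
	uint32_t rest_cells[2] = { 1000, 1000 };
	struct pool pools[] = {
		{ DIR_INGRESS, SB_REST }, /* gets the ingress remainder */
		{ DIR_INGRESS, 200 },
		{ DIR_EGRESS,  SB_REST }, /* gets the egress remainder */
		{ DIR_EGRESS,  150 },
		{ DIR_EGRESS,  SB_INFI }, /* infinite pools use no budget */
	};
	size_t i, n = sizeof(pools) / sizeof(pools[0]);

	/* pass 1: subtract every fixed size from its direction's budget */
	for (i = 0; i < n; i++) {
		if (pools[i].size_cells == SB_INFI ||
		    pools[i].size_cells == SB_REST)
			continue;
		rest_cells[pools[i].dir] -= pools[i].size_cells;
	}

	/* pass 2: resolve REST pools to the leftover cells */
	for (i = 0; i < n; i++) {
		uint32_t size = pools[i].size_cells;

		if (size == SB_REST)
			size = rest_cells[pools[i].dir];
		printf("pool %zu: dir %d, %u cells%s\n", i, pools[i].dir,
		       size == SB_INFI ? 0 : size,
		       size == SB_INFI ? " (infinite)" : "");
	}
	return 0;
}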
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
index 17f334b46c40..2153bcc4b585 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
@@ -870,7 +870,7 @@ void mlxsw_sp_nve_fid_disable(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_fid_vni(fid, &vni)))
goto out;
- nve_dev = dev_get_by_index(&init_net, nve_ifindex);
+ nve_dev = dev_get_by_index(mlxsw_sp_net(mlxsw_sp), nve_ifindex);
if (!nve_dev)
goto out;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
index bdf53cf350f6..68cc6737d45c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
@@ -305,7 +305,8 @@ mlxsw_sp_qdisc_red_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
p->max);
return -EINVAL;
}
- if (p->max > MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_BUFFER_SIZE)) {
+ if (p->max > MLXSW_CORE_RES_GET(mlxsw_sp->core,
+ GUARANTEED_SHARED_BUFFER)) {
dev_err(mlxsw_sp->bus_info->dev,
"spectrum: RED: max value %u is too big\n", p->max);
return -EINVAL;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index a330b369e899..30bfe3880faf 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -16,6 +16,7 @@
#include <linux/if_macvlan.h>
#include <linux/refcount.h>
#include <linux/jhash.h>
+#include <linux/net_namespace.h>
#include <net/netevent.h>
#include <net/neighbour.h>
#include <net/arp.h>
@@ -76,6 +77,8 @@ struct mlxsw_sp_router {
struct notifier_block inet6addr_nb;
const struct mlxsw_sp_rif_ops **rif_ops_arr;
const struct mlxsw_sp_ipip_ops **ipip_ops_arr;
+ u32 adj_discard_index;
+ bool adj_discard_index_valid;
};
struct mlxsw_sp_rif {
@@ -366,6 +369,7 @@ enum mlxsw_sp_fib_entry_type {
MLXSW_SP_FIB_ENTRY_TYPE_LOCAL,
MLXSW_SP_FIB_ENTRY_TYPE_TRAP,
MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE,
+ MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE,
/* This is a special case of local delivery, where a packet should be
* decapsulated on reception. Note that there is no corresponding ENCAP,
@@ -994,7 +998,7 @@ u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev)
if (d)
return l3mdev_fib_table(d) ? : RT_TABLE_MAIN;
else
- return l3mdev_fib_table(ol_dev) ? : RT_TABLE_MAIN;
+ return RT_TABLE_MAIN;
}
static struct mlxsw_sp_rif *
@@ -1598,27 +1602,10 @@ static int mlxsw_sp_netdevice_ipip_ol_vrf_event(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_ipip_entry *ipip_entry =
mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
- enum mlxsw_sp_l3proto ul_proto;
- union mlxsw_sp_l3addr saddr;
- u32 ul_tb_id;
if (!ipip_entry)
return 0;
- /* For flat configuration cases, moving overlay to a different VRF might
- * cause local address conflict, and the conflicting tunnels need to be
- * demoted.
- */
- ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ol_dev);
- ul_proto = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt]->ul_proto;
- saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ol_dev);
- if (mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
- saddr, ul_tb_id,
- ipip_entry)) {
- mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
- return 0;
- }
-
return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
true, false, false, extack);
}
@@ -1627,8 +1614,25 @@ static int
mlxsw_sp_netdevice_ipip_ul_vrf_event(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_ipip_entry *ipip_entry,
struct net_device *ul_dev,
+ bool *demote_this,
struct netlink_ext_ack *extack)
{
+ u32 ul_tb_id = l3mdev_fib_table(ul_dev) ? : RT_TABLE_MAIN;
+ enum mlxsw_sp_l3proto ul_proto;
+ union mlxsw_sp_l3addr saddr;
+
+ /* Moving underlay to a different VRF might cause local address
+ * conflict, and the conflicting tunnels need to be demoted.
+ */
+ ul_proto = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt]->ul_proto;
+ saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ipip_entry->ol_dev);
+ if (mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
+ saddr, ul_tb_id,
+ ipip_entry)) {
+ *demote_this = true;
+ return 0;
+ }
+
return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
true, true, false, extack);
}
@@ -1779,6 +1783,7 @@ static int
__mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_ipip_entry *ipip_entry,
struct net_device *ul_dev,
+ bool *demote_this,
unsigned long event,
struct netdev_notifier_info *info)
{
@@ -1793,6 +1798,7 @@ __mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
return mlxsw_sp_netdevice_ipip_ul_vrf_event(mlxsw_sp,
ipip_entry,
ul_dev,
+ demote_this,
extack);
break;
@@ -1819,13 +1825,31 @@ mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
while ((ipip_entry = mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp,
ul_dev,
ipip_entry))) {
+ struct mlxsw_sp_ipip_entry *prev;
+ bool demote_this = false;
+
err = __mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, ipip_entry,
- ul_dev, event, info);
+ ul_dev, &demote_this,
+ event, info);
if (err) {
mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(mlxsw_sp,
ul_dev);
return err;
}
+
+ if (demote_this) {
+ if (list_is_first(&ipip_entry->ipip_list_node,
+ &mlxsw_sp->router->ipip_list))
+ prev = NULL;
+ else
+ /* This can't be cached from previous iteration,
+ * because that entry could be gone now.
+ */
+ prev = list_prev_entry(ipip_entry,
+ ipip_list_node);
+ mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
+ ipip_entry = prev;
+ }
}
return 0;
@@ -2551,14 +2575,14 @@ static int mlxsw_sp_router_schedule_work(struct net *net,
struct mlxsw_sp_netevent_work *net_work;
struct mlxsw_sp_router *router;
- if (!net_eq(net, &init_net))
+ router = container_of(nb, struct mlxsw_sp_router, netevent_nb);
+ if (!net_eq(net, mlxsw_sp_net(router->mlxsw_sp)))
return NOTIFY_DONE;
net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
if (!net_work)
return NOTIFY_BAD;
- router = container_of(nb, struct mlxsw_sp_router, netevent_nb);
INIT_WORK(&net_work->work, cb);
net_work->mlxsw_sp = router->mlxsw_sp;
mlxsw_core_schedule_work(&net_work->work);
@@ -4195,15 +4219,50 @@ mlxsw_sp_fib_entry_ralue_pack(char *ralue_pl,
}
}
+static int mlxsw_sp_adj_discard_write(struct mlxsw_sp *mlxsw_sp, u16 rif_index)
+{
+ enum mlxsw_reg_ratr_trap_action trap_action;
+ char ratr_pl[MLXSW_REG_RATR_LEN];
+ int err;
+
+ if (mlxsw_sp->router->adj_discard_index_valid)
+ return 0;
+
+ err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
+ &mlxsw_sp->router->adj_discard_index);
+ if (err)
+ return err;
+
+ trap_action = MLXSW_REG_RATR_TRAP_ACTION_DISCARD_ERRORS;
+ mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY, true,
+ MLXSW_REG_RATR_TYPE_ETHERNET,
+ mlxsw_sp->router->adj_discard_index, rif_index);
+ mlxsw_reg_ratr_trap_action_set(ratr_pl, trap_action);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
+ if (err)
+ goto err_ratr_write;
+
+ mlxsw_sp->router->adj_discard_index_valid = true;
+
+ return 0;
+
+err_ratr_write:
+ mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
+ mlxsw_sp->router->adj_discard_index);
+ return err;
+}
+
static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_entry *fib_entry,
enum mlxsw_reg_ralue_op op)
{
+ struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
char ralue_pl[MLXSW_REG_RALUE_LEN];
enum mlxsw_reg_ralue_trap_action trap_action;
u16 trap_id = 0;
u32 adjacency_index = 0;
u16 ecmp_size = 0;
+ int err;
/* In case the nexthop group adjacency index is valid, use it
* with provided ECMP size. Otherwise, setup trap and pass
@@ -4213,6 +4272,15 @@ static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp,
trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
adjacency_index = fib_entry->nh_group->adj_index;
ecmp_size = fib_entry->nh_group->ecmp_size;
+ } else if (!nh_group->adj_index_valid && nh_group->count &&
+ nh_group->nh_rif) {
+ err = mlxsw_sp_adj_discard_write(mlxsw_sp,
+ nh_group->nh_rif->rif_index);
+ if (err)
+ return err;
+ trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
+ adjacency_index = mlxsw_sp->router->adj_discard_index;
+ ecmp_size = 1;
} else {
trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
@@ -4273,6 +4341,23 @@ static int mlxsw_sp_fib_entry_op_blackhole(struct mlxsw_sp *mlxsw_sp,
}
static int
+mlxsw_sp_fib_entry_op_unreachable(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry,
+ enum mlxsw_reg_ralue_op op)
+{
+ enum mlxsw_reg_ralue_trap_action trap_action;
+ char ralue_pl[MLXSW_REG_RALUE_LEN];
+ u16 trap_id;
+
+ trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
+ trap_id = MLXSW_TRAP_ID_RTR_INGRESS1;
+
+ mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
+ mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id, 0);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
+}
+
+static int
mlxsw_sp_fib_entry_op_ipip_decap(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_entry *fib_entry,
enum mlxsw_reg_ralue_op op)
@@ -4313,6 +4398,9 @@ static int __mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
return mlxsw_sp_fib_entry_op_trap(mlxsw_sp, fib_entry, op);
case MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE:
return mlxsw_sp_fib_entry_op_blackhole(mlxsw_sp, fib_entry, op);
+ case MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE:
+ return mlxsw_sp_fib_entry_op_unreachable(mlxsw_sp, fib_entry,
+ op);
case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
return mlxsw_sp_fib_entry_op_ipip_decap(mlxsw_sp,
fib_entry, op);
@@ -4390,7 +4478,7 @@ mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
* can do so with a lower priority than packets directed
* at the host, so use action type local instead of trap.
*/
- fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
+ fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE;
return 0;
case RTN_UNICAST:
if (mlxsw_sp_fi_is_gateway(mlxsw_sp, fi))
@@ -5350,7 +5438,7 @@ static void mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp *mlxsw_sp,
else if (rt->fib6_type == RTN_BLACKHOLE)
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE;
else if (rt->fib6_flags & RTF_REJECT)
- fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
+ fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE;
else if (mlxsw_sp_rt6_is_gateway(mlxsw_sp, rt))
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
else
@@ -5908,6 +5996,16 @@ static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
continue;
mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
}
+
+	/* After flushing all the routes, it is not possible that anyone is still
+ * using the adjacency index that is discarding packets, so free it in
+ * case it was allocated.
+ */
+ if (!mlxsw_sp->router->adj_discard_index_valid)
+ return;
+ mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
+ mlxsw_sp->router->adj_discard_index);
+ mlxsw_sp->router->adj_discard_index_valid = false;
}
static void mlxsw_sp_router_fib_abort(struct mlxsw_sp *mlxsw_sp)
@@ -6019,12 +6117,6 @@ static void mlxsw_sp_router_fib4_event_work(struct work_struct *work)
mlxsw_sp_router_fib4_del(mlxsw_sp, &fib_work->fen_info);
fib_info_put(fib_work->fen_info.fi);
break;
- case FIB_EVENT_RULE_ADD:
- /* if we get here, a rule was added that we do not support.
- * just do the fib_abort
- */
- mlxsw_sp_router_fib_abort(mlxsw_sp);
- break;
case FIB_EVENT_NH_ADD: /* fall through */
case FIB_EVENT_NH_DEL:
mlxsw_sp_nexthop4_event(mlxsw_sp, fib_work->event,
@@ -6065,12 +6157,6 @@ static void mlxsw_sp_router_fib6_event_work(struct work_struct *work)
fib_work->fib6_work.nrt6);
mlxsw_sp_router_fib6_work_fini(&fib_work->fib6_work);
break;
- case FIB_EVENT_RULE_ADD:
- /* if we get here, a rule was added that we do not support.
- * just do the fib_abort
- */
- mlxsw_sp_router_fib_abort(mlxsw_sp);
- break;
}
rtnl_unlock();
kfree(fib_work);
@@ -6112,12 +6198,6 @@ static void mlxsw_sp_router_fibmr_event_work(struct work_struct *work)
&fib_work->ven_info);
dev_put(fib_work->ven_info.dev);
break;
- case FIB_EVENT_RULE_ADD:
- /* if we get here, a rule was added that we do not support.
- * just do the fib_abort
- */
- mlxsw_sp_router_fib_abort(mlxsw_sp);
- break;
}
rtnl_unlock();
kfree(fib_work);
@@ -6213,7 +6293,7 @@ static int mlxsw_sp_router_fib_rule_event(unsigned long event,
rule = fr_info->rule;
/* Rule only affects locally generated traffic */
- if (rule->iifindex == info->net->loopback_dev->ifindex)
+ if (rule->iifindex == mlxsw_sp_net(mlxsw_sp)->loopback_dev->ifindex)
return 0;
switch (info->family) {
@@ -6250,8 +6330,7 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
struct mlxsw_sp_router *router;
int err;
- if (!net_eq(info->net, &init_net) ||
- (info->family != AF_INET && info->family != AF_INET6 &&
+ if ((info->family != AF_INET && info->family != AF_INET6 &&
info->family != RTNL_FAMILY_IPMR &&
info->family != RTNL_FAMILY_IP6MR))
return NOTIFY_DONE;
@@ -6263,9 +6342,7 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
case FIB_EVENT_RULE_DEL:
err = mlxsw_sp_router_fib_rule_event(event, info,
router->mlxsw_sp);
- if (!err || info->extack)
- return notifier_from_errno(err);
- break;
+ return notifier_from_errno(err);
case FIB_EVENT_ENTRY_ADD:
case FIB_EVENT_ENTRY_REPLACE: /* fall through */
case FIB_EVENT_ENTRY_APPEND: /* fall through */
@@ -7974,9 +8051,10 @@ static void mlxsw_sp_mp_hash_field_set(char *recr2_pl, int field)
mlxsw_reg_recr2_outer_header_fields_enable_set(recr2_pl, field, true);
}
-static void mlxsw_sp_mp4_hash_init(char *recr2_pl)
+static void mlxsw_sp_mp4_hash_init(struct mlxsw_sp *mlxsw_sp, char *recr2_pl)
{
- bool only_l3 = !init_net.ipv4.sysctl_fib_multipath_hash_policy;
+ struct net *net = mlxsw_sp_net(mlxsw_sp);
+ bool only_l3 = !net->ipv4.sysctl_fib_multipath_hash_policy;
mlxsw_sp_mp_hash_header_set(recr2_pl,
MLXSW_REG_RECR2_IPV4_EN_NOT_TCP_NOT_UDP);
@@ -7991,9 +8069,9 @@ static void mlxsw_sp_mp4_hash_init(char *recr2_pl)
mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_DPORT);
}
-static void mlxsw_sp_mp6_hash_init(char *recr2_pl)
+static void mlxsw_sp_mp6_hash_init(struct mlxsw_sp *mlxsw_sp, char *recr2_pl)
{
- bool only_l3 = !ip6_multipath_hash_policy(&init_net);
+ bool only_l3 = !ip6_multipath_hash_policy(mlxsw_sp_net(mlxsw_sp));
mlxsw_sp_mp_hash_header_set(recr2_pl,
MLXSW_REG_RECR2_IPV6_EN_NOT_TCP_NOT_UDP);
@@ -8021,8 +8099,8 @@ static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac), 0);
mlxsw_reg_recr2_pack(recr2_pl, seed);
- mlxsw_sp_mp4_hash_init(recr2_pl);
- mlxsw_sp_mp6_hash_init(recr2_pl);
+ mlxsw_sp_mp4_hash_init(mlxsw_sp, recr2_pl);
+ mlxsw_sp_mp6_hash_init(mlxsw_sp, recr2_pl);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(recr2), recr2_pl);
}
@@ -8053,7 +8131,8 @@ static int mlxsw_sp_dscp_init(struct mlxsw_sp *mlxsw_sp)
static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{
- bool usp = init_net.ipv4.sysctl_ip_fwd_update_priority;
+ struct net *net = mlxsw_sp_net(mlxsw_sp);
+ bool usp = net->ipv4.sysctl_ip_fwd_update_priority;
char rgcr_pl[MLXSW_REG_RGCR_LEN];
u64 max_rifs;
int err;
@@ -8079,7 +8158,8 @@ static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
}
-int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
+int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp_router *router;
int err;
@@ -8155,8 +8235,9 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
goto err_dscp_init;
mlxsw_sp->router->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
- err = register_fib_notifier(&mlxsw_sp->router->fib_nb,
- mlxsw_sp_router_fib_dump_flush);
+ err = register_fib_notifier(mlxsw_sp_net(mlxsw_sp),
+ &mlxsw_sp->router->fib_nb,
+ mlxsw_sp_router_fib_dump_flush, extack);
if (err)
goto err_register_fib_notifier;
@@ -8195,7 +8276,8 @@ err_register_inetaddr_notifier:
void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
- unregister_fib_notifier(&mlxsw_sp->router->fib_nb);
+ unregister_fib_notifier(mlxsw_sp_net(mlxsw_sp),
+ &mlxsw_sp->router->fib_nb);
unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
mlxsw_sp_neigh_fini(mlxsw_sp);
mlxsw_sp_vrs_fini(mlxsw_sp);
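The loop in mlxsw_sp_netdevice_ipip_ul_event() above resumes a list walk after demoting the current entry by recomputing the predecessor on the spot; as the in-line comment notes, a predecessor cached from an earlier pass may itself already be gone. A self-contained sketch of that pattern on an ordinary doubly linked list (all names and values invented, using malloc/free in place of the driver's demote path):

#include <stdio.h>
#include <stdlib.h>

struct node { int val; struct node *prev, *next; };

/* find the next node with the given key, starting after 'start' */
static struct node *find_from(struct node *head, struct node *start, int key)
{
	struct node *n = start ? start->next : head->next;

	for (; n != head; n = n->next)
		if (n->val == key)
			return n;
	return NULL;
}

static void del_node(struct node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	free(n);
}

int main(void)
{
	struct node head = { 0, &head, &head };
	struct node *n, *prev;
	int i;

	for (i = 1; i <= 5; i++) {          /* build list 1,0,1,0,1 */
		n = malloc(sizeof(*n));
		n->val = i % 2;
		n->prev = head.prev;
		n->next = &head;
		head.prev->next = n;
		head.prev = n;
	}

	n = NULL;
	while ((n = find_from(&head, n, 1))) {
		/* compute prev now; a value cached earlier may be freed */
		prev = (n->prev == &head) ? NULL : n->prev;
		del_node(n);                /* "demote" the current entry */
		n = prev;                   /* resume from the predecessor */
	}

	for (n = head.next; n != &head; n = n->next)
		printf("%d ", n->val);      /* prints the surviving zeros */
	printf("\n");
	return 0;
}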
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
index 560a60e522f9..200d324e6d99 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
@@ -14,8 +14,23 @@
#include "spectrum_span.h"
#include "spectrum_switchdev.h"
+static u64 mlxsw_sp_span_occ_get(void *priv)
+{
+ const struct mlxsw_sp *mlxsw_sp = priv;
+ u64 occ = 0;
+ int i;
+
+ for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
+ if (mlxsw_sp->span.entries[i].ref_count)
+ occ++;
+ }
+
+ return occ;
+}
+
int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
{
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
int i;
if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_SPAN))
@@ -36,13 +51,19 @@ int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
curr->id = i;
}
+ devlink_resource_occ_get_register(devlink, MLXSW_SP_RESOURCE_SPAN,
+ mlxsw_sp_span_occ_get, mlxsw_sp);
+
return 0;
}
void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp)
{
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
int i;
+ devlink_resource_occ_get_unregister(devlink, MLXSW_SP_RESOURCE_SPAN);
+
for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 5ecb45118400..a3af171c6358 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -2591,7 +2591,7 @@ __mlxsw_sp_fdb_notify_mac_uc_tunnel_process(struct mlxsw_sp *mlxsw_sp,
if (err)
return err;
- dev = __dev_get_by_index(&init_net, nve_ifindex);
+ dev = __dev_get_by_index(mlxsw_sp_net(mlxsw_sp), nve_ifindex);
if (!dev)
return -EINVAL;
*nve_dev = dev;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
index 7c03b661ae7e..e0d7c49ffae0 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
@@ -13,16 +13,27 @@
static void mlxsw_sp_rx_drop_listener(struct sk_buff *skb, u8 local_port,
void *priv);
+static void mlxsw_sp_rx_exception_listener(struct sk_buff *skb, u8 local_port,
+ void *trap_ctx);
#define MLXSW_SP_TRAP_DROP(_id, _group_id) \
DEVLINK_TRAP_GENERIC(DROP, DROP, _id, \
DEVLINK_TRAP_GROUP_GENERIC(_group_id), \
MLXSW_SP_TRAP_METADATA)
+#define MLXSW_SP_TRAP_EXCEPTION(_id, _group_id) \
+ DEVLINK_TRAP_GENERIC(EXCEPTION, TRAP, _id, \
+ DEVLINK_TRAP_GROUP_GENERIC(_group_id), \
+ MLXSW_SP_TRAP_METADATA)
+
#define MLXSW_SP_RXL_DISCARD(_id, _group_id) \
MLXSW_RXL(mlxsw_sp_rx_drop_listener, DISCARD_##_id, SET_FW_DEFAULT, \
false, SP_##_group_id, DISCARD)
+#define MLXSW_SP_RXL_EXCEPTION(_id, _group_id, _action) \
+ MLXSW_RXL(mlxsw_sp_rx_exception_listener, _id, \
+ _action, false, SP_##_group_id, DISCARD)
+
static struct devlink_trap mlxsw_sp_traps_arr[] = {
MLXSW_SP_TRAP_DROP(SMAC_MC, L2_DROPS),
MLXSW_SP_TRAP_DROP(VLAN_TAG_MISMATCH, L2_DROPS),
@@ -30,6 +41,23 @@ static struct devlink_trap mlxsw_sp_traps_arr[] = {
MLXSW_SP_TRAP_DROP(INGRESS_STP_FILTER, L2_DROPS),
MLXSW_SP_TRAP_DROP(EMPTY_TX_LIST, L2_DROPS),
MLXSW_SP_TRAP_DROP(PORT_LOOPBACK_FILTER, L2_DROPS),
+ MLXSW_SP_TRAP_DROP(BLACKHOLE_ROUTE, L3_DROPS),
+ MLXSW_SP_TRAP_DROP(NON_IP_PACKET, L3_DROPS),
+ MLXSW_SP_TRAP_DROP(UC_DIP_MC_DMAC, L3_DROPS),
+ MLXSW_SP_TRAP_DROP(DIP_LB, L3_DROPS),
+ MLXSW_SP_TRAP_DROP(SIP_MC, L3_DROPS),
+ MLXSW_SP_TRAP_DROP(SIP_LB, L3_DROPS),
+ MLXSW_SP_TRAP_DROP(CORRUPTED_IP_HDR, L3_DROPS),
+ MLXSW_SP_TRAP_DROP(IPV4_SIP_BC, L3_DROPS),
+ MLXSW_SP_TRAP_DROP(IPV6_MC_DIP_RESERVED_SCOPE, L3_DROPS),
+ MLXSW_SP_TRAP_DROP(IPV6_MC_DIP_INTERFACE_LOCAL_SCOPE, L3_DROPS),
+ MLXSW_SP_TRAP_EXCEPTION(MTU_ERROR, L3_DROPS),
+ MLXSW_SP_TRAP_EXCEPTION(TTL_ERROR, L3_DROPS),
+ MLXSW_SP_TRAP_EXCEPTION(RPF, L3_DROPS),
+ MLXSW_SP_TRAP_EXCEPTION(REJECT_ROUTE, L3_DROPS),
+ MLXSW_SP_TRAP_EXCEPTION(UNRESOLVED_NEIGH, L3_DROPS),
+ MLXSW_SP_TRAP_EXCEPTION(IPV4_LPM_UNICAST_MISS, L3_DROPS),
+ MLXSW_SP_TRAP_EXCEPTION(IPV6_LPM_UNICAST_MISS, L3_DROPS),
};
static struct mlxsw_listener mlxsw_sp_listeners_arr[] = {
@@ -40,6 +68,28 @@ static struct mlxsw_listener mlxsw_sp_listeners_arr[] = {
MLXSW_SP_RXL_DISCARD(LOOKUP_SWITCH_UC, L2_DISCARDS),
MLXSW_SP_RXL_DISCARD(LOOKUP_SWITCH_MC_NULL, L2_DISCARDS),
MLXSW_SP_RXL_DISCARD(LOOKUP_SWITCH_LB, L2_DISCARDS),
+ MLXSW_SP_RXL_DISCARD(ROUTER2, L3_DISCARDS),
+ MLXSW_SP_RXL_DISCARD(ING_ROUTER_NON_IP_PACKET, L3_DISCARDS),
+ MLXSW_SP_RXL_DISCARD(ING_ROUTER_UC_DIP_MC_DMAC, L3_DISCARDS),
+ MLXSW_SP_RXL_DISCARD(ING_ROUTER_DIP_LB, L3_DISCARDS),
+ MLXSW_SP_RXL_DISCARD(ING_ROUTER_SIP_MC, L3_DISCARDS),
+ MLXSW_SP_RXL_DISCARD(ING_ROUTER_SIP_LB, L3_DISCARDS),
+ MLXSW_SP_RXL_DISCARD(ING_ROUTER_CORRUPTED_IP_HDR, L3_DISCARDS),
+ MLXSW_SP_RXL_DISCARD(ING_ROUTER_IPV4_SIP_BC, L3_DISCARDS),
+ MLXSW_SP_RXL_DISCARD(IPV6_MC_DIP_RESERVED_SCOPE, L3_DISCARDS),
+ MLXSW_SP_RXL_DISCARD(IPV6_MC_DIP_INTERFACE_LOCAL_SCOPE, L3_DISCARDS),
+ MLXSW_SP_RXL_EXCEPTION(MTUERROR, ROUTER_EXP, TRAP_TO_CPU),
+ MLXSW_SP_RXL_EXCEPTION(TTLERROR, ROUTER_EXP, TRAP_TO_CPU),
+ MLXSW_SP_RXL_EXCEPTION(RPF, RPF, TRAP_TO_CPU),
+ MLXSW_SP_RXL_EXCEPTION(RTR_INGRESS1, REMOTE_ROUTE, TRAP_TO_CPU),
+ MLXSW_SP_RXL_EXCEPTION(HOST_MISS_IPV4, HOST_MISS, TRAP_TO_CPU),
+ MLXSW_SP_RXL_EXCEPTION(HOST_MISS_IPV6, HOST_MISS, TRAP_TO_CPU),
+ MLXSW_SP_RXL_EXCEPTION(DISCARD_ROUTER3, REMOTE_ROUTE,
+ TRAP_EXCEPTION_TO_CPU),
+ MLXSW_SP_RXL_EXCEPTION(DISCARD_ROUTER_LPM4, ROUTER_EXP,
+ TRAP_EXCEPTION_TO_CPU),
+ MLXSW_SP_RXL_EXCEPTION(DISCARD_ROUTER_LPM6, ROUTER_EXP,
+ TRAP_EXCEPTION_TO_CPU),
};
/* Mapping between hardware trap and devlink trap. Multiple hardware traps can
@@ -54,6 +104,25 @@ static u16 mlxsw_sp_listener_devlink_map[] = {
DEVLINK_TRAP_GENERIC_ID_EMPTY_TX_LIST,
DEVLINK_TRAP_GENERIC_ID_EMPTY_TX_LIST,
DEVLINK_TRAP_GENERIC_ID_PORT_LOOPBACK_FILTER,
+ DEVLINK_TRAP_GENERIC_ID_BLACKHOLE_ROUTE,
+ DEVLINK_TRAP_GENERIC_ID_NON_IP_PACKET,
+ DEVLINK_TRAP_GENERIC_ID_UC_DIP_MC_DMAC,
+ DEVLINK_TRAP_GENERIC_ID_DIP_LB,
+ DEVLINK_TRAP_GENERIC_ID_SIP_MC,
+ DEVLINK_TRAP_GENERIC_ID_SIP_LB,
+ DEVLINK_TRAP_GENERIC_ID_CORRUPTED_IP_HDR,
+ DEVLINK_TRAP_GENERIC_ID_IPV4_SIP_BC,
+ DEVLINK_TRAP_GENERIC_ID_IPV6_MC_DIP_RESERVED_SCOPE,
+ DEVLINK_TRAP_GENERIC_ID_IPV6_MC_DIP_INTERFACE_LOCAL_SCOPE,
+ DEVLINK_TRAP_GENERIC_ID_MTU_ERROR,
+ DEVLINK_TRAP_GENERIC_ID_TTL_ERROR,
+ DEVLINK_TRAP_GENERIC_ID_RPF,
+ DEVLINK_TRAP_GENERIC_ID_REJECT_ROUTE,
+ DEVLINK_TRAP_GENERIC_ID_UNRESOLVED_NEIGH,
+ DEVLINK_TRAP_GENERIC_ID_UNRESOLVED_NEIGH,
+ DEVLINK_TRAP_GENERIC_ID_UNRESOLVED_NEIGH,
+ DEVLINK_TRAP_GENERIC_ID_IPV4_LPM_UNICAST_MISS,
+ DEVLINK_TRAP_GENERIC_ID_IPV6_LPM_UNICAST_MISS,
};
static int mlxsw_sp_rx_listener(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
@@ -104,6 +173,30 @@ static void mlxsw_sp_rx_drop_listener(struct sk_buff *skb, u8 local_port,
consume_skb(skb);
}
+static void mlxsw_sp_rx_exception_listener(struct sk_buff *skb, u8 local_port,
+ void *trap_ctx)
+{
+ struct devlink_port *in_devlink_port;
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ struct mlxsw_sp *mlxsw_sp;
+ struct devlink *devlink;
+
+ mlxsw_sp = devlink_trap_ctx_priv(trap_ctx);
+ mlxsw_sp_port = mlxsw_sp->ports[local_port];
+
+ if (mlxsw_sp_rx_listener(mlxsw_sp, skb, local_port, mlxsw_sp_port))
+ return;
+
+ devlink = priv_to_devlink(mlxsw_sp->core);
+ in_devlink_port = mlxsw_core_port_devlink_port_get(mlxsw_sp->core,
+ local_port);
+ skb_push(skb, ETH_HLEN);
+ devlink_trap_report(devlink, skb, trap_ctx, in_devlink_port);
+ skb_pull(skb, ETH_HLEN);
+ skb->offload_fwd_mark = 1;
+ netif_receive_skb(skb);
+}
+
int mlxsw_sp_devlink_traps_init(struct mlxsw_sp *mlxsw_sp)
{
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
@@ -211,6 +304,7 @@ mlxsw_sp_trap_group_policer_init(struct mlxsw_sp *mlxsw_sp,
u32 rate;
switch (group->id) {
+ case DEVLINK_TRAP_GROUP_GENERIC_ID_L3_DROPS:/* fall through */
case DEVLINK_TRAP_GROUP_GENERIC_ID_L2_DROPS:
policer_id = MLXSW_SP_DISCARD_POLICER_ID;
ir_units = MLXSW_REG_QPCR_IR_UNITS_M;
@@ -242,6 +336,12 @@ __mlxsw_sp_trap_group_init(struct mlxsw_sp *mlxsw_sp,
priority = 0;
tc = 1;
break;
+ case DEVLINK_TRAP_GROUP_GENERIC_ID_L3_DROPS:
+ group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_L3_DISCARDS;
+ policer_id = MLXSW_SP_DISCARD_POLICER_ID;
+ priority = 0;
+ tc = 1;
+ break;
default:
return -EINVAL;
}
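The trap tables above rely on a parallel-array scheme: mlxsw_sp_listener_devlink_map[] holds, at index i, the generic devlink trap ID that the i-th entry of mlxsw_sp_listeners_arr[] reports into, which is how several hardware traps (for instance the three UNRESOLVED_NEIGH sources) funnel into one devlink trap. A toy illustration of the lookup, with invented IDs in place of the real trap enums:

#include <stdio.h>

enum hw_trap { HW_MTUERROR, HW_TTLERROR, HW_HOST_MISS_V4, HW_HOST_MISS_V6 };
enum dl_trap { DL_MTU_ERROR, DL_TTL_ERROR, DL_UNRESOLVED_NEIGH };

static const enum hw_trap listeners[] = {
	HW_MTUERROR, HW_TTLERROR, HW_HOST_MISS_V4, HW_HOST_MISS_V6,
};

/* same order as listeners[]: index i maps listener i to its devlink trap,
 * so two hardware traps may share one devlink trap ID
 */
static const enum dl_trap devlink_map[] = {
	DL_MTU_ERROR, DL_TTL_ERROR, DL_UNRESOLVED_NEIGH, DL_UNRESOLVED_NEIGH,
};

int main(void)
{
	size_t i;

	for (i = 0; i < sizeof(listeners) / sizeof(listeners[0]); i++)
		printf("hw listener %zu -> devlink trap %d\n", i,
		       devlink_map[i]);
	return 0;
}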
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchib.c b/drivers/net/ethernet/mellanox/mlxsw/switchib.c
index 0d9356b3f65d..4ff1e623aa76 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/switchib.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/switchib.c
@@ -446,7 +446,8 @@ static int mlxsw_sib_basic_trap_groups_set(struct mlxsw_core *mlxsw_core)
}
static int mlxsw_sib_init(struct mlxsw_core *mlxsw_core,
- const struct mlxsw_bus_info *mlxsw_bus_info)
+ const struct mlxsw_bus_info *mlxsw_bus_info,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sib *mlxsw_sib = mlxsw_core_driver_priv(mlxsw_core);
int err;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
index 1c14c051ee52..de6cb22f68b1 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
@@ -992,6 +992,7 @@ static int __mlxsw_sx_port_eth_create(struct mlxsw_sx *mlxsw_sx, u8 local_port,
if (!dev)
return -ENOMEM;
SET_NETDEV_DEV(dev, mlxsw_sx->bus_info->dev);
+ dev_net_set(dev, mlxsw_core_net(mlxsw_sx->core));
mlxsw_sx_port = netdev_priv(dev);
mlxsw_sx_port->dev = dev;
mlxsw_sx_port->mlxsw_sx = mlxsw_sx;
@@ -1563,7 +1564,8 @@ static int mlxsw_sx_basic_trap_groups_set(struct mlxsw_core *mlxsw_core)
}
static int mlxsw_sx_init(struct mlxsw_core *mlxsw_core,
- const struct mlxsw_bus_info *mlxsw_bus_info)
+ const struct mlxsw_bus_info *mlxsw_bus_info,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sx *mlxsw_sx = mlxsw_core_driver_priv(mlxsw_core);
int err;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h
index 7618f084cae9..0c1c142bb6b0 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/trap.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h
@@ -49,6 +49,7 @@ enum {
MLXSW_TRAP_ID_IPV6_DHCP = 0x69,
MLXSW_TRAP_ID_IPV6_ALL_ROUTERS_LINK = 0x6F,
MLXSW_TRAP_ID_RTR_INGRESS0 = 0x70,
+ MLXSW_TRAP_ID_RTR_INGRESS1 = 0x71,
MLXSW_TRAP_ID_IPV6_PIM = 0x79,
MLXSW_TRAP_ID_IPV6_VRRP = 0x7A,
MLXSW_TRAP_ID_IPV4_BGP = 0x88,
@@ -66,6 +67,8 @@ enum {
MLXSW_TRAP_ID_NVE_ENCAP_ARP = 0xBD,
MLXSW_TRAP_ID_ROUTER_ALERT_IPV4 = 0xD6,
MLXSW_TRAP_ID_ROUTER_ALERT_IPV6 = 0xD7,
+ MLXSW_TRAP_ID_DISCARD_ROUTER2 = 0x130,
+ MLXSW_TRAP_ID_DISCARD_ROUTER3 = 0x131,
MLXSW_TRAP_ID_DISCARD_ING_PACKET_SMAC_MC = 0x140,
MLXSW_TRAP_ID_DISCARD_ING_SWITCH_VTAG_ALLOW = 0x148,
MLXSW_TRAP_ID_DISCARD_ING_SWITCH_VLAN = 0x149,
@@ -73,6 +76,18 @@ enum {
MLXSW_TRAP_ID_DISCARD_LOOKUP_SWITCH_UC = 0x150,
MLXSW_TRAP_ID_DISCARD_LOOKUP_SWITCH_MC_NULL = 0x151,
MLXSW_TRAP_ID_DISCARD_LOOKUP_SWITCH_LB = 0x152,
+ MLXSW_TRAP_ID_DISCARD_ING_ROUTER_NON_IP_PACKET = 0x160,
+ MLXSW_TRAP_ID_DISCARD_ING_ROUTER_UC_DIP_MC_DMAC = 0x161,
+ MLXSW_TRAP_ID_DISCARD_ING_ROUTER_DIP_LB = 0x162,
+ MLXSW_TRAP_ID_DISCARD_ING_ROUTER_SIP_MC = 0x163,
+ MLXSW_TRAP_ID_DISCARD_ING_ROUTER_SIP_LB = 0x165,
+ MLXSW_TRAP_ID_DISCARD_ING_ROUTER_CORRUPTED_IP_HDR = 0x167,
+ MLXSW_TRAP_ID_DISCARD_ING_ROUTER_IPV4_SIP_BC = 0x16A,
+ MLXSW_TRAP_ID_DISCARD_ING_ROUTER_IPV4_DIP_LOCAL_NET = 0x16B,
+ MLXSW_TRAP_ID_DISCARD_ROUTER_LPM4 = 0x17B,
+ MLXSW_TRAP_ID_DISCARD_ROUTER_LPM6 = 0x17C,
+ MLXSW_TRAP_ID_DISCARD_IPV6_MC_DIP_RESERVED_SCOPE = 0x1B0,
+ MLXSW_TRAP_ID_DISCARD_IPV6_MC_DIP_INTERFACE_LOCAL_SCOPE = 0x1B1,
MLXSW_TRAP_ID_ACL0 = 0x1C0,
/* Multicast trap used for routes with trap action */
MLXSW_TRAP_ID_ACL1 = 0x1C1,
diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.c b/drivers/net/ethernet/microchip/lan743x_ptp.c
index 57b26c2acf87..afe52463dc57 100644
--- a/drivers/net/ethernet/microchip/lan743x_ptp.c
+++ b/drivers/net/ethernet/microchip/lan743x_ptp.c
@@ -11,7 +11,9 @@
#include "lan743x_ptp.h"
-#define LAN743X_NUMBER_OF_GPIO (12)
+#define LAN743X_LED0_ENABLE 20 /* LED0 offset in HW_CFG */
+#define LAN743X_LED_ENABLE(pin) BIT(LAN743X_LED0_ENABLE + (pin))
+
#define LAN743X_PTP_MAX_FREQ_ADJ_IN_PPB (31249999)
#define LAN743X_PTP_MAX_FINE_ADJ_IN_SCALED_PPM (2047999934)
@@ -139,19 +141,20 @@ done:
spin_unlock_bh(&ptp->tx_ts_lock);
}
-static int lan743x_ptp_reserve_event_ch(struct lan743x_adapter *adapter)
+static int lan743x_ptp_reserve_event_ch(struct lan743x_adapter *adapter,
+ int event_channel)
{
struct lan743x_ptp *ptp = &adapter->ptp;
int result = -ENODEV;
- int index = 0;
mutex_lock(&ptp->command_lock);
- for (index = 0; index < LAN743X_PTP_NUMBER_OF_EVENT_CHANNELS; index++) {
- if (!(test_bit(index, &ptp->used_event_ch))) {
- ptp->used_event_ch |= BIT(index);
- result = index;
- break;
- }
+ if (!(test_bit(event_channel, &ptp->used_event_ch))) {
+ ptp->used_event_ch |= BIT(event_channel);
+ result = event_channel;
+ } else {
+ netif_warn(adapter, drv, adapter->netdev,
+ "attempted to reserved a used event_channel = %d\n",
+ event_channel);
}
mutex_unlock(&ptp->command_lock);
return result;
@@ -179,12 +182,62 @@ static void lan743x_ptp_clock_get(struct lan743x_adapter *adapter,
static void lan743x_ptp_clock_step(struct lan743x_adapter *adapter,
s64 time_step_ns);
+static void lan743x_led_mux_enable(struct lan743x_adapter *adapter,
+ int pin, bool enable)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+
+ if (ptp->leds_multiplexed &&
+ ptp->led_enabled[pin]) {
+ u32 val = lan743x_csr_read(adapter, HW_CFG);
+
+ if (enable)
+ val |= LAN743X_LED_ENABLE(pin);
+ else
+ val &= ~LAN743X_LED_ENABLE(pin);
+
+ lan743x_csr_write(adapter, HW_CFG, val);
+ }
+}
+
+static void lan743x_led_mux_save(struct lan743x_adapter *adapter)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+ u32 id_rev = adapter->csr.id_rev & ID_REV_ID_MASK_;
+
+ if (id_rev == ID_REV_ID_LAN7430_) {
+ int i;
+ u32 val = lan743x_csr_read(adapter, HW_CFG);
+
+ for (i = 0; i < LAN7430_N_LED; i++) {
+ bool led_enabled = (val & LAN743X_LED_ENABLE(i)) != 0;
+
+ ptp->led_enabled[i] = led_enabled;
+ }
+ ptp->leds_multiplexed = true;
+ } else {
+ ptp->leds_multiplexed = false;
+ }
+}
+
+static void lan743x_led_mux_restore(struct lan743x_adapter *adapter)
+{
+ u32 id_rev = adapter->csr.id_rev & ID_REV_ID_MASK_;
+
+ if (id_rev == ID_REV_ID_LAN7430_) {
+ int i;
+
+ for (i = 0; i < LAN7430_N_LED; i++)
+ lan743x_led_mux_enable(adapter, i, true);
+ }
+}
+
static int lan743x_gpio_rsrv_ptp_out(struct lan743x_adapter *adapter,
- int bit, int ptp_channel)
+ int pin, int event_channel)
{
struct lan743x_gpio *gpio = &adapter->gpio;
unsigned long irq_flags = 0;
- int bit_mask = BIT(bit);
+ int bit_mask = BIT(pin);
int ret = -EBUSY;
spin_lock_irqsave(&gpio->gpio_lock, irq_flags);
@@ -194,41 +247,44 @@ static int lan743x_gpio_rsrv_ptp_out(struct lan743x_adapter *adapter,
gpio->output_bits |= bit_mask;
gpio->ptp_bits |= bit_mask;
+ /* assign pin to GPIO function */
+ lan743x_led_mux_enable(adapter, pin, false);
+
/* set as output, and zero initial value */
- gpio->gpio_cfg0 |= GPIO_CFG0_GPIO_DIR_BIT_(bit);
- gpio->gpio_cfg0 &= ~GPIO_CFG0_GPIO_DATA_BIT_(bit);
+ gpio->gpio_cfg0 |= GPIO_CFG0_GPIO_DIR_BIT_(pin);
+ gpio->gpio_cfg0 &= ~GPIO_CFG0_GPIO_DATA_BIT_(pin);
lan743x_csr_write(adapter, GPIO_CFG0, gpio->gpio_cfg0);
/* enable gpio, and set buffer type to push pull */
- gpio->gpio_cfg1 &= ~GPIO_CFG1_GPIOEN_BIT_(bit);
- gpio->gpio_cfg1 |= GPIO_CFG1_GPIOBUF_BIT_(bit);
+ gpio->gpio_cfg1 &= ~GPIO_CFG1_GPIOEN_BIT_(pin);
+ gpio->gpio_cfg1 |= GPIO_CFG1_GPIOBUF_BIT_(pin);
lan743x_csr_write(adapter, GPIO_CFG1, gpio->gpio_cfg1);
/* set 1588 polarity to high */
- gpio->gpio_cfg2 |= GPIO_CFG2_1588_POL_BIT_(bit);
+ gpio->gpio_cfg2 |= GPIO_CFG2_1588_POL_BIT_(pin);
lan743x_csr_write(adapter, GPIO_CFG2, gpio->gpio_cfg2);
- if (!ptp_channel) {
+ if (event_channel == 0) {
/* use channel A */
- gpio->gpio_cfg3 &= ~GPIO_CFG3_1588_CH_SEL_BIT_(bit);
+ gpio->gpio_cfg3 &= ~GPIO_CFG3_1588_CH_SEL_BIT_(pin);
} else {
/* use channel B */
- gpio->gpio_cfg3 |= GPIO_CFG3_1588_CH_SEL_BIT_(bit);
+ gpio->gpio_cfg3 |= GPIO_CFG3_1588_CH_SEL_BIT_(pin);
}
- gpio->gpio_cfg3 |= GPIO_CFG3_1588_OE_BIT_(bit);
+ gpio->gpio_cfg3 |= GPIO_CFG3_1588_OE_BIT_(pin);
lan743x_csr_write(adapter, GPIO_CFG3, gpio->gpio_cfg3);
- ret = bit;
+ ret = pin;
}
spin_unlock_irqrestore(&gpio->gpio_lock, irq_flags);
return ret;
}
-static void lan743x_gpio_release(struct lan743x_adapter *adapter, int bit)
+static void lan743x_gpio_release(struct lan743x_adapter *adapter, int pin)
{
struct lan743x_gpio *gpio = &adapter->gpio;
unsigned long irq_flags = 0;
- int bit_mask = BIT(bit);
+ int bit_mask = BIT(pin);
spin_lock_irqsave(&gpio->gpio_lock, irq_flags);
if (gpio->used_bits & bit_mask) {
@@ -239,21 +295,24 @@ static void lan743x_gpio_release(struct lan743x_adapter *adapter, int bit)
if (gpio->ptp_bits & bit_mask) {
gpio->ptp_bits &= ~bit_mask;
/* disable ptp output */
- gpio->gpio_cfg3 &= ~GPIO_CFG3_1588_OE_BIT_(bit);
+ gpio->gpio_cfg3 &= ~GPIO_CFG3_1588_OE_BIT_(pin);
lan743x_csr_write(adapter, GPIO_CFG3,
gpio->gpio_cfg3);
}
/* release gpio output */
/* disable gpio */
- gpio->gpio_cfg1 |= GPIO_CFG1_GPIOEN_BIT_(bit);
- gpio->gpio_cfg1 &= ~GPIO_CFG1_GPIOBUF_BIT_(bit);
+ gpio->gpio_cfg1 |= GPIO_CFG1_GPIOEN_BIT_(pin);
+ gpio->gpio_cfg1 &= ~GPIO_CFG1_GPIOBUF_BIT_(pin);
lan743x_csr_write(adapter, GPIO_CFG1, gpio->gpio_cfg1);
/* reset back to input */
- gpio->gpio_cfg0 &= ~GPIO_CFG0_GPIO_DIR_BIT_(bit);
- gpio->gpio_cfg0 &= ~GPIO_CFG0_GPIO_DATA_BIT_(bit);
+ gpio->gpio_cfg0 &= ~GPIO_CFG0_GPIO_DIR_BIT_(pin);
+ gpio->gpio_cfg0 &= ~GPIO_CFG0_GPIO_DATA_BIT_(pin);
lan743x_csr_write(adapter, GPIO_CFG0, gpio->gpio_cfg0);
+
+ /* assign pin to original function */
+ lan743x_led_mux_enable(adapter, pin, true);
}
}
spin_unlock_irqrestore(&gpio->gpio_lock, irq_flags);
@@ -391,89 +450,95 @@ static int lan743x_ptpci_settime64(struct ptp_clock_info *ptpci,
return 0;
}
-static void lan743x_ptp_perout_off(struct lan743x_adapter *adapter)
+static void lan743x_ptp_perout_off(struct lan743x_adapter *adapter,
+ unsigned int index)
{
struct lan743x_ptp *ptp = &adapter->ptp;
u32 general_config = 0;
+ struct lan743x_ptp_perout *perout = &ptp->perout[index];
- if (ptp->perout_gpio_bit >= 0) {
- lan743x_gpio_release(adapter, ptp->perout_gpio_bit);
- ptp->perout_gpio_bit = -1;
+ if (perout->gpio_pin >= 0) {
+ lan743x_gpio_release(adapter, perout->gpio_pin);
+ perout->gpio_pin = -1;
}
- if (ptp->perout_event_ch >= 0) {
+ if (perout->event_ch >= 0) {
/* set target to far in the future, effectively disabling it */
lan743x_csr_write(adapter,
- PTP_CLOCK_TARGET_SEC_X(ptp->perout_event_ch),
+ PTP_CLOCK_TARGET_SEC_X(perout->event_ch),
0xFFFF0000);
lan743x_csr_write(adapter,
- PTP_CLOCK_TARGET_NS_X(ptp->perout_event_ch),
+ PTP_CLOCK_TARGET_NS_X(perout->event_ch),
0);
general_config = lan743x_csr_read(adapter, PTP_GENERAL_CONFIG);
general_config |= PTP_GENERAL_CONFIG_RELOAD_ADD_X_
- (ptp->perout_event_ch);
+ (perout->event_ch);
lan743x_csr_write(adapter, PTP_GENERAL_CONFIG, general_config);
- lan743x_ptp_release_event_ch(adapter, ptp->perout_event_ch);
- ptp->perout_event_ch = -1;
+ lan743x_ptp_release_event_ch(adapter, perout->event_ch);
+ perout->event_ch = -1;
}
}
static int lan743x_ptp_perout(struct lan743x_adapter *adapter, int on,
- struct ptp_perout_request *perout)
+ struct ptp_perout_request *perout_request)
{
struct lan743x_ptp *ptp = &adapter->ptp;
u32 period_sec = 0, period_nsec = 0;
u32 start_sec = 0, start_nsec = 0;
u32 general_config = 0;
int pulse_width = 0;
- int perout_bit = 0;
-
- if (!on) {
- lan743x_ptp_perout_off(adapter);
+ int perout_pin = 0;
+ unsigned int index = perout_request->index;
+ struct lan743x_ptp_perout *perout = &ptp->perout[index];
+
+ /* Reject requests with unsupported flags */
+ if (perout_request->flags)
+ return -EOPNOTSUPP;
+
+ if (on) {
+ perout_pin = ptp_find_pin(ptp->ptp_clock, PTP_PF_PEROUT,
+ perout_request->index);
+ if (perout_pin < 0)
+ return -EBUSY;
+ } else {
+ lan743x_ptp_perout_off(adapter, index);
return 0;
}
- if (ptp->perout_event_ch >= 0 ||
- ptp->perout_gpio_bit >= 0) {
+ if (perout->event_ch >= 0 ||
+ perout->gpio_pin >= 0) {
/* already on, turn off first */
- lan743x_ptp_perout_off(adapter);
+ lan743x_ptp_perout_off(adapter, index);
}
- ptp->perout_event_ch = lan743x_ptp_reserve_event_ch(adapter);
- if (ptp->perout_event_ch < 0) {
+ perout->event_ch = lan743x_ptp_reserve_event_ch(adapter, index);
+
+ if (perout->event_ch < 0) {
netif_warn(adapter, drv, adapter->netdev,
- "Failed to reserve event channel for PEROUT\n");
+ "Failed to reserve event channel %d for PEROUT\n",
+ index);
goto failed;
}
- switch (adapter->csr.id_rev & ID_REV_ID_MASK_) {
- case ID_REV_ID_LAN7430_:
- perout_bit = 2;/* GPIO 2 is preferred on EVB LAN7430 */
- break;
- case ID_REV_ID_LAN7431_:
- perout_bit = 4;/* GPIO 4 is preferred on EVB LAN7431 */
- break;
- }
+ perout->gpio_pin = lan743x_gpio_rsrv_ptp_out(adapter,
+ perout_pin,
+ perout->event_ch);
- ptp->perout_gpio_bit = lan743x_gpio_rsrv_ptp_out(adapter,
- perout_bit,
- ptp->perout_event_ch);
-
- if (ptp->perout_gpio_bit < 0) {
+ if (perout->gpio_pin < 0) {
netif_warn(adapter, drv, adapter->netdev,
"Failed to reserve gpio %d for PEROUT\n",
- perout_bit);
+ perout_pin);
goto failed;
}
- start_sec = perout->start.sec;
- start_sec += perout->start.nsec / 1000000000;
- start_nsec = perout->start.nsec % 1000000000;
+ start_sec = perout_request->start.sec;
+ start_sec += perout_request->start.nsec / 1000000000;
+ start_nsec = perout_request->start.nsec % 1000000000;
- period_sec = perout->period.sec;
- period_sec += perout->period.nsec / 1000000000;
- period_nsec = perout->period.nsec % 1000000000;
+ period_sec = perout_request->period.sec;
+ period_sec += perout_request->period.nsec / 1000000000;
+ period_nsec = perout_request->period.nsec % 1000000000;
if (period_sec == 0) {
if (period_nsec >= 400000000) {
@@ -499,41 +564,41 @@ static int lan743x_ptp_perout(struct lan743x_adapter *adapter, int on,
/* turn off by setting target far in future */
lan743x_csr_write(adapter,
- PTP_CLOCK_TARGET_SEC_X(ptp->perout_event_ch),
+ PTP_CLOCK_TARGET_SEC_X(perout->event_ch),
0xFFFF0000);
lan743x_csr_write(adapter,
- PTP_CLOCK_TARGET_NS_X(ptp->perout_event_ch), 0);
+ PTP_CLOCK_TARGET_NS_X(perout->event_ch), 0);
/* Configure to pulse every period */
general_config = lan743x_csr_read(adapter, PTP_GENERAL_CONFIG);
general_config &= ~(PTP_GENERAL_CONFIG_CLOCK_EVENT_X_MASK_
- (ptp->perout_event_ch));
+ (perout->event_ch));
general_config |= PTP_GENERAL_CONFIG_CLOCK_EVENT_X_SET_
- (ptp->perout_event_ch, pulse_width);
+ (perout->event_ch, pulse_width);
general_config &= ~PTP_GENERAL_CONFIG_RELOAD_ADD_X_
- (ptp->perout_event_ch);
+ (perout->event_ch);
lan743x_csr_write(adapter, PTP_GENERAL_CONFIG, general_config);
/* set the reload to one toggle cycle */
lan743x_csr_write(adapter,
- PTP_CLOCK_TARGET_RELOAD_SEC_X(ptp->perout_event_ch),
+ PTP_CLOCK_TARGET_RELOAD_SEC_X(perout->event_ch),
period_sec);
lan743x_csr_write(adapter,
- PTP_CLOCK_TARGET_RELOAD_NS_X(ptp->perout_event_ch),
+ PTP_CLOCK_TARGET_RELOAD_NS_X(perout->event_ch),
period_nsec);
/* set the start time */
lan743x_csr_write(adapter,
- PTP_CLOCK_TARGET_SEC_X(ptp->perout_event_ch),
+ PTP_CLOCK_TARGET_SEC_X(perout->event_ch),
start_sec);
lan743x_csr_write(adapter,
- PTP_CLOCK_TARGET_NS_X(ptp->perout_event_ch),
+ PTP_CLOCK_TARGET_NS_X(perout->event_ch),
start_nsec);
return 0;
failed:
- lan743x_ptp_perout_off(adapter);
+ lan743x_ptp_perout_off(adapter, index);
return -ENODEV;
}
@@ -550,7 +615,7 @@ static int lan743x_ptpci_enable(struct ptp_clock_info *ptpci,
case PTP_CLK_REQ_EXTTS:
return -EINVAL;
case PTP_CLK_REQ_PEROUT:
- if (request->perout.index == 0)
+ if (request->perout.index < ptpci->n_per_out)
return lan743x_ptp_perout(adapter, on,
&request->perout);
return -EINVAL;
@@ -568,6 +633,29 @@ static int lan743x_ptpci_enable(struct ptp_clock_info *ptpci,
return 0;
}
+static int lan743x_ptpci_verify_pin_config(struct ptp_clock_info *ptp,
+ unsigned int pin,
+ enum ptp_pin_function func,
+ unsigned int chan)
+{
+ int result = 0;
+
+ /* Confirm the requested function is supported. Parameter
+ * validation is done by the caller.
+ */
+ switch (func) {
+ case PTP_PF_NONE:
+ case PTP_PF_PEROUT:
+ break;
+ case PTP_PF_EXTTS:
+ case PTP_PF_PHYSYNC:
+ default:
+ result = -1;
+ break;
+ }
+ return result;
+}
+
static long lan743x_ptpci_do_aux_work(struct ptp_clock_info *ptpci)
{
struct lan743x_ptp *ptp =
@@ -861,12 +949,19 @@ void lan743x_ptp_update_latency(struct lan743x_adapter *adapter,
int lan743x_ptp_init(struct lan743x_adapter *adapter)
{
struct lan743x_ptp *ptp = &adapter->ptp;
+ int i;
mutex_init(&ptp->command_lock);
spin_lock_init(&ptp->tx_ts_lock);
ptp->used_event_ch = 0;
- ptp->perout_event_ch = -1;
- ptp->perout_gpio_bit = -1;
+
+ for (i = 0; i < LAN743X_PTP_N_EVENT_CHAN; i++) {
+ ptp->perout[i].event_ch = -1;
+ ptp->perout[i].gpio_pin = -1;
+ }
+
+ lan743x_led_mux_save(adapter);
+
return 0;
}
@@ -875,6 +970,8 @@ int lan743x_ptp_open(struct lan743x_adapter *adapter)
struct lan743x_ptp *ptp = &adapter->ptp;
int ret = -ENODEV;
u32 temp;
+ int i;
+ int n_pins;
lan743x_ptp_reset(adapter);
lan743x_ptp_sync_to_system_clock(adapter);
@@ -890,10 +987,32 @@ int lan743x_ptp_open(struct lan743x_adapter *adapter)
if (!IS_ENABLED(CONFIG_PTP_1588_CLOCK))
return 0;
- snprintf(ptp->pin_config[0].name, 32, "lan743x_ptp_pin_0");
- ptp->pin_config[0].index = 0;
- ptp->pin_config[0].func = PTP_PF_PEROUT;
- ptp->pin_config[0].chan = 0;
+ switch (adapter->csr.id_rev & ID_REV_ID_MASK_) {
+ case ID_REV_ID_LAN7430_:
+ n_pins = LAN7430_N_GPIO;
+ break;
+ case ID_REV_ID_LAN7431_:
+ n_pins = LAN7431_N_GPIO;
+ break;
+ default:
+ netif_warn(adapter, drv, adapter->netdev,
+ "Unknown LAN743x (%08x). Assuming no GPIO\n",
+ adapter->csr.id_rev);
+ n_pins = 0;
+ break;
+ }
+
+ if (n_pins > LAN743X_PTP_N_GPIO)
+ n_pins = LAN743X_PTP_N_GPIO;
+
+ for (i = 0; i < n_pins; i++) {
+ struct ptp_pin_desc *ptp_pin = &ptp->pin_config[i];
+
+ snprintf(ptp_pin->name,
+ sizeof(ptp_pin->name), "lan743x_ptp_pin_%02d", i);
+ ptp_pin->index = i;
+ ptp_pin->func = PTP_PF_NONE;
+ }
ptp->ptp_clock_info.owner = THIS_MODULE;
snprintf(ptp->ptp_clock_info.name, 16, "%pm",
@@ -901,10 +1020,10 @@ int lan743x_ptp_open(struct lan743x_adapter *adapter)
ptp->ptp_clock_info.max_adj = LAN743X_PTP_MAX_FREQ_ADJ_IN_PPB;
ptp->ptp_clock_info.n_alarm = 0;
ptp->ptp_clock_info.n_ext_ts = 0;
- ptp->ptp_clock_info.n_per_out = 1;
- ptp->ptp_clock_info.n_pins = 0;
+ ptp->ptp_clock_info.n_per_out = LAN743X_PTP_N_EVENT_CHAN;
+ ptp->ptp_clock_info.n_pins = n_pins;
ptp->ptp_clock_info.pps = 0;
- ptp->ptp_clock_info.pin_config = NULL;
+ ptp->ptp_clock_info.pin_config = ptp->pin_config;
ptp->ptp_clock_info.adjfine = lan743x_ptpci_adjfine;
ptp->ptp_clock_info.adjfreq = lan743x_ptpci_adjfreq;
ptp->ptp_clock_info.adjtime = lan743x_ptpci_adjtime;
@@ -913,7 +1032,7 @@ int lan743x_ptp_open(struct lan743x_adapter *adapter)
ptp->ptp_clock_info.settime64 = lan743x_ptpci_settime64;
ptp->ptp_clock_info.enable = lan743x_ptpci_enable;
ptp->ptp_clock_info.do_aux_work = lan743x_ptpci_do_aux_work;
- ptp->ptp_clock_info.verify = NULL;
+ ptp->ptp_clock_info.verify = lan743x_ptpci_verify_pin_config;
ptp->ptp_clock = ptp_clock_register(&ptp->ptp_clock_info,
&adapter->pdev->dev);
@@ -939,7 +1058,7 @@ void lan743x_ptp_close(struct lan743x_adapter *adapter)
int index;
if (IS_ENABLED(CONFIG_PTP_1588_CLOCK) &&
- ptp->flags & PTP_FLAG_PTP_CLOCK_REGISTERED) {
+ (ptp->flags & PTP_FLAG_PTP_CLOCK_REGISTERED)) {
ptp_clock_unregister(ptp->ptp_clock);
ptp->ptp_clock = NULL;
ptp->flags &= ~PTP_FLAG_PTP_CLOCK_REGISTERED;
@@ -973,6 +1092,8 @@ void lan743x_ptp_close(struct lan743x_adapter *adapter)
ptp->pending_tx_timestamps = 0;
spin_unlock_bh(&ptp->tx_ts_lock);
+ lan743x_led_mux_restore(adapter);
+
lan743x_ptp_disable(adapter);
}
diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.h b/drivers/net/ethernet/microchip/lan743x_ptp.h
index 5fc1b3cd5e33..7663bf5d2e33 100644
--- a/drivers/net/ethernet/microchip/lan743x_ptp.h
+++ b/drivers/net/ethernet/microchip/lan743x_ptp.h
@@ -7,6 +7,18 @@
#include "linux/ptp_clock_kernel.h"
#include "linux/netdevice.h"
+#define LAN7430_N_LED 4
+#define LAN7430_N_GPIO 4 /* multiplexed with PHY LEDs */
+#define LAN7431_N_GPIO 12
+
+#define LAN743X_PTP_N_GPIO LAN7431_N_GPIO
+
+/* the number of periodic outputs is limited by the number of
+ * PTP clock event channels
+ */
+#define LAN743X_PTP_N_EVENT_CHAN 2
+#define LAN743X_PTP_N_PEROUT LAN743X_PTP_N_EVENT_CHAN
+
struct lan743x_adapter;
/* GPIO */
@@ -40,9 +52,14 @@ int lan743x_ptp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
#define LAN743X_PTP_NUMBER_OF_TX_TIMESTAMPS (4)
-#define PTP_FLAG_PTP_CLOCK_REGISTERED BIT(1)
+#define PTP_FLAG_PTP_CLOCK_REGISTERED BIT(1)
#define PTP_FLAG_ISR_ENABLED BIT(2)
+struct lan743x_ptp_perout {
+ int event_ch; /* PTP event channel (0=channel A, 1=channel B) */
+ int gpio_pin; /* GPIO pin where output appears */
+};
+
struct lan743x_ptp {
int flags;
@@ -51,13 +68,13 @@ struct lan743x_ptp {
struct ptp_clock *ptp_clock;
struct ptp_clock_info ptp_clock_info;
- struct ptp_pin_desc pin_config[1];
+ struct ptp_pin_desc pin_config[LAN743X_PTP_N_GPIO];
-#define LAN743X_PTP_NUMBER_OF_EVENT_CHANNELS (2)
unsigned long used_event_ch;
+ struct lan743x_ptp_perout perout[LAN743X_PTP_N_PEROUT];
- int perout_event_ch;
- int perout_gpio_bit;
+ bool leds_multiplexed;
+ bool led_enabled[LAN7430_N_LED];
/* tx_ts_lock: used to prevent concurrent access to timestamp arrays */
spinlock_t tx_ts_lock;
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index 672ea1342add..0e96ffab3b05 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -132,11 +132,11 @@ static void ocelot_mact_init(struct ocelot *ocelot)
ocelot_write(ocelot, MACACCESS_CMD_INIT, ANA_TABLES_MACACCESS);
}
-static void ocelot_vcap_enable(struct ocelot *ocelot, struct ocelot_port *port)
+static void ocelot_vcap_enable(struct ocelot *ocelot, int port)
{
ocelot_write_gix(ocelot, ANA_PORT_VCAP_S2_CFG_S2_ENA |
ANA_PORT_VCAP_S2_CFG_S2_IP6_CFG(0xa),
- ANA_PORT_VCAP_S2_CFG, port->chip_port);
+ ANA_PORT_VCAP_S2_CFG, port);
}
static inline u32 ocelot_vlant_read_vlanaccess(struct ocelot *ocelot)
@@ -169,117 +169,178 @@ static int ocelot_vlant_set_mask(struct ocelot *ocelot, u16 vid, u32 mask)
return ocelot_vlant_wait_for_completion(ocelot);
}
-static void ocelot_vlan_mode(struct ocelot_port *port,
+static void ocelot_vlan_mode(struct ocelot *ocelot, int port,
netdev_features_t features)
{
- struct ocelot *ocelot = port->ocelot;
- u8 p = port->chip_port;
u32 val;
/* Filtering */
val = ocelot_read(ocelot, ANA_VLANMASK);
if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
- val |= BIT(p);
+ val |= BIT(port);
else
- val &= ~BIT(p);
+ val &= ~BIT(port);
ocelot_write(ocelot, val, ANA_VLANMASK);
}
-static void ocelot_vlan_port_apply(struct ocelot *ocelot,
- struct ocelot_port *port)
+void ocelot_port_vlan_filtering(struct ocelot *ocelot, int port,
+ bool vlan_aware)
{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
u32 val;
- /* Ingress clasification (ANA_PORT_VLAN_CFG) */
- /* Default vlan to clasify for untagged frames (may be zero) */
- val = ANA_PORT_VLAN_CFG_VLAN_VID(port->pvid);
- if (port->vlan_aware)
- val |= ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA |
- ANA_PORT_VLAN_CFG_VLAN_POP_CNT(1);
-
+ if (vlan_aware)
+ val = ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA |
+ ANA_PORT_VLAN_CFG_VLAN_POP_CNT(1);
+ else
+ val = 0;
ocelot_rmw_gix(ocelot, val,
- ANA_PORT_VLAN_CFG_VLAN_VID_M |
ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA |
ANA_PORT_VLAN_CFG_VLAN_POP_CNT_M,
- ANA_PORT_VLAN_CFG, port->chip_port);
+ ANA_PORT_VLAN_CFG, port);
- /* Drop frames with multicast source address */
- val = ANA_PORT_DROP_CFG_DROP_MC_SMAC_ENA;
- if (port->vlan_aware && !port->vid)
+ if (vlan_aware && !ocelot_port->vid)
/* If port is vlan-aware and tagged, drop untagged and priority
* tagged frames.
*/
- val |= ANA_PORT_DROP_CFG_DROP_UNTAGGED_ENA |
+ val = ANA_PORT_DROP_CFG_DROP_UNTAGGED_ENA |
+ ANA_PORT_DROP_CFG_DROP_PRIO_S_TAGGED_ENA |
+ ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA;
+ else
+ val = 0;
+ ocelot_rmw_gix(ocelot, val,
+ ANA_PORT_DROP_CFG_DROP_UNTAGGED_ENA |
ANA_PORT_DROP_CFG_DROP_PRIO_S_TAGGED_ENA |
- ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA;
- ocelot_write_gix(ocelot, val, ANA_PORT_DROP_CFG, port->chip_port);
-
- /* Egress configuration (REW_TAG_CFG): VLAN tag type to 8021Q. */
- val = REW_TAG_CFG_TAG_TPID_CFG(0);
+ ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA,
+ ANA_PORT_DROP_CFG, port);
- if (port->vlan_aware) {
- if (port->vid)
+ if (vlan_aware) {
+ if (ocelot_port->vid)
/* Tag all frames except when VID == DEFAULT_VLAN */
val |= REW_TAG_CFG_TAG_CFG(1);
else
/* Tag all frames */
val |= REW_TAG_CFG_TAG_CFG(3);
+ } else {
+ /* Port tagging disabled. */
+ val = REW_TAG_CFG_TAG_CFG(0);
}
ocelot_rmw_gix(ocelot, val,
- REW_TAG_CFG_TAG_TPID_CFG_M |
REW_TAG_CFG_TAG_CFG_M,
- REW_TAG_CFG, port->chip_port);
+ REW_TAG_CFG, port);
+}
+EXPORT_SYMBOL(ocelot_port_vlan_filtering);
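
ocelot_port_vlan_filtering() deliberately touches only the awareness, drop and tag fields of each per-port register, leaving the PVID and TPID bits (now owned by other helpers) alone. A minimal sketch of the read-modify-write semantics assumed of ocelot_rmw_gix(); the in-tree helper is regmap-backed and takes a per-port register offset:

    /* Illustrative only: write 'val' into the bits selected by 'mask',
     * preserving everything else in the register.
     */
    static u32 example_rmw(u32 old, u32 val, u32 mask)
    {
            return (old & ~mask) | (val & mask);
    }
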
- /* Set default VLAN and tag type to 8021Q. */
- val = REW_PORT_VLAN_CFG_PORT_TPID(ETH_P_8021Q) |
- REW_PORT_VLAN_CFG_PORT_VID(port->vid);
- ocelot_rmw_gix(ocelot, val,
- REW_PORT_VLAN_CFG_PORT_TPID_M |
+static int ocelot_port_set_native_vlan(struct ocelot *ocelot, int port,
+ u16 vid)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+
+ if (ocelot_port->vid != vid) {
+ /* Always permit deleting the native VLAN (vid = 0) */
+ if (ocelot_port->vid && vid) {
+ dev_err(ocelot->dev,
+ "Port already has a native VLAN: %d\n",
+ ocelot_port->vid);
+ return -EBUSY;
+ }
+ ocelot_port->vid = vid;
+ }
+
+ ocelot_rmw_gix(ocelot, REW_PORT_VLAN_CFG_PORT_VID(vid),
REW_PORT_VLAN_CFG_PORT_VID_M,
- REW_PORT_VLAN_CFG, port->chip_port);
+ REW_PORT_VLAN_CFG, port);
+
+ return 0;
}
-static int ocelot_vlan_vid_add(struct net_device *dev, u16 vid, bool pvid,
- bool untagged)
+/* Default VLAN to classify untagged frames into (may be zero) */
+static void ocelot_port_set_pvid(struct ocelot *ocelot, int port, u16 pvid)
{
- struct ocelot_port *port = netdev_priv(dev);
- struct ocelot *ocelot = port->ocelot;
- int ret;
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
- /* Add the port MAC address to with the right VLAN information */
- ocelot_mact_learn(ocelot, PGID_CPU, dev->dev_addr, vid,
- ENTRYTYPE_LOCKED);
+ ocelot_rmw_gix(ocelot,
+ ANA_PORT_VLAN_CFG_VLAN_VID(pvid),
+ ANA_PORT_VLAN_CFG_VLAN_VID_M,
+ ANA_PORT_VLAN_CFG, port);
+
+ ocelot_port->pvid = pvid;
+}
+
+int ocelot_vlan_add(struct ocelot *ocelot, int port, u16 vid, bool pvid,
+ bool untagged)
+{
+ int ret;
/* Make the port a member of the VLAN */
- ocelot->vlan_mask[vid] |= BIT(port->chip_port);
+ ocelot->vlan_mask[vid] |= BIT(port);
ret = ocelot_vlant_set_mask(ocelot, vid, ocelot->vlan_mask[vid]);
if (ret)
return ret;
/* Default ingress vlan classification */
if (pvid)
- port->pvid = vid;
+ ocelot_port_set_pvid(ocelot, port, vid);
 /* Untagged egress vlan classification */
- if (untagged && port->vid != vid) {
- if (port->vid) {
- dev_err(ocelot->dev,
- "Port already has a native VLAN: %d\n",
- port->vid);
- return -EBUSY;
- }
- port->vid = vid;
+ if (untagged) {
+ ret = ocelot_port_set_native_vlan(ocelot, port, vid);
+ if (ret)
+ return ret;
}
- ocelot_vlan_port_apply(ocelot, port);
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_vlan_add);
+
+static int ocelot_vlan_vid_add(struct net_device *dev, u16 vid, bool pvid,
+ bool untagged)
+{
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot_port *ocelot_port = &priv->port;
+ struct ocelot *ocelot = ocelot_port->ocelot;
+ int port = priv->chip_port;
+ int ret;
+
+ ret = ocelot_vlan_add(ocelot, port, vid, pvid, untagged);
+ if (ret)
+ return ret;
+
+ /* Add the port MAC address with the right VLAN information */
+ ocelot_mact_learn(ocelot, PGID_CPU, dev->dev_addr, vid,
+ ENTRYTYPE_LOCKED);
+
+ return 0;
+}
+
+int ocelot_vlan_del(struct ocelot *ocelot, int port, u16 vid)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ int ret;
+
+ /* Stop the port from being a member of the vlan */
+ ocelot->vlan_mask[vid] &= ~BIT(port);
+ ret = ocelot_vlant_set_mask(ocelot, vid, ocelot->vlan_mask[vid]);
+ if (ret)
+ return ret;
+
+ /* Ingress */
+ if (ocelot_port->pvid == vid)
+ ocelot_port_set_pvid(ocelot, port, 0);
+
+ /* Egress */
+ if (ocelot_port->vid == vid)
+ ocelot_port_set_native_vlan(ocelot, port, 0);
return 0;
}
+EXPORT_SYMBOL(ocelot_vlan_del);
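
Since ocelot_vlan_add() and ocelot_vlan_del() now take an (ocelot, port) pair instead of a net_device, any front end can drive them. A hedged usage sketch; the port number and VID are invented for illustration:

    static int example_vlan_setup(struct ocelot *ocelot)
    {
            int err;

            /* Make port 2 an untagged member of VLAN 10 and use it as
             * the port's default (pvid) classification for untagged
             * ingress traffic.
             */
            err = ocelot_vlan_add(ocelot, 2, 10, true, true);
            if (err)
                    return err;

            /* ... and tear the membership down again */
            return ocelot_vlan_del(ocelot, 2, 10);
    }
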
static int ocelot_vlan_vid_del(struct net_device *dev, u16 vid)
{
- struct ocelot_port *port = netdev_priv(dev);
- struct ocelot *ocelot = port->ocelot;
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot *ocelot = priv->port.ocelot;
+ int port = priv->chip_port;
int ret;
/* 8021q removes VID 0 on module unload for all interfaces
@@ -289,24 +350,12 @@ static int ocelot_vlan_vid_del(struct net_device *dev, u16 vid)
if (vid == 0)
return 0;
- /* Del the port MAC address to with the right VLAN information */
- ocelot_mact_forget(ocelot, dev->dev_addr, vid);
-
- /* Stop the port from being a member of the vlan */
- ocelot->vlan_mask[vid] &= ~BIT(port->chip_port);
- ret = ocelot_vlant_set_mask(ocelot, vid, ocelot->vlan_mask[vid]);
+ ret = ocelot_vlan_del(ocelot, port, vid);
if (ret)
return ret;
- /* Ingress */
- if (port->pvid == vid)
- port->pvid = 0;
-
- /* Egress */
- if (port->vid == vid)
- port->vid = 0;
-
- ocelot_vlan_port_apply(ocelot, port);
+ /* Delete the port MAC address with the right VLAN information */
+ ocelot_mact_forget(ocelot, dev->dev_addr, vid);
return 0;
}
@@ -333,16 +382,11 @@ static void ocelot_vlan_init(struct ocelot *ocelot)
ocelot->vlan_mask[0] = GENMASK(ocelot->num_phys_ports - 1, 0);
ocelot_vlant_set_mask(ocelot, 0, ocelot->vlan_mask[0]);
- /* Configure the CPU port to be VLAN aware */
- ocelot_write_gix(ocelot, ANA_PORT_VLAN_CFG_VLAN_VID(0) |
- ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA |
- ANA_PORT_VLAN_CFG_VLAN_POP_CNT(1),
- ANA_PORT_VLAN_CFG, ocelot->num_phys_ports);
-
/* Set vlan ingress filter mask to all ports but the CPU port by
* default.
*/
- ocelot_write(ocelot, GENMASK(9, 0), ANA_VLANMASK);
+ ocelot_write(ocelot, GENMASK(ocelot->num_phys_ports - 1, 0),
+ ANA_VLANMASK);
for (port = 0; port < ocelot->num_phys_ports; port++) {
ocelot_write_gix(ocelot, 0, REW_PORT_VLAN_CFG, port);
@@ -362,14 +406,13 @@ static u16 ocelot_wm_enc(u16 value)
return value;
}
-static void ocelot_port_adjust_link(struct net_device *dev)
+void ocelot_adjust_link(struct ocelot *ocelot, int port,
+ struct phy_device *phydev)
{
- struct ocelot_port *port = netdev_priv(dev);
- struct ocelot *ocelot = port->ocelot;
- u8 p = port->chip_port;
- int speed, atop_wm, mode = 0;
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ int speed, mode = 0;
- switch (dev->phydev->speed) {
+ switch (phydev->speed) {
case SPEED_10:
speed = OCELOT_SPEED_10;
break;
@@ -385,87 +428,41 @@ static void ocelot_port_adjust_link(struct net_device *dev)
mode = DEV_MAC_MODE_CFG_GIGA_MODE_ENA;
break;
default:
- netdev_err(dev, "Unsupported PHY speed: %d\n",
- dev->phydev->speed);
+ dev_err(ocelot->dev, "Unsupported PHY speed on port %d: %d\n",
+ port, phydev->speed);
return;
}
- phy_print_status(dev->phydev);
+ phy_print_status(phydev);
- if (!dev->phydev->link)
+ if (!phydev->link)
return;
/* Only full duplex supported for now */
- ocelot_port_writel(port, DEV_MAC_MODE_CFG_FDX_ENA |
+ ocelot_port_writel(ocelot_port, DEV_MAC_MODE_CFG_FDX_ENA |
mode, DEV_MAC_MODE_CFG);
- /* Set MAC IFG Gaps
- * FDX: TX_IFG = 5, RX_IFG1 = RX_IFG2 = 0
- * !FDX: TX_IFG = 5, RX_IFG1 = RX_IFG2 = 5
- */
- ocelot_port_writel(port, DEV_MAC_IFG_CFG_TX_IFG(5), DEV_MAC_IFG_CFG);
-
- /* Load seed (0) and set MAC HDX late collision */
- ocelot_port_writel(port, DEV_MAC_HDX_CFG_LATE_COL_POS(67) |
- DEV_MAC_HDX_CFG_SEED_LOAD,
- DEV_MAC_HDX_CFG);
- mdelay(1);
- ocelot_port_writel(port, DEV_MAC_HDX_CFG_LATE_COL_POS(67),
- DEV_MAC_HDX_CFG);
-
- /* Disable HDX fast control */
- ocelot_port_writel(port, DEV_PORT_MISC_HDX_FAST_DIS, DEV_PORT_MISC);
-
- /* SGMII only for now */
- ocelot_port_writel(port, PCS1G_MODE_CFG_SGMII_MODE_ENA, PCS1G_MODE_CFG);
- ocelot_port_writel(port, PCS1G_SD_CFG_SD_SEL, PCS1G_SD_CFG);
-
- /* Enable PCS */
- ocelot_port_writel(port, PCS1G_CFG_PCS_ENA, PCS1G_CFG);
-
- /* No aneg on SGMII */
- ocelot_port_writel(port, 0, PCS1G_ANEG_CFG);
-
- /* No loopback */
- ocelot_port_writel(port, 0, PCS1G_LB_CFG);
-
- /* Set Max Length and maximum tags allowed */
- ocelot_port_writel(port, VLAN_ETH_FRAME_LEN, DEV_MAC_MAXLEN_CFG);
- ocelot_port_writel(port, DEV_MAC_TAGS_CFG_TAG_ID(ETH_P_8021AD) |
- DEV_MAC_TAGS_CFG_VLAN_AWR_ENA |
- DEV_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA,
- DEV_MAC_TAGS_CFG);
+ if (ocelot->ops->pcs_init)
+ ocelot->ops->pcs_init(ocelot, port);
/* Enable MAC module */
- ocelot_port_writel(port, DEV_MAC_ENA_CFG_RX_ENA |
+ ocelot_port_writel(ocelot_port, DEV_MAC_ENA_CFG_RX_ENA |
DEV_MAC_ENA_CFG_TX_ENA, DEV_MAC_ENA_CFG);
/* Take MAC, Port, Phy (intern) and PCS (SGMII/Serdes) clock out of
* reset */
- ocelot_port_writel(port, DEV_CLOCK_CFG_LINK_SPEED(speed),
+ ocelot_port_writel(ocelot_port, DEV_CLOCK_CFG_LINK_SPEED(speed),
DEV_CLOCK_CFG);
- /* Set SMAC of Pause frame (00:00:00:00:00:00) */
- ocelot_port_writel(port, 0, DEV_MAC_FC_MAC_HIGH_CFG);
- ocelot_port_writel(port, 0, DEV_MAC_FC_MAC_LOW_CFG);
-
/* No PFC */
ocelot_write_gix(ocelot, ANA_PFC_PFC_CFG_FC_LINK_SPEED(speed),
- ANA_PFC_PFC_CFG, p);
-
- /* Set Pause WM hysteresis
- * 152 = 6 * VLAN_ETH_FRAME_LEN / OCELOT_BUFFER_CELL_SZ
- * 101 = 4 * VLAN_ETH_FRAME_LEN / OCELOT_BUFFER_CELL_SZ
- */
- ocelot_write_rix(ocelot, SYS_PAUSE_CFG_PAUSE_ENA |
- SYS_PAUSE_CFG_PAUSE_STOP(101) |
- SYS_PAUSE_CFG_PAUSE_START(152), SYS_PAUSE_CFG, p);
+ ANA_PFC_PFC_CFG, port);
/* Core: Enable port for frame transfer */
ocelot_write_rix(ocelot, QSYS_SWITCH_PORT_MODE_INGRESS_DROP_MODE |
QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG(1) |
QSYS_SWITCH_PORT_MODE_PORT_ENA,
- QSYS_SWITCH_PORT_MODE, p);
+ QSYS_SWITCH_PORT_MODE, port);
/* Flow control */
ocelot_write_rix(ocelot, SYS_MAC_FC_CFG_PAUSE_VAL_CFG(0xffff) |
@@ -473,64 +470,88 @@ static void ocelot_port_adjust_link(struct net_device *dev)
SYS_MAC_FC_CFG_ZERO_PAUSE_ENA |
SYS_MAC_FC_CFG_FC_LATENCY_CFG(0x7) |
SYS_MAC_FC_CFG_FC_LINK_SPEED(speed),
- SYS_MAC_FC_CFG, p);
- ocelot_write_rix(ocelot, 0, ANA_POL_FLOWC, p);
-
- /* Tail dropping watermark */
- atop_wm = (ocelot->shared_queue_sz - 9 * VLAN_ETH_FRAME_LEN) / OCELOT_BUFFER_CELL_SZ;
- ocelot_write_rix(ocelot, ocelot_wm_enc(9 * VLAN_ETH_FRAME_LEN),
- SYS_ATOP, p);
- ocelot_write(ocelot, ocelot_wm_enc(atop_wm), SYS_ATOP_TOT_CFG);
+ SYS_MAC_FC_CFG, port);
+ ocelot_write_rix(ocelot, 0, ANA_POL_FLOWC, port);
}
+EXPORT_SYMBOL(ocelot_adjust_link);
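
The PCS bring-up that ocelot_adjust_link() used to hard-code (SGMII mode, PCS enable, no aneg, no loopback; see the removed writes above) now sits behind the optional ops->pcs_init() hook, letting SoC glue substitute its own sequence. A sketch of a callback reproducing the old default, assuming the ops structure is named struct ocelot_ops as the ops->reset/ops->pcs_init uses elsewhere in this patch suggest:

    static void example_pcs_init(struct ocelot *ocelot, int port)
    {
            struct ocelot_port *ocelot_port = ocelot->ports[port];

            /* SGMII only for now, PCS enabled, no aneg, no loopback */
            ocelot_port_writel(ocelot_port, PCS1G_MODE_CFG_SGMII_MODE_ENA,
                               PCS1G_MODE_CFG);
            ocelot_port_writel(ocelot_port, PCS1G_SD_CFG_SD_SEL, PCS1G_SD_CFG);
            ocelot_port_writel(ocelot_port, PCS1G_CFG_PCS_ENA, PCS1G_CFG);
            ocelot_port_writel(ocelot_port, 0, PCS1G_ANEG_CFG);
            ocelot_port_writel(ocelot_port, 0, PCS1G_LB_CFG);
    }

    static const struct ocelot_ops example_ops = {
            .pcs_init = example_pcs_init,
    };
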
-static int ocelot_port_open(struct net_device *dev)
+static void ocelot_port_adjust_link(struct net_device *dev)
{
- struct ocelot_port *port = netdev_priv(dev);
- struct ocelot *ocelot = port->ocelot;
- int err;
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot *ocelot = priv->port.ocelot;
+ int port = priv->chip_port;
+ ocelot_adjust_link(ocelot, port, dev->phydev);
+}
+
+void ocelot_port_enable(struct ocelot *ocelot, int port,
+ struct phy_device *phy)
+{
/* Enable receiving frames on the port, and activate auto-learning of
* MAC addresses.
*/
ocelot_write_gix(ocelot, ANA_PORT_PORT_CFG_LEARNAUTO |
ANA_PORT_PORT_CFG_RECV_ENA |
- ANA_PORT_PORT_CFG_PORTID_VAL(port->chip_port),
- ANA_PORT_PORT_CFG, port->chip_port);
+ ANA_PORT_PORT_CFG_PORTID_VAL(port),
+ ANA_PORT_PORT_CFG, port);
+}
+EXPORT_SYMBOL(ocelot_port_enable);
+
+static int ocelot_port_open(struct net_device *dev)
+{
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot *ocelot = priv->port.ocelot;
+ int port = priv->chip_port;
+ int err;
- if (port->serdes) {
- err = phy_set_mode_ext(port->serdes, PHY_MODE_ETHERNET,
- port->phy_mode);
+ if (priv->serdes) {
+ err = phy_set_mode_ext(priv->serdes, PHY_MODE_ETHERNET,
+ priv->phy_mode);
if (err) {
netdev_err(dev, "Could not set mode of SerDes\n");
return err;
}
}
- err = phy_connect_direct(dev, port->phy, &ocelot_port_adjust_link,
- port->phy_mode);
+ err = phy_connect_direct(dev, priv->phy, &ocelot_port_adjust_link,
+ priv->phy_mode);
if (err) {
netdev_err(dev, "Could not attach to PHY\n");
return err;
}
- dev->phydev = port->phy;
+ dev->phydev = priv->phy;
+
+ phy_attached_info(priv->phy);
+ phy_start(priv->phy);
+
+ ocelot_port_enable(ocelot, port, priv->phy);
- phy_attached_info(port->phy);
- phy_start(port->phy);
return 0;
}
+void ocelot_port_disable(struct ocelot *ocelot, int port)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+
+ ocelot_port_writel(ocelot_port, 0, DEV_MAC_ENA_CFG);
+ ocelot_rmw_rix(ocelot, 0, QSYS_SWITCH_PORT_MODE_PORT_ENA,
+ QSYS_SWITCH_PORT_MODE, port);
+}
+EXPORT_SYMBOL(ocelot_port_disable);
+
static int ocelot_port_stop(struct net_device *dev)
{
- struct ocelot_port *port = netdev_priv(dev);
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot *ocelot = priv->port.ocelot;
+ int port = priv->chip_port;
- phy_disconnect(port->phy);
+ phy_disconnect(priv->phy);
dev->phydev = NULL;
- ocelot_port_writel(port, 0, DEV_MAC_ENA_CFG);
- ocelot_rmw_rix(port->ocelot, 0, QSYS_SWITCH_PORT_MODE_PORT_ENA,
- QSYS_SWITCH_PORT_MODE, port->chip_port);
+ ocelot_port_disable(ocelot, port);
+
return 0;
}
@@ -554,15 +575,43 @@ static int ocelot_gen_ifh(u32 *ifh, struct frame_info *info)
return 0;
}
+int ocelot_port_add_txtstamp_skb(struct ocelot_port *ocelot_port,
+ struct sk_buff *skb)
+{
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
+ struct ocelot *ocelot = ocelot_port->ocelot;
+
+ if (ocelot->ptp && shinfo->tx_flags & SKBTX_HW_TSTAMP &&
+ ocelot_port->ptp_cmd == IFH_REW_OP_TWO_STEP_PTP) {
+ struct ocelot_skb *oskb =
+ kzalloc(sizeof(struct ocelot_skb), GFP_ATOMIC);
+
+ if (unlikely(!oskb))
+ return -ENOMEM;
+
+ shinfo->tx_flags |= SKBTX_IN_PROGRESS;
+
+ oskb->skb = skb;
+ oskb->id = ocelot_port->ts_id % 4;
+
+ list_add_tail(&oskb->head, &ocelot_port->skbs);
+ return 0;
+ }
+ return -ENODATA;
+}
+EXPORT_SYMBOL(ocelot_port_add_txtstamp_skb);
+
static int ocelot_port_xmit(struct sk_buff *skb, struct net_device *dev)
{
+ struct ocelot_port_private *priv = netdev_priv(dev);
struct skb_shared_info *shinfo = skb_shinfo(skb);
- struct ocelot_port *port = netdev_priv(dev);
- struct ocelot *ocelot = port->ocelot;
- u32 val, ifh[IFH_LEN];
+ struct ocelot_port *ocelot_port = &priv->port;
+ struct ocelot *ocelot = ocelot_port->ocelot;
+ u32 val, ifh[OCELOT_TAG_LEN / 4];
struct frame_info info = {};
u8 grp = 0; /* Send everything on CPU group 0 */
unsigned int i, count, last;
+ int port = priv->chip_port;
val = ocelot_read(ocelot, QS_INJ_STATUS);
if (!(val & QS_INJ_STATUS_FIFO_RDY(BIT(grp))) ||
@@ -572,20 +621,20 @@ static int ocelot_port_xmit(struct sk_buff *skb, struct net_device *dev)
ocelot_write_rix(ocelot, QS_INJ_CTRL_GAP_SIZE(1) |
QS_INJ_CTRL_SOF, QS_INJ_CTRL, grp);
- info.port = BIT(port->chip_port);
+ info.port = BIT(port);
info.tag_type = IFH_TAG_TYPE_C;
info.vid = skb_vlan_tag_get(skb);
/* Check if timestamping is needed */
if (ocelot->ptp && shinfo->tx_flags & SKBTX_HW_TSTAMP) {
- info.rew_op = port->ptp_cmd;
- if (port->ptp_cmd == IFH_REW_OP_TWO_STEP_PTP)
- info.rew_op |= (port->ts_id % 4) << 3;
+ info.rew_op = ocelot_port->ptp_cmd;
+ if (ocelot_port->ptp_cmd == IFH_REW_OP_TWO_STEP_PTP)
+ info.rew_op |= (ocelot_port->ts_id % 4) << 3;
}
ocelot_gen_ifh(ifh, &info);
- for (i = 0; i < IFH_LEN; i++)
+ for (i = 0; i < OCELOT_TAG_LEN / 4; i++)
ocelot_write_rix(ocelot, (__force u32)cpu_to_be32(ifh[i]),
QS_INJ_WR, grp);
@@ -614,31 +663,17 @@ static int ocelot_port_xmit(struct sk_buff *skb, struct net_device *dev)
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
- if (ocelot->ptp && shinfo->tx_flags & SKBTX_HW_TSTAMP &&
- port->ptp_cmd == IFH_REW_OP_TWO_STEP_PTP) {
- struct ocelot_skb *oskb =
- kzalloc(sizeof(struct ocelot_skb), GFP_ATOMIC);
-
- if (unlikely(!oskb))
- goto out;
-
- skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
-
- oskb->skb = skb;
- oskb->id = port->ts_id % 4;
- port->ts_id++;
-
- list_add_tail(&oskb->head, &port->skbs);
-
+ if (!ocelot_port_add_txtstamp_skb(ocelot_port, skb)) {
+ ocelot_port->ts_id++;
return NETDEV_TX_OK;
}
-out:
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
-void ocelot_get_hwtimestamp(struct ocelot *ocelot, struct timespec64 *ts)
+static void ocelot_get_hwtimestamp(struct ocelot *ocelot,
+ struct timespec64 *ts)
{
unsigned long flags;
u32 val;
@@ -663,29 +698,90 @@ void ocelot_get_hwtimestamp(struct ocelot *ocelot, struct timespec64 *ts)
spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
}
-EXPORT_SYMBOL(ocelot_get_hwtimestamp);
+
+void ocelot_get_txtstamp(struct ocelot *ocelot)
+{
+ int budget = OCELOT_PTP_QUEUE_SZ;
+
+ while (budget--) {
+ struct skb_shared_hwtstamps shhwtstamps;
+ struct list_head *pos, *tmp;
+ struct sk_buff *skb = NULL;
+ struct ocelot_skb *entry;
+ struct ocelot_port *port;
+ struct timespec64 ts;
+ u32 val, id, txport;
+
+ val = ocelot_read(ocelot, SYS_PTP_STATUS);
+
+ /* Check if a timestamp can be retrieved */
+ if (!(val & SYS_PTP_STATUS_PTP_MESS_VLD))
+ break;
+
+ WARN_ON(val & SYS_PTP_STATUS_PTP_OVFL);
+
+ /* Retrieve the ts ID and Tx port */
+ id = SYS_PTP_STATUS_PTP_MESS_ID_X(val);
+ txport = SYS_PTP_STATUS_PTP_MESS_TXPORT_X(val);
+
+ /* Retrieve its associated skb */
+ port = ocelot->ports[txport];
+
+ list_for_each_safe(pos, tmp, &port->skbs) {
+ entry = list_entry(pos, struct ocelot_skb, head);
+ if (entry->id != id)
+ continue;
+
+ skb = entry->skb;
+
+ list_del(pos);
+ kfree(entry);
+ }
+
+ /* Next ts */
+ ocelot_write(ocelot, SYS_PTP_NXT_PTP_NXT, SYS_PTP_NXT);
+
+ if (unlikely(!skb))
+ continue;
+
+ /* Get the h/w timestamp */
+ ocelot_get_hwtimestamp(ocelot, &ts);
+
+ /* Set the timestamp into the skb */
+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+ shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
+ skb_tstamp_tx(skb, &shhwtstamps);
+
+ dev_kfree_skb_any(skb);
+ }
+}
+EXPORT_SYMBOL(ocelot_get_txtstamp);
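
ocelot_get_txtstamp() drains up to OCELOT_PTP_QUEUE_SZ pending two-step timestamps, matching each SYS_PTP_STATUS entry against the skbs queued earlier by ocelot_port_add_txtstamp_skb(). It is meant to run once the hardware signals that a timestamp is ready; a hypothetical threaded-IRQ wiring:

    static irqreturn_t example_ptp_rdy_irq(int irq, void *arg)
    {
            struct ocelot *ocelot = arg;

            /* Drain every valid entry from the PTP timestamp FIFO */
            ocelot_get_txtstamp(ocelot);

            return IRQ_HANDLED;
    }
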
static int ocelot_mc_unsync(struct net_device *dev, const unsigned char *addr)
{
- struct ocelot_port *port = netdev_priv(dev);
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot_port *ocelot_port = &priv->port;
+ struct ocelot *ocelot = ocelot_port->ocelot;
- return ocelot_mact_forget(port->ocelot, addr, port->pvid);
+ return ocelot_mact_forget(ocelot, addr, ocelot_port->pvid);
}
static int ocelot_mc_sync(struct net_device *dev, const unsigned char *addr)
{
- struct ocelot_port *port = netdev_priv(dev);
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot_port *ocelot_port = &priv->port;
+ struct ocelot *ocelot = ocelot_port->ocelot;
- return ocelot_mact_learn(port->ocelot, PGID_CPU, addr, port->pvid,
+ return ocelot_mact_learn(ocelot, PGID_CPU, addr, ocelot_port->pvid,
ENTRYTYPE_LOCKED);
}
static void ocelot_set_rx_mode(struct net_device *dev)
{
- struct ocelot_port *port = netdev_priv(dev);
- struct ocelot *ocelot = port->ocelot;
- int i;
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot *ocelot = priv->port.ocelot;
u32 val;
+ int i;
/* This doesn't handle promiscuous mode because the bridge core is
* setting IFF_PROMISC on all slave interfaces and all frames would be
@@ -701,10 +797,11 @@ static void ocelot_set_rx_mode(struct net_device *dev)
static int ocelot_port_get_phys_port_name(struct net_device *dev,
char *buf, size_t len)
{
- struct ocelot_port *port = netdev_priv(dev);
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ int port = priv->chip_port;
int ret;
- ret = snprintf(buf, len, "p%d", port->chip_port);
+ ret = snprintf(buf, len, "p%d", port);
if (ret >= len)
return -EINVAL;
@@ -713,15 +810,16 @@ static int ocelot_port_get_phys_port_name(struct net_device *dev,
static int ocelot_port_set_mac_address(struct net_device *dev, void *p)
{
- struct ocelot_port *port = netdev_priv(dev);
- struct ocelot *ocelot = port->ocelot;
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot_port *ocelot_port = &priv->port;
+ struct ocelot *ocelot = ocelot_port->ocelot;
const struct sockaddr *addr = p;
/* Learn the new net device MAC address in the mac table. */
- ocelot_mact_learn(ocelot, PGID_CPU, addr->sa_data, port->pvid,
+ ocelot_mact_learn(ocelot, PGID_CPU, addr->sa_data, ocelot_port->pvid,
ENTRYTYPE_LOCKED);
/* Then forget the previous one. */
- ocelot_mact_forget(ocelot, dev->dev_addr, port->pvid);
+ ocelot_mact_forget(ocelot, dev->dev_addr, ocelot_port->pvid);
ether_addr_copy(dev->dev_addr, addr->sa_data);
return 0;
@@ -730,11 +828,12 @@ static int ocelot_port_set_mac_address(struct net_device *dev, void *p)
static void ocelot_get_stats64(struct net_device *dev,
struct rtnl_link_stats64 *stats)
{
- struct ocelot_port *port = netdev_priv(dev);
- struct ocelot *ocelot = port->ocelot;
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot *ocelot = priv->port.ocelot;
+ int port = priv->chip_port;
/* Configure the port to read the stats from */
- ocelot_write(ocelot, SYS_STAT_CFG_STAT_VIEW(port->chip_port),
+ ocelot_write(ocelot, SYS_STAT_CFG_STAT_VIEW(port),
SYS_STAT_CFG);
/* Get Rx stats */
@@ -765,21 +864,18 @@ static void ocelot_get_stats64(struct net_device *dev,
stats->collisions = ocelot_read(ocelot, SYS_COUNT_TX_COLLISION);
}
-static int ocelot_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
- struct net_device *dev, const unsigned char *addr,
- u16 vid, u16 flags,
- struct netlink_ext_ack *extack)
+int ocelot_fdb_add(struct ocelot *ocelot, int port,
+ const unsigned char *addr, u16 vid, bool vlan_aware)
{
- struct ocelot_port *port = netdev_priv(dev);
- struct ocelot *ocelot = port->ocelot;
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
if (!vid) {
- if (!port->vlan_aware)
+ if (!vlan_aware)
/* If the bridge is not VLAN aware and no VID was
* provided, set it to pvid to ensure the MAC entry
* matches incoming untagged packets
*/
- vid = port->pvid;
+ vid = ocelot_port->pvid;
else
/* If the bridge is VLAN aware a VID must be provided as
* otherwise the learnt entry wouldn't match any frame.
@@ -787,19 +883,40 @@ static int ocelot_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
return -EINVAL;
}
- return ocelot_mact_learn(ocelot, port->chip_port, addr, vid,
- ENTRYTYPE_LOCKED);
+ return ocelot_mact_learn(ocelot, port, addr, vid, ENTRYTYPE_LOCKED);
}
+EXPORT_SYMBOL(ocelot_fdb_add);
-static int ocelot_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
- struct net_device *dev,
- const unsigned char *addr, u16 vid)
+static int ocelot_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
+ struct net_device *dev,
+ const unsigned char *addr,
+ u16 vid, u16 flags,
+ struct netlink_ext_ack *extack)
{
- struct ocelot_port *port = netdev_priv(dev);
- struct ocelot *ocelot = port->ocelot;
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot *ocelot = priv->port.ocelot;
+ int port = priv->chip_port;
+
+ return ocelot_fdb_add(ocelot, port, addr, vid, priv->vlan_aware);
+}
+
+int ocelot_fdb_del(struct ocelot *ocelot, int port,
+ const unsigned char *addr, u16 vid)
+{
return ocelot_mact_forget(ocelot, addr, vid);
}
+EXPORT_SYMBOL(ocelot_fdb_del);
+
+static int ocelot_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
+ struct net_device *dev,
+ const unsigned char *addr, u16 vid)
+{
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot *ocelot = priv->port.ocelot;
+ int port = priv->chip_port;
+
+ return ocelot_fdb_del(ocelot, port, addr, vid);
+}
struct ocelot_dump_ctx {
struct net_device *dev;
@@ -808,9 +925,10 @@ struct ocelot_dump_ctx {
int idx;
};
-static int ocelot_fdb_do_dump(struct ocelot_mact_entry *entry,
- struct ocelot_dump_ctx *dump)
+static int ocelot_port_fdb_do_dump(const unsigned char *addr, u16 vid,
+ bool is_static, void *data)
{
+ struct ocelot_dump_ctx *dump = data;
u32 portid = NETLINK_CB(dump->cb->skb).portid;
u32 seq = dump->cb->nlh->nlmsg_seq;
struct nlmsghdr *nlh;
@@ -831,12 +949,12 @@ static int ocelot_fdb_do_dump(struct ocelot_mact_entry *entry,
ndm->ndm_flags = NTF_SELF;
ndm->ndm_type = 0;
ndm->ndm_ifindex = dump->dev->ifindex;
- ndm->ndm_state = NUD_REACHABLE;
+ ndm->ndm_state = is_static ? NUD_NOARP : NUD_REACHABLE;
- if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, entry->mac))
+ if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, addr))
goto nla_put_failure;
- if (entry->vid && nla_put_u16(dump->skb, NDA_VLAN, entry->vid))
+ if (vid && nla_put_u16(dump->skb, NDA_VLAN, vid))
goto nla_put_failure;
nlmsg_end(dump->skb, nlh);
@@ -850,12 +968,11 @@ nla_put_failure:
return -EMSGSIZE;
}
-static inline int ocelot_mact_read(struct ocelot_port *port, int row, int col,
- struct ocelot_mact_entry *entry)
+static int ocelot_mact_read(struct ocelot *ocelot, int port, int row, int col,
+ struct ocelot_mact_entry *entry)
{
- struct ocelot *ocelot = port->ocelot;
- char mac[ETH_ALEN];
u32 val, dst, macl, mach;
+ char mac[ETH_ALEN];
/* Set row and column to read from */
ocelot_field_write(ocelot, ANA_TABLES_MACTINDX_M_INDEX, row);
@@ -878,7 +995,7 @@ static inline int ocelot_mact_read(struct ocelot_port *port, int row, int col,
* do not report it.
*/
dst = (val & ANA_TABLES_MACACCESS_DEST_IDX_M) >> 3;
- if (dst != port->chip_port)
+ if (dst != port)
return -EINVAL;
/* Get the entry's MAC address and VLAN id */
@@ -898,43 +1015,61 @@ static inline int ocelot_mact_read(struct ocelot_port *port, int row, int col,
return 0;
}
-static int ocelot_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
- struct net_device *dev,
- struct net_device *filter_dev, int *idx)
+int ocelot_fdb_dump(struct ocelot *ocelot, int port,
+ dsa_fdb_dump_cb_t *cb, void *data)
{
- struct ocelot_port *port = netdev_priv(dev);
- int i, j, ret = 0;
- struct ocelot_dump_ctx dump = {
- .dev = dev,
- .skb = skb,
- .cb = cb,
- .idx = *idx,
- };
-
- struct ocelot_mact_entry entry;
+ int i, j;
 /* Loop through all the MAC table entries. There are 1024 rows of 4
* entries.
*/
for (i = 0; i < 1024; i++) {
for (j = 0; j < 4; j++) {
- ret = ocelot_mact_read(port, i, j, &entry);
+ struct ocelot_mact_entry entry;
+ bool is_static;
+ int ret;
+
+ ret = ocelot_mact_read(ocelot, port, i, j, &entry);
/* If the entry is invalid (wrong port, invalid...),
* skip it.
*/
if (ret == -EINVAL)
continue;
else if (ret)
- goto end;
+ return ret;
+
+ is_static = (entry.type == ENTRYTYPE_LOCKED);
- ret = ocelot_fdb_do_dump(&entry, &dump);
+ ret = cb(entry.mac, entry.vid, is_static, data);
if (ret)
- goto end;
+ return ret;
}
}
-end:
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_fdb_dump);
+
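
ocelot_fdb_dump() now walks the 1024 x 4 MAC table itself and hands every valid entry to a dsa_fdb_dump_cb_t, so the netlink packing in ocelot_port_fdb_do_dump() is just one possible consumer. A minimal sketch of another callback; the counting use case is made up:

    /* dsa_fdb_dump_cb_t-compatible: count the static entries on a port.
     * 'data' points at caller-owned state; a non-zero return aborts the
     * walk.
     */
    static int example_count_static(const unsigned char *addr, u16 vid,
                                    bool is_static, void *data)
    {
            unsigned int *count = data;

            if (is_static)
                    (*count)++;

            return 0;
    }

invoked as ocelot_fdb_dump(ocelot, port, example_count_static, &count).
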
+static int ocelot_port_fdb_dump(struct sk_buff *skb,
+ struct netlink_callback *cb,
+ struct net_device *dev,
+ struct net_device *filter_dev, int *idx)
+{
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot *ocelot = priv->port.ocelot;
+ struct ocelot_dump_ctx dump = {
+ .dev = dev,
+ .skb = skb,
+ .cb = cb,
+ .idx = *idx,
+ };
+ int port = priv->chip_port;
+ int ret;
+
+ ret = ocelot_fdb_dump(ocelot, port, ocelot_port_fdb_do_dump, &dump);
+
*idx = dump.idx;
+
return ret;
}
@@ -953,18 +1088,20 @@ static int ocelot_vlan_rx_kill_vid(struct net_device *dev, __be16 proto,
static int ocelot_set_features(struct net_device *dev,
netdev_features_t features)
{
- struct ocelot_port *port = netdev_priv(dev);
netdev_features_t changed = dev->features ^ features;
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot *ocelot = priv->port.ocelot;
+ int port = priv->chip_port;
if ((dev->features & NETIF_F_HW_TC) > (features & NETIF_F_HW_TC) &&
- port->tc.offload_cnt) {
+ priv->tc.offload_cnt) {
netdev_err(dev,
"Cannot disable HW TC offload while offloads active\n");
return -EBUSY;
}
if (changed & NETIF_F_HW_VLAN_CTAG_FILTER)
- ocelot_vlan_mode(port, features);
+ ocelot_vlan_mode(ocelot, port, features);
return 0;
}
@@ -972,8 +1109,8 @@ static int ocelot_set_features(struct net_device *dev,
static int ocelot_get_port_parent_id(struct net_device *dev,
struct netdev_phys_item_id *ppid)
{
- struct ocelot_port *ocelot_port = netdev_priv(dev);
- struct ocelot *ocelot = ocelot_port->ocelot;
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot *ocelot = priv->port.ocelot;
ppid->id_len = sizeof(ocelot->base_mac);
memcpy(&ppid->id, &ocelot->base_mac, ppid->id_len);
@@ -981,17 +1118,16 @@ static int ocelot_get_port_parent_id(struct net_device *dev,
return 0;
}
-static int ocelot_hwstamp_get(struct ocelot_port *port, struct ifreq *ifr)
+int ocelot_hwstamp_get(struct ocelot *ocelot, int port, struct ifreq *ifr)
{
- struct ocelot *ocelot = port->ocelot;
-
return copy_to_user(ifr->ifr_data, &ocelot->hwtstamp_config,
sizeof(ocelot->hwtstamp_config)) ? -EFAULT : 0;
}
+EXPORT_SYMBOL(ocelot_hwstamp_get);
-static int ocelot_hwstamp_set(struct ocelot_port *port, struct ifreq *ifr)
+int ocelot_hwstamp_set(struct ocelot *ocelot, int port, struct ifreq *ifr)
{
- struct ocelot *ocelot = port->ocelot;
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
struct hwtstamp_config cfg;
if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
@@ -1004,16 +1140,16 @@ static int ocelot_hwstamp_set(struct ocelot_port *port, struct ifreq *ifr)
/* Tx type sanity check */
switch (cfg.tx_type) {
case HWTSTAMP_TX_ON:
- port->ptp_cmd = IFH_REW_OP_TWO_STEP_PTP;
+ ocelot_port->ptp_cmd = IFH_REW_OP_TWO_STEP_PTP;
break;
case HWTSTAMP_TX_ONESTEP_SYNC:
 /* IFH_REW_OP_ONE_STEP_PTP updates the correction field; we
* need to update the origin time.
*/
- port->ptp_cmd = IFH_REW_OP_ORIGIN_PTP;
+ ocelot_port->ptp_cmd = IFH_REW_OP_ORIGIN_PTP;
break;
case HWTSTAMP_TX_OFF:
- port->ptp_cmd = 0;
+ ocelot_port->ptp_cmd = 0;
break;
default:
return -ERANGE;
@@ -1052,11 +1188,13 @@ static int ocelot_hwstamp_set(struct ocelot_port *port, struct ifreq *ifr)
return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}
+EXPORT_SYMBOL(ocelot_hwstamp_set);
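
ocelot_hwstamp_set() is reached through the SIOCSHWTSTAMP ioctl handled below. From userspace the same path looks roughly like this sketch; the socket, interface name and HWTSTAMP_FILTER_ALL choice are illustrative, and the driver may downgrade the rx_filter it actually grants:

    #include <string.h>
    #include <sys/ioctl.h>
    #include <net/if.h>
    #include <linux/net_tstamp.h>
    #include <linux/sockios.h>

    /* 'fd' is any socket, e.g. socket(AF_INET, SOCK_DGRAM, 0) */
    static int example_enable_tx_tstamp(int fd, const char *ifname)
    {
            struct hwtstamp_config cfg = {
                    .tx_type = HWTSTAMP_TX_ON, /* two-step PTP above */
                    .rx_filter = HWTSTAMP_FILTER_ALL,
            };
            struct ifreq ifr;

            memset(&ifr, 0, sizeof(ifr));
            strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
            ifr.ifr_data = (char *)&cfg;

            return ioctl(fd, SIOCSHWTSTAMP, &ifr);
    }
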
static int ocelot_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
- struct ocelot_port *port = netdev_priv(dev);
- struct ocelot *ocelot = port->ocelot;
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot *ocelot = priv->port.ocelot;
+ int port = priv->chip_port;
/* The function is only used for PTP operations for now */
if (!ocelot->ptp)
@@ -1064,9 +1202,9 @@ static int ocelot_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
switch (cmd) {
case SIOCSHWTSTAMP:
- return ocelot_hwstamp_set(port, ifr);
+ return ocelot_hwstamp_set(ocelot, port, ifr);
case SIOCGHWTSTAMP:
- return ocelot_hwstamp_get(port, ifr);
+ return ocelot_hwstamp_get(ocelot, port, ifr);
default:
return -EOPNOTSUPP;
}
@@ -1080,9 +1218,9 @@ static const struct net_device_ops ocelot_port_netdev_ops = {
.ndo_get_phys_port_name = ocelot_port_get_phys_port_name,
.ndo_set_mac_address = ocelot_port_set_mac_address,
.ndo_get_stats64 = ocelot_get_stats64,
- .ndo_fdb_add = ocelot_fdb_add,
- .ndo_fdb_del = ocelot_fdb_del,
- .ndo_fdb_dump = ocelot_fdb_dump,
+ .ndo_fdb_add = ocelot_port_fdb_add,
+ .ndo_fdb_del = ocelot_port_fdb_del,
+ .ndo_fdb_dump = ocelot_port_fdb_dump,
.ndo_vlan_rx_add_vid = ocelot_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = ocelot_vlan_rx_kill_vid,
.ndo_set_features = ocelot_set_features,
@@ -1091,10 +1229,8 @@ static const struct net_device_ops ocelot_port_netdev_ops = {
.ndo_do_ioctl = ocelot_ioctl,
};
-static void ocelot_get_strings(struct net_device *netdev, u32 sset, u8 *data)
+void ocelot_get_strings(struct ocelot *ocelot, int port, u32 sset, u8 *data)
{
- struct ocelot_port *port = netdev_priv(netdev);
- struct ocelot *ocelot = port->ocelot;
int i;
if (sset != ETH_SS_STATS)
@@ -1104,6 +1240,17 @@ static void ocelot_get_strings(struct net_device *netdev, u32 sset, u8 *data)
memcpy(data + i * ETH_GSTRING_LEN, ocelot->stats_layout[i].name,
ETH_GSTRING_LEN);
}
+EXPORT_SYMBOL(ocelot_get_strings);
+
+static void ocelot_port_get_strings(struct net_device *netdev, u32 sset,
+ u8 *data)
+{
+ struct ocelot_port_private *priv = netdev_priv(netdev);
+ struct ocelot *ocelot = priv->port.ocelot;
+ int port = priv->chip_port;
+
+ ocelot_get_strings(ocelot, port, sset, data);
+}
static void ocelot_update_stats(struct ocelot *ocelot)
{
@@ -1145,11 +1292,8 @@ static void ocelot_check_stats_work(struct work_struct *work)
OCELOT_STATS_CHECK_DELAY);
}
-static void ocelot_get_ethtool_stats(struct net_device *dev,
- struct ethtool_stats *stats, u64 *data)
+void ocelot_get_ethtool_stats(struct ocelot *ocelot, int port, u64 *data)
{
- struct ocelot_port *port = netdev_priv(dev);
- struct ocelot *ocelot = port->ocelot;
int i;
/* check and update now */
@@ -1157,28 +1301,42 @@ static void ocelot_get_ethtool_stats(struct net_device *dev,
/* Copy all counters */
for (i = 0; i < ocelot->num_stats; i++)
- *data++ = ocelot->stats[port->chip_port * ocelot->num_stats + i];
+ *data++ = ocelot->stats[port * ocelot->num_stats + i];
}
+EXPORT_SYMBOL(ocelot_get_ethtool_stats);
-static int ocelot_get_sset_count(struct net_device *dev, int sset)
+static void ocelot_port_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats,
+ u64 *data)
{
- struct ocelot_port *port = netdev_priv(dev);
- struct ocelot *ocelot = port->ocelot;
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot *ocelot = priv->port.ocelot;
+ int port = priv->chip_port;
+
+ ocelot_get_ethtool_stats(ocelot, port, data);
+}
+int ocelot_get_sset_count(struct ocelot *ocelot, int port, int sset)
+{
if (sset != ETH_SS_STATS)
return -EOPNOTSUPP;
+
return ocelot->num_stats;
}
+EXPORT_SYMBOL(ocelot_get_sset_count);
-static int ocelot_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
+static int ocelot_port_get_sset_count(struct net_device *dev, int sset)
{
- struct ocelot_port *ocelot_port = netdev_priv(dev);
- struct ocelot *ocelot = ocelot_port->ocelot;
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot *ocelot = priv->port.ocelot;
+ int port = priv->chip_port;
- if (!ocelot->ptp)
- return ethtool_op_get_ts_info(dev, info);
+ return ocelot_get_sset_count(ocelot, port, sset);
+}
+int ocelot_get_ts_info(struct ocelot *ocelot, int port,
+ struct ethtool_ts_info *info)
+{
info->phc_index = ocelot->ptp_clock ?
ptp_clock_index(ocelot->ptp_clock) : -1;
info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE |
@@ -1193,36 +1351,43 @@ static int ocelot_get_ts_info(struct net_device *dev,
return 0;
}
+EXPORT_SYMBOL(ocelot_get_ts_info);
+
+static int ocelot_port_get_ts_info(struct net_device *dev,
+ struct ethtool_ts_info *info)
+{
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot *ocelot = priv->port.ocelot;
+ int port = priv->chip_port;
+
+ if (!ocelot->ptp)
+ return ethtool_op_get_ts_info(dev, info);
+
+ return ocelot_get_ts_info(ocelot, port, info);
+}
static const struct ethtool_ops ocelot_ethtool_ops = {
- .get_strings = ocelot_get_strings,
- .get_ethtool_stats = ocelot_get_ethtool_stats,
- .get_sset_count = ocelot_get_sset_count,
+ .get_strings = ocelot_port_get_strings,
+ .get_ethtool_stats = ocelot_port_get_ethtool_stats,
+ .get_sset_count = ocelot_port_get_sset_count,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
- .get_ts_info = ocelot_get_ts_info,
+ .get_ts_info = ocelot_port_get_ts_info,
};
-static int ocelot_port_attr_stp_state_set(struct ocelot_port *ocelot_port,
- struct switchdev_trans *trans,
- u8 state)
+void ocelot_bridge_stp_state_set(struct ocelot *ocelot, int port, u8 state)
{
- struct ocelot *ocelot = ocelot_port->ocelot;
u32 port_cfg;
- int port, i;
-
- if (switchdev_trans_ph_prepare(trans))
- return 0;
+ int p, i;
- if (!(BIT(ocelot_port->chip_port) & ocelot->bridge_mask))
- return 0;
+ if (!(BIT(port) & ocelot->bridge_mask))
+ return;
- port_cfg = ocelot_read_gix(ocelot, ANA_PORT_PORT_CFG,
- ocelot_port->chip_port);
+ port_cfg = ocelot_read_gix(ocelot, ANA_PORT_PORT_CFG, port);
switch (state) {
case BR_STATE_FORWARDING:
- ocelot->bridge_fwd_mask |= BIT(ocelot_port->chip_port);
+ ocelot->bridge_fwd_mask |= BIT(port);
/* Fallthrough */
case BR_STATE_LEARNING:
port_cfg |= ANA_PORT_PORT_CFG_LEARN_ENA;
@@ -1230,19 +1395,18 @@ static int ocelot_port_attr_stp_state_set(struct ocelot_port *ocelot_port,
default:
port_cfg &= ~ANA_PORT_PORT_CFG_LEARN_ENA;
- ocelot->bridge_fwd_mask &= ~BIT(ocelot_port->chip_port);
+ ocelot->bridge_fwd_mask &= ~BIT(port);
break;
}
- ocelot_write_gix(ocelot, port_cfg, ANA_PORT_PORT_CFG,
- ocelot_port->chip_port);
+ ocelot_write_gix(ocelot, port_cfg, ANA_PORT_PORT_CFG, port);
/* Apply FWD mask. The loop is needed to add/remove the current port as
* a source for the other ports.
*/
- for (port = 0; port < ocelot->num_phys_ports; port++) {
- if (ocelot->bridge_fwd_mask & BIT(port)) {
- unsigned long mask = ocelot->bridge_fwd_mask & ~BIT(port);
+ for (p = 0; p < ocelot->num_phys_ports; p++) {
+ if (p == ocelot->cpu || (ocelot->bridge_fwd_mask & BIT(p))) {
+ unsigned long mask = ocelot->bridge_fwd_mask & ~BIT(p);
for (i = 0; i < ocelot->num_phys_ports; i++) {
unsigned long bond_mask = ocelot->lags[i];
@@ -1250,78 +1414,93 @@ static int ocelot_port_attr_stp_state_set(struct ocelot_port *ocelot_port,
if (!bond_mask)
continue;
- if (bond_mask & BIT(port)) {
+ if (bond_mask & BIT(p)) {
mask &= ~bond_mask;
break;
}
}
- ocelot_write_rix(ocelot,
- BIT(ocelot->num_phys_ports) | mask,
- ANA_PGID_PGID, PGID_SRC + port);
+ /* Prevent the NPI port from looping back to itself */
+ if (p != ocelot->cpu)
+ mask |= BIT(ocelot->cpu);
+
+ ocelot_write_rix(ocelot, mask,
+ ANA_PGID_PGID, PGID_SRC + p);
} else {
/* Only the CPU port, this is compatible with link
* aggregation.
*/
ocelot_write_rix(ocelot,
- BIT(ocelot->num_phys_ports),
- ANA_PGID_PGID, PGID_SRC + port);
+ BIT(ocelot->cpu),
+ ANA_PGID_PGID, PGID_SRC + p);
}
}
+}
+EXPORT_SYMBOL(ocelot_bridge_stp_state_set);
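
The PGID_SRC rework above is easiest to check with numbers. A worked example, assuming a hypothetical switch with five physical ports where port 4 is the NPI/CPU port, ports 1 and 2 are in forwarding state, and no LAGs are configured:

    /*
     *   bridge_fwd_mask       = BIT(1) | BIT(2)              = 0b00110
     *   PGID_SRC + 1          = (0b00110 & ~BIT(1)) | BIT(4) = 0b10100
     *   PGID_SRC + 2          = (0b00110 & ~BIT(2)) | BIT(4) = 0b10010
     *   PGID_SRC + 0 and + 3  = BIT(4)                       = 0b10000
     *   PGID_SRC + 4 (NPI)    = 0b00110 & ~BIT(4)            = 0b00110
     *
     * Each forwarding port may reach the other bridged ports and the
     * CPU, never itself; non-bridged ports may reach only the CPU, and
     * the NPI port may inject toward the bridged ports.
     */
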
- return 0;
+static void ocelot_port_attr_stp_state_set(struct ocelot *ocelot, int port,
+ struct switchdev_trans *trans,
+ u8 state)
+{
+ if (switchdev_trans_ph_prepare(trans))
+ return;
+
+ ocelot_bridge_stp_state_set(ocelot, port, state);
}
-static void ocelot_port_attr_ageing_set(struct ocelot_port *ocelot_port,
+void ocelot_set_ageing_time(struct ocelot *ocelot, unsigned int msecs)
+{
+ ocelot_write(ocelot, ANA_AUTOAGE_AGE_PERIOD(msecs / 2),
+ ANA_AUTOAGE);
+}
+EXPORT_SYMBOL(ocelot_set_ageing_time);
+
+static void ocelot_port_attr_ageing_set(struct ocelot *ocelot, int port,
unsigned long ageing_clock_t)
{
- struct ocelot *ocelot = ocelot_port->ocelot;
unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;
- ocelot_write(ocelot, ANA_AUTOAGE_AGE_PERIOD(ageing_time / 2),
- ANA_AUTOAGE);
+ ocelot_set_ageing_time(ocelot, ageing_time);
}
-static void ocelot_port_attr_mc_set(struct ocelot_port *port, bool mc)
+static void ocelot_port_attr_mc_set(struct ocelot *ocelot, int port, bool mc)
{
- struct ocelot *ocelot = port->ocelot;
- u32 val = ocelot_read_gix(ocelot, ANA_PORT_CPU_FWD_CFG,
- port->chip_port);
+ u32 cpu_fwd_mcast = ANA_PORT_CPU_FWD_CFG_CPU_IGMP_REDIR_ENA |
+ ANA_PORT_CPU_FWD_CFG_CPU_MLD_REDIR_ENA |
+ ANA_PORT_CPU_FWD_CFG_CPU_IPMC_CTRL_COPY_ENA;
+ u32 val = 0;
if (mc)
- val |= ANA_PORT_CPU_FWD_CFG_CPU_IGMP_REDIR_ENA |
- ANA_PORT_CPU_FWD_CFG_CPU_MLD_REDIR_ENA |
- ANA_PORT_CPU_FWD_CFG_CPU_IPMC_CTRL_COPY_ENA;
- else
- val &= ~(ANA_PORT_CPU_FWD_CFG_CPU_IGMP_REDIR_ENA |
- ANA_PORT_CPU_FWD_CFG_CPU_MLD_REDIR_ENA |
- ANA_PORT_CPU_FWD_CFG_CPU_IPMC_CTRL_COPY_ENA);
+ val = cpu_fwd_mcast;
- ocelot_write_gix(ocelot, val, ANA_PORT_CPU_FWD_CFG, port->chip_port);
+ ocelot_rmw_gix(ocelot, val, cpu_fwd_mcast,
+ ANA_PORT_CPU_FWD_CFG, port);
}
static int ocelot_port_attr_set(struct net_device *dev,
const struct switchdev_attr *attr,
struct switchdev_trans *trans)
{
- struct ocelot_port *ocelot_port = netdev_priv(dev);
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot *ocelot = priv->port.ocelot;
+ int port = priv->chip_port;
int err = 0;
switch (attr->id) {
case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
- ocelot_port_attr_stp_state_set(ocelot_port, trans,
+ ocelot_port_attr_stp_state_set(ocelot, port, trans,
attr->u.stp_state);
break;
case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
- ocelot_port_attr_ageing_set(ocelot_port, attr->u.ageing_time);
+ ocelot_port_attr_ageing_set(ocelot, port, attr->u.ageing_time);
break;
case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
- ocelot_port->vlan_aware = attr->u.vlan_filtering;
- ocelot_vlan_port_apply(ocelot_port->ocelot, ocelot_port);
+ priv->vlan_aware = attr->u.vlan_filtering;
+ ocelot_port_vlan_filtering(ocelot, port, priv->vlan_aware);
break;
case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
- ocelot_port_attr_mc_set(ocelot_port, !attr->u.mc_disabled);
+ ocelot_port_attr_mc_set(ocelot, port, !attr->u.mc_disabled);
break;
default:
err = -EOPNOTSUPP;
@@ -1383,15 +1562,17 @@ static int ocelot_port_obj_add_mdb(struct net_device *dev,
const struct switchdev_obj_port_mdb *mdb,
struct switchdev_trans *trans)
{
- struct ocelot_port *port = netdev_priv(dev);
- struct ocelot *ocelot = port->ocelot;
- struct ocelot_multicast *mc;
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot_port *ocelot_port = &priv->port;
+ struct ocelot *ocelot = ocelot_port->ocelot;
unsigned char addr[ETH_ALEN];
+ struct ocelot_multicast *mc;
+ int port = priv->chip_port;
u16 vid = mdb->vid;
bool new = false;
if (!vid)
- vid = port->pvid;
+ vid = ocelot_port->pvid;
mc = ocelot_multicast_get(ocelot, mdb->addr, vid);
if (!mc) {
@@ -1415,7 +1596,7 @@ static int ocelot_port_obj_add_mdb(struct net_device *dev,
ocelot_mact_forget(ocelot, addr, vid);
}
- mc->ports |= BIT(port->chip_port);
+ mc->ports |= BIT(port);
addr[2] = mc->ports << 0;
addr[1] = mc->ports << 8;
@@ -1425,14 +1606,16 @@ static int ocelot_port_obj_add_mdb(struct net_device *dev,
static int ocelot_port_obj_del_mdb(struct net_device *dev,
const struct switchdev_obj_port_mdb *mdb)
{
- struct ocelot_port *port = netdev_priv(dev);
- struct ocelot *ocelot = port->ocelot;
- struct ocelot_multicast *mc;
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot_port *ocelot_port = &priv->port;
+ struct ocelot *ocelot = ocelot_port->ocelot;
unsigned char addr[ETH_ALEN];
+ struct ocelot_multicast *mc;
+ int port = priv->chip_port;
u16 vid = mdb->vid;
if (!vid)
- vid = port->pvid;
+ vid = ocelot_port->pvid;
mc = ocelot_multicast_get(ocelot, mdb->addr, vid);
if (!mc)
@@ -1444,7 +1627,7 @@ static int ocelot_port_obj_del_mdb(struct net_device *dev,
addr[0] = 0;
ocelot_mact_forget(ocelot, addr, vid);
- mc->ports &= ~BIT(port->chip_port);
+ mc->ports &= ~BIT(port);
if (!mc->ports) {
list_del(&mc->list);
devm_kfree(ocelot->dev, mc);
@@ -1501,11 +1684,9 @@ static int ocelot_port_obj_del(struct net_device *dev,
return ret;
}
-static int ocelot_port_bridge_join(struct ocelot_port *ocelot_port,
- struct net_device *bridge)
+int ocelot_port_bridge_join(struct ocelot *ocelot, int port,
+ struct net_device *bridge)
{
- struct ocelot *ocelot = ocelot_port->ocelot;
-
if (!ocelot->bridge_mask) {
ocelot->hw_bridge_dev = bridge;
} else {
@@ -1515,26 +1696,25 @@ static int ocelot_port_bridge_join(struct ocelot_port *ocelot_port,
return -ENODEV;
}
- ocelot->bridge_mask |= BIT(ocelot_port->chip_port);
+ ocelot->bridge_mask |= BIT(port);
return 0;
}
+EXPORT_SYMBOL(ocelot_port_bridge_join);
-static void ocelot_port_bridge_leave(struct ocelot_port *ocelot_port,
- struct net_device *bridge)
+int ocelot_port_bridge_leave(struct ocelot *ocelot, int port,
+ struct net_device *bridge)
{
- struct ocelot *ocelot = ocelot_port->ocelot;
-
- ocelot->bridge_mask &= ~BIT(ocelot_port->chip_port);
+ ocelot->bridge_mask &= ~BIT(port);
if (!ocelot->bridge_mask)
ocelot->hw_bridge_dev = NULL;
- /* Clear bridge vlan settings before calling ocelot_vlan_port_apply */
- ocelot_port->vlan_aware = 0;
- ocelot_port->pvid = 0;
- ocelot_port->vid = 0;
+ ocelot_port_vlan_filtering(ocelot, port, 0);
+ ocelot_port_set_pvid(ocelot, port, 0);
+ return ocelot_port_set_native_vlan(ocelot, port, 0);
}
+EXPORT_SYMBOL(ocelot_port_bridge_leave);
static void ocelot_set_aggr_pgids(struct ocelot *ocelot)
{
@@ -1594,20 +1774,18 @@ static void ocelot_setup_lag(struct ocelot *ocelot, int lag)
}
}
-static int ocelot_port_lag_join(struct ocelot_port *ocelot_port,
+static int ocelot_port_lag_join(struct ocelot *ocelot, int port,
struct net_device *bond)
{
- struct ocelot *ocelot = ocelot_port->ocelot;
- int p = ocelot_port->chip_port;
- int lag, lp;
struct net_device *ndev;
u32 bond_mask = 0;
+ int lag, lp;
rcu_read_lock();
for_each_netdev_in_bond_rcu(bond, ndev) {
- struct ocelot_port *port = netdev_priv(ndev);
+ struct ocelot_port_private *priv = netdev_priv(ndev);
- bond_mask |= BIT(port->chip_port);
+ bond_mask |= BIT(priv->chip_port);
}
rcu_read_unlock();
@@ -1616,17 +1794,17 @@ static int ocelot_port_lag_join(struct ocelot_port *ocelot_port,
/* If the new port is the lowest one, use it as the logical port from
* now on
*/
- if (p == lp) {
- lag = p;
- ocelot->lags[p] = bond_mask;
- bond_mask &= ~BIT(p);
+ if (port == lp) {
+ lag = port;
+ ocelot->lags[port] = bond_mask;
+ bond_mask &= ~BIT(port);
if (bond_mask) {
lp = __ffs(bond_mask);
ocelot->lags[lp] = 0;
}
} else {
lag = lp;
- ocelot->lags[lp] |= BIT(p);
+ ocelot->lags[lp] |= BIT(port);
}
ocelot_setup_lag(ocelot, lag);
@@ -1635,34 +1813,32 @@ static int ocelot_port_lag_join(struct ocelot_port *ocelot_port,
return 0;
}
-static void ocelot_port_lag_leave(struct ocelot_port *ocelot_port,
+static void ocelot_port_lag_leave(struct ocelot *ocelot, int port,
struct net_device *bond)
{
- struct ocelot *ocelot = ocelot_port->ocelot;
- int p = ocelot_port->chip_port;
u32 port_cfg;
int i;
/* Remove port from any lag */
for (i = 0; i < ocelot->num_phys_ports; i++)
- ocelot->lags[i] &= ~BIT(ocelot_port->chip_port);
+ ocelot->lags[i] &= ~BIT(port);
/* if it was the logical port of the lag, move the lag config to the
* next port
*/
- if (ocelot->lags[p]) {
- int n = __ffs(ocelot->lags[p]);
+ if (ocelot->lags[port]) {
+ int n = __ffs(ocelot->lags[port]);
- ocelot->lags[n] = ocelot->lags[p];
- ocelot->lags[p] = 0;
+ ocelot->lags[n] = ocelot->lags[port];
+ ocelot->lags[port] = 0;
ocelot_setup_lag(ocelot, n);
}
- port_cfg = ocelot_read_gix(ocelot, ANA_PORT_PORT_CFG, p);
+ port_cfg = ocelot_read_gix(ocelot, ANA_PORT_PORT_CFG, port);
port_cfg &= ~ANA_PORT_PORT_CFG_PORTID_VAL_M;
- ocelot_write_gix(ocelot, port_cfg | ANA_PORT_PORT_CFG_PORTID_VAL(p),
- ANA_PORT_PORT_CFG, p);
+ ocelot_write_gix(ocelot, port_cfg | ANA_PORT_PORT_CFG_PORTID_VAL(port),
+ ANA_PORT_PORT_CFG, port);
ocelot_set_aggr_pgids(ocelot);
}
@@ -1677,28 +1853,30 @@ static int ocelot_netdevice_port_event(struct net_device *dev,
unsigned long event,
struct netdev_notifier_changeupper_info *info)
{
- struct ocelot_port *ocelot_port = netdev_priv(dev);
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot_port *ocelot_port = &priv->port;
+ struct ocelot *ocelot = ocelot_port->ocelot;
+ int port = priv->chip_port;
int err = 0;
switch (event) {
case NETDEV_CHANGEUPPER:
if (netif_is_bridge_master(info->upper_dev)) {
- if (info->linking)
- err = ocelot_port_bridge_join(ocelot_port,
+ if (info->linking) {
+ err = ocelot_port_bridge_join(ocelot, port,
info->upper_dev);
- else
- ocelot_port_bridge_leave(ocelot_port,
- info->upper_dev);
-
- ocelot_vlan_port_apply(ocelot_port->ocelot,
- ocelot_port);
+ } else {
+ err = ocelot_port_bridge_leave(ocelot, port,
+ info->upper_dev);
+ priv->vlan_aware = false;
+ }
}
if (netif_is_lag_master(info->upper_dev)) {
if (info->linking)
- err = ocelot_port_lag_join(ocelot_port,
+ err = ocelot_port_lag_join(ocelot, port,
info->upper_dev);
else
- ocelot_port_lag_leave(ocelot_port,
+ ocelot_port_lag_leave(ocelot, port,
info->upper_dev);
}
break;
@@ -2001,24 +2179,97 @@ static int ocelot_init_timestamp(struct ocelot *ocelot)
return 0;
}
+static void ocelot_port_set_mtu(struct ocelot *ocelot, int port, size_t mtu)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ int atop_wm;
+
+ ocelot_port_writel(ocelot_port, mtu, DEV_MAC_MAXLEN_CFG);
+
+ /* Set Pause WM hysteresis
+ * 152 = 6 * mtu / OCELOT_BUFFER_CELL_SZ
+ * 101 = 4 * mtu / OCELOT_BUFFER_CELL_SZ
+ */
+ ocelot_write_rix(ocelot, SYS_PAUSE_CFG_PAUSE_ENA |
+ SYS_PAUSE_CFG_PAUSE_STOP(101) |
+ SYS_PAUSE_CFG_PAUSE_START(152), SYS_PAUSE_CFG, port);
+
+ /* Tail dropping watermark */
+ atop_wm = (ocelot->shared_queue_sz - 9 * mtu) / OCELOT_BUFFER_CELL_SZ;
+ ocelot_write_rix(ocelot, ocelot_wm_enc(9 * mtu),
+ SYS_ATOP, port);
+ ocelot_write(ocelot, ocelot_wm_enc(atop_wm), SYS_ATOP_TOT_CFG);
+}
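
For the default mtu of VLAN_ETH_FRAME_LEN (1518 bytes) the watermark arithmetic above works out as follows, assuming the usual OCELOT_BUFFER_CELL_SZ of 60 bytes:

    /*
     *   PAUSE_STOP  = 101 ~= 4 * 1518 / 60  (stop pausing below ~4 frames)
     *   PAUSE_START = 152 ~= 6 * 1518 / 60  (start pausing above ~6 frames)
     *   SYS_ATOP    = wm_enc(9 * 1518)      (per-port tail drop, ~9 frames)
     *   ATOP_TOT    = (shared_queue_sz - 9 * 1518) / 60 shared cells left
     */
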
+
+void ocelot_init_port(struct ocelot *ocelot, int port)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+
+ INIT_LIST_HEAD(&ocelot_port->skbs);
+
+ /* Basic L2 initialization */
+
+ /* Set MAC IFG Gaps
+ * FDX: TX_IFG = 5, RX_IFG1 = RX_IFG2 = 0
+ * !FDX: TX_IFG = 5, RX_IFG1 = RX_IFG2 = 5
+ */
+ ocelot_port_writel(ocelot_port, DEV_MAC_IFG_CFG_TX_IFG(5),
+ DEV_MAC_IFG_CFG);
+
+ /* Load seed (0) and set MAC HDX late collision */
+ ocelot_port_writel(ocelot_port, DEV_MAC_HDX_CFG_LATE_COL_POS(67) |
+ DEV_MAC_HDX_CFG_SEED_LOAD,
+ DEV_MAC_HDX_CFG);
+ mdelay(1);
+ ocelot_port_writel(ocelot_port, DEV_MAC_HDX_CFG_LATE_COL_POS(67),
+ DEV_MAC_HDX_CFG);
+
+ /* Set Max Length and maximum tags allowed */
+ ocelot_port_set_mtu(ocelot, port, VLAN_ETH_FRAME_LEN);
+ ocelot_port_writel(ocelot_port, DEV_MAC_TAGS_CFG_TAG_ID(ETH_P_8021AD) |
+ DEV_MAC_TAGS_CFG_VLAN_AWR_ENA |
+ DEV_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA,
+ DEV_MAC_TAGS_CFG);
+
+ /* Set SMAC of Pause frame (00:00:00:00:00:00) */
+ ocelot_port_writel(ocelot_port, 0, DEV_MAC_FC_MAC_HIGH_CFG);
+ ocelot_port_writel(ocelot_port, 0, DEV_MAC_FC_MAC_LOW_CFG);
+
+ /* Drop frames with multicast source address */
+ ocelot_rmw_gix(ocelot, ANA_PORT_DROP_CFG_DROP_MC_SMAC_ENA,
+ ANA_PORT_DROP_CFG_DROP_MC_SMAC_ENA,
+ ANA_PORT_DROP_CFG, port);
+
+ /* Set default VLAN and tag type to 8021Q. */
+ ocelot_rmw_gix(ocelot, REW_PORT_VLAN_CFG_PORT_TPID(ETH_P_8021Q),
+ REW_PORT_VLAN_CFG_PORT_TPID_M,
+ REW_PORT_VLAN_CFG, port);
+
+ /* Enable vcap lookups */
+ ocelot_vcap_enable(ocelot, port);
+}
+EXPORT_SYMBOL(ocelot_init_port);
+
int ocelot_probe_port(struct ocelot *ocelot, u8 port,
void __iomem *regs,
struct phy_device *phy)
{
+ struct ocelot_port_private *priv;
struct ocelot_port *ocelot_port;
struct net_device *dev;
int err;
- dev = alloc_etherdev(sizeof(struct ocelot_port));
+ dev = alloc_etherdev(sizeof(struct ocelot_port_private));
if (!dev)
return -ENOMEM;
SET_NETDEV_DEV(dev, ocelot->dev);
- ocelot_port = netdev_priv(dev);
- ocelot_port->dev = dev;
+ priv = netdev_priv(dev);
+ priv->dev = dev;
+ priv->phy = phy;
+ priv->chip_port = port;
+ ocelot_port = &priv->port;
ocelot_port->ocelot = ocelot;
ocelot_port->regs = regs;
- ocelot_port->chip_port = port;
- ocelot_port->phy = phy;
ocelot->ports[port] = ocelot_port;
dev->netdev_ops = &ocelot_port_netdev_ops;
@@ -2033,33 +2284,81 @@ int ocelot_probe_port(struct ocelot *ocelot, u8 port,
ocelot_mact_learn(ocelot, PGID_CPU, dev->dev_addr, ocelot_port->pvid,
ENTRYTYPE_LOCKED);
- INIT_LIST_HEAD(&ocelot_port->skbs);
+ ocelot_init_port(ocelot, port);
err = register_netdev(dev);
if (err) {
dev_err(ocelot->dev, "register_netdev failed\n");
- goto err_register_netdev;
+ free_netdev(dev);
}
- /* Basic L2 initialization */
- ocelot_vlan_port_apply(ocelot, ocelot_port);
+ return err;
+}
+EXPORT_SYMBOL(ocelot_probe_port);
- /* Enable vcap lookups */
- ocelot_vcap_enable(ocelot, ocelot_port);
+void ocelot_set_cpu_port(struct ocelot *ocelot, int cpu,
+ enum ocelot_tag_prefix injection,
+ enum ocelot_tag_prefix extraction)
+{
+ /* Configure and enable the CPU port. */
+ ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, cpu);
+ ocelot_write_rix(ocelot, BIT(cpu), ANA_PGID_PGID, PGID_CPU);
+ ocelot_write_gix(ocelot, ANA_PORT_PORT_CFG_RECV_ENA |
+ ANA_PORT_PORT_CFG_PORTID_VAL(cpu),
+ ANA_PORT_PORT_CFG, cpu);
- return 0;
+ /* If the CPU port is a physical port, set up the port in Node
+ * Processor Interface (NPI) mode. This is the mode through which
+ * frames can be injected from and extracted to an external CPU.
+ * Only one port can be an NPI at the same time.
+ */
+ if (cpu < ocelot->num_phys_ports) {
+ int mtu = VLAN_ETH_FRAME_LEN + OCELOT_TAG_LEN;
-err_register_netdev:
- free_netdev(dev);
- return err;
+ ocelot_write(ocelot, QSYS_EXT_CPU_CFG_EXT_CPUQ_MSK_M |
+ QSYS_EXT_CPU_CFG_EXT_CPU_PORT(cpu),
+ QSYS_EXT_CPU_CFG);
+
+ if (injection == OCELOT_TAG_PREFIX_SHORT)
+ mtu += OCELOT_SHORT_PREFIX_LEN;
+ else if (injection == OCELOT_TAG_PREFIX_LONG)
+ mtu += OCELOT_LONG_PREFIX_LEN;
+
+ ocelot_port_set_mtu(ocelot, cpu, mtu);
+ }
+
+ /* CPU port Injection/Extraction configuration */
+ ocelot_write_rix(ocelot, QSYS_SWITCH_PORT_MODE_INGRESS_DROP_MODE |
+ QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG(1) |
+ QSYS_SWITCH_PORT_MODE_PORT_ENA,
+ QSYS_SWITCH_PORT_MODE, cpu);
+ ocelot_write_rix(ocelot, SYS_PORT_MODE_INCL_XTR_HDR(extraction) |
+ SYS_PORT_MODE_INCL_INJ_HDR(injection),
+ SYS_PORT_MODE, cpu);
+
+ /* Configure the CPU port to be VLAN aware */
+ ocelot_write_gix(ocelot, ANA_PORT_VLAN_CFG_VLAN_VID(0) |
+ ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA |
+ ANA_PORT_VLAN_CFG_VLAN_POP_CNT(1),
+ ANA_PORT_VLAN_CFG, cpu);
+
+ ocelot->cpu = cpu;
}
-EXPORT_SYMBOL(ocelot_probe_port);
+EXPORT_SYMBOL(ocelot_set_cpu_port);
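
A probe-time caller picks the CPU port and the injection/extraction prefixes. For the traditional setup with the internal CPU port this reduces to the sketch below, assuming the enum also provides an OCELOT_TAG_PREFIX_NONE value matching the INCL_*_HDR(1) configuration the removed ocelot_init() code used to write:

    /* Internal CPU port (index num_phys_ports): plain IFH, no extra
     * prefix on injection or extraction.
     */
    ocelot_set_cpu_port(ocelot, ocelot->num_phys_ports,
                        OCELOT_TAG_PREFIX_NONE, OCELOT_TAG_PREFIX_NONE);
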
int ocelot_init(struct ocelot *ocelot)
{
- u32 port;
- int i, ret, cpu = ocelot->num_phys_ports;
char queue_name[32];
+ int i, ret;
+ u32 port;
+
+ if (ocelot->ops->reset) {
+ ret = ocelot->ops->reset(ocelot);
+ if (ret) {
+ dev_err(ocelot->dev, "Switch reset failed\n");
+ return ret;
+ }
+ }
ocelot->lags = devm_kcalloc(ocelot->dev, ocelot->num_phys_ports,
sizeof(u32), GFP_KERNEL);
@@ -2081,6 +2380,7 @@ int ocelot_init(struct ocelot *ocelot)
if (!ocelot->stats_queue)
return -ENOMEM;
+ INIT_LIST_HEAD(&ocelot->multicast);
ocelot_mact_init(ocelot);
ocelot_vlan_init(ocelot);
ocelot_ace_init(ocelot);
@@ -2138,13 +2438,6 @@ int ocelot_init(struct ocelot *ocelot)
ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, PGID_SRC + port);
}
- /* Configure and enable the CPU port. */
- ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, cpu);
- ocelot_write_rix(ocelot, BIT(cpu), ANA_PGID_PGID, PGID_CPU);
- ocelot_write_gix(ocelot, ANA_PORT_PORT_CFG_RECV_ENA |
- ANA_PORT_PORT_CFG_PORTID_VAL(cpu),
- ANA_PORT_PORT_CFG, cpu);
-
/* Allow broadcast MAC frames. */
for (i = ocelot->num_phys_ports + 1; i < PGID_CPU; i++) {
u32 val = ANA_PGID_PGID_PGID(GENMASK(ocelot->num_phys_ports - 1, 0));
@@ -2157,13 +2450,6 @@ int ocelot_init(struct ocelot *ocelot)
ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, PGID_MCIPV4);
ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, PGID_MCIPV6);
- /* CPU port Injection/Extraction configuration */
- ocelot_write_rix(ocelot, QSYS_SWITCH_PORT_MODE_INGRESS_DROP_MODE |
- QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG(1) |
- QSYS_SWITCH_PORT_MODE_PORT_ENA,
- QSYS_SWITCH_PORT_MODE, cpu);
- ocelot_write_rix(ocelot, SYS_PORT_MODE_INCL_XTR_HDR(1) |
- SYS_PORT_MODE_INCL_INJ_HDR(1), SYS_PORT_MODE, cpu);
/* Allow manual injection via DEVCPU_QS registers, and byte swap these
* registers endianness.
*/
diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h
index 06ac806052bc..c259114c48fd 100644
--- a/drivers/net/ethernet/mscc/ocelot.h
+++ b/drivers/net/ethernet/mscc/ocelot.h
@@ -18,11 +18,12 @@
#include <linux/ptp_clock_kernel.h>
#include <linux/regmap.h>
+#include <soc/mscc/ocelot_sys.h>
+#include <soc/mscc/ocelot.h>
#include "ocelot_ana.h"
#include "ocelot_dev.h"
#include "ocelot_qsys.h"
#include "ocelot_rew.h"
-#include "ocelot_sys.h"
#include "ocelot_qs.h"
#include "ocelot_tc.h"
#include "ocelot_ptp.h"
@@ -43,8 +44,6 @@
#define OCELOT_PTP_QUEUE_SZ 128
-#define IFH_LEN 4
-
struct frame_info {
u32 len;
u16 port;
@@ -54,372 +53,6 @@ struct frame_info {
u32 timestamp; /* rew_val */
};
-#define IFH_INJ_BYPASS BIT(31)
-#define IFH_INJ_POP_CNT_DISABLE (3 << 28)
-
-#define IFH_TAG_TYPE_C 0
-#define IFH_TAG_TYPE_S 1
-
-#define IFH_REW_OP_NOOP 0x0
-#define IFH_REW_OP_DSCP 0x1
-#define IFH_REW_OP_ONE_STEP_PTP 0x2
-#define IFH_REW_OP_TWO_STEP_PTP 0x3
-#define IFH_REW_OP_ORIGIN_PTP 0x5
-
-#define OCELOT_SPEED_2500 0
-#define OCELOT_SPEED_1000 1
-#define OCELOT_SPEED_100 2
-#define OCELOT_SPEED_10 3
-
-#define TARGET_OFFSET 24
-#define REG_MASK GENMASK(TARGET_OFFSET - 1, 0)
-#define REG(reg, offset) [reg & REG_MASK] = offset
-
-enum ocelot_target {
- ANA = 1,
- QS,
- QSYS,
- REW,
- SYS,
- S2,
- HSIO,
- PTP,
- TARGET_MAX,
-};
-
-enum ocelot_reg {
- ANA_ADVLEARN = ANA << TARGET_OFFSET,
- ANA_VLANMASK,
- ANA_PORT_B_DOMAIN,
- ANA_ANAGEFIL,
- ANA_ANEVENTS,
- ANA_STORMLIMIT_BURST,
- ANA_STORMLIMIT_CFG,
- ANA_ISOLATED_PORTS,
- ANA_COMMUNITY_PORTS,
- ANA_AUTOAGE,
- ANA_MACTOPTIONS,
- ANA_LEARNDISC,
- ANA_AGENCTRL,
- ANA_MIRRORPORTS,
- ANA_EMIRRORPORTS,
- ANA_FLOODING,
- ANA_FLOODING_IPMC,
- ANA_SFLOW_CFG,
- ANA_PORT_MODE,
- ANA_CUT_THRU_CFG,
- ANA_PGID_PGID,
- ANA_TABLES_ANMOVED,
- ANA_TABLES_MACHDATA,
- ANA_TABLES_MACLDATA,
- ANA_TABLES_STREAMDATA,
- ANA_TABLES_MACACCESS,
- ANA_TABLES_MACTINDX,
- ANA_TABLES_VLANACCESS,
- ANA_TABLES_VLANTIDX,
- ANA_TABLES_ISDXACCESS,
- ANA_TABLES_ISDXTIDX,
- ANA_TABLES_ENTRYLIM,
- ANA_TABLES_PTP_ID_HIGH,
- ANA_TABLES_PTP_ID_LOW,
- ANA_TABLES_STREAMACCESS,
- ANA_TABLES_STREAMTIDX,
- ANA_TABLES_SEQ_HISTORY,
- ANA_TABLES_SEQ_MASK,
- ANA_TABLES_SFID_MASK,
- ANA_TABLES_SFIDACCESS,
- ANA_TABLES_SFIDTIDX,
- ANA_MSTI_STATE,
- ANA_OAM_UPM_LM_CNT,
- ANA_SG_ACCESS_CTRL,
- ANA_SG_CONFIG_REG_1,
- ANA_SG_CONFIG_REG_2,
- ANA_SG_CONFIG_REG_3,
- ANA_SG_CONFIG_REG_4,
- ANA_SG_CONFIG_REG_5,
- ANA_SG_GCL_GS_CONFIG,
- ANA_SG_GCL_TI_CONFIG,
- ANA_SG_STATUS_REG_1,
- ANA_SG_STATUS_REG_2,
- ANA_SG_STATUS_REG_3,
- ANA_PORT_VLAN_CFG,
- ANA_PORT_DROP_CFG,
- ANA_PORT_QOS_CFG,
- ANA_PORT_VCAP_CFG,
- ANA_PORT_VCAP_S1_KEY_CFG,
- ANA_PORT_VCAP_S2_CFG,
- ANA_PORT_PCP_DEI_MAP,
- ANA_PORT_CPU_FWD_CFG,
- ANA_PORT_CPU_FWD_BPDU_CFG,
- ANA_PORT_CPU_FWD_GARP_CFG,
- ANA_PORT_CPU_FWD_CCM_CFG,
- ANA_PORT_PORT_CFG,
- ANA_PORT_POL_CFG,
- ANA_PORT_PTP_CFG,
- ANA_PORT_PTP_DLY1_CFG,
- ANA_PORT_PTP_DLY2_CFG,
- ANA_PORT_SFID_CFG,
- ANA_PFC_PFC_CFG,
- ANA_PFC_PFC_TIMER,
- ANA_IPT_OAM_MEP_CFG,
- ANA_IPT_IPT,
- ANA_PPT_PPT,
- ANA_FID_MAP_FID_MAP,
- ANA_AGGR_CFG,
- ANA_CPUQ_CFG,
- ANA_CPUQ_CFG2,
- ANA_CPUQ_8021_CFG,
- ANA_DSCP_CFG,
- ANA_DSCP_REWR_CFG,
- ANA_VCAP_RNG_TYPE_CFG,
- ANA_VCAP_RNG_VAL_CFG,
- ANA_VRAP_CFG,
- ANA_VRAP_HDR_DATA,
- ANA_VRAP_HDR_MASK,
- ANA_DISCARD_CFG,
- ANA_FID_CFG,
- ANA_POL_PIR_CFG,
- ANA_POL_CIR_CFG,
- ANA_POL_MODE_CFG,
- ANA_POL_PIR_STATE,
- ANA_POL_CIR_STATE,
- ANA_POL_STATE,
- ANA_POL_FLOWC,
- ANA_POL_HYST,
- ANA_POL_MISC_CFG,
- QS_XTR_GRP_CFG = QS << TARGET_OFFSET,
- QS_XTR_RD,
- QS_XTR_FRM_PRUNING,
- QS_XTR_FLUSH,
- QS_XTR_DATA_PRESENT,
- QS_XTR_CFG,
- QS_INJ_GRP_CFG,
- QS_INJ_WR,
- QS_INJ_CTRL,
- QS_INJ_STATUS,
- QS_INJ_ERR,
- QS_INH_DBG,
- QSYS_PORT_MODE = QSYS << TARGET_OFFSET,
- QSYS_SWITCH_PORT_MODE,
- QSYS_STAT_CNT_CFG,
- QSYS_EEE_CFG,
- QSYS_EEE_THRES,
- QSYS_IGR_NO_SHARING,
- QSYS_EGR_NO_SHARING,
- QSYS_SW_STATUS,
- QSYS_EXT_CPU_CFG,
- QSYS_PAD_CFG,
- QSYS_CPU_GROUP_MAP,
- QSYS_QMAP,
- QSYS_ISDX_SGRP,
- QSYS_TIMED_FRAME_ENTRY,
- QSYS_TFRM_MISC,
- QSYS_TFRM_PORT_DLY,
- QSYS_TFRM_TIMER_CFG_1,
- QSYS_TFRM_TIMER_CFG_2,
- QSYS_TFRM_TIMER_CFG_3,
- QSYS_TFRM_TIMER_CFG_4,
- QSYS_TFRM_TIMER_CFG_5,
- QSYS_TFRM_TIMER_CFG_6,
- QSYS_TFRM_TIMER_CFG_7,
- QSYS_TFRM_TIMER_CFG_8,
- QSYS_RED_PROFILE,
- QSYS_RES_QOS_MODE,
- QSYS_RES_CFG,
- QSYS_RES_STAT,
- QSYS_EGR_DROP_MODE,
- QSYS_EQ_CTRL,
- QSYS_EVENTS_CORE,
- QSYS_QMAXSDU_CFG_0,
- QSYS_QMAXSDU_CFG_1,
- QSYS_QMAXSDU_CFG_2,
- QSYS_QMAXSDU_CFG_3,
- QSYS_QMAXSDU_CFG_4,
- QSYS_QMAXSDU_CFG_5,
- QSYS_QMAXSDU_CFG_6,
- QSYS_QMAXSDU_CFG_7,
- QSYS_PREEMPTION_CFG,
- QSYS_CIR_CFG,
- QSYS_EIR_CFG,
- QSYS_SE_CFG,
- QSYS_SE_DWRR_CFG,
- QSYS_SE_CONNECT,
- QSYS_SE_DLB_SENSE,
- QSYS_CIR_STATE,
- QSYS_EIR_STATE,
- QSYS_SE_STATE,
- QSYS_HSCH_MISC_CFG,
- QSYS_TAG_CONFIG,
- QSYS_TAS_PARAM_CFG_CTRL,
- QSYS_PORT_MAX_SDU,
- QSYS_PARAM_CFG_REG_1,
- QSYS_PARAM_CFG_REG_2,
- QSYS_PARAM_CFG_REG_3,
- QSYS_PARAM_CFG_REG_4,
- QSYS_PARAM_CFG_REG_5,
- QSYS_GCL_CFG_REG_1,
- QSYS_GCL_CFG_REG_2,
- QSYS_PARAM_STATUS_REG_1,
- QSYS_PARAM_STATUS_REG_2,
- QSYS_PARAM_STATUS_REG_3,
- QSYS_PARAM_STATUS_REG_4,
- QSYS_PARAM_STATUS_REG_5,
- QSYS_PARAM_STATUS_REG_6,
- QSYS_PARAM_STATUS_REG_7,
- QSYS_PARAM_STATUS_REG_8,
- QSYS_PARAM_STATUS_REG_9,
- QSYS_GCL_STATUS_REG_1,
- QSYS_GCL_STATUS_REG_2,
- REW_PORT_VLAN_CFG = REW << TARGET_OFFSET,
- REW_TAG_CFG,
- REW_PORT_CFG,
- REW_DSCP_CFG,
- REW_PCP_DEI_QOS_MAP_CFG,
- REW_PTP_CFG,
- REW_PTP_DLY1_CFG,
- REW_RED_TAG_CFG,
- REW_DSCP_REMAP_DP1_CFG,
- REW_DSCP_REMAP_CFG,
- REW_STAT_CFG,
- REW_REW_STICKY,
- REW_PPT,
- SYS_COUNT_RX_OCTETS = SYS << TARGET_OFFSET,
- SYS_COUNT_RX_UNICAST,
- SYS_COUNT_RX_MULTICAST,
- SYS_COUNT_RX_BROADCAST,
- SYS_COUNT_RX_SHORTS,
- SYS_COUNT_RX_FRAGMENTS,
- SYS_COUNT_RX_JABBERS,
- SYS_COUNT_RX_CRC_ALIGN_ERRS,
- SYS_COUNT_RX_SYM_ERRS,
- SYS_COUNT_RX_64,
- SYS_COUNT_RX_65_127,
- SYS_COUNT_RX_128_255,
- SYS_COUNT_RX_256_1023,
- SYS_COUNT_RX_1024_1526,
- SYS_COUNT_RX_1527_MAX,
- SYS_COUNT_RX_PAUSE,
- SYS_COUNT_RX_CONTROL,
- SYS_COUNT_RX_LONGS,
- SYS_COUNT_RX_CLASSIFIED_DROPS,
- SYS_COUNT_TX_OCTETS,
- SYS_COUNT_TX_UNICAST,
- SYS_COUNT_TX_MULTICAST,
- SYS_COUNT_TX_BROADCAST,
- SYS_COUNT_TX_COLLISION,
- SYS_COUNT_TX_DROPS,
- SYS_COUNT_TX_PAUSE,
- SYS_COUNT_TX_64,
- SYS_COUNT_TX_65_127,
- SYS_COUNT_TX_128_511,
- SYS_COUNT_TX_512_1023,
- SYS_COUNT_TX_1024_1526,
- SYS_COUNT_TX_1527_MAX,
- SYS_COUNT_TX_AGING,
- SYS_RESET_CFG,
- SYS_SR_ETYPE_CFG,
- SYS_VLAN_ETYPE_CFG,
- SYS_PORT_MODE,
- SYS_FRONT_PORT_MODE,
- SYS_FRM_AGING,
- SYS_STAT_CFG,
- SYS_SW_STATUS,
- SYS_MISC_CFG,
- SYS_REW_MAC_HIGH_CFG,
- SYS_REW_MAC_LOW_CFG,
- SYS_TIMESTAMP_OFFSET,
- SYS_CMID,
- SYS_PAUSE_CFG,
- SYS_PAUSE_TOT_CFG,
- SYS_ATOP,
- SYS_ATOP_TOT_CFG,
- SYS_MAC_FC_CFG,
- SYS_MMGT,
- SYS_MMGT_FAST,
- SYS_EVENTS_DIF,
- SYS_EVENTS_CORE,
- SYS_CNT,
- SYS_PTP_STATUS,
- SYS_PTP_TXSTAMP,
- SYS_PTP_NXT,
- SYS_PTP_CFG,
- SYS_RAM_INIT,
- SYS_CM_ADDR,
- SYS_CM_DATA_WR,
- SYS_CM_DATA_RD,
- SYS_CM_OP,
- SYS_CM_DATA,
- S2_CORE_UPDATE_CTRL = S2 << TARGET_OFFSET,
- S2_CORE_MV_CFG,
- S2_CACHE_ENTRY_DAT,
- S2_CACHE_MASK_DAT,
- S2_CACHE_ACTION_DAT,
- S2_CACHE_CNT_DAT,
- S2_CACHE_TG_DAT,
- PTP_PIN_CFG = PTP << TARGET_OFFSET,
- PTP_PIN_TOD_SEC_MSB,
- PTP_PIN_TOD_SEC_LSB,
- PTP_PIN_TOD_NSEC,
- PTP_CFG_MISC,
- PTP_CLK_CFG_ADJ_CFG,
- PTP_CLK_CFG_ADJ_FREQ,
-};
-
-enum ocelot_regfield {
- ANA_ADVLEARN_VLAN_CHK,
- ANA_ADVLEARN_LEARN_MIRROR,
- ANA_ANEVENTS_FLOOD_DISCARD,
- ANA_ANEVENTS_MSTI_DROP,
- ANA_ANEVENTS_ACLKILL,
- ANA_ANEVENTS_ACLUSED,
- ANA_ANEVENTS_AUTOAGE,
- ANA_ANEVENTS_VS2TTL1,
- ANA_ANEVENTS_STORM_DROP,
- ANA_ANEVENTS_LEARN_DROP,
- ANA_ANEVENTS_AGED_ENTRY,
- ANA_ANEVENTS_CPU_LEARN_FAILED,
- ANA_ANEVENTS_AUTO_LEARN_FAILED,
- ANA_ANEVENTS_LEARN_REMOVE,
- ANA_ANEVENTS_AUTO_LEARNED,
- ANA_ANEVENTS_AUTO_MOVED,
- ANA_ANEVENTS_DROPPED,
- ANA_ANEVENTS_CLASSIFIED_DROP,
- ANA_ANEVENTS_CLASSIFIED_COPY,
- ANA_ANEVENTS_VLAN_DISCARD,
- ANA_ANEVENTS_FWD_DISCARD,
- ANA_ANEVENTS_MULTICAST_FLOOD,
- ANA_ANEVENTS_UNICAST_FLOOD,
- ANA_ANEVENTS_DEST_KNOWN,
- ANA_ANEVENTS_BUCKET3_MATCH,
- ANA_ANEVENTS_BUCKET2_MATCH,
- ANA_ANEVENTS_BUCKET1_MATCH,
- ANA_ANEVENTS_BUCKET0_MATCH,
- ANA_ANEVENTS_CPU_OPERATION,
- ANA_ANEVENTS_DMAC_LOOKUP,
- ANA_ANEVENTS_SMAC_LOOKUP,
- ANA_ANEVENTS_SEQ_GEN_ERR_0,
- ANA_ANEVENTS_SEQ_GEN_ERR_1,
- ANA_TABLES_MACACCESS_B_DOM,
- ANA_TABLES_MACTINDX_BUCKET,
- ANA_TABLES_MACTINDX_M_INDEX,
- QSYS_TIMED_FRAME_ENTRY_TFRM_VLD,
- QSYS_TIMED_FRAME_ENTRY_TFRM_FP,
- QSYS_TIMED_FRAME_ENTRY_TFRM_PORTNO,
- QSYS_TIMED_FRAME_ENTRY_TFRM_TM_SEL,
- QSYS_TIMED_FRAME_ENTRY_TFRM_TM_T,
- SYS_RESET_CFG_CORE_ENA,
- SYS_RESET_CFG_MEM_ENA,
- SYS_RESET_CFG_MEM_INIT,
- REGFIELD_MAX
-};
-
-enum ocelot_clk_pins {
- ALT_PPS_PIN = 1,
- EXT_CLK_PIN,
- ALT_LDST_PIN,
- TOD_ACC_PIN
-};
-
struct ocelot_multicast {
struct list_head list;
unsigned char addr[ETH_ALEN];
@@ -427,133 +60,40 @@ struct ocelot_multicast {
u16 ports;
};
-struct ocelot_port;
-
-struct ocelot_stat_layout {
- u32 offset;
- char name[ETH_GSTRING_LEN];
-};
-
-struct ocelot {
- struct device *dev;
-
- struct regmap *targets[TARGET_MAX];
- struct regmap_field *regfields[REGFIELD_MAX];
- const u32 *const *map;
- const struct ocelot_stat_layout *stats_layout;
- unsigned int num_stats;
-
- u8 base_mac[ETH_ALEN];
-
- struct net_device *hw_bridge_dev;
- u16 bridge_mask;
- u16 bridge_fwd_mask;
-
- struct workqueue_struct *ocelot_owq;
-
- int shared_queue_sz;
-
- u8 num_phys_ports;
- u8 num_cpu_ports;
- struct ocelot_port **ports;
-
- u32 *lags;
-
- /* Keep track of the vlan port masks */
- u32 vlan_mask[VLAN_N_VID];
-
- struct list_head multicast;
-
- /* Workqueue to check statistics for overflow with its lock */
- struct mutex stats_lock;
- u64 *stats;
- struct delayed_work stats_work;
- struct workqueue_struct *stats_queue;
-
- u8 ptp:1;
- struct ptp_clock *ptp_clock;
- struct ptp_clock_info ptp_info;
- struct hwtstamp_config hwtstamp_config;
- struct mutex ptp_lock; /* Protects the PTP interface state */
- spinlock_t ptp_clock_lock; /* Protects the PTP clock */
-};
-
-struct ocelot_port {
+struct ocelot_port_private {
+ struct ocelot_port port;
struct net_device *dev;
- struct ocelot *ocelot;
struct phy_device *phy;
- void __iomem *regs;
u8 chip_port;
- /* Ingress default VLAN (pvid) */
- u16 pvid;
-
- /* Egress default VLAN (vid) */
- u16 vid;
-
u8 vlan_aware;
- u64 *stats;
-
phy_interface_t phy_mode;
struct phy *serdes;
struct ocelot_port_tc tc;
-
- u8 ptp_cmd;
- struct list_head skbs;
- u8 ts_id;
-};
-
-struct ocelot_skb {
- struct list_head head;
- struct sk_buff *skb;
- u8 id;
};
-u32 __ocelot_read_ix(struct ocelot *ocelot, u32 reg, u32 offset);
-#define ocelot_read_ix(ocelot, reg, gi, ri) __ocelot_read_ix(ocelot, reg, reg##_GSZ * (gi) + reg##_RSZ * (ri))
-#define ocelot_read_gix(ocelot, reg, gi) __ocelot_read_ix(ocelot, reg, reg##_GSZ * (gi))
-#define ocelot_read_rix(ocelot, reg, ri) __ocelot_read_ix(ocelot, reg, reg##_RSZ * (ri))
-#define ocelot_read(ocelot, reg) __ocelot_read_ix(ocelot, reg, 0)
-
-void __ocelot_write_ix(struct ocelot *ocelot, u32 val, u32 reg, u32 offset);
-#define ocelot_write_ix(ocelot, val, reg, gi, ri) __ocelot_write_ix(ocelot, val, reg, reg##_GSZ * (gi) + reg##_RSZ * (ri))
-#define ocelot_write_gix(ocelot, val, reg, gi) __ocelot_write_ix(ocelot, val, reg, reg##_GSZ * (gi))
-#define ocelot_write_rix(ocelot, val, reg, ri) __ocelot_write_ix(ocelot, val, reg, reg##_RSZ * (ri))
-#define ocelot_write(ocelot, val, reg) __ocelot_write_ix(ocelot, val, reg, 0)
-
-void __ocelot_rmw_ix(struct ocelot *ocelot, u32 val, u32 mask, u32 reg,
- u32 offset);
-#define ocelot_rmw_ix(ocelot, val, m, reg, gi, ri) __ocelot_rmw_ix(ocelot, val, m, reg, reg##_GSZ * (gi) + reg##_RSZ * (ri))
-#define ocelot_rmw_gix(ocelot, val, m, reg, gi) __ocelot_rmw_ix(ocelot, val, m, reg, reg##_GSZ * (gi))
-#define ocelot_rmw_rix(ocelot, val, m, reg, ri) __ocelot_rmw_ix(ocelot, val, m, reg, reg##_RSZ * (ri))
-#define ocelot_rmw(ocelot, val, m, reg) __ocelot_rmw_ix(ocelot, val, m, reg, 0)
-
u32 ocelot_port_readl(struct ocelot_port *port, u32 reg);
void ocelot_port_writel(struct ocelot_port *port, u32 val, u32 reg);
-int ocelot_regfields_init(struct ocelot *ocelot,
- const struct reg_field *const regfields);
-struct regmap *ocelot_io_platform_init(struct ocelot *ocelot,
- struct platform_device *pdev,
- const char *name);
-
#define ocelot_field_write(ocelot, reg, val) regmap_field_write((ocelot)->regfields[(reg)], (val))
#define ocelot_field_read(ocelot, reg, val) regmap_field_read((ocelot)->regfields[(reg)], (val))
-int ocelot_init(struct ocelot *ocelot);
-void ocelot_deinit(struct ocelot *ocelot);
-int ocelot_chip_init(struct ocelot *ocelot);
+int ocelot_chip_init(struct ocelot *ocelot, const struct ocelot_ops *ops);
int ocelot_probe_port(struct ocelot *ocelot, u8 port,
void __iomem *regs,
struct phy_device *phy);
+void ocelot_set_cpu_port(struct ocelot *ocelot, int cpu,
+ enum ocelot_tag_prefix injection,
+ enum ocelot_tag_prefix extraction);
+
extern struct notifier_block ocelot_netdevice_nb;
extern struct notifier_block ocelot_switchdev_nb;
extern struct notifier_block ocelot_switchdev_blocking_nb;
-int ocelot_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts);
-void ocelot_get_hwtimestamp(struct ocelot *ocelot, struct timespec64 *ts);
#endif
diff --git a/drivers/net/ethernet/mscc/ocelot_ace.h b/drivers/net/ethernet/mscc/ocelot_ace.h
index e98944c87259..c08e3e8482e7 100644
--- a/drivers/net/ethernet/mscc/ocelot_ace.h
+++ b/drivers/net/ethernet/mscc/ocelot_ace.h
@@ -224,9 +224,9 @@ int ocelot_ace_rule_stats_update(struct ocelot_ace_rule *rule);
int ocelot_ace_init(struct ocelot *ocelot);
void ocelot_ace_deinit(void);
-int ocelot_setup_tc_block_flower_bind(struct ocelot_port *port,
+int ocelot_setup_tc_block_flower_bind(struct ocelot_port_private *priv,
struct flow_block_offload *f);
-void ocelot_setup_tc_block_flower_unbind(struct ocelot_port *port,
+void ocelot_setup_tc_block_flower_unbind(struct ocelot_port_private *priv,
struct flow_block_offload *f);
#endif /* _MSCC_OCELOT_ACE_H_ */
diff --git a/drivers/net/ethernet/mscc/ocelot_board.c b/drivers/net/ethernet/mscc/ocelot_board.c
index aac115136720..2da8eee27e98 100644
--- a/drivers/net/ethernet/mscc/ocelot_board.c
+++ b/drivers/net/ethernet/mscc/ocelot_board.c
@@ -95,6 +95,8 @@ static irqreturn_t ocelot_xtr_irq_handler(int irq, void *arg)
do {
struct skb_shared_hwtstamps *shhwtstamps;
+ struct ocelot_port_private *priv;
+ struct ocelot_port *ocelot_port;
u64 tod_in_ns, full_ts_in_ns;
struct frame_info info = {};
struct net_device *dev;
@@ -103,7 +105,7 @@ static irqreturn_t ocelot_xtr_irq_handler(int irq, void *arg)
int sz, len, buf_len;
struct sk_buff *skb;
- for (i = 0; i < IFH_LEN; i++) {
+ for (i = 0; i < OCELOT_TAG_LEN / 4; i++) {
err = ocelot_rx_frame_word(ocelot, grp, true, &ifh[i]);
if (err != 4)
break;
@@ -114,7 +116,10 @@ static irqreturn_t ocelot_xtr_irq_handler(int irq, void *arg)
ocelot_parse_ifh(ifh, &info);
- dev = ocelot->ports[info.port]->dev;
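+ /* struct ocelot_port is embedded inside ocelot_port_private,
+ * so recover the private structure with container_of()
+ */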
+ ocelot_port = ocelot->ports[info.port];
+ priv = container_of(ocelot_port, struct ocelot_port_private,
+ port);
+ dev = priv->dev;
skb = netdev_alloc_skb(dev, info.len);
@@ -185,69 +190,69 @@ static irqreturn_t ocelot_xtr_irq_handler(int irq, void *arg)
static irqreturn_t ocelot_ptp_rdy_irq_handler(int irq, void *arg)
{
- int budget = OCELOT_PTP_QUEUE_SZ;
struct ocelot *ocelot = arg;
- while (budget--) {
- struct skb_shared_hwtstamps shhwtstamps;
- struct list_head *pos, *tmp;
- struct sk_buff *skb = NULL;
- struct ocelot_skb *entry;
- struct ocelot_port *port;
- struct timespec64 ts;
- u32 val, id, txport;
+ ocelot_get_txtstamp(ocelot);
- val = ocelot_read(ocelot, SYS_PTP_STATUS);
+ return IRQ_HANDLED;
+}
- /* Check if a timestamp can be retrieved */
- if (!(val & SYS_PTP_STATUS_PTP_MESS_VLD))
- break;
+static const struct of_device_id mscc_ocelot_match[] = {
+ { .compatible = "mscc,vsc7514-switch" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, mscc_ocelot_match);
- WARN_ON(val & SYS_PTP_STATUS_PTP_OVFL);
+static void ocelot_port_pcs_init(struct ocelot *ocelot, int port)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
- /* Retrieve the ts ID and Tx port */
- id = SYS_PTP_STATUS_PTP_MESS_ID_X(val);
- txport = SYS_PTP_STATUS_PTP_MESS_TXPORT_X(val);
+ /* Disable HDX fast control */
+ ocelot_port_writel(ocelot_port, DEV_PORT_MISC_HDX_FAST_DIS,
+ DEV_PORT_MISC);
- /* Retrieve its associated skb */
- port = ocelot->ports[txport];
+ /* SGMII only for now */
+ ocelot_port_writel(ocelot_port, PCS1G_MODE_CFG_SGMII_MODE_ENA,
+ PCS1G_MODE_CFG);
+ ocelot_port_writel(ocelot_port, PCS1G_SD_CFG_SD_SEL, PCS1G_SD_CFG);
- list_for_each_safe(pos, tmp, &port->skbs) {
- entry = list_entry(pos, struct ocelot_skb, head);
- if (entry->id != id)
- continue;
+ /* Enable PCS */
+ ocelot_port_writel(ocelot_port, PCS1G_CFG_PCS_ENA, PCS1G_CFG);
- skb = entry->skb;
+ /* No aneg on SGMII */
+ ocelot_port_writel(ocelot_port, 0, PCS1G_ANEG_CFG);
- list_del(pos);
- kfree(entry);
- }
+ /* No loopback */
+ ocelot_port_writel(ocelot_port, 0, PCS1G_LB_CFG);
+}
- /* Next ts */
- ocelot_write(ocelot, SYS_PTP_NXT_PTP_NXT, SYS_PTP_NXT);
+static int ocelot_reset(struct ocelot *ocelot)
+{
+ int retries = 100;
+ u32 val;
- if (unlikely(!skb))
- continue;
+ regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_INIT], 1);
+ regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_ENA], 1);
- /* Get the h/w timestamp */
- ocelot_get_hwtimestamp(ocelot, &ts);
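+ /* Poll (up to 100 times, sleeping 1 ms between reads) until the
+ * memory initialization bit clears
+ */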
+ do {
+ msleep(1);
+ regmap_field_read(ocelot->regfields[SYS_RESET_CFG_MEM_INIT],
+ &val);
+ } while (val && --retries);
- /* Set the timestamp into the skb */
- memset(&shhwtstamps, 0, sizeof(shhwtstamps));
- shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
- skb_tstamp_tx(skb, &shhwtstamps);
+ if (!retries)
+ return -ETIMEDOUT;
- dev_kfree_skb_any(skb);
- }
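+ /* Re-enable the memories and bring the switch core out of reset */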
+ regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_ENA], 1);
+ regmap_field_write(ocelot->regfields[SYS_RESET_CFG_CORE_ENA], 1);
- return IRQ_HANDLED;
+ return 0;
}
-static const struct of_device_id mscc_ocelot_match[] = {
- { .compatible = "mscc,vsc7514-switch" },
- { }
+static const struct ocelot_ops ocelot_ops = {
+ .pcs_init = ocelot_port_pcs_init,
+ .reset = ocelot_reset,
};
-MODULE_DEVICE_TABLE(of, mscc_ocelot_match);
static int mscc_ocelot_probe(struct platform_device *pdev)
{
@@ -257,13 +262,12 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
struct ocelot *ocelot;
struct regmap *hsio;
unsigned int i;
- u32 val;
struct {
enum ocelot_target id;
char *name;
u8 optional:1;
- } res[] = {
+ } io_target[] = {
{ SYS, "sys" },
{ REW, "rew" },
{ QSYS, "qsys" },
@@ -283,20 +287,23 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ocelot);
ocelot->dev = &pdev->dev;
- for (i = 0; i < ARRAY_SIZE(res); i++) {
+ for (i = 0; i < ARRAY_SIZE(io_target); i++) {
struct regmap *target;
+ struct resource *res;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ io_target[i].name);
- target = ocelot_io_platform_init(ocelot, pdev, res[i].name);
+ target = ocelot_regmap_init(ocelot, res);
if (IS_ERR(target)) {
- if (res[i].optional) {
- ocelot->targets[res[i].id] = NULL;
+ if (io_target[i].optional) {
+ ocelot->targets[io_target[i].id] = NULL;
continue;
}
-
return PTR_ERR(target);
}
- ocelot->targets[res[i].id] = target;
+ ocelot->targets[io_target[i].id] = target;
}
hsio = syscon_regmap_lookup_by_compatible("mscc,ocelot-hsio");
@@ -307,7 +314,7 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
ocelot->targets[HSIO] = hsio;
- err = ocelot_chip_init(ocelot);
+ err = ocelot_chip_init(ocelot, &ocelot_ops);
if (err)
return err;
@@ -334,18 +341,6 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
ocelot->ptp = 1;
}
- regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_INIT], 1);
- regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_ENA], 1);
-
- do {
- msleep(1);
- regmap_field_read(ocelot->regfields[SYS_RESET_CFG_MEM_INIT],
- &val);
- } while (val);
-
- regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_ENA], 1);
- regmap_field_write(ocelot->regfields[SYS_RESET_CFG_CORE_ENA], 1);
-
ocelot->num_cpu_ports = 1; /* 1 port on the switch, two groups */
ports = of_get_child_by_name(np, "ethernet-ports");
@@ -359,17 +354,20 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
ocelot->ports = devm_kcalloc(&pdev->dev, ocelot->num_phys_ports,
sizeof(struct ocelot_port *), GFP_KERNEL);
- INIT_LIST_HEAD(&ocelot->multicast);
ocelot_init(ocelot);
+ ocelot_set_cpu_port(ocelot, ocelot->num_phys_ports,
+ OCELOT_TAG_PREFIX_NONE, OCELOT_TAG_PREFIX_NONE);
for_each_available_child_of_node(ports, portnp) {
+ struct ocelot_port_private *priv;
+ struct ocelot_port *ocelot_port;
struct device_node *phy_node;
+ phy_interface_t phy_mode;
struct phy_device *phy;
struct resource *res;
struct phy *serdes;
void __iomem *regs;
char res_name[8];
- int phy_mode;
u32 port;
if (of_property_read_u32(portnp, "reg", &port))
@@ -398,13 +396,15 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
goto out_put_ports;
}
- phy_mode = of_get_phy_mode(portnp);
- if (phy_mode < 0)
- ocelot->ports[port]->phy_mode = PHY_INTERFACE_MODE_NA;
- else
- ocelot->ports[port]->phy_mode = phy_mode;
+ ocelot_port = ocelot->ports[port];
+ priv = container_of(ocelot_port, struct ocelot_port_private,
+ port);
+
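+ /* of_get_phy_mode() leaves phy_mode set to PHY_INTERFACE_MODE_NA
+ * when no valid "phy-mode" property is found
+ */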
+ of_get_phy_mode(portnp, &phy_mode);
+
+ priv->phy_mode = phy_mode;
- switch (ocelot->ports[port]->phy_mode) {
+ switch (priv->phy_mode) {
case PHY_INTERFACE_MODE_NA:
continue;
case PHY_INTERFACE_MODE_SGMII:
@@ -413,7 +413,7 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
/* Ensure clock signals and speed are set on all
* QSGMII links
*/
- ocelot_port_writel(ocelot->ports[port],
+ ocelot_port_writel(ocelot_port,
DEV_CLOCK_CFG_LINK_SPEED
(OCELOT_SPEED_1000),
DEV_CLOCK_CFG);
@@ -441,7 +441,7 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
goto out_put_ports;
}
- ocelot->ports[port]->serdes = serdes;
+ priv->serdes = serdes;
}
register_netdevice_notifier(&ocelot_netdevice_nb);
diff --git a/drivers/net/ethernet/mscc/ocelot_flower.c b/drivers/net/ethernet/mscc/ocelot_flower.c
index b894bc0c9c16..3d65b99b9734 100644
--- a/drivers/net/ethernet/mscc/ocelot_flower.c
+++ b/drivers/net/ethernet/mscc/ocelot_flower.c
@@ -10,7 +10,7 @@
struct ocelot_port_block {
struct ocelot_acl_block *block;
- struct ocelot_port *port;
+ struct ocelot_port_private *priv;
};
static int ocelot_flower_parse_action(struct flow_cls_offload *f,
@@ -177,8 +177,8 @@ struct ocelot_ace_rule *ocelot_ace_rule_create(struct flow_cls_offload *f,
if (!rule)
return NULL;
- rule->port = block->port;
- rule->chip_port = block->port->chip_port;
+ rule->port = &block->priv->port;
+ rule->chip_port = block->priv->chip_port;
return rule;
}
@@ -202,7 +202,7 @@ static int ocelot_flower_replace(struct flow_cls_offload *f,
if (ret)
return ret;
- port_block->port->tc.offload_cnt++;
+ port_block->priv->tc.offload_cnt++;
return 0;
}
@@ -213,14 +213,14 @@ static int ocelot_flower_destroy(struct flow_cls_offload *f,
int ret;
rule.prio = f->common.prio;
- rule.port = port_block->port;
+ rule.port = &port_block->priv->port;
rule.id = f->cookie;
ret = ocelot_ace_rule_offload_del(&rule);
if (ret)
return ret;
- port_block->port->tc.offload_cnt--;
+ port_block->priv->tc.offload_cnt--;
return 0;
}
@@ -231,7 +231,7 @@ static int ocelot_flower_stats_update(struct flow_cls_offload *f,
int ret;
rule.prio = f->common.prio;
- rule.port = port_block->port;
+ rule.port = &port_block->priv->port;
rule.id = f->cookie;
ret = ocelot_ace_rule_stats_update(&rule);
if (ret)
@@ -261,7 +261,7 @@ static int ocelot_setup_tc_block_cb_flower(enum tc_setup_type type,
{
struct ocelot_port_block *port_block = cb_priv;
- if (!tc_cls_can_offload_and_chain0(port_block->port->dev, type_data))
+ if (!tc_cls_can_offload_and_chain0(port_block->priv->dev, type_data))
return -EOPNOTSUPP;
switch (type) {
@@ -275,7 +275,7 @@ static int ocelot_setup_tc_block_cb_flower(enum tc_setup_type type,
}
static struct ocelot_port_block*
-ocelot_port_block_create(struct ocelot_port *port)
+ocelot_port_block_create(struct ocelot_port_private *priv)
{
struct ocelot_port_block *port_block;
@@ -283,7 +283,7 @@ ocelot_port_block_create(struct ocelot_port *port)
if (!port_block)
return NULL;
- port_block->port = port;
+ port_block->priv = priv;
return port_block;
}
@@ -300,7 +300,7 @@ static void ocelot_tc_block_unbind(void *cb_priv)
ocelot_port_block_destroy(port_block);
}
-int ocelot_setup_tc_block_flower_bind(struct ocelot_port *port,
+int ocelot_setup_tc_block_flower_bind(struct ocelot_port_private *priv,
struct flow_block_offload *f)
{
struct ocelot_port_block *port_block;
@@ -311,14 +311,14 @@ int ocelot_setup_tc_block_flower_bind(struct ocelot_port *port,
return -EOPNOTSUPP;
block_cb = flow_block_cb_lookup(f->block,
- ocelot_setup_tc_block_cb_flower, port);
+ ocelot_setup_tc_block_cb_flower, priv);
if (!block_cb) {
- port_block = ocelot_port_block_create(port);
+ port_block = ocelot_port_block_create(priv);
if (!port_block)
return -ENOMEM;
block_cb = flow_block_cb_alloc(ocelot_setup_tc_block_cb_flower,
- port, port_block,
+ priv, port_block,
ocelot_tc_block_unbind);
if (IS_ERR(block_cb)) {
ret = PTR_ERR(block_cb);
@@ -339,13 +339,13 @@ err_cb_register:
return ret;
}
-void ocelot_setup_tc_block_flower_unbind(struct ocelot_port *port,
+void ocelot_setup_tc_block_flower_unbind(struct ocelot_port_private *priv,
struct flow_block_offload *f)
{
struct flow_block_cb *block_cb;
block_cb = flow_block_cb_lookup(f->block,
- ocelot_setup_tc_block_cb_flower, port);
+ ocelot_setup_tc_block_cb_flower, priv);
if (!block_cb)
return;
diff --git a/drivers/net/ethernet/mscc/ocelot_io.c b/drivers/net/ethernet/mscc/ocelot_io.c
index c6db8ad31fdf..b229b1cb68ef 100644
--- a/drivers/net/ethernet/mscc/ocelot_io.c
+++ b/drivers/net/ethernet/mscc/ocelot_io.c
@@ -97,20 +97,16 @@ static struct regmap_config ocelot_regmap_config = {
.reg_stride = 4,
};
-struct regmap *ocelot_io_platform_init(struct ocelot *ocelot,
- struct platform_device *pdev,
- const char *name)
+struct regmap *ocelot_regmap_init(struct ocelot *ocelot, struct resource *res)
{
- struct resource *res;
void __iomem *regs;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
regs = devm_ioremap_resource(ocelot->dev, res);
if (IS_ERR(regs))
return ERR_CAST(regs);
- ocelot_regmap_config.name = name;
- return devm_regmap_init_mmio(ocelot->dev, regs,
- &ocelot_regmap_config);
+ ocelot_regmap_config.name = res->name;
+
+ return devm_regmap_init_mmio(ocelot->dev, regs, &ocelot_regmap_config);
}
-EXPORT_SYMBOL(ocelot_io_platform_init);
+EXPORT_SYMBOL(ocelot_regmap_init);
diff --git a/drivers/net/ethernet/mscc/ocelot_police.c b/drivers/net/ethernet/mscc/ocelot_police.c
index 701e82dd749a..faddce43f2e3 100644
--- a/drivers/net/ethernet/mscc/ocelot_police.c
+++ b/drivers/net/ethernet/mscc/ocelot_police.c
@@ -40,13 +40,12 @@ struct qos_policer_conf {
u8 ipg; /* Size of IPG when MSCC_QOS_RATE_MODE_LINE is chosen */
};
-static int qos_policer_conf_set(struct ocelot_port *port, u32 pol_ix,
+static int qos_policer_conf_set(struct ocelot *ocelot, int port, u32 pol_ix,
struct qos_policer_conf *conf)
{
u32 cf = 0, cir_ena = 0, frm_mode = POL_MODE_LINERATE;
u32 cir = 0, cbs = 0, pir = 0, pbs = 0;
bool cir_discard = 0, pir_discard = 0;
- struct ocelot *ocelot = port->ocelot;
u32 pbs_max = 0, cbs_max = 0;
u8 ipg = 20;
u32 value;
@@ -123,22 +122,26 @@ static int qos_policer_conf_set(struct ocelot_port *port, u32 pol_ix,
/* Check limits */
if (pir > GENMASK(15, 0)) {
- netdev_err(port->dev, "Invalid pir\n");
+ dev_err(ocelot->dev, "Invalid pir for port %d: %u (max %lu)\n",
+ port, pir, GENMASK(15, 0));
return -EINVAL;
}
if (cir > GENMASK(15, 0)) {
- netdev_err(port->dev, "Invalid cir\n");
+ dev_err(ocelot->dev, "Invalid cir for port %d: %u (max %lu)\n",
+ port, cir, GENMASK(15, 0));
return -EINVAL;
}
if (pbs > pbs_max) {
- netdev_err(port->dev, "Invalid pbs\n");
+ dev_err(ocelot->dev, "Invalid pbs for port %d: %u (max %u)\n",
+ port, pbs, pbs_max);
return -EINVAL;
}
if (cbs > cbs_max) {
- netdev_err(port->dev, "Invalid cbs\n");
+ dev_err(ocelot->dev, "Invalid cbs for port %d: %u (max %u)\n",
+ port, cbs, cbs_max);
return -EINVAL;
}
@@ -171,10 +174,9 @@ static int qos_policer_conf_set(struct ocelot_port *port, u32 pol_ix,
return 0;
}
-int ocelot_port_policer_add(struct ocelot_port *port,
+int ocelot_port_policer_add(struct ocelot *ocelot, int port,
struct ocelot_policer *pol)
{
- struct ocelot *ocelot = port->ocelot;
struct qos_policer_conf pp = { 0 };
int err;
@@ -185,11 +187,10 @@ int ocelot_port_policer_add(struct ocelot_port *port,
pp.pir = pol->rate;
pp.pbs = pol->burst;
- netdev_dbg(port->dev,
- "%s: port %u pir %u kbps, pbs %u bytes\n",
- __func__, port->chip_port, pp.pir, pp.pbs);
+ dev_dbg(ocelot->dev, "%s: port %u pir %u kbps, pbs %u bytes\n",
+ __func__, port, pp.pir, pp.pbs);
- err = qos_policer_conf_set(port, POL_IX_PORT + port->chip_port, &pp);
+ err = qos_policer_conf_set(ocelot, port, POL_IX_PORT + port, &pp);
if (err)
return err;
@@ -198,22 +199,21 @@ int ocelot_port_policer_add(struct ocelot_port *port,
ANA_PORT_POL_CFG_POL_ORDER(POL_ORDER),
ANA_PORT_POL_CFG_PORT_POL_ENA |
ANA_PORT_POL_CFG_POL_ORDER_M,
- ANA_PORT_POL_CFG, port->chip_port);
+ ANA_PORT_POL_CFG, port);
return 0;
}
-int ocelot_port_policer_del(struct ocelot_port *port)
+int ocelot_port_policer_del(struct ocelot *ocelot, int port)
{
- struct ocelot *ocelot = port->ocelot;
struct qos_policer_conf pp = { 0 };
int err;
- netdev_dbg(port->dev, "%s: port %u\n", __func__, port->chip_port);
+ dev_dbg(ocelot->dev, "%s: port %u\n", __func__, port);
pp.mode = MSCC_QOS_RATE_MODE_DISABLED;
- err = qos_policer_conf_set(port, POL_IX_PORT + port->chip_port, &pp);
+ err = qos_policer_conf_set(ocelot, port, POL_IX_PORT + port, &pp);
if (err)
return err;
@@ -221,7 +221,7 @@ int ocelot_port_policer_del(struct ocelot_port *port)
ANA_PORT_POL_CFG_POL_ORDER(POL_ORDER),
ANA_PORT_POL_CFG_PORT_POL_ENA |
ANA_PORT_POL_CFG_POL_ORDER_M,
- ANA_PORT_POL_CFG, port->chip_port);
+ ANA_PORT_POL_CFG, port);
return 0;
}
diff --git a/drivers/net/ethernet/mscc/ocelot_police.h b/drivers/net/ethernet/mscc/ocelot_police.h
index d1137f79efda..ae9509229463 100644
--- a/drivers/net/ethernet/mscc/ocelot_police.h
+++ b/drivers/net/ethernet/mscc/ocelot_police.h
@@ -14,9 +14,9 @@ struct ocelot_policer {
u32 burst; /* bytes */
};
-int ocelot_port_policer_add(struct ocelot_port *port,
+int ocelot_port_policer_add(struct ocelot *ocelot, int port,
struct ocelot_policer *pol);
-int ocelot_port_policer_del(struct ocelot_port *port);
+int ocelot_port_policer_del(struct ocelot *ocelot, int port);
#endif /* _MSCC_OCELOT_POLICE_H_ */
diff --git a/drivers/net/ethernet/mscc/ocelot_regs.c b/drivers/net/ethernet/mscc/ocelot_regs.c
index e59977d20400..b88b5899b227 100644
--- a/drivers/net/ethernet/mscc/ocelot_regs.c
+++ b/drivers/net/ethernet/mscc/ocelot_regs.c
@@ -423,7 +423,7 @@ static void ocelot_pll5_init(struct ocelot *ocelot)
HSIO_PLL5G_CFG2_AMPC_SEL(0x10));
}
-int ocelot_chip_init(struct ocelot *ocelot)
+int ocelot_chip_init(struct ocelot *ocelot, const struct ocelot_ops *ops)
{
int ret;
@@ -431,6 +431,7 @@ int ocelot_chip_init(struct ocelot *ocelot)
ocelot->stats_layout = ocelot_stats_layout;
ocelot->num_stats = ARRAY_SIZE(ocelot_stats_layout);
ocelot->shared_queue_sz = 224 * 1024;
+ ocelot->ops = ops;
ret = ocelot_regfields_init(ocelot, ocelot_regfields);
if (ret)
diff --git a/drivers/net/ethernet/mscc/ocelot_tc.c b/drivers/net/ethernet/mscc/ocelot_tc.c
index 16a6db71ca5e..a4f7fbd76507 100644
--- a/drivers/net/ethernet/mscc/ocelot_tc.c
+++ b/drivers/net/ethernet/mscc/ocelot_tc.c
@@ -9,17 +9,19 @@
#include "ocelot_ace.h"
#include <net/pkt_cls.h>
-static int ocelot_setup_tc_cls_matchall(struct ocelot_port *port,
+static int ocelot_setup_tc_cls_matchall(struct ocelot_port_private *priv,
struct tc_cls_matchall_offload *f,
bool ingress)
{
struct netlink_ext_ack *extack = f->common.extack;
+ struct ocelot *ocelot = priv->port.ocelot;
struct ocelot_policer pol = { 0 };
struct flow_action_entry *action;
+ int port = priv->chip_port;
int err;
- netdev_dbg(port->dev, "%s: port %u command %d cookie %lu\n",
- __func__, port->chip_port, f->command, f->cookie);
+ netdev_dbg(priv->dev, "%s: port %u command %d cookie %lu\n",
+ __func__, port, f->command, f->cookie);
if (!ingress) {
NL_SET_ERR_MSG_MOD(extack, "Only ingress is supported");
@@ -34,7 +36,7 @@ static int ocelot_setup_tc_cls_matchall(struct ocelot_port *port,
return -EOPNOTSUPP;
}
- if (port->tc.block_shared) {
+ if (priv->tc.block_shared) {
NL_SET_ERR_MSG_MOD(extack,
"Rate limit is not supported on shared blocks");
return -EOPNOTSUPP;
@@ -47,7 +49,7 @@ static int ocelot_setup_tc_cls_matchall(struct ocelot_port *port,
return -EOPNOTSUPP;
}
- if (port->tc.police_id && port->tc.police_id != f->cookie) {
+ if (priv->tc.police_id && priv->tc.police_id != f->cookie) {
NL_SET_ERR_MSG_MOD(extack,
"Only one policer per port is supported\n");
return -EEXIST;
@@ -58,27 +60,27 @@ static int ocelot_setup_tc_cls_matchall(struct ocelot_port *port,
PSCHED_NS2TICKS(action->police.burst),
PSCHED_TICKS_PER_SEC);
- err = ocelot_port_policer_add(port, &pol);
+ err = ocelot_port_policer_add(ocelot, port, &pol);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Could not add policer\n");
return err;
}
- port->tc.police_id = f->cookie;
- port->tc.offload_cnt++;
+ priv->tc.police_id = f->cookie;
+ priv->tc.offload_cnt++;
return 0;
case TC_CLSMATCHALL_DESTROY:
- if (port->tc.police_id != f->cookie)
+ if (priv->tc.police_id != f->cookie)
return -ENOENT;
- err = ocelot_port_policer_del(port);
+ err = ocelot_port_policer_del(ocelot, port);
if (err) {
NL_SET_ERR_MSG_MOD(extack,
"Could not delete policer\n");
return err;
}
- port->tc.police_id = 0;
- port->tc.offload_cnt--;
+ priv->tc.police_id = 0;
+ priv->tc.offload_cnt--;
return 0;
case TC_CLSMATCHALL_STATS: /* fall through */
default:
@@ -90,21 +92,21 @@ static int ocelot_setup_tc_block_cb(enum tc_setup_type type,
void *type_data,
void *cb_priv, bool ingress)
{
- struct ocelot_port *port = cb_priv;
+ struct ocelot_port_private *priv = cb_priv;
- if (!tc_cls_can_offload_and_chain0(port->dev, type_data))
+ if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
return -EOPNOTSUPP;
switch (type) {
case TC_SETUP_CLSMATCHALL:
- netdev_dbg(port->dev, "tc_block_cb: TC_SETUP_CLSMATCHALL %s\n",
+ netdev_dbg(priv->dev, "tc_block_cb: TC_SETUP_CLSMATCHALL %s\n",
ingress ? "ingress" : "egress");
- return ocelot_setup_tc_cls_matchall(port, type_data, ingress);
+ return ocelot_setup_tc_cls_matchall(priv, type_data, ingress);
case TC_SETUP_CLSFLOWER:
return 0;
default:
- netdev_dbg(port->dev, "tc_block_cb: type %d %s\n",
+ netdev_dbg(priv->dev, "tc_block_cb: type %d %s\n",
type,
ingress ? "ingress" : "egress");
@@ -130,19 +132,19 @@ static int ocelot_setup_tc_block_cb_eg(enum tc_setup_type type,
static LIST_HEAD(ocelot_block_cb_list);
-static int ocelot_setup_tc_block(struct ocelot_port *port,
+static int ocelot_setup_tc_block(struct ocelot_port_private *priv,
struct flow_block_offload *f)
{
struct flow_block_cb *block_cb;
flow_setup_cb_t *cb;
int err;
- netdev_dbg(port->dev, "tc_block command %d, binder_type %d\n",
+ netdev_dbg(priv->dev, "tc_block command %d, binder_type %d\n",
f->command, f->binder_type);
if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) {
cb = ocelot_setup_tc_block_cb_ig;
- port->tc.block_shared = f->block_shared;
+ priv->tc.block_shared = f->block_shared;
} else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
cb = ocelot_setup_tc_block_cb_eg;
} else {
@@ -153,14 +155,14 @@ static int ocelot_setup_tc_block(struct ocelot_port *port,
switch (f->command) {
case FLOW_BLOCK_BIND:
- if (flow_block_cb_is_busy(cb, port, &ocelot_block_cb_list))
+ if (flow_block_cb_is_busy(cb, priv, &ocelot_block_cb_list))
return -EBUSY;
- block_cb = flow_block_cb_alloc(cb, port, port, NULL);
+ block_cb = flow_block_cb_alloc(cb, priv, priv, NULL);
if (IS_ERR(block_cb))
return PTR_ERR(block_cb);
- err = ocelot_setup_tc_block_flower_bind(port, f);
+ err = ocelot_setup_tc_block_flower_bind(priv, f);
if (err < 0) {
flow_block_cb_free(block_cb);
return err;
@@ -169,11 +171,11 @@ static int ocelot_setup_tc_block(struct ocelot_port *port,
list_add_tail(&block_cb->driver_list, f->driver_block_list);
return 0;
case FLOW_BLOCK_UNBIND:
- block_cb = flow_block_cb_lookup(f->block, cb, port);
+ block_cb = flow_block_cb_lookup(f->block, cb, priv);
if (!block_cb)
return -ENOENT;
- ocelot_setup_tc_block_flower_unbind(port, f);
+ ocelot_setup_tc_block_flower_unbind(priv, f);
flow_block_cb_remove(block_cb, f);
list_del(&block_cb->driver_list);
return 0;
@@ -185,11 +187,11 @@ static int ocelot_setup_tc_block(struct ocelot_port *port,
int ocelot_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data)
{
- struct ocelot_port *port = netdev_priv(dev);
+ struct ocelot_port_private *priv = netdev_priv(dev);
switch (type) {
case TC_SETUP_BLOCK:
- return ocelot_setup_tc_block(port, type_data);
+ return ocelot_setup_tc_block(priv, type_data);
default:
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
index 5afcb3c4c2ef..c80bb83c8ac9 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
@@ -3952,7 +3952,7 @@ static void nfp_bpf_opt_neg_add_sub(struct nfp_prog *nfp_prog)
static void nfp_bpf_opt_ld_mask(struct nfp_prog *nfp_prog)
{
struct nfp_insn_meta *meta1, *meta2;
- const s32 exp_mask[] = {
+ static const s32 exp_mask[] = {
[BPF_B] = 0x000000ffU,
[BPF_H] = 0x0000ffffU,
[BPF_W] = 0xffffffffU,
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
index 88fab6a82acf..95a0d3910e31 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
@@ -46,9 +46,7 @@ nfp_map_ptr_record(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog,
/* Grab a single ref to the map for our record. The prog destroy ndo
* happens after free_used_maps().
*/
- map = bpf_map_inc(map, false);
- if (IS_ERR(map))
- return PTR_ERR(map);
+ bpf_map_inc(map);
record = kmalloc(sizeof(*record), GFP_KERNEL);
if (!record) {
@@ -460,8 +458,8 @@ int nfp_bpf_event_output(struct nfp_app_bpf *bpf, const void *data,
return -EINVAL;
rcu_read_lock();
- record = rhashtable_lookup_fast(&bpf->maps_neutral, &map_id,
- nfp_bpf_maps_neutral_params);
+ record = rhashtable_lookup(&bpf->maps_neutral, &map_id,
+ nfp_bpf_maps_neutral_params);
if (!record || map_id_full > U32_MAX) {
rcu_read_unlock();
cmsg_warn(bpf, "perf event: map id %lld (0x%llx) not recognized, dropping event\n",
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 61aabffc8888..bcdcd6de7dea 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -872,7 +872,8 @@ nfp_net_tls_tx(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
/* jump forward, a TX may have gotten lost, need to sync TX */
if (!resync_pending && seq - ntls->next_seq < U32_MAX / 4)
- tls_offload_tx_resync_request(nskb->sk);
+ tls_offload_tx_resync_request(nskb->sk, seq,
+ ntls->next_seq);
*nr_frags = 0;
return nskb;
diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c
index 2761f3a3ae50..49c7987c2abd 100644
--- a/drivers/net/ethernet/ni/nixge.c
+++ b/drivers/net/ethernet/ni/nixge.c
@@ -1346,10 +1346,9 @@ static int nixge_probe(struct platform_device *pdev)
}
}
- priv->phy_mode = of_get_phy_mode(pdev->dev.of_node);
- if ((int)priv->phy_mode < 0) {
+ err = of_get_phy_mode(pdev->dev.of_node, &priv->phy_mode);
+ if (err) {
netdev_err(ndev, "not find \"phy-mode\" property\n");
- err = -EINVAL;
goto unregister_mdio;
}
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 05d2b478c99b..6b54cb3b681d 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -2225,6 +2225,7 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct nv_skb_map *prev_tx_ctx;
struct nv_skb_map *tmp_tx_ctx = NULL, *start_tx_ctx = NULL;
unsigned long flags;
+ netdev_tx_t ret = NETDEV_TX_OK;
/* add fragments to entries count */
for (i = 0; i < fragments; i++) {
@@ -2240,7 +2241,12 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
netif_stop_queue(dev);
np->tx_stop = 1;
spin_unlock_irqrestore(&np->lock, flags);
- return NETDEV_TX_BUSY;
+
+ /* When normal and/or xmit_more packets have filled up the tx
+ * descriptors, the NIC tx kick register still needs to be written.
+ */
+ ret = NETDEV_TX_BUSY;
+ goto txkick;
}
spin_unlock_irqrestore(&np->lock, flags);
@@ -2259,7 +2265,10 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
u64_stats_update_begin(&np->swstats_tx_syncp);
nv_txrx_stats_inc(stat_tx_dropped);
u64_stats_update_end(&np->swstats_tx_syncp);
- return NETDEV_TX_OK;
+
+ ret = NETDEV_TX_OK;
+
+ goto dma_error;
}
np->put_tx_ctx->dma_len = bcnt;
np->put_tx_ctx->dma_single = 1;
@@ -2305,7 +2314,10 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
u64_stats_update_begin(&np->swstats_tx_syncp);
nv_txrx_stats_inc(stat_tx_dropped);
u64_stats_update_end(&np->swstats_tx_syncp);
- return NETDEV_TX_OK;
+
+ ret = NETDEV_TX_OK;
+
+ goto dma_error;
}
np->put_tx_ctx->dma_len = bcnt;
@@ -2357,8 +2369,15 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
spin_unlock_irqrestore(&np->lock, flags);
- writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
- return NETDEV_TX_OK;
+txkick:
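+ /* Kick the NIC only when the queue is stopped or when the stack
+ * signals that no more packets are coming (xmit_more)
+ */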
+ if (netif_queue_stopped(dev) || !netdev_xmit_more()) {
+ u32 txrxctl_kick;
+dma_error:
+ txrxctl_kick = NVREG_TXRXCTL_KICK | np->txrxctl_bits;
+ writel(txrxctl_kick, get_hwbase(dev) + NvRegTxRxControl);
+ }
+
+ return ret;
}
static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
@@ -2381,6 +2400,7 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
struct nv_skb_map *start_tx_ctx = NULL;
struct nv_skb_map *tmp_tx_ctx = NULL;
unsigned long flags;
+ netdev_tx_t ret = NETDEV_TX_OK;
/* add fragments to entries count */
for (i = 0; i < fragments; i++) {
@@ -2396,7 +2416,13 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
netif_stop_queue(dev);
np->tx_stop = 1;
spin_unlock_irqrestore(&np->lock, flags);
- return NETDEV_TX_BUSY;
+
+ /* When normal and/or xmit_more packets have filled up the tx
+ * descriptors, the NIC tx kick register still needs to be written.
+ */
+ ret = NETDEV_TX_BUSY;
+
+ goto txkick;
}
spin_unlock_irqrestore(&np->lock, flags);
@@ -2416,7 +2442,10 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
u64_stats_update_begin(&np->swstats_tx_syncp);
nv_txrx_stats_inc(stat_tx_dropped);
u64_stats_update_end(&np->swstats_tx_syncp);
- return NETDEV_TX_OK;
+
+ ret = NETDEV_TX_OK;
+
+ goto dma_error;
}
np->put_tx_ctx->dma_len = bcnt;
np->put_tx_ctx->dma_single = 1;
@@ -2463,7 +2492,10 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
u64_stats_update_begin(&np->swstats_tx_syncp);
nv_txrx_stats_inc(stat_tx_dropped);
u64_stats_update_end(&np->swstats_tx_syncp);
- return NETDEV_TX_OK;
+
+ ret = NETDEV_TX_OK;
+
+ goto dma_error;
}
np->put_tx_ctx->dma_len = bcnt;
np->put_tx_ctx->dma_single = 0;
@@ -2542,8 +2574,15 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
spin_unlock_irqrestore(&np->lock, flags);
- writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
- return NETDEV_TX_OK;
+txkick:
+ if (netif_queue_stopped(dev) || !netdev_xmit_more()) {
+ u32 txrxctl_kick;
+dma_error:
+ txrxctl_kick = NVREG_TXRXCTL_KICK | np->txrxctl_bits;
+ writel(txrxctl_kick, get_hwbase(dev) + NvRegTxRxControl);
+ }
+
+ return ret;
}
static inline void nv_tx_flip_ownership(struct net_device *dev)
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index 544012a67221..ebb81d6d4ca1 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -15,6 +15,7 @@
#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
@@ -391,6 +392,7 @@ struct rx_status_t {
struct netdata_local {
struct platform_device *pdev;
struct net_device *ndev;
+ struct device_node *phy_node;
spinlock_t lock;
void __iomem *net_base;
u32 msg_enable;
@@ -749,22 +751,26 @@ static void lpc_handle_link_change(struct net_device *ndev)
static int lpc_mii_probe(struct net_device *ndev)
{
struct netdata_local *pldat = netdev_priv(ndev);
- struct phy_device *phydev = phy_find_first(pldat->mii_bus);
-
- if (!phydev) {
- netdev_err(ndev, "no PHY found\n");
- return -ENODEV;
- }
+ struct phy_device *phydev;
/* Attach to the PHY */
if (lpc_phy_interface_mode(&pldat->pdev->dev) == PHY_INTERFACE_MODE_MII)
netdev_info(ndev, "using MII interface\n");
else
netdev_info(ndev, "using RMII interface\n");
+
+ if (pldat->phy_node)
+ phydev = of_phy_find_device(pldat->phy_node);
+ else
+ phydev = phy_find_first(pldat->mii_bus);
+ if (!phydev) {
+ netdev_err(ndev, "no PHY found\n");
+ return -ENODEV;
+ }
+
phydev = phy_connect(ndev, phydev_name(phydev),
&lpc_handle_link_change,
lpc_phy_interface_mode(&pldat->pdev->dev));
-
if (IS_ERR(phydev)) {
netdev_err(ndev, "Could not attach to PHY\n");
return PTR_ERR(phydev);
@@ -783,6 +789,7 @@ static int lpc_mii_probe(struct net_device *ndev)
static int lpc_mii_init(struct netdata_local *pldat)
{
+ struct device_node *node;
int err = -ENXIO;
pldat->mii_bus = mdiobus_alloc();
@@ -812,7 +819,10 @@ static int lpc_mii_init(struct netdata_local *pldat)
platform_set_drvdata(pldat->pdev, pldat->mii_bus);
- if (mdiobus_register(pldat->mii_bus))
+ node = of_get_child_by_name(pldat->pdev->dev.of_node, "mdio");
+ err = of_mdiobus_register(pldat->mii_bus, node);
+ of_node_put(node);
+ if (err)
goto err_out_unregister_bus;
if (lpc_mii_probe(pldat->ndev) != 0)
@@ -1345,6 +1355,8 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
netdev_dbg(ndev, "DMA buffer V address :0x%p\n",
pldat->dma_buff_base_v);
+ pldat->phy_node = of_parse_phandle(np, "phy-handle", 0);
+
/* Get MAC address from current HW setting (POR state is all zeros) */
__lpc_get_mac(pldat, ndev->dev_addr);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic.h b/drivers/net/ethernet/pensando/ionic/ionic.h
index 7a7060677f15..98e102af7756 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic.h
@@ -12,7 +12,7 @@ struct ionic_lif;
#define IONIC_DRV_NAME "ionic"
#define IONIC_DRV_DESCRIPTION "Pensando Ethernet NIC Driver"
-#define IONIC_DRV_VERSION "0.15.0-k"
+#define IONIC_DRV_VERSION "0.18.0-k"
#define PCI_VENDOR_ID_PENSANDO 0x1dd8
@@ -46,6 +46,8 @@ struct ionic {
DECLARE_BITMAP(intrs, IONIC_INTR_CTRL_REGS_MAX);
struct work_struct nb_work;
struct notifier_block nb;
+ struct timer_list watchdog_timer;
+ int watchdog_period;
};
struct ionic_admin_ctx {
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.c b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
index d168a6435322..5f9d2ec70446 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
@@ -11,6 +11,16 @@
#include "ionic_dev.h"
#include "ionic_lif.h"
+static void ionic_watchdog_cb(struct timer_list *t)
+{
+ struct ionic *ionic = from_timer(ionic, t, watchdog_timer);
+
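+ /* Re-arm the timer before running the heartbeat check so the
+ * watchdog keeps firing even if the check returns early
+ */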
+ mod_timer(&ionic->watchdog_timer,
+ round_jiffies(jiffies + ionic->watchdog_period));
+
+ ionic_heartbeat_check(ionic);
+}
+
void ionic_init_devinfo(struct ionic *ionic)
{
struct ionic_dev *idev = &ionic->idev;
@@ -72,6 +82,11 @@ int ionic_dev_setup(struct ionic *ionic)
return -EFAULT;
}
+ timer_setup(&ionic->watchdog_timer, ionic_watchdog_cb, 0);
+ ionic->watchdog_period = IONIC_WATCHDOG_SECS * HZ;
+ mod_timer(&ionic->watchdog_timer,
+ round_jiffies(jiffies + ionic->watchdog_period));
+
idev->db_pages = bar->vaddr;
idev->phy_db_pages = bar->bus_addr;
@@ -80,10 +95,53 @@ int ionic_dev_setup(struct ionic *ionic)
void ionic_dev_teardown(struct ionic *ionic)
{
- /* place holder */
+ del_timer_sync(&ionic->watchdog_timer);
}
/* Devcmd Interface */
+int ionic_heartbeat_check(struct ionic *ionic)
+{
+ struct ionic_dev *idev = &ionic->idev;
+ unsigned long hb_time;
+ u32 fw_status;
+ u32 hb;
+
+ /* only check again once at least one watchdog period has elapsed */
+ hb_time = jiffies;
+ if (time_before(hb_time, (idev->last_hb_time + ionic->watchdog_period)))
+ return 0;
+
+ /* firmware is useful only if fw_status is non-zero */
+ fw_status = ioread32(&idev->dev_info_regs->fw_status);
+ if (!fw_status)
+ return -ENXIO;
+
+ /* early FW has no heartbeat, else FW will return non-zero */
+ hb = ioread32(&idev->dev_info_regs->fw_heartbeat);
+ if (!hb)
+ return 0;
+
+ /* are we stalled? */
+ if (hb == idev->last_hb) {
+ /* only complain once for each stall seen */
+ if (idev->last_hb_time != 1) {
+ dev_info(ionic->dev, "FW heartbeat stalled at %d\n",
+ idev->last_hb);
+ idev->last_hb_time = 1;
+ }
+
+ return -ENXIO;
+ }
+
+ if (idev->last_hb_time == 1)
+ dev_info(ionic->dev, "FW heartbeat restored at %d\n", hb);
+
+ idev->last_hb = hb;
+ idev->last_hb_time = hb_time;
+
+ return 0;
+}
+
u8 ionic_dev_cmd_status(struct ionic_dev *idev)
{
return ioread8(&idev->dev_cmd_regs->comp.comp.status);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
index 9610aeb7d5f4..4665c5dc5324 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
@@ -16,6 +16,7 @@
#define IONIC_MIN_TXRX_DESC 16
#define IONIC_DEF_TXRX_DESC 4096
#define IONIC_LIFS_MAX 1024
+#define IONIC_WATCHDOG_SECS 5
#define IONIC_ITR_COAL_USEC_DEFAULT 64
#define IONIC_DEV_CMD_REG_VERSION 1
@@ -123,6 +124,9 @@ struct ionic_dev {
union ionic_dev_info_regs __iomem *dev_info_regs;
union ionic_dev_cmd_regs __iomem *dev_cmd_regs;
+ unsigned long last_hb_time;
+ u32 last_hb;
+
u64 __iomem *db_pages;
dma_addr_t phy_db_pages;
@@ -151,12 +155,19 @@ typedef void (*ionic_desc_cb)(struct ionic_queue *q,
struct ionic_desc_info *desc_info,
struct ionic_cq_info *cq_info, void *cb_arg);
+struct ionic_page_info {
+ struct page *page;
+ dma_addr_t dma_addr;
+};
+
struct ionic_desc_info {
void *desc;
void *sg_desc;
struct ionic_desc_info *next;
unsigned int index;
unsigned int left;
+ unsigned int npages;
+ struct ionic_page_info pages[IONIC_RX_MAX_SG_ELEMS + 1];
ionic_desc_cb cb;
void *cb_arg;
};
@@ -295,5 +306,6 @@ void ionic_q_post(struct ionic_queue *q, bool ring_doorbell, ionic_desc_cb cb,
void ionic_q_rewind(struct ionic_queue *q, struct ionic_desc_info *start);
void ionic_q_service(struct ionic_queue *q, struct ionic_cq_info *cq_info,
unsigned int stop_index);
+int ionic_heartbeat_check(struct ionic *ionic);
#endif /* _IONIC_DEV_H_ */
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_devlink.c b/drivers/net/ethernet/pensando/ionic/ionic_devlink.c
index af1647afa4e8..6fb27dcc5787 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_devlink.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_devlink.c
@@ -19,31 +19,30 @@ static int ionic_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
err = devlink_info_driver_name_put(req, IONIC_DRV_NAME);
if (err)
- goto info_out;
+ return err;
err = devlink_info_version_running_put(req,
DEVLINK_INFO_VERSION_GENERIC_FW,
idev->dev_info.fw_version);
if (err)
- goto info_out;
+ return err;
snprintf(buf, sizeof(buf), "0x%x", idev->dev_info.asic_type);
err = devlink_info_version_fixed_put(req,
DEVLINK_INFO_VERSION_GENERIC_ASIC_ID,
buf);
if (err)
- goto info_out;
+ return err;
snprintf(buf, sizeof(buf), "0x%x", idev->dev_info.asic_rev);
err = devlink_info_version_fixed_put(req,
DEVLINK_INFO_VERSION_GENERIC_ASIC_REV,
buf);
if (err)
- goto info_out;
+ return err;
err = devlink_info_serial_number_put(req, idev->dev_info.serial_num);
-info_out:
return err;
}
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
index 7d10265f782a..f778fff034f5 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
@@ -254,12 +254,9 @@ static int ionic_set_link_ksettings(struct net_device *netdev,
struct ionic_lif *lif = netdev_priv(netdev);
struct ionic *ionic = lif->ionic;
struct ionic_dev *idev;
- u32 req_rs, req_fc;
- u8 fec_type;
int err = 0;
idev = &lif->ionic->idev;
- fec_type = IONIC_PORT_FEC_TYPE_NONE;
/* set autoneg */
if (ks->base.autoneg != idev->port_info->config.an_enable) {
@@ -281,29 +278,6 @@ static int ionic_set_link_ksettings(struct net_device *netdev,
return err;
}
- /* set FEC */
- req_rs = ethtool_link_ksettings_test_link_mode(ks, advertising, FEC_RS);
- req_fc = ethtool_link_ksettings_test_link_mode(ks, advertising, FEC_BASER);
- if (req_rs && req_fc) {
- netdev_info(netdev, "Only select one FEC mode at a time\n");
- return -EINVAL;
- } else if (req_fc) {
- fec_type = IONIC_PORT_FEC_TYPE_FC;
- } else if (req_rs) {
- fec_type = IONIC_PORT_FEC_TYPE_RS;
- } else if (!(req_rs | req_fc)) {
- fec_type = IONIC_PORT_FEC_TYPE_NONE;
- }
-
- if (fec_type != idev->port_info->config.fec_type) {
- mutex_lock(&ionic->dev_cmd_lock);
- ionic_dev_cmd_port_fec(idev, fec_type);
- err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
- mutex_unlock(&ionic->dev_cmd_lock);
- if (err)
- return err;
- }
-
return 0;
}
@@ -353,6 +327,70 @@ static int ionic_set_pauseparam(struct net_device *netdev,
return 0;
}
+static int ionic_get_fecparam(struct net_device *netdev,
+ struct ethtool_fecparam *fec)
+{
+ struct ionic_lif *lif = netdev_priv(netdev);
+
+ switch (lif->ionic->idev.port_info->config.fec_type) {
+ case IONIC_PORT_FEC_TYPE_NONE:
+ fec->active_fec = ETHTOOL_FEC_OFF;
+ break;
+ case IONIC_PORT_FEC_TYPE_RS:
+ fec->active_fec = ETHTOOL_FEC_RS;
+ break;
+ case IONIC_PORT_FEC_TYPE_FC:
+ fec->active_fec = ETHTOOL_FEC_BASER;
+ break;
+ }
+
+ fec->fec = ETHTOOL_FEC_OFF | ETHTOOL_FEC_RS | ETHTOOL_FEC_BASER;
+
+ return 0;
+}
+
+static int ionic_set_fecparam(struct net_device *netdev,
+ struct ethtool_fecparam *fec)
+{
+ struct ionic_lif *lif = netdev_priv(netdev);
+ u8 fec_type;
+ int ret = 0;
+
+ if (lif->ionic->idev.port_info->config.an_enable) {
+ netdev_err(netdev, "FEC request not allowed while autoneg is enabled\n");
+ return -EINVAL;
+ }
+
+ switch (fec->fec) {
+ case ETHTOOL_FEC_NONE:
+ fec_type = IONIC_PORT_FEC_TYPE_NONE;
+ break;
+ case ETHTOOL_FEC_OFF:
+ fec_type = IONIC_PORT_FEC_TYPE_NONE;
+ break;
+ case ETHTOOL_FEC_RS:
+ fec_type = IONIC_PORT_FEC_TYPE_RS;
+ break;
+ case ETHTOOL_FEC_BASER:
+ fec_type = IONIC_PORT_FEC_TYPE_FC;
+ break;
+ case ETHTOOL_FEC_AUTO:
+ default:
+ netdev_err(netdev, "FEC request 0x%04x not supported\n",
+ fec->fec);
+ return -EINVAL;
+ }
+
+ if (fec_type != lif->ionic->idev.port_info->config.fec_type) {
+ mutex_lock(&lif->ionic->dev_cmd_lock);
+ ionic_dev_cmd_port_fec(&lif->ionic->idev, fec_type);
+ ret = ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
+ mutex_unlock(&lif->ionic->dev_cmd_lock);
+ }
+
+ return ret;
+}
+
static int ionic_get_coalesce(struct net_device *netdev,
struct ethtool_coalesce *coalesce)
{
@@ -372,7 +410,6 @@ static int ionic_set_coalesce(struct net_device *netdev,
struct ionic_identity *ident;
struct ionic_qcq *qcq;
unsigned int i;
- u32 usecs;
u32 coal;
if (coalesce->rx_max_coalesced_frames ||
@@ -410,26 +447,27 @@ static int ionic_set_coalesce(struct net_device *netdev,
return -EINVAL;
}
+ /* Convert the usec request to a HW usable value. If they asked
+ * for non-zero and it resolved to zero, bump it up.
+ */
coal = ionic_coal_usec_to_hw(lif->ionic, coalesce->rx_coalesce_usecs);
-
- if (coal > IONIC_INTR_CTRL_COAL_MAX)
- return -ERANGE;
-
- /* If they asked for non-zero and it resolved to zero, bump it up */
if (!coal && coalesce->rx_coalesce_usecs)
coal = 1;
- /* Convert it back to get device resolution */
- usecs = ionic_coal_hw_to_usec(lif->ionic, coal);
+ if (coal > IONIC_INTR_CTRL_COAL_MAX)
+ return -ERANGE;
- if (usecs != lif->rx_coalesce_usecs) {
- lif->rx_coalesce_usecs = usecs;
+ /* Save the new value */
+ lif->rx_coalesce_usecs = coalesce->rx_coalesce_usecs;
+ if (coal != lif->rx_coalesce_hw) {
+ lif->rx_coalesce_hw = coal;
if (test_bit(IONIC_LIF_UP, lif->state)) {
for (i = 0; i < lif->nxqs; i++) {
qcq = lif->rxqcqs[i].qcq;
ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
- qcq->intr.index, coal);
+ qcq->intr.index,
+ lif->rx_coalesce_hw);
}
}
}
@@ -453,6 +491,7 @@ static int ionic_set_ringparam(struct net_device *netdev,
{
struct ionic_lif *lif = netdev_priv(netdev);
bool running;
+ int err;
if (ring->rx_mini_pending || ring->rx_jumbo_pending) {
netdev_info(netdev, "Changing jumbo or mini descriptors not supported\n");
@@ -470,8 +509,9 @@ static int ionic_set_ringparam(struct net_device *netdev,
ring->rx_pending == lif->nrxq_descs)
return 0;
- if (!ionic_wait_for_bit(lif, IONIC_LIF_QUEUE_RESET))
- return -EBUSY;
+ err = ionic_wait_for_bit(lif, IONIC_LIF_QUEUE_RESET);
+ if (err)
+ return err;
running = test_bit(IONIC_LIF_UP, lif->state);
if (running)
@@ -504,6 +544,7 @@ static int ionic_set_channels(struct net_device *netdev,
{
struct ionic_lif *lif = netdev_priv(netdev);
bool running;
+ int err;
if (!ch->combined_count || ch->other_count ||
ch->rx_count || ch->tx_count)
@@ -512,8 +553,9 @@ static int ionic_set_channels(struct net_device *netdev,
if (ch->combined_count == lif->nxqs)
return 0;
- if (!ionic_wait_for_bit(lif, IONIC_LIF_QUEUE_RESET))
- return -EBUSY;
+ err = ionic_wait_for_bit(lif, IONIC_LIF_QUEUE_RESET);
+ if (err)
+ return err;
running = test_bit(IONIC_LIF_UP, lif->state);
if (running)
@@ -747,6 +789,7 @@ static const struct ethtool_ops ionic_ethtool_ops = {
.get_regs = ionic_get_regs,
.get_link = ethtool_op_get_link,
.get_link_ksettings = ionic_get_link_ksettings,
+ .set_link_ksettings = ionic_set_link_ksettings,
.get_coalesce = ionic_get_coalesce,
.set_coalesce = ionic_set_coalesce,
.get_ringparam = ionic_get_ringparam,
@@ -769,7 +812,8 @@ static const struct ethtool_ops ionic_ethtool_ops = {
.get_module_eeprom = ionic_get_module_eeprom,
.get_pauseparam = ionic_get_pauseparam,
.set_pauseparam = ionic_set_pauseparam,
- .set_link_ksettings = ionic_set_link_ksettings,
+ .get_fecparam = ionic_get_fecparam,
+ .set_fecparam = ionic_set_fecparam,
.nway_reset = ionic_nway_reset,
};
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_if.h b/drivers/net/ethernet/pensando/ionic/ionic_if.h
index 5bfdda19f64d..dbdb7c5ae8f1 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_if.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_if.h
@@ -111,7 +111,7 @@ struct ionic_admin_cmd {
};
/**
- * struct admin_comp - General admin command completion format
+ * struct ionic_admin_comp - General admin command completion format
* @status: The status of the command (enum status_code)
* @comp_index: The index in the descriptor ring for which this
* is the completion.
@@ -134,7 +134,7 @@ static inline u8 color_match(u8 color, u8 done_color)
}
/**
- * struct nop_cmd - NOP command
+ * struct ionic_nop_cmd - NOP command
* @opcode: opcode
*/
struct ionic_nop_cmd {
@@ -143,7 +143,7 @@ struct ionic_nop_cmd {
};
/**
- * struct nop_comp - NOP command completion
+ * struct ionic_nop_comp - NOP command completion
* @status: The status of the command (enum status_code)
*/
struct ionic_nop_comp {
@@ -152,7 +152,7 @@ struct ionic_nop_comp {
};
/**
- * struct dev_init_cmd - Device init command
+ * struct ionic_dev_init_cmd - Device init command
* @opcode: opcode
* @type: device type
*/
@@ -172,7 +172,7 @@ struct ionic_dev_init_comp {
};
/**
- * struct dev_reset_cmd - Device reset command
+ * struct ionic_dev_reset_cmd - Device reset command
* @opcode: opcode
*/
struct ionic_dev_reset_cmd {
@@ -192,7 +192,7 @@ struct ionic_dev_reset_comp {
#define IONIC_IDENTITY_VERSION_1 1
/**
- * struct dev_identify_cmd - Driver/device identify command
+ * struct ionic_dev_identify_cmd - Driver/device identify command
* @opcode: opcode
* @ver: Highest version of identify supported by driver
*/
@@ -284,7 +284,7 @@ enum ionic_lif_type {
};
/**
- * struct lif_identify_cmd - lif identify command
+ * struct ionic_lif_identify_cmd - lif identify command
* @opcode: opcode
* @type: lif type (enum lif_type)
* @ver: version of identify returned by device
@@ -297,7 +297,7 @@ struct ionic_lif_identify_cmd {
};
/**
- * struct lif_identify_comp - lif identify command completion
+ * struct ionic_lif_identify_comp - lif identify command completion
* @status: status of the command (enum status_code)
* @ver: version of identify returned by device
*/
@@ -325,7 +325,7 @@ enum ionic_logical_qtype {
};
/**
- * struct lif_logical_qtype - Descriptor of logical to hardware queue type.
+ * struct ionic_lif_logical_qtype - Descriptor of logical to hardware queue type.
* @qtype: Hardware Queue Type.
* @qid_count: Number of Queue IDs of the logical type.
* @qid_base: Minimum Queue ID of the logical type.
@@ -349,7 +349,7 @@ enum ionic_lif_state {
* @name: lif name
* @mtu: mtu
* @mac: station mac address
- * @features: features (enum eth_hw_features)
+ * @features: features (enum ionic_eth_hw_features)
* @queue_count: queue counts per queue-type
*/
union ionic_lif_config {
@@ -367,7 +367,7 @@ union ionic_lif_config {
};
/**
- * struct lif_identity - lif identity information (type-specific)
+ * struct ionic_lif_identity - lif identity information (type-specific)
*
* @capabilities LIF capabilities
*
@@ -441,11 +441,11 @@ union ionic_lif_identity {
};
/**
- * struct lif_init_cmd - LIF init command
+ * struct ionic_lif_init_cmd - LIF init command
* @opcode: opcode
* @type: LIF type (enum lif_type)
* @index: LIF index
- * @info_pa: destination address for lif info (struct lif_info)
+ * @info_pa: destination address for lif info (struct ionic_lif_info)
*/
struct ionic_lif_init_cmd {
u8 opcode;
@@ -457,7 +457,7 @@ struct ionic_lif_init_cmd {
};
/**
- * struct lif_init_comp - LIF init command completion
+ * struct ionic_lif_init_comp - LIF init command completion
* @status: The status of the command (enum status_code)
*/
struct ionic_lif_init_comp {
@@ -468,7 +468,7 @@ struct ionic_lif_init_comp {
};
/**
- * struct q_init_cmd - Queue init command
+ * struct ionic_q_init_cmd - Queue init command
* @opcode: opcode
* @type: Logical queue type
* @ver: Queue version (defines opcode/descriptor scope)
@@ -525,7 +525,7 @@ struct ionic_q_init_cmd {
};
/**
- * struct q_init_comp - Queue init command completion
+ * struct ionic_q_init_comp - Queue init command completion
* @status: The status of the command (enum status_code)
* @ver: Queue version (defines opcode/descriptor scope)
* @comp_index: The index in the descriptor ring for which this
@@ -556,7 +556,7 @@ enum ionic_txq_desc_opcode {
};
/**
- * struct txq_desc - Ethernet Tx queue descriptor format
+ * struct ionic_txq_desc - Ethernet Tx queue descriptor format
* @opcode: Tx operation, see TXQ_DESC_OPCODE_*:
*
* IONIC_TXQ_DESC_OPCODE_CSUM_NONE:
@@ -735,7 +735,7 @@ static inline void decode_txq_desc_cmd(u64 cmd, u8 *opcode, u8 *flags,
#define IONIC_RX_MAX_SG_ELEMS 8
/**
- * struct txq_sg_desc - Transmit scatter-gather (SG) list
+ * struct ionic_txq_sg_desc - Transmit scatter-gather (SG) list
* @addr: DMA address of SG element data buffer
* @len: Length of SG element data buffer, in bytes
*/
@@ -748,7 +748,7 @@ struct ionic_txq_sg_desc {
};
/**
- * struct txq_comp - Ethernet transmit queue completion descriptor
+ * struct ionic_txq_comp - Ethernet transmit queue completion descriptor
* @status: The status of the command (enum status_code)
* @comp_index: The index in the descriptor ring for which this
* is the completion.
@@ -768,7 +768,7 @@ enum ionic_rxq_desc_opcode {
};
/**
- * struct rxq_desc - Ethernet Rx queue descriptor format
+ * struct ionic_rxq_desc - Ethernet Rx queue descriptor format
* @opcode: Rx operation, see RXQ_DESC_OPCODE_*:
*
* RXQ_DESC_OPCODE_SIMPLE:
@@ -789,7 +789,7 @@ struct ionic_rxq_desc {
};
/**
- * struct rxq_sg_desc - Receive scatter-gather (SG) list
+ * struct ionic_rxq_sg_desc - Receive scatter-gather (SG) list
* @addr: DMA address of SG element data buffer
* @len: Length of SG element data buffer, in bytes
*/
@@ -802,7 +802,7 @@ struct ionic_rxq_sg_desc {
};
/**
- * struct rxq_comp - Ethernet receive queue completion descriptor
+ * struct ionic_rxq_comp - Ethernet receive queue completion descriptor
* @status: The status of the command (enum status_code)
* @num_sg_elems: Number of SG elements used by this descriptor
* @comp_index: The index in the descriptor ring for which this
@@ -896,7 +896,7 @@ enum ionic_eth_hw_features {
};
/**
- * struct q_control_cmd - Queue control command
+ * struct ionic_q_control_cmd - Queue control command
* @opcode: opcode
* @type: Queue type
* @lif_index: LIF index
@@ -1033,8 +1033,8 @@ enum ionic_port_loopback_mode {
/**
* Transceiver Status information
- * @state: Transceiver status (enum xcvr_state)
- * @phy: Physical connection type (enum phy_type)
+ * @state: Transceiver status (enum ionic_xcvr_state)
+ * @phy: Physical connection type (enum ionic_phy_type)
* @pid: Transceiver link mode (enum pid)
* @sprom: Transceiver sprom contents
*/
@@ -1051,9 +1051,9 @@ struct ionic_xcvr_status {
* @mtu: mtu
* @state: port admin state (enum port_admin_state)
* @an_enable: autoneg enable
- * @fec_type: fec type (enum port_fec_type)
- * @pause_type: pause type (enum port_pause_type)
- * @loopback_mode: loopback mode (enum port_loopback_mode)
+ * @fec_type: fec type (enum ionic_port_fec_type)
+ * @pause_type: pause type (enum ionic_port_pause_type)
+ * @loopback_mode: loopback mode (enum ionic_port_loopback_mode)
*/
union ionic_port_config {
struct {
@@ -1080,7 +1080,7 @@ union ionic_port_config {
/**
* Port Status information
- * @status: link status (enum port_oper_status)
+ * @status: link status (enum ionic_port_oper_status)
* @id: port id
* @speed: link speed (in Mbps)
* @xcvr: transceiver status
@@ -1094,7 +1094,7 @@ struct ionic_port_status {
};
/**
- * struct port_identify_cmd - Port identify command
+ * struct ionic_port_identify_cmd - Port identify command
* @opcode: opcode
* @index: port index
* @ver: Highest version of identify supported by driver
@@ -1107,7 +1107,7 @@ struct ionic_port_identify_cmd {
};
/**
- * struct port_identify_comp - Port identify command completion
+ * struct ionic_port_identify_comp - Port identify command completion
* @status: The status of the command (enum status_code)
* @ver: Version of identify returned by device
*/
@@ -1118,10 +1118,10 @@ struct ionic_port_identify_comp {
};
/**
- * struct port_init_cmd - Port initialization command
+ * struct ionic_port_init_cmd - Port initialization command
* @opcode: opcode
* @index: port index
- * @info_pa: destination address for port info (struct port_info)
+ * @info_pa: destination address for port info (struct ionic_port_info)
*/
struct ionic_port_init_cmd {
u8 opcode;
@@ -1132,7 +1132,7 @@ struct ionic_port_init_cmd {
};
/**
- * struct port_init_comp - Port initialization command completion
+ * struct ionic_port_init_comp - Port initialization command completion
* @status: The status of the command (enum status_code)
*/
struct ionic_port_init_comp {
@@ -1141,7 +1141,7 @@ struct ionic_port_init_comp {
};
/**
- * struct port_reset_cmd - Port reset command
+ * struct ionic_port_reset_cmd - Port reset command
* @opcode: opcode
* @index: port index
*/
@@ -1152,7 +1152,7 @@ struct ionic_port_reset_cmd {
};
/**
- * struct port_reset_comp - Port reset command completion
+ * struct ionic_port_reset_comp - Port reset command completion
* @status: The status of the command (enum status_code)
*/
struct ionic_port_reset_comp {
@@ -1183,7 +1183,7 @@ enum ionic_port_attr {
};
/**
- * struct port_setattr_cmd - Set port attributes on the NIC
+ * struct ionic_port_setattr_cmd - Set port attributes on the NIC
* @opcode: Opcode
* @index: port index
* @attr: Attribute type (enum ionic_port_attr)
@@ -1207,7 +1207,7 @@ struct ionic_port_setattr_cmd {
};
/**
- * struct port_setattr_comp - Port set attr command completion
+ * struct ionic_port_setattr_comp - Port set attr command completion
* @status: The status of the command (enum status_code)
* @color: Color bit
*/
@@ -1218,7 +1218,7 @@ struct ionic_port_setattr_comp {
};
/**
- * struct port_getattr_cmd - Get port attributes from the NIC
+ * struct ionic_port_getattr_cmd - Get port attributes from the NIC
* @opcode: Opcode
* @index: port index
* @attr: Attribute type (enum ionic_port_attr)
@@ -1231,7 +1231,7 @@ struct ionic_port_getattr_cmd {
};
/**
- * struct port_getattr_comp - Port get attr command completion
+ * struct ionic_port_getattr_comp - Port get attr command completion
* @status: The status of the command (enum status_code)
* @color: Color bit
*/
@@ -1252,10 +1252,10 @@ struct ionic_port_getattr_comp {
};
/**
- * struct lif_status - Lif status register
+ * struct ionic_lif_status - Lif status register
* @eid: most recent NotifyQ event id
* @port_num: port the lif is connected to
- * @link_status: port status (enum port_oper_status)
+ * @link_status: port status (enum ionic_port_oper_status)
* @link_speed: speed of link in Mbps
* @link_down_count: number of times link status changes
*/
@@ -1270,7 +1270,7 @@ struct ionic_lif_status {
};
/**
- * struct lif_reset_cmd - LIF reset command
+ * struct ionic_lif_reset_cmd - LIF reset command
* @opcode: opcode
* @index: LIF index
*/
@@ -1290,7 +1290,7 @@ enum ionic_dev_state {
};
/**
- * enum dev_attr - List of device attributes
+ * enum ionic_dev_attr - List of device attributes
*/
enum ionic_dev_attr {
IONIC_DEV_ATTR_STATE = 0,
@@ -1299,10 +1299,10 @@ enum ionic_dev_attr {
};
/**
- * struct dev_setattr_cmd - Set Device attributes on the NIC
+ * struct ionic_dev_setattr_cmd - Set Device attributes on the NIC
* @opcode: Opcode
- * @attr: Attribute type (enum dev_attr)
- * @state: Device state (enum dev_state)
+ * @attr: Attribute type (enum ionic_dev_attr)
+ * @state: Device state (enum ionic_dev_state)
* @name: The bus info, e.g. PCI slot-device-function, 0 terminated
* @features: Device features
*/
@@ -1319,7 +1319,7 @@ struct ionic_dev_setattr_cmd {
};
/**
- * struct dev_setattr_comp - Device set attr command completion
+ * struct ionic_dev_setattr_comp - Device set attr command completion
* @status: The status of the command (enum status_code)
* @features: Device features
* @color: Color bit
@@ -1335,9 +1335,9 @@ struct ionic_dev_setattr_comp {
};
/**
- * struct dev_getattr_cmd - Get Device attributes from the NIC
+ * struct ionic_dev_getattr_cmd - Get Device attributes from the NIC
* @opcode: opcode
- * @attr: Attribute type (enum dev_attr)
+ * @attr: Attribute type (enum ionic_dev_attr)
*/
struct ionic_dev_getattr_cmd {
u8 opcode;
@@ -1346,7 +1346,7 @@ struct ionic_dev_getattr_cmd {
};
/**
- * struct dev_setattr_comp - Device set attr command completion
+ * struct ionic_dev_setattr_comp - Device set attr command completion
* @status: The status of the command (enum status_code)
* @features: Device features
* @color: Color bit
@@ -1376,7 +1376,7 @@ enum ionic_rss_hash_types {
};
/**
- * enum lif_attr - List of LIF attributes
+ * enum ionic_lif_attr - List of LIF attributes
*/
enum ionic_lif_attr {
IONIC_LIF_ATTR_STATE = 0,
@@ -1389,15 +1389,15 @@ enum ionic_lif_attr {
};
/**
- * struct lif_setattr_cmd - Set LIF attributes on the NIC
+ * struct ionic_lif_setattr_cmd - Set LIF attributes on the NIC
* @opcode: Opcode
- * @type: Attribute type (enum lif_attr)
+ * @type: Attribute type (enum ionic_lif_attr)
* @index: LIF index
* @state: lif state (enum lif_state)
* @name: The netdev name string, 0 terminated
* @mtu: Mtu
* @mac: Station mac
- * @features: Features (enum eth_hw_features)
+ * @features: Features (enum ionic_eth_hw_features)
* @rss: RSS properties
* @types: The hash types to enable (see rss_hash_types).
* @key: The hash secret key.
@@ -1426,11 +1426,11 @@ struct ionic_lif_setattr_cmd {
};
/**
- * struct lif_setattr_comp - LIF set attr command completion
+ * struct ionic_lif_setattr_comp - LIF set attr command completion
* @status: The status of the command (enum status_code)
* @comp_index: The index in the descriptor ring for which this
* is the completion.
- * @features: features (enum eth_hw_features)
+ * @features: features (enum ionic_eth_hw_features)
* @color: Color bit
*/
struct ionic_lif_setattr_comp {
@@ -1445,9 +1445,9 @@ struct ionic_lif_setattr_comp {
};
/**
- * struct lif_getattr_cmd - Get LIF attributes from the NIC
+ * struct ionic_lif_getattr_cmd - Get LIF attributes from the NIC
* @opcode: Opcode
- * @attr: Attribute type (enum lif_attr)
+ * @attr: Attribute type (enum ionic_lif_attr)
* @index: LIF index
*/
struct ionic_lif_getattr_cmd {
@@ -1458,7 +1458,7 @@ struct ionic_lif_getattr_cmd {
};
/**
- * struct lif_getattr_comp - LIF get attr command completion
+ * struct ionic_lif_getattr_comp - LIF get attr command completion
* @status: The status of the command (enum status_code)
* @comp_index: The index in the descriptor ring for which this
* is the completion.
@@ -1466,7 +1466,7 @@ struct ionic_lif_getattr_cmd {
* @name: The netdev name string, 0 terminated
* @mtu: Mtu
* @mac: Station mac
- * @features: Features (enum eth_hw_features)
+ * @features: Features (enum ionic_eth_hw_features)
* @color: Color bit
*/
struct ionic_lif_getattr_comp {
@@ -1492,7 +1492,7 @@ enum ionic_rx_mode {
};
/**
- * struct rx_mode_set_cmd - Set LIF's Rx mode command
+ * struct ionic_rx_mode_set_cmd - Set LIF's Rx mode command
* @opcode: opcode
* @lif_index: LIF index
* @rx_mode: Rx mode flags:
@@ -1519,7 +1519,7 @@ enum ionic_rx_filter_match_type {
};
/**
- * struct rx_filter_add_cmd - Add LIF Rx filter command
+ * struct ionic_rx_filter_add_cmd - Add LIF Rx filter command
* @opcode: opcode
* @qtype: Queue type
* @lif_index: LIF index
@@ -1550,7 +1550,7 @@ struct ionic_rx_filter_add_cmd {
};
/**
- * struct rx_filter_add_comp - Add LIF Rx filter command completion
+ * struct ionic_rx_filter_add_comp - Add LIF Rx filter command completion
* @status: The status of the command (enum status_code)
* @comp_index: The index in the descriptor ring for which this
* is the completion.
@@ -1567,7 +1567,7 @@ struct ionic_rx_filter_add_comp {
};
/**
- * struct rx_filter_del_cmd - Delete LIF Rx filter command
+ * struct ionic_rx_filter_del_cmd - Delete LIF Rx filter command
* @opcode: opcode
* @lif_index: LIF index
* @filter_id: Filter ID
@@ -1583,7 +1583,7 @@ struct ionic_rx_filter_del_cmd {
typedef struct ionic_admin_comp ionic_rx_filter_del_comp;
/**
- * struct qos_identify_cmd - QoS identify command
+ * struct ionic_qos_identify_cmd - QoS identify command
* @opcode: opcode
* @ver: Highest version of identify supported by driver
*
@@ -1595,7 +1595,7 @@ struct ionic_qos_identify_cmd {
};
/**
- * struct qos_identify_comp - QoS identify command completion
+ * struct ionic_qos_identify_comp - QoS identify command completion
* @status: The status of the command (enum status_code)
* @ver: Version of identify returned by device
*/
@@ -1610,7 +1610,7 @@ struct ionic_qos_identify_comp {
#define IONIC_QOS_DSCP_MAX_VALUES 64
/**
- * enum qos_class
+ * enum ionic_qos_class
*/
enum ionic_qos_class {
IONIC_QOS_CLASS_DEFAULT = 0,
@@ -1623,7 +1623,7 @@ enum ionic_qos_class {
};
/**
- * enum qos_class_type - Traffic classification criteria
+ * enum ionic_qos_class_type - Traffic classification criteria
*/
enum ionic_qos_class_type {
IONIC_QOS_CLASS_TYPE_NONE = 0,
@@ -1632,7 +1632,7 @@ enum ionic_qos_class_type {
};
/**
- * enum qos_sched_type - Qos class scheduling type
+ * enum ionic_qos_sched_type - Qos class scheduling type
*/
enum ionic_qos_sched_type {
IONIC_QOS_SCHED_TYPE_STRICT = 0, /* Strict priority */
@@ -1640,15 +1640,15 @@ enum ionic_qos_sched_type {
};
/**
- * union qos_config - Qos configuration structure
+ * union ionic_qos_config - Qos configuration structure
* @flags: Configuration flags
* IONIC_QOS_CONFIG_F_ENABLE enable
* IONIC_QOS_CONFIG_F_DROP drop/nodrop
* IONIC_QOS_CONFIG_F_RW_DOT1Q_PCP enable dot1q pcp rewrite
* IONIC_QOS_CONFIG_F_RW_IP_DSCP enable ip dscp rewrite
- * @sched_type: Qos class scheduling type (enum qos_sched_type)
- * @class_type: Qos class type (enum qos_class_type)
- * @pause_type: Qos pause type (enum qos_pause_type)
+ * @sched_type: Qos class scheduling type (enum ionic_qos_sched_type)
+ * @class_type: Qos class type (enum ionic_qos_class_type)
+ * @pause_type: Qos pause type (enum ionic_qos_pause_type)
* @name: Qos class name
* @mtu: MTU of the class
* @pfc_dot1q_pcp: Pcp value for pause frames (valid iff F_NODROP)
@@ -1697,7 +1697,7 @@ union ionic_qos_config {
};
/**
- * union qos_identity - QoS identity structure
+ * union ionic_qos_identity - QoS identity structure
* @version: Version of the identify structure
* @type: QoS system type
* @nclasses: Number of usable QoS classes
@@ -1730,7 +1730,7 @@ struct ionic_qos_init_cmd {
typedef struct ionic_admin_comp ionic_qos_init_comp;
/**
- * struct qos_reset_cmd - Qos config reset command
+ * struct ionic_qos_reset_cmd - Qos config reset command
* @opcode: Opcode
*/
struct ionic_qos_reset_cmd {
@@ -1742,7 +1742,7 @@ struct ionic_qos_reset_cmd {
typedef struct ionic_admin_comp ionic_qos_reset_comp;
/**
- * struct fw_download_cmd - Firmware download command
+ * struct ionic_fw_download_cmd - Firmware download command
* @opcode: opcode
* @addr: dma address of the firmware buffer
* @offset: offset of the firmware buffer within the full image
@@ -1765,9 +1765,9 @@ enum ionic_fw_control_oper {
};
/**
- * struct fw_control_cmd - Firmware control command
+ * struct ionic_fw_control_cmd - Firmware control command
* @opcode: opcode
- * @oper: firmware control operation (enum fw_control_oper)
+ * @oper: firmware control operation (enum ionic_fw_control_oper)
* @slot: slot to activate
*/
struct ionic_fw_control_cmd {
@@ -1779,7 +1779,7 @@ struct ionic_fw_control_cmd {
};
/**
- * struct fw_control_comp - Firmware control copletion
+ * struct ionic_fw_control_comp - Firmware control completion
* @opcode: opcode
* @slot: slot where the firmware was installed
*/
@@ -1797,13 +1797,13 @@ struct ionic_fw_control_comp {
******************************************************************/
/**
- * struct rdma_reset_cmd - Reset RDMA LIF cmd
+ * struct ionic_rdma_reset_cmd - Reset RDMA LIF cmd
* @opcode: opcode
* @lif_index: lif index
*
* There is no rdma specific dev command completion struct. Completion uses
- * the common struct admin_comp. Only the status is indicated. Nonzero status
- * means the LIF does not support rdma.
+ * the common struct ionic_admin_comp. Only the status is indicated.
+ * Nonzero status means the LIF does not support rdma.
**/
struct ionic_rdma_reset_cmd {
u8 opcode;
@@ -1813,7 +1813,7 @@ struct ionic_rdma_reset_cmd {
};
/**
- * struct rdma_queue_cmd - Create RDMA Queue command
+ * struct ionic_rdma_queue_cmd - Create RDMA Queue command
* @opcode: opcode, 52, 53
* @lif_index: lif index
* @qid_ver: (qid | (rdma version << 24))
@@ -1839,7 +1839,7 @@ struct ionic_rdma_reset_cmd {
* memory registration.
*
* There is no rdma specific dev command completion struct. Completion uses
- * the common struct admin_comp. Only the status is indicated.
+ * the common struct ionic_admin_comp. Only the status is indicated.
**/
struct ionic_rdma_queue_cmd {
u8 opcode;
@@ -1860,7 +1860,7 @@ struct ionic_rdma_queue_cmd {
******************************************************************/
/**
- * struct notifyq_event
+ * struct ionic_notifyq_event
* @eid: event number
* @ecode: event code
* @data: unspecified data about the event
@@ -1875,7 +1875,7 @@ struct ionic_notifyq_event {
};
/**
- * struct link_change_event
+ * struct ionic_link_change_event
* @eid: event number
* @ecode: event code = EVENT_OPCODE_LINK_CHANGE
* @link_status: link up or down, with error bits (enum port_status)
@@ -1892,7 +1892,7 @@ struct ionic_link_change_event {
};
/**
- * struct reset_event
+ * struct ionic_reset_event
* @eid: event number
* @ecode: event code = EVENT_OPCODE_RESET
* @reset_code: reset type
@@ -1910,7 +1910,7 @@ struct ionic_reset_event {
};
/**
- * struct heartbeat_event
+ * struct ionic_heartbeat_event
* @eid: event number
* @ecode: event code = EVENT_OPCODE_HEARTBEAT
*
@@ -1923,7 +1923,7 @@ struct ionic_heartbeat_event {
};
/**
- * struct log_event
+ * struct ionic_log_event
* @eid: event number
* @ecode: event code = EVENT_OPCODE_LOG
* @data: log data
@@ -1937,7 +1937,7 @@ struct ionic_log_event {
};
/**
- * struct port_stats
+ * struct ionic_port_stats
*/
struct ionic_port_stats {
__le64 frames_rx_ok;
@@ -2067,7 +2067,7 @@ struct ionic_mgmt_port_stats {
};
/**
- * struct port_identity - port identity structure
+ * struct ionic_port_identity - port identity structure
* @version: identity structure version
* @type: type of port (enum port_type)
* @num_lanes: number of lanes for the port
@@ -2099,7 +2099,7 @@ union ionic_port_identity {
};
/**
- * struct port_info - port info structure
+ * struct ionic_port_info - port info structure
* @port_status: port status
* @port_stats: port stats
*/
@@ -2110,7 +2110,7 @@ struct ionic_port_info {
};
/**
- * struct lif_stats
+ * struct ionic_lif_stats
*/
struct ionic_lif_stats {
/* RX */
@@ -2264,7 +2264,7 @@ struct ionic_lif_stats {
};
/**
- * struct lif_info - lif info structure
+ * struct ionic_lif_info - lif info structure
*/
struct ionic_lif_info {
union ionic_lif_config config;
@@ -2357,7 +2357,7 @@ union ionic_dev_info_regs {
};
/**
- * union dev_cmd_regs - Device command register format (read-write)
+ * union ionic_dev_cmd_regs - Device command register format (read-write)
* @doorbell: Device Cmd Doorbell, write-only.
* Write a 1 to signal device to process cmd,
* poll done for completion.
@@ -2379,7 +2379,7 @@ union ionic_dev_cmd_regs {
};
/**
- * union dev_regs - Device register format in for bar 0 page 0
+ * union ionic_dev_regs - Device register format for bar 0 page 0
* @info: Device info registers
* @devcmd: Device command registers
*/
@@ -2433,7 +2433,7 @@ union ionic_adminq_comp {
#define IONIC_ASIC_TYPE_CAPRI 0
/**
- * struct doorbell - Doorbell register layout
+ * struct ionic_doorbell - Doorbell register layout
* @p_index: Producer index
* @ring: Selects the specific ring of the queue to update.
* Type-specific meaning:
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index 20faa8d24c9f..60fd14df49d7 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -244,6 +244,21 @@ static int ionic_qcq_disable(struct ionic_qcq *qcq)
return ionic_adminq_post_wait(lif, &ctx);
}
+static void ionic_lif_quiesce(struct ionic_lif *lif)
+{
+ struct ionic_admin_ctx ctx = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
+ .cmd.lif_setattr = {
+ .opcode = IONIC_CMD_LIF_SETATTR,
+ .attr = IONIC_LIF_ATTR_STATE,
+ .index = lif->index,
+ .state = IONIC_LIF_DISABLE
+ },
+ };
+
+ ionic_adminq_post_wait(lif, &ctx);
+}
+
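[Editor's note: ionic_lif_quiesce() reuses the driver's on-stack admin-context idiom: a completion initialized with COMPLETION_INITIALIZER_ONSTACK() plus a typed command in the union. As a sketch, the same idiom setting a different LIF attribute; the field names follow struct ionic_lif_setattr_cmd, and the cpu_to_le16/32 wrapping and the helper itself are illustrative assumptions.]

	/* sketch only: new_mtu is a hypothetical parameter */
	static int ionic_lif_set_mtu_sketch(struct ionic_lif *lif, u32 new_mtu)
	{
		struct ionic_admin_ctx ctx = {
			.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
			.cmd.lif_setattr = {
				.opcode = IONIC_CMD_LIF_SETATTR,
				.attr = IONIC_LIF_ATTR_MTU,
				.index = cpu_to_le16(lif->index),
				.mtu = cpu_to_le32(new_mtu),
			},
		};

		return ionic_adminq_post_wait(lif, &ctx);	/* blocks on completion */
	}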
static void ionic_lif_qcq_deinit(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
struct ionic_dev *idev = &lif->ionic->idev;
@@ -609,12 +624,14 @@ static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
.lif_index = cpu_to_le16(lif->index),
.type = q->type,
.index = cpu_to_le32(q->index),
- .flags = cpu_to_le16(IONIC_QINIT_F_IRQ),
+ .flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
+ IONIC_QINIT_F_SG),
.intr_index = cpu_to_le16(cq->bound_intr->index),
.pid = cpu_to_le16(q->pid),
.ring_size = ilog2(q->num_descs),
.ring_base = cpu_to_le64(q->base_pa),
.cq_ring_base = cpu_to_le64(cq->base_pa),
+ .sg_ring_base = cpu_to_le64(q->sg_base_pa),
},
};
int err;
@@ -1432,7 +1449,6 @@ static int ionic_txrx_alloc(struct ionic_lif *lif)
unsigned int flags;
unsigned int i;
int err = 0;
- u32 coal;
flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG;
for (i = 0; i < lif->nxqs; i++) {
@@ -1448,21 +1464,22 @@ static int ionic_txrx_alloc(struct ionic_lif *lif)
lif->txqcqs[i].qcq->stats = lif->txqcqs[i].stats;
}
- flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_INTR;
- coal = ionic_coal_usec_to_hw(lif->ionic, lif->rx_coalesce_usecs);
+ flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_SG | IONIC_QCQ_F_INTR;
for (i = 0; i < lif->nxqs; i++) {
err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
lif->nrxq_descs,
sizeof(struct ionic_rxq_desc),
sizeof(struct ionic_rxq_comp),
- 0, lif->kern_pid, &lif->rxqcqs[i].qcq);
+ sizeof(struct ionic_rxq_sg_desc),
+ lif->kern_pid, &lif->rxqcqs[i].qcq);
if (err)
goto err_out;
lif->rxqcqs[i].qcq->stats = lif->rxqcqs[i].stats;
ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
- lif->rxqcqs[i].qcq->intr.index, coal);
+ lif->rxqcqs[i].qcq->intr.index,
+ lif->rx_coalesce_hw);
ionic_link_qcq_interrupts(lif->rxqcqs[i].qcq,
lif->txqcqs[i].qcq);
}
@@ -1592,6 +1609,7 @@ int ionic_stop(struct net_device *netdev)
netif_tx_disable(netdev);
ionic_txrx_disable(lif);
+ ionic_lif_quiesce(lif);
ionic_txrx_deinit(lif);
ionic_txrx_free(lif);
@@ -1621,8 +1639,9 @@ int ionic_reset_queues(struct ionic_lif *lif)
/* Put off the next watchdog timeout */
netif_trans_update(lif->netdev);
- if (!ionic_wait_for_bit(lif, IONIC_LIF_QUEUE_RESET))
- return -EBUSY;
+ err = ionic_wait_for_bit(lif, IONIC_LIF_QUEUE_RESET);
+ if (err)
+ return err;
running = netif_running(lif->netdev);
if (running)
@@ -1641,7 +1660,6 @@ static struct ionic_lif *ionic_lif_alloc(struct ionic *ionic, unsigned int index
struct net_device *netdev;
struct ionic_lif *lif;
int tbl_sz;
- u32 coal;
int err;
netdev = alloc_etherdev_mqs(sizeof(*lif),
@@ -1672,8 +1690,9 @@ static struct ionic_lif *ionic_lif_alloc(struct ionic *ionic, unsigned int index
lif->nrxq_descs = IONIC_DEF_TXRX_DESC;
/* Convert the default coalesce value to actual hw resolution */
- coal = ionic_coal_usec_to_hw(lif->ionic, IONIC_ITR_COAL_USEC_DEFAULT);
- lif->rx_coalesce_usecs = ionic_coal_hw_to_usec(lif->ionic, coal);
+ lif->rx_coalesce_usecs = IONIC_ITR_COAL_USEC_DEFAULT;
+ lif->rx_coalesce_hw = ionic_coal_usec_to_hw(lif->ionic,
+ lif->rx_coalesce_usecs);
snprintf(lif->name, sizeof(lif->name), "lif%u", index);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.h b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
index 6a95b42a8d8c..a55fd1f8c31b 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
@@ -175,7 +175,9 @@ struct ionic_lif {
unsigned long *dbid_inuse;
unsigned int dbid_count;
struct dentry *dentry;
- u32 rx_coalesce_usecs;
+ u32 rx_coalesce_usecs; /* what the user asked for */
+ u32 rx_coalesce_hw; /* what the hw is using */
+
u32 flags;
struct work_struct tx_timeout_work;
};
@@ -187,15 +189,10 @@ struct ionic_lif {
#define lif_to_txq(lif, i) (&lif_to_txqcq((lif), i)->q)
#define lif_to_rxq(lif, i) (&lif_to_txqcq((lif), i)->q)
+/* return 0 if the bit was successfully set, else non-zero */
static inline int ionic_wait_for_bit(struct ionic_lif *lif, int bitname)
{
- unsigned long tlimit = jiffies + HZ;
-
- while (test_and_set_bit(bitname, lif->state) &&
- time_before(jiffies, tlimit))
- usleep_range(100, 200);
-
- return test_bit(bitname, lif->state);
+ return wait_on_bit_lock(lif->state, bitname, TASK_INTERRUPTIBLE);
}
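[Editor's note: wait_on_bit_lock() returns 0 once it owns the bit and a non-zero error if the interruptible sleep is broken, which is why the callers above now propagate the return value instead of a blanket -EBUSY. Sleepers also need an explicit wake on release; a sketch of the matching unlock side, assuming the driver pairs the lock with clear_bit()/wake_up_bit().]

	static inline void ionic_clear_bit_and_wake(struct ionic_lif *lif, int bitname)
	{
		clear_bit(bitname, lif->state);
		smp_mb__after_atomic();		/* make the clear visible before waking */
		wake_up_bit(lif->state, bitname);
	}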
static inline u32 ionic_coal_usec_to_hw(struct ionic *ionic, u32 usecs)
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c
index aab311413412..3590ea7fd88a 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_main.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c
@@ -247,6 +247,10 @@ static int ionic_adminq_post(struct ionic_lif *lif, struct ionic_admin_ctx *ctx)
goto err_out;
}
+ err = ionic_heartbeat_check(lif->ionic);
+ if (err)
+ goto err_out;
+
memcpy(adminq->head->desc, &ctx->cmd, sizeof(ctx->cmd));
dev_dbg(&lif->netdev->dev, "post admin queue command:\n");
@@ -307,6 +311,14 @@ int ionic_napi(struct napi_struct *napi, int budget, ionic_cq_cb cb,
return work_done;
}
+static void ionic_dev_cmd_clean(struct ionic *ionic)
+{
+ union ionic_dev_cmd_regs *regs = ionic->idev.dev_cmd_regs;
+
+ iowrite32(0, &regs->doorbell);
+ memset_io(&regs->cmd, 0, sizeof(regs->cmd));
+}
+
int ionic_dev_cmd_wait(struct ionic *ionic, unsigned long max_seconds)
{
struct ionic_dev *idev = &ionic->idev;
@@ -316,6 +328,7 @@ int ionic_dev_cmd_wait(struct ionic *ionic, unsigned long max_seconds)
int opcode;
int done;
int err;
+ int hb;
WARN_ON(in_interrupt());
@@ -330,7 +343,8 @@ try_again:
if (done)
break;
msleep(20);
- } while (!done && time_before(jiffies, max_wait));
+ hb = ionic_heartbeat_check(ionic);
+ } while (!done && !hb && time_before(jiffies, max_wait));
duration = jiffies - start_time;
opcode = idev->dev_cmd_regs->cmd.cmd.opcode;
@@ -338,7 +352,15 @@ try_again:
ionic_opcode_to_str(opcode), opcode,
done, duration / HZ, duration);
+ if (!done && hb) {
+ ionic_dev_cmd_clean(ionic);
+ dev_warn(ionic->dev, "DEVCMD %s (%d) failed - FW halted\n",
+ ionic_opcode_to_str(opcode), opcode);
+ return -ENXIO;
+ }
+
if (!done && !time_before(jiffies, max_wait)) {
+ ionic_dev_cmd_clean(ionic);
dev_warn(ionic->dev, "DEVCMD %s (%d) timeout after %ld secs\n",
ionic_opcode_to_str(opcode), opcode, max_seconds);
return -ETIMEDOUT;
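[Editor's note: the two failure modes above now differ: a stopped FW heartbeat scrubs the command registers and returns -ENXIO, while an ordinary timeout stays -ETIMEDOUT. A caller-side sketch with a hypothetical helper name:]

	/* -ENXIO means the FW heartbeat stopped and the command was scrubbed,
	 * so retries are pointless until the device recovers */
	static int ionic_dev_cmd_try(struct ionic *ionic)
	{
		int err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);

		if (err == -ENXIO)
			dev_err(ionic->dev, "FW halted, not retrying\n");
		return err;
	}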
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index ab6663d94f42..97e79949b359 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -34,52 +34,110 @@ static inline struct netdev_queue *q_to_ndq(struct ionic_queue *q)
return netdev_get_tx_queue(q->lif->netdev, q->index);
}
-static void ionic_rx_recycle(struct ionic_queue *q, struct ionic_desc_info *desc_info,
- struct sk_buff *skb)
+static struct sk_buff *ionic_rx_skb_alloc(struct ionic_queue *q,
+ unsigned int len, bool frags)
{
- struct ionic_rxq_desc *old = desc_info->desc;
- struct ionic_rxq_desc *new = q->head->desc;
+ struct ionic_lif *lif = q->lif;
+ struct ionic_rx_stats *stats;
+ struct net_device *netdev;
+ struct sk_buff *skb;
+
+ netdev = lif->netdev;
+ stats = q_to_rx_stats(q);
+
+ if (frags)
+ skb = napi_get_frags(&q_to_qcq(q)->napi);
+ else
+ skb = netdev_alloc_skb_ip_align(netdev, len);
- new->addr = old->addr;
- new->len = old->len;
+ if (unlikely(!skb)) {
+ net_warn_ratelimited("%s: SKB alloc failed on %s!\n",
+ netdev->name, q->name);
+ stats->alloc_err++;
+ return NULL;
+ }
- ionic_rxq_post(q, true, ionic_rx_clean, skb);
+ return skb;
}
-static bool ionic_rx_copybreak(struct ionic_queue *q, struct ionic_desc_info *desc_info,
- struct ionic_cq_info *cq_info, struct sk_buff **skb)
+static struct sk_buff *ionic_rx_frags(struct ionic_queue *q,
+ struct ionic_desc_info *desc_info,
+ struct ionic_cq_info *cq_info)
{
struct ionic_rxq_comp *comp = cq_info->cq_desc;
- struct ionic_rxq_desc *desc = desc_info->desc;
- struct net_device *netdev = q->lif->netdev;
struct device *dev = q->lif->ionic->dev;
- struct sk_buff *new_skb;
- u16 clen, dlen;
-
- clen = le16_to_cpu(comp->len);
- dlen = le16_to_cpu(desc->len);
- if (clen > q->lif->rx_copybreak) {
- dma_unmap_single(dev, (dma_addr_t)le64_to_cpu(desc->addr),
- dlen, DMA_FROM_DEVICE);
- return false;
- }
+ struct ionic_page_info *page_info;
+ struct sk_buff *skb;
+ unsigned int i;
+ u16 frag_len;
+ u16 len;
- new_skb = netdev_alloc_skb_ip_align(netdev, clen);
- if (!new_skb) {
- dma_unmap_single(dev, (dma_addr_t)le64_to_cpu(desc->addr),
- dlen, DMA_FROM_DEVICE);
- return false;
- }
+ page_info = &desc_info->pages[0];
+ len = le16_to_cpu(comp->len);
- dma_sync_single_for_cpu(dev, (dma_addr_t)le64_to_cpu(desc->addr),
- clen, DMA_FROM_DEVICE);
+ prefetch(page_address(page_info->page) + NET_IP_ALIGN);
- memcpy(new_skb->data, (*skb)->data, clen);
+ skb = ionic_rx_skb_alloc(q, len, true);
+ if (unlikely(!skb))
+ return NULL;
- ionic_rx_recycle(q, desc_info, *skb);
- *skb = new_skb;
+ i = comp->num_sg_elems + 1;
+ do {
+ if (unlikely(!page_info->page)) {
+ struct napi_struct *napi = &q_to_qcq(q)->napi;
- return true;
+ napi->skb = NULL;
+ dev_kfree_skb(skb);
+ return NULL;
+ }
+
+ frag_len = min(len, (u16)PAGE_SIZE);
+ len -= frag_len;
+
+ dma_unmap_page(dev, dma_unmap_addr(page_info, dma_addr),
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ page_info->page, 0, frag_len, PAGE_SIZE);
+ page_info->page = NULL;
+ page_info++;
+ i--;
+ } while (i > 0);
+
+ return skb;
+}
+
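[Editor's note: ionic_rx_frags() walks num_sg_elems + 1 buffers and carves the completion length into page-sized chunks via min(len, PAGE_SIZE). The chunking in isolation, with PAGE_SZ standing in for the kernel's PAGE_SIZE:]

	#include <stdio.h>

	#define PAGE_SZ 4096u		/* stand-in for PAGE_SIZE */

	int main(void)
	{
		unsigned int len = 9000;	/* e.g. a jumbo-frame completion */
		unsigned int frag = 0;

		while (len) {
			unsigned int frag_len = len < PAGE_SZ ? len : PAGE_SZ;

			printf("frag %u: %u bytes\n", frag++, frag_len);
			len -= frag_len;
		}
		return 0;	/* prints 4096, 4096, 808 */
	}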
+static struct sk_buff *ionic_rx_copybreak(struct ionic_queue *q,
+ struct ionic_desc_info *desc_info,
+ struct ionic_cq_info *cq_info)
+{
+ struct ionic_rxq_comp *comp = cq_info->cq_desc;
+ struct device *dev = q->lif->ionic->dev;
+ struct ionic_page_info *page_info;
+ struct sk_buff *skb;
+ u16 len;
+
+ page_info = &desc_info->pages[0];
+ len = le16_to_cpu(comp->len);
+
+ skb = ionic_rx_skb_alloc(q, len, false);
+ if (unlikely(!skb))
+ return NULL;
+
+ if (unlikely(!page_info->page)) {
+ dev_kfree_skb(skb);
+ return NULL;
+ }
+
+ dma_sync_single_for_cpu(dev, dma_unmap_addr(page_info, dma_addr),
+ len, DMA_FROM_DEVICE);
+ skb_copy_to_linear_data(skb, page_address(page_info->page), len);
+ dma_sync_single_for_device(dev, dma_unmap_addr(page_info, dma_addr),
+ len, DMA_FROM_DEVICE);
+
+ skb_put(skb, len);
+ skb->protocol = eth_type_trans(skb, q->lif->netdev);
+
+ return skb;
}
static void ionic_rx_clean(struct ionic_queue *q, struct ionic_desc_info *desc_info,
@@ -87,35 +145,34 @@ static void ionic_rx_clean(struct ionic_queue *q, struct ionic_desc_info *desc_i
{
struct ionic_rxq_comp *comp = cq_info->cq_desc;
struct ionic_qcq *qcq = q_to_qcq(q);
- struct sk_buff *skb = cb_arg;
struct ionic_rx_stats *stats;
struct net_device *netdev;
+ struct sk_buff *skb;
stats = q_to_rx_stats(q);
netdev = q->lif->netdev;
- if (comp->status) {
- ionic_rx_recycle(q, desc_info, skb);
+ if (comp->status)
return;
- }
- if (unlikely(test_bit(IONIC_LIF_QUEUE_RESET, q->lif->state))) {
- /* no packet processing while resetting */
- ionic_rx_recycle(q, desc_info, skb);
+ /* no packet processing while resetting */
+ if (unlikely(test_bit(IONIC_LIF_QUEUE_RESET, q->lif->state)))
return;
- }
stats->pkts++;
stats->bytes += le16_to_cpu(comp->len);
- ionic_rx_copybreak(q, desc_info, cq_info, &skb);
+ if (le16_to_cpu(comp->len) <= q->lif->rx_copybreak)
+ skb = ionic_rx_copybreak(q, desc_info, cq_info);
+ else
+ skb = ionic_rx_frags(q, desc_info, cq_info);
- skb_put(skb, le16_to_cpu(comp->len));
- skb->protocol = eth_type_trans(skb, netdev);
+ if (unlikely(!skb))
+ return;
skb_record_rx_queue(skb, q->index);
- if (netdev->features & NETIF_F_RXHASH) {
+ if (likely(netdev->features & NETIF_F_RXHASH)) {
switch (comp->pkt_type_color & IONIC_RXQ_COMP_PKT_TYPE_MASK) {
case IONIC_PKT_TYPE_IPV4:
case IONIC_PKT_TYPE_IPV6:
@@ -132,7 +189,7 @@ static void ionic_rx_clean(struct ionic_queue *q, struct ionic_desc_info *desc_i
}
}
- if (netdev->features & NETIF_F_RXCSUM) {
+ if (likely(netdev->features & NETIF_F_RXCSUM)) {
if (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC) {
skb->ip_summed = CHECKSUM_COMPLETE;
skb->csum = (__wsum)le16_to_cpu(comp->csum);
@@ -142,18 +199,21 @@ static void ionic_rx_clean(struct ionic_queue *q, struct ionic_desc_info *desc_i
stats->csum_none++;
}
- if ((comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_TCP_BAD) ||
- (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_UDP_BAD) ||
- (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_BAD))
+ if (unlikely((comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_TCP_BAD) ||
+ (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_UDP_BAD) ||
+ (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_BAD)))
stats->csum_error++;
- if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
+ if (likely(netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
if (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_VLAN)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
le16_to_cpu(comp->vlan_tci));
}
- napi_gro_receive(&qcq->napi, skb);
+ if (le16_to_cpu(comp->len) <= q->lif->rx_copybreak)
+ napi_gro_receive(&qcq->napi, skb);
+ else
+ napi_gro_frags(&qcq->napi);
}
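[Editor's note: pulled together, the receive path now makes the copybreak decision twice with the same test: once to choose the skb construction and once to choose the GRO hand-off, since an skb obtained from napi_get_frags() must be completed with napi_gro_frags() while a copied linear skb goes through napi_gro_receive(). In sketch form:]

	/* sketch of the paired decisions in ionic_rx_clean() above */
	if (le16_to_cpu(comp->len) <= q->lif->rx_copybreak) {
		skb = ionic_rx_copybreak(q, desc_info, cq_info);	/* linear copy */
		if (skb)
			napi_gro_receive(&qcq->napi, skb);
	} else {
		skb = ionic_rx_frags(q, desc_info, cq_info);		/* page frags */
		if (skb)
			napi_gro_frags(&qcq->napi);	/* consumes napi->skb */
	}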
static bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
@@ -213,66 +273,125 @@ void ionic_rx_flush(struct ionic_cq *cq)
work_done, IONIC_INTR_CRED_RESET_COALESCE);
}
-static struct sk_buff *ionic_rx_skb_alloc(struct ionic_queue *q, unsigned int len,
- dma_addr_t *dma_addr)
+static struct page *ionic_rx_page_alloc(struct ionic_queue *q,
+ dma_addr_t *dma_addr)
{
struct ionic_lif *lif = q->lif;
struct ionic_rx_stats *stats;
struct net_device *netdev;
- struct sk_buff *skb;
struct device *dev;
+ struct page *page;
netdev = lif->netdev;
dev = lif->ionic->dev;
stats = q_to_rx_stats(q);
- skb = netdev_alloc_skb_ip_align(netdev, len);
- if (!skb) {
- net_warn_ratelimited("%s: SKB alloc failed on %s!\n",
- netdev->name, q->name);
+ page = alloc_page(GFP_ATOMIC);
+ if (unlikely(!page)) {
+ net_err_ratelimited("%s: Page alloc failed on %s!\n",
+ netdev->name, q->name);
stats->alloc_err++;
return NULL;
}
- *dma_addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
- if (dma_mapping_error(dev, *dma_addr)) {
- dev_kfree_skb(skb);
- net_warn_ratelimited("%s: DMA single map failed on %s!\n",
- netdev->name, q->name);
+ *dma_addr = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(dev, *dma_addr))) {
+ __free_page(page);
+ net_err_ratelimited("%s: DMA single map failed on %s!\n",
+ netdev->name, q->name);
stats->dma_map_err++;
return NULL;
}
- return skb;
+ return page;
+}
+
+static void ionic_rx_page_free(struct ionic_queue *q, struct page *page,
+ dma_addr_t dma_addr)
+{
+ struct ionic_lif *lif = q->lif;
+ struct net_device *netdev;
+ struct device *dev;
+
+ netdev = lif->netdev;
+ dev = lif->ionic->dev;
+
+ if (unlikely(!page)) {
+ net_err_ratelimited("%s: Trying to free unallocated buffer on %s!\n",
+ netdev->name, q->name);
+ return;
+ }
+
+ dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);
+
+ __free_page(page);
}
-#define IONIC_RX_RING_DOORBELL_STRIDE ((1 << 2) - 1)
+#define IONIC_RX_RING_DOORBELL_STRIDE ((1 << 5) - 1)
+#define IONIC_RX_RING_HEAD_BUF_SZ 2048
void ionic_rx_fill(struct ionic_queue *q)
{
struct net_device *netdev = q->lif->netdev;
+ struct ionic_desc_info *desc_info;
+ struct ionic_page_info *page_info;
+ struct ionic_rxq_sg_desc *sg_desc;
+ struct ionic_rxq_sg_elem *sg_elem;
struct ionic_rxq_desc *desc;
- struct sk_buff *skb;
- dma_addr_t dma_addr;
+ unsigned int nfrags;
bool ring_doorbell;
+ unsigned int i, j;
unsigned int len;
- unsigned int i;
len = netdev->mtu + ETH_HLEN;
+ nfrags = round_up(len, PAGE_SIZE) / PAGE_SIZE;
for (i = ionic_q_space_avail(q); i; i--) {
- skb = ionic_rx_skb_alloc(q, len, &dma_addr);
- if (!skb)
- return;
+ desc_info = q->head;
+ desc = desc_info->desc;
+ sg_desc = desc_info->sg_desc;
+ page_info = &desc_info->pages[0];
+
+ if (page_info->page) { /* recycle the buffer */
+ ring_doorbell = ((q->head->index + 1) &
+ IONIC_RX_RING_DOORBELL_STRIDE) == 0;
+ ionic_rxq_post(q, ring_doorbell, ionic_rx_clean, NULL);
+ continue;
+ }
- desc = q->head->desc;
- desc->addr = cpu_to_le64(dma_addr);
- desc->len = cpu_to_le16(len);
- desc->opcode = IONIC_RXQ_DESC_OPCODE_SIMPLE;
+ /* fill main descriptor - pages[0] */
+ desc->opcode = (nfrags > 1) ? IONIC_RXQ_DESC_OPCODE_SG :
+ IONIC_RXQ_DESC_OPCODE_SIMPLE;
+ desc_info->npages = nfrags;
+ page_info->page = ionic_rx_page_alloc(q, &page_info->dma_addr);
+ if (unlikely(!page_info->page)) {
+ desc->addr = 0;
+ desc->len = 0;
+ return;
+ }
+ desc->addr = cpu_to_le64(page_info->dma_addr);
+ desc->len = cpu_to_le16(PAGE_SIZE);
+ page_info++;
+
+ /* fill sg descriptors - pages[1..n] */
+ for (j = 0; j < nfrags - 1; j++) {
+ if (page_info->page) /* recycle the sg buffer */
+ continue;
+
+ sg_elem = &sg_desc->elems[j];
+ page_info->page = ionic_rx_page_alloc(q, &page_info->dma_addr);
+ if (unlikely(!page_info->page)) {
+ sg_elem->addr = 0;
+ sg_elem->len = 0;
+ return;
+ }
+ sg_elem->addr = cpu_to_le64(page_info->dma_addr);
+ sg_elem->len = cpu_to_le16(PAGE_SIZE);
+ page_info++;
+ }
ring_doorbell = ((q->head->index + 1) &
IONIC_RX_RING_DOORBELL_STRIDE) == 0;
-
- ionic_rxq_post(q, ring_doorbell, ionic_rx_clean, skb);
+ ionic_rxq_post(q, ring_doorbell, ionic_rx_clean, NULL);
}
}
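[Editor's note: the doorbell stride grows from every 4th to every 32nd posted descriptor; the ((1 << 5) - 1) mask makes (index + 1) & mask == 0 true exactly once per 32 postings. A standalone check of that arithmetic:]

	#include <stdio.h>

	#define DOORBELL_STRIDE ((1 << 5) - 1)	/* 31: ring every 32nd post */

	int main(void)
	{
		unsigned int index, rings = 0;

		for (index = 0; index < 128; index++)
			if (((index + 1) & DOORBELL_STRIDE) == 0)
				rings++;	/* batched notification to the NIC */

		printf("%u doorbells for 128 postings\n", rings);	/* prints 4 */
		return 0;
	}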
@@ -283,15 +402,24 @@ static void ionic_rx_fill_cb(void *arg)
void ionic_rx_empty(struct ionic_queue *q)
{
- struct device *dev = q->lif->ionic->dev;
struct ionic_desc_info *cur;
struct ionic_rxq_desc *desc;
+ unsigned int i;
for (cur = q->tail; cur != q->head; cur = cur->next) {
desc = cur->desc;
- dma_unmap_single(dev, le64_to_cpu(desc->addr),
- le16_to_cpu(desc->len), DMA_FROM_DEVICE);
- dev_kfree_skb(cur->cb_arg);
+ desc->addr = 0;
+ desc->len = 0;
+
+ for (i = 0; i < cur->npages; i++) {
+ if (likely(cur->pages[i].page)) {
+ ionic_rx_page_free(q, cur->pages[i].page,
+ cur->pages[i].dma_addr);
+ cur->pages[i].page = NULL;
+ cur->pages[i].dma_addr = 0;
+ }
+ }
+
cur->cb_arg = NULL;
}
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h
index d473b522afc5..9ad568d93ae6 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.h
@@ -37,14 +37,14 @@
#include <linux/slab.h>
#include "qed.h"
-/* Fields of IGU PF CONFIGRATION REGISTER */
+/* Fields of IGU PF CONFIGURATION REGISTER */
#define IGU_PF_CONF_FUNC_EN (0x1 << 0) /* function enable */
#define IGU_PF_CONF_MSI_MSIX_EN (0x1 << 1) /* MSI/MSIX enable */
#define IGU_PF_CONF_INT_LINE_EN (0x1 << 2) /* INT enable */
#define IGU_PF_CONF_ATTN_BIT_EN (0x1 << 3) /* attention enable */
#define IGU_PF_CONF_SINGLE_ISR_EN (0x1 << 4) /* single ISR mode enable */
#define IGU_PF_CONF_SIMD_MODE (0x1 << 5) /* simd all ones mode */
-/* Fields of IGU VF CONFIGRATION REGISTER */
+/* Fields of IGU VF CONFIGURATION REGISTER */
#define IGU_VF_CONF_FUNC_EN (0x1 << 0) /* function enable */
#define IGU_VF_CONF_MSI_MSIX_EN (0x1 << 1) /* MSI/MSIX enable */
#define IGU_VF_CONF_SINGLE_ISR_EN (0x1 << 4) /* single ISR mode enable */
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
index 9a8fd79611f2..368e88565783 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
@@ -305,7 +305,7 @@ void qed_iov_bulletin_set_udp_ports(struct qed_hwfn *p_hwfn,
/**
* @brief Read sriov related information and allocated resources
- * reads from configuraiton space, shmem, etc.
+ * reads from configuration space, shmem, etc.
*
* @param p_hwfn
*
diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c
index 9a6a9a008714..d6cfe4ffbaf3 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_filter.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c
@@ -1298,7 +1298,7 @@ void qede_config_rx_mode(struct net_device *ndev)
rx_mode.type = QED_FILTER_TYPE_RX_MODE;
/* Remove all previous unicast secondary macs and multicast macs
- * (configrue / leave the primary mac)
+ * (configure / leave the primary mac)
*/
rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_REPLACE,
edev->ndev->dev_addr);
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index a220cc7c947a..481b096e984d 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -2115,12 +2115,8 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
if (rc)
goto out;
- fp->rxq->xdp_prog = bpf_prog_add(edev->xdp_prog, 1);
- if (IS_ERR(fp->rxq->xdp_prog)) {
- rc = PTR_ERR(fp->rxq->xdp_prog);
- fp->rxq->xdp_prog = NULL;
- goto out;
- }
+ bpf_prog_add(edev->xdp_prog, 1);
+ fp->rxq->xdp_prog = edev->xdp_prog;
}
if (fp->type & QEDE_FASTPATH_TX) {
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
index c84ab052ef26..98f92268cbaa 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac.c
@@ -213,9 +213,9 @@ static int emac_change_mtu(struct net_device *netdev, int new_mtu)
{
struct emac_adapter *adpt = netdev_priv(netdev);
- netif_info(adpt, hw, adpt->netdev,
- "changing MTU from %d to %d\n", netdev->mtu,
- new_mtu);
+ netif_dbg(adpt, hw, adpt->netdev,
+ "changing MTU from %d to %d\n", netdev->mtu,
+ new_mtu);
netdev->mtu = new_mtu;
if (netif_running(netdev))
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
index 5ecf61df78bd..baac016f3ec0 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.c
+++ b/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -363,7 +363,7 @@ qcaspi_receive(struct qcaspi *qca)
netdev_dbg(net_dev, "qcaspi_receive: SPI_REG_RDBUF_BYTE_AVA: Value: %08x\n",
available);
- if (available > QCASPI_HW_BUF_LEN) {
+ if (available > QCASPI_HW_BUF_LEN + QCASPI_HW_PKT_LEN) {
/* This could only happen due to interference on the SPI line.
* So retry later ...
*/
@@ -496,7 +496,6 @@ qcaspi_qca7k_sync(struct qcaspi *qca, int event)
u16 signature = 0;
u16 spi_config;
u16 wrbuf_space = 0;
- static u16 reset_count;
if (event == QCASPI_EVENT_CPUON) {
/* Read signature twice, if not valid
@@ -549,13 +548,13 @@ qcaspi_qca7k_sync(struct qcaspi *qca, int event)
qca->sync = QCASPI_SYNC_RESET;
qca->stats.trig_reset++;
- reset_count = 0;
+ qca->reset_count = 0;
break;
case QCASPI_SYNC_RESET:
- reset_count++;
+ qca->reset_count++;
netdev_dbg(qca->net_dev, "sync: waiting for CPU on, count %u.\n",
- reset_count);
- if (reset_count >= QCASPI_RESET_TIMEOUT) {
+ qca->reset_count);
+ if (qca->reset_count >= QCASPI_RESET_TIMEOUT) {
/* reset did not seem to take place, try again */
qca->sync = QCASPI_SYNC_UNKNOWN;
qca->stats.reset_timeout++;
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.h b/drivers/net/ethernet/qualcomm/qca_spi.h
index eb9af45fcc5e..d13a67e20d65 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.h
+++ b/drivers/net/ethernet/qualcomm/qca_spi.h
@@ -94,6 +94,7 @@ struct qcaspi {
unsigned int intr_req;
unsigned int intr_svc;
+ u16 reset_count;
#ifdef CONFIG_DEBUG_FS
struct dentry *device_root;
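[Editor's note: moving reset_count out of function-static storage into struct qcaspi makes the counter per-adapter; a function-local static is shared by every instance of the device, as this toy standalone demo shows.]

	#include <stdio.h>

	struct dev { unsigned int reset_count; };

	/* buggy pattern: one counter shared by every device */
	static unsigned int shared_count;

	static void sync_shared(void) { shared_count++; }
	static void sync_perdev(struct dev *d) { d->reset_count++; }

	int main(void)
	{
		struct dev a = {0}, b = {0};

		sync_shared(); sync_shared();	/* two devices bump one counter */
		sync_perdev(&a); sync_perdev(&b);

		printf("shared=%u a=%u b=%u\n", shared_count, a.reset_count, b.reset_count);
		return 0;	/* shared=2, a=1, b=1 */
	}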
diff --git a/drivers/net/ethernet/realtek/r8169_firmware.c b/drivers/net/ethernet/realtek/r8169_firmware.c
index 8f54a2c832eb..355cc810e322 100644
--- a/drivers/net/ethernet/realtek/r8169_firmware.c
+++ b/drivers/net/ethernet/realtek/r8169_firmware.c
@@ -37,7 +37,7 @@ struct fw_info {
u8 chksum;
} __packed;
-#define FW_OPCODE_SIZE sizeof(typeof(*((struct rtl_fw_phy_action *)0)->code))
+#define FW_OPCODE_SIZE FIELD_SIZEOF(struct rtl_fw_phy_action, code[0])
static bool rtl_fw_format_ok(struct rtl_fw *rtl_fw)
{
@@ -92,19 +92,24 @@ static bool rtl_fw_data_ok(struct rtl_fw *rtl_fw)
for (index = 0; index < pa->size; index++) {
u32 action = le32_to_cpu(pa->code[index]);
+ u32 val = action & 0x0000ffff;
u32 regno = (action & 0x0fff0000) >> 16;
switch (action >> 28) {
case PHY_READ:
case PHY_DATA_OR:
case PHY_DATA_AND:
- case PHY_MDIO_CHG:
case PHY_CLEAR_READCOUNT:
case PHY_WRITE:
case PHY_WRITE_PREVIOUS:
case PHY_DELAY_MS:
break;
+ case PHY_MDIO_CHG:
+ if (val > 1)
+ goto out;
+ break;
+
case PHY_BJMPN:
if (regno > index)
goto out;
@@ -164,12 +169,12 @@ void rtl_fw_write_firmware(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
index -= (regno + 1);
break;
case PHY_MDIO_CHG:
- if (data == 0) {
- fw_write = rtl_fw->phy_write;
- fw_read = rtl_fw->phy_read;
- } else if (data == 1) {
+ if (data) {
fw_write = rtl_fw->mac_mcu_write;
fw_read = rtl_fw->mac_mcu_read;
+ } else {
+ fw_write = rtl_fw->phy_write;
+ fw_read = rtl_fw->phy_read;
}
break;
@@ -198,7 +203,7 @@ void rtl_fw_write_firmware(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
index += regno;
break;
case PHY_DELAY_MS:
- mdelay(data);
+ msleep(data);
break;
}
}
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index c4e961ea44d5..d47a038cb8d0 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -52,6 +52,7 @@
#define FIRMWARE_8168G_3 "rtl_nic/rtl8168g-3.fw"
#define FIRMWARE_8168H_1 "rtl_nic/rtl8168h-1.fw"
#define FIRMWARE_8168H_2 "rtl_nic/rtl8168h-2.fw"
+#define FIRMWARE_8168FP_3 "rtl_nic/rtl8168fp-3.fw"
#define FIRMWARE_8107E_1 "rtl_nic/rtl8107e-1.fw"
#define FIRMWARE_8107E_2 "rtl_nic/rtl8107e-2.fw"
#define FIRMWARE_8125A_3 "rtl_nic/rtl8125a-3.fw"
@@ -135,6 +136,7 @@ enum mac_version {
RTL_GIGA_MAC_VER_49,
RTL_GIGA_MAC_VER_50,
RTL_GIGA_MAC_VER_51,
+ RTL_GIGA_MAC_VER_52,
RTL_GIGA_MAC_VER_60,
RTL_GIGA_MAC_VER_61,
RTL_GIGA_MAC_NONE
@@ -202,6 +204,7 @@ static const struct {
[RTL_GIGA_MAC_VER_49] = {"RTL8168ep/8111ep" },
[RTL_GIGA_MAC_VER_50] = {"RTL8168ep/8111ep" },
[RTL_GIGA_MAC_VER_51] = {"RTL8168ep/8111ep" },
+ [RTL_GIGA_MAC_VER_52] = {"RTL8168fp/RTL8117", FIRMWARE_8168FP_3},
[RTL_GIGA_MAC_VER_60] = {"RTL8125" },
[RTL_GIGA_MAC_VER_61] = {"RTL8125", FIRMWARE_8125A_3},
};
@@ -680,6 +683,7 @@ struct rtl8169_private {
struct rtl8169_counters *counters;
struct rtl8169_tc_offsets tc_offset;
u32 saved_wolopts;
+ int eee_adv;
const char *fw_name;
struct rtl_fw *rtl_fw;
@@ -712,6 +716,7 @@ MODULE_FIRMWARE(FIRMWARE_8168G_2);
MODULE_FIRMWARE(FIRMWARE_8168G_3);
MODULE_FIRMWARE(FIRMWARE_8168H_1);
MODULE_FIRMWARE(FIRMWARE_8168H_2);
+MODULE_FIRMWARE(FIRMWARE_8168FP_3);
MODULE_FIRMWARE(FIRMWARE_8107E_1);
MODULE_FIRMWARE(FIRMWARE_8107E_2);
MODULE_FIRMWARE(FIRMWARE_8125A_3);
@@ -741,12 +746,6 @@ static void rtl_unlock_config_regs(struct rtl8169_private *tp)
RTL_W8(tp, Cfg9346, Cfg9346_Unlock);
}
-static void rtl_tx_performance_tweak(struct rtl8169_private *tp, u16 force)
-{
- pcie_capability_clear_and_set_word(tp->pci_dev, PCI_EXP_DEVCTL,
- PCI_EXP_DEVCTL_READRQ, force);
-}
-
static bool rtl_is_8125(struct rtl8169_private *tp)
{
return tp->mac_version >= RTL_GIGA_MAC_VER_60;
@@ -756,7 +755,7 @@ static bool rtl_is_8168evl_up(struct rtl8169_private *tp)
{
return tp->mac_version >= RTL_GIGA_MAC_VER_34 &&
tp->mac_version != RTL_GIGA_MAC_VER_39 &&
- tp->mac_version <= RTL_GIGA_MAC_VER_51;
+ tp->mac_version <= RTL_GIGA_MAC_VER_52;
}
static bool rtl_supports_eee(struct rtl8169_private *tp)
@@ -1092,6 +1091,39 @@ static void rtl_w0w1_phy(struct rtl8169_private *tp, int reg_addr, int p, int m)
rtl_writephy(tp, reg_addr, (val & ~m) | p);
}
+static void r8168d_modify_extpage(struct phy_device *phydev, int extpage,
+ int reg, u16 mask, u16 val)
+{
+ int oldpage = phy_select_page(phydev, 0x0007);
+
+ __phy_write(phydev, 0x1e, extpage);
+ __phy_modify(phydev, reg, mask, val);
+
+ phy_restore_page(phydev, oldpage, 0);
+}
+
+static void r8168d_phy_param(struct phy_device *phydev, u16 parm,
+ u16 mask, u16 val)
+{
+ int oldpage = phy_select_page(phydev, 0x0005);
+
+ __phy_write(phydev, 0x05, parm);
+ __phy_modify(phydev, 0x06, mask, val);
+
+ phy_restore_page(phydev, oldpage, 0);
+}
+
+static void r8168g_phy_param(struct phy_device *phydev, u16 parm,
+ u16 mask, u16 val)
+{
+ int oldpage = phy_select_page(phydev, 0x0a43);
+
+ __phy_write(phydev, 0x13, parm);
+ __phy_modify(phydev, 0x14, mask, val);
+
+ phy_restore_page(phydev, oldpage, 0);
+}
+
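[Editor's note: these helpers wrap phylib's paged access: phy_select_page() locks the MDIO bus and returns the previous page, and phy_restore_page() switches back and unlocks. With them, an open-coded page dance shrinks to one call, as in the rtl8168f EEE change further down:]

	/* before: select page 5, write the parameter index, modify, go back */
	phy_write(phydev, 0x1f, 0x0005);
	phy_write(phydev, 0x05, 0x8b85);
	phy_set_bits(phydev, 0x06, BIT(13));
	phy_write(phydev, 0x1f, 0x0000);

	/* after: one paged-parameter helper call */
	r8168d_phy_param(phydev, 0x8b85, 0, BIT(13));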
DECLARE_RTL_COND(rtl_ephyar_cond)
{
return RTL_R32(tp, EPHYAR) & EPHYAR_FLAG;
@@ -1262,9 +1294,7 @@ static void rtl8168_driver_start(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_31:
rtl8168dp_driver_start(tp);
break;
- case RTL_GIGA_MAC_VER_49:
- case RTL_GIGA_MAC_VER_50:
- case RTL_GIGA_MAC_VER_51:
+ case RTL_GIGA_MAC_VER_49 ... RTL_GIGA_MAC_VER_52:
rtl8168ep_driver_start(tp);
break;
default:
@@ -1296,9 +1326,7 @@ static void rtl8168_driver_stop(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_31:
rtl8168dp_driver_stop(tp);
break;
- case RTL_GIGA_MAC_VER_49:
- case RTL_GIGA_MAC_VER_50:
- case RTL_GIGA_MAC_VER_51:
+ case RTL_GIGA_MAC_VER_49 ... RTL_GIGA_MAC_VER_52:
rtl8168ep_driver_stop(tp);
break;
default:
@@ -1326,9 +1354,7 @@ static bool r8168_check_dash(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_28:
case RTL_GIGA_MAC_VER_31:
return r8168dp_check_dash(tp);
- case RTL_GIGA_MAC_VER_49:
- case RTL_GIGA_MAC_VER_50:
- case RTL_GIGA_MAC_VER_51:
+ case RTL_GIGA_MAC_VER_49 ... RTL_GIGA_MAC_VER_52:
return r8168ep_check_dash(tp);
default:
return false;
@@ -1503,7 +1529,7 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
break;
case RTL_GIGA_MAC_VER_34:
case RTL_GIGA_MAC_VER_37:
- case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_51:
+ case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_52:
options = RTL_R8(tp, Config2) & ~PME_SIGNAL;
if (wolopts)
options |= PME_SIGNAL;
@@ -1571,7 +1597,7 @@ static netdev_features_t rtl8169_fix_features(struct net_device *dev,
if (dev->mtu > JUMBO_1K &&
tp->mac_version > RTL_GIGA_MAC_VER_06)
- features &= ~NETIF_F_IP_CSUM;
+ features &= ~(NETIF_F_CSUM_MASK | NETIF_F_ALL_TSO);
return features;
}
@@ -2074,6 +2100,10 @@ static int rtl8169_set_eee(struct net_device *dev, struct ethtool_eee *data)
}
ret = phy_ethtool_set_eee(tp->phydev, data);
+
+ if (!ret)
+ tp->eee_adv = phy_read_mmd(dev->phydev, MDIO_MMD_AN,
+ MDIO_AN_EEE_ADV);
out:
pm_runtime_put_noidle(d);
return ret;
@@ -2104,10 +2134,16 @@ static const struct ethtool_ops rtl8169_ethtool_ops = {
static void rtl_enable_eee(struct rtl8169_private *tp)
{
struct phy_device *phydev = tp->phydev;
- int supported = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE);
+ int adv;
+
+ /* respect EEE advertisement the user may have set */
+ if (tp->eee_adv >= 0)
+ adv = tp->eee_adv;
+ else
+ adv = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE);
- if (supported > 0)
- phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, supported);
+ if (adv >= 0)
+ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, adv);
}
static void rtl8169_get_mac_version(struct rtl8169_private *tp)
@@ -2132,6 +2168,9 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp)
{ 0x7cf, 0x608, RTL_GIGA_MAC_VER_60 },
{ 0x7c8, 0x608, RTL_GIGA_MAC_VER_61 },
+ /* RTL8117 */
+ { 0x7cf, 0x54a, RTL_GIGA_MAC_VER_52 },
+
/* 8168EP family. */
{ 0x7cf, 0x502, RTL_GIGA_MAC_VER_51 },
{ 0x7cf, 0x501, RTL_GIGA_MAC_VER_50 },
@@ -2260,14 +2299,6 @@ static void rtl_apply_firmware(struct rtl8169_private *tp)
rtl_fw_write_firmware(tp, tp->rtl_fw);
}
-static void rtl_apply_firmware_cond(struct rtl8169_private *tp, u8 reg, u16 val)
-{
- if (rtl_readphy(tp, reg) != val)
- netif_warn(tp, hw, tp->dev, "chipset not ready for firmware\n");
- else
- rtl_apply_firmware(tp);
-}
-
static void rtl8168_config_eee_mac(struct rtl8169_private *tp)
{
/* Adjust EEE LED frequency */
@@ -2287,15 +2318,8 @@ static void rtl8168f_config_eee_phy(struct rtl8169_private *tp)
{
struct phy_device *phydev = tp->phydev;
- phy_write(phydev, 0x1f, 0x0007);
- phy_write(phydev, 0x1e, 0x0020);
- phy_set_bits(phydev, 0x15, BIT(8));
-
- phy_write(phydev, 0x1f, 0x0005);
- phy_write(phydev, 0x05, 0x8b85);
- phy_set_bits(phydev, 0x06, BIT(13));
-
- phy_write(phydev, 0x1f, 0x0000);
+ r8168d_modify_extpage(phydev, 0x0020, 0x15, 0, BIT(8));
+ r8168d_phy_param(phydev, 0x8b85, 0, BIT(13));
}
static void rtl8168g_config_eee_phy(struct rtl8169_private *tp)
@@ -2392,13 +2416,7 @@ static void rtl8169s_hw_phy_config(struct rtl8169_private *tp)
static void rtl8169sb_hw_phy_config(struct rtl8169_private *tp)
{
- static const struct phy_reg phy_reg_init[] = {
- { 0x1f, 0x0002 },
- { 0x01, 0x90d0 },
- { 0x1f, 0x0000 }
- };
-
- rtl_writephy_batch(tp, phy_reg_init);
+ phy_write_paged(tp->phydev, 0x0002, 0x01, 0x90d0);
}
static void rtl8169scd_hw_phy_config_quirk(struct rtl8169_private *tp)
@@ -2409,9 +2427,7 @@ static void rtl8169scd_hw_phy_config_quirk(struct rtl8169_private *tp)
(pdev->subsystem_device != 0xe000))
return;
- rtl_writephy(tp, 0x1f, 0x0001);
- rtl_writephy(tp, 0x10, 0xf01b);
- rtl_writephy(tp, 0x1f, 0x0000);
+ phy_write_paged(tp->phydev, 0x0001, 0x10, 0xf01b);
}
static void rtl8169scd_hw_phy_config(struct rtl8169_private *tp)
@@ -2516,54 +2532,28 @@ static void rtl8169sce_hw_phy_config(struct rtl8169_private *tp)
static void rtl8168bb_hw_phy_config(struct rtl8169_private *tp)
{
- static const struct phy_reg phy_reg_init[] = {
- { 0x10, 0xf41b },
- { 0x1f, 0x0000 }
- };
-
rtl_writephy(tp, 0x1f, 0x0001);
rtl_patchphy(tp, 0x16, 1 << 0);
-
- rtl_writephy_batch(tp, phy_reg_init);
+ rtl_writephy(tp, 0x10, 0xf41b);
+ rtl_writephy(tp, 0x1f, 0x0000);
}
static void rtl8168bef_hw_phy_config(struct rtl8169_private *tp)
{
- static const struct phy_reg phy_reg_init[] = {
- { 0x1f, 0x0001 },
- { 0x10, 0xf41b },
- { 0x1f, 0x0000 }
- };
-
- rtl_writephy_batch(tp, phy_reg_init);
+ phy_write_paged(tp->phydev, 0x0001, 0x10, 0xf41b);
}
static void rtl8168cp_1_hw_phy_config(struct rtl8169_private *tp)
{
- static const struct phy_reg phy_reg_init[] = {
- { 0x1f, 0x0000 },
- { 0x1d, 0x0f00 },
- { 0x1f, 0x0002 },
- { 0x0c, 0x1ec8 },
- { 0x1f, 0x0000 }
- };
-
- rtl_writephy_batch(tp, phy_reg_init);
+ phy_write(tp->phydev, 0x1d, 0x0f00);
+ phy_write_paged(tp->phydev, 0x0002, 0x0c, 0x1ec8);
}
static void rtl8168cp_2_hw_phy_config(struct rtl8169_private *tp)
{
- static const struct phy_reg phy_reg_init[] = {
- { 0x1f, 0x0001 },
- { 0x1d, 0x3d98 },
- { 0x1f, 0x0000 }
- };
-
- rtl_writephy(tp, 0x1f, 0x0000);
- rtl_patchphy(tp, 0x14, 1 << 5);
- rtl_patchphy(tp, 0x0d, 1 << 5);
-
- rtl_writephy_batch(tp, phy_reg_init);
+ phy_set_bits(tp->phydev, 0x14, BIT(5));
+ phy_set_bits(tp->phydev, 0x0d, BIT(5));
+ phy_write_paged(tp->phydev, 0x0001, 0x1d, 0x3d98);
}
static void rtl8168c_1_hw_phy_config(struct rtl8169_private *tp)
@@ -2645,11 +2635,6 @@ static void rtl8168c_3_hw_phy_config(struct rtl8169_private *tp)
rtl_writephy(tp, 0x1f, 0x0000);
}
-static void rtl8168c_4_hw_phy_config(struct rtl8169_private *tp)
-{
- rtl8168c_3_hw_phy_config(tp);
-}
-
static const struct phy_reg rtl8168d_1_phy_reg_init_0[] = {
/* Channel Estimation */
{ 0x1f, 0x0001 },
@@ -2700,6 +2685,21 @@ static const struct phy_reg rtl8168d_1_phy_reg_init_1[] = {
{ 0x1f, 0x0002 }
};
+static void rtl8168d_apply_firmware_cond(struct rtl8169_private *tp, u16 val)
+{
+ u16 reg_val;
+
+ rtl_writephy(tp, 0x1f, 0x0005);
+ rtl_writephy(tp, 0x05, 0x001b);
+ reg_val = rtl_readphy(tp, 0x06);
+ rtl_writephy(tp, 0x1f, 0x0000);
+
+ if (reg_val != val)
+ netif_warn(tp, hw, tp->dev, "chipset not ready for firmware\n");
+ else
+ rtl_apply_firmware(tp);
+}
+
static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp)
{
rtl_writephy_batch(tp, rtl8168d_1_phy_reg_init_0);
@@ -2733,15 +2733,8 @@ static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp)
rtl_writephy(tp, 0x0d, val | set[i]);
}
} else {
- static const struct phy_reg phy_reg_init[] = {
- { 0x1f, 0x0002 },
- { 0x05, 0x6662 },
- { 0x1f, 0x0005 },
- { 0x05, 0x8330 },
- { 0x06, 0x6662 }
- };
-
- rtl_writephy_batch(tp, phy_reg_init);
+ phy_write_paged(tp->phydev, 0x0002, 0x05, 0x6662);
+ r8168d_phy_param(tp->phydev, 0x8330, 0xffff, 0x6662);
}
/* RSET couple improve */
@@ -2753,13 +2746,9 @@ static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp)
rtl_writephy(tp, 0x1f, 0x0002);
rtl_w0w1_phy(tp, 0x02, 0x0100, 0x0600);
rtl_w0w1_phy(tp, 0x03, 0x0000, 0xe000);
-
- rtl_writephy(tp, 0x1f, 0x0005);
- rtl_writephy(tp, 0x05, 0x001b);
-
- rtl_apply_firmware_cond(tp, MII_EXPANSION, 0xbf00);
-
rtl_writephy(tp, 0x1f, 0x0000);
+
+ rtl8168d_apply_firmware_cond(tp, 0xbf00);
}
static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp)
@@ -2786,15 +2775,8 @@ static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp)
rtl_writephy(tp, 0x0d, val | set[i]);
}
} else {
- static const struct phy_reg phy_reg_init[] = {
- { 0x1f, 0x0002 },
- { 0x05, 0x2642 },
- { 0x1f, 0x0005 },
- { 0x05, 0x8330 },
- { 0x06, 0x2642 }
- };
-
- rtl_writephy_batch(tp, phy_reg_init);
+ phy_write_paged(tp->phydev, 0x0002, 0x05, 0x2642);
+ r8168d_phy_param(tp->phydev, 0x8330, 0xffff, 0x2642);
}
/* Fine tune PLL performance */
@@ -2805,13 +2787,9 @@ static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp)
/* Switching regulator Slew rate */
rtl_writephy(tp, 0x1f, 0x0002);
rtl_patchphy(tp, 0x0f, 0x0017);
-
- rtl_writephy(tp, 0x1f, 0x0005);
- rtl_writephy(tp, 0x05, 0x001b);
-
- rtl_apply_firmware_cond(tp, MII_EXPANSION, 0xb300);
-
rtl_writephy(tp, 0x1f, 0x0000);
+
+ rtl8168d_apply_firmware_cond(tp, 0xb300);
}
static void rtl8168d_3_hw_phy_config(struct rtl8169_private *tp)
@@ -2865,41 +2843,23 @@ static void rtl8168d_3_hw_phy_config(struct rtl8169_private *tp)
{ 0x04, 0xf800 },
{ 0x04, 0xf000 },
{ 0x1f, 0x0000 },
-
- { 0x1f, 0x0007 },
- { 0x1e, 0x0023 },
- { 0x16, 0x0000 },
- { 0x1f, 0x0000 }
};
rtl_writephy_batch(tp, phy_reg_init);
+
+ r8168d_modify_extpage(tp->phydev, 0x0023, 0x16, 0xffff, 0x0000);
}
static void rtl8168d_4_hw_phy_config(struct rtl8169_private *tp)
{
- static const struct phy_reg phy_reg_init[] = {
- { 0x1f, 0x0001 },
- { 0x17, 0x0cc0 },
-
- { 0x1f, 0x0007 },
- { 0x1e, 0x002d },
- { 0x18, 0x0040 },
- { 0x1f, 0x0000 }
- };
-
- rtl_writephy_batch(tp, phy_reg_init);
- rtl_patchphy(tp, 0x0d, 1 << 5);
+ phy_write_paged(tp->phydev, 0x0001, 0x17, 0x0cc0);
+ r8168d_modify_extpage(tp->phydev, 0x002d, 0x18, 0xffff, 0x0040);
+ phy_set_bits(tp->phydev, 0x0d, BIT(5));
}
static void rtl8168e_1_hw_phy_config(struct rtl8169_private *tp)
{
static const struct phy_reg phy_reg_init[] = {
- /* Enable Delay cap */
- { 0x1f, 0x0005 },
- { 0x05, 0x8b80 },
- { 0x06, 0xc896 },
- { 0x1f, 0x0000 },
-
/* Channel estimation fine tune */
{ 0x1f, 0x0001 },
{ 0x0b, 0x6c20 },
@@ -2908,60 +2868,38 @@ static void rtl8168e_1_hw_phy_config(struct rtl8169_private *tp)
{ 0x1f, 0x0003 },
{ 0x14, 0x6420 },
{ 0x1f, 0x0000 },
-
- /* Update PFM & 10M TX idle timer */
- { 0x1f, 0x0007 },
- { 0x1e, 0x002f },
- { 0x15, 0x1919 },
- { 0x1f, 0x0000 },
-
- { 0x1f, 0x0007 },
- { 0x1e, 0x00ac },
- { 0x18, 0x0006 },
- { 0x1f, 0x0000 }
};
+ struct phy_device *phydev = tp->phydev;
rtl_apply_firmware(tp);
+ /* Enable Delay cap */
+ r8168d_phy_param(phydev, 0x8b80, 0xffff, 0xc896);
+
rtl_writephy_batch(tp, phy_reg_init);
+ /* Update PFM & 10M TX idle timer */
+ r8168d_modify_extpage(phydev, 0x002f, 0x15, 0xffff, 0x1919);
+
+ r8168d_modify_extpage(phydev, 0x00ac, 0x18, 0xffff, 0x0006);
+
/* DCO enable for 10M IDLE Power */
- rtl_writephy(tp, 0x1f, 0x0007);
- rtl_writephy(tp, 0x1e, 0x0023);
- rtl_w0w1_phy(tp, 0x17, 0x0006, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0000);
+ r8168d_modify_extpage(phydev, 0x0023, 0x17, 0x0000, 0x0006);
/* For impedance matching */
- rtl_writephy(tp, 0x1f, 0x0002);
- rtl_w0w1_phy(tp, 0x08, 0x8000, 0x7f00);
- rtl_writephy(tp, 0x1f, 0x0000);
+ phy_modify_paged(phydev, 0x0002, 0x08, 0x7f00, 0x8000);
/* PHY auto speed down */
- rtl_writephy(tp, 0x1f, 0x0007);
- rtl_writephy(tp, 0x1e, 0x002d);
- rtl_w0w1_phy(tp, 0x18, 0x0050, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0000);
- rtl_w0w1_phy(tp, 0x14, 0x8000, 0x0000);
+ r8168d_modify_extpage(phydev, 0x002d, 0x18, 0x0000, 0x0050);
+ phy_set_bits(phydev, 0x14, BIT(15));
- rtl_writephy(tp, 0x1f, 0x0005);
- rtl_writephy(tp, 0x05, 0x8b86);
- rtl_w0w1_phy(tp, 0x06, 0x0001, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0000);
+ r8168d_phy_param(phydev, 0x8b86, 0x0000, 0x0001);
+ r8168d_phy_param(phydev, 0x8b85, 0x2000, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0005);
- rtl_writephy(tp, 0x05, 0x8b85);
- rtl_w0w1_phy(tp, 0x06, 0x0000, 0x2000);
- rtl_writephy(tp, 0x1f, 0x0007);
- rtl_writephy(tp, 0x1e, 0x0020);
- rtl_w0w1_phy(tp, 0x15, 0x0000, 0x1100);
- rtl_writephy(tp, 0x1f, 0x0006);
- rtl_writephy(tp, 0x00, 0x5a00);
- rtl_writephy(tp, 0x1f, 0x0000);
- rtl_writephy(tp, 0x0d, 0x0007);
- rtl_writephy(tp, 0x0e, 0x003c);
- rtl_writephy(tp, 0x0d, 0x4007);
- rtl_writephy(tp, 0x0e, 0x0000);
- rtl_writephy(tp, 0x0d, 0x0000);
+ r8168d_modify_extpage(phydev, 0x0020, 0x15, 0x1100, 0x0000);
+ phy_write_paged(phydev, 0x0006, 0x00, 0x5a00);
+
+ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, 0x0000);
}
static void rtl_rar_exgmac_set(struct rtl8169_private *tp, u8 *addr)
@@ -2980,36 +2918,20 @@ static void rtl_rar_exgmac_set(struct rtl8169_private *tp, u8 *addr)
static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp)
{
- static const struct phy_reg phy_reg_init[] = {
- /* Enable Delay cap */
- { 0x1f, 0x0004 },
- { 0x1f, 0x0007 },
- { 0x1e, 0x00ac },
- { 0x18, 0x0006 },
- { 0x1f, 0x0002 },
- { 0x1f, 0x0000 },
- { 0x1f, 0x0000 },
+ struct phy_device *phydev = tp->phydev;
- /* Channel estimation fine tune */
- { 0x1f, 0x0003 },
- { 0x09, 0xa20f },
- { 0x1f, 0x0000 },
- { 0x1f, 0x0000 },
+ rtl_apply_firmware(tp);
- /* Green Setting */
- { 0x1f, 0x0005 },
- { 0x05, 0x8b5b },
- { 0x06, 0x9222 },
- { 0x05, 0x8b6d },
- { 0x06, 0x8000 },
- { 0x05, 0x8b76 },
- { 0x06, 0x8000 },
- { 0x1f, 0x0000 }
- };
+ /* Enable Delay cap */
+ r8168d_modify_extpage(phydev, 0x00ac, 0x18, 0xffff, 0x0006);
- rtl_apply_firmware(tp);
+ /* Channel estimation fine tune */
+ phy_write_paged(phydev, 0x0003, 0x09, 0xa20f);
- rtl_writephy_batch(tp, phy_reg_init);
+ /* Green Setting */
+ r8168d_phy_param(phydev, 0x8b5b, 0xffff, 0x9222);
+ r8168d_phy_param(phydev, 0x8b6d, 0xffff, 0x8000);
+ r8168d_phy_param(phydev, 0x8b76, 0xffff, 0x8000);
/* For 4-corner performance improve */
rtl_writephy(tp, 0x1f, 0x0005);
@@ -3018,25 +2940,14 @@ static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp)
rtl_writephy(tp, 0x1f, 0x0000);
/* PHY auto speed down */
- rtl_writephy(tp, 0x1f, 0x0004);
- rtl_writephy(tp, 0x1f, 0x0007);
- rtl_writephy(tp, 0x1e, 0x002d);
- rtl_w0w1_phy(tp, 0x18, 0x0010, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0002);
- rtl_writephy(tp, 0x1f, 0x0000);
- rtl_w0w1_phy(tp, 0x14, 0x8000, 0x0000);
+ r8168d_modify_extpage(phydev, 0x002d, 0x18, 0x0000, 0x0010);
+ phy_set_bits(phydev, 0x14, BIT(15));
/* improve 10M EEE waveform */
- rtl_writephy(tp, 0x1f, 0x0005);
- rtl_writephy(tp, 0x05, 0x8b86);
- rtl_w0w1_phy(tp, 0x06, 0x0001, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0000);
+ r8168d_phy_param(phydev, 0x8b86, 0x0000, 0x0001);
/* Improve 2-pair detection performance */
- rtl_writephy(tp, 0x1f, 0x0005);
- rtl_writephy(tp, 0x05, 0x8b85);
- rtl_w0w1_phy(tp, 0x06, 0x4000, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0000);
+ r8168d_phy_param(phydev, 0x8b85, 0x0000, 0x4000);
rtl8168f_config_eee_phy(tp);
rtl_enable_eee(tp);
@@ -3056,24 +2967,17 @@ static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp)
static void rtl8168f_hw_phy_config(struct rtl8169_private *tp)
{
+ struct phy_device *phydev = tp->phydev;
+
/* For 4-corner performance improve */
- rtl_writephy(tp, 0x1f, 0x0005);
- rtl_writephy(tp, 0x05, 0x8b80);
- rtl_w0w1_phy(tp, 0x06, 0x0006, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0000);
+ r8168d_phy_param(phydev, 0x8b80, 0x0000, 0x0006);
/* PHY auto speed down */
- rtl_writephy(tp, 0x1f, 0x0007);
- rtl_writephy(tp, 0x1e, 0x002d);
- rtl_w0w1_phy(tp, 0x18, 0x0010, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0000);
- rtl_w0w1_phy(tp, 0x14, 0x8000, 0x0000);
+ r8168d_modify_extpage(phydev, 0x002d, 0x18, 0x0000, 0x0010);
+ phy_set_bits(phydev, 0x14, BIT(15));
/* Improve 10M EEE waveform */
- rtl_writephy(tp, 0x1f, 0x0005);
- rtl_writephy(tp, 0x05, 0x8b86);
- rtl_w0w1_phy(tp, 0x06, 0x0001, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0000);
+ r8168d_phy_param(phydev, 0x8b86, 0x0000, 0x0001);
rtl8168f_config_eee_phy(tp);
rtl_enable_eee(tp);
@@ -3081,52 +2985,31 @@ static void rtl8168f_hw_phy_config(struct rtl8169_private *tp)
static void rtl8168f_1_hw_phy_config(struct rtl8169_private *tp)
{
- static const struct phy_reg phy_reg_init[] = {
- /* Channel estimation fine tune */
- { 0x1f, 0x0003 },
- { 0x09, 0xa20f },
- { 0x1f, 0x0000 },
+ struct phy_device *phydev = tp->phydev;
- /* Modify green table for giga & fnet */
- { 0x1f, 0x0005 },
- { 0x05, 0x8b55 },
- { 0x06, 0x0000 },
- { 0x05, 0x8b5e },
- { 0x06, 0x0000 },
- { 0x05, 0x8b67 },
- { 0x06, 0x0000 },
- { 0x05, 0x8b70 },
- { 0x06, 0x0000 },
- { 0x1f, 0x0000 },
- { 0x1f, 0x0007 },
- { 0x1e, 0x0078 },
- { 0x17, 0x0000 },
- { 0x19, 0x00fb },
- { 0x1f, 0x0000 },
+ rtl_apply_firmware(tp);
- /* Modify green table for 10M */
- { 0x1f, 0x0005 },
- { 0x05, 0x8b79 },
- { 0x06, 0xaa00 },
- { 0x1f, 0x0000 },
+ /* Channel estimation fine tune */
+ phy_write_paged(phydev, 0x0003, 0x09, 0xa20f);
- /* Disable hiimpedance detection (RTCT) */
- { 0x1f, 0x0003 },
- { 0x01, 0x328a },
- { 0x1f, 0x0000 }
- };
+ /* Modify green table for giga & fnet */
+ r8168d_phy_param(phydev, 0x8b55, 0xffff, 0x0000);
+ r8168d_phy_param(phydev, 0x8b5e, 0xffff, 0x0000);
+ r8168d_phy_param(phydev, 0x8b67, 0xffff, 0x0000);
+ r8168d_phy_param(phydev, 0x8b70, 0xffff, 0x0000);
+ r8168d_modify_extpage(phydev, 0x0078, 0x17, 0xffff, 0x0000);
+ r8168d_modify_extpage(phydev, 0x0078, 0x19, 0xffff, 0x00fb);
- rtl_apply_firmware(tp);
+ /* Modify green table for 10M */
+ r8168d_phy_param(phydev, 0x8b79, 0xffff, 0xaa00);
- rtl_writephy_batch(tp, phy_reg_init);
+ /* Disable hiimpedance detection (RTCT) */
+ phy_write_paged(phydev, 0x0003, 0x01, 0x328a);
rtl8168f_hw_phy_config(tp);
/* Improve 2-pair detection performance */
- rtl_writephy(tp, 0x1f, 0x0005);
- rtl_writephy(tp, 0x05, 0x8b85);
- rtl_w0w1_phy(tp, 0x06, 0x4000, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0000);
+ r8168d_phy_param(phydev, 0x8b85, 0x0000, 0x4000);
}
static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp)
@@ -3138,77 +3021,43 @@ static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp)
static void rtl8411_hw_phy_config(struct rtl8169_private *tp)
{
- static const struct phy_reg phy_reg_init[] = {
- /* Channel estimation fine tune */
- { 0x1f, 0x0003 },
- { 0x09, 0xa20f },
- { 0x1f, 0x0000 },
-
- /* Modify green table for giga & fnet */
- { 0x1f, 0x0005 },
- { 0x05, 0x8b55 },
- { 0x06, 0x0000 },
- { 0x05, 0x8b5e },
- { 0x06, 0x0000 },
- { 0x05, 0x8b67 },
- { 0x06, 0x0000 },
- { 0x05, 0x8b70 },
- { 0x06, 0x0000 },
- { 0x1f, 0x0000 },
- { 0x1f, 0x0007 },
- { 0x1e, 0x0078 },
- { 0x17, 0x0000 },
- { 0x19, 0x00aa },
- { 0x1f, 0x0000 },
-
- /* Modify green table for 10M */
- { 0x1f, 0x0005 },
- { 0x05, 0x8b79 },
- { 0x06, 0xaa00 },
- { 0x1f, 0x0000 },
-
- /* Disable hiimpedance detection (RTCT) */
- { 0x1f, 0x0003 },
- { 0x01, 0x328a },
- { 0x1f, 0x0000 }
- };
-
+ struct phy_device *phydev = tp->phydev;
rtl_apply_firmware(tp);
rtl8168f_hw_phy_config(tp);
/* Improve 2-pair detection performance */
- rtl_writephy(tp, 0x1f, 0x0005);
- rtl_writephy(tp, 0x05, 0x8b85);
- rtl_w0w1_phy(tp, 0x06, 0x4000, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0000);
+ r8168d_phy_param(phydev, 0x8b85, 0x0000, 0x4000);
- rtl_writephy_batch(tp, phy_reg_init);
+ /* Channel estimation fine tune */
+ phy_write_paged(phydev, 0x0003, 0x09, 0xa20f);
+
+ /* Modify green table for giga & fnet */
+ r8168d_phy_param(phydev, 0x8b55, 0xffff, 0x0000);
+ r8168d_phy_param(phydev, 0x8b5e, 0xffff, 0x0000);
+ r8168d_phy_param(phydev, 0x8b67, 0xffff, 0x0000);
+ r8168d_phy_param(phydev, 0x8b70, 0xffff, 0x0000);
+ r8168d_modify_extpage(phydev, 0x0078, 0x17, 0xffff, 0x0000);
+ r8168d_modify_extpage(phydev, 0x0078, 0x19, 0xffff, 0x00aa);
+
+ /* Modify green table for 10M */
+ r8168d_phy_param(phydev, 0x8b79, 0xffff, 0xaa00);
+
+ /* Disable hiimpedance detection (RTCT) */
+ phy_write_paged(phydev, 0x0003, 0x01, 0x328a);
/* Modify green table for giga */
- rtl_writephy(tp, 0x1f, 0x0005);
- rtl_writephy(tp, 0x05, 0x8b54);
- rtl_w0w1_phy(tp, 0x06, 0x0000, 0x0800);
- rtl_writephy(tp, 0x05, 0x8b5d);
- rtl_w0w1_phy(tp, 0x06, 0x0000, 0x0800);
- rtl_writephy(tp, 0x05, 0x8a7c);
- rtl_w0w1_phy(tp, 0x06, 0x0000, 0x0100);
- rtl_writephy(tp, 0x05, 0x8a7f);
- rtl_w0w1_phy(tp, 0x06, 0x0100, 0x0000);
- rtl_writephy(tp, 0x05, 0x8a82);
- rtl_w0w1_phy(tp, 0x06, 0x0000, 0x0100);
- rtl_writephy(tp, 0x05, 0x8a85);
- rtl_w0w1_phy(tp, 0x06, 0x0000, 0x0100);
- rtl_writephy(tp, 0x05, 0x8a88);
- rtl_w0w1_phy(tp, 0x06, 0x0000, 0x0100);
- rtl_writephy(tp, 0x1f, 0x0000);
+ r8168d_phy_param(phydev, 0x8b54, 0x0800, 0x0000);
+ r8168d_phy_param(phydev, 0x8b5d, 0x0800, 0x0000);
+ r8168d_phy_param(phydev, 0x8a7c, 0x0100, 0x0000);
+ r8168d_phy_param(phydev, 0x8a7f, 0x0000, 0x0100);
+ r8168d_phy_param(phydev, 0x8a82, 0x0100, 0x0000);
+ r8168d_phy_param(phydev, 0x8a85, 0x0100, 0x0000);
+ r8168d_phy_param(phydev, 0x8a88, 0x0100, 0x0000);
/* uc same-seed solution */
- rtl_writephy(tp, 0x1f, 0x0005);
- rtl_writephy(tp, 0x05, 0x8b85);
- rtl_w0w1_phy(tp, 0x06, 0x8000, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0000);
+ r8168d_phy_param(phydev, 0x8b85, 0x0000, 0x8000);
/* Green feature */
rtl_writephy(tp, 0x1f, 0x0003);
@@ -3228,12 +3077,8 @@ static void rtl8168g_phy_adjust_10m_aldps(struct rtl8169_private *tp)
phy_modify_paged(phydev, 0x0bcc, 0x14, BIT(8), 0);
phy_modify_paged(phydev, 0x0a44, 0x11, 0, BIT(7) | BIT(6));
- phy_write(phydev, 0x1f, 0x0a43);
- phy_write(phydev, 0x13, 0x8084);
- phy_clear_bits(phydev, 0x14, BIT(14) | BIT(13));
- phy_set_bits(phydev, 0x10, BIT(12) | BIT(1) | BIT(0));
-
- phy_write(phydev, 0x1f, 0x0000);
+ r8168g_phy_param(phydev, 0x8084, 0x6000, 0x0000);
+ phy_modify_paged(phydev, 0x0a43, 0x10, 0x0000, 0x1003);
}
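
Similarly, r8168g_phy_param() (first used above, body also outside this excerpt) is the 8168g-family variant: the deleted sequences select page 0x0a43, write the parameter number to reg 0x13 and modify reg 0x14, so the helper plausibly reads:

        static void r8168g_phy_param(struct phy_device *phydev, u16 parm,
                                     u16 mask, u16 val)
        {
                int oldpage = phy_select_page(phydev, 0x0a43);

                __phy_write(phydev, 0x13, parm);        /* parameter address */
                __phy_modify(phydev, 0x14, mask, val);  /* parameter data */
                phy_restore_page(phydev, oldpage, 0);
        }
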
static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp)
@@ -3263,9 +3108,7 @@ static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp)
phy_modify_paged(tp->phydev, 0x0a4b, 0x11, 0, BIT(2));
/* Enable UC LPF tune function */
- rtl_writephy(tp, 0x1f, 0x0a43);
- rtl_writephy(tp, 0x13, 0x8012);
- rtl_w0w1_phy(tp, 0x14, 0x8000, 0x0000);
+ r8168g_phy_param(tp->phydev, 0x8012, 0x0000, 0x8000);
phy_modify_paged(tp->phydev, 0x0c42, 0x11, BIT(13), BIT(14));
@@ -3295,73 +3138,48 @@ static void rtl8168g_2_hw_phy_config(struct rtl8169_private *tp)
static void rtl8168h_1_hw_phy_config(struct rtl8169_private *tp)
{
+ struct phy_device *phydev = tp->phydev;
u16 dout_tapbin;
u32 data;
rtl_apply_firmware(tp);
/* CHN EST parameters adjust - giga master */
- rtl_writephy(tp, 0x1f, 0x0a43);
- rtl_writephy(tp, 0x13, 0x809b);
- rtl_w0w1_phy(tp, 0x14, 0x8000, 0xf800);
- rtl_writephy(tp, 0x13, 0x80a2);
- rtl_w0w1_phy(tp, 0x14, 0x8000, 0xff00);
- rtl_writephy(tp, 0x13, 0x80a4);
- rtl_w0w1_phy(tp, 0x14, 0x8500, 0xff00);
- rtl_writephy(tp, 0x13, 0x809c);
- rtl_w0w1_phy(tp, 0x14, 0xbd00, 0xff00);
- rtl_writephy(tp, 0x1f, 0x0000);
+ r8168g_phy_param(phydev, 0x809b, 0xf800, 0x8000);
+ r8168g_phy_param(phydev, 0x80a2, 0xff00, 0x8000);
+ r8168g_phy_param(phydev, 0x80a4, 0xff00, 0x8500);
+ r8168g_phy_param(phydev, 0x809c, 0xff00, 0xbd00);
/* CHN EST parameters adjust - giga slave */
- rtl_writephy(tp, 0x1f, 0x0a43);
- rtl_writephy(tp, 0x13, 0x80ad);
- rtl_w0w1_phy(tp, 0x14, 0x7000, 0xf800);
- rtl_writephy(tp, 0x13, 0x80b4);
- rtl_w0w1_phy(tp, 0x14, 0x5000, 0xff00);
- rtl_writephy(tp, 0x13, 0x80ac);
- rtl_w0w1_phy(tp, 0x14, 0x4000, 0xff00);
- rtl_writephy(tp, 0x1f, 0x0000);
+ r8168g_phy_param(phydev, 0x80ad, 0xf800, 0x7000);
+ r8168g_phy_param(phydev, 0x80b4, 0xff00, 0x5000);
+ r8168g_phy_param(phydev, 0x80ac, 0xff00, 0x4000);
/* CHN EST parameters adjust - fnet */
- rtl_writephy(tp, 0x1f, 0x0a43);
- rtl_writephy(tp, 0x13, 0x808e);
- rtl_w0w1_phy(tp, 0x14, 0x1200, 0xff00);
- rtl_writephy(tp, 0x13, 0x8090);
- rtl_w0w1_phy(tp, 0x14, 0xe500, 0xff00);
- rtl_writephy(tp, 0x13, 0x8092);
- rtl_w0w1_phy(tp, 0x14, 0x9f00, 0xff00);
- rtl_writephy(tp, 0x1f, 0x0000);
+ r8168g_phy_param(phydev, 0x808e, 0xff00, 0x1200);
+ r8168g_phy_param(phydev, 0x8090, 0xff00, 0xe500);
+ r8168g_phy_param(phydev, 0x8092, 0xff00, 0x9f00);
/* enable R-tune & PGA-retune function */
dout_tapbin = 0;
- rtl_writephy(tp, 0x1f, 0x0a46);
- data = rtl_readphy(tp, 0x13);
+ data = phy_read_paged(phydev, 0x0a46, 0x13);
data &= 3;
data <<= 2;
dout_tapbin |= data;
- data = rtl_readphy(tp, 0x12);
+ data = phy_read_paged(phydev, 0x0a46, 0x12);
data &= 0xc000;
data >>= 14;
dout_tapbin |= data;
dout_tapbin = ~(dout_tapbin^0x08);
dout_tapbin <<= 12;
dout_tapbin &= 0xf000;
- rtl_writephy(tp, 0x1f, 0x0a43);
- rtl_writephy(tp, 0x13, 0x827a);
- rtl_w0w1_phy(tp, 0x14, dout_tapbin, 0xf000);
- rtl_writephy(tp, 0x13, 0x827b);
- rtl_w0w1_phy(tp, 0x14, dout_tapbin, 0xf000);
- rtl_writephy(tp, 0x13, 0x827c);
- rtl_w0w1_phy(tp, 0x14, dout_tapbin, 0xf000);
- rtl_writephy(tp, 0x13, 0x827d);
- rtl_w0w1_phy(tp, 0x14, dout_tapbin, 0xf000);
-
- rtl_writephy(tp, 0x1f, 0x0a43);
- rtl_writephy(tp, 0x13, 0x0811);
- rtl_w0w1_phy(tp, 0x14, 0x0800, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0a42);
- rtl_w0w1_phy(tp, 0x16, 0x0002, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0000);
+
+ r8168g_phy_param(phydev, 0x827a, 0xf000, dout_tapbin);
+ r8168g_phy_param(phydev, 0x827b, 0xf000, dout_tapbin);
+ r8168g_phy_param(phydev, 0x827c, 0xf000, dout_tapbin);
+ r8168g_phy_param(phydev, 0x827d, 0xf000, dout_tapbin);
+ r8168g_phy_param(phydev, 0x0811, 0x0000, 0x0800);
+ phy_modify_paged(phydev, 0x0a42, 0x16, 0x0000, 0x0002);
/* enable GPHY 10M */
phy_modify_paged(tp->phydev, 0x0a44, 0x11, 0, BIT(11));
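
The dout_tapbin arithmetic above is easier to check with concrete numbers. A worked example, assuming hypothetical readbacks of 0x0002 from reg 0x13 and 0x4000 from reg 0x12:

        u16 tap = 0;

        tap |= (0x0002 & 3) << 2;       /* bits 3:2 -> tap = 0x0008 */
        tap |= (0x4000 & 0xc000) >> 14; /* bits 1:0 -> tap = 0x0009 */
        tap = ~(tap ^ 0x08);            /* 0x0009 ^ 0x0008 = 1, ~1 = 0xfffe */
        tap <<= 12;                     /* 0xe000 */
        tap &= 0xf000;                  /* nibble 0xe lands in bits 15:12,
                                         * i.e. the field of params 0x827a-d */
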
@@ -3369,22 +3187,13 @@ static void rtl8168h_1_hw_phy_config(struct rtl8169_private *tp)
/* SAR ADC performance */
phy_modify_paged(tp->phydev, 0x0bca, 0x17, BIT(12) | BIT(13), BIT(14));
- rtl_writephy(tp, 0x1f, 0x0a43);
- rtl_writephy(tp, 0x13, 0x803f);
- rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000);
- rtl_writephy(tp, 0x13, 0x8047);
- rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000);
- rtl_writephy(tp, 0x13, 0x804f);
- rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000);
- rtl_writephy(tp, 0x13, 0x8057);
- rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000);
- rtl_writephy(tp, 0x13, 0x805f);
- rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000);
- rtl_writephy(tp, 0x13, 0x8067);
- rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000);
- rtl_writephy(tp, 0x13, 0x806f);
- rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000);
- rtl_writephy(tp, 0x1f, 0x0000);
+ r8168g_phy_param(phydev, 0x803f, 0x3000, 0x0000);
+ r8168g_phy_param(phydev, 0x8047, 0x3000, 0x0000);
+ r8168g_phy_param(phydev, 0x804f, 0x3000, 0x0000);
+ r8168g_phy_param(phydev, 0x8057, 0x3000, 0x0000);
+ r8168g_phy_param(phydev, 0x805f, 0x3000, 0x0000);
+ r8168g_phy_param(phydev, 0x8067, 0x3000, 0x0000);
+ r8168g_phy_param(phydev, 0x806f, 0x3000, 0x0000);
/* disable phy pfm mode */
phy_modify_paged(tp->phydev, 0x0a44, 0x11, BIT(7), 0);
@@ -3397,24 +3206,18 @@ static void rtl8168h_1_hw_phy_config(struct rtl8169_private *tp)
static void rtl8168h_2_hw_phy_config(struct rtl8169_private *tp)
{
u16 ioffset_p3, ioffset_p2, ioffset_p1, ioffset_p0;
+ struct phy_device *phydev = tp->phydev;
u16 rlen;
u32 data;
rtl_apply_firmware(tp);
/* CHIN EST parameter update */
- rtl_writephy(tp, 0x1f, 0x0a43);
- rtl_writephy(tp, 0x13, 0x808a);
- rtl_w0w1_phy(tp, 0x14, 0x000a, 0x003f);
- rtl_writephy(tp, 0x1f, 0x0000);
+ r8168g_phy_param(phydev, 0x808a, 0x003f, 0x000a);
/* enable R-tune & PGA-retune function */
- rtl_writephy(tp, 0x1f, 0x0a43);
- rtl_writephy(tp, 0x13, 0x0811);
- rtl_w0w1_phy(tp, 0x14, 0x0800, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0a42);
- rtl_w0w1_phy(tp, 0x16, 0x0002, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0000);
+ r8168g_phy_param(phydev, 0x0811, 0x0000, 0x0800);
+ phy_modify_paged(phydev, 0x0a42, 0x16, 0x0000, 0x0002);
/* enable GPHY 10M */
phy_modify_paged(tp->phydev, 0x0a44, 0x11, 0, BIT(11));
@@ -3434,26 +3237,20 @@ static void rtl8168h_2_hw_phy_config(struct rtl8169_private *tp)
data = (ioffset_p3<<12)|(ioffset_p2<<8)|(ioffset_p1<<4)|(ioffset_p0);
if ((ioffset_p3 != 0x0f) || (ioffset_p2 != 0x0f) ||
- (ioffset_p1 != 0x0f) || (ioffset_p0 != 0x0f)) {
- rtl_writephy(tp, 0x1f, 0x0bcf);
- rtl_writephy(tp, 0x16, data);
- rtl_writephy(tp, 0x1f, 0x0000);
- }
+ (ioffset_p1 != 0x0f) || (ioffset_p0 != 0x0f))
+ phy_write_paged(phydev, 0x0bcf, 0x16, data);
/* Modify rlen (TX LPF corner frequency) level */
- rtl_writephy(tp, 0x1f, 0x0bcd);
- data = rtl_readphy(tp, 0x16);
+ data = phy_read_paged(phydev, 0x0bcd, 0x16);
data &= 0x000f;
rlen = 0;
if (data > 3)
rlen = data - 3;
data = rlen | (rlen<<4) | (rlen<<8) | (rlen<<12);
- rtl_writephy(tp, 0x17, data);
- rtl_writephy(tp, 0x1f, 0x0bcd);
- rtl_writephy(tp, 0x1f, 0x0000);
+ phy_write_paged(phydev, 0x0bcd, 0x17, data);
/* disable phy pfm mode */
- phy_modify_paged(tp->phydev, 0x0a44, 0x11, BIT(7), 0);
+ phy_modify_paged(phydev, 0x0a44, 0x11, BIT(7), 0);
rtl8168g_disable_aldps(tp);
rtl8168g_config_eee_phy(tp);
@@ -3462,22 +3259,21 @@ static void rtl8168h_2_hw_phy_config(struct rtl8169_private *tp)
static void rtl8168ep_1_hw_phy_config(struct rtl8169_private *tp)
{
+ struct phy_device *phydev = tp->phydev;
+
/* Enable PHY auto speed down */
- phy_modify_paged(tp->phydev, 0x0a44, 0x11, 0, BIT(3) | BIT(2));
+ phy_modify_paged(phydev, 0x0a44, 0x11, 0, BIT(3) | BIT(2));
rtl8168g_phy_adjust_10m_aldps(tp);
/* Enable EEE auto-fallback function */
- phy_modify_paged(tp->phydev, 0x0a4b, 0x11, 0, BIT(2));
+ phy_modify_paged(phydev, 0x0a4b, 0x11, 0, BIT(2));
/* Enable UC LPF tune function */
- rtl_writephy(tp, 0x1f, 0x0a43);
- rtl_writephy(tp, 0x13, 0x8012);
- rtl_w0w1_phy(tp, 0x14, 0x8000, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0000);
+ r8168g_phy_param(phydev, 0x8012, 0x0000, 0x8000);
/* set rg_sel_sdm_rate */
- phy_modify_paged(tp->phydev, 0x0c42, 0x11, BIT(13), BIT(14));
+ phy_modify_paged(phydev, 0x0c42, 0x11, BIT(13), BIT(14));
rtl8168g_disable_aldps(tp);
rtl8168g_config_eee_phy(tp);
@@ -3486,63 +3282,38 @@ static void rtl8168ep_1_hw_phy_config(struct rtl8169_private *tp)
static void rtl8168ep_2_hw_phy_config(struct rtl8169_private *tp)
{
+ struct phy_device *phydev = tp->phydev;
+
rtl8168g_phy_adjust_10m_aldps(tp);
/* Enable UC LPF tune function */
- rtl_writephy(tp, 0x1f, 0x0a43);
- rtl_writephy(tp, 0x13, 0x8012);
- rtl_w0w1_phy(tp, 0x14, 0x8000, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0000);
+ r8168g_phy_param(phydev, 0x8012, 0x0000, 0x8000);
/* Set rg_sel_sdm_rate */
phy_modify_paged(tp->phydev, 0x0c42, 0x11, BIT(13), BIT(14));
/* Channel estimation parameters */
- rtl_writephy(tp, 0x1f, 0x0a43);
- rtl_writephy(tp, 0x13, 0x80f3);
- rtl_w0w1_phy(tp, 0x14, 0x8b00, ~0x8bff);
- rtl_writephy(tp, 0x13, 0x80f0);
- rtl_w0w1_phy(tp, 0x14, 0x3a00, ~0x3aff);
- rtl_writephy(tp, 0x13, 0x80ef);
- rtl_w0w1_phy(tp, 0x14, 0x0500, ~0x05ff);
- rtl_writephy(tp, 0x13, 0x80f6);
- rtl_w0w1_phy(tp, 0x14, 0x6e00, ~0x6eff);
- rtl_writephy(tp, 0x13, 0x80ec);
- rtl_w0w1_phy(tp, 0x14, 0x6800, ~0x68ff);
- rtl_writephy(tp, 0x13, 0x80ed);
- rtl_w0w1_phy(tp, 0x14, 0x7c00, ~0x7cff);
- rtl_writephy(tp, 0x13, 0x80f2);
- rtl_w0w1_phy(tp, 0x14, 0xf400, ~0xf4ff);
- rtl_writephy(tp, 0x13, 0x80f4);
- rtl_w0w1_phy(tp, 0x14, 0x8500, ~0x85ff);
- rtl_writephy(tp, 0x1f, 0x0a43);
- rtl_writephy(tp, 0x13, 0x8110);
- rtl_w0w1_phy(tp, 0x14, 0xa800, ~0xa8ff);
- rtl_writephy(tp, 0x13, 0x810f);
- rtl_w0w1_phy(tp, 0x14, 0x1d00, ~0x1dff);
- rtl_writephy(tp, 0x13, 0x8111);
- rtl_w0w1_phy(tp, 0x14, 0xf500, ~0xf5ff);
- rtl_writephy(tp, 0x13, 0x8113);
- rtl_w0w1_phy(tp, 0x14, 0x6100, ~0x61ff);
- rtl_writephy(tp, 0x13, 0x8115);
- rtl_w0w1_phy(tp, 0x14, 0x9200, ~0x92ff);
- rtl_writephy(tp, 0x13, 0x810e);
- rtl_w0w1_phy(tp, 0x14, 0x0400, ~0x04ff);
- rtl_writephy(tp, 0x13, 0x810c);
- rtl_w0w1_phy(tp, 0x14, 0x7c00, ~0x7cff);
- rtl_writephy(tp, 0x13, 0x810b);
- rtl_w0w1_phy(tp, 0x14, 0x5a00, ~0x5aff);
- rtl_writephy(tp, 0x1f, 0x0a43);
- rtl_writephy(tp, 0x13, 0x80d1);
- rtl_w0w1_phy(tp, 0x14, 0xff00, ~0xffff);
- rtl_writephy(tp, 0x13, 0x80cd);
- rtl_w0w1_phy(tp, 0x14, 0x9e00, ~0x9eff);
- rtl_writephy(tp, 0x13, 0x80d3);
- rtl_w0w1_phy(tp, 0x14, 0x0e00, ~0x0eff);
- rtl_writephy(tp, 0x13, 0x80d5);
- rtl_w0w1_phy(tp, 0x14, 0xca00, ~0xcaff);
- rtl_writephy(tp, 0x13, 0x80d7);
- rtl_w0w1_phy(tp, 0x14, 0x8400, ~0x84ff);
+ r8168g_phy_param(phydev, 0x80f3, 0xff00, 0x8b00);
+ r8168g_phy_param(phydev, 0x80f0, 0xff00, 0x3a00);
+ r8168g_phy_param(phydev, 0x80ef, 0xff00, 0x0500);
+ r8168g_phy_param(phydev, 0x80f6, 0xff00, 0x6e00);
+ r8168g_phy_param(phydev, 0x80ec, 0xff00, 0x6800);
+ r8168g_phy_param(phydev, 0x80ed, 0xff00, 0x7c00);
+ r8168g_phy_param(phydev, 0x80f2, 0xff00, 0xf400);
+ r8168g_phy_param(phydev, 0x80f4, 0xff00, 0x8500);
+ r8168g_phy_param(phydev, 0x8110, 0xff00, 0xa800);
+ r8168g_phy_param(phydev, 0x810f, 0xff00, 0x1d00);
+ r8168g_phy_param(phydev, 0x8111, 0xff00, 0xf500);
+ r8168g_phy_param(phydev, 0x8113, 0xff00, 0x6100);
+ r8168g_phy_param(phydev, 0x8115, 0xff00, 0x9200);
+ r8168g_phy_param(phydev, 0x810e, 0xff00, 0x0400);
+ r8168g_phy_param(phydev, 0x810c, 0xff00, 0x7c00);
+ r8168g_phy_param(phydev, 0x810b, 0xff00, 0x5a00);
+ r8168g_phy_param(phydev, 0x80d1, 0xff00, 0xff00);
+ r8168g_phy_param(phydev, 0x80cd, 0xff00, 0x9e00);
+ r8168g_phy_param(phydev, 0x80d3, 0xff00, 0x0e00);
+ r8168g_phy_param(phydev, 0x80d5, 0xff00, 0xca00);
+ r8168g_phy_param(phydev, 0x80d7, 0xff00, 0x8400);
/* Force PWM-mode */
rtl_writephy(tp, 0x1f, 0x0bcd);
@@ -3561,6 +3332,46 @@ static void rtl8168ep_2_hw_phy_config(struct rtl8169_private *tp)
rtl_enable_eee(tp);
}
+static void rtl8117_hw_phy_config(struct rtl8169_private *tp)
+{
+ struct phy_device *phydev = tp->phydev;
+
+ /* CHN EST parameters adjust - fnet */
+ r8168g_phy_param(phydev, 0x808e, 0xff00, 0x4800);
+ r8168g_phy_param(phydev, 0x8090, 0xff00, 0xcc00);
+ r8168g_phy_param(phydev, 0x8092, 0xff00, 0xb000);
+
+ r8168g_phy_param(phydev, 0x8088, 0xff00, 0x6000);
+ r8168g_phy_param(phydev, 0x808b, 0x3f00, 0x0b00);
+ r8168g_phy_param(phydev, 0x808d, 0x1f00, 0x0600);
+ r8168g_phy_param(phydev, 0x808c, 0xff00, 0xb000);
+ r8168g_phy_param(phydev, 0x80a0, 0xff00, 0x2800);
+ r8168g_phy_param(phydev, 0x80a2, 0xff00, 0x5000);
+ r8168g_phy_param(phydev, 0x809b, 0xf800, 0xb000);
+ r8168g_phy_param(phydev, 0x809a, 0xff00, 0x4b00);
+ r8168g_phy_param(phydev, 0x809d, 0x3f00, 0x0800);
+ r8168g_phy_param(phydev, 0x80a1, 0xff00, 0x7000);
+ r8168g_phy_param(phydev, 0x809f, 0x1f00, 0x0300);
+ r8168g_phy_param(phydev, 0x809e, 0xff00, 0x8800);
+ r8168g_phy_param(phydev, 0x80b2, 0xff00, 0x2200);
+ r8168g_phy_param(phydev, 0x80ad, 0xf800, 0x9800);
+ r8168g_phy_param(phydev, 0x80af, 0x3f00, 0x0800);
+ r8168g_phy_param(phydev, 0x80b3, 0xff00, 0x6f00);
+ r8168g_phy_param(phydev, 0x80b1, 0x1f00, 0x0300);
+ r8168g_phy_param(phydev, 0x80b0, 0xff00, 0x9300);
+
+ r8168g_phy_param(phydev, 0x8011, 0x0000, 0x0800);
+
+ /* enable GPHY 10M */
+ phy_modify_paged(tp->phydev, 0x0a44, 0x11, 0, BIT(11));
+
+ r8168g_phy_param(phydev, 0x8016, 0x0000, 0x0400);
+
+ rtl8168g_disable_aldps(tp);
+ rtl8168h_config_eee_phy(tp);
+ rtl_enable_eee(tp);
+}
+
static void rtl8102e_hw_phy_config(struct rtl8169_private *tp)
{
static const struct phy_reg phy_reg_init[] = {
@@ -3580,35 +3391,21 @@ static void rtl8102e_hw_phy_config(struct rtl8169_private *tp)
static void rtl8105e_hw_phy_config(struct rtl8169_private *tp)
{
- static const struct phy_reg phy_reg_init[] = {
- { 0x1f, 0x0005 },
- { 0x1a, 0x0000 },
- { 0x1f, 0x0000 },
-
- { 0x1f, 0x0004 },
- { 0x1c, 0x0000 },
- { 0x1f, 0x0000 },
-
- { 0x1f, 0x0001 },
- { 0x15, 0x7701 },
- { 0x1f, 0x0000 }
- };
-
/* Disable ALDPS before ram code */
- rtl_writephy(tp, 0x1f, 0x0000);
- rtl_writephy(tp, 0x18, 0x0310);
+ phy_write(tp->phydev, 0x18, 0x0310);
msleep(100);
rtl_apply_firmware(tp);
- rtl_writephy_batch(tp, phy_reg_init);
+ phy_write_paged(tp->phydev, 0x0005, 0x1a, 0x0000);
+ phy_write_paged(tp->phydev, 0x0004, 0x1c, 0x0000);
+ phy_write_paged(tp->phydev, 0x0001, 0x15, 0x7701);
}
static void rtl8402_hw_phy_config(struct rtl8169_private *tp)
{
/* Disable ALDPS before setting firmware */
- rtl_writephy(tp, 0x1f, 0x0000);
- rtl_writephy(tp, 0x18, 0x0310);
+ phy_write(tp->phydev, 0x18, 0x0310);
msleep(20);
rtl_apply_firmware(tp);
@@ -3631,8 +3428,7 @@ static void rtl8106e_hw_phy_config(struct rtl8169_private *tp)
};
/* Disable ALDPS before ram code */
- rtl_writephy(tp, 0x1f, 0x0000);
- rtl_writephy(tp, 0x18, 0x0310);
+ phy_write(tp->phydev, 0x18, 0x0310);
msleep(100);
rtl_apply_firmware(tp);
@@ -3657,38 +3453,22 @@ static void rtl8125_1_hw_phy_config(struct rtl8169_private *tp)
phy_modify_paged(phydev, 0xad1, 0x15, 0x0000, 0x03ff);
phy_modify_paged(phydev, 0xad1, 0x16, 0x0000, 0x03ff);
- phy_write(phydev, 0x1f, 0x0a43);
- phy_write(phydev, 0x13, 0x80ea);
- phy_modify(phydev, 0x14, 0xff00, 0xc400);
- phy_write(phydev, 0x13, 0x80eb);
- phy_modify(phydev, 0x14, 0x0700, 0x0300);
- phy_write(phydev, 0x13, 0x80f8);
- phy_modify(phydev, 0x14, 0xff00, 0x1c00);
- phy_write(phydev, 0x13, 0x80f1);
- phy_modify(phydev, 0x14, 0xff00, 0x3000);
- phy_write(phydev, 0x13, 0x80fe);
- phy_modify(phydev, 0x14, 0xff00, 0xa500);
- phy_write(phydev, 0x13, 0x8102);
- phy_modify(phydev, 0x14, 0xff00, 0x5000);
- phy_write(phydev, 0x13, 0x8105);
- phy_modify(phydev, 0x14, 0xff00, 0x3300);
- phy_write(phydev, 0x13, 0x8100);
- phy_modify(phydev, 0x14, 0xff00, 0x7000);
- phy_write(phydev, 0x13, 0x8104);
- phy_modify(phydev, 0x14, 0xff00, 0xf000);
- phy_write(phydev, 0x13, 0x8106);
- phy_modify(phydev, 0x14, 0xff00, 0x6500);
- phy_write(phydev, 0x13, 0x80dc);
- phy_modify(phydev, 0x14, 0xff00, 0xed00);
- phy_write(phydev, 0x13, 0x80df);
- phy_set_bits(phydev, 0x14, BIT(8));
- phy_write(phydev, 0x13, 0x80e1);
- phy_clear_bits(phydev, 0x14, BIT(8));
- phy_write(phydev, 0x1f, 0x0000);
+ r8168g_phy_param(phydev, 0x80ea, 0xff00, 0xc400);
+ r8168g_phy_param(phydev, 0x80eb, 0x0700, 0x0300);
+ r8168g_phy_param(phydev, 0x80f8, 0xff00, 0x1c00);
+ r8168g_phy_param(phydev, 0x80f1, 0xff00, 0x3000);
+ r8168g_phy_param(phydev, 0x80fe, 0xff00, 0xa500);
+ r8168g_phy_param(phydev, 0x8102, 0xff00, 0x5000);
+ r8168g_phy_param(phydev, 0x8105, 0xff00, 0x3300);
+ r8168g_phy_param(phydev, 0x8100, 0xff00, 0x7000);
+ r8168g_phy_param(phydev, 0x8104, 0xff00, 0xf000);
+ r8168g_phy_param(phydev, 0x8106, 0xff00, 0x6500);
+ r8168g_phy_param(phydev, 0x80dc, 0xff00, 0xed00);
+ r8168g_phy_param(phydev, 0x80df, 0x0000, 0x0100);
+ r8168g_phy_param(phydev, 0x80e1, 0x0100, 0x0000);
phy_modify_paged(phydev, 0xbf0, 0x13, 0x003f, 0x0038);
- phy_write_paged(phydev, 0xa43, 0x13, 0x819f);
- phy_write_paged(phydev, 0xa43, 0x14, 0xd0b6);
+ r8168g_phy_param(phydev, 0x819f, 0xffff, 0xd0b6);
phy_write_paged(phydev, 0xbc3, 0x12, 0x5555);
phy_modify_paged(phydev, 0xbf0, 0x15, 0x0e00, 0x0a00);
@@ -3743,22 +3523,16 @@ static void rtl8125_2_hw_phy_config(struct rtl8169_private *tp)
phy_write(phydev, 0x14, 0x0002);
for (i = 0; i < 25; i++)
phy_write(phydev, 0x14, 0x0000);
-
- phy_write(phydev, 0x13, 0x8257);
- phy_write(phydev, 0x14, 0x020F);
-
- phy_write(phydev, 0x13, 0x80EA);
- phy_write(phydev, 0x14, 0x7843);
phy_write(phydev, 0x1f, 0x0000);
+ r8168g_phy_param(phydev, 0x8257, 0xffff, 0x020F);
+ r8168g_phy_param(phydev, 0x80ea, 0xffff, 0x7843);
+
rtl_apply_firmware(tp);
phy_modify_paged(phydev, 0xd06, 0x14, 0x0000, 0x2000);
- phy_write(phydev, 0x1f, 0x0a43);
- phy_write(phydev, 0x13, 0x81a2);
- phy_set_bits(phydev, 0x14, BIT(8));
- phy_write(phydev, 0x1f, 0x0000);
+ r8168g_phy_param(phydev, 0x81a2, 0x0000, 0x0100);
phy_modify_paged(phydev, 0xb54, 0x16, 0xff00, 0xdb00);
phy_modify_paged(phydev, 0xa45, 0x12, 0x0001, 0x0000);
@@ -3796,7 +3570,7 @@ static void rtl_hw_phy_config(struct net_device *dev)
[RTL_GIGA_MAC_VER_19] = rtl8168c_1_hw_phy_config,
[RTL_GIGA_MAC_VER_20] = rtl8168c_2_hw_phy_config,
[RTL_GIGA_MAC_VER_21] = rtl8168c_3_hw_phy_config,
- [RTL_GIGA_MAC_VER_22] = rtl8168c_4_hw_phy_config,
+ [RTL_GIGA_MAC_VER_22] = rtl8168c_3_hw_phy_config,
[RTL_GIGA_MAC_VER_23] = rtl8168cp_2_hw_phy_config,
[RTL_GIGA_MAC_VER_24] = rtl8168cp_2_hw_phy_config,
[RTL_GIGA_MAC_VER_25] = rtl8168d_1_hw_phy_config,
@@ -3826,6 +3600,7 @@ static void rtl_hw_phy_config(struct net_device *dev)
[RTL_GIGA_MAC_VER_49] = rtl8168ep_1_hw_phy_config,
[RTL_GIGA_MAC_VER_50] = rtl8168ep_2_hw_phy_config,
[RTL_GIGA_MAC_VER_51] = rtl8168ep_2_hw_phy_config,
+ [RTL_GIGA_MAC_VER_52] = rtl8117_hw_phy_config,
[RTL_GIGA_MAC_VER_60] = rtl8125_1_hw_phy_config,
[RTL_GIGA_MAC_VER_61] = rtl8125_2_hw_phy_config,
};
@@ -3919,7 +3694,7 @@ static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_32:
case RTL_GIGA_MAC_VER_33:
case RTL_GIGA_MAC_VER_34:
- case RTL_GIGA_MAC_VER_37 ... RTL_GIGA_MAC_VER_51:
+ case RTL_GIGA_MAC_VER_37 ... RTL_GIGA_MAC_VER_52:
RTL_W32(tp, RxConfig, RTL_R32(tp, RxConfig) |
AcceptBroadcast | AcceptMulticast | AcceptMyPhys);
break;
@@ -3955,6 +3730,7 @@ static void rtl_pll_power_down(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_48:
case RTL_GIGA_MAC_VER_50:
case RTL_GIGA_MAC_VER_51:
+ case RTL_GIGA_MAC_VER_52:
case RTL_GIGA_MAC_VER_60:
case RTL_GIGA_MAC_VER_61:
RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~0x80);
@@ -3986,6 +3762,7 @@ static void rtl_pll_power_up(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_48:
case RTL_GIGA_MAC_VER_50:
case RTL_GIGA_MAC_VER_51:
+ case RTL_GIGA_MAC_VER_52:
case RTL_GIGA_MAC_VER_60:
case RTL_GIGA_MAC_VER_61:
RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0xc0);
@@ -4017,7 +3794,7 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_38:
RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
break;
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_52:
RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF);
break;
case RTL_GIGA_MAC_VER_60 ... RTL_GIGA_MAC_VER_61:
@@ -4039,14 +3816,12 @@ static void r8168c_hw_jumbo_enable(struct rtl8169_private *tp)
{
RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0);
RTL_W8(tp, Config4, RTL_R8(tp, Config4) | Jumbo_En1);
- rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_512B);
}
static void r8168c_hw_jumbo_disable(struct rtl8169_private *tp)
{
RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0);
RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~Jumbo_En1);
- rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
}
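
The rtl_tx_performance_tweak() calls deleted in this and the following hunks all forced the PCIe Max_Read_Request_Size; dropping them leaves whatever the PCI core negotiated. The removed helper is not shown in this excerpt, but amounted to roughly:

        static void rtl_tx_performance_tweak(struct rtl8169_private *tp,
                                             u16 force)
        {
                pcie_capability_clear_and_set_word(tp->pci_dev, PCI_EXP_DEVCTL,
                                                   PCI_EXP_DEVCTL_READRQ,
                                                   force);
        }
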
static void r8168dp_hw_jumbo_enable(struct rtl8169_private *tp)
@@ -4064,7 +3839,6 @@ static void r8168e_hw_jumbo_enable(struct rtl8169_private *tp)
RTL_W8(tp, MaxTxPacketSize, 0x3f);
RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0);
RTL_W8(tp, Config4, RTL_R8(tp, Config4) | 0x01);
- rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_512B);
}
static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp)
@@ -4072,32 +3846,15 @@ static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp)
RTL_W8(tp, MaxTxPacketSize, 0x0c);
RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0);
RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~0x01);
- rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
-}
-
-static void r8168b_0_hw_jumbo_enable(struct rtl8169_private *tp)
-{
- rtl_tx_performance_tweak(tp,
- PCI_EXP_DEVCTL_READRQ_512B | PCI_EXP_DEVCTL_NOSNOOP_EN);
-}
-
-static void r8168b_0_hw_jumbo_disable(struct rtl8169_private *tp)
-{
- rtl_tx_performance_tweak(tp,
- PCI_EXP_DEVCTL_READRQ_4096B | PCI_EXP_DEVCTL_NOSNOOP_EN);
}
static void r8168b_1_hw_jumbo_enable(struct rtl8169_private *tp)
{
- r8168b_0_hw_jumbo_enable(tp);
-
RTL_W8(tp, Config4, RTL_R8(tp, Config4) | (1 << 0));
}
static void r8168b_1_hw_jumbo_disable(struct rtl8169_private *tp)
{
- r8168b_0_hw_jumbo_disable(tp);
-
RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~(1 << 0));
}
@@ -4105,9 +3862,6 @@ static void rtl_hw_jumbo_enable(struct rtl8169_private *tp)
{
rtl_unlock_config_regs(tp);
switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_11:
- r8168b_0_hw_jumbo_enable(tp);
- break;
case RTL_GIGA_MAC_VER_12:
case RTL_GIGA_MAC_VER_17:
r8168b_1_hw_jumbo_enable(tp);
@@ -4131,9 +3885,6 @@ static void rtl_hw_jumbo_disable(struct rtl8169_private *tp)
{
rtl_unlock_config_regs(tp);
switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_11:
- r8168b_0_hw_jumbo_disable(tp);
- break;
case RTL_GIGA_MAC_VER_12:
case RTL_GIGA_MAC_VER_17:
r8168b_1_hw_jumbo_disable(tp);
@@ -4229,7 +3980,7 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp)
rtl_udelay_loop_wait_low(tp, &rtl_npq_cond, 20, 42*42);
break;
case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_38:
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_52:
RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq);
rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666);
break;
@@ -4454,18 +4205,11 @@ static void rtl8168g_set_pause_thresholds(struct rtl8169_private *tp,
rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, high);
}
-static void rtl_hw_start_8168bb(struct rtl8169_private *tp)
+static void rtl_hw_start_8168b(struct rtl8169_private *tp)
{
RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
}
-static void rtl_hw_start_8168bef(struct rtl8169_private *tp)
-{
- rtl_hw_start_8168bb(tp);
-
- RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~(1 << 0));
-}
-
static void __rtl_hw_start_8168cp(struct rtl8169_private *tp)
{
RTL_W8(tp, Config1, RTL_R8(tp, Config1) | Speed_down);
@@ -4557,19 +4301,6 @@ static void rtl_hw_start_8168d(struct rtl8169_private *tp)
rtl_set_def_aspm_entry_latency(tp);
rtl_disable_clock_request(tp);
-
- if (tp->dev->mtu <= ETH_DATA_LEN)
- rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
-}
-
-static void rtl_hw_start_8168dp(struct rtl8169_private *tp)
-{
- rtl_set_def_aspm_entry_latency(tp);
-
- if (tp->dev->mtu <= ETH_DATA_LEN)
- rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
-
- rtl_disable_clock_request(tp);
}
static void rtl_hw_start_8168d_4(struct rtl8169_private *tp)
@@ -4583,8 +4314,6 @@ static void rtl_hw_start_8168d_4(struct rtl8169_private *tp)
rtl_set_def_aspm_entry_latency(tp);
- rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
-
rtl_ephy_init(tp, e_info_8168d_4);
rtl_enable_clock_request(tp);
@@ -4659,8 +4388,6 @@ static void rtl_hw_start_8168f(struct rtl8169_private *tp)
{
rtl_set_def_aspm_entry_latency(tp);
- rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
-
rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
rtl_set_fifo_size(tp, 0x10, 0x10, 0x02, 0x06);
@@ -4723,8 +4450,6 @@ static void rtl_hw_start_8168g(struct rtl8169_private *tp)
rtl_set_def_aspm_entry_latency(tp);
- rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
-
rtl_reset_packet_filter(tp);
rtl_eri_write(tp, 0x2f8, ERIAR_MASK_0011, 0x1d8f);
@@ -4775,8 +4500,7 @@ static void rtl_hw_start_8168g_2(struct rtl8169_private *tp)
rtl_hw_start_8168g(tp);
/* disable aspm and clock request before access ephy */
- RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn);
- RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en);
+ rtl_hw_aspm_clkreq_enable(tp, false);
rtl_ephy_init(tp, e_info_8168g_2);
}
@@ -4961,8 +4685,6 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
rtl_set_def_aspm_entry_latency(tp);
- rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
-
rtl_reset_packet_filter(tp);
rtl_eri_set_bits(tp, 0xdc, ERIAR_MASK_1111, BIT(4));
@@ -5020,8 +4742,6 @@ static void rtl_hw_start_8168ep(struct rtl8169_private *tp)
rtl_set_def_aspm_entry_latency(tp);
- rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
-
rtl_reset_packet_filter(tp);
rtl_eri_set_bits(tp, 0xd4, ERIAR_MASK_1111, 0x1f80);
@@ -5106,6 +4826,71 @@ static void rtl_hw_start_8168ep_3(struct rtl8169_private *tp)
rtl_hw_aspm_clkreq_enable(tp, true);
}
+static void rtl_hw_start_8117(struct rtl8169_private *tp)
+{
+ static const struct ephy_info e_info_8117[] = {
+ { 0x19, 0x0040, 0x1100 },
+ { 0x59, 0x0040, 0x1100 },
+ };
+ int rg_saw_cnt;
+
+ rtl8168ep_stop_cmac(tp);
+
+ /* disable aspm and clock request before access ephy */
+ rtl_hw_aspm_clkreq_enable(tp, false);
+ rtl_ephy_init(tp, e_info_8117);
+
+ rtl_set_fifo_size(tp, 0x08, 0x10, 0x02, 0x06);
+ rtl8168g_set_pause_thresholds(tp, 0x2f, 0x5f);
+
+ rtl_set_def_aspm_entry_latency(tp);
+
+ rtl_reset_packet_filter(tp);
+
+ rtl_eri_set_bits(tp, 0xd4, ERIAR_MASK_1111, 0x1f90);
+
+ rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87);
+
+ RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN);
+
+ rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
+ rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
+
+ rtl8168_config_eee_mac(tp);
+
+ RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN);
+ RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN);
+
+ RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~TX_10M_PS_EN);
+
+ rtl_eri_clear_bits(tp, 0x1b0, ERIAR_MASK_0011, BIT(12));
+
+ rtl_pcie_state_l2l3_disable(tp);
+
+ rg_saw_cnt = phy_read_paged(tp->phydev, 0x0c42, 0x13) & 0x3fff;
+ if (rg_saw_cnt > 0) {
+ u16 sw_cnt_1ms_ini;
+
+ sw_cnt_1ms_ini = (16000000 / rg_saw_cnt) & 0x0fff;
+ r8168_mac_ocp_modify(tp, 0xd412, 0x0fff, sw_cnt_1ms_ini);
+ }
+
+ r8168_mac_ocp_modify(tp, 0xe056, 0x00f0, 0x0070);
+ r8168_mac_ocp_write(tp, 0xea80, 0x0003);
+ r8168_mac_ocp_modify(tp, 0xe052, 0x0000, 0x0009);
+ r8168_mac_ocp_modify(tp, 0xd420, 0x0fff, 0x047f);
+
+ r8168_mac_ocp_write(tp, 0xe63e, 0x0001);
+ r8168_mac_ocp_write(tp, 0xe63e, 0x0000);
+ r8168_mac_ocp_write(tp, 0xc094, 0x0000);
+ r8168_mac_ocp_write(tp, 0xc09e, 0x0000);
+
+ /* firmware is for MAC only */
+ rtl_apply_firmware(tp);
+
+ rtl_hw_aspm_clkreq_enable(tp, true);
+}
+
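
A worked example for the sw_cnt_1ms_ini line in rtl_hw_start_8117() above, assuming a hypothetical rg_saw_cnt readback of 8000 and that the 16000000 constant is a 16 MHz reference:

        /* reference ticks per millisecond, clamped to the 12-bit
         * field of OCP register 0xd412 */
        sw_cnt_1ms_ini = (16000000 / 8000) & 0x0fff;    /* 2000 = 0x7d0 */
        r8168_mac_ocp_modify(tp, 0xd412, 0x0fff, sw_cnt_1ms_ini);
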
static void rtl_hw_start_8102e_1(struct rtl8169_private *tp)
{
static const struct ephy_info e_info_8102e_1[] = {
@@ -5124,8 +4909,6 @@ static void rtl_hw_start_8102e_1(struct rtl8169_private *tp)
RTL_W8(tp, DBG_REG, FIX_NAK_1);
- rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
-
RTL_W8(tp, Config1,
LEDS1 | LEDS0 | Speed_down | MEMMAP | IOMAP | VPD | PMEnable);
RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
@@ -5141,8 +4924,6 @@ static void rtl_hw_start_8102e_2(struct rtl8169_private *tp)
{
rtl_set_def_aspm_entry_latency(tp);
- rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
-
RTL_W8(tp, Config1, MEMMAP | IOMAP | VPD | PMEnable);
RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
}
@@ -5203,8 +4984,6 @@ static void rtl_hw_start_8402(struct rtl8169_private *tp)
rtl_ephy_init(tp, e_info_8402);
- rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
-
rtl_set_fifo_size(tp, 0x00, 0x00, 0x02, 0x06);
rtl_reset_packet_filter(tp);
rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
@@ -5358,13 +5137,13 @@ static void rtl_hw_config(struct rtl8169_private *tp)
[RTL_GIGA_MAC_VER_08] = rtl_hw_start_8102e_3,
[RTL_GIGA_MAC_VER_09] = rtl_hw_start_8102e_2,
[RTL_GIGA_MAC_VER_10] = NULL,
- [RTL_GIGA_MAC_VER_11] = rtl_hw_start_8168bb,
- [RTL_GIGA_MAC_VER_12] = rtl_hw_start_8168bef,
+ [RTL_GIGA_MAC_VER_11] = rtl_hw_start_8168b,
+ [RTL_GIGA_MAC_VER_12] = rtl_hw_start_8168b,
[RTL_GIGA_MAC_VER_13] = NULL,
[RTL_GIGA_MAC_VER_14] = NULL,
[RTL_GIGA_MAC_VER_15] = NULL,
[RTL_GIGA_MAC_VER_16] = NULL,
- [RTL_GIGA_MAC_VER_17] = rtl_hw_start_8168bef,
+ [RTL_GIGA_MAC_VER_17] = rtl_hw_start_8168b,
[RTL_GIGA_MAC_VER_18] = rtl_hw_start_8168cp_1,
[RTL_GIGA_MAC_VER_19] = rtl_hw_start_8168c_1,
[RTL_GIGA_MAC_VER_20] = rtl_hw_start_8168c_2,
@@ -5378,7 +5157,7 @@ static void rtl_hw_config(struct rtl8169_private *tp)
[RTL_GIGA_MAC_VER_28] = rtl_hw_start_8168d_4,
[RTL_GIGA_MAC_VER_29] = rtl_hw_start_8105e_1,
[RTL_GIGA_MAC_VER_30] = rtl_hw_start_8105e_2,
- [RTL_GIGA_MAC_VER_31] = rtl_hw_start_8168dp,
+ [RTL_GIGA_MAC_VER_31] = rtl_hw_start_8168d,
[RTL_GIGA_MAC_VER_32] = rtl_hw_start_8168e_1,
[RTL_GIGA_MAC_VER_33] = rtl_hw_start_8168e_1,
[RTL_GIGA_MAC_VER_34] = rtl_hw_start_8168e_2,
@@ -5399,6 +5178,7 @@ static void rtl_hw_config(struct rtl8169_private *tp)
[RTL_GIGA_MAC_VER_49] = rtl_hw_start_8168ep_1,
[RTL_GIGA_MAC_VER_50] = rtl_hw_start_8168ep_2,
[RTL_GIGA_MAC_VER_51] = rtl_hw_start_8168ep_3,
+ [RTL_GIGA_MAC_VER_52] = rtl_hw_start_8117,
[RTL_GIGA_MAC_VER_60] = rtl_hw_start_8125_1,
[RTL_GIGA_MAC_VER_61] = rtl_hw_start_8125_2,
};
@@ -5420,11 +5200,6 @@ static void rtl_hw_start_8125(struct rtl8169_private *tp)
static void rtl_hw_start_8168(struct rtl8169_private *tp)
{
- if (tp->mac_version == RTL_GIGA_MAC_VER_13 ||
- tp->mac_version == RTL_GIGA_MAC_VER_16)
- pcie_capability_set_word(tp->pci_dev, PCI_EXP_DEVCTL,
- PCI_EXP_DEVCTL_NOSNOOP_EN);
-
if (rtl_is_8168evl_up(tp))
RTL_W8(tp, MaxTxPacketSize, EarlySize);
else
@@ -5573,18 +5348,15 @@ static int rtl8169_rx_fill(struct rtl8169_private *tp)
data = rtl8169_alloc_rx_data(tp, tp->RxDescArray + i);
if (!data) {
- rtl8169_make_unusable_by_asic(tp->RxDescArray + i);
- goto err_out;
+ rtl8169_rx_clear(tp);
+ return -ENOMEM;
}
tp->Rx_databuff[i] = data;
}
rtl8169_mark_as_last_descriptor(tp->RxDescArray + NUM_RX_DESC - 1);
- return 0;
-err_out:
- rtl8169_rx_clear(tp);
- return -ENOMEM;
+ return 0;
}
static int rtl8169_init_ring(struct rtl8169_private *tp)
@@ -6953,7 +6725,7 @@ static void rtl_hw_init_8125(struct rtl8169_private *tp)
static void rtl_hw_initialize(struct rtl8169_private *tp)
{
switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_49 ... RTL_GIGA_MAC_VER_51:
+ case RTL_GIGA_MAC_VER_49 ... RTL_GIGA_MAC_VER_52:
rtl8168ep_stop_cmac(tp);
/* fall through */
case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_48:
@@ -7063,6 +6835,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tp->pci_dev = pdev;
tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT);
tp->supports_gmii = ent->driver_data == RTL_CFG_NO_GBIT ? 0 : 1;
+ tp->eee_adv = -1;
/* Get the *optional* external "ether_clk" used on some boards */
rc = rtl_get_ether_clk(tp);
@@ -7179,8 +6952,11 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->gso_max_segs = RTL_GSO_MAX_SEGS_V1;
}
- /* RTL8168e-vl has a HW issue with TSO */
- if (tp->mac_version == RTL_GIGA_MAC_VER_34) {
+ /* RTL8168e-vl and one RTL8168c variant are known to have a
+ * HW issue with TSO.
+ */
+ if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_22) {
dev->vlan_features &= ~(NETIF_F_ALL_TSO | NETIF_F_SG);
dev->hw_features &= ~(NETIF_F_ALL_TSO | NETIF_F_SG);
dev->features &= ~(NETIF_F_ALL_TSO | NETIF_F_SG);
diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h
index a9c89d5d8898..9f88b5db4f89 100644
--- a/drivers/net/ethernet/renesas/ravb.h
+++ b/drivers/net/ethernet/renesas/ravb.h
@@ -955,6 +955,8 @@ enum RAVB_QUEUE {
#define NUM_RX_QUEUE 2
#define NUM_TX_QUEUE 2
+#define RX_BUF_SZ (2048 - ETH_FCS_LEN + sizeof(__sum16))
+
/* TX descriptors per packet */
#define NUM_TX_DESC_GEN2 2
#define NUM_TX_DESC_GEN3 1
@@ -1018,7 +1020,6 @@ struct ravb_private {
u32 dirty_rx[NUM_RX_QUEUE]; /* Producer ring indices */
u32 cur_tx[NUM_TX_QUEUE];
u32 dirty_tx[NUM_TX_QUEUE];
- u32 rx_buf_sz; /* Based on MTU+slack. */
struct napi_struct napi[NUM_RX_QUEUE];
struct work_struct work;
/* MII transceiver section. */
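
RX_BUF_SZ, added above, replaces the per-device rx_buf_sz field (removed in the second hunk) with a fixed size. The arithmetic:

        /* RX_BUF_SZ = 2048 - ETH_FCS_LEN + sizeof(__sum16)
         *           = 2048 - 4 + 2 = 2046 bytes per RX descriptor:
         * a 2 KiB slot minus the FCS the MAC strips, plus two bytes
         * for the checksum value the EtherAVB hardware can append */
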
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index de9aa8c47f1c..4b13a184bfc7 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -230,7 +230,7 @@ static void ravb_ring_free(struct net_device *ndev, int q)
le32_to_cpu(desc->dptr)))
dma_unmap_single(ndev->dev.parent,
le32_to_cpu(desc->dptr),
- priv->rx_buf_sz,
+ RX_BUF_SZ,
DMA_FROM_DEVICE);
}
ring_size = sizeof(struct ravb_ex_rx_desc) *
@@ -293,9 +293,9 @@ static void ravb_ring_format(struct net_device *ndev, int q)
for (i = 0; i < priv->num_rx_ring[q]; i++) {
/* RX descriptor */
rx_desc = &priv->rx_ring[q][i];
- rx_desc->ds_cc = cpu_to_le16(priv->rx_buf_sz);
+ rx_desc->ds_cc = cpu_to_le16(RX_BUF_SZ);
dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data,
- priv->rx_buf_sz,
+ RX_BUF_SZ,
DMA_FROM_DEVICE);
/* We just set the data size to 0 for a failed mapping which
* should prevent DMA from happening...
@@ -342,9 +342,6 @@ static int ravb_ring_init(struct net_device *ndev, int q)
int ring_size;
int i;
- priv->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ : ndev->mtu) +
- ETH_HLEN + VLAN_HLEN + sizeof(__sum16);
-
/* Allocate RX and TX skb rings */
priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q],
sizeof(*priv->rx_skb[q]), GFP_KERNEL);
@@ -354,7 +351,7 @@ static int ravb_ring_init(struct net_device *ndev, int q)
goto error;
for (i = 0; i < priv->num_rx_ring[q]; i++) {
- skb = netdev_alloc_skb(ndev, priv->rx_buf_sz + RAVB_ALIGN - 1);
+ skb = netdev_alloc_skb(ndev, RX_BUF_SZ + RAVB_ALIGN - 1);
if (!skb)
goto error;
ravb_set_buffer_align(skb);
@@ -584,7 +581,7 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q)
skb = priv->rx_skb[q][entry];
priv->rx_skb[q][entry] = NULL;
dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
- priv->rx_buf_sz,
+ RX_BUF_SZ,
DMA_FROM_DEVICE);
get_ts &= (q == RAVB_NC) ?
RAVB_RXTSTAMP_TYPE_V2_L2_EVENT :
@@ -617,11 +614,11 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q)
for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
desc = &priv->rx_ring[q][entry];
- desc->ds_cc = cpu_to_le16(priv->rx_buf_sz);
+ desc->ds_cc = cpu_to_le16(RX_BUF_SZ);
if (!priv->rx_skb[q][entry]) {
skb = netdev_alloc_skb(ndev,
- priv->rx_buf_sz +
+ RX_BUF_SZ +
RAVB_ALIGN - 1);
if (!skb)
break; /* Better luck next round. */
@@ -1801,10 +1798,15 @@ static int ravb_do_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
static int ravb_change_mtu(struct net_device *ndev, int new_mtu)
{
- if (netif_running(ndev))
- return -EBUSY;
+ struct ravb_private *priv = netdev_priv(ndev);
ndev->mtu = new_mtu;
+
+ if (netif_running(ndev)) {
+ synchronize_irq(priv->emac_irq);
+ ravb_emac_init(ndev);
+ }
+
netdev_update_features(ndev);
return 0;
@@ -2046,7 +2048,9 @@ static int ravb_probe(struct platform_device *pdev)
spin_lock_init(&priv->lock);
INIT_WORK(&priv->work, ravb_tx_timeout_work);
- priv->phy_interface = of_get_phy_mode(np);
+ error = of_get_phy_mode(np, &priv->phy_interface);
+ if (error && error != -ENODEV)
+ goto out_release;
priv->no_avb_link = of_property_read_bool(np, "renesas,no-ether-link");
priv->avb_link_active_low =
diff --git a/drivers/net/ethernet/renesas/ravb_ptp.c b/drivers/net/ethernet/renesas/ravb_ptp.c
index 9a42580693cb..6984bd5b7da9 100644
--- a/drivers/net/ethernet/renesas/ravb_ptp.c
+++ b/drivers/net/ethernet/renesas/ravb_ptp.c
@@ -182,6 +182,13 @@ static int ravb_ptp_extts(struct ptp_clock_info *ptp,
struct net_device *ndev = priv->ndev;
unsigned long flags;
+ /* Reject requests with unsupported flags */
+ if (req->flags & ~(PTP_ENABLE_FEATURE |
+ PTP_RISING_EDGE |
+ PTP_FALLING_EDGE |
+ PTP_STRICT_FLAGS))
+ return -EOPNOTSUPP;
+
if (req->index)
return -EINVAL;
@@ -211,6 +218,10 @@ static int ravb_ptp_perout(struct ptp_clock_info *ptp,
unsigned long flags;
int error = 0;
+ /* Reject requests with unsupported flags */
+ if (req->flags)
+ return -EOPNOTSUPP;
+
if (req->index)
return -EINVAL;
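
These two checks make the driver reject ioctl flag bits it does not implement instead of silently ignoring them. From userspace the guarded path looks roughly like this (names from <linux/ptp_clock.h>; fd and device path are illustrative):

        struct ptp_extts_request req = {
                .index = 0,
                .flags = PTP_ENABLE_FEATURE | PTP_RISING_EDGE,
        };

        /* fd is an open /dev/ptp0; any flag outside the four the
         * driver accepts now fails with EOPNOTSUPP instead of being
         * silently swallowed */
        if (ioctl(fd, PTP_EXTTS_REQUEST, &req))
                perror("PTP_EXTTS_REQUEST");
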
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 7ba35a0bdb29..e19b49c4013e 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -3183,6 +3183,7 @@ static struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev)
{
struct device_node *np = dev->of_node;
struct sh_eth_plat_data *pdata;
+ phy_interface_t interface;
const char *mac_addr;
int ret;
@@ -3190,10 +3191,10 @@ static struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev)
if (!pdata)
return NULL;
- ret = of_get_phy_mode(np);
- if (ret < 0)
+ ret = of_get_phy_mode(np, &interface);
+ if (ret)
return NULL;
- pdata->phy_interface = ret;
+ pdata->phy_interface = interface;
mac_addr = of_get_mac_address(np);
if (!IS_ERR(mac_addr))
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index 786b158bd305..bc4f951315da 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -2189,9 +2189,6 @@ static int rocker_router_fib_event(struct notifier_block *nb,
struct rocker_fib_event_work *fib_work;
struct fib_notifier_info *info = ptr;
- if (!net_eq(info->net, &init_net))
- return NOTIFY_DONE;
-
if (info->family != AF_INET)
return NOTIFY_DONE;
@@ -2994,7 +2991,7 @@ static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
* the device, so no need to pass a callback.
*/
rocker->fib_nb.notifier_call = rocker_router_fib_event;
- err = register_fib_notifier(&rocker->fib_nb, NULL);
+ err = register_fib_notifier(&init_net, &rocker->fib_nb, NULL, NULL);
if (err)
goto err_register_fib_notifier;
@@ -3021,7 +3018,7 @@ static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err_register_switchdev_blocking_notifier:
unregister_switchdev_notifier(&rocker_switchdev_notifier);
err_register_switchdev_notifier:
- unregister_fib_notifier(&rocker->fib_nb);
+ unregister_fib_notifier(&init_net, &rocker->fib_nb);
err_register_fib_notifier:
rocker_remove_ports(rocker);
err_probe_ports:
@@ -3057,7 +3054,7 @@ static void rocker_remove(struct pci_dev *pdev)
unregister_switchdev_blocking_notifier(nb);
unregister_switchdev_notifier(&rocker_switchdev_notifier);
- unregister_fib_notifier(&rocker->fib_nb);
+ unregister_fib_notifier(&init_net, &rocker->fib_nb);
rocker_remove_ports(rocker);
rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);
destroy_workqueue(rocker->rocker_owq);
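
The rocker conversion mirrors the register_fib_notifier() signature change in this series: the notifier is now registered against a specific struct net, so events from other namespaces never reach the callback, and the removed net_eq() guard in rocker_router_fib_event() was redundant by construction.

        /* per-netns registration: only init_net's FIB events are
         * delivered, making an in-callback namespace check redundant */
        err = register_fib_notifier(&init_net, &rocker->fib_nb, NULL, NULL);
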
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
index 2412c87561e0..33f79402850d 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
@@ -30,12 +30,15 @@ static int sxgbe_probe_config_dt(struct platform_device *pdev,
{
struct device_node *np = pdev->dev.of_node;
struct sxgbe_dma_cfg *dma_cfg;
+ int err;
if (!np)
return -ENODEV;
*mac = of_get_mac_address(np);
- plat->interface = of_get_phy_mode(np);
+ err = of_get_phy_mode(np, &plat->interface);
+ if (err && err != -ENODEV)
+ return err;
plat->bus_id = of_alias_get_id(np, "ethernet");
if (plat->bus_id < 0)
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 0ec13f520e90..4d9bbccc6f89 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -946,8 +946,10 @@ static int efx_ef10_link_piobufs(struct efx_nic *efx)
/* Extra channels, even those with TXQs (PTP), do not require
* PIO resources.
*/
- if (!channel->type->want_pio)
+ if (!channel->type->want_pio ||
+ channel->channel >= efx->xdp_channel_offset)
continue;
+
efx_for_each_channel_tx_queue(tx_queue, channel) {
/* We assign the PIO buffers to queues in
* reverse order to allow for the following
@@ -1296,8 +1298,9 @@ static int efx_ef10_dimension_resources(struct efx_nic *efx)
int rc;
channel_vis = max(efx->n_channels,
- (efx->n_tx_channels + efx->n_extra_tx_channels) *
- EFX_TXQ_TYPES);
+ ((efx->n_tx_channels + efx->n_extra_tx_channels) *
+ EFX_TXQ_TYPES) +
+ efx->n_xdp_channels * efx->xdp_tx_per_channel);
#ifdef EFX_USE_PIO
/* Try to allocate PIO buffers if wanted and if the full
@@ -2434,11 +2437,12 @@ static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
/* TSOv2 is a limited resource that can only be configured on a limited
* number of queues. TSO without checksum offload is not really a thing,
* so we only enable it for those queues.
- * TSOv2 cannot be used with Hardware timestamping.
+ * TSOv2 cannot be used with Hardware timestamping, and is never needed
+ * for XDP tx.
*/
if (csum_offload && (nic_data->datapath_caps2 &
(1 << MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_LBN)) &&
- !tx_queue->timestamping) {
+ !tx_queue->timestamping && !tx_queue->xdp_tx) {
tso_v2 = true;
netif_dbg(efx, hw, efx->net_dev, "Using TSOv2 for channel %u\n",
channel->channel);
@@ -4198,11 +4202,15 @@ static int efx_ef10_filter_push(struct efx_nic *efx,
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN);
MCDI_DECLARE_BUF(outbuf, MC_CMD_FILTER_OP_EXT_OUT_LEN);
+ size_t outlen;
int rc;
efx_ef10_filter_push_prep(efx, spec, inbuf, *handle, ctx, replacing);
- rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf),
- outbuf, sizeof(outbuf), NULL);
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc && spec->priority != EFX_FILTER_PRI_HINT)
+ efx_mcdi_display_error(efx, MC_CMD_FILTER_OP, sizeof(inbuf),
+ outbuf, outlen, rc);
if (rc == 0)
*handle = MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE);
if (rc == -ENOSPC)
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 2fef7402233e..992c773620ec 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -226,6 +226,10 @@ static void efx_fini_napi_channel(struct efx_channel *channel);
static void efx_fini_struct(struct efx_nic *efx);
static void efx_start_all(struct efx_nic *efx);
static void efx_stop_all(struct efx_nic *efx);
+static int efx_xdp_setup_prog(struct efx_nic *efx, struct bpf_prog *prog);
+static int efx_xdp(struct net_device *dev, struct netdev_bpf *xdp);
+static int efx_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **xdpfs,
+ u32 flags);
#define EFX_ASSERT_RESET_SERIALISED(efx) \
do { \
@@ -340,6 +344,8 @@ static int efx_poll(struct napi_struct *napi, int budget)
spent = efx_process_channel(channel, budget);
+ xdp_do_flush_map();
+
if (spent < budget) {
if (efx_channel_has_rx_queue(channel) &&
efx->irq_rx_adaptive &&
@@ -349,7 +355,7 @@ static int efx_poll(struct napi_struct *napi, int budget)
#ifdef CONFIG_RFS_ACCEL
/* Perhaps expire some ARFS filters */
- schedule_work(&channel->filter_work);
+ mod_delayed_work(system_wq, &channel->filter_work, 0);
#endif
/* There is no race here; although napi_disable() will
@@ -481,7 +487,7 @@ efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel)
}
#ifdef CONFIG_RFS_ACCEL
- INIT_WORK(&channel->filter_work, efx_filter_rfs_expire);
+ INIT_DELAYED_WORK(&channel->filter_work, efx_filter_rfs_expire);
#endif
rx_queue = &channel->rx_queue;
@@ -527,7 +533,7 @@ efx_copy_channel(const struct efx_channel *old_channel)
memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd));
timer_setup(&rx_queue->slow_fill, efx_rx_slow_fill, 0);
#ifdef CONFIG_RFS_ACCEL
- INIT_WORK(&channel->filter_work, efx_filter_rfs_expire);
+ INIT_DELAYED_WORK(&channel->filter_work, efx_filter_rfs_expire);
#endif
return channel;
@@ -579,9 +585,14 @@ efx_get_channel_name(struct efx_channel *channel, char *buf, size_t len)
int number;
number = channel->channel;
- if (efx->tx_channel_offset == 0) {
+
+ if (number >= efx->xdp_channel_offset &&
+ !WARN_ON_ONCE(!efx->n_xdp_channels)) {
+ type = "-xdp";
+ number -= efx->xdp_channel_offset;
+ } else if (efx->tx_channel_offset == 0) {
type = "";
- } else if (channel->channel < efx->tx_channel_offset) {
+ } else if (number < efx->tx_channel_offset) {
type = "-rx";
} else {
type = "-tx";
@@ -651,7 +662,7 @@ static void efx_start_datapath(struct efx_nic *efx)
efx->rx_dma_len = (efx->rx_prefix_size +
EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
efx->type->rx_buffer_padding);
- rx_buf_len = (sizeof(struct efx_rx_page_state) +
+ rx_buf_len = (sizeof(struct efx_rx_page_state) + XDP_PACKET_HEADROOM +
efx->rx_ip_align + efx->rx_dma_len);
if (rx_buf_len <= PAGE_SIZE) {
efx->rx_scatter = efx->type->always_rx_scatter;
@@ -774,6 +785,7 @@ static void efx_stop_datapath(struct efx_nic *efx)
efx_for_each_possible_channel_tx_queue(tx_queue, channel)
efx_fini_tx_queue(tx_queue);
}
+ efx->xdp_rxq_info_failed = false;
}
static void efx_remove_channel(struct efx_channel *channel)
@@ -798,6 +810,8 @@ static void efx_remove_channels(struct efx_nic *efx)
efx_for_each_channel(channel, efx)
efx_remove_channel(channel);
+
+ kfree(efx->xdp_tx_queues);
}
int
@@ -1435,6 +1449,101 @@ static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
return count;
}
+static int efx_allocate_msix_channels(struct efx_nic *efx,
+ unsigned int max_channels,
+ unsigned int extra_channels,
+ unsigned int parallelism)
+{
+ unsigned int n_channels = parallelism;
+ int vec_count;
+ int n_xdp_tx;
+ int n_xdp_ev;
+
+ if (efx_separate_tx_channels)
+ n_channels *= 2;
+ n_channels += extra_channels;
+
+ /* To allow XDP transmit to happen from arbitrary NAPI contexts
+ * we allocate a TX queue per CPU. We share event queues across
+ * multiple tx queues, assuming tx and ev queues are both
+ * maximum size.
+ */
+
+ n_xdp_tx = num_possible_cpus();
+ n_xdp_ev = DIV_ROUND_UP(n_xdp_tx, EFX_TXQ_TYPES);
+
+ /* Check resources.
+ * We need a channel per event queue, plus a VI per tx queue.
+ * This may be more pessimistic than it needs to be.
+ */
+ if (n_channels + n_xdp_ev > max_channels) {
+ netif_err(efx, drv, efx->net_dev,
+ "Insufficient resources for %d XDP event queues (%d other channels, max %d)\n",
+ n_xdp_ev, n_channels, max_channels);
+ efx->n_xdp_channels = 0;
+ efx->xdp_tx_per_channel = 0;
+ efx->xdp_tx_queue_count = 0;
+ } else {
+ efx->n_xdp_channels = n_xdp_ev;
+ efx->xdp_tx_per_channel = EFX_TXQ_TYPES;
+ efx->xdp_tx_queue_count = n_xdp_tx;
+ n_channels += n_xdp_ev;
+ netif_dbg(efx, drv, efx->net_dev,
+ "Allocating %d TX and %d event queues for XDP\n",
+ n_xdp_tx, n_xdp_ev);
+ }
+
+ n_channels = min(n_channels, max_channels);
+
+ vec_count = pci_msix_vec_count(efx->pci_dev);
+ if (vec_count < 0)
+ return vec_count;
+ if (vec_count < n_channels) {
+ netif_err(efx, drv, efx->net_dev,
+ "WARNING: Insufficient MSI-X vectors available (%d < %u).\n",
+ vec_count, n_channels);
+ netif_err(efx, drv, efx->net_dev,
+ "WARNING: Performance may be reduced.\n");
+ n_channels = vec_count;
+ }
+
+ efx->n_channels = n_channels;
+
+ /* Do not create the PTP TX queue(s) if PTP uses the MC directly. */
+ if (extra_channels && !efx_ptp_use_mac_tx_timestamps(efx))
+ n_channels--;
+
+ /* Ignore XDP tx channels when creating rx channels. */
+ n_channels -= efx->n_xdp_channels;
+
+ if (efx_separate_tx_channels) {
+ efx->n_tx_channels =
+ min(max(n_channels / 2, 1U),
+ efx->max_tx_channels);
+ efx->tx_channel_offset =
+ n_channels - efx->n_tx_channels;
+ efx->n_rx_channels =
+ max(n_channels -
+ efx->n_tx_channels, 1U);
+ } else {
+ efx->n_tx_channels = min(n_channels, efx->max_tx_channels);
+ efx->tx_channel_offset = 0;
+ efx->n_rx_channels = n_channels;
+ }
+
+ if (efx->n_xdp_channels)
+ efx->xdp_channel_offset = efx->tx_channel_offset +
+ efx->n_tx_channels;
+ else
+ efx->xdp_channel_offset = efx->n_channels;
+
+ netif_dbg(efx, drv, efx->net_dev,
+ "Allocating %u RX channels\n",
+ efx->n_rx_channels);
+
+ return efx->n_channels;
+}
+
/* Probe the number and type of interrupts we are able to obtain, and
* the resulting numbers of channels and RX queues.
*/
@@ -1449,19 +1558,19 @@ static int efx_probe_interrupts(struct efx_nic *efx)
++extra_channels;
if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
+ unsigned int parallelism = efx_wanted_parallelism(efx);
struct msix_entry xentries[EFX_MAX_CHANNELS];
unsigned int n_channels;
- n_channels = efx_wanted_parallelism(efx);
- if (efx_separate_tx_channels)
- n_channels *= 2;
- n_channels += extra_channels;
- n_channels = min(n_channels, efx->max_channels);
-
- for (i = 0; i < n_channels; i++)
- xentries[i].entry = i;
- rc = pci_enable_msix_range(efx->pci_dev,
- xentries, 1, n_channels);
+ rc = efx_allocate_msix_channels(efx, efx->max_channels,
+ extra_channels, parallelism);
+ if (rc >= 0) {
+ n_channels = rc;
+ for (i = 0; i < n_channels; i++)
+ xentries[i].entry = i;
+ rc = pci_enable_msix_range(efx->pci_dev, xentries, 1,
+ n_channels);
+ }
if (rc < 0) {
/* Fall back to single channel MSI */
netif_err(efx, drv, efx->net_dev,
@@ -1480,21 +1589,6 @@ static int efx_probe_interrupts(struct efx_nic *efx)
}
if (rc > 0) {
- efx->n_channels = n_channels;
- if (n_channels > extra_channels)
- n_channels -= extra_channels;
- if (efx_separate_tx_channels) {
- efx->n_tx_channels = min(max(n_channels / 2,
- 1U),
- efx->max_tx_channels);
- efx->n_rx_channels = max(n_channels -
- efx->n_tx_channels,
- 1U);
- } else {
- efx->n_tx_channels = min(n_channels,
- efx->max_tx_channels);
- efx->n_rx_channels = n_channels;
- }
for (i = 0; i < efx->n_channels; i++)
efx_get_channel(efx, i)->irq =
xentries[i].vector;
@@ -1506,6 +1600,8 @@ static int efx_probe_interrupts(struct efx_nic *efx)
efx->n_channels = 1;
efx->n_rx_channels = 1;
efx->n_tx_channels = 1;
+ efx->n_xdp_channels = 0;
+ efx->xdp_channel_offset = efx->n_channels;
rc = pci_enable_msi(efx->pci_dev);
if (rc == 0) {
efx_get_channel(efx, 0)->irq = efx->pci_dev->irq;
@@ -1524,12 +1620,14 @@ static int efx_probe_interrupts(struct efx_nic *efx)
efx->n_channels = 1 + (efx_separate_tx_channels ? 1 : 0);
efx->n_rx_channels = 1;
efx->n_tx_channels = 1;
+ efx->n_xdp_channels = 0;
+ efx->xdp_channel_offset = efx->n_channels;
efx->legacy_irq = efx->pci_dev->irq;
}
- /* Assign extra channels if possible */
+ /* Assign extra channels if possible, before XDP channels */
efx->n_extra_tx_channels = 0;
- j = efx->n_channels;
+ j = efx->xdp_channel_offset;
for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++) {
if (!efx->extra_channel_type[i])
continue;
@@ -1724,29 +1822,50 @@ static void efx_remove_interrupts(struct efx_nic *efx)
efx->legacy_irq = 0;
}
-static void efx_set_channels(struct efx_nic *efx)
+static int efx_set_channels(struct efx_nic *efx)
{
struct efx_channel *channel;
struct efx_tx_queue *tx_queue;
+ int xdp_queue_number;
efx->tx_channel_offset =
efx_separate_tx_channels ?
efx->n_channels - efx->n_tx_channels : 0;
+ if (efx->xdp_tx_queue_count) {
+ EFX_WARN_ON_PARANOID(efx->xdp_tx_queues);
+
+ /* Allocate array for XDP TX queue lookup. */
+ efx->xdp_tx_queues = kcalloc(efx->xdp_tx_queue_count,
+ sizeof(*efx->xdp_tx_queues),
+ GFP_KERNEL);
+ if (!efx->xdp_tx_queues)
+ return -ENOMEM;
+ }
+
/* We need to mark which channels really have RX and TX
* queues, and adjust the TX queue numbers if we have separate
* RX-only and TX-only channels.
*/
+ xdp_queue_number = 0;
efx_for_each_channel(channel, efx) {
if (channel->channel < efx->n_rx_channels)
channel->rx_queue.core_index = channel->channel;
else
channel->rx_queue.core_index = -1;
- efx_for_each_channel_tx_queue(tx_queue, channel)
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
tx_queue->queue -= (efx->tx_channel_offset *
EFX_TXQ_TYPES);
+
+ if (efx_channel_is_xdp_tx(channel) &&
+ xdp_queue_number < efx->xdp_tx_queue_count) {
+ efx->xdp_tx_queues[xdp_queue_number] = tx_queue;
+ xdp_queue_number++;
+ }
+ }
}
+ return 0;
}
static int efx_probe_nic(struct efx_nic *efx)
@@ -1776,7 +1895,9 @@ static int efx_probe_nic(struct efx_nic *efx)
if (rc)
goto fail1;
- efx_set_channels(efx);
+ rc = efx_set_channels(efx);
+ if (rc)
+ goto fail1;
/* dimension_resources can fail with EAGAIN */
rc = efx->type->dimension_resources(efx);
@@ -1848,6 +1969,8 @@ static int efx_probe_filters(struct efx_nic *efx)
++i)
channel->rps_flow_id[i] =
RPS_FLOW_ID_INVALID;
+ channel->rfs_expire_index = 0;
+ channel->rfs_filter_count = 0;
}
if (!success) {
@@ -1857,8 +1980,6 @@ static int efx_probe_filters(struct efx_nic *efx)
rc = -ENOMEM;
goto out_unlock;
}
-
- efx->rps_expire_index = efx->rps_expire_channel = 0;
}
#endif
out_unlock:
@@ -1872,8 +1993,10 @@ static void efx_remove_filters(struct efx_nic *efx)
#ifdef CONFIG_RFS_ACCEL
struct efx_channel *channel;
- efx_for_each_channel(channel, efx)
+ efx_for_each_channel(channel, efx) {
+ cancel_delayed_work_sync(&channel->filter_work);
kfree(channel->rps_flow_id);
+ }
#endif
down_write(&efx->filter_sem);
efx->type->filter_table_remove(efx);
@@ -2022,6 +2145,10 @@ static void efx_stop_all(struct efx_nic *efx)
static void efx_remove_all(struct efx_nic *efx)
{
+ rtnl_lock();
+ efx_xdp_setup_prog(efx, NULL);
+ rtnl_unlock();
+
efx_remove_channels(efx);
efx_remove_filters(efx);
#ifdef CONFIG_SFC_SRIOV
@@ -2082,6 +2209,8 @@ int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
channel->irq_moderation_us = rx_usecs;
else if (efx_channel_has_tx_queues(channel))
channel->irq_moderation_us = tx_usecs;
+ else if (efx_channel_is_xdp_tx(channel))
+ channel->irq_moderation_us = tx_usecs;
}
return 0;
@@ -2277,6 +2406,17 @@ static void efx_watchdog(struct net_device *net_dev)
efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG);
}
+static unsigned int efx_xdp_max_mtu(struct efx_nic *efx)
+{
+ /* The maximum MTU that we can fit in a single page, allowing for
+ * framing, overhead and XDP headroom.
+ */
+ int overhead = EFX_MAX_FRAME_LEN(0) + sizeof(struct efx_rx_page_state) +
+ efx->rx_prefix_size + efx->type->rx_buffer_padding +
+ efx->rx_ip_align + XDP_PACKET_HEADROOM;
+
+ return PAGE_SIZE - overhead;
+}
/* Context: process, rtnl_lock() held. */
static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
@@ -2288,6 +2428,14 @@ static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
if (rc)
return rc;
+ if (rtnl_dereference(efx->xdp_prog) &&
+ new_mtu > efx_xdp_max_mtu(efx)) {
+ netif_err(efx, drv, efx->net_dev,
+ "Requested MTU of %d too big for XDP (max: %d)\n",
+ new_mtu, efx_xdp_max_mtu(efx));
+ return -EINVAL;
+ }
+
netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);
efx_device_detach_sync(efx);
@@ -2489,8 +2637,65 @@ static const struct net_device_ops efx_netdev_ops = {
#endif
.ndo_udp_tunnel_add = efx_udp_tunnel_add,
.ndo_udp_tunnel_del = efx_udp_tunnel_del,
+ .ndo_xdp_xmit = efx_xdp_xmit,
+ .ndo_bpf = efx_xdp
};
+static int efx_xdp_setup_prog(struct efx_nic *efx, struct bpf_prog *prog)
+{
+ struct bpf_prog *old_prog;
+
+ if (efx->xdp_rxq_info_failed) {
+ netif_err(efx, drv, efx->net_dev,
+ "Unable to bind XDP program due to previous failure of rxq_info\n");
+ return -EINVAL;
+ }
+
+ if (prog && efx->net_dev->mtu > efx_xdp_max_mtu(efx)) {
+ netif_err(efx, drv, efx->net_dev,
+ "Unable to configure XDP with MTU of %d (max: %d)\n",
+ efx->net_dev->mtu, efx_xdp_max_mtu(efx));
+ return -EINVAL;
+ }
+
+ old_prog = rtnl_dereference(efx->xdp_prog);
+ rcu_assign_pointer(efx->xdp_prog, prog);
+ /* Release the reference that was originally passed by the caller. */
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+ return 0;
+}
+
+/* Context: process, rtnl_lock() held. */
+static int efx_xdp(struct net_device *dev, struct netdev_bpf *xdp)
+{
+ struct efx_nic *efx = netdev_priv(dev);
+ struct bpf_prog *xdp_prog;
+
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ return efx_xdp_setup_prog(efx, xdp->prog);
+ case XDP_QUERY_PROG:
+ xdp_prog = rtnl_dereference(efx->xdp_prog);
+ xdp->prog_id = xdp_prog ? xdp_prog->aux->id : 0;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int efx_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **xdpfs,
+ u32 flags)
+{
+ struct efx_nic *efx = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ return efx_xdp_tx_buffers(efx, n, xdpfs, flags & XDP_XMIT_FLUSH);
+}
+
static void efx_update_name(struct efx_nic *efx)
{
strcpy(efx->name, efx->net_dev->name);
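Taken together, the efx.c changes wire XDP in three places: attach-time validation plus an RCU pointer swap in efx_xdp_setup_prog(), a per-poll xdp_do_flush_map() so redirected frames are batched per NAPI cycle, and an MTU guard in both directions. The attach pattern in isolation, as a sketch with hypothetical example_ names:

static int example_xdp_setup(struct example_nic *nic, struct bpf_prog *prog)
{
        struct bpf_prog *old_prog;

        /* Refuse programs that could see frames spanning buffers. */
        if (prog && nic->netdev->mtu > example_xdp_max_mtu(nic))
                return -EINVAL;

        /* rtnl is held here, so rtnl_dereference() is sufficient;
         * readers in the RX path use rcu_dereference() instead.
         */
        old_prog = rtnl_dereference(nic->xdp_prog);
        rcu_assign_pointer(nic->xdp_prog, prog);
        if (old_prog)
                bpf_prog_put(old_prog); /* drop the displaced reference */

        return 0;
}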
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index 04fed7c06618..2dd8d5002315 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -166,15 +166,20 @@ static inline s32 efx_filter_get_rx_ids(struct efx_nic *efx,
#ifdef CONFIG_RFS_ACCEL
int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
u16 rxq_index, u32 flow_id);
-bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned quota);
+bool __efx_filter_rfs_expire(struct efx_channel *channel, unsigned int quota);
static inline void efx_filter_rfs_expire(struct work_struct *data)
{
- struct efx_channel *channel = container_of(data, struct efx_channel,
- filter_work);
-
- if (channel->rfs_filters_added >= 60 &&
- __efx_filter_rfs_expire(channel->efx, 100))
- channel->rfs_filters_added -= 60;
+ struct delayed_work *dwork = to_delayed_work(data);
+ struct efx_channel *channel;
+ unsigned int time, quota;
+
+ channel = container_of(dwork, struct efx_channel, filter_work);
+ time = jiffies - channel->rfs_last_expiry;
+ quota = channel->rfs_filter_count * time / (30 * HZ);
+ if (quota > 20 && __efx_filter_rfs_expire(channel, min(channel->rfs_filter_count, quota)))
+ channel->rfs_last_expiry += time;
+ /* Ensure we do more work eventually even if NAPI poll is not happening */
+ schedule_delayed_work(dwork, 30 * HZ);
}
#define efx_filter_rfs_enabled() 1
#else
@@ -322,4 +327,7 @@ static inline bool efx_rwsem_assert_write_locked(struct rw_semaphore *sem)
return true;
}
+int efx_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs,
+ bool flush);
+
#endif /* EFX_EFX_H */
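The rewritten expiry helper above scales its work to table occupancy: quota = rfs_filter_count * time / (30 * HZ) aims to walk every installed filter roughly once per 30 seconds, however irregular the polling. A worked example under assumed numbers:

/* Assume HZ = 250, 600 filters installed, and the last expiry pass
 * ran 3 seconds (750 jiffies) ago:
 *
 *      quota = 600 * 750 / (30 * 250) = 60
 *
 * so 60 slots are checked this time; ten such passes cover the whole
 * table in ~30 s. The "quota > 20" floor skips passes too small to
 * justify taking rps_mutex, and the deficit rolls into the next pass
 * because rfs_last_expiry only advances when work was actually done.
 */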
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 86b965875540..b31032da4bcb 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -56,6 +56,9 @@ static u64 efx_get_atomic_stat(void *field)
#define EFX_ETHTOOL_UINT_CHANNEL_STAT(field) \
EFX_ETHTOOL_STAT(field, channel, n_##field, \
unsigned int, efx_get_uint_stat)
+#define EFX_ETHTOOL_UINT_CHANNEL_STAT_NO_N(field) \
+ EFX_ETHTOOL_STAT(field, channel, field, \
+ unsigned int, efx_get_uint_stat)
#define EFX_ETHTOOL_UINT_TXQ_STAT(field) \
EFX_ETHTOOL_STAT(tx_##field, tx_queue, field, \
@@ -83,6 +86,15 @@ static const struct efx_sw_stat_desc efx_sw_stat_desc[] = {
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_frm_trunc),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_merge_events),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_merge_packets),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_drops),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_bad_drops),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_tx),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_redirect),
+#ifdef CONFIG_RFS_ACCEL
+ EFX_ETHTOOL_UINT_CHANNEL_STAT_NO_N(rfs_filter_count),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rfs_succeeded),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rfs_failed),
+#endif
};
#define EFX_ETHTOOL_SW_STAT_COUNT ARRAY_SIZE(efx_sw_stat_desc)
@@ -399,6 +411,19 @@ static size_t efx_describe_per_queue_stats(struct efx_nic *efx, u8 *strings)
}
}
}
+ if (efx->xdp_tx_queue_count && efx->xdp_tx_queues) {
+ unsigned short xdp;
+
+ for (xdp = 0; xdp < efx->xdp_tx_queue_count; xdp++) {
+ n_stats++;
+ if (strings) {
+ snprintf(strings, ETH_GSTRING_LEN,
+ "tx-xdp-cpu-%hu.tx_packets", xdp);
+ strings += ETH_GSTRING_LEN;
+ }
+ }
+ }
+
return n_stats;
}
@@ -509,6 +534,14 @@ static void efx_ethtool_get_stats(struct net_device *net_dev,
data++;
}
}
+ if (efx->xdp_tx_queue_count && efx->xdp_tx_queues) {
+ int xdp;
+
+ for (xdp = 0; xdp < efx->xdp_tx_queue_count; xdp++) {
+ data[0] = efx->xdp_tx_queues[xdp]->tx_packets;
+ data++;
+ }
+ }
efx_ptp_update_stats(efx, data);
}
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 284a1b047ac2..1f88212be085 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -27,6 +27,7 @@
#include <linux/i2c.h>
#include <linux/mtd/mtd.h>
#include <net/busy_poll.h>
+#include <net/xdp.h>
#include "enum.h"
#include "bitfield.h"
@@ -136,7 +137,8 @@ struct efx_special_buffer {
* struct efx_tx_buffer - buffer state for a TX descriptor
* @skb: When @flags & %EFX_TX_BUF_SKB, the associated socket buffer to be
* freed when descriptor completes
- * @option: When @flags & %EFX_TX_BUF_OPTION, a NIC-specific option descriptor.
+ * @xdpf: When @flags & %EFX_TX_BUF_XDP, the XDP frame information; its @data
+ * member is the associated buffer to drop a page reference on.
* @dma_addr: DMA address of the fragment.
* @flags: Flags for allocation and DMA mapping type
* @len: Length of this fragment.
@@ -146,7 +148,10 @@ struct efx_special_buffer {
* Only valid if @unmap_len != 0.
*/
struct efx_tx_buffer {
- const struct sk_buff *skb;
+ union {
+ const struct sk_buff *skb;
+ struct xdp_frame *xdpf;
+ };
union {
efx_qword_t option;
dma_addr_t dma_addr;
@@ -160,6 +165,7 @@ struct efx_tx_buffer {
#define EFX_TX_BUF_SKB 2 /* buffer is last part of skb */
#define EFX_TX_BUF_MAP_SINGLE 8 /* buffer was mapped with dma_map_single() */
#define EFX_TX_BUF_OPTION 0x10 /* empty buffer for option descriptor */
+#define EFX_TX_BUF_XDP 0x20 /* buffer was sent with XDP */
/**
* struct efx_tx_queue - An Efx TX queue
@@ -189,6 +195,7 @@ struct efx_tx_buffer {
* @piobuf_offset: Buffer offset to be specified in PIO descriptors
* @initialised: Has hardware queue been initialised?
* @timestamping: Is timestamping enabled for this channel?
+ * @xdp_tx: Is this an XDP tx queue?
* @handle_tso: TSO xmit preparation handler. Sets up the TSO metadata and
* may also map tx data, depending on the nature of the TSO implementation.
* @read_count: Current read pointer.
@@ -250,6 +257,7 @@ struct efx_tx_queue {
unsigned int piobuf_offset;
bool initialised;
bool timestamping;
+ bool xdp_tx;
/* Function pointers used in the fast path. */
int (*handle_tso)(struct efx_tx_queue*, struct sk_buff*, bool *);
@@ -363,6 +371,8 @@ struct efx_rx_page_state {
* refill was triggered.
* @recycle_count: RX buffer recycle counter.
* @slow_fill: Timer used to defer efx_nic_generate_fill_event().
+ * @xdp_rxq_info: XDP specific RX queue information.
+ * @xdp_rxq_info_valid: Is xdp_rxq_info valid data?
*/
struct efx_rx_queue {
struct efx_nic *efx;
@@ -394,6 +404,8 @@ struct efx_rx_queue {
unsigned int slow_fill_count;
/* Statistics to supplement MAC stats */
unsigned long rx_packets;
+ struct xdp_rxq_info xdp_rxq_info;
+ bool xdp_rxq_info_valid;
};
enum efx_sync_events_state {
@@ -427,6 +439,13 @@ enum efx_sync_events_state {
* @event_test_cpu: Last CPU to handle interrupt or test event for this channel
* @irq_count: Number of IRQs since last adaptive moderation decision
* @irq_mod_score: IRQ moderation score
+ * @rfs_filter_count: number of accelerated RFS filters currently in place;
+ * equals the count of @rps_flow_id slots filled
+ * @rfs_last_expiry: value of jiffies last time some accelerated RFS filters
+ * were checked for expiry
+ * @rfs_expire_index: next accelerated RFS filter ID to check for expiry
+ * @n_rfs_succeeded: number of successful accelerated RFS filter insertions
+ * @n_rfs_failed: number of failed accelerated RFS filter insertions
* @filter_work: Work item for efx_filter_rfs_expire()
* @rps_flow_id: Flow IDs of filters allocated for accelerated RFS,
* indexed by filter ID
@@ -441,6 +460,10 @@ enum efx_sync_events_state {
* lack of descriptors
* @n_rx_merge_events: Number of RX merged completion events
* @n_rx_merge_packets: Number of RX packets completed by merged events
+ * @n_rx_xdp_drops: Count of RX packets intentionally dropped due to XDP
+ * @n_rx_xdp_bad_drops: Count of RX packets dropped due to XDP errors
+ * @n_rx_xdp_tx: Count of RX packets retransmitted due to XDP
+ * @n_rx_xdp_redirect: Count of RX packets redirected to a different NIC by XDP
* @rx_pkt_n_frags: Number of fragments in next packet to be delivered by
* __efx_rx_packet(), or zero if there is none
* @rx_pkt_index: Ring index of first buffer for next packet to be delivered
@@ -473,8 +496,12 @@ struct efx_channel {
unsigned int irq_count;
unsigned int irq_mod_score;
#ifdef CONFIG_RFS_ACCEL
- unsigned int rfs_filters_added;
- struct work_struct filter_work;
+ unsigned int rfs_filter_count;
+ unsigned int rfs_last_expiry;
+ unsigned int rfs_expire_index;
+ unsigned int n_rfs_succeeded;
+ unsigned int n_rfs_failed;
+ struct delayed_work filter_work;
#define RPS_FLOW_ID_INVALID 0xFFFFFFFF
u32 *rps_flow_id;
#endif
@@ -494,6 +521,10 @@ struct efx_channel {
unsigned int n_rx_nodesc_trunc;
unsigned int n_rx_merge_events;
unsigned int n_rx_merge_packets;
+ unsigned int n_rx_xdp_drops;
+ unsigned int n_rx_xdp_bad_drops;
+ unsigned int n_rx_xdp_tx;
+ unsigned int n_rx_xdp_redirect;
unsigned int rx_pkt_n_frags;
unsigned int rx_pkt_index;
@@ -818,6 +849,8 @@ struct efx_async_filter_insertion {
* @msi_context: Context for each MSI
* @extra_channel_types: Types of extra (non-traffic) channels that
* should be allocated for this NIC
+ * @xdp_tx_queue_count: Number of entries in %xdp_tx_queues.
+ * @xdp_tx_queues: Array of pointers to tx queues used for XDP transmit.
* @rxq_entries: Size of receive queues requested by user.
* @txq_entries: Size of transmit queues requested by user.
* @txq_stop_thresh: TX queue fill level at or above which we stop it.
@@ -830,6 +863,9 @@ struct efx_async_filter_insertion {
* @n_rx_channels: Number of channels used for RX (= number of RX queues)
* @n_tx_channels: Number of channels used for TX
* @n_extra_tx_channels: Number of extra channels with TX queues
+ * @n_xdp_channels: Number of channels used for XDP TX
+ * @xdp_channel_offset: Offset of zeroth channel used for XDP TX.
+ * @xdp_tx_per_channel: Max number of TX queues on an XDP TX channel.
* @rx_ip_align: RX DMA address offset to have IP header aligned
* in accordance with NET_IP_ALIGN
* @rx_dma_len: Current maximum RX DMA length
@@ -894,12 +930,10 @@ struct efx_async_filter_insertion {
* @loopback_mode: Loopback status
* @loopback_modes: Supported loopback mode bitmask
* @loopback_selftest: Offline self-test private state
+ * @xdp_prog: Current XDP programme for this interface
* @filter_sem: Filter table rw_semaphore, protects existence of @filter_state
* @filter_state: Architecture-dependent filter table state
* @rps_mutex: Protects RPS state of all channels
- * @rps_expire_channel: Next channel to check for expiry
- * @rps_expire_index: Next index to check for expiry in
- * @rps_expire_channel's @rps_flow_id
* @rps_slot_map: bitmap of in-flight entries in @rps_slot
* @rps_slot: array of ARFS insertion requests for efx_filter_rfs_work()
* @rps_hash_lock: Protects ARFS filter mapping state (@rps_hash_table and
@@ -919,6 +953,8 @@ struct efx_async_filter_insertion {
* @ptp_data: PTP state data
* @ptp_warned: has this NIC seen and warned about unexpected PTP events?
* @vpd_sn: Serial number read from VPD
+ * @xdp_rxq_info_failed: Have any of the rx queues failed to initialise their
+ * xdp_rxq_info structures?
* @monitor_work: Hardware monitor workitem
* @biu_lock: BIU (bus interface unit) lock
* @last_irq_cpu: Last CPU to handle a possible test interrupt. This
@@ -966,6 +1002,9 @@ struct efx_nic {
const struct efx_channel_type *
extra_channel_type[EFX_MAX_EXTRA_CHANNELS];
+ unsigned int xdp_tx_queue_count;
+ struct efx_tx_queue **xdp_tx_queues;
+
unsigned rxq_entries;
unsigned txq_entries;
unsigned int txq_stop_thresh;
@@ -984,6 +1023,9 @@ struct efx_nic {
unsigned tx_channel_offset;
unsigned n_tx_channels;
unsigned n_extra_tx_channels;
+ unsigned int n_xdp_channels;
+ unsigned int xdp_channel_offset;
+ unsigned int xdp_tx_per_channel;
unsigned int rx_ip_align;
unsigned int rx_dma_len;
unsigned int rx_buffer_order;
@@ -1053,13 +1095,15 @@ struct efx_nic {
u64 loopback_modes;
void *loopback_selftest;
+ /* We access loopback_selftest immediately before running XDP,
+ * so we want them next to each other.
+ */
+ struct bpf_prog __rcu *xdp_prog;
struct rw_semaphore filter_sem;
void *filter_state;
#ifdef CONFIG_RFS_ACCEL
struct mutex rps_mutex;
- unsigned int rps_expire_channel;
- unsigned int rps_expire_index;
unsigned long rps_slot_map;
struct efx_async_filter_insertion rps_slot[EFX_RPS_MAX_IN_FLIGHT];
spinlock_t rps_hash_lock;
@@ -1082,6 +1126,7 @@ struct efx_nic {
bool ptp_warned;
char *vpd_sn;
+ bool xdp_rxq_info_failed;
/* The following fields may be written more often */
@@ -1473,10 +1518,24 @@ efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type)
return &efx->channel[efx->tx_channel_offset + index]->tx_queue[type];
}
+static inline struct efx_channel *
+efx_get_xdp_channel(struct efx_nic *efx, unsigned int index)
+{
+ EFX_WARN_ON_ONCE_PARANOID(index >= efx->n_xdp_channels);
+ return efx->channel[efx->xdp_channel_offset + index];
+}
+
+static inline bool efx_channel_is_xdp_tx(struct efx_channel *channel)
+{
+ return channel->channel - channel->efx->xdp_channel_offset <
+ channel->efx->n_xdp_channels;
+}
+
static inline bool efx_channel_has_tx_queues(struct efx_channel *channel)
{
- return channel->type && channel->type->want_txqs &&
- channel->type->want_txqs(channel);
+ return efx_channel_is_xdp_tx(channel) ||
+ (channel->type && channel->type->want_txqs &&
+ channel->type->want_txqs(channel));
}
static inline struct efx_tx_queue *
@@ -1500,7 +1559,8 @@ static inline bool efx_tx_queue_used(struct efx_tx_queue *tx_queue)
else \
for (_tx_queue = (_channel)->tx_queue; \
_tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES && \
- efx_tx_queue_used(_tx_queue); \
+ (efx_tx_queue_used(_tx_queue) || \
+ efx_channel_is_xdp_tx(_channel)); \
_tx_queue++)
/* Iterate over all possible TX queues belonging to a channel */
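efx_channel_is_xdp_tx() above relies on a standard unsigned-compare idiom: one subtraction plus one comparison tests both range bounds. A sketch of why, assuming offset + count does not itself wrap:

/* With unsigned arithmetic, (x - lo) < n  <=>  lo <= x && x < lo + n,
 * because any x below lo wraps around to a huge value and fails the
 * comparison.
 */
static inline bool example_in_range(unsigned int x, unsigned int lo,
                                    unsigned int n)
{
        return x - lo < n;
}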
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index 02ed6d1b716c..af15a737c675 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -1531,7 +1531,8 @@ void efx_ptp_remove(struct efx_nic *efx)
(void)efx_ptp_disable(efx);
cancel_work_sync(&efx->ptp_data->work);
- cancel_work_sync(&efx->ptp_data->pps_work);
+ if (efx->ptp_data->pps_workwq)
+ cancel_work_sync(&efx->ptp_data->pps_work);
skb_queue_purge(&efx->ptp_data->rxq);
skb_queue_purge(&efx->ptp_data->txq);
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 85ec07f5a674..ef52b24ad9e7 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -17,6 +17,8 @@
#include <linux/iommu.h>
#include <net/ip.h>
#include <net/checksum.h>
+#include <net/xdp.h>
+#include <linux/bpf_trace.h>
#include "net_driver.h"
#include "efx.h"
#include "filter.h"
@@ -27,6 +29,9 @@
/* Preferred number of descriptors to fill at once */
#define EFX_RX_PREFERRED_BATCH 8U
+/* Maximum rx prefix used by any architecture. */
+#define EFX_MAX_RX_PREFIX_SIZE 16
+
/* Number of RX buffers to recycle pages for. When creating the RX page recycle
* ring, this number is divided by the number of buffers per page to calculate
* the number of pages to store in the RX page recycle ring.
@@ -95,7 +100,7 @@ void efx_rx_config_page_split(struct efx_nic *efx)
EFX_RX_BUF_ALIGNMENT);
efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 :
((PAGE_SIZE - sizeof(struct efx_rx_page_state)) /
- efx->rx_page_buf_step);
+ (efx->rx_page_buf_step + XDP_PACKET_HEADROOM));
efx->rx_buffer_truesize = (PAGE_SIZE << efx->rx_buffer_order) /
efx->rx_bufs_per_page;
efx->rx_pages_per_batch = DIV_ROUND_UP(EFX_RX_PREFERRED_BATCH,
@@ -185,6 +190,9 @@ static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue, bool atomic)
page_offset = sizeof(struct efx_rx_page_state);
do {
+ page_offset += XDP_PACKET_HEADROOM;
+ dma_addr += XDP_PACKET_HEADROOM;
+
index = rx_queue->added_count & rx_queue->ptr_mask;
rx_buf = efx_rx_buffer(rx_queue, index);
rx_buf->dma_addr = dma_addr + efx->rx_ip_align;
@@ -635,6 +643,126 @@ static void efx_rx_deliver(struct efx_channel *channel, u8 *eh,
netif_receive_skb(skb);
}
+/** efx_do_xdp: perform XDP processing on a received packet
+ *
+ * Returns true if packet should still be delivered.
+ */
+static bool efx_do_xdp(struct efx_nic *efx, struct efx_channel *channel,
+ struct efx_rx_buffer *rx_buf, u8 **ehp)
+{
+ u8 rx_prefix[EFX_MAX_RX_PREFIX_SIZE];
+ struct efx_rx_queue *rx_queue;
+ struct bpf_prog *xdp_prog;
+ struct xdp_frame *xdpf;
+ struct xdp_buff xdp;
+ u32 xdp_act;
+ s16 offset;
+ int err;
+
+ rcu_read_lock();
+ xdp_prog = rcu_dereference(efx->xdp_prog);
+ if (!xdp_prog) {
+ rcu_read_unlock();
+ return true;
+ }
+
+ rx_queue = efx_channel_get_rx_queue(channel);
+
+ if (unlikely(channel->rx_pkt_n_frags > 1)) {
+ /* We can't do XDP on fragmented packets - drop. */
+ rcu_read_unlock();
+ efx_free_rx_buffers(rx_queue, rx_buf,
+ channel->rx_pkt_n_frags);
+ if (net_ratelimit())
+ netif_err(efx, rx_err, efx->net_dev,
+ "XDP is not possible with multiple receive fragments (%d)\n",
+ channel->rx_pkt_n_frags);
+ channel->n_rx_xdp_bad_drops++;
+ return false;
+ }
+
+ dma_sync_single_for_cpu(&efx->pci_dev->dev, rx_buf->dma_addr,
+ rx_buf->len, DMA_FROM_DEVICE);
+
+ /* Save the rx prefix. */
+ EFX_WARN_ON_PARANOID(efx->rx_prefix_size > EFX_MAX_RX_PREFIX_SIZE);
+ memcpy(rx_prefix, *ehp - efx->rx_prefix_size,
+ efx->rx_prefix_size);
+
+ xdp.data = *ehp;
+ xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
+
+ /* No support yet for XDP metadata */
+ xdp_set_data_meta_invalid(&xdp);
+ xdp.data_end = xdp.data + rx_buf->len;
+ xdp.rxq = &rx_queue->xdp_rxq_info;
+
+ xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);
+ rcu_read_unlock();
+
+ offset = (u8 *)xdp.data - *ehp;
+
+ switch (xdp_act) {
+ case XDP_PASS:
+ /* Fix up rx prefix. */
+ if (offset) {
+ *ehp += offset;
+ rx_buf->page_offset += offset;
+ rx_buf->len -= offset;
+ memcpy(*ehp - efx->rx_prefix_size, rx_prefix,
+ efx->rx_prefix_size);
+ }
+ break;
+
+ case XDP_TX:
+ /* Buffer ownership passes to tx on success. */
+ xdpf = convert_to_xdp_frame(&xdp);
+ err = efx_xdp_tx_buffers(efx, 1, &xdpf, true);
+ if (unlikely(err != 1)) {
+ efx_free_rx_buffers(rx_queue, rx_buf, 1);
+ if (net_ratelimit())
+ netif_err(efx, rx_err, efx->net_dev,
+ "XDP TX failed (%d)\n", err);
+ channel->n_rx_xdp_bad_drops++;
+ trace_xdp_exception(efx->net_dev, xdp_prog, xdp_act);
+ } else {
+ channel->n_rx_xdp_tx++;
+ }
+ break;
+
+ case XDP_REDIRECT:
+ err = xdp_do_redirect(efx->net_dev, &xdp, xdp_prog);
+ if (unlikely(err)) {
+ efx_free_rx_buffers(rx_queue, rx_buf, 1);
+ if (net_ratelimit())
+ netif_err(efx, rx_err, efx->net_dev,
+ "XDP redirect failed (%d)\n", err);
+ channel->n_rx_xdp_bad_drops++;
+ trace_xdp_exception(efx->net_dev, xdp_prog, xdp_act);
+ } else {
+ channel->n_rx_xdp_redirect++;
+ }
+ break;
+
+ default:
+ bpf_warn_invalid_xdp_action(xdp_act);
+ efx_free_rx_buffers(rx_queue, rx_buf, 1);
+ channel->n_rx_xdp_bad_drops++;
+ trace_xdp_exception(efx->net_dev, xdp_prog, xdp_act);
+ break;
+
+ case XDP_ABORTED:
+ trace_xdp_exception(efx->net_dev, xdp_prog, xdp_act);
+ /* Fall through */
+ case XDP_DROP:
+ efx_free_rx_buffers(rx_queue, rx_buf, 1);
+ channel->n_rx_xdp_drops++;
+ break;
+ }
+
+ return xdp_act == XDP_PASS;
+}
+
/* Handle a received packet. Second half: Touches packet payload. */
void __efx_rx_packet(struct efx_channel *channel)
{
@@ -663,6 +791,9 @@ void __efx_rx_packet(struct efx_channel *channel)
goto out;
}
+ if (!efx_do_xdp(efx, channel, rx_buf, &eh))
+ goto out;
+
if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
rx_buf->flags &= ~EFX_RX_PKT_CSUMMED;
@@ -731,6 +862,7 @@ void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
{
struct efx_nic *efx = rx_queue->efx;
unsigned int max_fill, trigger, max_trigger;
+ int rc = 0;
netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
"initialising RX queue %d\n", efx_rx_queue_index(rx_queue));
@@ -764,6 +896,19 @@ void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
rx_queue->fast_fill_trigger = trigger;
rx_queue->refill_enabled = true;
+ /* Initialise XDP queue information */
+ rc = xdp_rxq_info_reg(&rx_queue->xdp_rxq_info, efx->net_dev,
+ rx_queue->core_index);
+
+ if (rc) {
+ netif_err(efx, rx_err, efx->net_dev,
+ "Failure to initialise XDP queue information rc=%d\n",
+ rc);
+ efx->xdp_rxq_info_failed = true;
+ } else {
+ rx_queue->xdp_rxq_info_valid = true;
+ }
+
/* Set up RX descriptor ring */
efx_nic_init_rx(rx_queue);
}
@@ -805,6 +950,11 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
}
kfree(rx_queue->page_ring);
rx_queue->page_ring = NULL;
+
+ if (rx_queue->xdp_rxq_info_valid)
+ xdp_rxq_info_unreg(&rx_queue->xdp_rxq_info);
+
+ rx_queue->xdp_rxq_info_valid = false;
}
void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
@@ -838,6 +988,7 @@ static void efx_filter_rfs_work(struct work_struct *data)
rc = efx->type->filter_insert(efx, &req->spec, true);
if (rc >= 0)
+ /* Discard 'priority' part of EF10+ filter ID (mcdi_filters) */
rc %= efx->type->max_rx_ip_filters;
if (efx->rps_hash_table) {
spin_lock_bh(&efx->rps_hash_lock);
@@ -862,8 +1013,9 @@ static void efx_filter_rfs_work(struct work_struct *data)
* later.
*/
mutex_lock(&efx->rps_mutex);
+ if (channel->rps_flow_id[rc] == RPS_FLOW_ID_INVALID)
+ channel->rfs_filter_count++;
channel->rps_flow_id[rc] = req->flow_id;
- ++channel->rfs_filters_added;
mutex_unlock(&efx->rps_mutex);
if (req->spec.ether_type == htons(ETH_P_IP))
@@ -880,6 +1032,28 @@ static void efx_filter_rfs_work(struct work_struct *data)
req->spec.rem_host, ntohs(req->spec.rem_port),
req->spec.loc_host, ntohs(req->spec.loc_port),
req->rxq_index, req->flow_id, rc, arfs_id);
+ channel->n_rfs_succeeded++;
+ } else {
+ if (req->spec.ether_type == htons(ETH_P_IP))
+ netif_dbg(efx, rx_status, efx->net_dev,
+ "failed to steer %s %pI4:%u:%pI4:%u to queue %u [flow %u rc %d id %u]\n",
+ (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
+ req->spec.rem_host, ntohs(req->spec.rem_port),
+ req->spec.loc_host, ntohs(req->spec.loc_port),
+ req->rxq_index, req->flow_id, rc, arfs_id);
+ else
+ netif_dbg(efx, rx_status, efx->net_dev,
+ "failed to steer %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u rc %d id %u]\n",
+ (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
+ req->spec.rem_host, ntohs(req->spec.rem_port),
+ req->spec.loc_host, ntohs(req->spec.loc_port),
+ req->rxq_index, req->flow_id, rc, arfs_id);
+ channel->n_rfs_failed++;
+ /* We're overloading the NIC's filter tables, so let's do a
+ * chunk of extra expiry work.
+ */
+ __efx_filter_rfs_expire(channel, min(channel->rfs_filter_count,
+ 100u));
}
/* Release references */
@@ -989,38 +1163,44 @@ out_clear:
return rc;
}
-bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned int quota)
+bool __efx_filter_rfs_expire(struct efx_channel *channel, unsigned int quota)
{
bool (*expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index);
- unsigned int channel_idx, index, size;
+ struct efx_nic *efx = channel->efx;
+ unsigned int index, size, start;
u32 flow_id;
if (!mutex_trylock(&efx->rps_mutex))
return false;
expire_one = efx->type->filter_rfs_expire_one;
- channel_idx = efx->rps_expire_channel;
- index = efx->rps_expire_index;
+ index = channel->rfs_expire_index;
+ start = index;
size = efx->type->max_rx_ip_filters;
- while (quota--) {
- struct efx_channel *channel = efx_get_channel(efx, channel_idx);
+ while (quota) {
flow_id = channel->rps_flow_id[index];
- if (flow_id != RPS_FLOW_ID_INVALID &&
- expire_one(efx, flow_id, index)) {
- netif_info(efx, rx_status, efx->net_dev,
- "expired filter %d [queue %u flow %u]\n",
- index, channel_idx, flow_id);
- channel->rps_flow_id[index] = RPS_FLOW_ID_INVALID;
+ if (flow_id != RPS_FLOW_ID_INVALID) {
+ quota--;
+ if (expire_one(efx, flow_id, index)) {
+ netif_info(efx, rx_status, efx->net_dev,
+ "expired filter %d [channel %u flow %u]\n",
+ index, channel->channel, flow_id);
+ channel->rps_flow_id[index] = RPS_FLOW_ID_INVALID;
+ channel->rfs_filter_count--;
+ }
}
- if (++index == size) {
- if (++channel_idx == efx->n_channels)
- channel_idx = 0;
+ if (++index == size)
index = 0;
- }
+ /* If we were called with a quota that exceeds the total number
+ * of filters in the table (which shouldn't happen, but could
+ * if two callers race), ensure that we don't loop forever -
+ * stop when we've examined every row of the table.
+ */
+ if (index == start)
+ break;
}
- efx->rps_expire_channel = channel_idx;
- efx->rps_expire_index = index;
+ channel->rfs_expire_index = index;
mutex_unlock(&efx->rps_mutex);
return true;
}
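efx_do_xdp() above is an instance of the verdict handling every XDP-capable driver of this era implements; only the pass/TX/redirect/drop plumbing differs. The generic shape, reduced to a sketch with hypothetical helpers and ownership notes in comments:

static bool example_run_xdp(struct net_device *dev, struct bpf_prog *prog,
                            struct xdp_buff *xdp)
{
        u32 act = bpf_prog_run_xdp(prog, xdp);

        switch (act) {
        case XDP_PASS:
                return true;    /* deliver to the stack as usual */
        case XDP_TX:
                /* convert_to_xdp_frame() + driver TX; buffer ownership
                 * moves to the TX path on success.
                 */
                return false;
        case XDP_REDIRECT:
                /* xdp_do_redirect(); completed by xdp_do_flush_map()
                 * called from the NAPI poll loop.
                 */
                return false;
        case XDP_ABORTED:
                trace_xdp_exception(dev, prog, act);
                /* fall through */
        case XDP_DROP:
        default:
                /* free or recycle the RX buffer */
                return false;
        }
}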
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 65e81ec1b314..00c1c4402451 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -95,6 +95,8 @@ static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
"TX queue %d transmission id %x complete\n",
tx_queue->queue, tx_queue->read_count);
+ } else if (buffer->flags & EFX_TX_BUF_XDP) {
+ xdp_return_frame_rx_napi(buffer->xdpf);
}
buffer->len = 0;
@@ -597,6 +599,94 @@ err:
return NETDEV_TX_OK;
}
+static void efx_xdp_return_frames(int n, struct xdp_frame **xdpfs)
+{
+ int i;
+
+ for (i = 0; i < n; i++)
+ xdp_return_frame_rx_napi(xdpfs[i]);
+}
+
+/* Transmit a packet from an XDP buffer
+ *
+ * Returns number of packets sent on success, error code otherwise.
+ * Runs in NAPI context, either in our poll (for XDP TX) or in another
+ * NIC's poll (for XDP redirect).
+ */
+int efx_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs,
+ bool flush)
+{
+ struct efx_tx_buffer *tx_buffer;
+ struct efx_tx_queue *tx_queue;
+ struct xdp_frame *xdpf;
+ dma_addr_t dma_addr;
+ unsigned int len;
+ int space;
+ int cpu;
+ int i;
+
+ cpu = raw_smp_processor_id();
+
+ if (!efx->xdp_tx_queue_count ||
+ unlikely(cpu >= efx->xdp_tx_queue_count))
+ return -EINVAL;
+
+ tx_queue = efx->xdp_tx_queues[cpu];
+ if (unlikely(!tx_queue))
+ return -EINVAL;
+
+ if (unlikely(n && !xdpfs))
+ return -EINVAL;
+
+ if (!n)
+ return 0;
+
+ /* Check for available space. We should never need multiple
+ * descriptors per frame.
+ */
+ space = efx->txq_entries +
+ tx_queue->read_count - tx_queue->insert_count;
+
+ for (i = 0; i < n; i++) {
+ xdpf = xdpfs[i];
+
+ if (i >= space)
+ break;
+
+ /* We'll want a descriptor for this tx. */
+ prefetchw(__efx_tx_queue_get_insert_buffer(tx_queue));
+
+ len = xdpf->len;
+
+ /* Map for DMA. */
+ dma_addr = dma_map_single(&efx->pci_dev->dev,
+ xdpf->data, len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&efx->pci_dev->dev, dma_addr))
+ break;
+
+ /* Create descriptor and set up for unmapping DMA. */
+ tx_buffer = efx_tx_map_chunk(tx_queue, dma_addr, len);
+ tx_buffer->xdpf = xdpf;
+ tx_buffer->flags = EFX_TX_BUF_XDP |
+ EFX_TX_BUF_MAP_SINGLE;
+ tx_buffer->dma_offset = 0;
+ tx_buffer->unmap_len = len;
+ tx_queue->tx_packets++;
+ }
+
+ /* Pass mapped frames to hardware. */
+ if (flush && i > 0)
+ efx_nic_push_buffers(tx_queue);
+
+ if (i == 0)
+ return -EIO;
+
+ efx_xdp_return_frames(n - i, xdpfs + i);
+
+ return i;
+}
+
/* Remove packets from the TX queue
*
* This removes packets from the TX queue, up to and including the
@@ -857,6 +947,8 @@ void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
tx_queue->completed_timestamp_major = 0;
tx_queue->completed_timestamp_minor = 0;
+ tx_queue->xdp_tx = efx_channel_is_xdp_tx(tx_queue->channel);
+
/* Set up default function pointers. These may get replaced by
* efx_nic_init_tx() based off NIC/queue capabilities.
*/
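The space check in efx_xdp_tx_buffers() uses the free-running-counter convention these rings share: read_count and insert_count increment without masking, so fill level and free space fall out of plain unsigned subtraction even across wrap. In sketch form:

/* entries is the ring size; the counters are free-running and only
 * masked (with ptr_mask) when indexing the descriptor array.
 */
static inline unsigned int example_ring_space(unsigned int entries,
                                              unsigned int read_count,
                                              unsigned int insert_count)
{
        unsigned int fill_level = insert_count - read_count;

        return entries - fill_level;    /* == entries + read - insert */
}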
diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c
index deb636d653f3..d242906ae233 100644
--- a/drivers/net/ethernet/sgi/ioc3-eth.c
+++ b/drivers/net/ethernet/sgi/ioc3-eth.c
@@ -48,7 +48,7 @@
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
-#include <linux/dma-direct.h>
+#include <linux/dma-mapping.h>
#include <net/ip.h>
@@ -89,6 +89,7 @@ struct ioc3_private {
struct device *dma_dev;
u32 *ssram;
unsigned long *rxr; /* pointer to receiver ring */
+ void *tx_ring;
struct ioc3_etxd *txr;
dma_addr_t rxr_dma;
dma_addr_t txr_dma;
@@ -1173,26 +1174,14 @@ static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct ioc3 *ioc3;
unsigned long ioc3_base, ioc3_size;
u32 vendor, model, rev;
- int err, pci_using_dac;
+ int err;
/* Configure DMA attributes. */
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
- if (!err) {
- pci_using_dac = 1;
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
- if (err < 0) {
- pr_err("%s: Unable to obtain 64 bit DMA for consistent allocations\n",
- pci_name(pdev));
- goto out;
- }
- } else {
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
- if (err) {
- pr_err("%s: No usable DMA configuration, aborting.\n",
- pci_name(pdev));
- goto out;
- }
- pci_using_dac = 0;
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err) {
+ pr_err("%s: No usable DMA configuration, aborting.\n",
+ pci_name(pdev));
+ goto out;
}
if (pci_enable_device(pdev))
@@ -1204,9 +1193,6 @@ static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_disable;
}
- if (pci_using_dac)
- dev->features |= NETIF_F_HIGHDMA;
-
err = pci_request_regions(pdev, "ioc3");
if (err)
goto out_free;
@@ -1242,8 +1228,8 @@ static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ioc3_stop(ip);
/* Allocate rx ring. 4kb = 512 entries, must be 4kb aligned */
- ip->rxr = dma_direct_alloc_pages(ip->dma_dev, RX_RING_SIZE,
- &ip->rxr_dma, GFP_ATOMIC, 0);
+ ip->rxr = dma_alloc_coherent(ip->dma_dev, RX_RING_SIZE, &ip->rxr_dma,
+ GFP_KERNEL);
if (!ip->rxr) {
pr_err("ioc3-eth: rx ring allocation failed\n");
err = -ENOMEM;
@@ -1251,14 +1237,16 @@ static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
/* Allocate tx rings. 16kb = 128 bufs, must be 16kb aligned */
- ip->txr = dma_direct_alloc_pages(ip->dma_dev, TX_RING_SIZE,
- &ip->txr_dma,
- GFP_KERNEL | __GFP_ZERO, 0);
- if (!ip->txr) {
+ ip->tx_ring = dma_alloc_coherent(ip->dma_dev, TX_RING_SIZE + SZ_16K - 1,
+ &ip->txr_dma, GFP_KERNEL);
+ if (!ip->tx_ring) {
pr_err("ioc3-eth: tx ring allocation failed\n");
err = -ENOMEM;
goto out_stop;
}
+ /* Align TX ring */
+ ip->txr = PTR_ALIGN(ip->tx_ring, SZ_16K);
+ ip->txr_dma = ALIGN(ip->txr_dma, SZ_16K);
ioc3_init(dev);
@@ -1288,7 +1276,7 @@ static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->netdev_ops = &ioc3_netdev_ops;
dev->ethtool_ops = &ioc3_ethtool_ops;
dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
- dev->features = NETIF_F_IP_CSUM;
+ dev->features = NETIF_F_IP_CSUM | NETIF_F_HIGHDMA;
sw_physid1 = ioc3_mdio_read(dev, ip->mii.phy_id, MII_PHYSID1);
sw_physid2 = ioc3_mdio_read(dev, ip->mii.phy_id, MII_PHYSID2);
@@ -1313,11 +1301,11 @@ static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
out_stop:
del_timer_sync(&ip->ioc3_timer);
if (ip->rxr)
- dma_direct_free_pages(ip->dma_dev, RX_RING_SIZE, ip->rxr,
- ip->rxr_dma, 0);
- if (ip->txr)
- dma_direct_free_pages(ip->dma_dev, TX_RING_SIZE, ip->txr,
- ip->txr_dma, 0);
+ dma_free_coherent(ip->dma_dev, RX_RING_SIZE, ip->rxr,
+ ip->rxr_dma);
+ if (ip->tx_ring)
+ dma_free_coherent(ip->dma_dev, TX_RING_SIZE, ip->tx_ring,
+ ip->txr_dma);
out_res:
pci_release_regions(pdev);
out_free:
@@ -1335,10 +1323,8 @@ static void ioc3_remove_one(struct pci_dev *pdev)
struct net_device *dev = pci_get_drvdata(pdev);
struct ioc3_private *ip = netdev_priv(dev);
- dma_direct_free_pages(ip->dma_dev, RX_RING_SIZE, ip->rxr,
- ip->rxr_dma, 0);
- dma_direct_free_pages(ip->dma_dev, TX_RING_SIZE, ip->txr,
- ip->txr_dma, 0);
+ dma_free_coherent(ip->dma_dev, RX_RING_SIZE, ip->rxr, ip->rxr_dma);
+ dma_free_coherent(ip->dma_dev, TX_RING_SIZE, ip->tx_ring, ip->txr_dma);
unregister_netdev(dev);
del_timer_sync(&ip->ioc3_timer);
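The ioc3 conversion keeps a second pointer (tx_ring) precisely because PTR_ALIGN() may advance past the address dma_alloc_coherent() returned, and the eventual free must be given the original. A sketch of the over-allocate-then-align pattern, assuming (as the DMA API guarantees for coherent memory) that CPU and DMA addresses share their low-order bits:

#include <linux/dma-mapping.h>
#include <linux/kernel.h>

static void *example_alloc_aligned(struct device *dev, size_t size,
                                   size_t align, void **raw_cpu,
                                   dma_addr_t *raw_dma, dma_addr_t *dma)
{
        *raw_cpu = dma_alloc_coherent(dev, size + align - 1, raw_dma,
                                      GFP_KERNEL);
        if (!*raw_cpu)
                return NULL;

        *dma = ALIGN(*raw_dma, align);          /* device-visible */
        return PTR_ALIGN(*raw_cpu, align);      /* CPU-visible */
}

/* Free with the *unaligned* pair, which the caller must retain:
 *      dma_free_coherent(dev, size + align - 1, raw_cpu, raw_dma);
 */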
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
index f9e6744d8fd6..869a498e3b5e 100644
--- a/drivers/net/ethernet/socionext/netsec.c
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -252,7 +252,6 @@
#define NETSEC_XDP_CONSUMED BIT(0)
#define NETSEC_XDP_TX BIT(1)
#define NETSEC_XDP_REDIR BIT(2)
-#define NETSEC_XDP_RX_OK (NETSEC_XDP_PASS | NETSEC_XDP_TX | NETSEC_XDP_REDIR)
enum ring_id {
NETSEC_RING_TX = 0,
@@ -661,6 +660,7 @@ static bool netsec_clean_tx_dring(struct netsec_priv *priv)
bytes += desc->skb->len;
dev_kfree_skb(desc->skb);
} else {
+ bytes += desc->xdpf->len;
xdp_return_frame(desc->xdpf);
}
next:
@@ -847,8 +847,8 @@ static u32 netsec_xdp_queue_one(struct netsec_priv *priv,
enum dma_data_direction dma_dir =
page_pool_get_dma_dir(rx_ring->page_pool);
- dma_handle = page_pool_get_dma_addr(page) +
- NETSEC_RXBUF_HEADROOM;
+ dma_handle = page_pool_get_dma_addr(page) + xdpf->headroom +
+ sizeof(*xdpf);
dma_sync_single_for_device(priv->dev, dma_handle, xdpf->len,
dma_dir);
tx_desc.buf_type = TYPE_NETSEC_XDP_TX;
@@ -858,6 +858,7 @@ static u32 netsec_xdp_queue_one(struct netsec_priv *priv,
tx_desc.addr = xdpf->data;
tx_desc.len = xdpf->len;
+ netdev_sent_queue(priv->ndev, xdpf->len);
netsec_set_tx_de(priv, tx_ring, &tx_ctrl, &tx_desc, xdpf);
return NETSEC_XDP_TX;
@@ -1030,7 +1031,7 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
next:
if ((skb && napi_gro_receive(&priv->napi, skb) != GRO_DROP) ||
- xdp_result & NETSEC_XDP_RX_OK) {
+ xdp_result) {
ndev->stats.rx_packets++;
ndev->stats.rx_bytes += xdp.data_end - xdp.data;
}
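The new dma_handle computation above encodes where convert_to_xdp_frame() puts things: the struct xdp_frame is written at the very start of the buffer's old headroom, so the payload begins sizeof(*xdpf) + xdpf->headroom bytes past the page-pool DMA address. In sketch form:

/* Buffer layout after convert_to_xdp_frame() on a page-pool buffer:
 *
 *   base                                            xdpf->data
 *   | struct xdp_frame | xdpf->headroom bytes | packet data ... |
 *
 * so the payload's DMA address is
 *   page_pool_get_dma_addr(page) + sizeof(*xdpf) + xdpf->headroom
 * rather than the fixed NETSEC_RXBUF_HEADROOM used before, which went
 * stale as soon as an XDP program moved xdp->data.
 */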
diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c
index 6e984d5a729f..f7e927ad67fa 100644
--- a/drivers/net/ethernet/socionext/sni_ave.c
+++ b/drivers/net/ethernet/socionext/sni_ave.c
@@ -1565,10 +1565,10 @@ static int ave_probe(struct platform_device *pdev)
return -EINVAL;
np = dev->of_node;
- phy_mode = of_get_phy_mode(np);
- if ((int)phy_mode < 0) {
+ ret = of_get_phy_mode(np, &phy_mode);
+ if (ret) {
dev_err(dev, "phy-mode not found\n");
- return -EINVAL;
+ return ret;
}
irq = platform_get_irq(pdev, 0);
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 912bbb6515b2..b210e987a1db 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -248,12 +248,13 @@ struct stmmac_safety_stats {
/* Max/Min RI Watchdog Timer count value */
#define MAX_DMA_RIWT 0xff
#define MIN_DMA_RIWT 0x10
+#define DEF_DMA_RIWT 0xa0
/* Tx coalesce parameters */
#define STMMAC_COAL_TX_TIMER 1000
#define STMMAC_MAX_COAL_TX_TICK 100000
#define STMMAC_TX_MAX_FRAMES 256
-#define STMMAC_TX_FRAMES 1
-#define STMMAC_RX_FRAMES 25
+#define STMMAC_TX_FRAMES 25
+#define STMMAC_RX_FRAMES 0
/* Packets types */
enum packets_types {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c
index 527f93320a5a..d0d2d0fc5f0a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c
@@ -61,9 +61,10 @@ static void anarion_gmac_exit(struct platform_device *pdev, void *priv)
static struct anarion_gmac *anarion_config_dt(struct platform_device *pdev)
{
- int phy_mode;
- void __iomem *ctl_block;
struct anarion_gmac *gmac;
+ phy_interface_t phy_mode;
+ void __iomem *ctl_block;
+ int err;
ctl_block = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(ctl_block)) {
@@ -78,7 +79,10 @@ static struct anarion_gmac *anarion_config_dt(struct platform_device *pdev)
gmac->ctl_block = (uintptr_t)ctl_block;
- phy_mode = of_get_phy_mode(pdev->dev.of_node);
+ err = of_get_phy_mode(pdev->dev.of_node, &phy_mode);
+ if (err)
+ return ERR_PTR(err);
+
switch (phy_mode) {
case PHY_INTERFACE_MODE_RGMII: /* Fall through */
case PHY_INTERFACE_MODE_RGMII_ID: /* Fall through */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
index 0d21082ceb93..6ae13dc19510 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
@@ -189,9 +189,10 @@ static int ipq806x_gmac_set_speed(struct ipq806x_gmac *gmac, unsigned int speed)
static int ipq806x_gmac_of_parse(struct ipq806x_gmac *gmac)
{
struct device *dev = &gmac->pdev->dev;
+ int ret;
- gmac->phy_mode = of_get_phy_mode(dev->of_node);
- if ((int)gmac->phy_mode < 0) {
+ ret = of_get_phy_mode(dev->of_node, &gmac->phy_mode);
+ if (ret) {
dev_err(dev, "missing phy mode property\n");
return -EINVAL;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
index 79f2ee37afed..bdb80421acac 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
@@ -54,7 +54,7 @@ struct mediatek_dwmac_plat_data {
struct device_node *np;
struct regmap *peri_regmap;
struct device *dev;
- int phy_mode;
+ phy_interface_t phy_mode;
bool rmii_rxc;
};
@@ -130,6 +130,31 @@ static void mt2712_delay_ps2stage(struct mediatek_dwmac_plat_data *plat)
}
}
+static void mt2712_delay_stage2ps(struct mediatek_dwmac_plat_data *plat)
+{
+ struct mac_delay_struct *mac_delay = &plat->mac_delay;
+
+ switch (plat->phy_mode) {
+ case PHY_INTERFACE_MODE_MII:
+ case PHY_INTERFACE_MODE_RMII:
+ /* 550ps per stage for MII/RMII */
+ mac_delay->tx_delay *= 550;
+ mac_delay->rx_delay *= 550;
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ /* 170ps per stage for RGMII */
+ mac_delay->tx_delay *= 170;
+ mac_delay->rx_delay *= 170;
+ break;
+ default:
+ dev_err(plat->dev, "phy interface not supported\n");
+ break;
+ }
+}
+
static int mt2712_set_delay(struct mediatek_dwmac_plat_data *plat)
{
struct mac_delay_struct *mac_delay = &plat->mac_delay;
@@ -199,6 +224,8 @@ static int mt2712_set_delay(struct mediatek_dwmac_plat_data *plat)
regmap_write(plat->peri_regmap, PERI_ETH_DLY, delay_val);
regmap_write(plat->peri_regmap, PERI_ETH_DLY_FINE, fine_val);
+ mt2712_delay_stage2ps(plat);
+
return 0;
}
@@ -216,6 +243,7 @@ static int mediatek_dwmac_config_dt(struct mediatek_dwmac_plat_data *plat)
{
struct mac_delay_struct *mac_delay = &plat->mac_delay;
u32 tx_delay_ps, rx_delay_ps;
+ int err;
plat->peri_regmap = syscon_regmap_lookup_by_phandle(plat->np, "mediatek,pericfg");
if (IS_ERR(plat->peri_regmap)) {
@@ -223,10 +251,10 @@ static int mediatek_dwmac_config_dt(struct mediatek_dwmac_plat_data *plat)
return PTR_ERR(plat->peri_regmap);
}
- plat->phy_mode = of_get_phy_mode(plat->np);
- if (plat->phy_mode < 0) {
+ err = of_get_phy_mode(plat->np, &plat->phy_mode);
+ if (err) {
dev_err(plat->dev, "not find phy-mode\n");
- return -EINVAL;
+ return err;
}
if (!of_property_read_u32(plat->np, "mediatek,tx-delay-ps", &tx_delay_ps)) {
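mt2712_delay_ps2stage() divides the requested picosecond delays down to hardware stages before programming, and the new mt2712_delay_stage2ps() multiplies them back so mac_delay ends up holding what was actually applied. A worked example using the RGMII figure quoted above:

/* RGMII, 170 ps per delay stage: a DT request of
 *      mediatek,tx-delay-ps = <2000>
 * becomes 2000 / 170 = 11 stages (integer division), and converting
 * back gives 11 * 170 = 1870 ps -- the delay actually in effect,
 * which is what mac_delay reports after mt2712_set_delay().
 */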
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
index 306da8f6b7d5..bd6c01004913 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
@@ -338,10 +338,9 @@ static int meson8b_dwmac_probe(struct platform_device *pdev)
}
dwmac->dev = &pdev->dev;
- dwmac->phy_mode = of_get_phy_mode(pdev->dev.of_node);
- if ((int)dwmac->phy_mode < 0) {
+ ret = of_get_phy_mode(pdev->dev.of_node, &dwmac->phy_mode);
+ if (ret) {
dev_err(&pdev->dev, "missing phy-mode property\n");
- ret = -EINVAL;
goto err_remove_config_dt;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index e2e469c37a4d..dc50ba13a746 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -37,7 +37,7 @@ struct rk_gmac_ops {
struct rk_priv_data {
struct platform_device *pdev;
- int phy_iface;
+ phy_interface_t phy_iface;
struct regulator *regulator;
bool suspended;
const struct rk_gmac_ops *ops;
@@ -1224,7 +1224,7 @@ static struct rk_priv_data *rk_gmac_setup(struct platform_device *pdev,
if (!bsp_priv)
return ERR_PTR(-ENOMEM);
- bsp_priv->phy_iface = of_get_phy_mode(dev->of_node);
+ of_get_phy_mode(dev->of_node, &bsp_priv->phy_iface);
bsp_priv->ops = ops;
bsp_priv->regulator = devm_regulator_get_optional(dev, "phy");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
index e9fd661f7995..e1b63df6f96f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
@@ -116,7 +116,7 @@
#define ETH_PHY_SEL_MII 0x0
struct sti_dwmac {
- int interface; /* MII interface */
+ phy_interface_t interface; /* MII interface */
bool ext_phyclk; /* Clock from external PHY */
u32 tx_retime_src; /* TXCLK Retiming */
struct clk *clk; /* PHY clock */
@@ -269,7 +269,12 @@ static int sti_dwmac_parse_data(struct sti_dwmac *dwmac,
return err;
}
- dwmac->interface = of_get_phy_mode(np);
+ err = of_get_phy_mode(np, &dwmac->interface);
+ if (err && err != -ENODEV) {
+ dev_err(dev, "Can't get phy-mode\n");
+ return err;
+ }
+
dwmac->regmap = regmap;
dwmac->gmac_en = of_property_read_bool(np, "st,gmac_en");
dwmac->ext_phyclk = of_property_read_bool(np, "st,ext-phyclk");
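
The sti hunk above (and the sunxi one further down) treat -ENODEV from of_get_phy_mode() as "property absent, keep the default" while any other error aborts the probe; the 5.5-era helper initialises *interface to PHY_INTERFACE_MODE_NA before parsing, so the tolerated case still leaves a well-defined value. A sketch of that convention, with example_parse_optional_mode() as a hypothetical helper:

#include <linux/of_net.h>

static int example_parse_optional_mode(struct device_node *np,
				       phy_interface_t *mode)
{
	int err = of_get_phy_mode(np, mode);

	if (err == -ENODEV)
		return 0;	/* optional: fall back to the default */
	return err;		/* real parse errors still propagate */
}
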
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
index 4ef041bdf6a1..9b7be996d07b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
@@ -155,18 +155,14 @@ static int stm32mp1_clk_prepare(struct stm32_dwmac *dwmac, bool prepare)
ret = clk_prepare_enable(dwmac->syscfg_clk);
if (ret)
return ret;
-
- if (dwmac->clk_eth_ck) {
- ret = clk_prepare_enable(dwmac->clk_eth_ck);
- if (ret) {
- clk_disable_unprepare(dwmac->syscfg_clk);
- return ret;
- }
+ ret = clk_prepare_enable(dwmac->clk_eth_ck);
+ if (ret) {
+ clk_disable_unprepare(dwmac->syscfg_clk);
+ return ret;
}
} else {
clk_disable_unprepare(dwmac->syscfg_clk);
- if (dwmac->clk_eth_ck)
- clk_disable_unprepare(dwmac->clk_eth_ck);
+ clk_disable_unprepare(dwmac->clk_eth_ck);
}
return ret;
}
@@ -175,7 +171,7 @@ static int stm32mp1_set_mode(struct plat_stmmacenet_data *plat_dat)
{
struct stm32_dwmac *dwmac = plat_dat->bsp_priv;
u32 reg = dwmac->mode_reg;
- int val, ret;
+ int val;
switch (plat_dat->interface) {
case PHY_INTERFACE_MODE_MII:
@@ -211,8 +207,8 @@ static int stm32mp1_set_mode(struct plat_stmmacenet_data *plat_dat)
}
/* Need to update PMCCLRR (clear register) */
- ret = regmap_write(dwmac->regmap, reg + SYSCFG_PMCCLRR_OFFSET,
- dwmac->ops->syscfg_eth_mask);
+ regmap_write(dwmac->regmap, reg + SYSCFG_PMCCLRR_OFFSET,
+ dwmac->ops->syscfg_eth_mask);
/* Update PMCSETR (set register) */
return regmap_update_bits(dwmac->regmap, reg,
@@ -320,12 +316,10 @@ static int stm32mp1_parse_data(struct stm32_dwmac *dwmac,
return PTR_ERR(dwmac->clk_ethstp);
}
- /* Clock for sysconfig */
+ /* Optional Clock for sysconfig */
dwmac->syscfg_clk = devm_clk_get(dev, "syscfg-clk");
- if (IS_ERR(dwmac->syscfg_clk)) {
- dev_err(dev, "No syscfg clock provided...\n");
- return PTR_ERR(dwmac->syscfg_clk);
- }
+ if (IS_ERR(dwmac->syscfg_clk))
+ dwmac->syscfg_clk = NULL;
/* Get IRQ information early to have an ability to ask for deferred
* probe if needed before we went too far with resource allocation.
@@ -437,8 +431,7 @@ static int stm32mp1_suspend(struct stm32_dwmac *dwmac)
clk_disable_unprepare(dwmac->clk_tx);
clk_disable_unprepare(dwmac->syscfg_clk);
- if (dwmac->clk_eth_ck)
- clk_disable_unprepare(dwmac->clk_eth_ck);
+ clk_disable_unprepare(dwmac->clk_eth_ck);
return ret;
}
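
The stm32 hunks above can drop the "if (dwmac->clk_eth_ck)" guards because the common clock API treats a NULL struct clk * as a no-op dummy: clk_prepare_enable(NULL) returns 0 and clk_disable_unprepare(NULL) does nothing. A sketch of the same idiom using devm_clk_get_optional() (a real helper, though the hunk above open-codes the NULL fallback instead):

#include <linux/clk.h>

static int example_enable_optional_clk(struct device *dev)
{
	struct clk *clk = devm_clk_get_optional(dev, "syscfg-clk");

	if (IS_ERR(clk))
		return PTR_ERR(clk);	/* real errors still propagate */

	/* NULL clk (clock not described in DT): returns 0, no-op */
	return clk_prepare_enable(clk);
}
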
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index ddcc191febdb..1c8d84ed8410 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -1105,6 +1105,7 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
struct stmmac_resources stmmac_res;
struct sunxi_priv_data *gmac;
struct device *dev = &pdev->dev;
+ phy_interface_t interface;
int ret;
struct stmmac_priv *priv;
struct net_device *ndev;
@@ -1178,10 +1179,10 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
return ret;
}
- ret = of_get_phy_mode(dev->of_node);
- if (ret < 0)
+ ret = of_get_phy_mode(dev->of_node, &interface);
+ if (ret)
return -EINVAL;
- plat_dat->interface = ret;
+ plat_dat->interface = interface;
/* platform data specifying hardware features and callbacks.
* hardware features were copied from Allwinner drivers.
@@ -1226,7 +1227,7 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
dwmac_mux:
sun8i_dwmac_unset_syscon(gmac);
dwmac_exit:
- sun8i_dwmac_exit(pdev, plat_dat->bsp_priv);
+ stmmac_pltfr_remove(pdev);
return ret;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
index a299da3971b4..26353ef616b8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
@@ -18,7 +18,7 @@
#include "stmmac_platform.h"
struct sunxi_priv_data {
- int interface;
+ phy_interface_t interface;
int clk_enabled;
struct clk *tx_clk;
struct regulator *regulator;
@@ -118,7 +118,11 @@ static int sun7i_gmac_probe(struct platform_device *pdev)
goto err_remove_config_dt;
}
- gmac->interface = of_get_phy_mode(dev->of_node);
+ ret = of_get_phy_mode(dev->of_node, &gmac->interface);
+ if (ret && ret != -ENODEV) {
+ dev_err(dev, "Can't get phy-mode\n");
+ goto err_remove_config_dt;
+ }
gmac->tx_clk = devm_clk_get(dev, "allwinner_gmac_tx");
if (IS_ERR(gmac->tx_clk)) {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index 3d69da112625..d0356fbd1e43 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -130,7 +130,6 @@ static void dwmac1000_set_mchash(void __iomem *ioaddr, u32 *mcfilterbits,
writel(mcfilterbits[0], ioaddr + GMAC_HASH_LOW);
writel(mcfilterbits[1], ioaddr + GMAC_HASH_HIGH);
return;
- break;
case 7:
numhashregs = 4;
break;
@@ -140,7 +139,6 @@ static void dwmac1000_set_mchash(void __iomem *ioaddr, u32 *mcfilterbits,
default:
pr_debug("STMMAC: err in setting multicast filter\n");
return;
- break;
}
for (regs = 0; regs < numhashregs; regs++)
writel(mcfilterbits[regs],
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
index 89a3420eba42..2dc70d104161 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
@@ -14,6 +14,7 @@
/* MAC registers */
#define GMAC_CONFIG 0x00000000
+#define GMAC_EXT_CONFIG 0x00000004
#define GMAC_PACKET_FILTER 0x00000008
#define GMAC_HASH_TAB(x) (0x10 + (x) * 4)
#define GMAC_VLAN_TAG 0x00000050
@@ -43,6 +44,10 @@
#define GMAC_ARP_ADDR 0x00000210
#define GMAC_ADDR_HIGH(reg) (0x300 + reg * 8)
#define GMAC_ADDR_LOW(reg) (0x304 + reg * 8)
+#define GMAC_L3L4_CTRL(reg) (0x900 + (reg) * 0x30)
+#define GMAC_L4_ADDR(reg) (0x904 + (reg) * 0x30)
+#define GMAC_L3_ADDR0(reg) (0x910 + (reg) * 0x30)
+#define GMAC_L3_ADDR1(reg) (0x914 + (reg) * 0x30)
/* RX Queues Routing */
#define GMAC_RXQCTRL_AVCPQ_MASK GENMASK(2, 0)
@@ -67,6 +72,7 @@
#define GMAC_PACKET_FILTER_PCF BIT(7)
#define GMAC_PACKET_FILTER_HPF BIT(10)
#define GMAC_PACKET_FILTER_VTFE BIT(16)
+#define GMAC_PACKET_FILTER_IPFE BIT(20)
#define GMAC_MAX_PERFECT_ADDRESSES 128
@@ -183,6 +189,11 @@ enum power_event {
#define GMAC_CONFIG_TE BIT(1)
#define GMAC_CONFIG_RE BIT(0)
+/* MAC extended config */
+#define GMAC_CONFIG_HDSMS GENMASK(22, 20)
+#define GMAC_CONFIG_HDSMS_SHIFT 20
+#define GMAC_CONFIG_HDSMS_256 (0x2 << GMAC_CONFIG_HDSMS_SHIFT)
+
/* MAC HW features0 bitmap */
#define GMAC_HW_FEAT_SAVLANINS BIT(27)
#define GMAC_HW_FEAT_ADDMAC BIT(18)
@@ -202,9 +213,12 @@ enum power_event {
#define GMAC_HW_FEAT_MIISEL BIT(0)
/* MAC HW features1 bitmap */
+#define GMAC_HW_FEAT_L3L4FNUM GENMASK(30, 27)
#define GMAC_HW_HASH_TB_SZ GENMASK(25, 24)
#define GMAC_HW_FEAT_AVSEL BIT(20)
#define GMAC_HW_TSOEN BIT(18)
+#define GMAC_HW_FEAT_SPHEN BIT(17)
+#define GMAC_HW_ADDR64 GENMASK(15, 14)
#define GMAC_HW_TXFIFOSIZE GENMASK(10, 6)
#define GMAC_HW_RXFIFOSIZE GENMASK(4, 0)
@@ -227,6 +241,21 @@ enum power_event {
#define GMAC_HI_DCS_SHIFT 16
#define GMAC_HI_REG_AE BIT(31)
+/* L3/L4 Filters regs */
+#define GMAC_L4DPIM0 BIT(21)
+#define GMAC_L4DPM0 BIT(20)
+#define GMAC_L4SPIM0 BIT(19)
+#define GMAC_L4SPM0 BIT(18)
+#define GMAC_L4PEN0 BIT(16)
+#define GMAC_L3DAIM0 BIT(5)
+#define GMAC_L3DAM0 BIT(4)
+#define GMAC_L3SAIM0 BIT(3)
+#define GMAC_L3SAM0 BIT(2)
+#define GMAC_L3PEN0 BIT(0)
+#define GMAC_L4DP0 GENMASK(31, 16)
+#define GMAC_L4DP0_SHIFT 16
+#define GMAC_L4SP0 GENMASK(15, 0)
+
/* MTL registers */
#define MTL_OPERATION_MODE 0x00000c00
#define MTL_FRPE BIT(15)
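
A quick sanity check on the per-filter register window added above: each of the l3l4fnum filters owns a 0x30-byte block starting at 0x900, holding its control word, L4 port register and two L3 address words. A self-contained sketch (macros copied from the hunk):

#include <linux/build_bug.h>

#define GMAC_L3L4_CTRL(reg)	(0x900 + (reg) * 0x30)
#define GMAC_L4_ADDR(reg)	(0x904 + (reg) * 0x30)

/* e.g. filter 1's window starts 0x30 above filter 0's */
static_assert(GMAC_L3L4_CTRL(1) == 0x930);
static_assert(GMAC_L4_ADDR(1) == 0x934);
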
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index 66e60c7e9850..40ca00e596dd 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -733,7 +733,7 @@ static void dwmac4_set_mac_loopback(void __iomem *ioaddr, bool enable)
}
static void dwmac4_update_vlan_hash(struct mac_device_info *hw, u32 hash,
- bool is_double)
+ __le16 perfect_match, bool is_double)
{
void __iomem *ioaddr = hw->pcsr;
@@ -748,6 +748,16 @@ static void dwmac4_update_vlan_hash(struct mac_device_info *hw, u32 hash,
}
writel(value, ioaddr + GMAC_VLAN_TAG);
+ } else if (perfect_match) {
+ u32 value = GMAC_VLAN_ETV;
+
+ if (is_double) {
+ value |= GMAC_VLAN_EDVLP;
+ value |= GMAC_VLAN_ESVL;
+ value |= GMAC_VLAN_DOVLTC;
+ }
+
+ writel(value | perfect_match, ioaddr + GMAC_VLAN_TAG);
} else {
u32 value = readl(ioaddr + GMAC_VLAN_TAG);
@@ -799,6 +809,106 @@ static void dwmac4_set_arp_offload(struct mac_device_info *hw, bool en,
writel(value, ioaddr + GMAC_CONFIG);
}
+static int dwmac4_config_l3_filter(struct mac_device_info *hw, u32 filter_no,
+ bool en, bool ipv6, bool sa, bool inv,
+ u32 match)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ value = readl(ioaddr + GMAC_PACKET_FILTER);
+ value |= GMAC_PACKET_FILTER_IPFE;
+ writel(value, ioaddr + GMAC_PACKET_FILTER);
+
+ value = readl(ioaddr + GMAC_L3L4_CTRL(filter_no));
+
+ /* For IPv6, the SA and DA filters cannot both be active */
+ if (ipv6) {
+ value |= GMAC_L3PEN0;
+ value &= ~(GMAC_L3SAM0 | GMAC_L3SAIM0);
+ value &= ~(GMAC_L3DAM0 | GMAC_L3DAIM0);
+ if (sa) {
+ value |= GMAC_L3SAM0;
+ if (inv)
+ value |= GMAC_L3SAIM0;
+ } else {
+ value |= GMAC_L3DAM0;
+ if (inv)
+ value |= GMAC_L3DAIM0;
+ }
+ } else {
+ value &= ~GMAC_L3PEN0;
+ if (sa) {
+ value |= GMAC_L3SAM0;
+ if (inv)
+ value |= GMAC_L3SAIM0;
+ } else {
+ value |= GMAC_L3DAM0;
+ if (inv)
+ value |= GMAC_L3DAIM0;
+ }
+ }
+
+ writel(value, ioaddr + GMAC_L3L4_CTRL(filter_no));
+
+ if (sa) {
+ writel(match, ioaddr + GMAC_L3_ADDR0(filter_no));
+ } else {
+ writel(match, ioaddr + GMAC_L3_ADDR1(filter_no));
+ }
+
+ if (!en)
+ writel(0, ioaddr + GMAC_L3L4_CTRL(filter_no));
+
+ return 0;
+}
+
+static int dwmac4_config_l4_filter(struct mac_device_info *hw, u32 filter_no,
+ bool en, bool udp, bool sa, bool inv,
+ u32 match)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ value = readl(ioaddr + GMAC_PACKET_FILTER);
+ value |= GMAC_PACKET_FILTER_IPFE;
+ writel(value, ioaddr + GMAC_PACKET_FILTER);
+
+ value = readl(ioaddr + GMAC_L3L4_CTRL(filter_no));
+ if (udp) {
+ value |= GMAC_L4PEN0;
+ } else {
+ value &= ~GMAC_L4PEN0;
+ }
+
+ value &= ~(GMAC_L4SPM0 | GMAC_L4SPIM0);
+ value &= ~(GMAC_L4DPM0 | GMAC_L4DPIM0);
+ if (sa) {
+ value |= GMAC_L4SPM0;
+ if (inv)
+ value |= GMAC_L4SPIM0;
+ } else {
+ value |= GMAC_L4DPM0;
+ if (inv)
+ value |= GMAC_L4DPIM0;
+ }
+
+ writel(value, ioaddr + GMAC_L3L4_CTRL(filter_no));
+
+ if (sa) {
+ value = match & GMAC_L4SP0;
+ } else {
+ value = (match << GMAC_L4DP0_SHIFT) & GMAC_L4DP0;
+ }
+
+ writel(value, ioaddr + GMAC_L4_ADDR(filter_no));
+
+ if (!en)
+ writel(0, ioaddr + GMAC_L3L4_CTRL(filter_no));
+
+ return 0;
+}
+
const struct stmmac_ops dwmac4_ops = {
.core_init = dwmac4_core_init,
.set_mac = stmmac_set_mac,
@@ -828,11 +938,14 @@ const struct stmmac_ops dwmac4_ops = {
.pcs_get_adv_lp = dwmac4_get_adv_lp,
.debug = dwmac4_debug,
.set_filter = dwmac4_set_filter,
+ .flex_pps_config = dwmac5_flex_pps_config,
.set_mac_loopback = dwmac4_set_mac_loopback,
.update_vlan_hash = dwmac4_update_vlan_hash,
.sarc_configure = dwmac4_sarc_configure,
.enable_vlan = dwmac4_enable_vlan,
.set_arp_offload = dwmac4_set_arp_offload,
+ .config_l3_filter = dwmac4_config_l3_filter,
+ .config_l4_filter = dwmac4_config_l4_filter,
};
const struct stmmac_ops dwmac410_ops = {
@@ -869,6 +982,8 @@ const struct stmmac_ops dwmac410_ops = {
.sarc_configure = dwmac4_sarc_configure,
.enable_vlan = dwmac4_enable_vlan,
.set_arp_offload = dwmac4_set_arp_offload,
+ .config_l3_filter = dwmac4_config_l3_filter,
+ .config_l4_filter = dwmac4_config_l4_filter,
};
const struct stmmac_ops dwmac510_ops = {
@@ -910,6 +1025,8 @@ const struct stmmac_ops dwmac510_ops = {
.sarc_configure = dwmac4_sarc_configure,
.enable_vlan = dwmac4_enable_vlan,
.set_arp_offload = dwmac4_set_arp_offload,
+ .config_l3_filter = dwmac4_config_l3_filter,
+ .config_l4_filter = dwmac4_config_l4_filter,
};
int dwmac4_setup(struct stmmac_priv *priv)
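
dwmac4_config_l4_filter() above programs at most one port match per call, keyed by the sa flag. A self-contained sketch of the field packing it relies on — destination port in bits 31:16 of GMAC_L4_ADDR (GMAC_L4DP0), source port in bits 15:0 (GMAC_L4SP0); example_l4_match() is illustrative only:

#include <linux/types.h>

static u32 example_l4_match(u16 sport, u16 dport)
{
	/* GMAC_L4SP0 = GENMASK(15, 0), GMAC_L4DP0 = GENMASK(31, 16);
	 * the driver writes only one of the two per call.
	 */
	return ((u32)dport << 16) | sport;
}
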
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
index 15eb1abba91d..3e14da69f378 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
@@ -83,9 +83,10 @@ static int dwmac4_wrback_get_rx_status(void *data, struct stmmac_extra_stats *x,
if (unlikely(rdes3 & RDES3_OWN))
return dma_own;
- /* Verify rx error by looking at the last segment. */
- if (likely(!(rdes3 & RDES3_LAST_DESCRIPTOR)))
+ if (unlikely(rdes3 & RDES3_CONTEXT_DESCRIPTOR))
return discard_frame;
+ if (likely(!(rdes3 & RDES3_LAST_DESCRIPTOR)))
+ return rx_not_ls;
if (unlikely(rdes3 & RDES3_ERROR_SUMMARY)) {
if (unlikely(rdes3 & RDES3_GIANT_PACKET))
@@ -188,7 +189,7 @@ static void dwmac4_set_tx_owner(struct dma_desc *p)
static void dwmac4_set_rx_owner(struct dma_desc *p, int disable_rx_ic)
{
- p->des3 = cpu_to_le32(RDES3_OWN | RDES3_BUFFER1_VALID_ADDR);
+ p->des3 |= cpu_to_le32(RDES3_OWN | RDES3_BUFFER1_VALID_ADDR);
if (!disable_rx_ic)
p->des3 |= cpu_to_le32(RDES3_INT_ON_COMPLETION_EN);
@@ -431,8 +432,8 @@ static void dwmac4_get_addr(struct dma_desc *p, unsigned int *addr)
static void dwmac4_set_addr(struct dma_desc *p, dma_addr_t addr)
{
- p->des0 = cpu_to_le32(addr);
- p->des1 = 0;
+ p->des0 = cpu_to_le32(lower_32_bits(addr));
+ p->des1 = cpu_to_le32(upper_32_bits(addr));
}
static void dwmac4_clear(struct dma_desc *p)
@@ -492,6 +493,18 @@ static void dwmac4_set_vlan(struct dma_desc *p, u32 type)
p->des2 |= cpu_to_le32(type & TDES2_VLAN_TAG_MASK);
}
+static int dwmac4_get_rx_header_len(struct dma_desc *p, unsigned int *len)
+{
+ *len = le32_to_cpu(p->des2) & RDES2_HL;
+ return 0;
+}
+
+static void dwmac4_set_sec_addr(struct dma_desc *p, dma_addr_t addr)
+{
+ p->des2 = cpu_to_le32(lower_32_bits(addr));
+ p->des3 = cpu_to_le32(upper_32_bits(addr) | RDES3_BUFFER2_VALID_ADDR);
+}
+
const struct stmmac_desc_ops dwmac4_desc_ops = {
.tx_status = dwmac4_wrback_get_tx_status,
.rx_status = dwmac4_wrback_get_rx_status,
@@ -519,6 +532,8 @@ const struct stmmac_desc_ops dwmac4_desc_ops = {
.set_sarc = dwmac4_set_sarc,
.set_vlan_tag = dwmac4_set_vlan_tag,
.set_vlan = dwmac4_set_vlan,
+ .get_rx_header_len = dwmac4_get_rx_header_len,
+ .set_sec_addr = dwmac4_set_sec_addr,
};
const struct stmmac_mode_ops dwmac4_ring_mode_ops = {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h
index 0d7b3bbcd5a7..6d92109dc9aa 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h
@@ -109,6 +109,7 @@
#define RDES2_L4_FILTER_MATCH BIT(28)
#define RDES2_L3_L4_FILT_NB_MATCH_MASK GENMASK(27, 26)
#define RDES2_L3_L4_FILT_NB_MATCH_SHIFT 26
+#define RDES2_HL GENMASK(9, 0)
/* RDES3 (write back format) */
#define RDES3_PACKET_SIZE_MASK GENMASK(14, 0)
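
The descriptor hunks above split buffer addresses across two 32-bit words (des0/des1 for buffer 1, des2/des3 for the split-header second buffer) and read the received header length back from the low ten bits of write-back RDES2. A self-contained sketch of that extraction — ten bits cap the reported header at 1023 bytes, comfortably above the 256-byte HDSMS segment programmed later in dwmac4_enable_sph():

#include <linux/bits.h>
#include <linux/kernel.h>

#define RDES2_HL	GENMASK(9, 0)	/* as added above */

static unsigned int example_rx_hdr_len(__le32 rdes2)
{
	return le32_to_cpu(rdes2) & RDES2_HL;	/* 0..1023 bytes */
}
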
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
index 68c157979b94..c15409030710 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
@@ -79,6 +79,10 @@ static void dwmac4_dma_init_rx_chan(void __iomem *ioaddr,
value = value | (rxpbl << DMA_BUS_MODE_RPBL_SHIFT);
writel(value, ioaddr + DMA_CHAN_RX_CONTROL(chan));
+ if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) && likely(dma_cfg->eame))
+ writel(upper_32_bits(dma_rx_phy),
+ ioaddr + DMA_CHAN_RX_BASE_ADDR_HI(chan));
+
writel(lower_32_bits(dma_rx_phy), ioaddr + DMA_CHAN_RX_BASE_ADDR(chan));
}
@@ -97,6 +101,10 @@ static void dwmac4_dma_init_tx_chan(void __iomem *ioaddr,
writel(value, ioaddr + DMA_CHAN_TX_CONTROL(chan));
+ if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) && likely(dma_cfg->eame))
+ writel(upper_32_bits(dma_tx_phy),
+ ioaddr + DMA_CHAN_TX_BASE_ADDR_HI(chan));
+
writel(lower_32_bits(dma_tx_phy), ioaddr + DMA_CHAN_TX_BASE_ADDR(chan));
}
@@ -132,6 +140,9 @@ static void dwmac4_dma_init(void __iomem *ioaddr,
if (dma_cfg->aal)
value |= DMA_SYS_BUS_AAL;
+ if (dma_cfg->eame)
+ value |= DMA_SYS_BUS_EAME;
+
writel(value, ioaddr + DMA_SYS_BUS_MODE);
}
@@ -241,19 +252,9 @@ static void dwmac4_dma_rx_chan_op_mode(void __iomem *ioaddr, int mode,
rfa = 0x01; /* Full-1.5K */
break;
- case 8192:
- rfd = 0x06; /* Full-4K */
- rfa = 0x0a; /* Full-6K */
- break;
-
- case 16384:
- rfd = 0x06; /* Full-4K */
- rfa = 0x12; /* Full-10K */
- break;
-
default:
- rfd = 0x06; /* Full-4K */
- rfa = 0x1e; /* Full-16K */
+ rfd = 0x07; /* Full-4.5K */
+ rfa = 0x04; /* Full-3K */
break;
}
@@ -353,9 +354,28 @@ static void dwmac4_get_hw_feature(void __iomem *ioaddr,
/* MAC HW feature1 */
hw_cap = readl(ioaddr + GMAC_HW_FEATURE1);
+ dma_cap->l3l4fnum = (hw_cap & GMAC_HW_FEAT_L3L4FNUM) >> 27;
dma_cap->hash_tb_sz = (hw_cap & GMAC_HW_HASH_TB_SZ) >> 24;
dma_cap->av = (hw_cap & GMAC_HW_FEAT_AVSEL) >> 20;
dma_cap->tsoen = (hw_cap & GMAC_HW_TSOEN) >> 18;
+ dma_cap->sphen = (hw_cap & GMAC_HW_FEAT_SPHEN) >> 17;
+
+ dma_cap->addr64 = (hw_cap & GMAC_HW_ADDR64) >> 14;
+ switch (dma_cap->addr64) {
+ case 0:
+ dma_cap->addr64 = 32;
+ break;
+ case 1:
+ dma_cap->addr64 = 40;
+ break;
+ case 2:
+ dma_cap->addr64 = 48;
+ break;
+ default:
+ dma_cap->addr64 = 32;
+ break;
+ }
+
/* RX and TX FIFO sizes are encoded as log2(n / 128). Undo that by
* shifting and store the sizes in bytes.
*/
@@ -431,6 +451,22 @@ static void dwmac4_set_bfsize(void __iomem *ioaddr, int bfsize, u32 chan)
writel(value, ioaddr + DMA_CHAN_RX_CONTROL(chan));
}
+static void dwmac4_enable_sph(void __iomem *ioaddr, bool en, u32 chan)
+{
+ u32 value = readl(ioaddr + GMAC_EXT_CONFIG);
+
+ value &= ~GMAC_CONFIG_HDSMS;
+ value |= GMAC_CONFIG_HDSMS_256; /* Segment max 256 bytes */
+ writel(value, ioaddr + GMAC_EXT_CONFIG);
+
+ value = readl(ioaddr + DMA_CHAN_CONTROL(chan));
+ if (en)
+ value |= DMA_CONTROL_SPH;
+ else
+ value &= ~DMA_CONTROL_SPH;
+ writel(value, ioaddr + DMA_CHAN_CONTROL(chan));
+}
+
const struct stmmac_dma_ops dwmac4_dma_ops = {
.reset = dwmac4_dma_reset,
.init = dwmac4_dma_init,
@@ -457,6 +493,7 @@ const struct stmmac_dma_ops dwmac4_dma_ops = {
.enable_tso = dwmac4_enable_tso,
.qmode = dwmac4_qmode,
.set_bfsize = dwmac4_set_bfsize,
+ .enable_sph = dwmac4_enable_sph,
};
const struct stmmac_dma_ops dwmac410_dma_ops = {
@@ -485,4 +522,5 @@ const struct stmmac_dma_ops dwmac410_dma_ops = {
.enable_tso = dwmac4_enable_tso,
.qmode = dwmac4_qmode,
.set_bfsize = dwmac4_set_bfsize,
+ .enable_sph = dwmac4_enable_sph,
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
index b66da0237d2a..589931795847 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
@@ -65,6 +65,7 @@
#define DMA_SYS_BUS_MB BIT(14)
#define DMA_AXI_1KBBE BIT(13)
#define DMA_SYS_BUS_AAL BIT(12)
+#define DMA_SYS_BUS_EAME BIT(11)
#define DMA_AXI_BLEN256 BIT(7)
#define DMA_AXI_BLEN128 BIT(6)
#define DMA_AXI_BLEN64 BIT(5)
@@ -91,7 +92,9 @@
#define DMA_CHAN_CONTROL(x) DMA_CHANX_BASE_ADDR(x)
#define DMA_CHAN_TX_CONTROL(x) (DMA_CHANX_BASE_ADDR(x) + 0x4)
#define DMA_CHAN_RX_CONTROL(x) (DMA_CHANX_BASE_ADDR(x) + 0x8)
+#define DMA_CHAN_TX_BASE_ADDR_HI(x) (DMA_CHANX_BASE_ADDR(x) + 0x10)
#define DMA_CHAN_TX_BASE_ADDR(x) (DMA_CHANX_BASE_ADDR(x) + 0x14)
+#define DMA_CHAN_RX_BASE_ADDR_HI(x) (DMA_CHANX_BASE_ADDR(x) + 0x18)
#define DMA_CHAN_RX_BASE_ADDR(x) (DMA_CHANX_BASE_ADDR(x) + 0x1c)
#define DMA_CHAN_TX_END_ADDR(x) (DMA_CHANX_BASE_ADDR(x) + 0x20)
#define DMA_CHAN_RX_END_ADDR(x) (DMA_CHANX_BASE_ADDR(x) + 0x28)
@@ -107,6 +110,7 @@
#define DMA_CHAN_STATUS(x) (DMA_CHANX_BASE_ADDR(x) + 0x60)
/* DMA Control X */
+#define DMA_CONTROL_SPH BIT(24)
#define DMA_CONTROL_MSS_MASK GENMASK(13, 0)
/* DMA Tx Channel X Control register defines */
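
The dwmac4_dma.c hunks above only write the new *_BASE_ADDR_HI registers when dma_cfg->eame (enhanced address mode) is set, and decode the two-bit ADDR64 field of HW_FEATURE1 into an address width. A self-contained sketch of that decode, mirroring the switch in dwmac4_get_hw_feature():

#include <linux/bits.h>
#include <linux/types.h>

#define GMAC_HW_ADDR64	GENMASK(15, 14)	/* as in dwmac4.h above */

static int example_decode_addr64(u32 hw_cap)
{
	switch ((hw_cap & GMAC_HW_ADDR64) >> 14) {
	case 1:
		return 40;
	case 2:
		return 48;
	default:
		return 32;	/* 0 and reserved values */
	}
}
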
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h
index 775db776b3cc..23fecf68f781 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
// Copyright (c) 2017 Synopsys, Inc. and/or its affiliates.
// stmmac Support for 5.xx Ethernet QoS cores
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
index 99037386080a..3b6e559aa0b9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
/*
* Copyright (c) 2018 Synopsys, Inc. and/or its affiliates.
* stmmac XGMAC definitions.
@@ -360,7 +360,7 @@
#define XGMAC_TBUE BIT(2)
#define XGMAC_TIE BIT(0)
#define XGMAC_DMA_INT_DEFAULT_EN (XGMAC_NIE | XGMAC_AIE | XGMAC_RBUE | \
- XGMAC_RIE | XGMAC_TBUE | XGMAC_TIE)
+ XGMAC_RIE | XGMAC_TIE)
#define XGMAC_DMA_CH_Rx_WATCHDOG(x) (0x0000313c + (0x80 * (x)))
#define XGMAC_RWT GENMASK(7, 0)
#define XGMAC_DMA_CH_STATUS(x) (0x00003160 + (0x80 * (x)))
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
index 070bd7d1ae4c..082f5ee9e525 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
@@ -556,7 +556,7 @@ static int dwxgmac2_rss_configure(struct mac_device_info *hw,
}
static void dwxgmac2_update_vlan_hash(struct mac_device_info *hw, u32 hash,
- bool is_double)
+ __le16 perfect_match, bool is_double)
{
void __iomem *ioaddr = hw->pcsr;
@@ -577,6 +577,21 @@ static void dwxgmac2_update_vlan_hash(struct mac_device_info *hw, u32 hash,
}
writel(value, ioaddr + XGMAC_VLAN_TAG);
+ } else if (perfect_match) {
+ u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);
+
+ value |= XGMAC_FILTER_VTFE;
+
+ writel(value, ioaddr + XGMAC_PACKET_FILTER);
+
+ value = XGMAC_VLAN_ETV;
+ if (is_double) {
+ value |= XGMAC_VLAN_EDVLP;
+ value |= XGMAC_VLAN_ESVL;
+ value |= XGMAC_VLAN_DOVLTC;
+ }
+
+ writel(value | perfect_match, ioaddr + XGMAC_VLAN_TAG);
} else {
u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
index f70ca5300b82..22a7f0cc1b90 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
@@ -27,7 +27,10 @@ static void dwxgmac2_dma_init(void __iomem *ioaddr,
if (dma_cfg->aal)
value |= XGMAC_AAL;
- writel(value | XGMAC_EAME, ioaddr + XGMAC_DMA_SYSBUS_MODE);
+ if (dma_cfg->eame)
+ value |= XGMAC_EAME;
+
+ writel(value, ioaddr + XGMAC_DMA_SYSBUS_MODE);
}
static void dwxgmac2_dma_init_chan(void __iomem *ioaddr,
@@ -180,19 +183,9 @@ static void dwxgmac2_dma_rx_mode(void __iomem *ioaddr, int mode,
rfa = 0x01; /* Full-1.5K */
break;
- case 8192:
- rfd = 0x06; /* Full-4K */
- rfa = 0x0a; /* Full-6K */
- break;
-
- case 16384:
- rfd = 0x06; /* Full-4K */
- rfa = 0x12; /* Full-10K */
- break;
-
default:
- rfd = 0x06; /* Full-4K */
- rfa = 0x1e; /* Full-16K */
+ rfd = 0x07; /* Full-4.5K */
+ rfa = 0x04; /* Full-3K */
break;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
index ddb851d99618..aa5b917398fe 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
// Copyright (c) 2018 Synopsys, Inc. and/or its affiliates.
// stmmac HW Interface Callbacks
@@ -357,7 +357,7 @@ struct stmmac_ops {
struct stmmac_rss *cfg, u32 num_rxq);
/* VLAN */
void (*update_vlan_hash)(struct mac_device_info *hw, u32 hash,
- bool is_double);
+ __le16 perfect_match, bool is_double);
void (*enable_vlan)(struct mac_device_info *hw, u32 type);
/* TX Timestamp */
int (*get_mac_tx_timestamp)(struct mac_device_info *hw, u64 *ts);
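
update_vlan_hash() grows a __le16 perfect_match argument here; the stmmac_main.c hunks below use it as a fallback when the core has no VLAN hash table (dma_cap.vlhash == 0): one VID can be matched perfectly, VID 0 always passes the filter, so more than two active VIDs cannot be honoured. A sketch of that policy with a hypothetical example_pick_pmatch() helper:

#include <linux/types.h>

static int example_pick_pmatch(unsigned int active_vids, u16 last_vid,
			       __le16 *pmatch)
{
	if (active_vids > 2)
		return -EOPNOTSUPP;	/* only one real VID fits */
	*pmatch = cpu_to_le16(last_vid);
	return 0;
}
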
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index f826365c979d..644cb5d1fd4f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -36,6 +36,7 @@
#endif /* CONFIG_DEBUG_FS */
#include <linux/net_tstamp.h>
#include <linux/phylink.h>
+#include <linux/udp.h>
#include <net/pkt_cls.h>
#include "stmmac_ptp.h"
#include "stmmac.h"
@@ -867,10 +868,10 @@ static void stmmac_validate(struct phylink_config *config,
__ETHTOOL_LINK_MODE_MASK_NBITS);
}
-static int stmmac_mac_link_state(struct phylink_config *config,
- struct phylink_link_state *state)
+static void stmmac_mac_pcs_get_state(struct phylink_config *config,
+ struct phylink_link_state *state)
{
- return -EOPNOTSUPP;
+ state->link = 0;
}
static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
@@ -964,7 +965,7 @@ static void stmmac_mac_link_up(struct phylink_config *config,
static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
.validate = stmmac_validate,
- .mac_link_state = stmmac_mac_link_state,
+ .mac_pcs_get_state = stmmac_mac_pcs_get_state,
.mac_config = stmmac_mac_config,
.mac_an_restart = stmmac_mac_an_restart,
.mac_link_down = stmmac_mac_link_down,
@@ -1502,10 +1503,8 @@ static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
rx_q->dma_erx, rx_q->dma_rx_phy);
kfree(rx_q->buf_pool);
- if (rx_q->page_pool) {
- page_pool_request_shutdown(rx_q->page_pool);
+ if (rx_q->page_pool)
page_pool_destroy(rx_q->page_pool);
- }
}
}
@@ -2604,9 +2603,10 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
if (priv->use_riwt) {
- ret = stmmac_rx_watchdog(priv, priv->ioaddr, MIN_DMA_RIWT, rx_cnt);
- if (!ret)
- priv->rx_riwt = MIN_DMA_RIWT;
+ if (!priv->rx_riwt)
+ priv->rx_riwt = DEF_DMA_RIWT;
+
+ ret = stmmac_rx_watchdog(priv, priv->ioaddr, priv->rx_riwt, rx_cnt);
}
if (priv->hw->pcs)
@@ -2914,19 +2914,26 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
struct stmmac_priv *priv = netdev_priv(dev);
int nfrags = skb_shinfo(skb)->nr_frags;
u32 queue = skb_get_queue_mapping(skb);
+ unsigned int first_entry, tx_packets;
+ int tmp_pay_len = 0, first_tx;
struct stmmac_tx_queue *tx_q;
- unsigned int first_entry;
- int tmp_pay_len = 0;
+ u8 proto_hdr_len, hdr;
+ bool has_vlan, set_ic;
u32 pay_len, mss;
- u8 proto_hdr_len;
dma_addr_t des;
- bool has_vlan;
int i;
tx_q = &priv->tx_queue[queue];
+ first_tx = tx_q->cur_tx;
/* Compute header lengths */
- proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
+ proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
+ hdr = sizeof(struct udphdr);
+ } else {
+ proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdr = tcp_hdrlen(skb);
+ }
/* Desc availability based on threshold should be safe enough */
if (unlikely(stmmac_tx_avail(priv, queue) <
@@ -2956,8 +2963,8 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
}
if (netif_msg_tx_queued(priv)) {
- pr_info("%s: tcphdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
- __func__, tcp_hdrlen(skb), proto_hdr_len, pay_len, mss);
+ pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
+ __func__, hdr, proto_hdr_len, pay_len, mss);
pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
skb->data_len);
}
@@ -3025,16 +3032,27 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
tx_q->tx_skbuff[tx_q->cur_tx] = skb;
/* Manage tx mitigation */
- tx_q->tx_count_frames += nfrags + 1;
- if (likely(priv->tx_coal_frames > tx_q->tx_count_frames) &&
- !((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
- priv->hwts_tx_en)) {
- stmmac_tx_timer_arm(priv, queue);
- } else {
+ tx_packets = (tx_q->cur_tx + 1) - first_tx;
+ tx_q->tx_count_frames += tx_packets;
+
+ if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
+ set_ic = true;
+ else if (!priv->tx_coal_frames)
+ set_ic = false;
+ else if (tx_packets > priv->tx_coal_frames)
+ set_ic = true;
+ else if ((tx_q->tx_count_frames % priv->tx_coal_frames) < tx_packets)
+ set_ic = true;
+ else
+ set_ic = false;
+
+ if (set_ic) {
desc = &tx_q->dma_tx[tx_q->cur_tx];
tx_q->tx_count_frames = 0;
stmmac_set_tx_ic(priv, desc);
priv->xstats.tx_set_ic_bit++;
+ } else {
+ stmmac_tx_timer_arm(priv, queue);
}
/* We've used all descriptors we need for this skb, however,
@@ -3071,7 +3089,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
proto_hdr_len,
pay_len,
1, tx_q->tx_skbuff_dma[first_entry].last_segment,
- tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
+ hdr / 4, (skb->len - proto_hdr_len));
/* If context desc is used to change MSS */
if (mss_desc) {
@@ -3125,27 +3143,30 @@ dma_map_err:
*/
static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
{
+ unsigned int first_entry, tx_packets, enh_desc;
struct stmmac_priv *priv = netdev_priv(dev);
unsigned int nopaged_len = skb_headlen(skb);
int i, csum_insertion = 0, is_jumbo = 0;
u32 queue = skb_get_queue_mapping(skb);
int nfrags = skb_shinfo(skb)->nr_frags;
+ int gso = skb_shinfo(skb)->gso_type;
struct dma_desc *desc, *first;
struct stmmac_tx_queue *tx_q;
- unsigned int first_entry;
- unsigned int enh_desc;
+ bool has_vlan, set_ic;
+ int entry, first_tx;
dma_addr_t des;
- bool has_vlan;
- int entry;
tx_q = &priv->tx_queue[queue];
+ first_tx = tx_q->cur_tx;
if (priv->tx_path_in_lpi_mode)
stmmac_disable_eee_mode(priv);
/* Manage oversized TCP frames for GMAC4 device */
if (skb_is_gso(skb) && priv->tso) {
- if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
+ if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
+ return stmmac_tso_xmit(skb, dev);
+ if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
return stmmac_tso_xmit(skb, dev);
}
@@ -3230,12 +3251,21 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
* This approach takes care about the fragments: desc is the first
* element in case of no SG.
*/
- tx_q->tx_count_frames += nfrags + 1;
- if (likely(priv->tx_coal_frames > tx_q->tx_count_frames) &&
- !((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
- priv->hwts_tx_en)) {
- stmmac_tx_timer_arm(priv, queue);
- } else {
+ tx_packets = (entry + 1) - first_tx;
+ tx_q->tx_count_frames += tx_packets;
+
+ if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
+ set_ic = true;
+ else if (!priv->tx_coal_frames)
+ set_ic = false;
+ else if (tx_packets > priv->tx_coal_frames)
+ set_ic = true;
+ else if ((tx_q->tx_count_frames % priv->tx_coal_frames) < tx_packets)
+ set_ic = true;
+ else
+ set_ic = false;
+
+ if (set_ic) {
if (likely(priv->extend_desc))
desc = &tx_q->dma_etx[entry].basic;
else
@@ -3244,6 +3274,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
tx_q->tx_count_frames = 0;
stmmac_set_tx_ic(priv, desc);
priv->xstats.tx_set_ic_bit++;
+ } else {
+ stmmac_tx_timer_arm(priv, queue);
}
/* We've used all descriptors we need for this skb, however,
@@ -3430,7 +3462,11 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
rx_q->rx_count_frames += priv->rx_coal_frames;
if (rx_q->rx_count_frames > priv->rx_coal_frames)
rx_q->rx_count_frames = 0;
- use_rx_wd = priv->use_riwt && rx_q->rx_count_frames;
+
+ use_rx_wd = !priv->rx_coal_frames;
+ use_rx_wd |= rx_q->rx_count_frames > 0;
+ if (!priv->use_riwt)
+ use_rx_wd = false;
dma_wmb();
stmmac_set_rx_owner(priv, p, use_rx_wd);
@@ -3443,6 +3479,55 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
}
+static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
+ struct dma_desc *p,
+ int status, unsigned int len)
+{
+ int ret, coe = priv->hw->rx_csum;
+ unsigned int plen = 0, hlen = 0;
+
+ /* Not first descriptor, buffer is always zero */
+ if (priv->sph && len)
+ return 0;
+
+ /* First descriptor, get split header length */
+ ret = stmmac_get_rx_header_len(priv, p, &hlen);
+ if (priv->sph && hlen) {
+ priv->xstats.rx_split_hdr_pkt_n++;
+ return hlen;
+ }
+
+ /* First descriptor, not last descriptor and not split header */
+ if (status & rx_not_ls)
+ return priv->dma_buf_sz;
+
+ plen = stmmac_get_rx_frame_len(priv, p, coe);
+
+ /* First descriptor and last descriptor and not split header */
+ return min_t(unsigned int, priv->dma_buf_sz, plen);
+}
+
+static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
+ struct dma_desc *p,
+ int status, unsigned int len)
+{
+ int coe = priv->hw->rx_csum;
+ unsigned int plen = 0;
+
+ /* Not split header, buffer is not available */
+ if (!priv->sph)
+ return 0;
+
+ /* Not last descriptor */
+ if (status & rx_not_ls)
+ return priv->dma_buf_sz;
+
+ plen = stmmac_get_rx_frame_len(priv, p, coe);
+
+ /* Last descriptor */
+ return plen - len;
+}
+
/**
* stmmac_rx - manage the receive process
* @priv: driver private structure
@@ -3472,11 +3557,10 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true);
}
while (count < limit) {
- unsigned int hlen = 0, prev_len = 0;
+ unsigned int buf1_len = 0, buf2_len = 0;
enum pkt_hash_types hash_type;
struct stmmac_rx_buffer *buf;
struct dma_desc *np, *p;
- unsigned int sec_len;
int entry;
u32 hash;
@@ -3495,7 +3579,8 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
break;
read_again:
- sec_len = 0;
+ buf1_len = 0;
+ buf2_len = 0;
entry = next_entry;
buf = &rx_q->buf_pool[entry];
@@ -3520,7 +3605,6 @@ read_again:
np = rx_q->dma_rx + next_entry;
prefetch(np);
- prefetch(page_address(buf->page));
if (priv->extend_desc)
stmmac_rx_extended_status(priv, &priv->dev->stats,
@@ -3537,69 +3621,61 @@ read_again:
goto read_again;
if (unlikely(error)) {
dev_kfree_skb(skb);
+ skb = NULL;
count++;
continue;
}
/* Buffer is good. Go on. */
- if (likely(status & rx_not_ls)) {
- len += priv->dma_buf_sz;
- } else {
- prev_len = len;
- len = stmmac_get_rx_frame_len(priv, p, coe);
-
- /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
- * Type frames (LLC/LLC-SNAP)
- *
- * llc_snap is never checked in GMAC >= 4, so this ACS
- * feature is always disabled and packets need to be
- * stripped manually.
- */
- if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
- unlikely(status != llc_snap))
- len -= ETH_FCS_LEN;
+ prefetch(page_address(buf->page));
+ if (buf->sec_page)
+ prefetch(page_address(buf->sec_page));
+
+ buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
+ len += buf1_len;
+ buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
+ len += buf2_len;
+
+ /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
+ * Type frames (LLC/LLC-SNAP)
+ *
+ * llc_snap is never checked in GMAC >= 4, so this ACS
+ * feature is always disabled and packets need to be
+ * stripped manually.
+ */
+ if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
+ unlikely(status != llc_snap)) {
+ if (buf2_len)
+ buf2_len -= ETH_FCS_LEN;
+ else
+ buf1_len -= ETH_FCS_LEN;
+
+ len -= ETH_FCS_LEN;
}
if (!skb) {
- int ret = stmmac_get_rx_header_len(priv, p, &hlen);
-
- if (priv->sph && !ret && (hlen > 0)) {
- sec_len = len;
- if (!(status & rx_not_ls))
- sec_len = sec_len - hlen;
- len = hlen;
-
- prefetch(page_address(buf->sec_page));
- priv->xstats.rx_split_hdr_pkt_n++;
- }
-
- skb = napi_alloc_skb(&ch->rx_napi, len);
+ skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
if (!skb) {
priv->dev->stats.rx_dropped++;
count++;
- continue;
+ goto drain_data;
}
- dma_sync_single_for_cpu(priv->device, buf->addr, len,
- DMA_FROM_DEVICE);
+ dma_sync_single_for_cpu(priv->device, buf->addr,
+ buf1_len, DMA_FROM_DEVICE);
skb_copy_to_linear_data(skb, page_address(buf->page),
- len);
- skb_put(skb, len);
+ buf1_len);
+ skb_put(skb, buf1_len);
/* Data payload copied into SKB, page ready for recycle */
page_pool_recycle_direct(rx_q->page_pool, buf->page);
buf->page = NULL;
- } else {
- unsigned int buf_len = len - prev_len;
-
- if (likely(status & rx_not_ls))
- buf_len = priv->dma_buf_sz;
-
+ } else if (buf1_len) {
dma_sync_single_for_cpu(priv->device, buf->addr,
- buf_len, DMA_FROM_DEVICE);
+ buf1_len, DMA_FROM_DEVICE);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
- buf->page, 0, buf_len,
+ buf->page, 0, buf1_len,
priv->dma_buf_sz);
/* Data payload appended into SKB */
@@ -3607,22 +3683,23 @@ read_again:
buf->page = NULL;
}
- if (sec_len > 0) {
+ if (buf2_len) {
dma_sync_single_for_cpu(priv->device, buf->sec_addr,
- sec_len, DMA_FROM_DEVICE);
+ buf2_len, DMA_FROM_DEVICE);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
- buf->sec_page, 0, sec_len,
+ buf->sec_page, 0, buf2_len,
priv->dma_buf_sz);
- len += sec_len;
-
/* Data payload appended into SKB */
page_pool_release_page(rx_q->page_pool, buf->sec_page);
buf->sec_page = NULL;
}
+drain_data:
if (likely(status & rx_not_ls))
goto read_again;
+ if (!skb)
+ continue;
/* Got entire packet into SKB. Finish it. */
@@ -3640,13 +3717,14 @@ read_again:
skb_record_rx_queue(skb, queue);
napi_gro_receive(&ch->rx_napi, skb);
+ skb = NULL;
priv->dev->stats.rx_packets++;
priv->dev->stats.rx_bytes += len;
count++;
}
- if (status & rx_not_ls) {
+ if (status & rx_not_ls || skb) {
rx_q->state_saved = true;
rx_q->state.skb = skb;
rx_q->state.error = error;
@@ -3994,11 +4072,13 @@ static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev)
{
- if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
+ int gso = skb_shinfo(skb)->gso_type;
+
+ if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
/*
- * There is no way to determine the number of TSO
+ * There is no way to determine the number of TSO/USO
* capable Queues. Let's always use Queue 0
- * because if TSO is supported then at least this
+ * because if TSO/USO is supported then at least this
* one will be capable.
*/
return 0;
@@ -4214,15 +4294,26 @@ static u32 stmmac_vid_crc32_le(__le16 vid_le)
static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
{
u32 crc, hash = 0;
- u16 vid;
+ __le16 pmatch = 0;
+ int count = 0;
+ u16 vid = 0;
for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
__le16 vid_le = cpu_to_le16(vid);
crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
hash |= (1 << crc);
+ count++;
}
- return stmmac_update_vlan_hash(priv, priv->hw, hash, is_double);
+ if (!priv->dma_cap.vlhash) {
+ if (count > 2) /* VID = 0 always passes filter */
+ return -EOPNOTSUPP;
+
+ pmatch = cpu_to_le16(vid);
+ hash = 0;
+ }
+
+ return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
}
static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
@@ -4231,8 +4322,6 @@ static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid
bool is_double = false;
int ret;
- if (!priv->dma_cap.vlhash)
- return -EOPNOTSUPP;
if (be16_to_cpu(proto) == ETH_P_8021AD)
is_double = true;
@@ -4251,8 +4340,6 @@ static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vi
struct stmmac_priv *priv = netdev_priv(ndev);
bool is_double = false;
- if (!priv->dma_cap.vlhash)
- return -EOPNOTSUPP;
if (be16_to_cpu(proto) == ETH_P_8021AD)
is_double = true;
@@ -4506,6 +4593,8 @@ int stmmac_dvr_probe(struct device *device,
if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
+ if (priv->plat->has_gmac4)
+ ndev->hw_features |= NETIF_F_GSO_UDP_L4;
priv->tso = true;
dev_info(priv->device, "TSO feature enabled\n");
}
@@ -4522,6 +4611,13 @@ int stmmac_dvr_probe(struct device *device,
if (!ret) {
dev_info(priv->device, "Using %d bits DMA width\n",
priv->dma_cap.addr64);
+
+ /*
+ * If more than 32 bits can be addressed, make sure to
+ * enable enhanced addressing mode.
+ */
+ if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
+ priv->plat->dma_cfg->eame = true;
} else {
ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
if (ret) {
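
Before leaving stmmac_main.c: both xmit paths above replace the old frame-count comparison with an explicit set_ic decision — interrupt on timestamped packets, on bursts larger than tx_coal_frames, or whenever the running frame counter crosses a coal_frames boundary; otherwise (re)arm the coalescing timer. A condensed, self-contained sketch of that policy (example_set_ic() is illustrative):

#include <linux/types.h>

static bool example_set_ic(bool hw_tstamp, u32 coal_frames,
			   u32 tx_packets, u32 count_frames)
{
	if (hw_tstamp)
		return true;		/* timestamp needs the IRQ */
	if (!coal_frames)
		return false;		/* coalescing disabled */
	if (tx_packets > coal_frames)
		return true;		/* oversized burst */
	/* counter wrapped past a coal_frames boundary this burst? */
	return (count_frames % coal_frames) < tx_packets;
}
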
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index 40c42637ad75..cfe5d8b73142 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -41,20 +41,32 @@
#define MII_XGMAC_BUSY BIT(22)
#define MII_XGMAC_MAX_C22ADDR 3
#define MII_XGMAC_C22P_MASK GENMASK(MII_XGMAC_MAX_C22ADDR, 0)
+#define MII_XGMAC_PA_SHIFT 16
+#define MII_XGMAC_DA_SHIFT 21
+
+static int stmmac_xgmac2_c45_format(struct stmmac_priv *priv, int phyaddr,
+ int phyreg, u32 *hw_addr)
+{
+ u32 tmp;
+
+ /* Set port as Clause 45 */
+ tmp = readl(priv->ioaddr + XGMAC_MDIO_C22P);
+ tmp &= ~BIT(phyaddr);
+ writel(tmp, priv->ioaddr + XGMAC_MDIO_C22P);
+
+ *hw_addr = (phyaddr << MII_XGMAC_PA_SHIFT) | (phyreg & 0xffff);
+ *hw_addr |= (phyreg >> MII_DEVADDR_C45_SHIFT) << MII_XGMAC_DA_SHIFT;
+ return 0;
+}
static int stmmac_xgmac2_c22_format(struct stmmac_priv *priv, int phyaddr,
int phyreg, u32 *hw_addr)
{
- unsigned int mii_data = priv->hw->mii.data;
u32 tmp;
/* HW does not support C22 addr >= 4 */
if (phyaddr > MII_XGMAC_MAX_C22ADDR)
return -ENODEV;
- /* Wait until any existing MII operation is complete */
- if (readl_poll_timeout(priv->ioaddr + mii_data, tmp,
- !(tmp & MII_XGMAC_BUSY), 100, 10000))
- return -EBUSY;
/* Set port as Clause 22 */
tmp = readl(priv->ioaddr + XGMAC_MDIO_C22P);
@@ -62,7 +74,7 @@ static int stmmac_xgmac2_c22_format(struct stmmac_priv *priv, int phyaddr,
tmp |= BIT(phyaddr);
writel(tmp, priv->ioaddr + XGMAC_MDIO_C22P);
- *hw_addr = (phyaddr << 16) | (phyreg & 0x1f);
+ *hw_addr = (phyaddr << MII_XGMAC_PA_SHIFT) | (phyreg & 0x1f);
return 0;
}
@@ -75,17 +87,28 @@ static int stmmac_xgmac2_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg)
u32 tmp, addr, value = MII_XGMAC_BUSY;
int ret;
+ /* Wait until any existing MII operation is complete */
+ if (readl_poll_timeout(priv->ioaddr + mii_data, tmp,
+ !(tmp & MII_XGMAC_BUSY), 100, 10000))
+ return -EBUSY;
+
if (phyreg & MII_ADDR_C45) {
- return -EOPNOTSUPP;
+ phyreg &= ~MII_ADDR_C45;
+
+ ret = stmmac_xgmac2_c45_format(priv, phyaddr, phyreg, &addr);
+ if (ret)
+ return ret;
} else {
ret = stmmac_xgmac2_c22_format(priv, phyaddr, phyreg, &addr);
if (ret)
return ret;
+
+ value |= MII_XGMAC_SADDR;
}
value |= (priv->clk_csr << priv->hw->mii.clk_csr_shift)
& priv->hw->mii.clk_csr_mask;
- value |= MII_XGMAC_SADDR | MII_XGMAC_READ;
+ value |= MII_XGMAC_READ;
/* Wait until any existing MII operation is complete */
if (readl_poll_timeout(priv->ioaddr + mii_data, tmp,
@@ -115,17 +138,28 @@ static int stmmac_xgmac2_mdio_write(struct mii_bus *bus, int phyaddr,
u32 addr, tmp, value = MII_XGMAC_BUSY;
int ret;
+ /* Wait until any existing MII operation is complete */
+ if (readl_poll_timeout(priv->ioaddr + mii_data, tmp,
+ !(tmp & MII_XGMAC_BUSY), 100, 10000))
+ return -EBUSY;
+
if (phyreg & MII_ADDR_C45) {
- return -EOPNOTSUPP;
+ phyreg &= ~MII_ADDR_C45;
+
+ ret = stmmac_xgmac2_c45_format(priv, phyaddr, phyreg, &addr);
+ if (ret)
+ return ret;
} else {
ret = stmmac_xgmac2_c22_format(priv, phyaddr, phyreg, &addr);
if (ret)
return ret;
+
+ value |= MII_XGMAC_SADDR;
}
value |= (priv->clk_csr << priv->hw->mii.clk_csr_shift)
& priv->hw->mii.clk_csr_mask;
- value |= phydata | MII_XGMAC_SADDR;
+ value |= phydata;
value |= MII_XGMAC_WRITE;
/* Wait until any existing MII operation is complete */
@@ -363,6 +397,10 @@ int stmmac_mdio_register(struct net_device *ndev)
goto bus_register_fail;
}
+ /* Looks like we need a dummy read, but only for XGMAC with C45 PHYs */
+ if (priv->plat->has_xgmac)
+ stmmac_xgmac2_mdio_read(new_bus, 0, MII_ADDR_C45);
+
if (priv->plat->phy_node || mdio_node)
goto bus_register_done;
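
stmmac_xgmac2_c45_format() above builds the Clause 45 address word with the register number in the low 16 bits, the port (PHY) address starting at bit 16 and the MMD device address at bit 21, so the three fields never overlap. A self-contained sketch (example_c45_addr() is illustrative; shifts copied from the hunk):

#include <linux/types.h>

#define MII_XGMAC_PA_SHIFT	16
#define MII_XGMAC_DA_SHIFT	21

static u32 example_c45_addr(u32 phyaddr, u32 devad, u32 regnum)
{
	return (phyaddr << MII_XGMAC_PA_SHIFT) |
	       (devad << MII_XGMAC_DA_SHIFT) |
	       (regnum & 0xffff);
}
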
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 170c3a052b14..bedaff0c13bd 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -412,9 +412,9 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
*mac = NULL;
}
- plat->phy_interface = of_get_phy_mode(np);
- if (plat->phy_interface < 0)
- return ERR_PTR(plat->phy_interface);
+ rc = of_get_phy_mode(np, &plat->phy_interface);
+ if (rc)
+ return ERR_PTR(rc);
plat->interface = stmmac_of_get_mac_mode(np);
if (plat->interface < 0)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
index df638b18b72c..0989e2bb6ee3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
@@ -140,6 +140,10 @@ static int stmmac_enable(struct ptp_clock_info *ptp,
switch (rq->type) {
case PTP_CLK_REQ_PEROUT:
+ /* Reject requests with unsupported flags */
+ if (rq->perout.flags)
+ return -EOPNOTSUPP;
+
cfg = &priv->pps[rq->perout.index];
cfg->start.tv_sec = rq->perout.start.sec;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
index ac3f658105c0..f3d8b9336b8e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
@@ -877,16 +877,13 @@ out:
return 0;
}
-static int stmmac_test_vlanfilt(struct stmmac_priv *priv)
+static int __stmmac_test_vlanfilt(struct stmmac_priv *priv)
{
struct stmmac_packet_attrs attr = { };
struct stmmac_test_priv *tpriv;
struct sk_buff *skb = NULL;
int ret = 0, i;
- if (!priv->dma_cap.vlhash)
- return -EOPNOTSUPP;
-
tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
if (!tpriv)
return -ENOMEM;
@@ -952,16 +949,32 @@ cleanup:
return ret;
}
-static int stmmac_test_dvlanfilt(struct stmmac_priv *priv)
+static int stmmac_test_vlanfilt(struct stmmac_priv *priv)
+{
+ if (!priv->dma_cap.vlhash)
+ return -EOPNOTSUPP;
+
+ return __stmmac_test_vlanfilt(priv);
+}
+
+static int stmmac_test_vlanfilt_perfect(struct stmmac_priv *priv)
+{
+ int ret, prev_cap = priv->dma_cap.vlhash;
+
+ priv->dma_cap.vlhash = 0;
+ ret = __stmmac_test_vlanfilt(priv);
+ priv->dma_cap.vlhash = prev_cap;
+
+ return ret;
+}
+
+static int __stmmac_test_dvlanfilt(struct stmmac_priv *priv)
{
struct stmmac_packet_attrs attr = { };
struct stmmac_test_priv *tpriv;
struct sk_buff *skb = NULL;
int ret = 0, i;
- if (!priv->dma_cap.vlhash)
- return -EOPNOTSUPP;
-
tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
if (!tpriv)
return -ENOMEM;
@@ -1028,6 +1041,25 @@ cleanup:
return ret;
}
+static int stmmac_test_dvlanfilt(struct stmmac_priv *priv)
+{
+ if (!priv->dma_cap.vlhash)
+ return -EOPNOTSUPP;
+
+ return __stmmac_test_dvlanfilt(priv);
+}
+
+static int stmmac_test_dvlanfilt_perfect(struct stmmac_priv *priv)
+{
+ int ret, prev_cap = priv->dma_cap.vlhash;
+
+ priv->dma_cap.vlhash = 0;
+ ret = __stmmac_test_dvlanfilt(priv);
+ priv->dma_cap.vlhash = prev_cap;
+
+ return ret;
+}
+
#ifdef CONFIG_NET_CLS_ACT
static int stmmac_test_rxp(struct stmmac_priv *priv)
{
@@ -1702,119 +1734,127 @@ static const struct stmmac_test {
int (*fn)(struct stmmac_priv *priv);
} stmmac_selftests[] = {
{
- .name = "MAC Loopback ",
+ .name = "MAC Loopback ",
.lb = STMMAC_LOOPBACK_MAC,
.fn = stmmac_test_mac_loopback,
}, {
- .name = "PHY Loopback ",
+ .name = "PHY Loopback ",
.lb = STMMAC_LOOPBACK_NONE, /* Test will handle it */
.fn = stmmac_test_phy_loopback,
}, {
- .name = "MMC Counters ",
+ .name = "MMC Counters ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_mmc,
}, {
- .name = "EEE ",
+ .name = "EEE ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_eee,
}, {
- .name = "Hash Filter MC ",
+ .name = "Hash Filter MC ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_hfilt,
}, {
- .name = "Perfect Filter UC ",
+ .name = "Perfect Filter UC ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_pfilt,
}, {
- .name = "MC Filter ",
+ .name = "MC Filter ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_mcfilt,
}, {
- .name = "UC Filter ",
+ .name = "UC Filter ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_ucfilt,
}, {
- .name = "Flow Control ",
+ .name = "Flow Control ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_flowctrl,
}, {
- .name = "RSS ",
+ .name = "RSS ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_rss,
}, {
- .name = "VLAN Filtering ",
+ .name = "VLAN Filtering ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_vlanfilt,
}, {
- .name = "Double VLAN Filtering",
+ .name = "VLAN Filtering (perf) ",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_vlanfilt_perfect,
+ }, {
+ .name = "Double VLAN Filter ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_dvlanfilt,
}, {
- .name = "Flexible RX Parser ",
+ .name = "Double VLAN Filter (perf) ",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_dvlanfilt_perfect,
+ }, {
+ .name = "Flexible RX Parser ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_rxp,
}, {
- .name = "SA Insertion (desc) ",
+ .name = "SA Insertion (desc) ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_desc_sai,
}, {
- .name = "SA Replacement (desc)",
+ .name = "SA Replacement (desc) ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_desc_sar,
}, {
- .name = "SA Insertion (reg) ",
+ .name = "SA Insertion (reg) ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_reg_sai,
}, {
- .name = "SA Replacement (reg)",
+ .name = "SA Replacement (reg) ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_reg_sar,
}, {
- .name = "VLAN TX Insertion ",
+ .name = "VLAN TX Insertion ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_vlanoff,
}, {
- .name = "SVLAN TX Insertion ",
+ .name = "SVLAN TX Insertion ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_svlanoff,
}, {
- .name = "L3 DA Filtering ",
+ .name = "L3 DA Filtering ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_l3filt_da,
}, {
- .name = "L3 SA Filtering ",
+ .name = "L3 SA Filtering ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_l3filt_sa,
}, {
- .name = "L4 DA TCP Filtering ",
+ .name = "L4 DA TCP Filtering ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_l4filt_da_tcp,
}, {
- .name = "L4 SA TCP Filtering ",
+ .name = "L4 SA TCP Filtering ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_l4filt_sa_tcp,
}, {
- .name = "L4 DA UDP Filtering ",
+ .name = "L4 DA UDP Filtering ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_l4filt_da_udp,
}, {
- .name = "L4 SA UDP Filtering ",
+ .name = "L4 SA UDP Filtering ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_l4filt_sa_udp,
}, {
- .name = "ARP Offload ",
+ .name = "ARP Offload ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_arpoffload,
}, {
- .name = "Jumbo Frame ",
+ .name = "Jumbo Frame ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_jumbo,
}, {
- .name = "Multichannel Jumbo ",
+ .name = "Multichannel Jumbo ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_mjumbo,
}, {
- .name = "Split Header ",
+ .name = "Split Header ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_sph,
},
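
The new *_perfect selftest variants above wrap a shared body and simply mask dma_cap.vlhash for its duration, forcing the same traffic through the perfect-match fallback path. A generic sketch of that save/override/restore pattern (example_test_without_cap() is hypothetical):

static int example_test_without_cap(int *cap, int (*fn)(void *), void *arg)
{
	int prev = *cap, ret;

	*cap = 0;	/* force the fallback path */
	ret = fn(arg);
	*cap = prev;	/* restore the real capability */
	return ret;
}
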
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index f9a9a9d82233..7d972e0fd2b0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -321,8 +321,6 @@ static int tc_setup_cbs(struct stmmac_priv *priv,
return -EINVAL;
if (!priv->dma_cap.av)
return -EOPNOTSUPP;
- if (priv->speed != SPEED_100 && priv->speed != SPEED_1000)
- return -EOPNOTSUPP;
mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
if (mode_to_use == MTL_QUEUE_DCB && qopt->enable) {
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index 834afca3a019..9170572346b5 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -22,6 +22,7 @@ config TI_DAVINCI_EMAC
depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 ) || COMPILE_TEST
select TI_DAVINCI_MDIO
select PHYLIB
+ select GENERIC_ALLOCATOR
---help---
This driver supports TI's DaVinci Ethernet.
@@ -58,9 +59,24 @@ config TI_CPSW
To compile this driver as a module, choose M here: the module
will be called cpsw.
+config TI_CPSW_SWITCHDEV
+ tristate "TI CPSW Switch Support with switchdev"
+ depends on ARCH_DAVINCI || ARCH_OMAP2PLUS || COMPILE_TEST
+ select NET_SWITCHDEV
+ select TI_DAVINCI_MDIO
+ select MFD_SYSCON
+ select REGMAP
+ select NET_DEVLINK
+ imply PHY_TI_GMII_SEL
+ help
+ This driver supports TI's CPSW Ethernet Switch.
+
+ To compile this driver as a module, choose M here: the module
+ will be called cpsw_new.
+
config TI_CPTS
bool "TI Common Platform Time Sync (CPTS) Support"
- depends on TI_CPSW || TI_KEYSTONE_NETCP || COMPILE_TEST
+ depends on TI_CPSW || TI_KEYSTONE_NETCP || TI_CPSW_SWITCHDEV || COMPILE_TEST
depends on COMMON_CLK
depends on POSIX_TIMERS
---help---
@@ -72,7 +88,7 @@ config TI_CPTS
config TI_CPTS_MOD
tristate
depends on TI_CPTS
- default y if TI_CPSW=y || TI_KEYSTONE_NETCP=y
+ default y if TI_CPSW=y || TI_KEYSTONE_NETCP=y || TI_CPSW_SWITCHDEV=y
select NET_PTP_CLASSIFY
imply PTP_1588_CLOCK
default m
diff --git a/drivers/net/ethernet/ti/Makefile b/drivers/net/ethernet/ti/Makefile
index ed12e1e5df2f..d34df8e5cf94 100644
--- a/drivers/net/ethernet/ti/Makefile
+++ b/drivers/net/ethernet/ti/Makefile
@@ -15,6 +15,8 @@ obj-$(CONFIG_TI_CPSW_PHY_SEL) += cpsw-phy-sel.o
obj-$(CONFIG_TI_CPTS_MOD) += cpts.o
obj-$(CONFIG_TI_CPSW) += ti_cpsw.o
ti_cpsw-y := cpsw.o davinci_cpdma.o cpsw_ale.o cpsw_priv.o cpsw_sl.o cpsw_ethtool.o
+obj-$(CONFIG_TI_CPSW_SWITCHDEV) += ti_cpsw_new.o
+ti_cpsw_new-y := cpsw_switchdev.o cpsw_new.o davinci_cpdma.o cpsw_ale.o cpsw_sl.o cpsw_priv.o cpsw_ethtool.o
obj-$(CONFIG_TI_KEYSTONE_NETCP) += keystone_netcp.o
keystone_netcp-y := netcp_core.o cpsw_ale.o
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index f298d714efd6..6ae4a72e6f43 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -34,7 +34,6 @@
#include <net/page_pool.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
-#include <linux/filter.h>
#include <linux/pinctrl/consumer.h>
#include <net/pkt_cls.h>
@@ -64,10 +63,6 @@ static int descs_pool_size = CPSW_CPDMA_DESCS_POOL_SIZE_DEFAULT;
module_param(descs_pool_size, int, 0444);
MODULE_PARM_DESC(descs_pool_size, "Number of CPDMA CPPI descriptors in pool");
-/* The buf includes headroom compatible with both skb and xdpf */
-#define CPSW_HEADROOM_NA (max(XDP_PACKET_HEADROOM, NET_SKB_PAD) + NET_IP_ALIGN)
-#define CPSW_HEADROOM ALIGN(CPSW_HEADROOM_NA, sizeof(long))
-
#define for_each_slave(priv, func, arg...) \
do { \
struct cpsw_slave *slave; \
@@ -82,10 +77,16 @@ MODULE_PARM_DESC(descs_pool_size, "Number of CPDMA CPPI descriptors in pool");
(func)(slave++, ##arg); \
} while (0)
-#define CPSW_XMETA_OFFSET ALIGN(sizeof(struct xdp_frame), sizeof(long))
+static int cpsw_slave_index_priv(struct cpsw_common *cpsw,
+ struct cpsw_priv *priv)
+{
+ return cpsw->data.dual_emac ? priv->emac_port : cpsw->data.active_slave;
+}
-#define CPSW_XDP_CONSUMED 1
-#define CPSW_XDP_PASS 0
+static int cpsw_get_slave_port(u32 slave_num)
+{
+ return slave_num + 1;
+}
static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
__be16 proto, u16 vid);
@@ -332,218 +333,6 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
cpsw_del_mc_addr);
}
-void cpsw_intr_enable(struct cpsw_common *cpsw)
-{
- writel_relaxed(0xFF, &cpsw->wr_regs->tx_en);
- writel_relaxed(0xFF, &cpsw->wr_regs->rx_en);
-
- cpdma_ctlr_int_ctrl(cpsw->dma, true);
- return;
-}
-
-void cpsw_intr_disable(struct cpsw_common *cpsw)
-{
- writel_relaxed(0, &cpsw->wr_regs->tx_en);
- writel_relaxed(0, &cpsw->wr_regs->rx_en);
-
- cpdma_ctlr_int_ctrl(cpsw->dma, false);
- return;
-}
-
-static int cpsw_is_xdpf_handle(void *handle)
-{
- return (unsigned long)handle & BIT(0);
-}
-
-static void *cpsw_xdpf_to_handle(struct xdp_frame *xdpf)
-{
- return (void *)((unsigned long)xdpf | BIT(0));
-}
-
-static struct xdp_frame *cpsw_handle_to_xdpf(void *handle)
-{
- return (struct xdp_frame *)((unsigned long)handle & ~BIT(0));
-}
-
-struct __aligned(sizeof(long)) cpsw_meta_xdp {
- struct net_device *ndev;
- int ch;
-};
-
-void cpsw_tx_handler(void *token, int len, int status)
-{
- struct cpsw_meta_xdp *xmeta;
- struct xdp_frame *xdpf;
- struct net_device *ndev;
- struct netdev_queue *txq;
- struct sk_buff *skb;
- int ch;
-
- if (cpsw_is_xdpf_handle(token)) {
- xdpf = cpsw_handle_to_xdpf(token);
- xmeta = (void *)xdpf + CPSW_XMETA_OFFSET;
- ndev = xmeta->ndev;
- ch = xmeta->ch;
- xdp_return_frame(xdpf);
- } else {
- skb = token;
- ndev = skb->dev;
- ch = skb_get_queue_mapping(skb);
- cpts_tx_timestamp(ndev_to_cpsw(ndev)->cpts, skb);
- dev_kfree_skb_any(skb);
- }
-
- /* Check whether the queue is stopped due to stalled tx dma, if the
- * queue is stopped then start the queue as we have free desc for tx
- */
- txq = netdev_get_tx_queue(ndev, ch);
- if (unlikely(netif_tx_queue_stopped(txq)))
- netif_tx_wake_queue(txq);
-
- ndev->stats.tx_packets++;
- ndev->stats.tx_bytes += len;
-}
-
-static void cpsw_rx_vlan_encap(struct sk_buff *skb)
-{
- struct cpsw_priv *priv = netdev_priv(skb->dev);
- struct cpsw_common *cpsw = priv->cpsw;
- u32 rx_vlan_encap_hdr = *((u32 *)skb->data);
- u16 vtag, vid, prio, pkt_type;
-
- /* Remove VLAN header encapsulation word */
- skb_pull(skb, CPSW_RX_VLAN_ENCAP_HDR_SIZE);
-
- pkt_type = (rx_vlan_encap_hdr >>
- CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_SHIFT) &
- CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_MSK;
- /* Ignore unknown & Priority-tagged packets*/
- if (pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_RESERV ||
- pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_PRIO_TAG)
- return;
-
- vid = (rx_vlan_encap_hdr >>
- CPSW_RX_VLAN_ENCAP_HDR_VID_SHIFT) &
- VLAN_VID_MASK;
- /* Ignore vid 0 and pass packet as is */
- if (!vid)
- return;
- /* Ignore default vlans in dual mac mode */
- if (cpsw->data.dual_emac &&
- vid == cpsw->slaves[priv->emac_port].port_vlan)
- return;
-
- prio = (rx_vlan_encap_hdr >>
- CPSW_RX_VLAN_ENCAP_HDR_PRIO_SHIFT) &
- CPSW_RX_VLAN_ENCAP_HDR_PRIO_MSK;
-
- vtag = (prio << VLAN_PRIO_SHIFT) | vid;
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag);
-
- /* strip vlan tag for VLAN-tagged packet */
- if (pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_VLAN_TAG) {
- memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
- skb_pull(skb, VLAN_HLEN);
- }
-}
-
-static int cpsw_xdp_tx_frame(struct cpsw_priv *priv, struct xdp_frame *xdpf,
- struct page *page)
-{
- struct cpsw_common *cpsw = priv->cpsw;
- struct cpsw_meta_xdp *xmeta;
- struct cpdma_chan *txch;
- dma_addr_t dma;
- int ret, port;
-
- xmeta = (void *)xdpf + CPSW_XMETA_OFFSET;
- xmeta->ndev = priv->ndev;
- xmeta->ch = 0;
- txch = cpsw->txv[0].ch;
-
- port = priv->emac_port + cpsw->data.dual_emac;
- if (page) {
- dma = page_pool_get_dma_addr(page);
- dma += xdpf->headroom + sizeof(struct xdp_frame);
- ret = cpdma_chan_submit_mapped(txch, cpsw_xdpf_to_handle(xdpf),
- dma, xdpf->len, port);
- } else {
- if (sizeof(*xmeta) > xdpf->headroom) {
- xdp_return_frame_rx_napi(xdpf);
- return -EINVAL;
- }
-
- ret = cpdma_chan_submit(txch, cpsw_xdpf_to_handle(xdpf),
- xdpf->data, xdpf->len, port);
- }
-
- if (ret) {
- priv->ndev->stats.tx_dropped++;
- xdp_return_frame_rx_napi(xdpf);
- }
-
- return ret;
-}
-
-static int cpsw_run_xdp(struct cpsw_priv *priv, int ch, struct xdp_buff *xdp,
- struct page *page)
-{
- struct cpsw_common *cpsw = priv->cpsw;
- struct net_device *ndev = priv->ndev;
- int ret = CPSW_XDP_CONSUMED;
- struct xdp_frame *xdpf;
- struct bpf_prog *prog;
- u32 act;
-
- rcu_read_lock();
-
- prog = READ_ONCE(priv->xdp_prog);
- if (!prog) {
- ret = CPSW_XDP_PASS;
- goto out;
- }
-
- act = bpf_prog_run_xdp(prog, xdp);
- switch (act) {
- case XDP_PASS:
- ret = CPSW_XDP_PASS;
- break;
- case XDP_TX:
- xdpf = convert_to_xdp_frame(xdp);
- if (unlikely(!xdpf))
- goto drop;
-
- cpsw_xdp_tx_frame(priv, xdpf, page);
- break;
- case XDP_REDIRECT:
- if (xdp_do_redirect(ndev, xdp, prog))
- goto drop;
-
- /* Have to flush here, per packet, instead of doing it in bulk
- * at the end of the napi handler. The RX devices on this
- * particular hardware is sharing a common queue, so the
- * incoming device might change per packet.
- */
- xdp_do_flush_map();
- break;
- default:
- bpf_warn_invalid_xdp_action(act);
- /* fall through */
- case XDP_ABORTED:
- trace_xdp_exception(ndev, prog, act);
- /* fall through -- handle aborts by dropping packet */
- case XDP_DROP:
- goto drop;
- }
-out:
- rcu_read_unlock();
- return ret;
-drop:
- rcu_read_unlock();
- page_pool_recycle_direct(cpsw->page_pool[ch], page);
- return ret;
-}
-
static unsigned int cpsw_rxbuf_total_len(unsigned int len)
{
len += CPSW_HEADROOM;
@@ -552,123 +341,6 @@ static unsigned int cpsw_rxbuf_total_len(unsigned int len)
return SKB_DATA_ALIGN(len);
}
-static struct page_pool *cpsw_create_page_pool(struct cpsw_common *cpsw,
- int size)
-{
- struct page_pool_params pp_params;
- struct page_pool *pool;
-
- pp_params.order = 0;
- pp_params.flags = PP_FLAG_DMA_MAP;
- pp_params.pool_size = size;
- pp_params.nid = NUMA_NO_NODE;
- pp_params.dma_dir = DMA_BIDIRECTIONAL;
- pp_params.dev = cpsw->dev;
-
- pool = page_pool_create(&pp_params);
- if (IS_ERR(pool))
- dev_err(cpsw->dev, "cannot create rx page pool\n");
-
- return pool;
-}
-
-static int cpsw_ndev_create_xdp_rxq(struct cpsw_priv *priv, int ch)
-{
- struct cpsw_common *cpsw = priv->cpsw;
- struct xdp_rxq_info *rxq;
- struct page_pool *pool;
- int ret;
-
- pool = cpsw->page_pool[ch];
- rxq = &priv->xdp_rxq[ch];
-
- ret = xdp_rxq_info_reg(rxq, priv->ndev, ch);
- if (ret)
- return ret;
-
- ret = xdp_rxq_info_reg_mem_model(rxq, MEM_TYPE_PAGE_POOL, pool);
- if (ret)
- xdp_rxq_info_unreg(rxq);
-
- return ret;
-}
-
-static void cpsw_ndev_destroy_xdp_rxq(struct cpsw_priv *priv, int ch)
-{
- struct xdp_rxq_info *rxq = &priv->xdp_rxq[ch];
-
- if (!xdp_rxq_info_is_reg(rxq))
- return;
-
- xdp_rxq_info_unreg(rxq);
-}
-
-static int cpsw_create_rx_pool(struct cpsw_common *cpsw, int ch)
-{
- struct page_pool *pool;
- int ret = 0, pool_size;
-
- pool_size = cpdma_chan_get_rx_buf_num(cpsw->rxv[ch].ch);
- pool = cpsw_create_page_pool(cpsw, pool_size);
- if (IS_ERR(pool))
- ret = PTR_ERR(pool);
- else
- cpsw->page_pool[ch] = pool;
-
- return ret;
-}
-
-void cpsw_destroy_xdp_rxqs(struct cpsw_common *cpsw)
-{
- struct net_device *ndev;
- int i, ch;
-
- for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
- for (i = 0; i < cpsw->data.slaves; i++) {
- ndev = cpsw->slaves[i].ndev;
- if (!ndev)
- continue;
-
- cpsw_ndev_destroy_xdp_rxq(netdev_priv(ndev), ch);
- }
-
- page_pool_destroy(cpsw->page_pool[ch]);
- cpsw->page_pool[ch] = NULL;
- }
-}
-
-int cpsw_create_xdp_rxqs(struct cpsw_common *cpsw)
-{
- struct net_device *ndev;
- int i, ch, ret;
-
- for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
- ret = cpsw_create_rx_pool(cpsw, ch);
- if (ret)
- goto err_cleanup;
-
- /* using same page pool is allowed as no running rx handlers
- * simultaneously for both ndevs
- */
- for (i = 0; i < cpsw->data.slaves; i++) {
- ndev = cpsw->slaves[i].ndev;
- if (!ndev)
- continue;
-
- ret = cpsw_ndev_create_xdp_rxq(netdev_priv(ndev), ch);
- if (ret)
- goto err_cleanup;
- }
- }
-
- return 0;
-
-err_cleanup:
- cpsw_destroy_xdp_rxqs(cpsw);
-
- return ret;
-}
-
static void cpsw_rx_handler(void *token, int len, int status)
{
struct page *new_page, *page = token;
@@ -735,7 +407,8 @@ static void cpsw_rx_handler(void *token, int len, int status)
xdp.data_hard_start = pa;
xdp.rxq = &priv->xdp_rxq[ch];
- ret = cpsw_run_xdp(priv, ch, &xdp, page);
+ port = priv->emac_port + cpsw->data.dual_emac;
+ ret = cpsw_run_xdp(priv, ch, &xdp, page, port);
if (ret != CPSW_XDP_PASS)
goto requeue;
@@ -785,274 +458,6 @@ requeue:
}
}
-void cpsw_split_res(struct cpsw_common *cpsw)
-{
- u32 consumed_rate = 0, bigest_rate = 0;
- struct cpsw_vector *txv = cpsw->txv;
- int i, ch_weight, rlim_ch_num = 0;
- int budget, bigest_rate_ch = 0;
- u32 ch_rate, max_rate;
- int ch_budget = 0;
-
- for (i = 0; i < cpsw->tx_ch_num; i++) {
- ch_rate = cpdma_chan_get_rate(txv[i].ch);
- if (!ch_rate)
- continue;
-
- rlim_ch_num++;
- consumed_rate += ch_rate;
- }
-
- if (cpsw->tx_ch_num == rlim_ch_num) {
- max_rate = consumed_rate;
- } else if (!rlim_ch_num) {
- ch_budget = CPSW_POLL_WEIGHT / cpsw->tx_ch_num;
- bigest_rate = 0;
- max_rate = consumed_rate;
- } else {
- max_rate = cpsw->speed * 1000;
-
- /* if max_rate is less then expected due to reduced link speed,
- * split proportionally according next potential max speed
- */
- if (max_rate < consumed_rate)
- max_rate *= 10;
-
- if (max_rate < consumed_rate)
- max_rate *= 10;
-
- ch_budget = (consumed_rate * CPSW_POLL_WEIGHT) / max_rate;
- ch_budget = (CPSW_POLL_WEIGHT - ch_budget) /
- (cpsw->tx_ch_num - rlim_ch_num);
- bigest_rate = (max_rate - consumed_rate) /
- (cpsw->tx_ch_num - rlim_ch_num);
- }
-
- /* split tx weight/budget */
- budget = CPSW_POLL_WEIGHT;
- for (i = 0; i < cpsw->tx_ch_num; i++) {
- ch_rate = cpdma_chan_get_rate(txv[i].ch);
- if (ch_rate) {
- txv[i].budget = (ch_rate * CPSW_POLL_WEIGHT) / max_rate;
- if (!txv[i].budget)
- txv[i].budget++;
- if (ch_rate > bigest_rate) {
- bigest_rate_ch = i;
- bigest_rate = ch_rate;
- }
-
- ch_weight = (ch_rate * 100) / max_rate;
- if (!ch_weight)
- ch_weight++;
- cpdma_chan_set_weight(cpsw->txv[i].ch, ch_weight);
- } else {
- txv[i].budget = ch_budget;
- if (!bigest_rate_ch)
- bigest_rate_ch = i;
- cpdma_chan_set_weight(cpsw->txv[i].ch, 0);
- }
-
- budget -= txv[i].budget;
- }
-
- if (budget)
- txv[bigest_rate_ch].budget += budget;
-
- /* split rx budget */
- budget = CPSW_POLL_WEIGHT;
- ch_budget = budget / cpsw->rx_ch_num;
- for (i = 0; i < cpsw->rx_ch_num; i++) {
- cpsw->rxv[i].budget = ch_budget;
- budget -= ch_budget;
- }
-
- if (budget)
- cpsw->rxv[0].budget += budget;
-}
-
-static irqreturn_t cpsw_tx_interrupt(int irq, void *dev_id)
-{
- struct cpsw_common *cpsw = dev_id;
-
- writel(0, &cpsw->wr_regs->tx_en);
- cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_TX);
-
- if (cpsw->quirk_irq) {
- disable_irq_nosync(cpsw->irqs_table[1]);
- cpsw->tx_irq_disabled = true;
- }
-
- napi_schedule(&cpsw->napi_tx);
- return IRQ_HANDLED;
-}
-
-static irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id)
-{
- struct cpsw_common *cpsw = dev_id;
-
- cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_RX);
- writel(0, &cpsw->wr_regs->rx_en);
-
- if (cpsw->quirk_irq) {
- disable_irq_nosync(cpsw->irqs_table[0]);
- cpsw->rx_irq_disabled = true;
- }
-
- napi_schedule(&cpsw->napi_rx);
- return IRQ_HANDLED;
-}
-
-static int cpsw_tx_mq_poll(struct napi_struct *napi_tx, int budget)
-{
- u32 ch_map;
- int num_tx, cur_budget, ch;
- struct cpsw_common *cpsw = napi_to_cpsw(napi_tx);
- struct cpsw_vector *txv;
-
- /* process every unprocessed channel */
- ch_map = cpdma_ctrl_txchs_state(cpsw->dma);
- for (ch = 0, num_tx = 0; ch_map & 0xff; ch_map <<= 1, ch++) {
- if (!(ch_map & 0x80))
- continue;
-
- txv = &cpsw->txv[ch];
- if (unlikely(txv->budget > budget - num_tx))
- cur_budget = budget - num_tx;
- else
- cur_budget = txv->budget;
-
- num_tx += cpdma_chan_process(txv->ch, cur_budget);
- if (num_tx >= budget)
- break;
- }
-
- if (num_tx < budget) {
- napi_complete(napi_tx);
- writel(0xff, &cpsw->wr_regs->tx_en);
- }
-
- return num_tx;
-}
-
-static int cpsw_tx_poll(struct napi_struct *napi_tx, int budget)
-{
- struct cpsw_common *cpsw = napi_to_cpsw(napi_tx);
- int num_tx;
-
- num_tx = cpdma_chan_process(cpsw->txv[0].ch, budget);
- if (num_tx < budget) {
- napi_complete(napi_tx);
- writel(0xff, &cpsw->wr_regs->tx_en);
- if (cpsw->tx_irq_disabled) {
- cpsw->tx_irq_disabled = false;
- enable_irq(cpsw->irqs_table[1]);
- }
- }
-
- return num_tx;
-}
-
-static int cpsw_rx_mq_poll(struct napi_struct *napi_rx, int budget)
-{
- u32 ch_map;
- int num_rx, cur_budget, ch;
- struct cpsw_common *cpsw = napi_to_cpsw(napi_rx);
- struct cpsw_vector *rxv;
-
- /* process every unprocessed channel */
- ch_map = cpdma_ctrl_rxchs_state(cpsw->dma);
- for (ch = 0, num_rx = 0; ch_map; ch_map >>= 1, ch++) {
- if (!(ch_map & 0x01))
- continue;
-
- rxv = &cpsw->rxv[ch];
- if (unlikely(rxv->budget > budget - num_rx))
- cur_budget = budget - num_rx;
- else
- cur_budget = rxv->budget;
-
- num_rx += cpdma_chan_process(rxv->ch, cur_budget);
- if (num_rx >= budget)
- break;
- }
-
- if (num_rx < budget) {
- napi_complete_done(napi_rx, num_rx);
- writel(0xff, &cpsw->wr_regs->rx_en);
- }
-
- return num_rx;
-}
-
-static int cpsw_rx_poll(struct napi_struct *napi_rx, int budget)
-{
- struct cpsw_common *cpsw = napi_to_cpsw(napi_rx);
- int num_rx;
-
- num_rx = cpdma_chan_process(cpsw->rxv[0].ch, budget);
- if (num_rx < budget) {
- napi_complete_done(napi_rx, num_rx);
- writel(0xff, &cpsw->wr_regs->rx_en);
- if (cpsw->rx_irq_disabled) {
- cpsw->rx_irq_disabled = false;
- enable_irq(cpsw->irqs_table[0]);
- }
- }
-
- return num_rx;
-}
-
-static inline void soft_reset(const char *module, void __iomem *reg)
-{
- unsigned long timeout = jiffies + HZ;
-
- writel_relaxed(1, reg);
- do {
- cpu_relax();
- } while ((readl_relaxed(reg) & 1) && time_after(timeout, jiffies));
-
- WARN(readl_relaxed(reg) & 1, "failed to soft-reset %s\n", module);
-}
-
-static void cpsw_set_slave_mac(struct cpsw_slave *slave,
- struct cpsw_priv *priv)
-{
- slave_write(slave, mac_hi(priv->mac_addr), SA_HI);
- slave_write(slave, mac_lo(priv->mac_addr), SA_LO);
-}
-
-static bool cpsw_shp_is_off(struct cpsw_priv *priv)
-{
- struct cpsw_common *cpsw = priv->cpsw;
- struct cpsw_slave *slave;
- u32 shift, mask, val;
-
- val = readl_relaxed(&cpsw->regs->ptype);
-
- slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
- shift = CPSW_FIFO_SHAPE_EN_SHIFT + 3 * slave->slave_num;
- mask = 7 << shift;
- val = val & mask;
-
- return !val;
-}
-
-static void cpsw_fifo_shp_on(struct cpsw_priv *priv, int fifo, int on)
-{
- struct cpsw_common *cpsw = priv->cpsw;
- struct cpsw_slave *slave;
- u32 shift, mask, val;
-
- val = readl_relaxed(&cpsw->regs->ptype);
-
- slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
- shift = CPSW_FIFO_SHAPE_EN_SHIFT + 3 * slave->slave_num;
- mask = (1 << --fifo) << shift;
- val = on ? val | mask : val & ~mask;
-
- writel_relaxed(val, &cpsw->regs->ptype);
-}
-
static void _cpsw_adjust_link(struct cpsw_slave *slave,
struct cpsw_priv *priv, bool *link)
{
@@ -1118,44 +523,6 @@ static void _cpsw_adjust_link(struct cpsw_slave *slave,
slave->mac_control = mac_control;
}
-static int cpsw_get_common_speed(struct cpsw_common *cpsw)
-{
- int i, speed;
-
- for (i = 0, speed = 0; i < cpsw->data.slaves; i++)
- if (cpsw->slaves[i].phy && cpsw->slaves[i].phy->link)
- speed += cpsw->slaves[i].phy->speed;
-
- return speed;
-}
-
-static int cpsw_need_resplit(struct cpsw_common *cpsw)
-{
- int i, rlim_ch_num;
- int speed, ch_rate;
-
- /* re-split resources only in case speed was changed */
- speed = cpsw_get_common_speed(cpsw);
- if (speed == cpsw->speed || !speed)
- return 0;
-
- cpsw->speed = speed;
-
- for (i = 0, rlim_ch_num = 0; i < cpsw->tx_ch_num; i++) {
- ch_rate = cpdma_chan_get_rate(cpsw->txv[i].ch);
- if (!ch_rate)
- break;
-
- rlim_ch_num++;
- }
-
- /* cases not dependent on speed */
- if (!rlim_ch_num || rlim_ch_num == cpsw->tx_ch_num)
- return 0;
-
- return 1;
-}
-
static void cpsw_adjust_link(struct net_device *ndev)
{
struct cpsw_priv *priv = netdev_priv(ndev);
@@ -1348,51 +715,6 @@ static void cpsw_init_host_port(struct cpsw_priv *priv)
}
}
-int cpsw_fill_rx_channels(struct cpsw_priv *priv)
-{
- struct cpsw_common *cpsw = priv->cpsw;
- struct cpsw_meta_xdp *xmeta;
- struct page_pool *pool;
- struct page *page;
- int ch_buf_num;
- int ch, i, ret;
- dma_addr_t dma;
-
- for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
- pool = cpsw->page_pool[ch];
- ch_buf_num = cpdma_chan_get_rx_buf_num(cpsw->rxv[ch].ch);
- for (i = 0; i < ch_buf_num; i++) {
- page = page_pool_dev_alloc_pages(pool);
- if (!page) {
- cpsw_err(priv, ifup, "allocate rx page err\n");
- return -ENOMEM;
- }
-
- xmeta = page_address(page) + CPSW_XMETA_OFFSET;
- xmeta->ndev = priv->ndev;
- xmeta->ch = ch;
-
- dma = page_pool_get_dma_addr(page) + CPSW_HEADROOM;
- ret = cpdma_chan_idle_submit_mapped(cpsw->rxv[ch].ch,
- page, dma,
- cpsw->rx_packet_max,
- 0);
- if (ret < 0) {
- cpsw_err(priv, ifup,
- "cannot submit page to channel %d rx, error %d\n",
- ch, ret);
- page_pool_recycle_direct(pool, page);
- return ret;
- }
- }
-
- cpsw_info(priv, ifup, "ch %d rx, submitted %d descriptors\n",
- ch, ch_buf_num);
- }
-
- return 0;
-}
-
static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_common *cpsw)
{
u32 slave_port;
@@ -1410,221 +732,6 @@ static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_common *cpsw)
cpsw_sl_ctl_reset(slave->mac_sl);
}
-static int cpsw_tc_to_fifo(int tc, int num_tc)
-{
- if (tc == num_tc - 1)
- return 0;
-
- return CPSW_FIFO_SHAPERS_NUM - tc;
-}
-
-static int cpsw_set_fifo_bw(struct cpsw_priv *priv, int fifo, int bw)
-{
- struct cpsw_common *cpsw = priv->cpsw;
- u32 val = 0, send_pct, shift;
- struct cpsw_slave *slave;
- int pct = 0, i;
-
- if (bw > priv->shp_cfg_speed * 1000)
- goto err;
-
- /* shaping has to stay enabled for highest fifos linearly
- * and fifo bw no more then interface can allow
- */
- slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
- send_pct = slave_read(slave, SEND_PERCENT);
- for (i = CPSW_FIFO_SHAPERS_NUM; i > 0; i--) {
- if (!bw) {
- if (i >= fifo || !priv->fifo_bw[i])
- continue;
-
- dev_warn(priv->dev, "Prev FIFO%d is shaped", i);
- continue;
- }
-
- if (!priv->fifo_bw[i] && i > fifo) {
- dev_err(priv->dev, "Upper FIFO%d is not shaped", i);
- return -EINVAL;
- }
-
- shift = (i - 1) * 8;
- if (i == fifo) {
- send_pct &= ~(CPSW_PCT_MASK << shift);
- val = DIV_ROUND_UP(bw, priv->shp_cfg_speed * 10);
- if (!val)
- val = 1;
-
- send_pct |= val << shift;
- pct += val;
- continue;
- }
-
- if (priv->fifo_bw[i])
- pct += (send_pct >> shift) & CPSW_PCT_MASK;
- }
-
- if (pct >= 100)
- goto err;
-
- slave_write(slave, send_pct, SEND_PERCENT);
- priv->fifo_bw[fifo] = bw;
-
- dev_warn(priv->dev, "set FIFO%d bw = %d\n", fifo,
- DIV_ROUND_CLOSEST(val * priv->shp_cfg_speed, 100));
-
- return 0;
-err:
- dev_err(priv->dev, "Bandwidth doesn't fit in tc configuration");
- return -EINVAL;
-}
-
-static int cpsw_set_fifo_rlimit(struct cpsw_priv *priv, int fifo, int bw)
-{
- struct cpsw_common *cpsw = priv->cpsw;
- struct cpsw_slave *slave;
- u32 tx_in_ctl_rg, val;
- int ret;
-
- ret = cpsw_set_fifo_bw(priv, fifo, bw);
- if (ret)
- return ret;
-
- slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
- tx_in_ctl_rg = cpsw->version == CPSW_VERSION_1 ?
- CPSW1_TX_IN_CTL : CPSW2_TX_IN_CTL;
-
- if (!bw)
- cpsw_fifo_shp_on(priv, fifo, bw);
-
- val = slave_read(slave, tx_in_ctl_rg);
- if (cpsw_shp_is_off(priv)) {
- /* disable FIFOs rate limited queues */
- val &= ~(0xf << CPSW_FIFO_RATE_EN_SHIFT);
-
- /* set type of FIFO queues to normal priority mode */
- val &= ~(3 << CPSW_FIFO_QUEUE_TYPE_SHIFT);
-
- /* set type of FIFO queues to be rate limited */
- if (bw)
- val |= 2 << CPSW_FIFO_QUEUE_TYPE_SHIFT;
- else
- priv->shp_cfg_speed = 0;
- }
-
- /* toggle a FIFO rate limited queue */
- if (bw)
- val |= BIT(fifo + CPSW_FIFO_RATE_EN_SHIFT);
- else
- val &= ~BIT(fifo + CPSW_FIFO_RATE_EN_SHIFT);
- slave_write(slave, val, tx_in_ctl_rg);
-
- /* FIFO transmit shape enable */
- cpsw_fifo_shp_on(priv, fifo, bw);
- return 0;
-}
-
-/* Defaults:
- * class A - prio 3
- * class B - prio 2
- * shaping for class A should be set first
- */
-static int cpsw_set_cbs(struct net_device *ndev,
- struct tc_cbs_qopt_offload *qopt)
-{
- struct cpsw_priv *priv = netdev_priv(ndev);
- struct cpsw_common *cpsw = priv->cpsw;
- struct cpsw_slave *slave;
- int prev_speed = 0;
- int tc, ret, fifo;
- u32 bw = 0;
-
- tc = netdev_txq_to_tc(priv->ndev, qopt->queue);
-
- /* enable channels in backward order, as highest FIFOs must be rate
- * limited first and for compliance with CPDMA rate limited channels
- * that also used in bacward order. FIFO0 cannot be rate limited.
- */
- fifo = cpsw_tc_to_fifo(tc, ndev->num_tc);
- if (!fifo) {
- dev_err(priv->dev, "Last tc%d can't be rate limited", tc);
- return -EINVAL;
- }
-
- /* do nothing, it's disabled anyway */
- if (!qopt->enable && !priv->fifo_bw[fifo])
- return 0;
-
- /* shapers can be set if link speed is known */
- slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
- if (slave->phy && slave->phy->link) {
- if (priv->shp_cfg_speed &&
- priv->shp_cfg_speed != slave->phy->speed)
- prev_speed = priv->shp_cfg_speed;
-
- priv->shp_cfg_speed = slave->phy->speed;
- }
-
- if (!priv->shp_cfg_speed) {
- dev_err(priv->dev, "Link speed is not known");
- return -1;
- }
-
- ret = pm_runtime_get_sync(cpsw->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(cpsw->dev);
- return ret;
- }
-
- bw = qopt->enable ? qopt->idleslope : 0;
- ret = cpsw_set_fifo_rlimit(priv, fifo, bw);
- if (ret) {
- priv->shp_cfg_speed = prev_speed;
- prev_speed = 0;
- }
-
- if (bw && prev_speed)
- dev_warn(priv->dev,
- "Speed was changed, CBS shaper speeds are changed!");
-
- pm_runtime_put_sync(cpsw->dev);
- return ret;
-}
-
-static void cpsw_cbs_resume(struct cpsw_slave *slave, struct cpsw_priv *priv)
-{
- int fifo, bw;
-
- for (fifo = CPSW_FIFO_SHAPERS_NUM; fifo > 0; fifo--) {
- bw = priv->fifo_bw[fifo];
- if (!bw)
- continue;
-
- cpsw_set_fifo_rlimit(priv, fifo, bw);
- }
-}
-
-static void cpsw_mqprio_resume(struct cpsw_slave *slave, struct cpsw_priv *priv)
-{
- struct cpsw_common *cpsw = priv->cpsw;
- u32 tx_prio_map = 0;
- int i, tc, fifo;
- u32 tx_prio_rg;
-
- if (!priv->mqprio_hw)
- return;
-
- for (i = 0; i < 8; i++) {
- tc = netdev_get_prio_tc_map(priv->ndev, i);
- fifo = CPSW_FIFO_SHAPERS_NUM - tc;
- tx_prio_map |= fifo << (4 * i);
- }
-
- tx_prio_rg = cpsw->version == CPSW_VERSION_1 ?
- CPSW1_TX_PRI_MAP : CPSW2_TX_PRI_MAP;
-
- slave_write(slave, tx_prio_map, tx_prio_rg);
-}
-
static int cpsw_restore_vlans(struct net_device *vdev, int vid, void *arg)
{
struct cpsw_priv *priv = arg;
@@ -1853,207 +960,6 @@ fail:
return NETDEV_TX_BUSY;
}
-#if IS_ENABLED(CONFIG_TI_CPTS)
-
-static void cpsw_hwtstamp_v1(struct cpsw_priv *priv)
-{
- struct cpsw_common *cpsw = priv->cpsw;
- struct cpsw_slave *slave = &cpsw->slaves[cpsw->data.active_slave];
- u32 ts_en, seq_id;
-
- if (!priv->tx_ts_enabled && !priv->rx_ts_enabled) {
- slave_write(slave, 0, CPSW1_TS_CTL);
- return;
- }
-
- seq_id = (30 << CPSW_V1_SEQ_ID_OFS_SHIFT) | ETH_P_1588;
- ts_en = EVENT_MSG_BITS << CPSW_V1_MSG_TYPE_OFS;
-
- if (priv->tx_ts_enabled)
- ts_en |= CPSW_V1_TS_TX_EN;
-
- if (priv->rx_ts_enabled)
- ts_en |= CPSW_V1_TS_RX_EN;
-
- slave_write(slave, ts_en, CPSW1_TS_CTL);
- slave_write(slave, seq_id, CPSW1_TS_SEQ_LTYPE);
-}
-
-static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
-{
- struct cpsw_slave *slave;
- struct cpsw_common *cpsw = priv->cpsw;
- u32 ctrl, mtype;
-
- slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
-
- ctrl = slave_read(slave, CPSW2_CONTROL);
- switch (cpsw->version) {
- case CPSW_VERSION_2:
- ctrl &= ~CTRL_V2_ALL_TS_MASK;
-
- if (priv->tx_ts_enabled)
- ctrl |= CTRL_V2_TX_TS_BITS;
-
- if (priv->rx_ts_enabled)
- ctrl |= CTRL_V2_RX_TS_BITS;
- break;
- case CPSW_VERSION_3:
- default:
- ctrl &= ~CTRL_V3_ALL_TS_MASK;
-
- if (priv->tx_ts_enabled)
- ctrl |= CTRL_V3_TX_TS_BITS;
-
- if (priv->rx_ts_enabled)
- ctrl |= CTRL_V3_RX_TS_BITS;
- break;
- }
-
- mtype = (30 << TS_SEQ_ID_OFFSET_SHIFT) | EVENT_MSG_BITS;
-
- slave_write(slave, mtype, CPSW2_TS_SEQ_MTYPE);
- slave_write(slave, ctrl, CPSW2_CONTROL);
- writel_relaxed(ETH_P_1588, &cpsw->regs->ts_ltype);
- writel_relaxed(ETH_P_8021Q, &cpsw->regs->vlan_ltype);
-}
-
-static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
-{
- struct cpsw_priv *priv = netdev_priv(dev);
- struct hwtstamp_config cfg;
- struct cpsw_common *cpsw = priv->cpsw;
-
- if (cpsw->version != CPSW_VERSION_1 &&
- cpsw->version != CPSW_VERSION_2 &&
- cpsw->version != CPSW_VERSION_3)
- return -EOPNOTSUPP;
-
- if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
- return -EFAULT;
-
- /* reserved for future extensions */
- if (cfg.flags)
- return -EINVAL;
-
- if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON)
- return -ERANGE;
-
- switch (cfg.rx_filter) {
- case HWTSTAMP_FILTER_NONE:
- priv->rx_ts_enabled = 0;
- break;
- case HWTSTAMP_FILTER_ALL:
- case HWTSTAMP_FILTER_NTP_ALL:
- return -ERANGE;
- case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
- case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
- case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
- priv->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
- cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
- break;
- case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
- case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
- case HWTSTAMP_FILTER_PTP_V2_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
- priv->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V2_EVENT;
- cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
- break;
- default:
- return -ERANGE;
- }
-
- priv->tx_ts_enabled = cfg.tx_type == HWTSTAMP_TX_ON;
-
- switch (cpsw->version) {
- case CPSW_VERSION_1:
- cpsw_hwtstamp_v1(priv);
- break;
- case CPSW_VERSION_2:
- case CPSW_VERSION_3:
- cpsw_hwtstamp_v2(priv);
- break;
- default:
- WARN_ON(1);
- }
-
- return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
-}
-
-static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
-{
- struct cpsw_common *cpsw = ndev_to_cpsw(dev);
- struct cpsw_priv *priv = netdev_priv(dev);
- struct hwtstamp_config cfg;
-
- if (cpsw->version != CPSW_VERSION_1 &&
- cpsw->version != CPSW_VERSION_2 &&
- cpsw->version != CPSW_VERSION_3)
- return -EOPNOTSUPP;
-
- cfg.flags = 0;
- cfg.tx_type = priv->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
- cfg.rx_filter = priv->rx_ts_enabled;
-
- return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
-}
-#else
-static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
-{
- return -EOPNOTSUPP;
-}
-
-static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
-{
- return -EOPNOTSUPP;
-}
-#endif /*CONFIG_TI_CPTS*/
-
-static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
-{
- struct cpsw_priv *priv = netdev_priv(dev);
- struct cpsw_common *cpsw = priv->cpsw;
- int slave_no = cpsw_slave_index(cpsw, priv);
-
- if (!netif_running(dev))
- return -EINVAL;
-
- switch (cmd) {
- case SIOCSHWTSTAMP:
- return cpsw_hwtstamp_set(dev, req);
- case SIOCGHWTSTAMP:
- return cpsw_hwtstamp_get(dev, req);
- }
-
- if (!cpsw->slaves[slave_no].phy)
- return -EOPNOTSUPP;
- return phy_mii_ioctl(cpsw->slaves[slave_no].phy, req, cmd);
-}
-
-static void cpsw_ndo_tx_timeout(struct net_device *ndev)
-{
- struct cpsw_priv *priv = netdev_priv(ndev);
- struct cpsw_common *cpsw = priv->cpsw;
- int ch;
-
- cpsw_err(priv, tx_err, "transmit timeout, restarting dma\n");
- ndev->stats.tx_errors++;
- cpsw_intr_disable(cpsw);
- for (ch = 0; ch < cpsw->tx_ch_num; ch++) {
- cpdma_chan_stop(cpsw->txv[ch].ch);
- cpdma_chan_start(cpsw->txv[ch].ch);
- }
-
- cpsw_intr_enable(cpsw);
- netif_trans_update(ndev);
- netif_tx_wake_all_queues(ndev);
-}
-
static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
{
struct cpsw_priv *priv = netdev_priv(ndev);
@@ -2215,168 +1121,13 @@ err:
return ret;
}
-static int cpsw_ndo_set_tx_maxrate(struct net_device *ndev, int queue, u32 rate)
-{
- struct cpsw_priv *priv = netdev_priv(ndev);
- struct cpsw_common *cpsw = priv->cpsw;
- struct cpsw_slave *slave;
- u32 min_rate;
- u32 ch_rate;
- int i, ret;
-
- ch_rate = netdev_get_tx_queue(ndev, queue)->tx_maxrate;
- if (ch_rate == rate)
- return 0;
-
- ch_rate = rate * 1000;
- min_rate = cpdma_chan_get_min_rate(cpsw->dma);
- if ((ch_rate < min_rate && ch_rate)) {
- dev_err(priv->dev, "The channel rate cannot be less than %dMbps",
- min_rate);
- return -EINVAL;
- }
-
- if (rate > cpsw->speed) {
- dev_err(priv->dev, "The channel rate cannot be more than 2Gbps");
- return -EINVAL;
- }
-
- ret = pm_runtime_get_sync(cpsw->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(cpsw->dev);
- return ret;
- }
-
- ret = cpdma_chan_set_rate(cpsw->txv[queue].ch, ch_rate);
- pm_runtime_put(cpsw->dev);
-
- if (ret)
- return ret;
-
- /* update rates for slaves tx queues */
- for (i = 0; i < cpsw->data.slaves; i++) {
- slave = &cpsw->slaves[i];
- if (!slave->ndev)
- continue;
-
- netdev_get_tx_queue(slave->ndev, queue)->tx_maxrate = rate;
- }
-
- cpsw_split_res(cpsw);
- return ret;
-}
-
-static int cpsw_set_mqprio(struct net_device *ndev, void *type_data)
-{
- struct tc_mqprio_qopt_offload *mqprio = type_data;
- struct cpsw_priv *priv = netdev_priv(ndev);
- struct cpsw_common *cpsw = priv->cpsw;
- int fifo, num_tc, count, offset;
- struct cpsw_slave *slave;
- u32 tx_prio_map = 0;
- int i, tc, ret;
-
- num_tc = mqprio->qopt.num_tc;
- if (num_tc > CPSW_TC_NUM)
- return -EINVAL;
-
- if (mqprio->mode != TC_MQPRIO_MODE_DCB)
- return -EINVAL;
-
- ret = pm_runtime_get_sync(cpsw->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(cpsw->dev);
- return ret;
- }
-
- if (num_tc) {
- for (i = 0; i < 8; i++) {
- tc = mqprio->qopt.prio_tc_map[i];
- fifo = cpsw_tc_to_fifo(tc, num_tc);
- tx_prio_map |= fifo << (4 * i);
- }
-
- netdev_set_num_tc(ndev, num_tc);
- for (i = 0; i < num_tc; i++) {
- count = mqprio->qopt.count[i];
- offset = mqprio->qopt.offset[i];
- netdev_set_tc_queue(ndev, i, count, offset);
- }
- }
-
- if (!mqprio->qopt.hw) {
- /* restore default configuration */
- netdev_reset_tc(ndev);
- tx_prio_map = TX_PRIORITY_MAPPING;
- }
-
- priv->mqprio_hw = mqprio->qopt.hw;
-
- offset = cpsw->version == CPSW_VERSION_1 ?
- CPSW1_TX_PRI_MAP : CPSW2_TX_PRI_MAP;
-
- slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
- slave_write(slave, tx_prio_map, offset);
-
- pm_runtime_put_sync(cpsw->dev);
-
- return 0;
-}
-
-static int cpsw_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
- void *type_data)
-{
- switch (type) {
- case TC_SETUP_QDISC_CBS:
- return cpsw_set_cbs(ndev, type_data);
-
- case TC_SETUP_QDISC_MQPRIO:
- return cpsw_set_mqprio(ndev, type_data);
-
- default:
- return -EOPNOTSUPP;
- }
-}
-
-static int cpsw_xdp_prog_setup(struct cpsw_priv *priv, struct netdev_bpf *bpf)
-{
- struct bpf_prog *prog = bpf->prog;
-
- if (!priv->xdpi.prog && !prog)
- return 0;
-
- if (!xdp_attachment_flags_ok(&priv->xdpi, bpf))
- return -EBUSY;
-
- WRITE_ONCE(priv->xdp_prog, prog);
-
- xdp_attachment_setup(&priv->xdpi, bpf);
-
- return 0;
-}
-
-static int cpsw_ndo_bpf(struct net_device *ndev, struct netdev_bpf *bpf)
-{
- struct cpsw_priv *priv = netdev_priv(ndev);
-
- switch (bpf->command) {
- case XDP_SETUP_PROG:
- return cpsw_xdp_prog_setup(priv, bpf);
-
- case XDP_QUERY_PROG:
- return xdp_attachment_query(&priv->xdpi, bpf);
-
- default:
- return -EINVAL;
- }
-}
-
static int cpsw_ndo_xdp_xmit(struct net_device *ndev, int n,
struct xdp_frame **frames, u32 flags)
{
struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
struct xdp_frame *xdpf;
- int i, drops = 0;
+ int i, drops = 0, port;
if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
return -EINVAL;
@@ -2389,7 +1140,8 @@ static int cpsw_ndo_xdp_xmit(struct net_device *ndev, int n,
continue;
}
- if (cpsw_xdp_tx_frame(priv, xdpf, NULL))
+ port = priv->emac_port + cpsw->data.dual_emac;
+ if (cpsw_xdp_tx_frame(priv, xdpf, NULL, port))
drops++;
}
@@ -2619,11 +1371,10 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
i);
goto no_phy_slave;
}
- slave_data->phy_if = of_get_phy_mode(slave_node);
- if (slave_data->phy_if < 0) {
+ ret = of_get_phy_mode(slave_node, &slave_data->phy_if);
+ if (ret) {
dev_err(&pdev->dev, "Missing or malformed slave[%d] phy-mode property\n",
i);
- ret = slave_data->phy_if;
goto err_node_put;
}
@@ -2776,6 +1527,8 @@ static int cpsw_probe(struct platform_device *pdev)
return -ENOMEM;
platform_set_drvdata(pdev, cpsw);
+ cpsw_slave_index = cpsw_slave_index_priv;
+
cpsw->dev = dev;
mode = devm_gpiod_get_array_optional(dev, "mode", GPIOD_OUT_LOW);
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
index 84025dcc78d5..929f3d3354e3 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.c
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -5,6 +5,8 @@
* Copyright (C) 2012 Texas Instruments
*
*/
+#include <linux/bitmap.h>
+#include <linux/if_vlan.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -382,6 +384,7 @@ int cpsw_ale_del_mcast(struct cpsw_ale *ale, const u8 *addr, int port_mask,
int flags, u16 vid)
{
u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
+ int mcast_members;
int idx;
idx = cpsw_ale_match_addr(ale, addr, (flags & ALE_VLAN) ? vid : 0);
@@ -390,11 +393,15 @@ int cpsw_ale_del_mcast(struct cpsw_ale *ale, const u8 *addr, int port_mask,
cpsw_ale_read(ale, idx, ale_entry);
- if (port_mask)
- cpsw_ale_set_port_mask(ale_entry, port_mask,
+ if (port_mask) {
+ mcast_members = cpsw_ale_get_port_mask(ale_entry,
+ ale->port_mask_bits);
+ mcast_members &= ~port_mask;
+ cpsw_ale_set_port_mask(ale_entry, mcast_members,
ale->port_mask_bits);
- else
+ } else {
cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
+ }
cpsw_ale_write(ale, idx, ale_entry);
return 0;
@@ -415,7 +422,18 @@ static void cpsw_ale_set_vlan_mcast(struct cpsw_ale *ale, u32 *ale_entry,
writel(unreg_mcast, ale->params.ale_regs + ALE_VLAN_MASK_MUX(idx));
}
-int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag,
+static void cpsw_ale_set_vlan_untag(struct cpsw_ale *ale, u32 *ale_entry,
+ u16 vid, int untag_mask)
+{
+ cpsw_ale_set_vlan_untag_force(ale_entry,
+ untag_mask, ale->vlan_field_bits);
+ if (untag_mask & ALE_PORT_HOST)
+ bitmap_set(ale->p0_untag_vid_mask, vid, 1);
+ else
+ bitmap_clear(ale->p0_untag_vid_mask, vid, 1);
+}
+
+int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port_mask, int untag,
int reg_mcast, int unreg_mcast)
{
u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
@@ -427,8 +445,8 @@ int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag,
cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_VLAN);
cpsw_ale_set_vlan_id(ale_entry, vid);
+ cpsw_ale_set_vlan_untag(ale, ale_entry, vid, untag);
- cpsw_ale_set_vlan_untag_force(ale_entry, untag, ale->vlan_field_bits);
if (!ale->params.nu_switch_ale) {
cpsw_ale_set_vlan_reg_mcast(ale_entry, reg_mcast,
ale->vlan_field_bits);
@@ -437,7 +455,8 @@ int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag,
} else {
cpsw_ale_set_vlan_mcast(ale, ale_entry, reg_mcast, unreg_mcast);
}
- cpsw_ale_set_vlan_member_list(ale_entry, port, ale->vlan_field_bits);
+ cpsw_ale_set_vlan_member_list(ale_entry, port_mask,
+ ale->vlan_field_bits);
if (idx < 0)
idx = cpsw_ale_match_free(ale);
@@ -450,6 +469,41 @@ int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag,
return 0;
}
+static void cpsw_ale_del_vlan_modify(struct cpsw_ale *ale, u32 *ale_entry,
+ u16 vid, int port_mask)
+{
+ int reg_mcast, unreg_mcast;
+ int members, untag;
+
+ members = cpsw_ale_get_vlan_member_list(ale_entry,
+ ale->vlan_field_bits);
+ members &= ~port_mask;
+
+ untag = cpsw_ale_get_vlan_untag_force(ale_entry,
+ ale->vlan_field_bits);
+ reg_mcast = cpsw_ale_get_vlan_reg_mcast(ale_entry,
+ ale->vlan_field_bits);
+ unreg_mcast = cpsw_ale_get_vlan_unreg_mcast(ale_entry,
+ ale->vlan_field_bits);
+ untag &= members;
+ reg_mcast &= members;
+ unreg_mcast &= members;
+
+ cpsw_ale_set_vlan_untag(ale, ale_entry, vid, untag);
+
+ if (!ale->params.nu_switch_ale) {
+ cpsw_ale_set_vlan_reg_mcast(ale_entry, reg_mcast,
+ ale->vlan_field_bits);
+ cpsw_ale_set_vlan_unreg_mcast(ale_entry, unreg_mcast,
+ ale->vlan_field_bits);
+ } else {
+ cpsw_ale_set_vlan_mcast(ale, ale_entry, reg_mcast,
+ unreg_mcast);
+ }
+ cpsw_ale_set_vlan_member_list(ale_entry, members,
+ ale->vlan_field_bits);
+}
+
int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port_mask)
{
u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
@@ -461,16 +515,83 @@ int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port_mask)
cpsw_ale_read(ale, idx, ale_entry);
- if (port_mask)
- cpsw_ale_set_vlan_member_list(ale_entry, port_mask,
- ale->vlan_field_bits);
- else
+ if (port_mask) {
+ cpsw_ale_del_vlan_modify(ale, ale_entry, vid, port_mask);
+ } else {
+ cpsw_ale_set_vlan_untag(ale, ale_entry, vid, 0);
cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
+ }
cpsw_ale_write(ale, idx, ale_entry);
+
return 0;
}
+int cpsw_ale_vlan_add_modify(struct cpsw_ale *ale, u16 vid, int port_mask,
+ int untag_mask, int reg_mask, int unreg_mask)
+{
+ u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
+ int reg_mcast_members, unreg_mcast_members;
+ int vlan_members, untag_members;
+ int idx, ret = 0;
+
+ idx = cpsw_ale_match_vlan(ale, vid);
+ if (idx >= 0)
+ cpsw_ale_read(ale, idx, ale_entry);
+
+ vlan_members = cpsw_ale_get_vlan_member_list(ale_entry,
+ ale->vlan_field_bits);
+ reg_mcast_members = cpsw_ale_get_vlan_reg_mcast(ale_entry,
+ ale->vlan_field_bits);
+ unreg_mcast_members =
+ cpsw_ale_get_vlan_unreg_mcast(ale_entry,
+ ale->vlan_field_bits);
+ untag_members = cpsw_ale_get_vlan_untag_force(ale_entry,
+ ale->vlan_field_bits);
+
+ vlan_members |= port_mask;
+ untag_members = (untag_members & ~port_mask) | untag_mask;
+ reg_mcast_members = (reg_mcast_members & ~port_mask) | reg_mask;
+ unreg_mcast_members = (unreg_mcast_members & ~port_mask) | unreg_mask;
+
+ ret = cpsw_ale_add_vlan(ale, vid, vlan_members, untag_members,
+ reg_mcast_members, unreg_mcast_members);
+ if (ret) {
+ dev_err(ale->params.dev, "Unable to add vlan\n");
+ return ret;
+ }
+ dev_dbg(ale->params.dev, "port mask 0x%x untag 0x%x\n", vlan_members,
+ untag_mask);
+
+ return ret;
+}
+
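cpsw_ale_vlan_add_modify() merges the requested ports into whatever entry already exists for the VLAN instead of overwriting it; a hedged userspace sketch of the merge rule (values illustrative):

        #include <stdio.h>

        int main(void)
        {
                unsigned int vlan_members = 0x1;        /* existing: host port only */
                unsigned int port_mask    = 0x2;        /* request: add port 1 */
                unsigned int untag        = 0x0;        /* existing untag-force bits */
                unsigned int untag_mask   = 0x2;        /* request: untag on port 1 */

                vlan_members |= port_mask;                      /* 0x3 */
                /* only the requested ports' untag bits are touched */
                untag = (untag & ~port_mask) | untag_mask;      /* 0x2 */

                printf("members 0x%x untag 0x%x\n", vlan_members, untag);
                return 0;
        }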
+void cpsw_ale_set_unreg_mcast(struct cpsw_ale *ale, int unreg_mcast_mask,
+ bool add)
+{
+ u32 ale_entry[ALE_ENTRY_WORDS];
+ int unreg_members = 0;
+ int type, idx;
+
+ for (idx = 0; idx < ale->params.ale_entries; idx++) {
+ cpsw_ale_read(ale, idx, ale_entry);
+ type = cpsw_ale_get_entry_type(ale_entry);
+ if (type != ALE_TYPE_VLAN)
+ continue;
+
+ unreg_members =
+ cpsw_ale_get_vlan_unreg_mcast(ale_entry,
+ ale->vlan_field_bits);
+ if (add)
+ unreg_members |= unreg_mcast_mask;
+ else
+ unreg_members &= ~unreg_mcast_mask;
+ cpsw_ale_set_vlan_unreg_mcast(ale_entry, unreg_members,
+ ale->vlan_field_bits);
+ cpsw_ale_write(ale, idx, ale_entry);
+ }
+}
+
void cpsw_ale_set_allmulti(struct cpsw_ale *ale, int allmulti, int port)
{
u32 ale_entry[ALE_ENTRY_WORDS];
@@ -779,6 +900,7 @@ void cpsw_ale_start(struct cpsw_ale *ale)
void cpsw_ale_stop(struct cpsw_ale *ale)
{
del_timer_sync(&ale->timer);
+ cpsw_ale_control_set(ale, 0, ALE_CLEAR, 1);
cpsw_ale_control_set(ale, 0, ALE_ENABLE, 0);
}
@@ -791,6 +913,13 @@ struct cpsw_ale *cpsw_ale_create(struct cpsw_ale_params *params)
if (!ale)
return NULL;
+ ale->p0_untag_vid_mask =
+ devm_kmalloc_array(params->dev, BITS_TO_LONGS(VLAN_N_VID),
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ if (!ale->p0_untag_vid_mask)
+ return ERR_PTR(-ENOMEM);
+
ale->params = *params;
ale->ageout = ale->params.ale_ageout * HZ;
@@ -862,6 +991,7 @@ struct cpsw_ale *cpsw_ale_create(struct cpsw_ale_params *params)
ALE_UNKNOWNVLAN_FORCE_UNTAG_EGRESS;
}
+ cpsw_ale_control_set(ale, 0, ALE_CLEAR, 1);
return ale;
}
diff --git a/drivers/net/ethernet/ti/cpsw_ale.h b/drivers/net/ethernet/ti/cpsw_ale.h
index 370df254eb12..70d0955c2652 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.h
+++ b/drivers/net/ethernet/ti/cpsw_ale.h
@@ -35,6 +35,7 @@ struct cpsw_ale {
u32 port_mask_bits;
u32 port_num_bits;
u32 vlan_field_bits;
+ unsigned long *p0_untag_vid_mask;
};
enum cpsw_ale_control {
@@ -115,4 +116,14 @@ int cpsw_ale_control_set(struct cpsw_ale *ale, int port,
int control, int value);
void cpsw_ale_dump(struct cpsw_ale *ale, u32 *data);
+static inline int cpsw_ale_get_vlan_p0_untag(struct cpsw_ale *ale, u16 vid)
+{
+ return test_bit(vid, ale->p0_untag_vid_mask);
+}
+
+int cpsw_ale_vlan_add_modify(struct cpsw_ale *ale, u16 vid, int port_mask,
+ int untag_mask, int reg_mcast, int unreg_mcast);
+void cpsw_ale_set_unreg_mcast(struct cpsw_ale *ale, int unreg_mcast_mask,
+ bool add);
+
#endif
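The new p0_untag_vid_mask is a plain VLAN-indexed bitmap; the helpers it relies on behave like this userspace model (a sketch of the bitmap_set()/test_bit() semantics, not the kernel implementation):

        #include <stdio.h>

        #define VLAN_N_VID      4096
        #define BITS_PER_LONG   (8 * sizeof(unsigned long))

        static unsigned long p0_untag[VLAN_N_VID / BITS_PER_LONG];

        static void set_untag(unsigned int vid, int on)
        {
                if (on)
                        p0_untag[vid / BITS_PER_LONG] |= 1UL << (vid % BITS_PER_LONG);
                else
                        p0_untag[vid / BITS_PER_LONG] &= ~(1UL << (vid % BITS_PER_LONG));
        }

        static int get_untag(unsigned int vid)
        {
                return !!(p0_untag[vid / BITS_PER_LONG] & (1UL << (vid % BITS_PER_LONG)));
        }

        int main(void)
        {
                set_untag(100, 1);
                printf("vid 100 untagged on P0: %d\n", get_untag(100)); /* 1 */
                return 0;
        }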
diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c
new file mode 100644
index 000000000000..71215db7934b
--- /dev/null
+++ b/drivers/net/ethernet/ti/cpsw_new.c
@@ -0,0 +1,2048 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Texas Instruments Ethernet Switch Driver
+ *
+ * Copyright (C) 2019 Texas Instruments
+ */
+
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/timer.h>
+#include <linux/module.h>
+#include <linux/irqreturn.h>
+#include <linux/interrupt.h>
+#include <linux/if_ether.h>
+#include <linux/etherdevice.h>
+#include <linux/net_tstamp.h>
+#include <linux/phy.h>
+#include <linux/phy/phy.h>
+#include <linux/delay.h>
+#include <linux/pm_runtime.h>
+#include <linux/gpio/consumer.h>
+#include <linux/of.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/of_device.h>
+#include <linux/if_vlan.h>
+#include <linux/kmemleak.h>
+#include <linux/sys_soc.h>
+
+#include <net/page_pool.h>
+#include <net/pkt_cls.h>
+#include <net/devlink.h>
+
+#include "cpsw.h"
+#include "cpsw_ale.h"
+#include "cpsw_priv.h"
+#include "cpsw_sl.h"
+#include "cpsw_switchdev.h"
+#include "cpts.h"
+#include "davinci_cpdma.h"
+
+#include <net/pkt_sched.h>
+
+static int debug_level;
+static int ale_ageout = CPSW_ALE_AGEOUT_DEFAULT;
+static int rx_packet_max = CPSW_MAX_PACKET_SIZE;
+static int descs_pool_size = CPSW_CPDMA_DESCS_POOL_SIZE_DEFAULT;
+
+struct cpsw_devlink {
+ struct cpsw_common *cpsw;
+};
+
+enum cpsw_devlink_param_id {
+ CPSW_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
+ CPSW_DL_PARAM_SWITCH_MODE,
+ CPSW_DL_PARAM_ALE_BYPASS,
+};
+
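These devlink parameters are what later lets user space flip the driver between dual-EMAC and switch mode at runtime; assuming the standard devlink CLI and a board-specific device path, usage is expected to look roughly like:

        /* Illustrative only -- the device path depends on the board:
         *
         *   devlink dev param set platform/48484000.switch \
         *           name switch_mode value 1 cmode runtime
         *   devlink dev param set platform/48484000.switch \
         *           name ale_bypass value 1 cmode runtime
         */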
+/* struct cpsw_common is not needed, kept here for compatibility
+ * reasons with the old driver
+ */
+static int cpsw_slave_index_priv(struct cpsw_common *cpsw,
+ struct cpsw_priv *priv)
+{
+ if (priv->emac_port == HOST_PORT_NUM)
+ return -1;
+
+ return priv->emac_port - 1;
+}
+
+static bool cpsw_is_switch_en(struct cpsw_common *cpsw)
+{
+ return !cpsw->data.dual_emac;
+}
+
+static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
+{
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+ bool enable_uni = false;
+ int i;
+
+ if (cpsw_is_switch_en(cpsw))
+ return;
+
+ /* Enabling promiscuous mode for one interface is
+ * common to both interfaces, as they share the same
+ * hardware resource.
+ */
+ for (i = 0; i < cpsw->data.slaves; i++)
+ if (cpsw->slaves[i].ndev &&
+ (cpsw->slaves[i].ndev->flags & IFF_PROMISC))
+ enable_uni = true;
+
+ if (!enable && enable_uni) {
+ enable = enable_uni;
+ dev_dbg(cpsw->dev, "promiscuity not disabled as the other interface is still in promiscuity mode\n");
+ }
+
+ if (enable) {
+ /* Enable unknown unicast, reg/unreg mcast */
+ cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM,
+ ALE_P0_UNI_FLOOD, 1);
+
+ dev_dbg(cpsw->dev, "promiscuity enabled\n");
+ } else {
+ /* Disable unknown unicast */
+ cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM,
+ ALE_P0_UNI_FLOOD, 0);
+ dev_dbg(cpsw->dev, "promiscuity disabled\n");
+ }
+}
+
+/**
+ * cpsw_set_mc - add a multicast entry to the ALE table, or delete it
+ * @ndev: device to sync
+ * @addr: address to be added or deleted
+ * @vid: vlan id; if vid < 0, set/unset the address for the real device
+ * @add: add the address if the flag is set, remove it otherwise
+ */
+static int cpsw_set_mc(struct net_device *ndev, const u8 *addr,
+ int vid, int add)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int mask, flags, ret, slave_no;
+
+ slave_no = cpsw_slave_index(cpsw, priv);
+ if (vid < 0)
+ vid = cpsw->slaves[slave_no].port_vlan;
+
+ mask = ALE_PORT_HOST;
+ flags = vid ? ALE_VLAN : 0;
+
+ if (add)
+ ret = cpsw_ale_add_mcast(cpsw->ale, addr, mask, flags, vid, 0);
+ else
+ ret = cpsw_ale_del_mcast(cpsw->ale, addr, 0, flags, vid);
+
+ return ret;
+}
+
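The vid < 0 convention above means "use the slave's own port VLAN"; the sync helpers below exercise it, e.g. (a fragment, names as in this file):

        /* Fragment: how the helpers below use the vid < 0 convention */
        cpsw_set_mc(ndev, addr, -1, 1);         /* add on the real device */
        cpsw_set_mc(ndev, addr, vid, 0);        /* remove from a specific VLAN */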
+static int cpsw_update_vlan_mc(struct net_device *vdev, int vid, void *ctx)
+{
+ struct addr_sync_ctx *sync_ctx = ctx;
+ struct netdev_hw_addr *ha;
+ int found = 0, ret = 0;
+
+ if (!vdev || !(vdev->flags & IFF_UP))
+ return 0;
+
+ /* vlan address is relevant if its sync_cnt != 0 */
+ netdev_for_each_mc_addr(ha, vdev) {
+ if (ether_addr_equal(ha->addr, sync_ctx->addr)) {
+ found = ha->sync_cnt;
+ break;
+ }
+ }
+
+ if (found)
+ sync_ctx->consumed++;
+
+ if (sync_ctx->flush) {
+ if (!found)
+ cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 0);
+ return 0;
+ }
+
+ if (found)
+ ret = cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 1);
+
+ return ret;
+}
+
+static int cpsw_add_mc_addr(struct net_device *ndev, const u8 *addr, int num)
+{
+ struct addr_sync_ctx sync_ctx;
+ int ret;
+
+ sync_ctx.consumed = 0;
+ sync_ctx.addr = addr;
+ sync_ctx.ndev = ndev;
+ sync_ctx.flush = 0;
+
+ ret = vlan_for_each(ndev, cpsw_update_vlan_mc, &sync_ctx);
+ if (sync_ctx.consumed < num && !ret)
+ ret = cpsw_set_mc(ndev, addr, -1, 1);
+
+ return ret;
+}
+
+static int cpsw_del_mc_addr(struct net_device *ndev, const u8 *addr, int num)
+{
+ struct addr_sync_ctx sync_ctx;
+
+ sync_ctx.consumed = 0;
+ sync_ctx.addr = addr;
+ sync_ctx.ndev = ndev;
+ sync_ctx.flush = 1;
+
+ vlan_for_each(ndev, cpsw_update_vlan_mc, &sync_ctx);
+ if (sync_ctx.consumed == num)
+ cpsw_set_mc(ndev, addr, -1, 0);
+
+ return 0;
+}
+
+static int cpsw_purge_vlan_mc(struct net_device *vdev, int vid, void *ctx)
+{
+ struct addr_sync_ctx *sync_ctx = ctx;
+ struct netdev_hw_addr *ha;
+ int found = 0;
+
+ if (!vdev || !(vdev->flags & IFF_UP))
+ return 0;
+
+ /* vlan address is relevant if its sync_cnt != 0 */
+ netdev_for_each_mc_addr(ha, vdev) {
+ if (ether_addr_equal(ha->addr, sync_ctx->addr)) {
+ found = ha->sync_cnt;
+ break;
+ }
+ }
+
+ if (!found)
+ return 0;
+
+ sync_ctx->consumed++;
+ cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 0);
+ return 0;
+}
+
+static int cpsw_purge_all_mc(struct net_device *ndev, const u8 *addr, int num)
+{
+ struct addr_sync_ctx sync_ctx;
+
+ sync_ctx.addr = addr;
+ sync_ctx.ndev = ndev;
+ sync_ctx.consumed = 0;
+
+ vlan_for_each(ndev, cpsw_purge_vlan_mc, &sync_ctx);
+ if (sync_ctx.consumed < num)
+ cpsw_set_mc(ndev, addr, -1, 0);
+
+ return 0;
+}
+
+static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+
+ if (ndev->flags & IFF_PROMISC) {
+ /* Enable promiscuous mode */
+ cpsw_set_promiscious(ndev, true);
+ cpsw_ale_set_allmulti(cpsw->ale, IFF_ALLMULTI, priv->emac_port);
+ return;
+ }
+
+ /* Disable promiscuous mode */
+ cpsw_set_promiscious(ndev, false);
+
+ /* Restore allmulti on vlans if necessary */
+ cpsw_ale_set_allmulti(cpsw->ale,
+ ndev->flags & IFF_ALLMULTI, priv->emac_port);
+
+ /* add/remove mcast address either for real netdev or for vlan */
+ __hw_addr_ref_sync_dev(&ndev->mc, ndev, cpsw_add_mc_addr,
+ cpsw_del_mc_addr);
+}
+
+static unsigned int cpsw_rxbuf_total_len(unsigned int len)
+{
+ len += CPSW_HEADROOM;
+ len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+ return SKB_DATA_ALIGN(len);
+}
+
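A quick userspace check of the sizing formula above, with assumed example values for CPSW_HEADROOM and sizeof(struct skb_shared_info) (the real constants are config-dependent):

        #include <stdio.h>

        #define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))
        #define HEADROOM        192     /* assumed example CPSW_HEADROOM */
        #define SHINFO          320     /* assumed example skb_shared_info size */

        static unsigned int rxbuf_total_len(unsigned int len)
        {
                len += HEADROOM;                /* reserve headroom */
                len += ALIGN_UP(SHINFO, 64);    /* room for shared info */
                return ALIGN_UP(len, 64);       /* align the total */
        }

        int main(void)
        {
                printf("%u\n", rxbuf_total_len(1522));  /* -> 2048 */
                return 0;
        }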
+static void cpsw_rx_handler(void *token, int len, int status)
+{
+ struct page *new_page, *page = token;
+ void *pa = page_address(page);
+ int headroom = CPSW_HEADROOM;
+ struct cpsw_meta_xdp *xmeta;
+ struct cpsw_common *cpsw;
+ struct net_device *ndev;
+ int port, ch, pkt_size;
+ struct cpsw_priv *priv;
+ struct page_pool *pool;
+ struct sk_buff *skb;
+ struct xdp_buff xdp;
+ int ret = 0;
+ dma_addr_t dma;
+
+ xmeta = pa + CPSW_XMETA_OFFSET;
+ cpsw = ndev_to_cpsw(xmeta->ndev);
+ ndev = xmeta->ndev;
+ pkt_size = cpsw->rx_packet_max;
+ ch = xmeta->ch;
+
+ if (status >= 0) {
+ port = CPDMA_RX_SOURCE_PORT(status);
+ if (port)
+ ndev = cpsw->slaves[--port].ndev;
+ }
+
+ priv = netdev_priv(ndev);
+ pool = cpsw->page_pool[ch];
+
+ if (unlikely(status < 0) || unlikely(!netif_running(ndev))) {
+ /* In dual emac mode check for all interfaces */
+ if (cpsw->usage_count && status >= 0) {
+ /* The packet was received for an interface which
+ * is already down while the other interface is up
+ * and running. Instead of freeing the page, which
+ * would reduce the number of rx descriptors in the
+ * DMA engine, requeue it back to cpdma.
+ */
+ new_page = page;
+ goto requeue;
+ }
+
+ /* the interface is going down, pages are purged */
+ page_pool_recycle_direct(pool, page);
+ return;
+ }
+
+ new_page = page_pool_dev_alloc_pages(pool);
+ if (unlikely(!new_page)) {
+ new_page = page;
+ ndev->stats.rx_dropped++;
+ goto requeue;
+ }
+
+ if (priv->xdp_prog) {
+ if (status & CPDMA_RX_VLAN_ENCAP) {
+ xdp.data = pa + CPSW_HEADROOM +
+ CPSW_RX_VLAN_ENCAP_HDR_SIZE;
+ xdp.data_end = xdp.data + len -
+ CPSW_RX_VLAN_ENCAP_HDR_SIZE;
+ } else {
+ xdp.data = pa + CPSW_HEADROOM;
+ xdp.data_end = xdp.data + len;
+ }
+
+ xdp_set_data_meta_invalid(&xdp);
+
+ xdp.data_hard_start = pa;
+ xdp.rxq = &priv->xdp_rxq[ch];
+
+ ret = cpsw_run_xdp(priv, ch, &xdp, page, priv->emac_port);
+ if (ret != CPSW_XDP_PASS)
+ goto requeue;
+
+ /* XDP prog might have changed packet data and boundaries */
+ len = xdp.data_end - xdp.data;
+ headroom = xdp.data - xdp.data_hard_start;
+
+ /* XDP prog can modify vlan tag, so can't use encap header */
+ status &= ~CPDMA_RX_VLAN_ENCAP;
+ }
+
+ /* pass skb to netstack if no XDP prog or returned XDP_PASS */
+ skb = build_skb(pa, cpsw_rxbuf_total_len(pkt_size));
+ if (!skb) {
+ ndev->stats.rx_dropped++;
+ page_pool_recycle_direct(pool, page);
+ goto requeue;
+ }
+
+ skb->offload_fwd_mark = priv->offload_fwd_mark;
+ skb_reserve(skb, headroom);
+ skb_put(skb, len);
+ skb->dev = ndev;
+ if (status & CPDMA_RX_VLAN_ENCAP)
+ cpsw_rx_vlan_encap(skb);
+ if (priv->rx_ts_enabled)
+ cpts_rx_timestamp(cpsw->cpts, skb);
+ skb->protocol = eth_type_trans(skb, ndev);
+
+ /* unmap page as the netstack does not recycle skb pages */
+ page_pool_release_page(pool, page);
+ netif_receive_skb(skb);
+
+ ndev->stats.rx_bytes += len;
+ ndev->stats.rx_packets++;
+
+requeue:
+ xmeta = page_address(new_page) + CPSW_XMETA_OFFSET;
+ xmeta->ndev = ndev;
+ xmeta->ch = ch;
+
+ dma = page_pool_get_dma_addr(new_page) + CPSW_HEADROOM;
+ ret = cpdma_chan_submit_mapped(cpsw->rxv[ch].ch, new_page, dma,
+ pkt_size, 0);
+ if (ret < 0) {
+ WARN_ON(ret == -ENOMEM);
+ page_pool_recycle_direct(pool, new_page);
+ }
+}
+
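For orientation, the receive path above reduces to the following shape (a condensed restatement of the code, not additional behavior):

        /* cpsw_rx_handler(page):
         *   resolve ndev from the CPDMA source port in 'status'
         *   if the interface is down: requeue (other port still up) or recycle
         *   allocate a replacement page; on failure, drop and requeue the old one
         *   if an XDP prog is attached: run it; only XDP_PASS falls through
         *   build_skb(), strip the VLAN encap word, timestamp, netif_receive_skb()
         *   requeue: submit the fresh page back to the cpdma rx channel
         */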
+static int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv,
+ unsigned short vid)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ int unreg_mcast_mask = 0;
+ int mcast_mask;
+ u32 port_mask;
+ int ret;
+
+ port_mask = (1 << priv->emac_port) | ALE_PORT_HOST;
+
+ mcast_mask = ALE_PORT_HOST;
+ if (priv->ndev->flags & IFF_ALLMULTI)
+ unreg_mcast_mask = mcast_mask;
+
+ ret = cpsw_ale_add_vlan(cpsw->ale, vid, port_mask, 0, port_mask,
+ unreg_mcast_mask);
+ if (ret != 0)
+ return ret;
+
+ ret = cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr,
+ HOST_PORT_NUM, ALE_VLAN, vid);
+ if (ret != 0)
+ goto clean_vid;
+
+ ret = cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
+ mcast_mask, ALE_VLAN, vid, 0);
+ if (ret != 0)
+ goto clean_vlan_ucast;
+ return 0;
+
+clean_vlan_ucast:
+ cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
+ HOST_PORT_NUM, ALE_VLAN, vid);
+clean_vid:
+ cpsw_ale_del_vlan(cpsw->ale, vid, 0);
+ return ret;
+}
+
+static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
+ __be16 proto, u16 vid)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int ret, i;
+
+ if (cpsw_is_switch_en(cpsw)) {
+ dev_dbg(cpsw->dev, ".ndo_vlan_rx_add_vid called in switch mode\n");
+ return 0;
+ }
+
+ if (vid == cpsw->data.default_vlan)
+ return 0;
+
+ ret = pm_runtime_get_sync(cpsw->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(cpsw->dev);
+ return ret;
+ }
+
+ /* In dual EMAC mode, a reserved VLAN id should not be used
+ * for creating VLAN interfaces, as this can break the dual
+ * EMAC port separation.
+ */
+ for (i = 0; i < cpsw->data.slaves; i++) {
+ if (cpsw->slaves[i].ndev &&
+ vid == cpsw->slaves[i].port_vlan) {
+ ret = -EINVAL;
+ goto err;
+ }
+ }
+
+ dev_dbg(priv->dev, "Adding vlanid %d to vlan filter\n", vid);
+ ret = cpsw_add_vlan_ale_entry(priv, vid);
+err:
+ pm_runtime_put(cpsw->dev);
+ return ret;
+}
+
+static int cpsw_restore_vlans(struct net_device *vdev, int vid, void *arg)
+{
+ struct cpsw_priv *priv = arg;
+
+ if (!vdev || !vid)
+ return 0;
+
+ cpsw_ndo_vlan_rx_add_vid(priv->ndev, 0, vid);
+ return 0;
+}
+
+/* restore resources after port reset */
+static void cpsw_restore(struct cpsw_priv *priv)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+
+ /* restore vlan configurations */
+ vlan_for_each(priv->ndev, cpsw_restore_vlans, priv);
+
+ /* restore MQPRIO offload */
+ cpsw_mqprio_resume(&cpsw->slaves[priv->emac_port - 1], priv);
+
+ /* restore CBS offload */
+ cpsw_cbs_resume(&cpsw->slaves[priv->emac_port - 1], priv);
+}
+
+static void cpsw_init_stp_ale_entry(struct cpsw_common *cpsw)
+{
+ char stpa[] = {0x01, 0x80, 0xc2, 0x0, 0x0, 0x0};
+
+ cpsw_ale_add_mcast(cpsw->ale, stpa,
+ ALE_PORT_HOST, ALE_SUPER, 0,
+ ALE_MCAST_BLOCK_LEARN_FWD);
+}
+
+static void cpsw_init_host_port_switch(struct cpsw_common *cpsw)
+{
+ int vlan = cpsw->data.default_vlan;
+
+ writel(CPSW_FIFO_NORMAL_MODE, &cpsw->host_port_regs->tx_in_ctl);
+
+ writel(vlan, &cpsw->host_port_regs->port_vlan);
+
+ cpsw_ale_add_vlan(cpsw->ale, vlan, ALE_ALL_PORTS,
+ ALE_ALL_PORTS, ALE_ALL_PORTS,
+ ALE_PORT_1 | ALE_PORT_2);
+
+ cpsw_init_stp_ale_entry(cpsw);
+
+ cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_P0_UNI_FLOOD, 1);
+ dev_dbg(cpsw->dev, "Set P0_UNI_FLOOD\n");
+ cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_PORT_NOLEARN, 0);
+}
+
+static void cpsw_init_host_port_dual_mac(struct cpsw_common *cpsw)
+{
+ int vlan = cpsw->data.default_vlan;
+
+ writel(CPSW_FIFO_DUAL_MAC_MODE, &cpsw->host_port_regs->tx_in_ctl);
+
+ cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_P0_UNI_FLOOD, 0);
+ dev_dbg(cpsw->dev, "unset P0_UNI_FLOOD\n");
+
+ writel(vlan, &cpsw->host_port_regs->port_vlan);
+
+ cpsw_ale_add_vlan(cpsw->ale, vlan, ALE_ALL_PORTS, ALE_ALL_PORTS, 0, 0);
+ /* learning makes no sense in dual_mac mode */
+ cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_PORT_NOLEARN, 1);
+}
+
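Side by side, the two host-port initializers above differ mainly in flooding and learning; a condensed comparison derived from the code:

        /* Host port (P0) setup:
         *
         *                      switch mode             dual-EMAC mode
         *  tx_in_ctl           CPSW_FIFO_NORMAL_MODE   CPSW_FIFO_DUAL_MAC_MODE
         *  ALE_P0_UNI_FLOOD    1 (flood unknown uni)   0
         *  ALE_PORT_NOLEARN    0 (learning on)         1 (learning off)
         *  STP ALE entry       installed               not installed
         */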
+static void cpsw_init_host_port(struct cpsw_priv *priv)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ u32 control_reg;
+
+ /* soft reset the controller and initialize ale */
+ soft_reset("cpsw", &cpsw->regs->soft_reset);
+ cpsw_ale_start(cpsw->ale);
+
+ /* switch to vlan aware mode */
+ cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_VLAN_AWARE,
+ CPSW_ALE_VLAN_AWARE);
+ control_reg = readl(&cpsw->regs->control);
+ control_reg |= CPSW_VLAN_AWARE | CPSW_RX_VLAN_ENCAP;
+ writel(control_reg, &cpsw->regs->control);
+
+ /* setup host port priority mapping */
+ writel_relaxed(CPDMA_TX_PRIORITY_MAP,
+ &cpsw->host_port_regs->cpdma_tx_pri_map);
+ writel_relaxed(0, &cpsw->host_port_regs->cpdma_rx_chan_map);
+
+ /* disable priority elevation */
+ writel_relaxed(0, &cpsw->regs->ptype);
+
+ /* enable statistics collection on all ports */
+ writel_relaxed(0x7, &cpsw->regs->stat_port_en);
+
+ /* Enable internal fifo flow control */
+ writel(0x7, &cpsw->regs->flow_control);
+
+ if (cpsw_is_switch_en(cpsw))
+ cpsw_init_host_port_switch(cpsw);
+ else
+ cpsw_init_host_port_dual_mac(cpsw);
+
+ cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM,
+ ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
+}
+
+static void cpsw_port_add_dual_emac_def_ale_entries(struct cpsw_priv *priv,
+ struct cpsw_slave *slave)
+{
+ u32 port_mask = 1 << priv->emac_port | ALE_PORT_HOST;
+ struct cpsw_common *cpsw = priv->cpsw;
+ u32 reg;
+
+ reg = (cpsw->version == CPSW_VERSION_1) ? CPSW1_PORT_VLAN :
+ CPSW2_PORT_VLAN;
+ slave_write(slave, slave->port_vlan, reg);
+
+ cpsw_ale_add_vlan(cpsw->ale, slave->port_vlan, port_mask,
+ port_mask, port_mask, 0);
+ cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
+ ALE_PORT_HOST, ALE_VLAN, slave->port_vlan,
+ ALE_MCAST_FWD);
+ cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr,
+ HOST_PORT_NUM, ALE_VLAN |
+ ALE_SECURE, slave->port_vlan);
+ cpsw_ale_control_set(cpsw->ale, priv->emac_port,
+ ALE_PORT_DROP_UNKNOWN_VLAN, 1);
+ /* learning makes no sense in dual_mac mode */
+ cpsw_ale_control_set(cpsw->ale, priv->emac_port,
+ ALE_PORT_NOLEARN, 1);
+}
+
+static void cpsw_port_add_switch_def_ale_entries(struct cpsw_priv *priv,
+ struct cpsw_slave *slave)
+{
+ u32 port_mask = 1 << priv->emac_port | ALE_PORT_HOST;
+ struct cpsw_common *cpsw = priv->cpsw;
+ u32 reg;
+
+ cpsw_ale_control_set(cpsw->ale, priv->emac_port,
+ ALE_PORT_DROP_UNKNOWN_VLAN, 0);
+ cpsw_ale_control_set(cpsw->ale, priv->emac_port,
+ ALE_PORT_NOLEARN, 0);
+ /* Disabling SA_UPDATE is required to make STP work; without this
+ * setting, host MAC addresses will jump between ports.
+ * As per the TRM, a MAC address can be defined as unicast supervisory
+ * (super) by setting both (ALE_BLOCKED | ALE_SECURE), which should
+ * prevent SA_UPDATE, but the HW seems to work incorrectly: setting
+ * ALE_SECURE causes STP packets to be dropped due to the ingress
+ * filter
+ * if (source address found) and (secure) and
+ * (receive port number != port_number)
+ * then discard the packet
+ */
+ cpsw_ale_control_set(cpsw->ale, priv->emac_port,
+ ALE_PORT_NO_SA_UPDATE, 1);
+
+ cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
+ port_mask, ALE_VLAN, slave->port_vlan,
+ ALE_MCAST_FWD_2);
+ cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr,
+ HOST_PORT_NUM, ALE_VLAN, slave->port_vlan);
+
+ reg = (cpsw->version == CPSW_VERSION_1) ? CPSW1_PORT_VLAN :
+ CPSW2_PORT_VLAN;
+ slave_write(slave, slave->port_vlan, reg);
+}
+
+static void cpsw_adjust_link(struct net_device *ndev)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct cpsw_slave *slave;
+ struct phy_device *phy;
+ u32 mac_control = 0;
+
+ slave = &cpsw->slaves[priv->emac_port - 1];
+ phy = slave->phy;
+
+ if (!phy)
+ return;
+
+ if (phy->link) {
+ mac_control = CPSW_SL_CTL_GMII_EN;
+
+ if (phy->speed == 1000)
+ mac_control |= CPSW_SL_CTL_GIG;
+ if (phy->duplex)
+ mac_control |= CPSW_SL_CTL_FULLDUPLEX;
+
+ /* set speed_in input in case RMII mode is used in 100Mbps */
+ if (phy->speed == 100)
+ mac_control |= CPSW_SL_CTL_IFCTL_A;
+ /* in band mode only works in 10Mbps RGMII mode */
+ else if ((phy->speed == 10) && phy_interface_is_rgmii(phy))
+ mac_control |= CPSW_SL_CTL_EXT_EN; /* In Band mode */
+
+ if (priv->rx_pause)
+ mac_control |= CPSW_SL_CTL_RX_FLOW_EN;
+
+ if (priv->tx_pause)
+ mac_control |= CPSW_SL_CTL_TX_FLOW_EN;
+
+ if (mac_control != slave->mac_control)
+ cpsw_sl_ctl_set(slave->mac_sl, mac_control);
+
+ /* enable forwarding */
+ cpsw_ale_control_set(cpsw->ale, priv->emac_port,
+ ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
+
+ netif_tx_wake_all_queues(ndev);
+
+ if (priv->shp_cfg_speed &&
+ priv->shp_cfg_speed != slave->phy->speed &&
+ !cpsw_shp_is_off(priv))
+ dev_warn(priv->dev, "Link speed changed: CBS shaper speeds were updated!");
+ } else {
+ netif_tx_stop_all_queues(ndev);
+
+ mac_control = 0;
+ /* disable forwarding */
+ cpsw_ale_control_set(cpsw->ale, priv->emac_port,
+ ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
+
+ cpsw_sl_wait_for_idle(slave->mac_sl, 100);
+
+ cpsw_sl_ctl_reset(slave->mac_sl);
+ }
+
+ if (mac_control != slave->mac_control)
+ phy_print_status(phy);
+
+ slave->mac_control = mac_control;
+
+ if (phy->link && cpsw_need_resplit(cpsw))
+ cpsw_split_res(cpsw);
+}
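/* Editor's note — worked example (not part of the patch): on a
 * 1000 Mbps full-duplex link with pause negotiated both ways, the
 * logic above resolves to
 *
 *	mac_control = CPSW_SL_CTL_GMII_EN | CPSW_SL_CTL_GIG |
 *		      CPSW_SL_CTL_FULLDUPLEX |
 *		      CPSW_SL_CTL_RX_FLOW_EN | CPSW_SL_CTL_TX_FLOW_EN;
 *
 * while link-down clears mac_control and idles the MAC through
 * cpsw_sl_wait_for_idle()/cpsw_sl_ctl_reset().
 */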
+
+static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct phy_device *phy;
+
+ cpsw_sl_reset(slave->mac_sl, 100);
+ cpsw_sl_ctl_reset(slave->mac_sl);
+
+ /* setup priority mapping */
+ cpsw_sl_reg_write(slave->mac_sl, CPSW_SL_RX_PRI_MAP,
+ RX_PRIORITY_MAPPING);
+
+ switch (cpsw->version) {
+ case CPSW_VERSION_1:
+ slave_write(slave, TX_PRIORITY_MAPPING, CPSW1_TX_PRI_MAP);
+ /* Increase RX FIFO size to 5 for supporting full-duplex
+ * flow control mode
+ */
+ slave_write(slave,
+ (CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) |
+ CPSW_MAX_BLKS_RX, CPSW1_MAX_BLKS);
+ break;
+ case CPSW_VERSION_2:
+ case CPSW_VERSION_3:
+ case CPSW_VERSION_4:
+ slave_write(slave, TX_PRIORITY_MAPPING, CPSW2_TX_PRI_MAP);
+ /* Increase RX FIFO size to 5 for supporting full-duplex
+ * flow control mode
+ */
+ slave_write(slave,
+ (CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) |
+ CPSW_MAX_BLKS_RX, CPSW2_MAX_BLKS);
+ break;
+ }
+
+ /* setup max packet size and mac address */
+ cpsw_sl_reg_write(slave->mac_sl, CPSW_SL_RX_MAXLEN,
+ cpsw->rx_packet_max);
+ cpsw_set_slave_mac(slave, priv);
+
+ slave->mac_control = 0; /* no link yet */
+
+ if (cpsw_is_switch_en(cpsw))
+ cpsw_port_add_switch_def_ale_entries(priv, slave);
+ else
+ cpsw_port_add_dual_emac_def_ale_entries(priv, slave);
+
+ if (!slave->data->phy_node)
+ dev_err(priv->dev, "no phy found on slave %d\n",
+ slave->slave_num);
+ phy = of_phy_connect(priv->ndev, slave->data->phy_node,
+ &cpsw_adjust_link, 0, slave->data->phy_if);
+ if (!phy) {
+ dev_err(priv->dev, "phy \"%pOF\" not found on slave %d\n",
+ slave->data->phy_node,
+ slave->slave_num);
+ return;
+ }
+ slave->phy = phy;
+
+ phy_attached_info(slave->phy);
+
+ phy_start(slave->phy);
+
+ /* Configure GMII_SEL register */
+ phy_set_mode_ext(slave->data->ifphy, PHY_MODE_ETHERNET,
+ slave->data->phy_if);
+}
+
+static int cpsw_ndo_stop(struct net_device *ndev)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct cpsw_slave *slave;
+
+ cpsw_info(priv, ifdown, "shutting down ndev\n");
+ slave = &cpsw->slaves[priv->emac_port - 1];
+ if (slave->phy)
+ phy_stop(slave->phy);
+
+ netif_tx_stop_all_queues(priv->ndev);
+
+ if (slave->phy) {
+ phy_disconnect(slave->phy);
+ slave->phy = NULL;
+ }
+
+ __hw_addr_ref_unsync_dev(&ndev->mc, ndev, cpsw_purge_all_mc);
+
+ if (cpsw->usage_count <= 1) {
+ napi_disable(&cpsw->napi_rx);
+ napi_disable(&cpsw->napi_tx);
+ cpts_unregister(cpsw->cpts);
+ cpsw_intr_disable(cpsw);
+ cpdma_ctlr_stop(cpsw->dma);
+ cpsw_ale_stop(cpsw->ale);
+ cpsw_destroy_xdp_rxqs(cpsw);
+ }
+
+ if (cpsw_need_resplit(cpsw))
+ cpsw_split_res(cpsw);
+
+ cpsw->usage_count--;
+ pm_runtime_put_sync(cpsw->dev);
+ return 0;
+}
+
+static int cpsw_ndo_open(struct net_device *ndev)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int ret;
+
+ dev_info(priv->dev, "starting ndev. mode: %s\n",
+ cpsw_is_switch_en(cpsw) ? "switch" : "dual_mac");
+ ret = pm_runtime_get_sync(cpsw->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(cpsw->dev);
+ return ret;
+ }
+
+ /* Notify the stack of the actual queue counts. */
+ ret = netif_set_real_num_tx_queues(ndev, cpsw->tx_ch_num);
+ if (ret) {
+ dev_err(priv->dev, "cannot set real number of tx queues\n");
+ goto pm_cleanup;
+ }
+
+ ret = netif_set_real_num_rx_queues(ndev, cpsw->rx_ch_num);
+ if (ret) {
+ dev_err(priv->dev, "cannot set real number of rx queues\n");
+ goto pm_cleanup;
+ }
+
+ /* Initialize host and slave ports */
+ if (!cpsw->usage_count)
+ cpsw_init_host_port(priv);
+ cpsw_slave_open(&cpsw->slaves[priv->emac_port - 1], priv);
+
+ /* initialize shared resources for every ndev */
+ if (!cpsw->usage_count) {
+ /* create rxqs for both interfaces in dual_mac mode, as they use
+ * the same pool and must be destroyed together when there are no
+ * users.
+ */
+ ret = cpsw_create_xdp_rxqs(cpsw);
+ if (ret < 0)
+ goto err_cleanup;
+
+ ret = cpsw_fill_rx_channels(priv);
+ if (ret < 0)
+ goto err_cleanup;
+
+ if (cpts_register(cpsw->cpts))
+ dev_err(priv->dev, "error registering cpts device\n");
+
+ napi_enable(&cpsw->napi_rx);
+ napi_enable(&cpsw->napi_tx);
+
+ if (cpsw->tx_irq_disabled) {
+ cpsw->tx_irq_disabled = false;
+ enable_irq(cpsw->irqs_table[1]);
+ }
+
+ if (cpsw->rx_irq_disabled) {
+ cpsw->rx_irq_disabled = false;
+ enable_irq(cpsw->irqs_table[0]);
+ }
+ }
+
+ cpsw_restore(priv);
+
+ /* Enable Interrupt pacing if configured */
+ if (cpsw->coal_intvl != 0) {
+ struct ethtool_coalesce coal;
+
+ coal.rx_coalesce_usecs = cpsw->coal_intvl;
+ cpsw_set_coalesce(ndev, &coal);
+ }
+
+ cpdma_ctlr_start(cpsw->dma);
+ cpsw_intr_enable(cpsw);
+ cpsw->usage_count++;
+
+ return 0;
+
+err_cleanup:
+ cpsw_ndo_stop(ndev);
+
+pm_cleanup:
+ pm_runtime_put_sync(cpsw->dev);
+ return ret;
+}
+
+static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct cpts *cpts = cpsw->cpts;
+ struct netdev_queue *txq;
+ struct cpdma_chan *txch;
+ int ret, q_idx;
+
+ if (skb_padto(skb, CPSW_MIN_PACKET_SIZE)) {
+ cpsw_err(priv, tx_err, "packet pad failed\n");
+ ndev->stats.tx_dropped++;
+ return NET_XMIT_DROP;
+ }
+
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
+ priv->tx_ts_enabled && cpts_can_timestamp(cpts, skb))
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+
+ q_idx = skb_get_queue_mapping(skb);
+ if (q_idx >= cpsw->tx_ch_num)
+ q_idx = q_idx % cpsw->tx_ch_num;
+
+ txch = cpsw->txv[q_idx].ch;
+ txq = netdev_get_tx_queue(ndev, q_idx);
+ skb_tx_timestamp(skb);
+ ret = cpdma_chan_submit(txch, skb, skb->data, skb->len,
+ priv->emac_port);
+ if (unlikely(ret != 0)) {
+ cpsw_err(priv, tx_err, "desc submit failed\n");
+ goto fail;
+ }
+
+ /* If there are no free tx descriptors left then we need to
+ * tell the kernel to stop sending us tx frames.
+ */
+ if (unlikely(!cpdma_check_free_tx_desc(txch))) {
+ netif_tx_stop_queue(txq);
+
+ /* Barrier, so that stop_queue is visible to other CPUs */
+ smp_mb__after_atomic();
+
+ if (cpdma_check_free_tx_desc(txch))
+ netif_tx_wake_queue(txq);
+ }
+
+ return NETDEV_TX_OK;
+fail:
+ ndev->stats.tx_dropped++;
+ netif_tx_stop_queue(txq);
+
+ /* Barrier, so that stop_queue is visible to other CPUs */
+ smp_mb__after_atomic();
+
+ if (cpdma_check_free_tx_desc(txch))
+ netif_tx_wake_queue(txq);
+
+ return NETDEV_TX_BUSY;
+}
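/* Editor's sketch (not part of the patch): the stop/barrier/re-check
 * sequence above is the generic pattern for closing the race between
 * the xmit path running out of descriptors and the completion handler
 * freeing them; my_ring_has_room() is a hypothetical stand-in for
 * cpdma_check_free_tx_desc().
 */
#include <linux/netdevice.h>

static bool my_ring_has_room(void);	/* hypothetical ring-space check */

static void my_tx_stop_if_full(struct netdev_queue *txq)
{
	if (!my_ring_has_room()) {
		netif_tx_stop_queue(txq);
		/* pairs with the barrier in the completion handler, so a
		 * descriptor freed between the check and the stop is not
		 * missed
		 */
		smp_mb__after_atomic();
		if (my_ring_has_room())
			netif_tx_wake_queue(txq);
	}
}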
+
+static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
+{
+ struct sockaddr *addr = (struct sockaddr *)p;
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int ret, slave_no;
+ int flags = 0;
+ u16 vid = 0;
+
+ slave_no = cpsw_slave_index(cpsw, priv);
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ ret = pm_runtime_get_sync(cpsw->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(cpsw->dev);
+ return ret;
+ }
+
+ vid = cpsw->slaves[slave_no].port_vlan;
+ flags = ALE_VLAN | ALE_SECURE;
+
+ cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr, HOST_PORT_NUM,
+ flags, vid);
+ cpsw_ale_add_ucast(cpsw->ale, addr->sa_data, HOST_PORT_NUM,
+ flags, vid);
+
+ ether_addr_copy(priv->mac_addr, addr->sa_data);
+ ether_addr_copy(ndev->dev_addr, priv->mac_addr);
+ cpsw_set_slave_mac(&cpsw->slaves[slave_no], priv);
+
+ pm_runtime_put(cpsw->dev);
+
+ return 0;
+}
+
+static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
+ __be16 proto, u16 vid)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int ret;
+ int i;
+
+ if (cpsw_is_switch_en(cpsw)) {
+ dev_dbg(cpsw->dev, "ndo del vlan is called in switch mode\n");
+ return 0;
+ }
+
+ if (vid == cpsw->data.default_vlan)
+ return 0;
+
+ ret = pm_runtime_get_sync(cpsw->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(cpsw->dev);
+ return ret;
+ }
+
+ for (i = 0; i < cpsw->data.slaves; i++) {
+ if (cpsw->slaves[i].ndev &&
+ vid == cpsw->slaves[i].port_vlan)
+ goto err;
+ }
+
+ dev_dbg(priv->dev, "removing vlanid %d from vlan filter\n", vid);
+ cpsw_ale_del_vlan(cpsw->ale, vid, 0);
+ cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
+ HOST_PORT_NUM, ALE_VLAN, vid);
+ cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast,
+ 0, ALE_VLAN, vid);
+ cpsw_ale_flush_multicast(cpsw->ale, 0, vid);
+err:
+ pm_runtime_put(cpsw->dev);
+ return ret;
+}
+
+static int cpsw_ndo_get_phys_port_name(struct net_device *ndev, char *name,
+ size_t len)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ int err;
+
+ err = snprintf(name, len, "p%d", priv->emac_port);
+
+ if (err >= len)
+ return -EINVAL;
+
+ return 0;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void cpsw_ndo_poll_controller(struct net_device *ndev)
+{
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+
+ cpsw_intr_disable(cpsw);
+ cpsw_rx_interrupt(cpsw->irqs_table[0], cpsw);
+ cpsw_tx_interrupt(cpsw->irqs_table[1], cpsw);
+ cpsw_intr_enable(cpsw);
+}
+#endif
+
+static int cpsw_ndo_xdp_xmit(struct net_device *ndev, int n,
+ struct xdp_frame **frames, u32 flags)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct xdp_frame *xdpf;
+ int i, drops = 0;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+ for (i = 0; i < n; i++) {
+ xdpf = frames[i];
+ if (xdpf->len < CPSW_MIN_PACKET_SIZE) {
+ xdp_return_frame_rx_napi(xdpf);
+ drops++;
+ continue;
+ }
+
+ if (cpsw_xdp_tx_frame(priv, xdpf, NULL, priv->emac_port))
+ drops++;
+ }
+
+ return n - drops;
+}
+
+static int cpsw_get_port_parent_id(struct net_device *ndev,
+ struct netdev_phys_item_id *ppid)
+{
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+
+ ppid->id_len = sizeof(cpsw->base_mac);
+ memcpy(&ppid->id, &cpsw->base_mac, ppid->id_len);
+
+ return 0;
+}
+
+static const struct net_device_ops cpsw_netdev_ops = {
+ .ndo_open = cpsw_ndo_open,
+ .ndo_stop = cpsw_ndo_stop,
+ .ndo_start_xmit = cpsw_ndo_start_xmit,
+ .ndo_set_mac_address = cpsw_ndo_set_mac_address,
+ .ndo_do_ioctl = cpsw_ndo_ioctl,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_tx_timeout = cpsw_ndo_tx_timeout,
+ .ndo_set_rx_mode = cpsw_ndo_set_rx_mode,
+ .ndo_set_tx_maxrate = cpsw_ndo_set_tx_maxrate,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = cpsw_ndo_poll_controller,
+#endif
+ .ndo_vlan_rx_add_vid = cpsw_ndo_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = cpsw_ndo_vlan_rx_kill_vid,
+ .ndo_setup_tc = cpsw_ndo_setup_tc,
+ .ndo_get_phys_port_name = cpsw_ndo_get_phys_port_name,
+ .ndo_bpf = cpsw_ndo_bpf,
+ .ndo_xdp_xmit = cpsw_ndo_xdp_xmit,
+ .ndo_get_port_parent_id = cpsw_get_port_parent_id,
+};
+
+static void cpsw_get_drvinfo(struct net_device *ndev,
+ struct ethtool_drvinfo *info)
+{
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+ struct platform_device *pdev;
+
+ pdev = to_platform_device(cpsw->dev);
+ strlcpy(info->driver, "cpsw-switch", sizeof(info->driver));
+ strlcpy(info->version, "2.0", sizeof(info->version));
+ strlcpy(info->bus_info, pdev->name, sizeof(info->bus_info));
+}
+
+static int cpsw_set_pauseparam(struct net_device *ndev,
+ struct ethtool_pauseparam *pause)
+{
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ int slave_no;
+
+ slave_no = cpsw_slave_index(cpsw, priv);
+ if (!cpsw->slaves[slave_no].phy)
+ return -EINVAL;
+
+ if (!phy_validate_pause(cpsw->slaves[slave_no].phy, pause))
+ return -EINVAL;
+
+ priv->rx_pause = pause->rx_pause ? true : false;
+ priv->tx_pause = pause->tx_pause ? true : false;
+
+ phy_set_asym_pause(cpsw->slaves[slave_no].phy,
+ priv->rx_pause, priv->tx_pause);
+
+ return 0;
+}
+
+static int cpsw_set_channels(struct net_device *ndev,
+ struct ethtool_channels *chs)
+{
+ return cpsw_set_channels_common(ndev, chs, cpsw_rx_handler);
+}
+
+static const struct ethtool_ops cpsw_ethtool_ops = {
+ .get_drvinfo = cpsw_get_drvinfo,
+ .get_msglevel = cpsw_get_msglevel,
+ .set_msglevel = cpsw_set_msglevel,
+ .get_link = ethtool_op_get_link,
+ .get_ts_info = cpsw_get_ts_info,
+ .get_coalesce = cpsw_get_coalesce,
+ .set_coalesce = cpsw_set_coalesce,
+ .get_sset_count = cpsw_get_sset_count,
+ .get_strings = cpsw_get_strings,
+ .get_ethtool_stats = cpsw_get_ethtool_stats,
+ .get_pauseparam = cpsw_get_pauseparam,
+ .set_pauseparam = cpsw_set_pauseparam,
+ .get_wol = cpsw_get_wol,
+ .set_wol = cpsw_set_wol,
+ .get_regs_len = cpsw_get_regs_len,
+ .get_regs = cpsw_get_regs,
+ .begin = cpsw_ethtool_op_begin,
+ .complete = cpsw_ethtool_op_complete,
+ .get_channels = cpsw_get_channels,
+ .set_channels = cpsw_set_channels,
+ .get_link_ksettings = cpsw_get_link_ksettings,
+ .set_link_ksettings = cpsw_set_link_ksettings,
+ .get_eee = cpsw_get_eee,
+ .set_eee = cpsw_set_eee,
+ .nway_reset = cpsw_nway_reset,
+ .get_ringparam = cpsw_get_ringparam,
+ .set_ringparam = cpsw_set_ringparam,
+};
+
+static int cpsw_probe_dt(struct cpsw_common *cpsw)
+{
+ struct device_node *node = cpsw->dev->of_node, *tmp_node, *port_np;
+ struct cpsw_platform_data *data = &cpsw->data;
+ struct device *dev = cpsw->dev;
+ int ret;
+ u32 prop;
+
+ if (!node)
+ return -EINVAL;
+
+ tmp_node = of_get_child_by_name(node, "ethernet-ports");
+ if (!tmp_node)
+ return -ENOENT;
+ data->slaves = of_get_child_count(tmp_node);
+ if (data->slaves != CPSW_SLAVE_PORTS_NUM) {
+ of_node_put(tmp_node);
+ return -ENOENT;
+ }
+
+ data->active_slave = 0;
+ data->channels = CPSW_MAX_QUEUES;
+ data->ale_entries = CPSW_ALE_NUM_ENTRIES;
+ data->dual_emac = 1;
+ data->bd_ram_size = CPSW_BD_RAM_SIZE;
+ data->mac_control = 0;
+
+ data->slave_data = devm_kcalloc(dev, CPSW_SLAVE_PORTS_NUM,
+ sizeof(struct cpsw_slave_data),
+ GFP_KERNEL);
+ if (!data->slave_data)
+ return -ENOMEM;
+
+ /* Populate all the child nodes here...
+ */
+ ret = devm_of_platform_populate(dev);
+ /* We do not want to force this, as in some cases it may not have children */
+ if (ret)
+ dev_warn(dev, "Doesn't have any child node\n");
+
+ for_each_child_of_node(tmp_node, port_np) {
+ struct cpsw_slave_data *slave_data;
+ const void *mac_addr;
+ u32 port_id;
+
+ ret = of_property_read_u32(port_np, "reg", &port_id);
+ if (ret < 0) {
+ dev_err(dev, "%pOF error reading port_id %d\n",
+ port_np, ret);
+ goto err_node_put;
+ }
+
+ if (!port_id || port_id > CPSW_SLAVE_PORTS_NUM) {
+ dev_err(dev, "%pOF has invalid port_id %u\n",
+ port_np, port_id);
+ ret = -EINVAL;
+ goto err_node_put;
+ }
+
+ slave_data = &data->slave_data[port_id - 1];
+
+ slave_data->disabled = !of_device_is_available(port_np);
+ if (slave_data->disabled)
+ continue;
+
+ slave_data->slave_node = port_np;
+ slave_data->ifphy = devm_of_phy_get(dev, port_np, NULL);
+ if (IS_ERR(slave_data->ifphy)) {
+ ret = PTR_ERR(slave_data->ifphy);
+ dev_err(dev, "%pOF: Error retrieving port phy: %d\n",
+ port_np, ret);
+ goto err_node_put;
+ }
+
+ if (of_phy_is_fixed_link(port_np)) {
+ ret = of_phy_register_fixed_link(port_np);
+ if (ret) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "%pOF failed to register fixed-link phy: %d\n",
+ port_np, ret);
+ goto err_node_put;
+ }
+ slave_data->phy_node = of_node_get(port_np);
+ } else {
+ slave_data->phy_node =
+ of_parse_phandle(port_np, "phy-handle", 0);
+ }
+
+ if (!slave_data->phy_node) {
+ dev_err(dev, "%pOF no phy found\n", port_np);
+ ret = -ENODEV;
+ goto err_node_put;
+ }
+
+ ret = of_get_phy_mode(port_np, &slave_data->phy_if);
+ if (ret) {
+ dev_err(dev, "%pOF read phy-mode err %d\n",
+ port_np, ret);
+ goto err_node_put;
+ }
+
+ mac_addr = of_get_mac_address(port_np);
+ if (!IS_ERR(mac_addr)) {
+ ether_addr_copy(slave_data->mac_addr, mac_addr);
+ } else {
+ ret = ti_cm_get_macid(dev, port_id - 1,
+ slave_data->mac_addr);
+ if (ret)
+ goto err_node_put;
+ }
+
+ if (of_property_read_u32(port_np, "ti,dual-emac-pvid",
+ &prop)) {
+ dev_err(dev, "%pOF Missing ti,dual-emac-pvid property in DT.\n",
+ port_np);
+ slave_data->dual_emac_res_vlan = port_id;
+ dev_err(dev, "%pOF Using %d as Reserved VLAN\n",
+ port_np, slave_data->dual_emac_res_vlan);
+ } else {
+ slave_data->dual_emac_res_vlan = prop;
+ }
+ }
+
+ of_node_put(tmp_node);
+ return 0;
+
+err_node_put:
+ of_node_put(port_np);
+ return ret;
+}
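/* Editor's sketch: a hypothetical device-tree fragment matching the
 * properties parsed above ("ethernet-ports" container, per-port "reg",
 * "phys", "phy-handle", "phy-mode" and optional "ti,dual-emac-pvid").
 * Node names and phandles are illustrative only.
 *
 *	ethernet-ports {
 *		port@1 {
 *			reg = <1>;
 *			phys = <&phy_gmii_sel 1>;
 *			phy-handle = <&ethphy0>;
 *			phy-mode = "rgmii-txid";
 *			ti,dual-emac-pvid = <1>;
 *		};
 *		port@2 {
 *			reg = <2>;
 *			phys = <&phy_gmii_sel 2>;
 *			phy-handle = <&ethphy1>;
 *			phy-mode = "rgmii-txid";
 *			ti,dual-emac-pvid = <2>;
 *		};
 *	};
 */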
+
+static void cpsw_remove_dt(struct cpsw_common *cpsw)
+{
+ struct cpsw_platform_data *data = &cpsw->data;
+ int i = 0;
+
+ for (i = 0; i < cpsw->data.slaves; i++) {
+ struct cpsw_slave_data *slave_data = &data->slave_data[i];
+ struct device_node *port_np = slave_data->phy_node;
+
+ if (port_np) {
+ if (of_phy_is_fixed_link(port_np))
+ of_phy_deregister_fixed_link(port_np);
+
+ of_node_put(port_np);
+ }
+ }
+}
+
+static int cpsw_create_ports(struct cpsw_common *cpsw)
+{
+ struct cpsw_platform_data *data = &cpsw->data;
+ struct net_device *ndev, *napi_ndev = NULL;
+ struct device *dev = cpsw->dev;
+ struct cpsw_priv *priv;
+ int ret = 0, i = 0;
+
+ for (i = 0; i < cpsw->data.slaves; i++) {
+ struct cpsw_slave_data *slave_data = &data->slave_data[i];
+
+ if (slave_data->disabled)
+ continue;
+
+ ndev = devm_alloc_etherdev_mqs(dev, sizeof(struct cpsw_priv),
+ CPSW_MAX_QUEUES,
+ CPSW_MAX_QUEUES);
+ if (!ndev) {
+ dev_err(dev, "error allocating net_device\n");
+ return -ENOMEM;
+ }
+
+ priv = netdev_priv(ndev);
+ priv->cpsw = cpsw;
+ priv->ndev = ndev;
+ priv->dev = dev;
+ priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
+ priv->emac_port = i + 1;
+
+ if (is_valid_ether_addr(slave_data->mac_addr)) {
+ ether_addr_copy(priv->mac_addr, slave_data->mac_addr);
+ dev_info(cpsw->dev, "Detected MACID = %pM\n",
+ priv->mac_addr);
+ } else {
+ eth_random_addr(slave_data->mac_addr);
+ dev_info(cpsw->dev, "Random MACID = %pM\n",
+ priv->mac_addr);
+ }
+ ether_addr_copy(ndev->dev_addr, slave_data->mac_addr);
+ ether_addr_copy(priv->mac_addr, slave_data->mac_addr);
+
+ cpsw->slaves[i].ndev = ndev;
+
+ ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_NETNS_LOCAL;
+
+ ndev->netdev_ops = &cpsw_netdev_ops;
+ ndev->ethtool_ops = &cpsw_ethtool_ops;
+ SET_NETDEV_DEV(ndev, dev);
+
+ if (!napi_ndev) {
+ /* CPSW Host port CPDMA interface is shared between
+ * ports and there is only one TX and one RX IRQs
+ * available for all possible TX and RX channels
+ * accordingly.
+ */
+ netif_napi_add(ndev, &cpsw->napi_rx,
+ cpsw->quirk_irq ?
+ cpsw_rx_poll : cpsw_rx_mq_poll,
+ CPSW_POLL_WEIGHT);
+ netif_tx_napi_add(ndev, &cpsw->napi_tx,
+ cpsw->quirk_irq ?
+ cpsw_tx_poll : cpsw_tx_mq_poll,
+ CPSW_POLL_WEIGHT);
+ }
+
+ napi_ndev = ndev;
+ }
+
+ return ret;
+}
+
+static void cpsw_unregister_ports(struct cpsw_common *cpsw)
+{
+ int i = 0;
+
+ for (i = 0; i < cpsw->data.slaves; i++) {
+ if (!cpsw->slaves[i].ndev)
+ continue;
+
+ unregister_netdev(cpsw->slaves[i].ndev);
+ }
+}
+
+static int cpsw_register_ports(struct cpsw_common *cpsw)
+{
+ int ret = 0, i = 0;
+
+ for (i = 0; i < cpsw->data.slaves; i++) {
+ if (!cpsw->slaves[i].ndev)
+ continue;
+
+ /* register the network device */
+ ret = register_netdev(cpsw->slaves[i].ndev);
+ if (ret) {
+ dev_err(cpsw->dev,
+ "cpsw: error registering net device %d\n", i);
+ cpsw->slaves[i].ndev = NULL;
+ break;
+ }
+ }
+
+ if (ret)
+ cpsw_unregister_ports(cpsw);
+ return ret;
+}
+
+bool cpsw_port_dev_check(const struct net_device *ndev)
+{
+ if (ndev->netdev_ops == &cpsw_netdev_ops) {
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+
+ return !cpsw->data.dual_emac;
+ }
+
+ return false;
+}
+
+static void cpsw_port_offload_fwd_mark_update(struct cpsw_common *cpsw)
+{
+ int set_val = 0;
+ int i;
+
+ if (!cpsw->ale_bypass &&
+ (cpsw->br_members == (ALE_PORT_1 | ALE_PORT_2)))
+ set_val = 1;
+
+ dev_dbg(cpsw->dev, "set offload_fwd_mark %d\n", set_val);
+
+ for (i = 0; i < cpsw->data.slaves; i++) {
+ struct net_device *sl_ndev = cpsw->slaves[i].ndev;
+ struct cpsw_priv *priv = netdev_priv(sl_ndev);
+
+ priv->offload_fwd_mark = set_val;
+ }
+}
+
+static int cpsw_netdevice_port_link(struct net_device *ndev,
+ struct net_device *br_ndev)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+
+ if (!cpsw->br_members) {
+ cpsw->hw_bridge_dev = br_ndev;
+ } else {
+ /* This is adding the port to a second bridge, which is
+ * unsupported.
+ */
+ if (cpsw->hw_bridge_dev != br_ndev)
+ return -EOPNOTSUPP;
+ }
+
+ cpsw->br_members |= BIT(priv->emac_port);
+
+ cpsw_port_offload_fwd_mark_update(cpsw);
+
+ return NOTIFY_DONE;
+}
+
+static void cpsw_netdevice_port_unlink(struct net_device *ndev)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+
+ cpsw->br_members &= ~BIT(priv->emac_port);
+
+ cpsw_port_offload_fwd_mark_update(cpsw);
+
+ if (!cpsw->br_members)
+ cpsw->hw_bridge_dev = NULL;
+}
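/* Editor's usage sketch (interface names are system-dependent and
 * purely illustrative): hardware forwarding is marked as offloaded
 * only once both ports are members of the same bridge.
 *
 *	ip link add name br0 type bridge
 *	ip link set dev eth0 master br0   # CHANGEUPPER, linking
 *	ip link set dev eth1 master br0   # br_members == PORT_1|PORT_2
 *	ip link set dev eth0 nomaster     # takes the unlink path above
 */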
+
+/* netdev notifier */
+static int cpsw_netdevice_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
+ struct netdev_notifier_changeupper_info *info;
+ int ret = NOTIFY_DONE;
+
+ if (!cpsw_port_dev_check(ndev))
+ return NOTIFY_DONE;
+
+ switch (event) {
+ case NETDEV_CHANGEUPPER:
+ info = ptr;
+
+ if (netif_is_bridge_master(info->upper_dev)) {
+ if (info->linking)
+ ret = cpsw_netdevice_port_link(ndev,
+ info->upper_dev);
+ else
+ cpsw_netdevice_port_unlink(ndev);
+ }
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+
+ return notifier_from_errno(ret);
+}
+
+static struct notifier_block cpsw_netdevice_nb __read_mostly = {
+ .notifier_call = cpsw_netdevice_event,
+};
+
+static int cpsw_register_notifiers(struct cpsw_common *cpsw)
+{
+ int ret = 0;
+
+ ret = register_netdevice_notifier(&cpsw_netdevice_nb);
+ if (ret) {
+ dev_err(cpsw->dev, "can't register netdevice notifier\n");
+ return ret;
+ }
+
+ ret = cpsw_switchdev_register_notifiers(cpsw);
+ if (ret)
+ unregister_netdevice_notifier(&cpsw_netdevice_nb);
+
+ return ret;
+}
+
+static void cpsw_unregister_notifiers(struct cpsw_common *cpsw)
+{
+ cpsw_switchdev_unregister_notifiers(cpsw);
+ unregister_netdevice_notifier(&cpsw_netdevice_nb);
+}
+
+static const struct devlink_ops cpsw_devlink_ops = {
+};
+
+static int cpsw_dl_switch_mode_get(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct cpsw_devlink *dl_priv = devlink_priv(dl);
+ struct cpsw_common *cpsw = dl_priv->cpsw;
+
+ dev_dbg(cpsw->dev, "%s id:%u\n", __func__, id);
+
+ if (id != CPSW_DL_PARAM_SWITCH_MODE)
+ return -EOPNOTSUPP;
+
+ ctx->val.vbool = !cpsw->data.dual_emac;
+
+ return 0;
+}
+
+static int cpsw_dl_switch_mode_set(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct cpsw_devlink *dl_priv = devlink_priv(dl);
+ struct cpsw_common *cpsw = dl_priv->cpsw;
+ int vlan = cpsw->data.default_vlan;
+ bool switch_en = ctx->val.vbool;
+ bool if_running = false;
+ int i;
+
+ dev_dbg(cpsw->dev, "%s id:%u\n", __func__, id);
+
+ if (id != CPSW_DL_PARAM_SWITCH_MODE)
+ return -EOPNOTSUPP;
+
+ if (switch_en == !cpsw->data.dual_emac)
+ return 0;
+
+ if (!switch_en && cpsw->br_members) {
+ dev_err(cpsw->dev, "Remove ports from BR before disabling switch mode\n");
+ return -EINVAL;
+ }
+
+ rtnl_lock();
+
+ for (i = 0; i < cpsw->data.slaves; i++) {
+ struct cpsw_slave *slave = &cpsw->slaves[i];
+ struct net_device *sl_ndev = slave->ndev;
+
+ if (!sl_ndev || !netif_running(sl_ndev))
+ continue;
+
+ if_running = true;
+ }
+
+ if (!if_running) {
+ /* all ndevs are down */
+ cpsw->data.dual_emac = !switch_en;
+ for (i = 0; i < cpsw->data.slaves; i++) {
+ struct cpsw_slave *slave = &cpsw->slaves[i];
+ struct net_device *sl_ndev = slave->ndev;
+ struct cpsw_priv *priv;
+
+ if (!sl_ndev)
+ continue;
+
+ priv = netdev_priv(sl_ndev);
+ if (switch_en)
+ vlan = cpsw->data.default_vlan;
+ else
+ vlan = slave->data->dual_emac_res_vlan;
+ slave->port_vlan = vlan;
+ }
+ goto exit;
+ }
+
+ if (switch_en) {
+ dev_info(cpsw->dev, "Enable switch mode\n");
+
+ /* enable bypass - no forwarding; all traffic goes to Host */
+ cpsw_ale_control_set(cpsw->ale, 0, ALE_BYPASS, 1);
+
+ /* clean up ALE table */
+ cpsw_ale_control_set(cpsw->ale, 0, ALE_CLEAR, 1);
+ cpsw_ale_control_get(cpsw->ale, 0, ALE_AGEOUT);
+
+ cpsw_init_host_port_switch(cpsw);
+
+ for (i = 0; i < cpsw->data.slaves; i++) {
+ struct cpsw_slave *slave = &cpsw->slaves[i];
+ struct net_device *sl_ndev = slave->ndev;
+ struct cpsw_priv *priv;
+
+ if (!sl_ndev)
+ continue;
+
+ priv = netdev_priv(sl_ndev);
+ slave->port_vlan = vlan;
+ if (netif_running(sl_ndev))
+ cpsw_port_add_switch_def_ale_entries(priv,
+ slave);
+ }
+
+ cpsw_ale_control_set(cpsw->ale, 0, ALE_BYPASS, 0);
+ cpsw->data.dual_emac = false;
+ } else {
+ dev_info(cpsw->dev, "Disable switch mode\n");
+
+ /* enable bypass - no forwarding; all traffic goes to Host */
+ cpsw_ale_control_set(cpsw->ale, 0, ALE_BYPASS, 1);
+
+ cpsw_ale_control_set(cpsw->ale, 0, ALE_CLEAR, 1);
+ cpsw_ale_control_get(cpsw->ale, 0, ALE_AGEOUT);
+
+ cpsw_init_host_port_dual_mac(cpsw);
+
+ for (i = 0; i < cpsw->data.slaves; i++) {
+ struct cpsw_slave *slave = &cpsw->slaves[i];
+ struct net_device *sl_ndev = slave->ndev;
+ struct cpsw_priv *priv;
+
+ if (!sl_ndev)
+ continue;
+
+ priv = netdev_priv(slave->ndev);
+ slave->port_vlan = slave->data->dual_emac_res_vlan;
+ cpsw_port_add_dual_emac_def_ale_entries(priv, slave);
+ }
+
+ cpsw_ale_control_set(cpsw->ale, 0, ALE_BYPASS, 0);
+ cpsw->data.dual_emac = true;
+ }
+exit:
+ rtnl_unlock();
+
+ return 0;
+}
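/* Editor's usage sketch: toggling the mode at runtime through the
 * devlink parameter registered further below; the device handle is
 * illustrative — query the real one with "devlink dev".
 *
 *	devlink dev param set platform/4a100000.switch \
 *		name switch_mode value true cmode runtime
 *	devlink dev param show platform/4a100000.switch name switch_mode
 */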
+
+static int cpsw_dl_ale_ctrl_get(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct cpsw_devlink *dl_priv = devlink_priv(dl);
+ struct cpsw_common *cpsw = dl_priv->cpsw;
+
+ dev_dbg(cpsw->dev, "%s id:%u\n", __func__, id);
+
+ switch (id) {
+ case CPSW_DL_PARAM_ALE_BYPASS:
+ ctx->val.vbool = cpsw_ale_control_get(cpsw->ale, 0, ALE_BYPASS);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int cpsw_dl_ale_ctrl_set(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct cpsw_devlink *dl_priv = devlink_priv(dl);
+ struct cpsw_common *cpsw = dl_priv->cpsw;
+ int ret = -EOPNOTSUPP;
+
+ dev_dbg(cpsw->dev, "%s id:%u\n", __func__, id);
+
+ switch (id) {
+ case CPSW_DL_PARAM_ALE_BYPASS:
+ ret = cpsw_ale_control_set(cpsw->ale, 0, ALE_BYPASS,
+ ctx->val.vbool);
+ if (!ret) {
+ cpsw->ale_bypass = ctx->val.vbool;
+ cpsw_port_offload_fwd_mark_update(cpsw);
+ }
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static const struct devlink_param cpsw_devlink_params[] = {
+ DEVLINK_PARAM_DRIVER(CPSW_DL_PARAM_SWITCH_MODE,
+ "switch_mode", DEVLINK_PARAM_TYPE_BOOL,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ cpsw_dl_switch_mode_get, cpsw_dl_switch_mode_set,
+ NULL),
+ DEVLINK_PARAM_DRIVER(CPSW_DL_PARAM_ALE_BYPASS,
+ "ale_bypass", DEVLINK_PARAM_TYPE_BOOL,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ cpsw_dl_ale_ctrl_get, cpsw_dl_ale_ctrl_set, NULL),
+};
+
+static int cpsw_register_devlink(struct cpsw_common *cpsw)
+{
+ struct device *dev = cpsw->dev;
+ struct cpsw_devlink *dl_priv;
+ int ret = 0;
+
+ cpsw->devlink = devlink_alloc(&cpsw_devlink_ops, sizeof(*dl_priv));
+ if (!cpsw->devlink)
+ return -ENOMEM;
+
+ dl_priv = devlink_priv(cpsw->devlink);
+ dl_priv->cpsw = cpsw;
+
+ ret = devlink_register(cpsw->devlink, dev);
+ if (ret) {
+ dev_err(dev, "DL reg fail ret:%d\n", ret);
+ goto dl_free;
+ }
+
+ ret = devlink_params_register(cpsw->devlink, cpsw_devlink_params,
+ ARRAY_SIZE(cpsw_devlink_params));
+ if (ret) {
+ dev_err(dev, "DL params reg fail ret:%d\n", ret);
+ goto dl_unreg;
+ }
+
+ devlink_params_publish(cpsw->devlink);
+ return ret;
+
+dl_unreg:
+ devlink_unregister(cpsw->devlink);
+dl_free:
+ devlink_free(cpsw->devlink);
+ return ret;
+}
+
+static void cpsw_unregister_devlink(struct cpsw_common *cpsw)
+{
+ devlink_params_unpublish(cpsw->devlink);
+ devlink_params_unregister(cpsw->devlink, cpsw_devlink_params,
+ ARRAY_SIZE(cpsw_devlink_params));
+ devlink_unregister(cpsw->devlink);
+ devlink_free(cpsw->devlink);
+}
+
+static const struct of_device_id cpsw_of_mtable[] = {
+ { .compatible = "ti,cpsw-switch"},
+ { .compatible = "ti,am335x-cpsw-switch"},
+ { .compatible = "ti,am4372-cpsw-switch"},
+ { .compatible = "ti,dra7-cpsw-switch"},
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, cpsw_of_mtable);
+
+static const struct soc_device_attribute cpsw_soc_devices[] = {
+ { .family = "AM33xx", .revision = "ES1.0"},
+ { /* sentinel */ }
+};
+
+static int cpsw_probe(struct platform_device *pdev)
+{
+ const struct soc_device_attribute *soc;
+ struct device *dev = &pdev->dev;
+ struct cpsw_common *cpsw;
+ struct resource *ss_res;
+ struct gpio_descs *mode;
+ void __iomem *ss_regs;
+ int ret = 0, ch;
+ struct clk *clk;
+ int irq;
+
+ cpsw = devm_kzalloc(dev, sizeof(struct cpsw_common), GFP_KERNEL);
+ if (!cpsw)
+ return -ENOMEM;
+
+ cpsw_slave_index = cpsw_slave_index_priv;
+
+ cpsw->dev = dev;
+
+ cpsw->slaves = devm_kcalloc(dev,
+ CPSW_SLAVE_PORTS_NUM,
+ sizeof(struct cpsw_slave),
+ GFP_KERNEL);
+ if (!cpsw->slaves)
+ return -ENOMEM;
+
+ mode = devm_gpiod_get_array_optional(dev, "mode", GPIOD_OUT_LOW);
+ if (IS_ERR(mode)) {
+ ret = PTR_ERR(mode);
+ dev_err(dev, "gpio request failed, ret %d\n", ret);
+ return ret;
+ }
+
+ clk = devm_clk_get(dev, "fck");
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ dev_err(dev, "fck clock is not found, err %d\n", ret);
+ return ret;
+ }
+ cpsw->bus_freq_mhz = clk_get_rate(clk) / 1000000;
+
+ ss_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ss_regs = devm_ioremap_resource(dev, ss_res);
+ if (IS_ERR(ss_regs)) {
+ ret = PTR_ERR(ss_regs);
+ return ret;
+ }
+ cpsw->regs = ss_regs;
+
+ irq = platform_get_irq_byname(pdev, "rx");
+ if (irq < 0)
+ return irq;
+ cpsw->irqs_table[0] = irq;
+
+ irq = platform_get_irq_byname(pdev, "tx");
+ if (irq < 0)
+ return irq;
+ cpsw->irqs_table[1] = irq;
+
+ platform_set_drvdata(pdev, cpsw);
+ /* This may be required here for child devices. */
+ pm_runtime_enable(dev);
+
+ /* Need to enable clocks with the runtime PM API to access module
+ * registers
+ */
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(dev);
+ pm_runtime_disable(dev);
+ return ret;
+ }
+
+ ret = cpsw_probe_dt(cpsw);
+ if (ret)
+ goto clean_dt_ret;
+
+ soc = soc_device_match(cpsw_soc_devices);
+ if (soc)
+ cpsw->quirk_irq = 1;
+
+ cpsw->rx_packet_max = rx_packet_max;
+ cpsw->descs_pool_size = descs_pool_size;
+ eth_random_addr(cpsw->base_mac);
+
+ ret = cpsw_init_common(cpsw, ss_regs, ale_ageout,
+ (u32 __force)ss_res->start + CPSW2_BD_OFFSET,
+ descs_pool_size);
+ if (ret)
+ goto clean_dt_ret;
+
+ cpsw->wr_regs = cpsw->version == CPSW_VERSION_1 ?
+ ss_regs + CPSW1_WR_OFFSET :
+ ss_regs + CPSW2_WR_OFFSET;
+
+ ch = cpsw->quirk_irq ? 0 : 7;
+ cpsw->txv[0].ch = cpdma_chan_create(cpsw->dma, ch, cpsw_tx_handler, 0);
+ if (IS_ERR(cpsw->txv[0].ch)) {
+ dev_err(dev, "error initializing tx dma channel\n");
+ ret = PTR_ERR(cpsw->txv[0].ch);
+ goto clean_cpts;
+ }
+
+ cpsw->rxv[0].ch = cpdma_chan_create(cpsw->dma, 0, cpsw_rx_handler, 1);
+ if (IS_ERR(cpsw->rxv[0].ch)) {
+ dev_err(dev, "error initializing rx dma channel\n");
+ ret = PTR_ERR(cpsw->rxv[0].ch);
+ goto clean_cpts;
+ }
+ cpsw_split_res(cpsw);
+
+ /* setup netdevs */
+ ret = cpsw_create_ports(cpsw);
+ if (ret)
+ goto clean_unregister_netdev;
+
+ /* Grab RX and TX IRQs. Note that we also have RX_THRESHOLD and
+ * MISC IRQs which are always kept disabled with this driver so
+ * we will not request them.
+ *
+ * If anyone wants to implement support for those, make sure to
+ * first request and append them to irqs_table array.
+ */
+
+ ret = devm_request_irq(dev, cpsw->irqs_table[0], cpsw_rx_interrupt,
+ 0, dev_name(dev), cpsw);
+ if (ret < 0) {
+ dev_err(dev, "error attaching irq (%d)\n", ret);
+ goto clean_unregister_netdev;
+ }
+
+ ret = devm_request_irq(dev, cpsw->irqs_table[1], cpsw_tx_interrupt,
+ 0, dev_name(dev), cpsw);
+ if (ret < 0) {
+ dev_err(dev, "error attaching irq (%d)\n", ret);
+ goto clean_unregister_netdev;
+ }
+
+ ret = cpsw_register_notifiers(cpsw);
+ if (ret)
+ goto clean_unregister_netdev;
+
+ ret = cpsw_register_devlink(cpsw);
+ if (ret)
+ goto clean_unregister_notifiers;
+
+ ret = cpsw_register_ports(cpsw);
+ if (ret)
+ goto clean_unregister_notifiers;
+
+ dev_notice(dev, "initialized (regs %pa, pool size %d) hw_ver:%08X %d.%d (%d)\n",
+ &ss_res->start, descs_pool_size,
+ cpsw->version, CPSW_MAJOR_VERSION(cpsw->version),
+ CPSW_MINOR_VERSION(cpsw->version),
+ CPSW_RTL_VERSION(cpsw->version));
+
+ pm_runtime_put(dev);
+
+ return 0;
+
+clean_unregister_notifiers:
+ cpsw_unregister_notifiers(cpsw);
+clean_unregister_netdev:
+ cpsw_unregister_ports(cpsw);
+clean_cpts:
+ cpts_release(cpsw->cpts);
+ cpdma_ctlr_destroy(cpsw->dma);
+clean_dt_ret:
+ cpsw_remove_dt(cpsw);
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
+ return ret;
+}
+
+static int cpsw_remove(struct platform_device *pdev)
+{
+ struct cpsw_common *cpsw = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(&pdev->dev);
+ return ret;
+ }
+
+ cpsw_unregister_notifiers(cpsw);
+ cpsw_unregister_devlink(cpsw);
+ cpsw_unregister_ports(cpsw);
+
+ cpts_release(cpsw->cpts);
+ cpdma_ctlr_destroy(cpsw->dma);
+ cpsw_remove_dt(cpsw);
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ return 0;
+}
+
+static struct platform_driver cpsw_driver = {
+ .driver = {
+ .name = "cpsw-switch",
+ .of_match_table = cpsw_of_mtable,
+ },
+ .probe = cpsw_probe,
+ .remove = cpsw_remove,
+};
+
+module_platform_driver(cpsw_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("TI CPSW switchdev Ethernet driver");
diff --git a/drivers/net/ethernet/ti/cpsw_priv.c b/drivers/net/ethernet/ti/cpsw_priv.c
index 476d050a022c..b833cc1d188c 100644
--- a/drivers/net/ethernet/ti/cpsw_priv.c
+++ b/drivers/net/ethernet/ti/cpsw_priv.c
@@ -5,20 +5,415 @@
* Copyright (C) 2019 Texas Instruments
*/
+#include <linux/bpf.h>
+#include <linux/bpf_trace.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
+#include <linux/kmemleak.h>
#include <linux/module.h>
#include <linux/netdevice.h>
+#include <linux/net_tstamp.h>
+#include <linux/of.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/skbuff.h>
+#include <net/page_pool.h>
+#include <net/pkt_cls.h>
+#include "cpsw.h"
#include "cpts.h"
#include "cpsw_ale.h"
#include "cpsw_priv.h"
#include "cpsw_sl.h"
#include "davinci_cpdma.h"
+int (*cpsw_slave_index)(struct cpsw_common *cpsw, struct cpsw_priv *priv);
+
+void cpsw_intr_enable(struct cpsw_common *cpsw)
+{
+ writel_relaxed(0xFF, &cpsw->wr_regs->tx_en);
+ writel_relaxed(0xFF, &cpsw->wr_regs->rx_en);
+
+ cpdma_ctlr_int_ctrl(cpsw->dma, true);
+}
+
+void cpsw_intr_disable(struct cpsw_common *cpsw)
+{
+ writel_relaxed(0, &cpsw->wr_regs->tx_en);
+ writel_relaxed(0, &cpsw->wr_regs->rx_en);
+
+ cpdma_ctlr_int_ctrl(cpsw->dma, false);
+}
+
+void cpsw_tx_handler(void *token, int len, int status)
+{
+ struct cpsw_meta_xdp *xmeta;
+ struct xdp_frame *xdpf;
+ struct net_device *ndev;
+ struct netdev_queue *txq;
+ struct sk_buff *skb;
+ int ch;
+
+ if (cpsw_is_xdpf_handle(token)) {
+ xdpf = cpsw_handle_to_xdpf(token);
+ xmeta = (void *)xdpf + CPSW_XMETA_OFFSET;
+ ndev = xmeta->ndev;
+ ch = xmeta->ch;
+ xdp_return_frame(xdpf);
+ } else {
+ skb = token;
+ ndev = skb->dev;
+ ch = skb_get_queue_mapping(skb);
+ cpts_tx_timestamp(ndev_to_cpsw(ndev)->cpts, skb);
+ dev_kfree_skb_any(skb);
+ }
+
+ /* Check whether the queue is stopped due to stalled tx dma; if the
+ * queue is stopped then wake it, as we now have a free tx descriptor
+ */
+ txq = netdev_get_tx_queue(ndev, ch);
+ if (unlikely(netif_tx_queue_stopped(txq)))
+ netif_tx_wake_queue(txq);
+
+ ndev->stats.tx_packets++;
+ ndev->stats.tx_bytes += len;
+}
+
+irqreturn_t cpsw_tx_interrupt(int irq, void *dev_id)
+{
+ struct cpsw_common *cpsw = dev_id;
+
+ writel(0, &cpsw->wr_regs->tx_en);
+ cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_TX);
+
+ if (cpsw->quirk_irq) {
+ disable_irq_nosync(cpsw->irqs_table[1]);
+ cpsw->tx_irq_disabled = true;
+ }
+
+ napi_schedule(&cpsw->napi_tx);
+ return IRQ_HANDLED;
+}
+
+irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id)
+{
+ struct cpsw_common *cpsw = dev_id;
+
+ cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_RX);
+ writel(0, &cpsw->wr_regs->rx_en);
+
+ if (cpsw->quirk_irq) {
+ disable_irq_nosync(cpsw->irqs_table[0]);
+ cpsw->rx_irq_disabled = true;
+ }
+
+ napi_schedule(&cpsw->napi_rx);
+ return IRQ_HANDLED;
+}
+
+int cpsw_tx_mq_poll(struct napi_struct *napi_tx, int budget)
+{
+ struct cpsw_common *cpsw = napi_to_cpsw(napi_tx);
+ int num_tx, cur_budget, ch;
+ u32 ch_map;
+ struct cpsw_vector *txv;
+
+ /* process every unprocessed channel */
+ ch_map = cpdma_ctrl_txchs_state(cpsw->dma);
+ for (ch = 0, num_tx = 0; ch_map & 0xff; ch_map <<= 1, ch++) {
+ if (!(ch_map & 0x80))
+ continue;
+
+ txv = &cpsw->txv[ch];
+ if (unlikely(txv->budget > budget - num_tx))
+ cur_budget = budget - num_tx;
+ else
+ cur_budget = txv->budget;
+
+ num_tx += cpdma_chan_process(txv->ch, cur_budget);
+ if (num_tx >= budget)
+ break;
+ }
+
+ if (num_tx < budget) {
+ napi_complete(napi_tx);
+ writel(0xff, &cpsw->wr_regs->tx_en);
+ }
+
+ return num_tx;
+}
+
+int cpsw_tx_poll(struct napi_struct *napi_tx, int budget)
+{
+ struct cpsw_common *cpsw = napi_to_cpsw(napi_tx);
+ int num_tx;
+
+ num_tx = cpdma_chan_process(cpsw->txv[0].ch, budget);
+ if (num_tx < budget) {
+ napi_complete(napi_tx);
+ writel(0xff, &cpsw->wr_regs->tx_en);
+ if (cpsw->tx_irq_disabled) {
+ cpsw->tx_irq_disabled = false;
+ enable_irq(cpsw->irqs_table[1]);
+ }
+ }
+
+ return num_tx;
+}
+
+int cpsw_rx_mq_poll(struct napi_struct *napi_rx, int budget)
+{
+ struct cpsw_common *cpsw = napi_to_cpsw(napi_rx);
+ int num_rx, cur_budget, ch;
+ u32 ch_map;
+ struct cpsw_vector *rxv;
+
+ /* process every unprocessed channel */
+ ch_map = cpdma_ctrl_rxchs_state(cpsw->dma);
+ for (ch = 0, num_rx = 0; ch_map; ch_map >>= 1, ch++) {
+ if (!(ch_map & 0x01))
+ continue;
+
+ rxv = &cpsw->rxv[ch];
+ if (unlikely(rxv->budget > budget - num_rx))
+ cur_budget = budget - num_rx;
+ else
+ cur_budget = rxv->budget;
+
+ num_rx += cpdma_chan_process(rxv->ch, cur_budget);
+ if (num_rx >= budget)
+ break;
+ }
+
+ if (num_rx < budget) {
+ napi_complete_done(napi_rx, num_rx);
+ writel(0xff, &cpsw->wr_regs->rx_en);
+ }
+
+ return num_rx;
+}
+
+int cpsw_rx_poll(struct napi_struct *napi_rx, int budget)
+{
+ struct cpsw_common *cpsw = napi_to_cpsw(napi_rx);
+ int num_rx;
+
+ num_rx = cpdma_chan_process(cpsw->rxv[0].ch, budget);
+ if (num_rx < budget) {
+ napi_complete_done(napi_rx, num_rx);
+ writel(0xff, &cpsw->wr_regs->rx_en);
+ if (cpsw->rx_irq_disabled) {
+ cpsw->rx_irq_disabled = false;
+ enable_irq(cpsw->irqs_table[0]);
+ }
+ }
+
+ return num_rx;
+}
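/* Editor's sketch (generic NAPI skeleton, not driver code) mirroring
 * the handshake used above: the hard IRQ masks its source and
 * schedules NAPI; the poll routine re-arms the source only once it
 * completes under budget. my_mask_irq()/my_unmask_irq()/my_process()
 * are hypothetical helpers.
 */
#include <linux/interrupt.h>
#include <linux/netdevice.h>

struct my_dev {
	struct napi_struct napi;
};

static void my_mask_irq(struct my_dev *md);		/* hypothetical */
static void my_unmask_irq(struct my_dev *md);		/* hypothetical */
static int my_process(struct my_dev *md, int budget);	/* hypothetical */

static irqreturn_t my_irq(int irq, void *data)
{
	struct my_dev *md = data;

	my_mask_irq(md);		/* mask source, ack handled in poll */
	napi_schedule(&md->napi);
	return IRQ_HANDLED;
}

static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_dev *md = container_of(napi, struct my_dev, napi);
	int done = my_process(md, budget);

	if (done < budget) {
		napi_complete_done(napi, done);
		my_unmask_irq(md);	/* re-arm only when fully drained */
	}
	return done;
}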
+
+void cpsw_rx_vlan_encap(struct sk_buff *skb)
+{
+ struct cpsw_priv *priv = netdev_priv(skb->dev);
+ u32 rx_vlan_encap_hdr = *((u32 *)skb->data);
+ struct cpsw_common *cpsw = priv->cpsw;
+ u16 vtag, vid, prio, pkt_type;
+
+ /* Remove VLAN header encapsulation word */
+ skb_pull(skb, CPSW_RX_VLAN_ENCAP_HDR_SIZE);
+
+ pkt_type = (rx_vlan_encap_hdr >>
+ CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_SHIFT) &
+ CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_MSK;
+ /* Ignore unknown & Priority-tagged packets */
+ if (pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_RESERV ||
+ pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_PRIO_TAG)
+ return;
+
+ vid = (rx_vlan_encap_hdr >>
+ CPSW_RX_VLAN_ENCAP_HDR_VID_SHIFT) &
+ VLAN_VID_MASK;
+ /* Ignore vid 0 and pass packet as is */
+ if (!vid)
+ return;
+
+ /* Untag P0 packets if set for vlan */
+ if (!cpsw_ale_get_vlan_p0_untag(cpsw->ale, vid)) {
+ prio = (rx_vlan_encap_hdr >>
+ CPSW_RX_VLAN_ENCAP_HDR_PRIO_SHIFT) &
+ CPSW_RX_VLAN_ENCAP_HDR_PRIO_MSK;
+
+ vtag = (prio << VLAN_PRIO_SHIFT) | vid;
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag);
+ }
+
+ /* strip vlan tag for VLAN-tagged packet */
+ if (pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_VLAN_TAG) {
+ memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
+ skb_pull(skb, VLAN_HLEN);
+ }
+}
+
+void cpsw_set_slave_mac(struct cpsw_slave *slave, struct cpsw_priv *priv)
+{
+ slave_write(slave, mac_hi(priv->mac_addr), SA_HI);
+ slave_write(slave, mac_lo(priv->mac_addr), SA_LO);
+}
+
+void soft_reset(const char *module, void __iomem *reg)
+{
+ unsigned long timeout = jiffies + HZ;
+
+ writel_relaxed(1, reg);
+ do {
+ cpu_relax();
+ } while ((readl_relaxed(reg) & 1) && time_after(timeout, jiffies));
+
+ WARN(readl_relaxed(reg) & 1, "failed to soft-reset %s\n", module);
+}
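/* Editor's sketch: an equivalent formulation of the loop above using
 * the generic poll helper from <linux/iopoll.h>; with a zero delay it
 * busy-waits like the open-coded version and returns -ETIMEDOUT
 * instead of WARNing.
 */
#include <linux/iopoll.h>

static int soft_reset_poll(void __iomem *reg)
{
	u32 val;

	writel_relaxed(1, reg);
	return readl_relaxed_poll_timeout(reg, val, !(val & 1),
					  0, USEC_PER_SEC);
}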
+
+void cpsw_ndo_tx_timeout(struct net_device *ndev)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int ch;
+
+ cpsw_err(priv, tx_err, "transmit timeout, restarting dma\n");
+ ndev->stats.tx_errors++;
+ cpsw_intr_disable(cpsw);
+ for (ch = 0; ch < cpsw->tx_ch_num; ch++) {
+ cpdma_chan_stop(cpsw->txv[ch].ch);
+ cpdma_chan_start(cpsw->txv[ch].ch);
+ }
+
+ cpsw_intr_enable(cpsw);
+ netif_trans_update(ndev);
+ netif_tx_wake_all_queues(ndev);
+}
+
+static int cpsw_get_common_speed(struct cpsw_common *cpsw)
+{
+ int i, speed;
+
+ for (i = 0, speed = 0; i < cpsw->data.slaves; i++)
+ if (cpsw->slaves[i].phy && cpsw->slaves[i].phy->link)
+ speed += cpsw->slaves[i].phy->speed;
+
+ return speed;
+}
+
+int cpsw_need_resplit(struct cpsw_common *cpsw)
+{
+ int i, rlim_ch_num;
+ int speed, ch_rate;
+
+ /* re-split resources only if the speed was changed */
+ speed = cpsw_get_common_speed(cpsw);
+ if (speed == cpsw->speed || !speed)
+ return 0;
+
+ cpsw->speed = speed;
+
+ for (i = 0, rlim_ch_num = 0; i < cpsw->tx_ch_num; i++) {
+ ch_rate = cpdma_chan_get_rate(cpsw->txv[i].ch);
+ if (!ch_rate)
+ break;
+
+ rlim_ch_num++;
+ }
+
+ /* cases not dependent on speed */
+ if (!rlim_ch_num || rlim_ch_num == cpsw->tx_ch_num)
+ return 0;
+
+ return 1;
+}
+
+void cpsw_split_res(struct cpsw_common *cpsw)
+{
+ u32 consumed_rate = 0, bigest_rate = 0;
+ struct cpsw_vector *txv = cpsw->txv;
+ int i, ch_weight, rlim_ch_num = 0;
+ int budget, bigest_rate_ch = 0;
+ u32 ch_rate, max_rate;
+ int ch_budget = 0;
+
+ for (i = 0; i < cpsw->tx_ch_num; i++) {
+ ch_rate = cpdma_chan_get_rate(txv[i].ch);
+ if (!ch_rate)
+ continue;
+
+ rlim_ch_num++;
+ consumed_rate += ch_rate;
+ }
+
+ if (cpsw->tx_ch_num == rlim_ch_num) {
+ max_rate = consumed_rate;
+ } else if (!rlim_ch_num) {
+ ch_budget = CPSW_POLL_WEIGHT / cpsw->tx_ch_num;
+ bigest_rate = 0;
+ max_rate = consumed_rate;
+ } else {
+ max_rate = cpsw->speed * 1000;
+
+ /* if max_rate is less than expected due to reduced link speed,
+ * split proportionally according to the next potential max speed
+ */
+ if (max_rate < consumed_rate)
+ max_rate *= 10;
+
+ if (max_rate < consumed_rate)
+ max_rate *= 10;
+
+ ch_budget = (consumed_rate * CPSW_POLL_WEIGHT) / max_rate;
+ ch_budget = (CPSW_POLL_WEIGHT - ch_budget) /
+ (cpsw->tx_ch_num - rlim_ch_num);
+ bigest_rate = (max_rate - consumed_rate) /
+ (cpsw->tx_ch_num - rlim_ch_num);
+ }
+
+ /* split tx weight/budget */
+ budget = CPSW_POLL_WEIGHT;
+ for (i = 0; i < cpsw->tx_ch_num; i++) {
+ ch_rate = cpdma_chan_get_rate(txv[i].ch);
+ if (ch_rate) {
+ txv[i].budget = (ch_rate * CPSW_POLL_WEIGHT) / max_rate;
+ if (!txv[i].budget)
+ txv[i].budget++;
+ if (ch_rate > bigest_rate) {
+ bigest_rate_ch = i;
+ bigest_rate = ch_rate;
+ }
+
+ ch_weight = (ch_rate * 100) / max_rate;
+ if (!ch_weight)
+ ch_weight++;
+ cpdma_chan_set_weight(cpsw->txv[i].ch, ch_weight);
+ } else {
+ txv[i].budget = ch_budget;
+ if (!bigest_rate_ch)
+ bigest_rate_ch = i;
+ cpdma_chan_set_weight(cpsw->txv[i].ch, 0);
+ }
+
+ budget -= txv[i].budget;
+ }
+
+ if (budget)
+ txv[bigest_rate_ch].budget += budget;
+
+ /* split rx budget */
+ budget = CPSW_POLL_WEIGHT;
+ ch_budget = budget / cpsw->rx_ch_num;
+ for (i = 0; i < cpsw->rx_ch_num; i++) {
+ cpsw->rxv[i].budget = ch_budget;
+ budget -= ch_budget;
+ }
+
+ if (budget)
+ cpsw->rxv[0].budget += budget;
+}
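/* Editor's worked example for the split above, assuming
 * CPSW_POLL_WEIGHT == 64 (as defined in cpsw_priv.h), a 1000 Mbps
 * link and four tx channels where ch0/ch1 are rate limited to
 * 100 Mbps (100000 kbps) each:
 *
 *	consumed_rate = 200000;  max_rate = 1000000;
 *	ch_budget (unlimited) = (64 - 200000*64/1000000) / 2
 *			      = (64 - 12) / 2 = 26
 *	txv[0..1].budget = 100000*64/1000000 = 6 each
 *	=> budgets 6 + 6 + 26 + 26 = 64, nothing left to top up.
 */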
+
int cpsw_init_common(struct cpsw_common *cpsw, void __iomem *ss_regs,
int ale_ageout, phys_addr_t desc_mem_phys,
int descs_pool_size)
@@ -28,6 +423,7 @@ int cpsw_init_common(struct cpsw_common *cpsw, void __iomem *ss_regs,
struct cpsw_platform_data *data;
struct cpdma_params dma_params;
struct device *dev = cpsw->dev;
+ struct device_node *cpts_node;
void __iomem *cpts_regs;
int ret = 0, i;
@@ -122,11 +518,859 @@ int cpsw_init_common(struct cpsw_common *cpsw, void __iomem *ss_regs,
return -ENOMEM;
}
- cpsw->cpts = cpts_create(cpsw->dev, cpts_regs, cpsw->dev->of_node);
+ cpts_node = of_get_child_by_name(cpsw->dev->of_node, "cpts");
+ if (!cpts_node)
+ cpts_node = cpsw->dev->of_node;
+
+ cpsw->cpts = cpts_create(cpsw->dev, cpts_regs, cpts_node);
if (IS_ERR(cpsw->cpts)) {
ret = PTR_ERR(cpsw->cpts);
cpdma_ctlr_destroy(cpsw->dma);
}
+ of_node_put(cpts_node);
+
+ return ret;
+}
+
+#if IS_ENABLED(CONFIG_TI_CPTS)
+
+static void cpsw_hwtstamp_v1(struct cpsw_priv *priv)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct cpsw_slave *slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
+ u32 ts_en, seq_id;
+
+ if (!priv->tx_ts_enabled && !priv->rx_ts_enabled) {
+ slave_write(slave, 0, CPSW1_TS_CTL);
+ return;
+ }
+
+ seq_id = (30 << CPSW_V1_SEQ_ID_OFS_SHIFT) | ETH_P_1588;
+ ts_en = EVENT_MSG_BITS << CPSW_V1_MSG_TYPE_OFS;
+
+ if (priv->tx_ts_enabled)
+ ts_en |= CPSW_V1_TS_TX_EN;
+
+ if (priv->rx_ts_enabled)
+ ts_en |= CPSW_V1_TS_RX_EN;
+
+ slave_write(slave, ts_en, CPSW1_TS_CTL);
+ slave_write(slave, seq_id, CPSW1_TS_SEQ_LTYPE);
+}
+
+static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct cpsw_slave *slave;
+ u32 ctrl, mtype;
+
+ slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
+
+ ctrl = slave_read(slave, CPSW2_CONTROL);
+ switch (cpsw->version) {
+ case CPSW_VERSION_2:
+ ctrl &= ~CTRL_V2_ALL_TS_MASK;
+
+ if (priv->tx_ts_enabled)
+ ctrl |= CTRL_V2_TX_TS_BITS;
+
+ if (priv->rx_ts_enabled)
+ ctrl |= CTRL_V2_RX_TS_BITS;
+ break;
+ case CPSW_VERSION_3:
+ default:
+ ctrl &= ~CTRL_V3_ALL_TS_MASK;
+
+ if (priv->tx_ts_enabled)
+ ctrl |= CTRL_V3_TX_TS_BITS;
+
+ if (priv->rx_ts_enabled)
+ ctrl |= CTRL_V3_RX_TS_BITS;
+ break;
+ }
+
+ mtype = (30 << TS_SEQ_ID_OFFSET_SHIFT) | EVENT_MSG_BITS;
+
+ slave_write(slave, mtype, CPSW2_TS_SEQ_MTYPE);
+ slave_write(slave, ctrl, CPSW2_CONTROL);
+ writel_relaxed(ETH_P_1588, &cpsw->regs->ts_ltype);
+ writel_relaxed(ETH_P_8021Q, &cpsw->regs->vlan_ltype);
+}
+
+static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
+{
+ struct cpsw_priv *priv = netdev_priv(dev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct hwtstamp_config cfg;
+
+ if (cpsw->version != CPSW_VERSION_1 &&
+ cpsw->version != CPSW_VERSION_2 &&
+ cpsw->version != CPSW_VERSION_3)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
+ return -EFAULT;
+
+ /* reserved for future extensions */
+ if (cfg.flags)
+ return -EINVAL;
+
+ if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON)
+ return -ERANGE;
+
+ switch (cfg.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ priv->rx_ts_enabled = 0;
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_NTP_ALL:
+ return -ERANGE;
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ priv->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+ cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ priv->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ priv->tx_ts_enabled = cfg.tx_type == HWTSTAMP_TX_ON;
+
+ switch (cpsw->version) {
+ case CPSW_VERSION_1:
+ cpsw_hwtstamp_v1(priv);
+ break;
+ case CPSW_VERSION_2:
+ case CPSW_VERSION_3:
+ cpsw_hwtstamp_v2(priv);
+ break;
+ default:
+ WARN_ON(1);
+ }
+
+ return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+}
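/* Editor's sketch: userspace side of the SIOCSHWTSTAMP path above,
 * using only the standard hwtstamp_config ABI (not specific to this
 * driver); "eth0" and the socket fd are caller-supplied assumptions.
 */
#include <linux/net_tstamp.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <string.h>
#include <sys/ioctl.h>

static int enable_hwtstamp(int fd)	/* fd: any AF_INET dgram socket */
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
	};
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", sizeof(ifr.ifr_name) - 1);
	ifr.ifr_data = (void *)&cfg;

	/* the driver writes back the filter it actually applied */
	return ioctl(fd, SIOCSHWTSTAMP, &ifr);
}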
+
+static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
+{
+ struct cpsw_common *cpsw = ndev_to_cpsw(dev);
+ struct cpsw_priv *priv = netdev_priv(dev);
+ struct hwtstamp_config cfg;
+
+ if (cpsw->version != CPSW_VERSION_1 &&
+ cpsw->version != CPSW_VERSION_2 &&
+ cpsw->version != CPSW_VERSION_3)
+ return -EOPNOTSUPP;
+
+ cfg.flags = 0;
+ cfg.tx_type = priv->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+ cfg.rx_filter = priv->rx_ts_enabled;
+
+ return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+}
+#else
+static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
+{
+ return -EOPNOTSUPP;
+}
+
+static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
+{
+ return -EOPNOTSUPP;
+}
+#endif /* CONFIG_TI_CPTS */
+
+int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
+{
+ struct cpsw_priv *priv = netdev_priv(dev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int slave_no = cpsw_slave_index(cpsw, priv);
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ return cpsw_hwtstamp_set(dev, req);
+ case SIOCGHWTSTAMP:
+ return cpsw_hwtstamp_get(dev, req);
+ }
+
+ if (!cpsw->slaves[slave_no].phy)
+ return -EOPNOTSUPP;
+ return phy_mii_ioctl(cpsw->slaves[slave_no].phy, req, cmd);
+}
+
+int cpsw_ndo_set_tx_maxrate(struct net_device *ndev, int queue, u32 rate)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct cpsw_slave *slave;
+ u32 min_rate;
+ u32 ch_rate;
+ int i, ret;
+
+ ch_rate = netdev_get_tx_queue(ndev, queue)->tx_maxrate;
+ if (ch_rate == rate)
+ return 0;
+
+ ch_rate = rate * 1000;
+ min_rate = cpdma_chan_get_min_rate(cpsw->dma);
+ if ((ch_rate < min_rate && ch_rate)) {
+ dev_err(priv->dev, "The channel rate cannot be less than %dMbps",
+ min_rate);
+ return -EINVAL;
+ }
+
+ if (rate > cpsw->speed) {
+ dev_err(priv->dev, "The channel rate cannot be more than 2Gbps");
+ return -EINVAL;
+ }
+
+ ret = pm_runtime_get_sync(cpsw->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(cpsw->dev);
+ return ret;
+ }
+
+ ret = cpdma_chan_set_rate(cpsw->txv[queue].ch, ch_rate);
+ pm_runtime_put(cpsw->dev);
+
+ if (ret)
+ return ret;
+
+ /* update rates for slaves' tx queues */
+ for (i = 0; i < cpsw->data.slaves; i++) {
+ slave = &cpsw->slaves[i];
+ if (!slave->ndev)
+ continue;
+
+ netdev_get_tx_queue(slave->ndev, queue)->tx_maxrate = rate;
+ }
+
+ cpsw_split_res(cpsw);
+ return ret;
+}
+
+static int cpsw_tc_to_fifo(int tc, int num_tc)
+{
+ if (tc == num_tc - 1)
+ return 0;
+
+ return CPSW_FIFO_SHAPERS_NUM - tc;
+}
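/* Editor's worked example: assuming CPSW_FIFO_SHAPERS_NUM == 3 (as
 * defined in cpsw_priv.h) and num_tc == 3, the mapping above is
 *	tc0 -> FIFO 3 (highest-priority shaper)
 *	tc1 -> FIFO 2
 *	tc2 -> FIFO 0 (last tc, the unshaped default queue)
 */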
+
+bool cpsw_shp_is_off(struct cpsw_priv *priv)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct cpsw_slave *slave;
+ u32 shift, mask, val;
+
+ val = readl_relaxed(&cpsw->regs->ptype);
+
+ slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
+ shift = CPSW_FIFO_SHAPE_EN_SHIFT + 3 * slave->slave_num;
+ mask = 7 << shift;
+ val = val & mask;
+
+ return !val;
+}
+
+static void cpsw_fifo_shp_on(struct cpsw_priv *priv, int fifo, int on)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct cpsw_slave *slave;
+ u32 shift, mask, val;
+
+ val = readl_relaxed(&cpsw->regs->ptype);
+
+ slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
+ shift = CPSW_FIFO_SHAPE_EN_SHIFT + 3 * slave->slave_num;
+ mask = (1 << --fifo) << shift;
+ val = on ? val | mask : val & ~mask;
+
+ writel_relaxed(val, &cpsw->regs->ptype);
+}
+
+static int cpsw_set_fifo_bw(struct cpsw_priv *priv, int fifo, int bw)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ u32 val = 0, send_pct, shift;
+ struct cpsw_slave *slave;
+ int pct = 0, i;
+
+ if (bw > priv->shp_cfg_speed * 1000)
+ goto err;
+
+ /* shaping has to stay enabled for the highest FIFOs linearly,
+ * and FIFO bw can be no more than the interface allows
+ */
+ slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
+ send_pct = slave_read(slave, SEND_PERCENT);
+ for (i = CPSW_FIFO_SHAPERS_NUM; i > 0; i--) {
+ if (!bw) {
+ if (i >= fifo || !priv->fifo_bw[i])
+ continue;
+
+ dev_warn(priv->dev, "Prev FIFO%d is shaped", i);
+ continue;
+ }
+
+ if (!priv->fifo_bw[i] && i > fifo) {
+ dev_err(priv->dev, "Upper FIFO%d is not shaped", i);
+ return -EINVAL;
+ }
+
+ shift = (i - 1) * 8;
+ if (i == fifo) {
+ send_pct &= ~(CPSW_PCT_MASK << shift);
+ val = DIV_ROUND_UP(bw, priv->shp_cfg_speed * 10);
+ if (!val)
+ val = 1;
+
+ send_pct |= val << shift;
+ pct += val;
+ continue;
+ }
+
+ if (priv->fifo_bw[i])
+ pct += (send_pct >> shift) & CPSW_PCT_MASK;
+ }
+
+ if (pct >= 100)
+ goto err;
+
+ slave_write(slave, send_pct, SEND_PERCENT);
+ priv->fifo_bw[fifo] = bw;
+
+ dev_warn(priv->dev, "set FIFO%d bw = %d\n", fifo,
+ DIV_ROUND_CLOSEST(val * priv->shp_cfg_speed, 100));
+
+ return 0;
+err:
+ dev_err(priv->dev, "Bandwidth doesn't fit in tc configuration");
+ return -EINVAL;
+}
+
+static int cpsw_set_fifo_rlimit(struct cpsw_priv *priv, int fifo, int bw)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct cpsw_slave *slave;
+ u32 tx_in_ctl_rg, val;
+ int ret;
+
+ ret = cpsw_set_fifo_bw(priv, fifo, bw);
+ if (ret)
+ return ret;
+
+ slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
+ tx_in_ctl_rg = cpsw->version == CPSW_VERSION_1 ?
+ CPSW1_TX_IN_CTL : CPSW2_TX_IN_CTL;
+
+ if (!bw)
+ cpsw_fifo_shp_on(priv, fifo, bw);
+
+ val = slave_read(slave, tx_in_ctl_rg);
+ if (cpsw_shp_is_off(priv)) {
+ /* disable FIFOs rate limited queues */
+ val &= ~(0xf << CPSW_FIFO_RATE_EN_SHIFT);
+
+ /* set type of FIFO queues to normal priority mode */
+ val &= ~(3 << CPSW_FIFO_QUEUE_TYPE_SHIFT);
+
+ /* set type of FIFO queues to be rate limited */
+ if (bw)
+ val |= 2 << CPSW_FIFO_QUEUE_TYPE_SHIFT;
+ else
+ priv->shp_cfg_speed = 0;
+ }
+
+ /* toggle a FIFO rate limited queue */
+ if (bw)
+ val |= BIT(fifo + CPSW_FIFO_RATE_EN_SHIFT);
+ else
+ val &= ~BIT(fifo + CPSW_FIFO_RATE_EN_SHIFT);
+ slave_write(slave, val, tx_in_ctl_rg);
+
+ /* FIFO transmit shape enable */
+ cpsw_fifo_shp_on(priv, fifo, bw);
+ return 0;
+}
+
+/* Defaults:
+ * class A - prio 3
+ * class B - prio 2
+ * shaping for class A should be set first
+ */
+static int cpsw_set_cbs(struct net_device *ndev,
+ struct tc_cbs_qopt_offload *qopt)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct cpsw_slave *slave;
+ int prev_speed = 0;
+ int tc, ret, fifo;
+ u32 bw = 0;
+
+ tc = netdev_txq_to_tc(priv->ndev, qopt->queue);
+
+ /* enable channels in backward order, as the highest FIFOs must be
+ * rate limited first and for compliance with CPDMA rate limited
+ * channels that are also used in backward order. FIFO0 cannot be
+ * rate limited.
+ */
+ fifo = cpsw_tc_to_fifo(tc, ndev->num_tc);
+ if (!fifo) {
+ dev_err(priv->dev, "Last tc%d can't be rate limited", tc);
+ return -EINVAL;
+ }
+
+ /* do nothing, it's disabled anyway */
+ if (!qopt->enable && !priv->fifo_bw[fifo])
+ return 0;
+
+ /* shapers can be set if link speed is known */
+ slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
+ if (slave->phy && slave->phy->link) {
+ if (priv->shp_cfg_speed &&
+ priv->shp_cfg_speed != slave->phy->speed)
+ prev_speed = priv->shp_cfg_speed;
+
+ priv->shp_cfg_speed = slave->phy->speed;
+ }
+
+ if (!priv->shp_cfg_speed) {
+ dev_err(priv->dev, "Link speed is not known");
+ return -1;
+ }
+
+ ret = pm_runtime_get_sync(cpsw->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(cpsw->dev);
+ return ret;
+ }
+
+ bw = qopt->enable ? qopt->idleslope : 0;
+ ret = cpsw_set_fifo_rlimit(priv, fifo, bw);
+ if (ret) {
+ priv->shp_cfg_speed = prev_speed;
+ prev_speed = 0;
+ }
+
+ if (bw && prev_speed)
+ dev_warn(priv->dev,
+ "Speed was changed, CBS shaper speeds are changed!");
+
+ pm_runtime_put_sync(cpsw->dev);
+ return ret;
+}
+
+static int cpsw_set_mqprio(struct net_device *ndev, void *type_data)
+{
+ struct tc_mqprio_qopt_offload *mqprio = type_data;
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int fifo, num_tc, count, offset;
+ struct cpsw_slave *slave;
+ u32 tx_prio_map = 0;
+ int i, tc, ret;
+
+ num_tc = mqprio->qopt.num_tc;
+ if (num_tc > CPSW_TC_NUM)
+ return -EINVAL;
+
+ if (mqprio->mode != TC_MQPRIO_MODE_DCB)
+ return -EINVAL;
+ ret = pm_runtime_get_sync(cpsw->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(cpsw->dev);
+ return ret;
+ }
+
+ if (num_tc) {
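+ /* the TX priority map register holds a 4-bit FIFO
+ * number for each of the eight packet priorities
+ */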
+ for (i = 0; i < 8; i++) {
+ tc = mqprio->qopt.prio_tc_map[i];
+ fifo = cpsw_tc_to_fifo(tc, num_tc);
+ tx_prio_map |= fifo << (4 * i);
+ }
+
+ netdev_set_num_tc(ndev, num_tc);
+ for (i = 0; i < num_tc; i++) {
+ count = mqprio->qopt.count[i];
+ offset = mqprio->qopt.offset[i];
+ netdev_set_tc_queue(ndev, i, count, offset);
+ }
+ }
+
+ if (!mqprio->qopt.hw) {
+ /* restore default configuration */
+ netdev_reset_tc(ndev);
+ tx_prio_map = TX_PRIORITY_MAPPING;
+ }
+
+ priv->mqprio_hw = mqprio->qopt.hw;
+
+ offset = cpsw->version == CPSW_VERSION_1 ?
+ CPSW1_TX_PRI_MAP : CPSW2_TX_PRI_MAP;
+
+ slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
+ slave_write(slave, tx_prio_map, offset);
+
+ pm_runtime_put_sync(cpsw->dev);
+
+ return 0;
+}
+
+int cpsw_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
+ void *type_data)
+{
+ switch (type) {
+ case TC_SETUP_QDISC_CBS:
+ return cpsw_set_cbs(ndev, type_data);
+
+ case TC_SETUP_QDISC_MQPRIO:
+ return cpsw_set_mqprio(ndev, type_data);
+
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+void cpsw_cbs_resume(struct cpsw_slave *slave, struct cpsw_priv *priv)
+{
+ int fifo, bw;
+
+ for (fifo = CPSW_FIFO_SHAPERS_NUM; fifo > 0; fifo--) {
+ bw = priv->fifo_bw[fifo];
+ if (!bw)
+ continue;
+
+ cpsw_set_fifo_rlimit(priv, fifo, bw);
+ }
+}
+
+void cpsw_mqprio_resume(struct cpsw_slave *slave, struct cpsw_priv *priv)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ u32 tx_prio_map = 0;
+ int i, tc, fifo;
+ u32 tx_prio_rg;
+
+ if (!priv->mqprio_hw)
+ return;
+
+ for (i = 0; i < 8; i++) {
+ tc = netdev_get_prio_tc_map(priv->ndev, i);
+ fifo = CPSW_FIFO_SHAPERS_NUM - tc;
+ tx_prio_map |= fifo << (4 * i);
+ }
+
+ tx_prio_rg = cpsw->version == CPSW_VERSION_1 ?
+ CPSW1_TX_PRI_MAP : CPSW2_TX_PRI_MAP;
+
+ slave_write(slave, tx_prio_map, tx_prio_rg);
+}
+
+int cpsw_fill_rx_channels(struct cpsw_priv *priv)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct cpsw_meta_xdp *xmeta;
+ struct page_pool *pool;
+ struct page *page;
+ int ch_buf_num;
+ int ch, i, ret;
+ dma_addr_t dma;
+
+ for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
+ pool = cpsw->page_pool[ch];
+ ch_buf_num = cpdma_chan_get_rx_buf_num(cpsw->rxv[ch].ch);
+ for (i = 0; i < ch_buf_num; i++) {
+ page = page_pool_dev_alloc_pages(pool);
+ if (!page) {
+ cpsw_err(priv, ifup, "allocate rx page err\n");
+ return -ENOMEM;
+ }
+
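+ /* stash the target ndev and channel in the page headroom
+ * so the RX completion path can tell where the buffer belongs
+ */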
+ xmeta = page_address(page) + CPSW_XMETA_OFFSET;
+ xmeta->ndev = priv->ndev;
+ xmeta->ch = ch;
+
+ dma = page_pool_get_dma_addr(page) + CPSW_HEADROOM;
+ ret = cpdma_chan_idle_submit_mapped(cpsw->rxv[ch].ch,
+ page, dma,
+ cpsw->rx_packet_max,
+ 0);
+ if (ret < 0) {
+ cpsw_err(priv, ifup,
+ "cannot submit page to channel %d rx, error %d\n",
+ ch, ret);
+ page_pool_recycle_direct(pool, page);
+ return ret;
+ }
+ }
+
+ cpsw_info(priv, ifup, "ch %d rx, submitted %d descriptors\n",
+ ch, ch_buf_num);
+ }
+
+ return 0;
+}
+
+static struct page_pool *cpsw_create_page_pool(struct cpsw_common *cpsw,
+ int size)
+{
+ struct page_pool_params pp_params;
+ struct page_pool *pool;
+
+ pp_params.order = 0;
+ pp_params.flags = PP_FLAG_DMA_MAP;
+ pp_params.pool_size = size;
+ pp_params.nid = NUMA_NO_NODE;
+ pp_params.dma_dir = DMA_BIDIRECTIONAL;
+ pp_params.dev = cpsw->dev;
+
+ pool = page_pool_create(&pp_params);
+ if (IS_ERR(pool))
+ dev_err(cpsw->dev, "cannot create rx page pool\n");
+
+ return pool;
+}
+
+static int cpsw_create_rx_pool(struct cpsw_common *cpsw, int ch)
+{
+ struct page_pool *pool;
+ int ret = 0, pool_size;
+
+ pool_size = cpdma_chan_get_rx_buf_num(cpsw->rxv[ch].ch);
+ pool = cpsw_create_page_pool(cpsw, pool_size);
+ if (IS_ERR(pool))
+ ret = PTR_ERR(pool);
+ else
+ cpsw->page_pool[ch] = pool;
+
+ return ret;
+}
+
+static int cpsw_ndev_create_xdp_rxq(struct cpsw_priv *priv, int ch)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct xdp_rxq_info *rxq;
+ struct page_pool *pool;
+ int ret;
+
+ pool = cpsw->page_pool[ch];
+ rxq = &priv->xdp_rxq[ch];
+
+ ret = xdp_rxq_info_reg(rxq, priv->ndev, ch);
+ if (ret)
+ return ret;
+
+ ret = xdp_rxq_info_reg_mem_model(rxq, MEM_TYPE_PAGE_POOL, pool);
+ if (ret)
+ xdp_rxq_info_unreg(rxq);
+
+ return ret;
+}
+
+static void cpsw_ndev_destroy_xdp_rxq(struct cpsw_priv *priv, int ch)
+{
+ struct xdp_rxq_info *rxq = &priv->xdp_rxq[ch];
+
+ if (!xdp_rxq_info_is_reg(rxq))
+ return;
+
+ xdp_rxq_info_unreg(rxq);
+}
+
+void cpsw_destroy_xdp_rxqs(struct cpsw_common *cpsw)
+{
+ struct net_device *ndev;
+ int i, ch;
+
+ for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
+ for (i = 0; i < cpsw->data.slaves; i++) {
+ ndev = cpsw->slaves[i].ndev;
+ if (!ndev)
+ continue;
+
+ cpsw_ndev_destroy_xdp_rxq(netdev_priv(ndev), ch);
+ }
+
+ page_pool_destroy(cpsw->page_pool[ch]);
+ cpsw->page_pool[ch] = NULL;
+ }
+}
+
+int cpsw_create_xdp_rxqs(struct cpsw_common *cpsw)
+{
+ struct net_device *ndev;
+ int i, ch, ret;
+
+ for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
+ ret = cpsw_create_rx_pool(cpsw, ch);
+ if (ret)
+ goto err_cleanup;
+
+ /* using the same page pool is allowed as RX handlers never
+ * run simultaneously for both ndevs
+ */
+ for (i = 0; i < cpsw->data.slaves; i++) {
+ ndev = cpsw->slaves[i].ndev;
+ if (!ndev)
+ continue;
+
+ ret = cpsw_ndev_create_xdp_rxq(netdev_priv(ndev), ch);
+ if (ret)
+ goto err_cleanup;
+ }
+ }
+
+ return 0;
+
+err_cleanup:
+ cpsw_destroy_xdp_rxqs(cpsw);
+
+ return ret;
+}
+
+static int cpsw_xdp_prog_setup(struct cpsw_priv *priv, struct netdev_bpf *bpf)
+{
+ struct bpf_prog *prog = bpf->prog;
+
+ if (!priv->xdpi.prog && !prog)
+ return 0;
+
+ if (!xdp_attachment_flags_ok(&priv->xdpi, bpf))
+ return -EBUSY;
+
+ WRITE_ONCE(priv->xdp_prog, prog);
+
+ xdp_attachment_setup(&priv->xdpi, bpf);
+
+ return 0;
+}
+
+int cpsw_ndo_bpf(struct net_device *ndev, struct netdev_bpf *bpf)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+
+ switch (bpf->command) {
+ case XDP_SETUP_PROG:
+ return cpsw_xdp_prog_setup(priv, bpf);
+
+ case XDP_QUERY_PROG:
+ return xdp_attachment_query(&priv->xdpi, bpf);
+
+ default:
+ return -EINVAL;
+ }
+}
+
+int cpsw_xdp_tx_frame(struct cpsw_priv *priv, struct xdp_frame *xdpf,
+ struct page *page, int port)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct cpsw_meta_xdp *xmeta;
+ struct cpdma_chan *txch;
+ dma_addr_t dma;
+ int ret;
+
+ xmeta = (void *)xdpf + CPSW_XMETA_OFFSET;
+ xmeta->ndev = priv->ndev;
+ xmeta->ch = 0;
+ txch = cpsw->txv[0].ch;
+
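+ /* frames backed by a pool page already carry a DMA mapping;
+ * otherwise submit the frame data for mapping by cpdma
+ */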
+ if (page) {
+ dma = page_pool_get_dma_addr(page);
+ dma += xdpf->headroom + sizeof(struct xdp_frame);
+ ret = cpdma_chan_submit_mapped(txch, cpsw_xdpf_to_handle(xdpf),
+ dma, xdpf->len, port);
+ } else {
+ if (sizeof(*xmeta) > xdpf->headroom) {
+ xdp_return_frame_rx_napi(xdpf);
+ return -EINVAL;
+ }
+
+ ret = cpdma_chan_submit(txch, cpsw_xdpf_to_handle(xdpf),
+ xdpf->data, xdpf->len, port);
+ }
+
+ if (ret) {
+ priv->ndev->stats.tx_dropped++;
+ xdp_return_frame_rx_napi(xdpf);
+ }
+
+ return ret;
+}
+
+int cpsw_run_xdp(struct cpsw_priv *priv, int ch, struct xdp_buff *xdp,
+ struct page *page, int port)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct net_device *ndev = priv->ndev;
+ int ret = CPSW_XDP_CONSUMED;
+ struct xdp_frame *xdpf;
+ struct bpf_prog *prog;
+ u32 act;
+
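+ /* the XDP program is swapped via WRITE_ONCE() in
+ * cpsw_xdp_prog_setup(); READ_ONCE() under RCU keeps it
+ * stable for the duration of this run
+ */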
+ rcu_read_lock();
+
+ prog = READ_ONCE(priv->xdp_prog);
+ if (!prog) {
+ ret = CPSW_XDP_PASS;
+ goto out;
+ }
+
+ act = bpf_prog_run_xdp(prog, xdp);
+ switch (act) {
+ case XDP_PASS:
+ ret = CPSW_XDP_PASS;
+ break;
+ case XDP_TX:
+ xdpf = convert_to_xdp_frame(xdp);
+ if (unlikely(!xdpf))
+ goto drop;
+
+ cpsw_xdp_tx_frame(priv, xdpf, page, port);
+ break;
+ case XDP_REDIRECT:
+ if (xdp_do_redirect(ndev, xdp, prog))
+ goto drop;
+
+ /* Have to flush here, per packet, instead of doing it in bulk
+ * at the end of the napi handler. The RX devices on this
+ * particular hardware share a common queue, so the
+ * incoming device might change per packet.
+ */
+ xdp_do_flush_map();
+ break;
+ default:
+ bpf_warn_invalid_xdp_action(act);
+ /* fall through */
+ case XDP_ABORTED:
+ trace_xdp_exception(ndev, prog, act);
+ /* fall through -- handle aborts by dropping packet */
+ case XDP_DROP:
+ goto drop;
+ }
+out:
+ rcu_read_unlock();
+ return ret;
+drop:
+ rcu_read_unlock();
+ page_pool_recycle_direct(cpsw->page_pool[ch], page);
return ret;
}
diff --git a/drivers/net/ethernet/ti/cpsw_priv.h b/drivers/net/ethernet/ti/cpsw_priv.h
index 362c5a986869..bc726356a72c 100644
--- a/drivers/net/ethernet/ti/cpsw_priv.h
+++ b/drivers/net/ethernet/ti/cpsw_priv.h
@@ -54,6 +54,7 @@ do { \
#define HOST_PORT_NUM 0
#define CPSW_ALE_PORTS_NUM 3
+#define CPSW_SLAVE_PORTS_NUM 2
#define SLIVER_SIZE 0x40
#define CPSW1_HOST_PORT_OFFSET 0x028
@@ -65,6 +66,7 @@ do { \
#define CPSW1_CPTS_OFFSET 0x500
#define CPSW1_ALE_OFFSET 0x600
#define CPSW1_SLIVER_OFFSET 0x700
+#define CPSW1_WR_OFFSET 0x900
#define CPSW2_HOST_PORT_OFFSET 0x108
#define CPSW2_SLAVE_OFFSET 0x200
@@ -76,6 +78,7 @@ do { \
#define CPSW2_ALE_OFFSET 0xd00
#define CPSW2_SLIVER_OFFSET 0xd80
#define CPSW2_BD_OFFSET 0x2000
+#define CPSW2_WR_OFFSET 0x1200
#define CPDMA_RXTHRESH 0x0c0
#define CPDMA_RXFREE 0x0e0
@@ -113,12 +116,15 @@ do { \
#define IRQ_NUM 2
#define CPSW_MAX_QUEUES 8
#define CPSW_CPDMA_DESCS_POOL_SIZE_DEFAULT 256
+#define CPSW_ALE_AGEOUT_DEFAULT 10 /* sec */
+#define CPSW_ALE_NUM_ENTRIES 1024
#define CPSW_FIFO_QUEUE_TYPE_SHIFT 16
#define CPSW_FIFO_SHAPE_EN_SHIFT 16
#define CPSW_FIFO_RATE_EN_SHIFT 20
#define CPSW_TC_NUM 4
#define CPSW_FIFO_SHAPERS_NUM (CPSW_TC_NUM - 1)
#define CPSW_PCT_MASK 0x7f
+#define CPSW_BD_RAM_SIZE 0x2000
#define CPSW_RX_VLAN_ENCAP_HDR_PRIO_SHIFT 29
#define CPSW_RX_VLAN_ENCAP_HDR_PRIO_MSK GENMASK(2, 0)
@@ -275,10 +281,11 @@ struct cpsw_slave_data {
struct device_node *slave_node;
struct device_node *phy_node;
char phy_id[MII_BUS_ID_SIZE];
- int phy_if;
+ phy_interface_t phy_if;
u8 mac_addr[ETH_ALEN];
u16 dual_emac_res_vlan; /* Reserved VLAN for DualEMAC */
struct phy *ifphy;
+ bool disabled;
};
struct cpsw_platform_data {
@@ -286,9 +293,9 @@ struct cpsw_platform_data {
u32 ss_reg_ofs; /* Subsystem control register offset */
u32 channels; /* number of cpdma channels (symmetric) */
u32 slaves; /* number of slave cpgmac ports */
- u32 active_slave; /* time stamping, ethtool and SIOCGMIIPHY slave */
+ u32 active_slave;/* time stamping, ethtool and SIOCGMIIPHY slave */
u32 ale_entries; /* ale table size */
- u32 bd_ram_size; /*buffer descriptor ram size */
+ u32 bd_ram_size; /*buffer descriptor ram size */
u32 mac_control; /* Mac control register */
u16 default_vlan; /* Def VLAN for ALE lookup in VLAN aware mode*/
bool dual_emac; /* Enable Dual EMAC mode */
@@ -344,10 +351,15 @@ struct cpsw_common {
bool tx_irq_disabled;
u32 irqs_table[IRQ_NUM];
struct cpts *cpts;
+ struct devlink *devlink;
int rx_ch_num, tx_ch_num;
int speed;
int usage_count;
struct page_pool *page_pool[CPSW_MAX_QUEUES];
+ u8 br_members;
+ struct net_device *hw_bridge_dev;
+ bool ale_bypass;
+ u8 base_mac[ETH_ALEN];
};
struct cpsw_priv {
@@ -368,19 +380,14 @@ struct cpsw_priv {
u32 emac_port;
struct cpsw_common *cpsw;
+ int offload_fwd_mark;
};
#define ndev_to_cpsw(ndev) (((struct cpsw_priv *)netdev_priv(ndev))->cpsw)
#define napi_to_cpsw(napi) container_of(napi, struct cpsw_common, napi)
-#define cpsw_slave_index(cpsw, priv) \
- ((cpsw->data.dual_emac) ? priv->emac_port : \
- cpsw->data.active_slave)
-
-static inline int cpsw_get_slave_port(u32 slave_num)
-{
- return slave_num + 1;
-}
+extern int (*cpsw_slave_index)(struct cpsw_common *cpsw,
+ struct cpsw_priv *priv);
struct addr_sync_ctx {
struct net_device *ndev;
@@ -389,6 +396,35 @@ struct addr_sync_ctx {
int flush; /* flush flag */
};
+#define CPSW_XMETA_OFFSET ALIGN(sizeof(struct xdp_frame), sizeof(long))
+
+#define CPSW_XDP_CONSUMED 1
+#define CPSW_XDP_PASS 0
+
+struct __aligned(sizeof(long)) cpsw_meta_xdp {
+ struct net_device *ndev;
+ int ch;
+};
+
+/* The buf includes headroom compatible with both skb and xdpf */
+#define CPSW_HEADROOM_NA (max(XDP_PACKET_HEADROOM, NET_SKB_PAD) + NET_IP_ALIGN)
+#define CPSW_HEADROOM ALIGN(CPSW_HEADROOM_NA, sizeof(long))
+
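+/* xdp_frame pointers are at least sizeof(long) aligned, so bit 0 is
+ * free to distinguish them from other cpdma submit tokens
+ */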
+static inline int cpsw_is_xdpf_handle(void *handle)
+{
+ return (unsigned long)handle & BIT(0);
+}
+
+static inline void *cpsw_xdpf_to_handle(struct xdp_frame *xdpf)
+{
+ return (void *)((unsigned long)xdpf | BIT(0));
+}
+
+static inline struct xdp_frame *cpsw_handle_to_xdpf(void *handle)
+{
+ return (struct xdp_frame *)((unsigned long)handle & ~BIT(0));
+}
+
int cpsw_init_common(struct cpsw_common *cpsw, void __iomem *ss_regs,
int ale_ageout, phys_addr_t desc_mem_phys,
int descs_pool_size);
@@ -399,6 +435,29 @@ void cpsw_intr_disable(struct cpsw_common *cpsw);
void cpsw_tx_handler(void *token, int len, int status);
int cpsw_create_xdp_rxqs(struct cpsw_common *cpsw);
void cpsw_destroy_xdp_rxqs(struct cpsw_common *cpsw);
+int cpsw_ndo_bpf(struct net_device *ndev, struct netdev_bpf *bpf);
+int cpsw_xdp_tx_frame(struct cpsw_priv *priv, struct xdp_frame *xdpf,
+ struct page *page, int port);
+int cpsw_run_xdp(struct cpsw_priv *priv, int ch, struct xdp_buff *xdp,
+ struct page *page, int port);
+irqreturn_t cpsw_tx_interrupt(int irq, void *dev_id);
+irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id);
+int cpsw_tx_mq_poll(struct napi_struct *napi_tx, int budget);
+int cpsw_tx_poll(struct napi_struct *napi_tx, int budget);
+int cpsw_rx_mq_poll(struct napi_struct *napi_rx, int budget);
+int cpsw_rx_poll(struct napi_struct *napi_rx, int budget);
+void cpsw_rx_vlan_encap(struct sk_buff *skb);
+void soft_reset(const char *module, void __iomem *reg);
+void cpsw_set_slave_mac(struct cpsw_slave *slave, struct cpsw_priv *priv);
+void cpsw_ndo_tx_timeout(struct net_device *ndev);
+int cpsw_need_resplit(struct cpsw_common *cpsw);
+int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd);
+int cpsw_ndo_set_tx_maxrate(struct net_device *ndev, int queue, u32 rate);
+int cpsw_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
+ void *type_data);
+bool cpsw_shp_is_off(struct cpsw_priv *priv);
+void cpsw_cbs_resume(struct cpsw_slave *slave, struct cpsw_priv *priv);
+void cpsw_mqprio_resume(struct cpsw_slave *slave, struct cpsw_priv *priv);
/* ethtool */
u32 cpsw_get_msglevel(struct net_device *ndev);
diff --git a/drivers/net/ethernet/ti/cpsw_switchdev.c b/drivers/net/ethernet/ti/cpsw_switchdev.c
new file mode 100644
index 000000000000..985a929bb957
--- /dev/null
+++ b/drivers/net/ethernet/ti/cpsw_switchdev.c
@@ -0,0 +1,589 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Texas Instruments switchdev Driver
+ *
+ * Copyright (C) 2019 Texas Instruments
+ *
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/if_bridge.h>
+#include <linux/netdevice.h>
+#include <linux/workqueue.h>
+#include <net/switchdev.h>
+
+#include "cpsw.h"
+#include "cpsw_ale.h"
+#include "cpsw_priv.h"
+#include "cpsw_switchdev.h"
+
+struct cpsw_switchdev_event_work {
+ struct work_struct work;
+ struct switchdev_notifier_fdb_info fdb_info;
+ struct cpsw_priv *priv;
+ unsigned long event;
+};
+
+static int cpsw_port_stp_state_set(struct cpsw_priv *priv,
+ struct switchdev_trans *trans, u8 state)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ u8 cpsw_state;
+ int ret = 0;
+
+ if (switchdev_trans_ph_prepare(trans))
+ return 0;
+
+ switch (state) {
+ case BR_STATE_FORWARDING:
+ cpsw_state = ALE_PORT_STATE_FORWARD;
+ break;
+ case BR_STATE_LEARNING:
+ cpsw_state = ALE_PORT_STATE_LEARN;
+ break;
+ case BR_STATE_DISABLED:
+ cpsw_state = ALE_PORT_STATE_DISABLE;
+ break;
+ case BR_STATE_LISTENING:
+ case BR_STATE_BLOCKING:
+ cpsw_state = ALE_PORT_STATE_BLOCK;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ ret = cpsw_ale_control_set(cpsw->ale, priv->emac_port,
+ ALE_PORT_STATE, cpsw_state);
+ dev_dbg(priv->dev, "ale state: %u\n", cpsw_state);
+
+ return ret;
+}
+
+static int cpsw_port_attr_br_flags_set(struct cpsw_priv *priv,
+ struct switchdev_trans *trans,
+ struct net_device *orig_dev,
+ unsigned long brport_flags)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ bool unreg_mcast_add = false;
+
+ if (switchdev_trans_ph_prepare(trans))
+ return 0;
+
+ if (brport_flags & BR_MCAST_FLOOD)
+ unreg_mcast_add = true;
+ dev_dbg(priv->dev, "BR_MCAST_FLOOD: %d port %u\n",
+ unreg_mcast_add, priv->emac_port);
+
+ cpsw_ale_set_unreg_mcast(cpsw->ale, BIT(priv->emac_port),
+ unreg_mcast_add);
+
+ return 0;
+}
+
+static int cpsw_port_attr_br_flags_pre_set(struct net_device *netdev,
+ struct switchdev_trans *trans,
+ unsigned long flags)
+{
+ if (flags & ~(BR_LEARNING | BR_MCAST_FLOOD))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int cpsw_port_attr_set(struct net_device *ndev,
+ const struct switchdev_attr *attr,
+ struct switchdev_trans *trans)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ int ret;
+
+ dev_dbg(priv->dev, "attr: id %u port: %u\n", attr->id, priv->emac_port);
+
+ switch (attr->id) {
+ case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
+ ret = cpsw_port_attr_br_flags_pre_set(ndev, trans,
+ attr->u.brport_flags);
+ break;
+ case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
+ ret = cpsw_port_stp_state_set(priv, trans, attr->u.stp_state);
+ dev_dbg(priv->dev, "stp state: %u\n", attr->u.stp_state);
+ break;
+ case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
+ ret = cpsw_port_attr_br_flags_set(priv, trans, attr->orig_dev,
+ attr->u.brport_flags);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ return ret;
+}
+
+static u16 cpsw_get_pvid(struct cpsw_priv *priv)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ u32 __iomem *port_vlan_reg;
+ u32 pvid;
+
+ if (priv->emac_port) {
+ int reg = CPSW2_PORT_VLAN;
+
+ if (cpsw->version == CPSW_VERSION_1)
+ reg = CPSW1_PORT_VLAN;
+ pvid = slave_read(cpsw->slaves + (priv->emac_port - 1), reg);
+ } else {
+ port_vlan_reg = &cpsw->host_port_regs->port_vlan;
+ pvid = readl(port_vlan_reg);
+ }
+
+ pvid = pvid & 0xfff;
+
+ return pvid;
+}
+
+static void cpsw_set_pvid(struct cpsw_priv *priv, u16 vid, bool cfi, u32 cos)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ void __iomem *port_vlan_reg;
+ u32 pvid;
+
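+ /* port VLAN register layout: VID in bits 11:0, CFI in bit 12,
+ * priority (cos) in bits 15:13
+ */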
+ pvid = vid;
+ pvid |= cfi ? BIT(12) : 0;
+ pvid |= (cos & 0x7) << 13;
+
+ if (priv->emac_port) {
+ int reg = CPSW2_PORT_VLAN;
+
+ if (cpsw->version == CPSW_VERSION_1)
+ reg = CPSW1_PORT_VLAN;
+ /* no barrier */
+ slave_write(cpsw->slaves + (priv->emac_port - 1), pvid, reg);
+ } else {
+ /* CPU port */
+ port_vlan_reg = &cpsw->host_port_regs->port_vlan;
+ writel(pvid, port_vlan_reg);
+ }
+}
+
+static int cpsw_port_vlan_add(struct cpsw_priv *priv, bool untag, bool pvid,
+ u16 vid, struct net_device *orig_dev)
+{
+ bool cpu_port = netif_is_bridge_master(orig_dev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int unreg_mcast_mask = 0;
+ int reg_mcast_mask = 0;
+ int untag_mask = 0;
+ int port_mask;
+ int ret = 0;
+ u32 flags;
+
+ if (cpu_port) {
+ port_mask = BIT(HOST_PORT_NUM);
+ flags = orig_dev->flags;
+ unreg_mcast_mask = port_mask;
+ } else {
+ port_mask = BIT(priv->emac_port);
+ flags = priv->ndev->flags;
+ }
+
+ if (flags & IFF_MULTICAST)
+ reg_mcast_mask = port_mask;
+
+ if (untag)
+ untag_mask = port_mask;
+
+ ret = cpsw_ale_vlan_add_modify(cpsw->ale, vid, port_mask, untag_mask,
+ reg_mcast_mask, unreg_mcast_mask);
+ if (ret) {
+ dev_err(priv->dev, "Unable to add vlan\n");
+ return ret;
+ }
+
+ if (cpu_port)
+ cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr,
+ HOST_PORT_NUM, ALE_VLAN, vid);
+ if (!pvid)
+ return ret;
+
+ cpsw_set_pvid(priv, vid, 0, 0);
+
+ dev_dbg(priv->dev, "VID add: %s: vid:%u ports:%X\n",
+ priv->ndev->name, vid, port_mask);
+ return ret;
+}
+
+static int cpsw_port_vlan_del(struct cpsw_priv *priv, u16 vid,
+ struct net_device *orig_dev)
+{
+ bool cpu_port = netif_is_bridge_master(orig_dev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int port_mask;
+ int ret = 0;
+
+ if (cpu_port)
+ port_mask = BIT(HOST_PORT_NUM);
+ else
+ port_mask = BIT(priv->emac_port);
+
+ ret = cpsw_ale_del_vlan(cpsw->ale, vid, port_mask);
+ if (ret != 0)
+ return ret;
+
+ /* We don't care about the return value here; an error is
+ * returned only if the unicast entry is not present
+ */
+ if (cpu_port)
+ cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
+ HOST_PORT_NUM, ALE_VLAN, vid);
+
+ if (vid == cpsw_get_pvid(priv))
+ cpsw_set_pvid(priv, 0, 0, 0);
+
+ /* We don't care about the return value here; an error is
+ * returned only if the multicast entry is not present
+ */
+ cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast,
+ port_mask, ALE_VLAN, vid);
+ dev_dbg(priv->dev, "VID del: %s: vid:%u ports:%X\n",
+ priv->ndev->name, vid, port_mask);
+
+ return ret;
+}
+
+static int cpsw_port_vlans_add(struct cpsw_priv *priv,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct switchdev_trans *trans)
+{
+ bool untag = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ struct net_device *orig_dev = vlan->obj.orig_dev;
+ bool cpu_port = netif_is_bridge_master(orig_dev);
+ bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
+ u16 vid;
+
+ dev_dbg(priv->dev, "VID add: %s: vid:%u flags:%X\n",
+ priv->ndev->name, vlan->vid_begin, vlan->flags);
+
+ if (cpu_port && !(vlan->flags & BRIDGE_VLAN_INFO_BRENTRY))
+ return 0;
+
+ if (switchdev_trans_ph_prepare(trans))
+ return 0;
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
+ int err;
+
+ err = cpsw_port_vlan_add(priv, untag, pvid, vid, orig_dev);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int cpsw_port_vlans_del(struct cpsw_priv *priv,
+ const struct switchdev_obj_port_vlan *vlan)
+
+{
+ struct net_device *orig_dev = vlan->obj.orig_dev;
+ u16 vid;
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
+ int err;
+
+ err = cpsw_port_vlan_del(priv, vid, orig_dev);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int cpsw_port_mdb_add(struct cpsw_priv *priv,
+ struct switchdev_obj_port_mdb *mdb,
+ struct switchdev_trans *trans)
+
+{
+ struct net_device *orig_dev = mdb->obj.orig_dev;
+ bool cpu_port = netif_is_bridge_master(orig_dev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int port_mask;
+ int err;
+
+ if (switchdev_trans_ph_prepare(trans))
+ return 0;
+
+ if (cpu_port)
+ port_mask = BIT(HOST_PORT_NUM);
+ else
+ port_mask = BIT(priv->emac_port);
+
+ err = cpsw_ale_add_mcast(cpsw->ale, mdb->addr, port_mask,
+ ALE_VLAN, mdb->vid, 0);
+ dev_dbg(priv->dev, "MDB add: %s: vid %u:%pM ports: %X\n",
+ priv->ndev->name, mdb->vid, mdb->addr, port_mask);
+
+ return err;
+}
+
+static int cpsw_port_mdb_del(struct cpsw_priv *priv,
+ struct switchdev_obj_port_mdb *mdb)
+
+{
+ struct net_device *orig_dev = mdb->obj.orig_dev;
+ bool cpu_port = netif_is_bridge_master(orig_dev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int del_mask;
+ int err;
+
+ if (cpu_port)
+ del_mask = BIT(HOST_PORT_NUM);
+ else
+ del_mask = BIT(priv->emac_port);
+
+ err = cpsw_ale_del_mcast(cpsw->ale, mdb->addr, del_mask,
+ ALE_VLAN, mdb->vid);
+ dev_dbg(priv->dev, "MDB del: %s: vid %u:%pM ports: %X\n",
+ priv->ndev->name, mdb->vid, mdb->addr, del_mask);
+
+ return err;
+}
+
+static int cpsw_port_obj_add(struct net_device *ndev,
+ const struct switchdev_obj *obj,
+ struct switchdev_trans *trans,
+ struct netlink_ext_ack *extack)
+{
+ struct switchdev_obj_port_vlan *vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
+ struct switchdev_obj_port_mdb *mdb = SWITCHDEV_OBJ_PORT_MDB(obj);
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ int err = 0;
+
+ dev_dbg(priv->dev, "obj_add: id %u port: %u\n",
+ obj->id, priv->emac_port);
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ err = cpsw_port_vlans_add(priv, vlan, trans);
+ break;
+ case SWITCHDEV_OBJ_ID_PORT_MDB:
+ case SWITCHDEV_OBJ_ID_HOST_MDB:
+ err = cpsw_port_mdb_add(priv, mdb, trans);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static int cpsw_port_obj_del(struct net_device *ndev,
+ const struct switchdev_obj *obj)
+{
+ struct switchdev_obj_port_vlan *vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
+ struct switchdev_obj_port_mdb *mdb = SWITCHDEV_OBJ_PORT_MDB(obj);
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ int err = 0;
+
+ dev_dbg(priv->dev, "obj_del: id %u port: %u\n",
+ obj->id, priv->emac_port);
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ err = cpsw_port_vlans_del(priv, vlan);
+ break;
+ case SWITCHDEV_OBJ_ID_PORT_MDB:
+ case SWITCHDEV_OBJ_ID_HOST_MDB:
+ err = cpsw_port_mdb_del(priv, mdb);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static void cpsw_fdb_offload_notify(struct net_device *ndev,
+ struct switchdev_notifier_fdb_info *rcv)
+{
+ struct switchdev_notifier_fdb_info info;
+
+ info.addr = rcv->addr;
+ info.vid = rcv->vid;
+ info.offloaded = true;
+ call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED,
+ ndev, &info.info, NULL);
+}
+
+static void cpsw_switchdev_event_work(struct work_struct *work)
+{
+ struct cpsw_switchdev_event_work *switchdev_work =
+ container_of(work, struct cpsw_switchdev_event_work, work);
+ struct cpsw_priv *priv = switchdev_work->priv;
+ struct switchdev_notifier_fdb_info *fdb;
+ struct cpsw_common *cpsw = priv->cpsw;
+ int port = priv->emac_port;
+
+ rtnl_lock();
+ switch (switchdev_work->event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ fdb = &switchdev_work->fdb_info;
+
+ dev_dbg(cpsw->dev, "cpsw_fdb_add: MACID = %pM vid = %u flags = %u %u -- port %d\n",
+ fdb->addr, fdb->vid, fdb->added_by_user,
+ fdb->offloaded, port);
+
+ if (!fdb->added_by_user)
+ break;
+ if (memcmp(priv->mac_addr, (u8 *)fdb->addr, ETH_ALEN) == 0)
+ port = HOST_PORT_NUM;
+
+ cpsw_ale_add_ucast(cpsw->ale, (u8 *)fdb->addr, port,
+ fdb->vid ? ALE_VLAN : 0, fdb->vid);
+ cpsw_fdb_offload_notify(priv->ndev, fdb);
+ break;
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ fdb = &switchdev_work->fdb_info;
+
+ dev_dbg(cpsw->dev, "cpsw_fdb_del: MACID = %pM vid = %u flags = %u %u -- port %d\n",
+ fdb->addr, fdb->vid, fdb->added_by_user,
+ fdb->offloaded, port);
+
+ if (!fdb->added_by_user)
+ break;
+ if (memcmp(priv->mac_addr, (u8 *)fdb->addr, ETH_ALEN) == 0)
+ port = HOST_PORT_NUM;
+
+ cpsw_ale_del_ucast(cpsw->ale, (u8 *)fdb->addr, port,
+ fdb->vid ? ALE_VLAN : 0, fdb->vid);
+ break;
+ default:
+ break;
+ }
+ rtnl_unlock();
+
+ kfree(switchdev_work->fdb_info.addr);
+ kfree(switchdev_work);
+ dev_put(priv->ndev);
+}
+
+/* called under rcu_read_lock() */
+static int cpsw_switchdev_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *ndev = switchdev_notifier_info_to_dev(ptr);
+ struct switchdev_notifier_fdb_info *fdb_info = ptr;
+ struct cpsw_switchdev_event_work *switchdev_work;
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ int err;
+
+ if (event == SWITCHDEV_PORT_ATTR_SET) {
+ err = switchdev_handle_port_attr_set(ndev, ptr,
+ cpsw_port_dev_check,
+ cpsw_port_attr_set);
+ return notifier_from_errno(err);
+ }
+
+ if (!cpsw_port_dev_check(ndev))
+ return NOTIFY_DONE;
+
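+ /* atomic (rcu_read_lock) notifier context, so allocate with
+ * GFP_ATOMIC and defer the ALE update to a workqueue
+ */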
+ switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
+ if (WARN_ON(!switchdev_work))
+ return NOTIFY_BAD;
+
+ INIT_WORK(&switchdev_work->work, cpsw_switchdev_event_work);
+ switchdev_work->priv = priv;
+ switchdev_work->event = event;
+
+ switch (event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ memcpy(&switchdev_work->fdb_info, ptr,
+ sizeof(switchdev_work->fdb_info));
+ switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
+ if (!switchdev_work->fdb_info.addr)
+ goto err_addr_alloc;
+ ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
+ fdb_info->addr);
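+ /* hold the netdev until the matching dev_put() in
+ * cpsw_switchdev_event_work()
+ */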
+ dev_hold(ndev);
+ break;
+ default:
+ kfree(switchdev_work);
+ return NOTIFY_DONE;
+ }
+
+ queue_work(system_long_wq, &switchdev_work->work);
+
+ return NOTIFY_DONE;
+
+err_addr_alloc:
+ kfree(switchdev_work);
+ return NOTIFY_BAD;
+}
+
+static struct notifier_block cpsw_switchdev_notifier = {
+ .notifier_call = cpsw_switchdev_event,
+};
+
+static int cpsw_switchdev_blocking_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+ int err;
+
+ switch (event) {
+ case SWITCHDEV_PORT_OBJ_ADD:
+ err = switchdev_handle_port_obj_add(dev, ptr,
+ cpsw_port_dev_check,
+ cpsw_port_obj_add);
+ return notifier_from_errno(err);
+ case SWITCHDEV_PORT_OBJ_DEL:
+ err = switchdev_handle_port_obj_del(dev, ptr,
+ cpsw_port_dev_check,
+ cpsw_port_obj_del);
+ return notifier_from_errno(err);
+ case SWITCHDEV_PORT_ATTR_SET:
+ err = switchdev_handle_port_attr_set(dev, ptr,
+ cpsw_port_dev_check,
+ cpsw_port_attr_set);
+ return notifier_from_errno(err);
+ default:
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block cpsw_switchdev_bl_notifier = {
+ .notifier_call = cpsw_switchdev_blocking_event,
+};
+
+int cpsw_switchdev_register_notifiers(struct cpsw_common *cpsw)
+{
+ int ret = 0;
+
+ ret = register_switchdev_notifier(&cpsw_switchdev_notifier);
+ if (ret) {
+ dev_err(cpsw->dev, "failed to register switchdev notifier ret:%d\n",
+ ret);
+ return ret;
+ }
+
+ ret = register_switchdev_blocking_notifier(&cpsw_switchdev_bl_notifier);
+ if (ret) {
+ dev_err(cpsw->dev, "failed to register switchdev blocking notifier ret:%d\n",
+ ret);
+ unregister_switchdev_notifier(&cpsw_switchdev_notifier);
+ }
+
+ return ret;
+}
+
+void cpsw_switchdev_unregister_notifiers(struct cpsw_common *cpsw)
+{
+ unregister_switchdev_blocking_notifier(&cpsw_switchdev_bl_notifier);
+ unregister_switchdev_notifier(&cpsw_switchdev_notifier);
+}
diff --git a/drivers/net/ethernet/ti/cpsw_switchdev.h b/drivers/net/ethernet/ti/cpsw_switchdev.h
new file mode 100644
index 000000000000..04a045dba7d4
--- /dev/null
+++ b/drivers/net/ethernet/ti/cpsw_switchdev.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Texas Instruments Ethernet Switch Driver
+ */
+
+#ifndef DRIVERS_NET_ETHERNET_TI_CPSW_SWITCHDEV_H_
+#define DRIVERS_NET_ETHERNET_TI_CPSW_SWITCHDEV_H_
+
+#include <net/switchdev.h>
+
+bool cpsw_port_dev_check(const struct net_device *dev);
+int cpsw_switchdev_register_notifiers(struct cpsw_common *cpsw);
+void cpsw_switchdev_unregister_notifiers(struct cpsw_common *cpsw);
+
+#endif /* DRIVERS_NET_ETHERNET_TI_CPSW_SWITCHDEV_H_ */
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
index 61136428e2c0..729ce09dded9 100644
--- a/drivers/net/ethernet/ti/cpts.c
+++ b/drivers/net/ethernet/ti/cpts.c
@@ -459,7 +459,7 @@ int cpts_register(struct cpts *cpts)
cpts_write32(cpts, CPTS_EN, control);
cpts_write32(cpts, TS_PEND_EN, int_enable);
- timecounter_init(&cpts->tc, &cpts->cc, ktime_to_ns(ktime_get_real()));
+ timecounter_init(&cpts->tc, &cpts->cc, ktime_get_real_ns());
cpts->clock = ptp_clock_register(&cpts->info, cpts->dev);
if (IS_ERR(cpts->clock)) {
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
index 2c1fac33136c..86a3f42a3dcc 100644
--- a/drivers/net/ethernet/ti/netcp_ethss.c
+++ b/drivers/net/ethernet/ti/netcp_ethss.c
@@ -2291,6 +2291,7 @@ static int gbe_slave_open(struct gbe_intf *gbe_intf)
struct gbe_slave *slave = gbe_intf->slave;
phy_interface_t phy_mode;
bool has_phy = false;
+ int err;
void (*hndlr)(struct net_device *) = gbe_adjust_link;
@@ -2320,11 +2321,11 @@ static int gbe_slave_open(struct gbe_intf *gbe_intf)
slave->phy_port_t = PORT_MII;
} else if (slave->link_interface == RGMII_LINK_MAC_PHY) {
has_phy = true;
- phy_mode = of_get_phy_mode(slave->node);
+ err = of_get_phy_mode(slave->node, &phy_mode);
/* if phy-mode is not present, default to
* PHY_INTERFACE_MODE_RGMII
*/
- if (phy_mode < 0)
+ if (err)
phy_mode = PHY_INTERFACE_MODE_RGMII;
if (!phy_interface_mode_is_rgmii(phy_mode)) {
diff --git a/drivers/net/ethernet/xilinx/Kconfig b/drivers/net/ethernet/xilinx/Kconfig
index 8d994cebb6b0..6304ebd8b5c6 100644
--- a/drivers/net/ethernet/xilinx/Kconfig
+++ b/drivers/net/ethernet/xilinx/Kconfig
@@ -6,7 +6,6 @@
config NET_VENDOR_XILINX
bool "Xilinx devices"
default y
- depends on PPC || PPC32 || MICROBLAZE || ARCH_ZYNQ || MIPS || X86 || ARM || COMPILE_TEST
---help---
If you have a network (Ethernet) card belonging to this class, say Y.
@@ -26,11 +25,10 @@ config XILINX_EMACLITE
config XILINX_AXI_EMAC
tristate "Xilinx 10/100/1000 AXI Ethernet support"
- depends on MICROBLAZE || X86 || ARM || COMPILE_TEST
select PHYLINK
---help---
This driver supports the 10/100/1000 Ethernet from Xilinx for the
- AXI bus interface used in Xilinx Virtex FPGAs.
+ AXI bus interface used in Xilinx Virtex FPGAs and SoCs.
config XILINX_LL_TEMAC
tristate "Xilinx LL TEMAC (LocalLink Tri-mode Ethernet MAC) driver"
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 676006f32f91..20746b801959 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -1405,8 +1405,8 @@ static void axienet_validate(struct phylink_config *config,
__ETHTOOL_LINK_MODE_MASK_NBITS);
}
-static int axienet_mac_link_state(struct phylink_config *config,
- struct phylink_link_state *state)
+static void axienet_mac_pcs_get_state(struct phylink_config *config,
+ struct phylink_link_state *state)
{
struct net_device *ndev = to_net_dev(config->dev);
struct axienet_local *lp = netdev_priv(ndev);
@@ -1431,8 +1431,6 @@ static int axienet_mac_link_state(struct phylink_config *config,
state->an_complete = 0;
state->duplex = 1;
-
- return 1;
}
static void axienet_mac_an_restart(struct phylink_config *config)
@@ -1497,7 +1495,7 @@ static void axienet_mac_link_up(struct phylink_config *config,
static const struct phylink_mac_ops axienet_phylink_ops = {
.validate = axienet_validate,
- .mac_link_state = axienet_mac_link_state,
+ .mac_pcs_get_state = axienet_mac_pcs_get_state,
.mac_an_restart = axienet_mac_an_restart,
.mac_config = axienet_mac_config,
.mac_link_down = axienet_mac_link_down,
@@ -1761,11 +1759,9 @@ static int axienet_probe(struct platform_device *pdev)
goto free_netdev;
}
} else {
- lp->phy_mode = of_get_phy_mode(pdev->dev.of_node);
- if ((int)lp->phy_mode < 0) {
- ret = -EINVAL;
+ ret = of_get_phy_mode(pdev->dev.of_node, &lp->phy_mode);
+ if (ret)
goto free_netdev;
- }
}
/* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
@@ -1790,10 +1786,6 @@ static int axienet_probe(struct platform_device *pdev)
/* Check for these resources directly on the Ethernet node. */
struct resource *res = platform_get_resource(pdev,
IORESOURCE_MEM, 1);
- if (!res) {
- dev_err(&pdev->dev, "unable to get DMA memory resource\n");
- goto free_netdev;
- }
lp->dma_regs = devm_ioremap_resource(&pdev->dev, res);
lp->rx_irq = platform_get_irq(pdev, 1);
lp->tx_irq = platform_get_irq(pdev, 0);
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 670ef682f268..250bd90627a5 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -609,7 +609,8 @@ struct nvsp_5_send_indirect_table {
/* The number of entries in the send indirection table */
u32 count;
- /* The offset of the send indirection table from top of this struct.
+ /* The offset of the send indirection table from the beginning of
+ * struct nvsp_message.
* The send indirection table tells which channel to put the send
* traffic on. Each entry is a channel number.
*/
@@ -822,7 +823,8 @@ struct nvsp_message {
#define NETVSC_SUPPORTED_HW_FEATURES (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | \
NETIF_F_TSO | NETIF_F_IPV6_CSUM | \
- NETIF_F_TSO6 | NETIF_F_LRO | NETIF_F_SG)
+ NETIF_F_TSO6 | NETIF_F_LRO | \
+ NETIF_F_SG | NETIF_F_RXHASH)
#define VRSS_SEND_TAB_SIZE 16 /* must be power of 2 */
#define VRSS_CHANNEL_MAX 64
@@ -853,6 +855,7 @@ struct multi_recv_comp {
struct nvsc_rsc {
const struct ndis_pkt_8021q_info *vlan;
const struct ndis_tcp_ip_checksum_info *csum_info;
+ const u32 *hash_info;
u8 is_last; /* last RNDIS msg in a vmtransfer_page */
u32 cnt; /* #fragments in an RSC packet */
u32 pktlen; /* Full packet length */
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index d22a36fc7a7c..eab83e71567a 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -1178,20 +1178,39 @@ static int netvsc_receive(struct net_device *ndev,
}
static void netvsc_send_table(struct net_device *ndev,
- const struct nvsp_message *nvmsg)
+ struct netvsc_device *nvscdev,
+ const struct nvsp_message *nvmsg,
+ u32 msglen)
{
struct net_device_context *net_device_ctx = netdev_priv(ndev);
- u32 count, *tab;
+ u32 count, offset, *tab;
int i;
count = nvmsg->msg.v5_msg.send_table.count;
+ offset = nvmsg->msg.v5_msg.send_table.offset;
+
if (count != VRSS_SEND_TAB_SIZE) {
netdev_err(ndev, "Received wrong send-table size:%u\n", count);
return;
}
- tab = (u32 *)((unsigned long)&nvmsg->msg.v5_msg.send_table +
- nvmsg->msg.v5_msg.send_table.offset);
+ /* If negotiated version <= NVSP_PROTOCOL_VERSION_6, the offset may be
+ * wrong due to a host bug. So fix the offset here.
+ */
+ if (nvscdev->nvsp_version <= NVSP_PROTOCOL_VERSION_6 &&
+ msglen >= sizeof(struct nvsp_message_header) +
+ sizeof(union nvsp_6_message_uber) + count * sizeof(u32))
+ offset = sizeof(struct nvsp_message_header) +
+ sizeof(union nvsp_6_message_uber);
+
+ /* Boundary check for all versions */
+ if (offset > msglen - count * sizeof(u32)) {
+ netdev_err(ndev, "Received send-table offset too big:%u\n",
+ offset);
+ return;
+ }
+
+ tab = (void *)nvmsg + offset;
for (i = 0; i < count; i++)
net_device_ctx->tx_table[i] = tab[i];
@@ -1209,12 +1228,14 @@ static void netvsc_send_vf(struct net_device *ndev,
net_device_ctx->vf_alloc ? "added" : "removed");
}
-static void netvsc_receive_inband(struct net_device *ndev,
- const struct nvsp_message *nvmsg)
+static void netvsc_receive_inband(struct net_device *ndev,
+ struct netvsc_device *nvscdev,
+ const struct nvsp_message *nvmsg,
+ u32 msglen)
{
switch (nvmsg->hdr.msg_type) {
case NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE:
- netvsc_send_table(ndev, nvmsg);
+ netvsc_send_table(ndev, nvscdev, nvmsg, msglen);
break;
case NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION:
@@ -1232,6 +1253,7 @@ static int netvsc_process_raw_pkt(struct hv_device *device,
{
struct vmbus_channel *channel = nvchan->channel;
const struct nvsp_message *nvmsg = hv_pkt_data(desc);
+ u32 msglen = hv_pkt_datalen(desc);
trace_nvsp_recv(ndev, channel, nvmsg);
@@ -1247,7 +1269,7 @@ static int netvsc_process_raw_pkt(struct hv_device *device,
break;
case VM_PKT_DATA_INBAND:
- netvsc_receive_inband(ndev, nvmsg);
+ netvsc_receive_inband(ndev, net_device, nvmsg, msglen);
break;
default:
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 963509add611..868e22e286ca 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -285,9 +285,9 @@ static inline u32 netvsc_get_hash(
else if (flow.basic.n_proto == htons(ETH_P_IPV6))
hash = jhash2((u32 *)&flow.addrs.v6addrs, 8, hashrnd);
else
- hash = 0;
+ return 0;
- skb_set_hash(skb, hash, PKT_HASH_TYPE_L3);
+ __skb_set_sw_hash(skb, hash, false);
}
return hash;
@@ -766,6 +766,7 @@ static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
const struct ndis_pkt_8021q_info *vlan = nvchan->rsc.vlan;
const struct ndis_tcp_ip_checksum_info *csum_info =
nvchan->rsc.csum_info;
+ const u32 *hash_info = nvchan->rsc.hash_info;
struct sk_buff *skb;
int i;
@@ -795,14 +796,16 @@ static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
skb->protocol == htons(ETH_P_IP))
netvsc_comp_ipcsum(skb);
- /* Do L4 checksum offload if enabled and present.
- */
+ /* Do L4 checksum offload if enabled and present. */
if (csum_info && (net->features & NETIF_F_RXCSUM)) {
if (csum_info->receive.tcp_checksum_succeeded ||
csum_info->receive.udp_checksum_succeeded)
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
+ if (hash_info && (net->features & NETIF_F_RXHASH))
+ skb_set_hash(skb, *hash_info, PKT_HASH_TYPE_L4);
+
if (vlan) {
u16 vlan_tci = vlan->vlanid | (vlan->pri << VLAN_PRIO_SHIFT) |
(vlan->cfi ? VLAN_CFI_MASK : 0);
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index abaf8156d19d..206b4e77eaf0 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -358,6 +358,7 @@ static inline
void rsc_add_data(struct netvsc_channel *nvchan,
const struct ndis_pkt_8021q_info *vlan,
const struct ndis_tcp_ip_checksum_info *csum_info,
+ const u32 *hash_info,
void *data, u32 len)
{
u32 cnt = nvchan->rsc.cnt;
@@ -368,6 +369,7 @@ void rsc_add_data(struct netvsc_channel *nvchan,
nvchan->rsc.vlan = vlan;
nvchan->rsc.csum_info = csum_info;
nvchan->rsc.pktlen = len;
+ nvchan->rsc.hash_info = hash_info;
}
nvchan->rsc.data[cnt] = data;
@@ -385,6 +387,7 @@ static int rndis_filter_receive_data(struct net_device *ndev,
const struct ndis_tcp_ip_checksum_info *csum_info;
const struct ndis_pkt_8021q_info *vlan;
const struct rndis_pktinfo_id *pktinfo_id;
+ const u32 *hash_info;
u32 data_offset;
void *data;
bool rsc_more = false;
@@ -411,6 +414,8 @@ static int rndis_filter_receive_data(struct net_device *ndev,
csum_info = rndis_get_ppi(rndis_pkt, TCPIP_CHKSUM_PKTINFO, 0);
+ hash_info = rndis_get_ppi(rndis_pkt, NBL_HASH_VALUE, 0);
+
pktinfo_id = rndis_get_ppi(rndis_pkt, RNDIS_PKTINFO_ID, 1);
data = (void *)msg + data_offset;
@@ -441,7 +446,8 @@ static int rndis_filter_receive_data(struct net_device *ndev,
* rndis_pkt->data_len tell us the real data length, we only copy
* the data packet to the stack, without the rndis trailer padding
*/
- rsc_add_data(nvchan, vlan, csum_info, data, rndis_pkt->data_len);
+ rsc_add_data(nvchan, vlan, csum_info, hash_info,
+ data, rndis_pkt->data_len);
if (rsc_more)
return NVSP_STAT_SUCCESS;
@@ -1208,6 +1214,7 @@ static int rndis_netdev_set_hwcaps(struct rndis_device *rndis_device,
/* Compute tx offload settings based on hw capabilities */
net->hw_features |= NETIF_F_RXCSUM;
net->hw_features |= NETIF_F_SG;
+ net->hw_features |= NETIF_F_RXHASH;
if ((hwcaps.csum.ip4_txcsum & NDIS_TXCSUM_ALL_TCP4) == NDIS_TXCSUM_ALL_TCP4) {
/* Can checksum TCP */
diff --git a/drivers/net/ieee802154/Kconfig b/drivers/net/ieee802154/Kconfig
index 8af5b7e9f4ed..c92a62dbf398 100644
--- a/drivers/net/ieee802154/Kconfig
+++ b/drivers/net/ieee802154/Kconfig
@@ -74,9 +74,9 @@ config IEEE802154_ATUSB
The module will be called 'atusb'.
config IEEE802154_ADF7242
- tristate "ADF7242 transceiver driver"
- depends on IEEE802154_DRIVERS && MAC802154
- depends on SPI
+ tristate "ADF7242 transceiver driver"
+ depends on IEEE802154_DRIVERS && MAC802154
+ depends on SPI
---help---
Say Y here to enable the ADF7242 SPI 802.15.4 wireless
controller.
@@ -107,9 +107,9 @@ config IEEE802154_CA8210_DEBUGFS
management entities.
config IEEE802154_MCR20A
- tristate "MCR20A transceiver driver"
- depends on IEEE802154_DRIVERS && MAC802154
- depends on SPI
+ tristate "MCR20A transceiver driver"
+ depends on IEEE802154_DRIVERS && MAC802154
+ depends on SPI
---help---
Say Y here to enable the MCR20A SPI 802.15.4 wireless
controller.
diff --git a/drivers/net/ieee802154/cc2520.c b/drivers/net/ieee802154/cc2520.c
index 43506948e444..89c046b204e0 100644
--- a/drivers/net/ieee802154/cc2520.c
+++ b/drivers/net/ieee802154/cc2520.c
@@ -218,7 +218,6 @@ static int
cc2520_cmd_strobe(struct cc2520_private *priv, u8 cmd)
{
int ret;
- u8 status = 0xff;
struct spi_message msg;
struct spi_transfer xfer = {
.len = 0,
@@ -236,8 +235,6 @@ cc2520_cmd_strobe(struct cc2520_private *priv, u8 cmd)
priv->buf[0]);
ret = spi_sync(priv->spi, &msg);
- if (!ret)
- status = priv->buf[0];
dev_vdbg(&priv->spi->dev,
"buf[0] = %02x\n", priv->buf[0]);
mutex_unlock(&priv->buffer_mutex);
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index ba3dfac1d904..a70662261a5a 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -108,8 +108,8 @@ static void ipvlan_port_destroy(struct net_device *dev)
#define IPVLAN_FEATURES \
(NETIF_F_SG | NETIF_F_CSUM_MASK | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \
- NETIF_F_GSO | NETIF_F_TSO | NETIF_F_GSO_ROBUST | \
- NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_GRO | NETIF_F_RXCSUM | \
+ NETIF_F_GSO | NETIF_F_ALL_TSO | NETIF_F_GSO_ROBUST | \
+ NETIF_F_GRO | NETIF_F_RXCSUM | \
NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER)
#define IPVLAN_STATE_MASK \
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index 14545a8797a8..a1c77cc00416 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -68,7 +68,6 @@ EXPORT_SYMBOL(blackhole_netdev);
static netdev_tx_t loopback_xmit(struct sk_buff *skb,
struct net_device *dev)
{
- struct pcpu_lstats *lb_stats;
int len;
skb_tx_timestamp(skb);
@@ -85,27 +84,20 @@ static netdev_tx_t loopback_xmit(struct sk_buff *skb,
skb->protocol = eth_type_trans(skb, dev);
- /* it's OK to use per_cpu_ptr() because BHs are off */
- lb_stats = this_cpu_ptr(dev->lstats);
-
len = skb->len;
- if (likely(netif_rx(skb) == NET_RX_SUCCESS)) {
- u64_stats_update_begin(&lb_stats->syncp);
- lb_stats->bytes += len;
- lb_stats->packets++;
- u64_stats_update_end(&lb_stats->syncp);
- }
+ if (likely(netif_rx(skb) == NET_RX_SUCCESS))
+ dev_lstats_add(dev, len);
return NETDEV_TX_OK;
}
-static void loopback_get_stats64(struct net_device *dev,
- struct rtnl_link_stats64 *stats)
+void dev_lstats_read(struct net_device *dev, u64 *packets, u64 *bytes)
{
- u64 bytes = 0;
- u64 packets = 0;
int i;
+ *packets = 0;
+ *bytes = 0;
+
for_each_possible_cpu(i) {
const struct pcpu_lstats *lb_stats;
u64 tbytes, tpackets;
@@ -114,12 +106,22 @@ static void loopback_get_stats64(struct net_device *dev,
lb_stats = per_cpu_ptr(dev->lstats, i);
do {
start = u64_stats_fetch_begin_irq(&lb_stats->syncp);
- tbytes = lb_stats->bytes;
- tpackets = lb_stats->packets;
+ tpackets = u64_stats_read(&lb_stats->packets);
+ tbytes = u64_stats_read(&lb_stats->bytes);
} while (u64_stats_fetch_retry_irq(&lb_stats->syncp, start));
- bytes += tbytes;
- packets += tpackets;
+ *bytes += tbytes;
+ *packets += tpackets;
}
+}
+EXPORT_SYMBOL(dev_lstats_read);
+
+static void loopback_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ u64 packets, bytes;
+
+ dev_lstats_read(dev, &packets, &bytes);
+
stats->rx_packets = packets;
stats->tx_packets = packets;
stats->rx_bytes = bytes;
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 34fc59bd1e20..05631d97eeb4 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -359,10 +359,11 @@ static void macvlan_broadcast_enqueue(struct macvlan_port *port,
}
spin_unlock(&port->bc_queue.lock);
+ schedule_work(&port->bc_work);
+
if (err)
goto free_nskb;
- schedule_work(&port->bc_work);
return;
free_nskb:
diff --git a/drivers/net/netdevsim/Makefile b/drivers/net/netdevsim/Makefile
index 09f1315d2f2a..f4d8f62f28c2 100644
--- a/drivers/net/netdevsim/Makefile
+++ b/drivers/net/netdevsim/Makefile
@@ -3,7 +3,7 @@
obj-$(CONFIG_NETDEVSIM) += netdevsim.o
netdevsim-objs := \
- netdev.o dev.o fib.o bus.o
+ netdev.o dev.o fib.o bus.o health.o
ifeq ($(CONFIG_BPF_SYSCALL),y)
netdevsim-objs += \
diff --git a/drivers/net/netdevsim/bus.c b/drivers/net/netdevsim/bus.c
index 1a0ff3d7747b..6aeed0c600f8 100644
--- a/drivers/net/netdevsim/bus.c
+++ b/drivers/net/netdevsim/bus.c
@@ -283,6 +283,7 @@ nsim_bus_dev_new(unsigned int id, unsigned int port_count)
nsim_bus_dev->dev.bus = &nsim_bus;
nsim_bus_dev->dev.type = &nsim_bus_dev_type;
nsim_bus_dev->port_count = port_count;
+ nsim_bus_dev->initial_net = current->nsproxy->net_ns;
err = device_register(&nsim_bus_dev->dev);
if (err)
diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c
index 54ca6681ba31..059711edfc61 100644
--- a/drivers/net/netdevsim/dev.c
+++ b/drivers/net/netdevsim/dev.c
@@ -90,6 +90,10 @@ static int nsim_dev_debugfs_init(struct nsim_dev *nsim_dev)
&nsim_dev->test1);
debugfs_create_file("take_snapshot", 0200, nsim_dev->ddir, nsim_dev,
&nsim_dev_take_snapshot_fops);
+ debugfs_create_bool("dont_allow_reload", 0600, nsim_dev->ddir,
+ &nsim_dev->dont_allow_reload);
+ debugfs_create_bool("fail_reload", 0600, nsim_dev->ddir,
+ &nsim_dev->fail_reload);
return 0;
}
@@ -123,39 +127,6 @@ static void nsim_dev_port_debugfs_exit(struct nsim_dev_port *nsim_dev_port)
debugfs_remove_recursive(nsim_dev_port->ddir);
}
-static struct net *nsim_devlink_net(struct devlink *devlink)
-{
- return &init_net;
-}
-
-static u64 nsim_dev_ipv4_fib_resource_occ_get(void *priv)
-{
- struct net *net = priv;
-
- return nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB, false);
-}
-
-static u64 nsim_dev_ipv4_fib_rules_res_occ_get(void *priv)
-{
- struct net *net = priv;
-
- return nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB_RULES, false);
-}
-
-static u64 nsim_dev_ipv6_fib_resource_occ_get(void *priv)
-{
- struct net *net = priv;
-
- return nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB, false);
-}
-
-static u64 nsim_dev_ipv6_fib_rules_res_occ_get(void *priv)
-{
- struct net *net = priv;
-
- return nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB_RULES, false);
-}
-
static int nsim_dev_resources_register(struct devlink *devlink)
{
struct devlink_resource_size_params params = {
@@ -163,9 +134,7 @@ static int nsim_dev_resources_register(struct devlink *devlink)
.size_granularity = 1,
.unit = DEVLINK_RESOURCE_UNIT_ENTRY
};
- struct net *net = nsim_devlink_net(devlink);
int err;
- u64 n;
/* Resources for IPv4 */
err = devlink_resource_register(devlink, "IPv4", (u64)-1,
@@ -177,8 +146,7 @@ static int nsim_dev_resources_register(struct devlink *devlink)
goto out;
}
- n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB, true);
- err = devlink_resource_register(devlink, "fib", n,
+ err = devlink_resource_register(devlink, "fib", (u64)-1,
NSIM_RESOURCE_IPV4_FIB,
NSIM_RESOURCE_IPV4, &params);
if (err) {
@@ -186,8 +154,7 @@ static int nsim_dev_resources_register(struct devlink *devlink)
return err;
}
- n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB_RULES, true);
- err = devlink_resource_register(devlink, "fib-rules", n,
+ err = devlink_resource_register(devlink, "fib-rules", (u64)-1,
NSIM_RESOURCE_IPV4_FIB_RULES,
NSIM_RESOURCE_IPV4, &params);
if (err) {
@@ -205,8 +172,7 @@ static int nsim_dev_resources_register(struct devlink *devlink)
goto out;
}
- n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB, true);
- err = devlink_resource_register(devlink, "fib", n,
+ err = devlink_resource_register(devlink, "fib", (u64)-1,
NSIM_RESOURCE_IPV6_FIB,
NSIM_RESOURCE_IPV6, &params);
if (err) {
@@ -214,8 +180,7 @@ static int nsim_dev_resources_register(struct devlink *devlink)
return err;
}
- n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB_RULES, true);
- err = devlink_resource_register(devlink, "fib-rules", n,
+ err = devlink_resource_register(devlink, "fib-rules", (u64)-1,
NSIM_RESOURCE_IPV6_FIB_RULES,
NSIM_RESOURCE_IPV6, &params);
if (err) {
@@ -223,22 +188,6 @@ static int nsim_dev_resources_register(struct devlink *devlink)
return err;
}
- devlink_resource_occ_get_register(devlink,
- NSIM_RESOURCE_IPV4_FIB,
- nsim_dev_ipv4_fib_resource_occ_get,
- net);
- devlink_resource_occ_get_register(devlink,
- NSIM_RESOURCE_IPV4_FIB_RULES,
- nsim_dev_ipv4_fib_rules_res_occ_get,
- net);
- devlink_resource_occ_get_register(devlink,
- NSIM_RESOURCE_IPV6_FIB,
- nsim_dev_ipv6_fib_resource_occ_get,
- net);
- devlink_resource_occ_get_register(devlink,
- NSIM_RESOURCE_IPV6_FIB_RULES,
- nsim_dev_ipv6_fib_rules_res_occ_get,
- net);
out:
return err;
}
@@ -524,36 +473,48 @@ static void nsim_dev_traps_exit(struct devlink *devlink)
kfree(nsim_dev->trap_data);
}
-static int nsim_dev_reload_down(struct devlink *devlink,
+static int nsim_dev_reload_create(struct nsim_dev *nsim_dev,
+ struct netlink_ext_ack *extack);
+static void nsim_dev_reload_destroy(struct nsim_dev *nsim_dev);
+
+static int nsim_dev_reload_down(struct devlink *devlink, bool netns_change,
struct netlink_ext_ack *extack)
{
+ struct nsim_dev *nsim_dev = devlink_priv(devlink);
+
+ if (nsim_dev->dont_allow_reload) {
+ /* For testing purposes, the user set the debugfs
+ * dont_allow_reload value to true, so forbid the reload.
+ */
+ NL_SET_ERR_MSG_MOD(extack, "User forbade the reload for testing purposes");
+ return -EOPNOTSUPP;
+ }
+
+ nsim_dev_reload_destroy(nsim_dev);
return 0;
}
static int nsim_dev_reload_up(struct devlink *devlink,
struct netlink_ext_ack *extack)
{
- enum nsim_resource_id res_ids[] = {
- NSIM_RESOURCE_IPV4_FIB, NSIM_RESOURCE_IPV4_FIB_RULES,
- NSIM_RESOURCE_IPV6_FIB, NSIM_RESOURCE_IPV6_FIB_RULES
- };
- struct net *net = nsim_devlink_net(devlink);
- int i;
-
- for (i = 0; i < ARRAY_SIZE(res_ids); ++i) {
- int err;
- u64 val;
+ struct nsim_dev *nsim_dev = devlink_priv(devlink);
- err = devlink_resource_size_get(devlink, res_ids[i], &val);
- if (!err) {
- err = nsim_fib_set_max(net, res_ids[i], val, extack);
- if (err)
- return err;
- }
+ if (nsim_dev->fail_reload) {
+ /* For testing purposes, the user set the debugfs fail_reload
+ * value to true, so fail right away.
+ */
+ NL_SET_ERR_MSG_MOD(extack, "User set up the reload to fail for testing purposes");
+ return -EINVAL;
}
- nsim_devlink_param_load_driverinit_values(devlink);
- return 0;
+ return nsim_dev_reload_create(nsim_dev, extack);
+}
+
+static int nsim_dev_info_get(struct devlink *devlink,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ return devlink_info_driver_name_put(req, DRV_NAME);
}
#define NSIM_DEV_FLASH_SIZE 500000
@@ -649,6 +610,7 @@ nsim_dev_devlink_trap_action_set(struct devlink *devlink,
static const struct devlink_ops nsim_dev_devlink_ops = {
.reload_down = nsim_dev_reload_down,
.reload_up = nsim_dev_reload_up,
+ .info_get = nsim_dev_info_get,
.flash_update = nsim_dev_flash_update,
.trap_init = nsim_dev_devlink_trap_init,
.trap_action_set = nsim_dev_devlink_trap_action_set,
@@ -657,93 +619,6 @@ static const struct devlink_ops nsim_dev_devlink_ops = {
#define NSIM_DEV_MAX_MACS_DEFAULT 32
#define NSIM_DEV_TEST1_DEFAULT true
-static struct nsim_dev *
-nsim_dev_create(struct nsim_bus_dev *nsim_bus_dev, unsigned int port_count)
-{
- struct nsim_dev *nsim_dev;
- struct devlink *devlink;
- int err;
-
- devlink = devlink_alloc(&nsim_dev_devlink_ops, sizeof(*nsim_dev));
- if (!devlink)
- return ERR_PTR(-ENOMEM);
- nsim_dev = devlink_priv(devlink);
- nsim_dev->nsim_bus_dev = nsim_bus_dev;
- nsim_dev->switch_id.id_len = sizeof(nsim_dev->switch_id.id);
- get_random_bytes(nsim_dev->switch_id.id, nsim_dev->switch_id.id_len);
- INIT_LIST_HEAD(&nsim_dev->port_list);
- mutex_init(&nsim_dev->port_list_lock);
- nsim_dev->fw_update_status = true;
- nsim_dev->max_macs = NSIM_DEV_MAX_MACS_DEFAULT;
- nsim_dev->test1 = NSIM_DEV_TEST1_DEFAULT;
-
- err = nsim_dev_resources_register(devlink);
- if (err)
- goto err_devlink_free;
-
- err = devlink_register(devlink, &nsim_bus_dev->dev);
- if (err)
- goto err_resources_unregister;
-
- err = devlink_params_register(devlink, nsim_devlink_params,
- ARRAY_SIZE(nsim_devlink_params));
- if (err)
- goto err_dl_unregister;
- nsim_devlink_set_params_init_values(nsim_dev, devlink);
-
- err = nsim_dev_dummy_region_init(nsim_dev, devlink);
- if (err)
- goto err_params_unregister;
-
- err = nsim_dev_traps_init(devlink);
- if (err)
- goto err_dummy_region_exit;
-
- err = nsim_dev_debugfs_init(nsim_dev);
- if (err)
- goto err_traps_exit;
-
- err = nsim_bpf_dev_init(nsim_dev);
- if (err)
- goto err_debugfs_exit;
-
- devlink_params_publish(devlink);
- return nsim_dev;
-
-err_debugfs_exit:
- nsim_dev_debugfs_exit(nsim_dev);
-err_traps_exit:
- nsim_dev_traps_exit(devlink);
-err_dummy_region_exit:
- nsim_dev_dummy_region_exit(nsim_dev);
-err_params_unregister:
- devlink_params_unregister(devlink, nsim_devlink_params,
- ARRAY_SIZE(nsim_devlink_params));
-err_dl_unregister:
- devlink_unregister(devlink);
-err_resources_unregister:
- devlink_resources_unregister(devlink, NULL);
-err_devlink_free:
- devlink_free(devlink);
- return ERR_PTR(err);
-}
-
-static void nsim_dev_destroy(struct nsim_dev *nsim_dev)
-{
- struct devlink *devlink = priv_to_devlink(nsim_dev);
-
- nsim_bpf_dev_exit(nsim_dev);
- nsim_dev_debugfs_exit(nsim_dev);
- nsim_dev_traps_exit(devlink);
- nsim_dev_dummy_region_exit(nsim_dev);
- devlink_params_unregister(devlink, nsim_devlink_params,
- ARRAY_SIZE(nsim_devlink_params));
- devlink_unregister(devlink);
- devlink_resources_unregister(devlink, NULL);
- mutex_destroy(&nsim_dev->port_list_lock);
- devlink_free(devlink);
-}
-
static int __nsim_dev_port_add(struct nsim_dev *nsim_dev,
unsigned int port_index)
{
@@ -813,39 +688,195 @@ static void nsim_dev_port_del_all(struct nsim_dev *nsim_dev)
mutex_unlock(&nsim_dev->port_list_lock);
}
-int nsim_dev_probe(struct nsim_bus_dev *nsim_bus_dev)
+static int nsim_dev_port_add_all(struct nsim_dev *nsim_dev,
+ unsigned int port_count)
{
- struct nsim_dev *nsim_dev;
- int i;
- int err;
-
- nsim_dev = nsim_dev_create(nsim_bus_dev, nsim_bus_dev->port_count);
- if (IS_ERR(nsim_dev))
- return PTR_ERR(nsim_dev);
- dev_set_drvdata(&nsim_bus_dev->dev, nsim_dev);
+ int i, err;
- mutex_lock(&nsim_dev->port_list_lock);
- for (i = 0; i < nsim_bus_dev->port_count; i++) {
+ for (i = 0; i < port_count; i++) {
err = __nsim_dev_port_add(nsim_dev, i);
if (err)
goto err_port_del_all;
}
- mutex_unlock(&nsim_dev->port_list_lock);
return 0;
err_port_del_all:
- mutex_unlock(&nsim_dev->port_list_lock);
nsim_dev_port_del_all(nsim_dev);
- nsim_dev_destroy(nsim_dev);
return err;
}
+static int nsim_dev_reload_create(struct nsim_dev *nsim_dev,
+ struct netlink_ext_ack *extack)
+{
+ struct nsim_bus_dev *nsim_bus_dev = nsim_dev->nsim_bus_dev;
+ struct devlink *devlink;
+ int err;
+
+ devlink = priv_to_devlink(nsim_dev);
+ nsim_dev = devlink_priv(devlink);
+ INIT_LIST_HEAD(&nsim_dev->port_list);
+ mutex_init(&nsim_dev->port_list_lock);
+ nsim_dev->fw_update_status = true;
+
+ nsim_dev->fib_data = nsim_fib_create(devlink, extack);
+ if (IS_ERR(nsim_dev->fib_data))
+ return PTR_ERR(nsim_dev->fib_data);
+
+ nsim_devlink_param_load_driverinit_values(devlink);
+
+ err = nsim_dev_dummy_region_init(nsim_dev, devlink);
+ if (err)
+ goto err_fib_destroy;
+
+ err = nsim_dev_traps_init(devlink);
+ if (err)
+ goto err_dummy_region_exit;
+
+ err = nsim_dev_health_init(nsim_dev, devlink);
+ if (err)
+ goto err_traps_exit;
+
+ err = nsim_dev_port_add_all(nsim_dev, nsim_bus_dev->port_count);
+ if (err)
+ goto err_health_exit;
+
+ return 0;
+
+err_health_exit:
+ nsim_dev_health_exit(nsim_dev);
+err_traps_exit:
+ nsim_dev_traps_exit(devlink);
+err_dummy_region_exit:
+ nsim_dev_dummy_region_exit(nsim_dev);
+err_fib_destroy:
+ nsim_fib_destroy(devlink, nsim_dev->fib_data);
+ return err;
+}
+
+int nsim_dev_probe(struct nsim_bus_dev *nsim_bus_dev)
+{
+ struct nsim_dev *nsim_dev;
+ struct devlink *devlink;
+ int err;
+
+ devlink = devlink_alloc(&nsim_dev_devlink_ops, sizeof(*nsim_dev));
+ if (!devlink)
+ return -ENOMEM;
+ devlink_net_set(devlink, nsim_bus_dev->initial_net);
+ nsim_dev = devlink_priv(devlink);
+ nsim_dev->nsim_bus_dev = nsim_bus_dev;
+ nsim_dev->switch_id.id_len = sizeof(nsim_dev->switch_id.id);
+ get_random_bytes(nsim_dev->switch_id.id, nsim_dev->switch_id.id_len);
+ INIT_LIST_HEAD(&nsim_dev->port_list);
+ mutex_init(&nsim_dev->port_list_lock);
+ nsim_dev->fw_update_status = true;
+ nsim_dev->max_macs = NSIM_DEV_MAX_MACS_DEFAULT;
+ nsim_dev->test1 = NSIM_DEV_TEST1_DEFAULT;
+
+ dev_set_drvdata(&nsim_bus_dev->dev, nsim_dev);
+
+ err = nsim_dev_resources_register(devlink);
+ if (err)
+ goto err_devlink_free;
+
+ nsim_dev->fib_data = nsim_fib_create(devlink, NULL);
+ if (IS_ERR(nsim_dev->fib_data)) {
+ err = PTR_ERR(nsim_dev->fib_data);
+ goto err_resources_unregister;
+ }
+
+ err = devlink_register(devlink, &nsim_bus_dev->dev);
+ if (err)
+ goto err_fib_destroy;
+
+ err = devlink_params_register(devlink, nsim_devlink_params,
+ ARRAY_SIZE(nsim_devlink_params));
+ if (err)
+ goto err_dl_unregister;
+ nsim_devlink_set_params_init_values(nsim_dev, devlink);
+
+ err = nsim_dev_dummy_region_init(nsim_dev, devlink);
+ if (err)
+ goto err_params_unregister;
+
+ err = nsim_dev_traps_init(devlink);
+ if (err)
+ goto err_dummy_region_exit;
+
+ err = nsim_dev_debugfs_init(nsim_dev);
+ if (err)
+ goto err_traps_exit;
+
+ err = nsim_dev_health_init(nsim_dev, devlink);
+ if (err)
+ goto err_debugfs_exit;
+
+ err = nsim_bpf_dev_init(nsim_dev);
+ if (err)
+ goto err_health_exit;
+
+ err = nsim_dev_port_add_all(nsim_dev, nsim_bus_dev->port_count);
+ if (err)
+ goto err_bpf_dev_exit;
+
+ devlink_params_publish(devlink);
+ devlink_reload_enable(devlink);
+ return 0;
+
+err_bpf_dev_exit:
+ nsim_bpf_dev_exit(nsim_dev);
+err_health_exit:
+ nsim_dev_health_exit(nsim_dev);
+err_debugfs_exit:
+ nsim_dev_debugfs_exit(nsim_dev);
+err_traps_exit:
+ nsim_dev_traps_exit(devlink);
+err_dummy_region_exit:
+ nsim_dev_dummy_region_exit(nsim_dev);
+err_params_unregister:
+ devlink_params_unregister(devlink, nsim_devlink_params,
+ ARRAY_SIZE(nsim_devlink_params));
+err_dl_unregister:
+ devlink_unregister(devlink);
+err_fib_destroy:
+ nsim_fib_destroy(devlink, nsim_dev->fib_data);
+err_resources_unregister:
+ devlink_resources_unregister(devlink, NULL);
+err_devlink_free:
+ devlink_free(devlink);
+ return err;
+}
+
+static void nsim_dev_reload_destroy(struct nsim_dev *nsim_dev)
+{
+ struct devlink *devlink = priv_to_devlink(nsim_dev);
+
+ if (devlink_is_reload_failed(devlink))
+ return;
+ nsim_dev_port_del_all(nsim_dev);
+ nsim_dev_health_exit(nsim_dev);
+ nsim_dev_traps_exit(devlink);
+ nsim_dev_dummy_region_exit(nsim_dev);
+ mutex_destroy(&nsim_dev->port_list_lock);
+ nsim_fib_destroy(devlink, nsim_dev->fib_data);
+}
+
void nsim_dev_remove(struct nsim_bus_dev *nsim_bus_dev)
{
struct nsim_dev *nsim_dev = dev_get_drvdata(&nsim_bus_dev->dev);
+ struct devlink *devlink = priv_to_devlink(nsim_dev);
- nsim_dev_port_del_all(nsim_dev);
- nsim_dev_destroy(nsim_dev);
+ devlink_reload_disable(devlink);
+
+ nsim_dev_reload_destroy(nsim_dev);
+
+ nsim_bpf_dev_exit(nsim_dev);
+ nsim_dev_debugfs_exit(nsim_dev);
+ devlink_params_unregister(devlink, nsim_devlink_params,
+ ARRAY_SIZE(nsim_devlink_params));
+ devlink_unregister(devlink);
+ devlink_resources_unregister(devlink, NULL);
+ devlink_free(devlink);
}
static struct nsim_dev_port *
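The dev.c changes above split bring-up in two: nsim_dev_probe() keeps the full goto-unwind ladder, while reload re-creates only the state that nsim_dev_reload_destroy() tears down. A minimal sketch of that devlink reload split follows; my_dev, my_state_create() and my_state_destroy() are hypothetical stand-ins, and on kernels of this era the down callback also receives a netns_change flag:

        /* Hedged sketch of the reload callback pair; my_dev and the
         * my_state_*() helpers are hypothetical, not netdevsim code.
         */
        static int my_reload_down(struct devlink *devlink, bool netns_change,
                                  struct netlink_ext_ack *extack)
        {
                struct my_dev *mdev = devlink_priv(devlink);

                my_state_destroy(mdev);         /* drop runtime state only */
                return 0;
        }

        static int my_reload_up(struct devlink *devlink,
                                struct netlink_ext_ack *extack)
        {
                struct my_dev *mdev = devlink_priv(devlink);

                return my_state_create(mdev);   /* rebuild in the (new) netns */
        }

        static const struct devlink_ops my_devlink_ops = {
                .reload_down    = my_reload_down,
                .reload_up      = my_reload_up,
        };

Registration-time objects (params, resources, debugfs) stay put across a reload; only per-netns state is recycled, which is what lets a devlink dev reload move the device between namespaces cheaply.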
diff --git a/drivers/net/netdevsim/fib.c b/drivers/net/netdevsim/fib.c
index 1a251f76d09b..13540dee7364 100644
--- a/drivers/net/netdevsim/fib.c
+++ b/drivers/net/netdevsim/fib.c
@@ -18,7 +18,7 @@
#include <net/ip_fib.h>
#include <net/ip6_fib.h>
#include <net/fib_rules.h>
-#include <net/netns/generic.h>
+#include <net/net_namespace.h>
#include "netdevsim.h"
@@ -33,15 +33,14 @@ struct nsim_per_fib_data {
};
struct nsim_fib_data {
+ struct notifier_block fib_nb;
struct nsim_per_fib_data ipv4;
struct nsim_per_fib_data ipv6;
};
-static unsigned int nsim_fib_net_id;
-
-u64 nsim_fib_get_val(struct net *net, enum nsim_resource_id res_id, bool max)
+u64 nsim_fib_get_val(struct nsim_fib_data *fib_data,
+ enum nsim_resource_id res_id, bool max)
{
- struct nsim_fib_data *fib_data = net_generic(net, nsim_fib_net_id);
struct nsim_fib_entry *entry;
switch (res_id) {
@@ -64,12 +63,10 @@ u64 nsim_fib_get_val(struct net *net, enum nsim_resource_id res_id, bool max)
return max ? entry->max : entry->num;
}
-int nsim_fib_set_max(struct net *net, enum nsim_resource_id res_id, u64 val,
- struct netlink_ext_ack *extack)
+static void nsim_fib_set_max(struct nsim_fib_data *fib_data,
+ enum nsim_resource_id res_id, u64 val)
{
- struct nsim_fib_data *fib_data = net_generic(net, nsim_fib_net_id);
struct nsim_fib_entry *entry;
- int err = 0;
switch (res_id) {
case NSIM_RESOURCE_IPV4_FIB:
@@ -85,20 +82,10 @@ int nsim_fib_set_max(struct net *net, enum nsim_resource_id res_id, u64 val,
entry = &fib_data->ipv6.rules;
break;
default:
- return 0;
- }
-
- /* not allowing a new max to be less than current occupancy
- * --> no means of evicting entries
- */
- if (val < entry->num) {
- NL_SET_ERR_MSG_MOD(extack, "New size is less than current occupancy");
- err = -EINVAL;
- } else {
- entry->max = val;
+ WARN_ON(1);
+ return;
}
-
- return err;
+ entry->max = val;
}
static int nsim_fib_rule_account(struct nsim_fib_entry *entry, bool add,
@@ -120,9 +107,9 @@ static int nsim_fib_rule_account(struct nsim_fib_entry *entry, bool add,
return err;
}
-static int nsim_fib_rule_event(struct fib_notifier_info *info, bool add)
+static int nsim_fib_rule_event(struct nsim_fib_data *data,
+ struct fib_notifier_info *info, bool add)
{
- struct nsim_fib_data *data = net_generic(info->net, nsim_fib_net_id);
struct netlink_ext_ack *extack = info->extack;
int err = 0;
@@ -157,9 +144,9 @@ static int nsim_fib_account(struct nsim_fib_entry *entry, bool add,
return err;
}
-static int nsim_fib_event(struct fib_notifier_info *info, bool add)
+static int nsim_fib_event(struct nsim_fib_data *data,
+ struct fib_notifier_info *info, bool add)
{
- struct nsim_fib_data *data = net_generic(info->net, nsim_fib_net_id);
struct netlink_ext_ack *extack = info->extack;
int err = 0;
@@ -178,18 +165,22 @@ static int nsim_fib_event(struct fib_notifier_info *info, bool add)
static int nsim_fib_event_nb(struct notifier_block *nb, unsigned long event,
void *ptr)
{
+ struct nsim_fib_data *data = container_of(nb, struct nsim_fib_data,
+ fib_nb);
struct fib_notifier_info *info = ptr;
int err = 0;
switch (event) {
case FIB_EVENT_RULE_ADD: /* fall through */
case FIB_EVENT_RULE_DEL:
- err = nsim_fib_rule_event(info, event == FIB_EVENT_RULE_ADD);
+ err = nsim_fib_rule_event(data, info,
+ event == FIB_EVENT_RULE_ADD);
break;
case FIB_EVENT_ENTRY_ADD: /* fall through */
case FIB_EVENT_ENTRY_DEL:
- err = nsim_fib_event(info, event == FIB_EVENT_ENTRY_ADD);
+ err = nsim_fib_event(data, info,
+ event == FIB_EVENT_ENTRY_ADD);
break;
}
@@ -199,69 +190,116 @@ static int nsim_fib_event_nb(struct notifier_block *nb, unsigned long event,
/* inconsistent dump, trying again */
static void nsim_fib_dump_inconsistent(struct notifier_block *nb)
{
- struct nsim_fib_data *data;
- struct net *net;
+ struct nsim_fib_data *data = container_of(nb, struct nsim_fib_data,
+ fib_nb);
- rcu_read_lock();
- for_each_net_rcu(net) {
- data = net_generic(net, nsim_fib_net_id);
+ data->ipv4.fib.num = 0ULL;
+ data->ipv4.rules.num = 0ULL;
+ data->ipv6.fib.num = 0ULL;
+ data->ipv6.rules.num = 0ULL;
+}
- data->ipv4.fib.num = 0ULL;
- data->ipv4.rules.num = 0ULL;
+static u64 nsim_fib_ipv4_resource_occ_get(void *priv)
+{
+ struct nsim_fib_data *data = priv;
- data->ipv6.fib.num = 0ULL;
- data->ipv6.rules.num = 0ULL;
- }
- rcu_read_unlock();
+ return nsim_fib_get_val(data, NSIM_RESOURCE_IPV4_FIB, false);
}
-static struct notifier_block nsim_fib_nb = {
- .notifier_call = nsim_fib_event_nb,
-};
-
-/* Initialize per network namespace state */
-static int __net_init nsim_fib_netns_init(struct net *net)
+static u64 nsim_fib_ipv4_rules_res_occ_get(void *priv)
{
- struct nsim_fib_data *data = net_generic(net, nsim_fib_net_id);
+ struct nsim_fib_data *data = priv;
- data->ipv4.fib.max = (u64)-1;
- data->ipv4.rules.max = (u64)-1;
+ return nsim_fib_get_val(data, NSIM_RESOURCE_IPV4_FIB_RULES, false);
+}
- data->ipv6.fib.max = (u64)-1;
- data->ipv6.rules.max = (u64)-1;
+static u64 nsim_fib_ipv6_resource_occ_get(void *priv)
+{
+ struct nsim_fib_data *data = priv;
- return 0;
+ return nsim_fib_get_val(data, NSIM_RESOURCE_IPV6_FIB, false);
}
-static struct pernet_operations nsim_fib_net_ops = {
- .init = nsim_fib_netns_init,
- .id = &nsim_fib_net_id,
- .size = sizeof(struct nsim_fib_data),
-};
+static u64 nsim_fib_ipv6_rules_res_occ_get(void *priv)
+{
+ struct nsim_fib_data *data = priv;
+
+ return nsim_fib_get_val(data, NSIM_RESOURCE_IPV6_FIB_RULES, false);
+}
-void nsim_fib_exit(void)
+static void nsim_fib_set_max_all(struct nsim_fib_data *data,
+ struct devlink *devlink)
{
- unregister_fib_notifier(&nsim_fib_nb);
- unregister_pernet_subsys(&nsim_fib_net_ops);
+ enum nsim_resource_id res_ids[] = {
+ NSIM_RESOURCE_IPV4_FIB, NSIM_RESOURCE_IPV4_FIB_RULES,
+ NSIM_RESOURCE_IPV6_FIB, NSIM_RESOURCE_IPV6_FIB_RULES
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(res_ids); i++) {
+ int err;
+ u64 val;
+
+ err = devlink_resource_size_get(devlink, res_ids[i], &val);
+ if (err)
+ val = (u64) -1;
+ nsim_fib_set_max(data, res_ids[i], val);
+ }
}
-int nsim_fib_init(void)
+struct nsim_fib_data *nsim_fib_create(struct devlink *devlink,
+ struct netlink_ext_ack *extack)
{
+ struct nsim_fib_data *data;
int err;
- err = register_pernet_subsys(&nsim_fib_net_ops);
- if (err < 0) {
- pr_err("Failed to register pernet subsystem\n");
- goto err_out;
- }
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return ERR_PTR(-ENOMEM);
+
+ nsim_fib_set_max_all(data, devlink);
- err = register_fib_notifier(&nsim_fib_nb, nsim_fib_dump_inconsistent);
- if (err < 0) {
+ data->fib_nb.notifier_call = nsim_fib_event_nb;
+ err = register_fib_notifier(devlink_net(devlink), &data->fib_nb,
+ nsim_fib_dump_inconsistent, extack);
+ if (err) {
pr_err("Failed to register fib notifier\n");
- unregister_pernet_subsys(&nsim_fib_net_ops);
goto err_out;
}
+ devlink_resource_occ_get_register(devlink,
+ NSIM_RESOURCE_IPV4_FIB,
+ nsim_fib_ipv4_resource_occ_get,
+ data);
+ devlink_resource_occ_get_register(devlink,
+ NSIM_RESOURCE_IPV4_FIB_RULES,
+ nsim_fib_ipv4_rules_res_occ_get,
+ data);
+ devlink_resource_occ_get_register(devlink,
+ NSIM_RESOURCE_IPV6_FIB,
+ nsim_fib_ipv6_resource_occ_get,
+ data);
+ devlink_resource_occ_get_register(devlink,
+ NSIM_RESOURCE_IPV6_FIB_RULES,
+ nsim_fib_ipv6_rules_res_occ_get,
+ data);
+ return data;
+
err_out:
- return err;
+ kfree(data);
+ return ERR_PTR(err);
+}
+
+void nsim_fib_destroy(struct devlink *devlink, struct nsim_fib_data *data)
+{
+ devlink_resource_occ_get_unregister(devlink,
+ NSIM_RESOURCE_IPV6_FIB_RULES);
+ devlink_resource_occ_get_unregister(devlink,
+ NSIM_RESOURCE_IPV6_FIB);
+ devlink_resource_occ_get_unregister(devlink,
+ NSIM_RESOURCE_IPV4_FIB_RULES);
+ devlink_resource_occ_get_unregister(devlink,
+ NSIM_RESOURCE_IPV4_FIB);
+ unregister_fib_notifier(devlink_net(devlink), &data->fib_nb);
+ kfree(data);
}
diff --git a/drivers/net/netdevsim/health.c b/drivers/net/netdevsim/health.c
new file mode 100644
index 000000000000..9aa637d162eb
--- /dev/null
+++ b/drivers/net/netdevsim/health.c
@@ -0,0 +1,319 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 Mellanox Technologies. All rights reserved */
+
+#include <linux/debugfs.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+#include "netdevsim.h"
+
+static int
+nsim_dev_empty_reporter_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *priv_ctx,
+ struct netlink_ext_ack *extack)
+{
+ return 0;
+}
+
+static int
+nsim_dev_empty_reporter_diagnose(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg,
+ struct netlink_ext_ack *extack)
+{
+ return 0;
+}
+
+static const
+struct devlink_health_reporter_ops nsim_dev_empty_reporter_ops = {
+ .name = "empty",
+ .dump = nsim_dev_empty_reporter_dump,
+ .diagnose = nsim_dev_empty_reporter_diagnose,
+};
+
+struct nsim_dev_dummy_reporter_ctx {
+ char *break_msg;
+};
+
+static int
+nsim_dev_dummy_reporter_recover(struct devlink_health_reporter *reporter,
+ void *priv_ctx,
+ struct netlink_ext_ack *extack)
+{
+ struct nsim_dev_health *health = devlink_health_reporter_priv(reporter);
+ struct nsim_dev_dummy_reporter_ctx *ctx = priv_ctx;
+
+ if (health->fail_recover) {
+ /* For testing purposes, the user set the debugfs fail_recover
+ * value to true. Fail right away.
+ */
+ NL_SET_ERR_MSG_MOD(extack, "User set up recover to fail for testing purposes");
+ return -EINVAL;
+ }
+ if (ctx) {
+ kfree(health->recovered_break_msg);
+ health->recovered_break_msg = kstrdup(ctx->break_msg,
+ GFP_KERNEL);
+ if (!health->recovered_break_msg)
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static int nsim_dev_dummy_fmsg_put(struct devlink_fmsg *fmsg, u32 binary_len)
+{
+ char *binary;
+ int err;
+ int i;
+
+ err = devlink_fmsg_bool_pair_put(fmsg, "test_bool", true);
+ if (err)
+ return err;
+ err = devlink_fmsg_u8_pair_put(fmsg, "test_u8", 1);
+ if (err)
+ return err;
+ err = devlink_fmsg_u32_pair_put(fmsg, "test_u32", 3);
+ if (err)
+ return err;
+ err = devlink_fmsg_u64_pair_put(fmsg, "test_u64", 4);
+ if (err)
+ return err;
+ err = devlink_fmsg_string_pair_put(fmsg, "test_string", "somestring");
+ if (err)
+ return err;
+
+ binary = kmalloc(binary_len, GFP_KERNEL);
+ if (!binary)
+ return -ENOMEM;
+ get_random_bytes(binary, binary_len);
+ err = devlink_fmsg_binary_pair_put(fmsg, "test_binary", binary, binary_len);
+ kfree(binary);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_pair_nest_start(fmsg, "test_nest");
+ if (err)
+ return err;
+ err = devlink_fmsg_obj_nest_start(fmsg);
+ if (err)
+ return err;
+ err = devlink_fmsg_bool_pair_put(fmsg, "nested_test_bool", false);
+ if (err)
+ return err;
+ err = devlink_fmsg_u8_pair_put(fmsg, "nested_test_u8", false);
+ if (err)
+ return err;
+ err = devlink_fmsg_obj_nest_end(fmsg);
+ if (err)
+ return err;
+ err = devlink_fmsg_pair_nest_end(fmsg);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_arr_pair_nest_start(fmsg, "test_bool_array");
+ if (err)
+ return err;
+ for (i = 0; i < 10; i++) {
+ err = devlink_fmsg_bool_put(fmsg, true);
+ if (err)
+ return err;
+ }
+ err = devlink_fmsg_arr_pair_nest_end(fmsg);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_arr_pair_nest_start(fmsg, "test_u8_array");
+ if (err)
+ return err;
+ for (i = 0; i < 10; i++) {
+ err = devlink_fmsg_u8_put(fmsg, i);
+ if (err)
+ return err;
+ }
+ err = devlink_fmsg_arr_pair_nest_end(fmsg);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_arr_pair_nest_start(fmsg, "test_u32_array");
+ if (err)
+ return err;
+ for (i = 0; i < 10; i++) {
+ err = devlink_fmsg_u32_put(fmsg, i);
+ if (err)
+ return err;
+ }
+ err = devlink_fmsg_arr_pair_nest_end(fmsg);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_arr_pair_nest_start(fmsg, "test_u64_array");
+ if (err)
+ return err;
+ for (i = 0; i < 10; i++) {
+ err = devlink_fmsg_u64_put(fmsg, i);
+ if (err)
+ return err;
+ }
+ err = devlink_fmsg_arr_pair_nest_end(fmsg);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_arr_pair_nest_start(fmsg, "test_array_of_objects");
+ if (err)
+ return err;
+ for (i = 0; i < 10; i++) {
+ err = devlink_fmsg_obj_nest_start(fmsg);
+ if (err)
+ return err;
+ err = devlink_fmsg_bool_pair_put(fmsg,
+ "in_array_nested_test_bool",
+ false);
+ if (err)
+ return err;
+ err = devlink_fmsg_u8_pair_put(fmsg,
+ "in_array_nested_test_u8",
+ i);
+ if (err)
+ return err;
+ err = devlink_fmsg_obj_nest_end(fmsg);
+ if (err)
+ return err;
+ }
+ return devlink_fmsg_arr_pair_nest_end(fmsg);
+}
+
+static int
+nsim_dev_dummy_reporter_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *priv_ctx,
+ struct netlink_ext_ack *extack)
+{
+ struct nsim_dev_health *health = devlink_health_reporter_priv(reporter);
+ struct nsim_dev_dummy_reporter_ctx *ctx = priv_ctx;
+ int err;
+
+ if (ctx) {
+ err = devlink_fmsg_string_pair_put(fmsg, "break_message",
+ ctx->break_msg);
+ if (err)
+ return err;
+ }
+ return nsim_dev_dummy_fmsg_put(fmsg, health->binary_len);
+}
+
+static int
+nsim_dev_dummy_reporter_diagnose(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg,
+ struct netlink_ext_ack *extack)
+{
+ struct nsim_dev_health *health = devlink_health_reporter_priv(reporter);
+ int err;
+
+ if (health->recovered_break_msg) {
+ err = devlink_fmsg_string_pair_put(fmsg,
+ "recovered_break_message",
+ health->recovered_break_msg);
+ if (err)
+ return err;
+ }
+ return nsim_dev_dummy_fmsg_put(fmsg, health->binary_len);
+}
+
+static const
+struct devlink_health_reporter_ops nsim_dev_dummy_reporter_ops = {
+ .name = "dummy",
+ .recover = nsim_dev_dummy_reporter_recover,
+ .dump = nsim_dev_dummy_reporter_dump,
+ .diagnose = nsim_dev_dummy_reporter_diagnose,
+};
+
+static ssize_t nsim_dev_health_break_write(struct file *file,
+ const char __user *data,
+ size_t count, loff_t *ppos)
+{
+ struct nsim_dev_health *health = file->private_data;
+ struct nsim_dev_dummy_reporter_ctx ctx;
+ char *break_msg;
+ int err;
+
+ break_msg = kmalloc(count + 1, GFP_KERNEL);
+ if (!break_msg)
+ return -ENOMEM;
+
+ if (copy_from_user(break_msg, data, count)) {
+ err = -EFAULT;
+ goto out;
+ }
+ break_msg[count] = '\0';
+ if (count && break_msg[count - 1] == '\n')
+ break_msg[count - 1] = '\0';
+
+ ctx.break_msg = break_msg;
+ err = devlink_health_report(health->dummy_reporter, break_msg, &ctx);
+
+out:
+ kfree(break_msg);
+ return err ?: count;
+}
+
+static const struct file_operations nsim_dev_health_break_fops = {
+ .open = simple_open,
+ .write = nsim_dev_health_break_write,
+ .llseek = generic_file_llseek,
+};
+
+int nsim_dev_health_init(struct nsim_dev *nsim_dev, struct devlink *devlink)
+{
+ struct nsim_dev_health *health = &nsim_dev->health;
+ int err;
+
+ health->empty_reporter =
+ devlink_health_reporter_create(devlink,
+ &nsim_dev_empty_reporter_ops,
+ 0, false, health);
+ if (IS_ERR(health->empty_reporter))
+ return PTR_ERR(health->empty_reporter);
+
+ health->dummy_reporter =
+ devlink_health_reporter_create(devlink,
+ &nsim_dev_dummy_reporter_ops,
+ 0, false, health);
+ if (IS_ERR(health->dummy_reporter)) {
+ err = PTR_ERR(health->dummy_reporter);
+ goto err_empty_reporter_destroy;
+ }
+
+ health->ddir = debugfs_create_dir("health", nsim_dev->ddir);
+ if (IS_ERR_OR_NULL(health->ddir)) {
+ err = PTR_ERR_OR_ZERO(health->ddir) ?: -EINVAL;
+ goto err_dummy_reporter_destroy;
+ }
+
+ health->recovered_break_msg = NULL;
+ debugfs_create_file("break_health", 0200, health->ddir, health,
+ &nsim_dev_health_break_fops);
+ health->binary_len = 16;
+ debugfs_create_u32("binary_len", 0600, health->ddir,
+ &health->binary_len);
+ health->fail_recover = false;
+ debugfs_create_bool("fail_recover", 0600, health->ddir,
+ &health->fail_recover);
+ return 0;
+
+err_dummy_reporter_destroy:
+ devlink_health_reporter_destroy(health->dummy_reporter);
+err_empty_reporter_destroy:
+ devlink_health_reporter_destroy(health->empty_reporter);
+ return err;
+}
+
+void nsim_dev_health_exit(struct nsim_dev *nsim_dev)
+{
+ struct nsim_dev_health *health = &nsim_dev->health;
+
+ debugfs_remove_recursive(health->ddir);
+ kfree(health->recovered_break_msg);
+ devlink_health_reporter_destroy(health->dummy_reporter);
+ devlink_health_reporter_destroy(health->empty_reporter);
+}
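health.c above exercises most of the devlink health API, but the core shape is small. A hedged sketch of a minimal reporter, with my_dump and my_ops as hypothetical names; the create signature matches the calls in nsim_dev_health_init() above:

        static int my_dump(struct devlink_health_reporter *reporter,
                           struct devlink_fmsg *fmsg, void *priv_ctx,
                           struct netlink_ext_ack *extack)
        {
                /* fmsg is a typed key/value stream; pairs, arrays and
                 * nests may be combined as the dummy reporter shows.
                 */
                return devlink_fmsg_u32_pair_put(fmsg, "events_seen", 42);
        }

        static const struct devlink_health_reporter_ops my_ops = {
                .name = "my_reporter",
                .dump = my_dump,
        };

        /* graceful_period 0, auto_recover false, priv handed back via
         * devlink_health_reporter_priv():
         */
        reporter = devlink_health_reporter_create(devlink, &my_ops, 0, false, priv);

Reports are then raised with devlink_health_report(), as the break_health debugfs hook above demonstrates.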
diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
index 55f57f76d01b..2908e0a0d6e1 100644
--- a/drivers/net/netdevsim/netdev.c
+++ b/drivers/net/netdevsim/netdev.c
@@ -290,6 +290,7 @@ nsim_create(struct nsim_dev *nsim_dev, struct nsim_dev_port *nsim_dev_port)
if (!dev)
return ERR_PTR(-ENOMEM);
+ dev_net_set(dev, nsim_dev_net(nsim_dev));
ns = netdev_priv(dev);
ns->netdev = dev;
ns->nsim_dev = nsim_dev;
@@ -357,18 +358,12 @@ static int __init nsim_module_init(void)
if (err)
goto err_dev_exit;
- err = nsim_fib_init();
- if (err)
- goto err_bus_exit;
-
err = rtnl_link_register(&nsim_link_ops);
if (err)
- goto err_fib_exit;
+ goto err_bus_exit;
return 0;
-err_fib_exit:
- nsim_fib_exit();
err_bus_exit:
nsim_bus_exit();
err_dev_exit:
@@ -379,7 +374,6 @@ err_dev_exit:
static void __exit nsim_module_exit(void)
{
rtnl_link_unregister(&nsim_link_ops);
- nsim_fib_exit();
nsim_bus_exit();
nsim_dev_exit();
}
diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h
index 66bf13765ad0..94df795ef4d3 100644
--- a/drivers/net/netdevsim/netdevsim.h
+++ b/drivers/net/netdevsim/netdevsim.h
@@ -134,6 +134,18 @@ enum nsim_resource_id {
NSIM_RESOURCE_IPV6_FIB_RULES,
};
+struct nsim_dev_health {
+ struct devlink_health_reporter *empty_reporter;
+ struct devlink_health_reporter *dummy_reporter;
+ struct dentry *ddir;
+ char *recovered_break_msg;
+ u32 binary_len;
+ bool fail_recover;
+};
+
+int nsim_dev_health_init(struct nsim_dev *nsim_dev, struct devlink *devlink);
+void nsim_dev_health_exit(struct nsim_dev *nsim_dev);
+
struct nsim_dev_port {
struct list_head list;
struct devlink_port devlink_port;
@@ -161,9 +173,17 @@ struct nsim_dev {
bool fw_update_status;
u32 max_macs;
bool test1;
+ bool dont_allow_reload;
+ bool fail_reload;
struct devlink_region *dummy_region;
+ struct nsim_dev_health health;
};
+static inline struct net *nsim_dev_net(struct nsim_dev *nsim_dev)
+{
+ return devlink_net(priv_to_devlink(nsim_dev));
+}
+
int nsim_dev_init(void);
void nsim_dev_exit(void);
int nsim_dev_probe(struct nsim_bus_dev *nsim_bus_dev);
@@ -173,11 +193,11 @@ int nsim_dev_port_add(struct nsim_bus_dev *nsim_bus_dev,
int nsim_dev_port_del(struct nsim_bus_dev *nsim_bus_dev,
unsigned int port_index);
-int nsim_fib_init(void);
-void nsim_fib_exit(void);
-u64 nsim_fib_get_val(struct net *net, enum nsim_resource_id res_id, bool max);
-int nsim_fib_set_max(struct net *net, enum nsim_resource_id res_id, u64 val,
- struct netlink_ext_ack *extack);
+struct nsim_fib_data *nsim_fib_create(struct devlink *devlink,
+ struct netlink_ext_ack *extack);
+void nsim_fib_destroy(struct devlink *devlink, struct nsim_fib_data *fib_data);
+u64 nsim_fib_get_val(struct nsim_fib_data *fib_data,
+ enum nsim_resource_id res_id, bool max);
#if IS_ENABLED(CONFIG_XFRM_OFFLOAD)
void nsim_ipsec_init(struct netdevsim *ns);
@@ -215,6 +235,9 @@ struct nsim_bus_dev {
struct device dev;
struct list_head list;
unsigned int port_count;
+ struct net *initial_net; /* Purpose of this is to carry net pointer
+ * during the probe time only.
+ */
unsigned int num_vfs;
struct nsim_vf_config *vfconfigs;
};
diff --git a/drivers/net/nlmon.c b/drivers/net/nlmon.c
index 68771b2f351a..afb119f38325 100644
--- a/drivers/net/nlmon.c
+++ b/drivers/net/nlmon.c
@@ -9,13 +9,7 @@
static netdev_tx_t nlmon_xmit(struct sk_buff *skb, struct net_device *dev)
{
- int len = skb->len;
- struct pcpu_lstats *stats = this_cpu_ptr(dev->lstats);
-
- u64_stats_update_begin(&stats->syncp);
- stats->bytes += len;
- stats->packets++;
- u64_stats_update_end(&stats->syncp);
+ dev_lstats_add(dev, skb->len);
dev_kfree_skb(skb);
@@ -56,25 +50,9 @@ static int nlmon_close(struct net_device *dev)
static void
nlmon_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
- int i;
- u64 bytes = 0, packets = 0;
-
- for_each_possible_cpu(i) {
- const struct pcpu_lstats *nl_stats;
- u64 tbytes, tpackets;
- unsigned int start;
-
- nl_stats = per_cpu_ptr(dev->lstats, i);
-
- do {
- start = u64_stats_fetch_begin_irq(&nl_stats->syncp);
- tbytes = nl_stats->bytes;
- tpackets = nl_stats->packets;
- } while (u64_stats_fetch_retry_irq(&nl_stats->syncp, start));
+ u64 packets, bytes;
- packets += tpackets;
- bytes += tbytes;
- }
+ dev_lstats_read(dev, &packets, &bytes);
stats->rx_packets = packets;
stats->tx_packets = 0;
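The nlmon conversion replaces the open-coded per-cpu counters and u64_stats retry loop with the dev_lstats_add()/dev_lstats_read() helpers this series adds to netdevice.h. A sketch of a transmit handler using them, assuming dev->lstats was allocated in the driver's ndo_init as nlmon does:

        static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *dev)
        {
                dev_lstats_add(dev, skb->len);  /* per-cpu: bytes += len, packets++ */
                dev_kfree_skb(skb);
                return NETDEV_TX_OK;
        }

dev_lstats_read() then folds the per-cpu counters under the same u64_stats sequence loop the old code spelled out by hand.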
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index fe602648b99f..5848219005d7 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -282,11 +282,6 @@ config AX88796B_PHY
Currently supports the Asix Electronics PHY found in the X-Surf 100
AX88796B package.
-config AT803X_PHY
- tristate "AT803X PHYs"
- ---help---
- Currently supports the AT8030 and AT8035 model
-
config BCM63XX_PHY
tristate "Broadcom 63xx SOCs internal PHY"
depends on BCM63XX || COMPILE_TEST
@@ -364,6 +359,12 @@ config DP83867_PHY
---help---
Currently supports the DP83867 PHY.
+config DP83869_PHY
+ tristate "Texas Instruments DP83869 Gigabit PHY"
+ ---help---
+ Currently supports the DP83869 PHY. This PHY supports copper and
+ fiber connections.
+
config FIXED_PHY
tristate "MDIO Bus/PHY emulation with fixed speed/link PHYs"
depends on PHYLIB
@@ -444,6 +445,12 @@ config NXP_TJA11XX_PHY
---help---
Currently supports the NXP TJA1100 and TJA1101 PHY.
+config AT803X_PHY
+ tristate "Qualcomm Atheros AR803X PHYs"
+ depends on REGULATOR
+ help
Currently supports the AR8030, AR8031, AR8033 and AR8035 models
+
config QSEMI_PHY
tristate "Quality Semiconductor PHYs"
---help---
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index a03437e091f3..b433ec3bf9a6 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -70,6 +70,7 @@ obj-$(CONFIG_DP83822_PHY) += dp83822.o
obj-$(CONFIG_DP83TC811_PHY) += dp83tc811.o
obj-$(CONFIG_DP83848_PHY) += dp83848.o
obj-$(CONFIG_DP83867_PHY) += dp83867.o
+obj-$(CONFIG_DP83869_PHY) += dp83869.o
obj-$(CONFIG_FIXED_PHY) += fixed_phy.o
obj-$(CONFIG_ICPLUS_PHY) += icplus.o
obj-$(CONFIG_INTEL_XWAY_PHY) += intel-xway.o
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index 1eb5d4fb8925..aee62610bade 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -2,7 +2,7 @@
/*
* drivers/net/phy/at803x.c
*
- * Driver for Atheros 803x PHY
+ * Driver for Qualcomm Atheros AR803x PHY
*
* Author: Matus Ujhelyi <ujhelyi.m@gmail.com>
*/
@@ -13,7 +13,12 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/of_gpio.h>
+#include <linux/bitfield.h>
#include <linux/gpio/consumer.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/consumer.h>
+#include <dt-bindings/net/qca-ar803x.h>
#define AT803X_SPECIFIC_STATUS 0x11
#define AT803X_SS_SPEED_MASK (3 << 14)
@@ -62,17 +67,60 @@
#define AT803X_DEBUG_REG_5 0x05
#define AT803X_DEBUG_TX_CLK_DLY_EN BIT(8)
+#define AT803X_DEBUG_REG_1F 0x1F
+#define AT803X_DEBUG_PLL_ON BIT(2)
+#define AT803X_DEBUG_RGMII_1V8 BIT(3)
+
+/* AT803x supports either the XTAL input pad, an internal PLL or the
+ * DSP as clock reference for the clock output pad. The XTAL reference
+ * is only used for 25 MHz output, all other frequencies need the PLL.
+ * The DSP as a clock reference is used in synchronous ethernet
+ * applications.
+ *
+ * By default the PLL is only enabled if there is a link. Otherwise
+ * the PHY will go into a low power state and disable the PLL. You can
+ * set the PLL_ON bit (see debug register 0x1f) to keep the PLL always
+ * enabled.
+ */
+#define AT803X_MMD7_CLK25M 0x8016
+#define AT803X_CLK_OUT_MASK GENMASK(4, 2)
+#define AT803X_CLK_OUT_25MHZ_XTAL 0
+#define AT803X_CLK_OUT_25MHZ_DSP 1
+#define AT803X_CLK_OUT_50MHZ_PLL 2
+#define AT803X_CLK_OUT_50MHZ_DSP 3
+#define AT803X_CLK_OUT_62_5MHZ_PLL 4
+#define AT803X_CLK_OUT_62_5MHZ_DSP 5
+#define AT803X_CLK_OUT_125MHZ_PLL 6
+#define AT803X_CLK_OUT_125MHZ_DSP 7
+
+/* The AR8035 has another mask which is compatible with the AR8031/AR8033 mask
+ * but doesn't support choosing between XTAL/PLL and DSP.
+ */
+#define AT8035_CLK_OUT_MASK GENMASK(4, 3)
+
+#define AT803X_CLK_OUT_STRENGTH_MASK GENMASK(8, 7)
+#define AT803X_CLK_OUT_STRENGTH_FULL 0
+#define AT803X_CLK_OUT_STRENGTH_HALF 1
+#define AT803X_CLK_OUT_STRENGTH_QUARTER 2
+
+#define ATH9331_PHY_ID 0x004dd041
#define ATH8030_PHY_ID 0x004dd076
#define ATH8031_PHY_ID 0x004dd074
#define ATH8035_PHY_ID 0x004dd072
#define AT803X_PHY_ID_MASK 0xffffffef
-MODULE_DESCRIPTION("Atheros 803x PHY driver");
+MODULE_DESCRIPTION("Qualcomm Atheros AR803x PHY driver");
MODULE_AUTHOR("Matus Ujhelyi");
MODULE_LICENSE("GPL");
struct at803x_priv {
- bool phy_reset:1;
+ int flags;
+#define AT803X_KEEP_PLL_ENABLED BIT(0) /* don't turn off internal PLL */
+ u16 clk_25m_reg;
+ u16 clk_25m_mask;
+ struct regulator_dev *vddio_rdev;
+ struct regulator_dev *vddh_rdev;
+ struct regulator *vddio;
};
struct at803x_context {
@@ -240,6 +288,193 @@ static int at803x_resume(struct phy_device *phydev)
return phy_modify(phydev, MII_BMCR, BMCR_PDOWN | BMCR_ISOLATE, 0);
}
+static int at803x_rgmii_reg_set_voltage_sel(struct regulator_dev *rdev,
+ unsigned int selector)
+{
+ struct phy_device *phydev = rdev_get_drvdata(rdev);
+
+ if (selector)
+ return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_1F,
+ 0, AT803X_DEBUG_RGMII_1V8);
+ else
+ return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_1F,
+ AT803X_DEBUG_RGMII_1V8, 0);
+}
+
+static int at803x_rgmii_reg_get_voltage_sel(struct regulator_dev *rdev)
+{
+ struct phy_device *phydev = rdev_get_drvdata(rdev);
+ int val;
+
+ val = at803x_debug_reg_read(phydev, AT803X_DEBUG_REG_1F);
+ if (val < 0)
+ return val;
+
+ return (val & AT803X_DEBUG_RGMII_1V8) ? 1 : 0;
+}
+
+static struct regulator_ops vddio_regulator_ops = {
+ .list_voltage = regulator_list_voltage_table,
+ .set_voltage_sel = at803x_rgmii_reg_set_voltage_sel,
+ .get_voltage_sel = at803x_rgmii_reg_get_voltage_sel,
+};
+
+static const unsigned int vddio_voltage_table[] = {
+ 1500000,
+ 1800000,
+};
+
+static const struct regulator_desc vddio_desc = {
+ .name = "vddio",
+ .of_match = of_match_ptr("vddio-regulator"),
+ .n_voltages = ARRAY_SIZE(vddio_voltage_table),
+ .volt_table = vddio_voltage_table,
+ .ops = &vddio_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+};
+
+static struct regulator_ops vddh_regulator_ops = {
+};
+
+static const struct regulator_desc vddh_desc = {
+ .name = "vddh",
+ .of_match = of_match_ptr("vddh-regulator"),
+ .n_voltages = 1,
+ .fixed_uV = 2500000,
+ .ops = &vddh_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+};
+
+static int at8031_register_regulators(struct phy_device *phydev)
+{
+ struct at803x_priv *priv = phydev->priv;
+ struct device *dev = &phydev->mdio.dev;
+ struct regulator_config config = { };
+
+ config.dev = dev;
+ config.driver_data = phydev;
+
+ priv->vddio_rdev = devm_regulator_register(dev, &vddio_desc, &config);
+ if (IS_ERR(priv->vddio_rdev)) {
+ phydev_err(phydev, "failed to register VDDIO regulator\n");
+ return PTR_ERR(priv->vddio_rdev);
+ }
+
+ priv->vddh_rdev = devm_regulator_register(dev, &vddh_desc, &config);
+ if (IS_ERR(priv->vddh_rdev)) {
+ phydev_err(phydev, "failed to register VDDH regulator\n");
+ return PTR_ERR(priv->vddh_rdev);
+ }
+
+ return 0;
+}
+
+static bool at803x_match_phy_id(struct phy_device *phydev, u32 phy_id)
+{
+ return (phydev->phy_id & phydev->drv->phy_id_mask)
+ == (phy_id & phydev->drv->phy_id_mask);
+}
+
+static int at803x_parse_dt(struct phy_device *phydev)
+{
+ struct device_node *node = phydev->mdio.dev.of_node;
+ struct at803x_priv *priv = phydev->priv;
+ unsigned int sel, mask;
+ u32 freq, strength;
+ int ret;
+
+ if (!IS_ENABLED(CONFIG_OF_MDIO))
+ return 0;
+
+ ret = of_property_read_u32(node, "qca,clk-out-frequency", &freq);
+ if (!ret) {
+ mask = AT803X_CLK_OUT_MASK;
+ switch (freq) {
+ case 25000000:
+ sel = AT803X_CLK_OUT_25MHZ_XTAL;
+ break;
+ case 50000000:
+ sel = AT803X_CLK_OUT_50MHZ_PLL;
+ break;
+ case 62500000:
+ sel = AT803X_CLK_OUT_62_5MHZ_PLL;
+ break;
+ case 125000000:
+ sel = AT803X_CLK_OUT_125MHZ_PLL;
+ break;
+ default:
+ phydev_err(phydev, "invalid qca,clk-out-frequency\n");
+ return -EINVAL;
+ }
+
+ priv->clk_25m_reg |= FIELD_PREP(mask, sel);
+ priv->clk_25m_mask |= mask;
+
+ /* Fixup for the AR8030/AR8035. These chips have another mask and
+ * don't support the DSP reference, i.e. the lowest bit of the
+ * mask. The upper two bits select the same frequencies, so mask
+ * off the lowest bit here.
+ *
+ * Warning:
+ * There was no datasheet for the AR8030 available so this is
+ * just a guess. But the AR8035 is listed as pin compatible with
+ * the AR8030, so there is a good chance it works on the AR8030
+ * too.
+ */
+ if (at803x_match_phy_id(phydev, ATH8030_PHY_ID) ||
+ at803x_match_phy_id(phydev, ATH8035_PHY_ID)) {
+ priv->clk_25m_reg &= ~AT8035_CLK_OUT_MASK;
+ priv->clk_25m_mask &= ~AT8035_CLK_OUT_MASK;
+ }
+ }
+
+ ret = of_property_read_u32(node, "qca,clk-out-strength", &strength);
+ if (!ret) {
+ priv->clk_25m_mask |= AT803X_CLK_OUT_STRENGTH_MASK;
+ switch (strength) {
+ case AR803X_STRENGTH_FULL:
+ priv->clk_25m_reg |= AT803X_CLK_OUT_STRENGTH_FULL;
+ break;
+ case AR803X_STRENGTH_HALF:
+ priv->clk_25m_reg |= AT803X_CLK_OUT_STRENGTH_HALF;
+ break;
+ case AR803X_STRENGTH_QUARTER:
+ priv->clk_25m_reg |= AT803X_CLK_OUT_STRENGTH_QUARTER;
+ break;
+ default:
+ phydev_err(phydev, "invalid qca,clk-out-strength\n");
+ return -EINVAL;
+ }
+ }
+
+ /* Only supported on the AR8031/AR8033; the AR8030/AR8035 use
+ * strapping options.
+ */
+ if (at803x_match_phy_id(phydev, ATH8031_PHY_ID)) {
+ if (of_property_read_bool(node, "qca,keep-pll-enabled"))
+ priv->flags |= AT803X_KEEP_PLL_ENABLED;
+
+ ret = at8031_register_regulators(phydev);
+ if (ret < 0)
+ return ret;
+
+ priv->vddio = devm_regulator_get_optional(&phydev->mdio.dev,
+ "vddio");
+ if (IS_ERR(priv->vddio)) {
+ phydev_err(phydev, "failed to get VDDIO regulator\n");
+ return PTR_ERR(priv->vddio);
+ }
+
+ ret = regulator_enable(priv->vddio);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
static int at803x_probe(struct phy_device *phydev)
{
struct device *dev = &phydev->mdio.dev;
@@ -251,7 +486,40 @@ static int at803x_probe(struct phy_device *phydev)
phydev->priv = priv;
- return 0;
+ return at803x_parse_dt(phydev);
+}
+
+static int at803x_clk_out_config(struct phy_device *phydev)
+{
+ struct at803x_priv *priv = phydev->priv;
+ int val;
+
+ if (!priv->clk_25m_mask)
+ return 0;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, AT803X_MMD7_CLK25M);
+ if (val < 0)
+ return val;
+
+ val &= ~priv->clk_25m_mask;
+ val |= priv->clk_25m_reg;
+
+ return phy_write_mmd(phydev, MDIO_MMD_AN, AT803X_MMD7_CLK25M, val);
+}
+
+static int at8031_pll_config(struct phy_device *phydev)
+{
+ struct at803x_priv *priv = phydev->priv;
+
+ /* The default after hardware reset is PLL OFF. After a soft reset, the
+ * values are retained.
+ */
+ if (priv->flags & AT803X_KEEP_PLL_ENABLED)
+ return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_1F,
+ 0, AT803X_DEBUG_PLL_ON);
+ else
+ return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_1F,
+ AT803X_DEBUG_PLL_ON, 0);
}
static int at803x_config_init(struct phy_device *phydev)
@@ -276,8 +544,20 @@ static int at803x_config_init(struct phy_device *phydev)
ret = at803x_enable_tx_delay(phydev);
else
ret = at803x_disable_tx_delay(phydev);
+ if (ret < 0)
+ return ret;
- return ret;
+ ret = at803x_clk_out_config(phydev);
+ if (ret < 0)
+ return ret;
+
+ if (at803x_match_phy_id(phydev, ATH8031_PHY_ID)) {
+ ret = at8031_pll_config(phydev);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
}
static int at803x_ack_interrupt(struct phy_device *phydev)
@@ -426,9 +706,9 @@ static int at803x_read_status(struct phy_device *phydev)
static struct phy_driver at803x_driver[] = {
{
- /* ATHEROS 8035 */
+ /* Qualcomm Atheros AR8035 */
.phy_id = ATH8035_PHY_ID,
- .name = "Atheros 8035 ethernet",
+ .name = "Qualcomm Atheros AR8035",
.phy_id_mask = AT803X_PHY_ID_MASK,
.probe = at803x_probe,
.config_init = at803x_config_init,
@@ -441,9 +721,9 @@ static struct phy_driver at803x_driver[] = {
.ack_interrupt = at803x_ack_interrupt,
.config_intr = at803x_config_intr,
}, {
- /* ATHEROS 8030 */
+ /* Qualcomm Atheros AR8030 */
.phy_id = ATH8030_PHY_ID,
- .name = "Atheros 8030 ethernet",
+ .name = "Qualcomm Atheros AR8030",
.phy_id_mask = AT803X_PHY_ID_MASK,
.probe = at803x_probe,
.config_init = at803x_config_init,
@@ -456,9 +736,9 @@ static struct phy_driver at803x_driver[] = {
.ack_interrupt = at803x_ack_interrupt,
.config_intr = at803x_config_intr,
}, {
- /* ATHEROS 8031 */
+ /* Qualcomm Atheros AR8031/AR8033 */
.phy_id = ATH8031_PHY_ID,
- .name = "Atheros 8031 ethernet",
+ .name = "Qualcomm Atheros AR8031/AR8033",
.phy_id_mask = AT803X_PHY_ID_MASK,
.probe = at803x_probe,
.config_init = at803x_config_init,
@@ -471,6 +751,15 @@ static struct phy_driver at803x_driver[] = {
.aneg_done = at803x_aneg_done,
.ack_interrupt = &at803x_ack_interrupt,
.config_intr = &at803x_config_intr,
+}, {
+ /* ATHEROS AR9331 */
+ PHY_ID_MATCH_EXACT(ATH9331_PHY_ID),
+ .name = "Qualcomm Atheros AR9331 built-in PHY",
+ .suspend = at803x_suspend,
+ .resume = at803x_resume,
+ /* PHY_BASIC_FEATURES */
+ .ack_interrupt = &at803x_ack_interrupt,
+ .config_intr = &at803x_config_intr,
} };
module_phy_driver(at803x_driver);
@@ -479,6 +768,7 @@ static struct mdio_device_id __maybe_unused atheros_tbl[] = {
{ ATH8030_PHY_ID, AT803X_PHY_ID_MASK },
{ ATH8031_PHY_ID, AT803X_PHY_ID_MASK },
{ ATH8035_PHY_ID, AT803X_PHY_ID_MASK },
+ { PHY_ID_MATCH_EXACT(ATH9331_PHY_ID) },
{ }
};
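The at803x clock-output support above parses DT into a (mask, value) pair and applies it with one MMD read-modify-write. A sketch of that composition using FIELD_PREP(), with clk25m_compose() as a hypothetical helper; the register and field macros are the ones defined earlier in this diff:

        #include <linux/bitfield.h>

        static u16 clk25m_compose(u16 old)
        {
                u16 mask = AT803X_CLK_OUT_MASK;                 /* GENMASK(4, 2) */
                u16 reg  = FIELD_PREP(AT803X_CLK_OUT_MASK,
                                      AT803X_CLK_OUT_50MHZ_PLL);  /* 50 MHz via PLL */

                return (old & ~mask) | reg;     /* as at803x_clk_out_config() does */
        }

Accumulating into priv->clk_25m_reg and priv->clk_25m_mask at parse time keeps the PHY untouched until config_init(), so a failed probe never leaves the clock output half-programmed.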
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index 937d0059e8ac..7d68b28bb893 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -26,18 +26,13 @@ MODULE_DESCRIPTION("Broadcom PHY driver");
MODULE_AUTHOR("Maciej W. Rozycki");
MODULE_LICENSE("GPL");
+static int bcm54xx_config_clock_delay(struct phy_device *phydev);
+
static int bcm54210e_config_init(struct phy_device *phydev)
{
int val;
- val = bcm54xx_auxctl_read(phydev, MII_BCM54XX_AUXCTL_SHDWSEL_MISC);
- val &= ~MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_SKEW_EN;
- val |= MII_BCM54XX_AUXCTL_MISC_WREN;
- bcm54xx_auxctl_write(phydev, MII_BCM54XX_AUXCTL_SHDWSEL_MISC, val);
-
- val = bcm_phy_read_shadow(phydev, BCM54810_SHD_CLK_CTL);
- val &= ~BCM54810_SHD_CLK_CTL_GTXCLK_EN;
- bcm_phy_write_shadow(phydev, BCM54810_SHD_CLK_CTL, val);
+ bcm54xx_config_clock_delay(phydev);
if (phydev->dev_flags & PHY_BRCM_EN_MASTER_MODE) {
val = phy_read(phydev, MII_CTRL1000);
@@ -52,26 +47,7 @@ static int bcm54612e_config_init(struct phy_device *phydev)
{
int reg;
- /* Clear TX internal delay unless requested. */
- if ((phydev->interface != PHY_INTERFACE_MODE_RGMII_ID) &&
- (phydev->interface != PHY_INTERFACE_MODE_RGMII_TXID)) {
- /* Disable TXD to GTXCLK clock delay (default set) */
- /* Bit 9 is the only field in shadow register 00011 */
- bcm_phy_write_shadow(phydev, 0x03, 0);
- }
-
- /* Clear RX internal delay unless requested. */
- if ((phydev->interface != PHY_INTERFACE_MODE_RGMII_ID) &&
- (phydev->interface != PHY_INTERFACE_MODE_RGMII_RXID)) {
- reg = bcm54xx_auxctl_read(phydev,
- MII_BCM54XX_AUXCTL_SHDWSEL_MISC);
- /* Disable RXD to RXC delay (default set) */
- reg &= ~MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_SKEW_EN;
- /* Clear shadow selector field */
- reg &= ~MII_BCM54XX_AUXCTL_SHDWSEL_MASK;
- bcm54xx_auxctl_write(phydev, MII_BCM54XX_AUXCTL_SHDWSEL_MISC,
- MII_BCM54XX_AUXCTL_MISC_WREN | reg);
- }
+ bcm54xx_config_clock_delay(phydev);
/* Enable CLK125 MUX on LED4 if ref clock is enabled. */
if (!(phydev->dev_flags & PHY_BRCM_RX_REFCLK_UNUSED)) {
@@ -383,9 +359,9 @@ static int bcm5482_config_init(struct phy_device *phydev)
/*
* Select 1000BASE-X register set (primary SerDes)
*/
- reg = bcm_phy_read_shadow(phydev, BCM5482_SHD_MODE);
- bcm_phy_write_shadow(phydev, BCM5482_SHD_MODE,
- reg | BCM5482_SHD_MODE_1000BX);
+ reg = bcm_phy_read_shadow(phydev, BCM54XX_SHD_MODE);
+ bcm_phy_write_shadow(phydev, BCM54XX_SHD_MODE,
+ reg | BCM54XX_SHD_MODE_1000BX);
/*
* LED1=ACTIVITYLED, LED3=LINKSPD[2]
@@ -451,12 +427,47 @@ static int bcm5481_config_aneg(struct phy_device *phydev)
return ret;
}
+static int bcm54616s_probe(struct phy_device *phydev)
+{
+ int val, intf_sel;
+
+ val = bcm_phy_read_shadow(phydev, BCM54XX_SHD_MODE);
+ if (val < 0)
+ return val;
+
+ /* The PHY is strapped in RGMII-fiber mode when INTERF_SEL[1:0]
+ * is 01b, and the link between PHY and its link partner can be
+ * either 1000Base-X or 100Base-FX.
+ * RGMII-1000Base-X is properly supported, but RGMII-100Base-FX
+ * support is still missing.
+ */
+ intf_sel = (val & BCM54XX_SHD_INTF_SEL_MASK) >> 1;
+ if (intf_sel == 1) {
+ val = bcm_phy_read_shadow(phydev, BCM54616S_SHD_100FX_CTRL);
+ if (val < 0)
+ return val;
+
+ /* Bit 0 of the SerDes 100-FX Control register, when set
+ * to 1, sets the MII/RGMII -> 100BASE-FX configuration.
+ * When this bit is set to 0, it sets the GMII/RGMII ->
+ * 1000BASE-X configuration.
+ */
+ if (!(val & BCM54616S_100FX_MODE))
+ phydev->dev_flags |= PHY_BCM_FLAGS_MODE_1000BX;
+ }
+
+ return 0;
+}
+
static int bcm54616s_config_aneg(struct phy_device *phydev)
{
int ret;
/* Aneg firstly. */
- ret = genphy_config_aneg(phydev);
+ if (phydev->dev_flags & PHY_BCM_FLAGS_MODE_1000BX)
+ ret = genphy_c37_config_aneg(phydev);
+ else
+ ret = genphy_config_aneg(phydev);
/* Then we can set up the delay. */
bcm54xx_config_clock_delay(phydev);
@@ -464,6 +475,18 @@ static int bcm54616s_config_aneg(struct phy_device *phydev)
return ret;
}
+static int bcm54616s_read_status(struct phy_device *phydev)
+{
+ int err;
+
+ if (phydev->dev_flags & PHY_BCM_FLAGS_MODE_1000BX)
+ err = genphy_c37_read_status(phydev);
+ else
+ err = genphy_read_status(phydev);
+
+ return err;
+}
+
static int brcm_phy_setbits(struct phy_device *phydev, int reg, int set)
{
int val;
@@ -655,6 +678,8 @@ static struct phy_driver broadcom_drivers[] = {
.config_aneg = bcm54616s_config_aneg,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
+ .read_status = bcm54616s_read_status,
+ .probe = bcm54616s_probe,
}, {
.phy_id = PHY_ID_BCM5464,
.phy_id_mask = 0xfffffff0,
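The BCM54616S changes above read the INTERF_SEL strap once in probe, cache the result in phydev->dev_flags, and branch on it in config_aneg and read_status. A short sketch of that detect-once, branch-later pattern; my_read_status is a hypothetical name, while the genphy helpers are the real ones used above:

        static int my_read_status(struct phy_device *phydev)
        {
                if (phydev->dev_flags & PHY_BCM_FLAGS_MODE_1000BX)
                        return genphy_c37_read_status(phydev); /* clause 37, 1000Base-X */

                return genphy_read_status(phydev);              /* copper autoneg */
        }

Caching the strap in probe avoids an extra shadow-register read on every link poll.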
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 6580094161a9..8f241b57fcf6 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -469,6 +469,19 @@ static int ptp_dp83640_enable(struct ptp_clock_info *ptp,
switch (rq->type) {
case PTP_CLK_REQ_EXTTS:
+ /* Reject requests with unsupported flags */
+ if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
+ PTP_RISING_EDGE |
+ PTP_FALLING_EDGE |
+ PTP_STRICT_FLAGS))
+ return -EOPNOTSUPP;
+
+ /* Reject requests to enable time stamping on both edges. */
+ if ((rq->extts.flags & PTP_STRICT_FLAGS) &&
+ (rq->extts.flags & PTP_ENABLE_FEATURE) &&
+ (rq->extts.flags & PTP_EXTTS_EDGES) == PTP_EXTTS_EDGES)
+ return -EOPNOTSUPP;
+
index = rq->extts.index;
if (index >= N_EXT_TS)
return -EINVAL;
@@ -491,6 +504,9 @@ static int ptp_dp83640_enable(struct ptp_clock_info *ptp,
return 0;
case PTP_CLK_REQ_PEROUT:
+ /* Reject requests with unsupported flags */
+ if (rq->perout.flags)
+ return -EOPNOTSUPP;
if (rq->perout.index >= N_PER_OUT)
return -EINVAL;
return periodic_output(clock, rq, on, rq->perout.index);
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index 37fceaf9fa10..0b95e7a2e273 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -12,6 +12,8 @@
#include <linux/of.h>
#include <linux/phy.h>
#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
#include <dt-bindings/net/ti-dp83867.h>
@@ -21,8 +23,9 @@
#define MII_DP83867_PHYCTRL 0x10
#define MII_DP83867_MICR 0x12
#define MII_DP83867_ISR 0x13
-#define DP83867_CTRL 0x1f
+#define DP83867_CFG2 0x14
#define DP83867_CFG3 0x1e
+#define DP83867_CTRL 0x1f
/* Extended Registers */
#define DP83867_CFG4 0x0031
@@ -36,6 +39,13 @@
#define DP83867_STRAP_STS1 0x006E
#define DP83867_STRAP_STS2 0x006f
#define DP83867_RGMIIDCTL 0x0086
+#define DP83867_RXFCFG 0x0134
+#define DP83867_RXFPMD1 0x0136
+#define DP83867_RXFPMD2 0x0137
+#define DP83867_RXFPMD3 0x0138
+#define DP83867_RXFSOP1 0x0139
+#define DP83867_RXFSOP2 0x013A
+#define DP83867_RXFSOP3 0x013B
#define DP83867_IO_MUX_CFG 0x0170
#define DP83867_SGMIICTL 0x00D3
#define DP83867_10M_SGMII_CFG 0x016F
@@ -65,6 +75,13 @@
/* SGMIICTL bits */
#define DP83867_SGMII_TYPE BIT(14)
+/* RXFCFG bits */
+#define DP83867_WOL_MAGIC_EN BIT(0)
+#define DP83867_WOL_BCAST_EN BIT(2)
+#define DP83867_WOL_UCAST_EN BIT(4)
+#define DP83867_WOL_SEC_EN BIT(5)
+#define DP83867_WOL_ENH_MAC BIT(7)
+
/* STRAP_STS1 bits */
#define DP83867_STRAP_STS1_RESERVED BIT(11)
@@ -95,6 +112,10 @@
#define DP83867_IO_MUX_CFG_CLK_O_SEL_MASK (0x1f << 8)
#define DP83867_IO_MUX_CFG_CLK_O_SEL_SHIFT 8
+/* CFG3 bits */
+#define DP83867_CFG3_INT_OE BIT(7)
+#define DP83867_CFG3_ROBUST_AUTO_MDIX BIT(9)
+
/* CFG4 bits */
#define DP83867_CFG4_PORT_MIRROR_EN BIT(0)
@@ -126,6 +147,115 @@ static int dp83867_ack_interrupt(struct phy_device *phydev)
return 0;
}
+static int dp83867_set_wol(struct phy_device *phydev,
+ struct ethtool_wolinfo *wol)
+{
+ struct net_device *ndev = phydev->attached_dev;
+ u16 val_rxcfg, val_micr;
+ u8 *mac;
+
+ val_rxcfg = phy_read_mmd(phydev, DP83867_DEVADDR, DP83867_RXFCFG);
+ val_micr = phy_read(phydev, MII_DP83867_MICR);
+
+ if (wol->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_UCAST |
+ WAKE_BCAST)) {
+ val_rxcfg |= DP83867_WOL_ENH_MAC;
+ val_micr |= MII_DP83867_MICR_WOL_INT_EN;
+
+ if (wol->wolopts & WAKE_MAGIC) {
+ mac = (u8 *)ndev->dev_addr;
+
+ if (!is_valid_ether_addr(mac))
+ return -EINVAL;
+
+ phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_RXFPMD1,
+ (mac[1] << 8 | mac[0]));
+ phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_RXFPMD2,
+ (mac[3] << 8 | mac[2]));
+ phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_RXFPMD3,
+ (mac[5] << 8 | mac[4]));
+
+ val_rxcfg |= DP83867_WOL_MAGIC_EN;
+ } else {
+ val_rxcfg &= ~DP83867_WOL_MAGIC_EN;
+ }
+
+ if (wol->wolopts & WAKE_MAGICSECURE) {
+ phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_RXFSOP1,
+ (wol->sopass[1] << 8) | wol->sopass[0]);
+ phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_RXFSOP2,
+ (wol->sopass[3] << 8) | wol->sopass[2]);
+ phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_RXFSOP3,
+ (wol->sopass[5] << 8) | wol->sopass[4]);
+
+ val_rxcfg |= DP83867_WOL_SEC_EN;
+ } else {
+ val_rxcfg &= ~DP83867_WOL_SEC_EN;
+ }
+
+ if (wol->wolopts & WAKE_UCAST)
+ val_rxcfg |= DP83867_WOL_UCAST_EN;
+ else
+ val_rxcfg &= ~DP83867_WOL_UCAST_EN;
+
+ if (wol->wolopts & WAKE_BCAST)
+ val_rxcfg |= DP83867_WOL_BCAST_EN;
+ else
+ val_rxcfg &= ~DP83867_WOL_BCAST_EN;
+ } else {
+ val_rxcfg &= ~DP83867_WOL_ENH_MAC;
+ val_micr &= ~MII_DP83867_MICR_WOL_INT_EN;
+ }
+
+ phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_RXFCFG, val_rxcfg);
+ phy_write(phydev, MII_DP83867_MICR, val_micr);
+
+ return 0;
+}
+
+static void dp83867_get_wol(struct phy_device *phydev,
+ struct ethtool_wolinfo *wol)
+{
+ u16 value, sopass_val;
+
+ wol->supported = (WAKE_UCAST | WAKE_BCAST | WAKE_MAGIC |
+ WAKE_MAGICSECURE);
+ wol->wolopts = 0;
+
+ value = phy_read_mmd(phydev, DP83867_DEVADDR, DP83867_RXFCFG);
+
+ if (value & DP83867_WOL_UCAST_EN)
+ wol->wolopts |= WAKE_UCAST;
+
+ if (value & DP83867_WOL_BCAST_EN)
+ wol->wolopts |= WAKE_BCAST;
+
+ if (value & DP83867_WOL_MAGIC_EN)
+ wol->wolopts |= WAKE_MAGIC;
+
+ if (value & DP83867_WOL_SEC_EN) {
+ sopass_val = phy_read_mmd(phydev, DP83867_DEVADDR,
+ DP83867_RXFSOP1);
+ wol->sopass[0] = (sopass_val & 0xff);
+ wol->sopass[1] = (sopass_val >> 8);
+
+ sopass_val = phy_read_mmd(phydev, DP83867_DEVADDR,
+ DP83867_RXFSOP2);
+ wol->sopass[2] = (sopass_val & 0xff);
+ wol->sopass[3] = (sopass_val >> 8);
+
+ sopass_val = phy_read_mmd(phydev, DP83867_DEVADDR,
+ DP83867_RXFSOP3);
+ wol->sopass[4] = (sopass_val & 0xff);
+ wol->sopass[5] = (sopass_val >> 8);
+
+ wol->wolopts |= WAKE_MAGICSECURE;
+ }
+
+ if (!(value & DP83867_WOL_ENH_MAC))
+ wol->wolopts = 0;
+}
+
static int dp83867_config_intr(struct phy_device *phydev)
{
int micr_status;
@@ -295,7 +425,7 @@ static int dp83867_probe(struct phy_device *phydev)
phydev->priv = dp83867;
- return 0;
+ return dp83867_of_init(phydev);
}
static int dp83867_config_init(struct phy_device *phydev)
@@ -304,10 +434,6 @@ static int dp83867_config_init(struct phy_device *phydev)
int ret, val, bs;
u16 delay;
- ret = dp83867_of_init(phydev);
- if (ret)
- return ret;
-
/* RX_DV/RX_CTRL strapped in mode 1 or mode 2 workaround */
if (dp83867->rxctrl_strap_quirk)
phy_clear_bits_mmd(phydev, DP83867_DEVADDR, DP83867_CFG4,
@@ -410,12 +536,13 @@ static int dp83867_config_init(struct phy_device *phydev)
phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_SGMIICTL, val);
}
+ val = phy_read(phydev, DP83867_CFG3);
/* Enable Interrupt output INT_OE in CFG3 register */
- if (phy_interrupt_is_valid(phydev)) {
- val = phy_read(phydev, DP83867_CFG3);
- val |= BIT(7);
- phy_write(phydev, DP83867_CFG3, val);
- }
+ if (phy_interrupt_is_valid(phydev))
+ val |= DP83867_CFG3_INT_OE;
+
+ val |= DP83867_CFG3_ROBUST_AUTO_MDIX;
+ phy_write(phydev, DP83867_CFG3, val);
if (dp83867->port_mirroring != DP83867_PORT_MIRROING_KEEP)
dp83867_config_port_mirroring(phydev);
@@ -463,6 +590,9 @@ static struct phy_driver dp83867_driver[] = {
.config_init = dp83867_config_init,
.soft_reset = dp83867_phy_reset,
+ .get_wol = dp83867_get_wol,
+ .set_wol = dp83867_set_wol,
+
/* IRQ related */
.ack_interrupt = dp83867_ack_interrupt,
.config_intr = dp83867_config_intr,
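dp83867_set_wol() above packs the station MAC and the SecureOn password into little-endian 16-bit register halves. A standalone sketch of the packing, with mac_to_rxfpmd() as a hypothetical helper mirroring the RXFPMD1..3 writes:

        #include <stdint.h>

        static void mac_to_rxfpmd(const uint8_t mac[6], uint16_t rxfpmd[3])
        {
                rxfpmd[0] = (uint16_t)(mac[1] << 8) | mac[0];   /* RXFPMD1 */
                rxfpmd[1] = (uint16_t)(mac[3] << 8) | mac[2];   /* RXFPMD2 */
                rxfpmd[2] = (uint16_t)(mac[5] << 8) | mac[4];   /* RXFPMD3 */
        }

The same byte order is used when dp83867_get_wol() reassembles the sopass bytes from RXFSOP1..3.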
diff --git a/drivers/net/phy/dp83869.c b/drivers/net/phy/dp83869.c
new file mode 100644
index 000000000000..1c7a7c57dec3
--- /dev/null
+++ b/drivers/net/phy/dp83869.c
@@ -0,0 +1,431 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Driver for the Texas Instruments DP83869 PHY
+ * Copyright (C) 2019 Texas Instruments Inc.
+ */
+
+#include <linux/ethtool.h>
+#include <linux/kernel.h>
+#include <linux/mii.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/phy.h>
+#include <linux/delay.h>
+
+#include <dt-bindings/net/ti-dp83869.h>
+
+#define DP83869_PHY_ID 0x2000a0f1
+#define DP83869_DEVADDR 0x1f
+
+#define MII_DP83869_PHYCTRL 0x10
+#define MII_DP83869_MICR 0x12
+#define MII_DP83869_ISR 0x13
+#define DP83869_CTRL 0x1f
+#define DP83869_CFG4 0x1e
+
+/* Extended Registers */
+#define DP83869_GEN_CFG3 0x0031
+#define DP83869_RGMIICTL 0x0032
+#define DP83869_STRAP_STS1 0x006e
+#define DP83869_RGMIIDCTL 0x0086
+#define DP83869_IO_MUX_CFG 0x0170
+#define DP83869_OP_MODE 0x01df
+#define DP83869_FX_CTRL 0x0c00
+
+#define DP83869_SW_RESET BIT(15)
+#define DP83869_SW_RESTART BIT(14)
+
+/* MICR Interrupt bits */
+#define MII_DP83869_MICR_AN_ERR_INT_EN BIT(15)
+#define MII_DP83869_MICR_SPEED_CHNG_INT_EN BIT(14)
+#define MII_DP83869_MICR_DUP_MODE_CHNG_INT_EN BIT(13)
+#define MII_DP83869_MICR_PAGE_RXD_INT_EN BIT(12)
+#define MII_DP83869_MICR_AUTONEG_COMP_INT_EN BIT(11)
+#define MII_DP83869_MICR_LINK_STS_CHNG_INT_EN BIT(10)
+#define MII_DP83869_MICR_FALSE_CARRIER_INT_EN BIT(8)
+#define MII_DP83869_MICR_SLEEP_MODE_CHNG_INT_EN BIT(4)
+#define MII_DP83869_MICR_WOL_INT_EN BIT(3)
+#define MII_DP83869_MICR_XGMII_ERR_INT_EN BIT(2)
+#define MII_DP83869_MICR_POL_CHNG_INT_EN BIT(1)
+#define MII_DP83869_MICR_JABBER_INT_EN BIT(0)
+
+#define MII_DP83869_BMCR_DEFAULT (BMCR_ANENABLE | \
+ BMCR_FULLDPLX | \
+ BMCR_SPEED1000)
+
+/* This is the same bit mask as the BMCR so re-use the BMCR default */
+#define DP83869_FX_CTRL_DEFAULT MII_DP83869_BMCR_DEFAULT
+
+/* CFG1 bits */
+#define DP83869_CFG1_DEFAULT (ADVERTISE_1000HALF | \
+ ADVERTISE_1000FULL | \
+ CTL1000_AS_MASTER)
+
+/* RGMIICTL bits */
+#define DP83869_RGMII_TX_CLK_DELAY_EN BIT(1)
+#define DP83869_RGMII_RX_CLK_DELAY_EN BIT(0)
+
+/* STRAP_STS1 bits */
+#define DP83869_STRAP_STS1_RESERVED BIT(11)
+
+/* PHYCTRL bits */
+#define DP83869_RX_FIFO_SHIFT 12
+#define DP83869_TX_FIFO_SHIFT 14
+
+/* PHY_CTRL lower bytes 0x48 are declared as reserved */
+#define DP83869_PHY_CTRL_DEFAULT 0x48
+#define DP83869_PHYCR_FIFO_DEPTH_MASK GENMASK(15, 12)
+#define DP83869_PHYCR_RESERVED_MASK BIT(11)
+
+/* RGMIIDCTL bits */
+#define DP83869_RGMII_TX_CLK_DELAY_SHIFT 4
+
+/* IO_MUX_CFG bits */
+#define DP83869_IO_MUX_CFG_IO_IMPEDANCE_CTRL 0x1f
+
+#define DP83869_IO_MUX_CFG_IO_IMPEDANCE_MAX 0x0
+#define DP83869_IO_MUX_CFG_IO_IMPEDANCE_MIN 0x1f
+#define DP83869_IO_MUX_CFG_CLK_O_SEL_MASK (0x1f << 8)
+#define DP83869_IO_MUX_CFG_CLK_O_SEL_SHIFT 8
+
+/* CFG3 bits */
+#define DP83869_CFG3_PORT_MIRROR_EN BIT(0)
+
+/* CFG4 bits */
+#define DP83869_INT_OE BIT(7)
+
+/* OP MODE */
+#define DP83869_OP_MODE_MII BIT(5)
+#define DP83869_SGMII_RGMII_BRIDGE BIT(6)
+
+enum {
+ DP83869_PORT_MIRRORING_KEEP,
+ DP83869_PORT_MIRRORING_EN,
+ DP83869_PORT_MIRRORING_DIS,
+};
+
+struct dp83869_private {
+ int tx_fifo_depth;
+ int rx_fifo_depth;
+ int io_impedance;
+ int port_mirroring;
+ bool rxctrl_strap_quirk;
+ int clk_output_sel;
+ int mode;
+};
+
+static int dp83869_ack_interrupt(struct phy_device *phydev)
+{
+ int err = phy_read(phydev, MII_DP83869_ISR);
+
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static int dp83869_config_intr(struct phy_device *phydev)
+{
+ int micr_status = 0;
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ micr_status = phy_read(phydev, MII_DP83869_MICR);
+ if (micr_status < 0)
+ return micr_status;
+
+ micr_status |=
+ (MII_DP83869_MICR_AN_ERR_INT_EN |
+ MII_DP83869_MICR_SPEED_CHNG_INT_EN |
+ MII_DP83869_MICR_AUTONEG_COMP_INT_EN |
+ MII_DP83869_MICR_LINK_STS_CHNG_INT_EN |
+ MII_DP83869_MICR_DUP_MODE_CHNG_INT_EN |
+ MII_DP83869_MICR_SLEEP_MODE_CHNG_INT_EN);
+
+ return phy_write(phydev, MII_DP83869_MICR, micr_status);
+ }
+
+ return phy_write(phydev, MII_DP83869_MICR, micr_status);
+}
+
+static int dp83869_config_port_mirroring(struct phy_device *phydev)
+{
+ struct dp83869_private *dp83869 = phydev->priv;
+
+ if (dp83869->port_mirroring == DP83869_PORT_MIRRORING_EN)
+ phy_set_bits_mmd(phydev, DP83869_DEVADDR, DP83869_GEN_CFG3,
+ DP83869_CFG3_PORT_MIRROR_EN);
+ else
+ phy_clear_bits_mmd(phydev, DP83869_DEVADDR, DP83869_GEN_CFG3,
+ DP83869_CFG3_PORT_MIRROR_EN);
+
+ return 0;
+}
+
+#ifdef CONFIG_OF_MDIO
+static int dp83869_of_init(struct phy_device *phydev)
+{
+ struct dp83869_private *dp83869 = phydev->priv;
+ struct device *dev = &phydev->mdio.dev;
+ struct device_node *of_node = dev->of_node;
+ int ret;
+
+ if (!of_node)
+ return -ENODEV;
+
+ dp83869->io_impedance = -EINVAL;
+
+ /* Optional configuration */
+ ret = of_property_read_u32(of_node, "ti,clk-output-sel",
+ &dp83869->clk_output_sel);
+ if (ret || dp83869->clk_output_sel > DP83869_CLK_O_SEL_REF_CLK)
+ dp83869->clk_output_sel = DP83869_CLK_O_SEL_REF_CLK;
+
+ ret = of_property_read_u32(of_node, "ti,op-mode", &dp83869->mode);
+ if (ret == 0) {
+ if (dp83869->mode < DP83869_RGMII_COPPER_ETHERNET ||
+ dp83869->mode > DP83869_SGMII_COPPER_ETHERNET)
+ return -EINVAL;
+ }
+
+ if (of_property_read_bool(of_node, "ti,max-output-impedance"))
+ dp83869->io_impedance = DP83869_IO_MUX_CFG_IO_IMPEDANCE_MAX;
+ else if (of_property_read_bool(of_node, "ti,min-output-impedance"))
+ dp83869->io_impedance = DP83869_IO_MUX_CFG_IO_IMPEDANCE_MIN;
+
+ if (of_property_read_bool(of_node, "enet-phy-lane-swap"))
+ dp83869->port_mirroring = DP83869_PORT_MIRRORING_EN;
+ else
+ dp83869->port_mirroring = DP83869_PORT_MIRRORING_DIS;
+
+ if (of_property_read_u32(of_node, "rx-fifo-depth",
+ &dp83869->rx_fifo_depth))
+ dp83869->rx_fifo_depth = DP83869_PHYCR_FIFO_DEPTH_4_B_NIB;
+
+ if (of_property_read_u32(of_node, "tx-fifo-depth",
+ &dp83869->tx_fifo_depth))
+ dp83869->tx_fifo_depth = DP83869_PHYCR_FIFO_DEPTH_4_B_NIB;
+
+ return 0;
+}
+#else
+static int dp83869_of_init(struct phy_device *phydev)
+{
+ return 0;
+}
+#endif /* CONFIG_OF_MDIO */
+
+static int dp83869_configure_rgmii(struct phy_device *phydev,
+ struct dp83869_private *dp83869)
+{
+ int ret, val;
+
+ if (phy_interface_is_rgmii(phydev)) {
+ val = phy_read(phydev, MII_DP83869_PHYCTRL);
+ if (val < 0)
+ return val;
+
+ val &= ~DP83869_PHYCR_FIFO_DEPTH_MASK;
+ val |= (dp83869->tx_fifo_depth << DP83869_TX_FIFO_SHIFT);
+ val |= (dp83869->rx_fifo_depth << DP83869_RX_FIFO_SHIFT);
+
+ ret = phy_write(phydev, MII_DP83869_PHYCTRL, val);
+ if (ret)
+ return ret;
+ }
+
+ if (dp83869->io_impedance >= 0)
+ phy_modify_mmd(phydev, DP83869_DEVADDR,
+ DP83869_IO_MUX_CFG,
+ DP83869_IO_MUX_CFG_IO_IMPEDANCE_CTRL,
+ dp83869->io_impedance &
+ DP83869_IO_MUX_CFG_IO_IMPEDANCE_CTRL);
+
+ return 0;
+}
+
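For reference, a standalone sketch of the bit packing performed above: the GENMASK() stand-in mirrors include/linux/bits.h for 32-bit values, and the FIFO depth values are hypothetical.

#include <stdio.h>
#include <stdint.h>

/* 32-bit stand-in for the kernel's GENMASK() */
#define GENMASK(h, l)	(((~0u) << (l)) & (~0u >> (31 - (h))))

#define DP83869_RX_FIFO_SHIFT		12
#define DP83869_TX_FIFO_SHIFT		14
#define DP83869_PHYCR_FIFO_DEPTH_MASK	GENMASK(15, 12)

int main(void)
{
	uint32_t val = 0xffff;			/* PHYCTRL read back as all-ones */
	uint32_t tx_depth = 1, rx_depth = 1;	/* hypothetical FIFO depths */

	val &= ~DP83869_PHYCR_FIFO_DEPTH_MASK;
	val |= tx_depth << DP83869_TX_FIFO_SHIFT;
	val |= rx_depth << DP83869_RX_FIFO_SHIFT;

	printf("PHYCTRL = 0x%04x\n", (unsigned int)val);	/* prints 0x5fff */
	return 0;
}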
+static int dp83869_configure_mode(struct phy_device *phydev,
+ struct dp83869_private *dp83869)
+{
+ int phy_ctrl_val;
+ int ret;
+
+ if (dp83869->mode < DP83869_RGMII_COPPER_ETHERNET ||
+ dp83869->mode > DP83869_SGMII_COPPER_ETHERNET)
+ return -EINVAL;
+
+	/* The init sequence for each operational mode below is defined in
+ * section 9.4.8 of the datasheet.
+ */
+ ret = phy_write_mmd(phydev, DP83869_DEVADDR, DP83869_OP_MODE,
+ dp83869->mode);
+ if (ret)
+ return ret;
+
+ ret = phy_write(phydev, MII_BMCR, MII_DP83869_BMCR_DEFAULT);
+ if (ret)
+ return ret;
+
+ phy_ctrl_val = (dp83869->rx_fifo_depth << DP83869_RX_FIFO_SHIFT |
+ dp83869->tx_fifo_depth << DP83869_TX_FIFO_SHIFT |
+ DP83869_PHY_CTRL_DEFAULT);
+
+ switch (dp83869->mode) {
+ case DP83869_RGMII_COPPER_ETHERNET:
+ ret = phy_write(phydev, MII_DP83869_PHYCTRL,
+ phy_ctrl_val);
+ if (ret)
+ return ret;
+
+ ret = phy_write(phydev, MII_CTRL1000, DP83869_CFG1_DEFAULT);
+ if (ret)
+ return ret;
+
+ ret = dp83869_configure_rgmii(phydev, dp83869);
+ if (ret)
+ return ret;
+ break;
+ case DP83869_RGMII_SGMII_BRIDGE:
+ phy_modify_mmd(phydev, DP83869_DEVADDR, DP83869_OP_MODE,
+ DP83869_SGMII_RGMII_BRIDGE,
+ DP83869_SGMII_RGMII_BRIDGE);
+
+ ret = phy_write_mmd(phydev, DP83869_DEVADDR,
+ DP83869_FX_CTRL, DP83869_FX_CTRL_DEFAULT);
+ if (ret)
+ return ret;
+
+ break;
+ case DP83869_1000M_MEDIA_CONVERT:
+ ret = phy_write(phydev, MII_DP83869_PHYCTRL,
+ phy_ctrl_val);
+ if (ret)
+ return ret;
+
+ ret = phy_write_mmd(phydev, DP83869_DEVADDR,
+ DP83869_FX_CTRL, DP83869_FX_CTRL_DEFAULT);
+ if (ret)
+ return ret;
+ break;
+ case DP83869_100M_MEDIA_CONVERT:
+ ret = phy_write(phydev, MII_DP83869_PHYCTRL,
+ phy_ctrl_val);
+ if (ret)
+ return ret;
+ break;
+ case DP83869_SGMII_COPPER_ETHERNET:
+ ret = phy_write(phydev, MII_DP83869_PHYCTRL,
+ phy_ctrl_val);
+ if (ret)
+ return ret;
+
+ ret = phy_write(phydev, MII_CTRL1000, DP83869_CFG1_DEFAULT);
+ if (ret)
+ return ret;
+
+ ret = phy_write_mmd(phydev, DP83869_DEVADDR,
+ DP83869_FX_CTRL, DP83869_FX_CTRL_DEFAULT);
+ if (ret)
+ return ret;
+
+ break;
+ case DP83869_RGMII_1000_BASE:
+ case DP83869_RGMII_100_BASE:
+ break;
+ default:
+ return -EINVAL;
+	}
+
+ return 0;
+}
+
+static int dp83869_config_init(struct phy_device *phydev)
+{
+ struct dp83869_private *dp83869 = phydev->priv;
+ int ret, val;
+
+ ret = dp83869_configure_mode(phydev, dp83869);
+ if (ret)
+ return ret;
+
+ /* Enable Interrupt output INT_OE in CFG4 register */
+	if (phy_interrupt_is_valid(phydev)) {
+		val = phy_read(phydev, DP83869_CFG4);
+		if (val < 0)
+			return val;
+		val |= DP83869_INT_OE;
+		phy_write(phydev, DP83869_CFG4, val);
+	}
+
+ if (dp83869->port_mirroring != DP83869_PORT_MIRRORING_KEEP)
+ dp83869_config_port_mirroring(phydev);
+
+ /* Clock output selection if muxing property is set */
+ if (dp83869->clk_output_sel != DP83869_CLK_O_SEL_REF_CLK)
+ phy_modify_mmd(phydev, DP83869_DEVADDR, DP83869_IO_MUX_CFG,
+ DP83869_IO_MUX_CFG_CLK_O_SEL_MASK,
+ dp83869->clk_output_sel <<
+ DP83869_IO_MUX_CFG_CLK_O_SEL_SHIFT);
+
+ return 0;
+}
+
+static int dp83869_probe(struct phy_device *phydev)
+{
+ struct dp83869_private *dp83869;
+ int ret;
+
+ dp83869 = devm_kzalloc(&phydev->mdio.dev, sizeof(*dp83869),
+ GFP_KERNEL);
+ if (!dp83869)
+ return -ENOMEM;
+
+ phydev->priv = dp83869;
+
+ ret = dp83869_of_init(phydev);
+ if (ret)
+ return ret;
+
+ return dp83869_config_init(phydev);
+}
+
+static int dp83869_phy_reset(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = phy_write(phydev, DP83869_CTRL, DP83869_SW_RESET);
+ if (ret < 0)
+ return ret;
+
+ usleep_range(10, 20);
+
+	/* A global SW reset returns all registers to their defaults, so the
+	 * PHY configuration must be reapplied.
+ */
+ return dp83869_config_init(phydev);
+}
+
+static struct phy_driver dp83869_driver[] = {
+ {
+ PHY_ID_MATCH_MODEL(DP83869_PHY_ID),
+ .name = "TI DP83869",
+
+ .probe = dp83869_probe,
+ .config_init = dp83869_config_init,
+ .soft_reset = dp83869_phy_reset,
+
+ /* IRQ related */
+ .ack_interrupt = dp83869_ack_interrupt,
+ .config_intr = dp83869_config_intr,
+
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ },
+};
+module_phy_driver(dp83869_driver);
+
+static struct mdio_device_id __maybe_unused dp83869_tbl[] = {
+ { PHY_ID_MATCH_MODEL(DP83869_PHY_ID) },
+ { }
+};
+MODULE_DEVICE_TABLE(mdio, dp83869_tbl);
+
+MODULE_DESCRIPTION("Texas Instruments DP83869 PHY driver");
+MODULE_AUTHOR("Dan Murphy <dmurphy@ti.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index a7796134e3be..b1fbd1937328 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -53,16 +53,22 @@
#define MII_M1011_PHY_SCR 0x10
#define MII_M1011_PHY_SCR_DOWNSHIFT_EN BIT(11)
-#define MII_M1011_PHY_SCR_DOWNSHIFT_SHIFT 12
-#define MII_M1011_PHY_SRC_DOWNSHIFT_MASK 0x7800
+#define MII_M1011_PHY_SCR_DOWNSHIFT_MASK GENMASK(14, 12)
+#define MII_M1011_PHY_SCR_DOWNSHIFT_MAX 8
#define MII_M1011_PHY_SCR_MDI (0x0 << 5)
#define MII_M1011_PHY_SCR_MDI_X (0x1 << 5)
#define MII_M1011_PHY_SCR_AUTO_CROSS (0x3 << 5)
+#define MII_M1011_PHY_SSR 0x11
+#define MII_M1011_PHY_SSR_DOWNSHIFT BIT(5)
+
#define MII_M1111_PHY_LED_CONTROL 0x18
#define MII_M1111_PHY_LED_DIRECT 0x4100
#define MII_M1111_PHY_LED_COMBINE 0x411c
#define MII_M1111_PHY_EXT_CR 0x14
+#define MII_M1111_PHY_EXT_CR_DOWNSHIFT_MASK GENMASK(11, 9)
+#define MII_M1111_PHY_EXT_CR_DOWNSHIFT_MAX 8
+#define MII_M1111_PHY_EXT_CR_DOWNSHIFT_EN BIT(8)
#define MII_M1111_RGMII_RX_DELAY BIT(7)
#define MII_M1111_RGMII_TX_DELAY BIT(1)
#define MII_M1111_PHY_EXT_SR 0x1b
@@ -273,23 +279,6 @@ static int marvell_set_polarity(struct phy_device *phydev, int polarity)
return val != reg;
}
-static int marvell_set_downshift(struct phy_device *phydev, bool enable,
- u8 retries)
-{
- int reg;
-
- reg = phy_read(phydev, MII_M1011_PHY_SCR);
- if (reg < 0)
- return reg;
-
- reg &= MII_M1011_PHY_SRC_DOWNSHIFT_MASK;
- reg |= ((retries - 1) << MII_M1011_PHY_SCR_DOWNSHIFT_SHIFT);
- if (enable)
- reg |= MII_M1011_PHY_SCR_DOWNSHIFT_EN;
-
- return phy_write(phydev, MII_M1011_PHY_SCR, reg);
-}
-
static int marvell_config_aneg(struct phy_device *phydev)
{
int changed = 0;
@@ -658,41 +647,6 @@ static int marvell_config_init(struct phy_device *phydev)
return marvell_of_reg_init(phydev);
}
-static int m88e1116r_config_init(struct phy_device *phydev)
-{
- int err;
-
- err = genphy_soft_reset(phydev);
- if (err < 0)
- return err;
-
- msleep(500);
-
- err = marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE);
- if (err < 0)
- return err;
-
- err = marvell_set_polarity(phydev, phydev->mdix_ctrl);
- if (err < 0)
- return err;
-
- err = marvell_set_downshift(phydev, true, 8);
- if (err < 0)
- return err;
-
- if (phy_interface_is_rgmii(phydev)) {
- err = m88e1121_config_aneg_rgmii_delays(phydev);
- if (err < 0)
- return err;
- }
-
- err = genphy_soft_reset(phydev);
- if (err < 0)
- return err;
-
- return marvell_config_init(phydev);
-}
-
static int m88e3016_config_init(struct phy_device *phydev)
{
int ret;
@@ -833,6 +787,172 @@ static int m88e1111_config_init(struct phy_device *phydev)
return genphy_soft_reset(phydev);
}
+static int m88e1111_get_downshift(struct phy_device *phydev, u8 *data)
+{
+ int val, cnt, enable;
+
+ val = phy_read(phydev, MII_M1111_PHY_EXT_CR);
+ if (val < 0)
+ return val;
+
+ enable = FIELD_GET(MII_M1111_PHY_EXT_CR_DOWNSHIFT_EN, val);
+ cnt = FIELD_GET(MII_M1111_PHY_EXT_CR_DOWNSHIFT_MASK, val) + 1;
+
+ *data = enable ? cnt : DOWNSHIFT_DEV_DISABLE;
+
+ return 0;
+}
+
+static int m88e1111_set_downshift(struct phy_device *phydev, u8 cnt)
+{
+ int val;
+
+ if (cnt > MII_M1111_PHY_EXT_CR_DOWNSHIFT_MAX)
+ return -E2BIG;
+
+ if (!cnt)
+ return phy_clear_bits(phydev, MII_M1111_PHY_EXT_CR,
+ MII_M1111_PHY_EXT_CR_DOWNSHIFT_EN);
+
+ val = MII_M1111_PHY_EXT_CR_DOWNSHIFT_EN;
+ val |= FIELD_PREP(MII_M1111_PHY_EXT_CR_DOWNSHIFT_MASK, cnt - 1);
+
+ return phy_modify(phydev, MII_M1111_PHY_EXT_CR,
+ MII_M1111_PHY_EXT_CR_DOWNSHIFT_EN |
+ MII_M1111_PHY_EXT_CR_DOWNSHIFT_MASK,
+ val);
+}
+
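The encoding above stores cnt - 1 in a three-bit field. A standalone sanity check of that arithmetic, using simplified stand-ins for the include/linux/bitfield.h macros (valid only for compile-time-constant masks; the cnt value is arbitrary):

#include <stdio.h>
#include <stdint.h>

#define GENMASK(h, l)	(((~0u) << (l)) & (~0u >> (31 - (h))))
/* simplified stand-ins for include/linux/bitfield.h */
#define __bf_shf(x)		(__builtin_ffs(x) - 1)
#define FIELD_PREP(mask, val)	(((uint32_t)(val) << __bf_shf(mask)) & (mask))
#define FIELD_GET(mask, reg)	(((uint32_t)(reg) & (mask)) >> __bf_shf(mask))

#define MII_M1111_PHY_EXT_CR_DOWNSHIFT_MASK	GENMASK(11, 9)
#define MII_M1111_PHY_EXT_CR_DOWNSHIFT_EN	(1u << 8)

int main(void)
{
	unsigned int cnt = 4;	/* downshift after four failed attempts */
	uint32_t val = MII_M1111_PHY_EXT_CR_DOWNSHIFT_EN |
		       FIELD_PREP(MII_M1111_PHY_EXT_CR_DOWNSHIFT_MASK, cnt - 1);

	/* prints "reg bits = 0x700, decoded cnt = 4" */
	printf("reg bits = 0x%03x, decoded cnt = %u\n", (unsigned int)val,
	       (unsigned int)FIELD_GET(MII_M1111_PHY_EXT_CR_DOWNSHIFT_MASK, val) + 1);
	return 0;
}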
+static int m88e1111_get_tunable(struct phy_device *phydev,
+ struct ethtool_tunable *tuna, void *data)
+{
+ switch (tuna->id) {
+ case ETHTOOL_PHY_DOWNSHIFT:
+ return m88e1111_get_downshift(phydev, data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int m88e1111_set_tunable(struct phy_device *phydev,
+ struct ethtool_tunable *tuna, const void *data)
+{
+ switch (tuna->id) {
+ case ETHTOOL_PHY_DOWNSHIFT:
+ return m88e1111_set_downshift(phydev, *(const u8 *)data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int m88e1011_get_downshift(struct phy_device *phydev, u8 *data)
+{
+ int val, cnt, enable;
+
+ val = phy_read(phydev, MII_M1011_PHY_SCR);
+ if (val < 0)
+ return val;
+
+ enable = FIELD_GET(MII_M1011_PHY_SCR_DOWNSHIFT_EN, val);
+ cnt = FIELD_GET(MII_M1011_PHY_SCR_DOWNSHIFT_MASK, val) + 1;
+
+ *data = enable ? cnt : DOWNSHIFT_DEV_DISABLE;
+
+ return 0;
+}
+
+static int m88e1011_set_downshift(struct phy_device *phydev, u8 cnt)
+{
+ int val;
+
+ if (cnt > MII_M1011_PHY_SCR_DOWNSHIFT_MAX)
+ return -E2BIG;
+
+ if (!cnt)
+ return phy_clear_bits(phydev, MII_M1011_PHY_SCR,
+ MII_M1011_PHY_SCR_DOWNSHIFT_EN);
+
+ val = MII_M1011_PHY_SCR_DOWNSHIFT_EN;
+ val |= FIELD_PREP(MII_M1011_PHY_SCR_DOWNSHIFT_MASK, cnt - 1);
+
+ return phy_modify(phydev, MII_M1011_PHY_SCR,
+ MII_M1011_PHY_SCR_DOWNSHIFT_EN |
+ MII_M1011_PHY_SCR_DOWNSHIFT_MASK,
+ val);
+}
+
+static int m88e1011_get_tunable(struct phy_device *phydev,
+ struct ethtool_tunable *tuna, void *data)
+{
+ switch (tuna->id) {
+ case ETHTOOL_PHY_DOWNSHIFT:
+ return m88e1011_get_downshift(phydev, data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int m88e1011_set_tunable(struct phy_device *phydev,
+ struct ethtool_tunable *tuna, const void *data)
+{
+ switch (tuna->id) {
+ case ETHTOOL_PHY_DOWNSHIFT:
+ return m88e1011_set_downshift(phydev, *(const u8 *)data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void m88e1011_link_change_notify(struct phy_device *phydev)
+{
+ int status;
+
+ if (phydev->state != PHY_RUNNING)
+ return;
+
+	/* the PHY may currently be on the fiber page */
+ status = phy_read_paged(phydev, MII_MARVELL_COPPER_PAGE,
+ MII_M1011_PHY_SSR);
+
+ if (status > 0 && status & MII_M1011_PHY_SSR_DOWNSHIFT)
+ phydev_warn(phydev, "Downshift occurred! Cabling may be defective.\n");
+}
+
+static int m88e1116r_config_init(struct phy_device *phydev)
+{
+ int err;
+
+ err = genphy_soft_reset(phydev);
+ if (err < 0)
+ return err;
+
+ msleep(500);
+
+ err = marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE);
+ if (err < 0)
+ return err;
+
+ err = marvell_set_polarity(phydev, phydev->mdix_ctrl);
+ if (err < 0)
+ return err;
+
+ err = m88e1011_set_downshift(phydev, 8);
+ if (err < 0)
+ return err;
+
+ if (phy_interface_is_rgmii(phydev)) {
+ err = m88e1121_config_aneg_rgmii_delays(phydev);
+ if (err < 0)
+ return err;
+ }
+
+ err = genphy_soft_reset(phydev);
+ if (err < 0)
+ return err;
+
+ return marvell_config_init(phydev);
+}
+
static int m88e1318_config_init(struct phy_device *phydev)
{
if (phy_interrupt_is_valid(phydev)) {
@@ -1117,6 +1237,8 @@ static int m88e1540_get_tunable(struct phy_device *phydev,
switch (tuna->id) {
case ETHTOOL_PHY_FAST_LINK_DOWN:
return m88e1540_get_fld(phydev, data);
+ case ETHTOOL_PHY_DOWNSHIFT:
+ return m88e1011_get_downshift(phydev, data);
default:
return -EOPNOTSUPP;
}
@@ -1128,6 +1250,8 @@ static int m88e1540_set_tunable(struct phy_device *phydev,
switch (tuna->id) {
case ETHTOOL_PHY_FAST_LINK_DOWN:
return m88e1540_set_fld(phydev, data);
+ case ETHTOOL_PHY_DOWNSHIFT:
+ return m88e1011_set_downshift(phydev, *(const u8 *)data);
default:
return -EOPNOTSUPP;
}
@@ -2163,6 +2287,9 @@ static struct phy_driver marvell_drivers[] = {
.get_sset_count = marvell_get_sset_count,
.get_strings = marvell_get_strings,
.get_stats = marvell_get_stats,
+ .get_tunable = m88e1011_get_tunable,
+ .set_tunable = m88e1011_set_tunable,
+ .link_change_notify = m88e1011_link_change_notify,
},
{
.phy_id = MARVELL_PHY_ID_88E1111,
@@ -2182,6 +2309,9 @@ static struct phy_driver marvell_drivers[] = {
.get_sset_count = marvell_get_sset_count,
.get_strings = marvell_get_strings,
.get_stats = marvell_get_stats,
+ .get_tunable = m88e1111_get_tunable,
+ .set_tunable = m88e1111_set_tunable,
+ .link_change_notify = m88e1011_link_change_notify,
},
{
.phy_id = MARVELL_PHY_ID_88E1118,
@@ -2220,6 +2350,9 @@ static struct phy_driver marvell_drivers[] = {
.get_sset_count = marvell_get_sset_count,
.get_strings = marvell_get_strings,
.get_stats = marvell_get_stats,
+ .get_tunable = m88e1011_get_tunable,
+ .set_tunable = m88e1011_set_tunable,
+ .link_change_notify = m88e1011_link_change_notify,
},
{
.phy_id = MARVELL_PHY_ID_88E1318S,
@@ -2261,6 +2394,9 @@ static struct phy_driver marvell_drivers[] = {
.get_sset_count = marvell_get_sset_count,
.get_strings = marvell_get_strings,
.get_stats = marvell_get_stats,
+ .get_tunable = m88e1111_get_tunable,
+ .set_tunable = m88e1111_set_tunable,
+ .link_change_notify = m88e1011_link_change_notify,
},
{
.phy_id = MARVELL_PHY_ID_88E1149R,
@@ -2314,6 +2450,9 @@ static struct phy_driver marvell_drivers[] = {
.get_sset_count = marvell_get_sset_count,
.get_strings = marvell_get_strings,
.get_stats = marvell_get_stats,
+ .get_tunable = m88e1011_get_tunable,
+ .set_tunable = m88e1011_set_tunable,
+ .link_change_notify = m88e1011_link_change_notify,
},
{
.phy_id = MARVELL_PHY_ID_88E1510,
@@ -2337,6 +2476,9 @@ static struct phy_driver marvell_drivers[] = {
.get_strings = marvell_get_strings,
.get_stats = marvell_get_stats,
.set_loopback = genphy_loopback,
+ .get_tunable = m88e1011_get_tunable,
+ .set_tunable = m88e1011_set_tunable,
+ .link_change_notify = m88e1011_link_change_notify,
},
{
.phy_id = MARVELL_PHY_ID_88E1540,
@@ -2359,6 +2501,7 @@ static struct phy_driver marvell_drivers[] = {
.get_stats = marvell_get_stats,
.get_tunable = m88e1540_get_tunable,
.set_tunable = m88e1540_set_tunable,
+ .link_change_notify = m88e1011_link_change_notify,
},
{
.phy_id = MARVELL_PHY_ID_88E1545,
@@ -2379,6 +2522,9 @@ static struct phy_driver marvell_drivers[] = {
.get_sset_count = marvell_get_sset_count,
.get_strings = marvell_get_strings,
.get_stats = marvell_get_stats,
+ .get_tunable = m88e1540_get_tunable,
+ .set_tunable = m88e1540_set_tunable,
+ .link_change_notify = m88e1011_link_change_notify,
},
{
.phy_id = MARVELL_PHY_ID_88E3016,
@@ -2421,6 +2567,7 @@ static struct phy_driver marvell_drivers[] = {
.get_stats = marvell_get_stats,
.get_tunable = m88e1540_get_tunable,
.set_tunable = m88e1540_set_tunable,
+ .link_change_notify = m88e1011_link_change_notify,
},
};
diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c
index 3b99882692e3..1bf13017d288 100644
--- a/drivers/net/phy/marvell10g.c
+++ b/drivers/net/phy/marvell10g.c
@@ -26,6 +26,7 @@
#include <linux/hwmon.h>
#include <linux/marvell_phy.h>
#include <linux/phy.h>
+#include <linux/sfp.h>
#define MV_PHY_ALASKA_NBT_QUIRK_MASK 0xfffffffe
#define MV_PHY_ALASKA_NBT_QUIRK_REV (MARVELL_PHY_ID_88X3310 | 0xa)
@@ -206,6 +207,28 @@ static int mv3310_hwmon_probe(struct phy_device *phydev)
}
#endif
+static int mv3310_sfp_insert(void *upstream, const struct sfp_eeprom_id *id)
+{
+ struct phy_device *phydev = upstream;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(support) = { 0, };
+ phy_interface_t iface;
+
+ sfp_parse_support(phydev->sfp_bus, id, support);
+ iface = sfp_select_interface(phydev->sfp_bus, id, support);
+
+ if (iface != PHY_INTERFACE_MODE_10GKR) {
+ dev_err(&phydev->mdio.dev, "incompatible SFP module inserted\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static const struct sfp_upstream_ops mv3310_sfp_ops = {
+ .attach = phy_sfp_attach,
+ .detach = phy_sfp_detach,
+ .module_insert = mv3310_sfp_insert,
+};
+
static int mv3310_probe(struct phy_device *phydev)
{
struct mv3310_priv *priv;
@@ -236,7 +259,7 @@ static int mv3310_probe(struct phy_device *phydev)
if (ret)
return ret;
- return 0;
+ return phy_sfp_probe(phydev, &mv3310_sfp_ops);
}
static int mv3310_suspend(struct phy_device *phydev)
diff --git a/drivers/net/phy/mdio-sun4i.c b/drivers/net/phy/mdio-sun4i.c
index 58d6504495e0..f798de3276dc 100644
--- a/drivers/net/phy/mdio-sun4i.c
+++ b/drivers/net/phy/mdio-sun4i.c
@@ -145,8 +145,11 @@ err_out_free_mdiobus:
static int sun4i_mdio_remove(struct platform_device *pdev)
{
struct mii_bus *bus = platform_get_drvdata(pdev);
+ struct sun4i_mdio_data *data = bus->priv;
mdiobus_unregister(bus);
+ if (data->regulator)
+ regulator_disable(data->regulator);
mdiobus_free(bus);
return 0;
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 2e29ab841b4d..229e480179ff 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -62,13 +62,14 @@ static int mdiobus_register_reset(struct mdio_device *mdiodev)
struct reset_control *reset = NULL;
if (mdiodev->dev.of_node)
- reset = devm_reset_control_get_exclusive(&mdiodev->dev,
- "phy");
- if (PTR_ERR(reset) == -ENOENT ||
- PTR_ERR(reset) == -ENOTSUPP)
- reset = NULL;
- else if (IS_ERR(reset))
- return PTR_ERR(reset);
+ reset = of_reset_control_get_exclusive(mdiodev->dev.of_node,
+ "phy");
+ if (IS_ERR(reset)) {
+ if (PTR_ERR(reset) == -ENOENT || PTR_ERR(reset) == -ENOTSUPP)
+ reset = NULL;
+ else
+ return PTR_ERR(reset);
+ }
mdiodev->reset_ctrl = reset;
@@ -106,6 +107,8 @@ int mdiobus_unregister_device(struct mdio_device *mdiodev)
if (mdiodev->bus->mdio_map[mdiodev->addr] != mdiodev)
return -EINVAL;
+ reset_control_put(mdiodev->reset_ctrl);
+
mdiodev->bus->mdio_map[mdiodev->addr] = NULL;
return 0;
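Dropping the devm_-managed variant means nothing releases the reset automatically at device teardown, which is why the matching reset_control_put() is added to mdiobus_unregister_device() above. A minimal sketch of the ownership pattern (the helper name is invented for illustration):

#include <linux/err.h>
#include <linux/of.h>
#include <linux/reset.h>

static int example_get_phy_reset(struct device_node *np)
{
	/* takes a reference that devm no longer releases for us */
	struct reset_control *rst = of_reset_control_get_exclusive(np, "phy");

	if (IS_ERR(rst))
		return PTR_ERR(rst);

	/* ... reset_control_assert()/reset_control_deassert() as needed ... */

	reset_control_put(rst);	/* balances the get, as done at unregister */
	return 0;
}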
diff --git a/drivers/net/phy/mscc.c b/drivers/net/phy/mscc.c
index 7ada1fd9ca71..d5f8f351d9ef 100644
--- a/drivers/net/phy/mscc.c
+++ b/drivers/net/phy/mscc.c
@@ -252,13 +252,21 @@ enum rgmii_rx_clock_delay {
#define MSCC_PHY_TR_LSB 17
#define MSCC_PHY_TR_MSB 18
-/* Microsemi PHY ID's */
+/* Microsemi PHY IDs.
+ * The code assumes the lowest nibble is 0.
+ */
+#define PHY_ID_VSC8504 0x000704c0
#define PHY_ID_VSC8514 0x00070670
#define PHY_ID_VSC8530 0x00070560
#define PHY_ID_VSC8531 0x00070570
#define PHY_ID_VSC8540 0x00070760
#define PHY_ID_VSC8541 0x00070770
+#define PHY_ID_VSC8552 0x000704e0
+#define PHY_ID_VSC856X 0x000707e0
+#define PHY_ID_VSC8572 0x000704d0
#define PHY_ID_VSC8574 0x000704a0
+#define PHY_ID_VSC8575 0x000707d0
+#define PHY_ID_VSC8582 0x000707b0
#define PHY_ID_VSC8584 0x000707c0
#define MSCC_VDDMAC_1500 1500
@@ -895,7 +903,7 @@ static void vsc85xx_tr_write(struct phy_device *phydev, u16 addr, u32 val)
static int vsc8531_pre_init_seq_set(struct phy_device *phydev)
{
int rc;
- const struct reg_val init_seq[] = {
+ static const struct reg_val init_seq[] = {
{0x0f90, 0x00688980},
{0x0696, 0x00000003},
{0x07fa, 0x0050100f},
@@ -939,7 +947,7 @@ out_unlock:
static int vsc85xx_eee_init_seq_set(struct phy_device *phydev)
{
- const struct reg_val init_eee[] = {
+ static const struct reg_val init_eee[] = {
{0x0f82, 0x0012b00a},
{0x1686, 0x00000004},
{0x168c, 0x00d2c46f},
@@ -1224,7 +1232,7 @@ out:
/* bus->mdio_lock should be locked when using this function */
static int vsc8574_config_pre_init(struct phy_device *phydev)
{
- const struct reg_val pre_init1[] = {
+ static const struct reg_val pre_init1[] = {
{0x0fae, 0x000401bd},
{0x0fac, 0x000f000f},
{0x17a0, 0x00a0f147},
@@ -1272,7 +1280,7 @@ static int vsc8574_config_pre_init(struct phy_device *phydev)
{0x0fee, 0x0004a6a1},
{0x0ffe, 0x00b01807},
};
- const struct reg_val pre_init2[] = {
+ static const struct reg_val pre_init2[] = {
{0x0486, 0x0008a518},
{0x0488, 0x006dc696},
{0x048a, 0x00000912},
@@ -1427,7 +1435,7 @@ out:
/* bus->mdio_lock should be locked when using this function */
static int vsc8584_config_pre_init(struct phy_device *phydev)
{
- const struct reg_val pre_init1[] = {
+ static const struct reg_val pre_init1[] = {
{0x07fa, 0x0050100f},
{0x1688, 0x00049f81},
{0x0f90, 0x00688980},
@@ -1451,7 +1459,7 @@ static int vsc8584_config_pre_init(struct phy_device *phydev)
{0x16b2, 0x00007000},
{0x16b4, 0x00000814},
};
- const struct reg_val pre_init2[] = {
+ static const struct reg_val pre_init2[] = {
{0x0486, 0x0008a518},
{0x0488, 0x006dc696},
{0x048a, 0x00000912},
@@ -1595,6 +1603,9 @@ static bool vsc8584_is_pkg_init(struct phy_device *phydev, bool reversed)
else
addr = vsc8531->base_addr + i;
+ if (!map[addr])
+ continue;
+
phy = container_of(map[addr], struct phy_device, mdio);
if ((phy->phy_id & phydev->drv->phy_id_mask) !=
@@ -1647,14 +1658,29 @@ static int vsc8584_config_init(struct phy_device *phydev)
* in this pre-init function.
*/
if (!vsc8584_is_pkg_init(phydev, val & PHY_ADDR_REVERSED ? 1 : 0)) {
- if ((phydev->phy_id & phydev->drv->phy_id_mask) ==
- (PHY_ID_VSC8574 & phydev->drv->phy_id_mask))
+ /* The following switch statement assumes that the lowest
+ * nibble of the phy_id_mask is always 0. This works because
+ * the lowest nibble of the PHY_ID's below are also 0.
+	 * the lowest nibble of the PHY_IDs below is also 0.
+ WARN_ON(phydev->drv->phy_id_mask & 0xf);
+
+ switch (phydev->phy_id & phydev->drv->phy_id_mask) {
+ case PHY_ID_VSC8504:
+ case PHY_ID_VSC8552:
+ case PHY_ID_VSC8572:
+ case PHY_ID_VSC8574:
ret = vsc8574_config_pre_init(phydev);
- else if ((phydev->phy_id & phydev->drv->phy_id_mask) ==
- (PHY_ID_VSC8584 & phydev->drv->phy_id_mask))
+ break;
+ case PHY_ID_VSC856X:
+ case PHY_ID_VSC8575:
+ case PHY_ID_VSC8582:
+ case PHY_ID_VSC8584:
ret = vsc8584_config_pre_init(phydev);
- else
+ break;
+ default:
ret = -EINVAL;
+ break;
+ }
if (ret)
goto err;
@@ -1786,7 +1812,7 @@ static int vsc8514_config_pre_init(struct phy_device *phydev)
* values to handle hardware performance of PHY. They
* are set at Power-On state and remain until PHY Reset.
*/
- const struct reg_val pre_init1[] = {
+ static const struct reg_val pre_init1[] = {
{0x0f90, 0x00688980},
{0x0786, 0x00000003},
{0x07fa, 0x0050100f},
@@ -2322,6 +2348,32 @@ static int vsc85xx_probe(struct phy_device *phydev)
/* Microsemi VSC85xx PHYs */
static struct phy_driver vsc85xx_driver[] = {
{
+ .phy_id = PHY_ID_VSC8504,
+ .name = "Microsemi GE VSC8504 SyncE",
+ .phy_id_mask = 0xfffffff0,
+ /* PHY_GBIT_FEATURES */
+ .soft_reset = &genphy_soft_reset,
+ .config_init = &vsc8584_config_init,
+ .config_aneg = &vsc85xx_config_aneg,
+ .aneg_done = &genphy_aneg_done,
+ .read_status = &vsc85xx_read_status,
+ .ack_interrupt = &vsc85xx_ack_interrupt,
+ .config_intr = &vsc85xx_config_intr,
+ .did_interrupt = &vsc8584_did_interrupt,
+ .suspend = &genphy_suspend,
+ .resume = &genphy_resume,
+ .probe = &vsc8574_probe,
+ .set_wol = &vsc85xx_wol_set,
+ .get_wol = &vsc85xx_wol_get,
+ .get_tunable = &vsc85xx_get_tunable,
+ .set_tunable = &vsc85xx_set_tunable,
+ .read_page = &vsc85xx_phy_read_page,
+ .write_page = &vsc85xx_phy_write_page,
+ .get_sset_count = &vsc85xx_get_sset_count,
+ .get_strings = &vsc85xx_get_strings,
+ .get_stats = &vsc85xx_get_stats,
+},
+{
.phy_id = PHY_ID_VSC8514,
.name = "Microsemi GE VSC8514 SyncE",
.phy_id_mask = 0xfffffff0,
@@ -2445,6 +2497,82 @@ static struct phy_driver vsc85xx_driver[] = {
.get_stats = &vsc85xx_get_stats,
},
{
+ .phy_id = PHY_ID_VSC8552,
+ .name = "Microsemi GE VSC8552 SyncE",
+ .phy_id_mask = 0xfffffff0,
+ /* PHY_GBIT_FEATURES */
+ .soft_reset = &genphy_soft_reset,
+ .config_init = &vsc8584_config_init,
+ .config_aneg = &vsc85xx_config_aneg,
+ .aneg_done = &genphy_aneg_done,
+ .read_status = &vsc85xx_read_status,
+ .ack_interrupt = &vsc85xx_ack_interrupt,
+ .config_intr = &vsc85xx_config_intr,
+ .did_interrupt = &vsc8584_did_interrupt,
+ .suspend = &genphy_suspend,
+ .resume = &genphy_resume,
+ .probe = &vsc8574_probe,
+ .set_wol = &vsc85xx_wol_set,
+ .get_wol = &vsc85xx_wol_get,
+ .get_tunable = &vsc85xx_get_tunable,
+ .set_tunable = &vsc85xx_set_tunable,
+ .read_page = &vsc85xx_phy_read_page,
+ .write_page = &vsc85xx_phy_write_page,
+ .get_sset_count = &vsc85xx_get_sset_count,
+ .get_strings = &vsc85xx_get_strings,
+ .get_stats = &vsc85xx_get_stats,
+},
+{
+ .phy_id = PHY_ID_VSC856X,
+ .name = "Microsemi GE VSC856X SyncE",
+ .phy_id_mask = 0xfffffff0,
+ /* PHY_GBIT_FEATURES */
+ .soft_reset = &genphy_soft_reset,
+ .config_init = &vsc8584_config_init,
+ .config_aneg = &vsc85xx_config_aneg,
+ .aneg_done = &genphy_aneg_done,
+ .read_status = &vsc85xx_read_status,
+ .ack_interrupt = &vsc85xx_ack_interrupt,
+ .config_intr = &vsc85xx_config_intr,
+ .did_interrupt = &vsc8584_did_interrupt,
+ .suspend = &genphy_suspend,
+ .resume = &genphy_resume,
+ .probe = &vsc8584_probe,
+ .get_tunable = &vsc85xx_get_tunable,
+ .set_tunable = &vsc85xx_set_tunable,
+ .read_page = &vsc85xx_phy_read_page,
+ .write_page = &vsc85xx_phy_write_page,
+ .get_sset_count = &vsc85xx_get_sset_count,
+ .get_strings = &vsc85xx_get_strings,
+ .get_stats = &vsc85xx_get_stats,
+},
+{
+ .phy_id = PHY_ID_VSC8572,
+ .name = "Microsemi GE VSC8572 SyncE",
+ .phy_id_mask = 0xfffffff0,
+ /* PHY_GBIT_FEATURES */
+ .soft_reset = &genphy_soft_reset,
+ .config_init = &vsc8584_config_init,
+ .config_aneg = &vsc85xx_config_aneg,
+ .aneg_done = &genphy_aneg_done,
+ .read_status = &vsc85xx_read_status,
+ .ack_interrupt = &vsc85xx_ack_interrupt,
+ .config_intr = &vsc85xx_config_intr,
+ .did_interrupt = &vsc8584_did_interrupt,
+ .suspend = &genphy_suspend,
+ .resume = &genphy_resume,
+ .probe = &vsc8574_probe,
+ .set_wol = &vsc85xx_wol_set,
+ .get_wol = &vsc85xx_wol_get,
+ .get_tunable = &vsc85xx_get_tunable,
+ .set_tunable = &vsc85xx_set_tunable,
+ .read_page = &vsc85xx_phy_read_page,
+ .write_page = &vsc85xx_phy_write_page,
+ .get_sset_count = &vsc85xx_get_sset_count,
+ .get_strings = &vsc85xx_get_strings,
+ .get_stats = &vsc85xx_get_stats,
+},
+{
.phy_id = PHY_ID_VSC8574,
.name = "Microsemi GE VSC8574 SyncE",
.phy_id_mask = 0xfffffff0,
@@ -2471,6 +2599,54 @@ static struct phy_driver vsc85xx_driver[] = {
.get_stats = &vsc85xx_get_stats,
},
{
+ .phy_id = PHY_ID_VSC8575,
+ .name = "Microsemi GE VSC8575 SyncE",
+ .phy_id_mask = 0xfffffff0,
+ /* PHY_GBIT_FEATURES */
+ .soft_reset = &genphy_soft_reset,
+ .config_init = &vsc8584_config_init,
+ .config_aneg = &vsc85xx_config_aneg,
+ .aneg_done = &genphy_aneg_done,
+ .read_status = &vsc85xx_read_status,
+ .ack_interrupt = &vsc85xx_ack_interrupt,
+ .config_intr = &vsc85xx_config_intr,
+ .did_interrupt = &vsc8584_did_interrupt,
+ .suspend = &genphy_suspend,
+ .resume = &genphy_resume,
+ .probe = &vsc8584_probe,
+ .get_tunable = &vsc85xx_get_tunable,
+ .set_tunable = &vsc85xx_set_tunable,
+ .read_page = &vsc85xx_phy_read_page,
+ .write_page = &vsc85xx_phy_write_page,
+ .get_sset_count = &vsc85xx_get_sset_count,
+ .get_strings = &vsc85xx_get_strings,
+ .get_stats = &vsc85xx_get_stats,
+},
+{
+ .phy_id = PHY_ID_VSC8582,
+ .name = "Microsemi GE VSC8582 SyncE",
+ .phy_id_mask = 0xfffffff0,
+ /* PHY_GBIT_FEATURES */
+ .soft_reset = &genphy_soft_reset,
+ .config_init = &vsc8584_config_init,
+ .config_aneg = &vsc85xx_config_aneg,
+ .aneg_done = &genphy_aneg_done,
+ .read_status = &vsc85xx_read_status,
+ .ack_interrupt = &vsc85xx_ack_interrupt,
+ .config_intr = &vsc85xx_config_intr,
+ .did_interrupt = &vsc8584_did_interrupt,
+ .suspend = &genphy_suspend,
+ .resume = &genphy_resume,
+ .probe = &vsc8584_probe,
+ .get_tunable = &vsc85xx_get_tunable,
+ .set_tunable = &vsc85xx_set_tunable,
+ .read_page = &vsc85xx_phy_read_page,
+ .write_page = &vsc85xx_phy_write_page,
+ .get_sset_count = &vsc85xx_get_sset_count,
+ .get_strings = &vsc85xx_get_strings,
+ .get_stats = &vsc85xx_get_stats,
+},
+{
.phy_id = PHY_ID_VSC8584,
.name = "Microsemi GE VSC8584 SyncE",
.phy_id_mask = 0xfffffff0,
@@ -2500,12 +2676,18 @@ static struct phy_driver vsc85xx_driver[] = {
module_phy_driver(vsc85xx_driver);
static struct mdio_device_id __maybe_unused vsc85xx_tbl[] = {
+ { PHY_ID_VSC8504, 0xfffffff0, },
{ PHY_ID_VSC8514, 0xfffffff0, },
{ PHY_ID_VSC8530, 0xfffffff0, },
{ PHY_ID_VSC8531, 0xfffffff0, },
{ PHY_ID_VSC8540, 0xfffffff0, },
{ PHY_ID_VSC8541, 0xfffffff0, },
+ { PHY_ID_VSC8552, 0xfffffff0, },
+ { PHY_ID_VSC856X, 0xfffffff0, },
+ { PHY_ID_VSC8572, 0xfffffff0, },
{ PHY_ID_VSC8574, 0xfffffff0, },
+ { PHY_ID_VSC8575, 0xfffffff0, },
+ { PHY_ID_VSC8582, 0xfffffff0, },
{ PHY_ID_VSC8584, 0xfffffff0, },
{ }
};
diff --git a/drivers/net/phy/phy-core.c b/drivers/net/phy/phy-core.c
index 9412669b579c..769a076514b0 100644
--- a/drivers/net/phy/phy-core.c
+++ b/drivers/net/phy/phy-core.c
@@ -8,7 +8,7 @@
const char *phy_speed_to_str(int speed)
{
- BUILD_BUG_ON_MSG(__ETHTOOL_LINK_MODE_MASK_NBITS != 69,
+ BUILD_BUG_ON_MSG(__ETHTOOL_LINK_MODE_MASK_NBITS != 74,
"Enum ethtool_link_mode_bit_indices and phylib are out of sync. "
"If a speed or mode has been added please update phy_speed_to_str "
"and the PHY settings array.\n");
@@ -42,6 +42,8 @@ const char *phy_speed_to_str(int speed)
return "100Gbps";
case SPEED_200000:
return "200Gbps";
+ case SPEED_400000:
+ return "400Gbps";
case SPEED_UNKNOWN:
return "Unknown";
default:
@@ -70,6 +72,12 @@ EXPORT_SYMBOL_GPL(phy_duplex_to_str);
.bit = ETHTOOL_LINK_MODE_ ## b ## _BIT}
static const struct phy_setting settings[] = {
+ /* 400G */
+ PHY_SETTING( 400000, FULL, 400000baseCR8_Full ),
+ PHY_SETTING( 400000, FULL, 400000baseKR8_Full ),
+ PHY_SETTING( 400000, FULL, 400000baseLR8_ER8_FR8_Full ),
+ PHY_SETTING( 400000, FULL, 400000baseDR8_Full ),
+ PHY_SETTING( 400000, FULL, 400000baseSR8_Full ),
/* 200G */
PHY_SETTING( 200000, FULL, 200000baseCR4_Full ),
PHY_SETTING( 200000, FULL, 200000baseKR4_Full ),
@@ -411,9 +419,9 @@ int phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum)
{
int ret;
- mutex_lock(&phydev->mdio.bus->mdio_lock);
+ phy_lock_mdio_bus(phydev);
ret = __phy_read_mmd(phydev, devad, regnum);
- mutex_unlock(&phydev->mdio.bus->mdio_lock);
+ phy_unlock_mdio_bus(phydev);
return ret;
}
@@ -472,9 +480,9 @@ int phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val)
{
int ret;
- mutex_lock(&phydev->mdio.bus->mdio_lock);
+ phy_lock_mdio_bus(phydev);
ret = __phy_write_mmd(phydev, devad, regnum, val);
- mutex_unlock(&phydev->mdio.bus->mdio_lock);
+ phy_unlock_mdio_bus(phydev);
return ret;
}
@@ -528,9 +536,9 @@ int phy_modify_changed(struct phy_device *phydev, u32 regnum, u16 mask, u16 set)
{
int ret;
- mutex_lock(&phydev->mdio.bus->mdio_lock);
+ phy_lock_mdio_bus(phydev);
ret = __phy_modify_changed(phydev, regnum, mask, set);
- mutex_unlock(&phydev->mdio.bus->mdio_lock);
+ phy_unlock_mdio_bus(phydev);
return ret;
}
@@ -572,9 +580,9 @@ int phy_modify(struct phy_device *phydev, u32 regnum, u16 mask, u16 set)
{
int ret;
- mutex_lock(&phydev->mdio.bus->mdio_lock);
+ phy_lock_mdio_bus(phydev);
ret = __phy_modify(phydev, regnum, mask, set);
- mutex_unlock(&phydev->mdio.bus->mdio_lock);
+ phy_unlock_mdio_bus(phydev);
return ret;
}
@@ -631,9 +639,9 @@ int phy_modify_mmd_changed(struct phy_device *phydev, int devad, u32 regnum,
{
int ret;
- mutex_lock(&phydev->mdio.bus->mdio_lock);
+ phy_lock_mdio_bus(phydev);
ret = __phy_modify_mmd_changed(phydev, devad, regnum, mask, set);
- mutex_unlock(&phydev->mdio.bus->mdio_lock);
+ phy_unlock_mdio_bus(phydev);
return ret;
}
@@ -679,9 +687,9 @@ int phy_modify_mmd(struct phy_device *phydev, int devad, u32 regnum,
{
int ret;
- mutex_lock(&phydev->mdio.bus->mdio_lock);
+ phy_lock_mdio_bus(phydev);
ret = __phy_modify_mmd(phydev, devad, regnum, mask, set);
- mutex_unlock(&phydev->mdio.bus->mdio_lock);
+ phy_unlock_mdio_bus(phydev);
return ret;
}
@@ -689,11 +697,17 @@ EXPORT_SYMBOL_GPL(phy_modify_mmd);
static int __phy_read_page(struct phy_device *phydev)
{
+ if (WARN_ONCE(!phydev->drv->read_page, "read_page callback not available, PHY driver not loaded?\n"))
+ return -EOPNOTSUPP;
+
return phydev->drv->read_page(phydev);
}
static int __phy_write_page(struct phy_device *phydev, int page)
{
+ if (WARN_ONCE(!phydev->drv->write_page, "write_page callback not available, PHY driver not loaded?\n"))
+ return -EOPNOTSUPP;
+
return phydev->drv->write_page(phydev, page);
}
@@ -707,7 +721,7 @@ static int __phy_write_page(struct phy_device *phydev, int page)
*/
int phy_save_page(struct phy_device *phydev)
{
- mutex_lock(&phydev->mdio.bus->mdio_lock);
+ phy_lock_mdio_bus(phydev);
return __phy_read_page(phydev);
}
EXPORT_SYMBOL_GPL(phy_save_page);
@@ -774,7 +788,7 @@ int phy_restore_page(struct phy_device *phydev, int oldpage, int ret)
ret = oldpage;
}
- mutex_unlock(&phydev->mdio.bus->mdio_lock);
+ phy_unlock_mdio_bus(phydev);
return ret;
}
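phy_save_page() and phy_restore_page() bracket the bus lock taken via phy_lock_mdio_bus(), so the unlocked __phy_*() accessors may be used in between. A sketch of the usual idiom via phy_select_page(); the page number and register are hypothetical:

#include <linux/phy.h>

static int example_read_vendor_reg(struct phy_device *phydev)
{
	int oldpage, ret = 0;

	/* locks the MDIO bus and switches pages via the driver callbacks */
	oldpage = phy_select_page(phydev, 2);
	if (oldpage >= 0)
		ret = __phy_read(phydev, 0x10);

	/* restores the original page, unlocks the bus, propagates errors */
	return phy_restore_page(phydev, oldpage, ret);
}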
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 105d389b58e7..80be4d691e5b 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -23,6 +23,7 @@
#include <linux/ethtool.h>
#include <linux/phy.h>
#include <linux/phy_led_triggers.h>
+#include <linux/sfp.h>
#include <linux/workqueue.h>
#include <linux/mdio.h>
#include <linux/io.h>
@@ -252,66 +253,6 @@ static void phy_sanitize_settings(struct phy_device *phydev)
}
}
-/**
- * phy_ethtool_sset - generic ethtool sset function, handles all the details
- * @phydev: target phy_device struct
- * @cmd: ethtool_cmd
- *
- * A few notes about parameter checking:
- *
- * - We don't set port or transceiver, so we don't care what they
- * were set to.
- * - phy_start_aneg() will make sure forced settings are sane, and
- * choose the next best ones from the ones selected, so we don't
- * care if ethtool tries to give us bad values.
- */
-int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd)
-{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
- u32 speed = ethtool_cmd_speed(cmd);
-
- if (cmd->phy_address != phydev->mdio.addr)
- return -EINVAL;
-
- /* We make sure that we don't pass unsupported values in to the PHY */
- ethtool_convert_legacy_u32_to_link_mode(advertising, cmd->advertising);
- linkmode_and(advertising, advertising, phydev->supported);
-
- /* Verify the settings we care about. */
- if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
- return -EINVAL;
-
- if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
- return -EINVAL;
-
- if (cmd->autoneg == AUTONEG_DISABLE &&
- ((speed != SPEED_1000 &&
- speed != SPEED_100 &&
- speed != SPEED_10) ||
- (cmd->duplex != DUPLEX_HALF &&
- cmd->duplex != DUPLEX_FULL)))
- return -EINVAL;
-
- phydev->autoneg = cmd->autoneg;
-
- phydev->speed = speed;
-
- linkmode_copy(phydev->advertising, advertising);
-
- linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
- phydev->advertising, AUTONEG_ENABLE == cmd->autoneg);
-
- phydev->duplex = cmd->duplex;
-
- phydev->mdix_ctrl = cmd->eth_tp_mdix_ctrl;
-
- /* Restart the PHY */
- phy_start_aneg(phydev);
-
- return 0;
-}
-EXPORT_SYMBOL(phy_ethtool_sset);
-
int phy_ethtool_ksettings_set(struct phy_device *phydev,
const struct ethtool_link_ksettings *cmd)
{
@@ -841,6 +782,9 @@ void phy_stop(struct phy_device *phydev)
mutex_lock(&phydev->lock);
+ if (phydev->sfp_bus)
+ sfp_upstream_stop(phydev->sfp_bus);
+
phydev->state = PHY_HALTED;
mutex_unlock(&phydev->lock);
@@ -875,6 +819,9 @@ void phy_start(struct phy_device *phydev)
goto out;
}
+ if (phydev->sfp_bus)
+ sfp_upstream_start(phydev->sfp_bus);
+
/* if phy was suspended, bring the physical link up again */
__phy_resume(phydev);
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index adb66a2fae18..0887ed2bb050 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -27,6 +27,7 @@
#include <linux/bitmap.h>
#include <linux/phy.h>
#include <linux/phy_led_triggers.h>
+#include <linux/sfp.h>
#include <linux/mdio.h>
#include <linux/io.h>
#include <linux/uaccess.h>
@@ -488,7 +489,7 @@ static int phy_bus_match(struct device *dev, struct device_driver *drv)
if (phydev->is_c45) {
for (i = 1; i < num_ids; i++) {
- if (!(phydev->c45_ids.devices_in_package & (1 << i)))
+ if (phydev->c45_ids.device_ids[i] == 0xffffffff)
continue;
if ((phydrv->phy_id & phydrv->phy_id_mask) ==
@@ -596,8 +597,8 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id,
mdiodev->device_free = phy_mdio_device_free;
mdiodev->device_remove = phy_mdio_device_remove;
- dev->speed = 0;
- dev->duplex = -1;
+ dev->speed = SPEED_UNKNOWN;
+ dev->duplex = DUPLEX_UNKNOWN;
dev->pause = 0;
dev->asym_pause = 0;
dev->link = 0;
@@ -632,7 +633,7 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id,
int i;
for (i = 1; i < num_ids; i++) {
- if (!(c45_ids->devices_in_package & (1 << i)))
+ if (c45_ids->device_ids[i] == 0xffffffff)
continue;
ret = phy_request_driver_module(dev,
@@ -812,10 +813,13 @@ static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id,
*/
struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45)
{
- struct phy_c45_device_ids c45_ids = {0};
+ struct phy_c45_device_ids c45_ids;
u32 phy_id = 0;
int r;
+ c45_ids.devices_in_package = 0;
+ memset(c45_ids.device_ids, 0xff, sizeof(c45_ids.device_ids));
+
r = get_phy_id(bus, addr, &phy_id, is_c45, &c45_ids);
if (r)
return ERR_PTR(r);
@@ -1175,6 +1179,65 @@ phy_standalone_show(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR_RO(phy_standalone);
/**
+ * phy_sfp_attach - attach the SFP bus to the PHY upstream network device
+ * @upstream: pointer to the phy device
+ * @bus: sfp bus representing cage being attached
+ *
+ * This is used to fill in the sfp_upstream_ops .attach member.
+ */
+void phy_sfp_attach(void *upstream, struct sfp_bus *bus)
+{
+ struct phy_device *phydev = upstream;
+
+ if (phydev->attached_dev)
+ phydev->attached_dev->sfp_bus = bus;
+ phydev->sfp_bus_attached = true;
+}
+EXPORT_SYMBOL(phy_sfp_attach);
+
+/**
+ * phy_sfp_detach - detach the SFP bus from the PHY upstream network device
+ * @upstream: pointer to the phy device
+ * @bus: sfp bus representing cage being detached
+ *
+ * This is used to fill in the sfp_upstream_ops .detach member.
+ */
+void phy_sfp_detach(void *upstream, struct sfp_bus *bus)
+{
+ struct phy_device *phydev = upstream;
+
+ if (phydev->attached_dev)
+ phydev->attached_dev->sfp_bus = NULL;
+ phydev->sfp_bus_attached = false;
+}
+EXPORT_SYMBOL(phy_sfp_detach);
+
+/**
+ * phy_sfp_probe - probe for a SFP cage attached to this PHY device
+ * @phydev: Pointer to phy_device
+ * @ops: SFP's upstream operations
+ */
+int phy_sfp_probe(struct phy_device *phydev,
+ const struct sfp_upstream_ops *ops)
+{
+ struct sfp_bus *bus;
+ int ret;
+
+ if (phydev->mdio.dev.fwnode) {
+ bus = sfp_bus_find_fwnode(phydev->mdio.dev.fwnode);
+ if (IS_ERR(bus))
+ return PTR_ERR(bus);
+
+ phydev->sfp_bus = bus;
+
+ ret = sfp_bus_add_upstream(bus, phydev, ops);
+ sfp_bus_put(bus);
+ }
+ return 0;
+}
+EXPORT_SYMBOL(phy_sfp_probe);
+
+/**
* phy_attach_direct - attach a network device to a given PHY device pointer
* @dev: network device to attach
* @phydev: Pointer to phy_device to attach
@@ -1249,6 +1312,9 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
if (dev) {
phydev->attached_dev = dev;
dev->phydev = phydev;
+
+ if (phydev->sfp_bus_attached)
+ dev->sfp_bus = phydev->sfp_bus;
}
/* Some Ethernet drivers try to connect to a PHY device before
@@ -1270,7 +1336,7 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
phydev_err(phydev, "error creating 'phy_standalone' sysfs entry\n");
}
- phydev->dev_flags = flags;
+ phydev->dev_flags |= flags;
phydev->interface = interface;
@@ -1608,6 +1674,40 @@ static int genphy_config_advert(struct phy_device *phydev)
}
/**
+ * genphy_c37_config_advert - sanitize and advertise auto-negotiation parameters
+ * @phydev: target phy_device struct
+ *
+ * Description: Writes MII_ADVERTISE with the appropriate values,
+ * after sanitizing the values to make sure we only advertise
+ * what is supported. Returns < 0 on error, 0 if the PHY's advertisement
+ * hasn't changed, and > 0 if it has changed. This function is intended
+ * for Clause 37 1000Base-X mode.
+ */
+static int genphy_c37_config_advert(struct phy_device *phydev)
+{
+ u16 adv = 0;
+
+ /* Only allow advertising what this PHY supports */
+ linkmode_and(phydev->advertising, phydev->advertising,
+ phydev->supported);
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
+ phydev->advertising))
+ adv |= ADVERTISE_1000XFULL;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ phydev->advertising))
+ adv |= ADVERTISE_1000XPAUSE;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ phydev->advertising))
+ adv |= ADVERTISE_1000XPSE_ASYM;
+
+ return phy_modify_changed(phydev, MII_ADVERTISE,
+ ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
+ ADVERTISE_1000XHALF | ADVERTISE_1000XPSE_ASYM,
+ adv);
+}
+
+/**
* genphy_config_eee_advert - disable unwanted eee mode advertisement
* @phydev: target phy_device struct
*
@@ -1716,6 +1816,54 @@ int __genphy_config_aneg(struct phy_device *phydev, bool changed)
EXPORT_SYMBOL(__genphy_config_aneg);
/**
+ * genphy_c37_config_aneg - restart auto-negotiation or write BMCR
+ * @phydev: target phy_device struct
+ *
+ * Description: If auto-negotiation is enabled, we configure the
+ * advertising, and then restart auto-negotiation. If it is not
+ * enabled, then we write the BMCR. This function is intended
+ * for use with Clause 37 1000Base-X mode.
+ */
+int genphy_c37_config_aneg(struct phy_device *phydev)
+{
+ int err, changed;
+
+ if (phydev->autoneg != AUTONEG_ENABLE)
+ return genphy_setup_forced(phydev);
+
+ err = phy_modify(phydev, MII_BMCR, BMCR_SPEED1000 | BMCR_SPEED100,
+ BMCR_SPEED1000);
+ if (err)
+ return err;
+
+ changed = genphy_c37_config_advert(phydev);
+ if (changed < 0) /* error */
+ return changed;
+
+ if (!changed) {
+ /* Advertisement hasn't changed, but maybe aneg was never on to
+ * begin with? Or maybe phy was isolated?
+ */
+ int ctl = phy_read(phydev, MII_BMCR);
+
+ if (ctl < 0)
+ return ctl;
+
+ if (!(ctl & BMCR_ANENABLE) || (ctl & BMCR_ISOLATE))
+ changed = 1; /* do restart aneg */
+ }
+
+ /* Only restart aneg if we are advertising something different
+ * than we were before.
+ */
+ if (changed > 0)
+ return genphy_restart_aneg(phydev);
+
+ return 0;
+}
+EXPORT_SYMBOL(genphy_c37_config_aneg);
+
+/**
* genphy_aneg_done - return auto-negotiation status
* @phydev: target phy_device struct
*
@@ -1887,6 +2035,63 @@ int genphy_read_status(struct phy_device *phydev)
EXPORT_SYMBOL(genphy_read_status);
/**
+ * genphy_c37_read_status - check the link status and update current link state
+ * @phydev: target phy_device struct
+ *
+ * Description: Check the link, then figure out the current state
+ * by comparing what we advertise with what the link partner
+ * advertises. This function is for Clause 37 1000Base-X mode.
+ */
+int genphy_c37_read_status(struct phy_device *phydev)
+{
+ int lpa, err, old_link = phydev->link;
+
+ /* Update the link, but return if there was an error */
+ err = genphy_update_link(phydev);
+ if (err)
+ return err;
+
+ /* why bother the PHY if nothing can have changed */
+ if (phydev->autoneg == AUTONEG_ENABLE && old_link && phydev->link)
+ return 0;
+
+ phydev->duplex = DUPLEX_UNKNOWN;
+ phydev->pause = 0;
+ phydev->asym_pause = 0;
+
+ if (phydev->autoneg == AUTONEG_ENABLE && phydev->autoneg_complete) {
+ lpa = phy_read(phydev, MII_LPA);
+ if (lpa < 0)
+ return lpa;
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ phydev->lp_advertising, lpa & LPA_LPACK);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
+ phydev->lp_advertising, lpa & LPA_1000XFULL);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ phydev->lp_advertising, lpa & LPA_1000XPAUSE);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ phydev->lp_advertising,
+ lpa & LPA_1000XPAUSE_ASYM);
+
+ phy_resolve_aneg_linkmode(phydev);
+ } else if (phydev->autoneg == AUTONEG_DISABLE) {
+ int bmcr = phy_read(phydev, MII_BMCR);
+
+ if (bmcr < 0)
+ return bmcr;
+
+ if (bmcr & BMCR_FULLDPLX)
+ phydev->duplex = DUPLEX_FULL;
+ else
+ phydev->duplex = DUPLEX_HALF;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(genphy_c37_read_status);
+
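A PHY that only supports Clause 37 1000Base-X auto-negotiation can plug these helpers straight into its ops; a hypothetical driver skeleton (the PHY ID and name are invented for illustration):

#include <linux/module.h>
#include <linux/phy.h>

static struct phy_driver example_basex_driver[] = {
{
	.phy_id		= 0x01234560,	/* hypothetical OUI/model */
	.phy_id_mask	= 0xfffffff0,
	.name		= "Example 1000Base-X PHY",
	/* Clause 37 helpers instead of the Clause 28 genphy defaults */
	.config_aneg	= genphy_c37_config_aneg,
	.read_status	= genphy_c37_read_status,
	.suspend	= genphy_suspend,
	.resume		= genphy_resume,
},
};
module_phy_driver(example_basex_driver);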
+/**
* genphy_soft_reset - software reset the PHY via BMCR_RESET bit
* @phydev: target phy_device struct
*
@@ -2279,6 +2484,9 @@ static int phy_remove(struct device *dev)
phydev->state = PHY_DOWN;
mutex_unlock(&phydev->lock);
+ sfp_bus_del_upstream(phydev->sfp_bus);
+ phydev->sfp_bus = NULL;
+
if (phydev->drv && phydev->drv->remove) {
phydev->drv->remove(phydev);
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index a578f7ebf715..9a616d6bc4eb 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -133,9 +133,7 @@ static int phylink_is_empty_linkmode(const unsigned long *linkmode)
phylink_set(tmp, Pause);
phylink_set(tmp, Asym_Pause);
- bitmap_andnot(tmp, linkmode, tmp, __ETHTOOL_LINK_MODE_MASK_NBITS);
-
- return linkmode_empty(tmp);
+ return linkmode_subset(linkmode, tmp);
}
static const char *phylink_an_mode_str(unsigned int mode)
@@ -359,9 +357,9 @@ static void phylink_mac_an_restart(struct phylink *pl)
pl->ops->mac_an_restart(pl->config);
}
-static int phylink_get_mac_state(struct phylink *pl, struct phylink_link_state *state)
+static void phylink_mac_pcs_get_state(struct phylink *pl,
+ struct phylink_link_state *state)
{
-
linkmode_copy(state->advertising, pl->link_config.advertising);
linkmode_zero(state->lp_advertising);
state->interface = pl->link_config.interface;
@@ -372,7 +370,7 @@ static int phylink_get_mac_state(struct phylink *pl, struct phylink_link_state *
state->an_complete = 0;
state->link = 1;
- return pl->ops->mac_link_state(pl->config, state);
+ pl->ops->mac_pcs_get_state(pl->config, state);
}
/* The fixed state is... fixed except for the link state,
@@ -495,7 +493,7 @@ static void phylink_resolve(struct work_struct *w)
break;
case MLO_AN_INBAND:
- phylink_get_mac_state(pl, &link_state);
+ phylink_mac_pcs_get_state(pl, &link_state);
/* If we have a phy, the "up" state is the union of
* both the PHY and the MAC */
@@ -566,28 +564,22 @@ static const struct sfp_upstream_ops sfp_phylink_ops;
static int phylink_register_sfp(struct phylink *pl,
struct fwnode_handle *fwnode)
{
- struct fwnode_reference_args ref;
+ struct sfp_bus *bus;
int ret;
- if (!fwnode)
- return 0;
-
- ret = fwnode_property_get_reference_args(fwnode, "sfp", NULL,
- 0, 0, &ref);
- if (ret < 0) {
- if (ret == -ENOENT)
- return 0;
-
- phylink_err(pl, "unable to parse \"sfp\" node: %d\n",
- ret);
+ bus = sfp_bus_find_fwnode(fwnode);
+ if (IS_ERR(bus)) {
+ ret = PTR_ERR(bus);
+ phylink_err(pl, "unable to attach SFP bus: %d\n", ret);
return ret;
}
- pl->sfp_bus = sfp_register_upstream(ref.fwnode, pl, &sfp_phylink_ops);
- if (!pl->sfp_bus)
- return -ENOMEM;
+ pl->sfp_bus = bus;
- return 0;
+ ret = sfp_bus_add_upstream(bus, pl, &sfp_phylink_ops);
+ sfp_bus_put(bus);
+
+ return ret;
}
/**
@@ -601,6 +593,8 @@ static int phylink_register_sfp(struct phylink *pl,
* Create a new phylink instance, and parse the link parameters found in @np.
* This will parse in-band modes, fixed-link or SFP configuration.
*
+ * Note: the rtnl lock must not be held when calling this function.
+ *
* Returns a pointer to a &struct phylink, or an error-pointer value. Users
* must use IS_ERR() to check for errors from this function.
*/
@@ -678,11 +672,12 @@ EXPORT_SYMBOL_GPL(phylink_create);
*
* Destroy a phylink instance. Any PHY that has been attached must have been
* cleaned up via phylink_disconnect_phy() prior to calling this function.
+ *
+ * Note: the rtnl lock must not be held when calling this function.
*/
void phylink_destroy(struct phylink *pl)
{
- if (pl->sfp_bus)
- sfp_unregister_upstream(pl->sfp_bus);
+ sfp_bus_del_upstream(pl->sfp_bus);
if (pl->link_gpio)
gpiod_put(pl->link_gpio);
@@ -722,11 +717,6 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy)
__ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
int ret;
- memset(&config, 0, sizeof(config));
- linkmode_copy(supported, phy->supported);
- linkmode_copy(config.advertising, phy->advertising);
- config.interface = pl->link_config.interface;
-
/*
* This is the new way of dealing with flow control for PHYs,
* as described by Timur Tabi in commit 529ed1275263 ("net: phy:
@@ -734,10 +724,12 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy)
* using our validate call to the MAC, we rely upon the MAC
* clearing the bits from both supported and advertising fields.
*/
- if (phylink_test(supported, Pause))
- phylink_set(config.advertising, Pause);
- if (phylink_test(supported, Asym_Pause))
- phylink_set(config.advertising, Asym_Pause);
+ phy_support_asym_pause(phy);
+
+ memset(&config, 0, sizeof(config));
+ linkmode_copy(supported, phy->supported);
+ linkmode_copy(config.advertising, phy->advertising);
+ config.interface = pl->link_config.interface;
ret = phylink_validate(pl, supported, &config);
if (ret)
@@ -1150,7 +1142,7 @@ int phylink_ethtool_ksettings_get(struct phylink *pl,
if (pl->phydev)
break;
- phylink_get_mac_state(pl, &link_state);
+ phylink_mac_pcs_get_state(pl, &link_state);
/* The MAC is reporting the link results from its own PCS
* layer via in-band status. Report these as the current
@@ -1254,7 +1246,13 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
pl->link_config.duplex = our_kset.base.duplex;
pl->link_config.an_enabled = our_kset.base.autoneg != AUTONEG_DISABLE;
- if (!test_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state)) {
+ /* If we have a PHY, phylib will call our link state function if the
+ * mode has changed, which will trigger a resolve and update the MAC
+ * configuration. For a fixed link, this isn't able to change any
+ * parameters, which just leaves inband mode.
+ */
+ if (pl->link_an_mode == MLO_AN_INBAND &&
+ !test_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state)) {
phylink_mac_config(pl, &pl->link_config);
phylink_mac_an_restart(pl);
}
@@ -1334,15 +1332,16 @@ int phylink_ethtool_set_pauseparam(struct phylink *pl,
if (pause->tx_pause)
config->pause |= MLO_PAUSE_TX;
- if (!test_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state)) {
+ /* If we have a PHY, phylib will call our link state function if the
+ * mode has changed, which will trigger a resolve and update the MAC
+ * configuration.
+ */
+ if (pl->phydev) {
+ phy_set_asym_pause(pl->phydev, pause->rx_pause,
+ pause->tx_pause);
+ } else if (!test_bit(PHYLINK_DISABLE_STOPPED,
+ &pl->phylink_disable_state)) {
switch (pl->link_an_mode) {
- case MLO_AN_PHY:
- /* Silently mark the carrier down, and then trigger a resolve */
- if (pl->netdev)
- netif_carrier_off(pl->netdev);
- phylink_run_resolve(pl);
- break;
-
case MLO_AN_FIXED:
/* Should we allow fixed links to change against the config? */
phylink_resolve_flow(pl, config);
@@ -1562,10 +1561,7 @@ static int phylink_mii_read(struct phylink *pl, unsigned int phy_id,
case MLO_AN_INBAND:
if (phy_id == 0) {
- val = phylink_get_mac_state(pl, &state);
- if (val < 0)
- return val;
-
+ phylink_mac_pcs_get_state(pl, &state);
val = phylink_mii_emul_read(reg, &state);
}
break;
@@ -1744,8 +1740,7 @@ static int phylink_sfp_module_insert(void *upstream,
if (phy_interface_mode_is_8023z(iface) && pl->phydev)
return -EINVAL;
- changed = !bitmap_equal(pl->supported, support,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
+ changed = !linkmode_equal(pl->supported, support);
if (changed) {
linkmode_copy(pl->supported, support);
linkmode_copy(pl->link_config.advertising, config.advertising);
diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c
index b23fc41896ef..5a72093ab6e7 100644
--- a/drivers/net/phy/sfp-bus.c
+++ b/drivers/net/phy/sfp-bus.c
@@ -4,11 +4,18 @@
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/phylink.h>
+#include <linux/property.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include "sfp.h"
+struct sfp_quirk {
+ const char *vendor;
+ const char *part;
+ void (*modes)(const struct sfp_eeprom_id *id, unsigned long *modes);
+};
+
/**
* struct sfp_bus - internal representation of a sfp bus
*/
@@ -21,6 +28,7 @@ struct sfp_bus {
const struct sfp_socket_ops *socket_ops;
struct device *sfp_dev;
struct sfp *sfp;
+ const struct sfp_quirk *sfp_quirk;
const struct sfp_upstream_ops *upstream_ops;
void *upstream;
@@ -30,6 +38,71 @@ struct sfp_bus {
bool started;
};
+static void sfp_quirk_2500basex(const struct sfp_eeprom_id *id,
+ unsigned long *modes)
+{
+ phylink_set(modes, 2500baseX_Full);
+}
+
+static const struct sfp_quirk sfp_quirks[] = {
+ {
+	// Alcatel Lucent G-010S-P can operate at 2500base-X, but
+	// incorrectly reports 2500MBd NRZ in its EEPROM
+ .vendor = "ALCATELLUCENT",
+ .part = "G010SP",
+ .modes = sfp_quirk_2500basex,
+ }, {
+	// Alcatel Lucent G-010S-A can operate at 2500base-X, but
+	// reports 3.2GBd NRZ in its EEPROM
+ .vendor = "ALCATELLUCENT",
+ .part = "3FE46541AA",
+ .modes = sfp_quirk_2500basex,
+ }, {
+	// Huawei MA5671A can operate at 2500base-X, but reports 1.2GBd
+	// NRZ in its EEPROM
+ .vendor = "HUAWEI",
+ .part = "MA5671A",
+ .modes = sfp_quirk_2500basex,
+ },
+};
+
+static size_t sfp_strlen(const char *str, size_t maxlen)
+{
+ size_t size, i;
+
+ /* Trailing characters should be filled with space chars */
+ for (i = 0, size = 0; i < maxlen; i++)
+ if (str[i] != ' ')
+ size = i + 1;
+
+ return size;
+}
+
+static bool sfp_match(const char *qs, const char *str, size_t len)
+{
+ if (!qs)
+ return true;
+ if (strlen(qs) != len)
+ return false;
+ return !strncmp(qs, str, len);
+}
+
+static const struct sfp_quirk *sfp_lookup_quirk(const struct sfp_eeprom_id *id)
+{
+ const struct sfp_quirk *q;
+ unsigned int i;
+ size_t vs, ps;
+
+ vs = sfp_strlen(id->base.vendor_name, ARRAY_SIZE(id->base.vendor_name));
+ ps = sfp_strlen(id->base.vendor_pn, ARRAY_SIZE(id->base.vendor_pn));
+
+ for (i = 0, q = sfp_quirks; i < ARRAY_SIZE(sfp_quirks); i++, q++)
+ if (sfp_match(q->vendor, id->base.vendor_name, vs) &&
+ sfp_match(q->part, id->base.vendor_pn, ps))
+ return q;
+
+ return NULL;
+}
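Vendor strings in the SFP EEPROM are fixed-width and space-padded, which is why sfp_strlen() measures up to the last non-space character instead of looking for a NUL. A standalone check of that behaviour, with sfp_strlen() copied verbatim from above:

#include <stdio.h>
#include <stddef.h>

static size_t sfp_strlen(const char *str, size_t maxlen)
{
	size_t size, i;

	/* Trailing characters should be filled with space chars */
	for (i = 0, size = 0; i < maxlen; i++)
		if (str[i] != ' ')
			size = i + 1;

	return size;
}

int main(void)
{
	/* 16-byte, space-padded vendor name as found in the EEPROM */
	const char vendor[16] = "HUAWEI          ";

	printf("%zu\n", sfp_strlen(vendor, sizeof(vendor)));	/* prints 6 */
	return 0;
}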
/**
* sfp_parse_port() - Parse the EEPROM base ID, setting the port type
* @bus: a pointer to the &struct sfp_bus structure for the sfp module
@@ -233,6 +306,9 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
phylink_set(modes, 1000baseX_Full);
}
+ if (bus->sfp_quirk)
+ bus->sfp_quirk->modes(id, modes);
+
bitmap_or(support, support, modes, __ETHTOOL_LINK_MODE_MASK_NBITS);
phylink_set(support, Autoneg);
@@ -328,10 +404,19 @@ static void sfp_bus_release(struct kref *kref)
kfree(bus);
}
-static void sfp_bus_put(struct sfp_bus *bus)
+/**
+ * sfp_bus_put() - put a reference on the &struct sfp_bus
+ * @bus: the &struct sfp_bus found via sfp_bus_find_fwnode()
+ *
+ * Put a reference on the &struct sfp_bus and free the underlying structure
+ * if this was the last reference.
+ */
+void sfp_bus_put(struct sfp_bus *bus)
{
- kref_put_mutex(&bus->kref, sfp_bus_release, &sfp_mutex);
+ if (bus)
+ kref_put_mutex(&bus->kref, sfp_bus_release, &sfp_mutex);
}
+EXPORT_SYMBOL_GPL(sfp_bus_put);
static int sfp_register_bus(struct sfp_bus *bus)
{
@@ -347,11 +432,11 @@ static int sfp_register_bus(struct sfp_bus *bus)
return ret;
}
}
+ bus->registered = true;
bus->socket_ops->attach(bus->sfp);
if (bus->started)
bus->socket_ops->start(bus->sfp);
bus->upstream_ops->attach(bus->upstream, bus);
- bus->registered = true;
return 0;
}
@@ -445,64 +530,111 @@ static void sfp_upstream_clear(struct sfp_bus *bus)
}
/**
- * sfp_register_upstream() - Register the neighbouring device
- * @fwnode: firmware node for the SFP bus
+ * sfp_bus_find_fwnode() - parse and locate the SFP bus from fwnode
+ * @fwnode: firmware node for the parent device (MAC or PHY)
+ *
+ * Parse the parent device's firmware node for an SFP bus, and locate
+ * the sfp_bus structure, incrementing its reference count. The returned
+ * reference must be put via sfp_bus_put() when done.
+ *
+ * Returns: on success, a pointer to the sfp_bus structure;
+ * %NULL if no SFP bus is specified;
+ * on failure, an error pointer value:
+ * an error corresponding to those detailed for
+ * fwnode_property_get_reference_args(), or
+ * %-ENOMEM if we failed to allocate the bus.
+ */
+struct sfp_bus *sfp_bus_find_fwnode(struct fwnode_handle *fwnode)
+{
+ struct fwnode_reference_args ref;
+ struct sfp_bus *bus;
+ int ret;
+
+ ret = fwnode_property_get_reference_args(fwnode, "sfp", NULL,
+ 0, 0, &ref);
+ if (ret == -ENOENT)
+ return NULL;
+ else if (ret < 0)
+ return ERR_PTR(ret);
+
+ bus = sfp_bus_get(ref.fwnode);
+ fwnode_handle_put(ref.fwnode);
+ if (!bus)
+ return ERR_PTR(-ENOMEM);
+
+ return bus;
+}
+EXPORT_SYMBOL_GPL(sfp_bus_find_fwnode);
+
+/**
+ * sfp_bus_add_upstream() - parse and register the neighbouring device
+ * @bus: the &struct sfp_bus found via sfp_bus_find_fwnode()
* @upstream: the upstream private data
* @ops: the upstream's &struct sfp_upstream_ops
*
- * Register the upstream device (eg, PHY) with the SFP bus. MAC drivers
- * should use phylink, which will call this function for them. Returns
- * a pointer to the allocated &struct sfp_bus.
+ * Add upstream driver for the SFP bus, and if the bus is complete, register
+ * the SFP bus using sfp_register_bus(). This takes a reference on the
+ * bus, so it is safe to put the bus after this call.
*
- * On error, returns %NULL.
+ * Returns: 0 on success, or when @bus is %NULL (no SFP bus specified);
+ * on failure, a negative errno from sfp_register_bus(), such as an
+ * error returned by the upstream's attach() or connect_phy() method.
*/
-struct sfp_bus *sfp_register_upstream(struct fwnode_handle *fwnode,
- void *upstream,
- const struct sfp_upstream_ops *ops)
+int sfp_bus_add_upstream(struct sfp_bus *bus, void *upstream,
+ const struct sfp_upstream_ops *ops)
{
- struct sfp_bus *bus = sfp_bus_get(fwnode);
- int ret = 0;
+ int ret;
- if (bus) {
- rtnl_lock();
- bus->upstream_ops = ops;
- bus->upstream = upstream;
+ /* If no bus, return success */
+ if (!bus)
+ return 0;
- if (bus->sfp) {
- ret = sfp_register_bus(bus);
- if (ret)
- sfp_upstream_clear(bus);
- }
- rtnl_unlock();
+ rtnl_lock();
+ kref_get(&bus->kref);
+ bus->upstream_ops = ops;
+ bus->upstream = upstream;
+
+ if (bus->sfp) {
+ ret = sfp_register_bus(bus);
+ if (ret)
+ sfp_upstream_clear(bus);
+ } else {
+ ret = 0;
}
+ rtnl_unlock();
- if (ret) {
+ if (ret)
sfp_bus_put(bus);
- bus = NULL;
- }
- return bus;
+ return ret;
}
-EXPORT_SYMBOL_GPL(sfp_register_upstream);
+EXPORT_SYMBOL_GPL(sfp_bus_add_upstream);
/**
- * sfp_unregister_upstream() - Unregister sfp bus
+ * sfp_bus_del_upstream() - Delete an upstream connection from the SFP bus
* @bus: a pointer to the &struct sfp_bus structure for the sfp module
*
- * Unregister a previously registered upstream connection for the SFP
- * module. @bus is returned from sfp_register_upstream().
+ * Delete a previously registered upstream connection for the SFP
+ * module. @bus should have been added by sfp_bus_add_upstream().
*/
-void sfp_unregister_upstream(struct sfp_bus *bus)
+void sfp_bus_del_upstream(struct sfp_bus *bus)
{
- rtnl_lock();
- if (bus->sfp)
- sfp_unregister_bus(bus);
- sfp_upstream_clear(bus);
- rtnl_unlock();
+ if (bus) {
+ rtnl_lock();
+ if (bus->sfp)
+ sfp_unregister_bus(bus);
+ sfp_upstream_clear(bus);
+ rtnl_unlock();
- sfp_bus_put(bus);
+ sfp_bus_put(bus);
+ }
}
-EXPORT_SYMBOL_GPL(sfp_unregister_upstream);
+EXPORT_SYMBOL_GPL(sfp_bus_del_upstream);
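A minimal sketch of how an upstream (MAC) driver consumes this reworked API; priv and my_upstream_ops are hypothetical caller-side names (phylink performs this sequence on behalf of MAC drivers):

	struct sfp_bus *bus;
	int err;

	bus = sfp_bus_find_fwnode(dev_fwnode(dev));
	if (IS_ERR(bus))
		return PTR_ERR(bus);

	priv->sfp_bus = bus;
	err = sfp_bus_add_upstream(bus, priv, &my_upstream_ops);
	sfp_bus_put(bus);	/* add_upstream took its own reference */
	if (err) {
		priv->sfp_bus = NULL;
		return err;
	}

	/* ... and on teardown ... */
	sfp_bus_del_upstream(priv->sfp_bus);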
/* Socket driver entry points */
int sfp_add_phy(struct sfp_bus *bus, struct phy_device *phydev)
@@ -553,6 +685,8 @@ int sfp_module_insert(struct sfp_bus *bus, const struct sfp_eeprom_id *id)
const struct sfp_upstream_ops *ops = sfp_get_upstream_ops(bus);
int ret = 0;
+ bus->sfp_quirk = sfp_lookup_quirk(id);
+
if (ops && ops->module_insert)
ret = ops->module_insert(bus->upstream, id);
@@ -566,6 +700,8 @@ void sfp_module_remove(struct sfp_bus *bus)
if (ops && ops->module_remove)
ops->module_remove(bus->upstream);
+
+ bus->sfp_quirk = NULL;
}
EXPORT_SYMBOL_GPL(sfp_module_remove);
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index 272d5773573e..bdbbb76f8fd3 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -36,6 +36,8 @@ enum {
SFP_E_INSERT = 0,
SFP_E_REMOVE,
+ SFP_E_DEV_ATTACH,
+ SFP_E_DEV_DETACH,
SFP_E_DEV_DOWN,
SFP_E_DEV_UP,
SFP_E_TX_FAULT,
@@ -45,16 +47,21 @@ enum {
SFP_E_TIMEOUT,
SFP_MOD_EMPTY = 0,
+ SFP_MOD_ERROR,
SFP_MOD_PROBE,
+ SFP_MOD_WAITDEV,
SFP_MOD_HPOWER,
+ SFP_MOD_WAITPWR,
SFP_MOD_PRESENT,
- SFP_MOD_ERROR,
- SFP_DEV_DOWN = 0,
+ SFP_DEV_DETACHED = 0,
+ SFP_DEV_DOWN,
SFP_DEV_UP,
SFP_S_DOWN = 0,
+ SFP_S_WAIT,
SFP_S_INIT,
+ SFP_S_INIT_TX_FAULT,
SFP_S_WAIT_LOS,
SFP_S_LINK_UP,
SFP_S_TX_FAULT,
@@ -64,10 +71,12 @@ enum {
static const char * const mod_state_strings[] = {
[SFP_MOD_EMPTY] = "empty",
+ [SFP_MOD_ERROR] = "error",
[SFP_MOD_PROBE] = "probe",
+ [SFP_MOD_WAITDEV] = "waitdev",
[SFP_MOD_HPOWER] = "hpower",
+ [SFP_MOD_WAITPWR] = "waitpwr",
[SFP_MOD_PRESENT] = "present",
- [SFP_MOD_ERROR] = "error",
};
static const char *mod_state_to_str(unsigned short mod_state)
@@ -78,6 +87,7 @@ static const char *mod_state_to_str(unsigned short mod_state)
}
static const char * const dev_state_strings[] = {
+ [SFP_DEV_DETACHED] = "detached",
[SFP_DEV_DOWN] = "down",
[SFP_DEV_UP] = "up",
};
@@ -92,6 +102,8 @@ static const char *dev_state_to_str(unsigned short dev_state)
static const char * const event_strings[] = {
[SFP_E_INSERT] = "insert",
[SFP_E_REMOVE] = "remove",
+ [SFP_E_DEV_ATTACH] = "dev_attach",
+ [SFP_E_DEV_DETACH] = "dev_detach",
[SFP_E_DEV_DOWN] = "dev_down",
[SFP_E_DEV_UP] = "dev_up",
[SFP_E_TX_FAULT] = "tx_fault",
@@ -110,7 +122,9 @@ static const char *event_to_str(unsigned short event)
static const char * const sm_state_strings[] = {
[SFP_S_DOWN] = "down",
+ [SFP_S_WAIT] = "wait",
[SFP_S_INIT] = "init",
+ [SFP_S_INIT_TX_FAULT] = "init_tx_fault",
[SFP_S_WAIT_LOS] = "wait_los",
[SFP_S_LINK_UP] = "link_up",
[SFP_S_TX_FAULT] = "tx_fault",
@@ -141,6 +155,7 @@ static const enum gpiod_flags gpio_flags[] = {
GPIOD_ASIS,
};
+#define T_WAIT msecs_to_jiffies(50)
#define T_INIT_JIFFIES msecs_to_jiffies(300)
#define T_RESET_US 10
#define T_FAULT_RECOVER msecs_to_jiffies(1000)
@@ -149,22 +164,21 @@ static const enum gpiod_flags gpio_flags[] = {
* the same length on the PCB, which means it's possible for MOD DEF 0 to
* connect before the I2C bus on MOD DEF 1/2.
*
- * The SFP MSA specifies 300ms as t_init (the time taken for TX_FAULT to
- * be deasserted) but makes no mention of the earliest time before we can
- * access the I2C EEPROM. However, Avago modules require 300ms.
+ * The SFF-8472 specifies t_serial ("Time from power on until module is
+ * ready for data transmission over the two wire serial bus.") as 300ms.
*/
-#define T_PROBE_INIT msecs_to_jiffies(300)
-#define T_HPOWER_LEVEL msecs_to_jiffies(300)
-#define T_PROBE_RETRY msecs_to_jiffies(100)
+#define T_SERIAL msecs_to_jiffies(300)
+#define T_HPOWER_LEVEL msecs_to_jiffies(300)
+#define T_PROBE_RETRY_INIT msecs_to_jiffies(100)
+#define R_PROBE_RETRY_INIT 10
+#define T_PROBE_RETRY_SLOW msecs_to_jiffies(5000)
+#define R_PROBE_RETRY_SLOW 12
/* SFP modules appear to always have their PHY configured for bus address
* 0x56 (which with mdio-i2c, translates to a PHY address of 22).
*/
#define SFP_PHY_ADDR 22
-/* Give this long for the PHY to reset. */
-#define T_PHY_RESET_MS 50
-
struct sff_data {
unsigned int gpios;
bool (*module_supported)(const struct sfp_eeprom_id *id);
@@ -187,20 +201,28 @@ struct sfp {
struct gpio_desc *gpio[GPIO_MAX];
int gpio_irq[GPIO_MAX];
- bool attached;
+ bool need_poll;
+
struct mutex st_mutex; /* Protects state */
+ unsigned int state_soft_mask;
unsigned int state;
struct delayed_work poll;
struct delayed_work timeout;
struct mutex sm_mutex; /* Protects state machine */
unsigned char sm_mod_state;
+ unsigned char sm_mod_tries_init;
+ unsigned char sm_mod_tries;
unsigned char sm_dev_state;
unsigned short sm_state;
unsigned int sm_retries;
struct sfp_eeprom_id id;
+ unsigned int module_power_mW;
+
#if IS_ENABLED(CONFIG_HWMON)
struct sfp_diag diag;
+ struct delayed_work hwmon_probe;
+ unsigned int hwmon_tries;
struct device *hwmon_dev;
char *hwmon_name;
#endif
@@ -376,24 +398,90 @@ static int sfp_i2c_configure(struct sfp *sfp, struct i2c_adapter *i2c)
}
/* Interface */
-static unsigned int sfp_get_state(struct sfp *sfp)
+static int sfp_read(struct sfp *sfp, bool a2, u8 addr, void *buf, size_t len)
{
- return sfp->get_state(sfp);
+ return sfp->read(sfp, a2, addr, buf, len);
}
-static void sfp_set_state(struct sfp *sfp, unsigned int state)
+static int sfp_write(struct sfp *sfp, bool a2, u8 addr, void *buf, size_t len)
{
- sfp->set_state(sfp, state);
+ return sfp->write(sfp, a2, addr, buf, len);
}
-static int sfp_read(struct sfp *sfp, bool a2, u8 addr, void *buf, size_t len)
+static unsigned int sfp_soft_get_state(struct sfp *sfp)
{
- return sfp->read(sfp, a2, addr, buf, len);
+ unsigned int state = 0;
+ u8 status;
+
+ if (sfp_read(sfp, true, SFP_STATUS, &status, sizeof(status)) ==
+ sizeof(status)) {
+ if (status & SFP_STATUS_RX_LOS)
+ state |= SFP_F_LOS;
+ if (status & SFP_STATUS_TX_FAULT)
+ state |= SFP_F_TX_FAULT;
+ }
+
+ return state & sfp->state_soft_mask;
}
-static int sfp_write(struct sfp *sfp, bool a2, u8 addr, void *buf, size_t len)
+static void sfp_soft_set_state(struct sfp *sfp, unsigned int state)
{
- return sfp->write(sfp, a2, addr, buf, len);
+ u8 status;
+
+ if (sfp_read(sfp, true, SFP_STATUS, &status, sizeof(status)) ==
+ sizeof(status)) {
+ if (state & SFP_F_TX_DISABLE)
+ status |= SFP_STATUS_TX_DISABLE_FORCE;
+ else
+ status &= ~SFP_STATUS_TX_DISABLE_FORCE;
+
+ sfp_write(sfp, true, SFP_STATUS, &status, sizeof(status));
+ }
+}
+
+static void sfp_soft_start_poll(struct sfp *sfp)
+{
+ const struct sfp_eeprom_id *id = &sfp->id;
+
+ sfp->state_soft_mask = 0;
+ if (id->ext.enhopts & SFP_ENHOPTS_SOFT_TX_DISABLE &&
+ !sfp->gpio[GPIO_TX_DISABLE])
+ sfp->state_soft_mask |= SFP_F_TX_DISABLE;
+ if (id->ext.enhopts & SFP_ENHOPTS_SOFT_TX_FAULT &&
+ !sfp->gpio[GPIO_TX_FAULT])
+ sfp->state_soft_mask |= SFP_F_TX_FAULT;
+ if (id->ext.enhopts & SFP_ENHOPTS_SOFT_RX_LOS &&
+ !sfp->gpio[GPIO_LOS])
+ sfp->state_soft_mask |= SFP_F_LOS;
+
+ if (sfp->state_soft_mask & (SFP_F_LOS | SFP_F_TX_FAULT) &&
+ !sfp->need_poll)
+ mod_delayed_work(system_wq, &sfp->poll, poll_jiffies);
+}
+
+static void sfp_soft_stop_poll(struct sfp *sfp)
+{
+ sfp->state_soft_mask = 0;
+}
+
+static unsigned int sfp_get_state(struct sfp *sfp)
+{
+ unsigned int state = sfp->get_state(sfp);
+
+ if (state & SFP_F_PRESENT &&
+ sfp->state_soft_mask & (SFP_F_LOS | SFP_F_TX_FAULT))
+ state |= sfp_soft_get_state(sfp);
+
+ return state;
+}
+
+static void sfp_set_state(struct sfp *sfp, unsigned int state)
+{
+ sfp->set_state(sfp, state);
+
+ if (state & SFP_F_PRESENT &&
+ sfp->state_soft_mask & SFP_F_TX_DISABLE)
+ sfp_soft_set_state(sfp, state);
}
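Summarising the soft-state plumbing above: the A2h SFP_STATUS byte is consulted only for signals that lack a dedicated GPIO and that the module advertises via the corresponding SFP_ENHOPTS_* bit:

	/* flag              A2h SFP_STATUS bit            direction */
	/* SFP_F_TX_DISABLE  SFP_STATUS_TX_DISABLE_FORCE   written   */
	/* SFP_F_TX_FAULT    SFP_STATUS_TX_FAULT           read      */
	/* SFP_F_LOS         SFP_STATUS_RX_LOS             read      */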
static unsigned int sfp_check(void *buf, size_t len)
@@ -1142,29 +1230,27 @@ static const struct hwmon_chip_info sfp_hwmon_chip_info = {
.info = sfp_hwmon_info,
};
-static int sfp_hwmon_insert(struct sfp *sfp)
+static void sfp_hwmon_probe(struct work_struct *work)
{
+ struct sfp *sfp = container_of(work, struct sfp, hwmon_probe.work);
int err, i;
- if (sfp->id.ext.sff8472_compliance == SFP_SFF8472_COMPLIANCE_NONE)
- return 0;
-
- if (!(sfp->id.ext.diagmon & SFP_DIAGMON_DDM))
- return 0;
-
- if (sfp->id.ext.diagmon & SFP_DIAGMON_ADDRMODE)
- /* This driver in general does not support address
- * change.
- */
- return 0;
-
err = sfp_read(sfp, true, 0, &sfp->diag, sizeof(sfp->diag));
- if (err < 0)
- return err;
+ if (err < 0) {
+ if (sfp->hwmon_tries--) {
+ mod_delayed_work(system_wq, &sfp->hwmon_probe,
+ T_PROBE_RETRY_SLOW);
+ } else {
+ dev_warn(sfp->dev, "hwmon probe failed: %d\n", err);
+ }
+ return;
+ }
sfp->hwmon_name = kstrdup(dev_name(sfp->dev), GFP_KERNEL);
- if (!sfp->hwmon_name)
- return -ENODEV;
+ if (!sfp->hwmon_name) {
+ dev_err(sfp->dev, "out of memory for hwmon name\n");
+ return;
+ }
for (i = 0; sfp->hwmon_name[i]; i++)
if (hwmon_is_bad_char(sfp->hwmon_name[i]))
@@ -1174,18 +1260,52 @@ static int sfp_hwmon_insert(struct sfp *sfp)
sfp->hwmon_name, sfp,
&sfp_hwmon_chip_info,
NULL);
+ if (IS_ERR(sfp->hwmon_dev))
+ dev_err(sfp->dev, "failed to register hwmon device: %ld\n",
+ PTR_ERR(sfp->hwmon_dev));
+}
+
+static int sfp_hwmon_insert(struct sfp *sfp)
+{
+ if (sfp->id.ext.sff8472_compliance == SFP_SFF8472_COMPLIANCE_NONE)
+ return 0;
+
+ if (!(sfp->id.ext.diagmon & SFP_DIAGMON_DDM))
+ return 0;
- return PTR_ERR_OR_ZERO(sfp->hwmon_dev);
+ if (sfp->id.ext.diagmon & SFP_DIAGMON_ADDRMODE)
+ /* This driver in general does not support address
+ * change.
+ */
+ return 0;
+
+ mod_delayed_work(system_wq, &sfp->hwmon_probe, 1);
+ sfp->hwmon_tries = R_PROBE_RETRY_SLOW;
+
+ return 0;
}
static void sfp_hwmon_remove(struct sfp *sfp)
{
+ cancel_delayed_work_sync(&sfp->hwmon_probe);
if (!IS_ERR_OR_NULL(sfp->hwmon_dev)) {
hwmon_device_unregister(sfp->hwmon_dev);
sfp->hwmon_dev = NULL;
kfree(sfp->hwmon_name);
}
}
+
+static int sfp_hwmon_init(struct sfp *sfp)
+{
+ INIT_DELAYED_WORK(&sfp->hwmon_probe, sfp_hwmon_probe);
+
+ return 0;
+}
+
+static void sfp_hwmon_exit(struct sfp *sfp)
+{
+ cancel_delayed_work_sync(&sfp->hwmon_probe);
+}
#else
static int sfp_hwmon_insert(struct sfp *sfp)
{
@@ -1195,6 +1315,15 @@ static int sfp_hwmon_insert(struct sfp *sfp)
static void sfp_hwmon_remove(struct sfp *sfp)
{
}
+
+static int sfp_hwmon_init(struct sfp *sfp)
+{
+ return 0;
+}
+
+static void sfp_hwmon_exit(struct sfp *sfp)
+{
+}
#endif
/* Helpers */
@@ -1245,7 +1374,7 @@ static void sfp_sm_next(struct sfp *sfp, unsigned int state,
sfp_sm_set_timer(sfp, timeout);
}
-static void sfp_sm_ins_next(struct sfp *sfp, unsigned int state,
+static void sfp_sm_mod_next(struct sfp *sfp, unsigned int state,
unsigned int timeout)
{
sfp->sm_mod_state = state;
@@ -1266,8 +1395,6 @@ static void sfp_sm_probe_phy(struct sfp *sfp)
struct phy_device *phy;
int err;
- msleep(T_PHY_RESET_MS);
-
phy = mdiobus_scan(sfp->i2c_mii, SFP_PHY_ADDR);
if (phy == ERR_PTR(-ENODEV)) {
dev_info(sfp->dev, "no PHY detected\n");
@@ -1335,7 +1462,7 @@ static bool sfp_los_event_inactive(struct sfp *sfp, unsigned int event)
event == SFP_E_LOS_LOW);
}
-static void sfp_sm_fault(struct sfp *sfp, bool warn)
+static void sfp_sm_fault(struct sfp *sfp, unsigned int next_state, bool warn)
{
if (sfp->sm_retries && !--sfp->sm_retries) {
dev_err(sfp->dev,
@@ -1345,21 +1472,12 @@ static void sfp_sm_fault(struct sfp *sfp, bool warn)
if (warn)
dev_err(sfp->dev, "module transmit fault indicated\n");
- sfp_sm_next(sfp, SFP_S_TX_FAULT, T_FAULT_RECOVER);
+ sfp_sm_next(sfp, next_state, T_FAULT_RECOVER);
}
}
-static void sfp_sm_mod_init(struct sfp *sfp)
+static void sfp_sm_probe_for_phy(struct sfp *sfp)
{
- sfp_module_tx_enable(sfp);
-
- /* Wait t_init before indicating that the link is up, provided the
- * current state indicates no TX_FAULT. If TX_FAULT clears before
- * this time, that's fine too.
- */
- sfp_sm_next(sfp, SFP_S_INIT, T_INIT_JIFFIES);
- sfp->sm_retries = 5;
-
/* Setting the serdes link mode is guesswork: there's no
* field in the EEPROM which indicates what mode should
* be used.
@@ -1375,69 +1493,83 @@ static void sfp_sm_mod_init(struct sfp *sfp)
sfp_sm_probe_phy(sfp);
}
-static int sfp_sm_mod_hpower(struct sfp *sfp)
+static int sfp_module_parse_power(struct sfp *sfp)
{
- u32 power;
- u8 val;
- int err;
+ u32 power_mW = 1000;
- power = 1000;
if (sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_POWER_DECL))
- power = 1500;
+ power_mW = 1500;
if (sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_HIGH_POWER_LEVEL))
- power = 2000;
-
- if (sfp->id.ext.sff8472_compliance == SFP_SFF8472_COMPLIANCE_NONE &&
- (sfp->id.ext.diagmon & (SFP_DIAGMON_DDM | SFP_DIAGMON_ADDRMODE)) !=
- SFP_DIAGMON_DDM) {
- /* The module appears not to implement bus address 0xa2,
- * or requires an address change sequence, so assume that
- * the module powers up in the indicated power mode.
- */
- if (power > sfp->max_power_mW) {
+ power_mW = 2000;
+
+ if (power_mW > sfp->max_power_mW) {
+ /* Module power specification exceeds the allowed maximum. */
+ if (sfp->id.ext.sff8472_compliance ==
+ SFP_SFF8472_COMPLIANCE_NONE &&
+ !(sfp->id.ext.diagmon & SFP_DIAGMON_DDM)) {
+ /* The module appears not to implement bus address
+ * 0xa2, so assume that the module powers up in the
+ * indicated mode.
+ */
dev_err(sfp->dev,
"Host does not support %u.%uW modules\n",
- power / 1000, (power / 100) % 10);
+ power_mW / 1000, (power_mW / 100) % 10);
return -EINVAL;
+ } else {
+ dev_warn(sfp->dev,
+ "Host does not support %u.%uW modules, module left in power mode 1\n",
+ power_mW / 1000, (power_mW / 100) % 10);
+ return 0;
}
- return 0;
}
- if (power > sfp->max_power_mW) {
+ /* If the module requires a higher power mode, but also requires
+ * an address change sequence, warn the user that the module may
+ * not be functional.
+ */
+ if (sfp->id.ext.diagmon & SFP_DIAGMON_ADDRMODE && power_mW > 1000) {
dev_warn(sfp->dev,
- "Host does not support %u.%uW modules, module left in power mode 1\n",
- power / 1000, (power / 100) % 10);
+ "Address Change Sequence not supported but module requires %u.%uW, module may not be functional\n",
+ power_mW / 1000, (power_mW / 100) % 10);
return 0;
}
- if (power <= 1000)
- return 0;
+ sfp->module_power_mW = power_mW;
+
+ return 0;
+}
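Worked through, the option bits above map to a declared module power as follows, and the failure behaviour depends on whether the 0xa2 (DDM) address is reachable:

	/* neither option bit             -> 1.0 W (default, always accepted)
	 * SFP_OPTIONS_POWER_DECL         -> 1.5 W
	 * SFP_OPTIONS_HIGH_POWER_LEVEL   -> 2.0 W
	 *
	 * e.g. a 2.0 W module on a host with max_power_mW == 1000:
	 *   no 0xa2 access -> -EINVAL, module cannot be held at level 1
	 *   0xa2 access    -> warn, leave the module at level 1, return 0
	 */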
+
+static int sfp_sm_mod_hpower(struct sfp *sfp, bool enable)
+{
+ u8 val;
+ int err;
err = sfp_read(sfp, true, SFP_EXT_STATUS, &val, sizeof(val));
if (err != sizeof(val)) {
dev_err(sfp->dev, "Failed to read EEPROM: %d\n", err);
- err = -EAGAIN;
- goto err;
+ return -EAGAIN;
}
- val |= BIT(0);
+ if (enable)
+ val |= BIT(0);
+ else
+ val &= ~BIT(0);
err = sfp_write(sfp, true, SFP_EXT_STATUS, &val, sizeof(val));
if (err != sizeof(val)) {
dev_err(sfp->dev, "Failed to write EEPROM: %d\n", err);
- err = -EAGAIN;
- goto err;
+ return -EAGAIN;
}
- dev_info(sfp->dev, "Module switched to %u.%uW power level\n",
- power / 1000, (power / 100) % 10);
- return T_HPOWER_LEVEL;
+ if (enable)
+ dev_info(sfp->dev, "Module switched to %u.%uW power level\n",
+ sfp->module_power_mW / 1000,
+ (sfp->module_power_mW / 100) % 10);
-err:
- return err;
+ return 0;
}
-static int sfp_sm_mod_probe(struct sfp *sfp)
+static int sfp_sm_mod_probe(struct sfp *sfp, bool report)
{
/* SFP module inserted - read I2C data */
struct sfp_eeprom_id id;
@@ -1447,7 +1579,8 @@ static int sfp_sm_mod_probe(struct sfp *sfp)
ret = sfp_read(sfp, false, 0, &id, sizeof(id));
if (ret < 0) {
- dev_err(sfp->dev, "failed to read EEPROM: %d\n", ret);
+ if (report)
+ dev_err(sfp->dev, "failed to read EEPROM: %d\n", ret);
return -EAGAIN;
}
@@ -1505,7 +1638,7 @@ static int sfp_sm_mod_probe(struct sfp *sfp)
(int)sizeof(id.ext.datecode), id.ext.datecode);
/* Check whether we support this module */
- if (!sfp->type->module_supported(&sfp->id)) {
+ if (!sfp->type->module_supported(&id)) {
dev_err(sfp->dev,
"module is not supported - phys id 0x%02x 0x%02x\n",
sfp->id.base.phys_id, sfp->id.base.phys_ext_id);
@@ -1517,106 +1650,174 @@ static int sfp_sm_mod_probe(struct sfp *sfp)
dev_warn(sfp->dev,
"module address swap to access page 0xA2 is not supported.\n");
- ret = sfp_hwmon_insert(sfp);
- if (ret < 0)
- return ret;
-
- ret = sfp_module_insert(sfp->sfp_bus, &sfp->id);
+ /* Parse the module power requirement */
+ ret = sfp_module_parse_power(sfp);
if (ret < 0)
return ret;
- return sfp_sm_mod_hpower(sfp);
+ return 0;
}
static void sfp_sm_mod_remove(struct sfp *sfp)
{
- sfp_module_remove(sfp->sfp_bus);
+ if (sfp->sm_mod_state > SFP_MOD_WAITDEV)
+ sfp_module_remove(sfp->sfp_bus);
sfp_hwmon_remove(sfp);
- if (sfp->mod_phy)
- sfp_sm_phy_detach(sfp);
-
- sfp_module_tx_disable(sfp);
-
memset(&sfp->id, 0, sizeof(sfp->id));
+ sfp->module_power_mW = 0;
dev_info(sfp->dev, "module removed\n");
}
-static void sfp_sm_event(struct sfp *sfp, unsigned int event)
+/* This state machine tracks the upstream's state */
+static void sfp_sm_device(struct sfp *sfp, unsigned int event)
{
- mutex_lock(&sfp->sm_mutex);
+ switch (sfp->sm_dev_state) {
+ default:
+ if (event == SFP_E_DEV_ATTACH)
+ sfp->sm_dev_state = SFP_DEV_DOWN;
+ break;
- dev_dbg(sfp->dev, "SM: enter %s:%s:%s event %s\n",
- mod_state_to_str(sfp->sm_mod_state),
- dev_state_to_str(sfp->sm_dev_state),
- sm_state_to_str(sfp->sm_state),
- event_to_str(event));
+ case SFP_DEV_DOWN:
+ if (event == SFP_E_DEV_DETACH)
+ sfp->sm_dev_state = SFP_DEV_DETACHED;
+ else if (event == SFP_E_DEV_UP)
+ sfp->sm_dev_state = SFP_DEV_UP;
+ break;
+
+ case SFP_DEV_UP:
+ if (event == SFP_E_DEV_DETACH)
+ sfp->sm_dev_state = SFP_DEV_DETACHED;
+ else if (event == SFP_E_DEV_DOWN)
+ sfp->sm_dev_state = SFP_DEV_DOWN;
+ break;
+ }
+}
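In sketch form, the transitions implemented by this helper:

	/*
	 *              ATTACH            UP
	 *  DETACHED ----------> DOWN <--------> UP
	 *     ^                  |     DOWN     |
	 *     +------ DETACH ----+--- DETACH ---+
	 */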
+
+/* This state machine tracks the insert/remove state of the module, probes
+ * the on-board EEPROM, and sets up the power level.
+ */
+static void sfp_sm_module(struct sfp *sfp, unsigned int event)
+{
+ int err;
+
+ /* Handle remove event globally, it resets this state machine */
+ if (event == SFP_E_REMOVE) {
+ if (sfp->sm_mod_state > SFP_MOD_PROBE)
+ sfp_sm_mod_remove(sfp);
+ sfp_sm_mod_next(sfp, SFP_MOD_EMPTY, 0);
+ return;
+ }
+
+ /* Handle device detach globally */
+ if (sfp->sm_dev_state < SFP_DEV_DOWN &&
+ sfp->sm_mod_state > SFP_MOD_WAITDEV) {
+ if (sfp->module_power_mW > 1000 &&
+ sfp->sm_mod_state > SFP_MOD_HPOWER)
+ sfp_sm_mod_hpower(sfp, false);
+ sfp_sm_mod_next(sfp, SFP_MOD_WAITDEV, 0);
+ return;
+ }
- /* This state machine tracks the insert/remove state of
- * the module, and handles probing the on-board EEPROM.
- */
switch (sfp->sm_mod_state) {
default:
- if (event == SFP_E_INSERT && sfp->attached) {
- sfp_module_tx_disable(sfp);
- sfp_sm_ins_next(sfp, SFP_MOD_PROBE, T_PROBE_INIT);
+ if (event == SFP_E_INSERT) {
+ sfp_sm_mod_next(sfp, SFP_MOD_PROBE, T_SERIAL);
+ sfp->sm_mod_tries_init = R_PROBE_RETRY_INIT;
+ sfp->sm_mod_tries = R_PROBE_RETRY_SLOW;
}
break;
case SFP_MOD_PROBE:
- if (event == SFP_E_REMOVE) {
- sfp_sm_ins_next(sfp, SFP_MOD_EMPTY, 0);
- } else if (event == SFP_E_TIMEOUT) {
- int val = sfp_sm_mod_probe(sfp);
-
- if (val == 0)
- sfp_sm_ins_next(sfp, SFP_MOD_PRESENT, 0);
- else if (val > 0)
- sfp_sm_ins_next(sfp, SFP_MOD_HPOWER, val);
- else if (val != -EAGAIN)
- sfp_sm_ins_next(sfp, SFP_MOD_ERROR, 0);
- else
- sfp_sm_set_timer(sfp, T_PROBE_RETRY);
+ /* Wait for T_SERIAL to time out */
+ if (event != SFP_E_TIMEOUT)
+ break;
+
+ err = sfp_sm_mod_probe(sfp, sfp->sm_mod_tries == 1);
+ if (err == -EAGAIN) {
+ if (sfp->sm_mod_tries_init &&
+ --sfp->sm_mod_tries_init) {
+ sfp_sm_set_timer(sfp, T_PROBE_RETRY_INIT);
+ break;
+ } else if (sfp->sm_mod_tries && --sfp->sm_mod_tries) {
+ if (sfp->sm_mod_tries == R_PROBE_RETRY_SLOW - 1)
+ dev_warn(sfp->dev,
+ "please wait, module slow to respond\n");
+ sfp_sm_set_timer(sfp, T_PROBE_RETRY_SLOW);
+ break;
+ }
+ }
+ if (err < 0) {
+ sfp_sm_mod_next(sfp, SFP_MOD_ERROR, 0);
+ break;
}
- break;
- case SFP_MOD_HPOWER:
- if (event == SFP_E_TIMEOUT) {
- sfp_sm_ins_next(sfp, SFP_MOD_PRESENT, 0);
+ sfp_sm_mod_next(sfp, SFP_MOD_WAITDEV, 0);
+ /* fall through */
+ case SFP_MOD_WAITDEV:
+ /* Ensure that the device is attached before proceeding */
+ if (sfp->sm_dev_state < SFP_DEV_DOWN)
+ break;
+
+ /* Report the module insertion to the upstream device */
+ err = sfp_module_insert(sfp->sfp_bus, &sfp->id);
+ if (err < 0) {
+ sfp_sm_mod_next(sfp, SFP_MOD_ERROR, 0);
break;
}
- /* fallthrough */
- case SFP_MOD_PRESENT:
- case SFP_MOD_ERROR:
- if (event == SFP_E_REMOVE) {
- sfp_sm_mod_remove(sfp);
- sfp_sm_ins_next(sfp, SFP_MOD_EMPTY, 0);
+
+ /* If this is a power level 1 module, we are done */
+ if (sfp->module_power_mW <= 1000)
+ goto insert;
+
+ sfp_sm_mod_next(sfp, SFP_MOD_HPOWER, 0);
+ /* fall through */
+ case SFP_MOD_HPOWER:
+ /* Enable high power mode */
+ err = sfp_sm_mod_hpower(sfp, true);
+ if (err < 0) {
+ if (err != -EAGAIN) {
+ sfp_module_remove(sfp->sfp_bus);
+ sfp_sm_mod_next(sfp, SFP_MOD_ERROR, 0);
+ } else {
+ sfp_sm_set_timer(sfp, T_PROBE_RETRY_INIT);
+ }
+ break;
}
+
+ sfp_sm_mod_next(sfp, SFP_MOD_WAITPWR, T_HPOWER_LEVEL);
break;
- }
- /* This state machine tracks the netdev up/down state */
- switch (sfp->sm_dev_state) {
- default:
- if (event == SFP_E_DEV_UP)
- sfp->sm_dev_state = SFP_DEV_UP;
+ case SFP_MOD_WAITPWR:
+ /* Wait for T_HPOWER_LEVEL to time out */
+ if (event != SFP_E_TIMEOUT)
+ break;
+
+ insert:
+ sfp_sm_mod_next(sfp, SFP_MOD_PRESENT, 0);
break;
- case SFP_DEV_UP:
- if (event == SFP_E_DEV_DOWN) {
- /* If the module has a PHY, avoid raising TX disable
- * as this resets the PHY. Otherwise, raise it to
- * turn the laser off.
- */
- if (!sfp->mod_phy)
- sfp_module_tx_disable(sfp);
- sfp->sm_dev_state = SFP_DEV_DOWN;
- }
+ case SFP_MOD_PRESENT:
+ case SFP_MOD_ERROR:
break;
}
+#if IS_ENABLED(CONFIG_HWMON)
+ if (sfp->sm_mod_state >= SFP_MOD_WAITDEV &&
+ IS_ERR_OR_NULL(sfp->hwmon_dev)) {
+ err = sfp_hwmon_insert(sfp);
+ if (err)
+ dev_warn(sfp->dev, "hwmon probe failed: %d\n", err);
+ }
+#endif
+}
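The happy path through this module state machine, summarised (retry and error edges omitted):

	/*
	 * EMPTY --INSERT--> PROBE --EEPROM read ok--> WAITDEV
	 * WAITDEV --device attached, module_insert() ok-->
	 *     power <= 1 W:  PRESENT
	 *     power  > 1 W:  HPOWER --> WAITPWR --T_HPOWER_LEVEL--> PRESENT
	 * any state --REMOVE--> EMPTY; unrecoverable failures --> ERROR
	 */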
+
+static void sfp_sm_main(struct sfp *sfp, unsigned int event)
+{
+ unsigned long timeout;
+
/* Some events are global */
if (sfp->sm_state != SFP_S_DOWN &&
(sfp->sm_mod_state != SFP_MOD_PRESENT ||
@@ -1626,29 +1827,87 @@ static void sfp_sm_event(struct sfp *sfp, unsigned int event)
sfp_sm_link_down(sfp);
if (sfp->mod_phy)
sfp_sm_phy_detach(sfp);
+ sfp_module_tx_disable(sfp);
+ sfp_soft_stop_poll(sfp);
sfp_sm_next(sfp, SFP_S_DOWN, 0);
- mutex_unlock(&sfp->sm_mutex);
return;
}
/* The main state machine */
switch (sfp->sm_state) {
case SFP_S_DOWN:
- if (sfp->sm_mod_state == SFP_MOD_PRESENT &&
- sfp->sm_dev_state == SFP_DEV_UP)
- sfp_sm_mod_init(sfp);
+ if (sfp->sm_mod_state != SFP_MOD_PRESENT ||
+ sfp->sm_dev_state != SFP_DEV_UP)
+ break;
+
+ if (!(sfp->id.ext.diagmon & SFP_DIAGMON_ADDRMODE))
+ sfp_soft_start_poll(sfp);
+
+ sfp_module_tx_enable(sfp);
+
+ /* Initialise the fault clearance retries */
+ sfp->sm_retries = 5;
+
+ /* We need to check the TX_FAULT state, which is not defined
+ * while TX_DISABLE is asserted. The earliest we want to do
+ * anything (such as probe for a PHY) is 50ms.
+ */
+ sfp_sm_next(sfp, SFP_S_WAIT, T_WAIT);
+ break;
+
+ case SFP_S_WAIT:
+ if (event != SFP_E_TIMEOUT)
+ break;
+
+ if (sfp->state & SFP_F_TX_FAULT) {
+ /* Wait t_init before indicating that the link is up,
+ * provided the current state indicates no TX_FAULT. If
+ * TX_FAULT clears before this time, that's fine too.
+ */
+ timeout = T_INIT_JIFFIES;
+ if (timeout > T_WAIT)
+ timeout -= T_WAIT;
+ else
+ timeout = 1;
+
+ sfp_sm_next(sfp, SFP_S_INIT, timeout);
+ } else {
+ /* TX_FAULT is not asserted, assume the module has
+ * finished initialising.
+ */
+ goto init_done;
+ }
break;
case SFP_S_INIT:
- if (event == SFP_E_TIMEOUT && sfp->state & SFP_F_TX_FAULT)
- sfp_sm_fault(sfp, true);
- else if (event == SFP_E_TIMEOUT || event == SFP_E_TX_CLEAR)
+ if (event == SFP_E_TIMEOUT && sfp->state & SFP_F_TX_FAULT) {
+ /* TX_FAULT is still asserted after t_init, so assume
+ * there is a fault.
+ */
+ sfp_sm_fault(sfp, SFP_S_INIT_TX_FAULT,
+ sfp->sm_retries == 5);
+ } else if (event == SFP_E_TIMEOUT || event == SFP_E_TX_CLEAR) {
+ init_done: /* TX_FAULT deasserted or we timed out with TX_FAULT
+ * clear. Probe for the PHY and check the LOS state.
+ */
+ sfp_sm_probe_for_phy(sfp);
sfp_sm_link_check_los(sfp);
+
+ /* Reset the fault retry count */
+ sfp->sm_retries = 5;
+ }
+ break;
+
+ case SFP_S_INIT_TX_FAULT:
+ if (event == SFP_E_TIMEOUT) {
+ sfp_module_tx_fault_reset(sfp);
+ sfp_sm_next(sfp, SFP_S_INIT, T_INIT_JIFFIES);
+ }
break;
case SFP_S_WAIT_LOS:
if (event == SFP_E_TX_FAULT)
- sfp_sm_fault(sfp, true);
+ sfp_sm_fault(sfp, SFP_S_TX_FAULT, true);
else if (sfp_los_event_inactive(sfp, event))
sfp_sm_link_up(sfp);
break;
@@ -1656,7 +1915,7 @@ static void sfp_sm_event(struct sfp *sfp, unsigned int event)
case SFP_S_LINK_UP:
if (event == SFP_E_TX_FAULT) {
sfp_sm_link_down(sfp);
- sfp_sm_fault(sfp, true);
+ sfp_sm_fault(sfp, SFP_S_TX_FAULT, true);
} else if (sfp_los_event_active(sfp, event)) {
sfp_sm_link_down(sfp);
sfp_sm_next(sfp, SFP_S_WAIT_LOS, 0);
@@ -1672,7 +1931,7 @@ static void sfp_sm_event(struct sfp *sfp, unsigned int event)
case SFP_S_REINIT:
if (event == SFP_E_TIMEOUT && sfp->state & SFP_F_TX_FAULT) {
- sfp_sm_fault(sfp, false);
+ sfp_sm_fault(sfp, SFP_S_TX_FAULT, false);
} else if (event == SFP_E_TIMEOUT || event == SFP_E_TX_CLEAR) {
dev_info(sfp->dev, "module transmit fault recovered\n");
sfp_sm_link_check_los(sfp);
@@ -1682,6 +1941,21 @@ static void sfp_sm_event(struct sfp *sfp, unsigned int event)
case SFP_S_TX_DISABLE:
break;
}
+}
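And the main state machine, in outline (fault retry counting abbreviated):

	/*
	 * DOWN --module PRESENT && dev UP--> WAIT --T_WAIT-->
	 *     TX_FAULT set:   INIT --t_init--> fault ? INIT_TX_FAULT
	 *                                            : probe PHY, check LOS
	 *     TX_FAULT clear: probe PHY, check LOS immediately
	 * LOS asserted -> WAIT_LOS; LOS clear -> LINK_UP
	 * TX_FAULT while up -> TX_FAULT --T_FAULT_RECOVER--> REINIT -> LINK_UP
	 */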
+
+static void sfp_sm_event(struct sfp *sfp, unsigned int event)
+{
+ mutex_lock(&sfp->sm_mutex);
+
+ dev_dbg(sfp->dev, "SM: enter %s:%s:%s event %s\n",
+ mod_state_to_str(sfp->sm_mod_state),
+ dev_state_to_str(sfp->sm_dev_state),
+ sm_state_to_str(sfp->sm_state),
+ event_to_str(event));
+
+ sfp_sm_device(sfp, event);
+ sfp_sm_module(sfp, event);
+ sfp_sm_main(sfp, event);
dev_dbg(sfp->dev, "SM: exit %s:%s:%s\n",
mod_state_to_str(sfp->sm_mod_state),
@@ -1693,15 +1967,12 @@ static void sfp_sm_event(struct sfp *sfp, unsigned int event)
static void sfp_attach(struct sfp *sfp)
{
- sfp->attached = true;
- if (sfp->state & SFP_F_PRESENT)
- sfp_sm_event(sfp, SFP_E_INSERT);
+ sfp_sm_event(sfp, SFP_E_DEV_ATTACH);
}
static void sfp_detach(struct sfp *sfp)
{
- sfp->attached = false;
- sfp_sm_event(sfp, SFP_E_REMOVE);
+ sfp_sm_event(sfp, SFP_E_DEV_DETACH);
}
static void sfp_start(struct sfp *sfp)
@@ -1828,7 +2099,10 @@ static void sfp_poll(struct work_struct *work)
struct sfp *sfp = container_of(work, struct sfp, poll.work);
sfp_check_state(sfp);
- mod_delayed_work(system_wq, &sfp->poll, poll_jiffies);
+
+ if (sfp->state_soft_mask & (SFP_F_LOS | SFP_F_TX_FAULT) ||
+ sfp->need_poll)
+ mod_delayed_work(system_wq, &sfp->poll, poll_jiffies);
}
static struct sfp *sfp_alloc(struct device *dev)
@@ -1846,6 +2120,8 @@ static struct sfp *sfp_alloc(struct device *dev)
INIT_DELAYED_WORK(&sfp->poll, sfp_poll);
INIT_DELAYED_WORK(&sfp->timeout, sfp_timeout);
+ sfp_hwmon_init(sfp);
+
return sfp;
}
@@ -1853,6 +2129,8 @@ static void sfp_cleanup(void *data)
{
struct sfp *sfp = data;
+ sfp_hwmon_exit(sfp);
+
cancel_delayed_work_sync(&sfp->poll);
cancel_delayed_work_sync(&sfp->timeout);
if (sfp->i2c_mii) {
@@ -1869,7 +2147,6 @@ static int sfp_probe(struct platform_device *pdev)
const struct sff_data *sff;
struct i2c_adapter *i2c;
struct sfp *sfp;
- bool poll = false;
int err, i;
sfp = sfp_alloc(&pdev->dev);
@@ -1964,6 +2241,11 @@ static int sfp_probe(struct platform_device *pdev)
sfp->state |= SFP_F_RATE_SELECT;
sfp_set_state(sfp, sfp->state);
sfp_module_tx_disable(sfp);
+ if (sfp->state & SFP_F_PRESENT) {
+ rtnl_lock();
+ sfp_sm_event(sfp, SFP_E_INSERT);
+ rtnl_unlock();
+ }
for (i = 0; i < GPIO_MAX; i++) {
if (gpio_flags[i] != GPIOD_IN || !sfp->gpio[i])
@@ -1971,7 +2253,7 @@ static int sfp_probe(struct platform_device *pdev)
sfp->gpio_irq[i] = gpiod_to_irq(sfp->gpio[i]);
if (!sfp->gpio_irq[i]) {
- poll = true;
+ sfp->need_poll = true;
continue;
}
@@ -1983,11 +2265,11 @@ static int sfp_probe(struct platform_device *pdev)
dev_name(sfp->dev), sfp);
if (err) {
sfp->gpio_irq[i] = 0;
- poll = true;
+ sfp->need_poll = true;
}
}
- if (poll)
+ if (sfp->need_poll)
mod_delayed_work(system_wq, &sfp->poll, poll_jiffies);
/* We could have an issue in cases no Tx disable pin is available or
diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c
index cac64b96d545..2a91c192659f 100644
--- a/drivers/net/slip/slip.c
+++ b/drivers/net/slip/slip.c
@@ -855,6 +855,8 @@ err_free_chan:
sl->tty = NULL;
tty->disc_data = NULL;
clear_bit(SLF_INUSE, &sl->flags);
+ sl_free_netdev(sl->dev);
+ free_netdev(sl->dev);
err_exit:
rtnl_unlock();
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 8156b33ee3e7..ca70a1d840eb 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -2074,7 +2074,8 @@ static int team_ethtool_get_link_ksettings(struct net_device *dev,
cmd->base.duplex = DUPLEX_UNKNOWN;
cmd->base.port = PORT_OTHER;
- list_for_each_entry(port, &team->port_list, list) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(port, &team->port_list, list) {
if (team_port_txable(port)) {
if (port->state.speed != SPEED_UNKNOWN)
speed += port->state.speed;
@@ -2083,6 +2084,8 @@ static int team_ethtool_get_link_ksettings(struct net_device *dev,
cmd->base.duplex = port->state.duplex;
}
}
+ rcu_read_unlock();
+
cmd->base.speed = speed ? : SPEED_UNKNOWN;
return 0;
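The team.c fix above is the standard RCU list-reader idiom; as a hedged reminder of the pattern (the list head and the use() call are placeholders):

	rcu_read_lock();
	list_for_each_entry_rcu(port, &head, list) {
		/* read-only access; an entry may be freed only after a
		 * grace period, once all readers have left this section
		 */
		use(port);
	}
	rcu_read_unlock();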
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index a8d3141582a5..683d371e6e82 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -136,10 +136,10 @@ struct tap_filter {
#define TUN_FLOW_EXPIRE (3 * HZ)
struct tun_pcpu_stats {
- u64 rx_packets;
- u64 rx_bytes;
- u64 tx_packets;
- u64 tx_bytes;
+ u64_stats_t rx_packets;
+ u64_stats_t rx_bytes;
+ u64_stats_t tx_packets;
+ u64_stats_t tx_bytes;
struct u64_stats_sync syncp;
u32 rx_dropped;
u32 tx_dropped;
@@ -313,8 +313,8 @@ static void tun_napi_init(struct tun_struct *tun, struct tun_file *tfile,
tfile->napi_enabled = napi_en;
tfile->napi_frags_enabled = napi_en && napi_frags;
if (napi_en) {
- netif_napi_add(tun->dev, &tfile->napi, tun_napi_poll,
- NAPI_POLL_WEIGHT);
+ netif_tx_napi_add(tun->dev, &tfile->napi, tun_napi_poll,
+ NAPI_POLL_WEIGHT);
napi_enable(&tfile->napi);
}
}
@@ -1167,10 +1167,10 @@ tun_net_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
p = per_cpu_ptr(tun->pcpu_stats, i);
do {
start = u64_stats_fetch_begin(&p->syncp);
- rxpackets = p->rx_packets;
- rxbytes = p->rx_bytes;
- txpackets = p->tx_packets;
- txbytes = p->tx_bytes;
+ rxpackets = u64_stats_read(&p->rx_packets);
+ rxbytes = u64_stats_read(&p->rx_bytes);
+ txpackets = u64_stats_read(&p->tx_packets);
+ txbytes = u64_stats_read(&p->tx_bytes);
} while (u64_stats_fetch_retry(&p->syncp, start));
stats->rx_packets += rxpackets;
@@ -1998,8 +1998,8 @@ drop:
stats = get_cpu_ptr(tun->pcpu_stats);
u64_stats_update_begin(&stats->syncp);
- stats->rx_packets++;
- stats->rx_bytes += len;
+ u64_stats_inc(&stats->rx_packets);
+ u64_stats_add(&stats->rx_bytes, len);
u64_stats_update_end(&stats->syncp);
put_cpu_ptr(stats);
@@ -2052,8 +2052,8 @@ static ssize_t tun_put_user_xdp(struct tun_struct *tun,
stats = get_cpu_ptr(tun->pcpu_stats);
u64_stats_update_begin(&stats->syncp);
- stats->tx_packets++;
- stats->tx_bytes += ret;
+ u64_stats_inc(&stats->tx_packets);
+ u64_stats_add(&stats->tx_bytes, ret);
u64_stats_update_end(&stats->syncp);
put_cpu_ptr(tun->pcpu_stats);
@@ -2147,8 +2147,8 @@ done:
/* caller is in process context, */
stats = get_cpu_ptr(tun->pcpu_stats);
u64_stats_update_begin(&stats->syncp);
- stats->tx_packets++;
- stats->tx_bytes += skb->len + vlan_hlen;
+ u64_stats_inc(&stats->tx_packets);
+ u64_stats_add(&stats->tx_bytes, skb->len + vlan_hlen);
u64_stats_update_end(&stats->syncp);
put_cpu_ptr(tun->pcpu_stats);
@@ -2290,7 +2290,13 @@ static void tun_free_netdev(struct net_device *dev)
struct tun_struct *tun = netdev_priv(dev);
BUG_ON(!(list_empty(&tun->disabled)));
+
free_percpu(tun->pcpu_stats);
+ /* We clear pcpu_stats so that tun_set_iff() can tell if
+ * tun_free_netdev() has been called from register_netdevice().
+ */
+ tun->pcpu_stats = NULL;
+
tun_flow_uninit(tun);
security_tun_dev_free_security(tun->security);
__tun_set_ebpf(tun, &tun->steering_prog, NULL);
@@ -2505,8 +2511,8 @@ build:
*/
stats = this_cpu_ptr(tun->pcpu_stats);
u64_stats_update_begin(&stats->syncp);
- stats->rx_packets++;
- stats->rx_bytes += datasize;
+ u64_stats_inc(&stats->rx_packets);
+ u64_stats_add(&stats->rx_bytes, datasize);
u64_stats_update_end(&stats->syncp);
if (rxhash)
@@ -2782,9 +2788,6 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
if (!dev)
return -ENOMEM;
- err = dev_get_valid_name(net, dev, name);
- if (err < 0)
- goto err_free_dev;
dev_net_set(dev, net);
dev->rtnl_link_ops = &tun_link_ops;
@@ -2859,8 +2862,12 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
err_detach:
tun_detach_all(dev);
- /* register_netdevice() already called tun_free_netdev() */
- goto err_free_dev;
+ /* We are here because register_netdevice() has failed.
+ * If register_netdevice() already called tun_free_netdev()
+ * while dealing with the error, tun->pcpu_stats has been cleared.
+ */
+ if (!tun->pcpu_stats)
+ goto err_free_dev;
err_free_flow:
tun_flow_uninit(tun);
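For reference, the tun.c conversion above follows the standard u64_stats discipline; a condensed sketch of both sides (stats pointer and field names as in the driver):

	/* writer: per-CPU, packet path */
	u64_stats_update_begin(&stats->syncp);
	u64_stats_inc(&stats->rx_packets);
	u64_stats_add(&stats->rx_bytes, len);
	u64_stats_update_end(&stats->syncp);

	/* reader: retries if it raced with a writer */
	do {
		start = u64_stats_fetch_begin(&stats->syncp);
		packets = u64_stats_read(&stats->rx_packets);
		bytes = u64_stats_read(&stats->rx_bytes);
	} while (u64_stats_fetch_retry(&stats->syncp, start));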
diff --git a/drivers/net/usb/ax88172a.c b/drivers/net/usb/ax88172a.c
index 011bd4cb546e..af3994e0853b 100644
--- a/drivers/net/usb/ax88172a.c
+++ b/drivers/net/usb/ax88172a.c
@@ -196,7 +196,7 @@ static int ax88172a_bind(struct usbnet *dev, struct usb_interface *intf)
/* Get the MAC address */
ret = asix_read_cmd(dev, AX_CMD_READ_NODE_ID, 0, 0, ETH_ALEN, buf, 0);
- if (ret < 0) {
+ if (ret < ETH_ALEN) {
netdev_err(dev->net, "Failed to read MAC address: %d\n", ret);
goto free;
}
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index daa54486ab09..93044cf1417a 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -827,6 +827,7 @@ static const struct ethtool_ops ax88179_ethtool_ops = {
.nway_reset = usbnet_nway_reset,
.get_link_ksettings = ax88179_get_link_ksettings,
.set_link_ksettings = ax88179_set_link_ksettings,
+ .get_ts_info = ethtool_op_get_ts_info,
};
static void ax88179_set_multicast(struct net_device *net)
@@ -1214,6 +1215,32 @@ static int ax88179_led_setting(struct usbnet *dev)
return 0;
}
+static void ax88179_get_mac_addr(struct usbnet *dev)
+{
+ u8 mac[ETH_ALEN];
+
+ /* Maybe the boot loader passed the MAC address via device tree */
+ if (!eth_platform_get_mac_address(&dev->udev->dev, mac)) {
+ netif_dbg(dev, ifup, dev->net,
+ "MAC address read from device tree");
+ } else {
+ ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_NODE_ID, ETH_ALEN,
+ ETH_ALEN, mac);
+ netif_dbg(dev, ifup, dev->net,
+ "MAC address read from ASIX chip");
+ }
+
+ if (is_valid_ether_addr(mac)) {
+ memcpy(dev->net->dev_addr, mac, ETH_ALEN);
+ } else {
+ netdev_info(dev->net, "invalid MAC address, using random\n");
+ eth_hw_addr_random(dev->net);
+ }
+
+ ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_NODE_ID, ETH_ALEN, ETH_ALEN,
+ dev->net->dev_addr);
+}
+
static int ax88179_bind(struct usbnet *dev, struct usb_interface *intf)
{
u8 buf[5];
@@ -1240,8 +1267,8 @@ static int ax88179_bind(struct usbnet *dev, struct usb_interface *intf)
ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_CLK_SELECT, 1, 1, tmp);
msleep(100);
- ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_NODE_ID, ETH_ALEN,
- ETH_ALEN, dev->net->dev_addr);
+ /* Read MAC address from DTB or asix chip */
+ ax88179_get_mac_addr(dev);
memcpy(dev->net->perm_addr, dev->net->dev_addr, ETH_ALEN);
/* RX bulk configuration */
@@ -1541,8 +1568,8 @@ static int ax88179_reset(struct usbnet *dev)
/* Ethernet PHY Auto Detach*/
ax88179_auto_detach(dev, 0);
- ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_NODE_ID, ETH_ALEN, ETH_ALEN,
- dev->net->dev_addr);
+ /* Read MAC address from DTB or asix chip */
+ ax88179_get_mac_addr(dev);
/* RX bulk configuration */
memcpy(tmp, &AX88179_BULKIN_SIZE[0], 5);
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index fe630438f67b..0cdb2ce47645 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -766,6 +766,13 @@ static const struct usb_device_id products[] = {
.driver_info = 0,
},
+/* ThinkPad Thunderbolt 3 Dock Gen 2 (based on Realtek RTL8153) */
+{
+ USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x3082, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+ .driver_info = 0,
+},
+
/* Lenovo Thinkpad USB 3.0 Ethernet Adapters (based on Realtek RTL8153) */
{
USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x7205, USB_CLASS_COMM,
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index a245597a3902..c2c82e6391b4 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -579,7 +579,7 @@ static void cdc_ncm_set_dgram_size(struct usbnet *dev, int new_size)
err = usbnet_read_cmd(dev, USB_CDC_GET_MAX_DATAGRAM_SIZE,
USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE,
0, iface_no, &max_datagram_size, sizeof(max_datagram_size));
- if (err < sizeof(max_datagram_size)) {
+ if (err != sizeof(max_datagram_size)) {
dev_dbg(&dev->intf->dev, "GET_MAX_DATAGRAM_SIZE failed\n");
goto out;
}
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index f24a1b0b801f..cf1f3f0a4b9b 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -3995,9 +3995,6 @@ static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
u32 buf;
int ret;
- int event;
-
- event = message.event;
if (!dev->suspend_count++) {
spin_lock_irq(&dev->txq.lock);
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 56d334b9ad45..4196c0e32740 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1371,6 +1371,8 @@ static const struct usb_device_id products[] = {
{QMI_QUIRK_SET_DTR(0x2c7c, 0x0191, 4)}, /* Quectel EG91 */
{QMI_FIXED_INTF(0x2c7c, 0x0296, 4)}, /* Quectel BG96 */
{QMI_QUIRK_SET_DTR(0x2cb7, 0x0104, 4)}, /* Fibocom NL678 series */
+ {QMI_FIXED_INTF(0x0489, 0xe0b4, 0)}, /* Foxconn T77W968 LTE */
+ {QMI_FIXED_INTF(0x0489, 0xe0b5, 0)}, /* Foxconn T77W968 LTE with eSIM support */
/* 4. Gobi 1000 devices */
{QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index d4a95b50bda6..c5ebf35d2488 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -24,9 +24,11 @@
#include <linux/suspend.h>
#include <linux/atomic.h>
#include <linux/acpi.h>
+#include <linux/firmware.h>
+#include <crypto/hash.h>
/* Information for net-next */
-#define NETNEXT_VERSION "10"
+#define NETNEXT_VERSION "11"
/* Information for net */
#define NET_VERSION "10"
@@ -54,8 +56,11 @@
#define PLA_BDC_CR 0xd1a0
#define PLA_TEREDO_TIMER 0xd2cc
#define PLA_REALWOW_TIMER 0xd2e8
+#define PLA_UPHY_TIMER 0xd388
#define PLA_SUSPEND_FLAG 0xd38a
#define PLA_INDICATE_FALG 0xd38c
+#define PLA_MACDBG_PRE 0xd38c /* RTL_VER_04 only */
+#define PLA_MACDBG_POST 0xd38e /* RTL_VER_04 only */
#define PLA_EXTRA_STATUS 0xd398
#define PLA_EFUSE_DATA 0xdd00
#define PLA_EFUSE_CMD 0xdd02
@@ -110,7 +115,12 @@
#define USB_CONNECT_TIMER 0xcbf8
#define USB_MSC_TIMER 0xcbfc
#define USB_BURST_SIZE 0xcfc0
+#define USB_FW_FIX_EN0 0xcfca
+#define USB_FW_FIX_EN1 0xcfcc
#define USB_LPM_CONFIG 0xcfd8
+#define USB_CSTMR 0xcfef /* RTL8153A */
+#define USB_FW_CTRL 0xd334 /* RTL8153B */
+#define USB_FC_TIMER 0xd340
#define USB_USB_CTRL 0xd406
#define USB_PHY_CTRL 0xd408
#define USB_TX_AGG 0xd40a
@@ -126,6 +136,7 @@
#define USB_LPM_CTRL 0xd41a
#define USB_BMU_RESET 0xd4b0
#define USB_U1U2_TIMER 0xd4da
+#define USB_FW_TASK 0xd4e8 /* RTL8153B */
#define USB_UPS_CTRL 0xd800
#define USB_POWER_CUT 0xd80a
#define USB_MISC_0 0xd81a
@@ -133,18 +144,19 @@
#define USB_AFE_CTRL2 0xd824
#define USB_UPS_CFG 0xd842
#define USB_UPS_FLAGS 0xd848
+#define USB_WDT1_CTRL 0xe404
#define USB_WDT11_CTRL 0xe43c
-#define USB_BP_BA 0xfc26
-#define USB_BP_0 0xfc28
-#define USB_BP_1 0xfc2a
-#define USB_BP_2 0xfc2c
-#define USB_BP_3 0xfc2e
-#define USB_BP_4 0xfc30
-#define USB_BP_5 0xfc32
-#define USB_BP_6 0xfc34
-#define USB_BP_7 0xfc36
-#define USB_BP_EN 0xfc38
-#define USB_BP_8 0xfc38
+#define USB_BP_BA PLA_BP_BA
+#define USB_BP_0 PLA_BP_0
+#define USB_BP_1 PLA_BP_1
+#define USB_BP_2 PLA_BP_2
+#define USB_BP_3 PLA_BP_3
+#define USB_BP_4 PLA_BP_4
+#define USB_BP_5 PLA_BP_5
+#define USB_BP_6 PLA_BP_6
+#define USB_BP_7 PLA_BP_7
+#define USB_BP_EN PLA_BP_EN /* RTL8153A */
+#define USB_BP_8 0xfc38 /* RTL8153B */
#define USB_BP_9 0xfc3a
#define USB_BP_10 0xfc3c
#define USB_BP_11 0xfc3e
@@ -175,6 +187,7 @@
#define OCP_PHY_STATE 0xa708 /* nway state for 8153 */
#define OCP_PHY_PATCH_STAT 0xb800
#define OCP_PHY_PATCH_CMD 0xb820
+#define OCP_PHY_LOCK 0xb82e
#define OCP_ADC_IOFFSET 0xbcfc
#define OCP_ADC_CFG 0xbc06
#define OCP_SYSCLK_CFG 0xc416
@@ -185,6 +198,7 @@
#define SRAM_10M_AMP1 0x8080
#define SRAM_10M_AMP2 0x8082
#define SRAM_IMPEDANCE 0x8084
+#define SRAM_PHY_LOCK 0xb82e
/* PLA_RCR */
#define RCR_AAP 0x00000001
@@ -346,7 +360,12 @@
/* PLA_INDICATE_FALG */
#define UPCOMING_RUNTIME_D3 BIT(0)
+/* PLA_MACDBG_PRE and PLA_MACDBG_POST */
+#define DEBUG_OE BIT(0)
+#define DEBUG_LTSSM 0x0082
+
/* PLA_EXTRA_STATUS */
+#define U3P3_CHECK_EN BIT(7) /* RTL_VER_05 only */
#define LINK_CHANGE_FLAG BIT(8)
/* USB_USB2PHY */
@@ -368,6 +387,12 @@
#define STAT_SPEED_HIGH 0x0000
#define STAT_SPEED_FULL 0x0002
+/* USB_FW_FIX_EN0 */
+#define FW_FIX_SUSPEND BIT(14)
+
+/* USB_FW_FIX_EN1 */
+#define FW_IP_RESET_EN BIT(9)
+
/* USB_LPM_CONFIG */
#define LPM_U1U2_EN BIT(0)
@@ -392,12 +417,24 @@
#define OWN_UPDATE BIT(0)
#define OWN_CLEAR BIT(1)
+/* USB_FW_TASK */
+#define FC_PATCH_TASK BIT(1)
+
/* USB_UPS_CTRL */
#define POWER_CUT 0x0100
/* USB_PM_CTRL_STATUS */
#define RESUME_INDICATE 0x0001
+/* USB_CSTMR */
+#define FORCE_SUPER BIT(0)
+
+/* USB_FW_CTRL */
+#define FLOW_CTRL_PATCH_OPT BIT(1)
+
+/* USB_FC_TIMER */
+#define CTRL_TIMER_EN BIT(15)
+
/* USB_USB_CTRL */
#define RX_AGG_DISABLE 0x0010
#define RX_ZERO_EN 0x0080
@@ -419,6 +456,9 @@
#define COALESCE_HIGH 250000U
#define COALESCE_SLOW 524280U
+/* USB_WDT1_CTRL */
+#define WTD1_EN BIT(0)
+
/* USB_WDT11_CTRL */
#define TIMER11_EN 0x0001
@@ -539,6 +579,9 @@ enum spd_duplex {
/* OCP_PHY_PATCH_CMD */
#define PATCH_REQUEST BIT(4)
+/* OCP_PHY_LOCK */
+#define PATCH_LOCK BIT(0)
+
/* OCP_ADC_CFG */
#define CKADSEL_L 0x0100
#define ADC_EN 0x0080
@@ -563,6 +606,9 @@ enum spd_duplex {
/* SRAM_IMPEDANCE */
#define RX_DRIVING_MASK 0x6000
+/* SRAM_PHY_LOCK */
+#define PHY_PATCH_LOCK 0x0001
+
/* MAC PASSTHRU */
#define AD_MASK 0xfee0
#define BND_MASK 0x0004
@@ -570,6 +616,8 @@ enum spd_duplex {
#define EFUSE 0xcfdb
#define PASS_THRU_MASK 0x1
+#define BP4_SUPER_ONLY 0x1578 /* RTL_VER_04 only */
+
enum rtl_register_content {
_1000bps = 0x10,
_100bps = 0x08,
@@ -622,6 +670,7 @@ enum rtl8152_flags {
SCHEDULE_TASKLET,
GREEN_ETHERNET,
DELL_TB_RX_AGG_BUG,
+ LENOVO_MACPASSTHRU,
};
/* Define these values to match your device */
@@ -736,16 +785,16 @@ struct r8152 {
struct tasklet_struct tx_tl;
struct rtl_ops {
- void (*init)(struct r8152 *);
- int (*enable)(struct r8152 *);
- void (*disable)(struct r8152 *);
- void (*up)(struct r8152 *);
- void (*down)(struct r8152 *);
- void (*unload)(struct r8152 *);
- int (*eee_get)(struct r8152 *, struct ethtool_eee *);
- int (*eee_set)(struct r8152 *, struct ethtool_eee *);
- bool (*in_nway)(struct r8152 *);
- void (*hw_phy_cfg)(struct r8152 *);
+ void (*init)(struct r8152 *tp);
+ int (*enable)(struct r8152 *tp);
+ void (*disable)(struct r8152 *tp);
+ void (*up)(struct r8152 *tp);
+ void (*down)(struct r8152 *tp);
+ void (*unload)(struct r8152 *tp);
+ int (*eee_get)(struct r8152 *tp, struct ethtool_eee *eee);
+ int (*eee_set)(struct r8152 *tp, struct ethtool_eee *eee);
+ bool (*in_nway)(struct r8152 *tp);
+ void (*hw_phy_cfg)(struct r8152 *tp);
void (*autosuspend_en)(struct r8152 *tp, bool enable);
} rtl_ops;
@@ -766,6 +815,19 @@ struct r8152 {
u32 ctap_short_off:1;
} ups_info;
+#define RTL_VER_SIZE 32
+
+ struct rtl_fw {
+ const char *fw_name;
+ const struct firmware *fw;
+
+ char version[RTL_VER_SIZE];
+ int (*pre_fw)(struct r8152 *tp);
+ int (*post_fw)(struct r8152 *tp);
+
+ bool retry;
+ } rtl_fw;
+
atomic_t rx_count;
bool eee_en;
@@ -788,6 +850,131 @@ struct r8152 {
u8 autoneg;
};
+/**
+ * struct fw_block - block type and total length
+ * @type: type of the current block, such as RTL_FW_END, RTL_FW_PLA,
+ * RTL_FW_USB and so on.
+ * @length: total length of the current block.
+ */
+struct fw_block {
+ __le32 type;
+ __le32 length;
+} __packed;
+
+/**
+ * struct fw_header - header of the firmware file
+ * @checksum: sha256 checksum calculated over the whole file except this
+ * checksum field itself; that is, over everything from the version
+ * field to the end of the file.
+ * @version: version of this firmware.
+ * @blocks: the first firmware block of the file
+ */
+struct fw_header {
+ u8 checksum[32];
+ char version[RTL_VER_SIZE];
+ struct fw_block blocks[0];
+} __packed;
+
+/**
+ * struct fw_mac - a firmware block used by RTL_FW_PLA and RTL_FW_USB.
+ * The layout of the firmware block is:
+ * <struct fw_mac> + <info> + <firmware data>.
+ * @fw_offset: offset of the firmware binary data. The start address of
+ * the data would be the address of struct fw_mac + @fw_offset.
+ * @fw_reg: the register to load the firmware. Depends on chip.
+ * @bp_ba_addr: the register to write break point base address. Depends on
+ * chip.
+ * @bp_ba_value: break point base address. Depends on chip.
+ * @bp_en_addr: the register to write break point enabled mask. Depends
+ * on chip.
+ * @bp_en_value: break point enabled mask. Depends on the firmware.
+ * @bp_start: the start register of break points. Depends on chip.
+ * @bp_num: the number of break points to be set for this firmware.
+ * Depends on the firmware.
+ * @bp: break points. Depends on firmware.
+ * @fw_ver_reg: the register to store the fw version.
+ * @fw_ver_data: the firmware version of the current type.
+ * @info: additional information for debugging, and is followed by the
+ * binary data of firmware.
+ */
+struct fw_mac {
+ struct fw_block blk_hdr;
+ __le16 fw_offset;
+ __le16 fw_reg;
+ __le16 bp_ba_addr;
+ __le16 bp_ba_value;
+ __le16 bp_en_addr;
+ __le16 bp_en_value;
+ __le16 bp_start;
+ __le16 bp_num;
+ __le16 bp[16]; /* any value determined by firmware */
+ __le32 reserved;
+ __le16 fw_ver_reg;
+ u8 fw_ver_data;
+ char info[0];
+} __packed;
+
+/**
+ * struct fw_phy_patch_key - a firmware block used by RTL_FW_PHY_START.
+ * This is used to set patch key when loading the firmware of PHY.
+ * @key_reg: the register to write the patch key.
+ * @key_data: patch key.
+ */
+struct fw_phy_patch_key {
+ struct fw_block blk_hdr;
+ __le16 key_reg;
+ __le16 key_data;
+ __le32 reserved;
+} __packed;
+
+/**
+ * struct fw_phy_nc - a firmware block used by RTL_FW_PHY_NC.
+ * The layout of the firmware block is:
+ * <struct fw_phy_nc> + <info> + <firmware data>.
+ * @fw_offset: offset of the firmware binary data. The start address of
+ * the data would be the address of struct fw_phy_nc + @fw_offset.
+ * @fw_reg: the register to load the firmware. Depends on chip.
+ * @ba_reg: the register to write the base address. Depends on chip.
+ * @ba_data: base address. Depends on chip.
+ * @patch_en_addr: the register of enabling patch mode. Depends on chip.
+ * @patch_en_value: patch mode enabled mask. Depends on the firmware.
+ * @mode_reg: the register to switch the mode.
+ * @mode_pre: the mode to be set before loading the firmware.
+ * @mode_post: the mode to be set after the firmware has been loaded.
+ * @bp_start: the start register of break points. Depends on chip.
+ * @bp_num: the number of break points to be set for this firmware.
+ * Depends on the firmware.
+ * @bp: break points. Depends on firmware.
+ * @info: additional information for debugging, and is followed by the
+ * binary data of firmware.
+ */
+struct fw_phy_nc {
+ struct fw_block blk_hdr;
+ __le16 fw_offset;
+ __le16 fw_reg;
+ __le16 ba_reg;
+ __le16 ba_data;
+ __le16 patch_en_addr;
+ __le16 patch_en_value;
+ __le16 mode_reg;
+ __le16 mode_pre;
+ __le16 mode_post;
+ __le16 reserved;
+ __le16 bp_start;
+ __le16 bp_num;
+ __le16 bp[4];
+ char info[0];
+} __packed;
+
+enum rtl_fw_type {
+ RTL_FW_END = 0,
+ RTL_FW_PLA,
+ RTL_FW_USB,
+ RTL_FW_PHY_START,
+ RTL_FW_PHY_STOP,
+ RTL_FW_PHY_NC,
+};
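A hedged sketch of how a firmware file in this layout can be walked; the iteration is implied by the fw_header/fw_block definitions above, and rtl_fw_walk is an illustrative name, not a driver function:

	static void rtl_fw_walk(const struct firmware *fw)
	{
		const struct fw_header *hdr = (const void *)fw->data;
		size_t offset = sizeof(*hdr);

		while (offset + sizeof(struct fw_block) <= fw->size) {
			const struct fw_block *blk =
				(const void *)(fw->data + offset);
			u32 len = __le32_to_cpu(blk->length);

			if (__le32_to_cpu(blk->type) == RTL_FW_END ||
			    len < sizeof(*blk) || len > fw->size - offset)
				break;

			/* dispatch on type: RTL_FW_PLA, RTL_FW_USB,
			 * RTL_FW_PHY_START/STOP, RTL_FW_PHY_NC
			 */
			offset += len;	/* @length covers the whole block */
		}
	}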
+
enum rtl_version {
RTL_VER_UNKNOWN = 0,
RTL_VER_01,
@@ -1222,38 +1409,52 @@ static int vendor_mac_passthru_addr_read(struct r8152 *tp, struct sockaddr *sa)
int ret = -EINVAL;
u32 ocp_data;
unsigned char buf[6];
-
- /* test for -AD variant of RTL8153 */
- ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0);
- if ((ocp_data & AD_MASK) == 0x1000) {
- /* test for MAC address pass-through bit */
- ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, EFUSE);
- if ((ocp_data & PASS_THRU_MASK) != 1) {
- netif_dbg(tp, probe, tp->netdev,
- "No efuse for RTL8153-AD MAC pass through\n");
- return -ENODEV;
- }
+ char *mac_obj_name;
+ acpi_object_type mac_obj_type;
+ int mac_strlen;
+
+ if (test_bit(LENOVO_MACPASSTHRU, &tp->flags)) {
+ mac_obj_name = "\\MACA";
+ mac_obj_type = ACPI_TYPE_STRING;
+ mac_strlen = 0x16;
} else {
- /* test for RTL8153-BND and RTL8153-BD */
- ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_MISC_1);
- if ((ocp_data & BND_MASK) == 0 && (ocp_data & BD_MASK) == 0) {
- netif_dbg(tp, probe, tp->netdev,
- "Invalid variant for MAC pass through\n");
- return -ENODEV;
+ /* test for -AD variant of RTL8153 */
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0);
+ if ((ocp_data & AD_MASK) == 0x1000) {
+ /* test for MAC address pass-through bit */
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, EFUSE);
+ if ((ocp_data & PASS_THRU_MASK) != 1) {
+ netif_dbg(tp, probe, tp->netdev,
+ "No efuse for RTL8153-AD MAC pass through\n");
+ return -ENODEV;
+ }
+ } else {
+ /* test for RTL8153-BND and RTL8153-BD */
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_MISC_1);
+ if ((ocp_data & BND_MASK) == 0 && (ocp_data & BD_MASK) == 0) {
+ netif_dbg(tp, probe, tp->netdev,
+ "Invalid variant for MAC pass through\n");
+ return -ENODEV;
+ }
}
+
+ mac_obj_name = "\\_SB.AMAC";
+ mac_obj_type = ACPI_TYPE_BUFFER;
+ mac_strlen = 0x17;
}
/* returns _AUXMAC_#AABBCCDDEEFF# */
- status = acpi_evaluate_object(NULL, "\\_SB.AMAC", NULL, &buffer);
+ status = acpi_evaluate_object(NULL, mac_obj_name, NULL, &buffer);
obj = (union acpi_object *)buffer.pointer;
if (!ACPI_SUCCESS(status))
return -ENODEV;
- if (obj->type != ACPI_TYPE_BUFFER || obj->string.length != 0x17) {
+ if (obj->type != mac_obj_type || obj->string.length != mac_strlen) {
netif_warn(tp, probe, tp->netdev,
"Invalid buffer for pass-thru MAC addr: (%d, %d)\n",
obj->type, obj->string.length);
goto amacout;
}
+
if (strncmp(obj->string.pointer, "_AUXMAC_#", 9) != 0 ||
strncmp(obj->string.pointer + 0x15, "#", 1) != 0) {
netif_warn(tp, probe, tp->netdev,
@@ -1688,7 +1889,7 @@ static struct tx_agg *r8152_get_tx_agg(struct r8152 *tp)
}
/* r8152_csum_workaround()
- * The hw limites the value the transport offset. When the offset is out of the
+ * The hw limits the value of the transport offset. When the offset is out of
* range, calculate the checksum by sw.
*/
static void r8152_csum_workaround(struct r8152 *tp, struct sk_buff *skb,
@@ -2178,6 +2379,7 @@ static void tx_bottom(struct r8152 *tp)
int res;
do {
+ struct net_device *netdev = tp->netdev;
struct tx_agg *agg;
if (skb_queue_empty(&tp->tx_queue))
@@ -2188,24 +2390,23 @@ static void tx_bottom(struct r8152 *tp)
break;
res = r8152_tx_agg_fill(tp, agg);
- if (res) {
- struct net_device *netdev = tp->netdev;
+ if (!res)
+ continue;
- if (res == -ENODEV) {
- rtl_set_unplug(tp);
- netif_device_detach(netdev);
- } else {
- struct net_device_stats *stats = &netdev->stats;
- unsigned long flags;
+ if (res == -ENODEV) {
+ rtl_set_unplug(tp);
+ netif_device_detach(netdev);
+ } else {
+ struct net_device_stats *stats = &netdev->stats;
+ unsigned long flags;
- netif_warn(tp, tx_err, netdev,
- "failed tx_urb %d\n", res);
- stats->tx_dropped += agg->skb_num;
+ netif_warn(tp, tx_err, netdev,
+ "failed tx_urb %d\n", res);
+ stats->tx_dropped += agg->skb_num;
- spin_lock_irqsave(&tp->tx_lock, flags);
- list_add_tail(&agg->list, &tp->tx_free);
- spin_unlock_irqrestore(&tp->tx_lock, flags);
- }
+ spin_lock_irqsave(&tp->tx_lock, flags);
+ list_add_tail(&agg->list, &tp->tx_free);
+ spin_unlock_irqrestore(&tp->tx_lock, flags);
}
} while (res == 0);
}
@@ -3226,6 +3427,688 @@ static void rtl_reset_bmu(struct r8152 *tp)
ocp_write_byte(tp, MCU_TYPE_USB, USB_BMU_RESET, ocp_data);
}
+/* Clear the bp to stop the firmware before loading a new one */
+static void rtl_clear_bp(struct r8152 *tp, u16 type)
+{
+ switch (tp->version) {
+ case RTL_VER_01:
+ case RTL_VER_02:
+ case RTL_VER_07:
+ break;
+ case RTL_VER_03:
+ case RTL_VER_04:
+ case RTL_VER_05:
+ case RTL_VER_06:
+ ocp_write_byte(tp, type, PLA_BP_EN, 0);
+ break;
+ case RTL_VER_08:
+ case RTL_VER_09:
+ default:
+ if (type == MCU_TYPE_USB) {
+ ocp_write_byte(tp, MCU_TYPE_USB, USB_BP2_EN, 0);
+
+ ocp_write_word(tp, MCU_TYPE_USB, USB_BP_8, 0);
+ ocp_write_word(tp, MCU_TYPE_USB, USB_BP_9, 0);
+ ocp_write_word(tp, MCU_TYPE_USB, USB_BP_10, 0);
+ ocp_write_word(tp, MCU_TYPE_USB, USB_BP_11, 0);
+ ocp_write_word(tp, MCU_TYPE_USB, USB_BP_12, 0);
+ ocp_write_word(tp, MCU_TYPE_USB, USB_BP_13, 0);
+ ocp_write_word(tp, MCU_TYPE_USB, USB_BP_14, 0);
+ ocp_write_word(tp, MCU_TYPE_USB, USB_BP_15, 0);
+ } else {
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_BP_EN, 0);
+ }
+ break;
+ }
+
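+	/* bp registers 0 to 7 are cleared unconditionally for every chip version */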
+ ocp_write_word(tp, type, PLA_BP_0, 0);
+ ocp_write_word(tp, type, PLA_BP_1, 0);
+ ocp_write_word(tp, type, PLA_BP_2, 0);
+ ocp_write_word(tp, type, PLA_BP_3, 0);
+ ocp_write_word(tp, type, PLA_BP_4, 0);
+ ocp_write_word(tp, type, PLA_BP_5, 0);
+ ocp_write_word(tp, type, PLA_BP_6, 0);
+ ocp_write_word(tp, type, PLA_BP_7, 0);
+
+ /* wait 3 ms to make sure the firmware is stopped */
+ usleep_range(3000, 6000);
+ ocp_write_word(tp, type, PLA_BP_BA, 0);
+}
+
+static int r8153_patch_request(struct r8152 *tp, bool request)
+{
+ u16 data;
+ int i;
+
+ data = ocp_reg_read(tp, OCP_PHY_PATCH_CMD);
+ if (request)
+ data |= PATCH_REQUEST;
+ else
+ data &= ~PATCH_REQUEST;
+ ocp_reg_write(tp, OCP_PHY_PATCH_CMD, data);
+
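+	/* when requesting, poll until the PHY reports PATCH_READY (up to 5000 loops) */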
+ for (i = 0; request && i < 5000; i++) {
+ usleep_range(1000, 2000);
+ if (ocp_reg_read(tp, OCP_PHY_PATCH_STAT) & PATCH_READY)
+ break;
+ }
+
+ if (request && !(ocp_reg_read(tp, OCP_PHY_PATCH_STAT) & PATCH_READY)) {
+ netif_err(tp, drv, tp->netdev, "patch request fail\n");
+ r8153_patch_request(tp, false);
+ return -ETIME;
+ } else {
+ return 0;
+ }
+}
+
+static int r8153_pre_ram_code(struct r8152 *tp, u16 key_addr, u16 patch_key)
+{
+ if (r8153_patch_request(tp, true)) {
+ dev_err(&tp->intf->dev, "patch request fail\n");
+ return -ETIME;
+ }
+
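+	/* write the patch key, then lock the PHY before loading the ram code */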
+ sram_write(tp, key_addr, patch_key);
+ sram_write(tp, SRAM_PHY_LOCK, PHY_PATCH_LOCK);
+
+ return 0;
+}
+
+static int r8153_post_ram_code(struct r8152 *tp, u16 key_addr)
+{
+ u16 data;
+
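+	/* reverse the pre_ram_code sequence: clear the key and release the PHY lock */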
+ sram_write(tp, 0x0000, 0x0000);
+
+ data = ocp_reg_read(tp, OCP_PHY_LOCK);
+ data &= ~PATCH_LOCK;
+ ocp_reg_write(tp, OCP_PHY_LOCK, data);
+
+ sram_write(tp, key_addr, 0x0000);
+
+ r8153_patch_request(tp, false);
+
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_OCP_GPHY_BASE, tp->ocp_base);
+
+ return 0;
+}
+
+static bool rtl8152_is_fw_phy_nc_ok(struct r8152 *tp, struct fw_phy_nc *phy)
+{
+ u32 length;
+ u16 fw_offset, fw_reg, ba_reg, patch_en_addr, mode_reg, bp_start;
+ bool rc = false;
+
+ switch (tp->version) {
+ case RTL_VER_04:
+ case RTL_VER_05:
+ case RTL_VER_06:
+ fw_reg = 0xa014;
+ ba_reg = 0xa012;
+ patch_en_addr = 0xa01a;
+ mode_reg = 0xb820;
+ bp_start = 0xa000;
+ break;
+ default:
+ goto out;
+ }
+
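+	/* the payload must follow the header and hold an even number of bytes */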
+ fw_offset = __le16_to_cpu(phy->fw_offset);
+ if (fw_offset < sizeof(*phy)) {
+ dev_err(&tp->intf->dev, "fw_offset too small\n");
+ goto out;
+ }
+
+ length = __le32_to_cpu(phy->blk_hdr.length);
+ if (length < fw_offset) {
+ dev_err(&tp->intf->dev, "invalid fw_offset\n");
+ goto out;
+ }
+
+ length -= __le16_to_cpu(phy->fw_offset);
+ if (!length || (length & 1)) {
+ dev_err(&tp->intf->dev, "invalid block length\n");
+ goto out;
+ }
+
+ if (__le16_to_cpu(phy->fw_reg) != fw_reg) {
+ dev_err(&tp->intf->dev, "invalid register to load firmware\n");
+ goto out;
+ }
+
+ if (__le16_to_cpu(phy->ba_reg) != ba_reg) {
+ dev_err(&tp->intf->dev, "invalid base address register\n");
+ goto out;
+ }
+
+ if (__le16_to_cpu(phy->patch_en_addr) != patch_en_addr) {
+ dev_err(&tp->intf->dev,
+ "invalid patch mode enabled register\n");
+ goto out;
+ }
+
+ if (__le16_to_cpu(phy->mode_reg) != mode_reg) {
+ dev_err(&tp->intf->dev,
+ "invalid register to switch the mode\n");
+ goto out;
+ }
+
+ if (__le16_to_cpu(phy->bp_start) != bp_start) {
+ dev_err(&tp->intf->dev,
+ "invalid start register of break point\n");
+ goto out;
+ }
+
+ if (__le16_to_cpu(phy->bp_num) > 4) {
+ dev_err(&tp->intf->dev, "invalid break point number\n");
+ goto out;
+ }
+
+ rc = true;
+out:
+ return rc;
+}
+
+static bool rtl8152_is_fw_mac_ok(struct r8152 *tp, struct fw_mac *mac)
+{
+ u16 fw_reg, bp_ba_addr, bp_en_addr, bp_start, fw_offset;
+ bool rc = false;
+ u32 length, type;
+ int i, max_bp;
+
+ type = __le32_to_cpu(mac->blk_hdr.type);
+ if (type == RTL_FW_PLA) {
+ switch (tp->version) {
+ case RTL_VER_01:
+ case RTL_VER_02:
+ case RTL_VER_07:
+ fw_reg = 0xf800;
+ bp_ba_addr = PLA_BP_BA;
+ bp_en_addr = 0;
+ bp_start = PLA_BP_0;
+ max_bp = 8;
+ break;
+ case RTL_VER_03:
+ case RTL_VER_04:
+ case RTL_VER_05:
+ case RTL_VER_06:
+ case RTL_VER_08:
+ case RTL_VER_09:
+ fw_reg = 0xf800;
+ bp_ba_addr = PLA_BP_BA;
+ bp_en_addr = PLA_BP_EN;
+ bp_start = PLA_BP_0;
+ max_bp = 8;
+ break;
+ default:
+ goto out;
+ }
+ } else if (type == RTL_FW_USB) {
+ switch (tp->version) {
+ case RTL_VER_03:
+ case RTL_VER_04:
+ case RTL_VER_05:
+ case RTL_VER_06:
+ fw_reg = 0xf800;
+ bp_ba_addr = USB_BP_BA;
+ bp_en_addr = USB_BP_EN;
+ bp_start = USB_BP_0;
+ max_bp = 8;
+ break;
+ case RTL_VER_08:
+ case RTL_VER_09:
+ fw_reg = 0xe600;
+ bp_ba_addr = USB_BP_BA;
+ bp_en_addr = USB_BP2_EN;
+ bp_start = USB_BP_0;
+ max_bp = 16;
+ break;
+ case RTL_VER_01:
+ case RTL_VER_02:
+ case RTL_VER_07:
+ default:
+ goto out;
+ }
+ } else {
+ goto out;
+ }
+
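+	/* the payload must follow the header and be a multiple of 4 bytes */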
+ fw_offset = __le16_to_cpu(mac->fw_offset);
+ if (fw_offset < sizeof(*mac)) {
+ dev_err(&tp->intf->dev, "fw_offset too small\n");
+ goto out;
+ }
+
+ length = __le32_to_cpu(mac->blk_hdr.length);
+ if (length < fw_offset) {
+ dev_err(&tp->intf->dev, "invalid fw_offset\n");
+ goto out;
+ }
+
+ length -= fw_offset;
+ if (length < 4 || (length & 3)) {
+ dev_err(&tp->intf->dev, "invalid block length\n");
+ goto out;
+ }
+
+ if (__le16_to_cpu(mac->fw_reg) != fw_reg) {
+ dev_err(&tp->intf->dev, "invalid register to load firmware\n");
+ goto out;
+ }
+
+ if (__le16_to_cpu(mac->bp_ba_addr) != bp_ba_addr) {
+ dev_err(&tp->intf->dev, "invalid base address register\n");
+ goto out;
+ }
+
+ if (__le16_to_cpu(mac->bp_en_addr) != bp_en_addr) {
+ dev_err(&tp->intf->dev, "invalid enabled mask register\n");
+ goto out;
+ }
+
+ if (__le16_to_cpu(mac->bp_start) != bp_start) {
+ dev_err(&tp->intf->dev,
+ "invalid start register of break point\n");
+ goto out;
+ }
+
+ if (__le16_to_cpu(mac->bp_num) > max_bp) {
+ dev_err(&tp->intf->dev, "invalid break point number\n");
+ goto out;
+ }
+
+ for (i = __le16_to_cpu(mac->bp_num); i < max_bp; i++) {
+ if (mac->bp[i]) {
+ dev_err(&tp->intf->dev, "unused bp%u is not zero\n", i);
+ goto out;
+ }
+ }
+
+ rc = true;
+out:
+ return rc;
+}
+
+/* Verify the checksum for the firmware file. It is calculated from the version
+ * field to the end of the file. Compare the result with the checksum field to
+ * make sure the file is correct.
+ */
+static long rtl8152_fw_verify_checksum(struct r8152 *tp,
+ struct fw_header *fw_hdr, size_t size)
+{
+ unsigned char checksum[sizeof(fw_hdr->checksum)];
+ struct crypto_shash *alg;
+ struct shash_desc *sdesc;
+ size_t len;
+ long rc;
+
+ alg = crypto_alloc_shash("sha256", 0, 0);
+ if (IS_ERR(alg)) {
+ rc = PTR_ERR(alg);
+ goto out;
+ }
+
+ if (crypto_shash_digestsize(alg) != sizeof(fw_hdr->checksum)) {
+ rc = -EFAULT;
+ dev_err(&tp->intf->dev, "digestsize incorrect (%u)\n",
+ crypto_shash_digestsize(alg));
+ goto free_shash;
+ }
+
+ len = sizeof(*sdesc) + crypto_shash_descsize(alg);
+ sdesc = kmalloc(len, GFP_KERNEL);
+ if (!sdesc) {
+ rc = -ENOMEM;
+ goto free_shash;
+ }
+ sdesc->tfm = alg;
+
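+	/* digest from the version field to the end of file; the checksum field itself is excluded */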
+ len = size - sizeof(fw_hdr->checksum);
+ rc = crypto_shash_digest(sdesc, fw_hdr->version, len, checksum);
+ kfree(sdesc);
+ if (rc)
+ goto free_shash;
+
+ if (memcmp(fw_hdr->checksum, checksum, sizeof(fw_hdr->checksum))) {
+ dev_err(&tp->intf->dev, "checksum fail\n");
+ rc = -EFAULT;
+ }
+
+free_shash:
+ crypto_free_shash(alg);
+out:
+ return rc;
+}
+
+static long rtl8152_check_firmware(struct r8152 *tp, struct rtl_fw *rtl_fw)
+{
+ const struct firmware *fw = rtl_fw->fw;
+ struct fw_header *fw_hdr = (struct fw_header *)fw->data;
+ struct fw_mac *pla = NULL, *usb = NULL;
+ struct fw_phy_patch_key *start = NULL;
+ struct fw_phy_nc *phy_nc = NULL;
+ struct fw_block *stop = NULL;
+ long ret = -EFAULT;
+ int i;
+
+ if (fw->size < sizeof(*fw_hdr)) {
+ dev_err(&tp->intf->dev, "file too small\n");
+ goto fail;
+ }
+
+ ret = rtl8152_fw_verify_checksum(tp, fw_hdr, fw->size);
+ if (ret)
+ goto fail;
+
+ ret = -EFAULT;
+
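+	/* walk the block list; each block is padded to an 8-byte boundary */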
+ for (i = sizeof(*fw_hdr); i < fw->size;) {
+ struct fw_block *block = (struct fw_block *)&fw->data[i];
+ u32 type;
+
+ if ((i + sizeof(*block)) > fw->size)
+ goto fail;
+
+ type = __le32_to_cpu(block->type);
+ switch (type) {
+ case RTL_FW_END:
+ if (__le32_to_cpu(block->length) != sizeof(*block))
+ goto fail;
+ goto fw_end;
+ case RTL_FW_PLA:
+ if (pla) {
+ dev_err(&tp->intf->dev,
+ "multiple PLA firmware encountered");
+ goto fail;
+ }
+
+ pla = (struct fw_mac *)block;
+ if (!rtl8152_is_fw_mac_ok(tp, pla)) {
+ dev_err(&tp->intf->dev,
+ "check PLA firmware failed\n");
+ goto fail;
+ }
+ break;
+ case RTL_FW_USB:
+ if (usb) {
+ dev_err(&tp->intf->dev,
+ "multiple USB firmware encountered");
+ goto fail;
+ }
+
+ usb = (struct fw_mac *)block;
+ if (!rtl8152_is_fw_mac_ok(tp, usb)) {
+ dev_err(&tp->intf->dev,
+ "check USB firmware failed\n");
+ goto fail;
+ }
+ break;
+ case RTL_FW_PHY_START:
+ if (start || phy_nc || stop) {
+ dev_err(&tp->intf->dev,
+ "check PHY_START fail\n");
+ goto fail;
+ }
+
+ if (__le32_to_cpu(block->length) != sizeof(*start)) {
+ dev_err(&tp->intf->dev,
+ "Invalid length for PHY_START\n");
+ goto fail;
+ }
+
+ start = (struct fw_phy_patch_key *)block;
+ break;
+ case RTL_FW_PHY_STOP:
+ if (stop || !start) {
+ dev_err(&tp->intf->dev,
+ "Check PHY_STOP fail\n");
+ goto fail;
+ }
+
+ if (__le32_to_cpu(block->length) != sizeof(*block)) {
+ dev_err(&tp->intf->dev,
+ "Invalid length for PHY_STOP\n");
+ goto fail;
+ }
+
+ stop = block;
+ break;
+ case RTL_FW_PHY_NC:
+ if (!start || stop) {
+ dev_err(&tp->intf->dev,
+ "check PHY_NC fail\n");
+ goto fail;
+ }
+
+ if (phy_nc) {
+ dev_err(&tp->intf->dev,
+ "multiple PHY NC encountered\n");
+ goto fail;
+ }
+
+ phy_nc = (struct fw_phy_nc *)block;
+ if (!rtl8152_is_fw_phy_nc_ok(tp, phy_nc)) {
+ dev_err(&tp->intf->dev,
+ "check PHY NC firmware failed\n");
+ goto fail;
+ }
+
+ break;
+ default:
+ dev_warn(&tp->intf->dev, "Unknown type %u is found\n",
+ type);
+ break;
+ }
+
+ /* next block */
+ i += ALIGN(__le32_to_cpu(block->length), 8);
+ }
+
+fw_end:
+ if ((phy_nc || start) && !stop) {
+ dev_err(&tp->intf->dev, "without PHY_STOP\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ return ret;
+}
+
+static void rtl8152_fw_phy_nc_apply(struct r8152 *tp, struct fw_phy_nc *phy)
+{
+ u16 mode_reg, bp_index;
+ u32 length, i, num;
+ __le16 *data;
+
+ mode_reg = __le16_to_cpu(phy->mode_reg);
+ sram_write(tp, mode_reg, __le16_to_cpu(phy->mode_pre));
+ sram_write(tp, __le16_to_cpu(phy->ba_reg),
+ __le16_to_cpu(phy->ba_data));
+
+ length = __le32_to_cpu(phy->blk_hdr.length);
+ length -= __le16_to_cpu(phy->fw_offset);
+ num = length / 2;
+ data = (__le16 *)((u8 *)phy + __le16_to_cpu(phy->fw_offset));
+
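+	/* stream the patch words into PHY SRAM through the address/data register pair */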
+ ocp_reg_write(tp, OCP_SRAM_ADDR, __le16_to_cpu(phy->fw_reg));
+ for (i = 0; i < num; i++)
+ ocp_reg_write(tp, OCP_SRAM_DATA, __le16_to_cpu(data[i]));
+
+ sram_write(tp, __le16_to_cpu(phy->patch_en_addr),
+ __le16_to_cpu(phy->patch_en_value));
+
+ bp_index = __le16_to_cpu(phy->bp_start);
+ num = __le16_to_cpu(phy->bp_num);
+ for (i = 0; i < num; i++) {
+ sram_write(tp, bp_index, __le16_to_cpu(phy->bp[i]));
+ bp_index += 2;
+ }
+
+ sram_write(tp, mode_reg, __le16_to_cpu(phy->mode_post));
+
+ dev_dbg(&tp->intf->dev, "successfully applied %s\n", phy->info);
+}
+
+static void rtl8152_fw_mac_apply(struct r8152 *tp, struct fw_mac *mac)
+{
+ u16 bp_en_addr, bp_index, type, bp_num, fw_ver_reg;
+ u32 length;
+ u8 *data;
+ int i;
+
+ switch (__le32_to_cpu(mac->blk_hdr.type)) {
+ case RTL_FW_PLA:
+ type = MCU_TYPE_PLA;
+ break;
+ case RTL_FW_USB:
+ type = MCU_TYPE_USB;
+ break;
+ default:
+ return;
+ }
+
+ rtl_clear_bp(tp, type);
+
+ /* Enable backup/restore of MACDBG. This is required after clearing PLA
+ * break points and before applying the PLA firmware.
+ */
+ if (tp->version == RTL_VER_04 && type == MCU_TYPE_PLA &&
+ !(ocp_read_word(tp, MCU_TYPE_PLA, PLA_MACDBG_POST) & DEBUG_OE)) {
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MACDBG_PRE, DEBUG_LTSSM);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MACDBG_POST, DEBUG_LTSSM);
+ }
+
+ length = __le32_to_cpu(mac->blk_hdr.length);
+ length -= __le16_to_cpu(mac->fw_offset);
+
+ data = (u8 *)mac;
+ data += __le16_to_cpu(mac->fw_offset);
+
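+	/* copy the firmware image into MCU memory before setting the base address and break points */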
+ generic_ocp_write(tp, __le16_to_cpu(mac->fw_reg), 0xff, length, data,
+ type);
+
+ ocp_write_word(tp, type, __le16_to_cpu(mac->bp_ba_addr),
+ __le16_to_cpu(mac->bp_ba_value));
+
+ bp_index = __le16_to_cpu(mac->bp_start);
+ bp_num = __le16_to_cpu(mac->bp_num);
+ for (i = 0; i < bp_num; i++) {
+ ocp_write_word(tp, type, bp_index, __le16_to_cpu(mac->bp[i]));
+ bp_index += 2;
+ }
+
+ bp_en_addr = __le16_to_cpu(mac->bp_en_addr);
+ if (bp_en_addr)
+ ocp_write_word(tp, type, bp_en_addr,
+ __le16_to_cpu(mac->bp_en_value));
+
+ fw_ver_reg = __le16_to_cpu(mac->fw_ver_reg);
+ if (fw_ver_reg)
+ ocp_write_byte(tp, MCU_TYPE_USB, fw_ver_reg,
+ mac->fw_ver_data);
+
+ dev_dbg(&tp->intf->dev, "successfully applied %s\n", mac->info);
+}
+
+static void rtl8152_apply_firmware(struct r8152 *tp)
+{
+ struct rtl_fw *rtl_fw = &tp->rtl_fw;
+ const struct firmware *fw;
+ struct fw_header *fw_hdr;
+ struct fw_phy_patch_key *key;
+ u16 key_addr = 0;
+ int i;
+
+ if (IS_ERR_OR_NULL(rtl_fw->fw))
+ return;
+
+ fw = rtl_fw->fw;
+ fw_hdr = (struct fw_header *)fw->data;
+
+ if (rtl_fw->pre_fw)
+ rtl_fw->pre_fw(tp);
+
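+	/* apply the blocks in file order; PHY blocks are bracketed by PHY_START/PHY_STOP */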
+ for (i = offsetof(struct fw_header, blocks); i < fw->size;) {
+ struct fw_block *block = (struct fw_block *)&fw->data[i];
+
+ switch (__le32_to_cpu(block->type)) {
+ case RTL_FW_END:
+ goto post_fw;
+ case RTL_FW_PLA:
+ case RTL_FW_USB:
+ rtl8152_fw_mac_apply(tp, (struct fw_mac *)block);
+ break;
+ case RTL_FW_PHY_START:
+ key = (struct fw_phy_patch_key *)block;
+ key_addr = __le16_to_cpu(key->key_reg);
+ r8153_pre_ram_code(tp, key_addr,
+ __le16_to_cpu(key->key_data));
+ break;
+ case RTL_FW_PHY_STOP:
+ WARN_ON(!key_addr);
+ r8153_post_ram_code(tp, key_addr);
+ break;
+ case RTL_FW_PHY_NC:
+ rtl8152_fw_phy_nc_apply(tp, (struct fw_phy_nc *)block);
+ break;
+ default:
+ break;
+ }
+
+ i += ALIGN(__le32_to_cpu(block->length), 8);
+ }
+
+post_fw:
+ if (rtl_fw->post_fw)
+ rtl_fw->post_fw(tp);
+
+ strscpy(rtl_fw->version, fw_hdr->version, RTL_VER_SIZE);
+ dev_info(&tp->intf->dev, "load %s successfully\n", rtl_fw->version);
+}
+
+static void rtl8152_release_firmware(struct r8152 *tp)
+{
+ struct rtl_fw *rtl_fw = &tp->rtl_fw;
+
+ if (!IS_ERR_OR_NULL(rtl_fw->fw)) {
+ release_firmware(rtl_fw->fw);
+ rtl_fw->fw = NULL;
+ }
+}
+
+static int rtl8152_request_firmware(struct r8152 *tp)
+{
+ struct rtl_fw *rtl_fw = &tp->rtl_fw;
+ long rc;
+
+ if (rtl_fw->fw || !rtl_fw->fw_name) {
+ dev_info(&tp->intf->dev, "skip request firmware\n");
+ rc = 0;
+ goto result;
+ }
+
+ rc = request_firmware(&rtl_fw->fw, rtl_fw->fw_name, &tp->intf->dev);
+ if (rc < 0)
+ goto result;
+
+ rc = rtl8152_check_firmware(tp, rtl_fw);
+ if (rc < 0)
+ release_firmware(rtl_fw->fw);
+
+result:
+ if (rc) {
+ rtl_fw->fw = ERR_PTR(rc);
+
+ dev_warn(&tp->intf->dev,
+ "unable to load firmware patch %s (%ld)\n",
+ rtl_fw->fw_name, rc);
+ }
+
+ return rc;
+}
+
static void r8152_aldps_en(struct r8152 *tp, bool enable)
{
if (enable) {
@@ -3370,6 +4253,7 @@ static void rtl8152_disable(struct r8152 *tp)
static void r8152b_hw_phy_cfg(struct r8152 *tp)
{
+ rtl8152_apply_firmware(tp);
rtl_eee_enable(tp, tp->eee_en);
r8152_aldps_en(tp, true);
r8152b_enable_fc(tp);
@@ -3377,11 +4261,23 @@ static void r8152b_hw_phy_cfg(struct r8152 *tp)
set_bit(PHY_RESET, &tp->flags);
}
-static void r8152b_exit_oob(struct r8152 *tp)
+static void wait_oob_link_list_ready(struct r8152 *tp)
{
u32 ocp_data;
int i;
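+	/* poll for up to a couple of seconds until LINK_LIST_READY is set */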
+ for (i = 0; i < 1000; i++) {
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
+ if (ocp_data & LINK_LIST_READY)
+ break;
+ usleep_range(1000, 2000);
+ }
+}
+
+static void r8152b_exit_oob(struct r8152 *tp)
+{
+ u32 ocp_data;
+
ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR);
ocp_data &= ~RCR_ACPT_ALL;
ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
@@ -3399,23 +4295,13 @@ static void r8152b_exit_oob(struct r8152 *tp)
ocp_data &= ~MCU_BORW_EN;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data);
- for (i = 0; i < 1000; i++) {
- ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
- if (ocp_data & LINK_LIST_READY)
- break;
- usleep_range(1000, 2000);
- }
+ wait_oob_link_list_ready(tp);
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7);
ocp_data |= RE_INIT_LL;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data);
- for (i = 0; i < 1000; i++) {
- ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
- if (ocp_data & LINK_LIST_READY)
- break;
- usleep_range(1000, 2000);
- }
+ wait_oob_link_list_ready(tp);
rtl8152_nic_reset(tp);
@@ -3457,7 +4343,6 @@ static void r8152b_exit_oob(struct r8152 *tp)
static void r8152b_enter_oob(struct r8152 *tp)
{
u32 ocp_data;
- int i;
ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
ocp_data &= ~NOW_IS_OOB;
@@ -3469,23 +4354,13 @@ static void r8152b_enter_oob(struct r8152 *tp)
rtl_disable(tp);
- for (i = 0; i < 1000; i++) {
- ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
- if (ocp_data & LINK_LIST_READY)
- break;
- usleep_range(1000, 2000);
- }
+ wait_oob_link_list_ready(tp);
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7);
ocp_data |= RE_INIT_LL;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data);
- for (i = 0; i < 1000; i++) {
- ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
- if (ocp_data & LINK_LIST_READY)
- break;
- usleep_range(1000, 2000);
- }
+ wait_oob_link_list_ready(tp);
ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, RTL8152_RMS);
@@ -3506,31 +4381,124 @@ static void r8152b_enter_oob(struct r8152 *tp)
ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
}
-static int r8153_patch_request(struct r8152 *tp, bool request)
+static int r8153_pre_firmware_1(struct r8152 *tp)
{
- u16 data;
int i;
- data = ocp_reg_read(tp, OCP_PHY_PATCH_CMD);
- if (request)
- data |= PATCH_REQUEST;
- else
- data &= ~PATCH_REQUEST;
- ocp_reg_write(tp, OCP_PHY_PATCH_CMD, data);
+ /* Wait till the WTD timer is ready. It would take at most 104 ms. */
+ for (i = 0; i < 104; i++) {
+ u32 ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_WDT1_CTRL);
- for (i = 0; request && i < 5000; i++) {
- usleep_range(1000, 2000);
- if (ocp_reg_read(tp, OCP_PHY_PATCH_STAT) & PATCH_READY)
+ if (!(ocp_data & WTD1_EN))
break;
+ usleep_range(1000, 2000);
}
- if (request && !(ocp_reg_read(tp, OCP_PHY_PATCH_STAT) & PATCH_READY)) {
- netif_err(tp, drv, tp->netdev, "patch request fail\n");
- r8153_patch_request(tp, false);
- return -ETIME;
- } else {
- return 0;
+ return 0;
+}
+
+static int r8153_post_firmware_1(struct r8152 *tp)
+{
+ /* set USB_BP_4 to support USB_SPEED_SUPER only */
+ if (ocp_read_byte(tp, MCU_TYPE_USB, USB_CSTMR) & FORCE_SUPER)
+ ocp_write_word(tp, MCU_TYPE_USB, USB_BP_4, BP4_SUPER_ONLY);
+
+ /* reset UPHY timer to 36 ms */
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_UPHY_TIMER, 36000 / 16);
+
+ return 0;
+}
+
+static int r8153_pre_firmware_2(struct r8152 *tp)
+{
+ u32 ocp_data;
+
+ r8153_pre_firmware_1(tp);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_FIX_EN0);
+ ocp_data &= ~FW_FIX_SUSPEND;
+ ocp_write_word(tp, MCU_TYPE_USB, USB_FW_FIX_EN0, ocp_data);
+
+ return 0;
+}
+
+static int r8153_post_firmware_2(struct r8152 *tp)
+{
+ u32 ocp_data;
+
+	/* enable bp0 when only USB_SPEED_SUPER is supported */
+ if (ocp_read_byte(tp, MCU_TYPE_USB, USB_CSTMR) & FORCE_SUPER) {
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_BP_EN);
+ ocp_data |= BIT(0);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_BP_EN, ocp_data);
+ }
+
+ /* reset UPHY timer to 36 ms */
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_UPHY_TIMER, 36000 / 16);
+
+ /* enable U3P3 check, set the counter to 4 */
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS, U3P3_CHECK_EN | 4);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_FIX_EN0);
+ ocp_data |= FW_FIX_SUSPEND;
+ ocp_write_word(tp, MCU_TYPE_USB, USB_FW_FIX_EN0, ocp_data);
+
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_USB2PHY);
+ ocp_data |= USB2PHY_L1 | USB2PHY_SUSPEND;
+ ocp_write_byte(tp, MCU_TYPE_USB, USB_USB2PHY, ocp_data);
+
+ return 0;
+}
+
+static int r8153_post_firmware_3(struct r8152 *tp)
+{
+ u32 ocp_data;
+
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_USB2PHY);
+ ocp_data |= USB2PHY_L1 | USB2PHY_SUSPEND;
+ ocp_write_byte(tp, MCU_TYPE_USB, USB_USB2PHY, ocp_data);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_FIX_EN1);
+ ocp_data |= FW_IP_RESET_EN;
+ ocp_write_word(tp, MCU_TYPE_USB, USB_FW_FIX_EN1, ocp_data);
+
+ return 0;
+}
+
+static int r8153b_pre_firmware_1(struct r8152 *tp)
+{
+ /* enable fc timer and set timer to 1 second. */
+ ocp_write_word(tp, MCU_TYPE_USB, USB_FC_TIMER,
+ CTRL_TIMER_EN | (1000 / 8));
+
+ return 0;
+}
+
+static int r8153b_post_firmware_1(struct r8152 *tp)
+{
+ u32 ocp_data;
+
+ /* enable bp0 for RTL8153-BND */
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_MISC_1);
+ if (ocp_data & BND_MASK) {
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_BP_EN);
+ ocp_data |= BIT(0);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_BP_EN, ocp_data);
}
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_CTRL);
+ ocp_data |= FLOW_CTRL_PATCH_OPT;
+ ocp_write_word(tp, MCU_TYPE_USB, USB_FW_CTRL, ocp_data);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_TASK);
+ ocp_data |= FC_PATCH_TASK;
+ ocp_write_word(tp, MCU_TYPE_USB, USB_FW_TASK, ocp_data);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_FIX_EN1);
+ ocp_data |= FW_IP_RESET_EN;
+ ocp_write_word(tp, MCU_TYPE_USB, USB_FW_FIX_EN1, ocp_data);
+
+ return 0;
}
static void r8153_aldps_en(struct r8152 *tp, bool enable)
@@ -3567,6 +4535,8 @@ static void r8153_hw_phy_cfg(struct r8152 *tp)
/* disable EEE before updating the PHY parameters */
rtl_eee_enable(tp, false);
+ rtl8152_apply_firmware(tp);
+
if (tp->version == RTL_VER_03) {
data = ocp_reg_read(tp, OCP_EEE_CFG);
data &= ~CTAP_SHORT_EN;
@@ -3639,6 +4609,8 @@ static void r8153b_hw_phy_cfg(struct r8152 *tp)
/* disable EEE before updating the PHY parameters */
rtl_eee_enable(tp, false);
+ rtl8152_apply_firmware(tp);
+
r8153b_green_en(tp, test_bit(GREEN_ETHERNET, &tp->flags));
data = sram_read(tp, SRAM_GREEN_CFG);
@@ -3711,7 +4683,6 @@ static void r8153b_hw_phy_cfg(struct r8152 *tp)
static void r8153_first_init(struct r8152 *tp)
{
u32 ocp_data;
- int i;
r8153_mac_clk_spd(tp, false);
rxdy_gated_en(tp, true);
@@ -3732,23 +4703,13 @@ static void r8153_first_init(struct r8152 *tp)
ocp_data &= ~MCU_BORW_EN;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data);
- for (i = 0; i < 1000; i++) {
- ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
- if (ocp_data & LINK_LIST_READY)
- break;
- usleep_range(1000, 2000);
- }
+ wait_oob_link_list_ready(tp);
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7);
ocp_data |= RE_INIT_LL;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data);
- for (i = 0; i < 1000; i++) {
- ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
- if (ocp_data & LINK_LIST_READY)
- break;
- usleep_range(1000, 2000);
- }
+ wait_oob_link_list_ready(tp);
rtl_rx_vlan_en(tp, tp->netdev->features & NETIF_F_HW_VLAN_CTAG_RX);
@@ -3773,7 +4734,6 @@ static void r8153_first_init(struct r8152 *tp)
static void r8153_enter_oob(struct r8152 *tp)
{
u32 ocp_data;
- int i;
r8153_mac_clk_spd(tp, true);
@@ -3784,23 +4744,13 @@ static void r8153_enter_oob(struct r8152 *tp)
rtl_disable(tp);
rtl_reset_bmu(tp);
- for (i = 0; i < 1000; i++) {
- ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
- if (ocp_data & LINK_LIST_READY)
- break;
- usleep_range(1000, 2000);
- }
+ wait_oob_link_list_ready(tp);
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7);
ocp_data |= RE_INIT_LL;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data);
- for (i = 0; i < 1000; i++) {
- ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
- if (ocp_data & LINK_LIST_READY)
- break;
- usleep_range(1000, 2000);
- }
+ wait_oob_link_list_ready(tp);
ocp_data = tp->netdev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, ocp_data);
@@ -4187,11 +5137,22 @@ static void rtl_hw_phy_work_func_t(struct work_struct *work)
mutex_lock(&tp->control);
+ if (rtl8152_request_firmware(tp) == -ENODEV && tp->rtl_fw.retry) {
+ tp->rtl_fw.retry = false;
+ tp->rtl_fw.fw = NULL;
+
+ /* Delay execution in case request_firmware() is not ready yet.
+ */
+ queue_delayed_work(system_long_wq, &tp->hw_phy_work, HZ * 10);
+ goto ignore_once;
+ }
+
tp->rtl_ops.hw_phy_cfg(tp);
rtl8152_set_speed(tp, tp->autoneg, tp->speed, tp->duplex,
tp->advertising);
+ignore_once:
mutex_unlock(&tp->control);
usb_autopm_put_interface(tp->intf);
@@ -4229,6 +5190,11 @@ static int rtl8152_open(struct net_device *netdev)
struct r8152 *tp = netdev_priv(netdev);
int res = 0;
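+	/* run any pending PHY work (e.g. a delayed firmware request) before opening */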
+ if (work_busy(&tp->hw_phy_work.work) & WORK_BUSY_PENDING) {
+ cancel_delayed_work_sync(&tp->hw_phy_work);
+ rtl_hw_phy_work_func_t(&tp->hw_phy_work.work);
+ }
+
res = alloc_all_mem(tp);
if (res)
goto out;
@@ -4283,10 +5249,10 @@ static int rtl8152_close(struct net_device *netdev)
unregister_pm_notifier(&tp->pm_notifier);
#endif
tasklet_disable(&tp->tx_tl);
- napi_disable(&tp->napi);
clear_bit(WORK_ENABLE, &tp->flags);
usb_kill_urb(tp->intr_urb);
cancel_delayed_work_sync(&tp->schedule);
+ napi_disable(&tp->napi);
netif_stop_queue(netdev);
res = usb_autopm_get_interface(tp->intf);
@@ -4552,10 +5518,10 @@ static int rtl8152_pre_reset(struct usb_interface *intf)
netif_stop_queue(netdev);
tasklet_disable(&tp->tx_tl);
- napi_disable(&tp->napi);
clear_bit(WORK_ENABLE, &tp->flags);
usb_kill_urb(tp->intr_urb);
cancel_delayed_work_sync(&tp->schedule);
+ napi_disable(&tp->napi);
if (netif_carrier_ok(netdev)) {
mutex_lock(&tp->control);
tp->rtl_ops.disable(tp);
@@ -4673,7 +5639,7 @@ static int rtl8152_system_resume(struct r8152 *tp)
netif_device_attach(netdev);
- if (netif_running(netdev) && netdev->flags & IFF_UP) {
+ if (netif_running(netdev) && (netdev->flags & IFF_UP)) {
tp->rtl_ops.up(tp);
netif_carrier_off(netdev);
set_bit(WORK_ENABLE, &tp->flags);
@@ -4875,6 +5841,9 @@ static void rtl8152_get_drvinfo(struct net_device *netdev,
strlcpy(info->driver, MODULENAME, sizeof(info->driver));
strlcpy(info->version, DRIVER_VERSION, sizeof(info->version));
usb_make_path(tp->udev, info->bus_info, sizeof(info->bus_info));
+ if (!IS_ERR_OR_NULL(tp->rtl_fw.fw))
+ strlcpy(info->fw_version, tp->rtl_fw.version,
+ sizeof(info->fw_version));
}
static
@@ -5244,9 +6213,15 @@ static int rtl8152_set_tunable(struct net_device *netdev,
}
if (tp->rx_copybreak != val) {
- napi_disable(&tp->napi);
- tp->rx_copybreak = val;
- napi_enable(&tp->napi);
+ if (netdev->flags & IFF_UP) {
+ mutex_lock(&tp->control);
+ napi_disable(&tp->napi);
+ tp->rx_copybreak = val;
+ napi_enable(&tp->napi);
+ mutex_unlock(&tp->control);
+ } else {
+ tp->rx_copybreak = val;
+ }
}
break;
default:
@@ -5274,9 +6249,15 @@ static int rtl8152_set_ringparam(struct net_device *netdev,
return -EINVAL;
if (tp->rx_pending != ring->rx_pending) {
- napi_disable(&tp->napi);
- tp->rx_pending = ring->rx_pending;
- napi_enable(&tp->napi);
+ if (netdev->flags & IFF_UP) {
+ mutex_lock(&tp->control);
+ napi_disable(&tp->napi);
+ tp->rx_pending = ring->rx_pending;
+ napi_enable(&tp->napi);
+ mutex_unlock(&tp->control);
+ } else {
+ tp->rx_pending = ring->rx_pending;
+ }
}
return 0;
@@ -5499,6 +6480,47 @@ static int rtl_ops_init(struct r8152 *tp)
return ret;
}
+#define FIRMWARE_8153A_2 "rtl_nic/rtl8153a-2.fw"
+#define FIRMWARE_8153A_3 "rtl_nic/rtl8153a-3.fw"
+#define FIRMWARE_8153A_4 "rtl_nic/rtl8153a-4.fw"
+#define FIRMWARE_8153B_2 "rtl_nic/rtl8153b-2.fw"
+
+MODULE_FIRMWARE(FIRMWARE_8153A_2);
+MODULE_FIRMWARE(FIRMWARE_8153A_3);
+MODULE_FIRMWARE(FIRMWARE_8153A_4);
+MODULE_FIRMWARE(FIRMWARE_8153B_2);
+
+static int rtl_fw_init(struct r8152 *tp)
+{
+ struct rtl_fw *rtl_fw = &tp->rtl_fw;
+
+ switch (tp->version) {
+ case RTL_VER_04:
+ rtl_fw->fw_name = FIRMWARE_8153A_2;
+ rtl_fw->pre_fw = r8153_pre_firmware_1;
+ rtl_fw->post_fw = r8153_post_firmware_1;
+ break;
+ case RTL_VER_05:
+ rtl_fw->fw_name = FIRMWARE_8153A_3;
+ rtl_fw->pre_fw = r8153_pre_firmware_2;
+ rtl_fw->post_fw = r8153_post_firmware_2;
+ break;
+ case RTL_VER_06:
+ rtl_fw->fw_name = FIRMWARE_8153A_4;
+ rtl_fw->post_fw = r8153_post_firmware_3;
+ break;
+ case RTL_VER_09:
+ rtl_fw->fw_name = FIRMWARE_8153B_2;
+ rtl_fw->pre_fw = r8153b_pre_firmware_1;
+ rtl_fw->post_fw = r8153b_post_firmware_1;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
static u8 rtl_get_version(struct usb_interface *intf)
{
struct usb_device *udev = interface_to_usbdev(intf);
@@ -5606,6 +6628,8 @@ static int rtl8152_probe(struct usb_interface *intf,
if (ret)
goto out;
+ rtl_fw_init(tp);
+
mutex_init(&tp->control);
INIT_DELAYED_WORK(&tp->schedule, rtl_work_func_t);
INIT_DELAYED_WORK(&tp->hw_phy_work, rtl_hw_phy_work_func_t);
@@ -5632,8 +6656,13 @@ static int rtl8152_probe(struct usb_interface *intf,
netdev->hw_features &= ~NETIF_F_RXCSUM;
}
+ if (le16_to_cpu(udev->descriptor.idVendor) == VENDOR_ID_LENOVO &&
+ le16_to_cpu(udev->descriptor.idProduct) == 0x3082)
+ set_bit(LENOVO_MACPASSTHRU, &tp->flags);
+
if (le16_to_cpu(udev->descriptor.bcdDevice) == 0x3011 && udev->serial &&
- (!strcmp(udev->serial, "000001000000") || !strcmp(udev->serial, "000002000000"))) {
+ (!strcmp(udev->serial, "000001000000") ||
+ !strcmp(udev->serial, "000002000000"))) {
dev_info(&udev->dev, "Dell TB16 Dock, disable RX aggregation");
set_bit(DELL_TB_RX_AGG_BUG, &tp->flags);
}
@@ -5676,6 +6705,10 @@ static int rtl8152_probe(struct usb_interface *intf,
intf->needs_remote_wakeup = 1;
tp->rtl_ops.init(tp);
+#if IS_BUILTIN(CONFIG_USB_RTL8152)
+ /* Retry in case request_firmware() is not ready yet. */
+ tp->rtl_fw.retry = true;
+#endif
queue_delayed_work(system_long_wq, &tp->hw_phy_work, 0);
set_ethernet_addr(tp);
@@ -5721,6 +6754,7 @@ static void rtl8152_disconnect(struct usb_interface *intf)
tasklet_kill(&tp->tx_tl);
cancel_delayed_work_sync(&tp->hw_phy_work);
tp->rtl_ops.unload(tp);
+ rtl8152_release_firmware(tp);
free_netdev(tp->netdev);
}
}
@@ -5752,6 +6786,7 @@ static const struct usb_device_id rtl8152_table[] = {
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x304f)},
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3062)},
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3069)},
+ {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3082)},
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7205)},
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x720c)},
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7214)},
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 9f3c839f9e5f..a552df37a347 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -260,14 +260,8 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
skb_tx_timestamp(skb);
if (likely(veth_forward_skb(rcv, skb, rq, rcv_xdp) == NET_RX_SUCCESS)) {
- if (!rcv_xdp) {
- struct pcpu_lstats *stats = this_cpu_ptr(dev->lstats);
-
- u64_stats_update_begin(&stats->syncp);
- stats->bytes += length;
- stats->packets++;
- u64_stats_update_end(&stats->syncp);
- }
+ if (!rcv_xdp)
+ dev_lstats_add(dev, length);
} else {
drop:
atomic64_inc(&priv->dropped);
@@ -281,26 +275,11 @@ drop:
return NETDEV_TX_OK;
}
-static u64 veth_stats_tx(struct pcpu_lstats *result, struct net_device *dev)
+static u64 veth_stats_tx(struct net_device *dev, u64 *packets, u64 *bytes)
{
struct veth_priv *priv = netdev_priv(dev);
- int cpu;
-
- result->packets = 0;
- result->bytes = 0;
- for_each_possible_cpu(cpu) {
- struct pcpu_lstats *stats = per_cpu_ptr(dev->lstats, cpu);
- u64 packets, bytes;
- unsigned int start;
- do {
- start = u64_stats_fetch_begin_irq(&stats->syncp);
- packets = stats->packets;
- bytes = stats->bytes;
- } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
- result->packets += packets;
- result->bytes += bytes;
- }
+ dev_lstats_read(dev, packets, bytes);
return atomic64_read(&priv->dropped);
}
@@ -335,11 +314,11 @@ static void veth_get_stats64(struct net_device *dev,
struct veth_priv *priv = netdev_priv(dev);
struct net_device *peer;
struct veth_rq_stats rx;
- struct pcpu_lstats tx;
+ u64 packets, bytes;
- tot->tx_dropped = veth_stats_tx(&tx, dev);
- tot->tx_bytes = tx.bytes;
- tot->tx_packets = tx.packets;
+ tot->tx_dropped = veth_stats_tx(dev, &packets, &bytes);
+ tot->tx_bytes = bytes;
+ tot->tx_packets = packets;
veth_stats_rx(&rx, dev);
tot->rx_dropped = rx.xdp_drops;
@@ -349,9 +328,9 @@ static void veth_get_stats64(struct net_device *dev,
rcu_read_lock();
peer = rcu_dereference(priv->peer);
if (peer) {
- tot->rx_dropped += veth_stats_tx(&tx, peer);
- tot->rx_bytes += tx.bytes;
- tot->rx_packets += tx.packets;
+ tot->rx_dropped += veth_stats_tx(peer, &packets, &bytes);
+ tot->rx_bytes += bytes;
+ tot->rx_packets += packets;
veth_stats_rx(&rx, peer);
tot->tx_bytes += rx.xdp_bytes;
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 5a635f028bdc..4d7d5434cc5d 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -2445,11 +2445,8 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
if (!prog && !old_prog)
return 0;
- if (prog) {
- prog = bpf_prog_add(prog, vi->max_queue_pairs - 1);
- if (IS_ERR(prog))
- return PTR_ERR(prog);
- }
+ if (prog)
+ bpf_prog_add(prog, vi->max_queue_pairs - 1);
/* Make sure NAPI is not using any XDP TX queues for RX. */
if (netif_running(dev)) {
diff --git a/drivers/net/vsockmon.c b/drivers/net/vsockmon.c
index 14e324b84617..e8563acf98e8 100644
--- a/drivers/net/vsockmon.c
+++ b/drivers/net/vsockmon.c
@@ -47,13 +47,7 @@ static int vsockmon_close(struct net_device *dev)
static netdev_tx_t vsockmon_xmit(struct sk_buff *skb, struct net_device *dev)
{
- int len = skb->len;
- struct pcpu_lstats *stats = this_cpu_ptr(dev->lstats);
-
- u64_stats_update_begin(&stats->syncp);
- stats->bytes += len;
- stats->packets++;
- u64_stats_update_end(&stats->syncp);
+ dev_lstats_add(dev, skb->len);
dev_kfree_skb(skb);
@@ -63,30 +57,9 @@ static netdev_tx_t vsockmon_xmit(struct sk_buff *skb, struct net_device *dev)
static void
vsockmon_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
- int i;
- u64 bytes = 0, packets = 0;
-
- for_each_possible_cpu(i) {
- const struct pcpu_lstats *vstats;
- u64 tbytes, tpackets;
- unsigned int start;
-
- vstats = per_cpu_ptr(dev->lstats, i);
+ dev_lstats_read(dev, &stats->rx_packets, &stats->rx_bytes);
- do {
- start = u64_stats_fetch_begin_irq(&vstats->syncp);
- tbytes = vstats->bytes;
- tpackets = vstats->packets;
- } while (u64_stats_fetch_retry_irq(&vstats->syncp, start));
-
- packets += tpackets;
- bytes += tbytes;
- }
-
- stats->rx_packets = packets;
stats->tx_packets = 0;
-
- stats->rx_bytes = bytes;
stats->tx_bytes = 0;
}
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 8869154fad88..bf04bc2e68c2 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -793,8 +793,7 @@ static int vxlan_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff)
return eth_gro_complete(skb, nhoff + sizeof(struct vxlanhdr));
}
-static struct vxlan_fdb *vxlan_fdb_alloc(struct vxlan_dev *vxlan,
- const u8 *mac, __u16 state,
+static struct vxlan_fdb *vxlan_fdb_alloc(const u8 *mac, __u16 state,
__be32 src_vni, __u16 ndm_flags)
{
struct vxlan_fdb *f;
@@ -835,7 +834,7 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan,
return -ENOSPC;
netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip);
- f = vxlan_fdb_alloc(vxlan, mac, state, src_vni, ndm_flags);
+ f = vxlan_fdb_alloc(mac, state, src_vni, ndm_flags);
if (!f)
return -ENOMEM;
@@ -3176,9 +3175,29 @@ static void vxlan_get_drvinfo(struct net_device *netdev,
strlcpy(drvinfo->driver, "vxlan", sizeof(drvinfo->driver));
}
+static int vxlan_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct vxlan_dev *vxlan = netdev_priv(dev);
+ struct vxlan_rdst *dst = &vxlan->default_dst;
+ struct net_device *lowerdev = __dev_get_by_index(vxlan->net,
+ dst->remote_ifindex);
+
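+	/* without a lower device the link parameters cannot be determined */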
+ if (!lowerdev) {
+ cmd->base.duplex = DUPLEX_UNKNOWN;
+ cmd->base.port = PORT_OTHER;
+ cmd->base.speed = SPEED_UNKNOWN;
+
+ return 0;
+ }
+
+ return __ethtool_get_link_ksettings(lowerdev, cmd);
+}
+
static const struct ethtool_ops vxlan_ethtool_ops = {
- .get_drvinfo = vxlan_get_drvinfo,
- .get_link = ethtool_op_get_link,
+ .get_drvinfo = vxlan_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_link_ksettings = vxlan_get_link_ksettings,
};
static struct socket *vxlan_create_sock(struct net *net, bool ipv6,
diff --git a/drivers/net/wimax/i2400m/debugfs.c b/drivers/net/wimax/i2400m/debugfs.c
index 73f5892ce6c1..1c640b41ea4c 100644
--- a/drivers/net/wimax/i2400m/debugfs.c
+++ b/drivers/net/wimax/i2400m/debugfs.c
@@ -26,7 +26,7 @@ int debugfs_netdev_queue_stopped_get(void *data, u64 *val)
*val = netif_queue_stopped(i2400m->wimax_dev.net_dev);
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(fops_netdev_queue_stopped,
+DEFINE_DEBUGFS_ATTRIBUTE(fops_netdev_queue_stopped,
debugfs_netdev_queue_stopped_get,
NULL, "%llu\n");
@@ -154,7 +154,7 @@ int debugfs_i2400m_suspend_set(void *data, u64 val)
result = 0;
return result;
}
-DEFINE_SIMPLE_ATTRIBUTE(fops_i2400m_suspend,
+DEFINE_DEBUGFS_ATTRIBUTE(fops_i2400m_suspend,
NULL, debugfs_i2400m_suspend_set,
"%llu\n");
@@ -183,7 +183,7 @@ int debugfs_i2400m_reset_set(void *data, u64 val)
}
return result;
}
-DEFINE_SIMPLE_ATTRIBUTE(fops_i2400m_reset,
+DEFINE_DEBUGFS_ATTRIBUTE(fops_i2400m_reset,
NULL, debugfs_i2400m_reset_set,
"%llu\n");
diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c
index 6953f904232f..9659f9e1aaa6 100644
--- a/drivers/net/wimax/i2400m/usb.c
+++ b/drivers/net/wimax/i2400m/usb.c
@@ -511,7 +511,7 @@ error_alloc_netdev:
/*
- * Disconect a i2400m from the system.
+ * Disconnect a i2400m from the system.
*
* i2400m_stop() has been called before, so all the rx and tx contexts
* have been taken down already. Make sure the queue is stopped,
diff --git a/drivers/net/wireless/admtek/adm8211.c b/drivers/net/wireless/admtek/adm8211.c
index 46f1427e6e9e..ba326f6c1214 100644
--- a/drivers/net/wireless/admtek/adm8211.c
+++ b/drivers/net/wireless/admtek/adm8211.c
@@ -1781,8 +1781,8 @@ static int adm8211_probe(struct pci_dev *pdev,
{
struct ieee80211_hw *dev;
struct adm8211_priv *priv;
- unsigned long mem_addr, mem_len;
- unsigned int io_addr, io_len;
+ unsigned long mem_len;
+ unsigned int io_len;
int err;
u32 reg;
u8 perm_addr[ETH_ALEN];
@@ -1794,9 +1794,7 @@ static int adm8211_probe(struct pci_dev *pdev,
return err;
}
- io_addr = pci_resource_start(pdev, 0);
io_len = pci_resource_len(pdev, 0);
- mem_addr = pci_resource_start(pdev, 1);
mem_len = pci_resource_len(pdev, 1);
if (io_len < 256 || mem_len < 1024) {
printk(KERN_ERR "%s (adm8211): Too short PCI resources\n",
diff --git a/drivers/net/wireless/ath/Kconfig b/drivers/net/wireless/ath/Kconfig
index 56616d988c96..7b90b8546162 100644
--- a/drivers/net/wireless/ath/Kconfig
+++ b/drivers/net/wireless/ath/Kconfig
@@ -30,12 +30,12 @@ config ATH_DEBUG
Right now only ath9k makes use of this.
config ATH_TRACEPOINTS
- bool "Atheros wireless tracing"
- depends on ATH_DEBUG
- depends on EVENT_TRACING
- ---help---
- This option enables tracepoints for atheros wireless drivers.
- Currently, ath9k makes use of this facility.
+ bool "Atheros wireless tracing"
+ depends on ATH_DEBUG
+ depends on EVENT_TRACING
+ ---help---
+ This option enables tracepoints for atheros wireless drivers.
+ Currently, ath9k makes use of this facility.
config ATH_REG_DYNAMIC_USER_REG_HINTS
bool "Atheros dynamic user regulatory hints"
diff --git a/drivers/net/wireless/ath/ar5523/Kconfig b/drivers/net/wireless/ath/ar5523/Kconfig
index 65b39c7d035d..e82df5f1ea67 100644
--- a/drivers/net/wireless/ath/ar5523/Kconfig
+++ b/drivers/net/wireless/ath/ar5523/Kconfig
@@ -1,9 +1,9 @@
# SPDX-License-Identifier: ISC
config AR5523
- tristate "Atheros AR5523 wireless driver support"
- depends on MAC80211 && USB
- select ATH_COMMON
- select FW_LOADER
- ---help---
- This module add support for AR5523 based USB dongles such as D-Link
- DWL-G132, Netgear WPN111 and many more.
+ tristate "Atheros AR5523 wireless driver support"
+ depends on MAC80211 && USB
+ select ATH_COMMON
+ select FW_LOADER
+ ---help---
+	  This module adds support for AR5523 based USB dongles such as D-Link
+ DWL-G132, Netgear WPN111 and many more.
diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c
index b94759daeacc..da2d179430ca 100644
--- a/drivers/net/wireless/ath/ar5523/ar5523.c
+++ b/drivers/net/wireless/ath/ar5523/ar5523.c
@@ -255,7 +255,8 @@ static int ar5523_cmd(struct ar5523 *ar, u32 code, const void *idata,
if (flags & AR5523_CMD_FLAG_MAGIC)
hdr->magic = cpu_to_be32(1 << 24);
- memcpy(hdr + 1, idata, ilen);
+ if (ilen)
+ memcpy(hdr + 1, idata, ilen);
cmd->odata = odata;
cmd->olen = olen;
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index eca87f7c5b6c..294fbc1e89ab 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -1704,9 +1704,6 @@ ath10k_ce_alloc_dest_ring_64(struct ath10k *ar, unsigned int ce_id,
/* Correctly initialize memory to 0 to prevent garbage
* data from crashing the system when downloading firmware
*/
- memset(dest_ring->base_addr_owner_space_unaligned, 0,
- nentries * sizeof(struct ce_desc_64) + CE_DESC_RING_ALIGN);
-
dest_ring->base_addr_owner_space =
PTR_ALIGN(dest_ring->base_addr_owner_space_unaligned,
CE_DESC_RING_ALIGN);
@@ -2019,8 +2016,6 @@ void ath10k_ce_alloc_rri(struct ath10k *ar)
value |= ar->hw_ce_regs->upd->mask;
ath10k_ce_write32(ar, ce_base_addr + ctrl1_regs, value);
}
-
- memset(ce->vaddr_rri, 0, CE_COUNT * sizeof(u32));
}
EXPORT_SYMBOL(ath10k_ce_alloc_rri);
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 383d4fa555a8..4f76ba5d78a9 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -11,6 +11,7 @@
#include <linux/property.h>
#include <linux/dmi.h>
#include <linux/ctype.h>
+#include <linux/pm_qos.h>
#include <asm/byteorder.h>
#include "core.h"
@@ -677,13 +678,22 @@ static void ath10k_send_suspend_complete(struct ath10k *ar)
complete(&ar->target_suspend);
}
-static void ath10k_init_sdio(struct ath10k *ar, enum ath10k_firmware_mode mode)
+static int ath10k_init_sdio(struct ath10k *ar, enum ath10k_firmware_mode mode)
{
+ int ret;
u32 param = 0;
- ath10k_bmi_write32(ar, hi_mbox_io_block_sz, 256);
- ath10k_bmi_write32(ar, hi_mbox_isr_yield_limit, 99);
- ath10k_bmi_read32(ar, hi_acs_flags, &param);
+ ret = ath10k_bmi_write32(ar, hi_mbox_io_block_sz, 256);
+ if (ret)
+ return ret;
+
+ ret = ath10k_bmi_write32(ar, hi_mbox_isr_yield_limit, 99);
+ if (ret)
+ return ret;
+
+ ret = ath10k_bmi_read32(ar, hi_acs_flags, &param);
+ if (ret)
+ return ret;
/* Data transfer is not initiated when reduced Tx completion
* is used for SDIO. Disable it until fixed.
@@ -700,14 +710,23 @@ static void ath10k_init_sdio(struct ath10k *ar, enum ath10k_firmware_mode mode)
else
param |= HI_ACS_FLAGS_SDIO_SWAP_MAILBOX_SET;
- ath10k_bmi_write32(ar, hi_acs_flags, param);
+ ret = ath10k_bmi_write32(ar, hi_acs_flags, param);
+ if (ret)
+ return ret;
/* Explicitly set fwlog prints to zero as target may turn it on
* based on scratch registers.
*/
- ath10k_bmi_read32(ar, hi_option_flag, &param);
+ ret = ath10k_bmi_read32(ar, hi_option_flag, &param);
+ if (ret)
+ return ret;
+
param |= HI_OPTION_DISABLE_DBGLOG;
- ath10k_bmi_write32(ar, hi_option_flag, param);
+ ret = ath10k_bmi_write32(ar, hi_option_flag, param);
+ if (ret)
+ return ret;
+
+ return 0;
}
static int ath10k_init_configure_target(struct ath10k *ar)
@@ -1009,6 +1028,7 @@ static int ath10k_download_fw(struct ath10k *ar)
u32 address, data_len;
const void *data;
int ret;
+ struct pm_qos_request latency_qos;
address = ar->hw_params.patch_load_addr;
@@ -1042,8 +1062,14 @@ static int ath10k_download_fw(struct ath10k *ar)
ret);
}
- return ath10k_bmi_fast_download(ar, address,
- data, data_len);
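+	/* request zero CPU DMA latency while the firmware is downloaded */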
+ memset(&latency_qos, 0, sizeof(latency_qos));
+ pm_qos_add_request(&latency_qos, PM_QOS_CPU_DMA_LATENCY, 0);
+
+ ret = ath10k_bmi_fast_download(ar, address, data, data_len);
+
+ pm_qos_remove_request(&latency_qos);
+
+ return ret;
}
void ath10k_core_free_board_files(struct ath10k *ar)
@@ -2565,8 +2591,13 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
if (status)
goto err;
- if (ar->hif.bus == ATH10K_BUS_SDIO)
- ath10k_init_sdio(ar, mode);
+ if (ar->hif.bus == ATH10K_BUS_SDIO) {
+ status = ath10k_init_sdio(ar, mode);
+ if (status) {
+ ath10k_err(ar, "failed to init SDIO: %d\n", status);
+ goto err;
+ }
+ }
}
ar->htc.htc_ops.target_send_suspend_complete =
@@ -2787,7 +2818,7 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
status = ath10k_hif_set_target_log_mode(ar, fw_diag_log);
if (status && status != -EOPNOTSUPP) {
- ath10k_warn(ar, "set traget log mode faileds: %d\n", status);
+ ath10k_warn(ar, "set target log mode failed: %d\n", status);
goto err_hif_stop;
}
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index 4d7db07db6ba..af68eb5d0776 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -169,6 +169,7 @@ struct ath10k_wmi {
struct wmi_cmd_map *cmd;
struct wmi_vdev_param_map *vdev_param;
struct wmi_pdev_param_map *pdev_param;
+ struct wmi_peer_param_map *peer_param;
const struct wmi_ops *ops;
const struct wmi_peer_flags_map *peer_flags;
@@ -963,12 +964,20 @@ struct ath10k {
u32 hw_eeprom_rd;
u32 ht_cap_info;
u32 vht_cap_info;
+ u32 vht_supp_mcs;
u32 num_rf_chains;
u32 max_spatial_stream;
/* protected by conf_mutex */
+ u32 low_2ghz_chan;
+ u32 high_2ghz_chan;
u32 low_5ghz_chan;
u32 high_5ghz_chan;
bool ani_enabled;
+ u32 sys_cap_info;
+
+ /* protected by data_lock */
+ bool hw_rfkill_on;
+
/* protected by conf_mutex */
u8 ps_state_enable;
diff --git a/drivers/net/wireless/ath/ath10k/coredump.c b/drivers/net/wireless/ath/ath10k/coredump.c
index b6d2932383cf..2a4498067024 100644
--- a/drivers/net/wireless/ath/ath10k/coredump.c
+++ b/drivers/net/wireless/ath/ath10k/coredump.c
@@ -703,7 +703,7 @@ static const struct ath10k_mem_region qca99x0_hw20_mem_regions[] = {
},
{
.type = ATH10K_MEM_REGION_TYPE_REG,
- .start = 0x98000,
+ .start = 0x980000,
.len = 0x50000,
.name = "IRAM",
.section_table = {
@@ -786,7 +786,7 @@ static const struct ath10k_mem_region qca9984_hw10_mem_regions[] = {
},
{
.type = ATH10K_MEM_REGION_TYPE_REG,
- .start = 0x98000,
+ .start = 0x980000,
.len = 0x50000,
.name = "IRAM",
.section_table = {
@@ -891,7 +891,7 @@ static const struct ath10k_mem_region qca4019_hw10_mem_regions[] = {
},
{
.type = ATH10K_MEM_REGION_TYPE_REG,
- .start = 0x98000,
+ .start = 0x980000,
.len = 0x50000,
.name = "IRAM",
.section_table = {
@@ -951,6 +951,19 @@ static const struct ath10k_mem_region qca4019_hw10_mem_regions[] = {
},
};
+static const struct ath10k_mem_region wcn399x_hw10_mem_regions[] = {
+ {
+ /* MSA region start is not fixed, hence it is assigned at runtime */
+ .type = ATH10K_MEM_REGION_TYPE_MSA,
+ .len = 0x100000,
+ .name = "DRAM",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+};
+
static const struct ath10k_hw_mem_layout hw_mem_layouts[] = {
{
.hw_id = QCA6174_HW_1_0_VERSION,
@@ -1048,6 +1061,14 @@ static const struct ath10k_hw_mem_layout hw_mem_layouts[] = {
.size = ARRAY_SIZE(qca4019_hw10_mem_regions),
},
},
+ {
+ .hw_id = WCN3990_HW_1_0_DEV_VERSION,
+ .hw_rev = ATH10K_HW_WCN3990,
+ .region_table = {
+ .regions = wcn399x_hw10_mem_regions,
+ .size = ARRAY_SIZE(wcn399x_hw10_mem_regions),
+ },
+ },
};
static u32 ath10k_coredump_get_ramdump_size(struct ath10k *ar)
@@ -1208,9 +1229,11 @@ static struct ath10k_dump_file_data *ath10k_coredump_build(struct ath10k *ar)
dump_tlv = (struct ath10k_tlv_dump_data *)(buf + sofar);
dump_tlv->type = cpu_to_le32(ATH10K_FW_CRASH_DUMP_RAM_DATA);
dump_tlv->tlv_len = cpu_to_le32(crash_data->ramdump_buf_len);
- memcpy(dump_tlv->tlv_data, crash_data->ramdump_buf,
- crash_data->ramdump_buf_len);
- sofar += sizeof(*dump_tlv) + crash_data->ramdump_buf_len;
+ if (crash_data->ramdump_buf_len) {
+ memcpy(dump_tlv->tlv_data, crash_data->ramdump_buf,
+ crash_data->ramdump_buf_len);
+ sofar += sizeof(*dump_tlv) + crash_data->ramdump_buf_len;
+ }
}
mutex_unlock(&ar->dump_mutex);
@@ -1257,6 +1280,9 @@ int ath10k_coredump_register(struct ath10k *ar)
if (test_bit(ATH10K_FW_CRASH_DUMP_RAM_DATA, &ath10k_coredump_mask)) {
crash_data->ramdump_buf_len = ath10k_coredump_get_ramdump_size(ar);
+ if (!crash_data->ramdump_buf_len)
+ return 0;
+
crash_data->ramdump_buf = vzalloc(crash_data->ramdump_buf_len);
if (!crash_data->ramdump_buf)
return -ENOMEM;
diff --git a/drivers/net/wireless/ath/ath10k/coredump.h b/drivers/net/wireless/ath/ath10k/coredump.h
index 09de41922f97..8bf03e8c1d3a 100644
--- a/drivers/net/wireless/ath/ath10k/coredump.h
+++ b/drivers/net/wireless/ath/ath10k/coredump.h
@@ -115,6 +115,7 @@ enum ath10k_mem_region_type {
ATH10K_MEM_REGION_TYPE_IRAM2 = 5,
ATH10K_MEM_REGION_TYPE_IOSRAM = 6,
ATH10K_MEM_REGION_TYPE_IOREG = 7,
+ ATH10K_MEM_REGION_TYPE_MSA = 8,
};
/* Define a section of the region which should be copied. As not all parts
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index bd2b5628f850..04c50a26a4f4 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -1516,7 +1516,7 @@ static void ath10k_tpc_stats_print(struct ath10k_tpc_stats *tpc_stats,
*len += scnprintf(buf + *len, buf_len - *len,
"No. Preamble Rate_code ");
- for (i = 0; i < WMI_TPC_TX_N_CHAIN; i++)
+ for (i = 0; i < tpc_stats->num_tx_chain; i++)
*len += scnprintf(buf + *len, buf_len - *len,
"tpc_value%d ", i);
@@ -2532,6 +2532,7 @@ void ath10k_debug_destroy(struct ath10k *ar)
ath10k_debug_fw_stats_reset(ar);
kfree(ar->debug.tpc_stats);
+ kfree(ar->debug.tpc_stats_final);
}
int ath10k_debug_register(struct ath10k *ar)
diff --git a/drivers/net/wireless/ath/ath10k/debugfs_sta.c b/drivers/net/wireless/ath/ath10k/debugfs_sta.c
index 42931a669b02..367539f2c370 100644
--- a/drivers/net/wireless/ath/ath10k/debugfs_sta.c
+++ b/drivers/net/wireless/ath/ath10k/debugfs_sta.c
@@ -430,7 +430,7 @@ ath10k_dbg_sta_write_peer_debug_trigger(struct file *file,
}
ret = ath10k_wmi_peer_set_param(ar, arsta->arvif->vdev_id, sta->addr,
- WMI_PEER_DEBUG, peer_debug_trigger);
+ ar->wmi.peer_param->debug, peer_debug_trigger);
if (ret) {
ath10k_warn(ar, "failed to set param to trigger peer tid logs for station ret: %d\n",
ret);
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index 53f1095de8ff..d95b63f133ab 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -2073,7 +2073,7 @@ static void ath10k_htt_rx_mpdu_desc_pn_hl(struct htt_hl_rx_desc *rx_desc,
case 24:
pn->pn24 = __le32_to_cpu(rx_desc->pn_31_0);
break;
- };
+ }
}
static bool ath10k_htt_rx_pn_cmp48(union htt_rx_pn_t *new_pn,
@@ -2726,7 +2726,7 @@ static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar,
spin_lock_bh(&ar->data_lock);
peer = ath10k_peer_find_by_id(ar, peer_id);
- if (!peer) {
+ if (!peer || !peer->sta) {
spin_unlock_bh(&ar->data_lock);
rcu_read_unlock();
continue;
diff --git a/drivers/net/wireless/ath/ath10k/hw.c b/drivers/net/wireless/ath/ath10k/hw.c
index c415e971735b..2451e0fb8ee5 100644
--- a/drivers/net/wireless/ath/ath10k/hw.c
+++ b/drivers/net/wireless/ath/ath10k/hw.c
@@ -155,6 +155,9 @@ const struct ath10k_hw_values qca6174_values = {
.num_target_ce_config_wlan = 7,
.ce_desc_meta_data_mask = 0xFFFC,
.ce_desc_meta_data_lsb = 2,
+ .rfkill_pin = 16,
+ .rfkill_cfg = 0,
+ .rfkill_on_level = 1,
};
const struct ath10k_hw_values qca99x0_values = {
@@ -1145,6 +1148,7 @@ static bool ath10k_qca99x0_rx_desc_msdu_limit_error(struct htt_rx_desc *rxd)
const struct ath10k_hw_ops qca99x0_ops = {
.rx_desc_get_l3_pad_bytes = ath10k_qca99x0_rx_desc_get_l3_pad_bytes,
.rx_desc_get_msdu_limit_error = ath10k_qca99x0_rx_desc_msdu_limit_error,
+ .is_rssi_enable = ath10k_htt_tx_rssi_enable,
};
const struct ath10k_hw_ops qca6174_ops = {
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index 2ae57c1de7b5..35a362329a4f 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -379,6 +379,9 @@ struct ath10k_hw_values {
u8 num_target_ce_config_wlan;
u16 ce_desc_meta_data_mask;
u8 ce_desc_meta_data_lsb;
+ u32 rfkill_pin;
+ u32 rfkill_cfg;
+ bool rfkill_on_level;
};
extern const struct ath10k_hw_values qca988x_values;
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index a6d21856b7e7..83cc8778ca1e 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -12,6 +12,7 @@
#include <linux/etherdevice.h>
#include <linux/acpi.h>
#include <linux/of.h>
+#include <linux/bitfield.h>
#include "hif.h"
#include "core.h"
@@ -2773,7 +2774,7 @@ static int ath10k_setup_peer_smps(struct ath10k *ar, struct ath10k_vif *arvif,
return -EINVAL;
return ath10k_wmi_peer_set_param(ar, arvif->vdev_id, addr,
- WMI_PEER_SMPS_STATE,
+ ar->wmi.peer_param->smps_state,
ath10k_smps_map[smps]);
}
@@ -2930,7 +2931,7 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
* poked with peer param command.
*/
ret = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, arvif->bssid,
- WMI_PEER_DUMMY_VAR, 1);
+ ar->wmi.peer_param->dummy_var, 1);
if (ret) {
ath10k_warn(ar, "failed to poke peer %pM param for ps workaround on vdev %i: %d\n",
arvif->bssid, arvif->vdev_id, ret);
@@ -3708,7 +3709,7 @@ static int ath10k_mac_tx(struct ath10k *ar,
struct ieee80211_vif *vif,
enum ath10k_hw_txrx_mode txmode,
enum ath10k_mac_tx_path txpath,
- struct sk_buff *skb)
+ struct sk_buff *skb, bool noque_offchan)
{
struct ieee80211_hw *hw = ar->hw;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -3738,10 +3739,10 @@ static int ath10k_mac_tx(struct ath10k *ar,
}
}
- if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
+ if (!noque_offchan && info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
if (!ath10k_mac_tx_frm_has_freq(ar)) {
- ath10k_dbg(ar, ATH10K_DBG_MAC, "queued offchannel skb %pK\n",
- skb);
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac queued offchannel skb %pK len %d\n",
+ skb, skb->len);
skb_queue_tail(&ar->offchan_tx_queue, skb);
ieee80211_queue_work(hw, &ar->offchan_tx_work);
@@ -3803,8 +3804,8 @@ void ath10k_offchan_tx_work(struct work_struct *work)
mutex_lock(&ar->conf_mutex);
- ath10k_dbg(ar, ATH10K_DBG_MAC, "mac offchannel skb %pK\n",
- skb);
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac offchannel skb %pK len %d\n",
+ skb, skb->len);
hdr = (struct ieee80211_hdr *)skb->data;
peer_addr = ieee80211_get_DA(hdr);
@@ -3850,7 +3851,7 @@ void ath10k_offchan_tx_work(struct work_struct *work)
txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb);
txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode);
- ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb);
+ ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb, true);
if (ret) {
ath10k_warn(ar, "failed to transmit offchannel frame: %d\n",
ret);
@@ -3860,8 +3861,8 @@ void ath10k_offchan_tx_work(struct work_struct *work)
time_left =
wait_for_completion_timeout(&ar->offchan_tx_completed, 3 * HZ);
if (time_left == 0)
- ath10k_warn(ar, "timed out waiting for offchannel skb %pK\n",
- skb);
+ ath10k_warn(ar, "timed out waiting for offchannel skb %pK, len: %d\n",
+ skb, skb->len);
if (!peer && tmp_peer_created) {
ret = ath10k_peer_delete(ar, vdev_id, peer_addr);
@@ -3903,8 +3904,10 @@ void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work)
ar->running_fw->fw_file.fw_features)) {
paddr = dma_map_single(ar->dev, skb->data,
skb->len, DMA_TO_DEVICE);
- if (!paddr)
+ if (dma_mapping_error(ar->dev, paddr)) {
+ ieee80211_free_txskb(ar->hw, skb);
continue;
+ }
ret = ath10k_wmi_mgmt_tx_send(ar, skb, paddr);
if (ret) {
ath10k_warn(ar, "failed to transmit management frame by ref via WMI: %d\n",
@@ -4065,7 +4068,7 @@ int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw,
if (ret)
return ret;
- skb = ieee80211_tx_dequeue(hw, txq);
+ skb = ieee80211_tx_dequeue_ni(hw, txq);
if (!skb) {
spin_lock_bh(&ar->htt.tx_lock);
ath10k_htt_tx_dec_pending(htt);
@@ -4097,7 +4100,7 @@ int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw,
spin_unlock_bh(&ar->htt.tx_lock);
}
- ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb);
+ ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb, false);
if (unlikely(ret)) {
ath10k_warn(ar, "failed to push frame: %d\n", ret);
@@ -4378,7 +4381,7 @@ static void ath10k_mac_op_tx(struct ieee80211_hw *hw,
spin_unlock_bh(&ar->htt.tx_lock);
}
- ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb);
+ ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb, false);
if (ret) {
ath10k_warn(ar, "failed to transmit frame: %d\n", ret);
if (is_htt) {
@@ -4754,6 +4757,63 @@ static int __ath10k_fetch_bb_timing_dt(struct ath10k *ar,
return 0;
}
+static int ath10k_mac_rfkill_config(struct ath10k *ar)
+{
+ u32 param;
+ int ret;
+
+ if (ar->hw_values->rfkill_pin == 0) {
+ ath10k_warn(ar, "ath10k does not support hardware rfkill with this device\n");
+ return -EOPNOTSUPP;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac rfkill_pin %d rfkill_cfg %d rfkill_on_level %d",
+ ar->hw_values->rfkill_pin, ar->hw_values->rfkill_cfg,
+ ar->hw_values->rfkill_on_level);
+
+ param = FIELD_PREP(WMI_TLV_RFKILL_CFG_RADIO_LEVEL,
+ ar->hw_values->rfkill_on_level) |
+ FIELD_PREP(WMI_TLV_RFKILL_CFG_GPIO_PIN_NUM,
+ ar->hw_values->rfkill_pin) |
+ FIELD_PREP(WMI_TLV_RFKILL_CFG_PIN_AS_GPIO,
+ ar->hw_values->rfkill_cfg);
+
+ ret = ath10k_wmi_pdev_set_param(ar,
+ ar->wmi.pdev_param->rfkill_config,
+ param);
+ if (ret) {
+ ath10k_warn(ar,
+ "failed to set rfkill config 0x%x: %d\n",
+ param, ret);
+ return ret;
+ }
+ return 0;
+}
+
+int ath10k_mac_rfkill_enable_radio(struct ath10k *ar, bool enable)
+{
+ enum wmi_tlv_rfkill_enable_radio param;
+ int ret;
+
+ if (enable)
+ param = WMI_TLV_RFKILL_ENABLE_RADIO_ON;
+ else
+ param = WMI_TLV_RFKILL_ENABLE_RADIO_OFF;
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac rfkill enable %d", param);
+
+ ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->rfkill_enable,
+ param);
+ if (ret) {
+ ath10k_warn(ar, "failed to set rfkill enable param %d: %d\n",
+ param, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
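ath10k_mac_rfkill_config() packs the GPIO pin number, the pin-as-GPIO config and the active level into one WMI pdev parameter word with FIELD_PREP(), using the masks this patch adds to wmi-tlv.h. A worked example with made-up values:

    #include <linux/bitfield.h>

    /* Layout from wmi-tlv.h (added later in this patch):
     *   GPIO_PIN_NUM = GENMASK(5, 0), RADIO_LEVEL = BIT(6),
     *   PIN_AS_GPIO  = GENMASK(10, 7)
     */
    u32 param = FIELD_PREP(WMI_TLV_RFKILL_CFG_RADIO_LEVEL, 1) |   /* on = high */
                FIELD_PREP(WMI_TLV_RFKILL_CFG_GPIO_PIN_NUM, 16) |
                FIELD_PREP(WMI_TLV_RFKILL_CFG_PIN_AS_GPIO, 1);
    /* param == 0x80 | 0x40 | 0x10 == 0xd0 */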
static int ath10k_start(struct ieee80211_hw *hw)
{
struct ath10k *ar = hw->priv;
@@ -4788,6 +4848,16 @@ static int ath10k_start(struct ieee80211_hw *hw)
goto err;
}
+ spin_lock_bh(&ar->data_lock);
+
+ if (ar->hw_rfkill_on) {
+ ar->hw_rfkill_on = false;
+ spin_unlock_bh(&ar->data_lock);
+ goto err;
+ }
+
+ spin_unlock_bh(&ar->data_lock);
+
ret = ath10k_hif_power_up(ar, ATH10K_FIRMWARE_MODE_NORMAL);
if (ret) {
ath10k_err(ar, "Could not init hif: %d\n", ret);
@@ -4801,6 +4871,14 @@ static int ath10k_start(struct ieee80211_hw *hw)
goto err_power_down;
}
+ if (ar->sys_cap_info & WMI_TLV_SYS_CAP_INFO_RFKILL) {
+ ret = ath10k_mac_rfkill_config(ar);
+ if (ret && ret != -EOPNOTSUPP) {
+ ath10k_warn(ar, "failed to configure rfkill: %d", ret);
+ goto err_core_stop;
+ }
+ }
+
param = ar->wmi.pdev_param->pmf_qos;
ret = ath10k_wmi_pdev_set_param(ar, param, 1);
if (ret) {
@@ -4960,7 +5038,8 @@ static void ath10k_stop(struct ieee80211_hw *hw)
mutex_lock(&ar->conf_mutex);
if (ar->state != ATH10K_STATE_OFF) {
- ath10k_halt(ar);
+ if (!ar->hw_rfkill_on)
+ ath10k_halt(ar);
ar->state = ATH10K_STATE_OFF;
}
mutex_unlock(&ar->conf_mutex);
@@ -5635,6 +5714,37 @@ static void ath10k_configure_filter(struct ieee80211_hw *hw,
mutex_unlock(&ar->conf_mutex);
}
+static void ath10k_recalculate_mgmt_rate(struct ath10k *ar,
+ struct ieee80211_vif *vif,
+ struct cfg80211_chan_def *def)
+{
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+ const struct ieee80211_supported_band *sband;
+ u8 basic_rate_idx;
+ int hw_rate_code;
+ u32 vdev_param;
+ u16 bitrate;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ sband = ar->hw->wiphy->bands[def->chan->band];
+ basic_rate_idx = ffs(vif->bss_conf.basic_rates) - 1;
+ bitrate = sband->bitrates[basic_rate_idx].bitrate;
+
+ hw_rate_code = ath10k_mac_get_rate_hw_value(bitrate);
+ if (hw_rate_code < 0) {
+ ath10k_warn(ar, "bitrate not supported %d\n", bitrate);
+ return;
+ }
+
+ vdev_param = ar->wmi.vdev_param->mgmt_rate;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
+ hw_rate_code);
+ if (ret)
+ ath10k_warn(ar, "failed to set mgmt tx rate %d\n", ret);
+}
+
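In the new ath10k_recalculate_mgmt_rate() helper, ffs() (find first set, 1-based) selects the lowest basic rate: subtracting one turns the bit position into an index into the band's bitrate table. For example (sband illustrative):

    /* basic_rates = 0xb4 = 0b10110100: rates at index 2, 4, 5, 7 are basic.
     * ffs(0xb4) == 3, so basic_rate_idx == 2, the slowest basic rate.
     */
    u8 basic_rate_idx = ffs(0xb4) - 1;
    u16 bitrate = sband->bitrates[basic_rate_idx].bitrate;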
static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info,
@@ -5645,10 +5755,9 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
struct cfg80211_chan_def def;
u32 vdev_param, pdev_param, slottime, preamble;
u16 bitrate, hw_value;
- u8 rate, basic_rate_idx, rateidx;
- int ret = 0, hw_rate_code, mcast_rate;
+ u8 rate, rateidx;
+ int ret = 0, mcast_rate;
enum nl80211_band band;
- const struct ieee80211_supported_band *sband;
mutex_lock(&ar->conf_mutex);
@@ -5872,29 +5981,9 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
arvif->vdev_id, ret);
}
- if (changed & BSS_CHANGED_BASIC_RATES) {
- if (ath10k_mac_vif_chan(vif, &def)) {
- mutex_unlock(&ar->conf_mutex);
- return;
- }
-
- sband = ar->hw->wiphy->bands[def.chan->band];
- basic_rate_idx = ffs(vif->bss_conf.basic_rates) - 1;
- bitrate = sband->bitrates[basic_rate_idx].bitrate;
-
- hw_rate_code = ath10k_mac_get_rate_hw_value(bitrate);
- if (hw_rate_code < 0) {
- ath10k_warn(ar, "bitrate not supported %d\n", bitrate);
- mutex_unlock(&ar->conf_mutex);
- return;
- }
-
- vdev_param = ar->wmi.vdev_param->mgmt_rate;
- ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
- hw_rate_code);
- if (ret)
- ath10k_warn(ar, "failed to set mgmt tx rate %d\n", ret);
- }
+ if (changed & BSS_CHANGED_BASIC_RATES &&
+ !ath10k_mac_vif_chan(arvif->vif, &def))
+ ath10k_recalculate_mgmt_rate(ar, vif, &def);
mutex_unlock(&ar->conf_mutex);
}
@@ -6239,7 +6328,7 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
if (sta && sta->tdls)
ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
- WMI_PEER_AUTHORIZE, 1);
+ ar->wmi.peer_param->authorize, 1);
exit:
mutex_unlock(&ar->conf_mutex);
@@ -6330,7 +6419,7 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk)
sta->addr, bw, mode);
err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
- WMI_PEER_PHYMODE, mode);
+ ar->wmi.peer_param->phymode, mode);
if (err) {
ath10k_warn(ar, "failed to update STA %pM peer phymode %d: %d\n",
sta->addr, mode, err);
@@ -6338,7 +6427,7 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk)
}
err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
- WMI_PEER_CHAN_WIDTH, bw);
+ ar->wmi.peer_param->chan_width, bw);
if (err)
ath10k_warn(ar, "failed to update STA %pM peer bw %d: %d\n",
sta->addr, bw, err);
@@ -6349,7 +6438,7 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk)
sta->addr, nss);
err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
- WMI_PEER_NSS, nss);
+ ar->wmi.peer_param->nss, nss);
if (err)
ath10k_warn(ar, "failed to update STA %pM nss %d: %d\n",
sta->addr, nss, err);
@@ -6360,7 +6449,7 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk)
sta->addr, smps);
err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
- WMI_PEER_SMPS_STATE, smps);
+ ar->wmi.peer_param->smps_state, smps);
if (err)
ath10k_warn(ar, "failed to update STA %pM smps %d: %d\n",
sta->addr, smps, err);
@@ -6434,7 +6523,7 @@ static int ath10k_sta_set_txpwr(struct ieee80211_hw *hw,
mutex_lock(&ar->conf_mutex);
ret = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
- WMI_PEER_USE_FIXED_PWR, txpwr);
+ ar->wmi.peer_param->use_fixed_power, txpwr);
if (ret) {
ath10k_warn(ar, "failed to set tx power for station ret: %d\n",
ret);
@@ -6515,6 +6604,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
arsta->tx_stats = kzalloc(sizeof(*arsta->tx_stats),
GFP_KERNEL);
if (!arsta->tx_stats) {
+ ath10k_mac_dec_num_stations(arvif, sta);
ret = -ENOMEM;
goto exit;
}
@@ -7419,7 +7509,7 @@ static bool ath10k_mac_set_vht_bitrate_mask_fixup(struct ath10k *ar,
err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
WMI_PEER_PARAM_FIXED_RATE, rate);
if (err)
- ath10k_warn(ar, "failed to eanble STA %pM peer fixed rate: %d\n",
+ ath10k_warn(ar, "failed to enable STA %pM peer fixed rate: %d\n",
sta->addr, err);
return true;
diff --git a/drivers/net/wireless/ath/ath10k/mac.h b/drivers/net/wireless/ath/ath10k/mac.h
index 1fe84948b868..98d83a26ea60 100644
--- a/drivers/net/wireless/ath/ath10k/mac.h
+++ b/drivers/net/wireless/ath/ath10k/mac.h
@@ -72,6 +72,7 @@ struct ieee80211_txq *ath10k_mac_txq_lookup(struct ath10k *ar,
u8 tid);
int ath10k_mac_ext_resource_config(struct ath10k *ar, u32 val);
void ath10k_mac_wait_tx_complete(struct ath10k *ar);
+int ath10k_mac_rfkill_enable_radio(struct ath10k *ar, bool enable);
static inline void ath10k_tx_h_seq_no(struct ieee80211_vif *vif,
struct sk_buff *skb)
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index a0b4d265c6eb..bb44f5a0941b 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -2567,35 +2567,31 @@ static void ath10k_pci_warm_reset_cpu(struct ath10k *ar)
ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, 0);
- val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
- SOC_RESET_CONTROL_ADDRESS);
- ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
- val | SOC_RESET_CONTROL_CPU_WARM_RST_MASK);
+ val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
+ ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
+ val | SOC_RESET_CONTROL_CPU_WARM_RST_MASK);
}
static void ath10k_pci_warm_reset_ce(struct ath10k *ar)
{
u32 val;
- val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
- SOC_RESET_CONTROL_ADDRESS);
+ val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
- ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
- val | SOC_RESET_CONTROL_CE_RST_MASK);
+ ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
+ val | SOC_RESET_CONTROL_CE_RST_MASK);
msleep(10);
- ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
- val & ~SOC_RESET_CONTROL_CE_RST_MASK);
+ ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
+ val & ~SOC_RESET_CONTROL_CE_RST_MASK);
}
static void ath10k_pci_warm_reset_clear_lf(struct ath10k *ar)
{
u32 val;
- val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
- SOC_LF_TIMER_CONTROL0_ADDRESS);
- ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS +
- SOC_LF_TIMER_CONTROL0_ADDRESS,
- val & ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK);
+ val = ath10k_pci_soc_read32(ar, SOC_LF_TIMER_CONTROL0_ADDRESS);
+ ath10k_pci_soc_write32(ar, SOC_LF_TIMER_CONTROL0_ADDRESS,
+ val & ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK);
}
static int ath10k_pci_warm_reset(struct ath10k *ar)
@@ -3490,7 +3486,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
struct ath10k_pci *ar_pci;
enum ath10k_hw_rev hw_rev;
struct ath10k_bus_params bus_params = {};
- bool pci_ps;
+ bool pci_ps, is_qca988x = false;
int (*pci_soft_reset)(struct ath10k *ar);
int (*pci_hard_reset)(struct ath10k *ar);
u32 (*targ_cpu_to_ce_addr)(struct ath10k *ar, u32 addr);
@@ -3500,6 +3496,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
case QCA988X_2_0_DEVICE_ID:
hw_rev = ATH10K_HW_QCA988X;
pci_ps = false;
+ is_qca988x = true;
pci_soft_reset = ath10k_pci_warm_reset;
pci_hard_reset = ath10k_pci_qca988x_chip_reset;
targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr;
@@ -3619,25 +3616,34 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
goto err_deinit_irq;
}
+ bus_params.dev_type = ATH10K_DEV_TYPE_LL;
+ bus_params.link_can_suspend = true;
+ /* Read CHIP_ID before reset to catch QCA9880-AR1A v1 devices that
+ * fall off the bus during chip_reset. These chips share the same pci
+ * device id as the QCA9880 BR4A and 2R4E, hence this explicit check.
+ */
+ if (is_qca988x) {
+ bus_params.chip_id =
+ ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
+ if (bus_params.chip_id != 0xffffffff) {
+ if (!ath10k_pci_chip_is_supported(pdev->device,
+ bus_params.chip_id))
+ goto err_unsupported;
+ }
+ }
+
ret = ath10k_pci_chip_reset(ar);
if (ret) {
ath10k_err(ar, "failed to reset chip: %d\n", ret);
goto err_free_irq;
}
- bus_params.dev_type = ATH10K_DEV_TYPE_LL;
- bus_params.link_can_suspend = true;
bus_params.chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
- if (bus_params.chip_id == 0xffffffff) {
- ath10k_err(ar, "failed to get chip id\n");
- goto err_free_irq;
- }
+ if (bus_params.chip_id == 0xffffffff)
+ goto err_unsupported;
- if (!ath10k_pci_chip_is_supported(pdev->device, bus_params.chip_id)) {
- ath10k_err(ar, "device %04x with chip_id %08x isn't supported\n",
- pdev->device, bus_params.chip_id);
+ if (!ath10k_pci_chip_is_supported(pdev->device, bus_params.chip_id))
goto err_free_irq;
- }
ret = ath10k_core_register(ar, &bus_params);
if (ret) {
@@ -3647,6 +3653,10 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
return 0;
+err_unsupported:
+ ath10k_err(ar, "device %04x with chip_id %08x isn't supported\n",
+ pdev->device, bus_params.chip_id);
+
err_free_irq:
ath10k_pci_free_irq(ar);
ath10k_pci_rx_retry_sync(ar);
diff --git a/drivers/net/wireless/ath/ath10k/qmi.c b/drivers/net/wireless/ath/ath10k/qmi.c
index 3b63b6257c43..a0ba07b85362 100644
--- a/drivers/net/wireless/ath/ath10k/qmi.c
+++ b/drivers/net/wireless/ath/ath10k/qmi.c
@@ -111,6 +111,7 @@ static int ath10k_qmi_msa_mem_info_send_sync_msg(struct ath10k_qmi *qmi)
struct wlfw_msa_info_resp_msg_v01 resp = {};
struct wlfw_msa_info_req_msg_v01 req = {};
struct ath10k *ar = qmi->ar;
+ phys_addr_t max_mapped_addr;
struct qmi_txn txn;
int ret;
int i;
@@ -150,8 +151,20 @@ static int ath10k_qmi_msa_mem_info_send_sync_msg(struct ath10k_qmi *qmi)
goto out;
}
+ max_mapped_addr = qmi->msa_pa + qmi->msa_mem_size;
qmi->nr_mem_region = resp.mem_region_info_len;
for (i = 0; i < resp.mem_region_info_len; i++) {
+ if (resp.mem_region_info[i].size > qmi->msa_mem_size ||
+ resp.mem_region_info[i].region_addr > max_mapped_addr ||
+ resp.mem_region_info[i].region_addr < qmi->msa_pa ||
+ resp.mem_region_info[i].size +
+ resp.mem_region_info[i].region_addr > max_mapped_addr) {
+ ath10k_err(ar, "received out of range memory region address 0x%llx with size 0x%x, aborting\n",
+ resp.mem_region_info[i].region_addr,
+ resp.mem_region_info[i].size);
+ ret = -EINVAL;
+ goto fail_unwind;
+ }
qmi->mem_region[i].addr = resp.mem_region_info[i].region_addr;
qmi->mem_region[i].size = resp.mem_region_info[i].size;
qmi->mem_region[i].secure = resp.mem_region_info[i].secure_flag;
@@ -165,6 +178,8 @@ static int ath10k_qmi_msa_mem_info_send_sync_msg(struct ath10k_qmi *qmi)
ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi msa mem info request completed\n");
return 0;
+fail_unwind:
+ memset(&qmi->mem_region[0], 0, sizeof(qmi->mem_region[0]) * i);
out:
return ret;
}
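The added loop rejects any firmware-reported region that is not fully contained in the MSA window [msa_pa, msa_pa + msa_mem_size), unwinding anything already copied. Restated as a single containment predicate (a sketch; the size <= msa_mem_size test also keeps addr + size from wrapping):

    bool region_ok = size <= qmi->msa_mem_size &&
                     addr >= qmi->msa_pa &&
                     addr <= max_mapped_addr &&
                     addr + size <= max_mapped_addr;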
@@ -291,10 +306,16 @@ static int ath10k_qmi_send_cal_report_req(struct ath10k_qmi *qmi)
struct wlfw_cal_report_resp_msg_v01 resp = {};
struct wlfw_cal_report_req_msg_v01 req = {};
struct ath10k *ar = qmi->ar;
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
struct qmi_txn txn;
int i, j = 0;
int ret;
+ if (ar_snoc->xo_cal_supported) {
+ req.xo_cal_data_valid = 1;
+ req.xo_cal_data = ar_snoc->xo_cal_data;
+ }
+
ret = qmi_txn_init(&qmi->qmi_hdl, &txn, wlfw_cal_report_resp_msg_v01_ei,
&resp);
if (ret < 0)
@@ -581,22 +602,29 @@ static int ath10k_qmi_host_cap_send_sync(struct ath10k_qmi *qmi)
{
struct wlfw_host_cap_resp_msg_v01 resp = {};
struct wlfw_host_cap_req_msg_v01 req = {};
+ struct qmi_elem_info *req_ei;
struct ath10k *ar = qmi->ar;
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
struct qmi_txn txn;
int ret;
req.daemon_support_valid = 1;
req.daemon_support = 0;
- ret = qmi_txn_init(&qmi->qmi_hdl, &txn,
- wlfw_host_cap_resp_msg_v01_ei, &resp);
+ ret = qmi_txn_init(&qmi->qmi_hdl, &txn, wlfw_host_cap_resp_msg_v01_ei,
+ &resp);
if (ret < 0)
goto out;
+ if (test_bit(ATH10K_SNOC_FLAG_8BIT_HOST_CAP_QUIRK, &ar_snoc->flags))
+ req_ei = wlfw_host_cap_8bit_req_msg_v01_ei;
+ else
+ req_ei = wlfw_host_cap_req_msg_v01_ei;
+
ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
QMI_WLFW_HOST_CAP_REQ_V01,
WLFW_HOST_CAP_REQ_MSG_V01_MAX_MSG_LEN,
- wlfw_host_cap_req_msg_v01_ei, &req);
+ req_ei, &req);
if (ret < 0) {
qmi_txn_cancel(&txn);
ath10k_err(ar, "failed to send host capability request: %d\n", ret);
@@ -643,7 +671,7 @@ int ath10k_qmi_set_fw_log_mode(struct ath10k *ar, u8 fw_log_mode)
wlfw_ini_req_msg_v01_ei, &req);
if (ret < 0) {
qmi_txn_cancel(&txn);
- ath10k_err(ar, "fail to send fw log reqest: %d\n", ret);
+ ath10k_err(ar, "failed to send fw log request: %d\n", ret);
goto out;
}
@@ -652,7 +680,7 @@ int ath10k_qmi_set_fw_log_mode(struct ath10k *ar, u8 fw_log_mode)
goto out;
if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
- ath10k_err(ar, "fw log request rejectedr: %d\n",
+ ath10k_err(ar, "fw log request rejected: %d\n",
resp.resp.error);
ret = -EINVAL;
goto out;
@@ -671,6 +699,7 @@ ath10k_qmi_ind_register_send_sync_msg(struct ath10k_qmi *qmi)
struct wlfw_ind_register_resp_msg_v01 resp = {};
struct wlfw_ind_register_req_msg_v01 req = {};
struct ath10k *ar = qmi->ar;
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
struct qmi_txn txn;
int ret;
@@ -681,6 +710,11 @@ ath10k_qmi_ind_register_send_sync_msg(struct ath10k_qmi *qmi)
req.msa_ready_enable_valid = 1;
req.msa_ready_enable = 1;
+ if (ar_snoc->xo_cal_supported) {
+ req.xo_cal_enable_valid = 1;
+ req.xo_cal_enable = 1;
+ }
+
ret = qmi_txn_init(&qmi->qmi_hdl, &txn,
wlfw_ind_register_resp_msg_v01_ei, &resp);
if (ret < 0)
@@ -739,6 +773,13 @@ static void ath10k_qmi_event_server_arrive(struct ath10k_qmi *qmi)
if (ret)
return;
+ /*
+ * HACK: sleep for a while in between receiving the msa info response
+ * and the XPU update to prevent SDM845 from crashing due to a security
+ * violation, when running MPSS.AT.4.0.c2-01184-SDM845_GEN_PACK-1.
+ */
+ msleep(20);
+
ret = ath10k_qmi_setup_msa_permissions(qmi);
if (ret)
return;
@@ -795,9 +836,13 @@ ath10k_qmi_driver_event_post(struct ath10k_qmi *qmi,
static void ath10k_qmi_event_server_exit(struct ath10k_qmi *qmi)
{
struct ath10k *ar = qmi->ar;
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
ath10k_qmi_remove_msa_permission(qmi);
ath10k_core_free_board_files(ar);
+ if (!test_bit(ATH10K_SNOC_FLAG_UNREGISTERING, &ar_snoc->flags))
+ ath10k_snoc_fw_crashed_dump(ar);
+
ath10k_snoc_fw_indication(ar, ATH10K_QMI_EVENT_FW_DOWN_IND);
ath10k_dbg(ar, ATH10K_DBG_QMI, "wifi fw qmi service disconnected\n");
}
diff --git a/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.c b/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.c
index 1fe05c6218c3..86fcf4e1de5f 100644
--- a/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.c
+++ b/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.c
@@ -1988,6 +1988,28 @@ struct qmi_elem_info wlfw_host_cap_req_msg_v01_ei[] = {
{}
};
+struct qmi_elem_info wlfw_host_cap_8bit_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ daemon_support_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ daemon_support),
+ },
+ {}
+};
+
struct qmi_elem_info wlfw_host_cap_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
diff --git a/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.h b/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.h
index bca1186e1560..4d107e1364a8 100644
--- a/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.h
+++ b/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.h
@@ -575,6 +575,7 @@ struct wlfw_host_cap_req_msg_v01 {
#define WLFW_HOST_CAP_REQ_MSG_V01_MAX_MSG_LEN 189
extern struct qmi_elem_info wlfw_host_cap_req_msg_v01_ei[];
+extern struct qmi_elem_info wlfw_host_cap_8bit_req_msg_v01_ei[];
struct wlfw_host_cap_resp_msg_v01 {
struct qmi_response_type_v01 resp;
diff --git a/drivers/net/wireless/ath/ath10k/sdio.c b/drivers/net/wireless/ath/ath10k/sdio.c
index 9870d2d095c8..120200a93bcc 100644
--- a/drivers/net/wireless/ath/ath10k/sdio.c
+++ b/drivers/net/wireless/ath/ath10k/sdio.c
@@ -2086,9 +2086,6 @@ static int ath10k_sdio_probe(struct sdio_func *func,
goto err_free_wq;
}
- /* TODO: remove this once SDIO support is fully implemented */
- ath10k_warn(ar, "WARNING: ath10k SDIO support is work-in-progress, problems may arise!\n");
-
return 0;
err_free_wq:
diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c
index b491361e6ed4..16177497bba7 100644
--- a/drivers/net/wireless/ath/ath10k/snoc.c
+++ b/drivers/net/wireless/ath/ath10k/snoc.c
@@ -9,9 +9,11 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/regulator/consumer.h>
#include "ce.h"
+#include "coredump.h"
#include "debug.h"
#include "hif.h"
#include "htc.h"
@@ -36,15 +38,15 @@ static char *const ce_name[] = {
"WLAN_CE_11",
};
-static struct ath10k_vreg_info vreg_cfg[] = {
- {NULL, "vdd-0.8-cx-mx", 800000, 850000, 0, 0, false},
- {NULL, "vdd-1.8-xo", 1800000, 1850000, 0, 0, false},
- {NULL, "vdd-1.3-rfa", 1300000, 1350000, 0, 0, false},
- {NULL, "vdd-3.3-ch0", 3300000, 3350000, 0, 0, false},
+static const char * const ath10k_regulators[] = {
+ "vdd-0.8-cx-mx",
+ "vdd-1.8-xo",
+ "vdd-1.3-rfa",
+ "vdd-3.3-ch0",
};
-static struct ath10k_clk_info clk_cfg[] = {
- {NULL, "cxo_ref_clk_pin", 0, false},
+static const char * const ath10k_clocks[] = {
+ "cxo_ref_clk_pin",
};
static void ath10k_snoc_htc_tx_cb(struct ath10k_ce_pipe *ce_state);
@@ -976,8 +978,7 @@ static int ath10k_snoc_wlan_enable(struct ath10k *ar,
sizeof(struct ath10k_svc_pipe_cfg);
cfg.ce_svc_cfg = (struct ath10k_svc_pipe_cfg *)
&target_service_to_ce_map_wlan;
- cfg.num_shadow_reg_cfg = sizeof(target_shadow_reg_cfg_map) /
- sizeof(struct ath10k_shadow_reg_cfg);
+ cfg.num_shadow_reg_cfg = ARRAY_SIZE(target_shadow_reg_cfg_map);
cfg.shadow_reg_cfg = (struct ath10k_shadow_reg_cfg *)
&target_shadow_reg_cfg_map;
@@ -1257,10 +1258,29 @@ static int ath10k_snoc_resource_init(struct ath10k *ar)
ar_snoc->ce_irqs[i].irq_line = res->start;
}
+ ret = device_property_read_u32(&pdev->dev, "qcom,xo-cal-data",
+ &ar_snoc->xo_cal_data);
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc xo-cal-data return %d\n", ret);
+ if (ret == 0) {
+ ar_snoc->xo_cal_supported = true;
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "xo cal data %x\n",
+ ar_snoc->xo_cal_data);
+ }
+ ret = 0;
+
out:
return ret;
}
+static void ath10k_snoc_quirks_init(struct ath10k *ar)
+{
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ struct device *dev = &ar_snoc->dev->dev;
+
+ if (of_property_read_bool(dev->of_node, "qcom,snoc-host-cap-8bit-quirk"))
+ set_bit(ATH10K_SNOC_FLAG_8BIT_HOST_CAP_QUIRK, &ar_snoc->flags);
+}
+
int ath10k_snoc_fw_indication(struct ath10k *ar, u64 type)
{
struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
@@ -1337,296 +1357,102 @@ static void ath10k_snoc_release_resource(struct ath10k *ar)
ath10k_ce_free_pipe(ar, i);
}
-static int ath10k_get_vreg_info(struct ath10k *ar, struct device *dev,
- struct ath10k_vreg_info *vreg_info)
-{
- struct regulator *reg;
- int ret = 0;
-
- reg = devm_regulator_get_optional(dev, vreg_info->name);
-
- if (IS_ERR(reg)) {
- ret = PTR_ERR(reg);
-
- if (ret == -EPROBE_DEFER) {
- ath10k_err(ar, "EPROBE_DEFER for regulator: %s\n",
- vreg_info->name);
- return ret;
- }
- if (vreg_info->required) {
- ath10k_err(ar, "Regulator %s doesn't exist: %d\n",
- vreg_info->name, ret);
- return ret;
- }
- ath10k_dbg(ar, ATH10K_DBG_SNOC,
- "Optional regulator %s doesn't exist: %d\n",
- vreg_info->name, ret);
- goto done;
- }
-
- vreg_info->reg = reg;
-
-done:
- ath10k_dbg(ar, ATH10K_DBG_SNOC,
- "snog vreg %s min_v %u max_v %u load_ua %u settle_delay %lu\n",
- vreg_info->name, vreg_info->min_v, vreg_info->max_v,
- vreg_info->load_ua, vreg_info->settle_delay);
-
- return 0;
-}
-
-static int ath10k_get_clk_info(struct ath10k *ar, struct device *dev,
- struct ath10k_clk_info *clk_info)
-{
- struct clk *handle;
- int ret = 0;
-
- handle = devm_clk_get(dev, clk_info->name);
- if (IS_ERR(handle)) {
- ret = PTR_ERR(handle);
- if (clk_info->required) {
- ath10k_err(ar, "snoc clock %s isn't available: %d\n",
- clk_info->name, ret);
- return ret;
- }
- ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc ignoring clock %s: %d\n",
- clk_info->name,
- ret);
- return 0;
- }
-
- ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc clock %s freq %u\n",
- clk_info->name, clk_info->freq);
-
- clk_info->handle = handle;
-
- return ret;
-}
-
-static int __ath10k_snoc_vreg_on(struct ath10k *ar,
- struct ath10k_vreg_info *vreg_info)
-{
- int ret;
-
- ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc regulator %s being enabled\n",
- vreg_info->name);
-
- ret = regulator_set_voltage(vreg_info->reg, vreg_info->min_v,
- vreg_info->max_v);
- if (ret) {
- ath10k_err(ar,
- "failed to set regulator %s voltage-min: %d voltage-max: %d\n",
- vreg_info->name, vreg_info->min_v, vreg_info->max_v);
- return ret;
- }
-
- if (vreg_info->load_ua) {
- ret = regulator_set_load(vreg_info->reg, vreg_info->load_ua);
- if (ret < 0) {
- ath10k_err(ar, "failed to set regulator %s load: %d\n",
- vreg_info->name, vreg_info->load_ua);
- goto err_set_load;
- }
- }
-
- ret = regulator_enable(vreg_info->reg);
- if (ret) {
- ath10k_err(ar, "failed to enable regulator %s\n",
- vreg_info->name);
- goto err_enable;
- }
-
- if (vreg_info->settle_delay)
- udelay(vreg_info->settle_delay);
-
- return 0;
-
-err_enable:
- regulator_set_load(vreg_info->reg, 0);
-err_set_load:
- regulator_set_voltage(vreg_info->reg, 0, vreg_info->max_v);
-
- return ret;
-}
-
-static int __ath10k_snoc_vreg_off(struct ath10k *ar,
- struct ath10k_vreg_info *vreg_info)
+static int ath10k_hw_power_on(struct ath10k *ar)
{
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
int ret;
- ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc regulator %s being disabled\n",
- vreg_info->name);
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "soc power on\n");
- ret = regulator_disable(vreg_info->reg);
+ ret = regulator_bulk_enable(ar_snoc->num_vregs, ar_snoc->vregs);
if (ret)
- ath10k_err(ar, "failed to disable regulator %s\n",
- vreg_info->name);
-
- ret = regulator_set_load(vreg_info->reg, 0);
- if (ret < 0)
- ath10k_err(ar, "failed to set load %s\n", vreg_info->name);
+ return ret;
- ret = regulator_set_voltage(vreg_info->reg, 0, vreg_info->max_v);
+ ret = clk_bulk_prepare_enable(ar_snoc->num_clks, ar_snoc->clks);
if (ret)
- ath10k_err(ar, "failed to set voltage %s\n", vreg_info->name);
+ goto vreg_off;
return ret;
-}
-
-static int ath10k_snoc_vreg_on(struct ath10k *ar)
-{
- struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
- struct ath10k_vreg_info *vreg_info;
- int ret = 0;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(vreg_cfg); i++) {
- vreg_info = &ar_snoc->vreg[i];
-
- if (!vreg_info->reg)
- continue;
-
- ret = __ath10k_snoc_vreg_on(ar, vreg_info);
- if (ret)
- goto err_reg_config;
- }
-
- return 0;
-
-err_reg_config:
- for (i = i - 1; i >= 0; i--) {
- vreg_info = &ar_snoc->vreg[i];
-
- if (!vreg_info->reg)
- continue;
-
- __ath10k_snoc_vreg_off(ar, vreg_info);
- }
+vreg_off:
+ regulator_bulk_disable(ar_snoc->num_vregs, ar_snoc->vregs);
return ret;
}
-static int ath10k_snoc_vreg_off(struct ath10k *ar)
+static int ath10k_hw_power_off(struct ath10k *ar)
{
struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
- struct ath10k_vreg_info *vreg_info;
- int ret = 0;
- int i;
-
- for (i = ARRAY_SIZE(vreg_cfg) - 1; i >= 0; i--) {
- vreg_info = &ar_snoc->vreg[i];
- if (!vreg_info->reg)
- continue;
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "soc power off\n");
- ret = __ath10k_snoc_vreg_off(ar, vreg_info);
- }
+ clk_bulk_disable_unprepare(ar_snoc->num_clks, ar_snoc->clks);
- return ret;
+ return regulator_bulk_disable(ar_snoc->num_vregs, ar_snoc->vregs);
}
-static int ath10k_snoc_clk_init(struct ath10k *ar)
+static void ath10k_msa_dump_memory(struct ath10k *ar,
+ struct ath10k_fw_crash_data *crash_data)
{
struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
- struct ath10k_clk_info *clk_info;
- int ret = 0;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(clk_cfg); i++) {
- clk_info = &ar_snoc->clk[i];
-
- if (!clk_info->handle)
- continue;
-
- ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc clock %s being enabled\n",
- clk_info->name);
+ const struct ath10k_hw_mem_layout *mem_layout;
+ const struct ath10k_mem_region *current_region;
+ struct ath10k_dump_ram_data_hdr *hdr;
+ size_t buf_len;
+ u8 *buf;
- if (clk_info->freq) {
- ret = clk_set_rate(clk_info->handle, clk_info->freq);
-
- if (ret) {
- ath10k_err(ar, "failed to set clock %s freq %u\n",
- clk_info->name, clk_info->freq);
- goto err_clock_config;
- }
- }
-
- ret = clk_prepare_enable(clk_info->handle);
- if (ret) {
- ath10k_err(ar, "failed to enable clock %s\n",
- clk_info->name);
- goto err_clock_config;
- }
- }
-
- return 0;
-
-err_clock_config:
- for (i = i - 1; i >= 0; i--) {
- clk_info = &ar_snoc->clk[i];
-
- if (!clk_info->handle)
- continue;
-
- clk_disable_unprepare(clk_info->handle);
- }
+ if (!crash_data || !crash_data->ramdump_buf)
+ return;
- return ret;
-}
+ mem_layout = ath10k_coredump_get_mem_layout(ar);
+ if (!mem_layout)
+ return;
-static int ath10k_snoc_clk_deinit(struct ath10k *ar)
-{
- struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
- struct ath10k_clk_info *clk_info;
- int i;
+ current_region = &mem_layout->region_table.regions[0];
- for (i = 0; i < ARRAY_SIZE(clk_cfg); i++) {
- clk_info = &ar_snoc->clk[i];
+ buf = crash_data->ramdump_buf;
+ buf_len = crash_data->ramdump_buf_len;
+ memset(buf, 0, buf_len);
- if (!clk_info->handle)
- continue;
+ /* Reserve space for the header. */
+ hdr = (void *)buf;
+ buf += sizeof(*hdr);
+ buf_len -= sizeof(*hdr);
- ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc clock %s being disabled\n",
- clk_info->name);
+ hdr->region_type = cpu_to_le32(current_region->type);
+ hdr->start = cpu_to_le32((unsigned long)ar_snoc->qmi->msa_va);
+ hdr->length = cpu_to_le32(ar_snoc->qmi->msa_mem_size);
- clk_disable_unprepare(clk_info->handle);
+ if (current_region->len < ar_snoc->qmi->msa_mem_size) {
+ memcpy(buf, ar_snoc->qmi->msa_va, current_region->len);
+ ath10k_warn(ar, "msa dump length is less than msa size %x, %x\n",
+ current_region->len, ar_snoc->qmi->msa_mem_size);
+ } else {
+ memcpy(buf, ar_snoc->qmi->msa_va, ar_snoc->qmi->msa_mem_size);
}
-
- return 0;
}
-static int ath10k_hw_power_on(struct ath10k *ar)
+void ath10k_snoc_fw_crashed_dump(struct ath10k *ar)
{
- int ret;
+ struct ath10k_fw_crash_data *crash_data;
+ char guid[UUID_STRING_LEN + 1];
- ath10k_dbg(ar, ATH10K_DBG_SNOC, "soc power on\n");
+ mutex_lock(&ar->dump_mutex);
- ret = ath10k_snoc_vreg_on(ar);
- if (ret)
- return ret;
+ spin_lock_bh(&ar->data_lock);
+ ar->stats.fw_crash_counter++;
+ spin_unlock_bh(&ar->data_lock);
- ret = ath10k_snoc_clk_init(ar);
- if (ret)
- goto vreg_off;
+ crash_data = ath10k_coredump_new(ar);
- return ret;
-
-vreg_off:
- ath10k_snoc_vreg_off(ar);
- return ret;
-}
-
-static int ath10k_hw_power_off(struct ath10k *ar)
-{
- int ret;
-
- ath10k_dbg(ar, ATH10K_DBG_SNOC, "soc power off\n");
-
- ath10k_snoc_clk_deinit(ar);
-
- ret = ath10k_snoc_vreg_off(ar);
+ if (crash_data)
+ scnprintf(guid, sizeof(guid), "%pUl", &crash_data->guid);
+ else
+ scnprintf(guid, sizeof(guid), "n/a");
- return ret;
+ ath10k_err(ar, "firmware crashed! (guid %s)\n", guid);
+ ath10k_print_driver_info(ar);
+ ath10k_msa_dump_memory(ar, crash_data);
+ mutex_unlock(&ar->dump_mutex);
}
static const struct of_device_id ath10k_snoc_dt_match[] = {
@@ -1678,6 +1504,8 @@ static int ath10k_snoc_probe(struct platform_device *pdev)
ar->ce_priv = &ar_snoc->ce;
msa_size = drv_data->msa_size;
+ ath10k_snoc_quirks_init(ar);
+
ret = ath10k_snoc_resource_init(ar);
if (ret) {
ath10k_warn(ar, "failed to initialize resource: %d\n", ret);
@@ -1695,20 +1523,37 @@ static int ath10k_snoc_probe(struct platform_device *pdev)
goto err_release_resource;
}
- ar_snoc->vreg = vreg_cfg;
- for (i = 0; i < ARRAY_SIZE(vreg_cfg); i++) {
- ret = ath10k_get_vreg_info(ar, dev, &ar_snoc->vreg[i]);
- if (ret)
- goto err_free_irq;
+ ar_snoc->num_vregs = ARRAY_SIZE(ath10k_regulators);
+ ar_snoc->vregs = devm_kcalloc(&pdev->dev, ar_snoc->num_vregs,
+ sizeof(*ar_snoc->vregs), GFP_KERNEL);
+ if (!ar_snoc->vregs) {
+ ret = -ENOMEM;
+ goto err_free_irq;
}
+ for (i = 0; i < ar_snoc->num_vregs; i++)
+ ar_snoc->vregs[i].supply = ath10k_regulators[i];
- ar_snoc->clk = clk_cfg;
- for (i = 0; i < ARRAY_SIZE(clk_cfg); i++) {
- ret = ath10k_get_clk_info(ar, dev, &ar_snoc->clk[i]);
- if (ret)
- goto err_free_irq;
+ ret = devm_regulator_bulk_get(&pdev->dev, ar_snoc->num_vregs,
+ ar_snoc->vregs);
+ if (ret < 0)
+ goto err_free_irq;
+
+ ar_snoc->num_clks = ARRAY_SIZE(ath10k_clocks);
+ ar_snoc->clks = devm_kcalloc(&pdev->dev, ar_snoc->num_clks,
+ sizeof(*ar_snoc->clks), GFP_KERNEL);
+ if (!ar_snoc->clks) {
+ ret = -ENOMEM;
+ goto err_free_irq;
}
+ for (i = 0; i < ar_snoc->num_clks; i++)
+ ar_snoc->clks[i].id = ath10k_clocks[i];
+
+ ret = devm_clk_bulk_get_optional(&pdev->dev, ar_snoc->num_clks,
+ ar_snoc->clks);
+ if (ret)
+ goto err_free_irq;
+
ret = ath10k_hw_power_on(ar);
if (ret) {
ath10k_err(ar, "failed to power on device: %d\n", ret);
diff --git a/drivers/net/wireless/ath/ath10k/snoc.h b/drivers/net/wireless/ath/ath10k/snoc.h
index d62f53501fbb..c05df45a3945 100644
--- a/drivers/net/wireless/ath/ath10k/snoc.h
+++ b/drivers/net/wireless/ath/ath10k/snoc.h
@@ -42,29 +42,16 @@ struct ath10k_snoc_ce_irq {
u32 irq_line;
};
-struct ath10k_vreg_info {
- struct regulator *reg;
- const char *name;
- u32 min_v;
- u32 max_v;
- u32 load_ua;
- unsigned long settle_delay;
- bool required;
-};
-
-struct ath10k_clk_info {
- struct clk *handle;
- const char *name;
- u32 freq;
- bool required;
-};
-
enum ath10k_snoc_flags {
ATH10K_SNOC_FLAG_REGISTERED,
ATH10K_SNOC_FLAG_UNREGISTERING,
ATH10K_SNOC_FLAG_RECOVERY,
+ ATH10K_SNOC_FLAG_8BIT_HOST_CAP_QUIRK,
};
+struct clk_bulk_data;
+struct regulator_bulk_data;
+
struct ath10k_snoc {
struct platform_device *dev;
struct ath10k *ar;
@@ -76,10 +63,14 @@ struct ath10k_snoc {
struct ath10k_snoc_ce_irq ce_irqs[CE_COUNT_MAX];
struct ath10k_ce ce;
struct timer_list rx_post_retry;
- struct ath10k_vreg_info *vreg;
- struct ath10k_clk_info *clk;
+ struct regulator_bulk_data *vregs;
+ size_t num_vregs;
+ struct clk_bulk_data *clks;
+ size_t num_clks;
struct ath10k_qmi *qmi;
unsigned long flags;
+ bool xo_cal_supported;
+ u32 xo_cal_data;
};
static inline struct ath10k_snoc *ath10k_snoc_priv(struct ath10k *ar)
@@ -88,5 +79,6 @@ static inline struct ath10k_snoc *ath10k_snoc_priv(struct ath10k *ar)
}
int ath10k_snoc_fw_indication(struct ath10k *ar, u64 type);
+void ath10k_snoc_fw_crashed_dump(struct ath10k *ar);
#endif /* _SNOC_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c
index 4102df016931..39abf8b12903 100644
--- a/drivers/net/wireless/ath/ath10k/txrx.c
+++ b/drivers/net/wireless/ath/ath10k/txrx.c
@@ -95,6 +95,8 @@ int ath10k_txrx_tx_unref(struct ath10k_htt *htt,
info = IEEE80211_SKB_CB(msdu);
memset(&info->status, 0, sizeof(info->status));
+ info->status.rates[0].idx = -1;
+
trace_ath10k_txrx_tx_unref(ar, tx_done->msdu_id);
if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
diff --git a/drivers/net/wireless/ath/ath10k/usb.c b/drivers/net/wireless/ath/ath10k/usb.c
index e1420f67f776..1e0343081be9 100644
--- a/drivers/net/wireless/ath/ath10k/usb.c
+++ b/drivers/net/wireless/ath/ath10k/usb.c
@@ -38,6 +38,10 @@ ath10k_usb_alloc_urb_from_pipe(struct ath10k_usb_pipe *pipe)
struct ath10k_urb_context *urb_context = NULL;
unsigned long flags;
+ /* bail if this pipe is not initialized */
+ if (!pipe->ar_usb)
+ return NULL;
+
spin_lock_irqsave(&pipe->ar_usb->cs_lock, flags);
if (!list_empty(&pipe->urb_list_head)) {
urb_context = list_first_entry(&pipe->urb_list_head,
@@ -55,6 +59,10 @@ static void ath10k_usb_free_urb_to_pipe(struct ath10k_usb_pipe *pipe,
{
unsigned long flags;
+ /* bail if this pipe is not initialized */
+ if (!pipe->ar_usb)
+ return;
+
spin_lock_irqsave(&pipe->ar_usb->cs_lock, flags);
pipe->urb_cnt++;
@@ -435,6 +443,7 @@ static int ath10k_usb_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
ath10k_dbg(ar, ATH10K_DBG_USB_BULK,
"usb bulk transmit failed: %d\n", ret);
usb_unanchor_urb(urb);
+ usb_free_urb(urb);
ret = -EINVAL;
goto err_free_urb_to_pipe;
}
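The usb_free_urb() added above plugs a reference leak on the submit failure path: the URB was unanchored but its last reference was never dropped. The usual anchored-URB lifetime, sketched (the anchor name is illustrative):

    urb = usb_alloc_urb(0, GFP_ATOMIC);           /* ref: ours */
    if (!urb)
            return -ENOMEM;

    usb_anchor_urb(urb, &pipe->submitted);        /* ref: anchor's */
    ret = usb_submit_urb(urb, GFP_ATOMIC);
    if (ret) {
            usb_unanchor_urb(urb);                /* drop the anchor's ref */
            usb_free_urb(urb);                    /* drop ours: urb freed */
            return ret;
    }
    usb_free_urb(urb);                            /* drop ours; anchor and
                                                   * completion keep it alive */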
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
index 4d5d10c01064..69a1ec53df29 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
@@ -409,6 +409,49 @@ static int ath10k_wmi_tlv_event_tx_pause(struct ath10k *ar,
return 0;
}
+static void ath10k_wmi_tlv_event_rfkill_state_change(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ const struct wmi_tlv_rfkill_state_change_ev *ev;
+ const void **tb;
+ bool radio;
+ int ret;
+
+ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath10k_warn(ar,
+ "failed to parse rfkill state change event: %d\n",
+ ret);
+ return;
+ }
+
+ ev = tb[WMI_TLV_TAG_STRUCT_RFKILL_EVENT];
+ if (!ev) {
+ kfree(tb);
+ return;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "wmi tlv rfkill state change gpio %d type %d radio_state %d\n",
+ __le32_to_cpu(ev->gpio_pin_num),
+ __le32_to_cpu(ev->int_type),
+ __le32_to_cpu(ev->radio_state));
+
+ radio = (__le32_to_cpu(ev->radio_state) == WMI_TLV_RFKILL_RADIO_STATE_ON);
+
+ spin_lock_bh(&ar->data_lock);
+
+ if (!radio)
+ ar->hw_rfkill_on = true;
+
+ spin_unlock_bh(&ar->data_lock);
+
+ /* notify cfg80211 radio state change */
+ ath10k_mac_rfkill_enable_radio(ar, radio);
+ wiphy_rfkill_set_hw_state(ar->hw->wiphy, !radio);
+}
+
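The handler drives both sides of rfkill: the firmware radio via the WMI command, and user space via cfg80211, whose API uses "blocked" semantics, hence the negation:

    /* radio_state ON -> radio may run -> hw rfkill is NOT blocked */
    radio = (__le32_to_cpu(ev->radio_state) == WMI_TLV_RFKILL_RADIO_STATE_ON);

    ath10k_mac_rfkill_enable_radio(ar, radio);        /* firmware side  */
    wiphy_rfkill_set_hw_state(ar->hw->wiphy, !radio); /* true == blocked */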
static int ath10k_wmi_tlv_event_temperature(struct ath10k *ar,
struct sk_buff *skb)
{
@@ -629,6 +672,9 @@ static void ath10k_wmi_tlv_op_rx(struct ath10k *ar, struct sk_buff *skb)
case WMI_TLV_TX_PAUSE_EVENTID:
ath10k_wmi_tlv_event_tx_pause(ar, skb);
break;
+ case WMI_TLV_RFKILL_STATE_CHANGE_EVENTID:
+ ath10k_wmi_tlv_event_rfkill_state_change(ar, skb);
+ break;
case WMI_TLV_PDEV_TEMPERATURE_EVENTID:
ath10k_wmi_tlv_event_temperature(ar, skb);
break;
@@ -1201,17 +1247,21 @@ static int ath10k_wmi_tlv_op_pull_svc_rdy_ev(struct ath10k *ar,
arg->max_tx_power = ev->hw_max_tx_power;
arg->ht_cap = ev->ht_cap_info;
arg->vht_cap = ev->vht_cap_info;
+ arg->vht_supp_mcs = ev->vht_supp_mcs;
arg->sw_ver0 = ev->abi.abi_ver0;
arg->sw_ver1 = ev->abi.abi_ver1;
arg->fw_build = ev->fw_build_vers;
arg->phy_capab = ev->phy_capability;
arg->num_rf_chains = ev->num_rf_chains;
arg->eeprom_rd = reg->eeprom_rd;
+ arg->low_2ghz_chan = reg->low_2ghz_chan;
+ arg->high_2ghz_chan = reg->high_2ghz_chan;
arg->low_5ghz_chan = reg->low_5ghz_chan;
arg->high_5ghz_chan = reg->high_5ghz_chan;
arg->num_mem_reqs = ev->num_mem_reqs;
arg->service_map = svc_bmap;
arg->service_map_len = ath10k_wmi_tlv_len(svc_bmap);
+ arg->sys_cap_info = ev->sys_cap_info;
ret = ath10k_wmi_tlv_iter(ar, mem_reqs, ath10k_wmi_tlv_len(mem_reqs),
ath10k_wmi_tlv_parse_mem_reqs, arg);
@@ -1649,8 +1699,9 @@ ath10k_wmi_tlv_op_gen_pdev_set_param(struct ath10k *ar, u32 param_id,
static void
ath10k_wmi_tlv_put_host_mem_chunks(struct ath10k *ar, void *host_mem_chunks)
{
- struct host_memory_chunk *chunk;
+ struct host_memory_chunk_tlv *chunk;
struct wmi_tlv *tlv;
+ dma_addr_t paddr;
int i;
__le16 tlv_len, tlv_tag;
@@ -1666,6 +1717,12 @@ ath10k_wmi_tlv_put_host_mem_chunks(struct ath10k *ar, void *host_mem_chunks)
chunk->size = __cpu_to_le32(ar->wmi.mem_chunks[i].len);
chunk->req_id = __cpu_to_le32(ar->wmi.mem_chunks[i].req_id);
+ if (test_bit(WMI_SERVICE_SUPPORT_EXTEND_ADDRESS,
+ ar->wmi.svc_map)) {
+ paddr = ar->wmi.mem_chunks[i].paddr;
+ chunk->ptr_high = __cpu_to_le32(upper_32_bits(paddr));
+ }
+
ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi-tlv chunk %d len %d, addr 0x%llx, id 0x%x\n",
i,
@@ -1689,7 +1746,7 @@ static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar)
void *ptr;
chunks_len = ar->wmi.num_mem_chunks *
- (sizeof(struct host_memory_chunk) + sizeof(*tlv));
+ (sizeof(struct host_memory_chunk_tlv) + sizeof(*tlv));
len = (sizeof(*tlv) + sizeof(*cmd)) +
(sizeof(*tlv) + sizeof(*cfg)) +
(sizeof(*tlv) + chunks_len);
@@ -4204,6 +4261,26 @@ static struct wmi_pdev_param_map wmi_tlv_pdev_param_map = {
.wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED,
.arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED,
.arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
+ .rfkill_config = WMI_TLV_PDEV_PARAM_HW_RFKILL_CONFIG,
+ .rfkill_enable = WMI_TLV_PDEV_PARAM_RFKILL_ENABLE,
+};
+
+static struct wmi_peer_param_map wmi_tlv_peer_param_map = {
+ .smps_state = WMI_TLV_PEER_SMPS_STATE,
+ .ampdu = WMI_TLV_PEER_AMPDU,
+ .authorize = WMI_TLV_PEER_AUTHORIZE,
+ .chan_width = WMI_TLV_PEER_CHAN_WIDTH,
+ .nss = WMI_TLV_PEER_NSS,
+ .use_4addr = WMI_TLV_PEER_USE_4ADDR,
+ .membership = WMI_TLV_PEER_MEMBERSHIP,
+ .user_pos = WMI_TLV_PEER_USERPOS,
+ .crit_proto_hint_enabled = WMI_TLV_PEER_CRIT_PROTO_HINT_ENABLED,
+ .tx_fail_cnt_thr = WMI_TLV_PEER_TX_FAIL_CNT_THR,
+ .set_hw_retry_cts2s = WMI_TLV_PEER_SET_HW_RETRY_CTS2S,
+ .ibss_atim_win_len = WMI_TLV_PEER_IBSS_ATIM_WINDOW_LENGTH,
+ .phymode = WMI_TLV_PEER_PHYMODE,
+ .use_fixed_power = WMI_TLV_PEER_USE_FIXED_PWR,
+ .dummy_var = WMI_TLV_PEER_DUMMY_VAR,
};
static struct wmi_vdev_param_map wmi_tlv_vdev_param_map = {
@@ -4394,6 +4471,7 @@ void ath10k_wmi_tlv_attach(struct ath10k *ar)
ar->wmi.cmd = &wmi_tlv_cmd_map;
ar->wmi.vdev_param = &wmi_tlv_vdev_param_map;
ar->wmi.pdev_param = &wmi_tlv_pdev_param_map;
+ ar->wmi.peer_param = &wmi_tlv_peer_param_map;
ar->wmi.ops = &wmi_tlv_ops;
ar->wmi.peer_flags = &wmi_tlv_peer_flags_map;
}
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.h b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
index 649b229a41e9..4972dc12991c 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.h
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
@@ -7,6 +7,8 @@
#ifndef _WMI_TLV_H
#define _WMI_TLV_H
+#include <linux/bitops.h>
+
#define WMI_TLV_CMD(grp_id) (((grp_id) << 12) | 0x1)
#define WMI_TLV_EV(grp_id) (((grp_id) << 12) | 0x1)
#define WMI_TLV_CMD_UNSUPPORTED 0
@@ -528,6 +530,24 @@ enum wmi_tlv_vdev_param {
WMI_TLV_VDEV_PARAM_IBSS_PS_1RX_CHAIN_IN_ATIM_WINDOW_ENABLE,
};
+enum wmi_tlv_peer_param {
+ WMI_TLV_PEER_SMPS_STATE = 0x1, /* see %wmi_peer_smps_state */
+ WMI_TLV_PEER_AMPDU = 0x2,
+ WMI_TLV_PEER_AUTHORIZE = 0x3,
+ WMI_TLV_PEER_CHAN_WIDTH = 0x4,
+ WMI_TLV_PEER_NSS = 0x5,
+ WMI_TLV_PEER_USE_4ADDR = 0x6,
+ WMI_TLV_PEER_MEMBERSHIP = 0x7,
+ WMI_TLV_PEER_USERPOS = 0x8,
+ WMI_TLV_PEER_CRIT_PROTO_HINT_ENABLED = 0x9,
+ WMI_TLV_PEER_TX_FAIL_CNT_THR = 0xa,
+ WMI_TLV_PEER_SET_HW_RETRY_CTS2S = 0xb,
+ WMI_TLV_PEER_IBSS_ATIM_WINDOW_LENGTH = 0xc,
+ WMI_TLV_PEER_PHYMODE = 0xd,
+ WMI_TLV_PEER_USE_FIXED_PWR = 0xe,
+ WMI_TLV_PEER_DUMMY_VAR = 0xff,
+};
+
enum wmi_tlv_peer_flags {
WMI_TLV_PEER_AUTH = 0x00000001,
WMI_TLV_PEER_QOS = 0x00000002,
@@ -1409,6 +1429,11 @@ enum wmi_tlv_service {
WMI_TLV_SERVICE_WLAN_HPCS_PULSE = 172,
WMI_TLV_SERVICE_PER_VDEV_CHAINMASK_CONFIG_SUPPORT = 173,
WMI_TLV_SERVICE_TX_DATA_MGMT_ACK_RSSI = 174,
+ WMI_TLV_SERVICE_NAN_DISABLE_SUPPORT = 175,
+ WMI_TLV_SERVICE_HTT_H2T_NO_HTC_HDR_LEN_IN_MSG_LEN = 176,
+ WMI_TLV_SERVICE_COEX_SUPPORT_UNEQUAL_ISOLATION = 177,
+ WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT = 178,
+ WMI_TLV_SERVICE_SUPPORT_EXTEND_ADDRESS = 179,
WMI_TLV_MAX_EXT_SERVICE = 256,
};
@@ -1588,6 +1613,9 @@ wmi_tlv_svc_map_ext(const __le32 *in, unsigned long *out, size_t len)
WMI_TLV_MAX_SERVICE);
SVCMAP(WMI_TLV_SERVICE_TX_DATA_MGMT_ACK_RSSI,
WMI_SERVICE_TX_DATA_ACK_RSSI, WMI_TLV_MAX_SERVICE);
+ SVCMAP(WMI_TLV_SERVICE_SUPPORT_EXTEND_ADDRESS,
+ WMI_SERVICE_SUPPORT_EXTEND_ADDRESS,
+ WMI_TLV_MAX_SERVICE);
}
#undef SVCMAP
@@ -1743,6 +1771,21 @@ struct wmi_tlv_resource_config {
__le32 host_capab;
} __packed;
+/* structure describing host memory chunk. */
+struct host_memory_chunk_tlv {
+ /* id of the request that is passed up in service ready */
+ __le32 req_id;
+
+ /* the physical address of the memory chunk */
+ __le32 ptr;
+
+ /* size of the chunk */
+ __le32 size;
+
+ /* the upper 32 bits of the address; valid only for targets with more than 32 bit addressing */
+ __le32 ptr_high;
+} __packed;
+
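When the firmware advertises WMI_SERVICE_SUPPORT_EXTEND_ADDRESS, a 64-bit chunk address is split between ptr (the low half, set in the unchanged part of ath10k_wmi_tlv_put_host_mem_chunks()) and the new ptr_high field, using the standard helpers. A sketch with an example address:

    dma_addr_t paddr = 0x1a2b3c4d5eULL;                     /* example */

    chunk->ptr      = __cpu_to_le32(lower_32_bits(paddr));  /* 0x2b3c4d5e */
    chunk->ptr_high = __cpu_to_le32(upper_32_bits(paddr));  /* 0x0000001a */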
struct wmi_tlv_init_cmd {
struct wmi_tlv_abi_version abi;
__le32 num_host_mem_chunks;
@@ -2235,6 +2278,31 @@ struct wmi_tlv_tdls_peer_event {
__le32 vdev_id;
} __packed;
+enum wmi_tlv_sys_cap_info_flags {
+ WMI_TLV_SYS_CAP_INFO_RXTX_LED = BIT(0),
+ WMI_TLV_SYS_CAP_INFO_RFKILL = BIT(1),
+};
+
+#define WMI_TLV_RFKILL_CFG_GPIO_PIN_NUM GENMASK(5, 0)
+#define WMI_TLV_RFKILL_CFG_RADIO_LEVEL BIT(6)
+#define WMI_TLV_RFKILL_CFG_PIN_AS_GPIO GENMASK(10, 7)
+
+enum wmi_tlv_rfkill_enable_radio {
+ WMI_TLV_RFKILL_ENABLE_RADIO_ON = 0,
+ WMI_TLV_RFKILL_ENABLE_RADIO_OFF = 1,
+};
+
+enum wmi_tlv_rfkill_radio_state {
+ WMI_TLV_RFKILL_RADIO_STATE_OFF = 1,
+ WMI_TLV_RFKILL_RADIO_STATE_ON = 2,
+};
+
+struct wmi_tlv_rfkill_state_change_ev {
+ __le32 gpio_pin_num;
+ __le32 int_type;
+ __le32 radio_state;
+};
+
void ath10k_wmi_tlv_attach(struct ath10k *ar);
enum wmi_nlo_auth_algorithm {
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index 4f707c6394bb..9f564e2b7a14 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -742,6 +742,19 @@ static struct wmi_cmd_map wmi_10_4_cmd_map = {
.radar_found_cmdid = WMI_10_4_RADAR_FOUND_CMDID,
};
+static struct wmi_peer_param_map wmi_peer_param_map = {
+ .smps_state = WMI_PEER_SMPS_STATE,
+ .ampdu = WMI_PEER_AMPDU,
+ .authorize = WMI_PEER_AUTHORIZE,
+ .chan_width = WMI_PEER_CHAN_WIDTH,
+ .nss = WMI_PEER_NSS,
+ .use_4addr = WMI_PEER_USE_4ADDR,
+ .use_fixed_power = WMI_PEER_USE_FIXED_PWR,
+ .debug = WMI_PEER_DEBUG,
+ .phymode = WMI_PEER_PHYMODE,
+ .dummy_var = WMI_PEER_DUMMY_VAR,
+};
+
/* MAIN WMI VDEV param map */
static struct wmi_vdev_param_map wmi_vdev_param_map = {
.rts_threshold = WMI_VDEV_PARAM_RTS_THRESHOLD,
@@ -4668,16 +4681,13 @@ static void ath10k_tpc_config_disp_tables(struct ath10k *ar,
}
pream_idx = 0;
- for (i = 0; i < __le32_to_cpu(ev->rate_max); i++) {
+ for (i = 0; i < tpc_stats->rate_max; i++) {
memset(tpc_value, 0, sizeof(tpc_value));
memset(buff, 0, sizeof(buff));
if (i == pream_table[pream_idx])
pream_idx++;
- for (j = 0; j < WMI_TPC_TX_N_CHAIN; j++) {
- if (j >= __le32_to_cpu(ev->num_tx_chain))
- break;
-
+ for (j = 0; j < tpc_stats->num_tx_chain; j++) {
tpc[j] = ath10k_tpc_config_get_rate(ar, ev, i, j + 1,
rate_code[i],
type);
@@ -4790,7 +4800,7 @@ void ath10k_wmi_tpc_config_get_rate_code(u8 *rate_code, u16 *pream_table,
void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb)
{
- u32 num_tx_chain;
+ u32 num_tx_chain, rate_max;
u8 rate_code[WMI_TPC_RATE_MAX];
u16 pream_table[WMI_TPC_PREAM_TABLE_MAX];
struct wmi_pdev_tpc_config_event *ev;
@@ -4806,6 +4816,13 @@ void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb)
return;
}
+ rate_max = __le32_to_cpu(ev->rate_max);
+ if (rate_max > WMI_TPC_RATE_MAX) {
+ ath10k_warn(ar, "number of rate is %d greater than TPC configured rate %d\n",
+ rate_max, WMI_TPC_RATE_MAX);
+ rate_max = WMI_TPC_RATE_MAX;
+ }
+
tpc_stats = kzalloc(sizeof(*tpc_stats), GFP_ATOMIC);
if (!tpc_stats)
return;
@@ -4822,8 +4839,8 @@ void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb)
__le32_to_cpu(ev->twice_antenna_reduction);
tpc_stats->power_limit = __le32_to_cpu(ev->power_limit);
tpc_stats->twice_max_rd_power = __le32_to_cpu(ev->twice_max_rd_power);
- tpc_stats->num_tx_chain = __le32_to_cpu(ev->num_tx_chain);
- tpc_stats->rate_max = __le32_to_cpu(ev->rate_max);
+ tpc_stats->num_tx_chain = num_tx_chain;
+ tpc_stats->rate_max = rate_max;
ath10k_tpc_config_disp_tables(ar, ev, tpc_stats,
rate_code, pream_table,
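Both TPC handlers now bound the firmware-supplied num_tx_chain and rate_max before using them as loop limits, so a malformed event can no longer index past the statically sized tables. The same guard written with min_t() for comparison (the patch additionally warns before clamping):

    rate_max = min_t(u32, __le32_to_cpu(ev->rate_max), WMI_TPC_RATE_MAX);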
@@ -5018,16 +5035,13 @@ ath10k_wmi_tpc_stats_final_disp_tables(struct ath10k *ar,
}
pream_idx = 0;
- for (i = 0; i < __le32_to_cpu(ev->rate_max); i++) {
+ for (i = 0; i < tpc_stats->rate_max; i++) {
memset(tpc_value, 0, sizeof(tpc_value));
memset(buff, 0, sizeof(buff));
if (i == pream_table[pream_idx])
pream_idx++;
- for (j = 0; j < WMI_TPC_TX_N_CHAIN; j++) {
- if (j >= __le32_to_cpu(ev->num_tx_chain))
- break;
-
+ for (j = 0; j < tpc_stats->num_tx_chain; j++) {
tpc[j] = ath10k_wmi_tpc_final_get_rate(ar, ev, i, j + 1,
rate_code[i],
type, pream_idx);
@@ -5043,7 +5057,7 @@ ath10k_wmi_tpc_stats_final_disp_tables(struct ath10k *ar,
void ath10k_wmi_event_tpc_final_table(struct ath10k *ar, struct sk_buff *skb)
{
- u32 num_tx_chain;
+ u32 num_tx_chain, rate_max;
u8 rate_code[WMI_TPC_FINAL_RATE_MAX];
u16 pream_table[WMI_TPC_PREAM_TABLE_MAX];
struct wmi_pdev_tpc_final_table_event *ev;
@@ -5051,12 +5065,24 @@ void ath10k_wmi_event_tpc_final_table(struct ath10k *ar, struct sk_buff *skb)
ev = (struct wmi_pdev_tpc_final_table_event *)skb->data;
+ num_tx_chain = __le32_to_cpu(ev->num_tx_chain);
+ if (num_tx_chain > WMI_TPC_TX_N_CHAIN) {
+ ath10k_warn(ar, "number of tx chain is %d greater than TPC final configured tx chain %d\n",
+ num_tx_chain, WMI_TPC_TX_N_CHAIN);
+ return;
+ }
+
+ rate_max = __le32_to_cpu(ev->rate_max);
+ if (rate_max > WMI_TPC_FINAL_RATE_MAX) {
+ ath10k_warn(ar, "number of rate is %d greater than TPC final configured rate %d\n",
+ rate_max, WMI_TPC_FINAL_RATE_MAX);
+ rate_max = WMI_TPC_FINAL_RATE_MAX;
+ }
+
tpc_stats = kzalloc(sizeof(*tpc_stats), GFP_ATOMIC);
if (!tpc_stats)
return;
- num_tx_chain = __le32_to_cpu(ev->num_tx_chain);
-
ath10k_wmi_tpc_config_get_rate_code(rate_code, pream_table,
num_tx_chain);
@@ -5069,8 +5095,8 @@ void ath10k_wmi_event_tpc_final_table(struct ath10k *ar, struct sk_buff *skb)
__le32_to_cpu(ev->twice_antenna_reduction);
tpc_stats->power_limit = __le32_to_cpu(ev->power_limit);
tpc_stats->twice_max_rd_power = __le32_to_cpu(ev->twice_max_rd_power);
- tpc_stats->num_tx_chain = __le32_to_cpu(ev->num_tx_chain);
- tpc_stats->rate_max = __le32_to_cpu(ev->rate_max);
+ tpc_stats->num_tx_chain = num_tx_chain;
+ tpc_stats->rate_max = rate_max;
ath10k_wmi_tpc_stats_final_disp_tables(ar, ev, tpc_stats,
rate_code, pream_table,
@@ -5344,11 +5370,14 @@ ath10k_wmi_main_op_pull_svc_rdy_ev(struct ath10k *ar, struct sk_buff *skb,
arg->max_tx_power = ev->hw_max_tx_power;
arg->ht_cap = ev->ht_cap_info;
arg->vht_cap = ev->vht_cap_info;
+ arg->vht_supp_mcs = ev->vht_supp_mcs;
arg->sw_ver0 = ev->sw_version;
arg->sw_ver1 = ev->sw_version_1;
arg->phy_capab = ev->phy_capability;
arg->num_rf_chains = ev->num_rf_chains;
arg->eeprom_rd = ev->hal_reg_capabilities.eeprom_rd;
+ arg->low_2ghz_chan = ev->hal_reg_capabilities.low_2ghz_chan;
+ arg->high_2ghz_chan = ev->hal_reg_capabilities.high_2ghz_chan;
arg->low_5ghz_chan = ev->hal_reg_capabilities.low_5ghz_chan;
arg->high_5ghz_chan = ev->hal_reg_capabilities.high_5ghz_chan;
arg->num_mem_reqs = ev->num_mem_reqs;
@@ -5383,16 +5412,25 @@ ath10k_wmi_10x_op_pull_svc_rdy_ev(struct ath10k *ar, struct sk_buff *skb,
arg->max_tx_power = ev->hw_max_tx_power;
arg->ht_cap = ev->ht_cap_info;
arg->vht_cap = ev->vht_cap_info;
+ arg->vht_supp_mcs = ev->vht_supp_mcs;
arg->sw_ver0 = ev->sw_version;
arg->phy_capab = ev->phy_capability;
arg->num_rf_chains = ev->num_rf_chains;
arg->eeprom_rd = ev->hal_reg_capabilities.eeprom_rd;
+ arg->low_2ghz_chan = ev->hal_reg_capabilities.low_2ghz_chan;
+ arg->high_2ghz_chan = ev->hal_reg_capabilities.high_2ghz_chan;
arg->low_5ghz_chan = ev->hal_reg_capabilities.low_5ghz_chan;
arg->high_5ghz_chan = ev->hal_reg_capabilities.high_5ghz_chan;
arg->num_mem_reqs = ev->num_mem_reqs;
arg->service_map = ev->wmi_service_bitmap;
arg->service_map_len = sizeof(ev->wmi_service_bitmap);
+ /* Deliberately skipping ev->sys_cap_info as WMI and WMI-TLV have
+ * different values. We would need a translation to handle that,
+ * but as we don't currently need anything from sys_cap_info from
+ * the WMI interface (only from WMI-TLV), it is safest to skip it.
+ */
+
n = min_t(size_t, __le32_to_cpu(arg->num_mem_reqs),
ARRAY_SIZE(arg->mem_reqs));
for (i = 0; i < n; i++)
@@ -5432,6 +5470,7 @@ static void ath10k_wmi_event_service_ready_work(struct work_struct *work)
ar->hw_max_tx_power = __le32_to_cpu(arg.max_tx_power);
ar->ht_cap_info = __le32_to_cpu(arg.ht_cap);
ar->vht_cap_info = __le32_to_cpu(arg.vht_cap);
+ ar->vht_supp_mcs = __le32_to_cpu(arg.vht_supp_mcs);
ar->fw_version_major =
(__le32_to_cpu(arg.sw_ver0) & 0xff000000) >> 24;
ar->fw_version_minor = (__le32_to_cpu(arg.sw_ver0) & 0x00ffffff);
@@ -5441,11 +5480,16 @@ static void ath10k_wmi_event_service_ready_work(struct work_struct *work)
ar->phy_capability = __le32_to_cpu(arg.phy_capab);
ar->num_rf_chains = __le32_to_cpu(arg.num_rf_chains);
ar->hw_eeprom_rd = __le32_to_cpu(arg.eeprom_rd);
+ ar->low_2ghz_chan = __le32_to_cpu(arg.low_2ghz_chan);
+ ar->high_2ghz_chan = __le32_to_cpu(arg.high_2ghz_chan);
ar->low_5ghz_chan = __le32_to_cpu(arg.low_5ghz_chan);
ar->high_5ghz_chan = __le32_to_cpu(arg.high_5ghz_chan);
+ ar->sys_cap_info = __le32_to_cpu(arg.sys_cap_info);
ath10k_dbg_dump(ar, ATH10K_DBG_WMI, NULL, "wmi svc: ",
arg.service_map, arg.service_map_len);
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi sys_cap_info 0x%x\n",
+ ar->sys_cap_info);
if (ar->num_rf_chains > ar->max_spatial_stream) {
ath10k_warn(ar, "hardware advertises support for more spatial streams than it should (%d > %d)\n",
@@ -5544,17 +5588,22 @@ static void ath10k_wmi_event_service_ready_work(struct work_struct *work)
skip_mem_alloc:
ath10k_dbg(ar, ATH10K_DBG_WMI,
- "wmi event service ready min_tx_power 0x%08x max_tx_power 0x%08x ht_cap 0x%08x vht_cap 0x%08x sw_ver0 0x%08x sw_ver1 0x%08x fw_build 0x%08x phy_capab 0x%08x num_rf_chains 0x%08x eeprom_rd 0x%08x num_mem_reqs 0x%08x\n",
+ "wmi event service ready min_tx_power 0x%08x max_tx_power 0x%08x ht_cap 0x%08x vht_cap 0x%08x vht_supp_mcs 0x%08x sw_ver0 0x%08x sw_ver1 0x%08x fw_build 0x%08x phy_capab 0x%08x num_rf_chains 0x%08x eeprom_rd 0x%08x low_2ghz_chan %d high_2ghz_chan %d low_5ghz_chan %d high_5ghz_chan %d num_mem_reqs 0x%08x\n",
__le32_to_cpu(arg.min_tx_power),
__le32_to_cpu(arg.max_tx_power),
__le32_to_cpu(arg.ht_cap),
__le32_to_cpu(arg.vht_cap),
+ __le32_to_cpu(arg.vht_supp_mcs),
__le32_to_cpu(arg.sw_ver0),
__le32_to_cpu(arg.sw_ver1),
__le32_to_cpu(arg.fw_build),
__le32_to_cpu(arg.phy_capab),
__le32_to_cpu(arg.num_rf_chains),
__le32_to_cpu(arg.eeprom_rd),
+ __le32_to_cpu(arg.low_2ghz_chan),
+ __le32_to_cpu(arg.high_2ghz_chan),
+ __le32_to_cpu(arg.low_5ghz_chan),
+ __le32_to_cpu(arg.high_5ghz_chan),
__le32_to_cpu(arg.num_mem_reqs));
dev_kfree_skb(skb);
@@ -5623,7 +5672,7 @@ int ath10k_wmi_event_ready(struct ath10k *ar, struct sk_buff *skb)
}
ath10k_dbg(ar, ATH10K_DBG_WMI,
- "wmi event ready sw_version %u abi_version %u mac_addr %pM status %d\n",
+ "wmi event ready sw_version 0x%08x abi_version %u mac_addr %pM status %d\n",
__le32_to_cpu(arg.sw_version),
__le32_to_cpu(arg.abi_version),
arg.mac_addr,
@@ -9332,6 +9381,7 @@ int ath10k_wmi_attach(struct ath10k *ar)
ar->wmi.cmd = &wmi_10_4_cmd_map;
ar->wmi.vdev_param = &wmi_10_4_vdev_param_map;
ar->wmi.pdev_param = &wmi_10_4_pdev_param_map;
+ ar->wmi.peer_param = &wmi_peer_param_map;
ar->wmi.peer_flags = &wmi_10_2_peer_flags_map;
ar->wmi_key_cipher = wmi_key_cipher_suites;
break;
@@ -9340,6 +9390,7 @@ int ath10k_wmi_attach(struct ath10k *ar)
ar->wmi.ops = &wmi_10_2_4_ops;
ar->wmi.vdev_param = &wmi_10_2_4_vdev_param_map;
ar->wmi.pdev_param = &wmi_10_2_4_pdev_param_map;
+ ar->wmi.peer_param = &wmi_peer_param_map;
ar->wmi.peer_flags = &wmi_10_2_peer_flags_map;
ar->wmi_key_cipher = wmi_key_cipher_suites;
break;
@@ -9348,6 +9399,7 @@ int ath10k_wmi_attach(struct ath10k *ar)
ar->wmi.ops = &wmi_10_2_ops;
ar->wmi.vdev_param = &wmi_10x_vdev_param_map;
ar->wmi.pdev_param = &wmi_10x_pdev_param_map;
+ ar->wmi.peer_param = &wmi_peer_param_map;
ar->wmi.peer_flags = &wmi_10_2_peer_flags_map;
ar->wmi_key_cipher = wmi_key_cipher_suites;
break;
@@ -9356,6 +9408,7 @@ int ath10k_wmi_attach(struct ath10k *ar)
ar->wmi.ops = &wmi_10_1_ops;
ar->wmi.vdev_param = &wmi_10x_vdev_param_map;
ar->wmi.pdev_param = &wmi_10x_pdev_param_map;
+ ar->wmi.peer_param = &wmi_peer_param_map;
ar->wmi.peer_flags = &wmi_10x_peer_flags_map;
ar->wmi_key_cipher = wmi_key_cipher_suites;
break;
@@ -9364,6 +9417,7 @@ int ath10k_wmi_attach(struct ath10k *ar)
ar->wmi.ops = &wmi_ops;
ar->wmi.vdev_param = &wmi_vdev_param_map;
ar->wmi.pdev_param = &wmi_pdev_param_map;
+ ar->wmi.peer_param = &wmi_peer_param_map;
ar->wmi.peer_flags = &wmi_peer_flags_map;
ar->wmi_key_cipher = wmi_key_cipher_suites;
break;
@@ -9440,7 +9494,5 @@ void ath10k_wmi_detach(struct ath10k *ar)
}
cancel_work_sync(&ar->svc_rdy_work);
-
- if (ar->svc_rdy_skb)
- dev_kfree_skb(ar->svc_rdy_skb);
+ dev_kfree_skb(ar->svc_rdy_skb);
}
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index e80dbe7e8f4c..74adce1dd3a9 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -202,6 +202,7 @@ enum wmi_service {
WMI_SERVICE_REPORT_AIRTIME,
WMI_SERVICE_SYNC_DELETE_CMDS,
WMI_SERVICE_TX_PWR_PER_PEER,
+ WMI_SERVICE_SUPPORT_EXTEND_ADDRESS,
/* Remember to add the new value to wmi_service_name()! */
@@ -496,6 +497,7 @@ static inline char *wmi_service_name(enum wmi_service service_id)
SVCSTR(WMI_SERVICE_REPORT_AIRTIME);
SVCSTR(WMI_SERVICE_SYNC_DELETE_CMDS);
SVCSTR(WMI_SERVICE_TX_PWR_PER_PEER);
+ SVCSTR(WMI_SERVICE_SUPPORT_EXTEND_ADDRESS);
case WMI_SERVICE_MAX:
return NULL;
@@ -3786,6 +3788,8 @@ struct wmi_pdev_param_map {
u32 arp_srcaddr;
u32 arp_dstaddr;
u32 enable_btcoex;
+ u32 rfkill_config;
+ u32 rfkill_enable;
};
#define WMI_PDEV_PARAM_UNSUPPORTED 0
@@ -5071,6 +5075,25 @@ enum wmi_rate_preamble {
/* Value to disable fixed rate setting */
#define WMI_FIXED_RATE_NONE (0xff)
+struct wmi_peer_param_map {
+ u32 smps_state;
+ u32 ampdu;
+ u32 authorize;
+ u32 chan_width;
+ u32 nss;
+ u32 use_4addr;
+ u32 membership;
+ u32 use_fixed_power;
+ u32 user_pos;
+ u32 crit_proto_hint_enabled;
+ u32 tx_fail_cnt_thr;
+ u32 set_hw_retry_cts2s;
+ u32 ibss_atim_win_len;
+ u32 debug;
+ u32 phymode;
+ u32 dummy_var;
+};
+
struct wmi_vdev_param_map {
u32 rts_threshold;
u32 fragmentation_threshold;
@@ -6842,6 +6865,7 @@ struct wmi_svc_rdy_ev_arg {
__le32 max_tx_power;
__le32 ht_cap;
__le32 vht_cap;
+ __le32 vht_supp_mcs;
__le32 sw_ver0;
__le32 sw_ver1;
__le32 fw_build;
@@ -6849,8 +6873,11 @@ struct wmi_svc_rdy_ev_arg {
__le32 num_rf_chains;
__le32 eeprom_rd;
__le32 num_mem_reqs;
+ __le32 low_2ghz_chan;
+ __le32 high_2ghz_chan;
__le32 low_5ghz_chan;
__le32 high_5ghz_chan;
+ __le32 sys_cap_info;
const __le32 *service_map;
size_t service_map_len;
const struct wlan_host_mem_req *mem_reqs[WMI_MAX_MEM_REQS];
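The wmi_peer_param_map added above follows the same pattern as the existing vdev/pdev maps: each firmware branch gets one table translating an abstract parameter to that firmware's numeric ID, and ath10k_wmi_attach() selects the table once so call sites need no per-version switch. A sketch of the indirection; the IDs, the sentinel name and the sender helper are illustrative, not ath10k's real values:

	#define PEER_PARAM_UNSUPPORTED 0	/* mirrors WMI_PDEV_PARAM_UNSUPPORTED */

	static const struct wmi_peer_param_map peer_map_v1 = {
		.smps_state = 1,		/* hypothetical firmware IDs */
		.ampdu      = 2,
	};

	static int set_peer_smps(struct ath10k *ar, u32 value)
	{
		u32 id = ar->wmi.peer_param->smps_state;

		if (id == PEER_PARAM_UNSUPPORTED)
			return -EOPNOTSUPP;
		return send_peer_param(ar, id, value);	/* hypothetical sender */
	}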
diff --git a/drivers/net/wireless/ath/ath5k/eeprom.c b/drivers/net/wireless/ath/ath5k/eeprom.c
index 94d34ee02265..307f1fea0a88 100644
--- a/drivers/net/wireless/ath/ath5k/eeprom.c
+++ b/drivers/net/wireless/ath/ath5k/eeprom.c
@@ -1707,7 +1707,7 @@ ath5k_eeprom_read_spur_chans(struct ath5k_hw *ah)
struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
u32 offset;
u16 val;
- int ret = 0, i;
+ int i;
offset = AR5K_EEPROM_CTL(ee->ee_version) +
AR5K_EEPROM_N_CTLS(ee->ee_version);
@@ -1730,7 +1730,7 @@ ath5k_eeprom_read_spur_chans(struct ath5k_hw *ah)
}
}
- return ret;
+ return 0;
}
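The ath5k hunk removes a ret that nothing ever assigned after initialization; returning the constant directly documents that the function cannot fail on this path. The general shape of the cleanup:

	/* before: a write-only status variable suggests an error path
	 * that does not exist */
	static int read_spur_before(void) { int ret = 0; return ret; }

	/* after: the bare constant makes "always succeeds" explicit */
	static int read_spur_after(void) { return 0; }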
diff --git a/drivers/net/wireless/ath/ath5k/pci.c b/drivers/net/wireless/ath/ath5k/pci.c
index d5ee32ce9eb3..43b4ae86e5fb 100644
--- a/drivers/net/wireless/ath/ath5k/pci.c
+++ b/drivers/net/wireless/ath/ath5k/pci.c
@@ -300,8 +300,7 @@ ath5k_pci_remove(struct pci_dev *pdev)
#ifdef CONFIG_PM_SLEEP
static int ath5k_pci_suspend(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct ieee80211_hw *hw = pci_get_drvdata(pdev);
+ struct ieee80211_hw *hw = dev_get_drvdata(dev);
struct ath5k_hw *ah = hw->priv;
ath5k_led_off(ah);
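pci_get_drvdata(pdev) is defined as dev_get_drvdata(&pdev->dev), so a PM callback that already receives a struct device * can fetch its driver data directly; the to_pci_dev() hop above added nothing. The same simplification recurs in the ath9k and wil6210 hunks later in this series. The equivalence as a sketch:

	/* both lines yield the same pointer; the second skips the
	 * container_of() round trip through struct pci_dev */
	struct ieee80211_hw *hw1 = pci_get_drvdata(to_pci_dev(dev));
	struct ieee80211_hw *hw2 = dev_get_drvdata(dev);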
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c
index 2382c6c46851..6885d2ded53a 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.c
+++ b/drivers/net/wireless/ath/ath6kl/wmi.c
@@ -3650,7 +3650,7 @@ static int ath6kl_wmi_send_action_cmd(struct wmi *wmi, u8 if_idx, u32 id,
if (wait)
return -EINVAL; /* Offload for wait not supported */
- buf = kmalloc(data_len, GFP_KERNEL);
+ buf = kmemdup(data, data_len, GFP_KERNEL);
if (!buf)
return -ENOMEM;
@@ -3661,7 +3661,6 @@ static int ath6kl_wmi_send_action_cmd(struct wmi *wmi, u8 if_idx, u32 id,
}
kfree(wmi->last_mgmt_tx_frame);
- memcpy(buf, data, data_len);
wmi->last_mgmt_tx_frame = buf;
wmi->last_mgmt_tx_frame_len = data_len;
@@ -3689,7 +3688,7 @@ static int __ath6kl_wmi_send_mgmt_cmd(struct wmi *wmi, u8 if_idx, u32 id,
if (wait)
return -EINVAL; /* Offload for wait not supported */
- buf = kmalloc(data_len, GFP_KERNEL);
+ buf = kmemdup(data, data_len, GFP_KERNEL);
if (!buf)
return -ENOMEM;
@@ -3700,7 +3699,6 @@ static int __ath6kl_wmi_send_mgmt_cmd(struct wmi *wmi, u8 if_idx, u32 id,
}
kfree(wmi->last_mgmt_tx_frame);
- memcpy(buf, data, data_len);
wmi->last_mgmt_tx_frame = buf;
wmi->last_mgmt_tx_frame_len = data_len;
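Both ath6kl hunks collapse kmalloc() plus a later memcpy() into kmemdup(), which allocates and copies in one step and removes the window where buf held uninitialized bytes. A user-space analogue of what kmemdup() does:

	#include <stdlib.h>
	#include <string.h>

	/* allocate-and-copy, as kmemdup(src, len, gfp) does in the kernel */
	static void *memdup(const void *src, size_t len)
	{
		void *p = malloc(len);

		if (p)
			memcpy(p, src, len);
		return p;
	}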
diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig
index c99f42284465..78620c6b64a2 100644
--- a/drivers/net/wireless/ath/ath9k/Kconfig
+++ b/drivers/net/wireless/ath/ath9k/Kconfig
@@ -144,13 +144,13 @@ config ATH9K_RFKILL
a platform that can toggle the RF-Kill GPIO.
config ATH9K_CHANNEL_CONTEXT
- bool "Channel Context support"
- depends on ATH9K
- default n
- ---help---
- This option enables channel context support in ath9k, which is needed
- for multi-channel concurrency. Enable this if P2P PowerSave support
- is required.
+ bool "Channel Context support"
+ depends on ATH9K
+ default n
+ ---help---
+ This option enables channel context support in ath9k, which is needed
+ for multi-channel concurrency. Enable this if P2P PowerSave support
+ is required.
config ATH9K_PCOEM
bool "Atheros ath9k support for PC OEM cards" if EXPERT
@@ -162,32 +162,32 @@ config ATH9K_PCI_NO_EEPROM
depends on ATH9K_PCI
default n
help
- This separate driver provides a loader in order to support the
- AR500X to AR92XX-generation of ath9k PCI(e) WiFi chips, which have
- their initialization data (which contains the real PCI Device ID
- that ath9k will need) stored together with the calibration data out
- of reach for the ath9k chip.
+ This separate driver provides a loader in order to support the
+ AR500X to AR92XX-generation of ath9k PCI(e) WiFi chips, which have
+ their initialization data (which contains the real PCI Device ID
+ that ath9k will need) stored together with the calibration data out
+ of reach for the ath9k chip.
- These devices are usually various network appliances, routers or
- access Points and such.
+ These devices are usually various network appliances, routers or
+ access Points and such.
- If unsure say N.
+ If unsure say N.
config ATH9K_HTC
- tristate "Atheros HTC based wireless cards support"
- depends on USB && MAC80211
- select ATH9K_HW
- select MAC80211_LEDS
- select LEDS_CLASS
- select NEW_LEDS
- select ATH9K_COMMON
- ---help---
- Support for Atheros HTC based cards.
- Chipsets supported: AR9271
-
- For more information: http://wireless.kernel.org/en/users/Drivers/ath9k_htc
-
- The built module will be ath9k_htc.
+ tristate "Atheros HTC based wireless cards support"
+ depends on USB && MAC80211
+ select ATH9K_HW
+ select MAC80211_LEDS
+ select LEDS_CLASS
+ select NEW_LEDS
+ select ATH9K_COMMON
+ ---help---
+ Support for Atheros HTC based cards.
+ Chipsets supported: AR9271
+
+ For more information: http://wireless.kernel.org/en/users/Drivers/ath9k_htc
+
+ The built module will be ath9k_htc.
config ATH9K_HTC_DEBUGFS
bool "Atheros ath9k_htc debugging"
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index 2b29bf4730f6..b4885a700296 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -4183,7 +4183,7 @@ static void ar9003_hw_thermometer_apply(struct ath_hw *ah)
static void ar9003_hw_thermo_cal_apply(struct ath_hw *ah)
{
- u32 data, ko, kg;
+ u32 data = 0, ko, kg;
if (!AR_SREV_9462_20_OR_LATER(ah))
return;
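Initializing data above closes a may-be-used-uninitialized path: if the conditional read later in the function never stores into data, the ko/kg computation would otherwise consume stack garbage. The defensive shape, with placeholder names:

	static u32 thermo_word(bool read_ok, u32 raw)
	{
		u32 data = 0;	/* safe default when the read is skipped */

		if (read_ok)
			data = raw;
		return data;	/* consumers never see an indeterminate value */
	}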
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
index 2fe12b0de5b4..42f00a2a8c80 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
@@ -1037,7 +1037,7 @@ static void ar9003_hw_configpcipowersave(struct ath_hw *ah,
}
/*
- * Configire PCIE after Ini init. SERDES values now come from ini file
+ * Configure PCIE after Ini init. SERDES values now come from ini file
* This enables PCIe low power mode.
*/
array = power_off ? &ah->iniPcieSerdes :
diff --git a/drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c b/drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c
index 159490f5a111..956fa7828d0c 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c
+++ b/drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c
@@ -12,7 +12,6 @@
* initialize the chip when the user-space is ready to extract the init code.
*/
#include <linux/module.h>
-#include <linux/version.h>
#include <linux/completion.h>
#include <linux/etherdevice.h>
#include <linux/firmware.h>
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index a82ad739ab80..791f6633667c 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -1674,7 +1674,7 @@ static int ath9k_htc_ampdu_action(struct ieee80211_hw *hw,
case IEEE80211_AMPDU_TX_START:
ret = ath9k_htc_tx_aggr_oper(priv, vif, sta, action, tid);
if (!ret)
- ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ ret = IEEE80211_AMPDU_TX_START_IMMEDIATE;
break;
case IEEE80211_AMPDU_TX_STOP_CONT:
case IEEE80211_AMPDU_TX_STOP_FLUSH:
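This hunk, and the matching ath9k, carl9170 and wcn36xx hunks below, adopt the newer mac80211 contract for starting TX aggregation: instead of calling ieee80211_start_tx_ba_cb_irqsafe() and returning 0, ampdu_action returns IEEE80211_AMPDU_TX_START_IMMEDIATE and mac80211 advances the session state itself, avoiding the deferred-callback round trip. The driver-side shape, with a placeholder helper name:

	case IEEE80211_AMPDU_TX_START:
		ret = driver_tx_aggr_start(priv, vif, sta, tid);
		if (!ret)	/* mac80211 completes the BA setup on return */
			ret = IEEE80211_AMPDU_TX_START_IMMEDIATE;
		break;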
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index 4e8e80ac8341..9cec5c216e1f 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -973,6 +973,8 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
struct ath_htc_rx_status *rxstatus;
struct ath_rx_status rx_stats;
bool decrypt_error = false;
+ __be16 rs_datalen;
+ bool is_phyerr;
if (skb->len < HTC_RX_FRAME_HEADER_SIZE) {
ath_err(common, "Corrupted RX frame, dropping (len: %d)\n",
@@ -982,11 +984,24 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
rxstatus = (struct ath_htc_rx_status *)skb->data;
- if (be16_to_cpu(rxstatus->rs_datalen) -
- (skb->len - HTC_RX_FRAME_HEADER_SIZE) != 0) {
+ rs_datalen = be16_to_cpu(rxstatus->rs_datalen);
+ if (unlikely(rs_datalen -
+ (skb->len - HTC_RX_FRAME_HEADER_SIZE) != 0)) {
ath_err(common,
"Corrupted RX data len, dropping (dlen: %d, skblen: %d)\n",
- rxstatus->rs_datalen, skb->len);
+ rs_datalen, skb->len);
+ goto rx_next;
+ }
+
+ is_phyerr = rxstatus->rs_status & ATH9K_RXERR_PHY;
+ /*
+ * Discard zero-length packets and packets smaller than an ACK
+ * which are not PHY_ERROR (short radar pulses have a length of 3)
+ */
+ if (unlikely(!rs_datalen || (rs_datalen < 10 && !is_phyerr))) {
+ ath_warn(common,
+ "Short RX data len, dropping (dlen: %d)\n",
+ rs_datalen);
goto rx_next;
}
@@ -1011,7 +1026,7 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
* Process PHY errors and return so that the packet
* can be dropped.
*/
- if (rx_stats.rs_status & ATH9K_RXERR_PHY) {
+ if (unlikely(is_phyerr)) {
/* TODO: Not using DFS processing now. */
if (ath_cmn_process_fft(&priv->spec_priv, hdr,
&rx_stats, rx_status->mactime)) {
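The rewritten validation caches the byte-swapped length once and then drops two classes of bad frames before any further processing: frames whose header length disagrees with the skb, and frames shorter than an ACK (10 bytes) that are not PHY-error reports (short radar pulses legitimately arrive with a length of 3). The order of checks as a sketch, using a host-order u16 for the cached length:

	u16 len = be16_to_cpu(rxstatus->rs_datalen);
	bool is_phyerr = rxstatus->rs_status & ATH9K_RXERR_PHY;

	if (len != skb->len - HTC_RX_FRAME_HEADER_SIZE)
		goto drop;			/* header lied about the payload */
	if (!len || (len < 10 && !is_phyerr))
		goto drop;			/* too short to be a real frame */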
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 34121fbf32e3..0548aa3702e3 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -1921,7 +1921,7 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw,
ath9k_ps_wakeup(sc);
ret = ath_tx_aggr_start(sc, sta, tid, ssn);
if (!ret)
- ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ ret = IEEE80211_AMPDU_TX_START_IMMEDIATE;
ath9k_ps_restore(sc);
break;
case IEEE80211_AMPDU_TX_STOP_FLUSH:
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index 92b2dd396436..f3461b193c7a 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -1021,13 +1021,12 @@ static void ath_pci_remove(struct pci_dev *pdev)
static int ath_pci_suspend(struct device *device)
{
- struct pci_dev *pdev = to_pci_dev(device);
- struct ieee80211_hw *hw = pci_get_drvdata(pdev);
+ struct ieee80211_hw *hw = dev_get_drvdata(device);
struct ath_softc *sc = hw->priv;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
if (test_bit(ATH_OP_WOW_ENABLED, &common->op_flags)) {
- dev_info(&pdev->dev, "WOW is enabled, bypassing PCI suspend\n");
+ dev_info(device, "WOW is enabled, bypassing PCI suspend\n");
return 0;
}
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index 40a8054f8aa6..5914926a5c5b 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -1449,8 +1449,7 @@ static int carl9170_op_ampdu_action(struct ieee80211_hw *hw,
rcu_assign_pointer(sta_info->agg[tid], tid_info);
spin_unlock_bh(&ar->tx_ampdu_list_lock);
- ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
- break;
+ return IEEE80211_AMPDU_TX_START_IMMEDIATE;
case IEEE80211_AMPDU_TX_STOP_CONT:
case IEEE80211_AMPDU_TX_STOP_FLUSH:
diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c
index e25bfdf78c2e..20f4f8ea9f89 100644
--- a/drivers/net/wireless/ath/regd.c
+++ b/drivers/net/wireless/ath/regd.c
@@ -33,33 +33,33 @@ static int __ath_regd_init(struct ath_regulatory *reg);
*/
/* Only these channels allow active scan on all world regulatory domains */
-#define ATH9K_2GHZ_CH01_11 REG_RULE(2412-10, 2462+10, 40, 0, 20, 0)
+#define ATH_2GHZ_CH01_11 REG_RULE(2412-10, 2462+10, 40, 0, 20, 0)
/* We enable active scan on these a case by case basis by regulatory domain */
-#define ATH9K_2GHZ_CH12_13 REG_RULE(2467-10, 2472+10, 40, 0, 20,\
+#define ATH_2GHZ_CH12_13 REG_RULE(2467-10, 2472+10, 40, 0, 20,\
NL80211_RRF_NO_IR)
-#define ATH9K_2GHZ_CH14 REG_RULE(2484-10, 2484+10, 40, 0, 20,\
+#define ATH_2GHZ_CH14 REG_RULE(2484-10, 2484+10, 40, 0, 20,\
NL80211_RRF_NO_IR | \
NL80211_RRF_NO_OFDM)
/* We allow IBSS on these on a case by case basis by regulatory domain */
-#define ATH9K_5GHZ_5150_5350 REG_RULE(5150-10, 5350+10, 80, 0, 30,\
+#define ATH_5GHZ_5150_5350 REG_RULE(5150-10, 5350+10, 80, 0, 30,\
NL80211_RRF_NO_IR)
-#define ATH9K_5GHZ_5470_5850 REG_RULE(5470-10, 5850+10, 80, 0, 30,\
+#define ATH_5GHZ_5470_5850 REG_RULE(5470-10, 5850+10, 80, 0, 30,\
NL80211_RRF_NO_IR)
-#define ATH9K_5GHZ_5725_5850 REG_RULE(5725-10, 5850+10, 80, 0, 30,\
+#define ATH_5GHZ_5725_5850 REG_RULE(5725-10, 5850+10, 80, 0, 30,\
NL80211_RRF_NO_IR)
-#define ATH9K_2GHZ_ALL ATH9K_2GHZ_CH01_11, \
- ATH9K_2GHZ_CH12_13, \
- ATH9K_2GHZ_CH14
+#define ATH_2GHZ_ALL ATH_2GHZ_CH01_11, \
+ ATH_2GHZ_CH12_13, \
+ ATH_2GHZ_CH14
-#define ATH9K_5GHZ_ALL ATH9K_5GHZ_5150_5350, \
- ATH9K_5GHZ_5470_5850
+#define ATH_5GHZ_ALL ATH_5GHZ_5150_5350, \
+ ATH_5GHZ_5470_5850
/* This one skips what we call "mid band" */
-#define ATH9K_5GHZ_NO_MIDBAND ATH9K_5GHZ_5150_5350, \
- ATH9K_5GHZ_5725_5850
+#define ATH_5GHZ_NO_MIDBAND ATH_5GHZ_5150_5350, \
+ ATH_5GHZ_5725_5850
/* Can be used for:
* 0x60, 0x61, 0x62 */
@@ -67,8 +67,8 @@ static const struct ieee80211_regdomain ath_world_regdom_60_61_62 = {
.n_reg_rules = 5,
.alpha2 = "99",
.reg_rules = {
- ATH9K_2GHZ_ALL,
- ATH9K_5GHZ_ALL,
+ ATH_2GHZ_ALL,
+ ATH_5GHZ_ALL,
}
};
@@ -77,9 +77,9 @@ static const struct ieee80211_regdomain ath_world_regdom_63_65 = {
.n_reg_rules = 4,
.alpha2 = "99",
.reg_rules = {
- ATH9K_2GHZ_CH01_11,
- ATH9K_2GHZ_CH12_13,
- ATH9K_5GHZ_NO_MIDBAND,
+ ATH_2GHZ_CH01_11,
+ ATH_2GHZ_CH12_13,
+ ATH_5GHZ_NO_MIDBAND,
}
};
@@ -88,8 +88,8 @@ static const struct ieee80211_regdomain ath_world_regdom_64 = {
.n_reg_rules = 3,
.alpha2 = "99",
.reg_rules = {
- ATH9K_2GHZ_CH01_11,
- ATH9K_5GHZ_NO_MIDBAND,
+ ATH_2GHZ_CH01_11,
+ ATH_5GHZ_NO_MIDBAND,
}
};
@@ -98,8 +98,8 @@ static const struct ieee80211_regdomain ath_world_regdom_66_69 = {
.n_reg_rules = 3,
.alpha2 = "99",
.reg_rules = {
- ATH9K_2GHZ_CH01_11,
- ATH9K_5GHZ_ALL,
+ ATH_2GHZ_CH01_11,
+ ATH_5GHZ_ALL,
}
};
@@ -108,9 +108,9 @@ static const struct ieee80211_regdomain ath_world_regdom_67_68_6A_6C = {
.n_reg_rules = 4,
.alpha2 = "99",
.reg_rules = {
- ATH9K_2GHZ_CH01_11,
- ATH9K_2GHZ_CH12_13,
- ATH9K_5GHZ_ALL,
+ ATH_2GHZ_CH01_11,
+ ATH_2GHZ_CH12_13,
+ ATH_5GHZ_ALL,
}
};
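The rename reflects that these world-roaming rules live in the shared ath/regd.c and serve every Atheros driver, not just ath9k. Each REG_RULE(start, end, bw, gain, eirp, flags) takes MHz and is widened by +/-10 MHz so the edges of a 20 MHz channel fall inside the rule: 2412-10 to 2462+10 spans channels 1 through 11. Usage in a regdomain, reusing the macros above:

	static const struct ieee80211_regdomain example_regdom = {
		.n_reg_rules = 2,
		.alpha2 = "99",			/* "world" pseudo country code */
		.reg_rules = {
			ATH_2GHZ_CH01_11,	/* always active-scannable */
			ATH_5GHZ_5725_5850,	/* passive (NO_IR) by default */
		},
	};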
diff --git a/drivers/net/wireless/ath/wcn36xx/hal.h b/drivers/net/wireless/ath/wcn36xx/hal.h
index 8abda2760e04..6ba0fd57c951 100644
--- a/drivers/net/wireless/ath/wcn36xx/hal.h
+++ b/drivers/net/wireless/ath/wcn36xx/hal.h
@@ -2091,7 +2091,7 @@ struct wcn36xx_hal_set_bss_key_rsp_msg {
/*
* This is used to configure the key information on a given station.
* When the sec_type is WEP40 or WEP104, the def_wep_idx is used to locate
- * a preconfigured key from a BSS the station assoicated with; otherwise
+ * a preconfigured key from a BSS the station associated with; otherwise
* a new key descriptor is created based on the key field.
*/
struct wcn36xx_hal_set_sta_key_req_msg {
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index 79998a3ddb7a..c30fdd0cbf1e 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -935,8 +935,6 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw,
out:
mutex_unlock(&wcn->conf_mutex);
-
- return;
}
/* this is required when using IEEE80211_HW_HAS_RATE_CONTROL */
@@ -1084,6 +1082,7 @@ static int wcn36xx_ampdu_action(struct ieee80211_hw *hw,
enum ieee80211_ampdu_mlme_action action = params->action;
u16 tid = params->tid;
u16 *ssn = &params->ssn;
+ int ret = 0;
wcn36xx_dbg(WCN36XX_DBG_MAC, "mac ampdu action action %d tid %d\n",
action, tid);
@@ -1106,7 +1105,7 @@ static int wcn36xx_ampdu_action(struct ieee80211_hw *hw,
sta_priv->ampdu_state[tid] = WCN36XX_AMPDU_START;
spin_unlock_bh(&sta_priv->ampdu_lock);
- ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ ret = IEEE80211_AMPDU_TX_START_IMMEDIATE;
break;
case IEEE80211_AMPDU_TX_OPERATIONAL:
spin_lock_bh(&sta_priv->ampdu_lock);
@@ -1131,7 +1130,7 @@ static int wcn36xx_ampdu_action(struct ieee80211_hw *hw,
mutex_unlock(&wcn->conf_mutex);
- return 0;
+ return ret;
}
static const struct ieee80211_ops wcn36xx_ops = {
diff --git a/drivers/net/wireless/ath/wil6210/boot_loader.h b/drivers/net/wireless/ath/wil6210/boot_loader.h
index d32c1f4e533a..a8a43c25f843 100644
--- a/drivers/net/wireless/ath/wil6210/boot_loader.h
+++ b/drivers/net/wireless/ath/wil6210/boot_loader.h
@@ -1,17 +1,6 @@
+/* SPDX-License-Identifier: ISC */
/* Copyright (c) 2015 Qualcomm Atheros, Inc.
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/* This file contains the definitions for the boot loader
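This and every following wil6210 hunk replace the full ISC permission boilerplate with a single SPDX tag. Per the kernel's license-rules, the tag sits on the first line of the file, written as a // comment in .c files and as a /* */ block comment in headers (which may be pulled into contexts where C99 comments are not accepted). That is why boot_loader.h above takes the block form while the .c files below take the slash form:

	First line of a .c file:
		// SPDX-License-Identifier: ISC

	First line of a .h file:
		/* SPDX-License-Identifier: ISC */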
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index c70854ea5634..7d6f14420855 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -1,18 +1,7 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/etherdevice.h>
diff --git a/drivers/net/wireless/ath/wil6210/debug.c b/drivers/net/wireless/ath/wil6210/debug.c
index a9befb971cc4..396c94c53702 100644
--- a/drivers/net/wireless/ath/wil6210/debug.c
+++ b/drivers/net/wireless/ath/wil6210/debug.c
@@ -1,18 +1,7 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2013,2016 Qualcomm Atheros, Inc.
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include "wil6210.h"
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index 304b4d4e506a..11d0c79e9056 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -1,18 +1,7 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/module.h>
diff --git a/drivers/net/wireless/ath/wil6210/ethtool.c b/drivers/net/wireless/ath/wil6210/ethtool.c
index a04c87ffd37b..912c4eaf017b 100644
--- a/drivers/net/wireless/ath/wil6210/ethtool.c
+++ b/drivers/net/wireless/ath/wil6210/ethtool.c
@@ -1,18 +1,7 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2014,2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/etherdevice.h>
diff --git a/drivers/net/wireless/ath/wil6210/fw.c b/drivers/net/wireless/ath/wil6210/fw.c
index 3e2bbbceca06..6d3413a44b05 100644
--- a/drivers/net/wireless/ath/wil6210/fw.c
+++ b/drivers/net/wireless/ath/wil6210/fw.c
@@ -1,18 +1,7 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2014-2015,2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/firmware.h>
#include <linux/module.h>
diff --git a/drivers/net/wireless/ath/wil6210/fw.h b/drivers/net/wireless/ath/wil6210/fw.h
index fa3164765b20..540fa1607794 100644
--- a/drivers/net/wireless/ath/wil6210/fw.h
+++ b/drivers/net/wireless/ath/wil6210/fw.h
@@ -1,18 +1,7 @@
+/* SPDX-License-Identifier: ISC */
/*
* Copyright (c) 2014,2016 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef __WIL_FW_H__
#define __WIL_FW_H__
diff --git a/drivers/net/wireless/ath/wil6210/fw_inc.c b/drivers/net/wireless/ath/wil6210/fw_inc.c
index 94ebfa338e3f..fbc84c03406b 100644
--- a/drivers/net/wireless/ath/wil6210/fw_inc.c
+++ b/drivers/net/wireless/ath/wil6210/fw_inc.c
@@ -1,18 +1,7 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2014-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/* Algorithmic part of the firmware download.
diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c
index b00a13d6d530..b1480b41cd3a 100644
--- a/drivers/net/wireless/ath/wil6210/interrupt.c
+++ b/drivers/net/wireless/ath/wil6210/interrupt.c
@@ -1,18 +1,7 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/interrupt.h>
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index 9b72202eeadc..06091d8a9e23 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -1,18 +1,7 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/moduleparam.h>
diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c
index a87bb84a8286..07b4a252a23c 100644
--- a/drivers/net/wireless/ath/wil6210/netdev.c
+++ b/drivers/net/wireless/ath/wil6210/netdev.c
@@ -1,18 +1,7 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/etherdevice.h>
diff --git a/drivers/net/wireless/ath/wil6210/p2p.c b/drivers/net/wireless/ath/wil6210/p2p.c
index db087ea58ddf..f26bf046d889 100644
--- a/drivers/net/wireless/ath/wil6210/p2p.c
+++ b/drivers/net/wireless/ath/wil6210/p2p.c
@@ -1,18 +1,7 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2014-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include "wil6210.h"
diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c
index 18dd8b246022..c174323c5c0b 100644
--- a/drivers/net/wireless/ath/wil6210/pcie_bus.c
+++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c
@@ -1,18 +1,7 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/module.h>
@@ -629,8 +618,7 @@ static int __maybe_unused wil6210_pm_resume(struct device *dev)
static int __maybe_unused wil6210_pm_runtime_idle(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct wil6210_priv *wil = pci_get_drvdata(pdev);
+ struct wil6210_priv *wil = dev_get_drvdata(dev);
wil_dbg_pm(wil, "Runtime idle\n");
@@ -644,8 +632,7 @@ static int __maybe_unused wil6210_pm_runtime_resume(struct device *dev)
static int __maybe_unused wil6210_pm_runtime_suspend(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct wil6210_priv *wil = pci_get_drvdata(pdev);
+ struct wil6210_priv *wil = dev_get_drvdata(dev);
if (test_bit(wil_status_suspended, wil->status)) {
wil_dbg_pm(wil, "trying to suspend while suspended\n");
diff --git a/drivers/net/wireless/ath/wil6210/pm.c b/drivers/net/wireless/ath/wil6210/pm.c
index 56143e7670ed..ed4df561e5c5 100644
--- a/drivers/net/wireless/ath/wil6210/pm.c
+++ b/drivers/net/wireless/ath/wil6210/pm.c
@@ -1,18 +1,7 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2014,2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include "wil6210.h"
diff --git a/drivers/net/wireless/ath/wil6210/pmc.c b/drivers/net/wireless/ath/wil6210/pmc.c
index 4b7ac14fc2a7..9b4ca6b256d2 100644
--- a/drivers/net/wireless/ath/wil6210/pmc.c
+++ b/drivers/net/wireless/ath/wil6210/pmc.c
@@ -1,18 +1,7 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2012-2015,2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/types.h>
diff --git a/drivers/net/wireless/ath/wil6210/pmc.h b/drivers/net/wireless/ath/wil6210/pmc.h
index 92b8c4d84a6a..b3d79eb50a43 100644
--- a/drivers/net/wireless/ath/wil6210/pmc.h
+++ b/drivers/net/wireless/ath/wil6210/pmc.h
@@ -1,18 +1,5 @@
-/*
- * Copyright (c) 2012-2015 Qualcomm Atheros, Inc.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
+/* SPDX-License-Identifier: ISC */
+/* Copyright (c) 2012-2015 Qualcomm Atheros, Inc. */
#include <linux/types.h>
diff --git a/drivers/net/wireless/ath/wil6210/rx_reorder.c b/drivers/net/wireless/ath/wil6210/rx_reorder.c
index 13246d216803..d385bc03033a 100644
--- a/drivers/net/wireless/ath/wil6210/rx_reorder.c
+++ b/drivers/net/wireless/ath/wil6210/rx_reorder.c
@@ -1,18 +1,7 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2014-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include "wil6210.h"
diff --git a/drivers/net/wireless/ath/wil6210/trace.c b/drivers/net/wireless/ath/wil6210/trace.c
index cd2534b9c5aa..6909e989baec 100644
--- a/drivers/net/wireless/ath/wil6210/trace.c
+++ b/drivers/net/wireless/ath/wil6210/trace.c
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2013 Qualcomm Atheros, Inc.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/module.h>
diff --git a/drivers/net/wireless/ath/wil6210/trace.h b/drivers/net/wireless/ath/wil6210/trace.h
index 36ebfcf9ef30..11c989e95880 100644
--- a/drivers/net/wireless/ath/wil6210/trace.h
+++ b/drivers/net/wireless/ath/wil6210/trace.h
@@ -1,18 +1,7 @@
+/* SPDX-License-Identifier: ISC */
/*
* Copyright (c) 2013-2016 Qualcomm Atheros, Inc.
* Copyright (c) 2019, The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#undef TRACE_SYSTEM
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index 598c1fba9dac..8ebc6d59aa74 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -1,18 +1,7 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/etherdevice.h>
diff --git a/drivers/net/wireless/ath/wil6210/txrx.h b/drivers/net/wireless/ath/wil6210/txrx.h
index 5120475b0cd7..1f4c8ec75be8 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.h
+++ b/drivers/net/wireless/ath/wil6210/txrx.h
@@ -1,18 +1,7 @@
+/* SPDX-License-Identifier: ISC */
/*
* Copyright (c) 2012-2016 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef WIL6210_TXRX_H
diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.c b/drivers/net/wireless/ath/wil6210/txrx_edma.c
index 04d576deae72..778b63be6a9a 100644
--- a/drivers/net/wireless/ath/wil6210/txrx_edma.c
+++ b/drivers/net/wireless/ath/wil6210/txrx_edma.c
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2012-2019 The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/etherdevice.h>
diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.h b/drivers/net/wireless/ath/wil6210/txrx_edma.h
index 136c51c338cf..c744c65225da 100644
--- a/drivers/net/wireless/ath/wil6210/txrx_edma.h
+++ b/drivers/net/wireless/ath/wil6210/txrx_edma.h
@@ -1,17 +1,6 @@
+/* SPDX-License-Identifier: ISC */
/*
* Copyright (c) 2012-2016,2018-2019, The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef WIL6210_TXRX_EDMA_H
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index 0783c7963621..97626bfd4dac 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -1,18 +1,7 @@
+/* SPDX-License-Identifier: ISC */
/*
* Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef __WIL6210_H__
diff --git a/drivers/net/wireless/ath/wil6210/wil_crash_dump.c b/drivers/net/wireless/ath/wil6210/wil_crash_dump.c
index 772cb00c2002..1332eb8c831f 100644
--- a/drivers/net/wireless/ath/wil6210/wil_crash_dump.c
+++ b/drivers/net/wireless/ath/wil6210/wil_crash_dump.c
@@ -1,18 +1,7 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2015,2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include "wil6210.h"
diff --git a/drivers/net/wireless/ath/wil6210/wil_platform.c b/drivers/net/wireless/ath/wil6210/wil_platform.c
index 4eed05bddb60..10e10dc9fedf 100644
--- a/drivers/net/wireless/ath/wil6210/wil_platform.c
+++ b/drivers/net/wireless/ath/wil6210/wil_platform.c
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: ISC
/*
- * Copyright (c) 2014 Qualcomm Atheros, Inc.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ * Copyright (c) 2014-2016 Qualcomm Atheros, Inc.
*/
#include <linux/device.h>
diff --git a/drivers/net/wireless/ath/wil6210/wil_platform.h b/drivers/net/wireless/ath/wil6210/wil_platform.h
index bca090611477..5ff66202238d 100644
--- a/drivers/net/wireless/ath/wil6210/wil_platform.h
+++ b/drivers/net/wireless/ath/wil6210/wil_platform.h
@@ -1,17 +1,6 @@
+/* SPDX-License-Identifier: ISC */
/*
* Copyright (c) 2014-2017 Qualcomm Atheros, Inc.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef __WIL_PLATFORM_H__
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index 153b84447e40..7a0d934eb271 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -1,18 +1,7 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/moduleparam.h>
@@ -2505,7 +2494,8 @@ int wmi_set_ie(struct wil6210_vif *vif, u8 type, u16 ie_len, const void *ie)
cmd->mgmt_frm_type = type;
/* BUG: FW API define ieLen as u8. Will fix FW */
cmd->ie_len = cpu_to_le16(ie_len);
- memcpy(cmd->ie_info, ie, ie_len);
+ if (ie_len)
+ memcpy(cmd->ie_info, ie, ie_len);
rc = wmi_send(wil, WMI_SET_APPIE_CMDID, vif->mid, cmd, len);
kfree(cmd);
out:
@@ -2541,7 +2531,8 @@ int wmi_update_ft_ies(struct wil6210_vif *vif, u16 ie_len, const void *ie)
}
cmd->ie_len = cpu_to_le16(ie_len);
- memcpy(cmd->ie_info, ie, ie_len);
+ if (ie_len)
+ memcpy(cmd->ie_info, ie, ie_len);
rc = wmi_send(wil, WMI_UPDATE_FT_IES_CMDID, vif->mid, cmd, len);
kfree(cmd);
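Both guards matter because callers may legitimately pass ie == NULL with ie_len == 0 when clearing IEs, and memcpy() with a NULL source is undefined behavior in C even when the count is zero (C11 7.24.1 requires valid pointers regardless of n). The defensive idiom as a helper:

	#include <string.h>

	/* gate on length so a NULL src with n == 0 never reaches memcpy() */
	static void copy_opt(void *dst, const void *src, size_t n)
	{
		if (n)
			memcpy(dst, src, n);
	}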
@@ -2715,7 +2706,7 @@ int wmi_get_all_temperatures(struct wil6210_priv *wil,
return rc;
if (reply.evt.status == WMI_FW_STATUS_FAILURE) {
- wil_err(wil, "Failed geting TEMP_SENSE_ALL\n");
+ wil_err(wil, "Failed getting TEMP_SENSE_ALL\n");
return -EINVAL;
}
diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h
index a2f7034489ae..6bd4ccee28ab 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.h
+++ b/drivers/net/wireless/ath/wil6210/wmi.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: ISC */
/*
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
* Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2006-2012 Wilocity
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/*
diff --git a/drivers/net/wireless/atmel/Kconfig b/drivers/net/wireless/atmel/Kconfig
index 4c0556b3a5ba..c2142c70f25d 100644
--- a/drivers/net/wireless/atmel/Kconfig
+++ b/drivers/net/wireless/atmel/Kconfig
@@ -13,29 +13,29 @@ config WLAN_VENDOR_ATMEL
if WLAN_VENDOR_ATMEL
config ATMEL
- tristate "Atmel at76c50x chipset 802.11b support"
- depends on CFG80211 && (PCI || PCMCIA)
- select WIRELESS_EXT
- select WEXT_PRIV
- select FW_LOADER
- select CRC32
- ---help---
- A driver 802.11b wireless cards based on the Atmel fast-vnet
- chips. This driver supports standard Linux wireless extensions.
-
- Many cards based on this chipset do not have flash memory
- and need their firmware loaded at start-up. If yours is
- one of these, you will need to provide a firmware image
- to be loaded into the card by the driver. The Atmel
- firmware package can be downloaded from
- <http://www.thekelleys.org.uk/atmel>
+ tristate "Atmel at76c50x chipset 802.11b support"
+ depends on CFG80211 && (PCI || PCMCIA)
+ select WIRELESS_EXT
+ select WEXT_PRIV
+ select FW_LOADER
+ select CRC32
+ ---help---
+	  A driver for 802.11b wireless cards based on the Atmel fast-vnet
+ chips. This driver supports standard Linux wireless extensions.
+
+ Many cards based on this chipset do not have flash memory
+ and need their firmware loaded at start-up. If yours is
+ one of these, you will need to provide a firmware image
+ to be loaded into the card by the driver. The Atmel
+ firmware package can be downloaded from
+ <http://www.thekelleys.org.uk/atmel>
config PCI_ATMEL
- tristate "Atmel at76c506 PCI cards"
- depends on ATMEL && PCI
- ---help---
- Enable support for PCI and mini-PCI cards containing the
- Atmel at76c506 chip.
+ tristate "Atmel at76c506 PCI cards"
+ depends on ATMEL && PCI
+ ---help---
+ Enable support for PCI and mini-PCI cards containing the
+ Atmel at76c506 chip.
config PCMCIA_ATMEL
tristate "Atmel at76c502/at76c504 PCMCIA cards"
diff --git a/drivers/net/wireless/atmel/atmel_cs.c b/drivers/net/wireless/atmel/atmel_cs.c
index 7afc9c5329fb..368eebefa741 100644
--- a/drivers/net/wireless/atmel/atmel_cs.c
+++ b/drivers/net/wireless/atmel/atmel_cs.c
@@ -117,11 +117,9 @@ static int atmel_config_check(struct pcmcia_device *p_dev, void *priv_data)
static int atmel_config(struct pcmcia_device *link)
{
- struct local_info *dev;
int ret;
const struct pcmcia_device_id *did;
- dev = link->priv;
did = dev_get_drvdata(&link->dev);
dev_dbg(&link->dev, "atmel_config\n");
diff --git a/drivers/net/wireless/broadcom/b43/dma.c b/drivers/net/wireless/broadcom/b43/dma.c
index 31bf71a80c26..9733c64bf978 100644
--- a/drivers/net/wireless/broadcom/b43/dma.c
+++ b/drivers/net/wireless/broadcom/b43/dma.c
@@ -1400,7 +1400,7 @@ int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
/* This TX ring is full. */
unsigned int skb_mapping = skb_get_queue_mapping(skb);
ieee80211_stop_queue(dev->wl->hw, skb_mapping);
- dev->wl->tx_queue_stopped[skb_mapping] = 1;
+ dev->wl->tx_queue_stopped[skb_mapping] = true;
ring->stopped = true;
if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
b43dbg(dev->wl, "Stopped TX ring %d\n", ring->index);
@@ -1566,7 +1566,7 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
}
if (dev->wl->tx_queue_stopped[ring->queue_prio]) {
- dev->wl->tx_queue_stopped[ring->queue_prio] = 0;
+ dev->wl->tx_queue_stopped[ring->queue_prio] = false;
} else {
/* If the driver queue is running wake the corresponding
* mac80211 queue. */
diff --git a/drivers/net/wireless/broadcom/b43/main.c b/drivers/net/wireless/broadcom/b43/main.c
index b85603e91c7a..39da1a4c30ac 100644
--- a/drivers/net/wireless/broadcom/b43/main.c
+++ b/drivers/net/wireless/broadcom/b43/main.c
@@ -3600,7 +3600,7 @@ static void b43_tx_work(struct work_struct *work)
else
err = b43_dma_tx(dev, skb);
if (err == -ENOSPC) {
- wl->tx_queue_stopped[queue_num] = 1;
+ wl->tx_queue_stopped[queue_num] = true;
ieee80211_stop_queue(wl->hw, queue_num);
skb_queue_head(&wl->tx_queue[queue_num], skb);
break;
@@ -3611,7 +3611,7 @@ static void b43_tx_work(struct work_struct *work)
}
if (!err)
- wl->tx_queue_stopped[queue_num] = 0;
+ wl->tx_queue_stopped[queue_num] = false;
}
#if B43_DEBUG
@@ -5603,7 +5603,7 @@ static struct b43_wl *b43_wireless_init(struct b43_bus_dev *dev)
/* Initialize queues and flags. */
for (queue_num = 0; queue_num < B43_QOS_QUEUE_NUM; queue_num++) {
skb_queue_head_init(&wl->tx_queue[queue_num]);
- wl->tx_queue_stopped[queue_num] = 0;
+ wl->tx_queue_stopped[queue_num] = false;
}
snprintf(chip_name, ARRAY_SIZE(chip_name),
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
index fc12598b2dd3..96fd8e2bf773 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
@@ -1108,7 +1108,8 @@ static int brcmf_ops_sdio_suspend(struct device *dev)
struct sdio_func *func;
struct brcmf_bus *bus_if;
struct brcmf_sdio_dev *sdiodev;
- mmc_pm_flag_t sdio_flags;
+ mmc_pm_flag_t pm_caps, sdio_flags;
+ int ret = 0;
func = container_of(dev, struct sdio_func, dev);
brcmf_dbg(SDIO, "Enter: F%d\n", func->num);
@@ -1119,19 +1120,33 @@ static int brcmf_ops_sdio_suspend(struct device *dev)
bus_if = dev_get_drvdata(dev);
sdiodev = bus_if->bus_priv.sdio;
- brcmf_sdiod_freezer_on(sdiodev);
- brcmf_sdio_wd_timer(sdiodev->bus, 0);
+ pm_caps = sdio_get_host_pm_caps(func);
+
+ if (pm_caps & MMC_PM_KEEP_POWER) {
+ /* preserve card power during suspend */
+ brcmf_sdiod_freezer_on(sdiodev);
+ brcmf_sdio_wd_timer(sdiodev->bus, 0);
+
+ sdio_flags = MMC_PM_KEEP_POWER;
+ if (sdiodev->wowl_enabled) {
+ if (sdiodev->settings->bus.sdio.oob_irq_supported)
+ enable_irq_wake(sdiodev->settings->bus.sdio.oob_irq_nr);
+ else
+ sdio_flags |= MMC_PM_WAKE_SDIO_IRQ;
+ }
+
+ if (sdio_set_host_pm_flags(sdiodev->func1, sdio_flags))
+ brcmf_err("Failed to set pm_flags %x\n", sdio_flags);
- sdio_flags = MMC_PM_KEEP_POWER;
- if (sdiodev->wowl_enabled) {
- if (sdiodev->settings->bus.sdio.oob_irq_supported)
- enable_irq_wake(sdiodev->settings->bus.sdio.oob_irq_nr);
- else
- sdio_flags |= MMC_PM_WAKE_SDIO_IRQ;
+ } else {
+ /* power will be cut so remove device, probe again in resume */
+ brcmf_sdiod_intr_unregister(sdiodev);
+ ret = brcmf_sdiod_remove(sdiodev);
+ if (ret)
+ brcmf_err("Failed to remove device on suspend\n");
}
- if (sdio_set_host_pm_flags(sdiodev->func1, sdio_flags))
- brcmf_err("Failed to set pm_flags %x\n", sdio_flags);
- return 0;
+
+ return ret;
}
static int brcmf_ops_sdio_resume(struct device *dev)
@@ -1139,13 +1154,23 @@ static int brcmf_ops_sdio_resume(struct device *dev)
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
struct sdio_func *func = container_of(dev, struct sdio_func, dev);
+ mmc_pm_flag_t pm_caps = sdio_get_host_pm_caps(func);
+ int ret = 0;
brcmf_dbg(SDIO, "Enter: F%d\n", func->num);
if (func->num != 2)
return 0;
- brcmf_sdiod_freezer_off(sdiodev);
- return 0;
+ if (!(pm_caps & MMC_PM_KEEP_POWER)) {
+ /* bus was powered off and device removed, probe again */
+ ret = brcmf_sdiod_probe(sdiodev);
+ if (ret)
+ brcmf_err("Failed to probe device on resume\n");
+ } else {
+ brcmf_sdiod_freezer_off(sdiodev);
+ }
+
+ return ret;
}
static const struct dev_pm_ops brcmf_sdio_pm_ops = {
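The suspend/resume hunks above split on a single capability: whether the host controller can keep the card powered across suspend. A condensed sketch of that decision, with the brcmf plumbing elided (assumptions: the wowl state is passed in; the remove/re-probe path is represented by a comment):

/* Sketch: keep-power suspend vs. full remove, as the patch arranges it. */
static int sdio_suspend_sketch(struct sdio_func *func, bool wowl_enabled)
{
	mmc_pm_flag_t caps = sdio_get_host_pm_caps(func);

	if (caps & MMC_PM_KEEP_POWER) {
		mmc_pm_flag_t flags = MMC_PM_KEEP_POWER;

		if (wowl_enabled)
			flags |= MMC_PM_WAKE_SDIO_IRQ;
		return sdio_set_host_pm_flags(func, flags);
	}

	/* power will be cut: tear the device down now, probe again in resume */
	return 0;
}

The resume side mirrors this: if power was kept, only the freezer is thawed; otherwise the probe path runs again.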
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index e3ebb7abbdae..5598bbd09b62 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -1282,6 +1282,31 @@ static int brcmf_set_pmk(struct brcmf_if *ifp, const u8 *pmk_data, u16 pmk_len)
return err;
}
+static int brcmf_set_sae_password(struct brcmf_if *ifp, const u8 *pwd_data,
+ u16 pwd_len)
+{
+ struct brcmf_pub *drvr = ifp->drvr;
+ struct brcmf_wsec_sae_pwd_le sae_pwd;
+ int err;
+
+ if (pwd_len > BRCMF_WSEC_MAX_SAE_PASSWORD_LEN) {
+ bphy_err(drvr, "sae_password must be less than %d\n",
+ BRCMF_WSEC_MAX_SAE_PASSWORD_LEN);
+ return -EINVAL;
+ }
+
+ sae_pwd.key_len = cpu_to_le16(pwd_len);
+ memcpy(sae_pwd.key, pwd_data, pwd_len);
+
+ err = brcmf_fil_iovar_data_set(ifp, "sae_password", &sae_pwd,
+ sizeof(sae_pwd));
+ if (err < 0)
+ bphy_err(drvr, "failed to set SAE password in firmware (len=%u)\n",
+ pwd_len);
+
+ return err;
+}
+
static void brcmf_link_down(struct brcmf_cfg80211_vif *vif, u16 reason)
{
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(vif->wdev.wiphy);
@@ -1505,6 +1530,8 @@ static s32 brcmf_set_wpa_version(struct net_device *ndev,
val = WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED;
else if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_2)
val = WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED;
+ else if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_3)
+ val = WPA3_AUTH_SAE_PSK;
else
val = WPA_AUTH_DISABLED;
brcmf_dbg(CONN, "setting wpa_auth to 0x%0x\n", val);
@@ -1537,6 +1564,10 @@ static s32 brcmf_set_auth_type(struct net_device *ndev,
val = 1;
brcmf_dbg(CONN, "shared key\n");
break;
+ case NL80211_AUTHTYPE_SAE:
+ val = 3;
+ brcmf_dbg(CONN, "SAE authentication\n");
+ break;
default:
val = 2;
brcmf_dbg(CONN, "automatic, auth type (%d)\n", sme->auth_type);
@@ -1647,6 +1678,7 @@ brcmf_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme)
u16 count;
profile->use_fwsup = BRCMF_PROFILE_FWSUP_NONE;
+ profile->is_ft = false;
if (!sme->crypto.n_akm_suites)
return 0;
@@ -1691,11 +1723,23 @@ brcmf_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme)
break;
case WLAN_AKM_SUITE_FT_8021X:
val = WPA2_AUTH_UNSPECIFIED | WPA2_AUTH_FT;
+ profile->is_ft = true;
if (sme->want_1x)
profile->use_fwsup = BRCMF_PROFILE_FWSUP_1X;
break;
case WLAN_AKM_SUITE_FT_PSK:
val = WPA2_AUTH_PSK | WPA2_AUTH_FT;
+ profile->is_ft = true;
+ break;
+ default:
+ bphy_err(drvr, "invalid cipher group (%d)\n",
+ sme->crypto.cipher_group);
+ return -EINVAL;
+ }
+ } else if (val & WPA3_AUTH_SAE_PSK) {
+ switch (sme->crypto.akm_suites[0]) {
+ case WLAN_AKM_SUITE_SAE:
+ val = WPA3_AUTH_SAE_PSK;
break;
default:
bphy_err(drvr, "invalid cipher group (%d)\n",
@@ -1773,7 +1817,8 @@ brcmf_set_sharedkey(struct net_device *ndev,
brcmf_dbg(CONN, "wpa_versions 0x%x cipher_pairwise 0x%x\n",
sec->wpa_versions, sec->cipher_pairwise);
- if (sec->wpa_versions & (NL80211_WPA_VERSION_1 | NL80211_WPA_VERSION_2))
+ if (sec->wpa_versions & (NL80211_WPA_VERSION_1 | NL80211_WPA_VERSION_2 |
+ NL80211_WPA_VERSION_3))
return 0;
if (!(sec->cipher_pairwise &
@@ -1980,7 +2025,13 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
goto done;
}
- if (sme->crypto.psk) {
+ if (sme->crypto.sae_pwd) {
+ brcmf_dbg(INFO, "using SAE offload\n");
+ profile->use_fwsup = BRCMF_PROFILE_FWSUP_SAE;
+ }
+
+ if (sme->crypto.psk &&
+ profile->use_fwsup != BRCMF_PROFILE_FWSUP_SAE) {
if (WARN_ON(profile->use_fwsup != BRCMF_PROFILE_FWSUP_NONE)) {
err = -EINVAL;
goto done;
@@ -1998,12 +2049,23 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
}
}
- if (profile->use_fwsup == BRCMF_PROFILE_FWSUP_PSK) {
+ if (profile->use_fwsup == BRCMF_PROFILE_FWSUP_PSK)
err = brcmf_set_pmk(ifp, sme->crypto.psk,
BRCMF_WSEC_MAX_PSK_LEN);
- if (err)
+ else if (profile->use_fwsup == BRCMF_PROFILE_FWSUP_SAE) {
+ /* clean up user-space RSNE */
+ if (brcmf_fil_iovar_data_set(ifp, "wpaie", NULL, 0)) {
+ bphy_err(drvr, "failed to clean up user-space RSNE\n");
goto done;
+ }
+ err = brcmf_set_sae_password(ifp, sme->crypto.sae_pwd,
+ sme->crypto.sae_pwd_len);
+ if (!err && sme->crypto.psk)
+ err = brcmf_set_pmk(ifp, sme->crypto.psk,
+ BRCMF_WSEC_MAX_PSK_LEN);
}
+ if (err)
+ goto done;
/* Join with specific BSSID and cached SSID
* If SSID is zero join based on BSSID only
@@ -5359,7 +5421,8 @@ static bool brcmf_is_linkup(struct brcmf_cfg80211_vif *vif,
if (event == BRCMF_E_SET_SSID && status == BRCMF_E_STATUS_SUCCESS) {
brcmf_dbg(CONN, "Processing set ssid\n");
memcpy(vif->profile.bssid, e->addr, ETH_ALEN);
- if (vif->profile.use_fwsup != BRCMF_PROFILE_FWSUP_PSK)
+ if (vif->profile.use_fwsup != BRCMF_PROFILE_FWSUP_PSK &&
+ vif->profile.use_fwsup != BRCMF_PROFILE_FWSUP_SAE)
return true;
set_bit(BRCMF_VIF_STATUS_ASSOC_SUCCESS, &vif->sme_state);
@@ -5554,6 +5617,11 @@ done:
cfg80211_roamed(ndev, &roam_info, GFP_KERNEL);
brcmf_dbg(CONN, "Report roaming result\n");
+ if (profile->use_fwsup == BRCMF_PROFILE_FWSUP_1X && profile->is_ft) {
+ cfg80211_port_authorized(ndev, profile->bssid, GFP_KERNEL);
+ brcmf_dbg(CONN, "Report port authorized\n");
+ }
+
set_bit(BRCMF_VIF_STATUS_CONNECTED, &ifp->vif->sme_state);
brcmf_dbg(TRACE, "Exit\n");
return err;
@@ -6664,6 +6732,9 @@ static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp)
NL80211_EXT_FEATURE_4WAY_HANDSHAKE_STA_PSK);
wiphy_ext_feature_set(wiphy,
NL80211_EXT_FEATURE_4WAY_HANDSHAKE_STA_1X);
+ if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_SAE))
+ wiphy_ext_feature_set(wiphy,
+ NL80211_EXT_FEATURE_SAE_OFFLOAD);
}
wiphy->mgmt_stypes = brcmf_txrx_stypes;
wiphy->max_remain_on_channel_duration = 5000;
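Taken together, these cfg80211.c hunks wire up SAE offload end to end: the wiphy advertises NL80211_EXT_FEATURE_SAE_OFFLOAD when the firmware reports the "sae" capability, WPA3/SAE selections map onto WPA3_AUTH_SAE_PSK, and at connect time the password goes to the firmware supplicant instead of being handled by the host. A compressed sketch of the connect-path ordering (names taken from the diff itself; error handling trimmed):

/* Sketch: SAE-offload branch of the connect path. */
static int sae_offload_connect_sketch(struct brcmf_if *ifp,
				      struct cfg80211_connect_params *sme)
{
	int err = 0;

	if (sme->crypto.sae_pwd) {
		/* firmware runs SAE; drop any RSNE pushed from user space */
		err = brcmf_fil_iovar_data_set(ifp, "wpaie", NULL, 0);
		if (err)
			return err;
		err = brcmf_set_sae_password(ifp, sme->crypto.sae_pwd,
					     sme->crypto.sae_pwd_len);
	}
	return err;
}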
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
index 14d5bbad1db1..6ce48f6275a4 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
@@ -107,7 +107,8 @@ struct brcmf_cfg80211_security {
enum brcmf_profile_fwsup {
BRCMF_PROFILE_FWSUP_NONE,
BRCMF_PROFILE_FWSUP_PSK,
- BRCMF_PROFILE_FWSUP_1X
+ BRCMF_PROFILE_FWSUP_1X,
+ BRCMF_PROFILE_FWSUP_SAE
};
/**
@@ -122,6 +123,7 @@ struct brcmf_cfg80211_profile {
struct brcmf_cfg80211_security sec;
struct brcmf_wsec_key key[BRCMF_MAX_DEFAULT_KEYS];
enum brcmf_profile_fwsup use_fwsup;
+ bool is_ft;
};
/**
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
index dd586a96b57a..a795d781b4c5 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
@@ -778,7 +778,6 @@ static int brcmf_chip_dmp_get_regaddr(struct brcmf_chip_priv *ci, u32 *eromaddr,
{
u8 desc;
u32 val, szdesc;
- u8 mpnum = 0;
u8 stype, sztype, wraptype;
*regbase = 0;
@@ -786,7 +785,6 @@ static int brcmf_chip_dmp_get_regaddr(struct brcmf_chip_priv *ci, u32 *eromaddr,
val = brcmf_chip_dmp_get_desc(ci, eromaddr, &desc);
if (desc == DMP_DESC_MASTER_PORT) {
- mpnum = (val & DMP_MASTER_PORT_NUM) >> DMP_MASTER_PORT_NUM_S;
wraptype = DMP_SLAVE_TYPE_MWRAP;
} else if (desc == DMP_DESC_ADDRESS) {
/* revert erom address */
@@ -854,7 +852,7 @@ int brcmf_chip_dmp_erom_scan(struct brcmf_chip_priv *ci)
u8 desc_type = 0;
u32 val;
u16 id;
- u8 nmp, nsp, nmw, nsw, rev;
+ u8 nmw, nsw, rev;
u32 base, wrap;
int err;
@@ -880,8 +878,6 @@ int brcmf_chip_dmp_erom_scan(struct brcmf_chip_priv *ci)
return -EFAULT;
/* only look at cores with master port(s) */
- nmp = (val & DMP_COMP_NUM_MPORT) >> DMP_COMP_NUM_MPORT_S;
- nsp = (val & DMP_COMP_NUM_SPORT) >> DMP_COMP_NUM_SPORT_S;
nmw = (val & DMP_COMP_NUM_MWRAP) >> DMP_COMP_NUM_MWRAP_S;
nsw = (val & DMP_COMP_NUM_SWRAP) >> DMP_COMP_NUM_SWRAP_S;
rev = (val & DMP_COMP_REVISION) >> DMP_COMP_REVISION_S;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
index 406b367c284c..85cf96461dde 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
@@ -1350,6 +1350,11 @@ void brcmf_detach(struct device *dev)
brcmf_fweh_detach(drvr);
brcmf_proto_detach(drvr);
+ if (drvr->mon_if) {
+ brcmf_net_detach(drvr->mon_if->ndev, false);
+ drvr->mon_if = NULL;
+ }
+
/* make sure primary interface removed last */
for (i = BRCMF_MAX_IFS - 1; i > -1; i--) {
if (drvr->iflist[i])
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
index 2c3526aeca6f..1c9c74cc958e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
@@ -39,7 +39,8 @@ static const struct brcmf_feat_fwcap brcmf_fwcap_map[] = {
{ BRCMF_FEAT_P2P, "p2p" },
{ BRCMF_FEAT_MONITOR, "monitor" },
{ BRCMF_FEAT_MONITOR_FMT_RADIOTAP, "rtap" },
- { BRCMF_FEAT_DOT11H, "802.11h" }
+ { BRCMF_FEAT_DOT11H, "802.11h" },
+ { BRCMF_FEAT_SAE, "sae" },
};
#ifdef DEBUG
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h
index 736a8179f62f..280a1f6412d4 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h
@@ -26,6 +26,7 @@
* MONITOR_FMT_RADIOTAP: firmware provides monitor packets with radiotap header
* MONITOR_FMT_HW_RX_HDR: firmware provides monitor packets with hw/ucode header
* DOT11H: firmware supports 802.11h
+ * SAE: simultaneous authentication of equals
*/
#define BRCMF_FEAT_LIST \
BRCMF_FEAT_DEF(MBSS) \
@@ -45,7 +46,8 @@
BRCMF_FEAT_DEF(MONITOR) \
BRCMF_FEAT_DEF(MONITOR_FMT_RADIOTAP) \
BRCMF_FEAT_DEF(MONITOR_FMT_HW_RX_HDR) \
- BRCMF_FEAT_DEF(DOT11H)
+ BRCMF_FEAT_DEF(DOT11H) \
+ BRCMF_FEAT_DEF(SAE)
/*
* Quirks:
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
index 37c512036e0e..de0ef1b545c4 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
@@ -61,6 +61,8 @@
#define BRCMF_WSEC_MAX_PSK_LEN 32
#define BRCMF_WSEC_PASSPHRASE BIT(0)
+#define BRCMF_WSEC_MAX_SAE_PASSWORD_LEN 128
+
/* primary (ie tx) key */
#define BRCMF_PRIMARY_KEY (1 << 1)
#define DOT11_BSSTYPE_ANY 2
@@ -518,6 +520,17 @@ struct brcmf_wsec_pmk_le {
u8 key[2 * BRCMF_WSEC_MAX_PSK_LEN + 1];
};
+/**
+ * struct brcmf_wsec_sae_pwd_le - firmware SAE password material.
+ *
+ * @key_len: number of octets in the key material.
+ * @key: SAE password material.
+ */
+struct brcmf_wsec_sae_pwd_le {
+ __le16 key_len;
+ u8 key[BRCMF_WSEC_MAX_SAE_PASSWORD_LEN];
+};
+
/* Used to get specific STA parameters */
struct brcmf_scb_val_le {
__le32 val;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
index 6c463475e90b..f64ce5074a55 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
@@ -1024,8 +1024,6 @@ brcmf_pcie_init_dmabuffer_for_device(struct brcmf_pciedev_info *devinfo,
address & 0xffffffff);
brcmf_pcie_write_tcm32(devinfo, tcm_dma_phys_addr + 4, address >> 32);
- memset(ring, 0, size);
-
return (ring);
}
@@ -1427,6 +1425,8 @@ static int brcmf_pcie_reset(struct device *dev)
struct brcmf_fw_request *fwreq;
int err;
+ brcmf_pcie_intr_disable(devinfo);
+
brcmf_pcie_bus_console_read(devinfo, true);
brcmf_detach(dev);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c
index 14e530601ef3..fabfbb0b40b0 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c
@@ -57,6 +57,10 @@ static int brcmf_pno_remove_request(struct brcmf_pno_info *pi, u64 reqid)
mutex_lock(&pi->req_lock);
+ /* Nothing to do if we have no requests */
+ if (pi->n_reqs == 0)
+ goto done;
+
/* find request */
for (i = 0; i < pi->n_reqs; i++) {
if (pi->reqs[i]->reqid == reqid)
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/channel.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/channel.c
index db783e94f929..5a6d9c86552a 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/channel.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/channel.c
@@ -496,13 +496,11 @@ brcms_c_channel_reg_limits(struct brcms_cm_info *wlc_cm, u16 chanspec,
* table and override CDD later
*/
if (li_mimo == &locale_bn) {
- if (li_mimo == &locale_bn) {
- maxpwr20 = QDB(16);
- maxpwr40 = 0;
+ maxpwr20 = QDB(16);
+ maxpwr40 = 0;
- if (chan >= 3 && chan <= 11)
- maxpwr40 = QDB(16);
- }
+ if (chan >= 3 && chan <= 11)
+ maxpwr40 = QDB(16);
for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++) {
txpwr->mcs_20_siso[i] = (u8) maxpwr20;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
index 6188275b17e5..8e8b685cfe09 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
@@ -850,8 +850,7 @@ brcms_ops_ampdu_action(struct ieee80211_hw *hw,
"START: tid %d is not agg\'able\n", tid);
return -EINVAL;
}
- ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
- break;
+ return IEEE80211_AMPDU_TX_START_IMMEDIATE;
case IEEE80211_AMPDU_TX_STOP_CONT:
case IEEE80211_AMPDU_TX_STOP_FLUSH:
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c
index 080e829da9b3..3f09d89ba922 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c
@@ -838,9 +838,8 @@ brcms_c_dotxstatus(struct brcms_c_info *wlc, struct tx_status *txs)
struct dma_pub *dma = NULL;
struct d11txh *txh = NULL;
struct scb *scb = NULL;
- bool free_pdu;
- int tx_rts, tx_frame_count, tx_rts_count;
- uint totlen, supr_status;
+ int tx_frame_count;
+ uint supr_status;
bool lastframe;
struct ieee80211_hdr *h;
u16 mcl;
@@ -917,11 +916,8 @@ brcms_c_dotxstatus(struct brcms_c_info *wlc, struct tx_status *txs)
CHSPEC_CHANNEL(wlc->default_bss->chanspec));
}
- tx_rts = le16_to_cpu(txh->MacTxControlLow) & TXC_SENDRTS;
tx_frame_count =
(txs->status & TX_STATUS_FRM_RTX_MASK) >> TX_STATUS_FRM_RTX_SHIFT;
- tx_rts_count =
- (txs->status & TX_STATUS_RTS_RTX_MASK) >> TX_STATUS_RTS_RTX_SHIFT;
lastframe = !ieee80211_has_morefrags(h->frame_control);
@@ -989,9 +985,6 @@ brcms_c_dotxstatus(struct brcms_c_info *wlc, struct tx_status *txs)
tx_info->flags |= IEEE80211_TX_STAT_ACK;
}
- totlen = p->len;
- free_pdu = true;
-
if (lastframe) {
/* remove PLCP & Broadcom tx descriptor header */
skb_pull(p, D11_PHY_HDR_LEN);
@@ -1816,8 +1809,7 @@ void brcms_b_phy_reset(struct brcms_hardware *wlc_hw)
udelay(2);
brcms_b_core_phy_clk(wlc_hw, ON);
- if (pih)
- wlc_phy_anacore(pih, ON);
+ wlc_phy_anacore(pih, ON);
}
/* switch to and initialize new band */
@@ -7384,9 +7376,7 @@ static void brcms_c_update_beacon_hw(struct brcms_c_info *wlc,
false, true);
/* mark beacon0 valid */
bcma_set32(core, D11REGOFFS(maccommand), MCMD_BCN1VLD);
- return;
}
- return;
}
/*
diff --git a/drivers/net/wireless/broadcom/brcm80211/include/brcmu_wifi.h b/drivers/net/wireless/broadcom/brcm80211/include/brcmu_wifi.h
index 7b31c212694d..7552bdb91991 100644
--- a/drivers/net/wireless/broadcom/brcm80211/include/brcmu_wifi.h
+++ b/drivers/net/wireless/broadcom/brcm80211/include/brcmu_wifi.h
@@ -231,6 +231,8 @@ static inline bool ac_bitmap_tst(u8 bitmap, int prec)
#define WPA2_AUTH_FT 0x4000 /* Fast BSS Transition */
#define WPA2_AUTH_PSK_SHA256 0x8000 /* PSK with SHA256 key derivation */
+#define WPA3_AUTH_SAE_PSK 0x40000 /* SAE with 4-way handshake */
+
#define DOT11_DEFAULT_RTS_LEN 2347
#define DOT11_DEFAULT_FRAG_LEN 2346
diff --git a/drivers/net/wireless/cisco/Kconfig b/drivers/net/wireless/cisco/Kconfig
index 01e173ede894..7a3b3bb2ce15 100644
--- a/drivers/net/wireless/cisco/Kconfig
+++ b/drivers/net/wireless/cisco/Kconfig
@@ -17,7 +17,7 @@ config AIRO
depends on CFG80211 && ISA_DMA_API && (PCI || BROKEN)
select WIRELESS_EXT
select CRYPTO
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select WEXT_SPY
select WEXT_PRIV
---help---
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.c b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
index 8dfbaff2d1fe..c4c83ab60cbc 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
@@ -5565,7 +5565,7 @@ static void shim__set_security(struct net_device *dev,
struct libipw_security *sec)
{
struct ipw2100_priv *priv = libipw_priv(dev);
- int i, force_update = 0;
+ int i;
mutex_lock(&priv->action_mutex);
if (!(priv->status & STATUS_INITIALIZED))
@@ -5605,7 +5605,6 @@ static void shim__set_security(struct net_device *dev,
priv->ieee->sec.flags |= SEC_ENABLED;
priv->ieee->sec.enabled = sec->enabled;
priv->status |= STATUS_SECURITY_UPDATED;
- force_update = 1;
}
if (sec->flags & SEC_ENCRYPT)
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.c b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
index ed0f06532d5e..31e43fc1d12b 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
@@ -6788,9 +6788,6 @@ static int ipw_wx_set_mlme(struct net_device *dev,
{
struct ipw_priv *priv = libipw_priv(dev);
struct iw_mlme *mlme = (struct iw_mlme *)extra;
- __le16 reason;
-
- reason = cpu_to_le16(mlme->reason_code);
switch (mlme->cmd) {
case IW_MLME_DEAUTH:
diff --git a/drivers/net/wireless/intel/ipw2x00/libipw_rx.c b/drivers/net/wireless/intel/ipw2x00/libipw_rx.c
index 34cfd8162855..0cb36d1b983a 100644
--- a/drivers/net/wireless/intel/ipw2x00/libipw_rx.c
+++ b/drivers/net/wireless/intel/ipw2x00/libipw_rx.c
@@ -999,13 +999,12 @@ static int libipw_read_qos_info_element(struct
/*
* Write QoS parameters from the ac parameters.
*/
-static int libipw_qos_convert_ac_to_parameters(struct
+static void libipw_qos_convert_ac_to_parameters(struct
libipw_qos_parameter_info
*param_elm, struct
libipw_qos_parameters
*qos_param)
{
- int rc = 0;
int i;
struct libipw_qos_ac_parameter *ac_params;
u32 txop;
@@ -1030,7 +1029,6 @@ static int libipw_qos_convert_ac_to_parameters(struct
txop = le16_to_cpu(ac_params->tx_op_limit) * 32;
qos_param->tx_op_limit[i] = cpu_to_le16(txop);
}
- return rc;
}
/*
diff --git a/drivers/net/wireless/intel/iwlegacy/3945-mac.c b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
index 4fbcc7fba3cc..1168055da182 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
@@ -2301,9 +2301,7 @@ __il3945_down(struct il_priv *il)
il3945_hw_txq_ctx_free(il);
exit:
memset(&il->card_alive, 0, sizeof(struct il_alive_resp));
-
- if (il->beacon_skb)
- dev_kfree_skb(il->beacon_skb);
+ dev_kfree_skb(il->beacon_skb);
il->beacon_skb = NULL;
/* clear out any free frames */
@@ -3846,9 +3844,7 @@ il3945_pci_remove(struct pci_dev *pdev)
il_free_channel_map(il);
il_free_geos(il);
kfree(il->scan_cmd);
- if (il->beacon_skb)
- dev_kfree_skb(il->beacon_skb);
-
+ dev_kfree_skb(il->beacon_skb);
ieee80211_free_hw(il->hw);
}
diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
index ffb705b18fb1..3664f56f8cbd 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
@@ -2265,7 +2265,7 @@ il4965_tx_agg_start(struct il_priv *il, struct ieee80211_vif *vif,
if (tid_data->tfds_in_queue == 0) {
D_HT("HW queue is empty\n");
tid_data->agg.state = IL_AGG_ON;
- ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ ret = IEEE80211_AMPDU_TX_START_IMMEDIATE;
} else {
D_HT("HW queue is NOT empty: %d packets in HW queue\n",
tid_data->tfds_in_queue);
@@ -3331,7 +3331,6 @@ il4965_set_tkip_dynamic_key_info(struct il_priv *il,
struct ieee80211_key_conf *keyconf, u8 sta_id)
{
unsigned long flags;
- int ret = 0;
__le16 key_flags = 0;
key_flags |= (STA_KEY_FLG_TKIP | STA_KEY_FLG_MAP_KEY_MSK);
@@ -3368,7 +3367,7 @@ il4965_set_tkip_dynamic_key_info(struct il_priv *il,
spin_unlock_irqrestore(&il->sta_lock, flags);
- return ret;
+ return 0;
}
void
diff --git a/drivers/net/wireless/intel/iwlegacy/common.c b/drivers/net/wireless/intel/iwlegacy/common.c
index 73f7bbf742bc..d966b29b45ee 100644
--- a/drivers/net/wireless/intel/iwlegacy/common.c
+++ b/drivers/net/wireless/intel/iwlegacy/common.c
@@ -1072,7 +1072,7 @@ EXPORT_SYMBOL(il_get_channel_info);
static void
il_build_powertable_cmd(struct il_priv *il, struct il_powertable_cmd *cmd)
{
- const __le32 interval[3][IL_POWER_VEC_SIZE] = {
+ static const __le32 interval[3][IL_POWER_VEC_SIZE] = {
SLP_VEC(2, 2, 4, 6, 0xFF),
SLP_VEC(2, 4, 7, 10, 10),
SLP_VEC(4, 7, 10, 10, 0xFF)
@@ -5182,8 +5182,7 @@ il_mac_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
memset(&il->current_ht_config, 0, sizeof(struct il_ht_config));
/* new association get rid of ibss beacon skb */
- if (il->beacon_skb)
- dev_kfree_skb(il->beacon_skb);
+ dev_kfree_skb(il->beacon_skb);
il->beacon_skb = NULL;
il->timestamp = 0;
@@ -5302,10 +5301,7 @@ il_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
}
spin_lock_irqsave(&il->lock, flags);
-
- if (il->beacon_skb)
- dev_kfree_skb(il->beacon_skb);
-
+ dev_kfree_skb(il->beacon_skb);
il->beacon_skb = skb;
timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
diff --git a/drivers/net/wireless/intel/iwlwifi/Makefile b/drivers/net/wireless/intel/iwlwifi/Makefile
index ff41987a7e35..0aae3fa4128c 100644
--- a/drivers/net/wireless/intel/iwlwifi/Makefile
+++ b/drivers/net/wireless/intel/iwlwifi/Makefile
@@ -14,7 +14,8 @@ iwlwifi-$(CONFIG_IWLMVM) += cfg/7000.o cfg/8000.o cfg/9000.o cfg/22000.o
iwlwifi-objs += iwl-dbg-tlv.o
iwlwifi-objs += iwl-trans.o
iwlwifi-objs += fw/notif-wait.o
-iwlwifi-$(CONFIG_IWLMVM) += fw/paging.o fw/smem.o fw/init.o fw/dbg.o
+iwlwifi-objs += fw/dbg.o
+iwlwifi-$(CONFIG_IWLMVM) += fw/paging.o fw/smem.o fw/init.o
iwlwifi-$(CONFIG_ACPI) += fw/acpi.o
iwlwifi-$(CONFIG_IWLWIFI_DEBUGFS) += fw/debugfs.o
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
index 5e355c4957df..56dc335a788c 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
@@ -54,9 +54,10 @@
#include <linux/module.h>
#include <linux/stringify.h>
#include "iwl-config.h"
+#include "iwl-prph.h"
/* Highest firmware API version supported */
-#define IWL_22000_UCODE_API_MAX 50
+#define IWL_22000_UCODE_API_MAX 52
/* Lowest firmware API version supported */
#define IWL_22000_UCODE_API_MIN 39
@@ -137,7 +138,7 @@ static const struct iwl_base_params iwl_22000_base_params = {
.pcie_l1_allowed = true,
};
-static const struct iwl_base_params iwl_22560_base_params = {
+static const struct iwl_base_params iwl_ax210_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_32K,
.num_of_queues = 512,
.max_tfd_queue_size = 65536,
@@ -183,33 +184,57 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
.min_umac_error_event_table = 0x400000, \
.d3_debug_data_base_addr = 0x401000, \
.d3_debug_data_length = 60 * 1024, \
- .fw_mon_smem_write_ptr_addr = 0xa0c16c, \
- .fw_mon_smem_write_ptr_msk = 0xfffff, \
- .fw_mon_smem_cycle_cnt_ptr_addr = 0xa0c174, \
- .fw_mon_smem_cycle_cnt_ptr_msk = 0xfffff
+ .mon_smem_regs = { \
+ .write_ptr = { \
+ .addr = LDBG_M2S_BUF_WPTR, \
+ .mask = LDBG_M2S_BUF_WPTR_VAL_MSK, \
+ }, \
+ .cycle_cnt = { \
+ .addr = LDBG_M2S_BUF_WRAP_CNT, \
+ .mask = LDBG_M2S_BUF_WRAP_CNT_VAL_MSK, \
+ }, \
+ }
#define IWL_DEVICE_22500 \
IWL_DEVICE_22000_COMMON, \
.trans.device_family = IWL_DEVICE_FAMILY_22000, \
.trans.base_params = &iwl_22000_base_params, \
.trans.csr = &iwl_csr_v1, \
- .gp2_reg_addr = 0xa02c68
-
-#define IWL_DEVICE_22560 \
- IWL_DEVICE_22000_COMMON, \
- .trans.device_family = IWL_DEVICE_FAMILY_22560, \
- .trans.base_params = &iwl_22560_base_params, \
- .trans.csr = &iwl_csr_v2
+ .gp2_reg_addr = 0xa02c68, \
+ .mon_dram_regs = { \
+ .write_ptr = { \
+ .addr = MON_BUFF_WRPTR_VER2, \
+ .mask = 0xffffffff, \
+ }, \
+ .cycle_cnt = { \
+ .addr = MON_BUFF_CYCLE_CNT_VER2, \
+ .mask = 0xffffffff, \
+ }, \
+ }
#define IWL_DEVICE_AX210 \
IWL_DEVICE_22000_COMMON, \
.trans.umac_prph_offset = 0x300000, \
.trans.device_family = IWL_DEVICE_FAMILY_AX210, \
- .trans.base_params = &iwl_22560_base_params, \
+ .trans.base_params = &iwl_ax210_base_params, \
.trans.csr = &iwl_csr_v1, \
.min_txq_size = 128, \
.gp2_reg_addr = 0xd02c68, \
- .min_256_ba_txq_size = 512
+ .min_256_ba_txq_size = 512, \
+ .mon_dram_regs = { \
+ .write_ptr = { \
+ .addr = DBGC_CUR_DBGBUF_STATUS, \
+ .mask = DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK, \
+ }, \
+ .cycle_cnt = { \
+ .addr = DBGC_DBGBUF_WRAP_AROUND, \
+ .mask = 0xffffffff, \
+ }, \
+ .cur_frag = { \
+ .addr = DBGC_CUR_DBGBUF_STATUS, \
+ .mask = DBGC_CUR_DBGBUF_STATUS_IDX_MSK, \
+ }, \
+ }
const struct iwl_cfg iwl22000_2ac_cfg_hr = {
.name = "Intel(R) Dual Band Wireless AC 22000",
@@ -292,39 +317,39 @@ const struct iwl_cfg iwl_ax101_cfg_quz_hr = {
};
const struct iwl_cfg iwl_ax201_cfg_quz_hr = {
- .name = "Intel(R) Wi-Fi 6 AX201 160MHz",
- .fw_name_pre = IWL_QUZ_A_HR_B_FW_PRE,
- IWL_DEVICE_22500,
- /*
+ .name = "Intel(R) Wi-Fi 6 AX201 160MHz",
+ .fw_name_pre = IWL_QUZ_A_HR_B_FW_PRE,
+ IWL_DEVICE_22500,
+ /*
* This device doesn't support receiving BlockAck with a large bitmap
* so we need to restrict the size of transmitted aggregation to the
* HT size; mac80211 would otherwise pick the HE max (256) by default.
*/
- .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
};
const struct iwl_cfg iwl_ax1650s_cfg_quz_hr = {
- .name = "Killer(R) Wi-Fi 6 AX1650s 160MHz Wireless Network Adapter (201D2W)",
- .fw_name_pre = IWL_QUZ_A_HR_B_FW_PRE,
- IWL_DEVICE_22500,
- /*
+ .name = "Killer(R) Wi-Fi 6 AX1650s 160MHz Wireless Network Adapter (201D2W)",
+ .fw_name_pre = IWL_QUZ_A_HR_B_FW_PRE,
+ IWL_DEVICE_22500,
+ /*
* This device doesn't support receiving BlockAck with a large bitmap
* so we need to restrict the size of transmitted aggregation to the
* HT size; mac80211 would otherwise pick the HE max (256) by default.
*/
- .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
};
const struct iwl_cfg iwl_ax1650i_cfg_quz_hr = {
- .name = "Killer(R) Wi-Fi 6 AX1650i 160MHz Wireless Network Adapter (201NGW)",
- .fw_name_pre = IWL_QUZ_A_HR_B_FW_PRE,
- IWL_DEVICE_22500,
- /*
+ .name = "Killer(R) Wi-Fi 6 AX1650i 160MHz Wireless Network Adapter (201NGW)",
+ .fw_name_pre = IWL_QUZ_A_HR_B_FW_PRE,
+ IWL_DEVICE_22500,
+ /*
* This device doesn't support receiving BlockAck with a large bitmap
* so we need to restrict the size of transmitted aggregation to the
* HT size; mac80211 would otherwise pick the HE max (256) by default.
*/
- .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
};
const struct iwl_cfg iwl_ax200_cfg_cc = {
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
index e8372b67df03..e9155b9b5ee4 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
@@ -55,6 +55,7 @@
#include <linux/stringify.h>
#include "iwl-config.h"
#include "fw/file.h"
+#include "iwl-prph.h"
/* Highest firmware API version supported */
#define IWL9000_UCODE_API_MAX 46
@@ -149,10 +150,26 @@ static const struct iwl_tt_params iwl9000_tt_params = {
.ht_params = &iwl9000_ht_params, \
.nvm_ver = IWL9000_NVM_VERSION, \
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
- .fw_mon_smem_write_ptr_addr = 0xa0476c, \
- .fw_mon_smem_write_ptr_msk = 0xfffff, \
- .fw_mon_smem_cycle_cnt_ptr_addr = 0xa04774, \
- .fw_mon_smem_cycle_cnt_ptr_msk = 0xfffff
+ .mon_smem_regs = { \
+ .write_ptr = { \
+ .addr = LDBG_M2S_BUF_WPTR, \
+ .mask = LDBG_M2S_BUF_WPTR_VAL_MSK, \
+ }, \
+ .cycle_cnt = { \
+ .addr = LDBG_M2S_BUF_WRAP_CNT, \
+ .mask = LDBG_M2S_BUF_WRAP_CNT_VAL_MSK, \
+ }, \
+ }, \
+ .mon_dram_regs = { \
+ .write_ptr = { \
+ .addr = MON_BUFF_WRPTR_VER2, \
+ .mask = 0xffffffff, \
+ }, \
+ .cycle_cnt = { \
+ .addr = MON_BUFF_CYCLE_CNT_VER2, \
+ .mask = 0xffffffff, \
+ }, \
+ }
const struct iwl_cfg iwl9160_2ac_cfg = {
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/led.c b/drivers/net/wireless/intel/iwlwifi/dvm/led.c
index dd387aba3317..e8a4d604b910 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/led.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/led.c
@@ -171,6 +171,9 @@ void iwl_leds_init(struct iwl_priv *priv)
priv->led.name = kasprintf(GFP_KERNEL, "%s-led",
wiphy_name(priv->hw->wiphy));
+ if (!priv->led.name)
+ return;
+
priv->led.brightness_set = iwl_led_brightness_set;
priv->led.blink_set = iwl_led_blink_set;
priv->led.max_brightness = 1;
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c
index 74229fcb63a9..226165db7dfd 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c
@@ -851,7 +851,7 @@ static void rs_bt_update_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
* Is there a need to switch between
* full concurrency and 3-wire?
*/
- if (priv->bt_ci_compliance && priv->bt_ant_couple_ok)
+ if (priv->bt_ci_compliance)
full_concurrent = true;
else
full_concurrent = false;
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
index 3029e3f6de63..cd73fc5cfcbb 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
@@ -621,7 +621,7 @@ int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
IWL_DEBUG_TX_QUEUES(priv, "Can proceed: ssn = next_recl = %d\n",
tid_data->agg.ssn);
tid_data->agg.state = IWL_AGG_STARTING;
- ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ ret = IEEE80211_AMPDU_TX_START_IMMEDIATE;
} else {
IWL_DEBUG_TX_QUEUES(priv, "Can't proceed: ssn %d, "
"next_reclaimed = %d\n",
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
index c2db758b9d54..40fe2d667622 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
@@ -61,6 +61,7 @@
#include "iwl-drv.h"
#include "iwl-debug.h"
#include "acpi.h"
+#include "fw/runtime.h"
void *iwl_acpi_get_object(struct device *dev, acpi_string method)
{
@@ -245,3 +246,289 @@ out_free:
return ret;
}
IWL_EXPORT_SYMBOL(iwl_acpi_get_eckv);
+
+int iwl_sar_set_profile(union acpi_object *table,
+ struct iwl_sar_profile *profile,
+ bool enabled)
+{
+ int i;
+
+ profile->enabled = enabled;
+
+ for (i = 0; i < ACPI_SAR_TABLE_SIZE; i++) {
+ if (table[i].type != ACPI_TYPE_INTEGER ||
+ table[i].integer.value > U8_MAX)
+ return -EINVAL;
+
+ profile->table[i] = table[i].integer.value;
+ }
+
+ return 0;
+}
+IWL_EXPORT_SYMBOL(iwl_sar_set_profile);
+
+int iwl_sar_select_profile(struct iwl_fw_runtime *fwrt,
+ __le16 per_chain_restriction[][IWL_NUM_SUB_BANDS],
+ int prof_a, int prof_b)
+{
+ int i, j, idx;
+ int profs[ACPI_SAR_NUM_CHAIN_LIMITS] = { prof_a, prof_b };
+
+ BUILD_BUG_ON(ACPI_SAR_NUM_CHAIN_LIMITS < 2);
+ BUILD_BUG_ON(ACPI_SAR_NUM_CHAIN_LIMITS * ACPI_SAR_NUM_SUB_BANDS !=
+ ACPI_SAR_TABLE_SIZE);
+
+ for (i = 0; i < ACPI_SAR_NUM_CHAIN_LIMITS; i++) {
+ struct iwl_sar_profile *prof;
+
+ /* don't allow SAR to be disabled (profile 0 means disable) */
+ if (profs[i] == 0)
+ return -EPERM;
+
+ /* we are off by one, so allow up to ACPI_SAR_PROFILE_NUM */
+ if (profs[i] > ACPI_SAR_PROFILE_NUM)
+ return -EINVAL;
+
+ /* profiles go from 1 to 4, so decrement to access the array */
+ prof = &fwrt->sar_profiles[profs[i] - 1];
+
+ /* if the profile is disabled, do nothing */
+ if (!prof->enabled) {
+ IWL_DEBUG_RADIO(fwrt, "SAR profile %d is disabled.\n",
+ profs[i]);
+ /* if one of the profiles is disabled, we fail all */
+ return -ENOENT;
+ }
+ IWL_DEBUG_INFO(fwrt,
+ "SAR EWRD: chain %d profile index %d\n",
+ i, profs[i]);
+ IWL_DEBUG_RADIO(fwrt, " Chain[%d]:\n", i);
+ for (j = 0; j < ACPI_SAR_NUM_SUB_BANDS; j++) {
+ idx = (i * ACPI_SAR_NUM_SUB_BANDS) + j;
+ per_chain_restriction[i][j] =
+ cpu_to_le16(prof->table[idx]);
+ IWL_DEBUG_RADIO(fwrt, " Band[%d] = %d * .125dBm\n",
+ j, prof->table[idx]);
+ }
+ }
+
+ return 0;
+}
+IWL_EXPORT_SYMBOL(iwl_sar_select_profile);
+
+int iwl_sar_get_wrds_table(struct iwl_fw_runtime *fwrt)
+{
+ union acpi_object *wifi_pkg, *table, *data;
+ bool enabled;
+ int ret, tbl_rev;
+
+ data = iwl_acpi_get_object(fwrt->dev, ACPI_WRDS_METHOD);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
+ ACPI_WRDS_WIFI_DATA_SIZE, &tbl_rev);
+ if (IS_ERR(wifi_pkg) || tbl_rev != 0) {
+ ret = PTR_ERR(wifi_pkg);
+ goto out_free;
+ }
+
+ if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) {
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ enabled = !!(wifi_pkg->package.elements[1].integer.value);
+
+ /* position of the actual table */
+ table = &wifi_pkg->package.elements[2];
+
+ /* The profile from WRDS is officially profile 1, but goes
+ * into sar_profiles[0] (because we don't have a profile 0).
+ */
+ ret = iwl_sar_set_profile(table, &fwrt->sar_profiles[0], enabled);
+out_free:
+ kfree(data);
+ return ret;
+}
+IWL_EXPORT_SYMBOL(iwl_sar_get_wrds_table);
+
+int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt)
+{
+ union acpi_object *wifi_pkg, *data;
+ bool enabled;
+ int i, n_profiles, tbl_rev;
+ int ret = 0;
+
+ data = iwl_acpi_get_object(fwrt->dev, ACPI_EWRD_METHOD);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
+ ACPI_EWRD_WIFI_DATA_SIZE, &tbl_rev);
+ if (IS_ERR(wifi_pkg) || tbl_rev != 0) {
+ ret = PTR_ERR(wifi_pkg);
+ goto out_free;
+ }
+
+ if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER ||
+ wifi_pkg->package.elements[2].type != ACPI_TYPE_INTEGER) {
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ enabled = !!(wifi_pkg->package.elements[1].integer.value);
+ n_profiles = wifi_pkg->package.elements[2].integer.value;
+
+ /*
+ * Check the validity of n_profiles. The EWRD profiles start
+ * from index 1, so the maximum value allowed here is
+	 * ACPI_SAR_PROFILE_NUM - 1.
+ */
+ if (n_profiles <= 0 || n_profiles >= ACPI_SAR_PROFILE_NUM) {
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ for (i = 0; i < n_profiles; i++) {
+ /* the tables start at element 3 */
+ int pos = 3;
+
+ /* The EWRD profiles officially go from 2 to 4, but we
+ * save them in sar_profiles[1-3] (because we don't
+ * have profile 0). So in the array we start from 1.
+ */
+ ret = iwl_sar_set_profile(&wifi_pkg->package.elements[pos],
+ &fwrt->sar_profiles[i + 1],
+ enabled);
+ if (ret < 0)
+ break;
+
+ /* go to the next table */
+ pos += ACPI_SAR_TABLE_SIZE;
+ }
+
+out_free:
+ kfree(data);
+ return ret;
+}
+IWL_EXPORT_SYMBOL(iwl_sar_get_ewrd_table);
+
+int iwl_sar_get_wgds_table(struct iwl_fw_runtime *fwrt)
+{
+ union acpi_object *wifi_pkg, *data;
+ int i, j, ret, tbl_rev;
+ int idx = 1;
+
+ data = iwl_acpi_get_object(fwrt->dev, ACPI_WGDS_METHOD);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
+ ACPI_WGDS_WIFI_DATA_SIZE, &tbl_rev);
+ if (IS_ERR(wifi_pkg) || tbl_rev > 1) {
+ ret = PTR_ERR(wifi_pkg);
+ goto out_free;
+ }
+
+ fwrt->geo_rev = tbl_rev;
+ for (i = 0; i < ACPI_NUM_GEO_PROFILES; i++) {
+ for (j = 0; j < ACPI_GEO_TABLE_SIZE; j++) {
+ union acpi_object *entry;
+
+ entry = &wifi_pkg->package.elements[idx++];
+ if (entry->type != ACPI_TYPE_INTEGER ||
+ entry->integer.value > U8_MAX) {
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ fwrt->geo_profiles[i].values[j] = entry->integer.value;
+ }
+ }
+ ret = 0;
+out_free:
+ kfree(data);
+ return ret;
+}
+IWL_EXPORT_SYMBOL(iwl_sar_get_wgds_table);
+
+bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt)
+{
+ /*
+ * The GEO_TX_POWER_LIMIT command is not supported on earlier
+ * firmware versions. Unfortunately, we don't have a TLV API
+ * flag to rely on, so rely on the major version which is in
+ * the first byte of ucode_ver. This was implemented
+ * initially on version 38 and then backported to 17. It was
+ * also backported to 29, but only for 7265D devices. The
+ * intention was to have it in 36 as well, but not all 8000
+ * family got this feature enabled. The 8000 family is the
+ * only one using version 36, so skip this version entirely.
+ */
+ return IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) >= 38 ||
+ IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 17 ||
+ (IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 29 &&
+ ((fwrt->trans->hw_rev & CSR_HW_REV_TYPE_MSK) ==
+ CSR_HW_REV_TYPE_7265D));
+}
+IWL_EXPORT_SYMBOL(iwl_sar_geo_support);
+
+int iwl_validate_sar_geo_profile(struct iwl_fw_runtime *fwrt,
+ struct iwl_host_cmd *cmd)
+{
+ struct iwl_geo_tx_power_profiles_resp *resp;
+ int ret;
+
+ resp = (void *)cmd->resp_pkt->data;
+ ret = le32_to_cpu(resp->profile_idx);
+ if (WARN_ON(ret > ACPI_NUM_GEO_PROFILES)) {
+ ret = -EIO;
+ IWL_WARN(fwrt, "Invalid geographic profile idx (%d)\n", ret);
+ }
+
+ return ret;
+}
+IWL_EXPORT_SYMBOL(iwl_validate_sar_geo_profile);
+
+void iwl_sar_geo_init(struct iwl_fw_runtime *fwrt,
+ struct iwl_per_chain_offset_group *table)
+{
+ int ret, i, j;
+
+ if (!iwl_sar_geo_support(fwrt))
+ return;
+
+ ret = iwl_sar_get_wgds_table(fwrt);
+ if (ret < 0) {
+ IWL_DEBUG_RADIO(fwrt,
+ "Geo SAR BIOS table invalid or unavailable. (%d)\n",
+ ret);
+ /* we don't fail if the table is not available */
+ return;
+ }
+
+ BUILD_BUG_ON(ACPI_NUM_GEO_PROFILES * ACPI_WGDS_NUM_BANDS *
+ ACPI_WGDS_TABLE_SIZE + 1 != ACPI_WGDS_WIFI_DATA_SIZE);
+
+ BUILD_BUG_ON(ACPI_NUM_GEO_PROFILES > IWL_NUM_GEO_PROFILES);
+
+ for (i = 0; i < ACPI_NUM_GEO_PROFILES; i++) {
+ struct iwl_per_chain_offset *chain =
+ (struct iwl_per_chain_offset *)&table[i];
+
+ for (j = 0; j < ACPI_WGDS_NUM_BANDS; j++) {
+ u8 *value;
+
+ value = &fwrt->geo_profiles[i].values[j *
+ ACPI_GEO_PER_CHAIN_SIZE];
+ chain[j].max_tx_power = cpu_to_le16(value[0]);
+ chain[j].chain_a = value[1];
+ chain[j].chain_b = value[2];
+ IWL_DEBUG_RADIO(fwrt,
+ "SAR geographic profile[%d] Band[%d]: chain A = %d chain B = %d max_tx_power = %d\n",
+ i, j, value[1], value[2], value[0]);
+ }
+ }
+}
+IWL_EXPORT_SYMBOL(iwl_sar_geo_init);
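The block above moves SAR/geo table handling into shared fw/acpi code so mvm (and future users) stop open-coding it. As a usage sketch, not taken from this hunk: a caller loads the BIOS tables once at init, then resolves a pair of 1-based profile indices into per-chain, per-sub-band limits:

/* Sketch: typical call order for the shared SAR helpers. */
static int sar_init_sketch(struct iwl_fw_runtime *fwrt)
{
	__le16 per_chain[ACPI_SAR_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS];
	int ret;

	ret = iwl_sar_get_wrds_table(fwrt);	/* profile 1 */
	if (ret)
		return ret;
	iwl_sar_get_ewrd_table(fwrt);		/* profiles 2..4; optional */

	/* indices are 1-based; 0 means "disable SAR" and is rejected */
	return iwl_sar_select_profile(fwrt, per_chain, 1, 1);
}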
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
index 6cb2d1f5efea..4a6e8262974b 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
@@ -61,6 +61,12 @@
#define __iwl_fw_acpi__
#include <linux/acpi.h>
+#include "fw/api/commands.h"
+#include "fw/api/power.h"
+#include "fw/api/phy.h"
+#include "fw/img.h"
+#include "iwl-trans.h"
+
#define ACPI_WRDS_METHOD "WRDS"
#define ACPI_EWRD_METHOD "EWRD"
@@ -104,9 +110,21 @@
#define ACPI_PPAG_MIN_HB -16
#define ACPI_PPAG_MAX_HB 40
+struct iwl_sar_profile {
+ bool enabled;
+ u8 table[ACPI_SAR_TABLE_SIZE];
+};
+
+struct iwl_geo_profile {
+ u8 values[ACPI_GEO_TABLE_SIZE];
+};
+
#ifdef CONFIG_ACPI
+struct iwl_fw_runtime;
+
void *iwl_acpi_get_object(struct device *dev, acpi_string method);
+
union acpi_object *iwl_acpi_get_wifi_pkg(struct device *dev,
union acpi_object *data,
int data_size, int *tbl_rev);
@@ -134,6 +152,27 @@ u64 iwl_acpi_get_pwr_limit(struct device *dev);
*/
int iwl_acpi_get_eckv(struct device *dev, u32 *extl_clk);
+int iwl_sar_set_profile(union acpi_object *table,
+ struct iwl_sar_profile *profile,
+ bool enabled);
+
+int iwl_sar_select_profile(struct iwl_fw_runtime *fwrt,
+ __le16 per_chain_restriction[][IWL_NUM_SUB_BANDS],
+ int prof_a, int prof_b);
+
+int iwl_sar_get_wrds_table(struct iwl_fw_runtime *fwrt);
+
+int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt);
+
+int iwl_sar_get_wgds_table(struct iwl_fw_runtime *fwrt);
+
+bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt);
+
+int iwl_validate_sar_geo_profile(struct iwl_fw_runtime *fwrt,
+ struct iwl_host_cmd *cmd);
+
+void iwl_sar_geo_init(struct iwl_fw_runtime *fwrt,
+ struct iwl_per_chain_offset_group *table);
#else /* CONFIG_ACPI */
static inline void *iwl_acpi_get_object(struct device *dev, acpi_string method)
@@ -164,5 +203,50 @@ static inline int iwl_acpi_get_eckv(struct device *dev, u32 *extl_clk)
return -ENOENT;
}
+static inline int iwl_sar_set_profile(union acpi_object *table,
+ struct iwl_sar_profile *profile,
+ bool enabled)
+{
+ return -ENOENT;
+}
+
+static inline int iwl_sar_select_profile(struct iwl_fw_runtime *fwrt,
+ __le16 per_chain_restriction[][IWL_NUM_SUB_BANDS],
+ int prof_a, int prof_b)
+{
+ return -ENOENT;
+}
+
+static inline int iwl_sar_get_wrds_table(struct iwl_fw_runtime *fwrt)
+{
+ return -ENOENT;
+}
+
+static inline int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt)
+{
+ return -ENOENT;
+}
+
+static inline int iwl_sar_get_wgds_table(struct iwl_fw_runtime *fwrt)
+{
+ return -ENOENT;
+}
+
+static inline bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt)
+{
+ return false;
+}
+
+static inline int iwl_validate_sar_geo_profile(struct iwl_fw_runtime *fwrt,
+ struct iwl_host_cmd *cmd)
+{
+ return -ENOENT;
+}
+
+static inline void iwl_sar_geo_init(struct iwl_fw_runtime *fwrt,
+ struct iwl_per_chain_offset_group *table)
+{
+}
+
#endif /* CONFIG_ACPI */
#endif /* __iwl_fw_acpi__ */
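Note the header's structure: every helper exported under CONFIG_ACPI is paired with a static-inline stub in the #else branch, so callers never need their own #ifdef. The stubs return -ENOENT (or false, or do nothing), which callers already treat as "table not present":

/* Sketch of the pattern this header follows for each helper. */
#ifdef CONFIG_ACPI
int iwl_sar_get_wrds_table(struct iwl_fw_runtime *fwrt);
#else
static inline int iwl_sar_get_wrds_table(struct iwl_fw_runtime *fwrt)
{
	return -ENOENT;	/* no ACPI: behave as if the table is absent */
}
#endif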
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
index 4c3219e7beb6..3643b6ba6385 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
@@ -65,6 +65,14 @@
#define __iwl_fw_api_d3_h__
/**
+ * enum iwl_d0i3_flags - d0i3 flags
+ * @IWL_D0I3_RESET_REQUIRE: FW requires a reset upon resume
+ */
+enum iwl_d0i3_flags {
+ IWL_D0I3_RESET_REQUIRE = BIT(0),
+};
+
+/**
* enum iwl_d3_wakeup_flags - D3 manager wakeup flags
* @IWL_WAKEUP_D3_CONFIG_FW_ERROR: wake up on firmware sysassert
*/
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
index ba586f148c14..b9d7ed93311c 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
@@ -60,52 +60,10 @@
#include <linux/bitops.h>
-/**
- * struct iwl_fw_ini_header: Common Header for all debug group TLV's structures
- *
- * @tlv_version: version info
- * @apply_point: &enum iwl_fw_ini_apply_point
- * @data: TLV data followed
- */
-struct iwl_fw_ini_header {
- __le32 tlv_version;
- __le32 apply_point;
- u8 data[];
-} __packed; /* FW_DEBUG_TLV_HEADER_S */
-
-/**
- * struct iwl_fw_ini_allocation_tlv - (IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION)
- * buffer allocation TLV - for debug
- *
- * @iwl_fw_ini_header: header
- * @allocation_id: &enum iwl_fw_ini_allocation_id - to bind allocation and hcmd
- * if needed (DBGC1/DBGC2/SDFX/...)
- * @buffer_location: type of iwl_fw_ini_buffer_location
- * @size: size in bytes
- * @max_fragments: the maximum allowed fragmentation in the desired memory
- * allocation above
- * @min_frag_size: the minimum allowed fragmentation size in bytes
- */
-struct iwl_fw_ini_allocation_tlv {
- struct iwl_fw_ini_header header;
- __le32 allocation_id;
- __le32 buffer_location;
- __le32 size;
- __le32 max_fragments;
- __le32 min_frag_size;
-} __packed; /* FW_DEBUG_TLV_BUFFER_ALLOCATION_TLV_S_VER_1 */
-
-/**
- * enum iwl_fw_ini_dbg_domain - debug domains
- * allows to send host cmd or collect memory region if a given domain is enabled
- *
- * @IWL_FW_INI_DBG_DOMAIN_ALWAYS_ON: the default domain, always on
- * @IWL_FW_INI_DBG_DOMAIN_REPORT_PS: power save domain
- */
-enum iwl_fw_ini_dbg_domain {
- IWL_FW_INI_DBG_DOMAIN_ALWAYS_ON = 0,
- IWL_FW_INI_DBG_DOMAIN_REPORT_PS,
-}; /* FW_DEBUG_TLV_DOMAIN_API_E_VER_1 */
+#define IWL_FW_INI_MAX_REGION_ID 64
+#define IWL_FW_INI_MAX_NAME 32
+#define IWL_FW_INI_MAX_CFG_NAME 64
+#define IWL_FW_INI_DOMAIN_ALWAYS_ON 0
/**
* struct iwl_fw_ini_hcmd
@@ -123,279 +81,198 @@ struct iwl_fw_ini_hcmd {
} __packed; /* FW_DEBUG_TLV_HCMD_DATA_API_S_VER_1 */
/**
- * struct iwl_fw_ini_hcmd_tlv - (IWL_UCODE_TLV_TYPE_HCMD)
- * Generic Host command pass through TLV
- *
- * @header: header
- * @domain: send command only if the specific domain is enabled
- * &enum iwl_fw_ini_dbg_domain
- * @period_msec: period in which the hcmd will be sent to FW. Measured in msec
- * (0 = one time command).
- * @hcmd: a variable length host-command to be sent to apply the configuration.
+ * struct iwl_fw_ini_header - Common Header for all ini debug TLV's structures
+ *
+ * @version: TLV version
+ * @domain: domain of the TLV. One of &enum iwl_fw_ini_dbg_domain
+ * @data: TLV data
*/
-struct iwl_fw_ini_hcmd_tlv {
- struct iwl_fw_ini_header header;
+struct iwl_fw_ini_header {
+ __le32 version;
__le32 domain;
- __le32 period_msec;
- struct iwl_fw_ini_hcmd hcmd;
-} __packed; /* FW_DEBUG_TLV_HCMD_API_S_VER_1 */
+ u8 data[];
+} __packed; /* FW_TLV_DEBUG_HEADER_S_VER_1 */
-#define IWL_FW_INI_MAX_REGION_ID 64
-#define IWL_FW_INI_MAX_NAME 32
+/**
+ * struct iwl_fw_ini_region_dev_addr - Configuration to read device addresses
+ *
+ * @size: size of each memory chunk
+ * @offset: offset to add to the base address of each chunk
+ */
+struct iwl_fw_ini_region_dev_addr {
+ __le32 size;
+ __le32 offset;
+} __packed; /* FW_TLV_DEBUG_DEVICE_ADDR_API_S_VER_1 */
/**
- * struct iwl_fw_ini_region_cfg_dhc - defines dhc response to dump.
+ * struct iwl_fw_ini_region_fifos - Configuration to read Tx/Rx fifos
*
- * @id_and_grp: id and group of dhc response.
- * @desc: dhc response descriptor.
+ * @fid: array of fifo ids, used to determine which fifos to collect
+ * @hdr_only: if non-zero, collect only the registers
+ * @offset: offset to add to the register addresses
*/
-struct iwl_fw_ini_region_cfg_dhc {
- __le32 id_and_grp;
- __le32 desc;
-} __packed; /* FW_DEBUG_TLV_REGION_DHC_API_S_VER_1 */
+struct iwl_fw_ini_region_fifos {
+ __le32 fid[2];
+ __le32 hdr_only;
+ __le32 offset;
+} __packed; /* FW_TLV_DEBUG_REGION_FIFOS_API_S_VER_1 */
/**
- * struct iwl_fw_ini_region_cfg_internal - meta data of internal memory region
+ * struct iwl_fw_ini_region_err_table - error table region data
+ *
+ * Configuration to read Umac/Lmac error table
*
- * @num_of_range: the amount of ranges in the region
- * @range_data_size: size of the data to read per range, in bytes.
+ * @version: version of the error table
+ * @base_addr: base address of the error table
+ * @size: size of the error table
+ * @offset: offset to add to &base_addr
*/
-struct iwl_fw_ini_region_cfg_internal {
- __le32 num_of_ranges;
- __le32 range_data_size;
-} __packed; /* FW_DEBUG_TLV_REGION_NIC_INTERNAL_RANGES_S */
+struct iwl_fw_ini_region_err_table {
+ __le32 version;
+ __le32 base_addr;
+ __le32 size;
+ __le32 offset;
+} __packed; /* FW_TLV_DEBUG_REGION_ERROR_TABLE_API_S_VER_1 */
/**
- * struct iwl_fw_ini_region_cfg_fifos - meta data of fifos region
- *
- * @fid1: fifo id 1 - bitmap of lmac tx/rx fifos to include in the region
- * @fid2: fifo id 2 - bitmap of umac rx fifos to include in the region.
- * It is unused for tx.
- * @num_of_registers: number of prph registers in the region, each register is
- * 4 bytes size.
- * @header_only: none zero value indicates that this region does not include
- * fifo data and includes only the given registers.
+ * struct iwl_fw_ini_region_internal_buffer - internal buffer region data
+ *
+ * Configuration to read internal monitor buffer
+ *
+ * @alloc_id: allocation id, one of &enum iwl_fw_ini_allocation_id
+ * @base_addr: internal buffer base address
+ * @size: internal buffer size
*/
-struct iwl_fw_ini_region_cfg_fifos {
- __le32 fid1;
- __le32 fid2;
- __le32 num_of_registers;
- __le32 header_only;
-} __packed; /* FW_DEBUG_TLV_REGION_FIFOS_S */
+struct iwl_fw_ini_region_internal_buffer {
+ __le32 alloc_id;
+ __le32 base_addr;
+ __le32 size;
+} __packed; /* FW_TLV_DEBUG_REGION_INTERNAL_BUFFER_API_S_VER_1 */
/**
- * struct iwl_fw_ini_region_cfg
- *
- * @region_id: ID of this dump configuration
- * @region_type: &enum iwl_fw_ini_region_type
- * @domain: dump this region only if the specific domain is enabled
- * &enum iwl_fw_ini_dbg_domain
- * @name_len: name length
- * @name: file name to use for this region
- * @internal: used in case the region uses internal memory.
- * @allocation_id: For DRAM type field substitutes for allocation_id
- * @fifos: used in case of fifos region.
- * @dhc_desc: dhc response descriptor.
- * @notif_id_and_grp: dump this region only if the specific notification
- * occurred.
- * @offset: offset to use for each memory base address
- * @start_addr: array of addresses.
+ * struct iwl_fw_ini_region_tlv - region TLV
+ *
+ * Configures parameters for region data collection
+ *
+ * @hdr: debug header
+ * @id: region id. Max id is &IWL_FW_INI_MAX_REGION_ID
+ * @type: region type. One of &enum iwl_fw_ini_region_type
+ * @name: region name
+ * @dev_addr: device address configuration. Used by
+ * &IWL_FW_INI_REGION_DEVICE_MEMORY, &IWL_FW_INI_REGION_PERIPHERY_MAC,
+ * &IWL_FW_INI_REGION_PERIPHERY_PHY, &IWL_FW_INI_REGION_PERIPHERY_AUX,
+ * &IWL_FW_INI_REGION_PAGING, &IWL_FW_INI_REGION_CSR,
+ * &IWL_FW_INI_REGION_DRAM_IMR and &IWL_FW_INI_REGION_PCI_IOSF_CONFIG
+ * @fifos: fifos configuration. Used by &IWL_FW_INI_REGION_TXF and
+ * &IWL_FW_INI_REGION_RXF
+ * @err_table: error table configuration. Used by
+ * IWL_FW_INI_REGION_LMAC_ERROR_TABLE and
+ * IWL_FW_INI_REGION_UMAC_ERROR_TABLE
+ * @internal_buffer: internal monitor buffer configuration. Used by
+ * &IWL_FW_INI_REGION_INTERNAL_BUFFER
+ * @dram_alloc_id: dram allocation id. One of &enum iwl_fw_ini_allocation_id.
+ * Used by &IWL_FW_INI_REGION_DRAM_BUFFER
+ * @tlv_mask: tlv collection mask. Used by &IWL_FW_INI_REGION_TLV
+ * @addrs: array of addresses attached to the end of the region tlv
*/
-struct iwl_fw_ini_region_cfg {
- __le32 region_id;
- __le32 region_type;
- __le32 domain;
- __le32 name_len;
+struct iwl_fw_ini_region_tlv {
+ struct iwl_fw_ini_header hdr;
+ __le32 id;
+ __le32 type;
u8 name[IWL_FW_INI_MAX_NAME];
union {
- struct iwl_fw_ini_region_cfg_internal internal;
- __le32 allocation_id;
- struct iwl_fw_ini_region_cfg_fifos fifos;
- struct iwl_fw_ini_region_cfg_dhc dhc_desc;
- __le32 notif_id_and_grp;
- }; /* FW_DEBUG_TLV_REGION_EXT_INT_PARAMS_API_U_VER_1 */
- __le32 offset;
- __le32 start_addr[];
-} __packed; /* FW_DEBUG_TLV_REGION_CONFIG_API_S_VER_1 */
+ struct iwl_fw_ini_region_dev_addr dev_addr;
+ struct iwl_fw_ini_region_fifos fifos;
+ struct iwl_fw_ini_region_err_table err_table;
+ struct iwl_fw_ini_region_internal_buffer internal_buffer;
+ __le32 dram_alloc_id;
+ __le32 tlv_mask;
+ }; /* FW_TLV_DEBUG_REGION_CONF_PARAMS_API_U_VER_1 */
+ __le32 addrs[];
+} __packed; /* FW_TLV_DEBUG_REGION_API_S_VER_1 */
/**
- * struct iwl_fw_ini_region_tlv - (IWL_UCODE_TLV_TYPE_REGIONS)
- * defines memory regions to dump
+ * struct iwl_fw_ini_debug_info_tlv
+ *
+ * debug configuration name for a specific image
*
- * @header: header
- * @num_regions: how many different region section and IDs are coming next
- * @region_config: list of dump configurations
+ * @hdr: debug header
+ * @image_type: image type
+ * @debug_cfg_name: debug configuration name
*/
-struct iwl_fw_ini_region_tlv {
- struct iwl_fw_ini_header header;
- __le32 num_regions;
- struct iwl_fw_ini_region_cfg region_config[];
-} __packed; /* FW_DEBUG_TLV_REGIONS_API_S_VER_1 */
+struct iwl_fw_ini_debug_info_tlv {
+ struct iwl_fw_ini_header hdr;
+ __le32 image_type;
+ u8 debug_cfg_name[IWL_FW_INI_MAX_CFG_NAME];
+} __packed; /* FW_TLV_DEBUG_INFO_API_S_VER_1 */
+
+/**
+ * struct iwl_fw_ini_allocation_tlv - Allocates DRAM buffers
+ *
+ * @hdr: debug header
+ * @alloc_id: allocation id. One of &enum iwl_fw_ini_allocation_id
+ * @buf_location: buffer location. One of &enum iwl_fw_ini_buffer_location
+ * @req_size: requested buffer size
+ * @max_frags_num: maximum number of fragments
+ * @min_size: minimum buffer size
+ */
+struct iwl_fw_ini_allocation_tlv {
+ struct iwl_fw_ini_header hdr;
+ __le32 alloc_id;
+ __le32 buf_location;
+ __le32 req_size;
+ __le32 max_frags_num;
+ __le32 min_size;
+} __packed; /* FW_TLV_DEBUG_BUFFER_ALLOCATION_API_S_VER_1 */
/**
- * struct iwl_fw_ini_trigger
- *
- * @trigger_id: &enum iwl_fw_ini_trigger_id
- * @override_trig: determines how apply trigger in case a trigger with the
- * same id is already in use. Using the first 2 bytes:
- * Byte 0: if 0, override trigger configuration, otherwise use the
- * existing configuration.
- * Byte 1: if 0, override trigger regions, otherwise append regions to
- * existing trigger.
+ * struct iwl_fw_ini_trigger_tlv - trigger TLV
+ *
+ * Trigger that upon firing, determines what regions to collect
+ *
+ * @hdr: debug header
+ * @time_point: time point. One of &enum iwl_fw_ini_time_point
+ * @trigger_reason: trigger reason
+ * @apply_policy: uses &enum iwl_fw_ini_trigger_apply_policy
* @dump_delay: delay from trigger fire to dump, in usec
- * @occurrences: max amount of times to be fired
- * @reserved: to align to FW struct
+ * @occurrences: max trigger fire occurrences allowed
+ * @reserved: unused
* @ignore_consec: ignore consecutive triggers, in usec
- * @force_restart: force FW restart
+ * @reset_fw: if non-zero, reset and reload the FW
* @multi_dut: initiate debug dump data on several DUTs
- * @trigger_data: generic data to be utilized per trigger
- * @num_regions: number of dump regions defined for this trigger
- * @data: region IDs
+ * @regions_mask: mask of regions to collect
+ * @data: trigger data
*/
-struct iwl_fw_ini_trigger {
- __le32 trigger_id;
- __le32 override_trig;
+struct iwl_fw_ini_trigger_tlv {
+ struct iwl_fw_ini_header hdr;
+ __le32 time_point;
+ __le32 trigger_reason;
+ __le32 apply_policy;
__le32 dump_delay;
__le32 occurrences;
__le32 reserved;
__le32 ignore_consec;
- __le32 force_restart;
+ __le32 reset_fw;
__le32 multi_dut;
- __le32 trigger_data;
- __le32 num_regions;
+ __le64 regions_mask;
__le32 data[];
-} __packed; /* FW_TLV_DEBUG_TRIGGER_CONFIG_API_S_VER_1 */
-
-/**
- * struct iwl_fw_ini_trigger_tlv - (IWL_UCODE_TLV_TYPE_TRIGGERS)
- * Triggers that hold memory regions to dump in case a trigger fires
- *
- * @header: header
- * @num_triggers: how many different triggers section and IDs are coming next
- * @trigger_config: list of trigger configurations
- */
-struct iwl_fw_ini_trigger_tlv {
- struct iwl_fw_ini_header header;
- __le32 num_triggers;
- struct iwl_fw_ini_trigger trigger_config[];
-} __packed; /* FW_TLV_DEBUG_TRIGGERS_API_S_VER_1 */
-
-#define IWL_FW_INI_MAX_IMG_NAME_LEN 32
-#define IWL_FW_INI_MAX_DBG_CFG_NAME_LEN 64
+} __packed; /* FW_TLV_DEBUG_TRIGGER_API_S_VER_1 */
/**
- * struct iwl_fw_ini_debug_info_tlv - (IWL_UCODE_TLV_TYPE_DEBUG_INFO)
- *
- * holds image name and debug configuration name
+ * struct iwl_fw_ini_hcmd_tlv - Generic Host command pass through TLV
*
- * @header: header
- * @img_name_len: length of the image name string
- * @img_name: image name string
- * @dbg_cfg_name_len : length of the debug configuration name string
- * @dbg_cfg_name: debug configuration name string
- */
-struct iwl_fw_ini_debug_info_tlv {
- struct iwl_fw_ini_header header;
- __le32 img_name_len;
- u8 img_name[IWL_FW_INI_MAX_IMG_NAME_LEN];
- __le32 dbg_cfg_name_len;
- u8 dbg_cfg_name[IWL_FW_INI_MAX_DBG_CFG_NAME_LEN];
-} __packed; /* FW_DEBUG_TLV_INFO_API_S_VER_1 */
-
-/**
- * enum iwl_fw_ini_trigger_id
- *
- * @IWL_FW_TRIGGER_ID_FW_ASSERT: FW assert
- * @IWL_FW_TRIGGER_ID_FW_HW_ERROR: HW assert
- * @IWL_FW_TRIGGER_ID_FW_TFD_Q_HANG: TFD queue hang
- * @IWL_FW_TRIGGER_ID_FW_DEBUG_HOST_TRIGGER: FW debug notification
- * @IWL_FW_TRIGGER_ID_FW_GENERIC_NOTIFICATION: FW generic notification
- * @IWL_FW_TRIGGER_ID_USER_TRIGGER: User trigger
- * @IWL_FW_TRIGGER_ID_PERIODIC_TRIGGER: triggers periodically
- * @IWL_FW_TRIGGER_ID_HOST_PEER_CLIENT_INACTIVITY: peer inactivity
- * @IWL_FW_TRIGGER_ID_HOST_TX_LATENCY_THRESHOLD_CROSSED: TX latency
- * threshold was crossed
- * @IWL_FW_TRIGGER_ID_HOST_TX_RESPONSE_STATUS_FAILED: TX failed
- * @IWL_FW_TRIGGER_ID_HOST_OS_REQ_DEAUTH_PEER: Deauth initiated by host
- * @IWL_FW_TRIGGER_ID_HOST_STOP_GO_REQUEST: stop GO request
- * @IWL_FW_TRIGGER_ID_HOST_START_GO_REQUEST: start GO request
- * @IWL_FW_TRIGGER_ID_HOST_JOIN_GROUP_REQUEST: join P2P group request
- * @IWL_FW_TRIGGER_ID_HOST_SCAN_START: scan started event
- * @IWL_FW_TRIGGER_ID_HOST_SCAN_SUBMITTED: undefined
- * @IWL_FW_TRIGGER_ID_HOST_SCAN_PARAMS: undefined
- * @IWL_FW_TRIGGER_ID_HOST_CHECK_FOR_HANG: undefined
- * @IWL_FW_TRIGGER_ID_HOST_BAR_RECEIVED: BAR frame was received
- * @IWL_FW_TRIGGER_ID_HOST_AGG_TX_RESPONSE_STATUS_FAILED: agg TX failed
- * @IWL_FW_TRIGGER_ID_HOST_EAPOL_TX_RESPONSE_FAILED: EAPOL TX failed
- * @IWL_FW_TRIGGER_ID_HOST_FAKE_TX_RESPONSE_SUSPECTED: suspicious TX response
- * @IWL_FW_TRIGGER_ID_HOST_AUTH_REQ_FROM_ASSOC_CLIENT: received suspicious auth
- * @IWL_FW_TRIGGER_ID_HOST_ROAM_COMPLETE: roaming was completed
- * @IWL_FW_TRIGGER_ID_HOST_AUTH_ASSOC_FAST_FAILED: fast assoc failed
- * @IWL_FW_TRIGGER_ID_HOST_D3_START: D3 start
- * @IWL_FW_TRIGGER_ID_HOST_D3_END: D3 end
- * @IWL_FW_TRIGGER_ID_HOST_BSS_MISSED_BEACONS: missed beacon events
- * @IWL_FW_TRIGGER_ID_HOST_P2P_CLIENT_MISSED_BEACONS: P2P missed beacon events
- * @IWL_FW_TRIGGER_ID_HOST_PEER_CLIENT_TX_FAILURES: undefined
- * @IWL_FW_TRIGGER_ID_HOST_TX_WFD_ACTION_FRAME_FAILED: undefined
- * @IWL_FW_TRIGGER_ID_HOST_AUTH_ASSOC_FAILED: authentication / association
- * failed
- * @IWL_FW_TRIGGER_ID_HOST_SCAN_COMPLETE: scan complete event
- * @IWL_FW_TRIGGER_ID_HOST_SCAN_ABORT: scan abort complete
- * @IWL_FW_TRIGGER_ID_HOST_NIC_ALIVE: nic alive message was received
- * @IWL_FW_TRIGGER_ID_HOST_CHANNEL_SWITCH_COMPLETE: CSA was completed
- * @IWL_FW_TRIGGER_ID_NUM: number of trigger IDs
+ * @hdr: debug header
+ * @time_point: time point. One of &enum iwl_fw_ini_time_point
+ * @period_msec: interval at which the hcmd will be sent to the FW.
+ * Measured in msec (0 = one-time command)
+ * @hcmd: a variable length host-command to be sent to apply the configuration
*/
-enum iwl_fw_ini_trigger_id {
- IWL_FW_TRIGGER_ID_INVALID = 0,
-
- /* Errors triggers */
- IWL_FW_TRIGGER_ID_FW_ASSERT = 1,
- IWL_FW_TRIGGER_ID_FW_HW_ERROR = 2,
- IWL_FW_TRIGGER_ID_FW_TFD_Q_HANG = 3,
-
- /* FW triggers */
- IWL_FW_TRIGGER_ID_FW_DEBUG_HOST_TRIGGER = 4,
- IWL_FW_TRIGGER_ID_FW_GENERIC_NOTIFICATION = 5,
-
- /* User trigger */
- IWL_FW_TRIGGER_ID_USER_TRIGGER = 6,
-
- /* periodic uses the data field for the interval time */
- IWL_FW_TRIGGER_ID_PERIODIC_TRIGGER = 7,
-
- /* Host triggers */
- IWL_FW_TRIGGER_ID_HOST_PEER_CLIENT_INACTIVITY = 8,
- IWL_FW_TRIGGER_ID_HOST_TX_LATENCY_THRESHOLD_CROSSED = 9,
- IWL_FW_TRIGGER_ID_HOST_TX_RESPONSE_STATUS_FAILED = 10,
- IWL_FW_TRIGGER_ID_HOST_OS_REQ_DEAUTH_PEER = 11,
- IWL_FW_TRIGGER_ID_HOST_STOP_GO_REQUEST = 12,
- IWL_FW_TRIGGER_ID_HOST_START_GO_REQUEST = 13,
- IWL_FW_TRIGGER_ID_HOST_JOIN_GROUP_REQUEST = 14,
- IWL_FW_TRIGGER_ID_HOST_SCAN_START = 15,
- IWL_FW_TRIGGER_ID_HOST_SCAN_SUBMITTED = 16,
- IWL_FW_TRIGGER_ID_HOST_SCAN_PARAMS = 17,
- IWL_FW_TRIGGER_ID_HOST_CHECK_FOR_HANG = 18,
- IWL_FW_TRIGGER_ID_HOST_BAR_RECEIVED = 19,
- IWL_FW_TRIGGER_ID_HOST_AGG_TX_RESPONSE_STATUS_FAILED = 20,
- IWL_FW_TRIGGER_ID_HOST_EAPOL_TX_RESPONSE_FAILED = 21,
- IWL_FW_TRIGGER_ID_HOST_FAKE_TX_RESPONSE_SUSPECTED = 22,
- IWL_FW_TRIGGER_ID_HOST_AUTH_REQ_FROM_ASSOC_CLIENT = 23,
- IWL_FW_TRIGGER_ID_HOST_ROAM_COMPLETE = 24,
- IWL_FW_TRIGGER_ID_HOST_AUTH_ASSOC_FAST_FAILED = 25,
- IWL_FW_TRIGGER_ID_HOST_D3_START = 26,
- IWL_FW_TRIGGER_ID_HOST_D3_END = 27,
- IWL_FW_TRIGGER_ID_HOST_BSS_MISSED_BEACONS = 28,
- IWL_FW_TRIGGER_ID_HOST_P2P_CLIENT_MISSED_BEACONS = 29,
- IWL_FW_TRIGGER_ID_HOST_PEER_CLIENT_TX_FAILURES = 30,
- IWL_FW_TRIGGER_ID_HOST_TX_WFD_ACTION_FRAME_FAILED = 31,
- IWL_FW_TRIGGER_ID_HOST_AUTH_ASSOC_FAILED = 32,
- IWL_FW_TRIGGER_ID_HOST_SCAN_COMPLETE = 33,
- IWL_FW_TRIGGER_ID_HOST_SCAN_ABORT = 34,
- IWL_FW_TRIGGER_ID_HOST_NIC_ALIVE = 35,
- IWL_FW_TRIGGER_ID_HOST_CHANNEL_SWITCH_COMPLETE = 36,
-
- IWL_FW_TRIGGER_ID_NUM,
-}; /* FW_DEBUG_TLV_TRIGGER_ID_E_VER_1 */
+struct iwl_fw_ini_hcmd_tlv {
+ struct iwl_fw_ini_header hdr;
+ __le32 time_point;
+ __le32 period_msec;
+ struct iwl_fw_ini_hcmd hcmd;
+} __packed; /* FW_TLV_DEBUG_HCMD_API_S_VER_1 */
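Because @hcmd is variable length, its payload size must be recovered from the TLV length rather than from a count field. A sketch under the same tlv_len assumption as above, and assuming iwl_fw_ini_hcmd ends in a flexible data[] array so sizeof() covers only its fixed part:

    /* Sketch: whatever remains of the TLV after the fixed fields is
     * the host command payload.
     */
    static u32 ini_hcmd_data_len(const struct iwl_fw_ini_hcmd_tlv *tlv,
                                 u32 tlv_len)
    {
            return tlv_len > sizeof(*tlv) ? tlv_len - sizeof(*tlv) : 0;
    }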
/**
* enum iwl_fw_ini_allocation_id
@@ -404,9 +281,6 @@ enum iwl_fw_ini_trigger_id {
* @IWL_FW_INI_ALLOCATION_ID_DBGC1: allocation meant for DBGC1 configuration
* @IWL_FW_INI_ALLOCATION_ID_DBGC2: allocation meant for DBGC2 configuration
* @IWL_FW_INI_ALLOCATION_ID_DBGC3: allocation meant for DBGC3 configuration
- * @IWL_FW_INI_ALLOCATION_ID_SDFX: for SDFX module
- * @IWL_FW_INI_ALLOCATION_ID_FW_DUMP: used for crash and runtime dumps
- * @IWL_FW_INI_ALLOCATION_ID_USER_DEFINED: for future user scenarios
* @IWL_FW_INI_ALLOCATION_NUM: number of allocation ids
*/
enum iwl_fw_ini_allocation_id {
@@ -414,9 +288,6 @@ enum iwl_fw_ini_allocation_id {
IWL_FW_INI_ALLOCATION_ID_DBGC1,
IWL_FW_INI_ALLOCATION_ID_DBGC2,
IWL_FW_INI_ALLOCATION_ID_DBGC3,
- IWL_FW_INI_ALLOCATION_ID_SDFX,
- IWL_FW_INI_ALLOCATION_ID_FW_DUMP,
- IWL_FW_INI_ALLOCATION_ID_USER_DEFINED,
IWL_FW_INI_ALLOCATION_NUM,
}; /* FW_DEBUG_TLV_ALLOCATION_ID_E_VER_1 */
@@ -436,58 +307,47 @@ enum iwl_fw_ini_buffer_location {
}; /* FW_DEBUG_TLV_BUFFER_LOCATION_E_VER_1 */
/**
- * enum iwl_fw_ini_debug_flow
- *
- * @IWL_FW_INI_DEBUG_INVALID: invalid
- * @IWL_FW_INI_DEBUG_DBTR_FLOW: undefined
- * @IWL_FW_INI_DEBUG_TB2DTF_FLOW: undefined
- */
-enum iwl_fw_ini_debug_flow {
- IWL_FW_INI_DEBUG_INVALID,
- IWL_FW_INI_DEBUG_DBTR_FLOW,
- IWL_FW_INI_DEBUG_TB2DTF_FLOW,
-}; /* FW_DEBUG_TLV_FLOW_E_VER_1 */
-
-/**
* enum iwl_fw_ini_region_type
*
* @IWL_FW_INI_REGION_INVALID: invalid
+ * @IWL_FW_INI_REGION_TLV: uCode and debug TLVs
+ * @IWL_FW_INI_REGION_INTERNAL_BUFFER: monitor SMEM buffer
+ * @IWL_FW_INI_REGION_DRAM_BUFFER: monitor DRAM buffer
+ * @IWL_FW_INI_REGION_TXF: TX fifos
+ * @IWL_FW_INI_REGION_RXF: RX fifo
+ * @IWL_FW_INI_REGION_LMAC_ERROR_TABLE: lmac error table
+ * @IWL_FW_INI_REGION_UMAC_ERROR_TABLE: umac error table
+ * @IWL_FW_INI_REGION_RSP_OR_NOTIF: FW response or notification data
* @IWL_FW_INI_REGION_DEVICE_MEMORY: device internal memory
* @IWL_FW_INI_REGION_PERIPHERY_MAC: periphery registers of MAC
* @IWL_FW_INI_REGION_PERIPHERY_PHY: periphery registers of PHY
* @IWL_FW_INI_REGION_PERIPHERY_AUX: periphery registers of AUX
- * @IWL_FW_INI_REGION_DRAM_BUFFER: DRAM buffer
- * @IWL_FW_INI_REGION_DRAM_IMR: IMR memory
- * @IWL_FW_INI_REGION_INTERNAL_BUFFER: undefined
- * @IWL_FW_INI_REGION_TXF: TX fifos
- * @IWL_FW_INI_REGION_RXF: RX fifo
* @IWL_FW_INI_REGION_PAGING: paging memory
* @IWL_FW_INI_REGION_CSR: CSR registers
- * @IWL_FW_INI_REGION_NOTIFICATION: FW notification data
- * @IWL_FW_INI_REGION_DHC: dhc response to dump
- * @IWL_FW_INI_REGION_LMAC_ERROR_TABLE: lmac error table
- * @IWL_FW_INI_REGION_UMAC_ERROR_TABLE: umac error table
+ * @IWL_FW_INI_REGION_DRAM_IMR: IMR memory
+ * @IWL_FW_INI_REGION_PCI_IOSF_CONFIG: PCI/IOSF config
* @IWL_FW_INI_REGION_NUM: number of region types
*/
enum iwl_fw_ini_region_type {
IWL_FW_INI_REGION_INVALID,
+ IWL_FW_INI_REGION_TLV,
+ IWL_FW_INI_REGION_INTERNAL_BUFFER,
+ IWL_FW_INI_REGION_DRAM_BUFFER,
+ IWL_FW_INI_REGION_TXF,
+ IWL_FW_INI_REGION_RXF,
+ IWL_FW_INI_REGION_LMAC_ERROR_TABLE,
+ IWL_FW_INI_REGION_UMAC_ERROR_TABLE,
+ IWL_FW_INI_REGION_RSP_OR_NOTIF,
IWL_FW_INI_REGION_DEVICE_MEMORY,
IWL_FW_INI_REGION_PERIPHERY_MAC,
IWL_FW_INI_REGION_PERIPHERY_PHY,
IWL_FW_INI_REGION_PERIPHERY_AUX,
- IWL_FW_INI_REGION_DRAM_BUFFER,
- IWL_FW_INI_REGION_DRAM_IMR,
- IWL_FW_INI_REGION_INTERNAL_BUFFER,
- IWL_FW_INI_REGION_TXF,
- IWL_FW_INI_REGION_RXF,
IWL_FW_INI_REGION_PAGING,
IWL_FW_INI_REGION_CSR,
- IWL_FW_INI_REGION_NOTIFICATION,
- IWL_FW_INI_REGION_DHC,
- IWL_FW_INI_REGION_LMAC_ERROR_TABLE,
- IWL_FW_INI_REGION_UMAC_ERROR_TABLE,
+ IWL_FW_INI_REGION_DRAM_IMR,
+ IWL_FW_INI_REGION_PCI_IOSF_CONFIG,
IWL_FW_INI_REGION_NUM
-}; /* FW_DEBUG_TLV_REGION_TYPE_E_VER_1 */
+}; /* FW_TLV_DEBUG_REGION_TYPE_API_E */
/**
* enum iwl_fw_ini_time_point
@@ -557,4 +417,22 @@ enum iwl_fw_ini_time_point {
IWL_FW_INI_TIME_POINT_NUM,
}; /* FW_TLV_DEBUG_TIME_POINT_API_E */
+/**
+ * enum iwl_fw_ini_trigger_apply_policy - Determines how to apply triggers
+ *
+ * @IWL_FW_INI_APPLY_POLICY_MATCH_TIME_POINT: match by time point
+ * @IWL_FW_INI_APPLY_POLICY_MATCH_DATA: match by trigger data
+ * @IWL_FW_INI_APPLY_POLICY_OVERRIDE_REGIONS: override regions mask.
+ * Append otherwise
+ * @IWL_FW_INI_APPLY_POLICY_OVERRIDE_CFG: override trigger configuration
+ * @IWL_FW_INI_APPLY_POLICY_OVERRIDE_DATA: override trigger data.
+ * Append otherwise
+ */
+enum iwl_fw_ini_trigger_apply_policy {
+ IWL_FW_INI_APPLY_POLICY_MATCH_TIME_POINT = BIT(0),
+ IWL_FW_INI_APPLY_POLICY_MATCH_DATA = BIT(1),
+ IWL_FW_INI_APPLY_POLICY_OVERRIDE_REGIONS = BIT(8),
+ IWL_FW_INI_APPLY_POLICY_OVERRIDE_CFG = BIT(9),
+ IWL_FW_INI_APPLY_POLICY_OVERRIDE_DATA = BIT(10),
+};
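To make the override/append semantics concrete, a sketch of merging a new trigger's regions into an existing one (the helper name is hypothetical):

    /* Sketch: OVERRIDE_REGIONS replaces the mask, otherwise the new
     * regions are appended (OR-ed) into the existing mask.
     */
    static void ini_merge_regions(struct iwl_fw_ini_trigger_tlv *cur,
                                  const struct iwl_fw_ini_trigger_tlv *new)
    {
            if (le32_to_cpu(new->apply_policy) &
                IWL_FW_INI_APPLY_POLICY_OVERRIDE_REGIONS)
                    cur->regions_mask = new->regions_mask;
            else
                    cur->regions_mask |= new->regions_mask;
    }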
#endif
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
index 6b4d59daacd6..e7a1acedbcf1 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
@@ -8,7 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -78,6 +78,20 @@ enum iwl_mac_conf_subcmd_ids {
*/
CHANNEL_SWITCH_TIME_EVENT_CMD = 0x4,
/**
+ * @MISSED_VAP_NOTIF: &struct iwl_missed_vap_notif
+ */
+ MISSED_VAP_NOTIF = 0xFA,
+ /**
+ * @SESSION_PROTECTION_CMD: &struct iwl_mvm_session_prot_cmd
+ */
+ SESSION_PROTECTION_CMD = 0x5,
+
+ /**
+ * @SESSION_PROTECTION_NOTIF: &struct iwl_mvm_session_prot_notif
+ */
+ SESSION_PROTECTION_NOTIF = 0xFB,
+
+ /**
* @PROBE_RESPONSE_DATA_NOTIF: &struct iwl_probe_resp_data_notif
*/
PROBE_RESPONSE_DATA_NOTIF = 0xFC,
@@ -131,6 +145,21 @@ struct iwl_probe_resp_data_notif {
} __packed; /* PROBE_RESPONSE_DATA_NTFY_API_S_VER_1 */
/**
+ * struct iwl_missed_vap_notif - notification of missing vap detection
+ *
+ * @mac_id: the mac for which the ucode sends the notification
+ * @num_beacon_intervals_elapsed: beacon intervals elapsed with no vap profile inside
+ * @profile_periodicity: beacons period to have our profile inside
+ * @reserved: reserved for alignment purposes
+ */
+struct iwl_missed_vap_notif {
+ __le32 mac_id;
+ u8 num_beacon_intervals_elapsed;
+ u8 profile_periodicity;
+ u8 reserved[2];
+} __packed; /* MISSED_VAP_NTFY_API_S_VER_1 */
+
+/**
* struct iwl_channel_switch_noa_notif - Channel switch NOA notification
*
* @id_and_color: ID and color of the MAC
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
index a93449db7bb2..88bc7733065f 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
@@ -260,6 +260,11 @@ enum iwl_rx_mpdu_amsdu_info {
IWL_RX_MPDU_AMSDU_LAST_SUBFRAME = 0x80,
};
+#define RX_MPDU_BAND_POS 6
+#define RX_MPDU_BAND_MASK 0xC0
+#define BAND_IN_RX_STATUS(_val) \
+ (((_val) & RX_MPDU_BAND_MASK) >> RX_MPDU_BAND_POS)
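A quick usage sketch of the macro (the value is illustrative):

    /* Example: status byte 0x8c -> band bits (0x8c & 0xC0) >> 6 == 2 */
    u8 band = BAND_IN_RX_STATUS(0x8c);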
+
enum iwl_rx_l3_proto_values {
IWL_RX_L3_TYPE_NONE,
IWL_RX_L3_TYPE_IPV4,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
index c0750ced5ac2..408798f351c6 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
@@ -70,6 +70,9 @@
/* Max number of IEs for direct SSID scans in a command */
#define PROBE_OPTION_MAX 20
+#define SCAN_SHORT_SSID_MAX_SIZE 8
+#define SCAN_BSSID_MAX_SIZE 16
+
/**
* struct iwl_ssid_ie - directed scan network information element
*
@@ -278,6 +281,9 @@ enum iwl_scan_channel_flags {
IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE = BIT(1),
IWL_SCAN_CHANNEL_FLAG_CACHE_ADD = BIT(2),
IWL_SCAN_CHANNEL_FLAG_EBS_FRAG = BIT(3),
+ IWL_SCAN_CHANNEL_FLAG_FORCE_EBS = BIT(4),
+ IWL_SCAN_CHANNEL_FLAG_ENABLE_CHAN_ORDER = BIT(5),
+ IWL_SCAN_CHANNEL_FLAG_6G_PSC_NO_FILTER = BIT(6),
};
/* struct iwl_scan_channel_opt - CHANNEL_OPTIMIZATION_API_S
@@ -638,6 +644,47 @@ enum iwl_umac_scan_general_flags2 {
};
/**
+ * enum iwl_umac_scan_general_flags_v2 - UMAC scan general flags version 2
+ *
+ * The FW flags were reordered and hence the driver introduces version 2
+ *
+ * @IWL_UMAC_SCAN_GEN_FLAGS_V2_PERIODIC: periodic or scheduled
+ * @IWL_UMAC_SCAN_GEN_FLAGS_V2_PASS_ALL: pass all probe responses and beacons
+ * during scan iterations
+ * @IWL_UMAC_SCAN_GEN_FLAGS_V2_NTFY_ITER_COMPLETE: send complete notification
+ * on every iteration instead of only once after the last iteration
+ * @IWL_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC1: fragmented scan LMAC1
+ * @IWL_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC2: fragmented scan LMAC2
+ * @IWL_UMAC_SCAN_GEN_FLAGS_V2_MATCH: does this scan check for profile matching
+ * @IWL_UMAC_SCAN_GEN_FLAGS_V2_USE_ALL_RX_CHAINS: use all valid chains for RX
+ * @IWL_UMAC_SCAN_GEN_FLAGS_V2_ADAPTIVE_DWELL: works with adaptive dwell
+ * for active channel
+ * @IWL_UMAC_SCAN_GEN_FLAGS_V2_PREEMPTIVE: can be preempted by other requests
+ * @IWL_UMAC_SCAN_GEN_FLAGS_V2_NTF_START: send notification of scan start
+ * @IWL_UMAC_SCAN_GEN_FLAGS_V2_MULTI_SSID: matching on multiple SSIDs
+ * @IWL_UMAC_SCAN_GEN_FLAGS_V2_FORCE_PASSIVE: all channels are scanned
+ * as passive
+ * @IWL_UMAC_SCAN_GEN_FLAGS_V2_TRIGGER_UHB_SCAN: at the end of the 2.4GHz and
+ * 5.2GHz band scan, trigger a scan on the 6GHz band to discover
+ * the reported collocated APs
+ */
+enum iwl_umac_scan_general_flags_v2 {
+ IWL_UMAC_SCAN_GEN_FLAGS_V2_PERIODIC = BIT(0),
+ IWL_UMAC_SCAN_GEN_FLAGS_V2_PASS_ALL = BIT(1),
+ IWL_UMAC_SCAN_GEN_FLAGS_V2_NTFY_ITER_COMPLETE = BIT(2),
+ IWL_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC1 = BIT(3),
+ IWL_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC2 = BIT(4),
+ IWL_UMAC_SCAN_GEN_FLAGS_V2_MATCH = BIT(5),
+ IWL_UMAC_SCAN_GEN_FLAGS_V2_USE_ALL_RX_CHAINS = BIT(6),
+ IWL_UMAC_SCAN_GEN_FLAGS_V2_ADAPTIVE_DWELL = BIT(7),
+ IWL_UMAC_SCAN_GEN_FLAGS_V2_PREEMPTIVE = BIT(8),
+ IWL_UMAC_SCAN_GEN_FLAGS_V2_NTF_START = BIT(9),
+ IWL_UMAC_SCAN_GEN_FLAGS_V2_MULTI_SSID = BIT(10),
+ IWL_UMAC_SCAN_GEN_FLAGS_V2_FORCE_PASSIVE = BIT(11),
+ IWL_UMAC_SCAN_GEN_FLAGS_V2_TRIGGER_UHB_SCAN = BIT(12),
+};
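For illustration, a periodic fragmented scan that notifies on every iteration would OR the bits together (this particular flag choice is illustrative only):

    /* Sketch: compose general flags for iwl_scan_general_params_v10.flags */
    __le16 gen_flags = cpu_to_le16(IWL_UMAC_SCAN_GEN_FLAGS_V2_PERIODIC |
                                   IWL_UMAC_SCAN_GEN_FLAGS_V2_NTFY_ITER_COMPLETE |
                                   IWL_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC1);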
+
+/**
* struct iwl_scan_channel_cfg_umac
* @flags: bitmap - 0-19: directed scan to i'th ssid.
* @channel_num: channel number 1-13 etc.
@@ -831,6 +878,217 @@ struct iwl_scan_req_umac {
#define IWL_SCAN_REQ_UMAC_SIZE_V1 36
/**
+ * struct iwl_scan_probe_params_v3
+ * @preq: scan probe request params
+ * @ssid_num: number of valid SSIDs in direct scan array
+ * @short_ssid_num: number of valid short SSIDs in short ssid array
+ * @bssid_num: number of valid bssid in bssids array
+ * @reserved: reserved
+ * @direct_scan: list of ssids
+ * @short_ssid: array of short ssids
+ * @bssid_array: array of bssids
+ */
+struct iwl_scan_probe_params_v3 {
+ struct iwl_scan_probe_req preq;
+ u8 ssid_num;
+ u8 short_ssid_num;
+ u8 bssid_num;
+ u8 reserved;
+ struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX];
+ __le32 short_ssid[SCAN_SHORT_SSID_MAX_SIZE];
+ u8 bssid_array[ETH_ALEN][SCAN_BSSID_MAX_SIZE];
+} __packed; /* SCAN_PROBE_PARAMS_API_S_VER_3 */
+
+/**
+ * struct iwl_scan_probe_params_v4
+ * @preq: scan probe request params
+ * @short_ssid_num: number of valid short SSIDs in short ssid array
+ * @bssid_num: number of valid bssid in bssids array
+ * @reserved: reserved
+ * @direct_scan: list of ssids
+ * @short_ssid: array of short ssids
+ * @bssid_array: array of bssids
+ */
+struct iwl_scan_probe_params_v4 {
+ struct iwl_scan_probe_req preq;
+ u8 short_ssid_num;
+ u8 bssid_num;
+ __le16 reserved;
+ struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX];
+ __le32 short_ssid[SCAN_SHORT_SSID_MAX_SIZE];
+ u8 bssid_array[ETH_ALEN][SCAN_BSSID_MAX_SIZE];
+} __packed; /* SCAN_PROBE_PARAMS_API_S_VER_4 */
+
+#define SCAN_MAX_NUM_CHANS_V3 67
+
+/**
+ * struct iwl_scan_channel_params_v3
+ * @flags: channel flags &enum iwl_scan_channel_flags
+ * @count: num of channels in scan request
+ * @reserved: for future use and alignment
+ * @channel_config: array of explicit channel configurations
+ * for the 2.4GHz and 5.2GHz bands
+ */
+struct iwl_scan_channel_params_v3 {
+ u8 flags;
+ u8 count;
+ __le16 reserved;
+ struct iwl_scan_channel_cfg_umac channel_config[SCAN_MAX_NUM_CHANS_V3];
+} __packed; /* SCAN_CHANNEL_PARAMS_API_S_VER_3 */
+
+/**
+ * struct iwl_scan_channel_params_v4
+ * @flags: channel flags &enum iwl_scan_channel_flags
+ * @count: num of channels in scan request
+ * @num_of_aps_override: override the number of APs the FW uses to calculate
+ * dwell time when adaptive dwell is used
+ * @reserved: for future use and alignment
+ * @channel_config: array of explicit channel configurations
+ * for the 2.4GHz and 5.2GHz bands
+ * @adwell_ch_override_bitmap: when using adaptive dwell, override the number
+ * of APs value with &num_of_aps_override for the channel.
+ * To cast channel to index, use &iwl_mvm_scan_ch_and_band_to_idx
+ */
+struct iwl_scan_channel_params_v4 {
+ u8 flags;
+ u8 count;
+ u8 num_of_aps_override;
+ u8 reserved;
+ struct iwl_scan_channel_cfg_umac channel_config[SCAN_MAX_NUM_CHANS_V3];
+ u8 adwell_ch_override_bitmap[16];
+} __packed; /* SCAN_CHANNEL_PARAMS_API_S_VER_4 also
+ SCAN_CHANNEL_PARAMS_API_S_VER_5 */
+/**
+ * struct iwl_scan_general_params_v10
+ * @flags: &enum iwl_umac_scan_flags
+ * @reserved: reserved for future
+ * @scan_start_mac_id: report the scan start TSF time according to this mac TSF
+ * @active_dwell: dwell time for active scan per LMAC
+ * @adwell_default_2g: adaptive dwell default number of APs
+ * for 2.4GHz channel
+ * @adwell_default_5g: adaptive dwell default number of APs
+ * for 5GHz channels
+ * @adwell_default_social_chn: adaptive dwell default number of
+ * APs per social channel
+ * @reserved1: reserved for future
+ * @adwell_max_budget: the maximal number of TUs that adaptive dwell
+ * can add to the total scan time
+ * @max_out_of_time: max out of serving channel time, per LMAC
+ * @suspend_time: max suspend time, per LMAC
+ * @scan_priority: priority of the request
+ * @passive_dwell: continuous dwell time for passive channel
+ * (without adaptive dwell)
+ * @num_of_fragments: number of fragments needed for full fragmented
+ * scan coverage.
+ */
+struct iwl_scan_general_params_v10 {
+ __le16 flags;
+ u8 reserved;
+ u8 scan_start_mac_id;
+ u8 active_dwell[SCAN_TWO_LMACS];
+ u8 adwell_default_2g;
+ u8 adwell_default_5g;
+ u8 adwell_default_social_chn;
+ u8 reserved1;
+ __le16 adwell_max_budget;
+ __le32 max_out_of_time[SCAN_TWO_LMACS];
+ __le32 suspend_time[SCAN_TWO_LMACS];
+ __le32 scan_priority;
+ u8 passive_dwell[SCAN_TWO_LMACS];
+ u8 num_of_fragments[SCAN_TWO_LMACS];
+} __packed; /* SCAN_GENERAL_PARAMS_API_S_VER_10 */
+
+/**
+ * struct iwl_scan_periodic_parms_v1
+ * @schedule: scan scheduling parameters
+ * @delay: initial delay of the periodic scan in seconds
+ * @reserved: reserved for future
+ */
+struct iwl_scan_periodic_parms_v1 {
+ struct iwl_scan_umac_schedule schedule[IWL_MAX_SCHED_SCAN_PLANS];
+ __le16 delay;
+ __le16 reserved;
+} __packed; /* SCAN_PERIODIC_PARAMS_API_S_VER_1 */
+
+/**
+ * struct iwl_scan_req_params_v11
+ * @general_params: &struct iwl_scan_general_params_v10
+ * @channel_params: &struct iwl_scan_channel_params_v3
+ * @periodic_params: &struct iwl_scan_periodic_parms_v1
+ * @probe_params: &struct iwl_scan_probe_params_v3
+ */
+struct iwl_scan_req_params_v11 {
+ struct iwl_scan_general_params_v10 general_params;
+ struct iwl_scan_channel_params_v3 channel_params;
+ struct iwl_scan_periodic_parms_v1 periodic_params;
+ struct iwl_scan_probe_params_v3 probe_params;
+} __packed; /* SCAN_REQUEST_PARAMS_API_S_VER_11 */
+
+/**
+ * struct iwl_scan_req_params_v12
+ * @general_params: &struct iwl_scan_general_params_v10
+ * @channel_params: &struct iwl_scan_channel_params_v4
+ * @periodic_params: &struct iwl_scan_periodic_parms_v1
+ * @probe_params: &struct iwl_scan_probe_params_v3
+ */
+struct iwl_scan_req_params_v12 {
+ struct iwl_scan_general_params_v10 general_params;
+ struct iwl_scan_channel_params_v4 channel_params;
+ struct iwl_scan_periodic_parms_v1 periodic_params;
+ struct iwl_scan_probe_params_v3 probe_params;
+} __packed; /* SCAN_REQUEST_PARAMS_API_S_VER_12 */
+
+/**
+ * struct iwl_scan_req_params_v13
+ * @general_params: &struct iwl_scan_general_params_v10
+ * @channel_params: &struct iwl_scan_channel_params_v4
+ * @periodic_params: &struct iwl_scan_periodic_parms_v1
+ * @probe_params: &struct iwl_scan_probe_params_v4
+ */
+struct iwl_scan_req_params_v13 {
+ struct iwl_scan_general_params_v10 general_params;
+ struct iwl_scan_channel_params_v4 channel_params;
+ struct iwl_scan_periodic_parms_v1 periodic_params;
+ struct iwl_scan_probe_params_v4 probe_params;
+} __packed; /* SCAN_REQUEST_PARAMS_API_S_VER_13 */
+
+/**
+ * struct iwl_scan_req_umac_v11
+ * @uid: scan id, &enum iwl_umac_scan_uid_offsets
+ * @ooc_priority: out of channel priority - &enum iwl_scan_priority
+ * @scan_params: scan parameters
+ */
+struct iwl_scan_req_umac_v11 {
+ __le32 uid;
+ __le32 ooc_priority;
+ struct iwl_scan_req_params_v11 scan_params;
+} __packed; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_11 */
+
+/**
+ * struct iwl_scan_req_umac_v12
+ * @uid: scan id, &enum iwl_umac_scan_uid_offsets
+ * @ooc_priority: out of channel priority - &enum iwl_scan_priority
+ * @scan_params: scan parameters
+ */
+struct iwl_scan_req_umac_v12 {
+ __le32 uid;
+ __le32 ooc_priority;
+ struct iwl_scan_req_params_v12 scan_params;
+} __packed; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_12 */
+
+/**
+ * struct iwl_scan_req_umac_v13
+ * @uid: scan id, &enum iwl_umac_scan_uid_offsets
+ * @ooc_priority: out of channel priority - &enum iwl_scan_priority
+ * @scan_params: scan parameters
+ */
+struct iwl_scan_req_umac_v13 {
+ __le32 uid;
+ __le32 ooc_priority;
+ struct iwl_scan_req_params_v13 scan_params;
+} __packed; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_13 */
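These versioned request structs are selected at runtime from the scan command version the firmware advertises; a simplified dispatch sketch (the helper is hypothetical; the driver obtains the version along the lines of iwl_fw_lookup_cmd_ver()):

    /* Sketch: pick the UMAC scan command size by API version */
    static size_t scan_req_umac_size(int scan_ver)
    {
            switch (scan_ver) {
            case 13:
                    return sizeof(struct iwl_scan_req_umac_v13);
            case 12:
                    return sizeof(struct iwl_scan_req_umac_v12);
            case 11:
                    return sizeof(struct iwl_scan_req_umac_v11);
            default:
                    return sizeof(struct iwl_scan_req_umac); /* legacy */
            }
    }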
+
+/**
* struct iwl_umac_scan_abort
* @uid: scan id, &enum iwl_umac_scan_uid_offsets
* @flags: reserved
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h b/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h
index 450227f81706..970e9e508ad0 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h
@@ -8,7 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -288,8 +288,7 @@ struct iwl_mvm_keyinfo {
* @addr: station's MAC address
* @reserved2: reserved
* @sta_id: index of station in uCode's station table
- * @modify_mask: STA_MODIFY_*, selects which parameters to modify vs. leave
- * alone. 1 - modify, 0 - don't change.
+ * @modify_mask: from &enum iwl_sta_modify_flag, selects what to change
* @reserved3: reserved
* @station_flags: look at &enum iwl_sta_flags
* @station_flags_msk: what of %station_flags have changed,
@@ -369,8 +368,7 @@ enum iwl_sta_type {
* @addr: station's MAC address
* @reserved2: reserved
* @sta_id: index of station in uCode's station table
- * @modify_mask: STA_MODIFY_*, selects which parameters to modify vs. leave
- * alone. 1 - modify, 0 - don't change.
+ * @modify_mask: from &enum iwl_sta_modify_flag, selects what to change
* @reserved3: reserved
* @station_flags: look at &enum iwl_sta_flags
* @station_flags_msk: what of %station_flags have changed,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h b/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h
index 4621ef93a2cf..a731f28e101a 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h
@@ -8,7 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -393,4 +393,82 @@ struct iwl_hs20_roc_res {
__le32 status;
} __packed; /* HOT_SPOT_RSP_API_S_VER_1 */
+/**
+ * enum iwl_mvm_session_prot_conf_id - session protection's configurations
+ * @SESSION_PROTECT_CONF_ASSOC: Start a session protection for association.
+ * The firmware will allocate two events.
+ * Valid for BSS_STA and P2P_STA.
+ * * A rather short event that can't be fragmented, with a very
+ * high priority. If everything goes well (99% of the cases) the
+ * association should complete within this first event. During
+ * that event, no other activity will happen in the firmware,
+ * which is why it can't be too long.
+ * The length of this event is hard-coded in the firmware: 300TUs.
+ * * Another event, which can be much longer (its duration is
+ * configurable by the driver), with a slightly lower
+ * priority that can be fragmented, allowing other activities
+ * to run while this event is running.
+ * The firmware will automatically remove both events once the driver sets
+ * the BSS MAC as associated. Neither of the events will be removed
+ * for the P2P_STA MAC.
+ * Only the duration is configurable for this protection.
+ * @SESSION_PROTECT_CONF_GO_CLIENT_ASSOC: not used
+ * @SESSION_PROTECT_CONF_P2P_DEVICE_DISCOV: Schedule the P2P Device to be in
+ * listen mode. Will be fragmented. Valid only on the P2P Device MAC.
+ * The firmware will take into account
+ * the duration, the interval and the repetition count.
+ * @SESSION_PROTECT_CONF_P2P_GO_NEGOTIATION: Schedule the P2P Device to be
+ * able to run the GO Negotiation. Will not be fragmented and not
+ * repetitive. Valid only on the P2P Device MAC. Only the duration will
+ * be taken into account.
+ */
+enum iwl_mvm_session_prot_conf_id {
+ SESSION_PROTECT_CONF_ASSOC,
+ SESSION_PROTECT_CONF_GO_CLIENT_ASSOC,
+ SESSION_PROTECT_CONF_P2P_DEVICE_DISCOV,
+ SESSION_PROTECT_CONF_P2P_GO_NEGOTIATION,
+}; /* SESSION_PROTECTION_CONF_ID_E_VER_1 */
+
+/**
+ * struct iwl_mvm_session_prot_cmd - configure a session protection
+ * @id_and_color: the id and color of the mac for which this session protection
+ * is sent
+ * @action: can be either FW_CTXT_ACTION_ADD or FW_CTXT_ACTION_REMOVE
+ * @conf_id: see &enum iwl_mvm_session_prot_conf_id
+ * @duration_tu: the duration of the whole protection in TUs.
+ * @repetition_count: not used
+ * @interval: not used
+ *
+ * Note: the session protection will always be scheduled to start as
+ * early as possible, but the maximum delay is configuration dependent.
+ * The firmware supports only one concurrent session protection per vif.
+ * Adding a new session protection will remove any currently running session.
+ */
+struct iwl_mvm_session_prot_cmd {
+ /* COMMON_INDEX_HDR_API_S_VER_1 hdr */
+ __le32 id_and_color;
+ __le32 action;
+ __le32 conf_id;
+ __le32 duration_tu;
+ __le32 repetition_count;
+ __le32 interval;
+} __packed; /* SESSION_PROTECTION_CMD_API_S_VER_1 */
+
+/**
+ * struct iwl_mvm_session_prot_notif - session protection started / ended
+ * @mac_id: the mac id for which the session protection started / ended
+ * @status: 1 means success, 0 means failure
+ * @start: 1 means the session protection started, 0 means it ended
+ * @conf_id: the configuration id of the session that started / ended
+ *
+ * Note that any session protection will always get two notifications: start
+ * and end, even if the firmware could not schedule it.
+ */
+struct iwl_mvm_session_prot_notif {
+ __le32 mac_id;
+ __le32 status;
+ __le32 start;
+ __le32 conf_id;
+} __packed; /* SESSION_PROTECTION_NOTIFICATION_API_S_VER_2 */
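A sketch of how a handler might consume this notification (the handler name is hypothetical; iwl_rx_packet usage mirrors surrounding driver code):

    /* Sketch: minimal handling of the session protection notification */
    static void example_session_prot_notif(struct iwl_rx_packet *pkt)
    {
            struct iwl_mvm_session_prot_notif *notif = (void *)pkt->data;
            bool started = le32_to_cpu(notif->start);

            if (started && !le32_to_cpu(notif->status)) {
                    /* the firmware could not schedule the protection */
                    return;
            }
            /* ... proceed with assoc work on start, cleanup on end ... */
    }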
+
#endif /* __iwl_fw_api_time_event_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
index 8511e735c374..f89a9e16a8c0 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
@@ -7,7 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -29,7 +29,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -323,7 +323,7 @@ struct iwl_tx_cmd_gen2 {
} __packed; /* TX_CMD_API_S_VER_7 */
/**
- * struct iwl_tx_cmd_gen3 - TX command struct to FW for 22560 devices
+ * struct iwl_tx_cmd_gen3 - TX command struct to FW for AX210+ devices
* ( TX_CMD = 0x1c )
* @len: in bytes of the payload, see below for details
* @flags: combination of &enum iwl_tx_cmd_flags
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
index 87421807e040..ed90dd104366 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
@@ -1055,19 +1055,31 @@ out:
return dump_file;
}
+/**
+ * struct iwl_dump_ini_region_data - region data
+ * @reg_tlv: region TLV
+ * @dump_data: dump data
+ */
+struct iwl_dump_ini_region_data {
+ struct iwl_ucode_tlv *reg_tlv;
+ struct iwl_fwrt_dump_data *dump_data;
+};
+
static int iwl_dump_ini_prph_iter(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_region_cfg *reg,
+ struct iwl_dump_ini_region_data *reg_data,
void *range_ptr, int idx)
{
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_fw_ini_error_dump_range *range = range_ptr;
__le32 *val = range->data;
u32 prph_val;
- u32 addr = le32_to_cpu(reg->start_addr[idx]) + le32_to_cpu(reg->offset);
+ u32 addr = le32_to_cpu(reg->addrs[idx]) +
+ le32_to_cpu(reg->dev_addr.offset);
int i;
range->internal_base_addr = cpu_to_le32(addr);
- range->range_data_size = reg->internal.range_data_size;
- for (i = 0; i < le32_to_cpu(reg->internal.range_data_size); i += 4) {
+ range->range_data_size = reg->dev_addr.size;
+ for (i = 0; i < le32_to_cpu(reg->dev_addr.size); i += 4) {
prph_val = iwl_read_prph(fwrt->trans, addr + i);
if (prph_val == 0x5a5a5a5a)
return -EBUSY;
@@ -1078,39 +1090,42 @@ static int iwl_dump_ini_prph_iter(struct iwl_fw_runtime *fwrt,
}
static int iwl_dump_ini_csr_iter(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_region_cfg *reg,
+ struct iwl_dump_ini_region_data *reg_data,
void *range_ptr, int idx)
{
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_fw_ini_error_dump_range *range = range_ptr;
__le32 *val = range->data;
- u32 addr = le32_to_cpu(reg->start_addr[idx]) + le32_to_cpu(reg->offset);
+ u32 addr = le32_to_cpu(reg->addrs[idx]) +
+ le32_to_cpu(reg->dev_addr.offset);
int i;
range->internal_base_addr = cpu_to_le32(addr);
- range->range_data_size = reg->internal.range_data_size;
- for (i = 0; i < le32_to_cpu(reg->internal.range_data_size); i += 4)
+ range->range_data_size = reg->dev_addr.size;
+ for (i = 0; i < le32_to_cpu(reg->dev_addr.size); i += 4)
*val++ = cpu_to_le32(iwl_trans_read32(fwrt->trans, addr + i));
return sizeof(*range) + le32_to_cpu(range->range_data_size);
}
static int iwl_dump_ini_dev_mem_iter(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_region_cfg *reg,
+ struct iwl_dump_ini_region_data *reg_data,
void *range_ptr, int idx)
{
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_fw_ini_error_dump_range *range = range_ptr;
- u32 addr = le32_to_cpu(reg->start_addr[idx]) + le32_to_cpu(reg->offset);
+ u32 addr = le32_to_cpu(reg->addrs[idx]) +
+ le32_to_cpu(reg->dev_addr.offset);
range->internal_base_addr = cpu_to_le32(addr);
- range->range_data_size = reg->internal.range_data_size;
+ range->range_data_size = reg->dev_addr.size;
iwl_trans_read_mem_bytes(fwrt->trans, addr, range->data,
- le32_to_cpu(reg->internal.range_data_size));
+ le32_to_cpu(reg->dev_addr.size));
return sizeof(*range) + le32_to_cpu(range->range_data_size);
}
static int _iwl_dump_ini_paging_iter(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_region_cfg *reg,
void *range_ptr, int idx)
{
/* increase idx by 1 since the pages are from 1 to
@@ -1133,14 +1148,14 @@ static int _iwl_dump_ini_paging_iter(struct iwl_fw_runtime *fwrt,
}
static int iwl_dump_ini_paging_iter(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_region_cfg *reg,
+ struct iwl_dump_ini_region_data *reg_data,
void *range_ptr, int idx)
{
struct iwl_fw_ini_error_dump_range *range;
u32 page_size;
if (!fwrt->trans->trans_cfg->gen2)
- return _iwl_dump_ini_paging_iter(fwrt, reg, range_ptr, idx);
+ return _iwl_dump_ini_paging_iter(fwrt, range_ptr, idx);
range = range_ptr;
page_size = fwrt->trans->init_dram.paging[idx].size;
@@ -1155,45 +1170,61 @@ static int iwl_dump_ini_paging_iter(struct iwl_fw_runtime *fwrt,
static int
iwl_dump_ini_mon_dram_iter(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_region_cfg *reg, void *range_ptr,
- int idx)
+ struct iwl_dump_ini_region_data *reg_data,
+ void *range_ptr, int idx)
{
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_fw_ini_error_dump_range *range = range_ptr;
- u32 start_addr = iwl_read_umac_prph(fwrt->trans,
- MON_BUFF_BASE_ADDR_VER2);
+ struct iwl_dram_data *frag;
+ u32 alloc_id = le32_to_cpu(reg->dram_alloc_id);
- if (start_addr == 0x5a5a5a5a)
- return -EBUSY;
+ frag = &fwrt->trans->dbg.fw_mon_ini[alloc_id].frags[idx];
+
+ range->dram_base_addr = cpu_to_le64(frag->physical);
+ range->range_data_size = cpu_to_le32(frag->size);
+
+ memcpy(range->data, frag->block, frag->size);
- range->dram_base_addr = cpu_to_le64(start_addr);
- range->range_data_size = cpu_to_le32(fwrt->trans->dbg.fw_mon[idx].size);
+ return sizeof(*range) + le32_to_cpu(range->range_data_size);
+}
- memcpy(range->data, fwrt->trans->dbg.fw_mon[idx].block,
- fwrt->trans->dbg.fw_mon[idx].size);
+static int iwl_dump_ini_mon_smem_iter(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data,
+ void *range_ptr, int idx)
+{
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
+ struct iwl_fw_ini_error_dump_range *range = range_ptr;
+ u32 addr = le32_to_cpu(reg->internal_buffer.base_addr);
+
+ range->internal_base_addr = cpu_to_le32(addr);
+ range->range_data_size = reg->internal_buffer.size;
+ iwl_trans_read_mem_bytes(fwrt->trans, addr, range->data,
+ le32_to_cpu(reg->internal_buffer.size));
return sizeof(*range) + le32_to_cpu(range->range_data_size);
}
static bool iwl_ini_txf_iter(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_region_cfg *reg, int idx)
+ struct iwl_dump_ini_region_data *reg_data, int idx)
{
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_txf_iter_data *iter = &fwrt->dump.txf_iter_data;
struct iwl_fwrt_shared_mem_cfg *cfg = &fwrt->smem_cfg;
int txf_num = cfg->num_txfifo_entries;
int int_txf_num = ARRAY_SIZE(cfg->internal_txfifo_size);
- u32 lmac_bitmap = le32_to_cpu(reg->fifos.fid1);
+ u32 lmac_bitmap = le32_to_cpu(reg->fifos.fid[0]);
if (!idx) {
- if (le32_to_cpu(reg->offset) &&
- WARN_ONCE(cfg->num_lmacs == 1,
- "Invalid lmac offset: 0x%x\n",
- le32_to_cpu(reg->offset)))
+ if (le32_to_cpu(reg->fifos.offset) && cfg->num_lmacs == 1) {
+ IWL_ERR(fwrt, "WRT: Invalid lmac offset 0x%x\n",
+ le32_to_cpu(reg->fifos.offset));
return false;
+ }
iter->internal_txf = 0;
iter->fifo_size = 0;
iter->fifo = -1;
- if (le32_to_cpu(reg->offset))
+ if (le32_to_cpu(reg->fifos.offset))
iter->lmac = 1;
else
iter->lmac = 0;
@@ -1224,27 +1255,28 @@ static bool iwl_ini_txf_iter(struct iwl_fw_runtime *fwrt,
}
static int iwl_dump_ini_txf_iter(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_region_cfg *reg,
+ struct iwl_dump_ini_region_data *reg_data,
void *range_ptr, int idx)
{
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_fw_ini_error_dump_range *range = range_ptr;
struct iwl_txf_iter_data *iter = &fwrt->dump.txf_iter_data;
struct iwl_fw_ini_error_dump_register *reg_dump = (void *)range->data;
- u32 offs = le32_to_cpu(reg->offset), addr;
- u32 registers_size =
- le32_to_cpu(reg->fifos.num_of_registers) * sizeof(*reg_dump);
+ u32 offs = le32_to_cpu(reg->fifos.offset), addr;
+ u32 registers_num = iwl_tlv_array_len(reg_data->reg_tlv, reg, addrs);
+ u32 registers_size = registers_num * sizeof(*reg_dump);
__le32 *data;
unsigned long flags;
int i;
- if (!iwl_ini_txf_iter(fwrt, reg, idx))
+ if (!iwl_ini_txf_iter(fwrt, reg_data, idx))
return -EIO;
if (!iwl_trans_grab_nic_access(fwrt->trans, &flags))
return -EBUSY;
range->fifo_hdr.fifo_num = cpu_to_le32(iter->fifo);
- range->fifo_hdr.num_of_registers = reg->fifos.num_of_registers;
+ range->fifo_hdr.num_of_registers = cpu_to_le32(registers_num);
range->range_data_size = cpu_to_le32(iter->fifo_size + registers_size);
iwl_write_prph_no_grab(fwrt->trans, TXF_LARC_NUM + offs, iter->fifo);
@@ -1253,8 +1285,8 @@ static int iwl_dump_ini_txf_iter(struct iwl_fw_runtime *fwrt,
* read txf registers. for each register, write to the dump the
* register address and its value
*/
- for (i = 0; i < le32_to_cpu(reg->fifos.num_of_registers); i++) {
- addr = le32_to_cpu(reg->start_addr[i]) + offs;
+ for (i = 0; i < registers_num; i++) {
+ addr = le32_to_cpu(reg->addrs[i]) + offs;
reg_dump->addr = cpu_to_le32(addr);
reg_dump->data = cpu_to_le32(iwl_read_prph_no_grab(fwrt->trans,
@@ -1263,7 +1295,7 @@ static int iwl_dump_ini_txf_iter(struct iwl_fw_runtime *fwrt,
reg_dump++;
}
- if (reg->fifos.header_only) {
+ if (reg->fifos.hdr_only) {
range->range_data_size = cpu_to_le32(registers_size);
goto out;
}
@@ -1294,11 +1326,12 @@ struct iwl_ini_rxf_data {
};
static void iwl_ini_get_rxf_data(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_region_cfg *reg,
+ struct iwl_dump_ini_region_data *reg_data,
struct iwl_ini_rxf_data *data)
{
- u32 fid1 = le32_to_cpu(reg->fifos.fid1);
- u32 fid2 = le32_to_cpu(reg->fifos.fid2);
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
+ u32 fid1 = le32_to_cpu(reg->fifos.fid[0]);
+ u32 fid2 = le32_to_cpu(reg->fifos.fid[1]);
u32 fifo_idx;
if (!data)
@@ -1330,20 +1363,21 @@ static void iwl_ini_get_rxf_data(struct iwl_fw_runtime *fwrt,
}
static int iwl_dump_ini_rxf_iter(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_region_cfg *reg,
+ struct iwl_dump_ini_region_data *reg_data,
void *range_ptr, int idx)
{
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_fw_ini_error_dump_range *range = range_ptr;
struct iwl_ini_rxf_data rxf_data;
struct iwl_fw_ini_error_dump_register *reg_dump = (void *)range->data;
- u32 offs = le32_to_cpu(reg->offset), addr;
- u32 registers_size =
- le32_to_cpu(reg->fifos.num_of_registers) * sizeof(*reg_dump);
+ u32 offs = le32_to_cpu(reg->fifos.offset), addr;
+ u32 registers_num = iwl_tlv_array_len(reg_data->reg_tlv, reg, addrs);
+ u32 registers_size = registers_num * sizeof(*reg_dump);
__le32 *data;
unsigned long flags;
int i;
- iwl_ini_get_rxf_data(fwrt, reg, &rxf_data);
+ iwl_ini_get_rxf_data(fwrt, reg_data, &rxf_data);
if (!rxf_data.size)
return -EIO;
@@ -1351,15 +1385,15 @@ static int iwl_dump_ini_rxf_iter(struct iwl_fw_runtime *fwrt,
return -EBUSY;
range->fifo_hdr.fifo_num = cpu_to_le32(rxf_data.fifo_num);
- range->fifo_hdr.num_of_registers = reg->fifos.num_of_registers;
+ range->fifo_hdr.num_of_registers = cpu_to_le32(registers_num);
range->range_data_size = cpu_to_le32(rxf_data.size + registers_size);
/*
* read rxf registers. for each register, write to the dump the
* register address and its value
*/
- for (i = 0; i < le32_to_cpu(reg->fifos.num_of_registers); i++) {
- addr = le32_to_cpu(reg->start_addr[i]) + offs;
+ for (i = 0; i < registers_num; i++) {
+ addr = le32_to_cpu(reg->addrs[i]) + offs;
reg_dump->addr = cpu_to_le32(addr);
reg_dump->data = cpu_to_le32(iwl_read_prph_no_grab(fwrt->trans,
@@ -1368,7 +1402,7 @@ static int iwl_dump_ini_rxf_iter(struct iwl_fw_runtime *fwrt,
reg_dump++;
}
- if (reg->fifos.header_only) {
+ if (reg->fifos.hdr_only) {
range->range_data_size = cpu_to_le32(registers_size);
goto out;
}
@@ -1399,9 +1433,50 @@ out:
return sizeof(*range) + le32_to_cpu(range->range_data_size);
}
-static void *iwl_dump_ini_mem_fill_header(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_region_cfg *reg,
- void *data)
+static int
+iwl_dump_ini_err_table_iter(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data,
+ void *range_ptr, int idx)
+{
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
+ struct iwl_fw_ini_region_err_table *err_table = &reg->err_table;
+ struct iwl_fw_ini_error_dump_range *range = range_ptr;
+ u32 addr = le32_to_cpu(err_table->base_addr) +
+ le32_to_cpu(err_table->offset);
+
+ range->internal_base_addr = cpu_to_le32(addr);
+ range->range_data_size = err_table->size;
+ iwl_trans_read_mem_bytes(fwrt->trans, addr, range->data,
+ le32_to_cpu(err_table->size));
+
+ return sizeof(*range) + le32_to_cpu(range->range_data_size);
+}
+
+static int iwl_dump_ini_fw_pkt_iter(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data,
+ void *range_ptr, int idx)
+{
+ struct iwl_fw_ini_error_dump_range *range = range_ptr;
+ struct iwl_rx_packet *pkt = reg_data->dump_data->fw_pkt;
+ u32 pkt_len;
+
+ if (!pkt)
+ return -EIO;
+
+ pkt_len = iwl_rx_packet_payload_len(pkt);
+
+ memcpy(&range->fw_pkt_hdr, &pkt->hdr, sizeof(range->fw_pkt_hdr));
+ range->range_data_size = cpu_to_le32(pkt_len);
+
+ memcpy(range->data, pkt->data, pkt_len);
+
+ return sizeof(*range) + le32_to_cpu(range->range_data_size);
+}
+
+static void *
+iwl_dump_ini_mem_fill_header(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data,
+ void *data)
{
struct iwl_fw_ini_error_dump *dump = data;
@@ -1410,14 +1485,45 @@ static void *iwl_dump_ini_mem_fill_header(struct iwl_fw_runtime *fwrt,
return dump->ranges;
}
-static void
-*iwl_dump_ini_mon_fill_header(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_region_cfg *reg,
- struct iwl_fw_ini_monitor_dump *data,
- u32 write_ptr_addr, u32 write_ptr_msk,
- u32 cycle_cnt_addr, u32 cycle_cnt_msk)
+/**
+ * mask_apply_and_normalize - applies a mask to val and normalizes the result
+ *
+ * The normalization is based on the first set bit in the mask
+ *
+ * @val: value
+ * @mask: mask to apply and to normalize with
+ */
+static u32 mask_apply_and_normalize(u32 val, u32 mask)
+{
+ return (val & mask) >> (ffs(mask) - 1);
+}
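A worked example of the normalization:

    /* mask_apply_and_normalize(0x1234, 0x0ff0):
     *   0x1234 & 0x0ff0 = 0x0230; ffs(0x0ff0) - 1 = 4; 0x0230 >> 4 = 0x23
     */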
+
+static __le32 iwl_get_mon_reg(struct iwl_fw_runtime *fwrt, u32 alloc_id,
+ const struct iwl_fw_mon_reg *reg_info)
+{
+ u32 val, offs;
+
+ /* The header addresses of DBGCi are calculated as follows:
+ * DBGC1 address + (0x100 * i)
+ */
+ offs = (alloc_id - IWL_FW_INI_ALLOCATION_ID_DBGC1) * 0x100;
+
+ if (!reg_info || !reg_info->addr || !reg_info->mask)
+ return 0;
+
+ val = iwl_read_prph_no_grab(fwrt->trans, reg_info->addr + offs);
+
+ return cpu_to_le32(mask_apply_and_normalize(val, reg_info->mask));
+}
+
+static void *
+iwl_dump_ini_mon_fill_header(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data,
+ struct iwl_fw_ini_monitor_dump *data,
+ const struct iwl_fw_mon_regs *addrs)
{
- u32 write_ptr, cycle_cnt;
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
+ u32 alloc_id = le32_to_cpu(reg->dram_alloc_id);
unsigned long flags;
if (!iwl_trans_grab_nic_access(fwrt->trans, &flags)) {
@@ -1425,76 +1531,66 @@ static void
return NULL;
}
- write_ptr = iwl_read_prph_no_grab(fwrt->trans, write_ptr_addr);
- cycle_cnt = iwl_read_prph_no_grab(fwrt->trans, cycle_cnt_addr);
+ data->write_ptr = iwl_get_mon_reg(fwrt, alloc_id,
+ &addrs->write_ptr);
+ data->cycle_cnt = iwl_get_mon_reg(fwrt, alloc_id,
+ &addrs->cycle_cnt);
+ data->cur_frag = iwl_get_mon_reg(fwrt, alloc_id,
+ &addrs->cur_frag);
iwl_trans_release_nic_access(fwrt->trans, &flags);
data->header.version = cpu_to_le32(IWL_INI_DUMP_VER);
- data->write_ptr = cpu_to_le32(write_ptr & write_ptr_msk);
- data->cycle_cnt = cpu_to_le32(cycle_cnt & cycle_cnt_msk);
return data->ranges;
}
-static void
-*iwl_dump_ini_mon_dram_fill_header(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_region_cfg *reg,
- void *data)
+static void *
+iwl_dump_ini_mon_dram_fill_header(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data,
+ void *data)
{
struct iwl_fw_ini_monitor_dump *mon_dump = (void *)data;
- u32 write_ptr_addr, write_ptr_msk, cycle_cnt_addr, cycle_cnt_msk;
-
- switch (fwrt->trans->trans_cfg->device_family) {
- case IWL_DEVICE_FAMILY_9000:
- case IWL_DEVICE_FAMILY_22000:
- write_ptr_addr = MON_BUFF_WRPTR_VER2;
- write_ptr_msk = -1;
- cycle_cnt_addr = MON_BUFF_CYCLE_CNT_VER2;
- cycle_cnt_msk = -1;
- break;
- default:
- IWL_ERR(fwrt, "Unsupported device family %d\n",
- fwrt->trans->trans_cfg->device_family);
- return NULL;
- }
- return iwl_dump_ini_mon_fill_header(fwrt, reg, mon_dump, write_ptr_addr,
- write_ptr_msk, cycle_cnt_addr,
- cycle_cnt_msk);
+ return iwl_dump_ini_mon_fill_header(fwrt, reg_data, mon_dump,
+ &fwrt->trans->cfg->mon_dram_regs);
}
-static void
-*iwl_dump_ini_mon_smem_fill_header(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_region_cfg *reg,
- void *data)
+static void *
+iwl_dump_ini_mon_smem_fill_header(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data,
+ void *data)
{
struct iwl_fw_ini_monitor_dump *mon_dump = (void *)data;
- const struct iwl_cfg *cfg = fwrt->trans->cfg;
- if (fwrt->trans->trans_cfg->device_family != IWL_DEVICE_FAMILY_9000 &&
- fwrt->trans->trans_cfg->device_family != IWL_DEVICE_FAMILY_22000) {
- IWL_ERR(fwrt, "Unsupported device family %d\n",
- fwrt->trans->trans_cfg->device_family);
- return NULL;
- }
+ return iwl_dump_ini_mon_fill_header(fwrt, reg_data, mon_dump,
+ &fwrt->trans->cfg->mon_smem_regs);
+}
- return iwl_dump_ini_mon_fill_header(fwrt, reg, mon_dump,
- cfg->fw_mon_smem_write_ptr_addr,
- cfg->fw_mon_smem_write_ptr_msk,
- cfg->fw_mon_smem_cycle_cnt_ptr_addr,
- cfg->fw_mon_smem_cycle_cnt_ptr_msk);
+static void *
+iwl_dump_ini_err_table_fill_header(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data,
+ void *data)
+{
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
+ struct iwl_fw_ini_err_table_dump *dump = data;
+ dump->header.version = cpu_to_le32(IWL_INI_DUMP_VER);
+ dump->version = reg->err_table.version;
+
+ return dump->ranges;
}
static u32 iwl_dump_ini_mem_ranges(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_region_cfg *reg)
+ struct iwl_dump_ini_region_data *reg_data)
{
- return le32_to_cpu(reg->internal.num_of_ranges);
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
+
+ return iwl_tlv_array_len(reg_data->reg_tlv, reg, addrs);
}
static u32 iwl_dump_ini_paging_ranges(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_region_cfg *reg)
+ struct iwl_dump_ini_region_data *reg_data)
{
if (fwrt->trans->trans_cfg->gen2)
return fwrt->trans->init_dram.paging_cnt;
@@ -1502,54 +1598,73 @@ static u32 iwl_dump_ini_paging_ranges(struct iwl_fw_runtime *fwrt,
return fwrt->num_of_paging_blk;
}
-static u32 iwl_dump_ini_mon_dram_ranges(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_region_cfg *reg)
+static u32
+iwl_dump_ini_mon_dram_ranges(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data)
{
- return 1;
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
+ struct iwl_fw_mon *fw_mon;
+ u32 ranges = 0, alloc_id = le32_to_cpu(reg->dram_alloc_id);
+ int i;
+
+ fw_mon = &fwrt->trans->dbg.fw_mon_ini[alloc_id];
+
+ for (i = 0; i < fw_mon->num_frags; i++) {
+ if (!fw_mon->frags[i].size)
+ break;
+
+ ranges++;
+ }
+
+ return ranges;
}
static u32 iwl_dump_ini_txf_ranges(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_region_cfg *reg)
+ struct iwl_dump_ini_region_data *reg_data)
{
u32 num_of_fifos = 0;
- while (iwl_ini_txf_iter(fwrt, reg, num_of_fifos))
+ while (iwl_ini_txf_iter(fwrt, reg_data, num_of_fifos))
num_of_fifos++;
return num_of_fifos;
}
-static u32 iwl_dump_ini_rxf_ranges(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_region_cfg *reg)
+static u32 iwl_dump_ini_single_range(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data)
{
- /* Each Rx fifo needs a different offset and therefore, it's
- * region can contain only one fifo, i.e. 1 memory range.
- */
return 1;
}
static u32 iwl_dump_ini_mem_get_size(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_region_cfg *reg)
+ struct iwl_dump_ini_region_data *reg_data)
{
- return sizeof(struct iwl_fw_ini_error_dump) +
- iwl_dump_ini_mem_ranges(fwrt, reg) *
- (sizeof(struct iwl_fw_ini_error_dump_range) +
- le32_to_cpu(reg->internal.range_data_size));
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
+ u32 size = le32_to_cpu(reg->dev_addr.size);
+ u32 ranges = iwl_dump_ini_mem_ranges(fwrt, reg_data);
+
+ if (!size || !ranges)
+ return 0;
+
+ return sizeof(struct iwl_fw_ini_error_dump) + ranges *
+ (size + sizeof(struct iwl_fw_ini_error_dump_range));
}
-static u32 iwl_dump_ini_paging_get_size(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_region_cfg *reg)
+static u32
+iwl_dump_ini_paging_get_size(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data)
{
int i;
u32 range_header_len = sizeof(struct iwl_fw_ini_error_dump_range);
u32 size = sizeof(struct iwl_fw_ini_error_dump);
if (fwrt->trans->trans_cfg->gen2) {
- for (i = 0; i < iwl_dump_ini_paging_ranges(fwrt, reg); i++)
+ for (i = 0; i < iwl_dump_ini_paging_ranges(fwrt, reg_data); i++)
size += range_header_len +
fwrt->trans->init_dram.paging[i].size;
} else {
- for (i = 1; i <= iwl_dump_ini_paging_ranges(fwrt, reg); i++)
+ for (i = 1; i <= iwl_dump_ini_paging_ranges(fwrt, reg_data);
+ i++)
size += range_header_len +
fwrt->fw_paging_db[i].fw_paging_size;
}
@@ -1557,66 +1672,128 @@ static u32 iwl_dump_ini_paging_get_size(struct iwl_fw_runtime *fwrt,
return size;
}
-static u32 iwl_dump_ini_mon_dram_get_size(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_region_cfg *reg)
+static u32
+iwl_dump_ini_mon_dram_get_size(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data)
{
- u32 size = sizeof(struct iwl_fw_ini_monitor_dump) +
- sizeof(struct iwl_fw_ini_error_dump_range);
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
+ struct iwl_fw_mon *fw_mon;
+ u32 size = 0, alloc_id = le32_to_cpu(reg->dram_alloc_id);
+ int i;
- if (fwrt->trans->dbg.num_blocks)
- size += fwrt->trans->dbg.fw_mon[0].size;
+ fw_mon = &fwrt->trans->dbg.fw_mon_ini[alloc_id];
+
+ for (i = 0; i < fw_mon->num_frags; i++) {
+ struct iwl_dram_data *frag = &fw_mon->frags[i];
+
+ if (!frag->size)
+ break;
+
+ size += sizeof(struct iwl_fw_ini_error_dump_range) + frag->size;
+ }
+
+ if (size)
+ size += sizeof(struct iwl_fw_ini_monitor_dump);
return size;
}
-static u32 iwl_dump_ini_mon_smem_get_size(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_region_cfg *reg)
+static u32
+iwl_dump_ini_mon_smem_get_size(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data)
{
- return sizeof(struct iwl_fw_ini_monitor_dump) +
- iwl_dump_ini_mem_ranges(fwrt, reg) *
- (sizeof(struct iwl_fw_ini_error_dump_range) +
- le32_to_cpu(reg->internal.range_data_size));
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
+ struct iwl_fw_ini_allocation_tlv *fw_mon_cfg;
+ u32 alloc_id = le32_to_cpu(reg->internal_buffer.alloc_id), size;
+
+ fw_mon_cfg = &fwrt->trans->dbg.fw_mon_cfg[alloc_id];
+ if (le32_to_cpu(fw_mon_cfg->buf_location) !=
+ IWL_FW_INI_LOCATION_SRAM_PATH)
+ return 0;
+
+ size = le32_to_cpu(reg->internal_buffer.size);
+ if (!size)
+ return 0;
+
+ size += sizeof(struct iwl_fw_ini_monitor_dump) +
+ sizeof(struct iwl_fw_ini_error_dump_range);
+
+ return size;
}
static u32 iwl_dump_ini_txf_get_size(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_region_cfg *reg)
+ struct iwl_dump_ini_region_data *reg_data)
{
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_txf_iter_data *iter = &fwrt->dump.txf_iter_data;
+ u32 registers_num = iwl_tlv_array_len(reg_data->reg_tlv, reg, addrs);
u32 size = 0;
u32 fifo_hdr = sizeof(struct iwl_fw_ini_error_dump_range) +
- le32_to_cpu(reg->fifos.num_of_registers) *
- sizeof(struct iwl_fw_ini_error_dump_register);
+ registers_num *
+ sizeof(struct iwl_fw_ini_error_dump_register);
- while (iwl_ini_txf_iter(fwrt, reg, size)) {
+ while (iwl_ini_txf_iter(fwrt, reg_data, size)) {
size += fifo_hdr;
- if (!reg->fifos.header_only)
+ if (!reg->fifos.hdr_only)
size += iter->fifo_size;
}
- if (size)
- size += sizeof(struct iwl_fw_ini_error_dump);
+ if (!size)
+ return 0;
- return size;
+ return size + sizeof(struct iwl_fw_ini_error_dump);
}
static u32 iwl_dump_ini_rxf_get_size(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_region_cfg *reg)
+ struct iwl_dump_ini_region_data *reg_data)
{
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_ini_rxf_data rx_data;
+ u32 registers_num = iwl_tlv_array_len(reg_data->reg_tlv, reg, addrs);
u32 size = sizeof(struct iwl_fw_ini_error_dump) +
sizeof(struct iwl_fw_ini_error_dump_range) +
- le32_to_cpu(reg->fifos.num_of_registers) *
- sizeof(struct iwl_fw_ini_error_dump_register);
+ registers_num * sizeof(struct iwl_fw_ini_error_dump_register);
- if (reg->fifos.header_only)
+ if (reg->fifos.hdr_only)
return size;
- iwl_ini_get_rxf_data(fwrt, reg, &rx_data);
+ iwl_ini_get_rxf_data(fwrt, reg_data, &rx_data);
size += rx_data.size;
return size;
}
+static u32
+iwl_dump_ini_err_table_get_size(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data)
+{
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
+ u32 size = le32_to_cpu(reg->err_table.size);
+
+ if (size)
+ size += sizeof(struct iwl_fw_ini_err_table_dump) +
+ sizeof(struct iwl_fw_ini_error_dump_range);
+
+ return size;
+}
+
+static u32
+iwl_dump_ini_fw_pkt_get_size(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data)
+{
+ u32 size = 0;
+
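+ /* fw_pkt is only set when the dump was triggered by a FW
+ * response or notification
+ */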
+ if (!reg_data->dump_data->fw_pkt)
+ return 0;
+
+ size += iwl_rx_packet_payload_len(reg_data->dump_data->fw_pkt);
+ if (size)
+ size += sizeof(struct iwl_fw_ini_error_dump) +
+ sizeof(struct iwl_fw_ini_error_dump_range);
+
+ return size;
+}
+
/**
* struct iwl_dump_ini_mem_ops - ini memory dump operations
* @get_num_of_ranges: returns the number of memory ranges in the region.
@@ -1628,14 +1805,15 @@ static u32 iwl_dump_ini_rxf_get_size(struct iwl_fw_runtime *fwrt,
*/
struct iwl_dump_ini_mem_ops {
u32 (*get_num_of_ranges)(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_region_cfg *reg);
+ struct iwl_dump_ini_region_data *reg_data);
u32 (*get_size)(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_region_cfg *reg);
+ struct iwl_dump_ini_region_data *reg_data);
void *(*fill_mem_hdr)(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_region_cfg *reg, void *data);
+ struct iwl_dump_ini_region_data *reg_data,
+ void *data);
int (*fill_range)(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_region_cfg *reg, void *range,
- int idx);
+ struct iwl_dump_ini_region_data *reg_data,
+ void *range, int idx);
};
/**
@@ -1650,60 +1828,61 @@ struct iwl_dump_ini_mem_ops {
* @ops: memory dump operations
*/
static u32 iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt, struct list_head *list,
- struct iwl_fw_ini_region_cfg *reg,
+ struct iwl_dump_ini_region_data *reg_data,
const struct iwl_dump_ini_mem_ops *ops)
{
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_fw_ini_dump_entry *entry;
struct iwl_fw_error_dump_data *tlv;
struct iwl_fw_ini_error_dump_header *header;
- u32 num_of_ranges, i, type = le32_to_cpu(reg->region_type), size;
+ u32 type = le32_to_cpu(reg->type), id = le32_to_cpu(reg->id);
+ u32 num_of_ranges, i, size;
void *range;
if (!ops->get_num_of_ranges || !ops->get_size || !ops->fill_mem_hdr ||
!ops->fill_range)
return 0;
- size = ops->get_size(fwrt, reg);
+ size = ops->get_size(fwrt, reg_data);
if (!size)
return 0;
- entry = kmalloc(sizeof(*entry) + sizeof(*tlv) + size, GFP_KERNEL);
+ entry = vzalloc(sizeof(*entry) + sizeof(*tlv) + size);
if (!entry)
return 0;
entry->size = sizeof(*tlv) + size;
tlv = (void *)entry->data;
- tlv->type = cpu_to_le32(type);
+ tlv->type = reg->type;
tlv->len = cpu_to_le32(size);
- IWL_DEBUG_FW(fwrt, "WRT: Collecting region: id=%d, type=%d\n",
- le32_to_cpu(reg->region_id), type);
+ IWL_DEBUG_FW(fwrt, "WRT: Collecting region: id=%d, type=%d\n", id,
+ type);
- num_of_ranges = ops->get_num_of_ranges(fwrt, reg);
+ num_of_ranges = ops->get_num_of_ranges(fwrt, reg_data);
header = (void *)tlv->data;
- header->region_id = reg->region_id;
+ header->region_id = reg->id;
header->num_of_ranges = cpu_to_le32(num_of_ranges);
- header->name_len = cpu_to_le32(min_t(int, IWL_FW_INI_MAX_NAME,
- le32_to_cpu(reg->name_len)));
- memcpy(header->name, reg->name, le32_to_cpu(header->name_len));
+ header->name_len = cpu_to_le32(IWL_FW_INI_MAX_NAME);
+ memcpy(header->name, reg->name, IWL_FW_INI_MAX_NAME);
- range = ops->fill_mem_hdr(fwrt, reg, header);
+ range = ops->fill_mem_hdr(fwrt, reg_data, header);
if (!range) {
IWL_ERR(fwrt,
"WRT: Failed to fill region header: id=%d, type=%d\n",
- le32_to_cpu(reg->region_id), type);
+ id, type);
goto out_err;
}
for (i = 0; i < num_of_ranges; i++) {
- int range_size = ops->fill_range(fwrt, reg, range, i);
+ int range_size = ops->fill_range(fwrt, reg_data, range, i);
if (range_size < 0) {
IWL_ERR(fwrt,
"WRT: Failed to dump region: id=%d, type=%d\n",
- le32_to_cpu(reg->region_id), type);
+ id, type);
goto out_err;
}
range = range + range_size;
@@ -1714,22 +1893,29 @@ static u32 iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt, struct list_head *list,
return entry->size;
out_err:
- kfree(entry);
+ vfree(entry);
return 0;
}
static u32 iwl_dump_ini_info(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_trigger *trigger,
+ struct iwl_fw_ini_trigger_tlv *trigger,
struct list_head *list)
{
struct iwl_fw_ini_dump_entry *entry;
struct iwl_fw_error_dump_data *tlv;
struct iwl_fw_ini_dump_info *dump;
- u32 reg_ids_size = le32_to_cpu(trigger->num_regions) * sizeof(__le32);
- u32 size = sizeof(*tlv) + sizeof(*dump) + reg_ids_size;
+ struct iwl_dbg_tlv_node *node;
+ struct iwl_fw_ini_dump_cfg_name *cfg_name;
+ u32 size = sizeof(*tlv) + sizeof(*dump);
+ u32 num_of_cfg_names = 0;
+
+ list_for_each_entry(node, &fwrt->trans->dbg.debug_info_tlv_list, list) {
+ size += sizeof(*cfg_name);
+ num_of_cfg_names++;
+ }
- entry = kmalloc(sizeof(*entry) + size, GFP_KERNEL);
+ entry = vzalloc(sizeof(*entry) + size);
if (!entry)
return 0;
@@ -1737,13 +1923,14 @@ static u32 iwl_dump_ini_info(struct iwl_fw_runtime *fwrt,
tlv = (void *)entry->data;
tlv->type = cpu_to_le32(IWL_INI_DUMP_INFO_TYPE);
- tlv->len = cpu_to_le32(sizeof(*dump) + reg_ids_size);
+ tlv->len = cpu_to_le32(size - sizeof(*tlv));
dump = (void *)tlv->data;
dump->version = cpu_to_le32(IWL_INI_DUMP_VER);
- dump->trigger_id = trigger->trigger_id;
- dump->is_external_cfg =
+ dump->time_point = trigger->time_point;
+ dump->trigger_reason = trigger->trigger_reason;
+ dump->external_cfg_state =
cpu_to_le32(fwrt->trans->dbg.external_ini_cfg);
dump->ver_type = cpu_to_le32(fwrt->dump.fw_ver.type);
@@ -1763,26 +1950,26 @@ static u32 iwl_dump_ini_info(struct iwl_fw_runtime *fwrt,
dump->umac_major = cpu_to_le32(fwrt->dump.fw_ver.umac_major);
dump->umac_minor = cpu_to_le32(fwrt->dump.fw_ver.umac_minor);
+ dump->fw_mon_mode = cpu_to_le32(fwrt->trans->dbg.ini_dest);
+ dump->regions_mask = trigger->regions_mask;
+
dump->build_tag_len = cpu_to_le32(sizeof(dump->build_tag));
memcpy(dump->build_tag, fwrt->fw->human_readable,
sizeof(dump->build_tag));
- dump->img_name_len = cpu_to_le32(sizeof(dump->img_name));
- memcpy(dump->img_name, fwrt->dump.img_name, sizeof(dump->img_name));
-
- dump->internal_dbg_cfg_name_len =
- cpu_to_le32(sizeof(dump->internal_dbg_cfg_name));
- memcpy(dump->internal_dbg_cfg_name, fwrt->dump.internal_dbg_cfg_name,
- sizeof(dump->internal_dbg_cfg_name));
-
- dump->external_dbg_cfg_name_len =
- cpu_to_le32(sizeof(dump->external_dbg_cfg_name));
-
- memcpy(dump->external_dbg_cfg_name, fwrt->dump.external_dbg_cfg_name,
- sizeof(dump->external_dbg_cfg_name));
-
- dump->regions_num = trigger->num_regions;
- memcpy(dump->region_ids, trigger->data, reg_ids_size);
+ cfg_name = dump->cfg_names;
+ dump->num_of_cfg_names = cpu_to_le32(num_of_cfg_names);
+ list_for_each_entry(node, &fwrt->trans->dbg.debug_info_tlv_list, list) {
+ struct iwl_fw_ini_debug_info_tlv *debug_info =
+ (void *)node->tlv.data;
+
+ cfg_name->image_type = debug_info->image_type;
+ cfg_name->cfg_name_len =
+ cpu_to_le32(IWL_FW_INI_MAX_CFG_NAME);
+ memcpy(cfg_name->cfg_name, debug_info->debug_cfg_name,
+ sizeof(cfg_name->cfg_name));
+ cfg_name++;
+ }
/* add dump info TLV to the beginning of the list since it needs to be
* the first TLV in the dump
@@ -1794,33 +1981,18 @@ static u32 iwl_dump_ini_info(struct iwl_fw_runtime *fwrt,
static const struct iwl_dump_ini_mem_ops iwl_dump_ini_region_ops[] = {
[IWL_FW_INI_REGION_INVALID] = {},
- [IWL_FW_INI_REGION_DEVICE_MEMORY] = {
- .get_num_of_ranges = iwl_dump_ini_mem_ranges,
- .get_size = iwl_dump_ini_mem_get_size,
- .fill_mem_hdr = iwl_dump_ini_mem_fill_header,
- .fill_range = iwl_dump_ini_dev_mem_iter,
- },
- [IWL_FW_INI_REGION_PERIPHERY_MAC] = {
- .get_num_of_ranges = iwl_dump_ini_mem_ranges,
- .get_size = iwl_dump_ini_mem_get_size,
- .fill_mem_hdr = iwl_dump_ini_mem_fill_header,
- .fill_range = iwl_dump_ini_prph_iter,
+ [IWL_FW_INI_REGION_INTERNAL_BUFFER] = {
+ .get_num_of_ranges = iwl_dump_ini_single_range,
+ .get_size = iwl_dump_ini_mon_smem_get_size,
+ .fill_mem_hdr = iwl_dump_ini_mon_smem_fill_header,
+ .fill_range = iwl_dump_ini_mon_smem_iter,
},
- [IWL_FW_INI_REGION_PERIPHERY_PHY] = {},
- [IWL_FW_INI_REGION_PERIPHERY_AUX] = {},
[IWL_FW_INI_REGION_DRAM_BUFFER] = {
.get_num_of_ranges = iwl_dump_ini_mon_dram_ranges,
.get_size = iwl_dump_ini_mon_dram_get_size,
.fill_mem_hdr = iwl_dump_ini_mon_dram_fill_header,
.fill_range = iwl_dump_ini_mon_dram_iter,
},
- [IWL_FW_INI_REGION_DRAM_IMR] = {},
- [IWL_FW_INI_REGION_INTERNAL_BUFFER] = {
- .get_num_of_ranges = iwl_dump_ini_mem_ranges,
- .get_size = iwl_dump_ini_mon_smem_get_size,
- .fill_mem_hdr = iwl_dump_ini_mon_smem_fill_header,
- .fill_range = iwl_dump_ini_dev_mem_iter,
- },
[IWL_FW_INI_REGION_TXF] = {
.get_num_of_ranges = iwl_dump_ini_txf_ranges,
.get_size = iwl_dump_ini_txf_get_size,
@@ -1828,70 +2000,91 @@ static const struct iwl_dump_ini_mem_ops iwl_dump_ini_region_ops[] = {
.fill_range = iwl_dump_ini_txf_iter,
},
[IWL_FW_INI_REGION_RXF] = {
- .get_num_of_ranges = iwl_dump_ini_rxf_ranges,
+ .get_num_of_ranges = iwl_dump_ini_single_range,
.get_size = iwl_dump_ini_rxf_get_size,
.fill_mem_hdr = iwl_dump_ini_mem_fill_header,
.fill_range = iwl_dump_ini_rxf_iter,
},
- [IWL_FW_INI_REGION_PAGING] = {
+ [IWL_FW_INI_REGION_LMAC_ERROR_TABLE] = {
+ .get_num_of_ranges = iwl_dump_ini_single_range,
+ .get_size = iwl_dump_ini_err_table_get_size,
+ .fill_mem_hdr = iwl_dump_ini_err_table_fill_header,
+ .fill_range = iwl_dump_ini_err_table_iter,
+ },
+ [IWL_FW_INI_REGION_UMAC_ERROR_TABLE] = {
+ .get_num_of_ranges = iwl_dump_ini_single_range,
+ .get_size = iwl_dump_ini_err_table_get_size,
+ .fill_mem_hdr = iwl_dump_ini_err_table_fill_header,
+ .fill_range = iwl_dump_ini_err_table_iter,
+ },
+ [IWL_FW_INI_REGION_RSP_OR_NOTIF] = {
+ .get_num_of_ranges = iwl_dump_ini_single_range,
+ .get_size = iwl_dump_ini_fw_pkt_get_size,
.fill_mem_hdr = iwl_dump_ini_mem_fill_header,
- .get_num_of_ranges = iwl_dump_ini_paging_ranges,
- .get_size = iwl_dump_ini_paging_get_size,
- .fill_range = iwl_dump_ini_paging_iter,
+ .fill_range = iwl_dump_ini_fw_pkt_iter,
},
- [IWL_FW_INI_REGION_CSR] = {
+ [IWL_FW_INI_REGION_DEVICE_MEMORY] = {
.get_num_of_ranges = iwl_dump_ini_mem_ranges,
.get_size = iwl_dump_ini_mem_get_size,
.fill_mem_hdr = iwl_dump_ini_mem_fill_header,
- .fill_range = iwl_dump_ini_csr_iter,
+ .fill_range = iwl_dump_ini_dev_mem_iter,
},
- [IWL_FW_INI_REGION_NOTIFICATION] = {},
- [IWL_FW_INI_REGION_DHC] = {},
- [IWL_FW_INI_REGION_LMAC_ERROR_TABLE] = {
+ [IWL_FW_INI_REGION_PERIPHERY_MAC] = {
.get_num_of_ranges = iwl_dump_ini_mem_ranges,
.get_size = iwl_dump_ini_mem_get_size,
.fill_mem_hdr = iwl_dump_ini_mem_fill_header,
- .fill_range = iwl_dump_ini_dev_mem_iter,
+ .fill_range = iwl_dump_ini_prph_iter,
},
- [IWL_FW_INI_REGION_UMAC_ERROR_TABLE] = {
+ [IWL_FW_INI_REGION_PERIPHERY_PHY] = {},
+ [IWL_FW_INI_REGION_PERIPHERY_AUX] = {},
+ [IWL_FW_INI_REGION_PAGING] = {
+ .fill_mem_hdr = iwl_dump_ini_mem_fill_header,
+ .get_num_of_ranges = iwl_dump_ini_paging_ranges,
+ .get_size = iwl_dump_ini_paging_get_size,
+ .fill_range = iwl_dump_ini_paging_iter,
+ },
+ [IWL_FW_INI_REGION_CSR] = {
.get_num_of_ranges = iwl_dump_ini_mem_ranges,
.get_size = iwl_dump_ini_mem_get_size,
.fill_mem_hdr = iwl_dump_ini_mem_fill_header,
- .fill_range = iwl_dump_ini_dev_mem_iter,
+ .fill_range = iwl_dump_ini_csr_iter,
},
+ [IWL_FW_INI_REGION_DRAM_IMR] = {},
+ [IWL_FW_INI_REGION_PCI_IOSF_CONFIG] = {},
};
static u32 iwl_dump_ini_trigger(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_trigger *trigger,
+ struct iwl_fwrt_dump_data *dump_data,
struct list_head *list)
{
+ struct iwl_fw_ini_trigger_tlv *trigger = dump_data->trig;
+ struct iwl_dump_ini_region_data reg_data = {
+ .dump_data = dump_data,
+ };
int i;
u32 size = 0;
+ u64 regions_mask = le64_to_cpu(trigger->regions_mask);
- for (i = 0; i < le32_to_cpu(trigger->num_regions); i++) {
- u32 reg_id = le32_to_cpu(trigger->data[i]), reg_type;
- struct iwl_fw_ini_region_cfg *reg;
+ for (i = 0; i < 64; i++) {
+ u32 reg_type;
+ struct iwl_fw_ini_region_tlv *reg;
- if (WARN_ON(reg_id >= ARRAY_SIZE(fwrt->dump.active_regs)))
+ if (!(BIT_ULL(i) & regions_mask))
continue;
- reg = fwrt->dump.active_regs[reg_id];
- if (!reg) {
+ reg_data.reg_tlv = fwrt->trans->dbg.active_regions[i];
+ if (!reg_data.reg_tlv) {
IWL_WARN(fwrt,
- "WRT: Unassigned region id %d, skipping\n",
- reg_id);
+ "WRT: Unassigned region id %d, skipping\n", i);
continue;
}
- /* currently the driver supports always on domain only */
- if (le32_to_cpu(reg->domain) != IWL_FW_INI_DBG_DOMAIN_ALWAYS_ON)
- continue;
-
- reg_type = le32_to_cpu(reg->region_type);
+ reg = (void *)reg_data.reg_tlv->data;
+ reg_type = le32_to_cpu(reg->type);
if (reg_type >= ARRAY_SIZE(iwl_dump_ini_region_ops))
continue;
- size += iwl_dump_ini_mem(fwrt, list, reg,
+ size += iwl_dump_ini_mem(fwrt, list, &reg_data,
&iwl_dump_ini_region_ops[reg_type]);
}
@@ -1901,31 +2094,43 @@ static u32 iwl_dump_ini_trigger(struct iwl_fw_runtime *fwrt,
return size;
}
+static bool iwl_fw_ini_trigger_on(struct iwl_fw_runtime *fwrt,
+ struct iwl_fw_ini_trigger_tlv *trig)
+{
+ enum iwl_fw_ini_time_point tp_id = le32_to_cpu(trig->time_point);
+ u32 usec = le32_to_cpu(trig->ignore_consec);
+
+ if (!iwl_trans_dbg_ini_valid(fwrt->trans) ||
+ tp_id == IWL_FW_INI_TIME_POINT_INVALID ||
+ tp_id >= IWL_FW_INI_TIME_POINT_NUM ||
+ iwl_fw_dbg_no_trig_window(fwrt, tp_id, usec))
+ return false;
+
+ return true;
+}
+
static u32 iwl_dump_ini_file_gen(struct iwl_fw_runtime *fwrt,
- enum iwl_fw_ini_trigger_id trig_id,
+ struct iwl_fwrt_dump_data *dump_data,
struct list_head *list)
{
+ struct iwl_fw_ini_trigger_tlv *trigger = dump_data->trig;
struct iwl_fw_ini_dump_entry *entry;
struct iwl_fw_ini_dump_file_hdr *hdr;
- struct iwl_fw_ini_trigger *trigger;
u32 size;
- if (!iwl_fw_ini_trigger_on(fwrt, trig_id))
- return 0;
-
- trigger = fwrt->dump.active_trigs[trig_id].trig;
- if (!trigger || !le32_to_cpu(trigger->num_regions))
+ if (!trigger || !iwl_fw_ini_trigger_on(fwrt, trigger) ||
+ !le64_to_cpu(trigger->regions_mask))
return 0;
- entry = kmalloc(sizeof(*entry) + sizeof(*hdr), GFP_KERNEL);
+ entry = vzalloc(sizeof(*entry) + sizeof(*hdr));
if (!entry)
return 0;
entry->size = sizeof(*hdr);
- size = iwl_dump_ini_trigger(fwrt, trigger, list);
+ size = iwl_dump_ini_trigger(fwrt, dump_data, list);
if (!size) {
- kfree(entry);
+ vfree(entry);
return 0;
}
@@ -1991,18 +2196,24 @@ static void iwl_dump_ini_list_free(struct list_head *list)
list_entry(list->next, typeof(*entry), list);
list_del(&entry->list);
- kfree(entry);
+ vfree(entry);
}
}
-static void iwl_fw_error_ini_dump(struct iwl_fw_runtime *fwrt, u8 wk_idx)
+static void iwl_fw_error_dump_data_free(struct iwl_fwrt_dump_data *dump_data)
+{
+ dump_data->trig = NULL;
+ kfree(dump_data->fw_pkt);
+ dump_data->fw_pkt = NULL;
+}
+
+static void iwl_fw_error_ini_dump(struct iwl_fw_runtime *fwrt,
+ struct iwl_fwrt_dump_data *dump_data)
{
- enum iwl_fw_ini_trigger_id trig_id = fwrt->dump.wks[wk_idx].ini_trig_id;
struct list_head dump_list = LIST_HEAD_INIT(dump_list);
struct scatterlist *sg_dump_data;
- u32 file_len;
+ u32 file_len = iwl_dump_ini_file_gen(fwrt, dump_data, &dump_list);
- file_len = iwl_dump_ini_file_gen(fwrt, trig_id, &dump_list);
if (!file_len)
goto out;
@@ -2023,7 +2234,7 @@ static void iwl_fw_error_ini_dump(struct iwl_fw_runtime *fwrt, u8 wk_idx)
iwl_dump_ini_list_free(&dump_list);
out:
- fwrt->dump.wks[wk_idx].ini_trig_id = IWL_FW_TRIGGER_ID_INVALID;
+ iwl_fw_error_dump_data_free(dump_data);
}
const struct iwl_fw_dump_desc iwl_dump_desc_assert = {
@@ -2038,15 +2249,9 @@ int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt,
bool monitor_only,
unsigned int delay)
{
- u32 trig_type = le32_to_cpu(desc->trig_desc.type);
- int ret;
-
if (iwl_trans_dbg_ini_valid(fwrt->trans)) {
- ret = iwl_fw_dbg_ini_collect(fwrt, trig_type);
- if (!ret)
- iwl_fw_free_dump_desc(fwrt);
-
- return ret;
+ iwl_fw_free_dump_desc(fwrt);
+ return 0;
}
/* use wks[0] since dump flow prior to ini does not need to support
@@ -2138,35 +2343,29 @@ int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt,
}
IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect);
-int _iwl_fw_dbg_ini_collect(struct iwl_fw_runtime *fwrt,
- enum iwl_fw_ini_trigger_id id)
+int iwl_fw_dbg_ini_collect(struct iwl_fw_runtime *fwrt,
+ struct iwl_fwrt_dump_data *dump_data)
{
- struct iwl_fw_ini_active_triggers *active;
+ struct iwl_fw_ini_trigger_tlv *trig = dump_data->trig;
+ enum iwl_fw_ini_time_point tp_id = le32_to_cpu(trig->time_point);
u32 occur, delay;
unsigned long idx;
- if (WARN_ON(!iwl_fw_ini_trigger_on(fwrt, id)))
- return -EINVAL;
+ if (test_bit(STATUS_GEN_ACTIVE_TRIGS, &fwrt->status))
+ return -EBUSY;
- if (!iwl_fw_ini_trigger_on(fwrt, id)) {
+ if (!iwl_fw_ini_trigger_on(fwrt, trig)) {
IWL_WARN(fwrt, "WRT: Trigger %d is not active, aborting dump\n",
- id);
+ tp_id);
return -EINVAL;
}
- active = &fwrt->dump.active_trigs[id];
- delay = le32_to_cpu(active->trig->dump_delay);
- occur = le32_to_cpu(active->trig->occurrences);
+ delay = le32_to_cpu(trig->dump_delay);
+ occur = le32_to_cpu(trig->occurrences);
if (!occur)
return 0;
- active->trig->occurrences = cpu_to_le32(--occur);
-
- if (le32_to_cpu(active->trig->force_restart)) {
- IWL_WARN(fwrt, "WRT: Force restart: trigger %d fired.\n", id);
- iwl_force_nmi(fwrt->trans);
- return 0;
- }
+ trig->occurrences = cpu_to_le32(--occur);
/* Check there is an available worker.
* ffz return value is undefined if no zero exists,
@@ -2181,36 +2380,14 @@ int _iwl_fw_dbg_ini_collect(struct iwl_fw_runtime *fwrt,
test_and_set_bit(fwrt->dump.wks[idx].idx, &fwrt->dump.active_wks))
return -EBUSY;
- fwrt->dump.wks[idx].ini_trig_id = id;
+ fwrt->dump.wks[idx].dump_data = *dump_data;
- IWL_WARN(fwrt, "WRT: Collecting data: ini trigger %d fired.\n", id);
+ IWL_WARN(fwrt, "WRT: Collecting data: ini trigger %d fired.\n", tp_id);
schedule_delayed_work(&fwrt->dump.wks[idx].wk, usecs_to_jiffies(delay));
return 0;
}
-IWL_EXPORT_SYMBOL(_iwl_fw_dbg_ini_collect);
-
-int iwl_fw_dbg_ini_collect(struct iwl_fw_runtime *fwrt, u32 legacy_trigger_id)
-{
- int id;
-
- switch (legacy_trigger_id) {
- case FW_DBG_TRIGGER_FW_ASSERT:
- case FW_DBG_TRIGGER_ALIVE_TIMEOUT:
- case FW_DBG_TRIGGER_DRIVER:
- id = IWL_FW_TRIGGER_ID_FW_ASSERT;
- break;
- case FW_DBG_TRIGGER_USER:
- id = IWL_FW_TRIGGER_ID_USER_TRIGGER;
- break;
- default:
- return -EIO;
- }
-
- return _iwl_fw_dbg_ini_collect(fwrt, id);
-}
-IWL_EXPORT_SYMBOL(iwl_fw_dbg_ini_collect);
int iwl_fw_dbg_collect_trig(struct iwl_fw_runtime *fwrt,
struct iwl_fw_dbg_trigger_tlv *trigger,
@@ -2219,6 +2396,9 @@ int iwl_fw_dbg_collect_trig(struct iwl_fw_runtime *fwrt,
int ret, len = 0;
char buf[64];
+ if (iwl_trans_dbg_ini_valid(fwrt->trans))
+ return 0;
+
if (fmt) {
va_list ap;
@@ -2322,7 +2502,7 @@ static void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt, u8 wk_idx)
IWL_DEBUG_FW_INFO(fwrt, "WRT: Data collection start\n");
if (iwl_trans_dbg_ini_valid(fwrt->trans))
- iwl_fw_error_ini_dump(fwrt, wk_idx);
+ iwl_fw_error_ini_dump(fwrt, &fwrt->dump.wks[wk_idx].dump_data);
else
iwl_fw_error_dump(fwrt);
IWL_DEBUG_FW_INFO(fwrt, "WRT: Data collection done\n");
@@ -2335,11 +2515,10 @@ out:
void iwl_fw_error_dump_wk(struct work_struct *work)
{
- struct iwl_fw_runtime *fwrt;
- typeof(fwrt->dump.wks[0]) *wks;
-
- wks = container_of(work, typeof(fwrt->dump.wks[0]), wk.work);
- fwrt = container_of(wks, struct iwl_fw_runtime, dump.wks[wks->idx]);
+ struct iwl_fwrt_wk_data *wks =
+ container_of(work, typeof(*wks), wk.work);
+ struct iwl_fw_runtime *fwrt =
+ container_of(wks, typeof(*fwrt), dump.wks[wks->idx]);
/* assumes the op mode mutex is locked in dump_start since
* iwl_fw_dbg_collect_sync can't run in parallel
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
index e3b5dd34643f..179f2905d56b 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
@@ -114,9 +114,8 @@ int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt,
bool monitor_only, unsigned int delay);
int iwl_fw_dbg_error_collect(struct iwl_fw_runtime *fwrt,
enum iwl_fw_dbg_trigger trig_type);
-int _iwl_fw_dbg_ini_collect(struct iwl_fw_runtime *fwrt,
- enum iwl_fw_ini_trigger_id id);
-int iwl_fw_dbg_ini_collect(struct iwl_fw_runtime *fwrt, u32 legacy_trigger_id);
+int iwl_fw_dbg_ini_collect(struct iwl_fw_runtime *fwrt,
+ struct iwl_fwrt_dump_data *dump_data);
int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt,
enum iwl_fw_dbg_trigger trig, const char *str,
size_t len, struct iwl_fw_dbg_trigger_tlv *trigger);
@@ -222,29 +221,6 @@ _iwl_fw_dbg_trigger_on(struct iwl_fw_runtime *fwrt,
_iwl_fw_dbg_trigger_on((fwrt), (wdev), (id)); \
})
-static inline bool
-iwl_fw_ini_trigger_on(struct iwl_fw_runtime *fwrt,
- enum iwl_fw_ini_trigger_id id)
-{
- struct iwl_fw_ini_trigger *trig;
- u32 usec;
-
- if (!iwl_trans_dbg_ini_valid(fwrt->trans) ||
- id == IWL_FW_TRIGGER_ID_INVALID || id >= IWL_FW_TRIGGER_ID_NUM ||
- !fwrt->dump.active_trigs[id].active)
- return false;
-
- trig = fwrt->dump.active_trigs[id].trig;
- usec = le32_to_cpu(trig->ignore_consec);
-
- if (iwl_fw_dbg_no_trig_window(fwrt, id, usec)) {
- IWL_WARN(fwrt, "Trigger %d fired in no-collect window\n", id);
- return false;
- }
-
- return true;
-}
-
static inline void
_iwl_fw_dbg_trigger_simple_stop(struct iwl_fw_runtime *fwrt,
struct wireless_dev *wdev,
@@ -315,10 +291,8 @@ static inline void iwl_fw_flush_dumps(struct iwl_fw_runtime *fwrt)
int i;
iwl_dbg_tlv_del_timers(fwrt->trans);
- for (i = 0; i < IWL_FW_RUNTIME_DUMP_WK_NUM; i++) {
+ for (i = 0; i < IWL_FW_RUNTIME_DUMP_WK_NUM; i++)
flush_delayed_work(&fwrt->dump.wks[i].wk);
- fwrt->dump.wks[i].ini_trig_id = IWL_FW_TRIGGER_ID_INVALID;
- }
}
#ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -381,12 +355,21 @@ static inline void iwl_fw_umac_set_alive_err_table(struct iwl_trans *trans,
static inline void iwl_fw_error_collect(struct iwl_fw_runtime *fwrt)
{
- if (iwl_trans_dbg_ini_valid(fwrt->trans) && fwrt->trans->dbg.hw_error) {
- _iwl_fw_dbg_ini_collect(fwrt, IWL_FW_TRIGGER_ID_FW_HW_ERROR);
+ enum iwl_fw_ini_time_point tp_id;
+
+ if (!iwl_trans_dbg_ini_valid(fwrt->trans)) {
+ iwl_fw_dbg_collect_desc(fwrt, &iwl_dump_desc_assert, false, 0);
+ return;
+ }
+
+ if (fwrt->trans->dbg.hw_error) {
+ tp_id = IWL_FW_INI_TIME_POINT_FW_HW_ERROR;
fwrt->trans->dbg.hw_error = false;
} else {
- iwl_fw_dbg_collect_desc(fwrt, &iwl_dump_desc_assert, false, 0);
+ tp_id = IWL_FW_INI_TIME_POINT_FW_ASSERT;
}
+
+ iwl_dbg_tlv_time_point(fwrt, tp_id, NULL);
}
void iwl_fw_error_print_fseq_regs(struct iwl_fw_runtime *fwrt);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
index c1aa4360736b..ca3b1a461dea 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
@@ -320,10 +320,45 @@ out:
FWRT_DEBUGFS_WRITE_FILE_OPS(send_hcmd, 512);
+static ssize_t iwl_dbgfs_fw_dbg_domain_write(struct iwl_fw_runtime *fwrt,
+ char *buf, size_t count)
+{
+ u32 new_domain;
+ int ret;
+
+ if (!iwl_trans_fw_running(fwrt->trans))
+ return -EIO;
+
+ ret = kstrtou32(buf, 0, &new_domain);
+ if (ret)
+ return ret;
+
+ if (new_domain != fwrt->trans->dbg.domains_bitmap) {
+ ret = iwl_dbg_tlv_gen_active_trigs(fwrt, new_domain);
+ if (ret)
+ return ret;
+
+ iwl_dbg_tlv_time_point(fwrt, IWL_FW_INI_TIME_POINT_PERIODIC,
+ NULL);
+ }
+
+ return count;
+}
+
+static ssize_t iwl_dbgfs_fw_dbg_domain_read(struct iwl_fw_runtime *fwrt,
+ size_t size, char *buf)
+{
+ return scnprintf(buf, size, "0x%08x\n",
+ fwrt->trans->dbg.domains_bitmap);
+}
+
+FWRT_DEBUGFS_READ_WRITE_FILE_OPS(fw_dbg_domain, 20);
+
void iwl_fwrt_dbgfs_register(struct iwl_fw_runtime *fwrt,
struct dentry *dbgfs_dir)
{
INIT_DELAYED_WORK(&fwrt->timestamp.wk, iwl_fw_timestamp_marker_wk);
FWRT_DEBUGFS_ADD_FILE(timestamp_marker, dbgfs_dir, 0200);
FWRT_DEBUGFS_ADD_FILE(send_hcmd, dbgfs_dir, 0200);
+ FWRT_DEBUGFS_ADD_FILE(fw_dbg_domain, dbgfs_dir, 0600);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
index 2e763678dbdb..f008e1bbfdf4 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
@@ -65,6 +65,7 @@
#define __fw_error_dump_h__
#include <linux/types.h>
+#include "fw/api/cmdhdr.h"
#define IWL_FW_ERROR_DUMP_BARKER 0x14789632
#define IWL_FW_INI_ERROR_DUMP_BARKER 0x14789633
@@ -327,6 +328,7 @@ struct iwl_fw_ini_fifo_hdr {
* @dram_base_addr: base address of dram monitor range
* @page_num: page number of memory range
* @fifo_hdr: fifo header of memory range
+ * @fw_pkt_hdr: FW packet header of memory range
* @data: the actual memory
*/
struct iwl_fw_ini_error_dump_range {
@@ -336,6 +338,7 @@ struct iwl_fw_ini_error_dump_range {
__le64 dram_base_addr;
__le32 page_num;
struct iwl_fw_ini_fifo_hdr fifo_hdr;
+ struct iwl_cmd_header fw_pkt_hdr;
};
__le32 data[];
} __packed;
@@ -379,12 +382,23 @@ struct iwl_fw_ini_error_dump_register {
__le32 data;
} __packed;
+/**
+ * struct iwl_fw_ini_dump_cfg_name - configuration name
+ * @image_type: image type the configuration is related to
+ * @cfg_name_len: length of the configuration name
+ * @cfg_name: name of the configuration
+ */
+struct iwl_fw_ini_dump_cfg_name {
+ __le32 image_type;
+ __le32 cfg_name_len;
+ u8 cfg_name[IWL_FW_INI_MAX_CFG_NAME];
+} __packed;
+
/* struct iwl_fw_ini_dump_info - ini dump information
* @version: dump version
- * @trigger_id: trigger id that caused the dump collection
- * @trigger_reason: not supported yet
- * @is_external_cfg: 1 if an external debug configuration was loaded
- * and 0 otherwise
+ * @time_point: time point that caused the dump collection
+ * @trigger_reason: the reason the trigger fired
+ * @external_cfg_state: &enum iwl_ini_cfg_state
* @ver_type: FW version type
* @ver_subtype: FW version subtype
* @hw_step: HW step
@@ -397,22 +411,18 @@ struct iwl_fw_ini_error_dump_register {
* @lmac_minor: lmac minor version
* @umac_major: umac major version
* @umac_minor: umac minor version
+ * @fw_mon_mode: FW monitor mode &enum iwl_fw_ini_buffer_location
+ * @regions_mask: bitmap mask of regions ids in the dump
* @build_tag_len: length of the build tag
* @build_tag: build tag string
- * @img_name_len: length of the FW image name
- * @img_name: FW image name
- * @internal_dbg_cfg_name_len: length of the internal debug configuration name
- * @internal_dbg_cfg_name: internal debug configuration name
- * @external_dbg_cfg_name_len: length of the external debug configuration name
- * @external_dbg_cfg_name: external debug configuration name
- * @regions_num: number of region ids
- * @region_ids: region ids the trigger configured to collect
+ * @num_of_cfg_names: number of configuration name structs
+ * @cfg_names: configuration names
*/
struct iwl_fw_ini_dump_info {
__le32 version;
- __le32 trigger_id;
+ __le32 time_point;
__le32 trigger_reason;
- __le32 is_external_cfg;
+ __le32 external_cfg_state;
__le32 ver_type;
__le32 ver_subtype;
__le32 hw_step;
@@ -425,17 +435,24 @@ struct iwl_fw_ini_dump_info {
__le32 lmac_minor;
__le32 umac_major;
__le32 umac_minor;
+ __le32 fw_mon_mode;
+ __le64 regions_mask;
__le32 build_tag_len;
u8 build_tag[FW_VER_HUMAN_READABLE_SZ];
- __le32 img_name_len;
- u8 img_name[IWL_FW_INI_MAX_IMG_NAME_LEN];
- __le32 internal_dbg_cfg_name_len;
- u8 internal_dbg_cfg_name[IWL_FW_INI_MAX_DBG_CFG_NAME_LEN];
- __le32 external_dbg_cfg_name_len;
- u8 external_dbg_cfg_name[IWL_FW_INI_MAX_DBG_CFG_NAME_LEN];
- __le32 regions_num;
- __le32 region_ids[];
+ __le32 num_of_cfg_names;
+ struct iwl_fw_ini_dump_cfg_name cfg_names[];
+} __packed;
+
+/**
+ * struct iwl_fw_ini_err_table_dump - ini error table dump
+ * @header: header of the region
+ * @version: error table version
+ * @ranges: the memory ranges of this region
+ */
+struct iwl_fw_ini_err_table_dump {
+ struct iwl_fw_ini_error_dump_header header;
+ __le32 version;
+ struct iwl_fw_ini_error_dump_range ranges[];
} __packed;
/**
@@ -457,12 +474,14 @@ struct iwl_fw_error_dump_rb {
* @header: header of the region
* @write_ptr: write pointer position in the buffer
* @cycle_cnt: cycles count
+ * @cur_frag: current fragment in use
* @ranges: the memory ranges of this region
*/
struct iwl_fw_ini_monitor_dump {
struct iwl_fw_ini_error_dump_header header;
__le32 write_ptr;
__le32 cycle_cnt;
+ __le32 cur_frag;
struct iwl_fw_ini_error_dump_range ranges[];
} __packed;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h
index 0d5bc4ce5c07..1554f5fdd483 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/file.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h
@@ -93,7 +93,7 @@ struct iwl_ucode_header {
} u;
};
-#define IWL_UCODE_INI_TLV_GROUP 0x1000000
+#define IWL_UCODE_TLV_DEBUG_BASE 0x1000005
/*
* new TLV uCode file layout
@@ -151,7 +151,6 @@ enum iwl_ucode_tlv_type {
IWL_UCODE_TLV_FW_RECOVERY_INFO = 57,
IWL_UCODE_TLV_FW_FSEQ_VERSION = 60,
- IWL_UCODE_TLV_DEBUG_BASE = IWL_UCODE_INI_TLV_GROUP,
IWL_UCODE_TLV_TYPE_DEBUG_INFO = IWL_UCODE_TLV_DEBUG_BASE + 0,
IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION = IWL_UCODE_TLV_DEBUG_BASE + 1,
IWL_UCODE_TLV_TYPE_HCMD = IWL_UCODE_TLV_DEBUG_BASE + 2,
@@ -326,6 +325,8 @@ enum iwl_ucode_tlv_api {
IWL_UCODE_TLV_API_REDUCED_SCAN_CONFIG = (__force iwl_ucode_tlv_api_t)56,
IWL_UCODE_TLV_API_ADWELL_HB_DEF_N_AP = (__force iwl_ucode_tlv_api_t)57,
IWL_UCODE_TLV_API_SCAN_EXT_CHAN_VER = (__force iwl_ucode_tlv_api_t)58,
+ IWL_UCODE_TLV_API_BAND_IN_RX_DATA = (__force iwl_ucode_tlv_api_t)59,
+
NUM_IWL_UCODE_TLV_API
#ifdef __CHECKER__
@@ -449,6 +450,7 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_CS_MODIFY = (__force iwl_ucode_tlv_capa_t)49,
IWL_UCODE_TLV_CAPA_SET_LTR_GEN2 = (__force iwl_ucode_tlv_capa_t)50,
IWL_UCODE_TLV_CAPA_SET_PPAG = (__force iwl_ucode_tlv_capa_t)52,
+ IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD = (__force iwl_ucode_tlv_capa_t)54,
/* set 2 */
IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE = (__force iwl_ucode_tlv_capa_t)64,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/img.h b/drivers/net/wireless/intel/iwlwifi/fw/img.h
index 039576d71276..994880a83652 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/img.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/img.h
@@ -228,18 +228,6 @@ struct iwl_fw_dbg {
};
/**
- * struct iwl_fw_ini_active_triggers
- * @active: is this trigger active
- * @size: allocated memory size of the trigger
- * @trig: trigger
- */
-struct iwl_fw_ini_active_triggers {
- bool active;
- size_t size;
- struct iwl_fw_ini_trigger *trig;
-};
-
-/**
* struct iwl_fw - variables associated with the firmware
*
* @ucode_ver: ucode version from the ucode file
@@ -325,4 +313,22 @@ iwl_get_ucode_image(const struct iwl_fw *fw, enum iwl_ucode_type ucode_type)
return &fw->img[ucode_type];
}
+static inline u8 iwl_mvm_lookup_cmd_ver(const struct iwl_fw *fw, u8 grp, u8 cmd)
+{
+ const struct iwl_fw_cmd_version *entry;
+ unsigned int i;
+
+ if (!fw->ucode_capa.cmd_versions ||
+ !fw->ucode_capa.n_cmd_versions)
+ return IWL_FW_CMD_VER_UNKNOWN;
+
+ entry = fw->ucode_capa.cmd_versions;
+ for (i = 0; i < fw->ucode_capa.n_cmd_versions; i++, entry++) {
+ if (entry->group == grp && entry->cmd == cmd)
+ return entry->cmd_ver;
+ }
+
+ return IWL_FW_CMD_VER_UNKNOWN;
+}
+
#endif /* __iwl_fw_img_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
index be436c18a047..c24575ff0e54 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
@@ -65,7 +65,11 @@
#include "img.h"
#include "fw/api/debug.h"
#include "fw/api/paging.h"
+#include "fw/api/power.h"
#include "iwl-eeprom-parse.h"
+#include "fw/acpi.h"
+
+#define IWL_FW_DBG_DOMAIN IWL_FW_INI_DOMAIN_ALWAYS_ON
struct iwl_fw_runtime_ops {
int (*dump_start)(void *ctx);
@@ -91,6 +95,27 @@ struct iwl_fwrt_shared_mem_cfg {
#define IWL_FW_RUNTIME_DUMP_WK_NUM 5
/**
+ * struct iwl_fwrt_dump_data - dump data
+ * @trig: trigger the worker was scheduled upon
+ * @fw_pkt: packet received from FW
+ */
+struct iwl_fwrt_dump_data {
+ struct iwl_fw_ini_trigger_tlv *trig;
+ struct iwl_rx_packet *fw_pkt;
+};
+
+/**
+ * struct iwl_fwrt_wk_data - dump worker data struct
+ * @idx: index of the worker
+ * @wk: worker
+ */
+struct iwl_fwrt_wk_data {
+ u8 idx;
+ struct delayed_work wk;
+ struct iwl_fwrt_dump_data dump_data;
+};
+
+/**
* struct iwl_txf_iter_data - Tx fifo iterator data struct
* @fifo: fifo number
* @lmac: lmac number
@@ -105,6 +130,14 @@ struct iwl_txf_iter_data {
};
/**
+ * enum iwl_fw_runtime_status - fw runtime status flags
+ * @STATUS_GEN_ACTIVE_TRIGS: generating active trigger list
+ */
+enum iwl_fw_runtime_status {
+ STATUS_GEN_ACTIVE_TRIGS,
+};
+
+/**
* struct iwl_fw_runtime - runtime data for firmware
* @fw: firmware image
* @cfg: NIC configuration
@@ -117,6 +150,7 @@ struct iwl_txf_iter_data {
* @smem_cfg: saved firmware SMEM configuration
* @cur_fw_img: current firmware image, must be maintained by
* the driver by calling &iwl_fw_set_current_image()
+ * @status: &enum iwl_fw_runtime_status
* @dump: debug dump data
*/
struct iwl_fw_runtime {
@@ -137,33 +171,25 @@ struct iwl_fw_runtime {
/* memory configuration */
struct iwl_fwrt_shared_mem_cfg smem_cfg;
+ unsigned long status;
+
/* debug */
struct {
const struct iwl_fw_dump_desc *desc;
bool monitor_only;
- struct {
- u8 idx;
- enum iwl_fw_ini_trigger_id ini_trig_id;
- struct delayed_work wk;
- } wks[IWL_FW_RUNTIME_DUMP_WK_NUM];
+ struct iwl_fwrt_wk_data wks[IWL_FW_RUNTIME_DUMP_WK_NUM];
unsigned long active_wks;
u8 conf;
/* ts of the beginning of a non-collect fw dbg data period */
- unsigned long non_collect_ts_start[IWL_FW_TRIGGER_ID_NUM];
+ unsigned long non_collect_ts_start[IWL_FW_INI_TIME_POINT_NUM];
u32 *d3_debug_data;
- struct iwl_fw_ini_region_cfg *active_regs[IWL_FW_INI_MAX_REGION_ID];
- struct iwl_fw_ini_active_triggers active_trigs[IWL_FW_TRIGGER_ID_NUM];
u32 lmac_err_id[MAX_NUM_LMAC];
u32 umac_err_id;
struct iwl_txf_iter_data txf_iter_data;
- u8 img_name[IWL_FW_INI_MAX_IMG_NAME_LEN];
- u8 internal_dbg_cfg_name[IWL_FW_INI_MAX_DBG_CFG_NAME_LEN];
- u8 external_dbg_cfg_name[IWL_FW_INI_MAX_DBG_CFG_NAME_LEN];
-
struct {
u8 type;
u8 subtype;
@@ -179,7 +205,16 @@ struct iwl_fw_runtime {
u32 delay;
u64 seq;
} timestamp;
+ bool tpc_enabled;
#endif /* CONFIG_IWLWIFI_DEBUGFS */
+#ifdef CONFIG_ACPI
+ struct iwl_sar_profile sar_profiles[ACPI_SAR_PROFILE_NUM];
+ u8 sar_chain_a_profile;
+ u8 sar_chain_b_profile;
+ struct iwl_geo_profile geo_profiles[ACPI_NUM_GEO_PROFILES];
+ u32 geo_rev;
+ struct iwl_ppag_table_cmd ppag_table;
+#endif
};
void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans,
@@ -194,16 +229,6 @@ static inline void iwl_fw_runtime_free(struct iwl_fw_runtime *fwrt)
kfree(fwrt->dump.d3_debug_data);
fwrt->dump.d3_debug_data = NULL;
- for (i = 0; i < IWL_FW_TRIGGER_ID_NUM; i++) {
- struct iwl_fw_ini_active_triggers *active =
- &fwrt->dump.active_trigs[i];
-
- active->active = false;
- active->size = 0;
- kfree(active->trig);
- active->trig = NULL;
- }
-
iwl_dbg_tlv_del_timers(fwrt->trans);
for (i = 0; i < IWL_FW_RUNTIME_DUMP_WK_NUM; i++)
cancel_delayed_work_sync(&fwrt->dump.wks[i].wk);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index 214495a7165f..317eac066082 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -88,7 +88,6 @@ enum iwl_device_family {
IWL_DEVICE_FAMILY_8000,
IWL_DEVICE_FAMILY_9000,
IWL_DEVICE_FAMILY_22000,
- IWL_DEVICE_FAMILY_22560,
IWL_DEVICE_FAMILY_AX210,
};
@@ -360,6 +359,28 @@ struct iwl_cfg_trans_params {
};
/**
+ * struct iwl_fw_mon_reg - FW monitor register info
+ * @addr: register address
+ * @mask: register mask
+ */
+struct iwl_fw_mon_reg {
+ u32 addr;
+ u32 mask;
+};
+
+/**
+ * struct iwl_fw_mon_regs - FW monitor registers
+ * @write_ptr: write pointer register
+ * @cycle_cnt: cycle count register
+ * @cur_frag: current fragment register
+ */
+struct iwl_fw_mon_regs {
+ struct iwl_fw_mon_reg write_ptr;
+ struct iwl_fw_mon_reg cycle_cnt;
+ struct iwl_fw_mon_reg cur_frag;
+};
+
+/**
* struct iwl_cfg
* @trans: the trans-specific configuration part
* @name: Official name of the device
@@ -388,7 +409,6 @@ struct iwl_cfg_trans_params {
* @mac_addr_from_csr: read HW address from CSR registers
* @features: hw features, any combination of feature_whitelist
* @pwr_tx_backoffs: translation table between power limits and backoffs
- * @max_rx_agg_size: max RX aggregation size of the ADDBA request/response
* @max_tx_agg_size: max TX aggregation size of the ADDBA request/response
* @max_ht_ampdu_factor: the exponent of the max length of A-MPDU that the
* station can receive in HT
@@ -460,7 +480,6 @@ struct iwl_cfg {
u8 valid_rx_ant;
u8 non_shared_ant;
u8 nvm_hw_section_num;
- u8 max_rx_agg_size;
u8 max_tx_agg_size;
u8 max_ht_ampdu_exponent;
u8 max_vht_ampdu_exponent;
@@ -471,12 +490,10 @@ struct iwl_cfg {
u32 d3_debug_data_base_addr;
u32 d3_debug_data_length;
u32 min_txq_size;
- u32 fw_mon_smem_write_ptr_addr;
- u32 fw_mon_smem_write_ptr_msk;
- u32 fw_mon_smem_cycle_cnt_ptr_addr;
- u32 fw_mon_smem_cycle_cnt_ptr_msk;
u32 gp2_reg_addr;
u32 min_256_ba_txq_size;
+ const struct iwl_fw_mon_regs mon_dram_regs;
+ const struct iwl_fw_mon_regs mon_smem_regs;
};
extern const struct iwl_csr_params iwl_csr_v1;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
index 695bbaa86273..92d9898ab7c2 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
@@ -603,9 +603,7 @@ enum msix_fh_int_causes {
enum msix_hw_int_causes {
MSIX_HW_INT_CAUSES_REG_ALIVE = BIT(0),
MSIX_HW_INT_CAUSES_REG_WAKEUP = BIT(1),
- MSIX_HW_INT_CAUSES_REG_IPC = BIT(1),
MSIX_HW_INT_CAUSES_REG_IML = BIT(2),
- MSIX_HW_INT_CAUSES_REG_SW_ERR_V2 = BIT(5),
MSIX_HW_INT_CAUSES_REG_CT_KILL = BIT(6),
MSIX_HW_INT_CAUSES_REG_RF_KILL = BIT(7),
MSIX_HW_INT_CAUSES_REG_PERIODIC = BIT(8),
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
index 3d7f8ff8ef58..f266647dc08c 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
@@ -95,6 +95,20 @@ struct iwl_dbg_tlv_ver_data {
int max_ver;
};
+/**
+ * struct iwl_dbg_tlv_timer_node - timer node struct
+ * @list: list of &struct iwl_dbg_tlv_timer_node
+ * @timer: timer
+ * @fwrt: &struct iwl_fw_runtime
+ * @tlv: TLV attach to the timer node
+ */
+struct iwl_dbg_tlv_timer_node {
+ struct list_head list;
+ struct timer_list timer;
+ struct iwl_fw_runtime *fwrt;
+ struct iwl_ucode_tlv *tlv;
+};
+
static const struct iwl_dbg_tlv_ver_data
dbg_ver_table[IWL_DBG_TLV_TYPE_NUM] = {
[IWL_DBG_TLV_TYPE_DEBUG_INFO] = {.min_ver = 1, .max_ver = 1,},
@@ -104,12 +118,27 @@ dbg_ver_table[IWL_DBG_TLV_TYPE_NUM] = {
[IWL_DBG_TLV_TYPE_TRIGGER] = {.min_ver = 1, .max_ver = 1,},
};
+static int iwl_dbg_tlv_add(struct iwl_ucode_tlv *tlv, struct list_head *list)
+{
+ u32 len = le32_to_cpu(tlv->length);
+ struct iwl_dbg_tlv_node *node;
+
+ node = kzalloc(sizeof(*node) + len, GFP_KERNEL);
+ if (!node)
+ return -ENOMEM;
+
+ memcpy(&node->tlv, tlv, sizeof(node->tlv) + len);
+ list_add_tail(&node->list, list);
+
+ return 0;
+}
+
static bool iwl_dbg_tlv_ver_support(struct iwl_ucode_tlv *tlv)
{
struct iwl_fw_ini_header *hdr = (void *)&tlv->data[0];
u32 type = le32_to_cpu(tlv->type);
u32 tlv_idx = type - IWL_UCODE_TLV_DEBUG_BASE;
- u32 ver = le32_to_cpu(hdr->tlv_version);
+ u32 ver = le32_to_cpu(hdr->version);
if (ver < dbg_ver_table[tlv_idx].min_ver ||
ver > dbg_ver_table[tlv_idx].max_ver)
@@ -118,27 +147,169 @@ static bool iwl_dbg_tlv_ver_support(struct iwl_ucode_tlv *tlv)
return true;
}
+static int iwl_dbg_tlv_alloc_debug_info(struct iwl_trans *trans,
+ struct iwl_ucode_tlv *tlv)
+{
+ struct iwl_fw_ini_debug_info_tlv *debug_info = (void *)tlv->data;
+
+ if (le32_to_cpu(tlv->length) != sizeof(*debug_info))
+ return -EINVAL;
+
+ IWL_DEBUG_FW(trans, "WRT: Loading debug cfg: %s\n",
+ debug_info->debug_cfg_name);
+
+ return iwl_dbg_tlv_add(tlv, &trans->dbg.debug_info_tlv_list);
+}
+
+static int iwl_dbg_tlv_alloc_buf_alloc(struct iwl_trans *trans,
+ struct iwl_ucode_tlv *tlv)
+{
+ struct iwl_fw_ini_allocation_tlv *alloc = (void *)tlv->data;
+ u32 buf_location = le32_to_cpu(alloc->buf_location);
+ u32 alloc_id = le32_to_cpu(alloc->alloc_id);
+
+ if (le32_to_cpu(tlv->length) != sizeof(*alloc) ||
+ (buf_location != IWL_FW_INI_LOCATION_SRAM_PATH &&
+ buf_location != IWL_FW_INI_LOCATION_DRAM_PATH))
+ return -EINVAL;
+
+ if ((buf_location == IWL_FW_INI_LOCATION_SRAM_PATH &&
+ alloc_id != IWL_FW_INI_ALLOCATION_ID_DBGC1) ||
+ (buf_location == IWL_FW_INI_LOCATION_DRAM_PATH &&
+ (alloc_id == IWL_FW_INI_ALLOCATION_INVALID ||
+ alloc_id >= IWL_FW_INI_ALLOCATION_NUM))) {
+ IWL_ERR(trans,
+ "WRT: Invalid allocation id %u for allocation TLV\n",
+ alloc_id);
+ return -EINVAL;
+ }
+
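+ /* only the configuration is stored here; the buffer itself is
+ * allocated later, when the allocation TLV is applied
+ */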
+ trans->dbg.fw_mon_cfg[alloc_id] = *alloc;
+
+ return 0;
+}
+
+static int iwl_dbg_tlv_alloc_hcmd(struct iwl_trans *trans,
+ struct iwl_ucode_tlv *tlv)
+{
+ struct iwl_fw_ini_hcmd_tlv *hcmd = (void *)tlv->data;
+ u32 tp = le32_to_cpu(hcmd->time_point);
+
+ if (le32_to_cpu(tlv->length) <= sizeof(*hcmd))
+ return -EINVAL;
+
+ /* Host commands cannot be sent at an early time point since the FW
+ * is not ready yet
+ */
+ if (tp == IWL_FW_INI_TIME_POINT_INVALID ||
+ tp >= IWL_FW_INI_TIME_POINT_NUM ||
+ tp == IWL_FW_INI_TIME_POINT_EARLY) {
+ IWL_ERR(trans,
+ "WRT: Invalid time point %u for host command TLV\n",
+ tp);
+ return -EINVAL;
+ }
+
+ return iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].hcmd_list);
+}
+
+static int iwl_dbg_tlv_alloc_region(struct iwl_trans *trans,
+ struct iwl_ucode_tlv *tlv)
+{
+ struct iwl_fw_ini_region_tlv *reg = (void *)tlv->data;
+ struct iwl_ucode_tlv **active_reg;
+ u32 id = le32_to_cpu(reg->id);
+ u32 type = le32_to_cpu(reg->type);
+ u32 tlv_len = sizeof(*tlv) + le32_to_cpu(tlv->length);
+
+ if (le32_to_cpu(tlv->length) < sizeof(*reg))
+ return -EINVAL;
+
+ if (id >= IWL_FW_INI_MAX_REGION_ID) {
+ IWL_ERR(trans, "WRT: Invalid region id %u\n", id);
+ return -EINVAL;
+ }
+
+ if (type <= IWL_FW_INI_REGION_INVALID ||
+ type >= IWL_FW_INI_REGION_NUM) {
+ IWL_ERR(trans, "WRT: Invalid region type %u\n", type);
+ return -EINVAL;
+ }
+
+ active_reg = &trans->dbg.active_regions[id];
+ if (*active_reg) {
+ IWL_WARN(trans, "WRT: Overriding region id %u\n", id);
+
+ kfree(*active_reg);
+ }
+
+ *active_reg = kmemdup(tlv, tlv_len, GFP_KERNEL);
+ if (!*active_reg)
+ return -ENOMEM;
+
+ IWL_DEBUG_FW(trans, "WRT: Enabling region id %u type %u\n", id, type);
+
+ return 0;
+}
+
+static int iwl_dbg_tlv_alloc_trigger(struct iwl_trans *trans,
+ struct iwl_ucode_tlv *tlv)
+{
+ struct iwl_fw_ini_trigger_tlv *trig = (void *)tlv->data;
+ u32 tp = le32_to_cpu(trig->time_point);
+
+ if (le32_to_cpu(tlv->length) < sizeof(*trig))
+ return -EINVAL;
+
+ if (tp <= IWL_FW_INI_TIME_POINT_INVALID ||
+ tp >= IWL_FW_INI_TIME_POINT_NUM) {
+ IWL_ERR(trans,
+ "WRT: Invalid time point %u for trigger TLV\n",
+ tp);
+ return -EINVAL;
+ }
+
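+ /* zero occurrences means the trigger fires an unlimited number
+ * of times, so normalize it to the maximum value
+ */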
+ if (!le32_to_cpu(trig->occurrences))
+ trig->occurrences = cpu_to_le32(-1);
+
+ return iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].trig_list);
+}
+
+static int (*dbg_tlv_alloc[])(struct iwl_trans *trans,
+ struct iwl_ucode_tlv *tlv) = {
+ [IWL_DBG_TLV_TYPE_DEBUG_INFO] = iwl_dbg_tlv_alloc_debug_info,
+ [IWL_DBG_TLV_TYPE_BUF_ALLOC] = iwl_dbg_tlv_alloc_buf_alloc,
+ [IWL_DBG_TLV_TYPE_HCMD] = iwl_dbg_tlv_alloc_hcmd,
+ [IWL_DBG_TLV_TYPE_REGION] = iwl_dbg_tlv_alloc_region,
+ [IWL_DBG_TLV_TYPE_TRIGGER] = iwl_dbg_tlv_alloc_trigger,
+};
+
void iwl_dbg_tlv_alloc(struct iwl_trans *trans, struct iwl_ucode_tlv *tlv,
bool ext)
{
struct iwl_fw_ini_header *hdr = (void *)&tlv->data[0];
u32 type = le32_to_cpu(tlv->type);
- u32 pnt = le32_to_cpu(hdr->apply_point);
u32 tlv_idx = type - IWL_UCODE_TLV_DEBUG_BASE;
enum iwl_ini_cfg_state *cfg_state = ext ?
&trans->dbg.external_ini_cfg : &trans->dbg.internal_ini_cfg;
+ int ret;
- IWL_DEBUG_FW(trans, "WRT: read TLV 0x%x, apply point %d\n",
- type, pnt);
-
- if (tlv_idx >= IWL_DBG_TLV_TYPE_NUM) {
- IWL_ERR(trans, "WRT: Unsupported TLV 0x%x\n", type);
+ if (tlv_idx >= ARRAY_SIZE(dbg_tlv_alloc) || !dbg_tlv_alloc[tlv_idx]) {
+ IWL_ERR(trans, "WRT: Unsupported TLV type 0x%x\n", type);
goto out_err;
}
if (!iwl_dbg_tlv_ver_support(tlv)) {
IWL_ERR(trans, "WRT: Unsupported TLV 0x%x version %u\n", type,
- le32_to_cpu(hdr->tlv_version));
+ le32_to_cpu(hdr->version));
+ goto out_err;
+ }
+
+ ret = dbg_tlv_alloc[tlv_idx](trans, tlv);
+ if (ret) {
+ IWL_ERR(trans,
+ "WRT: Failed to allocate TLV 0x%x, ret %d, (ext=%d)\n",
+ type, ret, ext);
goto out_err;
}
@@ -153,13 +324,91 @@ out_err:
void iwl_dbg_tlv_del_timers(struct iwl_trans *trans)
{
- /* will be used later */
+ struct list_head *timer_list = &trans->dbg.periodic_trig_list;
+ struct iwl_dbg_tlv_timer_node *node, *tmp;
+
+ list_for_each_entry_safe(node, tmp, timer_list, list) {
+ del_timer(&node->timer);
+ list_del(&node->list);
+ kfree(node);
+ }
}
IWL_EXPORT_SYMBOL(iwl_dbg_tlv_del_timers);
+static void iwl_dbg_tlv_fragments_free(struct iwl_trans *trans,
+ enum iwl_fw_ini_allocation_id alloc_id)
+{
+ struct iwl_fw_mon *fw_mon;
+ int i;
+
+ if (alloc_id <= IWL_FW_INI_ALLOCATION_INVALID ||
+ alloc_id >= IWL_FW_INI_ALLOCATION_NUM)
+ return;
+
+ fw_mon = &trans->dbg.fw_mon_ini[alloc_id];
+
+ for (i = 0; i < fw_mon->num_frags; i++) {
+ struct iwl_dram_data *frag = &fw_mon->frags[i];
+
+ dma_free_coherent(trans->dev, frag->size, frag->block,
+ frag->physical);
+
+ frag->physical = 0;
+ frag->block = NULL;
+ frag->size = 0;
+ }
+
+ kfree(fw_mon->frags);
+ fw_mon->frags = NULL;
+ fw_mon->num_frags = 0;
+}
+
void iwl_dbg_tlv_free(struct iwl_trans *trans)
{
- /* will be used again later */
+ struct iwl_dbg_tlv_node *tlv_node, *tlv_node_tmp;
+ int i;
+
+ iwl_dbg_tlv_del_timers(trans);
+
+ for (i = 0; i < ARRAY_SIZE(trans->dbg.active_regions); i++) {
+ struct iwl_ucode_tlv **active_reg =
+ &trans->dbg.active_regions[i];
+
+ kfree(*active_reg);
+ *active_reg = NULL;
+ }
+
+ list_for_each_entry_safe(tlv_node, tlv_node_tmp,
+ &trans->dbg.debug_info_tlv_list, list) {
+ list_del(&tlv_node->list);
+ kfree(tlv_node);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(trans->dbg.time_point); i++) {
+ struct iwl_dbg_tlv_time_point_data *tp =
+ &trans->dbg.time_point[i];
+
+ list_for_each_entry_safe(tlv_node, tlv_node_tmp, &tp->trig_list,
+ list) {
+ list_del(&tlv_node->list);
+ kfree(tlv_node);
+ }
+
+ list_for_each_entry_safe(tlv_node, tlv_node_tmp, &tp->hcmd_list,
+ list) {
+ list_del(&tlv_node->list);
+ kfree(tlv_node);
+ }
+
+ list_for_each_entry_safe(tlv_node, tlv_node_tmp,
+ &tp->active_trig_list, list) {
+ list_del(&tlv_node->list);
+ kfree(tlv_node);
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(trans->dbg.fw_mon_ini); i++)
+ iwl_dbg_tlv_fragments_free(trans, i);
}
static int iwl_dbg_tlv_parse_bin(struct iwl_trans *trans, const u8 *data,
@@ -196,7 +445,7 @@ void iwl_dbg_tlv_load_bin(struct device *dev, struct iwl_trans *trans)
if (!iwlwifi_mod_params.enable_ini)
return;
- res = request_firmware(&fw, "iwl-dbg-tlv.ini", dev);
+ res = request_firmware(&fw, "iwl-debug-yoyo.bin", dev);
if (res)
return;
@@ -205,10 +454,628 @@ void iwl_dbg_tlv_load_bin(struct device *dev, struct iwl_trans *trans)
release_firmware(fw);
}
+void iwl_dbg_tlv_init(struct iwl_trans *trans)
+{
+ int i;
+
+ INIT_LIST_HEAD(&trans->dbg.debug_info_tlv_list);
+ INIT_LIST_HEAD(&trans->dbg.periodic_trig_list);
+
+ for (i = 0; i < ARRAY_SIZE(trans->dbg.time_point); i++) {
+ struct iwl_dbg_tlv_time_point_data *tp =
+ &trans->dbg.time_point[i];
+
+ INIT_LIST_HEAD(&tp->trig_list);
+ INIT_LIST_HEAD(&tp->hcmd_list);
+ INIT_LIST_HEAD(&tp->active_trig_list);
+ }
+}
+
+static int iwl_dbg_tlv_alloc_fragment(struct iwl_fw_runtime *fwrt,
+ struct iwl_dram_data *frag, u32 pages)
+{
+ void *block = NULL;
+ dma_addr_t physical;
+
+ if (!frag || frag->size || !pages)
+ return -EIO;
+
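+ /* on failure, keep retrying with half the requested size */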
+ while (pages) {
+ block = dma_alloc_coherent(fwrt->dev, pages * PAGE_SIZE,
+ &physical,
+ GFP_KERNEL | __GFP_NOWARN);
+ if (block)
+ break;
+
+ IWL_WARN(fwrt, "WRT: Failed to allocate fragment size %lu\n",
+ pages * PAGE_SIZE);
+
+ pages = DIV_ROUND_UP(pages, 2);
+ }
+
+ if (!block)
+ return -ENOMEM;
+
+ frag->physical = physical;
+ frag->block = block;
+ frag->size = pages * PAGE_SIZE;
+
+ return pages;
+}
+
+static int iwl_dbg_tlv_alloc_fragments(struct iwl_fw_runtime *fwrt,
+ enum iwl_fw_ini_allocation_id alloc_id)
+{
+ struct iwl_fw_mon *fw_mon;
+ struct iwl_fw_ini_allocation_tlv *fw_mon_cfg;
+ u32 num_frags, remain_pages, frag_pages;
+ int i;
+
+ if (alloc_id < IWL_FW_INI_ALLOCATION_INVALID ||
+ alloc_id >= IWL_FW_INI_ALLOCATION_NUM)
+ return -EIO;
+
+ fw_mon_cfg = &fwrt->trans->dbg.fw_mon_cfg[alloc_id];
+ fw_mon = &fwrt->trans->dbg.fw_mon_ini[alloc_id];
+
+ if (fw_mon->num_frags ||
+ fw_mon_cfg->buf_location !=
+ cpu_to_le32(IWL_FW_INI_LOCATION_DRAM_PATH))
+ return 0;
+
+ num_frags = le32_to_cpu(fw_mon_cfg->max_frags_num);
+ if (!fw_has_capa(&fwrt->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_DBG_BUF_ALLOC_CMD_SUPP)) {
+ if (alloc_id != IWL_FW_INI_ALLOCATION_ID_DBGC1)
+ return -EIO;
+ num_frags = 1;
+ }
+
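+ /* split the requested size evenly across the fragments, after
+ * capping their number at BUF_ALLOC_MAX_NUM_FRAGS
+ */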
+ remain_pages = DIV_ROUND_UP(le32_to_cpu(fw_mon_cfg->req_size),
+ PAGE_SIZE);
+ num_frags = min_t(u32, num_frags, BUF_ALLOC_MAX_NUM_FRAGS);
+ num_frags = min_t(u32, num_frags, remain_pages);
+ frag_pages = DIV_ROUND_UP(remain_pages, num_frags);
+
+ fw_mon->frags = kcalloc(num_frags, sizeof(*fw_mon->frags), GFP_KERNEL);
+ if (!fw_mon->frags)
+ return -ENOMEM;
+
+ for (i = 0; i < num_frags; i++) {
+ int pages = min_t(u32, frag_pages, remain_pages);
+
+ IWL_DEBUG_FW(fwrt,
+ "WRT: Allocating DRAM buffer (alloc_id=%u, fragment=%u, size=0x%lx)\n",
+ alloc_id, i, pages * PAGE_SIZE);
+
+ pages = iwl_dbg_tlv_alloc_fragment(fwrt, &fw_mon->frags[i],
+ pages);
+ if (pages < 0) {
+ u32 alloc_size = le32_to_cpu(fw_mon_cfg->req_size) -
+ (remain_pages * PAGE_SIZE);
+
+ if (alloc_size < le32_to_cpu(fw_mon_cfg->min_size)) {
+ iwl_dbg_tlv_fragments_free(fwrt->trans,
+ alloc_id);
+ return pages;
+ }
+ break;
+ }
+
+ remain_pages -= pages;
+ fw_mon->num_frags++;
+ }
+
+ return 0;
+}
+
+static int iwl_dbg_tlv_apply_buffer(struct iwl_fw_runtime *fwrt,
+ enum iwl_fw_ini_allocation_id alloc_id)
+{
+ struct iwl_fw_mon *fw_mon;
+ u32 remain_frags, num_commands;
+ int i, fw_mon_idx = 0;
+
+ if (!fw_has_capa(&fwrt->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_DBG_BUF_ALLOC_CMD_SUPP))
+ return 0;
+
+ if (alloc_id < IWL_FW_INI_ALLOCATION_INVALID ||
+ alloc_id >= IWL_FW_INI_ALLOCATION_NUM)
+ return -EIO;
+
+ if (le32_to_cpu(fwrt->trans->dbg.fw_mon_cfg[alloc_id].buf_location) !=
+ IWL_FW_INI_LOCATION_DRAM_PATH)
+ return 0;
+
+ fw_mon = &fwrt->trans->dbg.fw_mon_ini[alloc_id];
+
+ /* the first fragment of DBGC1 is given to the FW via register
+ * or context info
+ */
+ if (alloc_id == IWL_FW_INI_ALLOCATION_ID_DBGC1)
+ fw_mon_idx++;
+
+ remain_frags = fw_mon->num_frags - fw_mon_idx;
+ if (!remain_frags)
+ return 0;
+
+ num_commands = DIV_ROUND_UP(remain_frags, BUF_ALLOC_MAX_NUM_FRAGS);
+
+ IWL_DEBUG_FW(fwrt, "WRT: Applying DRAM destination (alloc_id=%u)\n",
+ alloc_id);
+
+ for (i = 0; i < num_commands; i++) {
+ u32 num_frags = min_t(u32, remain_frags,
+ BUF_ALLOC_MAX_NUM_FRAGS);
+ struct iwl_buf_alloc_cmd data = {
+ .alloc_id = cpu_to_le32(alloc_id),
+ .num_frags = cpu_to_le32(num_frags),
+ .buf_location =
+ cpu_to_le32(IWL_FW_INI_LOCATION_DRAM_PATH),
+ };
+ struct iwl_host_cmd hcmd = {
+ .id = WIDE_ID(DEBUG_GROUP, BUFFER_ALLOCATION),
+ .data[0] = &data,
+ .len[0] = sizeof(data),
+ };
+ int ret, j;
+
+ for (j = 0; j < num_frags; j++) {
+ struct iwl_buf_alloc_frag *frag = &data.frags[j];
+ struct iwl_dram_data *fw_mon_frag =
+ &fw_mon->frags[fw_mon_idx++];
+
+ frag->addr = cpu_to_le64(fw_mon_frag->physical);
+ frag->size = cpu_to_le32(fw_mon_frag->size);
+ }
+ ret = iwl_trans_send_cmd(fwrt->trans, &hcmd);
+ if (ret)
+ return ret;
+
+ remain_frags -= num_frags;
+ }
+
+ return 0;
+}
+
+static void iwl_dbg_tlv_apply_buffers(struct iwl_fw_runtime *fwrt)
+{
+ int ret, i;
+
+ for (i = 0; i < IWL_FW_INI_ALLOCATION_NUM; i++) {
+ ret = iwl_dbg_tlv_apply_buffer(fwrt, i);
+ if (ret)
+ IWL_WARN(fwrt,
+ "WRT: Failed to apply DRAM buffer for allocation id %d, ret=%d\n",
+ i, ret);
+ }
+}
+
+static void iwl_dbg_tlv_send_hcmds(struct iwl_fw_runtime *fwrt,
+ struct list_head *hcmd_list)
+{
+ struct iwl_dbg_tlv_node *node;
+
+ list_for_each_entry(node, hcmd_list, list) {
+ struct iwl_fw_ini_hcmd_tlv *hcmd = (void *)node->tlv.data;
+ struct iwl_fw_ini_hcmd *hcmd_data = &hcmd->hcmd;
+ u32 domain = le32_to_cpu(hcmd->hdr.domain);
+ u16 hcmd_len = le32_to_cpu(node->tlv.length) - sizeof(*hcmd);
+ struct iwl_host_cmd cmd = {
+ .id = WIDE_ID(hcmd_data->group, hcmd_data->id),
+ .len = { hcmd_len, },
+ .data = { hcmd_data->data, },
+ };
+
+ if (domain != IWL_FW_INI_DOMAIN_ALWAYS_ON &&
+ !(domain & fwrt->trans->dbg.domains_bitmap))
+ continue;
+
+ iwl_trans_send_cmd(fwrt->trans, &cmd);
+ }
+}
+
+static void iwl_dbg_tlv_periodic_trig_handler(struct timer_list *t)
+{
+ struct iwl_dbg_tlv_timer_node *timer_node =
+ from_timer(timer_node, t, timer);
+ struct iwl_fwrt_dump_data dump_data = {
+ .trig = (void *)timer_node->tlv->data,
+ };
+ int ret;
+
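+ /* -EBUSY means the collection was temporarily blocked, not that
+ * the trigger is disabled, so the timer may still be re-armed
+ */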
+ ret = iwl_fw_dbg_ini_collect(timer_node->fwrt, &dump_data);
+ if (!ret || ret == -EBUSY) {
+ u32 occur = le32_to_cpu(dump_data.trig->occurrences);
+ u32 collect_interval = le32_to_cpu(dump_data.trig->data[0]);
+
+ if (!occur)
+ return;
+
+ mod_timer(t, jiffies + msecs_to_jiffies(collect_interval));
+ }
+}
+
+static void iwl_dbg_tlv_set_periodic_trigs(struct iwl_fw_runtime *fwrt)
+{
+ struct iwl_dbg_tlv_node *node;
+ struct list_head *trig_list =
+ &fwrt->trans->dbg.time_point[IWL_FW_INI_TIME_POINT_PERIODIC].active_trig_list;
+
+ list_for_each_entry(node, trig_list, list) {
+ struct iwl_fw_ini_trigger_tlv *trig = (void *)node->tlv.data;
+ struct iwl_dbg_tlv_timer_node *timer_node;
+ u32 occur = le32_to_cpu(trig->occurrences), collect_interval;
+ u32 min_interval = 100;
+
+ if (!occur)
+ continue;
+
+ /* make sure there is at least one dword of data for the
+ * interval value
+ */
+ if (le32_to_cpu(node->tlv.length) <
+ sizeof(*trig) + sizeof(__le32)) {
+ IWL_ERR(fwrt,
+ "WRT: Invalid periodic trigger data was not given\n");
+ continue;
+ }
+
+ if (le32_to_cpu(trig->data[0]) < min_interval) {
+ IWL_WARN(fwrt,
+ "WRT: Override min interval from %u to %u msec\n",
+ le32_to_cpu(trig->data[0]), min_interval);
+ trig->data[0] = cpu_to_le32(min_interval);
+ }
+
+ collect_interval = le32_to_cpu(trig->data[0]);
+
+ timer_node = kzalloc(sizeof(*timer_node), GFP_KERNEL);
+ if (!timer_node) {
+ IWL_ERR(fwrt,
+ "WRT: Failed to allocate periodic trigger\n");
+ continue;
+ }
+
+ timer_node->fwrt = fwrt;
+ timer_node->tlv = &node->tlv;
+ timer_setup(&timer_node->timer,
+ iwl_dbg_tlv_periodic_trig_handler, 0);
+
+ list_add_tail(&timer_node->list,
+ &fwrt->trans->dbg.periodic_trig_list);
+
+ IWL_DEBUG_FW(fwrt, "WRT: Enabling periodic trigger\n");
+
+ mod_timer(&timer_node->timer,
+ jiffies + msecs_to_jiffies(collect_interval));
+ }
+}
+
+static bool is_trig_data_contained(struct iwl_ucode_tlv *new,
+ struct iwl_ucode_tlv *old)
+{
+ struct iwl_fw_ini_trigger_tlv *new_trig = (void *)new->data;
+ struct iwl_fw_ini_trigger_tlv *old_trig = (void *)old->data;
+ __le32 *new_data = new_trig->data, *old_data = old_trig->data;
+ u32 new_dwords_num = iwl_tlv_array_len(new, new_trig, data);
+ u32 old_dwords_num = iwl_tlv_array_len(old, old_trig, data);
+ int i, j;
+
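+ /* the new data is contained if every dword of it already
+ * appears in the old data
+ */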
+ for (i = 0; i < new_dwords_num; i++) {
+ bool match = false;
+
+ for (j = 0; j < old_dwords_num; j++) {
+ if (new_data[i] == old_data[j]) {
+ match = true;
+ break;
+ }
+ }
+ if (!match)
+ return false;
+ }
+
+ return true;
+}
+
+static int iwl_dbg_tlv_override_trig_node(struct iwl_fw_runtime *fwrt,
+ struct iwl_ucode_tlv *trig_tlv,
+ struct iwl_dbg_tlv_node *node)
+{
+ struct iwl_ucode_tlv *node_tlv = &node->tlv;
+ struct iwl_fw_ini_trigger_tlv *node_trig = (void *)node_tlv->data;
+ struct iwl_fw_ini_trigger_tlv *trig = (void *)trig_tlv->data;
+ u32 policy = le32_to_cpu(trig->apply_policy);
+ u32 size = le32_to_cpu(trig_tlv->length);
+ u32 trig_data_len = size - sizeof(*trig);
+ u32 offset = 0;
+
+ if (!(policy & IWL_FW_INI_APPLY_POLICY_OVERRIDE_DATA)) {
+ u32 data_len = le32_to_cpu(node_tlv->length) -
+ sizeof(*node_trig);
+
+ IWL_DEBUG_FW(fwrt,
+ "WRT: Appending trigger data (time point %u)\n",
+ le32_to_cpu(trig->time_point));
+
+ offset += data_len;
+ size += data_len;
+ } else {
+ IWL_DEBUG_FW(fwrt,
+ "WRT: Overriding trigger data (time point %u)\n",
+ le32_to_cpu(trig->time_point));
+ }
+
+ if (size != le32_to_cpu(node_tlv->length)) {
+ struct list_head *prev = node->list.prev;
+ struct iwl_dbg_tlv_node *tmp;
+
+ list_del(&node->list);
+
+ tmp = krealloc(node, sizeof(*node) + size, GFP_KERNEL);
+ if (!tmp) {
+ IWL_WARN(fwrt,
+ "WRT: No memory to override trigger (time point %u)\n",
+ le32_to_cpu(trig->time_point));
+
+ list_add(&node->list, prev);
+
+ return -ENOMEM;
+ }
+
+ list_add(&tmp->list, prev);
+ node_tlv = &tmp->tlv;
+ node_trig = (void *)node_tlv->data;
+ }
+
+ memcpy(node_trig->data + offset, trig->data, trig_data_len);
+ node_tlv->length = cpu_to_le32(size);
+
+ if (policy & IWL_FW_INI_APPLY_POLICY_OVERRIDE_CFG) {
+ IWL_DEBUG_FW(fwrt,
+ "WRT: Overriding trigger configuration (time point %u)\n",
+ le32_to_cpu(trig->time_point));
+
+ /* the first 11 dwords are configuration related */
+ memcpy(node_trig, trig, sizeof(__le32) * 11);
+ }
+
+ if (policy & IWL_FW_INI_APPLY_POLICY_OVERRIDE_REGIONS) {
+ IWL_DEBUG_FW(fwrt,
+ "WRT: Overriding trigger regions (time point %u)\n",
+ le32_to_cpu(trig->time_point));
+
+ node_trig->regions_mask = trig->regions_mask;
+ } else {
+ IWL_DEBUG_FW(fwrt,
+ "WRT: Appending trigger regions (time point %u)\n",
+ le32_to_cpu(trig->time_point));
+
+ node_trig->regions_mask |= trig->regions_mask;
+ }
+
+ return 0;
+}
+
+static int
+iwl_dbg_tlv_add_active_trigger(struct iwl_fw_runtime *fwrt,
+ struct list_head *trig_list,
+ struct iwl_ucode_tlv *trig_tlv)
+{
+ struct iwl_fw_ini_trigger_tlv *trig = (void *)trig_tlv->data;
+ struct iwl_dbg_tlv_node *node, *match = NULL;
+ u32 policy = le32_to_cpu(trig->apply_policy);
+
+ list_for_each_entry(node, trig_list, list) {
+ if (!(policy & IWL_FW_INI_APPLY_POLICY_MATCH_TIME_POINT))
+ break;
+
+ if (!(policy & IWL_FW_INI_APPLY_POLICY_MATCH_DATA) ||
+ is_trig_data_contained(trig_tlv, &node->tlv)) {
+ match = node;
+ break;
+ }
+ }
+
+ if (!match) {
+ IWL_DEBUG_FW(fwrt, "WRT: Enabling trigger (time point %u)\n",
+ le32_to_cpu(trig->time_point));
+ return iwl_dbg_tlv_add(trig_tlv, trig_list);
+ }
+
+ return iwl_dbg_tlv_override_trig_node(fwrt, trig_tlv, match);
+}
+
+static void
+iwl_dbg_tlv_gen_active_trig_list(struct iwl_fw_runtime *fwrt,
+ struct iwl_dbg_tlv_time_point_data *tp)
+{
+ struct iwl_dbg_tlv_node *node, *tmp;
+ struct list_head *trig_list = &tp->trig_list;
+ struct list_head *active_trig_list = &tp->active_trig_list;
+
+ list_for_each_entry_safe(node, tmp, active_trig_list, list) {
+ list_del(&node->list);
+ kfree(node);
+ }
+
+ list_for_each_entry(node, trig_list, list) {
+ struct iwl_ucode_tlv *tlv = &node->tlv;
+ struct iwl_fw_ini_trigger_tlv *trig = (void *)tlv->data;
+ u32 domain = le32_to_cpu(trig->hdr.domain);
+
+ if (domain != IWL_FW_INI_DOMAIN_ALWAYS_ON &&
+ !(domain & fwrt->trans->dbg.domains_bitmap))
+ continue;
+
+ iwl_dbg_tlv_add_active_trigger(fwrt, active_trig_list, tlv);
+ }
+}
+
+int iwl_dbg_tlv_gen_active_trigs(struct iwl_fw_runtime *fwrt, u32 new_domain)
+{
+ int i;
+
+ if (test_and_set_bit(STATUS_GEN_ACTIVE_TRIGS, &fwrt->status))
+ return -EBUSY;
+
+ iwl_fw_flush_dumps(fwrt);
+
+ fwrt->trans->dbg.domains_bitmap = new_domain;
+
+ IWL_DEBUG_FW(fwrt,
+ "WRT: Generating active triggers list, domain 0x%x\n",
+ fwrt->trans->dbg.domains_bitmap);
+
+ for (i = 0; i < ARRAY_SIZE(fwrt->trans->dbg.time_point); i++) {
+ struct iwl_dbg_tlv_time_point_data *tp =
+ &fwrt->trans->dbg.time_point[i];
+
+ iwl_dbg_tlv_gen_active_trig_list(fwrt, tp);
+ }
+
+ clear_bit(STATUS_GEN_ACTIVE_TRIGS, &fwrt->status);
+
+ return 0;
+}
+
+static bool iwl_dbg_tlv_check_fw_pkt(struct iwl_fw_runtime *fwrt,
+ struct iwl_fwrt_dump_data *dump_data,
+ union iwl_dbg_tlv_tp_data *tp_data,
+ u32 trig_data)
+{
+ struct iwl_rx_packet *pkt = tp_data->fw_pkt;
+ struct iwl_cmd_header *wanted_hdr = (void *)&trig_data;
+
+ if (pkt && ((wanted_hdr->cmd == 0 && wanted_hdr->group_id == 0) ||
+ (pkt->hdr.cmd == wanted_hdr->cmd &&
+ pkt->hdr.group_id == wanted_hdr->group_id))) {
+ struct iwl_rx_packet *fw_pkt =
+ kmemdup(pkt,
+ sizeof(*pkt) + iwl_rx_packet_payload_len(pkt),
+ GFP_ATOMIC);
+
+ if (!fw_pkt)
+ return false;
+
+ dump_data->fw_pkt = fw_pkt;
+
+ return true;
+ }
+
+ return false;
+}
+
+static int
+iwl_dbg_tlv_tp_trigger(struct iwl_fw_runtime *fwrt,
+ struct list_head *active_trig_list,
+ union iwl_dbg_tlv_tp_data *tp_data,
+ bool (*data_check)(struct iwl_fw_runtime *fwrt,
+ struct iwl_fwrt_dump_data *dump_data,
+ union iwl_dbg_tlv_tp_data *tp_data,
+ u32 trig_data))
+{
+ struct iwl_dbg_tlv_node *node;
+
+ list_for_each_entry(node, active_trig_list, list) {
+ struct iwl_fwrt_dump_data dump_data = {
+ .trig = (void *)node->tlv.data,
+ };
+ u32 num_data = iwl_tlv_array_len(&node->tlv, dump_data.trig,
+ data);
+ int ret, i;
+
+ if (!num_data) {
+ ret = iwl_fw_dbg_ini_collect(fwrt, &dump_data);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < num_data; i++) {
+ if (!data_check ||
+ data_check(fwrt, &dump_data, tp_data,
+ le32_to_cpu(dump_data.trig->data[i]))) {
+ ret = iwl_fw_dbg_ini_collect(fwrt, &dump_data);
+ if (ret)
+ return ret;
+
+ break;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static void iwl_dbg_tlv_init_cfg(struct iwl_fw_runtime *fwrt)
+{
+ enum iwl_fw_ini_buffer_location *ini_dest = &fwrt->trans->dbg.ini_dest;
+ int ret, i;
+
+ iwl_dbg_tlv_gen_active_trigs(fwrt, IWL_FW_DBG_DOMAIN);
+
+ *ini_dest = IWL_FW_INI_LOCATION_INVALID;
+ for (i = 0; i < IWL_FW_INI_ALLOCATION_NUM; i++) {
+ struct iwl_fw_ini_allocation_tlv *fw_mon_cfg =
+ &fwrt->trans->dbg.fw_mon_cfg[i];
+ u32 dest = le32_to_cpu(fw_mon_cfg->buf_location);
+
+ if (dest == IWL_FW_INI_LOCATION_INVALID)
+ continue;
+
+ if (*ini_dest == IWL_FW_INI_LOCATION_INVALID)
+ *ini_dest = dest;
+
+ if (dest != *ini_dest)
+ continue;
+
+ ret = iwl_dbg_tlv_alloc_fragments(fwrt, i);
+ if (ret)
+ IWL_WARN(fwrt,
+ "WRT: Failed to allocate DRAM buffer for allocation id %d, ret=%d\n",
+ i, ret);
+ }
+}
+
void iwl_dbg_tlv_time_point(struct iwl_fw_runtime *fwrt,
enum iwl_fw_ini_time_point tp_id,
union iwl_dbg_tlv_tp_data *tp_data)
{
- /* will be used later */
+ struct list_head *hcmd_list, *trig_list;
+
+ if (!iwl_trans_dbg_ini_valid(fwrt->trans) ||
+ tp_id == IWL_FW_INI_TIME_POINT_INVALID ||
+ tp_id >= IWL_FW_INI_TIME_POINT_NUM)
+ return;
+
+ hcmd_list = &fwrt->trans->dbg.time_point[tp_id].hcmd_list;
+ trig_list = &fwrt->trans->dbg.time_point[tp_id].active_trig_list;
+
+ switch (tp_id) {
+ case IWL_FW_INI_TIME_POINT_EARLY:
+ iwl_dbg_tlv_init_cfg(fwrt);
+ iwl_dbg_tlv_tp_trigger(fwrt, trig_list, tp_data, NULL);
+ break;
+ case IWL_FW_INI_TIME_POINT_AFTER_ALIVE:
+ iwl_dbg_tlv_apply_buffers(fwrt);
+ iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list);
+ iwl_dbg_tlv_tp_trigger(fwrt, trig_list, tp_data, NULL);
+ break;
+ case IWL_FW_INI_TIME_POINT_PERIODIC:
+ iwl_dbg_tlv_set_periodic_trigs(fwrt);
+ iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list);
+ break;
+ case IWL_FW_INI_TIME_POINT_FW_RSP_OR_NOTIF:
+ case IWL_FW_INI_TIME_POINT_MISSED_BEACONS:
+ iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list);
+ iwl_dbg_tlv_tp_trigger(fwrt, trig_list, tp_data,
+ iwl_dbg_tlv_check_fw_pkt);
+ break;
+ default:
+ iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list);
+ iwl_dbg_tlv_tp_trigger(fwrt, trig_list, tp_data, NULL);
+ break;
+ }
}
IWL_EXPORT_SYMBOL(iwl_dbg_tlv_time_point);
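
The periodic-trigger code above leans on the kernel timer API: the callback recovers its container with from_timer() and re-arms itself with mod_timer(). A minimal, self-contained sketch of the same pattern — the demo_* names are illustrative, not part of the driver:

#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/timer.h>

/* Hypothetical container, mirroring the shape of iwl_dbg_tlv_timer_node. */
struct demo_node {
	struct timer_list timer;
	unsigned int interval_ms;
};

static void demo_timer_fn(struct timer_list *t)
{
	/* from_timer() maps the timer_list back to its container. */
	struct demo_node *node = from_timer(node, t, timer);

	/* ... periodic work would go here ... */

	/* Re-arm so the timer fires again interval_ms from now. */
	mod_timer(t, jiffies + msecs_to_jiffies(node->interval_ms));
}

static struct demo_node *demo_start(unsigned int interval_ms)
{
	struct demo_node *node = kzalloc(sizeof(*node), GFP_KERNEL);

	if (!node)
		return NULL;

	node->interval_ms = interval_ms;
	timer_setup(&node->timer, demo_timer_fn, 0);
	mod_timer(&node->timer, jiffies + msecs_to_jiffies(interval_ms));
	return node;
}
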
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h
index e257ad358c94..f18946872569 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h
@@ -65,11 +65,11 @@
#include <linux/types.h>
/**
- * struct iwl_apply_point_data
- * @list: list to go through the TLVs of the apply point
- * @tlv: a debug TLV
+ * struct iwl_dbg_tlv_node - debug TLV node
+ * @list: list of &struct iwl_dbg_tlv_node
+ * @tlv: debug TLV
*/
-struct iwl_apply_point_data {
+struct iwl_dbg_tlv_node {
struct list_head list;
struct iwl_ucode_tlv tlv;
};
@@ -82,6 +82,18 @@ union iwl_dbg_tlv_tp_data {
struct iwl_rx_packet *fw_pkt;
};
+/**
+ * struct iwl_dbg_tlv_time_point_data
+ * @trig_list: list of triggers
+ * @active_trig_list: list of active triggers
+ * @hcmd_list: list of host commands
+ */
+struct iwl_dbg_tlv_time_point_data {
+ struct list_head trig_list;
+ struct list_head active_trig_list;
+ struct list_head hcmd_list;
+};
+
struct iwl_trans;
struct iwl_fw_runtime;
@@ -89,9 +101,11 @@ void iwl_dbg_tlv_load_bin(struct device *dev, struct iwl_trans *trans);
void iwl_dbg_tlv_free(struct iwl_trans *trans);
void iwl_dbg_tlv_alloc(struct iwl_trans *trans, struct iwl_ucode_tlv *tlv,
bool ext);
+void iwl_dbg_tlv_init(struct iwl_trans *trans);
void iwl_dbg_tlv_time_point(struct iwl_fw_runtime *fwrt,
enum iwl_fw_ini_time_point tp_id,
union iwl_dbg_tlv_tp_data *tp_data);
+int iwl_dbg_tlv_gen_active_trigs(struct iwl_fw_runtime *fwrt, u32 new_domain);
void iwl_dbg_tlv_del_timers(struct iwl_trans *trans);
#endif /* __iwl_dbg_tlv_h__*/
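
iwl_dbg_tlv_init() is only declared here. Given the list heads added to struct iwl_trans_debug, a plausible body simply puts every list into a known-empty state before any TLVs are parsed — a sketch under that assumption, not necessarily the driver's actual implementation:

void iwl_dbg_tlv_init(struct iwl_trans *trans)
{
	int i;

	INIT_LIST_HEAD(&trans->dbg.debug_info_tlv_list);
	INIT_LIST_HEAD(&trans->dbg.periodic_trig_list);

	for (i = 0; i < ARRAY_SIZE(trans->dbg.time_point); i++) {
		struct iwl_dbg_tlv_time_point_data *tp =
			&trans->dbg.time_point[i];

		INIT_LIST_HEAD(&tp->trig_list);
		INIT_LIST_HEAD(&tp->hcmd_list);
		INIT_LIST_HEAD(&tp->active_trig_list);
	}
}
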
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h
index 9e8643618578..1bc6ecc32140 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h
@@ -3,7 +3,7 @@
*
* Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2015 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
*
* Contact Information:
* Intel Linux Wireless <linuxwifi@intel.com>
@@ -21,16 +21,18 @@
TRACE_EVENT(iwlwifi_dev_tx_tb,
TP_PROTO(const struct device *dev, struct sk_buff *skb,
- u8 *data_src, size_t data_len),
- TP_ARGS(dev, skb, data_src, data_len),
+ u8 *data_src, dma_addr_t phys, size_t data_len),
+ TP_ARGS(dev, skb, data_src, phys, data_len),
TP_STRUCT__entry(
DEV_ENTRY
+ __field(u64, phys)
__dynamic_array(u8, data,
iwl_trace_data(skb) ? data_len : 0)
),
TP_fast_assign(
DEV_ASSIGN;
+ __entry->phys = phys;
if (iwl_trace_data(skb))
memcpy(__get_dynamic_array(data), data_src, data_len);
),
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index ff0519ea00a5..4096ccf58b07 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -1560,6 +1560,8 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
IWL_INFO(drv, "loaded firmware version %s op_mode %s\n",
drv->fw.fw_version, op->name);
+ iwl_dbg_tlv_load_bin(drv->trans->dev, drv->trans);
+
/* add this device to the list of devices using this op_mode */
list_add_tail(&drv->list, &op->drv);
@@ -1636,8 +1638,6 @@ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans)
init_completion(&drv->request_firmware_complete);
INIT_LIST_HEAD(&drv->list);
- iwl_dbg_tlv_load_bin(drv->trans->dev, drv->trans);
-
#ifdef CONFIG_IWLWIFI_DEBUGFS
/* Create the device debugfs entries. */
drv->dbgfs_drv = debugfs_create_dir(dev_name(trans->dev),
@@ -1804,7 +1804,7 @@ MODULE_PARM_DESC(11n_disable,
"disable 11n functionality, bitmap: 1: full, 2: disable agg TX, 4: disable agg RX, 8 enable agg TX");
module_param_named(amsdu_size, iwlwifi_mod_params.amsdu_size, int, 0444);
MODULE_PARM_DESC(amsdu_size,
- "amsdu size 0: 12K for multi Rx queue devices, 2K for 22560 devices, "
+ "amsdu size 0: 12K for multi Rx queue devices, 2K for AX210 devices, "
"4K for other devices 1:4K 2:8K 3:12K 4: 2K (default 0)");
module_param_named(fw_restart, iwlwifi_mod_params.fw_restart, bool, 0444);
MODULE_PARM_DESC(fw_restart, "restart firmware in case of error (default true)");
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
index 0c12df558240..8836f85afe85 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
@@ -148,7 +148,7 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
*
* Bits 3:0:
* Define the maximum number of pending read requests.
- * Maximum configration value allowed is 0xC
+ * Maximum configuration value allowed is 0xC
* Bits 9:8:
* Define the maximum transfer size. (64 / 128 / 256)
* Bit 10:
@@ -768,7 +768,7 @@ struct iwlagn_scd_bc_tbl {
/**
* struct iwl_gen3_bc_tbl - scheduler byte count table gen3
- * For 22560 and on:
+ * For AX210 and on:
* @tfd_offset: 0-12 - tx command byte count
* 12-13 - number of 64 byte chunks
* 14-16 - reserved
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
index c8972f6e38ba..1e240a2a8329 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
@@ -156,11 +156,10 @@ static const u16 iwl_uhb_nvm_channels[] = {
96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
149, 153, 157, 161, 165, 169, 173, 177, 181,
/* 6-7 GHz */
- 189, 193, 197, 201, 205, 209, 213, 217, 221, 225, 229, 233, 237, 241,
- 245, 249, 253, 257, 261, 265, 269, 273, 277, 281, 285, 289, 293, 297,
- 301, 305, 309, 313, 317, 321, 325, 329, 333, 337, 341, 345, 349, 353,
- 357, 361, 365, 369, 373, 377, 381, 385, 389, 393, 397, 401, 405, 409,
- 413, 417, 421
+ 1, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45, 49, 53, 57, 61, 65, 69,
+ 73, 77, 81, 85, 89, 93, 97, 101, 105, 109, 113, 117, 121, 125, 129,
+ 133, 137, 141, 145, 149, 153, 157, 161, 165, 169, 173, 177, 181, 185,
+ 189, 193, 197, 201, 205, 209, 213, 217, 221, 225, 229, 233
};
#define IWL_NVM_NUM_CHANNELS ARRAY_SIZE(iwl_nvm_channels)
@@ -256,12 +255,12 @@ static inline void iwl_nvm_print_channel_flags(struct device *dev, u32 level,
#undef CHECK_AND_PRINT_I
}
-static u32 iwl_get_channel_flags(u8 ch_num, int ch_idx, bool is_5ghz,
+static u32 iwl_get_channel_flags(u8 ch_num, int ch_idx, enum nl80211_band band,
u32 nvm_flags, const struct iwl_cfg *cfg)
{
u32 flags = IEEE80211_CHAN_NO_HT40;
- if (!is_5ghz && (nvm_flags & NVM_CHANNEL_40MHZ)) {
+ if (band == NL80211_BAND_2GHZ && (nvm_flags & NVM_CHANNEL_40MHZ)) {
if (ch_num <= LAST_2GHZ_HT_PLUS)
flags &= ~IEEE80211_CHAN_NO_HT40PLUS;
if (ch_num >= FIRST_2GHZ_HT_MINUS)
@@ -299,6 +298,13 @@ static u32 iwl_get_channel_flags(u8 ch_num, int ch_idx, bool is_5ghz,
return flags;
}
+static enum nl80211_band iwl_nl80211_band_from_channel_idx(int ch_idx)
+{
+ if (ch_idx >= NUM_2GHZ_CHANNELS)
+ return NL80211_BAND_5GHZ;
+ return NL80211_BAND_2GHZ;
+}
+
static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
struct iwl_nvm_data *data,
const void * const nvm_ch_flags,
@@ -308,7 +314,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
int n_channels = 0;
struct ieee80211_channel *channel;
u32 ch_flags;
- int num_of_ch, num_2ghz_channels = NUM_2GHZ_CHANNELS;
+ int num_of_ch;
const u16 *nvm_chan;
if (cfg->uhb_supported) {
@@ -323,7 +329,8 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
}
for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) {
- bool is_5ghz = (ch_idx >= num_2ghz_channels);
+ enum nl80211_band band =
+ iwl_nl80211_band_from_channel_idx(ch_idx);
if (v4)
ch_flags =
@@ -332,12 +339,13 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
ch_flags =
__le16_to_cpup((__le16 *)nvm_ch_flags + ch_idx);
- if (is_5ghz && !data->sku_cap_band_52ghz_enable)
+ if (band == NL80211_BAND_5GHZ &&
+ !data->sku_cap_band_52ghz_enable)
continue;
/* workaround to disable wide channels in 5GHz */
if ((sbands_flags & IWL_NVM_SBANDS_FLAGS_NO_WIDE_IN_5GHZ) &&
- is_5ghz) {
+ band == NL80211_BAND_5GHZ) {
ch_flags &= ~(NVM_CHANNEL_40MHZ |
NVM_CHANNEL_80MHZ |
NVM_CHANNEL_160MHZ);
@@ -362,8 +370,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
n_channels++;
channel->hw_value = nvm_chan[ch_idx];
- channel->band = is_5ghz ?
- NL80211_BAND_5GHZ : NL80211_BAND_2GHZ;
+ channel->band = band;
channel->center_freq =
ieee80211_channel_to_frequency(
channel->hw_value, channel->band);
@@ -379,7 +386,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
/* don't put limitations in case we're using LAR */
if (!(sbands_flags & IWL_NVM_SBANDS_FLAGS_LAR))
channel->flags = iwl_get_channel_flags(nvm_chan[ch_idx],
- ch_idx, is_5ghz,
+ ch_idx, band,
ch_flags, cfg);
else
channel->flags = 0;
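
To make the band helper concrete: the band is derived from the channel's index position, and the center frequency from the standard cfg80211 helper. Purely illustrative — demo_channel_center_freq() is not a driver function, and iwl_nl80211_band_from_channel_idx() is the static helper introduced above:

#include <net/cfg80211.h>

/* nvm_chan[] is the channel-number table selected earlier
 * (iwl_nvm_channels or iwl_uhb_nvm_channels).
 */
static int demo_channel_center_freq(const u16 *nvm_chan, int ch_idx)
{
	enum nl80211_band band = iwl_nl80211_band_from_channel_idx(ch_idx);

	return ieee80211_channel_to_frequency(nvm_chan[ch_idx], band);
}
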
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
index 23c25a7665f2..14c8ba23f3b9 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
@@ -374,6 +374,7 @@
#define DBGC_CUR_DBGBUF_STATUS (0xd03c1c)
#define DBGC_DBGBUF_WRAP_AROUND (0xd03c2c)
#define DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK (0x00ffffff)
+#define DBGC_CUR_DBGBUF_STATUS_IDX_MSK (0x0f000000)
#define MON_DMARB_RD_CTL_ADDR (0xa03c60)
#define MON_DMARB_RD_DATA_ADDR (0xa03c5c)
@@ -381,6 +382,12 @@
#define DBGC_IN_SAMPLE (0xa03c00)
#define DBGC_OUT_CTRL (0xa03c0c)
+/* M2S registers */
+#define LDBG_M2S_BUF_WPTR (0xa0476c)
+#define LDBG_M2S_BUF_WRAP_CNT (0xa04774)
+#define LDBG_M2S_BUF_WPTR_VAL_MSK (0x000fffff)
+#define LDBG_M2S_BUF_WRAP_CNT_VAL_MSK (0x000fffff)
+
/* enable the ID buf for read */
#define WFPM_PS_CTL_CLR 0xA0300C
#define WFMP_MAC_ADDR_0 0xA03080
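
The new _VAL_MSK masks carve the valid bits out of the raw M2S registers. A hedged usage sketch — iwl_read_prph() is the driver's existing peripheral-register accessor, while demo_read_m2s_wptr() is invented for illustration:

static u32 demo_read_m2s_wptr(struct iwl_trans *trans)
{
	u32 wptr = iwl_read_prph(trans, LDBG_M2S_BUF_WPTR);

	/* Only the low 20 bits carry the write pointer. */
	return wptr & LDBG_M2S_BUF_WPTR_VAL_MSK;
}
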
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
index a31408188ed0..8cadad7364ac 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
@@ -679,6 +679,16 @@ struct iwl_dram_data {
};
/**
+ * struct iwl_fw_mon - fw monitor per allocation id
+ * @num_frags: number of fragments
+ * @frags: an array of DRAM buffer fragments
+ */
+struct iwl_fw_mon {
+ u32 num_frags;
+ struct iwl_dram_data *frags;
+};
+
+/**
* struct iwl_self_init_dram - dram data used by self init process
* @fw: lmac and umac dram data
* @fw_cnt: total number of items in array
@@ -706,10 +716,17 @@ struct iwl_self_init_dram {
* pointers were received via TLV. uses enum &iwl_error_event_table_status
* @internal_ini_cfg: internal debug cfg state. Uses &enum iwl_ini_cfg_state
* @external_ini_cfg: external debug cfg state. Uses &enum iwl_ini_cfg_state
- * @num_blocks: number of blocks in fw_mon
- * @fw_mon: address of the buffers for firmware monitor
+ * @fw_mon_cfg: debug buffer allocation configuration
+ * @fw_mon_ini: DRAM buffer fragments per allocation id
+ * @fw_mon: DRAM buffer for firmware monitor
* @hw_error: equals true if hw error interrupt was received from the FW
* @ini_dest: debug monitor destination uses &enum iwl_fw_ini_buffer_location
+ * @active_regions: active regions
+ * @debug_info_tlv_list: list of debug info TLVs
+ * @time_point: array of debug time points
+ * @periodic_trig_list: periodic triggers list
+ * @domains_bitmap: bitmap of active domains other than
+ * &IWL_FW_INI_DOMAIN_ALWAYS_ON
*/
struct iwl_trans_debug {
u8 n_dest_reg;
@@ -726,11 +743,21 @@ struct iwl_trans_debug {
enum iwl_ini_cfg_state internal_ini_cfg;
enum iwl_ini_cfg_state external_ini_cfg;
- int num_blocks;
- struct iwl_dram_data fw_mon[IWL_FW_INI_ALLOCATION_NUM];
+ struct iwl_fw_ini_allocation_tlv fw_mon_cfg[IWL_FW_INI_ALLOCATION_NUM];
+ struct iwl_fw_mon fw_mon_ini[IWL_FW_INI_ALLOCATION_NUM];
+
+ struct iwl_dram_data fw_mon;
bool hw_error;
enum iwl_fw_ini_buffer_location ini_dest;
+
+ struct iwl_ucode_tlv *active_regions[IWL_FW_INI_MAX_REGION_ID];
+ struct list_head debug_info_tlv_list;
+ struct iwl_dbg_tlv_time_point_data
+ time_point[IWL_FW_INI_TIME_POINT_NUM];
+ struct list_head periodic_trig_list;
+
+ u32 domains_bitmap;
};
/**
@@ -1222,6 +1249,11 @@ static inline void iwl_trans_fw_error(struct iwl_trans *trans)
iwl_op_mode_nic_error(trans->op_mode);
}
+static inline bool iwl_trans_fw_running(struct iwl_trans *trans)
+{
+ return trans->state == IWL_TRANS_FW_ALIVE;
+}
+
static inline void iwl_trans_sync_nmi(struct iwl_trans *trans)
{
if (trans->ops->sync_nmi)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index 86c2c587e755..43ebb2149b63 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -1939,6 +1939,8 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
if (iwl_mvm_check_rt_status(mvm, vif)) {
set_bit(STATUS_FW_ERROR, &mvm->trans->status);
iwl_mvm_dump_nic_error_log(mvm);
+ iwl_dbg_tlv_time_point(&mvm->fwrt,
+ IWL_FW_INI_TIME_POINT_FW_ASSERT, NULL);
iwl_fw_dbg_collect_desc(&mvm->fwrt, &iwl_dump_desc_assert,
false, 0);
ret = 1;
@@ -1955,12 +1957,39 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
}
if (d0i3_first) {
- ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, 0, 0, NULL);
+ struct iwl_host_cmd cmd = {
+ .id = D0I3_END_CMD,
+ .flags = CMD_WANT_SKB,
+ };
+ int len;
+
+ ret = iwl_mvm_send_cmd(mvm, &cmd);
if (ret < 0) {
IWL_ERR(mvm, "Failed to send D0I3_END_CMD first (%d)\n",
ret);
goto err;
}
+ switch (mvm->cmd_ver.d0i3_resp) {
+ case 0:
+ break;
+ case 1:
+ len = iwl_rx_packet_payload_len(cmd.resp_pkt);
+ if (len != sizeof(u32)) {
+ IWL_ERR(mvm,
+ "Error with D0I3_END_CMD response size (%d)\n",
+ len);
+ goto err;
+ }
+ if (IWL_D0I3_RESET_REQUIRE &
+ le32_to_cpu(*(__le32 *)cmd.resp_pkt->data)) {
+ iwl_write32(mvm->trans, CSR_RESET,
+ CSR_RESET_REG_FLAG_FORCE_NMI);
+ iwl_free_resp(&cmd);
+ }
+ break;
+ default:
+ WARN_ON(1);
+ }
}
/*
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index ad18c2f1a806..aa659162a7c2 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -148,7 +148,8 @@ static ssize_t iwl_dbgfs_tx_flush_write(struct iwl_mvm *mvm, char *buf,
"FLUSHING all tids queues on sta_id = %d\n",
flush_arg);
mutex_lock(&mvm->mutex);
- ret = iwl_mvm_flush_sta_tids(mvm, flush_arg, 0xFF, 0) ? : count;
+ ret = iwl_mvm_flush_sta_tids(mvm, flush_arg, 0xFFFF, 0)
+ ? : count;
mutex_unlock(&mvm->mutex);
return ret;
}
@@ -377,7 +378,7 @@ static ssize_t iwl_dbgfs_sar_geo_profile_read(struct file *file,
pos = scnprintf(buf, bufsz,
"SAR geographic profile disabled\n");
} else {
- value = &mvm->geo_profiles[tbl_idx - 1].values[0];
+ value = &mvm->fwrt.geo_profiles[tbl_idx - 1].values[0];
pos += scnprintf(buf + pos, bufsz - pos,
"Use geographic profile %d\n", tbl_idx);
@@ -1174,7 +1175,7 @@ static ssize_t iwl_dbgfs_inject_packet_write(struct iwl_mvm *mvm,
int bin_len = count / 2;
int ret = -EINVAL;
size_t mpdu_cmd_hdr_size = (mvm->trans->trans_cfg->device_family >=
- IWL_DEVICE_FAMILY_22560) ?
+ IWL_DEVICE_FAMILY_AX210) ?
sizeof(struct iwl_rx_mpdu_desc) :
IWL_RX_DESC_SIZE_V1;
@@ -1375,6 +1376,9 @@ static ssize_t iwl_dbgfs_fw_dbg_collect_write(struct iwl_mvm *mvm,
if (count == 0)
return 0;
+ iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_USER_TRIGGER,
+ NULL);
+
iwl_fw_dbg_collect(&mvm->fwrt, FW_DBG_TRIGGER_USER, buf,
(count - 1), NULL);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index d9eb2b286438..dd685f7eb410 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -514,6 +514,19 @@ static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
struct iwl_phy_cfg_cmd phy_cfg_cmd;
enum iwl_ucode_type ucode_type = mvm->fwrt.cur_fw_img;
+ if (iwl_mvm_has_unified_ucode(mvm) &&
+ !mvm->trans->cfg->tx_with_siso_diversity)
+ return 0;
+
+ if (mvm->trans->cfg->tx_with_siso_diversity) {
+		/*
+		 * TODO: currently we don't set the antenna but let the NIC
+		 * decide which antenna to use. This should come from the BIOS.
+		 */
+ phy_cfg_cmd.phy_cfg =
+ cpu_to_le32(FW_PHY_CFG_CHAIN_SAD_ENABLED);
+ }
+
/* Set parameters */
phy_cfg_cmd.phy_cfg = cpu_to_le32(iwl_mvm_get_phy_config(mvm));
@@ -665,181 +678,14 @@ static int iwl_mvm_config_ltr(struct iwl_mvm *mvm)
}
#ifdef CONFIG_ACPI
-static inline int iwl_mvm_sar_set_profile(struct iwl_mvm *mvm,
- union acpi_object *table,
- struct iwl_mvm_sar_profile *profile,
- bool enabled)
-{
- int i;
-
- profile->enabled = enabled;
-
- for (i = 0; i < ACPI_SAR_TABLE_SIZE; i++) {
- if ((table[i].type != ACPI_TYPE_INTEGER) ||
- (table[i].integer.value > U8_MAX))
- return -EINVAL;
-
- profile->table[i] = table[i].integer.value;
- }
-
- return 0;
-}
-
-static int iwl_mvm_sar_get_wrds_table(struct iwl_mvm *mvm)
-{
- union acpi_object *wifi_pkg, *table, *data;
- bool enabled;
- int ret, tbl_rev;
-
- data = iwl_acpi_get_object(mvm->dev, ACPI_WRDS_METHOD);
- if (IS_ERR(data))
- return PTR_ERR(data);
-
- wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data,
- ACPI_WRDS_WIFI_DATA_SIZE, &tbl_rev);
- if (IS_ERR(wifi_pkg)) {
- ret = PTR_ERR(wifi_pkg);
- goto out_free;
- }
-
- if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER ||
- tbl_rev != 0) {
- ret = -EINVAL;
- goto out_free;
- }
-
- enabled = !!(wifi_pkg->package.elements[1].integer.value);
-
- /* position of the actual table */
- table = &wifi_pkg->package.elements[2];
-
- /* The profile from WRDS is officially profile 1, but goes
- * into sar_profiles[0] (because we don't have a profile 0).
- */
- ret = iwl_mvm_sar_set_profile(mvm, table, &mvm->sar_profiles[0],
- enabled);
-out_free:
- kfree(data);
- return ret;
-}
-
-static int iwl_mvm_sar_get_ewrd_table(struct iwl_mvm *mvm)
-{
- union acpi_object *wifi_pkg, *data;
- bool enabled;
- int i, n_profiles, ret, tbl_rev;
-
- data = iwl_acpi_get_object(mvm->dev, ACPI_EWRD_METHOD);
- if (IS_ERR(data))
- return PTR_ERR(data);
-
- wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data,
- ACPI_EWRD_WIFI_DATA_SIZE, &tbl_rev);
- if (IS_ERR(wifi_pkg)) {
- ret = PTR_ERR(wifi_pkg);
- goto out_free;
- }
-
- if ((wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) ||
- (wifi_pkg->package.elements[2].type != ACPI_TYPE_INTEGER) ||
- tbl_rev != 0) {
- ret = -EINVAL;
- goto out_free;
- }
-
- enabled = !!(wifi_pkg->package.elements[1].integer.value);
- n_profiles = wifi_pkg->package.elements[2].integer.value;
-
- /*
- * Check the validity of n_profiles. The EWRD profiles start
- * from index 1, so the maximum value allowed here is
- * ACPI_SAR_PROFILES_NUM - 1.
- */
- if (n_profiles <= 0 || n_profiles >= ACPI_SAR_PROFILE_NUM) {
- ret = -EINVAL;
- goto out_free;
- }
-
- for (i = 0; i < n_profiles; i++) {
- /* the tables start at element 3 */
- int pos = 3;
-
- /* The EWRD profiles officially go from 2 to 4, but we
- * save them in sar_profiles[1-3] (because we don't
- * have profile 0). So in the array we start from 1.
- */
- ret = iwl_mvm_sar_set_profile(mvm,
- &wifi_pkg->package.elements[pos],
- &mvm->sar_profiles[i + 1],
- enabled);
- if (ret < 0)
- break;
-
- /* go to the next table */
- pos += ACPI_SAR_TABLE_SIZE;
- }
-
-out_free:
- kfree(data);
- return ret;
-}
-
-static int iwl_mvm_sar_get_wgds_table(struct iwl_mvm *mvm)
-{
- union acpi_object *wifi_pkg, *data;
- int i, j, ret, tbl_rev;
- int idx = 1;
-
- data = iwl_acpi_get_object(mvm->dev, ACPI_WGDS_METHOD);
- if (IS_ERR(data))
- return PTR_ERR(data);
-
- wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data,
- ACPI_WGDS_WIFI_DATA_SIZE, &tbl_rev);
- if (IS_ERR(wifi_pkg)) {
- ret = PTR_ERR(wifi_pkg);
- goto out_free;
- }
-
- if (tbl_rev != 0) {
- ret = -EINVAL;
- goto out_free;
- }
-
- mvm->geo_rev = tbl_rev;
- for (i = 0; i < ACPI_NUM_GEO_PROFILES; i++) {
- for (j = 0; j < ACPI_GEO_TABLE_SIZE; j++) {
- union acpi_object *entry;
-
- entry = &wifi_pkg->package.elements[idx++];
- if ((entry->type != ACPI_TYPE_INTEGER) ||
- (entry->integer.value > U8_MAX)) {
- ret = -EINVAL;
- goto out_free;
- }
-
- mvm->geo_profiles[i].values[j] = entry->integer.value;
- }
- }
- ret = 0;
-out_free:
- kfree(data);
- return ret;
-}
-
int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
{
union {
struct iwl_dev_tx_power_cmd v5;
struct iwl_dev_tx_power_cmd_v4 v4;
} cmd;
- int i, j, idx;
- int profs[ACPI_SAR_NUM_CHAIN_LIMITS] = { prof_a, prof_b };
- int len;
- BUILD_BUG_ON(ACPI_SAR_NUM_CHAIN_LIMITS < 2);
- BUILD_BUG_ON(ACPI_SAR_NUM_CHAIN_LIMITS * ACPI_SAR_NUM_SUB_BANDS !=
- ACPI_SAR_TABLE_SIZE);
+ u16 len = 0;
cmd.v5.v3.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS);
@@ -848,174 +694,76 @@ int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
len = sizeof(cmd.v5);
else if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_TX_POWER_ACK))
- len = sizeof(cmd.v4);
+ len = sizeof(struct iwl_dev_tx_power_cmd_v4);
else
len = sizeof(cmd.v4.v3);
- for (i = 0; i < ACPI_SAR_NUM_CHAIN_LIMITS; i++) {
- struct iwl_mvm_sar_profile *prof;
-
- /* don't allow SAR to be disabled (profile 0 means disable) */
- if (profs[i] == 0)
- return -EPERM;
-
- /* we are off by one, so allow up to ACPI_SAR_PROFILE_NUM */
- if (profs[i] > ACPI_SAR_PROFILE_NUM)
- return -EINVAL;
-
- /* profiles go from 1 to 4, so decrement to access the array */
- prof = &mvm->sar_profiles[profs[i] - 1];
-
- /* if the profile is disabled, do nothing */
- if (!prof->enabled) {
- IWL_DEBUG_RADIO(mvm, "SAR profile %d is disabled.\n",
- profs[i]);
- /* if one of the profiles is disabled, we fail all */
- return -ENOENT;
- }
-
- IWL_DEBUG_INFO(mvm,
- "SAR EWRD: chain %d profile index %d\n",
- i, profs[i]);
- IWL_DEBUG_RADIO(mvm, " Chain[%d]:\n", i);
- for (j = 0; j < ACPI_SAR_NUM_SUB_BANDS; j++) {
- idx = (i * ACPI_SAR_NUM_SUB_BANDS) + j;
- cmd.v5.v3.per_chain_restriction[i][j] =
- cpu_to_le16(prof->table[idx]);
- IWL_DEBUG_RADIO(mvm, " Band[%d] = %d * .125dBm\n",
- j, prof->table[idx]);
- }
- }
+ if (iwl_sar_select_profile(&mvm->fwrt, cmd.v5.v3.per_chain_restriction,
+ prof_a, prof_b))
+ return -ENOENT;
IWL_DEBUG_RADIO(mvm, "Sending REDUCE_TX_POWER_CMD per chain\n");
-
return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
}
-static bool iwl_mvm_sar_geo_support(struct iwl_mvm *mvm)
-{
- /*
- * The GEO_TX_POWER_LIMIT command is not supported on earlier
- * firmware versions. Unfortunately, we don't have a TLV API
- * flag to rely on, so rely on the major version which is in
- * the first byte of ucode_ver. This was implemented
- * initially on version 38 and then backported to 17. It was
- * also backported to 29, but only for 7265D devices. The
- * intention was to have it in 36 as well, but not all 8000
- * family got this feature enabled. The 8000 family is the
- * only one using version 36, so skip this version entirely.
- */
- return IWL_UCODE_SERIAL(mvm->fw->ucode_ver) >= 38 ||
- IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 17 ||
- (IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 29 &&
- ((mvm->trans->hw_rev & CSR_HW_REV_TYPE_MSK) ==
- CSR_HW_REV_TYPE_7265D));
-}
-
int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
{
- struct iwl_geo_tx_power_profiles_resp *resp;
- int ret;
+ union geo_tx_power_profiles_cmd geo_tx_cmd;
u16 len;
- void *data;
- struct iwl_geo_tx_power_profiles_cmd geo_cmd;
- struct iwl_geo_tx_power_profiles_cmd_v1 geo_cmd_v1;
+ int ret;
struct iwl_host_cmd cmd;
- if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_SAR_TABLE_VER)) {
- geo_cmd.ops =
+ if (fw_has_api(&mvm->fwrt.fw->ucode_capa,
+ IWL_UCODE_TLV_API_SAR_TABLE_VER)) {
+ geo_tx_cmd.geo_cmd.ops =
cpu_to_le32(IWL_PER_CHAIN_OFFSET_GET_CURRENT_TABLE);
- len = sizeof(geo_cmd);
- data = &geo_cmd;
+ len = sizeof(geo_tx_cmd.geo_cmd);
} else {
- geo_cmd_v1.ops =
+ geo_tx_cmd.geo_cmd_v1.ops =
cpu_to_le32(IWL_PER_CHAIN_OFFSET_GET_CURRENT_TABLE);
- len = sizeof(geo_cmd_v1);
- data = &geo_cmd_v1;
+ len = sizeof(geo_tx_cmd.geo_cmd_v1);
}
+ if (!iwl_sar_geo_support(&mvm->fwrt))
+ return -EOPNOTSUPP;
+
cmd = (struct iwl_host_cmd){
.id = WIDE_ID(PHY_OPS_GROUP, GEO_TX_POWER_LIMIT),
.len = { len, },
.flags = CMD_WANT_SKB,
- .data = { data },
+ .data = { &geo_tx_cmd },
};
- if (!iwl_mvm_sar_geo_support(mvm))
- return -EOPNOTSUPP;
-
ret = iwl_mvm_send_cmd(mvm, &cmd);
if (ret) {
IWL_ERR(mvm, "Failed to get geographic profile info %d\n", ret);
return ret;
}
-
- resp = (void *)cmd.resp_pkt->data;
- ret = le32_to_cpu(resp->profile_idx);
- if (WARN_ON(ret > ACPI_NUM_GEO_PROFILES)) {
- ret = -EIO;
- IWL_WARN(mvm, "Invalid geographic profile idx (%d)\n", ret);
- }
-
+ ret = iwl_validate_sar_geo_profile(&mvm->fwrt, &cmd);
iwl_free_resp(&cmd);
return ret;
}
static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
{
- struct iwl_geo_tx_power_profiles_cmd cmd = {
- .ops = cpu_to_le32(IWL_PER_CHAIN_OFFSET_SET_TABLES),
- };
- int ret, i, j;
u16 cmd_wide_id = WIDE_ID(PHY_OPS_GROUP, GEO_TX_POWER_LIMIT);
+ union geo_tx_power_profiles_cmd cmd;
+ u16 len;
- if (!iwl_mvm_sar_geo_support(mvm))
- return 0;
-
- ret = iwl_mvm_sar_get_wgds_table(mvm);
- if (ret < 0) {
- IWL_DEBUG_RADIO(mvm,
- "Geo SAR BIOS table invalid or unavailable. (%d)\n",
- ret);
- /* we don't fail if the table is not available */
- return 0;
- }
-
- IWL_DEBUG_RADIO(mvm, "Sending GEO_TX_POWER_LIMIT\n");
-
- BUILD_BUG_ON(ACPI_NUM_GEO_PROFILES * ACPI_WGDS_NUM_BANDS *
- ACPI_WGDS_TABLE_SIZE + 1 != ACPI_WGDS_WIFI_DATA_SIZE);
-
- BUILD_BUG_ON(ACPI_NUM_GEO_PROFILES > IWL_NUM_GEO_PROFILES);
-
- for (i = 0; i < ACPI_NUM_GEO_PROFILES; i++) {
- struct iwl_per_chain_offset *chain =
- (struct iwl_per_chain_offset *)&cmd.table[i];
-
- for (j = 0; j < ACPI_WGDS_NUM_BANDS; j++) {
- u8 *value;
+ cmd.geo_cmd.ops = cpu_to_le32(IWL_PER_CHAIN_OFFSET_SET_TABLES);
- value = &mvm->geo_profiles[i].values[j *
- ACPI_GEO_PER_CHAIN_SIZE];
- chain[j].max_tx_power = cpu_to_le16(value[0]);
- chain[j].chain_a = value[1];
- chain[j].chain_b = value[2];
- IWL_DEBUG_RADIO(mvm,
- "SAR geographic profile[%d] Band[%d]: chain A = %d chain B = %d max_tx_power = %d\n",
- i, j, value[1], value[2], value[0]);
- }
- }
+ iwl_sar_geo_init(&mvm->fwrt, cmd.geo_cmd.table);
- cmd.table_revision = cpu_to_le32(mvm->geo_rev);
+ cmd.geo_cmd.table_revision = cpu_to_le32(mvm->fwrt.geo_rev);
- if (!fw_has_api(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_API_SAR_TABLE_VER)) {
- return iwl_mvm_send_cmd_pdu(mvm, cmd_wide_id, 0,
- sizeof(struct iwl_geo_tx_power_profiles_cmd_v1),
- &cmd);
+ if (!fw_has_api(&mvm->fwrt.fw->ucode_capa,
+ IWL_UCODE_TLV_API_SAR_TABLE_VER)) {
+ len = sizeof(struct iwl_geo_tx_power_profiles_cmd_v1);
+ } else {
+ len = sizeof(cmd.geo_cmd);
}
- return iwl_mvm_send_cmd_pdu(mvm, cmd_wide_id, 0, sizeof(cmd), &cmd);
+ return iwl_mvm_send_cmd_pdu(mvm, cmd_wide_id, 0, len, &cmd);
}
static int iwl_mvm_get_ppag_table(struct iwl_mvm *mvm)
@@ -1024,7 +772,7 @@ static int iwl_mvm_get_ppag_table(struct iwl_mvm *mvm)
int i, j, ret, tbl_rev;
int idx = 2;
- mvm->ppag_table.enabled = cpu_to_le32(0);
+ mvm->fwrt.ppag_table.enabled = cpu_to_le32(0);
data = iwl_acpi_get_object(mvm->dev, ACPI_PPAG_METHOD);
if (IS_ERR(data))
return PTR_ERR(data);
@@ -1049,8 +797,8 @@ static int iwl_mvm_get_ppag_table(struct iwl_mvm *mvm)
goto out_free;
}
- mvm->ppag_table.enabled = cpu_to_le32(enabled->integer.value);
- if (!mvm->ppag_table.enabled) {
+ mvm->fwrt.ppag_table.enabled = cpu_to_le32(enabled->integer.value);
+ if (!mvm->fwrt.ppag_table.enabled) {
ret = 0;
goto out_free;
}
@@ -1070,11 +818,11 @@ static int iwl_mvm_get_ppag_table(struct iwl_mvm *mvm)
(j == 0 && ent->integer.value < ACPI_PPAG_MIN_LB) ||
(j != 0 && ent->integer.value > ACPI_PPAG_MAX_HB) ||
(j != 0 && ent->integer.value < ACPI_PPAG_MIN_HB)) {
- mvm->ppag_table.enabled = cpu_to_le32(0);
+ mvm->fwrt.ppag_table.enabled = cpu_to_le32(0);
ret = -EINVAL;
goto out_free;
}
- mvm->ppag_table.gain[i][j] = ent->integer.value;
+ mvm->fwrt.ppag_table.gain[i][j] = ent->integer.value;
}
}
ret = 0;
@@ -1095,20 +843,20 @@ int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm)
IWL_DEBUG_RADIO(mvm, "Sending PER_PLATFORM_ANT_GAIN_CMD\n");
IWL_DEBUG_RADIO(mvm, "PPAG is %s\n",
- mvm->ppag_table.enabled ? "enabled" : "disabled");
+ mvm->fwrt.ppag_table.enabled ? "enabled" : "disabled");
for (i = 0; i < ACPI_PPAG_NUM_CHAINS; i++) {
for (j = 0; j < ACPI_PPAG_NUM_SUB_BANDS; j++) {
IWL_DEBUG_RADIO(mvm,
"PPAG table: chain[%d] band[%d]: gain = %d\n",
- i, j, mvm->ppag_table.gain[i][j]);
+ i, j, mvm->fwrt.ppag_table.gain[i][j]);
}
}
ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(PHY_OPS_GROUP,
PER_PLATFORM_ANT_GAIN_CMD),
- 0, sizeof(mvm->ppag_table),
- &mvm->ppag_table);
+ 0, sizeof(mvm->fwrt.ppag_table),
+ &mvm->fwrt.ppag_table);
if (ret < 0)
IWL_ERR(mvm, "failed to send PER_PLATFORM_ANT_GAIN_CMD (%d)\n",
ret);
@@ -1131,17 +879,14 @@ static int iwl_mvm_ppag_init(struct iwl_mvm *mvm)
}
#else /* CONFIG_ACPI */
-static int iwl_mvm_sar_get_wrds_table(struct iwl_mvm *mvm)
-{
- return -ENOENT;
-}
-static int iwl_mvm_sar_get_ewrd_table(struct iwl_mvm *mvm)
+inline int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm,
+ int prof_a, int prof_b)
{
return -ENOENT;
}
-static int iwl_mvm_sar_get_wgds_table(struct iwl_mvm *mvm)
+inline int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
{
return -ENOENT;
}
@@ -1151,17 +896,6 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
return 0;
}
-int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a,
- int prof_b)
-{
- return -ENOENT;
-}
-
-int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
-{
- return -ENOENT;
-}
-
int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm)
{
return -ENOENT;
@@ -1169,7 +903,7 @@ int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm)
static int iwl_mvm_ppag_init(struct iwl_mvm *mvm)
{
- return -ENOENT;
+ return 0;
}
#endif /* CONFIG_ACPI */
@@ -1228,7 +962,7 @@ static int iwl_mvm_sar_init(struct iwl_mvm *mvm)
{
int ret;
- ret = iwl_mvm_sar_get_wrds_table(mvm);
+ ret = iwl_sar_get_wrds_table(&mvm->fwrt);
if (ret < 0) {
IWL_DEBUG_RADIO(mvm,
"WRDS SAR BIOS table invalid or unavailable. (%d)\n",
@@ -1240,16 +974,14 @@ static int iwl_mvm_sar_init(struct iwl_mvm *mvm)
return 1;
}
- ret = iwl_mvm_sar_get_ewrd_table(mvm);
+ ret = iwl_sar_get_ewrd_table(&mvm->fwrt);
/* if EWRD is not available, we can still use WRDS, so don't fail */
if (ret < 0)
IWL_DEBUG_RADIO(mvm,
"EWRD SAR BIOS table invalid or unavailable. (%d)\n",
ret);
- /* choose profile 1 (WRDS) as default for both chains */
ret = iwl_mvm_sar_select_profile(mvm, 1, 1);
-
/*
* If we don't have profile 0 from BIOS, just skip it. This
* means that SAR Geo will not be enabled either, even if we
@@ -1344,12 +1076,12 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
ret = iwl_send_phy_db_data(mvm->phy_db);
if (ret)
goto error;
-
- ret = iwl_send_phy_cfg_cmd(mvm);
- if (ret)
- goto error;
}
+ ret = iwl_send_phy_cfg_cmd(mvm);
+ if (ret)
+ goto error;
+
ret = iwl_mvm_send_bt_init_conf(mvm);
if (ret)
goto error;
@@ -1480,7 +1212,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
ret = iwl_mvm_sar_init(mvm);
if (ret == 0) {
ret = iwl_mvm_sar_geo_init(mvm);
- } else if (ret > 0 && !iwl_mvm_sar_get_wgds_table(mvm)) {
+ } else if (ret > 0 && !iwl_sar_get_wgds_table(&mvm->fwrt)) {
/*
* If basic SAR is not available, we check for WGDS,
* which should *not* be available either. If it is
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/led.c b/drivers/net/wireless/intel/iwlwifi/mvm/led.c
index d104da9170ca..72c4b2b8399d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/led.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/led.c
@@ -129,6 +129,9 @@ int iwl_mvm_leds_init(struct iwl_mvm *mvm)
mvm->led.name = kasprintf(GFP_KERNEL, "%s-led",
wiphy_name(mvm->hw->wiphy));
+ if (!mvm->led.name)
+ return -ENOMEM;
+
mvm->led.brightness_set = iwl_led_brightness_set;
mvm->led.max_brightness = 1;
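
Since the LED name is now a kasprintf() allocation, the teardown path has to free it. A sketch of what the matching exit presumably does (the body is assumed, it is not shown in this hunk):

static void demo_leds_exit(struct iwl_mvm *mvm)
{
	led_classdev_unregister(&mvm->led);
	kfree(mvm->led.name);
}
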
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index 9c417dd06291..b78992e341d5 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -855,11 +855,10 @@ u8 iwl_mvm_mac_ctxt_get_lowest_rate(struct ieee80211_tx_info *info,
struct ieee80211_vif *vif)
{
u8 rate;
-
- if (info->band == NL80211_BAND_5GHZ || vif->p2p)
- rate = IWL_FIRST_OFDM_RATE;
- else
+ if (info->band == NL80211_BAND_2GHZ && !vif->p2p)
rate = IWL_FIRST_CCK_RATE;
+ else
+ rate = IWL_FIRST_OFDM_RATE;
return rate;
}
@@ -1404,6 +1403,7 @@ void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
u32 rx_missed_bcon, rx_missed_bcon_since_rx;
struct ieee80211_vif *vif;
u32 id = le32_to_cpu(mb->mac_id);
+ union iwl_dbg_tlv_tp_data tp_data = { .fw_pkt = pkt };
IWL_DEBUG_INFO(mvm,
"missed bcn mac_id=%u, consecutive=%u (%u, %u, %u)\n",
@@ -1432,7 +1432,7 @@ void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
ieee80211_beacon_loss(vif);
iwl_dbg_tlv_time_point(&mvm->fwrt,
- IWL_FW_INI_TIME_POINT_MISSED_BEACONS, NULL);
+ IWL_FW_INI_TIME_POINT_MISSED_BEACONS, &tp_data);
trigger = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
FW_DBG_TRIGGER_MISSED_BEACONS);
@@ -1609,3 +1609,26 @@ void iwl_mvm_channel_switch_noa_notif(struct iwl_mvm *mvm,
out_unlock:
rcu_read_unlock();
}
+
+void iwl_mvm_rx_missed_vap_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_missed_vap_notif *mb = (void *)pkt->data;
+ struct ieee80211_vif *vif;
+ u32 id = le32_to_cpu(mb->mac_id);
+
+ IWL_DEBUG_INFO(mvm,
+ "missed_vap notify mac_id=%u, num_beacon_intervals_elapsed=%u, profile_periodicity=%u\n",
+		       id,
+ mb->num_beacon_intervals_elapsed,
+ mb->profile_periodicity);
+
+ rcu_read_lock();
+
+ vif = iwl_mvm_rcu_dereference_vif_id(mvm, id, true);
+ if (vif)
+ iwl_mvm_connection_loss(mvm, vif, "missed vap beacon");
+
+ rcu_read_unlock();
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index d31f96c3f925..32dc9d6f0fb6 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -339,14 +339,14 @@ int iwl_mvm_init_fw_regd(struct iwl_mvm *mvm)
return ret;
}
-const static u8 he_if_types_ext_capa_sta[] = {
+static const u8 he_if_types_ext_capa_sta[] = {
[0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING,
[2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT,
[7] = WLAN_EXT_CAPA8_OPMODE_NOTIF,
[9] = WLAN_EXT_CAPA10_TWT_REQUESTER_SUPPORT,
};
-const static struct wiphy_iftype_ext_capab he_iftypes_ext_capa[] = {
+static const struct wiphy_iftype_ext_capab he_iftypes_ext_capa[] = {
{
.iftype = NL80211_IFTYPE_STATION,
.extended_capabilities = he_if_types_ext_capa_sta,
@@ -355,6 +355,15 @@ const static struct wiphy_iftype_ext_capab he_iftypes_ext_capa[] = {
},
};
+static int
+iwl_mvm_op_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ *tx_ant = iwl_mvm_get_valid_tx_ant(mvm);
+ *rx_ant = iwl_mvm_get_valid_rx_ant(mvm);
+ return 0;
+}
+
int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
{
struct ieee80211_hw *hw = mvm->hw;
@@ -734,6 +743,9 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
wiphy_ext_feature_set(hw->wiphy,
NL80211_EXT_FEATURE_MU_MIMO_AIR_SNIFFER);
+ hw->wiphy->available_antennas_tx = iwl_mvm_get_valid_tx_ant(mvm);
+ hw->wiphy->available_antennas_rx = iwl_mvm_get_valid_rx_ant(mvm);
+
ret = ieee80211_register_hw(mvm->hw);
if (ret) {
iwl_mvm_leds_exit(mvm);
@@ -2280,7 +2292,9 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
}
if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART,
- &mvm->status)) {
+ &mvm->status) &&
+ !fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) {
/*
* If we're restarting then the firmware will
* obviously have lost synchronisation with
@@ -2294,6 +2308,10 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
*
* Set a large maximum delay to allow for more
* than a single interface.
+ *
+ * For new firmware versions, rely on the
+ * firmware. This is relevant for DCM scenarios
+ * only anyway.
*/
u32 dur = (11 * vif->bss_conf.beacon_int) / 10;
iwl_mvm_protect_session(mvm, vif, dur, dur,
@@ -2384,8 +2402,11 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
/*
* We received a beacon from the associated AP so
* remove the session protection.
+ * A firmware with the new API will remove it automatically.
*/
- iwl_mvm_stop_session_protection(mvm, vif);
+ if (!fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD))
+ iwl_mvm_stop_session_protection(mvm, vif);
iwl_mvm_sf_update(mvm, vif, false);
WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
@@ -3255,8 +3276,22 @@ static void iwl_mvm_mac_mgd_prepare_tx(struct ieee80211_hw *hw,
duration = req_duration;
mutex_lock(&mvm->mutex);
- /* Try really hard to protect the session and hear a beacon */
- iwl_mvm_protect_session(mvm, vif, duration, min_duration, 500, false);
+	/* Try really hard to protect the session and hear a beacon.
+ * The new session protection command allows us to protect the
+ * session for a much longer time since the firmware will internally
+ * create two events: a 300TU one with a very high priority that
+ * won't be fragmented which should be enough for 99% of the cases,
+ * and another one (which we configure here to be 900TU long) which
+ * will have a slightly lower priority, but more importantly, can be
+ * fragmented so that it'll allow other activities to run.
+ */
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD))
+ iwl_mvm_schedule_session_protection(mvm, vif, 900,
+ min_duration);
+ else
+ iwl_mvm_protect_session(mvm, vif, duration,
+ min_duration, 500, false);
mutex_unlock(&mvm->mutex);
}
@@ -3613,8 +3648,7 @@ static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm,
/* Set the channel info data */
iwl_mvm_set_chan_info(mvm, &aux_roc_req.channel_info, channel->hw_value,
- (channel->band == NL80211_BAND_2GHZ) ?
- PHY_BAND_24 : PHY_BAND_5,
+ iwl_mvm_phy_band_from_nl80211(channel->band),
PHY_VHT_CHANNEL_MODE20,
0);
@@ -3848,7 +3882,7 @@ static int iwl_mvm_cancel_roc(struct ieee80211_hw *hw,
IWL_DEBUG_MAC80211(mvm, "enter\n");
mutex_lock(&mvm->mutex);
- iwl_mvm_stop_roc(mvm);
+ iwl_mvm_stop_roc(mvm, vif);
mutex_unlock(&mvm->mutex);
IWL_DEBUG_MAC80211(mvm, "leave\n");
@@ -4622,7 +4656,7 @@ static void iwl_mvm_flush_no_vif(struct iwl_mvm *mvm, u32 queues, bool drop)
continue;
if (drop)
- iwl_mvm_flush_sta_tids(mvm, i, 0xFF, 0);
+ iwl_mvm_flush_sta_tids(mvm, i, 0xFFFF, 0);
else
iwl_mvm_wait_sta_queues_empty(mvm,
iwl_mvm_sta_from_mac80211(sta));
@@ -5006,6 +5040,7 @@ const struct ieee80211_ops iwl_mvm_hw_ops = {
.tx = iwl_mvm_mac_tx,
.wake_tx_queue = iwl_mvm_mac_wake_tx_queue,
.ampdu_action = iwl_mvm_mac_ampdu_action,
+ .get_antenna = iwl_mvm_op_get_antenna,
.start = iwl_mvm_mac_start,
.reconfig_complete = iwl_mvm_mac_reconfig_complete,
.stop = iwl_mvm_mac_stop,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index 5ca50f39a023..3ec8de00f3aa 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -188,6 +188,11 @@ enum iwl_power_scheme {
IWL_POWER_SCHEME_LP
};
+union geo_tx_power_profiles_cmd {
+ struct iwl_geo_tx_power_profiles_cmd geo_cmd;
+ struct iwl_geo_tx_power_profiles_cmd_v1 geo_cmd_v1;
+};
+
#define IWL_CONN_MAX_LISTEN_INTERVAL 10
#define IWL_UAPSD_MAX_SP IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL
@@ -774,14 +779,6 @@ enum iwl_mvm_queue_status {
#define IWL_MVM_NUM_CIPHERS 10
-struct iwl_mvm_sar_profile {
- bool enabled;
- u8 table[ACPI_SAR_TABLE_SIZE];
-};
-
-struct iwl_mvm_geo_profile {
- u8 values[ACPI_GEO_TABLE_SIZE];
-};
struct iwl_mvm_txq {
struct list_head list;
@@ -1122,6 +1119,10 @@ struct iwl_mvm {
int responses[IWL_MVM_TOF_MAX_APS];
} ftm_initiator;
+ struct {
+ u8 d0i3_resp;
+ } cmd_ver;
+
struct ieee80211_vif *nan_vif;
#define IWL_MAX_BAID 32
struct iwl_mvm_baid_data __rcu *baid_map[IWL_MAX_BAID];
@@ -1140,14 +1141,6 @@ struct iwl_mvm {
/* sniffer data to include in radiotap */
__le16 cur_aid;
u8 cur_bssid[ETH_ALEN];
-
-#ifdef CONFIG_ACPI
- struct iwl_mvm_sar_profile sar_profiles[ACPI_SAR_PROFILE_NUM];
- struct iwl_mvm_geo_profile geo_profiles[ACPI_NUM_GEO_PROFILES];
- u32 geo_rev;
- struct iwl_ppag_table_cmd ppag_table;
- u32 ppag_rev;
-#endif
};
/* Extract MVM priv from op_mode and _hw */
@@ -1405,12 +1398,19 @@ static inline bool iwl_mvm_is_scan_ext_chan_supported(struct iwl_mvm *mvm)
IWL_UCODE_TLV_API_SCAN_EXT_CHAN_VER);
}
+
static inline bool iwl_mvm_is_reduced_config_scan_supported(struct iwl_mvm *mvm)
{
return fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_REDUCED_SCAN_CONFIG);
}
+static inline bool iwl_mvm_is_band_in_rx_supported(struct iwl_mvm *mvm)
+{
+ return fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_BAND_IN_RX_DATA);
+}
+
static inline bool iwl_mvm_has_new_rx_stats_api(struct iwl_mvm *mvm)
{
return fw_has_api(&mvm->fw->ucode_capa,
@@ -1682,6 +1682,8 @@ void iwl_mvm_mac_ctxt_recalc_tsf_id(struct iwl_mvm *mvm,
struct ieee80211_vif *vif);
void iwl_mvm_probe_resp_data_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_missed_vap_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_channel_switch_noa_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
/* Bindings */
@@ -2077,6 +2079,19 @@ void iwl_mvm_sta_add_debugfs(struct ieee80211_hw *hw,
struct dentry *dir);
#endif
+static inline u8 iwl_mvm_phy_band_from_nl80211(enum nl80211_band band)
+{
+ switch (band) {
+ case NL80211_BAND_2GHZ:
+ return PHY_BAND_24;
+ case NL80211_BAND_5GHZ:
+ return PHY_BAND_5;
+ default:
+ WARN_ONCE(1, "Unsupported band (%u)\n", band);
+ return PHY_BAND_5;
+ }
+}
+
/* Channel info utils */
static inline bool iwl_mvm_has_ultra_hb_channel(struct iwl_mvm *mvm)
{
@@ -2125,11 +2140,12 @@ iwl_mvm_set_chan_info_chandef(struct iwl_mvm *mvm,
struct iwl_fw_channel_info *ci,
struct cfg80211_chan_def *chandef)
{
+ enum nl80211_band band = chandef->chan->band;
+
iwl_mvm_set_chan_info(mvm, ci, chandef->chan->hw_value,
- (chandef->chan->band == NL80211_BAND_2GHZ ?
- PHY_BAND_24 : PHY_BAND_5),
- iwl_mvm_get_channel_width(chandef),
- iwl_mvm_get_ctrl_pos(chandef));
+ iwl_mvm_phy_band_from_nl80211(band),
+ iwl_mvm_get_channel_width(chandef),
+ iwl_mvm_get_ctrl_pos(chandef));
}
#endif /* __IWL_MVM_H__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 3acbd5b7ab4b..1b07a8e8f069 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -263,6 +263,8 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
RX_HANDLER(TIME_EVENT_NOTIFICATION, iwl_mvm_rx_time_event_notif,
RX_HANDLER_SYNC),
+ RX_HANDLER_GRP(MAC_CONF_GROUP, SESSION_PROTECTION_NOTIF,
+ iwl_mvm_rx_session_protect_notif, RX_HANDLER_SYNC),
RX_HANDLER(MCC_CHUB_UPDATE_CMD, iwl_mvm_rx_chub_update_mcc,
RX_HANDLER_ASYNC_LOCKED),
@@ -432,6 +434,8 @@ static const struct iwl_hcmd_names iwl_mvm_system_names[] = {
*/
static const struct iwl_hcmd_names iwl_mvm_mac_conf_names[] = {
HCMD_NAME(CHANNEL_SWITCH_TIME_EVENT_CMD),
+ HCMD_NAME(SESSION_PROTECTION_CMD),
+ HCMD_NAME(SESSION_PROTECTION_NOTIF),
HCMD_NAME(CHANNEL_SWITCH_NOA_NOTIF),
};
@@ -608,6 +612,27 @@ static const struct iwl_fw_runtime_ops iwl_mvm_fwrt_ops = {
.d3_debug_enable = iwl_mvm_d3_debug_enable,
};
+static u8 iwl_mvm_lookup_notif_ver(struct iwl_mvm *mvm, u8 grp, u8 cmd, u8 def)
+{
+ const struct iwl_fw_cmd_version *entry;
+ unsigned int i;
+
+ if (!mvm->fw->ucode_capa.cmd_versions ||
+ !mvm->fw->ucode_capa.n_cmd_versions)
+ return def;
+
+ entry = mvm->fw->ucode_capa.cmd_versions;
+ for (i = 0; i < mvm->fw->ucode_capa.n_cmd_versions; i++, entry++) {
+ if (entry->group == grp && entry->cmd == cmd) {
+ if (entry->notif_ver == IWL_FW_CMD_VER_UNKNOWN)
+ return def;
+ return entry->notif_ver;
+ }
+ }
+
+ return def;
+}
+
static struct iwl_op_mode *
iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
const struct iwl_fw *fw, struct dentry *dbgfs_dir)
@@ -639,10 +664,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
if (!hw)
return NULL;
- if (cfg->max_rx_agg_size)
- hw->max_rx_aggregation_subframes = cfg->max_rx_agg_size;
- else
- hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
+ hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
if (cfg->max_tx_agg_size)
hw->max_tx_aggregation_subframes = cfg->max_tx_agg_size;
@@ -667,7 +689,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
op_mode->ops = &iwl_mvm_ops_mq;
trans->rx_mpdu_cmd_hdr_size =
(trans->trans_cfg->device_family >=
- IWL_DEVICE_FAMILY_22560) ?
+ IWL_DEVICE_FAMILY_AX210) ?
sizeof(struct iwl_rx_mpdu_desc) :
IWL_RX_DESC_SIZE_V1;
} else {
@@ -722,6 +744,12 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
INIT_DELAYED_WORK(&mvm->cs_tx_unblock_dwork, iwl_mvm_tx_unblock_dwork);
+ mvm->cmd_ver.d0i3_resp =
+ iwl_mvm_lookup_notif_ver(mvm, LEGACY_GROUP, D0I3_END_CMD, 0);
+ /* we only support version 1 */
+ if (WARN_ON_ONCE(mvm->cmd_ver.d0i3_resp > 1))
+ goto out_free;
+
/*
* Populate the state variables that the transport layer needs
* to know about.
@@ -730,7 +758,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
trans_cfg.no_reclaim_cmds = no_reclaim_cmds;
trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds);
- if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+ if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
rb_size_default = IWL_AMSDU_2K;
else
rb_size_default = IWL_AMSDU_4K;
@@ -756,7 +784,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
trans->wide_cmd_header = true;
trans_cfg.bc_table_dword =
- mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_22560;
+ mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210;
trans_cfg.command_groups = iwl_mvm_groups;
trans_cfg.command_groups_size = ARRAY_SIZE(iwl_mvm_groups);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/power.c b/drivers/net/wireless/intel/iwlwifi/mvm/power.c
index 22136e4832ea..25d7faea1c62 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/power.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/power.c
@@ -370,8 +370,6 @@ static void iwl_mvm_power_config_skip_dtim(struct iwl_mvm *mvm,
if (dtimper >= 10)
return;
- /* TODO: check that multicast wake lock is off */
-
if (host_awake) {
if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_LP)
return;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
index 8f50e2b121bd..e2cf9e015ef8 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
@@ -341,16 +341,24 @@ void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm,
lq_sta = &mvmsta->lq_sta.rs_fw;
if (flags & IWL_TLC_NOTIF_FLAG_RATE) {
+ char pretty_rate[100];
lq_sta->last_rate_n_flags = le32_to_cpu(notif->rate);
- IWL_DEBUG_RATE(mvm, "new rate_n_flags: 0x%X\n",
- lq_sta->last_rate_n_flags);
+ rs_pretty_print_rate(pretty_rate, sizeof(pretty_rate),
+ lq_sta->last_rate_n_flags);
+ IWL_DEBUG_RATE(mvm, "new rate: %s\n", pretty_rate);
}
if (flags & IWL_TLC_NOTIF_FLAG_AMSDU && !mvmsta->orig_amsdu_len) {
u16 size = le32_to_cpu(notif->amsdu_size);
int i;
- if (WARN_ON(sta->max_amsdu_len < size))
+		/*
+		 * In debugfs, sta->max_amsdu_len may be lowered below size,
+		 * so also check orig_amsdu_len, which holds the original
+		 * value from before debugfs changed it.
+		 */
+ if (WARN_ON(sta->max_amsdu_len < size &&
+ mvmsta->orig_amsdu_len < size))
goto out;
mvmsta->amsdu_enabled = le32_to_cpu(notif->amsdu_enabled);
@@ -378,7 +386,7 @@ out:
rcu_read_unlock();
}
-static u16 rs_fw_get_max_amsdu_len(struct ieee80211_sta *sta)
+u16 rs_fw_get_max_amsdu_len(struct ieee80211_sta *sta)
{
const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
index 42d525e46e80..1a990ed9c3ca 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
@@ -1533,6 +1533,8 @@ static void rs_set_amsdu_len(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
int i;
+ sta->max_amsdu_len = rs_fw_get_max_amsdu_len(sta);
+
/*
* In case TLC offload is not active amsdu_enabled is either 0xFFFF
* or 0, since there is no per-TID alg.
@@ -3683,7 +3685,6 @@ static void rs_free_sta(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta)
IWL_DEBUG_RATE(mvm, "leave\n");
}
-#ifdef CONFIG_MAC80211_DEBUGFS
int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate)
{
@@ -3739,14 +3740,15 @@ int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate)
}
return scnprintf(buf, bufsz,
- "%s | ANT: %s BW: %s MCS: %d NSS: %d %s%s%s%s\n",
- type, rs_pretty_ant(ant), bw, mcs, nss,
+ "0x%x: %s | ANT: %s BW: %s MCS: %d NSS: %d %s%s%s%s\n",
+ rate, type, rs_pretty_ant(ant), bw, mcs, nss,
(rate & RATE_MCS_SGI_MSK) ? "SGI " : "NGI ",
(rate & RATE_MCS_STBC_MSK) ? "STBC " : "",
(rate & RATE_MCS_LDPC_MSK) ? "LDPC " : "",
(rate & RATE_MCS_BF_MSK) ? "BF " : "");
}
+#ifdef CONFIG_MAC80211_DEBUGFS
/**
* Program the device to use fixed rate for frame transmit
* This is for debugging/testing only
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
index 428642e66658..32104c9f8f5e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
@@ -445,10 +445,6 @@ int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
void iwl_mvm_reset_frame_stats(struct iwl_mvm *mvm);
#endif
-#ifdef CONFIG_MAC80211_DEBUGFS
-void rs_remove_sta_debugfs(void *mvm, void *mvm_sta);
-#endif
-
void iwl_mvm_rs_add_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta);
void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
enum nl80211_band band, bool update);
@@ -456,4 +452,6 @@ int rs_fw_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
bool enable);
void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
+
+u16 rs_fw_get_max_amsdu_len(struct ieee80211_sta *sta);
#endif /* __rs__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
index 0ad8ed23a455..5ee33c8ae9d2 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
@@ -60,6 +60,7 @@
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/
+#include <asm/unaligned.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include "iwl-trans.h"
@@ -357,7 +358,7 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
rx_res = (struct iwl_rx_mpdu_res_start *)pkt->data;
hdr = (struct ieee80211_hdr *)(pkt->data + sizeof(*rx_res));
len = le16_to_cpu(rx_res->byte_count);
- rx_pkt_status = le32_to_cpup((__le32 *)
+ rx_pkt_status = get_unaligned_le32((__le32 *)
(pkt->data + sizeof(*rx_res) + len));
/* Don't use dev_alloc_skb(), we'll have enough headroom once
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index 77b03b757193..ef99c49247b7 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -23,7 +23,7 @@
* in the file called COPYING.
*
* Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Linux Wireless <linuxwifi@intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
* BSD LICENSE
@@ -1542,6 +1542,19 @@ static void iwl_mvm_decode_lsig(struct sk_buff *skb,
}
}
+static inline u8 iwl_mvm_nl80211_band_from_rx_msdu(u8 phy_band)
+{
+ switch (phy_band) {
+ case PHY_BAND_24:
+ return NL80211_BAND_2GHZ;
+ case PHY_BAND_5:
+ return NL80211_BAND_5GHZ;
+ default:
+ WARN_ONCE(1, "Unsupported phy band (%u)\n", phy_band);
+ return NL80211_BAND_5GHZ;
+ }
+}
+
void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
struct iwl_rx_cmd_buffer *rxb, int queue)
{
@@ -1565,7 +1578,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
if (unlikely(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)))
return;
- if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
+ if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
rate_n_flags = le32_to_cpu(desc->v3.rate_n_flags);
channel = desc->v3.channel;
gp2_on_air_rise = le32_to_cpu(desc->v3.gp2_on_air_rise);
@@ -1667,7 +1680,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
u64 tsf_on_air_rise;
if (mvm->trans->trans_cfg->device_family >=
- IWL_DEVICE_FAMILY_22560)
+ IWL_DEVICE_FAMILY_AX210)
tsf_on_air_rise = le64_to_cpu(desc->v3.tsf_on_air_rise);
else
tsf_on_air_rise = le64_to_cpu(desc->v1.tsf_on_air_rise);
@@ -1678,8 +1691,14 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
}
rx_status->device_timestamp = gp2_on_air_rise;
- rx_status->band = channel > 14 ? NL80211_BAND_5GHZ :
- NL80211_BAND_2GHZ;
+ if (iwl_mvm_is_band_in_rx_supported(mvm)) {
+ u8 band = BAND_IN_RX_STATUS(desc->mac_phy_idx);
+
+ rx_status->band = iwl_mvm_nl80211_band_from_rx_msdu(band);
+ } else {
+ rx_status->band = channel > 14 ? NL80211_BAND_5GHZ :
+ NL80211_BAND_2GHZ;
+ }
rx_status->freq = ieee80211_channel_to_frequency(channel,
rx_status->band);
iwl_mvm_get_signal_strength(mvm, rx_status, rate_n_flags, energy_a,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index fcafa22ec6ce..a046ac9fa852 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -79,9 +79,6 @@
#define IWL_SCAN_NUM_OF_FRAGS 3
#define IWL_SCAN_LAST_2_4_CHN 14
-#define IWL_SCAN_BAND_5_2 0
-#define IWL_SCAN_BAND_2_4 1
-
/* adaptive dwell max budget time [TU] for full scan */
#define IWL_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN 300
/* adaptive dwell max budget time [TU] for directed scan */
@@ -92,6 +89,10 @@
#define IWL_SCAN_ADWELL_DEFAULT_LB_N_APS 2
/* adaptive dwell default APs number in social channels (1, 6, 11) */
#define IWL_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL 10
+/* number of scan channels */
+#define IWL_SCAN_NUM_CHANNELS 112
+/* adaptive dwell default number of APs override */
+#define IWL_SCAN_ADWELL_DEFAULT_N_APS_OVERRIDE 10
struct iwl_mvm_scan_timing_params {
u32 suspend_time;
@@ -196,14 +197,6 @@ static inline __le16 iwl_mvm_scan_rx_chain(struct iwl_mvm *mvm)
return cpu_to_le16(rx_chain);
}
-static __le32 iwl_mvm_scan_rxon_flags(enum nl80211_band band)
-{
- if (band == NL80211_BAND_2GHZ)
- return cpu_to_le32(PHY_BAND_24);
- else
- return cpu_to_le32(PHY_BAND_5);
-}
-
static inline __le32
iwl_mvm_scan_rate_n_flags(struct iwl_mvm *mvm, enum nl80211_band band,
bool no_cck)
@@ -550,6 +543,7 @@ static void iwl_scan_build_ssids(struct iwl_mvm_scan_params *params,
{
int i, j;
int index;
+ u32 tmp_bitmap = 0;
/*
* copy SSIDs from match list.
@@ -569,7 +563,6 @@ static void iwl_scan_build_ssids(struct iwl_mvm_scan_params *params,
}
/* add SSIDs from scan SSID list */
- *ssid_bitmap = 0;
for (j = params->n_ssids - 1;
j >= 0 && i < PROBE_OPTION_MAX;
i++, j--) {
@@ -581,11 +574,13 @@ static void iwl_scan_build_ssids(struct iwl_mvm_scan_params *params,
ssids[i].len = params->ssids[j].ssid_len;
memcpy(ssids[i].ssid, params->ssids[j].ssid,
ssids[i].len);
- *ssid_bitmap |= BIT(i);
+ tmp_bitmap |= BIT(i);
} else {
- *ssid_bitmap |= BIT(index);
+ tmp_bitmap |= BIT(index);
}
}
+ if (ssid_bitmap)
+ *ssid_bitmap = tmp_bitmap;
}
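/*
 * Editorial sketch (not part of this patch): the hunk above turns
 * ssid_bitmap into an optional out-parameter by accumulating into a
 * local and storing only when the caller passed a pointer. A minimal
 * userspace model of the pattern, with hypothetical names:
 */
#include <stdint.h>
#include <stdio.h>

static void build_bitmap(int n_entries, uint32_t *out_bitmap)
{
	uint32_t tmp = 0;
	int i;

	for (i = 0; i < n_entries; i++)
		tmp |= 1u << i;		/* accumulate locally */

	if (out_bitmap)			/* NULL means "caller doesn't care" */
		*out_bitmap = tmp;
}

int main(void)
{
	uint32_t bitmap;

	build_bitmap(3, &bitmap);
	printf("0x%x\n", bitmap);	/* prints 0x7 */
	build_bitmap(3, NULL);		/* safe: result discarded */
	return 0;
}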
static int
@@ -981,10 +976,7 @@ static int iwl_mvm_scan_lmac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
mvm->fw->ucode_capa.n_scan_channels);
u32 ssid_bitmap = 0;
int i;
-
- lockdep_assert_held(&mvm->mutex);
-
- memset(cmd, 0, ksize(cmd));
+ u8 band;
if (WARN_ON(params->n_scan_plans > IWL_MAX_SCHED_SCAN_PLANS))
return -EINVAL;
@@ -1000,7 +992,8 @@ static int iwl_mvm_scan_lmac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
cmd->scan_flags = cpu_to_le32(iwl_mvm_scan_lmac_flags(mvm, params,
vif));
- cmd->flags = iwl_mvm_scan_rxon_flags(params->channels[0]->band);
+ band = iwl_mvm_phy_band_from_nl80211(params->channels[0]->band);
+ cmd->flags = cpu_to_le32(band);
cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP |
MAC_FILTER_IN_BEACON);
iwl_mvm_scan_fill_tx_cmd(mvm, cmd->tx_cmd, params->no_cck);
@@ -1414,21 +1407,176 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_2);
}
+static u32 iwl_mvm_scan_umac_ooc_priority(struct iwl_mvm_scan_params *params)
+{
+ return iwl_mvm_is_regular_scan(params) ?
+ IWL_SCAN_PRIORITY_EXT_6 :
+ IWL_SCAN_PRIORITY_EXT_2;
+}
+
+static void
+iwl_mvm_scan_umac_dwell_v10(struct iwl_mvm *mvm,
+ struct iwl_scan_general_params_v10 *general_params,
+ struct iwl_mvm_scan_params *params)
+{
+ struct iwl_mvm_scan_timing_params *timing, *hb_timing;
+ u8 active_dwell, passive_dwell;
+
+ timing = &scan_timing[params->type];
+ active_dwell = params->measurement_dwell ?
+ params->measurement_dwell : IWL_SCAN_DWELL_ACTIVE;
+ passive_dwell = params->measurement_dwell ?
+ params->measurement_dwell : IWL_SCAN_DWELL_PASSIVE;
+
+ general_params->adwell_default_social_chn =
+ IWL_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL;
+ general_params->adwell_default_2g = IWL_SCAN_ADWELL_DEFAULT_LB_N_APS;
+ general_params->adwell_default_5g = IWL_SCAN_ADWELL_DEFAULT_HB_N_APS;
+
+ /* if custom max budget was configured with debugfs */
+ if (IWL_MVM_ADWELL_MAX_BUDGET)
+ general_params->adwell_max_budget =
+ cpu_to_le16(IWL_MVM_ADWELL_MAX_BUDGET);
+ else if (params->ssids && params->ssids[0].ssid_len)
+ general_params->adwell_max_budget =
+ cpu_to_le16(IWL_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN);
+ else
+ general_params->adwell_max_budget =
+ cpu_to_le16(IWL_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN);
+
+ general_params->scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
+ general_params->max_out_of_time[SCAN_LB_LMAC_IDX] =
+ cpu_to_le32(timing->max_out_time);
+ general_params->suspend_time[SCAN_LB_LMAC_IDX] =
+ cpu_to_le32(timing->suspend_time);
+
+ hb_timing = &scan_timing[params->hb_type];
+
+ general_params->max_out_of_time[SCAN_HB_LMAC_IDX] =
+ cpu_to_le32(hb_timing->max_out_time);
+ general_params->suspend_time[SCAN_HB_LMAC_IDX] =
+ cpu_to_le32(hb_timing->suspend_time);
+
+ general_params->active_dwell[SCAN_LB_LMAC_IDX] = active_dwell;
+ general_params->passive_dwell[SCAN_LB_LMAC_IDX] = passive_dwell;
+ general_params->active_dwell[SCAN_HB_LMAC_IDX] = active_dwell;
+ general_params->passive_dwell[SCAN_HB_LMAC_IDX] = passive_dwell;
+}
+
+struct iwl_mvm_scan_channel_segment {
+ u8 start_idx;
+ u8 end_idx;
+ u8 first_channel_id;
+ u8 last_channel_id;
+ u8 channel_spacing_shift;
+ u8 band;
+};
+
+static const struct iwl_mvm_scan_channel_segment scan_channel_segments[] = {
+ {
+ .start_idx = 0,
+ .end_idx = 13,
+ .first_channel_id = 1,
+ .last_channel_id = 14,
+ .channel_spacing_shift = 0,
+ .band = PHY_BAND_24
+ },
+ {
+ .start_idx = 14,
+ .end_idx = 41,
+ .first_channel_id = 36,
+ .last_channel_id = 144,
+ .channel_spacing_shift = 2,
+ .band = PHY_BAND_5
+ },
+ {
+ .start_idx = 42,
+ .end_idx = 50,
+ .first_channel_id = 149,
+ .last_channel_id = 181,
+ .channel_spacing_shift = 2,
+ .band = PHY_BAND_5
+ },
+};
+
+static int iwl_mvm_scan_ch_and_band_to_idx(u8 channel_id, u8 band)
+{
+ int i, index;
+
+ if (!channel_id)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(scan_channel_segments); i++) {
+ const struct iwl_mvm_scan_channel_segment *ch_segment =
+ &scan_channel_segments[i];
+ u32 ch_offset;
+
+ if (ch_segment->band != band ||
+ ch_segment->first_channel_id > channel_id ||
+ ch_segment->last_channel_id < channel_id)
+ continue;
+
+ ch_offset = (channel_id - ch_segment->first_channel_id) >>
+ ch_segment->channel_spacing_shift;
+
+ index = scan_channel_segments[i].start_idx + ch_offset;
+ if (index < IWL_SCAN_NUM_CHANNELS)
+ return index;
+
+ break;
+ }
+
+ return -EINVAL;
+}
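/*
 * Editorial sketch (not part of this patch): a userspace model of the
 * segment arithmetic above. Each channel ID maps into a contiguous
 * index space by offsetting from the segment's first channel and
 * shifting by the channel spacing; segment values are copied from
 * scan_channel_segments[].
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct segment {
	uint8_t start_idx, first_ch, last_ch, spacing_shift;
};

static const struct segment segs[] = {
	{  0,   1,  14, 0 },	/* 2.4 GHz: channels 1..14, spacing 1 */
	{ 14,  36, 144, 2 },	/* 5 GHz:   channels 36..144, spacing 4 */
	{ 42, 149, 181, 2 },	/* 5 GHz:   channels 149..181, spacing 4 */
};

static int ch_to_idx(uint8_t ch)
{
	size_t i;

	for (i = 0; i < sizeof(segs) / sizeof(segs[0]); i++) {
		if (ch < segs[i].first_ch || ch > segs[i].last_ch)
			continue;
		return segs[i].start_idx +
		       ((ch - segs[i].first_ch) >> segs[i].spacing_shift);
	}
	return -1;	/* unknown channel */
}

int main(void)
{
	/* prints 0 14 42: the first index of each segment */
	printf("%d %d %d\n", ch_to_idx(1), ch_to_idx(36), ch_to_idx(149));
	return 0;
}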
+
+static void iwl_mvm_scan_ch_add_n_aps_override(enum nl80211_iftype vif_type,
+ u8 ch_id, u8 band, u8 *ch_bitmap,
+ size_t bitmap_n_entries)
+{
+ int i;
+ static const u8 p2p_go_friendly_chs[] = {
+ 36, 40, 44, 48, 149, 153, 157, 161, 165,
+ };
+
+ if (vif_type != NL80211_IFTYPE_P2P_DEVICE)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(p2p_go_friendly_chs); i++) {
+ if (p2p_go_friendly_chs[i] == ch_id) {
+ int ch_idx, bitmap_idx;
+
+ ch_idx = iwl_mvm_scan_ch_and_band_to_idx(ch_id, band);
+ if (ch_idx < 0)
+ return;
+
+ bitmap_idx = ch_idx / 8;
+ if (bitmap_idx >= bitmap_n_entries)
+ return;
+
+ ch_idx = ch_idx % 8;
+ ch_bitmap[bitmap_idx] |= BIT(ch_idx);
+
+ return;
+ }
+ }
+}
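/*
 * Editorial sketch (not part of this patch): the bitmap update done
 * above for P2P-GO-friendly channels. A flat channel index selects a
 * byte and a bit inside the override bitmap, with bounds checked
 * before writing, as in the driver. Names are illustrative.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static void set_ch_bit(uint8_t *bitmap, size_t n_bytes, int ch_idx)
{
	size_t byte;

	if (ch_idx < 0)
		return;			/* invalid index: skip */
	byte = ch_idx / 8;
	if (byte >= n_bytes)
		return;			/* out of range: skip */
	bitmap[byte] |= 1u << (ch_idx % 8);
}

int main(void)
{
	uint8_t bitmap[16] = { 0 };

	set_ch_bit(bitmap, sizeof(bitmap), 14);	/* channel 36's index */
	printf("byte1=0x%02x\n", bitmap[1]);	/* prints byte1=0x40 */
	return 0;
}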
+
static void
iwl_mvm_umac_scan_cfg_channels(struct iwl_mvm *mvm,
struct ieee80211_channel **channels,
- int n_channels, u32 ssid_bitmap,
+ int n_channels, u32 flags,
struct iwl_scan_channel_cfg_umac *channel_cfg)
{
int i;
for (i = 0; i < n_channels; i++) {
- channel_cfg[i].flags = cpu_to_le32(ssid_bitmap);
+ channel_cfg[i].flags = cpu_to_le32(flags);
channel_cfg[i].v1.channel_num = channels[i]->hw_value;
if (iwl_mvm_is_scan_ext_chan_supported(mvm)) {
+ enum nl80211_band band = channels[i]->band;
+
channel_cfg[i].v2.band =
- channels[i]->hw_value <= IWL_SCAN_LAST_2_4_CHN ?
- IWL_SCAN_BAND_2_4 : IWL_SCAN_BAND_5_2;
+ iwl_mvm_phy_band_from_nl80211(band);
channel_cfg[i].v2.iter_count = 1;
channel_cfg[i].v2.iter_interval = 0;
} else {
@@ -1438,6 +1586,92 @@ iwl_mvm_umac_scan_cfg_channels(struct iwl_mvm *mvm,
}
}
+static void
+iwl_mvm_umac_scan_cfg_channels_v4(struct iwl_mvm *mvm,
+ struct ieee80211_channel **channels,
+ struct iwl_scan_channel_params_v4 *cp,
+ int n_channels, u32 flags,
+ enum nl80211_iftype vif_type)
+{
+ u8 *bitmap = cp->adwell_ch_override_bitmap;
+ size_t bitmap_n_entries = ARRAY_SIZE(cp->adwell_ch_override_bitmap);
+ int i;
+
+ for (i = 0; i < n_channels; i++) {
+ enum nl80211_band band = channels[i]->band;
+ struct iwl_scan_channel_cfg_umac *cfg =
+ &cp->channel_config[i];
+
+ cfg->flags = cpu_to_le32(flags);
+ cfg->v2.channel_num = channels[i]->hw_value;
+ cfg->v2.band = iwl_mvm_phy_band_from_nl80211(band);
+ cfg->v2.iter_count = 1;
+ cfg->v2.iter_interval = 0;
+
+ iwl_mvm_scan_ch_add_n_aps_override(vif_type,
+ cfg->v2.channel_num,
+ cfg->v2.band, bitmap,
+ bitmap_n_entries);
+ }
+}
+
+static u8 iwl_mvm_scan_umac_chan_flags_v2(struct iwl_mvm *mvm,
+ struct iwl_mvm_scan_params *params,
+ struct ieee80211_vif *vif)
+{
+ u8 flags = 0;
+
+ flags |= IWL_SCAN_CHANNEL_FLAG_ENABLE_CHAN_ORDER;
+
+ if (iwl_mvm_scan_use_ebs(mvm, vif))
+ flags |= IWL_SCAN_CHANNEL_FLAG_EBS |
+ IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
+ IWL_SCAN_CHANNEL_FLAG_CACHE_ADD;
+
+ /* set fragmented ebs for fragmented scan on HB channels */
+ if (iwl_mvm_is_scan_fragmented(params->hb_type))
+ flags |= IWL_SCAN_CHANNEL_FLAG_EBS_FRAG;
+
+ return flags;
+}
+
+static u16 iwl_mvm_scan_umac_flags_v2(struct iwl_mvm *mvm,
+ struct iwl_mvm_scan_params *params,
+ struct ieee80211_vif *vif,
+ int type)
+{
+ u16 flags = 0;
+
+ if (params->n_ssids == 0)
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_FORCE_PASSIVE;
+
+ if (iwl_mvm_is_scan_fragmented(params->type))
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC1;
+
+ if (iwl_mvm_is_scan_fragmented(params->hb_type))
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC2;
+
+ if (params->pass_all)
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_PASS_ALL;
+ else
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_MATCH;
+
+ if (!iwl_mvm_is_regular_scan(params))
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_PERIODIC;
+
+ if (params->measurement_dwell ||
+ mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_ENABLED)
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_NTFY_ITER_COMPLETE;
+
+ if (IWL_MVM_ADWELL_ENABLE)
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_ADAPTIVE_DWELL;
+
+ if (type == IWL_MVM_SCAN_SCHED || type == IWL_MVM_SCAN_NETDETECT)
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_PREEMPTIVE;
+
+ return flags;
+}
+
static u16 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm,
struct iwl_mvm_scan_params *params,
struct ieee80211_vif *vif)
@@ -1481,8 +1715,7 @@ static u16 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm,
if (mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_ENABLED)
flags |= IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE;
- if (iwl_mvm_is_adaptive_dwell_supported(mvm) && IWL_MVM_ADWELL_ENABLE &&
- vif->type != NL80211_IFTYPE_P2P_DEVICE)
+ if (iwl_mvm_is_adaptive_dwell_supported(mvm) && IWL_MVM_ADWELL_ENABLE)
flags |= IWL_UMAC_SCAN_GEN_FLAGS_ADAPTIVE_DWELL;
/*
@@ -1517,9 +1750,42 @@ static u16 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm,
return flags;
}
+static int
+iwl_mvm_fill_scan_sched_params(struct iwl_mvm_scan_params *params,
+ struct iwl_scan_umac_schedule *schedule,
+ __le16 *delay)
+{
+ int i;
+
+ if (WARN_ON(!params->n_scan_plans ||
+ params->n_scan_plans > IWL_MAX_SCHED_SCAN_PLANS))
+ return -EINVAL;
+
+ for (i = 0; i < params->n_scan_plans; i++) {
+ struct cfg80211_sched_scan_plan *scan_plan =
+ &params->scan_plans[i];
+
+ schedule[i].iter_count = scan_plan->iterations;
+ schedule[i].interval =
+ cpu_to_le16(scan_plan->interval);
+ }
+
+ /*
+ * If the number of iterations of the last scan plan is set to
+ * zero, it should run infinitely. However, this is not always the case.
+ * For example, when regular scan is requested the driver sets one scan
+ * plan with one iteration.
+ */
+ if (!schedule[params->n_scan_plans - 1].iter_count)
+ schedule[params->n_scan_plans - 1].iter_count = 0xff;
+
+ *delay = cpu_to_le16(params->delay);
+
+ return 0;
+}
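/*
 * Editorial sketch (not part of this patch): a minimal model of the
 * factored-out schedule filling above. The plans are copied and the
 * last plan's iteration count is patched so that 0 (cfg80211's "run
 * forever") becomes 0xff for the firmware. Types are simplified
 * stand-ins for the real command structures.
 */
#include <stdint.h>
#include <stdio.h>

struct plan { uint8_t iterations; uint16_t interval; };

static int fill_sched(const struct plan *plans, int n,
		      struct plan *out, int max)
{
	int i;

	if (!n || n > max)
		return -1;		/* mirrors the -EINVAL path */

	for (i = 0; i < n; i++)
		out[i] = plans[i];

	if (!out[n - 1].iterations)	/* 0 == infinite for cfg80211 */
		out[n - 1].iterations = 0xff;	/* firmware convention */
	return 0;
}

int main(void)
{
	struct plan in[2] = { { 3, 10 }, { 0, 60 } }, out[2];

	fill_sched(in, 2, out, 2);
	printf("%u\n", out[1].iterations);	/* prints 255 */
	return 0;
}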
+
static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct iwl_mvm_scan_params *params,
- int type)
+ int type, int uid)
{
struct iwl_scan_req_umac *cmd = mvm->scan_cmd;
struct iwl_scan_umac_chan_param *chan_param;
@@ -1530,7 +1796,7 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
(struct iwl_scan_req_umac_tail_v2 *)sec_part;
struct iwl_scan_req_umac_tail_v1 *tail_v1;
struct iwl_ssid_ie *direct_scan;
- int uid, i;
+ int ret = 0;
u32 ssid_bitmap = 0;
u8 channel_flags = 0;
u16 gen_flags;
@@ -1538,17 +1804,6 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
chan_param = iwl_mvm_get_scan_req_umac_channel(mvm);
- lockdep_assert_held(&mvm->mutex);
-
- if (WARN_ON(params->n_scan_plans > IWL_MAX_SCHED_SCAN_PLANS))
- return -EINVAL;
-
- uid = iwl_mvm_scan_uid_by_status(mvm, 0);
- if (uid < 0)
- return uid;
-
- memset(cmd, 0, ksize(cmd));
-
iwl_mvm_scan_umac_dwell(mvm, cmd, params);
mvm->scan_uid_status[uid] = type;
@@ -1591,25 +1846,10 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
chan_param->flags = channel_flags;
chan_param->count = params->n_channels;
- for (i = 0; i < params->n_scan_plans; i++) {
- struct cfg80211_sched_scan_plan *scan_plan =
- &params->scan_plans[i];
-
- tail_v2->schedule[i].iter_count = scan_plan->iterations;
- tail_v2->schedule[i].interval =
- cpu_to_le16(scan_plan->interval);
- }
-
- /*
- * If the number of iterations of the last scan plan is set to
- * zero, it should run infinitely. However, this is not always the case.
- * For example, when regular scan is requested the driver sets one scan
- * plan with one iteration.
- */
- if (!tail_v2->schedule[i - 1].iter_count)
- tail_v2->schedule[i - 1].iter_count = 0xff;
-
- tail_v2->delay = cpu_to_le16(params->delay);
+ ret = iwl_mvm_fill_scan_sched_params(params, tail_v2->schedule,
+ &tail_v2->delay);
+ if (ret)
+ return ret;
if (iwl_mvm_is_scan_ext_chan_supported(mvm)) {
tail_v2->preq = params->preq;
@@ -1627,6 +1867,174 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return 0;
}
+static void
+iwl_mvm_scan_umac_fill_general_p_v10(struct iwl_mvm *mvm,
+ struct iwl_mvm_scan_params *params,
+ struct ieee80211_vif *vif,
+ struct iwl_scan_general_params_v10 *gp,
+ u16 gen_flags)
+{
+ struct iwl_mvm_vif *scan_vif = iwl_mvm_vif_from_mac80211(vif);
+
+ iwl_mvm_scan_umac_dwell_v10(mvm, gp, params);
+
+ gp->flags = cpu_to_le16(gen_flags);
+
+ if (gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC1)
+ gp->num_of_fragments[SCAN_LB_LMAC_IDX] = IWL_SCAN_NUM_OF_FRAGS;
+ if (gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC2)
+ gp->num_of_fragments[SCAN_HB_LMAC_IDX] = IWL_SCAN_NUM_OF_FRAGS;
+
+ gp->scan_start_mac_id = scan_vif->id;
+}
+
+static void
+iwl_mvm_scan_umac_fill_probe_p_v3(struct iwl_mvm_scan_params *params,
+ struct iwl_scan_probe_params_v3 *pp)
+{
+ pp->preq = params->preq;
+ pp->ssid_num = params->n_ssids;
+ iwl_scan_build_ssids(params, pp->direct_scan, NULL);
+}
+
+static void
+iwl_mvm_scan_umac_fill_probe_p_v4(struct iwl_mvm_scan_params *params,
+ struct iwl_scan_probe_params_v4 *pp,
+ u32 *bitmap_ssid)
+{
+ pp->preq = params->preq;
+ iwl_scan_build_ssids(params, pp->direct_scan, bitmap_ssid);
+}
+
+static void
+iwl_mvm_scan_umac_fill_ch_p_v3(struct iwl_mvm *mvm,
+ struct iwl_mvm_scan_params *params,
+ struct ieee80211_vif *vif,
+ struct iwl_scan_channel_params_v3 *cp)
+{
+ cp->flags = iwl_mvm_scan_umac_chan_flags_v2(mvm, params, vif);
+ cp->count = params->n_channels;
+
+ iwl_mvm_umac_scan_cfg_channels(mvm, params->channels,
+ params->n_channels, 0,
+ cp->channel_config);
+}
+
+static void
+iwl_mvm_scan_umac_fill_ch_p_v4(struct iwl_mvm *mvm,
+ struct iwl_mvm_scan_params *params,
+ struct ieee80211_vif *vif,
+ struct iwl_scan_channel_params_v4 *cp,
+ u32 channel_cfg_flags)
+{
+ cp->flags = iwl_mvm_scan_umac_chan_flags_v2(mvm, params, vif);
+ cp->count = params->n_channels;
+ cp->num_of_aps_override = IWL_SCAN_ADWELL_DEFAULT_N_APS_OVERRIDE;
+
+ iwl_mvm_umac_scan_cfg_channels_v4(mvm, params->channels, cp,
+ params->n_channels,
+ channel_cfg_flags,
+ vif->type);
+}
+
+static int iwl_mvm_scan_umac_v11(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct iwl_mvm_scan_params *params, int type,
+ int uid)
+{
+ struct iwl_scan_req_umac_v11 *cmd = mvm->scan_cmd;
+ struct iwl_scan_req_params_v11 *scan_p = &cmd->scan_params;
+ int ret;
+ u16 gen_flags;
+
+ mvm->scan_uid_status[uid] = type;
+
+ cmd->ooc_priority = cpu_to_le32(iwl_mvm_scan_umac_ooc_priority(params));
+ cmd->uid = cpu_to_le32(uid);
+
+ gen_flags = iwl_mvm_scan_umac_flags_v2(mvm, params, vif, type);
+ iwl_mvm_scan_umac_fill_general_p_v10(mvm, params, vif,
+ &scan_p->general_params,
+ gen_flags);
+
+ ret = iwl_mvm_fill_scan_sched_params(params,
+ scan_p->periodic_params.schedule,
+ &scan_p->periodic_params.delay);
+ if (ret)
+ return ret;
+
+ iwl_mvm_scan_umac_fill_probe_p_v3(params, &scan_p->probe_params);
+ iwl_mvm_scan_umac_fill_ch_p_v3(mvm, params, vif,
+ &scan_p->channel_params);
+
+ return 0;
+}
+
+static int iwl_mvm_scan_umac_v12(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct iwl_mvm_scan_params *params, int type,
+ int uid)
+{
+ struct iwl_scan_req_umac_v12 *cmd = mvm->scan_cmd;
+ struct iwl_scan_req_params_v12 *scan_p = &cmd->scan_params;
+ int ret;
+ u16 gen_flags;
+
+ mvm->scan_uid_status[uid] = type;
+
+ cmd->ooc_priority = cpu_to_le32(iwl_mvm_scan_umac_ooc_priority(params));
+ cmd->uid = cpu_to_le32(uid);
+
+ gen_flags = iwl_mvm_scan_umac_flags_v2(mvm, params, vif, type);
+ iwl_mvm_scan_umac_fill_general_p_v10(mvm, params, vif,
+ &scan_p->general_params,
+ gen_flags);
+
+ ret = iwl_mvm_fill_scan_sched_params(params,
+ scan_p->periodic_params.schedule,
+ &scan_p->periodic_params.delay);
+ if (ret)
+ return ret;
+
+ iwl_mvm_scan_umac_fill_probe_p_v3(params, &scan_p->probe_params);
+ iwl_mvm_scan_umac_fill_ch_p_v4(mvm, params, vif,
+ &scan_p->channel_params, 0);
+
+ return 0;
+}
+
+static int iwl_mvm_scan_umac_v13(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct iwl_mvm_scan_params *params, int type,
+ int uid)
+{
+ struct iwl_scan_req_umac_v13 *cmd = mvm->scan_cmd;
+ struct iwl_scan_req_params_v13 *scan_p = &cmd->scan_params;
+ int ret;
+ u16 gen_flags;
+ u32 bitmap_ssid = 0;
+
+ mvm->scan_uid_status[uid] = type;
+
+ cmd->ooc_priority = cpu_to_le32(iwl_mvm_scan_umac_ooc_priority(params));
+ cmd->uid = cpu_to_le32(uid);
+
+ gen_flags = iwl_mvm_scan_umac_flags_v2(mvm, params, vif, type);
+ iwl_mvm_scan_umac_fill_general_p_v10(mvm, params, vif,
+ &scan_p->general_params,
+ gen_flags);
+
+ ret = iwl_mvm_fill_scan_sched_params(params,
+ scan_p->periodic_params.schedule,
+ &scan_p->periodic_params.delay);
+ if (ret)
+ return ret;
+
+ iwl_mvm_scan_umac_fill_probe_p_v4(params, &scan_p->probe_params,
+ &bitmap_ssid);
+ iwl_mvm_scan_umac_fill_ch_p_v4(mvm, params, vif,
+ &scan_p->channel_params, bitmap_ssid);
+
+ return 0;
+}
+
static int iwl_mvm_num_scans(struct iwl_mvm *mvm)
{
return hweight32(mvm->scan_status & IWL_MVM_SCAN_MASK);
@@ -1729,6 +2137,64 @@ static void iwl_mvm_fill_scan_type(struct iwl_mvm *mvm,
}
}
+struct iwl_scan_umac_handler {
+ u8 version;
+ int (*handler)(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct iwl_mvm_scan_params *params, int type, int uid);
+};
+
+#define IWL_SCAN_UMAC_HANDLER(_ver) { \
+ .version = _ver, \
+ .handler = iwl_mvm_scan_umac_v##_ver, \
+}
+
+static const struct iwl_scan_umac_handler iwl_scan_umac_handlers[] = {
+ /* set the newest version first to shorten the list traversal time */
+ IWL_SCAN_UMAC_HANDLER(13),
+ IWL_SCAN_UMAC_HANDLER(12),
+ IWL_SCAN_UMAC_HANDLER(11),
+};
+
+static int iwl_mvm_build_scan_cmd(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct iwl_host_cmd *hcmd,
+ struct iwl_mvm_scan_params *params,
+ int type)
+{
+ int uid, i;
+ u8 scan_ver;
+
+ lockdep_assert_held(&mvm->mutex);
+ memset(mvm->scan_cmd, 0, ksize(mvm->scan_cmd));
+
+ if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
+ hcmd->id = SCAN_OFFLOAD_REQUEST_CMD;
+
+ return iwl_mvm_scan_lmac(mvm, vif, params);
+ }
+
+ uid = iwl_mvm_scan_uid_by_status(mvm, 0);
+ if (uid < 0)
+ return uid;
+
+ hcmd->id = iwl_cmd_id(SCAN_REQ_UMAC, IWL_ALWAYS_LONG_GROUP, 0);
+
+ scan_ver = iwl_mvm_lookup_cmd_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP,
+ SCAN_REQ_UMAC);
+
+ for (i = 0; i < ARRAY_SIZE(iwl_scan_umac_handlers); i++) {
+ const struct iwl_scan_umac_handler *ver_handler =
+ &iwl_scan_umac_handlers[i];
+
+ if (ver_handler->version != scan_ver)
+ continue;
+
+ return ver_handler->handler(mvm, vif, params, type, uid);
+ }
+
+ return iwl_mvm_scan_umac(mvm, vif, params, type, uid);
+}
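/*
 * Editorial sketch (not part of this patch): the version-dispatch
 * pattern introduced by iwl_mvm_build_scan_cmd(). A table ordered
 * newest-first is walked for the firmware-advertised command version,
 * falling back to the legacy path when nothing matches. Handler
 * bodies here are stubs for illustration.
 */
#include <stddef.h>
#include <stdio.h>

typedef int (*scan_handler_t)(void);

static int scan_v13(void) { return 13; }
static int scan_v12(void) { return 12; }
static int scan_legacy(void) { return 0; }

static const struct { int version; scan_handler_t handler; } handlers[] = {
	{ 13, scan_v13 },	/* newest first: shortest traversal */
	{ 12, scan_v12 },
};

static int build_scan_cmd(int fw_ver)
{
	size_t i;

	for (i = 0; i < sizeof(handlers) / sizeof(handlers[0]); i++)
		if (handlers[i].version == fw_ver)
			return handlers[i].handler();
	return scan_legacy();	/* unknown version: legacy command */
}

int main(void)
{
	printf("%d %d\n", build_scan_cmd(13), build_scan_cmd(9));
	/* prints 13 0 */
	return 0;
}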
+
int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct cfg80211_scan_request *req,
struct ieee80211_scan_ies *ies)
@@ -1786,14 +2252,8 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
iwl_mvm_build_scan_probe(mvm, vif, ies, &params);
- if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
- hcmd.id = iwl_cmd_id(SCAN_REQ_UMAC, IWL_ALWAYS_LONG_GROUP, 0);
- ret = iwl_mvm_scan_umac(mvm, vif, &params,
- IWL_MVM_SCAN_REGULAR);
- } else {
- hcmd.id = SCAN_OFFLOAD_REQUEST_CMD;
- ret = iwl_mvm_scan_lmac(mvm, vif, &params);
- }
+ ret = iwl_mvm_build_scan_cmd(mvm, vif, &hcmd, &params,
+ IWL_MVM_SCAN_REGULAR);
if (ret)
return ret;
@@ -1891,13 +2351,7 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
iwl_mvm_build_scan_probe(mvm, vif, ies, &params);
- if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
- hcmd.id = iwl_cmd_id(SCAN_REQ_UMAC, IWL_ALWAYS_LONG_GROUP, 0);
- ret = iwl_mvm_scan_umac(mvm, vif, &params, type);
- } else {
- hcmd.id = SCAN_OFFLOAD_REQUEST_CMD;
- ret = iwl_mvm_scan_lmac(mvm, vif, &params);
- }
+ ret = iwl_mvm_build_scan_cmd(mvm, vif, &hcmd, &params, type);
if (ret)
return ret;
@@ -2048,10 +2502,31 @@ static int iwl_mvm_scan_stop_wait(struct iwl_mvm *mvm, int type)
1 * HZ);
}
+#define IWL_SCAN_REQ_UMAC_HANDLE_SIZE(_ver) { \
+ case (_ver): return sizeof(struct iwl_scan_req_umac_v##_ver); \
+}
+
+static int iwl_scan_req_umac_get_size(u8 scan_ver)
+{
+ switch (scan_ver) {
+ IWL_SCAN_REQ_UMAC_HANDLE_SIZE(13);
+ IWL_SCAN_REQ_UMAC_HANDLE_SIZE(12);
+ IWL_SCAN_REQ_UMAC_HANDLE_SIZE(11);
+ }
+
+ return 0;
+}
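/*
 * Editorial sketch (not part of this patch): the token-pasting trick
 * used by IWL_SCAN_REQ_UMAC_HANDLE_SIZE() above. One macro generates
 * each switch case, so adding a new command version is a one-line
 * change. The struct names are stand-ins for the real
 * iwl_scan_req_umac_v* types.
 */
#include <stdio.h>

struct req_v11 { char payload[44]; };
struct req_v12 { char payload[52]; };
struct req_v13 { char payload[60]; };

#define HANDLE_SIZE(_ver) \
	case (_ver): return sizeof(struct req_v##_ver)

static size_t req_size(int ver)
{
	switch (ver) {
	HANDLE_SIZE(13);
	HANDLE_SIZE(12);
	HANDLE_SIZE(11);
	}
	return 0;	/* unknown version: caller falls back */
}

int main(void)
{
	printf("%zu %zu\n", req_size(13), req_size(7));	/* prints 60 0 */
	return 0;
}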
+
int iwl_mvm_scan_size(struct iwl_mvm *mvm)
{
- int base_size = IWL_SCAN_REQ_UMAC_SIZE_V1;
- int tail_size;
+ int base_size, tail_size;
+ u8 scan_ver = iwl_mvm_lookup_cmd_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP,
+ SCAN_REQ_UMAC);
+
+ base_size = iwl_scan_req_umac_get_size(scan_ver);
+ if (base_size)
+ return base_size;
+
if (iwl_mvm_is_adaptive_dwell_v2_supported(mvm))
base_size = IWL_SCAN_REQ_UMAC_SIZE_V8;
@@ -2059,6 +2534,8 @@ int iwl_mvm_scan_size(struct iwl_mvm *mvm)
base_size = IWL_SCAN_REQ_UMAC_SIZE_V7;
else if (iwl_mvm_cdb_scan_api(mvm))
base_size = IWL_SCAN_REQ_UMAC_SIZE_V6;
+ else
+ base_size = IWL_SCAN_REQ_UMAC_SIZE_V1;
if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
if (iwl_mvm_is_scan_ext_chan_supported(mvm))
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index b3768d5d852a..7b35f416404c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -2844,13 +2844,12 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (normalized_ssn == tid_data->next_reclaimed) {
tid_data->state = IWL_AGG_STARTING;
- ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ ret = IEEE80211_AMPDU_TX_START_IMMEDIATE;
} else {
tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA;
+ ret = 0;
}
- ret = 0;
-
out:
spin_unlock_bh(&mvmsta->lock);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
index a06bc63fb516..51b138673ddb 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
@@ -734,6 +734,11 @@ void iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
return;
}
+/*
+ * When the firmware supports the session protection API,
+ * this is not needed since it'll automatically remove the
+ * session protection after association + beacon reception.
+ */
void iwl_mvm_stop_session_protection(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
@@ -757,6 +762,101 @@ void iwl_mvm_stop_session_protection(struct iwl_mvm *mvm,
iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
}
+void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_mvm_session_prot_notif *notif = (void *)pkt->data;
+ struct ieee80211_vif *vif;
+
+ rcu_read_lock();
+ vif = iwl_mvm_rcu_dereference_vif_id(mvm, le32_to_cpu(notif->mac_id),
+ true);
+
+ if (!vif)
+ goto out_unlock;
+
+ /* The vif is not a P2P_DEVICE, maintain its time_event_data */
+ if (vif->type != NL80211_IFTYPE_P2P_DEVICE) {
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_time_event_data *te_data =
+ &mvmvif->time_event_data;
+
+ if (!le32_to_cpu(notif->status)) {
+ iwl_mvm_te_check_disconnect(mvm, vif,
+ "Session protection failure");
+ iwl_mvm_te_clear_data(mvm, te_data);
+ }
+
+ if (le32_to_cpu(notif->start)) {
+ spin_lock_bh(&mvm->time_event_lock);
+ te_data->running = le32_to_cpu(notif->start);
+ te_data->end_jiffies =
+ TU_TO_EXP_TIME(te_data->duration);
+ spin_unlock_bh(&mvm->time_event_lock);
+ } else {
+ /*
+ * By now, we should have finished association
+ * and know the dtim period.
+ */
+ iwl_mvm_te_check_disconnect(mvm, vif,
+ "No beacon heard and the session protection is over already...");
+ iwl_mvm_te_clear_data(mvm, te_data);
+ }
+
+ goto out_unlock;
+ }
+
+ if (!le32_to_cpu(notif->status) || !le32_to_cpu(notif->start)) {
+ /* End TE, notify mac80211 */
+ ieee80211_remain_on_channel_expired(mvm->hw);
+ set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status);
+ iwl_mvm_roc_finished(mvm);
+ } else if (le32_to_cpu(notif->start)) {
+ set_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
+ ieee80211_ready_on_channel(mvm->hw); /* Start TE */
+ }
+
+ out_unlock:
+ rcu_read_unlock();
+}
+
+static int
+iwl_mvm_start_p2p_roc_session_protection(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ int duration,
+ enum ieee80211_roc_type type)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_session_prot_cmd cmd = {
+ .id_and_color =
+ cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
+ mvmvif->color)),
+ .action = cpu_to_le32(FW_CTXT_ACTION_ADD),
+ .duration_tu = cpu_to_le32(MSEC_TO_TU(duration)),
+ };
+
+ lockdep_assert_held(&mvm->mutex);
+
+ switch (type) {
+ case IEEE80211_ROC_TYPE_NORMAL:
+ cmd.conf_id =
+ cpu_to_le32(SESSION_PROTECT_CONF_P2P_DEVICE_DISCOV);
+ break;
+ case IEEE80211_ROC_TYPE_MGMT_TX:
+ cmd.conf_id =
+ cpu_to_le32(SESSION_PROTECT_CONF_P2P_GO_NEGOTIATION);
+ break;
+ default:
+ WARN_ONCE(1, "Got an invalid ROC type\n");
+ return -EINVAL;
+ }
+
+ return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(SESSION_PROTECTION_CMD,
+ MAC_CONF_GROUP, 0),
+ 0, sizeof(cmd), &cmd);
+}
+
int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
int duration, enum ieee80211_roc_type type)
{
@@ -770,6 +870,12 @@ int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return -EBUSY;
}
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD))
+ return iwl_mvm_start_p2p_roc_session_protection(mvm, vif,
+ duration,
+ type);
+
time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
time_cmd.id_and_color =
cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
@@ -847,11 +953,44 @@ void iwl_mvm_cleanup_roc_te(struct iwl_mvm *mvm)
__iwl_mvm_remove_time_event(mvm, te_data, &uid);
}
-void iwl_mvm_stop_roc(struct iwl_mvm *mvm)
+static void iwl_mvm_cancel_session_protection(struct iwl_mvm *mvm,
+ struct iwl_mvm_vif *mvmvif)
+{
+ struct iwl_mvm_session_prot_cmd cmd = {
+ .id_and_color =
+ cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
+ mvmvif->color)),
+ .action = cpu_to_le32(FW_CTXT_ACTION_REMOVE),
+ };
+ int ret;
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(SESSION_PROTECTION_CMD,
+ MAC_CONF_GROUP, 0),
+ 0, sizeof(cmd), &cmd);
+ if (ret)
+ IWL_ERR(mvm,
+ "Couldn't send the SESSION_PROTECTION_CMD: %d\n", ret);
+}
+
+void iwl_mvm_stop_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
struct iwl_mvm_vif *mvmvif;
struct iwl_mvm_time_event_data *te_data;
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) {
+ mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ iwl_mvm_cancel_session_protection(mvm, mvmvif);
+
+ if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
+ set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status);
+
+ iwl_mvm_roc_finished(mvm);
+
+ return;
+ }
+
te_data = iwl_mvm_get_roc_te(mvm);
if (!te_data) {
IWL_WARN(mvm, "No remain on channel event\n");
@@ -916,3 +1055,51 @@ int iwl_mvm_schedule_csa_period(struct iwl_mvm *mvm,
return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
}
+
+void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u32 duration, u32 min_duration)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
+
+ struct iwl_mvm_session_prot_cmd cmd = {
+ .id_and_color =
+ cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
+ mvmvif->color)),
+ .action = cpu_to_le32(FW_CTXT_ACTION_ADD),
+ .conf_id = cpu_to_le32(SESSION_PROTECT_CONF_ASSOC),
+ .duration_tu = cpu_to_le32(MSEC_TO_TU(duration)),
+ };
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ spin_lock_bh(&mvm->time_event_lock);
+ if (te_data->running &&
+ time_after(te_data->end_jiffies, TU_TO_EXP_TIME(min_duration))) {
+ IWL_DEBUG_TE(mvm, "We have enough time in the current TE: %u\n",
+ jiffies_to_msecs(te_data->end_jiffies - jiffies));
+ spin_unlock_bh(&mvm->time_event_lock);
+
+ return;
+ }
+
+ iwl_mvm_te_clear_data(mvm, te_data);
+ te_data->duration = le32_to_cpu(cmd.duration_tu);
+ spin_unlock_bh(&mvm->time_event_lock);
+
+ IWL_DEBUG_TE(mvm, "Add new session protection, duration %d TU\n",
+ le32_to_cpu(cmd.duration_tu));
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(SESSION_PROTECTION_CMD,
+ MAC_CONF_GROUP, 0),
+ 0, sizeof(cmd), &cmd);
+ if (ret) {
+ IWL_ERR(mvm,
+ "Couldn't send the SESSION_PROTECTION_CMD: %d\n", ret);
+ spin_lock_bh(&mvm->time_event_lock);
+ iwl_mvm_te_clear_data(mvm, te_data);
+ spin_unlock_bh(&mvm->time_event_lock);
+ }
+}
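/*
 * Editorial sketch (not part of this patch): the "enough time left?"
 * check in iwl_mvm_schedule_session_protection() above. A new
 * protection command is only issued when the running one ends before
 * min_duration from now. jiffies and time_after() are emulated with
 * plain integers here; in the driver min_duration is first converted
 * from TU.
 */
#include <stdbool.h>
#include <stdio.h>

static unsigned long jiffies = 1000;	/* fake current time */

static bool time_after(unsigned long a, unsigned long b)
{
	return (long)(b - a) < 0;	/* wrap-safe, as in the kernel */
}

static bool need_new_protection(bool running, unsigned long end_jiffies,
				unsigned long min_duration)
{
	/* keep the current event if it runs past now + min_duration */
	if (running && time_after(end_jiffies, jiffies + min_duration))
		return false;
	return true;
}

int main(void)
{
	printf("%d\n", need_new_protection(true, 1500, 200)); /* 0: enough */
	printf("%d\n", need_new_protection(true, 1100, 200)); /* 1: too short */
	return 0;
}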
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h
index 1dd3d01245ea..df6832b79666 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h
@@ -7,6 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright (C) 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -28,6 +29,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright (C) 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -178,12 +180,13 @@ int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
/**
* iwl_mvm_stop_roc - stop remain on channel functionality
* @mvm: the mvm component
+ * @vif: the virtual interface for which the roc is stopped
*
* This function can be used to cancel an ongoing ROC session.
* The function is async, it will instruct the FW to stop serving the ROC
* session, but will not wait for the actual stopping of the session.
*/
-void iwl_mvm_stop_roc(struct iwl_mvm *mvm);
+void iwl_mvm_stop_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
/**
* iwl_mvm_remove_time_event - general function to clean up of time event
@@ -242,4 +245,20 @@ iwl_mvm_te_scheduled(struct iwl_mvm_time_event_data *te_data)
return !!te_data->uid;
}
+/**
+ * iwl_mvm_schedule_session_protection - schedule a session protection
+ * @mvm: the mvm component
+ * @vif: the virtual interface for which the protection is issued
+ * @duration: the duration of the protection
+ * @min_duration: skip the request if the currently scheduled
+ *	protection already covers at least this much time
+ */
+void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u32 duration, u32 min_duration);
+
+/**
+ * iwl_mvm_rx_session_protect_notif - handles %SESSION_PROTECTION_NOTIF
+ */
+void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
+
#endif /* __time_event_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
index f0c539b37ea7..b5a16f00bada 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
@@ -8,6 +8,7 @@
* Copyright(c) 2013 - 2014, 2019 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2016 Intel Deutschland GmbH
+ * Copyright(c) 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,6 +31,7 @@
* Copyright(c) 2012 - 2014, 2019 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2016 Intel Deutschland GmbH
+ * Copyright(c) 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -482,26 +484,27 @@ static const struct iwl_tt_params iwl_mvm_default_tt_params = {
/* budget in mWatt */
static const u32 iwl_mvm_cdev_budgets[] = {
- 2000, /* cooling state 0 */
- 1800, /* cooling state 1 */
- 1600, /* cooling state 2 */
- 1400, /* cooling state 3 */
- 1200, /* cooling state 4 */
- 1000, /* cooling state 5 */
- 900, /* cooling state 6 */
- 800, /* cooling state 7 */
- 700, /* cooling state 8 */
- 650, /* cooling state 9 */
- 600, /* cooling state 10 */
- 550, /* cooling state 11 */
- 500, /* cooling state 12 */
- 450, /* cooling state 13 */
- 400, /* cooling state 14 */
- 350, /* cooling state 15 */
- 300, /* cooling state 16 */
- 250, /* cooling state 17 */
- 200, /* cooling state 18 */
- 150, /* cooling state 19 */
+ 2400, /* cooling state 0 */
+ 2000, /* cooling state 1 */
+ 1800, /* cooling state 2 */
+ 1600, /* cooling state 3 */
+ 1400, /* cooling state 4 */
+ 1200, /* cooling state 5 */
+ 1000, /* cooling state 6 */
+ 900, /* cooling state 7 */
+ 800, /* cooling state 8 */
+ 700, /* cooling state 9 */
+ 650, /* cooling state 10 */
+ 600, /* cooling state 11 */
+ 550, /* cooling state 12 */
+ 500, /* cooling state 13 */
+ 450, /* cooling state 14 */
+ 400, /* cooling state 15 */
+ 350, /* cooling state 16 */
+ 300, /* cooling state 17 */
+ 250, /* cooling state 18 */
+ 200, /* cooling state 19 */
+ 150, /* cooling state 20 */
};
int iwl_mvm_ctdp_command(struct iwl_mvm *mvm, u32 op, u32 state)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index 8a059da7a1fa..dc5c02fbc65a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -341,8 +341,11 @@ static u32 iwl_mvm_get_tx_rate(struct iwl_mvm *mvm,
rate_idx = rate_lowest_index(
&mvm->nvm_data->bands[info->band], sta);
- /* For 5 GHZ band, remap mac80211 rate indices into driver indices */
- if (info->band == NL80211_BAND_5GHZ)
+ /*
+ * For bands other than 2.4 GHz, remap mac80211 rate
+ * indices into driver indices
+ */
+ if (info->band != NL80211_BAND_2GHZ)
rate_idx += IWL_FIRST_OFDM_RATE;
/* For 2.4 GHZ band, check that there is no need to remap */
@@ -547,7 +550,7 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
}
if (mvm->trans->trans_cfg->device_family >=
- IWL_DEVICE_FAMILY_22560) {
+ IWL_DEVICE_FAMILY_AX210) {
struct iwl_tx_cmd_gen3 *cmd = (void *)dev_cmd->payload;
cmd->offload_assist |= cpu_to_le32(offload_assist);
@@ -935,7 +938,12 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
!(mvmsta->amsdu_enabled & BIT(tid)))
return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb);
- max_amsdu_len = iwl_mvm_max_amsdu_size(mvm, sta, tid);
+ /*
+ * Take the min of the ieee80211 station's and the mvm
+ * station's A-MSDU size limits
+ */
+ max_amsdu_len =
+ min_t(unsigned int, sta->max_amsdu_len,
+ iwl_mvm_max_amsdu_size(mvm, sta, tid));
/*
* Limit A-MSDU in A-MPDU to 4095 bytes when VHT is not
@@ -2051,7 +2059,7 @@ int iwl_mvm_flush_sta(struct iwl_mvm *mvm, void *sta, bool internal, u32 flags)
if (iwl_mvm_has_new_tx_api(mvm))
return iwl_mvm_flush_sta_tids(mvm, mvm_sta->sta_id,
- 0xff | BIT(IWL_MGMT_TID), flags);
+ 0xffff, flags);
if (internal)
return iwl_mvm_flush_tx_path(mvm, int_sta->tfd_queue_msk,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
index 8686107da116..6096276cb0d0 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
@@ -217,7 +217,7 @@ int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags,
int band_offset = 0;
/* Legacy rate format, search for match in table */
- if (band == NL80211_BAND_5GHZ)
+ if (band != NL80211_BAND_2GHZ)
band_offset = IWL_FIRST_OFDM_RATE;
for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++)
if (fw_rate_idx_to_plcp[idx] == rate)
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
index 74980382e64c..a4e09a5b1816 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
@@ -55,6 +55,66 @@
#include "internal.h"
#include "iwl-prph.h"
+static void
+iwl_pcie_ctxt_info_dbg_enable(struct iwl_trans *trans,
+ struct iwl_prph_scratch_hwm_cfg *dbg_cfg,
+ u32 *control_flags)
+{
+ enum iwl_fw_ini_allocation_id alloc_id = IWL_FW_INI_ALLOCATION_ID_DBGC1;
+ struct iwl_fw_ini_allocation_tlv *fw_mon_cfg;
+ u32 dbg_flags = 0;
+
+ if (!iwl_trans_dbg_ini_valid(trans)) {
+ struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
+
+ iwl_pcie_alloc_fw_monitor(trans, 0);
+
+ if (fw_mon->size) {
+ dbg_flags |= IWL_PRPH_SCRATCH_EDBG_DEST_DRAM;
+
+ IWL_DEBUG_FW(trans,
+ "WRT: Applying DRAM buffer destination\n");
+
+ dbg_cfg->hwm_base_addr = cpu_to_le64(fw_mon->physical);
+ dbg_cfg->hwm_size = cpu_to_le32(fw_mon->size);
+ }
+
+ goto out;
+ }
+
+ fw_mon_cfg = &trans->dbg.fw_mon_cfg[alloc_id];
+
+ if (le32_to_cpu(fw_mon_cfg->buf_location) ==
+ IWL_FW_INI_LOCATION_SRAM_PATH) {
+ dbg_flags |= IWL_PRPH_SCRATCH_EDBG_DEST_INTERNAL;
+
+ IWL_DEBUG_FW(trans,
+ "WRT: Applying SMEM buffer destination\n");
+
+ goto out;
+ }
+
+ if (le32_to_cpu(fw_mon_cfg->buf_location) ==
+ IWL_FW_INI_LOCATION_DRAM_PATH &&
+ trans->dbg.fw_mon_ini[alloc_id].num_frags) {
+ struct iwl_dram_data *frag =
+ &trans->dbg.fw_mon_ini[alloc_id].frags[0];
+
+ dbg_flags |= IWL_PRPH_SCRATCH_EDBG_DEST_DRAM;
+
+ IWL_DEBUG_FW(trans,
+ "WRT: Applying DRAM destination (alloc_id=%u)\n",
+ alloc_id);
+
+ dbg_cfg->hwm_base_addr = cpu_to_le64(frag->physical);
+ dbg_cfg->hwm_size = cpu_to_le32(frag->size);
+ }
+
+out:
+ if (dbg_flags)
+ *control_flags |= IWL_PRPH_SCRATCH_EARLY_DEBUG_EN | dbg_flags;
+}
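/*
 * Editorial sketch (not part of this patch): the helper above picks a
 * monitor destination, ORs destination bits into a local flag word,
 * and only sets the early-debug enable bit when some destination was
 * actually chosen. A simplified model with hypothetical flag values:
 */
#include <stdint.h>
#include <stdio.h>

#define DEST_DRAM	0x1	/* stand-ins for the PRPH scratch bits */
#define DEST_INTERNAL	0x2
#define EARLY_DEBUG_EN	0x80

static void dbg_enable(int have_sram, int have_dram, uint32_t *control)
{
	uint32_t dbg_flags = 0;

	if (have_sram)
		dbg_flags |= DEST_INTERNAL;
	else if (have_dram)
		dbg_flags |= DEST_DRAM;

	if (dbg_flags)	/* enable only when a destination exists */
		*control |= EARLY_DEBUG_EN | dbg_flags;
}

int main(void)
{
	uint32_t control = 0;

	dbg_enable(0, 1, &control);
	printf("0x%x\n", control);	/* prints 0x81 */
	return 0;
}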
+
int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
const struct fw_img *fw)
{
@@ -86,24 +146,15 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
control_flags = IWL_PRPH_SCRATCH_RB_SIZE_4K |
IWL_PRPH_SCRATCH_MTR_MODE |
(IWL_PRPH_MTR_FORMAT_256B &
- IWL_PRPH_SCRATCH_MTR_FORMAT) |
- IWL_PRPH_SCRATCH_EARLY_DEBUG_EN |
- IWL_PRPH_SCRATCH_EDBG_DEST_DRAM;
- prph_sc_ctrl->control.control_flags = cpu_to_le32(control_flags);
+ IWL_PRPH_SCRATCH_MTR_FORMAT);
/* initialize RX default queue */
prph_sc_ctrl->rbd_cfg.free_rbd_addr =
cpu_to_le64(trans_pcie->rxq->bd_dma);
- /* Configure debug, for integration */
- if (!iwl_trans_dbg_ini_valid(trans))
- iwl_pcie_alloc_fw_monitor(trans, 0);
- if (trans->dbg.num_blocks) {
- prph_sc_ctrl->hwm_cfg.hwm_base_addr =
- cpu_to_le64(trans->dbg.fw_mon[0].physical);
- prph_sc_ctrl->hwm_cfg.hwm_size =
- cpu_to_le32(trans->dbg.fw_mon[0].size);
- }
+ iwl_pcie_ctxt_info_dbg_enable(trans, &prph_sc_ctrl->hwm_cfg,
+ &control_flags);
+ prph_sc_ctrl->control.control_flags = cpu_to_le32(control_flags);
/* allocate ucode sections in dram and set addresses */
ret = iwl_pcie_init_fw_sec(trans, fw, &prph_scratch->dram);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index 1047d48beaa5..a091690f6c79 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -166,7 +166,7 @@ struct iwl_rx_completion_desc {
* @id: queue index
* @bd: driver's pointer to buffer of receive buffer descriptors (rbd).
* Address size is 32 bit in pre-9000 devices and 64 bit in 9000 devices.
- * In 22560 devices it is a pointer to a list of iwl_rx_transfer_desc's
+ * In AX210 devices it is a pointer to a list of iwl_rx_transfer_desc's
* @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
* @ubd: driver's pointer to buffer of used receive buffer descriptors (rbd)
* @ubd_dma: physical address of buffer of used receive buffer descriptors (rbd)
@@ -264,7 +264,7 @@ static inline int iwl_queue_inc_wrap(struct iwl_trans *trans, int index)
static inline __le16 iwl_get_closed_rb_stts(struct iwl_trans *trans,
struct iwl_rxq *rxq)
{
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
__le16 *rb_stts = rxq->rb_stts;
return READ_ONCE(*rb_stts);
@@ -646,7 +646,6 @@ void iwl_trans_pcie_free(struct iwl_trans *trans);
/*****************************************************
* RX
******************************************************/
-int _iwl_pcie_rx_init(struct iwl_trans *trans);
int iwl_pcie_rx_init(struct iwl_trans *trans);
int iwl_pcie_gen2_rx_init(struct iwl_trans *trans);
irqreturn_t iwl_pcie_msix_isr(int irq, void *data);
@@ -660,7 +659,6 @@ void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq);
int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget);
void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
struct iwl_rxq *rxq);
-int iwl_pcie_rx_alloc(struct iwl_trans *trans);
/*****************************************************
* ICT - interrupt handling
@@ -702,9 +700,6 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
struct sk_buff_head *skbs);
void iwl_trans_pcie_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr);
void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);
-void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie,
- struct iwl_txq *txq, u16 byte_cnt,
- int num_tbs);
static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_trans *trans, void *_tfd,
u8 idx)
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index 19dd075f2f63..a4d325fcf94a 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -200,8 +200,8 @@ static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)
*/
int iwl_pcie_rx_stop(struct iwl_trans *trans)
{
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
- /* TODO: remove this for 22560 once fw does it */
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+ /* TODO: remove this once fw does it */
iwl_write_umac_prph(trans, RFH_RXF_DMA_CFG_GEN3, 0);
return iwl_poll_umac_prph_bit(trans, RFH_GEN_STATUS_GEN3,
RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
@@ -247,11 +247,7 @@ static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
}
rxq->write_actual = round_down(rxq->write, 8);
- if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22560)
- iwl_write32(trans, HBUS_TARG_WRPTR,
- (rxq->write_actual |
- ((FIRST_RX_QUEUE + rxq->id) << 16)));
- else if (trans->trans_cfg->mq_rx_supported)
+ if (trans->trans_cfg->mq_rx_supported)
iwl_write32(trans, RFH_Q_FRBDCB_WIDX_TRG(rxq->id),
rxq->write_actual);
else
@@ -279,7 +275,7 @@ static void iwl_pcie_restock_bd(struct iwl_trans *trans,
struct iwl_rxq *rxq,
struct iwl_rx_mem_buffer *rxb)
{
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
struct iwl_rx_transfer_desc *bd = rxq->bd;
BUILD_BUG_ON(sizeof(*bd) != 2 * sizeof(u64));
@@ -326,7 +322,7 @@ static void iwl_pcie_rxmq_restock(struct iwl_trans *trans,
WARN_ON(rxb->page_dma & DMA_BIT_MASK(12));
/* Point to Rx buffer via next RBD in circular buffer */
iwl_pcie_restock_bd(trans, rxq, rxb);
- rxq->write = (rxq->write + 1) & MQ_RX_TABLE_MASK;
+ rxq->write = (rxq->write + 1) & (rxq->queue_size - 1);
rxq->free_count--;
}
spin_unlock(&rxq->lock);
@@ -691,7 +687,7 @@ static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans,
{
struct device *dev = trans->dev;
bool use_rx_td = (trans->trans_cfg->device_family >=
- IWL_DEVICE_FAMILY_22560);
+ IWL_DEVICE_FAMILY_AX210);
int free_size = iwl_pcie_free_bd_size(trans, use_rx_td);
if (rxq->bd)
@@ -712,7 +708,7 @@ static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans,
rxq->used_bd_dma = 0;
rxq->used_bd = NULL;
- if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_22560)
+ if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
return;
if (rxq->tr_tail)
@@ -736,7 +732,7 @@ static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
int i;
int free_size;
bool use_rx_td = (trans->trans_cfg->device_family >=
- IWL_DEVICE_FAMILY_22560);
+ IWL_DEVICE_FAMILY_AX210);
size_t rb_stts_size = use_rx_td ? sizeof(__le16) :
sizeof(struct iwl_rb_status);
@@ -784,11 +780,6 @@ static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
&rxq->cr_tail_dma, GFP_KERNEL);
if (!rxq->cr_tail)
goto err;
- /*
- * W/A 22560 device step Z0 must be non zero bug
- * TODO: remove this when stop supporting Z0
- */
- *rxq->cr_tail = cpu_to_le16(500);
return 0;
@@ -802,13 +793,13 @@ err:
return -ENOMEM;
}
-int iwl_pcie_rx_alloc(struct iwl_trans *trans)
+static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rb_allocator *rba = &trans_pcie->rba;
int i, ret;
size_t rb_stts_size = trans->trans_cfg->device_family >=
- IWL_DEVICE_FAMILY_22560 ?
+ IWL_DEVICE_FAMILY_AX210 ?
sizeof(__le16) : sizeof(struct iwl_rb_status);
if (WARN_ON(trans_pcie->rxq))
@@ -1033,7 +1024,7 @@ int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget)
return 0;
}
-int _iwl_pcie_rx_init(struct iwl_trans *trans)
+static int _iwl_pcie_rx_init(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rxq *def_rxq;
@@ -1074,8 +1065,9 @@ int _iwl_pcie_rx_init(struct iwl_trans *trans)
rxq->read = 0;
rxq->write = 0;
rxq->write_actual = 0;
- memset(rxq->rb_stts, 0, (trans->trans_cfg->device_family >=
- IWL_DEVICE_FAMILY_22560) ?
+ memset(rxq->rb_stts, 0,
+ (trans->trans_cfg->device_family >=
+ IWL_DEVICE_FAMILY_AX210) ?
sizeof(__le16) : sizeof(struct iwl_rb_status));
iwl_pcie_rx_init_rxb_lists(rxq);
@@ -1152,7 +1144,7 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
struct iwl_rb_allocator *rba = &trans_pcie->rba;
int i;
size_t rb_stts_size = trans->trans_cfg->device_family >=
- IWL_DEVICE_FAMILY_22560 ?
+ IWL_DEVICE_FAMILY_AX210 ?
sizeof(__le16) : sizeof(struct iwl_rb_status);
/*
@@ -1347,7 +1339,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
}
page_stolen |= rxcb._page_stolen;
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
break;
offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);
}
@@ -1399,7 +1391,7 @@ static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans,
}
/* used_bd entries are 32/16 bits wide, but only 12 bits are used to retrieve the vid */
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
vid = le16_to_cpu(rxq->cd[i].rbid) & 0x0FFF;
else
vid = le32_to_cpu(rxq->bd_32[i]) & 0x0FFF;
@@ -1515,7 +1507,7 @@ out:
/* Backtrack one entry */
rxq->read = i;
/* update cr tail with the rxq read pointer */
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
*rxq->cr_tail = cpu_to_le16(r);
spin_unlock(&rxq->lock);
@@ -2152,8 +2144,7 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
/* Error detected by uCode */
if ((inta_fh & MSIX_FH_INT_CAUSES_FH_ERR) ||
- (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR) ||
- (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR_V2)) {
+ (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR)) {
IWL_ERR(trans,
"Microcode SW error detected. Restarting 0x%X.\n",
inta_fh);
@@ -2185,17 +2176,7 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
}
}
- if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22560 &&
- inta_hw & MSIX_HW_INT_CAUSES_REG_IPC) {
- /* Reflect IML transfer status */
- int res = iwl_read32(trans, CSR_IML_RESP_ADDR);
-
- IWL_DEBUG_ISR(trans, "IML transfer status: %d\n", res);
- if (res == IWL_IMAGE_RESP_FAIL) {
- isr_stats->sw++;
- iwl_pcie_irq_handle_error(trans);
- }
- } else if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP) {
+ if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP) {
u32 sleep_notif =
le32_to_cpu(trans_pcie->prph_info->sleep_notif);
if (sleep_notif == IWL_D3_SLEEP_STATUS_SUSPEND ||
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
index ca3bb4d65b00..0252716c0b24 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
@@ -193,7 +193,7 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans)
}
iwl_pcie_ctxt_info_free_paging(trans);
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
iwl_pcie_ctxt_info_gen3_free(trans);
else
iwl_pcie_ctxt_info_free(trans);
@@ -365,7 +365,7 @@ int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
goto out;
}
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
ret = iwl_pcie_ctxt_info_gen3_init(trans, fw);
else
ret = iwl_pcie_ctxt_info_init(trans, fw);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index 6961f00ff812..af9bc6b64542 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -190,32 +190,36 @@ static void iwl_trans_pcie_sw_reset(struct iwl_trans *trans)
static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans)
{
- int i;
+ struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
- for (i = 0; i < trans->dbg.num_blocks; i++) {
- dma_free_coherent(trans->dev, trans->dbg.fw_mon[i].size,
- trans->dbg.fw_mon[i].block,
- trans->dbg.fw_mon[i].physical);
- trans->dbg.fw_mon[i].block = NULL;
- trans->dbg.fw_mon[i].physical = 0;
- trans->dbg.fw_mon[i].size = 0;
- trans->dbg.num_blocks--;
- }
+ if (!fw_mon->size)
+ return;
+
+ dma_free_coherent(trans->dev, fw_mon->size, fw_mon->block,
+ fw_mon->physical);
+
+ fw_mon->block = NULL;
+ fw_mon->physical = 0;
+ fw_mon->size = 0;
}
static void iwl_pcie_alloc_fw_monitor_block(struct iwl_trans *trans,
u8 max_power, u8 min_power)
{
- void *cpu_addr = NULL;
- dma_addr_t phys = 0;
+ struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
+ void *block = NULL;
+ dma_addr_t physical = 0;
u32 size = 0;
u8 power;
+ if (fw_mon->size)
+ return;
+
for (power = max_power; power >= min_power; power--) {
size = BIT(power);
- cpu_addr = dma_alloc_coherent(trans->dev, size, &phys,
- GFP_KERNEL | __GFP_NOWARN);
- if (!cpu_addr)
+ block = dma_alloc_coherent(trans->dev, size, &physical,
+ GFP_KERNEL | __GFP_NOWARN);
+ if (!block)
continue;
IWL_INFO(trans,
@@ -224,7 +228,7 @@ static void iwl_pcie_alloc_fw_monitor_block(struct iwl_trans *trans,
break;
}
- if (WARN_ON_ONCE(!cpu_addr))
+ if (WARN_ON_ONCE(!block))
return;
if (power != max_power)
@@ -233,10 +237,9 @@ static void iwl_pcie_alloc_fw_monitor_block(struct iwl_trans *trans,
(unsigned long)BIT(power - 10),
(unsigned long)BIT(max_power - 10));
- trans->dbg.fw_mon[trans->dbg.num_blocks].block = cpu_addr;
- trans->dbg.fw_mon[trans->dbg.num_blocks].physical = phys;
- trans->dbg.fw_mon[trans->dbg.num_blocks].size = size;
- trans->dbg.num_blocks++;
+ fw_mon->block = block;
+ fw_mon->physical = physical;
+ fw_mon->size = size;
}
void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power)
@@ -253,11 +256,7 @@ void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power)
max_power))
return;
- /*
- * This function allocats the default fw monitor.
- * The optional additional ones will be allocated in runtime
- */
- if (trans->dbg.num_blocks)
+ if (trans->dbg.fw_mon.size)
return;
iwl_pcie_alloc_fw_monitor_block(trans, max_power, 11);
@@ -891,24 +890,51 @@ static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans,
return 0;
}
+static void iwl_pcie_apply_destination_ini(struct iwl_trans *trans)
+{
+ enum iwl_fw_ini_allocation_id alloc_id = IWL_FW_INI_ALLOCATION_ID_DBGC1;
+ struct iwl_fw_ini_allocation_tlv *fw_mon_cfg =
+ &trans->dbg.fw_mon_cfg[alloc_id];
+ struct iwl_dram_data *frag;
+
+ if (!iwl_trans_dbg_ini_valid(trans))
+ return;
+
+ if (le32_to_cpu(fw_mon_cfg->buf_location) ==
+ IWL_FW_INI_LOCATION_SRAM_PATH) {
+ IWL_DEBUG_FW(trans, "WRT: Applying SMEM buffer destination\n");
+ /* set sram monitor by enabling bit 7 */
+ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_BIT_MONITOR_SRAM);
+
+ return;
+ }
+
+ if (le32_to_cpu(fw_mon_cfg->buf_location) !=
+ IWL_FW_INI_LOCATION_DRAM_PATH ||
+ !trans->dbg.fw_mon_ini[alloc_id].num_frags)
+ return;
+
+ frag = &trans->dbg.fw_mon_ini[alloc_id].frags[0];
+
+ IWL_DEBUG_FW(trans, "WRT: Applying DRAM destination (alloc_id=%u)\n",
+ alloc_id);
+
+ iwl_write_umac_prph(trans, MON_BUFF_BASE_ADDR_VER2,
+ frag->physical >> MON_BUFF_SHIFT_VER2);
+ iwl_write_umac_prph(trans, MON_BUFF_END_ADDR_VER2,
+ (frag->physical + frag->size - 256) >>
+ MON_BUFF_SHIFT_VER2);
+}
+
void iwl_pcie_apply_destination(struct iwl_trans *trans)
{
const struct iwl_fw_dbg_dest_tlv_v1 *dest = trans->dbg.dest_tlv;
+ const struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
int i;
if (iwl_trans_dbg_ini_valid(trans)) {
- if (!trans->dbg.num_blocks)
- return;
-
- IWL_DEBUG_FW(trans,
- "WRT: Applying DRAM buffer[0] destination\n");
- iwl_write_umac_prph(trans, MON_BUFF_BASE_ADDR_VER2,
- trans->dbg.fw_mon[0].physical >>
- MON_BUFF_SHIFT_VER2);
- iwl_write_umac_prph(trans, MON_BUFF_END_ADDR_VER2,
- (trans->dbg.fw_mon[0].physical +
- trans->dbg.fw_mon[0].size - 256) >>
- MON_BUFF_SHIFT_VER2);
+ iwl_pcie_apply_destination_ini(trans);
return;
}
@@ -959,20 +985,17 @@ void iwl_pcie_apply_destination(struct iwl_trans *trans)
}
monitor:
- if (dest->monitor_mode == EXTERNAL_MODE && trans->dbg.fw_mon[0].size) {
+ if (dest->monitor_mode == EXTERNAL_MODE && fw_mon->size) {
iwl_write_prph(trans, le32_to_cpu(dest->base_reg),
- trans->dbg.fw_mon[0].physical >>
- dest->base_shift);
+ fw_mon->physical >> dest->base_shift);
if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000)
iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
- (trans->dbg.fw_mon[0].physical +
- trans->dbg.fw_mon[0].size - 256) >>
- dest->end_shift);
+ (fw_mon->physical + fw_mon->size -
+ 256) >> dest->end_shift);
else
iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
- (trans->dbg.fw_mon[0].physical +
- trans->dbg.fw_mon[0].size) >>
- dest->end_shift);
+ (fw_mon->physical + fw_mon->size) >>
+ dest->end_shift);
}
}
@@ -1006,14 +1029,14 @@ static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
/* supported for 7000 only for the moment */
if (iwlwifi_mod_params.fw_monitor &&
trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000) {
- iwl_pcie_alloc_fw_monitor(trans, 0);
+ struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
- if (trans->dbg.fw_mon[0].size) {
+ iwl_pcie_alloc_fw_monitor(trans, 0);
+ if (fw_mon->size) {
iwl_write_prph(trans, MON_BUFF_BASE_ADDR,
- trans->dbg.fw_mon[0].physical >> 4);
+ fw_mon->physical >> 4);
iwl_write_prph(trans, MON_BUFF_END_ADDR,
- (trans->dbg.fw_mon[0].physical +
- trans->dbg.fw_mon[0].size) >> 4);
+ (fw_mon->physical + fw_mon->size) >> 4);
}
} else if (iwl_pcie_dbg_on(trans)) {
iwl_pcie_apply_destination(trans);
@@ -1112,30 +1135,12 @@ static struct iwl_causes_list causes_list[] = {
{MSIX_HW_INT_CAUSES_REG_HAP, CSR_MSIX_HW_INT_MASK_AD, 0x2E},
};
-static struct iwl_causes_list causes_list_v2[] = {
- {MSIX_FH_INT_CAUSES_D2S_CH0_NUM, CSR_MSIX_FH_INT_MASK_AD, 0},
- {MSIX_FH_INT_CAUSES_D2S_CH1_NUM, CSR_MSIX_FH_INT_MASK_AD, 0x1},
- {MSIX_FH_INT_CAUSES_S2D, CSR_MSIX_FH_INT_MASK_AD, 0x3},
- {MSIX_FH_INT_CAUSES_FH_ERR, CSR_MSIX_FH_INT_MASK_AD, 0x5},
- {MSIX_HW_INT_CAUSES_REG_ALIVE, CSR_MSIX_HW_INT_MASK_AD, 0x10},
- {MSIX_HW_INT_CAUSES_REG_IPC, CSR_MSIX_HW_INT_MASK_AD, 0x11},
- {MSIX_HW_INT_CAUSES_REG_SW_ERR_V2, CSR_MSIX_HW_INT_MASK_AD, 0x15},
- {MSIX_HW_INT_CAUSES_REG_CT_KILL, CSR_MSIX_HW_INT_MASK_AD, 0x16},
- {MSIX_HW_INT_CAUSES_REG_RF_KILL, CSR_MSIX_HW_INT_MASK_AD, 0x17},
- {MSIX_HW_INT_CAUSES_REG_PERIODIC, CSR_MSIX_HW_INT_MASK_AD, 0x18},
- {MSIX_HW_INT_CAUSES_REG_SCD, CSR_MSIX_HW_INT_MASK_AD, 0x2A},
- {MSIX_HW_INT_CAUSES_REG_FH_TX, CSR_MSIX_HW_INT_MASK_AD, 0x2B},
- {MSIX_HW_INT_CAUSES_REG_HW_ERR, CSR_MSIX_HW_INT_MASK_AD, 0x2D},
- {MSIX_HW_INT_CAUSES_REG_HAP, CSR_MSIX_HW_INT_MASK_AD, 0x2E},
-};
-
static void iwl_pcie_map_non_rx_causes(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int val = trans_pcie->def_irq | MSIX_NON_AUTO_CLEAR_CAUSE;
- int i, arr_size =
- (trans->trans_cfg->device_family != IWL_DEVICE_FAMILY_22560) ?
- ARRAY_SIZE(causes_list) : ARRAY_SIZE(causes_list_v2);
+ int i, arr_size = ARRAY_SIZE(causes_list);
+ struct iwl_causes_list *causes = causes_list;
/*
* Access all non RX causes and map them to the default irq.
@@ -1143,11 +1148,6 @@ static void iwl_pcie_map_non_rx_causes(struct iwl_trans *trans)
* the first interrupt vector will serve non-RX and FBQ causes.
*/
for (i = 0; i < arr_size; i++) {
- struct iwl_causes_list *causes =
- (trans->trans_cfg->device_family !=
- IWL_DEVICE_FAMILY_22560) ?
- causes_list : causes_list_v2;
-
iwl_write8(trans, CSR_MSIX_IVAR(causes[i].addr), val);
iwl_clear_bit(trans, causes[i].mask_reg,
causes[i].cause_num);
@@ -1871,7 +1871,7 @@ static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)
static u32 iwl_trans_pcie_prph_msk(struct iwl_trans *trans)
{
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
return 0x00FFFFFF;
else
return 0x000FFFFF;
@@ -2559,7 +2559,7 @@ static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
char *buf;
int pos = 0, i, ret;
- size_t bufsz = sizeof(buf);
+ size_t bufsz;
bufsz = sizeof(char) * 121 * trans->num_rx_queues;
@@ -2801,7 +2801,7 @@ static ssize_t iwl_dbgfs_monitor_data_read(struct file *file,
{
struct iwl_trans *trans = file->private_data;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- void *cpu_addr = (void *)trans->dbg.fw_mon[0].block, *curr_buf;
+ void *cpu_addr = (void *)trans->dbg.fw_mon.block, *curr_buf;
struct cont_rec *data = &trans_pcie->fw_mon_data;
u32 write_ptr_addr, wrap_cnt_addr, write_ptr, wrap_cnt;
ssize_t size, bytes_copied = 0;
@@ -2840,7 +2840,7 @@ static ssize_t iwl_dbgfs_monitor_data_read(struct file *file,
} else if (data->prev_wrap_cnt == wrap_cnt - 1 &&
write_ptr < data->prev_wr_ptr) {
- size = trans->dbg.fw_mon[0].size - data->prev_wr_ptr;
+ size = trans->dbg.fw_mon.size - data->prev_wr_ptr;
curr_buf = cpu_addr + data->prev_wr_ptr;
b_full = iwl_write_to_user_buf(user_buf, count,
curr_buf, &size,
@@ -3087,10 +3087,11 @@ iwl_trans_pcie_dump_monitor(struct iwl_trans *trans,
struct iwl_fw_error_dump_data **data,
u32 monitor_len)
{
+ struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
u32 len = 0;
if (trans->dbg.dest_tlv ||
- (trans->dbg.num_blocks &&
+ (fw_mon->size &&
(trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000 ||
trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210))) {
struct iwl_fw_error_dump_fw_mon *fw_mon_data;
@@ -3101,12 +3102,9 @@ iwl_trans_pcie_dump_monitor(struct iwl_trans *trans,
iwl_trans_pcie_dump_pointers(trans, fw_mon_data);
len += sizeof(**data) + sizeof(*fw_mon_data);
- if (trans->dbg.num_blocks) {
- memcpy(fw_mon_data->data,
- trans->dbg.fw_mon[0].block,
- trans->dbg.fw_mon[0].size);
-
- monitor_len = trans->dbg.fw_mon[0].size;
+ if (fw_mon->size) {
+ memcpy(fw_mon_data->data, fw_mon->block, fw_mon->size);
+ monitor_len = fw_mon->size;
} else if (trans->dbg.dest_tlv->monitor_mode == SMEM_MODE) {
u32 base = le32_to_cpu(fw_mon_data->fw_mon_base_ptr);
/*
@@ -3145,11 +3143,11 @@ iwl_trans_pcie_dump_monitor(struct iwl_trans *trans,
static int iwl_trans_get_fw_monitor_len(struct iwl_trans *trans, u32 *len)
{
- if (trans->dbg.num_blocks) {
+ if (trans->dbg.fw_mon.size) {
*len += sizeof(struct iwl_fw_error_dump_data) +
sizeof(struct iwl_fw_error_dump_fw_mon) +
- trans->dbg.fw_mon[0].size;
- return trans->dbg.fw_mon[0].size;
+ trans->dbg.fw_mon.size;
+ return trans->dbg.fw_mon.size;
} else if (trans->dbg.dest_tlv) {
u32 base, end, cfg_reg, monitor_len;
@@ -3604,6 +3602,8 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
mutex_init(&trans_pcie->fw_mon_data.mutex);
#endif
+ iwl_dbg_tlv_init(trans);
+
return trans;
out_free_ict:
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
index 8894027429d6..8ca0250de99e 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
@@ -86,9 +86,9 @@ void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans)
/*
* iwl_pcie_txq_update_byte_tbl - Set up entry in Tx byte-count array
*/
-void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie,
- struct iwl_txq *txq, u16 byte_cnt,
- int num_tbs)
+static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie,
+ struct iwl_txq *txq, u16 byte_cnt,
+ int num_tbs)
{
struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr;
struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);
@@ -113,14 +113,14 @@ void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie,
*/
num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
- /* Starting from 22560, the HW expects bytes */
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+ /* Starting from AX210, the HW expects bytes */
WARN_ON(trans_pcie->bc_table_dword);
WARN_ON(len > 0x3FFF);
bc_ent = cpu_to_le16(len | (num_fetch_chunks << 14));
scd_bc_tbl_gen3->tfd_offset[idx] = bc_ent;
} else {
- /* Until 22560, the HW expects DW */
+ /* Before AX210, the HW expects DW */
WARN_ON(!trans_pcie->bc_table_dword);
len = DIV_ROUND_UP(len, 4);
WARN_ON(len > 0xFFF);
@@ -251,27 +251,23 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
struct ieee80211_hdr *hdr = (void *)skb->data;
unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
unsigned int mss = skb_shinfo(skb)->gso_size;
- u16 length, iv_len, amsdu_pad;
+ u16 length, amsdu_pad;
u8 *start_hdr;
struct iwl_tso_hdr_page *hdr_page;
struct page **page_ptr;
struct tso_t tso;
- /* if the packet is protected, then it must be CCMP or GCMP */
- iv_len = ieee80211_has_protected(hdr->frame_control) ?
- IEEE80211_CCMP_HDR_LEN : 0;
-
trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd),
&dev_cmd->hdr, start_len, 0);
ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb);
snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
- total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len - iv_len;
+ total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len;
amsdu_pad = 0;
/* total amount of header we may need for this A-MSDU */
hdr_room = DIV_ROUND_UP(total_len, mss) *
- (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)) + iv_len;
+ (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr));
/* Our device supports 9 segments at most, it will fit in 1 page */
hdr_page = get_page_hdr(trans, hdr_room);
@@ -282,14 +278,12 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
start_hdr = hdr_page->pos;
page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
*page_ptr = hdr_page->page;
- memcpy(hdr_page->pos, skb->data + hdr_len, iv_len);
- hdr_page->pos += iv_len;
/*
- * Pull the ieee80211 header + IV to be able to use TSO core,
+ * Pull the ieee80211 header to be able to use TSO core,
* we will restore it for the tx_status flow.
*/
- skb_pull(skb, hdr_len + iv_len);
+ skb_pull(skb, hdr_len);
/*
* Remove the length of all the headers that we don't actually
@@ -339,7 +333,8 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
goto out_err;
}
iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb_len);
- trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr, tb_len);
+ trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr,
+ tb_phys, tb_len);
/* add this subframe's headers' length to the tx_cmd */
le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start);
@@ -357,15 +352,15 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
}
iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb_len);
trace_iwlwifi_dev_tx_tb(trans->dev, skb, tso.data,
- tb_len);
+ tb_phys, tb_len);
data_left -= tb_len;
tso_build_data(skb, &tso, tb_len);
}
}
- /* re -add the WiFi header and IV */
- skb_push(skb, hdr_len + iv_len);
+ /* re-add the WiFi header */
+ skb_push(skb, hdr_len);
return 0;
@@ -447,9 +442,8 @@ static int iwl_pcie_gen2_tx_add_frags(struct iwl_trans *trans,
return -ENOMEM;
tb_idx = iwl_pcie_gen2_set_tb(trans, tfd, tb_phys,
skb_frag_size(frag));
- trace_iwlwifi_dev_tx_tb(trans->dev, skb,
- skb_frag_address(frag),
- skb_frag_size(frag));
+ trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb_frag_address(frag),
+ tb_phys, skb_frag_size(frag));
if (tb_idx < 0)
return tb_idx;
@@ -474,6 +468,7 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
dma_addr_t tb_phys;
int len, tb1_len, tb2_len;
void *tb1_addr;
+ struct sk_buff *frag;
tb_phys = iwl_pcie_get_first_tb_dma(txq, idx);
@@ -514,14 +509,25 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
goto out_err;
iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb2_len);
- trace_iwlwifi_dev_tx_tb(trans->dev, skb,
- skb->data + hdr_len,
- tb2_len);
+ trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb->data + hdr_len,
+ tb_phys, tb2_len);
}
if (iwl_pcie_gen2_tx_add_frags(trans, skb, tfd, out_meta))
goto out_err;
+ skb_walk_frags(skb, frag) {
+ tb_phys = dma_map_single(trans->dev, frag->data,
+ skb_headlen(frag), DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
+ goto out_err;
+ iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, skb_headlen(frag));
+ trace_iwlwifi_dev_tx_tb(trans->dev, skb, frag->data,
+ tb_phys, skb_headlen(frag));
+ if (iwl_pcie_gen2_tx_add_frags(trans, frag, tfd, out_meta))
+ goto out_err;
+ }
+
return tfd;
out_err:
@@ -547,7 +553,7 @@ struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
memset(tfd, 0, sizeof(*tfd));
- if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_22560)
+ if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
len = sizeof(struct iwl_tx_cmd_gen2);
else
len = sizeof(struct iwl_tx_cmd_gen3);
@@ -629,7 +635,7 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
return -1;
}
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
struct iwl_tx_cmd_gen3 *tx_cmd_gen3 =
(void *)dev_cmd->payload;
@@ -1130,7 +1136,7 @@ int iwl_trans_pcie_dyn_txq_alloc_dma(struct iwl_trans *trans,
return -ENOMEM;
ret = iwl_pcie_alloc_dma_ptr(trans, &txq->bc_tbl,
(trans->trans_cfg->device_family >=
- IWL_DEVICE_FAMILY_22560) ?
+ IWL_DEVICE_FAMILY_AX210) ?
sizeof(struct iwl_gen3_bc_tbl) :
sizeof(struct iwlagn_scd_bc_tbl));
if (ret) {
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index 4806a04cec8c..f21f16ab2a97 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -949,7 +949,7 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
u16 bc_tbls_size = trans->trans_cfg->base_params->num_of_queues;
bc_tbls_size *= (trans->trans_cfg->device_family >=
- IWL_DEVICE_FAMILY_22560) ?
+ IWL_DEVICE_FAMILY_AX210) ?
sizeof(struct iwl_gen3_bc_tbl) :
sizeof(struct iwlagn_scd_bc_tbl);
@@ -2019,9 +2019,8 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
head_tb_len, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
return -EINVAL;
- trace_iwlwifi_dev_tx_tb(trans->dev, skb,
- skb->data + hdr_len,
- head_tb_len);
+ trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb->data + hdr_len,
+ tb_phys, head_tb_len);
iwl_pcie_txq_build_tfd(trans, txq, tb_phys, head_tb_len, false);
}
@@ -2039,9 +2038,8 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
return -EINVAL;
- trace_iwlwifi_dev_tx_tb(trans->dev, skb,
- skb_frag_address(frag),
- skb_frag_size(frag));
+ trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb_frag_address(frag),
+ tb_phys, skb_frag_size(frag));
tb_idx = iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
skb_frag_size(frag), false);
if (tb_idx < 0)
@@ -2222,7 +2220,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
iwl_pcie_txq_build_tfd(trans, txq, hdr_tb_phys,
hdr_tb_len, false);
trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr,
- hdr_tb_len);
+ hdr_tb_phys, hdr_tb_len);
/* add this subframe's headers' length to the tx_cmd */
le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start);
@@ -2248,7 +2246,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
size, false);
trace_iwlwifi_dev_tx_tb(trans->dev, skb, tso.data,
- size);
+ tb_phys, size);
data_left -= size;
tso_build_data(skb, &tso, size);
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 14f562cd715c..03738107fd10 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -148,23 +148,25 @@ static const char *hwsim_alpha2s[] = {
};
static const struct ieee80211_regdomain hwsim_world_regdom_custom_01 = {
- .n_reg_rules = 4,
+ .n_reg_rules = 5,
.alpha2 = "99",
.reg_rules = {
REG_RULE(2412-10, 2462+10, 40, 0, 20, 0),
REG_RULE(2484-10, 2484+10, 40, 0, 20, 0),
REG_RULE(5150-10, 5240+10, 40, 0, 30, 0),
REG_RULE(5745-10, 5825+10, 40, 0, 30, 0),
+ REG_RULE(5855-10, 5925+10, 40, 0, 33, 0),
}
};
static const struct ieee80211_regdomain hwsim_world_regdom_custom_02 = {
- .n_reg_rules = 2,
+ .n_reg_rules = 3,
.alpha2 = "99",
.reg_rules = {
REG_RULE(2412-10, 2462+10, 40, 0, 20, 0),
REG_RULE(5725-10, 5850+10, 40, 0, 30,
NL80211_RRF_NO_IR),
+ REG_RULE(5855-10, 5925+10, 40, 0, 33, 0),
}
};
@@ -354,6 +356,24 @@ static const struct ieee80211_channel hwsim_channels_5ghz[] = {
CHAN5G(5805), /* Channel 161 */
CHAN5G(5825), /* Channel 165 */
CHAN5G(5845), /* Channel 169 */
+
+ CHAN5G(5855), /* Channel 171 */
+ CHAN5G(5860), /* Channel 172 */
+ CHAN5G(5865), /* Channel 173 */
+ CHAN5G(5870), /* Channel 174 */
+
+ CHAN5G(5875), /* Channel 175 */
+ CHAN5G(5880), /* Channel 176 */
+ CHAN5G(5885), /* Channel 177 */
+ CHAN5G(5890), /* Channel 178 */
+ CHAN5G(5895), /* Channel 179 */
+ CHAN5G(5900), /* Channel 180 */
+ CHAN5G(5905), /* Channel 181 */
+
+ CHAN5G(5910), /* Channel 182 */
+ CHAN5G(5915), /* Channel 183 */
+ CHAN5G(5920), /* Channel 184 */
+ CHAN5G(5925), /* Channel 185 */
};
static const struct ieee80211_rate hwsim_rates[] = {
@@ -749,8 +769,8 @@ static int hwsim_fops_ps_write(void *dat, u64 val)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(hwsim_fops_ps, hwsim_fops_ps_read, hwsim_fops_ps_write,
- "%llu\n");
+DEFINE_DEBUGFS_ATTRIBUTE(hwsim_fops_ps, hwsim_fops_ps_read, hwsim_fops_ps_write,
+ "%llu\n");
static int hwsim_write_simulate_radar(void *dat, u64 val)
{
@@ -761,8 +781,8 @@ static int hwsim_write_simulate_radar(void *dat, u64 val)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(hwsim_simulate_radar, NULL,
- hwsim_write_simulate_radar, "%llu\n");
+DEFINE_DEBUGFS_ATTRIBUTE(hwsim_simulate_radar, NULL,
+ hwsim_write_simulate_radar, "%llu\n");
static int hwsim_fops_group_read(void *dat, u64 *val)
{
@@ -778,9 +798,9 @@ static int hwsim_fops_group_write(void *dat, u64 val)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(hwsim_fops_group,
- hwsim_fops_group_read, hwsim_fops_group_write,
- "%llx\n");
+DEFINE_DEBUGFS_ATTRIBUTE(hwsim_fops_group,
+ hwsim_fops_group_read, hwsim_fops_group_write,
+ "%llx\n");
static netdev_tx_t hwsim_mon_xmit(struct sk_buff *skb,
struct net_device *dev)
@@ -1550,7 +1570,8 @@ static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac,
if (vif->type != NL80211_IFTYPE_AP &&
vif->type != NL80211_IFTYPE_MESH_POINT &&
- vif->type != NL80211_IFTYPE_ADHOC)
+ vif->type != NL80211_IFTYPE_ADHOC &&
+ vif->type != NL80211_IFTYPE_OCB)
return;
skb = ieee80211_beacon_get(hw, vif);
@@ -1604,6 +1625,8 @@ mac80211_hwsim_beacon(struct hrtimer *timer)
}
static const char * const hwsim_chanwidths[] = {
+ [NL80211_CHAN_WIDTH_5] = "ht5",
+ [NL80211_CHAN_WIDTH_10] = "ht10",
[NL80211_CHAN_WIDTH_20_NOHT] = "noht",
[NL80211_CHAN_WIDTH_20] = "ht20",
[NL80211_CHAN_WIDTH_40] = "ht40",
@@ -1979,8 +2002,7 @@ static int mac80211_hwsim_ampdu_action(struct ieee80211_hw *hw,
switch (action) {
case IEEE80211_AMPDU_TX_START:
- ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
- break;
+ return IEEE80211_AMPDU_TX_START_IMMEDIATE;
case IEEE80211_AMPDU_TX_STOP_CONT:
case IEEE80211_AMPDU_TX_STOP_FLUSH:
case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
@@ -2723,7 +2745,8 @@ static void mac80211_hwsim_he_capab(struct ieee80211_supported_band *sband)
BIT(NL80211_IFTYPE_P2P_CLIENT) | \
BIT(NL80211_IFTYPE_P2P_GO) | \
BIT(NL80211_IFTYPE_ADHOC) | \
- BIT(NL80211_IFTYPE_MESH_POINT))
+ BIT(NL80211_IFTYPE_MESH_POINT) | \
+ BIT(NL80211_IFTYPE_OCB))
static int mac80211_hwsim_new_radio(struct genl_info *info,
struct hwsim_new_radio_params *param)
@@ -2847,6 +2870,8 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
} else {
data->if_combination.num_different_channels = 1;
data->if_combination.radar_detect_widths =
+ BIT(NL80211_CHAN_WIDTH_5) |
+ BIT(NL80211_CHAN_WIDTH_10) |
BIT(NL80211_CHAN_WIDTH_20_NOHT) |
BIT(NL80211_CHAN_WIDTH_20) |
BIT(NL80211_CHAN_WIDTH_40) |
diff --git a/drivers/net/wireless/marvell/libertas/if_sdio.c b/drivers/net/wireless/marvell/libertas/if_sdio.c
index 242d8845da3f..30f1025ecb9b 100644
--- a/drivers/net/wireless/marvell/libertas/if_sdio.c
+++ b/drivers/net/wireless/marvell/libertas/if_sdio.c
@@ -1179,6 +1179,10 @@ static int if_sdio_probe(struct sdio_func *func,
spin_lock_init(&card->lock);
card->workqueue = alloc_workqueue("libertas_sdio", WQ_MEM_RECLAIM, 0);
+ if (unlikely(!card->workqueue)) {
+ ret = -ENOMEM;
+ goto err_queue;
+ }
INIT_WORK(&card->packet_worker, if_sdio_host_to_card_worker);
init_waitqueue_head(&card->pwron_waitq);
@@ -1230,6 +1234,7 @@ err_activate_card:
lbs_remove_card(priv);
free:
destroy_workqueue(card->workqueue);
+err_queue:
while (card->packets) {
packet = card->packets;
card->packets = card->packets->next;
diff --git a/drivers/net/wireless/marvell/libertas/mesh.c b/drivers/net/wireless/marvell/libertas/mesh.c
index 2747c957d18c..44c8a550da4c 100644
--- a/drivers/net/wireless/marvell/libertas/mesh.c
+++ b/drivers/net/wireless/marvell/libertas/mesh.c
@@ -1003,7 +1003,6 @@ static int lbs_add_mesh(struct lbs_private *priv)
if (priv->mesh_tlv) {
sprintf(mesh_wdev->ssid, "mesh");
mesh_wdev->mesh_id_up_len = 4;
- ret = 1;
}
mesh_wdev->netdev = mesh_dev;
diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c
index a9657ae6d782..d14e55e3c9da 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.c
+++ b/drivers/net/wireless/marvell/mwifiex/main.c
@@ -631,6 +631,7 @@ static int _mwifiex_fw_dpc(const struct firmware *firmware, void *context)
mwifiex_drv_get_driver_version(adapter, fmt, sizeof(fmt) - 1);
mwifiex_dbg(adapter, MSG, "driver_version = %s\n", fmt);
+ adapter->is_up = true;
goto done;
err_add_intf:
@@ -1469,6 +1470,7 @@ int mwifiex_shutdown_sw(struct mwifiex_adapter *adapter)
mwifiex_deauthenticate(priv, NULL);
mwifiex_uninit_sw(adapter);
+ adapter->is_up = false;
if (adapter->if_ops.down_dev)
adapter->if_ops.down_dev(adapter);
@@ -1730,7 +1732,8 @@ int mwifiex_remove_card(struct mwifiex_adapter *adapter)
if (!adapter)
return 0;
- mwifiex_uninit_sw(adapter);
+ if (adapter->is_up)
+ mwifiex_uninit_sw(adapter);
if (adapter->irq_wakeup >= 0)
device_init_wakeup(adapter->dev, false);
diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h
index 095837fba300..547ff3c578ee 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.h
+++ b/drivers/net/wireless/marvell/mwifiex/main.h
@@ -1017,6 +1017,7 @@ struct mwifiex_adapter {
/* For synchronizing FW initialization with device lifecycle. */
struct completion *fw_done;
+ bool is_up;
bool ext_scan;
u8 fw_api_ver;
diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c
index eff06d59e9df..fc1706d0647d 100644
--- a/drivers/net/wireless/marvell/mwifiex/pcie.c
+++ b/drivers/net/wireless/marvell/mwifiex/pcie.c
@@ -687,8 +687,11 @@ static int mwifiex_pcie_init_evt_ring(struct mwifiex_adapter *adapter)
skb_put(skb, MAX_EVENT_SIZE);
if (mwifiex_map_pci_memory(adapter, skb, MAX_EVENT_SIZE,
- PCI_DMA_FROMDEVICE))
+ PCI_DMA_FROMDEVICE)) {
+ kfree_skb(skb);
+ kfree(card->evtbd_ring_vbase);
return -1;
+ }
buf_pa = MWIFIEX_SKB_DMA_ADDR(skb);
@@ -1029,8 +1032,10 @@ static int mwifiex_pcie_alloc_cmdrsp_buf(struct mwifiex_adapter *adapter)
}
skb_put(skb, MWIFIEX_UPLD_SIZE);
if (mwifiex_map_pci_memory(adapter, skb, MWIFIEX_UPLD_SIZE,
- PCI_DMA_FROMDEVICE))
+ PCI_DMA_FROMDEVICE)) {
+ kfree_skb(skb);
return -1;
+ }
card->cmdrsp_buf = skb;
diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c
index 593c594982cb..98f942b797f7 100644
--- a/drivers/net/wireless/marvell/mwifiex/scan.c
+++ b/drivers/net/wireless/marvell/mwifiex/scan.c
@@ -1270,7 +1270,7 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
break;
case WLAN_EID_FH_PARAMS:
- if (element_len + 2 < sizeof(*fh_param_set))
+ if (total_ie_len < sizeof(*fh_param_set))
return -EINVAL;
fh_param_set =
(struct ieee_types_fh_param_set *) current_ptr;
@@ -1280,7 +1280,7 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
break;
case WLAN_EID_DS_PARAMS:
- if (element_len + 2 < sizeof(*ds_param_set))
+ if (total_ie_len < sizeof(*ds_param_set))
return -EINVAL;
ds_param_set =
(struct ieee_types_ds_param_set *) current_ptr;
@@ -1293,7 +1293,7 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
break;
case WLAN_EID_CF_PARAMS:
- if (element_len + 2 < sizeof(*cf_param_set))
+ if (total_ie_len < sizeof(*cf_param_set))
return -EINVAL;
cf_param_set =
(struct ieee_types_cf_param_set *) current_ptr;
@@ -1303,7 +1303,7 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
break;
case WLAN_EID_IBSS_PARAMS:
- if (element_len + 2 < sizeof(*ibss_param_set))
+ if (total_ie_len < sizeof(*ibss_param_set))
return -EINVAL;
ibss_param_set =
(struct ieee_types_ibss_param_set *)
@@ -1460,10 +1460,8 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
break;
}
- current_ptr += element_len + 2;
-
- /* Need to account for IE ID and IE Len */
- bytes_left -= (element_len + 2);
+ current_ptr += total_ie_len;
+ bytes_left -= total_ie_len;
} /* while (bytes_left > 2) */
return ret;
diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c
index 24c041dad9f6..fec38b6e86ff 100644
--- a/drivers/net/wireless/marvell/mwifiex/sdio.c
+++ b/drivers/net/wireless/marvell/mwifiex/sdio.c
@@ -444,6 +444,9 @@ static int mwifiex_sdio_suspend(struct device *dev)
return 0;
}
+ if (!adapter->is_up)
+ return -EBUSY;
+
mwifiex_enable_wake(adapter);
/* Enable the Host Sleep */
@@ -2220,22 +2223,30 @@ static void mwifiex_sdio_card_reset_work(struct mwifiex_adapter *adapter)
struct sdio_func *func = card->func;
int ret;
+ /* Prepare the adapter for the reset. */
mwifiex_shutdown_sw(adapter);
+ clear_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &card->work_flags);
+ clear_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &card->work_flags);
- /* power cycle the adapter */
+ /* Run a HW reset of the SDIO interface. */
sdio_claim_host(func);
- mmc_hw_reset(func->card->host);
+ ret = mmc_hw_reset(func->card->host);
sdio_release_host(func);
- /* Previous save_adapter won't be valid after this. We will cancel
- * pending work requests.
- */
- clear_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &card->work_flags);
- clear_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &card->work_flags);
-
- ret = mwifiex_reinit_sw(adapter);
- if (ret)
- dev_err(&func->dev, "reinit failed: %d\n", ret);
+ switch (ret) {
+ case 1:
+ dev_dbg(&func->dev, "SDIO HW reset asynchronous\n");
+ complete_all(adapter->fw_done);
+ break;
+ case 0:
+ ret = mwifiex_reinit_sw(adapter);
+ if (ret)
+ dev_err(&func->dev, "reinit failed: %d\n", ret);
+ break;
+ default:
+ dev_err(&func->dev, "SDIO HW reset failed: %d\n", ret);
+ break;
+ }
}
/* This function read/write firmware */
diff --git a/drivers/net/wireless/marvell/mwl8k.c b/drivers/net/wireless/marvell/mwl8k.c
index c4db6417748f..d55f229abeea 100644
--- a/drivers/net/wireless/marvell/mwl8k.c
+++ b/drivers/net/wireless/marvell/mwl8k.c
@@ -5520,7 +5520,7 @@ mwl8k_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
rc = -EBUSY;
break;
}
- ieee80211_start_tx_ba_cb_irqsafe(vif, addr, tid);
+ rc = IEEE80211_AMPDU_TX_START_IMMEDIATE;
break;
case IEEE80211_AMPDU_TX_STOP_CONT:
case IEEE80211_AMPDU_TX_STOP_FLUSH:
diff --git a/drivers/net/wireless/mediatek/mt76/Makefile b/drivers/net/wireless/mediatek/mt76/Makefile
index d7a1ddc9e407..99bbc74acda8 100644
--- a/drivers/net/wireless/mediatek/mt76/Makefile
+++ b/drivers/net/wireless/mediatek/mt76/Makefile
@@ -6,7 +6,7 @@ obj-$(CONFIG_MT76x02_USB) += mt76x02-usb.o
mt76-y := \
mmio.o util.o trace.o dma.o mac80211.o debugfs.o eeprom.o \
- tx.o agg-rx.o mcu.o
+ tx.o agg-rx.o mcu.o airtime.o
mt76-$(CONFIG_PCI) += pci.o
diff --git a/drivers/net/wireless/mediatek/mt76/agg-rx.c b/drivers/net/wireless/mediatek/mt76/agg-rx.c
index 8f3d36a15e17..53b5a4b2dcc5 100644
--- a/drivers/net/wireless/mediatek/mt76/agg-rx.c
+++ b/drivers/net/wireless/mediatek/mt76/agg-rx.c
@@ -130,8 +130,10 @@ mt76_rx_aggr_check_ctl(struct sk_buff *skb, struct sk_buff_head *frames)
return;
spin_lock_bh(&tid->lock);
- mt76_rx_aggr_release_frames(tid, frames, seqno);
- mt76_rx_aggr_release_head(tid, frames);
+ if (!tid->stopped) {
+ mt76_rx_aggr_release_frames(tid, frames, seqno);
+ mt76_rx_aggr_release_head(tid, frames);
+ }
spin_unlock_bh(&tid->lock);
}
@@ -257,8 +259,6 @@ static void mt76_rx_aggr_shutdown(struct mt76_dev *dev, struct mt76_rx_tid *tid)
u8 size = tid->size;
int i;
- cancel_delayed_work(&tid->reorder_work);
-
spin_lock_bh(&tid->lock);
tid->stopped = true;
@@ -273,21 +273,19 @@ static void mt76_rx_aggr_shutdown(struct mt76_dev *dev, struct mt76_rx_tid *tid)
}
spin_unlock_bh(&tid->lock);
+
+ cancel_delayed_work_sync(&tid->reorder_work);
}
void mt76_rx_aggr_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tidno)
{
- struct mt76_rx_tid *tid;
+ struct mt76_rx_tid *tid = NULL;
- rcu_read_lock();
-
- tid = rcu_dereference(wcid->aggr[tidno]);
+ rcu_swap_protected(wcid->aggr[tidno], tid,
+ lockdep_is_held(&dev->mutex));
if (tid) {
- rcu_assign_pointer(wcid->aggr[tidno], NULL);
mt76_rx_aggr_shutdown(dev, tid);
kfree_rcu(tid, rcu_head);
}
-
- rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(mt76_rx_aggr_stop);
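/*
 * For reference, rcu_swap_protected() in this kernel generation expands
 * roughly to:
 *
 *	typeof(tid) __tmp = rcu_dereference_protected(wcid->aggr[tidno],
 *				lockdep_is_held(&dev->mutex));
 *	rcu_assign_pointer(wcid->aggr[tidno], tid);
 *	tid = __tmp;
 *
 * so, under dev->mutex, the tid is detached from wcid->aggr[] and the
 * old pointer lands in the local variable for shutdown and kfree_rcu().
 */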
diff --git a/drivers/net/wireless/mediatek/mt76/airtime.c b/drivers/net/wireless/mediatek/mt76/airtime.c
new file mode 100644
index 000000000000..55116f395f9a
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/airtime.c
@@ -0,0 +1,326 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2019 Felix Fietkau <nbd@nbd.name>
+ */
+
+#include "mt76.h"
+
+#define AVG_PKT_SIZE 1024
+
+/* Number of bits for an average sized packet */
+#define MCS_NBITS (AVG_PKT_SIZE << 3)
+
+/* Number of symbols for a packet with (bps) bits per symbol */
+#define MCS_NSYMS(bps) DIV_ROUND_UP(MCS_NBITS, (bps))
+
+/* Transmission time (in units of 1/1024 us) for a packet containing (syms) symbols */
+#define MCS_SYMBOL_TIME(sgi, syms) \
+ (sgi ? \
+ ((syms) * 18 * 1024 + 4 * 1024) / 5 : /* syms * 3.6 us */ \
+ ((syms) * 1024) << 2 /* syms * 4 us */ \
+ )
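
/*
 * Worked example (a sketch): one HT20 stream at MCS 7 carries 260 bits
 * per symbol, so MCS_NSYMS(260) = DIV_ROUND_UP(8192, 260) = 32 symbols.
 * With a long GI, MCS_SYMBOL_TIME(0, 32) = (32 * 1024) << 2 = 131072,
 * i.e. 128 us in these 1/1024-us units; with a short GI it is
 * (32 * 18 * 1024 + 4 * 1024) / 5 = 118784, i.e. 116 us.
 */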
+
+/* Transmit duration for the raw data part of an average sized packet */
+#define MCS_DURATION(streams, sgi, bps) \
+ MCS_SYMBOL_TIME(sgi, MCS_NSYMS((streams) * (bps)))
+
+#define BW_20 0
+#define BW_40 1
+#define BW_80 2
+
+/*
+ * Define group sort order: HT40 -> SGI -> #streams
+ */
+#define MT_MAX_STREAMS 4
+#define MT_HT_STREAM_GROUPS 4 /* BW(=2) * SGI(=2) */
+#define MT_VHT_STREAM_GROUPS 6 /* BW(=3) * SGI(=2) */
+
+#define MT_HT_GROUPS_NB (MT_MAX_STREAMS * \
+ MT_HT_STREAM_GROUPS)
+#define MT_VHT_GROUPS_NB (MT_MAX_STREAMS * \
+ MT_VHT_STREAM_GROUPS)
+#define MT_GROUPS_NB (MT_HT_GROUPS_NB + \
+ MT_VHT_GROUPS_NB)
+
+#define MT_HT_GROUP_0 0
+#define MT_VHT_GROUP_0 (MT_HT_GROUP_0 + MT_HT_GROUPS_NB)
+
+#define MCS_GROUP_RATES 10
+
+#define HT_GROUP_IDX(_streams, _sgi, _ht40) \
+ MT_HT_GROUP_0 + \
+ MT_MAX_STREAMS * 2 * _ht40 + \
+ MT_MAX_STREAMS * _sgi + \
+ _streams - 1
+
+#define _MAX(a, b) (((a)>(b))?(a):(b))
+
+#define GROUP_SHIFT(duration) \
+ _MAX(0, 16 - __builtin_clz(duration))
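
/*
 * The shift exists because struct mcs_group stores durations as u16:
 * GROUP_SHIFT() picks the smallest shift that fits the group's longest
 * duration (MCS 0, the lowest rate) into 16 bits, and
 * mt76_calc_rx_airtime() shifts the stored value back up before use.
 */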
+
+/* MCS rate information for an MCS group */
+#define __MCS_GROUP(_streams, _sgi, _ht40, _s) \
+ [HT_GROUP_IDX(_streams, _sgi, _ht40)] = { \
+ .shift = _s, \
+ .duration = { \
+ MCS_DURATION(_streams, _sgi, _ht40 ? 54 : 26) >> _s, \
+ MCS_DURATION(_streams, _sgi, _ht40 ? 108 : 52) >> _s, \
+ MCS_DURATION(_streams, _sgi, _ht40 ? 162 : 78) >> _s, \
+ MCS_DURATION(_streams, _sgi, _ht40 ? 216 : 104) >> _s, \
+ MCS_DURATION(_streams, _sgi, _ht40 ? 324 : 156) >> _s, \
+ MCS_DURATION(_streams, _sgi, _ht40 ? 432 : 208) >> _s, \
+ MCS_DURATION(_streams, _sgi, _ht40 ? 486 : 234) >> _s, \
+ MCS_DURATION(_streams, _sgi, _ht40 ? 540 : 260) >> _s \
+ } \
+}
+
+#define MCS_GROUP_SHIFT(_streams, _sgi, _ht40) \
+ GROUP_SHIFT(MCS_DURATION(_streams, _sgi, _ht40 ? 54 : 26))
+
+#define MCS_GROUP(_streams, _sgi, _ht40) \
+ __MCS_GROUP(_streams, _sgi, _ht40, \
+ MCS_GROUP_SHIFT(_streams, _sgi, _ht40))
+
+#define VHT_GROUP_IDX(_streams, _sgi, _bw) \
+ (MT_VHT_GROUP_0 + \
+ MT_MAX_STREAMS * 2 * (_bw) + \
+ MT_MAX_STREAMS * (_sgi) + \
+ (_streams) - 1)
+
+#define BW2VBPS(_bw, r3, r2, r1) \
+ (_bw == BW_80 ? r3 : _bw == BW_40 ? r2 : r1)
+
+#define __VHT_GROUP(_streams, _sgi, _bw, _s) \
+ [VHT_GROUP_IDX(_streams, _sgi, _bw)] = { \
+ .shift = _s, \
+ .duration = { \
+ MCS_DURATION(_streams, _sgi, \
+ BW2VBPS(_bw, 117, 54, 26)) >> _s, \
+ MCS_DURATION(_streams, _sgi, \
+ BW2VBPS(_bw, 234, 108, 52)) >> _s, \
+ MCS_DURATION(_streams, _sgi, \
+ BW2VBPS(_bw, 351, 162, 78)) >> _s, \
+ MCS_DURATION(_streams, _sgi, \
+ BW2VBPS(_bw, 468, 216, 104)) >> _s, \
+ MCS_DURATION(_streams, _sgi, \
+ BW2VBPS(_bw, 702, 324, 156)) >> _s, \
+ MCS_DURATION(_streams, _sgi, \
+ BW2VBPS(_bw, 936, 432, 208)) >> _s, \
+ MCS_DURATION(_streams, _sgi, \
+ BW2VBPS(_bw, 1053, 486, 234)) >> _s, \
+ MCS_DURATION(_streams, _sgi, \
+ BW2VBPS(_bw, 1170, 540, 260)) >> _s, \
+ MCS_DURATION(_streams, _sgi, \
+ BW2VBPS(_bw, 1404, 648, 312)) >> _s, \
+ MCS_DURATION(_streams, _sgi, \
+ BW2VBPS(_bw, 1560, 720, 346)) >> _s \
+ } \
+}
+
+#define VHT_GROUP_SHIFT(_streams, _sgi, _bw) \
+ GROUP_SHIFT(MCS_DURATION(_streams, _sgi, \
+ BW2VBPS(_bw, 117, 54, 26)))
+
+#define VHT_GROUP(_streams, _sgi, _bw) \
+ __VHT_GROUP(_streams, _sgi, _bw, \
+ VHT_GROUP_SHIFT(_streams, _sgi, _bw))
+
+struct mcs_group {
+ u8 shift;
+ u16 duration[MCS_GROUP_RATES];
+};
+
+static const struct mcs_group airtime_mcs_groups[] = {
+ MCS_GROUP(1, 0, BW_20),
+ MCS_GROUP(2, 0, BW_20),
+ MCS_GROUP(3, 0, BW_20),
+ MCS_GROUP(4, 0, BW_20),
+
+ MCS_GROUP(1, 1, BW_20),
+ MCS_GROUP(2, 1, BW_20),
+ MCS_GROUP(3, 1, BW_20),
+ MCS_GROUP(4, 1, BW_20),
+
+ MCS_GROUP(1, 0, BW_40),
+ MCS_GROUP(2, 0, BW_40),
+ MCS_GROUP(3, 0, BW_40),
+ MCS_GROUP(4, 0, BW_40),
+
+ MCS_GROUP(1, 1, BW_40),
+ MCS_GROUP(2, 1, BW_40),
+ MCS_GROUP(3, 1, BW_40),
+ MCS_GROUP(4, 1, BW_40),
+
+ VHT_GROUP(1, 0, BW_20),
+ VHT_GROUP(2, 0, BW_20),
+ VHT_GROUP(3, 0, BW_20),
+ VHT_GROUP(4, 0, BW_20),
+
+ VHT_GROUP(1, 1, BW_20),
+ VHT_GROUP(2, 1, BW_20),
+ VHT_GROUP(3, 1, BW_20),
+ VHT_GROUP(4, 1, BW_20),
+
+ VHT_GROUP(1, 0, BW_40),
+ VHT_GROUP(2, 0, BW_40),
+ VHT_GROUP(3, 0, BW_40),
+ VHT_GROUP(4, 0, BW_40),
+
+ VHT_GROUP(1, 1, BW_40),
+ VHT_GROUP(2, 1, BW_40),
+ VHT_GROUP(3, 1, BW_40),
+ VHT_GROUP(4, 1, BW_40),
+
+ VHT_GROUP(1, 0, BW_80),
+ VHT_GROUP(2, 0, BW_80),
+ VHT_GROUP(3, 0, BW_80),
+ VHT_GROUP(4, 0, BW_80),
+
+ VHT_GROUP(1, 1, BW_80),
+ VHT_GROUP(2, 1, BW_80),
+ VHT_GROUP(3, 1, BW_80),
+ VHT_GROUP(4, 1, BW_80),
+};
+
+static u32
+mt76_calc_legacy_rate_duration(const struct ieee80211_rate *rate, bool short_pre,
+ int len)
+{
+ u32 duration;
+
+ switch (rate->hw_value >> 8) {
+ case MT_PHY_TYPE_CCK:
+ duration = 144 + 48; /* preamble + PLCP */
+ if (short_pre)
+ duration >>= 1;
+
+ duration += 10; /* SIFS */
+ break;
+ case MT_PHY_TYPE_OFDM:
+ duration = 20 + 16; /* preamble + SIFS */
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return 0;
+ }
+
+ len <<= 3;
+ duration += (len * 10) / rate->bitrate;
+
+ return duration;
+}
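
/*
 * Worked example (a sketch): for a 24 Mbit/s OFDM rate
 * (rate->bitrate == 240, in 100 kbit/s units) and len == 1024 bytes,
 * duration = 20 + 16 + (8192 * 10) / 240 = 36 + 341 = 377 us.
 */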
+
+u32 mt76_calc_rx_airtime(struct mt76_dev *dev, struct mt76_rx_status *status,
+ int len)
+{
+ struct ieee80211_supported_band *sband;
+ const struct ieee80211_rate *rate;
+ bool sgi = status->enc_flags & RX_ENC_FLAG_SHORT_GI;
+ bool sp = status->enc_flags & RX_ENC_FLAG_SHORTPRE;
+ int bw, streams;
+ u32 duration;
+ int group, idx;
+
+ switch (status->bw) {
+ case RATE_INFO_BW_20:
+ bw = BW_20;
+ break;
+ case RATE_INFO_BW_40:
+ bw = BW_40;
+ break;
+ case RATE_INFO_BW_80:
+ bw = BW_80;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return 0;
+ }
+
+ switch (status->encoding) {
+ case RX_ENC_LEGACY:
+ if (WARN_ON_ONCE(status->band > NL80211_BAND_5GHZ))
+ return 0;
+
+ sband = dev->hw->wiphy->bands[status->band];
+ if (!sband || status->rate_idx > sband->n_bitrates)
+ return 0;
+
+ rate = &sband->bitrates[status->rate_idx];
+
+ return mt76_calc_legacy_rate_duration(rate, sp, len);
+ case RX_ENC_VHT:
+ streams = status->nss;
+ idx = status->rate_idx;
+ group = VHT_GROUP_IDX(streams, sgi, bw);
+ break;
+ case RX_ENC_HT:
+ streams = ((status->rate_idx >> 3) & 3) + 1;
+ idx = status->rate_idx & 7;
+ group = HT_GROUP_IDX(streams, sgi, bw);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return 0;
+ }
+
+ if (WARN_ON_ONCE(streams > 4))
+ return 0;
+
+ duration = airtime_mcs_groups[group].duration[idx];
+ duration <<= airtime_mcs_groups[group].shift;
+ duration *= len;
+ duration /= AVG_PKT_SIZE;
+ duration /= 1024;
+
+ duration += 36 + (streams << 2);
+
+ return duration;
+}
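
/*
 * Note on the HT case above: mac80211 folds the stream count into the
 * MCS index, so e.g. status->rate_idx == 12 decodes as
 * streams = ((12 >> 3) & 3) + 1 = 2 and idx = 12 & 7 = 4, i.e. HT
 * MCS 12 is two-stream MCS 4.
 */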
+
+u32 mt76_calc_tx_airtime(struct mt76_dev *dev, struct ieee80211_tx_info *info,
+ int len)
+{
+ struct mt76_rx_status stat = {
+ .band = info->band,
+ };
+ u32 duration = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(info->status.rates); i++) {
+ struct ieee80211_tx_rate *rate = &info->status.rates[i];
+ u32 cur_duration;
+
+ if (rate->idx < 0 || !rate->count)
+ break;
+
+ if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
+ stat.bw = RATE_INFO_BW_80;
+ else if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+ stat.bw = RATE_INFO_BW_40;
+ else
+ stat.bw = RATE_INFO_BW_20;
+
+ stat.enc_flags = 0;
+ if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
+ stat.enc_flags |= RX_ENC_FLAG_SHORTPRE;
+ if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
+ stat.enc_flags |= RX_ENC_FLAG_SHORT_GI;
+
+ stat.rate_idx = rate->idx;
+ if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
+ stat.encoding = RX_ENC_VHT;
+ stat.rate_idx = ieee80211_rate_get_vht_mcs(rate);
+ stat.nss = ieee80211_rate_get_vht_nss(rate);
+ } else if (rate->flags & IEEE80211_TX_RC_MCS) {
+ stat.encoding = RX_ENC_HT;
+ } else {
+ stat.encoding = RX_ENC_LEGACY;
+ }
+
+ cur_duration = mt76_calc_rx_airtime(dev, &stat, len);
+ duration += cur_duration * rate->count;
+ }
+
+ return duration;
+}
+EXPORT_SYMBOL_GPL(mt76_calc_tx_airtime);
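
/*
 * A minimal usage sketch (assumed driver code, not part of this patch):
 * a TX-status path can feed the rate table that mac80211 filled in back
 * into mt76_calc_tx_airtime() and credit the station, mirroring what
 * mt76_airtime_report() does on the RX side.
 */
static void example_tx_status(struct mt76_dev *dev, struct ieee80211_sta *sta,
			      struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	u32 airtime = mt76_calc_tx_airtime(dev, info, skb->len);

	/* tid 0 assumed for the example; a real driver derives it from skb */
	ieee80211_sta_register_airtime(sta, 0, airtime, 0);
}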
diff --git a/drivers/net/wireless/mediatek/mt76/debugfs.c b/drivers/net/wireless/mediatek/mt76/debugfs.c
index d95b73fd0d2b..d2202acb8dc6 100644
--- a/drivers/net/wireless/mediatek/mt76/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/debugfs.c
@@ -25,8 +25,7 @@ mt76_reg_get(void *data, u64 *val)
DEFINE_DEBUGFS_ATTRIBUTE(fops_regval, mt76_reg_get, mt76_reg_set,
"0x%08llx\n");
-static int
-mt76_queues_read(struct seq_file *s, void *data)
+int mt76_queues_read(struct seq_file *s, void *data)
{
struct mt76_dev *dev = dev_get_drvdata(s->private);
int i;
@@ -45,6 +44,7 @@ mt76_queues_read(struct seq_file *s, void *data)
return 0;
}
+EXPORT_SYMBOL_GPL(mt76_queues_read);
void mt76_seq_puts_array(struct seq_file *file, const char *str,
s8 *val, int len)
@@ -90,7 +90,6 @@ struct dentry *mt76_register_debugfs(struct mt76_dev *dev)
debugfs_create_blob("eeprom", 0400, dir, &dev->eeprom);
if (dev->otp.data)
debugfs_create_blob("otp", 0400, dir, &dev->otp);
- debugfs_create_devm_seqfile(dev->dev, "queues", dir, mt76_queues_read);
debugfs_create_devm_seqfile(dev->dev, "rate_txpower", dir,
mt76_read_rate_txpower);
diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
index 8f69d00bd940..6173c80189ba 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/dma.c
@@ -166,7 +166,7 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush)
dev->drv->tx_complete_skb(dev, qid, &entry);
if (entry.txwi) {
- if (!(dev->drv->txwi_flags & MT_TXWI_NO_FREE))
+ if (!(dev->drv->drv_flags & MT_DRV_TXWI_NO_FREE))
mt76_put_txwi(dev, entry.txwi);
wake = !flush;
}
@@ -301,7 +301,7 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
txwi = mt76_get_txwi_ptr(dev, t);
skb->prev = skb->next = NULL;
- if (dev->drv->tx_aligned4_skbs)
+ if (dev->drv->drv_flags & MT_DRV_TX_ALIGNED4_SKBS)
mt76_insert_hdr_pad(skb);
len = skb_headlen(skb);
@@ -365,7 +365,6 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
int frames = 0;
int len = SKB_WITH_OVERHEAD(q->buf_size);
int offset = q->buf_offset;
- int idx;
spin_lock_bh(&q->lock);
@@ -384,7 +383,7 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
qbuf.addr = addr + offset;
qbuf.len = len - offset;
- idx = mt76_dma_add_buf(dev, q, &qbuf, 1, 0, buf, NULL);
+ mt76_dma_add_buf(dev, q, &qbuf, 1, 0, buf, NULL);
frames++;
}
@@ -539,10 +538,8 @@ mt76_dma_rx_poll(struct napi_struct *napi, int budget)
rcu_read_unlock();
- if (done < budget) {
- napi_complete(napi);
+ if (done < budget && napi_complete(napi))
dev->drv->rx_poll_complete(dev, qid);
- }
return done;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
index 1a2c143b34d0..b9f2a401041a 100644
--- a/drivers/net/wireless/mediatek/mt76/mac80211.c
+++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
@@ -105,7 +105,15 @@ static int mt76_led_init(struct mt76_dev *dev)
dev->led_al = of_property_read_bool(np, "led-active-low");
}
- return devm_led_classdev_register(dev->dev, &dev->led_cdev);
+ return led_classdev_register(dev->dev, &dev->led_cdev);
+}
+
+static void mt76_led_cleanup(struct mt76_dev *dev)
+{
+ if (!dev->led_cdev.brightness_set && !dev->led_cdev.blink_set)
+ return;
+
+ led_classdev_unregister(&dev->led_cdev);
}
static void mt76_init_stream_cap(struct mt76_dev *dev,
@@ -180,6 +188,7 @@ mt76_init_sband(struct mt76_dev *dev, struct mt76_sband *msband,
sband->bitrates = rates;
sband->n_bitrates = n_rates;
dev->chandef.chan = &sband->channels[0];
+ dev->chan_state = &msband->chan[0];
ht_cap = &sband->ht_cap;
ht_cap->ht_supported = true;
@@ -306,6 +315,7 @@ int mt76_register_device(struct mt76_dev *dev, bool vht,
wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AIRTIME_FAIRNESS);
wiphy->available_antennas_tx = dev->antenna_mask;
wiphy->available_antennas_rx = dev->antenna_mask;
@@ -327,8 +337,16 @@ int mt76_register_device(struct mt76_dev *dev, bool vht,
ieee80211_hw_set(hw, AP_LINK_PS);
ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
ieee80211_hw_set(hw, NEEDS_UNIQUE_STA_ADDR);
+ ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
+ wiphy->interface_modes =
+ BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP) |
+#ifdef CONFIG_MAC80211_MESH
+ BIT(NL80211_IFTYPE_MESH_POINT) |
+#endif
+ BIT(NL80211_IFTYPE_ADHOC);
if (dev->cap.has_2ghz) {
ret = mt76_init_sband_2g(dev, rates, n_rates);
@@ -360,6 +378,7 @@ void mt76_unregister_device(struct mt76_dev *dev)
{
struct ieee80211_hw *hw = dev->hw;
+ mt76_led_cleanup(dev);
mt76_tx_status_check(dev, NULL, true);
ieee80211_unregister_hw(hw);
}
@@ -398,28 +417,64 @@ bool mt76_has_tx_pending(struct mt76_dev *dev)
}
EXPORT_SYMBOL_GPL(mt76_has_tx_pending);
+static struct mt76_channel_state *
+mt76_channel_state(struct mt76_dev *dev, struct ieee80211_channel *c)
+{
+ struct mt76_sband *msband;
+ int idx;
+
+ if (c->band == NL80211_BAND_2GHZ)
+ msband = &dev->sband_2g;
+ else
+ msband = &dev->sband_5g;
+
+ idx = c - &msband->sband.channels[0];
+ return &msband->chan[idx];
+}
+
+void mt76_update_survey(struct mt76_dev *dev)
+{
+ struct mt76_channel_state *state = dev->chan_state;
+ ktime_t cur_time;
+
+ if (!test_bit(MT76_STATE_RUNNING, &dev->state))
+ return;
+
+ if (dev->drv->update_survey)
+ dev->drv->update_survey(dev);
+
+ cur_time = ktime_get_boottime();
+ state->cc_active += ktime_to_us(ktime_sub(cur_time,
+ dev->survey_time));
+ dev->survey_time = cur_time;
+
+ if (dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME) {
+ spin_lock_bh(&dev->cc_lock);
+ state->cc_bss_rx += dev->cur_cc_bss_rx;
+ dev->cur_cc_bss_rx = 0;
+ spin_unlock_bh(&dev->cc_lock);
+ }
+}
+EXPORT_SYMBOL_GPL(mt76_update_survey);
+
void mt76_set_channel(struct mt76_dev *dev)
{
struct ieee80211_hw *hw = dev->hw;
struct cfg80211_chan_def *chandef = &hw->conf.chandef;
- struct mt76_channel_state *state;
bool offchannel = hw->conf.flags & IEEE80211_CONF_OFFCHANNEL;
int timeout = HZ / 5;
wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(dev), timeout);
-
- if (dev->drv->update_survey)
- dev->drv->update_survey(dev);
+ mt76_update_survey(dev);
dev->chandef = *chandef;
+ dev->chan_state = mt76_channel_state(dev, chandef->chan);
if (!offchannel)
dev->main_chan = chandef->chan;
- if (chandef->chan != dev->main_chan) {
- state = mt76_channel_state(dev, chandef->chan);
- memset(state, 0, sizeof(*state));
- }
+ if (chandef->chan != dev->main_chan)
+ memset(dev->chan_state, 0, sizeof(*dev->chan_state));
}
EXPORT_SYMBOL_GPL(mt76_set_channel);
@@ -432,8 +487,9 @@ int mt76_get_survey(struct ieee80211_hw *hw, int idx,
struct mt76_channel_state *state;
int ret = 0;
+ mutex_lock(&dev->mutex);
if (idx == 0 && dev->drv->update_survey)
- dev->drv->update_survey(dev);
+ mt76_update_survey(dev);
sband = &dev->sband_2g;
if (idx >= sband->sband.n_channels) {
@@ -441,8 +497,10 @@ int mt76_get_survey(struct ieee80211_hw *hw, int idx,
sband = &dev->sband_5g;
}
- if (idx >= sband->sband.n_channels)
- return -ENOENT;
+ if (idx >= sband->sband.n_channels) {
+ ret = -ENOENT;
+ goto out;
+ }
chan = &sband->sband.channels[idx];
state = mt76_channel_state(dev, chan);
@@ -450,14 +508,26 @@ int mt76_get_survey(struct ieee80211_hw *hw, int idx,
memset(survey, 0, sizeof(*survey));
survey->channel = chan;
survey->filled = SURVEY_INFO_TIME | SURVEY_INFO_TIME_BUSY;
- if (chan == dev->main_chan)
+ survey->filled |= dev->drv->survey_flags;
+ if (chan == dev->main_chan) {
survey->filled |= SURVEY_INFO_IN_USE;
- spin_lock_bh(&dev->cc_lock);
- survey->time = div_u64(state->cc_active, 1000);
+ if (dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME)
+ survey->filled |= SURVEY_INFO_TIME_BSS_RX;
+ }
+
survey->time_busy = div_u64(state->cc_busy, 1000);
+ survey->time_rx = div_u64(state->cc_rx, 1000);
+ survey->time = div_u64(state->cc_active, 1000);
+
+ spin_lock_bh(&dev->cc_lock);
+ survey->time_bss_rx = div_u64(state->cc_bss_rx, 1000);
+ survey->time_tx = div_u64(state->cc_tx, 1000);
spin_unlock_bh(&dev->cc_lock);
+out:
+ mutex_unlock(&dev->mutex);
+
return ret;
}
EXPORT_SYMBOL_GPL(mt76_get_survey);
@@ -502,6 +572,7 @@ static struct ieee80211_sta *mt76_rx_convert(struct sk_buff *skb)
status->band = mstat.band;
status->signal = mstat.signal;
status->chains = mstat.chains;
+ status->ampdu_reference = mstat.ampdu_ref;
BUILD_BUG_ON(sizeof(mstat) > sizeof(skb->cb));
BUILD_BUG_ON(sizeof(status->chain_signal) !=
@@ -552,6 +623,84 @@ mt76_check_ccmp_pn(struct sk_buff *skb)
}
static void
+mt76_airtime_report(struct mt76_dev *dev, struct mt76_rx_status *status,
+ int len)
+{
+ struct mt76_wcid *wcid = status->wcid;
+ struct ieee80211_sta *sta;
+ u32 airtime;
+
+ airtime = mt76_calc_rx_airtime(dev, status, len);
+ spin_lock(&dev->cc_lock);
+ dev->cur_cc_bss_rx += airtime;
+ spin_unlock(&dev->cc_lock);
+
+ if (!wcid || !wcid->sta)
+ return;
+
+ sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
+ ieee80211_sta_register_airtime(sta, status->tid, 0, airtime);
+}
+
+static void
+mt76_airtime_flush_ampdu(struct mt76_dev *dev)
+{
+ struct mt76_wcid *wcid;
+ int wcid_idx;
+
+ if (!dev->rx_ampdu_len)
+ return;
+
+ wcid_idx = dev->rx_ampdu_status.wcid_idx;
+ if (wcid_idx < ARRAY_SIZE(dev->wcid))
+ wcid = rcu_dereference(dev->wcid[wcid_idx]);
+ else
+ wcid = NULL;
+ dev->rx_ampdu_status.wcid = wcid;
+
+ mt76_airtime_report(dev, &dev->rx_ampdu_status, dev->rx_ampdu_len);
+
+ dev->rx_ampdu_len = 0;
+ dev->rx_ampdu_ref = 0;
+}
+
+static void
+mt76_airtime_check(struct mt76_dev *dev, struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
+ struct mt76_wcid *wcid = status->wcid;
+
+ if (!(dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME))
+ return;
+
+ if (!wcid || !wcid->sta) {
+ if (!ether_addr_equal(hdr->addr1, dev->macaddr))
+ return;
+
+ wcid = NULL;
+ }
+
+ if (!(status->flag & RX_FLAG_AMPDU_DETAILS) ||
+ status->ampdu_ref != dev->rx_ampdu_ref)
+ mt76_airtime_flush_ampdu(dev);
+
+ if (status->flag & RX_FLAG_AMPDU_DETAILS) {
+ if (!dev->rx_ampdu_len ||
+ status->ampdu_ref != dev->rx_ampdu_ref) {
+ dev->rx_ampdu_status = *status;
+ dev->rx_ampdu_status.wcid_idx = wcid ? wcid->idx : 0xff;
+ dev->rx_ampdu_ref = status->ampdu_ref;
+ }
+
+ dev->rx_ampdu_len += skb->len;
+ return;
+ }
+
+ mt76_airtime_report(dev, status, skb->len);
+}
+
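/*
 * A-MPDU accounting above, in brief: every subframe of one aggregate
 * carries the same PHY rate, so mt76_airtime_check() only accumulates
 * skb lengths per ampdu_ref and charges the airtime once, when a frame
 * from a different (or no) aggregate flushes the pending A-MPDU via
 * mt76_airtime_flush_ampdu().
 */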
+static void
mt76_check_sta(struct mt76_dev *dev, struct sk_buff *skb)
{
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
@@ -567,6 +716,8 @@ mt76_check_sta(struct mt76_dev *dev, struct sk_buff *skb)
wcid = status->wcid = (struct mt76_wcid *)sta->drv_priv;
}
+ mt76_airtime_check(dev, skb);
+
if (!wcid || !wcid->sta)
return;
@@ -886,3 +1037,16 @@ void mt76_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
clear_bit(MT76_SCANNING, &dev->state);
}
EXPORT_SYMBOL_GPL(mt76_sw_scan_complete);
+
+int mt76_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
+{
+ struct mt76_dev *dev = hw->priv;
+
+ mutex_lock(&dev->mutex);
+ *tx_ant = dev->antenna_mask;
+ *rx_ant = dev->antenna_mask;
+ mutex_unlock(&dev->mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76_get_antenna);
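
/*
 * A wiring sketch (assumed, not part of this patch): drivers that want
 * the common handler point their mac80211 ops at it, e.g.
 *
 *	.get_antenna = mt76_get_antenna,
 *
 * which matches the ieee80211_ops get_antenna signature exactly.
 */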
diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
index 8aec7ccf2d79..fb077760347a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
@@ -49,8 +49,8 @@ struct mt76_bus_ops {
enum mt76_bus_type type;
};
-#define mt76_is_usb(dev) ((dev)->mt76.bus->type == MT76_BUS_USB)
-#define mt76_is_mmio(dev) ((dev)->mt76.bus->type == MT76_BUS_MMIO)
+#define mt76_is_usb(dev) ((dev)->bus->type == MT76_BUS_USB)
+#define mt76_is_mmio(dev) ((dev)->bus->type == MT76_BUS_MMIO)
enum mt76_txq_id {
MT_TXQ_VO = IEEE80211_AC_VO,
@@ -152,10 +152,6 @@ struct mt76_queue_ops {
int idx, int n_desc, int bufsize,
u32 ring_base);
- int (*add_buf)(struct mt76_dev *dev, struct mt76_queue *q,
- struct mt76_queue_buf *buf, int nbufs, u32 info,
- struct sk_buff *skb, void *txwi);
-
int (*tx_queue_skb)(struct mt76_dev *dev, enum mt76_txq_id qid,
struct sk_buff *skb, struct mt76_wcid *wcid,
struct ieee80211_sta *sta);
@@ -191,8 +187,6 @@ DECLARE_EWMA(signal, 10, 8);
struct mt76_wcid {
struct mt76_rx_tid __rcu *aggr[IEEE80211_NUM_TIDS];
- struct work_struct aggr_work;
-
unsigned long flags;
struct ewma_signal rssi;
@@ -282,11 +276,13 @@ struct mt76_hw_cap {
bool has_5ghz;
};
-#define MT_TXWI_NO_FREE BIT(0)
+#define MT_DRV_TXWI_NO_FREE BIT(0)
+#define MT_DRV_TX_ALIGNED4_SKBS BIT(1)
+#define MT_DRV_SW_RX_AIRTIME BIT(2)
struct mt76_driver_ops {
- bool tx_aligned4_skbs;
- u32 txwi_flags;
+ u32 drv_flags;
+ u32 survey_flags;
u16 txwi_size;
void (*update_survey)(struct mt76_dev *dev);
@@ -322,6 +318,9 @@ struct mt76_driver_ops {
struct mt76_channel_state {
u64 cc_active;
u64 cc_busy;
+ u64 cc_rx;
+ u64 cc_bss_rx;
+ u64 cc_tx;
};
struct mt76_sband {
@@ -367,8 +366,8 @@ enum mt76u_in_ep {
enum mt76u_out_ep {
MT_EP_OUT_INBAND_CMD,
- MT_EP_OUT_AC_BK,
MT_EP_OUT_AC_BE,
+ MT_EP_OUT_AC_BK,
MT_EP_OUT_AC_VI,
MT_EP_OUT_AC_VO,
MT_EP_OUT_HCCA,
@@ -388,7 +387,8 @@ struct mt76_usb {
};
struct tasklet_struct rx_tasklet;
- struct delayed_work stat_work;
+ struct workqueue_struct *stat_wq;
+ struct work_struct stat_work;
u8 out_ep[__MT_EP_OUT_MAX];
u8 in_ep[__MT_EP_IN_MAX];
@@ -421,14 +421,49 @@ struct mt76_mmio {
u32 irqmask;
};
+struct mt76_rx_status {
+ union {
+ struct mt76_wcid *wcid;
+ u8 wcid_idx;
+ };
+
+ unsigned long reorder_time;
+
+ u32 ampdu_ref;
+
+ u8 iv[6];
+
+ u8 aggr:1;
+ u8 tid;
+ u16 seqno;
+
+ u16 freq;
+ u32 flag;
+ u8 enc_flags;
+ u8 encoding:2, bw:3;
+ u8 rate_idx;
+ u8 nss;
+ u8 band;
+ s8 signal;
+ u8 chains;
+ s8 chain_signal[IEEE80211_MAX_CHAINS];
+};
+
struct mt76_dev {
struct ieee80211_hw *hw;
struct cfg80211_chan_def chandef;
struct ieee80211_channel *main_chan;
+ struct mt76_channel_state *chan_state;
spinlock_t lock;
spinlock_t cc_lock;
+ u32 cur_cc_bss_rx;
+
+ struct mt76_rx_status rx_ampdu_status;
+ u32 rx_ampdu_len;
+ u32 rx_ampdu_ref;
+
struct mutex mutex;
const struct mt76_bus_ops *bus;
@@ -440,6 +475,7 @@ struct mt76_dev {
spinlock_t rx_lock;
struct napi_struct napi[__MT_RXQ_MAX];
struct sk_buff_head rx_skb[__MT_RXQ_MAX];
+ u32 ampdu_ref;
struct list_head txwi_cache;
struct mt76_sw_queue q_tx[__MT_TXQ_MAX];
@@ -463,6 +499,8 @@ struct mt76_dev {
u32 rev;
unsigned long state;
+ u32 aggr_stats[32];
+
u8 antenna_mask;
u16 chainmask;
@@ -509,29 +547,6 @@ enum mt76_phy_type {
MT_PHY_TYPE_VHT,
};
-struct mt76_rx_status {
- struct mt76_wcid *wcid;
-
- unsigned long reorder_time;
-
- u8 iv[6];
-
- u8 aggr:1;
- u8 tid;
- u16 seqno;
-
- u16 freq;
- u32 flag;
- u8 enc_flags;
- u8 encoding:2, bw:3;
- u8 rate_idx;
- u8 nss;
- u8 band;
- s8 signal;
- u8 chains;
- s8 chain_signal[IEEE80211_MAX_CHAINS];
-};
-
#define __mt76_rr(dev, ...) (dev)->bus->rr((dev), __VA_ARGS__)
#define __mt76_wr(dev, ...) (dev)->bus->wr((dev), __VA_ARGS__)
#define __mt76_rmw(dev, ...) (dev)->bus->rmw((dev), __VA_ARGS__)
@@ -602,21 +617,6 @@ static inline u16 mt76_rev(struct mt76_dev *dev)
#define mt76_queue_tx_cleanup(dev, ...) (dev)->mt76.queue_ops->tx_cleanup(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_kick(dev, ...) (dev)->mt76.queue_ops->kick(&((dev)->mt76), __VA_ARGS__)
-static inline struct mt76_channel_state *
-mt76_channel_state(struct mt76_dev *dev, struct ieee80211_channel *c)
-{
- struct mt76_sband *msband;
- int idx;
-
- if (c->band == NL80211_BAND_2GHZ)
- msband = &dev->sband_2g;
- else
- msband = &dev->sband_5g;
-
- idx = c - &msband->sband.channels[0];
- return &msband->chan[idx];
-}
-
struct mt76_dev *mt76_alloc_device(struct device *pdev, unsigned int size,
const struct ieee80211_ops *ops,
const struct mt76_driver_ops *drv_ops);
@@ -626,6 +626,7 @@ void mt76_unregister_device(struct mt76_dev *dev);
void mt76_free_device(struct mt76_dev *dev);
struct dentry *mt76_register_debugfs(struct mt76_dev *dev);
+int mt76_queues_read(struct seq_file *s, void *data);
void mt76_seq_puts_array(struct seq_file *file, const char *str,
s8 *val, int len);
@@ -718,6 +719,7 @@ void mt76_release_buffered_frames(struct ieee80211_hw *hw,
bool more_data);
bool mt76_has_tx_pending(struct mt76_dev *dev);
void mt76_set_channel(struct mt76_dev *dev);
+void mt76_update_survey(struct mt76_dev *dev);
int mt76_get_survey(struct ieee80211_hw *hw, int idx,
struct survey_info *survey);
void mt76_set_stream_caps(struct mt76_dev *dev, bool vht);
@@ -759,6 +761,7 @@ int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
void mt76_csa_check(struct mt76_dev *dev);
void mt76_csa_finish(struct mt76_dev *dev);
+int mt76_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant);
int mt76_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set);
void mt76_insert_ccmp_hdr(struct sk_buff *skb, u8 key_id);
int mt76_get_rate(struct mt76_dev *dev,
@@ -768,6 +771,8 @@ void mt76_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
const u8 *mac);
void mt76_sw_scan_complete(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
+u32 mt76_calc_tx_airtime(struct mt76_dev *dev, struct ieee80211_tx_info *info,
+ int len);
/* internal */
void mt76_tx_free(struct mt76_dev *dev);
@@ -778,6 +783,8 @@ void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
struct napi_struct *napi);
void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames);
+u32 mt76_calc_rx_airtime(struct mt76_dev *dev, struct mt76_rx_status *status,
+ int len);
/* usb */
static inline bool mt76u_urb_error(struct urb *urb)
@@ -799,7 +806,8 @@ static inline int
mt76u_bulk_msg(struct mt76_dev *dev, void *data, int len, int *actual_len,
int timeout)
{
- struct usb_device *udev = to_usb_device(dev->dev);
+ struct usb_interface *uintf = to_usb_interface(dev->dev);
+ struct usb_device *udev = interface_to_usbdev(uintf);
struct mt76_usb *usb = &dev->usb;
unsigned int pipe;
@@ -817,6 +825,7 @@ int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
const u16 offset, const u32 val);
int mt76u_init(struct mt76_dev *dev, struct usb_interface *intf);
+void mt76u_deinit(struct mt76_dev *dev);
int mt76u_alloc_queues(struct mt76_dev *dev);
void mt76u_stop_tx(struct mt76_dev *dev);
void mt76u_stop_rx(struct mt76_dev *dev);
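
[note: the mt76u_bulk_msg hunk above reflects an ownership change: dev->dev now points at the USB interface rather than the usb_device (see the mt76x0u_probe hunk below, which passes &usb_intf->dev to mt76_alloc_device). Helpers therefore recover the device through the interface; the recurring two-step could be factored as, say, a hypothetical helper:

	/* dev->dev is the usb_interface's device after this series */
	static inline struct usb_device *mt76u_to_usb_dev(struct mt76_dev *dev)
	{
		return interface_to_usbdev(to_usb_interface(dev->dev));
	}
]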
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7603/debugfs.c
index 5942fe76c6e9..47c85a9fac28 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/debugfs.c
@@ -69,6 +69,41 @@ mt7603_edcca_get(void *data, u64 *val)
DEFINE_DEBUGFS_ATTRIBUTE(fops_edcca, mt7603_edcca_get,
mt7603_edcca_set, "%lld\n");
+static int
+mt7603_ampdu_stat_read(struct seq_file *file, void *data)
+{
+ struct mt7603_dev *dev = file->private;
+ int bound[3], i, range;
+
+ range = mt76_rr(dev, MT_AGG_ASRCR);
+ for (i = 0; i < ARRAY_SIZE(bound); i++)
+ bound[i] = MT_AGG_ASRCR_RANGE(range, i) + 1;
+
+ seq_printf(file, "Length: %8d | ", bound[0]);
+ for (i = 0; i < ARRAY_SIZE(bound) - 1; i++)
+ seq_printf(file, "%3d -%3d | ",
+ bound[i], bound[i + 1]);
+ seq_puts(file, "\nCount: ");
+ for (i = 0; i < ARRAY_SIZE(bound); i++)
+ seq_printf(file, "%8d | ", dev->mt76.aggr_stats[i]);
+ seq_puts(file, "\n");
+
+ return 0;
+}
+
+static int
+mt7603_ampdu_stat_open(struct inode *inode, struct file *f)
+{
+ return single_open(f, mt7603_ampdu_stat_read, inode->i_private);
+}
+
+static const struct file_operations fops_ampdu_stat = {
+ .open = mt7603_ampdu_stat_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
void mt7603_init_debugfs(struct mt7603_dev *dev)
{
struct dentry *dir;
@@ -77,6 +112,9 @@ void mt7603_init_debugfs(struct mt7603_dev *dev)
if (!dir)
return;
+ debugfs_create_file("ampdu_stat", 0400, dir, dev, &fops_ampdu_stat);
+ debugfs_create_devm_seqfile(dev->mt76.dev, "queues", dir,
+ mt76_queues_read);
debugfs_create_file("edcca", 0600, dir, dev, &fops_edcca);
debugfs_create_u32("reset_test", 0600, dir, &dev->reset_test);
debugfs_create_devm_seqfile(dev->mt76.dev, "reset", dir,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
index 24d82a20d046..a6ab73060aad 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
@@ -152,6 +152,8 @@ static int mt7603_poll_tx(struct napi_struct *napi, int budget)
for (i = MT_TXQ_MCU; i >= 0; i--)
mt76_queue_tx_cleanup(dev, i, false);
+ mt7603_mac_sta_poll(dev);
+
tasklet_schedule(&dev->mt76.tx_tasklet);
return 0;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/init.c b/drivers/net/wireless/mediatek/mt76/mt7603/init.c
index ad2ccdbe7258..0696dbf28c5b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/init.c
@@ -7,6 +7,8 @@
const struct mt76_driver_ops mt7603_drv_ops = {
.txwi_size = MT_TXD_SIZE,
+ .drv_flags = MT_DRV_SW_RX_AIRTIME,
+ .survey_flags = SURVEY_INFO_TIME_TX,
.tx_prepare_skb = mt7603_tx_prepare_skb,
.tx_complete_skb = mt7603_tx_complete_skb,
.rx_skb = mt7603_queue_rx_skb,
@@ -524,6 +526,8 @@ int mt7603_register_device(struct mt7603_dev *dev)
bus_ops->rmw = mt7603_rmw;
dev->mt76.bus = bus_ops;
+ INIT_LIST_HEAD(&dev->sta_poll_list);
+ spin_lock_init(&dev->sta_poll_lock);
spin_lock_init(&dev->ps_lock);
INIT_DELAYED_WORK(&dev->mt76.mac_work, mt7603_mac_work);
@@ -552,7 +556,6 @@ int mt7603_register_device(struct mt7603_dev *dev)
wiphy->iface_combinations = if_comb;
wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
- ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
ieee80211_hw_set(hw, TX_STATUS_NO_AMPDU_LEN);
/* init led callbacks */
@@ -561,16 +564,7 @@ int mt7603_register_device(struct mt7603_dev *dev)
dev->mt76.led_cdev.blink_set = mt7603_led_set_blink;
}
- wiphy->interface_modes =
- BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_AP) |
-#ifdef CONFIG_MAC80211_MESH
- BIT(NL80211_IFTYPE_MESH_POINT) |
-#endif
- BIT(NL80211_IFTYPE_ADHOC);
-
wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
-
wiphy->reg_notifier = mt7603_regd_notifier;
ret = mt76_register_device(&dev->mt76, true, mt7603_rates,
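
[note: the drv_ops hunk above replaces the old one-off booleans (txwi_flags, tx_aligned4_skbs) with a shared drv_flags bitmask plus per-driver survey_flags. The actual bit layout lives in mt76.h outside the hunks shown here; a plausible shape, with the names taken from this series but the bit positions assumed:

	enum mt76_drv_flags {
		MT_DRV_TXWI_NO_FREE	= BIT(0),
		MT_DRV_TX_ALIGNED4_SKBS	= BIT(1),
		MT_DRV_SW_RX_AIRTIME	= BIT(2),
	};
]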
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
index c328192307c4..812d081ad943 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
@@ -31,6 +31,16 @@ mt76_start_tx_ac(struct mt7603_dev *dev, u32 mask)
mt76_set(dev, MT_WF_ARB_TX_START_0, mt7603_ac_queue_mask0(mask));
}
+void mt7603_mac_reset_counters(struct mt7603_dev *dev)
+{
+ int i;
+
+ for (i = 0; i < 2; i++)
+ mt76_rr(dev, MT_TX_AGG_CNT(i));
+
+ memset(dev->mt76.aggr_stats, 0, sizeof(dev->mt76.aggr_stats));
+}
+
void mt7603_mac_set_timing(struct mt7603_dev *dev)
{
u32 cck = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 231) |
@@ -150,6 +160,8 @@ void mt7603_wtbl_init(struct mt7603_dev *dev, int idx, int vif,
addr = mt7603_wtbl4_addr(idx);
for (i = 0; i < MT_WTBL4_SIZE; i += 4)
mt76_wr(dev, addr + i, 0);
+
+ mt7603_wtbl_update(dev, idx, MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
}
static void
@@ -370,6 +382,84 @@ void mt7603_mac_tx_ba_reset(struct mt7603_dev *dev, int wcid, int tid,
mt76_rmw(dev, addr + (15 * 4), tid_mask, tid_val);
}
+void mt7603_mac_sta_poll(struct mt7603_dev *dev)
+{
+ static const u8 ac_to_tid[4] = {
+ [IEEE80211_AC_BE] = 0,
+ [IEEE80211_AC_BK] = 1,
+ [IEEE80211_AC_VI] = 4,
+ [IEEE80211_AC_VO] = 6
+ };
+ struct ieee80211_sta *sta;
+ struct mt7603_sta *msta;
+ u32 total_airtime = 0;
+ u32 airtime[4];
+ u32 addr;
+ int i;
+
+ rcu_read_lock();
+
+ while (1) {
+ bool clear = false;
+
+ spin_lock_bh(&dev->sta_poll_lock);
+ if (list_empty(&dev->sta_poll_list)) {
+ spin_unlock_bh(&dev->sta_poll_lock);
+ break;
+ }
+
+ msta = list_first_entry(&dev->sta_poll_list, struct mt7603_sta,
+ poll_list);
+ list_del_init(&msta->poll_list);
+ spin_unlock_bh(&dev->sta_poll_lock);
+
+ addr = mt7603_wtbl4_addr(msta->wcid.idx);
+ for (i = 0; i < 4; i++) {
+ u32 airtime_last = msta->tx_airtime_ac[i];
+
+ msta->tx_airtime_ac[i] = mt76_rr(dev, addr + i * 8);
+ airtime[i] = msta->tx_airtime_ac[i] - airtime_last;
+ airtime[i] *= 32;
+ total_airtime += airtime[i];
+
+ if (msta->tx_airtime_ac[i] & BIT(22))
+ clear = true;
+ }
+
+ if (clear) {
+ mt7603_wtbl_update(dev, msta->wcid.idx,
+ MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
+ memset(msta->tx_airtime_ac, 0,
+ sizeof(msta->tx_airtime_ac));
+ }
+
+ if (!msta->wcid.sta)
+ continue;
+
+ sta = container_of((void *)msta, struct ieee80211_sta, drv_priv);
+ for (i = 0; i < 4; i++) {
+ struct mt76_queue *q = dev->mt76.q_tx[i].q;
+ u8 qidx = q->hw_idx;
+ u8 tid = ac_to_tid[i];
+ u32 txtime = airtime[qidx];
+
+ if (!txtime)
+ continue;
+
+ ieee80211_sta_register_airtime(sta, tid, txtime, 0);
+ }
+ }
+
+ rcu_read_unlock();
+
+ if (!total_airtime)
+ return;
+
+ spin_lock_bh(&dev->mt76.cc_lock);
+ dev->mt76.chan_state->cc_tx += total_airtime;
+ spin_unlock_bh(&dev->mt76.cc_lock);
+}
+
static struct mt76_wcid *
mt7603_rx_get_wcid(struct mt7603_dev *dev, u8 idx, bool unicast)
{
@@ -435,6 +525,20 @@ mt7603_mac_fill_rx(struct mt7603_dev *dev, struct sk_buff *skb)
status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED;
}
+ if (!(rxd2 & (MT_RXD2_NORMAL_NON_AMPDU_SUB |
+ MT_RXD2_NORMAL_NON_AMPDU))) {
+ status->flag |= RX_FLAG_AMPDU_DETAILS;
+
+ /* all subframes of an A-MPDU have the same timestamp */
+ if (dev->rx_ampdu_ts != rxd[12]) {
+ if (!++dev->mt76.ampdu_ref)
+ dev->mt76.ampdu_ref++;
+ }
+ dev->rx_ampdu_ts = rxd[12];
+
+ status->ampdu_ref = dev->mt76.ampdu_ref;
+ }
+
remove_pad = rxd1 & MT_RXD1_NORMAL_HDR_OFFSET;
if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR)
@@ -1032,8 +1136,10 @@ mt7603_fill_txs(struct mt7603_dev *dev, struct mt7603_sta *sta,
if (idx && (cur_rate->idx != info->status.rates[i].idx ||
cur_rate->flags != info->status.rates[i].flags)) {
i++;
- if (i == ARRAY_SIZE(info->status.rates))
+ if (i == ARRAY_SIZE(info->status.rates)) {
+ i--;
break;
+ }
info->status.rates[i] = *cur_rate;
info->status.rates[i].count = 0;
@@ -1135,6 +1241,12 @@ void mt7603_mac_add_txs(struct mt7603_dev *dev, void *data)
msta = container_of(wcid, struct mt7603_sta, wcid);
sta = wcid_to_sta(wcid);
+ if (list_empty(&msta->poll_list)) {
+ spin_lock_bh(&dev->sta_poll_lock);
+ list_add_tail(&msta->poll_list, &dev->sta_poll_list);
+ spin_unlock_bh(&dev->sta_poll_lock);
+ }
+
if (mt7603_mac_add_txs_skb(dev, msta, pid, txs_data))
goto out;
@@ -1461,22 +1573,9 @@ void mt7603_update_channel(struct mt76_dev *mdev)
{
struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
struct mt76_channel_state *state;
- ktime_t cur_time;
- u32 busy;
-
- if (!test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
- return;
- state = mt76_channel_state(&dev->mt76, dev->mt76.chandef.chan);
- busy = mt76_rr(dev, MT_MIB_STAT_PSCCA);
-
- spin_lock_bh(&dev->mt76.cc_lock);
- cur_time = ktime_get_boottime();
- state->cc_busy += busy;
- state->cc_active += ktime_to_us(ktime_sub(cur_time,
- dev->mt76.survey_time));
- dev->mt76.survey_time = cur_time;
- spin_unlock_bh(&dev->mt76.cc_lock);
+ state = mdev->chan_state;
+ state->cc_busy += mt76_rr(dev, MT_MIB_STAT_CCA);
}
void
@@ -1677,15 +1776,23 @@ void mt7603_mac_work(struct work_struct *work)
struct mt7603_dev *dev = container_of(work, struct mt7603_dev,
mt76.mac_work.work);
bool reset = false;
+ int i, idx;
mt76_tx_status_check(&dev->mt76, NULL, false);
mutex_lock(&dev->mt76.mutex);
dev->mac_work_count++;
- mt7603_update_channel(&dev->mt76);
+ mt76_update_survey(&dev->mt76);
mt7603_edcca_check(dev);
+ for (i = 0, idx = 0; i < 2; i++) {
+ u32 val = mt76_rr(dev, MT_TX_AGG_CNT(i));
+
+ dev->mt76.aggr_stats[idx++] += val & 0xffff;
+ dev->mt76.aggr_stats[idx++] += val >> 16;
+ }
+
if (dev->mac_work_count == 10)
mt7603_false_cca_check(dev);
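
[note: mt7603_mac_sta_poll() above reads free-running per-AC WTBL counters and feeds the per-interval delta to ieee80211_sta_register_airtime(). Unsigned subtraction keeps the delta valid across counter wrap, and the *32 suggests the hardware counts in 32us units (an inference from the hunk, not a datasheet claim). The core rule as a sketch:

	/* wrap-safe delta between two free-running u32 counters,
	 * scaled by the assumed 32us hardware tick */
	static u32 airtime_delta_us(u32 cur, u32 last)
	{
		return (cur - last) * 32;	/* well-defined modulo 2^32 */
	}

The BIT(22) check (BIT(30) in the mt7615 variant below) looks like a saturation flag that forces an ADM_COUNT_CLEAR and a restart from zero.]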
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/main.c b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
index 25d5b1608bc9..962e2822d19f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
@@ -13,6 +13,7 @@ mt7603_start(struct ieee80211_hw *hw)
{
struct mt7603_dev *dev = hw->priv;
+ mt7603_mac_reset_counters(dev);
mt7603_mac_start(dev);
dev->mt76.survey_time = ktime_get_boottime();
set_bit(MT76_STATE_RUNNING, &dev->mt76.state);
@@ -65,6 +66,7 @@ mt7603_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
idx = MT7603_WTBL_RESERVED - 1 - mvif->idx;
dev->vif_mask |= BIT(mvif->idx);
+ INIT_LIST_HEAD(&mvif->sta.poll_list);
mvif->sta.wcid.idx = idx;
mvif->sta.wcid.hw_key_idx = -1;
@@ -86,8 +88,9 @@ static void
mt7603_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
struct mt7603_vif *mvif = (struct mt7603_vif *)vif->drv_priv;
+ struct mt7603_sta *msta = &mvif->sta;
struct mt7603_dev *dev = hw->priv;
- int idx = mvif->sta.wcid.idx;
+ int idx = msta->wcid.idx;
mt76_wr(dev, MT_MAC_ADDR0(mvif->idx), 0);
mt76_wr(dev, MT_MAC_ADDR1(mvif->idx), 0);
@@ -98,6 +101,11 @@ mt7603_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
rcu_assign_pointer(dev->mt76.wcid[idx], NULL);
mt76_txq_remove(&dev->mt76, vif->txq);
+ spin_lock_bh(&dev->sta_poll_lock);
+ if (!list_empty(&msta->poll_list))
+ list_del_init(&msta->poll_list);
+ spin_unlock_bh(&dev->sta_poll_lock);
+
mutex_lock(&dev->mt76.mutex);
dev->vif_mask &= ~BIT(mvif->idx);
mutex_unlock(&dev->mt76.mutex);
@@ -324,6 +332,7 @@ mt7603_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
if (idx < 0)
return -ENOSPC;
+ INIT_LIST_HEAD(&msta->poll_list);
__skb_queue_head_init(&msta->psq);
msta->ps = ~0;
msta->smps = ~0;
@@ -360,6 +369,11 @@ mt7603_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
mt7603_filter_tx(dev, wcid->idx, true);
spin_unlock_bh(&dev->ps_lock);
+ spin_lock_bh(&dev->sta_poll_lock);
+ if (!list_empty(&msta->poll_list))
+ list_del_init(&msta->poll_list);
+ spin_unlock_bh(&dev->sta_poll_lock);
+
mt7603_wtbl_clear(dev, wcid->idx);
}
@@ -555,12 +569,14 @@ mt7603_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u16 ssn = params->ssn;
u8 ba_size = params->buf_size;
struct mt76_txq *mtxq;
+ int ret = 0;
if (!txq)
return -EINVAL;
mtxq = (struct mt76_txq *)txq->drv_priv;
+ mutex_lock(&dev->mt76.mutex);
switch (action) {
case IEEE80211_AMPDU_RX_START:
mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid, ssn,
@@ -582,7 +598,7 @@ mt7603_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
break;
case IEEE80211_AMPDU_TX_START:
mtxq->agg_ssn = IEEE80211_SN_TO_SEQ(ssn);
- ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ ret = IEEE80211_AMPDU_TX_START_IMMEDIATE;
break;
case IEEE80211_AMPDU_TX_STOP_CONT:
mtxq->aggr = false;
@@ -590,8 +606,9 @@ mt7603_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
break;
}
+ mutex_unlock(&dev->mt76.mutex);
- return 0;
+ return ret;
}
static void
@@ -676,6 +693,7 @@ const struct ieee80211_ops mt7603_ops = {
.set_coverage_class = mt7603_set_coverage_class,
.set_tim = mt76_set_tim,
.get_survey = mt76_get_survey,
+ .get_antenna = mt76_get_antenna,
};
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h b/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h
index 257300fec4f8..ab54b0612e98 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h
@@ -61,6 +61,9 @@ struct mt7603_sta {
struct mt7603_vif *vif;
+ struct list_head poll_list;
+ u32 tx_airtime_ac[4];
+
struct sk_buff_head psq;
struct ieee80211_tx_rate rates[4];
@@ -103,12 +106,16 @@ struct mt7603_dev {
u8 vif_mask;
+ struct list_head sta_poll_list;
+ spinlock_t sta_poll_lock;
+
struct mt7603_sta global_sta;
u32 agc0, agc3;
u32 false_cca_ofdm, false_cca_cck;
unsigned long last_cca_adj;
+ __le32 rx_ampdu_ts;
u8 rssi_offset[3];
u8 slottime;
@@ -118,8 +125,6 @@ struct mt7603_dev {
ktime_t ed_time;
- struct mt76_queue q_rx;
-
spinlock_t ps_lock;
u8 mac_work_count;
@@ -191,6 +196,7 @@ static inline void mt7603_irq_disable(struct mt7603_dev *dev, u32 mask)
mt76_set_irq_mask(&dev->mt76, MT_INT_MASK_CSR, mask, 0);
}
+void mt7603_mac_reset_counters(struct mt7603_dev *dev);
void mt7603_mac_dma_start(struct mt7603_dev *dev);
void mt7603_mac_start(struct mt7603_dev *dev);
void mt7603_mac_stop(struct mt7603_dev *dev);
@@ -202,6 +208,7 @@ void mt7603_mac_add_txs(struct mt7603_dev *dev, void *data);
void mt7603_mac_rx_ba_reset(struct mt7603_dev *dev, void *addr, u8 tid);
void mt7603_mac_tx_ba_reset(struct mt7603_dev *dev, int wcid, int tid,
int ba_size);
+void mt7603_mac_sta_poll(struct mt7603_dev *dev);
void mt7603_pse_client_reset(struct mt7603_dev *dev);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/regs.h b/drivers/net/wireless/mediatek/mt76/mt7603/regs.h
index eb9eefe8e125..6e23ed3dfdff 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/regs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/regs.h
@@ -212,6 +212,9 @@
#define MT_AGG_PCR_RTS_THR GENMASK(19, 0)
#define MT_AGG_PCR_RTS_PKT_THR GENMASK(31, 25)
+#define MT_AGG_ASRCR MT_WF_AGG(0x060)
+#define MT_AGG_ASRCR_RANGE(val, n) (((val) >> ((n) << 3)) & GENMASK(5, 0))
+
#define MT_AGG_CONTROL MT_WF_AGG(0x070)
#define MT_AGG_CONTROL_NO_BA_RULE BIT(0)
#define MT_AGG_CONTROL_NO_BA_AR_RULE BIT(1)
@@ -555,6 +558,8 @@ enum {
#define MT_MIB_STAT_PSCCA MT_MIB_STAT(16)
#define MT_MIB_STAT_PSCCA_MASK GENMASK(23, 0)
+#define MT_TX_AGG_CNT(n) MT_MIB(0xa8 + ((n) << 2))
+
#define MT_MIB_STAT_ED MT_MIB_STAT(18)
#define MT_MIB_STAT_ED_MASK GENMASK(23, 0)
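
[note: each MT_TX_AGG_CNT register packs two 16-bit bucket counters, which is why mt7603_mac_work() above splits every read into two consecutive aggr_stats slots:

	/* split one MT_TX_AGG_CNT read into its two 16-bit bucket counts */
	static void unpack_tx_agg_cnt(u32 val, u32 *lo, u32 *hi)
	{
		*lo = val & 0xffff;
		*hi = val >> 16;
	}

With 2 registers on mt7603 and 4 on mt7615, the shared aggr_stats[32] array in struct mt76_dev leaves ample headroom for the 16-register mt76x02 parts.]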
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
index 2428a4659a1c..f6b75f832e6a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
@@ -37,6 +37,44 @@ DEFINE_DEBUGFS_ATTRIBUTE(fops_scs, mt7615_scs_get,
mt7615_scs_set, "%lld\n");
static int
+mt7615_ampdu_stat_read(struct seq_file *file, void *data)
+{
+ struct mt7615_dev *dev = file->private;
+ int bound[7], i, range;
+
+ range = mt76_rr(dev, MT_AGG_ASRCR0);
+ for (i = 0; i < 4; i++)
+ bound[i] = MT_AGG_ASRCR_RANGE(range, i) + 1;
+ range = mt76_rr(dev, MT_AGG_ASRCR1);
+ for (i = 0; i < 3; i++)
+ bound[i + 4] = MT_AGG_ASRCR_RANGE(range, i) + 1;
+
+ seq_printf(file, "Length: %8d | ", bound[0]);
+ for (i = 0; i < ARRAY_SIZE(bound) - 1; i++)
+ seq_printf(file, "%3d -%3d | ",
+ bound[i], bound[i + 1]);
+ seq_puts(file, "\nCount: ");
+ for (i = 0; i < ARRAY_SIZE(bound); i++)
+ seq_printf(file, "%8d | ", dev->mt76.aggr_stats[i]);
+ seq_puts(file, "\n");
+
+ return 0;
+}
+
+static int
+mt7615_ampdu_stat_open(struct inode *inode, struct file *f)
+{
+ return single_open(f, mt7615_ampdu_stat_read, inode->i_private);
+}
+
+static const struct file_operations fops_ampdu_stat = {
+ .open = mt7615_ampdu_stat_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int
mt7615_radio_read(struct seq_file *s, void *data)
{
struct mt7615_dev *dev = dev_get_drvdata(s->private);
@@ -61,6 +99,63 @@ static int mt7615_read_temperature(struct seq_file *s, void *data)
return 0;
}
+static int
+mt7615_queues_acq(struct seq_file *s, void *data)
+{
+ struct mt7615_dev *dev = dev_get_drvdata(s->private);
+ int i;
+
+ for (i = 0; i < 16; i++) {
+ int j, acs = i / 4, index = i % 4;
+ u32 ctrl, val, qlen = 0;
+
+ val = mt76_rr(dev, MT_PLE_AC_QEMPTY(acs, index));
+ ctrl = BIT(31) | BIT(15) | (acs << 8);
+
+ for (j = 0; j < 32; j++) {
+ if (val & BIT(j))
+ continue;
+
+ mt76_wr(dev, MT_PLE_FL_Q0_CTRL,
+ ctrl | (j + (index << 5)));
+ qlen += mt76_get_field(dev, MT_PLE_FL_Q3_CTRL,
+ GENMASK(11, 0));
+ }
+ seq_printf(s, "AC%d%d: queued=%d\n", acs, index, qlen);
+ }
+
+ return 0;
+}
+
+static int
+mt7615_queues_read(struct seq_file *s, void *data)
+{
+ struct mt7615_dev *dev = dev_get_drvdata(s->private);
+ static const struct {
+ char *queue;
+ int id;
+ } queue_map[] = {
+ { "PDMA0", MT_TXQ_BE },
+ { "MCUQ", MT_TXQ_MCU },
+ { "MCUFWQ", MT_TXQ_FWDL },
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(queue_map); i++) {
+ struct mt76_sw_queue *q = &dev->mt76.q_tx[queue_map[i].id];
+
+ if (!q->q)
+ continue;
+
+ seq_printf(s,
+ "%s: queued=%d head=%d tail=%d\n",
+ queue_map[i].queue, q->q->queued, q->q->head,
+ q->q->tail);
+ }
+
+ return 0;
+}
+
int mt7615_init_debugfs(struct mt7615_dev *dev)
{
struct dentry *dir;
@@ -69,6 +164,11 @@ int mt7615_init_debugfs(struct mt7615_dev *dev)
if (!dir)
return -ENOMEM;
+ debugfs_create_devm_seqfile(dev->mt76.dev, "queues", dir,
+ mt7615_queues_read);
+ debugfs_create_devm_seqfile(dev->mt76.dev, "acq", dir,
+ mt7615_queues_acq);
+ debugfs_create_file("ampdu_stat", 0400, dir, dev, &fops_ampdu_stat);
debugfs_create_file("scs", 0600, dir, dev, &fops_scs);
debugfs_create_devm_seqfile(dev->mt76.dev, "radio", dir,
mt7615_radio_read);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/dma.c b/drivers/net/wireless/mediatek/mt76/mt7615/dma.c
index fe532cecbbdd..285d4f1d6178 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/dma.c
@@ -110,6 +110,8 @@ static int mt7615_poll_tx(struct napi_struct *napi, int budget)
for (i = 0; i < ARRAY_SIZE(queue_map); i++)
mt76_queue_tx_cleanup(dev, queue_map[i], false);
+ mt7615_mac_sta_poll(dev);
+
tasklet_schedule(&dev->mt76.tx_tasklet);
return 0;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
index 515bb58e19fd..eccad4987ac8 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
@@ -93,6 +93,7 @@ static int mt7615_check_eeprom(struct mt76_dev *dev)
static void mt7615_eeprom_parse_hw_cap(struct mt7615_dev *dev)
{
u8 val, *eeprom = dev->mt76.eeprom.data;
+ u8 tx_mask, rx_mask, max_nss;
val = FIELD_GET(MT_EE_NIC_WIFI_CONF_BAND_SEL,
eeprom[MT_EE_WIFI_CONF]);
@@ -108,6 +109,23 @@ static void mt7615_eeprom_parse_hw_cap(struct mt7615_dev *dev)
dev->mt76.cap.has_5ghz = true;
break;
}
+
+ /* read tx-rx mask from eeprom */
+ val = mt76_rr(dev, MT_TOP_STRAP_STA);
+ max_nss = val & MT_TOP_3NSS ? 3 : 4;
+
+ rx_mask = FIELD_GET(MT_EE_NIC_CONF_RX_MASK,
+ eeprom[MT_EE_NIC_CONF_0]);
+ if (!rx_mask || rx_mask > max_nss)
+ rx_mask = max_nss;
+
+ tx_mask = FIELD_GET(MT_EE_NIC_CONF_TX_MASK,
+ eeprom[MT_EE_NIC_CONF_0]);
+ if (!tx_mask || tx_mask > max_nss)
+ tx_mask = max_nss;
+
+ dev->mt76.chainmask = tx_mask << 8 | rx_mask;
+ dev->mt76.antenna_mask = BIT(tx_mask) - 1;
}
int mt7615_eeprom_get_power_index(struct mt7615_dev *dev,
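
[note: the EEPROM parsing above encodes the TX chain count in the high byte of chainmask and the RX count in the low byte, with antenna_mask as a contiguous bitmap of TX chains. A worked example, assuming a 2x2 part:

	/* tx_mask = rx_mask = 2 (a 2x2 device) */
	u16 chainmask    = 2 << 8 | 2;		/* 0x202 */
	u8  antenna_mask = BIT(2) - 1;		/* 0x3: antennas 0 and 1 */

This replaces the hardcoded 0x404/0xf defaults removed from mt7615_register_device() below.]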
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.h
index f4a4280768d2..c3bc69ac210e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.h
@@ -24,6 +24,9 @@ enum mt7615_eeprom_field {
__MT_EE_MAX = 0x3bf
};
+#define MT_EE_NIC_CONF_TX_MASK GENMASK(7, 4)
+#define MT_EE_NIC_CONF_RX_MASK GENMASK(3, 0)
+
#define MT_EE_NIC_CONF_TSSI_2G BIT(5)
#define MT_EE_NIC_CONF_TSSI_5G BIT(6)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/init.c b/drivers/net/wireless/mediatek/mt76/mt7615/init.c
index 1104e4c8aaa6..553bd4d988f7 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/init.c
@@ -20,7 +20,8 @@ static void mt7615_phy_init(struct mt7615_dev *dev)
static void mt7615_mac_init(struct mt7615_dev *dev)
{
- u32 val;
+ u32 val, mask, set;
+ int i;
/* enable band 0/1 clk */
mt76_set(dev, MT_CFG_CCR,
@@ -50,7 +51,7 @@ static void mt7615_mac_init(struct mt7615_dev *dev)
MT_TMAC_CTCR0_INS_DDLMT_EN);
mt7615_mcu_set_rts_thresh(dev, 0x92b);
- mt7615_mac_set_scs(dev, false);
+ mt7615_mac_set_scs(dev, true);
mt76_rmw(dev, MT_AGG_SCR, MT_AGG_SCR_NLNAV_MID_PTEC_DIS,
MT_AGG_SCR_NLNAV_MID_PTEC_DIS);
@@ -85,6 +86,24 @@ static void mt7615_mac_init(struct mt7615_dev *dev)
MT_AGG_ARCR_RATE_DOWN_RATIO_EN |
FIELD_PREP(MT_AGG_ARCR_RATE_DOWN_RATIO, 1) |
FIELD_PREP(MT_AGG_ARCR_RATE_UP_EXTRA_TH, 4)));
+
+ mask = MT_DMA_RCFR0_MCU_RX_MGMT |
+ MT_DMA_RCFR0_MCU_RX_CTL_NON_BAR |
+ MT_DMA_RCFR0_MCU_RX_CTL_BAR |
+ MT_DMA_RCFR0_MCU_RX_BYPASS |
+ MT_DMA_RCFR0_RX_DROPPED_UCAST |
+ MT_DMA_RCFR0_RX_DROPPED_MCAST;
+ set = FIELD_PREP(MT_DMA_RCFR0_RX_DROPPED_UCAST, 2) |
+ FIELD_PREP(MT_DMA_RCFR0_RX_DROPPED_MCAST, 2);
+ mt76_rmw(dev, MT_DMA_BN0RCFR0, mask, set);
+ mt76_rmw(dev, MT_DMA_BN1RCFR0, mask, set);
+
+ for (i = 0; i < MT7615_WTBL_SIZE; i++)
+ mt7615_mac_wtbl_update(dev, i,
+ MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
+
+ mt76_set(dev, MT_WF_RMAC_MIB_TIME0, MT_WF_RMAC_MIB_RXTIME_EN);
+ mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0, MT_WF_RMAC_MIB_RXTIME_EN);
}
static int mt7615_init_hardware(struct mt7615_dev *dev)
@@ -158,6 +177,9 @@ static struct ieee80211_rate mt7615_rates[] = {
static const struct ieee80211_iface_limit if_limits[] = {
{
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_ADHOC)
+ }, {
.max = MT7615_MAX_INTERFACES,
.types = BIT(NL80211_IFTYPE_AP) |
#ifdef CONFIG_MAC80211_MESH
@@ -249,12 +271,14 @@ int mt7615_register_device(struct mt7615_dev *dev)
struct wiphy *wiphy = hw->wiphy;
int ret;
+ INIT_DELAYED_WORK(&dev->mt76.mac_work, mt7615_mac_work);
+ INIT_LIST_HEAD(&dev->sta_poll_list);
+ spin_lock_init(&dev->sta_poll_lock);
+
ret = mt7615_init_hardware(dev);
if (ret)
return ret;
- INIT_DELAYED_WORK(&dev->mt76.mac_work, mt7615_mac_work);
-
hw->queues = 4;
hw->max_rates = 3;
hw->max_report_rates = 7;
@@ -268,7 +292,8 @@ int mt7615_register_device(struct mt7615_dev *dev)
wiphy->reg_notifier = mt7615_regd_notifier;
wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
- ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_VHT_IBSS);
+
ieee80211_hw_set(hw, TX_STATUS_NO_AMPDU_LEN);
dev->mt76.sband_2g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
@@ -278,16 +303,8 @@ int mt7615_register_device(struct mt7615_dev *dev)
IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 |
IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK |
IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ;
- dev->mt76.chainmask = 0x404;
- dev->mt76.antenna_mask = 0xf;
dev->dfs_state = -1;
- wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
-#ifdef CONFIG_MAC80211_MESH
- BIT(NL80211_IFTYPE_MESH_POINT) |
-#endif
- BIT(NL80211_IFTYPE_AP);
-
ret = mt76_register_device(&dev->mt76, true, mt7615_rates,
ARRAY_SIZE(mt7615_rates));
if (ret)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
index e07ce2c10013..c77adc5d2552 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
@@ -41,6 +41,25 @@ static struct mt76_wcid *mt7615_rx_get_wcid(struct mt7615_dev *dev,
return &sta->vif->sta.wcid;
}
+void mt7615_mac_reset_counters(struct mt7615_dev *dev)
+{
+ int i;
+
+ for (i = 0; i < 4; i++)
+ mt76_rr(dev, MT_TX_AGG_CNT(i));
+
+ memset(dev->mt76.aggr_stats, 0, sizeof(dev->mt76.aggr_stats));
+
+ /* TODO: add DBDC support */
+
+ /* reset airtime counters */
+ mt76_rr(dev, MT_MIB_SDR9(0));
+ mt76_rr(dev, MT_MIB_SDR36(0));
+ mt76_rr(dev, MT_MIB_SDR37(0));
+ mt76_set(dev, MT_WF_RMAC_MIB_TIME0, MT_WF_RMAC_MIB_RXTIME_CLR);
+ mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0, MT_WF_RMAC_MIB_RXTIME_CLR);
+}
+
int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
{
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
@@ -62,6 +81,16 @@ int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
idx = FIELD_GET(MT_RXD2_NORMAL_WLAN_IDX, rxd2);
status->wcid = mt7615_rx_get_wcid(dev, idx, unicast);
+ if (status->wcid) {
+ struct mt7615_sta *msta;
+
+ msta = container_of(status->wcid, struct mt7615_sta, wcid);
+ spin_lock_bh(&dev->sta_poll_lock);
+ if (list_empty(&msta->poll_list))
+ list_add_tail(&msta->poll_list, &dev->sta_poll_list);
+ spin_unlock_bh(&dev->sta_poll_lock);
+ }
+
/* TODO: properly support DBDC */
status->freq = dev->mt76.chandef.chan->center_freq;
status->band = dev->mt76.chandef.chan->band;
@@ -83,6 +112,20 @@ int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED;
}
+ if (!(rxd2 & (MT_RXD2_NORMAL_NON_AMPDU_SUB |
+ MT_RXD2_NORMAL_NON_AMPDU))) {
+ status->flag |= RX_FLAG_AMPDU_DETAILS;
+
+ /* all subframes of an A-MPDU have the same timestamp */
+ if (dev->rx_ampdu_ts != rxd[12]) {
+ if (!++dev->mt76.ampdu_ref)
+ dev->mt76.ampdu_ref++;
+ }
+ dev->rx_ampdu_ts = rxd[12];
+
+ status->ampdu_ref = dev->mt76.ampdu_ref;
+ }
+
remove_pad = rxd1 & MT_RXD1_NORMAL_HDR_OFFSET;
if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR)
@@ -460,6 +503,91 @@ static u32 mt7615_mac_wtbl_addr(int wcid)
return MT_WTBL_BASE + wcid * MT_WTBL_ENTRY_SIZE;
}
+bool mt7615_mac_wtbl_update(struct mt7615_dev *dev, int idx, u32 mask)
+{
+ mt76_rmw(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_WLAN_IDX,
+ FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, idx) | mask);
+
+ return mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY,
+ 0, 5000);
+}
+
+void mt7615_mac_sta_poll(struct mt7615_dev *dev)
+{
+ static const u8 ac_to_tid[4] = {
+ [IEEE80211_AC_BE] = 0,
+ [IEEE80211_AC_BK] = 1,
+ [IEEE80211_AC_VI] = 4,
+ [IEEE80211_AC_VO] = 6
+ };
+ static const u8 hw_queue_map[] = {
+ [IEEE80211_AC_BK] = 0,
+ [IEEE80211_AC_BE] = 1,
+ [IEEE80211_AC_VI] = 2,
+ [IEEE80211_AC_VO] = 3,
+ };
+ struct ieee80211_sta *sta;
+ struct mt7615_sta *msta;
+ u32 addr, tx_time[4], rx_time[4];
+ int i;
+
+ rcu_read_lock();
+
+ while (true) {
+ bool clear = false;
+
+ spin_lock_bh(&dev->sta_poll_lock);
+ if (list_empty(&dev->sta_poll_list)) {
+ spin_unlock_bh(&dev->sta_poll_lock);
+ break;
+ }
+ msta = list_first_entry(&dev->sta_poll_list,
+ struct mt7615_sta, poll_list);
+ list_del_init(&msta->poll_list);
+ spin_unlock_bh(&dev->sta_poll_lock);
+
+ addr = mt7615_mac_wtbl_addr(msta->wcid.idx) + 19 * 4;
+
+ for (i = 0; i < 4; i++, addr += 8) {
+ u32 tx_last = msta->airtime_ac[i];
+ u32 rx_last = msta->airtime_ac[i + 4];
+
+ msta->airtime_ac[i] = mt76_rr(dev, addr);
+ msta->airtime_ac[i + 4] = mt76_rr(dev, addr + 4);
+ tx_time[i] = msta->airtime_ac[i] - tx_last;
+ rx_time[i] = msta->airtime_ac[i + 4] - rx_last;
+
+ if ((tx_last | rx_last) & BIT(30))
+ clear = true;
+ }
+
+ if (clear) {
+ mt7615_mac_wtbl_update(dev, msta->wcid.idx,
+ MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
+ memset(msta->airtime_ac, 0, sizeof(msta->airtime_ac));
+ }
+
+ if (!msta->wcid.sta)
+ continue;
+
+ sta = container_of((void *)msta, struct ieee80211_sta,
+ drv_priv);
+ for (i = 0; i < 4; i++) {
+ u32 tx_cur = tx_time[i];
+ u32 rx_cur = rx_time[hw_queue_map[i]];
+ u8 tid = ac_to_tid[i];
+
+ if (!tx_cur && !rx_cur)
+ continue;
+
+ ieee80211_sta_register_airtime(sta, tid, tx_cur,
+ rx_cur);
+ }
+ }
+
+ rcu_read_unlock();
+}
+
void mt7615_mac_set_rates(struct mt7615_dev *dev, struct mt7615_sta *sta,
struct ieee80211_tx_rate *probe_rate,
struct ieee80211_tx_rate *rates)
@@ -692,11 +820,8 @@ mt7615_mac_wtbl_update_pk(struct mt7615_dev *dev, struct mt76_wcid *wcid,
mt76_wr(dev, MT_WTBL_RICR0, w0);
mt76_wr(dev, MT_WTBL_RICR1, w1);
- mt76_wr(dev, MT_WTBL_UPDATE,
- FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, wcid->idx) |
- MT_WTBL_UPDATE_RXINFO_UPDATE);
-
- if (!mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000))
+ if (!mt7615_mac_wtbl_update(dev, wcid->idx,
+ MT_WTBL_UPDATE_RXINFO_UPDATE))
return -ETIMEDOUT;
return 0;
@@ -914,8 +1039,10 @@ static bool mt7615_fill_txs(struct mt7615_dev *dev, struct mt7615_sta *sta,
if (idx && (cur_rate->idx != info->status.rates[i].idx ||
cur_rate->flags != info->status.rates[i].flags)) {
i++;
- if (i == ARRAY_SIZE(info->status.rates))
+ if (i == ARRAY_SIZE(info->status.rates)) {
+ i--;
break;
+ }
info->status.rates[i] = *cur_rate;
info->status.rates[i].count = 0;
@@ -1026,6 +1153,11 @@ void mt7615_mac_add_txs(struct mt7615_dev *dev, void *data)
msta = container_of(wcid, struct mt7615_sta, wcid);
sta = wcid_to_sta(wcid);
+ spin_lock_bh(&dev->sta_poll_lock);
+ if (list_empty(&msta->poll_list))
+ list_add_tail(&msta->poll_list, &dev->sta_poll_list);
+ spin_unlock_bh(&dev->sta_poll_lock);
+
if (mt7615_mac_add_txs_skb(dev, msta, pid, txs_data))
goto out;
@@ -1239,38 +1371,49 @@ void mt7615_update_channel(struct mt76_dev *mdev)
{
struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
struct mt76_channel_state *state;
- ktime_t cur_time;
- u32 busy;
-
- if (!test_bit(MT76_STATE_RUNNING, &mdev->state))
- return;
+ u64 busy_time, tx_time, rx_time, obss_time;
- state = mt76_channel_state(mdev, mdev->chandef.chan);
/* TODO: add DBDC support */
- busy = mt76_get_field(dev, MT_MIB_SDR16(0), MT_MIB_BUSY_MASK);
-
- spin_lock_bh(&mdev->cc_lock);
- cur_time = ktime_get_boottime();
- state->cc_busy += busy;
- state->cc_active += ktime_to_us(ktime_sub(cur_time,
- mdev->survey_time));
- mdev->survey_time = cur_time;
- spin_unlock_bh(&mdev->cc_lock);
+ busy_time = mt76_get_field(dev, MT_MIB_SDR9(0),
+ MT_MIB_SDR9_BUSY_MASK);
+ tx_time = mt76_get_field(dev, MT_MIB_SDR36(0),
+ MT_MIB_SDR36_TXTIME_MASK);
+ rx_time = mt76_get_field(dev, MT_MIB_SDR37(0),
+ MT_MIB_SDR37_RXTIME_MASK);
+ obss_time = mt76_get_field(dev, MT_WF_RMAC_MIB_TIME5,
+ MT_MIB_OBSSTIME_MASK);
+
+ state = mdev->chan_state;
+ state->cc_busy += busy_time;
+ state->cc_tx += tx_time;
+ state->cc_rx += rx_time + obss_time;
+ state->cc_bss_rx += rx_time;
+
+ /* reset obss airtime */
+ mt76_set(dev, MT_WF_RMAC_MIB_TIME0, MT_WF_RMAC_MIB_RXTIME_CLR);
}
void mt7615_mac_work(struct work_struct *work)
{
struct mt7615_dev *dev;
+ int i, idx;
dev = (struct mt7615_dev *)container_of(work, struct mt76_dev,
mac_work.work);
mutex_lock(&dev->mt76.mutex);
- mt7615_update_channel(&dev->mt76);
+ mt76_update_survey(&dev->mt76);
if (++dev->mac_work_count == 5) {
mt7615_mac_scs_check(dev);
dev->mac_work_count = 0;
}
+
+ for (i = 0, idx = 0; i < 4; i++) {
+ u32 val = mt76_rr(dev, MT_TX_AGG_CNT(i));
+
+ dev->mt76.aggr_stats[idx++] += val & 0xffff;
+ dev->mt76.aggr_stats[idx++] += val >> 16;
+ }
mutex_unlock(&dev->mt76.mutex);
mt76_tx_status_check(&dev->mt76, NULL, false);
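
[note: both fill_rx hunks above tag every subframe of one A-MPDU with a shared ampdu_ref, bumping the reference whenever the PPDU timestamp in rxd[12] changes and skipping 0 on wrap, presumably so a zeroed status never aliases a real reference. The wrap rule as a standalone sketch:

	/* advance an A-MPDU reference counter, reserving 0 */
	static u32 next_ampdu_ref(u32 ref)
	{
		return ++ref ? ref : 1;
	}
]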
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
index 87c748715b5d..070b03403894 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
@@ -16,6 +16,8 @@ static int mt7615_start(struct ieee80211_hw *hw)
{
struct mt7615_dev *dev = hw->priv;
+ mt7615_mac_reset_counters(dev);
+
dev->mt76.survey_time = ktime_get_boottime();
set_bit(MT76_STATE_RUNNING, &dev->mt76.state);
ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work,
@@ -39,6 +41,7 @@ static int get_omac_idx(enum nl80211_iftype type, u32 mask)
switch (type) {
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_MESH_POINT:
+ case NL80211_IFTYPE_ADHOC:
/* ap use hw bssid 0 and ext bssid */
if (~mask & BIT(HW_BSSID_0))
return HW_BSSID_0;
@@ -58,7 +61,7 @@ static int get_omac_idx(enum nl80211_iftype type, u32 mask)
default:
WARN_ON(1);
break;
- };
+ }
return -1;
}
@@ -97,8 +100,12 @@ static int mt7615_add_interface(struct ieee80211_hw *hw,
dev->vif_mask |= BIT(mvif->idx);
dev->omac_mask |= BIT(mvif->omac_idx);
idx = MT7615_WTBL_RESERVED - mvif->idx;
+
+ INIT_LIST_HEAD(&mvif->sta.poll_list);
mvif->sta.wcid.idx = idx;
mvif->sta.wcid.hw_key_idx = -1;
+ mt7615_mac_wtbl_update(dev, idx,
+ MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
rcu_assign_pointer(dev->mt76.wcid[idx], &mvif->sta.wcid);
mtxq = (struct mt76_txq *)vif->txq->drv_priv;
@@ -115,8 +122,9 @@ static void mt7615_remove_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct mt7615_sta *msta = &mvif->sta;
struct mt7615_dev *dev = hw->priv;
- int idx = mvif->sta.wcid.idx;
+ int idx = msta->wcid.idx;
/* TODO: disable beacon for the bss */
@@ -129,6 +137,11 @@ static void mt7615_remove_interface(struct ieee80211_hw *hw,
dev->vif_mask &= ~BIT(mvif->idx);
dev->omac_mask &= ~BIT(mvif->omac_idx);
mutex_unlock(&dev->mt76.mutex);
+
+ spin_lock_bh(&dev->sta_poll_lock);
+ if (!list_empty(&msta->poll_list))
+ list_del_init(&msta->poll_list);
+ spin_unlock_bh(&dev->sta_poll_lock);
}
static int mt7615_set_channel(struct mt7615_dev *dev)
@@ -151,8 +164,8 @@ static int mt7615_set_channel(struct mt7615_dev *dev)
ret = mt7615_dfs_init_radar_detector(dev);
mt7615_mac_cca_stats_reset(dev);
dev->mt76.survey_time = ktime_get_boottime();
- /* TODO: add DBDC support */
- mt76_rr(dev, MT_MIB_SDR16(0));
+
+ mt7615_mac_reset_counters(dev);
out:
clear_bit(MT76_RESET, &dev->mt76.state);
@@ -263,6 +276,11 @@ static void mt7615_configure_filter(struct ieee80211_hw *hw,
u64 multicast)
{
struct mt7615_dev *dev = hw->priv;
+ u32 ctl_flags = MT_WF_RFCR1_DROP_ACK |
+ MT_WF_RFCR1_DROP_BF_POLL |
+ MT_WF_RFCR1_DROP_BA |
+ MT_WF_RFCR1_DROP_CFEND |
+ MT_WF_RFCR1_DROP_CFACK;
u32 flags = 0;
#define MT76_FILTER(_flag, _hw) do { \
@@ -296,6 +314,11 @@ static void mt7615_configure_filter(struct ieee80211_hw *hw,
*total_flags = flags;
mt76_wr(dev, MT_WF_RFCR, dev->mt76.rxfilter);
+
+ if (*total_flags & FIF_CONTROL)
+ mt76_clear(dev, MT_WF_RFCR1, ctl_flags);
+ else
+ mt76_set(dev, MT_WF_RFCR1, ctl_flags);
}
static void mt7615_bss_info_changed(struct ieee80211_hw *hw,
@@ -348,9 +371,12 @@ int mt7615_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
if (idx < 0)
return -ENOSPC;
+ INIT_LIST_HEAD(&msta->poll_list);
msta->vif = mvif;
msta->wcid.sta = 1;
msta->wcid.idx = idx;
+ mt7615_mac_wtbl_update(dev, idx,
+ MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
mt7615_mcu_add_wtbl(dev, vif, sta);
mt7615_mcu_set_sta_rec(dev, vif, sta, 1);
@@ -371,9 +397,18 @@ void mt7615_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+ struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
mt7615_mcu_set_sta_rec(dev, vif, sta, 0);
mt7615_mcu_del_wtbl(dev, sta);
+
+ mt7615_mac_wtbl_update(dev, msta->wcid.idx,
+ MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
+
+ spin_lock_bh(&dev->sta_poll_lock);
+ if (!list_empty(&msta->poll_list))
+ list_del_init(&msta->poll_list);
+ spin_unlock_bh(&dev->sta_poll_lock);
}
static void mt7615_sta_rate_tbl_update(struct ieee80211_hw *hw,
@@ -449,12 +484,14 @@ mt7615_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u16 tid = params->tid;
u16 ssn = params->ssn;
struct mt76_txq *mtxq;
+ int ret = 0;
if (!txq)
return -EINVAL;
mtxq = (struct mt76_txq *)txq->drv_priv;
+ mutex_lock(&dev->mt76.mutex);
switch (action) {
case IEEE80211_AMPDU_RX_START:
mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid, ssn,
@@ -477,7 +514,7 @@ mt7615_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
break;
case IEEE80211_AMPDU_TX_START:
mtxq->agg_ssn = IEEE80211_SN_TO_SEQ(ssn);
- ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ ret = IEEE80211_AMPDU_TX_START_IMMEDIATE;
break;
case IEEE80211_AMPDU_TX_STOP_CONT:
mtxq->aggr = false;
@@ -485,8 +522,9 @@ mt7615_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
break;
}
+ mutex_unlock(&dev->mt76.mutex);
- return 0;
+ return ret;
}
const struct ieee80211_ops mt7615_ops = {
@@ -511,4 +549,5 @@ const struct ieee80211_ops mt7615_ops = {
.get_txpower = mt76_get_txpower,
.channel_switch_beacon = mt7615_channel_switch_beacon,
.get_survey = mt76_get_survey,
+ .get_antenna = mt76_get_antenna,
};
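
[note: the two ampdu_action hunks converge on one pattern: the state switch now runs under dev->mt76.mutex, and TX_START no longer calls ieee80211_start_tx_ba_cb_irqsafe() itself but returns IEEE80211_AMPDU_TX_START_IMMEDIATE, signalling mac80211 to complete the BA session start on the driver's behalf. Condensed:

	mutex_lock(&dev->mt76.mutex);
	switch (action) {
	case IEEE80211_AMPDU_TX_START:
		mtxq->agg_ssn = IEEE80211_SN_TO_SEQ(ssn);
		/* mac80211 finishes the BA start on this return value */
		ret = IEEE80211_AMPDU_TX_START_IMMEDIATE;
		break;
	/* ... remaining actions unchanged ... */
	}
	mutex_unlock(&dev->mt76.mutex);

	return ret;
]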
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
index 842cd81704db..f229c9ce9f65 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
@@ -848,6 +848,11 @@ int mt7615_mcu_set_bss_info(struct mt7615_dev *dev,
conn_type = CONNECTION_INFRA_STA;
break;
}
+ case NL80211_IFTYPE_ADHOC:
+ conn_type = CONNECTION_IBSS_ADHOC;
+ tx_wlan_idx = mvif->sta.wcid.idx;
+ net_type = NETWORK_IBSS;
+ break;
default:
WARN_ON(1);
break;
@@ -1073,10 +1078,13 @@ int mt7615_mcu_set_sta_rec(struct mt7615_dev *dev, struct ieee80211_vif *vif,
case NL80211_IFTYPE_STATION:
req.basic.conn_type = cpu_to_le32(CONNECTION_INFRA_AP);
break;
+ case NL80211_IFTYPE_ADHOC:
+ req.basic.conn_type = cpu_to_le32(CONNECTION_IBSS_ADHOC);
+ break;
default:
WARN_ON(1);
break;
- };
+ }
if (en) {
req.basic.conn_state = CONN_STATE_PORT_SECURE;
@@ -1297,8 +1305,10 @@ int mt7615_mcu_set_channel(struct mt7615_dev *dev)
};
int ret;
- if ((chandef->chan->flags & IEEE80211_CHAN_RADAR) &&
- chandef->chan->dfs_state != NL80211_DFS_AVAILABLE)
+ if (dev->mt76.hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
+ req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD;
+ else if ((chandef->chan->flags & IEEE80211_CHAN_RADAR) &&
+ chandef->chan->dfs_state != NL80211_DFS_AVAILABLE)
req.switch_reason = CH_SWITCH_DFS;
else
req.switch_reason = CH_SWITCH_NORMAL;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
index 7963e302d705..21486831172c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
@@ -56,6 +56,9 @@ struct mt7615_sta {
struct mt7615_vif *vif;
+ struct list_head poll_list;
+ u32 airtime_ac[8];
+
struct ieee80211_tx_rate rates[4];
struct mt7615_rate_set rateset[2];
@@ -81,6 +84,11 @@ struct mt7615_dev {
u32 vif_mask;
u32 omac_mask;
+ __le32 rx_ampdu_ts;
+
+ struct list_head sta_poll_list;
+ spinlock_t sta_poll_lock;
+
struct {
u8 n_pulses;
u32 period;
@@ -229,8 +237,11 @@ static inline void mt7615_irq_disable(struct mt7615_dev *dev, u32 mask)
}
void mt7615_update_channel(struct mt76_dev *mdev);
+bool mt7615_mac_wtbl_update(struct mt7615_dev *dev, int idx, u32 mask);
+void mt7615_mac_reset_counters(struct mt7615_dev *dev);
void mt7615_mac_cca_stats_reset(struct mt7615_dev *dev);
void mt7615_mac_set_scs(struct mt7615_dev *dev, bool enable);
+void mt7615_mac_sta_poll(struct mt7615_dev *dev);
int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi,
struct sk_buff *skb, struct mt76_wcid *wcid,
struct ieee80211_sta *sta, int pid,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/pci.c b/drivers/net/wireless/mediatek/mt76/mt7615/pci.c
index e250607e0a80..1eb1eb659c3f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/pci.c
@@ -72,7 +72,10 @@ static int mt7615_pci_probe(struct pci_dev *pdev,
static const struct mt76_driver_ops drv_ops = {
/* txwi_size = txd size + txp size */
.txwi_size = MT_TXD_SIZE + sizeof(struct mt7615_txp),
- .txwi_flags = MT_TXWI_NO_FREE,
+ .drv_flags = MT_DRV_TXWI_NO_FREE,
+ .survey_flags = SURVEY_INFO_TIME_TX |
+ SURVEY_INFO_TIME_RX |
+ SURVEY_INFO_TIME_BSS_RX,
.tx_prepare_skb = mt7615_tx_prepare_skb,
.tx_complete_skb = mt7615_tx_complete_skb,
.rx_skb = mt7615_queue_rx_skb,
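
[note: survey_flags above declares which cc_* accumulators this driver maintains, so the core can surface them through mt76_get_survey(). A sketch of that assumed core-side mapping (field names match struct survey_info; the us-to-ms conversion is an assumption, not part of this diff):

	if (dev->drv->survey_flags & SURVEY_INFO_TIME_TX)
		survey->time_tx = div_u64(state->cc_tx, 1000);
	if (dev->drv->survey_flags & SURVEY_INFO_TIME_RX)
		survey->time_rx = div_u64(state->cc_rx, 1000);
	if (dev->drv->survey_flags & SURVEY_INFO_TIME_BSS_RX)
		survey->time_bss_rx = div_u64(state->cc_bss_rx, 1000);
]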
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/regs.h b/drivers/net/wireless/mediatek/mt76/mt7615/regs.h
index b193814d5cf8..61a4aa9ac6e6 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/regs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/regs.h
@@ -6,6 +6,8 @@
#define MT_HW_REV 0x1000
#define MT_HW_CHIPID 0x1008
+#define MT_TOP_STRAP_STA 0x1010
+#define MT_TOP_3NSS BIT(24)
#define MT_TOP_MISC2 0x1134
#define MT_TOP_MISC2_FW_STATE GENMASK(2, 0)
@@ -65,6 +67,17 @@
#define MT_WPDMA_ABT_CFG MT_HIF(0x530)
#define MT_WPDMA_ABT_CFG1 MT_HIF(0x534)
+#define MT_PLE_BASE 0x8000
+#define MT_PLE(ofs) (MT_PLE_BASE + (ofs))
+
+#define MT_PLE_FL_Q0_CTRL MT_PLE(0x1b0)
+#define MT_PLE_FL_Q1_CTRL MT_PLE(0x1b4)
+#define MT_PLE_FL_Q2_CTRL MT_PLE(0x1b8)
+#define MT_PLE_FL_Q3_CTRL MT_PLE(0x1bc)
+
+#define MT_PLE_AC_QEMPTY(ac, n) MT_PLE(0x300 + 0x10 * (ac) + \
+ ((n) << 2))
+
#define MT_WF_PHY_BASE 0x10000
#define MT_WF_PHY(ofs) (MT_WF_PHY_BASE + (ofs))
@@ -125,6 +138,10 @@
MT_AGG_ARxCR_LIMIT_SHIFT(_n), \
MT_AGG_ARxCR_LIMIT_SHIFT(_n))
+#define MT_AGG_ASRCR0 MT_WF_AGG(0x060)
+#define MT_AGG_ASRCR1 MT_WF_AGG(0x064)
+#define MT_AGG_ASRCR_RANGE(val, n) (((val) >> ((n) << 3)) & GENMASK(5, 0))
+
#define MT_AGG_ACR0 MT_WF_AGG(0x070)
#define MT_AGG_ACR1 MT_WF_AGG(0x170)
#define MT_AGG_ACR_NO_BA_RULE BIT(0)
@@ -176,6 +193,22 @@
#define MT_WF_RFCR_DROP_NDPA BIT(20)
#define MT_WF_RFCR_DROP_UNWANTED_CTL BIT(21)
+#define MT_WF_RFCR1 MT_WF_RMAC(0x004)
+#define MT_WF_RFCR1_DROP_ACK BIT(4)
+#define MT_WF_RFCR1_DROP_BF_POLL BIT(5)
+#define MT_WF_RFCR1_DROP_BA BIT(6)
+#define MT_WF_RFCR1_DROP_CFEND BIT(7)
+#define MT_WF_RFCR1_DROP_CFACK BIT(8)
+
+#define MT_WF_RMAC_MIB_TIME0 MT_WF_RMAC(0x03c4)
+#define MT_WF_RMAC_MIB_RXTIME_CLR BIT(31)
+#define MT_WF_RMAC_MIB_RXTIME_EN BIT(30)
+
+#define MT_WF_RMAC_MIB_AIRTIME0 MT_WF_RMAC(0x0380)
+
+#define MT_WF_RMAC_MIB_TIME5 MT_WF_RMAC(0x03d8)
+#define MT_MIB_OBSSTIME_MASK GENMASK(23, 0)
+
#define MT_WF_DMA_BASE 0x21800
#define MT_WF_DMA(ofs) (MT_WF_DMA_BASE + (ofs))
@@ -183,6 +216,15 @@
#define MT_DMA_DCR0_MAX_RX_LEN GENMASK(15, 2)
#define MT_DMA_DCR0_RX_VEC_DROP BIT(17)
+#define MT_DMA_BN0RCFR0 MT_WF_DMA(0x070)
+#define MT_DMA_BN1RCFR0 MT_WF_DMA(0x0b0)
+#define MT_DMA_RCFR0_MCU_RX_MGMT BIT(2)
+#define MT_DMA_RCFR0_MCU_RX_CTL_NON_BAR BIT(3)
+#define MT_DMA_RCFR0_MCU_RX_CTL_BAR BIT(4)
+#define MT_DMA_RCFR0_MCU_RX_BYPASS BIT(21)
+#define MT_DMA_RCFR0_RX_DROPPED_UCAST GENMASK(25, 24)
+#define MT_DMA_RCFR0_RX_DROPPED_MCAST GENMASK(27, 26)
+
#define MT_WTBL_BASE 0x30000
#define MT_WTBL_ENTRY_SIZE 256
@@ -198,6 +240,7 @@
#define MT_WTBL_UPDATE MT_WTBL_OFF(0x030)
#define MT_WTBL_UPDATE_WLAN_IDX GENMASK(7, 0)
#define MT_WTBL_UPDATE_RXINFO_UPDATE BIT(11)
+#define MT_WTBL_UPDATE_ADM_COUNT_CLEAR BIT(12)
#define MT_WTBL_UPDATE_RATE_UPDATE BIT(13)
#define MT_WTBL_UPDATE_TX_COUNT_CLEAR BIT(14)
#define MT_WTBL_UPDATE_BUSY BIT(31)
@@ -255,8 +298,18 @@
#define MT_MIB_RTS_RETRIES_COUNT_MASK GENMASK(31, 16)
#define MT_MIB_RTS_COUNT_MASK GENMASK(15, 0)
-#define MT_MIB_SDR16(n) MT_WF_MIB(0x48 + ((n) << 9))
-#define MT_MIB_BUSY_MASK GENMASK(23, 0)
+#define MT_MIB_SDR9(n) MT_WF_MIB(0x02c + ((n) << 9))
+#define MT_MIB_SDR9_BUSY_MASK GENMASK(23, 0)
+
+#define MT_MIB_SDR16(n) MT_WF_MIB(0x048 + ((n) << 9))
+#define MT_MIB_SDR16_BUSY_MASK GENMASK(23, 0)
+
+#define MT_MIB_SDR36(n) MT_WF_MIB(0x098 + ((n) << 9))
+#define MT_MIB_SDR36_TXTIME_MASK GENMASK(23, 0)
+#define MT_MIB_SDR37(n) MT_WF_MIB(0x09c + ((n) << 9))
+#define MT_MIB_SDR37_RXTIME_MASK GENMASK(23, 0)
+
+#define MT_TX_AGG_CNT(n) MT_WF_MIB(0xa8 + ((n) << 2))
#define MT_EFUSE_BASE 0x81070000
#define MT_EFUSE_BASE_CTRL 0x000
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
index 9d4426f6905f..a03e2d01fba7 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
@@ -212,7 +212,7 @@ void mt76x0_get_tx_power_per_rate(struct mt76x02_dev *dev,
void mt76x0_get_power_info(struct mt76x02_dev *dev,
struct ieee80211_channel *chan, s8 *tp)
{
- struct mt76x0_chan_map {
+ static const struct mt76x0_chan_map {
u8 chan;
u8 offset;
} chan_map[] = {
@@ -343,6 +343,7 @@ int mt76x0_eeprom_init(struct mt76x02_dev *dev)
version, fae);
mt76x02_mac_setaddr(dev, dev->mt76.eeprom.data + MT_EE_MAC_ADDR);
+ mt76_eeprom_override(&dev->mt76);
mt76x0_set_chip_cap(dev);
mt76x0_set_freq_offset(dev);
mt76x0_set_temp_offset(dev);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/init.c b/drivers/net/wireless/mediatek/mt76/mt76x0/init.c
index cf7fc307322b..388b54cded1b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/init.c
@@ -150,31 +150,6 @@ static void mt76x0_init_mac_registers(struct mt76x02_dev *dev)
mt76_rmw(dev, MT_WMM_CTRL, 0x3ff, 0x201);
}
-static void mt76x0_reset_counters(struct mt76x02_dev *dev)
-{
- mt76_rr(dev, MT_RX_STAT_0);
- mt76_rr(dev, MT_RX_STAT_1);
- mt76_rr(dev, MT_RX_STAT_2);
- mt76_rr(dev, MT_TX_STA_0);
- mt76_rr(dev, MT_TX_STA_1);
- mt76_rr(dev, MT_TX_STA_2);
-}
-
-int mt76x0_mac_start(struct mt76x02_dev *dev)
-{
- mt76_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX);
-
- if (!mt76x02_wait_for_wpdma(&dev->mt76, 200000))
- return -ETIMEDOUT;
-
- mt76_wr(dev, MT_RX_FILTR_CFG, dev->mt76.rxfilter);
- mt76_wr(dev, MT_MAC_SYS_CTRL,
- MT_MAC_SYS_CTRL_ENABLE_TX | MT_MAC_SYS_CTRL_ENABLE_RX);
-
- return !mt76x02_wait_for_wpdma(&dev->mt76, 50) ? -ETIMEDOUT : 0;
-}
-EXPORT_SYMBOL_GPL(mt76x0_mac_start);
-
void mt76x0_mac_stop(struct mt76x02_dev *dev)
{
int i = 200, ok = 0;
@@ -244,8 +219,6 @@ int mt76x0_init_hardware(struct mt76x02_dev *dev)
for (i = 0; i < 256; i++)
mt76x02_mac_wcid_setup(dev, i, 0, NULL);
- mt76x0_reset_counters(dev);
-
ret = mt76x0_eeprom_init(dev);
if (ret)
return ret;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/main.c b/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
index efb7ca93863d..b2ccf50512dc 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
@@ -13,19 +13,16 @@ mt76x0_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef)
{
cancel_delayed_work_sync(&dev->cal_work);
mt76x02_pre_tbtt_enable(dev, false);
- if (mt76_is_mmio(dev))
+ if (mt76_is_mmio(&dev->mt76))
tasklet_disable(&dev->dfs_pd.dfs_tasklet);
mt76_set_channel(&dev->mt76);
mt76x0_phy_set_channel(dev, chandef);
- /* channel cycle counters read-and-clear */
- mt76_rr(dev, MT_CH_IDLE);
- mt76_rr(dev, MT_CH_BUSY);
-
+ mt76x02_mac_cc_reset(dev);
mt76x02_edcca_init(dev);
- if (mt76_is_mmio(dev)) {
+ if (mt76_is_mmio(&dev->mt76)) {
mt76x02_dfs_init_params(dev);
tasklet_enable(&dev->dfs_pd.dfs_tasklet);
}
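
[note: the call-site churn above (mt76_is_mmio(dev) becoming mt76_is_mmio(&dev->mt76)) reflects the bus-type helpers moving from per-driver wrappers to the core mt76_dev. Their bodies are outside the hunks shown here; the implied shape, with the bus->type field assumed:

	static inline bool mt76_is_usb(struct mt76_dev *dev)
	{
		return dev->bus->type == MT76_BUS_USB;
	}

	static inline bool mt76_is_mmio(struct mt76_dev *dev)
	{
		return dev->bus->type == MT76_BUS_MMIO;
	}
]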
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h b/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h
index 26517e062bdb..6953f253a28a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h
@@ -30,7 +30,7 @@
static inline bool is_mt7610e(struct mt76x02_dev *dev)
{
- if (!mt76_is_mmio(dev))
+ if (!mt76_is_mmio(&dev->mt76))
return false;
return mt76_chip(&dev->mt76) == 0x7610;
@@ -46,7 +46,6 @@ int mt76x0_init_hardware(struct mt76x02_dev *dev);
int mt76x0_register_device(struct mt76x02_dev *dev);
void mt76x0_chip_onoff(struct mt76x02_dev *dev, bool enable, bool reset);
-int mt76x0_mac_start(struct mt76x02_dev *dev);
void mt76x0_mac_stop(struct mt76x02_dev *dev);
int mt76x0_config(struct ieee80211_hw *hw, u32 changed);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
index 7705e55aa3d1..e2974e0ae1fc 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
@@ -51,19 +51,6 @@ static void mt76x0e_stop(struct ieee80211_hw *hw)
mt76x0e_stop_hw(dev);
}
-static int
-mt76x0e_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
- struct ieee80211_vif *vif, struct ieee80211_sta *sta,
- struct ieee80211_key_conf *key)
-{
- struct mt76x02_dev *dev = hw->priv;
-
- if (is_mt7630(dev))
- return -EOPNOTSUPP;
-
- return mt76x02_set_key(hw, cmd, vif, sta, key);
-}
-
static void
mt76x0e_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u32 queues, bool drop)
@@ -80,7 +67,7 @@ static const struct ieee80211_ops mt76x0e_ops = {
.configure_filter = mt76x02_configure_filter,
.bss_info_changed = mt76x02_bss_info_changed,
.sta_state = mt76_sta_state,
- .set_key = mt76x0e_set_key,
+ .set_key = mt76x02_set_key,
.conf_tx = mt76x02_conf_tx,
.sw_scan_start = mt76_sw_scan,
.sw_scan_complete = mt76x02_sw_scan_complete,
@@ -94,6 +81,7 @@ static const struct ieee80211_ops mt76x0e_ops = {
.release_buffered_frames = mt76_release_buffered_frames,
.set_coverage_class = mt76x02_set_coverage_class,
.set_rts_threshold = mt76x02_set_rts_threshold,
+ .get_antenna = mt76_get_antenna,
};
static int mt76x0e_register_device(struct mt76x02_dev *dev)
@@ -132,15 +120,6 @@ static int mt76x0e_register_device(struct mt76x02_dev *dev)
mt76_clear(dev, 0x110, BIT(9));
mt76_set(dev, MT_MAX_LEN_CFG, BIT(13));
- mt76_wr(dev, MT_CH_TIME_CFG,
- MT_CH_TIME_CFG_TIMER_EN |
- MT_CH_TIME_CFG_TX_AS_BUSY |
- MT_CH_TIME_CFG_RX_AS_BUSY |
- MT_CH_TIME_CFG_NAV_AS_BUSY |
- MT_CH_TIME_CFG_EIFS_AS_BUSY |
- MT_CH_CCA_RC_EN |
- FIELD_PREP(MT_CH_TIME_CFG_CH_TIMER_CLR, 1));
-
err = mt76x0_register_device(dev);
if (err < 0)
return err;
@@ -155,7 +134,9 @@ mt76x0e_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
static const struct mt76_driver_ops drv_ops = {
.txwi_size = sizeof(struct mt76x02_txwi),
- .tx_aligned4_skbs = true,
+ .drv_flags = MT_DRV_TX_ALIGNED4_SKBS |
+ MT_DRV_SW_RX_AIRTIME,
+ .survey_flags = SURVEY_INFO_TIME_TX,
.update_survey = mt76x02_update_channel,
.tx_prepare_skb = mt76x02_tx_prepare_skb,
.tx_complete_skb = mt76x02_tx_complete_skb,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
index 711a352dfd5c..2ecd45f8af90 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
@@ -102,7 +102,7 @@ out:
static int
mt76x0_rf_wr(struct mt76x02_dev *dev, u32 offset, u8 val)
{
- if (mt76_is_usb(dev)) {
+ if (mt76_is_usb(&dev->mt76)) {
struct mt76_reg_pair pair = {
.reg = offset,
.value = val,
@@ -121,7 +121,7 @@ static int mt76x0_rf_rr(struct mt76x02_dev *dev, u32 offset)
int ret;
u32 val;
- if (mt76_is_usb(dev)) {
+ if (mt76_is_usb(&dev->mt76)) {
struct mt76_reg_pair pair = {
.reg = offset,
};
@@ -176,7 +176,7 @@ mt76x0_phy_rf_csr_wr_rp(struct mt76x02_dev *dev,
}
#define RF_RANDOM_WRITE(dev, tab) do { \
- if (mt76_is_mmio(dev)) \
+ if (mt76_is_mmio(&dev->mt76)) \
mt76x0_phy_rf_csr_wr_rp(dev, tab, ARRAY_SIZE(tab)); \
else \
mt76_wr_rp(dev, MT_MCU_MEMMAP_RF, tab, ARRAY_SIZE(tab));\
@@ -744,7 +744,7 @@ mt76x0_phy_get_delta_power(struct mt76x02_dev *dev, u8 tx_mode,
if (!tx_mode) {
data = mt76_rr(dev, MT_BBP(CORE, 1));
- if (is_mt7630(dev) && mt76_is_mmio(dev)) {
+ if (is_mt7630(dev) && mt76_is_mmio(&dev->mt76)) {
int offset;
/* 2.3 * 8192 or 1.5 * 8192 */
@@ -899,7 +899,6 @@ void mt76x0_phy_calibrate(struct mt76x02_dev *dev, bool power_on)
}
mt76x02_mcu_calibrate(dev, MCU_CAL_FULL, val);
- msleep(350);
mt76x02_mcu_calibrate(dev, MCU_CAL_LC, is_5ghz);
usleep_range(15000, 20000);
@@ -967,7 +966,7 @@ void mt76x0_phy_set_channel(struct mt76x02_dev *dev,
break;
}
- if (mt76_is_usb(dev)) {
+ if (mt76_is_usb(&dev->mt76)) {
mt76x0_phy_bbp_set_bw(dev, chandef->width);
} else {
if (chandef->width == NL80211_CHAN_WIDTH_80 ||
@@ -1123,7 +1122,7 @@ static void mt76x0_rf_patch_reg_array(struct mt76x02_dev *dev,
switch (reg) {
case MT_RF(0, 3):
- if (mt76_is_mmio(dev)) {
+ if (mt76_is_mmio(&dev->mt76)) {
if (is_mt7630(dev))
val = 0x70;
else
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
index 00a445d27599..65ba9fc6ea0b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
@@ -103,7 +103,7 @@ static int mt76x0u_start(struct ieee80211_hw *hw)
struct mt76x02_dev *dev = hw->priv;
int ret;
- ret = mt76x0_mac_start(dev);
+ ret = mt76x02u_mac_start(dev);
if (ret)
return ret;
@@ -138,6 +138,7 @@ static const struct ieee80211_ops mt76x0u_ops = {
.get_survey = mt76_get_survey,
.set_tim = mt76_set_tim,
.release_buffered_frames = mt76_release_buffered_frames,
+ .get_antenna = mt76_get_antenna,
};
static int mt76x0u_init_hardware(struct mt76x02_dev *dev, bool reset)
@@ -165,13 +166,6 @@ static int mt76x0u_init_hardware(struct mt76x02_dev *dev, bool reset)
FIELD_PREP(MT_TXOP_TRUN_EN, 0x3f) |
FIELD_PREP(MT_TXOP_EXT_CCA_DLY, 0x58));
- mt76_wr(dev, MT_CH_TIME_CFG,
- MT_CH_TIME_CFG_TIMER_EN |
- MT_CH_TIME_CFG_TX_AS_BUSY |
- MT_CH_TIME_CFG_RX_AS_BUSY |
- MT_CH_TIME_CFG_NAV_AS_BUSY |
- MT_CH_TIME_CFG_EIFS_AS_BUSY);
-
return 0;
}
@@ -211,6 +205,8 @@ static int mt76x0u_probe(struct usb_interface *usb_intf,
const struct usb_device_id *id)
{
static const struct mt76_driver_ops drv_ops = {
+ .drv_flags = MT_DRV_SW_RX_AIRTIME,
+ .survey_flags = SURVEY_INFO_TIME_TX,
.update_survey = mt76x02_update_channel,
.tx_prepare_skb = mt76x02u_tx_prepare_skb,
.tx_complete_skb = mt76x02u_tx_complete_skb,
@@ -226,7 +222,7 @@ static int mt76x0u_probe(struct usb_interface *usb_intf,
u32 mac_rev;
int ret;
- mdev = mt76_alloc_device(&usb_dev->dev, sizeof(*dev), &mt76x0u_ops,
+ mdev = mt76_alloc_device(&usb_intf->dev, sizeof(*dev), &mt76x0u_ops,
&drv_ops);
if (!mdev)
return -ENOMEM;
@@ -278,6 +274,7 @@ static int mt76x0u_probe(struct usb_interface *usb_intf,
err:
usb_set_intfdata(usb_intf, NULL);
usb_put_dev(interface_to_usbdev(usb_intf));
+ mt76u_deinit(&dev->mt76);
ieee80211_free_hw(mdev->hw);
return ret;
@@ -297,6 +294,7 @@ static void mt76x0_disconnect(struct usb_interface *usb_intf)
usb_set_intfdata(usb_intf, NULL);
usb_put_dev(interface_to_usbdev(usb_intf));
+ mt76u_deinit(&dev->mt76);
ieee80211_free_hw(dev->mt76.hw);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02.h b/drivers/net/wireless/mediatek/mt76/mt76x02.h
index e858bba8c8ff..0ca0bbfe8769 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02.h
@@ -81,6 +81,7 @@ struct mt76x02_dev {
u8 txdone_seq;
DECLARE_KFIFO_PTR(txstatus_fifo, struct mt76x02_tx_status);
spinlock_t txstatus_fifo_lock;
+ u32 tx_airtime;
struct sk_buff *rx_head;
@@ -92,8 +93,6 @@ struct mt76x02_dev {
const struct mt76x02_beacon_ops *beacon_ops;
- u32 aggr_stats[32];
-
struct sk_buff *beacons[8];
u8 beacon_data_mask;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c b/drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c
index 92305bd31aa1..4209209ac940 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c
@@ -77,10 +77,7 @@ int mt76x02_mac_set_beacon(struct mt76x02_dev *dev, u8 vif_idx,
for (i = 0; i < ARRAY_SIZE(dev->beacons); i++) {
if (vif_idx == i) {
force_update = !!dev->beacons[i] ^ !!skb;
-
- if (dev->beacons[i])
- dev_kfree_skb(dev->beacons[i]);
-
+ dev_kfree_skb(dev->beacons[i]);
dev->beacons[i] = skb;
__mt76x02_mac_set_beacon(dev, bcn_idx, skb);
} else if (force_update && dev->beacons[i]) {
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c b/drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c
index 0cb2a7b35fe5..68b40d63a46d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c
@@ -19,7 +19,8 @@ mt76x02_ampdu_stat_read(struct seq_file *file, void *data)
seq_puts(file, "\n");
seq_puts(file, "Count: ");
for (j = 0; j < 8; j++)
- seq_printf(file, "%8d | ", dev->aggr_stats[i * 8 + j]);
+ seq_printf(file, "%8d | ",
+ dev->mt76.aggr_stats[i * 8 + j]);
seq_puts(file, "\n");
seq_puts(file, "--------");
for (j = 0; j < 8; j++)
@@ -143,6 +144,8 @@ void mt76x02_init_debugfs(struct mt76x02_dev *dev)
if (!dir)
return;
+ debugfs_create_devm_seqfile(dev->mt76.dev, "queues", dir,
+ mt76_queues_read);
debugfs_create_u8("temperature", 0400, dir, &dev->cal.temp);
debugfs_create_bool("tpc", 0600, dir, &dev->enable_tpc);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
index abacb4ea7179..4460548f346a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
@@ -7,6 +7,27 @@
#include "mt76x02.h"
#include "mt76x02_trace.h"
+void mt76x02_mac_reset_counters(struct mt76x02_dev *dev)
+{
+ int i;
+
+ mt76_rr(dev, MT_RX_STAT_0);
+ mt76_rr(dev, MT_RX_STAT_1);
+ mt76_rr(dev, MT_RX_STAT_2);
+ mt76_rr(dev, MT_TX_STA_0);
+ mt76_rr(dev, MT_TX_STA_1);
+ mt76_rr(dev, MT_TX_STA_2);
+
+ for (i = 0; i < 16; i++)
+ mt76_rr(dev, MT_TX_AGG_CNT(i));
+
+ for (i = 0; i < 16; i++)
+ mt76_rr(dev, MT_TX_STAT_FIFO);
+
+ memset(dev->mt76.aggr_stats, 0, sizeof(dev->mt76.aggr_stats));
+}
+EXPORT_SYMBOL_GPL(mt76x02_mac_reset_counters);
+
static enum mt76x02_cipher_type
mt76x02_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data)
{
@@ -462,8 +483,8 @@ mt76x02_mac_fill_tx_status(struct mt76x02_dev *dev, struct mt76x02_sta *msta,
phy = FIELD_GET(MT_RXWI_RATE_PHY, st->rate);
if (st->pktid & MT_PACKET_ID_HAS_RATE) {
- first_rate = st->rate & ~MT_RXWI_RATE_INDEX;
- first_rate |= st->pktid & MT_RXWI_RATE_INDEX;
+ first_rate = st->rate & ~MT_PKTID_RATE;
+ first_rate |= st->pktid & MT_PKTID_RATE;
mt76x02_mac_process_tx_rate(&rate[0], first_rate,
dev->mt76.chandef.chan->band);
@@ -516,10 +537,20 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev,
struct ieee80211_tx_status status = {
.info = &info
};
+ static const u8 ac_to_tid[4] = {
+ [IEEE80211_AC_BE] = 0,
+ [IEEE80211_AC_BK] = 1,
+ [IEEE80211_AC_VI] = 4,
+ [IEEE80211_AC_VO] = 6
+ };
struct mt76_wcid *wcid = NULL;
struct mt76x02_sta *msta = NULL;
struct mt76_dev *mdev = &dev->mt76;
struct sk_buff_head list;
+ u32 duration = 0;
+ u8 cur_pktid;
+ u32 ac = 0;
+ int len = 0;
if (stat->pktid == MT_PACKET_ID_NO_ACK)
return;
@@ -549,10 +580,10 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev,
if (!status.skb && !(stat->pktid & MT_PACKET_ID_HAS_RATE)) {
mt76_tx_status_unlock(mdev, &list);
- rcu_read_unlock();
- return;
+ goto out;
}
+
if (msta && stat->aggr && !status.skb) {
u32 stat_val, stat_cache;
@@ -565,10 +596,10 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev,
stat->wcid == msta->status.wcid && msta->n_frames < 32) {
msta->n_frames++;
mt76_tx_status_unlock(mdev, &list);
- rcu_read_unlock();
- return;
+ goto out;
}
+ cur_pktid = msta->status.pktid;
mt76x02_mac_fill_tx_status(dev, msta, status.info,
&msta->status, msta->n_frames);
@@ -576,16 +607,39 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev,
msta->n_frames = 1;
*update = 0;
} else {
+ cur_pktid = stat->pktid;
mt76x02_mac_fill_tx_status(dev, msta, status.info, stat, 1);
*update = 1;
}
- if (status.skb)
+ if (status.skb) {
+ info = *status.info;
+ len = status.skb->len;
+ ac = skb_get_queue_mapping(status.skb);
mt76_tx_status_skb_done(mdev, status.skb, &list);
+ } else if (msta) {
+ len = status.info->status.ampdu_len * ewma_pktlen_read(&msta->pktlen);
+ ac = FIELD_GET(MT_PKTID_AC, cur_pktid);
+ }
+
mt76_tx_status_unlock(mdev, &list);
if (!status.skb)
ieee80211_tx_status_ext(mt76_hw(dev), &status);
+
+ if (!len)
+ goto out;
+
+ duration = mt76_calc_tx_airtime(&dev->mt76, &info, len);
+
+ spin_lock_bh(&dev->mt76.cc_lock);
+ dev->tx_airtime += duration;
+ spin_unlock_bh(&dev->mt76.cc_lock);
+
+ if (msta)
+ ieee80211_sta_register_airtime(status.sta, ac_to_tid[ac], duration, 0);
+
+out:
rcu_read_unlock();
}
@@ -768,6 +822,21 @@ int mt76x02_mac_process_rx(struct mt76x02_dev *dev, struct sk_buff *skb,
if ((rxinfo & MT_RXINFO_BA) && !(rxinfo & MT_RXINFO_NULL))
status->aggr = true;
+ if (rxinfo & MT_RXINFO_AMPDU) {
+ status->flag |= RX_FLAG_AMPDU_DETAILS;
+ status->ampdu_ref = dev->mt76.ampdu_ref;
+
+ /*
+ * When an A-MPDU subframe arrives without valid RSSI info, more
+ * subframes of the same A-MPDU are expected; only the last
+ * subframe carries valid RSSI info.
+ */
+ if (rxinfo & MT_RXINFO_RSSI) {
+ if (!++dev->mt76.ampdu_ref)
+ dev->mt76.ampdu_ref++;
+ }
+ }
+
if (WARN_ON_ONCE(len > skb->len))
return -EINVAL;
@@ -948,16 +1017,13 @@ void mt76x02_update_channel(struct mt76_dev *mdev)
{
struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
struct mt76_channel_state *state;
- u32 active, busy;
- state = mt76_channel_state(&dev->mt76, dev->mt76.chandef.chan);
-
- busy = mt76_rr(dev, MT_CH_BUSY);
- active = busy + mt76_rr(dev, MT_CH_IDLE);
+ state = mdev->chan_state;
+ state->cc_busy += mt76_rr(dev, MT_CH_BUSY);
spin_lock_bh(&dev->mt76.cc_lock);
- state->cc_busy += busy;
- state->cc_active += active;
+ state->cc_tx += dev->tx_airtime;
+ dev->tx_airtime = 0;
spin_unlock_bh(&dev->mt76.cc_lock);
}
EXPORT_SYMBOL_GPL(mt76x02_update_channel);
@@ -1094,12 +1160,12 @@ void mt76x02_mac_work(struct work_struct *work)
mutex_lock(&dev->mt76.mutex);
- mt76x02_update_channel(&dev->mt76);
+ mt76_update_survey(&dev->mt76);
for (i = 0, idx = 0; i < 16; i++) {
u32 val = mt76_rr(dev, MT_TX_AGG_CNT(i));
- dev->aggr_stats[idx++] += val & 0xffff;
- dev->aggr_stats[idx++] += val >> 16;
+ dev->mt76.aggr_stats[idx++] += val & 0xffff;
+ dev->mt76.aggr_stats[idx++] += val >> 16;
}
if (!dev->mt76.beacon_mask)
@@ -1116,6 +1182,25 @@ void mt76x02_mac_work(struct work_struct *work)
MT_MAC_WORK_INTERVAL);
}
+void mt76x02_mac_cc_reset(struct mt76x02_dev *dev)
+{
+ dev->mt76.survey_time = ktime_get_boottime();
+
+ mt76_wr(dev, MT_CH_TIME_CFG,
+ MT_CH_TIME_CFG_TIMER_EN |
+ MT_CH_TIME_CFG_TX_AS_BUSY |
+ MT_CH_TIME_CFG_RX_AS_BUSY |
+ MT_CH_TIME_CFG_NAV_AS_BUSY |
+ MT_CH_TIME_CFG_EIFS_AS_BUSY |
+ MT_CH_CCA_RC_EN |
+ FIELD_PREP(MT_CH_TIME_CFG_CH_TIMER_CLR, 1));
+
+ /* channel cycle counters read-and-clear */
+ mt76_rr(dev, MT_CH_BUSY);
+ mt76_rr(dev, MT_CH_IDLE);
+}
+EXPORT_SYMBOL_GPL(mt76x02_mac_cc_reset);
+
void mt76x02_mac_set_bssid(struct mt76x02_dev *dev, u8 idx, const u8 *addr)
{
idx &= 7;
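
Taken together, the mac changes above add software TX airtime accounting:
on each status event the frame duration is estimated and credited both to a
per-device counter (drained into the survey on the next update) and to the
station via mac80211. A condensed, illustrative sketch of the flow, using
the names introduced by this patch:

	/* tx status path: estimate airtime from the real skb length, or
	 * from the per-station EWMA for no-skb (aggregated) reports */
	len = status.skb ? status.skb->len
			 : info.status.ampdu_len * ewma_pktlen_read(&msta->pktlen);
	duration = mt76_calc_tx_airtime(&dev->mt76, &info, len);

	spin_lock_bh(&dev->mt76.cc_lock);
	dev->tx_airtime += duration;
	spin_unlock_bh(&dev->mt76.cc_lock);

	ieee80211_sta_register_airtime(status.sta, tid, duration, 0);

	/* survey path: fold the accumulated airtime into channel state */
	state->cc_tx += dev->tx_airtime;
	dev->tx_airtime = 0;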
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h
index efa4ef945e35..7d946aa77182 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h
@@ -23,11 +23,16 @@ struct mt76x02_tx_status {
#define MT_VIF_WCID(_n) (254 - ((_n) & 7))
#define MT_MAX_VIFS 8
+#define MT_PKTID_RATE GENMASK(4, 0)
+#define MT_PKTID_AC GENMASK(6, 5)
+
struct mt76x02_vif {
struct mt76_wcid group_wcid; /* must be first */
u8 idx;
};
+DECLARE_EWMA(pktlen, 8, 8);
+
struct mt76x02_sta {
struct mt76_wcid wcid; /* must be first */
@@ -35,6 +40,7 @@ struct mt76x02_sta {
struct mt76x02_tx_status status;
int n_frames;
+ struct ewma_pktlen pktlen;
};
#define MT_RXINFO_BA BIT(0)
@@ -161,6 +167,7 @@ static inline bool mt76x02_wait_for_mac(struct mt76_dev *dev)
return false;
}
+void mt76x02_mac_reset_counters(struct mt76x02_dev *dev);
void mt76x02_mac_set_short_preamble(struct mt76x02_dev *dev, bool enable);
int mt76x02_mac_shared_key_setup(struct mt76x02_dev *dev, u8 vif_idx,
u8 key_idx, struct ieee80211_key_conf *key);
@@ -192,6 +199,7 @@ void mt76x02_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
void mt76x02_update_channel(struct mt76_dev *mdev);
void mt76x02_mac_work(struct work_struct *work);
+void mt76x02_mac_cc_reset(struct mt76x02_dev *dev);
void mt76x02_mac_set_bssid(struct mt76x02_dev *dev, u8 idx, const u8 *addr);
int mt76x02_mac_set_beacon(struct mt76x02_dev *dev, u8 vif_idx,
struct sk_buff *skb);
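
With the new MT_PKTID_* fields, the 8-bit packet id carries both the rate
index and the access category of no-skb frames. A minimal sketch of the
packing and unpacking, assuming MT_PACKET_ID_HAS_RATE sits in a higher bit
of the same byte:

	u8 pid = MT_PACKET_ID_HAS_RATE |
		 FIELD_PREP(MT_PKTID_RATE, rate_idx) |	/* bits 4:0 */
		 FIELD_PREP(MT_PKTID_AC, ac);		/* bits 6:5 */

	u8 rate_idx = FIELD_GET(MT_PKTID_RATE, pid);
	u8 ac = FIELD_GET(MT_PKTID_AC, pid);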
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c
index 4be7a24097cc..6274b6a24b07 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c
@@ -114,7 +114,7 @@ int mt76x02_mcu_calibrate(struct mt76x02_dev *dev, int type, u32 param)
.id = cpu_to_le32(type),
.value = cpu_to_le32(param),
};
- bool is_mt76x2e = mt76_is_mmio(dev) && is_mt76x2(dev);
+ bool is_mt76x2e = mt76_is_mmio(&dev->mt76) && is_mt76x2(dev);
int ret;
if (is_mt76x2e)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
index dc773070481d..4e2371c926d8 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
@@ -343,6 +343,7 @@ EXPORT_SYMBOL_GPL(mt76x02_dma_disable);
void mt76x02_mac_start(struct mt76x02_dev *dev)
{
+ mt76x02_mac_reset_counters(dev);
mt76x02_dma_enable(dev);
mt76_wr(dev, MT_RX_FILTR_CFG, dev->mt76.rxfilter);
mt76_wr(dev, MT_MAC_SYS_CTRL,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c b/drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c
index f27aade34c1e..13825f642087 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c
@@ -158,7 +158,9 @@ int mt76x02_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
/* encode packet rate for no-skb packet id to fix up status reporting */
if (pid == MT_PACKET_ID_NO_SKB)
pid = MT_PACKET_ID_HAS_RATE |
- (le16_to_cpu(txwi->rate) & MT_RXWI_RATE_INDEX);
+ (le16_to_cpu(txwi->rate) & MT_RXWI_RATE_INDEX) |
+ FIELD_PREP(MT_PKTID_AC,
+ skb_get_queue_mapping(tx_info->skb));
txwi->pktid = pid;
@@ -171,6 +173,12 @@ int mt76x02_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
if (!wcid || wcid->hw_key_idx == 0xff || wcid->sw_iv)
tx_info->info |= MT_TXD_INFO_WIV;
+ if (sta) {
+ struct mt76x02_sta *msta = (struct mt76x02_sta *)sta->drv_priv;
+
+ ewma_pktlen_add(&msta->pktlen, tx_info->skb->len);
+ }
+
return 0;
}
EXPORT_SYMBOL_GPL(mt76x02_tx_prepare_skb);
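
The ewma_pktlen_* helpers come from DECLARE_EWMA(pktlen, 8, 8) in
mt76x02_mac.h. Roughly, the generated average keeps 8 fractional bits and
weights each new sample by 1/8 (a sketch of the semantics, not the exact
kernel implementation):

	struct ewma_pktlen avg;

	ewma_pktlen_init(&avg);			/* average starts at 0 */
	ewma_pktlen_add(&avg, 1500);		/* avg += (1500 - avg) / 8 */
	unsigned long cur = ewma_pktlen_read(&avg);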
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb.h b/drivers/net/wireless/mediatek/mt76/mt76x02_usb.h
index 98329debc033..a57dcc8820aa 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb.h
@@ -8,6 +8,7 @@
#include "mt76x02.h"
+int mt76x02u_mac_start(struct mt76x02_dev *dev);
void mt76x02u_init_mcu(struct mt76_dev *dev);
void mt76x02u_mcu_fw_reset(struct mt76x02_dev *dev);
int mt76x02u_mcu_fw_send_data(struct mt76x02_dev *dev, const void *data,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
index 78dfc1e7f27b..d03d3c8e296c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
@@ -23,6 +23,27 @@ void mt76x02u_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
}
EXPORT_SYMBOL_GPL(mt76x02u_tx_complete_skb);
+int mt76x02u_mac_start(struct mt76x02_dev *dev)
+{
+ mt76x02_mac_reset_counters(dev);
+
+ mt76_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX);
+ if (!mt76x02_wait_for_wpdma(&dev->mt76, 200000))
+ return -ETIMEDOUT;
+
+ mt76_wr(dev, MT_RX_FILTR_CFG, dev->mt76.rxfilter);
+
+ mt76_wr(dev, MT_MAC_SYS_CTRL,
+ MT_MAC_SYS_CTRL_ENABLE_TX |
+ MT_MAC_SYS_CTRL_ENABLE_RX);
+
+ if (!mt76x02_wait_for_wpdma(&dev->mt76, 50))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76x02u_mac_start);
+
int mt76x02u_skb_dma_info(struct sk_buff *skb, int port, u32 flags)
{
struct sk_buff *iter, *last = skb;
@@ -83,7 +104,9 @@ int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
/* encode packet rate for no-skb packet id to fix up status reporting */
if (pid == MT_PACKET_ID_NO_SKB)
pid = MT_PACKET_ID_HAS_RATE |
- (le16_to_cpu(txwi->rate) & MT_RXWI_RATE_INDEX);
+ (le16_to_cpu(txwi->rate) & MT_PKTID_RATE) |
+ FIELD_PREP(MT_PKTID_AC,
+ skb_get_queue_mapping(tx_info->skb));
txwi->pktid = pid;
@@ -97,6 +120,12 @@ int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
if (!wcid || wcid->hw_key_idx == 0xff || wcid->sw_iv)
flags |= MT_TXD_INFO_WIV;
+ if (sta) {
+ struct mt76x02_sta *msta = (struct mt76x02_sta *)sta->drv_priv;
+
+ ewma_pktlen_add(&msta->pktlen, tx_info->skb->len);
+ }
+
return mt76x02u_skb_dma_info(tx_info->skb, WLAN_PORT, flags);
}
EXPORT_SYMBOL_GPL(mt76x02u_tx_prepare_skb);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
index aec73a0295e8..0960fc56b672 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
@@ -153,15 +153,7 @@ void mt76x02_init_device(struct mt76x02_dev *dev)
hw->max_rate_tries = 1;
hw->extra_tx_headroom = 2;
- wiphy->interface_modes =
- BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_AP) |
-#ifdef CONFIG_MAC80211_MESH
- BIT(NL80211_IFTYPE_MESH_POINT) |
-#endif
- BIT(NL80211_IFTYPE_ADHOC);
-
- if (mt76_is_usb(dev)) {
+ if (mt76_is_usb(&dev->mt76)) {
hw->extra_tx_headroom += sizeof(struct mt76x02_txwi) +
MT_DMA_HDR_LEN;
wiphy->iface_combinations = mt76x02u_if_comb;
@@ -190,7 +182,6 @@ void mt76x02_init_device(struct mt76x02_dev *dev)
hw->vif_data_size = sizeof(struct mt76x02_vif);
ieee80211_hw_set(hw, SUPPORTS_HT_CCK_RATES);
- ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
dev->mt76.global_wcid.idx = 255;
dev->mt76.global_wcid.hw_key_idx = -1;
@@ -264,6 +255,7 @@ int mt76x02_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
msta->wcid.hw_key_idx = -1;
mt76x02_mac_wcid_setup(dev, idx, mvif->idx, sta->addr);
mt76x02_mac_wcid_set_drop(dev, idx, false);
+ ewma_pktlen_init(&msta->pktlen);
if (vif->type == NL80211_IFTYPE_AP)
set_bit(MT_WCID_FLAG_CHECK_PS, &msta->wcid.flags);
@@ -365,12 +357,14 @@ int mt76x02_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u16 tid = params->tid;
u16 ssn = params->ssn;
struct mt76_txq *mtxq;
+ int ret = 0;
if (!txq)
return -EINVAL;
mtxq = (struct mt76_txq *)txq->drv_priv;
+ mutex_lock(&dev->mt76.mutex);
switch (action) {
case IEEE80211_AMPDU_RX_START:
mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid,
@@ -393,15 +387,16 @@ int mt76x02_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
break;
case IEEE80211_AMPDU_TX_START:
mtxq->agg_ssn = IEEE80211_SN_TO_SEQ(ssn);
- ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ ret = IEEE80211_AMPDU_TX_START_IMMEDIATE;
break;
case IEEE80211_AMPDU_TX_STOP_CONT:
mtxq->aggr = false;
ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
break;
}
+ mutex_unlock(&dev->mt76.mutex);
- return 0;
+ return ret;
}
EXPORT_SYMBOL_GPL(mt76x02_ampdu_action);
@@ -443,7 +438,7 @@ int mt76x02_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
* data registers and sent via the HW beacon engine, so they must
* already be encrypted.
*/
- if (mt76_is_usb(dev) &&
+ if (mt76_is_usb(&dev->mt76) &&
vif->type == NL80211_IFTYPE_AP &&
!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
return -EOPNOTSUPP;
@@ -624,7 +619,7 @@ void mt76x02_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta,
int idx = msta->wcid.idx;
mt76_stop_tx_queues(&dev->mt76, sta, true);
- if (mt76_is_mmio(dev))
+ if (mt76_is_mmio(mdev))
mt76x02_mac_wcid_set_drop(dev, idx, ps);
}
EXPORT_SYMBOL_GPL(mt76x02_sta_ps);
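
The IEEE80211_AMPDU_TX_START change above uses a mac80211 facility that
lets a driver start a TX aggregation session synchronously: instead of
calling ieee80211_start_tx_ba_cb_irqsafe() itself, the handler returns
IEEE80211_AMPDU_TX_START_IMMEDIATE and mac80211 completes the setup. A
minimal, hypothetical handler showing only that case:

	static int example_ampdu_action(struct ieee80211_hw *hw,
					struct ieee80211_vif *vif,
					struct ieee80211_ampdu_params *params)
	{
		switch (params->action) {
		case IEEE80211_AMPDU_TX_START:
			/* no hardware setup needed before the session */
			return IEEE80211_AMPDU_TX_START_IMMEDIATE;
		default:
			return -EOPNOTSUPP;
		}
	}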
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/mac.h b/drivers/net/wireless/mediatek/mt76/mt76x2/mac.h
index a1583021e1e9..d5c3d26b94c1 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/mac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/mac.h
@@ -12,7 +12,6 @@ struct mt76x02_dev;
struct mt76x2_sta;
struct mt76x02_vif;
-int mt76x2_mac_start(struct mt76x02_dev *dev);
void mt76x2_mac_stop(struct mt76x02_dev *dev, bool force);
static inline void mt76x2_mac_resume(struct mt76x02_dev *dev)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2u.h b/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2u.h
index c876bac43751..f9d37c6cf1f0 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2u.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2u.h
@@ -24,7 +24,6 @@ void mt76x2u_cleanup(struct mt76x02_dev *dev);
void mt76x2u_stop_hw(struct mt76x02_dev *dev);
int mt76x2u_mac_reset(struct mt76x02_dev *dev);
-int mt76x2u_mac_start(struct mt76x02_dev *dev);
int mt76x2u_mac_stop(struct mt76x02_dev *dev);
int mt76x2u_phy_set_channel(struct mt76x02_dev *dev,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
index cf611d1b817c..53ca0cedf026 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
@@ -21,7 +21,9 @@ mt76pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
static const struct mt76_driver_ops drv_ops = {
.txwi_size = sizeof(struct mt76x02_txwi),
- .tx_aligned4_skbs = true,
+ .drv_flags = MT_DRV_TX_ALIGNED4_SKBS |
+ MT_DRV_SW_RX_AIRTIME,
+ .survey_flags = SURVEY_INFO_TIME_TX,
.update_survey = mt76x02_update_channel,
.tx_prepare_skb = mt76x02_tx_prepare_skb,
.tx_complete_skb = mt76x02_tx_complete_skb,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c
index 343127f2d621..33fcec9179b2 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c
@@ -7,6 +7,7 @@
#include "mt76x2.h"
#include "eeprom.h"
#include "mcu.h"
+#include "../mt76x02_mac.h"
static void
mt76x2_mac_pbf_init(struct mt76x02_dev *dev)
@@ -132,36 +133,11 @@ int mt76x2_mac_reset(struct mt76x02_dev *dev, bool hard)
for (i = 0; i < 16; i++)
mt76_rr(dev, MT_TX_STAT_FIFO);
- mt76_wr(dev, MT_CH_TIME_CFG,
- MT_CH_TIME_CFG_TIMER_EN |
- MT_CH_TIME_CFG_TX_AS_BUSY |
- MT_CH_TIME_CFG_RX_AS_BUSY |
- MT_CH_TIME_CFG_NAV_AS_BUSY |
- MT_CH_TIME_CFG_EIFS_AS_BUSY |
- MT_CH_CCA_RC_EN |
- FIELD_PREP(MT_CH_TIME_CFG_CH_TIMER_CLR, 1));
-
mt76x02_set_tx_ackto(dev);
return 0;
}
-int mt76x2_mac_start(struct mt76x02_dev *dev)
-{
- int i;
-
- for (i = 0; i < 16; i++)
- mt76_rr(dev, MT_TX_AGG_CNT(i));
-
- for (i = 0; i < 16; i++)
- mt76_rr(dev, MT_TX_STAT_FIFO);
-
- memset(dev->aggr_stats, 0, sizeof(dev->aggr_stats));
- mt76x02_mac_start(dev);
-
- return 0;
-}
-
static void
mt76x2_power_on_rf_patch(struct mt76x02_dev *dev)
{
@@ -264,9 +240,7 @@ static int mt76x2_init_hardware(struct mt76x02_dev *dev)
return ret;
set_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
- ret = mt76x2_mac_start(dev);
- if (ret)
- return ret;
+ mt76x02_mac_start(dev);
ret = mt76x2_mcu_init(dev);
if (ret)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
index 4971685aafe8..cfe8905ce73f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
@@ -4,6 +4,7 @@
*/
#include "mt76x2.h"
+#include "../mt76x02_mac.h"
static int
mt76x2_start(struct ieee80211_hw *hw)
@@ -11,10 +12,7 @@ mt76x2_start(struct ieee80211_hw *hw)
struct mt76x02_dev *dev = hw->priv;
int ret;
- ret = mt76x2_mac_start(dev);
- if (ret)
- return ret;
-
+ mt76x02_mac_start(dev);
ret = mt76x2_phy_start(dev);
if (ret)
return ret;
@@ -54,10 +52,7 @@ mt76x2_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef)
mt76x2_mac_stop(dev, true);
ret = mt76x2_phy_set_channel(dev, chandef);
- /* channel cycle counters read-and-clear */
- mt76_rr(dev, MT_CH_IDLE);
- mt76_rr(dev, MT_CH_BUSY);
-
+ mt76x02_mac_cc_reset(dev);
mt76x02_dfs_init_params(dev);
mt76x2_mac_resume(dev);
@@ -140,19 +135,6 @@ static int mt76x2_set_antenna(struct ieee80211_hw *hw, u32 tx_ant,
return 0;
}
-static int mt76x2_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant,
- u32 *rx_ant)
-{
- struct mt76x02_dev *dev = hw->priv;
-
- mutex_lock(&dev->mt76.mutex);
- *tx_ant = dev->mt76.antenna_mask;
- *rx_ant = dev->mt76.antenna_mask;
- mutex_unlock(&dev->mt76.mutex);
-
- return 0;
-}
-
const struct ieee80211_ops mt76x2_ops = {
.tx = mt76x02_tx,
.start = mt76x2_start,
@@ -177,7 +159,7 @@ const struct ieee80211_ops mt76x2_ops = {
.get_survey = mt76_get_survey,
.set_tim = mt76_set_tim,
.set_antenna = mt76x2_set_antenna,
- .get_antenna = mt76x2_get_antenna,
+ .get_antenna = mt76_get_antenna,
.set_rts_threshold = mt76x02_set_rts_threshold,
};
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
index da5e0f9a8bae..b64ad816cc25 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
@@ -25,6 +25,8 @@ static int mt76x2u_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
static const struct mt76_driver_ops drv_ops = {
+ .drv_flags = MT_DRV_SW_RX_AIRTIME,
+ .survey_flags = SURVEY_INFO_TIME_TX,
.update_survey = mt76x02_update_channel,
.tx_prepare_skb = mt76x02u_tx_prepare_skb,
.tx_complete_skb = mt76x02u_tx_complete_skb,
@@ -39,7 +41,7 @@ static int mt76x2u_probe(struct usb_interface *intf,
struct mt76_dev *mdev;
int err;
- mdev = mt76_alloc_device(&udev->dev, sizeof(*dev), &mt76x2u_ops,
+ mdev = mt76_alloc_device(&intf->dev, sizeof(*dev), &mt76x2u_ops,
&drv_ops);
if (!mdev)
return -ENOMEM;
@@ -71,6 +73,7 @@ static int mt76x2u_probe(struct usb_interface *intf,
err:
ieee80211_free_hw(mt76_hw(dev));
+ mt76u_deinit(&dev->mt76);
usb_set_intfdata(intf, NULL);
usb_put_dev(udev);
@@ -86,6 +89,7 @@ static void mt76x2u_disconnect(struct usb_interface *intf)
set_bit(MT76_REMOVED, &dev->mt76.state);
ieee80211_unregister_hw(hw);
mt76x2u_cleanup(dev);
+ mt76u_deinit(&dev->mt76);
ieee80211_free_hw(hw);
usb_set_intfdata(intf, NULL);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c
index e305b374c904..2910068f4e79 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c
@@ -184,13 +184,6 @@ int mt76x2u_init_hardware(struct mt76x02_dev *dev)
mt76x02_phy_set_rxpath(dev);
mt76x02_phy_set_txdac(dev);
- mt76_wr(dev, MT_CH_TIME_CFG,
- MT_CH_TIME_CFG_TIMER_EN |
- MT_CH_TIME_CFG_TX_AS_BUSY |
- MT_CH_TIME_CFG_RX_AS_BUSY |
- MT_CH_TIME_CFG_NAV_AS_BUSY |
- MT_CH_TIME_CFG_EIFS_AS_BUSY);
-
return mt76x2u_mac_stop(dev);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c
index e7fea3a6f1fd..59cbe826188a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c
@@ -6,16 +6,6 @@
#include "mt76x2u.h"
#include "eeprom.h"
-static void mt76x2u_mac_reset_counters(struct mt76x02_dev *dev)
-{
- mt76_rr(dev, MT_RX_STAT_0);
- mt76_rr(dev, MT_RX_STAT_1);
- mt76_rr(dev, MT_RX_STAT_2);
- mt76_rr(dev, MT_TX_STA_0);
- mt76_rr(dev, MT_TX_STA_1);
- mt76_rr(dev, MT_TX_STA_2);
-}
-
static void mt76x2u_mac_fixup_xtal(struct mt76x02_dev *dev)
{
s8 offset = 0;
@@ -102,23 +92,6 @@ int mt76x2u_mac_reset(struct mt76x02_dev *dev)
return 0;
}
-int mt76x2u_mac_start(struct mt76x02_dev *dev)
-{
- mt76x2u_mac_reset_counters(dev);
-
- mt76_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX);
- mt76x02_wait_for_wpdma(&dev->mt76, 1000);
- usleep_range(50, 100);
-
- mt76_wr(dev, MT_RX_FILTR_CFG, dev->mt76.rxfilter);
-
- mt76_wr(dev, MT_MAC_SYS_CTRL,
- MT_MAC_SYS_CTRL_ENABLE_TX |
- MT_MAC_SYS_CTRL_ENABLE_RX);
-
- return 0;
-}
-
int mt76x2u_mac_stop(struct mt76x02_dev *dev)
{
int i, count = 0, val;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
index eb73cb856c81..9e97204841f5 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
@@ -4,13 +4,14 @@
*/
#include "mt76x2u.h"
+#include "../mt76x02_usb.h"
static int mt76x2u_start(struct ieee80211_hw *hw)
{
struct mt76x02_dev *dev = hw->priv;
int ret;
- ret = mt76x2u_mac_start(dev);
+ ret = mt76x02u_mac_start(dev);
if (ret)
return ret;
@@ -48,10 +49,7 @@ mt76x2u_set_channel(struct mt76x02_dev *dev,
err = mt76x2u_phy_set_channel(dev, chandef);
- /* channel cycle counters read-and-clear */
- mt76_rr(dev, MT_CH_IDLE);
- mt76_rr(dev, MT_CH_BUSY);
-
+ mt76x02_mac_cc_reset(dev);
mt76x2_mac_resume(dev);
clear_bit(MT76_RESET, &dev->mt76.state);
@@ -121,4 +119,5 @@ const struct ieee80211_ops mt76x2u_ops = {
.get_survey = mt76_get_survey,
.set_tim = mt76_set_tim,
.release_buffered_frames = mt76_release_buffered_frames,
+ .get_antenna = mt76_get_antenna,
};
diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c
index c22a05f06fd0..7ee91d946882 100644
--- a/drivers/net/wireless/mediatek/mt76/tx.c
+++ b/drivers/net/wireless/mediatek/mt76/tx.c
@@ -378,7 +378,7 @@ EXPORT_SYMBOL_GPL(mt76_release_buffered_frames);
static int
mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_sw_queue *sq,
- struct mt76_txq *mtxq, bool *empty)
+ struct mt76_txq *mtxq)
{
struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
enum mt76_txq_id qid = mt76_txq_get_qid(txq);
@@ -392,16 +392,12 @@ mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_sw_queue *sq,
bool probe;
int idx;
- if (test_bit(MT_WCID_FLAG_PS, &wcid->flags)) {
- *empty = true;
+ if (test_bit(MT_WCID_FLAG_PS, &wcid->flags))
return 0;
- }
skb = mt76_txq_dequeue(dev, mtxq, false);
- if (!skb) {
- *empty = true;
+ if (!skb)
return 0;
- }
info = IEEE80211_SKB_CB(skb);
if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))
@@ -431,10 +427,8 @@ mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_sw_queue *sq,
return -EBUSY;
skb = mt76_txq_dequeue(dev, mtxq, false);
- if (!skb) {
- *empty = true;
+ if (!skb)
break;
- }
info = IEEE80211_SKB_CB(skb);
cur_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU;
@@ -481,8 +475,6 @@ mt76_txq_schedule_list(struct mt76_dev *dev, enum mt76_txq_id qid)
spin_lock_bh(&hwq->lock);
while (1) {
- bool empty = false;
-
if (sq->swq_queued >= 4)
break;
@@ -513,10 +505,9 @@ mt76_txq_schedule_list(struct mt76_dev *dev, enum mt76_txq_id qid)
spin_lock_bh(&hwq->lock);
}
- ret += mt76_txq_send_burst(dev, sq, mtxq, &empty);
- if (skb_queue_empty(&mtxq->retry_q))
- empty = true;
- ieee80211_return_txq(dev->hw, txq, !empty);
+ ret += mt76_txq_send_burst(dev, sq, mtxq);
+ ieee80211_return_txq(dev->hw, txq,
+ !skb_queue_empty(&mtxq->retry_q));
}
spin_unlock_bh(&hwq->lock);
diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c
index 20c6fe510e9d..d6d47081e281 100644
--- a/drivers/net/wireless/mediatek/mt76/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/usb.c
@@ -15,15 +15,17 @@ static bool disable_usb_sg;
module_param_named(disable_usb_sg, disable_usb_sg, bool, 0644);
MODULE_PARM_DESC(disable_usb_sg, "Disable usb scatter-gather support");
-/* should be called with usb_ctrl_mtx locked */
static int __mt76u_vendor_request(struct mt76_dev *dev, u8 req,
u8 req_type, u16 val, u16 offset,
void *buf, size_t len)
{
- struct usb_device *udev = to_usb_device(dev->dev);
+ struct usb_interface *uintf = to_usb_interface(dev->dev);
+ struct usb_device *udev = interface_to_usbdev(uintf);
unsigned int pipe;
int i, ret;
+ lockdep_assert_held(&dev->usb.usb_ctrl_mtx);
+
pipe = (req_type & USB_DIR_IN) ? usb_rcvctrlpipe(udev, 0)
: usb_sndctrlpipe(udev, 0);
for (i = 0; i < MT_VEND_REQ_MAX_RETRY; i++) {
@@ -60,7 +62,6 @@ int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
}
EXPORT_SYMBOL_GPL(mt76u_vendor_request);
-/* should be called with usb_ctrl_mtx locked */
static u32 __mt76u_rr(struct mt76_dev *dev, u32 addr)
{
struct mt76_usb *usb = &dev->usb;
@@ -103,7 +104,6 @@ static u32 mt76u_rr(struct mt76_dev *dev, u32 addr)
return ret;
}
-/* should be called with usb_ctrl_mtx locked */
static void __mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
{
struct mt76_usb *usb = &dev->usb;
@@ -235,7 +235,8 @@ mt76u_rd_rp(struct mt76_dev *dev, u32 base,
static bool mt76u_check_sg(struct mt76_dev *dev)
{
- struct usb_device *udev = to_usb_device(dev->dev);
+ struct usb_interface *uintf = to_usb_interface(dev->dev);
+ struct usb_device *udev = interface_to_usbdev(uintf);
return (!disable_usb_sg && udev->bus->sg_tablesize > 0 &&
(udev->bus->no_sg_constraint ||
@@ -370,7 +371,8 @@ mt76u_fill_bulk_urb(struct mt76_dev *dev, int dir, int index,
struct urb *urb, usb_complete_t complete_fn,
void *context)
{
- struct usb_device *udev = to_usb_device(dev->dev);
+ struct usb_interface *uintf = to_usb_interface(dev->dev);
+ struct usb_device *udev = interface_to_usbdev(uintf);
unsigned int pipe;
if (dir == USB_DIR_IN)
@@ -695,10 +697,7 @@ static void mt76u_tx_tasklet(unsigned long data)
mt76_txq_schedule(dev, i);
if (!test_and_set_bit(MT76_READING_STATS, &dev->state))
- ieee80211_queue_delayed_work(dev->hw,
- &dev->usb.stat_work,
- msecs_to_jiffies(10));
-
+ queue_work(dev->usb.stat_wq, &dev->usb.stat_work);
if (wake)
ieee80211_wake_queue(dev->hw, i);
}
@@ -711,7 +710,7 @@ static void mt76u_tx_status_data(struct work_struct *work)
u8 update = 1;
u16 count = 0;
- usb = container_of(work, struct mt76_usb, stat_work.work);
+ usb = container_of(work, struct mt76_usb, stat_work);
dev = container_of(usb, struct mt76_dev, usb);
while (true) {
@@ -724,8 +723,7 @@ static void mt76u_tx_status_data(struct work_struct *work)
}
if (count && test_bit(MT76_STATE_RUNNING, &dev->state))
- ieee80211_queue_delayed_work(dev->hw, &usb->stat_work,
- msecs_to_jiffies(10));
+ queue_work(usb->stat_wq, &usb->stat_work);
else
clear_bit(MT76_READING_STATS, &dev->state);
}
@@ -906,7 +904,7 @@ void mt76u_stop_tx(struct mt76_dev *dev)
}
}
- cancel_delayed_work_sync(&dev->usb.stat_work);
+ cancel_work_sync(&dev->usb.stat_work);
clear_bit(MT76_READING_STATS, &dev->state);
mt76_tx_status_check(dev, NULL, true);
@@ -952,24 +950,40 @@ int mt76u_init(struct mt76_dev *dev,
.rd_rp = mt76u_rd_rp,
.type = MT76_BUS_USB,
};
+ struct usb_device *udev = interface_to_usbdev(intf);
struct mt76_usb *usb = &dev->usb;
tasklet_init(&usb->rx_tasklet, mt76u_rx_tasklet, (unsigned long)dev);
tasklet_init(&dev->tx_tasklet, mt76u_tx_tasklet, (unsigned long)dev);
- INIT_DELAYED_WORK(&usb->stat_work, mt76u_tx_status_data);
+ INIT_WORK(&usb->stat_work, mt76u_tx_status_data);
skb_queue_head_init(&dev->rx_skb[MT_RXQ_MAIN]);
+ usb->stat_wq = alloc_workqueue("mt76u", WQ_UNBOUND, 0);
+ if (!usb->stat_wq)
+ return -ENOMEM;
+
mutex_init(&usb->mcu.mutex);
mutex_init(&usb->usb_ctrl_mtx);
dev->bus = &mt76u_ops;
dev->queue_ops = &usb_queue_ops;
+ dev_set_drvdata(&udev->dev, dev);
+
usb->sg_en = mt76u_check_sg(dev);
return mt76u_set_endpoints(intf, usb);
}
EXPORT_SYMBOL_GPL(mt76u_init);
+void mt76u_deinit(struct mt76_dev *dev)
+{
+ if (dev->usb.stat_wq) {
+ destroy_workqueue(dev->usb.stat_wq);
+ dev->usb.stat_wq = NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(mt76u_deinit);
+
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
MODULE_LICENSE("Dual BSD/GPL");
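
The status work is thus moved from a 10 ms ieee80211_queue_delayed_work()
on mac80211's workqueue to an immediately-queued item on a driver-private
unbound workqueue. A minimal sketch of the lifecycle as set up above:

	struct workqueue_struct *wq;

	/* init (mt76u_init) */
	wq = alloc_workqueue("mt76u", WQ_UNBOUND, 0);
	if (!wq)
		return -ENOMEM;
	INIT_WORK(&usb->stat_work, mt76u_tx_status_data);

	/* producer: kick the work with no artificial delay */
	queue_work(wq, &usb->stat_work);

	/* teardown (mt76u_stop_tx / mt76u_deinit) */
	cancel_work_sync(&usb->stat_work);
	destroy_workqueue(wq);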
diff --git a/drivers/net/wireless/mediatek/mt7601u/debugfs.c b/drivers/net/wireless/mediatek/mt7601u/debugfs.c
index 5e549831370c..300242bce799 100644
--- a/drivers/net/wireless/mediatek/mt7601u/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt7601u/debugfs.c
@@ -27,7 +27,7 @@ mt76_reg_get(void *data, u64 *val)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(fops_regval, mt76_reg_get, mt76_reg_set, "0x%08llx\n");
+DEFINE_DEBUGFS_ATTRIBUTE(fops_regval, mt76_reg_get, mt76_reg_set, "0x%08llx\n");
static int
mt7601u_ampdu_stat_read(struct seq_file *file, void *data)
diff --git a/drivers/net/wireless/mediatek/mt7601u/main.c b/drivers/net/wireless/mediatek/mt7601u/main.c
index 72e608cc53af..671d8897ae76 100644
--- a/drivers/net/wireless/mediatek/mt7601u/main.c
+++ b/drivers/net/wireless/mediatek/mt7601u/main.c
@@ -372,8 +372,7 @@ mt76_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
break;
case IEEE80211_AMPDU_TX_START:
msta->agg_ssn[tid] = ssn << 4;
- ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
- break;
+ return IEEE80211_AMPDU_TX_START_IMMEDIATE;
case IEEE80211_AMPDU_TX_STOP_CONT:
ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
break;
diff --git a/drivers/net/wireless/mediatek/mt7601u/phy.c b/drivers/net/wireless/mediatek/mt7601u/phy.c
index 06f5702ab4bd..d863ab4a66c9 100644
--- a/drivers/net/wireless/mediatek/mt7601u/phy.c
+++ b/drivers/net/wireless/mediatek/mt7601u/phy.c
@@ -213,7 +213,7 @@ int mt7601u_wait_bbp_ready(struct mt7601u_dev *dev)
do {
val = mt7601u_bbp_rr(dev, MT_BBP_REG_VERSION);
- if (val && ~val)
+ if (val && val != 0xff)
break;
} while (--i);
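
The old `if (val && ~val)` test was meant to reject the all-ones pattern as
well as zero, but the BBP read presumably returns a wider integer type, so
~val is non-zero even when the register reads back 0xff. A sketch of the
failure, assuming an 8-bit register value held in an int:

	int val = 0xff;

	if (val && ~val)	/* ~0xff == 0xffffff00: 0xff slips through */
		;
	if (val && val != 0xff)	/* fixed: rejects both 0x00 and 0xff */
		;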
diff --git a/drivers/net/wireless/quantenna/qtnfmac/bus.h b/drivers/net/wireless/quantenna/qtnfmac/bus.h
index 7cea08f71838..87d048df09d1 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/bus.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/bus.h
@@ -12,6 +12,16 @@
#define QTNF_MAX_MAC 3
+#define HBM_FRAME_META_MAGIC_PATTERN_S 0xAB
+#define HBM_FRAME_META_MAGIC_PATTERN_E 0xBA
+
+struct qtnf_frame_meta_info {
+ u8 magic_s;
+ u8 ifidx;
+ u8 macid;
+ u8 magic_e;
+} __packed;
+
enum qtnf_fw_state {
QTNF_FW_STATE_DETACHED,
QTNF_FW_STATE_BOOT_DONE,
@@ -31,8 +41,10 @@ struct qtnf_bus_ops {
int (*control_tx)(struct qtnf_bus *, struct sk_buff *);
/* data xfer methods */
- int (*data_tx)(struct qtnf_bus *, struct sk_buff *);
+ int (*data_tx)(struct qtnf_bus *bus, struct sk_buff *skb,
+ unsigned int macid, unsigned int vifid);
void (*data_tx_timeout)(struct qtnf_bus *, struct net_device *);
+ void (*data_tx_use_meta_set)(struct qtnf_bus *bus, bool use_meta);
void (*data_rx_start)(struct qtnf_bus *);
void (*data_rx_stop)(struct qtnf_bus *);
};
@@ -42,7 +54,7 @@ struct qtnf_bus {
enum qtnf_fw_state fw_state;
u32 chip;
u32 chiprev;
- const struct qtnf_bus_ops *bus_ops;
+ struct qtnf_bus_ops *bus_ops;
struct qtnf_wmac *mac[QTNF_MAX_MAC];
struct qtnf_qlink_transport trans;
struct qtnf_hw_info hw_info;
@@ -54,6 +66,8 @@ struct qtnf_bus {
struct work_struct event_work;
struct mutex bus_lock; /* lock during command/event processing */
struct dentry *dbg_dir;
+ struct notifier_block netdev_nb;
+ u8 hw_id[ETH_ALEN];
/* bus private data */
char bus_priv[0] __aligned(sizeof(void *));
};
@@ -99,9 +113,10 @@ static inline void qtnf_bus_stop(struct qtnf_bus *bus)
bus->bus_ops->stop(bus);
}
-static inline int qtnf_bus_data_tx(struct qtnf_bus *bus, struct sk_buff *skb)
+static inline int qtnf_bus_data_tx(struct qtnf_bus *bus, struct sk_buff *skb,
+ unsigned int macid, unsigned int vifid)
{
- return bus->bus_ops->data_tx(bus, skb);
+ return bus->bus_ops->data_tx(bus, skb, macid, vifid);
}
static inline void
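
The qtnf_frame_meta_info trailer defined above is appended to each TX frame
when the hardware-bridge capability is active, and validated and stripped
on RX by qtnf_classify_skb(). A hypothetical sketch of the TX side (the
real append lives in the PCIe datapath, which is not part of this hunk):

	struct qtnf_frame_meta_info *meta;

	meta = skb_put(skb, sizeof(*meta));	/* needed_tailroom reserved */
	meta->magic_s = HBM_FRAME_META_MAGIC_PATTERN_S;
	meta->ifidx = vifid;
	meta->macid = macid;
	meta->magic_e = HBM_FRAME_META_MAGIC_PATTERN_E;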
diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
index d90016125dfc..59d089e092f9 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
@@ -238,22 +238,29 @@ static struct wireless_dev *qtnf_add_virtual_intf(struct wiphy *wiphy,
pr_err("VIF%u.%u: FW reported bad MAC: %pM\n",
mac->macid, vif->vifid, vif->mac_addr);
ret = -EINVAL;
- goto err_mac;
+ goto error_del_vif;
}
ret = qtnf_core_net_attach(mac, vif, name, name_assign_t);
if (ret) {
pr_err("VIF%u.%u: failed to attach netdev\n", mac->macid,
vif->vifid);
- goto err_net;
+ goto error_del_vif;
+ }
+
+ if (mac->bus->hw_info.hw_capab & QLINK_HW_CAPAB_HW_BRIDGE) {
+ ret = qtnf_cmd_netdev_changeupper(vif, vif->netdev->ifindex);
+ if (ret) {
+ unregister_netdevice(vif->netdev);
+ vif->netdev = NULL;
+ goto error_del_vif;
+ }
}
vif->wdev.netdev = vif->netdev;
return &vif->wdev;
-err_net:
- vif->netdev = NULL;
-err_mac:
+error_del_vif:
qtnf_cmd_send_del_intf(vif);
err_cmd:
vif->wdev.iftype = NL80211_IFTYPE_UNSPECIFIED;
@@ -897,6 +904,45 @@ static int qtnf_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
return ret;
}
+static int qtnf_get_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
+ int *dbm)
+{
+ struct qtnf_vif *vif = qtnf_netdev_get_priv(wdev->netdev);
+ int ret;
+
+ ret = qtnf_cmd_get_tx_power(vif, dbm);
+ if (ret)
+ pr_err("MAC%u: failed to get Tx power\n", vif->mac->macid);
+
+ return ret;
+}
+
+static int qtnf_set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
+ enum nl80211_tx_power_setting type, int mbm)
+{
+ struct qtnf_vif *vif;
+ int ret;
+
+ if (wdev) {
+ vif = qtnf_netdev_get_priv(wdev->netdev);
+ } else {
+ struct qtnf_wmac *mac = wiphy_priv(wiphy);
+
+ vif = qtnf_mac_get_base_vif(mac);
+ if (!vif) {
+ pr_err("MAC%u: primary VIF is not configured\n",
+ mac->macid);
+ return -EFAULT;
+ }
+ }
+
+ ret = qtnf_cmd_set_tx_power(vif, type, mbm);
+ if (ret)
+ pr_err("MAC%u: failed to set Tx power\n", vif->mac->macid);
+
+ return ret;
+}
+
#ifdef CONFIG_PM
static int qtnf_suspend(struct wiphy *wiphy, struct cfg80211_wowlan *wowlan)
{
@@ -991,6 +1037,8 @@ static struct cfg80211_ops qtn_cfg80211_ops = {
.start_radar_detection = qtnf_start_radar_detection,
.set_mac_acl = qtnf_set_mac_acl,
.set_power_mgmt = qtnf_set_power_mgmt,
+ .get_tx_power = qtnf_get_tx_power,
+ .set_tx_power = qtnf_set_tx_power,
#ifdef CONFIG_PM
.suspend = qtnf_suspend,
.resume = qtnf_resume,
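
cfg80211 passes TX power in mBm (hundredths of a dBm); the conversion
helpers used by the new handlers come from <net/cfg80211.h>:

	int dbm = MBM_TO_DBM(2000);	/* 2000 mBm == 20 dBm */
	int mbm = DBM_TO_MBM(17);	/* 17 dBm == 1700 mBm */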
diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.c b/drivers/net/wireless/quantenna/qtnfmac/commands.c
index dc0c7244b60e..548f6ff6d0f2 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/commands.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/commands.c
@@ -83,6 +83,7 @@ static int qtnf_cmd_send_with_reply(struct qtnf_bus *bus,
struct qlink_cmd *cmd;
struct qlink_resp *resp = NULL;
struct sk_buff *resp_skb = NULL;
+ int resp_res = 0;
u16 cmd_id;
u8 mac_id;
u8 vif_id;
@@ -113,6 +114,7 @@ static int qtnf_cmd_send_with_reply(struct qtnf_bus *bus,
}
resp = (struct qlink_resp *)resp_skb->data;
+ resp_res = le16_to_cpu(resp->result);
ret = qtnf_cmd_check_reply_header(resp, cmd_id, mac_id, vif_id,
const_resp_size);
if (ret)
@@ -128,8 +130,8 @@ out:
else
consume_skb(resp_skb);
- if (!ret && resp)
- return qtnf_cmd_resp_result_decode(le16_to_cpu(resp->result));
+ if (!ret)
+ return qtnf_cmd_resp_result_decode(resp_res);
pr_warn("VIF%u.%u: cmd 0x%.4X failed: %d\n",
mac_id, vif_id, cmd_id, ret);
@@ -212,6 +214,20 @@ static bool qtnf_cmd_start_ap_can_fit(const struct qtnf_vif *vif,
return true;
}
+static void qtnf_cmd_tlv_ie_ext_add(struct sk_buff *cmd_skb, u8 eid_ext,
+ const void *buf, size_t len)
+{
+ struct qlink_tlv_ext_ie *tlv;
+
+ tlv = (struct qlink_tlv_ext_ie *)skb_put(cmd_skb, sizeof(*tlv) + len);
+ tlv->hdr.type = cpu_to_le16(WLAN_EID_EXTENSION);
+ tlv->hdr.len = cpu_to_le16(sizeof(*tlv) + len - sizeof(tlv->hdr));
+ tlv->eid_ext = eid_ext;
+
+ if (len && buf)
+ memcpy(tlv->ie_data, buf, len);
+}
+
int qtnf_cmd_send_start_ap(struct qtnf_vif *vif,
const struct cfg80211_ap_settings *s)
{
@@ -307,6 +323,10 @@ int qtnf_cmd_send_start_ap(struct qtnf_vif *vif,
memcpy(tlv->val, s->vht_cap, sizeof(*s->vht_cap));
}
+ if (s->he_cap)
+ qtnf_cmd_tlv_ie_ext_add(cmd_skb, WLAN_EID_EXT_HE_CAPABILITY,
+ s->he_cap, sizeof(*s->he_cap));
+
if (s->acl) {
size_t acl_size = struct_size(s->acl, mac_addrs,
s->acl->n_acl_entries);
@@ -1240,10 +1260,7 @@ qtnf_cmd_resp_proc_mac_info(struct qtnf_wmac *mac,
mac_info = &mac->macinfo;
mac_info->bands_cap = resp_info->bands_cap;
- memcpy(&mac_info->dev_mac, &resp_info->dev_mac,
- sizeof(mac_info->dev_mac));
-
- ether_addr_copy(mac->macaddr, mac_info->dev_mac);
+ ether_addr_copy(mac->macaddr, resp_info->dev_mac);
vif = qtnf_mac_get_base_vif(mac);
if (vif)
@@ -1293,6 +1310,69 @@ static void qtnf_cmd_resp_band_fill_vhtcap(const u8 *info,
memcpy(&bcap->vht_mcs, &vht_cap->supp_mcs, sizeof(bcap->vht_mcs));
}
+static void qtnf_cmd_conv_iftype(struct ieee80211_sband_iftype_data
+ *iftype_data,
+ const struct qlink_sband_iftype_data
+ *qlink_data)
+{
+ iftype_data->types_mask = le16_to_cpu(qlink_data->types_mask);
+
+ iftype_data->he_cap.has_he = true;
+ memcpy(&iftype_data->he_cap.he_cap_elem, &qlink_data->he_cap_elem,
+ sizeof(qlink_data->he_cap_elem));
+ memcpy(iftype_data->he_cap.ppe_thres, qlink_data->ppe_thres,
+ ARRAY_SIZE(qlink_data->ppe_thres));
+
+ iftype_data->he_cap.he_mcs_nss_supp.rx_mcs_80 =
+ qlink_data->he_mcs_nss_supp.rx_mcs_80;
+ iftype_data->he_cap.he_mcs_nss_supp.tx_mcs_80 =
+ qlink_data->he_mcs_nss_supp.tx_mcs_80;
+ iftype_data->he_cap.he_mcs_nss_supp.rx_mcs_160 =
+ qlink_data->he_mcs_nss_supp.rx_mcs_160;
+ iftype_data->he_cap.he_mcs_nss_supp.tx_mcs_160 =
+ qlink_data->he_mcs_nss_supp.tx_mcs_160;
+ iftype_data->he_cap.he_mcs_nss_supp.rx_mcs_80p80 =
+ qlink_data->he_mcs_nss_supp.rx_mcs_80p80;
+ iftype_data->he_cap.he_mcs_nss_supp.tx_mcs_80p80 =
+ qlink_data->he_mcs_nss_supp.tx_mcs_80p80;
+}
+
+static int qtnf_cmd_band_fill_iftype(const u8 *data,
+ struct ieee80211_supported_band *band)
+{
+ unsigned int i;
+ struct ieee80211_sband_iftype_data *iftype_data;
+ const struct qlink_tlv_iftype_data *tlv =
+ (const struct qlink_tlv_iftype_data *)data;
+ size_t payload_len = tlv->n_iftype_data * sizeof(*tlv->iftype_data) +
+ sizeof(*tlv) -
+ sizeof(struct qlink_tlv_hdr);
+
+ if (tlv->hdr.len != cpu_to_le16(payload_len)) {
+ pr_err("bad IFTYPE_DATA TLV len %u\n", tlv->hdr.len);
+ return -EINVAL;
+ }
+
+ kfree(band->iftype_data);
+ band->iftype_data = NULL;
+ band->n_iftype_data = tlv->n_iftype_data;
+ if (band->n_iftype_data == 0)
+ return 0;
+
+ iftype_data = kcalloc(band->n_iftype_data, sizeof(*iftype_data),
+ GFP_KERNEL);
+ if (!iftype_data) {
+ band->n_iftype_data = 0;
+ return -ENOMEM;
+ }
+ band->iftype_data = iftype_data;
+
+ for (i = 0; i < band->n_iftype_data; i++)
+ qtnf_cmd_conv_iftype(iftype_data++, &tlv->iftype_data[i]);
+
+ return 0;
+}
+
static int
qtnf_cmd_resp_fill_band_info(struct ieee80211_supported_band *band,
struct qlink_resp_band_info_get *resp,
@@ -1306,6 +1386,7 @@ qtnf_cmd_resp_fill_band_info(struct ieee80211_supported_band *band,
struct ieee80211_channel *chan;
unsigned int chidx = 0;
u32 qflags;
+ int ret = -EINVAL;
memset(&band->ht_cap, 0, sizeof(band->ht_cap));
memset(&band->vht_cap, 0, sizeof(band->vht_cap));
@@ -1443,6 +1524,12 @@ qtnf_cmd_resp_fill_band_info(struct ieee80211_supported_band *band,
qtnf_cmd_resp_band_fill_vhtcap(tlv->val,
&band->vht_cap);
break;
+ case QTN_TLV_ID_IFTYPE_DATA:
+ ret = qtnf_cmd_band_fill_iftype((const uint8_t *)tlv,
+ band);
+ if (ret)
+ goto error_ret;
+ break;
default:
pr_warn("unknown TLV type: %#x\n", tlv_type);
break;
@@ -1470,7 +1557,7 @@ error_ret:
band->channels = NULL;
band->n_channels = 0;
- return -EINVAL;
+ return ret;
}
static int qtnf_cmd_resp_proc_phy_params(struct qtnf_wmac *mac,
@@ -2641,6 +2728,71 @@ out:
return ret;
}
+int qtnf_cmd_get_tx_power(const struct qtnf_vif *vif, int *dbm)
+{
+ struct qtnf_bus *bus = vif->mac->bus;
+ const struct qlink_resp_txpwr *resp;
+ struct sk_buff *resp_skb = NULL;
+ struct qlink_cmd_txpwr *cmd;
+ struct sk_buff *cmd_skb;
+ int ret = 0;
+
+ cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
+ QLINK_CMD_TXPWR, sizeof(*cmd));
+ if (!cmd_skb)
+ return -ENOMEM;
+
+ cmd = (struct qlink_cmd_txpwr *)cmd_skb->data;
+ cmd->op_type = QLINK_TXPWR_GET;
+
+ qtnf_bus_lock(bus);
+
+ ret = qtnf_cmd_send_with_reply(bus, cmd_skb, &resp_skb,
+ sizeof(*resp), NULL);
+ if (ret)
+ goto out;
+
+ resp = (const struct qlink_resp_txpwr *)resp_skb->data;
+ *dbm = MBM_TO_DBM(le32_to_cpu(resp->txpwr));
+
+out:
+ qtnf_bus_unlock(bus);
+ consume_skb(resp_skb);
+
+ return ret;
+}
+
+int qtnf_cmd_set_tx_power(const struct qtnf_vif *vif,
+ enum nl80211_tx_power_setting type, int mbm)
+{
+ struct qtnf_bus *bus = vif->mac->bus;
+ const struct qlink_resp_txpwr *resp;
+ struct sk_buff *resp_skb = NULL;
+ struct qlink_cmd_txpwr *cmd;
+ struct sk_buff *cmd_skb;
+ int ret = 0;
+
+ cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
+ QLINK_CMD_TXPWR, sizeof(*cmd));
+ if (!cmd_skb)
+ return -ENOMEM;
+
+ cmd = (struct qlink_cmd_txpwr *)cmd_skb->data;
+ cmd->op_type = QLINK_TXPWR_SET;
+ cmd->txpwr_setting = type;
+ cmd->txpwr = cpu_to_le32(mbm);
+
+ qtnf_bus_lock(bus);
+
+ ret = qtnf_cmd_send_with_reply(bus, cmd_skb, &resp_skb,
+ sizeof(*resp), NULL);
+
+ qtnf_bus_unlock(bus);
+ consume_skb(resp_skb);
+
+ return ret;
+}
+
int qtnf_cmd_send_wowlan_set(const struct qtnf_vif *vif,
const struct cfg80211_wowlan *wowl)
{
@@ -2689,3 +2841,35 @@ out:
qtnf_bus_unlock(bus);
return ret;
}
+
+int qtnf_cmd_netdev_changeupper(const struct qtnf_vif *vif, int br_domain)
+{
+ struct qtnf_bus *bus = vif->mac->bus;
+ struct sk_buff *cmd_skb;
+ struct qlink_cmd_ndev_changeupper *cmd;
+ int ret;
+
+ cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
+ QLINK_CMD_NDEV_EVENT,
+ sizeof(*cmd));
+ if (!cmd_skb)
+ return -ENOMEM;
+
+ pr_debug("[VIF%u.%u] set broadcast domain to %d\n",
+ vif->mac->macid, vif->vifid, br_domain);
+
+ cmd = (struct qlink_cmd_ndev_changeupper *)cmd_skb->data;
+ cmd->nehdr.event = cpu_to_le16(QLINK_NDEV_EVENT_CHANGEUPPER);
+ cmd->upper_type = QLINK_NDEV_UPPER_TYPE_BRIDGE;
+ cmd->br_domain = cpu_to_le32(br_domain);
+
+ qtnf_bus_lock(bus);
+ ret = qtnf_cmd_send(bus, cmd_skb);
+ qtnf_bus_unlock(bus);
+
+ if (ret)
+ pr_err("[VIF%u.%u] failed to set broadcast domain\n",
+ vif->mac->macid, vif->vifid);
+
+ return ret;
+}
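
For reference, the extension-IE TLV built by qtnf_cmd_tlv_ie_ext_add() ends
up laid out as below, assuming ie_data is a flexible array member and the
struct carries no padding, so hdr.len covers eid_ext plus the payload:

	| hdr.type = WLAN_EID_EXTENSION | hdr.len = 1 + len   |
	| eid_ext (1 byte)              | ie_data (len bytes) |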
diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.h b/drivers/net/wireless/quantenna/qtnfmac/commands.h
index 88d7a3cd90d2..761755bf9ede 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/commands.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/commands.h
@@ -70,7 +70,11 @@ int qtnf_cmd_start_cac(const struct qtnf_vif *vif,
int qtnf_cmd_set_mac_acl(const struct qtnf_vif *vif,
const struct cfg80211_acl_data *params);
int qtnf_cmd_send_pm_set(const struct qtnf_vif *vif, u8 pm_mode, int timeout);
+int qtnf_cmd_get_tx_power(const struct qtnf_vif *vif, int *dbm);
+int qtnf_cmd_set_tx_power(const struct qtnf_vif *vif,
+ enum nl80211_tx_power_setting type, int mbm);
int qtnf_cmd_send_wowlan_set(const struct qtnf_vif *vif,
const struct cfg80211_wowlan *wowl);
+int qtnf_cmd_netdev_changeupper(const struct qtnf_vif *vif, int br_domain);
#endif /* QLINK_COMMANDS_H_ */
diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.c b/drivers/net/wireless/quantenna/qtnfmac/core.c
index 8d699cc03d26..5fb598389487 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/core.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/core.c
@@ -12,6 +12,7 @@
#include "cfg80211.h"
#include "event.h"
#include "util.h"
+#include "switchdev.h"
#define QTNF_DMP_MAX_LEN 48
#define QTNF_PRIMARY_VIF_IDX 0
@@ -22,13 +23,6 @@ MODULE_PARM_DESC(slave_radar, "set 0 to disable radar detection in slave mode");
static struct dentry *qtnf_debugfs_dir;
-struct qtnf_frame_meta_info {
- u8 magic_s;
- u8 ifidx;
- u8 macid;
- u8 magic_e;
-} __packed;
-
struct qtnf_wmac *qtnf_core_get_mac(const struct qtnf_bus *bus, u8 macid)
{
struct qtnf_wmac *mac = NULL;
@@ -67,6 +61,14 @@ static int qtnf_netdev_close(struct net_device *ndev)
return 0;
}
+static void qtnf_packet_send_hi_pri(struct sk_buff *skb)
+{
+ struct qtnf_vif *vif = qtnf_netdev_get_priv(skb->dev);
+
+ skb_queue_tail(&vif->high_pri_tx_queue, skb);
+ queue_work(vif->mac->bus->hprio_workqueue, &vif->high_pri_tx_work);
+}
+
/* Netdev handler for data transmission.
*/
static netdev_tx_t
@@ -107,7 +109,13 @@ qtnf_netdev_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
/* tx path is enabled: reset vif timeout */
vif->cons_tx_timeout_cnt = 0;
- return qtnf_bus_data_tx(mac->bus, skb);
+ if (unlikely(skb->protocol == htons(ETH_P_PAE))) {
+ qtnf_packet_send_hi_pri(skb);
+ qtnf_update_tx_stats(ndev, skb);
+ return NETDEV_TX_OK;
+ }
+
+ return qtnf_bus_data_tx(mac->bus, skb, mac->macid, vif->vifid);
}
/* Netdev handler for getting stats.
@@ -197,6 +205,21 @@ static int qtnf_netdev_set_mac_address(struct net_device *ndev, void *addr)
return ret;
}
+static int qtnf_netdev_port_parent_id(struct net_device *ndev,
+ struct netdev_phys_item_id *ppid)
+{
+ const struct qtnf_vif *vif = qtnf_netdev_get_priv(ndev);
+ const struct qtnf_bus *bus = vif->mac->bus;
+
+ if (!(bus->hw_info.hw_capab & QLINK_HW_CAPAB_HW_BRIDGE))
+ return -EOPNOTSUPP;
+
+ ppid->id_len = sizeof(bus->hw_id);
+ memcpy(&ppid->id, bus->hw_id, ppid->id_len);
+
+ return 0;
+}
+
/* Network device ops handlers */
const struct net_device_ops qtnf_netdev_ops = {
.ndo_open = qtnf_netdev_open,
@@ -205,6 +228,7 @@ const struct net_device_ops qtnf_netdev_ops = {
.ndo_tx_timeout = qtnf_netdev_tx_timeout,
.ndo_get_stats64 = qtnf_netdev_get_stats64,
.ndo_set_mac_address = qtnf_netdev_set_mac_address,
+ .ndo_get_port_parent_id = qtnf_netdev_port_parent_id,
};
static int qtnf_mac_init_single_band(struct wiphy *wiphy,
@@ -451,10 +475,8 @@ int qtnf_core_net_attach(struct qtnf_wmac *mac, struct qtnf_vif *vif,
dev = alloc_netdev_mqs(sizeof(struct qtnf_vif *), name,
name_assign_type, ether_setup, 1, 1);
- if (!dev) {
- vif->wdev.iftype = NL80211_IFTYPE_UNSPECIFIED;
+ if (!dev)
return -ENOMEM;
- }
vif->netdev = dev;
@@ -469,6 +491,9 @@ int qtnf_core_net_attach(struct qtnf_wmac *mac, struct qtnf_vif *vif,
dev->tx_queue_len = 100;
dev->ethtool_ops = &qtnf_ethtool_ops;
+ if (mac->bus->hw_info.hw_capab & QLINK_HW_CAPAB_HW_BRIDGE)
+ dev->needed_tailroom = sizeof(struct qtnf_frame_meta_info);
+
qdev_vif = netdev_priv(dev);
*((void **)qdev_vif) = vif;
@@ -477,7 +502,7 @@ int qtnf_core_net_attach(struct qtnf_wmac *mac, struct qtnf_vif *vif,
ret = register_netdevice(dev);
if (ret) {
free_netdev(dev);
- vif->wdev.iftype = NL80211_IFTYPE_UNSPECIFIED;
+ vif->netdev = NULL;
}
return ret;
@@ -518,6 +543,9 @@ static void qtnf_core_mac_detach(struct qtnf_bus *bus, unsigned int macid)
if (!wiphy->bands[band])
continue;
+ kfree(wiphy->bands[band]->iftype_data);
+ wiphy->bands[band]->n_iftype_data = 0;
+
kfree(wiphy->bands[band]->channels);
wiphy->bands[band]->n_channels = 0;
@@ -557,6 +585,10 @@ static int qtnf_core_mac_attach(struct qtnf_bus *bus, unsigned int macid)
goto error;
}
+ /* Use MAC address of the first active radio as a unique device ID */
+ if (is_zero_ether_addr(mac->bus->hw_id))
+ ether_addr_copy(mac->bus->hw_id, mac->macaddr);
+
vif = qtnf_mac_get_base_vif(mac);
if (!vif) {
pr_err("MAC%u: primary VIF is not ready\n", macid);
@@ -574,19 +606,19 @@ static int qtnf_core_mac_attach(struct qtnf_bus *bus, unsigned int macid)
ret = qtnf_cmd_send_get_phy_params(mac);
if (ret) {
pr_err("MAC%u: failed to get PHY settings\n", macid);
- goto error;
+ goto error_del_vif;
}
ret = qtnf_mac_init_bands(mac);
if (ret) {
pr_err("MAC%u: failed to init bands\n", macid);
- goto error;
+ goto error_del_vif;
}
ret = qtnf_wiphy_register(&bus->hw_info, mac);
if (ret) {
pr_err("MAC%u: wiphy registration failed\n", macid);
- goto error;
+ goto error_del_vif;
}
mac->wiphy_registered = 1;
@@ -598,20 +630,75 @@ static int qtnf_core_mac_attach(struct qtnf_bus *bus, unsigned int macid)
if (ret) {
pr_err("MAC%u: failed to attach netdev\n", macid);
- vif->wdev.iftype = NL80211_IFTYPE_UNSPECIFIED;
- vif->netdev = NULL;
- goto error;
+ goto error_del_vif;
+ }
+
+ if (bus->hw_info.hw_capab & QLINK_HW_CAPAB_HW_BRIDGE) {
+ ret = qtnf_cmd_netdev_changeupper(vif, vif->netdev->ifindex);
+ if (ret)
+ goto error;
}
pr_debug("MAC%u initialized\n", macid);
return 0;
+error_del_vif:
+ qtnf_cmd_send_del_intf(vif);
+ vif->wdev.iftype = NL80211_IFTYPE_UNSPECIFIED;
error:
qtnf_core_mac_detach(bus, macid);
return ret;
}
+bool qtnf_netdev_is_qtn(const struct net_device *ndev)
+{
+ return ndev->netdev_ops == &qtnf_netdev_ops;
+}
+
+static int qtnf_core_netdevice_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
+ const struct netdev_notifier_changeupper_info *info;
+ struct qtnf_vif *vif;
+ int br_domain;
+ int ret = 0;
+
+ if (!qtnf_netdev_is_qtn(ndev))
+ return NOTIFY_DONE;
+
+ if (!net_eq(dev_net(ndev), &init_net))
+ return NOTIFY_OK;
+
+ vif = qtnf_netdev_get_priv(ndev);
+
+ switch (event) {
+ case NETDEV_CHANGEUPPER:
+ info = ptr;
+
+ if (!netif_is_bridge_master(info->upper_dev))
+ break;
+
+ pr_debug("[VIF%u.%u] change bridge: %s %s\n",
+ vif->mac->macid, vif->vifid,
+ netdev_name(info->upper_dev),
+ info->linking ? "add" : "del");
+
+ if (info->linking)
+ br_domain = info->upper_dev->ifindex;
+ else
+ br_domain = ndev->ifindex;
+
+ ret = qtnf_cmd_netdev_changeupper(vif, br_domain);
+ break;
+ default:
+ break;
+ }
+
+ return notifier_from_errno(ret);
+}
+
int qtnf_core_attach(struct qtnf_bus *bus)
{
unsigned int i;
@@ -656,6 +743,10 @@ int qtnf_core_attach(struct qtnf_bus *bus)
goto error;
}
+ if ((bus->hw_info.hw_capab & QLINK_HW_CAPAB_HW_BRIDGE) &&
+ bus->bus_ops->data_tx_use_meta_set)
+ bus->bus_ops->data_tx_use_meta_set(bus, true);
+
if (bus->hw_info.num_mac > QTNF_MAX_MAC) {
pr_err("no support for number of MACs=%u\n",
bus->hw_info.num_mac);
@@ -672,6 +763,15 @@ int qtnf_core_attach(struct qtnf_bus *bus)
}
}
+ if (bus->hw_info.hw_capab & QLINK_HW_CAPAB_HW_BRIDGE) {
+ bus->netdev_nb.notifier_call = qtnf_core_netdevice_event;
+ ret = register_netdevice_notifier(&bus->netdev_nb);
+ if (ret) {
+ pr_err("failed to register netdev notifier: %d\n", ret);
+ goto error;
+ }
+ }
+
bus->fw_state = QTNF_FW_STATE_RUNNING;
return 0;
@@ -685,6 +785,7 @@ void qtnf_core_detach(struct qtnf_bus *bus)
{
unsigned int macid;
+ unregister_netdevice_notifier(&bus->netdev_nb);
qtnf_bus_data_rx_stop(bus);
for (macid = 0; macid < QTNF_MAX_MAC; macid++)
@@ -713,7 +814,8 @@ EXPORT_SYMBOL_GPL(qtnf_core_detach);
static inline int qtnf_is_frame_meta_magic_valid(struct qtnf_frame_meta_info *m)
{
- return m->magic_s == 0xAB && m->magic_e == 0xBA;
+ return m->magic_s == HBM_FRAME_META_MAGIC_PATTERN_S &&
+ m->magic_e == HBM_FRAME_META_MAGIC_PATTERN_E;
}
struct net_device *qtnf_classify_skb(struct qtnf_bus *bus, struct sk_buff *skb)
@@ -768,6 +870,8 @@ struct net_device *qtnf_classify_skb(struct qtnf_bus *bus, struct sk_buff *skb)
}
__skb_trim(skb, skb->len - sizeof(*meta));
+ /* Firmware always handles packets that require flooding */
+ qtnfmac_switch_mark_skb_flooded(skb);
out:
return ndev;
@@ -841,15 +945,6 @@ void qtnf_update_tx_stats(struct net_device *ndev, const struct sk_buff *skb)
}
EXPORT_SYMBOL_GPL(qtnf_update_tx_stats);
-void qtnf_packet_send_hi_pri(struct sk_buff *skb)
-{
- struct qtnf_vif *vif = qtnf_netdev_get_priv(skb->dev);
-
- skb_queue_tail(&vif->high_pri_tx_queue, skb);
- queue_work(vif->mac->bus->hprio_workqueue, &vif->high_pri_tx_work);
-}
-EXPORT_SYMBOL_GPL(qtnf_packet_send_hi_pri);
-
struct dentry *qtnf_get_debugfs_dir(void)
{
return qtnf_debugfs_dir;
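
The CHANGEUPPER handling above follows the standard netdevice-notifier
pattern. A stripped-down, hypothetical sketch of decoding the event:

	static int example_netdev_event(struct notifier_block *nb,
					unsigned long event, void *ptr)
	{
		struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
		struct netdev_notifier_changeupper_info *info = ptr;

		if (event != NETDEV_CHANGEUPPER)
			return NOTIFY_DONE;
		if (!netif_is_bridge_master(info->upper_dev))
			return NOTIFY_DONE;

		/* info->linking tells whether ndev joined or left the
		 * bridge; errors propagate via notifier_from_errno() */
		pr_debug("%s %s bridge %s\n", netdev_name(ndev),
			 info->linking ? "joined" : "left",
			 netdev_name(info->upper_dev));

		return notifier_from_errno(0);
	}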
diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.h b/drivers/net/wireless/quantenna/qtnfmac/core.h
index 322858df600c..116ec16aa15b 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/core.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/core.h
@@ -74,7 +74,6 @@ struct qtnf_vif {
struct qtnf_mac_info {
u8 bands_cap;
- u8 dev_mac[ETH_ALEN];
u8 num_tx_chain;
u8 num_rx_chain;
u16 max_ap_assoc_sta;
@@ -152,8 +151,8 @@ void qtnf_virtual_intf_cleanup(struct net_device *ndev);
void qtnf_netdev_updown(struct net_device *ndev, bool up);
void qtnf_scan_done(struct qtnf_wmac *mac, bool aborted);
-void qtnf_packet_send_hi_pri(struct sk_buff *skb);
struct dentry *qtnf_get_debugfs_dir(void);
+bool qtnf_netdev_is_qtn(const struct net_device *ndev);
static inline struct qtnf_vif *qtnf_netdev_get_priv(struct net_device *dev)
{
diff --git a/drivers/net/wireless/quantenna/qtnfmac/event.c b/drivers/net/wireless/quantenna/qtnfmac/event.c
index b57c8c18a8d0..51af93bdf06e 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/event.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/event.c
@@ -171,8 +171,9 @@ qtnf_event_handle_bss_join(struct qtnf_vif *vif,
return -EPROTO;
}
- pr_debug("VIF%u.%u: BSSID:%pM status:%u\n",
- vif->mac->macid, vif->vifid, join_info->bssid, status);
+ pr_debug("VIF%u.%u: BSSID:%pM chan:%u status:%u\n",
+ vif->mac->macid, vif->vifid, join_info->bssid,
+ le16_to_cpu(join_info->chan.chan.center_freq), status);
if (status != WLAN_STATUS_SUCCESS)
goto done;
@@ -181,7 +182,7 @@ qtnf_event_handle_bss_join(struct qtnf_vif *vif,
if (!cfg80211_chandef_valid(&chandef)) {
pr_warn("MAC%u.%u: bad channel freq=%u cf1=%u cf2=%u bw=%u\n",
vif->mac->macid, vif->vifid,
- chandef.chan->center_freq,
+ chandef.chan ? chandef.chan->center_freq : 0,
chandef.center_freq1,
chandef.center_freq2,
chandef.width);
@@ -617,6 +618,42 @@ qtnf_event_handle_external_auth(struct qtnf_vif *vif,
return ret;
}
+static int
+qtnf_event_handle_mic_failure(struct qtnf_vif *vif,
+ const struct qlink_event_mic_failure *mic_ev,
+ u16 len)
+{
+ struct wiphy *wiphy = priv_to_wiphy(vif->mac);
+ u8 pairwise;
+
+ if (len < sizeof(*mic_ev)) {
+ pr_err("VIF%u.%u: payload is too short (%u < %zu)\n",
+ vif->mac->macid, vif->vifid, len,
+ sizeof(struct qlink_event_mic_failure));
+ return -EINVAL;
+ }
+
+ if (!wiphy->registered || !vif->netdev)
+ return 0;
+
+ if (vif->wdev.iftype != NL80211_IFTYPE_STATION) {
+ pr_err("VIF%u.%u: MIC_FAILURE event when not in STA mode\n",
+ vif->mac->macid, vif->vifid);
+ return -EPROTO;
+ }
+
+ pairwise = mic_ev->pairwise ?
+ NL80211_KEYTYPE_PAIRWISE : NL80211_KEYTYPE_GROUP;
+
+ pr_info("%s: MIC error: src=%pM key_index=%u pairwise=%u\n",
+ vif->netdev->name, mic_ev->src, mic_ev->key_index, pairwise);
+
+ cfg80211_michael_mic_failure(vif->netdev, mic_ev->src, pairwise,
+ mic_ev->key_index, NULL, GFP_KERNEL);
+
+ return 0;
+}
+
static int qtnf_event_parse(struct qtnf_wmac *mac,
const struct sk_buff *event_skb)
{
@@ -679,6 +716,10 @@ static int qtnf_event_parse(struct qtnf_wmac *mac,
ret = qtnf_event_handle_external_auth(vif, (const void *)event,
event_len);
break;
+ case QLINK_EVENT_MIC_FAILURE:
+ ret = qtnf_event_handle_mic_failure(vif, (const void *)event,
+ event_len);
+ break;
default:
pr_warn("unknown event type: %x\n", event_id);
break;
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c
index 8ae318b5fe54..5337e67092ca 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c
@@ -33,7 +33,7 @@ static unsigned int tx_bd_size_param;
module_param(tx_bd_size_param, uint, 0644);
MODULE_PARM_DESC(tx_bd_size_param, "Tx descriptors queue size");
-static unsigned int rx_bd_size_param = 256;
+static unsigned int rx_bd_size_param;
module_param(rx_bd_size_param, uint, 0644);
MODULE_PARM_DESC(rx_bd_size_param, "Rx descriptors queue size");
@@ -130,6 +130,8 @@ static int qtnf_dbg_shm_stats(struct seq_file *s, void *data)
int qtnf_pcie_fw_boot_done(struct qtnf_bus *bus)
{
+ struct qtnf_pcie_bus_priv *priv = get_bus_priv(bus);
+ char card_id[64];
int ret;
bus->fw_state = QTNF_FW_STATE_BOOT_DONE;
@@ -137,7 +139,9 @@ int qtnf_pcie_fw_boot_done(struct qtnf_bus *bus)
if (ret) {
pr_err("failed to attach core\n");
} else {
- qtnf_debugfs_init(bus, DRV_NAME);
+ snprintf(card_id, sizeof(card_id), "%s:%s",
+ DRV_NAME, pci_name(priv->pdev));
+ qtnf_debugfs_init(bus, card_id);
qtnf_debugfs_add_entry(bus, "mps", qtnf_dbg_mps_show);
qtnf_debugfs_add_entry(bus, "msi_enabled", qtnf_dbg_msi_show);
qtnf_debugfs_add_entry(bus, "shm_stats", qtnf_dbg_shm_stats);
@@ -337,7 +341,6 @@ static int qtnf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
bus->fw_state = QTNF_FW_STATE_DETACHED;
pcie_priv->pdev = pdev;
pcie_priv->tx_stopped = 0;
- pcie_priv->rx_bd_num = rx_bd_size_param;
pcie_priv->flashboot = flashboot;
if (fw_blksize_param > QTN_PCIE_MAX_FW_BUFSZ)
@@ -354,7 +357,6 @@ static int qtnf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pcie_priv->pcie_irq_count = 0;
pcie_priv->tx_reclaim_done = 0;
pcie_priv->tx_reclaim_req = 0;
- pcie_priv->tx_eapol = 0;
pcie_priv->workqueue = create_singlethread_workqueue("QTNF_PCIE");
if (!pcie_priv->workqueue) {
@@ -377,7 +379,7 @@ static int qtnf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pcie_priv->epmem_bar = epmem_bar;
pci_save_state(pdev);
- ret = pcie_priv->probe_cb(bus, tx_bd_size_param);
+ ret = pcie_priv->probe_cb(bus, tx_bd_size_param, rx_bd_size_param);
if (ret)
goto error;
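One effect of the card_id change above: qtnf_debugfs_init() previously used the bare DRV_NAME, so two cards collided on a single debugfs directory. Appending pci_name() gives each device its own entry, roughly as follows (PCI address illustrative):

	/* before: /sys/kernel/debug/<DRV_NAME>/
	 * after:  /sys/kernel/debug/<DRV_NAME>:0000:01:00.0/  (one per card)
	 */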
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie_priv.h b/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie_priv.h
index 5e8b9cb68419..2a6a928e13bd 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie_priv.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie_priv.h
@@ -23,7 +23,8 @@
struct qtnf_pcie_bus_priv {
struct pci_dev *pdev;
- int (*probe_cb)(struct qtnf_bus *bus, unsigned int tx_bd_size);
+ int (*probe_cb)(struct qtnf_bus *bus, unsigned int tx_bd_size,
+ unsigned int rx_bd_size);
void (*remove_cb)(struct qtnf_bus *bus);
int (*suspend_cb)(struct qtnf_bus *bus);
int (*resume_cb)(struct qtnf_bus *bus);
@@ -62,7 +63,6 @@ struct qtnf_pcie_bus_priv {
u32 tx_done_count;
u32 tx_reclaim_done;
u32 tx_reclaim_req;
- u32 tx_eapol;
u8 msi_enabled;
u8 tx_stopped;
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c
index 3aa3714d4dfd..8e0d8018208a 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c
@@ -24,6 +24,7 @@
#include "debug.h"
#define PEARL_TX_BD_SIZE_DEFAULT 32
+#define PEARL_RX_BD_SIZE_DEFAULT 256
struct qtnf_pearl_bda {
__le16 bda_len;
@@ -244,8 +245,6 @@ static int pearl_alloc_bd_table(struct qtnf_pcie_pearl_state *ps)
/* tx bd */
- memset(vaddr, 0, len);
-
ps->bd_table_vaddr = vaddr;
ps->bd_table_paddr = paddr;
ps->bd_table_len = len;
@@ -399,7 +398,8 @@ static int pearl_hhbm_init(struct qtnf_pcie_pearl_state *ps)
}
static int qtnf_pcie_pearl_init_xfer(struct qtnf_pcie_pearl_state *ps,
- unsigned int tx_bd_size)
+ unsigned int tx_bd_size,
+ unsigned int rx_bd_size)
{
struct qtnf_pcie_bus_priv *priv = &ps->base;
int ret;
@@ -411,28 +411,29 @@ static int qtnf_pcie_pearl_init_xfer(struct qtnf_pcie_pearl_state *ps,
val = tx_bd_size * sizeof(struct qtnf_pearl_tx_bd);
if (!is_power_of_2(tx_bd_size) || val > PCIE_HHBM_MAX_SIZE) {
- pr_warn("bad tx_bd_size value %u\n", tx_bd_size);
+ pr_warn("invalid tx_bd_size value %u, use default %u\n",
+ tx_bd_size, PEARL_TX_BD_SIZE_DEFAULT);
priv->tx_bd_num = PEARL_TX_BD_SIZE_DEFAULT;
} else {
priv->tx_bd_num = tx_bd_size;
}
- priv->rx_bd_w_index = 0;
- priv->rx_bd_r_index = 0;
+ if (rx_bd_size == 0)
+ rx_bd_size = PEARL_RX_BD_SIZE_DEFAULT;
- if (!priv->rx_bd_num || !is_power_of_2(priv->rx_bd_num)) {
- pr_err("rx_bd_size_param %u is not power of two\n",
- priv->rx_bd_num);
- return -EINVAL;
- }
+ val = rx_bd_size * sizeof(dma_addr_t);
- val = priv->rx_bd_num * sizeof(dma_addr_t);
- if (val > PCIE_HHBM_MAX_SIZE) {
- pr_err("rx_bd_size_param %u is too large\n",
- priv->rx_bd_num);
- return -EINVAL;
+ if (!is_power_of_2(rx_bd_size) || val > PCIE_HHBM_MAX_SIZE) {
+ pr_warn("invalid rx_bd_size value %u, use default %u\n",
+ rx_bd_size, PEARL_RX_BD_SIZE_DEFAULT);
+ priv->rx_bd_num = PEARL_RX_BD_SIZE_DEFAULT;
+ } else {
+ priv->rx_bd_num = rx_bd_size;
}
+ priv->rx_bd_w_index = 0;
+ priv->rx_bd_r_index = 0;
+
ret = pearl_hhbm_init(ps);
if (ret) {
pr_err("failed to init h/w queues\n");
@@ -531,7 +532,7 @@ static int qtnf_tx_queue_ready(struct qtnf_pcie_pearl_state *ps)
return 1;
}
-static int qtnf_pcie_data_tx(struct qtnf_bus *bus, struct sk_buff *skb)
+static int qtnf_pcie_skb_send(struct qtnf_bus *bus, struct sk_buff *skb)
{
struct qtnf_pcie_pearl_state *ps = get_bus_priv(bus);
struct qtnf_pcie_bus_priv *priv = &ps->base;
@@ -607,6 +608,38 @@ tx_done:
return NETDEV_TX_OK;
}
+static int qtnf_pcie_data_tx(struct qtnf_bus *bus, struct sk_buff *skb,
+ unsigned int macid, unsigned int vifid)
+{
+ return qtnf_pcie_skb_send(bus, skb);
+}
+
+static int qtnf_pcie_data_tx_meta(struct qtnf_bus *bus, struct sk_buff *skb,
+ unsigned int macid, unsigned int vifid)
+{
+ struct qtnf_frame_meta_info *meta;
+ int tail_need = sizeof(*meta) - skb_tailroom(skb);
+ int ret;
+
+ if (tail_need > 0 && pskb_expand_head(skb, 0, tail_need, GFP_ATOMIC)) {
+ skb->dev->stats.tx_dropped++;
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ meta = skb_put(skb, sizeof(*meta));
+ meta->magic_s = HBM_FRAME_META_MAGIC_PATTERN_S;
+ meta->magic_e = HBM_FRAME_META_MAGIC_PATTERN_E;
+ meta->macid = macid;
+ meta->ifidx = vifid;
+
+ ret = qtnf_pcie_skb_send(bus, skb);
+ if (unlikely(ret == NETDEV_TX_BUSY))
+ __skb_trim(skb, skb->len - sizeof(*meta));
+
+ return ret;
+}
+
static irqreturn_t qtnf_pcie_pearl_interrupt(int irq, void *data)
{
struct qtnf_bus *bus = (struct qtnf_bus *)data;
@@ -795,13 +828,22 @@ static void qtnf_pcie_data_rx_stop(struct qtnf_bus *bus)
qtnf_disable_hdp_irqs(ps);
}
-static const struct qtnf_bus_ops qtnf_pcie_pearl_bus_ops = {
+static void qtnf_pearl_tx_use_meta_info_set(struct qtnf_bus *bus, bool use_meta)
+{
+ if (use_meta)
+ bus->bus_ops->data_tx = qtnf_pcie_data_tx_meta;
+ else
+ bus->bus_ops->data_tx = qtnf_pcie_data_tx;
+}
+
+static struct qtnf_bus_ops qtnf_pcie_pearl_bus_ops = {
/* control path methods */
.control_tx = qtnf_pcie_control_tx,
/* data path methods */
.data_tx = qtnf_pcie_data_tx,
.data_tx_timeout = qtnf_pcie_data_tx_timeout,
+ .data_tx_use_meta_set = qtnf_pearl_tx_use_meta_info_set,
.data_rx_start = qtnf_pcie_data_rx_start,
.data_rx_stop = qtnf_pcie_data_rx_stop,
};
@@ -904,7 +946,7 @@ static int qtnf_ep_fw_send(struct pci_dev *pdev, uint32_t size,
memcpy(pdata, pblk, len);
hdr->crc = cpu_to_le32(~crc32(0, pdata, len));
- ret = qtnf_pcie_data_tx(bus, skb);
+ ret = qtnf_pcie_skb_send(bus, skb);
return (ret == NETDEV_TX_OK) ? len : 0;
}
@@ -1066,7 +1108,8 @@ static u64 qtnf_pearl_dma_mask_get(void)
#endif
}
-static int qtnf_pcie_pearl_probe(struct qtnf_bus *bus, unsigned int tx_bd_size)
+static int qtnf_pcie_pearl_probe(struct qtnf_bus *bus, unsigned int tx_bd_size,
+ unsigned int rx_bd_size)
{
struct qtnf_shm_ipc_int ipc_int;
struct qtnf_pcie_pearl_state *ps = get_bus_priv(bus);
@@ -1081,7 +1124,7 @@ static int qtnf_pcie_pearl_probe(struct qtnf_bus *bus, unsigned int tx_bd_size)
ps->bda = ps->base.epmem_bar;
writel(ps->base.msi_enabled, &ps->bda->bda_rc_msi_enabled);
- ret = qtnf_pcie_pearl_init_xfer(ps, tx_bd_size);
+ ret = qtnf_pcie_pearl_init_xfer(ps, tx_bd_size, rx_bd_size);
if (ret) {
pr_err("PCIE xfer init failed\n");
return ret;
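The NETDEV_TX_BUSY handling in qtnf_pcie_data_tx_meta() above deserves a note: on a busy return the networking core requeues and later resubmits the same skb, so the appended trailer must be trimmed off or the retry would append a second one. The append/undo pairing, in sketch form:

	meta = skb_put(skb, sizeof(*meta));	/* append meta trailer */
	ret = qtnf_pcie_skb_send(bus, skb);
	if (unlikely(ret == NETDEV_TX_BUSY))	/* skb will be resubmitted */
		__skb_trim(skb, skb->len - sizeof(*meta));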
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c
index 9a4380ed7f1b..dbf3c5fd751f 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c
@@ -23,6 +23,7 @@
#include "debug.h"
#define TOPAZ_TX_BD_SIZE_DEFAULT 128
+#define TOPAZ_RX_BD_SIZE_DEFAULT 256
struct qtnf_topaz_tx_bd {
__le32 addr;
@@ -199,8 +200,6 @@ static int topaz_alloc_bd_table(struct qtnf_pcie_topaz_state *ts,
if (!vaddr)
return -ENOMEM;
- memset(vaddr, 0, len);
-
/* tx bd */
ts->tx_bd_vbase = vaddr;
@@ -333,7 +332,8 @@ static void qtnf_topaz_free_xfer_buffers(struct qtnf_pcie_topaz_state *ts)
}
static int qtnf_pcie_topaz_init_xfer(struct qtnf_pcie_topaz_state *ts,
- unsigned int tx_bd_size)
+ unsigned int tx_bd_size,
+ unsigned int rx_bd_size)
{
struct qtnf_topaz_bda __iomem *bda = ts->bda;
struct qtnf_pcie_bus_priv *priv = &ts->base;
@@ -351,6 +351,17 @@ static int qtnf_pcie_topaz_init_xfer(struct qtnf_pcie_topaz_state *ts,
priv->tx_bd_num = tx_bd_size;
qtnf_non_posted_write(priv->tx_bd_num, &bda->bda_rc_tx_bd_num);
+
+ if (rx_bd_size == 0)
+ rx_bd_size = TOPAZ_RX_BD_SIZE_DEFAULT;
+
+ if (rx_bd_size > TOPAZ_RX_BD_SIZE_DEFAULT) {
+ pr_warn("RX BD queue cannot exceed %d\n",
+ TOPAZ_RX_BD_SIZE_DEFAULT);
+ rx_bd_size = TOPAZ_RX_BD_SIZE_DEFAULT;
+ }
+
+ priv->rx_bd_num = rx_bd_size;
qtnf_non_posted_write(priv->rx_bd_num, &bda->bda_rc_rx_bd_num);
priv->rx_bd_w_index = 0;
@@ -486,7 +497,8 @@ static int qtnf_tx_queue_ready(struct qtnf_pcie_topaz_state *ts)
return 1;
}
-static int qtnf_pcie_data_tx(struct qtnf_bus *bus, struct sk_buff *skb)
+static int qtnf_pcie_data_tx(struct qtnf_bus *bus, struct sk_buff *skb,
+ unsigned int macid, unsigned int vifid)
{
struct qtnf_pcie_topaz_state *ts = (void *)get_bus_priv(bus);
struct qtnf_pcie_bus_priv *priv = &ts->base;
@@ -498,13 +510,6 @@ static int qtnf_pcie_data_tx(struct qtnf_bus *bus, struct sk_buff *skb)
int len;
int i;
- if (unlikely(skb->protocol == htons(ETH_P_PAE))) {
- qtnf_packet_send_hi_pri(skb);
- qtnf_update_tx_stats(skb->dev, skb);
- priv->tx_eapol++;
- return NETDEV_TX_OK;
- }
-
spin_lock_irqsave(&priv->tx_lock, flags);
if (!qtnf_tx_queue_ready(ts)) {
@@ -736,7 +741,7 @@ static void qtnf_pcie_data_rx_stop(struct qtnf_bus *bus)
napi_disable(&bus->mux_napi);
}
-static const struct qtnf_bus_ops qtnf_pcie_topaz_bus_ops = {
+static struct qtnf_bus_ops qtnf_pcie_topaz_bus_ops = {
/* control path methods */
.control_tx = qtnf_pcie_control_tx,
@@ -768,7 +773,6 @@ static int qtnf_dbg_pkt_stats(struct seq_file *s, void *data)
seq_printf(s, "tx_done_count(%u)\n", priv->tx_done_count);
seq_printf(s, "tx_reclaim_done(%u)\n", priv->tx_reclaim_done);
seq_printf(s, "tx_reclaim_req(%u)\n", priv->tx_reclaim_req);
- seq_printf(s, "tx_eapol(%u)\n", priv->tx_eapol);
seq_printf(s, "tx_bd_r_index(%u)\n", priv->tx_bd_r_index);
seq_printf(s, "tx_done_index(%u)\n", tx_done_index);
@@ -1113,7 +1117,8 @@ static u64 qtnf_topaz_dma_mask_get(void)
return DMA_BIT_MASK(32);
}
-static int qtnf_pcie_topaz_probe(struct qtnf_bus *bus, unsigned int tx_bd_num)
+static int qtnf_pcie_topaz_probe(struct qtnf_bus *bus,
+ unsigned int tx_bd_num, unsigned int rx_bd_num)
{
struct qtnf_pcie_topaz_state *ts = get_bus_priv(bus);
struct pci_dev *pdev = ts->base.pdev;
@@ -1147,7 +1152,7 @@ static int qtnf_pcie_topaz_probe(struct qtnf_bus *bus, unsigned int tx_bd_num)
return ret;
}
- ret = qtnf_pcie_topaz_init_xfer(ts, tx_bd_num);
+ ret = qtnf_pcie_topaz_init_xfer(ts, tx_bd_num, rx_bd_num);
if (ret) {
pr_err("PCIE xfer init failed\n");
return ret;
diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink.h b/drivers/net/wireless/quantenna/qtnfmac/qlink.h
index 8a3c6344fa8e..75527f1bb306 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/qlink.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/qlink.h
@@ -59,6 +59,7 @@ struct qlink_msg_header {
* @QLINK_HW_CAPAB_SCAN_RANDOM_MAC_ADDR: device supports MAC Address
* Randomization in probe requests.
* @QLINK_HW_CAPAB_OBSS_SCAN: device can perform OBSS scanning.
+ * @QLINK_HW_CAPAB_HW_BRIDGE: device has hardware switch capabilities.
*/
enum qlink_hw_capab {
QLINK_HW_CAPAB_REG_UPDATE = BIT(0),
@@ -69,6 +70,7 @@ enum qlink_hw_capab {
QLINK_HW_CAPAB_OBSS_SCAN = BIT(5),
QLINK_HW_CAPAB_SCAN_DWELL = BIT(6),
QLINK_HW_CAPAB_SAE = BIT(8),
+ QLINK_HW_CAPAB_HW_BRIDGE = BIT(9),
};
enum qlink_iface_type {
@@ -217,6 +219,10 @@ struct qlink_sta_info_state {
* command is supported only if device reports QLINK_HW_SUPPORTS_REG_UPDATE
* capability.
* @QLINK_CMD_START_CAC: start radar detection procedure on a specified channel.
+ * @QLINK_CMD_TXPWR: get or set current channel transmit power for
+ * the specified MAC.
+ * @QLINK_CMD_NDEV_EVENT: signals changes made to the corresponding network
+ * device.
*/
enum qlink_cmd_type {
QLINK_CMD_FW_INIT = 0x0001,
@@ -249,11 +255,13 @@ enum qlink_cmd_type {
QLINK_CMD_DEL_STA = 0x0052,
QLINK_CMD_SCAN = 0x0053,
QLINK_CMD_CHAN_STATS = 0x0054,
+ QLINK_CMD_NDEV_EVENT = 0x0055,
QLINK_CMD_CONNECT = 0x0060,
QLINK_CMD_DISCONNECT = 0x0061,
QLINK_CMD_PM_SET = 0x0062,
QLINK_CMD_WOWLAN_SET = 0x0063,
QLINK_CMD_EXTERNAL_AUTH = 0x0066,
+ QLINK_CMD_TXPWR = 0x0067,
};
/**
@@ -719,6 +727,32 @@ struct qlink_cmd_pm_set {
} __packed;
/**
+ * enum qlink_txpwr_op - transmit power operation type
+ * @QLINK_TXPWR_SET: set tx power
+ * @QLINK_TXPWR_GET: get current tx power setting
+ */
+enum qlink_txpwr_op {
+ QLINK_TXPWR_SET,
+ QLINK_TXPWR_GET
+};
+
+/**
+ * struct qlink_cmd_txpwr - get or set current transmit power
+ *
+ * @txpwr: new transmit power setting, in mBm
+ * @txpwr_setting: transmit power setting type, one of
+ * &enum nl80211_tx_power_setting
+ * @op_type: type of operation, one of &enum qlink_txpwr_op
+ */
+struct qlink_cmd_txpwr {
+ struct qlink_cmd chdr;
+ __le32 txpwr;
+ u8 txpwr_setting;
+ u8 op_type;
+ u8 rsvd[2];
+} __packed;
+
+/**
* enum qlink_wowlan_trigger
*
* @QLINK_WOWLAN_TRIG_DISCONNECT: wakeup on disconnect
@@ -742,6 +776,42 @@ struct qlink_cmd_wowlan_set {
u8 data[0];
} __packed;
+enum qlink_ndev_event_type {
+ QLINK_NDEV_EVENT_CHANGEUPPER,
+};
+
+/**
+ * struct qlink_cmd_ndev_event - data for QLINK_CMD_NDEV_EVENT command
+ *
+ * @event: type of event, one of &enum qlink_ndev_event_type
+ */
+struct qlink_cmd_ndev_event {
+ struct qlink_cmd chdr;
+ __le16 event;
+ u8 rsvd[2];
+} __packed;
+
+enum qlink_ndev_upper_type {
+ QLINK_NDEV_UPPER_TYPE_NONE,
+ QLINK_NDEV_UPPER_TYPE_BRIDGE,
+};
+
+/**
+ * struct qlink_cmd_ndev_changeupper - data for QLINK_NDEV_EVENT_CHANGEUPPER
+ *
+ * @br_domain: layer 2 broadcast domain ID that ndev is a member of
+ * @upper_type: type of upper device, one of &enum qlink_ndev_upper_type
+ */
+struct qlink_cmd_ndev_changeupper {
+ struct qlink_cmd_ndev_event nehdr;
+ __le64 flags;
+ __le32 br_domain;
+ __le32 netspace_id;
+ __le16 vlanid;
+ u8 upper_type;
+ u8 rsvd[1];
+} __packed;
+
/* QLINK Command Responses messages related definitions
*/
@@ -944,6 +1014,19 @@ struct qlink_resp_channel_get {
struct qlink_chandef chan;
} __packed;
+/**
+ * struct qlink_resp_txpwr - response for QLINK_CMD_TXPWR command
+ *
+ * This response is meaningful only for the QLINK_TXPWR_GET operation;
+ * a QLINK_TXPWR_SET response carries no useful information.
+ *
+ * @txpwr: current transmit power setting, in mBm
+ */
+struct qlink_resp_txpwr {
+ struct qlink_resp rhdr;
+ __le32 txpwr;
+} __packed;
+
/* QLINK Events messages related definitions
*/
@@ -958,6 +1041,7 @@ enum qlink_event_type {
QLINK_EVENT_FREQ_CHANGE = 0x0028,
QLINK_EVENT_RADAR = 0x0029,
QLINK_EVENT_EXTERNAL_AUTH = 0x0030,
+ QLINK_EVENT_MIC_FAILURE = 0x0031,
};
/**
@@ -1151,6 +1235,20 @@ struct qlink_event_external_auth {
u8 action;
} __packed;
+/**
+ * struct qlink_event_mic_failure - data for QLINK_EVENT_MIC_FAILURE event
+ *
+ * @src: source MAC address of the frame
+ * @key_index: index of the key being reported
+ * @pairwise: whether the key is pairwise or group
+ */
+struct qlink_event_mic_failure {
+ struct qlink_event ehdr;
+ u8 src[ETH_ALEN];
+ u8 key_index;
+ u8 pairwise;
+} __packed;
+
/* QLINK TLVs (Type-Length Values) definitions
*/
@@ -1171,6 +1269,7 @@ struct qlink_event_external_auth {
* @QTN_TLV_ID_SCAN_SAMPLE_DURATION: total duration of sampling a single channel
* during a scan including off-channel dwell time and operating channel
* time.
+ * @QTN_TLV_ID_IFTYPE_DATA: per interface type capability data for a band,
+ * e.g. HE capabilities.
*/
enum qlink_tlv_id {
QTN_TLV_ID_FRAG_THRESH = 0x0201,
@@ -1206,6 +1305,7 @@ enum qlink_tlv_id {
QTN_TLV_ID_SCAN_DWELL_ACTIVE = 0x0413,
QTN_TLV_ID_SCAN_DWELL_PASSIVE = 0x0416,
QTN_TLV_ID_SCAN_SAMPLE_DURATION = 0x0417,
+ QTN_TLV_ID_IFTYPE_DATA = 0x0418,
};
struct qlink_tlv_hdr {
@@ -1367,6 +1467,39 @@ struct qlink_tlv_ie_set {
u8 ie_data[0];
} __packed;
+/**
+ * struct qlink_tlv_ext_ie - extension IE
+ *
+ * @eid_ext: element ID extension, one of &enum ieee80211_eid_ext.
+ * @ie_data: IEs data.
+ */
+struct qlink_tlv_ext_ie {
+ struct qlink_tlv_hdr hdr;
+ u8 eid_ext;
+ u8 ie_data[0];
+} __packed;
+
+#define IEEE80211_HE_PPE_THRES_MAX_LEN 25
+struct qlink_sband_iftype_data {
+ __le16 types_mask;
+ struct ieee80211_he_cap_elem he_cap_elem;
+ struct ieee80211_he_mcs_nss_supp he_mcs_nss_supp;
+ u8 ppe_thres[IEEE80211_HE_PPE_THRES_MAX_LEN];
+} __packed;
+
+/**
+ * struct qlink_tlv_iftype_data - data for QTN_TLV_ID_IFTYPE_DATA
+ *
+ * @n_iftype_data: number of entries in iftype_data.
+ * @iftype_data: interface type data entries.
+ */
+struct qlink_tlv_iftype_data {
+ struct qlink_tlv_hdr hdr;
+ u8 n_iftype_data;
+ u8 rsvd[3];
+ struct qlink_sband_iftype_data iftype_data[0];
+} __packed;
+
struct qlink_chan_stats {
__le32 chan_num;
__le32 cca_tx;
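As a rough illustration of the new QLINK_CMD_TXPWR plumbing, a set request could be filled as below. Only the field assignments are grounded in the structure definition above; the allocation and send helpers are not part of this patch, and NL80211_TX_POWER_FIXED is one of cfg80211's enum nl80211_tx_power_setting values:

	/* minimal sketch, assuming cmd points at a zeroed command buffer */
	static void qlink_fill_txpwr_set(struct qlink_cmd_txpwr *cmd, int mbm)
	{
		cmd->txpwr = cpu_to_le32(mbm);			/* in mBm */
		cmd->txpwr_setting = NL80211_TX_POWER_FIXED;
		cmd->op_type = QLINK_TXPWR_SET;
	}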
diff --git a/drivers/net/wireless/quantenna/qtnfmac/switchdev.h b/drivers/net/wireless/quantenna/qtnfmac/switchdev.h
new file mode 100644
index 000000000000..b962e670c4b0
--- /dev/null
+++ b/drivers/net/wireless/quantenna/qtnfmac/switchdev.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2019 Quantenna Communications. All rights reserved. */
+
+#ifndef QTNFMAC_SWITCHDEV_H_
+#define QTNFMAC_SWITCHDEV_H_
+
+#include <linux/skbuff.h>
+
+#ifdef CONFIG_NET_SWITCHDEV
+
+static inline void qtnfmac_switch_mark_skb_flooded(struct sk_buff *skb)
+{
+ skb->offload_fwd_mark = 1;
+}
+
+#else
+
+static inline void qtnfmac_switch_mark_skb_flooded(struct sk_buff *skb)
+{
+}
+
+#endif
+
+#endif /* QTNFMAC_SWITCHDEV_H_ */
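The new header uses the usual stub-pair pattern: with CONFIG_NET_SWITCHDEV enabled the helper sets skb->offload_fwd_mark, telling the bridge layer that the device already flooded the frame in hardware; with it disabled the helper compiles away, so callers need no #ifdef of their own:

	/* unconditional call site; a no-op when switchdev is compiled out */
	qtnfmac_switch_mark_skb_flooded(skb);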
diff --git a/drivers/net/wireless/ralink/rt2x00/Kconfig b/drivers/net/wireless/ralink/rt2x00/Kconfig
index f8a9244ce012..d4969d617822 100644
--- a/drivers/net/wireless/ralink/rt2x00/Kconfig
+++ b/drivers/net/wireless/ralink/rt2x00/Kconfig
@@ -95,20 +95,20 @@ config RT2800PCI_RT35XX
config RT2800PCI_RT53XX
- bool "rt2800pci - Include support for rt53xx devices (EXPERIMENTAL)"
- default y
- ---help---
- This adds support for rt53xx wireless chipset family to the
- rt2800pci driver.
- Supported chips: RT5390
+ bool "rt2800pci - Include support for rt53xx devices (EXPERIMENTAL)"
+ default y
+ ---help---
+ This adds support for rt53xx wireless chipset family to the
+ rt2800pci driver.
+ Supported chips: RT5390
config RT2800PCI_RT3290
- bool "rt2800pci - Include support for rt3290 devices (EXPERIMENTAL)"
- default y
- ---help---
- This adds support for rt3290 wireless chipset family to the
- rt2800pci driver.
- Supported chips: RT3290
+ bool "rt2800pci - Include support for rt3290 devices (EXPERIMENTAL)"
+ default y
+ ---help---
+ This adds support for rt3290 wireless chipset family to the
+ rt2800pci driver.
+ Supported chips: RT3290
endif
config RT2500USB
@@ -174,18 +174,18 @@ config RT2800USB_RT3573
in the rt2800usb driver.
config RT2800USB_RT53XX
- bool "rt2800usb - Include support for rt53xx devices (EXPERIMENTAL)"
- ---help---
- This adds support for rt53xx wireless chipset family to the
- rt2800usb driver.
- Supported chips: RT5370
+ bool "rt2800usb - Include support for rt53xx devices (EXPERIMENTAL)"
+ ---help---
+ This adds support for rt53xx wireless chipset family to the
+ rt2800usb driver.
+ Supported chips: RT5370
config RT2800USB_RT55XX
- bool "rt2800usb - Include support for rt55xx devices (EXPERIMENTAL)"
- ---help---
- This adds support for rt55xx wireless chipset family to the
- rt2800usb driver.
- Supported chips: RT5572
+ bool "rt2800usb - Include support for rt55xx devices (EXPERIMENTAL)"
+ ---help---
+ This adds support for rt55xx wireless chipset family to the
+ rt2800usb driver.
+ Supported chips: RT5572
config RT2800USB_UNKNOWN
bool "rt2800usb - Include support for unknown (USB) devices"
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
index f1cdcd61c54a..a36c3fea7495 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
@@ -5839,8 +5839,7 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2800_register_write(rt2x00dev, TX_TXBF_CFG_0, 0x8000fc21);
rt2800_register_write(rt2x00dev, TX_TXBF_CFG_3, 0x00009c40);
} else if (rt2x00_rt(rt2x00dev, RT5390) ||
- rt2x00_rt(rt2x00dev, RT5392) ||
- rt2x00_rt(rt2x00dev, RT6352)) {
+ rt2x00_rt(rt2x00dev, RT5392)) {
rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000404);
rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);
rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000);
@@ -5854,8 +5853,6 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000401);
rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x000C0000);
rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000);
- rt2800_register_write(rt2x00dev, MIMO_PS_CFG, 0x00000002);
- rt2800_register_write(rt2x00dev, TX_PIN_CFG, 0x00150F0F);
rt2800_register_write(rt2x00dev, TX_ALC_VGA3, 0x00000000);
rt2800_register_write(rt2x00dev, TX0_BB_GAIN_ATTEN, 0x0);
rt2800_register_write(rt2x00dev, TX1_BB_GAIN_ATTEN, 0x0);
@@ -10476,7 +10473,7 @@ int rt2800_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
* when the hw reorders frames due to aggregation.
*/
if (sta_priv->wcid > WCID_END)
- return 1;
+ return -ENOSPC;
switch (action) {
case IEEE80211_AMPDU_RX_START:
@@ -10489,7 +10486,7 @@ int rt2800_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
*/
break;
case IEEE80211_AMPDU_TX_START:
- ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ ret = IEEE80211_AMPDU_TX_START_IMMEDIATE;
break;
case IEEE80211_AMPDU_TX_STOP_CONT:
case IEEE80211_AMPDU_TX_STOP_FLUSH:
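Both AMPDU hunks above follow a new mac80211 contract: rather than calling ieee80211_start_tx_ba_cb_irqsafe() from the ampdu_action hook, a driver that can start the TX BA session at once returns IEEE80211_AMPDU_TX_START_IMMEDIATE and lets mac80211 complete the handshake:

	case IEEE80211_AMPDU_TX_START:
		/* session may start immediately; mac80211 runs the callback */
		ret = IEEE80211_AMPDU_TX_START_IMMEDIATE;
		break;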
diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225se.c b/drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225se.c
index 23cd4ff78e54..e1bf41c278a5 100644
--- a/drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225se.c
+++ b/drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225se.c
@@ -37,53 +37,11 @@ static const u8 cck_ofdm_gain_settings[] = {
0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23,
};
-static const u8 rtl8225se_tx_gain_cck_ofdm[] = {
- 0x02, 0x06, 0x0e, 0x1e, 0x3e, 0x7e
-};
-
-static const u8 rtl8225se_tx_power_cck[] = {
- 0x18, 0x17, 0x15, 0x11, 0x0c, 0x08, 0x04, 0x02,
- 0x1b, 0x1a, 0x17, 0x13, 0x0e, 0x09, 0x04, 0x02,
- 0x1f, 0x1e, 0x1a, 0x15, 0x10, 0x0a, 0x05, 0x02,
- 0x22, 0x21, 0x1d, 0x18, 0x11, 0x0b, 0x06, 0x02,
- 0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03,
- 0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03
-};
-
-static const u8 rtl8225se_tx_power_cck_ch14[] = {
- 0x18, 0x17, 0x15, 0x0c, 0x00, 0x00, 0x00, 0x00,
- 0x1b, 0x1a, 0x17, 0x0e, 0x00, 0x00, 0x00, 0x00,
- 0x1f, 0x1e, 0x1a, 0x0f, 0x00, 0x00, 0x00, 0x00,
- 0x22, 0x21, 0x1d, 0x11, 0x00, 0x00, 0x00, 0x00,
- 0x26, 0x25, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00,
- 0x2b, 0x2a, 0x25, 0x15, 0x00, 0x00, 0x00, 0x00
-};
-
-static const u8 rtl8225se_tx_power_ofdm[] = {
- 0x80, 0x90, 0xa2, 0xb5, 0xcb, 0xe4
-};
-
static const u32 rtl8225se_chan[] = {
0x0080, 0x0100, 0x0180, 0x0200, 0x0280, 0x0300, 0x0380,
0x0400, 0x0480, 0x0500, 0x0580, 0x0600, 0x0680, 0x074A,
};
-static const u8 rtl8225sez2_tx_power_cck_ch14[] = {
- 0x36, 0x35, 0x2e, 0x1b, 0x00, 0x00, 0x00, 0x00
-};
-
-static const u8 rtl8225sez2_tx_power_cck_B[] = {
- 0x30, 0x2f, 0x29, 0x21, 0x19, 0x10, 0x08, 0x04
-};
-
-static const u8 rtl8225sez2_tx_power_cck_A[] = {
- 0x33, 0x32, 0x2b, 0x23, 0x1a, 0x11, 0x08, 0x04
-};
-
-static const u8 rtl8225sez2_tx_power_cck[] = {
- 0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04
-};
-
static const u8 ZEBRA_AGC[] = {
0x7E, 0x7E, 0x7E, 0x7E, 0x7D, 0x7C, 0x7B, 0x7A,
0x79, 0x78, 0x77, 0x76, 0x75, 0x74, 0x73, 0x72,
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
index ade057d868f7..6598c8d786ea 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
@@ -1187,6 +1187,79 @@ struct rtl8723bu_c2h {
struct rtl8xxxu_fileops;
+/* MLME related */
+enum wireless_mode {
+ WIRELESS_MODE_UNKNOWN = 0,
+ /* Sub-Element */
+ WIRELESS_MODE_B = BIT(0),
+ WIRELESS_MODE_G = BIT(1),
+ WIRELESS_MODE_A = BIT(2),
+ WIRELESS_MODE_N_24G = BIT(3),
+ WIRELESS_MODE_N_5G = BIT(4),
+ WIRELESS_AUTO = BIT(5),
+ WIRELESS_MODE_AC = BIT(6),
+ WIRELESS_MODE_MAX = 0x7F,
+};
+
+/* from rtlwifi/wifi.h */
+enum ratr_table_mode_new {
+ RATEID_IDX_BGN_40M_2SS = 0,
+ RATEID_IDX_BGN_40M_1SS = 1,
+ RATEID_IDX_BGN_20M_2SS_BN = 2,
+ RATEID_IDX_BGN_20M_1SS_BN = 3,
+ RATEID_IDX_GN_N2SS = 4,
+ RATEID_IDX_GN_N1SS = 5,
+ RATEID_IDX_BG = 6,
+ RATEID_IDX_G = 7,
+ RATEID_IDX_B = 8,
+ RATEID_IDX_VHT_2SS = 9,
+ RATEID_IDX_VHT_1SS = 10,
+ RATEID_IDX_MIX1 = 11,
+ RATEID_IDX_MIX2 = 12,
+ RATEID_IDX_VHT_3SS = 13,
+ RATEID_IDX_BGN_3SS = 14,
+};
+
+#define BT_INFO_8723B_1ANT_B_FTP BIT(7)
+#define BT_INFO_8723B_1ANT_B_A2DP BIT(6)
+#define BT_INFO_8723B_1ANT_B_HID BIT(5)
+#define BT_INFO_8723B_1ANT_B_SCO_BUSY BIT(4)
+#define BT_INFO_8723B_1ANT_B_ACL_BUSY BIT(3)
+#define BT_INFO_8723B_1ANT_B_INQ_PAGE BIT(2)
+#define BT_INFO_8723B_1ANT_B_SCO_ESCO BIT(1)
+#define BT_INFO_8723B_1ANT_B_CONNECTION BIT(0)
+
+enum _BT_8723B_1ANT_STATUS {
+ BT_8723B_1ANT_STATUS_NON_CONNECTED_IDLE = 0x0,
+ BT_8723B_1ANT_STATUS_CONNECTED_IDLE = 0x1,
+ BT_8723B_1ANT_STATUS_INQ_PAGE = 0x2,
+ BT_8723B_1ANT_STATUS_ACL_BUSY = 0x3,
+ BT_8723B_1ANT_STATUS_SCO_BUSY = 0x4,
+ BT_8723B_1ANT_STATUS_ACL_SCO_BUSY = 0x5,
+ BT_8723B_1ANT_STATUS_MAX
+};
+
+struct rtl8xxxu_btcoex {
+ u8 bt_status;
+ bool bt_busy;
+ bool has_sco;
+ bool has_a2dp;
+ bool has_hid;
+ bool has_pan;
+ bool hid_only;
+ bool a2dp_only;
+ bool c2h_bt_inquiry;
+};
+
+#define RTL8XXXU_RATR_STA_INIT 0
+#define RTL8XXXU_RATR_STA_HIGH 1
+#define RTL8XXXU_RATR_STA_MID 2
+#define RTL8XXXU_RATR_STA_LOW 3
+
+#define RTL8XXXU_NOISE_FLOOR_MIN -100
+#define RTL8XXXU_SNR_THRESH_HIGH 50
+#define RTL8XXXU_SNR_THRESH_LOW 20
+
struct rtl8xxxu_priv {
struct ieee80211_hw *hw;
struct usb_device *udev;
@@ -1291,6 +1364,17 @@ struct rtl8xxxu_priv {
u8 pi_enabled:1;
u8 no_pape:1;
u8 int_buf[USB_INTR_CONTENT_LENGTH];
+ u8 rssi_level;
+ /*
+ * Only one virtual interface permitted because only STA mode
+ * is supported and no iface_combinations are provided.
+ */
+ struct ieee80211_vif *vif;
+ struct delayed_work ra_watchdog;
+ struct work_struct c2hcmd_work;
+ struct sk_buff_head c2hcmd_queue;
+ spinlock_t c2hcmd_lock;
+ struct rtl8xxxu_btcoex bt_coex;
};
struct rtl8xxxu_rx_urb {
@@ -1326,7 +1410,7 @@ struct rtl8xxxu_fileops {
void (*set_tx_power) (struct rtl8xxxu_priv *priv, int channel,
bool ht40);
void (*update_rate_mask) (struct rtl8xxxu_priv *priv,
- u32 ramask, int sgi);
+ u32 ramask, u8 rateid, int sgi);
void (*report_connect) (struct rtl8xxxu_priv *priv,
u8 macid, bool connect);
void (*fill_txdesc) (struct ieee80211_hw *hw, struct ieee80211_hdr *hdr,
@@ -1341,6 +1425,7 @@ struct rtl8xxxu_fileops {
u8 has_s0s1:1;
u8 has_tx_report:1;
u8 gen2_thermal_meter:1;
+ u8 needs_full_init:1;
u32 adda_1t_init;
u32 adda_1t_path_on;
u32 adda_2t_path_on_a;
@@ -1411,9 +1496,9 @@ void rtl8xxxu_gen2_config_channel(struct ieee80211_hw *hw);
void rtl8xxxu_gen1_usb_quirks(struct rtl8xxxu_priv *priv);
void rtl8xxxu_gen2_usb_quirks(struct rtl8xxxu_priv *priv);
void rtl8xxxu_update_rate_mask(struct rtl8xxxu_priv *priv,
- u32 ramask, int sgi);
+ u32 ramask, u8 rateid, int sgi);
void rtl8xxxu_gen2_update_rate_mask(struct rtl8xxxu_priv *priv,
- u32 ramask, int sgi);
+ u32 ramask, u8 rateid, int sgi);
void rtl8xxxu_gen1_report_connect(struct rtl8xxxu_priv *priv,
u8 macid, bool connect);
void rtl8xxxu_gen2_report_connect(struct rtl8xxxu_priv *priv,
@@ -1437,6 +1522,8 @@ void rtl8xxxu_fill_txdesc_v2(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr,
struct rtl8xxxu_txdesc32 *tx_desc32, bool sgi,
bool short_preamble, bool ampdu_enable,
u32 rts_rate);
+void rtl8723bu_set_ps_tdma(struct rtl8xxxu_priv *priv,
+ u8 arg1, u8 arg2, u8 arg3, u8 arg4, u8 arg5);
extern struct rtl8xxxu_fileops rtl8192cu_fops;
extern struct rtl8xxxu_fileops rtl8192eu_fops;
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
index c747f6a1922d..9f1f93d04145 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
@@ -1011,7 +1011,7 @@ static void rtl8192eu_phy_iqcalibrate(struct rtl8xxxu_priv *priv,
u32 i, val32;
int path_a_ok, path_b_ok;
int retry = 2;
- const u32 adda_regs[RTL8XXXU_ADDA_REGS] = {
+ static const u32 adda_regs[RTL8XXXU_ADDA_REGS] = {
REG_FPGA0_XCD_SWITCH_CTRL, REG_BLUETOOTH,
REG_RX_WAIT_CCA, REG_TX_CCK_RFON,
REG_TX_CCK_BBON, REG_TX_OFDM_RFON,
@@ -1021,11 +1021,11 @@ static void rtl8192eu_phy_iqcalibrate(struct rtl8xxxu_priv *priv,
REG_RX_TO_RX, REG_STANDBY,
REG_SLEEP, REG_PMPD_ANAEN
};
- const u32 iqk_mac_regs[RTL8XXXU_MAC_REGS] = {
+ static const u32 iqk_mac_regs[RTL8XXXU_MAC_REGS] = {
REG_TXPAUSE, REG_BEACON_CTRL,
REG_BEACON_CTRL_1, REG_GPIO_MUXCFG
};
- const u32 iqk_bb_regs[RTL8XXXU_BB_REGS] = {
+ static const u32 iqk_bb_regs[RTL8XXXU_BB_REGS] = {
REG_OFDM0_TRX_PATH_ENABLE, REG_OFDM0_TR_MUX_PAR,
REG_FPGA0_XCD_RF_SW_CTRL, REG_CONFIG_ANT_A, REG_CONFIG_ANT_B,
REG_FPGA0_XAB_RF_SW_CTRL, REG_FPGA0_XA_RF_INT_OE,
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c
index ceffe05bd65b..a71e1816e632 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c
@@ -882,7 +882,7 @@ static void rtl8723bu_phy_iqcalibrate(struct rtl8xxxu_priv *priv,
u32 i, val32;
int path_a_ok /*, path_b_ok */;
int retry = 2;
- const u32 adda_regs[RTL8XXXU_ADDA_REGS] = {
+ static const u32 adda_regs[RTL8XXXU_ADDA_REGS] = {
REG_FPGA0_XCD_SWITCH_CTRL, REG_BLUETOOTH,
REG_RX_WAIT_CCA, REG_TX_CCK_RFON,
REG_TX_CCK_BBON, REG_TX_OFDM_RFON,
@@ -892,11 +892,11 @@ static void rtl8723bu_phy_iqcalibrate(struct rtl8xxxu_priv *priv,
REG_RX_TO_RX, REG_STANDBY,
REG_SLEEP, REG_PMPD_ANAEN
};
- const u32 iqk_mac_regs[RTL8XXXU_MAC_REGS] = {
+ static const u32 iqk_mac_regs[RTL8XXXU_MAC_REGS] = {
REG_TXPAUSE, REG_BEACON_CTRL,
REG_BEACON_CTRL_1, REG_GPIO_MUXCFG
};
- const u32 iqk_bb_regs[RTL8XXXU_BB_REGS] = {
+ static const u32 iqk_bb_regs[RTL8XXXU_BB_REGS] = {
REG_OFDM0_TRX_PATH_ENABLE, REG_OFDM0_TR_MUX_PAR,
REG_FPGA0_XCD_RF_SW_CTRL, REG_CONFIG_ANT_A, REG_CONFIG_ANT_B,
REG_FPGA0_XAB_RF_SW_CTRL, REG_FPGA0_XA_RF_INT_OE,
@@ -1580,9 +1580,7 @@ static void rtl8723b_enable_rf(struct rtl8xxxu_priv *priv)
/*
* Software control, antenna at WiFi side
*/
-#ifdef NEED_PS_TDMA
rtl8723bu_set_ps_tdma(priv, 0x08, 0x00, 0x00, 0x00, 0x00);
-#endif
rtl8xxxu_write32(priv, REG_BT_COEX_TABLE1, 0x55555555);
rtl8xxxu_write32(priv, REG_BT_COEX_TABLE2, 0x55555555);
@@ -1670,6 +1668,7 @@ struct rtl8xxxu_fileops rtl8723bu_fops = {
.has_s0s1 = 1,
.has_tx_report = 1,
.gen2_thermal_meter = 1,
+ .needs_full_init = 1,
.adda_1t_init = 0x01c00014,
.adda_1t_path_on = 0x01c00014,
.adda_2t_path_on_a = 0x01c00014,
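The new needs_full_init flag above pairs with a core.c hunk later in this patch: when the flag is set, the warm-start shortcut is skipped and the full init sequence runs even if the MAC reports it is already powered:

	if (fops->needs_full_init)
		macpower = false;	/* force the full init path */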
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
index c6c41fb962ff..aa2bb2ae9809 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
@@ -1255,7 +1255,7 @@ void rtl8xxxu_gen1_config_channel(struct ieee80211_hw *hw)
void rtl8xxxu_gen2_config_channel(struct ieee80211_hw *hw)
{
struct rtl8xxxu_priv *priv = hw->priv;
- u32 val32, rsr;
+ u32 val32;
u8 val8, subchannel;
u16 rf_mode_bw;
bool ht = true;
@@ -1264,7 +1264,6 @@ void rtl8xxxu_gen2_config_channel(struct ieee80211_hw *hw)
rf_mode_bw = rtl8xxxu_read16(priv, REG_WMAC_TRXPTCL_CTL);
rf_mode_bw &= ~WMAC_TRXPTCL_CTL_BW_MASK;
- rsr = rtl8xxxu_read32(priv, REG_RESPONSE_RATE_SET);
channel = hw->conf.chandef.chan->hw_value;
/* Hack */
@@ -3115,7 +3114,7 @@ static void rtl8xxxu_phy_iqcalibrate(struct rtl8xxxu_priv *priv,
u32 i, val32;
int path_a_ok, path_b_ok;
int retry = 2;
- const u32 adda_regs[RTL8XXXU_ADDA_REGS] = {
+ static const u32 adda_regs[RTL8XXXU_ADDA_REGS] = {
REG_FPGA0_XCD_SWITCH_CTRL, REG_BLUETOOTH,
REG_RX_WAIT_CCA, REG_TX_CCK_RFON,
REG_TX_CCK_BBON, REG_TX_OFDM_RFON,
@@ -3125,11 +3124,11 @@ static void rtl8xxxu_phy_iqcalibrate(struct rtl8xxxu_priv *priv,
REG_RX_TO_RX, REG_STANDBY,
REG_SLEEP, REG_PMPD_ANAEN
};
- const u32 iqk_mac_regs[RTL8XXXU_MAC_REGS] = {
+ static const u32 iqk_mac_regs[RTL8XXXU_MAC_REGS] = {
REG_TXPAUSE, REG_BEACON_CTRL,
REG_BEACON_CTRL_1, REG_GPIO_MUXCFG
};
- const u32 iqk_bb_regs[RTL8XXXU_BB_REGS] = {
+ static const u32 iqk_bb_regs[RTL8XXXU_BB_REGS] = {
REG_OFDM0_TRX_PATH_ENABLE, REG_OFDM0_TR_MUX_PAR,
REG_FPGA0_XCD_RF_SW_CTRL, REG_CONFIG_ANT_A, REG_CONFIG_ANT_B,
REG_FPGA0_XAB_RF_SW_CTRL, REG_FPGA0_XA_RF_INT_OE,
@@ -3820,9 +3819,8 @@ void rtl8xxxu_power_off(struct rtl8xxxu_priv *priv)
rtl8xxxu_write8(priv, REG_RSV_CTRL, 0x0e);
}
-#ifdef NEED_PS_TDMA
-static void rtl8723bu_set_ps_tdma(struct rtl8xxxu_priv *priv,
- u8 arg1, u8 arg2, u8 arg3, u8 arg4, u8 arg5)
+void rtl8723bu_set_ps_tdma(struct rtl8xxxu_priv *priv,
+ u8 arg1, u8 arg2, u8 arg3, u8 arg4, u8 arg5)
{
struct h2c_cmd h2c;
@@ -3835,7 +3833,6 @@ static void rtl8723bu_set_ps_tdma(struct rtl8xxxu_priv *priv,
h2c.b_type_dma.data5 = arg5;
rtl8xxxu_gen2_h2c_cmd(priv, &h2c, sizeof(h2c.b_type_dma));
}
-#endif
void rtl8xxxu_gen2_disable_rf(struct rtl8xxxu_priv *priv)
{
@@ -3902,6 +3899,9 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
else
macpower = true;
+ if (fops->needs_full_init)
+ macpower = false;
+
ret = fops->power_on(priv);
if (ret < 0) {
dev_warn(dev, "%s: Failed power on\n", __func__);
@@ -4304,7 +4304,8 @@ static void rtl8xxxu_sw_scan_complete(struct ieee80211_hw *hw,
rtl8xxxu_write8(priv, REG_BEACON_CTRL, val8);
}
-void rtl8xxxu_update_rate_mask(struct rtl8xxxu_priv *priv, u32 ramask, int sgi)
+void rtl8xxxu_update_rate_mask(struct rtl8xxxu_priv *priv,
+ u32 ramask, u8 rateid, int sgi)
{
struct h2c_cmd h2c;
@@ -4324,7 +4325,7 @@ void rtl8xxxu_update_rate_mask(struct rtl8xxxu_priv *priv, u32 ramask, int sgi)
}
void rtl8xxxu_gen2_update_rate_mask(struct rtl8xxxu_priv *priv,
- u32 ramask, int sgi)
+ u32 ramask, u8 rateid, int sgi)
{
struct h2c_cmd h2c;
u8 bw = 0;
@@ -4338,7 +4339,7 @@ void rtl8xxxu_gen2_update_rate_mask(struct rtl8xxxu_priv *priv,
h2c.b_macid_cfg.ramask3 = (ramask >> 24) & 0xff;
h2c.ramask.arg = 0x80;
- h2c.b_macid_cfg.data1 = 0;
+ h2c.b_macid_cfg.data1 = rateid;
if (sgi)
h2c.b_macid_cfg.data1 |= BIT(7);
@@ -4478,6 +4479,35 @@ static void rtl8xxxu_set_basic_rates(struct rtl8xxxu_priv *priv, u32 rate_cfg)
rtl8xxxu_write8(priv, REG_INIRTS_RATE_SEL, rate_idx);
}
+static u16
+rtl8xxxu_wireless_mode(struct ieee80211_hw *hw, struct ieee80211_sta *sta)
+{
+ u16 network_type = WIRELESS_MODE_UNKNOWN;
+
+ if (hw->conf.chandef.chan->band == NL80211_BAND_5GHZ) {
+ if (sta->vht_cap.vht_supported)
+ network_type = WIRELESS_MODE_AC;
+ else if (sta->ht_cap.ht_supported)
+ network_type = WIRELESS_MODE_N_5G;
+
+ network_type |= WIRELESS_MODE_A;
+ } else {
+ if (sta->vht_cap.vht_supported)
+ network_type = WIRELESS_MODE_AC;
+ else if (sta->ht_cap.ht_supported)
+ network_type = WIRELESS_MODE_N_24G;
+
+ if (sta->supp_rates[0] <= 0xf)
+ network_type |= WIRELESS_MODE_B;
+ else if (sta->supp_rates[0] & 0xf)
+ network_type |= (WIRELESS_MODE_B | WIRELESS_MODE_G);
+ else
+ network_type |= WIRELESS_MODE_G;
+ }
+
+ return network_type;
+}
+
static void
rtl8xxxu_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf, u32 changed)
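To make the rtl8xxxu_wireless_mode() bit arithmetic above concrete:

	/* worked example (values per the function above):
	 *   b/g/n STA: supp_rates[0] = 0xfff -> B | G | N_24G
	 *   g/n STA:   supp_rates[0] = 0xff0 -> G | N_24G
	 * these combined bits are exactly the case labels matched by
	 * rtl8xxxu_refresh_rate_mask() further down in this patch
	 */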
@@ -4520,7 +4550,10 @@ rtl8xxxu_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
sgi = 1;
rcu_read_unlock();
- priv->fops->update_rate_mask(priv, ramask, sgi);
+ priv->vif = vif;
+ priv->rssi_level = RTL8XXXU_RATR_STA_INIT;
+
+ priv->fops->update_rate_mask(priv, ramask, 0, sgi);
rtl8xxxu_write8(priv, REG_BCN_MAX_ERR, 0xff);
@@ -5148,12 +5181,263 @@ static void rtl8xxxu_rx_urb_work(struct work_struct *work)
}
}
+/*
+ * The RTL8723BU/RTL8192EU vendor drivers use coexistence table types
+ * 0-7 to represent different combinations of register values written
+ * to the REG_BT_COEX_TABLEs, covering coexistence use cases for which
+ * Realtek provides no detailed documentation. Keep this aligned with
+ * the vendor driver for easier maintenance.
+ */
+static
+void rtl8723bu_set_coex_with_type(struct rtl8xxxu_priv *priv, u8 type)
+{
+ switch (type) {
+ case 0:
+ rtl8xxxu_write32(priv, REG_BT_COEX_TABLE1, 0x55555555);
+ rtl8xxxu_write32(priv, REG_BT_COEX_TABLE2, 0x55555555);
+ rtl8xxxu_write32(priv, REG_BT_COEX_TABLE3, 0x00ffffff);
+ rtl8xxxu_write8(priv, REG_BT_COEX_TABLE4, 0x03);
+ break;
+ case 1:
+ case 3:
+ rtl8xxxu_write32(priv, REG_BT_COEX_TABLE1, 0x55555555);
+ rtl8xxxu_write32(priv, REG_BT_COEX_TABLE2, 0x5a5a5a5a);
+ rtl8xxxu_write32(priv, REG_BT_COEX_TABLE3, 0x00ffffff);
+ rtl8xxxu_write8(priv, REG_BT_COEX_TABLE4, 0x03);
+ break;
+ case 2:
+ rtl8xxxu_write32(priv, REG_BT_COEX_TABLE1, 0x5a5a5a5a);
+ rtl8xxxu_write32(priv, REG_BT_COEX_TABLE2, 0x5a5a5a5a);
+ rtl8xxxu_write32(priv, REG_BT_COEX_TABLE3, 0x00ffffff);
+ rtl8xxxu_write8(priv, REG_BT_COEX_TABLE4, 0x03);
+ break;
+ case 4:
+ rtl8xxxu_write32(priv, REG_BT_COEX_TABLE1, 0x5a5a5a5a);
+ rtl8xxxu_write32(priv, REG_BT_COEX_TABLE2, 0xaaaa5a5a);
+ rtl8xxxu_write32(priv, REG_BT_COEX_TABLE3, 0x00ffffff);
+ rtl8xxxu_write8(priv, REG_BT_COEX_TABLE4, 0x03);
+ break;
+ case 5:
+ rtl8xxxu_write32(priv, REG_BT_COEX_TABLE1, 0x5a5a5a5a);
+ rtl8xxxu_write32(priv, REG_BT_COEX_TABLE2, 0xaa5a5a5a);
+ rtl8xxxu_write32(priv, REG_BT_COEX_TABLE3, 0x00ffffff);
+ rtl8xxxu_write8(priv, REG_BT_COEX_TABLE4, 0x03);
+ break;
+ case 6:
+ rtl8xxxu_write32(priv, REG_BT_COEX_TABLE1, 0x55555555);
+ rtl8xxxu_write32(priv, REG_BT_COEX_TABLE2, 0xaaaaaaaa);
+ rtl8xxxu_write32(priv, REG_BT_COEX_TABLE3, 0x00ffffff);
+ rtl8xxxu_write8(priv, REG_BT_COEX_TABLE4, 0x03);
+ break;
+ case 7:
+ rtl8xxxu_write32(priv, REG_BT_COEX_TABLE1, 0xaaaaaaaa);
+ rtl8xxxu_write32(priv, REG_BT_COEX_TABLE2, 0xaaaaaaaa);
+ rtl8xxxu_write32(priv, REG_BT_COEX_TABLE3, 0x00ffffff);
+ rtl8xxxu_write8(priv, REG_BT_COEX_TABLE4, 0x03);
+ break;
+ default:
+ break;
+ }
+}
+
+static
+void rtl8723bu_update_bt_link_info(struct rtl8xxxu_priv *priv, u8 bt_info)
+{
+ struct rtl8xxxu_btcoex *btcoex = &priv->bt_coex;
+
+ if (bt_info & BT_INFO_8723B_1ANT_B_INQ_PAGE)
+ btcoex->c2h_bt_inquiry = true;
+ else
+ btcoex->c2h_bt_inquiry = false;
+
+ if (!(bt_info & BT_INFO_8723B_1ANT_B_CONNECTION)) {
+ btcoex->bt_status = BT_8723B_1ANT_STATUS_NON_CONNECTED_IDLE;
+ btcoex->has_sco = false;
+ btcoex->has_hid = false;
+ btcoex->has_pan = false;
+ btcoex->has_a2dp = false;
+ } else {
+ if ((bt_info & 0x1f) == BT_INFO_8723B_1ANT_B_CONNECTION)
+ btcoex->bt_status = BT_8723B_1ANT_STATUS_CONNECTED_IDLE;
+ else if ((bt_info & BT_INFO_8723B_1ANT_B_SCO_ESCO) ||
+ (bt_info & BT_INFO_8723B_1ANT_B_SCO_BUSY))
+ btcoex->bt_status = BT_8723B_1ANT_STATUS_SCO_BUSY;
+ else if (bt_info & BT_INFO_8723B_1ANT_B_ACL_BUSY)
+ btcoex->bt_status = BT_8723B_1ANT_STATUS_ACL_BUSY;
+ else
+ btcoex->bt_status = BT_8723B_1ANT_STATUS_MAX;
+
+ if (bt_info & BT_INFO_8723B_1ANT_B_FTP)
+ btcoex->has_pan = true;
+ else
+ btcoex->has_pan = false;
+
+ if (bt_info & BT_INFO_8723B_1ANT_B_A2DP)
+ btcoex->has_a2dp = true;
+ else
+ btcoex->has_a2dp = false;
+
+ if (bt_info & BT_INFO_8723B_1ANT_B_HID)
+ btcoex->has_hid = true;
+ else
+ btcoex->has_hid = false;
+
+ if (bt_info & BT_INFO_8723B_1ANT_B_SCO_ESCO)
+ btcoex->has_sco = true;
+ else
+ btcoex->has_sco = false;
+ }
+
+ if (!btcoex->has_a2dp && !btcoex->has_sco &&
+ !btcoex->has_pan && btcoex->has_hid)
+ btcoex->hid_only = true;
+ else
+ btcoex->hid_only = false;
+
+ if (!btcoex->has_sco && !btcoex->has_pan &&
+ !btcoex->has_hid && btcoex->has_a2dp)
+ btcoex->a2dp_only = true;
+ else
+ btcoex->a2dp_only = false;
+
+ if (btcoex->bt_status == BT_8723B_1ANT_STATUS_SCO_BUSY ||
+ btcoex->bt_status == BT_8723B_1ANT_STATUS_ACL_BUSY)
+ btcoex->bt_busy = true;
+ else
+ btcoex->bt_busy = false;
+}
+
+static
+void rtl8723bu_handle_bt_inquiry(struct rtl8xxxu_priv *priv)
+{
+ struct ieee80211_vif *vif;
+ struct rtl8xxxu_btcoex *btcoex;
+ bool wifi_connected;
+
+ vif = priv->vif;
+ btcoex = &priv->bt_coex;
+ wifi_connected = (vif && vif->bss_conf.assoc);
+
+ if (!wifi_connected) {
+ rtl8723bu_set_ps_tdma(priv, 0x8, 0x0, 0x0, 0x0, 0x0);
+ rtl8723bu_set_coex_with_type(priv, 0);
+ } else if (btcoex->has_sco || btcoex->has_hid || btcoex->has_a2dp) {
+ rtl8723bu_set_ps_tdma(priv, 0x61, 0x35, 0x3, 0x11, 0x11);
+ rtl8723bu_set_coex_with_type(priv, 4);
+ } else if (btcoex->has_pan) {
+ rtl8723bu_set_ps_tdma(priv, 0x61, 0x3f, 0x3, 0x11, 0x11);
+ rtl8723bu_set_coex_with_type(priv, 4);
+ } else {
+ rtl8723bu_set_ps_tdma(priv, 0x8, 0x0, 0x0, 0x0, 0x0);
+ rtl8723bu_set_coex_with_type(priv, 7);
+ }
+}
+
+static
+void rtl8723bu_handle_bt_info(struct rtl8xxxu_priv *priv)
+{
+ struct ieee80211_vif *vif;
+ struct rtl8xxxu_btcoex *btcoex;
+ bool wifi_connected;
+
+ vif = priv->vif;
+ btcoex = &priv->bt_coex;
+ wifi_connected = (vif && vif->bss_conf.assoc);
+
+ if (wifi_connected) {
+ u32 val32 = 0;
+ u32 high_prio_tx = 0, high_prio_rx = 0;
+
+ val32 = rtl8xxxu_read32(priv, 0x770);
+ high_prio_tx = val32 & 0x0000ffff;
+ high_prio_rx = (val32 & 0xffff0000) >> 16;
+
+ if (btcoex->bt_busy) {
+ if (btcoex->hid_only) {
+ rtl8723bu_set_ps_tdma(priv, 0x61, 0x20,
+ 0x3, 0x11, 0x11);
+ rtl8723bu_set_coex_with_type(priv, 5);
+ } else if (btcoex->a2dp_only) {
+ rtl8723bu_set_ps_tdma(priv, 0x61, 0x35,
+ 0x3, 0x11, 0x11);
+ rtl8723bu_set_coex_with_type(priv, 4);
+ } else if ((btcoex->has_a2dp && btcoex->has_pan) ||
+ (btcoex->has_hid && btcoex->has_a2dp &&
+ btcoex->has_pan)) {
+ rtl8723bu_set_ps_tdma(priv, 0x51, 0x21,
+ 0x3, 0x10, 0x10);
+ rtl8723bu_set_coex_with_type(priv, 4);
+ } else if (btcoex->has_hid && btcoex->has_a2dp) {
+ rtl8723bu_set_ps_tdma(priv, 0x51, 0x21,
+ 0x3, 0x10, 0x10);
+ rtl8723bu_set_coex_with_type(priv, 3);
+ } else {
+ rtl8723bu_set_ps_tdma(priv, 0x61, 0x35,
+ 0x3, 0x11, 0x11);
+ rtl8723bu_set_coex_with_type(priv, 4);
+ }
+ } else {
+ rtl8723bu_set_ps_tdma(priv, 0x8, 0x0, 0x0, 0x0, 0x0);
+ if (high_prio_tx + high_prio_rx <= 60)
+ rtl8723bu_set_coex_with_type(priv, 2);
+ else
+ rtl8723bu_set_coex_with_type(priv, 7);
+ }
+ } else {
+ rtl8723bu_set_ps_tdma(priv, 0x8, 0x0, 0x0, 0x0, 0x0);
+ rtl8723bu_set_coex_with_type(priv, 0);
+ }
+}
+
+static void rtl8xxxu_c2hcmd_callback(struct work_struct *work)
+{
+ struct rtl8xxxu_priv *priv;
+ struct rtl8723bu_c2h *c2h;
+ struct sk_buff *skb = NULL;
+ unsigned long flags;
+ u8 bt_info = 0;
+ struct rtl8xxxu_btcoex *btcoex;
+
+ priv = container_of(work, struct rtl8xxxu_priv, c2hcmd_work);
+ btcoex = &priv->bt_coex;
+
+ if (priv->rf_paths > 1)
+ return;
+
+ while (!skb_queue_empty(&priv->c2hcmd_queue)) {
+ spin_lock_irqsave(&priv->c2hcmd_lock, flags);
+ skb = __skb_dequeue(&priv->c2hcmd_queue);
+ spin_unlock_irqrestore(&priv->c2hcmd_lock, flags);
+
+ c2h = (struct rtl8723bu_c2h *)skb->data;
+
+ switch (c2h->id) {
+ case C2H_8723B_BT_INFO:
+ bt_info = c2h->bt_info.bt_info;
+
+ rtl8723bu_update_bt_link_info(priv, bt_info);
+ if (btcoex->c2h_bt_inquiry) {
+ rtl8723bu_handle_bt_inquiry(priv);
+ break;
+ }
+ rtl8723bu_handle_bt_info(priv);
+ break;
+ default:
+ break;
+ }
+
+ /* the work callback owns each dequeued skb; free it inside
+ * the loop so queued events do not leak
+ */
+ dev_kfree_skb(skb);
+ }
+}
+
static void rtl8723bu_handle_c2h(struct rtl8xxxu_priv *priv,
struct sk_buff *skb)
{
struct rtl8723bu_c2h *c2h = (struct rtl8723bu_c2h *)skb->data;
struct device *dev = &priv->udev->dev;
int len;
+ unsigned long flags;
len = skb->len - 2;
@@ -5191,6 +5475,12 @@ static void rtl8723bu_handle_c2h(struct rtl8xxxu_priv *priv,
16, 1, c2h->raw.payload, len, false);
break;
}
+
+ spin_lock_irqsave(&priv->c2hcmd_lock, flags);
+ __skb_queue_tail(&priv->c2hcmd_queue, skb);
+ spin_unlock_irqrestore(&priv->c2hcmd_lock, flags);
+
+ schedule_work(&priv->c2hcmd_work);
}
int rtl8xxxu_parse_rxdesc16(struct rtl8xxxu_priv *priv, struct sk_buff *skb)
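Why the queue-and-defer dance above: rtl8723bu_handle_c2h() runs from URB completion (atomic) context, while the BT-coex handlers issue H2C commands over USB and may sleep, so the skb is queued and a work item is scheduled. This is also why the dev_kfree_skb() call is dropped from the C2H branch of parse_rxdesc24 below:

	/* producer (atomic RX completion):
	 *	__skb_queue_tail() under the spinlock, then schedule_work();
	 * consumer (process context):
	 *	__skb_dequeue() under the spinlock, handle the event,
	 *	then free the skb
	 */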
@@ -5315,7 +5605,6 @@ int rtl8xxxu_parse_rxdesc24(struct rtl8xxxu_priv *priv, struct sk_buff *skb)
struct device *dev = &priv->udev->dev;
dev_dbg(dev, "%s: C2H packet\n", __func__);
rtl8723bu_handle_c2h(priv, skb);
- dev_kfree_skb(skb);
return RX_TYPE_C2H;
}
@@ -5444,6 +5733,7 @@ static int rtl8xxxu_submit_int_urb(struct ieee80211_hw *hw)
ret = usb_submit_urb(urb, GFP_KERNEL);
if (ret) {
usb_unanchor_urb(urb);
+ usb_free_urb(urb);
goto error;
}
@@ -5464,6 +5754,10 @@ static int rtl8xxxu_add_interface(struct ieee80211_hw *hw,
switch (vif->type) {
case NL80211_IFTYPE_STATION:
+ if (!priv->vif)
+ priv->vif = vif;
+ else
+ return -EOPNOTSUPP;
rtl8xxxu_stop_tx_beacon(priv);
val8 = rtl8xxxu_read8(priv, REG_BEACON_CTRL);
@@ -5487,6 +5781,9 @@ static void rtl8xxxu_remove_interface(struct ieee80211_hw *hw,
struct rtl8xxxu_priv *priv = hw->priv;
dev_dbg(&priv->udev->dev, "%s\n", __func__);
+
+ if (priv->vif)
+ priv->vif = NULL;
}
static int rtl8xxxu_config(struct ieee80211_hw *hw, u32 changed)
@@ -5772,6 +6069,178 @@ rtl8xxxu_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
return 0;
}
+static u8 rtl8xxxu_signal_to_snr(int signal)
+{
+ if (signal < RTL8XXXU_NOISE_FLOOR_MIN)
+ signal = RTL8XXXU_NOISE_FLOOR_MIN;
+ else if (signal > 0)
+ signal = 0;
+ return (u8)(signal - RTL8XXXU_NOISE_FLOOR_MIN);
+}
+
+static void rtl8xxxu_refresh_rate_mask(struct rtl8xxxu_priv *priv,
+ int signal, struct ieee80211_sta *sta)
+{
+ struct ieee80211_hw *hw = priv->hw;
+ u16 wireless_mode;
+ u8 rssi_level, ratr_idx;
+ u8 txbw_40mhz;
+ u8 snr, snr_thresh_high, snr_thresh_low;
+ u8 go_up_gap = 5;
+
+ rssi_level = priv->rssi_level;
+ snr = rtl8xxxu_signal_to_snr(signal);
+ snr_thresh_high = RTL8XXXU_SNR_THRESH_HIGH;
+ snr_thresh_low = RTL8XXXU_SNR_THRESH_LOW;
+ txbw_40mhz = (hw->conf.chandef.width == NL80211_CHAN_WIDTH_40) ? 1 : 0;
+
+ switch (rssi_level) {
+ case RTL8XXXU_RATR_STA_MID:
+ snr_thresh_high += go_up_gap;
+ break;
+ case RTL8XXXU_RATR_STA_LOW:
+ snr_thresh_high += go_up_gap;
+ snr_thresh_low += go_up_gap;
+ break;
+ default:
+ break;
+ }
+
+ if (snr > snr_thresh_high)
+ rssi_level = RTL8XXXU_RATR_STA_HIGH;
+ else if (snr > snr_thresh_low)
+ rssi_level = RTL8XXXU_RATR_STA_MID;
+ else
+ rssi_level = RTL8XXXU_RATR_STA_LOW;
+
+ if (rssi_level != priv->rssi_level) {
+ int sgi = 0;
+ u32 rate_bitmap = 0;
+
+ rcu_read_lock();
+ rate_bitmap = (sta->supp_rates[0] & 0xfff) |
+ (sta->ht_cap.mcs.rx_mask[0] << 12) |
+ (sta->ht_cap.mcs.rx_mask[1] << 20);
+ if (sta->ht_cap.cap &
+ (IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_SGI_20))
+ sgi = 1;
+ rcu_read_unlock();
+
+ wireless_mode = rtl8xxxu_wireless_mode(hw, sta);
+ switch (wireless_mode) {
+ case WIRELESS_MODE_B:
+ ratr_idx = RATEID_IDX_B;
+ if (rate_bitmap & 0x0000000c)
+ rate_bitmap &= 0x0000000d;
+ else
+ rate_bitmap &= 0x0000000f;
+ break;
+ case WIRELESS_MODE_A:
+ case WIRELESS_MODE_G:
+ ratr_idx = RATEID_IDX_G;
+ if (rssi_level == RTL8XXXU_RATR_STA_HIGH)
+ rate_bitmap &= 0x00000f00;
+ else
+ rate_bitmap &= 0x00000ff0;
+ break;
+ case (WIRELESS_MODE_B | WIRELESS_MODE_G):
+ ratr_idx = RATEID_IDX_BG;
+ if (rssi_level == RTL8XXXU_RATR_STA_HIGH)
+ rate_bitmap &= 0x00000f00;
+ else if (rssi_level == RTL8XXXU_RATR_STA_MID)
+ rate_bitmap &= 0x00000ff0;
+ else
+ rate_bitmap &= 0x00000ff5;
+ break;
+ case WIRELESS_MODE_N_24G:
+ case WIRELESS_MODE_N_5G:
+ case (WIRELESS_MODE_G | WIRELESS_MODE_N_24G):
+ case (WIRELESS_MODE_A | WIRELESS_MODE_N_5G):
+ if (priv->tx_paths == 2 && priv->rx_paths == 2)
+ ratr_idx = RATEID_IDX_GN_N2SS;
+ else
+ ratr_idx = RATEID_IDX_GN_N1SS;
+ break;
+ case (WIRELESS_MODE_B | WIRELESS_MODE_G | WIRELESS_MODE_N_24G):
+ case (WIRELESS_MODE_B | WIRELESS_MODE_N_24G):
+ if (txbw_40mhz) {
+ if (priv->tx_paths == 2 && priv->rx_paths == 2)
+ ratr_idx = RATEID_IDX_BGN_40M_2SS;
+ else
+ ratr_idx = RATEID_IDX_BGN_40M_1SS;
+ } else {
+ if (priv->tx_paths == 2 && priv->rx_paths == 2)
+ ratr_idx = RATEID_IDX_BGN_20M_2SS_BN;
+ else
+ ratr_idx = RATEID_IDX_BGN_20M_1SS_BN;
+ }
+
+ if (priv->tx_paths == 2 && priv->rx_paths == 2) {
+ if (rssi_level == RTL8XXXU_RATR_STA_HIGH) {
+ rate_bitmap &= 0x0f8f0000;
+ } else if (rssi_level == RTL8XXXU_RATR_STA_MID) {
+ rate_bitmap &= 0x0f8ff000;
+ } else {
+ if (txbw_40mhz)
+ rate_bitmap &= 0x0f8ff015;
+ else
+ rate_bitmap &= 0x0f8ff005;
+ }
+ } else {
+ if (rssi_level == RTL8XXXU_RATR_STA_HIGH) {
+ rate_bitmap &= 0x000f0000;
+ } else if (rssi_level == RTL8XXXU_RATR_STA_MID) {
+ rate_bitmap &= 0x000ff000;
+ } else {
+ if (txbw_40mhz)
+ rate_bitmap &= 0x000ff015;
+ else
+ rate_bitmap &= 0x000ff005;
+ }
+ }
+ break;
+ default:
+ ratr_idx = RATEID_IDX_BGN_40M_2SS;
+ rate_bitmap &= 0x0fffffff;
+ break;
+ }
+
+ priv->rssi_level = rssi_level;
+ priv->fops->update_rate_mask(priv, rate_bitmap, ratr_idx, sgi);
+ }
+}
+
+static void rtl8xxxu_watchdog_callback(struct work_struct *work)
+{
+ struct ieee80211_vif *vif;
+ struct rtl8xxxu_priv *priv;
+
+ priv = container_of(work, struct rtl8xxxu_priv, ra_watchdog.work);
+ vif = priv->vif;
+
+ if (vif && vif->type == NL80211_IFTYPE_STATION) {
+ int signal;
+ struct ieee80211_sta *sta;
+
+ rcu_read_lock();
+ sta = ieee80211_find_sta(vif, vif->bss_conf.bssid);
+ if (!sta) {
+ struct device *dev = &priv->udev->dev;
+
+ dev_dbg(dev, "%s: no sta found\n", __func__);
+ rcu_read_unlock();
+ goto out;
+ }
+ rcu_read_unlock();
+
+ signal = ieee80211_ave_rssi(vif);
+ rtl8xxxu_refresh_rate_mask(priv, signal, sta);
+ }
+
+out:
+ schedule_delayed_work(&priv->ra_watchdog, 2 * HZ);
+}
+
static int rtl8xxxu_start(struct ieee80211_hw *hw)
{
struct rtl8xxxu_priv *priv = hw->priv;
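The rssi_level hysteresis above is easiest to see with numbers: the base thresholds are 50 (high) and 20 (low), and whichever thresholds must be crossed to climb are raised by go_up_gap = 5 while the station sits at a lower level:

	/* at LOW:  SNR > 55 -> HIGH, SNR > 25 -> MID
	 * at MID:  SNR > 55 -> HIGH, SNR <= 20 -> LOW
	 * at HIGH: stays until SNR <= 50 (MID) or SNR <= 20 (LOW)
	 * the 5 dB gap keeps the rate mask from flapping when the
	 * signal hovers near a threshold
	 */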
@@ -5828,6 +6297,8 @@ static int rtl8xxxu_start(struct ieee80211_hw *hw)
ret = rtl8xxxu_submit_rx_urb(priv, rx_urb);
}
+
+ schedule_delayed_work(&priv->ra_watchdog, 2 * HZ);
exit:
/*
* Accept all data and mgmt frames
@@ -5879,6 +6350,8 @@ static void rtl8xxxu_stop(struct ieee80211_hw *hw)
if (priv->usb_interrupts)
rtl8xxxu_write32(priv, REG_USB_HIMR, 0);
+ cancel_delayed_work_sync(&priv->ra_watchdog);
+
rtl8xxxu_free_rx_resources(priv);
rtl8xxxu_free_tx_resources(priv);
}
@@ -6001,7 +6474,7 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
}
break;
case 0x7392:
- if (id->idProduct == 0x7811)
+ if (id->idProduct == 0x7811 || id->idProduct == 0xa611)
untested = 0;
break;
case 0x050d:
@@ -6051,6 +6524,10 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
INIT_LIST_HEAD(&priv->rx_urb_pending_list);
spin_lock_init(&priv->rx_urb_lock);
INIT_WORK(&priv->rx_urb_wq, rtl8xxxu_rx_urb_work);
+ INIT_DELAYED_WORK(&priv->ra_watchdog, rtl8xxxu_watchdog_callback);
+ spin_lock_init(&priv->c2hcmd_lock);
+ INIT_WORK(&priv->c2hcmd_work, rtl8xxxu_c2hcmd_callback);
+ skb_queue_head_init(&priv->c2hcmd_queue);
usb_set_intfdata(interface, hw);
@@ -6205,6 +6682,8 @@ static const struct usb_device_id dev_table[] = {
.driver_info = (unsigned long)&rtl8192eu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0xb720, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8723bu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x7392, 0xa611, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8723bu_fops},
#ifdef CONFIG_RTL8XXXU_UNTESTED
/* Still supported by rtlwifi */
{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x8176, 0xff, 0xff, 0xff),
diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c
index ac746c322554..c75192c4447f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/base.c
+++ b/drivers/net/wireless/realtek/rtlwifi/base.c
@@ -1776,8 +1776,7 @@ int rtl_tx_agg_start(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
tid_data->agg.agg_state = RTL_AGG_START;
- ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
- return 0;
+ return IEEE80211_AMPDU_TX_START_IMMEDIATE;
}
int rtl_tx_agg_stop(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c
index 3ebc7c93b1e9..3c96c320236c 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c
@@ -1578,10 +1578,6 @@ static void btc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
static int up, dn, m, n, wait_cnt;
- /* 0: no change, +1: increase WiFi duration,
- * -1: decrease WiFi duration
- */
- int result;
u8 retry_cnt = 0;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
@@ -1669,7 +1665,6 @@ static void btc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
dn = 0;
m = 1;
n = 3;
- result = 0;
wait_cnt = 0;
} else {
/* acquire the BT TRx retry count from BT_Info byte2 */
@@ -1679,7 +1674,6 @@ static void btc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], up=%d, dn=%d, m=%d, n=%d, wait_cnt=%d\n",
up, dn, m, n, wait_cnt);
- result = 0;
wait_cnt++;
/* no retry in the last 2-second duration */
if (retry_cnt == 0) {
@@ -1694,7 +1688,6 @@ static void btc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
n = 3;
up = 0;
dn = 0;
- result = 1;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex]Increase wifi duration!!\n");
}
@@ -1718,7 +1711,6 @@ static void btc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
up = 0;
dn = 0;
wait_cnt = 0;
- result = -1;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"Reduce wifi duration for retry<3\n");
}
@@ -1735,7 +1727,6 @@ static void btc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
up = 0;
dn = 0;
wait_cnt = 0;
- result = -1;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"Decrease wifi duration for retryCounter>3!!\n");
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c
index 5f5739904edf..528e442f25a4 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c
@@ -1424,17 +1424,11 @@ void btc8723b1ant_tdma_dur_adj_for_acl(struct btc_coexist *btcoexist,
* -1: decrease WiFi duration
*/
s32 result;
- u8 retry_count = 0, bt_info_ext;
- bool wifi_busy = false;
+ u8 retry_count = 0;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], TdmaDurationAdjustForAcl()\n");
- if (wifi_status == BT_8723B_1ANT_WIFI_STATUS_CONNECTED_BUSY)
- wifi_busy = true;
- else
- wifi_busy = false;
-
if ((wifi_status ==
BT_8723B_1ANT_WIFI_STATUS_NON_CONNECTED_ASSO_AUTH_SCAN) ||
(wifi_status == BT_8723B_1ANT_WIFI_STATUS_CONNECTED_SCAN) ||
@@ -1472,7 +1466,6 @@ void btc8723b1ant_tdma_dur_adj_for_acl(struct btc_coexist *btcoexist,
} else {
/* acquire the BT TRx retry count from BT_Info byte2 */
retry_count = coex_sta->bt_retry_cnt;
- bt_info_ext = coex_sta->bt_info_ext;
if ((coex_sta->low_priority_tx) > 1050 ||
(coex_sta->low_priority_rx) > 1250)
diff --git a/drivers/net/wireless/realtek/rtlwifi/efuse.c b/drivers/net/wireless/realtek/rtlwifi/efuse.c
index 264667203f6f..cef9f2a9303b 100644
--- a/drivers/net/wireless/realtek/rtlwifi/efuse.c
+++ b/drivers/net/wireless/realtek/rtlwifi/efuse.c
@@ -915,7 +915,7 @@ static int efuse_pg_packet_write(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct pgpkt_struct target_pkt;
u8 write_state = PG_STATE_HEADER;
- int continual = true, dataempty = true, result = true;
+ int continual = true, result = true;
u16 efuse_addr = 0;
u8 efuse_data;
u8 target_word_cnts = 0;
@@ -942,7 +942,6 @@ static int efuse_pg_packet_write(struct ieee80211_hw *hw,
while (continual && (efuse_addr < (EFUSE_MAX_SIZE -
rtlpriv->cfg->maps[EFUSE_OOB_PROTECT_BYTES_LEN]))) {
if (write_state == PG_STATE_HEADER) {
- dataempty = true;
badworden = 0x0F;
RTPRINT(rtlpriv, FEEPROM, EFUSE_PG,
"efuse PG_STATE_HEADER\n");
@@ -1179,13 +1178,12 @@ static u16 efuse_get_current_size(struct ieee80211_hw *hw)
{
int continual = true;
u16 efuse_addr = 0;
- u8 hoffset, hworden;
+ u8 hworden;
u8 efuse_data, word_cnts;
while (continual && efuse_one_byte_read(hw, efuse_addr, &efuse_data) &&
(efuse_addr < EFUSE_MAX_SIZE)) {
if (efuse_data != 0xFF) {
- hoffset = (efuse_data >> 4) & 0x0F;
hworden = efuse_data & 0x0F;
word_cnts = efuse_calculate_word_cnts(hworden);
efuse_addr = efuse_addr + (word_cnts * 2) + 1;
diff --git a/drivers/net/wireless/realtek/rtlwifi/ps.c b/drivers/net/wireless/realtek/rtlwifi/ps.c
index fff8dda14023..e5e1ec5a41dc 100644
--- a/drivers/net/wireless/realtek/rtlwifi/ps.c
+++ b/drivers/net/wireless/realtek/rtlwifi/ps.c
@@ -68,7 +68,6 @@ static bool rtl_ps_set_rf_state(struct ieee80211_hw *hw,
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
- enum rf_pwrstate rtstate;
bool actionallowed = false;
u16 rfwait_cnt = 0;
@@ -102,8 +101,6 @@ static bool rtl_ps_set_rf_state(struct ieee80211_hw *hw,
}
}
- rtstate = ppsc->rfpwr_state;
-
switch (state_toset) {
case ERFON:
ppsc->rfoff_reason &= (~changesource);
@@ -161,8 +158,7 @@ static void _rtl_ps_inactive_ps(struct ieee80211_hw *hw)
if (ppsc->inactive_pwrstate == ERFON &&
rtlhal->interface == INTF_PCI) {
if ((ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM) &&
- RT_IN_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM) &&
- rtlhal->interface == INTF_PCI) {
+ RT_IN_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM)) {
rtlpriv->intf_ops->disable_aspm(hw);
RT_CLEAR_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM);
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/regd.c b/drivers/net/wireless/realtek/rtlwifi/regd.c
index c10432cd703e..8be31e0ad878 100644
--- a/drivers/net/wireless/realtek/rtlwifi/regd.c
+++ b/drivers/net/wireless/realtek/rtlwifi/regd.c
@@ -386,7 +386,7 @@ int rtl_regd_init(struct ieee80211_hw *hw,
struct wiphy *wiphy = hw->wiphy;
struct country_code_to_enum_rd *country = NULL;
- if (wiphy == NULL || &rtlpriv->regd == NULL)
+ if (!wiphy)
return -EINVAL;
/* init country_code from efuse channel plan */
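
The regd.c hunk drops a comparison of &rtlpriv->regd against NULL: the address of an embedded struct member can never be NULL, so the old condition was dead code and only the wiphy pointer needs checking. A standalone illustration (hypothetical names, compiles as plain C):

    #include <stdio.h>

    struct regd { int country; };
    struct priv { struct regd regd; };

    int main(void)
    {
            struct priv p;

            /* &p.regd points into p itself, so it can never be NULL;
             * compilers typically warn the comparison is always false. */
            if (&p.regd == NULL)
                    printf("unreachable\n");
            else
                    printf("&p.regd is always non-NULL\n");
            return 0;
    }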
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c
index 333e355c9281..dceb04a9b3f5 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c
@@ -759,14 +759,8 @@ static void rtl88e_dm_pwdb_monitor(struct ieee80211_hw *hw)
rtlpriv->dm.entry_min_undec_sm_pwdb = 0;
}
/* Indicate Rx signal strength to FW. */
- if (rtlpriv->dm.useramask) {
- u8 h2c_parameter[3] = { 0 };
-
- h2c_parameter[2] = (u8)(rtlpriv->dm.undec_sm_pwdb & 0xFF);
- h2c_parameter[0] = 0x20;
- } else {
+ if (!rtlpriv->dm.useramask)
rtl_write_byte(rtlpriv, 0x4fe, rtlpriv->dm.undec_sm_pwdb);
- }
}
void rtl88e_dm_init_edca_turbo(struct ieee80211_hw *hw)
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c
index 96d8f25b120f..5ca900f97d66 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c
@@ -85,20 +85,19 @@ u32 rtl88e_phy_query_rf_reg(struct ieee80211_hw *hw,
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
u32 original_value, readback_value, bitshift;
- unsigned long flags;
RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
"regaddr(%#x), rfpath(%#x), bitmask(%#x)\n",
regaddr, rfpath, bitmask);
- spin_lock_irqsave(&rtlpriv->locks.rf_lock, flags);
+ spin_lock(&rtlpriv->locks.rf_lock);
original_value = _rtl88e_phy_rf_serial_read(hw, rfpath, regaddr);
bitshift = _rtl88e_phy_calculate_bit_shift(bitmask);
readback_value = (original_value & bitmask) >> bitshift;
- spin_unlock_irqrestore(&rtlpriv->locks.rf_lock, flags);
+ spin_unlock(&rtlpriv->locks.rf_lock);
RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
"regaddr(%#x), rfpath(%#x), bitmask(%#x), original_value(%#x)\n",
@@ -112,13 +111,12 @@ void rtl88e_phy_set_rf_reg(struct ieee80211_hw *hw,
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
u32 original_value, bitshift;
- unsigned long flags;
RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
"regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
regaddr, bitmask, data, rfpath);
- spin_lock_irqsave(&rtlpriv->locks.rf_lock, flags);
+ spin_lock(&rtlpriv->locks.rf_lock);
if (bitmask != RFREG_OFFSET_MASK) {
original_value = _rtl88e_phy_rf_serial_read(hw,
@@ -133,7 +131,7 @@ void rtl88e_phy_set_rf_reg(struct ieee80211_hw *hw,
_rtl88e_phy_rf_serial_write(hw, rfpath, regaddr, data);
- spin_unlock_irqrestore(&rtlpriv->locks.rf_lock, flags);
+ spin_unlock(&rtlpriv->locks.rf_lock);
RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
"regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
@@ -166,9 +164,9 @@ static u32 _rtl88e_phy_rf_serial_read(struct ieee80211_hw *hw,
(newoffset << 23) | BLSSIREADEDGE;
rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD,
tmplong & (~BLSSIREADEDGE));
- mdelay(1);
+ udelay(10);
rtl_set_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD, tmplong2);
- mdelay(2);
+ udelay(120);
if (rfpath == RF90_PATH_A)
rfpi_enable = (u8)rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER1,
BIT(8));
@@ -649,7 +647,7 @@ static bool phy_config_bb_with_pghdr(struct ieee80211_hw *hw, u8 configtype)
int i;
u32 *phy_reg_page;
u16 phy_reg_page_len;
- u32 v1 = 0, v2 = 0, v3 = 0;
+ u32 v1 = 0, v2 = 0;
phy_reg_page_len = RTL8188EEPHY_REG_ARRAY_PGLEN;
phy_reg_page = RTL8188EEPHY_REG_ARRAY_PG;
@@ -658,7 +656,6 @@ static bool phy_config_bb_with_pghdr(struct ieee80211_hw *hw, u8 configtype)
for (i = 0; i < phy_reg_page_len; i = i + 3) {
v1 = phy_reg_page[i];
v2 = phy_reg_page[i+1];
- v3 = phy_reg_page[i+2];
if (v1 < 0xcdcdcdcd) {
if (phy_reg_page[i] == 0xfe)
@@ -689,13 +686,11 @@ static bool phy_config_bb_with_pghdr(struct ieee80211_hw *hw, u8 configtype)
v1 = phy_reg_page[i];
v2 = phy_reg_page[i+1];
- v3 = phy_reg_page[i+2];
while (v2 != 0xDEAD &&
i < phy_reg_page_len - 5) {
i += 3;
v1 = phy_reg_page[i];
v2 = phy_reg_page[i+1];
- v3 = phy_reg_page[i+2];
}
}
}
@@ -769,7 +764,6 @@ bool rtl88e_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
enum radio_path rfpath)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- bool rtstatus = true;
u32 *radioa_array_table;
u16 radioa_arraylen;
@@ -778,7 +772,6 @@ bool rtl88e_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
"Radio_A:RTL8188EE_RADIOA_1TARRAY %d\n", radioa_arraylen);
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Radio No %x\n", rfpath);
- rtstatus = true;
switch (rfpath) {
case RF90_PATH_A:
process_path_a(hw, radioa_arraylen, radioa_array_table);
@@ -1940,9 +1933,9 @@ void rtl88e_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery)
struct rtl_phy *rtlphy = &rtlpriv->phy;
long result[4][8];
u8 i, final_candidate;
- bool b_patha_ok, b_pathb_ok;
- long reg_e94, reg_e9c, reg_ea4, reg_eac, reg_eb4, reg_ebc, reg_ec4,
- reg_ecc, reg_tmp = 0;
+ bool b_patha_ok;
+ long reg_e94, reg_e9c, reg_ea4, reg_eb4, reg_ebc,
+ reg_tmp = 0;
bool is12simular, is13simular, is23simular;
u32 iqk_bb_reg[9] = {
ROFDM0_XARXIQIMBALANCE,
@@ -1971,7 +1964,6 @@ void rtl88e_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery)
}
final_candidate = 0xff;
b_patha_ok = false;
- b_pathb_ok = false;
is12simular = false;
is23simular = false;
is13simular = false;
@@ -2014,27 +2006,20 @@ void rtl88e_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery)
reg_e94 = result[i][0];
reg_e9c = result[i][1];
reg_ea4 = result[i][2];
- reg_eac = result[i][3];
reg_eb4 = result[i][4];
reg_ebc = result[i][5];
- reg_ec4 = result[i][6];
- reg_ecc = result[i][7];
}
if (final_candidate != 0xff) {
reg_e94 = result[final_candidate][0];
reg_e9c = result[final_candidate][1];
reg_ea4 = result[final_candidate][2];
- reg_eac = result[final_candidate][3];
reg_eb4 = result[final_candidate][4];
reg_ebc = result[final_candidate][5];
- reg_ec4 = result[final_candidate][6];
- reg_ecc = result[final_candidate][7];
rtlphy->reg_eb4 = reg_eb4;
rtlphy->reg_ebc = reg_ebc;
rtlphy->reg_e94 = reg_e94;
rtlphy->reg_e9c = reg_e9c;
b_patha_ok = true;
- b_pathb_ok = true;
} else {
rtlphy->reg_e94 = 0x100;
rtlphy->reg_eb4 = 0x100;
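
The phy.c hunks above replace spin_lock_irqsave()/spin_unlock_irqrestore() on the RF lock with plain spin_lock()/spin_unlock(), and shorten the serial-read delays from mdelay() to udelay(). The irqsave variant is only needed when the same lock can also be taken from interrupt context; a side-by-side sketch of the two locking forms, assuming (as this patch appears to) that the RF lock is process-context only:

    /* Sketch only; rf_lock stands in for rtlpriv->locks.rf_lock. */
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(rf_lock);

    static void locked_rf_access_before(void)
    {
            unsigned long flags;

            spin_lock_irqsave(&rf_lock, flags);     /* also masks local IRQs */
            /* ... serial RF read/write ... */
            spin_unlock_irqrestore(&rf_lock, flags);
    }

    static void locked_rf_access_after(void)
    {
            /* Sufficient when the lock is never taken in IRQ context. */
            spin_lock(&rf_lock);
            /* ... serial RF read/write ... */
            spin_unlock(&rf_lock);
    }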
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c
index f2908ee5f860..4bef237f488d 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c
@@ -1649,8 +1649,6 @@ static void rtl92c_bt_ant_isolation(struct ieee80211_hw *hw, u8 tmp1byte)
(rtlpriv->btcoexist.bt_rssi_state &
BT_RSSI_STATE_SPECIAL_LOW)) {
rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG, 0xa0);
- } else if (rtlpriv->btcoexist.bt_service == BT_PAN) {
- rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG, tmp1byte);
} else {
rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG, tmp1byte);
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c
index 0efd19aa4fe5..661249d618c0 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c
@@ -1369,8 +1369,8 @@ void rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery)
long result[4][8];
u8 i, final_candidate;
bool b_patha_ok, b_pathb_ok;
- long reg_e94, reg_e9c, reg_ea4, reg_eac, reg_eb4, reg_ebc, reg_ec4,
- reg_ecc, reg_tmp = 0;
+ long reg_e94, reg_e9c, reg_ea4, reg_eb4, reg_ebc, reg_ec4,
+ reg_tmp = 0;
bool is12simular, is13simular, is23simular;
u32 iqk_bb_reg[10] = {
ROFDM0_XARXIQIMBALANCE,
@@ -1445,21 +1445,17 @@ void rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery)
reg_e94 = result[i][0];
reg_e9c = result[i][1];
reg_ea4 = result[i][2];
- reg_eac = result[i][3];
reg_eb4 = result[i][4];
reg_ebc = result[i][5];
reg_ec4 = result[i][6];
- reg_ecc = result[i][7];
}
if (final_candidate != 0xff) {
rtlphy->reg_e94 = reg_e94 = result[final_candidate][0];
rtlphy->reg_e9c = reg_e9c = result[final_candidate][1];
reg_ea4 = result[final_candidate][2];
- reg_eac = result[final_candidate][3];
rtlphy->reg_eb4 = reg_eb4 = result[final_candidate][4];
rtlphy->reg_ebc = reg_ebc = result[final_candidate][5];
reg_ec4 = result[final_candidate][6];
- reg_ecc = result[final_candidate][7];
b_patha_ok = true;
b_pathb_ok = true;
} else {
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
index 56cc3bc30860..f070f25bb735 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
@@ -1540,6 +1540,8 @@ static bool usb_cmd_send_packet(struct ieee80211_hw *hw, struct sk_buff *skb)
* This may be necessary:
* rtlpriv->cfg->ops->fill_tx_cmddesc(hw, buffer, 1, 1, skb);
*/
+ dev_kfree_skb(skb);
+
return true;
}
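
The hunk above fixes a buffer leak: usb_cmd_send_packet() takes ownership of the skb it is handed but previously returned without freeing it. The ownership rule it restores, as a short sketch (only dev_kfree_skb() and the function's role come from the patch):

    /* Sketch: a function that consumes an skb must free it on every
     * path that does not hand the buffer onward. */
    static bool send_cmd_packet_sketch(struct sk_buff *skb)
    {
            /* ... build and submit the firmware command here ... */

            dev_kfree_skb(skb);     /* consume; the caller must not free it */
            return true;
    }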
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c
index c7f29a9be50d..146fe144f5f5 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c
@@ -1176,6 +1176,7 @@ void rtl92de_enable_interrupt(struct ieee80211_hw *hw)
rtl_write_dword(rtlpriv, REG_HIMR, rtlpci->irq_mask[0] & 0xFFFFFFFF);
rtl_write_dword(rtlpriv, REG_HIMRE, rtlpci->irq_mask[1] & 0xFFFFFFFF);
+ rtlpci->irq_enabled = true;
}
void rtl92de_disable_interrupt(struct ieee80211_hw *hw)
@@ -1185,7 +1186,7 @@ void rtl92de_disable_interrupt(struct ieee80211_hw *hw)
rtl_write_dword(rtlpriv, REG_HIMR, IMR8190_DISABLED);
rtl_write_dword(rtlpriv, REG_HIMRE, IMR8190_DISABLED);
- synchronize_irq(rtlpci->pdev->irq);
+ rtlpci->irq_enabled = false;
}
static void _rtl92de_poweroff_adapter(struct ieee80211_hw *hw)
@@ -1351,7 +1352,7 @@ void rtl92de_set_beacon_related_registers(struct ieee80211_hw *hw)
bcn_interval = mac->beacon_interval;
atim_window = 2;
- /*rtl92de_disable_interrupt(hw); */
+ rtl92de_disable_interrupt(hw);
rtl_write_word(rtlpriv, REG_ATIMWND, atim_window);
rtl_write_word(rtlpriv, REG_BCN_INTERVAL, bcn_interval);
rtl_write_word(rtlpriv, REG_BCNTCFG, 0x660f);
@@ -1371,9 +1372,9 @@ void rtl92de_set_beacon_interval(struct ieee80211_hw *hw)
RT_TRACE(rtlpriv, COMP_BEACON, DBG_DMESG,
"beacon_interval:%d\n", bcn_interval);
- /* rtl92de_disable_interrupt(hw); */
+ rtl92de_disable_interrupt(hw);
rtl_write_word(rtlpriv, REG_BCN_INTERVAL, bcn_interval);
- /* rtl92de_enable_interrupt(hw); */
+ rtl92de_enable_interrupt(hw);
}
void rtl92de_update_interrupt_mask(struct ieee80211_hw *hw,
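
The hw.c hunks above make the rtl8192de driver track interrupt state in rtlpci->irq_enabled (instead of relying on synchronize_irq()) and re-activate the previously commented-out disable/enable calls, so beacon-register writes happen with device interrupts masked. The resulting bracket pattern, sketched with the register writes elided:

    /* Sketch of the bracket rtl92de_set_beacon_interval() now uses. */
    static void set_beacon_interval_sketch(struct ieee80211_hw *hw)
    {
            rtl92de_disable_interrupt(hw);  /* clears rtlpci->irq_enabled */
            /* write REG_BCN_INTERVAL here */
            rtl92de_enable_interrupt(hw);   /* sets rtlpci->irq_enabled again */
    }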
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
index 0ae6371b6318..4b672199c81d 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
@@ -307,16 +307,15 @@ u32 rtl92d_phy_query_rf_reg(struct ieee80211_hw *hw,
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
u32 original_value, readback_value, bitshift;
- unsigned long flags;
RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
"regaddr(%#x), rfpath(%#x), bitmask(%#x)\n",
regaddr, rfpath, bitmask);
- spin_lock_irqsave(&rtlpriv->locks.rf_lock, flags);
+ spin_lock(&rtlpriv->locks.rf_lock);
original_value = _rtl92d_phy_rf_serial_read(hw, rfpath, regaddr);
bitshift = _rtl92d_phy_calculate_bit_shift(bitmask);
readback_value = (original_value & bitmask) >> bitshift;
- spin_unlock_irqrestore(&rtlpriv->locks.rf_lock, flags);
+ spin_unlock(&rtlpriv->locks.rf_lock);
RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
"regaddr(%#x), rfpath(%#x), bitmask(%#x), original_value(%#x)\n",
regaddr, rfpath, bitmask, original_value);
@@ -329,14 +328,13 @@ void rtl92d_phy_set_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_phy *rtlphy = &(rtlpriv->phy);
u32 original_value, bitshift;
- unsigned long flags;
RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
"regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
regaddr, bitmask, data, rfpath);
if (bitmask == 0)
return;
- spin_lock_irqsave(&rtlpriv->locks.rf_lock, flags);
+ spin_lock(&rtlpriv->locks.rf_lock);
if (rtlphy->rf_mode != RF_OP_BY_FW) {
if (bitmask != RFREG_OFFSET_MASK) {
original_value = _rtl92d_phy_rf_serial_read(hw,
@@ -347,7 +345,7 @@ void rtl92d_phy_set_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
}
_rtl92d_phy_rf_serial_write(hw, rfpath, regaddr, data);
}
- spin_unlock_irqrestore(&rtlpriv->locks.rf_lock, flags);
+ spin_unlock(&rtlpriv->locks.rf_lock);
RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
"regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
regaddr, bitmask, data, rfpath);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
index 99e5cd9a5c86..1dbdddce0823 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
@@ -216,6 +216,7 @@ static struct rtl_hal_ops rtl8192de_hal_ops = {
.led_control = rtl92de_led_control,
.set_desc = rtl92de_set_desc,
.get_desc = rtl92de_get_desc,
+ .is_tx_desc_closed = rtl92de_is_tx_desc_closed,
.tx_polling = rtl92de_tx_polling,
.enable_hw_sec = rtl92de_enable_hw_security_config,
.set_key = rtl92de_set_key,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
index 2494e1f118f8..92c9fb45f800 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
@@ -804,13 +804,15 @@ u64 rtl92de_get_desc(struct ieee80211_hw *hw,
break;
}
} else {
- struct rx_desc_92c *pdesc = (struct rx_desc_92c *)p_desc;
switch (desc_name) {
case HW_DESC_OWN:
- ret = GET_RX_DESC_OWN(pdesc);
+ ret = GET_RX_DESC_OWN(p_desc);
break;
case HW_DESC_RXPKT_LEN:
- ret = GET_RX_DESC_PKT_LEN(pdesc);
+ ret = GET_RX_DESC_PKT_LEN(p_desc);
+ break;
+ case HW_DESC_RXBUFF_ADDR:
+ ret = GET_RX_DESC_BUFF_ADDR(p_desc);
break;
default:
WARN_ONCE(true, "rtl8192de: ERR rxdesc :%d not processed\n",
@@ -821,6 +823,23 @@ u64 rtl92de_get_desc(struct ieee80211_hw *hw,
return ret;
}
+bool rtl92de_is_tx_desc_closed(struct ieee80211_hw *hw,
+ u8 hw_queue, u16 index)
+{
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue];
+ u8 *entry = (u8 *)(&ring->desc[ring->idx]);
+ u8 own = (u8)rtl92de_get_desc(hw, entry, true, HW_DESC_OWN);
+
+ /* a beacon packet will only use the first
+ * descriptor by default, and the own bit may not
+ * be cleared by the hardware
+ */
+ if (own)
+ return false;
+ return true;
+}
+
void rtl92de_tx_polling(struct ieee80211_hw *hw, u8 hw_queue)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
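
rtl92de_is_tx_desc_closed() above reports a descriptor as closed once the hardware has cleared its OWN bit. A sketch of how a TX-reclaim loop might consult it; the loop shape is assumed, only the callback comes from this patch:

    static void tx_reclaim_sketch(struct ieee80211_hw *hw, u8 hw_queue)
    {
            u16 index = 0;  /* ring->idx in the real driver */

            while (rtl92de_is_tx_desc_closed(hw, hw_queue, index)) {
                    /* OWN bit clear: hardware is done with this entry, so
                     * the skb can be unmapped and completed here. */
                    index++;
            }
    }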
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h
index 36820070fd76..635989e15282 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h
@@ -715,6 +715,8 @@ void rtl92de_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
u8 desc_name, u8 *val);
u64 rtl92de_get_desc(struct ieee80211_hw *hw,
u8 *p_desc, bool istx, u8 desc_name);
+bool rtl92de_is_tx_desc_closed(struct ieee80211_hw *hw,
+ u8 hw_queue, u16 index);
void rtl92de_tx_polling(struct ieee80211_hw *hw, u8 hw_queue);
void rtl92de_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc,
bool b_firstseg, bool b_lastseg,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c
index 67305ce915ec..05462422d247 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c
@@ -108,7 +108,6 @@ int rtl92ee_download_fw(struct ieee80211_hw *hw, bool buse_wake_on_wlan_fw)
struct rtlwifi_firmware_header *pfwheader;
u8 *pfwdata;
u32 fwsize;
- int err;
enum version_8192e version = rtlhal->version;
if (!rtlhal->pfirmware)
@@ -146,9 +145,7 @@ int rtl92ee_download_fw(struct ieee80211_hw *hw, bool buse_wake_on_wlan_fw)
_rtl92ee_write_fw(hw, version, pfwdata, fwsize);
_rtl92ee_enable_fw_download(hw, false);
- err = _rtl92ee_fw_free_to_go(hw);
-
- return 0;
+ return _rtl92ee_fw_free_to_go(hw);
}
static bool _rtl92ee_check_fw_read_last_h2c(struct ieee80211_hw *hw, u8 boxnum)
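
The fw.c hunk above stops discarding the status of _rtl92ee_fw_free_to_go(): the old code stored it in err and returned 0 regardless, so firmware-start failures were invisible to callers. The fix reduces to one idiom, sketched with a hypothetical stand-in:

    static int fw_ready_status(void); /* stands in for _rtl92ee_fw_free_to_go() */

    static int download_fw_sketch(void)
    {
            /* before: err = fw_ready_status(); return 0;   (err discarded)
             * after:  return fw_ready_status();            (failures propagate) */
            return fw_ready_status();
    }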
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c
index 222abc41669c..6dba576aa81e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c
@@ -84,19 +84,18 @@ u32 rtl92ee_phy_query_rf_reg(struct ieee80211_hw *hw,
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
u32 original_value, readback_value, bitshift;
- unsigned long flags;
RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
"regaddr(%#x), rfpath(%#x), bitmask(%#x)\n",
regaddr, rfpath, bitmask);
- spin_lock_irqsave(&rtlpriv->locks.rf_lock, flags);
+ spin_lock(&rtlpriv->locks.rf_lock);
original_value = _rtl92ee_phy_rf_serial_read(hw, rfpath, regaddr);
bitshift = _rtl92ee_phy_calculate_bit_shift(bitmask);
readback_value = (original_value & bitmask) >> bitshift;
- spin_unlock_irqrestore(&rtlpriv->locks.rf_lock, flags);
+ spin_unlock(&rtlpriv->locks.rf_lock);
RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
"regaddr(%#x),rfpath(%#x),bitmask(%#x),original_value(%#x)\n",
@@ -111,13 +110,12 @@ void rtl92ee_phy_set_rf_reg(struct ieee80211_hw *hw,
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
u32 original_value, bitshift;
- unsigned long flags;
RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
"regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
addr, bitmask, data, rfpath);
- spin_lock_irqsave(&rtlpriv->locks.rf_lock, flags);
+ spin_lock(&rtlpriv->locks.rf_lock);
if (bitmask != RFREG_OFFSET_MASK) {
original_value = _rtl92ee_phy_rf_serial_read(hw, rfpath, addr);
@@ -127,7 +125,7 @@ void rtl92ee_phy_set_rf_reg(struct ieee80211_hw *hw,
_rtl92ee_phy_rf_serial_write(hw, rfpath, addr, data);
- spin_unlock_irqrestore(&rtlpriv->locks.rf_lock, flags);
+ spin_unlock(&rtlpriv->locks.rf_lock);
RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
"regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
@@ -160,9 +158,8 @@ static u32 _rtl92ee_phy_rf_serial_read(struct ieee80211_hw *hw,
(newoffset << 23) | BLSSIREADEDGE;
rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD,
tmplong & (~BLSSIREADEDGE));
- mdelay(1);
rtl_set_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD, tmplong2);
- mdelay(2);
+ udelay(20);
if (rfpath == RF90_PATH_A)
rfpi_enable = (u8)rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER1,
BIT(8));
@@ -2801,8 +2798,8 @@ void rtl92ee_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery)
long result[4][8];
u8 i, final_candidate;
bool b_patha_ok, b_pathb_ok;
- long reg_e94, reg_e9c, reg_ea4, reg_eac;
- long reg_eb4, reg_ebc, reg_ec4, reg_ecc;
+ long reg_e94, reg_e9c, reg_ea4;
+ long reg_eb4, reg_ebc, reg_ec4;
bool is12simular, is13simular, is23simular;
u8 idx;
u32 iqk_bb_reg[IQK_BB_REG_NUM] = {
@@ -2873,11 +2870,9 @@ void rtl92ee_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery)
reg_e94 = result[i][0];
reg_e9c = result[i][1];
reg_ea4 = result[i][2];
- reg_eac = result[i][3];
reg_eb4 = result[i][4];
reg_ebc = result[i][5];
reg_ec4 = result[i][6];
- reg_ecc = result[i][7];
}
if (final_candidate != 0xff) {
@@ -2886,13 +2881,11 @@ void rtl92ee_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery)
reg_e9c = result[final_candidate][1];
rtlphy->reg_e9c = reg_e9c;
reg_ea4 = result[final_candidate][2];
- reg_eac = result[final_candidate][3];
reg_eb4 = result[final_candidate][4];
rtlphy->reg_eb4 = reg_eb4;
reg_ebc = result[final_candidate][5];
rtlphy->reg_ebc = reg_ebc;
reg_ec4 = result[final_candidate][6];
- reg_ecc = result[final_candidate][7];
b_patha_ok = true;
b_pathb_ok = true;
} else {
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c
index 27f1a631b569..dc7b515bdc85 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c
@@ -651,7 +651,6 @@ void rtl92ee_tx_fill_desc(struct ieee80211_hw *hw,
struct rtlwifi_tx_info *tx_info = rtl_tx_skb_cb_info(skb);
u16 seq_number;
__le16 fc = hdr->frame_control;
- unsigned int buf_len;
u8 fw_qsel = _rtl92ee_map_hwqueue_to_fwqueue(skb, hw_queue);
bool firstseg = ((hdr->seq_ctrl &
cpu_to_le16(IEEE80211_SCTL_FRAG)) == 0);
@@ -659,7 +658,6 @@ void rtl92ee_tx_fill_desc(struct ieee80211_hw *hw,
cpu_to_le16(IEEE80211_FCTL_MOREFRAGS)) == 0);
dma_addr_t mapping;
u8 bw_40 = 0;
- u8 short_gi;
__le32 *pdesc = (__le32 *)pdesc8;
if (mac->opmode == NL80211_IFTYPE_STATION) {
@@ -677,7 +675,6 @@ void rtl92ee_tx_fill_desc(struct ieee80211_hw *hw,
skb_push(skb, EM_HDR_LEN);
memset(skb->data, 0, EM_HDR_LEN);
}
- buf_len = skb->len;
mapping = pci_map_single(rtlpci->pdev, skb->data, skb->len,
PCI_DMA_TODEVICE);
if (pci_dma_mapping_error(rtlpci->pdev, mapping)) {
@@ -724,11 +721,6 @@ void rtl92ee_tx_fill_desc(struct ieee80211_hw *hw,
}
}
- if (ptcb_desc->hw_rate > DESC_RATEMCS0)
- short_gi = (ptcb_desc->use_shortgi) ? 1 : 0;
- else
- short_gi = (ptcb_desc->use_shortpreamble) ? 1 : 0;
-
if (info->flags & IEEE80211_TX_CTL_AMPDU) {
set_tx_desc_agg_enable(pdesc, 1);
set_tx_desc_max_agg_num(pdesc, 0x14);
@@ -1010,14 +1002,13 @@ bool rtl92ee_is_tx_desc_closed(struct ieee80211_hw *hw, u8 hw_queue, u16 index)
struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue];
{
- u16 cur_tx_rp, cur_tx_wp;
+ u16 cur_tx_rp;
u32 tmpu32;
tmpu32 =
rtl_read_dword(rtlpriv,
get_desc_addr_fr_q_idx(hw_queue));
cur_tx_rp = (u16)((tmpu32 >> 16) & 0x0fff);
- cur_tx_wp = (u16)(tmpu32 & 0x0fff);
/* don't need to update ring->cur_tx_wp */
ring->cur_tx_rp = cur_tx_rp;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/def.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/def.h
index bb6b60814762..f43331224851 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/def.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/def.h
@@ -24,192 +24,186 @@
#define TX_DESC_SIZE_RTL8192S (16 * 4)
#define TX_CMDDESC_SIZE_RTL8192S (16 * 4)
-/* Define a macro that takes a le32 word, converts it to host ordering,
- * right shifts by a specified count, creates a mask of the specified
- * bit count, and extracts that number of bits.
- */
-
-#define SHIFT_AND_MASK_LE(__pdesc, __shift, __mask) \
- ((le32_to_cpu(*(((__le32 *)(__pdesc)))) >> (__shift)) & \
- BIT_LEN_MASK_32(__mask))
-
-/* Define a macro that clears a bit field in an le32 word and
- * sets the specified value into that bit field. The resulting
- * value remains in le32 ordering; however, it is properly converted
- * to host ordering for the clear and set operations before conversion
- * back to le32.
- */
-
-#define SET_BITS_OFFSET_LE(__pdesc, __shift, __len, __val) \
- (*(__le32 *)(__pdesc) = \
- (cpu_to_le32((le32_to_cpu(*((__le32 *)(__pdesc))) & \
- (~(BIT_OFFSET_LEN_MASK_32((__shift), __len)))) | \
- (((u32)(__val) & BIT_LEN_MASK_32(__len)) << (__shift)))));
-
/* macros to read/write various fields in RX or TX descriptors */
/* Dword 0 */
-#define SET_TX_DESC_PKT_SIZE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 0, 16, __val)
-#define SET_TX_DESC_OFFSET(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 16, 8, __val)
-#define SET_TX_DESC_TYPE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 24, 2, __val)
-#define SET_TX_DESC_LAST_SEG(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 26, 1, __val)
-#define SET_TX_DESC_FIRST_SEG(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 27, 1, __val)
-#define SET_TX_DESC_LINIP(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 28, 1, __val)
-#define SET_TX_DESC_AMSDU(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 29, 1, __val)
-#define SET_TX_DESC_GREEN_FIELD(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 30, 1, __val)
-#define SET_TX_DESC_OWN(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 31, 1, __val)
-
-#define GET_TX_DESC_OWN(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 31, 1)
+static inline void set_tx_desc_pkt_size(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, GENMASK(15, 0));
+}
+
+static inline void set_tx_desc_offset(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, GENMASK(23, 16));
+}
+
+static inline void set_tx_desc_last_seg(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, BIT(26));
+}
+
+static inline void set_tx_desc_first_seg(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, BIT(27));
+}
+
+static inline void set_tx_desc_linip(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, BIT(28));
+}
+
+static inline void set_tx_desc_own(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, BIT(31));
+}
+
+static inline u32 get_tx_desc_own(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc), BIT(31));
+}
/* Dword 1 */
-#define SET_TX_DESC_MACID(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 4, 0, 5, __val)
-#define SET_TX_DESC_MORE_DATA(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 4, 5, 1, __val)
-#define SET_TX_DESC_MORE_FRAG(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 4, 6, 1, __val)
-#define SET_TX_DESC_PIFS(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 4, 7, 1, __val)
-#define SET_TX_DESC_QUEUE_SEL(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 4, 8, 5, __val)
-#define SET_TX_DESC_ACK_POLICY(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 4, 13, 2, __val)
-#define SET_TX_DESC_NO_ACM(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 4, 15, 1, __val)
-#define SET_TX_DESC_NON_QOS(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 4, 16, 1, __val)
-#define SET_TX_DESC_KEY_ID(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 4, 17, 2, __val)
-#define SET_TX_DESC_OUI(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 4, 19, 1, __val)
-#define SET_TX_DESC_PKT_TYPE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 4, 20, 1, __val)
-#define SET_TX_DESC_EN_DESC_ID(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 4, 21, 1, __val)
-#define SET_TX_DESC_SEC_TYPE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 4, 22, 2, __val)
-#define SET_TX_DESC_WDS(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 4, 24, 1, __val)
-#define SET_TX_DESC_HTC(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 4, 25, 1, __val)
-#define SET_TX_DESC_PKT_OFFSET(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 4, 26, 5, __val)
-#define SET_TX_DESC_HWPC(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 4, 27, 1, __val)
+static inline void set_tx_desc_macid(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 1), __val, GENMASK(4, 0));
+}
+
+static inline void set_tx_desc_queue_sel(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 1), __val, GENMASK(12, 8));
+}
+
+static inline void set_tx_desc_non_qos(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 1), __val, BIT(16));
+}
+
+static inline void set_tx_desc_sec_type(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 1), __val, GENMASK(23, 22));
+}
/* Dword 2 */
-#define SET_TX_DESC_DATA_RETRY_LIMIT(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 8, 0, 6, __val)
-#define SET_TX_DESC_RETRY_LIMIT_ENABLE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 8, 6, 1, __val)
-#define SET_TX_DESC_TSFL(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 8, 7, 5, __val)
-#define SET_TX_DESC_RTS_RETRY_COUNT(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 8, 12, 6, __val)
-#define SET_TX_DESC_DATA_RETRY_COUNT(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 8, 18, 6, __val)
-#define SET_TX_DESC_RSVD_MACID(__pdesc, __val) \
- SET_BITS_OFFSET_LE(((__pdesc) + 8), 24, 5, __val)
-#define SET_TX_DESC_AGG_ENABLE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 8, 29, 1, __val)
-#define SET_TX_DESC_AGG_BREAK(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 8, 30, 1, __val)
-#define SET_TX_DESC_OWN_MAC(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 8, 31, 1, __val)
+static inline void set_tx_desc_rsvd_macid(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 2), __val, GENMASK(28, 24));
+}
+
+static inline void set_tx_desc_agg_enable(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 2), __val, BIT(29));
+}
/* Dword 3 */
-#define SET_TX_DESC_NEXT_HEAP_PAGE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 12, 0, 8, __val)
-#define SET_TX_DESC_TAIL_PAGE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 12, 8, 8, __val)
-#define SET_TX_DESC_SEQ(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 12, 16, 12, __val)
-#define SET_TX_DESC_FRAG(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 12, 28, 4, __val)
+static inline void set_tx_desc_seq(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 3), __val, GENMASK(27, 16));
+}
/* Dword 4 */
-#define SET_TX_DESC_RTS_RATE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 16, 0, 6, __val)
-#define SET_TX_DESC_DISABLE_RTS_FB(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 16, 6, 1, __val)
-#define SET_TX_DESC_RTS_RATE_FB_LIMIT(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 16, 7, 4, __val)
-#define SET_TX_DESC_CTS_ENABLE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 16, 11, 1, __val)
-#define SET_TX_DESC_RTS_ENABLE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 16, 12, 1, __val)
-#define SET_TX_DESC_RA_BRSR_ID(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 16, 13, 3, __val)
-#define SET_TX_DESC_TXHT(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 16, 16, 1, __val)
-#define SET_TX_DESC_TX_SHORT(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 16, 17, 1, __val)
-#define SET_TX_DESC_TX_BANDWIDTH(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 16, 18, 1, __val)
-#define SET_TX_DESC_TX_SUB_CARRIER(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 16, 19, 2, __val)
-#define SET_TX_DESC_TX_STBC(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 16, 21, 2, __val)
-#define SET_TX_DESC_TX_REVERSE_DIRECTION(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 16, 23, 1, __val)
-#define SET_TX_DESC_RTS_HT(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 16, 24, 1, __val)
-#define SET_TX_DESC_RTS_SHORT(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 16, 25, 1, __val)
-#define SET_TX_DESC_RTS_BANDWIDTH(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 16, 26, 1, __val)
-#define SET_TX_DESC_RTS_SUB_CARRIER(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 16, 27, 2, __val)
-#define SET_TX_DESC_RTS_STBC(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 16, 29, 2, __val)
-#define SET_TX_DESC_USER_RATE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 16, 31, 1, __val)
+static inline void set_tx_desc_rts_rate(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, GENMASK(5, 0));
+}
+
+static inline void set_tx_desc_cts_enable(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, BIT(11));
+}
+
+static inline void set_tx_desc_rts_enable(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, BIT(12));
+}
+
+static inline void set_tx_desc_ra_brsr_id(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, GENMASK(15, 13));
+}
+
+static inline void set_tx_desc_txht(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, BIT(16));
+}
+
+static inline void set_tx_desc_tx_short(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, BIT(17));
+}
+
+static inline void set_tx_desc_tx_bandwidth(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, BIT(18));
+}
+
+static inline void set_tx_desc_tx_sub_carrier(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, GENMASK(20, 19));
+}
+
+static inline void set_tx_desc_rts_short(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, BIT(25));
+}
+
+static inline void set_tx_desc_rts_bandwidth(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, BIT(26));
+}
+
+static inline void set_tx_desc_rts_sub_carrier(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, GENMASK(28, 27));
+}
+
+static inline void set_tx_desc_rts_stbc(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, GENMASK(30, 29));
+}
+
+static inline void set_tx_desc_user_rate(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, BIT(31));
+}
/* Dword 5 */
-#define SET_TX_DESC_PACKET_ID(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 20, 0, 9, __val)
-#define SET_TX_DESC_TX_RATE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 20, 9, 6, __val)
-#define SET_TX_DESC_DISABLE_FB(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 20, 15, 1, __val)
-#define SET_TX_DESC_DATA_RATE_FB_LIMIT(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 20, 16, 5, __val)
-#define SET_TX_DESC_TX_AGC(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 20, 21, 11, __val)
-
-/* Dword 6 */
-#define SET_TX_DESC_IP_CHECK_SUM(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 24, 0, 16, __val)
-#define SET_TX_DESC_TCP_CHECK_SUM(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 24, 16, 16, __val)
+static inline void set_tx_desc_packet_id(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 5), __val, GENMASK(8, 0));
+}
+
+static inline void set_tx_desc_tx_rate(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 5), __val, GENMASK(14, 9));
+}
+
+static inline void set_tx_desc_data_rate_fb_limit(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 5), __val, GENMASK(20, 16));
+}
/* Dword 7 */
-#define SET_TX_DESC_TX_BUFFER_SIZE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 28, 0, 16, __val)
-#define SET_TX_DESC_IP_HEADER_OFFSET(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 28, 16, 8, __val)
-#define SET_TX_DESC_TCP_ENABLE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 28, 31, 1, __val)
+static inline void set_tx_desc_tx_buffer_size(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 7), __val, GENMASK(15, 0));
+}
/* Dword 8 */
-#define SET_TX_DESC_TX_BUFFER_ADDRESS(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 32, 0, 32, __val)
-#define GET_TX_DESC_TX_BUFFER_ADDRESS(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 32, 0, 32)
+static inline void set_tx_desc_tx_buffer_address(__le32 *__pdesc, u32 __val)
+{
+ *(__pdesc + 8) = cpu_to_le32(__val);
+}
+
+static inline u32 get_tx_desc_tx_buffer_address(__le32 *__pdesc)
+{
+ return le32_to_cpu(*((__pdesc + 8)));
+}
/* Dword 9 */
-#define SET_TX_DESC_NEXT_DESC_ADDRESS(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 36, 0, 32, __val)
+static inline void set_tx_desc_next_desc_address(__le32 *__pdesc, u32 __val)
+{
+ *(__pdesc + 9) = cpu_to_le32(__val);
+}
/* Because the PCI Tx descriptors are chained at the
* initialization and all the NextDescAddresses in
@@ -226,208 +220,115 @@
#define RX_DRV_INFO_SIZE_UNIT 8
/* DWORD 0 */
-#define SET_RX_STATUS_DESC_PKT_LEN(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 0, 14, __val)
-#define SET_RX_STATUS_DESC_CRC32(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 14, 1, __val)
-#define SET_RX_STATUS_DESC_ICV(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 15, 1, __val)
-#define SET_RX_STATUS_DESC_DRVINFO_SIZE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 16, 4, __val)
-#define SET_RX_STATUS_DESC_SECURITY(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 20, 3, __val)
-#define SET_RX_STATUS_DESC_QOS(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 23, 1, __val)
-#define SET_RX_STATUS_DESC_SHIFT(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 24, 2, __val)
-#define SET_RX_STATUS_DESC_PHY_STATUS(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 26, 1, __val)
-#define SET_RX_STATUS_DESC_SWDEC(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 27, 1, __val)
-#define SET_RX_STATUS_DESC_LAST_SEG(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 28, 1, __val)
-#define SET_RX_STATUS_DESC_FIRST_SEG(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 29, 1, __val)
-#define SET_RX_STATUS_DESC_EOR(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 30, 1, __val)
-#define SET_RX_STATUS_DESC_OWN(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 31, 1, __val)
-
-#define GET_RX_STATUS_DESC_PKT_LEN(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 0, 14)
-#define GET_RX_STATUS_DESC_CRC32(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 14, 1)
-#define GET_RX_STATUS_DESC_ICV(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 15, 1)
-#define GET_RX_STATUS_DESC_DRVINFO_SIZE(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 16, 4)
-#define GET_RX_STATUS_DESC_SECURITY(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 20, 3)
-#define GET_RX_STATUS_DESC_QOS(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 23, 1)
-#define GET_RX_STATUS_DESC_SHIFT(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 24, 2)
-#define GET_RX_STATUS_DESC_PHY_STATUS(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 26, 1)
-#define GET_RX_STATUS_DESC_SWDEC(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 27, 1)
-#define GET_RX_STATUS_DESC_LAST_SEG(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 28, 1)
-#define GET_RX_STATUS_DESC_FIRST_SEG(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 29, 1)
-#define GET_RX_STATUS_DESC_EOR(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 30, 1)
-#define GET_RX_STATUS_DESC_OWN(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 31, 1)
+static inline void set_rx_status_desc_pkt_len(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, GENMASK(13, 0));
+}
+
+static inline void set_rx_status_desc_eor(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, BIT(30));
+}
+
+static inline void set_rx_status_desc_own(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, BIT(31));
+}
+
+static inline u32 get_rx_status_desc_pkt_len(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc), GENMASK(13, 0));
+}
+
+static inline u32 get_rx_status_desc_crc32(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc), BIT(14));
+}
+
+static inline u32 get_rx_status_desc_icv(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc), BIT(15));
+}
+
+static inline u32 get_rx_status_desc_drvinfo_size(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc), GENMASK(19, 16));
+}
+
+static inline u32 get_rx_status_desc_shift(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc), GENMASK(25, 24));
+}
+
+static inline u32 get_rx_status_desc_phy_status(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc), BIT(26));
+}
+
+static inline u32 get_rx_status_desc_swdec(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc), BIT(27));
+}
+
+static inline u32 get_rx_status_desc_own(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc), BIT(31));
+}
/* DWORD 1 */
-#define SET_RX_STATUS_DESC_MACID(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 4, 0, 5, __val)
-#define SET_RX_STATUS_DESC_TID(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 4, 5, 4, __val)
-#define SET_RX_STATUS_DESC_PAGGR(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 4, 14, 1, __val)
-#define SET_RX_STATUS_DESC_FAGGR(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 4, 15, 1, __val)
-#define SET_RX_STATUS_DESC_A1_FIT(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 4, 16, 4, __val)
-#define SET_RX_STATUS_DESC_A2_FIT(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 4, 20, 4, __val)
-#define SET_RX_STATUS_DESC_PAM(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 4, 24, 1, __val)
-#define SET_RX_STATUS_DESC_PWR(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 4, 25, 1, __val)
-#define SET_RX_STATUS_DESC_MOREDATA(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 4, 26, 1, __val)
-#define SET_RX_STATUS_DESC_MOREFRAG(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 4, 27, 1, __val)
-#define SET_RX_STATUS_DESC_TYPE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 4, 28, 2, __val)
-#define SET_RX_STATUS_DESC_MC(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 4, 30, 1, __val)
-#define SET_RX_STATUS_DESC_BC(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 4, 31, 1, __val)
-
-#define GET_RX_STATUS_DEC_MACID(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 4, 0, 5)
-#define GET_RX_STATUS_DESC_TID(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 4, 5, 4)
-#define GET_RX_STATUS_DESC_PAGGR(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 4, 14, 1)
-#define GET_RX_STATUS_DESC_FAGGR(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 4, 15, 1)
-#define GET_RX_STATUS_DESC_A1_FIT(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 4, 16, 4)
-#define GET_RX_STATUS_DESC_A2_FIT(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 4, 20, 4)
-#define GET_RX_STATUS_DESC_PAM(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 4, 24, 1)
-#define GET_RX_STATUS_DESC_PWR(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 4, 25, 1)
-#define GET_RX_STATUS_DESC_MORE_DATA(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 4, 26, 1)
-#define GET_RX_STATUS_DESC_MORE_FRAG(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 4, 27, 1)
-#define GET_RX_STATUS_DESC_TYPE(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 4, 28, 2)
-#define GET_RX_STATUS_DESC_MC(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 4, 30, 1)
-#define GET_RX_STATUS_DESC_BC(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 4, 31, 1)
-
-/* DWORD 2 */
-#define SET_RX_STATUS_DESC_SEQ(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 8, 0, 12, __val)
-#define SET_RX_STATUS_DESC_FRAG(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 8, 12, 4, __val)
-#define SET_RX_STATUS_DESC_NEXT_PKTLEN(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 8, 16, 8, __val)
-#define SET_RX_STATUS_DESC_NEXT_IND(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 8, 30, 1, __val)
-
-#define GET_RX_STATUS_DESC_SEQ(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 8, 0, 12)
-#define GET_RX_STATUS_DESC_FRAG(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 8, 12, 4)
-#define GET_RX_STATUS_DESC_NEXT_PKTLEN(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 8, 16, 8)
-#define GET_RX_STATUS_DESC_NEXT_IND(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 8, 30, 1)
+static inline u32 get_rx_status_desc_paggr(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 1), BIT(14));
+}
+
+static inline u32 get_rx_status_desc_faggr(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 1), BIT(15));
+}
/* DWORD 3 */
-#define SET_RX_STATUS_DESC_RX_MCS(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 12, 0, 6, __val)
-#define SET_RX_STATUS_DESC_RX_HT(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 12, 6, 1, __val)
-#define SET_RX_STATUS_DESC_AMSDU(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 12, 7, 1, __val)
-#define SET_RX_STATUS_DESC_SPLCP(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 12, 8, 1, __val)
-#define SET_RX_STATUS_DESC_BW(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 12, 9, 1, __val)
-#define SET_RX_STATUS_DESC_HTC(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 12, 10, 1, __val)
-#define SET_RX_STATUS_DESC_TCP_CHK_RPT(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 12, 11, 1, __val)
-#define SET_RX_STATUS_DESC_IP_CHK_RPT(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 12, 12, 1, __val)
-#define SET_RX_STATUS_DESC_TCP_CHK_VALID(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 12, 13, 1, __val)
-#define SET_RX_STATUS_DESC_HWPC_ERR(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 12, 14, 1, __val)
-#define SET_RX_STATUS_DESC_HWPC_IND(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 12, 15, 1, __val)
-#define SET_RX_STATUS_DESC_IV0(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 12, 16, 16, __val)
-
-#define GET_RX_STATUS_DESC_RX_MCS(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 12, 0, 6)
-#define GET_RX_STATUS_DESC_RX_HT(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 12, 6, 1)
-#define GET_RX_STATUS_DESC_AMSDU(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 12, 7, 1)
-#define GET_RX_STATUS_DESC_SPLCP(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 12, 8, 1)
-#define GET_RX_STATUS_DESC_BW(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 12, 9, 1)
-#define GET_RX_STATUS_DESC_HTC(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 12, 10, 1)
-#define GET_RX_STATUS_DESC_TCP_CHK_RPT(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 12, 11, 1)
-#define GET_RX_STATUS_DESC_IP_CHK_RPT(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 12, 12, 1)
-#define GET_RX_STATUS_DESC_TCP_CHK_VALID(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 12, 13, 1)
-#define GET_RX_STATUS_DESC_HWPC_ERR(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 12, 14, 1)
-#define GET_RX_STATUS_DESC_HWPC_IND(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 12, 15, 1)
-#define GET_RX_STATUS_DESC_IV0(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 12, 16, 16)
-
-/* DWORD 4 */
-#define SET_RX_STATUS_DESC_IV1(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 16, 0, 32, __val)
-#define GET_RX_STATUS_DESC_IV1(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 16, 0, 32)
+static inline u32 get_rx_status_desc_rx_mcs(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 3), GENMASK(5, 0));
+}
+
+static inline u32 get_rx_status_desc_rx_ht(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 3), BIT(6));
+}
+
+static inline u32 get_rx_status_desc_splcp(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 3), BIT(8));
+}
+
+static inline u32 get_rx_status_desc_bw(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 3), BIT(9));
+}
/* DWORD 5 */
-#define SET_RX_STATUS_DESC_TSFL(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 20, 0, 32, __val)
-#define GET_RX_STATUS_DESC_TSFL(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 20, 0, 32)
+static inline u32 get_rx_status_desc_tsfl(__le32 *__pdesc)
+{
+ return le32_to_cpu(*((__pdesc + 5)));
+}
/* DWORD 6 */
-#define SET_RX_STATUS__DESC_BUFF_ADDR(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc + 24, 0, 32, __val)
-#define GET_RX_STATUS_DESC_BUFF_ADDR(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc + 24, 0, 32)
+static inline void set_rx_status__desc_buff_addr(__le32 *__pdesc, u32 __val)
+{
+ *(__pdesc + 6) = cpu_to_le32(__val);
+}
+
+static inline u32 get_rx_status_desc_buff_addr(__le32 *__pdesc)
+{
+ return le32_to_cpu(*(__pdesc + 6));
+}
#define SE_RX_HAL_IS_CCK_RATE(_pdesc)\
- (GET_RX_STATUS_DESC_RX_MCS(_pdesc) == DESC_RATE1M || \
- GET_RX_STATUS_DESC_RX_MCS(_pdesc) == DESC_RATE2M || \
- GET_RX_STATUS_DESC_RX_MCS(_pdesc) == DESC_RATE5_5M ||\
- GET_RX_STATUS_DESC_RX_MCS(_pdesc) == DESC_RATE11M)
+ (get_rx_status_desc_rx_mcs(_pdesc) == DESC_RATE1M || \
+ get_rx_status_desc_rx_mcs(_pdesc) == DESC_RATE2M || \
+ get_rx_status_desc_rx_mcs(_pdesc) == DESC_RATE5_5M ||\
+ get_rx_status_desc_rx_mcs(_pdesc) == DESC_RATE11M)
enum rf_optype {
RF_OP_BY_SW_3WIRE = 0,
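
The def.h rewrite above replaces the SHIFT_AND_MASK_LE()/SET_BITS_OFFSET_LE() macros with typed inline accessors built on GENMASK(), le32_get_bits(), and le32p_replace_bits(); it also changes the dword offset from a byte count (__pdesc + 4) to a __le32 index (__pdesc + 1). A standalone, compilable sketch of the semantics these helpers rely on; the two helpers are reimplemented here for illustration only (the kernel's versions live in <linux/bits.h> and <linux/bitfield.h>), and a little-endian host is assumed so the byte-order conversion is a no-op:

    #include <stdint.h>
    #include <stdio.h>

    /* GENMASK(h, l): contiguous mask covering bits h down to l. */
    #define GENMASK(h, l)   (((~0u) << (l)) & (~0u >> (31 - (h))))

    /* Read-modify-write of the field selected by mask within *p. */
    static void le32p_replace_bits(uint32_t *p, uint32_t val, uint32_t mask)
    {
            unsigned int shift = __builtin_ctz(mask);

            *p = (*p & ~mask) | ((val << shift) & mask);
    }

    int main(void)
    {
            uint32_t desc = 0;

            /* like set_tx_desc_pkt_size(&desc, 0x5dc): bits 15..0 */
            le32p_replace_bits(&desc, 0x5dc, GENMASK(15, 0));
            /* like set_tx_desc_own(&desc, 1): bit 31 */
            le32p_replace_bits(&desc, 1, GENMASK(31, 31));

            printf("desc = 0x%08x\n", desc);  /* prints 0x800005dc */
            return 0;
    }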
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c
index 541b7881735e..47a5b95ca2b9 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c
@@ -442,17 +442,20 @@ static u32 _rtl92s_fill_h2c_cmd(struct sk_buff *skb, u32 h2cbufferlen,
memset((ph2c_buffer + totallen + tx_desclen), 0, len);
/* CMD len */
- SET_BITS_TO_LE_4BYTE((ph2c_buffer + totallen + tx_desclen),
- 0, 16, pcmd_len[i]);
+ le32p_replace_bits((__le32 *)(ph2c_buffer + totallen +
+ tx_desclen), pcmd_len[i],
+ GENMASK(15, 0));
/* CMD ID */
- SET_BITS_TO_LE_4BYTE((ph2c_buffer + totallen + tx_desclen),
- 16, 8, pelement_id[i]);
+ le32p_replace_bits((__le32 *)(ph2c_buffer + totallen +
+ tx_desclen), pelement_id[i],
+ GENMASK(23, 16));
/* CMD Sequence */
*cmd_start_seq = *cmd_start_seq % 0x80;
- SET_BITS_TO_LE_4BYTE((ph2c_buffer + totallen + tx_desclen),
- 24, 7, *cmd_start_seq);
+ le32p_replace_bits((__le32 *)(ph2c_buffer + totallen +
+ tx_desclen), *cmd_start_seq,
+ GENMASK(30, 24));
++*cmd_start_seq;
/* Copy memory */
@@ -462,8 +465,9 @@ static u32 _rtl92s_fill_h2c_cmd(struct sk_buff *skb, u32 h2cbufferlen,
/* CMD continue */
/* set the continue bit in the previous cmd. */
if (i < cmd_num - 1)
- SET_BITS_TO_LE_4BYTE((ph2c_buffer + pre_continueoffset),
- 31, 1, 1);
+ le32p_replace_bits((__le32 *)(ph2c_buffer +
+ pre_continueoffset),
+ 1, BIT(31));
pre_continueoffset = totallen;
@@ -559,8 +563,8 @@ void rtl92s_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode)
pwrmode.flag_dps_en = 0;
pwrmode.bcn_rx_en = 0;
pwrmode.bcn_to = 0;
- SET_BITS_TO_LE_2BYTE((u8 *)(&pwrmode) + 8, 0, 16,
- mac->vif->bss_conf.beacon_int);
+ le16p_replace_bits((__le16 *)(((u8 *)(&pwrmode) + 8)),
+ mac->vif->bss_conf.beacon_int, GENMASK(15, 0));
pwrmode.app_itv = 0;
pwrmode.awake_bcn_itvl = ppsc->reg_max_lps_awakeintvl;
pwrmode.smart_ps = 1;
@@ -602,9 +606,10 @@ void rtl92s_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw,
joinbss_rpt.bssid[3] = mac->bssid[3];
joinbss_rpt.bssid[4] = mac->bssid[4];
joinbss_rpt.bssid[5] = mac->bssid[5];
- SET_BITS_TO_LE_2BYTE((u8 *)(&joinbss_rpt) + 8, 0, 16,
- mac->vif->bss_conf.beacon_int);
- SET_BITS_TO_LE_2BYTE((u8 *)(&joinbss_rpt) + 10, 0, 16, mac->assoc_id);
+ le16p_replace_bits((__le16 *)(((u8 *)(&joinbss_rpt) + 8)),
+ mac->vif->bss_conf.beacon_int, GENMASK(15, 0));
+ le16p_replace_bits((__le16 *)(((u8 *)(&joinbss_rpt) + 10)),
+ mac->assoc_id, GENMASK(15, 0));
_rtl92s_firmware_set_h2c_cmd(hw, FW_H2C_JOINBSSRPT, (u8 *)&joinbss_rpt);
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c
index 6d6e8994460d..81313e0ca834 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c
@@ -1352,9 +1352,9 @@ static void _rtl92s_phy_set_rfhalt(struct ieee80211_hw *hw)
/* SW/HW radio off or halt adapter!! For example S3/S4 */
} else {
/* LED function disable. Power range is about 8mA now. */
- /* if write 0xF1 disconnet_pci power
+ /* if write 0xF1 disconnect_pci power
* ifconfig wlan0 down power are both high 35:70 */
- /* if write oxF9 disconnet_pci power
+ /* if write 0xF9 disconnect_pci power
* ifconfig wlan0 down power are both low 12:45*/
rtl_write_byte(rtlpriv, 0x03, 0xF9);
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c
index efb432c6d785..9eaa5348b556 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c
@@ -33,7 +33,7 @@ static u8 _rtl92se_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 skb_queue)
}
static void _rtl92se_query_rxphystatus(struct ieee80211_hw *hw,
- struct rtl_stats *pstats, u8 *pdesc,
+ struct rtl_stats *pstats, __le32 *pdesc,
struct rx_fwinfo *p_drvinfo,
bool packet_match_bssid,
bool packet_toself,
@@ -193,11 +193,10 @@ static void _rtl92se_query_rxphystatus(struct ieee80211_hw *hw,
static void _rtl92se_translate_rx_signal_stuff(struct ieee80211_hw *hw,
struct sk_buff *skb, struct rtl_stats *pstats,
- u8 *pdesc, struct rx_fwinfo *p_drvinfo)
+ __le32 *pdesc, struct rx_fwinfo *p_drvinfo)
{
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
-
struct ieee80211_hdr *hdr;
u8 *tmp_buf;
u8 *praddr;
@@ -232,29 +231,30 @@ static void _rtl92se_translate_rx_signal_stuff(struct ieee80211_hw *hw,
}
bool rtl92se_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
- struct ieee80211_rx_status *rx_status, u8 *pdesc,
+ struct ieee80211_rx_status *rx_status, u8 *pdesc8,
struct sk_buff *skb)
{
struct rx_fwinfo *p_drvinfo;
- u32 phystatus = (u32)GET_RX_STATUS_DESC_PHY_STATUS(pdesc);
+ __le32 *pdesc = (__le32 *)pdesc8;
+ u32 phystatus = (u32)get_rx_status_desc_phy_status(pdesc);
struct ieee80211_hdr *hdr;
- stats->length = (u16)GET_RX_STATUS_DESC_PKT_LEN(pdesc);
- stats->rx_drvinfo_size = (u8)GET_RX_STATUS_DESC_DRVINFO_SIZE(pdesc) * 8;
- stats->rx_bufshift = (u8)(GET_RX_STATUS_DESC_SHIFT(pdesc) & 0x03);
- stats->icv = (u16)GET_RX_STATUS_DESC_ICV(pdesc);
- stats->crc = (u16)GET_RX_STATUS_DESC_CRC32(pdesc);
+ stats->length = (u16)get_rx_status_desc_pkt_len(pdesc);
+ stats->rx_drvinfo_size = (u8)get_rx_status_desc_drvinfo_size(pdesc) * 8;
+ stats->rx_bufshift = (u8)(get_rx_status_desc_shift(pdesc) & 0x03);
+ stats->icv = (u16)get_rx_status_desc_icv(pdesc);
+ stats->crc = (u16)get_rx_status_desc_crc32(pdesc);
stats->hwerror = (u16)(stats->crc | stats->icv);
- stats->decrypted = !GET_RX_STATUS_DESC_SWDEC(pdesc);
-
- stats->rate = (u8)GET_RX_STATUS_DESC_RX_MCS(pdesc);
- stats->shortpreamble = (u16)GET_RX_STATUS_DESC_SPLCP(pdesc);
- stats->isampdu = (bool)(GET_RX_STATUS_DESC_PAGGR(pdesc) == 1);
- stats->isfirst_ampdu = (bool) ((GET_RX_STATUS_DESC_PAGGR(pdesc) == 1)
- && (GET_RX_STATUS_DESC_FAGGR(pdesc) == 1));
- stats->timestamp_low = GET_RX_STATUS_DESC_TSFL(pdesc);
- stats->rx_is40mhzpacket = (bool)GET_RX_STATUS_DESC_BW(pdesc);
- stats->is_ht = (bool)GET_RX_STATUS_DESC_RX_HT(pdesc);
+ stats->decrypted = !get_rx_status_desc_swdec(pdesc);
+
+ stats->rate = (u8)get_rx_status_desc_rx_mcs(pdesc);
+ stats->shortpreamble = (u16)get_rx_status_desc_splcp(pdesc);
+ stats->isampdu = (bool)(get_rx_status_desc_paggr(pdesc) == 1);
+ stats->isfirst_ampdu = (bool)((get_rx_status_desc_paggr(pdesc) == 1) &&
+ (get_rx_status_desc_faggr(pdesc) == 1));
+ stats->timestamp_low = get_rx_status_desc_tsfl(pdesc);
+ stats->rx_is40mhzpacket = (bool)get_rx_status_desc_bw(pdesc);
+ stats->is_ht = (bool)get_rx_status_desc_rx_ht(pdesc);
stats->is_cck = SE_RX_HAL_IS_CCK_RATE(pdesc);
if (stats->hwerror)
@@ -310,7 +310,7 @@ bool rtl92se_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
}
void rtl92se_tx_fill_desc(struct ieee80211_hw *hw,
- struct ieee80211_hdr *hdr, u8 *pdesc_tx,
+ struct ieee80211_hdr *hdr, u8 *pdesc8,
u8 *pbd_desc_tx, struct ieee80211_tx_info *info,
struct ieee80211_sta *sta,
struct sk_buff *skb,
@@ -320,7 +320,7 @@ void rtl92se_tx_fill_desc(struct ieee80211_hw *hw,
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- u8 *pdesc = pdesc_tx;
+ __le32 *pdesc = (__le32 *)pdesc8;
u16 seq_number;
__le16 fc = hdr->frame_control;
u8 reserved_macid = 0;
@@ -360,13 +360,13 @@ void rtl92se_tx_fill_desc(struct ieee80211_hw *hw,
if (rtlpriv->dm.useramask) {
/* set txdesc macId */
if (ptcb_desc->mac_id < 32) {
- SET_TX_DESC_MACID(pdesc, ptcb_desc->mac_id);
+ set_tx_desc_macid(pdesc, ptcb_desc->mac_id);
reserved_macid |= ptcb_desc->mac_id;
}
}
- SET_TX_DESC_RSVD_MACID(pdesc, reserved_macid);
+ set_tx_desc_rsvd_macid(pdesc, reserved_macid);
- SET_TX_DESC_TXHT(pdesc, ((ptcb_desc->hw_rate >=
+ set_tx_desc_txht(pdesc, ((ptcb_desc->hw_rate >=
DESC_RATEMCS0) ? 1 : 0));
if (rtlhal->version == VERSION_8192S_ACUT) {
@@ -378,31 +378,32 @@ void rtl92se_tx_fill_desc(struct ieee80211_hw *hw,
}
}
- SET_TX_DESC_TX_RATE(pdesc, ptcb_desc->hw_rate);
+ set_tx_desc_tx_rate(pdesc, ptcb_desc->hw_rate);
if (ptcb_desc->use_shortgi || ptcb_desc->use_shortpreamble)
- SET_TX_DESC_TX_SHORT(pdesc, 0);
+ set_tx_desc_tx_short(pdesc, 0);
/* Aggregation related */
if (info->flags & IEEE80211_TX_CTL_AMPDU)
- SET_TX_DESC_AGG_ENABLE(pdesc, 1);
+ set_tx_desc_agg_enable(pdesc, 1);
/* For AMPDU, we must insert SSN into TX_DESC */
- SET_TX_DESC_SEQ(pdesc, seq_number);
+ set_tx_desc_seq(pdesc, seq_number);
/* Protection mode related */
/* For 92S, if RTS/CTS are set, HW will execute RTS. */
/* We choose only one protection mode to execute */
- SET_TX_DESC_RTS_ENABLE(pdesc, ((ptcb_desc->rts_enable &&
- !ptcb_desc->cts_enable) ? 1 : 0));
- SET_TX_DESC_CTS_ENABLE(pdesc, ((ptcb_desc->cts_enable) ?
+ set_tx_desc_rts_enable(pdesc, ((ptcb_desc->rts_enable &&
+ !ptcb_desc->cts_enable) ?
+ 1 : 0));
+ set_tx_desc_cts_enable(pdesc, ((ptcb_desc->cts_enable) ?
1 : 0));
- SET_TX_DESC_RTS_STBC(pdesc, ((ptcb_desc->rts_stbc) ? 1 : 0));
+ set_tx_desc_rts_stbc(pdesc, ((ptcb_desc->rts_stbc) ? 1 : 0));
- SET_TX_DESC_RTS_RATE(pdesc, ptcb_desc->rts_rate);
- SET_TX_DESC_RTS_BANDWIDTH(pdesc, 0);
- SET_TX_DESC_RTS_SUB_CARRIER(pdesc, ptcb_desc->rts_sc);
- SET_TX_DESC_RTS_SHORT(pdesc, ((ptcb_desc->rts_rate <=
+ set_tx_desc_rts_rate(pdesc, ptcb_desc->rts_rate);
+ set_tx_desc_rts_bandwidth(pdesc, 0);
+ set_tx_desc_rts_sub_carrier(pdesc, ptcb_desc->rts_sc);
+ set_tx_desc_rts_short(pdesc, ((ptcb_desc->rts_rate <=
DESC_RATE54M) ?
(ptcb_desc->rts_use_shortpreamble ? 1 : 0)
: (ptcb_desc->rts_use_shortgi ? 1 : 0)));
@@ -411,27 +412,27 @@ void rtl92se_tx_fill_desc(struct ieee80211_hw *hw,
/* Set Bandwidth and sub-channel settings. */
if (bw_40) {
if (ptcb_desc->packet_bw) {
- SET_TX_DESC_TX_BANDWIDTH(pdesc, 1);
+ set_tx_desc_tx_bandwidth(pdesc, 1);
/* use duplicated mode */
- SET_TX_DESC_TX_SUB_CARRIER(pdesc, 0);
+ set_tx_desc_tx_sub_carrier(pdesc, 0);
} else {
- SET_TX_DESC_TX_BANDWIDTH(pdesc, 0);
- SET_TX_DESC_TX_SUB_CARRIER(pdesc,
+ set_tx_desc_tx_bandwidth(pdesc, 0);
+ set_tx_desc_tx_sub_carrier(pdesc,
mac->cur_40_prime_sc);
}
} else {
- SET_TX_DESC_TX_BANDWIDTH(pdesc, 0);
- SET_TX_DESC_TX_SUB_CARRIER(pdesc, 0);
+ set_tx_desc_tx_bandwidth(pdesc, 0);
+ set_tx_desc_tx_sub_carrier(pdesc, 0);
}
/* 3 Fill necessary fields in the first descriptor */
/*DWORD 0*/
- SET_TX_DESC_LINIP(pdesc, 0);
- SET_TX_DESC_OFFSET(pdesc, 32);
- SET_TX_DESC_PKT_SIZE(pdesc, (u16) skb->len);
+ set_tx_desc_linip(pdesc, 0);
+ set_tx_desc_offset(pdesc, 32);
+ set_tx_desc_pkt_size(pdesc, (u16)skb->len);
/*DWORD 1*/
- SET_TX_DESC_RA_BRSR_ID(pdesc, ptcb_desc->ratr_index);
+ set_tx_desc_ra_brsr_id(pdesc, ptcb_desc->ratr_index);
/* Fill security related */
if (info->control.hw_key) {
@@ -441,62 +442,63 @@ void rtl92se_tx_fill_desc(struct ieee80211_hw *hw,
switch (keyconf->cipher) {
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
- SET_TX_DESC_SEC_TYPE(pdesc, 0x1);
+ set_tx_desc_sec_type(pdesc, 0x1);
break;
case WLAN_CIPHER_SUITE_TKIP:
- SET_TX_DESC_SEC_TYPE(pdesc, 0x2);
+ set_tx_desc_sec_type(pdesc, 0x2);
break;
case WLAN_CIPHER_SUITE_CCMP:
- SET_TX_DESC_SEC_TYPE(pdesc, 0x3);
+ set_tx_desc_sec_type(pdesc, 0x3);
break;
default:
- SET_TX_DESC_SEC_TYPE(pdesc, 0x0);
+ set_tx_desc_sec_type(pdesc, 0x0);
break;
}
}
/* Set Packet ID */
- SET_TX_DESC_PACKET_ID(pdesc, 0);
+ set_tx_desc_packet_id(pdesc, 0);
/* We will assign management queue to BK. */
- SET_TX_DESC_QUEUE_SEL(pdesc, fw_qsel);
+ set_tx_desc_queue_sel(pdesc, fw_qsel);
/* Always enable all rate fallback ranges */
- SET_TX_DESC_DATA_RATE_FB_LIMIT(pdesc, 0x1F);
+ set_tx_desc_data_rate_fb_limit(pdesc, 0x1F);
/* Fix: I don't know why hw uses 6.5M to tx when this is set */
- SET_TX_DESC_USER_RATE(pdesc,
+ set_tx_desc_user_rate(pdesc,
ptcb_desc->use_driver_rate ? 1 : 0);
/* Set NON_QOS bit. */
if (!ieee80211_is_data_qos(fc))
- SET_TX_DESC_NON_QOS(pdesc, 1);
+ set_tx_desc_non_qos(pdesc, 1);
}
/* Fill fields that are required to be initialized
* in all of the descriptors */
/*DWORD 0 */
- SET_TX_DESC_FIRST_SEG(pdesc, (firstseg ? 1 : 0));
- SET_TX_DESC_LAST_SEG(pdesc, (lastseg ? 1 : 0));
+ set_tx_desc_first_seg(pdesc, (firstseg ? 1 : 0));
+ set_tx_desc_last_seg(pdesc, (lastseg ? 1 : 0));
/* DWORD 7 */
- SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16) skb->len);
+ set_tx_desc_tx_buffer_size(pdesc, (u16)skb->len);
/* DWORD 8 */
- SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping);
+ set_tx_desc_tx_buffer_address(pdesc, mapping);
RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, "\n");
}
-void rtl92se_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc,
- bool firstseg, bool lastseg, struct sk_buff *skb)
+void rtl92se_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc8,
+ bool firstseg, bool lastseg, struct sk_buff *skb)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
struct rtl_tcb_desc *tcb_desc = (struct rtl_tcb_desc *)(skb->cb);
+ __le32 *pdesc = (__le32 *)pdesc8;
dma_addr_t mapping = pci_map_single(rtlpci->pdev, skb->data, skb->len,
PCI_DMA_TODEVICE);
@@ -512,53 +514,55 @@ void rtl92se_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc,
/* This bit indicates this packet is used for FW download. */
if (tcb_desc->cmd_or_init == DESC_PACKET_TYPE_INIT) {
/* For firmware download we only need to set LINIP */
- SET_TX_DESC_LINIP(pdesc, tcb_desc->last_inipkt);
+ set_tx_desc_linip(pdesc, tcb_desc->last_inipkt);
/* 92SE must set this to 1, or firmware download hits a HW DMA error */
- SET_TX_DESC_FIRST_SEG(pdesc, 1);
- SET_TX_DESC_LAST_SEG(pdesc, 1);
+ set_tx_desc_first_seg(pdesc, 1);
+ set_tx_desc_last_seg(pdesc, 1);
/* 92SE need not set TX packet size during firmware download */
- SET_TX_DESC_PKT_SIZE(pdesc, (u16)(skb->len));
- SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16)(skb->len));
- SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping);
+ set_tx_desc_pkt_size(pdesc, (u16)(skb->len));
+ set_tx_desc_tx_buffer_size(pdesc, (u16)(skb->len));
+ set_tx_desc_tx_buffer_address(pdesc, mapping);
wmb();
- SET_TX_DESC_OWN(pdesc, 1);
+ set_tx_desc_own(pdesc, 1);
} else { /* H2C Command Desc format (Host TXCMD) */
/* 92SE must set this to 1, or firmware download hits a HW DMA error */
- SET_TX_DESC_FIRST_SEG(pdesc, 1);
- SET_TX_DESC_LAST_SEG(pdesc, 1);
+ set_tx_desc_first_seg(pdesc, 1);
+ set_tx_desc_last_seg(pdesc, 1);
- SET_TX_DESC_OFFSET(pdesc, 0x20);
+ set_tx_desc_offset(pdesc, 0x20);
/* Buffer size + command header */
- SET_TX_DESC_PKT_SIZE(pdesc, (u16)(skb->len));
+ set_tx_desc_pkt_size(pdesc, (u16)(skb->len));
/* Fixed queue of H2C command */
- SET_TX_DESC_QUEUE_SEL(pdesc, 0x13);
-
- SET_BITS_TO_LE_4BYTE(skb->data, 24, 7, rtlhal->h2c_txcmd_seq);
+ set_tx_desc_queue_sel(pdesc, 0x13);
- SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16)(skb->len));
- SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping);
+ le32p_replace_bits((__le32 *)skb->data, rtlhal->h2c_txcmd_seq,
+ GENMASK(30, 24));
+ set_tx_desc_tx_buffer_size(pdesc, (u16)(skb->len));
+ set_tx_desc_tx_buffer_address(pdesc, mapping);
wmb();
- SET_TX_DESC_OWN(pdesc, 1);
+ set_tx_desc_own(pdesc, 1);
}
}
-void rtl92se_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
+void rtl92se_set_desc(struct ieee80211_hw *hw, u8 *pdesc8, bool istx,
u8 desc_name, u8 *val)
{
+ __le32 *pdesc = (__le32 *)pdesc8;
+
if (istx) {
switch (desc_name) {
case HW_DESC_OWN:
wmb();
- SET_TX_DESC_OWN(pdesc, 1);
+ set_tx_desc_own(pdesc, 1);
break;
case HW_DESC_TX_NEXTDESC_ADDR:
- SET_TX_DESC_NEXT_DESC_ADDRESS(pdesc, *(u32 *) val);
+ set_tx_desc_next_desc_address(pdesc, *(u32 *)val);
break;
default:
WARN_ONCE(true, "rtl8192se: ERR txdesc :%d not processed\n",
@@ -569,16 +573,16 @@ void rtl92se_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
switch (desc_name) {
case HW_DESC_RXOWN:
wmb();
- SET_RX_STATUS_DESC_OWN(pdesc, 1);
+ set_rx_status_desc_own(pdesc, 1);
break;
case HW_DESC_RXBUFF_ADDR:
- SET_RX_STATUS__DESC_BUFF_ADDR(pdesc, *(u32 *) val);
+ set_rx_status__desc_buff_addr(pdesc, *(u32 *)val);
break;
case HW_DESC_RXPKT_LEN:
- SET_RX_STATUS_DESC_PKT_LEN(pdesc, *(u32 *) val);
+ set_rx_status_desc_pkt_len(pdesc, *(u32 *)val);
break;
case HW_DESC_RXERO:
- SET_RX_STATUS_DESC_EOR(pdesc, 1);
+ set_rx_status_desc_eor(pdesc, 1);
break;
default:
WARN_ONCE(true, "rtl8192se: ERR rxdesc :%d not processed\n",
@@ -589,17 +593,18 @@ void rtl92se_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
}
u64 rtl92se_get_desc(struct ieee80211_hw *hw,
- u8 *desc, bool istx, u8 desc_name)
+ u8 *desc8, bool istx, u8 desc_name)
{
u32 ret = 0;
+ __le32 *desc = (__le32 *)desc8;
if (istx) {
switch (desc_name) {
case HW_DESC_OWN:
- ret = GET_TX_DESC_OWN(desc);
+ ret = get_tx_desc_own(desc);
break;
case HW_DESC_TXBUFF_ADDR:
- ret = GET_TX_DESC_TX_BUFFER_ADDRESS(desc);
+ ret = get_tx_desc_tx_buffer_address(desc);
break;
default:
WARN_ONCE(true, "rtl8192se: ERR txdesc :%d not processed\n",
@@ -609,13 +614,13 @@ u64 rtl92se_get_desc(struct ieee80211_hw *hw,
} else {
switch (desc_name) {
case HW_DESC_OWN:
- ret = GET_RX_STATUS_DESC_OWN(desc);
+ ret = get_rx_status_desc_own(desc);
break;
case HW_DESC_RXPKT_LEN:
- ret = GET_RX_STATUS_DESC_PKT_LEN(desc);
+ ret = get_rx_status_desc_pkt_len(desc);
break;
case HW_DESC_RXBUFF_ADDR:
- ret = GET_RX_STATUS_DESC_BUFF_ADDR(desc);
+ ret = get_rx_status_desc_buff_addr(desc);
break;
default:
WARN_ONCE(true, "rtl8192se: ERR rxdesc :%d not processed\n",
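
The hunks above for rtl8192se/trx.c swap the upper-case GET_/SET_ descriptor macros for lower-case inline accessors that take a typed __le32 *, which is why each entry point now casts its u8 *pdesc8 argument once and lets sparse check endianness from there on. The old SET_BITS_TO_LE_4BYTE(skb->data, 24, 7, ...) call (start bit 24, width 7) becomes le32p_replace_bits(..., GENMASK(30, 24)), the same bit span expressed as a mask. A minimal sketch of the accessor shape, assuming an illustrative OWN-in-bit-31 layout rather than the real rtl8192se register map:

#include <linux/bits.h>
#include <linux/bitfield.h>

/* Illustrative only: OWN in bit 31 of dword 0 is an assumed
 * layout, not copied from the rtl8192se descriptor definition.
 */
static inline void example_set_tx_desc_own(__le32 *pdesc, u32 val)
{
	le32p_replace_bits(pdesc, val, BIT(31));
}

static inline u32 example_get_tx_desc_own(__le32 *pdesc)
{
	return le32_get_bits(*pdesc, BIT(31));
}
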
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c
index 54a3aec1dfa7..772aecedf0b4 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c
@@ -37,13 +37,12 @@ u32 rtl8723e_phy_query_rf_reg(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
u32 original_value = 0, readback_value, bitshift;
struct rtl_phy *rtlphy = &rtlpriv->phy;
- unsigned long flags;
RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
"regaddr(%#x), rfpath(%#x), bitmask(%#x)\n",
regaddr, rfpath, bitmask);
- spin_lock_irqsave(&rtlpriv->locks.rf_lock, flags);
+ spin_lock(&rtlpriv->locks.rf_lock);
if (rtlphy->rf_mode != RF_OP_BY_FW) {
original_value = rtl8723_phy_rf_serial_read(hw,
@@ -53,7 +52,7 @@ u32 rtl8723e_phy_query_rf_reg(struct ieee80211_hw *hw,
bitshift = rtl8723_phy_calculate_bit_shift(bitmask);
readback_value = (original_value & bitmask) >> bitshift;
- spin_unlock_irqrestore(&rtlpriv->locks.rf_lock, flags);
+ spin_unlock(&rtlpriv->locks.rf_lock);
RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
"regaddr(%#x), rfpath(%#x), bitmask(%#x), original_value(%#x)\n",
@@ -69,13 +68,12 @@ void rtl8723e_phy_set_rf_reg(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_phy *rtlphy = &rtlpriv->phy;
u32 original_value = 0, bitshift;
- unsigned long flags;
RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
"regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
regaddr, bitmask, data, rfpath);
- spin_lock_irqsave(&rtlpriv->locks.rf_lock, flags);
+ spin_lock(&rtlpriv->locks.rf_lock);
if (rtlphy->rf_mode != RF_OP_BY_FW) {
if (bitmask != RFREG_OFFSET_MASK) {
@@ -99,7 +97,7 @@ void rtl8723e_phy_set_rf_reg(struct ieee80211_hw *hw,
_rtl8723e_phy_fw_rf_serial_write(hw, rfpath, regaddr, data);
}
- spin_unlock_irqrestore(&rtlpriv->locks.rf_lock, flags);
+ spin_unlock(&rtlpriv->locks.rf_lock);
RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
"regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
@@ -485,15 +483,12 @@ bool rtl8723e_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
enum radio_path rfpath)
{
int i;
- bool rtstatus = true;
u32 *radioa_array_table;
u16 radioa_arraylen;
radioa_arraylen = RTL8723ERADIOA_1TARRAYLENGTH;
radioa_array_table = RTL8723E_RADIOA_1TARRAY;
- rtstatus = true;
-
switch (rfpath) {
case RF90_PATH_A:
for (i = 0; i < radioa_arraylen; i = i + 2) {
@@ -1341,9 +1336,9 @@ void rtl8723e_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery)
long result[4][8];
u8 i, final_candidate;
- bool b_patha_ok, b_pathb_ok;
- long reg_e94, reg_e9c, reg_ea4, reg_eac, reg_eb4, reg_ebc, reg_ec4,
- reg_ecc, reg_tmp = 0;
+ bool b_patha_ok;
+ long reg_e94, reg_e9c, reg_ea4, reg_eb4, reg_ebc,
+ reg_tmp = 0;
bool is12simular, is13simular, is23simular;
u32 iqk_bb_reg[10] = {
ROFDM0_XARXIQIMBALANCE,
@@ -1372,7 +1367,6 @@ void rtl8723e_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery)
}
final_candidate = 0xff;
b_patha_ok = false;
- b_pathb_ok = false;
is12simular = false;
is23simular = false;
is13simular = false;
@@ -1412,23 +1406,16 @@ void rtl8723e_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery)
reg_e94 = result[i][0];
reg_e9c = result[i][1];
reg_ea4 = result[i][2];
- reg_eac = result[i][3];
reg_eb4 = result[i][4];
reg_ebc = result[i][5];
- reg_ec4 = result[i][6];
- reg_ecc = result[i][7];
}
if (final_candidate != 0xff) {
rtlphy->reg_e94 = reg_e94 = result[final_candidate][0];
rtlphy->reg_e9c = reg_e9c = result[final_candidate][1];
reg_ea4 = result[final_candidate][2];
- reg_eac = result[final_candidate][3];
rtlphy->reg_eb4 = reg_eb4 = result[final_candidate][4];
rtlphy->reg_ebc = reg_ebc = result[final_candidate][5];
- reg_ec4 = result[final_candidate][6];
- reg_ecc = result[final_candidate][7];
b_patha_ok = true;
- b_pathb_ok = true;
} else {
rtlphy->reg_e94 = rtlphy->reg_eb4 = 0x100;
rtlphy->reg_e9c = rtlphy->reg_ebc = 0x0;
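
The rf_lock changes in this file (and the matching rtl8723be and rtl8821ae hunks below) drop the irqsave variant, which is justified only if rf_lock is never taken from hard-IRQ context; the patch itself does not show that proof, so treat it as the assumed invariant. A sketch of how that invariant could be made self-checking:

#include <linux/types.h>
#include <linux/bug.h>
#include <linux/preempt.h>
#include <linux/spinlock.h>

/* Sketch under the assumption that rf_lock has no hard-IRQ users;
 * do_read stands in for the serialized RF serial access.
 */
static u32 example_rf_locked_read(spinlock_t *rf_lock, u32 (*do_read)(void))
{
	u32 val;

	WARN_ON_ONCE(in_irq());	/* the invariant behind dropping irqsave */
	spin_lock(rf_lock);
	val = do_read();
	spin_unlock(rf_lock);
	return val;
}
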
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c
index aa8a0950fcea..9528ac3f3b87 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c
@@ -33,19 +33,18 @@ u32 rtl8723be_phy_query_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
u32 original_value, readback_value, bitshift;
- unsigned long flags;
RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
"regaddr(%#x), rfpath(%#x), bitmask(%#x)\n",
regaddr, rfpath, bitmask);
- spin_lock_irqsave(&rtlpriv->locks.rf_lock, flags);
+ spin_lock(&rtlpriv->locks.rf_lock);
original_value = rtl8723_phy_rf_serial_read(hw, rfpath, regaddr);
bitshift = rtl8723_phy_calculate_bit_shift(bitmask);
readback_value = (original_value & bitmask) >> bitshift;
- spin_unlock_irqrestore(&rtlpriv->locks.rf_lock, flags);
+ spin_unlock(&rtlpriv->locks.rf_lock);
RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
"regaddr(%#x), rfpath(%#x), bitmask(%#x), original_value(%#x)\n",
@@ -59,13 +58,12 @@ void rtl8723be_phy_set_rf_reg(struct ieee80211_hw *hw, enum radio_path path,
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
u32 original_value, bitshift;
- unsigned long flags;
RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
"regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
regaddr, bitmask, data, path);
- spin_lock_irqsave(&rtlpriv->locks.rf_lock, flags);
+ spin_lock(&rtlpriv->locks.rf_lock);
if (bitmask != RFREG_OFFSET_MASK) {
original_value = rtl8723_phy_rf_serial_read(hw, path,
@@ -77,7 +75,7 @@ void rtl8723be_phy_set_rf_reg(struct ieee80211_hw *hw, enum radio_path path,
rtl8723_phy_rf_serial_write(hw, path, regaddr, data);
- spin_unlock_irqrestore(&rtlpriv->locks.rf_lock, flags);
+ spin_unlock(&rtlpriv->locks.rf_lock);
RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
"regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
@@ -2251,8 +2249,8 @@ void rtl8723be_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery)
long result[4][8];
u8 i, final_candidate, idx;
bool b_patha_ok, b_pathb_ok;
- long reg_e94, reg_e9c, reg_ea4, reg_eac, reg_eb4, reg_ebc, reg_ec4;
- long reg_ecc, reg_tmp = 0;
+ long reg_e94, reg_e9c, reg_ea4, reg_eb4, reg_ebc, reg_ec4;
+ long reg_tmp = 0;
bool is12simular, is13simular, is23simular;
u32 iqk_bb_reg[9] = {
ROFDM0_XARXIQIMBALANCE,
@@ -2334,11 +2332,9 @@ void rtl8723be_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery)
reg_e94 = result[i][0];
reg_e9c = result[i][1];
reg_ea4 = result[i][2];
- reg_eac = result[i][3];
reg_eb4 = result[i][4];
reg_ebc = result[i][5];
reg_ec4 = result[i][6];
- reg_ecc = result[i][7];
}
if (final_candidate != 0xff) {
reg_e94 = result[final_candidate][0];
@@ -2346,13 +2342,11 @@ void rtl8723be_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery)
reg_e9c = result[final_candidate][1];
rtlphy->reg_e9c = reg_e9c;
reg_ea4 = result[final_candidate][2];
- reg_eac = result[final_candidate][3];
reg_eb4 = result[final_candidate][4];
rtlphy->reg_eb4 = reg_eb4;
reg_ebc = result[final_candidate][5];
rtlphy->reg_ebc = reg_ebc;
reg_ec4 = result[final_candidate][6];
- reg_ecc = result[final_candidate][7];
b_patha_ok = true;
b_pathb_ok = true;
} else {
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c
index 18ce2856a91b..37036e653e56 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c
@@ -223,7 +223,6 @@ bool rtl8723_cmd_send_packet(struct ieee80211_hw *hw,
struct rtl8192_tx_ring *ring;
struct rtl_tx_desc *pdesc;
struct sk_buff *pskb = NULL;
- u8 own;
unsigned long flags;
ring = &rtlpci->tx_ring[BEACON_QUEUE];
@@ -233,9 +232,6 @@ bool rtl8723_cmd_send_packet(struct ieee80211_hw *hw,
spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
pdesc = &ring->desc[0];
- own = (u8)rtlpriv->cfg->ops->get_desc(hw, (u8 *)pdesc, true,
- HW_DESC_OWN);
-
rtlpriv->cfg->ops->fill_tx_cmddesc(hw, (u8 *)pdesc, 1, 1, skb);
__skb_queue_tail(&ring->queue, skb);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/phy_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/phy_common.c
index aae14c68bf69..debecc623a01 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/phy_common.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/phy_common.c
@@ -89,12 +89,10 @@ u32 rtl8723_phy_rf_serial_read(struct ieee80211_hw *hw,
(newoffset << 23) | BLSSIREADEDGE;
rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD,
tmplong & (~BLSSIREADEDGE));
- mdelay(1);
rtl_set_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD, tmplong2);
- mdelay(1);
rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD,
tmplong | BLSSIREADEDGE);
- mdelay(1);
+ udelay(120);
if (rfpath == RF90_PATH_A)
rfpi_enable = (u8) rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER1,
BIT(8));
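
The phy_common.c hunk above compresses the LSSI read-edge sequence: three mdelay(1) busy-waits, one after each bbreg write, become a single udelay(120) after the final edge toggle. Assuming those delays were pure settle time, each RF serial read drops from about 3000 us of spinning to 120 us, a 25x reduction:

#include <linux/delay.h>

/* Settle-time budget per RF serial read (sketch):
 * before: 3 * mdelay(1) = 3000 us
 * after:  1 * udelay(120) = 120 us
 */
static void example_lssi_settle(void)
{
	udelay(120);	/* replaces the three scattered mdelay(1) calls */
}
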
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
index 979e434a4e73..b8a2b2326902 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
@@ -139,19 +139,18 @@ u32 rtl8821ae_phy_query_rf_reg(struct ieee80211_hw *hw,
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
u32 original_value, readback_value, bitshift;
- unsigned long flags;
RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
"regaddr(%#x), rfpath(%#x), bitmask(%#x)\n",
regaddr, rfpath, bitmask);
- spin_lock_irqsave(&rtlpriv->locks.rf_lock, flags);
+ spin_lock(&rtlpriv->locks.rf_lock);
original_value = _rtl8821ae_phy_rf_serial_read(hw, rfpath, regaddr);
bitshift = _rtl8821ae_phy_calculate_bit_shift(bitmask);
readback_value = (original_value & bitmask) >> bitshift;
- spin_unlock_irqrestore(&rtlpriv->locks.rf_lock, flags);
+ spin_unlock(&rtlpriv->locks.rf_lock);
RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
"regaddr(%#x), rfpath(%#x), bitmask(%#x), original_value(%#x)\n",
@@ -166,13 +165,12 @@ void rtl8821ae_phy_set_rf_reg(struct ieee80211_hw *hw,
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
u32 original_value, bitshift;
- unsigned long flags;
RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
"regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
regaddr, bitmask, data, rfpath);
- spin_lock_irqsave(&rtlpriv->locks.rf_lock, flags);
+ spin_lock(&rtlpriv->locks.rf_lock);
if (bitmask != RFREG_OFFSET_MASK) {
original_value =
@@ -183,7 +181,7 @@ void rtl8821ae_phy_set_rf_reg(struct ieee80211_hw *hw,
_rtl8821ae_phy_rf_serial_write(hw, rfpath, regaddr, data);
- spin_unlock_irqrestore(&rtlpriv->locks.rf_lock, flags);
+ spin_unlock(&rtlpriv->locks.rf_lock);
RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
"regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
@@ -2076,7 +2074,6 @@ static bool _rtl8821ae_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
bool rtl8812ae_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
enum radio_path rfpath)
{
- bool rtstatus = true;
u32 *radioa_array_table_a, *radioa_array_table_b;
u16 radioa_arraylen_a, radioa_arraylen_b;
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -2088,7 +2085,6 @@ bool rtl8812ae_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
"Radio_A:RTL8821AE_RADIOA_ARRAY %d\n", radioa_arraylen_a);
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Radio No %x\n", rfpath);
- rtstatus = true;
switch (rfpath) {
case RF90_PATH_A:
return __rtl8821ae_phy_config_with_headerfile(hw,
@@ -2111,7 +2107,6 @@ bool rtl8812ae_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
bool rtl8821ae_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
enum radio_path rfpath)
{
- bool rtstatus = true;
u32 *radioa_array_table;
u16 radioa_arraylen;
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -2121,7 +2116,6 @@ bool rtl8821ae_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
"Radio_A:RTL8821AE_RADIOA_ARRAY %d\n", radioa_arraylen);
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Radio No %x\n", rfpath);
- rtstatus = true;
switch (rfpath) {
case RF90_PATH_A:
return __rtl8821ae_phy_config_with_headerfile(hw,
@@ -2351,7 +2345,7 @@ static s8 _rtl8812ae_phy_get_txpower_limit(struct ieee80211_hw *hw,
struct rtl_phy *rtlphy = &rtlpriv->phy;
short band_temp = -1, regulation = -1, bandwidth_temp = -1,
rate_section = -1, channel_temp = -1;
- u16 bd, regu, bdwidth, sec, chnl;
+ u16 regu, bdwidth, sec, chnl;
s8 power_limit = MAX_POWER_INDEX;
if (rtlefuse->eeprom_regulatory == 2)
@@ -2472,7 +2466,6 @@ static s8 _rtl8812ae_phy_get_txpower_limit(struct ieee80211_hw *hw,
return MAX_POWER_INDEX;
}
- bd = band_temp;
regu = regulation;
bdwidth = bandwidth_temp;
sec = rate_section;
@@ -3553,8 +3546,6 @@ void rtl8821ae_phy_sw_chnl_callback(struct ieee80211_hw *hw)
if (rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) {
if (36 <= channel && channel <= 64)
data = 0x114E9;
- else if (100 <= channel && channel <= 140)
- data = 0x110E9;
else
data = 0x110E9;
rtl8821ae_phy_set_rf_reg(hw, path, RF_APK,
diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c
index 4b59f3b46b28..348b0072cdd6 100644
--- a/drivers/net/wireless/realtek/rtlwifi/usb.c
+++ b/drivers/net/wireless/realtek/rtlwifi/usb.c
@@ -1021,8 +1021,10 @@ int rtl_usb_probe(struct usb_interface *intf,
rtlpriv->hw = hw;
rtlpriv->usb_data = kcalloc(RTL_USB_MAX_RX_COUNT, sizeof(u32),
GFP_KERNEL);
- if (!rtlpriv->usb_data)
+ if (!rtlpriv->usb_data) {
+ ieee80211_free_hw(hw);
return -ENOMEM;
+ }
/* this spin lock must be initialized early */
spin_lock_init(&rtlpriv->locks.usb_lock);
@@ -1083,6 +1085,7 @@ error_out2:
_rtl_usb_io_handler_release(hw);
usb_put_dev(udev);
complete(&rtlpriv->firmware_loading_complete);
+ kfree(rtlpriv->usb_data);
return -ENODEV;
}
EXPORT_SYMBOL(rtl_usb_probe);
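
The two usb.c hunks plug leaks on rtl_usb_probe() error paths: the ieee80211_hw leaked when the usb_data allocation failed, and usb_data leaked on the later error_out2 exit. The rule they restore is release-in-reverse-order; a compact sketch with placeholder names (example_ops and the buffer size are not from the driver):

#include <linux/slab.h>
#include <net/mac80211.h>

static const struct ieee80211_ops example_ops;	/* stub for the sketch */

static int example_probe_alloc(struct ieee80211_hw **out_hw, u32 **out_buf)
{
	struct ieee80211_hw *hw;
	u32 *buf;

	hw = ieee80211_alloc_hw(0, &example_ops);
	if (!hw)
		return -ENOMEM;

	buf = kcalloc(32, sizeof(u32), GFP_KERNEL);
	if (!buf) {
		ieee80211_free_hw(hw);	/* the first hunk adds exactly this */
		return -ENOMEM;
	}

	*out_hw = hw;
	*out_buf = buf;
	return 0;	/* any later failure must kfree(buf) as well */
}
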
diff --git a/drivers/net/wireless/realtek/rtw88/Makefile b/drivers/net/wireless/realtek/rtw88/Makefile
index 77edee2df8b8..15e12155a04c 100644
--- a/drivers/net/wireless/realtek/rtw88/Makefile
+++ b/drivers/net/wireless/realtek/rtw88/Makefile
@@ -14,6 +14,7 @@ rtw88-y += main.o \
fw.o \
ps.o \
sec.o \
+ bf.o \
regd.o
rtw88-$(CONFIG_RTW88_8822BE) += rtw8822b.o rtw8822b_table.o
diff --git a/drivers/net/wireless/realtek/rtw88/bf.c b/drivers/net/wireless/realtek/rtw88/bf.c
new file mode 100644
index 000000000000..fda771d23f71
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/bf.c
@@ -0,0 +1,400 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2018-2019 Realtek Corporation.
+ */
+
+#include "main.h"
+#include "reg.h"
+#include "bf.h"
+#include "debug.h"
+
+void rtw_bf_disassoc(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
+ struct rtw_bfee *bfee = &rtwvif->bfee;
+ struct rtw_bf_info *bfinfo = &rtwdev->bf_info;
+
+ if (bfee->role == RTW_BFEE_NONE)
+ return;
+
+ if (bfee->role == RTW_BFEE_MU)
+ bfinfo->bfer_mu_cnt--;
+ else if (bfee->role == RTW_BFEE_SU)
+ bfinfo->bfer_su_cnt--;
+
+ chip->ops->config_bfee(rtwdev, rtwvif, bfee, false);
+
+ bfee->role = RTW_BFEE_NONE;
+}
+
+void rtw_bf_assoc(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf)
+{
+ struct ieee80211_hw *hw = rtwdev->hw;
+ struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
+ struct rtw_bfee *bfee = &rtwvif->bfee;
+ struct rtw_bf_info *bfinfo = &rtwdev->bf_info;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ struct ieee80211_sta *sta;
+ struct ieee80211_sta_vht_cap *vht_cap;
+ struct ieee80211_sta_vht_cap *ic_vht_cap;
+ const u8 *bssid = bss_conf->bssid;
+ u32 sound_dim;
+ u8 bfee_role = RTW_BFEE_NONE;
+ u8 i;
+
+ if (!(chip->band & RTW_BAND_5G))
+ return;
+
+ rcu_read_lock();
+
+ sta = ieee80211_find_sta(vif, bssid);
+ if (!sta) {
+ rtw_warn(rtwdev, "failed to find station entry for bss %pM\n",
+ bssid);
+ goto out_unlock;
+ }
+
+ ic_vht_cap = &hw->wiphy->bands[NL80211_BAND_5GHZ]->vht_cap;
+ vht_cap = &sta->vht_cap;
+
+ if ((ic_vht_cap->cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE) &&
+ (vht_cap->cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) {
+ if (bfinfo->bfer_mu_cnt >= chip->bfer_mu_max_num) {
+ rtw_dbg(rtwdev, RTW_DBG_BF, "mu bfer number over limit\n");
+ goto out_unlock;
+ }
+
+ ether_addr_copy(bfee->mac_addr, bssid);
+ bfee_role = RTW_BFEE_MU;
+ bfee->p_aid = (bssid[5] << 1) | (bssid[4] >> 7);
+ bfee->aid = bss_conf->aid;
+ bfinfo->bfer_mu_cnt++;
+
+ chip->ops->config_bfee(rtwdev, rtwvif, bfee, true);
+ } else if ((ic_vht_cap->cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE) &&
+ (vht_cap->cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)) {
+ if (bfinfo->bfer_su_cnt >= chip->bfer_su_max_num) {
+ rtw_dbg(rtwdev, RTW_DBG_BF, "su bfer number over limit\n");
+ goto out_unlock;
+ }
+
+ sound_dim = vht_cap->cap &
+ IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK;
+ sound_dim >>= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT;
+
+ ether_addr_copy(bfee->mac_addr, bssid);
+ bfee_role = RTW_BFEE_SU;
+ bfee->sound_dim = (u8)sound_dim;
+ bfee->g_id = 0;
+ bfee->p_aid = (bssid[5] << 1) | (bssid[4] >> 7);
+ bfinfo->bfer_su_cnt++;
+ for (i = 0; i < chip->bfer_su_max_num; i++) {
+ if (!test_bit(i, bfinfo->bfer_su_reg_maping)) {
+ set_bit(i, bfinfo->bfer_su_reg_maping);
+ bfee->su_reg_index = i;
+ break;
+ }
+ }
+
+ chip->ops->config_bfee(rtwdev, rtwvif, bfee, true);
+ }
+
+out_unlock:
+ bfee->role = bfee_role;
+ rcu_read_unlock();
+}
+
+void rtw_bf_init_bfer_entry_mu(struct rtw_dev *rtwdev,
+ struct mu_bfer_init_para *param)
+{
+ u16 mu_bf_ctl = 0;
+ u8 *addr = param->bfer_address;
+ int i;
+
+ for (i = 0; i < ETH_ALEN; i++)
+ rtw_write8(rtwdev, REG_ASSOCIATED_BFMER0_INFO + i, addr[i]);
+ rtw_write16(rtwdev, REG_ASSOCIATED_BFMER0_INFO + 6, param->paid);
+ rtw_write16(rtwdev, REG_TX_CSI_RPT_PARAM_BW20, param->csi_para);
+
+ mu_bf_ctl = rtw_read16(rtwdev, REG_WMAC_MU_BF_CTL) & 0xC000;
+ mu_bf_ctl |= param->my_aid | (param->csi_length_sel << 12);
+ rtw_write16(rtwdev, REG_WMAC_MU_BF_CTL, mu_bf_ctl);
+}
+
+void rtw_bf_cfg_sounding(struct rtw_dev *rtwdev, struct rtw_vif *vif,
+ enum rtw_trx_desc_rate rate)
+{
+ u32 psf_ctl = 0;
+ u8 csi_rsc = 0x1;
+
+ psf_ctl = rtw_read32(rtwdev, REG_BBPSF_CTRL) |
+ BIT_WMAC_USE_NDPARATE |
+ (csi_rsc << 13);
+
+ rtw_write8(rtwdev, REG_SND_PTCL_CTRL, RTW_SND_CTRL_SOUNDING);
+ rtw_write8(rtwdev, REG_SND_PTCL_CTRL + 3, 0x26);
+ rtw_write8_clr(rtwdev, REG_RXFLTMAP1, BIT_RXFLTMAP1_BF_REPORT_POLL);
+ rtw_write8_clr(rtwdev, REG_RXFLTMAP4, BIT_RXFLTMAP4_BF_REPORT_POLL);
+
+ if (vif->net_type == RTW_NET_AP_MODE)
+ rtw_write32(rtwdev, REG_BBPSF_CTRL, psf_ctl | BIT(12));
+ else
+ rtw_write32(rtwdev, REG_BBPSF_CTRL, psf_ctl & ~BIT(12));
+}
+
+void rtw_bf_cfg_mu_bfee(struct rtw_dev *rtwdev, struct cfg_mumimo_para *param)
+{
+ u8 mu_tbl_sel;
+ u8 mu_valid;
+
+ mu_valid = rtw_read8(rtwdev, REG_MU_TX_CTL) &
+ ~BIT_MASK_R_MU_TABLE_VALID;
+
+ rtw_write8(rtwdev, REG_MU_TX_CTL,
+ (mu_valid | BIT(0) | BIT(1)) & ~(BIT(7)));
+
+ mu_tbl_sel = rtw_read8(rtwdev, REG_MU_TX_CTL + 1) & 0xF8;
+
+ rtw_write8(rtwdev, REG_MU_TX_CTL + 1, mu_tbl_sel);
+ rtw_write32(rtwdev, REG_MU_STA_GID_VLD, param->given_gid_tab[0]);
+ rtw_write32(rtwdev, REG_MU_STA_USER_POS_INFO, param->given_user_pos[0]);
+ rtw_write32(rtwdev, REG_MU_STA_USER_POS_INFO + 4,
+ param->given_user_pos[1]);
+
+ rtw_write8(rtwdev, REG_MU_TX_CTL + 1, mu_tbl_sel | 1);
+ rtw_write32(rtwdev, REG_MU_STA_GID_VLD, param->given_gid_tab[1]);
+ rtw_write32(rtwdev, REG_MU_STA_USER_POS_INFO, param->given_user_pos[2]);
+ rtw_write32(rtwdev, REG_MU_STA_USER_POS_INFO + 4,
+ param->given_user_pos[3]);
+}
+
+void rtw_bf_del_bfer_entry_mu(struct rtw_dev *rtwdev)
+{
+ rtw_write32(rtwdev, REG_ASSOCIATED_BFMER0_INFO, 0);
+ rtw_write32(rtwdev, REG_ASSOCIATED_BFMER0_INFO + 4, 0);
+ rtw_write16(rtwdev, REG_WMAC_MU_BF_CTL, 0);
+ rtw_write8(rtwdev, REG_MU_TX_CTL, 0);
+}
+
+void rtw_bf_del_sounding(struct rtw_dev *rtwdev)
+{
+ rtw_write8(rtwdev, REG_SND_PTCL_CTRL, 0);
+}
+
+void rtw_bf_enable_bfee_su(struct rtw_dev *rtwdev, struct rtw_vif *vif,
+ struct rtw_bfee *bfee)
+{
+ u8 nc_index = 1;
+ u8 nr_index = bfee->sound_dim;
+ u8 grouping = 0, codebookinfo = 1, coefficientsize = 3;
+ u32 addr_bfer_info, addr_csi_rpt, csi_param;
+ u8 i;
+
+ rtw_dbg(rtwdev, RTW_DBG_BF, "config as an su bfee\n");
+
+ switch (bfee->su_reg_index) {
+ case 1:
+ addr_bfer_info = REG_ASSOCIATED_BFMER1_INFO;
+ addr_csi_rpt = REG_TX_CSI_RPT_PARAM_BW20 + 2;
+ break;
+ case 0:
+ default:
+ addr_bfer_info = REG_ASSOCIATED_BFMER0_INFO;
+ addr_csi_rpt = REG_TX_CSI_RPT_PARAM_BW20;
+ break;
+ }
+
+ /* Sounding protocol control */
+ rtw_write8(rtwdev, REG_SND_PTCL_CTRL, RTW_SND_CTRL_SOUNDING);
+
+ /* MAC address/Partial AID of Beamformer */
+ for (i = 0; i < ETH_ALEN; i++)
+ rtw_write8(rtwdev, addr_bfer_info + i, bfee->mac_addr[i]);
+
+ csi_param = (u16)((coefficientsize << 10) |
+ (codebookinfo << 8) |
+ (grouping << 6) |
+ (nr_index << 3) |
+ nc_index);
+ rtw_write16(rtwdev, addr_csi_rpt, csi_param);
+
+ /* ndp rx standby timer */
+ rtw_write8(rtwdev, REG_SND_PTCL_CTRL + 3, RTW_NDP_RX_STANDBY_TIME);
+}
+
+/* nc index: 1 = 2T2R, 0 = 1T1R
+ * nr index: 1 = use Nsts, 0 = use reg setting
+ * codebookinfo: 1 = 802.11ac, 3 = 802.11n
+ */
+void rtw_bf_enable_bfee_mu(struct rtw_dev *rtwdev, struct rtw_vif *vif,
+ struct rtw_bfee *bfee)
+{
+ struct rtw_bf_info *bf_info = &rtwdev->bf_info;
+ struct mu_bfer_init_para param;
+ u8 nc_index = 1, nr_index = 1;
+ u8 grouping = 0, codebookinfo = 1, coefficientsize = 0;
+ u32 csi_param;
+
+ rtw_dbg(rtwdev, RTW_DBG_BF, "config as an mu bfee\n");
+
+ csi_param = (u16)((coefficientsize << 10) |
+ (codebookinfo << 8) |
+ (grouping << 6) |
+ (nr_index << 3) |
+ nc_index);
+
+ rtw_dbg(rtwdev, RTW_DBG_BF, "nc=%d nr=%d group=%d codebookinfo=%d coefficientsize=%d\n",
+ nc_index, nr_index, grouping, codebookinfo,
+ coefficientsize);
+
+ param.paid = bfee->p_aid;
+ param.csi_para = csi_param;
+ param.my_aid = bfee->aid & 0xfff;
+ param.csi_length_sel = HAL_CSI_SEG_4K;
+ ether_addr_copy(param.bfer_address, bfee->mac_addr);
+
+ rtw_bf_init_bfer_entry_mu(rtwdev, &param);
+
+ bf_info->cur_csi_rpt_rate = DESC_RATE6M;
+ rtw_bf_cfg_sounding(rtwdev, vif, DESC_RATE6M);
+
+ /* accept action_no_ack */
+ rtw_write16_set(rtwdev, REG_RXFLTMAP0, BIT_RXFLTMAP0_ACTIONNOACK);
+
+ /* accept NDPA and BF report poll */
+ rtw_write16_set(rtwdev, REG_RXFLTMAP1, BIT_RXFLTMAP1_BF);
+}
+
+void rtw_bf_remove_bfee_su(struct rtw_dev *rtwdev,
+ struct rtw_bfee *bfee)
+{
+ struct rtw_bf_info *bfinfo = &rtwdev->bf_info;
+
+ rtw_dbg(rtwdev, RTW_DBG_BF, "remove as a su bfee\n");
+ rtw_write8(rtwdev, REG_SND_PTCL_CTRL, RTW_SND_CTRL_REMOVE);
+
+ switch (bfee->su_reg_index) {
+ case 0:
+ rtw_write32(rtwdev, REG_ASSOCIATED_BFMER0_INFO, 0);
+ rtw_write16(rtwdev, REG_ASSOCIATED_BFMER0_INFO + 4, 0);
+ rtw_write16(rtwdev, REG_TX_CSI_RPT_PARAM_BW20, 0);
+ break;
+ case 1:
+ rtw_write32(rtwdev, REG_ASSOCIATED_BFMER1_INFO, 0);
+ rtw_write16(rtwdev, REG_ASSOCIATED_BFMER1_INFO + 4, 0);
+ rtw_write16(rtwdev, REG_TX_CSI_RPT_PARAM_BW20 + 2, 0);
+ break;
+ }
+
+ clear_bit(bfee->su_reg_index, bfinfo->bfer_su_reg_maping);
+ bfee->su_reg_index = 0xFF;
+}
+
+void rtw_bf_remove_bfee_mu(struct rtw_dev *rtwdev,
+ struct rtw_bfee *bfee)
+{
+ struct rtw_bf_info *bfinfo = &rtwdev->bf_info;
+
+ rtw_write8(rtwdev, REG_SND_PTCL_CTRL, RTW_SND_CTRL_REMOVE);
+
+ rtw_bf_del_bfer_entry_mu(rtwdev);
+
+ if (bfinfo->bfer_su_cnt == 0 && bfinfo->bfer_mu_cnt == 0)
+ rtw_bf_del_sounding(rtwdev);
+}
+
+void rtw_bf_set_gid_table(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *conf)
+{
+ struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
+ struct rtw_bfee *bfee = &rtwvif->bfee;
+ struct cfg_mumimo_para param;
+
+ if (bfee->role != RTW_BFEE_MU) {
+ rtw_dbg(rtwdev, RTW_DBG_BF, "this vif is not mu bfee\n");
+ return;
+ }
+
+ param.grouping_bitmap = 0;
+ param.mu_tx_en = 0;
+ memset(param.sounding_sts, 0, 6);
+ memcpy(param.given_gid_tab, conf->mu_group.membership, 8);
+ memcpy(param.given_user_pos, conf->mu_group.position, 16);
+ rtw_dbg(rtwdev, RTW_DBG_BF, "STA0: gid_valid=0x%x, user_position_l=0x%x, user_position_h=0x%x\n",
+ param.given_gid_tab[0], param.given_user_pos[0],
+ param.given_user_pos[1]);
+
+ rtw_dbg(rtwdev, RTW_DBG_BF, "STA1: gid_valid=0x%x, user_position_l=0x%x, user_position_h=0x%x\n",
+ param.given_gid_tab[1], param.given_user_pos[2],
+ param.given_user_pos[3]);
+
+ rtw_bf_cfg_mu_bfee(rtwdev, &param);
+}
+
+void rtw_bf_phy_init(struct rtw_dev *rtwdev)
+{
+ u8 tmp8;
+ u32 tmp32;
+ u8 retry_limit = 0xA;
+ u8 ndpa_rate = 0x10;
+ u8 ack_policy = 3;
+
+ tmp32 = rtw_read32(rtwdev, REG_MU_TX_CTL);
+ /* Enable P1 to aggregate new packets according to P0 transfer time */
+ tmp32 |= BIT_MU_P1_WAIT_STATE_EN;
+ /* MU Retry Limit */
+ tmp32 &= ~BIT_MASK_R_MU_RL;
+ tmp32 |= (retry_limit << BIT_SHIFT_R_MU_RL) & BIT_MASK_R_MU_RL;
+ /* Disable Tx MU-MIMO until sounding done */
+ tmp32 &= ~BIT_EN_MU_MIMO;
+ /* Clear validity of MU STAs */
+ tmp32 &= ~BIT_MASK_R_MU_TABLE_VALID;
+ rtw_write32(rtwdev, REG_MU_TX_CTL, tmp32);
+
+ /* MU-MIMO Option as default value */
+ tmp8 = ack_policy << BIT_SHIFT_WMAC_TXMU_ACKPOLICY;
+ tmp8 |= BIT_WMAC_TXMU_ACKPOLICY_EN;
+ rtw_write8(rtwdev, REG_WMAC_MU_BF_OPTION, tmp8);
+
+ /* MU-MIMO Control as default value */
+ rtw_write16(rtwdev, REG_WMAC_MU_BF_CTL, 0);
+ /* Set MU NDPA rate & BW source */
+ rtw_write32_set(rtwdev, REG_TXBF_CTRL, BIT_USE_NDPA_PARAMETER);
+ /* Set NDPA Rate */
+ rtw_write8(rtwdev, REG_NDPA_OPT_CTRL, ndpa_rate);
+
+ rtw_write32_mask(rtwdev, REG_BBPSF_CTRL, BIT_MASK_CSI_RATE,
+ DESC_RATE6M);
+}
+
+void rtw_bf_cfg_csi_rate(struct rtw_dev *rtwdev, u8 rssi, u8 cur_rate,
+ u8 fixrate_en, u8 *new_rate)
+{
+ u32 csi_cfg;
+ u16 cur_rrsr;
+
+ csi_cfg = rtw_read32(rtwdev, REG_BBPSF_CTRL) & ~BIT_MASK_CSI_RATE;
+ cur_rrsr = rtw_read16(rtwdev, REG_RRSR);
+
+ if (rssi >= 40) {
+ if (cur_rate != DESC_RATE54M) {
+ cur_rrsr |= BIT(DESC_RATE54M);
+ csi_cfg |= (DESC_RATE54M & BIT_MASK_CSI_RATE_VAL) <<
+ BIT_SHIFT_CSI_RATE;
+ rtw_write16(rtwdev, REG_RRSR, cur_rrsr);
+ rtw_write32(rtwdev, REG_BBPSF_CTRL, csi_cfg);
+ }
+ *new_rate = DESC_RATE54M;
+ } else {
+ if (cur_rate != DESC_RATE24M) {
+ cur_rrsr &= ~BIT(DESC_RATE54M);
+ csi_cfg |= (DESC_RATE24M & BIT_MASK_CSI_RATE_VAL) <<
+ BIT_SHIFT_CSI_RATE;
+ rtw_write16(rtwdev, REG_RRSR, cur_rrsr);
+ rtw_write32(rtwdev, REG_BBPSF_CTRL, csi_cfg);
+ }
+ *new_rate = DESC_RATE24M;
+ }
+}
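
rtw_bf_assoc() above derives the partial AID from the BSSID as (bssid[5] << 1) | (bssid[4] >> 7); if that follows the 802.11ac partial-AID construction, it is BSSID bits 39..47 with bit 39 as the LSB. A worked check of the arithmetic with a hypothetical BSSID:

#include <linux/types.h>

/* Worked example: for a hypothetical BSSID aa:bb:cc:dd:ee:f1,
 * p_aid = (0xf1 << 1) | (0xee >> 7) = 0x1e2 | 0x1 = 0x1e3.
 */
static u16 example_partial_aid(const u8 bssid[6])
{
	return (bssid[5] << 1) | (bssid[4] >> 7);
}
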
diff --git a/drivers/net/wireless/realtek/rtw88/bf.h b/drivers/net/wireless/realtek/rtw88/bf.h
new file mode 100644
index 000000000000..96a8216dd11f
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/bf.h
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2018-2019 Realtek Corporation.
+ */
+
+#ifndef __RTW_BF_H_
+#define __RTW_BF_H_
+
+#define REG_TXBF_CTRL 0x042C
+#define REG_RRSR 0x0440
+#define REG_NDPA_OPT_CTRL 0x045F
+
+#define REG_ASSOCIATED_BFMER0_INFO 0x06E4
+#define REG_ASSOCIATED_BFMER1_INFO 0x06EC
+#define REG_TX_CSI_RPT_PARAM_BW20 0x06F4
+#define REG_SND_PTCL_CTRL 0x0718
+#define REG_MU_TX_CTL 0x14C0
+#define REG_MU_STA_GID_VLD 0x14C4
+#define REG_MU_STA_USER_POS_INFO 0x14C8
+#define REG_CSI_RRSR 0x1678
+#define REG_WMAC_MU_BF_OPTION 0x167C
+#define REG_WMAC_MU_BF_CTL 0x1680
+
+#define BIT_WMAC_USE_NDPARATE BIT(30)
+#define BIT_WMAC_TXMU_ACKPOLICY_EN BIT(6)
+#define BIT_USE_NDPA_PARAMETER BIT(30)
+#define BIT_MU_P1_WAIT_STATE_EN BIT(16)
+#define BIT_EN_MU_MIMO BIT(7)
+
+#define R_MU_RL 0xf
+#define BIT_SHIFT_R_MU_RL 12
+#define BIT_SHIFT_WMAC_TXMU_ACKPOLICY 4
+#define BIT_SHIFT_CSI_RATE 24
+
+#define BIT_MASK_R_MU_RL (R_MU_RL << BIT_SHIFT_R_MU_RL)
+#define BIT_MASK_R_MU_TABLE_VALID 0x3f
+#define BIT_MASK_CSI_RATE_VAL 0x3F
+#define BIT_MASK_CSI_RATE (BIT_MASK_CSI_RATE_VAL << BIT_SHIFT_CSI_RATE)
+
+#define BIT_RXFLTMAP0_ACTIONNOACK BIT(14)
+#define BIT_RXFLTMAP1_BF (BIT(4) | BIT(5))
+#define BIT_RXFLTMAP1_BF_REPORT_POLL BIT(4)
+#define BIT_RXFLTMAP4_BF_REPORT_POLL BIT(4)
+
+#define RTW_NDP_RX_STANDBY_TIME 0x70
+#define RTW_SND_CTRL_REMOVE 0xD8
+#define RTW_SND_CTRL_SOUNDING 0xDB
+
+enum csi_seg_len {
+ HAL_CSI_SEG_4K = 0,
+ HAL_CSI_SEG_8K = 1,
+ HAL_CSI_SEG_11K = 2,
+};
+
+struct cfg_mumimo_para {
+ u8 sounding_sts[6];
+ u16 grouping_bitmap;
+ u8 mu_tx_en;
+ u32 given_gid_tab[2];
+ u32 given_user_pos[4];
+};
+
+struct mu_bfer_init_para {
+ u16 paid;
+ u16 csi_para;
+ u16 my_aid;
+ enum csi_seg_len csi_length_sel;
+ u8 bfer_address[ETH_ALEN];
+};
+
+void rtw_bf_disassoc(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf);
+void rtw_bf_assoc(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf);
+void rtw_bf_init_bfer_entry_mu(struct rtw_dev *rtwdev,
+ struct mu_bfer_init_para *param);
+void rtw_bf_cfg_sounding(struct rtw_dev *rtwdev, struct rtw_vif *vif,
+ enum rtw_trx_desc_rate rate);
+void rtw_bf_cfg_mu_bfee(struct rtw_dev *rtwdev, struct cfg_mumimo_para *param);
+void rtw_bf_del_bfer_entry_mu(struct rtw_dev *rtwdev);
+void rtw_bf_del_sounding(struct rtw_dev *rtwdev);
+void rtw_bf_enable_bfee_su(struct rtw_dev *rtwdev, struct rtw_vif *vif,
+ struct rtw_bfee *bfee);
+void rtw_bf_enable_bfee_mu(struct rtw_dev *rtwdev, struct rtw_vif *vif,
+ struct rtw_bfee *bfee);
+void rtw_bf_remove_bfee_su(struct rtw_dev *rtwdev, struct rtw_bfee *bfee);
+void rtw_bf_remove_bfee_mu(struct rtw_dev *rtwdev, struct rtw_bfee *bfee);
+void rtw_bf_set_gid_table(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *conf);
+void rtw_bf_phy_init(struct rtw_dev *rtwdev);
+void rtw_bf_cfg_csi_rate(struct rtw_dev *rtwdev, u8 rssi, u8 cur_rate,
+ u8 fixrate_en, u8 *new_rate);
+#endif
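
bf.h builds its field masks as a value mask shifted into place (for example BIT_MASK_R_MU_RL above), so the clear-then-OR updates in rtw_bf_phy_init() could equally be written with FIELD_PREP from linux/bitfield.h; a sketch of the equivalence:

#include <linux/bitfield.h>

/* With BIT_MASK_R_MU_RL == (0xf << 12), FIELD_PREP() performs the
 * same shift-and-mask as the open-coded update in rtw_bf_phy_init().
 */
static u32 example_set_mu_retry_limit(u32 mu_tx_ctl, u8 retry_limit)
{
	mu_tx_ctl &= ~BIT_MASK_R_MU_RL;
	mu_tx_ctl |= FIELD_PREP(BIT_MASK_R_MU_RL, retry_limit);
	return mu_tx_ctl;
}
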
diff --git a/drivers/net/wireless/realtek/rtw88/coex.c b/drivers/net/wireless/realtek/rtw88/coex.c
index 793b40bdbf7c..4dfb2ec395ee 100644
--- a/drivers/net/wireless/realtek/rtw88/coex.c
+++ b/drivers/net/wireless/realtek/rtw88/coex.c
@@ -383,9 +383,9 @@ static void rtw_coex_update_wl_link_info(struct rtw_dev *rtwdev, u8 reason)
u8 rssi_step;
u8 rssi;
- scan = rtw_flag_check(rtwdev, RTW_FLAG_SCANNING);
+ scan = test_bit(RTW_FLAG_SCANNING, rtwdev->flags);
coex_stat->wl_connected = !!rtwdev->sta_cnt;
- coex_stat->wl_gl_busy = rtw_flag_check(rtwdev, RTW_FLAG_BUSY_TRAFFIC);
+ coex_stat->wl_gl_busy = test_bit(RTW_FLAG_BUSY_TRAFFIC, rtwdev->flags);
if (stats->tx_throughput > stats->rx_throughput)
coex_stat->wl_tput_dir = COEX_WL_TPUT_TX;
@@ -810,8 +810,6 @@ static void rtw_coex_ignore_wlan_act(struct rtw_dev *rtwdev, bool enable)
static void rtw_coex_power_save_state(struct rtw_dev *rtwdev, u8 ps_type,
u8 lps_val, u8 rpwm_val)
{
- struct rtw_lps_conf *lps_conf = &rtwdev->lps_conf;
- struct rtw_vif *rtwvif;
struct rtw_coex *coex = &rtwdev->coex;
struct rtw_coex_stat *coex_stat = &coex->stat;
u8 lps_mode = 0x0;
@@ -823,18 +821,14 @@ static void rtw_coex_power_save_state(struct rtw_dev *rtwdev, u8 ps_type,
/* recover to original 32k low power setting */
coex_stat->wl_force_lps_ctrl = false;
- rtwvif = lps_conf->rtwvif;
- if (rtwvif && rtw_in_lps(rtwdev))
- rtw_leave_lps(rtwdev, rtwvif);
+ rtw_leave_lps(rtwdev);
break;
case COEX_PS_LPS_OFF:
coex_stat->wl_force_lps_ctrl = true;
if (lps_mode)
rtw_fw_coex_tdma_type(rtwdev, 0x8, 0, 0, 0, 0);
- rtwvif = lps_conf->rtwvif;
- if (rtwvif && rtw_in_lps(rtwdev))
- rtw_leave_lps(rtwdev, rtwvif);
+ rtw_leave_lps(rtwdev);
break;
default:
break;
@@ -1308,6 +1302,7 @@ static void rtw_coex_action_bt_inquiry(struct rtw_dev *rtwdev)
struct rtw_chip_info *chip = rtwdev->chip;
bool wl_hi_pri = false;
u8 table_case, tdma_case;
+ u32 slot_type = 0;
if (coex_stat->wl_linkscan_proc || coex_stat->wl_hi_pri_task1 ||
coex_stat->wl_hi_pri_task2)
@@ -1318,14 +1313,16 @@ static void rtw_coex_action_bt_inquiry(struct rtw_dev *rtwdev)
if (wl_hi_pri) {
table_case = 15;
if (coex_stat->bt_a2dp_exist &&
- !coex_stat->bt_pan_exist)
+ !coex_stat->bt_pan_exist) {
+ slot_type = TDMA_4SLOT;
tdma_case = 11;
- else if (coex_stat->wl_hi_pri_task1)
+ } else if (coex_stat->wl_hi_pri_task1) {
tdma_case = 6;
- else if (!coex_stat->bt_page)
+ } else if (!coex_stat->bt_page) {
tdma_case = 8;
- else
+ } else {
tdma_case = 9;
+ }
} else if (coex_stat->wl_connected) {
table_case = 10;
tdma_case = 10;
@@ -1361,7 +1358,7 @@ static void rtw_coex_action_bt_inquiry(struct rtw_dev *rtwdev)
rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G);
rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]);
rtw_coex_table(rtwdev, table_case);
- rtw_coex_tdma(rtwdev, false, tdma_case);
+ rtw_coex_tdma(rtwdev, false, tdma_case | slot_type);
}
static void rtw_coex_action_bt_hfp(struct rtw_dev *rtwdev)
@@ -1475,13 +1472,13 @@ static void rtw_coex_action_bt_a2dp(struct rtw_dev *rtwdev)
if (efuse->share_ant) {
/* Shared-Ant */
+ slot_type = TDMA_4SLOT;
+
if (coex_stat->wl_gl_busy && coex_stat->wl_noisy_level == 0)
table_case = 10;
else
table_case = 9;
- slot_type = TDMA_4SLOT;
-
if (coex_stat->wl_gl_busy)
tdma_case = 13;
else
@@ -1585,13 +1582,14 @@ static void rtw_coex_action_bt_a2dp_hid(struct rtw_dev *rtwdev)
if (efuse->share_ant) {
/* Shared-Ant */
+ slot_type = TDMA_4SLOT;
+
if (coex_stat->bt_ble_exist)
table_case = 26;
else
table_case = 9;
if (coex_stat->wl_gl_busy) {
- slot_type = TDMA_4SLOT;
tdma_case = 13;
} else {
tdma_case = 14;
@@ -1794,10 +1792,12 @@ static void rtw_coex_action_wl_linkscan(struct rtw_dev *rtwdev)
struct rtw_efuse *efuse = &rtwdev->efuse;
struct rtw_chip_info *chip = rtwdev->chip;
u8 table_case, tdma_case;
+ u32 slot_type = 0;
if (efuse->share_ant) {
/* Shared-Ant */
if (coex_stat->bt_a2dp_exist) {
+ slot_type = TDMA_4SLOT;
table_case = 9;
tdma_case = 11;
} else {
@@ -1818,7 +1818,7 @@ static void rtw_coex_action_wl_linkscan(struct rtw_dev *rtwdev)
rtw_coex_set_ant_path(rtwdev, true, COEX_SET_ANT_2G);
rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]);
rtw_coex_table(rtwdev, table_case);
- rtw_coex_tdma(rtwdev, false, tdma_case);
+ rtw_coex_tdma(rtwdev, false, tdma_case | slot_type);
}
static void rtw_coex_action_wl_not_connected(struct rtw_dev *rtwdev)
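
The coex.c hunks fold a 4-slot request into the existing tcase argument by OR-ing slot_type into it before calling rtw_coex_tdma(). That only works if the slot flag lives above the bits used by the case number; TDMA_4SLOT's definition is not shown in this patch, so the BIT(8) used below is an assumption:

#include <linux/bits.h>
#include <linux/types.h>

#define EXAMPLE_TDMA_4SLOT	BIT(8)	/* assumed to match TDMA_4SLOT */

static u8 example_coex_tdma_case(u32 tcase, bool *four_slot)
{
	*four_slot = !!(tcase & EXAMPLE_TDMA_4SLOT);
	return tcase & 0xff;	/* the table index survives the OR */
}
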
diff --git a/drivers/net/wireless/realtek/rtw88/debug.c b/drivers/net/wireless/realtek/rtw88/debug.c
index 6ad985e98e42..5a181e01ebef 100644
--- a/drivers/net/wireless/realtek/rtw88/debug.c
+++ b/drivers/net/wireless/realtek/rtw88/debug.c
@@ -498,12 +498,32 @@ static void rtw_print_vht_rate_txt(struct seq_file *m, u8 rate)
seq_printf(m, " VHT%uSMCS%u", n_ss, mcs_n);
}
+static void rtw_print_rate(struct seq_file *m, u8 rate)
+{
+ switch (rate) {
+ case DESC_RATE1M...DESC_RATE11M:
+ rtw_print_cck_rate_txt(m, rate);
+ break;
+ case DESC_RATE6M...DESC_RATE54M:
+ rtw_print_ofdm_rate_txt(m, rate);
+ break;
+ case DESC_RATEMCS0...DESC_RATEMCS15:
+ rtw_print_ht_rate_txt(m, rate);
+ break;
+ case DESC_RATEVHT1SS_MCS0...DESC_RATEVHT2SS_MCS9:
+ rtw_print_vht_rate_txt(m, rate);
+ break;
+ default:
+ seq_printf(m, " Unknown rate=0x%x\n", rate);
+ break;
+ }
+}
+
static int rtw_debugfs_get_tx_pwr_tbl(struct seq_file *m, void *v)
{
struct rtw_debugfs_priv *debugfs_priv = m->private;
struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
struct rtw_hal *hal = &rtwdev->hal;
- void (*print_rate)(struct seq_file *, u8) = NULL;
u8 path, rate;
struct rtw_power_params pwr_param = {0};
u8 bw = hal->current_band_width;
@@ -528,30 +548,11 @@ static int rtw_debugfs_get_tx_pwr_tbl(struct seq_file *m, void *v)
rate < DESC_RATEVHT1SS_MCS0)
continue;
- switch (rate) {
- case DESC_RATE1M...DESC_RATE11M:
- print_rate = rtw_print_cck_rate_txt;
- break;
- case DESC_RATE6M...DESC_RATE54M:
- print_rate = rtw_print_ofdm_rate_txt;
- break;
- case DESC_RATEMCS0...DESC_RATEMCS15:
- print_rate = rtw_print_ht_rate_txt;
- break;
- case DESC_RATEVHT1SS_MCS0...DESC_RATEVHT2SS_MCS9:
- print_rate = rtw_print_vht_rate_txt;
- break;
- default:
- print_rate = NULL;
- break;
- }
-
rtw_get_tx_power_params(rtwdev, path, rate, bw,
ch, regd, &pwr_param);
seq_printf(m, "%4c ", path + 'A');
- if (print_rate)
- print_rate(m, rate);
+ rtw_print_rate(m, rate);
seq_printf(m, " %3u(0x%02x) %4u %4d (%4d %4d)\n",
hal->tx_pwr_tbl[path][rate],
hal->tx_pwr_tbl[path][rate],
@@ -567,6 +568,132 @@ static int rtw_debugfs_get_tx_pwr_tbl(struct seq_file *m, void *v)
return 0;
}
+static int rtw_debugfs_get_phy_info(struct seq_file *m, void *v)
+{
+ struct rtw_debugfs_priv *debugfs_priv = m->private;
+ struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ struct rtw_traffic_stats *stats = &rtwdev->stats;
+ struct rtw_pkt_count *last_cnt = &dm_info->last_pkt_count;
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ struct ewma_evm *ewma_evm = dm_info->ewma_evm;
+ struct ewma_snr *ewma_snr = dm_info->ewma_snr;
+ u8 ss, rate_id;
+
+ seq_puts(m, "==========[Common Info]========\n");
+ seq_printf(m, "Is link = %c\n", rtw_is_assoc(rtwdev) ? 'Y' : 'N');
+ seq_printf(m, "Current CH(fc) = %u\n", rtwdev->hal.current_channel);
+ seq_printf(m, "Current BW = %u\n", rtwdev->hal.current_band_width);
+ seq_printf(m, "Current IGI = 0x%x\n", dm_info->igi_history[0]);
+ seq_printf(m, "TP {Tx, Rx} = {%u, %u}Mbps\n\n",
+ stats->tx_throughput, stats->rx_throughput);
+
+ seq_puts(m, "==========[Tx Phy Info]========\n");
+ seq_puts(m, "[Tx Rate] = ");
+ rtw_print_rate(m, dm_info->tx_rate);
+ seq_printf(m, "(0x%x)\n\n", dm_info->tx_rate);
+
+ seq_puts(m, "==========[Rx Phy Info]========\n");
+ seq_printf(m, "[Rx Beacon Count] = %u\n", last_cnt->num_bcn_pkt);
+ seq_puts(m, "[Rx Rate] = ");
+ rtw_print_rate(m, dm_info->curr_rx_rate);
+ seq_printf(m, "(0x%x)\n", dm_info->curr_rx_rate);
+
+ seq_puts(m, "[Rx Rate Count]:\n");
+ seq_printf(m, " * CCK = {%u, %u, %u, %u}\n",
+ last_cnt->num_qry_pkt[DESC_RATE1M],
+ last_cnt->num_qry_pkt[DESC_RATE2M],
+ last_cnt->num_qry_pkt[DESC_RATE5_5M],
+ last_cnt->num_qry_pkt[DESC_RATE11M]);
+
+ seq_printf(m, " * OFDM = {%u, %u, %u, %u, %u, %u, %u, %u}\n",
+ last_cnt->num_qry_pkt[DESC_RATE6M],
+ last_cnt->num_qry_pkt[DESC_RATE9M],
+ last_cnt->num_qry_pkt[DESC_RATE12M],
+ last_cnt->num_qry_pkt[DESC_RATE18M],
+ last_cnt->num_qry_pkt[DESC_RATE24M],
+ last_cnt->num_qry_pkt[DESC_RATE36M],
+ last_cnt->num_qry_pkt[DESC_RATE48M],
+ last_cnt->num_qry_pkt[DESC_RATE54M]);
+
+ for (ss = 0; ss < efuse->hw_cap.nss; ss++) {
+ rate_id = DESC_RATEMCS0 + ss * 8;
+ seq_printf(m, " * HT_MCS[%u:%u] = {%u, %u, %u, %u, %u, %u, %u, %u}\n",
+ ss * 8, ss * 8 + 7,
+ last_cnt->num_qry_pkt[rate_id],
+ last_cnt->num_qry_pkt[rate_id + 1],
+ last_cnt->num_qry_pkt[rate_id + 2],
+ last_cnt->num_qry_pkt[rate_id + 3],
+ last_cnt->num_qry_pkt[rate_id + 4],
+ last_cnt->num_qry_pkt[rate_id + 5],
+ last_cnt->num_qry_pkt[rate_id + 6],
+ last_cnt->num_qry_pkt[rate_id + 7]);
+ }
+
+ for (ss = 0; ss < efuse->hw_cap.nss; ss++) {
+ rate_id = DESC_RATEVHT1SS_MCS0 + ss * 10;
+ seq_printf(m, " * VHT_MCS-%uss MCS[0:9] = {%u, %u, %u, %u, %u, %u, %u, %u, %u, %u}\n",
+ ss + 1,
+ last_cnt->num_qry_pkt[rate_id],
+ last_cnt->num_qry_pkt[rate_id + 1],
+ last_cnt->num_qry_pkt[rate_id + 2],
+ last_cnt->num_qry_pkt[rate_id + 3],
+ last_cnt->num_qry_pkt[rate_id + 4],
+ last_cnt->num_qry_pkt[rate_id + 5],
+ last_cnt->num_qry_pkt[rate_id + 6],
+ last_cnt->num_qry_pkt[rate_id + 7],
+ last_cnt->num_qry_pkt[rate_id + 8],
+ last_cnt->num_qry_pkt[rate_id + 9]);
+ }
+
+ seq_printf(m, "[RSSI(dBm)] = {%d, %d}\n",
+ dm_info->rssi[RF_PATH_A] - 100,
+ dm_info->rssi[RF_PATH_B] - 100);
+ seq_printf(m, "[Rx EVM(dB)] = {-%d, -%d}\n",
+ dm_info->rx_evm_dbm[RF_PATH_A],
+ dm_info->rx_evm_dbm[RF_PATH_B]);
+ seq_printf(m, "[Rx SNR] = {%d, %d}\n",
+ dm_info->rx_snr[RF_PATH_A],
+ dm_info->rx_snr[RF_PATH_B]);
+ seq_printf(m, "[CFO_tail(KHz)] = {%d, %d}\n",
+ dm_info->cfo_tail[RF_PATH_A],
+ dm_info->cfo_tail[RF_PATH_B]);
+
+ if (dm_info->curr_rx_rate >= DESC_RATE11M) {
+ seq_puts(m, "[Rx Average Status]:\n");
+ seq_printf(m, " * OFDM, EVM: {-%d}, SNR: {%d}\n",
+ (u8)ewma_evm_read(&ewma_evm[RTW_EVM_OFDM]),
+ (u8)ewma_snr_read(&ewma_snr[RTW_SNR_OFDM_A]));
+ seq_printf(m, " * 1SS, EVM: {-%d}, SNR: {%d}\n",
+ (u8)ewma_evm_read(&ewma_evm[RTW_EVM_1SS]),
+ (u8)ewma_snr_read(&ewma_snr[RTW_SNR_1SS_A]));
+ seq_printf(m, " * 2SS, EVM: {-%d, -%d}, SNR: {%d, %d}\n",
+ (u8)ewma_evm_read(&ewma_evm[RTW_EVM_2SS_A]),
+ (u8)ewma_evm_read(&ewma_evm[RTW_EVM_2SS_B]),
+ (u8)ewma_snr_read(&ewma_snr[RTW_SNR_2SS_A]),
+ (u8)ewma_snr_read(&ewma_snr[RTW_SNR_2SS_B]));
+ }
+
+ seq_puts(m, "[Rx Counter]:\n");
+ seq_printf(m, " * CCA (CCK, OFDM, Total) = (%u, %u, %u)\n",
+ dm_info->cck_cca_cnt,
+ dm_info->ofdm_cca_cnt,
+ dm_info->total_cca_cnt);
+ seq_printf(m, " * False Alarm (CCK, OFDM, Total) = (%u, %u, %u)\n",
+ dm_info->cck_fa_cnt,
+ dm_info->ofdm_fa_cnt,
+ dm_info->total_fa_cnt);
+ seq_printf(m, " * CCK cnt (ok, err) = (%u, %u)\n",
+ dm_info->cck_ok_cnt, dm_info->cck_err_cnt);
+ seq_printf(m, " * OFDM cnt (ok, err) = (%u, %u)\n",
+ dm_info->ofdm_ok_cnt, dm_info->ofdm_err_cnt);
+ seq_printf(m, " * HT cnt (ok, err) = (%u, %u)\n",
+ dm_info->ht_ok_cnt, dm_info->ht_err_cnt);
+ seq_printf(m, " * VHT cnt (ok, err) = (%u, %u)\n",
+ dm_info->vht_ok_cnt, dm_info->vht_err_cnt);
+ return 0;
+}
+
#define rtw_debug_impl_mac(page, addr) \
static struct rtw_debugfs_priv rtw_debug_priv_mac_ ##page = { \
.cb_read = rtw_debug_get_mac_page, \
@@ -653,6 +780,10 @@ static struct rtw_debugfs_priv rtw_debug_priv_rsvd_page = {
.cb_read = rtw_debugfs_get_rsvd_page,
};
+static struct rtw_debugfs_priv rtw_debug_priv_phy_info = {
+ .cb_read = rtw_debugfs_get_phy_info,
+};
+
#define rtw_debugfs_add_core(name, mode, fopname, parent) \
do { \
rtw_debug_priv_ ##name.rtwdev = rtwdev; \
@@ -682,6 +813,7 @@ void rtw_debugfs_init(struct rtw_dev *rtwdev)
rtw_debugfs_add_rw(rf_read);
rtw_debugfs_add_rw(dump_cam);
rtw_debugfs_add_rw(rsvd_page);
+ rtw_debugfs_add_r(phy_info);
rtw_debugfs_add_r(mac_0);
rtw_debugfs_add_r(mac_1);
rtw_debugfs_add_r(mac_2);
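
The debug.c refactor above replaces a duplicated function-pointer switch with rtw_print_rate(), built on case ranges (case A ... B:), a GCC/Clang extension the kernel uses routinely. A minimal standalone illustration with made-up index ranges:

/* Illustrative index ranges only; the real DESC_RATE* values live
 * in the rtw88 headers.
 */
static const char *example_rate_class(unsigned int rate)
{
	switch (rate) {
	case 0 ... 3:
		return "CCK";
	case 4 ... 11:
		return "OFDM";
	default:
		return "unknown";
	}
}
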
diff --git a/drivers/net/wireless/realtek/rtw88/debug.h b/drivers/net/wireless/realtek/rtw88/debug.h
index 45851cbbd2ab..cd28f675e9cb 100644
--- a/drivers/net/wireless/realtek/rtw88/debug.h
+++ b/drivers/net/wireless/realtek/rtw88/debug.h
@@ -16,6 +16,8 @@ enum rtw_debug_mask {
RTW_DBG_RFK = 0x00000080,
RTW_DBG_REGD = 0x00000100,
RTW_DBG_DEBUGFS = 0x00000200,
+ RTW_DBG_PS = 0x00000400,
+ RTW_DBG_BF = 0x00000800,
RTW_DBG_ALL = 0xffffffff
};
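
The two new bits extend enum rtw_debug_mask so that RTW_DBG_PS and RTW_DBG_BF call sites (as in bf.c above) can be filtered independently. The gating code itself is not part of this patch; a hedged sketch of the usual shape:

#include <linux/types.h>

/* Sketch: output is emitted only when the feature bit is set in
 * the driver's runtime debug mask (the mask parameter name here
 * is assumed, not taken from the driver).
 */
static bool example_dbg_enabled(u32 enabled_mask, enum rtw_debug_mask bit)
{
	return !!(enabled_mask & bit);	/* e.g. RTW_DBG_BF == 0x00000800 */
}
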
diff --git a/drivers/net/wireless/realtek/rtw88/fw.c b/drivers/net/wireless/realtek/rtw88/fw.c
index b082e2cc95f5..b8c581161f61 100644
--- a/drivers/net/wireless/realtek/rtw88/fw.c
+++ b/drivers/net/wireless/realtek/rtw88/fw.c
@@ -7,7 +7,9 @@
#include "fw.h"
#include "tx.h"
#include "reg.h"
+#include "sec.h"
#include "debug.h"
+#include "util.h"
static void rtw_fw_c2h_cmd_handle_ext(struct rtw_dev *rtwdev,
struct sk_buff *skb)
@@ -27,6 +29,100 @@ static void rtw_fw_c2h_cmd_handle_ext(struct rtw_dev *rtwdev,
}
}
+static u16 get_max_amsdu_len(u32 bit_rate)
+{
+ /* lower than ofdm, do not aggregate */
+ if (bit_rate < 550)
+ return 1;
+
+ /* lower than 20M 2ss mcs8, make it small */
+ if (bit_rate < 1800)
+ return 1200;
+
+ /* lower than 40M 2ss mcs9, make it medium */
+ if (bit_rate < 4000)
+ return 2600;
+
+ /* not yet 80M 2ss mcs8/9, make it twice regular packet size */
+ if (bit_rate < 7000)
+ return 3500;
+
+ /* unlimited */
+ return 0;
+}
+
+struct rtw_fw_iter_ra_data {
+ struct rtw_dev *rtwdev;
+ u8 *payload;
+};
+
+static void rtw_fw_ra_report_iter(void *data, struct ieee80211_sta *sta)
+{
+ struct rtw_fw_iter_ra_data *ra_data = data;
+ struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;
+ u8 mac_id, rate, sgi, bw;
+ u8 mcs, nss;
+ u32 bit_rate;
+
+ mac_id = GET_RA_REPORT_MACID(ra_data->payload);
+ if (si->mac_id != mac_id)
+ return;
+
+ si->ra_report.txrate.flags = 0;
+
+ rate = GET_RA_REPORT_RATE(ra_data->payload);
+ sgi = GET_RA_REPORT_SGI(ra_data->payload);
+ bw = GET_RA_REPORT_BW(ra_data->payload);
+
+ if (rate < DESC_RATEMCS0) {
+ si->ra_report.txrate.legacy = rtw_desc_to_bitrate(rate);
+ goto legacy;
+ }
+
+ rtw_desc_to_mcsrate(rate, &mcs, &nss);
+ if (rate >= DESC_RATEVHT1SS_MCS0)
+ si->ra_report.txrate.flags |= RATE_INFO_FLAGS_VHT_MCS;
+ else if (rate >= DESC_RATEMCS0)
+ si->ra_report.txrate.flags |= RATE_INFO_FLAGS_MCS;
+
+ if (rate >= DESC_RATEMCS0) {
+ si->ra_report.txrate.mcs = mcs;
+ si->ra_report.txrate.nss = nss;
+ }
+
+ if (sgi)
+ si->ra_report.txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
+
+ if (bw == RTW_CHANNEL_WIDTH_80)
+ si->ra_report.txrate.bw = RATE_INFO_BW_80;
+ else if (bw == RTW_CHANNEL_WIDTH_40)
+ si->ra_report.txrate.bw = RATE_INFO_BW_40;
+ else
+ si->ra_report.txrate.bw = RATE_INFO_BW_20;
+
+legacy:
+ bit_rate = cfg80211_calculate_bitrate(&si->ra_report.txrate);
+
+ si->ra_report.desc_rate = rate;
+ si->ra_report.bit_rate = bit_rate;
+
+ sta->max_rc_amsdu_len = get_max_amsdu_len(bit_rate);
+}
+
+static void rtw_fw_ra_report_handle(struct rtw_dev *rtwdev, u8 *payload,
+ u8 length)
+{
+ struct rtw_fw_iter_ra_data ra_data;
+
+ if (WARN(length < 7, "invalid ra report c2h length\n"))
+ return;
+
+ rtwdev->dm_info.tx_rate = GET_RA_REPORT_RATE(payload);
+ ra_data.rtwdev = rtwdev;
+ ra_data.payload = payload;
+ rtw_iterate_stas_atomic(rtwdev, rtw_fw_ra_report_iter, &ra_data);
+}
+
void rtw_fw_c2h_cmd_handle(struct rtw_dev *rtwdev, struct sk_buff *skb)
{
struct rtw_c2h_cmd *c2h;
@@ -49,6 +145,9 @@ void rtw_fw_c2h_cmd_handle(struct rtw_dev *rtwdev, struct sk_buff *skb)
case C2H_HALMAC:
rtw_fw_c2h_cmd_handle_ext(rtwdev, skb);
break;
+ case C2H_RA_RPT:
+ rtw_fw_ra_report_handle(rtwdev, c2h->payload, len);
+ break;
default:
break;
}
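
get_max_amsdu_len() above receives bit_rate from cfg80211_calculate_bitrate(), which reports in units of 100 kbps, so its thresholds read as 55, 180, 400 and 700 Mbps, and the result feeds sta->max_rc_amsdu_len. A quick sanity check of that mapping:

/* bit_rate is in 100 kbps units (cfg80211_calculate_bitrate). */
static void example_amsdu_threshold_check(void)
{
	WARN_ON(get_max_amsdu_len(540) != 1);	  /* 54 Mbps legacy: none */
	WARN_ON(get_max_amsdu_len(1733) != 1200); /* ~173 Mbps: small cap */
	WARN_ON(get_max_amsdu_len(7000) != 0);	  /* >= 700 Mbps: unlimited */
}
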
@@ -397,6 +496,24 @@ static u8 rtw_get_rsvd_page_location(struct rtw_dev *rtwdev,
return location;
}
+void rtw_fw_set_pg_info(struct rtw_dev *rtwdev)
+{
+ struct rtw_lps_conf *conf = &rtwdev->lps_conf;
+ u8 h2c_pkt[H2C_PKT_SIZE] = {0};
+ u8 loc_pg, loc_dpk;
+
+ loc_pg = rtw_get_rsvd_page_location(rtwdev, RSVD_LPS_PG_INFO);
+ loc_dpk = rtw_get_rsvd_page_location(rtwdev, RSVD_LPS_PG_DPK);
+
+ SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_LPS_PG_INFO);
+
+ LPS_PG_INFO_LOC(h2c_pkt, loc_pg);
+ LPS_PG_DPK_LOC(h2c_pkt, loc_dpk);
+ LPS_PG_SEC_CAM_EN(h2c_pkt, conf->sec_cam_backup);
+
+ rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
+}
+
void rtw_send_rsvd_page_h2c(struct rtw_dev *rtwdev)
{
u8 h2c_pkt[H2C_PKT_SIZE] = {0};
@@ -442,6 +559,58 @@ rtw_beacon_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
return skb_new;
}
+static struct sk_buff *rtw_lps_pg_dpk_get(struct ieee80211_hw *hw)
+{
+ struct rtw_dev *rtwdev = hw->priv;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_dpk_info *dpk_info = &rtwdev->dm_info.dpk_info;
+ struct rtw_lps_pg_dpk_hdr *dpk_hdr;
+ struct sk_buff *skb;
+ u32 size;
+
+ size = chip->tx_pkt_desc_sz + sizeof(*dpk_hdr);
+ skb = alloc_skb(size, GFP_KERNEL);
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, chip->tx_pkt_desc_sz);
+ dpk_hdr = skb_put_zero(skb, sizeof(*dpk_hdr));
+ dpk_hdr->dpk_ch = dpk_info->dpk_ch;
+ dpk_hdr->dpk_path_ok = dpk_info->dpk_path_ok[0];
+ memcpy(dpk_hdr->dpk_txagc, dpk_info->dpk_txagc, 2);
+ memcpy(dpk_hdr->dpk_gs, dpk_info->dpk_gs, 4);
+ memcpy(dpk_hdr->coef, dpk_info->coef, 160);
+
+ return skb;
+}
+
+static struct sk_buff *rtw_lps_pg_info_get(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct rtw_dev *rtwdev = hw->priv;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_lps_conf *conf = &rtwdev->lps_conf;
+ struct rtw_lps_pg_info_hdr *pg_info_hdr;
+ struct sk_buff *skb;
+ u32 size;
+
+ size = chip->tx_pkt_desc_sz + sizeof(*pg_info_hdr);
+ skb = alloc_skb(size, GFP_KERNEL);
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, chip->tx_pkt_desc_sz);
+ pg_info_hdr = skb_put_zero(skb, sizeof(*pg_info_hdr));
+ pg_info_hdr->tx_bu_page_count = rtwdev->fifo.rsvd_drv_pg_num;
+ pg_info_hdr->macid = find_first_bit(rtwdev->mac_id_map, RTW_MAX_MAC_ID_NUM);
+ pg_info_hdr->sec_cam_count =
+ rtw_sec_cam_pg_backup(rtwdev, pg_info_hdr->sec_cam);
+
+ conf->sec_cam_backup = pg_info_hdr->sec_cam_count != 0;
+
+ return skb;
+}
+
static struct sk_buff *rtw_get_rsvd_page_skb(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
enum rtw_rsvd_packet_type type)
@@ -464,6 +633,12 @@ static struct sk_buff *rtw_get_rsvd_page_skb(struct ieee80211_hw *hw,
case RSVD_QOS_NULL:
skb_new = ieee80211_nullfunc_get(hw, vif, true);
break;
+ case RSVD_LPS_PG_DPK:
+ skb_new = rtw_lps_pg_dpk_get(hw);
+ break;
+ case RSVD_LPS_PG_INFO:
+ skb_new = rtw_lps_pg_info_get(hw, vif);
+ break;
default:
return NULL;
}
@@ -498,9 +673,6 @@ static void rtw_rsvd_page_list_to_buf(struct rtw_dev *rtwdev, u8 page_size,
{
struct sk_buff *skb = rsvd_pkt->skb;
- if (rsvd_pkt->add_txdesc)
- rtw_fill_rsvd_page_desc(rtwdev, skb);
-
if (page >= 1)
memcpy(buf + page_margin + page_size * (page - 1),
skb->data, skb->len);
@@ -625,16 +797,37 @@ static u8 *rtw_build_rsvd_page(struct rtw_dev *rtwdev,
list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, list) {
iter = rtw_get_rsvd_page_skb(hw, vif, rsvd_pkt->type);
if (!iter) {
- rtw_err(rtwdev, "fail to build rsvd packet\n");
+ rtw_err(rtwdev, "failed to build rsvd packet\n");
goto release_skb;
}
+
+ /* Fill the tx_desc for any rsvd pkt that requires one; doing so
+ * increases iter->len by tx_desc_sz.
+ */
+ if (rsvd_pkt->add_txdesc)
+ rtw_fill_rsvd_page_desc(rtwdev, iter);
+
rsvd_pkt->skb = iter;
rsvd_pkt->page = total_page;
- if (rsvd_pkt->add_txdesc)
+
+ /* Reserved pages are downloaded via the TX path, which prepends
+ * a tx_desc describing the length of the buffer. If the page count
+ * did not include the size of that tx_desc for the first rsvd_pkt
+ * (always a beacon; the firmware treats the first page as the
+ * beacon content), the buffer we generate would be smaller than
+ * the actual size of the whole rsvd_page.
+ */
+ if (total_page == 0) {
+ if (rsvd_pkt->type != RSVD_BEACON) {
+ rtw_err(rtwdev, "first page should be a beacon\n");
+ goto release_skb;
+ }
total_page += rtw_len_to_page(iter->len + tx_desc_sz,
page_size);
- else
+ } else {
total_page += rtw_len_to_page(iter->len, page_size);
+ }
}
if (total_page > rtwdev->fifo.rsvd_drv_pg_num) {
@@ -647,13 +840,24 @@ static u8 *rtw_build_rsvd_page(struct rtw_dev *rtwdev,
if (!buf)
goto release_skb;
+ /* Copy the content of each rsvd_pkt into the buf, aligned to page
+ * boundaries.
+ *
+ * Note that the first rsvd_pkt is always a beacon, whatever the
+ * vif->type, and it does not need a tx_desc here because the TX
+ * path will generate one for it when it is downloaded.
+ */
list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, list) {
rtw_rsvd_page_list_to_buf(rtwdev, page_size, page_margin,
page, buf, rsvd_pkt);
- page += rtw_len_to_page(rsvd_pkt->skb->len, page_size);
- }
- list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, list)
+ if (page == 0)
+ page += rtw_len_to_page(rsvd_pkt->skb->len +
+ tx_desc_sz, page_size);
+ else
+ page += rtw_len_to_page(rsvd_pkt->skb->len, page_size);
+
kfree_skb(rsvd_pkt->skb);
+ }
return buf;
@@ -706,6 +910,11 @@ int rtw_fw_download_rsvd_page(struct rtw_dev *rtwdev, struct ieee80211_vif *vif)
goto free;
}
+ /* Finally, download *only* the beacon again, because the previous
+ * tx_desc described the whole rsvd page buffer. Downloading the
+ * beacon alone replaces that TX desc header, giving the beacon a
+ * correct tx_desc within the rsvd page.
+ */
ret = rtw_download_beacon(rtwdev, vif);
if (ret) {
rtw_err(rtwdev, "failed to download beacon\n");
diff --git a/drivers/net/wireless/realtek/rtw88/fw.h b/drivers/net/wireless/realtek/rtw88/fw.h
index e95d85bd097f..73d1b9ca8efc 100644
--- a/drivers/net/wireless/realtek/rtw88/fw.h
+++ b/drivers/net/wireless/realtek/rtw88/fw.h
@@ -11,22 +11,6 @@
/* FW bin information */
#define FW_HDR_SIZE 64
#define FW_HDR_CHKSUM_SIZE 8
-#define FW_HDR_VERSION 4
-#define FW_HDR_SUBVERSION 6
-#define FW_HDR_SUBINDEX 7
-#define FW_HDR_MONTH 16
-#define FW_HDR_DATE 17
-#define FW_HDR_HOUR 18
-#define FW_HDR_MIN 19
-#define FW_HDR_YEAR 20
-#define FW_HDR_MEM_USAGE 24
-#define FW_HDR_H2C_FMT_VER 28
-#define FW_HDR_DMEM_ADDR 32
-#define FW_HDR_DMEM_SIZE 36
-#define FW_HDR_IMEM_SIZE 48
-#define FW_HDR_EMEM_SIZE 52
-#define FW_HDR_EMEM_ADDR 56
-#define FW_HDR_IMEM_ADDR 60
#define FIFO_PAGE_SIZE_SHIFT 12
#define FIFO_PAGE_SIZE 4096
@@ -36,6 +20,7 @@
enum rtw_c2h_cmd_id {
C2H_BT_INFO = 0x09,
C2H_BT_MP_INFO = 0x0b,
+ C2H_RA_RPT = 0x0c,
C2H_HW_FEATURE_REPORT = 0x19,
C2H_WLAN_INFO = 0x27,
C2H_HW_FEATURE_DUMP = 0xfd,
@@ -58,6 +43,8 @@ enum rtw_rsvd_packet_type {
RSVD_PROBE_RESP,
RSVD_NULL,
RSVD_QOS_NULL,
+ RSVD_LPS_PG_DPK,
+ RSVD_LPS_PG_INFO,
};
enum rtw_fw_rf_type {
@@ -86,6 +73,25 @@ struct rtw_iqk_para {
u8 segment_iqk;
};
+struct rtw_lps_pg_dpk_hdr {
+ u16 dpk_path_ok;
+ u8 dpk_txagc[2];
+ u16 dpk_gs[2];
+ u32 coef[2][20];
+ u8 dpk_ch;
+} __packed;
+
+struct rtw_lps_pg_info_hdr {
+ u8 macid;
+ u8 mbssid;
+ u8 pattern_count;
+ u8 mu_tab_group_id;
+ u8 sec_cam_count;
+ u8 tx_bu_page_count;
+ u16 rsvd;
+ u8 sec_cam[MAX_PG_CAM_BACKUP_NUM];
+} __packed;
+
struct rtw_rsvd_page {
struct list_head list;
struct sk_buff *skb;
@@ -94,10 +100,44 @@ struct rtw_rsvd_page {
bool add_txdesc;
};
+struct rtw_fw_hdr {
+ __le16 signature;
+ u8 category;
+ u8 function;
+ __le16 version; /* 0x04 */
+ u8 subversion;
+ u8 subindex;
+ __le32 rsvd; /* 0x08 */
+ __le32 rsvd2; /* 0x0C */
+ u8 month; /* 0x10 */
+ u8 day;
+ u8 hour;
+ u8 min;
+ __le16 year; /* 0x14 */
+ __le16 rsvd3;
+ u8 mem_usage; /* 0x18 */
+ u8 rsvd4[3];
+ __le16 h2c_fmt_ver; /* 0x1C */
+ __le16 rsvd5;
+ __le32 dmem_addr; /* 0x20 */
+ __le32 dmem_size;
+ __le32 rsvd6;
+ __le32 rsvd7;
+ __le32 imem_size; /* 0x30 */
+ __le32 emem_size;
+ __le32 emem_addr;
+ __le32 imem_addr;
+} __packed;
+
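Since the byte-offset defines above are replaced by this struct, reading header fields becomes an overlay of the packed struct on the firmware blob, as check_firmware_size() now does in mac.c below. A little-endian user-space sketch with hypothetical header bytes:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* first fields of rtw_fw_hdr; assumes a little-endian host */
struct fw_hdr {
	uint16_t signature;
	uint8_t category;
	uint8_t function;
	uint16_t version;
	uint8_t subversion;
	uint8_t subindex;
} __attribute__((packed));

int main(void)
{
	uint8_t blob[64] = { 0x92, 0x88, 0, 0, 0x1e, 0x00, 6, 7 };
	struct fw_hdr hdr;

	memcpy(&hdr, blob, sizeof(hdr));
	printf("fw v%d.%d.%d\n", hdr.version, hdr.subversion, hdr.subindex);
	return 0;
}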
/* C2H */
#define GET_CCX_REPORT_SEQNUM(c2h_payload) (c2h_payload[8] & 0xfc)
#define GET_CCX_REPORT_STATUS(c2h_payload) (c2h_payload[9] & 0xc0)
+#define GET_RA_REPORT_RATE(c2h_payload) (c2h_payload[0] & 0x7f)
+#define GET_RA_REPORT_SGI(c2h_payload) ((c2h_payload[0] & 0x80) >> 7)
+#define GET_RA_REPORT_BW(c2h_payload) (c2h_payload[6])
+#define GET_RA_REPORT_MACID(c2h_payload) (c2h_payload[1])
+
/* PKT H2C */
#define H2C_PKT_CMD_ID 0xFF
#define H2C_PKT_CATEGORY 0x01
@@ -146,6 +186,7 @@ static inline void rtw_h2c_pkt_set_header(u8 *h2c_pkt, u8 sub_id)
#define H2C_CMD_RSVD_PAGE 0x0
#define H2C_CMD_MEDIA_STATUS_RPT 0x01
#define H2C_CMD_SET_PWR_MODE 0x20
+#define H2C_CMD_LPS_PG_INFO 0x2b
#define H2C_CMD_RA_INFO 0x40
#define H2C_CMD_RSSI_MONITOR 0x42
@@ -177,6 +218,12 @@ static inline void rtw_h2c_pkt_set_header(u8 *h2c_pkt, u8 sub_id)
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(7, 5))
#define SET_PWR_MODE_SET_PWR_STATE(h2c_pkt, value) \
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(15, 8))
+#define LPS_PG_INFO_LOC(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(23, 16))
+#define LPS_PG_DPK_LOC(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(31, 24))
+#define LPS_PG_SEC_CAM_EN(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(8))
#define SET_RSSI_INFO_MACID(h2c_pkt, value) \
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(15, 8))
#define SET_RSSI_INFO_RSSI(h2c_pkt, value) \
@@ -270,6 +317,7 @@ void rtw_fw_send_phydm_info(struct rtw_dev *rtwdev);
void rtw_fw_do_iqk(struct rtw_dev *rtwdev, struct rtw_iqk_para *para);
void rtw_fw_set_pwr_mode(struct rtw_dev *rtwdev);
+void rtw_fw_set_pg_info(struct rtw_dev *rtwdev);
void rtw_fw_query_bt_info(struct rtw_dev *rtwdev);
void rtw_fw_wl_ch_info(struct rtw_dev *rtwdev, u8 link, u8 ch, u8 bw);
void rtw_fw_query_bt_mp_info(struct rtw_dev *rtwdev,
diff --git a/drivers/net/wireless/realtek/rtw88/hci.h b/drivers/net/wireless/realtek/rtw88/hci.h
index aba329c9d0cf..3d91aea942c3 100644
--- a/drivers/net/wireless/realtek/rtw88/hci.h
+++ b/drivers/net/wireless/realtek/rtw88/hci.h
@@ -13,6 +13,8 @@ struct rtw_hci_ops {
int (*setup)(struct rtw_dev *rtwdev);
int (*start)(struct rtw_dev *rtwdev);
void (*stop)(struct rtw_dev *rtwdev);
+ void (*deep_ps)(struct rtw_dev *rtwdev, bool enter);
+ void (*link_ps)(struct rtw_dev *rtwdev, bool enter);
int (*write_data_rsvd_page)(struct rtw_dev *rtwdev, u8 *buf, u32 size);
int (*write_data_h2c)(struct rtw_dev *rtwdev, u8 *buf, u32 size);
@@ -47,6 +49,16 @@ static inline void rtw_hci_stop(struct rtw_dev *rtwdev)
rtwdev->hci.ops->stop(rtwdev);
}
+static inline void rtw_hci_deep_ps(struct rtw_dev *rtwdev, bool enter)
+{
+ rtwdev->hci.ops->deep_ps(rtwdev, enter);
+}
+
+static inline void rtw_hci_link_ps(struct rtw_dev *rtwdev, bool enter)
+{
+ rtwdev->hci.ops->link_ps(rtwdev, enter);
+}
+
static inline int
rtw_hci_write_data_rsvd_page(struct rtw_dev *rtwdev, u8 *buf, u32 size)
{
diff --git a/drivers/net/wireless/realtek/rtw88/mac.c b/drivers/net/wireless/realtek/rtw88/mac.c
index b61b073031e5..507970387b2a 100644
--- a/drivers/net/wireless/realtek/rtw88/mac.c
+++ b/drivers/net/wireless/realtek/rtw88/mac.c
@@ -47,7 +47,7 @@ void rtw_set_channel_mac(struct rtw_dev *rtwdev, u8 channel, u8 bw,
value8 = rtw_read8(rtwdev, REG_CCK_CHECK);
value8 = value8 & ~BIT_CHECK_CCK_EN;
- if (channel > 35)
+ if (IS_CH_5G_BAND(channel))
value8 |= BIT_CHECK_CCK_EN;
rtw_write8(rtwdev, REG_CCK_CHECK, value8);
}
@@ -261,7 +261,7 @@ static int rtw_mac_init_system_cfg(struct rtw_dev *rtwdev)
value |= BIT_WL_PLATFORM_RST | BIT_DDMA_EN;
rtw_write32(rtwdev, REG_CPU_DMEM_CON, value);
- rtw_write8(rtwdev, REG_SYS_FUNC_EN + 1, sys_func_en);
+ rtw_write8_set(rtwdev, REG_SYS_FUNC_EN + 1, sys_func_en);
value8 = (rtw_read8(rtwdev, REG_CR_EXT + 3) & 0xF0) | 0x0C;
rtw_write8(rtwdev, REG_CR_EXT + 3, value8);
@@ -312,15 +312,16 @@ void rtw_mac_power_off(struct rtw_dev *rtwdev)
static bool check_firmware_size(const u8 *data, u32 size)
{
+ const struct rtw_fw_hdr *fw_hdr = (const struct rtw_fw_hdr *)data;
u32 dmem_size;
u32 imem_size;
u32 emem_size;
u32 real_size;
- dmem_size = le32_to_cpu(*((__le32 *)(data + FW_HDR_DMEM_SIZE)));
- imem_size = le32_to_cpu(*((__le32 *)(data + FW_HDR_IMEM_SIZE)));
- emem_size = ((*(data + FW_HDR_MEM_USAGE)) & BIT(4)) ?
- le32_to_cpu(*((__le32 *)(data + FW_HDR_EMEM_SIZE))) : 0;
+ dmem_size = le32_to_cpu(fw_hdr->dmem_size);
+ imem_size = le32_to_cpu(fw_hdr->imem_size);
+ emem_size = (fw_hdr->mem_usage & BIT(4)) ?
+ le32_to_cpu(fw_hdr->emem_size) : 0;
dmem_size += FW_HDR_CHKSUM_SIZE;
imem_size += FW_HDR_CHKSUM_SIZE;
@@ -566,27 +567,10 @@ download_firmware_to_mem(struct rtw_dev *rtwdev, const u8 *data,
return 0;
}
-static void update_firmware_info(struct rtw_dev *rtwdev,
- struct rtw_fw_state *fw)
-{
- const u8 *data = fw->firmware->data;
-
- fw->h2c_version =
- le16_to_cpu(*((__le16 *)(data + FW_HDR_H2C_FMT_VER)));
- fw->version =
- le16_to_cpu(*((__le16 *)(data + FW_HDR_VERSION)));
- fw->sub_version = *(data + FW_HDR_SUBVERSION);
- fw->sub_index = *(data + FW_HDR_SUBINDEX);
-
- rtw_dbg(rtwdev, RTW_DBG_FW, "fw h2c version: %x\n", fw->h2c_version);
- rtw_dbg(rtwdev, RTW_DBG_FW, "fw version: %x\n", fw->version);
- rtw_dbg(rtwdev, RTW_DBG_FW, "fw sub version: %x\n", fw->sub_version);
- rtw_dbg(rtwdev, RTW_DBG_FW, "fw sub index: %x\n", fw->sub_index);
-}
-
static int
start_download_firmware(struct rtw_dev *rtwdev, const u8 *data, u32 size)
{
+ const struct rtw_fw_hdr *fw_hdr = (const struct rtw_fw_hdr *)data;
const u8 *cur_fw;
u16 val;
u32 imem_size;
@@ -595,10 +579,10 @@ start_download_firmware(struct rtw_dev *rtwdev, const u8 *data, u32 size)
u32 addr;
int ret;
- dmem_size = le32_to_cpu(*((__le32 *)(data + FW_HDR_DMEM_SIZE)));
- imem_size = le32_to_cpu(*((__le32 *)(data + FW_HDR_IMEM_SIZE)));
- emem_size = ((*(data + FW_HDR_MEM_USAGE)) & BIT(4)) ?
- le32_to_cpu(*((__le32 *)(data + FW_HDR_EMEM_SIZE))) : 0;
+ dmem_size = le32_to_cpu(fw_hdr->dmem_size);
+ imem_size = le32_to_cpu(fw_hdr->imem_size);
+ emem_size = (fw_hdr->mem_usage & BIT(4)) ?
+ le32_to_cpu(fw_hdr->emem_size) : 0;
dmem_size += FW_HDR_CHKSUM_SIZE;
imem_size += FW_HDR_CHKSUM_SIZE;
emem_size += emem_size ? FW_HDR_CHKSUM_SIZE : 0;
@@ -608,14 +592,14 @@ start_download_firmware(struct rtw_dev *rtwdev, const u8 *data, u32 size)
rtw_write16(rtwdev, REG_MCUFW_CTRL, val);
cur_fw = data + FW_HDR_SIZE;
- addr = le32_to_cpu(*((__le32 *)(data + FW_HDR_DMEM_ADDR)));
+ addr = le32_to_cpu(fw_hdr->dmem_addr);
addr &= ~BIT(31);
ret = download_firmware_to_mem(rtwdev, cur_fw, 0, addr, dmem_size);
if (ret)
return ret;
cur_fw = data + FW_HDR_SIZE + dmem_size;
- addr = le32_to_cpu(*((__le32 *)(data + FW_HDR_IMEM_ADDR)));
+ addr = le32_to_cpu(fw_hdr->imem_addr);
addr &= ~BIT(31);
ret = download_firmware_to_mem(rtwdev, cur_fw, 0, addr, imem_size);
if (ret)
@@ -623,7 +607,7 @@ start_download_firmware(struct rtw_dev *rtwdev, const u8 *data, u32 size)
if (emem_size) {
cur_fw = data + FW_HDR_SIZE + dmem_size + imem_size;
- addr = le32_to_cpu(*((__le32 *)(data + FW_HDR_EMEM_ADDR)));
+ addr = le32_to_cpu(fw_hdr->emem_addr);
addr &= ~BIT(31);
ret = download_firmware_to_mem(rtwdev, cur_fw, 0, addr,
emem_size);
@@ -699,15 +683,13 @@ int rtw_download_firmware(struct rtw_dev *rtwdev, struct rtw_fw_state *fw)
if (ret)
goto dlfw_fail;
- update_firmware_info(rtwdev, fw);
-
/* reset desc and index */
rtw_hci_setup(rtwdev);
rtwdev->h2c.last_box_num = 0;
rtwdev->h2c.seq = 0;
- rtw_flag_set(rtwdev, RTW_FLAG_FW_RUNNING);
+ set_bit(RTW_FLAG_FW_RUNNING, rtwdev->flags);
return 0;
@@ -719,6 +701,93 @@ dlfw_fail:
return ret;
}
+static u32 get_priority_queues(struct rtw_dev *rtwdev, u32 queues)
+{
+ struct rtw_rqpn *rqpn = rtwdev->fifo.rqpn;
+ u32 prio_queues = 0;
+
+ if (queues & BIT(IEEE80211_AC_VO))
+ prio_queues |= BIT(rqpn->dma_map_vo);
+ if (queues & BIT(IEEE80211_AC_VI))
+ prio_queues |= BIT(rqpn->dma_map_vi);
+ if (queues & BIT(IEEE80211_AC_BE))
+ prio_queues |= BIT(rqpn->dma_map_be);
+ if (queues & BIT(IEEE80211_AC_BK))
+ prio_queues |= BIT(rqpn->dma_map_bk);
+
+ return prio_queues;
+}
+
+static void __rtw_mac_flush_prio_queue(struct rtw_dev *rtwdev,
+ u32 prio_queue, bool drop)
+{
+ u32 addr;
+ u16 avail_page, rsvd_page;
+ int i;
+
+ switch (prio_queue) {
+ case RTW_DMA_MAPPING_EXTRA:
+ addr = REG_FIFOPAGE_INFO_4;
+ break;
+ case RTW_DMA_MAPPING_LOW:
+ addr = REG_FIFOPAGE_INFO_2;
+ break;
+ case RTW_DMA_MAPPING_NORMAL:
+ addr = REG_FIFOPAGE_INFO_3;
+ break;
+ case RTW_DMA_MAPPING_HIGH:
+ addr = REG_FIFOPAGE_INFO_1;
+ break;
+ default:
+ return;
+ }
+
+ /* poll for up to 100 ms until all of the reserved pages are available */
+ for (i = 0; i < 5; i++) {
+ rsvd_page = rtw_read16(rtwdev, addr);
+ avail_page = rtw_read16(rtwdev, addr + 2);
+ if (rsvd_page == avail_page)
+ return;
+
+ msleep(20);
+ }
+
+ /* The priority queue is still not empty, so warn about it.
+ *
+ * Note that flushing the tx queue under heavy traffic (e.g.
+ * 100 Mbps uplink) may drop some packets, and flushing a full
+ * priority queue can take roughly 2 seconds.
+ */
+ if (!drop)
+ rtw_warn(rtwdev, "timed out flushing queue %d\n", prio_queue);
+}
+
+static void rtw_mac_flush_prio_queues(struct rtw_dev *rtwdev,
+ u32 prio_queues, bool drop)
+{
+ u32 q;
+
+ for (q = 0; q < RTW_DMA_MAPPING_MAX; q++)
+ if (prio_queues & BIT(q))
+ __rtw_mac_flush_prio_queue(rtwdev, q, drop);
+}
+
+void rtw_mac_flush_queues(struct rtw_dev *rtwdev, u32 queues, bool drop)
+{
+ u32 prio_queues = 0;
+
+ /* If all of the hardware queues are requested to be flushed,
+ * or the priority queues have not been mapped yet,
+ * flush all of the priority queues.
+ */
+ if (queues == BIT(rtwdev->hw->queues) - 1 || !rtwdev->fifo.rqpn)
+ prio_queues = BIT(RTW_DMA_MAPPING_MAX) - 1;
+ else
+ prio_queues = get_priority_queues(rtwdev, queues);
+
+ rtw_mac_flush_prio_queues(rtwdev, prio_queues, drop);
+}
+
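The all-queues bitmap used by callers is simply BIT(hw->queues) - 1 (see rtw_mac_flush_all_queues() added to mac.h below); a quick sketch of that mask arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned int hw_queues = 4; /* IEEE80211_NUM_ACS */
	unsigned int all_queues = (1u << hw_queues) - 1;

	printf("all-queues mask: 0x%x\n", all_queues); /* 0xf: VO VI BE BK */
	return 0;
}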
static int txdma_queue_mapping(struct rtw_dev *rtwdev)
{
struct rtw_chip_info *chip = rtwdev->chip;
@@ -743,6 +812,7 @@ static int txdma_queue_mapping(struct rtw_dev *rtwdev)
return -EINVAL;
}
+ rtwdev->fifo.rqpn = rqpn;
txdma_pq_map |= BIT_TXDMA_HIQ_MAP(rqpn->dma_map_hi);
txdma_pq_map |= BIT_TXDMA_MGQ_MAP(rqpn->dma_map_mg);
txdma_pq_map |= BIT_TXDMA_BKQ_MAP(rqpn->dma_map_bk);
diff --git a/drivers/net/wireless/realtek/rtw88/mac.h b/drivers/net/wireless/realtek/rtw88/mac.h
index efe6f731f240..592dc830160c 100644
--- a/drivers/net/wireless/realtek/rtw88/mac.h
+++ b/drivers/net/wireless/realtek/rtw88/mac.h
@@ -31,5 +31,11 @@ int rtw_mac_power_on(struct rtw_dev *rtwdev);
void rtw_mac_power_off(struct rtw_dev *rtwdev);
int rtw_download_firmware(struct rtw_dev *rtwdev, struct rtw_fw_state *fw);
int rtw_mac_init(struct rtw_dev *rtwdev);
+void rtw_mac_flush_queues(struct rtw_dev *rtwdev, u32 queues, bool drop);
+
+static inline void rtw_mac_flush_all_queues(struct rtw_dev *rtwdev, bool drop)
+{
+ rtw_mac_flush_queues(rtwdev, BIT(rtwdev->hw->queues) - 1, drop);
+}
#endif
diff --git a/drivers/net/wireless/realtek/rtw88/mac80211.c b/drivers/net/wireless/realtek/rtw88/mac80211.c
index e5e3605bb693..34a1c3b53cd4 100644
--- a/drivers/net/wireless/realtek/rtw88/mac80211.c
+++ b/drivers/net/wireless/realtek/rtw88/mac80211.c
@@ -10,6 +10,7 @@
#include "coex.h"
#include "ps.h"
#include "reg.h"
+#include "bf.h"
#include "debug.h"
static void rtw_ops_tx(struct ieee80211_hw *hw,
@@ -17,19 +18,30 @@ static void rtw_ops_tx(struct ieee80211_hw *hw,
struct sk_buff *skb)
{
struct rtw_dev *rtwdev = hw->priv;
- struct rtw_tx_pkt_info pkt_info = {0};
- if (!rtw_flag_check(rtwdev, RTW_FLAG_RUNNING))
- goto out;
+ if (!test_bit(RTW_FLAG_RUNNING, rtwdev->flags)) {
+ ieee80211_free_txskb(hw, skb);
+ return;
+ }
- rtw_tx_pkt_info_update(rtwdev, &pkt_info, control, skb);
- if (rtw_hci_tx(rtwdev, &pkt_info, skb))
- goto out;
+ rtw_tx(rtwdev, control, skb);
+}
- return;
+static void rtw_ops_wake_tx_queue(struct ieee80211_hw *hw,
+ struct ieee80211_txq *txq)
+{
+ struct rtw_dev *rtwdev = hw->priv;
+ struct rtw_txq *rtwtxq = (struct rtw_txq *)txq->drv_priv;
-out:
- ieee80211_free_txskb(hw, skb);
+ if (!test_bit(RTW_FLAG_RUNNING, rtwdev->flags))
+ return;
+
+ spin_lock_bh(&rtwdev->txq_lock);
+ if (list_empty(&rtwtxq->list))
+ list_add_tail(&rtwtxq->list, &rtwdev->txqs);
+ spin_unlock_bh(&rtwdev->txq_lock);
+
+ tasklet_schedule(&rtwdev->tx_tasklet);
}
static int rtw_ops_start(struct ieee80211_hw *hw)
@@ -60,6 +72,8 @@ static int rtw_ops_config(struct ieee80211_hw *hw, u32 changed)
mutex_lock(&rtwdev->mutex);
+ rtw_leave_lps_deep(rtwdev);
+
if (changed & IEEE80211_CONF_CHANGE_IDLE) {
if (hw->conf.flags & IEEE80211_CONF_IDLE) {
rtw_enter_ips(rtwdev);
@@ -72,6 +86,15 @@ static int rtw_ops_config(struct ieee80211_hw *hw, u32 changed)
}
}
+ if (changed & IEEE80211_CONF_CHANGE_PS) {
+ if (hw->conf.flags & IEEE80211_CONF_PS) {
+ rtwdev->ps_enabled = true;
+ } else {
+ rtwdev->ps_enabled = false;
+ rtw_leave_lps(rtwdev);
+ }
+ }
+
if (changed & IEEE80211_CONF_CHANGE_CHANNEL)
rtw_set_channel(rtwdev);
@@ -135,10 +158,14 @@ static int rtw_ops_add_interface(struct ieee80211_hw *hw,
rtwvif->stats.tx_cnt = 0;
rtwvif->stats.rx_cnt = 0;
rtwvif->in_lps = false;
+ memset(&rtwvif->bfee, 0, sizeof(struct rtw_bfee));
rtwvif->conf = &rtw_vif_port[port];
+ rtw_txq_init(rtwdev, vif->txq);
mutex_lock(&rtwdev->mutex);
+ rtw_leave_lps_deep(rtwdev);
+
switch (vif->type) {
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_MESH_POINT:
@@ -181,6 +208,10 @@ static void rtw_ops_remove_interface(struct ieee80211_hw *hw,
mutex_lock(&rtwdev->mutex);
+ rtw_leave_lps_deep(rtwdev);
+
+ rtw_txq_cleanup(rtwdev, vif->txq);
+
eth_zero_addr(rtwvif->mac_addr);
config |= PORT_SET_MAC_ADDR;
rtwvif->net_type = RTW_NET_NO_LINK;
@@ -204,6 +235,8 @@ static void rtw_ops_configure_filter(struct ieee80211_hw *hw,
mutex_lock(&rtwdev->mutex);
+ rtw_leave_lps_deep(rtwdev);
+
if (changed_flags & FIF_ALLMULTI) {
if (*new_flags & FIF_ALLMULTI)
rtwdev->hal.rcr |= BIT_AM | BIT_AB;
@@ -238,6 +271,54 @@ static void rtw_ops_configure_filter(struct ieee80211_hw *hw,
mutex_unlock(&rtwdev->mutex);
}
+/* Only one group of EDCA parameters is supported for now */
+static const u32 ac_to_edca_param[IEEE80211_NUM_ACS] = {
+ [IEEE80211_AC_VO] = REG_EDCA_VO_PARAM,
+ [IEEE80211_AC_VI] = REG_EDCA_VI_PARAM,
+ [IEEE80211_AC_BE] = REG_EDCA_BE_PARAM,
+ [IEEE80211_AC_BK] = REG_EDCA_BK_PARAM,
+};
+
+static u8 rtw_aifsn_to_aifs(struct rtw_dev *rtwdev,
+ struct rtw_vif *rtwvif, u8 aifsn)
+{
+ struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
+ u8 slot_time;
+ u8 sifs;
+
+ slot_time = vif->bss_conf.use_short_slot ? 9 : 20;
+ sifs = rtwdev->hal.current_band_type == RTW_BAND_5G ? 16 : 10;
+
+ return aifsn * slot_time + sifs;
+}
+
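A worked check of the arithmetic above, with 802.11 constants (short slot 9 us, 2.4 GHz SIFS 10 us) and a typical AIFSN of 2:

#include <stdio.h>

int main(void)
{
	unsigned int aifsn = 2, slot_time = 9, sifs = 10;

	/* AIFS = AIFSN * slot + SIFS = 2 * 9 + 10 = 28 us */
	printf("AIFS = %u us\n", aifsn * slot_time + sifs);
	return 0;
}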
+static void __rtw_conf_tx(struct rtw_dev *rtwdev,
+ struct rtw_vif *rtwvif, u16 ac)
+{
+ struct ieee80211_tx_queue_params *params = &rtwvif->tx_params[ac];
+ u32 edca_param = ac_to_edca_param[ac];
+ u8 ecw_max, ecw_min;
+ u8 aifs;
+
+ /* 2^ecw - 1 = cw; ecw = log2(cw + 1) */
+ ecw_max = ilog2(params->cw_max + 1);
+ ecw_min = ilog2(params->cw_min + 1);
+ aifs = rtw_aifsn_to_aifs(rtwdev, rtwvif, params->aifs);
+ rtw_write32_mask(rtwdev, edca_param, BIT_MASK_TXOP_LMT, params->txop);
+ rtw_write32_mask(rtwdev, edca_param, BIT_MASK_CWMAX, ecw_max);
+ rtw_write32_mask(rtwdev, edca_param, BIT_MASK_CWMIN, ecw_min);
+ rtw_write32_mask(rtwdev, edca_param, BIT_MASK_AIFS, aifs);
+}
+
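And a quick check of the ECW conversion in the comment above: cw = 2^ecw - 1, so cw_min = 15 maps to ecw_min = 4. A plain-C sketch of ilog2(cw + 1):

#include <stdio.h>

int main(void)
{
	unsigned int cw_min = 15, ecw = 0;

	while ((1u << (ecw + 1)) <= cw_min + 1) /* ilog2(cw + 1) */
		ecw++;
	printf("cw_min %u -> ecw_min %u\n", cw_min, ecw); /* prints 4 */
	return 0;
}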
+static void rtw_conf_tx(struct rtw_dev *rtwdev,
+ struct rtw_vif *rtwvif)
+{
+ u16 ac;
+
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
+ __rtw_conf_tx(rtwdev, rtwvif, ac);
+}
+
static void rtw_ops_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *conf,
@@ -249,6 +330,8 @@ static void rtw_ops_bss_info_changed(struct ieee80211_hw *hw,
mutex_lock(&rtwdev->mutex);
+ rtw_leave_lps_deep(rtwdev);
+
if (changed & BSS_CHANGED_ASSOC) {
struct rtw_chip_info *chip = rtwdev->chip;
enum rtw_net_type net_type;
@@ -262,13 +345,19 @@ static void rtw_ops_bss_info_changed(struct ieee80211_hw *hw,
rtw_add_rsvd_page(rtwdev, RSVD_PS_POLL, true);
rtw_add_rsvd_page(rtwdev, RSVD_QOS_NULL, true);
rtw_add_rsvd_page(rtwdev, RSVD_NULL, true);
+ rtw_add_rsvd_page(rtwdev, RSVD_LPS_PG_DPK, true);
+ rtw_add_rsvd_page(rtwdev, RSVD_LPS_PG_INFO, true);
rtw_fw_download_rsvd_page(rtwdev, vif);
rtw_send_rsvd_page_h2c(rtwdev);
rtw_coex_media_status_notify(rtwdev, conf->assoc);
+ if (rtw_bf_support)
+ rtw_bf_assoc(rtwdev, vif, conf);
} else {
+ rtw_leave_lps(rtwdev);
net_type = RTW_NET_NO_LINK;
rtwvif->aid = 0;
rtw_reset_rsvd_page(rtwdev);
+ rtw_bf_disassoc(rtwdev, vif, conf);
}
rtwvif->net_type = net_type;
@@ -284,11 +373,39 @@ static void rtw_ops_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_BEACON)
rtw_fw_download_rsvd_page(rtwdev, vif);
+ if (changed & BSS_CHANGED_MU_GROUPS) {
+ struct rtw_chip_info *chip = rtwdev->chip;
+
+ chip->ops->set_gid_table(rtwdev, vif, conf);
+ }
+
+ if (changed & BSS_CHANGED_ERP_SLOT)
+ rtw_conf_tx(rtwdev, rtwvif);
+
rtw_vif_port_config(rtwdev, rtwvif, config);
mutex_unlock(&rtwdev->mutex);
}
+static int rtw_ops_conf_tx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, u16 ac,
+ const struct ieee80211_tx_queue_params *params)
+{
+ struct rtw_dev *rtwdev = hw->priv;
+ struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
+
+ mutex_lock(&rtwdev->mutex);
+
+ rtw_leave_lps_deep(rtwdev);
+
+ rtwvif->tx_params[ac] = *params;
+ __rtw_conf_tx(rtwdev, rtwvif, ac);
+
+ mutex_unlock(&rtwdev->mutex);
+
+ return 0;
+}
+
static u8 rtw_acquire_macid(struct rtw_dev *rtwdev)
{
unsigned long mac_id;
@@ -311,6 +428,7 @@ static int rtw_ops_sta_add(struct ieee80211_hw *hw,
{
struct rtw_dev *rtwdev = hw->priv;
struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;
+ int i;
int ret = 0;
mutex_lock(&rtwdev->mutex);
@@ -325,6 +443,8 @@ static int rtw_ops_sta_add(struct ieee80211_hw *hw,
si->vif = vif;
si->init_ra_lv = 1;
ewma_rssi_init(&si->avg_rssi);
+ for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
+ rtw_txq_init(rtwdev, sta->txq[i]);
rtw_update_sta_info(rtwdev, si);
rtw_fw_media_status_report(rtwdev, si->mac_id, true);
@@ -345,12 +465,18 @@ static int rtw_ops_sta_remove(struct ieee80211_hw *hw,
{
struct rtw_dev *rtwdev = hw->priv;
struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;
+ int i;
mutex_lock(&rtwdev->mutex);
rtw_release_macid(rtwdev, si->mac_id);
rtw_fw_media_status_report(rtwdev, si->mac_id, false);
+ for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
+ rtw_txq_cleanup(rtwdev, sta->txq[i]);
+
+ kfree(si->mask);
+
rtwdev->sta_cnt--;
rtw_info(rtwdev, "sta %pM with macid %d left\n",
@@ -397,6 +523,8 @@ static int rtw_ops_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
mutex_lock(&rtwdev->mutex);
+ rtw_leave_lps_deep(rtwdev);
+
if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
hw_key_idx = rtw_sec_get_free_cam(sec);
} else {
@@ -418,10 +546,15 @@ static int rtw_ops_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
hw_key_type, hw_key_idx);
break;
case DISABLE_KEY:
+ rtw_mac_flush_all_queues(rtwdev, false);
rtw_sec_clear_cam(rtwdev, sec, key->hw_key_idx);
break;
}
+ /* download the new cam settings for PG mode to back up */
+ if (rtw_fw_lps_deep_mode == LPS_DEEP_MODE_PG)
+ rtw_fw_download_rsvd_page(rtwdev, vif);
+
out:
mutex_unlock(&rtwdev->mutex);
@@ -434,17 +567,21 @@ static int rtw_ops_ampdu_action(struct ieee80211_hw *hw,
{
struct ieee80211_sta *sta = params->sta;
u16 tid = params->tid;
+ struct ieee80211_txq *txq = sta->txq[tid];
+ struct rtw_txq *rtwtxq = (struct rtw_txq *)txq->drv_priv;
switch (params->action) {
case IEEE80211_AMPDU_TX_START:
- ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
- break;
+ return IEEE80211_AMPDU_TX_START_IMMEDIATE;
case IEEE80211_AMPDU_TX_STOP_CONT:
case IEEE80211_AMPDU_TX_STOP_FLUSH:
case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+ clear_bit(RTW_TXQ_AMPDU, &rtwtxq->flags);
ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
break;
case IEEE80211_AMPDU_TX_OPERATIONAL:
+ set_bit(RTW_TXQ_AMPDU, &rtwtxq->flags);
+ break;
case IEEE80211_AMPDU_RX_START:
case IEEE80211_AMPDU_RX_STOP:
break;
@@ -464,18 +601,18 @@ static void rtw_ops_sw_scan_start(struct ieee80211_hw *hw,
struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
u32 config = 0;
- rtw_leave_lps(rtwdev, rtwvif);
-
mutex_lock(&rtwdev->mutex);
+ rtw_leave_lps(rtwdev);
+
ether_addr_copy(rtwvif->mac_addr, mac_addr);
config |= PORT_SET_MAC_ADDR;
rtw_vif_port_config(rtwdev, rtwvif, config);
rtw_coex_scan_notify(rtwdev, COEX_SCAN_START);
- rtw_flag_set(rtwdev, RTW_FLAG_DIG_DISABLE);
- rtw_flag_set(rtwdev, RTW_FLAG_SCANNING);
+ set_bit(RTW_FLAG_DIG_DISABLE, rtwdev->flags);
+ set_bit(RTW_FLAG_SCANNING, rtwdev->flags);
mutex_unlock(&rtwdev->mutex);
}
@@ -489,8 +626,8 @@ static void rtw_ops_sw_scan_complete(struct ieee80211_hw *hw,
mutex_lock(&rtwdev->mutex);
- rtw_flag_clear(rtwdev, RTW_FLAG_SCANNING);
- rtw_flag_clear(rtwdev, RTW_FLAG_DIG_DISABLE);
+ clear_bit(RTW_FLAG_SCANNING, rtwdev->flags);
+ clear_bit(RTW_FLAG_DIG_DISABLE, rtwdev->flags);
ether_addr_copy(rtwvif->mac_addr, vif->addr);
config |= PORT_SET_MAC_ADDR;
@@ -508,12 +645,99 @@ static void rtw_ops_mgd_prepare_tx(struct ieee80211_hw *hw,
struct rtw_dev *rtwdev = hw->priv;
mutex_lock(&rtwdev->mutex);
+ rtw_leave_lps_deep(rtwdev);
rtw_coex_connect_notify(rtwdev, COEX_ASSOCIATE_START);
mutex_unlock(&rtwdev->mutex);
}
+static int rtw_ops_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+{
+ struct rtw_dev *rtwdev = hw->priv;
+
+ mutex_lock(&rtwdev->mutex);
+ rtwdev->rts_threshold = value;
+ mutex_unlock(&rtwdev->mutex);
+
+ return 0;
+}
+
+static void rtw_ops_sta_statistics(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct station_info *sinfo)
+{
+ struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;
+
+ sinfo->txrate = si->ra_report.txrate;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
+}
+
+static void rtw_ops_flush(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u32 queues, bool drop)
+{
+ struct rtw_dev *rtwdev = hw->priv;
+
+ mutex_lock(&rtwdev->mutex);
+ rtw_leave_lps_deep(rtwdev);
+
+ rtw_mac_flush_queues(rtwdev, queues, drop);
+ mutex_unlock(&rtwdev->mutex);
+}
+
+struct rtw_iter_bitrate_mask_data {
+ struct rtw_dev *rtwdev;
+ struct ieee80211_vif *vif;
+ const struct cfg80211_bitrate_mask *mask;
+};
+
+static void rtw_ra_mask_info_update_iter(void *data, struct ieee80211_sta *sta)
+{
+ struct rtw_iter_bitrate_mask_data *br_data = data;
+ struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;
+
+ if (si->vif != br_data->vif)
+ return;
+
+ /* free previous mask setting */
+ kfree(si->mask);
+ si->mask = kmemdup(br_data->mask, sizeof(struct cfg80211_bitrate_mask),
+ GFP_ATOMIC);
+ if (!si->mask) {
+ si->use_cfg_mask = false;
+ return;
+ }
+
+ si->use_cfg_mask = true;
+ rtw_update_sta_info(br_data->rtwdev, si);
+}
+
+static void rtw_ra_mask_info_update(struct rtw_dev *rtwdev,
+ struct ieee80211_vif *vif,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ struct rtw_iter_bitrate_mask_data br_data;
+
+ br_data.rtwdev = rtwdev;
+ br_data.vif = vif;
+ br_data.mask = mask;
+ rtw_iterate_stas_atomic(rtwdev, rtw_ra_mask_info_update_iter, &br_data);
+}
+
+static int rtw_ops_set_bitrate_mask(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ struct rtw_dev *rtwdev = hw->priv;
+
+ rtw_ra_mask_info_update(rtwdev, vif, mask);
+
+ return 0;
+}
+
const struct ieee80211_ops rtw_ops = {
.tx = rtw_ops_tx,
+ .wake_tx_queue = rtw_ops_wake_tx_queue,
.start = rtw_ops_start,
.stop = rtw_ops_stop,
.config = rtw_ops_config,
@@ -521,6 +745,7 @@ const struct ieee80211_ops rtw_ops = {
.remove_interface = rtw_ops_remove_interface,
.configure_filter = rtw_ops_configure_filter,
.bss_info_changed = rtw_ops_bss_info_changed,
+ .conf_tx = rtw_ops_conf_tx,
.sta_add = rtw_ops_sta_add,
.sta_remove = rtw_ops_sta_remove,
.set_key = rtw_ops_set_key,
@@ -528,5 +753,9 @@ const struct ieee80211_ops rtw_ops = {
.sw_scan_start = rtw_ops_sw_scan_start,
.sw_scan_complete = rtw_ops_sw_scan_complete,
.mgd_prepare_tx = rtw_ops_mgd_prepare_tx,
+ .set_rts_threshold = rtw_ops_set_rts_threshold,
+ .sta_statistics = rtw_ops_sta_statistics,
+ .flush = rtw_ops_flush,
+ .set_bitrate_mask = rtw_ops_set_bitrate_mask,
};
EXPORT_SYMBOL(rtw_ops);
diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c
index 6dd457741b15..ae61415e1665 100644
--- a/drivers/net/wireless/realtek/rtw88/main.c
+++ b/drivers/net/wireless/realtek/rtw88/main.c
@@ -12,16 +12,22 @@
#include "phy.h"
#include "reg.h"
#include "efuse.h"
+#include "tx.h"
#include "debug.h"
+#include "bf.h"
-static bool rtw_fw_support_lps;
+unsigned int rtw_fw_lps_deep_mode;
+EXPORT_SYMBOL(rtw_fw_lps_deep_mode);
+bool rtw_bf_support = true;
unsigned int rtw_debug_mask;
EXPORT_SYMBOL(rtw_debug_mask);
-module_param_named(support_lps, rtw_fw_support_lps, bool, 0644);
+module_param_named(lps_deep_mode, rtw_fw_lps_deep_mode, uint, 0644);
+module_param_named(support_bf, rtw_bf_support, bool, 0644);
module_param_named(debug_mask, rtw_debug_mask, uint, 0644);
-MODULE_PARM_DESC(support_lps, "Set Y to enable Leisure Power Save support, to turn radio off between beacons");
+MODULE_PARM_DESC(lps_deep_mode, "Deeper PS mode. If 0, deep PS is disabled");
+MODULE_PARM_DESC(support_bf, "Set Y to enable beamformee support");
MODULE_PARM_DESC(debug_mask, "Debugging mask");
static struct ieee80211_channel rtw_channeltable_2g[] = {
@@ -84,6 +90,18 @@ static struct ieee80211_rate rtw_ratetable[] = {
{.bitrate = 540, .hw_value = 0x0b,},
};
+u16 rtw_desc_to_bitrate(u8 desc_rate)
+{
+ struct ieee80211_rate rate;
+
+ if (WARN(desc_rate >= ARRAY_SIZE(rtw_ratetable), "invalid desc rate\n"))
+ return 0;
+
+ rate = rtw_ratetable[desc_rate];
+
+ return rate.bitrate;
+}
+
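The table stores bitrates in units of 100 kbps, so a lookup converts to Mbps by dividing by 10; for instance, the last legacy entry above:

#include <stdio.h>

int main(void)
{
	unsigned short bitrate = 540; /* rtw_ratetable[0x0b].bitrate */

	printf("desc rate 0x0b -> %u.%u Mbps\n", bitrate / 10, bitrate % 10);
	return 0;
}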
static struct ieee80211_supported_band rtw_band_2ghz = {
.band = NL80211_BAND_2GHZ,
@@ -112,29 +130,40 @@ static struct ieee80211_supported_band rtw_band_5ghz = {
};
struct rtw_watch_dog_iter_data {
+ struct rtw_dev *rtwdev;
struct rtw_vif *rtwvif;
- bool active;
- u8 assoc_cnt;
};
+static void rtw_dynamic_csi_rate(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif)
+{
+ struct rtw_bf_info *bf_info = &rtwdev->bf_info;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ u8 fix_rate_enable = 0;
+ u8 new_csi_rate_idx;
+
+ if (rtwvif->bfee.role != RTW_BFEE_SU &&
+ rtwvif->bfee.role != RTW_BFEE_MU)
+ return;
+
+ chip->ops->cfg_csi_rate(rtwdev, rtwdev->dm_info.min_rssi,
+ bf_info->cur_csi_rpt_rate,
+ fix_rate_enable, &new_csi_rate_idx);
+
+ if (new_csi_rate_idx != bf_info->cur_csi_rpt_rate)
+ bf_info->cur_csi_rpt_rate = new_csi_rate_idx;
+}
+
static void rtw_vif_watch_dog_iter(void *data, u8 *mac,
struct ieee80211_vif *vif)
{
struct rtw_watch_dog_iter_data *iter_data = data;
struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
- if (vif->type == NL80211_IFTYPE_STATION) {
- if (vif->bss_conf.assoc) {
- iter_data->assoc_cnt++;
+ if (vif->type == NL80211_IFTYPE_STATION)
+ if (vif->bss_conf.assoc)
iter_data->rtwvif = rtwvif;
- }
- if (rtwvif->stats.tx_cnt > RTW_LPS_THRESHOLD ||
- rtwvif->stats.rx_cnt > RTW_LPS_THRESHOLD)
- iter_data->active = true;
- } else {
- /* only STATION mode can enter lps */
- iter_data->active = true;
- }
+
+ rtw_dynamic_csi_rate(iter_data->rtwdev, rtwvif);
rtwvif->stats.tx_unicast = 0;
rtwvif->stats.rx_unicast = 0;
@@ -149,46 +178,74 @@ static void rtw_watch_dog_work(struct work_struct *work)
{
struct rtw_dev *rtwdev = container_of(work, struct rtw_dev,
watch_dog_work.work);
+ struct rtw_traffic_stats *stats = &rtwdev->stats;
struct rtw_watch_dog_iter_data data = {};
- bool busy_traffic = rtw_flag_check(rtwdev, RTW_FLAG_BUSY_TRAFFIC);
+ bool busy_traffic = test_bit(RTW_FLAG_BUSY_TRAFFIC, rtwdev->flags);
+ bool ps_active;
- if (!rtw_flag_check(rtwdev, RTW_FLAG_RUNNING))
- return;
+ mutex_lock(&rtwdev->mutex);
+
+ if (!test_bit(RTW_FLAG_RUNNING, rtwdev->flags))
+ goto unlock;
ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->watch_dog_work,
RTW_WATCH_DOG_DELAY_TIME);
if (rtwdev->stats.tx_cnt > 100 || rtwdev->stats.rx_cnt > 100)
- rtw_flag_set(rtwdev, RTW_FLAG_BUSY_TRAFFIC);
+ set_bit(RTW_FLAG_BUSY_TRAFFIC, rtwdev->flags);
else
- rtw_flag_clear(rtwdev, RTW_FLAG_BUSY_TRAFFIC);
+ clear_bit(RTW_FLAG_BUSY_TRAFFIC, rtwdev->flags);
- if (busy_traffic != rtw_flag_check(rtwdev, RTW_FLAG_BUSY_TRAFFIC))
+ if (busy_traffic != test_bit(RTW_FLAG_BUSY_TRAFFIC, rtwdev->flags))
rtw_coex_wl_status_change_notify(rtwdev);
+ if (stats->tx_cnt > RTW_LPS_THRESHOLD ||
+ stats->rx_cnt > RTW_LPS_THRESHOLD)
+ ps_active = true;
+ else
+ ps_active = false;
+
+ ewma_tp_add(&stats->tx_ewma_tp,
+ (u32)(stats->tx_unicast >> RTW_TP_SHIFT));
+ ewma_tp_add(&stats->rx_ewma_tp,
+ (u32)(stats->rx_unicast >> RTW_TP_SHIFT));
+ stats->tx_throughput = ewma_tp_read(&stats->tx_ewma_tp);
+ stats->rx_throughput = ewma_tp_read(&stats->rx_ewma_tp);
+
/* reset tx/rx statistics */
- rtwdev->stats.tx_unicast = 0;
- rtwdev->stats.rx_unicast = 0;
- rtwdev->stats.tx_cnt = 0;
- rtwdev->stats.rx_cnt = 0;
+ stats->tx_unicast = 0;
+ stats->rx_unicast = 0;
+ stats->tx_cnt = 0;
+ stats->rx_cnt = 0;
+
+ if (test_bit(RTW_FLAG_SCANNING, rtwdev->flags))
+ goto unlock;
+
+ /* make sure BB/RF is working for dynamic mech */
+ rtw_leave_lps(rtwdev);
+
+ rtw_phy_dynamic_mechanism(rtwdev);
+ data.rtwdev = rtwdev;
/* use atomic version to avoid taking local->iflist_mtx mutex */
rtw_iterate_vifs_atomic(rtwdev, rtw_vif_watch_dog_iter, &data);
/* fw supports lps with only one station associated; if more than one
 * station is associated to the AP, then we can not enter lps, because
 * fw does not handle overlapping beacon intervals
+ *
+ * mac80211 iterates the vifs and determines whether the driver may
+ * enter ps by passing IEEE80211_CONF_PS to us; all we need to do is
+ * get that vif and check whether the device has more traffic than
+ * the threshold.
 */
- if (rtw_fw_support_lps &&
- data.rtwvif && !data.active && data.assoc_cnt == 1)
- rtw_enter_lps(rtwdev, data.rtwvif);
-
- if (rtw_flag_check(rtwdev, RTW_FLAG_SCANNING))
- return;
-
- rtw_phy_dynamic_mechanism(rtwdev);
+ if (rtwdev->ps_enabled && data.rtwvif && !ps_active)
+ rtw_enter_lps(rtwdev, data.rtwvif->port);
rtwdev->watch_dog_cnt++;
+
+unlock:
+ mutex_unlock(&rtwdev->mutex);
}
static void rtw_c2h_work(struct work_struct *work)
@@ -203,6 +260,40 @@ static void rtw_c2h_work(struct work_struct *work)
}
}
+struct rtw_txq_ba_iter_data {
+};
+
+static void rtw_txq_ba_iter(void *data, struct ieee80211_sta *sta)
+{
+ struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;
+ int ret;
+ u8 tid;
+
+ tid = find_first_bit(si->tid_ba, IEEE80211_NUM_TIDS);
+ while (tid != IEEE80211_NUM_TIDS) {
+ clear_bit(tid, si->tid_ba);
+ ret = ieee80211_start_tx_ba_session(sta, tid, 0);
+ if (ret == -EINVAL) {
+ struct ieee80211_txq *txq;
+ struct rtw_txq *rtwtxq;
+
+ txq = sta->txq[tid];
+ rtwtxq = (struct rtw_txq *)txq->drv_priv;
+ set_bit(RTW_TXQ_BLOCK_BA, &rtwtxq->flags);
+ }
+
+ tid = find_first_bit(si->tid_ba, IEEE80211_NUM_TIDS);
+ }
+}
+
+static void rtw_txq_ba_work(struct work_struct *work)
+{
+ struct rtw_dev *rtwdev = container_of(work, struct rtw_dev, ba_work);
+ struct rtw_txq_ba_iter_data data;
+
+ rtw_iterate_stas_atomic(rtwdev, rtw_txq_ba_iter, &data);
+}
+
void rtw_get_channel_params(struct cfg80211_chan_def *chandef,
struct rtw_channel_params *chan_params)
{
@@ -311,7 +402,7 @@ void rtw_set_channel(struct rtw_dev *rtwdev)
if (hal->current_band_type == RTW_BAND_5G) {
rtw_coex_switchband_notify(rtwdev, COEX_SWITCH_TO_5G);
} else {
- if (rtw_flag_check(rtwdev, RTW_FLAG_SCANNING))
+ if (test_bit(RTW_FLAG_SCANNING, rtwdev->flags))
rtw_coex_switchband_notify(rtwdev, COEX_SWITCH_TO_24G);
else
rtw_coex_switchband_notify(rtwdev, COEX_SWITCH_TO_24G_NOFORSCAN);
@@ -529,12 +620,71 @@ static u8 get_rate_id(u8 wireless_set, enum rtw_bandwidth bw_mode, u8 tx_num)
#define RA_MASK_OFDM_IN_HT_2G 0x00010
#define RA_MASK_OFDM_IN_HT_5G 0x00030
+static u64 rtw_update_rate_mask(struct rtw_dev *rtwdev,
+ struct rtw_sta_info *si,
+ u64 ra_mask, bool is_vht_enable,
+ u8 wireless_set)
+{
+ struct rtw_hal *hal = &rtwdev->hal;
+ const struct cfg80211_bitrate_mask *mask = si->mask;
+ u64 cfg_mask = GENMASK_ULL(63, 0);
+ u8 rssi_level, band;
+
+ if (wireless_set != WIRELESS_CCK) {
+ rssi_level = si->rssi_level;
+ if (rssi_level == 0)
+ ra_mask &= 0xffffffffffffffffULL;
+ else if (rssi_level == 1)
+ ra_mask &= 0xfffffffffffffff0ULL;
+ else if (rssi_level == 2)
+ ra_mask &= 0xffffffffffffefe0ULL;
+ else if (rssi_level == 3)
+ ra_mask &= 0xffffffffffffcfc0ULL;
+ else if (rssi_level == 4)
+ ra_mask &= 0xffffffffffff8f80ULL;
+ else if (rssi_level >= 5)
+ ra_mask &= 0xffffffffffff0f00ULL;
+ }
+
+ if (!si->use_cfg_mask)
+ return ra_mask;
+
+ band = hal->current_band_type;
+ if (band == RTW_BAND_2G) {
+ band = NL80211_BAND_2GHZ;
+ cfg_mask = mask->control[band].legacy;
+ } else if (band == RTW_BAND_5G) {
+ band = NL80211_BAND_5GHZ;
+ cfg_mask = u64_encode_bits(mask->control[band].legacy,
+ RA_MASK_OFDM_RATES);
+ }
+
+ if (!is_vht_enable) {
+ if (ra_mask & RA_MASK_HT_RATES_1SS)
+ cfg_mask |= u64_encode_bits(mask->control[band].ht_mcs[0],
+ RA_MASK_HT_RATES_1SS);
+ if (ra_mask & RA_MASK_HT_RATES_2SS)
+ cfg_mask |= u64_encode_bits(mask->control[band].ht_mcs[1],
+ RA_MASK_HT_RATES_2SS);
+ } else {
+ if (ra_mask & RA_MASK_VHT_RATES_1SS)
+ cfg_mask |= u64_encode_bits(mask->control[band].vht_mcs[0],
+ RA_MASK_VHT_RATES_1SS);
+ if (ra_mask & RA_MASK_VHT_RATES_2SS)
+ cfg_mask |= u64_encode_bits(mask->control[band].vht_mcs[1],
+ RA_MASK_VHT_RATES_2SS);
+ }
+
+ ra_mask &= cfg_mask;
+
+ return ra_mask;
+}
+
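A small sketch of the RSSI-level masking above; the assumption that bits 0-3 carry the CCK rates is illustrative, not taken from this patch:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t ra_mask = 0xffffffffffffffffULL;

	ra_mask &= 0xfffffffffffffff0ULL; /* rssi_level 1: drop bits 0-3 */
	printf("ra_mask: 0x%016llx\n", (unsigned long long)ra_mask);
	return 0;
}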
void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si)
{
struct ieee80211_sta *sta = si->sta;
struct rtw_efuse *efuse = &rtwdev->efuse;
struct rtw_hal *hal = &rtwdev->hal;
- u8 rssi_level;
u8 wireless_set;
u8 bw_mode;
u8 rate_id;
@@ -627,21 +777,8 @@ void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si)
rate_id = get_rate_id(wireless_set, bw_mode, tx_num);
- if (wireless_set != WIRELESS_CCK) {
- rssi_level = si->rssi_level;
- if (rssi_level == 0)
- ra_mask &= 0xffffffffffffffffULL;
- else if (rssi_level == 1)
- ra_mask &= 0xfffffffffffffff0ULL;
- else if (rssi_level == 2)
- ra_mask &= 0xffffffffffffefe0ULL;
- else if (rssi_level == 3)
- ra_mask &= 0xffffffffffffcfc0ULL;
- else if (rssi_level == 4)
- ra_mask &= 0xffffffffffff8f80ULL;
- else if (rssi_level >= 5)
- ra_mask &= 0xffffffffffff0f00ULL;
- }
+ ra_mask = rtw_update_rate_mask(rtwdev, si, ra_mask, is_vht_enable,
+ wireless_set);
si->bw_mode = bw_mode;
si->stbc_en = stbc_en;
@@ -737,7 +874,7 @@ int rtw_core_start(struct rtw_dev *rtwdev)
ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->watch_dog_work,
RTW_WATCH_DOG_DELAY_TIME);
- rtw_flag_set(rtwdev, RTW_FLAG_RUNNING);
+ set_bit(RTW_FLAG_RUNNING, rtwdev->flags);
return 0;
}
@@ -752,8 +889,8 @@ void rtw_core_stop(struct rtw_dev *rtwdev)
{
struct rtw_coex *coex = &rtwdev->coex;
- rtw_flag_clear(rtwdev, RTW_FLAG_RUNNING);
- rtw_flag_clear(rtwdev, RTW_FLAG_FW_RUNNING);
+ clear_bit(RTW_FLAG_RUNNING, rtwdev->flags);
+ clear_bit(RTW_FLAG_FW_RUNNING, rtwdev->flags);
cancel_delayed_work_sync(&rtwdev->watch_dog_work);
cancel_delayed_work_sync(&coex->bt_relink_work);
@@ -814,6 +951,12 @@ static void rtw_init_vht_cap(struct rtw_dev *rtwdev,
IEEE80211_VHT_CAP_HTC_VHT |
IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK |
0;
+
+ vht_cap->cap |= IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE |
+ IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE;
+ vht_cap->cap |= (rtwdev->hal.bfee_sts_cap <<
+ IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT);
+
mcs_map = IEEE80211_VHT_MCS_SUPPORT_0_9 << 0 |
IEEE80211_VHT_MCS_NOT_SUPPORTED << 4 |
IEEE80211_VHT_MCS_NOT_SUPPORTED << 6 |
@@ -879,12 +1022,25 @@ static void rtw_load_firmware_cb(const struct firmware *firmware, void *context)
{
struct rtw_dev *rtwdev = context;
struct rtw_fw_state *fw = &rtwdev->fw;
+ const struct rtw_fw_hdr *fw_hdr;
- if (!firmware)
+ if (!firmware || !firmware->data) {
rtw_err(rtwdev, "failed to request firmware\n");
+ complete_all(&fw->completion);
+ return;
+ }
+
+ fw_hdr = (const struct rtw_fw_hdr *)firmware->data;
+ fw->h2c_version = le16_to_cpu(fw_hdr->h2c_fmt_ver);
+ fw->version = le16_to_cpu(fw_hdr->version);
+ fw->sub_version = fw_hdr->subversion;
+ fw->sub_index = fw_hdr->subindex;
fw->firmware = firmware;
complete_all(&fw->completion);
+
+ rtw_info(rtwdev, "Firmware version %u.%u.%u, H2C version %u\n",
+ fw->version, fw->sub_version, fw->sub_index, fw->h2c_version);
}
static int rtw_load_firmware(struct rtw_dev *rtwdev, const char *fw_name)
@@ -914,6 +1070,7 @@ static int rtw_chip_parameter_setup(struct rtw_dev *rtwdev)
switch (rtw_hci_type(rtwdev)) {
case RTW_HCI_TYPE_PCIE:
rtwdev->hci.rpwm_addr = 0x03d9;
+ rtwdev->hci.cpwm_addr = 0x03da;
break;
default:
rtw_err(rtwdev, "unsupported hci type\n");
@@ -948,6 +1105,8 @@ static int rtw_chip_parameter_setup(struct rtw_dev *rtwdev)
/* default use ack */
rtwdev->hal.rcr |= BIT_VHT_DACK;
+ hal->bfee_sts_cap = 3;
+
return ret;
}
@@ -1020,7 +1179,8 @@ static int rtw_dump_hw_feature(struct rtw_dev *rtwdev)
rtw_hw_config_rf_ant_num(rtwdev, efuse->hw_cap.ant_num);
- if (efuse->hw_cap.nss == EFUSE_HW_CAP_IGNORE)
+ if (efuse->hw_cap.nss == EFUSE_HW_CAP_IGNORE ||
+ efuse->hw_cap.nss > rtwdev->hal.rf_path_num)
efuse->hw_cap.nss = rtwdev->hal.rf_path_num;
rtw_dbg(rtwdev, RTW_DBG_EFUSE,
@@ -1047,19 +1207,19 @@ static int rtw_chip_efuse_info_setup(struct rtw_dev *rtwdev)
/* power on mac to read efuse */
ret = rtw_chip_efuse_enable(rtwdev);
if (ret)
- goto out;
+ goto out_unlock;
ret = rtw_parse_efuse_map(rtwdev);
if (ret)
- goto out;
+ goto out_disable;
ret = rtw_dump_hw_feature(rtwdev);
if (ret)
- goto out;
+ goto out_disable;
ret = rtw_check_supported_rfe(rtwdev);
if (ret)
- goto out;
+ goto out_disable;
if (efuse->crystal_cap == 0xff)
efuse->crystal_cap = 0;
@@ -1086,9 +1246,10 @@ static int rtw_chip_efuse_info_setup(struct rtw_dev *rtwdev)
efuse->ext_pa_5g = efuse->pa_type_5g & BIT(0) ? 1 : 0;
efuse->ext_lna_2g = efuse->lna_type_5g & BIT(3) ? 1 : 0;
+out_disable:
rtw_chip_efuse_disable(rtwdev);
-out:
+out_unlock:
mutex_unlock(&rtwdev->mutex);
return ret;
}
@@ -1141,22 +1302,41 @@ err_out:
}
EXPORT_SYMBOL(rtw_chip_info_setup);
+static void rtw_stats_init(struct rtw_dev *rtwdev)
+{
+ struct rtw_traffic_stats *stats = &rtwdev->stats;
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ int i;
+
+ ewma_tp_init(&stats->tx_ewma_tp);
+ ewma_tp_init(&stats->rx_ewma_tp);
+
+ for (i = 0; i < RTW_EVM_NUM; i++)
+ ewma_evm_init(&dm_info->ewma_evm[i]);
+ for (i = 0; i < RTW_SNR_NUM; i++)
+ ewma_snr_init(&dm_info->ewma_snr[i]);
+}
+
int rtw_core_init(struct rtw_dev *rtwdev)
{
+ struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex *coex = &rtwdev->coex;
int ret;
INIT_LIST_HEAD(&rtwdev->rsvd_page_list);
+ INIT_LIST_HEAD(&rtwdev->txqs);
timer_setup(&rtwdev->tx_report.purge_timer,
rtw_tx_report_purge_timer, 0);
+ tasklet_init(&rtwdev->tx_tasklet, rtw_tx_tasklet,
+ (unsigned long)rtwdev);
INIT_DELAYED_WORK(&rtwdev->watch_dog_work, rtw_watch_dog_work);
- INIT_DELAYED_WORK(&rtwdev->lps_work, rtw_lps_work);
INIT_DELAYED_WORK(&coex->bt_relink_work, rtw_coex_bt_relink_work);
INIT_DELAYED_WORK(&coex->bt_reenable_work, rtw_coex_bt_reenable_work);
INIT_DELAYED_WORK(&coex->defreeze_work, rtw_coex_defreeze_work);
INIT_WORK(&rtwdev->c2h_work, rtw_c2h_work);
+ INIT_WORK(&rtwdev->ba_work, rtw_txq_ba_work);
skb_queue_head_init(&rtwdev->c2h_queue);
skb_queue_head_init(&rtwdev->coex.queue);
skb_queue_head_init(&rtwdev->tx_report.queue);
@@ -1164,6 +1344,7 @@ int rtw_core_init(struct rtw_dev *rtwdev)
spin_lock_init(&rtwdev->dm_lock);
spin_lock_init(&rtwdev->rf_lock);
spin_lock_init(&rtwdev->h2c.lock);
+ spin_lock_init(&rtwdev->txq_lock);
spin_lock_init(&rtwdev->tx_report.q_lock);
mutex_init(&rtwdev->mutex);
@@ -1175,11 +1356,17 @@ int rtw_core_init(struct rtw_dev *rtwdev)
rtwdev->sec.total_cam_num = 32;
rtwdev->hal.current_channel = 1;
set_bit(RTW_BC_MC_MACID, rtwdev->mac_id_map);
+ if (!(BIT(rtw_fw_lps_deep_mode) & chip->lps_deep_mode_supported))
+ rtwdev->lps_conf.deep_mode = LPS_DEEP_MODE_NONE;
+ else
+ rtwdev->lps_conf.deep_mode = rtw_fw_lps_deep_mode;
mutex_lock(&rtwdev->mutex);
rtw_add_rsvd_page(rtwdev, RSVD_BEACON, false);
mutex_unlock(&rtwdev->mutex);
+ rtw_stats_init(rtwdev);
+
/* default rx filter setting */
rtwdev->hal.rcr = BIT_APP_FCS | BIT_APP_MIC | BIT_APP_ICV |
BIT_HTC_LOC_CTRL | BIT_APP_PHYSTS |
@@ -1204,6 +1391,7 @@ void rtw_core_deinit(struct rtw_dev *rtwdev)
if (fw->firmware)
release_firmware(fw->firmware);
+ tasklet_kill(&rtwdev->tx_tasklet);
spin_lock_irqsave(&rtwdev->tx_report.q_lock, flags);
skb_queue_purge(&rtwdev->tx_report.queue);
spin_unlock_irqrestore(&rtwdev->tx_report.q_lock, flags);
@@ -1229,6 +1417,7 @@ int rtw_register_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw)
hw->extra_tx_headroom = max_tx_headroom;
hw->queues = IEEE80211_NUM_ACS;
+ hw->txq_data_size = sizeof(struct rtw_txq);
hw->sta_data_size = sizeof(struct rtw_sta_info);
hw->vif_data_size = sizeof(struct rtw_vif);
@@ -1241,6 +1430,8 @@ int rtw_register_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw)
ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
+ ieee80211_hw_set(hw, HAS_RATE_CONTROL);
+ ieee80211_hw_set(hw, TX_AMSDU);
hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP) |
@@ -1252,6 +1443,8 @@ int rtw_register_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw)
hw->wiphy->features |= NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
+ wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CAN_REPLACE_PTK0);
+
rtw_set_supported_band(hw, rtwdev->chip);
SET_IEEE80211_PERM_ADDR(hw, rtwdev->efuse.addr);
@@ -1268,6 +1461,9 @@ int rtw_register_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw)
rtw_debugfs_init(rtwdev);
+ rtwdev->bf_info.bfer_mu_cnt = 0;
+ rtwdev->bf_info.bfer_su_cnt = 0;
+
return 0;
}
EXPORT_SYMBOL(rtw_register_hw);
diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h
index bede3f38516e..d012eefcd0da 100644
--- a/drivers/net/wireless/realtek/rtw88/main.h
+++ b/drivers/net/wireless/realtek/rtw88/main.h
@@ -11,11 +11,13 @@
#include <linux/average.h>
#include <linux/bitops.h>
#include <linux/bitfield.h>
+#include <linux/interrupt.h>
#include "util.h"
#define RTW_MAX_MAC_ID_NUM 32
#define RTW_MAX_SEC_CAM_NUM 32
+#define MAX_PG_CAM_BACKUP_NUM 8
#define RTW_WATCH_DOG_DELAY_TIME round_jiffies_relative(HZ * 2)
@@ -27,6 +29,10 @@
#define RTW_RF_PATH_MAX 4
#define HW_FEATURE_LEN 13
+#define RTW_TP_SHIFT 18 /* bytes/2s --> Mbps */
+
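The shift works because Mbps = bytes * 8 bits / (2 s * 2^20 bits per Mb) = bytes / 2^18; a quick numeric check:

#include <stdio.h>

int main(void)
{
	unsigned long long bytes_per_2s = 52428800ULL; /* 50 MiB in 2 s */

	printf("%llu Mbps\n", bytes_per_2s >> 18); /* prints 200 */
	return 0;
}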
+extern bool rtw_bf_support;
+extern unsigned int rtw_fw_lps_deep_mode;
extern unsigned int rtw_debug_mask;
extern const struct ieee80211_ops rtw_ops;
extern struct rtw_chip_info rtw8822b_hw_spec;
@@ -50,10 +56,24 @@ struct rtw_hci {
enum rtw_hci_type type;
u32 rpwm_addr;
+ u32 cpwm_addr;
u8 bulkout_num;
};
+#define IS_CH_5G_BAND_1(channel) ((channel) >= 36 && (channel) <= 48)
+#define IS_CH_5G_BAND_2(channel) ((channel) >= 52 && (channel) <= 64)
+#define IS_CH_5G_BAND_3(channel) ((channel) >= 100 && (channel) <= 144)
+#define IS_CH_5G_BAND_4(channel) ((channel) >= 149 && (channel) <= 177)
+
+#define IS_CH_5G_BAND_MID(channel) \
+ (IS_CH_5G_BAND_2(channel) || IS_CH_5G_BAND_3(channel))
+
+#define IS_CH_2G_BAND(channel) ((channel) <= 14)
+#define IS_CH_5G_BAND(channel) \
+ (IS_CH_5G_BAND_1(channel) || IS_CH_5G_BAND_2(channel) || \
+ IS_CH_5G_BAND_3(channel) || IS_CH_5G_BAND_4(channel))
+
enum rtw_supported_band {
RTW_BAND_2G = 1 << 0,
RTW_BAND_5G = 1 << 1,
@@ -303,18 +323,50 @@ enum rtw_regulatory_domains {
RTW_REGD_MAX
};
+enum rtw_txq_flags {
+ RTW_TXQ_AMPDU,
+ RTW_TXQ_BLOCK_BA,
+};
+
enum rtw_flags {
RTW_FLAG_RUNNING,
RTW_FLAG_FW_RUNNING,
RTW_FLAG_SCANNING,
RTW_FLAG_INACTIVE_PS,
RTW_FLAG_LEISURE_PS,
+ RTW_FLAG_LEISURE_PS_DEEP,
RTW_FLAG_DIG_DISABLE,
RTW_FLAG_BUSY_TRAFFIC,
NUM_OF_RTW_FLAGS,
};
+enum rtw_evm {
+ RTW_EVM_OFDM = 0,
+ RTW_EVM_1SS,
+ RTW_EVM_2SS_A,
+ RTW_EVM_2SS_B,
+ /* keep it last */
+ RTW_EVM_NUM
+};
+
+enum rtw_snr {
+ RTW_SNR_OFDM_A = 0,
+ RTW_SNR_OFDM_B,
+ RTW_SNR_OFDM_C,
+ RTW_SNR_OFDM_D,
+ RTW_SNR_1SS_A,
+ RTW_SNR_1SS_B,
+ RTW_SNR_1SS_C,
+ RTW_SNR_1SS_D,
+ RTW_SNR_2SS_A,
+ RTW_SNR_2SS_B,
+ RTW_SNR_2SS_C,
+ RTW_SNR_2SS_D,
+ /* keep it last */
+ RTW_SNR_NUM
+};
+
/* the power index is represented by differences from the base values
 * (cck-1s & ht40-1s), so the only differences for 1ss are ht20 & ofdm
 */
@@ -480,6 +532,7 @@ struct rtw_tx_pkt_info {
bool fs;
bool short_gi;
bool report;
+ bool rts;
};
struct rtw_rx_pkt_stat {
@@ -502,10 +555,16 @@ struct rtw_rx_pkt_stat {
s8 rx_power[RTW_RF_PATH_MAX];
u8 rssi;
u8 rxsc;
+ s8 rx_snr[RTW_RF_PATH_MAX];
+ u8 rx_evm[RTW_RF_PATH_MAX];
+ s8 cfo_tail[RTW_RF_PATH_MAX];
+
struct rtw_sta_info *si;
struct ieee80211_vif *vif;
};
+DECLARE_EWMA(tp, 10, 2);
+
struct rtw_traffic_stats {
/* units in bytes */
u64 tx_unicast;
@@ -518,6 +577,8 @@ struct rtw_traffic_stats {
/* units in Mbps */
u32 tx_throughput;
u32 rx_throughput;
+ struct ewma_tp tx_ewma_tp;
+ struct ewma_tp rx_ewma_tp;
};
enum rtw_lps_mode {
@@ -526,6 +587,12 @@ enum rtw_lps_mode {
RTW_MODE_WMM_PS = 2,
};
+enum rtw_lps_deep_mode {
+ LPS_DEEP_MODE_NONE = 0,
+ LPS_DEEP_MODE_LCLK = 1,
+ LPS_DEEP_MODE_PG = 2,
+};
+
enum rtw_pwr_state {
RTW_RF_OFF = 0x0,
RTW_RF_ON = 0x4,
@@ -533,14 +600,14 @@ enum rtw_pwr_state {
};
struct rtw_lps_conf {
- /* the interface to enter lps */
- struct rtw_vif *rtwvif;
enum rtw_lps_mode mode;
+ enum rtw_lps_deep_mode deep_mode;
enum rtw_pwr_state state;
u8 awake_interval;
u8 rlbm;
u8 smart_ps;
u8 port_id;
+ bool sec_cam_backup;
};
enum rtw_hw_key_type {
@@ -576,6 +643,19 @@ struct rtw_tx_report {
struct timer_list purge_timer;
};
+struct rtw_ra_report {
+ struct rate_info txrate;
+ u32 bit_rate;
+ u8 desc_rate;
+};
+
+struct rtw_txq {
+ struct list_head list;
+
+ unsigned long flags;
+ unsigned long last_push;
+};
+
#define RTW_BC_MC_MACID 1
DECLARE_EWMA(rssi, 10, 16);
@@ -598,6 +678,41 @@ struct rtw_sta_info {
bool updated;
u8 init_ra_lv;
u64 ra_mask;
+
+ DECLARE_BITMAP(tid_ba, IEEE80211_NUM_TIDS);
+
+ struct rtw_ra_report ra_report;
+
+ bool use_cfg_mask;
+ struct cfg80211_bitrate_mask *mask;
+};
+
+enum rtw_bfee_role {
+ RTW_BFEE_NONE,
+ RTW_BFEE_SU,
+ RTW_BFEE_MU
+};
+
+struct rtw_bfee {
+ enum rtw_bfee_role role;
+
+ u16 p_aid;
+ u8 g_id;
+ u8 mac_addr[ETH_ALEN];
+ u8 sound_dim;
+
+ /* SU-MIMO */
+ u8 su_reg_index;
+
+ /* MU-MIMO */
+ u16 aid;
+};
+
+struct rtw_bf_info {
+ u8 bfer_mu_cnt;
+ u8 bfer_su_cnt;
+ DECLARE_BITMAP(bfer_su_reg_maping, 2);
+ u8 cur_csi_rpt_rate;
};
struct rtw_vif {
@@ -608,10 +723,13 @@ struct rtw_vif {
u8 bssid[ETH_ALEN];
u8 port;
u8 bcn_ctrl;
+ struct ieee80211_tx_queue_params tx_params[IEEE80211_NUM_ACS];
const struct rtw_vif_port *conf;
struct rtw_traffic_stats stats;
bool in_lps;
+
+ struct rtw_bfee bfee;
};
struct rtw_regulatory {
@@ -643,6 +761,14 @@ struct rtw_chip_ops {
void (*phy_calibration)(struct rtw_dev *rtwdev);
void (*dpk_track)(struct rtw_dev *rtwdev);
void (*cck_pd_set)(struct rtw_dev *rtwdev, u8 level);
+ void (*pwr_track)(struct rtw_dev *rtwdev);
+ void (*config_bfee)(struct rtw_dev *rtwdev, struct rtw_vif *vif,
+ struct rtw_bfee *bfee, bool enable);
+ void (*set_gid_table)(struct rtw_dev *rtwdev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *conf);
+ void (*cfg_csi_rate)(struct rtw_dev *rtwdev, u8 rssi, u8 cur_rate,
+ u8 fixrate_en, u8 *new_rate);
/* for coex */
void (*coex_set_init)(struct rtw_dev *rtwdev);
@@ -747,6 +873,7 @@ enum rtw_dma_mapping {
RTW_DMA_MAPPING_NORMAL = 2,
RTW_DMA_MAPPING_HIGH = 3,
+ RTW_DMA_MAPPING_MAX,
RTW_DMA_MAPPING_UNDEF,
};
@@ -818,6 +945,34 @@ struct rtw_rfe_def {
.txpwr_lmt_tbl = &rtw ## chip ## _txpwr_lmt_type ## pwrlmt ## _tbl, \
}
+#define RTW_PWR_TRK_5G_1 0
+#define RTW_PWR_TRK_5G_2 1
+#define RTW_PWR_TRK_5G_3 2
+#define RTW_PWR_TRK_5G_NUM 3
+
+#define RTW_PWR_TRK_TBL_SZ 30
+
+/* This table stores the TX power deltas that power tracking applies.
+ *
+ * The 5G band has 3 different settings.
+ * The 2.4G band has separate settings for CCK and OFDM rates.
+ */
+struct rtw_pwr_track_tbl {
+ const u8 *pwrtrk_5gb_n[RTW_PWR_TRK_5G_NUM];
+ const u8 *pwrtrk_5gb_p[RTW_PWR_TRK_5G_NUM];
+ const u8 *pwrtrk_5ga_n[RTW_PWR_TRK_5G_NUM];
+ const u8 *pwrtrk_5ga_p[RTW_PWR_TRK_5G_NUM];
+ const u8 *pwrtrk_2gb_n;
+ const u8 *pwrtrk_2gb_p;
+ const u8 *pwrtrk_2ga_n;
+ const u8 *pwrtrk_2ga_p;
+ const u8 *pwrtrk_2g_cckb_n;
+ const u8 *pwrtrk_2g_cckb_p;
+ const u8 *pwrtrk_2g_ccka_n;
+ const u8 *pwrtrk_2g_ccka_p;
+};
+
/* hardware configuration for each IC */
struct rtw_chip_info {
struct rtw_chip_ops *ops;
@@ -844,6 +999,7 @@ struct rtw_chip_info {
bool ht_supported;
bool vht_supported;
+ u8 lps_deep_mode_supported;
/* init values */
u8 sys_func_en;
@@ -868,6 +1024,11 @@ struct rtw_chip_info {
bool en_dis_dpd;
u16 dpd_ratemask;
+ u8 iqk_threshold;
+ const struct rtw_pwr_track_tbl *pwr_track_tbl;
+
+ u8 bfer_su_max_num;
+ u8 bfer_mu_max_num;
/* coex paras */
u32 coex_para_ver;
@@ -1121,10 +1282,26 @@ struct rtw_phy_cck_pd_reg {
#define DACK_MSBK_BACKUP_NUM 0xf
#define DACK_DCK_BACKUP_NUM 0x2
+struct rtw_swing_table {
+ const u8 *p[RTW_RF_PATH_MAX];
+ const u8 *n[RTW_RF_PATH_MAX];
+};
+
+struct rtw_pkt_count {
+ u16 num_bcn_pkt;
+ u16 num_qry_pkt[DESC_RATE_MAX];
+};
+
+DECLARE_EWMA(evm, 10, 4);
+DECLARE_EWMA(snr, 10, 4);
+
struct rtw_dm_info {
u32 cck_fa_cnt;
u32 ofdm_fa_cnt;
u32 total_fa_cnt;
+ u32 cck_cca_cnt;
+ u32 ofdm_cca_cnt;
+ u32 total_cca_cnt;
u32 cck_ok_cnt;
u32 cck_err_cnt;
@@ -1147,6 +1324,15 @@ struct rtw_dm_info {
u8 cck_gi_u_bnd;
u8 cck_gi_l_bnd;
+ u8 tx_rate;
+ u8 thermal_avg[RTW_RF_PATH_MAX];
+ u8 thermal_meter_k;
+ s8 delta_power_index[RTW_RF_PATH_MAX];
+ u8 default_ofdm_index;
+ bool pwr_trk_triggered;
+ bool pwr_trk_init_trigger;
+ struct ewma_thermal avg_thermal[RTW_RF_PATH_MAX];
+
/* backup dack results for each path and I/Q */
u32 dack_adck[RTW_RF_PATH_MAX];
u16 dack_msbk[RTW_RF_PATH_MAX][2][DACK_MSBK_BACKUP_NUM];
@@ -1157,6 +1343,17 @@ struct rtw_dm_info {
/* [bandwidth 0:20M/1:40M][number of path] */
u8 cck_pd_lv[2][RTW_RF_PATH_MAX];
u32 cck_fa_avg;
+
+ /* save the last rx phy status for debug */
+ s8 rx_snr[RTW_RF_PATH_MAX];
+ u8 rx_evm_dbm[RTW_RF_PATH_MAX];
+ s16 cfo_tail[RTW_RF_PATH_MAX];
+ u8 rssi[RTW_RF_PATH_MAX];
+ u8 curr_rx_rate;
+ struct rtw_pkt_count cur_pkt_count;
+ struct rtw_pkt_count last_pkt_count;
+ struct ewma_evm ewma_evm[RTW_EVM_NUM];
+ struct ewma_snr ewma_snr[RTW_SNR_NUM];
};
struct rtw_efuse {
@@ -1170,7 +1367,9 @@ struct rtw_efuse {
u8 country_code[2];
u8 rf_board_option;
u8 rfe_option;
- u8 thermal_meter;
+ u8 power_track_type;
+ u8 thermal_meter[RTW_RF_PATH_MAX];
+ u8 thermal_meter_k;
u8 crystal_cap;
u8 ant_div_cfg;
u8 ant_div_type;
@@ -1252,7 +1451,7 @@ struct rtw_fifo_conf {
u16 rsvd_cpu_instr_addr;
u16 rsvd_fw_txbuf_addr;
u16 rsvd_csibuf_addr;
- enum rtw_dma_mapping pq_map[RTW_PQ_MAP_NUM];
+ struct rtw_rqpn *rqpn;
};
struct rtw_fw_state {
@@ -1289,6 +1488,7 @@ struct rtw_hal {
u8 rf_path_num;
u8 antenna_tx;
u8 antenna_rx;
+ u8 bfee_sts_cap;
/* protect tx power section */
struct mutex tx_power_mutex;
@@ -1326,6 +1526,7 @@ struct rtw_dev {
struct rtw_sec_desc sec;
struct rtw_traffic_stats stats;
struct rtw_regulatory regd;
+ struct rtw_bf_info bf_info;
struct rtw_dm_info dm_info;
struct rtw_coex coex;
@@ -1349,6 +1550,12 @@ struct rtw_dev {
struct sk_buff_head c2h_queue;
struct work_struct c2h_work;
+ /* used to protect txqs list */
+ spinlock_t txq_lock;
+ struct list_head txqs;
+ struct tasklet_struct tx_tasklet;
+ struct work_struct ba_work;
+
struct rtw_tx_report tx_report;
struct {
@@ -1361,11 +1568,12 @@ struct rtw_dev {
/* lps power state & handler work */
struct rtw_lps_conf lps_conf;
- struct delayed_work lps_work;
+ bool ps_enabled;
struct dentry *debugfs;
u8 sta_cnt;
+ u32 rts_threshold;
DECLARE_BITMAP(mac_id_map, RTW_MAX_MAC_ID_NUM);
DECLARE_BITMAP(flags, NUM_OF_RTW_FLAGS);
@@ -1378,24 +1586,23 @@ struct rtw_dev {
#include "hci.h"
-static inline bool rtw_flag_check(struct rtw_dev *rtwdev, enum rtw_flags flag)
+static inline bool rtw_is_assoc(struct rtw_dev *rtwdev)
{
- return test_bit(flag, rtwdev->flags);
+ return !!rtwdev->sta_cnt;
}
-static inline void rtw_flag_clear(struct rtw_dev *rtwdev, enum rtw_flags flag)
+static inline struct ieee80211_txq *rtwtxq_to_txq(struct rtw_txq *rtwtxq)
{
- clear_bit(flag, rtwdev->flags);
-}
+ void *p = rtwtxq;
-static inline void rtw_flag_set(struct rtw_dev *rtwdev, enum rtw_flags flag)
-{
- set_bit(flag, rtwdev->flags);
+ return container_of(p, struct ieee80211_txq, drv_priv);
}
-static inline bool rtw_is_assoc(struct rtw_dev *rtwdev)
+static inline struct ieee80211_vif *rtwvif_to_vif(struct rtw_vif *rtwvif)
{
- return !!rtwdev->sta_cnt;
+ void *p = rtwvif;
+
+ return container_of(p, struct ieee80211_vif, drv_priv);
}
void rtw_get_channel_params(struct cfg80211_chan_def *chandef,
@@ -1405,6 +1612,7 @@ bool ltecoex_read_reg(struct rtw_dev *rtwdev, u16 offset, u32 *val);
bool ltecoex_reg_write(struct rtw_dev *rtwdev, u16 offset, u32 value);
void rtw_restore_reg(struct rtw_dev *rtwdev,
struct rtw_backup_info *bckp, u32 num);
+void rtw_desc_to_mcsrate(u16 rate, u8 *mcs, u8 *nss);
void rtw_set_channel(struct rtw_dev *rtwdev);
void rtw_vif_port_config(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif,
u32 config);
@@ -1417,5 +1625,6 @@ int rtw_core_init(struct rtw_dev *rtwdev);
void rtw_core_deinit(struct rtw_dev *rtwdev);
int rtw_register_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw);
void rtw_unregister_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw);
+u16 rtw_desc_to_bitrate(u8 desc_rate);
#endif
diff --git a/drivers/net/wireless/realtek/rtw88/pci.c b/drivers/net/wireless/realtek/rtw88/pci.c
index d90928be663b..a58e8276a41a 100644
--- a/drivers/net/wireless/realtek/rtw88/pci.c
+++ b/drivers/net/wireless/realtek/rtw88/pci.c
@@ -9,6 +9,7 @@
#include "tx.h"
#include "rx.h"
#include "fw.h"
+#include "ps.h"
#include "debug.h"
static bool rtw_disable_msi;
@@ -457,9 +458,9 @@ static void rtw_pci_reset_buf_desc(struct rtw_dev *rtwdev)
/* reset read/write point */
rtw_write32(rtwdev, RTK_PCI_TXBD_RWPTR_CLR, 0xffffffff);
- /* rest H2C Queue index */
- rtw_write32_set(rtwdev, RTK_PCI_TXBD_H2CQ_CSR, BIT_CLR_H2CQ_HOST_IDX);
- rtw_write32_set(rtwdev, RTK_PCI_TXBD_H2CQ_CSR, BIT_CLR_H2CQ_HW_IDX);
+ /* reset H2C Queue index in a single write */
+ rtw_write32_set(rtwdev, RTK_PCI_TXBD_H2CQ_CSR,
+ BIT_CLR_H2CQ_HOST_IDX | BIT_CLR_H2CQ_HW_IDX);
}
static void rtw_pci_reset_trx_ring(struct rtw_dev *rtwdev)
@@ -536,6 +537,69 @@ static void rtw_pci_stop(struct rtw_dev *rtwdev)
spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
}
+static void rtw_pci_deep_ps_enter(struct rtw_dev *rtwdev)
+{
+ struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
+ struct rtw_pci_tx_ring *tx_ring;
+ bool tx_empty = true;
+ u8 queue;
+
+ lockdep_assert_held(&rtwpci->irq_lock);
+
+ /* TX DMA is not allowed while in the deep PS state */
+ for (queue = 0; queue < RTK_MAX_TX_QUEUE_NUM; queue++) {
+ /* The BCN queue holds the reserved page and has no DMA
+ * interrupt; the H2C queue is managed by firmware
+ */
+ if (queue == RTW_TX_QUEUE_BCN ||
+ queue == RTW_TX_QUEUE_H2C)
+ continue;
+
+ tx_ring = &rtwpci->tx_rings[queue];
+
+ /* check if there is any skb DMAing */
+ if (skb_queue_len(&tx_ring->queue)) {
+ tx_empty = false;
+ break;
+ }
+ }
+
+ if (!tx_empty) {
+ rtw_dbg(rtwdev, RTW_DBG_PS,
+ "TX path not empty, cannot enter deep power save state\n");
+ return;
+ }
+
+ set_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags);
+ rtw_power_mode_change(rtwdev, true);
+}
+
+static void rtw_pci_deep_ps_leave(struct rtw_dev *rtwdev)
+{
+ struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
+
+ lockdep_assert_held(&rtwpci->irq_lock);
+
+ if (test_and_clear_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags))
+ rtw_power_mode_change(rtwdev, false);
+}
+
+static void rtw_pci_deep_ps(struct rtw_dev *rtwdev, bool enter)
+{
+ struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rtwpci->irq_lock, flags);
+
+ if (enter && !test_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags))
+ rtw_pci_deep_ps_enter(rtwdev);
+
+ if (!enter && test_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags))
+ rtw_pci_deep_ps_leave(rtwdev);
+
+ spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
+}
+
static u8 ac_to_hwq[] = {
[IEEE80211_AC_VO] = RTW_TX_QUEUE_VO,
[IEEE80211_AC_VI] = RTW_TX_QUEUE_VI,
@@ -616,6 +680,7 @@ static int rtw_pci_xmit(struct rtw_dev *rtwdev,
u8 *pkt_desc;
struct rtw_pci_tx_buffer_desc *buf_desc;
u32 bd_idx;
+ unsigned long flags;
ring = &rtwpci->tx_rings[queue];
@@ -651,6 +716,10 @@ static int rtw_pci_xmit(struct rtw_dev *rtwdev,
tx_data = rtw_pci_get_tx_data(skb);
tx_data->dma = dma;
tx_data->sn = pkt_info->sn;
+
+ spin_lock_irqsave(&rtwpci->irq_lock, flags);
+
+ rtw_pci_deep_ps_leave(rtwdev);
skb_queue_tail(&ring->queue, skb);
/* kick off tx queue */
@@ -666,6 +735,7 @@ static int rtw_pci_xmit(struct rtw_dev *rtwdev,
reg_bcn_work |= BIT_PCI_BCNQ_FLAG;
rtw_write8(rtwdev, RTK_PCI_TXBD_BCN_WORK, reg_bcn_work);
}
+ spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
return 0;
}
@@ -990,23 +1060,49 @@ static void rtw_pci_io_unmapping(struct rtw_dev *rtwdev,
static void rtw_dbi_write8(struct rtw_dev *rtwdev, u16 addr, u8 data)
{
u16 write_addr;
- u16 remainder = addr & 0x3;
+ u16 remainder = addr & ~(BITS_DBI_WREN | BITS_DBI_ADDR_MASK);
u8 flag;
- u8 cnt = 20;
+ u8 cnt;
- write_addr = ((addr & 0x0ffc) | (BIT(0) << (remainder + 12)));
+ write_addr = addr & BITS_DBI_ADDR_MASK;
+ write_addr |= u16_encode_bits(BIT(remainder), BITS_DBI_WREN);
rtw_write8(rtwdev, REG_DBI_WDATA_V1 + remainder, data);
rtw_write16(rtwdev, REG_DBI_FLAG_V1, write_addr);
- rtw_write8(rtwdev, REG_DBI_FLAG_V1 + 2, 0x01);
+ rtw_write8(rtwdev, REG_DBI_FLAG_V1 + 2, BIT_DBI_WFLAG >> 16);
+
+ for (cnt = 0; cnt < RTW_PCI_WR_RETRY_CNT; cnt++) {
+ flag = rtw_read8(rtwdev, REG_DBI_FLAG_V1 + 2);
+ if (flag == 0)
+ return;
- flag = rtw_read8(rtwdev, REG_DBI_FLAG_V1 + 2);
- while (flag && (cnt != 0)) {
udelay(10);
+ }
+
+ WARN(flag, "failed to write to DBI register, addr=0x%04x\n", addr);
+}
+
+static int rtw_dbi_read8(struct rtw_dev *rtwdev, u16 addr, u8 *value)
+{
+ u16 read_addr = addr & BITS_DBI_ADDR_MASK;
+ u8 flag;
+ u8 cnt;
+
+ rtw_write16(rtwdev, REG_DBI_FLAG_V1, read_addr);
+ rtw_write8(rtwdev, REG_DBI_FLAG_V1 + 2, BIT_DBI_RFLAG >> 16);
+
+ for (cnt = 0; cnt < RTW_PCI_WR_RETRY_CNT; cnt++) {
flag = rtw_read8(rtwdev, REG_DBI_FLAG_V1 + 2);
- cnt--;
+ if (flag == 0) {
+ read_addr = REG_DBI_RDATA_V1 + (addr & 3);
+ *value = rtw_read8(rtwdev, read_addr);
+ return 0;
+ }
+
+ udelay(10);
}
- WARN(flag, "DBI write fail\n");
+ WARN(1, "failed to read DBI register, addr=0x%04x\n", addr);
+ return -EIO;
}
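The rewritten DBI accessors encode the target byte lane with the bitfield helpers instead of hand-shifted magic numbers. A worked example, assuming the masks added to pci.h further below:

u16 addr = 0x0719;                          /* e.g. RTK_PCIE_LINK_CFG */
u16 remainder = addr & 0x3;                 /* byte lane 1 */
u16 write_addr = addr & BITS_DBI_ADDR_MASK; /* 0x0718 */

/* BIT(1) = 0x2 placed into bits 15:12 -> write_addr = 0x2718 */
write_addr |= u16_encode_bits(BIT(remainder), BITS_DBI_WREN);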
static void rtw_mdio_write(struct rtw_dev *rtwdev, u8 addr, u16 data, bool g1)
@@ -1017,23 +1113,113 @@ static void rtw_mdio_write(struct rtw_dev *rtwdev, u8 addr, u16 data, bool g1)
rtw_write16(rtwdev, REG_MDIO_V1, data);
- page = addr < 0x20 ? 0 : 1;
- page += g1 ? 0 : 2;
- rtw_write8(rtwdev, REG_PCIE_MIX_CFG, addr & 0x1f);
+ page = addr < RTW_PCI_MDIO_PG_SZ ? 0 : 1;
+ page += g1 ? RTW_PCI_MDIO_PG_OFFS_G1 : RTW_PCI_MDIO_PG_OFFS_G2;
+ rtw_write8(rtwdev, REG_PCIE_MIX_CFG, addr & BITS_MDIO_ADDR_MASK);
rtw_write8(rtwdev, REG_PCIE_MIX_CFG + 3, page);
-
rtw_write32_mask(rtwdev, REG_PCIE_MIX_CFG, BIT_MDIO_WFLAG_V1, 1);
- wflag = rtw_read32_mask(rtwdev, REG_PCIE_MIX_CFG, BIT_MDIO_WFLAG_V1);
- cnt = 20;
- while (wflag && (cnt != 0)) {
- udelay(10);
+ for (cnt = 0; cnt < RTW_PCI_WR_RETRY_CNT; cnt++) {
wflag = rtw_read32_mask(rtwdev, REG_PCIE_MIX_CFG,
BIT_MDIO_WFLAG_V1);
- cnt--;
+ if (wflag == 0)
+ return;
+
+ udelay(10);
+ }
+
+ WARN(wflag, "failed to write to MDIO register, addr=0x%02x\n", addr);
+}
+
+static void rtw_pci_clkreq_set(struct rtw_dev *rtwdev, bool enable)
+{
+ u8 value;
+ int ret;
+
+ ret = rtw_dbi_read8(rtwdev, RTK_PCIE_LINK_CFG, &value);
+ if (ret) {
+ rtw_err(rtwdev, "failed to read CLKREQ_L1, ret=%d", ret);
+ return;
}
- WARN(wflag, "MDIO write fail\n");
+ if (enable)
+ value |= BIT_CLKREQ_SW_EN;
+ else
+ value &= ~BIT_CLKREQ_SW_EN;
+
+ rtw_dbi_write8(rtwdev, RTK_PCIE_LINK_CFG, value);
+}
+
+static void rtw_pci_aspm_set(struct rtw_dev *rtwdev, bool enable)
+{
+ u8 value;
+ int ret;
+
+ ret = rtw_dbi_read8(rtwdev, RTK_PCIE_LINK_CFG, &value);
+ if (ret) {
+ rtw_err(rtwdev, "failed to read ASPM, ret=%d", ret);
+ return;
+ }
+
+ if (enable)
+ value |= BIT_L1_SW_EN;
+ else
+ value &= ~BIT_L1_SW_EN;
+
+ rtw_dbi_write8(rtwdev, RTK_PCIE_LINK_CFG, value);
+}
+
+static void rtw_pci_link_ps(struct rtw_dev *rtwdev, bool enter)
+{
+ struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
+
+ /* Like CLKREQ, ASPM is also implemented by two HW modules, and can
+ * only be enabled when the host supports it.
+ *
+ * The ASPM mechanism should only be enabled when the driver/firmware
+ * enters power save mode and there is no heavy traffic: we have seen
+ * interoperability issues where the link enters the L1 state on the
+ * fly even while the driver is sustaining high throughput, probably
+ * because the ASPM behavior varies slightly between SoCs.
+ */
+ if (rtwpci->link_ctrl & PCI_EXP_LNKCTL_ASPM_L1)
+ rtw_pci_aspm_set(rtwdev, enter);
+}
+
+static void rtw_pci_link_cfg(struct rtw_dev *rtwdev)
+{
+ struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
+ struct pci_dev *pdev = rtwpci->pdev;
+ u16 link_ctrl;
+ int ret;
+
+ /* Although the standard PCIE configuration space can set the link
+ * control register, by Realtek's design the driver must also check
+ * whether the host supports CLKREQ/ASPM before enabling the HW
+ * module.
+ *
+ * These features are implemented by two associated HW modules: one
+ * accesses the PCIE configuration space to follow the host settings,
+ * and the other performs the actual CLKREQ/ASPM mechanisms and is
+ * disabled by default. If the host does not support these features,
+ * or is misconfigured (e.g. CLKREQ# is not bi-directional), enabling
+ * them could cause the device to drop off the link if the HW
+ * misbehaves.
+ *
+ * Hence the driver first checks that the PCIE configuration space is
+ * synced and enabled, and only then turns on the module that actually
+ * implements the mechanism.
+ */
+ ret = pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &link_ctrl);
+ if (ret) {
+ rtw_err(rtwdev, "failed to read PCI cap, ret=%d\n", ret);
+ return;
+ }
+
+ if (link_ctrl & PCI_EXP_LNKCTL_CLKREQ_EN)
+ rtw_pci_clkreq_set(rtwdev, true);
+
+ rtwpci->link_ctrl = link_ctrl;
}
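For reference, decoding a Link Control value looks like this (the value 0x0142 is hypothetical; the mask names are the standard ones from <uapi/linux/pci_regs.h>):

u16 link_ctrl = 0x0142;

if (link_ctrl & PCI_EXP_LNKCTL_CLKREQ_EN)   /* 0x0100: set */
	pr_info("host enables CLKREQ#\n");
if (link_ctrl & PCI_EXP_LNKCTL_ASPM_L1)     /* 0x0002: set */
	pr_info("host allows ASPM L1 entry\n");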
static void rtw_pci_phy_cfg(struct rtw_dev *rtwdev)
@@ -1074,6 +1260,8 @@ static void rtw_pci_phy_cfg(struct rtw_dev *rtwdev)
else
rtw_dbi_write8(rtwdev, offset, value);
}
+
+ rtw_pci_link_cfg(rtwdev);
}
static int rtw_pci_claim(struct rtw_dev *rtwdev, struct pci_dev *pdev)
@@ -1120,8 +1308,6 @@ static int rtw_pci_setup_resource(struct rtw_dev *rtwdev, struct pci_dev *pdev)
goto err_io_unmap;
}
- rtw_pci_phy_cfg(rtwdev);
-
return 0;
err_io_unmap:
@@ -1142,6 +1328,8 @@ static struct rtw_hci_ops rtw_pci_ops = {
.setup = rtw_pci_setup,
.start = rtw_pci_start,
.stop = rtw_pci_stop,
+ .deep_ps = rtw_pci_deep_ps,
+ .link_ps = rtw_pci_link_ps,
.read8 = rtw_pci_read8,
.read16 = rtw_pci_read16,
@@ -1233,6 +1421,8 @@ static int rtw_pci_probe(struct pci_dev *pdev,
goto err_destroy_pci;
}
+ rtw_pci_phy_cfg(rtwdev);
+
ret = rtw_register_hw(rtwdev, hw);
if (ret) {
rtw_err(rtwdev, "failed to register hw\n");
diff --git a/drivers/net/wireless/realtek/rtw88/pci.h b/drivers/net/wireless/realtek/rtw88/pci.h
index 87824a4caba9..49bf29a92152 100644
--- a/drivers/net/wireless/realtek/rtw88/pci.h
+++ b/drivers/net/wireless/realtek/rtw88/pci.h
@@ -20,10 +20,25 @@
#define BIT_RST_TRXDMA_INTF BIT(20)
#define BIT_RX_TAG_EN BIT(15)
#define REG_DBI_WDATA_V1 0x03E8
+#define REG_DBI_RDATA_V1 0x03EC
#define REG_DBI_FLAG_V1 0x03F0
+#define BIT_DBI_RFLAG BIT(17)
+#define BIT_DBI_WFLAG BIT(16)
+#define BITS_DBI_WREN GENMASK(15, 12)
+#define BITS_DBI_ADDR_MASK GENMASK(11, 2)
+
#define REG_MDIO_V1 0x03F4
#define REG_PCIE_MIX_CFG 0x03F8
+#define BITS_MDIO_ADDR_MASK GENMASK(4, 0)
#define BIT_MDIO_WFLAG_V1 BIT(5)
+#define RTW_PCI_MDIO_PG_SZ BIT(5)
+#define RTW_PCI_MDIO_PG_OFFS_G1 0
+#define RTW_PCI_MDIO_PG_OFFS_G2 2
+#define RTW_PCI_WR_RETRY_CNT 20
+
+#define RTK_PCIE_LINK_CFG 0x0719
+#define BIT_CLKREQ_SW_EN BIT(4)
+#define BIT_L1_SW_EN BIT(3)
#define BIT_PCI_BCNQ_FLAG BIT(4)
#define RTK_PCI_TXBD_DESA_BCNQ 0x308
@@ -190,6 +205,7 @@ struct rtw_pci {
u16 rx_tag;
struct rtw_pci_tx_ring tx_rings[RTK_MAX_TX_QUEUE_NUM];
struct rtw_pci_rx_ring rx_rings[RTK_MAX_RX_QUEUE_NUM];
+ u16 link_ctrl;
void __iomem *mmap;
};
diff --git a/drivers/net/wireless/realtek/rtw88/phy.c b/drivers/net/wireless/realtek/rtw88/phy.c
index d3d3f40de75e..a3e1e9578b65 100644
--- a/drivers/net/wireless/realtek/rtw88/phy.c
+++ b/drivers/net/wireless/realtek/rtw88/phy.c
@@ -20,15 +20,6 @@ union phy_table_tile {
struct phy_cfg_pair cfg;
};
-struct phy_pg_cfg_pair {
- u32 band;
- u32 rf_path;
- u32 tx_num;
- u32 addr;
- u32 bitmask;
- u32 data;
-};
-
static const u32 db_invert_table[12][8] = {
{10, 13, 16, 20,
25, 32, 40, 50},
@@ -118,7 +109,7 @@ static void rtw_phy_cck_pd_init(struct rtw_dev *rtwdev)
for (i = 0; i <= RTW_CHANNEL_WIDTH_40; i++) {
for (j = 0; j < RTW_RF_PATH_MAX; j++)
- dm_info->cck_pd_lv[i][j] = 0;
+ dm_info->cck_pd_lv[i][j] = CCK_PD_LV0;
}
dm_info->cck_fa_avg = CCK_FA_AVG_RESET;
@@ -222,10 +213,19 @@ static void rtw_phy_stat_rssi(struct rtw_dev *rtwdev)
dm_info->min_rssi = data.min_rssi;
}
+static void rtw_phy_stat_rate_cnt(struct rtw_dev *rtwdev)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+
+ dm_info->last_pkt_count = dm_info->cur_pkt_count;
+ memset(&dm_info->cur_pkt_count, 0, sizeof(dm_info->cur_pkt_count));
+}
+
static void rtw_phy_statistics(struct rtw_dev *rtwdev)
{
rtw_phy_stat_rssi(rtwdev);
rtw_phy_stat_false_alarm(rtwdev);
+ rtw_phy_stat_rate_cnt(rtwdev);
}
#define DIG_PERF_FA_TH_LOW 250
@@ -394,7 +394,7 @@ static void rtw_phy_dig(struct rtw_dev *rtwdev)
u8 step[3];
bool linked;
- if (rtw_flag_check(rtwdev, RTW_FLAG_DIG_DISABLE))
+ if (test_bit(RTW_FLAG_DIG_DISABLE, rtwdev->flags))
return;
if (rtw_phy_dig_check_damping(dm_info))
@@ -461,7 +461,6 @@ static void rtw_phy_dpk_track(struct rtw_dev *rtwdev)
chip->ops->dpk_track(rtwdev);
}
-#define CCK_PD_LV_MAX 5
#define CCK_PD_FA_LV1_MIN 1000
#define CCK_PD_FA_LV0_MAX 500
@@ -471,10 +470,10 @@ static u8 rtw_phy_cck_pd_lv_unlink(struct rtw_dev *rtwdev)
u32 cck_fa_avg = dm_info->cck_fa_avg;
if (cck_fa_avg > CCK_PD_FA_LV1_MIN)
- return 1;
+ return CCK_PD_LV1;
if (cck_fa_avg < CCK_PD_FA_LV0_MAX)
- return 0;
+ return CCK_PD_LV0;
return CCK_PD_LV_MAX;
}
@@ -494,15 +493,15 @@ static u8 rtw_phy_cck_pd_lv_link(struct rtw_dev *rtwdev)
u32 cck_fa_avg = dm_info->cck_fa_avg;
if (igi > CCK_PD_IGI_LV4_VAL && rssi > CCK_PD_RSSI_LV4_VAL)
- return 4;
+ return CCK_PD_LV4;
if (igi > CCK_PD_IGI_LV3_VAL && rssi > CCK_PD_RSSI_LV3_VAL)
- return 3;
+ return CCK_PD_LV3;
if (igi > CCK_PD_IGI_LV2_VAL || rssi > CCK_PD_RSSI_LV2_VAL)
- return 2;
+ return CCK_PD_LV2;
if (cck_fa_avg > CCK_PD_FA_LV1_MIN)
- return 1;
+ return CCK_PD_LV1;
if (cck_fa_avg < CCK_PD_FA_LV0_MAX)
- return 0;
+ return CCK_PD_LV0;
return CCK_PD_LV_MAX;
}
@@ -539,6 +538,11 @@ static void rtw_phy_cck_pd(struct rtw_dev *rtwdev)
chip->ops->cck_pd_set(rtwdev, level);
}
+static void rtw_phy_pwr_track(struct rtw_dev *rtwdev)
+{
+ rtwdev->chip->ops->pwr_track(rtwdev);
+}
+
void rtw_phy_dynamic_mechanism(struct rtw_dev *rtwdev)
{
/* for further calculation */
@@ -547,6 +551,7 @@ void rtw_phy_dynamic_mechanism(struct rtw_dev *rtwdev)
rtw_phy_cck_pd(rtwdev);
rtw_phy_ra_info_update(rtwdev);
rtw_phy_dpk_track(rtwdev);
+ rtw_phy_pwr_track(rtwdev);
}
#define FRAC_BITS 3
@@ -1211,10 +1216,8 @@ static void rtw_phy_store_tx_power_by_rate(struct rtw_dev *rtwdev,
void rtw_parse_tbl_bb_pg(struct rtw_dev *rtwdev, const struct rtw_table *tbl)
{
- const struct phy_pg_cfg_pair *p = tbl->data;
- const struct phy_pg_cfg_pair *end = p + tbl->size / 6;
-
- BUILD_BUG_ON(sizeof(struct phy_pg_cfg_pair) != sizeof(u32) * 6);
+ const struct rtw_phy_pg_cfg_pair *p = tbl->data;
+ const struct rtw_phy_pg_cfg_pair *end = p + tbl->size;
for (; p < end; p++) {
if (p->addr == 0xfe || p->addr == 0xffe) {
@@ -1748,7 +1751,7 @@ void rtw_get_tx_power_params(struct rtw_dev *rtwdev, u8 path, u8 rate, u8 bw,
group = rtw_get_channel_group(ch);
/* base power index for 2.4G/5G */
- if (ch <= 14) {
+ if (IS_CH_2G_BAND(ch)) {
band = PHY_BAND_2G;
*base = rtw_phy_get_2g_tx_power_index(rtwdev,
&pwr_idx->pwr_idx_2g,
@@ -1968,3 +1971,123 @@ void rtw_phy_init_tx_power(struct rtw_dev *rtwdev)
rtw_phy_init_tx_power_limit(rtwdev, regd, bw,
rs);
}
+
+void rtw_phy_config_swing_table(struct rtw_dev *rtwdev,
+ struct rtw_swing_table *swing_table)
+{
+ const struct rtw_pwr_track_tbl *tbl = rtwdev->chip->pwr_track_tbl;
+ u8 channel = rtwdev->hal.current_channel;
+
+ if (IS_CH_2G_BAND(channel)) {
+ if (rtwdev->dm_info.tx_rate <= DESC_RATE11M) {
+ swing_table->p[RF_PATH_A] = tbl->pwrtrk_2g_ccka_p;
+ swing_table->n[RF_PATH_A] = tbl->pwrtrk_2g_ccka_n;
+ swing_table->p[RF_PATH_B] = tbl->pwrtrk_2g_cckb_p;
+ swing_table->n[RF_PATH_B] = tbl->pwrtrk_2g_cckb_n;
+ } else {
+ swing_table->p[RF_PATH_A] = tbl->pwrtrk_2ga_p;
+ swing_table->n[RF_PATH_A] = tbl->pwrtrk_2ga_n;
+ swing_table->p[RF_PATH_B] = tbl->pwrtrk_2gb_p;
+ swing_table->n[RF_PATH_B] = tbl->pwrtrk_2gb_n;
+ }
+ } else if (IS_CH_5G_BAND_1(channel) || IS_CH_5G_BAND_2(channel)) {
+ swing_table->p[RF_PATH_A] = tbl->pwrtrk_5ga_p[RTW_PWR_TRK_5G_1];
+ swing_table->n[RF_PATH_A] = tbl->pwrtrk_5ga_n[RTW_PWR_TRK_5G_1];
+ swing_table->p[RF_PATH_B] = tbl->pwrtrk_5gb_p[RTW_PWR_TRK_5G_1];
+ swing_table->n[RF_PATH_B] = tbl->pwrtrk_5gb_n[RTW_PWR_TRK_5G_1];
+ } else if (IS_CH_5G_BAND_3(channel)) {
+ swing_table->p[RF_PATH_A] = tbl->pwrtrk_5ga_p[RTW_PWR_TRK_5G_2];
+ swing_table->n[RF_PATH_A] = tbl->pwrtrk_5ga_n[RTW_PWR_TRK_5G_2];
+ swing_table->p[RF_PATH_B] = tbl->pwrtrk_5gb_p[RTW_PWR_TRK_5G_2];
+ swing_table->n[RF_PATH_B] = tbl->pwrtrk_5gb_n[RTW_PWR_TRK_5G_2];
+ } else if (IS_CH_5G_BAND_4(channel)) {
+ swing_table->p[RF_PATH_A] = tbl->pwrtrk_5ga_p[RTW_PWR_TRK_5G_3];
+ swing_table->n[RF_PATH_A] = tbl->pwrtrk_5ga_n[RTW_PWR_TRK_5G_3];
+ swing_table->p[RF_PATH_B] = tbl->pwrtrk_5gb_p[RTW_PWR_TRK_5G_3];
+ swing_table->n[RF_PATH_B] = tbl->pwrtrk_5gb_n[RTW_PWR_TRK_5G_3];
+ } else {
+ swing_table->p[RF_PATH_A] = tbl->pwrtrk_2ga_p;
+ swing_table->n[RF_PATH_A] = tbl->pwrtrk_2ga_n;
+ swing_table->p[RF_PATH_B] = tbl->pwrtrk_2gb_p;
+ swing_table->n[RF_PATH_B] = tbl->pwrtrk_2gb_n;
+ }
+}
+
+void rtw_phy_pwrtrack_avg(struct rtw_dev *rtwdev, u8 thermal, u8 path)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+
+ ewma_thermal_add(&dm_info->avg_thermal[path], thermal);
+ dm_info->thermal_avg[path] =
+ ewma_thermal_read(&dm_info->avg_thermal[path]);
+}
+
+bool rtw_phy_pwrtrack_thermal_changed(struct rtw_dev *rtwdev, u8 thermal,
+ u8 path)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ u8 avg = ewma_thermal_read(&dm_info->avg_thermal[path]);
+
+ if (avg == thermal)
+ return false;
+
+ return true;
+}
+
+u8 rtw_phy_pwrtrack_get_delta(struct rtw_dev *rtwdev, u8 path)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ u8 therm_avg, therm_efuse, therm_delta;
+
+ therm_avg = dm_info->thermal_avg[path];
+ therm_efuse = rtwdev->efuse.thermal_meter[path];
+ therm_delta = abs(therm_avg - therm_efuse);
+
+ return min_t(u8, therm_delta, RTW_PWR_TRK_TBL_SZ - 1);
+}
+
+s8 rtw_phy_pwrtrack_get_pwridx(struct rtw_dev *rtwdev,
+ struct rtw_swing_table *swing_table,
+ u8 tbl_path, u8 therm_path, u8 delta)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ const u8 *delta_swing_table_idx_pos;
+ const u8 *delta_swing_table_idx_neg;
+
+ if (delta >= RTW_PWR_TRK_TBL_SZ) {
+ rtw_warn(rtwdev, "power track table overflow\n");
+ return 0;
+ }
+
+ if (!swing_table) {
+ rtw_warn(rtwdev, "swing table not configured\n");
+ return 0;
+ }
+
+ delta_swing_table_idx_pos = swing_table->p[tbl_path];
+ delta_swing_table_idx_neg = swing_table->n[tbl_path];
+
+ if (!delta_swing_table_idx_pos || !delta_swing_table_idx_neg) {
+ rtw_warn(rtwdev, "invalid swing table index\n");
+ return 0;
+ }
+
+ if (dm_info->thermal_avg[therm_path] >
+ rtwdev->efuse.thermal_meter[therm_path])
+ return delta_swing_table_idx_pos[delta];
+ else
+ return -delta_swing_table_idx_neg[delta];
+}
+
+bool rtw_phy_pwrtrack_need_iqk(struct rtw_dev *rtwdev)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ u8 delta_iqk;
+
+ delta_iqk = abs(dm_info->thermal_avg[0] - dm_info->thermal_meter_k);
+ if (delta_iqk >= rtwdev->chip->iqk_threshold) {
+ dm_info->thermal_meter_k = dm_info->thermal_avg[0];
+ return true;
+ }
+ return false;
+}
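A short numeric sketch of the delta computation (values hypothetical): with an averaged thermal reading of 30 against an eFuse calibration value of 25, the chip is hotter than at calibration, so the positive table applies.

u8 therm_avg = 30, therm_efuse = 25;
u8 delta = min_t(u8, abs(therm_avg - therm_efuse),
		 RTW_PWR_TRK_TBL_SZ - 1);       /* 5 */

/* positive direction: e.g. rtw8822b_pwrtrk_2ga_p[5] == 3, so the
 * power index for that path is raised by 3 steps */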
diff --git a/drivers/net/wireless/realtek/rtw88/phy.h b/drivers/net/wireless/realtek/rtw88/phy.h
index e79b084628e7..af916d8784cd 100644
--- a/drivers/net/wireless/realtek/rtw88/phy.h
+++ b/drivers/net/wireless/realtek/rtw88/phy.h
@@ -41,9 +41,21 @@ void rtw_phy_cfg_rf(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
u32 addr, u32 data);
void rtw_phy_init_tx_power(struct rtw_dev *rtwdev);
void rtw_phy_load_tables(struct rtw_dev *rtwdev);
+u8 rtw_phy_get_tx_power_index(struct rtw_dev *rtwdev, u8 rf_path, u8 rate,
+ enum rtw_bandwidth bw, u8 channel, u8 regd);
void rtw_phy_set_tx_power_level(struct rtw_dev *rtwdev, u8 channel);
void rtw_phy_tx_power_by_rate_config(struct rtw_hal *hal);
void rtw_phy_tx_power_limit_config(struct rtw_hal *hal);
+void rtw_phy_pwrtrack_avg(struct rtw_dev *rtwdev, u8 thermal, u8 path);
+bool rtw_phy_pwrtrack_thermal_changed(struct rtw_dev *rtwdev, u8 thermal,
+ u8 path);
+u8 rtw_phy_pwrtrack_get_delta(struct rtw_dev *rtwdev, u8 path);
+s8 rtw_phy_pwrtrack_get_pwridx(struct rtw_dev *rtwdev,
+ struct rtw_swing_table *swing_table,
+ u8 tbl_path, u8 therm_path, u8 delta);
+bool rtw_phy_pwrtrack_need_iqk(struct rtw_dev *rtwdev);
+void rtw_phy_config_swing_table(struct rtw_dev *rtwdev,
+ struct rtw_swing_table *swing_table);
struct rtw_txpwr_lmt_cfg_pair {
u8 regd;
@@ -54,6 +66,15 @@ struct rtw_txpwr_lmt_cfg_pair {
s8 txpwr_lmt;
};
+struct rtw_phy_pg_cfg_pair {
+ u32 band;
+ u32 rf_path;
+ u32 tx_num;
+ u32 addr;
+ u32 bitmask;
+ u32 data;
+};
+
#define RTW_DECL_TABLE_PHY_COND_CORE(name, cfg, path) \
const struct rtw_table name ## _tbl = { \
.data = name, \
@@ -125,6 +146,15 @@ rtw_get_tx_power_params(struct rtw_dev *rtwdev, u8 path,
u8 rate, u8 bw, u8 ch, u8 regd,
struct rtw_power_params *pwr_param);
+enum rtw_phy_cck_pd_lv {
+ CCK_PD_LV0,
+ CCK_PD_LV1,
+ CCK_PD_LV2,
+ CCK_PD_LV3,
+ CCK_PD_LV4,
+ CCK_PD_LV_MAX,
+};
+
#define MASKBYTE0 0xff
#define MASKBYTE1 0xff00
#define MASKBYTE2 0xff0000
diff --git a/drivers/net/wireless/realtek/rtw88/ps.c b/drivers/net/wireless/realtek/rtw88/ps.c
index 9ecd14feb76b..913e6f47130f 100644
--- a/drivers/net/wireless/realtek/rtw88/ps.c
+++ b/drivers/net/wireless/realtek/rtw88/ps.c
@@ -3,6 +3,7 @@
*/
#include "main.h"
+#include "reg.h"
#include "fw.h"
#include "ps.h"
#include "mac.h"
@@ -18,18 +19,19 @@ static int rtw_ips_pwr_up(struct rtw_dev *rtwdev)
rtw_err(rtwdev, "leave idle state failed\n");
rtw_set_channel(rtwdev);
- rtw_flag_clear(rtwdev, RTW_FLAG_INACTIVE_PS);
+ clear_bit(RTW_FLAG_INACTIVE_PS, rtwdev->flags);
return ret;
}
int rtw_enter_ips(struct rtw_dev *rtwdev)
{
- rtw_flag_set(rtwdev, RTW_FLAG_INACTIVE_PS);
+ set_bit(RTW_FLAG_INACTIVE_PS, rtwdev->flags);
rtw_coex_ips_notify(rtwdev, COEX_IPS_ENTER);
rtw_core_stop(rtwdev);
+ rtw_hci_link_ps(rtwdev, true);
return 0;
}
@@ -48,6 +50,8 @@ int rtw_leave_ips(struct rtw_dev *rtwdev)
{
int ret;
+ rtw_hci_link_ps(rtwdev, false);
+
ret = rtw_ips_pwr_up(rtwdev);
if (ret) {
rtw_err(rtwdev, "failed to leave ips state\n");
@@ -61,6 +65,85 @@ int rtw_leave_ips(struct rtw_dev *rtwdev)
return 0;
}
+void rtw_power_mode_change(struct rtw_dev *rtwdev, bool enter)
+{
+ u8 request, confirm, polling;
+ u8 polling_cnt;
+ u8 retry_cnt = 0;
+
+ for (retry_cnt = 0; retry_cnt < 3; retry_cnt++) {
+ request = rtw_read8(rtwdev, rtwdev->hci.rpwm_addr);
+ confirm = rtw_read8(rtwdev, rtwdev->hci.cpwm_addr);
+
+ /* flip the toggle bit to request a power mode; all other bits stay 0 */
+ request ^= request | BIT_RPWM_TOGGLE;
+ if (!enter) {
+ request |= POWER_MODE_ACK;
+ } else {
+ request |= POWER_MODE_LCLK;
+ if (rtw_fw_lps_deep_mode == LPS_DEEP_MODE_PG)
+ request |= POWER_MODE_PG;
+ }
+
+ rtw_write8(rtwdev, rtwdev->hci.rpwm_addr, request);
+
+ if (enter)
+ return;
+
+ /* poll the confirm register until hardware has left power save */
+ for (polling_cnt = 0; polling_cnt < 3; polling_cnt++) {
+ polling = rtw_read8(rtwdev, rtwdev->hci.cpwm_addr);
+ if ((polling ^ confirm) & BIT_RPWM_TOGGLE)
+ return;
+ mdelay(20);
+ }
+
+ /* in case the fw/hw missed the request, retry */
+ rtw_warn(rtwdev, "failed to leave deep PS, retry=%d\n",
+ retry_cnt);
+ }
+
+ /* Reaching here means the driver failed to bring the hardware power
+ * mode back to the active state after 3 retries. If the power state
+ * is stuck at deep sleep, most of the hardware circuits do not work,
+ * not even register reads/writes. This must be treated as a fatal
+ * error and requires a full analysis of the firmware/hardware.
+ */
+ WARN(1, "Hardware power state locked\n");
+}
+EXPORT_SYMBOL(rtw_power_mode_change);
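Stripped of retries and error handling, the RPWM/CPWM handshake reduces to the sketch below (BIT_RPWM_TOGGLE and the accessors are those used above; a real caller must bound the wait):

u8 confirm = rtw_read8(rtwdev, rtwdev->hci.cpwm_addr);
u8 request = rtw_read8(rtwdev, rtwdev->hci.rpwm_addr);

/* flip only the toggle bit, zero every other request bit */
request = (request ^ (request | BIT_RPWM_TOGGLE)) | POWER_MODE_ACK;
rtw_write8(rtwdev, rtwdev->hci.rpwm_addr, request);

/* firmware echoes the new toggle value into CPWM when done */
while (!((rtw_read8(rtwdev, rtwdev->hci.cpwm_addr) ^ confirm) &
	 BIT_RPWM_TOGGLE))
	mdelay(20);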
+
+static void __rtw_leave_lps_deep(struct rtw_dev *rtwdev)
+{
+ rtw_hci_deep_ps(rtwdev, false);
+}
+
+static void rtw_fw_leave_lps_state_check(struct rtw_dev *rtwdev)
+{
+ int i;
+
+ /* The driver needs to wait for the firmware to leave the LPS state
+ * successfully. The firmware sends a null packet to inform the AP,
+ * waits for the AP's ACK, and then restores the REG_TCR register.
+ *
+ * If the driver does not wait for the firmware, a null packet with
+ * the PS bit set could be sent due to an incorrect REG_TCR setting.
+ *
+ * In our tests, 100ms is enough for the firmware to finish the flow.
+ * If REG_TCR is still incorrect after 100ms, modify it directly and
+ * emit a warning message.
+ */
+ for (i = 0; i < LEAVE_LPS_TRY_CNT; i++) {
+ if (rtw_read32_mask(rtwdev, REG_TCR, BIT_PWRMGT_HWDATA_EN) == 0)
+ return;
+ msleep(20);
+ }
+
+ rtw_write32_mask(rtwdev, REG_TCR, BIT_PWRMGT_HWDATA_EN, 0);
+ rtw_warn(rtwdev, "firmware failed to restore hardware setting\n");
+}
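On kernels that provide read_poll_timeout() from <linux/iopoll.h>, the same bounded wait could be written in one statement; a sketch under that assumption, not part of this patch:

u32 val;
int ret;

ret = read_poll_timeout(rtw_read32_mask, val, val == 0,
			20 * USEC_PER_MSEC, 100 * USEC_PER_MSEC, false,
			rtwdev, REG_TCR, BIT_PWRMGT_HWDATA_EN);
if (ret)
	rtw_warn(rtwdev, "firmware failed to restore hardware setting\n");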
+
static void rtw_leave_lps_core(struct rtw_dev *rtwdev)
{
struct rtw_lps_conf *conf = &rtwdev->lps_conf;
@@ -70,12 +153,32 @@ static void rtw_leave_lps_core(struct rtw_dev *rtwdev)
conf->rlbm = 0;
conf->smart_ps = 0;
+ rtw_hci_link_ps(rtwdev, false);
rtw_fw_set_pwr_mode(rtwdev);
- rtw_flag_clear(rtwdev, RTW_FLAG_LEISURE_PS);
+ rtw_fw_leave_lps_state_check(rtwdev);
+
+ clear_bit(RTW_FLAG_LEISURE_PS, rtwdev->flags);
rtw_coex_lps_notify(rtwdev, COEX_LPS_DISABLE);
}
+static void __rtw_enter_lps_deep(struct rtw_dev *rtwdev)
+{
+ if (rtwdev->lps_conf.deep_mode == LPS_DEEP_MODE_NONE)
+ return;
+
+ if (!test_bit(RTW_FLAG_LEISURE_PS, rtwdev->flags)) {
+ rtw_dbg(rtwdev, RTW_DBG_PS,
+ "Should enter LPS before entering deep PS\n");
+ return;
+ }
+
+ if (rtw_fw_lps_deep_mode == LPS_DEEP_MODE_PG)
+ rtw_fw_set_pg_info(rtwdev);
+
+ rtw_hci_deep_ps(rtwdev, true);
+}
+
static void rtw_enter_lps_core(struct rtw_dev *rtwdev)
{
struct rtw_lps_conf *conf = &rtwdev->lps_conf;
@@ -88,88 +191,64 @@ static void rtw_enter_lps_core(struct rtw_dev *rtwdev)
rtw_coex_lps_notify(rtwdev, COEX_LPS_ENABLE);
rtw_fw_set_pwr_mode(rtwdev);
- rtw_flag_set(rtwdev, RTW_FLAG_LEISURE_PS);
-}
+ rtw_hci_link_ps(rtwdev, true);
-void rtw_lps_work(struct work_struct *work)
-{
- struct rtw_dev *rtwdev = container_of(work, struct rtw_dev,
- lps_work.work);
- struct rtw_lps_conf *conf = &rtwdev->lps_conf;
- struct rtw_vif *rtwvif = conf->rtwvif;
-
- if (WARN_ON(!rtwvif))
- return;
-
- if (conf->mode == RTW_MODE_LPS)
- rtw_enter_lps_core(rtwdev);
- else
- rtw_leave_lps_core(rtwdev);
+ set_bit(RTW_FLAG_LEISURE_PS, rtwdev->flags);
}
-void rtw_enter_lps_irqsafe(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif)
+static void __rtw_enter_lps(struct rtw_dev *rtwdev, u8 port_id)
{
struct rtw_lps_conf *conf = &rtwdev->lps_conf;
- if (rtwvif->in_lps)
+ if (test_bit(RTW_FLAG_LEISURE_PS, rtwdev->flags))
return;
conf->mode = RTW_MODE_LPS;
- conf->rtwvif = rtwvif;
- rtwvif->in_lps = true;
+ conf->port_id = port_id;
- ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->lps_work, 0);
+ rtw_enter_lps_core(rtwdev);
}
-void rtw_leave_lps_irqsafe(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif)
+static void __rtw_leave_lps(struct rtw_dev *rtwdev)
{
struct rtw_lps_conf *conf = &rtwdev->lps_conf;
- if (!rtwvif->in_lps)
+ if (test_and_clear_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags)) {
+ rtw_dbg(rtwdev, RTW_DBG_PS,
+ "Should leave deep PS before leaving LPS\n");
+ __rtw_leave_lps_deep(rtwdev);
+ }
+
+ if (!test_bit(RTW_FLAG_LEISURE_PS, rtwdev->flags))
return;
conf->mode = RTW_MODE_ACTIVE;
- conf->rtwvif = rtwvif;
- rtwvif->in_lps = false;
- ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->lps_work, 0);
-}
-
-bool rtw_in_lps(struct rtw_dev *rtwdev)
-{
- return rtw_flag_check(rtwdev, RTW_FLAG_LEISURE_PS);
+ rtw_leave_lps_core(rtwdev);
}
-void rtw_enter_lps(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif)
+void rtw_enter_lps(struct rtw_dev *rtwdev, u8 port_id)
{
- struct rtw_lps_conf *conf = &rtwdev->lps_conf;
+ lockdep_assert_held(&rtwdev->mutex);
- if (WARN_ON(!rtwvif))
+ if (rtwdev->coex.stat.wl_force_lps_ctrl)
return;
- if (rtwvif->in_lps)
- return;
-
- conf->mode = RTW_MODE_LPS;
- conf->rtwvif = rtwvif;
- rtwvif->in_lps = true;
-
- rtw_enter_lps_core(rtwdev);
+ __rtw_enter_lps(rtwdev, port_id);
+ __rtw_enter_lps_deep(rtwdev);
}
-void rtw_leave_lps(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif)
+void rtw_leave_lps(struct rtw_dev *rtwdev)
{
- struct rtw_lps_conf *conf = &rtwdev->lps_conf;
-
- if (WARN_ON(!rtwvif))
- return;
+ lockdep_assert_held(&rtwdev->mutex);
- if (!rtwvif->in_lps)
- return;
+ __rtw_leave_lps_deep(rtwdev);
+ __rtw_leave_lps(rtwdev);
+}
- conf->mode = RTW_MODE_ACTIVE;
- conf->rtwvif = rtwvif;
- rtwvif->in_lps = false;
+void rtw_leave_lps_deep(struct rtw_dev *rtwdev)
+{
+ lockdep_assert_held(&rtwdev->mutex);
- rtw_leave_lps_core(rtwdev);
+ __rtw_leave_lps_deep(rtwdev);
}
diff --git a/drivers/net/wireless/realtek/rtw88/ps.h b/drivers/net/wireless/realtek/rtw88/ps.h
index 09e57405dc1b..19afceca7d0e 100644
--- a/drivers/net/wireless/realtek/rtw88/ps.h
+++ b/drivers/net/wireless/realtek/rtw88/ps.h
@@ -5,16 +5,20 @@
#ifndef __RTW_PS_H_
#define __RTW_PS_H_
-#define RTW_LPS_THRESHOLD 2
+#define RTW_LPS_THRESHOLD 50
+
+#define POWER_MODE_ACK BIT(6)
+#define POWER_MODE_PG BIT(4)
+#define POWER_MODE_LCLK BIT(0)
+
+#define LEAVE_LPS_TRY_CNT 5
int rtw_enter_ips(struct rtw_dev *rtwdev);
int rtw_leave_ips(struct rtw_dev *rtwdev);
-void rtw_lps_work(struct work_struct *work);
-void rtw_enter_lps_irqsafe(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif);
-void rtw_leave_lps_irqsafe(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif);
-void rtw_enter_lps(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif);
-void rtw_leave_lps(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif);
-bool rtw_in_lps(struct rtw_dev *rtwdev);
+void rtw_power_mode_change(struct rtw_dev *rtwdev, bool enter);
+void rtw_enter_lps(struct rtw_dev *rtwdev, u8 port_id);
+void rtw_leave_lps(struct rtw_dev *rtwdev);
+void rtw_leave_lps_deep(struct rtw_dev *rtwdev);
#endif
diff --git a/drivers/net/wireless/realtek/rtw88/reg.h b/drivers/net/wireless/realtek/rtw88/reg.h
index fe793e270d22..7e817bc997eb 100644
--- a/drivers/net/wireless/realtek/rtw88/reg.h
+++ b/drivers/net/wireless/realtek/rtw88/reg.h
@@ -239,6 +239,10 @@
#define REG_EDCA_VI_PARAM 0x0504
#define REG_EDCA_BE_PARAM 0x0508
#define REG_EDCA_BK_PARAM 0x050C
+#define BIT_MASK_TXOP_LMT GENMASK(26, 16)
+#define BIT_MASK_CWMAX GENMASK(15, 12)
+#define BIT_MASK_CWMIN GENMASK(11, 8)
+#define BIT_MASK_AIFS GENMASK(7, 0)
#define REG_PIFS 0x0512
#define REG_SIFS 0x0514
#define BIT_SHIFT_SIFS_OFDM_CTX 8
@@ -271,6 +275,7 @@
#define BIT_TSFT_SEL_TIMER0 (BIT(4) | BIT(5) | BIT(6))
#define REG_TCR 0x0604
+#define BIT_PWRMGT_HWDATA_EN BIT(7)
#define REG_RCR 0x0608
#define BIT_APP_FCS BIT(31)
#define BIT_APP_MIC BIT(30)
@@ -305,6 +310,7 @@
#define REG_RX_PKT_LIMIT 0x060C
#define REG_RX_DRVINFO_SZ 0x060F
#define BIT_APP_PHYSTS BIT(28)
+#define REG_MAR 0x0620
#define REG_USTIME_EDCA 0x0638
#define REG_ACKTO_CCK 0x0639
#define REG_RESP_SIFS_CCK 0x063C
@@ -320,6 +326,7 @@
#define REG_RXFLTMAP0 0x06A0
#define REG_RXFLTMAP1 0x06A2
#define REG_RXFLTMAP2 0x06A4
+#define REG_RXFLTMAP4 0x068A
#define REG_BT_COEX_TABLE0 0x06C0
#define REG_BT_COEX_TABLE1 0x06C4
#define REG_BT_COEX_BRK_TABLE 0x06C8
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822b.c b/drivers/net/wireless/realtek/rtw88/rtw8822b.c
index 63abda3b0ebf..4bc14b1a6340 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822b.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822b.c
@@ -13,6 +13,7 @@
#include "mac.h"
#include "reg.h"
#include "debug.h"
+#include "bf.h"
static void rtw8822b_config_trx_mode(struct rtw_dev *rtwdev, u8 tx_path,
u8 rx_path, bool is_tx2_path);
@@ -43,6 +44,8 @@ static int rtw8822b_read_efuse(struct rtw_dev *rtwdev, u8 *log_map)
efuse->country_code[1] = map->country_code[1];
efuse->bt_setting = map->rf_bt_setting;
efuse->regd = map->rf_board_option & 0x7;
+ efuse->thermal_meter[RF_PATH_A] = map->thermal_meter;
+ efuse->thermal_meter_k = map->thermal_meter;
for (i = 0; i < 4; i++)
efuse->txpwr_idx_table[i] = map->txpwr_idx_table[i];
@@ -75,6 +78,56 @@ static void rtw8822b_phy_rfe_init(struct rtw_dev *rtwdev)
rtw_write32_mask(rtwdev, 0x974, (BIT(11) | BIT(10)), 0x3);
}
+#define RTW_TXSCALE_SIZE 37
+static const u32 rtw8822b_txscale_tbl[RTW_TXSCALE_SIZE] = {
+ 0x081, 0x088, 0x090, 0x099, 0x0a2, 0x0ac, 0x0b6, 0x0c0, 0x0cc, 0x0d8,
+ 0x0e5, 0x0f2, 0x101, 0x110, 0x120, 0x131, 0x143, 0x156, 0x16a, 0x180,
+ 0x197, 0x1af, 0x1c8, 0x1e3, 0x200, 0x21e, 0x23e, 0x261, 0x285, 0x2ab,
+ 0x2d3, 0x2fe, 0x32b, 0x35c, 0x38e, 0x3c4, 0x3fe
+};
+
+static u8 rtw8822b_get_swing_index(struct rtw_dev *rtwdev)
+{
+ u8 i = 0;
+ u32 swing, table_value;
+
+ swing = rtw_read32_mask(rtwdev, 0xc1c, 0xffe00000);
+ for (i = 0; i < RTW_TXSCALE_SIZE; i++) {
+ table_value = rtw8822b_txscale_tbl[i];
+ if (swing == table_value)
+ break;
+ }
+
+ return i;
+}
+
+static void rtw8822b_pwrtrack_init(struct rtw_dev *rtwdev)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ u8 swing_idx = rtw8822b_get_swing_index(rtwdev);
+ u8 path;
+
+ if (swing_idx >= RTW_TXSCALE_SIZE)
+ dm_info->default_ofdm_index = 24;
+ else
+ dm_info->default_ofdm_index = swing_idx;
+
+ for (path = RF_PATH_A; path < rtwdev->hal.rf_path_num; path++) {
+ ewma_thermal_init(&dm_info->avg_thermal[path]);
+ dm_info->delta_power_index[path] = 0;
+ }
+ dm_info->pwr_trk_triggered = false;
+ dm_info->pwr_trk_init_trigger = true;
+ dm_info->thermal_meter_k = rtwdev->efuse.thermal_meter_k;
+}
+
+static void rtw8822b_phy_bf_init(struct rtw_dev *rtwdev)
+{
+ rtw_bf_phy_init(rtwdev);
+ /* Grouping bitmap parameters */
+ rtw_write32(rtwdev, 0x1C94, 0xAFFFAFFF);
+}
+
static void rtw8822b_phy_set_param(struct rtw_dev *rtwdev)
{
struct rtw_hal *hal = &rtwdev->hal;
@@ -106,6 +159,9 @@ static void rtw8822b_phy_set_param(struct rtw_dev *rtwdev)
rtw_phy_init(rtwdev);
rtw8822b_phy_rfe_init(rtwdev);
+ rtw8822b_pwrtrack_init(rtwdev);
+
+ rtw8822b_phy_bf_init(rtwdev);
}
#define WLAN_SLOT_TIME 0x09
@@ -211,9 +267,8 @@ static int rtw8822b_mac_init(struct rtw_dev *rtwdev)
static void rtw8822b_set_channel_rfe_efem(struct rtw_dev *rtwdev, u8 channel)
{
struct rtw_hal *hal = &rtwdev->hal;
- bool is_channel_2g = (channel <= 14) ? true : false;
- if (is_channel_2g) {
+ if (IS_CH_2G_BAND(channel)) {
rtw_write32s_mask(rtwdev, REG_RFESEL0, 0xffffff, 0x705770);
rtw_write32s_mask(rtwdev, REG_RFESEL8, MASKBYTE1, 0x57);
rtw_write32s_mask(rtwdev, REG_RFECTL, BIT(4), 0);
@@ -241,9 +296,8 @@ static void rtw8822b_set_channel_rfe_efem(struct rtw_dev *rtwdev, u8 channel)
static void rtw8822b_set_channel_rfe_ifem(struct rtw_dev *rtwdev, u8 channel)
{
struct rtw_hal *hal = &rtwdev->hal;
- bool is_channel_2g = (channel <= 14) ? true : false;
- if (is_channel_2g) {
+ if (IS_CH_2G_BAND(channel)) {
/* signal source */
rtw_write32s_mask(rtwdev, REG_RFESEL0, 0xffffff, 0x745774);
rtw_write32s_mask(rtwdev, REG_RFESEL8, MASKBYTE1, 0x57);
@@ -255,7 +309,7 @@ static void rtw8822b_set_channel_rfe_ifem(struct rtw_dev *rtwdev, u8 channel)
rtw_write32s_mask(rtwdev, REG_RFEINV, BIT(11) | BIT(10) | 0x3f, 0x0);
- if (is_channel_2g) {
+ if (IS_CH_2G_BAND(channel)) {
if (hal->antenna_rx == BB_PATH_AB ||
hal->antenna_tx == BB_PATH_AB) {
/* 2TX or 2RX */
@@ -337,6 +391,7 @@ struct rtw8822b_rfe_info {
static const struct rtw8822b_rfe_info rtw8822b_rfe_info[] = {
[2] = I2GE5G_CCUT(efem),
+ [3] = IFEM_EXT_CCUT(ifem),
[5] = IFEM_EXT_CCUT(ifem),
};
@@ -350,7 +405,7 @@ static void rtw8822b_set_channel_cca(struct rtw_dev *rtwdev, u8 channel, u8 bw,
u32 reg82c, reg830, reg838;
bool is_efem_cca = false, is_ifem_cca = false, is_rfe_type = false;
- if (channel <= 14) {
+ if (IS_CH_2G_BAND(channel)) {
cca_ccut = rfe_info->cca_ccut_2g;
if (hal->antenna_rx == BB_PATH_A ||
@@ -381,7 +436,7 @@ static void rtw8822b_set_channel_cca(struct rtw_dev *rtwdev, u8 channel, u8 bw,
is_efem_cca = true;
break;
case RTW_RFE_IFEM2G_EFEM5G:
- if (channel <= 14)
+ if (IS_CH_2G_BAND(channel))
is_ifem_cca = true;
else
is_efem_cca = true;
@@ -405,9 +460,7 @@ static void rtw8822b_set_channel_cca(struct rtw_dev *rtwdev, u8 channel, u8 bw,
if (is_efem_cca && !(hal->cut_version == RTW_CHIP_VER_CUT_B))
rtw_write32_mask(rtwdev, REG_L1WT, MASKDWORD, 0x9194b2b9);
- if (bw == RTW_CHANNEL_WIDTH_20 &&
- ((channel >= 52 && channel <= 64) ||
- (channel >= 100 && channel <= 144)))
+ if (bw == RTW_CHANNEL_WIDTH_20 && IS_CH_5G_BAND_MID(channel))
rtw_write32_mask(rtwdev, REG_CCA2ND, 0xf0, 0x4);
}
@@ -442,7 +495,7 @@ static void rtw8822b_set_channel_rf(struct rtw_dev *rtwdev, u8 channel, u8 bw)
rf_reg18 &= ~(RF18_BAND_MASK | RF18_CHANNEL_MASK | RF18_RFSI_MASK |
RF18_BW_MASK);
- rf_reg18 |= (channel <= 14 ? RF18_BAND_2G : RF18_BAND_5G);
+ rf_reg18 |= (IS_CH_2G_BAND(channel) ? RF18_BAND_2G : RF18_BAND_5G);
rf_reg18 |= (channel & RF18_CHANNEL_MASK);
if (channel > 144)
rf_reg18 |= RF18_RFSI_GT_CH144;
@@ -464,13 +517,13 @@ static void rtw8822b_set_channel_rf(struct rtw_dev *rtwdev, u8 channel, u8 bw)
break;
}
- if (channel <= 14)
+ if (IS_CH_2G_BAND(channel))
rf_reg_be = 0x0;
- else if (channel >= 36 && channel <= 64)
+ else if (IS_CH_5G_BAND_1(channel) || IS_CH_5G_BAND_2(channel))
rf_reg_be = low_band[(channel - 36) >> 1];
- else if (channel >= 100 && channel <= 144)
+ else if (IS_CH_5G_BAND_3(channel))
rf_reg_be = middle_band[(channel - 100) >> 1];
- else if (channel >= 149 && channel <= 177)
+ else if (IS_CH_5G_BAND_4(channel))
rf_reg_be = high_band[(channel - 149) >> 1];
else
goto err;
@@ -539,7 +592,7 @@ static void rtw8822b_set_channel_bb(struct rtw_dev *rtwdev, u8 channel, u8 bw,
u8 rfe_option = efuse->rfe_option;
u32 val32;
- if (channel <= 14) {
+ if (IS_CH_2G_BAND(channel)) {
rtw_write32_mask(rtwdev, REG_RXPSEL, BIT(28), 0x1);
rtw_write32_mask(rtwdev, REG_CCK_CHECK, BIT(7), 0x0);
rtw_write32_mask(rtwdev, REG_ENTXCCK, BIT(18), 0x0);
@@ -556,22 +609,22 @@ static void rtw8822b_set_channel_bb(struct rtw_dev *rtwdev, u8 channel, u8 bw,
}
rtw_write32_mask(rtwdev, REG_RFEINV, 0x300, 0x2);
- } else if (channel > 35) {
+ } else if (IS_CH_5G_BAND(channel)) {
rtw_write32_mask(rtwdev, REG_ENTXCCK, BIT(18), 0x1);
rtw_write32_mask(rtwdev, REG_CCK_CHECK, BIT(7), 0x1);
rtw_write32_mask(rtwdev, REG_RXPSEL, BIT(28), 0x0);
rtw_write32_mask(rtwdev, REG_RXCCAMSK, 0x0000FC00, 34);
- if (channel >= 36 && channel <= 64)
+ if (IS_CH_5G_BAND_1(channel) || IS_CH_5G_BAND_2(channel))
rtw_write32_mask(rtwdev, REG_ACGG2TBL, 0x1f, 0x1);
- else if (channel >= 100 && channel <= 144)
+ else if (IS_CH_5G_BAND_3(channel))
rtw_write32_mask(rtwdev, REG_ACGG2TBL, 0x1f, 0x2);
- else if (channel >= 149)
+ else if (IS_CH_5G_BAND_4(channel))
rtw_write32_mask(rtwdev, REG_ACGG2TBL, 0x1f, 0x3);
- if (channel >= 36 && channel <= 48)
+ if (IS_CH_5G_BAND_1(channel))
rtw_write32_mask(rtwdev, REG_CLKTRK, 0x1ffe0000, 0x494);
- else if (channel >= 52 && channel <= 64)
+ else if (IS_CH_5G_BAND_2(channel))
rtw_write32_mask(rtwdev, REG_CLKTRK, 0x1ffe0000, 0x453);
else if (channel >= 100 && channel <= 116)
rtw_write32_mask(rtwdev, REG_CLKTRK, 0x1ffe0000, 0x452);
@@ -612,7 +665,7 @@ static void rtw8822b_set_channel_bb(struct rtw_dev *rtwdev, u8 channel, u8 bw,
rtw_write32_mask(rtwdev, REG_ADC160, BIT(30), 0x1);
- if (rfe_option == 2) {
+ if (rfe_option == 2 || rfe_option == 3) {
rtw_write32_mask(rtwdev, REG_L1PKWT, 0x0000f000, 0x6);
rtw_write32_mask(rtwdev, REG_ADC40, BIT(10), 0x1);
}
@@ -763,6 +816,7 @@ static void rtw8822b_config_trx_mode(struct rtw_dev *rtwdev, u8 tx_path,
static void query_phy_status_page0(struct rtw_dev *rtwdev, u8 *phy_status,
struct rtw_rx_pkt_stat *pkt_stat)
{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
s8 min_rx_power = -120;
u8 pwdb = GET_PHY_STAT_P0_PWDB(phy_status);
@@ -772,13 +826,19 @@ static void query_phy_status_page0(struct rtw_dev *rtwdev, u8 *phy_status,
pkt_stat->bw = RTW_CHANNEL_WIDTH_20;
pkt_stat->signal_power = max(pkt_stat->rx_power[RF_PATH_A],
min_rx_power);
+ dm_info->rssi[RF_PATH_A] = pkt_stat->rssi;
}
static void query_phy_status_page1(struct rtw_dev *rtwdev, u8 *phy_status,
struct rtw_rx_pkt_stat *pkt_stat)
{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
u8 rxsc, bw;
s8 min_rx_power = -120;
+ s8 rx_evm;
+ u8 evm_dbm = 0;
+ u8 rssi;
+ int path;
if (pkt_stat->rate > DESC_RATE11M && pkt_stat->rate < DESC_RATEMCS0)
rxsc = GET_PHY_STAT_P1_L_RXSC(phy_status);
@@ -801,6 +861,34 @@ static void query_phy_status_page1(struct rtw_dev *rtwdev, u8 *phy_status,
pkt_stat->signal_power = max3(pkt_stat->rx_power[RF_PATH_A],
pkt_stat->rx_power[RF_PATH_B],
min_rx_power);
+
+ dm_info->curr_rx_rate = pkt_stat->rate;
+
+ pkt_stat->rx_evm[RF_PATH_A] = GET_PHY_STAT_P1_RXEVM_A(phy_status);
+ pkt_stat->rx_evm[RF_PATH_B] = GET_PHY_STAT_P1_RXEVM_B(phy_status);
+
+ pkt_stat->rx_snr[RF_PATH_A] = GET_PHY_STAT_P1_RXSNR_A(phy_status);
+ pkt_stat->rx_snr[RF_PATH_B] = GET_PHY_STAT_P1_RXSNR_B(phy_status);
+
+ pkt_stat->cfo_tail[RF_PATH_A] = GET_PHY_STAT_P1_CFO_TAIL_A(phy_status);
+ pkt_stat->cfo_tail[RF_PATH_B] = GET_PHY_STAT_P1_CFO_TAIL_B(phy_status);
+
+ for (path = 0; path < rtwdev->hal.rf_path_num; path++) {
+ rssi = rtw_phy_rf_power_2_rssi(&pkt_stat->rx_power[path], 1);
+ dm_info->rssi[path] = rssi;
+ dm_info->rx_snr[path] = pkt_stat->rx_snr[path] >> 1;
+ dm_info->cfo_tail[path] = (pkt_stat->cfo_tail[path] * 5) >> 1;
+
+ rx_evm = pkt_stat->rx_evm[path];
+
+ if (rx_evm < 0) {
+ if (rx_evm == S8_MIN)
+ evm_dbm = 0;
+ else
+ evm_dbm = ((u8)-rx_evm >> 1);
+ }
+ dm_info->rx_evm_dbm[path] = evm_dbm;
+ }
}
static void query_phy_status(struct rtw_dev *rtwdev, u8 *phy_status,
@@ -836,7 +924,8 @@ static void rtw8822b_query_rx_desc(struct rtw_dev *rtwdev, u8 *rx_desc,
pkt_stat->phy_status = GET_RX_DESC_PHYST(rx_desc);
pkt_stat->icv_err = GET_RX_DESC_ICV_ERR(rx_desc);
pkt_stat->crc_err = GET_RX_DESC_CRC32(rx_desc);
- pkt_stat->decrypted = !GET_RX_DESC_SWDEC(rx_desc);
+ pkt_stat->decrypted = !GET_RX_DESC_SWDEC(rx_desc) &&
+ GET_RX_DESC_ENC_TYPE(rx_desc) != RX_DESC_ENC_NONE;
pkt_stat->is_c2h = GET_RX_DESC_C2H(rx_desc);
pkt_stat->pkt_len = GET_RX_DESC_PKT_LEN(rx_desc);
pkt_stat->drv_info_sz = GET_RX_DESC_DRV_INFO_SIZE(rx_desc);
@@ -946,6 +1035,7 @@ static void rtw8822b_false_alarm_statistics(struct rtw_dev *rtwdev)
u32 cck_fa_cnt;
u32 ofdm_fa_cnt;
u32 crc32_cnt;
+ u32 cca32_cnt;
cck_enable = rtw_read32(rtwdev, 0x808) & BIT(28);
cck_fa_cnt = rtw_read16(rtwdev, 0xa5c);
@@ -969,6 +1059,15 @@ static void rtw8822b_false_alarm_statistics(struct rtw_dev *rtwdev)
dm_info->vht_ok_cnt = crc32_cnt & 0xffff;
dm_info->vht_err_cnt = (crc32_cnt & 0xffff0000) >> 16;
+ cca32_cnt = rtw_read32(rtwdev, 0xf08);
+ dm_info->ofdm_cca_cnt = ((cca32_cnt & 0xffff0000) >> 16);
+ dm_info->total_cca_cnt = dm_info->ofdm_cca_cnt;
+ if (cck_enable) {
+ cca32_cnt = rtw_read32(rtwdev, 0xfcc);
+ dm_info->cck_cca_cnt = cca32_cnt & 0xffff;
+ dm_info->total_cca_cnt += dm_info->cck_cca_cnt;
+ }
+
rtw_write32_set(rtwdev, 0x9a4, BIT(17));
rtw_write32_clr(rtwdev, 0x9a4, BIT(17));
rtw_write32_clr(rtwdev, 0xa2c, BIT(15));
@@ -1255,6 +1354,195 @@ static void rtw8822b_coex_cfg_wl_rx_gain(struct rtw_dev *rtwdev, bool low_gain)
}
}
+static void rtw8822b_txagc_swing_offset(struct rtw_dev *rtwdev, u8 path,
+ u8 tx_pwr_idx_offset,
+ s8 *txagc_idx, u8 *swing_idx)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ s8 delta_pwr_idx = dm_info->delta_power_index[path];
+ u8 swing_upper_bound = dm_info->default_ofdm_index + 10;
+ u8 swing_lower_bound = 0;
+ u8 max_tx_pwr_idx_offset = 0xf;
+ s8 agc_index = 0;
+ u8 swing_index = dm_info->default_ofdm_index;
+
+ tx_pwr_idx_offset = min_t(u8, tx_pwr_idx_offset, max_tx_pwr_idx_offset);
+
+ if (delta_pwr_idx >= 0) {
+ if (delta_pwr_idx <= tx_pwr_idx_offset) {
+ agc_index = delta_pwr_idx;
+ swing_index = dm_info->default_ofdm_index;
+ } else if (delta_pwr_idx > tx_pwr_idx_offset) {
+ agc_index = tx_pwr_idx_offset;
+ swing_index = dm_info->default_ofdm_index +
+ delta_pwr_idx - tx_pwr_idx_offset;
+ swing_index = min_t(u8, swing_index, swing_upper_bound);
+ }
+ } else {
+ if (dm_info->default_ofdm_index > abs(delta_pwr_idx))
+ swing_index =
+ dm_info->default_ofdm_index + delta_pwr_idx;
+ else
+ swing_index = swing_lower_bound;
+ swing_index = max_t(u8, swing_index, swing_lower_bound);
+
+ agc_index = 0;
+ }
+
+ if (swing_index >= RTW_TXSCALE_SIZE) {
+ rtw_warn(rtwdev, "swing index overflow\n");
+ swing_index = RTW_TXSCALE_SIZE - 1;
+ }
+ *txagc_idx = agc_index;
+ *swing_idx = swing_index;
+}
+
+static void rtw8822b_pwrtrack_set_pwr(struct rtw_dev *rtwdev, u8 path,
+ u8 pwr_idx_offset)
+{
+ s8 txagc_idx;
+ u8 swing_idx;
+ u32 reg1, reg2;
+
+ if (path == RF_PATH_A) {
+ reg1 = 0xc94;
+ reg2 = 0xc1c;
+ } else if (path == RF_PATH_B) {
+ reg1 = 0xe94;
+ reg2 = 0xe1c;
+ } else {
+ return;
+ }
+
+ rtw8822b_txagc_swing_offset(rtwdev, path, pwr_idx_offset,
+ &txagc_idx, &swing_idx);
+ rtw_write32_mask(rtwdev, reg1, GENMASK(29, 25), txagc_idx);
+ rtw_write32_mask(rtwdev, reg2, GENMASK(31, 21),
+ rtw8822b_txscale_tbl[swing_idx]);
+}
+
+static void rtw8822b_pwrtrack_set(struct rtw_dev *rtwdev, u8 path)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ u8 pwr_idx_offset, tx_pwr_idx;
+ u8 channel = rtwdev->hal.current_channel;
+ u8 band_width = rtwdev->hal.current_band_width;
+ u8 regd = rtwdev->regd.txpwr_regd;
+ u8 tx_rate = dm_info->tx_rate;
+ u8 max_pwr_idx = rtwdev->chip->max_power_index;
+
+ tx_pwr_idx = rtw_phy_get_tx_power_index(rtwdev, path, tx_rate,
+ band_width, channel, regd);
+
+ tx_pwr_idx = min_t(u8, tx_pwr_idx, max_pwr_idx);
+
+ pwr_idx_offset = max_pwr_idx - tx_pwr_idx;
+
+ rtw8822b_pwrtrack_set_pwr(rtwdev, path, pwr_idx_offset);
+}
+
+static void rtw8822b_phy_pwrtrack_path(struct rtw_dev *rtwdev,
+ struct rtw_swing_table *swing_table,
+ u8 path)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ u8 power_idx_cur, power_idx_last;
+ u8 delta;
+
+ /* 8822B only has one thermal meter at PATH A */
+ delta = rtw_phy_pwrtrack_get_delta(rtwdev, RF_PATH_A);
+
+ power_idx_last = dm_info->delta_power_index[path];
+ power_idx_cur = rtw_phy_pwrtrack_get_pwridx(rtwdev, swing_table,
+ path, RF_PATH_A, delta);
+
+ /* if the power index delta is unchanged, just skip */
+ if (power_idx_cur == power_idx_last)
+ return;
+
+ dm_info->delta_power_index[path] = power_idx_cur;
+ rtw8822b_pwrtrack_set(rtwdev, path);
+}
+
+static void rtw8822b_phy_pwrtrack(struct rtw_dev *rtwdev)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ struct rtw_swing_table swing_table;
+ u8 thermal_value, path;
+
+ rtw_phy_config_swing_table(rtwdev, &swing_table);
+
+ if (rtwdev->efuse.thermal_meter[RF_PATH_A] == 0xff)
+ return;
+
+ thermal_value = rtw_read_rf(rtwdev, RF_PATH_A, RF_T_METER, 0xfc00);
+
+ rtw_phy_pwrtrack_avg(rtwdev, thermal_value, RF_PATH_A);
+
+ if (dm_info->pwr_trk_init_trigger)
+ dm_info->pwr_trk_init_trigger = false;
+ else if (!rtw_phy_pwrtrack_thermal_changed(rtwdev, thermal_value,
+ RF_PATH_A))
+ goto iqk;
+
+ for (path = 0; path < rtwdev->hal.rf_path_num; path++)
+ rtw8822b_phy_pwrtrack_path(rtwdev, &swing_table, path);
+
+iqk:
+ if (rtw_phy_pwrtrack_need_iqk(rtwdev))
+ rtw8822b_do_iqk(rtwdev);
+}
+
+static void rtw8822b_pwr_track(struct rtw_dev *rtwdev)
+{
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+
+ if (efuse->power_track_type != 0)
+ return;
+
+ if (!dm_info->pwr_trk_triggered) {
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_T_METER,
+ GENMASK(17, 16), 0x03);
+ dm_info->pwr_trk_triggered = true;
+ return;
+ }
+
+ rtw8822b_phy_pwrtrack(rtwdev);
+ dm_info->pwr_trk_triggered = false;
+}
+
+static void rtw8822b_bf_config_bfee_su(struct rtw_dev *rtwdev,
+ struct rtw_vif *vif,
+ struct rtw_bfee *bfee, bool enable)
+{
+ if (enable)
+ rtw_bf_enable_bfee_su(rtwdev, vif, bfee);
+ else
+ rtw_bf_remove_bfee_su(rtwdev, bfee);
+}
+
+static void rtw8822b_bf_config_bfee_mu(struct rtw_dev *rtwdev,
+ struct rtw_vif *vif,
+ struct rtw_bfee *bfee, bool enable)
+{
+ if (enable)
+ rtw_bf_enable_bfee_mu(rtwdev, vif, bfee);
+ else
+ rtw_bf_remove_bfee_mu(rtwdev, bfee);
+}
+
+static void rtw8822b_bf_config_bfee(struct rtw_dev *rtwdev, struct rtw_vif *vif,
+ struct rtw_bfee *bfee, bool enable)
+{
+ if (bfee->role == RTW_BFEE_SU)
+ rtw8822b_bf_config_bfee_su(rtwdev, vif, bfee, enable);
+ else if (bfee->role == RTW_BFEE_MU)
+ rtw8822b_bf_config_bfee_mu(rtwdev, vif, bfee, enable);
+ else
+ rtw_warn(rtwdev, "wrong bfee role\n");
+}
+
static struct rtw_pwr_seq_cmd trans_carddis_to_cardemu_8822b[] = {
{0x0086,
RTW_PWR_CUT_ALL_MSK,
@@ -1754,6 +2042,7 @@ static struct rtw_intf_phy_para_table phy_para_table_8822b = {
static const struct rtw_rfe_def rtw8822b_rfe_defs[] = {
[2] = RTW_DEF_RFE(8822b, 2, 2),
+ [3] = RTW_DEF_RFE(8822b, 3, 0),
[5] = RTW_DEF_RFE(8822b, 5, 5),
};
@@ -1801,6 +2090,10 @@ static struct rtw_chip_ops rtw8822b_ops = {
.cfg_ldo25 = rtw8822b_cfg_ldo25,
.false_alarm_statistics = rtw8822b_false_alarm_statistics,
.phy_calibration = rtw8822b_phy_calibration,
+ .pwr_track = rtw8822b_pwr_track,
+ .config_bfee = rtw8822b_bf_config_bfee,
+ .set_gid_table = rtw_bf_set_gid_table,
+ .cfg_csi_rate = rtw_bf_cfg_csi_rate,
.coex_set_init = rtw8822b_coex_cfg_init,
.coex_set_ant_switch = rtw8822b_coex_cfg_ant_switch,
@@ -1955,6 +2248,129 @@ static const struct coex_rf_para rf_para_rx_8822b[] = {
static_assert(ARRAY_SIZE(rf_para_tx_8822b) == ARRAY_SIZE(rf_para_rx_8822b));
+static const u8
+rtw8822b_pwrtrk_5gb_n[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ { 0, 1, 2, 2, 3, 4, 5, 5, 6, 7,
+ 8, 8, 9, 10, 11, 11, 12, 13, 14, 14,
+ 15, 16, 17, 17, 18, 19, 20, 20, 21, 22 },
+ { 0, 1, 2, 2, 3, 4, 5, 5, 6, 7,
+ 8, 8, 9, 10, 11, 11, 12, 13, 14, 14,
+ 15, 16, 17, 17, 18, 19, 20, 20, 21, 22 },
+ { 0, 1, 2, 2, 3, 4, 5, 5, 6, 7,
+ 8, 8, 9, 10, 11, 11, 12, 13, 14, 14,
+ 15, 16, 17, 17, 18, 19, 20, 20, 21, 22 },
+};
+
+static const u8
+rtw8822b_pwrtrk_5gb_p[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ { 0, 1, 2, 2, 3, 4, 5, 5, 6, 7,
+ 8, 9, 9, 10, 11, 12, 13, 14, 14, 15,
+ 16, 17, 18, 19, 19, 20, 21, 22, 22, 23 },
+ { 0, 1, 2, 2, 3, 4, 5, 5, 6, 7,
+ 8, 9, 9, 10, 11, 12, 13, 14, 14, 15,
+ 16, 17, 18, 19, 19, 20, 21, 22, 22, 23 },
+ { 0, 1, 2, 2, 3, 4, 5, 5, 6, 7,
+ 8, 9, 9, 10, 11, 12, 13, 14, 14, 15,
+ 16, 17, 18, 19, 19, 20, 21, 22, 22, 23 },
+};
+
+static const u8
+rtw8822b_pwrtrk_5ga_n[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ { 0, 1, 2, 2, 3, 4, 5, 5, 6, 7,
+ 8, 8, 9, 10, 11, 11, 12, 13, 14, 14,
+ 15, 16, 17, 17, 18, 19, 20, 20, 21, 22 },
+ { 0, 1, 2, 2, 3, 4, 5, 5, 6, 7,
+ 8, 8, 9, 10, 11, 11, 12, 13, 14, 14,
+ 15, 16, 17, 17, 18, 19, 20, 20, 21, 22 },
+ { 0, 1, 2, 2, 3, 4, 5, 5, 6, 7,
+ 8, 8, 9, 10, 11, 11, 12, 13, 14, 14,
+ 15, 16, 17, 17, 18, 19, 20, 20, 21, 22 },
+};
+
+static const u8
+rtw8822b_pwrtrk_5ga_p[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ { 0, 1, 2, 2, 3, 4, 5, 5, 6, 7,
+ 8, 9, 9, 10, 11, 12, 13, 14, 14, 15,
+ 16, 17, 18, 19, 19, 20, 21, 22, 22, 23},
+ { 0, 1, 2, 2, 3, 4, 5, 5, 6, 7,
+ 8, 9, 9, 10, 11, 12, 13, 14, 14, 15,
+ 16, 17, 18, 19, 19, 20, 21, 22, 22, 23},
+ { 0, 1, 2, 2, 3, 4, 5, 5, 6, 7,
+ 8, 9, 9, 10, 11, 12, 13, 14, 14, 15,
+ 16, 17, 18, 19, 19, 20, 21, 22, 22, 23},
+};
+
+static const u8 rtw8822b_pwrtrk_2gb_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 1, 1, 2, 2, 3, 3, 3, 4,
+ 4, 5, 5, 5, 6, 6, 7, 7, 7, 8,
+ 8, 9, 9, 9, 10, 10, 11, 11, 11, 12
+};
+
+static const u8 rtw8822b_pwrtrk_2gb_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 4, 4,
+ 5, 5, 6, 6, 6, 7, 7, 8, 8, 9,
+ 9, 10, 10, 11, 11, 12, 12, 12, 13, 13
+};
+
+static const u8 rtw8822b_pwrtrk_2ga_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 1, 1, 2, 2, 3, 3, 3, 4,
+ 4, 5, 5, 5, 6, 6, 7, 7, 7, 8,
+ 8, 9, 9, 9, 10, 10, 11, 11, 11, 12
+};
+
+static const u8 rtw8822b_pwrtrk_2ga_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 1, 2, 2, 3, 3, 4, 4, 5,
+ 5, 6, 6, 7, 7, 8, 8, 9, 9, 10,
+ 10, 11, 11, 12, 12, 13, 13, 14, 14, 15
+};
+
+static const u8 rtw8822b_pwrtrk_2g_cck_b_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 1, 1, 2, 2, 3, 3, 3, 4,
+ 4, 5, 5, 5, 6, 6, 7, 7, 7, 8,
+ 8, 9, 9, 9, 10, 10, 11, 11, 11, 12
+};
+
+static const u8 rtw8822b_pwrtrk_2g_cck_b_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 4, 4,
+ 5, 5, 6, 6, 6, 7, 7, 8, 8, 9,
+ 9, 10, 10, 11, 11, 12, 12, 12, 13, 13
+};
+
+static const u8 rtw8822b_pwrtrk_2g_cck_a_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 1, 1, 2, 2, 3, 3, 3, 4,
+ 4, 5, 5, 5, 6, 6, 7, 7, 7, 8,
+ 8, 9, 9, 9, 10, 10, 11, 11, 11, 12
+};
+
+static const u8 rtw8822b_pwrtrk_2g_cck_a_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 1, 2, 2, 3, 3, 4, 4, 5,
+ 5, 6, 6, 7, 7, 8, 8, 9, 9, 10,
+ 10, 11, 11, 12, 12, 13, 13, 14, 14, 15
+};
+
+static const struct rtw_pwr_track_tbl rtw8822b_rtw_pwr_track_tbl = {
+ .pwrtrk_5gb_n[RTW_PWR_TRK_5G_1] = rtw8822b_pwrtrk_5gb_n[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5gb_n[RTW_PWR_TRK_5G_2] = rtw8822b_pwrtrk_5gb_n[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5gb_n[RTW_PWR_TRK_5G_3] = rtw8822b_pwrtrk_5gb_n[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5gb_p[RTW_PWR_TRK_5G_1] = rtw8822b_pwrtrk_5gb_p[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5gb_p[RTW_PWR_TRK_5G_2] = rtw8822b_pwrtrk_5gb_p[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5gb_p[RTW_PWR_TRK_5G_3] = rtw8822b_pwrtrk_5gb_p[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5ga_n[RTW_PWR_TRK_5G_1] = rtw8822b_pwrtrk_5ga_n[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5ga_n[RTW_PWR_TRK_5G_2] = rtw8822b_pwrtrk_5ga_n[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5ga_n[RTW_PWR_TRK_5G_3] = rtw8822b_pwrtrk_5ga_n[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5ga_p[RTW_PWR_TRK_5G_1] = rtw8822b_pwrtrk_5ga_p[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5ga_p[RTW_PWR_TRK_5G_2] = rtw8822b_pwrtrk_5ga_p[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5ga_p[RTW_PWR_TRK_5G_3] = rtw8822b_pwrtrk_5ga_p[RTW_PWR_TRK_5G_3],
+ .pwrtrk_2gb_n = rtw8822b_pwrtrk_2gb_n,
+ .pwrtrk_2gb_p = rtw8822b_pwrtrk_2gb_p,
+ .pwrtrk_2ga_n = rtw8822b_pwrtrk_2ga_n,
+ .pwrtrk_2ga_p = rtw8822b_pwrtrk_2ga_p,
+ .pwrtrk_2g_cckb_n = rtw8822b_pwrtrk_2g_cck_b_n,
+ .pwrtrk_2g_cckb_p = rtw8822b_pwrtrk_2g_cck_b_p,
+ .pwrtrk_2g_ccka_n = rtw8822b_pwrtrk_2g_cck_a_n,
+ .pwrtrk_2g_ccka_p = rtw8822b_pwrtrk_2g_cck_a_p,
+};
+
struct rtw_chip_info rtw8822b_hw_spec = {
.ops = &rtw8822b_ops,
.id = RTW_CHIP_TYPE_8822B,
@@ -1977,6 +2393,7 @@ struct rtw_chip_info rtw8822b_hw_spec = {
.dig_min = 0x1c,
.ht_supported = true,
.vht_supported = true,
+ .lps_deep_mode_supported = BIT(LPS_DEEP_MODE_LCLK),
.sys_func_en = 0xDC,
.pwr_on_seq = card_enable_flow_8822b,
.pwr_off_seq = card_disable_flow_8822b,
@@ -1992,6 +2409,10 @@ struct rtw_chip_info rtw8822b_hw_spec = {
.rf_tbl = {&rtw8822b_rf_a_tbl, &rtw8822b_rf_b_tbl},
.rfe_defs = rtw8822b_rfe_defs,
.rfe_defs_size = ARRAY_SIZE(rtw8822b_rfe_defs),
+ .pwr_track_tbl = &rtw8822b_rtw_pwr_track_tbl,
+ .iqk_threshold = 8,
+ .bfer_su_max_num = 2,
+ .bfer_mu_max_num = 1,
.coex_para_ver = 0x19062706,
.bt_desired_ver = 0x6,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822b.h b/drivers/net/wireless/realtek/rtw88/rtw8822b.h
index 0cb93d7d4cfd..6211f4b547b9 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822b.h
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822b.h
@@ -127,6 +127,18 @@ _rtw_write32s_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 data)
le32_get_bits(*((__le32 *)(phy_stat) + 0x01), GENMASK(11, 8))
#define GET_PHY_STAT_P1_HT_RXSC(phy_stat) \
le32_get_bits(*((__le32 *)(phy_stat) + 0x01), GENMASK(15, 12))
+#define GET_PHY_STAT_P1_RXEVM_A(phy_stat) \
+ le32_get_bits(*((__le32 *)(phy_stat) + 0x04), GENMASK(7, 0))
+#define GET_PHY_STAT_P1_RXEVM_B(phy_stat) \
+ le32_get_bits(*((__le32 *)(phy_stat) + 0x04), GENMASK(15, 8))
+#define GET_PHY_STAT_P1_CFO_TAIL_A(phy_stat) \
+ le32_get_bits(*((__le32 *)(phy_stat) + 0x05), GENMASK(7, 0))
+#define GET_PHY_STAT_P1_CFO_TAIL_B(phy_stat) \
+ le32_get_bits(*((__le32 *)(phy_stat) + 0x05), GENMASK(15, 8))
+#define GET_PHY_STAT_P1_RXSNR_A(phy_stat) \
+ le32_get_bits(*((__le32 *)(phy_stat) + 0x06), GENMASK(7, 0))
+#define GET_PHY_STAT_P1_RXSNR_B(phy_stat) \
+ le32_get_bits(*((__le32 *)(phy_stat) + 0x06), GENMASK(15, 8))
#define REG_HTSTFWT 0x800
#define REG_RXPSEL 0x808
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822b_table.c b/drivers/net/wireless/realtek/rtw88/rtw8822b_table.c
index 465f58411cab..b9010b111a13 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822b_table.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822b_table.c
@@ -11643,104 +11643,155 @@ static const u32 rtw8822b_bb[] = {
RTW_DECL_TABLE_PHY_COND(rtw8822b_bb, rtw_phy_cfg_bb);
-static const u32 rtw8822b_bb_pg_type2[] = {
- 0, 0, 0, 0x00000c20, 0xffffffff, 0x32343638,
- 0, 0, 0, 0x00000c24, 0xffffffff, 0x36384042,
- 0, 0, 0, 0x00000c28, 0xffffffff, 0x28303234,
- 0, 0, 0, 0x00000c2c, 0xffffffff, 0x34363840,
- 0, 0, 0, 0x00000c30, 0xffffffff, 0x26283032,
- 0, 0, 1, 0x00000c34, 0xffffffff, 0x34363840,
- 0, 0, 1, 0x00000c38, 0xffffffff, 0x26283032,
- 0, 0, 0, 0x00000c3c, 0xffffffff, 0x34363840,
- 0, 0, 0, 0x00000c40, 0xffffffff, 0x26283032,
- 0, 0, 0, 0x00000c44, 0xffffffff, 0x38402224,
- 0, 0, 1, 0x00000c48, 0xffffffff, 0x30323436,
- 0, 0, 1, 0x00000c4c, 0xffffffff, 0x22242628,
- 0, 1, 0, 0x00000e20, 0xffffffff, 0x32343638,
- 0, 1, 0, 0x00000e24, 0xffffffff, 0x36384042,
- 0, 1, 0, 0x00000e28, 0xffffffff, 0x28303234,
- 0, 1, 0, 0x00000e2c, 0xffffffff, 0x34363840,
- 0, 1, 0, 0x00000e30, 0xffffffff, 0x26283032,
- 0, 1, 1, 0x00000e34, 0xffffffff, 0x34363840,
- 0, 1, 1, 0x00000e38, 0xffffffff, 0x26283032,
- 0, 1, 0, 0x00000e3c, 0xffffffff, 0x34363840,
- 0, 1, 0, 0x00000e40, 0xffffffff, 0x26283032,
- 0, 1, 0, 0x00000e44, 0xffffffff, 0x38402224,
- 0, 1, 1, 0x00000e48, 0xffffffff, 0x30323436,
- 0, 1, 1, 0x00000e4c, 0xffffffff, 0x22242628,
- 1, 0, 0, 0x00000c24, 0xffffffff, 0x40424446,
- 1, 0, 0, 0x00000c28, 0xffffffff, 0x32343638,
- 1, 0, 0, 0x00000c2c, 0xffffffff, 0x38404244,
- 1, 0, 0, 0x00000c30, 0xffffffff, 0x30323436,
- 1, 0, 1, 0x00000c34, 0xffffffff, 0x38404244,
- 1, 0, 1, 0x00000c38, 0xffffffff, 0x30323436,
- 1, 0, 0, 0x00000c3c, 0xffffffff, 0x38404244,
- 1, 0, 0, 0x00000c40, 0xffffffff, 0x30323436,
- 1, 0, 0, 0x00000c44, 0xffffffff, 0x42442628,
- 1, 0, 1, 0x00000c48, 0xffffffff, 0x34363840,
- 1, 0, 1, 0x00000c4c, 0xffffffff, 0x26283032,
- 1, 1, 0, 0x00000e24, 0xffffffff, 0x40424446,
- 1, 1, 0, 0x00000e28, 0xffffffff, 0x32343638,
- 1, 1, 0, 0x00000e2c, 0xffffffff, 0x38404244,
- 1, 1, 0, 0x00000e30, 0xffffffff, 0x30323436,
- 1, 1, 1, 0x00000e34, 0xffffffff, 0x38404244,
- 1, 1, 1, 0x00000e38, 0xffffffff, 0x30323436,
- 1, 1, 0, 0x00000e3c, 0xffffffff, 0x38404244,
- 1, 1, 0, 0x00000e40, 0xffffffff, 0x30323436,
- 1, 1, 0, 0x00000e44, 0xffffffff, 0x42442628,
- 1, 1, 1, 0x00000e48, 0xffffffff, 0x34363840,
- 1, 1, 1, 0x00000e4c, 0xffffffff, 0x26283032
+static const struct rtw_phy_pg_cfg_pair rtw8822b_bb_pg_type2[] = {
+ { 0, 0, 0, 0x00000c20, 0xffffffff, 0x32343638, },
+ { 0, 0, 0, 0x00000c24, 0xffffffff, 0x36384042, },
+ { 0, 0, 0, 0x00000c28, 0xffffffff, 0x28303234, },
+ { 0, 0, 0, 0x00000c2c, 0xffffffff, 0x34363840, },
+ { 0, 0, 0, 0x00000c30, 0xffffffff, 0x26283032, },
+ { 0, 0, 1, 0x00000c34, 0xffffffff, 0x34363840, },
+ { 0, 0, 1, 0x00000c38, 0xffffffff, 0x26283032, },
+ { 0, 0, 0, 0x00000c3c, 0xffffffff, 0x34363840, },
+ { 0, 0, 0, 0x00000c40, 0xffffffff, 0x26283032, },
+ { 0, 0, 0, 0x00000c44, 0xffffffff, 0x38402224, },
+ { 0, 0, 1, 0x00000c48, 0xffffffff, 0x30323436, },
+ { 0, 0, 1, 0x00000c4c, 0xffffffff, 0x22242628, },
+ { 0, 1, 0, 0x00000e20, 0xffffffff, 0x32343638, },
+ { 0, 1, 0, 0x00000e24, 0xffffffff, 0x36384042, },
+ { 0, 1, 0, 0x00000e28, 0xffffffff, 0x28303234, },
+ { 0, 1, 0, 0x00000e2c, 0xffffffff, 0x34363840, },
+ { 0, 1, 0, 0x00000e30, 0xffffffff, 0x26283032, },
+ { 0, 1, 1, 0x00000e34, 0xffffffff, 0x34363840, },
+ { 0, 1, 1, 0x00000e38, 0xffffffff, 0x26283032, },
+ { 0, 1, 0, 0x00000e3c, 0xffffffff, 0x34363840, },
+ { 0, 1, 0, 0x00000e40, 0xffffffff, 0x26283032, },
+ { 0, 1, 0, 0x00000e44, 0xffffffff, 0x38402224, },
+ { 0, 1, 1, 0x00000e48, 0xffffffff, 0x30323436, },
+ { 0, 1, 1, 0x00000e4c, 0xffffffff, 0x22242628, },
+ { 1, 0, 0, 0x00000c24, 0xffffffff, 0x40424446, },
+ { 1, 0, 0, 0x00000c28, 0xffffffff, 0x32343638, },
+ { 1, 0, 0, 0x00000c2c, 0xffffffff, 0x38404244, },
+ { 1, 0, 0, 0x00000c30, 0xffffffff, 0x30323436, },
+ { 1, 0, 1, 0x00000c34, 0xffffffff, 0x38404244, },
+ { 1, 0, 1, 0x00000c38, 0xffffffff, 0x30323436, },
+ { 1, 0, 0, 0x00000c3c, 0xffffffff, 0x38404244, },
+ { 1, 0, 0, 0x00000c40, 0xffffffff, 0x30323436, },
+ { 1, 0, 0, 0x00000c44, 0xffffffff, 0x42442628, },
+ { 1, 0, 1, 0x00000c48, 0xffffffff, 0x34363840, },
+ { 1, 0, 1, 0x00000c4c, 0xffffffff, 0x26283032, },
+ { 1, 1, 0, 0x00000e24, 0xffffffff, 0x40424446, },
+ { 1, 1, 0, 0x00000e28, 0xffffffff, 0x32343638, },
+ { 1, 1, 0, 0x00000e2c, 0xffffffff, 0x38404244, },
+ { 1, 1, 0, 0x00000e30, 0xffffffff, 0x30323436, },
+ { 1, 1, 1, 0x00000e34, 0xffffffff, 0x38404244, },
+ { 1, 1, 1, 0x00000e38, 0xffffffff, 0x30323436, },
+ { 1, 1, 0, 0x00000e3c, 0xffffffff, 0x38404244, },
+ { 1, 1, 0, 0x00000e40, 0xffffffff, 0x30323436, },
+ { 1, 1, 0, 0x00000e44, 0xffffffff, 0x42442628, },
+ { 1, 1, 1, 0x00000e48, 0xffffffff, 0x34363840, },
+ { 1, 1, 1, 0x00000e4c, 0xffffffff, 0x26283032, },
};
RTW_DECL_TABLE_BB_PG(rtw8822b_bb_pg_type2);
-static const u32 rtw8822b_bb_pg_type5[] = {
- 0, 0, 0, 0x00000c20, 0xffffffff, 0x32343638,
- 0, 0, 0, 0x00000c24, 0xffffffff, 0x36384042,
- 0, 0, 0, 0x00000c28, 0xffffffff, 0x28303234,
- 0, 0, 0, 0x00000c2c, 0xffffffff, 0x34363840,
- 0, 0, 0, 0x00000c30, 0xffffffff, 0x26283032,
- 0, 0, 1, 0x00000c34, 0xffffffff, 0x34363840,
- 0, 0, 1, 0x00000c38, 0xffffffff, 0x26283032,
- 0, 0, 0, 0x00000c3c, 0xffffffff, 0x34363840,
- 0, 0, 0, 0x00000c40, 0xffffffff, 0x26283032,
- 0, 0, 0, 0x00000c44, 0xffffffff, 0x38402224,
- 0, 0, 1, 0x00000c48, 0xffffffff, 0x30323436,
- 0, 0, 1, 0x00000c4c, 0xffffffff, 0x22242628,
- 0, 1, 0, 0x00000e20, 0xffffffff, 0x32343638,
- 0, 1, 0, 0x00000e24, 0xffffffff, 0x36384042,
- 0, 1, 0, 0x00000e28, 0xffffffff, 0x28303234,
- 0, 1, 0, 0x00000e2c, 0xffffffff, 0x34363840,
- 0, 1, 0, 0x00000e30, 0xffffffff, 0x26283032,
- 0, 1, 1, 0x00000e34, 0xffffffff, 0x34363840,
- 0, 1, 1, 0x00000e38, 0xffffffff, 0x26283032,
- 0, 1, 0, 0x00000e3c, 0xffffffff, 0x34363840,
- 0, 1, 0, 0x00000e40, 0xffffffff, 0x26283032,
- 0, 1, 0, 0x00000e44, 0xffffffff, 0x38402224,
- 0, 1, 1, 0x00000e48, 0xffffffff, 0x30323436,
- 0, 1, 1, 0x00000e4c, 0xffffffff, 0x22242628,
- 1, 0, 0, 0x00000c24, 0xffffffff, 0x34363840,
- 1, 0, 0, 0x00000c28, 0xffffffff, 0x26283032,
- 1, 0, 0, 0x00000c2c, 0xffffffff, 0x32343638,
- 1, 0, 0, 0x00000c30, 0xffffffff, 0x24262830,
- 1, 0, 1, 0x00000c34, 0xffffffff, 0x32343638,
- 1, 0, 1, 0x00000c38, 0xffffffff, 0x24262830,
- 1, 0, 0, 0x00000c3c, 0xffffffff, 0x32343638,
- 1, 0, 0, 0x00000c40, 0xffffffff, 0x24262830,
- 1, 0, 0, 0x00000c44, 0xffffffff, 0x36382022,
- 1, 0, 1, 0x00000c48, 0xffffffff, 0x28303234,
- 1, 0, 1, 0x00000c4c, 0xffffffff, 0x20222426,
- 1, 1, 0, 0x00000e24, 0xffffffff, 0x34363840,
- 1, 1, 0, 0x00000e28, 0xffffffff, 0x26283032,
- 1, 1, 0, 0x00000e2c, 0xffffffff, 0x32343638,
- 1, 1, 0, 0x00000e30, 0xffffffff, 0x24262830,
- 1, 1, 1, 0x00000e34, 0xffffffff, 0x32343638,
- 1, 1, 1, 0x00000e38, 0xffffffff, 0x24262830,
- 1, 1, 0, 0x00000e3c, 0xffffffff, 0x32343638,
- 1, 1, 0, 0x00000e40, 0xffffffff, 0x24262830,
- 1, 1, 0, 0x00000e44, 0xffffffff, 0x36382022,
- 1, 1, 1, 0x00000e48, 0xffffffff, 0x28303234,
- 1, 1, 1, 0x00000e4c, 0xffffffff, 0x20222426
+static const struct rtw_phy_pg_cfg_pair rtw8822b_bb_pg_type3[] = {
+ { 0, 0, 0, 0x00000c20, 0xffffffff, 0x32343638, },
+ { 0, 0, 0, 0x00000c24, 0xffffffff, 0x36384042, },
+ { 0, 0, 0, 0x00000c28, 0xffffffff, 0x28303234, },
+ { 0, 0, 0, 0x00000c2c, 0xffffffff, 0x34363840, },
+ { 0, 0, 0, 0x00000c30, 0xffffffff, 0x26283032, },
+ { 0, 0, 1, 0x00000c34, 0xffffffff, 0x34363840, },
+ { 0, 0, 1, 0x00000c38, 0xffffffff, 0x26283032, },
+ { 0, 0, 0, 0x00000c3c, 0xffffffff, 0x34363840, },
+ { 0, 0, 0, 0x00000c40, 0xffffffff, 0x26283032, },
+ { 0, 0, 0, 0x00000c44, 0xffffffff, 0x38402224, },
+ { 0, 0, 1, 0x00000c48, 0xffffffff, 0x30323436, },
+ { 0, 0, 1, 0x00000c4c, 0xffffffff, 0x22242628, },
+ { 0, 1, 0, 0x00000e20, 0xffffffff, 0x32343638, },
+ { 0, 1, 0, 0x00000e24, 0xffffffff, 0x36384042, },
+ { 0, 1, 0, 0x00000e28, 0xffffffff, 0x28303234, },
+ { 0, 1, 0, 0x00000e2c, 0xffffffff, 0x34363840, },
+ { 0, 1, 0, 0x00000e30, 0xffffffff, 0x26283032, },
+ { 0, 1, 1, 0x00000e34, 0xffffffff, 0x34363840, },
+ { 0, 1, 1, 0x00000e38, 0xffffffff, 0x26283032, },
+ { 0, 1, 0, 0x00000e3c, 0xffffffff, 0x34363840, },
+ { 0, 1, 0, 0x00000e40, 0xffffffff, 0x26283032, },
+ { 0, 1, 0, 0x00000e44, 0xffffffff, 0x38402224, },
+ { 0, 1, 1, 0x00000e48, 0xffffffff, 0x30323436, },
+ { 0, 1, 1, 0x00000e4c, 0xffffffff, 0x22242628, },
+ { 1, 0, 0, 0x00000c24, 0xffffffff, 0x34363840, },
+ { 1, 0, 0, 0x00000c28, 0xffffffff, 0x26283032, },
+ { 1, 0, 0, 0x00000c2c, 0xffffffff, 0x32343638, },
+ { 1, 0, 0, 0x00000c30, 0xffffffff, 0x24262830, },
+ { 1, 0, 1, 0x00000c34, 0xffffffff, 0x32343638, },
+ { 1, 0, 1, 0x00000c38, 0xffffffff, 0x24262830, },
+ { 1, 0, 0, 0x00000c3c, 0xffffffff, 0x32343638, },
+ { 1, 0, 0, 0x00000c40, 0xffffffff, 0x24262830, },
+ { 1, 0, 0, 0x00000c44, 0xffffffff, 0x36382022, },
+ { 1, 0, 1, 0x00000c48, 0xffffffff, 0x28303234, },
+ { 1, 0, 1, 0x00000c4c, 0xffffffff, 0x20222426, },
+ { 1, 1, 0, 0x00000e24, 0xffffffff, 0x34363840, },
+ { 1, 1, 0, 0x00000e28, 0xffffffff, 0x26283032, },
+ { 1, 1, 0, 0x00000e2c, 0xffffffff, 0x32343638, },
+ { 1, 1, 0, 0x00000e30, 0xffffffff, 0x24262830, },
+ { 1, 1, 1, 0x00000e34, 0xffffffff, 0x32343638, },
+ { 1, 1, 1, 0x00000e38, 0xffffffff, 0x24262830, },
+ { 1, 1, 0, 0x00000e3c, 0xffffffff, 0x32343638, },
+ { 1, 1, 0, 0x00000e40, 0xffffffff, 0x24262830, },
+ { 1, 1, 0, 0x00000e44, 0xffffffff, 0x36382022, },
+ { 1, 1, 1, 0x00000e48, 0xffffffff, 0x28303234, },
+ { 1, 1, 1, 0x00000e4c, 0xffffffff, 0x20222426, },
+};
+
+RTW_DECL_TABLE_BB_PG(rtw8822b_bb_pg_type3);
+
+static const struct rtw_phy_pg_cfg_pair rtw8822b_bb_pg_type5[] = {
+ { 0, 0, 0, 0x00000c20, 0xffffffff, 0x32343638, },
+ { 0, 0, 0, 0x00000c24, 0xffffffff, 0x36384042, },
+ { 0, 0, 0, 0x00000c28, 0xffffffff, 0x28303234, },
+ { 0, 0, 0, 0x00000c2c, 0xffffffff, 0x34363840, },
+ { 0, 0, 0, 0x00000c30, 0xffffffff, 0x26283032, },
+ { 0, 0, 1, 0x00000c34, 0xffffffff, 0x34363840, },
+ { 0, 0, 1, 0x00000c38, 0xffffffff, 0x26283032, },
+ { 0, 0, 0, 0x00000c3c, 0xffffffff, 0x34363840, },
+ { 0, 0, 0, 0x00000c40, 0xffffffff, 0x26283032, },
+ { 0, 0, 0, 0x00000c44, 0xffffffff, 0x38402224, },
+ { 0, 0, 1, 0x00000c48, 0xffffffff, 0x30323436, },
+ { 0, 0, 1, 0x00000c4c, 0xffffffff, 0x22242628, },
+ { 0, 1, 0, 0x00000e20, 0xffffffff, 0x32343638, },
+ { 0, 1, 0, 0x00000e24, 0xffffffff, 0x36384042, },
+ { 0, 1, 0, 0x00000e28, 0xffffffff, 0x28303234, },
+ { 0, 1, 0, 0x00000e2c, 0xffffffff, 0x34363840, },
+ { 0, 1, 0, 0x00000e30, 0xffffffff, 0x26283032, },
+ { 0, 1, 1, 0x00000e34, 0xffffffff, 0x34363840, },
+ { 0, 1, 1, 0x00000e38, 0xffffffff, 0x26283032, },
+ { 0, 1, 0, 0x00000e3c, 0xffffffff, 0x34363840, },
+ { 0, 1, 0, 0x00000e40, 0xffffffff, 0x26283032, },
+ { 0, 1, 0, 0x00000e44, 0xffffffff, 0x38402224, },
+ { 0, 1, 1, 0x00000e48, 0xffffffff, 0x30323436, },
+ { 0, 1, 1, 0x00000e4c, 0xffffffff, 0x22242628, },
+ { 1, 0, 0, 0x00000c24, 0xffffffff, 0x34363840, },
+ { 1, 0, 0, 0x00000c28, 0xffffffff, 0x26283032, },
+ { 1, 0, 0, 0x00000c2c, 0xffffffff, 0x32343638, },
+ { 1, 0, 0, 0x00000c30, 0xffffffff, 0x24262830, },
+ { 1, 0, 1, 0x00000c34, 0xffffffff, 0x32343638, },
+ { 1, 0, 1, 0x00000c38, 0xffffffff, 0x24262830, },
+ { 1, 0, 0, 0x00000c3c, 0xffffffff, 0x32343638, },
+ { 1, 0, 0, 0x00000c40, 0xffffffff, 0x24262830, },
+ { 1, 0, 0, 0x00000c44, 0xffffffff, 0x36382022, },
+ { 1, 0, 1, 0x00000c48, 0xffffffff, 0x28303234, },
+ { 1, 0, 1, 0x00000c4c, 0xffffffff, 0x20222426, },
+ { 1, 1, 0, 0x00000e24, 0xffffffff, 0x34363840, },
+ { 1, 1, 0, 0x00000e28, 0xffffffff, 0x26283032, },
+ { 1, 1, 0, 0x00000e2c, 0xffffffff, 0x32343638, },
+ { 1, 1, 0, 0x00000e30, 0xffffffff, 0x24262830, },
+ { 1, 1, 1, 0x00000e34, 0xffffffff, 0x32343638, },
+ { 1, 1, 1, 0x00000e38, 0xffffffff, 0x24262830, },
+ { 1, 1, 0, 0x00000e3c, 0xffffffff, 0x32343638, },
+ { 1, 1, 0, 0x00000e40, 0xffffffff, 0x24262830, },
+ { 1, 1, 0, 0x00000e44, 0xffffffff, 0x36382022, },
+ { 1, 1, 1, 0x00000e48, 0xffffffff, 0x28303234, },
+ { 1, 1, 1, 0x00000e4c, 0xffffffff, 0x20222426, },
};
RTW_DECL_TABLE_BB_PG(rtw8822b_bb_pg_type5);
@@ -20382,6 +20433,596 @@ static const u32 rtw8822b_rf_b[] = {
RTW_DECL_TABLE_RF_RADIO(rtw8822b_rf_b, B);
+static const struct rtw_txpwr_lmt_cfg_pair rtw8822b_txpwr_lmt_type0[] = {
+ { 0, 0, 0, 0, 1, 32, },
+ { 2, 0, 0, 0, 1, 28, },
+ { 1, 0, 0, 0, 1, 30, },
+ { 0, 0, 0, 0, 2, 32, },
+ { 2, 0, 0, 0, 2, 28, },
+ { 1, 0, 0, 0, 2, 30, },
+ { 0, 0, 0, 0, 3, 32, },
+ { 2, 0, 0, 0, 3, 28, },
+ { 1, 0, 0, 0, 3, 30, },
+ { 0, 0, 0, 0, 4, 32, },
+ { 2, 0, 0, 0, 4, 28, },
+ { 1, 0, 0, 0, 4, 30, },
+ { 0, 0, 0, 0, 5, 32, },
+ { 2, 0, 0, 0, 5, 28, },
+ { 1, 0, 0, 0, 5, 30, },
+ { 0, 0, 0, 0, 6, 32, },
+ { 2, 0, 0, 0, 6, 28, },
+ { 1, 0, 0, 0, 6, 30, },
+ { 0, 0, 0, 0, 7, 32, },
+ { 2, 0, 0, 0, 7, 28, },
+ { 1, 0, 0, 0, 7, 30, },
+ { 0, 0, 0, 0, 8, 32, },
+ { 2, 0, 0, 0, 8, 28, },
+ { 1, 0, 0, 0, 8, 30, },
+ { 0, 0, 0, 0, 9, 32, },
+ { 2, 0, 0, 0, 9, 28, },
+ { 1, 0, 0, 0, 9, 30, },
+ { 0, 0, 0, 0, 10, 32, },
+ { 2, 0, 0, 0, 10, 28, },
+ { 1, 0, 0, 0, 10, 30, },
+ { 0, 0, 0, 0, 11, 32, },
+ { 2, 0, 0, 0, 11, 28, },
+ { 1, 0, 0, 0, 11, 30, },
+ { 0, 0, 0, 0, 12, 26, },
+ { 2, 0, 0, 0, 12, 28, },
+ { 1, 0, 0, 0, 12, 30, },
+ { 0, 0, 0, 0, 13, 20, },
+ { 2, 0, 0, 0, 13, 28, },
+ { 1, 0, 0, 0, 13, 28, },
+ { 0, 0, 0, 0, 14, 63, },
+ { 2, 0, 0, 0, 14, 63, },
+ { 1, 0, 0, 0, 14, 32, },
+ { 0, 0, 0, 1, 1, 26, },
+ { 2, 0, 0, 1, 1, 30, },
+ { 1, 0, 0, 1, 1, 34, },
+ { 0, 0, 0, 1, 2, 30, },
+ { 2, 0, 0, 1, 2, 30, },
+ { 1, 0, 0, 1, 2, 34, },
+ { 0, 0, 0, 1, 3, 32, },
+ { 2, 0, 0, 1, 3, 30, },
+ { 1, 0, 0, 1, 3, 34, },
+ { 0, 0, 0, 1, 4, 34, },
+ { 2, 0, 0, 1, 4, 30, },
+ { 1, 0, 0, 1, 4, 34, },
+ { 0, 0, 0, 1, 5, 34, },
+ { 2, 0, 0, 1, 5, 30, },
+ { 1, 0, 0, 1, 5, 34, },
+ { 0, 0, 0, 1, 6, 34, },
+ { 2, 0, 0, 1, 6, 30, },
+ { 1, 0, 0, 1, 6, 34, },
+ { 0, 0, 0, 1, 7, 34, },
+ { 2, 0, 0, 1, 7, 30, },
+ { 1, 0, 0, 1, 7, 34, },
+ { 0, 0, 0, 1, 8, 34, },
+ { 2, 0, 0, 1, 8, 30, },
+ { 1, 0, 0, 1, 8, 34, },
+ { 0, 0, 0, 1, 9, 32, },
+ { 2, 0, 0, 1, 9, 30, },
+ { 1, 0, 0, 1, 9, 34, },
+ { 0, 0, 0, 1, 10, 30, },
+ { 2, 0, 0, 1, 10, 30, },
+ { 1, 0, 0, 1, 10, 34, },
+ { 0, 0, 0, 1, 11, 28, },
+ { 2, 0, 0, 1, 11, 30, },
+ { 1, 0, 0, 1, 11, 34, },
+ { 0, 0, 0, 1, 12, 22, },
+ { 2, 0, 0, 1, 12, 30, },
+ { 1, 0, 0, 1, 12, 34, },
+ { 0, 0, 0, 1, 13, 14, },
+ { 2, 0, 0, 1, 13, 30, },
+ { 1, 0, 0, 1, 13, 34, },
+ { 0, 0, 0, 1, 14, 63, },
+ { 2, 0, 0, 1, 14, 63, },
+ { 1, 0, 0, 1, 14, 63, },
+ { 0, 0, 0, 2, 1, 26, },
+ { 2, 0, 0, 2, 1, 30, },
+ { 1, 0, 0, 2, 1, 34, },
+ { 0, 0, 0, 2, 2, 30, },
+ { 2, 0, 0, 2, 2, 30, },
+ { 1, 0, 0, 2, 2, 34, },
+ { 0, 0, 0, 2, 3, 32, },
+ { 2, 0, 0, 2, 3, 30, },
+ { 1, 0, 0, 2, 3, 34, },
+ { 0, 0, 0, 2, 4, 34, },
+ { 2, 0, 0, 2, 4, 30, },
+ { 1, 0, 0, 2, 4, 34, },
+ { 0, 0, 0, 2, 5, 34, },
+ { 2, 0, 0, 2, 5, 30, },
+ { 1, 0, 0, 2, 5, 34, },
+ { 0, 0, 0, 2, 6, 34, },
+ { 2, 0, 0, 2, 6, 30, },
+ { 1, 0, 0, 2, 6, 34, },
+ { 0, 0, 0, 2, 7, 34, },
+ { 2, 0, 0, 2, 7, 30, },
+ { 1, 0, 0, 2, 7, 34, },
+ { 0, 0, 0, 2, 8, 34, },
+ { 2, 0, 0, 2, 8, 30, },
+ { 1, 0, 0, 2, 8, 34, },
+ { 0, 0, 0, 2, 9, 32, },
+ { 2, 0, 0, 2, 9, 30, },
+ { 1, 0, 0, 2, 9, 34, },
+ { 0, 0, 0, 2, 10, 30, },
+ { 2, 0, 0, 2, 10, 30, },
+ { 1, 0, 0, 2, 10, 34, },
+ { 0, 0, 0, 2, 11, 26, },
+ { 2, 0, 0, 2, 11, 30, },
+ { 1, 0, 0, 2, 11, 34, },
+ { 0, 0, 0, 2, 12, 20, },
+ { 2, 0, 0, 2, 12, 30, },
+ { 1, 0, 0, 2, 12, 34, },
+ { 0, 0, 0, 2, 13, 14, },
+ { 2, 0, 0, 2, 13, 30, },
+ { 1, 0, 0, 2, 13, 34, },
+ { 0, 0, 0, 2, 14, 63, },
+ { 2, 0, 0, 2, 14, 63, },
+ { 1, 0, 0, 2, 14, 63, },
+ { 0, 0, 0, 3, 1, 26, },
+ { 2, 0, 0, 3, 1, 18, },
+ { 1, 0, 0, 3, 1, 30, },
+ { 0, 0, 0, 3, 2, 28, },
+ { 2, 0, 0, 3, 2, 18, },
+ { 1, 0, 0, 3, 2, 30, },
+ { 0, 0, 0, 3, 3, 30, },
+ { 2, 0, 0, 3, 3, 18, },
+ { 1, 0, 0, 3, 3, 30, },
+ { 0, 0, 0, 3, 4, 30, },
+ { 2, 0, 0, 3, 4, 18, },
+ { 1, 0, 0, 3, 4, 30, },
+ { 0, 0, 0, 3, 5, 32, },
+ { 2, 0, 0, 3, 5, 18, },
+ { 1, 0, 0, 3, 5, 30, },
+ { 0, 0, 0, 3, 6, 32, },
+ { 2, 0, 0, 3, 6, 18, },
+ { 1, 0, 0, 3, 6, 30, },
+ { 0, 0, 0, 3, 7, 32, },
+ { 2, 0, 0, 3, 7, 18, },
+ { 1, 0, 0, 3, 7, 30, },
+ { 0, 0, 0, 3, 8, 30, },
+ { 2, 0, 0, 3, 8, 18, },
+ { 1, 0, 0, 3, 8, 30, },
+ { 0, 0, 0, 3, 9, 30, },
+ { 2, 0, 0, 3, 9, 18, },
+ { 1, 0, 0, 3, 9, 30, },
+ { 0, 0, 0, 3, 10, 28, },
+ { 2, 0, 0, 3, 10, 18, },
+ { 1, 0, 0, 3, 10, 30, },
+ { 0, 0, 0, 3, 11, 26, },
+ { 2, 0, 0, 3, 11, 18, },
+ { 1, 0, 0, 3, 11, 30, },
+ { 0, 0, 0, 3, 12, 20, },
+ { 2, 0, 0, 3, 12, 18, },
+ { 1, 0, 0, 3, 12, 30, },
+ { 0, 0, 0, 3, 13, 14, },
+ { 2, 0, 0, 3, 13, 18, },
+ { 1, 0, 0, 3, 13, 30, },
+ { 0, 0, 0, 3, 14, 63, },
+ { 2, 0, 0, 3, 14, 63, },
+ { 1, 0, 0, 3, 14, 63, },
+ { 0, 0, 1, 2, 1, 63, },
+ { 2, 0, 1, 2, 1, 63, },
+ { 1, 0, 1, 2, 1, 63, },
+ { 0, 0, 1, 2, 2, 63, },
+ { 2, 0, 1, 2, 2, 63, },
+ { 1, 0, 1, 2, 2, 63, },
+ { 0, 0, 1, 2, 3, 26, },
+ { 2, 0, 1, 2, 3, 30, },
+ { 1, 0, 1, 2, 3, 34, },
+ { 0, 0, 1, 2, 4, 26, },
+ { 2, 0, 1, 2, 4, 30, },
+ { 1, 0, 1, 2, 4, 34, },
+ { 0, 0, 1, 2, 5, 30, },
+ { 2, 0, 1, 2, 5, 30, },
+ { 1, 0, 1, 2, 5, 34, },
+ { 0, 0, 1, 2, 6, 32, },
+ { 2, 0, 1, 2, 6, 30, },
+ { 1, 0, 1, 2, 6, 34, },
+ { 0, 0, 1, 2, 7, 30, },
+ { 2, 0, 1, 2, 7, 30, },
+ { 1, 0, 1, 2, 7, 34, },
+ { 0, 0, 1, 2, 8, 26, },
+ { 2, 0, 1, 2, 8, 30, },
+ { 1, 0, 1, 2, 8, 34, },
+ { 0, 0, 1, 2, 9, 26, },
+ { 2, 0, 1, 2, 9, 30, },
+ { 1, 0, 1, 2, 9, 34, },
+ { 0, 0, 1, 2, 10, 20, },
+ { 2, 0, 1, 2, 10, 30, },
+ { 1, 0, 1, 2, 10, 34, },
+ { 0, 0, 1, 2, 11, 14, },
+ { 2, 0, 1, 2, 11, 30, },
+ { 1, 0, 1, 2, 11, 34, },
+ { 0, 0, 1, 2, 12, 63, },
+ { 2, 0, 1, 2, 12, 63, },
+ { 1, 0, 1, 2, 12, 63, },
+ { 0, 0, 1, 2, 13, 63, },
+ { 2, 0, 1, 2, 13, 63, },
+ { 1, 0, 1, 2, 13, 63, },
+ { 0, 0, 1, 2, 14, 63, },
+ { 2, 0, 1, 2, 14, 63, },
+ { 1, 0, 1, 2, 14, 63, },
+ { 0, 0, 1, 3, 1, 63, },
+ { 2, 0, 1, 3, 1, 63, },
+ { 1, 0, 1, 3, 1, 63, },
+ { 0, 0, 1, 3, 2, 63, },
+ { 2, 0, 1, 3, 2, 63, },
+ { 1, 0, 1, 3, 2, 63, },
+ { 0, 0, 1, 3, 3, 24, },
+ { 2, 0, 1, 3, 3, 18, },
+ { 1, 0, 1, 3, 3, 30, },
+ { 0, 0, 1, 3, 4, 24, },
+ { 2, 0, 1, 3, 4, 18, },
+ { 1, 0, 1, 3, 4, 30, },
+ { 0, 0, 1, 3, 5, 26, },
+ { 2, 0, 1, 3, 5, 18, },
+ { 1, 0, 1, 3, 5, 30, },
+ { 0, 0, 1, 3, 6, 28, },
+ { 2, 0, 1, 3, 6, 18, },
+ { 1, 0, 1, 3, 6, 30, },
+ { 0, 0, 1, 3, 7, 26, },
+ { 2, 0, 1, 3, 7, 18, },
+ { 1, 0, 1, 3, 7, 30, },
+ { 0, 0, 1, 3, 8, 26, },
+ { 2, 0, 1, 3, 8, 18, },
+ { 1, 0, 1, 3, 8, 30, },
+ { 0, 0, 1, 3, 9, 26, },
+ { 2, 0, 1, 3, 9, 18, },
+ { 1, 0, 1, 3, 9, 30, },
+ { 0, 0, 1, 3, 10, 20, },
+ { 2, 0, 1, 3, 10, 18, },
+ { 1, 0, 1, 3, 10, 30, },
+ { 0, 0, 1, 3, 11, 14, },
+ { 2, 0, 1, 3, 11, 18, },
+ { 1, 0, 1, 3, 11, 30, },
+ { 0, 0, 1, 3, 12, 63, },
+ { 2, 0, 1, 3, 12, 63, },
+ { 1, 0, 1, 3, 12, 63, },
+ { 0, 0, 1, 3, 13, 63, },
+ { 2, 0, 1, 3, 13, 63, },
+ { 1, 0, 1, 3, 13, 63, },
+ { 0, 0, 1, 3, 14, 63, },
+ { 2, 0, 1, 3, 14, 63, },
+ { 1, 0, 1, 3, 14, 63, },
+ { 0, 1, 0, 1, 36, 30, },
+ { 2, 1, 0, 1, 36, 32, },
+ { 1, 1, 0, 1, 36, 30, },
+ { 0, 1, 0, 1, 40, 32, },
+ { 2, 1, 0, 1, 40, 32, },
+ { 1, 1, 0, 1, 40, 30, },
+ { 0, 1, 0, 1, 44, 32, },
+ { 2, 1, 0, 1, 44, 32, },
+ { 1, 1, 0, 1, 44, 30, },
+ { 0, 1, 0, 1, 48, 32, },
+ { 2, 1, 0, 1, 48, 32, },
+ { 1, 1, 0, 1, 48, 30, },
+ { 0, 1, 0, 1, 52, 32, },
+ { 2, 1, 0, 1, 52, 32, },
+ { 1, 1, 0, 1, 52, 28, },
+ { 0, 1, 0, 1, 56, 32, },
+ { 2, 1, 0, 1, 56, 32, },
+ { 1, 1, 0, 1, 56, 28, },
+ { 0, 1, 0, 1, 60, 32, },
+ { 2, 1, 0, 1, 60, 32, },
+ { 1, 1, 0, 1, 60, 28, },
+ { 0, 1, 0, 1, 64, 28, },
+ { 2, 1, 0, 1, 64, 32, },
+ { 1, 1, 0, 1, 64, 28, },
+ { 0, 1, 0, 1, 100, 26, },
+ { 2, 1, 0, 1, 100, 32, },
+ { 1, 1, 0, 1, 100, 32, },
+ { 0, 1, 0, 1, 104, 32, },
+ { 2, 1, 0, 1, 104, 32, },
+ { 1, 1, 0, 1, 104, 32, },
+ { 0, 1, 0, 1, 108, 32, },
+ { 2, 1, 0, 1, 108, 32, },
+ { 1, 1, 0, 1, 108, 32, },
+ { 0, 1, 0, 1, 112, 32, },
+ { 2, 1, 0, 1, 112, 32, },
+ { 1, 1, 0, 1, 112, 32, },
+ { 0, 1, 0, 1, 116, 32, },
+ { 2, 1, 0, 1, 116, 32, },
+ { 1, 1, 0, 1, 116, 32, },
+ { 0, 1, 0, 1, 120, 32, },
+ { 2, 1, 0, 1, 120, 32, },
+ { 1, 1, 0, 1, 120, 32, },
+ { 0, 1, 0, 1, 124, 32, },
+ { 2, 1, 0, 1, 124, 32, },
+ { 1, 1, 0, 1, 124, 32, },
+ { 0, 1, 0, 1, 128, 32, },
+ { 2, 1, 0, 1, 128, 32, },
+ { 1, 1, 0, 1, 128, 32, },
+ { 0, 1, 0, 1, 132, 32, },
+ { 2, 1, 0, 1, 132, 32, },
+ { 1, 1, 0, 1, 132, 32, },
+ { 0, 1, 0, 1, 136, 32, },
+ { 2, 1, 0, 1, 136, 32, },
+ { 1, 1, 0, 1, 136, 32, },
+ { 0, 1, 0, 1, 140, 28, },
+ { 2, 1, 0, 1, 140, 32, },
+ { 1, 1, 0, 1, 140, 32, },
+ { 0, 1, 0, 1, 144, 28, },
+ { 2, 1, 0, 1, 144, 32, },
+ { 1, 1, 0, 1, 144, 63, },
+ { 0, 1, 0, 1, 149, 32, },
+ { 2, 1, 0, 1, 149, 63, },
+ { 1, 1, 0, 1, 149, 63, },
+ { 0, 1, 0, 1, 153, 32, },
+ { 2, 1, 0, 1, 153, 63, },
+ { 1, 1, 0, 1, 153, 63, },
+ { 0, 1, 0, 1, 157, 32, },
+ { 2, 1, 0, 1, 157, 63, },
+ { 1, 1, 0, 1, 157, 63, },
+ { 0, 1, 0, 1, 161, 32, },
+ { 2, 1, 0, 1, 161, 63, },
+ { 1, 1, 0, 1, 161, 63, },
+ { 0, 1, 0, 1, 165, 32, },
+ { 2, 1, 0, 1, 165, 63, },
+ { 1, 1, 0, 1, 165, 63, },
+ { 0, 1, 0, 2, 36, 30, },
+ { 2, 1, 0, 2, 36, 32, },
+ { 1, 1, 0, 2, 36, 28, },
+ { 0, 1, 0, 2, 40, 32, },
+ { 2, 1, 0, 2, 40, 32, },
+ { 1, 1, 0, 2, 40, 28, },
+ { 0, 1, 0, 2, 44, 32, },
+ { 2, 1, 0, 2, 44, 32, },
+ { 1, 1, 0, 2, 44, 28, },
+ { 0, 1, 0, 2, 48, 32, },
+ { 2, 1, 0, 2, 48, 32, },
+ { 1, 1, 0, 2, 48, 28, },
+ { 0, 1, 0, 2, 52, 32, },
+ { 2, 1, 0, 2, 52, 32, },
+ { 1, 1, 0, 2, 52, 28, },
+ { 0, 1, 0, 2, 56, 32, },
+ { 2, 1, 0, 2, 56, 32, },
+ { 1, 1, 0, 2, 56, 28, },
+ { 0, 1, 0, 2, 60, 32, },
+ { 2, 1, 0, 2, 60, 32, },
+ { 1, 1, 0, 2, 60, 28, },
+ { 0, 1, 0, 2, 64, 28, },
+ { 2, 1, 0, 2, 64, 32, },
+ { 1, 1, 0, 2, 64, 28, },
+ { 0, 1, 0, 2, 100, 26, },
+ { 2, 1, 0, 2, 100, 32, },
+ { 1, 1, 0, 2, 100, 32, },
+ { 0, 1, 0, 2, 104, 32, },
+ { 2, 1, 0, 2, 104, 32, },
+ { 1, 1, 0, 2, 104, 32, },
+ { 0, 1, 0, 2, 108, 32, },
+ { 2, 1, 0, 2, 108, 32, },
+ { 1, 1, 0, 2, 108, 32, },
+ { 0, 1, 0, 2, 112, 32, },
+ { 2, 1, 0, 2, 112, 32, },
+ { 1, 1, 0, 2, 112, 32, },
+ { 0, 1, 0, 2, 116, 32, },
+ { 2, 1, 0, 2, 116, 32, },
+ { 1, 1, 0, 2, 116, 32, },
+ { 0, 1, 0, 2, 120, 32, },
+ { 2, 1, 0, 2, 120, 32, },
+ { 1, 1, 0, 2, 120, 32, },
+ { 0, 1, 0, 2, 124, 32, },
+ { 2, 1, 0, 2, 124, 32, },
+ { 1, 1, 0, 2, 124, 32, },
+ { 0, 1, 0, 2, 128, 32, },
+ { 2, 1, 0, 2, 128, 32, },
+ { 1, 1, 0, 2, 128, 32, },
+ { 0, 1, 0, 2, 132, 32, },
+ { 2, 1, 0, 2, 132, 32, },
+ { 1, 1, 0, 2, 132, 32, },
+ { 0, 1, 0, 2, 136, 32, },
+ { 2, 1, 0, 2, 136, 32, },
+ { 1, 1, 0, 2, 136, 32, },
+ { 0, 1, 0, 2, 140, 26, },
+ { 2, 1, 0, 2, 140, 32, },
+ { 1, 1, 0, 2, 140, 32, },
+ { 0, 1, 0, 2, 144, 26, },
+ { 2, 1, 0, 2, 144, 63, },
+ { 1, 1, 0, 2, 144, 63, },
+ { 0, 1, 0, 2, 149, 32, },
+ { 2, 1, 0, 2, 149, 63, },
+ { 1, 1, 0, 2, 149, 63, },
+ { 0, 1, 0, 2, 153, 32, },
+ { 2, 1, 0, 2, 153, 63, },
+ { 1, 1, 0, 2, 153, 63, },
+ { 0, 1, 0, 2, 157, 32, },
+ { 2, 1, 0, 2, 157, 63, },
+ { 1, 1, 0, 2, 157, 63, },
+ { 0, 1, 0, 2, 161, 32, },
+ { 2, 1, 0, 2, 161, 63, },
+ { 1, 1, 0, 2, 161, 63, },
+ { 0, 1, 0, 2, 165, 32, },
+ { 2, 1, 0, 2, 165, 63, },
+ { 1, 1, 0, 2, 165, 63, },
+ { 0, 1, 0, 3, 36, 28, },
+ { 2, 1, 0, 3, 36, 20, },
+ { 1, 1, 0, 3, 36, 22, },
+ { 0, 1, 0, 3, 40, 30, },
+ { 2, 1, 0, 3, 40, 20, },
+ { 1, 1, 0, 3, 40, 22, },
+ { 0, 1, 0, 3, 44, 30, },
+ { 2, 1, 0, 3, 44, 20, },
+ { 1, 1, 0, 3, 44, 22, },
+ { 0, 1, 0, 3, 48, 30, },
+ { 2, 1, 0, 3, 48, 20, },
+ { 1, 1, 0, 3, 48, 22, },
+ { 0, 1, 0, 3, 52, 30, },
+ { 2, 1, 0, 3, 52, 20, },
+ { 1, 1, 0, 3, 52, 22, },
+ { 0, 1, 0, 3, 56, 30, },
+ { 2, 1, 0, 3, 56, 20, },
+ { 1, 1, 0, 3, 56, 22, },
+ { 0, 1, 0, 3, 60, 30, },
+ { 2, 1, 0, 3, 60, 20, },
+ { 1, 1, 0, 3, 60, 22, },
+ { 0, 1, 0, 3, 64, 28, },
+ { 2, 1, 0, 3, 64, 20, },
+ { 1, 1, 0, 3, 64, 22, },
+ { 0, 1, 0, 3, 100, 26, },
+ { 2, 1, 0, 3, 100, 20, },
+ { 1, 1, 0, 3, 100, 30, },
+ { 0, 1, 0, 3, 104, 30, },
+ { 2, 1, 0, 3, 104, 20, },
+ { 1, 1, 0, 3, 104, 30, },
+ { 0, 1, 0, 3, 108, 32, },
+ { 2, 1, 0, 3, 108, 20, },
+ { 1, 1, 0, 3, 108, 30, },
+ { 0, 1, 0, 3, 112, 32, },
+ { 2, 1, 0, 3, 112, 20, },
+ { 1, 1, 0, 3, 112, 30, },
+ { 0, 1, 0, 3, 116, 32, },
+ { 2, 1, 0, 3, 116, 20, },
+ { 1, 1, 0, 3, 116, 30, },
+ { 0, 1, 0, 3, 120, 32, },
+ { 2, 1, 0, 3, 120, 20, },
+ { 1, 1, 0, 3, 120, 30, },
+ { 0, 1, 0, 3, 124, 32, },
+ { 2, 1, 0, 3, 124, 20, },
+ { 1, 1, 0, 3, 124, 30, },
+ { 0, 1, 0, 3, 128, 32, },
+ { 2, 1, 0, 3, 128, 20, },
+ { 1, 1, 0, 3, 128, 30, },
+ { 0, 1, 0, 3, 132, 32, },
+ { 2, 1, 0, 3, 132, 20, },
+ { 1, 1, 0, 3, 132, 30, },
+ { 0, 1, 0, 3, 136, 30, },
+ { 2, 1, 0, 3, 136, 20, },
+ { 1, 1, 0, 3, 136, 30, },
+ { 0, 1, 0, 3, 140, 26, },
+ { 2, 1, 0, 3, 140, 20, },
+ { 1, 1, 0, 3, 140, 30, },
+ { 0, 1, 0, 3, 144, 26, },
+ { 2, 1, 0, 3, 144, 63, },
+ { 1, 1, 0, 3, 144, 63, },
+ { 0, 1, 0, 3, 149, 32, },
+ { 2, 1, 0, 3, 149, 63, },
+ { 1, 1, 0, 3, 149, 63, },
+ { 0, 1, 0, 3, 153, 32, },
+ { 2, 1, 0, 3, 153, 63, },
+ { 1, 1, 0, 3, 153, 63, },
+ { 0, 1, 0, 3, 157, 32, },
+ { 2, 1, 0, 3, 157, 63, },
+ { 1, 1, 0, 3, 157, 63, },
+ { 0, 1, 0, 3, 161, 32, },
+ { 2, 1, 0, 3, 161, 63, },
+ { 1, 1, 0, 3, 161, 63, },
+ { 0, 1, 0, 3, 165, 32, },
+ { 2, 1, 0, 3, 165, 63, },
+ { 1, 1, 0, 3, 165, 63, },
+ { 0, 1, 1, 2, 38, 22, },
+ { 2, 1, 1, 2, 38, 30, },
+ { 1, 1, 1, 2, 38, 30, },
+ { 0, 1, 1, 2, 46, 30, },
+ { 2, 1, 1, 2, 46, 30, },
+ { 1, 1, 1, 2, 46, 30, },
+ { 0, 1, 1, 2, 54, 30, },
+ { 2, 1, 1, 2, 54, 30, },
+ { 1, 1, 1, 2, 54, 30, },
+ { 0, 1, 1, 2, 62, 24, },
+ { 2, 1, 1, 2, 62, 30, },
+ { 1, 1, 1, 2, 62, 30, },
+ { 0, 1, 1, 2, 102, 24, },
+ { 2, 1, 1, 2, 102, 30, },
+ { 1, 1, 1, 2, 102, 30, },
+ { 0, 1, 1, 2, 110, 30, },
+ { 2, 1, 1, 2, 110, 30, },
+ { 1, 1, 1, 2, 110, 30, },
+ { 0, 1, 1, 2, 118, 30, },
+ { 2, 1, 1, 2, 118, 30, },
+ { 1, 1, 1, 2, 118, 30, },
+ { 0, 1, 1, 2, 126, 30, },
+ { 2, 1, 1, 2, 126, 30, },
+ { 1, 1, 1, 2, 126, 30, },
+ { 0, 1, 1, 2, 134, 30, },
+ { 2, 1, 1, 2, 134, 30, },
+ { 1, 1, 1, 2, 134, 30, },
+ { 0, 1, 1, 2, 142, 30, },
+ { 2, 1, 1, 2, 142, 63, },
+ { 1, 1, 1, 2, 142, 63, },
+ { 0, 1, 1, 2, 151, 30, },
+ { 2, 1, 1, 2, 151, 63, },
+ { 1, 1, 1, 2, 151, 63, },
+ { 0, 1, 1, 2, 159, 30, },
+ { 2, 1, 1, 2, 159, 63, },
+ { 1, 1, 1, 2, 159, 63, },
+ { 0, 1, 1, 3, 38, 20, },
+ { 2, 1, 1, 3, 38, 20, },
+ { 1, 1, 1, 3, 38, 22, },
+ { 0, 1, 1, 3, 46, 30, },
+ { 2, 1, 1, 3, 46, 20, },
+ { 1, 1, 1, 3, 46, 22, },
+ { 0, 1, 1, 3, 54, 30, },
+ { 2, 1, 1, 3, 54, 20, },
+ { 1, 1, 1, 3, 54, 22, },
+ { 0, 1, 1, 3, 62, 22, },
+ { 2, 1, 1, 3, 62, 20, },
+ { 1, 1, 1, 3, 62, 22, },
+ { 0, 1, 1, 3, 102, 22, },
+ { 2, 1, 1, 3, 102, 20, },
+ { 1, 1, 1, 3, 102, 30, },
+ { 0, 1, 1, 3, 110, 30, },
+ { 2, 1, 1, 3, 110, 20, },
+ { 1, 1, 1, 3, 110, 30, },
+ { 0, 1, 1, 3, 118, 30, },
+ { 2, 1, 1, 3, 118, 20, },
+ { 1, 1, 1, 3, 118, 30, },
+ { 0, 1, 1, 3, 126, 30, },
+ { 2, 1, 1, 3, 126, 20, },
+ { 1, 1, 1, 3, 126, 30, },
+ { 0, 1, 1, 3, 134, 30, },
+ { 2, 1, 1, 3, 134, 20, },
+ { 1, 1, 1, 3, 134, 30, },
+ { 0, 1, 1, 3, 142, 30, },
+ { 2, 1, 1, 3, 142, 63, },
+ { 1, 1, 1, 3, 142, 63, },
+ { 0, 1, 1, 3, 151, 30, },
+ { 2, 1, 1, 3, 151, 63, },
+ { 1, 1, 1, 3, 151, 63, },
+ { 0, 1, 1, 3, 159, 30, },
+ { 2, 1, 1, 3, 159, 63, },
+ { 1, 1, 1, 3, 159, 63, },
+ { 0, 1, 2, 4, 42, 20, },
+ { 2, 1, 2, 4, 42, 30, },
+ { 1, 1, 2, 4, 42, 28, },
+ { 0, 1, 2, 4, 58, 20, },
+ { 2, 1, 2, 4, 58, 30, },
+ { 1, 1, 2, 4, 58, 28, },
+ { 0, 1, 2, 4, 106, 20, },
+ { 2, 1, 2, 4, 106, 30, },
+ { 1, 1, 2, 4, 106, 30, },
+ { 0, 1, 2, 4, 122, 30, },
+ { 2, 1, 2, 4, 122, 30, },
+ { 1, 1, 2, 4, 122, 30, },
+ { 0, 1, 2, 4, 138, 30, },
+ { 2, 1, 2, 4, 138, 63, },
+ { 1, 1, 2, 4, 138, 63, },
+ { 0, 1, 2, 4, 155, 30, },
+ { 2, 1, 2, 4, 155, 63, },
+ { 1, 1, 2, 4, 155, 63, },
+ { 0, 1, 2, 5, 42, 18, },
+ { 2, 1, 2, 5, 42, 20, },
+ { 1, 1, 2, 5, 42, 22, },
+ { 0, 1, 2, 5, 58, 18, },
+ { 2, 1, 2, 5, 58, 20, },
+ { 1, 1, 2, 5, 58, 22, },
+ { 0, 1, 2, 5, 106, 20, },
+ { 2, 1, 2, 5, 106, 20, },
+ { 1, 1, 2, 5, 106, 30, },
+ { 0, 1, 2, 5, 122, 30, },
+ { 2, 1, 2, 5, 122, 20, },
+ { 1, 1, 2, 5, 122, 30, },
+ { 0, 1, 2, 5, 138, 30, },
+ { 2, 1, 2, 5, 138, 63, },
+ { 1, 1, 2, 5, 138, 63, },
+ { 0, 1, 2, 5, 155, 30, },
+ { 2, 1, 2, 5, 155, 63, },
+ { 1, 1, 2, 5, 155, 63, },
+};
+
+RTW_DECL_TABLE_TXPWR_LMT(rtw8822b_txpwr_lmt_type0);
+
static const struct rtw_txpwr_lmt_cfg_pair rtw8822b_txpwr_lmt_type2[] = {
{ 0, 0, 0, 0, 1, 32, },
{ 2, 0, 0, 0, 1, 28, },
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822b_table.h b/drivers/net/wireless/realtek/rtw88/rtw8822b_table.h
index d4c268889368..4140e1ccb7b1 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822b_table.h
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822b_table.h
@@ -9,9 +9,11 @@ extern const struct rtw_table rtw8822b_mac_tbl;
extern const struct rtw_table rtw8822b_agc_tbl;
extern const struct rtw_table rtw8822b_bb_tbl;
extern const struct rtw_table rtw8822b_bb_pg_type2_tbl;
+extern const struct rtw_table rtw8822b_bb_pg_type3_tbl;
extern const struct rtw_table rtw8822b_bb_pg_type5_tbl;
extern const struct rtw_table rtw8822b_rf_a_tbl;
extern const struct rtw_table rtw8822b_rf_b_tbl;
+extern const struct rtw_table rtw8822b_txpwr_lmt_type0_tbl;
extern const struct rtw_table rtw8822b_txpwr_lmt_type2_tbl;
extern const struct rtw_table rtw8822b_txpwr_lmt_type5_tbl;
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.c b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
index c2f6cd76a658..174029836833 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822c.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
@@ -14,6 +14,7 @@
#include "reg.h"
#include "debug.h"
#include "util.h"
+#include "bf.h"
static void rtw8822c_config_trx_mode(struct rtw_dev *rtwdev, u8 tx_path,
u8 rx_path, bool is_tx2_path);
@@ -40,6 +41,11 @@ static int rtw8822c_read_efuse(struct rtw_dev *rtwdev, u8 *log_map)
efuse->country_code[1] = map->country_code[1];
efuse->bt_setting = map->rf_bt_setting;
efuse->regd = map->rf_board_option & 0x7;
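+ /* per-path thermal sensor readings from efuse; _k keeps their average */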
+ efuse->thermal_meter[RF_PATH_A] = map->path_a_thermal;
+ efuse->thermal_meter[RF_PATH_B] = map->path_b_thermal;
+ efuse->thermal_meter_k =
+ (map->path_a_thermal + map->path_b_thermal) >> 1;
+ efuse->power_track_type = (map->tx_pwr_calibrate_rate >> 4) & 0xf;
for (i = 0; i < 4; i++)
efuse->txpwr_idx_table[i] = map->txpwr_idx_table[i];
@@ -1000,6 +1006,21 @@ static void rtw8822c_rf_init(struct rtw_dev *rtwdev)
rtw8822c_rf_x2_check(rtwdev);
}
+static void rtw8822c_pwrtrack_init(struct rtw_dev *rtwdev)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ u8 path;
+
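+ /* reset the tracking deltas and seed the per-path thermal moving averages */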
+ for (path = RF_PATH_A; path < RTW_RF_PATH_MAX; path++) {
+ dm_info->delta_power_index[path] = 0;
+ ewma_thermal_init(&dm_info->avg_thermal[path]);
+ dm_info->thermal_avg[path] = 0xff;
+ }
+
+ dm_info->pwr_trk_triggered = false;
+ dm_info->thermal_meter_k = rtwdev->efuse.thermal_meter_k;
+}
+
static void rtw8822c_phy_set_param(struct rtw_dev *rtwdev)
{
struct rtw_dm_info *dm_info = &rtwdev->dm_info;
@@ -1047,6 +1068,9 @@ static void rtw8822c_phy_set_param(struct rtw_dev *rtwdev)
dm_info->cck_gi_l_bnd = ((cck_gi_l_bnd_msb << 4) | (cck_gi_l_bnd_lsb));
rtw8822c_rf_init(rtwdev);
+ rtw8822c_pwrtrack_init(rtwdev);
+
+ rtw_bf_phy_init(rtwdev);
}
#define WLAN_TXQ_RPT_EN 0x1F
@@ -1088,8 +1112,8 @@ static void rtw8822c_phy_set_param(struct rtw_dev *rtwdev)
#define WLAN_AMPDU_MAX_TIME 0x70
#define WLAN_RTS_LEN_TH 0xFF
#define WLAN_RTS_TX_TIME_TH 0x08
-#define WLAN_MAX_AGG_PKT_LIMIT 0x20
-#define WLAN_RTS_MAX_AGG_PKT_LIMIT 0x20
+#define WLAN_MAX_AGG_PKT_LIMIT 0x3f
+#define WLAN_RTS_MAX_AGG_PKT_LIMIT 0x3f
#define WLAN_PRE_TXCNT_TIME_TH 0x1E0
#define FAST_EDCA_VO_TH 0x06
#define FAST_EDCA_VI_TH 0x06
@@ -1112,6 +1136,7 @@ static void rtw8822c_phy_set_param(struct rtw_dev *rtwdev)
#define WLAN_RTS_RATE_FB_RATE4_H 0x400003E0
#define WLAN_RTS_RATE_FB_RATE5 0x0600F015
#define WLAN_RTS_RATE_FB_RATE5_H 0x000000E0
+#define WLAN_MULTI_ADDR 0xFFFFFFFF
#define WLAN_TX_FUNC_CFG1 0x30
#define WLAN_TX_FUNC_CFG2 0x30
@@ -1221,6 +1246,8 @@ static int rtw8822c_mac_init(struct rtw_dev *rtwdev)
rtw_write8(rtwdev, REG_BCN_MAX_ERR, WLAN_BCN_MAX_ERR);
/* WMAC configuration */
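+ /* an all-ones MAR filter accepts every multicast address */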
+ rtw_write32(rtwdev, REG_MAR, WLAN_MULTI_ADDR);
+ rtw_write32(rtwdev, REG_MAR + 4, WLAN_MULTI_ADDR);
rtw_write8(rtwdev, REG_BBPSF_CTRL + 2, WLAN_RESP_TXRATE);
rtw_write8(rtwdev, REG_ACKTO, WLAN_ACK_TO);
rtw_write8(rtwdev, REG_ACKTO_CCK, WLAN_ACK_TO_CCK);
@@ -1284,11 +1311,11 @@ static void rtw8822c_set_channel_rf(struct rtw_dev *rtwdev, u8 channel, u8 bw)
rf_reg18 &= ~(RF18_BAND_MASK | RF18_CHANNEL_MASK | RF18_RFSI_MASK |
RF18_BW_MASK);
- rf_reg18 |= (channel <= 14 ? RF18_BAND_2G : RF18_BAND_5G);
+ rf_reg18 |= (IS_CH_2G_BAND(channel) ? RF18_BAND_2G : RF18_BAND_5G);
rf_reg18 |= (channel & RF18_CHANNEL_MASK);
- if (channel > 144)
+ if (IS_CH_5G_BAND_4(channel))
rf_reg18 |= RF18_RFSI_GT_CH140;
- else if (channel >= 80)
+ else if (IS_CH_5G_BAND_3(channel))
rf_reg18 |= RF18_RFSI_GE_CH80;
switch (bw) {
@@ -1338,7 +1365,7 @@ static void rtw8822c_toggle_igi(struct rtw_dev *rtwdev)
static void rtw8822c_set_channel_bb(struct rtw_dev *rtwdev, u8 channel, u8 bw,
u8 primary_ch_idx)
{
- if (channel <= 14) {
+ if (IS_CH_2G_BAND(channel)) {
rtw_write32_clr(rtwdev, REG_BGCTRL, BITS_RX_IQ_WEIGHT);
rtw_write32_mask(rtwdev, REG_RXCCKSEL, 0xf0000000, 0x8);
rtw_write32_set(rtwdev, REG_TXF4, BIT(20));
@@ -1403,7 +1430,7 @@ static void rtw8822c_set_channel_bb(struct rtw_dev *rtwdev, u8 channel, u8 bw,
rtw_write32_mask(rtwdev, REG_TXDFIR0, 0x70, 0x3);
else
rtw_write32_mask(rtwdev, REG_TXDFIR0, 0x70, 0x1);
- } else if (channel > 35) {
+ } else if (IS_CH_5G_BAND(channel)) {
rtw_write32_set(rtwdev, REG_CCKTXONLY, BIT_BB_CCK_CHECK_EN);
rtw_write32_set(rtwdev, REG_CCK_CHECK, BIT_CHECK_CCK_EN);
rtw_write32_set(rtwdev, REG_BGCTRL, BITS_RX_IQ_WEIGHT);
@@ -1411,17 +1438,17 @@ static void rtw8822c_set_channel_bb(struct rtw_dev *rtwdev, u8 channel, u8 bw,
rtw_write32_mask(rtwdev, REG_RXCCKSEL, 0xf0000000, 0x0);
rtw_write32_mask(rtwdev, REG_CCAMSK, 0x3F000000, 0x22);
rtw_write32_mask(rtwdev, REG_TXDFIR0, 0x70, 0x3);
- if (channel >= 36 && channel <= 64) {
+ if (IS_CH_5G_BAND_1(channel) || IS_CH_5G_BAND_2(channel)) {
rtw_write32_mask(rtwdev, REG_RXAGCCTL0, BITS_RXAGC_OFDM,
0x1);
rtw_write32_mask(rtwdev, REG_RXAGCCTL, BITS_RXAGC_OFDM,
0x1);
- } else if (channel >= 100 && channel <= 144) {
+ } else if (IS_CH_5G_BAND_3(channel)) {
rtw_write32_mask(rtwdev, REG_RXAGCCTL0, BITS_RXAGC_OFDM,
0x2);
rtw_write32_mask(rtwdev, REG_RXAGCCTL, BITS_RXAGC_OFDM,
0x2);
- } else if (channel >= 149) {
+ } else if (IS_CH_5G_BAND_4(channel)) {
rtw_write32_mask(rtwdev, REG_RXAGCCTL0, BITS_RXAGC_OFDM,
0x3);
rtw_write32_mask(rtwdev, REG_RXAGCCTL, BITS_RXAGC_OFDM,
@@ -1616,6 +1643,8 @@ static void query_phy_status_page0(struct rtw_dev *rtwdev, u8 *phy_status,
u8 gain_a, gain_b;
s8 rx_power[RTW_RF_PATH_MAX];
s8 min_rx_power = -120;
+ u8 rssi;
+ int path;
rx_power[RF_PATH_A] = GET_PHY_STAT_P0_PWDB_A(phy_status);
rx_power[RF_PATH_B] = GET_PHY_STAT_P0_PWDB_B(phy_status);
@@ -1638,6 +1667,11 @@ static void query_phy_status_page0(struct rtw_dev *rtwdev, u8 *phy_status,
pkt_stat->rx_power[RF_PATH_A] = rx_power[RF_PATH_A];
pkt_stat->rx_power[RF_PATH_B] = rx_power[RF_PATH_B];
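+ /* also record per-path RSSI for the dynamic mechanisms */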
+ for (path = 0; path < rtwdev->hal.rf_path_num; path++) {
+ rssi = rtw_phy_rf_power_2_rssi(&pkt_stat->rx_power[path], 1);
+ dm_info->rssi[path] = rssi;
+ }
+
pkt_stat->rssi = rtw_phy_rf_power_2_rssi(pkt_stat->rx_power, 1);
pkt_stat->bw = RTW_CHANNEL_WIDTH_20;
pkt_stat->signal_power = max(pkt_stat->rx_power[RF_PATH_A],
@@ -1647,8 +1681,13 @@ static void query_phy_status_page0(struct rtw_dev *rtwdev, u8 *phy_status,
static void query_phy_status_page1(struct rtw_dev *rtwdev, u8 *phy_status,
struct rtw_rx_pkt_stat *pkt_stat)
{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
u8 rxsc, bw;
s8 min_rx_power = -120;
+ s8 rx_evm;
+ u8 evm_dbm = 0;
+ u8 rssi;
+ int path;
if (pkt_stat->rate > DESC_RATE11M && pkt_stat->rate < DESC_RATEMCS0)
rxsc = GET_PHY_STAT_P1_L_RXSC(phy_status);
@@ -1669,6 +1708,34 @@ static void query_phy_status_page1(struct rtw_dev *rtwdev, u8 *phy_status,
pkt_stat->signal_power = max3(pkt_stat->rx_power[RF_PATH_A],
pkt_stat->rx_power[RF_PATH_B],
min_rx_power);
+
+ dm_info->curr_rx_rate = pkt_stat->rate;
+
+ pkt_stat->rx_evm[RF_PATH_A] = GET_PHY_STAT_P1_RXEVM_A(phy_status);
+ pkt_stat->rx_evm[RF_PATH_B] = GET_PHY_STAT_P1_RXEVM_B(phy_status);
+
+ pkt_stat->rx_snr[RF_PATH_A] = GET_PHY_STAT_P1_RXSNR_A(phy_status);
+ pkt_stat->rx_snr[RF_PATH_B] = GET_PHY_STAT_P1_RXSNR_B(phy_status);
+
+ pkt_stat->cfo_tail[RF_PATH_A] = GET_PHY_STAT_P1_CFO_TAIL_A(phy_status);
+ pkt_stat->cfo_tail[RF_PATH_B] = GET_PHY_STAT_P1_CFO_TAIL_B(phy_status);
+
+ for (path = 0; path < rtwdev->hal.rf_path_num; path++) {
+ rssi = rtw_phy_rf_power_2_rssi(&pkt_stat->rx_power[path], 1);
+ dm_info->rssi[path] = rssi;
+ dm_info->rx_snr[path] = pkt_stat->rx_snr[path] >> 1;
+ dm_info->cfo_tail[path] = (pkt_stat->cfo_tail[path] * 5) >> 1;
+
+ rx_evm = pkt_stat->rx_evm[path];
+
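+ /* negating S8_MIN would overflow an s8, so report it as 0 instead */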
+ if (rx_evm < 0) {
+ if (rx_evm == S8_MIN)
+ evm_dbm = 0;
+ else
+ evm_dbm = ((u8)-rx_evm >> 1);
+ }
+ dm_info->rx_evm_dbm[path] = evm_dbm;
+ }
}
static void query_phy_status(struct rtw_dev *rtwdev, u8 *phy_status,
@@ -1704,7 +1771,8 @@ static void rtw8822c_query_rx_desc(struct rtw_dev *rtwdev, u8 *rx_desc,
pkt_stat->phy_status = GET_RX_DESC_PHYST(rx_desc);
pkt_stat->icv_err = GET_RX_DESC_ICV_ERR(rx_desc);
pkt_stat->crc_err = GET_RX_DESC_CRC32(rx_desc);
- pkt_stat->decrypted = !GET_RX_DESC_SWDEC(rx_desc);
+ pkt_stat->decrypted = !GET_RX_DESC_SWDEC(rx_desc) &&
+ GET_RX_DESC_ENC_TYPE(rx_desc) != RX_DESC_ENC_NONE;
pkt_stat->is_c2h = GET_RX_DESC_C2H(rx_desc);
pkt_stat->pkt_len = GET_RX_DESC_PKT_LEN(rx_desc);
pkt_stat->drv_info_sz = GET_RX_DESC_DRV_INFO_SIZE(rx_desc);
@@ -1822,6 +1890,7 @@ static void rtw8822c_false_alarm_statistics(struct rtw_dev *rtwdev)
u32 cck_enable;
u32 cck_fa_cnt;
u32 crc32_cnt;
+ u32 cca32_cnt;
u32 ofdm_fa_cnt;
u32 ofdm_fa_cnt1, ofdm_fa_cnt2, ofdm_fa_cnt3, ofdm_fa_cnt4, ofdm_fa_cnt5;
u16 parity_fail, rate_illegal, crc8_fail, mcs_fail, sb_search_fail,
@@ -1866,6 +1935,13 @@ static void rtw8822c_false_alarm_statistics(struct rtw_dev *rtwdev)
dm_info->vht_ok_cnt = crc32_cnt & 0xffff;
dm_info->vht_err_cnt = (crc32_cnt & 0xffff0000) >> 16;
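+ /* 0x2c08 packs the OFDM CCA count in the high word, CCK in the low word */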
+ cca32_cnt = rtw_read32(rtwdev, 0x2c08);
+ dm_info->ofdm_cca_cnt = ((cca32_cnt & 0xffff0000) >> 16);
+ dm_info->cck_cca_cnt = cca32_cnt & 0xffff;
+ dm_info->total_cca_cnt = dm_info->ofdm_cca_cnt;
+ if (cck_enable)
+ dm_info->total_cca_cnt += dm_info->cck_cca_cnt;
+
rtw_write32_mask(rtwdev, REG_CCANRX, BIT_CCK_FA_RST, 0);
rtw_write32_mask(rtwdev, REG_CCANRX, BIT_CCK_FA_RST, 2);
rtw_write32_mask(rtwdev, REG_CCANRX, BIT_OFDM_FA_RST, 0);
@@ -2053,6 +2129,57 @@ static void rtw8822c_coex_cfg_wl_rx_gain(struct rtw_dev *rtwdev, bool low_gain)
}
}
+static void rtw8822c_bf_enable_bfee_su(struct rtw_dev *rtwdev,
+ struct rtw_vif *vif,
+ struct rtw_bfee *bfee)
+{
+ u8 csi_rsc = 0;
+ u32 tmp6dc;
+
+ rtw_bf_enable_bfee_su(rtwdev, vif, bfee);
+
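+ /* keep the hardware NDPA rate; BIT(12) is set only for AP mode */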
+ tmp6dc = rtw_read32(rtwdev, REG_BBPSF_CTRL) |
+ BIT_WMAC_USE_NDPARATE |
+ (csi_rsc << 13);
+ if (vif->net_type == RTW_NET_AP_MODE)
+ rtw_write32(rtwdev, REG_BBPSF_CTRL, tmp6dc | BIT(12));
+ else
+ rtw_write32(rtwdev, REG_BBPSF_CTRL, tmp6dc & ~BIT(12));
+
+ rtw_write32(rtwdev, REG_CSI_RRSR, 0x550);
+}
+
+static void rtw8822c_bf_config_bfee_su(struct rtw_dev *rtwdev,
+ struct rtw_vif *vif,
+ struct rtw_bfee *bfee, bool enable)
+{
+ if (enable)
+ rtw8822c_bf_enable_bfee_su(rtwdev, vif, bfee);
+ else
+ rtw_bf_remove_bfee_su(rtwdev, bfee);
+}
+
+static void rtw8822c_bf_config_bfee_mu(struct rtw_dev *rtwdev,
+ struct rtw_vif *vif,
+ struct rtw_bfee *bfee, bool enable)
+{
+ if (enable)
+ rtw_bf_enable_bfee_mu(rtwdev, vif, bfee);
+ else
+ rtw_bf_remove_bfee_mu(rtwdev, bfee);
+}
+
+static void rtw8822c_bf_config_bfee(struct rtw_dev *rtwdev, struct rtw_vif *vif,
+ struct rtw_bfee *bfee, bool enable)
+{
+ if (bfee->role == RTW_BFEE_SU)
+ rtw8822c_bf_config_bfee_su(rtwdev, vif, bfee, enable);
+ else if (bfee->role == RTW_BFEE_MU)
+ rtw8822c_bf_config_bfee_mu(rtwdev, vif, bfee, enable);
+ else
+ rtw_warn(rtwdev, "wrong bfee role\n");
+}
+
struct dpk_cfg_pair {
u32 addr;
u32 bitmask;
@@ -2603,9 +2730,9 @@ static bool rtw8822c_dpk_coef_iq_check(struct rtw_dev *rtwdev,
{
if (coef_i == 0x1000 || coef_i == 0x0fff ||
coef_q == 0x1000 || coef_q == 0x0fff)
- return 1;
- else
- return 0;
+ return true;
+
+ return false;
}
static u32 rtw8822c_dpk_coef_transfer(struct rtw_dev *rtwdev)
@@ -2843,7 +2970,7 @@ static void rtw8822c_dpk_cal_gs(struct rtw_dev *rtwdev, u8 path)
dpk_info->dpk_gs[path] = tmp_gs;
}
-void rtw8822c_dpk_cal_coef1(struct rtw_dev *rtwdev)
+static void rtw8822c_dpk_cal_coef1(struct rtw_dev *rtwdev)
{
struct rtw_dpk_info *dpk_info = &rtwdev->dm_info.dpk_info;
u32 offset[DPK_RF_PATH_NUM] = {0, 0x58};
@@ -3084,7 +3211,7 @@ static void rtw8822c_phy_calibration(struct rtw_dev *rtwdev)
rtw8822c_do_dpk(rtwdev);
}
-void rtw8822c_dpk_track(struct rtw_dev *rtwdev)
+static void rtw8822c_dpk_track(struct rtw_dev *rtwdev)
{
struct rtw_dpk_info *dpk_info = &rtwdev->dm_info.dpk_info;
u8 path;
@@ -3168,8 +3295,8 @@ rtw8822c_phy_cck_pd_set_reg(struct rtw_dev *rtwdev,
static void rtw8822c_phy_cck_pd_set(struct rtw_dev *rtwdev, u8 new_lvl)
{
struct rtw_dm_info *dm_info = &rtwdev->dm_info;
- s8 pd_lvl[4] = {2, 4, 6, 8};
- s8 cs_lvl[4] = {2, 2, 2, 4};
+ s8 pd_lvl[CCK_PD_LV_MAX] = {0, 2, 4, 6, 8};
+ s8 cs_lvl[CCK_PD_LV_MAX] = {0, 2, 2, 2, 4};
u8 cur_lvl;
u8 nrx, bw;
@@ -3191,6 +3318,87 @@ static void rtw8822c_phy_cck_pd_set(struct rtw_dev *rtwdev, u8 new_lvl)
dm_info->cck_pd_lv[bw][nrx] = new_lvl;
}
+#define PWR_TRACK_MASK 0x7f
+static void rtw8822c_pwrtrack_set(struct rtw_dev *rtwdev, u8 rf_path)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+
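+ /* program the tracked power-index delta into the per-path register field */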
+ switch (rf_path) {
+ case RF_PATH_A:
+ rtw_write32_mask(rtwdev, 0x18a0, PWR_TRACK_MASK,
+ dm_info->delta_power_index[rf_path]);
+ break;
+ case RF_PATH_B:
+ rtw_write32_mask(rtwdev, 0x41a0, PWR_TRACK_MASK,
+ dm_info->delta_power_index[rf_path]);
+ break;
+ default:
+ break;
+ }
+}
+
+static void rtw8822c_pwr_track_path(struct rtw_dev *rtwdev,
+ struct rtw_swing_table *swing_table,
+ u8 path)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ u8 thermal_value, delta;
+
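+ /* 0xff means the efuse holds no thermal calibration for this path */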
+ if (rtwdev->efuse.thermal_meter[path] == 0xff)
+ return;
+
+ thermal_value = rtw_read_rf(rtwdev, path, RF_T_METER, 0x7e);
+
+ rtw_phy_pwrtrack_avg(rtwdev, thermal_value, path);
+
+ delta = rtw_phy_pwrtrack_get_delta(rtwdev, path);
+
+ dm_info->delta_power_index[path] =
+ rtw_phy_pwrtrack_get_pwridx(rtwdev, swing_table, path, path,
+ delta);
+
+ rtw8822c_pwrtrack_set(rtwdev, path);
+}
+
+static void __rtw8822c_pwr_track(struct rtw_dev *rtwdev)
+{
+ struct rtw_swing_table swing_table;
+ u8 i;
+
+ rtw_phy_config_swing_table(rtwdev, &swing_table);
+
+ for (i = 0; i < rtwdev->hal.rf_path_num; i++)
+ rtw8822c_pwr_track_path(rtwdev, &swing_table, i);
+
+ if (rtw_phy_pwrtrack_need_iqk(rtwdev))
+ rtw8822c_do_iqk(rtwdev);
+}
+
+static void rtw8822c_pwr_track(struct rtw_dev *rtwdev)
+{
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+
+ if (efuse->power_track_type != 0)
+ return;
+
+ if (!dm_info->pwr_trk_triggered) {
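+ /* pulse the thermal meter bit on both paths to start a measurement;
+  * the reading is consumed on the next call
+  */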
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_T_METER, BIT(19), 0x01);
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_T_METER, BIT(19), 0x00);
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_T_METER, BIT(19), 0x01);
+
+ rtw_write_rf(rtwdev, RF_PATH_B, RF_T_METER, BIT(19), 0x01);
+ rtw_write_rf(rtwdev, RF_PATH_B, RF_T_METER, BIT(19), 0x00);
+ rtw_write_rf(rtwdev, RF_PATH_B, RF_T_METER, BIT(19), 0x01);
+
+ dm_info->pwr_trk_triggered = true;
+ return;
+ }
+
+ __rtw8822c_pwr_track(rtwdev);
+ dm_info->pwr_trk_triggered = false;
+}
+
static struct rtw_pwr_seq_cmd trans_carddis_to_cardemu_8822c[] = {
{0x0086,
RTW_PWR_CUT_ALL_MSK,
@@ -3571,6 +3779,10 @@ static struct rtw_chip_ops rtw8822c_ops = {
.dpk_track = rtw8822c_dpk_track,
.phy_calibration = rtw8822c_phy_calibration,
.cck_pd_set = rtw8822c_phy_cck_pd_set,
+ .pwr_track = rtw8822c_pwr_track,
+ .config_bfee = rtw8822c_bf_config_bfee,
+ .set_gid_table = rtw_bf_set_gid_table,
+ .cfg_csi_rate = rtw_bf_cfg_csi_rate,
.coex_set_init = rtw8822c_coex_cfg_init,
.coex_set_ant_switch = NULL,
@@ -3725,6 +3937,129 @@ static const struct coex_rf_para rf_para_rx_8822c[] = {
static_assert(ARRAY_SIZE(rf_para_tx_8822c) == ARRAY_SIZE(rf_para_rx_8822c));
+static const u8
+rtw8822c_pwrtrk_5gb_n[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ { 0, 1, 2, 3, 5, 6, 7, 8, 9, 10,
+ 11, 12, 13, 14, 15, 16, 18, 19, 20, 21,
+ 22, 23, 24, 25, 26, 27, 28, 29, 30, 32 },
+ { 0, 1, 2, 3, 5, 6, 7, 8, 9, 10,
+ 11, 12, 13, 14, 15, 16, 18, 19, 20, 21,
+ 22, 23, 24, 25, 26, 27, 28, 29, 30, 32 },
+ { 0, 1, 2, 3, 5, 6, 7, 8, 9, 10,
+ 11, 12, 13, 14, 15, 16, 18, 19, 20, 21,
+ 22, 23, 24, 25, 26, 27, 28, 29, 30, 32 },
+};
+
+static const u8
+rtw8822c_pwrtrk_5gb_p[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+ 10, 10, 11, 12, 13, 14, 15, 16, 17, 18,
+ 19, 20, 21, 22, 22, 23, 24, 25, 26, 27 },
+ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+ 10, 10, 11, 12, 13, 14, 15, 16, 17, 18,
+ 19, 20, 21, 22, 22, 23, 24, 25, 26, 27 },
+ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+ 10, 10, 11, 12, 13, 14, 15, 16, 17, 18,
+ 19, 20, 21, 22, 22, 23, 24, 25, 26, 27 },
+};
+
+static const u8
+rtw8822c_pwrtrk_5ga_n[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ { 0, 1, 2, 4, 5, 6, 7, 8, 9, 10,
+ 11, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+ 23, 24, 25, 26, 27, 28, 29, 30, 31, 33 },
+ { 0, 1, 2, 4, 5, 6, 7, 8, 9, 10,
+ 11, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+ 23, 24, 25, 26, 27, 28, 29, 30, 31, 33 },
+ { 0, 1, 2, 4, 5, 6, 7, 8, 9, 10,
+ 11, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+ 23, 24, 25, 26, 27, 28, 29, 30, 31, 33 },
+};
+
+static const u8
+rtw8822c_pwrtrk_5ga_p[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+ 10, 11, 12, 13, 14, 15, 16, 17, 18, 20,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30 },
+ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+ 10, 11, 12, 13, 14, 15, 16, 17, 18, 20,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30 },
+ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+ 10, 11, 12, 13, 14, 15, 16, 17, 18, 20,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30 },
+};
+
+static const u8 rtw8822c_pwrtrk_2gb_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 2, 3, 4, 4, 5, 6, 7, 8,
+ 9, 9, 10, 11, 12, 13, 14, 15, 15, 16,
+ 17, 18, 19, 20, 20, 21, 22, 23, 24, 25
+};
+
+static const u8 rtw8822c_pwrtrk_2gb_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+ 10, 11, 12, 13, 14, 14, 15, 16, 17, 18,
+ 19, 20, 21, 22, 23, 24, 25, 26, 27, 28
+};
+
+static const u8 rtw8822c_pwrtrk_2ga_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 2, 2, 3, 4, 4, 5, 6, 6,
+ 7, 8, 8, 9, 9, 10, 11, 11, 12, 13,
+ 13, 14, 15, 15, 16, 17, 17, 18, 19, 19
+};
+
+static const u8 rtw8822c_pwrtrk_2ga_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+ 10, 11, 11, 12, 13, 14, 15, 16, 17, 18,
+ 19, 20, 21, 22, 23, 24, 25, 25, 26, 27
+};
+
+static const u8 rtw8822c_pwrtrk_2g_cck_b_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 2, 3, 4, 5, 5, 6, 7, 8,
+ 9, 10, 11, 11, 12, 13, 14, 15, 16, 17,
+ 17, 18, 19, 20, 21, 22, 23, 23, 24, 25
+};
+
+static const u8 rtw8822c_pwrtrk_2g_cck_b_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+ 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29
+};
+
+static const u8 rtw8822c_pwrtrk_2g_cck_a_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 2, 3, 3, 4, 5, 6, 6, 7,
+ 8, 9, 9, 10, 11, 12, 12, 13, 14, 15,
+ 15, 16, 17, 18, 18, 19, 20, 21, 21, 22
+};
+
+static const u8 rtw8822c_pwrtrk_2g_cck_a_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 2, 3, 4, 5, 5, 6, 7, 8,
+ 9, 10, 11, 11, 12, 13, 14, 15, 16, 17,
+ 18, 18, 19, 20, 21, 22, 23, 24, 24, 25
+};
+
+static const struct rtw_pwr_track_tbl rtw8822c_rtw_pwr_track_tbl = {
+ .pwrtrk_5gb_n[RTW_PWR_TRK_5G_1] = rtw8822c_pwrtrk_5gb_n[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5gb_n[RTW_PWR_TRK_5G_2] = rtw8822c_pwrtrk_5gb_n[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5gb_n[RTW_PWR_TRK_5G_3] = rtw8822c_pwrtrk_5gb_n[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5gb_p[RTW_PWR_TRK_5G_1] = rtw8822c_pwrtrk_5gb_p[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5gb_p[RTW_PWR_TRK_5G_2] = rtw8822c_pwrtrk_5gb_p[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5gb_p[RTW_PWR_TRK_5G_3] = rtw8822c_pwrtrk_5gb_p[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5ga_n[RTW_PWR_TRK_5G_1] = rtw8822c_pwrtrk_5ga_n[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5ga_n[RTW_PWR_TRK_5G_2] = rtw8822c_pwrtrk_5ga_n[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5ga_n[RTW_PWR_TRK_5G_3] = rtw8822c_pwrtrk_5ga_n[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5ga_p[RTW_PWR_TRK_5G_1] = rtw8822c_pwrtrk_5ga_p[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5ga_p[RTW_PWR_TRK_5G_2] = rtw8822c_pwrtrk_5ga_p[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5ga_p[RTW_PWR_TRK_5G_3] = rtw8822c_pwrtrk_5ga_p[RTW_PWR_TRK_5G_3],
+ .pwrtrk_2gb_n = rtw8822c_pwrtrk_2gb_n,
+ .pwrtrk_2gb_p = rtw8822c_pwrtrk_2gb_p,
+ .pwrtrk_2ga_n = rtw8822c_pwrtrk_2ga_n,
+ .pwrtrk_2ga_p = rtw8822c_pwrtrk_2ga_p,
+ .pwrtrk_2g_cckb_n = rtw8822c_pwrtrk_2g_cck_b_n,
+ .pwrtrk_2g_cckb_p = rtw8822c_pwrtrk_2g_cck_b_p,
+ .pwrtrk_2g_ccka_n = rtw8822c_pwrtrk_2g_cck_a_n,
+ .pwrtrk_2g_ccka_p = rtw8822c_pwrtrk_2g_cck_a_p,
+};
+
struct rtw_chip_info rtw8822c_hw_spec = {
.ops = &rtw8822c_ops,
.id = RTW_CHIP_TYPE_8822C,
@@ -3747,6 +4082,7 @@ struct rtw_chip_info rtw8822c_hw_spec = {
.dig_min = 0x20,
.ht_supported = true,
.vht_supported = true,
+ .lps_deep_mode_supported = BIT(LPS_DEEP_MODE_LCLK) | BIT(LPS_DEEP_MODE_PG),
.sys_func_en = 0xD8,
.pwr_on_seq = card_enable_flow_8822c,
.pwr_off_seq = card_disable_flow_8822c,
@@ -3765,6 +4101,10 @@ struct rtw_chip_info rtw8822c_hw_spec = {
.rfe_defs_size = ARRAY_SIZE(rtw8822c_rfe_defs),
.en_dis_dpd = true,
.dpd_ratemask = DIS_DPD_RATEALL,
+ .pwr_track_tbl = &rtw8822c_rtw_pwr_track_tbl,
+ .iqk_threshold = 8,
+ .bfer_su_max_num = 2,
+ .bfer_mu_max_num = 1,
.coex_para_ver = 0x19062706,
.bt_desired_ver = 0x6,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.h b/drivers/net/wireless/realtek/rtw88/rtw8822c.h
index 438db74d8e7a..abd9f300bedd 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822c.h
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.h
@@ -149,6 +149,18 @@ const struct rtw_table name ## _tbl = { \
le32_get_bits(*((__le32 *)(phy_stat) + 0x01), GENMASK(11, 8))
#define GET_PHY_STAT_P1_HT_RXSC(phy_stat) \
le32_get_bits(*((__le32 *)(phy_stat) + 0x01), GENMASK(15, 12))
+#define GET_PHY_STAT_P1_RXEVM_A(phy_stat) \
+ le32_get_bits(*((__le32 *)(phy_stat) + 0x04), GENMASK(7, 0))
+#define GET_PHY_STAT_P1_RXEVM_B(phy_stat) \
+ le32_get_bits(*((__le32 *)(phy_stat) + 0x04), GENMASK(15, 8))
+#define GET_PHY_STAT_P1_CFO_TAIL_A(phy_stat) \
+ le32_get_bits(*((__le32 *)(phy_stat) + 0x05), GENMASK(7, 0))
+#define GET_PHY_STAT_P1_CFO_TAIL_B(phy_stat) \
+ le32_get_bits(*((__le32 *)(phy_stat) + 0x05), GENMASK(15, 8))
+#define GET_PHY_STAT_P1_RXSNR_A(phy_stat) \
+ le32_get_bits(*((__le32 *)(phy_stat) + 0x06), GENMASK(7, 0))
+#define GET_PHY_STAT_P1_RXSNR_B(phy_stat) \
+ le32_get_bits(*((__le32 *)(phy_stat) + 0x06), GENMASK(15, 8))
#define REG_ANAPARLDO_POW_MAC 0x0029
#define BIT_LDOE25_PON BIT(0)
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c_table.c b/drivers/net/wireless/realtek/rtw88/rtw8822c_table.c
index e2dd4c766077..d102a2c27757 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822c_table.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822c_table.c
@@ -1762,53 +1762,53 @@ static const u32 rtw8822c_bb[] = {
RTW_DECL_TABLE_PHY_COND(rtw8822c_bb, rtw_phy_cfg_bb);
-static const u32 rtw8822c_bb_pg_type0[] = {
- 0, 0, 0, 0x00000c20, 0xffffffff, 0x484c5054,
- 0, 0, 0, 0x00000c24, 0xffffffff, 0x54585c60,
- 0, 0, 0, 0x00000c28, 0xffffffff, 0x44484c50,
- 0, 0, 0, 0x00000c2c, 0xffffffff, 0x5054585c,
- 0, 0, 0, 0x00000c30, 0xffffffff, 0x4044484c,
- 0, 0, 1, 0x00000c34, 0xffffffff, 0x5054585c,
- 0, 0, 1, 0x00000c38, 0xffffffff, 0x4044484c,
- 0, 0, 0, 0x00000c3c, 0xffffffff, 0x5054585c,
- 0, 0, 0, 0x00000c40, 0xffffffff, 0x4044484c,
- 0, 0, 0, 0x00000c44, 0xffffffff, 0x585c383c,
- 0, 0, 1, 0x00000c48, 0xffffffff, 0x484c5054,
- 0, 0, 1, 0x00000c4c, 0xffffffff, 0x383c4044,
- 0, 1, 0, 0x00000e20, 0xffffffff, 0x484c5054,
- 0, 1, 0, 0x00000e24, 0xffffffff, 0x54585c60,
- 0, 1, 0, 0x00000e28, 0xffffffff, 0x44484c50,
- 0, 1, 0, 0x00000e2c, 0xffffffff, 0x5054585c,
- 0, 1, 0, 0x00000e30, 0xffffffff, 0x4044484c,
- 0, 1, 1, 0x00000e34, 0xffffffff, 0x5054585c,
- 0, 1, 1, 0x00000e38, 0xffffffff, 0x4044484c,
- 0, 1, 0, 0x00000e3c, 0xffffffff, 0x5054585c,
- 0, 1, 0, 0x00000e40, 0xffffffff, 0x4044484c,
- 0, 1, 0, 0x00000e44, 0xffffffff, 0x585c383c,
- 0, 1, 1, 0x00000e48, 0xffffffff, 0x484c5054,
- 0, 1, 1, 0x00000e4c, 0xffffffff, 0x383c4044,
- 1, 0, 0, 0x00000c24, 0xffffffff, 0x54585c60,
- 1, 0, 0, 0x00000c28, 0xffffffff, 0x44484c50,
- 1, 0, 0, 0x00000c2c, 0xffffffff, 0x5054585c,
- 1, 0, 0, 0x00000c30, 0xffffffff, 0x4044484c,
- 1, 0, 1, 0x00000c34, 0xffffffff, 0x5054585c,
- 1, 0, 1, 0x00000c38, 0xffffffff, 0x4044484c,
- 1, 0, 0, 0x00000c3c, 0xffffffff, 0x5054585c,
- 1, 0, 0, 0x00000c40, 0xffffffff, 0x4044484c,
- 1, 0, 0, 0x00000c44, 0xffffffff, 0x585c383c,
- 1, 0, 1, 0x00000c48, 0xffffffff, 0x484c5054,
- 1, 0, 1, 0x00000c4c, 0xffffffff, 0x383c4044,
- 1, 1, 0, 0x00000e24, 0xffffffff, 0x54585c60,
- 1, 1, 0, 0x00000e28, 0xffffffff, 0x44484c50,
- 1, 1, 0, 0x00000e2c, 0xffffffff, 0x5054585c,
- 1, 1, 0, 0x00000e30, 0xffffffff, 0x4044484c,
- 1, 1, 1, 0x00000e34, 0xffffffff, 0x5054585c,
- 1, 1, 1, 0x00000e38, 0xffffffff, 0x4044484c,
- 1, 1, 0, 0x00000e3c, 0xffffffff, 0x5054585c,
- 1, 1, 0, 0x00000e40, 0xffffffff, 0x4044484c,
- 1, 1, 0, 0x00000e44, 0xffffffff, 0x585c383c,
- 1, 1, 1, 0x00000e48, 0xffffffff, 0x484c5054,
- 1, 1, 1, 0x00000e4c, 0xffffffff, 0x383c4044
+static const struct rtw_phy_pg_cfg_pair rtw8822c_bb_pg_type0[] = {
+ { 0, 0, 0, 0x00000c20, 0xffffffff, 0x484c5054, },
+ { 0, 0, 0, 0x00000c24, 0xffffffff, 0x54585c60, },
+ { 0, 0, 0, 0x00000c28, 0xffffffff, 0x44484c50, },
+ { 0, 0, 0, 0x00000c2c, 0xffffffff, 0x5054585c, },
+ { 0, 0, 0, 0x00000c30, 0xffffffff, 0x4044484c, },
+ { 0, 0, 1, 0x00000c34, 0xffffffff, 0x5054585c, },
+ { 0, 0, 1, 0x00000c38, 0xffffffff, 0x4044484c, },
+ { 0, 0, 0, 0x00000c3c, 0xffffffff, 0x5054585c, },
+ { 0, 0, 0, 0x00000c40, 0xffffffff, 0x4044484c, },
+ { 0, 0, 0, 0x00000c44, 0xffffffff, 0x585c383c, },
+ { 0, 0, 1, 0x00000c48, 0xffffffff, 0x484c5054, },
+ { 0, 0, 1, 0x00000c4c, 0xffffffff, 0x383c4044, },
+ { 0, 1, 0, 0x00000e20, 0xffffffff, 0x484c5054, },
+ { 0, 1, 0, 0x00000e24, 0xffffffff, 0x54585c60, },
+ { 0, 1, 0, 0x00000e28, 0xffffffff, 0x44484c50, },
+ { 0, 1, 0, 0x00000e2c, 0xffffffff, 0x5054585c, },
+ { 0, 1, 0, 0x00000e30, 0xffffffff, 0x4044484c, },
+ { 0, 1, 1, 0x00000e34, 0xffffffff, 0x5054585c, },
+ { 0, 1, 1, 0x00000e38, 0xffffffff, 0x4044484c, },
+ { 0, 1, 0, 0x00000e3c, 0xffffffff, 0x5054585c, },
+ { 0, 1, 0, 0x00000e40, 0xffffffff, 0x4044484c, },
+ { 0, 1, 0, 0x00000e44, 0xffffffff, 0x585c383c, },
+ { 0, 1, 1, 0x00000e48, 0xffffffff, 0x484c5054, },
+ { 0, 1, 1, 0x00000e4c, 0xffffffff, 0x383c4044, },
+ { 1, 0, 0, 0x00000c24, 0xffffffff, 0x54585c60, },
+ { 1, 0, 0, 0x00000c28, 0xffffffff, 0x44484c50, },
+ { 1, 0, 0, 0x00000c2c, 0xffffffff, 0x5054585c, },
+ { 1, 0, 0, 0x00000c30, 0xffffffff, 0x4044484c, },
+ { 1, 0, 1, 0x00000c34, 0xffffffff, 0x5054585c, },
+ { 1, 0, 1, 0x00000c38, 0xffffffff, 0x4044484c, },
+ { 1, 0, 0, 0x00000c3c, 0xffffffff, 0x5054585c, },
+ { 1, 0, 0, 0x00000c40, 0xffffffff, 0x4044484c, },
+ { 1, 0, 0, 0x00000c44, 0xffffffff, 0x585c383c, },
+ { 1, 0, 1, 0x00000c48, 0xffffffff, 0x484c5054, },
+ { 1, 0, 1, 0x00000c4c, 0xffffffff, 0x383c4044, },
+ { 1, 1, 0, 0x00000e24, 0xffffffff, 0x54585c60, },
+ { 1, 1, 0, 0x00000e28, 0xffffffff, 0x44484c50, },
+ { 1, 1, 0, 0x00000e2c, 0xffffffff, 0x5054585c, },
+ { 1, 1, 0, 0x00000e30, 0xffffffff, 0x4044484c, },
+ { 1, 1, 1, 0x00000e34, 0xffffffff, 0x5054585c, },
+ { 1, 1, 1, 0x00000e38, 0xffffffff, 0x4044484c, },
+ { 1, 1, 0, 0x00000e3c, 0xffffffff, 0x5054585c, },
+ { 1, 1, 0, 0x00000e40, 0xffffffff, 0x4044484c, },
+ { 1, 1, 0, 0x00000e44, 0xffffffff, 0x585c383c, },
+ { 1, 1, 1, 0x00000e48, 0xffffffff, 0x484c5054, },
+ { 1, 1, 1, 0x00000e4c, 0xffffffff, 0x383c4044, },
};
RTW_DECL_TABLE_BB_PG(rtw8822c_bb_pg_type0);
diff --git a/drivers/net/wireless/realtek/rtw88/rx.c b/drivers/net/wireless/realtek/rtw88/rx.c
index 48b9ed49b79a..9b90339ab697 100644
--- a/drivers/net/wireless/realtek/rtw88/rx.c
+++ b/drivers/net/wireless/realtek/rtw88/rx.c
@@ -5,6 +5,7 @@
#include "main.h"
#include "rx.h"
#include "ps.h"
+#include "debug.h"
void rtw_rx_stats(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
struct sk_buff *skb)
@@ -25,8 +26,6 @@ void rtw_rx_stats(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
rtwvif = (struct rtw_vif *)vif->drv_priv;
rtwvif->stats.rx_unicast += skb->len;
rtwvif->stats.rx_cnt++;
- if (rtwvif->stats.rx_cnt > RTW_LPS_THRESHOLD)
- rtw_leave_lps_irqsafe(rtwdev, rtwvif);
}
}
}
@@ -39,6 +38,60 @@ struct rtw_rx_addr_match_data {
u8 *bssid;
};
+static void rtw_rx_phy_stat(struct rtw_dev *rtwdev,
+ struct rtw_rx_pkt_stat *pkt_stat,
+ struct ieee80211_hdr *hdr)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ struct rtw_pkt_count *cur_pkt_cnt = &dm_info->cur_pkt_count;
+ u8 rate_ss, rate_ss_evm, evm_id;
+ u8 i, idx;
+
+ dm_info->curr_rx_rate = pkt_stat->rate;
+
+ if (ieee80211_is_beacon(hdr->frame_control))
+ cur_pkt_cnt->num_bcn_pkt++;
+
+ switch (pkt_stat->rate) {
+ case DESC_RATE1M...DESC_RATE11M:
+ goto pkt_num;
+ case DESC_RATE6M...DESC_RATE54M:
+ rate_ss = 0;
+ rate_ss_evm = 1;
+ evm_id = RTW_EVM_OFDM;
+ break;
+ case DESC_RATEMCS0...DESC_RATEMCS7:
+ case DESC_RATEVHT1SS_MCS0...DESC_RATEVHT1SS_MCS9:
+ rate_ss = 1;
+ rate_ss_evm = 1;
+ evm_id = RTW_EVM_1SS;
+ break;
+ case DESC_RATEMCS8...DESC_RATEMCS15:
+ case DESC_RATEVHT2SS_MCS0...DESC_RATEVHT2SS_MCS9:
+ rate_ss = 2;
+ rate_ss_evm = 2;
+ evm_id = RTW_EVM_2SS_A;
+ break;
+ default:
+ rtw_warn(rtwdev, "unknown pkt rate = %d\n", pkt_stat->rate);
+ return;
+ }
+
+ for (i = 0; i < rate_ss_evm; i++) {
+ idx = evm_id + i;
+ ewma_evm_add(&dm_info->ewma_evm[idx],
+ dm_info->rx_evm_dbm[i]);
+ }
+
+ for (i = 0; i < rtwdev->hal.rf_path_num; i++) {
+ idx = RTW_SNR_OFDM_A + 4 * rate_ss + i;
+ ewma_snr_add(&dm_info->ewma_snr[idx],
+ dm_info->rx_snr[i]);
+ }
+pkt_num:
+ cur_pkt_cnt->num_qry_pkt[pkt_stat->rate]++;
+}
+
static void rtw_rx_addr_match_iter(void *data, u8 *mac,
struct ieee80211_vif *vif)
{
@@ -50,14 +103,16 @@ static void rtw_rx_addr_match_iter(void *data, u8 *mac,
struct rtw_rx_pkt_stat *pkt_stat = iter_data->pkt_stat;
u8 *bssid = iter_data->bssid;
- if (ether_addr_equal(vif->bss_conf.bssid, bssid) &&
- (ether_addr_equal(vif->addr, hdr->addr1) ||
- ieee80211_is_beacon(hdr->frame_control)))
- sta = ieee80211_find_sta_by_ifaddr(rtwdev->hw, hdr->addr2,
- vif->addr);
- else
+ if (!ether_addr_equal(vif->bss_conf.bssid, bssid))
return;
+ if (!(ether_addr_equal(vif->addr, hdr->addr1) ||
+ ieee80211_is_beacon(hdr->frame_control)))
+ return;
+
+ rtw_rx_phy_stat(rtwdev, pkt_stat, hdr);
+ sta = ieee80211_find_sta_by_ifaddr(rtwdev->hw, hdr->addr2,
+ vif->addr);
if (!sta)
return;
@@ -105,35 +160,17 @@ void rtw_rx_fill_rx_status(struct rtw_dev *rtwdev,
else if (pkt_stat->rate >= DESC_RATEMCS0)
rx_status->encoding = RX_ENC_HT;
- if (pkt_stat->rate >= DESC_RATEVHT1SS_MCS0 &&
- pkt_stat->rate <= DESC_RATEVHT1SS_MCS9) {
- rx_status->nss = 1;
- rx_status->rate_idx = pkt_stat->rate - DESC_RATEVHT1SS_MCS0;
- } else if (pkt_stat->rate >= DESC_RATEVHT2SS_MCS0 &&
- pkt_stat->rate <= DESC_RATEVHT2SS_MCS9) {
- rx_status->nss = 2;
- rx_status->rate_idx = pkt_stat->rate - DESC_RATEVHT2SS_MCS0;
- } else if (pkt_stat->rate >= DESC_RATEVHT3SS_MCS0 &&
- pkt_stat->rate <= DESC_RATEVHT3SS_MCS9) {
- rx_status->nss = 3;
- rx_status->rate_idx = pkt_stat->rate - DESC_RATEVHT3SS_MCS0;
- } else if (pkt_stat->rate >= DESC_RATEVHT4SS_MCS0 &&
- pkt_stat->rate <= DESC_RATEVHT4SS_MCS9) {
- rx_status->nss = 4;
- rx_status->rate_idx = pkt_stat->rate - DESC_RATEVHT4SS_MCS0;
- } else if (pkt_stat->rate >= DESC_RATEMCS0 &&
- pkt_stat->rate <= DESC_RATEMCS15) {
- rx_status->rate_idx = pkt_stat->rate - DESC_RATEMCS0;
- } else if (rx_status->band == NL80211_BAND_5GHZ &&
- pkt_stat->rate >= DESC_RATE6M &&
- pkt_stat->rate <= DESC_RATE54M) {
+ if (rx_status->band == NL80211_BAND_5GHZ &&
+ pkt_stat->rate >= DESC_RATE6M &&
+ pkt_stat->rate <= DESC_RATE54M) {
rx_status->rate_idx = pkt_stat->rate - DESC_RATE6M;
} else if (rx_status->band == NL80211_BAND_2GHZ &&
pkt_stat->rate >= DESC_RATE1M &&
pkt_stat->rate <= DESC_RATE54M) {
rx_status->rate_idx = pkt_stat->rate - DESC_RATE1M;
- } else {
- rx_status->rate_idx = 0;
+ } else if (pkt_stat->rate >= DESC_RATEMCS0) {
+ rtw_desc_to_mcsrate(pkt_stat->rate, &rx_status->rate_idx,
+ &rx_status->nss);
}
rx_status->flag |= RX_FLAG_MACTIME_START;
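
The rx path above folds each packet's EVM and SNR samples into exponentially weighted moving averages through generated ewma_evm_add()/ewma_snr_add() helpers. A minimal sketch of the underlying <linux/average.h> API, using a hypothetical "sig" average (the driver's real DECLARE_EWMA() instances, their names and weights, live elsewhere in rtw88 and are not part of this hunk):

	#include <linux/average.h>
	#include <linux/printk.h>

	/* Generates struct ewma_sig plus ewma_sig_init/_add/_read;
	 * internal precision 2^10, averaging weight 1/8. */
	DECLARE_EWMA(sig, 10, 8);

	static struct ewma_sig sig_avg;

	static void sig_sample(unsigned long sample)
	{
		/* ewma_sig_init(&sig_avg) must have run once beforehand */
		ewma_sig_add(&sig_avg, sample);
		pr_debug("avg: %lu\n", ewma_sig_read(&sig_avg));
	}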
diff --git a/drivers/net/wireless/realtek/rtw88/rx.h b/drivers/net/wireless/realtek/rtw88/rx.h
index 383f3b2babc1..3342e3761281 100644
--- a/drivers/net/wireless/realtek/rtw88/rx.h
+++ b/drivers/net/wireless/realtek/rtw88/rx.h
@@ -5,6 +5,15 @@
#ifndef __RTW_RX_H_
#define __RTW_RX_H_
+enum rtw_rx_desc_enc {
+ RX_DESC_ENC_NONE = 0,
+ RX_DESC_ENC_WEP40 = 1,
+ RX_DESC_ENC_TKIP_WO_MIC = 2,
+ RX_DESC_ENC_TKIP_MIC = 3,
+ RX_DESC_ENC_AES = 4,
+ RX_DESC_ENC_WEP104 = 5,
+};
+
#define GET_RX_DESC_PHYST(rxdesc) \
le32_get_bits(*((__le32 *)(rxdesc) + 0x00), BIT(26))
#define GET_RX_DESC_ICV_ERR(rxdesc) \
@@ -21,6 +30,8 @@
le32_get_bits(*((__le32 *)(rxdesc) + 0x00), GENMASK(19, 16))
#define GET_RX_DESC_SHIFT(rxdesc) \
le32_get_bits(*((__le32 *)(rxdesc) + 0x00), GENMASK(25, 24))
+#define GET_RX_DESC_ENC_TYPE(rxdesc) \
+ le32_get_bits(*((__le32 *)(rxdesc) + 0x00), GENMASK(22, 20))
#define GET_RX_DESC_RX_RATE(rxdesc) \
le32_get_bits(*((__le32 *)(rxdesc) + 0x03), GENMASK(6, 0))
#define GET_RX_DESC_MACID(rxdesc) \
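
The GET_RX_DESC_* accessors above extract fields from little-endian descriptor words with le32_get_bits(). A sketch of the same pattern from <linux/bitfield.h>, mirroring the new GET_RX_DESC_ENC_TYPE() (the bit layout is the driver's; the wrapper function here is illustrative):

	#include <linux/bitfield.h>
	#include <linux/bits.h>

	/* Bits 22:20 of descriptor dword 0 hold the encryption type. */
	static u8 desc_enc_type(const __le32 *rxdesc)
	{
		return le32_get_bits(rxdesc[0], GENMASK(22, 20));
	}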
diff --git a/drivers/net/wireless/realtek/rtw88/sec.c b/drivers/net/wireless/realtek/rtw88/sec.c
index c594fc02804d..d0d7fbb10d58 100644
--- a/drivers/net/wireless/realtek/rtw88/sec.c
+++ b/drivers/net/wireless/realtek/rtw88/sec.c
@@ -96,6 +96,27 @@ void rtw_sec_clear_cam(struct rtw_dev *rtwdev,
rtw_write32(rtwdev, RTW_SEC_CMD_REG, command);
}
+u8 rtw_sec_cam_pg_backup(struct rtw_dev *rtwdev, u8 *used_cam)
+{
+ struct rtw_sec_desc *sec = &rtwdev->sec;
+ u8 offset = 0;
+ u8 count, n;
+
+ if (!used_cam)
+ return 0;
+
+ for (count = 0; count < MAX_PG_CAM_BACKUP_NUM; count++) {
+ n = find_next_bit(sec->cam_map, RTW_MAX_SEC_CAM_NUM, offset);
+ if (n == RTW_MAX_SEC_CAM_NUM)
+ break;
+
+ used_cam[count] = n;
+ offset = n + 1;
+ }
+
+ return count;
+}
+
void rtw_sec_enable_sec_engine(struct rtw_dev *rtwdev)
{
struct rtw_sec_desc *sec = &rtwdev->sec;
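
rtw_sec_cam_pg_backup() walks the set bits of the CAM bitmap with find_next_bit(). The same scan can be written with the for_each_set_bit() iterator; a sketch with illustrative sizes (CAM_NUM and BACKUP_MAX stand in for the driver's RTW_MAX_SEC_CAM_NUM and MAX_PG_CAM_BACKUP_NUM):

	#include <linux/bitops.h>

	#define CAM_NUM		32	/* illustrative */
	#define BACKUP_MAX	8	/* illustrative */

	static u8 collect_used_cams(const unsigned long *map, u8 *out)
	{
		unsigned int n;
		u8 count = 0;

		for_each_set_bit(n, map, CAM_NUM) {
			out[count++] = n;
			if (count == BACKUP_MAX)
				break;
		}
		return count;
	}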
diff --git a/drivers/net/wireless/realtek/rtw88/sec.h b/drivers/net/wireless/realtek/rtw88/sec.h
index 8c50a895c797..efcf45433999 100644
--- a/drivers/net/wireless/realtek/rtw88/sec.h
+++ b/drivers/net/wireless/realtek/rtw88/sec.h
@@ -34,6 +34,7 @@ void rtw_sec_write_cam(struct rtw_dev *rtwdev,
void rtw_sec_clear_cam(struct rtw_dev *rtwdev,
struct rtw_sec_desc *sec,
u8 hw_key_idx);
+u8 rtw_sec_cam_pg_backup(struct rtw_dev *rtwdev, u8 *used_cam);
void rtw_sec_enable_sec_engine(struct rtw_dev *rtwdev);
#endif
diff --git a/drivers/net/wireless/realtek/rtw88/tx.c b/drivers/net/wireless/realtek/rtw88/tx.c
index 8eaa9809ca44..24c39c60c99a 100644
--- a/drivers/net/wireless/realtek/rtw88/tx.c
+++ b/drivers/net/wireless/realtek/rtw88/tx.c
@@ -27,8 +27,6 @@ void rtw_tx_stats(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
rtwvif = (struct rtw_vif *)vif->drv_priv;
rtwvif->stats.tx_unicast += skb->len;
rtwvif->stats.tx_cnt++;
- if (rtwvif->stats.tx_cnt > RTW_LPS_THRESHOLD)
- rtw_leave_lps_irqsafe(rtwdev, rtwvif);
}
}
}
@@ -58,6 +56,7 @@ void rtw_tx_fill_tx_desc(struct rtw_tx_pkt_info *pkt_info, struct sk_buff *skb)
SET_TX_DESC_DATA_SHORT(txdesc, pkt_info->short_gi);
SET_TX_DESC_SPE_RPT(txdesc, pkt_info->report);
SET_TX_DESC_SW_DEFINE(txdesc, pkt_info->sn);
+ SET_TX_DESC_USE_RTS(txdesc, pkt_info->rts);
}
EXPORT_SYMBOL(rtw_tx_fill_tx_desc);
@@ -260,6 +259,9 @@ static void rtw_tx_data_pkt_info_update(struct rtw_dev *rtwdev,
ampdu_density = get_tx_ampdu_density(sta);
}
+ if (info->control.use_rts)
+ pkt_info->rts = true;
+
if (sta->vht_cap.vht_supported)
rate = get_highest_vht_tx_rate(rtwdev, sta);
else if (sta->ht_cap.ht_supported)
@@ -365,3 +367,132 @@ void rtw_rsvd_page_pkt_info_update(struct rtw_dev *rtwdev,
pkt_info->qsel = TX_DESC_QSEL_MGMT;
pkt_info->ls = true;
}
+
+void rtw_tx(struct rtw_dev *rtwdev,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
+{
+ struct rtw_tx_pkt_info pkt_info = {0};
+
+ rtw_tx_pkt_info_update(rtwdev, &pkt_info, control, skb);
+ if (rtw_hci_tx(rtwdev, &pkt_info, skb))
+ goto out;
+
+ return;
+
+out:
+ ieee80211_free_txskb(rtwdev->hw, skb);
+}
+
+static void rtw_txq_check_agg(struct rtw_dev *rtwdev,
+ struct rtw_txq *rtwtxq,
+ struct sk_buff *skb)
+{
+ struct ieee80211_txq *txq = rtwtxq_to_txq(rtwtxq);
+ struct ieee80211_tx_info *info;
+ struct rtw_sta_info *si;
+
+ if (test_bit(RTW_TXQ_AMPDU, &rtwtxq->flags)) {
+ info = IEEE80211_SKB_CB(skb);
+ info->flags |= IEEE80211_TX_CTL_AMPDU;
+ return;
+ }
+
+ if (skb_get_queue_mapping(skb) == IEEE80211_AC_VO)
+ return;
+
+ if (test_bit(RTW_TXQ_BLOCK_BA, &rtwtxq->flags))
+ return;
+
+ if (unlikely(skb->protocol == cpu_to_be16(ETH_P_PAE)))
+ return;
+
+ if (!txq->sta)
+ return;
+
+ si = (struct rtw_sta_info *)txq->sta->drv_priv;
+ set_bit(txq->tid, si->tid_ba);
+
+ ieee80211_queue_work(rtwdev->hw, &rtwdev->ba_work);
+}
+
+static bool rtw_txq_dequeue(struct rtw_dev *rtwdev,
+ struct rtw_txq *rtwtxq)
+{
+ struct ieee80211_txq *txq = rtwtxq_to_txq(rtwtxq);
+ struct ieee80211_tx_control control;
+ struct sk_buff *skb;
+
+ skb = ieee80211_tx_dequeue(rtwdev->hw, txq);
+ if (!skb)
+ return false;
+
+ rtw_txq_check_agg(rtwdev, rtwtxq, skb);
+
+ control.sta = txq->sta;
+ rtw_tx(rtwdev, &control, skb);
+ rtwtxq->last_push = jiffies;
+
+ return true;
+}
+
+static void rtw_txq_push(struct rtw_dev *rtwdev,
+ struct rtw_txq *rtwtxq,
+ unsigned long frames)
+{
+ int i;
+
+ rcu_read_lock();
+
+ for (i = 0; i < frames; i++)
+ if (!rtw_txq_dequeue(rtwdev, rtwtxq))
+ break;
+
+ rcu_read_unlock();
+}
+
+void rtw_tx_tasklet(unsigned long data)
+{
+ struct rtw_dev *rtwdev = (void *)data;
+ struct rtw_txq *rtwtxq, *tmp;
+
+ spin_lock_bh(&rtwdev->txq_lock);
+
+ list_for_each_entry_safe(rtwtxq, tmp, &rtwdev->txqs, list) {
+ struct ieee80211_txq *txq = rtwtxq_to_txq(rtwtxq);
+ unsigned long frame_cnt;
+ unsigned long byte_cnt;
+
+ ieee80211_txq_get_depth(txq, &frame_cnt, &byte_cnt);
+ rtw_txq_push(rtwdev, rtwtxq, frame_cnt);
+
+ list_del_init(&rtwtxq->list);
+ }
+
+ spin_unlock_bh(&rtwdev->txq_lock);
+}
+
+void rtw_txq_init(struct rtw_dev *rtwdev, struct ieee80211_txq *txq)
+{
+ struct rtw_txq *rtwtxq;
+
+ if (!txq)
+ return;
+
+ rtwtxq = (struct rtw_txq *)txq->drv_priv;
+ INIT_LIST_HEAD(&rtwtxq->list);
+}
+
+void rtw_txq_cleanup(struct rtw_dev *rtwdev, struct ieee80211_txq *txq)
+{
+ struct rtw_txq *rtwtxq;
+
+ if (!txq)
+ return;
+
+ rtwtxq = (struct rtw_txq *)txq->drv_priv;
+ spin_lock_bh(&rtwdev->txq_lock);
+ if (!list_empty(&rtwtxq->list))
+ list_del_init(&rtwtxq->list);
+ spin_unlock_bh(&rtwdev->txq_lock);
+}
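
The new tasklet drains each queued mac80211 txq: it snapshots the queue depth once, then dequeues that many frames. The core of that loop, condensed (the rtw_dev/rtw_txq plumbing is omitted; handing the skb to the transport is where rtw_tx() sits in the real code):

	#include <net/mac80211.h>

	static void drain_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
	{
		unsigned long frames, bytes;
		struct sk_buff *skb;

		ieee80211_txq_get_depth(txq, &frames, &bytes);
		while (frames--) {
			skb = ieee80211_tx_dequeue(hw, txq);
			if (!skb)
				break;
			/* hand skb to the transport, as rtw_tx() does */
		}
	}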
diff --git a/drivers/net/wireless/realtek/rtw88/tx.h b/drivers/net/wireless/realtek/rtw88/tx.h
index 8338dbf55576..9ca4f74a501b 100644
--- a/drivers/net/wireless/realtek/rtw88/tx.h
+++ b/drivers/net/wireless/realtek/rtw88/tx.h
@@ -35,6 +35,8 @@
le32p_replace_bits((__le32 *)(txdesc) + 0x09, value, GENMASK(23, 12))
#define SET_TX_DESC_MAX_AGG_NUM(txdesc, value) \
le32p_replace_bits((__le32 *)(txdesc) + 0x03, value, GENMASK(21, 17))
+#define SET_TX_DESC_USE_RTS(tx_desc, value) \
+ le32p_replace_bits((__le32 *)(txdesc) + 0x03, value, BIT(12))
#define SET_TX_DESC_AMPDU_DENSITY(txdesc, value) \
le32p_replace_bits((__le32 *)(txdesc) + 0x02, value, GENMASK(22, 20))
#define SET_TX_DESC_DATA_STBC(txdesc, value) \
@@ -75,6 +77,12 @@ enum rtw_tx_desc_queue_select {
TX_DESC_QSEL_H2C = 19,
};
+void rtw_tx(struct rtw_dev *rtwdev,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb);
+void rtw_txq_init(struct rtw_dev *rtwdev, struct ieee80211_txq *txq);
+void rtw_txq_cleanup(struct rtw_dev *rtwdev, struct ieee80211_txq *txq);
+void rtw_tx_tasklet(unsigned long data);
void rtw_tx_pkt_info_update(struct rtw_dev *rtwdev,
struct rtw_tx_pkt_info *pkt_info,
struct ieee80211_tx_control *control,
diff --git a/drivers/net/wireless/realtek/rtw88/util.c b/drivers/net/wireless/realtek/rtw88/util.c
index 212070c2baa8..10f1117c0cfb 100644
--- a/drivers/net/wireless/realtek/rtw88/util.c
+++ b/drivers/net/wireless/realtek/rtw88/util.c
@@ -70,3 +70,30 @@ void rtw_restore_reg(struct rtw_dev *rtwdev,
}
}
}
+
+void rtw_desc_to_mcsrate(u16 rate, u8 *mcs, u8 *nss)
+{
+ if (rate <= DESC_RATE54M)
+ return;
+
+ if (rate >= DESC_RATEVHT1SS_MCS0 &&
+ rate <= DESC_RATEVHT1SS_MCS9) {
+ *nss = 1;
+ *mcs = rate - DESC_RATEVHT1SS_MCS0;
+ } else if (rate >= DESC_RATEVHT2SS_MCS0 &&
+ rate <= DESC_RATEVHT2SS_MCS9) {
+ *nss = 2;
+ *mcs = rate - DESC_RATEVHT2SS_MCS0;
+ } else if (rate >= DESC_RATEVHT3SS_MCS0 &&
+ rate <= DESC_RATEVHT3SS_MCS9) {
+ *nss = 3;
+ *mcs = rate - DESC_RATEVHT3SS_MCS0;
+ } else if (rate >= DESC_RATEVHT4SS_MCS0 &&
+ rate <= DESC_RATEVHT4SS_MCS9) {
+ *nss = 4;
+ *mcs = rate - DESC_RATEVHT4SS_MCS0;
+ } else if (rate >= DESC_RATEMCS0 &&
+ rate <= DESC_RATEMCS15) {
+ *mcs = rate - DESC_RATEMCS0;
+ }
+}
diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
index ce5e92d82efc..440088293aff 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
@@ -1140,8 +1140,7 @@ static int rsi_mac80211_ampdu_action(struct ieee80211_hw *hw,
else if ((vif->type == NL80211_IFTYPE_AP) ||
(vif->type == NL80211_IFTYPE_P2P_GO))
rsta->seq_start[tid] = seq_no;
- ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
- status = 0;
+ status = IEEE80211_AMPDU_TX_START_IMMEDIATE;
break;
case IEEE80211_AMPDU_TX_STOP_CONT:
diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
index 6c7f26ef6476..9cc8a335d519 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
@@ -1756,6 +1756,7 @@ static int rsi_send_beacon(struct rsi_common *common)
skb_pull(skb, (64 - dword_align_bytes));
if (rsi_prepare_beacon(common, skb)) {
rsi_dbg(ERR_ZONE, "Failed to prepare beacon\n");
+ dev_kfree_skb(skb);
return -EINVAL;
}
skb_queue_tail(&common->tx_queue[MGMT_BEACON_Q], skb);
diff --git a/drivers/net/wireless/rsi/rsi_91x_usb.c b/drivers/net/wireless/rsi/rsi_91x_usb.c
index 760eaffeebd6..53f41fc2cadf 100644
--- a/drivers/net/wireless/rsi/rsi_91x_usb.c
+++ b/drivers/net/wireless/rsi/rsi_91x_usb.c
@@ -785,10 +785,10 @@ static int rsi_probe(struct usb_interface *pfunction,
rsi_dbg(ERR_ZONE, "%s: Initialized os intf ops\n", __func__);
- if (id && id->idProduct == RSI_USB_PID_9113) {
+ if (id->idProduct == RSI_USB_PID_9113) {
rsi_dbg(INIT_ZONE, "%s: 9113 module detected\n", __func__);
adapter->device_model = RSI_DEV_9113;
- } else if (id && id->idProduct == RSI_USB_PID_9116) {
+ } else if (id->idProduct == RSI_USB_PID_9116) {
rsi_dbg(INIT_ZONE, "%s: 9116 module detected\n", __func__);
adapter->device_model = RSI_DEV_9116;
} else {
diff --git a/drivers/net/wireless/st/cw1200/fwio.c b/drivers/net/wireless/st/cw1200/fwio.c
index 6574e78e05ea..2a03dc533b6a 100644
--- a/drivers/net/wireless/st/cw1200/fwio.c
+++ b/drivers/net/wireless/st/cw1200/fwio.c
@@ -320,12 +320,12 @@ int cw1200_load_firmware(struct cw1200_common *priv)
goto out;
}
- priv->hw_type = cw1200_get_hw_type(val32, &major_revision);
- if (priv->hw_type < 0) {
+ ret = cw1200_get_hw_type(val32, &major_revision);
+ if (ret < 0) {
pr_err("Can't deduce hardware type.\n");
- ret = -ENOTSUPP;
goto out;
}
+ priv->hw_type = ret;
/* Set DPLL Reg value, and read back to confirm writes work */
ret = cw1200_reg_write_32(priv, ST90TDS_TSET_GEN_R_W_REG_ID,
diff --git a/drivers/net/wireless/st/cw1200/queue.c b/drivers/net/wireless/st/cw1200/queue.c
index 14133eedb3b6..12952b1c29df 100644
--- a/drivers/net/wireless/st/cw1200/queue.c
+++ b/drivers/net/wireless/st/cw1200/queue.c
@@ -79,10 +79,9 @@ static void cw1200_queue_register_post_gc(struct list_head *gc_list,
struct cw1200_queue_item *item)
{
struct cw1200_queue_item *gc_item;
- gc_item = kmalloc(sizeof(struct cw1200_queue_item),
+ gc_item = kmemdup(item, sizeof(struct cw1200_queue_item),
GFP_ATOMIC);
BUG_ON(!gc_item);
- memcpy(gc_item, item, sizeof(struct cw1200_queue_item));
list_add_tail(&gc_item->head, gc_list);
}
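
The cw1200 change above is a common cleanup: kmemdup() replaces a kmalloc()+memcpy() pair (the wlcore hunk further down does the same for an RX filter pattern). A minimal sketch, with struct item standing in for the real type:

	#include <linux/slab.h>
	#include <linux/string.h>

	struct item { int a, b; };	/* stand-in */

	/* Allocate-and-copy in one call; returns NULL on failure. */
	static struct item *item_dup(const struct item *src, gfp_t gfp)
	{
		return kmemdup(src, sizeof(*src), gfp);
	}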
diff --git a/drivers/net/wireless/st/cw1200/scan.c b/drivers/net/wireless/st/cw1200/scan.c
index c46b044b7f7b..988581cc134b 100644
--- a/drivers/net/wireless/st/cw1200/scan.c
+++ b/drivers/net/wireless/st/cw1200/scan.c
@@ -120,8 +120,7 @@ int cw1200_hw_scan(struct ieee80211_hw *hw,
++priv->scan.n_ssids;
}
- if (frame.skb)
- dev_kfree_skb(frame.skb);
+ dev_kfree_skb(frame.skb);
mutex_unlock(&priv->conf_mutex);
queue_work(priv->workqueue, &priv->scan.work);
return 0;
diff --git a/drivers/net/wireless/ti/wl1251/sdio.c b/drivers/net/wireless/ti/wl1251/sdio.c
index 677f1146ccf0..94569cd695c8 100644
--- a/drivers/net/wireless/ti/wl1251/sdio.c
+++ b/drivers/net/wireless/ti/wl1251/sdio.c
@@ -16,17 +16,12 @@
#include <linux/irq.h>
#include <linux/pm_runtime.h>
#include <linux/gpio.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/of_irq.h>
#include "wl1251.h"
-#ifndef SDIO_VENDOR_ID_TI
-#define SDIO_VENDOR_ID_TI 0x104c
-#endif
-
-#ifndef SDIO_DEVICE_ID_TI_WL1251
-#define SDIO_DEVICE_ID_TI_WL1251 0x9066
-#endif
-
struct wl1251_sdio {
struct sdio_func *func;
u32 elp_val;
@@ -49,7 +44,7 @@ static void wl1251_sdio_interrupt(struct sdio_func *func)
}
static const struct sdio_device_id wl1251_devices[] = {
- { SDIO_DEVICE(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1251) },
+ { SDIO_DEVICE(SDIO_VENDOR_ID_TI_WL1251, SDIO_DEVICE_ID_TI_WL1251) },
{}
};
MODULE_DEVICE_TABLE(sdio, wl1251_devices);
@@ -217,6 +212,7 @@ static int wl1251_sdio_probe(struct sdio_func *func,
struct ieee80211_hw *hw;
struct wl1251_sdio *wl_sdio;
const struct wl1251_platform_data *wl1251_board_data;
+ struct device_node *np = func->dev.of_node;
hw = wl1251_alloc_hw();
if (IS_ERR(hw))
@@ -248,6 +244,17 @@ static int wl1251_sdio_probe(struct sdio_func *func,
wl->power_gpio = wl1251_board_data->power_gpio;
wl->irq = wl1251_board_data->irq;
wl->use_eeprom = wl1251_board_data->use_eeprom;
+ } else if (np) {
+ wl->use_eeprom = of_property_read_bool(np,
+ "ti,wl1251-has-eeprom");
+ wl->power_gpio = of_get_named_gpio(np, "ti,power-gpio", 0);
+ wl->irq = of_irq_get(np, 0);
+
+ if (wl->power_gpio == -EPROBE_DEFER ||
+ wl->irq == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto disable;
+ }
}
if (gpio_is_valid(wl->power_gpio)) {
diff --git a/drivers/net/wireless/ti/wl12xx/Kconfig b/drivers/net/wireless/ti/wl12xx/Kconfig
index e409042ee9a0..9c4511604b67 100644
--- a/drivers/net/wireless/ti/wl12xx/Kconfig
+++ b/drivers/net/wireless/ti/wl12xx/Kconfig
@@ -1,10 +1,10 @@
# SPDX-License-Identifier: GPL-2.0-only
config WL12XX
- tristate "TI wl12xx support"
+ tristate "TI wl12xx support"
depends on MAC80211
- select WLCORE
- ---help---
+ select WLCORE
+ ---help---
This module adds support for wireless adapters based on TI wl1271,
wl1273, wl1281 and wl1283 chipsets. This module does *not* include
support for wl1251. For wl1251 support, use the separate homonymous
- driver instead.
+ driver instead.
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index 547ad538d8b6..e994995d79ab 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -544,11 +544,6 @@ static int wlcore_irq_locked(struct wl1271 *wl)
}
while (!done && loopcount--) {
- /*
- * In order to avoid a race with the hardirq, clear the flag
- * before acknowledging the chip.
- */
- clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
smp_mb__after_atomic();
ret = wlcore_fw_status(wl, wl->fw_status);
@@ -668,7 +663,7 @@ static irqreturn_t wlcore_irq(int irq, void *cookie)
disable_irq_nosync(wl->irq);
pm_wakeup_event(wl->dev, 0);
spin_unlock_irqrestore(&wl->wl_lock, flags);
- return IRQ_HANDLED;
+ goto out_handled;
}
spin_unlock_irqrestore(&wl->wl_lock, flags);
@@ -692,6 +687,11 @@ static irqreturn_t wlcore_irq(int irq, void *cookie)
mutex_unlock(&wl->mutex);
+out_handled:
+ spin_lock_irqsave(&wl->wl_lock, flags);
+ clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
+ spin_unlock_irqrestore(&wl->wl_lock, flags);
+
return IRQ_HANDLED;
}
@@ -1434,7 +1434,7 @@ int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
field = &filter->fields[filter->num_fields];
- field->pattern = kzalloc(len, GFP_KERNEL);
+ field->pattern = kmemdup(pattern, len, GFP_KERNEL);
if (!field->pattern) {
wl1271_warning("Failed to allocate RX filter pattern");
return -ENOMEM;
@@ -1445,7 +1445,6 @@ int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
field->offset = cpu_to_le16(offset);
field->flags = flags;
field->len = len;
- memcpy(field->pattern, pattern, len);
return 0;
}
diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c
index 7afaf35f2453..9fd8cf2d270c 100644
--- a/drivers/net/wireless/ti/wlcore/sdio.c
+++ b/drivers/net/wireless/ti/wlcore/sdio.c
@@ -26,14 +26,6 @@
#include "wl12xx_80211.h"
#include "io.h"
-#ifndef SDIO_VENDOR_ID_TI
-#define SDIO_VENDOR_ID_TI 0x0097
-#endif
-
-#ifndef SDIO_DEVICE_ID_TI_WL1271
-#define SDIO_DEVICE_ID_TI_WL1271 0x4076
-#endif
-
static bool dump = false;
struct wl12xx_sdio_glue {
diff --git a/drivers/net/wireless/ti/wlcore/spi.c b/drivers/net/wireless/ti/wlcore/spi.c
index d4c09e54fd63..18c4d998ce4b 100644
--- a/drivers/net/wireless/ti/wlcore/spi.c
+++ b/drivers/net/wireless/ti/wlcore/spi.c
@@ -186,7 +186,7 @@ static void wl12xx_spi_init(struct device *child)
spi_sync(to_spi_device(glue->dev), &m);
- /* Restore chip select configration to normal */
+ /* Restore chip select configuration to normal */
spi->mode ^= SPI_CS_HIGH;
kfree(cmd);
}
diff --git a/drivers/net/wireless/virt_wifi.c b/drivers/net/wireless/virt_wifi.c
index 7997cc6de334..01305ba2d3aa 100644
--- a/drivers/net/wireless/virt_wifi.c
+++ b/drivers/net/wireless/virt_wifi.c
@@ -450,7 +450,6 @@ static void virt_wifi_net_device_destructor(struct net_device *dev)
*/
kfree(dev->ieee80211_ptr);
dev->ieee80211_ptr = NULL;
- free_netdev(dev);
}
/* No lock interaction. */
@@ -458,7 +457,7 @@ static void virt_wifi_setup(struct net_device *dev)
{
ether_setup(dev);
dev->netdev_ops = &virt_wifi_ops;
- dev->priv_destructor = virt_wifi_net_device_destructor;
+ dev->needs_free_netdev = true;
}
/* Called in a RCU read critical section from netif_receive_skb */
@@ -544,6 +543,7 @@ static int virt_wifi_newlink(struct net *src_net, struct net_device *dev,
goto unregister_netdev;
}
+ dev->priv_destructor = virt_wifi_net_device_destructor;
priv->being_deleted = false;
priv->is_connected = false;
priv->is_up = false;
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 103ed00775eb..68dd7bb07ca6 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -626,6 +626,38 @@ err:
return err;
}
+static void xenvif_disconnect_queue(struct xenvif_queue *queue)
+{
+ if (queue->tx_irq) {
+ unbind_from_irqhandler(queue->tx_irq, queue);
+ if (queue->tx_irq == queue->rx_irq)
+ queue->rx_irq = 0;
+ queue->tx_irq = 0;
+ }
+
+ if (queue->rx_irq) {
+ unbind_from_irqhandler(queue->rx_irq, queue);
+ queue->rx_irq = 0;
+ }
+
+ if (queue->task) {
+ kthread_stop(queue->task);
+ queue->task = NULL;
+ }
+
+ if (queue->dealloc_task) {
+ kthread_stop(queue->dealloc_task);
+ queue->dealloc_task = NULL;
+ }
+
+ if (queue->napi.poll) {
+ netif_napi_del(&queue->napi);
+ queue->napi.poll = NULL;
+ }
+
+ xenvif_unmap_frontend_data_rings(queue);
+}
+
int xenvif_connect_data(struct xenvif_queue *queue,
unsigned long tx_ring_ref,
unsigned long rx_ring_ref,
@@ -651,13 +683,27 @@ int xenvif_connect_data(struct xenvif_queue *queue,
netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll,
XENVIF_NAPI_WEIGHT);
+ queue->stalled = true;
+
+ task = kthread_run(xenvif_kthread_guest_rx, queue,
+ "%s-guest-rx", queue->name);
+ if (IS_ERR(task))
+ goto kthread_err;
+ queue->task = task;
+
+ task = kthread_run(xenvif_dealloc_kthread, queue,
+ "%s-dealloc", queue->name);
+ if (IS_ERR(task))
+ goto kthread_err;
+ queue->dealloc_task = task;
+
if (tx_evtchn == rx_evtchn) {
/* feature-split-event-channels == 0 */
err = bind_interdomain_evtchn_to_irqhandler(
queue->vif->domid, tx_evtchn, xenvif_interrupt, 0,
queue->name, queue);
if (err < 0)
- goto err_unmap;
+ goto err;
queue->tx_irq = queue->rx_irq = err;
disable_irq(queue->tx_irq);
} else {
@@ -668,7 +714,7 @@ int xenvif_connect_data(struct xenvif_queue *queue,
queue->vif->domid, tx_evtchn, xenvif_tx_interrupt, 0,
queue->tx_irq_name, queue);
if (err < 0)
- goto err_unmap;
+ goto err;
queue->tx_irq = err;
disable_irq(queue->tx_irq);
@@ -678,47 +724,18 @@ int xenvif_connect_data(struct xenvif_queue *queue,
queue->vif->domid, rx_evtchn, xenvif_rx_interrupt, 0,
queue->rx_irq_name, queue);
if (err < 0)
- goto err_tx_unbind;
+ goto err;
queue->rx_irq = err;
disable_irq(queue->rx_irq);
}
- queue->stalled = true;
-
- task = kthread_create(xenvif_kthread_guest_rx,
- (void *)queue, "%s-guest-rx", queue->name);
- if (IS_ERR(task)) {
- pr_warn("Could not allocate kthread for %s\n", queue->name);
- err = PTR_ERR(task);
- goto err_rx_unbind;
- }
- queue->task = task;
- get_task_struct(task);
-
- task = kthread_create(xenvif_dealloc_kthread,
- (void *)queue, "%s-dealloc", queue->name);
- if (IS_ERR(task)) {
- pr_warn("Could not allocate kthread for %s\n", queue->name);
- err = PTR_ERR(task);
- goto err_rx_unbind;
- }
- queue->dealloc_task = task;
-
- wake_up_process(queue->task);
- wake_up_process(queue->dealloc_task);
-
return 0;
-err_rx_unbind:
- unbind_from_irqhandler(queue->rx_irq, queue);
- queue->rx_irq = 0;
-err_tx_unbind:
- unbind_from_irqhandler(queue->tx_irq, queue);
- queue->tx_irq = 0;
-err_unmap:
- xenvif_unmap_frontend_data_rings(queue);
- netif_napi_del(&queue->napi);
+kthread_err:
+ pr_warn("Could not allocate kthread for %s\n", queue->name);
+ err = PTR_ERR(task);
err:
+ xenvif_disconnect_queue(queue);
return err;
}
@@ -746,30 +763,7 @@ void xenvif_disconnect_data(struct xenvif *vif)
for (queue_index = 0; queue_index < num_queues; ++queue_index) {
queue = &vif->queues[queue_index];
- netif_napi_del(&queue->napi);
-
- if (queue->task) {
- kthread_stop(queue->task);
- put_task_struct(queue->task);
- queue->task = NULL;
- }
-
- if (queue->dealloc_task) {
- kthread_stop(queue->dealloc_task);
- queue->dealloc_task = NULL;
- }
-
- if (queue->tx_irq) {
- if (queue->tx_irq == queue->rx_irq)
- unbind_from_irqhandler(queue->tx_irq, queue);
- else {
- unbind_from_irqhandler(queue->tx_irq, queue);
- unbind_from_irqhandler(queue->rx_irq, queue);
- }
- queue->tx_irq = 0;
- }
-
- xenvif_unmap_frontend_data_rings(queue);
+ xenvif_disconnect_queue(queue);
}
xenvif_mcast_addr_list_free(vif);
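
The xen-netback rework leans on kthread_run(), which is kthread_create() plus an immediate wake_up_process(); that is why the explicit wakeups disappear from the connect path. The shape of such a worker, sketched with hypothetical names:

	#include <linux/kthread.h>
	#include <linux/sched.h>
	#include <linux/err.h>

	static int worker_fn(void *data)
	{
		while (!kthread_should_stop()) {
			set_current_state(TASK_INTERRUPTIBLE);
			schedule();	/* real work would happen here */
		}
		return 0;
	}

	static struct task_struct *start_worker(void *data, const char *name)
	{
		struct task_struct *task;

		/* created already running, no wake_up_process() needed */
		task = kthread_run(worker_fn, data, "%s-worker", name);
		return IS_ERR(task) ? NULL : task;
	}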
diff --git a/drivers/nfc/nfcmrvl/Kconfig b/drivers/nfc/nfcmrvl/Kconfig
index 06f34fb4e0b0..ded0d03c0015 100644
--- a/drivers/nfc/nfcmrvl/Kconfig
+++ b/drivers/nfc/nfcmrvl/Kconfig
@@ -15,7 +15,7 @@ config NFC_MRVL_USB
Marvell NFC-over-USB driver.
This driver provides support for Marvell NFC-over-USB devices:
- 8897.
+ 8897.
Say Y here to compile support for Marvell NFC-over-USB driver
into the kernel or say M to compile it as module.
diff --git a/drivers/nfc/nfcmrvl/i2c.c b/drivers/nfc/nfcmrvl/i2c.c
index 0f22379887ca..18cd96284b77 100644
--- a/drivers/nfc/nfcmrvl/i2c.c
+++ b/drivers/nfc/nfcmrvl/i2c.c
@@ -278,7 +278,6 @@ static struct i2c_driver nfcmrvl_i2c_driver = {
.remove = nfcmrvl_i2c_remove,
.driver = {
.name = "nfcmrvl_i2c",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(of_nfcmrvl_i2c_match),
},
};
diff --git a/drivers/nfc/nxp-nci/i2c.c b/drivers/nfc/nxp-nci/i2c.c
index 307bd2afbe05..4d1909aecd6c 100644
--- a/drivers/nfc/nxp-nci/i2c.c
+++ b/drivers/nfc/nxp-nci/i2c.c
@@ -220,8 +220,10 @@ static irqreturn_t nxp_nci_i2c_irq_thread_fn(int irq, void *phy_id)
if (r == -EREMOTEIO) {
phy->hard_fault = r;
- skb = NULL;
- } else if (r < 0) {
+ if (info->mode == NXP_NCI_MODE_FW)
+ nxp_nci_fw_recv_frame(phy->ndev, NULL);
+ }
+ if (r < 0) {
nfc_err(&client->dev, "Read failed with error %d\n", r);
goto exit_irq_handled;
}
diff --git a/drivers/nfc/pn533/Kconfig b/drivers/nfc/pn533/Kconfig
index f6d6b345ba0d..7fe1bbe26568 100644
--- a/drivers/nfc/pn533/Kconfig
+++ b/drivers/nfc/pn533/Kconfig
@@ -26,3 +26,14 @@ config NFC_PN533_I2C
If you choose to build a module, it'll be called pn533_i2c.
Say N if unsure.
+
+config NFC_PN532_UART
+ tristate "NFC PN532 device support (UART)"
+ depends on SERIAL_DEV_BUS
+ select NFC_PN533
+ ---help---
+ This module adds support for the NXP pn532 UART interface.
+ Select this if your platform is using the UART bus.
+
+ If you choose to build a module, it'll be called pn532_uart.
+ Say N if unsure.
diff --git a/drivers/nfc/pn533/Makefile b/drivers/nfc/pn533/Makefile
index 43c25b4f9466..b9648337576f 100644
--- a/drivers/nfc/pn533/Makefile
+++ b/drivers/nfc/pn533/Makefile
@@ -4,7 +4,9 @@
#
pn533_usb-objs = usb.o
pn533_i2c-objs = i2c.o
+pn532_uart-objs = uart.o
obj-$(CONFIG_NFC_PN533) += pn533.o
obj-$(CONFIG_NFC_PN533_USB) += pn533_usb.o
obj-$(CONFIG_NFC_PN533_I2C) += pn533_i2c.o
+obj-$(CONFIG_NFC_PN532_UART) += pn532_uart.o
diff --git a/drivers/nfc/pn533/i2c.c b/drivers/nfc/pn533/i2c.c
index 1832cd921ea7..7507176cca0a 100644
--- a/drivers/nfc/pn533/i2c.c
+++ b/drivers/nfc/pn533/i2c.c
@@ -193,12 +193,10 @@ static int pn533_i2c_probe(struct i2c_client *client,
phy->i2c_dev = client;
i2c_set_clientdata(client, phy);
- priv = pn533_register_device(PN533_DEVICE_PN532,
- PN533_NO_TYPE_B_PROTOCOLS,
- PN533_PROTO_REQ_ACK_RESP,
- phy, &i2c_phy_ops, NULL,
- &phy->i2c_dev->dev,
- &client->dev);
+ priv = pn53x_common_init(PN533_DEVICE_PN532,
+ PN533_PROTO_REQ_ACK_RESP,
+ phy, &i2c_phy_ops, NULL,
+ &phy->i2c_dev->dev);
if (IS_ERR(priv)) {
r = PTR_ERR(priv);
@@ -206,6 +204,9 @@ static int pn533_i2c_probe(struct i2c_client *client,
}
phy->priv = priv;
+ r = pn532_i2c_nfc_alloc(priv, PN533_NO_TYPE_B_PROTOCOLS, &client->dev);
+ if (r)
+ goto nfc_alloc_err;
r = request_threaded_irq(client->irq, NULL, pn533_i2c_irq_thread_fn,
IRQF_TRIGGER_FALLING |
@@ -220,13 +221,20 @@ static int pn533_i2c_probe(struct i2c_client *client,
if (r)
goto fn_setup_err;
- return 0;
+ r = nfc_register_device(priv->nfc_dev);
+ if (r)
+ goto fn_setup_err;
+
+ return r;
fn_setup_err:
free_irq(client->irq, phy);
irq_rqst_err:
- pn533_unregister_device(phy->priv);
+ nfc_free_device(priv->nfc_dev);
+
+nfc_alloc_err:
+ pn53x_common_clean(phy->priv);
return r;
}
@@ -239,12 +247,18 @@ static int pn533_i2c_remove(struct i2c_client *client)
free_irq(client->irq, phy);
- pn533_unregister_device(phy->priv);
+ pn53x_unregister_nfc(phy->priv);
+ pn53x_common_clean(phy->priv);
return 0;
}
static const struct of_device_id of_pn533_i2c_match[] = {
+ { .compatible = "nxp,pn532", },
+ /*
+ * NOTE: The use of the compatibles with the trailing "...-i2c" is
+ * deprecated and will be removed.
+ */
{ .compatible = "nxp,pn533-i2c", },
{ .compatible = "nxp,pn532-i2c", },
{},
diff --git a/drivers/nfc/pn533/pn533.c b/drivers/nfc/pn533/pn533.c
index a172a32aa9d9..346e084387f7 100644
--- a/drivers/nfc/pn533/pn533.c
+++ b/drivers/nfc/pn533/pn533.c
@@ -185,6 +185,32 @@ struct pn533_cmd_jump_dep_response {
u8 gt[];
} __packed;
+struct pn532_autopoll_resp {
+ u8 type;
+ u8 ln;
+ u8 tg;
+ u8 tgdata[];
+};
+
+/* PN532_CMD_IN_AUTOPOLL */
+#define PN532_AUTOPOLL_POLLNR_INFINITE 0xff
+#define PN532_AUTOPOLL_PERIOD 0x03 /* in units of 150 ms */
+
+#define PN532_AUTOPOLL_TYPE_GENERIC_106 0x00
+#define PN532_AUTOPOLL_TYPE_GENERIC_212 0x01
+#define PN532_AUTOPOLL_TYPE_GENERIC_424 0x02
+#define PN532_AUTOPOLL_TYPE_JEWEL 0x04
+#define PN532_AUTOPOLL_TYPE_MIFARE 0x10
+#define PN532_AUTOPOLL_TYPE_FELICA212 0x11
+#define PN532_AUTOPOLL_TYPE_FELICA424 0x12
+#define PN532_AUTOPOLL_TYPE_ISOA 0x20
+#define PN532_AUTOPOLL_TYPE_ISOB 0x23
+#define PN532_AUTOPOLL_TYPE_DEP_PASSIVE_106 0x40
+#define PN532_AUTOPOLL_TYPE_DEP_PASSIVE_212 0x41
+#define PN532_AUTOPOLL_TYPE_DEP_PASSIVE_424 0x42
+#define PN532_AUTOPOLL_TYPE_DEP_ACTIVE_106 0x80
+#define PN532_AUTOPOLL_TYPE_DEP_ACTIVE_212 0x81
+#define PN532_AUTOPOLL_TYPE_DEP_ACTIVE_424 0x82
/* PN533_TG_INIT_AS_TARGET */
#define PN533_INIT_TARGET_PASSIVE 0x1
@@ -1389,6 +1415,101 @@ static int pn533_poll_dep(struct nfc_dev *nfc_dev)
return rc;
}
+static int pn533_autopoll_complete(struct pn533 *dev, void *arg,
+ struct sk_buff *resp)
+{
+ struct pn532_autopoll_resp *apr;
+ struct nfc_target nfc_tgt;
+ u8 nbtg;
+ int rc;
+
+ if (IS_ERR(resp)) {
+ rc = PTR_ERR(resp);
+
+ nfc_err(dev->dev, "%s autopoll complete error %d\n",
+ __func__, rc);
+
+ if (rc == -ENOENT) {
+ if (dev->poll_mod_count != 0)
+ return rc;
+ goto stop_poll;
+ } else if (rc < 0) {
+ nfc_err(dev->dev,
+ "Error %d when running autopoll\n", rc);
+ goto stop_poll;
+ }
+ }
+
+ nbtg = resp->data[0];
+ if ((nbtg > 2) || (nbtg <= 0))
+ return -EAGAIN;
+
+ apr = (struct pn532_autopoll_resp *)&resp->data[1];
+ while (nbtg--) {
+ memset(&nfc_tgt, 0, sizeof(struct nfc_target));
+ switch (apr->type) {
+ case PN532_AUTOPOLL_TYPE_ISOA:
+ dev_dbg(dev->dev, "ISOA\n");
+ rc = pn533_target_found_type_a(&nfc_tgt, apr->tgdata,
+ apr->ln - 1);
+ break;
+ case PN532_AUTOPOLL_TYPE_FELICA212:
+ case PN532_AUTOPOLL_TYPE_FELICA424:
+ dev_dbg(dev->dev, "FELICA\n");
+ rc = pn533_target_found_felica(&nfc_tgt, apr->tgdata,
+ apr->ln - 1);
+ break;
+ case PN532_AUTOPOLL_TYPE_JEWEL:
+ dev_dbg(dev->dev, "JEWEL\n");
+ rc = pn533_target_found_jewel(&nfc_tgt, apr->tgdata,
+ apr->ln - 1);
+ break;
+ case PN532_AUTOPOLL_TYPE_ISOB:
+ dev_dbg(dev->dev, "ISOB\n");
+ rc = pn533_target_found_type_b(&nfc_tgt, apr->tgdata,
+ apr->ln - 1);
+ break;
+ case PN532_AUTOPOLL_TYPE_MIFARE:
+ dev_dbg(dev->dev, "Mifare\n");
+ rc = pn533_target_found_type_a(&nfc_tgt, apr->tgdata,
+ apr->ln - 1);
+ break;
+ default:
+ nfc_err(dev->dev,
+ "Unknown current poll modulation\n");
+ rc = -EPROTO;
+ }
+
+ if (rc)
+ goto done;
+
+ if (!(nfc_tgt.supported_protocols & dev->poll_protocols)) {
+ nfc_err(dev->dev,
+ "The Tg found doesn't have the desired protocol\n");
+ rc = -EAGAIN;
+ goto done;
+ }
+
+ dev->tgt_available_prots = nfc_tgt.supported_protocols;
+ apr = (struct pn532_autopoll_resp *)
+ (apr->tgdata + (apr->ln - 1));
+ }
+
+ pn533_poll_reset_mod_list(dev);
+ nfc_targets_found(dev->nfc_dev, &nfc_tgt, 1);
+
+done:
+ dev_kfree_skb(resp);
+ return rc;
+
+stop_poll:
+ nfc_err(dev->dev, "autopoll operation has been stopped\n");
+
+ pn533_poll_reset_mod_list(dev);
+ dev->poll_protocols = 0;
+ return rc;
+}
+
static int pn533_poll_complete(struct pn533 *dev, void *arg,
struct sk_buff *resp)
{
@@ -1532,6 +1653,7 @@ static int pn533_start_poll(struct nfc_dev *nfc_dev,
{
struct pn533 *dev = nfc_get_drvdata(nfc_dev);
struct pn533_poll_modulations *cur_mod;
+ struct sk_buff *skb;
u8 rand_mod;
int rc;
@@ -1557,9 +1679,73 @@ static int pn533_start_poll(struct nfc_dev *nfc_dev,
tm_protocols = 0;
}
- pn533_poll_create_mod_list(dev, im_protocols, tm_protocols);
dev->poll_protocols = im_protocols;
dev->listen_protocols = tm_protocols;
+ if (dev->device_type == PN533_DEVICE_PN532_AUTOPOLL) {
+ skb = pn533_alloc_skb(dev, 4 + 6);
+ if (!skb)
+ return -ENOMEM;
+
+ *((u8 *)skb_put(skb, sizeof(u8))) =
+ PN532_AUTOPOLL_POLLNR_INFINITE;
+ *((u8 *)skb_put(skb, sizeof(u8))) = PN532_AUTOPOLL_PERIOD;
+
+ if ((im_protocols & NFC_PROTO_MIFARE_MASK) &&
+ (im_protocols & NFC_PROTO_ISO14443_MASK) &&
+ (im_protocols & NFC_PROTO_NFC_DEP_MASK))
+ *((u8 *)skb_put(skb, sizeof(u8))) =
+ PN532_AUTOPOLL_TYPE_GENERIC_106;
+ else {
+ if (im_protocols & NFC_PROTO_MIFARE_MASK)
+ *((u8 *)skb_put(skb, sizeof(u8))) =
+ PN532_AUTOPOLL_TYPE_MIFARE;
+
+ if (im_protocols & NFC_PROTO_ISO14443_MASK)
+ *((u8 *)skb_put(skb, sizeof(u8))) =
+ PN532_AUTOPOLL_TYPE_ISOA;
+
+ if (im_protocols & NFC_PROTO_NFC_DEP_MASK) {
+ *((u8 *)skb_put(skb, sizeof(u8))) =
+ PN532_AUTOPOLL_TYPE_DEP_PASSIVE_106;
+ *((u8 *)skb_put(skb, sizeof(u8))) =
+ PN532_AUTOPOLL_TYPE_DEP_PASSIVE_212;
+ *((u8 *)skb_put(skb, sizeof(u8))) =
+ PN532_AUTOPOLL_TYPE_DEP_PASSIVE_424;
+ }
+ }
+
+ if (im_protocols & NFC_PROTO_FELICA_MASK ||
+ im_protocols & NFC_PROTO_NFC_DEP_MASK) {
+ *((u8 *)skb_put(skb, sizeof(u8))) =
+ PN532_AUTOPOLL_TYPE_FELICA212;
+ *((u8 *)skb_put(skb, sizeof(u8))) =
+ PN532_AUTOPOLL_TYPE_FELICA424;
+ }
+
+ if (im_protocols & NFC_PROTO_JEWEL_MASK)
+ *((u8 *)skb_put(skb, sizeof(u8))) =
+ PN532_AUTOPOLL_TYPE_JEWEL;
+
+ if (im_protocols & NFC_PROTO_ISO14443_B_MASK)
+ *((u8 *)skb_put(skb, sizeof(u8))) =
+ PN532_AUTOPOLL_TYPE_ISOB;
+
+ if (tm_protocols)
+ *((u8 *)skb_put(skb, sizeof(u8))) =
+ PN532_AUTOPOLL_TYPE_DEP_ACTIVE_106;
+
+ rc = pn533_send_cmd_async(dev, PN533_CMD_IN_AUTOPOLL, skb,
+ pn533_autopoll_complete, NULL);
+
+ if (rc < 0)
+ dev_kfree_skb(skb);
+ else
+ dev->poll_mod_count++;
+
+ return rc;
+ }
+
+ pn533_poll_create_mod_list(dev, im_protocols, tm_protocols);
/* Do not always start polling from the same modulation */
get_random_bytes(&rand_mod, sizeof(rand_mod));
@@ -2457,9 +2643,17 @@ static int pn532_sam_configuration(struct nfc_dev *nfc_dev)
static int pn533_dev_up(struct nfc_dev *nfc_dev)
{
struct pn533 *dev = nfc_get_drvdata(nfc_dev);
+ int rc;
+
+ if (dev->phy_ops->dev_up) {
+ rc = dev->phy_ops->dev_up(dev);
+ if (rc)
+ return rc;
+ }
- if (dev->device_type == PN533_DEVICE_PN532) {
- int rc = pn532_sam_configuration(nfc_dev);
+ if ((dev->device_type == PN533_DEVICE_PN532) ||
+ (dev->device_type == PN533_DEVICE_PN532_AUTOPOLL)) {
+ rc = pn532_sam_configuration(nfc_dev);
if (rc)
return rc;
@@ -2470,7 +2664,14 @@ static int pn533_dev_up(struct nfc_dev *nfc_dev)
static int pn533_dev_down(struct nfc_dev *nfc_dev)
{
- return pn533_rf_field(nfc_dev, 0);
+ struct pn533 *dev = nfc_get_drvdata(nfc_dev);
+ int ret;
+
+ ret = pn533_rf_field(nfc_dev, 0);
+ if (dev->phy_ops->dev_down && !ret)
+ ret = dev->phy_ops->dev_down(dev);
+
+ return ret;
}
static struct nfc_ops pn533_nfc_ops = {
@@ -2498,6 +2699,7 @@ static int pn533_setup(struct pn533 *dev)
case PN533_DEVICE_PASORI:
case PN533_DEVICE_ACR122U:
case PN533_DEVICE_PN532:
+ case PN533_DEVICE_PN532_AUTOPOLL:
max_retries.mx_rty_atr = 0x2;
max_retries.mx_rty_psl = 0x1;
max_retries.mx_rty_passive_act =
@@ -2534,6 +2736,7 @@ static int pn533_setup(struct pn533 *dev)
switch (dev->device_type) {
case PN533_DEVICE_STD:
case PN533_DEVICE_PN532:
+ case PN533_DEVICE_PN532_AUTOPOLL:
break;
case PN533_DEVICE_PASORI:
@@ -2580,14 +2783,12 @@ int pn533_finalize_setup(struct pn533 *dev)
}
EXPORT_SYMBOL_GPL(pn533_finalize_setup);
-struct pn533 *pn533_register_device(u32 device_type,
- u32 protocols,
+struct pn533 *pn53x_common_init(u32 device_type,
enum pn533_protocol_type protocol_type,
void *phy,
struct pn533_phy_ops *phy_ops,
struct pn533_frame_ops *fops,
- struct device *dev,
- struct device *parent)
+ struct device *dev)
{
struct pn533 *priv;
int rc = -ENOMEM;
@@ -2628,43 +2829,18 @@ struct pn533 *pn533_register_device(u32 device_type,
skb_queue_head_init(&priv->fragment_skb);
INIT_LIST_HEAD(&priv->cmd_queue);
-
- priv->nfc_dev = nfc_allocate_device(&pn533_nfc_ops, protocols,
- priv->ops->tx_header_len +
- PN533_CMD_DATAEXCH_HEAD_LEN,
- priv->ops->tx_tail_len);
- if (!priv->nfc_dev) {
- rc = -ENOMEM;
- goto destroy_wq;
- }
-
- nfc_set_parent_dev(priv->nfc_dev, parent);
- nfc_set_drvdata(priv->nfc_dev, priv);
-
- rc = nfc_register_device(priv->nfc_dev);
- if (rc)
- goto free_nfc_dev;
-
return priv;
-free_nfc_dev:
- nfc_free_device(priv->nfc_dev);
-
-destroy_wq:
- destroy_workqueue(priv->wq);
error:
kfree(priv);
return ERR_PTR(rc);
}
-EXPORT_SYMBOL_GPL(pn533_register_device);
+EXPORT_SYMBOL_GPL(pn53x_common_init);
-void pn533_unregister_device(struct pn533 *priv)
+void pn53x_common_clean(struct pn533 *priv)
{
struct pn533_cmd *cmd, *n;
- nfc_unregister_device(priv->nfc_dev);
- nfc_free_device(priv->nfc_dev);
-
flush_delayed_work(&priv->poll_work);
destroy_workqueue(priv->wq);
@@ -2679,8 +2855,47 @@ void pn533_unregister_device(struct pn533 *priv)
kfree(priv);
}
-EXPORT_SYMBOL_GPL(pn533_unregister_device);
+EXPORT_SYMBOL_GPL(pn53x_common_clean);
+
+int pn532_i2c_nfc_alloc(struct pn533 *priv, u32 protocols,
+ struct device *parent)
+{
+ priv->nfc_dev = nfc_allocate_device(&pn533_nfc_ops, protocols,
+ priv->ops->tx_header_len +
+ PN533_CMD_DATAEXCH_HEAD_LEN,
+ priv->ops->tx_tail_len);
+ if (!priv->nfc_dev)
+ return -ENOMEM;
+
+ nfc_set_parent_dev(priv->nfc_dev, parent);
+ nfc_set_drvdata(priv->nfc_dev, priv);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pn532_i2c_nfc_alloc);
+int pn53x_register_nfc(struct pn533 *priv, u32 protocols,
+ struct device *parent)
+{
+ int rc;
+
+ rc = pn532_i2c_nfc_alloc(priv, protocols, parent);
+ if (rc)
+ return rc;
+
+ rc = nfc_register_device(priv->nfc_dev);
+ if (rc)
+ nfc_free_device(priv->nfc_dev);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(pn53x_register_nfc);
+
+void pn53x_unregister_nfc(struct pn533 *priv)
+{
+ nfc_unregister_device(priv->nfc_dev);
+ nfc_free_device(priv->nfc_dev);
+}
+EXPORT_SYMBOL_GPL(pn53x_unregister_nfc);
MODULE_AUTHOR("Lauro Ramos Venancio <lauro.venancio@openbossa.org>");
MODULE_AUTHOR("Aloisio Almeida Jr <aloisio.almeida@openbossa.org>");
diff --git a/drivers/nfc/pn533/pn533.h b/drivers/nfc/pn533/pn533.h
index 8bf9d6ece0f5..5f94f38a2a08 100644
--- a/drivers/nfc/pn533/pn533.h
+++ b/drivers/nfc/pn533/pn533.h
@@ -6,10 +6,11 @@
* Copyright (C) 2012-2013 Tieto Poland
*/
-#define PN533_DEVICE_STD 0x1
-#define PN533_DEVICE_PASORI 0x2
-#define PN533_DEVICE_ACR122U 0x3
-#define PN533_DEVICE_PN532 0x4
+#define PN533_DEVICE_STD 0x1
+#define PN533_DEVICE_PASORI 0x2
+#define PN533_DEVICE_ACR122U 0x3
+#define PN533_DEVICE_PN532 0x4
+#define PN533_DEVICE_PN532_AUTOPOLL 0x5
#define PN533_ALL_PROTOCOLS (NFC_PROTO_JEWEL_MASK | NFC_PROTO_MIFARE_MASK |\
NFC_PROTO_FELICA_MASK | NFC_PROTO_ISO14443_MASK |\
@@ -43,6 +44,11 @@
/* Preamble (1), SoPC (2), ACK Code (2), Postamble (1) */
#define PN533_STD_FRAME_ACK_SIZE 6
+/*
+ * Preamble (1), SoPC (2), Packet Length (1), Packet Length Checksum (1),
+ * Specific Application Level Error Code (1), Data Checksum (1),
+ * Postamble (1)
+ */
+#define PN533_STD_ERROR_FRAME_SIZE 8
#define PN533_STD_FRAME_CHECKSUM(f) (f->data[f->datalen])
#define PN533_STD_FRAME_POSTAMBLE(f) (f->data[f->datalen + 1])
/* Half start code (3), LEN (4) should be 0xffff for extended frame */
@@ -70,6 +76,7 @@
#define PN533_CMD_IN_ATR 0x50
#define PN533_CMD_IN_RELEASE 0x52
#define PN533_CMD_IN_JUMP_FOR_DEP 0x56
+#define PN533_CMD_IN_AUTOPOLL 0x60
#define PN533_CMD_TG_INIT_AS_TARGET 0x8c
#define PN533_CMD_TG_GET_DATA 0x86
@@ -84,6 +91,9 @@
#define PN533_CMD_MI_MASK 0x40
#define PN533_CMD_RET_SUCCESS 0x00
+#define PN533_FRAME_DATALEN_ACK 0x00
+#define PN533_FRAME_DATALEN_ERROR 0x01
+#define PN533_FRAME_DATALEN_EXTENDED 0xFF
enum pn533_protocol_type {
PN533_PROTO_REQ_ACK_RESP = 0,
@@ -207,21 +217,33 @@ struct pn533_phy_ops {
struct sk_buff *out);
int (*send_ack)(struct pn533 *dev, gfp_t flags);
void (*abort_cmd)(struct pn533 *priv, gfp_t flags);
+ /*
+ * dev_up and dev_down are optional.
+ * They are used to inform the phy layer that the nfc chip
+ * is about to be used very soon. The phy layer can then
+ * bring up its interface to the chip, and otherwise keep it
+ * suspended for power-saving reasons.
+ */
+ int (*dev_up)(struct pn533 *priv);
+ int (*dev_down)(struct pn533 *priv);
};
-struct pn533 *pn533_register_device(u32 device_type,
- u32 protocols,
+struct pn533 *pn53x_common_init(u32 device_type,
enum pn533_protocol_type protocol_type,
void *phy,
struct pn533_phy_ops *phy_ops,
struct pn533_frame_ops *fops,
- struct device *dev,
- struct device *parent);
+ struct device *dev);
int pn533_finalize_setup(struct pn533 *dev);
-void pn533_unregister_device(struct pn533 *priv);
+void pn53x_common_clean(struct pn533 *priv);
void pn533_recv_frame(struct pn533 *dev, struct sk_buff *skb, int status);
+int pn532_i2c_nfc_alloc(struct pn533 *priv, u32 protocols,
+ struct device *parent);
+int pn53x_register_nfc(struct pn533 *priv, u32 protocols,
+ struct device *parent);
+void pn53x_unregister_nfc(struct pn533 *priv);
bool pn533_rx_frame_is_cmd_response(struct pn533 *dev, void *frame);
bool pn533_rx_frame_is_ack(void *_frame);
diff --git a/drivers/nfc/pn533/uart.c b/drivers/nfc/pn533/uart.c
new file mode 100644
index 000000000000..a0665d8ea85b
--- /dev/null
+++ b/drivers/nfc/pn533/uart.c
@@ -0,0 +1,330 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Driver for NXP PN532 NFC Chip - UART transport layer
+ *
+ * Copyright (C) 2018 Lemonage Software GmbH
+ * Author: Lars Pöschel <poeschel@lemonage.de>
+ * All rights reserved.
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/nfc.h>
+#include <linux/netdevice.h>
+#include <linux/of.h>
+#include <linux/serdev.h>
+#include "pn533.h"
+
+#define PN532_UART_SKB_BUFF_LEN (PN533_CMD_DATAEXCH_DATA_MAXLEN * 2)
+
+enum send_wakeup {
+ PN532_SEND_NO_WAKEUP = 0,
+ PN532_SEND_WAKEUP,
+ PN532_SEND_LAST_WAKEUP,
+};
+
+struct pn532_uart_phy {
+ struct serdev_device *serdev;
+ struct sk_buff *recv_skb;
+ struct pn533 *priv;
+ /*
+ * send_wakeup controls whether we need to send a wakeup request to
+ * the pn532 chip prior to our actual command. There is a small
+ * probability of a race condition. We decided not to protect the
+ * variable with a mutex, as the worst that can happen is that we send
+ * a wakeup to a chip that is already awake. This does not hurt; it is
+ * a no-op to the chip.
+ */
+ enum send_wakeup send_wakeup;
+ struct timer_list cmd_timeout;
+ struct sk_buff *cur_out_buf;
+};
+
+static int pn532_uart_send_frame(struct pn533 *dev,
+ struct sk_buff *out)
+{
+ /* wakeup sequence and dummy bytes for waiting time */
+ static const u8 wakeup[] = {
+ 0x55, 0x55, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+ struct pn532_uart_phy *pn532 = dev->phy;
+ int err;
+
+ print_hex_dump_debug("PN532_uart TX: ", DUMP_PREFIX_NONE, 16, 1,
+ out->data, out->len, false);
+
+ pn532->cur_out_buf = out;
+ if (pn532->send_wakeup) {
+ err = serdev_device_write(pn532->serdev,
+ wakeup, sizeof(wakeup),
+ MAX_SCHEDULE_TIMEOUT);
+ if (err < 0)
+ return err;
+ }
+
+ if (pn532->send_wakeup == PN532_SEND_LAST_WAKEUP)
+ pn532->send_wakeup = PN532_SEND_NO_WAKEUP;
+
+ err = serdev_device_write(pn532->serdev, out->data, out->len,
+ MAX_SCHEDULE_TIMEOUT);
+ if (err < 0)
+ return err;
+
+ mod_timer(&pn532->cmd_timeout, HZ / 40 + jiffies);
+ return 0;
+}
+
+static int pn532_uart_send_ack(struct pn533 *dev, gfp_t flags)
+{
+ /* spec 7.1.1.3: Preamble, SoPC (2), ACK Code (2), Postamble */
+ static const u8 ack[PN533_STD_FRAME_ACK_SIZE] = {
+ 0x00, 0x00, 0xff, 0x00, 0xff, 0x00};
+ struct pn532_uart_phy *pn532 = dev->phy;
+ int err;
+
+ err = serdev_device_write(pn532->serdev, ack, sizeof(ack),
+ MAX_SCHEDULE_TIMEOUT);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static void pn532_uart_abort_cmd(struct pn533 *dev, gfp_t flags)
+{
+ /* An ack will cancel the last issued command */
+ pn532_uart_send_ack(dev, flags);
+ /* schedule cmd_complete_work to finish current command execution */
+ pn533_recv_frame(dev, NULL, -ENOENT);
+}
+
+static int pn532_dev_up(struct pn533 *dev)
+{
+ struct pn532_uart_phy *pn532 = dev->phy;
+ int ret = 0;
+
+ ret = serdev_device_open(pn532->serdev);
+ if (ret)
+ return ret;
+
+ pn532->send_wakeup = PN532_SEND_LAST_WAKEUP;
+ return ret;
+}
+
+static int pn532_dev_down(struct pn533 *dev)
+{
+ struct pn532_uart_phy *pn532 = dev->phy;
+
+ serdev_device_close(pn532->serdev);
+ pn532->send_wakeup = PN532_SEND_WAKEUP;
+
+ return 0;
+}
+
+static struct pn533_phy_ops uart_phy_ops = {
+ .send_frame = pn532_uart_send_frame,
+ .send_ack = pn532_uart_send_ack,
+ .abort_cmd = pn532_uart_abort_cmd,
+ .dev_up = pn532_dev_up,
+ .dev_down = pn532_dev_down,
+};
+
+static void pn532_cmd_timeout(struct timer_list *t)
+{
+ struct pn532_uart_phy *dev = from_timer(dev, t, cmd_timeout);
+
+ pn532_uart_send_frame(dev->priv, dev->cur_out_buf);
+}
+
+/*
+ * Scans the buffer for a pn532 frame. The frame is not fully validated
+ * here; that is done later by pn533_rx_frame_is_valid(), which copes
+ * with malformed or erroneously transmitted frames. Adjusts the buffer
+ * position to where the frame starts, since pn533_recv_frame() expects
+ * a well-formed frame.
+ */
+static int pn532_uart_rx_is_frame(struct sk_buff *skb)
+{
+ struct pn533_std_frame *std;
+ struct pn533_ext_frame *ext;
+ u16 frame_len;
+ int i;
+
+ for (i = 0; i + PN533_STD_FRAME_ACK_SIZE <= skb->len; i++) {
+ std = (struct pn533_std_frame *)&skb->data[i];
+ /* search start code */
+ if (std->start_frame != cpu_to_be16(PN533_STD_FRAME_SOF))
+ continue;
+
+ /* frame type */
+ switch (std->datalen) {
+ case PN533_FRAME_DATALEN_ACK:
+ if (std->datalen_checksum == 0xff) {
+ skb_pull(skb, i);
+ return 1;
+ }
+
+ break;
+ case PN533_FRAME_DATALEN_ERROR:
+ if ((std->datalen_checksum == 0xff) &&
+ (skb->len >=
+ PN533_STD_ERROR_FRAME_SIZE)) {
+ skb_pull(skb, i);
+ return 1;
+ }
+
+ break;
+ case PN533_FRAME_DATALEN_EXTENDED:
+ ext = (struct pn533_ext_frame *)&skb->data[i];
+ frame_len = be16_to_cpu(ext->datalen);
+ if (skb->len >= frame_len +
+ sizeof(struct pn533_ext_frame) +
+ 2 /* CKS + Postamble */) {
+ skb_pull(skb, i);
+ return 1;
+ }
+
+ break;
+ default: /* normal information frame */
+ frame_len = std->datalen;
+ if (skb->len >= frame_len +
+ sizeof(struct pn533_std_frame) +
+ 2 /* CKS + Postamble */) {
+ skb_pull(skb, i);
+ return 1;
+ }
+
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int pn532_receive_buf(struct serdev_device *serdev,
+ const unsigned char *data, size_t count)
+{
+ struct pn532_uart_phy *dev = serdev_device_get_drvdata(serdev);
+ size_t i;
+
+ del_timer(&dev->cmd_timeout);
+ for (i = 0; i < count; i++) {
+ skb_put_u8(dev->recv_skb, *data++);
+ if (!pn532_uart_rx_is_frame(dev->recv_skb))
+ continue;
+
+ pn533_recv_frame(dev->priv, dev->recv_skb, 0);
+ dev->recv_skb = alloc_skb(PN532_UART_SKB_BUFF_LEN, GFP_KERNEL);
+ if (!dev->recv_skb)
+ return 0;
+ }
+
+ return i;
+}
+
+static struct serdev_device_ops pn532_serdev_ops = {
+ .receive_buf = pn532_receive_buf,
+ .write_wakeup = serdev_device_write_wakeup,
+};
+
+static const struct of_device_id pn532_uart_of_match[] = {
+ { .compatible = "nxp,pn532", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, pn532_uart_of_match);
+
+static int pn532_uart_probe(struct serdev_device *serdev)
+{
+ struct pn532_uart_phy *pn532;
+ struct pn533 *priv;
+ int err;
+
+ err = -ENOMEM;
+ pn532 = kzalloc(sizeof(*pn532), GFP_KERNEL);
+ if (!pn532)
+ goto err_exit;
+
+ pn532->recv_skb = alloc_skb(PN532_UART_SKB_BUFF_LEN, GFP_KERNEL);
+ if (!pn532->recv_skb)
+ goto err_free;
+
+ pn532->serdev = serdev;
+ serdev_device_set_drvdata(serdev, pn532);
+ serdev_device_set_client_ops(serdev, &pn532_serdev_ops);
+ err = serdev_device_open(serdev);
+ if (err) {
+ dev_err(&serdev->dev, "Unable to open device\n");
+ goto err_skb;
+ }
+
+ err = serdev_device_set_baudrate(serdev, 115200);
+ if (err != 115200) {
+ err = -EINVAL;
+ goto err_serdev;
+ }
+
+ serdev_device_set_flow_control(serdev, false);
+ pn532->send_wakeup = PN532_SEND_WAKEUP;
+ timer_setup(&pn532->cmd_timeout, pn532_cmd_timeout, 0);
+ priv = pn53x_common_init(PN533_DEVICE_PN532_AUTOPOLL,
+ PN533_PROTO_REQ_ACK_RESP,
+ pn532, &uart_phy_ops, NULL,
+ &pn532->serdev->dev);
+ if (IS_ERR(priv)) {
+ err = PTR_ERR(priv);
+ goto err_serdev;
+ }
+
+ pn532->priv = priv;
+ err = pn533_finalize_setup(pn532->priv);
+ if (err)
+ goto err_clean;
+
+ serdev_device_close(serdev);
+ err = pn53x_register_nfc(priv, PN533_NO_TYPE_B_PROTOCOLS, &serdev->dev);
+ if (err) {
+ pn53x_common_clean(pn532->priv);
+ goto err_skb;
+ }
+
+ return err;
+
+err_clean:
+ pn53x_common_clean(pn532->priv);
+err_serdev:
+ serdev_device_close(serdev);
+err_skb:
+ kfree_skb(pn532->recv_skb);
+err_free:
+ kfree(pn532);
+err_exit:
+ return err;
+}
+
+static void pn532_uart_remove(struct serdev_device *serdev)
+{
+ struct pn532_uart_phy *pn532 = serdev_device_get_drvdata(serdev);
+
+ pn53x_unregister_nfc(pn532->priv);
+ serdev_device_close(serdev);
+ pn53x_common_clean(pn532->priv);
+ kfree_skb(pn532->recv_skb);
+ kfree(pn532);
+}
+
+static struct serdev_device_driver pn532_uart_driver = {
+ .probe = pn532_uart_probe,
+ .remove = pn532_uart_remove,
+ .driver = {
+ .name = "pn532_uart",
+ .of_match_table = of_match_ptr(pn532_uart_of_match),
+ },
+};
+
+module_serdev_device_driver(pn532_uart_driver);
+
+MODULE_AUTHOR("Lars Pöschel <poeschel@lemonage.de>");
+MODULE_DESCRIPTION("PN532 UART driver");
+MODULE_LICENSE("GPL");
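
The uart phy arms a roughly 25 ms (HZ / 40) retransmit timer after each frame and re-sends from the timer callback. The timer_setup()/from_timer() pattern it relies on, reduced to a skeleton with a hypothetical struct:

	#include <linux/timer.h>
	#include <linux/jiffies.h>

	struct phy {
		struct timer_list cmd_timeout;
		/* ... pending out buffer etc. ... */
	};

	static void phy_timeout(struct timer_list *t)
	{
		struct phy *p = from_timer(p, t, cmd_timeout);
		/* retransmit the pending frame via fields of *p */
	}

	static void phy_arm(struct phy *p)
	{
		timer_setup(&p->cmd_timeout, phy_timeout, 0);
		mod_timer(&p->cmd_timeout, jiffies + HZ / 40);
	}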
diff --git a/drivers/nfc/pn533/usb.c b/drivers/nfc/pn533/usb.c
index e897e4d768ef..4590fbf82dc2 100644
--- a/drivers/nfc/pn533/usb.c
+++ b/drivers/nfc/pn533/usb.c
@@ -534,9 +534,9 @@ static int pn533_usb_probe(struct usb_interface *interface,
goto error;
}
- priv = pn533_register_device(id->driver_info, protocols, protocol_type,
+ priv = pn53x_common_init(id->driver_info, protocol_type,
phy, &usb_phy_ops, fops,
- &phy->udev->dev, &interface->dev);
+ &phy->udev->dev);
if (IS_ERR(priv)) {
rc = PTR_ERR(priv);
@@ -547,14 +547,17 @@ static int pn533_usb_probe(struct usb_interface *interface,
rc = pn533_finalize_setup(priv);
if (rc)
- goto err_deregister;
+ goto err_clean;
usb_set_intfdata(interface, phy);
+ rc = pn53x_register_nfc(priv, protocols, &interface->dev);
+ if (rc)
+ goto err_clean;
return 0;
-err_deregister:
- pn533_unregister_device(phy->priv);
+err_clean:
+ pn53x_common_clean(priv);
error:
usb_kill_urb(phy->in_urb);
usb_kill_urb(phy->out_urb);
@@ -577,7 +580,8 @@ static void pn533_usb_disconnect(struct usb_interface *interface)
if (!phy)
return;
- pn533_unregister_device(phy->priv);
+ pn53x_unregister_nfc(phy->priv);
+ pn53x_common_clean(phy->priv);
usb_set_intfdata(interface, NULL);
diff --git a/drivers/nfc/port100.c b/drivers/nfc/port100.c
index 145ddf3f0a45..604dba4f18af 100644
--- a/drivers/nfc/port100.c
+++ b/drivers/nfc/port100.c
@@ -783,7 +783,7 @@ static int port100_send_frame_async(struct port100 *dev, struct sk_buff *out,
rc = port100_submit_urb_for_ack(dev, GFP_KERNEL);
if (rc)
- usb_unlink_urb(dev->out_urb);
+ usb_kill_urb(dev->out_urb);
exit:
mutex_unlock(&dev->out_urb_lock);
diff --git a/drivers/nfc/s3fwrn5/i2c.c b/drivers/nfc/s3fwrn5/i2c.c
index e4f7fa00862d..b4eb926d220a 100644
--- a/drivers/nfc/s3fwrn5/i2c.c
+++ b/drivers/nfc/s3fwrn5/i2c.c
@@ -279,7 +279,6 @@ MODULE_DEVICE_TABLE(of, of_s3fwrn5_i2c_match);
static struct i2c_driver s3fwrn5_i2c_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = S3FWRN5_I2C_DRIVER_NAME,
.of_match_table = of_match_ptr(of_s3fwrn5_i2c_match),
},
diff --git a/drivers/nubus/nubus.c b/drivers/nubus/nubus.c
index bb0d63a44f41..f70ba58dbc55 100644
--- a/drivers/nubus/nubus.c
+++ b/drivers/nubus/nubus.c
@@ -163,7 +163,7 @@ unsigned char *nubus_dirptr(const struct nubus_dirent *nd)
void nubus_get_rsrc_mem(void *dest, const struct nubus_dirent *dirent,
unsigned int len)
{
- unsigned char *t = (unsigned char *)dest;
+ unsigned char *t = dest;
unsigned char *p = nubus_dirptr(dirent);
while (len) {
diff --git a/drivers/nvdimm/Kconfig b/drivers/nvdimm/Kconfig
index 36af7af6b7cf..b7d1eb38b27d 100644
--- a/drivers/nvdimm/Kconfig
+++ b/drivers/nvdimm/Kconfig
@@ -4,6 +4,7 @@ menuconfig LIBNVDIMM
depends on PHYS_ADDR_T_64BIT
depends on HAS_IOMEM
depends on BLK_DEV
+ select MEMREGION
help
Generic support for non-volatile memory devices including
ACPI-6-NFIT defined resources. On platforms that define an
diff --git a/drivers/nvdimm/core.c b/drivers/nvdimm/core.c
index 9204f1e9fd14..e592c4964674 100644
--- a/drivers/nvdimm/core.c
+++ b/drivers/nvdimm/core.c
@@ -455,7 +455,6 @@ static __exit void libnvdimm_exit(void)
nd_region_exit();
nvdimm_exit();
nvdimm_bus_exit();
- nd_region_devs_exit();
nvdimm_devs_exit();
}
diff --git a/drivers/nvdimm/nd-core.h b/drivers/nvdimm/nd-core.h
index 25fa121104d0..aa059439fca0 100644
--- a/drivers/nvdimm/nd-core.h
+++ b/drivers/nvdimm/nd-core.h
@@ -114,7 +114,6 @@ struct nvdimm_bus *walk_to_nvdimm_bus(struct device *nd_dev);
int __init nvdimm_bus_init(void);
void nvdimm_bus_exit(void);
void nvdimm_devs_exit(void);
-void nd_region_devs_exit(void);
struct nd_region;
void nd_region_advance_seeds(struct nd_region *nd_region, struct device *dev);
void nd_region_create_ns_seed(struct nd_region *nd_region);
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index ef423ba1a711..fbf34cf688f4 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -3,6 +3,7 @@
* Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
*/
#include <linux/scatterlist.h>
+#include <linux/memregion.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/slab.h>
@@ -19,7 +20,6 @@
*/
#include <linux/io-64-nonatomic-hi-lo.h>
-static DEFINE_IDA(region_ida);
static DEFINE_PER_CPU(int, flush_idx);
static int nvdimm_map_flush(struct device *dev, struct nvdimm *nvdimm, int dimm,
@@ -133,7 +133,7 @@ static void nd_region_release(struct device *dev)
put_device(&nvdimm->dev);
}
free_percpu(nd_region->lane);
- ida_simple_remove(&region_ida, nd_region->id);
+ memregion_free(nd_region->id);
if (is_nd_blk(dev))
kfree(to_nd_blk_region(dev));
else
@@ -985,7 +985,7 @@ static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
if (!region_buf)
return NULL;
- nd_region->id = ida_simple_get(&region_ida, 0, 0, GFP_KERNEL);
+ nd_region->id = memregion_alloc(GFP_KERNEL);
if (nd_region->id < 0)
goto err_id;
@@ -1044,7 +1044,7 @@ static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
return nd_region;
err_percpu:
- ida_simple_remove(&region_ida, nd_region->id);
+ memregion_free(nd_region->id);
err_id:
kfree(region_buf);
return NULL;
@@ -1216,8 +1216,3 @@ int nd_region_conflict(struct nd_region *nd_region, resource_size_t start,
return device_for_each_child(&nvdimm_bus->dev, &ctx, region_conflict);
}
-
-void __exit nd_region_devs_exit(void)
-{
- ida_destroy(&region_ida);
-}
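The local region_ida and its ida_simple_get()/ida_simple_remove() calls are replaced by memregion_alloc()/memregion_free(), a single shared allocator (pulled in by the "select MEMREGION" Kconfig hunk above) so that region IDs stay unique across every subsystem that creates memory regions. A rough userspace analogue of that alloc/free contract, where a bitmap stands in for the kernel's IDA and all names are hypothetical:

#include <stdio.h>

#define MAX_REGIONS 64

static unsigned long long region_map;   /* one bit per allocated id */

/* analogue of memregion_alloc(): return the lowest free id, or -1 */
static int region_alloc(void)
{
        for (int id = 0; id < MAX_REGIONS; id++) {
                if (!(region_map & (1ULL << id))) {
                        region_map |= 1ULL << id;
                        return id;
                }
        }
        return -1;
}

/* analogue of memregion_free(): release the id for reuse */
static void region_free(int id)
{
        region_map &= ~(1ULL << id);
}

int main(void)
{
        int a = region_alloc(), b = region_alloc();

        printf("%d %d\n", a, b);        /* 0 1 */
        region_free(a);
        printf("%d\n", region_alloc()); /* 0 again, freed ids are reused */
        return 0;
}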
diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig
index 2b36f052bfb9..c6439638a419 100644
--- a/drivers/nvme/host/Kconfig
+++ b/drivers/nvme/host/Kconfig
@@ -23,6 +23,16 @@ config NVME_MULTIPATH
/dev/nvmeXnY device will show up for each NVMe namespace,
even if it is accessible through multiple controllers.
+config NVME_HWMON
+ bool "NVMe hardware monitoring"
+ depends on (NVME_CORE=y && HWMON=y) || (NVME_CORE=m && HWMON)
+ help
+ This provides support for NVMe hardware monitoring. If enabled,
+ a hardware monitoring device will be created for each NVMe drive
+ in the system.
+
+ If unsure, say N.
+
config NVME_FABRICS
tristate
diff --git a/drivers/nvme/host/Makefile b/drivers/nvme/host/Makefile
index 8a4b671c5f0c..fc7b26be692d 100644
--- a/drivers/nvme/host/Makefile
+++ b/drivers/nvme/host/Makefile
@@ -14,6 +14,7 @@ nvme-core-$(CONFIG_TRACING) += trace.o
nvme-core-$(CONFIG_NVME_MULTIPATH) += multipath.o
nvme-core-$(CONFIG_NVM) += lightnvm.o
nvme-core-$(CONFIG_FAULT_INJECTION_DEBUG_FS) += fault_inject.o
+nvme-core-$(CONFIG_NVME_HWMON) += hwmon.o
nvme-y += pci.o
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index fa7ba09dca77..8e8527408db3 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -283,6 +283,8 @@ void nvme_complete_rq(struct request *req)
trace_nvme_complete_rq(req);
+ nvme_cleanup_cmd(req);
+
if (nvme_req(req)->ctrl->kas)
nvme_req(req)->ctrl->comp_seen = true;
@@ -313,7 +315,7 @@ bool nvme_cancel_request(struct request *req, void *data, bool reserved)
if (blk_mq_request_completed(req))
return true;
- nvme_req(req)->status = NVME_SC_HOST_PATH_ERROR;
+ nvme_req(req)->status = NVME_SC_HOST_ABORTED_CMD;
blk_mq_complete_request(req);
return true;
}
@@ -611,8 +613,14 @@ static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
struct nvme_dsm_range *range;
struct bio *bio;
- range = kmalloc_array(segments, sizeof(*range),
- GFP_ATOMIC | __GFP_NOWARN);
+ /*
+ * Some devices do not consider the DSM 'Number of Ranges' field when
+ * determining how much data to DMA. Always allocate memory for maximum
+ * number of segments to prevent device reading beyond end of buffer.
+ */
+ static const size_t alloc_size = sizeof(*range) * NVME_DSM_MAX_RANGES;
+
+ range = kzalloc(alloc_size, GFP_ATOMIC | __GFP_NOWARN);
if (!range) {
/*
* If we fail allocation our range, fallback to the controller
@@ -626,7 +634,7 @@ static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
}
__rq_for_each_bio(bio, req) {
- u64 slba = nvme_block_nr(ns, bio->bi_iter.bi_sector);
+ u64 slba = nvme_sect_to_lba(ns, bio->bi_iter.bi_sector);
u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift;
if (n < segments) {
@@ -652,7 +660,7 @@ static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
req->special_vec.bv_page = virt_to_page(range);
req->special_vec.bv_offset = offset_in_page(range);
- req->special_vec.bv_len = sizeof(*range) * segments;
+ req->special_vec.bv_len = alloc_size;
req->rq_flags |= RQF_SPECIAL_PAYLOAD;
return BLK_STS_OK;
@@ -667,7 +675,7 @@ static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns,
cmnd->write_zeroes.opcode = nvme_cmd_write_zeroes;
cmnd->write_zeroes.nsid = cpu_to_le32(ns->head->ns_id);
cmnd->write_zeroes.slba =
- cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
+ cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
cmnd->write_zeroes.length =
cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
cmnd->write_zeroes.control = 0;
@@ -691,7 +699,7 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
cmnd->rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read);
cmnd->rw.nsid = cpu_to_le32(ns->head->ns_id);
- cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
+ cmnd->rw.slba = cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
if (req_op(req) == REQ_OP_WRITE && ctrl->nr_streams)
@@ -1647,7 +1655,7 @@ static void nvme_init_integrity(struct gendisk *disk, u16 ms, u8 pi_type)
static void nvme_set_chunk_size(struct nvme_ns *ns)
{
- u32 chunk_size = (((u32)ns->noiob) << (ns->lba_shift - 9));
+ u32 chunk_size = nvme_lba_to_sect(ns, ns->noiob);
blk_queue_chunk_sectors(ns->queue, rounddown_pow_of_two(chunk_size));
}
@@ -1684,8 +1692,7 @@ static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns)
static void nvme_config_write_zeroes(struct gendisk *disk, struct nvme_ns *ns)
{
- u32 max_sectors;
- unsigned short bs = 1 << ns->lba_shift;
+ u64 max_blocks;
if (!(ns->ctrl->oncs & NVME_CTRL_ONCS_WRITE_ZEROES) ||
(ns->ctrl->quirks & NVME_QUIRK_DISABLE_WRITE_ZEROES))
@@ -1701,11 +1708,12 @@ static void nvme_config_write_zeroes(struct gendisk *disk, struct nvme_ns *ns)
* nvme_init_identify() if available.
*/
if (ns->ctrl->max_hw_sectors == UINT_MAX)
- max_sectors = ((u32)(USHRT_MAX + 1) * bs) >> 9;
+ max_blocks = (u64)USHRT_MAX + 1;
else
- max_sectors = ((u32)(ns->ctrl->max_hw_sectors + 1) * bs) >> 9;
+ max_blocks = ns->ctrl->max_hw_sectors + 1;
- blk_queue_max_write_zeroes_sectors(disk->queue, max_sectors);
+ blk_queue_max_write_zeroes_sectors(disk->queue,
+ nvme_lba_to_sect(ns, max_blocks));
}
static int nvme_report_ns_ids(struct nvme_ctrl *ctrl, unsigned int nsid,
@@ -1748,7 +1756,7 @@ static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b)
static void nvme_update_disk_info(struct gendisk *disk,
struct nvme_ns *ns, struct nvme_id_ns *id)
{
- sector_t capacity = le64_to_cpu(id->nsze) << (ns->lba_shift - 9);
+ sector_t capacity = nvme_lba_to_sect(ns, le64_to_cpu(id->nsze));
unsigned short bs = 1 << ns->lba_shift;
u32 atomic_bs, phys_bs, io_opt;
@@ -2796,6 +2804,9 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
ctrl->oncs = le16_to_cpu(id->oncs);
ctrl->mtfa = le16_to_cpu(id->mtfa);
ctrl->oaes = le32_to_cpu(id->oaes);
+ ctrl->wctemp = le16_to_cpu(id->wctemp);
+ ctrl->cctemp = le16_to_cpu(id->cctemp);
+
atomic_set(&ctrl->abort_limit, id->acl + 1);
ctrl->vwc = id->vwc;
if (id->mdts)
@@ -2895,6 +2906,9 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
if (ret < 0)
return ret;
+ if (!ctrl->identified)
+ nvme_hwmon_init(ctrl);
+
ctrl->identified = true;
return 0;
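The discard change above trades a little memory for safety: rather than sizing the DSM range buffer by the request's segment count, it always allocates (and zeroes) room for NVME_DSM_MAX_RANGES, which is 256 in the kernel's nvme.h, so a controller that ignores the 'Number of Ranges' field and DMAs the maximum cannot read past the end of the buffer; bv_len is updated to report the full allocation for the same reason. A standalone sketch of the sizing, with a 16-byte stand-in for struct nvme_dsm_range:

#include <stdio.h>
#include <stdint.h>

#define NVME_DSM_MAX_RANGES 256

struct dsm_range {              /* stand-in for struct nvme_dsm_range */
        uint32_t cattr;
        uint32_t nlb;
        uint64_t slba;
};

int main(void)
{
        unsigned int segments = 3;      /* ranges the request actually uses */

        size_t old_size = segments * sizeof(struct dsm_range);
        size_t max_size = NVME_DSM_MAX_RANGES * sizeof(struct dsm_range);

        /* old: 48 bytes; new: 4096 bytes, safe even if the device DMAs the max */
        printf("old=%zu new=%zu\n", old_size, max_size);
        return 0;
}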
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 265f89e11d8b..679a721ae229 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -1224,7 +1224,7 @@ nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl,
lsreq->rqstlen = sizeof(*assoc_rqst);
lsreq->rspaddr = assoc_acc;
lsreq->rsplen = sizeof(*assoc_acc);
- lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;
+ lsreq->timeout = NVME_FC_LS_TIMEOUT_SEC;
ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
if (ret)
@@ -1264,7 +1264,7 @@ nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl,
if (fcret) {
ret = -EBADF;
dev_err(ctrl->dev,
- "q %d connect failed: %s\n",
+ "q %d Create Association LS failed: %s\n",
queue->qnum, validation_errors[fcret]);
} else {
ctrl->association_id =
@@ -1332,7 +1332,7 @@ nvme_fc_connect_queue(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
lsreq->rqstlen = sizeof(*conn_rqst);
lsreq->rspaddr = conn_acc;
lsreq->rsplen = sizeof(*conn_acc);
- lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;
+ lsreq->timeout = NVME_FC_LS_TIMEOUT_SEC;
ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
if (ret)
@@ -1363,7 +1363,7 @@ nvme_fc_connect_queue(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
if (fcret) {
ret = -EBADF;
dev_err(ctrl->dev,
- "q %d connect failed: %s\n",
+ "q %d Create I/O Connection LS failed: %s\n",
queue->qnum, validation_errors[fcret]);
} else {
queue->connection_id =
@@ -1376,7 +1376,7 @@ out_free_buffer:
out_no_memory:
if (ret)
dev_err(ctrl->dev,
- "queue %d connect command failed (%d).\n",
+ "queue %d connect I/O queue failed (%d).\n",
queue->qnum, ret);
return ret;
}
@@ -1413,8 +1413,8 @@ nvme_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status)
static void
nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)
{
- struct fcnvme_ls_disconnect_rqst *discon_rqst;
- struct fcnvme_ls_disconnect_acc *discon_acc;
+ struct fcnvme_ls_disconnect_assoc_rqst *discon_rqst;
+ struct fcnvme_ls_disconnect_assoc_acc *discon_acc;
struct nvmefc_ls_req_op *lsop;
struct nvmefc_ls_req *lsreq;
int ret;
@@ -1430,11 +1430,11 @@ nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)
lsreq = &lsop->ls_req;
lsreq->private = (void *)&lsop[1];
- discon_rqst = (struct fcnvme_ls_disconnect_rqst *)
+ discon_rqst = (struct fcnvme_ls_disconnect_assoc_rqst *)
(lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
- discon_acc = (struct fcnvme_ls_disconnect_acc *)&discon_rqst[1];
+ discon_acc = (struct fcnvme_ls_disconnect_assoc_acc *)&discon_rqst[1];
- discon_rqst->w0.ls_cmd = FCNVME_LS_DISCONNECT;
+ discon_rqst->w0.ls_cmd = FCNVME_LS_DISCONNECT_ASSOC;
discon_rqst->desc_list_len = cpu_to_be32(
sizeof(struct fcnvme_lsdesc_assoc_id) +
sizeof(struct fcnvme_lsdesc_disconn_cmd));
@@ -1451,22 +1451,17 @@ nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)
discon_rqst->discon_cmd.desc_len =
fcnvme_lsdesc_len(
sizeof(struct fcnvme_lsdesc_disconn_cmd));
- discon_rqst->discon_cmd.scope = FCNVME_DISCONN_ASSOCIATION;
- discon_rqst->discon_cmd.id = cpu_to_be64(ctrl->association_id);
lsreq->rqstaddr = discon_rqst;
lsreq->rqstlen = sizeof(*discon_rqst);
lsreq->rspaddr = discon_acc;
lsreq->rsplen = sizeof(*discon_acc);
- lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;
+ lsreq->timeout = NVME_FC_LS_TIMEOUT_SEC;
ret = nvme_fc_send_ls_req_async(ctrl->rport, lsop,
nvme_fc_disconnect_assoc_done);
if (ret)
kfree(lsop);
-
- /* only meaningful part to terminating the association */
- ctrl->association_id = 0;
}
@@ -1662,7 +1657,7 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
(freq->rcv_rsplen / 4) ||
be32_to_cpu(op->rsp_iu.xfrd_len) !=
freq->transferred_length ||
- op->rsp_iu.status_code ||
+ op->rsp_iu.ersp_result ||
sqe->common.command_id != cqe->command_id)) {
status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1);
dev_info(ctrl->ctrl.device,
@@ -1672,7 +1667,7 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
ctrl->cnum, be16_to_cpu(op->rsp_iu.iu_len),
be32_to_cpu(op->rsp_iu.xfrd_len),
freq->transferred_length,
- op->rsp_iu.status_code,
+ op->rsp_iu.ersp_result,
sqe->common.command_id,
cqe->command_id);
goto done;
@@ -1731,9 +1726,14 @@ __nvme_fc_init_request(struct nvme_fc_ctrl *ctrl,
op->rq = rq;
op->rqno = rqno;
- cmdiu->scsi_id = NVME_CMD_SCSI_ID;
+ cmdiu->format_id = NVME_CMD_FORMAT_ID;
cmdiu->fc_id = NVME_CMD_FC_ID;
cmdiu->iu_len = cpu_to_be16(sizeof(*cmdiu) / sizeof(u32));
+ if (queue->qnum)
+ cmdiu->rsv_cat = fccmnd_set_cat_css(0,
+ (NVME_CC_CSS_NVM >> NVME_CC_CSS_SHIFT));
+ else
+ cmdiu->rsv_cat = fccmnd_set_cat_admin(0);
op->fcp_req.cmddma = fc_dma_map_single(ctrl->lport->dev,
&op->cmd_iu, sizeof(op->cmd_iu), DMA_TO_DEVICE);
@@ -2173,8 +2173,6 @@ nvme_fc_unmap_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
fc_dma_unmap_sg(ctrl->lport->dev, freq->sg_table.sgl, op->nents,
rq_dma_dir(rq));
- nvme_cleanup_cmd(rq);
-
sg_free_table_chained(&freq->sg_table, SG_CHUNK_SIZE);
freq->sg_cnt = 0;
@@ -2305,6 +2303,7 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
if (!(op->flags & FCOP_FLAGS_AEN))
nvme_fc_unmap_data(ctrl, op->rq, op);
+ nvme_cleanup_cmd(op->rq);
nvme_fc_ctrl_put(ctrl);
if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE &&
@@ -2695,7 +2694,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
/* warn if maxcmd is lower than queue_size */
dev_warn(ctrl->ctrl.device,
"queue_size %zu > ctrl maxcmd %u, reducing "
- "to queue_size\n",
+ "to maxcmd\n",
opts->queue_size, ctrl->ctrl.maxcmd);
opts->queue_size = ctrl->ctrl.maxcmd;
}
@@ -2703,7 +2702,8 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
if (opts->queue_size > ctrl->ctrl.sqsize + 1) {
/* warn if sqsize is lower than queue_size */
dev_warn(ctrl->ctrl.device,
- "queue_size %zu > ctrl sqsize %u, clamping down\n",
+ "queue_size %zu > ctrl sqsize %u, reducing "
+ "to sqsize\n",
opts->queue_size, ctrl->ctrl.sqsize + 1);
opts->queue_size = ctrl->ctrl.sqsize + 1;
}
@@ -2739,6 +2739,7 @@ out_term_aen_ops:
out_disconnect_admin_queue:
/* send a Disconnect(association) LS to fc-nvme target */
nvme_fc_xmt_disconnect_assoc(ctrl);
+ ctrl->association_id = 0;
out_delete_hw_queue:
__nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
out_free_queue:
@@ -2830,6 +2831,8 @@ nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
if (ctrl->association_id)
nvme_fc_xmt_disconnect_assoc(ctrl);
+ ctrl->association_id = 0;
+
if (ctrl->ctrl.tagset) {
nvme_fc_delete_hw_io_queues(ctrl);
nvme_fc_free_io_queues(ctrl);
diff --git a/drivers/nvme/host/hwmon.c b/drivers/nvme/host/hwmon.c
new file mode 100644
index 000000000000..a5af21f5d370
--- /dev/null
+++ b/drivers/nvme/host/hwmon.c
@@ -0,0 +1,259 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NVM Express hardware monitoring support
+ * Copyright (c) 2019, Guenter Roeck
+ */
+
+#include <linux/hwmon.h>
+#include <asm/unaligned.h>
+
+#include "nvme.h"
+
+/* These macros should be moved to linux/temperature.h */
+#define MILLICELSIUS_TO_KELVIN(t) DIV_ROUND_CLOSEST((t) + 273150, 1000)
+#define KELVIN_TO_MILLICELSIUS(t) ((t) * 1000L - 273150)
+
+struct nvme_hwmon_data {
+ struct nvme_ctrl *ctrl;
+ struct nvme_smart_log log;
+ struct mutex read_lock;
+};
+
+static int nvme_get_temp_thresh(struct nvme_ctrl *ctrl, int sensor, bool under,
+ long *temp)
+{
+ unsigned int threshold = sensor << NVME_TEMP_THRESH_SELECT_SHIFT;
+ u32 status;
+ int ret;
+
+ if (under)
+ threshold |= NVME_TEMP_THRESH_TYPE_UNDER;
+
+ ret = nvme_get_features(ctrl, NVME_FEAT_TEMP_THRESH, threshold, NULL, 0,
+ &status);
+ if (ret > 0)
+ return -EIO;
+ if (ret < 0)
+ return ret;
+ *temp = KELVIN_TO_MILLICELSIUS(status & NVME_TEMP_THRESH_MASK);
+
+ return 0;
+}
+
+static int nvme_set_temp_thresh(struct nvme_ctrl *ctrl, int sensor, bool under,
+ long temp)
+{
+ unsigned int threshold = sensor << NVME_TEMP_THRESH_SELECT_SHIFT;
+ int ret;
+
+ temp = MILLICELSIUS_TO_KELVIN(temp);
+ threshold |= clamp_val(temp, 0, NVME_TEMP_THRESH_MASK);
+
+ if (under)
+ threshold |= NVME_TEMP_THRESH_TYPE_UNDER;
+
+ ret = nvme_set_features(ctrl, NVME_FEAT_TEMP_THRESH, threshold, NULL, 0,
+ NULL);
+ if (ret > 0)
+ return -EIO;
+
+ return ret;
+}
+
+static int nvme_hwmon_get_smart_log(struct nvme_hwmon_data *data)
+{
+ int ret;
+
+ ret = nvme_get_log(data->ctrl, NVME_NSID_ALL, NVME_LOG_SMART, 0,
+ &data->log, sizeof(data->log), 0);
+
+ return ret <= 0 ? ret : -EIO;
+}
+
+static int nvme_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct nvme_hwmon_data *data = dev_get_drvdata(dev);
+ struct nvme_smart_log *log = &data->log;
+ int temp;
+ int err;
+
+ /*
+ * First handle attributes which don't require us to read
+ * the smart log.
+ */
+ switch (attr) {
+ case hwmon_temp_max:
+ return nvme_get_temp_thresh(data->ctrl, channel, false, val);
+ case hwmon_temp_min:
+ return nvme_get_temp_thresh(data->ctrl, channel, true, val);
+ case hwmon_temp_crit:
+ *val = KELVIN_TO_MILLICELSIUS(data->ctrl->cctemp);
+ return 0;
+ default:
+ break;
+ }
+
+ mutex_lock(&data->read_lock);
+ err = nvme_hwmon_get_smart_log(data);
+ if (err)
+ goto unlock;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ if (!channel)
+ temp = get_unaligned_le16(log->temperature);
+ else
+ temp = le16_to_cpu(log->temp_sensor[channel - 1]);
+ *val = KELVIN_TO_MILLICELSIUS(temp);
+ break;
+ case hwmon_temp_alarm:
+ *val = !!(log->critical_warning & NVME_SMART_CRIT_TEMPERATURE);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+unlock:
+ mutex_unlock(&data->read_lock);
+ return err;
+}
+
+static int nvme_hwmon_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ struct nvme_hwmon_data *data = dev_get_drvdata(dev);
+
+ switch (attr) {
+ case hwmon_temp_max:
+ return nvme_set_temp_thresh(data->ctrl, channel, false, val);
+ case hwmon_temp_min:
+ return nvme_set_temp_thresh(data->ctrl, channel, true, val);
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static const char * const nvme_hwmon_sensor_names[] = {
+ "Composite",
+ "Sensor 1",
+ "Sensor 2",
+ "Sensor 3",
+ "Sensor 4",
+ "Sensor 5",
+ "Sensor 6",
+ "Sensor 7",
+ "Sensor 8",
+};
+
+static int nvme_hwmon_read_string(struct device *dev,
+ enum hwmon_sensor_types type, u32 attr,
+ int channel, const char **str)
+{
+ *str = nvme_hwmon_sensor_names[channel];
+ return 0;
+}
+
+static umode_t nvme_hwmon_is_visible(const void *_data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ const struct nvme_hwmon_data *data = _data;
+
+ switch (attr) {
+ case hwmon_temp_crit:
+ if (!channel && data->ctrl->cctemp)
+ return 0444;
+ break;
+ case hwmon_temp_max:
+ case hwmon_temp_min:
+ if ((!channel && data->ctrl->wctemp) ||
+ (channel && data->log.temp_sensor[channel - 1])) {
+ if (data->ctrl->quirks &
+ NVME_QUIRK_NO_TEMP_THRESH_CHANGE)
+ return 0444;
+ return 0644;
+ }
+ break;
+ case hwmon_temp_alarm:
+ if (!channel)
+ return 0444;
+ break;
+ case hwmon_temp_input:
+ case hwmon_temp_label:
+ if (!channel || data->log.temp_sensor[channel - 1])
+ return 0444;
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static const struct hwmon_channel_info *nvme_hwmon_info[] = {
+ HWMON_CHANNEL_INFO(chip, HWMON_C_REGISTER_TZ),
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN |
+ HWMON_T_CRIT | HWMON_T_LABEL | HWMON_T_ALARM,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN |
+ HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN |
+ HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN |
+ HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN |
+ HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN |
+ HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN |
+ HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN |
+ HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN |
+ HWMON_T_LABEL),
+ NULL
+};
+
+static const struct hwmon_ops nvme_hwmon_ops = {
+ .is_visible = nvme_hwmon_is_visible,
+ .read = nvme_hwmon_read,
+ .read_string = nvme_hwmon_read_string,
+ .write = nvme_hwmon_write,
+};
+
+static const struct hwmon_chip_info nvme_hwmon_chip_info = {
+ .ops = &nvme_hwmon_ops,
+ .info = nvme_hwmon_info,
+};
+
+void nvme_hwmon_init(struct nvme_ctrl *ctrl)
+{
+ struct device *dev = ctrl->dev;
+ struct nvme_hwmon_data *data;
+ struct device *hwmon;
+ int err;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return;
+
+ data->ctrl = ctrl;
+ mutex_init(&data->read_lock);
+
+ err = nvme_hwmon_get_smart_log(data);
+ if (err) {
+ dev_warn(dev, "Failed to read smart log (error %d)\n", err);
+ devm_kfree(dev, data);
+ return;
+ }
+
+ hwmon = devm_hwmon_device_register_with_info(dev, "nvme", data,
+ &nvme_hwmon_chip_info,
+ NULL);
+ if (IS_ERR(hwmon)) {
+ dev_warn(dev, "Failed to instantiate hwmon device\n");
+ devm_kfree(dev, data);
+ }
+}
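NVMe reports temperatures in kelvin while the hwmon ABI uses millidegrees Celsius, hence the two conversion macros at the top of the new file. A standalone sketch of the same arithmetic, with DIV_ROUND_CLOSEST expanded by hand (this expansion is only valid for the non-negative values the threshold code clamps to):

#include <stdio.h>

/* round-to-nearest division, non-negative operands only */
#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

#define MILLICELSIUS_TO_KELVIN(t) DIV_ROUND_CLOSEST((t) + 273150, 1000)
#define KELVIN_TO_MILLICELSIUS(t) ((t) * 1000L - 273150)

int main(void)
{
        /* 300 K, a typical drive reading, is 26850 millidegrees Celsius */
        printf("%ld\n", KELVIN_TO_MILLICELSIUS(300));   /* 26850 */

        /* converting back rounds to the nearest kelvin */
        printf("%d\n", MILLICELSIUS_TO_KELVIN(26850));  /* 300 */
        return 0;
}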
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index e0f064dcbd02..797c18337d96 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -95,6 +95,7 @@ void nvme_failover_req(struct request *req)
}
break;
case NVME_SC_HOST_PATH_ERROR:
+ case NVME_SC_HOST_ABORTED_CMD:
/*
* Temporary transport disruption in talking to the controller.
* Try to send on a new path.
@@ -446,8 +447,14 @@ static int nvme_parse_ana_log(struct nvme_ctrl *ctrl, void *data,
for (i = 0; i < le16_to_cpu(ctrl->ana_log_buf->ngrps); i++) {
struct nvme_ana_group_desc *desc = base + offset;
- u32 nr_nsids = le32_to_cpu(desc->nnsids);
- size_t nsid_buf_size = nr_nsids * sizeof(__le32);
+ u32 nr_nsids;
+ size_t nsid_buf_size;
+
+ if (WARN_ON_ONCE(offset > ctrl->ana_log_size - sizeof(*desc)))
+ return -EINVAL;
+
+ nr_nsids = le32_to_cpu(desc->nnsids);
+ nsid_buf_size = nr_nsids * sizeof(__le32);
if (WARN_ON_ONCE(desc->grpid == 0))
return -EINVAL;
@@ -467,8 +474,6 @@ static int nvme_parse_ana_log(struct nvme_ctrl *ctrl, void *data,
return error;
offset += nsid_buf_size;
- if (WARN_ON_ONCE(offset > ctrl->ana_log_size - sizeof(*desc)))
- return -EINVAL;
}
return 0;
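The multipath hunk above moves the bounds check ahead of the dereference: previously desc->nnsids was read from the log buffer before verifying that a whole descriptor still fits at the current offset, so a malformed ANA log could be read out of bounds on the last iteration. A minimal standalone sketch of the corrected walk over variable-length records (names hypothetical; assumes buf_size >= sizeof(struct desc)):

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct desc {                   /* fixed header followed by nnsids 32-bit ids */
        uint32_t grpid;
        uint32_t nnsids;
};

static int parse(const uint8_t *buf, size_t buf_size, size_t ngrps)
{
        size_t offset = 0;

        for (size_t i = 0; i < ngrps; i++) {
                /* validate before touching the header, not after */
                if (offset > buf_size - sizeof(struct desc))
                        return -1;

                const struct desc *d = (const void *)(buf + offset);

                offset += sizeof(*d) + d->nnsids * sizeof(uint32_t);
        }
        return 0;
}

int main(void)
{
        uint8_t log[16] = { 0 };

        /* claims 3 groups but has room for only two headers: rejected */
        printf("%d\n", parse(log, sizeof(log), 3));     /* prints: -1 */
        return 0;
}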
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 22e8401352c2..3b9cbe0668fa 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -115,6 +115,11 @@ enum nvme_quirks {
* Prevent tag overlap between queues
*/
NVME_QUIRK_SHARED_TAGS = (1 << 13),
+
+ /*
+ * Don't change the value of the temperature threshold feature
+ */
+ NVME_QUIRK_NO_TEMP_THRESH_CHANGE = (1 << 14),
};
/*
@@ -231,6 +236,8 @@ struct nvme_ctrl {
u16 kas;
u8 npss;
u8 apsta;
+ u16 wctemp;
+ u16 cctemp;
u32 oaes;
u32 aen_result;
u32 ctratt;
@@ -419,9 +426,20 @@ static inline int nvme_reset_subsystem(struct nvme_ctrl *ctrl)
return ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, 0x4E564D65);
}
-static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector)
+/*
+ * Convert a 512B sector number to a device logical block number.
+ */
+static inline u64 nvme_sect_to_lba(struct nvme_ns *ns, sector_t sector)
+{
+ return sector >> (ns->lba_shift - SECTOR_SHIFT);
+}
+
+/*
+ * Convert a device logical block number to a 512B sector number.
+ */
+static inline sector_t nvme_lba_to_sect(struct nvme_ns *ns, u64 lba)
{
- return (sector >> (ns->lba_shift - 9));
+ return lba << (ns->lba_shift - SECTOR_SHIFT);
}
static inline void nvme_end_request(struct request *req, __le16 status,
@@ -446,6 +464,11 @@ static inline void nvme_put_ctrl(struct nvme_ctrl *ctrl)
put_device(ctrl->device);
}
+static inline bool nvme_is_aen_req(u16 qid, __u16 command_id)
+{
+ return !qid && command_id >= NVME_AQ_BLK_MQ_DEPTH;
+}
+
void nvme_complete_rq(struct request *req);
bool nvme_cancel_request(struct request *req, void *data, bool reserved);
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
@@ -652,4 +675,10 @@ static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)
return dev_to_disk(dev)->private_data;
}
+#ifdef CONFIG_NVME_HWMON
+void nvme_hwmon_init(struct nvme_ctrl *ctrl);
+#else
+static inline void nvme_hwmon_init(struct nvme_ctrl *ctrl) { }
+#endif
+
#endif /* _NVME_H */
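The renamed helpers spell out the conversion direction: nvme_sect_to_lba() and nvme_lba_to_sect() translate between the block layer's fixed 512-byte sectors (SECTOR_SHIFT is 9) and the namespace's logical blocks of 1 << lba_shift bytes. A standalone sketch of the shift arithmetic for a namespace with 4096-byte blocks:

#include <stdio.h>
#include <stdint.h>

#define SECTOR_SHIFT 9

static uint64_t sect_to_lba(unsigned int lba_shift, uint64_t sector)
{
        return sector >> (lba_shift - SECTOR_SHIFT);
}

static uint64_t lba_to_sect(unsigned int lba_shift, uint64_t lba)
{
        return lba << (lba_shift - SECTOR_SHIFT);
}

int main(void)
{
        unsigned int lba_shift = 12;    /* 4096-byte logical blocks */

        /* 512B sector 80 falls in 4KiB block 10, which starts at sector 80 */
        printf("%llu\n", (unsigned long long)sect_to_lba(lba_shift, 80));
        printf("%llu\n", (unsigned long long)lba_to_sect(lba_shift, 10));
        return 0;
}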
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 869f462e6b6e..dcaad5831cee 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -925,7 +925,6 @@ static void nvme_pci_complete_rq(struct request *req)
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
struct nvme_dev *dev = iod->nvmeq->dev;
- nvme_cleanup_cmd(req);
if (blk_integrity_rq(req))
dma_unmap_page(dev->dev, iod->meta_dma,
rq_integrity_vec(req)->bv_len, rq_data_dir(req));
@@ -968,8 +967,7 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
* aborts. We don't even bother to allocate a struct request
* for them but rather special case them here.
*/
- if (unlikely(nvmeq->qid == 0 &&
- cqe->command_id >= NVME_AQ_BLK_MQ_DEPTH)) {
+ if (unlikely(nvme_is_aen_req(nvmeq->qid, cqe->command_id))) {
nvme_complete_async_event(&nvmeq->dev->ctrl,
cqe->status, &cqe->result);
return;
@@ -2982,7 +2980,7 @@ static int nvme_suspend(struct device *dev)
/*
* Clearing npss forces a controller reset on resume. The
- * correct value will be resdicovered then.
+ * correct value will be rediscovered then.
*/
ret = nvme_disable_prepare_reset(ndev, true);
ctrl->npss = 0;
@@ -3082,7 +3080,8 @@ static const struct pci_device_id nvme_id_table[] = {
NVME_QUIRK_DEALLOCATE_ZEROES, },
{ PCI_VDEVICE(INTEL, 0xf1a5), /* Intel 600P/P3100 */
.driver_data = NVME_QUIRK_NO_DEEPEST_PS |
- NVME_QUIRK_MEDIUM_PRIO_SQ },
+ NVME_QUIRK_MEDIUM_PRIO_SQ |
+ NVME_QUIRK_NO_TEMP_THRESH_CHANGE },
{ PCI_VDEVICE(INTEL, 0xf1a6), /* Intel 760p/Pro 7600p */
.driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
{ PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index cb4c3000a57e..dce59459ed41 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1160,8 +1160,6 @@ static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue,
}
ib_dma_unmap_sg(ibdev, req->sg_table.sgl, req->nents, rq_dma_dir(rq));
-
- nvme_cleanup_cmd(rq);
sg_free_table_chained(&req->sg_table, SG_CHUNK_SIZE);
}
@@ -1501,8 +1499,8 @@ static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
* aborts. We don't even bother to allocate a struct request
* for them but rather special case them here.
*/
- if (unlikely(nvme_rdma_queue_idx(queue) == 0 &&
- cqe->command_id >= NVME_AQ_BLK_MQ_DEPTH))
+ if (unlikely(nvme_is_aen_req(nvme_rdma_queue_idx(queue),
+ cqe->command_id)))
nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
&cqe->result);
else
@@ -1768,7 +1766,6 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
if (unlikely(err < 0)) {
dev_err(queue->ctrl->ctrl.device,
"Failed to map data (%d)\n", err);
- nvme_cleanup_cmd(rq);
goto err;
}
@@ -1779,18 +1776,19 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
err = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge,
req->mr ? &req->reg_wr.wr : NULL);
- if (unlikely(err)) {
- nvme_rdma_unmap_data(queue, rq);
- goto err;
- }
+ if (unlikely(err))
+ goto err_unmap;
return BLK_STS_OK;
+err_unmap:
+ nvme_rdma_unmap_data(queue, rq);
err:
if (err == -ENOMEM || err == -EAGAIN)
ret = BLK_STS_RESOURCE;
else
ret = BLK_STS_IOERR;
+ nvme_cleanup_cmd(rq);
unmap_qe:
ib_dma_unmap_single(dev, req->sqe.dma, sizeof(struct nvme_command),
DMA_TO_DEVICE);
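The queue_rq rework above is the standard C unwind ladder: each failure point jumps to a label that releases exactly what was acquired so far, and nvme_cleanup_cmd() now runs once on the shared error path instead of at every call site. A generic standalone sketch of the pattern (the resources are hypothetical; malloc stands in for command setup and DMA mapping):

#include <stdio.h>
#include <stdlib.h>

/* returns 0 on success, -1 with everything released on failure */
static int issue_request(int fail_stage)
{
        char *cmd, *data;
        int err = -1;

        cmd = malloc(64);               /* stage 1: build the command */
        if (!cmd)
                return -1;

        data = malloc(4096);            /* stage 2: map the data */
        if (!data)
                goto err_free_cmd;

        if (fail_stage == 3)            /* stage 3: posting the send fails */
                goto err_unmap;

        free(data);                     /* success completion path */
        free(cmd);
        return 0;

err_unmap:                              /* undo stage 2 */
        free(data);
err_free_cmd:                           /* undo stage 1 */
        free(cmd);
        return err;
}

int main(void)
{
        printf("%d %d\n", issue_request(0), issue_request(3));  /* 0 -1 */
        return 0;
}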
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 7544be84ab35..6d43b23a0fc8 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -491,8 +491,8 @@ static int nvme_tcp_handle_comp(struct nvme_tcp_queue *queue,
* aborts. We don't even bother to allocate a struct request
* for them but rather special case them here.
*/
- if (unlikely(nvme_tcp_queue_id(queue) == 0 &&
- cqe->command_id >= NVME_AQ_BLK_MQ_DEPTH))
+ if (unlikely(nvme_is_aen_req(nvme_tcp_queue_id(queue),
+ cqe->command_id)))
nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
&cqe->result);
else
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 831a062d27cb..56c21b501185 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -31,7 +31,7 @@ u64 nvmet_get_log_page_offset(struct nvme_command *cmd)
static void nvmet_execute_get_log_page_noop(struct nvmet_req *req)
{
- nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->data_len));
+ nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->transfer_len));
}
static void nvmet_execute_get_log_page_error(struct nvmet_req *req)
@@ -134,7 +134,7 @@ static void nvmet_execute_get_log_page_smart(struct nvmet_req *req)
u16 status = NVME_SC_INTERNAL;
unsigned long flags;
- if (req->data_len != sizeof(*log))
+ if (req->transfer_len != sizeof(*log))
goto out;
log = kzalloc(sizeof(*log), GFP_KERNEL);
@@ -196,7 +196,7 @@ static void nvmet_execute_get_log_changed_ns(struct nvmet_req *req)
u16 status = NVME_SC_INTERNAL;
size_t len;
- if (req->data_len != NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32))
+ if (req->transfer_len != NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32))
goto out;
mutex_lock(&ctrl->lock);
@@ -206,7 +206,7 @@ static void nvmet_execute_get_log_changed_ns(struct nvmet_req *req)
len = ctrl->nr_changed_ns * sizeof(__le32);
status = nvmet_copy_to_sgl(req, 0, ctrl->changed_ns_list, len);
if (!status)
- status = nvmet_zero_sgl(req, len, req->data_len - len);
+ status = nvmet_zero_sgl(req, len, req->transfer_len - len);
ctrl->nr_changed_ns = 0;
nvmet_clear_aen_bit(req, NVME_AEN_BIT_NS_ATTR);
mutex_unlock(&ctrl->lock);
@@ -282,6 +282,36 @@ out:
nvmet_req_complete(req, status);
}
+static void nvmet_execute_get_log_page(struct nvmet_req *req)
+{
+ if (!nvmet_check_data_len(req, nvmet_get_log_page_len(req->cmd)))
+ return;
+
+ switch (req->cmd->get_log_page.lid) {
+ case NVME_LOG_ERROR:
+ return nvmet_execute_get_log_page_error(req);
+ case NVME_LOG_SMART:
+ return nvmet_execute_get_log_page_smart(req);
+ case NVME_LOG_FW_SLOT:
+ /*
+ * We only support a single firmware slot which always is
+ * active, so we can zero out the whole firmware slot log and
+ * still claim to fully implement this mandatory log page.
+ */
+ return nvmet_execute_get_log_page_noop(req);
+ case NVME_LOG_CHANGED_NS:
+ return nvmet_execute_get_log_changed_ns(req);
+ case NVME_LOG_CMD_EFFECTS:
+ return nvmet_execute_get_log_cmd_effects_ns(req);
+ case NVME_LOG_ANA:
+ return nvmet_execute_get_log_page_ana(req);
+ }
+ pr_err("unhandled lid %d on qid %d\n",
+ req->cmd->get_log_page.lid, req->sq->qid);
+ req->error_loc = offsetof(struct nvme_get_log_page_command, lid);
+ nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
+}
+
static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
{
struct nvmet_ctrl *ctrl = req->sq->ctrl;
@@ -565,6 +595,28 @@ out:
nvmet_req_complete(req, status);
}
+static void nvmet_execute_identify(struct nvmet_req *req)
+{
+ if (!nvmet_check_data_len(req, NVME_IDENTIFY_DATA_SIZE))
+ return;
+
+ switch (req->cmd->identify.cns) {
+ case NVME_ID_CNS_NS:
+ return nvmet_execute_identify_ns(req);
+ case NVME_ID_CNS_CTRL:
+ return nvmet_execute_identify_ctrl(req);
+ case NVME_ID_CNS_NS_ACTIVE_LIST:
+ return nvmet_execute_identify_nslist(req);
+ case NVME_ID_CNS_NS_DESC_LIST:
+ return nvmet_execute_identify_desclist(req);
+ }
+
+ pr_err("unhandled identify cns %d on qid %d\n",
+ req->cmd->identify.cns, req->sq->qid);
+ req->error_loc = offsetof(struct nvme_identify, cns);
+ nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
+}
+
/*
* A "minimum viable" abort implementation: the command is mandatory in the
* spec, but we are not required to do any useful work. We couldn't really
@@ -574,6 +626,8 @@ out:
*/
static void nvmet_execute_abort(struct nvmet_req *req)
{
+ if (!nvmet_check_data_len(req, 0))
+ return;
nvmet_set_result(req, 1);
nvmet_req_complete(req, 0);
}
@@ -658,6 +712,9 @@ static void nvmet_execute_set_features(struct nvmet_req *req)
u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
u16 status = 0;
+ if (!nvmet_check_data_len(req, 0))
+ return;
+
switch (cdw10 & 0xff) {
case NVME_FEAT_NUM_QUEUES:
nvmet_set_result(req,
@@ -721,6 +778,9 @@ static void nvmet_execute_get_features(struct nvmet_req *req)
u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
u16 status = 0;
+ if (!nvmet_check_data_len(req, 0))
+ return;
+
switch (cdw10 & 0xff) {
/*
* These features are mandatory in the spec, but we don't
@@ -785,6 +845,9 @@ void nvmet_execute_async_event(struct nvmet_req *req)
{
struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ if (!nvmet_check_data_len(req, 0))
+ return;
+
mutex_lock(&ctrl->lock);
if (ctrl->nr_async_event_cmds >= NVMET_ASYNC_EVENTS) {
mutex_unlock(&ctrl->lock);
@@ -801,6 +864,9 @@ void nvmet_execute_keep_alive(struct nvmet_req *req)
{
struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ if (!nvmet_check_data_len(req, 0))
+ return;
+
pr_debug("ctrl %d update keep-alive timer for %d secs\n",
ctrl->cntlid, ctrl->kato);
@@ -813,77 +879,36 @@ u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
struct nvme_command *cmd = req->cmd;
u16 ret;
+ if (nvme_is_fabrics(cmd))
+ return nvmet_parse_fabrics_cmd(req);
+ if (req->sq->ctrl->subsys->type == NVME_NQN_DISC)
+ return nvmet_parse_discovery_cmd(req);
+
ret = nvmet_check_ctrl_status(req, cmd);
if (unlikely(ret))
return ret;
switch (cmd->common.opcode) {
case nvme_admin_get_log_page:
- req->data_len = nvmet_get_log_page_len(cmd);
-
- switch (cmd->get_log_page.lid) {
- case NVME_LOG_ERROR:
- req->execute = nvmet_execute_get_log_page_error;
- return 0;
- case NVME_LOG_SMART:
- req->execute = nvmet_execute_get_log_page_smart;
- return 0;
- case NVME_LOG_FW_SLOT:
- /*
- * We only support a single firmware slot which always
- * is active, so we can zero out the whole firmware slot
- * log and still claim to fully implement this mandatory
- * log page.
- */
- req->execute = nvmet_execute_get_log_page_noop;
- return 0;
- case NVME_LOG_CHANGED_NS:
- req->execute = nvmet_execute_get_log_changed_ns;
- return 0;
- case NVME_LOG_CMD_EFFECTS:
- req->execute = nvmet_execute_get_log_cmd_effects_ns;
- return 0;
- case NVME_LOG_ANA:
- req->execute = nvmet_execute_get_log_page_ana;
- return 0;
- }
- break;
+ req->execute = nvmet_execute_get_log_page;
+ return 0;
case nvme_admin_identify:
- req->data_len = NVME_IDENTIFY_DATA_SIZE;
- switch (cmd->identify.cns) {
- case NVME_ID_CNS_NS:
- req->execute = nvmet_execute_identify_ns;
- return 0;
- case NVME_ID_CNS_CTRL:
- req->execute = nvmet_execute_identify_ctrl;
- return 0;
- case NVME_ID_CNS_NS_ACTIVE_LIST:
- req->execute = nvmet_execute_identify_nslist;
- return 0;
- case NVME_ID_CNS_NS_DESC_LIST:
- req->execute = nvmet_execute_identify_desclist;
- return 0;
- }
- break;
+ req->execute = nvmet_execute_identify;
+ return 0;
case nvme_admin_abort_cmd:
req->execute = nvmet_execute_abort;
- req->data_len = 0;
return 0;
case nvme_admin_set_features:
req->execute = nvmet_execute_set_features;
- req->data_len = 0;
return 0;
case nvme_admin_get_features:
req->execute = nvmet_execute_get_features;
- req->data_len = 0;
return 0;
case nvme_admin_async_event:
req->execute = nvmet_execute_async_event;
- req->data_len = 0;
return 0;
case nvme_admin_keep_alive:
req->execute = nvmet_execute_keep_alive;
- req->data_len = 0;
return 0;
}
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 3a67e244e568..28438b833c1b 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -892,14 +892,10 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
}
if (unlikely(!req->sq->ctrl))
- /* will return an error for any Non-connect command: */
+ /* will return an error for any non-connect command: */
status = nvmet_parse_connect_cmd(req);
else if (likely(req->sq->qid != 0))
status = nvmet_parse_io_cmd(req);
- else if (nvme_is_fabrics(req->cmd))
- status = nvmet_parse_fabrics_cmd(req);
- else if (req->sq->ctrl->subsys->type == NVME_NQN_DISC)
- status = nvmet_parse_discovery_cmd(req);
else
status = nvmet_parse_admin_cmd(req);
@@ -930,15 +926,17 @@ void nvmet_req_uninit(struct nvmet_req *req)
}
EXPORT_SYMBOL_GPL(nvmet_req_uninit);
-void nvmet_req_execute(struct nvmet_req *req)
+bool nvmet_check_data_len(struct nvmet_req *req, size_t data_len)
{
- if (unlikely(req->data_len != req->transfer_len)) {
+ if (unlikely(data_len != req->transfer_len)) {
req->error_loc = offsetof(struct nvme_common_command, dptr);
nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR);
- } else
- req->execute(req);
+ return false;
+ }
+
+ return true;
}
-EXPORT_SYMBOL_GPL(nvmet_req_execute);
+EXPORT_SYMBOL_GPL(nvmet_check_data_len);
int nvmet_req_alloc_sgl(struct nvmet_req *req)
{
@@ -966,7 +964,7 @@ int nvmet_req_alloc_sgl(struct nvmet_req *req)
}
req->sg = sgl_alloc(req->transfer_len, GFP_KERNEL, &req->sg_cnt);
- if (!req->sg)
+ if (unlikely(!req->sg))
return -ENOMEM;
return 0;
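With nvmet_req_execute() gone, each handler now opens with nvmet_check_data_len(), which completes the request with SGL_INVALID_DATA and returns false on a length mismatch, so the handler can simply return. A standalone sketch of that validate-or-bail contract, with hypothetical names and a plain int standing in for the NVMe status code:

#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

struct req {
        size_t transfer_len;    /* length described by the SGL */
        int status;
};

/* returns true if execution may proceed; otherwise completes the request */
static bool check_data_len(struct req *req, size_t expected)
{
        if (expected != req->transfer_len) {
                req->status = -1;       /* stand-in for SGL_INVALID_DATA */
                return false;
        }
        return true;
}

static void execute_smart_log(struct req *req)
{
        if (!check_data_len(req, 512))  /* the SMART log page is 512 bytes */
                return;
        req->status = 0;                /* ... do the real work ... */
}

int main(void)
{
        struct req req = { .transfer_len = 512 };

        execute_smart_log(&req);
        printf("%d\n", req.status);     /* 0 */
        return 0;
}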
diff --git a/drivers/nvme/target/discovery.c b/drivers/nvme/target/discovery.c
index 3764a8900850..0c2274b21e15 100644
--- a/drivers/nvme/target/discovery.c
+++ b/drivers/nvme/target/discovery.c
@@ -157,7 +157,7 @@ static size_t discovery_log_entries(struct nvmet_req *req)
return entries;
}
-static void nvmet_execute_get_disc_log_page(struct nvmet_req *req)
+static void nvmet_execute_disc_get_log_page(struct nvmet_req *req)
{
const int entry_size = sizeof(struct nvmf_disc_rsp_page_entry);
struct nvmet_ctrl *ctrl = req->sq->ctrl;
@@ -171,6 +171,16 @@ static void nvmet_execute_get_disc_log_page(struct nvmet_req *req)
u16 status = 0;
void *buffer;
+ if (!nvmet_check_data_len(req, data_len))
+ return;
+
+ if (req->cmd->get_log_page.lid != NVME_LOG_DISC) {
+ req->error_loc =
+ offsetof(struct nvme_get_log_page_command, lid);
+ status = NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ goto out;
+ }
+
/* Spec requires dword aligned offsets */
if (offset & 0x3) {
status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
@@ -227,20 +237,35 @@ out:
nvmet_req_complete(req, status);
}
-static void nvmet_execute_identify_disc_ctrl(struct nvmet_req *req)
+static void nvmet_execute_disc_identify(struct nvmet_req *req)
{
struct nvmet_ctrl *ctrl = req->sq->ctrl;
struct nvme_id_ctrl *id;
+ const char model[] = "Linux";
u16 status = 0;
+ if (!nvmet_check_data_len(req, NVME_IDENTIFY_DATA_SIZE))
+ return;
+
+ if (req->cmd->identify.cns != NVME_ID_CNS_CTRL) {
+ req->error_loc = offsetof(struct nvme_identify, cns);
+ status = NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ goto out;
+ }
+
id = kzalloc(sizeof(*id), GFP_KERNEL);
if (!id) {
status = NVME_SC_INTERNAL;
goto out;
}
+ memset(id->sn, ' ', sizeof(id->sn));
+ bin2hex(id->sn, &ctrl->subsys->serial,
+ min(sizeof(ctrl->subsys->serial), sizeof(id->sn) / 2));
memset(id->fr, ' ', sizeof(id->fr));
- strncpy((char *)id->fr, UTS_RELEASE, sizeof(id->fr));
+ memcpy_and_pad(id->mn, sizeof(id->mn), model, sizeof(model) - 1, ' ');
+ memcpy_and_pad(id->fr, sizeof(id->fr),
+ UTS_RELEASE, strlen(UTS_RELEASE), ' ');
/* no limit on data transfer sizes for now */
id->mdts = 0;
@@ -273,6 +298,9 @@ static void nvmet_execute_disc_set_features(struct nvmet_req *req)
u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
u16 stat;
+ if (!nvmet_check_data_len(req, 0))
+ return;
+
switch (cdw10 & 0xff) {
case NVME_FEAT_KATO:
stat = nvmet_set_feat_kato(req);
@@ -296,6 +324,9 @@ static void nvmet_execute_disc_get_features(struct nvmet_req *req)
u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
u16 stat = 0;
+ if (!nvmet_check_data_len(req, 0))
+ return;
+
switch (cdw10 & 0xff) {
case NVME_FEAT_KATO:
nvmet_get_feat_kato(req);
@@ -328,47 +359,22 @@ u16 nvmet_parse_discovery_cmd(struct nvmet_req *req)
switch (cmd->common.opcode) {
case nvme_admin_set_features:
req->execute = nvmet_execute_disc_set_features;
- req->data_len = 0;
return 0;
case nvme_admin_get_features:
req->execute = nvmet_execute_disc_get_features;
- req->data_len = 0;
return 0;
case nvme_admin_async_event:
req->execute = nvmet_execute_async_event;
- req->data_len = 0;
return 0;
case nvme_admin_keep_alive:
req->execute = nvmet_execute_keep_alive;
- req->data_len = 0;
return 0;
case nvme_admin_get_log_page:
- req->data_len = nvmet_get_log_page_len(cmd);
-
- switch (cmd->get_log_page.lid) {
- case NVME_LOG_DISC:
- req->execute = nvmet_execute_get_disc_log_page;
- return 0;
- default:
- pr_err("unsupported get_log_page lid %d\n",
- cmd->get_log_page.lid);
- req->error_loc =
- offsetof(struct nvme_get_log_page_command, lid);
- return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
- }
+ req->execute = nvmet_execute_disc_get_log_page;
+ return 0;
case nvme_admin_identify:
- req->data_len = NVME_IDENTIFY_DATA_SIZE;
- switch (cmd->identify.cns) {
- case NVME_ID_CNS_CTRL:
- req->execute =
- nvmet_execute_identify_disc_ctrl;
- return 0;
- default:
- pr_err("unsupported identify cns %d\n",
- cmd->identify.cns);
- req->error_loc = offsetof(struct nvme_identify, cns);
- return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
- }
+ req->execute = nvmet_execute_disc_identify;
+ return 0;
default:
pr_err("unhandled cmd %d\n", cmd->common.opcode);
req->error_loc = offsetof(struct nvme_common_command, opcode);
diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
index d16b55ffe79f..f7297473d9eb 100644
--- a/drivers/nvme/target/fabrics-cmd.c
+++ b/drivers/nvme/target/fabrics-cmd.c
@@ -12,6 +12,9 @@ static void nvmet_execute_prop_set(struct nvmet_req *req)
u64 val = le64_to_cpu(req->cmd->prop_set.value);
u16 status = 0;
+ if (!nvmet_check_data_len(req, 0))
+ return;
+
if (req->cmd->prop_set.attrib & 1) {
req->error_loc =
offsetof(struct nvmf_property_set_command, attrib);
@@ -38,6 +41,9 @@ static void nvmet_execute_prop_get(struct nvmet_req *req)
u16 status = 0;
u64 val = 0;
+ if (!nvmet_check_data_len(req, 0))
+ return;
+
if (req->cmd->prop_get.attrib & 1) {
switch (le32_to_cpu(req->cmd->prop_get.offset)) {
case NVME_REG_CAP:
@@ -82,11 +88,9 @@ u16 nvmet_parse_fabrics_cmd(struct nvmet_req *req)
switch (cmd->fabrics.fctype) {
case nvme_fabrics_type_property_set:
- req->data_len = 0;
req->execute = nvmet_execute_prop_set;
break;
case nvme_fabrics_type_property_get:
- req->data_len = 0;
req->execute = nvmet_execute_prop_get;
break;
default:
@@ -147,6 +151,9 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
struct nvmet_ctrl *ctrl = NULL;
u16 status = 0;
+ if (!nvmet_check_data_len(req, sizeof(struct nvmf_connect_data)))
+ return;
+
d = kmalloc(sizeof(*d), GFP_KERNEL);
if (!d) {
status = NVME_SC_INTERNAL;
@@ -211,6 +218,9 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
u16 qid = le16_to_cpu(c->qid);
u16 status = 0;
+ if (!nvmet_check_data_len(req, sizeof(struct nvmf_connect_data)))
+ return;
+
d = kmalloc(sizeof(*d), GFP_KERNEL);
if (!d) {
status = NVME_SC_INTERNAL;
@@ -281,7 +291,6 @@ u16 nvmet_parse_connect_cmd(struct nvmet_req *req)
return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
}
- req->data_len = sizeof(struct nvmf_connect_data);
if (cmd->connect.qid == 0)
req->execute = nvmet_execute_admin_connect;
else
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index ce8d819f86cc..a0db6371b43e 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -1495,20 +1495,20 @@ static void
nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
struct nvmet_fc_ls_iod *iod)
{
- struct fcnvme_ls_disconnect_rqst *rqst =
- (struct fcnvme_ls_disconnect_rqst *)iod->rqstbuf;
- struct fcnvme_ls_disconnect_acc *acc =
- (struct fcnvme_ls_disconnect_acc *)iod->rspbuf;
+ struct fcnvme_ls_disconnect_assoc_rqst *rqst =
+ (struct fcnvme_ls_disconnect_assoc_rqst *)iod->rqstbuf;
+ struct fcnvme_ls_disconnect_assoc_acc *acc =
+ (struct fcnvme_ls_disconnect_assoc_acc *)iod->rspbuf;
struct nvmet_fc_tgt_assoc *assoc;
int ret = 0;
memset(acc, 0, sizeof(*acc));
- if (iod->rqstdatalen < sizeof(struct fcnvme_ls_disconnect_rqst))
+ if (iod->rqstdatalen < sizeof(struct fcnvme_ls_disconnect_assoc_rqst))
ret = VERR_DISCONN_LEN;
else if (rqst->desc_list_len !=
fcnvme_lsdesc_len(
- sizeof(struct fcnvme_ls_disconnect_rqst)))
+ sizeof(struct fcnvme_ls_disconnect_assoc_rqst)))
ret = VERR_DISCONN_RQST_LEN;
else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
ret = VERR_ASSOC_ID;
@@ -1523,8 +1523,11 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
fcnvme_lsdesc_len(
sizeof(struct fcnvme_lsdesc_disconn_cmd)))
ret = VERR_DISCONN_CMD_LEN;
- else if ((rqst->discon_cmd.scope != FCNVME_DISCONN_ASSOCIATION) &&
- (rqst->discon_cmd.scope != FCNVME_DISCONN_CONNECTION))
+ /*
+ * As the standard changed on the LS, check if old format and scope
+ * something other than Association (e.g. 0).
+ */
+ else if (rqst->discon_cmd.rsvd8[0])
ret = VERR_DISCONN_SCOPE;
else {
/* match an active association */
@@ -1556,8 +1559,8 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
fcnvme_lsdesc_len(
- sizeof(struct fcnvme_ls_disconnect_acc)),
- FCNVME_LS_DISCONNECT);
+ sizeof(struct fcnvme_ls_disconnect_assoc_acc)),
+ FCNVME_LS_DISCONNECT_ASSOC);
/* release get taken in nvmet_fc_find_target_assoc */
nvmet_fc_tgt_a_put(iod->assoc);
@@ -1632,7 +1635,7 @@ nvmet_fc_handle_ls_rqst(struct nvmet_fc_tgtport *tgtport,
/* Creates an IO Queue/Connection */
nvmet_fc_ls_create_connection(tgtport, iod);
break;
- case FCNVME_LS_DISCONNECT:
+ case FCNVME_LS_DISCONNECT_ASSOC:
/* Terminate a Queue/Connection or the Association */
nvmet_fc_ls_disconnect(tgtport, iod);
break;
@@ -2015,7 +2018,7 @@ nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod)
}
/* data transfer complete, resume with nvmet layer */
- nvmet_req_execute(&fod->req);
+ fod->req.execute(&fod->req);
break;
case NVMET_FCOP_READDATA:
@@ -2231,7 +2234,7 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
* can invoke the nvmet_layer now. If read data, cmd completion will
* push the data
*/
- nvmet_req_execute(&fod->req);
+ fod->req.execute(&fod->req);
return;
transport_error:
@@ -2299,7 +2302,7 @@ nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port,
/* validate iu, so the connection id can be used to find the queue */
if ((cmdiubuf_len != sizeof(*cmdiu)) ||
- (cmdiu->scsi_id != NVME_CMD_SCSI_ID) ||
+ (cmdiu->format_id != NVME_CMD_FORMAT_ID) ||
(cmdiu->fc_id != NVME_CMD_FC_ID) ||
(be16_to_cpu(cmdiu->iu_len) != (sizeof(*cmdiu)/4)))
return -EIO;
diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
index 32008d85172b..b6fca0e421ef 100644
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -147,8 +147,12 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
int sg_cnt = req->sg_cnt;
struct bio *bio;
struct scatterlist *sg;
+ struct blk_plug plug;
sector_t sector;
- int op, op_flags = 0, i;
+ int op, i;
+
+ if (!nvmet_check_data_len(req, nvmet_rw_len(req)))
+ return;
if (!req->sg_cnt) {
nvmet_req_complete(req, 0);
@@ -156,21 +160,20 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
}
if (req->cmd->rw.opcode == nvme_cmd_write) {
- op = REQ_OP_WRITE;
- op_flags = REQ_SYNC | REQ_IDLE;
+ op = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;
if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
- op_flags |= REQ_FUA;
+ op |= REQ_FUA;
} else {
op = REQ_OP_READ;
}
if (is_pci_p2pdma_page(sg_page(req->sg)))
- op_flags |= REQ_NOMERGE;
+ op |= REQ_NOMERGE;
sector = le64_to_cpu(req->cmd->rw.slba);
sector <<= (req->ns->blksize_shift - 9);
- if (req->data_len <= NVMET_MAX_INLINE_DATA_LEN) {
+ if (req->transfer_len <= NVMET_MAX_INLINE_DATA_LEN) {
bio = &req->b.inline_bio;
bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
} else {
@@ -180,8 +183,9 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
bio->bi_iter.bi_sector = sector;
bio->bi_private = req;
bio->bi_end_io = nvmet_bio_done;
- bio_set_op_attrs(bio, op, op_flags);
+ bio->bi_opf = op;
+ blk_start_plug(&plug);
for_each_sg(req->sg, sg, req->sg_cnt, i) {
while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
!= sg->length) {
@@ -190,7 +194,7 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES));
bio_set_dev(bio, req->ns->bdev);
bio->bi_iter.bi_sector = sector;
- bio_set_op_attrs(bio, op, op_flags);
+ bio->bi_opf = op;
bio_chain(bio, prev);
submit_bio(prev);
@@ -201,12 +205,16 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
}
submit_bio(bio);
+ blk_finish_plug(&plug);
}
static void nvmet_bdev_execute_flush(struct nvmet_req *req)
{
struct bio *bio = &req->b.inline_bio;
+ if (!nvmet_check_data_len(req, 0))
+ return;
+
bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
bio_set_dev(bio, req->ns->bdev);
bio->bi_private = req;
@@ -261,12 +269,10 @@ static void nvmet_bdev_execute_discard(struct nvmet_req *req)
if (bio) {
bio->bi_private = req;
bio->bi_end_io = nvmet_bio_done;
- if (status) {
- bio->bi_status = BLK_STS_IOERR;
- bio_endio(bio);
- } else {
+ if (status)
+ bio_io_error(bio);
+ else
submit_bio(bio);
- }
} else {
nvmet_req_complete(req, status);
}
@@ -274,6 +280,9 @@ static void nvmet_bdev_execute_discard(struct nvmet_req *req)
static void nvmet_bdev_execute_dsm(struct nvmet_req *req)
{
+ if (!nvmet_check_data_len(req, nvmet_dsm_len(req)))
+ return;
+
switch (le32_to_cpu(req->cmd->dsm.attributes)) {
case NVME_DSMGMT_AD:
nvmet_bdev_execute_discard(req);
@@ -295,6 +304,9 @@ static void nvmet_bdev_execute_write_zeroes(struct nvmet_req *req)
sector_t nr_sector;
int ret;
+ if (!nvmet_check_data_len(req, 0))
+ return;
+
sector = le64_to_cpu(write_zeroes->slba) <<
(req->ns->blksize_shift - 9);
nr_sector = (((sector_t)le16_to_cpu(write_zeroes->length) + 1) <<
@@ -319,20 +331,15 @@ u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req)
case nvme_cmd_read:
case nvme_cmd_write:
req->execute = nvmet_bdev_execute_rw;
- req->data_len = nvmet_rw_len(req);
return 0;
case nvme_cmd_flush:
req->execute = nvmet_bdev_execute_flush;
- req->data_len = 0;
return 0;
case nvme_cmd_dsm:
req->execute = nvmet_bdev_execute_dsm;
- req->data_len = (le32_to_cpu(cmd->dsm.nr) + 1) *
- sizeof(struct nvme_dsm_range);
return 0;
case nvme_cmd_write_zeroes:
req->execute = nvmet_bdev_execute_write_zeroes;
- req->data_len = 0;
return 0;
default:
pr_err("unhandled cmd %d on qid %d\n", cmd->common.opcode,
diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c
index 05453f5d1448..caebfce06605 100644
--- a/drivers/nvme/target/io-cmd-file.c
+++ b/drivers/nvme/target/io-cmd-file.c
@@ -126,7 +126,7 @@ static void nvmet_file_io_done(struct kiocb *iocb, long ret, long ret2)
mempool_free(req->f.bvec, req->ns->bvec_pool);
}
- if (unlikely(ret != req->data_len))
+ if (unlikely(ret != req->transfer_len))
status = errno_to_nvme_status(req, ret);
nvmet_req_complete(req, status);
}
@@ -146,7 +146,7 @@ static bool nvmet_file_execute_io(struct nvmet_req *req, int ki_flags)
is_sync = true;
pos = le64_to_cpu(req->cmd->rw.slba) << req->ns->blksize_shift;
- if (unlikely(pos + req->data_len > req->ns->size)) {
+ if (unlikely(pos + req->transfer_len > req->ns->size)) {
nvmet_req_complete(req, errno_to_nvme_status(req, -ENOSPC));
return true;
}
@@ -173,7 +173,7 @@ static bool nvmet_file_execute_io(struct nvmet_req *req, int ki_flags)
nr_bvec--;
}
- if (WARN_ON_ONCE(total_len != req->data_len)) {
+ if (WARN_ON_ONCE(total_len != req->transfer_len)) {
ret = -EIO;
goto complete;
}
@@ -232,6 +232,9 @@ static void nvmet_file_execute_rw(struct nvmet_req *req)
{
ssize_t nr_bvec = req->sg_cnt;
+ if (!nvmet_check_data_len(req, nvmet_rw_len(req)))
+ return;
+
if (!req->sg_cnt || !nr_bvec) {
nvmet_req_complete(req, 0);
return;
@@ -273,6 +276,8 @@ static void nvmet_file_flush_work(struct work_struct *w)
static void nvmet_file_execute_flush(struct nvmet_req *req)
{
+ if (!nvmet_check_data_len(req, 0))
+ return;
INIT_WORK(&req->f.work, nvmet_file_flush_work);
schedule_work(&req->f.work);
}
@@ -331,6 +336,8 @@ static void nvmet_file_dsm_work(struct work_struct *w)
static void nvmet_file_execute_dsm(struct nvmet_req *req)
{
+ if (!nvmet_check_data_len(req, nvmet_dsm_len(req)))
+ return;
INIT_WORK(&req->f.work, nvmet_file_dsm_work);
schedule_work(&req->f.work);
}
@@ -359,6 +366,8 @@ static void nvmet_file_write_zeroes_work(struct work_struct *w)
static void nvmet_file_execute_write_zeroes(struct nvmet_req *req)
{
+ if (!nvmet_check_data_len(req, 0))
+ return;
INIT_WORK(&req->f.work, nvmet_file_write_zeroes_work);
schedule_work(&req->f.work);
}
@@ -371,20 +380,15 @@ u16 nvmet_file_parse_io_cmd(struct nvmet_req *req)
case nvme_cmd_read:
case nvme_cmd_write:
req->execute = nvmet_file_execute_rw;
- req->data_len = nvmet_rw_len(req);
return 0;
case nvme_cmd_flush:
req->execute = nvmet_file_execute_flush;
- req->data_len = 0;
return 0;
case nvme_cmd_dsm:
req->execute = nvmet_file_execute_dsm;
- req->data_len = (le32_to_cpu(cmd->dsm.nr) + 1) *
- sizeof(struct nvme_dsm_range);
return 0;
case nvme_cmd_write_zeroes:
req->execute = nvmet_file_execute_write_zeroes;
- req->data_len = 0;
return 0;
default:
pr_err("unhandled cmd for file ns %d on qid %d\n",
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 11f5aea97d1b..a758bb3d5dd4 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -76,7 +76,6 @@ static void nvme_loop_complete_rq(struct request *req)
{
struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
- nvme_cleanup_cmd(req);
sg_free_table_chained(&iod->sg_table, SG_CHUNK_SIZE);
nvme_complete_rq(req);
}
@@ -102,8 +101,8 @@ static void nvme_loop_queue_response(struct nvmet_req *req)
* aborts. We don't even bother to allocate a struct request
* for them but rather special case them here.
*/
- if (unlikely(nvme_loop_queue_idx(queue) == 0 &&
- cqe->command_id >= NVME_AQ_BLK_MQ_DEPTH)) {
+ if (unlikely(nvme_is_aen_req(nvme_loop_queue_idx(queue),
+ cqe->command_id))) {
nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
&cqe->result);
} else {
@@ -126,7 +125,7 @@ static void nvme_loop_execute_work(struct work_struct *work)
struct nvme_loop_iod *iod =
container_of(work, struct nvme_loop_iod, work);
- nvmet_req_execute(&iod->req);
+ iod->req.execute(&iod->req);
}
static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index c51f8dd01dc4..46df45e837c9 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -304,8 +304,6 @@ struct nvmet_req {
} f;
};
int sg_cnt;
- /* data length as parsed from the command: */
- size_t data_len;
/* data length as parsed from the SGL descriptor: */
size_t transfer_len;
@@ -375,7 +373,7 @@ u16 nvmet_parse_fabrics_cmd(struct nvmet_req *req);
bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops);
void nvmet_req_uninit(struct nvmet_req *req);
-void nvmet_req_execute(struct nvmet_req *req);
+bool nvmet_check_data_len(struct nvmet_req *req, size_t data_len);
void nvmet_req_complete(struct nvmet_req *req, u16 status);
int nvmet_req_alloc_sgl(struct nvmet_req *req);
void nvmet_req_free_sgl(struct nvmet_req *req);
@@ -495,6 +493,12 @@ static inline u32 nvmet_rw_len(struct nvmet_req *req)
req->ns->blksize_shift;
}
+static inline u32 nvmet_dsm_len(struct nvmet_req *req)
+{
+ return (le32_to_cpu(req->cmd->dsm.nr) + 1) *
+ sizeof(struct nvme_dsm_range);
+}
+
u16 errno_to_nvme_status(struct nvmet_req *req, int errno);
/* Convert a 32-bit number to a 16-bit 0's based number */
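Worked example for the new helper: the DSM `nr` field is 0-based, so a command with nr == 3 describes 3 + 1 = 4 ranges, and nvmet_dsm_len() returns 4 * sizeof(struct nvme_dsm_range) = 4 * 16 = 64 bytes. That is the value the bdev and file DSM handlers now pass to nvmet_check_data_len() in place of the old parse-time req->data_len.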
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 36d906a7f70d..37d262a65877 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -603,7 +603,7 @@ static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc)
return;
}
- nvmet_req_execute(&rsp->req);
+ rsp->req.execute(&rsp->req);
}
static void nvmet_rdma_use_inline_sg(struct nvmet_rdma_rsp *rsp, u32 len,
@@ -672,13 +672,13 @@ static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp,
return 0;
ret = nvmet_req_alloc_sgl(&rsp->req);
- if (ret < 0)
+ if (unlikely(ret < 0))
goto error_out;
ret = rdma_rw_ctx_init(&rsp->rw, cm_id->qp, cm_id->port_num,
rsp->req.sg, rsp->req.sg_cnt, 0, addr, key,
nvmet_data_dir(&rsp->req));
- if (ret < 0)
+ if (unlikely(ret < 0))
goto error_out;
rsp->n_rdma += ret;
@@ -746,7 +746,7 @@ static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp)
queue->cm_id->port_num, &rsp->read_cqe, NULL))
nvmet_req_complete(&rsp->req, NVME_SC_DATA_XFER_ERROR);
} else {
- nvmet_req_execute(&rsp->req);
+ rsp->req.execute(&rsp->req);
}
return true;
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index d535080b781f..af674fc0bb1e 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -320,7 +320,7 @@ static int nvmet_tcp_map_data(struct nvmet_tcp_cmd *cmd)
struct nvme_sgl_desc *sgl = &cmd->req.cmd->common.dptr.sgl;
u32 len = le32_to_cpu(sgl->length);
- if (!cmd->req.data_len)
+ if (!len)
return 0;
if (sgl->type == ((NVME_SGL_FMT_DATA_DESC << 4) |
@@ -813,13 +813,11 @@ free_crypto:
static void nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue,
struct nvmet_tcp_cmd *cmd, struct nvmet_req *req)
{
+ size_t data_len = le32_to_cpu(req->cmd->common.dptr.sgl.length);
int ret;
- /* recover the expected data transfer length */
- req->data_len = le32_to_cpu(req->cmd->common.dptr.sgl.length);
-
if (!nvme_is_write(cmd->req.cmd) ||
- req->data_len > cmd->req.port->inline_data_size) {
+ data_len > cmd->req.port->inline_data_size) {
nvmet_prepare_receive_pdu(queue);
return;
}
@@ -932,7 +930,7 @@ static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue)
goto out;
}
- nvmet_req_execute(&queue->cmd->req);
+ queue->cmd->req.execute(&queue->cmd->req);
out:
nvmet_prepare_receive_pdu(queue);
return ret;
@@ -1052,7 +1050,7 @@ static int nvmet_tcp_try_recv_data(struct nvmet_tcp_queue *queue)
nvmet_tcp_prep_recv_ddgst(cmd);
return 0;
}
- nvmet_req_execute(&cmd->req);
+ cmd->req.execute(&cmd->req);
}
nvmet_prepare_receive_pdu(queue);
@@ -1092,7 +1090,7 @@ static int nvmet_tcp_try_recv_ddgst(struct nvmet_tcp_queue *queue)
if (!(cmd->flags & NVMET_TCP_F_INIT_FAILED) &&
cmd->rbytes_done == cmd->req.transfer_len)
- nvmet_req_execute(&cmd->req);
+ cmd->req.execute(&cmd->req);
ret = 0;
out:
nvmet_prepare_receive_pdu(queue);
diff --git a/drivers/nvmem/Kconfig b/drivers/nvmem/Kconfig
index c2ec750cae6e..73567e922491 100644
--- a/drivers/nvmem/Kconfig
+++ b/drivers/nvmem/Kconfig
@@ -50,6 +50,7 @@ config NVMEM_IMX_OCOTP
config NVMEM_IMX_OCOTP_SCU
tristate "i.MX8 SCU On-Chip OTP Controller support"
depends on IMX_SCU
+ depends on HAVE_ARM_SMCCC
help
This is a driver for the SCU On-Chip OTP Controller (OCOTP)
available on i.MX8 SoCs.
@@ -119,6 +120,17 @@ config ROCKCHIP_EFUSE
This driver can also be built as a module. If so, the module
will be called nvmem_rockchip_efuse.
+config ROCKCHIP_OTP
+ tristate "Rockchip OTP controller support"
+ depends on ARCH_ROCKCHIP || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+	  This is a simple driver to dump specified values of Rockchip SoCs
+	  from OTP, such as cpu-leakage.
+
+ This driver can also be built as a module. If so, the module
+ will be called nvmem_rockchip_otp.
+
config NVMEM_BCM_OCOTP
tristate "Broadcom On-Chip OTP Controller support"
depends on ARCH_BCM_IPROC || COMPILE_TEST
@@ -230,4 +242,15 @@ config NVMEM_ZYNQMP
If sure, say yes. If unsure, say no.
+config SPRD_EFUSE
+ tristate "Spreadtrum SoC eFuse Support"
+ depends on ARCH_SPRD || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ This is a simple driver to dump specified values of Spreadtrum
+	  SoCs from the eFuse.
+
+ This driver can also be built as a module. If so, the module
+ will be called nvmem-sprd-efuse.
+
endif
diff --git a/drivers/nvmem/Makefile b/drivers/nvmem/Makefile
index e5c153d99a67..9e667823edb3 100644
--- a/drivers/nvmem/Makefile
+++ b/drivers/nvmem/Makefile
@@ -30,6 +30,8 @@ obj-$(CONFIG_QCOM_QFPROM) += nvmem_qfprom.o
nvmem_qfprom-y := qfprom.o
obj-$(CONFIG_ROCKCHIP_EFUSE) += nvmem_rockchip_efuse.o
nvmem_rockchip_efuse-y := rockchip-efuse.o
+obj-$(CONFIG_ROCKCHIP_OTP) += nvmem-rockchip-otp.o
+nvmem-rockchip-otp-y := rockchip-otp.o
obj-$(CONFIG_NVMEM_SUNXI_SID) += nvmem_sunxi_sid.o
nvmem_stm32_romem-y := stm32-romem.o
obj-$(CONFIG_NVMEM_STM32_ROMEM) += nvmem_stm32_romem.o
@@ -50,3 +52,5 @@ obj-$(CONFIG_SC27XX_EFUSE) += nvmem-sc27xx-efuse.o
nvmem-sc27xx-efuse-y := sc27xx-efuse.o
obj-$(CONFIG_NVMEM_ZYNQMP) += nvmem_zynqmp_nvmem.o
nvmem_zynqmp_nvmem-y := zynqmp_nvmem.o
+obj-$(CONFIG_SPRD_EFUSE) += nvmem_sprd_efuse.o
+nvmem_sprd_efuse-y := sprd-efuse.o
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index 057d1ff87d5d..9f1ee9c766ec 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -76,33 +76,6 @@ static struct bus_type nvmem_bus_type = {
.name = "nvmem",
};
-static struct nvmem_device *of_nvmem_find(struct device_node *nvmem_np)
-{
- struct device *d;
-
- if (!nvmem_np)
- return NULL;
-
- d = bus_find_device_by_of_node(&nvmem_bus_type, nvmem_np);
-
- if (!d)
- return NULL;
-
- return to_nvmem_device(d);
-}
-
-static struct nvmem_device *nvmem_find(const char *name)
-{
- struct device *d;
-
- d = bus_find_device_by_name(&nvmem_bus_type, NULL, name);
-
- if (!d)
- return NULL;
-
- return to_nvmem_device(d);
-}
-
static void nvmem_cell_drop(struct nvmem_cell *cell)
{
blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_REMOVE, cell);
@@ -532,13 +505,16 @@ int devm_nvmem_unregister(struct device *dev, struct nvmem_device *nvmem)
}
EXPORT_SYMBOL(devm_nvmem_unregister);
-static struct nvmem_device *__nvmem_device_get(struct device_node *np,
- const char *nvmem_name)
+static struct nvmem_device *__nvmem_device_get(void *data,
+ int (*match)(struct device *dev, const void *data))
{
struct nvmem_device *nvmem = NULL;
+ struct device *dev;
mutex_lock(&nvmem_mutex);
- nvmem = np ? of_nvmem_find(np) : nvmem_find(nvmem_name);
+ dev = bus_find_device(&nvmem_bus_type, NULL, data, match);
+ if (dev)
+ nvmem = to_nvmem_device(dev);
mutex_unlock(&nvmem_mutex);
if (!nvmem)
return ERR_PTR(-EPROBE_DEFER);
@@ -587,7 +563,7 @@ struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id)
if (!nvmem_np)
return ERR_PTR(-ENOENT);
- return __nvmem_device_get(nvmem_np, NULL);
+ return __nvmem_device_get(nvmem_np, device_match_of_node);
}
EXPORT_SYMBOL_GPL(of_nvmem_device_get);
#endif
@@ -613,10 +589,26 @@ struct nvmem_device *nvmem_device_get(struct device *dev, const char *dev_name)
}
- return __nvmem_device_get(NULL, dev_name);
+ return __nvmem_device_get((void *)dev_name, device_match_name);
}
EXPORT_SYMBOL_GPL(nvmem_device_get);
+/**
+ * nvmem_device_find() - Find nvmem device with matching function
+ *
+ * @data: Data to pass to match function
+ * @match: Callback function to check device
+ *
+ * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
+ * on success.
+ */
+struct nvmem_device *nvmem_device_find(void *data,
+ int (*match)(struct device *dev, const void *data))
+{
+ return __nvmem_device_get(data, match);
+}
+EXPORT_SYMBOL_GPL(nvmem_device_find);
+
static int devm_nvmem_device_match(struct device *dev, void *res, void *data)
{
struct nvmem_device **nvmem = res;
@@ -710,7 +702,8 @@ nvmem_cell_get_from_lookup(struct device *dev, const char *con_id)
if ((strcmp(lookup->dev_id, dev_id) == 0) &&
(strcmp(lookup->con_id, con_id) == 0)) {
/* This is the right entry. */
- nvmem = __nvmem_device_get(NULL, lookup->nvmem_name);
+ nvmem = __nvmem_device_get((void *)lookup->nvmem_name,
+ device_match_name);
if (IS_ERR(nvmem)) {
/* Provider may not be registered yet. */
cell = ERR_CAST(nvmem);
@@ -780,7 +773,7 @@ struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, const char *id)
if (!nvmem_np)
return ERR_PTR(-EINVAL);
- nvmem = __nvmem_device_get(nvmem_np, NULL);
+ nvmem = __nvmem_device_get(nvmem_np, device_match_of_node);
of_node_put(nvmem_np);
if (IS_ERR(nvmem))
return ERR_CAST(nvmem);
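A hedged usage sketch for the new nvmem_device_find() (the match callback and the device name are illustrative, not from this patch):

static int example_nvmem_match(struct device *dev, const void *data)
{
	return sysfs_streq(dev_name(dev), data);
}

static struct nvmem_device *example_find_nvmem(void)
{
	/* "imx-ocotp0" is a hypothetical registered nvmem device name */
	return nvmem_device_find((void *)"imx-ocotp0", example_nvmem_match);
}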
diff --git a/drivers/nvmem/imx-ocotp-scu.c b/drivers/nvmem/imx-ocotp-scu.c
index 61a17f943f47..03f1ab23ad51 100644
--- a/drivers/nvmem/imx-ocotp-scu.c
+++ b/drivers/nvmem/imx-ocotp-scu.c
@@ -7,6 +7,7 @@
* Peng Fan <peng.fan@nxp.com>
*/
+#include <linux/arm-smccc.h>
#include <linux/firmware/imx/sci.h>
#include <linux/module.h>
#include <linux/nvmem-provider.h>
@@ -14,14 +15,28 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
+#define IMX_SIP_OTP 0xC200000A
+#define IMX_SIP_OTP_WRITE 0x2
+
enum ocotp_devtype {
IMX8QXP,
IMX8QM,
};
+#define ECC_REGION BIT(0)
+#define HOLE_REGION BIT(1)
+
+struct ocotp_region {
+ u32 start;
+ u32 end;
+ u32 flag;
+};
+
struct ocotp_devtype_data {
int devtype;
int nregs;
+ u32 num_region;
+ struct ocotp_region region[];
};
struct ocotp_priv {
@@ -35,16 +50,63 @@ struct imx_sc_msg_misc_fuse_read {
u32 word;
} __packed;
+static DEFINE_MUTEX(scu_ocotp_mutex);
+
static struct ocotp_devtype_data imx8qxp_data = {
.devtype = IMX8QXP,
.nregs = 800,
+ .num_region = 3,
+ .region = {
+ {0x10, 0x10f, ECC_REGION},
+ {0x110, 0x21F, HOLE_REGION},
+ {0x220, 0x31F, ECC_REGION},
+ },
};
static struct ocotp_devtype_data imx8qm_data = {
.devtype = IMX8QM,
.nregs = 800,
+ .num_region = 2,
+ .region = {
+ {0x10, 0x10f, ECC_REGION},
+ {0x1a0, 0x1ff, ECC_REGION},
+ },
};
+static bool in_hole(void *context, u32 index)
+{
+ struct ocotp_priv *priv = context;
+ const struct ocotp_devtype_data *data = priv->data;
+ int i;
+
+ for (i = 0; i < data->num_region; i++) {
+ if (data->region[i].flag & HOLE_REGION) {
+ if ((index >= data->region[i].start) &&
+ (index <= data->region[i].end))
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static bool in_ecc(void *context, u32 index)
+{
+ struct ocotp_priv *priv = context;
+ const struct ocotp_devtype_data *data = priv->data;
+ int i;
+
+ for (i = 0; i < data->num_region; i++) {
+ if (data->region[i].flag & ECC_REGION) {
+ if ((index >= data->region[i].start) &&
+ (index <= data->region[i].end))
+ return true;
+ }
+ }
+
+ return false;
+}
+
static int imx_sc_misc_otp_fuse_read(struct imx_sc_ipc *ipc, u32 word,
u32 *val)
{
@@ -88,18 +150,19 @@ static int imx_scu_ocotp_read(void *context, unsigned int offset,
if (!p)
return -ENOMEM;
+ mutex_lock(&scu_ocotp_mutex);
+
buf = p;
for (i = index; i < (index + count); i++) {
- if (priv->data->devtype == IMX8QXP) {
- if ((i > 271) && (i < 544)) {
- *buf++ = 0;
- continue;
- }
+ if (in_hole(context, i)) {
+ *buf++ = 0;
+ continue;
}
ret = imx_sc_misc_otp_fuse_read(priv->nvmem_ipc, i, buf);
if (ret) {
+ mutex_unlock(&scu_ocotp_mutex);
kfree(p);
return ret;
}
@@ -108,18 +171,63 @@ static int imx_scu_ocotp_read(void *context, unsigned int offset,
memcpy(val, (u8 *)p + offset % 4, bytes);
+ mutex_unlock(&scu_ocotp_mutex);
+
kfree(p);
return 0;
}
+static int imx_scu_ocotp_write(void *context, unsigned int offset,
+ void *val, size_t bytes)
+{
+ struct ocotp_priv *priv = context;
+ struct arm_smccc_res res;
+ u32 *buf = val;
+ u32 tmp;
+ u32 index;
+ int ret;
+
+ /* allow only writing one complete OTP word at a time */
+ if ((bytes != 4) || (offset % 4))
+ return -EINVAL;
+
+ index = offset >> 2;
+
+ if (in_hole(context, index))
+ return -EINVAL;
+
+ if (in_ecc(context, index)) {
+ pr_warn("ECC region, only program once\n");
+ mutex_lock(&scu_ocotp_mutex);
+ ret = imx_sc_misc_otp_fuse_read(priv->nvmem_ipc, index, &tmp);
+ mutex_unlock(&scu_ocotp_mutex);
+ if (ret)
+ return ret;
+ if (tmp) {
+ pr_warn("ECC region, already has value: %x\n", tmp);
+ return -EIO;
+ }
+ }
+
+ mutex_lock(&scu_ocotp_mutex);
+
+ arm_smccc_smc(IMX_SIP_OTP, IMX_SIP_OTP_WRITE, index, *buf,
+ 0, 0, 0, 0, &res);
+
+ mutex_unlock(&scu_ocotp_mutex);
+
+ return res.a0;
+}
+
static struct nvmem_config imx_scu_ocotp_nvmem_config = {
.name = "imx-scu-ocotp",
- .read_only = true,
+ .read_only = false,
.word_size = 4,
.stride = 1,
.owner = THIS_MODULE,
.reg_read = imx_scu_ocotp_read,
+ .reg_write = imx_scu_ocotp_write,
};
static const struct of_device_id imx_scu_ocotp_dt_ids[] = {
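Given the constraints enforced by imx_scu_ocotp_write() above (one naturally aligned 32-bit word per call, no hole regions, ECC regions programmable once), a consumer write would presumably look like this minimal sketch (word index and value are illustrative, error handling trimmed):

static int example_program_fuse(struct nvmem_device *nvmem)
{
	u32 word = 0x5a5a5a5a;	/* illustrative fuse value */

	/* word index 0x20 -> byte offset 0x80: 4 bytes, 4-byte aligned */
	return nvmem_device_write(nvmem, 0x20 * 4, sizeof(word), &word);
}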
diff --git a/drivers/nvmem/imx-ocotp.c b/drivers/nvmem/imx-ocotp.c
index dff2f3c357f5..fc40555ca4cd 100644
--- a/drivers/nvmem/imx-ocotp.c
+++ b/drivers/nvmem/imx-ocotp.c
@@ -521,6 +521,10 @@ static int imx_ocotp_probe(struct platform_device *pdev)
if (IS_ERR(priv->clk))
return PTR_ERR(priv->clk);
+ clk_prepare_enable(priv->clk);
+ imx_ocotp_clr_err_if_set(priv->base);
+ clk_disable_unprepare(priv->clk);
+
priv->params = of_device_get_match_data(&pdev->dev);
imx_ocotp_nvmem_config.size = 4 * priv->params->nregs;
imx_ocotp_nvmem_config.dev = dev;
diff --git a/drivers/nvmem/rockchip-otp.c b/drivers/nvmem/rockchip-otp.c
new file mode 100644
index 000000000000..9f53bcce2f87
--- /dev/null
+++ b/drivers/nvmem/rockchip-otp.c
@@ -0,0 +1,268 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Rockchip OTP Driver
+ *
+ * Copyright (c) 2018 Rockchip Electronics Co. Ltd.
+ * Author: Finley Xiao <finley.xiao@rock-chips.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+
+/* OTP Register Offsets */
+#define OTPC_SBPI_CTRL 0x0020
+#define OTPC_SBPI_CMD_VALID_PRE 0x0024
+#define OTPC_SBPI_CS_VALID_PRE 0x0028
+#define OTPC_SBPI_STATUS 0x002C
+#define OTPC_USER_CTRL 0x0100
+#define OTPC_USER_ADDR 0x0104
+#define OTPC_USER_ENABLE 0x0108
+#define OTPC_USER_Q 0x0124
+#define OTPC_INT_STATUS 0x0304
+#define OTPC_SBPI_CMD0_OFFSET 0x1000
+#define OTPC_SBPI_CMD1_OFFSET 0x1004
+
+/* OTP Register bits and masks */
+#define OTPC_USER_ADDR_MASK GENMASK(31, 16)
+#define OTPC_USE_USER BIT(0)
+#define OTPC_USE_USER_MASK GENMASK(16, 16)
+#define OTPC_USER_FSM_ENABLE BIT(0)
+#define OTPC_USER_FSM_ENABLE_MASK GENMASK(16, 16)
+#define OTPC_SBPI_DONE BIT(1)
+#define OTPC_USER_DONE BIT(2)
+
+#define SBPI_DAP_ADDR 0x02
+#define SBPI_DAP_ADDR_SHIFT 8
+#define SBPI_DAP_ADDR_MASK GENMASK(31, 24)
+#define SBPI_CMD_VALID_MASK GENMASK(31, 16)
+#define SBPI_DAP_CMD_WRF 0xC0
+#define SBPI_DAP_REG_ECC 0x3A
+#define SBPI_ECC_ENABLE 0x00
+#define SBPI_ECC_DISABLE 0x09
+#define SBPI_ENABLE BIT(0)
+#define SBPI_ENABLE_MASK GENMASK(16, 16)
+
+#define OTPC_TIMEOUT 10000
+
+struct rockchip_otp {
+ struct device *dev;
+ void __iomem *base;
+ struct clk_bulk_data *clks;
+ int num_clks;
+ struct reset_control *rst;
+};
+
+/* list of required clocks */
+static const char * const rockchip_otp_clocks[] = {
+ "otp", "apb_pclk", "phy",
+};
+
+struct rockchip_data {
+ int size;
+};
+
+static int rockchip_otp_reset(struct rockchip_otp *otp)
+{
+ int ret;
+
+ ret = reset_control_assert(otp->rst);
+ if (ret) {
+ dev_err(otp->dev, "failed to assert otp phy %d\n", ret);
+ return ret;
+ }
+
+ udelay(2);
+
+ ret = reset_control_deassert(otp->rst);
+ if (ret) {
+ dev_err(otp->dev, "failed to deassert otp phy %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int rockchip_otp_wait_status(struct rockchip_otp *otp, u32 flag)
+{
+ u32 status = 0;
+ int ret;
+
+ ret = readl_poll_timeout_atomic(otp->base + OTPC_INT_STATUS, status,
+ (status & flag), 1, OTPC_TIMEOUT);
+ if (ret)
+ return ret;
+
+ /* clean int status */
+ writel(flag, otp->base + OTPC_INT_STATUS);
+
+ return 0;
+}
+
+static int rockchip_otp_ecc_enable(struct rockchip_otp *otp, bool enable)
+{
+ int ret = 0;
+
+ writel(SBPI_DAP_ADDR_MASK | (SBPI_DAP_ADDR << SBPI_DAP_ADDR_SHIFT),
+ otp->base + OTPC_SBPI_CTRL);
+
+ writel(SBPI_CMD_VALID_MASK | 0x1, otp->base + OTPC_SBPI_CMD_VALID_PRE);
+ writel(SBPI_DAP_CMD_WRF | SBPI_DAP_REG_ECC,
+ otp->base + OTPC_SBPI_CMD0_OFFSET);
+ if (enable)
+ writel(SBPI_ECC_ENABLE, otp->base + OTPC_SBPI_CMD1_OFFSET);
+ else
+ writel(SBPI_ECC_DISABLE, otp->base + OTPC_SBPI_CMD1_OFFSET);
+
+ writel(SBPI_ENABLE_MASK | SBPI_ENABLE, otp->base + OTPC_SBPI_CTRL);
+
+ ret = rockchip_otp_wait_status(otp, OTPC_SBPI_DONE);
+ if (ret < 0)
+ dev_err(otp->dev, "timeout during ecc_enable\n");
+
+ return ret;
+}
+
+static int rockchip_otp_read(void *context, unsigned int offset,
+ void *val, size_t bytes)
+{
+ struct rockchip_otp *otp = context;
+ u8 *buf = val;
+ int ret = 0;
+
+ ret = clk_bulk_prepare_enable(otp->num_clks, otp->clks);
+ if (ret < 0) {
+ dev_err(otp->dev, "failed to prepare/enable clks\n");
+ return ret;
+ }
+
+ ret = rockchip_otp_reset(otp);
+ if (ret) {
+ dev_err(otp->dev, "failed to reset otp phy\n");
+ goto disable_clks;
+ }
+
+ ret = rockchip_otp_ecc_enable(otp, false);
+ if (ret < 0) {
+ dev_err(otp->dev, "rockchip_otp_ecc_enable err\n");
+ goto disable_clks;
+ }
+
+ writel(OTPC_USE_USER | OTPC_USE_USER_MASK, otp->base + OTPC_USER_CTRL);
+ udelay(5);
+ while (bytes--) {
+ writel(offset++ | OTPC_USER_ADDR_MASK,
+ otp->base + OTPC_USER_ADDR);
+ writel(OTPC_USER_FSM_ENABLE | OTPC_USER_FSM_ENABLE_MASK,
+ otp->base + OTPC_USER_ENABLE);
+ ret = rockchip_otp_wait_status(otp, OTPC_USER_DONE);
+ if (ret < 0) {
+ dev_err(otp->dev, "timeout during read setup\n");
+ goto read_end;
+ }
+ *buf++ = readb(otp->base + OTPC_USER_Q);
+ }
+
+read_end:
+ writel(0x0 | OTPC_USE_USER_MASK, otp->base + OTPC_USER_CTRL);
+disable_clks:
+ clk_bulk_disable_unprepare(otp->num_clks, otp->clks);
+
+ return ret;
+}
+
+static struct nvmem_config otp_config = {
+ .name = "rockchip-otp",
+ .owner = THIS_MODULE,
+ .read_only = true,
+ .stride = 1,
+ .word_size = 1,
+ .reg_read = rockchip_otp_read,
+};
+
+static const struct rockchip_data px30_data = {
+ .size = 0x40,
+};
+
+static const struct of_device_id rockchip_otp_match[] = {
+ {
+ .compatible = "rockchip,px30-otp",
+ .data = (void *)&px30_data,
+ },
+ {
+ .compatible = "rockchip,rk3308-otp",
+ .data = (void *)&px30_data,
+ },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, rockchip_otp_match);
+
+static int rockchip_otp_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rockchip_otp *otp;
+ const struct rockchip_data *data;
+ struct nvmem_device *nvmem;
+ int ret, i;
+
+ data = of_device_get_match_data(dev);
+ if (!data) {
+ dev_err(dev, "failed to get match data\n");
+ return -EINVAL;
+ }
+
+ otp = devm_kzalloc(&pdev->dev, sizeof(struct rockchip_otp),
+ GFP_KERNEL);
+ if (!otp)
+ return -ENOMEM;
+
+ otp->dev = dev;
+ otp->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(otp->base))
+ return PTR_ERR(otp->base);
+
+ otp->num_clks = ARRAY_SIZE(rockchip_otp_clocks);
+ otp->clks = devm_kcalloc(dev, otp->num_clks,
+ sizeof(*otp->clks), GFP_KERNEL);
+ if (!otp->clks)
+ return -ENOMEM;
+
+ for (i = 0; i < otp->num_clks; ++i)
+ otp->clks[i].id = rockchip_otp_clocks[i];
+
+ ret = devm_clk_bulk_get(dev, otp->num_clks, otp->clks);
+ if (ret)
+ return ret;
+
+ otp->rst = devm_reset_control_get(dev, "phy");
+ if (IS_ERR(otp->rst))
+ return PTR_ERR(otp->rst);
+
+ otp_config.size = data->size;
+ otp_config.priv = otp;
+ otp_config.dev = dev;
+ nvmem = devm_nvmem_register(dev, &otp_config);
+
+ return PTR_ERR_OR_ZERO(nvmem);
+}
+
+static struct platform_driver rockchip_otp_driver = {
+ .probe = rockchip_otp_probe,
+ .driver = {
+ .name = "rockchip-otp",
+ .of_match_table = rockchip_otp_match,
+ },
+};
+
+module_platform_driver(rockchip_otp_driver);
+MODULE_DESCRIPTION("Rockchip OTP driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvmem/sc27xx-efuse.c b/drivers/nvmem/sc27xx-efuse.c
index c6ee21018d80..ab5e7e0bc3d8 100644
--- a/drivers/nvmem/sc27xx-efuse.c
+++ b/drivers/nvmem/sc27xx-efuse.c
@@ -211,7 +211,7 @@ static int sc27xx_efuse_probe(struct platform_device *pdev)
return ret;
}
- efuse->hwlock = hwspin_lock_request_specific(ret);
+ efuse->hwlock = devm_hwspin_lock_request_specific(&pdev->dev, ret);
if (!efuse->hwlock) {
dev_err(&pdev->dev, "failed to request hwspinlock\n");
return -ENXIO;
@@ -219,7 +219,6 @@ static int sc27xx_efuse_probe(struct platform_device *pdev)
mutex_init(&efuse->mutex);
efuse->dev = &pdev->dev;
- platform_set_drvdata(pdev, efuse);
econfig.stride = 1;
econfig.word_size = 1;
@@ -232,21 +231,12 @@ static int sc27xx_efuse_probe(struct platform_device *pdev)
nvmem = devm_nvmem_register(&pdev->dev, &econfig);
if (IS_ERR(nvmem)) {
dev_err(&pdev->dev, "failed to register nvmem config\n");
- hwspin_lock_free(efuse->hwlock);
return PTR_ERR(nvmem);
}
return 0;
}
-static int sc27xx_efuse_remove(struct platform_device *pdev)
-{
- struct sc27xx_efuse *efuse = platform_get_drvdata(pdev);
-
- hwspin_lock_free(efuse->hwlock);
- return 0;
-}
-
static const struct of_device_id sc27xx_efuse_of_match[] = {
{ .compatible = "sprd,sc2731-efuse" },
{ }
@@ -254,7 +244,6 @@ static const struct of_device_id sc27xx_efuse_of_match[] = {
static struct platform_driver sc27xx_efuse_driver = {
.probe = sc27xx_efuse_probe,
- .remove = sc27xx_efuse_remove,
.driver = {
.name = "sc27xx-efuse",
.of_match_table = sc27xx_efuse_of_match,
diff --git a/drivers/nvmem/sprd-efuse.c b/drivers/nvmem/sprd-efuse.c
new file mode 100644
index 000000000000..2f1e0fbd1901
--- /dev/null
+++ b/drivers/nvmem/sprd-efuse.c
@@ -0,0 +1,424 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2019 Spreadtrum Communications Inc.
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/hwspinlock.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#define SPRD_EFUSE_ENABLE 0x20
+#define SPRD_EFUSE_ERR_FLAG 0x24
+#define SPRD_EFUSE_ERR_CLR 0x28
+#define SPRD_EFUSE_MAGIC_NUM 0x2c
+#define SPRD_EFUSE_FW_CFG 0x50
+#define SPRD_EFUSE_PW_SWT 0x54
+#define SPRD_EFUSE_MEM(val) (0x1000 + ((val) << 2))
+
+#define SPRD_EFUSE_VDD_EN BIT(0)
+#define SPRD_EFUSE_AUTO_CHECK_EN BIT(1)
+#define SPRD_EFUSE_DOUBLE_EN BIT(2)
+#define SPRD_EFUSE_MARGIN_RD_EN BIT(3)
+#define SPRD_EFUSE_LOCK_WR_EN BIT(4)
+
+#define SPRD_EFUSE_ERR_CLR_MASK GENMASK(13, 0)
+
+#define SPRD_EFUSE_ENK1_ON BIT(0)
+#define SPRD_EFUSE_ENK2_ON BIT(1)
+#define SPRD_EFUSE_PROG_EN BIT(2)
+
+#define SPRD_EFUSE_MAGIC_NUMBER 0x8810
+
+/* Block width (bytes) definitions */
+#define SPRD_EFUSE_BLOCK_WIDTH 4
+
+/*
+ * The Spreadtrum AP efuse contains 2 parts: normal efuse and secure efuse,
+ * and we can only access the normal efuse in kernel. So define the normal
+ * block offset index and normal block numbers.
+ */
+#define SPRD_EFUSE_NORMAL_BLOCK_NUMS 24
+#define SPRD_EFUSE_NORMAL_BLOCK_OFFSET 72
+
+/* Timeout (ms) for the trylock of hardware spinlocks */
+#define SPRD_EFUSE_HWLOCK_TIMEOUT 5000
+
+/*
+ * Different Spreadtrum SoCs can have different numbers of normal blocks
+ * and block offsets. Some SoCs also support a block-double feature: when
+ * reading or writing efuse memory, the controller stores the data twice,
+ * in case one copy becomes corrupted over time.
+ *
+ * Thus we keep these parameters in the per-variant device data.
+ */
+struct sprd_efuse_variant_data {
+ u32 blk_nums;
+ u32 blk_offset;
+ bool blk_double;
+};
+
+struct sprd_efuse {
+ struct device *dev;
+ struct clk *clk;
+ struct hwspinlock *hwlock;
+ struct mutex mutex;
+ void __iomem *base;
+ const struct sprd_efuse_variant_data *data;
+};
+
+static const struct sprd_efuse_variant_data ums312_data = {
+ .blk_nums = SPRD_EFUSE_NORMAL_BLOCK_NUMS,
+ .blk_offset = SPRD_EFUSE_NORMAL_BLOCK_OFFSET,
+ .blk_double = false,
+};
+
+/*
+ * On Spreadtrum platforms multiple subsystems access the single efuse
+ * controller, so one hardware spinlock is needed to synchronize between
+ * them.
+ */
+static int sprd_efuse_lock(struct sprd_efuse *efuse)
+{
+ int ret;
+
+ mutex_lock(&efuse->mutex);
+
+ ret = hwspin_lock_timeout_raw(efuse->hwlock,
+ SPRD_EFUSE_HWLOCK_TIMEOUT);
+ if (ret) {
+ dev_err(efuse->dev, "timeout get the hwspinlock\n");
+ mutex_unlock(&efuse->mutex);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void sprd_efuse_unlock(struct sprd_efuse *efuse)
+{
+ hwspin_unlock_raw(efuse->hwlock);
+ mutex_unlock(&efuse->mutex);
+}
+
+static void sprd_efuse_set_prog_power(struct sprd_efuse *efuse, bool en)
+{
+ u32 val = readl(efuse->base + SPRD_EFUSE_PW_SWT);
+
+ if (en)
+ val &= ~SPRD_EFUSE_ENK2_ON;
+ else
+ val &= ~SPRD_EFUSE_ENK1_ON;
+
+ writel(val, efuse->base + SPRD_EFUSE_PW_SWT);
+
+	/* Wait 1000us after switching efuse power so the supply stabilizes. */
+ usleep_range(1000, 1200);
+
+ if (en)
+ val |= SPRD_EFUSE_ENK1_ON;
+ else
+ val |= SPRD_EFUSE_ENK2_ON;
+
+ writel(val, efuse->base + SPRD_EFUSE_PW_SWT);
+
+	/* Wait 1000us after switching efuse power so the supply stabilizes. */
+ usleep_range(1000, 1200);
+}
+
+static void sprd_efuse_set_read_power(struct sprd_efuse *efuse, bool en)
+{
+ u32 val = readl(efuse->base + SPRD_EFUSE_ENABLE);
+
+ if (en)
+ val |= SPRD_EFUSE_VDD_EN;
+ else
+ val &= ~SPRD_EFUSE_VDD_EN;
+
+ writel(val, efuse->base + SPRD_EFUSE_ENABLE);
+
+	/* Wait 1000us after switching efuse power so the supply stabilizes. */
+ usleep_range(1000, 1200);
+}
+
+static void sprd_efuse_set_prog_lock(struct sprd_efuse *efuse, bool en)
+{
+ u32 val = readl(efuse->base + SPRD_EFUSE_ENABLE);
+
+ if (en)
+ val |= SPRD_EFUSE_LOCK_WR_EN;
+ else
+ val &= ~SPRD_EFUSE_LOCK_WR_EN;
+
+ writel(val, efuse->base + SPRD_EFUSE_ENABLE);
+}
+
+static void sprd_efuse_set_auto_check(struct sprd_efuse *efuse, bool en)
+{
+ u32 val = readl(efuse->base + SPRD_EFUSE_ENABLE);
+
+ if (en)
+ val |= SPRD_EFUSE_AUTO_CHECK_EN;
+ else
+ val &= ~SPRD_EFUSE_AUTO_CHECK_EN;
+
+ writel(val, efuse->base + SPRD_EFUSE_ENABLE);
+}
+
+static void sprd_efuse_set_data_double(struct sprd_efuse *efuse, bool en)
+{
+ u32 val = readl(efuse->base + SPRD_EFUSE_ENABLE);
+
+ if (en)
+ val |= SPRD_EFUSE_DOUBLE_EN;
+ else
+ val &= ~SPRD_EFUSE_DOUBLE_EN;
+
+ writel(val, efuse->base + SPRD_EFUSE_ENABLE);
+}
+
+static void sprd_efuse_set_prog_en(struct sprd_efuse *efuse, bool en)
+{
+ u32 val = readl(efuse->base + SPRD_EFUSE_PW_SWT);
+
+ if (en)
+ val |= SPRD_EFUSE_PROG_EN;
+ else
+ val &= ~SPRD_EFUSE_PROG_EN;
+
+ writel(val, efuse->base + SPRD_EFUSE_PW_SWT);
+}
+
+static int sprd_efuse_raw_prog(struct sprd_efuse *efuse, u32 blk, bool doub,
+ bool lock, u32 *data)
+{
+ u32 status;
+ int ret = 0;
+
+ /*
+	 * We need to set the correct magic number before writing the efuse
+	 * to allow programming, and other programming stays blocked until
+	 * we clear the magic number.
+ */
+ writel(SPRD_EFUSE_MAGIC_NUMBER,
+ efuse->base + SPRD_EFUSE_MAGIC_NUM);
+
+ /*
+	 * Power on the efuse, enable programming, and enable double data
+	 * if requested.
+ */
+ sprd_efuse_set_prog_power(efuse, true);
+ sprd_efuse_set_prog_en(efuse, true);
+ sprd_efuse_set_data_double(efuse, doub);
+
+ /*
+ * Enable the auto-check function to validate if the programming is
+ * successful.
+ */
+ sprd_efuse_set_auto_check(efuse, true);
+
+ writel(*data, efuse->base + SPRD_EFUSE_MEM(blk));
+
+ /* Disable auto-check and data double after programming */
+ sprd_efuse_set_auto_check(efuse, false);
+ sprd_efuse_set_data_double(efuse, false);
+
+ /*
+ * Check the efuse error status, if the programming is successful,
+ * we should lock this efuse block to avoid programming again.
+ */
+ status = readl(efuse->base + SPRD_EFUSE_ERR_FLAG);
+ if (status) {
+ dev_err(efuse->dev,
+ "write error status %d of block %d\n", ret, blk);
+
+ writel(SPRD_EFUSE_ERR_CLR_MASK,
+ efuse->base + SPRD_EFUSE_ERR_CLR);
+ ret = -EBUSY;
+ } else {
+ sprd_efuse_set_prog_lock(efuse, lock);
+ writel(*data, efuse->base + SPRD_EFUSE_MEM(blk));
+ sprd_efuse_set_prog_lock(efuse, false);
+ }
+
+ sprd_efuse_set_prog_power(efuse, false);
+ writel(0, efuse->base + SPRD_EFUSE_MAGIC_NUM);
+
+ return ret;
+}
+
+static int sprd_efuse_raw_read(struct sprd_efuse *efuse, int blk, u32 *val,
+ bool doub)
+{
+ u32 status;
+
+ /*
+	 * Power on the efuse before reading data from it, and power it
+	 * off again once the read has finished.
+ */
+ sprd_efuse_set_read_power(efuse, true);
+
+ /* Enable double data if asked */
+ sprd_efuse_set_data_double(efuse, doub);
+
+ /* Start to read data from efuse block */
+ *val = readl(efuse->base + SPRD_EFUSE_MEM(blk));
+
+ /* Disable double data */
+ sprd_efuse_set_data_double(efuse, false);
+
+ /* Power off the efuse */
+ sprd_efuse_set_read_power(efuse, false);
+
+ /*
+	 * Check the efuse error status and clear any errors that
+	 * occurred.
+ */
+ status = readl(efuse->base + SPRD_EFUSE_ERR_FLAG);
+ if (status) {
+ dev_err(efuse->dev,
+ "read error status %d of block %d\n", status, blk);
+
+ writel(SPRD_EFUSE_ERR_CLR_MASK,
+ efuse->base + SPRD_EFUSE_ERR_CLR);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int sprd_efuse_read(void *context, u32 offset, void *val, size_t bytes)
+{
+ struct sprd_efuse *efuse = context;
+ bool blk_double = efuse->data->blk_double;
+ u32 index = offset / SPRD_EFUSE_BLOCK_WIDTH + efuse->data->blk_offset;
+ u32 blk_offset = (offset % SPRD_EFUSE_BLOCK_WIDTH) * BITS_PER_BYTE;
+ u32 data;
+ int ret;
+
+ ret = sprd_efuse_lock(efuse);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(efuse->clk);
+ if (ret)
+ goto unlock;
+
+ ret = sprd_efuse_raw_read(efuse, index, &data, blk_double);
+ if (!ret) {
+ data >>= blk_offset;
+ memcpy(val, &data, bytes);
+ }
+
+ clk_disable_unprepare(efuse->clk);
+
+unlock:
+ sprd_efuse_unlock(efuse);
+ return ret;
+}
+
+static int sprd_efuse_write(void *context, u32 offset, void *val, size_t bytes)
+{
+ struct sprd_efuse *efuse = context;
+ int ret;
+
+ ret = sprd_efuse_lock(efuse);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(efuse->clk);
+ if (ret)
+ goto unlock;
+
+ ret = sprd_efuse_raw_prog(efuse, offset, false, false, val);
+
+ clk_disable_unprepare(efuse->clk);
+
+unlock:
+ sprd_efuse_unlock(efuse);
+ return ret;
+}
+
+static int sprd_efuse_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct nvmem_device *nvmem;
+ struct nvmem_config econfig = { };
+ struct sprd_efuse *efuse;
+ const struct sprd_efuse_variant_data *pdata;
+ int ret;
+
+ pdata = of_device_get_match_data(&pdev->dev);
+ if (!pdata) {
+ dev_err(&pdev->dev, "No matching driver data found\n");
+ return -EINVAL;
+ }
+
+ efuse = devm_kzalloc(&pdev->dev, sizeof(*efuse), GFP_KERNEL);
+ if (!efuse)
+ return -ENOMEM;
+
+ efuse->base = devm_platform_ioremap_resource(pdev, 0);
+	if (IS_ERR(efuse->base))
+		return PTR_ERR(efuse->base);
+
+ ret = of_hwspin_lock_get_id(np, 0);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to get hwlock id\n");
+ return ret;
+ }
+
+ efuse->hwlock = devm_hwspin_lock_request_specific(&pdev->dev, ret);
+ if (!efuse->hwlock) {
+ dev_err(&pdev->dev, "failed to request hwlock\n");
+ return -ENXIO;
+ }
+
+ efuse->clk = devm_clk_get(&pdev->dev, "enable");
+ if (IS_ERR(efuse->clk)) {
+ dev_err(&pdev->dev, "failed to get enable clock\n");
+ return PTR_ERR(efuse->clk);
+ }
+
+ mutex_init(&efuse->mutex);
+ efuse->dev = &pdev->dev;
+ efuse->data = pdata;
+
+ econfig.stride = 1;
+ econfig.word_size = 1;
+ econfig.read_only = false;
+ econfig.name = "sprd-efuse";
+ econfig.size = efuse->data->blk_nums * SPRD_EFUSE_BLOCK_WIDTH;
+ econfig.reg_read = sprd_efuse_read;
+ econfig.reg_write = sprd_efuse_write;
+ econfig.priv = efuse;
+ econfig.dev = &pdev->dev;
+ nvmem = devm_nvmem_register(&pdev->dev, &econfig);
+ if (IS_ERR(nvmem)) {
+ dev_err(&pdev->dev, "failed to register nvmem\n");
+ return PTR_ERR(nvmem);
+ }
+
+ return 0;
+}
+
+static const struct of_device_id sprd_efuse_of_match[] = {
+ { .compatible = "sprd,ums312-efuse", .data = &ums312_data },
+ { }
+};
+
+static struct platform_driver sprd_efuse_driver = {
+ .probe = sprd_efuse_probe,
+ .driver = {
+ .name = "sprd-efuse",
+ .of_match_table = sprd_efuse_of_match,
+ },
+};
+
+module_platform_driver(sprd_efuse_driver);
+
+MODULE_AUTHOR("Freeman Liu <freeman.liu@spreadtrum.com>");
+MODULE_DESCRIPTION("Spreadtrum AP efuse driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 223d617ecfe1..f1c23aad951e 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -412,8 +412,8 @@ void *__unflatten_device_tree(const void *blob,
/* Second pass, do actual unflattening */
unflatten_dt_nodes(blob, mem, dad, mynodes);
if (be32_to_cpup(mem + size) != 0xdeadbeef)
- pr_warning("End of tree marker overwritten: %08x\n",
- be32_to_cpup(mem + size));
+ pr_warn("End of tree marker overwritten: %08x\n",
+ be32_to_cpup(mem + size));
if (detached && mynodes) {
of_node_set_flag(*mynodes, OF_DETACHED);
@@ -1120,25 +1120,25 @@ void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size)
size &= PAGE_MASK;
if (base > MAX_MEMBLOCK_ADDR) {
- pr_warning("Ignoring memory block 0x%llx - 0x%llx\n",
- base, base + size);
+ pr_warn("Ignoring memory block 0x%llx - 0x%llx\n",
+ base, base + size);
return;
}
if (base + size - 1 > MAX_MEMBLOCK_ADDR) {
- pr_warning("Ignoring memory range 0x%llx - 0x%llx\n",
- ((u64)MAX_MEMBLOCK_ADDR) + 1, base + size);
+ pr_warn("Ignoring memory range 0x%llx - 0x%llx\n",
+ ((u64)MAX_MEMBLOCK_ADDR) + 1, base + size);
size = MAX_MEMBLOCK_ADDR - base + 1;
}
if (base + size < phys_offset) {
- pr_warning("Ignoring memory block 0x%llx - 0x%llx\n",
- base, base + size);
+ pr_warn("Ignoring memory block 0x%llx - 0x%llx\n",
+ base, base + size);
return;
}
if (base < phys_offset) {
- pr_warning("Ignoring memory range 0x%llx - 0x%llx\n",
- base, phys_offset);
+ pr_warn("Ignoring memory range 0x%llx - 0x%llx\n",
+ base, phys_offset);
size -= phys_offset - base;
base = phys_offset;
}
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index bd6129db6417..c6b87ce2b0cc 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -361,8 +361,8 @@ struct phy_device *of_phy_get_and_connect(struct net_device *dev,
struct phy_device *phy;
int ret;
- iface = of_get_phy_mode(np);
- if ((int)iface < 0)
+ ret = of_get_phy_mode(np, &iface);
+ if (ret)
return NULL;
if (of_phy_is_fixed_link(np)) {
ret = of_phy_register_fixed_link(np);
diff --git a/drivers/of/of_net.c b/drivers/of/of_net.c
index b02734aff8c1..6e411821583e 100644
--- a/drivers/of/of_net.c
+++ b/drivers/of/of_net.c
@@ -15,16 +15,20 @@
/**
* of_get_phy_mode - Get phy mode for given device_node
* @np: Pointer to the given device_node
+ * @interface: Pointer to the result
*
* The function gets phy interface string from property 'phy-mode' or
- * 'phy-connection-type', and return its index in phy_modes table, or errno in
- * error case.
+ * 'phy-connection-type'. On success the index into the phy_modes table
+ * is stored in @interface and 0 is returned. On error @interface is set
+ * to PHY_INTERFACE_MODE_NA and an errno such as -ENODEV is returned.
*/
-int of_get_phy_mode(struct device_node *np)
+int of_get_phy_mode(struct device_node *np, phy_interface_t *interface)
{
const char *pm;
int err, i;
+ *interface = PHY_INTERFACE_MODE_NA;
+
err = of_property_read_string(np, "phy-mode", &pm);
if (err < 0)
err = of_property_read_string(np, "phy-connection-type", &pm);
@@ -32,8 +36,10 @@ int of_get_phy_mode(struct device_node *np)
return err;
for (i = 0; i < PHY_INTERFACE_MODE_MAX; i++)
- if (!strcasecmp(pm, phy_modes(i)))
- return i;
+ if (!strcasecmp(pm, phy_modes(i))) {
+ *interface = i;
+ return 0;
+ }
return -ENODEV;
}
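Callers migrate to the new two-argument of_get_phy_mode() along these lines (a minimal sketch; the fallback policy is the caller's choice, since the helper already sets PHY_INTERFACE_MODE_NA on error):

static phy_interface_t example_get_mode(struct device_node *np)
{
	phy_interface_t iface;

	/* iface is already PHY_INTERFACE_MODE_NA on any error */
	if (of_get_phy_mode(np, &iface))
		return PHY_INTERFACE_MODE_NA;

	return iface;
}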
diff --git a/drivers/of/property.c b/drivers/of/property.c
index d7fa75e31f22..e8202f61a5d9 100644
--- a/drivers/of/property.c
+++ b/drivers/of/property.c
@@ -872,6 +872,20 @@ of_fwnode_property_read_string_array(const struct fwnode_handle *fwnode,
of_property_count_strings(node, propname);
}
+static const char *of_fwnode_get_name(const struct fwnode_handle *fwnode)
+{
+ return kbasename(to_of_node(fwnode)->full_name);
+}
+
+static const char *of_fwnode_get_name_prefix(const struct fwnode_handle *fwnode)
+{
+ /* Root needs no prefix here (its name is "/"). */
+ if (!to_of_node(fwnode)->parent)
+ return "";
+
+ return "/";
+}
+
static struct fwnode_handle *
of_fwnode_get_parent(const struct fwnode_handle *fwnode)
{
@@ -993,6 +1007,8 @@ const struct fwnode_operations of_fwnode_ops = {
.property_present = of_fwnode_property_present,
.property_read_int_array = of_fwnode_property_read_int_array,
.property_read_string_array = of_fwnode_property_read_string_array,
+ .get_name = of_fwnode_get_name,
+ .get_name_prefix = of_fwnode_get_name_prefix,
.get_parent = of_fwnode_get_parent,
.get_next_child_node = of_fwnode_get_next_child_node,
.get_named_child_node = of_fwnode_get_named_child_node,
diff --git a/drivers/opp/core.c b/drivers/opp/core.c
index 9ff0538ee83a..be7a7d332332 100644
--- a/drivers/opp/core.c
+++ b/drivers/opp/core.c
@@ -2103,6 +2103,75 @@ put_table:
}
/**
+ * dev_pm_opp_adjust_voltage() - helper to change the voltage of an OPP
+ * @dev: device for which we do this operation
+ * @freq: OPP frequency to adjust voltage of
+ * @u_volt: new OPP target voltage
+ * @u_volt_min: new OPP min voltage
+ * @u_volt_max: new OPP max voltage
+ *
+ * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
+ * copy operation, returns 0 if no modification was done OR modification was
+ * successful.
+ */
+int dev_pm_opp_adjust_voltage(struct device *dev, unsigned long freq,
+ unsigned long u_volt, unsigned long u_volt_min,
+ unsigned long u_volt_max)
+
+{
+ struct opp_table *opp_table;
+ struct dev_pm_opp *tmp_opp, *opp = ERR_PTR(-ENODEV);
+ int r = 0;
+
+ /* Find the opp_table */
+ opp_table = _find_opp_table(dev);
+ if (IS_ERR(opp_table)) {
+ r = PTR_ERR(opp_table);
+ dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
+ return r;
+ }
+
+ mutex_lock(&opp_table->lock);
+
+ /* Do we have the frequency? */
+ list_for_each_entry(tmp_opp, &opp_table->opp_list, node) {
+ if (tmp_opp->rate == freq) {
+ opp = tmp_opp;
+ break;
+ }
+ }
+
+ if (IS_ERR(opp)) {
+ r = PTR_ERR(opp);
+ goto adjust_unlock;
+ }
+
+ /* Is update really needed? */
+ if (opp->supplies->u_volt == u_volt)
+ goto adjust_unlock;
+
+ opp->supplies->u_volt = u_volt;
+ opp->supplies->u_volt_min = u_volt_min;
+ opp->supplies->u_volt_max = u_volt_max;
+
+ dev_pm_opp_get(opp);
+ mutex_unlock(&opp_table->lock);
+
+ /* Notify the voltage change of the OPP */
+ blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ADJUST_VOLTAGE,
+ opp);
+
+ dev_pm_opp_put(opp);
+ goto adjust_put_table;
+
+adjust_unlock:
+ mutex_unlock(&opp_table->lock);
+adjust_put_table:
+ dev_pm_opp_put_opp_table(opp_table);
+ return r;
+}
+
+/**
* dev_pm_opp_enable() - Enable a specific OPP
* @dev: device for which we do this operation
* @freq: OPP frequency to enable
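A hedged usage sketch for the new dev_pm_opp_adjust_voltage() helper (the frequency and voltage triplet are illustrative):

static int example_adjust(struct device *dev)
{
	/* retune the 1 GHz OPP to 1.15 V after post-silicon calibration */
	return dev_pm_opp_adjust_voltage(dev, 1000000000UL,
					 1150000, 1100000, 1200000);
}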
diff --git a/drivers/oprofile/oprofile_perf.c b/drivers/oprofile/oprofile_perf.c
index 4b150a754890..98a63a5f8763 100644
--- a/drivers/oprofile/oprofile_perf.c
+++ b/drivers/oprofile/oprofile_perf.c
@@ -46,8 +46,8 @@ static void op_overflow_handler(struct perf_event *event,
if (id != num_counters)
oprofile_add_sample(regs, id);
else
- pr_warning("oprofile: ignoring spurious overflow "
- "on cpu %u\n", cpu);
+ pr_warn("oprofile: ignoring spurious overflow on cpu %u\n",
+ cpu);
}
/*
@@ -88,8 +88,8 @@ static int op_create_counter(int cpu, int event)
if (pevent->state != PERF_EVENT_STATE_ACTIVE) {
perf_event_release_kernel(pevent);
- pr_warning("oprofile: failed to enable event %d "
- "on CPU %d\n", event, cpu);
+ pr_warn("oprofile: failed to enable event %d on CPU %d\n",
+ event, cpu);
return -EBUSY;
}
diff --git a/drivers/parport/daisy.c b/drivers/parport/daisy.c
index 5484a46dafda..3b00e2c8e2e9 100644
--- a/drivers/parport/daisy.c
+++ b/drivers/parport/daisy.c
@@ -45,6 +45,7 @@ static struct daisydev {
static DEFINE_SPINLOCK(topology_lock);
static int numdevs;
+static bool daisy_init_done;
/* Forward-declaration of lower-level functions. */
static int mux_present(struct parport *port);
@@ -87,6 +88,24 @@ static struct parport *clone_parport(struct parport *real, int muxport)
return extra;
}
+static int daisy_drv_probe(struct pardevice *par_dev)
+{
+ struct device_driver *drv = par_dev->dev.driver;
+
+ if (strcmp(drv->name, "daisy_drv"))
+ return -ENODEV;
+ if (strcmp(par_dev->name, daisy_dev_name))
+ return -ENODEV;
+
+ return 0;
+}
+
+static struct parport_driver daisy_driver = {
+ .name = "daisy_drv",
+ .probe = daisy_drv_probe,
+ .devmodel = true,
+};
+
/* Discover the IEEE1284.3 topology on a port -- muxes and daisy chains.
* Return value is number of devices actually detected. */
int parport_daisy_init(struct parport *port)
@@ -98,6 +117,23 @@ int parport_daisy_init(struct parport *port)
int i;
int last_try = 0;
+ if (!daisy_init_done) {
+ /*
+		 * The flag must be set true first, as
+		 * parport_register_driver() might load the low-level
+		 * driver, which announces new ports and thereby
+		 * re-enters parport_daisy_init().
+ */
+ daisy_init_done = true;
+ i = parport_register_driver(&daisy_driver);
+ if (i) {
+ pr_err("daisy registration failed\n");
+ daisy_init_done = false;
+ return i;
+ }
+ }
+
again:
/* Because this is called before any other devices exist,
* we don't have to claim exclusive access. */
@@ -213,10 +249,12 @@ void parport_daisy_fini(struct parport *port)
struct pardevice *parport_open(int devnum, const char *name)
{
struct daisydev *p = topology;
+ struct pardev_cb par_cb;
struct parport *port;
struct pardevice *dev;
int daisy;
+ memset(&par_cb, 0, sizeof(par_cb));
spin_lock(&topology_lock);
while (p && p->devnum != devnum)
p = p->next;
@@ -230,7 +268,7 @@ struct pardevice *parport_open(int devnum, const char *name)
port = parport_get_port(p->port);
spin_unlock(&topology_lock);
- dev = parport_register_device(port, name, NULL, NULL, NULL, 0, NULL);
+ dev = parport_register_dev_model(port, name, &par_cb, devnum);
parport_put_port(port);
if (!dev)
return NULL;
diff --git a/drivers/parport/probe.c b/drivers/parport/probe.c
index e035174ba205..e5e6a463a941 100644
--- a/drivers/parport/probe.c
+++ b/drivers/parport/probe.c
@@ -257,7 +257,7 @@ static ssize_t parport_read_device_id (struct parport *port, char *buffer,
ssize_t parport_device_id (int devnum, char *buffer, size_t count)
{
ssize_t retval = -ENXIO;
- struct pardevice *dev = parport_open (devnum, "Device ID probe");
+ struct pardevice *dev = parport_open(devnum, daisy_dev_name);
if (!dev)
return -ENXIO;
diff --git a/drivers/parport/share.c b/drivers/parport/share.c
index 7b4ee33c1935..d6920ebeabcd 100644
--- a/drivers/parport/share.c
+++ b/drivers/parport/share.c
@@ -230,6 +230,18 @@ static int port_check(struct device *dev, void *dev_drv)
return 0;
}
+/*
+ * Callback for bus_for_each_dev(); returns 1 if the given device is
+ * a parallel port, so the iteration stops at the first port found.
+ */
+
+static int port_detect(struct device *dev, void *dev_drv)
+{
+ if (is_parport(dev))
+ return 1;
+ return 0;
+}
+
/**
* parport_register_driver - register a parallel port device driver
* @drv: structure describing the driver
@@ -266,9 +278,6 @@ static int port_check(struct device *dev, void *dev_drv)
int __parport_register_driver(struct parport_driver *drv, struct module *owner,
const char *mod_name)
{
- if (list_empty(&portlist))
- get_lowlevel_driver();
-
if (drv->devmodel) {
/* using device model */
int ret;
@@ -282,6 +291,15 @@ int __parport_register_driver(struct parport_driver *drv, struct module *owner,
if (ret)
return ret;
+ /*
+		 * Check if the bus has any parallel port registered; if
+		 * none is found, load the low-level driver.
+ */
+ ret = bus_for_each_dev(&parport_bus_type, NULL, NULL,
+ port_detect);
+ if (!ret)
+ get_lowlevel_driver();
+
mutex_lock(&registration_lock);
if (drv->match_port)
bus_for_each_dev(&parport_bus_type, NULL, drv,
@@ -292,6 +310,8 @@ int __parport_register_driver(struct parport_driver *drv, struct module *owner,
drv->devmodel = false;
+ if (list_empty(&portlist))
+ get_lowlevel_driver();
mutex_lock(&registration_lock);
list_for_each_entry(port, &portlist, list)
drv->attach(port);
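For reference, the device-model path exercised above is what a driver opts into by setting .devmodel; a minimal sketch (the probe body is illustrative):

static int example_probe(struct pardevice *par_dev)
{
	return 0;	/* accept every announced device */
}

static struct parport_driver example_driver = {
	.name = "example_drv",
	.probe = example_probe,
	.devmodel = true,
};

static int __init example_init(void)
{
	return parport_register_driver(&example_driver);
}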
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index a97e2571a527..fcfaadc774ee 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -5854,6 +5854,24 @@ int pci_set_vga_state(struct pci_dev *dev, bool decode,
return 0;
}
+#ifdef CONFIG_ACPI
+bool pci_pr3_present(struct pci_dev *pdev)
+{
+ struct acpi_device *adev;
+
+ if (acpi_disabled)
+ return false;
+
+ adev = ACPI_COMPANION(&pdev->dev);
+ if (!adev)
+ return false;
+
+ return adev->power.flags.power_resources &&
+ acpi_has_method(adev->handle, "_PR3");
+}
+EXPORT_SYMBOL_GPL(pci_pr3_present);
+#endif
+
/**
* pci_add_dma_alias - Add a DMA devfn alias for a device
* @dev: the PCI device for which alias is added
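A hedged consumer sketch for the new pci_pr3_present() helper (the log message is illustrative):

static void example_check_pr3(struct pci_dev *pdev)
{
	/* e.g. decide whether the platform can fully power off the device */
	if (pci_pr3_present(pdev))
		dev_info(&pdev->dev, "ACPI _PR3 present; D3cold is viable\n");
}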
diff --git a/drivers/pcmcia/cardbus.c b/drivers/pcmcia/cardbus.c
index c502dfbf66e3..45c8252c8edc 100644
--- a/drivers/pcmcia/cardbus.c
+++ b/drivers/pcmcia/cardbus.c
@@ -22,7 +22,9 @@
#include <linux/pci.h>
#include <pcmcia/ss.h>
+#include <pcmcia/cistpl.h>
+#include "cs_internal.h"
static void cardbus_config_irq_and_cls(struct pci_bus *bus, int irq)
{
diff --git a/drivers/pcmcia/cistpl.c b/drivers/pcmcia/cistpl.c
index 629359fe3513..cf109d9a1112 100644
--- a/drivers/pcmcia/cistpl.c
+++ b/drivers/pcmcia/cistpl.c
@@ -28,6 +28,7 @@
#include <pcmcia/ss.h>
#include <pcmcia/cisreg.h>
#include <pcmcia/cistpl.h>
+#include <pcmcia/ds.h>
#include "cs_internal.h"
static const u_char mantissa[] = {
diff --git a/drivers/pcmcia/i82092.c b/drivers/pcmcia/i82092.c
index 245d60189375..aad8a46605be 100644
--- a/drivers/pcmcia/i82092.c
+++ b/drivers/pcmcia/i82092.c
@@ -431,27 +431,25 @@ static int i82092aa_get_status(struct pcmcia_socket *socket, u_int *value)
/* IO cards have a different meaning of bits 0,1 */
/* Also notice the inverse-logic on the bits */
- if (indirect_read(sock, I365_INTCTL) & I365_PC_IOCARD) {
- /* IO card */
- if (!(status & I365_CS_STSCHG))
- *value |= SS_STSCHG;
- } else { /* non I/O card */
- if (!(status & I365_CS_BVD1))
- *value |= SS_BATDEAD;
- if (!(status & I365_CS_BVD2))
- *value |= SS_BATWARN;
-
- }
+ if (indirect_read(sock, I365_INTCTL) & I365_PC_IOCARD) {
+ /* IO card */
+ if (!(status & I365_CS_STSCHG))
+ *value |= SS_STSCHG;
+ } else { /* non I/O card */
+ if (!(status & I365_CS_BVD1))
+ *value |= SS_BATDEAD;
+ if (!(status & I365_CS_BVD2))
+ *value |= SS_BATWARN;
+ }
- if (status & I365_CS_WRPROT)
- (*value) |= SS_WRPROT; /* card is write protected */
+ if (status & I365_CS_WRPROT)
+ (*value) |= SS_WRPROT; /* card is write protected */
- if (status & I365_CS_READY)
- (*value) |= SS_READY; /* card is not busy */
+ if (status & I365_CS_READY)
+ (*value) |= SS_READY; /* card is not busy */
- if (status & I365_CS_POWERON)
- (*value) |= SS_POWERON; /* power is applied to the card */
-
+ if (status & I365_CS_POWERON)
+ (*value) |= SS_POWERON; /* power is applied to the card */
leave("i82092aa_get_status");
return 0;
diff --git a/drivers/pcmcia/i82092aa.h b/drivers/pcmcia/i82092aa.h
index fabe08c3e33d..4586c43c78e2 100644
--- a/drivers/pcmcia/i82092aa.h
+++ b/drivers/pcmcia/i82092aa.h
@@ -8,11 +8,9 @@
#ifdef NOTRACE
#define enter(x) printk("Enter: %s, %s line %i\n",x,__FILE__,__LINE__)
#define leave(x) printk("Leave: %s, %s line %i\n",x,__FILE__,__LINE__)
-#define dprintk(fmt, args...) printk(fmt , ## args)
#else
#define enter(x) do {} while (0)
#define leave(x) do {} while (0)
-#define dprintk(fmt, args...) do {} while (0)
#endif
diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c
index 810761ab8e9d..49b1c6a1bdbe 100644
--- a/drivers/pcmcia/yenta_socket.c
+++ b/drivers/pcmcia/yenta_socket.c
@@ -173,8 +173,7 @@ static void exca_writew(struct yenta_socket *socket, unsigned reg, u16 val)
static ssize_t show_yenta_registers(struct device *yentadev, struct device_attribute *attr, char *buf)
{
- struct pci_dev *dev = to_pci_dev(yentadev);
- struct yenta_socket *socket = pci_get_drvdata(dev);
+ struct yenta_socket *socket = dev_get_drvdata(yentadev);
int offset = 0, i;
offset = snprintf(buf, PAGE_SIZE, "CB registers:");
diff --git a/drivers/perf/arm-cci.c b/drivers/perf/arm-cci.c
index 8f8606b9bc9e..1b8e337a29ca 100644
--- a/drivers/perf/arm-cci.c
+++ b/drivers/perf/arm-cci.c
@@ -1642,7 +1642,6 @@ static struct cci_pmu *cci_pmu_alloc(struct device *dev)
static int cci_pmu_probe(struct platform_device *pdev)
{
- struct resource *res;
struct cci_pmu *cci_pmu;
int i, ret, irq;
@@ -1650,8 +1649,7 @@ static int cci_pmu_probe(struct platform_device *pdev)
if (IS_ERR(cci_pmu))
return PTR_ERR(cci_pmu);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- cci_pmu->base = devm_ioremap_resource(&pdev->dev, res);
+ cci_pmu->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(cci_pmu->base))
return -ENOMEM;
diff --git a/drivers/perf/arm-ccn.c b/drivers/perf/arm-ccn.c
index 6fc0273b6129..fea354d6fb29 100644
--- a/drivers/perf/arm-ccn.c
+++ b/drivers/perf/arm-ccn.c
@@ -1477,8 +1477,7 @@ static int arm_ccn_probe(struct platform_device *pdev)
ccn->dev = &pdev->dev;
platform_set_drvdata(pdev, ccn);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ccn->base = devm_ioremap_resource(ccn->dev, res);
+ ccn->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ccn->base))
return PTR_ERR(ccn->base);
@@ -1537,6 +1536,7 @@ static int arm_ccn_remove(struct platform_device *pdev)
static const struct of_device_id arm_ccn_match[] = {
{ .compatible = "arm,ccn-502", },
{ .compatible = "arm,ccn-504", },
+ { .compatible = "arm,ccn-512", },
{},
};
MODULE_DEVICE_TABLE(of, arm_ccn_match);
diff --git a/drivers/perf/arm_smmuv3_pmu.c b/drivers/perf/arm_smmuv3_pmu.c
index abcf54f7d19c..773128f411f1 100644
--- a/drivers/perf/arm_smmuv3_pmu.c
+++ b/drivers/perf/arm_smmuv3_pmu.c
@@ -727,7 +727,7 @@ static void smmu_pmu_get_acpi_options(struct smmu_pmu *smmu_pmu)
static int smmu_pmu_probe(struct platform_device *pdev)
{
struct smmu_pmu *smmu_pmu;
- struct resource *res_0, *res_1;
+ struct resource *res_0;
u32 cfgr, reg_size;
u64 ceid_64[2];
int irq, err;
@@ -764,8 +764,7 @@ static int smmu_pmu_probe(struct platform_device *pdev)
/* Determine if page 1 is present */
if (cfgr & SMMU_PMCG_CFGR_RELOC_CTRS) {
- res_1 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- smmu_pmu->reloc_base = devm_ioremap_resource(dev, res_1);
+ smmu_pmu->reloc_base = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(smmu_pmu->reloc_base))
return PTR_ERR(smmu_pmu->reloc_base);
} else {
diff --git a/drivers/perf/fsl_imx8_ddr_perf.c b/drivers/perf/fsl_imx8_ddr_perf.c
index ce7345745b42..55083c67b2bb 100644
--- a/drivers/perf/fsl_imx8_ddr_perf.c
+++ b/drivers/perf/fsl_imx8_ddr_perf.c
@@ -45,7 +45,8 @@
static DEFINE_IDA(ddr_ida);
/* DDR Perf hardware feature */
-#define DDR_CAP_AXI_ID_FILTER 0x1 /* support AXI ID filter */
+#define DDR_CAP_AXI_ID_FILTER 0x1 /* support AXI ID filter */
+#define DDR_CAP_AXI_ID_FILTER_ENHANCED 0x3 /* support enhanced AXI ID filter */
struct fsl_ddr_devtype_data {
unsigned int quirks; /* quirks needed for different DDR Perf core */
@@ -57,9 +58,14 @@ static const struct fsl_ddr_devtype_data imx8m_devtype_data = {
.quirks = DDR_CAP_AXI_ID_FILTER,
};
+static const struct fsl_ddr_devtype_data imx8mp_devtype_data = {
+ .quirks = DDR_CAP_AXI_ID_FILTER_ENHANCED,
+};
+
static const struct of_device_id imx_ddr_pmu_dt_ids[] = {
{ .compatible = "fsl,imx8-ddr-pmu", .data = &imx8_devtype_data},
{ .compatible = "fsl,imx8m-ddr-pmu", .data = &imx8m_devtype_data},
+ { .compatible = "fsl,imx8mp-ddr-pmu", .data = &imx8mp_devtype_data},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, imx_ddr_pmu_dt_ids);
@@ -78,6 +84,61 @@ struct ddr_pmu {
int id;
};
+enum ddr_perf_filter_capabilities {
+ PERF_CAP_AXI_ID_FILTER = 0,
+ PERF_CAP_AXI_ID_FILTER_ENHANCED,
+ PERF_CAP_AXI_ID_FEAT_MAX,
+};
+
+static u32 ddr_perf_filter_cap_get(struct ddr_pmu *pmu, int cap)
+{
+ u32 quirks = pmu->devtype_data->quirks;
+
+ switch (cap) {
+ case PERF_CAP_AXI_ID_FILTER:
+ return !!(quirks & DDR_CAP_AXI_ID_FILTER);
+ case PERF_CAP_AXI_ID_FILTER_ENHANCED:
+ quirks &= DDR_CAP_AXI_ID_FILTER_ENHANCED;
+ return quirks == DDR_CAP_AXI_ID_FILTER_ENHANCED;
+ default:
+ WARN(1, "unknown filter cap %d\n", cap);
+ }
+
+ return 0;
+}
+
+static ssize_t ddr_perf_filter_cap_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ddr_pmu *pmu = dev_get_drvdata(dev);
+ struct dev_ext_attribute *ea =
+ container_of(attr, struct dev_ext_attribute, attr);
+ int cap = (long)ea->var;
+
+ return snprintf(buf, PAGE_SIZE, "%u\n",
+ ddr_perf_filter_cap_get(pmu, cap));
+}
+
+#define PERF_EXT_ATTR_ENTRY(_name, _func, _var) \
+ (&((struct dev_ext_attribute) { \
+ __ATTR(_name, 0444, _func, NULL), (void *)_var \
+ }).attr.attr)
+
+#define PERF_FILTER_EXT_ATTR_ENTRY(_name, _var) \
+ PERF_EXT_ATTR_ENTRY(_name, ddr_perf_filter_cap_show, _var)
+
+static struct attribute *ddr_perf_filter_cap_attr[] = {
+ PERF_FILTER_EXT_ATTR_ENTRY(filter, PERF_CAP_AXI_ID_FILTER),
+ PERF_FILTER_EXT_ATTR_ENTRY(enhanced_filter, PERF_CAP_AXI_ID_FILTER_ENHANCED),
+ NULL,
+};
+
+static struct attribute_group ddr_perf_filter_cap_attr_group = {
+ .name = "caps",
+ .attrs = ddr_perf_filter_cap_attr,
+};
+
static ssize_t ddr_perf_cpumask_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -175,9 +236,40 @@ static const struct attribute_group *attr_groups[] = {
&ddr_perf_events_attr_group,
&ddr_perf_format_attr_group,
&ddr_perf_cpumask_attr_group,
+ &ddr_perf_filter_cap_attr_group,
NULL,
};
+static bool ddr_perf_is_filtered(struct perf_event *event)
+{
+ return event->attr.config == 0x41 || event->attr.config == 0x42;
+}
+
+static u32 ddr_perf_filter_val(struct perf_event *event)
+{
+ return event->attr.config1;
+}
+
+static bool ddr_perf_filters_compatible(struct perf_event *a,
+ struct perf_event *b)
+{
+ if (!ddr_perf_is_filtered(a))
+ return true;
+ if (!ddr_perf_is_filtered(b))
+ return true;
+ return ddr_perf_filter_val(a) == ddr_perf_filter_val(b);
+}
+
+static bool ddr_perf_is_enhanced_filtered(struct perf_event *event)
+{
+ unsigned int filt;
+ struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
+
+ filt = pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER_ENHANCED;
+ return (filt == DDR_CAP_AXI_ID_FILTER_ENHANCED) &&
+ ddr_perf_is_filtered(event);
+}
+
static u32 ddr_perf_alloc_counter(struct ddr_pmu *pmu, int event)
{
int i;
@@ -209,27 +301,17 @@ static void ddr_perf_free_counter(struct ddr_pmu *pmu, int counter)
static u32 ddr_perf_read_counter(struct ddr_pmu *pmu, int counter)
{
- return readl_relaxed(pmu->base + COUNTER_READ + counter * 4);
-}
-
-static bool ddr_perf_is_filtered(struct perf_event *event)
-{
- return event->attr.config == 0x41 || event->attr.config == 0x42;
-}
+ struct perf_event *event = pmu->events[counter];
+ void __iomem *base = pmu->base;
-static u32 ddr_perf_filter_val(struct perf_event *event)
-{
- return event->attr.config1;
-}
-
-static bool ddr_perf_filters_compatible(struct perf_event *a,
- struct perf_event *b)
-{
- if (!ddr_perf_is_filtered(a))
- return true;
- if (!ddr_perf_is_filtered(b))
- return true;
- return ddr_perf_filter_val(a) == ddr_perf_filter_val(b);
+ /*
+	 * Return bytes instead of bursts for the DDR transaction when
+	 * counting the axid-read and axid-write events, if the PMU core
+	 * supports the enhanced filter.
+ */
+ base += ddr_perf_is_enhanced_filtered(event) ? COUNTER_DPCR1 :
+ COUNTER_READ;
+ return readl_relaxed(base + counter * 4);
}
static int ddr_perf_event_init(struct perf_event *event)
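Tying the filter pieces together: axid-read (0x41) and axid-write (0x42) take the AXI ID to match in config1, and parts with the enhanced filter report bytes rather than bursts. A hedged sketch of the event encoding a user-space tool would build (field values illustrative):

#include <linux/perf_event.h>

static struct perf_event_attr example_axid_read_attr(__u32 pmu_type)
{
	struct perf_event_attr attr = {
		.type    = pmu_type,	/* dynamic PMU type from sysfs */
		.size    = sizeof(attr),
		.config  = 0x41,	/* axid-read */
		.config1 = 0x12,	/* AXI ID to filter on */
	};

	return attr;
}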
diff --git a/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
index e42d4464c2cf..453f1c6a16ca 100644
--- a/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
@@ -243,8 +243,6 @@ MODULE_DEVICE_TABLE(acpi, hisi_ddrc_pmu_acpi_match);
static int hisi_ddrc_pmu_init_data(struct platform_device *pdev,
struct hisi_pmu *ddrc_pmu)
{
- struct resource *res;
-
/*
* Use the SCCL_ID and DDRC channel ID to identify the
* DDRC PMU, while SCCL_ID is in MPIDR[aff2].
@@ -263,8 +261,7 @@ static int hisi_ddrc_pmu_init_data(struct platform_device *pdev,
/* DDRC PMUs only share the same SCCL */
ddrc_pmu->ccl_id = -1;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ddrc_pmu->base = devm_ioremap_resource(&pdev->dev, res);
+ ddrc_pmu->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ddrc_pmu->base)) {
dev_err(&pdev->dev, "ioremap failed for ddrc_pmu resource\n");
return PTR_ERR(ddrc_pmu->base);
diff --git a/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c b/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c
index f28063873e11..6a1dd72d8abb 100644
--- a/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c
@@ -234,7 +234,6 @@ static int hisi_hha_pmu_init_data(struct platform_device *pdev,
struct hisi_pmu *hha_pmu)
{
unsigned long long id;
- struct resource *res;
acpi_status status;
status = acpi_evaluate_integer(ACPI_HANDLE(&pdev->dev),
@@ -256,8 +255,7 @@ static int hisi_hha_pmu_init_data(struct platform_device *pdev,
/* HHA PMUs only share the same SCCL */
hha_pmu->ccl_id = -1;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- hha_pmu->base = devm_ioremap_resource(&pdev->dev, res);
+ hha_pmu->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(hha_pmu->base)) {
dev_err(&pdev->dev, "ioremap failed for hha_pmu resource\n");
return PTR_ERR(hha_pmu->base);
diff --git a/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
index 078b8dc57250..1151e99b241c 100644
--- a/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
@@ -233,7 +233,6 @@ static int hisi_l3c_pmu_init_data(struct platform_device *pdev,
struct hisi_pmu *l3c_pmu)
{
unsigned long long id;
- struct resource *res;
acpi_status status;
status = acpi_evaluate_integer(ACPI_HANDLE(&pdev->dev),
@@ -259,8 +258,7 @@ static int hisi_l3c_pmu_init_data(struct platform_device *pdev,
return -EINVAL;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- l3c_pmu->base = devm_ioremap_resource(&pdev->dev, res);
+ l3c_pmu->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(l3c_pmu->base)) {
dev_err(&pdev->dev, "ioremap failed for l3c_pmu resource\n");
return PTR_ERR(l3c_pmu->base);
diff --git a/drivers/perf/hisilicon/hisi_uncore_pmu.c b/drivers/perf/hisilicon/hisi_uncore_pmu.c
index 79f76f8dda8e..96183e31b96a 100644
--- a/drivers/perf/hisilicon/hisi_uncore_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_pmu.c
@@ -15,6 +15,7 @@
#include <linux/errno.h>
#include <linux/interrupt.h>
+#include <asm/cputype.h>
#include <asm/local64.h>
#include "hisi_uncore_pmu.h"
@@ -338,8 +339,10 @@ void hisi_uncore_pmu_disable(struct pmu *pmu)
/*
* Read Super CPU cluster and CPU cluster ID from MPIDR_EL1.
- * If multi-threading is supported, CCL_ID is the low 3-bits in MPIDR[Aff2]
- * and SCCL_ID is the upper 5-bits of Aff2 field; if not, SCCL_ID
+ * If multi-threading is supported: on the Huawei Kunpeng 920 SoC, whose
+ * CPU core is TSV110, CCL_ID is the low 3 bits of MPIDR[Aff2] and SCCL_ID
+ * is the upper 5 bits of the Aff2 field, while for other CPU types SCCL_ID
+ * is in MPIDR[Aff3] and CCL_ID is in MPIDR[Aff2]. If not, SCCL_ID
* is in MPIDR[Aff2] and CCL_ID is in MPIDR[Aff1].
*/
static void hisi_read_sccl_and_ccl_id(int *sccl_id, int *ccl_id)
@@ -347,12 +350,19 @@ static void hisi_read_sccl_and_ccl_id(int *sccl_id, int *ccl_id)
u64 mpidr = read_cpuid_mpidr();
if (mpidr & MPIDR_MT_BITMASK) {
- int aff2 = MPIDR_AFFINITY_LEVEL(mpidr, 2);
-
- if (sccl_id)
- *sccl_id = aff2 >> 3;
- if (ccl_id)
- *ccl_id = aff2 & 0x7;
+ if (read_cpuid_part_number() == HISI_CPU_PART_TSV110) {
+ int aff2 = MPIDR_AFFINITY_LEVEL(mpidr, 2);
+
+ if (sccl_id)
+ *sccl_id = aff2 >> 3;
+ if (ccl_id)
+ *ccl_id = aff2 & 0x7;
+ } else {
+ if (sccl_id)
+ *sccl_id = MPIDR_AFFINITY_LEVEL(mpidr, 3);
+ if (ccl_id)
+ *ccl_id = MPIDR_AFFINITY_LEVEL(mpidr, 2);
+ }
} else {
if (sccl_id)
*sccl_id = MPIDR_AFFINITY_LEVEL(mpidr, 2);
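The whole decode reduces to shift-and-mask over the MPIDR_EL1 affinity fields (Aff0..Aff2 at bits 0/8/16, Aff3 at bit 32 on arm64). A sketch of the multi-threaded branch added above, with the CPU-type test folded into a flag for illustration:

/* Sketch of the MT decode; mirrors the hunk above. */
static void decode_sccl_ccl(u64 mpidr, bool is_tsv110, int *sccl, int *ccl)
{
	if (is_tsv110) {
		int aff2 = MPIDR_AFFINITY_LEVEL(mpidr, 2);

		*sccl = aff2 >> 3;	/* upper 5 bits of Aff2 */
		*ccl = aff2 & 0x7;	/* low 3 bits of Aff2 */
	} else {
		*sccl = MPIDR_AFFINITY_LEVEL(mpidr, 3);
		*ccl = MPIDR_AFFINITY_LEVEL(mpidr, 2);
	}
}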
diff --git a/drivers/perf/thunderx2_pmu.c b/drivers/perf/thunderx2_pmu.c
index 43d76c85da56..51b31d6ff2c4 100644
--- a/drivers/perf/thunderx2_pmu.c
+++ b/drivers/perf/thunderx2_pmu.c
@@ -16,23 +16,36 @@
 * they need to be sampled before overflow (i.e., every 2 seconds).
*/
-#define TX2_PMU_MAX_COUNTERS 4
+#define TX2_PMU_DMC_L3C_MAX_COUNTERS 4
+#define TX2_PMU_CCPI2_MAX_COUNTERS 8
+#define TX2_PMU_MAX_COUNTERS TX2_PMU_CCPI2_MAX_COUNTERS
+
#define TX2_PMU_DMC_CHANNELS 8
#define TX2_PMU_L3_TILES 16
#define TX2_PMU_HRTIMER_INTERVAL (2 * NSEC_PER_SEC)
-#define GET_EVENTID(ev) ((ev->hw.config) & 0x1f)
-#define GET_COUNTERID(ev) ((ev->hw.idx) & 0x3)
+#define GET_EVENTID(ev, mask) ((ev->hw.config) & mask)
+#define GET_COUNTERID(ev, mask) ((ev->hw.idx) & mask)
/* 1 byte per counter (4 counters).
 * Event id is encoded in bits [5:1] of a byte.
*/
#define DMC_EVENT_CFG(idx, val) ((val) << (((idx) * 8) + 1))
+/* Counter select bits [3:0]; the CCPI2 counters are indexed from 8 to 15. */
+#define CCPI2_COUNTER_OFFSET 8
+
#define L3C_COUNTER_CTL 0xA8
#define L3C_COUNTER_DATA 0xAC
#define DMC_COUNTER_CTL 0x234
#define DMC_COUNTER_DATA 0x240
+#define CCPI2_PERF_CTL 0x108
+#define CCPI2_COUNTER_CTL 0x10C
+#define CCPI2_COUNTER_SEL 0x12c
+#define CCPI2_COUNTER_DATA_L 0x130
+#define CCPI2_COUNTER_DATA_H 0x134
+
/* L3C event IDs */
#define L3_EVENT_READ_REQ 0xD
#define L3_EVENT_WRITEBACK_REQ 0xE
@@ -51,15 +64,28 @@
#define DMC_EVENT_READ_TXNS 0xF
#define DMC_EVENT_MAX 0x10
+#define CCPI2_EVENT_REQ_PKT_SENT 0x3D
+#define CCPI2_EVENT_SNOOP_PKT_SENT 0x65
+#define CCPI2_EVENT_DATA_PKT_SENT 0x105
+#define CCPI2_EVENT_GIC_PKT_SENT 0x12D
+#define CCPI2_EVENT_MAX 0x200
+
+#define CCPI2_PERF_CTL_ENABLE BIT(0)
+#define CCPI2_PERF_CTL_START BIT(1)
+#define CCPI2_PERF_CTL_RESET BIT(4)
+#define CCPI2_EVENT_LEVEL_RISING_EDGE BIT(10)
+#define CCPI2_EVENT_TYPE_EDGE_SENSITIVE BIT(11)
+
enum tx2_uncore_type {
PMU_TYPE_L3C,
PMU_TYPE_DMC,
+ PMU_TYPE_CCPI2,
PMU_TYPE_INVALID,
};
/*
- * pmu on each socket has 2 uncore devices(dmc and l3c),
- * each device has 4 counters.
+ * Each socket has 3 uncore devices associated with a PMU. The DMC and
+ * L3C have 4 32-bit counters and the CCPI2 has 8 64-bit counters.
*/
struct tx2_uncore_pmu {
struct hlist_node hpnode;
@@ -69,8 +95,10 @@ struct tx2_uncore_pmu {
int node;
int cpu;
u32 max_counters;
+ u32 counters_mask;
u32 prorate_factor;
u32 max_events;
+ u32 events_mask;
u64 hrtimer_interval;
void __iomem *base;
DECLARE_BITMAP(active_counters, TX2_PMU_MAX_COUNTERS);
@@ -79,6 +107,7 @@ struct tx2_uncore_pmu {
struct hrtimer hrtimer;
const struct attribute_group **attr_groups;
enum tx2_uncore_type type;
+ enum hrtimer_restart (*hrtimer_callback)(struct hrtimer *cb);
void (*init_cntr_base)(struct perf_event *event,
struct tx2_uncore_pmu *tx2_pmu);
void (*stop_event)(struct perf_event *event);
@@ -92,7 +121,21 @@ static inline struct tx2_uncore_pmu *pmu_to_tx2_pmu(struct pmu *pmu)
return container_of(pmu, struct tx2_uncore_pmu, pmu);
}
-PMU_FORMAT_ATTR(event, "config:0-4");
+#define TX2_PMU_FORMAT_ATTR(_var, _name, _format) \
+static ssize_t \
+__tx2_pmu_##_var##_show(struct device *dev, \
+ struct device_attribute *attr, \
+ char *page) \
+{ \
+ BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \
+ return sprintf(page, _format "\n"); \
+} \
+ \
+static struct device_attribute format_attr_##_var = \
+ __ATTR(_name, 0444, __tx2_pmu_##_var##_show, NULL)
+
+TX2_PMU_FORMAT_ATTR(event, event, "config:0-4");
+TX2_PMU_FORMAT_ATTR(event_ccpi2, event, "config:0-9");
static struct attribute *l3c_pmu_format_attrs[] = {
&format_attr_event.attr,
@@ -104,6 +147,11 @@ static struct attribute *dmc_pmu_format_attrs[] = {
NULL,
};
+static struct attribute *ccpi2_pmu_format_attrs[] = {
+ &format_attr_event_ccpi2.attr,
+ NULL,
+};
+
static const struct attribute_group l3c_pmu_format_attr_group = {
.name = "format",
.attrs = l3c_pmu_format_attrs,
@@ -114,6 +162,11 @@ static const struct attribute_group dmc_pmu_format_attr_group = {
.attrs = dmc_pmu_format_attrs,
};
+static const struct attribute_group ccpi2_pmu_format_attr_group = {
+ .name = "format",
+ .attrs = ccpi2_pmu_format_attrs,
+};
+
/*
* sysfs event attributes
*/
@@ -164,6 +217,19 @@ static struct attribute *dmc_pmu_events_attrs[] = {
NULL,
};
+TX2_EVENT_ATTR(req_pktsent, CCPI2_EVENT_REQ_PKT_SENT);
+TX2_EVENT_ATTR(snoop_pktsent, CCPI2_EVENT_SNOOP_PKT_SENT);
+TX2_EVENT_ATTR(data_pktsent, CCPI2_EVENT_DATA_PKT_SENT);
+TX2_EVENT_ATTR(gic_pktsent, CCPI2_EVENT_GIC_PKT_SENT);
+
+static struct attribute *ccpi2_pmu_events_attrs[] = {
+ &tx2_pmu_event_attr_req_pktsent.attr.attr,
+ &tx2_pmu_event_attr_snoop_pktsent.attr.attr,
+ &tx2_pmu_event_attr_data_pktsent.attr.attr,
+ &tx2_pmu_event_attr_gic_pktsent.attr.attr,
+ NULL,
+};
+
static const struct attribute_group l3c_pmu_events_attr_group = {
.name = "events",
.attrs = l3c_pmu_events_attrs,
@@ -174,6 +240,11 @@ static const struct attribute_group dmc_pmu_events_attr_group = {
.attrs = dmc_pmu_events_attrs,
};
+static const struct attribute_group ccpi2_pmu_events_attr_group = {
+ .name = "events",
+ .attrs = ccpi2_pmu_events_attrs,
+};
+
/*
* sysfs cpumask attributes
*/
@@ -213,6 +284,13 @@ static const struct attribute_group *dmc_pmu_attr_groups[] = {
NULL
};
+static const struct attribute_group *ccpi2_pmu_attr_groups[] = {
+ &ccpi2_pmu_format_attr_group,
+ &pmu_cpumask_attr_group,
+ &ccpi2_pmu_events_attr_group,
+ NULL
+};
+
static inline u32 reg_readl(unsigned long addr)
{
return readl((void __iomem *)addr);
@@ -245,33 +323,58 @@ static void init_cntr_base_l3c(struct perf_event *event,
struct tx2_uncore_pmu *tx2_pmu)
{
struct hw_perf_event *hwc = &event->hw;
+ u32 cmask;
+
+ tx2_pmu = pmu_to_tx2_pmu(event->pmu);
+ cmask = tx2_pmu->counters_mask;
/* counter ctrl/data reg offset at 8 */
hwc->config_base = (unsigned long)tx2_pmu->base
- + L3C_COUNTER_CTL + (8 * GET_COUNTERID(event));
+ + L3C_COUNTER_CTL + (8 * GET_COUNTERID(event, cmask));
hwc->event_base = (unsigned long)tx2_pmu->base
- + L3C_COUNTER_DATA + (8 * GET_COUNTERID(event));
+ + L3C_COUNTER_DATA + (8 * GET_COUNTERID(event, cmask));
}
static void init_cntr_base_dmc(struct perf_event *event,
struct tx2_uncore_pmu *tx2_pmu)
{
struct hw_perf_event *hwc = &event->hw;
+ u32 cmask;
+
+ tx2_pmu = pmu_to_tx2_pmu(event->pmu);
+ cmask = tx2_pmu->counters_mask;
hwc->config_base = (unsigned long)tx2_pmu->base
+ DMC_COUNTER_CTL;
/* counter data reg offset at 0xc */
hwc->event_base = (unsigned long)tx2_pmu->base
- + DMC_COUNTER_DATA + (0xc * GET_COUNTERID(event));
+ + DMC_COUNTER_DATA + (0xc * GET_COUNTERID(event, cmask));
+}
+
+static void init_cntr_base_ccpi2(struct perf_event *event,
+ struct tx2_uncore_pmu *tx2_pmu)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ u32 cmask;
+
+ cmask = tx2_pmu->counters_mask;
+
+ hwc->config_base = (unsigned long)tx2_pmu->base
+ + CCPI2_COUNTER_CTL + (4 * GET_COUNTERID(event, cmask));
+ hwc->event_base = (unsigned long)tx2_pmu->base;
}
static void uncore_start_event_l3c(struct perf_event *event, int flags)
{
- u32 val;
+ u32 val, emask;
struct hw_perf_event *hwc = &event->hw;
+ struct tx2_uncore_pmu *tx2_pmu;
+
+ tx2_pmu = pmu_to_tx2_pmu(event->pmu);
+ emask = tx2_pmu->events_mask;
/* event id encoded in bits [07:03] */
- val = GET_EVENTID(event) << 3;
+ val = GET_EVENTID(event, emask) << 3;
reg_writel(val, hwc->config_base);
local64_set(&hwc->prev_count, 0);
reg_writel(0, hwc->event_base);
@@ -284,10 +387,17 @@ static inline void uncore_stop_event_l3c(struct perf_event *event)
static void uncore_start_event_dmc(struct perf_event *event, int flags)
{
- u32 val;
+ u32 val, cmask, emask;
struct hw_perf_event *hwc = &event->hw;
- int idx = GET_COUNTERID(event);
- int event_id = GET_EVENTID(event);
+ struct tx2_uncore_pmu *tx2_pmu;
+ int idx, event_id;
+
+ tx2_pmu = pmu_to_tx2_pmu(event->pmu);
+ cmask = tx2_pmu->counters_mask;
+ emask = tx2_pmu->events_mask;
+
+ idx = GET_COUNTERID(event, cmask);
+ event_id = GET_EVENTID(event, emask);
/* enable and start counters.
* 8 bits for each counter, bits[05:01] of a counter to set event type.
@@ -302,9 +412,14 @@ static void uncore_start_event_dmc(struct perf_event *event, int flags)
static void uncore_stop_event_dmc(struct perf_event *event)
{
- u32 val;
+ u32 val, cmask;
struct hw_perf_event *hwc = &event->hw;
- int idx = GET_COUNTERID(event);
+ struct tx2_uncore_pmu *tx2_pmu;
+ int idx;
+
+ tx2_pmu = pmu_to_tx2_pmu(event->pmu);
+ cmask = tx2_pmu->counters_mask;
+ idx = GET_COUNTERID(event, cmask);
/* clear event type(bits[05:01]) to stop counter */
val = reg_readl(hwc->config_base);
@@ -312,27 +427,72 @@ static void uncore_stop_event_dmc(struct perf_event *event)
reg_writel(val, hwc->config_base);
}
+static void uncore_start_event_ccpi2(struct perf_event *event, int flags)
+{
+ u32 emask;
+ struct hw_perf_event *hwc = &event->hw;
+ struct tx2_uncore_pmu *tx2_pmu;
+
+ tx2_pmu = pmu_to_tx2_pmu(event->pmu);
+ emask = tx2_pmu->events_mask;
+
+ /* Bits [09:00] set the event id.
+ * Bit [10] sets the level to rising edge.
+ * Bit [11] sets the type to edge sensitive.
+ */
+ reg_writel((CCPI2_EVENT_TYPE_EDGE_SENSITIVE |
+ CCPI2_EVENT_LEVEL_RISING_EDGE |
+ GET_EVENTID(event, emask)), hwc->config_base);
+
+ /* reset[4], enable[0] and start[1] counters */
+ reg_writel(CCPI2_PERF_CTL_RESET |
+ CCPI2_PERF_CTL_START |
+ CCPI2_PERF_CTL_ENABLE,
+ hwc->event_base + CCPI2_PERF_CTL);
+ local64_set(&event->hw.prev_count, 0ULL);
+}
+
+static void uncore_stop_event_ccpi2(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ /* disable and stop counter */
+ reg_writel(0, hwc->event_base + CCPI2_PERF_CTL);
+}
+
static void tx2_uncore_event_update(struct perf_event *event)
{
- s64 prev, delta, new = 0;
+ u64 prev, delta, new = 0;
struct hw_perf_event *hwc = &event->hw;
struct tx2_uncore_pmu *tx2_pmu;
enum tx2_uncore_type type;
u32 prorate_factor;
+ u32 cmask, emask;
tx2_pmu = pmu_to_tx2_pmu(event->pmu);
type = tx2_pmu->type;
+ cmask = tx2_pmu->counters_mask;
+ emask = tx2_pmu->events_mask;
prorate_factor = tx2_pmu->prorate_factor;
-
- new = reg_readl(hwc->event_base);
- prev = local64_xchg(&hwc->prev_count, new);
-
- /* handles rollover of 32 bit counter */
- delta = (u32)(((1UL << 32) - prev) + new);
+ if (type == PMU_TYPE_CCPI2) {
+ reg_writel(CCPI2_COUNTER_OFFSET +
+ GET_COUNTERID(event, cmask),
+ hwc->event_base + CCPI2_COUNTER_SEL);
+ new = reg_readl(hwc->event_base + CCPI2_COUNTER_DATA_H);
+ new = (new << 32) +
+ reg_readl(hwc->event_base + CCPI2_COUNTER_DATA_L);
+ prev = local64_xchg(&hwc->prev_count, new);
+ delta = new - prev;
+ } else {
+ new = reg_readl(hwc->event_base);
+ prev = local64_xchg(&hwc->prev_count, new);
+ /* handles rollover of 32 bit counter */
+ delta = (u32)(((1UL << 32) - prev) + new);
+ }
/* DMC event data_transfers granularity is 16 Bytes, convert it to 64 */
if (type == PMU_TYPE_DMC &&
- GET_EVENTID(event) == DMC_EVENT_DATA_TRANSFERS)
+ GET_EVENTID(event, emask) == DMC_EVENT_DATA_TRANSFERS)
delta = delta/4;
/* L3C and DMC have 16 and 8 interleave channels respectively.
@@ -351,6 +511,7 @@ static enum tx2_uncore_type get_tx2_pmu_type(struct acpi_device *adev)
} devices[] = {
{"CAV901D", PMU_TYPE_L3C},
{"CAV901F", PMU_TYPE_DMC},
+ {"CAV901E", PMU_TYPE_CCPI2},
{"", PMU_TYPE_INVALID}
};
@@ -380,7 +541,8 @@ static bool tx2_uncore_validate_event(struct pmu *pmu,
* Make sure the group of events can be scheduled at once
* on the PMU.
*/
-static bool tx2_uncore_validate_event_group(struct perf_event *event)
+static bool tx2_uncore_validate_event_group(struct perf_event *event,
+ int max_counters)
{
struct perf_event *sibling, *leader = event->group_leader;
int counters = 0;
@@ -403,7 +565,7 @@ static bool tx2_uncore_validate_event_group(struct perf_event *event)
* If the group requires more counters than the HW has,
* it cannot ever be scheduled.
*/
- return counters <= TX2_PMU_MAX_COUNTERS;
+ return counters <= max_counters;
}
@@ -439,7 +601,7 @@ static int tx2_uncore_event_init(struct perf_event *event)
hwc->config = event->attr.config;
/* Validate the group */
- if (!tx2_uncore_validate_event_group(event))
+ if (!tx2_uncore_validate_event_group(event, tx2_pmu->max_counters))
return -EINVAL;
return 0;
@@ -456,6 +618,10 @@ static void tx2_uncore_event_start(struct perf_event *event, int flags)
tx2_pmu->start_event(event, flags);
perf_event_update_userpage(event);
+ /* No hrtimer needed for CCPI2, 64-bit counters */
+ if (!tx2_pmu->hrtimer_callback)
+ return;
+
/* Start timer for first event */
if (bitmap_weight(tx2_pmu->active_counters,
tx2_pmu->max_counters) == 1) {
@@ -510,15 +676,23 @@ static void tx2_uncore_event_del(struct perf_event *event, int flags)
{
struct tx2_uncore_pmu *tx2_pmu = pmu_to_tx2_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
+ u32 cmask;
+ cmask = tx2_pmu->counters_mask;
tx2_uncore_event_stop(event, PERF_EF_UPDATE);
/* clear the assigned counter */
- free_counter(tx2_pmu, GET_COUNTERID(event));
+ free_counter(tx2_pmu, GET_COUNTERID(event, cmask));
perf_event_update_userpage(event);
tx2_pmu->events[hwc->idx] = NULL;
hwc->idx = -1;
+
+ if (!tx2_pmu->hrtimer_callback)
+ return;
+
+ if (bitmap_empty(tx2_pmu->active_counters, tx2_pmu->max_counters))
+ hrtimer_cancel(&tx2_pmu->hrtimer);
}
static void tx2_uncore_event_read(struct perf_event *event)
@@ -580,8 +754,12 @@ static int tx2_uncore_pmu_add_dev(struct tx2_uncore_pmu *tx2_pmu)
cpu_online_mask);
tx2_pmu->cpu = cpu;
- hrtimer_init(&tx2_pmu->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- tx2_pmu->hrtimer.function = tx2_hrtimer_callback;
+
+ if (tx2_pmu->hrtimer_callback) {
+ hrtimer_init(&tx2_pmu->hrtimer,
+ CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ tx2_pmu->hrtimer.function = tx2_pmu->hrtimer_callback;
+ }
ret = tx2_uncore_pmu_register(tx2_pmu);
if (ret) {
@@ -653,10 +831,13 @@ static struct tx2_uncore_pmu *tx2_uncore_pmu_init_dev(struct device *dev,
switch (tx2_pmu->type) {
case PMU_TYPE_L3C:
- tx2_pmu->max_counters = TX2_PMU_MAX_COUNTERS;
+ tx2_pmu->max_counters = TX2_PMU_DMC_L3C_MAX_COUNTERS;
+ tx2_pmu->counters_mask = 0x3;
tx2_pmu->prorate_factor = TX2_PMU_L3_TILES;
tx2_pmu->max_events = L3_EVENT_MAX;
+ tx2_pmu->events_mask = 0x1f;
tx2_pmu->hrtimer_interval = TX2_PMU_HRTIMER_INTERVAL;
+ tx2_pmu->hrtimer_callback = tx2_hrtimer_callback;
tx2_pmu->attr_groups = l3c_pmu_attr_groups;
tx2_pmu->name = devm_kasprintf(dev, GFP_KERNEL,
"uncore_l3c_%d", tx2_pmu->node);
@@ -665,10 +846,13 @@ static struct tx2_uncore_pmu *tx2_uncore_pmu_init_dev(struct device *dev,
tx2_pmu->stop_event = uncore_stop_event_l3c;
break;
case PMU_TYPE_DMC:
- tx2_pmu->max_counters = TX2_PMU_MAX_COUNTERS;
+ tx2_pmu->max_counters = TX2_PMU_DMC_L3C_MAX_COUNTERS;
+ tx2_pmu->counters_mask = 0x3;
tx2_pmu->prorate_factor = TX2_PMU_DMC_CHANNELS;
tx2_pmu->max_events = DMC_EVENT_MAX;
+ tx2_pmu->events_mask = 0x1f;
tx2_pmu->hrtimer_interval = TX2_PMU_HRTIMER_INTERVAL;
+ tx2_pmu->hrtimer_callback = tx2_hrtimer_callback;
tx2_pmu->attr_groups = dmc_pmu_attr_groups;
tx2_pmu->name = devm_kasprintf(dev, GFP_KERNEL,
"uncore_dmc_%d", tx2_pmu->node);
@@ -676,6 +860,21 @@ static struct tx2_uncore_pmu *tx2_uncore_pmu_init_dev(struct device *dev,
tx2_pmu->start_event = uncore_start_event_dmc;
tx2_pmu->stop_event = uncore_stop_event_dmc;
break;
+ case PMU_TYPE_CCPI2:
+ /* CCPI2 has 8 counters */
+ tx2_pmu->max_counters = TX2_PMU_CCPI2_MAX_COUNTERS;
+ tx2_pmu->counters_mask = 0x7;
+ tx2_pmu->prorate_factor = 1;
+ tx2_pmu->max_events = CCPI2_EVENT_MAX;
+ tx2_pmu->events_mask = 0x1ff;
+ tx2_pmu->attr_groups = ccpi2_pmu_attr_groups;
+ tx2_pmu->name = devm_kasprintf(dev, GFP_KERNEL,
+ "uncore_ccpi2_%d", tx2_pmu->node);
+ tx2_pmu->init_cntr_base = init_cntr_base_ccpi2;
+ tx2_pmu->start_event = uncore_start_event_ccpi2;
+ tx2_pmu->stop_event = uncore_stop_event_ccpi2;
+ tx2_pmu->hrtimer_callback = NULL;
+ break;
case PMU_TYPE_INVALID:
devm_kfree(dev, tx2_pmu);
return NULL;
@@ -744,7 +943,9 @@ static int tx2_uncore_pmu_offline_cpu(unsigned int cpu,
if (cpu != tx2_pmu->cpu)
return 0;
- hrtimer_cancel(&tx2_pmu->hrtimer);
+ if (tx2_pmu->hrtimer_callback)
+ hrtimer_cancel(&tx2_pmu->hrtimer);
+
cpumask_copy(&cpu_online_mask_temp, cpu_online_mask);
cpumask_clear_cpu(cpu, &cpu_online_mask_temp);
new_cpu = cpumask_any_and(
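The CCPI2 block needs no overflow hrtimer because its counters are 64-bit; the update path instead selects a counter and stitches the value together from two 32-bit data registers. A condensed sketch of that read, using the register names from the hunks above:

/* Sketch of the CCPI2 read in tx2_uncore_event_update(). */
static u64 ccpi2_read_counter(struct hw_perf_event *hwc, int counter_id)
{
	u64 val;

	/* CCPI2 counters are indexed 8..15, hence the offset. */
	reg_writel(CCPI2_COUNTER_OFFSET + counter_id,
		   hwc->event_base + CCPI2_COUNTER_SEL);
	val = reg_readl(hwc->event_base + CCPI2_COUNTER_DATA_H);
	val = (val << 32) | reg_readl(hwc->event_base + CCPI2_COUNTER_DATA_L);
	return val;
}

With a full 64-bit value, the delta is simply new - prev, so the 32-bit rollover arithmetic kept for DMC/L3C does not apply.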
diff --git a/drivers/perf/xgene_pmu.c b/drivers/perf/xgene_pmu.c
index 7e328d6385c3..46ee6807d533 100644
--- a/drivers/perf/xgene_pmu.c
+++ b/drivers/perf/xgene_pmu.c
@@ -1282,25 +1282,21 @@ static int acpi_pmu_probe_active_mcb_mcu_l3c(struct xgene_pmu *xgene_pmu,
struct platform_device *pdev)
{
void __iomem *csw_csr, *mcba_csr, *mcbb_csr;
- struct resource *res;
unsigned int reg;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- csw_csr = devm_ioremap_resource(&pdev->dev, res);
+ csw_csr = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(csw_csr)) {
dev_err(&pdev->dev, "ioremap failed for CSW CSR resource\n");
return PTR_ERR(csw_csr);
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
- mcba_csr = devm_ioremap_resource(&pdev->dev, res);
+ mcba_csr = devm_platform_ioremap_resource(pdev, 2);
if (IS_ERR(mcba_csr)) {
dev_err(&pdev->dev, "ioremap failed for MCBA CSR resource\n");
return PTR_ERR(mcba_csr);
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
- mcbb_csr = devm_ioremap_resource(&pdev->dev, res);
+ mcbb_csr = devm_platform_ioremap_resource(pdev, 3);
if (IS_ERR(mcbb_csr)) {
dev_err(&pdev->dev, "ioremap failed for MCBB CSR resource\n");
return PTR_ERR(mcbb_csr);
@@ -1332,13 +1328,11 @@ static int acpi_pmu_v3_probe_active_mcb_mcu_l3c(struct xgene_pmu *xgene_pmu,
struct platform_device *pdev)
{
void __iomem *csw_csr;
- struct resource *res;
unsigned int reg;
u32 mcb0routing;
u32 mcb1routing;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- csw_csr = devm_ioremap_resource(&pdev->dev, res);
+ csw_csr = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(csw_csr)) {
dev_err(&pdev->dev, "ioremap failed for CSW CSR resource\n");
return PTR_ERR(csw_csr);
diff --git a/drivers/phy/allwinner/Kconfig b/drivers/phy/allwinner/Kconfig
index 215425296c77..3dab79e9d52b 100644
--- a/drivers/phy/allwinner/Kconfig
+++ b/drivers/phy/allwinner/Kconfig
@@ -45,3 +45,14 @@ config PHY_SUN9I_USB
sun9i SoCs.
This driver controls each individual USB 2 host PHY.
+
+config PHY_SUN50I_USB3
+ tristate "Allwinner H6 SoC USB3 PHY driver"
+ depends on ARCH_SUNXI && HAS_IOMEM && OF
+ depends on RESET_CONTROLLER
+ select GENERIC_PHY
+ help
+ Enable this to support the USB 3.0-capable transceiver that is
+ part of the Allwinner H6 SoC.
+
+ This driver controls each individual USB 2+3 host PHY combo.
diff --git a/drivers/phy/allwinner/Makefile b/drivers/phy/allwinner/Makefile
index 799a65c0b58d..bd74901a1255 100644
--- a/drivers/phy/allwinner/Makefile
+++ b/drivers/phy/allwinner/Makefile
@@ -2,3 +2,4 @@
obj-$(CONFIG_PHY_SUN4I_USB) += phy-sun4i-usb.o
obj-$(CONFIG_PHY_SUN6I_MIPI_DPHY) += phy-sun6i-mipi-dphy.o
obj-$(CONFIG_PHY_SUN9I_USB) += phy-sun9i-usb.o
+obj-$(CONFIG_PHY_SUN50I_USB3) += phy-sun50i-usb3.o
diff --git a/drivers/phy/allwinner/phy-sun50i-usb3.c b/drivers/phy/allwinner/phy-sun50i-usb3.c
new file mode 100644
index 000000000000..1169f3e83a6f
--- /dev/null
+++ b/drivers/phy/allwinner/phy-sun50i-usb3.c
@@ -0,0 +1,190 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Allwinner sun50i (H6) USB 3.0 phy driver
+ *
+ * Copyright (C) 2017 Icenowy Zheng <icenowy@aosc.io>
+ *
+ * Based on phy-sun9i-usb.c, which is:
+ *
+ * Copyright (C) 2014-2015 Chen-Yu Tsai <wens@csie.org>
+ *
+ * Based on code from Allwinner BSP, which is:
+ *
+ * Copyright (c) 2010-2015 Allwinner Technology Co., Ltd.
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+/* Interface Status and Control Registers */
+#define SUNXI_ISCR 0x00
+#define SUNXI_PIPE_CLOCK_CONTROL 0x14
+#define SUNXI_PHY_TUNE_LOW 0x18
+#define SUNXI_PHY_TUNE_HIGH 0x1c
+#define SUNXI_PHY_EXTERNAL_CONTROL 0x20
+
+/* USB2.0 Interface Status and Control Register */
+#define SUNXI_ISCR_FORCE_VBUS (3 << 12)
+
+/* PIPE Clock Control Register */
+#define SUNXI_PCC_PIPE_CLK_OPEN (1 << 6)
+
+/* PHY External Control Register */
+#define SUNXI_PEC_EXTERN_VBUS (3 << 1)
+#define SUNXI_PEC_SSC_EN (1 << 24)
+#define SUNXI_PEC_REF_SSP_EN (1 << 26)
+
+/* PHY Tune High Register */
+#define SUNXI_TX_DEEMPH_3P5DB(n) ((n) << 19)
+#define SUNXI_TX_DEEMPH_3P5DB_MASK GENMASK(24, 19)
+#define SUNXI_TX_DEEMPH_6DB(n) ((n) << 13)
+#define SUNXI_TX_DEEMPH_6DB_MASK GENMASK(18, 13)
+#define SUNXI_TX_SWING_FULL(n) ((n) << 6)
+#define SUNXI_TX_SWING_FULL_MASK GENMASK(12, 6)
+#define SUNXI_LOS_BIAS(n) ((n) << 3)
+#define SUNXI_LOS_BIAS_MASK GENMASK(5, 3)
+#define SUNXI_TXVBOOSTLVL(n) ((n) << 0)
+#define SUNXI_TXVBOOSTLVL_MASK GENMASK(2, 0)
+
+struct sun50i_usb3_phy {
+ struct phy *phy;
+ void __iomem *regs;
+ struct reset_control *reset;
+ struct clk *clk;
+};
+
+static void sun50i_usb3_phy_open(struct sun50i_usb3_phy *phy)
+{
+ u32 val;
+
+ val = readl(phy->regs + SUNXI_PHY_EXTERNAL_CONTROL);
+ val |= SUNXI_PEC_EXTERN_VBUS;
+ val |= SUNXI_PEC_SSC_EN | SUNXI_PEC_REF_SSP_EN;
+ writel(val, phy->regs + SUNXI_PHY_EXTERNAL_CONTROL);
+
+ val = readl(phy->regs + SUNXI_PIPE_CLOCK_CONTROL);
+ val |= SUNXI_PCC_PIPE_CLK_OPEN;
+ writel(val, phy->regs + SUNXI_PIPE_CLOCK_CONTROL);
+
+ val = readl(phy->regs + SUNXI_ISCR);
+ val |= SUNXI_ISCR_FORCE_VBUS;
+ writel(val, phy->regs + SUNXI_ISCR);
+
+ /*
+ * All the magic numbers written to the PHY_TUNE_{LOW,HIGH}
+ * registers are taken directly from the Allwinner BSP USB3
+ * driver.
+ */
+ writel(0x0047fc87, phy->regs + SUNXI_PHY_TUNE_LOW);
+
+ val = readl(phy->regs + SUNXI_PHY_TUNE_HIGH);
+ val &= ~(SUNXI_TXVBOOSTLVL_MASK | SUNXI_LOS_BIAS_MASK |
+ SUNXI_TX_SWING_FULL_MASK | SUNXI_TX_DEEMPH_6DB_MASK |
+ SUNXI_TX_DEEMPH_3P5DB_MASK);
+ val |= SUNXI_TXVBOOSTLVL(0x7);
+ val |= SUNXI_LOS_BIAS(0x7);
+ val |= SUNXI_TX_SWING_FULL(0x55);
+ val |= SUNXI_TX_DEEMPH_6DB(0x20);
+ val |= SUNXI_TX_DEEMPH_3P5DB(0x15);
+ writel(val, phy->regs + SUNXI_PHY_TUNE_HIGH);
+}
+
+static int sun50i_usb3_phy_init(struct phy *_phy)
+{
+ struct sun50i_usb3_phy *phy = phy_get_drvdata(_phy);
+ int ret;
+
+ ret = clk_prepare_enable(phy->clk);
+ if (ret)
+ return ret;
+
+ ret = reset_control_deassert(phy->reset);
+ if (ret) {
+ clk_disable_unprepare(phy->clk);
+ return ret;
+ }
+
+ sun50i_usb3_phy_open(phy);
+ return 0;
+}
+
+static int sun50i_usb3_phy_exit(struct phy *_phy)
+{
+ struct sun50i_usb3_phy *phy = phy_get_drvdata(_phy);
+
+ reset_control_assert(phy->reset);
+ clk_disable_unprepare(phy->clk);
+
+ return 0;
+}
+
+static const struct phy_ops sun50i_usb3_phy_ops = {
+ .init = sun50i_usb3_phy_init,
+ .exit = sun50i_usb3_phy_exit,
+ .owner = THIS_MODULE,
+};
+
+static int sun50i_usb3_phy_probe(struct platform_device *pdev)
+{
+ struct sun50i_usb3_phy *phy;
+ struct device *dev = &pdev->dev;
+ struct phy_provider *phy_provider;
+ struct resource *res;
+
+ phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
+ if (!phy)
+ return -ENOMEM;
+
+ phy->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(phy->clk)) {
+ if (PTR_ERR(phy->clk) != -EPROBE_DEFER)
+ dev_err(dev, "failed to get phy clock\n");
+ return PTR_ERR(phy->clk);
+ }
+
+ phy->reset = devm_reset_control_get(dev, NULL);
+ if (IS_ERR(phy->reset)) {
+ dev_err(dev, "failed to get reset control\n");
+ return PTR_ERR(phy->reset);
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ phy->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(phy->regs))
+ return PTR_ERR(phy->regs);
+
+ phy->phy = devm_phy_create(dev, NULL, &sun50i_usb3_phy_ops);
+ if (IS_ERR(phy->phy)) {
+ dev_err(dev, "failed to create PHY\n");
+ return PTR_ERR(phy->phy);
+ }
+
+ phy_set_drvdata(phy->phy, phy);
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+
+ return PTR_ERR_OR_ZERO(phy_provider);
+}
+
+static const struct of_device_id sun50i_usb3_phy_of_match[] = {
+ { .compatible = "allwinner,sun50i-h6-usb3-phy" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, sun50i_usb3_phy_of_match);
+
+static struct platform_driver sun50i_usb3_phy_driver = {
+ .probe = sun50i_usb3_phy_probe,
+ .driver = {
+ .of_match_table = sun50i_usb3_phy_of_match,
+ .name = "sun50i-usb3-phy",
+ }
+};
+module_platform_driver(sun50i_usb3_phy_driver);
+
+MODULE_DESCRIPTION("Allwinner H6 USB 3.0 phy driver");
+MODULE_AUTHOR("Icenowy Zheng <icenowy@aosc.io>");
+MODULE_LICENSE("GPL");
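The new driver's init/exit pair follows the usual bring-up ordering: enable the clock, then deassert the reset, and unwind in exact reverse on teardown (with the error path disabling the clock if the reset fails). From a consumer's point of view only the generic PHY API is visible; a hypothetical usage sketch (the con_id is an assumption):

/* Hypothetical consumer-side usage of the generic PHY API. */
static int usb3_controller_attach(struct device *dev)
{
	struct phy *usb3_phy;
	int ret;

	usb3_phy = devm_phy_get(dev, "usb3-phy");	/* con_id is an assumption */
	if (IS_ERR(usb3_phy))
		return PTR_ERR(usb3_phy);

	ret = phy_init(usb3_phy);	/* ends up in sun50i_usb3_phy_init() */
	if (ret)
		return ret;
	/* ... controller in use; phy_exit() asserts reset, gates the clock */
	return 0;
}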
diff --git a/drivers/phy/broadcom/phy-brcm-usb-init.c b/drivers/phy/broadcom/phy-brcm-usb-init.c
index 3c53625f8bc2..91b5b09589d6 100644
--- a/drivers/phy/broadcom/phy-brcm-usb-init.c
+++ b/drivers/phy/broadcom/phy-brcm-usb-init.c
@@ -126,8 +126,8 @@ enum {
USB_CTRL_SELECTOR_COUNT,
};
-#define USB_CTRL_REG(base, reg) ((void *)base + USB_CTRL_##reg)
-#define USB_XHCI_EC_REG(base, reg) ((void *)base + USB_XHCI_EC_##reg)
+#define USB_CTRL_REG(base, reg) ((void __iomem *)base + USB_CTRL_##reg)
+#define USB_XHCI_EC_REG(base, reg) ((void __iomem *)base + USB_XHCI_EC_##reg)
#define USB_CTRL_MASK(reg, field) \
USB_CTRL_##reg##_##field##_MASK
#define USB_CTRL_MASK_FAMILY(params, reg, field) \
@@ -416,7 +416,7 @@ void usb_ctrl_unset_family(struct brcm_usb_init_params *params,
u32 reg_offset, u32 field)
{
u32 mask;
- void *reg;
+ void __iomem *reg;
mask = params->usb_reg_bits_map[field];
reg = params->ctrl_regs + reg_offset;
@@ -428,7 +428,7 @@ void usb_ctrl_set_family(struct brcm_usb_init_params *params,
u32 reg_offset, u32 field)
{
u32 mask;
- void *reg;
+ void __iomem *reg;
mask = params->usb_reg_bits_map[field];
reg = params->ctrl_regs + reg_offset;
@@ -707,7 +707,7 @@ static void brcmusb_usb3_otp_fix(struct brcm_usb_init_params *params)
void __iomem *xhci_ec_base = params->xhci_ec_regs;
u32 val;
- if (params->family_id != 0x74371000 || xhci_ec_base == 0)
+ if (params->family_id != 0x74371000 || !xhci_ec_base)
return;
brcmusb_writel(0xa20c, USB_XHCI_EC_REG(xhci_ec_base, IRAADR));
val = brcmusb_readl(USB_XHCI_EC_REG(xhci_ec_base, IRADAT));
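The __iomem annotations above are for sparse: they mark MMIO pointers that must only be dereferenced through the accessor functions. A minimal illustration of the pattern being enforced:

/* Sketch: __iomem pointers go through readl()/writel(), never plain loads. */
static u32 reg_set_bits(void __iomem *reg, u32 mask)
{
	u32 val = readl(reg);		/* ok: accessor */

	writel(val | mask, reg);	/* ok: accessor */
	return val;			/* a plain *reg would trip sparse */
}

The !xhci_ec_base change in the same file is the companion cleanup: testing a pointer against 0 instead of with ! is another classic sparse warning.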
diff --git a/drivers/phy/hisilicon/phy-hisi-inno-usb2.c b/drivers/phy/hisilicon/phy-hisi-inno-usb2.c
index 9b16f13b5ab2..34a6a9a1ceb2 100644
--- a/drivers/phy/hisilicon/phy-hisi-inno-usb2.c
+++ b/drivers/phy/hisilicon/phy-hisi-inno-usb2.c
@@ -114,7 +114,6 @@ static int hisi_inno_phy_probe(struct platform_device *pdev)
struct hisi_inno_phy_priv *priv;
struct phy_provider *provider;
struct device_node *child;
- struct resource *res;
int i = 0;
int ret;
@@ -122,8 +121,7 @@ static int hisi_inno_phy_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->mmio = devm_ioremap_resource(dev, res);
+ priv->mmio = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->mmio)) {
ret = PTR_ERR(priv->mmio);
return ret;
diff --git a/drivers/phy/hisilicon/phy-histb-combphy.c b/drivers/phy/hisilicon/phy-histb-combphy.c
index 62d10ef20296..f1cb3e4d2add 100644
--- a/drivers/phy/hisilicon/phy-histb-combphy.c
+++ b/drivers/phy/hisilicon/phy-histb-combphy.c
@@ -195,7 +195,6 @@ static int histb_combphy_probe(struct platform_device *pdev)
struct histb_combphy_priv *priv;
struct device_node *np = dev->of_node;
struct histb_combphy_mode *mode;
- struct resource *res;
u32 vals[3];
int ret;
@@ -203,8 +202,7 @@ static int histb_combphy_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->mmio = devm_ioremap_resource(dev, res);
+ priv->mmio = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->mmio)) {
ret = PTR_ERR(priv->mmio);
return ret;
diff --git a/drivers/phy/lantiq/phy-lantiq-vrx200-pcie.c b/drivers/phy/lantiq/phy-lantiq-vrx200-pcie.c
index 544d64a84cc0..6e457967653e 100644
--- a/drivers/phy/lantiq/phy-lantiq-vrx200-pcie.c
+++ b/drivers/phy/lantiq/phy-lantiq-vrx200-pcie.c
@@ -323,7 +323,8 @@ static int ltq_vrx200_pcie_phy_power_on(struct phy *phy)
goto err_disable_pdi_clk;
/* Check if we are in "startup ready" status */
- if (ltq_vrx200_pcie_phy_wait_for_pll(phy) != 0)
+ ret = ltq_vrx200_pcie_phy_wait_for_pll(phy);
+ if (ret)
goto err_disable_phy_clk;
ltq_vrx200_pcie_phy_apply_workarounds(phy);
diff --git a/drivers/phy/marvell/phy-mvebu-a3700-utmi.c b/drivers/phy/marvell/phy-mvebu-a3700-utmi.c
index ded900b06f5a..23bc3bf5c4c0 100644
--- a/drivers/phy/marvell/phy-mvebu-a3700-utmi.c
+++ b/drivers/phy/marvell/phy-mvebu-a3700-utmi.c
@@ -216,20 +216,13 @@ static int mvebu_a3700_utmi_phy_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct mvebu_a3700_utmi *utmi;
struct phy_provider *provider;
- struct resource *res;
utmi = devm_kzalloc(dev, sizeof(*utmi), GFP_KERNEL);
if (!utmi)
return -ENOMEM;
/* Get UTMI memory region */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(dev, "Missing UTMI PHY memory resource\n");
- return -ENODEV;
- }
-
- utmi->regs = devm_ioremap_resource(dev, res);
+ utmi->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(utmi->regs))
return PTR_ERR(utmi->regs);
diff --git a/drivers/phy/phy-xgene.c b/drivers/phy/phy-xgene.c
index 3c9189473407..7a33ec12f71b 100644
--- a/drivers/phy/phy-xgene.c
+++ b/drivers/phy/phy-xgene.c
@@ -1342,7 +1342,7 @@ static int xgene_phy_hw_initialize(struct xgene_phy_ctx *ctx,
static void xgene_phy_force_lat_summer_cal(struct xgene_phy_ctx *ctx, int lane)
{
int i;
- struct {
+ static const struct {
u32 reg;
u32 val;
} serdes_reg[] = {
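Making the one-shot register/value table static const moves it into .rodata instead of rebuilding it on the stack on every call; the surrounding loop is unchanged. The general idiom, sketched with placeholder entries:

/* Sketch of the static-const lookup-table idiom. */
static void apply_init_seq(void __iomem *base)
{
	static const struct {
		u32 reg;
		u32 val;
	} init_seq[] = {
		{ 0x0a, 0x0000 },	/* placeholder entries */
		{ 0x0b, 0x0001 },
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(init_seq); i++)
		writel(init_seq[i].val, base + init_seq[i].reg);
}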
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.c b/drivers/phy/qualcomm/phy-qcom-qmp.c
index 39e8deb8001e..091e20303a14 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp.c
@@ -165,6 +165,11 @@ static const unsigned int sdm845_ufsphy_regs_layout[] = {
[QPHY_PCS_READY_STATUS] = 0x160,
};
+static const unsigned int sm8150_ufsphy_regs_layout[] = {
+ [QPHY_START_CTRL] = 0x00,
+ [QPHY_PCS_READY_STATUS] = 0x180,
+};
+
static const struct qmp_phy_init_tbl msm8996_pcie_serdes_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x1c),
QMP_PHY_INIT_CFG(QSERDES_COM_CLK_ENABLE1, 0x10),
@@ -879,6 +884,93 @@ static const struct qmp_phy_init_tbl msm8998_usb3_pcs_tbl[] = {
QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_RUN_TIME, 0x13),
};
+static const struct qmp_phy_init_tbl sm8150_ufsphy_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_POWER_DOWN_CONTROL, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYSCLK_EN_SEL, 0xd9),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_HSCLK_SEL, 0x11),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_HSCLK_HS_SWITCH_SEL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP_EN, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE_MAP, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_IVCO, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE_INITVAL2, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_HSCLK_SEL, 0x11),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CP_CTRL_MODE0, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_CCTRL_MODE0, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP1_MODE0, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP2_MODE0, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0xac),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x1e),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DEC_START_MODE1, 0x98),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CP_CTRL_MODE1, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_RCTRL_MODE1, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_CCTRL_MODE1, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP1_MODE1, 0x32),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP2_MODE1, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE1, 0xdd),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE1, 0x23),
+
+ /* Rate B */
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE_MAP, 0x06),
+};
+
+static const struct qmp_phy_init_tbl sm8150_ufsphy_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_PWM_GEAR_1_DIVIDER_BAND0_1, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_PWM_GEAR_2_DIVIDER_BAND0_1, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_PWM_GEAR_3_DIVIDER_BAND0_1, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_PWM_GEAR_4_DIVIDER_BAND0_1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_LANE_MODE_1, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_TRAN_DRVR_EMP_EN, 0x0c),
+};
+
+static const struct qmp_phy_init_tbl sm8150_ufsphy_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_LVL, 0x24),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_CNTRL, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_DEGLITCH_CNTRL, 0x1e),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_BAND, 0x18),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_FO_GAIN, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x4b),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_PI_CONTROLS, 0xf1),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_COUNT_LOW, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_PI_CTRL2, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FO_GAIN, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_GAIN, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_TERM_BW, 0x1b),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL2, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL3, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL4, 0x1d),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_MEASURE_TIME, 0x10),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_LOW, 0xc0),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_HIGH, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_LOW, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH2, 0xf6),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH3, 0x3b),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH4, 0x3d),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_LOW, 0xe0),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH, 0xc8),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH2, 0xc8),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH3, 0x3b),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH4, 0xb1),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_LOW, 0xe0),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH, 0xc8),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH2, 0xc8),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH3, 0x3b),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH4, 0xb1),
+};
+
+static const struct qmp_phy_init_tbl sm8150_ufsphy_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V4_RX_SIGDET_CTRL2, 0x6d),
+ QMP_PHY_INIT_CFG(QPHY_V4_TX_LARGE_AMP_DRV_LVL, 0x0a),
+ QMP_PHY_INIT_CFG(QPHY_V4_TX_SMALL_AMP_DRV_LVL, 0x02),
+ QMP_PHY_INIT_CFG(QPHY_V4_TX_MID_TERM_CTRL1, 0x43),
+ QMP_PHY_INIT_CFG(QPHY_V4_DEBUG_BUS_CLKSEL, 0x1f),
+ QMP_PHY_INIT_CFG(QPHY_V4_RX_MIN_HIBERN8_TIME, 0xff),
+ QMP_PHY_INIT_CFG(QPHY_V4_MULTI_LANE_CTRL1, 0x02),
+};
/* struct qmp_phy_cfg - per-PHY initialization config */
struct qmp_phy_cfg {
@@ -1276,6 +1368,31 @@ static const struct qmp_phy_cfg msm8998_usb3phy_cfg = {
.is_dual_lane_phy = true,
};
+static const struct qmp_phy_cfg sm8150_ufsphy_cfg = {
+ .type = PHY_TYPE_UFS,
+ .nlanes = 2,
+
+ .serdes_tbl = sm8150_ufsphy_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(sm8150_ufsphy_serdes_tbl),
+ .tx_tbl = sm8150_ufsphy_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(sm8150_ufsphy_tx_tbl),
+ .rx_tbl = sm8150_ufsphy_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(sm8150_ufsphy_rx_tbl),
+ .pcs_tbl = sm8150_ufsphy_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(sm8150_ufsphy_pcs_tbl),
+ .clk_list = sdm845_ufs_phy_clk_l,
+ .num_clks = ARRAY_SIZE(sdm845_ufs_phy_clk_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = sm8150_ufsphy_regs_layout,
+
+ .start_ctrl = SERDES_START,
+ .pwrdn_ctrl = SW_PWRDN,
+
+ .is_dual_lane_phy = true,
+ .no_pcs_sw_reset = true,
+};
+
static void qcom_qmp_phy_configure(void __iomem *base,
const unsigned int *regs,
const struct qmp_phy_init_tbl tbl[],
@@ -1998,6 +2115,9 @@ static const struct of_device_id qcom_qmp_phy_of_match_table[] = {
}, {
.compatible = "qcom,msm8998-qmp-usb3-phy",
.data = &msm8998_usb3phy_cfg,
+ }, {
+ .compatible = "qcom,sm8150-qmp-ufs-phy",
+ .data = &sm8150_ufsphy_cfg,
},
{ },
};
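Each compatible string maps to a const qmp_phy_cfg describing that SoC's init tables and register layout, so adding sm8150 UFS support is almost entirely data. The probe side retrieves the per-compatible data through the standard helper; a sketch of the assumed pattern:

/* Sketch: per-compatible config selection at probe time. */
static int qmp_probe_sketch(struct platform_device *pdev)
{
	const struct qmp_phy_cfg *cfg;

	cfg = of_device_get_match_data(&pdev->dev);
	if (!cfg)
		return -EINVAL;

	/* cfg->serdes_tbl, cfg->regs, ... now describe this SoC's PHY */
	return 0;
}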
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.h b/drivers/phy/qualcomm/phy-qcom-qmp.h
index 335ea5d7ef40..ab6ff9b45a32 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp.h
@@ -313,4 +313,100 @@
#define QPHY_V3_PCS_MISC_OSC_DTCT_MODE2_CONFIG4 0x5c
#define QPHY_V3_PCS_MISC_OSC_DTCT_MODE2_CONFIG5 0x60
+/* Only for QMP V4 PHY - QSERDES COM registers */
+#define QSERDES_V4_COM_PLL_IVCO 0x058
+#define QSERDES_V4_COM_CMN_IPTRIM 0x060
+#define QSERDES_V4_COM_CP_CTRL_MODE0 0x074
+#define QSERDES_V4_COM_CP_CTRL_MODE1 0x078
+#define QSERDES_V4_COM_PLL_RCTRL_MODE0 0x07c
+#define QSERDES_V4_COM_PLL_RCTRL_MODE1 0x080
+#define QSERDES_V4_COM_PLL_CCTRL_MODE0 0x084
+#define QSERDES_V4_COM_PLL_CCTRL_MODE1 0x088
+#define QSERDES_V4_COM_SYSCLK_EN_SEL 0x094
+#define QSERDES_V4_COM_LOCK_CMP_EN 0x0a4
+#define QSERDES_V4_COM_LOCK_CMP1_MODE0 0x0ac
+#define QSERDES_V4_COM_LOCK_CMP2_MODE0 0x0b0
+#define QSERDES_V4_COM_LOCK_CMP1_MODE1 0x0b4
+#define QSERDES_V4_COM_DEC_START_MODE0 0x0bc
+#define QSERDES_V4_COM_LOCK_CMP2_MODE1 0x0b8
+#define QSERDES_V4_COM_DEC_START_MODE1 0x0c4
+#define QSERDES_V4_COM_VCO_TUNE_MAP 0x10c
+#define QSERDES_V4_COM_VCO_TUNE_INITVAL2 0x124
+#define QSERDES_V4_COM_HSCLK_SEL 0x158
+#define QSERDES_V4_COM_HSCLK_HS_SWITCH_SEL 0x15c
+#define QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE0 0x1ac
+#define QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE0 0x1b0
+#define QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE1 0x1b4
+#define QSERDES_V4_COM_BIN_VCOCAL_HSCLK_SEL 0x1bc
+#define QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE1 0x1b8
+
+/* Only for QMP V4 PHY - TX registers */
+#define QSERDES_V4_TX_LANE_MODE_1 0x84
+#define QSERDES_V4_TX_PWM_GEAR_1_DIVIDER_BAND0_1 0xd8
+#define QSERDES_V4_TX_PWM_GEAR_2_DIVIDER_BAND0_1 0xdc
+#define QSERDES_V4_TX_PWM_GEAR_3_DIVIDER_BAND0_1 0xe0
+#define QSERDES_V4_TX_PWM_GEAR_4_DIVIDER_BAND0_1 0xe4
+#define QSERDES_V4_TX_TRAN_DRVR_EMP_EN 0xb8
+
+/* Only for QMP V4 PHY - RX registers */
+#define QSERDES_V4_RX_UCDR_FO_GAIN 0x008
+#define QSERDES_V4_RX_UCDR_SO_GAIN 0x014
+#define QSERDES_V4_RX_UCDR_FASTLOCK_FO_GAIN 0x030
+#define QSERDES_V4_RX_UCDR_SO_SATURATION_AND_ENABLE 0x034
+#define QSERDES_V4_RX_UCDR_FASTLOCK_COUNT_LOW 0x03c
+#define QSERDES_V4_RX_UCDR_PI_CONTROLS 0x044
+#define QSERDES_V4_RX_UCDR_PI_CTRL2 0x048
+#define QSERDES_V4_RX_AC_JTAG_ENABLE 0x068
+#define QSERDES_V4_RX_AC_JTAG_MODE 0x078
+#define QSERDES_V4_RX_RX_TERM_BW 0x080
+#define QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL2 0x0ec
+#define QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL3 0x0f0
+#define QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL4 0x0f4
+#define QSERDES_V4_RX_RX_IDAC_TSETTLE_LOW 0x0f8
+#define QSERDES_V4_RX_RX_IDAC_TSETTLE_HIGH 0x0fc
+#define QSERDES_V4_RX_RX_IDAC_MEASURE_TIME 0x100
+#define QSERDES_V4_RX_RX_OFFSET_ADAPTOR_CNTRL2 0x114
+#define QSERDES_V4_RX_SIGDET_CNTRL 0x11c
+#define QSERDES_V4_RX_SIGDET_LVL 0x120
+#define QSERDES_V4_RX_SIGDET_DEGLITCH_CNTRL 0x124
+#define QSERDES_V4_RX_RX_BAND 0x128
+#define QSERDES_V4_RX_RX_MODE_00_LOW 0x170
+#define QSERDES_V4_RX_RX_MODE_00_HIGH 0x174
+#define QSERDES_V4_RX_RX_MODE_00_HIGH2 0x178
+#define QSERDES_V4_RX_RX_MODE_00_HIGH3 0x17c
+#define QSERDES_V4_RX_RX_MODE_00_HIGH4 0x180
+#define QSERDES_V4_RX_RX_MODE_01_LOW 0x184
+#define QSERDES_V4_RX_RX_MODE_01_HIGH 0x188
+#define QSERDES_V4_RX_RX_MODE_01_HIGH2 0x18c
+#define QSERDES_V4_RX_RX_MODE_01_HIGH3 0x190
+#define QSERDES_V4_RX_RX_MODE_01_HIGH4 0x194
+#define QSERDES_V4_RX_RX_MODE_10_LOW 0x198
+#define QSERDES_V4_RX_RX_MODE_10_HIGH 0x19c
+#define QSERDES_V4_RX_RX_MODE_10_HIGH2 0x1a0
+#define QSERDES_V4_RX_RX_MODE_10_HIGH3 0x1a4
+#define QSERDES_V4_RX_RX_MODE_10_HIGH4 0x1a8
+#define QSERDES_V4_RX_DCC_CTRL1 0x1bc
+
+/* Only for QMP V4 PHY - PCS registers */
+#define QPHY_V4_PHY_START 0x000
+#define QPHY_V4_POWER_DOWN_CONTROL 0x004
+#define QPHY_V4_SW_RESET 0x008
+#define QPHY_V4_TIMER_20US_CORECLK_STEPS_MSB 0x00c
+#define QPHY_V4_TIMER_20US_CORECLK_STEPS_LSB 0x010
+#define QPHY_V4_PLL_CNTL 0x02c
+#define QPHY_V4_TX_LARGE_AMP_DRV_LVL 0x030
+#define QPHY_V4_TX_SMALL_AMP_DRV_LVL 0x038
+#define QPHY_V4_BIST_FIXED_PAT_CTRL 0x060
+#define QPHY_V4_TX_HSGEAR_CAPABILITY 0x074
+#define QPHY_V4_RX_HSGEAR_CAPABILITY 0x0b4
+#define QPHY_V4_DEBUG_BUS_CLKSEL 0x124
+#define QPHY_V4_LINECFG_DISABLE 0x148
+#define QPHY_V4_RX_MIN_HIBERN8_TIME 0x150
+#define QPHY_V4_RX_SIGDET_CTRL2 0x158
+#define QPHY_V4_TX_PWM_GEAR_BAND 0x160
+#define QPHY_V4_TX_HS_GEAR_BAND 0x168
+#define QPHY_V4_PCS_READY_STATUS 0x180
+#define QPHY_V4_TX_MID_TERM_CTRL1 0x1d8
+#define QPHY_V4_MULTI_LANE_CTRL1 0x1e0
+
#endif
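The sm8150_ufsphy_regs_layout table added earlier exists because PCS register offsets move between PHY generations (PCS_READY_STATUS is 0x160 on sdm845 but 0x180 here), so common code indexes a per-cfg table instead of hard-coding offsets. A hedged sketch; the ready-bit position is an assumption for illustration:

/* Sketch: generation-independent PCS access via the layout table. */
static bool qmp_pcs_ready_sketch(void __iomem *pcs, const unsigned int *regs)
{
	return readl(pcs + regs[QPHY_PCS_READY_STATUS]) & BIT(0);
}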
diff --git a/drivers/phy/qualcomm/phy-qcom-usb-hs.c b/drivers/phy/qualcomm/phy-qcom-usb-hs.c
index b163b3a1558d..61054272a7c8 100644
--- a/drivers/phy/qualcomm/phy-qcom-usb-hs.c
+++ b/drivers/phy/qualcomm/phy-qcom-usb-hs.c
@@ -158,8 +158,8 @@ static int qcom_usb_hs_phy_power_on(struct phy *phy)
/* setup initial state */
qcom_usb_hs_phy_vbus_notifier(&uphy->vbus_notify, state,
uphy->vbus_edev);
- ret = devm_extcon_register_notifier(&ulpi->dev, uphy->vbus_edev,
- EXTCON_USB, &uphy->vbus_notify);
+ ret = extcon_register_notifier(uphy->vbus_edev, EXTCON_USB,
+ &uphy->vbus_notify);
if (ret)
goto err_ulpi;
}
@@ -180,6 +180,9 @@ static int qcom_usb_hs_phy_power_off(struct phy *phy)
{
struct qcom_usb_hs_phy *uphy = phy_get_drvdata(phy);
+ if (uphy->vbus_edev)
+ extcon_unregister_notifier(uphy->vbus_edev, EXTCON_USB,
+ &uphy->vbus_notify);
regulator_disable(uphy->v3p3);
regulator_disable(uphy->v1p8);
clk_disable_unprepare(uphy->sleep_clk);
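Replacing devm_extcon_register_notifier() with the plain register/unregister pair matters because power_on/power_off can run many times over the device's lifetime: a devm registration is only released at driver detach, so every extra power_on would stack another notifier. The fix balances each registration explicitly:

/* Sketch of the balanced pattern introduced above (error paths trimmed). */
static int vbus_notify_attach(struct qcom_usb_hs_phy *uphy)
{
	return extcon_register_notifier(uphy->vbus_edev, EXTCON_USB,
					&uphy->vbus_notify);
}

static void vbus_notify_detach(struct qcom_usb_hs_phy *uphy)
{
	if (uphy->vbus_edev)
		extcon_unregister_notifier(uphy->vbus_edev, EXTCON_USB,
					   &uphy->vbus_notify);
}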
diff --git a/drivers/phy/renesas/phy-rcar-gen2.c b/drivers/phy/renesas/phy-rcar-gen2.c
index 2926e4937301..2e279ac0fa4d 100644
--- a/drivers/phy/renesas/phy-rcar-gen2.c
+++ b/drivers/phy/renesas/phy-rcar-gen2.c
@@ -71,6 +71,7 @@ struct rcar_gen2_phy_driver {
struct rcar_gen2_phy_data {
const struct phy_ops *gen2_phy_ops;
const u32 (*select_value)[PHYS_PER_CHANNEL];
+ const u32 num_channels;
};
static int rcar_gen2_phy_init(struct phy *p)
@@ -271,11 +272,13 @@ static const u32 usb20_select_value[][PHYS_PER_CHANNEL] = {
static const struct rcar_gen2_phy_data rcar_gen2_usb_phy_data = {
.gen2_phy_ops = &rcar_gen2_phy_ops,
.select_value = pci_select_value,
+ .num_channels = ARRAY_SIZE(pci_select_value),
};
static const struct rcar_gen2_phy_data rz_g1c_usb_phy_data = {
.gen2_phy_ops = &rz_g1c_phy_ops,
.select_value = usb20_select_value,
+ .num_channels = ARRAY_SIZE(usb20_select_value),
};
static const struct of_device_id rcar_gen2_phy_match_table[] = {
@@ -389,7 +392,7 @@ static int rcar_gen2_phy_probe(struct platform_device *pdev)
channel->selected_phy = -1;
error = of_property_read_u32(np, "reg", &channel_num);
- if (error || channel_num > 2) {
+ if (error || channel_num >= data->num_channels) {
dev_err(dev, "Invalid \"reg\" property\n");
of_node_put(np);
return error;
diff --git a/drivers/phy/renesas/phy-rcar-gen3-usb2.c b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
index b7f6b1324395..bfb22f868857 100644
--- a/drivers/phy/renesas/phy-rcar-gen3-usb2.c
+++ b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
@@ -21,6 +21,7 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
+#include <linux/string.h>
#include <linux/usb/of.h>
#include <linux/workqueue.h>
@@ -320,9 +321,9 @@ static ssize_t role_store(struct device *dev, struct device_attribute *attr,
if (!ch->is_otg_channel || !rcar_gen3_is_any_rphy_initialized(ch))
return -EIO;
- if (!strncmp(buf, "host", strlen("host")))
+ if (sysfs_streq(buf, "host"))
new_mode = PHY_MODE_USB_HOST;
- else if (!strncmp(buf, "peripheral", strlen("peripheral")))
+ else if (sysfs_streq(buf, "peripheral"))
new_mode = PHY_MODE_USB_DEVICE;
else
return -EINVAL;
@@ -614,7 +615,7 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
return PTR_ERR(channel->base);
/* call request_irq for OTG */
- irq = platform_get_irq(pdev, 0);
+ irq = platform_get_irq_optional(pdev, 0);
if (irq >= 0) {
INIT_WORK(&channel->work, rcar_gen3_phy_usb2_work);
irq = devm_request_irq(dev, irq, rcar_gen3_phy_usb2_irq,
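The role_store() change above is a correctness fix: strncmp(buf, "host", strlen("host")) is a prefix test, so any string beginning with "host" (e.g. "hostile") was accepted, while sysfs_streq() requires a full match and still tolerates the trailing newline that echo appends. A simplified analogue of its semantics:

/* Simplified sketch of sysfs_streq() semantics (one-sided for brevity). */
static bool streq_sketch(const char *buf, const char *s)
{
	size_t len = strlen(s);

	if (strncmp(buf, s, len))
		return false;
	/* exact match, or match followed only by a trailing '\n' */
	return buf[len] == '\0' || (buf[len] == '\n' && buf[len + 1] == '\0');
}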
diff --git a/drivers/phy/rockchip/Kconfig b/drivers/phy/rockchip/Kconfig
index c454c90cd99e..dbd2de4d28b1 100644
--- a/drivers/phy/rockchip/Kconfig
+++ b/drivers/phy/rockchip/Kconfig
@@ -35,6 +35,14 @@ config PHY_ROCKCHIP_INNO_USB2
help
Support for Rockchip USB2.0 PHY with Innosilicon IP block.
+config PHY_ROCKCHIP_INNO_DSIDPHY
+ tristate "Rockchip Innosilicon MIPI/LVDS/TTL PHY driver"
+ depends on (ARCH_ROCKCHIP || COMPILE_TEST) && OF
+ select GENERIC_PHY
+ help
+ Enable this to support the Rockchip MIPI/LVDS/TTL PHY with
+ Innosilicon IP block.
+
config PHY_ROCKCHIP_PCIE
tristate "Rockchip PCIe PHY Driver"
depends on (ARCH_ROCKCHIP && OF) || COMPILE_TEST
diff --git a/drivers/phy/rockchip/Makefile b/drivers/phy/rockchip/Makefile
index fd21cbaf40dd..9f59a81e4e0d 100644
--- a/drivers/phy/rockchip/Makefile
+++ b/drivers/phy/rockchip/Makefile
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_PHY_ROCKCHIP_DP) += phy-rockchip-dp.o
obj-$(CONFIG_PHY_ROCKCHIP_EMMC) += phy-rockchip-emmc.o
+obj-$(CONFIG_PHY_ROCKCHIP_INNO_DSIDPHY) += phy-rockchip-inno-dsidphy.o
obj-$(CONFIG_PHY_ROCKCHIP_INNO_HDMI) += phy-rockchip-inno-hdmi.o
obj-$(CONFIG_PHY_ROCKCHIP_INNO_USB2) += phy-rockchip-inno-usb2.o
obj-$(CONFIG_PHY_ROCKCHIP_PCIE) += phy-rockchip-pcie.o
diff --git a/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c b/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c
new file mode 100644
index 000000000000..fc729ecd3fe9
--- /dev/null
+++ b/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c
@@ -0,0 +1,805 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018 Rockchip Electronics Co. Ltd.
+ *
+ * Author: Wyon Bi <bivvy.bi@rock-chips.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/iopoll.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/phy/phy.h>
+#include <linux/pm_runtime.h>
+#include <linux/mfd/syscon.h>
+
+#define PSEC_PER_SEC 1000000000000LL
+
+#define UPDATE(x, h, l) (((x) << (l)) & GENMASK((h), (l)))
+
+/*
+ * The offset address [7:0] is split into two parts: bits [7:5] form the
+ * first address and bits [4:0] form the second address. When configuring
+ * the registers, both parts must be set. The clock lane and the data
+ * lanes use the same registers with the same second address, but with
+ * different first addresses.
+ */
+#define FIRST_ADDRESS(x) (((x) & 0x7) << 5)
+#define SECOND_ADDRESS(x) (((x) & 0x1f) << 0)
+#define PHY_REG(first, second) (FIRST_ADDRESS(first) | \
+ SECOND_ADDRESS(second))
+
+/* Analog Register Part: reg00 */
+#define BANDGAP_POWER_MASK BIT(7)
+#define BANDGAP_POWER_DOWN BIT(7)
+#define BANDGAP_POWER_ON 0
+#define LANE_EN_MASK GENMASK(6, 2)
+#define LANE_EN_CK BIT(6)
+#define LANE_EN_3 BIT(5)
+#define LANE_EN_2 BIT(4)
+#define LANE_EN_1 BIT(3)
+#define LANE_EN_0 BIT(2)
+#define POWER_WORK_MASK GENMASK(1, 0)
+#define POWER_WORK_ENABLE UPDATE(1, 1, 0)
+#define POWER_WORK_DISABLE UPDATE(2, 1, 0)
+/* Analog Register Part: reg01 */
+#define REG_SYNCRST_MASK BIT(2)
+#define REG_SYNCRST_RESET BIT(2)
+#define REG_SYNCRST_NORMAL 0
+#define REG_LDOPD_MASK BIT(1)
+#define REG_LDOPD_POWER_DOWN BIT(1)
+#define REG_LDOPD_POWER_ON 0
+#define REG_PLLPD_MASK BIT(0)
+#define REG_PLLPD_POWER_DOWN BIT(0)
+#define REG_PLLPD_POWER_ON 0
+/* Analog Register Part: reg03 */
+#define REG_FBDIV_HI_MASK BIT(5)
+#define REG_FBDIV_HI(x) UPDATE((x >> 8), 5, 5)
+#define REG_PREDIV_MASK GENMASK(4, 0)
+#define REG_PREDIV(x) UPDATE(x, 4, 0)
+/* Analog Register Part: reg04 */
+#define REG_FBDIV_LO_MASK GENMASK(7, 0)
+#define REG_FBDIV_LO(x) UPDATE(x, 7, 0)
+/* Analog Register Part: reg05 */
+#define SAMPLE_CLOCK_PHASE_MASK GENMASK(6, 4)
+#define SAMPLE_CLOCK_PHASE(x) UPDATE(x, 6, 4)
+#define CLOCK_LANE_SKEW_PHASE_MASK GENMASK(2, 0)
+#define CLOCK_LANE_SKEW_PHASE(x) UPDATE(x, 2, 0)
+/* Analog Register Part: reg06 */
+#define DATA_LANE_3_SKEW_PHASE_MASK GENMASK(6, 4)
+#define DATA_LANE_3_SKEW_PHASE(x) UPDATE(x, 6, 4)
+#define DATA_LANE_2_SKEW_PHASE_MASK GENMASK(2, 0)
+#define DATA_LANE_2_SKEW_PHASE(x) UPDATE(x, 2, 0)
+/* Analog Register Part: reg07 */
+#define DATA_LANE_1_SKEW_PHASE_MASK GENMASK(6, 4)
+#define DATA_LANE_1_SKEW_PHASE(x) UPDATE(x, 6, 4)
+#define DATA_LANE_0_SKEW_PHASE_MASK GENMASK(2, 0)
+#define DATA_LANE_0_SKEW_PHASE(x) UPDATE(x, 2, 0)
+/* Analog Register Part: reg08 */
+#define SAMPLE_CLOCK_DIRECTION_MASK BIT(4)
+#define SAMPLE_CLOCK_DIRECTION_REVERSE BIT(4)
+#define SAMPLE_CLOCK_DIRECTION_FORWARD 0
+/* Digital Register Part: reg00 */
+#define REG_DIG_RSTN_MASK BIT(0)
+#define REG_DIG_RSTN_NORMAL BIT(0)
+#define REG_DIG_RSTN_RESET 0
+/* Digital Register Part: reg01 */
+#define INVERT_TXCLKESC_MASK BIT(1)
+#define INVERT_TXCLKESC_ENABLE BIT(1)
+#define INVERT_TXCLKESC_DISABLE 0
+#define INVERT_TXBYTECLKHS_MASK BIT(0)
+#define INVERT_TXBYTECLKHS_ENABLE BIT(0)
+#define INVERT_TXBYTECLKHS_DISABLE 0
+/* Clock/Data0/Data1/Data2/Data3 Lane Register Part: reg05 */
+#define T_LPX_CNT_MASK GENMASK(5, 0)
+#define T_LPX_CNT(x) UPDATE(x, 5, 0)
+/* Clock/Data0/Data1/Data2/Data3 Lane Register Part: reg06 */
+#define T_HS_PREPARE_CNT_MASK GENMASK(6, 0)
+#define T_HS_PREPARE_CNT(x) UPDATE(x, 6, 0)
+/* Clock/Data0/Data1/Data2/Data3 Lane Register Part: reg07 */
+#define T_HS_ZERO_CNT_MASK GENMASK(5, 0)
+#define T_HS_ZERO_CNT(x) UPDATE(x, 5, 0)
+/* Clock/Data0/Data1/Data2/Data3 Lane Register Part: reg08 */
+#define T_HS_TRAIL_CNT_MASK GENMASK(6, 0)
+#define T_HS_TRAIL_CNT(x) UPDATE(x, 6, 0)
+/* Clock/Data0/Data1/Data2/Data3 Lane Register Part: reg09 */
+#define T_HS_EXIT_CNT_MASK GENMASK(4, 0)
+#define T_HS_EXIT_CNT(x) UPDATE(x, 4, 0)
+/* Clock/Data0/Data1/Data2/Data3 Lane Register Part: reg0a */
+#define T_CLK_POST_CNT_MASK GENMASK(3, 0)
+#define T_CLK_POST_CNT(x) UPDATE(x, 3, 0)
+/* Clock/Data0/Data1/Data2/Data3 Lane Register Part: reg0c */
+#define LPDT_TX_PPI_SYNC_MASK BIT(2)
+#define LPDT_TX_PPI_SYNC_ENABLE BIT(2)
+#define LPDT_TX_PPI_SYNC_DISABLE 0
+#define T_WAKEUP_CNT_HI_MASK GENMASK(1, 0)
+#define T_WAKEUP_CNT_HI(x) UPDATE(x, 1, 0)
+/* Clock/Data0/Data1/Data2/Data3 Lane Register Part: reg0d */
+#define T_WAKEUP_CNT_LO_MASK GENMASK(7, 0)
+#define T_WAKEUP_CNT_LO(x) UPDATE(x, 7, 0)
+/* Clock/Data0/Data1/Data2/Data3 Lane Register Part: reg0e */
+#define T_CLK_PRE_CNT_MASK GENMASK(3, 0)
+#define T_CLK_PRE_CNT(x) UPDATE(x, 3, 0)
+/* Clock/Data0/Data1/Data2/Data3 Lane Register Part: reg10 */
+#define T_TA_GO_CNT_MASK GENMASK(5, 0)
+#define T_TA_GO_CNT(x) UPDATE(x, 5, 0)
+/* Clock/Data0/Data1/Data2/Data3 Lane Register Part: reg11 */
+#define T_TA_SURE_CNT_MASK GENMASK(5, 0)
+#define T_TA_SURE_CNT(x) UPDATE(x, 5, 0)
+/* Clock/Data0/Data1/Data2/Data3 Lane Register Part: reg12 */
+#define T_TA_WAIT_CNT_MASK GENMASK(5, 0)
+#define T_TA_WAIT_CNT(x) UPDATE(x, 5, 0)
+/* LVDS Register Part: reg00 */
+#define LVDS_DIGITAL_INTERNAL_RESET_MASK BIT(2)
+#define LVDS_DIGITAL_INTERNAL_RESET_DISABLE BIT(2)
+#define LVDS_DIGITAL_INTERNAL_RESET_ENABLE 0
+/* LVDS Register Part: reg01 */
+#define LVDS_DIGITAL_INTERNAL_ENABLE_MASK BIT(7)
+#define LVDS_DIGITAL_INTERNAL_ENABLE BIT(7)
+#define LVDS_DIGITAL_INTERNAL_DISABLE 0
+/* LVDS Register Part: reg03 */
+#define MODE_ENABLE_MASK GENMASK(2, 0)
+#define TTL_MODE_ENABLE BIT(2)
+#define LVDS_MODE_ENABLE BIT(1)
+#define MIPI_MODE_ENABLE BIT(0)
+/* LVDS Register Part: reg0b */
+#define LVDS_LANE_EN_MASK GENMASK(7, 3)
+#define LVDS_DATA_LANE0_EN BIT(7)
+#define LVDS_DATA_LANE1_EN BIT(6)
+#define LVDS_DATA_LANE2_EN BIT(5)
+#define LVDS_DATA_LANE3_EN BIT(4)
+#define LVDS_CLK_LANE_EN BIT(3)
+#define LVDS_PLL_POWER_MASK BIT(2)
+#define LVDS_PLL_POWER_OFF BIT(2)
+#define LVDS_PLL_POWER_ON 0
+#define LVDS_BANDGAP_POWER_MASK BIT(0)
+#define LVDS_BANDGAP_POWER_DOWN BIT(0)
+#define LVDS_BANDGAP_POWER_ON 0
+
+#define DSI_PHY_RSTZ 0xa0
+#define PHY_ENABLECLK BIT(2)
+#define DSI_PHY_STATUS 0xb0
+#define PHY_LOCK BIT(0)
+
+struct mipi_dphy_timing {
+ unsigned int clkmiss;
+ unsigned int clkpost;
+ unsigned int clkpre;
+ unsigned int clkprepare;
+ unsigned int clksettle;
+ unsigned int clktermen;
+ unsigned int clktrail;
+ unsigned int clkzero;
+ unsigned int dtermen;
+ unsigned int eot;
+ unsigned int hsexit;
+ unsigned int hsprepare;
+ unsigned int hszero;
+ unsigned int hssettle;
+ unsigned int hsskip;
+ unsigned int hstrail;
+ unsigned int init;
+ unsigned int lpx;
+ unsigned int taget;
+ unsigned int tago;
+ unsigned int tasure;
+ unsigned int wakeup;
+};
+
+struct inno_dsidphy {
+ struct device *dev;
+ struct clk *ref_clk;
+ struct clk *pclk_phy;
+ struct clk *pclk_host;
+ void __iomem *phy_base;
+ void __iomem *host_base;
+ struct reset_control *rst;
+ enum phy_mode mode;
+
+ struct {
+ struct clk_hw hw;
+ u8 prediv;
+ u16 fbdiv;
+ unsigned long rate;
+ } pll;
+};
+
+enum {
+ REGISTER_PART_ANALOG,
+ REGISTER_PART_DIGITAL,
+ REGISTER_PART_CLOCK_LANE,
+ REGISTER_PART_DATA0_LANE,
+ REGISTER_PART_DATA1_LANE,
+ REGISTER_PART_DATA2_LANE,
+ REGISTER_PART_DATA3_LANE,
+ REGISTER_PART_LVDS,
+};
+
+static inline struct inno_dsidphy *hw_to_inno(struct clk_hw *hw)
+{
+ return container_of(hw, struct inno_dsidphy, pll.hw);
+}
+
+static void phy_update_bits(struct inno_dsidphy *inno,
+ u8 first, u8 second, u8 mask, u8 val)
+{
+ u32 reg = PHY_REG(first, second) << 2;
+ unsigned int tmp, orig;
+
+ orig = readl(inno->phy_base + reg);
+ tmp = orig & ~mask;
+ tmp |= val & mask;
+ writel(tmp, inno->phy_base + reg);
+}
+
+static void mipi_dphy_timing_get_default(struct mipi_dphy_timing *timing,
+ unsigned long period)
+{
+ /* Global Operation Timing Parameters */
+ timing->clkmiss = 0;
+ timing->clkpost = 70000 + 52 * period;
+ timing->clkpre = 8 * period;
+ timing->clkprepare = 65000;
+ timing->clksettle = 95000;
+ timing->clktermen = 0;
+ timing->clktrail = 80000;
+ timing->clkzero = 260000;
+ timing->dtermen = 0;
+ timing->eot = 0;
+ timing->hsexit = 120000;
+ timing->hsprepare = 65000 + 4 * period;
+ timing->hszero = 145000 + 6 * period;
+ timing->hssettle = 85000 + 6 * period;
+ timing->hsskip = 40000;
+ timing->hstrail = max(8 * period, 60000 + 4 * period);
+ timing->init = 100000000;
+ timing->lpx = 60000;
+ timing->taget = 5 * timing->lpx;
+ timing->tago = 4 * timing->lpx;
+ timing->tasure = 2 * timing->lpx;
+ timing->wakeup = 1000000000;
+}
+
+static void inno_dsidphy_mipi_mode_enable(struct inno_dsidphy *inno)
+{
+ struct mipi_dphy_timing gotp;
+ const struct {
+ unsigned long rate;
+ u8 hs_prepare;
+ u8 clk_lane_hs_zero;
+ u8 data_lane_hs_zero;
+ u8 hs_trail;
+ } timings[] = {
+ { 110000000, 0x20, 0x16, 0x02, 0x22},
+ { 150000000, 0x06, 0x16, 0x03, 0x45},
+ { 200000000, 0x18, 0x17, 0x04, 0x0b},
+ { 250000000, 0x05, 0x17, 0x05, 0x16},
+ { 300000000, 0x51, 0x18, 0x06, 0x2c},
+ { 400000000, 0x64, 0x19, 0x07, 0x33},
+ { 500000000, 0x20, 0x1b, 0x07, 0x4e},
+ { 600000000, 0x6a, 0x1d, 0x08, 0x3a},
+ { 700000000, 0x3e, 0x1e, 0x08, 0x6a},
+ { 800000000, 0x21, 0x1f, 0x09, 0x29},
+ {1000000000, 0x09, 0x20, 0x09, 0x27},
+ };
+ u32 t_txbyteclkhs, t_txclkesc, ui;
+ u32 txbyteclkhs, txclkesc, esc_clk_div;
+ u32 hs_exit, clk_post, clk_pre, wakeup, lpx, ta_go, ta_sure, ta_wait;
+ u32 hs_prepare, hs_trail, hs_zero, clk_lane_hs_zero, data_lane_hs_zero;
+ unsigned int i;
+
+ /* Select MIPI mode */
+ phy_update_bits(inno, REGISTER_PART_LVDS, 0x03,
+ MODE_ENABLE_MASK, MIPI_MODE_ENABLE);
+ /* Configure PLL */
+ phy_update_bits(inno, REGISTER_PART_ANALOG, 0x03,
+ REG_PREDIV_MASK, REG_PREDIV(inno->pll.prediv));
+ phy_update_bits(inno, REGISTER_PART_ANALOG, 0x03,
+ REG_FBDIV_HI_MASK, REG_FBDIV_HI(inno->pll.fbdiv));
+ phy_update_bits(inno, REGISTER_PART_ANALOG, 0x04,
+ REG_FBDIV_LO_MASK, REG_FBDIV_LO(inno->pll.fbdiv));
+ /* Enable PLL and LDO */
+ phy_update_bits(inno, REGISTER_PART_ANALOG, 0x01,
+ REG_LDOPD_MASK | REG_PLLPD_MASK,
+ REG_LDOPD_POWER_ON | REG_PLLPD_POWER_ON);
+ /* Reset analog */
+ phy_update_bits(inno, REGISTER_PART_ANALOG, 0x01,
+ REG_SYNCRST_MASK, REG_SYNCRST_RESET);
+ udelay(1);
+ phy_update_bits(inno, REGISTER_PART_ANALOG, 0x01,
+ REG_SYNCRST_MASK, REG_SYNCRST_NORMAL);
+ /* Reset digital */
+ phy_update_bits(inno, REGISTER_PART_DIGITAL, 0x00,
+ REG_DIG_RSTN_MASK, REG_DIG_RSTN_RESET);
+ udelay(1);
+ phy_update_bits(inno, REGISTER_PART_DIGITAL, 0x00,
+ REG_DIG_RSTN_MASK, REG_DIG_RSTN_NORMAL);
+
+ txbyteclkhs = inno->pll.rate / 8;
+ t_txbyteclkhs = div_u64(PSEC_PER_SEC, txbyteclkhs);
+
+ esc_clk_div = DIV_ROUND_UP(txbyteclkhs, 20000000);
+ txclkesc = txbyteclkhs / esc_clk_div;
+ t_txclkesc = div_u64(PSEC_PER_SEC, txclkesc);
+
+ ui = div_u64(PSEC_PER_SEC, inno->pll.rate);
+
+ memset(&gotp, 0, sizeof(gotp));
+ mipi_dphy_timing_get_default(&gotp, ui);
+
+ /*
+ * The value of counter for HS Ths-exit
+ * Ths-exit = Tpin_txbyteclkhs * value
+ */
+ hs_exit = DIV_ROUND_UP(gotp.hsexit, t_txbyteclkhs);
+ /*
+ * The value of counter for HS Tclk-post
+ * Tclk-post = Tpin_txbyteclkhs * value
+ */
+ clk_post = DIV_ROUND_UP(gotp.clkpost, t_txbyteclkhs);
+ /*
+ * The value of counter for HS Tclk-pre
+ * Tclk-pre = Tpin_txbyteclkhs * value
+ */
+ clk_pre = DIV_ROUND_UP(gotp.clkpre, t_txbyteclkhs);
+
+ /*
+ * The value of counter for HS Tlpx Time
+ * Tlpx = Tpin_txbyteclkhs * (2 + value)
+ */
+ lpx = DIV_ROUND_UP(gotp.lpx, t_txbyteclkhs);
+ if (lpx >= 2)
+ lpx -= 2;
+
+ /*
+ * The value of counter for HS Tta-go
+ * Tta-go for turnaround
+ * Tta-go = Ttxclkesc * value
+ */
+ ta_go = DIV_ROUND_UP(gotp.tago, t_txclkesc);
+ /*
+ * The value of counter for HS Tta-sure
+ * Tta-sure for turnaround
+ * Tta-sure = Ttxclkesc * value
+ */
+ ta_sure = DIV_ROUND_UP(gotp.tasure, t_txclkesc);
+ /*
+ * The value of counter for HS Tta-wait
+ * Tta-wait for turnaround
+ * Tta-wait = Ttxclkesc * value
+ */
+ ta_wait = DIV_ROUND_UP(gotp.taget, t_txclkesc);
+
+ for (i = 0; i < ARRAY_SIZE(timings); i++)
+ if (inno->pll.rate <= timings[i].rate)
+ break;
+
+ if (i == ARRAY_SIZE(timings))
+ --i;
+
+ hs_prepare = timings[i].hs_prepare;
+ hs_trail = timings[i].hs_trail;
+ clk_lane_hs_zero = timings[i].clk_lane_hs_zero;
+ data_lane_hs_zero = timings[i].data_lane_hs_zero;
+ wakeup = 0x3ff;
+
+ for (i = REGISTER_PART_CLOCK_LANE; i <= REGISTER_PART_DATA3_LANE; i++) {
+ if (i == REGISTER_PART_CLOCK_LANE)
+ hs_zero = clk_lane_hs_zero;
+ else
+ hs_zero = data_lane_hs_zero;
+
+ phy_update_bits(inno, i, 0x05, T_LPX_CNT_MASK,
+ T_LPX_CNT(lpx));
+ phy_update_bits(inno, i, 0x06, T_HS_PREPARE_CNT_MASK,
+ T_HS_PREPARE_CNT(hs_prepare));
+ phy_update_bits(inno, i, 0x07, T_HS_ZERO_CNT_MASK,
+ T_HS_ZERO_CNT(hs_zero));
+ phy_update_bits(inno, i, 0x08, T_HS_TRAIL_CNT_MASK,
+ T_HS_TRAIL_CNT(hs_trail));
+ phy_update_bits(inno, i, 0x09, T_HS_EXIT_CNT_MASK,
+ T_HS_EXIT_CNT(hs_exit));
+ phy_update_bits(inno, i, 0x0a, T_CLK_POST_CNT_MASK,
+ T_CLK_POST_CNT(clk_post));
+ phy_update_bits(inno, i, 0x0e, T_CLK_PRE_CNT_MASK,
+ T_CLK_PRE_CNT(clk_pre));
+ phy_update_bits(inno, i, 0x0c, T_WAKEUP_CNT_HI_MASK,
+ T_WAKEUP_CNT_HI(wakeup >> 8));
+ phy_update_bits(inno, i, 0x0d, T_WAKEUP_CNT_LO_MASK,
+ T_WAKEUP_CNT_LO(wakeup));
+ phy_update_bits(inno, i, 0x10, T_TA_GO_CNT_MASK,
+ T_TA_GO_CNT(ta_go));
+ phy_update_bits(inno, i, 0x11, T_TA_SURE_CNT_MASK,
+ T_TA_SURE_CNT(ta_sure));
+ phy_update_bits(inno, i, 0x12, T_TA_WAIT_CNT_MASK,
+ T_TA_WAIT_CNT(ta_wait));
+ }
+
+ /* Enable all lanes on analog part */
+ phy_update_bits(inno, REGISTER_PART_ANALOG, 0x00,
+ LANE_EN_MASK, LANE_EN_CK | LANE_EN_3 | LANE_EN_2 |
+ LANE_EN_1 | LANE_EN_0);
+}
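
Each picosecond target above is then quantized into a counter in units of
either the byte clock (t_txbyteclkhs) or the escape clock (t_txclkesc); T_LPX
additionally subtracts two because, per the comment in the code, the hardware
realizes Tlpx as (2 + value) byte-clock periods. A worked sketch of that
conversion, assuming a 1 GHz PLL rate:

#include <stdio.h>

#define PSEC_PER_SEC 1000000000000ULL
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned long long rate = 1000000000ULL;     /* inno->pll.rate */
        unsigned long long txbyteclkhs = rate / 8;   /* byte clock, Hz */
        unsigned long long t_txbyteclkhs = PSEC_PER_SEC / txbyteclkhs;
        unsigned long long lpx;

        /* Tlpx = Tpin_txbyteclkhs * (2 + value), hence the -2. */
        lpx = DIV_ROUND_UP(60000ULL, t_txbyteclkhs);
        if (lpx >= 2)
                lpx -= 2;

        printf("t_txbyteclkhs = %llu ps, T_LPX counter = %llu\n",
               t_txbyteclkhs, lpx);
        return 0;
}
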
+
+static void inno_dsidphy_lvds_mode_enable(struct inno_dsidphy *inno)
+{
+ u8 prediv = 2;
+ u16 fbdiv = 28;
+
+ /* Sample clock reverse direction */
+ phy_update_bits(inno, REGISTER_PART_ANALOG, 0x08,
+ SAMPLE_CLOCK_DIRECTION_MASK,
+ SAMPLE_CLOCK_DIRECTION_REVERSE);
+
+ /* Select LVDS mode */
+ phy_update_bits(inno, REGISTER_PART_LVDS, 0x03,
+ MODE_ENABLE_MASK, LVDS_MODE_ENABLE);
+ /* Configure PLL */
+ phy_update_bits(inno, REGISTER_PART_ANALOG, 0x03,
+ REG_PREDIV_MASK, REG_PREDIV(prediv));
+ phy_update_bits(inno, REGISTER_PART_ANALOG, 0x03,
+ REG_FBDIV_HI_MASK, REG_FBDIV_HI(fbdiv));
+ phy_update_bits(inno, REGISTER_PART_ANALOG, 0x04,
+ REG_FBDIV_LO_MASK, REG_FBDIV_LO(fbdiv));
+ phy_update_bits(inno, REGISTER_PART_LVDS, 0x08, 0xff, 0xfc);
+ /* Enable PLL and Bandgap */
+ phy_update_bits(inno, REGISTER_PART_LVDS, 0x0b,
+ LVDS_PLL_POWER_MASK | LVDS_BANDGAP_POWER_MASK,
+ LVDS_PLL_POWER_ON | LVDS_BANDGAP_POWER_ON);
+
+ msleep(20);
+
+ /* Reset LVDS digital logic */
+ phy_update_bits(inno, REGISTER_PART_LVDS, 0x00,
+ LVDS_DIGITAL_INTERNAL_RESET_MASK,
+ LVDS_DIGITAL_INTERNAL_RESET_ENABLE);
+ udelay(1);
+ phy_update_bits(inno, REGISTER_PART_LVDS, 0x00,
+ LVDS_DIGITAL_INTERNAL_RESET_MASK,
+ LVDS_DIGITAL_INTERNAL_RESET_DISABLE);
+ /* Enable LVDS digital logic */
+ phy_update_bits(inno, REGISTER_PART_LVDS, 0x01,
+ LVDS_DIGITAL_INTERNAL_ENABLE_MASK,
+ LVDS_DIGITAL_INTERNAL_ENABLE);
+ /* Enable LVDS analog driver */
+ phy_update_bits(inno, REGISTER_PART_LVDS, 0x0b,
+ LVDS_LANE_EN_MASK, LVDS_CLK_LANE_EN |
+ LVDS_DATA_LANE0_EN | LVDS_DATA_LANE1_EN |
+ LVDS_DATA_LANE2_EN | LVDS_DATA_LANE3_EN);
+}
+
+static int inno_dsidphy_power_on(struct phy *phy)
+{
+ struct inno_dsidphy *inno = phy_get_drvdata(phy);
+
+ clk_prepare_enable(inno->pclk_phy);
+ pm_runtime_get_sync(inno->dev);
+
+ /* Bandgap power on */
+ phy_update_bits(inno, REGISTER_PART_ANALOG, 0x00,
+ BANDGAP_POWER_MASK, BANDGAP_POWER_ON);
+ /* Enable power work */
+ phy_update_bits(inno, REGISTER_PART_ANALOG, 0x00,
+ POWER_WORK_MASK, POWER_WORK_ENABLE);
+
+ switch (inno->mode) {
+ case PHY_MODE_MIPI_DPHY:
+ inno_dsidphy_mipi_mode_enable(inno);
+ break;
+ case PHY_MODE_LVDS:
+ inno_dsidphy_lvds_mode_enable(inno);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int inno_dsidphy_power_off(struct phy *phy)
+{
+ struct inno_dsidphy *inno = phy_get_drvdata(phy);
+
+ phy_update_bits(inno, REGISTER_PART_ANALOG, 0x00, LANE_EN_MASK, 0);
+ phy_update_bits(inno, REGISTER_PART_ANALOG, 0x01,
+ REG_LDOPD_MASK | REG_PLLPD_MASK,
+ REG_LDOPD_POWER_DOWN | REG_PLLPD_POWER_DOWN);
+ phy_update_bits(inno, REGISTER_PART_ANALOG, 0x00,
+ POWER_WORK_MASK, POWER_WORK_DISABLE);
+ phy_update_bits(inno, REGISTER_PART_ANALOG, 0x00,
+ BANDGAP_POWER_MASK, BANDGAP_POWER_DOWN);
+
+ phy_update_bits(inno, REGISTER_PART_LVDS, 0x0b, LVDS_LANE_EN_MASK, 0);
+ phy_update_bits(inno, REGISTER_PART_LVDS, 0x01,
+ LVDS_DIGITAL_INTERNAL_ENABLE_MASK,
+ LVDS_DIGITAL_INTERNAL_DISABLE);
+ phy_update_bits(inno, REGISTER_PART_LVDS, 0x0b,
+ LVDS_PLL_POWER_MASK | LVDS_BANDGAP_POWER_MASK,
+ LVDS_PLL_POWER_OFF | LVDS_BANDGAP_POWER_DOWN);
+
+ pm_runtime_put(inno->dev);
+ clk_disable_unprepare(inno->pclk_phy);
+
+ return 0;
+}
+
+static int inno_dsidphy_set_mode(struct phy *phy, enum phy_mode mode,
+ int submode)
+{
+ struct inno_dsidphy *inno = phy_get_drvdata(phy);
+
+ switch (mode) {
+ case PHY_MODE_MIPI_DPHY:
+ case PHY_MODE_LVDS:
+ inno->mode = mode;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct phy_ops inno_dsidphy_ops = {
+ .set_mode = inno_dsidphy_set_mode,
+ .power_on = inno_dsidphy_power_on,
+ .power_off = inno_dsidphy_power_off,
+ .owner = THIS_MODULE,
+};
+
+static unsigned long inno_dsidphy_pll_round_rate(struct inno_dsidphy *inno,
+ unsigned long prate,
+ unsigned long rate,
+ u8 *prediv, u16 *fbdiv)
+{
+ unsigned long best_freq = 0;
+ unsigned long fref, fout;
+ u8 min_prediv, max_prediv;
+ u8 _prediv, best_prediv = 1;
+ u16 _fbdiv, best_fbdiv = 1;
+ u32 min_delta = UINT_MAX;
+
+ /*
+ * The PLL output frequency can be calculated using a simple formula:
+ * PLL_Output_Frequency = (FREF / PREDIV * FBDIV) / 2
+ * PLL_Output_Frequency: equal to twice the DDR clock frequency
+ */
+ fref = prate / 2;
+ if (rate > 1000000000UL)
+ fout = 1000000000UL;
+ else
+ fout = rate;
+
+ /* 5MHz < Fref / prediv < 40MHz */
+ min_prediv = DIV_ROUND_UP(fref, 40000000);
+ max_prediv = fref / 5000000;
+
+ for (_prediv = min_prediv; _prediv <= max_prediv; _prediv++) {
+ u64 tmp;
+ u32 delta;
+
+ tmp = (u64)fout * _prediv;
+ do_div(tmp, fref);
+ _fbdiv = tmp;
+
+ /*
+ * The possible settings of feedback divider are
+ * 12, 13, 14, 16, ~ 511
+ */
+ if (_fbdiv == 15)
+ continue;
+
+ if (_fbdiv < 12 || _fbdiv > 511)
+ continue;
+
+ tmp = (u64)_fbdiv * fref;
+ do_div(tmp, _prediv);
+
+ delta = abs(fout - tmp);
+ if (!delta) {
+ best_prediv = _prediv;
+ best_fbdiv = _fbdiv;
+ best_freq = tmp;
+ break;
+ } else if (delta < min_delta) {
+ best_prediv = _prediv;
+ best_fbdiv = _fbdiv;
+ best_freq = tmp;
+ min_delta = delta;
+ }
+ }
+
+ if (best_freq) {
+ *prediv = best_prediv;
+ *fbdiv = best_fbdiv;
+ }
+
+ return best_freq;
+}
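
The search is exhaustive over the legal pre-divider range, with the feedback
divider computed by truncation and fbdiv == 15 excluded per the divider's
documented gap. A standalone sketch of the same loop for an assumed 24 MHz
reference and a 900 MHz request, widened to 64-bit arithmetic to mirror the
driver's do_div() usage:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned long long prate = 24000000, fout = 900000000;
        unsigned long long fref = prate / 2;
        unsigned long long best = 0, min_delta = ~0ULL;
        unsigned int best_prediv = 1, best_fbdiv = 1;
        unsigned int min_prediv = DIV_ROUND_UP(fref, 40000000);
        unsigned int max_prediv = fref / 5000000;

        for (unsigned int prediv = min_prediv; prediv <= max_prediv; prediv++) {
                unsigned long long fbdiv = fout * prediv / fref;
                unsigned long long freq, delta;

                /* Feedback divider supports 12, 13, 14, 16 .. 511. */
                if (fbdiv == 15 || fbdiv < 12 || fbdiv > 511)
                        continue;

                freq = fbdiv * fref / prediv;
                delta = freq > fout ? freq - fout : fout - freq;
                if (delta < min_delta) {
                        best = freq;
                        best_prediv = prediv;
                        best_fbdiv = (unsigned int)fbdiv;
                        min_delta = delta;
                        if (!delta)
                                break;  /* exact match, stop early */
                }
        }

        printf("fout=%llu prediv=%u fbdiv=%u\n", best, best_prediv, best_fbdiv);
        return 0;
}

For these inputs the search finds prediv = 1, fbdiv = 75, and recalc_rate()
below reproduces the same 900 MHz from (24 MHz / 1 * 75) / 2, so the cached
divider pair round-trips.
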
+
+static long inno_dsidphy_pll_clk_round_rate(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long *prate)
+{
+ struct inno_dsidphy *inno = hw_to_inno(hw);
+ unsigned long fout;
+ u16 fbdiv = 1;
+ u8 prediv = 1;
+
+ fout = inno_dsidphy_pll_round_rate(inno, *prate, rate,
+ &prediv, &fbdiv);
+
+ return fout;
+}
+
+static int inno_dsidphy_pll_clk_set_rate(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct inno_dsidphy *inno = hw_to_inno(hw);
+ unsigned long fout;
+ u16 fbdiv = 1;
+ u8 prediv = 1;
+
+ fout = inno_dsidphy_pll_round_rate(inno, parent_rate, rate,
+ &prediv, &fbdiv);
+
+ dev_dbg(inno->dev, "fin=%lu, fout=%lu, prediv=%u, fbdiv=%u\n",
+ parent_rate, fout, prediv, fbdiv);
+
+ inno->pll.prediv = prediv;
+ inno->pll.fbdiv = fbdiv;
+ inno->pll.rate = fout;
+
+ return 0;
+}
+
+static unsigned long
+inno_dsidphy_pll_clk_recalc_rate(struct clk_hw *hw, unsigned long prate)
+{
+ struct inno_dsidphy *inno = hw_to_inno(hw);
+
+ /* PLL_Output_Frequency = (FREF / PREDIV * FBDIV) / 2 */
+ return (prate / inno->pll.prediv * inno->pll.fbdiv) / 2;
+}
+
+static const struct clk_ops inno_dsidphy_pll_clk_ops = {
+ .round_rate = inno_dsidphy_pll_clk_round_rate,
+ .set_rate = inno_dsidphy_pll_clk_set_rate,
+ .recalc_rate = inno_dsidphy_pll_clk_recalc_rate,
+};
+
+static int inno_dsidphy_pll_register(struct inno_dsidphy *inno)
+{
+ struct device *dev = inno->dev;
+ struct clk *clk;
+ const char *parent_name;
+ struct clk_init_data init;
+ int ret;
+
+ parent_name = __clk_get_name(inno->ref_clk);
+
+ init.name = "mipi_dphy_pll";
+ ret = of_property_read_string(dev->of_node, "clock-output-names",
+ &init.name);
+ if (ret < 0)
+ dev_dbg(dev, "phy should set clock-output-names property\n");
+
+ init.ops = &inno_dsidphy_pll_clk_ops;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+ init.flags = 0;
+
+ inno->pll.hw.init = &init;
+ clk = devm_clk_register(dev, &inno->pll.hw);
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ dev_err(dev, "failed to register PLL: %d\n", ret);
+ return ret;
+ }
+
+ return devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get,
+ &inno->pll.hw);
+}
+
+static int inno_dsidphy_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct inno_dsidphy *inno;
+ struct phy_provider *phy_provider;
+ struct phy *phy;
+ int ret;
+
+ inno = devm_kzalloc(dev, sizeof(*inno), GFP_KERNEL);
+ if (!inno)
+ return -ENOMEM;
+
+ inno->dev = dev;
+ platform_set_drvdata(pdev, inno);
+
+ inno->phy_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(inno->phy_base))
+ return PTR_ERR(inno->phy_base);
+
+ inno->ref_clk = devm_clk_get(dev, "ref");
+ if (IS_ERR(inno->ref_clk)) {
+ ret = PTR_ERR(inno->ref_clk);
+ dev_err(dev, "failed to get ref clock: %d\n", ret);
+ return ret;
+ }
+
+ inno->pclk_phy = devm_clk_get(dev, "pclk");
+ if (IS_ERR(inno->pclk_phy)) {
+ ret = PTR_ERR(inno->pclk_phy);
+ dev_err(dev, "failed to get phy pclk: %d\n", ret);
+ return ret;
+ }
+
+ inno->rst = devm_reset_control_get(dev, "apb");
+ if (IS_ERR(inno->rst)) {
+ ret = PTR_ERR(inno->rst);
+ dev_err(dev, "failed to get system reset control: %d\n", ret);
+ return ret;
+ }
+
+ phy = devm_phy_create(dev, NULL, &inno_dsidphy_ops);
+ if (IS_ERR(phy)) {
+ ret = PTR_ERR(phy);
+ dev_err(dev, "failed to create phy: %d\n", ret);
+ return ret;
+ }
+
+ phy_set_drvdata(phy, inno);
+
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+ if (IS_ERR(phy_provider)) {
+ ret = PTR_ERR(phy_provider);
+ dev_err(dev, "failed to register phy provider: %d\n", ret);
+ return ret;
+ }
+
+ ret = inno_dsidphy_pll_register(inno);
+ if (ret)
+ return ret;
+
+ pm_runtime_enable(dev);
+
+ return 0;
+}
+
+static int inno_dsidphy_remove(struct platform_device *pdev)
+{
+ struct inno_dsidphy *inno = platform_get_drvdata(pdev);
+
+ pm_runtime_disable(inno->dev);
+
+ return 0;
+}
+
+static const struct of_device_id inno_dsidphy_of_match[] = {
+ { .compatible = "rockchip,px30-dsi-dphy", },
+ { .compatible = "rockchip,rk3128-dsi-dphy", },
+ { .compatible = "rockchip,rk3368-dsi-dphy", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, inno_dsidphy_of_match);
+
+static struct platform_driver inno_dsidphy_driver = {
+ .driver = {
+ .name = "inno-dsidphy",
+ .of_match_table = of_match_ptr(inno_dsidphy_of_match),
+ },
+ .probe = inno_dsidphy_probe,
+ .remove = inno_dsidphy_remove,
+};
+module_platform_driver(inno_dsidphy_driver);
+
+MODULE_AUTHOR("Wyon Bi <bivvy.bi@rock-chips.com>");
+MODULE_DESCRIPTION("Innosilicon MIPI/LVDS/TTL Video Combo PHY driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/rockchip/phy-rockchip-inno-usb2.c b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
index eae865ff312c..680cc0c8825c 100644
--- a/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
+++ b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
@@ -1423,6 +1423,7 @@ static const struct rockchip_usb2phy_cfg rv1108_phy_cfgs[] = {
};
static const struct of_device_id rockchip_usb2phy_dt_match[] = {
+ { .compatible = "rockchip,px30-usb2phy", .data = &rk3328_phy_cfgs },
{ .compatible = "rockchip,rk3228-usb2phy", .data = &rk3228_phy_cfgs },
{ .compatible = "rockchip,rk3328-usb2phy", .data = &rk3328_phy_cfgs },
{ .compatible = "rockchip,rk3366-usb2phy", .data = &rk3366_phy_cfgs },
diff --git a/drivers/phy/tegra/xusb-tegra186.c b/drivers/phy/tegra/xusb-tegra186.c
index 6f3afaf9398f..84c27394c181 100644
--- a/drivers/phy/tegra/xusb-tegra186.c
+++ b/drivers/phy/tegra/xusb-tegra186.c
@@ -857,9 +857,32 @@ static void tegra186_xusb_padctl_remove(struct tegra_xusb_padctl *padctl)
{
}
+static int tegra186_xusb_padctl_vbus_override(struct tegra_xusb_padctl *padctl,
+ bool status)
+{
+ u32 value;
+
+ dev_dbg(padctl->dev, "%s vbus override\n", status ? "set" : "clear");
+
+ value = padctl_readl(padctl, USB2_VBUS_ID);
+
+ if (status) {
+ value |= VBUS_OVERRIDE;
+ value &= ~ID_OVERRIDE(~0);
+ value |= ID_OVERRIDE_FLOATING;
+ } else {
+ value &= ~VBUS_OVERRIDE;
+ }
+
+ padctl_writel(padctl, value, USB2_VBUS_ID);
+
+ return 0;
+}
+
static const struct tegra_xusb_padctl_ops tegra186_xusb_padctl_ops = {
.probe = tegra186_xusb_padctl_probe,
.remove = tegra186_xusb_padctl_remove,
+ .vbus_override = tegra186_xusb_padctl_vbus_override,
};
static const char * const tegra186_xusb_padctl_supply_names[] = {
diff --git a/drivers/phy/tegra/xusb-tegra210.c b/drivers/phy/tegra/xusb-tegra210.c
index 0c0df6897a3b..394913bb2f20 100644
--- a/drivers/phy/tegra/xusb-tegra210.c
+++ b/drivers/phy/tegra/xusb-tegra210.c
@@ -39,7 +39,10 @@
#define XUSB_PADCTL_USB2_PAD_MUX_USB2_BIAS_PAD_XUSB 0x1
#define XUSB_PADCTL_USB2_PORT_CAP 0x008
+#define XUSB_PADCTL_USB2_PORT_CAP_PORTX_CAP_DISABLED(x) (0x0 << ((x) * 4))
#define XUSB_PADCTL_USB2_PORT_CAP_PORTX_CAP_HOST(x) (0x1 << ((x) * 4))
+#define XUSB_PADCTL_USB2_PORT_CAP_PORTX_CAP_DEVICE(x) (0x2 << ((x) * 4))
+#define XUSB_PADCTL_USB2_PORT_CAP_PORTX_CAP_OTG(x) (0x3 << ((x) * 4))
#define XUSB_PADCTL_USB2_PORT_CAP_PORTX_CAP_MASK(x) (0x3 << ((x) * 4))
#define XUSB_PADCTL_SS_PORT_MAP 0x014
@@ -47,6 +50,7 @@
#define XUSB_PADCTL_SS_PORT_MAP_PORTX_MAP_SHIFT(x) ((x) * 5)
#define XUSB_PADCTL_SS_PORT_MAP_PORTX_MAP_MASK(x) (0x7 << ((x) * 5))
#define XUSB_PADCTL_SS_PORT_MAP_PORTX_MAP(x, v) (((v) & 0x7) << ((x) * 5))
+#define XUSB_PADCTL_SS_PORT_MAP_PORT_DISABLED 0x7
#define XUSB_PADCTL_ELPG_PROGRAM1 0x024
#define XUSB_PADCTL_ELPG_PROGRAM1_AUX_MUX_LP0_VCORE_DOWN (1 << 31)
@@ -61,9 +65,14 @@
#define XUSB_PADCTL_USB3_PAD_MUX_PCIE_IDDQ_DISABLE(x) (1 << (1 + (x)))
#define XUSB_PADCTL_USB3_PAD_MUX_SATA_IDDQ_DISABLE(x) (1 << (8 + (x)))
+#define XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPADX_CTL0(x) (0x080 + (x) * 0x40)
+#define XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPAD_CTL0_ZIP (1 << 18)
+#define XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPAD_CTL0_ZIN (1 << 22)
+
#define XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPADX_CTL1(x) (0x084 + (x) * 0x40)
#define XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPAD_CTL1_VREG_LEV_SHIFT 7
#define XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPAD_CTL1_VREG_LEV_MASK 0x3
+#define XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPAD_CTL1_VREG_LEV_VAL 0x1
#define XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPAD_CTL1_VREG_FIX18 (1 << 6)
#define XUSB_PADCTL_USB2_OTG_PADX_CTL0(x) (0x088 + (x) * 0x40)
@@ -222,6 +231,12 @@
#define XUSB_PADCTL_UPHY_USB3_PADX_ECTL6(x) (0xa74 + (x) * 0x40)
#define XUSB_PADCTL_UPHY_USB3_PAD_ECTL6_RX_EQ_CTRL_H_VAL 0xfcf01368
+#define XUSB_PADCTL_USB2_VBUS_ID 0xc60
+#define XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_VBUS_ON (1 << 14)
+#define XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_SHIFT 18
+#define XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_MASK 0xf
+#define XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_FLOATING 8
+
struct tegra210_xusb_fuse_calibration {
u32 hs_curr_level[4];
u32 hs_term_range_adj;
@@ -940,6 +955,34 @@ static int tegra210_usb2_phy_power_on(struct phy *phy)
priv = to_tegra210_xusb_padctl(padctl);
+ if (port->usb3_port_fake != -1) {
+ value = padctl_readl(padctl, XUSB_PADCTL_SS_PORT_MAP);
+ value &= ~XUSB_PADCTL_SS_PORT_MAP_PORTX_MAP_MASK(
+ port->usb3_port_fake);
+ value |= XUSB_PADCTL_SS_PORT_MAP_PORTX_MAP(
+ port->usb3_port_fake, index);
+ padctl_writel(padctl, value, XUSB_PADCTL_SS_PORT_MAP);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM1);
+ value &= ~XUSB_PADCTL_ELPG_PROGRAM1_SSPX_ELPG_VCORE_DOWN(
+ port->usb3_port_fake);
+ padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM1);
+
+ usleep_range(100, 200);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM1);
+ value &= ~XUSB_PADCTL_ELPG_PROGRAM1_SSPX_ELPG_CLAMP_EN_EARLY(
+ port->usb3_port_fake);
+ padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM1);
+
+ usleep_range(100, 200);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM1);
+ value &= ~XUSB_PADCTL_ELPG_PROGRAM1_SSPX_ELPG_CLAMP_EN(
+ port->usb3_port_fake);
+ padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM1);
+ }
+
value = padctl_readl(padctl, XUSB_PADCTL_USB2_BIAS_PAD_CTL0);
value &= ~((XUSB_PADCTL_USB2_BIAS_PAD_CTL0_HS_SQUELCH_LEVEL_MASK <<
XUSB_PADCTL_USB2_BIAS_PAD_CTL0_HS_SQUELCH_LEVEL_SHIFT) |
@@ -957,7 +1000,14 @@ static int tegra210_usb2_phy_power_on(struct phy *phy)
value = padctl_readl(padctl, XUSB_PADCTL_USB2_PORT_CAP);
value &= ~XUSB_PADCTL_USB2_PORT_CAP_PORTX_CAP_MASK(index);
- value |= XUSB_PADCTL_USB2_PORT_CAP_PORTX_CAP_HOST(index);
+ if (port->mode == USB_DR_MODE_UNKNOWN)
+ value |= XUSB_PADCTL_USB2_PORT_CAP_PORTX_CAP_DISABLED(index);
+ else if (port->mode == USB_DR_MODE_PERIPHERAL)
+ value |= XUSB_PADCTL_USB2_PORT_CAP_PORTX_CAP_DEVICE(index);
+ else if (port->mode == USB_DR_MODE_HOST)
+ value |= XUSB_PADCTL_USB2_PORT_CAP_PORTX_CAP_HOST(index);
+ else if (port->mode == USB_DR_MODE_OTG)
+ value |= XUSB_PADCTL_USB2_PORT_CAP_PORTX_CAP_OTG(index);
padctl_writel(padctl, value, XUSB_PADCTL_USB2_PORT_CAP);
value = padctl_readl(padctl, XUSB_PADCTL_USB2_OTG_PADX_CTL0(index));
@@ -989,7 +1039,12 @@ static int tegra210_usb2_phy_power_on(struct phy *phy)
XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPADX_CTL1(index));
value &= ~(XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPAD_CTL1_VREG_LEV_MASK <<
XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPAD_CTL1_VREG_LEV_SHIFT);
- value |= XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPAD_CTL1_VREG_FIX18;
+ if (port->mode == USB_DR_MODE_HOST)
+ value |= XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPAD_CTL1_VREG_FIX18;
+ else
+ value |=
+ XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPAD_CTL1_VREG_LEV_VAL <<
+ XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPAD_CTL1_VREG_LEV_SHIFT;
padctl_writel(padctl, value,
XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPADX_CTL1(index));
@@ -1062,6 +1117,32 @@ static int tegra210_usb2_phy_power_off(struct phy *phy)
mutex_lock(&padctl->lock);
+ if (port->usb3_port_fake != -1) {
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM1);
+ value |= XUSB_PADCTL_ELPG_PROGRAM1_SSPX_ELPG_CLAMP_EN_EARLY(
+ port->usb3_port_fake);
+ padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM1);
+
+ usleep_range(100, 200);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM1);
+ value |= XUSB_PADCTL_ELPG_PROGRAM1_SSPX_ELPG_CLAMP_EN(
+ port->usb3_port_fake);
+ padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM1);
+
+ usleep_range(250, 350);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM1);
+ value |= XUSB_PADCTL_ELPG_PROGRAM1_SSPX_ELPG_VCORE_DOWN(
+ port->usb3_port_fake);
+ padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM1);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_SS_PORT_MAP);
+ value |= XUSB_PADCTL_SS_PORT_MAP_PORTX_MAP(port->usb3_port_fake,
+ XUSB_PADCTL_SS_PORT_MAP_PORT_DISABLED);
+ padctl_writel(padctl, value, XUSB_PADCTL_SS_PORT_MAP);
+ }
+
if (WARN_ON(pad->enable == 0))
goto out;
@@ -1225,13 +1306,10 @@ static int tegra210_hsic_phy_power_on(struct phy *phy)
struct tegra_xusb_hsic_lane *hsic = to_hsic_lane(lane);
struct tegra_xusb_hsic_pad *pad = to_hsic_pad(lane->pad);
struct tegra_xusb_padctl *padctl = lane->pad->padctl;
- struct tegra210_xusb_padctl *priv;
unsigned int index = lane->index;
u32 value;
int err;
- priv = to_tegra210_xusb_padctl(padctl);
-
err = regulator_enable(pad->supply);
if (err)
return err;
@@ -1945,6 +2023,52 @@ static const struct tegra_xusb_port_ops tegra210_usb3_port_ops = {
.map = tegra210_usb3_port_map,
};
+static int tegra210_xusb_padctl_vbus_override(struct tegra_xusb_padctl *padctl,
+ bool status)
+{
+ u32 value;
+
+ dev_dbg(padctl->dev, "%s vbus override\n", status ? "set" : "clear");
+
+ value = padctl_readl(padctl, XUSB_PADCTL_USB2_VBUS_ID);
+
+ if (status) {
+ value |= XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_VBUS_ON;
+ value &= ~(XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_MASK <<
+ XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_SHIFT);
+ value |= XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_FLOATING <<
+ XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_SHIFT;
+ } else {
+ value &= ~XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_VBUS_ON;
+ }
+
+ padctl_writel(padctl, value, XUSB_PADCTL_USB2_VBUS_ID);
+
+ return 0;
+}
+
+static int tegra210_utmi_port_reset(struct phy *phy)
+{
+ struct tegra_xusb_padctl *padctl;
+ struct tegra_xusb_lane *lane;
+ u32 value;
+
+ lane = phy_get_drvdata(phy);
+ padctl = lane->pad->padctl;
+
+ value = padctl_readl(padctl,
+ XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPADX_CTL0(lane->index));
+
+ if ((value & XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPAD_CTL0_ZIP) ||
+ (value & XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPAD_CTL0_ZIN)) {
+ tegra210_xusb_padctl_vbus_override(padctl, false);
+ tegra210_xusb_padctl_vbus_override(padctl, true);
+ return 1;
+ }
+
+ return 0;
+}
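
tegra210_utmi_port_reset() uses the override as a kick for ports whose ZIP/ZIN
charger-detect comparators have latched: it clears the override and
immediately re-asserts it. A userspace model of the VBUS_ID manipulation,
using the bit positions defined in this hunk:

#include <stdio.h>
#include <stdint.h>

#define OVERRIDE_VBUS_ON    (1u << 14)
#define OVERRIDE_SHIFT      18
#define OVERRIDE_MASK       0xfu
#define OVERRIDE_FLOATING   8u

static uint32_t usb2_vbus_id;   /* stand-in for XUSB_PADCTL_USB2_VBUS_ID */

static void vbus_override(int set)
{
        uint32_t value = usb2_vbus_id;          /* models padctl_readl() */

        if (set) {
                value |= OVERRIDE_VBUS_ON;
                value &= ~(OVERRIDE_MASK << OVERRIDE_SHIFT);
                value |= OVERRIDE_FLOATING << OVERRIDE_SHIFT;
        } else {
                value &= ~OVERRIDE_VBUS_ON;
        }

        usb2_vbus_id = value;                   /* models padctl_writel() */
}

int main(void)
{
        /* The port-reset path: drop the override, then re-assert it. */
        vbus_override(0);
        vbus_override(1);
        printf("USB2_VBUS_ID = %#010x\n", usb2_vbus_id);
        return 0;
}
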
+
static int
tegra210_xusb_read_fuse_calibration(struct tegra210_xusb_fuse_calibration *fuse)
{
@@ -2007,6 +2131,8 @@ static const struct tegra_xusb_padctl_ops tegra210_xusb_padctl_ops = {
.remove = tegra210_xusb_padctl_remove,
.usb3_set_lfps_detect = tegra210_usb3_set_lfps_detect,
.hsic_set_idle = tegra210_hsic_set_idle,
+ .vbus_override = tegra210_xusb_padctl_vbus_override,
+ .utmi_port_reset = tegra210_utmi_port_reset,
};
static const char * const tegra210_xusb_padctl_supply_names[] = {
@@ -2036,6 +2162,7 @@ const struct tegra_xusb_padctl_soc tegra210_xusb_padctl_soc = {
.ops = &tegra210_xusb_padctl_ops,
.supply_names = tegra210_xusb_padctl_supply_names,
.num_supplies = ARRAY_SIZE(tegra210_xusb_padctl_supply_names),
+ .need_fake_usb3_port = true,
};
EXPORT_SYMBOL_GPL(tegra210_xusb_padctl_soc);
diff --git a/drivers/phy/tegra/xusb.c b/drivers/phy/tegra/xusb.c
index 2ea8497af82a..f98ec3922c02 100644
--- a/drivers/phy/tegra/xusb.c
+++ b/drivers/phy/tegra/xusb.c
@@ -800,9 +800,62 @@ static void __tegra_xusb_remove_ports(struct tegra_xusb_padctl *padctl)
}
}
+static int tegra_xusb_find_unused_usb3_port(struct tegra_xusb_padctl *padctl)
+{
+ struct device_node *np;
+ unsigned int i;
+
+ for (i = 0; i < padctl->soc->ports.usb3.count; i++) {
+ np = tegra_xusb_find_port_node(padctl, "usb3", i);
+ if (!np || !of_device_is_available(np))
+ return i;
+ }
+
+ return -ENODEV;
+}
+
+static bool tegra_xusb_port_is_companion(struct tegra_xusb_usb2_port *usb2)
+{
+ unsigned int i;
+ struct tegra_xusb_usb3_port *usb3;
+ struct tegra_xusb_padctl *padctl = usb2->base.padctl;
+
+ for (i = 0; i < padctl->soc->ports.usb3.count; i++) {
+ usb3 = tegra_xusb_find_usb3_port(padctl, i);
+ if (usb3 && usb3->port == usb2->base.index)
+ return true;
+ }
+
+ return false;
+}
+
+static int tegra_xusb_update_usb3_fake_port(struct tegra_xusb_usb2_port *usb2)
+{
+ int fake;
+
+ /* Disable usb3_port_fake usage by default and assign if needed */
+ usb2->usb3_port_fake = -1;
+
+ if ((usb2->mode == USB_DR_MODE_OTG ||
+ usb2->mode == USB_DR_MODE_PERIPHERAL) &&
+ !tegra_xusb_port_is_companion(usb2)) {
+ fake = tegra_xusb_find_unused_usb3_port(usb2->base.padctl);
+ if (fake < 0) {
+ dev_err(&usb2->base.dev, "no unused USB3 ports available\n");
+ return -ENODEV;
+ }
+
+ dev_dbg(&usb2->base.dev, "Found unused usb3 port: %d\n", fake);
+ usb2->usb3_port_fake = fake;
+ }
+
+ return 0;
+}
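
The fake-port assignment applies only to device-capable USB2 ports that lack a
companion USB3 port: such a port borrows the first USB3 slot the device tree
leaves absent or disabled. A userspace model of the selection; the port tables
here are invented for illustration:

#include <stdio.h>

#define USB3_COUNT 4

/* Modeled device tree: which USB3 ports exist, and which USB2 port
 * each one is a companion of (-1 when the port is absent). */
static const int usb3_present[USB3_COUNT]   = { 1, 1, 0, 0 };
static const int usb3_companion[USB3_COUNT] = { 0, 1, -1, -1 };

static int find_unused_usb3_port(void)
{
        for (int i = 0; i < USB3_COUNT; i++)
                if (!usb3_present[i])
                        return i;
        return -1;
}

static int has_companion(int usb2_index)
{
        for (int i = 0; i < USB3_COUNT; i++)
                if (usb3_present[i] && usb3_companion[i] == usb2_index)
                        return 1;
        return 0;
}

int main(void)
{
        int usb2_index = 2;     /* a peripheral/OTG-mode port in this model */
        int fake = -1;

        if (!has_companion(usb2_index))
                fake = find_unused_usb3_port();

        printf("usb2 port %d -> usb3_port_fake = %d\n", usb2_index, fake);
        return 0;
}
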
+
static int tegra_xusb_setup_ports(struct tegra_xusb_padctl *padctl)
{
struct tegra_xusb_port *port;
+ struct tegra_xusb_usb2_port *usb2;
unsigned int i;
int err = 0;
@@ -832,6 +885,18 @@ static int tegra_xusb_setup_ports(struct tegra_xusb_padctl *padctl)
goto remove_ports;
}
+ if (padctl->soc->need_fake_usb3_port) {
+ for (i = 0; i < padctl->soc->ports.usb2.count; i++) {
+ usb2 = tegra_xusb_find_usb2_port(padctl, i);
+ if (!usb2)
+ continue;
+
+ err = tegra_xusb_update_usb3_fake_port(usb2);
+ if (err < 0)
+ goto remove_ports;
+ }
+ }
+
list_for_each_entry(port, &padctl->ports, list) {
err = port->ops->enable(port);
if (err < 0)
@@ -862,7 +927,6 @@ static int tegra_xusb_padctl_probe(struct platform_device *pdev)
struct tegra_xusb_padctl *padctl;
const struct of_device_id *match;
struct resource *res;
- unsigned int i;
int err;
/* for backwards compatibility with old device trees */
@@ -907,8 +971,9 @@ static int tegra_xusb_padctl_probe(struct platform_device *pdev)
goto remove;
}
- for (i = 0; i < padctl->soc->num_supplies; i++)
- padctl->supplies[i].supply = padctl->soc->supply_names[i];
+ regulator_bulk_set_supply_names(padctl->supplies,
+ padctl->soc->supply_names,
+ padctl->soc->num_supplies);
err = devm_regulator_bulk_get(&pdev->dev, padctl->soc->num_supplies,
padctl->supplies);
@@ -1056,6 +1121,28 @@ int tegra_xusb_padctl_usb3_set_lfps_detect(struct tegra_xusb_padctl *padctl,
}
EXPORT_SYMBOL_GPL(tegra_xusb_padctl_usb3_set_lfps_detect);
+int tegra_xusb_padctl_set_vbus_override(struct tegra_xusb_padctl *padctl,
+ bool val)
+{
+ if (padctl->soc->ops->vbus_override)
+ return padctl->soc->ops->vbus_override(padctl, val);
+
+ return -ENOTSUPP;
+}
+EXPORT_SYMBOL_GPL(tegra_xusb_padctl_set_vbus_override);
+
+int tegra_phy_xusb_utmi_port_reset(struct phy *phy)
+{
+ struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+ struct tegra_xusb_padctl *padctl = lane->pad->padctl;
+
+ if (padctl->soc->ops->utmi_port_reset)
+ return padctl->soc->ops->utmi_port_reset(phy);
+
+ return -ENOTSUPP;
+}
+EXPORT_SYMBOL_GPL(tegra_phy_xusb_utmi_port_reset);
+
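
Both exports are intended to be driven from a USB controller or UDC driver on
role and session changes. A hypothetical caller sketch (kernel-style
pseudocode; the surrounding driver structure is invented, and only the two
exported functions come from this patch):

/* Hypothetical caller, not part of this patch. */
static int example_start_device_session(struct tegra_xusb_padctl *padctl,
                                        struct phy *utmi_phy)
{
        int err;

        err = tegra_xusb_padctl_set_vbus_override(padctl, true);
        if (err < 0)
                return err;     /* -ENOTSUPP on SoCs without the op */

        /* Returns 1 when the charger-detect latches forced a VBUS
         * override cycle, 0 otherwise. */
        err = tegra_phy_xusb_utmi_port_reset(utmi_phy);
        if (err < 0)
                return err;

        return 0;
}
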
MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>");
MODULE_DESCRIPTION("Tegra XUSB Pad Controller driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/tegra/xusb.h b/drivers/phy/tegra/xusb.h
index 093076ca27fd..da94fcce6307 100644
--- a/drivers/phy/tegra/xusb.h
+++ b/drivers/phy/tegra/xusb.h
@@ -291,6 +291,7 @@ struct tegra_xusb_usb2_port {
struct regulator *supply;
enum usb_dr_mode mode;
bool internal;
+ int usb3_port_fake;
};
static inline struct tegra_xusb_usb2_port *
@@ -372,6 +373,8 @@ struct tegra_xusb_padctl_ops {
unsigned int index, bool idle);
int (*usb3_set_lfps_detect)(struct tegra_xusb_padctl *padctl,
unsigned int index, bool enable);
+ int (*vbus_override)(struct tegra_xusb_padctl *padctl, bool set);
+ int (*utmi_port_reset)(struct phy *phy);
};
struct tegra_xusb_padctl_soc {
@@ -389,6 +392,7 @@ struct tegra_xusb_padctl_soc {
const char * const *supply_names;
unsigned int num_supplies;
+ bool need_fake_usb3_port;
};
struct tegra_xusb_padctl {
diff --git a/drivers/phy/ti/Kconfig b/drivers/phy/ti/Kconfig
index c3fa1840f8de..174888609779 100644
--- a/drivers/phy/ti/Kconfig
+++ b/drivers/phy/ti/Kconfig
@@ -90,8 +90,8 @@ config TWL4030_USB
config PHY_TI_GMII_SEL
tristate
- default y if TI_CPSW=y
- depends on TI_CPSW || COMPILE_TEST
+ default y if TI_CPSW=y || TI_CPSW_SWITCHDEV=y
+ depends on TI_CPSW || TI_CPSW_SWITCHDEV || COMPILE_TEST
select GENERIC_PHY
select REGMAP
default m
diff --git a/drivers/phy/ti/phy-dm816x-usb.c b/drivers/phy/ti/phy-dm816x-usb.c
index cbcce7cf0028..26f194779064 100644
--- a/drivers/phy/ti/phy-dm816x-usb.c
+++ b/drivers/phy/ti/phy-dm816x-usb.c
@@ -189,7 +189,6 @@ static int dm816x_usb_phy_probe(struct platform_device *pdev)
struct phy_provider *phy_provider;
struct usb_otg *otg;
const struct of_device_id *of_id;
- const struct usb_phy_data *phy_data;
int error;
of_id = of_match_device(of_match_ptr(dm816x_usb_phy_id_table),
@@ -220,8 +219,6 @@ static int dm816x_usb_phy_probe(struct platform_device *pdev)
if (phy->usbphy_ctrl == 0x2c)
phy->instance = 1;
- phy_data = of_id->data;
-
otg = devm_kzalloc(&pdev->dev, sizeof(*otg), GFP_KERNEL);
if (!otg)
return -ENOMEM;
diff --git a/drivers/phy/ti/phy-gmii-sel.c b/drivers/phy/ti/phy-gmii-sel.c
index a52c5bb35033..a28bd15297f5 100644
--- a/drivers/phy/ti/phy-gmii-sel.c
+++ b/drivers/phy/ti/phy-gmii-sel.c
@@ -69,11 +69,11 @@ static int phy_gmii_sel_mode(struct phy *phy, enum phy_mode mode, int submode)
break;
case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
gmii_sel_mode = AM33XX_GMII_SEL_MODE_RGMII;
break;
case PHY_INTERFACE_MODE_RGMII_ID:
- case PHY_INTERFACE_MODE_RGMII_RXID:
case PHY_INTERFACE_MODE_RGMII_TXID:
gmii_sel_mode = AM33XX_GMII_SEL_MODE_RGMII;
rgmii_id = 1;
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index b372419d61f2..3bfbf2ff6e2b 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -32,15 +32,15 @@ config DEBUG_PINCTRL
Say Y here to add some extra checks and diagnostics to PINCTRL calls.
config PINCTRL_ARTPEC6
- bool "Axis ARTPEC-6 pin controller driver"
- depends on MACH_ARTPEC6
- select PINMUX
- select GENERIC_PINCONF
- help
- This is the driver for the Axis ARTPEC-6 pin controller. This driver
- supports pin function multiplexing as well as pin bias and drive
- strength configuration. Device tree integration instructions can be
- found in Documentation/devicetree/bindings/pinctrl/axis,artpec6-pinctrl.txt
+ bool "Axis ARTPEC-6 pin controller driver"
+ depends on MACH_ARTPEC6
+ select PINMUX
+ select GENERIC_PINCONF
+ help
+ This is the driver for the Axis ARTPEC-6 pin controller. This driver
+ supports pin function multiplexing as well as pin bias and drive
+ strength configuration. Device tree integration instructions can be
+ found in Documentation/devicetree/bindings/pinctrl/axis,artpec6-pinctrl.txt
config PINCTRL_AS3722
tristate "Pinctrl and GPIO driver for ams AS3722 PMIC"
@@ -420,4 +420,22 @@ config PINCTRL_TB10X
depends on OF && ARC_PLAT_TB10X
select GPIOLIB
+config PINCTRL_EQUILIBRIUM
+ tristate "Generic pinctrl and GPIO driver for Intel Lightning Mountain SoC"
+ select PINMUX
+ select PINCONF
+ select GPIOLIB
+ select GPIO_GENERIC
+ select GPIOLIB_IRQCHIP
+ select GENERIC_PINCONF
+ select GENERIC_PINCTRL_GROUPS
+ select GENERIC_PINMUX_FUNCTIONS
+
+ help
+ The Equilibrium pinctrl driver is a pinctrl and GPIO driver for the
+ Intel Lightning Mountain network processor SoC. It supports both the
+ Linux GPIO and pin control frameworks and provides interfaces to set up
+ pinmux, assign pin functions and configure GPIO attributes for LGM SoC
+ pins. Pinmux and pinconf settings are retrieved from the device tree.
+
endif
diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile
index ac537fdbc998..879f312bfb75 100644
--- a/drivers/pinctrl/Makefile
+++ b/drivers/pinctrl/Makefile
@@ -46,6 +46,7 @@ obj-$(CONFIG_PINCTRL_ZYNQ) += pinctrl-zynq.o
obj-$(CONFIG_PINCTRL_INGENIC) += pinctrl-ingenic.o
obj-$(CONFIG_PINCTRL_RK805) += pinctrl-rk805.o
obj-$(CONFIG_PINCTRL_OCELOT) += pinctrl-ocelot.o
+obj-$(CONFIG_PINCTRL_EQUILIBRIUM) += pinctrl-equilibrium.o
obj-y += actions/
obj-$(CONFIG_ARCH_ASPEED) += aspeed/
diff --git a/drivers/pinctrl/actions/pinctrl-owl.c b/drivers/pinctrl/actions/pinctrl-owl.c
index 5dfe7188a5f8..5a0c8e87aa7c 100644
--- a/drivers/pinctrl/actions/pinctrl-owl.c
+++ b/drivers/pinctrl/actions/pinctrl-owl.c
@@ -915,7 +915,6 @@ static int owl_gpio_init(struct owl_pinctrl *pctrl)
int owl_pinctrl_probe(struct platform_device *pdev,
struct owl_pinctrl_soc_data *soc_data)
{
- struct resource *res;
struct owl_pinctrl *pctrl;
int ret, i;
@@ -923,8 +922,7 @@ int owl_pinctrl_probe(struct platform_device *pdev,
if (!pctrl)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pctrl->base = devm_ioremap_resource(&pdev->dev, res);
+ pctrl->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pctrl->base))
return PTR_ERR(pctrl->base);
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm281xx.c b/drivers/pinctrl/bcm/pinctrl-bcm281xx.c
index bc3b232a727a..f690fc5cd688 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm281xx.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm281xx.c
@@ -1400,12 +1400,10 @@ static struct pinctrl_desc bcm281xx_pinctrl_desc = {
static int __init bcm281xx_pinctrl_probe(struct platform_device *pdev)
{
struct bcm281xx_pinctrl_data *pdata = &bcm281xx_pinctrl;
- struct resource *res;
struct pinctrl_dev *pctl;
/* So far We can assume there is only 1 bank of registers */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pdata->reg_base = devm_ioremap_resource(&pdev->dev, res);
+ pdata->reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pdata->reg_base)) {
dev_err(&pdev->dev, "Failed to ioremap MEM resource\n");
return -ENODEV;
diff --git a/drivers/pinctrl/bcm/pinctrl-cygnus-mux.c b/drivers/pinctrl/bcm/pinctrl-cygnus-mux.c
index dcab2204c60c..4344c5732400 100644
--- a/drivers/pinctrl/bcm/pinctrl-cygnus-mux.c
+++ b/drivers/pinctrl/bcm/pinctrl-cygnus-mux.c
@@ -940,7 +940,6 @@ static int cygnus_mux_log_init(struct cygnus_pinctrl *pinctrl)
static int cygnus_pinmux_probe(struct platform_device *pdev)
{
struct cygnus_pinctrl *pinctrl;
- struct resource *res;
int i, ret;
struct pinctrl_pin_desc *pins;
unsigned num_pins = ARRAY_SIZE(cygnus_pins);
@@ -953,15 +952,13 @@ static int cygnus_pinmux_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pinctrl);
spin_lock_init(&pinctrl->lock);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pinctrl->base0 = devm_ioremap_resource(&pdev->dev, res);
+ pinctrl->base0 = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pinctrl->base0)) {
dev_err(&pdev->dev, "unable to map I/O space\n");
return PTR_ERR(pinctrl->base0);
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- pinctrl->base1 = devm_ioremap_resource(&pdev->dev, res);
+ pinctrl->base1 = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(pinctrl->base1)) {
dev_err(&pdev->dev, "unable to map I/O space\n");
return PTR_ERR(pinctrl->base1);
diff --git a/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c b/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
index 42f7ab383ad9..831a9318c384 100644
--- a/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
+++ b/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
@@ -795,8 +795,7 @@ static int iproc_gpio_probe(struct platform_device *pdev)
chip->dev = dev;
platform_set_drvdata(pdev, chip);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- chip->base = devm_ioremap_resource(dev, res);
+ chip->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(chip->base)) {
dev_err(dev, "unable to map I/O memory\n");
return PTR_ERR(chip->base);
@@ -850,7 +849,7 @@ static int iproc_gpio_probe(struct platform_device *pdev)
struct gpio_irq_chip *girq;
irqc = &chip->irqchip;
- irqc->name = "bcm-iproc-gpio";
+ irqc->name = dev_name(dev);
irqc->irq_ack = iproc_gpio_irq_ack;
irqc->irq_mask = iproc_gpio_irq_mask;
irqc->irq_unmask = iproc_gpio_irq_unmask;
diff --git a/drivers/pinctrl/bcm/pinctrl-ns2-mux.c b/drivers/pinctrl/bcm/pinctrl-ns2-mux.c
index 9fabc451550e..32f268f173d1 100644
--- a/drivers/pinctrl/bcm/pinctrl-ns2-mux.c
+++ b/drivers/pinctrl/bcm/pinctrl-ns2-mux.c
@@ -1042,8 +1042,7 @@ static int ns2_pinmux_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pinctrl);
spin_lock_init(&pinctrl->lock);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pinctrl->base0 = devm_ioremap_resource(&pdev->dev, res);
+ pinctrl->base0 = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pinctrl->base0))
return PTR_ERR(pinctrl->base0);
@@ -1057,8 +1056,7 @@ static int ns2_pinmux_probe(struct platform_device *pdev)
return -ENOMEM;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
- pinctrl->pinconf_base = devm_ioremap_resource(&pdev->dev, res);
+ pinctrl->pinconf_base = devm_platform_ioremap_resource(pdev, 2);
if (IS_ERR(pinctrl->pinconf_base))
return PTR_ERR(pinctrl->pinconf_base);
diff --git a/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c b/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c
index e67ae52023ad..bed0124388c0 100644
--- a/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c
+++ b/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c
@@ -64,17 +64,16 @@
* @gc: GPIO chip
* @pctl: pointer to pinctrl_dev
* @pctldesc: pinctrl descriptor
- * @irq_domain: pointer to irq domain
* @lock: lock to protect access to I/O registers
*/
struct nsp_gpio {
struct device *dev;
void __iomem *base;
void __iomem *io_ctrl;
+ struct irq_chip irqchip;
struct gpio_chip gc;
struct pinctrl_dev *pctl;
struct pinctrl_desc pctldesc;
- struct irq_domain *irq_domain;
raw_spinlock_t lock;
};
@@ -136,8 +135,8 @@ static inline bool nsp_get_bit(struct nsp_gpio *chip, enum base_type address,
static irqreturn_t nsp_gpio_irq_handler(int irq, void *data)
{
- struct nsp_gpio *chip = (struct nsp_gpio *)data;
- struct gpio_chip gc = chip->gc;
+ struct gpio_chip *gc = (struct gpio_chip *)data;
+ struct nsp_gpio *chip = gpiochip_get_data(gc);
int bit;
unsigned long int_bits = 0;
u32 int_status;
@@ -155,14 +154,14 @@ static irqreturn_t nsp_gpio_irq_handler(int irq, void *data)
level &= readl(chip->base + NSP_GPIO_INT_MASK);
int_bits = level | event;
- for_each_set_bit(bit, &int_bits, gc.ngpio) {
+ for_each_set_bit(bit, &int_bits, gc->ngpio) {
/*
* Clear the interrupt before invoking the
* handler, so we do not leave any window
*/
writel(BIT(bit), chip->base + NSP_GPIO_EVENT);
generic_handle_irq(
- irq_linear_revmap(chip->irq_domain, bit));
+ irq_linear_revmap(gc->irq.domain, bit));
}
}
@@ -171,7 +170,8 @@ static irqreturn_t nsp_gpio_irq_handler(int irq, void *data)
static void nsp_gpio_irq_ack(struct irq_data *d)
{
- struct nsp_gpio *chip = irq_data_get_irq_chip_data(d);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct nsp_gpio *chip = gpiochip_get_data(gc);
unsigned gpio = d->hwirq;
u32 val = BIT(gpio);
u32 trigger_type;
@@ -189,7 +189,8 @@ static void nsp_gpio_irq_ack(struct irq_data *d)
*/
static void nsp_gpio_irq_set_mask(struct irq_data *d, bool unmask)
{
- struct nsp_gpio *chip = irq_data_get_irq_chip_data(d);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct nsp_gpio *chip = gpiochip_get_data(gc);
unsigned gpio = d->hwirq;
u32 trigger_type;
@@ -202,7 +203,8 @@ static void nsp_gpio_irq_set_mask(struct irq_data *d, bool unmask)
static void nsp_gpio_irq_mask(struct irq_data *d)
{
- struct nsp_gpio *chip = irq_data_get_irq_chip_data(d);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct nsp_gpio *chip = gpiochip_get_data(gc);
unsigned long flags;
raw_spin_lock_irqsave(&chip->lock, flags);
@@ -212,7 +214,8 @@ static void nsp_gpio_irq_mask(struct irq_data *d)
static void nsp_gpio_irq_unmask(struct irq_data *d)
{
- struct nsp_gpio *chip = irq_data_get_irq_chip_data(d);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct nsp_gpio *chip = gpiochip_get_data(gc);
unsigned long flags;
raw_spin_lock_irqsave(&chip->lock, flags);
@@ -222,7 +225,8 @@ static void nsp_gpio_irq_unmask(struct irq_data *d)
static int nsp_gpio_irq_set_type(struct irq_data *d, unsigned int type)
{
- struct nsp_gpio *chip = irq_data_get_irq_chip_data(d);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct nsp_gpio *chip = gpiochip_get_data(gc);
unsigned gpio = d->hwirq;
bool level_low;
bool falling;
@@ -265,16 +269,6 @@ static int nsp_gpio_irq_set_type(struct irq_data *d, unsigned int type)
return 0;
}
-static struct irq_chip nsp_gpio_irq_chip = {
- .name = "gpio-a",
- .irq_enable = nsp_gpio_irq_unmask,
- .irq_disable = nsp_gpio_irq_mask,
- .irq_ack = nsp_gpio_irq_ack,
- .irq_mask = nsp_gpio_irq_mask,
- .irq_unmask = nsp_gpio_irq_unmask,
- .irq_set_type = nsp_gpio_irq_set_type,
-};
-
static int nsp_gpio_direction_input(struct gpio_chip *gc, unsigned gpio)
{
struct nsp_gpio *chip = gpiochip_get_data(gc);
@@ -303,30 +297,36 @@ static int nsp_gpio_direction_output(struct gpio_chip *gc, unsigned gpio,
return 0;
}
-static void nsp_gpio_set(struct gpio_chip *gc, unsigned gpio, int val)
+static int nsp_gpio_get_direction(struct gpio_chip *gc, unsigned gpio)
{
struct nsp_gpio *chip = gpiochip_get_data(gc);
unsigned long flags;
+ int val;
raw_spin_lock_irqsave(&chip->lock, flags);
- nsp_set_bit(chip, REG, NSP_GPIO_DATA_OUT, gpio, !!(val));
+ val = nsp_get_bit(chip, REG, NSP_GPIO_OUT_EN, gpio);
raw_spin_unlock_irqrestore(&chip->lock, flags);
- dev_dbg(chip->dev, "gpio:%u set, value:%d\n", gpio, val);
+ return !val;
}
-static int nsp_gpio_get(struct gpio_chip *gc, unsigned gpio)
+static void nsp_gpio_set(struct gpio_chip *gc, unsigned gpio, int val)
{
struct nsp_gpio *chip = gpiochip_get_data(gc);
+ unsigned long flags;
- return !!(readl(chip->base + NSP_GPIO_DATA_IN) & BIT(gpio));
+ raw_spin_lock_irqsave(&chip->lock, flags);
+ nsp_set_bit(chip, REG, NSP_GPIO_DATA_OUT, gpio, !!(val));
+ raw_spin_unlock_irqrestore(&chip->lock, flags);
+
+ dev_dbg(chip->dev, "gpio:%u set, value:%d\n", gpio, val);
}
-static int nsp_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
+static int nsp_gpio_get(struct gpio_chip *gc, unsigned gpio)
{
struct nsp_gpio *chip = gpiochip_get_data(gc);
- return irq_linear_revmap(chip->irq_domain, offset);
+ return !!(readl(chip->base + NSP_GPIO_DATA_IN) & BIT(gpio));
}
static int nsp_get_groups_count(struct pinctrl_dev *pctldev)
@@ -613,10 +613,9 @@ static const struct of_device_id nsp_gpio_of_match[] = {
static int nsp_gpio_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct resource *res;
struct nsp_gpio *chip;
struct gpio_chip *gc;
- u32 val, count;
+ u32 val;
int irq, ret;
if (of_property_read_u32(pdev->dev.of_node, "ngpios", &val)) {
@@ -631,15 +630,13 @@ static int nsp_gpio_probe(struct platform_device *pdev)
chip->dev = dev;
platform_set_drvdata(pdev, chip);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- chip->base = devm_ioremap_resource(dev, res);
+ chip->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(chip->base)) {
dev_err(dev, "unable to map I/O memory\n");
return PTR_ERR(chip->base);
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- chip->io_ctrl = devm_ioremap_resource(dev, res);
+ chip->io_ctrl = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(chip->io_ctrl)) {
dev_err(dev, "unable to map I/O memory\n");
return PTR_ERR(chip->io_ctrl);
@@ -657,46 +654,47 @@ static int nsp_gpio_probe(struct platform_device *pdev)
gc->free = gpiochip_generic_free;
gc->direction_input = nsp_gpio_direction_input;
gc->direction_output = nsp_gpio_direction_output;
+ gc->get_direction = nsp_gpio_get_direction;
gc->set = nsp_gpio_set;
gc->get = nsp_gpio_get;
- gc->to_irq = nsp_gpio_to_irq;
/* optional GPIO interrupt support */
irq = platform_get_irq(pdev, 0);
if (irq > 0) {
- /* Create irq domain so that each pin can be assigned an IRQ.*/
- chip->irq_domain = irq_domain_add_linear(gc->of_node, gc->ngpio,
- &irq_domain_simple_ops,
- chip);
- if (!chip->irq_domain) {
- dev_err(&pdev->dev, "Couldn't allocate IRQ domain\n");
- return -ENXIO;
- }
+ struct gpio_irq_chip *girq;
+ struct irq_chip *irqc;
- /* Map each gpio to an IRQ and set the handler for gpiolib. */
- for (count = 0; count < gc->ngpio; count++) {
- int irq = irq_create_mapping(chip->irq_domain, count);
+ irqc = &chip->irqchip;
+ irqc->name = "gpio-a";
+ irqc->irq_ack = nsp_gpio_irq_ack;
+ irqc->irq_mask = nsp_gpio_irq_mask;
+ irqc->irq_unmask = nsp_gpio_irq_unmask;
+ irqc->irq_set_type = nsp_gpio_irq_set_type;
- irq_set_chip_and_handler(irq, &nsp_gpio_irq_chip,
- handle_simple_irq);
- irq_set_chip_data(irq, chip);
- }
+ val = readl(chip->base + NSP_CHIP_A_INT_MASK);
+ val = val | NSP_CHIP_A_GPIO_INT_BIT;
+ writel(val, (chip->base + NSP_CHIP_A_INT_MASK));
/* Install ISR for this GPIO controller. */
- ret = devm_request_irq(&pdev->dev, irq, nsp_gpio_irq_handler,
- IRQF_SHARED, "gpio-a", chip);
+ ret = devm_request_irq(dev, irq, nsp_gpio_irq_handler,
+ IRQF_SHARED, "gpio-a", &chip->gc);
if (ret) {
dev_err(&pdev->dev, "Unable to request IRQ%d: %d\n",
irq, ret);
- goto err_rm_gpiochip;
+ return ret;
}
- val = readl(chip->base + NSP_CHIP_A_INT_MASK);
- val = val | NSP_CHIP_A_GPIO_INT_BIT;
- writel(val, (chip->base + NSP_CHIP_A_INT_MASK));
+ girq = &chip->gc.irq;
+ girq->chip = irqc;
+ /* This will let us handle the parent IRQ in the driver */
+ girq->parent_handler = NULL;
+ girq->num_parents = 0;
+ girq->parents = NULL;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_simple_irq;
}
- ret = gpiochip_add_data(gc, chip);
+ ret = devm_gpiochip_add_data(dev, gc, chip);
if (ret < 0) {
dev_err(dev, "unable to add GPIO chip\n");
return ret;
@@ -705,15 +703,10 @@ static int nsp_gpio_probe(struct platform_device *pdev)
ret = nsp_gpio_register_pinconf(chip);
if (ret) {
dev_err(dev, "unable to register pinconf\n");
- goto err_rm_gpiochip;
+ return ret;
}
return 0;
-
-err_rm_gpiochip:
- gpiochip_remove(gc);
-
- return ret;
}
static struct platform_driver nsp_gpio_driver = {
diff --git a/drivers/pinctrl/bcm/pinctrl-nsp-mux.c b/drivers/pinctrl/bcm/pinctrl-nsp-mux.c
index 87618a4e90e4..3756fc9d5826 100644
--- a/drivers/pinctrl/bcm/pinctrl-nsp-mux.c
+++ b/drivers/pinctrl/bcm/pinctrl-nsp-mux.c
@@ -571,8 +571,7 @@ static int nsp_pinmux_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pinctrl);
spin_lock_init(&pinctrl->lock);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pinctrl->base0 = devm_ioremap_resource(&pdev->dev, res);
+ pinctrl->base0 = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pinctrl->base0))
return PTR_ERR(pinctrl->base0);
@@ -586,8 +585,7 @@ static int nsp_pinmux_probe(struct platform_device *pdev)
return -ENOMEM;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
- pinctrl->base2 = devm_ioremap_resource(&pdev->dev, res);
+ pinctrl->base2 = devm_platform_ioremap_resource(pdev, 2);
if (IS_ERR(pinctrl->base2))
return PTR_ERR(pinctrl->base2);
diff --git a/drivers/pinctrl/devicetree.c b/drivers/pinctrl/devicetree.c
index 5d6d8b1e9062..674920daac26 100644
--- a/drivers/pinctrl/devicetree.c
+++ b/drivers/pinctrl/devicetree.c
@@ -29,6 +29,13 @@ struct pinctrl_dt_map {
static void dt_free_map(struct pinctrl_dev *pctldev,
struct pinctrl_map *map, unsigned num_maps)
{
+ int i;
+
+ for (i = 0; i < num_maps; ++i) {
+ kfree_const(map[i].dev_name);
+ map[i].dev_name = NULL;
+ }
+
if (pctldev) {
const struct pinctrl_ops *ops = pctldev->desc->pctlops;
if (ops->dt_free_map)
@@ -63,7 +70,13 @@ static int dt_remember_or_free_map(struct pinctrl *p, const char *statename,
/* Initialize common mapping table entry fields */
for (i = 0; i < num_maps; i++) {
- map[i].dev_name = dev_name(p->dev);
+ const char *devname;
+
+ devname = kstrdup_const(dev_name(p->dev), GFP_KERNEL);
+ if (!devname)
+ goto err_free_map;
+
+ map[i].dev_name = devname;
map[i].name = statename;
if (pctldev)
map[i].ctrl_dev_name = dev_name(pctldev->dev);
@@ -71,10 +84,8 @@ static int dt_remember_or_free_map(struct pinctrl *p, const char *statename,
/* Remember the converted mapping table entries */
dt_map = kzalloc(sizeof(*dt_map), GFP_KERNEL);
- if (!dt_map) {
- dt_free_map(pctldev, map, num_maps);
- return -ENOMEM;
- }
+ if (!dt_map)
+ goto err_free_map;
dt_map->pctldev = pctldev;
dt_map->map = map;
@@ -82,6 +93,10 @@ static int dt_remember_or_free_map(struct pinctrl *p, const char *statename,
list_add_tail(&dt_map->node, &p->dt_maps);
return pinctrl_register_map(map, num_maps, false);
+
+err_free_map:
+ dt_free_map(pctldev, map, num_maps);
+ return -ENOMEM;
}
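
The leak fixed here came from map[i].dev_name aliasing dev_name(p->dev), which
the mapping table can outlive; the fix takes an owned copy via kstrdup_const()
and releases it in dt_free_map() via kfree_const(), which frees only strings
that were really duplicated. A userspace model of that pairing, with a toy
rodata test standing in for the kernel's section-bounds check:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static const char rodata_name[] = "1c20800.pinctrl";

/* Toy stand-in: the kernel checks the .rodata section bounds instead
 * of comparing against a single known pointer. */
static int in_rodata(const char *s)
{
        return s == rodata_name;
}

static const char *kstrdup_const_model(const char *s)
{
        return in_rodata(s) ? s : strdup(s);
}

static void kfree_const_model(const char *s)
{
        if (!in_rodata(s))
                free((void *)s);
}

int main(void)
{
        char heap_name[] = "soc:pinctrl@0";     /* like a devm'd dev name */
        const char *a = kstrdup_const_model(rodata_name);  /* aliased */
        const char *b = kstrdup_const_model(heap_name);    /* duplicated */

        printf("a aliased: %d, b duplicated: %d\n",
               a == rodata_name, b != heap_name);
        kfree_const_model(a);   /* no-op for the rodata string */
        kfree_const_model(b);   /* frees the real duplicate */
        return 0;
}
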
struct pinctrl_dev *of_pinctrl_get(struct device_node *np)
@@ -147,6 +162,16 @@ static int dt_to_map_one_config(struct pinctrl *p,
ret = ops->dt_node_to_map(pctldev, np_config, &map, &num_maps);
if (ret < 0)
return ret;
+ else if (num_maps == 0) {
+ /*
+ * If we have no valid maps (perhaps caused by an empty pinctrl
+ * node or a typo), there is no need to remember this, so just
+ * return.
+ */
+ dev_info(p->dev,
+ "no valid maps for state %s\n", statename);
+ return 0;
+ }
/* Stash the mapping table chunk away for later use */
return dt_remember_or_free_map(p, statename, pctldev, map, num_maps);
@@ -166,21 +191,6 @@ static int dt_remember_dummy_state(struct pinctrl *p, const char *statename)
return dt_remember_or_free_map(p, statename, NULL, map, 1);
}
-bool pinctrl_dt_has_hogs(struct pinctrl_dev *pctldev)
-{
- struct device_node *np;
- struct property *prop;
- int size;
-
- np = pctldev->dev->of_node;
- if (!np)
- return false;
-
- prop = of_find_property(np, "pinctrl-0", &size);
-
- return prop ? true : false;
-}
-
int pinctrl_dt_to_map(struct pinctrl *p, struct pinctrl_dev *pctldev)
{
struct device_node *np = p->dev->of_node;
diff --git a/drivers/pinctrl/devicetree.h b/drivers/pinctrl/devicetree.h
index 00e645d7fac7..efa80779de4f 100644
--- a/drivers/pinctrl/devicetree.h
+++ b/drivers/pinctrl/devicetree.h
@@ -9,8 +9,6 @@ struct of_phandle_args;
#ifdef CONFIG_OF
-bool pinctrl_dt_has_hogs(struct pinctrl_dev *pctldev);
-
void pinctrl_dt_free_maps(struct pinctrl *p);
int pinctrl_dt_to_map(struct pinctrl *p, struct pinctrl_dev *pctldev);
@@ -23,11 +21,6 @@ int pinctrl_parse_index_with_args(const struct device_node *np,
#else
-static inline bool pinctrl_dt_has_hogs(struct pinctrl_dev *pctldev)
-{
- return false;
-}
-
static inline int pinctrl_dt_to_map(struct pinctrl *p,
struct pinctrl_dev *pctldev)
{
diff --git a/drivers/pinctrl/freescale/Kconfig b/drivers/pinctrl/freescale/Kconfig
index 5f4058033ec6..3ea9ce3e0cd9 100644
--- a/drivers/pinctrl/freescale/Kconfig
+++ b/drivers/pinctrl/freescale/Kconfig
@@ -39,12 +39,12 @@ config PINCTRL_IMX27
config PINCTRL_IMX25
- bool "IMX25 pinctrl driver"
- depends on OF
- depends on SOC_IMX25
- select PINCTRL_IMX
- help
- Say Y here to enable the imx25 pinctrl driver
+ bool "IMX25 pinctrl driver"
+ depends on OF
+ depends on SOC_IMX25
+ select PINCTRL_IMX
+ help
+ Say Y here to enable the imx25 pinctrl driver
config PINCTRL_IMX35
bool "IMX35 pinctrl driver"
diff --git a/drivers/pinctrl/intel/Kconfig b/drivers/pinctrl/intel/Kconfig
index 452a14f78707..6091947a8f51 100644
--- a/drivers/pinctrl/intel/Kconfig
+++ b/drivers/pinctrl/intel/Kconfig
@@ -115,4 +115,11 @@ config PINCTRL_SUNRISEPOINT
provides an interface that allows configuring of PCH pins and
using them as GPIOs.
+config PINCTRL_TIGERLAKE
+ tristate "Intel Tiger Lake pinctrl and GPIO driver"
+ depends on ACPI
+ select PINCTRL_INTEL
+ help
+ This pinctrl driver provides an interface that allows configuring
+ of Intel Tiger Lake PCH pins and using them as GPIOs.
endif
diff --git a/drivers/pinctrl/intel/Makefile b/drivers/pinctrl/intel/Makefile
index cb491e655749..7e620b471ef6 100644
--- a/drivers/pinctrl/intel/Makefile
+++ b/drivers/pinctrl/intel/Makefile
@@ -13,3 +13,4 @@ obj-$(CONFIG_PINCTRL_GEMINILAKE) += pinctrl-geminilake.o
obj-$(CONFIG_PINCTRL_ICELAKE) += pinctrl-icelake.o
obj-$(CONFIG_PINCTRL_LEWISBURG) += pinctrl-lewisburg.o
obj-$(CONFIG_PINCTRL_SUNRISEPOINT) += pinctrl-sunrisepoint.o
+obj-$(CONFIG_PINCTRL_TIGERLAKE) += pinctrl-tigerlake.o
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index 2c419fa5d1c1..582fa8a75559 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -165,7 +165,7 @@ struct chv_pinctrl {
struct gpio_chip chip;
struct irq_chip irqchip;
void __iomem *regs;
- unsigned intr_lines[16];
+ unsigned int intr_lines[16];
const struct chv_community *community;
u32 saved_intmask;
struct chv_pin_context *saved_pin_context;
@@ -379,7 +379,7 @@ static const struct chv_community southwest_community = {
.gpio_ranges = southwest_gpio_ranges,
.ngpio_ranges = ARRAY_SIZE(southwest_gpio_ranges),
/*
- * Southwest community can benerate GPIO interrupts only for the
+ * Southwest community can generate GPIO interrupts only for the
* first 8 interrupts. The upper half (8-15) can only be used to
* trigger GPEs.
*/
@@ -1480,7 +1480,7 @@ static void chv_gpio_irq_handler(struct irq_desc *desc)
pending = readl(pctrl->regs + CHV_INTSTAT);
for_each_set_bit(intr_line, &pending, pctrl->community->nirqs) {
- unsigned irq, offset;
+ unsigned int irq, offset;
offset = pctrl->intr_lines[intr_line];
irq = irq_find_mapping(gc->irq.domain, offset);
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index 83981ad66a71..4860bc9a4e48 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -1131,7 +1131,7 @@ static irqreturn_t intel_gpio_community_irq_handler(struct intel_pinctrl *pctrl,
pending &= enabled;
for_each_set_bit(gpp_offset, &pending, padgrp->size) {
- unsigned irq;
+ unsigned int irq;
irq = irq_find_mapping(gc->irq.domain,
padgrp->gpio_base + gpp_offset);
@@ -1181,7 +1181,7 @@ static int intel_gpio_add_pin_ranges(struct intel_pinctrl *pctrl,
return ret;
}
-static unsigned intel_gpio_ngpio(const struct intel_pinctrl *pctrl)
+static unsigned int intel_gpio_ngpio(const struct intel_pinctrl *pctrl)
{
const struct intel_community *community;
unsigned int ngpio = 0;
@@ -1595,16 +1595,65 @@ intel_gpio_is_requested(struct gpio_chip *chip, int base, unsigned int size)
return requested;
}
-static u32
-intel_gpio_update_pad_mode(void __iomem *hostown, u32 mask, u32 value)
+static bool intel_gpio_update_reg(void __iomem *reg, u32 mask, u32 value)
{
u32 curr, updated;
- curr = readl(hostown);
+ curr = readl(reg);
+
updated = (curr & ~mask) | (value & mask);
- writel(updated, hostown);
+ if (curr == updated)
+ return false;
+
+ writel(updated, reg);
+ return true;
+}
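
intel_gpio_update_reg() folds the resume-path read-modify-writes into one
helper whose return value reports whether the register actually changed,
letting the restore helpers below skip both the write and the debug print for
no-ops. A userspace model:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool update_reg(uint32_t *reg, uint32_t mask, uint32_t value)
{
        uint32_t curr = *reg;                           /* models readl() */
        uint32_t updated = (curr & ~mask) | (value & mask);

        if (curr == updated)
                return false;                           /* skip the write */

        *reg = updated;                                 /* models writel() */
        return true;
}

int main(void)
{
        uint32_t reg = 0x0000ff00;
        bool changed;

        changed = update_reg(&reg, 0x00ff0000, 0x00aa0000);
        printf("changed=%d reg=%#010x\n", changed, reg);

        /* Second, identical restore is a no-op. */
        changed = update_reg(&reg, 0x00ff0000, 0x00aa0000);
        printf("changed=%d reg=%#010x\n", changed, reg);
        return 0;
}
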
+
+static void intel_restore_hostown(struct intel_pinctrl *pctrl, unsigned int c,
+ void __iomem *base, unsigned int gpp, u32 saved)
+{
+ const struct intel_community *community = &pctrl->communities[c];
+ const struct intel_padgroup *padgrp = &community->gpps[gpp];
+ struct device *dev = pctrl->dev;
+ u32 requested;
+
+ if (padgrp->gpio_base < 0)
+ return;
- return curr;
+ requested = intel_gpio_is_requested(&pctrl->chip, padgrp->gpio_base, padgrp->size);
+ if (!intel_gpio_update_reg(base + gpp * 4, requested, saved))
+ return;
+
+ dev_dbg(dev, "restored hostown %u/%u %#08x\n", c, gpp, readl(base + gpp * 4));
+}
+
+static void intel_restore_intmask(struct intel_pinctrl *pctrl, unsigned int c,
+ void __iomem *base, unsigned int gpp, u32 saved)
+{
+ struct device *dev = pctrl->dev;
+
+ if (!intel_gpio_update_reg(base + gpp * 4, ~0U, saved))
+ return;
+
+ dev_dbg(dev, "restored mask %u/%u %#08x\n", c, gpp, readl(base + gpp * 4));
+}
+
+static void intel_restore_padcfg(struct intel_pinctrl *pctrl, unsigned int pin,
+ unsigned int reg, u32 saved)
+{
+ u32 mask = (reg == PADCFG0) ? PADCFG0_GPIORXSTATE : 0;
+ unsigned int n = reg / sizeof(u32);
+ struct device *dev = pctrl->dev;
+ void __iomem *padcfg;
+
+ padcfg = intel_get_padcfg(pctrl, pin, reg);
+ if (!padcfg)
+ return;
+
+ if (!intel_gpio_update_reg(padcfg, ~mask, saved))
+ return;
+
+ dev_dbg(dev, "restored pin %u padcfg%u %#08x\n", pin, n, readl(padcfg));
}
int intel_pinctrl_resume_noirq(struct device *dev)
@@ -1620,37 +1669,13 @@ int intel_pinctrl_resume_noirq(struct device *dev)
pads = pctrl->context.pads;
for (i = 0; i < pctrl->soc->npins; i++) {
const struct pinctrl_pin_desc *desc = &pctrl->soc->pins[i];
- void __iomem *padcfg;
- u32 val;
if (!intel_pinctrl_should_save(pctrl, desc->number))
continue;
- padcfg = intel_get_padcfg(pctrl, desc->number, PADCFG0);
- val = readl(padcfg) & ~PADCFG0_GPIORXSTATE;
- if (val != pads[i].padcfg0) {
- writel(pads[i].padcfg0, padcfg);
- dev_dbg(dev, "restored pin %u padcfg0 %#08x\n",
- desc->number, readl(padcfg));
- }
-
- padcfg = intel_get_padcfg(pctrl, desc->number, PADCFG1);
- val = readl(padcfg);
- if (val != pads[i].padcfg1) {
- writel(pads[i].padcfg1, padcfg);
- dev_dbg(dev, "restored pin %u padcfg1 %#08x\n",
- desc->number, readl(padcfg));
- }
-
- padcfg = intel_get_padcfg(pctrl, desc->number, PADCFG2);
- if (padcfg) {
- val = readl(padcfg);
- if (val != pads[i].padcfg2) {
- writel(pads[i].padcfg2, padcfg);
- dev_dbg(dev, "restored pin %u padcfg2 %#08x\n",
- desc->number, readl(padcfg));
- }
- }
+ intel_restore_padcfg(pctrl, desc->number, PADCFG0, pads[i].padcfg0);
+ intel_restore_padcfg(pctrl, desc->number, PADCFG1, pads[i].padcfg1);
+ intel_restore_padcfg(pctrl, desc->number, PADCFG2, pads[i].padcfg2);
}
communities = pctrl->context.communities;
@@ -1660,30 +1685,12 @@ int intel_pinctrl_resume_noirq(struct device *dev)
unsigned int gpp;
base = community->regs + community->ie_offset;
- for (gpp = 0; gpp < community->ngpps; gpp++) {
- writel(communities[i].intmask[gpp], base + gpp * 4);
- dev_dbg(dev, "restored mask %d/%u %#08x\n", i, gpp,
- readl(base + gpp * 4));
- }
+ for (gpp = 0; gpp < community->ngpps; gpp++)
+ intel_restore_intmask(pctrl, i, base, gpp, communities[i].intmask[gpp]);
base = community->regs + community->hostown_offset;
- for (gpp = 0; gpp < community->ngpps; gpp++) {
- const struct intel_padgroup *padgrp = &community->gpps[gpp];
- u32 requested = 0, value = 0;
- u32 saved = communities[i].hostown[gpp];
-
- if (padgrp->gpio_base < 0)
- continue;
-
- requested = intel_gpio_is_requested(&pctrl->chip,
- padgrp->gpio_base, padgrp->size);
- value = intel_gpio_update_pad_mode(base + gpp * 4,
- requested, saved);
- if ((value ^ saved) & requested) {
- dev_warn(dev, "restore hostown %d/%u %#8x->%#8x\n",
- i, gpp, value, saved);
- }
- }
+ for (gpp = 0; gpp < community->ngpps; gpp++)
+ intel_restore_hostown(pctrl, i, base, gpp, communities[i].hostown[gpp]);
}
return 0;
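Note how intel_restore_hostown() masks the saved HOSTSW_OWN value with the set of currently requested GPIOs: pads the OS never claimed keep whatever ownership the firmware left in the register. The merge rule, reduced to a hedged one-liner (the name is illustrative, not from the patch):

#include <stdint.h>

/* Keep curr where no GPIO is requested; take saved bits where one is. */
static uint32_t merge_hostown(uint32_t curr, uint32_t saved, uint32_t requested)
{
	return (curr & ~requested) | (saved & requested);
}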
diff --git a/drivers/pinctrl/intel/pinctrl-lewisburg.c b/drivers/pinctrl/intel/pinctrl-lewisburg.c
index 2e06fb1464ab..7fdf4257df1e 100644
--- a/drivers/pinctrl/intel/pinctrl-lewisburg.c
+++ b/drivers/pinctrl/intel/pinctrl-lewisburg.c
@@ -33,6 +33,7 @@
.npins = ((e) - (s) + 1), \
}
+/* Lewisburg */
static const struct pinctrl_pin_desc lbg_pins[] = {
/* GPP_A */
PINCTRL_PIN(0, "RCINB"),
@@ -72,7 +73,7 @@ static const struct pinctrl_pin_desc lbg_pins[] = {
PINCTRL_PIN(33, "SRCCLKREQB_4"),
PINCTRL_PIN(34, "SRCCLKREQB_5"),
PINCTRL_PIN(35, "GPP_B_11"),
- PINCTRL_PIN(36, "GLB_RST_WARN_N"),
+ PINCTRL_PIN(36, "SLP_S0B"),
PINCTRL_PIN(37, "PLTRSTB"),
PINCTRL_PIN(38, "SPKR"),
PINCTRL_PIN(39, "GPP_B_15"),
@@ -185,96 +186,96 @@ static const struct pinctrl_pin_desc lbg_pins[] = {
PINCTRL_PIN(141, "GBE_PCI_DIS"),
PINCTRL_PIN(142, "GBE_LAN_DIS"),
PINCTRL_PIN(143, "GPP_I_10"),
- PINCTRL_PIN(144, "GPIO_RCOMP_3P3"),
/* GPP_J */
- PINCTRL_PIN(145, "GBE_LED_0_0"),
- PINCTRL_PIN(146, "GBE_LED_0_1"),
- PINCTRL_PIN(147, "GBE_LED_1_0"),
- PINCTRL_PIN(148, "GBE_LED_1_1"),
- PINCTRL_PIN(149, "GBE_LED_2_0"),
- PINCTRL_PIN(150, "GBE_LED_2_1"),
- PINCTRL_PIN(151, "GBE_LED_3_0"),
- PINCTRL_PIN(152, "GBE_LED_3_1"),
- PINCTRL_PIN(153, "GBE_SCL_0"),
- PINCTRL_PIN(154, "GBE_SDA_0"),
- PINCTRL_PIN(155, "GBE_SCL_1"),
- PINCTRL_PIN(156, "GBE_SDA_1"),
- PINCTRL_PIN(157, "GBE_SCL_2"),
- PINCTRL_PIN(158, "GBE_SDA_2"),
- PINCTRL_PIN(159, "GBE_SCL_3"),
- PINCTRL_PIN(160, "GBE_SDA_3"),
- PINCTRL_PIN(161, "GBE_SDP_0_0"),
- PINCTRL_PIN(162, "GBE_SDP_0_1"),
- PINCTRL_PIN(163, "GBE_SDP_1_0"),
- PINCTRL_PIN(164, "GBE_SDP_1_1"),
- PINCTRL_PIN(165, "GBE_SDP_2_0"),
- PINCTRL_PIN(166, "GBE_SDP_2_1"),
- PINCTRL_PIN(167, "GBE_SDP_3_0"),
- PINCTRL_PIN(168, "GBE_SDP_3_1"),
+ PINCTRL_PIN(144, "GBE_LED_0_0"),
+ PINCTRL_PIN(145, "GBE_LED_0_1"),
+ PINCTRL_PIN(146, "GBE_LED_1_0"),
+ PINCTRL_PIN(147, "GBE_LED_1_1"),
+ PINCTRL_PIN(148, "GBE_LED_2_0"),
+ PINCTRL_PIN(149, "GBE_LED_2_1"),
+ PINCTRL_PIN(150, "GBE_LED_3_0"),
+ PINCTRL_PIN(151, "GBE_LED_3_1"),
+ PINCTRL_PIN(152, "GBE_SCL_0"),
+ PINCTRL_PIN(153, "GBE_SDA_0"),
+ PINCTRL_PIN(154, "GBE_SCL_1"),
+ PINCTRL_PIN(155, "GBE_SDA_1"),
+ PINCTRL_PIN(156, "GBE_SCL_2"),
+ PINCTRL_PIN(157, "GBE_SDA_2"),
+ PINCTRL_PIN(158, "GBE_SCL_3"),
+ PINCTRL_PIN(159, "GBE_SDA_3"),
+ PINCTRL_PIN(160, "GBE_SDP_0_0"),
+ PINCTRL_PIN(161, "GBE_SDP_0_1"),
+ PINCTRL_PIN(162, "GBE_SDP_1_0"),
+ PINCTRL_PIN(163, "GBE_SDP_1_1"),
+ PINCTRL_PIN(164, "GBE_SDP_2_0"),
+ PINCTRL_PIN(165, "GBE_SDP_2_1"),
+ PINCTRL_PIN(166, "GBE_SDP_3_0"),
+ PINCTRL_PIN(167, "GBE_SDP_3_1"),
/* GPP_K */
- PINCTRL_PIN(169, "GBE_RMIICLK"),
- PINCTRL_PIN(170, "GBE_RMII_TXD_0"),
- PINCTRL_PIN(171, "GBE_RMII_TXD_1"),
+ PINCTRL_PIN(168, "GBE_RMIICLK"),
+ PINCTRL_PIN(169, "GBE_RMII_RXD_0"),
+ PINCTRL_PIN(170, "GBE_RMII_RXD_1"),
+ PINCTRL_PIN(171, "GBE_RMII_CRS_DV"),
PINCTRL_PIN(172, "GBE_RMII_TX_EN"),
- PINCTRL_PIN(173, "GBE_RMII_CRS_DV"),
- PINCTRL_PIN(174, "GBE_RMII_RXD_0"),
- PINCTRL_PIN(175, "GBE_RMII_RXD_1"),
- PINCTRL_PIN(176, "GBE_RMII_RX_ER"),
- PINCTRL_PIN(177, "GBE_RMII_ARBIN"),
- PINCTRL_PIN(178, "GBE_RMII_ARB_OUT"),
- PINCTRL_PIN(179, "PE_RST_N"),
- PINCTRL_PIN(180, "GPIO_RCOMP_1P8_3P3"),
+ PINCTRL_PIN(173, "GBE_RMII_TXD_0"),
+ PINCTRL_PIN(174, "GBE_RMII_TXD_1"),
+ PINCTRL_PIN(175, "GBE_RMII_RX_ER"),
+ PINCTRL_PIN(176, "GBE_RMII_ARBIN"),
+ PINCTRL_PIN(177, "GBE_RMII_ARB_OUT"),
+ PINCTRL_PIN(178, "PE_RST_N"),
/* GPP_G */
- PINCTRL_PIN(181, "FAN_TACH_0"),
- PINCTRL_PIN(182, "FAN_TACH_1"),
- PINCTRL_PIN(183, "FAN_TACH_2"),
- PINCTRL_PIN(184, "FAN_TACH_3"),
- PINCTRL_PIN(185, "FAN_TACH_4"),
- PINCTRL_PIN(186, "FAN_TACH_5"),
- PINCTRL_PIN(187, "FAN_TACH_6"),
- PINCTRL_PIN(188, "FAN_TACH_7"),
- PINCTRL_PIN(189, "FAN_PWM_0"),
- PINCTRL_PIN(190, "FAN_PWM_1"),
- PINCTRL_PIN(191, "FAN_PWM_2"),
- PINCTRL_PIN(192, "FAN_PWM_3"),
- PINCTRL_PIN(193, "GSXDOUT"),
- PINCTRL_PIN(194, "GSXSLOAD"),
- PINCTRL_PIN(195, "GSXDIN"),
- PINCTRL_PIN(196, "GSXSRESETB"),
- PINCTRL_PIN(197, "GSXCLK"),
- PINCTRL_PIN(198, "ADR_COMPLETE"),
- PINCTRL_PIN(199, "NMIB"),
- PINCTRL_PIN(200, "SMIB"),
- PINCTRL_PIN(201, "SSATA_DEVSLP_0"),
- PINCTRL_PIN(202, "SSATA_DEVSLP_1"),
- PINCTRL_PIN(203, "SSATA_DEVSLP_2"),
- PINCTRL_PIN(204, "SSATAXPCIE0_SSATAGP0"),
+ PINCTRL_PIN(179, "FAN_TACH_0"),
+ PINCTRL_PIN(180, "FAN_TACH_1"),
+ PINCTRL_PIN(181, "FAN_TACH_2"),
+ PINCTRL_PIN(182, "FAN_TACH_3"),
+ PINCTRL_PIN(183, "FAN_TACH_4"),
+ PINCTRL_PIN(184, "FAN_TACH_5"),
+ PINCTRL_PIN(185, "FAN_TACH_6"),
+ PINCTRL_PIN(186, "FAN_TACH_7"),
+ PINCTRL_PIN(187, "FAN_PWM_0"),
+ PINCTRL_PIN(188, "FAN_PWM_1"),
+ PINCTRL_PIN(189, "FAN_PWM_2"),
+ PINCTRL_PIN(190, "FAN_PWM_3"),
+ PINCTRL_PIN(191, "GSXDOUT"),
+ PINCTRL_PIN(192, "GSXSLOAD"),
+ PINCTRL_PIN(193, "GSXDIN"),
+ PINCTRL_PIN(194, "GSXSRESETB"),
+ PINCTRL_PIN(195, "GSXCLK"),
+ PINCTRL_PIN(196, "ADR_COMPLETE"),
+ PINCTRL_PIN(197, "NMIB"),
+ PINCTRL_PIN(198, "SMIB"),
+ PINCTRL_PIN(199, "SSATA_DEVSLP_0"),
+ PINCTRL_PIN(200, "SSATA_DEVSLP_1"),
+ PINCTRL_PIN(201, "SSATA_DEVSLP_2"),
+ PINCTRL_PIN(202, "SSATAXPCIE0_SSATAGP0"),
/* GPP_H */
- PINCTRL_PIN(205, "SRCCLKREQB_6"),
- PINCTRL_PIN(206, "SRCCLKREQB_7"),
- PINCTRL_PIN(207, "SRCCLKREQB_8"),
- PINCTRL_PIN(208, "SRCCLKREQB_9"),
- PINCTRL_PIN(209, "SRCCLKREQB_10"),
- PINCTRL_PIN(210, "SRCCLKREQB_11"),
- PINCTRL_PIN(211, "SRCCLKREQB_12"),
- PINCTRL_PIN(212, "SRCCLKREQB_13"),
- PINCTRL_PIN(213, "SRCCLKREQB_14"),
- PINCTRL_PIN(214, "SRCCLKREQB_15"),
- PINCTRL_PIN(215, "SML2CLK"),
- PINCTRL_PIN(216, "SML2DATA"),
- PINCTRL_PIN(217, "SML2ALERTB"),
- PINCTRL_PIN(218, "SML3CLK"),
- PINCTRL_PIN(219, "SML3DATA"),
- PINCTRL_PIN(220, "SML3ALERTB"),
- PINCTRL_PIN(221, "SML4CLK"),
- PINCTRL_PIN(222, "SML4DATA"),
- PINCTRL_PIN(223, "SML4ALERTB"),
- PINCTRL_PIN(224, "SSATAXPCIE1_SSATAGP1"),
- PINCTRL_PIN(225, "SSATAXPCIE2_SSATAGP2"),
- PINCTRL_PIN(226, "SSATAXPCIE3_SSATAGP3"),
- PINCTRL_PIN(227, "SSATAXPCIE4_SSATAGP4"),
- PINCTRL_PIN(228, "SSATAXPCIE5_SSATAGP5"),
+ PINCTRL_PIN(203, "SRCCLKREQB_6"),
+ PINCTRL_PIN(204, "SRCCLKREQB_7"),
+ PINCTRL_PIN(205, "SRCCLKREQB_8"),
+ PINCTRL_PIN(206, "SRCCLKREQB_9"),
+ PINCTRL_PIN(207, "SRCCLKREQB_10"),
+ PINCTRL_PIN(208, "SRCCLKREQB_11"),
+ PINCTRL_PIN(209, "SRCCLKREQB_12"),
+ PINCTRL_PIN(210, "SRCCLKREQB_13"),
+ PINCTRL_PIN(211, "SRCCLKREQB_14"),
+ PINCTRL_PIN(212, "SRCCLKREQB_15"),
+ PINCTRL_PIN(213, "SML2CLK"),
+ PINCTRL_PIN(214, "SML2DATA"),
+ PINCTRL_PIN(215, "SML2ALERTB"),
+ PINCTRL_PIN(216, "SML3CLK"),
+ PINCTRL_PIN(217, "SML3DATA"),
+ PINCTRL_PIN(218, "SML3ALERTB"),
+ PINCTRL_PIN(219, "SML4CLK"),
+ PINCTRL_PIN(220, "SML4DATA"),
+ PINCTRL_PIN(221, "SML4ALERTB"),
+ PINCTRL_PIN(222, "SSATAXPCIE1_SSATAGP1"),
+ PINCTRL_PIN(223, "SSATAXPCIE2_SSATAGP2"),
+ PINCTRL_PIN(224, "SSATAXPCIE3_SSATAGP3"),
+ PINCTRL_PIN(225, "SSATAXPCIE4_SSATAGP4"),
+ PINCTRL_PIN(226, "SSATAXPCIE5_SSATAGP5"),
/* GPP_L */
+ PINCTRL_PIN(227, "GPP_L_0"),
+ PINCTRL_PIN(228, "EC_CSME_INTR_OUT"),
PINCTRL_PIN(229, "VISA2CH0_D0"),
PINCTRL_PIN(230, "VISA2CH0_D1"),
PINCTRL_PIN(231, "VISA2CH0_D2"),
diff --git a/drivers/pinctrl/intel/pinctrl-tigerlake.c b/drivers/pinctrl/intel/pinctrl-tigerlake.c
new file mode 100644
index 000000000000..58572b15b3ce
--- /dev/null
+++ b/drivers/pinctrl/intel/pinctrl-tigerlake.c
@@ -0,0 +1,454 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Tiger Lake PCH pinctrl/GPIO driver
+ *
+ * Copyright (C) 2019, Intel Corporation
+ * Authors: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ * Mika Westerberg <mika.westerberg@linux.intel.com>
+ */
+
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-intel.h"
+
+#define TGL_PAD_OWN 0x020
+#define TGL_PADCFGLOCK 0x080
+#define TGL_HOSTSW_OWN 0x0b0
+#define TGL_GPI_IS 0x100
+#define TGL_GPI_IE 0x120
+
+#define TGL_GPP(r, s, e) \
+ { \
+ .reg_num = (r), \
+ .base = (s), \
+ .size = ((e) - (s) + 1), \
+ }
+
+#define TGL_COMMUNITY(s, e, g) \
+ { \
+ .padown_offset = TGL_PAD_OWN, \
+ .padcfglock_offset = TGL_PADCFGLOCK, \
+ .hostown_offset = TGL_HOSTSW_OWN, \
+ .is_offset = TGL_GPI_IS, \
+ .ie_offset = TGL_GPI_IE, \
+ .pin_base = (s), \
+ .npins = ((e) - (s) + 1), \
+ .gpps = (g), \
+ .ngpps = ARRAY_SIZE(g), \
+ }
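Both macros above describe inclusive pin ranges, so the pad-group size and the community's npins come out as (end - start) + 1. As a sketch, TGL_GPP(2, 42, 66) from the tables further down is expected to expand to roughly the following (field set assumed from pinctrl-intel.h):

/* Illustrative expansion only. */
static const struct intel_padgroup example_gpp = {
	.reg_num = 2,
	.base = 42,
	.size = 25,	/* (66 - 42) + 1 pins: GPP_A */
};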
+
+/* Tiger Lake-LP */
+static const struct pinctrl_pin_desc tgllp_community0_pins[] = {
+ /* GPP_B */
+ PINCTRL_PIN(0, "CORE_VID_0"),
+ PINCTRL_PIN(1, "CORE_VID_1"),
+ PINCTRL_PIN(2, "VRALERTB"),
+ PINCTRL_PIN(3, "CPU_GP_2"),
+ PINCTRL_PIN(4, "CPU_GP_3"),
+ PINCTRL_PIN(5, "ISH_I2C0_SDA"),
+ PINCTRL_PIN(6, "ISH_I2C0_SCL"),
+ PINCTRL_PIN(7, "ISH_I2C1_SDA"),
+ PINCTRL_PIN(8, "ISH_I2C1_SCL"),
+ PINCTRL_PIN(9, "I2C5_SDA"),
+ PINCTRL_PIN(10, "I2C5_SCL"),
+ PINCTRL_PIN(11, "PMCALERTB"),
+ PINCTRL_PIN(12, "SLP_S0B"),
+ PINCTRL_PIN(13, "PLTRSTB"),
+ PINCTRL_PIN(14, "SPKR"),
+ PINCTRL_PIN(15, "GSPI0_CS0B"),
+ PINCTRL_PIN(16, "GSPI0_CLK"),
+ PINCTRL_PIN(17, "GSPI0_MISO"),
+ PINCTRL_PIN(18, "GSPI0_MOSI"),
+ PINCTRL_PIN(19, "GSPI1_CS0B"),
+ PINCTRL_PIN(20, "GSPI1_CLK"),
+ PINCTRL_PIN(21, "GSPI1_MISO"),
+ PINCTRL_PIN(22, "GSPI1_MOSI"),
+ PINCTRL_PIN(23, "SML1ALERTB"),
+ PINCTRL_PIN(24, "GSPI0_CLK_LOOPBK"),
+ PINCTRL_PIN(25, "GSPI1_CLK_LOOPBK"),
+ /* GPP_T */
+ PINCTRL_PIN(26, "I2C6_SDA"),
+ PINCTRL_PIN(27, "I2C6_SCL"),
+ PINCTRL_PIN(28, "I2C7_SDA"),
+ PINCTRL_PIN(29, "I2C7_SCL"),
+ PINCTRL_PIN(30, "UART4_RXD"),
+ PINCTRL_PIN(31, "UART4_TXD"),
+ PINCTRL_PIN(32, "UART4_RTSB"),
+ PINCTRL_PIN(33, "UART4_CTSB"),
+ PINCTRL_PIN(34, "UART5_RXD"),
+ PINCTRL_PIN(35, "UART5_TXD"),
+ PINCTRL_PIN(36, "UART5_RTSB"),
+ PINCTRL_PIN(37, "UART5_CTSB"),
+ PINCTRL_PIN(38, "UART6_RXD"),
+ PINCTRL_PIN(39, "UART6_TXD"),
+ PINCTRL_PIN(40, "UART6_RTSB"),
+ PINCTRL_PIN(41, "UART6_CTSB"),
+ /* GPP_A */
+ PINCTRL_PIN(42, "ESPI_IO_0"),
+ PINCTRL_PIN(43, "ESPI_IO_1"),
+ PINCTRL_PIN(44, "ESPI_IO_2"),
+ PINCTRL_PIN(45, "ESPI_IO_3"),
+ PINCTRL_PIN(46, "ESPI_CSB"),
+ PINCTRL_PIN(47, "ESPI_CLK"),
+ PINCTRL_PIN(48, "ESPI_RESETB"),
+ PINCTRL_PIN(49, "I2S2_SCLK"),
+ PINCTRL_PIN(50, "I2S2_SFRM"),
+ PINCTRL_PIN(51, "I2S2_TXD"),
+ PINCTRL_PIN(52, "I2S2_RXD"),
+ PINCTRL_PIN(53, "PMC_I2C_SDA"),
+ PINCTRL_PIN(54, "SATAXPCIE_1"),
+ PINCTRL_PIN(55, "PMC_I2C_SCL"),
+ PINCTRL_PIN(56, "USB2_OCB_1"),
+ PINCTRL_PIN(57, "USB2_OCB_2"),
+ PINCTRL_PIN(58, "USB2_OCB_3"),
+ PINCTRL_PIN(59, "DDSP_HPD_C"),
+ PINCTRL_PIN(60, "DDSP_HPD_B"),
+ PINCTRL_PIN(61, "DDSP_HPD_1"),
+ PINCTRL_PIN(62, "DDSP_HPD_2"),
+ PINCTRL_PIN(63, "GPPC_A_21"),
+ PINCTRL_PIN(64, "GPPC_A_22"),
+ PINCTRL_PIN(65, "I2S1_SCLK"),
+ PINCTRL_PIN(66, "ESPI_CLK_LOOPBK"),
+};
+
+static const struct intel_padgroup tgllp_community0_gpps[] = {
+ TGL_GPP(0, 0, 25), /* GPP_B */
+ TGL_GPP(1, 26, 41), /* GPP_T */
+ TGL_GPP(2, 42, 66), /* GPP_A */
+};
+
+static const struct intel_community tgllp_community0[] = {
+ TGL_COMMUNITY(0, 66, tgllp_community0_gpps),
+};
+
+static const struct intel_pinctrl_soc_data tgllp_community0_soc_data = {
+ .uid = "0",
+ .pins = tgllp_community0_pins,
+ .npins = ARRAY_SIZE(tgllp_community0_pins),
+ .communities = tgllp_community0,
+ .ncommunities = ARRAY_SIZE(tgllp_community0),
+};
+
+static const struct pinctrl_pin_desc tgllp_community1_pins[] = {
+ /* GPP_S */
+ PINCTRL_PIN(0, "SNDW0_CLK"),
+ PINCTRL_PIN(1, "SNDW0_DATA"),
+ PINCTRL_PIN(2, "SNDW1_CLK"),
+ PINCTRL_PIN(3, "SNDW1_DATA"),
+ PINCTRL_PIN(4, "SNDW2_CLK"),
+ PINCTRL_PIN(5, "SNDW2_DATA"),
+ PINCTRL_PIN(6, "SNDW3_CLK"),
+ PINCTRL_PIN(7, "SNDW3_DATA"),
+ /* GPP_H */
+ PINCTRL_PIN(8, "GPPC_H_0"),
+ PINCTRL_PIN(9, "GPPC_H_1"),
+ PINCTRL_PIN(10, "GPPC_H_2"),
+ PINCTRL_PIN(11, "SX_EXIT_HOLDOFFB"),
+ PINCTRL_PIN(12, "I2C2_SDA"),
+ PINCTRL_PIN(13, "I2C2_SCL"),
+ PINCTRL_PIN(14, "I2C3_SDA"),
+ PINCTRL_PIN(15, "I2C3_SCL"),
+ PINCTRL_PIN(16, "I2C4_SDA"),
+ PINCTRL_PIN(17, "I2C4_SCL"),
+ PINCTRL_PIN(18, "SRCCLKREQB_4"),
+ PINCTRL_PIN(19, "SRCCLKREQB_5"),
+ PINCTRL_PIN(20, "M2_SKT2_CFG_0"),
+ PINCTRL_PIN(21, "M2_SKT2_CFG_1"),
+ PINCTRL_PIN(22, "M2_SKT2_CFG_2"),
+ PINCTRL_PIN(23, "M2_SKT2_CFG_3"),
+ PINCTRL_PIN(24, "DDPB_CTRLCLK"),
+ PINCTRL_PIN(25, "DDPB_CTRLDATA"),
+ PINCTRL_PIN(26, "CPU_C10_GATEB"),
+ PINCTRL_PIN(27, "TIME_SYNC_0"),
+ PINCTRL_PIN(28, "IMGCLKOUT_1"),
+ PINCTRL_PIN(29, "IMGCLKOUT_2"),
+ PINCTRL_PIN(30, "IMGCLKOUT_3"),
+ PINCTRL_PIN(31, "IMGCLKOUT_4"),
+ /* GPP_D */
+ PINCTRL_PIN(32, "ISH_GP_0"),
+ PINCTRL_PIN(33, "ISH_GP_1"),
+ PINCTRL_PIN(34, "ISH_GP_2"),
+ PINCTRL_PIN(35, "ISH_GP_3"),
+ PINCTRL_PIN(36, "IMGCLKOUT_0"),
+ PINCTRL_PIN(37, "SRCCLKREQB_0"),
+ PINCTRL_PIN(38, "SRCCLKREQB_1"),
+ PINCTRL_PIN(39, "SRCCLKREQB_2"),
+ PINCTRL_PIN(40, "SRCCLKREQB_3"),
+ PINCTRL_PIN(41, "ISH_SPI_CSB"),
+ PINCTRL_PIN(42, "ISH_SPI_CLK"),
+ PINCTRL_PIN(43, "ISH_SPI_MISO"),
+ PINCTRL_PIN(44, "ISH_SPI_MOSI"),
+ PINCTRL_PIN(45, "ISH_UART0_RXD"),
+ PINCTRL_PIN(46, "ISH_UART0_TXD"),
+ PINCTRL_PIN(47, "ISH_UART0_RTSB"),
+ PINCTRL_PIN(48, "ISH_UART0_CTSB"),
+ PINCTRL_PIN(49, "ISH_GP_4"),
+ PINCTRL_PIN(50, "ISH_GP_5"),
+ PINCTRL_PIN(51, "I2S_MCLK1_OUT"),
+ PINCTRL_PIN(52, "GSPI2_CLK_LOOPBK"),
+ /* GPP_U */
+ PINCTRL_PIN(53, "UART3_RXD"),
+ PINCTRL_PIN(54, "UART3_TXD"),
+ PINCTRL_PIN(55, "UART3_RTSB"),
+ PINCTRL_PIN(56, "UART3_CTSB"),
+ PINCTRL_PIN(57, "GSPI3_CS0B"),
+ PINCTRL_PIN(58, "GSPI3_CLK"),
+ PINCTRL_PIN(59, "GSPI3_MISO"),
+ PINCTRL_PIN(60, "GSPI3_MOSI"),
+ PINCTRL_PIN(61, "GSPI4_CS0B"),
+ PINCTRL_PIN(62, "GSPI4_CLK"),
+ PINCTRL_PIN(63, "GSPI4_MISO"),
+ PINCTRL_PIN(64, "GSPI4_MOSI"),
+ PINCTRL_PIN(65, "GSPI5_CS0B"),
+ PINCTRL_PIN(66, "GSPI5_CLK"),
+ PINCTRL_PIN(67, "GSPI5_MISO"),
+ PINCTRL_PIN(68, "GSPI5_MOSI"),
+ PINCTRL_PIN(69, "GSPI6_CS0B"),
+ PINCTRL_PIN(70, "GSPI6_CLK"),
+ PINCTRL_PIN(71, "GSPI6_MISO"),
+ PINCTRL_PIN(72, "GSPI6_MOSI"),
+ PINCTRL_PIN(73, "GSPI3_CLK_LOOPBK"),
+ PINCTRL_PIN(74, "GSPI4_CLK_LOOPBK"),
+ PINCTRL_PIN(75, "GSPI5_CLK_LOOPBK"),
+ PINCTRL_PIN(76, "GSPI6_CLK_LOOPBK"),
+ /* vGPIO */
+ PINCTRL_PIN(77, "CNV_BTEN"),
+ PINCTRL_PIN(78, "CNV_BT_HOST_WAKEB"),
+ PINCTRL_PIN(79, "CNV_BT_IF_SELECT"),
+ PINCTRL_PIN(80, "vCNV_BT_UART_TXD"),
+ PINCTRL_PIN(81, "vCNV_BT_UART_RXD"),
+ PINCTRL_PIN(82, "vCNV_BT_UART_CTS_B"),
+ PINCTRL_PIN(83, "vCNV_BT_UART_RTS_B"),
+ PINCTRL_PIN(84, "vCNV_MFUART1_TXD"),
+ PINCTRL_PIN(85, "vCNV_MFUART1_RXD"),
+ PINCTRL_PIN(86, "vCNV_MFUART1_CTS_B"),
+ PINCTRL_PIN(87, "vCNV_MFUART1_RTS_B"),
+ PINCTRL_PIN(88, "vUART0_TXD"),
+ PINCTRL_PIN(89, "vUART0_RXD"),
+ PINCTRL_PIN(90, "vUART0_CTS_B"),
+ PINCTRL_PIN(91, "vUART0_RTS_B"),
+ PINCTRL_PIN(92, "vISH_UART0_TXD"),
+ PINCTRL_PIN(93, "vISH_UART0_RXD"),
+ PINCTRL_PIN(94, "vISH_UART0_CTS_B"),
+ PINCTRL_PIN(95, "vISH_UART0_RTS_B"),
+ PINCTRL_PIN(96, "vCNV_BT_I2S_BCLK"),
+ PINCTRL_PIN(97, "vCNV_BT_I2S_WS_SYNC"),
+ PINCTRL_PIN(98, "vCNV_BT_I2S_SDO"),
+ PINCTRL_PIN(99, "vCNV_BT_I2S_SDI"),
+ PINCTRL_PIN(100, "vI2S2_SCLK"),
+ PINCTRL_PIN(101, "vI2S2_SFRM"),
+ PINCTRL_PIN(102, "vI2S2_TXD"),
+ PINCTRL_PIN(103, "vI2S2_RXD"),
+};
+
+static const struct intel_padgroup tgllp_community1_gpps[] = {
+ TGL_GPP(0, 0, 7), /* GPP_S */
+ TGL_GPP(1, 8, 31), /* GPP_H */
+ TGL_GPP(2, 32, 52), /* GPP_D */
+ TGL_GPP(3, 53, 76), /* GPP_U */
+ TGL_GPP(4, 77, 103), /* vGPIO */
+};
+
+static const struct intel_community tgllp_community1[] = {
+ TGL_COMMUNITY(0, 103, tgllp_community1_gpps),
+};
+
+static const struct intel_pinctrl_soc_data tgllp_community1_soc_data = {
+ .uid = "1",
+ .pins = tgllp_community1_pins,
+ .npins = ARRAY_SIZE(tgllp_community1_pins),
+ .communities = tgllp_community1,
+ .ncommunities = ARRAY_SIZE(tgllp_community1),
+};
+
+static const struct pinctrl_pin_desc tgllp_community4_pins[] = {
+ /* GPP_C */
+ PINCTRL_PIN(0, "SMBCLK"),
+ PINCTRL_PIN(1, "SMBDATA"),
+ PINCTRL_PIN(2, "SMBALERTB"),
+ PINCTRL_PIN(3, "SML0CLK"),
+ PINCTRL_PIN(4, "SML0DATA"),
+ PINCTRL_PIN(5, "SML0ALERTB"),
+ PINCTRL_PIN(6, "SML1CLK"),
+ PINCTRL_PIN(7, "SML1DATA"),
+ PINCTRL_PIN(8, "UART0_RXD"),
+ PINCTRL_PIN(9, "UART0_TXD"),
+ PINCTRL_PIN(10, "UART0_RTSB"),
+ PINCTRL_PIN(11, "UART0_CTSB"),
+ PINCTRL_PIN(12, "UART1_RXD"),
+ PINCTRL_PIN(13, "UART1_TXD"),
+ PINCTRL_PIN(14, "UART1_RTSB"),
+ PINCTRL_PIN(15, "UART1_CTSB"),
+ PINCTRL_PIN(16, "I2C0_SDA"),
+ PINCTRL_PIN(17, "I2C0_SCL"),
+ PINCTRL_PIN(18, "I2C1_SDA"),
+ PINCTRL_PIN(19, "I2C1_SCL"),
+ PINCTRL_PIN(20, "UART2_RXD"),
+ PINCTRL_PIN(21, "UART2_TXD"),
+ PINCTRL_PIN(22, "UART2_RTSB"),
+ PINCTRL_PIN(23, "UART2_CTSB"),
+ /* GPP_F */
+ PINCTRL_PIN(24, "CNV_BRI_DT"),
+ PINCTRL_PIN(25, "CNV_BRI_RSP"),
+ PINCTRL_PIN(26, "CNV_RGI_DT"),
+ PINCTRL_PIN(27, "CNV_RGI_RSP"),
+ PINCTRL_PIN(28, "CNV_RF_RESET_B"),
+ PINCTRL_PIN(29, "GPPC_F_5"),
+ PINCTRL_PIN(30, "CNV_PA_BLANKING"),
+ PINCTRL_PIN(31, "GPPC_F_7"),
+ PINCTRL_PIN(32, "I2S_MCLK2_INOUT"),
+ PINCTRL_PIN(33, "BOOTMPC"),
+ PINCTRL_PIN(34, "GPPC_F_10"),
+ PINCTRL_PIN(35, "GPPC_F_11"),
+ PINCTRL_PIN(36, "GSXDOUT"),
+ PINCTRL_PIN(37, "GSXSLOAD"),
+ PINCTRL_PIN(38, "GSXDIN"),
+ PINCTRL_PIN(39, "GSXSRESETB"),
+ PINCTRL_PIN(40, "GSXCLK"),
+ PINCTRL_PIN(41, "GMII_MDC"),
+ PINCTRL_PIN(42, "GMII_MDIO"),
+ PINCTRL_PIN(43, "SRCCLKREQB_6"),
+ PINCTRL_PIN(44, "EXT_PWR_GATEB"),
+ PINCTRL_PIN(45, "EXT_PWR_GATE2B"),
+ PINCTRL_PIN(46, "VNN_CTRL"),
+ PINCTRL_PIN(47, "V1P05_CTRL"),
+ PINCTRL_PIN(48, "GPPF_CLK_LOOPBACK"),
+ /* HVCMOS */
+ PINCTRL_PIN(49, "L_BKLTEN"),
+ PINCTRL_PIN(50, "L_BKLTCTL"),
+ PINCTRL_PIN(51, "L_VDDEN"),
+ PINCTRL_PIN(52, "SYS_PWROK"),
+ PINCTRL_PIN(53, "SYS_RESETB"),
+ PINCTRL_PIN(54, "MLK_RSTB"),
+ /* GPP_E */
+ PINCTRL_PIN(55, "SATAXPCIE_0"),
+ PINCTRL_PIN(56, "SPI1_IO_2"),
+ PINCTRL_PIN(57, "SPI1_IO_3"),
+ PINCTRL_PIN(58, "CPU_GP_0"),
+ PINCTRL_PIN(59, "SATA_DEVSLP_0"),
+ PINCTRL_PIN(60, "SATA_DEVSLP_1"),
+ PINCTRL_PIN(61, "GPPC_E_6"),
+ PINCTRL_PIN(62, "CPU_GP_1"),
+ PINCTRL_PIN(63, "SPI1_CS1B"),
+ PINCTRL_PIN(64, "USB2_OCB_0"),
+ PINCTRL_PIN(65, "SPI1_CSB"),
+ PINCTRL_PIN(66, "SPI1_CLK"),
+ PINCTRL_PIN(67, "SPI1_MISO_IO_1"),
+ PINCTRL_PIN(68, "SPI1_MOSI_IO_0"),
+ PINCTRL_PIN(69, "DDSP_HPD_A"),
+ PINCTRL_PIN(70, "ISH_GP_6"),
+ PINCTRL_PIN(71, "ISH_GP_7"),
+ PINCTRL_PIN(72, "GPPC_E_17"),
+ PINCTRL_PIN(73, "DDP1_CTRLCLK"),
+ PINCTRL_PIN(74, "DDP1_CTRLDATA"),
+ PINCTRL_PIN(75, "DDP2_CTRLCLK"),
+ PINCTRL_PIN(76, "DDP2_CTRLDATA"),
+ PINCTRL_PIN(77, "DDPA_CTRLCLK"),
+ PINCTRL_PIN(78, "DDPA_CTRLDATA"),
+ PINCTRL_PIN(79, "SPI1_CLK_LOOPBK"),
+ /* JTAG */
+ PINCTRL_PIN(80, "JTAG_TDO"),
+ PINCTRL_PIN(81, "JTAGX"),
+ PINCTRL_PIN(82, "PRDYB"),
+ PINCTRL_PIN(83, "PREQB"),
+ PINCTRL_PIN(84, "CPU_TRSTB"),
+ PINCTRL_PIN(85, "JTAG_TDI"),
+ PINCTRL_PIN(86, "JTAG_TMS"),
+ PINCTRL_PIN(87, "JTAG_TCK"),
+ PINCTRL_PIN(88, "DBG_PMODE"),
+};
+
+static const struct intel_padgroup tgllp_community4_gpps[] = {
+ TGL_GPP(0, 0, 23), /* GPP_C */
+ TGL_GPP(1, 24, 48), /* GPP_F */
+ TGL_GPP(2, 49, 54), /* HVCMOS */
+ TGL_GPP(3, 55, 79), /* GPP_E */
+ TGL_GPP(4, 80, 88), /* JTAG */
+};
+
+static const struct intel_community tgllp_community4[] = {
+ TGL_COMMUNITY(0, 88, tgllp_community4_gpps),
+};
+
+static const struct intel_pinctrl_soc_data tgllp_community4_soc_data = {
+ .uid = "4",
+ .pins = tgllp_community4_pins,
+ .npins = ARRAY_SIZE(tgllp_community4_pins),
+ .communities = tgllp_community4,
+ .ncommunities = ARRAY_SIZE(tgllp_community4),
+};
+
+static const struct pinctrl_pin_desc tgllp_community5_pins[] = {
+ /* GPP_R */
+ PINCTRL_PIN(0, "HDA_BCLK"),
+ PINCTRL_PIN(1, "HDA_SYNC"),
+ PINCTRL_PIN(2, "HDA_SDO"),
+ PINCTRL_PIN(3, "HDA_SDI_0"),
+ PINCTRL_PIN(4, "HDA_RSTB"),
+ PINCTRL_PIN(5, "HDA_SDI_1"),
+ PINCTRL_PIN(6, "GPP_R_6"),
+ PINCTRL_PIN(7, "GPP_R_7"),
+ /* SPI */
+ PINCTRL_PIN(8, "SPI0_IO_2"),
+ PINCTRL_PIN(9, "SPI0_IO_3"),
+ PINCTRL_PIN(10, "SPI0_MOSI_IO_0"),
+ PINCTRL_PIN(11, "SPI0_MISO_IO_1"),
+ PINCTRL_PIN(12, "SPI0_TPM_CSB"),
+ PINCTRL_PIN(13, "SPI0_FLASH_0_CSB"),
+ PINCTRL_PIN(14, "SPI0_FLASH_1_CSB"),
+ PINCTRL_PIN(15, "SPI0_CLK"),
+ PINCTRL_PIN(16, "SPI0_CLK_LOOPBK"),
+};
+
+static const struct intel_padgroup tgllp_community5_gpps[] = {
+ TGL_GPP(0, 0, 7), /* GPP_R */
+ TGL_GPP(1, 8, 16), /* SPI */
+};
+
+static const struct intel_community tgllp_community5[] = {
+ TGL_COMMUNITY(0, 16, tgllp_community5_gpps),
+};
+
+static const struct intel_pinctrl_soc_data tgllp_community5_soc_data = {
+ .uid = "5",
+ .pins = tgllp_community5_pins,
+ .npins = ARRAY_SIZE(tgllp_community5_pins),
+ .communities = tgllp_community5,
+ .ncommunities = ARRAY_SIZE(tgllp_community5),
+};
+
+static const struct intel_pinctrl_soc_data *tgllp_soc_data_array[] = {
+ &tgllp_community0_soc_data,
+ &tgllp_community1_soc_data,
+ &tgllp_community4_soc_data,
+ &tgllp_community5_soc_data,
+ NULL
+};
+
+static const struct acpi_device_id tgl_pinctrl_acpi_match[] = {
+ { "INT34C5", (kernel_ulong_t)tgllp_soc_data_array },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, tgl_pinctrl_acpi_match);
+
+static INTEL_PINCTRL_PM_OPS(tgl_pinctrl_pm_ops);
+
+static struct platform_driver tgl_pinctrl_driver = {
+ .probe = intel_pinctrl_probe_by_uid,
+ .driver = {
+ .name = "tigerlake-pinctrl",
+ .acpi_match_table = tgl_pinctrl_acpi_match,
+ .pm = &tgl_pinctrl_pm_ops,
+ },
+};
+
+module_platform_driver(tgl_pinctrl_driver);
+
+MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
+MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
+MODULE_DESCRIPTION("Intel Tiger Lake PCH pinctrl/GPIO driver");
+MODULE_LICENSE("GPL v2");
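The single ACPI id above carries the whole NULL-terminated tgllp_soc_data_array as driver data; intel_pinctrl_probe_by_uid() is then expected to match each enumerated device to one entry via its ACPI _UID ("0", "1", "4" or "5"). A hedged sketch of that selection, not the driver's actual code (in-kernel this would use strcmp() from <linux/string.h>):

static const struct intel_pinctrl_soc_data *
pick_soc_data(const struct intel_pinctrl_soc_data **table, const char *uid)
{
	unsigned int i;

	for (i = 0; table[i]; i++) {
		if (!strcmp(table[i]->uid, uid))
			return table[i];	/* e.g. "4" -> community 4 */
	}

	return NULL;	/* unknown _UID */
}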
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
index 53f52b9a0acd..67f8444f7a0c 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
@@ -982,7 +982,6 @@ static const struct mtk_eint_xt mtk_eint_xt = {
static int mtk_eint_init(struct mtk_pinctrl *pctl, struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
- struct resource *res;
if (!of_property_read_bool(np, "interrupt-controller"))
return -ENODEV;
@@ -991,8 +990,7 @@ static int mtk_eint_init(struct mtk_pinctrl *pctl, struct platform_device *pdev)
if (!pctl->eint)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pctl->eint->base = devm_ioremap_resource(&pdev->dev, res);
+ pctl->eint->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pctl->eint->base))
return PTR_ERR(pctl->eint->base);
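devm_platform_ioremap_resource() is a convenience wrapper that collapses the platform_get_resource()/devm_ioremap_resource() pair removed above, so the conversion is behaviour-preserving. Roughly (a sketch of the documented behaviour, not the kernel's verbatim source):

void __iomem *ioremap_nth_mem(struct platform_device *pdev, unsigned int index)
{
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, index);
	return devm_ioremap_resource(&pdev->dev, res);	/* ERR_PTR on failure */
}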
diff --git a/drivers/pinctrl/meson/Kconfig b/drivers/pinctrl/meson/Kconfig
index df55f617aa98..3cb119105ddb 100644
--- a/drivers/pinctrl/meson/Kconfig
+++ b/drivers/pinctrl/meson/Kconfig
@@ -54,4 +54,10 @@ config PINCTRL_MESON_G12A
select PINCTRL_MESON_AXG_PMX
default y
+config PINCTRL_MESON_A1
+ bool "Meson a1 Soc pinctrl driver"
+ depends on ARM64
+ select PINCTRL_MESON_AXG_PMX
+ default y
+
endif
diff --git a/drivers/pinctrl/meson/Makefile b/drivers/pinctrl/meson/Makefile
index a69c565f2f13..1a5bffe953f9 100644
--- a/drivers/pinctrl/meson/Makefile
+++ b/drivers/pinctrl/meson/Makefile
@@ -8,3 +8,4 @@ obj-$(CONFIG_PINCTRL_MESON_GXL) += pinctrl-meson-gxl.o
obj-$(CONFIG_PINCTRL_MESON_AXG_PMX) += pinctrl-meson-axg-pmx.o
obj-$(CONFIG_PINCTRL_MESON_AXG) += pinctrl-meson-axg.o
obj-$(CONFIG_PINCTRL_MESON_G12A) += pinctrl-meson-g12a.o
+obj-$(CONFIG_PINCTRL_MESON_A1) += pinctrl-meson-a1.o
diff --git a/drivers/pinctrl/meson/pinctrl-meson-a1.c b/drivers/pinctrl/meson/pinctrl-meson-a1.c
new file mode 100644
index 000000000000..0bcec03f344a
--- /dev/null
+++ b/drivers/pinctrl/meson/pinctrl-meson-a1.c
@@ -0,0 +1,942 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Pin controller and GPIO driver for Amlogic Meson A1 SoC.
+ *
+ * Copyright (c) 2019 Amlogic, Inc. All rights reserved.
+ * Author: Qianggui Song <qianggui.song@amlogic.com>
+ */
+
+#include <dt-bindings/gpio/meson-a1-gpio.h>
+#include "pinctrl-meson.h"
+#include "pinctrl-meson-axg-pmx.h"
+
+static const struct pinctrl_pin_desc meson_a1_periphs_pins[] = {
+ MESON_PIN(GPIOP_0),
+ MESON_PIN(GPIOP_1),
+ MESON_PIN(GPIOP_2),
+ MESON_PIN(GPIOP_3),
+ MESON_PIN(GPIOP_4),
+ MESON_PIN(GPIOP_5),
+ MESON_PIN(GPIOP_6),
+ MESON_PIN(GPIOP_7),
+ MESON_PIN(GPIOP_8),
+ MESON_PIN(GPIOP_9),
+ MESON_PIN(GPIOP_10),
+ MESON_PIN(GPIOP_11),
+ MESON_PIN(GPIOP_12),
+ MESON_PIN(GPIOB_0),
+ MESON_PIN(GPIOB_1),
+ MESON_PIN(GPIOB_2),
+ MESON_PIN(GPIOB_3),
+ MESON_PIN(GPIOB_4),
+ MESON_PIN(GPIOB_5),
+ MESON_PIN(GPIOB_6),
+ MESON_PIN(GPIOX_0),
+ MESON_PIN(GPIOX_1),
+ MESON_PIN(GPIOX_2),
+ MESON_PIN(GPIOX_3),
+ MESON_PIN(GPIOX_4),
+ MESON_PIN(GPIOX_5),
+ MESON_PIN(GPIOX_6),
+ MESON_PIN(GPIOX_7),
+ MESON_PIN(GPIOX_8),
+ MESON_PIN(GPIOX_9),
+ MESON_PIN(GPIOX_10),
+ MESON_PIN(GPIOX_11),
+ MESON_PIN(GPIOX_12),
+ MESON_PIN(GPIOX_13),
+ MESON_PIN(GPIOX_14),
+ MESON_PIN(GPIOX_15),
+ MESON_PIN(GPIOX_16),
+ MESON_PIN(GPIOF_0),
+ MESON_PIN(GPIOF_1),
+ MESON_PIN(GPIOF_2),
+ MESON_PIN(GPIOF_3),
+ MESON_PIN(GPIOF_4),
+ MESON_PIN(GPIOF_5),
+ MESON_PIN(GPIOF_6),
+ MESON_PIN(GPIOF_7),
+ MESON_PIN(GPIOF_8),
+ MESON_PIN(GPIOF_9),
+ MESON_PIN(GPIOF_10),
+ MESON_PIN(GPIOF_11),
+ MESON_PIN(GPIOF_12),
+ MESON_PIN(GPIOA_0),
+ MESON_PIN(GPIOA_1),
+ MESON_PIN(GPIOA_2),
+ MESON_PIN(GPIOA_3),
+ MESON_PIN(GPIOA_4),
+ MESON_PIN(GPIOA_5),
+ MESON_PIN(GPIOA_6),
+ MESON_PIN(GPIOA_7),
+ MESON_PIN(GPIOA_8),
+ MESON_PIN(GPIOA_9),
+ MESON_PIN(GPIOA_10),
+ MESON_PIN(GPIOA_11),
+};
+
+/* psram */
+static const unsigned int psram_clkn_pins[] = { GPIOP_0 };
+static const unsigned int psram_clkp_pins[] = { GPIOP_1 };
+static const unsigned int psram_ce_n_pins[] = { GPIOP_2 };
+static const unsigned int psram_rst_n_pins[] = { GPIOP_3 };
+static const unsigned int psram_adq0_pins[] = { GPIOP_4 };
+static const unsigned int psram_adq1_pins[] = { GPIOP_5 };
+static const unsigned int psram_adq2_pins[] = { GPIOP_6 };
+static const unsigned int psram_adq3_pins[] = { GPIOP_7 };
+static const unsigned int psram_adq4_pins[] = { GPIOP_8 };
+static const unsigned int psram_adq5_pins[] = { GPIOP_9 };
+static const unsigned int psram_adq6_pins[] = { GPIOP_10 };
+static const unsigned int psram_adq7_pins[] = { GPIOP_11 };
+static const unsigned int psram_dqs_dm_pins[] = { GPIOP_12 };
+
+/* sdcard */
+static const unsigned int sdcard_d0_b_pins[] = { GPIOB_0 };
+static const unsigned int sdcard_d1_b_pins[] = { GPIOB_1 };
+static const unsigned int sdcard_d2_b_pins[] = { GPIOB_2 };
+static const unsigned int sdcard_d3_b_pins[] = { GPIOB_3 };
+static const unsigned int sdcard_clk_b_pins[] = { GPIOB_4 };
+static const unsigned int sdcard_cmd_b_pins[] = { GPIOB_5 };
+
+static const unsigned int sdcard_d0_x_pins[] = { GPIOX_0 };
+static const unsigned int sdcard_d1_x_pins[] = { GPIOX_1 };
+static const unsigned int sdcard_d2_x_pins[] = { GPIOX_2 };
+static const unsigned int sdcard_d3_x_pins[] = { GPIOX_3 };
+static const unsigned int sdcard_clk_x_pins[] = { GPIOX_4 };
+static const unsigned int sdcard_cmd_x_pins[] = { GPIOX_5 };
+
+/* spif */
+static const unsigned int spif_mo_pins[] = { GPIOB_0 };
+static const unsigned int spif_mi_pins[] = { GPIOB_1 };
+static const unsigned int spif_wp_n_pins[] = { GPIOB_2 };
+static const unsigned int spif_hold_n_pins[] = { GPIOB_3 };
+static const unsigned int spif_clk_pins[] = { GPIOB_4 };
+static const unsigned int spif_cs_pins[] = { GPIOB_5 };
+
+/* i2c0 */
+static const unsigned int i2c0_sck_f9_pins[] = { GPIOF_9 };
+static const unsigned int i2c0_sda_f10_pins[] = { GPIOF_10 };
+static const unsigned int i2c0_sck_f11_pins[] = { GPIOF_11 };
+static const unsigned int i2c0_sda_f12_pins[] = { GPIOF_12 };
+
+/* i2c1 */
+static const unsigned int i2c1_sda_x_pins[] = { GPIOX_9 };
+static const unsigned int i2c1_sck_x_pins[] = { GPIOX_10 };
+static const unsigned int i2c1_sda_a_pins[] = { GPIOA_10 };
+static const unsigned int i2c1_sck_a_pins[] = { GPIOA_11 };
+
+/* i2c2 */
+static const unsigned int i2c2_sck_x0_pins[] = { GPIOX_0 };
+static const unsigned int i2c2_sda_x1_pins[] = { GPIOX_1 };
+static const unsigned int i2c2_sck_x15_pins[] = { GPIOX_15 };
+static const unsigned int i2c2_sda_x16_pins[] = { GPIOX_16 };
+static const unsigned int i2c2_sck_a4_pins[] = { GPIOA_4 };
+static const unsigned int i2c2_sda_a5_pins[] = { GPIOA_5 };
+static const unsigned int i2c2_sck_a8_pins[] = { GPIOA_8 };
+static const unsigned int i2c2_sda_a9_pins[] = { GPIOA_9 };
+
+/* i2c3 */
+static const unsigned int i2c3_sck_f_pins[] = { GPIOF_4 };
+static const unsigned int i2c3_sda_f_pins[] = { GPIOF_5 };
+static const unsigned int i2c3_sck_x_pins[] = { GPIOX_11 };
+static const unsigned int i2c3_sda_x_pins[] = { GPIOX_12 };
+
+/* i2c slave */
+static const unsigned int i2c_slave_sck_a_pins[] = { GPIOA_10 };
+static const unsigned int i2c_slave_sda_a_pins[] = { GPIOA_11 };
+static const unsigned int i2c_slave_sck_f_pins[] = { GPIOF_11 };
+static const unsigned int i2c_slave_sda_f_pins[] = { GPIOF_12 };
+
+/* uart_a */
+static const unsigned int uart_a_tx_pins[] = { GPIOX_11 };
+static const unsigned int uart_a_rx_pins[] = { GPIOX_12 };
+static const unsigned int uart_a_cts_pins[] = { GPIOX_13 };
+static const unsigned int uart_a_rts_pins[] = { GPIOX_14 };
+
+/* uart_b */
+static const unsigned int uart_b_tx_x_pins[] = { GPIOX_7 };
+static const unsigned int uart_b_rx_x_pins[] = { GPIOX_8 };
+static const unsigned int uart_b_tx_f_pins[] = { GPIOF_0 };
+static const unsigned int uart_b_rx_f_pins[] = { GPIOF_1 };
+
+/* uart_c */
+static const unsigned int uart_c_tx_x0_pins[] = { GPIOX_0 };
+static const unsigned int uart_c_rx_x1_pins[] = { GPIOX_1 };
+static const unsigned int uart_c_cts_pins[] = { GPIOX_2 };
+static const unsigned int uart_c_rts_pins[] = { GPIOX_3 };
+static const unsigned int uart_c_tx_x15_pins[] = { GPIOX_15 };
+static const unsigned int uart_c_rx_x16_pins[] = { GPIOX_16 };
+
+/* pwm_a */
+static const unsigned int pwm_a_x6_pins[] = { GPIOX_6 };
+static const unsigned int pwm_a_x7_pins[] = { GPIOX_7 };
+static const unsigned int pwm_a_f6_pins[] = { GPIOF_6 };
+static const unsigned int pwm_a_f10_pins[] = { GPIOF_10 };
+static const unsigned int pwm_a_a_pins[] = { GPIOA_5 };
+
+/* pwm_b */
+static const unsigned int pwm_b_x_pins[] = { GPIOX_8 };
+static const unsigned int pwm_b_f_pins[] = { GPIOF_7 };
+static const unsigned int pwm_b_a_pins[] = { GPIOA_11 };
+
+/* pwm_c */
+static const unsigned int pwm_c_x_pins[] = { GPIOX_9 };
+static const unsigned int pwm_c_f3_pins[] = { GPIOF_3 };
+static const unsigned int pwm_c_f8_pins[] = { GPIOF_8 };
+static const unsigned int pwm_c_a_pins[] = { GPIOA_10 };
+
+/* pwm_d */
+static const unsigned int pwm_d_x10_pins[] = { GPIOX_10 };
+static const unsigned int pwm_d_x13_pins[] = { GPIOX_13 };
+static const unsigned int pwm_d_x15_pins[] = { GPIOX_15 };
+static const unsigned int pwm_d_f_pins[] = { GPIOF_11 };
+
+/* pwm_e */
+static const unsigned int pwm_e_p_pins[] = { GPIOP_3 };
+static const unsigned int pwm_e_x2_pins[] = { GPIOX_2 };
+static const unsigned int pwm_e_x14_pins[] = { GPIOX_14 };
+static const unsigned int pwm_e_x16_pins[] = { GPIOX_16 };
+static const unsigned int pwm_e_f_pins[] = { GPIOF_3 };
+static const unsigned int pwm_e_a_pins[] = { GPIOA_0 };
+
+/* pwm_f */
+static const unsigned int pwm_f_b_pins[] = { GPIOB_6 };
+static const unsigned int pwm_f_x_pins[] = { GPIOX_3 };
+static const unsigned int pwm_f_f4_pins[] = { GPIOF_4 };
+static const unsigned int pwm_f_f12_pins[] = { GPIOF_12 };
+
+/* pwm_a_hiz */
+static const unsigned int pwm_a_hiz_f8_pins[] = { GPIOF_8 };
+static const unsigned int pwm_a_hiz_f10_pins[] = { GPIOF_10 };
+static const unsigned int pwm_a_hiz_f6_pins[] = { GPIOF_6 };
+
+/* pwm_b_hiz */
+static const unsigned int pwm_b_hiz_pins[] = { GPIOF_7 };
+
+/* pwm_c_hiz */
+static const unsigned int pwm_c_hiz_pins[] = { GPIOF_8 };
+
+/* tdm_a */
+static const unsigned int tdm_a_dout1_pins[] = { GPIOX_7 };
+static const unsigned int tdm_a_dout0_pins[] = { GPIOX_8 };
+static const unsigned int tdm_a_fs_pins[] = { GPIOX_9 };
+static const unsigned int tdm_a_sclk_pins[] = { GPIOX_10 };
+static const unsigned int tdm_a_din1_pins[] = { GPIOX_7 };
+static const unsigned int tdm_a_din0_pins[] = { GPIOX_8 };
+static const unsigned int tdm_a_slv_fs_pins[] = { GPIOX_9 };
+static const unsigned int tdm_a_slv_sclk_pins[] = { GPIOX_10 };
+
+/* spi_a */
+static const unsigned int spi_a_mosi_x2_pins[] = { GPIOX_2 };
+static const unsigned int spi_a_ss0_x3_pins[] = { GPIOX_3 };
+static const unsigned int spi_a_sclk_x4_pins[] = { GPIOX_4 };
+static const unsigned int spi_a_miso_x5_pins[] = { GPIOX_5 };
+static const unsigned int spi_a_mosi_x7_pins[] = { GPIOX_7 };
+static const unsigned int spi_a_miso_x8_pins[] = { GPIOX_8 };
+static const unsigned int spi_a_ss0_x9_pins[] = { GPIOX_9 };
+static const unsigned int spi_a_sclk_x10_pins[] = { GPIOX_10 };
+
+static const unsigned int spi_a_mosi_a_pins[] = { GPIOA_6 };
+static const unsigned int spi_a_miso_a_pins[] = { GPIOA_7 };
+static const unsigned int spi_a_ss0_a_pins[] = { GPIOA_8 };
+static const unsigned int spi_a_sclk_a_pins[] = { GPIOA_9 };
+
+/* pdm */
+static const unsigned int pdm_din0_x_pins[] = { GPIOX_7 };
+static const unsigned int pdm_din1_x_pins[] = { GPIOX_8 };
+static const unsigned int pdm_din2_x_pins[] = { GPIOX_9 };
+static const unsigned int pdm_dclk_x_pins[] = { GPIOX_10 };
+
+static const unsigned int pdm_din2_a_pins[] = { GPIOA_6 };
+static const unsigned int pdm_din1_a_pins[] = { GPIOA_7 };
+static const unsigned int pdm_din0_a_pins[] = { GPIOA_8 };
+static const unsigned int pdm_dclk_pins[] = { GPIOA_9 };
+
+/* gen_clk */
+static const unsigned int gen_clk_x_pins[] = { GPIOX_7 };
+static const unsigned int gen_clk_f8_pins[] = { GPIOF_8 };
+static const unsigned int gen_clk_f10_pins[] = { GPIOF_10 };
+static const unsigned int gen_clk_a_pins[] = { GPIOA_11 };
+
+/* jtag_a */
+static const unsigned int jtag_a_clk_pins[] = { GPIOF_4 };
+static const unsigned int jtag_a_tms_pins[] = { GPIOF_5 };
+static const unsigned int jtag_a_tdi_pins[] = { GPIOF_6 };
+static const unsigned int jtag_a_tdo_pins[] = { GPIOF_7 };
+
+/* clk_32k_in */
+static const unsigned int clk_32k_in_pins[] = { GPIOF_2 };
+
+/* ir in */
+static const unsigned int remote_input_f_pins[] = { GPIOF_3 };
+static const unsigned int remote_input_a_pins[] = { GPIOA_11 };
+
+/* ir out */
+static const unsigned int remote_out_pins[] = { GPIOF_5 };
+
+/* spdif */
+static const unsigned int spdif_in_f6_pins[] = { GPIOF_6 };
+static const unsigned int spdif_in_f7_pins[] = { GPIOF_7 };
+
+/* sw */
+static const unsigned int swclk_pins[] = { GPIOF_4 };
+static const unsigned int swdio_pins[] = { GPIOF_5 };
+
+/* clk25 */
+static const unsigned int clk25_pins[] = { GPIOF_10 };
+
+/* cec_a */
+static const unsigned int cec_a_pins[] = { GPIOF_2 };
+
+/* cec_b */
+static const unsigned int cec_b_pins[] = { GPIOF_2 };
+
+/* clk12_24 */
+static const unsigned int clk12_24_pins[] = { GPIOF_10 };
+
+/* mclk_0 */
+static const unsigned int mclk_0_pins[] = { GPIOA_0 };
+
+/* tdm_b */
+static const unsigned int tdm_b_sclk_pins[] = { GPIOA_1 };
+static const unsigned int tdm_b_fs_pins[] = { GPIOA_2 };
+static const unsigned int tdm_b_dout0_pins[] = { GPIOA_3 };
+static const unsigned int tdm_b_dout1_pins[] = { GPIOA_4 };
+static const unsigned int tdm_b_dout2_pins[] = { GPIOA_5 };
+static const unsigned int tdm_b_dout3_pins[] = { GPIOA_6 };
+static const unsigned int tdm_b_dout4_pins[] = { GPIOA_7 };
+static const unsigned int tdm_b_dout5_pins[] = { GPIOA_8 };
+static const unsigned int tdm_b_slv_sclk_pins[] = { GPIOA_5 };
+static const unsigned int tdm_b_slv_fs_pins[] = { GPIOA_6 };
+static const unsigned int tdm_b_din0_pins[] = { GPIOA_7 };
+static const unsigned int tdm_b_din1_pins[] = { GPIOA_8 };
+static const unsigned int tdm_b_din2_pins[] = { GPIOA_9 };
+
+/* mclk_vad */
+static const unsigned int mclk_vad_pins[] = { GPIOA_0 };
+
+/* tdm_vad */
+static const unsigned int tdm_vad_sclk_a1_pins[] = { GPIOA_1 };
+static const unsigned int tdm_vad_fs_a2_pins[] = { GPIOA_2 };
+static const unsigned int tdm_vad_sclk_a5_pins[] = { GPIOA_5 };
+static const unsigned int tdm_vad_fs_a6_pins[] = { GPIOA_6 };
+
+/* tst_out */
+static const unsigned int tst_out0_pins[] = { GPIOA_0 };
+static const unsigned int tst_out1_pins[] = { GPIOA_1 };
+static const unsigned int tst_out2_pins[] = { GPIOA_2 };
+static const unsigned int tst_out3_pins[] = { GPIOA_3 };
+static const unsigned int tst_out4_pins[] = { GPIOA_4 };
+static const unsigned int tst_out5_pins[] = { GPIOA_5 };
+static const unsigned int tst_out6_pins[] = { GPIOA_6 };
+static const unsigned int tst_out7_pins[] = { GPIOA_7 };
+static const unsigned int tst_out8_pins[] = { GPIOA_8 };
+static const unsigned int tst_out9_pins[] = { GPIOA_9 };
+static const unsigned int tst_out10_pins[] = { GPIOA_10 };
+static const unsigned int tst_out11_pins[] = { GPIOA_11 };
+
+/* mute */
+static const unsigned int mute_key_pins[] = { GPIOA_4 };
+static const unsigned int mute_en_pins[] = { GPIOA_5 };
+
+static struct meson_pmx_group meson_a1_periphs_groups[] = {
+ GPIO_GROUP(GPIOP_0),
+ GPIO_GROUP(GPIOP_1),
+ GPIO_GROUP(GPIOP_2),
+ GPIO_GROUP(GPIOP_3),
+ GPIO_GROUP(GPIOP_4),
+ GPIO_GROUP(GPIOP_5),
+ GPIO_GROUP(GPIOP_6),
+ GPIO_GROUP(GPIOP_7),
+ GPIO_GROUP(GPIOP_8),
+ GPIO_GROUP(GPIOP_9),
+ GPIO_GROUP(GPIOP_10),
+ GPIO_GROUP(GPIOP_11),
+ GPIO_GROUP(GPIOP_12),
+ GPIO_GROUP(GPIOB_0),
+ GPIO_GROUP(GPIOB_1),
+ GPIO_GROUP(GPIOB_2),
+ GPIO_GROUP(GPIOB_3),
+ GPIO_GROUP(GPIOB_4),
+ GPIO_GROUP(GPIOB_5),
+ GPIO_GROUP(GPIOB_6),
+ GPIO_GROUP(GPIOX_0),
+ GPIO_GROUP(GPIOX_1),
+ GPIO_GROUP(GPIOX_2),
+ GPIO_GROUP(GPIOX_3),
+ GPIO_GROUP(GPIOX_4),
+ GPIO_GROUP(GPIOX_5),
+ GPIO_GROUP(GPIOX_6),
+ GPIO_GROUP(GPIOX_7),
+ GPIO_GROUP(GPIOX_8),
+ GPIO_GROUP(GPIOX_9),
+ GPIO_GROUP(GPIOX_10),
+ GPIO_GROUP(GPIOX_11),
+ GPIO_GROUP(GPIOX_12),
+ GPIO_GROUP(GPIOX_13),
+ GPIO_GROUP(GPIOX_14),
+ GPIO_GROUP(GPIOX_15),
+ GPIO_GROUP(GPIOX_16),
+ GPIO_GROUP(GPIOF_0),
+ GPIO_GROUP(GPIOF_1),
+ GPIO_GROUP(GPIOF_2),
+ GPIO_GROUP(GPIOF_3),
+ GPIO_GROUP(GPIOF_4),
+ GPIO_GROUP(GPIOF_5),
+ GPIO_GROUP(GPIOF_6),
+ GPIO_GROUP(GPIOF_7),
+ GPIO_GROUP(GPIOF_8),
+ GPIO_GROUP(GPIOF_9),
+ GPIO_GROUP(GPIOF_10),
+ GPIO_GROUP(GPIOF_11),
+ GPIO_GROUP(GPIOF_12),
+ GPIO_GROUP(GPIOA_0),
+ GPIO_GROUP(GPIOA_1),
+ GPIO_GROUP(GPIOA_2),
+ GPIO_GROUP(GPIOA_3),
+ GPIO_GROUP(GPIOA_4),
+ GPIO_GROUP(GPIOA_5),
+ GPIO_GROUP(GPIOA_6),
+ GPIO_GROUP(GPIOA_7),
+ GPIO_GROUP(GPIOA_8),
+ GPIO_GROUP(GPIOA_9),
+ GPIO_GROUP(GPIOA_10),
+ GPIO_GROUP(GPIOA_11),
+
+ /* bank P func1 */
+ GROUP(psram_clkn, 1),
+ GROUP(psram_clkp, 1),
+ GROUP(psram_ce_n, 1),
+ GROUP(psram_rst_n, 1),
+ GROUP(psram_adq0, 1),
+ GROUP(psram_adq1, 1),
+ GROUP(psram_adq2, 1),
+ GROUP(psram_adq3, 1),
+ GROUP(psram_adq4, 1),
+ GROUP(psram_adq5, 1),
+ GROUP(psram_adq6, 1),
+ GROUP(psram_adq7, 1),
+ GROUP(psram_dqs_dm, 1),
+
+ /*bank P func2 */
+ GROUP(pwm_e_p, 2),
+
+ /*bank B func1 */
+ GROUP(spif_mo, 1),
+ GROUP(spif_mi, 1),
+ GROUP(spif_wp_n, 1),
+ GROUP(spif_hold_n, 1),
+ GROUP(spif_clk, 1),
+ GROUP(spif_cs, 1),
+ GROUP(pwm_f_b, 1),
+
+ /*bank B func2 */
+ GROUP(sdcard_d0_b, 2),
+ GROUP(sdcard_d1_b, 2),
+ GROUP(sdcard_d2_b, 2),
+ GROUP(sdcard_d3_b, 2),
+ GROUP(sdcard_clk_b, 2),
+ GROUP(sdcard_cmd_b, 2),
+
+ /*bank X func1 */
+ GROUP(sdcard_d0_x, 1),
+ GROUP(sdcard_d1_x, 1),
+ GROUP(sdcard_d2_x, 1),
+ GROUP(sdcard_d3_x, 1),
+ GROUP(sdcard_clk_x, 1),
+ GROUP(sdcard_cmd_x, 1),
+ GROUP(pwm_a_x6, 1),
+ GROUP(tdm_a_dout1, 1),
+ GROUP(tdm_a_dout0, 1),
+ GROUP(tdm_a_fs, 1),
+ GROUP(tdm_a_sclk, 1),
+ GROUP(uart_a_tx, 1),
+ GROUP(uart_a_rx, 1),
+ GROUP(uart_a_cts, 1),
+ GROUP(uart_a_rts, 1),
+ GROUP(pwm_d_x15, 1),
+ GROUP(pwm_e_x16, 1),
+
+ /*bank X func2 */
+ GROUP(i2c2_sck_x0, 2),
+ GROUP(i2c2_sda_x1, 2),
+ GROUP(spi_a_mosi_x2, 2),
+ GROUP(spi_a_ss0_x3, 2),
+ GROUP(spi_a_sclk_x4, 2),
+ GROUP(spi_a_miso_x5, 2),
+ GROUP(tdm_a_din1, 2),
+ GROUP(tdm_a_din0, 2),
+ GROUP(tdm_a_slv_fs, 2),
+ GROUP(tdm_a_slv_sclk, 2),
+ GROUP(i2c3_sck_x, 2),
+ GROUP(i2c3_sda_x, 2),
+ GROUP(pwm_d_x13, 2),
+ GROUP(pwm_e_x14, 2),
+ GROUP(i2c2_sck_x15, 2),
+ GROUP(i2c2_sda_x16, 2),
+
+ /*bank X func3 */
+ GROUP(uart_c_tx_x0, 3),
+ GROUP(uart_c_rx_x1, 3),
+ GROUP(uart_c_cts, 3),
+ GROUP(uart_c_rts, 3),
+ GROUP(pdm_din0_x, 3),
+ GROUP(pdm_din1_x, 3),
+ GROUP(pdm_din2_x, 3),
+ GROUP(pdm_dclk_x, 3),
+ GROUP(uart_c_tx_x15, 3),
+ GROUP(uart_c_rx_x16, 3),
+
+ /*bank X func4 */
+ GROUP(pwm_e_x2, 4),
+ GROUP(pwm_f_x, 4),
+ GROUP(spi_a_mosi_x7, 4),
+ GROUP(spi_a_miso_x8, 4),
+ GROUP(spi_a_ss0_x9, 4),
+ GROUP(spi_a_sclk_x10, 4),
+
+ /*bank X func5 */
+ GROUP(uart_b_tx_x, 5),
+ GROUP(uart_b_rx_x, 5),
+ GROUP(i2c1_sda_x, 5),
+ GROUP(i2c1_sck_x, 5),
+
+ /*bank X func6 */
+ GROUP(pwm_a_x7, 6),
+ GROUP(pwm_b_x, 6),
+ GROUP(pwm_c_x, 6),
+ GROUP(pwm_d_x10, 6),
+
+ /*bank X func7 */
+ GROUP(gen_clk_x, 7),
+
+ /*bank F func1 */
+ GROUP(uart_b_tx_f, 1),
+ GROUP(uart_b_rx_f, 1),
+ GROUP(remote_input_f, 1),
+ GROUP(jtag_a_clk, 1),
+ GROUP(jtag_a_tms, 1),
+ GROUP(jtag_a_tdi, 1),
+ GROUP(jtag_a_tdo, 1),
+ GROUP(gen_clk_f8, 1),
+ GROUP(pwm_a_f10, 1),
+ GROUP(i2c0_sck_f11, 1),
+ GROUP(i2c0_sda_f12, 1),
+
+ /*bank F func2 */
+ GROUP(clk_32k_in, 2),
+ GROUP(pwm_e_f, 2),
+ GROUP(pwm_f_f4, 2),
+ GROUP(remote_out, 2),
+ GROUP(spdif_in_f6, 2),
+ GROUP(spdif_in_f7, 2),
+ GROUP(pwm_a_hiz_f8, 2),
+ GROUP(pwm_a_hiz_f10, 2),
+ GROUP(pwm_d_f, 2),
+ GROUP(pwm_f_f12, 2),
+
+ /*bank F func3 */
+ GROUP(pwm_c_f3, 3),
+ GROUP(swclk, 3),
+ GROUP(swdio, 3),
+ GROUP(pwm_a_f6, 3),
+ GROUP(pwm_b_f, 3),
+ GROUP(pwm_c_f8, 3),
+ GROUP(clk25, 3),
+ GROUP(i2c_slave_sck_f, 3),
+ GROUP(i2c_slave_sda_f, 3),
+
+ /*bank F func4 */
+ GROUP(cec_a, 4),
+ GROUP(i2c3_sck_f, 4),
+ GROUP(i2c3_sda_f, 4),
+	GROUP(pwm_a_hiz_f6, 4),
+ GROUP(pwm_b_hiz, 4),
+ GROUP(pwm_c_hiz, 4),
+ GROUP(i2c0_sck_f9, 4),
+ GROUP(i2c0_sda_f10, 4),
+
+ /*bank F func5 */
+ GROUP(cec_b, 5),
+ GROUP(clk12_24, 5),
+
+ /*bank F func7 */
+ GROUP(gen_clk_f10, 7),
+
+ /*bank A func1 */
+ GROUP(mclk_0, 1),
+ GROUP(tdm_b_sclk, 1),
+ GROUP(tdm_b_fs, 1),
+ GROUP(tdm_b_dout0, 1),
+ GROUP(tdm_b_dout1, 1),
+ GROUP(tdm_b_dout2, 1),
+ GROUP(tdm_b_dout3, 1),
+ GROUP(tdm_b_dout4, 1),
+ GROUP(tdm_b_dout5, 1),
+ GROUP(remote_input_a, 1),
+
+ /*bank A func2 */
+ GROUP(pwm_e_a, 2),
+ GROUP(tdm_b_slv_sclk, 2),
+ GROUP(tdm_b_slv_fs, 2),
+ GROUP(tdm_b_din0, 2),
+ GROUP(tdm_b_din1, 2),
+ GROUP(tdm_b_din2, 2),
+ GROUP(i2c1_sda_a, 2),
+ GROUP(i2c1_sck_a, 2),
+
+ /*bank A func3 */
+ GROUP(i2c2_sck_a4, 3),
+ GROUP(i2c2_sda_a5, 3),
+ GROUP(pdm_din2_a, 3),
+ GROUP(pdm_din1_a, 3),
+ GROUP(pdm_din0_a, 3),
+ GROUP(pdm_dclk, 3),
+ GROUP(pwm_c_a, 3),
+ GROUP(pwm_b_a, 3),
+
+ /*bank A func4 */
+ GROUP(pwm_a_a, 4),
+ GROUP(spi_a_mosi_a, 4),
+ GROUP(spi_a_miso_a, 4),
+ GROUP(spi_a_ss0_a, 4),
+ GROUP(spi_a_sclk_a, 4),
+ GROUP(i2c_slave_sck_a, 4),
+ GROUP(i2c_slave_sda_a, 4),
+
+ /*bank A func5 */
+ GROUP(mclk_vad, 5),
+ GROUP(tdm_vad_sclk_a1, 5),
+ GROUP(tdm_vad_fs_a2, 5),
+ GROUP(tdm_vad_sclk_a5, 5),
+ GROUP(tdm_vad_fs_a6, 5),
+ GROUP(i2c2_sck_a8, 5),
+ GROUP(i2c2_sda_a9, 5),
+
+ /*bank A func6 */
+ GROUP(tst_out0, 6),
+ GROUP(tst_out1, 6),
+ GROUP(tst_out2, 6),
+ GROUP(tst_out3, 6),
+ GROUP(tst_out4, 6),
+ GROUP(tst_out5, 6),
+ GROUP(tst_out6, 6),
+ GROUP(tst_out7, 6),
+ GROUP(tst_out8, 6),
+ GROUP(tst_out9, 6),
+ GROUP(tst_out10, 6),
+ GROUP(tst_out11, 6),
+
+ /*bank A func7 */
+ GROUP(mute_key, 7),
+ GROUP(mute_en, 7),
+ GROUP(gen_clk_a, 7),
+};
+
+static const char * const gpio_periphs_groups[] = {
+ "GPIOP_0", "GPIOP_1", "GPIOP_2", "GPIOP_3", "GPIOP_4",
+ "GPIOP_5", "GPIOP_6", "GPIOP_7", "GPIOP_8", "GPIOP_9",
+ "GPIOP_10", "GPIOP_11", "GPIOP_12",
+
+ "GPIOB_0", "GPIOB_1", "GPIOB_2", "GPIOB_3", "GPIOB_4",
+ "GPIOB_5", "GPIOB_6",
+
+ "GPIOX_0", "GPIOX_1", "GPIOX_2", "GPIOX_3", "GPIOX_4",
+ "GPIOX_5", "GPIOX_6", "GPIOX_7", "GPIOX_8", "GPIOX_9",
+ "GPIOX_10", "GPIOX_11", "GPIOX_12", "GPIOX_13", "GPIOX_14",
+ "GPIOX_15", "GPIOX_16",
+
+ "GPIOF_0", "GPIOF_1", "GPIOF_2", "GPIOF_3", "GPIOF_4",
+ "GPIOF_5", "GPIOF_6", "GPIOF_7", "GPIOF_8", "GPIOF_9",
+ "GPIOF_10", "GPIOF_11", "GPIOF_12",
+
+ "GPIOA_0", "GPIOA_1", "GPIOA_2", "GPIOA_3", "GPIOA_4",
+ "GPIOA_5", "GPIOA_6", "GPIOA_7", "GPIOA_8", "GPIOA_9",
+ "GPIOA_10", "GPIOA_11",
+};
+
+static const char * const psram_groups[] = {
+ "psram_clkn", "psram_clkp", "psram_ce_n", "psram_rst_n", "psram_adq0",
+ "psram_adq1", "psram_adq2", "psram_adq3", "psram_adq4", "psram_adq5",
+ "psram_adq6", "psram_adq7", "psram_dqs_dm",
+};
+
+static const char * const pwm_a_groups[] = {
+ "pwm_a_x6", "pwm_a_x7", "pwm_a_f10", "pwm_a_f6", "pwm_a_a",
+};
+
+static const char * const pwm_b_groups[] = {
+ "pwm_b_x", "pwm_b_f", "pwm_b_a",
+};
+
+static const char * const pwm_c_groups[] = {
+ "pwm_c_x", "pwm_c_f3", "pwm_c_f8", "pwm_c_a",
+};
+
+static const char * const pwm_d_groups[] = {
+ "pwm_d_x15", "pwm_d_x13", "pwm_d_x10", "pwm_d_f",
+};
+
+static const char * const pwm_e_groups[] = {
+ "pwm_e_p", "pwm_e_x16", "pwm_e_x14", "pwm_e_x2", "pwm_e_f",
+ "pwm_e_a",
+};
+
+static const char * const pwm_f_groups[] = {
+ "pwm_f_b", "pwm_f_x", "pwm_f_f4", "pwm_f_f12",
+};
+
+static const char * const pwm_a_hiz_groups[] = {
+ "pwm_a_hiz_f8", "pwm_a_hiz_f10", "pwm_a_hiz_f6",
+};
+
+static const char * const pwm_b_hiz_groups[] = {
+ "pwm_b_hiz",
+};
+
+static const char * const pwm_c_hiz_groups[] = {
+ "pwm_c_hiz",
+};
+
+static const char * const spif_groups[] = {
+ "spif_mo", "spif_mi", "spif_wp_n", "spif_hold_n", "spif_clk",
+ "spif_cs",
+};
+
+static const char * const sdcard_groups[] = {
+ "sdcard_d0_b", "sdcard_d1_b", "sdcard_d2_b", "sdcard_d3_b",
+ "sdcard_clk_b", "sdcard_cmd_b",
+
+ "sdcard_d0_x", "sdcard_d1_x", "sdcard_d2_x", "sdcard_d3_x",
+ "sdcard_clk_x", "sdcard_cmd_x",
+};
+
+static const char * const tdm_a_groups[] = {
+ "tdm_a_din0", "tdm_a_din1", "tdm_a_fs", "tdm_a_sclk",
+ "tdm_a_slv_fs", "tdm_a_slv_sclk", "tdm_a_dout0", "tdm_a_dout1",
+};
+
+static const char * const uart_a_groups[] = {
+ "uart_a_tx", "uart_a_rx", "uart_a_cts", "uart_a_rts",
+};
+
+static const char * const uart_b_groups[] = {
+ "uart_b_tx_x", "uart_b_rx_x", "uart_b_tx_f", "uart_b_rx_f",
+};
+
+static const char * const uart_c_groups[] = {
+ "uart_c_tx_x0", "uart_c_rx_x1", "uart_c_cts", "uart_c_rts",
+ "uart_c_tx_x15", "uart_c_rx_x16",
+};
+
+static const char * const i2c0_groups[] = {
+ "i2c0_sck_f11", "i2c0_sda_f12", "i2c0_sck_f9", "i2c0_sda_f10",
+};
+
+static const char * const i2c1_groups[] = {
+ "i2c1_sda_x", "i2c1_sck_x", "i2c1_sda_a", "i2c1_sck_a",
+};
+
+static const char * const i2c2_groups[] = {
+ "i2c2_sck_x0", "i2c2_sda_x1", "i2c2_sck_x15", "i2c2_sda_x16",
+ "i2c2_sck_a4", "i2c2_sda_a5", "i2c2_sck_a8", "i2c2_sda_a9",
+};
+
+static const char * const i2c3_groups[] = {
+ "i2c3_sck_x", "i2c3_sda_x", "i2c3_sck_f", "i2c3_sda_f",
+};
+
+static const char * const i2c_slave_groups[] = {
+ "i2c_slave_sda_a", "i2c_slave_sck_a",
+ "i2c_slave_sda_f", "i2c_slave_sck_f",
+};
+
+static const char * const spi_a_groups[] = {
+ "spi_a_mosi_x2", "spi_a_ss0_x3", "spi_a_sclk_x4", "spi_a_miso_x5",
+ "spi_a_mosi_x7", "spi_a_miso_x8", "spi_a_ss0_x9", "spi_a_sclk_x10",
+
+ "spi_a_mosi_a", "spi_a_miso_a", "spi_a_ss0_a", "spi_a_sclk_a",
+};
+
+static const char * const pdm_groups[] = {
+ "pdm_din0_x", "pdm_din1_x", "pdm_din2_x", "pdm_dclk_x", "pdm_din2_a",
+ "pdm_din1_a", "pdm_din0_a", "pdm_dclk",
+};
+
+static const char * const gen_clk_groups[] = {
+ "gen_clk_x", "gen_clk_f8", "gen_clk_f10", "gen_clk_a",
+};
+
+static const char * const remote_input_groups[] = {
+ "remote_input_f",
+ "remote_input_a",
+};
+
+static const char * const jtag_a_groups[] = {
+ "jtag_a_clk", "jtag_a_tms", "jtag_a_tdi", "jtag_a_tdo",
+};
+
+static const char * const clk_32k_in_groups[] = {
+ "clk_32k_in",
+};
+
+static const char * const remote_out_groups[] = {
+ "remote_out",
+};
+
+static const char * const spdif_in_groups[] = {
+ "spdif_in_f6", "spdif_in_f7",
+};
+
+static const char * const sw_groups[] = {
+ "swclk", "swdio",
+};
+
+static const char * const clk25_groups[] = {
+ "clk_25",
+};
+
+static const char * const cec_a_groups[] = {
+ "cec_a",
+};
+
+static const char * const cec_b_groups[] = {
+ "cec_b",
+};
+
+static const char * const clk12_24_groups[] = {
+ "clk12_24",
+};
+
+static const char * const mclk_0_groups[] = {
+ "mclk_0",
+};
+
+static const char * const tdm_b_groups[] = {
+ "tdm_b_din0", "tdm_b_din1", "tdm_b_din2",
+ "tdm_b_sclk", "tdm_b_fs", "tdm_b_dout0", "tdm_b_dout1",
+ "tdm_b_dout2", "tdm_b_dout3", "tdm_b_dout4", "tdm_b_dout5",
+ "tdm_b_slv_sclk", "tdm_b_slv_fs",
+};
+
+static const char * const mclk_vad_groups[] = {
+ "mclk_vad",
+};
+
+static const char * const tdm_vad_groups[] = {
+ "tdm_vad_sclk_a1", "tdm_vad_fs_a2", "tdm_vad_sclk_a5", "tdm_vad_fs_a6",
+};
+
+static const char * const tst_out_groups[] = {
+ "tst_out0", "tst_out1", "tst_out2", "tst_out3",
+ "tst_out4", "tst_out5", "tst_out6", "tst_out7",
+ "tst_out8", "tst_out9", "tst_out10", "tst_out11",
+};
+
+static const char * const mute_groups[] = {
+ "mute_key", "mute_en",
+};
+
+static struct meson_pmx_func meson_a1_periphs_functions[] = {
+ FUNCTION(gpio_periphs),
+ FUNCTION(psram),
+ FUNCTION(pwm_a),
+ FUNCTION(pwm_b),
+ FUNCTION(pwm_c),
+ FUNCTION(pwm_d),
+ FUNCTION(pwm_e),
+ FUNCTION(pwm_f),
+ FUNCTION(pwm_a_hiz),
+ FUNCTION(pwm_b_hiz),
+ FUNCTION(pwm_c_hiz),
+ FUNCTION(spif),
+ FUNCTION(sdcard),
+ FUNCTION(tdm_a),
+ FUNCTION(uart_a),
+ FUNCTION(uart_b),
+ FUNCTION(uart_c),
+ FUNCTION(i2c0),
+ FUNCTION(i2c1),
+ FUNCTION(i2c2),
+ FUNCTION(i2c3),
+ FUNCTION(spi_a),
+ FUNCTION(pdm),
+ FUNCTION(gen_clk),
+ FUNCTION(remote_input),
+ FUNCTION(jtag_a),
+ FUNCTION(clk_32k_in),
+ FUNCTION(remote_out),
+ FUNCTION(spdif_in),
+ FUNCTION(sw),
+ FUNCTION(clk25),
+ FUNCTION(cec_a),
+ FUNCTION(cec_b),
+ FUNCTION(clk12_24),
+ FUNCTION(mclk_0),
+ FUNCTION(tdm_b),
+ FUNCTION(mclk_vad),
+ FUNCTION(tdm_vad),
+ FUNCTION(tst_out),
+ FUNCTION(mute),
+};
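Each FUNCTION(x) entry above is expected to pick up the matching x_groups[] string table, and every string in it must match, character for character, a group registered via GROUP()/GPIO_GROUP() earlier in the file; a misspelled group name silently breaks the lookup. The resolution the pinmux core effectively performs, reduced to a self-contained sketch:

#include <stdio.h>
#include <string.h>

struct group { const char *name; unsigned int func; };

static const struct group groups[] = {
	{ "psram_clkn", 1 },
	{ "pwm_e_p", 2 },
};

static int mux_func_of(const char *name)
{
	size_t i;

	for (i = 0; i < sizeof(groups) / sizeof(groups[0]); i++)
		if (!strcmp(groups[i].name, name))
			return groups[i].func;	/* mux function index */

	return -1;				/* unknown group */
}

int main(void)
{
	printf("%d\n", mux_func_of("pwm_e_p"));	/* prints 2 */
	return 0;
}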
+
+static struct meson_bank meson_a1_periphs_banks[] = {
+	/* name  first  last  irq  pullen  pull  dir  out  in  ds */
+ BANK_DS("P", GPIOP_0, GPIOP_12, 0, 12, 0x3, 0, 0x4, 0,
+ 0x2, 0, 0x1, 0, 0x0, 0, 0x5, 0),
+ BANK_DS("B", GPIOB_0, GPIOB_6, 13, 19, 0x13, 0, 0x14, 0,
+ 0x12, 0, 0x11, 0, 0x10, 0, 0x15, 0),
+ BANK_DS("X", GPIOX_0, GPIOX_16, 20, 36, 0x23, 0, 0x24, 0,
+ 0x22, 0, 0x21, 0, 0x20, 0, 0x25, 0),
+ BANK_DS("F", GPIOF_0, GPIOF_12, 37, 49, 0x33, 0, 0x34, 0,
+ 0x32, 0, 0x31, 0, 0x30, 0, 0x35, 0),
+ BANK_DS("A", GPIOA_0, GPIOA_11, 50, 61, 0x43, 0, 0x44, 0,
+ 0x42, 0, 0x41, 0, 0x40, 0, 0x45, 0),
+};
+
+static struct meson_pmx_bank meson_a1_periphs_pmx_banks[] = {
+	/* name  first  last  reg  offset */
+ BANK_PMX("P", GPIOP_0, GPIOP_12, 0x0, 0),
+ BANK_PMX("B", GPIOB_0, GPIOB_6, 0x2, 0),
+ BANK_PMX("X", GPIOX_0, GPIOX_16, 0x3, 0),
+ BANK_PMX("F", GPIOF_0, GPIOF_12, 0x6, 0),
+ BANK_PMX("A", GPIOA_0, GPIOA_11, 0x8, 0),
+};
+
+static struct meson_axg_pmx_data meson_a1_periphs_pmx_banks_data = {
+ .pmx_banks = meson_a1_periphs_pmx_banks,
+ .num_pmx_banks = ARRAY_SIZE(meson_a1_periphs_pmx_banks),
+};
+
+static struct meson_pinctrl_data meson_a1_periphs_pinctrl_data = {
+ .name = "periphs-banks",
+ .pins = meson_a1_periphs_pins,
+ .groups = meson_a1_periphs_groups,
+ .funcs = meson_a1_periphs_functions,
+ .banks = meson_a1_periphs_banks,
+ .num_pins = ARRAY_SIZE(meson_a1_periphs_pins),
+ .num_groups = ARRAY_SIZE(meson_a1_periphs_groups),
+ .num_funcs = ARRAY_SIZE(meson_a1_periphs_functions),
+ .num_banks = ARRAY_SIZE(meson_a1_periphs_banks),
+ .pmx_ops = &meson_axg_pmx_ops,
+ .pmx_data = &meson_a1_periphs_pmx_banks_data,
+ .parse_dt = &meson_a1_parse_dt_extra,
+};
+
+static const struct of_device_id meson_a1_pinctrl_dt_match[] = {
+ {
+ .compatible = "amlogic,meson-a1-periphs-pinctrl",
+ .data = &meson_a1_periphs_pinctrl_data,
+ },
+ { },
+};
+
+static struct platform_driver meson_a1_pinctrl_driver = {
+ .probe = meson_pinctrl_probe,
+ .driver = {
+ .name = "meson-a1-pinctrl",
+ .of_match_table = meson_a1_pinctrl_dt_match,
+ },
+};
+
+builtin_platform_driver(meson_a1_pinctrl_driver);
diff --git a/drivers/pinctrl/meson/pinctrl-meson-axg.c b/drivers/pinctrl/meson/pinctrl-meson-axg.c
index ad502eda4afa..072765db93d7 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-axg.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-axg.c
@@ -1066,6 +1066,7 @@ static struct meson_pinctrl_data meson_axg_aobus_pinctrl_data = {
.num_banks = ARRAY_SIZE(meson_axg_aobus_banks),
.pmx_ops = &meson_axg_pmx_ops,
.pmx_data = &meson_axg_aobus_pmx_banks_data,
+ .parse_dt = meson8_aobus_parse_dt_extra,
};
static const struct of_device_id meson_axg_pinctrl_dt_match[] = {
diff --git a/drivers/pinctrl/meson/pinctrl-meson-g12a.c b/drivers/pinctrl/meson/pinctrl-meson-g12a.c
index 582665fd362a..41850e3c0091 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-g12a.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-g12a.c
@@ -1362,6 +1362,14 @@ static struct meson_axg_pmx_data meson_g12a_aobus_pmx_banks_data = {
.num_pmx_banks = ARRAY_SIZE(meson_g12a_aobus_pmx_banks),
};
+static int meson_g12a_aobus_parse_dt_extra(struct meson_pinctrl *pc)
+{
+ pc->reg_pull = pc->reg_gpio;
+ pc->reg_pullen = pc->reg_gpio;
+
+ return 0;
+}
+
static struct meson_pinctrl_data meson_g12a_periphs_pinctrl_data = {
.name = "periphs-banks",
.pins = meson_g12a_periphs_pins,
@@ -1388,6 +1396,7 @@ static struct meson_pinctrl_data meson_g12a_aobus_pinctrl_data = {
.num_banks = ARRAY_SIZE(meson_g12a_aobus_banks),
.pmx_ops = &meson_axg_pmx_ops,
.pmx_data = &meson_g12a_aobus_pmx_banks_data,
+ .parse_dt = meson_g12a_aobus_parse_dt_extra,
};
static const struct of_device_id meson_g12a_pinctrl_dt_match[] = {
diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
index 5bfa56f3847e..926b9997159a 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
@@ -851,6 +851,7 @@ static struct meson_pinctrl_data meson_gxbb_aobus_pinctrl_data = {
.num_funcs = ARRAY_SIZE(meson_gxbb_aobus_functions),
.num_banks = ARRAY_SIZE(meson_gxbb_aobus_banks),
.pmx_ops = &meson8_pmx_ops,
+ .parse_dt = meson8_aobus_parse_dt_extra,
};
static const struct of_device_id meson_gxbb_pinctrl_dt_match[] = {
diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxl.c b/drivers/pinctrl/meson/pinctrl-meson-gxl.c
index 72c5373c8dc1..1b6e8646700f 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-gxl.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-gxl.c
@@ -820,6 +820,7 @@ static struct meson_pinctrl_data meson_gxl_aobus_pinctrl_data = {
.num_funcs = ARRAY_SIZE(meson_gxl_aobus_functions),
.num_banks = ARRAY_SIZE(meson_gxl_aobus_banks),
.pmx_ops = &meson8_pmx_ops,
+ .parse_dt = meson8_aobus_parse_dt_extra,
};
static const struct of_device_id meson_gxl_pinctrl_dt_match[] = {
diff --git a/drivers/pinctrl/meson/pinctrl-meson.c b/drivers/pinctrl/meson/pinctrl-meson.c
index 8bba9d053d9f..3c80828a5e50 100644
--- a/drivers/pinctrl/meson/pinctrl-meson.c
+++ b/drivers/pinctrl/meson/pinctrl-meson.c
@@ -625,7 +625,7 @@ static struct regmap *meson_map_resource(struct meson_pinctrl *pc,
i = of_property_match_string(node, "reg-names", name);
if (of_address_to_resource(node, i, &res))
- return ERR_PTR(-ENOENT);
+ return NULL;
base = devm_ioremap_resource(pc->dev, &res);
if (IS_ERR(base))
@@ -665,26 +665,24 @@ static int meson_pinctrl_parse_dt(struct meson_pinctrl *pc,
pc->of_node = gpio_np;
pc->reg_mux = meson_map_resource(pc, gpio_np, "mux");
- if (IS_ERR(pc->reg_mux)) {
+ if (IS_ERR_OR_NULL(pc->reg_mux)) {
dev_err(pc->dev, "mux registers not found\n");
- return PTR_ERR(pc->reg_mux);
+ return pc->reg_mux ? PTR_ERR(pc->reg_mux) : -ENOENT;
}
pc->reg_gpio = meson_map_resource(pc, gpio_np, "gpio");
- if (IS_ERR(pc->reg_gpio)) {
+ if (IS_ERR_OR_NULL(pc->reg_gpio)) {
dev_err(pc->dev, "gpio registers not found\n");
- return PTR_ERR(pc->reg_gpio);
+ return pc->reg_gpio ? PTR_ERR(pc->reg_gpio) : -ENOENT;
}
pc->reg_pull = meson_map_resource(pc, gpio_np, "pull");
- /* Use gpio region if pull one is not present */
if (IS_ERR(pc->reg_pull))
- pc->reg_pull = pc->reg_gpio;
+ pc->reg_pull = NULL;
pc->reg_pullen = meson_map_resource(pc, gpio_np, "pull-enable");
- /* Use pull region if pull-enable one is not present */
if (IS_ERR(pc->reg_pullen))
- pc->reg_pullen = pc->reg_pull;
+ pc->reg_pullen = NULL;
pc->reg_ds = meson_map_resource(pc, gpio_np, "ds");
if (IS_ERR(pc->reg_ds)) {
@@ -692,6 +690,28 @@ static int meson_pinctrl_parse_dt(struct meson_pinctrl *pc,
pc->reg_ds = NULL;
}
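+ /* Let SoC-specific code patch up regions the common parse left unmapped */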
+ if (pc->data->parse_dt)
+ return pc->data->parse_dt(pc);
+
+ return 0;
+}
+
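+/* Before G12A the AO bank has no pull-enable region; it shares the pull one */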
+int meson8_aobus_parse_dt_extra(struct meson_pinctrl *pc)
+{
+ if (!pc->reg_pull)
+ return -EINVAL;
+
+ pc->reg_pullen = pc->reg_pull;
+
+ return 0;
+}
+
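+/* A1-style SoCs expose gpio, pull, pull-enable and drive-strength in one region */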
+int meson_a1_parse_dt_extra(struct meson_pinctrl *pc)
+{
+ pc->reg_pull = pc->reg_gpio;
+ pc->reg_pullen = pc->reg_gpio;
+ pc->reg_ds = pc->reg_gpio;
+
return 0;
}
diff --git a/drivers/pinctrl/meson/pinctrl-meson.h b/drivers/pinctrl/meson/pinctrl-meson.h
index c696f3241a36..f8b0ff9d419a 100644
--- a/drivers/pinctrl/meson/pinctrl-meson.h
+++ b/drivers/pinctrl/meson/pinctrl-meson.h
@@ -11,6 +11,8 @@
#include <linux/regmap.h>
#include <linux/types.h>
+struct meson_pinctrl;
+
/**
* struct meson_pmx_group - a pinmux group
*
@@ -114,6 +116,7 @@ struct meson_pinctrl_data {
unsigned int num_banks;
const struct pinmux_ops *pmx_ops;
void *pmx_data;
+ int (*parse_dt)(struct meson_pinctrl *pc);
};
struct meson_pinctrl {
@@ -171,3 +174,7 @@ int meson_pmx_get_groups(struct pinctrl_dev *pcdev,
/* Common probe function */
int meson_pinctrl_probe(struct platform_device *pdev);
+/* Common AO-bank extra DT parse function for SoCs before G12A */
+int meson8_aobus_parse_dt_extra(struct meson_pinctrl *pc);
+/* Common extra dt parse function for SoCs like A1 */
+int meson_a1_parse_dt_extra(struct meson_pinctrl *pc);
diff --git a/drivers/pinctrl/meson/pinctrl-meson8.c b/drivers/pinctrl/meson/pinctrl-meson8.c
index 0b97befa6335..dd17100efdcf 100644
--- a/drivers/pinctrl/meson/pinctrl-meson8.c
+++ b/drivers/pinctrl/meson/pinctrl-meson8.c
@@ -1103,6 +1103,7 @@ static struct meson_pinctrl_data meson8_aobus_pinctrl_data = {
.num_funcs = ARRAY_SIZE(meson8_aobus_functions),
.num_banks = ARRAY_SIZE(meson8_aobus_banks),
.pmx_ops = &meson8_pmx_ops,
+ .parse_dt = &meson8_aobus_parse_dt_extra,
};
static const struct of_device_id meson8_pinctrl_dt_match[] = {
diff --git a/drivers/pinctrl/meson/pinctrl-meson8b.c b/drivers/pinctrl/meson/pinctrl-meson8b.c
index a7de388388e6..2d5339edd0b7 100644
--- a/drivers/pinctrl/meson/pinctrl-meson8b.c
+++ b/drivers/pinctrl/meson/pinctrl-meson8b.c
@@ -962,6 +962,7 @@ static struct meson_pinctrl_data meson8b_aobus_pinctrl_data = {
.num_funcs = ARRAY_SIZE(meson8b_aobus_functions),
.num_banks = ARRAY_SIZE(meson8b_aobus_banks),
.pmx_ops = &meson8_pmx_ops,
+ .parse_dt = &meson8_aobus_parse_dt_extra,
};
static const struct of_device_id meson8b_pinctrl_dt_match[] = {
diff --git a/drivers/pinctrl/mvebu/Kconfig b/drivers/pinctrl/mvebu/Kconfig
index d69c25798871..0d12894d3ee1 100644
--- a/drivers/pinctrl/mvebu/Kconfig
+++ b/drivers/pinctrl/mvebu/Kconfig
@@ -46,8 +46,8 @@ config PINCTRL_ORION
select PINCTRL_MVEBU
config PINCTRL_ARMADA_37XX
- bool
- select GENERIC_PINCONF
- select MFD_SYSCON
- select PINCONF
- select PINMUX
+ bool
+ select GENERIC_PINCONF
+ select MFD_SYSCON
+ select PINCONF
+ select PINMUX
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
index f2f5fcd9a237..aa9dcde0f069 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
@@ -595,10 +595,10 @@ static int armada_37xx_irq_set_type(struct irq_data *d, unsigned int type)
regmap_read(info->regmap, in_reg, &in_val);
/* Set initial polarity based on current input level. */
- if (in_val & d->mask)
- val |= d->mask; /* falling */
+ if (in_val & BIT(d->hwirq % GPIO_PER_REG))
+ val |= BIT(d->hwirq % GPIO_PER_REG); /* falling */
else
- val &= ~d->mask; /* rising */
+ val &= ~(BIT(d->hwirq % GPIO_PER_REG)); /* rising */
break;
}
default:
@@ -722,6 +722,8 @@ static int armada_37xx_irqchip_register(struct platform_device *pdev,
struct device_node *np = info->dev->of_node;
struct gpio_chip *gc = &info->gpio_chip;
struct irq_chip *irqchip = &info->irq_chip;
+ struct gpio_irq_chip *girq = &gc->irq;
+ struct device *dev = &pdev->dev;
struct resource res;
int ret = -ENODEV, i, nr_irq_parent;
@@ -732,19 +734,21 @@ static int armada_37xx_irqchip_register(struct platform_device *pdev,
break;
}
};
- if (ret)
+ if (ret) {
+ dev_err(dev, "no gpio-controller child node\n");
return ret;
+ }
nr_irq_parent = of_irq_count(np);
spin_lock_init(&info->irq_lock);
if (!nr_irq_parent) {
- dev_err(&pdev->dev, "Invalid or no IRQ\n");
+ dev_err(dev, "invalid or no IRQ\n");
return 0;
}
if (of_address_to_resource(info->dev->of_node, 1, &res)) {
- dev_err(info->dev, "cannot find IO resource\n");
+ dev_err(dev, "cannot find IO resource\n");
return -ENOENT;
}
@@ -759,27 +763,27 @@ static int armada_37xx_irqchip_register(struct platform_device *pdev,
irqchip->irq_set_type = armada_37xx_irq_set_type;
irqchip->irq_startup = armada_37xx_irq_startup;
irqchip->name = info->data->name;
- ret = gpiochip_irqchip_add(gc, irqchip, 0,
- handle_edge_irq, IRQ_TYPE_NONE);
- if (ret) {
- dev_info(&pdev->dev, "could not add irqchip\n");
- return ret;
- }
-
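+ /* Fill the gpio_irq_chip template; gpiolib registers it with the chip */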
+ girq->chip = irqchip;
+ girq->parent_handler = armada_37xx_irq_handler;
/*
* Many interrupts are connected to the parent interrupt
* controller. But we do not take advantage of this and use
* the chained irq with all of them.
*/
+ girq->num_parents = nr_irq_parent;
+ girq->parents = devm_kcalloc(&pdev->dev, nr_irq_parent,
+ sizeof(*girq->parents), GFP_KERNEL);
+ if (!girq->parents)
+ return -ENOMEM;
for (i = 0; i < nr_irq_parent; i++) {
int irq = irq_of_parse_and_map(np, i);
if (irq < 0)
continue;
-
- gpiochip_set_chained_irqchip(gc, irqchip, irq,
- armada_37xx_irq_handler);
+ girq->parents[i] = irq;
}
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_edge_irq;
return 0;
}
@@ -809,10 +813,10 @@ static int armada_37xx_gpiochip_register(struct platform_device *pdev,
gc->of_node = np;
gc->label = info->data->name;
- ret = devm_gpiochip_add_data(&pdev->dev, gc, info);
+ ret = armada_37xx_irqchip_register(pdev, info);
if (ret)
return ret;
- ret = armada_37xx_irqchip_register(pdev, info);
+ ret = devm_gpiochip_add_data(&pdev->dev, gc, info);
if (ret)
return ret;
diff --git a/drivers/pinctrl/mvebu/pinctrl-mvebu.c b/drivers/pinctrl/mvebu/pinctrl-mvebu.c
index 00cfaf2c9d4a..a1f93859e7ca 100644
--- a/drivers/pinctrl/mvebu/pinctrl-mvebu.c
+++ b/drivers/pinctrl/mvebu/pinctrl-mvebu.c
@@ -759,12 +759,10 @@ int mvebu_pinctrl_simple_mmio_probe(struct platform_device *pdev)
{
struct mvebu_pinctrl_soc_info *soc = dev_get_platdata(&pdev->dev);
struct mvebu_mpp_ctrl_data *mpp_data;
- struct resource *res;
void __iomem *base;
int i;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/drivers/pinctrl/mvebu/pinctrl-orion.c b/drivers/pinctrl/mvebu/pinctrl-orion.c
index 29bb9d8cbbb5..cc97d270be61 100644
--- a/drivers/pinctrl/mvebu/pinctrl-orion.c
+++ b/drivers/pinctrl/mvebu/pinctrl-orion.c
@@ -220,17 +220,14 @@ static int orion_pinctrl_probe(struct platform_device *pdev)
{
const struct of_device_id *match =
of_match_device(orion_pinctrl_of_match, &pdev->dev);
- struct resource *res;
pdev->dev.platform_data = (void*)match->data;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mpp_base = devm_ioremap_resource(&pdev->dev, res);
+ mpp_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mpp_base))
return PTR_ERR(mpp_base);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- high_mpp_base = devm_ioremap_resource(&pdev->dev, res);
+ high_mpp_base = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(high_mpp_base))
return PTR_ERR(high_mpp_base);
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c b/drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c
index 726c0b5501fa..b9246e0b4fe2 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c
@@ -391,6 +391,15 @@ static const unsigned mc0_a_1_pins[] = { DB8500_PIN_AC2, /* MC0_CMDDIR */
DB8500_PIN_AA2, /* MC0_DAT2 */
DB8500_PIN_AA1 /* MC0_DAT3 */
};
+/* MMC/SD card 0 interface without CMD/DAT0/DAT2 direction control */
+static const unsigned mc0_a_2_pins[] = { DB8500_PIN_AA3, /* MC0_FBCLK */
+ DB8500_PIN_AA4, /* MC0_CLK */
+ DB8500_PIN_AB2, /* MC0_CMD */
+ DB8500_PIN_Y4, /* MC0_DAT0 */
+ DB8500_PIN_Y2, /* MC0_DAT1 */
+ DB8500_PIN_AA2, /* MC0_DAT2 */
+ DB8500_PIN_AA1 /* MC0_DAT3 */
+};
/* Often only 4 bits are used, then these are not needed (only used for MMC) */
static const unsigned mc0_dat47_a_1_pins[] = { DB8500_PIN_W2, /* MC0_DAT4 */
DB8500_PIN_W3, /* MC0_DAT5 */
@@ -670,6 +679,7 @@ static const struct nmk_pingroup nmk_db8500_groups[] = {
DB8500_PIN_GROUP(msp0tfstck_a_1, NMK_GPIO_ALT_A),
DB8500_PIN_GROUP(msp0rfsrck_a_1, NMK_GPIO_ALT_A),
DB8500_PIN_GROUP(mc0_a_1, NMK_GPIO_ALT_A),
+ DB8500_PIN_GROUP(mc0_a_2, NMK_GPIO_ALT_A),
DB8500_PIN_GROUP(mc0_dat47_a_1, NMK_GPIO_ALT_A),
DB8500_PIN_GROUP(mc0dat31dir_a_1, NMK_GPIO_ALT_A),
DB8500_PIN_GROUP(msp1txrx_a_1, NMK_GPIO_ALT_A),
@@ -828,7 +838,7 @@ DB8500_FUNC_GROUPS(ipi2c, "ipi2c_a_1", "ipi2c_a_2");
*/
DB8500_FUNC_GROUPS(msp0, "msp0txrx_a_1", "msp0tfstck_a_1", "msp0rfstck_a_1",
"msp0txrx_b_1", "msp0sck_b_1");
-DB8500_FUNC_GROUPS(mc0, "mc0_a_1", "mc0_dat47_a_1", "mc0dat31dir_a_1");
+DB8500_FUNC_GROUPS(mc0, "mc0_a_1", "mc0_a_2", "mc0_dat47_a_1", "mc0dat31dir_a_1");
/* MSP1 can swap RX/TX like MSP0 but has no SCK pin available */
DB8500_FUNC_GROUPS(msp1, "msp1txrx_a_1", "msp1_a_1", "msp1txrx_b_1");
DB8500_FUNC_GROUPS(lcdb, "lcdb_a_1");
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
index 2a8190b11d10..95f864dfdef4 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
@@ -248,9 +248,6 @@ struct nmk_gpio_chip {
void __iomem *addr;
struct clk *clk;
unsigned int bank;
- unsigned int parent_irq;
- int latent_parent_irq;
- u32 (*get_latent_status)(unsigned int bank);
void (*set_ioforce)(bool enable);
spinlock_t lock;
bool sleepmode;
@@ -802,13 +799,19 @@ static void nmk_gpio_irq_shutdown(struct irq_data *d)
clk_disable(nmk_chip->clk);
}
-static void __nmk_gpio_irq_handler(struct irq_desc *desc, u32 status)
+static void nmk_gpio_irq_handler(struct irq_desc *desc)
{
struct irq_chip *host_chip = irq_desc_get_chip(desc);
struct gpio_chip *chip = irq_desc_get_handler_data(desc);
+ struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(chip);
+ u32 status;
chained_irq_enter(host_chip, desc);
+ clk_enable(nmk_chip->clk);
+ status = readl(nmk_chip->addr + NMK_GPIO_IS);
+ clk_disable(nmk_chip->clk);
+
while (status) {
int bit = __ffs(status);
@@ -819,28 +822,6 @@ static void __nmk_gpio_irq_handler(struct irq_desc *desc, u32 status)
chained_irq_exit(host_chip, desc);
}
-static void nmk_gpio_irq_handler(struct irq_desc *desc)
-{
- struct gpio_chip *chip = irq_desc_get_handler_data(desc);
- struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(chip);
- u32 status;
-
- clk_enable(nmk_chip->clk);
- status = readl(nmk_chip->addr + NMK_GPIO_IS);
- clk_disable(nmk_chip->clk);
-
- __nmk_gpio_irq_handler(desc, status);
-}
-
-static void nmk_gpio_latent_irq_handler(struct irq_desc *desc)
-{
- struct gpio_chip *chip = irq_desc_get_handler_data(desc);
- struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(chip);
- u32 status = nmk_chip->get_latent_status(nmk_chip->bank);
-
- __nmk_gpio_irq_handler(desc, status);
-}
-
/* I/O Functions */
static int nmk_gpio_get_dir(struct gpio_chip *chip, unsigned offset)
@@ -1103,8 +1084,8 @@ static int nmk_gpio_probe(struct platform_device *dev)
struct device_node *np = dev->dev.of_node;
struct nmk_gpio_chip *nmk_chip;
struct gpio_chip *chip;
+ struct gpio_irq_chip *girq;
struct irq_chip *irqchip;
- int latent_irq;
bool supports_sleepmode;
int irq;
int ret;
@@ -1125,15 +1106,10 @@ static int nmk_gpio_probe(struct platform_device *dev)
if (irq < 0)
return irq;
- /* It's OK for this IRQ not to be present */
- latent_irq = platform_get_irq(dev, 1);
-
/*
* The virt address in nmk_chip->addr is in the nomadik register space,
* so we can simply convert the resource address, without remapping
*/
- nmk_chip->parent_irq = irq;
- nmk_chip->latent_parent_irq = latent_irq;
nmk_chip->sleepmode = supports_sleepmode;
spin_lock_init(&nmk_chip->lock);
@@ -1163,6 +1139,19 @@ static int nmk_gpio_probe(struct platform_device *dev)
chip->base,
chip->base + chip->ngpio - 1);
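+ /* Describe the chained IRQ setup; the gpiolib core wires it up for us */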
+ girq = &chip->irq;
+ girq->chip = irqchip;
+ girq->parent_handler = nmk_gpio_irq_handler;
+ girq->num_parents = 1;
+ girq->parents = devm_kcalloc(&dev->dev, 1,
+ sizeof(*girq->parents),
+ GFP_KERNEL);
+ if (!girq->parents)
+ return -ENOMEM;
+ girq->parents[0] = irq;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_edge_irq;
+
clk_enable(nmk_chip->clk);
nmk_chip->lowemi = readl_relaxed(nmk_chip->addr + NMK_GPIO_LOWEMI);
clk_disable(nmk_chip->clk);
@@ -1174,33 +1163,7 @@ static int nmk_gpio_probe(struct platform_device *dev)
platform_set_drvdata(dev, nmk_chip);
- /*
- * Let the generic code handle this edge IRQ, the the chained
- * handler will perform the actual work of handling the parent
- * interrupt.
- */
- ret = gpiochip_irqchip_add(chip,
- irqchip,
- 0,
- handle_edge_irq,
- IRQ_TYPE_NONE);
- if (ret) {
- dev_err(&dev->dev, "could not add irqchip\n");
- gpiochip_remove(&nmk_chip->chip);
- return -ENODEV;
- }
- /* Then register the chain on the parent IRQ */
- gpiochip_set_chained_irqchip(chip,
- irqchip,
- nmk_chip->parent_irq,
- nmk_gpio_irq_handler);
- if (nmk_chip->latent_parent_irq > 0)
- gpiochip_set_chained_irqchip(chip,
- irqchip,
- nmk_chip->latent_parent_irq,
- nmk_gpio_latent_irq_handler);
-
- dev_info(&dev->dev, "at address %p\n", nmk_chip->addr);
+ dev_info(&dev->dev, "chip registered\n");
return 0;
}
diff --git a/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c b/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c
index 17f909d8b63a..22077cbe6880 100644
--- a/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c
+++ b/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c
@@ -1954,6 +1954,22 @@ static int npcm7xx_gpio_register(struct npcm7xx_pinctrl *pctrl)
int ret, id;
for (id = 0 ; id < pctrl->bank_num ; id++) {
+ struct gpio_irq_chip *girq;
+
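+ /* Describe the IRQ chain so devm_gpiochip_add_data() sets it up as well */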
+ girq = &pctrl->gpio_bank[id].gc.irq;
+ girq->chip = &pctrl->gpio_bank[id].irq_chip;
+ girq->parent_handler = npcmgpio_irq_handler;
+ girq->num_parents = 1;
+ girq->parents = devm_kcalloc(pctrl->dev, 1,
+ sizeof(*girq->parents),
+ GFP_KERNEL);
+ if (!girq->parents) {
+ ret = -ENOMEM;
+ goto err_register;
+ }
+ girq->parents[0] = pctrl->gpio_bank[id].irq;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_level_irq;
ret = devm_gpiochip_add_data(pctrl->dev,
&pctrl->gpio_bank[id].gc,
&pctrl->gpio_bank[id]);
@@ -1972,22 +1988,6 @@ static int npcm7xx_gpio_register(struct npcm7xx_pinctrl *pctrl)
gpiochip_remove(&pctrl->gpio_bank[id].gc);
goto err_register;
}
-
- ret = gpiochip_irqchip_add(&pctrl->gpio_bank[id].gc,
- &pctrl->gpio_bank[id].irq_chip,
- 0, handle_level_irq,
- IRQ_TYPE_NONE);
- if (ret < 0) {
- dev_err(pctrl->dev,
- "Failed to add IRQ chip %u\n", id);
- gpiochip_remove(&pctrl->gpio_bank[id].gc);
- goto err_register;
- }
-
- gpiochip_set_chained_irqchip(&pctrl->gpio_bank[id].gc,
- &pctrl->gpio_bank[id].irq_chip,
- pctrl->gpio_bank[id].irq,
- npcmgpio_irq_handler);
}
return 0;
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index 2c61141519f8..eab078244a4c 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -540,7 +540,8 @@ static irqreturn_t amd_gpio_irq_handler(int irq, void *dev_id)
irqreturn_t ret = IRQ_NONE;
unsigned int i, irqnr;
unsigned long flags;
- u32 *regs, regval;
+ u32 __iomem *regs;
+ u32 regval;
u64 status, mask;
/* Read the wake status */
diff --git a/drivers/pinctrl/pinctrl-artpec6.c b/drivers/pinctrl/pinctrl-artpec6.c
index e3239cf926f9..986e04ac6b5b 100644
--- a/drivers/pinctrl/pinctrl-artpec6.c
+++ b/drivers/pinctrl/pinctrl-artpec6.c
@@ -936,7 +936,6 @@ static void artpec6_pmx_reset(struct artpec6_pmx *pmx)
static int artpec6_pmx_probe(struct platform_device *pdev)
{
struct artpec6_pmx *pmx;
- struct resource *res;
pmx = devm_kzalloc(&pdev->dev, sizeof(*pmx), GFP_KERNEL);
if (!pmx)
@@ -944,8 +943,7 @@ static int artpec6_pmx_probe(struct platform_device *pdev)
pmx->dev = &pdev->dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pmx->base = devm_ioremap_resource(&pdev->dev, res);
+ pmx->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pmx->base))
return PTR_ERR(pmx->base);
diff --git a/drivers/pinctrl/pinctrl-at91-pio4.c b/drivers/pinctrl/pinctrl-at91-pio4.c
index d6de4d360cd4..694912409fd9 100644
--- a/drivers/pinctrl/pinctrl-at91-pio4.c
+++ b/drivers/pinctrl/pinctrl-at91-pio4.c
@@ -328,6 +328,33 @@ static int atmel_gpio_get(struct gpio_chip *chip, unsigned offset)
return !!(reg & BIT(pin->line));
}
+static int atmel_gpio_get_multiple(struct gpio_chip *chip, unsigned long *mask,
+ unsigned long *bits)
+{
+ struct atmel_pioctrl *atmel_pioctrl = gpiochip_get_data(chip);
+ unsigned int bank;
+
+ bitmap_zero(bits, atmel_pioctrl->npins);
+
+ for (bank = 0; bank < atmel_pioctrl->nbanks; bank++) {
+ unsigned int word = bank;
+ unsigned int offset = 0;
+ unsigned int reg;
+
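+/*
+ * On a 64-bit platform two 32-pin banks share one word of the bitmask,
+ * so compute both the word index and the bit offset within that word.
+ */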
+#if ATMEL_PIO_NPINS_PER_BANK != BITS_PER_LONG
+ word = BIT_WORD(bank * ATMEL_PIO_NPINS_PER_BANK);
+ offset = bank * ATMEL_PIO_NPINS_PER_BANK % BITS_PER_LONG;
+#endif
+ if (!mask[word])
+ continue;
+
+ reg = atmel_gpio_read(atmel_pioctrl, bank, ATMEL_PIO_PDSR);
+ bits[word] |= mask[word] & (reg << offset);
+ }
+
+ return 0;
+}
+
static int atmel_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
int value)
{
@@ -358,11 +385,46 @@ static void atmel_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
BIT(pin->line));
}
+static void atmel_gpio_set_multiple(struct gpio_chip *chip, unsigned long *mask,
+ unsigned long *bits)
+{
+ struct atmel_pioctrl *atmel_pioctrl = gpiochip_get_data(chip);
+ unsigned int bank;
+
+ for (bank = 0; bank < atmel_pioctrl->nbanks; bank++) {
+ unsigned int bitmask;
+ unsigned int word = bank;
+
+/*
+ * On a 64-bit platform, BITS_PER_LONG is 64 so it is necessary to iterate over
+ * two 32bit words to handle the whole bitmask
+ */
+#if ATMEL_PIO_NPINS_PER_BANK != BITS_PER_LONG
+ word = BIT_WORD(bank * ATMEL_PIO_NPINS_PER_BANK);
+#endif
+ if (!mask[word])
+ continue;
+
+ bitmask = mask[word] & bits[word];
+ atmel_gpio_write(atmel_pioctrl, bank, ATMEL_PIO_SODR, bitmask);
+
+ bitmask = mask[word] & ~bits[word];
+ atmel_gpio_write(atmel_pioctrl, bank, ATMEL_PIO_CODR, bitmask);
+
+#if ATMEL_PIO_NPINS_PER_BANK != BITS_PER_LONG
+ mask[word] >>= ATMEL_PIO_NPINS_PER_BANK;
+ bits[word] >>= ATMEL_PIO_NPINS_PER_BANK;
+#endif
+ }
+}
+
static struct gpio_chip atmel_gpio_chip = {
.direction_input = atmel_gpio_direction_input,
.get = atmel_gpio_get,
+ .get_multiple = atmel_gpio_get_multiple,
.direction_output = atmel_gpio_direction_output,
.set = atmel_gpio_set,
+ .set_multiple = atmel_gpio_set_multiple,
.to_irq = atmel_gpio_to_irq,
.base = 0,
};
@@ -955,8 +1017,7 @@ static int atmel_pinctrl_probe(struct platform_device *pdev)
atmel_pioctrl->nbanks = atmel_pioctrl_data->nbanks;
atmel_pioctrl->npins = atmel_pioctrl->nbanks * ATMEL_PIO_NPINS_PER_BANK;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- atmel_pioctrl->reg_base = devm_ioremap_resource(dev, res);
+ atmel_pioctrl->reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(atmel_pioctrl->reg_base))
return -EINVAL;
diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
index d6e7e9f0ddec..207f266e9cf2 100644
--- a/drivers/pinctrl/pinctrl-at91.c
+++ b/drivers/pinctrl/pinctrl-at91.c
@@ -85,8 +85,8 @@ enum drive_strength_bit {
DRIVE_STRENGTH_SHIFT)
enum slewrate_bit {
- SLEWRATE_BIT_DIS,
SLEWRATE_BIT_ENA,
+ SLEWRATE_BIT_DIS,
};
#define SLEWRATE_BIT_MSK(name) (SLEWRATE_BIT_##name << SLEWRATE_SHIFT)
@@ -669,7 +669,7 @@ static void at91_mux_sam9x60_set_slewrate(void __iomem *pio, unsigned pin,
{
unsigned int tmp;
- if (setting < SLEWRATE_BIT_DIS || setting > SLEWRATE_BIT_ENA)
+ if (setting < SLEWRATE_BIT_ENA || setting > SLEWRATE_BIT_DIS)
return;
tmp = readl_relaxed(pio + SAM9X60_PIO_SLEWR);
@@ -1723,9 +1723,11 @@ static int at91_gpio_of_irq_setup(struct platform_device *pdev,
struct at91_gpio_chip *prev = NULL;
struct irq_data *d = irq_get_irq_data(at91_gpio->pioc_virq);
struct irq_chip *gpio_irqchip;
- int ret, i;
+ struct gpio_irq_chip *girq;
+ int i;
- gpio_irqchip = devm_kzalloc(&pdev->dev, sizeof(*gpio_irqchip), GFP_KERNEL);
+ gpio_irqchip = devm_kzalloc(&pdev->dev, sizeof(*gpio_irqchip),
+ GFP_KERNEL);
if (!gpio_irqchip)
return -ENOMEM;
@@ -1747,33 +1749,30 @@ static int at91_gpio_of_irq_setup(struct platform_device *pdev,
* handler will perform the actual work of handling the parent
* interrupt.
*/
- ret = gpiochip_irqchip_add(&at91_gpio->chip,
- gpio_irqchip,
- 0,
- handle_edge_irq,
- IRQ_TYPE_NONE);
- if (ret) {
- dev_err(&pdev->dev, "at91_gpio.%d: Couldn't add irqchip to gpiochip.\n",
- at91_gpio->pioc_idx);
- return ret;
- }
+ girq = &at91_gpio->chip.irq;
+ girq->chip = gpio_irqchip;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_edge_irq;
- /* The top level handler handles one bank of GPIOs, except
+ /*
+ * The top level handler handles one bank of GPIOs, except
* on some SoC it can handle up to three...
* We only set up the handler for the first of the list.
*/
gpiochip_prev = irq_get_handler_data(at91_gpio->pioc_virq);
if (!gpiochip_prev) {
- /* Then register the chain on the parent IRQ */
- gpiochip_set_chained_irqchip(&at91_gpio->chip,
- gpio_irqchip,
- at91_gpio->pioc_virq,
- gpio_irq_handler);
+ girq->parent_handler = gpio_irq_handler;
+ girq->num_parents = 1;
+ girq->parents = devm_kcalloc(&pdev->dev, 1,
+ sizeof(*girq->parents),
+ GFP_KERNEL);
+ if (!girq->parents)
+ return -ENOMEM;
+ girq->parents[0] = at91_gpio->pioc_virq;
return 0;
}
prev = gpiochip_get_data(gpiochip_prev);
-
/* we can only have 2 banks before */
for (i = 0; i < 2; i++) {
if (prev->next) {
@@ -1812,7 +1811,6 @@ static const struct of_device_id at91_gpio_of_match[] = {
static int at91_gpio_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
- struct resource *res;
struct at91_gpio_chip *at91_chip = NULL;
struct gpio_chip *chip;
struct pinctrl_gpio_range *range;
@@ -1840,8 +1838,7 @@ static int at91_gpio_probe(struct platform_device *pdev)
goto err;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- at91_chip->regbase = devm_ioremap_resource(&pdev->dev, res);
+ at91_chip->regbase = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(at91_chip->regbase)) {
ret = PTR_ERR(at91_chip->regbase);
goto err;
@@ -1903,6 +1900,10 @@ static int at91_gpio_probe(struct platform_device *pdev)
range->npins = chip->ngpio;
range->gc = chip;
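+ /* The IRQ template must be filled in before the gpiochip is added */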
+ ret = at91_gpio_of_irq_setup(pdev, at91_chip);
+ if (ret)
+ goto gpiochip_add_err;
+
ret = gpiochip_add_data(chip, at91_chip);
if (ret)
goto gpiochip_add_err;
@@ -1910,16 +1911,10 @@ static int at91_gpio_probe(struct platform_device *pdev)
gpio_chips[alias_idx] = at91_chip;
gpio_banks = max(gpio_banks, alias_idx + 1);
- ret = at91_gpio_of_irq_setup(pdev, at91_chip);
- if (ret)
- goto irq_setup_err;
-
dev_info(&pdev->dev, "at address %p\n", at91_chip->regbase);
return 0;
-irq_setup_err:
- gpiochip_remove(chip);
gpiochip_add_err:
clk_enable_err:
clk_disable_unprepare(at91_chip->clock);
diff --git a/drivers/pinctrl/pinctrl-bm1880.c b/drivers/pinctrl/pinctrl-bm1880.c
index 63b130cb1ffb..f7dff4f14101 100644
--- a/drivers/pinctrl/pinctrl-bm1880.c
+++ b/drivers/pinctrl/pinctrl-bm1880.c
@@ -1308,15 +1308,13 @@ static struct pinctrl_desc bm1880_desc = {
static int bm1880_pinctrl_probe(struct platform_device *pdev)
{
- struct resource *res;
struct bm1880_pinctrl *pctrl;
pctrl = devm_kzalloc(&pdev->dev, sizeof(*pctrl), GFP_KERNEL);
if (!pctrl)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pctrl->base = devm_ioremap_resource(&pdev->dev, res);
+ pctrl->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pctrl->base))
return PTR_ERR(pctrl->base);
diff --git a/drivers/pinctrl/pinctrl-coh901.c b/drivers/pinctrl/pinctrl-coh901.c
index 08b9e909e917..2905348ff430 100644
--- a/drivers/pinctrl/pinctrl-coh901.c
+++ b/drivers/pinctrl/pinctrl-coh901.c
@@ -615,7 +615,7 @@ static struct coh901_pinpair coh901_pintable[] = {
static int __init u300_gpio_probe(struct platform_device *pdev)
{
struct u300_gpio *gpio;
- struct resource *memres;
+ struct gpio_irq_chip *girq;
int err = 0;
int portno;
u32 val;
@@ -632,8 +632,7 @@ static int __init u300_gpio_probe(struct platform_device *pdev)
gpio->chip.base = 0;
gpio->dev = &pdev->dev;
- memres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- gpio->base = devm_ioremap_resource(&pdev->dev, memres);
+ gpio->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(gpio->base))
return PTR_ERR(gpio->base);
@@ -672,26 +671,17 @@ static int __init u300_gpio_probe(struct platform_device *pdev)
gpio->base + U300_GPIO_CR);
u300_gpio_init_coh901571(gpio);
-#ifdef CONFIG_OF_GPIO
- gpio->chip.of_node = pdev->dev.of_node;
-#endif
- err = gpiochip_add_data(&gpio->chip, gpio);
- if (err) {
- dev_err(gpio->dev, "unable to add gpiochip: %d\n", err);
- goto err_no_chip;
- }
-
- err = gpiochip_irqchip_add(&gpio->chip,
- &u300_gpio_irqchip,
- 0,
- handle_simple_irq,
- IRQ_TYPE_EDGE_FALLING);
- if (err) {
- dev_err(gpio->dev, "no GPIO irqchip\n");
- goto err_no_irqchip;
+ girq = &gpio->chip.irq;
+ girq->chip = &u300_gpio_irqchip;
+ girq->parent_handler = u300_gpio_irq_handler;
+ girq->num_parents = U300_GPIO_NUM_PORTS;
+ girq->parents = devm_kcalloc(gpio->dev, U300_GPIO_NUM_PORTS,
+ sizeof(*girq->parents),
+ GFP_KERNEL);
+ if (!girq->parents) {
+ err = -ENOMEM;
+ goto err_dis_clk;
}
-
- /* Add each port with its IRQ separately */
for (portno = 0 ; portno < U300_GPIO_NUM_PORTS; portno++) {
struct u300_gpio_port *port = &gpio->ports[portno];
@@ -700,16 +690,21 @@ static int __init u300_gpio_probe(struct platform_device *pdev)
port->gpio = gpio;
port->irq = platform_get_irq(pdev, portno);
-
- gpiochip_set_chained_irqchip(&gpio->chip,
- &u300_gpio_irqchip,
- port->irq,
- u300_gpio_irq_handler);
+ girq->parents[portno] = port->irq;
/* Turns off irq force (test register) for this port */
writel(0x0, gpio->base + portno * gpio->stride + ifr);
}
- dev_dbg(gpio->dev, "initialized %d GPIO ports\n", portno);
+ girq->default_type = IRQ_TYPE_EDGE_FALLING;
+ girq->handler = handle_simple_irq;
+#ifdef CONFIG_OF_GPIO
+ gpio->chip.of_node = pdev->dev.of_node;
+#endif
+ err = gpiochip_add_data(&gpio->chip, gpio);
+ if (err) {
+ dev_err(gpio->dev, "unable to add gpiochip: %d\n", err);
+ goto err_dis_clk;
+ }
/*
* Add pinctrl pin ranges, the pin controller must be registered
@@ -729,9 +724,8 @@ static int __init u300_gpio_probe(struct platform_device *pdev)
return 0;
err_no_range:
-err_no_irqchip:
gpiochip_remove(&gpio->chip);
-err_no_chip:
+err_dis_clk:
clk_disable_unprepare(gpio->clk);
dev_err(&pdev->dev, "module ERROR:%d\n", err);
return err;
diff --git a/drivers/pinctrl/pinctrl-da850-pupd.c b/drivers/pinctrl/pinctrl-da850-pupd.c
index d06f13a79740..5a0a1f20c843 100644
--- a/drivers/pinctrl/pinctrl-da850-pupd.c
+++ b/drivers/pinctrl/pinctrl-da850-pupd.c
@@ -146,14 +146,12 @@ static int da850_pupd_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct da850_pupd_data *data;
- struct resource *res;
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- data->base = devm_ioremap_resource(dev, res);
+ data->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(data->base)) {
dev_err(dev, "Could not map resource\n");
return PTR_ERR(data->base);
diff --git a/drivers/pinctrl/pinctrl-digicolor.c b/drivers/pinctrl/pinctrl-digicolor.c
index 7e1ceee5895b..ff702cfbaa28 100644
--- a/drivers/pinctrl/pinctrl-digicolor.c
+++ b/drivers/pinctrl/pinctrl-digicolor.c
@@ -270,7 +270,6 @@ static int dc_gpiochip_add(struct dc_pinmap *pmap, struct device_node *np)
static int dc_pinctrl_probe(struct platform_device *pdev)
{
struct dc_pinmap *pmap;
- struct resource *r;
struct pinctrl_pin_desc *pins;
struct pinctrl_desc *pctl_desc;
char *pin_names;
@@ -281,8 +280,7 @@ static int dc_pinctrl_probe(struct platform_device *pdev)
if (!pmap)
return -ENOMEM;
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pmap->regs = devm_ioremap_resource(&pdev->dev, r);
+ pmap->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pmap->regs))
return PTR_ERR(pmap->regs);
diff --git a/drivers/pinctrl/pinctrl-equilibrium.c b/drivers/pinctrl/pinctrl-equilibrium.c
new file mode 100644
index 000000000000..36c9072c5ece
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-equilibrium.c
@@ -0,0 +1,944 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2019 Intel Corporation */
+
+#include <linux/gpio/driver.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/platform_device.h>
+
+#include "core.h"
+#include "pinconf.h"
+#include "pinmux.h"
+#include "pinctrl-equilibrium.h"
+
+#define PIN_NAME_FMT "io-%d"
+#define PIN_NAME_LEN 10
+#define PAD_REG_OFF 0x100
+
+static void eqbr_gpio_disable_irq(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct eqbr_gpio_ctrl *gctrl = gpiochip_get_data(gc);
+ unsigned int offset = irqd_to_hwirq(d);
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&gctrl->lock, flags);
+ writel(BIT(offset), gctrl->membase + GPIO_IRNENCLR);
+ raw_spin_unlock_irqrestore(&gctrl->lock, flags);
+}
+
+static void eqbr_gpio_enable_irq(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct eqbr_gpio_ctrl *gctrl = gpiochip_get_data(gc);
+ unsigned int offset = irqd_to_hwirq(d);
+ unsigned long flags;
+
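+ /* Make sure the pad is an input before its interrupt is unmasked */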
+ gc->direction_input(gc, offset);
+ raw_spin_lock_irqsave(&gctrl->lock, flags);
+ writel(BIT(offset), gctrl->membase + GPIO_IRNRNSET);
+ raw_spin_unlock_irqrestore(&gctrl->lock, flags);
+}
+
+static void eqbr_gpio_ack_irq(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct eqbr_gpio_ctrl *gctrl = gpiochip_get_data(gc);
+ unsigned int offset = irqd_to_hwirq(d);
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&gctrl->lock, flags);
+ writel(BIT(offset), gctrl->membase + GPIO_IRNCR);
+ raw_spin_unlock_irqrestore(&gctrl->lock, flags);
+}
+
+static void eqbr_gpio_mask_ack_irq(struct irq_data *d)
+{
+ eqbr_gpio_disable_irq(d);
+ eqbr_gpio_ack_irq(d);
+}
+
+static inline void eqbr_cfg_bit(void __iomem *addr,
+ unsigned int offset, unsigned int set)
+{
+ if (set)
+ writel(readl(addr) | BIT(offset), addr);
+ else
+ writel(readl(addr) & ~BIT(offset), addr);
+}
+
+static int eqbr_irq_type_cfg(struct gpio_irq_type *type,
+ struct eqbr_gpio_ctrl *gctrl,
+ unsigned int offset)
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&gctrl->lock, flags);
+ eqbr_cfg_bit(gctrl->membase + GPIO_IRNCFG, offset, type->trig_type);
+ eqbr_cfg_bit(gctrl->membase + GPIO_EXINTCR1, offset, type->trig_type);
+ eqbr_cfg_bit(gctrl->membase + GPIO_EXINTCR0, offset, type->logic_type);
+ raw_spin_unlock_irqrestore(&gctrl->lock, flags);
+
+ return 0;
+}
+
+static int eqbr_gpio_set_irq_type(struct irq_data *d, unsigned int type)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct eqbr_gpio_ctrl *gctrl = gpiochip_get_data(gc);
+ unsigned int offset = irqd_to_hwirq(d);
+ struct gpio_irq_type it;
+
+ memset(&it, 0, sizeof(it));
+
+ if ((type & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_NONE)
+ return 0;
+
+ switch (type) {
+ case IRQ_TYPE_EDGE_RISING:
+ it.trig_type = GPIO_EDGE_TRIG;
+ it.edge_type = GPIO_SINGLE_EDGE;
+ it.logic_type = GPIO_POSITIVE_TRIG;
+ break;
+
+ case IRQ_TYPE_EDGE_FALLING:
+ it.trig_type = GPIO_EDGE_TRIG;
+ it.edge_type = GPIO_SINGLE_EDGE;
+ it.logic_type = GPIO_NEGATIVE_TRIG;
+ break;
+
+ case IRQ_TYPE_EDGE_BOTH:
+ it.trig_type = GPIO_EDGE_TRIG;
+ it.edge_type = GPIO_BOTH_EDGE;
+ it.logic_type = GPIO_POSITIVE_TRIG;
+ break;
+
+ case IRQ_TYPE_LEVEL_HIGH:
+ it.trig_type = GPIO_LEVEL_TRIG;
+ it.edge_type = GPIO_SINGLE_EDGE;
+ it.logic_type = GPIO_POSITIVE_TRIG;
+ break;
+
+ case IRQ_TYPE_LEVEL_LOW:
+ it.trig_type = GPIO_LEVEL_TRIG;
+ it.edge_type = GPIO_SINGLE_EDGE;
+ it.logic_type = GPIO_NEGATIVE_TRIG;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ eqbr_irq_type_cfg(&it, gctrl, offset);
+ if (it.trig_type == GPIO_EDGE_TRIG)
+ irq_set_handler_locked(d, handle_edge_irq);
+ else
+ irq_set_handler_locked(d, handle_level_irq);
+
+ return 0;
+}
+
+static void eqbr_irq_handler(struct irq_desc *desc)
+{
+ struct gpio_chip *gc = irq_desc_get_handler_data(desc);
+ struct eqbr_gpio_ctrl *gctrl = gpiochip_get_data(gc);
+ struct irq_chip *ic = irq_desc_get_chip(desc);
+ unsigned long pins, offset;
+
+ chained_irq_enter(ic, desc);
+ pins = readl(gctrl->membase + GPIO_IRNCR);
+
+ for_each_set_bit(offset, &pins, gc->ngpio)
+ generic_handle_irq(irq_find_mapping(gc->irq.domain, offset));
+
+ chained_irq_exit(ic, desc);
+}
+
+static int gpiochip_setup(struct device *dev, struct eqbr_gpio_ctrl *gctrl)
+{
+ struct gpio_irq_chip *girq;
+ struct gpio_chip *gc;
+
+ gc = &gctrl->chip;
+ gc->label = gctrl->name;
+#if defined(CONFIG_OF_GPIO)
+ gc->of_node = gctrl->node;
+#endif
+
+ if (!of_property_read_bool(gctrl->node, "interrupt-controller")) {
+ dev_dbg(dev, "gc %s: doesn't act as interrupt controller!\n",
+ gctrl->name);
+ return 0;
+ }
+
+ gctrl->ic.name = "gpio_irq";
+ gctrl->ic.irq_mask = eqbr_gpio_disable_irq;
+ gctrl->ic.irq_unmask = eqbr_gpio_enable_irq;
+ gctrl->ic.irq_ack = eqbr_gpio_ack_irq;
+ gctrl->ic.irq_mask_ack = eqbr_gpio_mask_ack_irq;
+ gctrl->ic.irq_set_type = eqbr_gpio_set_irq_type;
+
+ girq = &gctrl->chip.irq;
+ girq->chip = &gctrl->ic;
+ girq->parent_handler = eqbr_irq_handler;
+ girq->num_parents = 1;
+ girq->parents = devm_kcalloc(dev, 1, sizeof(*girq->parents), GFP_KERNEL);
+ if (!girq->parents)
+ return -ENOMEM;
+
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_bad_irq;
+ girq->parents[0] = gctrl->virq;
+
+ return 0;
+}
+
+static int gpiolib_reg(struct eqbr_pinctrl_drv_data *drvdata)
+{
+ struct device *dev = drvdata->dev;
+ struct eqbr_gpio_ctrl *gctrl;
+ struct device_node *np;
+ struct resource res;
+ int i, ret;
+
+ for (i = 0; i < drvdata->nr_gpio_ctrls; i++) {
+ gctrl = drvdata->gpio_ctrls + i;
+ np = gctrl->node;
+
+ gctrl->name = devm_kasprintf(dev, GFP_KERNEL, "gpiochip%d", i);
+ if (!gctrl->name)
+ return -ENOMEM;
+
+ if (of_address_to_resource(np, 0, &res)) {
+ dev_err(dev, "Failed to get GPIO register address\n");
+ return -ENXIO;
+ }
+
+ gctrl->membase = devm_ioremap_resource(dev, &res);
+ if (IS_ERR(gctrl->membase))
+ return PTR_ERR(gctrl->membase);
+
+ gctrl->virq = irq_of_parse_and_map(np, 0);
+ if (!gctrl->virq) {
+ dev_err(dev, "%s: failed to parse and map irq\n",
+ gctrl->name);
+ return -ENXIO;
+ }
+ raw_spin_lock_init(&gctrl->lock);
+
+ ret = bgpio_init(&gctrl->chip, dev, gctrl->bank->nr_pins / 8,
+ gctrl->membase + GPIO_IN,
+ gctrl->membase + GPIO_OUTSET,
+ gctrl->membase + GPIO_OUTCLR,
+ gctrl->membase + GPIO_DIR,
+ NULL, 0);
+ if (ret) {
+ dev_err(dev, "unable to init generic GPIO\n");
+ return ret;
+ }
+
+ ret = gpiochip_setup(dev, gctrl);
+ if (ret)
+ return ret;
+
+ ret = devm_gpiochip_add_data(dev, &gctrl->chip, gctrl);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static inline struct eqbr_pin_bank
+*find_pinbank_via_pin(struct eqbr_pinctrl_drv_data *pctl, unsigned int pin)
+{
+ struct eqbr_pin_bank *bank;
+ int i;
+
+ for (i = 0; i < pctl->nr_banks; i++) {
+ bank = &pctl->pin_banks[i];
+ if (pin >= bank->pin_base &&
+ (pin - bank->pin_base) < bank->nr_pins)
+ return bank;
+ }
+
+ return NULL;
+}
+
+static const struct pinctrl_ops eqbr_pctl_ops = {
+ .get_groups_count = pinctrl_generic_get_group_count,
+ .get_group_name = pinctrl_generic_get_group_name,
+ .get_group_pins = pinctrl_generic_get_group_pins,
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_all,
+ .dt_free_map = pinconf_generic_dt_free_map,
+};
+
+static int eqbr_set_pin_mux(struct eqbr_pinctrl_drv_data *pctl,
+ unsigned int pmx, unsigned int pin)
+{
+ struct eqbr_pin_bank *bank;
+ unsigned long flags;
+ unsigned int offset;
+ void __iomem *mem;
+
+ bank = find_pinbank_via_pin(pctl, pin);
+ if (!bank) {
+ dev_err(pctl->dev, "Couldn't find pin bank for pin %u\n", pin);
+ return -ENODEV;
+ }
+ mem = bank->membase;
+ offset = pin - bank->pin_base;
+
+ if (!(bank->aval_pinmap & BIT(offset))) {
+ dev_err(pctl->dev,
+ "PIN: %u is not valid, pinbase: %u, bitmap: %u\n",
+ pin, bank->pin_base, bank->aval_pinmap);
+ return -ENODEV;
+ }
+
+ raw_spin_lock_irqsave(&pctl->lock, flags);
+ writel(pmx, mem + (offset * 4));
+ raw_spin_unlock_irqrestore(&pctl->lock, flags);
+ return 0;
+}
+
+static int eqbr_pinmux_set_mux(struct pinctrl_dev *pctldev,
+ unsigned int selector, unsigned int group)
+{
+ struct eqbr_pinctrl_drv_data *pctl = pinctrl_dev_get_drvdata(pctldev);
+ struct function_desc *func;
+ struct group_desc *grp;
+ unsigned int *pinmux;
+ int i;
+
+ func = pinmux_generic_get_function(pctldev, selector);
+ if (!func)
+ return -EINVAL;
+
+ grp = pinctrl_generic_get_group(pctldev, group);
+ if (!grp)
+ return -EINVAL;
+
+ pinmux = grp->data;
+ for (i = 0; i < grp->num_pins; i++)
+ eqbr_set_pin_mux(pctl, pinmux[i], grp->pins[i]);
+
+ return 0;
+}
+
+static int eqbr_pinmux_gpio_request(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned int pin)
+{
+ struct eqbr_pinctrl_drv_data *pctl = pinctrl_dev_get_drvdata(pctldev);
+
+ return eqbr_set_pin_mux(pctl, EQBR_GPIO_MODE, pin);
+}
+
+static const struct pinmux_ops eqbr_pinmux_ops = {
+ .get_functions_count = pinmux_generic_get_function_count,
+ .get_function_name = pinmux_generic_get_function_name,
+ .get_function_groups = pinmux_generic_get_function_groups,
+ .set_mux = eqbr_pinmux_set_mux,
+ .gpio_request_enable = eqbr_pinmux_gpio_request,
+ .strict = true,
+};
+
+static int get_drv_cur(void __iomem *mem, unsigned int offset)
+{
+ unsigned int idx = offset / DRV_CUR_PINS; /* pins 0-15, 16-31 per register */
+ unsigned int pin_offset = offset % DRV_CUR_PINS;
+
+ return PARSE_DRV_CURRENT(readl(mem + REG_DRCC(idx)), pin_offset);
+}
+
+static struct eqbr_gpio_ctrl
+*get_gpio_ctrls_via_bank(struct eqbr_pinctrl_drv_data *pctl,
+ struct eqbr_pin_bank *bank)
+{
+ int i;
+
+ for (i = 0; i < pctl->nr_gpio_ctrls; i++) {
+ if (pctl->gpio_ctrls[i].bank == bank)
+ return &pctl->gpio_ctrls[i];
+ }
+
+ return NULL;
+}
+
+static int eqbr_pinconf_get(struct pinctrl_dev *pctldev, unsigned int pin,
+ unsigned long *config)
+{
+ struct eqbr_pinctrl_drv_data *pctl = pinctrl_dev_get_drvdata(pctldev);
+ enum pin_config_param param = pinconf_to_config_param(*config);
+ struct eqbr_gpio_ctrl *gctrl;
+ struct eqbr_pin_bank *bank;
+ unsigned long flags;
+ unsigned int offset;
+ void __iomem *mem;
+ u32 val;
+
+ bank = find_pinbank_via_pin(pctl, pin);
+ if (!bank) {
+ dev_err(pctl->dev, "Couldn't find pin bank for pin %u\n", pin);
+ return -ENODEV;
+ }
+ mem = bank->membase;
+ offset = pin - bank->pin_base;
+
+ if (!(bank->aval_pinmap & BIT(offset))) {
+ dev_err(pctl->dev,
+ "PIN: %u is not valid, pinbase: %u, bitmap: %u\n",
+ pin, bank->pin_base, bank->aval_pinmap);
+ return -ENODEV;
+ }
+
+ raw_spin_lock_irqsave(&pctl->lock, flags);
+ switch (param) {
+ case PIN_CONFIG_BIAS_PULL_UP:
+ val = !!(readl(mem + REG_PUEN) & BIT(offset));
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ val = !!(readl(mem + REG_PDEN) & BIT(offset));
+ break;
+ case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+ val = !!(readl(mem + REG_OD) & BIT(offset));
+ break;
+ case PIN_CONFIG_DRIVE_STRENGTH:
+ val = get_drv_cur(mem, offset);
+ break;
+ case PIN_CONFIG_SLEW_RATE:
+ val = !!(readl(mem + REG_SRC) & BIT(offset));
+ break;
+ case PIN_CONFIG_OUTPUT_ENABLE:
+ gctrl = get_gpio_ctrls_via_bank(pctl, bank);
+ if (!gctrl) {
+ dev_err(pctl->dev, "Failed to find gpio via bank pinbase: %u, pin: %u\n",
+ bank->pin_base, pin);
+ raw_spin_unlock_irqrestore(&pctl->lock, flags);
+ return -ENODEV;
+ }
+ val = !!(readl(gctrl->membase + GPIO_DIR) & BIT(offset));
+ break;
+ default:
+ raw_spin_unlock_irqrestore(&pctl->lock, flags);
+ return -ENOTSUPP;
+ }
+ raw_spin_unlock_irqrestore(&pctl->lock, flags);
+ *config = pinconf_to_config_packed(param, val);
+ return 0;
+}
+
+static int eqbr_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
+ unsigned long *configs, unsigned int num_configs)
+{
+ struct eqbr_pinctrl_drv_data *pctl = pinctrl_dev_get_drvdata(pctldev);
+ struct eqbr_gpio_ctrl *gctrl;
+ enum pin_config_param param;
+ struct eqbr_pin_bank *bank;
+ unsigned int val, offset;
+ struct gpio_chip *gc;
+ unsigned long flags;
+ void __iomem *mem;
+ u32 regval, mask;
+ int i;
+
+ for (i = 0; i < num_configs; i++) {
+ param = pinconf_to_config_param(configs[i]);
+ val = pinconf_to_config_argument(configs[i]);
+
+ bank = find_pinbank_via_pin(pctl, pin);
+ if (!bank) {
+ dev_err(pctl->dev,
+ "Couldn't find pin bank for pin %u\n", pin);
+ return -ENODEV;
+ }
+ mem = bank->membase;
+ offset = pin - bank->pin_base;
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_PULL_UP:
+ mem += REG_PUEN;
+ mask = BIT(offset);
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ mem += REG_PDEN;
+ mask = BIT(offset);
+ break;
+ case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+ mem += REG_OD;
+ mask = BIT(offset);
+ break;
+ case PIN_CONFIG_DRIVE_STRENGTH:
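+ /* Two drive-current bits per pin, DRV_CUR_PINS pins per register */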
+ mem += REG_DRCC(offset / DRV_CUR_PINS);
+ offset = (offset % DRV_CUR_PINS) * 2;
+ mask = GENMASK(1, 0) << offset;
+ break;
+ case PIN_CONFIG_SLEW_RATE:
+ mem += REG_SRC;
+ mask = BIT(offset);
+ break;
+ case PIN_CONFIG_OUTPUT_ENABLE:
+ gctrl = get_gpio_ctrls_via_bank(pctl, bank);
+ if (!gctrl) {
+ dev_err(pctl->dev, "Failed to find gpio via bank pinbase: %u, pin: %u\n",
+ bank->pin_base, pin);
+ return -ENODEV;
+ }
+ gc = &gctrl->chip;
+ gc->direction_output(gc, offset, 0);
+ continue;
+ default:
+ return -ENOTSUPP;
+ }
+
+ raw_spin_lock_irqsave(&pctl->lock, flags);
+ regval = readl(mem);
+ regval = (regval & ~mask) | ((val << offset) & mask);
+ writel(regval, mem);
+ raw_spin_unlock_irqrestore(&pctl->lock, flags);
+ }
+
+ return 0;
+}
+
+static int eqbr_pinconf_group_get(struct pinctrl_dev *pctldev,
+ unsigned int group, unsigned long *config)
+{
+ unsigned int i, npins, old = 0;
+ const unsigned int *pins;
+ int ret;
+
+ ret = pinctrl_generic_get_group_pins(pctldev, group, &pins, &npins);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < npins; i++) {
+ if (eqbr_pinconf_get(pctldev, pins[i], config))
+ return -ENOTSUPP;
+
+ if (i && old != *config)
+ return -ENOTSUPP;
+
+ old = *config;
+ }
+ return 0;
+}
+
+static int eqbr_pinconf_group_set(struct pinctrl_dev *pctldev,
+ unsigned int group, unsigned long *configs,
+ unsigned int num_configs)
+{
+ const unsigned int *pins;
+ unsigned int i, npins;
+ int ret;
+
+ ret = pinctrl_generic_get_group_pins(pctldev, group, &pins, &npins);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < npins; i++) {
+ ret = eqbr_pinconf_set(pctldev, pins[i], configs, num_configs);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+static const struct pinconf_ops eqbr_pinconf_ops = {
+ .is_generic = true,
+ .pin_config_get = eqbr_pinconf_get,
+ .pin_config_set = eqbr_pinconf_set,
+ .pin_config_group_get = eqbr_pinconf_group_get,
+ .pin_config_group_set = eqbr_pinconf_group_set,
+ .pin_config_config_dbg_show = pinconf_generic_dump_config,
+};
+
+static bool is_func_exist(struct eqbr_pmx_func *funcs, const char *name,
+ unsigned int nr_funcs, unsigned int *idx)
+{
+ int i;
+
+ if (!funcs)
+ return false;
+
+ for (i = 0; i < nr_funcs; i++) {
+ if (funcs[i].name && !strcmp(funcs[i].name, name)) {
+ *idx = i;
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static int funcs_utils(struct device *dev, struct eqbr_pmx_func *funcs,
+ unsigned int *nr_funcs, funcs_util_ops op)
+{
+ struct device_node *node = dev->of_node;
+ struct device_node *np;
+ struct property *prop;
+ const char *fn_name;
+ unsigned int fid;
+ int i, j;
+
+ i = 0;
+ for_each_child_of_node(node, np) {
+ prop = of_find_property(np, "groups", NULL);
+ if (!prop)
+ continue;
+
+ if (of_property_read_string(np, "function", &fn_name)) {
+ /* some groups may not have a function; that's OK */
+ dev_dbg(dev, "Group %s: no function bound\n",
+ (char *)prop->value);
+ continue;
+ }
+
+ switch (op) {
+ case OP_COUNT_NR_FUNCS:
+ if (!is_func_exist(funcs, fn_name, *nr_funcs, &fid))
+ *nr_funcs = *nr_funcs + 1;
+ break;
+
+ case OP_ADD_FUNCS:
+ if (!is_func_exist(funcs, fn_name, *nr_funcs, &fid))
+ funcs[i].name = fn_name;
+ break;
+
+ case OP_COUNT_NR_FUNC_GRPS:
+ if (is_func_exist(funcs, fn_name, *nr_funcs, &fid))
+ funcs[fid].nr_groups++;
+ break;
+
+ case OP_ADD_FUNC_GRPS:
+ if (is_func_exist(funcs, fn_name, *nr_funcs, &fid)) {
+ for (j = 0; j < funcs[fid].nr_groups; j++)
+ if (!funcs[fid].groups[j])
+ break;
+ funcs[fid].groups[j] = prop->value;
+ }
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ i++;
+ }
+
+ return 0;
+}
+
+static int eqbr_build_functions(struct eqbr_pinctrl_drv_data *drvdata)
+{
+ struct device *dev = drvdata->dev;
+ struct eqbr_pmx_func *funcs = NULL;
+ unsigned int nr_funcs = 0;
+ int i, ret;
+
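+ /* First pass only counts the functions so the array can be sized */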
+ ret = funcs_utils(dev, funcs, &nr_funcs, OP_COUNT_NR_FUNCS);
+ if (ret)
+ return ret;
+
+ funcs = devm_kcalloc(dev, nr_funcs, sizeof(*funcs), GFP_KERNEL);
+ if (!funcs)
+ return -ENOMEM;
+
+ ret = funcs_utils(dev, funcs, &nr_funcs, OP_ADD_FUNCS);
+ if (ret)
+ return ret;
+
+ ret = funcs_utils(dev, funcs, &nr_funcs, OP_COUNT_NR_FUNC_GRPS);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < nr_funcs; i++) {
+ if (!funcs[i].nr_groups)
+ continue;
+ funcs[i].groups = devm_kcalloc(dev, funcs[i].nr_groups,
+ sizeof(*(funcs[i].groups)),
+ GFP_KERNEL);
+ if (!funcs[i].groups)
+ return -ENOMEM;
+ }
+
+ ret = funcs_utils(dev, funcs, &nr_funcs, OP_ADD_FUNC_GRPS);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < nr_funcs; i++) {
+ ret = pinmux_generic_add_function(drvdata->pctl_dev,
+ funcs[i].name,
+ funcs[i].groups,
+ funcs[i].nr_groups,
+ drvdata);
+ if (ret < 0) {
+ dev_err(dev, "Failed to register function %s\n",
+ funcs[i].name);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int eqbr_build_groups(struct eqbr_pinctrl_drv_data *drvdata)
+{
+ struct device *dev = drvdata->dev;
+ struct device_node *node = dev->of_node;
+ unsigned int *pinmux, pin_id, pinmux_id;
+ struct group_desc group;
+ struct device_node *np;
+ struct property *prop;
+ int j, err;
+
+ for_each_child_of_node(node, np) {
+ prop = of_find_property(np, "groups", NULL);
+ if (!prop)
+ continue;
+
+ group.num_pins = of_property_count_u32_elems(np, "pins");
+ if (group.num_pins < 0) {
+ dev_err(dev, "No pins in the group: %s\n", prop->name);
+ return -EINVAL;
+ }
+ group.name = prop->value;
+ group.pins = devm_kcalloc(dev, group.num_pins,
+ sizeof(*(group.pins)), GFP_KERNEL);
+ if (!group.pins)
+ return -ENOMEM;
+
+ pinmux = devm_kcalloc(dev, group.num_pins, sizeof(*pinmux),
+ GFP_KERNEL);
+ if (!pinmux)
+ return -ENOMEM;
+
+ for (j = 0; j < group.num_pins; j++) {
+ if (of_property_read_u32_index(np, "pins", j, &pin_id)) {
+ dev_err(dev, "Group %s: Read intel pins id failed\n",
+ group.name);
+ return -EINVAL;
+ }
+ if (pin_id >= drvdata->pctl_desc.npins) {
+ dev_err(dev, "Group %s: Invalid pin ID, idx: %d, pin %u\n",
+ group.name, j, pin_id);
+ return -EINVAL;
+ }
+ group.pins[j] = pin_id;
+ if (of_property_read_u32_index(np, "pinmux", j, &pinmux_id)) {
+ dev_err(dev, "Group %s: Read intel pinmux id failed\n",
+ group.name);
+ return -EINVAL;
+ }
+ pinmux[j] = pinmux_id;
+ }
+
+ err = pinctrl_generic_add_group(drvdata->pctl_dev, group.name,
+ group.pins, group.num_pins,
+ pinmux);
+ if (err < 0) {
+ dev_err(dev, "Failed to register group %s\n", group.name);
+ return err;
+ }
+ memset(&group, 0, sizeof(group));
+ pinmux = NULL;
+ }
+
+ return 0;
+}
+
+static int pinctrl_reg(struct eqbr_pinctrl_drv_data *drvdata)
+{
+ struct pinctrl_desc *pctl_desc;
+ struct pinctrl_pin_desc *pdesc;
+ struct device *dev;
+ unsigned int nr_pins;
+ char *pin_names;
+ int i, ret;
+
+ dev = drvdata->dev;
+ pctl_desc = &drvdata->pctl_desc;
+ pctl_desc->name = "eqbr-pinctrl";
+ pctl_desc->owner = THIS_MODULE;
+ pctl_desc->pctlops = &eqbr_pctl_ops;
+ pctl_desc->pmxops = &eqbr_pinmux_ops;
+ pctl_desc->confops = &eqbr_pinconf_ops;
+ raw_spin_lock_init(&drvdata->lock);
+
+ for (i = 0, nr_pins = 0; i < drvdata->nr_banks; i++)
+ nr_pins += drvdata->pin_banks[i].nr_pins;
+
+ pdesc = devm_kcalloc(dev, nr_pins, sizeof(*pdesc), GFP_KERNEL);
+ if (!pdesc)
+ return -ENOMEM;
+ pin_names = devm_kcalloc(dev, nr_pins, PIN_NAME_LEN, GFP_KERNEL);
+ if (!pin_names)
+ return -ENOMEM;
+
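+ /* All pin names live in one buffer, PIN_NAME_LEN bytes apart */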
+ for (i = 0; i < nr_pins; i++) {
+ sprintf(pin_names, PIN_NAME_FMT, i);
+ pdesc[i].number = i;
+ pdesc[i].name = pin_names;
+ pin_names += PIN_NAME_LEN;
+ }
+ pctl_desc->pins = pdesc;
+ pctl_desc->npins = nr_pins;
+ dev_dbg(dev, "pinctrl total pin number: %u\n", nr_pins);
+
+ ret = devm_pinctrl_register_and_init(dev, pctl_desc, drvdata,
+ &drvdata->pctl_dev);
+ if (ret)
+ return ret;
+
+ ret = eqbr_build_groups(drvdata);
+ if (ret) {
+ dev_err(dev, "Failed to build groups\n");
+ return ret;
+ }
+
+ ret = eqbr_build_functions(drvdata);
+ if (ret) {
+ dev_err(dev, "Failed to build groups\n");
+ return ret;
+ }
+
+ return pinctrl_enable(drvdata->pctl_dev);
+}
+
+static int pinbank_init(struct device_node *np,
+ struct eqbr_pinctrl_drv_data *drvdata,
+ struct eqbr_pin_bank *bank, unsigned int id)
+{
+ struct device *dev = drvdata->dev;
+ struct of_phandle_args spec;
+ int ret;
+
+ bank->membase = drvdata->membase + id * PAD_REG_OFF;
+
+ ret = of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3, 0, &spec);
+ if (ret) {
+ dev_err(dev, "gpio-range not available!\n");
+ return ret;
+ }
+
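+ /* Per the gpio-ranges binding, args are <gpio-offset pin-base npins> */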
+ bank->pin_base = spec.args[1];
+ bank->nr_pins = spec.args[2];
+
+ bank->aval_pinmap = readl(bank->membase + REG_AVAIL);
+ bank->id = id;
+
+ dev_dbg(dev, "pinbank id: %d, reg: %px, pinbase: %u, pin number: %u, pinmap: 0x%x\n",
+ id, bank->membase, bank->pin_base,
+ bank->nr_pins, bank->aval_pinmap);
+
+ return ret;
+}
+
+static int pinbank_probe(struct eqbr_pinctrl_drv_data *drvdata)
+{
+ struct device *dev = drvdata->dev;
+ struct device_node *np_gpio;
+ struct eqbr_gpio_ctrl *gctrls;
+ struct eqbr_pin_bank *banks;
+ int i, nr_gpio;
+
+ /* Count gpio bank number */
+ nr_gpio = 0;
+ for_each_node_by_name(np_gpio, "gpio") {
+ if (of_device_is_available(np_gpio))
+ nr_gpio++;
+ }
+
+ if (!nr_gpio) {
+ dev_err(dev, "NO pin bank available!\n");
+ return -ENODEV;
+ }
+
+ /* Count pin bank number and gpio controller number */
+ banks = devm_kcalloc(dev, nr_gpio, sizeof(*banks), GFP_KERNEL);
+ if (!banks)
+ return -ENOMEM;
+
+ gctrls = devm_kcalloc(dev, nr_gpio, sizeof(*gctrls), GFP_KERNEL);
+ if (!gctrls)
+ return -ENOMEM;
+
+ dev_dbg(dev, "found %d gpio controller!\n", nr_gpio);
+
+ /* Initialize Pin bank */
+ i = 0;
+ for_each_node_by_name(np_gpio, "gpio") {
+ if (!of_device_is_available(np_gpio))
+ continue;
+
+ pinbank_init(np_gpio, drvdata, banks + i, i);
+
+ gctrls[i].node = np_gpio;
+ gctrls[i].bank = banks + i;
+ i++;
+ }
+
+ drvdata->pin_banks = banks;
+ drvdata->nr_banks = nr_gpio;
+ drvdata->gpio_ctrls = gctrls;
+ drvdata->nr_gpio_ctrls = nr_gpio;
+
+ return 0;
+}
+
+static int eqbr_pinctrl_probe(struct platform_device *pdev)
+{
+ struct eqbr_pinctrl_drv_data *drvdata;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
+
+ drvdata->dev = dev;
+
+ drvdata->membase = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(drvdata->membase))
+ return PTR_ERR(drvdata->membase);
+
+ ret = pinbank_probe(drvdata);
+ if (ret)
+ return ret;
+
+ ret = pinctrl_reg(drvdata);
+ if (ret)
+ return ret;
+
+ ret = gpiolib_reg(drvdata);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, drvdata);
+ return 0;
+}
+
+static const struct of_device_id eqbr_pinctrl_dt_match[] = {
+ { .compatible = "intel,lgm-io" },
+ {}
+};
+
+static struct platform_driver eqbr_pinctrl_driver = {
+ .probe = eqbr_pinctrl_probe,
+ .driver = {
+ .name = "eqbr-pinctrl",
+ .of_match_table = eqbr_pinctrl_dt_match,
+ },
+};
+
+module_platform_driver(eqbr_pinctrl_driver);
+
+MODULE_AUTHOR("Zhu Yixin <yixin.zhu@intel.com>, Rahul Tanwar <rahul.tanwar@intel.com>");
+MODULE_DESCRIPTION("Pinctrl Driver for LGM SoC (Equilibrium)");
diff --git a/drivers/pinctrl/pinctrl-equilibrium.h b/drivers/pinctrl/pinctrl-equilibrium.h
new file mode 100644
index 000000000000..83cb7dafc657
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-equilibrium.h
@@ -0,0 +1,144 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright(c) 2019 Intel Corporation.
+ */
+
+#ifndef __PINCTRL_EQUILIBRIUM_H
+#define __PINCTRL_EQUILIBRIUM_H
+
+/* PINPAD register offset */
+#define REG_PMX_BASE 0x0 /* Port Multiplexer Control Register */
+#define REG_PUEN 0x80 /* PULL UP Enable Register */
+#define REG_PDEN 0x84 /* PULL DOWN Enable Register */
+#define REG_SRC 0x88 /* Slew Rate Control Register */
+#define REG_DCC0 0x8C /* Drive Current Control Register 0 */
+#define REG_DCC1 0x90 /* Drive Current Control Register 1 */
+#define REG_OD 0x94 /* Open Drain Enable Register */
+#define REG_AVAIL 0x98 /* Pad Control Availability Register */
+#define DRV_CUR_PINS 16 /* Drive Current pin number per register */
+#define REG_DRCC(x) (REG_DCC0 + (x) * 4) /* Drive current register macro */
+
+/* GPIO register offset */
+#define GPIO_OUT 0x0 /* Data Output Register */
+#define GPIO_IN 0x4 /* Data Input Register */
+#define GPIO_DIR 0x8 /* Direction Register */
+#define GPIO_EXINTCR0 0x18 /* External Interrupt Control Register 0 */
+#define GPIO_EXINTCR1 0x1C /* External Interrupt Control Register 1 */
+#define GPIO_IRNCR 0x20 /* IRN Capture Register */
+#define GPIO_IRNICR 0x24 /* IRN Interrupt Control Register */
+#define GPIO_IRNEN 0x28 /* IRN Interrupt Enable Register */
+#define GPIO_IRNCFG 0x2C /* IRN Interrupt Configuration Register */
+#define GPIO_IRNRNSET 0x30 /* IRN Interrupt Enable Set Register */
+#define GPIO_IRNENCLR 0x34 /* IRN Interrupt Enable Clear Register */
+#define GPIO_OUTSET 0x40 /* Output Set Register */
+#define GPIO_OUTCLR 0x44 /* Output Clear Register */
+#define GPIO_DIRSET 0x48 /* Direction Set Register */
+#define GPIO_DIRCLR 0x4C /* Direction Clear Register */
+
+/* parse given pin's driver current value */
+#define PARSE_DRV_CURRENT(val, pin) (((val) >> ((pin) * 2)) & 0x3)
+
+#define GPIO_EDGE_TRIG 0
+#define GPIO_LEVEL_TRIG 1
+#define GPIO_SINGLE_EDGE 0
+#define GPIO_BOTH_EDGE 1
+#define GPIO_POSITIVE_TRIG 0
+#define GPIO_NEGATIVE_TRIG 1
+
+#define EQBR_GPIO_MODE 0
+
+typedef enum {
+ OP_COUNT_NR_FUNCS,
+ OP_ADD_FUNCS,
+ OP_COUNT_NR_FUNC_GRPS,
+ OP_ADD_FUNC_GRPS,
+ OP_NONE,
+} funcs_util_ops;
+
+/**
+ * struct gpio_irq_type: gpio irq configuration
+ * @trig_type: level trigger or edge trigger
+ * @edge_type: single edge or both edges
+ * @logic_type: positive trigger or negative trigger
+ */
+struct gpio_irq_type {
+ unsigned int trig_type;
+ unsigned int edge_type;
+ unsigned int logic_type;
+};
+
+/**
+ * struct eqbr_pmx_func: represent a pin function.
+ * @name: name of the pin function, used to lookup the function.
+ * @groups: one or more names of pin groups that provide this function.
+ * @nr_groups: number of groups included in @groups.
+ */
+struct eqbr_pmx_func {
+ const char *name;
+ const char **groups;
+ unsigned int nr_groups;
+};
+
+/**
+ * struct eqbr_pin_bank: represent a pin bank.
+ * @membase: base address of the pin bank register.
+ * @id: bank id, to identify the unique bank.
+ * @pin_base: starting pin number of the pin bank.
+ * @nr_pins: number of the pins of the pin bank.
+ * @aval_pinmap: available pin bitmap of the pin bank.
+ */
+struct eqbr_pin_bank {
+ void __iomem *membase;
+ unsigned int id;
+ unsigned int pin_base;
+ unsigned int nr_pins;
+ u32 aval_pinmap;
+};
+
+/**
+ * struct eqbr_gpio_ctrl: represents a gpio controller.
+ * @node: device node of gpio controller.
+ * @bank: pointer to corresponding pin bank.
+ * @membase: base address of the gpio controller.
+ * @chip: gpio chip.
+ * @ic: irq chip.
+ * @name: gpio chip name.
+ * @virq: irq number of the gpio chip in the parent's irq domain.
+ * @lock: spin lock to protect gpio register writes.
+ */
+struct eqbr_gpio_ctrl {
+ struct device_node *node;
+ struct eqbr_pin_bank *bank;
+ void __iomem *membase;
+ struct gpio_chip chip;
+ struct irq_chip ic;
+ const char *name;
+ unsigned int virq;
+ raw_spinlock_t lock; /* protect gpio register */
+};
+
+/**
+ * struct eqbr_pinctrl_drv_data: represents the pinctrl driver's private data.
+ * @dev: device instance representing the controller.
+ * @pctl_desc: pin controller descriptor.
+ * @pctl_dev: pin control class device.
+ * @membase: base address of the pin controller.
+ * @pin_banks: list of pin banks of the driver.
+ * @nr_banks: number of pin banks.
+ * @gpio_ctrls: list of gpio controllers.
+ * @nr_gpio_ctrls: number of gpio controllers.
+ * @lock: spin lock to protect pinpad register writes.
+ */
+struct eqbr_pinctrl_drv_data {
+ struct device *dev;
+ struct pinctrl_desc pctl_desc;
+ struct pinctrl_dev *pctl_dev;
+ void __iomem *membase;
+ struct eqbr_pin_bank *pin_banks;
+ unsigned int nr_banks;
+ struct eqbr_gpio_ctrl *gpio_ctrls;
+ unsigned int nr_gpio_ctrls;
+ raw_spinlock_t lock; /* protect pinpad register */
+};
+
+#endif /* __PINCTRL_EQUILIBRIUM_H */
diff --git a/drivers/pinctrl/pinctrl-ingenic.c b/drivers/pinctrl/pinctrl-ingenic.c
index 6e2683016c1f..24e0e2ef47a4 100644
--- a/drivers/pinctrl/pinctrl-ingenic.c
+++ b/drivers/pinctrl/pinctrl-ingenic.c
@@ -686,6 +686,7 @@ static int jz4770_mac_rmii_pins[] = {
0xa9, 0xab, 0xaa, 0xac, 0xa5, 0xa4, 0xad, 0xae, 0xa6, 0xa8,
};
static int jz4770_mac_mii_pins[] = { 0xa7, 0xaf, };
+static int jz4770_otg_pins[] = { 0x8a, };
static int jz4770_uart0_data_funcs[] = { 0, 0, };
static int jz4770_uart0_hwflow_funcs[] = { 0, 0, };
@@ -744,6 +745,7 @@ static int jz4770_pwm_pwm6_funcs[] = { 0, };
static int jz4770_pwm_pwm7_funcs[] = { 0, };
static int jz4770_mac_rmii_funcs[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, };
static int jz4770_mac_mii_funcs[] = { 0, 0, };
+static int jz4770_otg_funcs[] = { 0, };
static const struct group_desc jz4770_groups[] = {
INGENIC_PIN_GROUP("uart0-data", jz4770_uart0_data),
@@ -799,6 +801,7 @@ static const struct group_desc jz4770_groups[] = {
INGENIC_PIN_GROUP("pwm7", jz4770_pwm_pwm7),
INGENIC_PIN_GROUP("mac-rmii", jz4770_mac_rmii),
INGENIC_PIN_GROUP("mac-mii", jz4770_mac_mii),
+ INGENIC_PIN_GROUP("otg-vbus", jz4770_otg),
};
static const char *jz4770_uart0_groups[] = { "uart0-data", "uart0-hwflow", };
@@ -841,6 +844,7 @@ static const char *jz4770_pwm5_groups[] = { "pwm5", };
static const char *jz4770_pwm6_groups[] = { "pwm6", };
static const char *jz4770_pwm7_groups[] = { "pwm7", };
static const char *jz4770_mac_groups[] = { "mac-rmii", "mac-mii", };
+static const char *jz4770_otg_groups[] = { "otg-vbus", };
static const struct function_desc jz4770_functions[] = {
{ "uart0", jz4770_uart0_groups, ARRAY_SIZE(jz4770_uart0_groups), },
@@ -871,6 +875,7 @@ static const struct function_desc jz4770_functions[] = {
{ "pwm6", jz4770_pwm6_groups, ARRAY_SIZE(jz4770_pwm6_groups), },
{ "pwm7", jz4770_pwm7_groups, ARRAY_SIZE(jz4770_pwm7_groups), },
{ "mac", jz4770_mac_groups, ARRAY_SIZE(jz4770_mac_groups), },
+ { "otg", jz4770_otg_groups, ARRAY_SIZE(jz4770_otg_groups), },
};
static const struct ingenic_chip_info jz4770_chip_info = {
@@ -1801,19 +1806,30 @@ static void ingenic_set_bias(struct ingenic_pinctrl *jzpc,
ingenic_config_pin(jzpc, pin, JZ4740_GPIO_PULL_DIS, !enabled);
}
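+/* Set the GPIO output level: JZ4770 and newer use the PAT0 register, older SoCs the DATA register */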
+static void ingenic_set_output_level(struct ingenic_pinctrl *jzpc,
+ unsigned int pin, bool high)
+{
+ if (jzpc->version >= ID_JZ4770)
+ ingenic_config_pin(jzpc, pin, JZ4760_GPIO_PAT0, high);
+ else
+ ingenic_config_pin(jzpc, pin, JZ4740_GPIO_DATA, high);
+}
+
static int ingenic_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
unsigned long *configs, unsigned int num_configs)
{
struct ingenic_pinctrl *jzpc = pinctrl_dev_get_drvdata(pctldev);
unsigned int idx = pin % PINS_PER_GPIO_CHIP;
unsigned int offt = pin / PINS_PER_GPIO_CHIP;
- unsigned int cfg;
+ unsigned int cfg, arg;
+ int ret;
for (cfg = 0; cfg < num_configs; cfg++) {
switch (pinconf_to_config_param(configs[cfg])) {
case PIN_CONFIG_BIAS_DISABLE:
case PIN_CONFIG_BIAS_PULL_UP:
case PIN_CONFIG_BIAS_PULL_DOWN:
+ case PIN_CONFIG_OUTPUT:
continue;
default:
return -ENOTSUPP;
@@ -1821,6 +1837,8 @@ static int ingenic_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
}
for (cfg = 0; cfg < num_configs; cfg++) {
+ arg = pinconf_to_config_argument(configs[cfg]);
+
switch (pinconf_to_config_param(configs[cfg])) {
case PIN_CONFIG_BIAS_DISABLE:
dev_dbg(jzpc->dev, "disable pull-over for pin P%c%u\n",
@@ -1844,6 +1862,14 @@ static int ingenic_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
ingenic_set_bias(jzpc, pin, true);
break;
+ case PIN_CONFIG_OUTPUT:
+ ret = pinctrl_gpio_direction_output(pin);
+ if (ret)
+ return ret;
+
+ ingenic_set_output_level(jzpc, pin, arg);
+ break;
+
default:
unreachable();
}
@@ -1940,6 +1966,7 @@ static int __init ingenic_gpio_probe(struct ingenic_pinctrl *jzpc,
{
struct ingenic_gpio_chip *jzgc;
struct device *dev = jzpc->dev;
+ struct gpio_irq_chip *girq;
unsigned int bank;
int err;
@@ -1982,10 +2009,6 @@ static int __init ingenic_gpio_probe(struct ingenic_pinctrl *jzpc,
jzgc->gc.free = gpiochip_generic_free;
}
- err = devm_gpiochip_add_data(dev, &jzgc->gc, jzgc);
- if (err)
- return err;
-
jzgc->irq = irq_of_parse_and_map(node, 0);
if (!jzgc->irq)
return -EINVAL;
@@ -2000,13 +2023,22 @@ static int __init ingenic_gpio_probe(struct ingenic_pinctrl *jzpc,
jzgc->irq_chip.irq_set_wake = ingenic_gpio_irq_set_wake;
jzgc->irq_chip.flags = IRQCHIP_MASK_ON_SUSPEND;
- err = gpiochip_irqchip_add(&jzgc->gc, &jzgc->irq_chip, 0,
- handle_level_irq, IRQ_TYPE_NONE);
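+	/* Describe the chained irqchip in gc->irq so gpiolib sets it up at registration */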
+ girq = &jzgc->gc.irq;
+ girq->chip = &jzgc->irq_chip;
+ girq->parent_handler = ingenic_gpio_irq_handler;
+ girq->num_parents = 1;
+ girq->parents = devm_kcalloc(dev, 1, sizeof(*girq->parents),
+ GFP_KERNEL);
+ if (!girq->parents)
+ return -ENOMEM;
+ girq->parents[0] = jzgc->irq;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_level_irq;
+
+ err = devm_gpiochip_add_data(dev, &jzgc->gc, jzgc);
if (err)
return err;
- gpiochip_set_chained_irqchip(&jzgc->gc, &jzgc->irq_chip,
- jzgc->irq, ingenic_gpio_irq_handler);
return 0;
}
diff --git a/drivers/pinctrl/pinctrl-lpc18xx.c b/drivers/pinctrl/pinctrl-lpc18xx.c
index 06be55dab341..e4677546aec4 100644
--- a/drivers/pinctrl/pinctrl-lpc18xx.c
+++ b/drivers/pinctrl/pinctrl-lpc18xx.c
@@ -1324,15 +1324,13 @@ static int lpc18xx_create_group_func_map(struct device *dev,
static int lpc18xx_scu_probe(struct platform_device *pdev)
{
struct lpc18xx_scu_data *scu;
- struct resource *res;
int ret;
scu = devm_kzalloc(&pdev->dev, sizeof(*scu), GFP_KERNEL);
if (!scu)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- scu->base = devm_ioremap_resource(&pdev->dev, res);
+ scu->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(scu->base))
return PTR_ERR(scu->base);
diff --git a/drivers/pinctrl/pinctrl-ocelot.c b/drivers/pinctrl/pinctrl-ocelot.c
index fb76fb2e9ea5..eb3dd0d46d6c 100644
--- a/drivers/pinctrl/pinctrl-ocelot.c
+++ b/drivers/pinctrl/pinctrl-ocelot.c
@@ -736,6 +736,7 @@ static int ocelot_gpiochip_register(struct platform_device *pdev,
struct ocelot_pinctrl *info)
{
struct gpio_chip *gc;
+ struct gpio_irq_chip *girq;
int ret, irq;
info->gpio_chip = ocelot_gpiolib_chip;
@@ -747,22 +748,26 @@ static int ocelot_gpiochip_register(struct platform_device *pdev,
gc->of_node = info->dev->of_node;
gc->label = "ocelot-gpio";
- ret = devm_gpiochip_add_data(&pdev->dev, gc, info);
- if (ret)
- return ret;
-
irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
if (irq <= 0)
return irq;
- ret = gpiochip_irqchip_add(gc, &ocelot_irqchip, 0, handle_edge_irq,
- IRQ_TYPE_NONE);
+ girq = &gc->irq;
+ girq->chip = &ocelot_irqchip;
+ girq->parent_handler = ocelot_irq_handler;
+ girq->num_parents = 1;
+ girq->parents = devm_kcalloc(&pdev->dev, 1, sizeof(*girq->parents),
+ GFP_KERNEL);
+ if (!girq->parents)
+ return -ENOMEM;
+ girq->parents[0] = irq;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_edge_irq;
+
+ ret = devm_gpiochip_add_data(&pdev->dev, gc, info);
if (ret)
return ret;
- gpiochip_set_chained_irqchip(gc, &ocelot_irqchip, irq,
- ocelot_irq_handler);
-
return 0;
}
diff --git a/drivers/pinctrl/pinctrl-oxnas.c b/drivers/pinctrl/pinctrl-oxnas.c
index 55488ca246f1..674b7b5919df 100644
--- a/drivers/pinctrl/pinctrl-oxnas.c
+++ b/drivers/pinctrl/pinctrl-oxnas.c
@@ -1196,7 +1196,7 @@ static int oxnas_gpio_probe(struct platform_device *pdev)
struct oxnas_gpio_bank *bank;
unsigned int id, ngpios;
int irq, ret;
- struct resource *res;
+ struct gpio_irq_chip *girq;
if (of_parse_phandle_with_fixed_args(np, "gpio-ranges",
3, 0, &pinspec)) {
@@ -1219,8 +1219,7 @@ static int oxnas_gpio_probe(struct platform_device *pdev)
bank = &oxnas_gpio_banks[id];
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- bank->reg_base = devm_ioremap_resource(&pdev->dev, res);
+ bank->reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(bank->reg_base))
return PTR_ERR(bank->reg_base);
@@ -1232,6 +1231,18 @@ static int oxnas_gpio_probe(struct platform_device *pdev)
bank->gpio_chip.parent = &pdev->dev;
bank->gpio_chip.of_node = np;
bank->gpio_chip.ngpio = ngpios;
+ girq = &bank->gpio_chip.irq;
+ girq->chip = &bank->irq_chip;
+ girq->parent_handler = oxnas_gpio_irq_handler;
+ girq->num_parents = 1;
+ girq->parents = devm_kcalloc(&pdev->dev, 1, sizeof(*girq->parents),
+ GFP_KERNEL);
+ if (!girq->parents)
+ return -ENOMEM;
+ girq->parents[0] = irq;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_level_irq;
+
ret = gpiochip_add_data(&bank->gpio_chip, bank);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to add GPIO chip %u: %d\n",
@@ -1239,18 +1250,6 @@ static int oxnas_gpio_probe(struct platform_device *pdev)
return ret;
}
- ret = gpiochip_irqchip_add(&bank->gpio_chip, &bank->irq_chip,
- 0, handle_level_irq, IRQ_TYPE_NONE);
- if (ret < 0) {
- dev_err(&pdev->dev, "Failed to add IRQ chip %u: %d\n",
- id, ret);
- gpiochip_remove(&bank->gpio_chip);
- return ret;
- }
-
- gpiochip_set_chained_irqchip(&bank->gpio_chip, &bank->irq_chip,
- irq, oxnas_gpio_irq_handler);
-
return 0;
}
diff --git a/drivers/pinctrl/pinctrl-pic32.c b/drivers/pinctrl/pinctrl-pic32.c
index e7f6dd5ab578..e5d6d3f9753e 100644
--- a/drivers/pinctrl/pinctrl-pic32.c
+++ b/drivers/pinctrl/pinctrl-pic32.c
@@ -2202,7 +2202,7 @@ static int pic32_gpio_probe(struct platform_device *pdev)
struct pic32_gpio_bank *bank;
u32 id;
int irq, ret;
- struct resource *res;
+ struct gpio_irq_chip *girq;
if (of_property_read_u32(np, "microchip,gpio-bank", &id)) {
dev_err(&pdev->dev, "microchip,gpio-bank property not found\n");
@@ -2216,8 +2216,7 @@ static int pic32_gpio_probe(struct platform_device *pdev)
bank = &pic32_gpio_banks[id];
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- bank->reg_base = devm_ioremap_resource(&pdev->dev, res);
+ bank->reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(bank->reg_base))
return PTR_ERR(bank->reg_base);
@@ -2240,25 +2239,23 @@ static int pic32_gpio_probe(struct platform_device *pdev)
bank->gpio_chip.parent = &pdev->dev;
bank->gpio_chip.of_node = np;
+ girq = &bank->gpio_chip.irq;
+ girq->chip = &bank->irq_chip;
+ girq->parent_handler = pic32_gpio_irq_handler;
+ girq->num_parents = 1;
+ girq->parents = devm_kcalloc(&pdev->dev, 1, sizeof(*girq->parents),
+ GFP_KERNEL);
+ if (!girq->parents)
+ return -ENOMEM;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_level_irq;
+ girq->parents[0] = irq;
ret = gpiochip_add_data(&bank->gpio_chip, bank);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to add GPIO chip %u: %d\n",
id, ret);
return ret;
}
-
- ret = gpiochip_irqchip_add(&bank->gpio_chip, &bank->irq_chip,
- 0, handle_level_irq, IRQ_TYPE_NONE);
- if (ret < 0) {
- dev_err(&pdev->dev, "Failed to add IRQ chip %u: %d\n",
- id, ret);
- gpiochip_remove(&bank->gpio_chip);
- return ret;
- }
-
- gpiochip_set_chained_irqchip(&bank->gpio_chip, &bank->irq_chip,
- irq, pic32_gpio_irq_handler);
-
return 0;
}
diff --git a/drivers/pinctrl/pinctrl-pistachio.c b/drivers/pinctrl/pinctrl-pistachio.c
index 379e9a6a6d89..fa370c171cad 100644
--- a/drivers/pinctrl/pinctrl-pistachio.c
+++ b/drivers/pinctrl/pinctrl-pistachio.c
@@ -1352,6 +1352,7 @@ static int pistachio_gpio_register(struct pistachio_pinctrl *pctl)
for (i = 0; i < pctl->nbanks; i++) {
char child_name[sizeof("gpioXX")];
struct device_node *child;
+ struct gpio_irq_chip *girq;
snprintf(child_name, sizeof(child_name), "gpio%d", i);
child = of_get_child_by_name(node, child_name);
@@ -1383,23 +1384,28 @@ static int pistachio_gpio_register(struct pistachio_pinctrl *pctl)
bank->gpio_chip.parent = pctl->dev;
bank->gpio_chip.of_node = child;
- ret = gpiochip_add_data(&bank->gpio_chip, bank);
- if (ret < 0) {
- dev_err(pctl->dev, "Failed to add GPIO chip %u: %d\n",
- i, ret);
+
+ girq = &bank->gpio_chip.irq;
+ girq->chip = &bank->irq_chip;
+ girq->parent_handler = pistachio_gpio_irq_handler;
+ girq->num_parents = 1;
+ girq->parents = devm_kcalloc(pctl->dev, 1,
+ sizeof(*girq->parents),
+ GFP_KERNEL);
+ if (!girq->parents) {
+ ret = -ENOMEM;
goto err;
}
+ girq->parents[0] = irq;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_level_irq;
- ret = gpiochip_irqchip_add(&bank->gpio_chip, &bank->irq_chip,
- 0, handle_level_irq, IRQ_TYPE_NONE);
+ ret = gpiochip_add_data(&bank->gpio_chip, bank);
if (ret < 0) {
- dev_err(pctl->dev, "Failed to add IRQ chip %u: %d\n",
+ dev_err(pctl->dev, "Failed to add GPIO chip %u: %d\n",
i, ret);
- gpiochip_remove(&bank->gpio_chip);
goto err;
}
- gpiochip_set_chained_irqchip(&bank->gpio_chip, &bank->irq_chip,
- irq, pistachio_gpio_irq_handler);
ret = gpiochip_add_pin_range(&bank->gpio_chip,
dev_name(pctl->dev), 0,
@@ -1429,7 +1435,6 @@ static const struct of_device_id pistachio_pinctrl_of_match[] = {
static int pistachio_pinctrl_probe(struct platform_device *pdev)
{
struct pistachio_pinctrl *pctl;
- struct resource *res;
pctl = devm_kzalloc(&pdev->dev, sizeof(*pctl), GFP_KERNEL);
if (!pctl)
@@ -1437,8 +1442,7 @@ static int pistachio_pinctrl_probe(struct platform_device *pdev)
pctl->dev = &pdev->dev;
dev_set_drvdata(&pdev->dev, pctl);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pctl->base = devm_ioremap_resource(&pdev->dev, res);
+ pctl->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pctl->base))
return PTR_ERR(pctl->base);
diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
index dc0bbf198cbc..fc9a2a9959d9 100644
--- a/drivers/pinctrl/pinctrl-rockchip.c
+++ b/drivers/pinctrl/pinctrl-rockchip.c
@@ -58,6 +58,7 @@ enum rockchip_pinctrl_type {
RK3128,
RK3188,
RK3288,
+ RK3308,
RK3368,
RK3399,
};
@@ -70,6 +71,7 @@ enum rockchip_pinctrl_type {
#define IOMUX_SOURCE_PMU BIT(2)
#define IOMUX_UNROUTED BIT(3)
#define IOMUX_WIDTH_3BIT BIT(4)
+#define IOMUX_WIDTH_2BIT BIT(5)
/**
* @type: iomux variant using IOMUX_* constants
@@ -656,6 +658,100 @@ static struct rockchip_mux_recalced_data rk3128_mux_recalced_data[] = {
},
};
+static struct rockchip_mux_recalced_data rk3308_mux_recalced_data[] = {
+ {
+ .num = 1,
+ .pin = 14,
+ .reg = 0x28,
+ .bit = 12,
+ .mask = 0xf
+ }, {
+ .num = 1,
+ .pin = 15,
+ .reg = 0x2c,
+ .bit = 0,
+ .mask = 0x3
+ }, {
+ .num = 1,
+ .pin = 18,
+ .reg = 0x30,
+ .bit = 4,
+ .mask = 0xf
+ }, {
+ .num = 1,
+ .pin = 19,
+ .reg = 0x30,
+ .bit = 8,
+ .mask = 0xf
+ }, {
+ .num = 1,
+ .pin = 20,
+ .reg = 0x30,
+ .bit = 12,
+ .mask = 0xf
+ }, {
+ .num = 1,
+ .pin = 21,
+ .reg = 0x34,
+ .bit = 0,
+ .mask = 0xf
+ }, {
+ .num = 1,
+ .pin = 22,
+ .reg = 0x34,
+ .bit = 4,
+ .mask = 0xf
+ }, {
+ .num = 1,
+ .pin = 23,
+ .reg = 0x34,
+ .bit = 8,
+ .mask = 0xf
+ }, {
+ .num = 3,
+ .pin = 12,
+ .reg = 0x68,
+ .bit = 8,
+ .mask = 0xf
+ }, {
+ .num = 3,
+ .pin = 13,
+ .reg = 0x68,
+ .bit = 12,
+ .mask = 0xf
+ }, {
+ .num = 2,
+ .pin = 2,
+ .reg = 0x608,
+ .bit = 0,
+ .mask = 0x7
+ }, {
+ .num = 2,
+ .pin = 3,
+ .reg = 0x608,
+ .bit = 4,
+ .mask = 0x7
+ }, {
+ .num = 2,
+ .pin = 16,
+ .reg = 0x610,
+ .bit = 8,
+ .mask = 0x7
+ }, {
+ .num = 3,
+ .pin = 10,
+ .reg = 0x610,
+ .bit = 0,
+ .mask = 0x7
+ }, {
+ .num = 3,
+ .pin = 11,
+ .reg = 0x610,
+ .bit = 4,
+ .mask = 0x7
+ },
+};
+
static struct rockchip_mux_recalced_data rk3328_mux_recalced_data[] = {
{
.num = 2,
@@ -982,6 +1078,192 @@ static struct rockchip_mux_route_data rk3288_mux_route_data[] = {
},
};
+static struct rockchip_mux_route_data rk3308_mux_route_data[] = {
+ {
+ /* rtc_clk */
+ .bank_num = 0,
+ .pin = 19,
+ .func = 1,
+ .route_offset = 0x314,
+ .route_val = BIT(16 + 0) | BIT(0),
+ }, {
+ /* uart2_rxm0 */
+ .bank_num = 1,
+ .pin = 22,
+ .func = 2,
+ .route_offset = 0x314,
+ .route_val = BIT(16 + 2) | BIT(16 + 3),
+ }, {
+ /* uart2_rxm1 */
+ .bank_num = 4,
+ .pin = 26,
+ .func = 2,
+ .route_offset = 0x314,
+ .route_val = BIT(16 + 2) | BIT(16 + 3) | BIT(2),
+ }, {
+ /* i2c3_sdam0 */
+ .bank_num = 0,
+ .pin = 15,
+ .func = 2,
+ .route_offset = 0x608,
+ .route_val = BIT(16 + 8) | BIT(16 + 9),
+ }, {
+ /* i2c3_sdam1 */
+ .bank_num = 3,
+ .pin = 12,
+ .func = 2,
+ .route_offset = 0x608,
+ .route_val = BIT(16 + 8) | BIT(16 + 9) | BIT(8),
+ }, {
+ /* i2c3_sdam2 */
+ .bank_num = 2,
+ .pin = 0,
+ .func = 3,
+ .route_offset = 0x608,
+ .route_val = BIT(16 + 8) | BIT(16 + 9) | BIT(9),
+ }, {
+ /* i2s-8ch-1-sclktxm0 */
+ .bank_num = 1,
+ .pin = 3,
+ .func = 2,
+ .route_offset = 0x308,
+ .route_val = BIT(16 + 3),
+ }, {
+ /* i2s-8ch-1-sclkrxm0 */
+ .bank_num = 1,
+ .pin = 4,
+ .func = 2,
+ .route_offset = 0x308,
+ .route_val = BIT(16 + 3),
+ }, {
+ /* i2s-8ch-1-sclktxm1 */
+ .bank_num = 1,
+ .pin = 13,
+ .func = 2,
+ .route_offset = 0x308,
+ .route_val = BIT(16 + 3) | BIT(3),
+ }, {
+ /* i2s-8ch-1-sclkrxm1 */
+ .bank_num = 1,
+ .pin = 14,
+ .func = 2,
+ .route_offset = 0x308,
+ .route_val = BIT(16 + 3) | BIT(3),
+ }, {
+ /* pdm-clkm0 */
+ .bank_num = 1,
+ .pin = 4,
+ .func = 3,
+ .route_offset = 0x308,
+ .route_val = BIT(16 + 12) | BIT(16 + 13),
+ }, {
+ /* pdm-clkm1 */
+ .bank_num = 1,
+ .pin = 14,
+ .func = 4,
+ .route_offset = 0x308,
+ .route_val = BIT(16 + 12) | BIT(16 + 13) | BIT(12),
+ }, {
+ /* pdm-clkm2 */
+ .bank_num = 2,
+ .pin = 6,
+ .func = 2,
+ .route_offset = 0x308,
+ .route_val = BIT(16 + 12) | BIT(16 + 13) | BIT(13),
+ }, {
+ /* pdm-clkm-m2 */
+ .bank_num = 2,
+ .pin = 4,
+ .func = 3,
+ .route_offset = 0x600,
+ .route_val = BIT(16 + 2) | BIT(2),
+ }, {
+ /* spi1_miso */
+ .bank_num = 3,
+ .pin = 10,
+ .func = 3,
+ .route_offset = 0x314,
+ .route_val = BIT(16 + 9),
+ }, {
+ /* spi1_miso_m1 */
+ .bank_num = 2,
+ .pin = 4,
+ .func = 2,
+ .route_offset = 0x314,
+ .route_val = BIT(16 + 9) | BIT(9),
+ }, {
+ /* owire_m0 */
+ .bank_num = 0,
+ .pin = 11,
+ .func = 3,
+ .route_offset = 0x314,
+ .route_val = BIT(16 + 10) | BIT(16 + 11),
+ }, {
+ /* owire_m1 */
+ .bank_num = 1,
+ .pin = 22,
+ .func = 7,
+ .route_offset = 0x314,
+ .route_val = BIT(16 + 10) | BIT(16 + 11) | BIT(10),
+ }, {
+ /* owire_m2 */
+ .bank_num = 2,
+ .pin = 2,
+ .func = 5,
+ .route_offset = 0x314,
+ .route_val = BIT(16 + 10) | BIT(16 + 11) | BIT(11),
+ }, {
+ /* can_rxd_m0 */
+ .bank_num = 0,
+ .pin = 11,
+ .func = 2,
+ .route_offset = 0x314,
+ .route_val = BIT(16 + 12) | BIT(16 + 13),
+ }, {
+ /* can_rxd_m1 */
+ .bank_num = 1,
+ .pin = 22,
+ .func = 5,
+ .route_offset = 0x314,
+ .route_val = BIT(16 + 12) | BIT(16 + 13) | BIT(12),
+ }, {
+ /* can_rxd_m2 */
+ .bank_num = 2,
+ .pin = 2,
+ .func = 4,
+ .route_offset = 0x314,
+ .route_val = BIT(16 + 12) | BIT(16 + 13) | BIT(13),
+ }, {
+ /* mac_rxd0_m0 */
+ .bank_num = 1,
+ .pin = 20,
+ .func = 3,
+ .route_offset = 0x314,
+ .route_val = BIT(16 + 14),
+ }, {
+ /* mac_rxd0_m1 */
+ .bank_num = 4,
+ .pin = 2,
+ .func = 2,
+ .route_offset = 0x314,
+ .route_val = BIT(16 + 14) | BIT(14),
+ }, {
+ /* uart3_rx */
+ .bank_num = 3,
+ .pin = 12,
+ .func = 4,
+ .route_offset = 0x314,
+ .route_val = BIT(16 + 15),
+ }, {
+ /* uart3_rx_m1 */
+ .bank_num = 0,
+ .pin = 17,
+ .func = 3,
+ .route_offset = 0x314,
+ .route_val = BIT(16 + 15) | BIT(15),
+ },
+};
+
static struct rockchip_mux_route_data rk3328_mux_route_data[] = {
{
/* uart2dbg_rxm0 */
@@ -1475,6 +1757,26 @@ static int rv1108_calc_schmitt_reg_and_bit(struct rockchip_pin_bank *bank,
return 0;
}
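+/* RK3308 Schmitt trigger control: one enable bit per pin, eight pins per register */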
+#define RK3308_SCHMITT_PINS_PER_REG 8
+#define RK3308_SCHMITT_BANK_STRIDE 16
+#define RK3308_SCHMITT_GRF_OFFSET 0x1a0
+
+static int rk3308_calc_schmitt_reg_and_bit(struct rockchip_pin_bank *bank,
+ int pin_num, struct regmap **regmap,
+ int *reg, u8 *bit)
+{
+ struct rockchip_pinctrl *info = bank->drvdata;
+
+ *regmap = info->regmap_base;
+ *reg = RK3308_SCHMITT_GRF_OFFSET;
+
+ *reg += bank->bank_num * RK3308_SCHMITT_BANK_STRIDE;
+ *reg += ((pin_num / RK3308_SCHMITT_PINS_PER_REG) * 4);
+ *bit = pin_num % RK3308_SCHMITT_PINS_PER_REG;
+
+ return 0;
+}
+
#define RK2928_PULL_OFFSET 0x118
#define RK2928_PULL_PINS_PER_REG 16
#define RK2928_PULL_BANK_STRIDE 8
@@ -1646,6 +1948,40 @@ static void rk3228_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank,
*bit *= RK3288_DRV_BITS_PER_PIN;
}
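+/* RK3308 reuses the RK3188 per-pin pull register layout at its own offset */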
+#define RK3308_PULL_OFFSET 0xa0
+
+static void rk3308_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank,
+ int pin_num, struct regmap **regmap,
+ int *reg, u8 *bit)
+{
+ struct rockchip_pinctrl *info = bank->drvdata;
+
+ *regmap = info->regmap_base;
+ *reg = RK3308_PULL_OFFSET;
+ *reg += bank->bank_num * RK3188_PULL_BANK_STRIDE;
+ *reg += ((pin_num / RK3188_PULL_PINS_PER_REG) * 4);
+
+ *bit = (pin_num % RK3188_PULL_PINS_PER_REG);
+ *bit *= RK3188_PULL_BITS_PER_PIN;
+}
+
+#define RK3308_DRV_GRF_OFFSET 0x100
+
+static void rk3308_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank,
+ int pin_num, struct regmap **regmap,
+ int *reg, u8 *bit)
+{
+ struct rockchip_pinctrl *info = bank->drvdata;
+
+ *regmap = info->regmap_base;
+ *reg = RK3308_DRV_GRF_OFFSET;
+ *reg += bank->bank_num * RK3288_DRV_BANK_STRIDE;
+ *reg += ((pin_num / RK3288_DRV_PINS_PER_REG) * 4);
+
+ *bit = (pin_num % RK3288_DRV_PINS_PER_REG);
+ *bit *= RK3288_DRV_BITS_PER_PIN;
+}
+
#define RK3368_PULL_GRF_OFFSET 0x100
#define RK3368_PULL_PMU_OFFSET 0x10
@@ -1986,6 +2322,7 @@ static int rockchip_get_pull(struct rockchip_pin_bank *bank, int pin_num)
case RV1108:
case RK3188:
case RK3288:
+ case RK3308:
case RK3368:
case RK3399:
pull_type = bank->pull_type[pin_num / 8];
@@ -2030,6 +2367,7 @@ static int rockchip_set_pull(struct rockchip_pin_bank *bank,
case RV1108:
case RK3188:
case RK3288:
+ case RK3308:
case RK3368:
case RK3399:
pull_type = bank->pull_type[pin_num / 8];
@@ -2293,6 +2631,7 @@ static bool rockchip_pinconf_pull_valid(struct rockchip_pin_ctrl *ctrl,
case RV1108:
case RK3188:
case RK3288:
+ case RK3308:
case RK3368:
case RK3399:
return (pull != PIN_CONFIG_BIAS_PULL_PIN_DEFAULT);
@@ -3303,7 +3642,8 @@ static struct rockchip_pin_ctrl *rockchip_pinctrl_get_soc_data(
* 4bit iomux'es are spread over two registers.
*/
inc = (iom->type & (IOMUX_WIDTH_4BIT |
- IOMUX_WIDTH_3BIT)) ? 8 : 4;
+ IOMUX_WIDTH_3BIT |
+ IOMUX_WIDTH_2BIT)) ? 8 : 4;
if (iom->type & IOMUX_SOURCE_PMU)
pmu_offs += inc;
else
@@ -3709,6 +4049,44 @@ static struct rockchip_pin_ctrl rk3288_pin_ctrl = {
.drv_calc_reg = rk3288_calc_drv_reg_and_bit,
};
+static struct rockchip_pin_bank rk3308_pin_banks[] = {
+ PIN_BANK_IOMUX_FLAGS(0, 32, "gpio0", IOMUX_WIDTH_2BIT,
+ IOMUX_WIDTH_2BIT,
+ IOMUX_WIDTH_2BIT,
+ IOMUX_WIDTH_2BIT),
+ PIN_BANK_IOMUX_FLAGS(1, 32, "gpio1", IOMUX_WIDTH_2BIT,
+ IOMUX_WIDTH_2BIT,
+ IOMUX_WIDTH_2BIT,
+ IOMUX_WIDTH_2BIT),
+ PIN_BANK_IOMUX_FLAGS(2, 32, "gpio2", IOMUX_WIDTH_2BIT,
+ IOMUX_WIDTH_2BIT,
+ IOMUX_WIDTH_2BIT,
+ IOMUX_WIDTH_2BIT),
+ PIN_BANK_IOMUX_FLAGS(3, 32, "gpio3", IOMUX_WIDTH_2BIT,
+ IOMUX_WIDTH_2BIT,
+ IOMUX_WIDTH_2BIT,
+ IOMUX_WIDTH_2BIT),
+ PIN_BANK_IOMUX_FLAGS(4, 32, "gpio4", IOMUX_WIDTH_2BIT,
+ IOMUX_WIDTH_2BIT,
+ IOMUX_WIDTH_2BIT,
+ IOMUX_WIDTH_2BIT),
+};
+
+static struct rockchip_pin_ctrl rk3308_pin_ctrl = {
+ .pin_banks = rk3308_pin_banks,
+ .nr_banks = ARRAY_SIZE(rk3308_pin_banks),
+ .label = "RK3308-GPIO",
+ .type = RK3308,
+ .grf_mux_offset = 0x0,
+ .iomux_recalced = rk3308_mux_recalced_data,
+ .niomux_recalced = ARRAY_SIZE(rk3308_mux_recalced_data),
+ .iomux_routes = rk3308_mux_route_data,
+ .niomux_routes = ARRAY_SIZE(rk3308_mux_route_data),
+ .pull_calc_reg = rk3308_calc_pull_reg_and_bit,
+ .drv_calc_reg = rk3308_calc_drv_reg_and_bit,
+ .schmitt_calc_reg = rk3308_calc_schmitt_reg_and_bit,
+};
+
static struct rockchip_pin_bank rk3328_pin_banks[] = {
PIN_BANK_IOMUX_FLAGS(0, 32, "gpio0", 0, 0, 0, 0),
PIN_BANK_IOMUX_FLAGS(1, 32, "gpio1", 0, 0, 0, 0),
@@ -3849,6 +4227,8 @@ static const struct of_device_id rockchip_pinctrl_dt_match[] = {
.data = &rk3228_pin_ctrl },
{ .compatible = "rockchip,rk3288-pinctrl",
.data = &rk3288_pin_ctrl },
+ { .compatible = "rockchip,rk3308-pinctrl",
+ .data = &rk3308_pin_ctrl },
{ .compatible = "rockchip,rk3328-pinctrl",
.data = &rk3328_pin_ctrl },
{ .compatible = "rockchip,rk3368-pinctrl",
diff --git a/drivers/pinctrl/pinctrl-rza1.c b/drivers/pinctrl/pinctrl-rza1.c
index 017fc6b3e27e..215db220d795 100644
--- a/drivers/pinctrl/pinctrl-rza1.c
+++ b/drivers/pinctrl/pinctrl-rza1.c
@@ -617,12 +617,6 @@ static void rza1_pin_reset(struct rza1_port *port, unsigned int pin)
spin_unlock_irqrestore(&port->lock, irqflags);
}
-static inline int rza1_pin_get_direction(struct rza1_port *port,
- unsigned int pin)
-{
- return !!rza1_get_bit(port, RZA1_PM_REG, pin);
-}
-
/**
* rza1_pin_set_direction() - set I/O direction on a pin in port mode
*
@@ -783,7 +777,7 @@ static int rza1_gpio_get_direction(struct gpio_chip *chip, unsigned int gpio)
{
struct rza1_port *port = gpiochip_get_data(chip);
- return rza1_pin_get_direction(port, gpio);
+ return !!rza1_get_bit(port, RZA1_PM_REG, gpio);
}
static int rza1_gpio_direction_input(struct gpio_chip *chip,
diff --git a/drivers/pinctrl/pinctrl-rza2.c b/drivers/pinctrl/pinctrl-rza2.c
index 3be1d833bf25..a205964e839b 100644
--- a/drivers/pinctrl/pinctrl-rza2.c
+++ b/drivers/pinctrl/pinctrl-rza2.c
@@ -213,8 +213,8 @@ static const char * const rza2_gpio_names[] = {
"PC_0", "PC_1", "PC_2", "PC_3", "PC_4", "PC_5", "PC_6", "PC_7",
"PD_0", "PD_1", "PD_2", "PD_3", "PD_4", "PD_5", "PD_6", "PD_7",
"PE_0", "PE_1", "PE_2", "PE_3", "PE_4", "PE_5", "PE_6", "PE_7",
- "PF_0", "PF_1", "PF_2", "PF_3", "P0_4", "PF_5", "PF_6", "PF_7",
- "PG_0", "PG_1", "PG_2", "P0_3", "PG_4", "PG_5", "PG_6", "PG_7",
+ "PF_0", "PF_1", "PF_2", "PF_3", "PF_4", "PF_5", "PF_6", "PF_7",
+ "PG_0", "PG_1", "PG_2", "PG_3", "PG_4", "PG_5", "PG_6", "PG_7",
"PH_0", "PH_1", "PH_2", "PH_3", "PH_4", "PH_5", "PH_6", "PH_7",
/* port I does not exist */
"PJ_0", "PJ_1", "PJ_2", "PJ_3", "PJ_4", "PJ_5", "PJ_6", "PJ_7",
@@ -462,7 +462,6 @@ static const struct pinmux_ops rza2_pinmux_ops = {
static int rza2_pinctrl_probe(struct platform_device *pdev)
{
struct rza2_pinctrl_priv *priv;
- struct resource *res;
int ret;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
@@ -471,8 +470,7 @@ static int rza2_pinctrl_probe(struct platform_device *pdev)
priv->dev = &pdev->dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->base = devm_ioremap_resource(&pdev->dev, res);
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
diff --git a/drivers/pinctrl/pinctrl-rzn1.c b/drivers/pinctrl/pinctrl-rzn1.c
index 0f6f8a10a53a..39538d40dbf3 100644
--- a/drivers/pinctrl/pinctrl-rzn1.c
+++ b/drivers/pinctrl/pinctrl-rzn1.c
@@ -487,7 +487,7 @@ static int rzn1_pinconf_get(struct pinctrl_dev *pctldev, unsigned int pin,
{
struct rzn1_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
enum pin_config_param param = pinconf_to_config_param(*config);
- const u32 reg_drive[4] = { 4, 6, 8, 12 };
+ static const u32 reg_drive[4] = { 4, 6, 8, 12 };
u32 pull, drive, l1mux;
u32 l1, l2, arg = 0;
diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c
index 00db8b9efb2c..4f39a7945d01 100644
--- a/drivers/pinctrl/pinctrl-st.c
+++ b/drivers/pinctrl/pinctrl-st.c
@@ -1477,7 +1477,7 @@ static int st_gpiolib_register_bank(struct st_pinctrl *info,
struct device *dev = info->dev;
int bank_num = of_alias_get_id(np, "gpio");
struct resource res, irq_res;
- int gpio_irq = 0, err;
+ int err;
if (of_address_to_resource(np, 0, &res))
return -ENODEV;
@@ -1500,12 +1500,6 @@ static int st_gpiolib_register_bank(struct st_pinctrl *info,
range->pin_base = range->base = range->id * ST_GPIO_PINS_PER_BANK;
range->npins = bank->gpio_chip.ngpio;
range->gc = &bank->gpio_chip;
- err = gpiochip_add_data(&bank->gpio_chip, bank);
- if (err) {
- dev_err(dev, "Failed to add gpiochip(%d)!\n", bank_num);
- return err;
- }
- dev_info(dev, "%s bank added.\n", range->name);
/**
* GPIO bank can have one of the two possible types of
@@ -1527,23 +1521,40 @@ static int st_gpiolib_register_bank(struct st_pinctrl *info,
*/
if (of_irq_to_resource(np, 0, &irq_res) > 0) {
- gpio_irq = irq_res.start;
- gpiochip_set_chained_irqchip(&bank->gpio_chip, &st_gpio_irqchip,
- gpio_irq, st_gpio_irq_handler);
- }
+ struct gpio_irq_chip *girq;
+ int gpio_irq = irq_res.start;
- if (info->irqmux_base || gpio_irq > 0) {
- err = gpiochip_irqchip_add(&bank->gpio_chip, &st_gpio_irqchip,
- 0, handle_simple_irq,
- IRQ_TYPE_NONE);
- if (err) {
- gpiochip_remove(&bank->gpio_chip);
- dev_info(dev, "could not add irqchip\n");
- return err;
+ /* This is not a valid IRQ */
+ if (gpio_irq <= 0) {
+ dev_err(dev, "invalid IRQ for %pOF bank\n", np);
+ goto skip_irq;
}
- } else {
- dev_info(dev, "No IRQ support for %pOF bank\n", np);
+ /* We need to have a mux as well */
+ if (!info->irqmux_base) {
+ dev_err(dev, "no irqmux for %pOF bank\n", np);
+ goto skip_irq;
+ }
+
+ girq = &bank->gpio_chip.irq;
+ girq->chip = &st_gpio_irqchip;
+ girq->parent_handler = st_gpio_irq_handler;
+ girq->num_parents = 1;
+ girq->parents = devm_kcalloc(dev, 1, sizeof(*girq->parents),
+ GFP_KERNEL);
+ if (!girq->parents)
+ return -ENOMEM;
+ girq->parents[0] = gpio_irq;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_simple_irq;
+ }
+
+skip_irq:
+ err = gpiochip_add_data(&bank->gpio_chip, bank);
+ if (err) {
+ dev_err(dev, "Failed to add gpiochip(%d)!\n", bank_num);
+ return err;
}
+ dev_info(dev, "%s bank added.\n", range->name);
return 0;
}
diff --git a/drivers/pinctrl/pinctrl-stmfx.c b/drivers/pinctrl/pinctrl-stmfx.c
index ccdf0bb21414..16723797fa7c 100644
--- a/drivers/pinctrl/pinctrl-stmfx.c
+++ b/drivers/pinctrl/pinctrl-stmfx.c
@@ -505,6 +505,25 @@ static void stmfx_pinctrl_irq_bus_sync_unlock(struct irq_data *data)
mutex_unlock(&pctl->lock);
}
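+/* Put the pin in input mode before reserving it for use as an interrupt */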
+static int stmfx_gpio_irq_request_resources(struct irq_data *data)
+{
+ struct gpio_chip *gpio_chip = irq_data_get_irq_chip_data(data);
+ int ret;
+
+ ret = stmfx_gpio_direction_input(gpio_chip, data->hwirq);
+ if (ret)
+ return ret;
+
+ return gpiochip_reqres_irq(gpio_chip, data->hwirq);
+}
+
+static void stmfx_gpio_irq_release_resources(struct irq_data *data)
+{
+ struct gpio_chip *gpio_chip = irq_data_get_irq_chip_data(data);
+
+ return gpiochip_relres_irq(gpio_chip, data->hwirq);
+}
+
static void stmfx_pinctrl_irq_toggle_trigger(struct stmfx_pinctrl *pctl,
unsigned int offset)
{
@@ -664,6 +683,8 @@ static int stmfx_pinctrl_probe(struct platform_device *pdev)
pctl->irq_chip.irq_set_type = stmfx_pinctrl_irq_set_type;
pctl->irq_chip.irq_bus_lock = stmfx_pinctrl_irq_bus_lock;
pctl->irq_chip.irq_bus_sync_unlock = stmfx_pinctrl_irq_bus_sync_unlock;
+ pctl->irq_chip.irq_request_resources = stmfx_gpio_irq_request_resources;
+ pctl->irq_chip.irq_release_resources = stmfx_gpio_irq_release_resources;
ret = gpiochip_irqchip_add_nested(&pctl->gpio_chip, &pctl->irq_chip,
0, handle_bad_irq, IRQ_TYPE_NONE);
diff --git a/drivers/pinctrl/pinctrl-tb10x.c b/drivers/pinctrl/pinctrl-tb10x.c
index 1f64e2e7efd9..ab49bd708969 100644
--- a/drivers/pinctrl/pinctrl-tb10x.c
+++ b/drivers/pinctrl/pinctrl-tb10x.c
@@ -747,7 +747,6 @@ static struct pinctrl_desc tb10x_pindesc = {
static int tb10x_pinctrl_probe(struct platform_device *pdev)
{
int ret = -EINVAL;
- struct resource *mem;
struct device *dev = &pdev->dev;
struct device_node *of_node = dev->of_node;
struct device_node *child;
@@ -768,8 +767,7 @@ static int tb10x_pinctrl_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, state);
mutex_init(&state->mutex);
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- state->base = devm_ioremap_resource(dev, mem);
+ state->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(state->base)) {
ret = PTR_ERR(state->base);
goto fail;
diff --git a/drivers/pinctrl/pinctrl-u300.c b/drivers/pinctrl/pinctrl-u300.c
index 348423bb39dd..cc306448259e 100644
--- a/drivers/pinctrl/pinctrl-u300.c
+++ b/drivers/pinctrl/pinctrl-u300.c
@@ -1055,7 +1055,6 @@ static struct pinctrl_desc u300_pmx_desc = {
static int u300_pmx_probe(struct platform_device *pdev)
{
struct u300_pmx *upmx;
- struct resource *res;
/* Create state holders etc for this driver */
upmx = devm_kzalloc(&pdev->dev, sizeof(*upmx), GFP_KERNEL);
@@ -1064,8 +1063,7 @@ static int u300_pmx_probe(struct platform_device *pdev)
upmx->dev = &pdev->dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- upmx->virtbase = devm_ioremap_resource(&pdev->dev, res);
+ upmx->virtbase = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(upmx->virtbase))
return PTR_ERR(upmx->virtbase);
diff --git a/drivers/pinctrl/pinctrl-xway.c b/drivers/pinctrl/pinctrl-xway.c
index 913d38f29b73..5e3f31b55eb7 100644
--- a/drivers/pinctrl/pinctrl-xway.c
+++ b/drivers/pinctrl/pinctrl-xway.c
@@ -1705,12 +1705,10 @@ static int pinmux_xway_probe(struct platform_device *pdev)
{
const struct of_device_id *match;
const struct pinctrl_xway_soc *xway_soc;
- struct resource *res;
int ret, i;
/* get and remap our register range */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- xway_info.membase[0] = devm_ioremap_resource(&pdev->dev, res);
+ xway_info.membase[0] = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(xway_info.membase[0]))
return PTR_ERR(xway_info.membase[0]);
diff --git a/drivers/pinctrl/pxa/pinctrl-pxa25x.c b/drivers/pinctrl/pxa/pinctrl-pxa25x.c
index 8d1247078ae5..95640698422f 100644
--- a/drivers/pinctrl/pxa/pinctrl-pxa25x.c
+++ b/drivers/pinctrl/pxa/pinctrl-pxa25x.c
@@ -216,25 +216,20 @@ static int pxa25x_pinctrl_probe(struct platform_device *pdev)
void __iomem *base_af[8];
void __iomem *base_dir[4];
void __iomem *base_sleep[4];
- struct resource *res;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base_af[0] = devm_ioremap_resource(&pdev->dev, res);
+ base_af[0] = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base_af[0]))
return PTR_ERR(base_af[0]);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- base_dir[0] = devm_ioremap_resource(&pdev->dev, res);
+ base_dir[0] = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(base_dir[0]))
return PTR_ERR(base_dir[0]);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
- base_dir[3] = devm_ioremap_resource(&pdev->dev, res);
+ base_dir[3] = devm_platform_ioremap_resource(pdev, 2);
if (IS_ERR(base_dir[3]))
return PTR_ERR(base_dir[3]);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
- base_sleep[0] = devm_ioremap_resource(&pdev->dev, res);
+ base_sleep[0] = devm_platform_ioremap_resource(pdev, 3);
if (IS_ERR(base_sleep[0]))
return PTR_ERR(base_sleep[0]);
diff --git a/drivers/pinctrl/pxa/pinctrl-pxa27x.c b/drivers/pinctrl/pxa/pinctrl-pxa27x.c
index 64943e819af6..48ccfb50b23e 100644
--- a/drivers/pinctrl/pxa/pinctrl-pxa27x.c
+++ b/drivers/pinctrl/pxa/pinctrl-pxa27x.c
@@ -508,25 +508,20 @@ static int pxa27x_pinctrl_probe(struct platform_device *pdev)
void __iomem *base_af[8];
void __iomem *base_dir[4];
void __iomem *base_sleep[4];
- struct resource *res;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base_af[0] = devm_ioremap_resource(&pdev->dev, res);
+ base_af[0] = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base_af[0]))
return PTR_ERR(base_af[0]);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- base_dir[0] = devm_ioremap_resource(&pdev->dev, res);
+ base_dir[0] = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(base_dir[0]))
return PTR_ERR(base_dir[0]);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
- base_dir[3] = devm_ioremap_resource(&pdev->dev, res);
+ base_dir[3] = devm_platform_ioremap_resource(pdev, 2);
if (IS_ERR(base_dir[3]))
return PTR_ERR(base_dir[3]);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
- base_sleep[0] = devm_ioremap_resource(&pdev->dev, res);
+ base_sleep[0] = devm_platform_ioremap_resource(pdev, 3);
if (IS_ERR(base_sleep[0]))
return PTR_ERR(base_sleep[0]);
diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig
index 32fc2458b8eb..811af2f81c39 100644
--- a/drivers/pinctrl/qcom/Kconfig
+++ b/drivers/pinctrl/qcom/Kconfig
@@ -90,6 +90,16 @@ config PINCTRL_MSM8916
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
Qualcomm TLMM block found on the Qualcomm 8916 platform.
+config PINCTRL_MSM8976
+ tristate "Qualcomm 8976 pin controller driver"
+ depends on GPIOLIB && OF
+ select PINCTRL_MSM
+ help
+ This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+ Qualcomm TLMM block found on the Qualcomm MSM8976 platform.
+ The Qualcomm MSM8956, APQ8056, APQ8076 platforms are also
+ supported by this driver.
+
config PINCTRL_MSM8994
tristate "Qualcomm 8994 pin controller driver"
depends on GPIOLIB && OF
@@ -132,32 +142,33 @@ config PINCTRL_QDF2XXX
Qualcomm Technologies QDF2xxx SOCs.
config PINCTRL_QCOM_SPMI_PMIC
- tristate "Qualcomm SPMI PMIC pin controller driver"
- depends on GPIOLIB && OF && SPMI
- select REGMAP_SPMI
- select PINMUX
- select PINCONF
- select GENERIC_PINCONF
- select GPIOLIB_IRQCHIP
- select IRQ_DOMAIN_HIERARCHY
- help
- This is the pinctrl, pinmux, pinconf and gpiolib driver for the
- Qualcomm GPIO and MPP blocks found in the Qualcomm PMIC's chips,
- which are using SPMI for communication with SoC. Example PMIC's
- devices are pm8841, pm8941 and pma8084.
+ tristate "Qualcomm SPMI PMIC pin controller driver"
+ depends on GPIOLIB && OF && SPMI
+ select REGMAP_SPMI
+ select PINMUX
+ select PINCONF
+ select GENERIC_PINCONF
+ select GPIOLIB_IRQCHIP
+ select IRQ_DOMAIN_HIERARCHY
+ help
+	  This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+	  Qualcomm GPIO and MPP blocks found in Qualcomm PMIC chips, which
+	  use SPMI to communicate with the SoC. Example PMIC devices are
+	  pm8841, pm8941 and pma8084.
config PINCTRL_QCOM_SSBI_PMIC
- tristate "Qualcomm SSBI PMIC pin controller driver"
- depends on GPIOLIB && OF
- select PINMUX
- select PINCONF
- select GENERIC_PINCONF
- select IRQ_DOMAIN_HIERARCHY
- help
- This is the pinctrl, pinmux, pinconf and gpiolib driver for the
- Qualcomm GPIO and MPP blocks found in the Qualcomm PMIC's chips,
- which are using SSBI for communication with SoC. Example PMIC's
- devices are pm8058 and pm8921.
+ tristate "Qualcomm SSBI PMIC pin controller driver"
+ depends on GPIOLIB && OF
+ select PINMUX
+ select PINCONF
+ select GENERIC_PINCONF
+ select GPIOLIB_IRQCHIP
+ select IRQ_DOMAIN_HIERARCHY
+ help
+	  This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+	  Qualcomm GPIO and MPP blocks found in Qualcomm PMIC chips, which
+	  use SSBI to communicate with the SoC. Example PMIC devices are
+	  pm8058 and pm8921.
config PINCTRL_SC7180
tristate "Qualcomm Technologies Inc SC7180 pin controller driver"
@@ -169,30 +180,30 @@ config PINCTRL_SC7180
Technologies Inc SC7180 platform.
config PINCTRL_SDM660
- tristate "Qualcomm Technologies Inc SDM660 pin controller driver"
- depends on GPIOLIB && OF
- select PINCTRL_MSM
- help
- This is the pinctrl, pinmux, pinconf and gpiolib driver for the
- Qualcomm Technologies Inc TLMM block found on the Qualcomm
- Technologies Inc SDM660 platform.
+ tristate "Qualcomm Technologies Inc SDM660 pin controller driver"
+ depends on GPIOLIB && OF
+ select PINCTRL_MSM
+ help
+ This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+ Qualcomm Technologies Inc TLMM block found on the Qualcomm
+ Technologies Inc SDM660 platform.
config PINCTRL_SDM845
- tristate "Qualcomm Technologies Inc SDM845 pin controller driver"
- depends on GPIOLIB && (OF || ACPI)
- select PINCTRL_MSM
- help
- This is the pinctrl, pinmux, pinconf and gpiolib driver for the
- Qualcomm Technologies Inc TLMM block found on the Qualcomm
- Technologies Inc SDM845 platform.
+ tristate "Qualcomm Technologies Inc SDM845 pin controller driver"
+ depends on GPIOLIB && (OF || ACPI)
+ select PINCTRL_MSM
+ help
+ This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+ Qualcomm Technologies Inc TLMM block found on the Qualcomm
+ Technologies Inc SDM845 platform.
config PINCTRL_SM8150
- tristate "Qualcomm Technologies Inc SM8150 pin controller driver"
- depends on GPIOLIB && OF
- select PINCTRL_MSM
- help
- This is the pinctrl, pinmux, pinconf and gpiolib driver for the
- Qualcomm Technologies Inc TLMM block found on the Qualcomm
- Technologies Inc SM8150 platform.
+ tristate "Qualcomm Technologies Inc SM8150 pin controller driver"
+ depends on GPIOLIB && OF
+ select PINCTRL_MSM
+ help
+ This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+ Qualcomm Technologies Inc TLMM block found on the Qualcomm
+ Technologies Inc SM8150 platform.
endif
diff --git a/drivers/pinctrl/qcom/Makefile b/drivers/pinctrl/qcom/Makefile
index f8bb0c265381..c2c2f9ad6827 100644
--- a/drivers/pinctrl/qcom/Makefile
+++ b/drivers/pinctrl/qcom/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_PINCTRL_MSM8660) += pinctrl-msm8660.o
obj-$(CONFIG_PINCTRL_MSM8960) += pinctrl-msm8960.o
obj-$(CONFIG_PINCTRL_MSM8X74) += pinctrl-msm8x74.o
obj-$(CONFIG_PINCTRL_MSM8916) += pinctrl-msm8916.o
+obj-$(CONFIG_PINCTRL_MSM8976) += pinctrl-msm8976.o
obj-$(CONFIG_PINCTRL_MSM8994) += pinctrl-msm8994.o
obj-$(CONFIG_PINCTRL_MSM8996) += pinctrl-msm8996.o
obj-$(CONFIG_PINCTRL_MSM8998) += pinctrl-msm8998.o
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index 763da0be10d6..62fcae9f05ae 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -1150,8 +1150,7 @@ int msm_pinctrl_probe(struct platform_device *pdev,
return PTR_ERR(pctrl->regs[i]);
}
} else {
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pctrl->regs[0] = devm_ioremap_resource(&pdev->dev, res);
+ pctrl->regs[0] = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pctrl->regs[0]))
return PTR_ERR(pctrl->regs[0]);
}
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8976.c b/drivers/pinctrl/qcom/pinctrl-msm8976.c
new file mode 100644
index 000000000000..e1259ce27396
--- /dev/null
+++ b/drivers/pinctrl/qcom/pinctrl-msm8976.c
@@ -0,0 +1,1127 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * Copyright (c) 2016, AngeloGioacchino Del Regno <kholk11@gmail.com>
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-msm.h"
+
+#define FUNCTION(fname) \
+ [msm_mux_##fname] = { \
+ .name = #fname, \
+ .groups = fname##_groups, \
+ .ngroups = ARRAY_SIZE(fname##_groups), \
+ }
+
+#define REG_BASE 0x0
+#define REG_SIZE 0x1000
+#define PINGROUP(id, f1, f2, f3, f4, f5, f6, f7, f8, f9) \
+ { \
+ .name = "gpio" #id, \
+ .pins = gpio##id##_pins, \
+ .npins = ARRAY_SIZE(gpio##id##_pins), \
+ .funcs = (int[]){ \
+ msm_mux_gpio, /* gpio mode */ \
+ msm_mux_##f1, \
+ msm_mux_##f2, \
+ msm_mux_##f3, \
+ msm_mux_##f4, \
+ msm_mux_##f5, \
+ msm_mux_##f6, \
+ msm_mux_##f7, \
+ msm_mux_##f8, \
+ msm_mux_##f9 \
+ }, \
+ .nfuncs = 10, \
+ .ctl_reg = REG_BASE + REG_SIZE * id, \
+ .io_reg = REG_BASE + 0x4 + REG_SIZE * id, \
+ .intr_cfg_reg = REG_BASE + 0x8 + REG_SIZE * id, \
+ .intr_status_reg = REG_BASE + 0xc + REG_SIZE * id, \
+ .intr_target_reg = REG_BASE + 0x8 + REG_SIZE * id, \
+ .mux_bit = 2, \
+ .pull_bit = 0, \
+ .drv_bit = 6, \
+ .oe_bit = 9, \
+ .in_bit = 0, \
+ .out_bit = 1, \
+ .intr_enable_bit = 0, \
+ .intr_status_bit = 0, \
+ .intr_target_bit = 5, \
+ .intr_target_kpss_val = 4, \
+ .intr_raw_status_bit = 4, \
+ .intr_polarity_bit = 1, \
+ .intr_detection_bit = 2, \
+ .intr_detection_width = 2, \
+ }
+
+#define SDC_QDSD_PINGROUP(pg_name, ctl, pull, drv) \
+ { \
+ .name = #pg_name, \
+ .pins = pg_name##_pins, \
+ .npins = ARRAY_SIZE(pg_name##_pins), \
+ .ctl_reg = ctl, \
+ .io_reg = 0, \
+ .intr_cfg_reg = 0, \
+ .intr_status_reg = 0, \
+ .intr_target_reg = 0, \
+ .mux_bit = -1, \
+ .pull_bit = pull, \
+ .drv_bit = drv, \
+ .oe_bit = -1, \
+ .in_bit = -1, \
+ .out_bit = -1, \
+ .intr_enable_bit = -1, \
+ .intr_status_bit = -1, \
+ .intr_target_bit = -1, \
+ .intr_raw_status_bit = -1, \
+ .intr_polarity_bit = -1, \
+ .intr_detection_bit = -1, \
+ .intr_detection_width = -1, \
+ }
+static const struct pinctrl_pin_desc msm8976_pins[] = {
+ PINCTRL_PIN(0, "GPIO_0"),
+ PINCTRL_PIN(1, "GPIO_1"),
+ PINCTRL_PIN(2, "GPIO_2"),
+ PINCTRL_PIN(3, "GPIO_3"),
+ PINCTRL_PIN(4, "GPIO_4"),
+ PINCTRL_PIN(5, "GPIO_5"),
+ PINCTRL_PIN(6, "GPIO_6"),
+ PINCTRL_PIN(7, "GPIO_7"),
+ PINCTRL_PIN(8, "GPIO_8"),
+ PINCTRL_PIN(9, "GPIO_9"),
+ PINCTRL_PIN(10, "GPIO_10"),
+ PINCTRL_PIN(11, "GPIO_11"),
+ PINCTRL_PIN(12, "GPIO_12"),
+ PINCTRL_PIN(13, "GPIO_13"),
+ PINCTRL_PIN(14, "GPIO_14"),
+ PINCTRL_PIN(15, "GPIO_15"),
+ PINCTRL_PIN(16, "GPIO_16"),
+ PINCTRL_PIN(17, "GPIO_17"),
+ PINCTRL_PIN(18, "GPIO_18"),
+ PINCTRL_PIN(19, "GPIO_19"),
+ PINCTRL_PIN(20, "GPIO_20"),
+ PINCTRL_PIN(21, "GPIO_21"),
+ PINCTRL_PIN(22, "GPIO_22"),
+ PINCTRL_PIN(23, "GPIO_23"),
+ PINCTRL_PIN(24, "GPIO_24"),
+ PINCTRL_PIN(25, "GPIO_25"),
+ PINCTRL_PIN(26, "GPIO_26"),
+ PINCTRL_PIN(27, "GPIO_27"),
+ PINCTRL_PIN(28, "GPIO_28"),
+ PINCTRL_PIN(29, "GPIO_29"),
+ PINCTRL_PIN(30, "GPIO_30"),
+ PINCTRL_PIN(31, "GPIO_31"),
+ PINCTRL_PIN(32, "GPIO_32"),
+ PINCTRL_PIN(33, "GPIO_33"),
+ PINCTRL_PIN(34, "GPIO_34"),
+ PINCTRL_PIN(35, "GPIO_35"),
+ PINCTRL_PIN(36, "GPIO_36"),
+ PINCTRL_PIN(37, "GPIO_37"),
+ PINCTRL_PIN(38, "GPIO_38"),
+ PINCTRL_PIN(39, "GPIO_39"),
+ PINCTRL_PIN(40, "GPIO_40"),
+ PINCTRL_PIN(41, "GPIO_41"),
+ PINCTRL_PIN(42, "GPIO_42"),
+ PINCTRL_PIN(43, "GPIO_43"),
+ PINCTRL_PIN(44, "GPIO_44"),
+ PINCTRL_PIN(45, "GPIO_45"),
+ PINCTRL_PIN(46, "GPIO_46"),
+ PINCTRL_PIN(47, "GPIO_47"),
+ PINCTRL_PIN(48, "GPIO_48"),
+ PINCTRL_PIN(49, "GPIO_49"),
+ PINCTRL_PIN(50, "GPIO_50"),
+ PINCTRL_PIN(51, "GPIO_51"),
+ PINCTRL_PIN(52, "GPIO_52"),
+ PINCTRL_PIN(53, "GPIO_53"),
+ PINCTRL_PIN(54, "GPIO_54"),
+ PINCTRL_PIN(55, "GPIO_55"),
+ PINCTRL_PIN(56, "GPIO_56"),
+ PINCTRL_PIN(57, "GPIO_57"),
+ PINCTRL_PIN(58, "GPIO_58"),
+ PINCTRL_PIN(59, "GPIO_59"),
+ PINCTRL_PIN(60, "GPIO_60"),
+ PINCTRL_PIN(61, "GPIO_61"),
+ PINCTRL_PIN(62, "GPIO_62"),
+ PINCTRL_PIN(63, "GPIO_63"),
+ PINCTRL_PIN(64, "GPIO_64"),
+ PINCTRL_PIN(65, "GPIO_65"),
+ PINCTRL_PIN(66, "GPIO_66"),
+ PINCTRL_PIN(67, "GPIO_67"),
+ PINCTRL_PIN(68, "GPIO_68"),
+ PINCTRL_PIN(69, "GPIO_69"),
+ PINCTRL_PIN(70, "GPIO_70"),
+ PINCTRL_PIN(71, "GPIO_71"),
+ PINCTRL_PIN(72, "GPIO_72"),
+ PINCTRL_PIN(73, "GPIO_73"),
+ PINCTRL_PIN(74, "GPIO_74"),
+ PINCTRL_PIN(75, "GPIO_75"),
+ PINCTRL_PIN(76, "GPIO_76"),
+ PINCTRL_PIN(77, "GPIO_77"),
+ PINCTRL_PIN(78, "GPIO_78"),
+ PINCTRL_PIN(79, "GPIO_79"),
+ PINCTRL_PIN(80, "GPIO_80"),
+ PINCTRL_PIN(81, "GPIO_81"),
+ PINCTRL_PIN(82, "GPIO_82"),
+ PINCTRL_PIN(83, "GPIO_83"),
+ PINCTRL_PIN(84, "GPIO_84"),
+ PINCTRL_PIN(85, "GPIO_85"),
+ PINCTRL_PIN(86, "GPIO_86"),
+ PINCTRL_PIN(87, "GPIO_87"),
+ PINCTRL_PIN(88, "GPIO_88"),
+ PINCTRL_PIN(89, "GPIO_89"),
+ PINCTRL_PIN(90, "GPIO_90"),
+ PINCTRL_PIN(91, "GPIO_91"),
+ PINCTRL_PIN(92, "GPIO_92"),
+ PINCTRL_PIN(93, "GPIO_93"),
+ PINCTRL_PIN(94, "GPIO_94"),
+ PINCTRL_PIN(95, "GPIO_95"),
+ PINCTRL_PIN(96, "GPIO_96"),
+ PINCTRL_PIN(97, "GPIO_97"),
+ PINCTRL_PIN(98, "GPIO_98"),
+ PINCTRL_PIN(99, "GPIO_99"),
+ PINCTRL_PIN(100, "GPIO_100"),
+ PINCTRL_PIN(101, "GPIO_101"),
+ PINCTRL_PIN(102, "GPIO_102"),
+ PINCTRL_PIN(103, "GPIO_103"),
+ PINCTRL_PIN(104, "GPIO_104"),
+ PINCTRL_PIN(105, "GPIO_105"),
+ PINCTRL_PIN(106, "GPIO_106"),
+ PINCTRL_PIN(107, "GPIO_107"),
+ PINCTRL_PIN(108, "GPIO_108"),
+ PINCTRL_PIN(109, "GPIO_109"),
+ PINCTRL_PIN(110, "GPIO_110"),
+ PINCTRL_PIN(111, "GPIO_111"),
+ PINCTRL_PIN(112, "GPIO_112"),
+ PINCTRL_PIN(113, "GPIO_113"),
+ PINCTRL_PIN(114, "GPIO_114"),
+ PINCTRL_PIN(115, "GPIO_115"),
+ PINCTRL_PIN(116, "GPIO_116"),
+ PINCTRL_PIN(117, "GPIO_117"),
+ PINCTRL_PIN(118, "GPIO_118"),
+ PINCTRL_PIN(119, "GPIO_119"),
+ PINCTRL_PIN(120, "GPIO_120"),
+ PINCTRL_PIN(121, "GPIO_121"),
+ PINCTRL_PIN(122, "GPIO_122"),
+ PINCTRL_PIN(123, "GPIO_123"),
+ PINCTRL_PIN(124, "GPIO_124"),
+ PINCTRL_PIN(125, "GPIO_125"),
+ PINCTRL_PIN(126, "GPIO_126"),
+ PINCTRL_PIN(127, "GPIO_127"),
+ PINCTRL_PIN(128, "GPIO_128"),
+ PINCTRL_PIN(129, "GPIO_129"),
+ PINCTRL_PIN(130, "GPIO_130"),
+ PINCTRL_PIN(131, "GPIO_131"),
+ PINCTRL_PIN(132, "GPIO_132"),
+ PINCTRL_PIN(133, "GPIO_133"),
+ PINCTRL_PIN(134, "GPIO_134"),
+ PINCTRL_PIN(135, "GPIO_135"),
+ PINCTRL_PIN(136, "GPIO_136"),
+ PINCTRL_PIN(137, "GPIO_137"),
+ PINCTRL_PIN(138, "GPIO_138"),
+ PINCTRL_PIN(139, "GPIO_139"),
+ PINCTRL_PIN(140, "GPIO_140"),
+ PINCTRL_PIN(141, "GPIO_141"),
+ PINCTRL_PIN(142, "GPIO_142"),
+ PINCTRL_PIN(143, "GPIO_143"),
+ PINCTRL_PIN(144, "GPIO_144"),
+ PINCTRL_PIN(145, "SDC1_CLK"),
+ PINCTRL_PIN(146, "SDC1_CMD"),
+ PINCTRL_PIN(147, "SDC1_DATA"),
+ PINCTRL_PIN(148, "SDC1_RCLK"),
+ PINCTRL_PIN(149, "SDC2_CLK"),
+ PINCTRL_PIN(150, "SDC2_CMD"),
+ PINCTRL_PIN(151, "SDC2_DATA"),
+ PINCTRL_PIN(152, "QDSD_CLK"),
+ PINCTRL_PIN(153, "QDSD_CMD"),
+ PINCTRL_PIN(154, "QDSD_DATA0"),
+ PINCTRL_PIN(155, "QDSD_DATA1"),
+ PINCTRL_PIN(156, "QDSD_DATA2"),
+ PINCTRL_PIN(157, "QDSD_DATA3"),
+};
+
+#define DECLARE_MSM_GPIO_PINS(pin) \
+ static const unsigned int gpio##pin##_pins[] = { pin }
+DECLARE_MSM_GPIO_PINS(0);
+DECLARE_MSM_GPIO_PINS(1);
+DECLARE_MSM_GPIO_PINS(2);
+DECLARE_MSM_GPIO_PINS(3);
+DECLARE_MSM_GPIO_PINS(4);
+DECLARE_MSM_GPIO_PINS(5);
+DECLARE_MSM_GPIO_PINS(6);
+DECLARE_MSM_GPIO_PINS(7);
+DECLARE_MSM_GPIO_PINS(8);
+DECLARE_MSM_GPIO_PINS(9);
+DECLARE_MSM_GPIO_PINS(10);
+DECLARE_MSM_GPIO_PINS(11);
+DECLARE_MSM_GPIO_PINS(12);
+DECLARE_MSM_GPIO_PINS(13);
+DECLARE_MSM_GPIO_PINS(14);
+DECLARE_MSM_GPIO_PINS(15);
+DECLARE_MSM_GPIO_PINS(16);
+DECLARE_MSM_GPIO_PINS(17);
+DECLARE_MSM_GPIO_PINS(18);
+DECLARE_MSM_GPIO_PINS(19);
+DECLARE_MSM_GPIO_PINS(20);
+DECLARE_MSM_GPIO_PINS(21);
+DECLARE_MSM_GPIO_PINS(22);
+DECLARE_MSM_GPIO_PINS(23);
+DECLARE_MSM_GPIO_PINS(24);
+DECLARE_MSM_GPIO_PINS(25);
+DECLARE_MSM_GPIO_PINS(26);
+DECLARE_MSM_GPIO_PINS(27);
+DECLARE_MSM_GPIO_PINS(28);
+DECLARE_MSM_GPIO_PINS(29);
+DECLARE_MSM_GPIO_PINS(30);
+DECLARE_MSM_GPIO_PINS(31);
+DECLARE_MSM_GPIO_PINS(32);
+DECLARE_MSM_GPIO_PINS(33);
+DECLARE_MSM_GPIO_PINS(34);
+DECLARE_MSM_GPIO_PINS(35);
+DECLARE_MSM_GPIO_PINS(36);
+DECLARE_MSM_GPIO_PINS(37);
+DECLARE_MSM_GPIO_PINS(38);
+DECLARE_MSM_GPIO_PINS(39);
+DECLARE_MSM_GPIO_PINS(40);
+DECLARE_MSM_GPIO_PINS(41);
+DECLARE_MSM_GPIO_PINS(42);
+DECLARE_MSM_GPIO_PINS(43);
+DECLARE_MSM_GPIO_PINS(44);
+DECLARE_MSM_GPIO_PINS(45);
+DECLARE_MSM_GPIO_PINS(46);
+DECLARE_MSM_GPIO_PINS(47);
+DECLARE_MSM_GPIO_PINS(48);
+DECLARE_MSM_GPIO_PINS(49);
+DECLARE_MSM_GPIO_PINS(50);
+DECLARE_MSM_GPIO_PINS(51);
+DECLARE_MSM_GPIO_PINS(52);
+DECLARE_MSM_GPIO_PINS(53);
+DECLARE_MSM_GPIO_PINS(54);
+DECLARE_MSM_GPIO_PINS(55);
+DECLARE_MSM_GPIO_PINS(56);
+DECLARE_MSM_GPIO_PINS(57);
+DECLARE_MSM_GPIO_PINS(58);
+DECLARE_MSM_GPIO_PINS(59);
+DECLARE_MSM_GPIO_PINS(60);
+DECLARE_MSM_GPIO_PINS(61);
+DECLARE_MSM_GPIO_PINS(62);
+DECLARE_MSM_GPIO_PINS(63);
+DECLARE_MSM_GPIO_PINS(64);
+DECLARE_MSM_GPIO_PINS(65);
+DECLARE_MSM_GPIO_PINS(66);
+DECLARE_MSM_GPIO_PINS(67);
+DECLARE_MSM_GPIO_PINS(68);
+DECLARE_MSM_GPIO_PINS(69);
+DECLARE_MSM_GPIO_PINS(70);
+DECLARE_MSM_GPIO_PINS(71);
+DECLARE_MSM_GPIO_PINS(72);
+DECLARE_MSM_GPIO_PINS(73);
+DECLARE_MSM_GPIO_PINS(74);
+DECLARE_MSM_GPIO_PINS(75);
+DECLARE_MSM_GPIO_PINS(76);
+DECLARE_MSM_GPIO_PINS(77);
+DECLARE_MSM_GPIO_PINS(78);
+DECLARE_MSM_GPIO_PINS(79);
+DECLARE_MSM_GPIO_PINS(80);
+DECLARE_MSM_GPIO_PINS(81);
+DECLARE_MSM_GPIO_PINS(82);
+DECLARE_MSM_GPIO_PINS(83);
+DECLARE_MSM_GPIO_PINS(84);
+DECLARE_MSM_GPIO_PINS(85);
+DECLARE_MSM_GPIO_PINS(86);
+DECLARE_MSM_GPIO_PINS(87);
+DECLARE_MSM_GPIO_PINS(88);
+DECLARE_MSM_GPIO_PINS(89);
+DECLARE_MSM_GPIO_PINS(90);
+DECLARE_MSM_GPIO_PINS(91);
+DECLARE_MSM_GPIO_PINS(92);
+DECLARE_MSM_GPIO_PINS(93);
+DECLARE_MSM_GPIO_PINS(94);
+DECLARE_MSM_GPIO_PINS(95);
+DECLARE_MSM_GPIO_PINS(96);
+DECLARE_MSM_GPIO_PINS(97);
+DECLARE_MSM_GPIO_PINS(98);
+DECLARE_MSM_GPIO_PINS(99);
+DECLARE_MSM_GPIO_PINS(100);
+DECLARE_MSM_GPIO_PINS(101);
+DECLARE_MSM_GPIO_PINS(102);
+DECLARE_MSM_GPIO_PINS(103);
+DECLARE_MSM_GPIO_PINS(104);
+DECLARE_MSM_GPIO_PINS(105);
+DECLARE_MSM_GPIO_PINS(106);
+DECLARE_MSM_GPIO_PINS(107);
+DECLARE_MSM_GPIO_PINS(108);
+DECLARE_MSM_GPIO_PINS(109);
+DECLARE_MSM_GPIO_PINS(110);
+DECLARE_MSM_GPIO_PINS(111);
+DECLARE_MSM_GPIO_PINS(112);
+DECLARE_MSM_GPIO_PINS(113);
+DECLARE_MSM_GPIO_PINS(114);
+DECLARE_MSM_GPIO_PINS(115);
+DECLARE_MSM_GPIO_PINS(116);
+DECLARE_MSM_GPIO_PINS(117);
+DECLARE_MSM_GPIO_PINS(118);
+DECLARE_MSM_GPIO_PINS(119);
+DECLARE_MSM_GPIO_PINS(120);
+DECLARE_MSM_GPIO_PINS(121);
+DECLARE_MSM_GPIO_PINS(122);
+DECLARE_MSM_GPIO_PINS(123);
+DECLARE_MSM_GPIO_PINS(124);
+DECLARE_MSM_GPIO_PINS(125);
+DECLARE_MSM_GPIO_PINS(126);
+DECLARE_MSM_GPIO_PINS(127);
+DECLARE_MSM_GPIO_PINS(128);
+DECLARE_MSM_GPIO_PINS(129);
+DECLARE_MSM_GPIO_PINS(130);
+DECLARE_MSM_GPIO_PINS(131);
+DECLARE_MSM_GPIO_PINS(132);
+DECLARE_MSM_GPIO_PINS(133);
+DECLARE_MSM_GPIO_PINS(134);
+DECLARE_MSM_GPIO_PINS(135);
+DECLARE_MSM_GPIO_PINS(136);
+DECLARE_MSM_GPIO_PINS(137);
+DECLARE_MSM_GPIO_PINS(138);
+DECLARE_MSM_GPIO_PINS(139);
+DECLARE_MSM_GPIO_PINS(140);
+DECLARE_MSM_GPIO_PINS(141);
+DECLARE_MSM_GPIO_PINS(142);
+DECLARE_MSM_GPIO_PINS(143);
+DECLARE_MSM_GPIO_PINS(144);
+
+static const unsigned int sdc1_clk_pins[] = { 145 };
+static const unsigned int sdc1_cmd_pins[] = { 146 };
+static const unsigned int sdc1_data_pins[] = { 147 };
+static const unsigned int sdc1_rclk_pins[] = { 148 };
+static const unsigned int sdc2_clk_pins[] = { 149 };
+static const unsigned int sdc2_cmd_pins[] = { 150 };
+static const unsigned int sdc2_data_pins[] = { 151 };
+static const unsigned int qdsd_clk_pins[] = { 152 };
+static const unsigned int qdsd_cmd_pins[] = { 153 };
+static const unsigned int qdsd_data0_pins[] = { 154 };
+static const unsigned int qdsd_data1_pins[] = { 155 };
+static const unsigned int qdsd_data2_pins[] = { 156 };
+static const unsigned int qdsd_data3_pins[] = { 157 };
+
+enum msm8976_functions {
+ msm_mux_gpio,
+ msm_mux_blsp_uart1,
+ msm_mux_blsp_spi1,
+ msm_mux_smb_int,
+ msm_mux_blsp_i2c1,
+ msm_mux_blsp_spi2,
+ msm_mux_blsp_uart2,
+ msm_mux_blsp_i2c2,
+ msm_mux_gcc_gp1_clk_b,
+ msm_mux_blsp_spi3,
+ msm_mux_qdss_tracedata_b,
+ msm_mux_blsp_i2c3,
+ msm_mux_gcc_gp2_clk_b,
+ msm_mux_gcc_gp3_clk_b,
+ msm_mux_blsp_spi4,
+ msm_mux_cap_int,
+ msm_mux_blsp_i2c4,
+ msm_mux_blsp_spi5,
+ msm_mux_blsp_uart5,
+ msm_mux_qdss_traceclk_a,
+ msm_mux_m_voc,
+ msm_mux_blsp_i2c5,
+ msm_mux_qdss_tracectl_a,
+ msm_mux_qdss_tracedata_a,
+ msm_mux_blsp_spi6,
+ msm_mux_blsp_uart6,
+ msm_mux_qdss_tracectl_b,
+ msm_mux_blsp_i2c6,
+ msm_mux_qdss_traceclk_b,
+ msm_mux_mdp_vsync,
+ msm_mux_pri_mi2s_mclk_a,
+ msm_mux_sec_mi2s_mclk_a,
+ msm_mux_cam_mclk,
+ msm_mux_cci0_i2c,
+ msm_mux_cci1_i2c,
+ msm_mux_blsp1_spi,
+ msm_mux_blsp3_spi,
+ msm_mux_gcc_gp1_clk_a,
+ msm_mux_gcc_gp2_clk_a,
+ msm_mux_gcc_gp3_clk_a,
+ msm_mux_uim_batt,
+ msm_mux_sd_write,
+ msm_mux_uim1_data,
+ msm_mux_uim1_clk,
+ msm_mux_uim1_reset,
+ msm_mux_uim1_present,
+ msm_mux_uim2_data,
+ msm_mux_uim2_clk,
+ msm_mux_uim2_reset,
+ msm_mux_uim2_present,
+ msm_mux_ts_xvdd,
+ msm_mux_mipi_dsi0,
+ msm_mux_us_euro,
+ msm_mux_ts_resout,
+ msm_mux_ts_sample,
+ msm_mux_sec_mi2s_mclk_b,
+ msm_mux_pri_mi2s,
+ msm_mux_codec_reset,
+ msm_mux_cdc_pdm0,
+ msm_mux_us_emitter,
+ msm_mux_pri_mi2s_mclk_b,
+ msm_mux_pri_mi2s_mclk_c,
+ msm_mux_lpass_slimbus,
+ msm_mux_lpass_slimbus0,
+ msm_mux_lpass_slimbus1,
+ msm_mux_codec_int1,
+ msm_mux_codec_int2,
+ msm_mux_wcss_bt,
+ msm_mux_sdc3,
+ msm_mux_wcss_wlan2,
+ msm_mux_wcss_wlan1,
+ msm_mux_wcss_wlan0,
+ msm_mux_wcss_wlan,
+ msm_mux_wcss_fm,
+ msm_mux_key_volp,
+ msm_mux_key_snapshot,
+ msm_mux_key_focus,
+ msm_mux_key_home,
+ msm_mux_pwr_down,
+ msm_mux_dmic0_clk,
+ msm_mux_hdmi_int,
+ msm_mux_dmic0_data,
+ msm_mux_wsa_vi,
+ msm_mux_wsa_en,
+ msm_mux_blsp_spi8,
+ msm_mux_wsa_irq,
+ msm_mux_blsp_i2c8,
+ msm_mux_pa_indicator,
+ msm_mux_modem_tsync,
+ msm_mux_ssbi_wtr1,
+ msm_mux_gsm1_tx,
+ msm_mux_gsm0_tx,
+ msm_mux_sdcard_det,
+ msm_mux_sec_mi2s,
+ msm_mux_ss_switch,
+ msm_mux_NA,
+};
+
+static const char * const gpio_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7",
+ "gpio8", "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14",
+ "gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21",
+ "gpio22", "gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28",
+ "gpio29", "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35",
+ "gpio36", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42",
+ "gpio43", "gpio44", "gpio45", "gpio46", "gpio47", "gpio48", "gpio49",
+ "gpio50", "gpio51", "gpio52", "gpio53", "gpio54", "gpio55", "gpio56",
+ "gpio57", "gpio58", "gpio59", "gpio60", "gpio61", "gpio62", "gpio63",
+ "gpio64", "gpio65", "gpio66", "gpio67", "gpio68", "gpio69", "gpio70",
+ "gpio71", "gpio72", "gpio73", "gpio74", "gpio75", "gpio76", "gpio77",
+ "gpio78", "gpio79", "gpio80", "gpio81", "gpio82", "gpio83", "gpio84",
+ "gpio85", "gpio86", "gpio87", "gpio88", "gpio89", "gpio90", "gpio91",
+ "gpio92", "gpio93", "gpio94", "gpio95", "gpio96", "gpio97", "gpio98",
+ "gpio99", "gpio100", "gpio101", "gpio102", "gpio103", "gpio104",
+ "gpio105", "gpio106", "gpio107", "gpio108", "gpio109", "gpio110",
+ "gpio111", "gpio112", "gpio113", "gpio114", "gpio115", "gpio116",
+ "gpio117", "gpio118", "gpio119", "gpio120", "gpio121", "gpio122",
+ "gpio123", "gpio124", "gpio125", "gpio126", "gpio127", "gpio128",
+ "gpio129", "gpio130", "gpio131", "gpio132", "gpio133", "gpio134",
+ "gpio135", "gpio136", "gpio137", "gpio138", "gpio139", "gpio140",
+ "gpio141", "gpio142", "gpio143", "gpio144",
+};
+static const char * const blsp_uart1_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3",
+};
+static const char * const blsp_spi1_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3",
+};
+static const char * const smb_int_groups[] = {
+ "gpio1",
+};
+static const char * const blsp_i2c1_groups[] = {
+ "gpio2", "gpio3",
+};
+static const char * const blsp_spi2_groups[] = {
+ "gpio4", "gpio5", "gpio6", "gpio7",
+};
+static const char * const blsp_uart2_groups[] = {
+ "gpio4", "gpio5", "gpio6", "gpio7",
+};
+static const char * const blsp_i2c2_groups[] = {
+ "gpio6", "gpio7",
+};
+static const char * const gcc_gp1_clk_b_groups[] = {
+ "gpio105",
+};
+static const char * const blsp_spi3_groups[] = {
+ "gpio8", "gpio9", "gpio10", "gpio11",
+};
+static const char * const qdss_tracedata_b_groups[] = {
+ "gpio26", "gpio27", "gpio28", "gpio29", "gpio30",
+ "gpio31", "gpio33", "gpio34", "gpio35", "gpio36", "gpio37", "gpio38",
+ "gpio116", "gpio126", "gpio128", "gpio129",
+};
+static const char * const blsp_i2c3_groups[] = {
+ "gpio10", "gpio11",
+};
+static const char * const gcc_gp2_clk_b_groups[] = {
+ "gpio12",
+};
+static const char * const gcc_gp3_clk_b_groups[] = {
+ "gpio13",
+};
+static const char * const blsp_spi4_groups[] = {
+ "gpio12", "gpio13", "gpio14", "gpio15",
+};
+static const char * const cap_int_groups[] = {
+ "gpio13",
+};
+static const char * const blsp_i2c4_groups[] = {
+ "gpio14", "gpio15",
+};
+static const char * const blsp_spi5_groups[] = {
+ "gpio134", "gpio135", "gpio136", "gpio137",
+};
+static const char * const blsp_uart5_groups[] = {
+ "gpio134", "gpio135", "gpio136", "gpio137",
+};
+static const char * const qdss_traceclk_a_groups[] = {
+ "gpio46",
+};
+static const char * const m_voc_groups[] = {
+ "gpio123", "gpio124",
+};
+static const char * const blsp_i2c5_groups[] = {
+ "gpio136", "gpio137",
+};
+static const char * const qdss_tracectl_a_groups[] = {
+ "gpio45",
+};
+static const char * const qdss_tracedata_a_groups[] = {
+ "gpio8", "gpio9", "gpio10", "gpio39", "gpio40", "gpio41", "gpio42",
+ "gpio43", "gpio44", "gpio47", "gpio48", "gpio62", "gpio69", "gpio120",
+ "gpio121", "gpio130", "gpio131",
+};
+static const char * const blsp_spi6_groups[] = {
+ "gpio20", "gpio21", "gpio22", "gpio23",
+};
+static const char * const blsp_uart6_groups[] = {
+ "gpio20", "gpio21", "gpio22", "gpio23",
+};
+static const char * const qdss_tracectl_b_groups[] = {
+ "gpio5",
+};
+static const char * const blsp_i2c6_groups[] = {
+ "gpio22", "gpio23",
+};
+static const char * const qdss_traceclk_b_groups[] = {
+ "gpio5",
+};
+static const char * const mdp_vsync_groups[] = {
+ "gpio24", "gpio25",
+};
+static const char * const pri_mi2s_mclk_a_groups[] = {
+ "gpio126",
+};
+static const char * const sec_mi2s_mclk_a_groups[] = {
+ "gpio62",
+};
+static const char * const cam_mclk_groups[] = {
+ "gpio26", "gpio27", "gpio28",
+};
+static const char * const cci0_i2c_groups[] = {
+ "gpio30", "gpio29",
+};
+static const char * const cci1_i2c_groups[] = {
+ "gpio104", "gpio103",
+};
+static const char * const blsp1_spi_groups[] = {
+ "gpio101",
+};
+static const char * const blsp3_spi_groups[] = {
+ "gpio106", "gpio107",
+};
+static const char * const gcc_gp1_clk_a_groups[] = {
+ "gpio49",
+};
+static const char * const gcc_gp2_clk_a_groups[] = {
+ "gpio50",
+};
+static const char * const gcc_gp3_clk_a_groups[] = {
+ "gpio51",
+};
+static const char * const uim_batt_groups[] = {
+ "gpio61",
+};
+static const char * const sd_write_groups[] = {
+ "gpio50",
+};
+static const char * const uim2_data_groups[] = {
+ "gpio51",
+};
+static const char * const uim2_clk_groups[] = {
+ "gpio52",
+};
+static const char * const uim2_reset_groups[] = {
+ "gpio53",
+};
+static const char * const uim2_present_groups[] = {
+ "gpio54",
+};
+static const char * const uim1_data_groups[] = {
+ "gpio55",
+};
+static const char * const uim1_clk_groups[] = {
+ "gpio56",
+};
+static const char * const uim1_reset_groups[] = {
+ "gpio57",
+};
+static const char * const uim1_present_groups[] = {
+ "gpio58",
+};
+static const char * const ts_xvdd_groups[] = {
+ "gpio60",
+};
+static const char * const mipi_dsi0_groups[] = {
+ "gpio61",
+};
+static const char * const us_euro_groups[] = {
+ "gpio63",
+};
+static const char * const ts_resout_groups[] = {
+ "gpio64",
+};
+static const char * const ts_sample_groups[] = {
+ "gpio65",
+};
+static const char * const sec_mi2s_mclk_b_groups[] = {
+ "gpio66",
+};
+static const char * const pri_mi2s_groups[] = {
+ "gpio122", "gpio123", "gpio124", "gpio125", "gpio127",
+};
+static const char * const codec_reset_groups[] = {
+ "gpio67",
+};
+static const char * const cdc_pdm0_groups[] = {
+ "gpio116", "gpio117", "gpio118", "gpio119", "gpio120", "gpio121",
+};
+static const char * const us_emitter_groups[] = {
+ "gpio68",
+};
+static const char * const pri_mi2s_mclk_b_groups[] = {
+ "gpio62",
+};
+static const char * const pri_mi2s_mclk_c_groups[] = {
+ "gpio116",
+};
+static const char * const lpass_slimbus_groups[] = {
+ "gpio117",
+};
+static const char * const lpass_slimbus0_groups[] = {
+ "gpio118",
+};
+static const char * const lpass_slimbus1_groups[] = {
+ "gpio119",
+};
+static const char * const codec_int1_groups[] = {
+ "gpio73",
+};
+static const char * const codec_int2_groups[] = {
+ "gpio74",
+};
+static const char * const wcss_bt_groups[] = {
+ "gpio39", "gpio47", "gpio88",
+};
+static const char * const sdc3_groups[] = {
+ "gpio39", "gpio40", "gpio41",
+ "gpio42", "gpio43", "gpio44",
+};
+static const char * const wcss_wlan2_groups[] = {
+ "gpio40",
+};
+static const char * const wcss_wlan1_groups[] = {
+ "gpio41",
+};
+static const char * const wcss_wlan0_groups[] = {
+ "gpio42",
+};
+static const char * const wcss_wlan_groups[] = {
+ "gpio43", "gpio44",
+};
+static const char * const wcss_fm_groups[] = {
+ "gpio45", "gpio46",
+};
+static const char * const key_volp_groups[] = {
+ "gpio85",
+};
+static const char * const key_snapshot_groups[] = {
+ "gpio86",
+};
+static const char * const key_focus_groups[] = {
+ "gpio87",
+};
+static const char * const key_home_groups[] = {
+ "gpio88",
+};
+static const char * const pwr_down_groups[] = {
+ "gpio89",
+};
+static const char * const dmic0_clk_groups[] = {
+ "gpio66",
+};
+static const char * const hdmi_int_groups[] = {
+ "gpio90",
+};
+static const char * const dmic0_data_groups[] = {
+ "gpio67",
+};
+static const char * const wsa_vi_groups[] = {
+ "gpio108", "gpio109",
+};
+static const char * const wsa_en_groups[] = {
+ "gpio96",
+};
+static const char * const blsp_spi8_groups[] = {
+ "gpio16", "gpio17", "gpio18", "gpio19",
+};
+static const char * const wsa_irq_groups[] = {
+ "gpio97",
+};
+static const char * const blsp_i2c8_groups[] = {
+ "gpio18", "gpio19",
+};
+static const char * const pa_indicator_groups[] = {
+ "gpio92",
+};
+static const char * const modem_tsync_groups[] = {
+ "gpio93",
+};
+static const char * const nav_tsync_groups[] = {
+ "gpio93",
+};
+static const char * const ssbi_wtr1_groups[] = {
+ "gpio79", "gpio94",
+};
+static const char * const gsm1_tx_groups[] = {
+ "gpio95",
+};
+static const char * const gsm0_tx_groups[] = {
+ "gpio99",
+};
+static const char * const sdcard_det_groups[] = {
+ "gpio133",
+};
+static const char * const sec_mi2s_groups[] = {
+ "gpio102", "gpio105", "gpio134", "gpio135",
+};
+
+static const char * const ss_switch_groups[] = {
+ "gpio139",
+};
+
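+/*
+ * Function table: one entry per selectable function. Its order must match
+ * enum msm8976_functions above, since the pinmux selector indexes into it.
+ */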
+static const struct msm_function msm8976_functions[] = {
+ FUNCTION(gpio),
+ FUNCTION(blsp_uart1),
+ FUNCTION(blsp_spi1),
+ FUNCTION(smb_int),
+ FUNCTION(blsp_i2c1),
+ FUNCTION(blsp_spi2),
+ FUNCTION(blsp_uart2),
+ FUNCTION(blsp_i2c2),
+ FUNCTION(gcc_gp1_clk_b),
+ FUNCTION(blsp_spi3),
+ FUNCTION(qdss_tracedata_b),
+ FUNCTION(blsp_i2c3),
+ FUNCTION(gcc_gp2_clk_b),
+ FUNCTION(gcc_gp3_clk_b),
+ FUNCTION(blsp_spi4),
+ FUNCTION(cap_int),
+ FUNCTION(blsp_i2c4),
+ FUNCTION(blsp_spi5),
+ FUNCTION(blsp_uart5),
+ FUNCTION(qdss_traceclk_a),
+ FUNCTION(m_voc),
+ FUNCTION(blsp_i2c5),
+ FUNCTION(qdss_tracectl_a),
+ FUNCTION(qdss_tracedata_a),
+ FUNCTION(blsp_spi6),
+ FUNCTION(blsp_uart6),
+ FUNCTION(qdss_tracectl_b),
+ FUNCTION(blsp_i2c6),
+ FUNCTION(qdss_traceclk_b),
+ FUNCTION(mdp_vsync),
+ FUNCTION(pri_mi2s_mclk_a),
+ FUNCTION(sec_mi2s_mclk_a),
+ FUNCTION(cam_mclk),
+ FUNCTION(cci0_i2c),
+ FUNCTION(cci1_i2c),
+ FUNCTION(blsp1_spi),
+ FUNCTION(blsp3_spi),
+ FUNCTION(gcc_gp1_clk_a),
+ FUNCTION(gcc_gp2_clk_a),
+ FUNCTION(gcc_gp3_clk_a),
+ FUNCTION(uim_batt),
+ FUNCTION(sd_write),
+ FUNCTION(uim1_data),
+ FUNCTION(uim1_clk),
+ FUNCTION(uim1_reset),
+ FUNCTION(uim1_present),
+ FUNCTION(uim2_data),
+ FUNCTION(uim2_clk),
+ FUNCTION(uim2_reset),
+ FUNCTION(uim2_present),
+ FUNCTION(ts_xvdd),
+ FUNCTION(mipi_dsi0),
+ FUNCTION(us_euro),
+ FUNCTION(ts_resout),
+ FUNCTION(ts_sample),
+ FUNCTION(sec_mi2s_mclk_b),
+ FUNCTION(pri_mi2s),
+ FUNCTION(codec_reset),
+ FUNCTION(cdc_pdm0),
+ FUNCTION(us_emitter),
+ FUNCTION(pri_mi2s_mclk_b),
+ FUNCTION(pri_mi2s_mclk_c),
+ FUNCTION(lpass_slimbus),
+ FUNCTION(lpass_slimbus0),
+ FUNCTION(lpass_slimbus1),
+ FUNCTION(codec_int1),
+ FUNCTION(codec_int2),
+ FUNCTION(wcss_bt),
+ FUNCTION(sdc3),
+ FUNCTION(wcss_wlan2),
+ FUNCTION(wcss_wlan1),
+ FUNCTION(wcss_wlan0),
+ FUNCTION(wcss_wlan),
+ FUNCTION(wcss_fm),
+ FUNCTION(key_volp),
+ FUNCTION(key_snapshot),
+ FUNCTION(key_focus),
+ FUNCTION(key_home),
+ FUNCTION(pwr_down),
+ FUNCTION(dmic0_clk),
+ FUNCTION(hdmi_int),
+ FUNCTION(dmic0_data),
+ FUNCTION(wsa_vi),
+ FUNCTION(wsa_en),
+ FUNCTION(blsp_spi8),
+ FUNCTION(wsa_irq),
+ FUNCTION(blsp_i2c8),
+ FUNCTION(pa_indicator),
+ FUNCTION(modem_tsync),
+ FUNCTION(ssbi_wtr1),
+ FUNCTION(gsm1_tx),
+ FUNCTION(gsm0_tx),
+ FUNCTION(sdcard_det),
+ FUNCTION(sec_mi2s),
+ FUNCTION(ss_switch),
+};
+
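+/*
+ * PINGROUP(id, f1..f9) assigns a function to each of the nine mux values of
+ * GPIO <id>; mux value 0 always selects plain GPIO, and NA marks unused
+ * selectors.
+ */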
+static const struct msm_pingroup msm8976_groups[] = {
+ PINGROUP(0, blsp_spi1, blsp_uart1, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(1, blsp_spi1, blsp_uart1, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(2, blsp_spi1, blsp_uart1, blsp_i2c1, NA, NA, NA, NA, NA, NA),
+ PINGROUP(3, blsp_spi1, blsp_uart1, blsp_i2c1, NA, NA, NA, NA, NA, NA),
+ PINGROUP(4, blsp_spi2, blsp_uart2, NA, NA, NA, qdss_tracectl_b, NA, NA, NA),
+ PINGROUP(5, blsp_spi2, blsp_uart2, NA, NA, NA, qdss_traceclk_b, NA, NA, NA),
+ PINGROUP(6, blsp_spi2, blsp_uart2, blsp_i2c2, NA, NA, NA, NA, NA, NA),
+ PINGROUP(7, blsp_spi2, blsp_uart2, blsp_i2c2, NA, NA, NA, NA, NA, NA),
+ PINGROUP(8, blsp_spi3, NA, NA, NA, NA, qdss_tracedata_a, NA, NA, NA),
+ PINGROUP(9, blsp_spi3, NA, NA, NA, qdss_tracedata_a, NA, NA, NA, NA),
+ PINGROUP(10, blsp_spi3, NA, blsp_i2c3, NA, NA, qdss_tracedata_a, NA, NA, NA),
+ PINGROUP(11, blsp_spi3, NA, blsp_i2c3, NA, NA, NA, NA, NA, NA),
+ PINGROUP(12, blsp_spi4, NA, gcc_gp2_clk_b, NA, NA, NA, NA, NA, NA),
+ PINGROUP(13, blsp_spi4, NA, gcc_gp3_clk_b, NA, NA, NA, NA, NA, NA),
+ PINGROUP(14, blsp_spi4, NA, blsp_i2c4, NA, NA, NA, NA, NA, NA),
+ PINGROUP(15, blsp_spi4, NA, blsp_i2c4, NA, NA, NA, NA, NA, NA),
+ PINGROUP(16, blsp_spi8, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(17, blsp_spi8, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(18, blsp_spi8, NA, blsp_i2c8, NA, NA, NA, NA, NA, NA),
+ PINGROUP(19, blsp_spi8, NA, blsp_i2c8, NA, NA, NA, NA, NA, NA),
+ PINGROUP(20, blsp_spi6, blsp_uart6, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(21, blsp_spi6, blsp_uart6, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(22, blsp_spi6, blsp_uart6, blsp_i2c6, NA, NA, NA, NA, NA, NA),
+ PINGROUP(23, blsp_spi6, blsp_uart6, blsp_i2c6, NA, NA, NA, NA, NA, NA),
+ PINGROUP(24, mdp_vsync, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(25, mdp_vsync, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(26, cam_mclk, NA, NA, NA, NA, qdss_tracedata_b, NA, NA, NA),
+ PINGROUP(27, cam_mclk, NA, NA, NA, NA, NA, qdss_tracedata_b, NA, NA),
+ PINGROUP(28, cam_mclk, NA, NA, NA, NA, qdss_tracedata_b, NA, NA, NA),
+ PINGROUP(29, cci0_i2c, NA, NA, NA, NA, qdss_tracedata_b, NA, NA, NA),
+ PINGROUP(30, cci0_i2c, NA, NA, NA, NA, NA, qdss_tracedata_b, NA, NA),
+ PINGROUP(31, NA, NA, NA, NA, NA, NA, NA, qdss_tracedata_b, NA),
+ PINGROUP(32, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(33, NA, NA, NA, NA, NA, NA, qdss_tracedata_b, NA, NA),
+ PINGROUP(34, NA, NA, NA, NA, NA, NA, NA, NA, qdss_tracedata_b),
+ PINGROUP(35, NA, NA, NA, NA, NA, NA, NA, NA, qdss_tracedata_b),
+ PINGROUP(36, NA, NA, NA, NA, NA, NA, qdss_tracedata_b, NA, NA),
+ PINGROUP(37, NA, NA, NA, qdss_tracedata_b, NA, NA, NA, NA, NA),
+ PINGROUP(38, NA, NA, NA, NA, NA, NA, NA, qdss_tracedata_b, NA),
+ PINGROUP(39, wcss_bt, sdc3, NA, qdss_tracedata_a, NA, NA, NA, NA, NA),
+ PINGROUP(40, wcss_wlan, sdc3, NA, qdss_tracedata_a, NA, NA, NA, NA, NA),
+ PINGROUP(41, wcss_wlan, sdc3, NA, qdss_tracedata_a, NA, NA, NA, NA, NA),
+ PINGROUP(42, wcss_wlan, sdc3, NA, qdss_tracedata_a, NA, NA, NA, NA, NA),
+ PINGROUP(43, wcss_wlan, sdc3, NA, NA, qdss_tracedata_a, NA, NA, NA, NA),
+ PINGROUP(44, wcss_wlan, sdc3, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(45, wcss_fm, NA, qdss_tracectl_a, NA, NA, NA, NA, NA, NA),
+ PINGROUP(46, wcss_fm, NA, NA, qdss_traceclk_a, NA, NA, NA, NA, NA),
+ PINGROUP(47, wcss_bt, NA, qdss_tracedata_a, NA, NA, NA, NA, NA, NA),
+ PINGROUP(48, wcss_bt, NA, qdss_tracedata_a, NA, NA, NA, NA, NA, NA),
+ PINGROUP(49, NA, NA, gcc_gp1_clk_a, NA, NA, NA, NA, NA, NA),
+ PINGROUP(50, NA, sd_write, gcc_gp2_clk_a, NA, NA, NA, NA, NA, NA),
+ PINGROUP(51, uim2_data, gcc_gp3_clk_a, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(52, uim2_clk, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(53, uim2_reset, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(54, uim2_present, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(55, uim1_data, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(56, uim1_clk, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(57, uim1_reset, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(58, uim1_present, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(59, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(60, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(61, uim_batt, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(62, sec_mi2s_mclk_a, pri_mi2s_mclk_b, qdss_tracedata_a, NA, NA, NA, NA, NA, NA),
+ PINGROUP(63, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(64, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(65, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(66, dmic0_clk, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(67, dmic0_data, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(68, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(69, qdss_tracedata_a, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(70, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(71, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(72, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(73, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(74, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(75, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(76, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(77, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(78, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(79, NA, ssbi_wtr1, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(80, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(81, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(82, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(83, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(84, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(85, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(86, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(87, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(88, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(89, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(90, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(91, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(92, NA, NA, pa_indicator, NA, NA, NA, NA, NA, NA),
+ PINGROUP(93, NA, modem_tsync, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(94, NA, ssbi_wtr1, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(95, NA, gsm1_tx, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(96, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(97, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(98, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(99, gsm0_tx, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(100, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(101, blsp1_spi, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(102, sec_mi2s, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(103, cci1_i2c, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(104, cci1_i2c, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(105, sec_mi2s, gcc_gp1_clk_b, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(106, blsp3_spi, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(107, blsp3_spi, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(108, wsa_vi, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(109, wsa_vi, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(110, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(111, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(112, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(113, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(114, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(115, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(116, pri_mi2s_mclk_c, cdc_pdm0, NA, NA, NA, qdss_tracedata_b, NA, NA, NA),
+ PINGROUP(117, lpass_slimbus, cdc_pdm0, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(118, lpass_slimbus0, cdc_pdm0, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(119, lpass_slimbus1, cdc_pdm0, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(120, cdc_pdm0, NA, NA, NA, NA, NA, NA, qdss_tracedata_a, NA),
+ PINGROUP(121, cdc_pdm0, NA, NA, NA, NA, NA, NA, qdss_tracedata_a, NA),
+ PINGROUP(122, pri_mi2s, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(123, pri_mi2s, m_voc, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(124, pri_mi2s, m_voc, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(125, pri_mi2s, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(126, pri_mi2s_mclk_a, sec_mi2s_mclk_b, NA, NA, NA, NA, NA, NA, qdss_tracedata_b),
+ PINGROUP(127, pri_mi2s, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(128, NA, NA, NA, NA, NA, NA, qdss_tracedata_b, NA, NA),
+ PINGROUP(129, qdss_tracedata_b, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(130, qdss_tracedata_a, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(131, qdss_tracedata_a, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(132, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(133, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(134, blsp_spi5, blsp_uart5, sec_mi2s, NA, NA, NA, NA, NA, NA),
+ PINGROUP(135, blsp_spi5, blsp_uart5, sec_mi2s, NA, NA, NA, NA, NA, NA),
+ PINGROUP(136, blsp_spi5, blsp_uart5, blsp_i2c5, NA, NA, NA, NA, NA, NA),
+ PINGROUP(137, blsp_spi5, blsp_uart5, blsp_i2c5, NA, NA, NA, NA, NA, NA),
+ PINGROUP(138, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(139, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(140, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(141, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(142, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(143, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(144, NA, NA, NA, NA, NA, NA, NA, NA, NA),
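+ /*
+ * SDC_QDSD_PINGROUP(name, ctl_reg, pull_bit, drv_bit): fixed-function pads
+ * whose pull and drive-strength settings live at the given bit offsets of
+ * a shared control register.
+ */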
+ SDC_QDSD_PINGROUP(sdc1_clk, 0x10a000, 13, 6),
+ SDC_QDSD_PINGROUP(sdc1_cmd, 0x10a000, 11, 3),
+ SDC_QDSD_PINGROUP(sdc1_data, 0x10a000, 9, 0),
+ SDC_QDSD_PINGROUP(sdc1_rclk, 0x10a000, 15, 0),
+ SDC_QDSD_PINGROUP(sdc2_clk, 0x109000, 14, 6),
+ SDC_QDSD_PINGROUP(sdc2_cmd, 0x109000, 11, 3),
+ SDC_QDSD_PINGROUP(sdc2_data, 0x109000, 9, 0),
+ SDC_QDSD_PINGROUP(qdsd_clk, 0x19c000, 3, 0),
+ SDC_QDSD_PINGROUP(qdsd_cmd, 0x19c000, 8, 5),
+ SDC_QDSD_PINGROUP(qdsd_data0, 0x19c000, 13, 10),
+ SDC_QDSD_PINGROUP(qdsd_data1, 0x19c000, 18, 15),
+ SDC_QDSD_PINGROUP(qdsd_data2, 0x19c000, 23, 20),
+ SDC_QDSD_PINGROUP(qdsd_data3, 0x19c000, 28, 25),
+};
+
+static const struct msm_pinctrl_soc_data msm8976_pinctrl = {
+ .pins = msm8976_pins,
+ .npins = ARRAY_SIZE(msm8976_pins),
+ .functions = msm8976_functions,
+ .nfunctions = ARRAY_SIZE(msm8976_functions),
+ .groups = msm8976_groups,
+ .ngroups = ARRAY_SIZE(msm8976_groups),
+ .ngpios = 145,
+};
+
+static int msm8976_pinctrl_probe(struct platform_device *pdev)
+{
+ return msm_pinctrl_probe(pdev, &msm8976_pinctrl);
+}
+
+static const struct of_device_id msm8976_pinctrl_of_match[] = {
+ { .compatible = "qcom,msm8976-pinctrl", },
+ { },
+};
+
+static struct platform_driver msm8976_pinctrl_driver = {
+ .driver = {
+ .name = "msm8976-pinctrl",
+ .of_match_table = msm8976_pinctrl_of_match,
+ },
+ .probe = msm8976_pinctrl_probe,
+ .remove = msm_pinctrl_remove,
+};
+
+static int __init msm8976_pinctrl_init(void)
+{
+ return platform_driver_register(&msm8976_pinctrl_driver);
+}
+arch_initcall(msm8976_pinctrl_init);
+
+static void __exit msm8976_pinctrl_exit(void)
+{
+ platform_driver_unregister(&msm8976_pinctrl_driver);
+}
+module_exit(msm8976_pinctrl_exit);
+
+MODULE_DESCRIPTION("Qualcomm msm8976 pinctrl driver");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(of, msm8976_pinctrl_of_match);
diff --git a/drivers/pinctrl/qcom/pinctrl-sc7180.c b/drivers/pinctrl/qcom/pinctrl-sc7180.c
index 6399c8a2bc22..d6cfad7417b1 100644
--- a/drivers/pinctrl/qcom/pinctrl-sc7180.c
+++ b/drivers/pinctrl/qcom/pinctrl-sc7180.c
@@ -77,6 +77,7 @@ enum {
.intr_cfg_reg = 0, \
.intr_status_reg = 0, \
.intr_target_reg = 0, \
+ .tile = SOUTH, \
.mux_bit = -1, \
.pull_bit = pull, \
.drv_bit = drv, \
@@ -102,6 +103,7 @@ enum {
.intr_cfg_reg = 0, \
.intr_status_reg = 0, \
.intr_target_reg = 0, \
+ .tile = SOUTH, \
.mux_bit = -1, \
.pull_bit = 3, \
.drv_bit = 0, \
@@ -1087,14 +1089,14 @@ static const struct msm_pingroup sc7180_groups[] = {
[116] = PINGROUP(116, WEST, qup04, qup04, _, _, _, _, _, _, _),
[117] = PINGROUP(117, WEST, dp_hot, _, _, _, _, _, _, _, _),
[118] = PINGROUP(118, WEST, _, _, _, _, _, _, _, _, _),
- [119] = UFS_RESET(ufs_reset, 0x97f000),
- [120] = SDC_QDSD_PINGROUP(sdc1_rclk, 0x97a000, 15, 0),
- [121] = SDC_QDSD_PINGROUP(sdc1_clk, 0x97a000, 13, 6),
- [122] = SDC_QDSD_PINGROUP(sdc1_cmd, 0x97a000, 11, 3),
- [123] = SDC_QDSD_PINGROUP(sdc1_data, 0x97a000, 9, 0),
- [124] = SDC_QDSD_PINGROUP(sdc2_clk, 0x97b000, 14, 6),
- [125] = SDC_QDSD_PINGROUP(sdc2_cmd, 0x97b000, 11, 3),
- [126] = SDC_QDSD_PINGROUP(sdc2_data, 0x97b000, 9, 0),
+ [119] = UFS_RESET(ufs_reset, 0x7f000),
+ [120] = SDC_QDSD_PINGROUP(sdc1_rclk, 0x7a000, 15, 0),
+ [121] = SDC_QDSD_PINGROUP(sdc1_clk, 0x7a000, 13, 6),
+ [122] = SDC_QDSD_PINGROUP(sdc1_cmd, 0x7a000, 11, 3),
+ [123] = SDC_QDSD_PINGROUP(sdc1_data, 0x7a000, 9, 0),
+ [124] = SDC_QDSD_PINGROUP(sdc2_clk, 0x7b000, 14, 6),
+ [125] = SDC_QDSD_PINGROUP(sdc2_cmd, 0x7b000, 11, 3),
+ [126] = SDC_QDSD_PINGROUP(sdc2_data, 0x7b000, 9, 0),
};
static const struct msm_pinctrl_soc_data sc7180_pinctrl = {
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
index f1fece5b9c06..653d1095bfea 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
@@ -1108,6 +1108,9 @@ static const struct of_device_id pmic_gpio_of_match[] = {
{ .compatible = "qcom,pm8005-gpio", .data = (void *) 4 },
{ .compatible = "qcom,pm8916-gpio", .data = (void *) 4 },
{ .compatible = "qcom,pm8941-gpio", .data = (void *) 36 },
+ /* pm8950 has 8 GPIOs with holes on 3 */
+ { .compatible = "qcom,pm8950-gpio", .data = (void *) 8 },
+ { .compatible = "qcom,pmi8950-gpio", .data = (void *) 2 },
{ .compatible = "qcom,pm8994-gpio", .data = (void *) 22 },
{ .compatible = "qcom,pmi8994-gpio", .data = (void *) 10 },
{ .compatible = "qcom,pm8998-gpio", .data = (void *) 26 },
@@ -1121,6 +1124,8 @@ static const struct of_device_id pmic_gpio_of_match[] = {
{ .compatible = "qcom,pm8150b-gpio", .data = (void *) 12 },
/* pm8150l has 12 GPIOs with holes on 7 */
{ .compatible = "qcom,pm8150l-gpio", .data = (void *) 12 },
+ { .compatible = "qcom,pm6150-gpio", .data = (void *) 10 },
+ { .compatible = "qcom,pm6150l-gpio", .data = (void *) 12 },
{ },
};
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
index 91407b024cf3..48602dba4967 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
@@ -915,6 +915,8 @@ static const struct of_device_id pmic_mpp_of_match[] = {
{ .compatible = "qcom,pm8841-mpp" }, /* 4 MPP's */
{ .compatible = "qcom,pm8916-mpp" }, /* 4 MPP's */
{ .compatible = "qcom,pm8941-mpp" }, /* 8 MPP's */
+ { .compatible = "qcom,pm8950-mpp" }, /* 4 MPP's */
+ { .compatible = "qcom,pmi8950-mpp" }, /* 4 MPP's */
{ .compatible = "qcom,pm8994-mpp" }, /* 8 MPP's */
{ .compatible = "qcom,pma8084-mpp" }, /* 8 MPP's */
{ .compatible = "qcom,spmi-mpp" }, /* Generic */
diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
index c1f7d0799ebe..dca86886b1f9 100644
--- a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
@@ -56,7 +56,6 @@
/**
* struct pm8xxx_pin_data - dynamic configuration for a pin
* @reg: address of the control register
- * @irq: IRQ from the PMIC interrupt controller
* @power_source: logical selected voltage source, mapping in static data
* is used translate to register values
* @mode: operating mode for the pin (input/output)
@@ -72,7 +71,6 @@
*/
struct pm8xxx_pin_data {
unsigned reg;
- int irq;
u8 power_source;
u8 mode;
bool open_drain;
@@ -93,9 +91,6 @@ struct pm8xxx_gpio {
struct pinctrl_desc desc;
unsigned npins;
-
- struct fwnode_handle *fwnode;
- struct irq_domain *domain;
};
static const struct pinconf_generic_params pm8xxx_gpio_bindings[] = {
@@ -491,13 +486,16 @@ static int pm8xxx_gpio_get(struct gpio_chip *chip, unsigned offset)
{
struct pm8xxx_gpio *pctrl = gpiochip_get_data(chip);
struct pm8xxx_pin_data *pin = pctrl->desc.pins[offset].drv_data;
+ int ret, irq;
bool state;
- int ret;
- if (pin->mode == PM8XXX_GPIO_MODE_OUTPUT) {
- ret = pin->output_value;
- } else if (pin->irq >= 0) {
- ret = irq_get_irqchip_state(pin->irq, IRQCHIP_STATE_LINE_LEVEL, &state);
+ if (pin->mode == PM8XXX_GPIO_MODE_OUTPUT)
+ return pin->output_value;
+
+ irq = chip->to_irq(chip, offset);
+ if (irq >= 0) {
+ ret = irq_get_irqchip_state(irq, IRQCHIP_STATE_LINE_LEVEL,
+ &state);
if (!ret)
ret = !!state;
} else
@@ -535,37 +533,6 @@ static int pm8xxx_gpio_of_xlate(struct gpio_chip *chip,
}
-static int pm8xxx_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
-{
- struct pm8xxx_gpio *pctrl = gpiochip_get_data(chip);
- struct pm8xxx_pin_data *pin = pctrl->desc.pins[offset].drv_data;
- struct irq_fwspec fwspec;
- int ret;
-
- fwspec.fwnode = pctrl->fwnode;
- fwspec.param_count = 2;
- fwspec.param[0] = offset + PM8XXX_GPIO_PHYSICAL_OFFSET;
- fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
-
- ret = irq_create_fwspec_mapping(&fwspec);
-
- /*
- * Cache the IRQ since pm8xxx_gpio_get() needs this to get determine the
- * line level.
- */
- pin->irq = ret;
-
- return ret;
-}
-
-static void pm8xxx_gpio_free(struct gpio_chip *chip, unsigned int offset)
-{
- struct pm8xxx_gpio *pctrl = gpiochip_get_data(chip);
- struct pm8xxx_pin_data *pin = pctrl->desc.pins[offset].drv_data;
-
- pin->irq = -1;
-}
-
#ifdef CONFIG_DEBUG_FS
#include <linux/seq_file.h>
@@ -624,13 +591,11 @@ static void pm8xxx_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
#endif
static const struct gpio_chip pm8xxx_gpio_template = {
- .free = pm8xxx_gpio_free,
.direction_input = pm8xxx_gpio_direction_input,
.direction_output = pm8xxx_gpio_direction_output,
.get = pm8xxx_gpio_get,
.set = pm8xxx_gpio_set,
.of_xlate = pm8xxx_gpio_of_xlate,
- .to_irq = pm8xxx_gpio_to_irq,
.dbg_show = pm8xxx_gpio_dbg_show,
.owner = THIS_MODULE,
};
@@ -712,43 +677,24 @@ static int pm8xxx_domain_translate(struct irq_domain *domain,
return 0;
}
-static int pm8xxx_domain_alloc(struct irq_domain *domain, unsigned int virq,
- unsigned int nr_irqs, void *data)
+static unsigned int pm8xxx_child_offset_to_irq(struct gpio_chip *chip,
+ unsigned int offset)
{
- struct pm8xxx_gpio *pctrl = container_of(domain->host_data,
- struct pm8xxx_gpio, chip);
- struct irq_fwspec *fwspec = data;
- struct irq_fwspec parent_fwspec;
- irq_hw_number_t hwirq;
- unsigned int type;
- int ret, i;
-
- ret = pm8xxx_domain_translate(domain, fwspec, &hwirq, &type);
- if (ret)
- return ret;
-
- for (i = 0; i < nr_irqs; i++)
- irq_domain_set_info(domain, virq + i, hwirq + i,
- &pm8xxx_irq_chip, pctrl, handle_level_irq,
- NULL, NULL);
+ return offset + PM8XXX_GPIO_PHYSICAL_OFFSET;
+}
- parent_fwspec.fwnode = domain->parent->fwnode;
- parent_fwspec.param_count = 2;
- parent_fwspec.param[0] = hwirq + 0xc0;
- parent_fwspec.param[1] = fwspec->param[1];
+static int pm8xxx_child_to_parent_hwirq(struct gpio_chip *chip,
+ unsigned int child_hwirq,
+ unsigned int child_type,
+ unsigned int *parent_hwirq,
+ unsigned int *parent_type)
+{
+ *parent_hwirq = child_hwirq + 0xc0;
+ *parent_type = child_type;
- return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs,
- &parent_fwspec);
+ return 0;
}
-static const struct irq_domain_ops pm8xxx_domain_ops = {
- .activate = gpiochip_irq_domain_activate,
- .alloc = pm8xxx_domain_alloc,
- .deactivate = gpiochip_irq_domain_deactivate,
- .free = irq_domain_free_irqs_common,
- .translate = pm8xxx_domain_translate,
-};
-
static const struct of_device_id pm8xxx_gpio_of_match[] = {
{ .compatible = "qcom,pm8018-gpio", .data = (void *) 6 },
{ .compatible = "qcom,pm8038-gpio", .data = (void *) 12 },
@@ -765,6 +711,7 @@ static int pm8xxx_gpio_probe(struct platform_device *pdev)
struct irq_domain *parent_domain;
struct device_node *parent_node;
struct pinctrl_pin_desc *pins;
+ struct gpio_irq_chip *girq;
struct pm8xxx_gpio *pctrl;
int ret, i;
@@ -800,7 +747,6 @@ static int pm8xxx_gpio_probe(struct platform_device *pdev)
for (i = 0; i < pctrl->desc.npins; i++) {
pin_data[i].reg = SSBI_REG_ADDR_GPIO(i);
- pin_data[i].irq = -1;
ret = pm8xxx_pin_populate(pctrl, &pin_data[i]);
if (ret)
@@ -841,19 +787,21 @@ static int pm8xxx_gpio_probe(struct platform_device *pdev)
if (!parent_domain)
return -ENXIO;
- pctrl->fwnode = of_node_to_fwnode(pctrl->dev->of_node);
- pctrl->domain = irq_domain_create_hierarchy(parent_domain, 0,
- pctrl->chip.ngpio,
- pctrl->fwnode,
- &pm8xxx_domain_ops,
- &pctrl->chip);
- if (!pctrl->domain)
- return -ENODEV;
+ girq = &pctrl->chip.irq;
+ girq->chip = &pm8xxx_irq_chip;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_level_irq;
+ girq->fwnode = of_node_to_fwnode(pctrl->dev->of_node);
+ girq->parent_domain = parent_domain;
+ girq->child_to_parent_hwirq = pm8xxx_child_to_parent_hwirq;
+ girq->populate_parent_fwspec = gpiochip_populate_parent_fwspec_fourcell;
+ girq->child_offset_to_irq = pm8xxx_child_offset_to_irq;
+ girq->child_irq_domain_ops.translate = pm8xxx_domain_translate;
ret = gpiochip_add_data(&pctrl->chip, pctrl);
if (ret) {
dev_err(&pdev->dev, "failed register gpiochip\n");
- goto err_chip_add_data;
+ return ret;
}
/*
@@ -883,8 +831,6 @@ static int pm8xxx_gpio_probe(struct platform_device *pdev)
unregister_gpiochip:
gpiochip_remove(&pctrl->chip);
-err_chip_add_data:
- irq_domain_remove(pctrl->domain);
return ret;
}
@@ -894,7 +840,6 @@ static int pm8xxx_gpio_remove(struct platform_device *pdev)
struct pm8xxx_gpio *pctrl = platform_get_drvdata(pdev);
gpiochip_remove(&pctrl->chip);
- irq_domain_remove(pctrl->domain);
return 0;
}
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c
index ebc27b06718c..0599f5127b01 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos.c
+++ b/drivers/pinctrl/samsung/pinctrl-exynos.c
@@ -486,8 +486,10 @@ int exynos_eint_wkup_init(struct samsung_pinctrl_drv_data *d)
if (match) {
irq_chip = kmemdup(match->data,
sizeof(*irq_chip), GFP_KERNEL);
- if (!irq_chip)
+ if (!irq_chip) {
+ of_node_put(np);
return -ENOMEM;
+ }
wkup_np = np;
break;
}
@@ -504,6 +506,7 @@ int exynos_eint_wkup_init(struct samsung_pinctrl_drv_data *d)
bank->nr_pins, &exynos_eint_irqd_ops, bank);
if (!bank->irq_domain) {
dev_err(dev, "wkup irq domain add failed\n");
+ of_node_put(wkup_np);
return -ENXIO;
}
@@ -518,8 +521,10 @@ int exynos_eint_wkup_init(struct samsung_pinctrl_drv_data *d)
weint_data = devm_kcalloc(dev,
bank->nr_pins, sizeof(*weint_data),
GFP_KERNEL);
- if (!weint_data)
+ if (!weint_data) {
+ of_node_put(wkup_np);
return -ENOMEM;
+ }
for (idx = 0; idx < bank->nr_pins; ++idx) {
irq = irq_of_parse_and_map(bank->of_node, idx);
@@ -536,10 +541,13 @@ int exynos_eint_wkup_init(struct samsung_pinctrl_drv_data *d)
}
}
- if (!muxed_banks)
+ if (!muxed_banks) {
+ of_node_put(wkup_np);
return 0;
+ }
irq = irq_of_parse_and_map(wkup_np, 0);
+ of_node_put(wkup_np);
if (!irq) {
dev_err(dev, "irq number for muxed EINTs not found\n");
return 0;
diff --git a/drivers/pinctrl/samsung/pinctrl-s3c24xx.c b/drivers/pinctrl/samsung/pinctrl-s3c24xx.c
index 7e824e4d20f4..9bd0a3de101d 100644
--- a/drivers/pinctrl/samsung/pinctrl-s3c24xx.c
+++ b/drivers/pinctrl/samsung/pinctrl-s3c24xx.c
@@ -490,8 +490,10 @@ static int s3c24xx_eint_init(struct samsung_pinctrl_drv_data *d)
return -ENODEV;
eint_data = devm_kzalloc(dev, sizeof(*eint_data), GFP_KERNEL);
- if (!eint_data)
+ if (!eint_data) {
+ of_node_put(eint_np);
return -ENOMEM;
+ }
eint_data->drvdata = d;
@@ -503,12 +505,14 @@ static int s3c24xx_eint_init(struct samsung_pinctrl_drv_data *d)
irq = irq_of_parse_and_map(eint_np, i);
if (!irq) {
dev_err(dev, "failed to get wakeup EINT IRQ %d\n", i);
+ of_node_put(eint_np);
return -ENXIO;
}
eint_data->parents[i] = irq;
irq_set_chained_handler_and_data(irq, handlers[i], eint_data);
}
+ of_node_put(eint_np);
bank = d->pin_banks;
for (i = 0; i < d->nr_banks; ++i, ++bank) {
diff --git a/drivers/pinctrl/samsung/pinctrl-s3c64xx.c b/drivers/pinctrl/samsung/pinctrl-s3c64xx.c
index c399f0932af5..f97f8179f2b1 100644
--- a/drivers/pinctrl/samsung/pinctrl-s3c64xx.c
+++ b/drivers/pinctrl/samsung/pinctrl-s3c64xx.c
@@ -704,8 +704,10 @@ static int s3c64xx_eint_eint0_init(struct samsung_pinctrl_drv_data *d)
return -ENODEV;
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
- if (!data)
+ if (!data) {
+ of_node_put(eint0_np);
return -ENOMEM;
+ }
data->drvdata = d;
for (i = 0; i < NUM_EINT0_IRQ; ++i) {
@@ -714,6 +716,7 @@ static int s3c64xx_eint_eint0_init(struct samsung_pinctrl_drv_data *d)
irq = irq_of_parse_and_map(eint0_np, i);
if (!irq) {
dev_err(dev, "failed to get wakeup EINT IRQ %d\n", i);
+ of_node_put(eint0_np);
return -ENXIO;
}
@@ -721,6 +724,7 @@ static int s3c64xx_eint_eint0_init(struct samsung_pinctrl_drv_data *d)
s3c64xx_eint0_handlers[i],
data);
}
+ of_node_put(eint0_np);
bank = d->pin_banks;
for (i = 0; i < d->nr_banks; ++i, ++bank) {
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c
index de0477bb469d..f26574ef234a 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.c
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.c
@@ -272,6 +272,7 @@ static int samsung_dt_node_to_map(struct pinctrl_dev *pctldev,
&reserved_maps, num_maps);
if (ret < 0) {
samsung_dt_free_map(pctldev, *map, *num_maps);
+ of_node_put(np);
return ret;
}
}
@@ -785,8 +786,10 @@ static struct samsung_pmx_func *samsung_pinctrl_create_functions(
if (!of_get_child_count(cfg_np)) {
ret = samsung_pinctrl_create_function(dev, drvdata,
cfg_np, func);
- if (ret < 0)
+ if (ret < 0) {
+ of_node_put(cfg_np);
return ERR_PTR(ret);
+ }
if (ret > 0) {
++func;
++func_cnt;
@@ -797,8 +800,11 @@ static struct samsung_pmx_func *samsung_pinctrl_create_functions(
for_each_child_of_node(cfg_np, func_np) {
ret = samsung_pinctrl_create_function(dev, drvdata,
func_np, func);
- if (ret < 0)
+ if (ret < 0) {
+ of_node_put(func_np);
+ of_node_put(cfg_np);
return ERR_PTR(ret);
+ }
if (ret > 0) {
++func;
++func_cnt;
diff --git a/drivers/pinctrl/sh-pfc/Kconfig b/drivers/pinctrl/sh-pfc/Kconfig
index 2dd716b016a3..28d66e7cb098 100644
--- a/drivers/pinctrl/sh-pfc/Kconfig
+++ b/drivers/pinctrl/sh-pfc/Kconfig
@@ -17,6 +17,7 @@ config PINCTRL_SH_PFC
select PINCTRL_PFC_R8A7745 if ARCH_R8A7745
select PINCTRL_PFC_R8A77470 if ARCH_R8A77470
select PINCTRL_PFC_R8A774A1 if ARCH_R8A774A1
+ select PINCTRL_PFC_R8A774B1 if ARCH_R8A774B1
select PINCTRL_PFC_R8A774C0 if ARCH_R8A774C0
select PINCTRL_PFC_R8A7778 if ARCH_R8A7778
select PINCTRL_PFC_R8A7779 if ARCH_R8A7779
@@ -26,7 +27,8 @@ config PINCTRL_SH_PFC
select PINCTRL_PFC_R8A7793 if ARCH_R8A7793
select PINCTRL_PFC_R8A7794 if ARCH_R8A7794
select PINCTRL_PFC_R8A7795 if ARCH_R8A7795
- select PINCTRL_PFC_R8A7796 if ARCH_R8A7796
+ select PINCTRL_PFC_R8A77960 if ARCH_R8A77960 || ARCH_R8A7796
+ select PINCTRL_PFC_R8A77961 if ARCH_R8A77961
select PINCTRL_PFC_R8A77965 if ARCH_R8A77965
select PINCTRL_PFC_R8A77970 if ARCH_R8A77970
select PINCTRL_PFC_R8A77980 if ARCH_R8A77980
@@ -86,6 +88,9 @@ config PINCTRL_PFC_R8A77470
config PINCTRL_PFC_R8A774A1
bool "RZ/G2M pin control support" if COMPILE_TEST
+config PINCTRL_PFC_R8A774B1
+ bool "RZ/G2N pin control support" if COMPILE_TEST
+
config PINCTRL_PFC_R8A774C0
bool "RZ/G2E pin control support" if COMPILE_TEST
@@ -113,9 +118,12 @@ config PINCTRL_PFC_R8A7794
config PINCTRL_PFC_R8A7795
bool "R-Car H3 pin control support" if COMPILE_TEST
-config PINCTRL_PFC_R8A7796
+config PINCTRL_PFC_R8A77960
bool "R-Car M3-W pin control support" if COMPILE_TEST
+config PINCTRL_PFC_R8A77961
+ bool "R-Car M3-W+ pin control support" if COMPILE_TEST
+
config PINCTRL_PFC_R8A77965
bool "R-Car M3-N pin control support" if COMPILE_TEST
diff --git a/drivers/pinctrl/sh-pfc/Makefile b/drivers/pinctrl/sh-pfc/Makefile
index 8c95abcfcc00..3bc05666e1a6 100644
--- a/drivers/pinctrl/sh-pfc/Makefile
+++ b/drivers/pinctrl/sh-pfc/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_PINCTRL_PFC_R8A7744) += pfc-r8a7791.o
obj-$(CONFIG_PINCTRL_PFC_R8A7745) += pfc-r8a7794.o
obj-$(CONFIG_PINCTRL_PFC_R8A77470) += pfc-r8a77470.o
obj-$(CONFIG_PINCTRL_PFC_R8A774A1) += pfc-r8a7796.o
+obj-$(CONFIG_PINCTRL_PFC_R8A774B1) += pfc-r8a77965.o
obj-$(CONFIG_PINCTRL_PFC_R8A774C0) += pfc-r8a77990.o
obj-$(CONFIG_PINCTRL_PFC_R8A7778) += pfc-r8a7778.o
obj-$(CONFIG_PINCTRL_PFC_R8A7779) += pfc-r8a7779.o
@@ -19,7 +20,8 @@ obj-$(CONFIG_PINCTRL_PFC_R8A7793) += pfc-r8a7791.o
obj-$(CONFIG_PINCTRL_PFC_R8A7794) += pfc-r8a7794.o
obj-$(CONFIG_PINCTRL_PFC_R8A7795) += pfc-r8a7795.o
obj-$(CONFIG_PINCTRL_PFC_R8A7795) += pfc-r8a7795-es1.o
-obj-$(CONFIG_PINCTRL_PFC_R8A7796) += pfc-r8a7796.o
+obj-$(CONFIG_PINCTRL_PFC_R8A77960) += pfc-r8a7796.o
+obj-$(CONFIG_PINCTRL_PFC_R8A77961) += pfc-r8a7796.o
obj-$(CONFIG_PINCTRL_PFC_R8A77965) += pfc-r8a77965.o
obj-$(CONFIG_PINCTRL_PFC_R8A77970) += pfc-r8a77970.o
obj-$(CONFIG_PINCTRL_PFC_R8A77980) += pfc-r8a77980.o
diff --git a/drivers/pinctrl/sh-pfc/core.c b/drivers/pinctrl/sh-pfc/core.c
index b8640ad41bef..65e52688f091 100644
--- a/drivers/pinctrl/sh-pfc/core.c
+++ b/drivers/pinctrl/sh-pfc/core.c
@@ -29,12 +29,12 @@
static int sh_pfc_map_resources(struct sh_pfc *pfc,
struct platform_device *pdev)
{
- unsigned int num_windows, num_irqs;
struct sh_pfc_window *windows;
unsigned int *irqs = NULL;
+ unsigned int num_windows;
struct resource *res;
unsigned int i;
- int irq;
+ int num_irqs;
/* Count the MEM and IRQ resources. */
for (num_windows = 0;; num_windows++) {
@@ -42,17 +42,13 @@ static int sh_pfc_map_resources(struct sh_pfc *pfc,
if (!res)
break;
}
- for (num_irqs = 0;; num_irqs++) {
- irq = platform_get_irq(pdev, num_irqs);
- if (irq == -EPROBE_DEFER)
- return irq;
- if (irq < 0)
- break;
- }
-
if (num_windows == 0)
return -EINVAL;
+ num_irqs = platform_irq_count(pdev);
+ if (num_irqs < 0)
+ return num_irqs;
+
/* Allocate memory windows and IRQs arrays. */
windows = devm_kcalloc(pfc->dev, num_windows, sizeof(*windows),
GFP_KERNEL);
@@ -518,6 +514,12 @@ static const struct of_device_id sh_pfc_of_table[] = {
.data = &r8a774a1_pinmux_info,
},
#endif
+#ifdef CONFIG_PINCTRL_PFC_R8A774B1
+ {
+ .compatible = "renesas,pfc-r8a774b1",
+ .data = &r8a774b1_pinmux_info,
+ },
+#endif
#ifdef CONFIG_PINCTRL_PFC_R8A774C0
{
.compatible = "renesas,pfc-r8a774c0",
@@ -579,10 +581,16 @@ static const struct of_device_id sh_pfc_of_table[] = {
},
#endif /* DEBUG */
#endif
-#ifdef CONFIG_PINCTRL_PFC_R8A7796
+#ifdef CONFIG_PINCTRL_PFC_R8A77960
{
.compatible = "renesas,pfc-r8a7796",
- .data = &r8a7796_pinmux_info,
+ .data = &r8a77960_pinmux_info,
+ },
+#endif
+#ifdef CONFIG_PINCTRL_PFC_R8A77961
+ {
+ .compatible = "renesas,pfc-r8a77961",
+ .data = &r8a77961_pinmux_info,
},
#endif
#ifdef CONFIG_PINCTRL_PFC_R8A77965
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7795-es1.c b/drivers/pinctrl/sh-pfc/pfc-r8a7795-es1.c
index 95f9aae3bfba..ad05da8f6516 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7795-es1.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7795-es1.c
@@ -718,7 +718,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_PHYS_MSEL(IP1_23_20, HRX3_D, I2C_SEL_3_0, SEL_HSCIF3_3),
PINMUX_IPSR_PHYS_MSEL(IP1_23_20, VI4_DATA7_B, I2C_SEL_3_0, SEL_VIN4_1),
PINMUX_IPSR_PHYS_MSEL(IP1_23_20, IERX_B, I2C_SEL_3_0, SEL_IEBUS_1),
- PINMUX_IPSR_PHYS(IP0_23_20, SCL3, I2C_SEL_3_1),
+ PINMUX_IPSR_PHYS(IP1_23_20, SCL3, I2C_SEL_3_1),
PINMUX_IPSR_PHYS_MSEL(IP1_27_24, PWM2_A, I2C_SEL_3_0, SEL_PWM2_0),
PINMUX_IPSR_MSEL(IP1_27_24, A20, I2C_SEL_3_0),
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7795.c b/drivers/pinctrl/sh-pfc/pfc-r8a7795.c
index 7df010f757b1..d3145aa135d0 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7795.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7795.c
@@ -726,7 +726,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_PHYS_MSEL(IP1_23_20, HRX3_D, I2C_SEL_3_0, SEL_HSCIF3_3),
PINMUX_IPSR_PHYS_MSEL(IP1_23_20, VI4_DATA7_B, I2C_SEL_3_0, SEL_VIN4_1),
PINMUX_IPSR_PHYS_MSEL(IP1_23_20, IERX_B, I2C_SEL_3_0, SEL_IEBUS_1),
- PINMUX_IPSR_PHYS(IP0_23_20, SCL3, I2C_SEL_3_1),
+ PINMUX_IPSR_PHYS(IP1_23_20, SCL3, I2C_SEL_3_1),
PINMUX_IPSR_PHYS_MSEL(IP1_27_24, PWM2_A, I2C_SEL_3_0, SEL_PWM2_0),
PINMUX_IPSR_PHYS_MSEL(IP1_27_24, HTX3_D, I2C_SEL_3_0, SEL_HSCIF3_3),
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7796.c b/drivers/pinctrl/sh-pfc/pfc-r8a7796.c
index 61db7c7a35ec..a2496baca85d 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7796.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7796.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * R8A7796 processor support - PFC hardware block.
+ * R8A7796 (R-Car M3-W/W+) support - PFC hardware block.
*
* Copyright (C) 2016-2019 Renesas Electronics Corp.
*
@@ -729,7 +729,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_PHYS_MSEL(IP1_23_20, HRX3_D, I2C_SEL_3_0, SEL_HSCIF3_3),
PINMUX_IPSR_PHYS_MSEL(IP1_23_20, VI4_DATA7_B, I2C_SEL_3_0, SEL_VIN4_1),
PINMUX_IPSR_PHYS_MSEL(IP1_23_20, IERX_B, I2C_SEL_3_0, SEL_IEBUS_1),
- PINMUX_IPSR_PHYS(IP0_23_20, SCL3, I2C_SEL_3_1),
+ PINMUX_IPSR_PHYS(IP1_23_20, SCL3, I2C_SEL_3_1),
PINMUX_IPSR_PHYS_MSEL(IP1_27_24, PWM2_A, I2C_SEL_3_0, SEL_PWM2_0),
PINMUX_IPSR_PHYS_MSEL(IP1_27_24, HTX3_D, I2C_SEL_3_0, SEL_HSCIF3_3),
@@ -6210,8 +6210,8 @@ const struct sh_pfc_soc_info r8a774a1_pinmux_info = {
};
#endif
-#ifdef CONFIG_PINCTRL_PFC_R8A7796
-const struct sh_pfc_soc_info r8a7796_pinmux_info = {
+#ifdef CONFIG_PINCTRL_PFC_R8A77960
+const struct sh_pfc_soc_info r8a77960_pinmux_info = {
.name = "r8a77960_pfc",
.ops = &r8a7796_pinmux_ops,
.unlock_reg = 0xe6060000, /* PMMR */
@@ -6236,3 +6236,30 @@ const struct sh_pfc_soc_info r8a7796_pinmux_info = {
.pinmux_data_size = ARRAY_SIZE(pinmux_data),
};
#endif
+
+#ifdef CONFIG_PINCTRL_PFC_R8A77961
+const struct sh_pfc_soc_info r8a77961_pinmux_info = {
+ .name = "r8a77961_pfc",
+ .ops = &r8a7796_pinmux_ops,
+ .unlock_reg = 0xe6060000, /* PMMR */
+
+ .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
+
+ .pins = pinmux_pins,
+ .nr_pins = ARRAY_SIZE(pinmux_pins),
+ .groups = pinmux_groups.common,
+ .nr_groups = ARRAY_SIZE(pinmux_groups.common) +
+ ARRAY_SIZE(pinmux_groups.automotive),
+ .functions = pinmux_functions.common,
+ .nr_functions = ARRAY_SIZE(pinmux_functions.common) +
+ ARRAY_SIZE(pinmux_functions.automotive),
+
+ .cfg_regs = pinmux_config_regs,
+ .drive_regs = pinmux_drive_regs,
+ .bias_regs = pinmux_bias_regs,
+ .ioctrl_regs = pinmux_ioctrl_regs,
+
+ .pinmux_data = pinmux_data,
+ .pinmux_data_size = ARRAY_SIZE(pinmux_data),
+};
+#endif
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a77965.c b/drivers/pinctrl/sh-pfc/pfc-r8a77965.c
index 697c77a4ea95..8bdf33c807f6 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a77965.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a77965.c
@@ -732,7 +732,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_PHYS_MSEL(IP1_23_20, HRX3_D, I2C_SEL_3_0, SEL_HSCIF3_3),
PINMUX_IPSR_PHYS_MSEL(IP1_23_20, VI4_DATA7_B, I2C_SEL_3_0, SEL_VIN4_1),
PINMUX_IPSR_PHYS_MSEL(IP1_23_20, IERX_B, I2C_SEL_3_0, SEL_IEBUS_1),
- PINMUX_IPSR_PHYS(IP0_23_20, SCL3, I2C_SEL_3_1),
+ PINMUX_IPSR_PHYS(IP1_23_20, SCL3, I2C_SEL_3_1),
PINMUX_IPSR_PHYS_MSEL(IP1_27_24, PWM2_A, I2C_SEL_3_0, SEL_PWM2_0),
PINMUX_IPSR_PHYS_MSEL(IP1_27_24, HTX3_D, I2C_SEL_3_0, SEL_HSCIF3_3),
@@ -4378,355 +4378,362 @@ static const unsigned int vin5_clk_mux[] = {
VI5_CLK_MARK,
};
-static const struct sh_pfc_pin_group pinmux_groups[] = {
- SH_PFC_PIN_GROUP(audio_clk_a_a),
- SH_PFC_PIN_GROUP(audio_clk_a_b),
- SH_PFC_PIN_GROUP(audio_clk_a_c),
- SH_PFC_PIN_GROUP(audio_clk_b_a),
- SH_PFC_PIN_GROUP(audio_clk_b_b),
- SH_PFC_PIN_GROUP(audio_clk_c_a),
- SH_PFC_PIN_GROUP(audio_clk_c_b),
- SH_PFC_PIN_GROUP(audio_clkout_a),
- SH_PFC_PIN_GROUP(audio_clkout_b),
- SH_PFC_PIN_GROUP(audio_clkout_c),
- SH_PFC_PIN_GROUP(audio_clkout_d),
- SH_PFC_PIN_GROUP(audio_clkout1_a),
- SH_PFC_PIN_GROUP(audio_clkout1_b),
- SH_PFC_PIN_GROUP(audio_clkout2_a),
- SH_PFC_PIN_GROUP(audio_clkout2_b),
- SH_PFC_PIN_GROUP(audio_clkout3_a),
- SH_PFC_PIN_GROUP(audio_clkout3_b),
- SH_PFC_PIN_GROUP(avb_link),
- SH_PFC_PIN_GROUP(avb_magic),
- SH_PFC_PIN_GROUP(avb_phy_int),
- SH_PFC_PIN_GROUP_ALIAS(avb_mdc, avb_mdio), /* Deprecated */
- SH_PFC_PIN_GROUP(avb_mdio),
- SH_PFC_PIN_GROUP(avb_mii),
- SH_PFC_PIN_GROUP(avb_avtp_pps),
- SH_PFC_PIN_GROUP(avb_avtp_match_a),
- SH_PFC_PIN_GROUP(avb_avtp_capture_a),
- SH_PFC_PIN_GROUP(avb_avtp_match_b),
- SH_PFC_PIN_GROUP(avb_avtp_capture_b),
- SH_PFC_PIN_GROUP(can0_data_a),
- SH_PFC_PIN_GROUP(can0_data_b),
- SH_PFC_PIN_GROUP(can1_data),
- SH_PFC_PIN_GROUP(can_clk),
- SH_PFC_PIN_GROUP(canfd0_data_a),
- SH_PFC_PIN_GROUP(canfd0_data_b),
- SH_PFC_PIN_GROUP(canfd1_data),
- SH_PFC_PIN_GROUP(drif0_ctrl_a),
- SH_PFC_PIN_GROUP(drif0_data0_a),
- SH_PFC_PIN_GROUP(drif0_data1_a),
- SH_PFC_PIN_GROUP(drif0_ctrl_b),
- SH_PFC_PIN_GROUP(drif0_data0_b),
- SH_PFC_PIN_GROUP(drif0_data1_b),
- SH_PFC_PIN_GROUP(drif0_ctrl_c),
- SH_PFC_PIN_GROUP(drif0_data0_c),
- SH_PFC_PIN_GROUP(drif0_data1_c),
- SH_PFC_PIN_GROUP(drif1_ctrl_a),
- SH_PFC_PIN_GROUP(drif1_data0_a),
- SH_PFC_PIN_GROUP(drif1_data1_a),
- SH_PFC_PIN_GROUP(drif1_ctrl_b),
- SH_PFC_PIN_GROUP(drif1_data0_b),
- SH_PFC_PIN_GROUP(drif1_data1_b),
- SH_PFC_PIN_GROUP(drif1_ctrl_c),
- SH_PFC_PIN_GROUP(drif1_data0_c),
- SH_PFC_PIN_GROUP(drif1_data1_c),
- SH_PFC_PIN_GROUP(drif2_ctrl_a),
- SH_PFC_PIN_GROUP(drif2_data0_a),
- SH_PFC_PIN_GROUP(drif2_data1_a),
- SH_PFC_PIN_GROUP(drif2_ctrl_b),
- SH_PFC_PIN_GROUP(drif2_data0_b),
- SH_PFC_PIN_GROUP(drif2_data1_b),
- SH_PFC_PIN_GROUP(drif3_ctrl_a),
- SH_PFC_PIN_GROUP(drif3_data0_a),
- SH_PFC_PIN_GROUP(drif3_data1_a),
- SH_PFC_PIN_GROUP(drif3_ctrl_b),
- SH_PFC_PIN_GROUP(drif3_data0_b),
- SH_PFC_PIN_GROUP(drif3_data1_b),
- SH_PFC_PIN_GROUP(du_rgb666),
- SH_PFC_PIN_GROUP(du_rgb888),
- SH_PFC_PIN_GROUP(du_clk_out_0),
- SH_PFC_PIN_GROUP(du_clk_out_1),
- SH_PFC_PIN_GROUP(du_sync),
- SH_PFC_PIN_GROUP(du_oddf),
- SH_PFC_PIN_GROUP(du_cde),
- SH_PFC_PIN_GROUP(du_disp),
- SH_PFC_PIN_GROUP(hscif0_data),
- SH_PFC_PIN_GROUP(hscif0_clk),
- SH_PFC_PIN_GROUP(hscif0_ctrl),
- SH_PFC_PIN_GROUP(hscif1_data_a),
- SH_PFC_PIN_GROUP(hscif1_clk_a),
- SH_PFC_PIN_GROUP(hscif1_ctrl_a),
- SH_PFC_PIN_GROUP(hscif1_data_b),
- SH_PFC_PIN_GROUP(hscif1_clk_b),
- SH_PFC_PIN_GROUP(hscif1_ctrl_b),
- SH_PFC_PIN_GROUP(hscif2_data_a),
- SH_PFC_PIN_GROUP(hscif2_clk_a),
- SH_PFC_PIN_GROUP(hscif2_ctrl_a),
- SH_PFC_PIN_GROUP(hscif2_data_b),
- SH_PFC_PIN_GROUP(hscif2_clk_b),
- SH_PFC_PIN_GROUP(hscif2_ctrl_b),
- SH_PFC_PIN_GROUP(hscif2_data_c),
- SH_PFC_PIN_GROUP(hscif2_clk_c),
- SH_PFC_PIN_GROUP(hscif2_ctrl_c),
- SH_PFC_PIN_GROUP(hscif3_data_a),
- SH_PFC_PIN_GROUP(hscif3_clk),
- SH_PFC_PIN_GROUP(hscif3_ctrl),
- SH_PFC_PIN_GROUP(hscif3_data_b),
- SH_PFC_PIN_GROUP(hscif3_data_c),
- SH_PFC_PIN_GROUP(hscif3_data_d),
- SH_PFC_PIN_GROUP(hscif4_data_a),
- SH_PFC_PIN_GROUP(hscif4_clk),
- SH_PFC_PIN_GROUP(hscif4_ctrl),
- SH_PFC_PIN_GROUP(hscif4_data_b),
- SH_PFC_PIN_GROUP(i2c0),
- SH_PFC_PIN_GROUP(i2c1_a),
- SH_PFC_PIN_GROUP(i2c1_b),
- SH_PFC_PIN_GROUP(i2c2_a),
- SH_PFC_PIN_GROUP(i2c2_b),
- SH_PFC_PIN_GROUP(i2c3),
- SH_PFC_PIN_GROUP(i2c5),
- SH_PFC_PIN_GROUP(i2c6_a),
- SH_PFC_PIN_GROUP(i2c6_b),
- SH_PFC_PIN_GROUP(i2c6_c),
- SH_PFC_PIN_GROUP(intc_ex_irq0),
- SH_PFC_PIN_GROUP(intc_ex_irq1),
- SH_PFC_PIN_GROUP(intc_ex_irq2),
- SH_PFC_PIN_GROUP(intc_ex_irq3),
- SH_PFC_PIN_GROUP(intc_ex_irq4),
- SH_PFC_PIN_GROUP(intc_ex_irq5),
- SH_PFC_PIN_GROUP(msiof0_clk),
- SH_PFC_PIN_GROUP(msiof0_sync),
- SH_PFC_PIN_GROUP(msiof0_ss1),
- SH_PFC_PIN_GROUP(msiof0_ss2),
- SH_PFC_PIN_GROUP(msiof0_txd),
- SH_PFC_PIN_GROUP(msiof0_rxd),
- SH_PFC_PIN_GROUP(msiof1_clk_a),
- SH_PFC_PIN_GROUP(msiof1_sync_a),
- SH_PFC_PIN_GROUP(msiof1_ss1_a),
- SH_PFC_PIN_GROUP(msiof1_ss2_a),
- SH_PFC_PIN_GROUP(msiof1_txd_a),
- SH_PFC_PIN_GROUP(msiof1_rxd_a),
- SH_PFC_PIN_GROUP(msiof1_clk_b),
- SH_PFC_PIN_GROUP(msiof1_sync_b),
- SH_PFC_PIN_GROUP(msiof1_ss1_b),
- SH_PFC_PIN_GROUP(msiof1_ss2_b),
- SH_PFC_PIN_GROUP(msiof1_txd_b),
- SH_PFC_PIN_GROUP(msiof1_rxd_b),
- SH_PFC_PIN_GROUP(msiof1_clk_c),
- SH_PFC_PIN_GROUP(msiof1_sync_c),
- SH_PFC_PIN_GROUP(msiof1_ss1_c),
- SH_PFC_PIN_GROUP(msiof1_ss2_c),
- SH_PFC_PIN_GROUP(msiof1_txd_c),
- SH_PFC_PIN_GROUP(msiof1_rxd_c),
- SH_PFC_PIN_GROUP(msiof1_clk_d),
- SH_PFC_PIN_GROUP(msiof1_sync_d),
- SH_PFC_PIN_GROUP(msiof1_ss1_d),
- SH_PFC_PIN_GROUP(msiof1_ss2_d),
- SH_PFC_PIN_GROUP(msiof1_txd_d),
- SH_PFC_PIN_GROUP(msiof1_rxd_d),
- SH_PFC_PIN_GROUP(msiof1_clk_e),
- SH_PFC_PIN_GROUP(msiof1_sync_e),
- SH_PFC_PIN_GROUP(msiof1_ss1_e),
- SH_PFC_PIN_GROUP(msiof1_ss2_e),
- SH_PFC_PIN_GROUP(msiof1_txd_e),
- SH_PFC_PIN_GROUP(msiof1_rxd_e),
- SH_PFC_PIN_GROUP(msiof1_clk_f),
- SH_PFC_PIN_GROUP(msiof1_sync_f),
- SH_PFC_PIN_GROUP(msiof1_ss1_f),
- SH_PFC_PIN_GROUP(msiof1_ss2_f),
- SH_PFC_PIN_GROUP(msiof1_txd_f),
- SH_PFC_PIN_GROUP(msiof1_rxd_f),
- SH_PFC_PIN_GROUP(msiof1_clk_g),
- SH_PFC_PIN_GROUP(msiof1_sync_g),
- SH_PFC_PIN_GROUP(msiof1_ss1_g),
- SH_PFC_PIN_GROUP(msiof1_ss2_g),
- SH_PFC_PIN_GROUP(msiof1_txd_g),
- SH_PFC_PIN_GROUP(msiof1_rxd_g),
- SH_PFC_PIN_GROUP(msiof2_clk_a),
- SH_PFC_PIN_GROUP(msiof2_sync_a),
- SH_PFC_PIN_GROUP(msiof2_ss1_a),
- SH_PFC_PIN_GROUP(msiof2_ss2_a),
- SH_PFC_PIN_GROUP(msiof2_txd_a),
- SH_PFC_PIN_GROUP(msiof2_rxd_a),
- SH_PFC_PIN_GROUP(msiof2_clk_b),
- SH_PFC_PIN_GROUP(msiof2_sync_b),
- SH_PFC_PIN_GROUP(msiof2_ss1_b),
- SH_PFC_PIN_GROUP(msiof2_ss2_b),
- SH_PFC_PIN_GROUP(msiof2_txd_b),
- SH_PFC_PIN_GROUP(msiof2_rxd_b),
- SH_PFC_PIN_GROUP(msiof2_clk_c),
- SH_PFC_PIN_GROUP(msiof2_sync_c),
- SH_PFC_PIN_GROUP(msiof2_ss1_c),
- SH_PFC_PIN_GROUP(msiof2_ss2_c),
- SH_PFC_PIN_GROUP(msiof2_txd_c),
- SH_PFC_PIN_GROUP(msiof2_rxd_c),
- SH_PFC_PIN_GROUP(msiof2_clk_d),
- SH_PFC_PIN_GROUP(msiof2_sync_d),
- SH_PFC_PIN_GROUP(msiof2_ss1_d),
- SH_PFC_PIN_GROUP(msiof2_ss2_d),
- SH_PFC_PIN_GROUP(msiof2_txd_d),
- SH_PFC_PIN_GROUP(msiof2_rxd_d),
- SH_PFC_PIN_GROUP(msiof3_clk_a),
- SH_PFC_PIN_GROUP(msiof3_sync_a),
- SH_PFC_PIN_GROUP(msiof3_ss1_a),
- SH_PFC_PIN_GROUP(msiof3_ss2_a),
- SH_PFC_PIN_GROUP(msiof3_txd_a),
- SH_PFC_PIN_GROUP(msiof3_rxd_a),
- SH_PFC_PIN_GROUP(msiof3_clk_b),
- SH_PFC_PIN_GROUP(msiof3_sync_b),
- SH_PFC_PIN_GROUP(msiof3_ss1_b),
- SH_PFC_PIN_GROUP(msiof3_ss2_b),
- SH_PFC_PIN_GROUP(msiof3_txd_b),
- SH_PFC_PIN_GROUP(msiof3_rxd_b),
- SH_PFC_PIN_GROUP(msiof3_clk_c),
- SH_PFC_PIN_GROUP(msiof3_sync_c),
- SH_PFC_PIN_GROUP(msiof3_txd_c),
- SH_PFC_PIN_GROUP(msiof3_rxd_c),
- SH_PFC_PIN_GROUP(msiof3_clk_d),
- SH_PFC_PIN_GROUP(msiof3_sync_d),
- SH_PFC_PIN_GROUP(msiof3_ss1_d),
- SH_PFC_PIN_GROUP(msiof3_txd_d),
- SH_PFC_PIN_GROUP(msiof3_rxd_d),
- SH_PFC_PIN_GROUP(msiof3_clk_e),
- SH_PFC_PIN_GROUP(msiof3_sync_e),
- SH_PFC_PIN_GROUP(msiof3_ss1_e),
- SH_PFC_PIN_GROUP(msiof3_ss2_e),
- SH_PFC_PIN_GROUP(msiof3_txd_e),
- SH_PFC_PIN_GROUP(msiof3_rxd_e),
- SH_PFC_PIN_GROUP(pwm0),
- SH_PFC_PIN_GROUP(pwm1_a),
- SH_PFC_PIN_GROUP(pwm1_b),
- SH_PFC_PIN_GROUP(pwm2_a),
- SH_PFC_PIN_GROUP(pwm2_b),
- SH_PFC_PIN_GROUP(pwm3_a),
- SH_PFC_PIN_GROUP(pwm3_b),
- SH_PFC_PIN_GROUP(pwm4_a),
- SH_PFC_PIN_GROUP(pwm4_b),
- SH_PFC_PIN_GROUP(pwm5_a),
- SH_PFC_PIN_GROUP(pwm5_b),
- SH_PFC_PIN_GROUP(pwm6_a),
- SH_PFC_PIN_GROUP(pwm6_b),
- SH_PFC_PIN_GROUP(sata0_devslp_a),
- SH_PFC_PIN_GROUP(sata0_devslp_b),
- SH_PFC_PIN_GROUP(scif0_data),
- SH_PFC_PIN_GROUP(scif0_clk),
- SH_PFC_PIN_GROUP(scif0_ctrl),
- SH_PFC_PIN_GROUP(scif1_data_a),
- SH_PFC_PIN_GROUP(scif1_clk),
- SH_PFC_PIN_GROUP(scif1_ctrl),
- SH_PFC_PIN_GROUP(scif1_data_b),
- SH_PFC_PIN_GROUP(scif2_data_a),
- SH_PFC_PIN_GROUP(scif2_clk),
- SH_PFC_PIN_GROUP(scif2_data_b),
- SH_PFC_PIN_GROUP(scif3_data_a),
- SH_PFC_PIN_GROUP(scif3_clk),
- SH_PFC_PIN_GROUP(scif3_ctrl),
- SH_PFC_PIN_GROUP(scif3_data_b),
- SH_PFC_PIN_GROUP(scif4_data_a),
- SH_PFC_PIN_GROUP(scif4_clk_a),
- SH_PFC_PIN_GROUP(scif4_ctrl_a),
- SH_PFC_PIN_GROUP(scif4_data_b),
- SH_PFC_PIN_GROUP(scif4_clk_b),
- SH_PFC_PIN_GROUP(scif4_ctrl_b),
- SH_PFC_PIN_GROUP(scif4_data_c),
- SH_PFC_PIN_GROUP(scif4_clk_c),
- SH_PFC_PIN_GROUP(scif4_ctrl_c),
- SH_PFC_PIN_GROUP(scif5_data_a),
- SH_PFC_PIN_GROUP(scif5_clk_a),
- SH_PFC_PIN_GROUP(scif5_data_b),
- SH_PFC_PIN_GROUP(scif5_clk_b),
- SH_PFC_PIN_GROUP(scif_clk_a),
- SH_PFC_PIN_GROUP(scif_clk_b),
- SH_PFC_PIN_GROUP(sdhi0_data1),
- SH_PFC_PIN_GROUP(sdhi0_data4),
- SH_PFC_PIN_GROUP(sdhi0_ctrl),
- SH_PFC_PIN_GROUP(sdhi0_cd),
- SH_PFC_PIN_GROUP(sdhi0_wp),
- SH_PFC_PIN_GROUP(sdhi1_data1),
- SH_PFC_PIN_GROUP(sdhi1_data4),
- SH_PFC_PIN_GROUP(sdhi1_ctrl),
- SH_PFC_PIN_GROUP(sdhi1_cd),
- SH_PFC_PIN_GROUP(sdhi1_wp),
- SH_PFC_PIN_GROUP(sdhi2_data1),
- SH_PFC_PIN_GROUP(sdhi2_data4),
- SH_PFC_PIN_GROUP(sdhi2_data8),
- SH_PFC_PIN_GROUP(sdhi2_ctrl),
- SH_PFC_PIN_GROUP(sdhi2_cd_a),
- SH_PFC_PIN_GROUP(sdhi2_wp_a),
- SH_PFC_PIN_GROUP(sdhi2_cd_b),
- SH_PFC_PIN_GROUP(sdhi2_wp_b),
- SH_PFC_PIN_GROUP(sdhi2_ds),
- SH_PFC_PIN_GROUP(sdhi3_data1),
- SH_PFC_PIN_GROUP(sdhi3_data4),
- SH_PFC_PIN_GROUP(sdhi3_data8),
- SH_PFC_PIN_GROUP(sdhi3_ctrl),
- SH_PFC_PIN_GROUP(sdhi3_cd),
- SH_PFC_PIN_GROUP(sdhi3_wp),
- SH_PFC_PIN_GROUP(sdhi3_ds),
- SH_PFC_PIN_GROUP(ssi0_data),
- SH_PFC_PIN_GROUP(ssi01239_ctrl),
- SH_PFC_PIN_GROUP(ssi1_data_a),
- SH_PFC_PIN_GROUP(ssi1_data_b),
- SH_PFC_PIN_GROUP(ssi1_ctrl_a),
- SH_PFC_PIN_GROUP(ssi1_ctrl_b),
- SH_PFC_PIN_GROUP(ssi2_data_a),
- SH_PFC_PIN_GROUP(ssi2_data_b),
- SH_PFC_PIN_GROUP(ssi2_ctrl_a),
- SH_PFC_PIN_GROUP(ssi2_ctrl_b),
- SH_PFC_PIN_GROUP(ssi3_data),
- SH_PFC_PIN_GROUP(ssi349_ctrl),
- SH_PFC_PIN_GROUP(ssi4_data),
- SH_PFC_PIN_GROUP(ssi4_ctrl),
- SH_PFC_PIN_GROUP(ssi5_data),
- SH_PFC_PIN_GROUP(ssi5_ctrl),
- SH_PFC_PIN_GROUP(ssi6_data),
- SH_PFC_PIN_GROUP(ssi6_ctrl),
- SH_PFC_PIN_GROUP(ssi7_data),
- SH_PFC_PIN_GROUP(ssi78_ctrl),
- SH_PFC_PIN_GROUP(ssi8_data),
- SH_PFC_PIN_GROUP(ssi9_data_a),
- SH_PFC_PIN_GROUP(ssi9_data_b),
- SH_PFC_PIN_GROUP(ssi9_ctrl_a),
- SH_PFC_PIN_GROUP(ssi9_ctrl_b),
- SH_PFC_PIN_GROUP(tmu_tclk1_a),
- SH_PFC_PIN_GROUP(tmu_tclk1_b),
- SH_PFC_PIN_GROUP(tmu_tclk2_a),
- SH_PFC_PIN_GROUP(tmu_tclk2_b),
- SH_PFC_PIN_GROUP(tpu_to0),
- SH_PFC_PIN_GROUP(tpu_to1),
- SH_PFC_PIN_GROUP(tpu_to2),
- SH_PFC_PIN_GROUP(tpu_to3),
- SH_PFC_PIN_GROUP(usb0),
- SH_PFC_PIN_GROUP(usb1),
- SH_PFC_PIN_GROUP(usb30),
- VIN_DATA_PIN_GROUP(vin4_data, 8, _a),
- VIN_DATA_PIN_GROUP(vin4_data, 10, _a),
- VIN_DATA_PIN_GROUP(vin4_data, 12, _a),
- VIN_DATA_PIN_GROUP(vin4_data, 16, _a),
- SH_PFC_PIN_GROUP(vin4_data18_a),
- VIN_DATA_PIN_GROUP(vin4_data, 20, _a),
- VIN_DATA_PIN_GROUP(vin4_data, 24, _a),
- VIN_DATA_PIN_GROUP(vin4_data, 8, _b),
- VIN_DATA_PIN_GROUP(vin4_data, 10, _b),
- VIN_DATA_PIN_GROUP(vin4_data, 12, _b),
- VIN_DATA_PIN_GROUP(vin4_data, 16, _b),
- SH_PFC_PIN_GROUP(vin4_data18_b),
- VIN_DATA_PIN_GROUP(vin4_data, 20, _b),
- VIN_DATA_PIN_GROUP(vin4_data, 24, _b),
- SH_PFC_PIN_GROUP(vin4_sync),
- SH_PFC_PIN_GROUP(vin4_field),
- SH_PFC_PIN_GROUP(vin4_clkenb),
- SH_PFC_PIN_GROUP(vin4_clk),
- VIN_DATA_PIN_GROUP(vin5_data, 8),
- VIN_DATA_PIN_GROUP(vin5_data, 10),
- VIN_DATA_PIN_GROUP(vin5_data, 12),
- VIN_DATA_PIN_GROUP(vin5_data, 16),
- SH_PFC_PIN_GROUP(vin5_sync),
- SH_PFC_PIN_GROUP(vin5_field),
- SH_PFC_PIN_GROUP(vin5_clkenb),
- SH_PFC_PIN_GROUP(vin5_clk),
+static const struct {
+ struct sh_pfc_pin_group common[318];
+ struct sh_pfc_pin_group automotive[30];
+} pinmux_groups = {
+ .common = {
+ SH_PFC_PIN_GROUP(audio_clk_a_a),
+ SH_PFC_PIN_GROUP(audio_clk_a_b),
+ SH_PFC_PIN_GROUP(audio_clk_a_c),
+ SH_PFC_PIN_GROUP(audio_clk_b_a),
+ SH_PFC_PIN_GROUP(audio_clk_b_b),
+ SH_PFC_PIN_GROUP(audio_clk_c_a),
+ SH_PFC_PIN_GROUP(audio_clk_c_b),
+ SH_PFC_PIN_GROUP(audio_clkout_a),
+ SH_PFC_PIN_GROUP(audio_clkout_b),
+ SH_PFC_PIN_GROUP(audio_clkout_c),
+ SH_PFC_PIN_GROUP(audio_clkout_d),
+ SH_PFC_PIN_GROUP(audio_clkout1_a),
+ SH_PFC_PIN_GROUP(audio_clkout1_b),
+ SH_PFC_PIN_GROUP(audio_clkout2_a),
+ SH_PFC_PIN_GROUP(audio_clkout2_b),
+ SH_PFC_PIN_GROUP(audio_clkout3_a),
+ SH_PFC_PIN_GROUP(audio_clkout3_b),
+ SH_PFC_PIN_GROUP(avb_link),
+ SH_PFC_PIN_GROUP(avb_magic),
+ SH_PFC_PIN_GROUP(avb_phy_int),
+ SH_PFC_PIN_GROUP_ALIAS(avb_mdc, avb_mdio), /* Deprecated */
+ SH_PFC_PIN_GROUP(avb_mdio),
+ SH_PFC_PIN_GROUP(avb_mii),
+ SH_PFC_PIN_GROUP(avb_avtp_pps),
+ SH_PFC_PIN_GROUP(avb_avtp_match_a),
+ SH_PFC_PIN_GROUP(avb_avtp_capture_a),
+ SH_PFC_PIN_GROUP(avb_avtp_match_b),
+ SH_PFC_PIN_GROUP(avb_avtp_capture_b),
+ SH_PFC_PIN_GROUP(can0_data_a),
+ SH_PFC_PIN_GROUP(can0_data_b),
+ SH_PFC_PIN_GROUP(can1_data),
+ SH_PFC_PIN_GROUP(can_clk),
+ SH_PFC_PIN_GROUP(canfd0_data_a),
+ SH_PFC_PIN_GROUP(canfd0_data_b),
+ SH_PFC_PIN_GROUP(canfd1_data),
+ SH_PFC_PIN_GROUP(du_rgb666),
+ SH_PFC_PIN_GROUP(du_rgb888),
+ SH_PFC_PIN_GROUP(du_clk_out_0),
+ SH_PFC_PIN_GROUP(du_clk_out_1),
+ SH_PFC_PIN_GROUP(du_sync),
+ SH_PFC_PIN_GROUP(du_oddf),
+ SH_PFC_PIN_GROUP(du_cde),
+ SH_PFC_PIN_GROUP(du_disp),
+ SH_PFC_PIN_GROUP(hscif0_data),
+ SH_PFC_PIN_GROUP(hscif0_clk),
+ SH_PFC_PIN_GROUP(hscif0_ctrl),
+ SH_PFC_PIN_GROUP(hscif1_data_a),
+ SH_PFC_PIN_GROUP(hscif1_clk_a),
+ SH_PFC_PIN_GROUP(hscif1_ctrl_a),
+ SH_PFC_PIN_GROUP(hscif1_data_b),
+ SH_PFC_PIN_GROUP(hscif1_clk_b),
+ SH_PFC_PIN_GROUP(hscif1_ctrl_b),
+ SH_PFC_PIN_GROUP(hscif2_data_a),
+ SH_PFC_PIN_GROUP(hscif2_clk_a),
+ SH_PFC_PIN_GROUP(hscif2_ctrl_a),
+ SH_PFC_PIN_GROUP(hscif2_data_b),
+ SH_PFC_PIN_GROUP(hscif2_clk_b),
+ SH_PFC_PIN_GROUP(hscif2_ctrl_b),
+ SH_PFC_PIN_GROUP(hscif2_data_c),
+ SH_PFC_PIN_GROUP(hscif2_clk_c),
+ SH_PFC_PIN_GROUP(hscif2_ctrl_c),
+ SH_PFC_PIN_GROUP(hscif3_data_a),
+ SH_PFC_PIN_GROUP(hscif3_clk),
+ SH_PFC_PIN_GROUP(hscif3_ctrl),
+ SH_PFC_PIN_GROUP(hscif3_data_b),
+ SH_PFC_PIN_GROUP(hscif3_data_c),
+ SH_PFC_PIN_GROUP(hscif3_data_d),
+ SH_PFC_PIN_GROUP(hscif4_data_a),
+ SH_PFC_PIN_GROUP(hscif4_clk),
+ SH_PFC_PIN_GROUP(hscif4_ctrl),
+ SH_PFC_PIN_GROUP(hscif4_data_b),
+ SH_PFC_PIN_GROUP(i2c0),
+ SH_PFC_PIN_GROUP(i2c1_a),
+ SH_PFC_PIN_GROUP(i2c1_b),
+ SH_PFC_PIN_GROUP(i2c2_a),
+ SH_PFC_PIN_GROUP(i2c2_b),
+ SH_PFC_PIN_GROUP(i2c3),
+ SH_PFC_PIN_GROUP(i2c5),
+ SH_PFC_PIN_GROUP(i2c6_a),
+ SH_PFC_PIN_GROUP(i2c6_b),
+ SH_PFC_PIN_GROUP(i2c6_c),
+ SH_PFC_PIN_GROUP(intc_ex_irq0),
+ SH_PFC_PIN_GROUP(intc_ex_irq1),
+ SH_PFC_PIN_GROUP(intc_ex_irq2),
+ SH_PFC_PIN_GROUP(intc_ex_irq3),
+ SH_PFC_PIN_GROUP(intc_ex_irq4),
+ SH_PFC_PIN_GROUP(intc_ex_irq5),
+ SH_PFC_PIN_GROUP(msiof0_clk),
+ SH_PFC_PIN_GROUP(msiof0_sync),
+ SH_PFC_PIN_GROUP(msiof0_ss1),
+ SH_PFC_PIN_GROUP(msiof0_ss2),
+ SH_PFC_PIN_GROUP(msiof0_txd),
+ SH_PFC_PIN_GROUP(msiof0_rxd),
+ SH_PFC_PIN_GROUP(msiof1_clk_a),
+ SH_PFC_PIN_GROUP(msiof1_sync_a),
+ SH_PFC_PIN_GROUP(msiof1_ss1_a),
+ SH_PFC_PIN_GROUP(msiof1_ss2_a),
+ SH_PFC_PIN_GROUP(msiof1_txd_a),
+ SH_PFC_PIN_GROUP(msiof1_rxd_a),
+ SH_PFC_PIN_GROUP(msiof1_clk_b),
+ SH_PFC_PIN_GROUP(msiof1_sync_b),
+ SH_PFC_PIN_GROUP(msiof1_ss1_b),
+ SH_PFC_PIN_GROUP(msiof1_ss2_b),
+ SH_PFC_PIN_GROUP(msiof1_txd_b),
+ SH_PFC_PIN_GROUP(msiof1_rxd_b),
+ SH_PFC_PIN_GROUP(msiof1_clk_c),
+ SH_PFC_PIN_GROUP(msiof1_sync_c),
+ SH_PFC_PIN_GROUP(msiof1_ss1_c),
+ SH_PFC_PIN_GROUP(msiof1_ss2_c),
+ SH_PFC_PIN_GROUP(msiof1_txd_c),
+ SH_PFC_PIN_GROUP(msiof1_rxd_c),
+ SH_PFC_PIN_GROUP(msiof1_clk_d),
+ SH_PFC_PIN_GROUP(msiof1_sync_d),
+ SH_PFC_PIN_GROUP(msiof1_ss1_d),
+ SH_PFC_PIN_GROUP(msiof1_ss2_d),
+ SH_PFC_PIN_GROUP(msiof1_txd_d),
+ SH_PFC_PIN_GROUP(msiof1_rxd_d),
+ SH_PFC_PIN_GROUP(msiof1_clk_e),
+ SH_PFC_PIN_GROUP(msiof1_sync_e),
+ SH_PFC_PIN_GROUP(msiof1_ss1_e),
+ SH_PFC_PIN_GROUP(msiof1_ss2_e),
+ SH_PFC_PIN_GROUP(msiof1_txd_e),
+ SH_PFC_PIN_GROUP(msiof1_rxd_e),
+ SH_PFC_PIN_GROUP(msiof1_clk_f),
+ SH_PFC_PIN_GROUP(msiof1_sync_f),
+ SH_PFC_PIN_GROUP(msiof1_ss1_f),
+ SH_PFC_PIN_GROUP(msiof1_ss2_f),
+ SH_PFC_PIN_GROUP(msiof1_txd_f),
+ SH_PFC_PIN_GROUP(msiof1_rxd_f),
+ SH_PFC_PIN_GROUP(msiof1_clk_g),
+ SH_PFC_PIN_GROUP(msiof1_sync_g),
+ SH_PFC_PIN_GROUP(msiof1_ss1_g),
+ SH_PFC_PIN_GROUP(msiof1_ss2_g),
+ SH_PFC_PIN_GROUP(msiof1_txd_g),
+ SH_PFC_PIN_GROUP(msiof1_rxd_g),
+ SH_PFC_PIN_GROUP(msiof2_clk_a),
+ SH_PFC_PIN_GROUP(msiof2_sync_a),
+ SH_PFC_PIN_GROUP(msiof2_ss1_a),
+ SH_PFC_PIN_GROUP(msiof2_ss2_a),
+ SH_PFC_PIN_GROUP(msiof2_txd_a),
+ SH_PFC_PIN_GROUP(msiof2_rxd_a),
+ SH_PFC_PIN_GROUP(msiof2_clk_b),
+ SH_PFC_PIN_GROUP(msiof2_sync_b),
+ SH_PFC_PIN_GROUP(msiof2_ss1_b),
+ SH_PFC_PIN_GROUP(msiof2_ss2_b),
+ SH_PFC_PIN_GROUP(msiof2_txd_b),
+ SH_PFC_PIN_GROUP(msiof2_rxd_b),
+ SH_PFC_PIN_GROUP(msiof2_clk_c),
+ SH_PFC_PIN_GROUP(msiof2_sync_c),
+ SH_PFC_PIN_GROUP(msiof2_ss1_c),
+ SH_PFC_PIN_GROUP(msiof2_ss2_c),
+ SH_PFC_PIN_GROUP(msiof2_txd_c),
+ SH_PFC_PIN_GROUP(msiof2_rxd_c),
+ SH_PFC_PIN_GROUP(msiof2_clk_d),
+ SH_PFC_PIN_GROUP(msiof2_sync_d),
+ SH_PFC_PIN_GROUP(msiof2_ss1_d),
+ SH_PFC_PIN_GROUP(msiof2_ss2_d),
+ SH_PFC_PIN_GROUP(msiof2_txd_d),
+ SH_PFC_PIN_GROUP(msiof2_rxd_d),
+ SH_PFC_PIN_GROUP(msiof3_clk_a),
+ SH_PFC_PIN_GROUP(msiof3_sync_a),
+ SH_PFC_PIN_GROUP(msiof3_ss1_a),
+ SH_PFC_PIN_GROUP(msiof3_ss2_a),
+ SH_PFC_PIN_GROUP(msiof3_txd_a),
+ SH_PFC_PIN_GROUP(msiof3_rxd_a),
+ SH_PFC_PIN_GROUP(msiof3_clk_b),
+ SH_PFC_PIN_GROUP(msiof3_sync_b),
+ SH_PFC_PIN_GROUP(msiof3_ss1_b),
+ SH_PFC_PIN_GROUP(msiof3_ss2_b),
+ SH_PFC_PIN_GROUP(msiof3_txd_b),
+ SH_PFC_PIN_GROUP(msiof3_rxd_b),
+ SH_PFC_PIN_GROUP(msiof3_clk_c),
+ SH_PFC_PIN_GROUP(msiof3_sync_c),
+ SH_PFC_PIN_GROUP(msiof3_txd_c),
+ SH_PFC_PIN_GROUP(msiof3_rxd_c),
+ SH_PFC_PIN_GROUP(msiof3_clk_d),
+ SH_PFC_PIN_GROUP(msiof3_sync_d),
+ SH_PFC_PIN_GROUP(msiof3_ss1_d),
+ SH_PFC_PIN_GROUP(msiof3_txd_d),
+ SH_PFC_PIN_GROUP(msiof3_rxd_d),
+ SH_PFC_PIN_GROUP(msiof3_clk_e),
+ SH_PFC_PIN_GROUP(msiof3_sync_e),
+ SH_PFC_PIN_GROUP(msiof3_ss1_e),
+ SH_PFC_PIN_GROUP(msiof3_ss2_e),
+ SH_PFC_PIN_GROUP(msiof3_txd_e),
+ SH_PFC_PIN_GROUP(msiof3_rxd_e),
+ SH_PFC_PIN_GROUP(pwm0),
+ SH_PFC_PIN_GROUP(pwm1_a),
+ SH_PFC_PIN_GROUP(pwm1_b),
+ SH_PFC_PIN_GROUP(pwm2_a),
+ SH_PFC_PIN_GROUP(pwm2_b),
+ SH_PFC_PIN_GROUP(pwm3_a),
+ SH_PFC_PIN_GROUP(pwm3_b),
+ SH_PFC_PIN_GROUP(pwm4_a),
+ SH_PFC_PIN_GROUP(pwm4_b),
+ SH_PFC_PIN_GROUP(pwm5_a),
+ SH_PFC_PIN_GROUP(pwm5_b),
+ SH_PFC_PIN_GROUP(pwm6_a),
+ SH_PFC_PIN_GROUP(pwm6_b),
+ SH_PFC_PIN_GROUP(sata0_devslp_a),
+ SH_PFC_PIN_GROUP(sata0_devslp_b),
+ SH_PFC_PIN_GROUP(scif0_data),
+ SH_PFC_PIN_GROUP(scif0_clk),
+ SH_PFC_PIN_GROUP(scif0_ctrl),
+ SH_PFC_PIN_GROUP(scif1_data_a),
+ SH_PFC_PIN_GROUP(scif1_clk),
+ SH_PFC_PIN_GROUP(scif1_ctrl),
+ SH_PFC_PIN_GROUP(scif1_data_b),
+ SH_PFC_PIN_GROUP(scif2_data_a),
+ SH_PFC_PIN_GROUP(scif2_clk),
+ SH_PFC_PIN_GROUP(scif2_data_b),
+ SH_PFC_PIN_GROUP(scif3_data_a),
+ SH_PFC_PIN_GROUP(scif3_clk),
+ SH_PFC_PIN_GROUP(scif3_ctrl),
+ SH_PFC_PIN_GROUP(scif3_data_b),
+ SH_PFC_PIN_GROUP(scif4_data_a),
+ SH_PFC_PIN_GROUP(scif4_clk_a),
+ SH_PFC_PIN_GROUP(scif4_ctrl_a),
+ SH_PFC_PIN_GROUP(scif4_data_b),
+ SH_PFC_PIN_GROUP(scif4_clk_b),
+ SH_PFC_PIN_GROUP(scif4_ctrl_b),
+ SH_PFC_PIN_GROUP(scif4_data_c),
+ SH_PFC_PIN_GROUP(scif4_clk_c),
+ SH_PFC_PIN_GROUP(scif4_ctrl_c),
+ SH_PFC_PIN_GROUP(scif5_data_a),
+ SH_PFC_PIN_GROUP(scif5_clk_a),
+ SH_PFC_PIN_GROUP(scif5_data_b),
+ SH_PFC_PIN_GROUP(scif5_clk_b),
+ SH_PFC_PIN_GROUP(scif_clk_a),
+ SH_PFC_PIN_GROUP(scif_clk_b),
+ SH_PFC_PIN_GROUP(sdhi0_data1),
+ SH_PFC_PIN_GROUP(sdhi0_data4),
+ SH_PFC_PIN_GROUP(sdhi0_ctrl),
+ SH_PFC_PIN_GROUP(sdhi0_cd),
+ SH_PFC_PIN_GROUP(sdhi0_wp),
+ SH_PFC_PIN_GROUP(sdhi1_data1),
+ SH_PFC_PIN_GROUP(sdhi1_data4),
+ SH_PFC_PIN_GROUP(sdhi1_ctrl),
+ SH_PFC_PIN_GROUP(sdhi1_cd),
+ SH_PFC_PIN_GROUP(sdhi1_wp),
+ SH_PFC_PIN_GROUP(sdhi2_data1),
+ SH_PFC_PIN_GROUP(sdhi2_data4),
+ SH_PFC_PIN_GROUP(sdhi2_data8),
+ SH_PFC_PIN_GROUP(sdhi2_ctrl),
+ SH_PFC_PIN_GROUP(sdhi2_cd_a),
+ SH_PFC_PIN_GROUP(sdhi2_wp_a),
+ SH_PFC_PIN_GROUP(sdhi2_cd_b),
+ SH_PFC_PIN_GROUP(sdhi2_wp_b),
+ SH_PFC_PIN_GROUP(sdhi2_ds),
+ SH_PFC_PIN_GROUP(sdhi3_data1),
+ SH_PFC_PIN_GROUP(sdhi3_data4),
+ SH_PFC_PIN_GROUP(sdhi3_data8),
+ SH_PFC_PIN_GROUP(sdhi3_ctrl),
+ SH_PFC_PIN_GROUP(sdhi3_cd),
+ SH_PFC_PIN_GROUP(sdhi3_wp),
+ SH_PFC_PIN_GROUP(sdhi3_ds),
+ SH_PFC_PIN_GROUP(ssi0_data),
+ SH_PFC_PIN_GROUP(ssi01239_ctrl),
+ SH_PFC_PIN_GROUP(ssi1_data_a),
+ SH_PFC_PIN_GROUP(ssi1_data_b),
+ SH_PFC_PIN_GROUP(ssi1_ctrl_a),
+ SH_PFC_PIN_GROUP(ssi1_ctrl_b),
+ SH_PFC_PIN_GROUP(ssi2_data_a),
+ SH_PFC_PIN_GROUP(ssi2_data_b),
+ SH_PFC_PIN_GROUP(ssi2_ctrl_a),
+ SH_PFC_PIN_GROUP(ssi2_ctrl_b),
+ SH_PFC_PIN_GROUP(ssi3_data),
+ SH_PFC_PIN_GROUP(ssi349_ctrl),
+ SH_PFC_PIN_GROUP(ssi4_data),
+ SH_PFC_PIN_GROUP(ssi4_ctrl),
+ SH_PFC_PIN_GROUP(ssi5_data),
+ SH_PFC_PIN_GROUP(ssi5_ctrl),
+ SH_PFC_PIN_GROUP(ssi6_data),
+ SH_PFC_PIN_GROUP(ssi6_ctrl),
+ SH_PFC_PIN_GROUP(ssi7_data),
+ SH_PFC_PIN_GROUP(ssi78_ctrl),
+ SH_PFC_PIN_GROUP(ssi8_data),
+ SH_PFC_PIN_GROUP(ssi9_data_a),
+ SH_PFC_PIN_GROUP(ssi9_data_b),
+ SH_PFC_PIN_GROUP(ssi9_ctrl_a),
+ SH_PFC_PIN_GROUP(ssi9_ctrl_b),
+ SH_PFC_PIN_GROUP(tmu_tclk1_a),
+ SH_PFC_PIN_GROUP(tmu_tclk1_b),
+ SH_PFC_PIN_GROUP(tmu_tclk2_a),
+ SH_PFC_PIN_GROUP(tmu_tclk2_b),
+ SH_PFC_PIN_GROUP(tpu_to0),
+ SH_PFC_PIN_GROUP(tpu_to1),
+ SH_PFC_PIN_GROUP(tpu_to2),
+ SH_PFC_PIN_GROUP(tpu_to3),
+ SH_PFC_PIN_GROUP(usb0),
+ SH_PFC_PIN_GROUP(usb1),
+ SH_PFC_PIN_GROUP(usb30),
+ VIN_DATA_PIN_GROUP(vin4_data, 8, _a),
+ VIN_DATA_PIN_GROUP(vin4_data, 10, _a),
+ VIN_DATA_PIN_GROUP(vin4_data, 12, _a),
+ VIN_DATA_PIN_GROUP(vin4_data, 16, _a),
+ SH_PFC_PIN_GROUP(vin4_data18_a),
+ VIN_DATA_PIN_GROUP(vin4_data, 20, _a),
+ VIN_DATA_PIN_GROUP(vin4_data, 24, _a),
+ VIN_DATA_PIN_GROUP(vin4_data, 8, _b),
+ VIN_DATA_PIN_GROUP(vin4_data, 10, _b),
+ VIN_DATA_PIN_GROUP(vin4_data, 12, _b),
+ VIN_DATA_PIN_GROUP(vin4_data, 16, _b),
+ SH_PFC_PIN_GROUP(vin4_data18_b),
+ VIN_DATA_PIN_GROUP(vin4_data, 20, _b),
+ VIN_DATA_PIN_GROUP(vin4_data, 24, _b),
+ SH_PFC_PIN_GROUP(vin4_sync),
+ SH_PFC_PIN_GROUP(vin4_field),
+ SH_PFC_PIN_GROUP(vin4_clkenb),
+ SH_PFC_PIN_GROUP(vin4_clk),
+ VIN_DATA_PIN_GROUP(vin5_data, 8),
+ VIN_DATA_PIN_GROUP(vin5_data, 10),
+ VIN_DATA_PIN_GROUP(vin5_data, 12),
+ VIN_DATA_PIN_GROUP(vin5_data, 16),
+ SH_PFC_PIN_GROUP(vin5_sync),
+ SH_PFC_PIN_GROUP(vin5_field),
+ SH_PFC_PIN_GROUP(vin5_clkenb),
+ SH_PFC_PIN_GROUP(vin5_clk),
+ },
+ .automotive = {
+ SH_PFC_PIN_GROUP(drif0_ctrl_a),
+ SH_PFC_PIN_GROUP(drif0_data0_a),
+ SH_PFC_PIN_GROUP(drif0_data1_a),
+ SH_PFC_PIN_GROUP(drif0_ctrl_b),
+ SH_PFC_PIN_GROUP(drif0_data0_b),
+ SH_PFC_PIN_GROUP(drif0_data1_b),
+ SH_PFC_PIN_GROUP(drif0_ctrl_c),
+ SH_PFC_PIN_GROUP(drif0_data0_c),
+ SH_PFC_PIN_GROUP(drif0_data1_c),
+ SH_PFC_PIN_GROUP(drif1_ctrl_a),
+ SH_PFC_PIN_GROUP(drif1_data0_a),
+ SH_PFC_PIN_GROUP(drif1_data1_a),
+ SH_PFC_PIN_GROUP(drif1_ctrl_b),
+ SH_PFC_PIN_GROUP(drif1_data0_b),
+ SH_PFC_PIN_GROUP(drif1_data1_b),
+ SH_PFC_PIN_GROUP(drif1_ctrl_c),
+ SH_PFC_PIN_GROUP(drif1_data0_c),
+ SH_PFC_PIN_GROUP(drif1_data1_c),
+ SH_PFC_PIN_GROUP(drif2_ctrl_a),
+ SH_PFC_PIN_GROUP(drif2_data0_a),
+ SH_PFC_PIN_GROUP(drif2_data1_a),
+ SH_PFC_PIN_GROUP(drif2_ctrl_b),
+ SH_PFC_PIN_GROUP(drif2_data0_b),
+ SH_PFC_PIN_GROUP(drif2_data1_b),
+ SH_PFC_PIN_GROUP(drif3_ctrl_a),
+ SH_PFC_PIN_GROUP(drif3_data0_a),
+ SH_PFC_PIN_GROUP(drif3_data1_a),
+ SH_PFC_PIN_GROUP(drif3_ctrl_b),
+ SH_PFC_PIN_GROUP(drif3_data0_b),
+ SH_PFC_PIN_GROUP(drif3_data1_b),
+ }
};
static const char * const audio_clk_groups[] = {
@@ -5241,62 +5248,69 @@ static const char * const vin5_groups[] = {
"vin5_clk",
};
-static const struct sh_pfc_function pinmux_functions[] = {
- SH_PFC_FUNCTION(audio_clk),
- SH_PFC_FUNCTION(avb),
- SH_PFC_FUNCTION(can0),
- SH_PFC_FUNCTION(can1),
- SH_PFC_FUNCTION(can_clk),
- SH_PFC_FUNCTION(canfd0),
- SH_PFC_FUNCTION(canfd1),
- SH_PFC_FUNCTION(drif0),
- SH_PFC_FUNCTION(drif1),
- SH_PFC_FUNCTION(drif2),
- SH_PFC_FUNCTION(drif3),
- SH_PFC_FUNCTION(du),
- SH_PFC_FUNCTION(hscif0),
- SH_PFC_FUNCTION(hscif1),
- SH_PFC_FUNCTION(hscif2),
- SH_PFC_FUNCTION(hscif3),
- SH_PFC_FUNCTION(hscif4),
- SH_PFC_FUNCTION(i2c0),
- SH_PFC_FUNCTION(i2c1),
- SH_PFC_FUNCTION(i2c2),
- SH_PFC_FUNCTION(i2c3),
- SH_PFC_FUNCTION(i2c5),
- SH_PFC_FUNCTION(i2c6),
- SH_PFC_FUNCTION(intc_ex),
- SH_PFC_FUNCTION(msiof0),
- SH_PFC_FUNCTION(msiof1),
- SH_PFC_FUNCTION(msiof2),
- SH_PFC_FUNCTION(msiof3),
- SH_PFC_FUNCTION(pwm0),
- SH_PFC_FUNCTION(pwm1),
- SH_PFC_FUNCTION(pwm2),
- SH_PFC_FUNCTION(pwm3),
- SH_PFC_FUNCTION(pwm4),
- SH_PFC_FUNCTION(pwm5),
- SH_PFC_FUNCTION(pwm6),
- SH_PFC_FUNCTION(sata0),
- SH_PFC_FUNCTION(scif0),
- SH_PFC_FUNCTION(scif1),
- SH_PFC_FUNCTION(scif2),
- SH_PFC_FUNCTION(scif3),
- SH_PFC_FUNCTION(scif4),
- SH_PFC_FUNCTION(scif5),
- SH_PFC_FUNCTION(scif_clk),
- SH_PFC_FUNCTION(sdhi0),
- SH_PFC_FUNCTION(sdhi1),
- SH_PFC_FUNCTION(sdhi2),
- SH_PFC_FUNCTION(sdhi3),
- SH_PFC_FUNCTION(ssi),
- SH_PFC_FUNCTION(tmu),
- SH_PFC_FUNCTION(tpu),
- SH_PFC_FUNCTION(usb0),
- SH_PFC_FUNCTION(usb1),
- SH_PFC_FUNCTION(usb30),
- SH_PFC_FUNCTION(vin4),
- SH_PFC_FUNCTION(vin5),
+static const struct {
+ struct sh_pfc_function common[51];
+ struct sh_pfc_function automotive[4];
+} pinmux_functions = {
+ .common = {
+ SH_PFC_FUNCTION(audio_clk),
+ SH_PFC_FUNCTION(avb),
+ SH_PFC_FUNCTION(can0),
+ SH_PFC_FUNCTION(can1),
+ SH_PFC_FUNCTION(can_clk),
+ SH_PFC_FUNCTION(canfd0),
+ SH_PFC_FUNCTION(canfd1),
+ SH_PFC_FUNCTION(du),
+ SH_PFC_FUNCTION(hscif0),
+ SH_PFC_FUNCTION(hscif1),
+ SH_PFC_FUNCTION(hscif2),
+ SH_PFC_FUNCTION(hscif3),
+ SH_PFC_FUNCTION(hscif4),
+ SH_PFC_FUNCTION(i2c0),
+ SH_PFC_FUNCTION(i2c1),
+ SH_PFC_FUNCTION(i2c2),
+ SH_PFC_FUNCTION(i2c3),
+ SH_PFC_FUNCTION(i2c5),
+ SH_PFC_FUNCTION(i2c6),
+ SH_PFC_FUNCTION(intc_ex),
+ SH_PFC_FUNCTION(msiof0),
+ SH_PFC_FUNCTION(msiof1),
+ SH_PFC_FUNCTION(msiof2),
+ SH_PFC_FUNCTION(msiof3),
+ SH_PFC_FUNCTION(pwm0),
+ SH_PFC_FUNCTION(pwm1),
+ SH_PFC_FUNCTION(pwm2),
+ SH_PFC_FUNCTION(pwm3),
+ SH_PFC_FUNCTION(pwm4),
+ SH_PFC_FUNCTION(pwm5),
+ SH_PFC_FUNCTION(pwm6),
+ SH_PFC_FUNCTION(sata0),
+ SH_PFC_FUNCTION(scif0),
+ SH_PFC_FUNCTION(scif1),
+ SH_PFC_FUNCTION(scif2),
+ SH_PFC_FUNCTION(scif3),
+ SH_PFC_FUNCTION(scif4),
+ SH_PFC_FUNCTION(scif5),
+ SH_PFC_FUNCTION(scif_clk),
+ SH_PFC_FUNCTION(sdhi0),
+ SH_PFC_FUNCTION(sdhi1),
+ SH_PFC_FUNCTION(sdhi2),
+ SH_PFC_FUNCTION(sdhi3),
+ SH_PFC_FUNCTION(ssi),
+ SH_PFC_FUNCTION(tmu),
+ SH_PFC_FUNCTION(tpu),
+ SH_PFC_FUNCTION(usb0),
+ SH_PFC_FUNCTION(usb1),
+ SH_PFC_FUNCTION(usb30),
+ SH_PFC_FUNCTION(vin4),
+ SH_PFC_FUNCTION(vin5),
+ },
+ .automotive = {
+ SH_PFC_FUNCTION(drif0),
+ SH_PFC_FUNCTION(drif1),
+ SH_PFC_FUNCTION(drif2),
+ SH_PFC_FUNCTION(drif3),
+ }
};
static const struct pinmux_cfg_reg pinmux_config_regs[] = {
@@ -6425,6 +6439,32 @@ static const struct sh_pfc_soc_operations r8a77965_pinmux_ops = {
.set_bias = r8a77965_pinmux_set_bias,
};
+#ifdef CONFIG_PINCTRL_PFC_R8A774B1
+const struct sh_pfc_soc_info r8a774b1_pinmux_info = {
+ .name = "r8a774b1_pfc",
+ .ops = &r8a77965_pinmux_ops,
+ .unlock_reg = 0xe6060000, /* PMMR */
+
+ .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
+
+ .pins = pinmux_pins,
+ .nr_pins = ARRAY_SIZE(pinmux_pins),
+ .groups = pinmux_groups.common,
+ .nr_groups = ARRAY_SIZE(pinmux_groups.common),
+ .functions = pinmux_functions.common,
+ .nr_functions = ARRAY_SIZE(pinmux_functions.common),
+
+ .cfg_regs = pinmux_config_regs,
+ .drive_regs = pinmux_drive_regs,
+ .bias_regs = pinmux_bias_regs,
+ .ioctrl_regs = pinmux_ioctrl_regs,
+
+ .pinmux_data = pinmux_data,
+ .pinmux_data_size = ARRAY_SIZE(pinmux_data),
+};
+#endif
+
+#ifdef CONFIG_PINCTRL_PFC_R8A77965
const struct sh_pfc_soc_info r8a77965_pinmux_info = {
.name = "r8a77965_pfc",
.ops = &r8a77965_pinmux_ops,
@@ -6434,10 +6474,12 @@ const struct sh_pfc_soc_info r8a77965_pinmux_info = {
.pins = pinmux_pins,
.nr_pins = ARRAY_SIZE(pinmux_pins),
- .groups = pinmux_groups,
- .nr_groups = ARRAY_SIZE(pinmux_groups),
- .functions = pinmux_functions,
- .nr_functions = ARRAY_SIZE(pinmux_functions),
+ .groups = pinmux_groups.common,
+ .nr_groups = ARRAY_SIZE(pinmux_groups.common) +
+ ARRAY_SIZE(pinmux_groups.automotive),
+ .functions = pinmux_functions.common,
+ .nr_functions = ARRAY_SIZE(pinmux_functions.common) +
+ ARRAY_SIZE(pinmux_functions.automotive),
.cfg_regs = pinmux_config_regs,
.drive_regs = pinmux_drive_regs,
@@ -6447,3 +6489,4 @@ const struct sh_pfc_soc_info r8a77965_pinmux_info = {
.pinmux_data = pinmux_data,
.pinmux_data_size = ARRAY_SIZE(pinmux_data),
};
+#endif
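
A note on the layout trick above, since it recurs below for pinmux_functions: keeping .common and .automotive inside one wrapper struct makes the two arrays contiguous, so the full-featured SoC (r8a77965) publishes a single array starting at .common whose count is the sum of both sizes, while the reduced SoC (r8a774b1) points at the same base with only the common count. A toy sketch of the idea — item, reduced and full are illustrative names, not from the patch:

	/* Toy model of the common/automotive split; ARRAY_SIZE is the
	 * usual <linux/kernel.h> macro. */
	struct item { const char *name; };

	static const struct {
		struct item common[2];
		struct item automotive[1];
	} items = {
		.common     = { { "a" }, { "b" } },
		.automotive = { { "c" } },
	};

	/* Reduced SoC: expose only the common entries. */
	static const struct item *reduced = items.common;
	static const unsigned int nr_reduced = ARRAY_SIZE(items.common);

	/*
	 * Full SoC: same base pointer, summed count. Iterating past
	 * common[] into automotive[] relies on the two members sitting
	 * back to back with no padding, which holds for arrays of the
	 * same element type.
	 */
	static const struct item *full = items.common;
	static const unsigned int nr_full = ARRAY_SIZE(items.common) +
					    ARRAY_SIZE(items.automotive);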
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a77990.c b/drivers/pinctrl/sh-pfc/pfc-r8a77990.c
index 2dfb8d9cfda1..c926a59dd21c 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a77990.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a77990.c
@@ -232,8 +232,8 @@
#define IP2_11_8 FM(AVB_MDC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP2_15_12 FM(BS_N) FM(PWM0_A) FM(AVB_MAGIC) FM(VI4_CLK) F_(0, 0) FM(TX3_C) F_(0, 0) FM(VI5_CLK_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP2_19_16 FM(RD_N) FM(PWM1_A) FM(AVB_LINK) FM(VI4_FIELD) F_(0, 0) FM(RX3_C) FM(FSCLKST2_N_A) FM(VI5_DATA0_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP2_23_20 FM(RD_WR_N) FM(SCL7_A) FM(AVB_AVTP_MATCH_A) FM(VI4_VSYNC_N) FM(TX5_B) FM(SCK3_C) FM(PWM5_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP2_27_24 FM(EX_WAIT0) FM(SDA7_A) FM(AVB_AVTP_CAPTURE_A) FM(VI4_HSYNC_N) FM(RX5_B) FM(PWM6_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2_23_20 FM(RD_WR_N) FM(SCL7_A) FM(AVB_AVTP_MATCH) FM(VI4_VSYNC_N) FM(TX5_B) FM(SCK3_C) FM(PWM5_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2_27_24 FM(EX_WAIT0) FM(SDA7_A) FM(AVB_AVTP_CAPTURE) FM(VI4_HSYNC_N) FM(RX5_B) FM(PWM6_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP2_31_28 FM(A0) FM(IRQ0) FM(PWM2_A) FM(MSIOF3_SS1_B) FM(VI5_CLK_A) FM(DU_CDE) FM(HRX3_D) FM(IERX) FM(QSTB_QHE) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP3_3_0 FM(A1) FM(IRQ1) FM(PWM3_A) FM(DU_DOTCLKIN1) FM(VI5_DATA0_A) FM(DU_DISP_CDE) FM(SDA6_B) FM(IETX) FM(QCPV_QDE) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP3_7_4 FM(A2) FM(IRQ2) FM(AVB_AVTP_PPS) FM(VI4_CLKENB) FM(VI5_DATA1_A) FM(DU_DISP) FM(SCL6_B) F_(0, 0) FM(QSTVB_QVE) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
@@ -448,6 +448,8 @@ FM(IP12_31_28) IP12_31_28 FM(IP13_31_28) IP13_31_28 FM(IP14_31_28) IP14_31_28 FM
#define MOD_SEL0_1_0 REV4(FM(SEL_SPEED_PULSE_IF_0), FM(SEL_SPEED_PULSE_IF_1), FM(SEL_SPEED_PULSE_IF_2), F_(0, 0))
/* MOD_SEL1 */ /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 */ /* 7 */
+#define MOD_SEL1_31 FM(SEL_SIMCARD_0) FM(SEL_SIMCARD_1)
+#define MOD_SEL1_30 FM(SEL_SSI2_0) FM(SEL_SSI2_1)
#define MOD_SEL1_29 FM(SEL_TIMER_TMU_0) FM(SEL_TIMER_TMU_1)
#define MOD_SEL1_28 FM(SEL_USB_20_CH0_0) FM(SEL_USB_20_CH0_1)
#define MOD_SEL1_26 FM(SEL_DRIF2_0) FM(SEL_DRIF2_1)
@@ -468,7 +470,8 @@ FM(IP12_31_28) IP12_31_28 FM(IP13_31_28) IP13_31_28 FM(IP14_31_28) IP14_31_28 FM
#define PINMUX_MOD_SELS \
\
-MOD_SEL0_30_29 \
+ MOD_SEL1_31 \
+MOD_SEL0_30_29 MOD_SEL1_30 \
MOD_SEL1_29 \
MOD_SEL0_28 MOD_SEL1_28 \
MOD_SEL0_27_26 \
@@ -634,7 +637,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_GPSR(IP2_23_20, RD_WR_N),
PINMUX_IPSR_MSEL(IP2_23_20, SCL7_A, SEL_I2C7_0),
- PINMUX_IPSR_GPSR(IP2_23_20, AVB_AVTP_MATCH_A),
+ PINMUX_IPSR_GPSR(IP2_23_20, AVB_AVTP_MATCH),
PINMUX_IPSR_GPSR(IP2_23_20, VI4_VSYNC_N),
PINMUX_IPSR_GPSR(IP2_23_20, TX5_B),
PINMUX_IPSR_MSEL(IP2_23_20, SCK3_C, SEL_SCIF3_2),
@@ -642,7 +645,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_GPSR(IP2_27_24, EX_WAIT0),
PINMUX_IPSR_MSEL(IP2_27_24, SDA7_A, SEL_I2C7_0),
- PINMUX_IPSR_GPSR(IP2_27_24, AVB_AVTP_CAPTURE_A),
+ PINMUX_IPSR_GPSR(IP2_27_24, AVB_AVTP_CAPTURE),
PINMUX_IPSR_GPSR(IP2_27_24, VI4_HSYNC_N),
PINMUX_IPSR_MSEL(IP2_27_24, RX5_B, SEL_SCIF5_1),
PINMUX_IPSR_MSEL(IP2_27_24, PWM6_A, SEL_PWM6_0),
@@ -1058,7 +1061,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP10_27_24, RIF0_CLK_B, SEL_DRIF0_1),
PINMUX_IPSR_MSEL(IP10_27_24, SCL2_B, SEL_I2C2_1),
PINMUX_IPSR_MSEL(IP10_27_24, TCLK1_A, SEL_TIMER_TMU_0),
- PINMUX_IPSR_GPSR(IP10_27_24, SSI_SCK2_B),
+ PINMUX_IPSR_MSEL(IP10_27_24, SSI_SCK2_B, SEL_SSI2_1),
PINMUX_IPSR_GPSR(IP10_27_24, TS_SCK0),
PINMUX_IPSR_GPSR(IP10_31_28, SD0_WP),
@@ -1067,7 +1070,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP10_31_28, RIF0_D0_B, SEL_DRIF0_1),
PINMUX_IPSR_MSEL(IP10_31_28, SDA2_B, SEL_I2C2_1),
PINMUX_IPSR_MSEL(IP10_31_28, TCLK2_A, SEL_TIMER_TMU_0),
- PINMUX_IPSR_GPSR(IP10_31_28, SSI_WS2_B),
+ PINMUX_IPSR_MSEL(IP10_31_28, SSI_WS2_B, SEL_SSI2_1),
PINMUX_IPSR_GPSR(IP10_31_28, TS_SDAT0),
/* IPSR11 */
@@ -1085,13 +1088,13 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP11_11_8, RX0_A, SEL_SCIF0_0),
PINMUX_IPSR_MSEL(IP11_11_8, HRX1_A, SEL_HSCIF1_0),
- PINMUX_IPSR_GPSR(IP11_11_8, SSI_SCK2_A),
+ PINMUX_IPSR_MSEL(IP11_11_8, SSI_SCK2_A, SEL_SSI2_0),
PINMUX_IPSR_GPSR(IP11_11_8, RIF1_SYNC),
PINMUX_IPSR_GPSR(IP11_11_8, TS_SCK1),
PINMUX_IPSR_MSEL(IP11_15_12, TX0_A, SEL_SCIF0_0),
PINMUX_IPSR_GPSR(IP11_15_12, HTX1_A),
- PINMUX_IPSR_GPSR(IP11_15_12, SSI_WS2_A),
+ PINMUX_IPSR_MSEL(IP11_15_12, SSI_WS2_A, SEL_SSI2_0),
PINMUX_IPSR_GPSR(IP11_15_12, RIF1_D0),
PINMUX_IPSR_GPSR(IP11_15_12, TS_SDAT1),
@@ -1196,7 +1199,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP13_19_16, RIF0_D1_A, SEL_DRIF0_0),
PINMUX_IPSR_MSEL(IP13_19_16, SDA1_B, SEL_I2C1_1),
PINMUX_IPSR_MSEL(IP13_19_16, TCLK2_B, SEL_TIMER_TMU_1),
- PINMUX_IPSR_GPSR(IP13_19_16, SIM0_D_A),
+ PINMUX_IPSR_MSEL(IP13_19_16, SIM0_D_A, SEL_SIMCARD_0),
PINMUX_IPSR_GPSR(IP13_23_20, MLB_DAT),
PINMUX_IPSR_MSEL(IP13_23_20, TX0_B, SEL_SCIF0_1),
@@ -1264,7 +1267,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_GPSR(IP15_15_12, TPU0TO2),
PINMUX_IPSR_MSEL(IP15_15_12, SDA1_D, SEL_I2C1_3),
PINMUX_IPSR_MSEL(IP15_15_12, FSO_CFE_1_N_B, SEL_FSO_1),
- PINMUX_IPSR_GPSR(IP15_15_12, SIM0_D_B),
+ PINMUX_IPSR_MSEL(IP15_15_12, SIM0_D_B, SEL_SIMCARD_1),
PINMUX_IPSR_GPSR(IP15_19_16, SSI_SDATA6),
PINMUX_IPSR_MSEL(IP15_19_16, HRTS2_N_A, SEL_HSCIF2_0),
@@ -1524,22 +1527,22 @@ static const unsigned int avb_avtp_pps_mux[] = {
AVB_AVTP_PPS_MARK,
};
-static const unsigned int avb_avtp_match_a_pins[] = {
- /* AVB_AVTP_MATCH_A */
+static const unsigned int avb_avtp_match_pins[] = {
+ /* AVB_AVTP_MATCH */
RCAR_GP_PIN(2, 24),
};
-static const unsigned int avb_avtp_match_a_mux[] = {
- AVB_AVTP_MATCH_A_MARK,
+static const unsigned int avb_avtp_match_mux[] = {
+ AVB_AVTP_MATCH_MARK,
};
-static const unsigned int avb_avtp_capture_a_pins[] = {
- /* AVB_AVTP_CAPTURE_A */
+static const unsigned int avb_avtp_capture_pins[] = {
+ /* AVB_AVTP_CAPTURE */
RCAR_GP_PIN(2, 25),
};
-static const unsigned int avb_avtp_capture_a_mux[] = {
- AVB_AVTP_CAPTURE_A_MARK,
+static const unsigned int avb_avtp_capture_mux[] = {
+ AVB_AVTP_CAPTURE_MARK,
};
/* - CAN ------------------------------------------------------------------ */
@@ -3784,8 +3787,8 @@ static const struct {
SH_PFC_PIN_GROUP(avb_phy_int),
SH_PFC_PIN_GROUP(avb_mii),
SH_PFC_PIN_GROUP(avb_avtp_pps),
- SH_PFC_PIN_GROUP(avb_avtp_match_a),
- SH_PFC_PIN_GROUP(avb_avtp_capture_a),
+ SH_PFC_PIN_GROUP(avb_avtp_match),
+ SH_PFC_PIN_GROUP(avb_avtp_capture),
SH_PFC_PIN_GROUP(can0_data),
SH_PFC_PIN_GROUP(can1_data),
SH_PFC_PIN_GROUP(can_clk),
@@ -4061,8 +4064,8 @@ static const char * const avb_groups[] = {
"avb_phy_int",
"avb_mii",
"avb_avtp_pps",
- "avb_avtp_match_a",
- "avb_avtp_capture_a",
+ "avb_avtp_match",
+ "avb_avtp_capture",
};
static const char * const can0_groups[] = {
@@ -4957,11 +4960,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
MOD_SEL0_1_0 ))
},
{ PINMUX_CFG_REG_VAR("MOD_SEL1", 0xe6060504, 32,
- GROUP(2, 1, 1, 1, 1, 1, 3, 3, 1, 1, 1, 1,
- 2, 2, 2, 1, 1, 2, 1, 4),
+ GROUP(1, 1, 1, 1, 1, 1, 1, 3, 3, 1, 1, 1,
+ 1, 2, 2, 2, 1, 1, 2, 1, 4),
GROUP(
- /* RESERVED 31, 30 */
- 0, 0, 0, 0,
+ MOD_SEL1_31
+ MOD_SEL1_30
MOD_SEL1_29
MOD_SEL1_28
/* RESERVED 27 */
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7734.c b/drivers/pinctrl/sh-pfc/pfc-sh7734.c
index 5dfd991ffdaa..dbc36079c381 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7734.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7734.c
@@ -1450,7 +1450,7 @@ static const struct pinmux_func pinmux_func_gpios[] = {
GPIO_FN(ET0_ETXD2_A),
GPIO_FN(EX_CS5), GPIO_FN(SD1_CMD_A), GPIO_FN(ATADIR), GPIO_FN(QSSL_B),
GPIO_FN(ET0_ETXD3_A),
- GPIO_FN(RD_WR), GPIO_FN(TCLK1_B),
+ GPIO_FN(RD_WR), GPIO_FN(TCLK0), GPIO_FN(CAN_CLK_B), GPIO_FN(ET0_ETXD4),
GPIO_FN(EX_WAIT0), GPIO_FN(TCLK1_B),
GPIO_FN(EX_WAIT1), GPIO_FN(SD1_DAT0_A), GPIO_FN(DREQ2),
GPIO_FN(CAN1_TX_C), GPIO_FN(ET0_LINK_C), GPIO_FN(ET0_ETXD5_A),
@@ -1949,7 +1949,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* IP3_20 [1] */
FN_EX_WAIT0, FN_TCLK1_B,
/* IP3_19_18 [2] */
- FN_RD_WR, FN_TCLK1_B, 0, 0,
+ FN_RD_WR, FN_TCLK0, FN_CAN_CLK_B, FN_ET0_ETXD4,
/* IP3_17_15 [3] */
FN_EX_CS5, FN_SD1_CMD_A, FN_ATADIR, FN_QSSL_B,
FN_ET0_ETXD3_A, 0, 0, 0,
diff --git a/drivers/pinctrl/sh-pfc/sh_pfc.h b/drivers/pinctrl/sh-pfc/sh_pfc.h
index 835148fc0f28..640d2a4cb838 100644
--- a/drivers/pinctrl/sh-pfc/sh_pfc.h
+++ b/drivers/pinctrl/sh-pfc/sh_pfc.h
@@ -309,6 +309,7 @@ extern const struct sh_pfc_soc_info r8a7744_pinmux_info;
extern const struct sh_pfc_soc_info r8a7745_pinmux_info;
extern const struct sh_pfc_soc_info r8a77470_pinmux_info;
extern const struct sh_pfc_soc_info r8a774a1_pinmux_info;
+extern const struct sh_pfc_soc_info r8a774b1_pinmux_info;
extern const struct sh_pfc_soc_info r8a774c0_pinmux_info;
extern const struct sh_pfc_soc_info r8a7778_pinmux_info;
extern const struct sh_pfc_soc_info r8a7779_pinmux_info;
@@ -319,7 +320,8 @@ extern const struct sh_pfc_soc_info r8a7793_pinmux_info;
extern const struct sh_pfc_soc_info r8a7794_pinmux_info;
extern const struct sh_pfc_soc_info r8a7795_pinmux_info;
extern const struct sh_pfc_soc_info r8a7795es1_pinmux_info;
-extern const struct sh_pfc_soc_info r8a7796_pinmux_info;
+extern const struct sh_pfc_soc_info r8a77960_pinmux_info;
+extern const struct sh_pfc_soc_info r8a77961_pinmux_info;
extern const struct sh_pfc_soc_info r8a77965_pinmux_info;
extern const struct sh_pfc_soc_info r8a77970_pinmux_info;
extern const struct sh_pfc_soc_info r8a77980_pinmux_info;
@@ -422,12 +424,12 @@ extern const struct sh_pfc_soc_info shx3_pinmux_info;
/*
* Describe a pinmux configuration in which a pin is physically multiplexed
* with other pins.
- * - ipsr: IPSR field (unused, for documentation purposes only)
+ * - ipsr: IPSR field
* - fn: Function name
* - psel: Physical multiplexing selector
*/
#define PINMUX_IPSR_PHYS(ipsr, fn, psel) \
- PINMUX_DATA(fn##_MARK, FN_##psel)
+ PINMUX_DATA(fn##_MARK, FN_##psel, FN_##ipsr)
/*
* Describe a pinmux configuration for a single-function pin with GPIO
diff --git a/drivers/pinctrl/sirf/pinctrl-atlas7.c b/drivers/pinctrl/sirf/pinctrl-atlas7.c
index 924080362bf7..b1a9611f46b3 100644
--- a/drivers/pinctrl/sirf/pinctrl-atlas7.c
+++ b/drivers/pinctrl/sirf/pinctrl-atlas7.c
@@ -5996,6 +5996,7 @@ static int atlas7_gpio_probe(struct platform_device *pdev)
struct gpio_chip *chip;
u32 nbank;
int ret, idx;
+ struct gpio_irq_chip *girq;
ret = of_property_read_u32(np, "gpio-banks", &nbank);
if (ret) {
@@ -6048,24 +6049,15 @@ static int atlas7_gpio_probe(struct platform_device *pdev)
chip->of_gpio_n_cells = 2;
chip->parent = &pdev->dev;
- /* Add gpio chip to system */
- ret = gpiochip_add_data(chip, a7gc);
- if (ret) {
- dev_err(&pdev->dev,
- "%pOF: error in probe function with status %d\n",
- np, ret);
- goto failed;
- }
-
- /* Add gpio chip to irq subsystem */
- ret = gpiochip_irqchip_add(chip, &atlas7_gpio_irq_chip,
- 0, handle_level_irq, IRQ_TYPE_NONE);
- if (ret) {
- dev_err(&pdev->dev,
- "could not connect irqchip to gpiochip\n");
- goto failed;
- }
-
+ girq = &chip->irq;
+ girq->chip = &atlas7_gpio_irq_chip;
+ girq->parent_handler = atlas7_gpio_handle_irq;
+ girq->num_parents = nbank;
+ girq->parents = devm_kcalloc(&pdev->dev, nbank,
+ sizeof(*girq->parents),
+ GFP_KERNEL);
+ if (!girq->parents)
+ return -ENOMEM;
for (idx = 0; idx < nbank; idx++) {
struct atlas7_gpio_bank *bank;
@@ -6084,9 +6076,18 @@ static int atlas7_gpio_probe(struct platform_device *pdev)
goto failed;
}
bank->irq = ret;
+ girq->parents[idx] = ret;
+ }
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_level_irq;
- gpiochip_set_chained_irqchip(chip, &atlas7_gpio_irq_chip,
- bank->irq, atlas7_gpio_handle_irq);
+ /* Add gpio chip to system */
+ ret = gpiochip_add_data(chip, a7gc);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "%pOF: error in probe function with status %d\n",
+ np, ret);
+ goto failed;
}
platform_set_drvdata(pdev, a7gc);
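
This conversion, and the sirf and plgpio ones below, follow the same shape: IRQ chip setup moves into struct gpio_irq_chip before gpiochip_add_data(), so the gpiolib core registers the irqchip as part of adding the chip instead of via the old gpiochip_irqchip_add()/gpiochip_set_chained_irqchip() pair. A minimal hedged sketch of the pattern — my_irq_chip and my_parent_handler stand in for the driver's own definitions:

	#include <linux/gpio/driver.h>
	#include <linux/irq.h>
	#include <linux/platform_device.h>

	static struct irq_chip my_irq_chip;		/* driver-specific */
	static void my_parent_handler(struct irq_desc *desc);

	static int my_setup(struct platform_device *pdev, struct gpio_chip *gc,
			    int parent_irq, void *data)
	{
		struct gpio_irq_chip *girq = &gc->irq;

		girq->chip = &my_irq_chip;
		girq->parent_handler = my_parent_handler; /* chained mode */
		girq->num_parents = 1;
		girq->parents = devm_kcalloc(&pdev->dev, 1,
					     sizeof(*girq->parents),
					     GFP_KERNEL);
		if (!girq->parents)
			return -ENOMEM;
		girq->parents[0] = parent_irq;
		girq->default_type = IRQ_TYPE_NONE;
		girq->handler = handle_level_irq;

		/* gpiochip_add_data() now wires up the irqchip too */
		return gpiochip_add_data(gc, data);
	}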
diff --git a/drivers/pinctrl/sirf/pinctrl-sirf.c b/drivers/pinctrl/sirf/pinctrl-sirf.c
index 780c31bb4009..1ebcb957c654 100644
--- a/drivers/pinctrl/sirf/pinctrl-sirf.c
+++ b/drivers/pinctrl/sirf/pinctrl-sirf.c
@@ -785,6 +785,7 @@ static int sirfsoc_gpio_probe(struct device_node *np)
struct sirfsoc_gpio_bank *bank;
void __iomem *regs;
struct platform_device *pdev;
+ struct gpio_irq_chip *girq;
u32 pullups[SIRFSOC_GPIO_NO_OF_BANKS], pulldowns[SIRFSOC_GPIO_NO_OF_BANKS];
@@ -816,36 +817,33 @@ static int sirfsoc_gpio_probe(struct device_node *np)
sgpio->chip.gc.parent = &pdev->dev;
sgpio->chip.regs = regs;
- err = gpiochip_add_data(&sgpio->chip.gc, sgpio);
- if (err) {
- dev_err(&pdev->dev, "%pOF: error in probe function with status %d\n",
- np, err);
- goto out;
- }
-
- err = gpiochip_irqchip_add(&sgpio->chip.gc,
- &sirfsoc_irq_chip,
- 0, handle_level_irq,
- IRQ_TYPE_NONE);
- if (err) {
- dev_err(&pdev->dev,
- "could not connect irqchip to gpiochip\n");
- goto out_banks;
- }
-
+ girq = &sgpio->chip.gc.irq;
+ girq->chip = &sirfsoc_irq_chip;
+ girq->parent_handler = sirfsoc_gpio_handle_irq;
+ girq->num_parents = SIRFSOC_GPIO_NO_OF_BANKS;
+ girq->parents = devm_kcalloc(&pdev->dev, SIRFSOC_GPIO_NO_OF_BANKS,
+ sizeof(*girq->parents),
+ GFP_KERNEL);
+ if (!girq->parents)
+ return -ENOMEM;
for (i = 0; i < SIRFSOC_GPIO_NO_OF_BANKS; i++) {
bank = &sgpio->sgpio_bank[i];
spin_lock_init(&bank->lock);
bank->parent_irq = platform_get_irq(pdev, i);
if (bank->parent_irq < 0) {
err = bank->parent_irq;
- goto out_banks;
+ goto out;
}
+ girq->parents[i] = bank->parent_irq;
+ }
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_level_irq;
- gpiochip_set_chained_irqchip(&sgpio->chip.gc,
- &sirfsoc_irq_chip,
- bank->parent_irq,
- sirfsoc_gpio_handle_irq);
+ err = gpiochip_add_data(&sgpio->chip.gc, sgpio);
+ if (err) {
+ dev_err(&pdev->dev, "%pOF: error in probe function with status %d\n",
+ np, err);
+ goto out;
}
err = gpiochip_add_pin_range(&sgpio->chip.gc, dev_name(&pdev->dev),
@@ -867,7 +865,6 @@ static int sirfsoc_gpio_probe(struct device_node *np)
return 0;
out_no_range:
-out_banks:
gpiochip_remove(&sgpio->chip.gc);
out:
iounmap(regs);
diff --git a/drivers/pinctrl/spear/pinctrl-plgpio.c b/drivers/pinctrl/spear/pinctrl-plgpio.c
index 9d906474f3e4..1ebbc49b16f1 100644
--- a/drivers/pinctrl/spear/pinctrl-plgpio.c
+++ b/drivers/pinctrl/spear/pinctrl-plgpio.c
@@ -515,15 +515,13 @@ end:
static int plgpio_probe(struct platform_device *pdev)
{
struct plgpio *plgpio;
- struct resource *res;
int ret, irq;
plgpio = devm_kzalloc(&pdev->dev, sizeof(*plgpio), GFP_KERNEL);
if (!plgpio)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- plgpio->base = devm_ioremap_resource(&pdev->dev, res);
+ plgpio->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(plgpio->base))
return PTR_ERR(plgpio->base);
@@ -569,40 +567,35 @@ static int plgpio_probe(struct platform_device *pdev)
}
}
- ret = gpiochip_add_data(&plgpio->chip, plgpio);
- if (ret) {
- dev_err(&pdev->dev, "unable to add gpio chip\n");
- goto unprepare_clk;
- }
-
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_info(&pdev->dev, "PLGPIO registered without IRQs\n");
- return 0;
+ if (irq > 0) {
+ struct gpio_irq_chip *girq;
+
+ girq = &plgpio->chip.irq;
+ girq->chip = &plgpio_irqchip;
+ girq->parent_handler = plgpio_irq_handler;
+ girq->num_parents = 1;
+ girq->parents = devm_kcalloc(&pdev->dev, 1,
+ sizeof(*girq->parents),
+ GFP_KERNEL);
+ if (!girq->parents)
+ return -ENOMEM;
+ girq->parents[0] = irq;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_simple_irq;
+ dev_info(&pdev->dev, "PLGPIO registering with IRQs\n");
+ } else {
+ dev_info(&pdev->dev, "PLGPIO registering without IRQs\n");
}
- ret = gpiochip_irqchip_add(&plgpio->chip,
- &plgpio_irqchip,
- 0,
- handle_simple_irq,
- IRQ_TYPE_NONE);
+ ret = gpiochip_add_data(&plgpio->chip, plgpio);
if (ret) {
- dev_err(&pdev->dev, "failed to add irqchip to gpiochip\n");
- goto remove_gpiochip;
+ dev_err(&pdev->dev, "unable to add gpio chip\n");
+ goto unprepare_clk;
}
- gpiochip_set_chained_irqchip(&plgpio->chip,
- &plgpio_irqchip,
- irq,
- plgpio_irq_handler);
-
- dev_info(&pdev->dev, "PLGPIO registered with IRQs\n");
-
return 0;
-remove_gpiochip:
- dev_info(&pdev->dev, "Remove gpiochip\n");
- gpiochip_remove(&plgpio->chip);
unprepare_clk:
if (!IS_ERR(plgpio->clk))
clk_unprepare(plgpio->clk);
diff --git a/drivers/pinctrl/spear/pinctrl-spear.c b/drivers/pinctrl/spear/pinctrl-spear.c
index 7ec19c73f870..948f56abb9ae 100644
--- a/drivers/pinctrl/spear/pinctrl-spear.c
+++ b/drivers/pinctrl/spear/pinctrl-spear.c
@@ -358,7 +358,6 @@ int spear_pinctrl_probe(struct platform_device *pdev,
struct spear_pinctrl_machdata *machdata)
{
struct device_node *np = pdev->dev.of_node;
- struct resource *res;
struct spear_pmx *pmx;
if (!machdata)
@@ -368,8 +367,7 @@ int spear_pinctrl_probe(struct platform_device *pdev,
if (!pmx)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pmx->vbase = devm_ioremap_resource(&pdev->dev, res);
+ pmx->vbase = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pmx->vbase))
return PTR_ERR(pmx->vbase);
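
The devm_platform_ioremap_resource() conversions here and in the plgpio hunk above, and in the sunxi, tegra, vt8500 and zte hunks below, are pure simplifications; the helper folds the two-call sequence it replaces into one. Roughly what it expands to (a sketch, not the exact lib implementation):

	#include <linux/io.h>
	#include <linux/platform_device.h>

	/* ~ devm_platform_ioremap_resource(pdev, index) */
	static void __iomem *sketch_ioremap(struct platform_device *pdev,
					    unsigned int index)
	{
		struct resource *res;

		res = platform_get_resource(pdev, IORESOURCE_MEM, index);
		return devm_ioremap_resource(&pdev->dev, res);
	}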
diff --git a/drivers/pinctrl/sprd/pinctrl-sprd.c b/drivers/pinctrl/sprd/pinctrl-sprd.c
index 7b95bf5a82a9..157712ab05a8 100644
--- a/drivers/pinctrl/sprd/pinctrl-sprd.c
+++ b/drivers/pinctrl/sprd/pinctrl-sprd.c
@@ -41,7 +41,8 @@
#define PUBCP_SLEEP_MODE BIT(14)
#define TGLDSP_SLEEP_MODE BIT(15)
#define AGDSP_SLEEP_MODE BIT(16)
-#define SLEEP_MODE_MASK GENMASK(3, 0)
+#define CM4_SLEEP_MODE BIT(17)
+#define SLEEP_MODE_MASK GENMASK(5, 0)
#define SLEEP_MODE_SHIFT 13
#define SLEEP_INPUT BIT(1)
@@ -81,6 +82,7 @@ enum pin_sleep_mode {
PUBCP_SLEEP = BIT(1),
TGLDSP_SLEEP = BIT(2),
AGDSP_SLEEP = BIT(3),
+ CM4_SLEEP = BIT(4),
};
enum pin_func_sel {
@@ -484,6 +486,13 @@ static int sprd_pinconf_get(struct pinctrl_dev *pctldev, unsigned int pin_id,
SLEEP_PULL_UP_MASK) << 16;
arg |= (reg >> PULL_UP_SHIFT) & PULL_UP_MASK;
break;
+ case PIN_CONFIG_BIAS_DISABLE:
+ if ((reg & (SLEEP_PULL_DOWN | SLEEP_PULL_UP)) ||
+ (reg & (PULL_DOWN | PULL_UP_4_7K | PULL_UP_20K)))
+ return -EINVAL;
+
+ arg = 1;
+ break;
case PIN_CONFIG_SLEEP_HARDWARE_STATE:
arg = 0;
break;
@@ -609,6 +618,8 @@ static int sprd_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin_id,
val |= TGLDSP_SLEEP_MODE;
if (arg & AGDSP_SLEEP)
val |= AGDSP_SLEEP_MODE;
+ if (arg & CM4_SLEEP)
+ val |= CM4_SLEEP_MODE;
mask = SLEEP_MODE_MASK;
shift = SLEEP_MODE_SHIFT;
@@ -674,6 +685,16 @@ static int sprd_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin_id,
shift = PULL_UP_SHIFT;
}
break;
+ case PIN_CONFIG_BIAS_DISABLE:
+ if (is_sleep_config) {
+ val = shift = 0;
+ mask = SLEEP_PULL_DOWN | SLEEP_PULL_UP;
+ } else {
+ val = shift = 0;
+ mask = PULL_DOWN | PULL_UP_20K |
+ PULL_UP_4_7K;
+ }
+ break;
case PIN_CONFIG_SLEEP_HARDWARE_STATE:
continue;
default:
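
For PIN_CONFIG_BIAS_DISABLE the new set path encodes "no bias" as val = 0 under a mask spanning every pull-select bit, so the driver's usual read-modify-write clears whichever pull was active. A condensed sketch of that update step, using the driver's own defines for the non-sleep case:

	mask = PULL_DOWN | PULL_UP_20K | PULL_UP_4_7K;	/* all pull selects */
	val = 0;					/* nothing written back */
	reg = (reg & ~mask) | val;			/* pulls end up cleared */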
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
index 0cbca30b75dc..b35c3245ab3f 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
@@ -1385,7 +1385,6 @@ int sunxi_pinctrl_init_with_variant(struct platform_device *pdev,
struct pinctrl_pin_desc *pins;
struct sunxi_pinctrl *pctl;
struct pinmux_ops *pmxops;
- struct resource *res;
int i, ret, last_pin, pin_idx;
struct clk *clk;
@@ -1396,8 +1395,7 @@ int sunxi_pinctrl_init_with_variant(struct platform_device *pdev,
raw_spin_lock_init(&pctl->lock);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pctl->membase = devm_ioremap_resource(&pdev->dev, res);
+ pctl->membase = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pctl->membase))
return PTR_ERR(pctl->membase);
diff --git a/drivers/pinctrl/tegra/pinctrl-tegra-xusb.c b/drivers/pinctrl/tegra/pinctrl-tegra-xusb.c
index 95002e3ecaff..6f7b3767f453 100644
--- a/drivers/pinctrl/tegra/pinctrl-tegra-xusb.c
+++ b/drivers/pinctrl/tegra/pinctrl-tegra-xusb.c
@@ -873,7 +873,6 @@ int tegra_xusb_padctl_legacy_probe(struct platform_device *pdev)
{
struct tegra_xusb_padctl *padctl;
const struct of_device_id *match;
- struct resource *res;
struct phy *phy;
int err;
@@ -885,11 +884,16 @@ int tegra_xusb_padctl_legacy_probe(struct platform_device *pdev)
mutex_init(&padctl->lock);
padctl->dev = &pdev->dev;
+ /*
+ * Note that we can't replace this by of_device_get_match_data()
+ * because we need the separate matching table for this legacy code on
+ * Tegra124. of_device_get_match_data() would attempt to use the table
+ * from the updated driver and fail.
+ */
match = of_match_node(tegra_xusb_padctl_of_match, pdev->dev.of_node);
padctl->soc = match->data;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- padctl->regs = devm_ioremap_resource(&pdev->dev, res);
+ padctl->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(padctl->regs))
return PTR_ERR(padctl->regs);
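
The new comment is worth unpacking: of_device_get_match_data() matches against the of_match_table bound to the probing driver, while this legacy path must match against its own private table. A hedged sketch of the difference (tegra_xusb_padctl_of_match is from the patch, the contrast is illustrative):

	/* What the legacy path must keep doing: consult a private table */
	match = of_match_node(tegra_xusb_padctl_of_match, pdev->dev.of_node);
	padctl->soc = match->data;

	/*
	 * Whereas of_device_get_match_data(&pdev->dev) would consult the
	 * updated driver's of_match_table and hand back the wrong (or no)
	 * SoC data here.
	 */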
diff --git a/drivers/pinctrl/tegra/pinctrl-tegra.c b/drivers/pinctrl/tegra/pinctrl-tegra.c
index e9a7cbb9aa33..692d8b3e2a20 100644
--- a/drivers/pinctrl/tegra/pinctrl-tegra.c
+++ b/drivers/pinctrl/tegra/pinctrl-tegra.c
@@ -781,8 +781,7 @@ int tegra_pinctrl_probe(struct platform_device *pdev,
return -ENOMEM;
for (i = 0; i < pmx->nbanks; i++) {
- res = platform_get_resource(pdev, IORESOURCE_MEM, i);
- pmx->regs[i] = devm_ioremap_resource(&pdev->dev, res);
+ pmx->regs[i] = devm_platform_ioremap_resource(pdev, i);
if (IS_ERR(pmx->regs[i]))
return PTR_ERR(pmx->regs[i]);
}
diff --git a/drivers/pinctrl/ti/pinctrl-ti-iodelay.c b/drivers/pinctrl/ti/pinctrl-ti-iodelay.c
index e5e7f1f22813..b522ca010332 100644
--- a/drivers/pinctrl/ti/pinctrl-ti-iodelay.c
+++ b/drivers/pinctrl/ti/pinctrl-ti-iodelay.c
@@ -496,7 +496,7 @@ static int ti_iodelay_dt_node_to_map(struct pinctrl_dev *pctldev,
return -EINVAL;
rows = pinctrl_count_index_with_args(np, name);
- if (rows == -EINVAL)
+ if (rows < 0)
return rows;
*map = devm_kzalloc(iod->dev, sizeof(**map), GFP_KERNEL);
diff --git a/drivers/pinctrl/vt8500/pinctrl-wmt.c b/drivers/pinctrl/vt8500/pinctrl-wmt.c
index 4d5cd7d8c760..ea910a18b4d7 100644
--- a/drivers/pinctrl/vt8500/pinctrl-wmt.c
+++ b/drivers/pinctrl/vt8500/pinctrl-wmt.c
@@ -553,10 +553,8 @@ int wmt_pinctrl_probe(struct platform_device *pdev,
struct wmt_pinctrl_data *data)
{
int err;
- struct resource *res;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- data->base = devm_ioremap_resource(&pdev->dev, res);
+ data->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(data->base))
return PTR_ERR(data->base);
diff --git a/drivers/pinctrl/zte/pinctrl-zx.c b/drivers/pinctrl/zte/pinctrl-zx.c
index 9512045420ec..786bf89487d6 100644
--- a/drivers/pinctrl/zte/pinctrl-zx.c
+++ b/drivers/pinctrl/zte/pinctrl-zx.c
@@ -387,7 +387,6 @@ int zx_pinctrl_init(struct platform_device *pdev,
struct pinctrl_desc *pctldesc;
struct zx_pinctrl *zpctl;
struct device_node *np;
- struct resource *res;
int ret;
zpctl = devm_kzalloc(&pdev->dev, sizeof(*zpctl), GFP_KERNEL);
@@ -396,8 +395,7 @@ int zx_pinctrl_init(struct platform_device *pdev,
spin_lock_init(&zpctl->lock);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- zpctl->base = devm_ioremap_resource(&pdev->dev, res);
+ zpctl->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(zpctl->base))
return PTR_ERR(zpctl->base);
diff --git a/drivers/platform/chrome/cros_ec_trace.c b/drivers/platform/chrome/cros_ec_trace.c
index 6f80ff4532ae..5af1d66d9eca 100644
--- a/drivers/platform/chrome/cros_ec_trace.c
+++ b/drivers/platform/chrome/cros_ec_trace.c
@@ -98,7 +98,10 @@
TRACE_SYMBOL(EC_CMD_SB_READ_BLOCK), \
TRACE_SYMBOL(EC_CMD_SB_WRITE_BLOCK), \
TRACE_SYMBOL(EC_CMD_BATTERY_VENDOR_PARAM), \
- TRACE_SYMBOL(EC_CMD_CODEC_I2S), \
+ TRACE_SYMBOL(EC_CMD_EC_CODEC), \
+ TRACE_SYMBOL(EC_CMD_EC_CODEC_DMIC), \
+ TRACE_SYMBOL(EC_CMD_EC_CODEC_I2S_RX), \
+ TRACE_SYMBOL(EC_CMD_EC_CODEC_WOV), \
TRACE_SYMBOL(EC_CMD_REBOOT_EC), \
TRACE_SYMBOL(EC_CMD_GET_PANIC_INFO), \
TRACE_SYMBOL(EC_CMD_ACPI_READ), \
diff --git a/drivers/platform/goldfish/Kconfig b/drivers/platform/goldfish/Kconfig
index 77b35df3a801..f3d09b1631e3 100644
--- a/drivers/platform/goldfish/Kconfig
+++ b/drivers/platform/goldfish/Kconfig
@@ -1,8 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
menuconfig GOLDFISH
bool "Platform support for Goldfish virtual devices"
- depends on X86_32 || X86_64 || ARM || ARM64 || MIPS
- depends on HAS_IOMEM
+ depends on HAS_IOMEM && HAS_DMA
help
Say Y here to get to see options for the Goldfish virtual platform.
This option alone does not add any kernel code.
diff --git a/drivers/platform/mips/Kconfig b/drivers/platform/mips/Kconfig
index 62ea1934fb6a..f4d0a86c00d0 100644
--- a/drivers/platform/mips/Kconfig
+++ b/drivers/platform/mips/Kconfig
@@ -17,8 +17,8 @@ menuconfig MIPS_PLATFORM_DEVICES
if MIPS_PLATFORM_DEVICES
config CPU_HWMON
- tristate "Loongson CPU HWMon Driver"
- depends on LOONGSON_MACH3X
+ tristate "Loongson-3 CPU HWMon Driver"
+ depends on MACH_LOONGSON64

select HWMON
default y
help
diff --git a/drivers/platform/mips/cpu_hwmon.c b/drivers/platform/mips/cpu_hwmon.c
index a7f184bb47e0..0d27cb7a9e3c 100644
--- a/drivers/platform/mips/cpu_hwmon.c
+++ b/drivers/platform/mips/cpu_hwmon.c
@@ -9,6 +9,9 @@
#include <loongson.h>
#include <boot_param.h>
#include <loongson_hwmon.h>
+#include <loongson_regs.h>
+
+static int csr_temp_enable;
/*
* Loongson-3 series cpu has two sensors inside,
@@ -20,8 +23,14 @@ int loongson3_cpu_temp(int cpu)
{
u32 reg, prid_rev;
+ if (csr_temp_enable) {
+ reg = (csr_readl(LOONGSON_CSR_CPUTEMP) & 0xff);
+ goto out;
+ }
+
reg = LOONGSON_CHIPTEMP(cpu);
prid_rev = read_c0_prid() & PRID_REV_MASK;
+
switch (prid_rev) {
case PRID_REV_LOONGSON3A_R1:
reg = (reg >> 8) & 0xff;
@@ -34,9 +43,12 @@ int loongson3_cpu_temp(int cpu)
break;
case PRID_REV_LOONGSON3A_R3_0:
case PRID_REV_LOONGSON3A_R3_1:
+ default:
reg = (reg & 0xffff)*731/0x4000 - 273;
break;
}
+
+out:
return (int)reg * 1000;
}
@@ -159,9 +171,12 @@ static int __init loongson_hwmon_init(void)
pr_info("Loongson Hwmon Enter...\n");
+ if (cpu_has_csr())
+ csr_temp_enable = csr_readl(LOONGSON_CSR_FEATURES) & LOONGSON_CSRF_TEMP;
+
cpu_hwmon_dev = hwmon_device_register(NULL);
if (IS_ERR(cpu_hwmon_dev)) {
- ret = -ENOMEM;
+ ret = PTR_ERR(cpu_hwmon_dev);
pr_err("hwmon_device_register fail!\n");
goto fail_hwmon_device_register;
}
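
The control flow added above is a common capability-caching pattern: probe a feature register once at init, stash the answer in a static, and take one cheap branch in the hot path. Compressed sketch, assuming csr_readl()/cpu_has_csr() behave as their names suggest:

	static int csr_temp_enable;	/* cached capability bit */

	/* At init: detect the temperature CSR once */
	if (cpu_has_csr())
		csr_temp_enable = csr_readl(LOONGSON_CSR_FEATURES) &
				  LOONGSON_CSRF_TEMP;

	/* Per read: CSR path short-circuits the PRID-based decoding */
	if (csr_temp_enable)
		return (csr_readl(LOONGSON_CSR_CPUTEMP) & 0xff) * 1000;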
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
index 472af7edf0af..ca65e1039f92 100644
--- a/drivers/platform/x86/asus-laptop.c
+++ b/drivers/platform/x86/asus-laptop.c
@@ -1148,7 +1148,7 @@ static void asus_als_switch(struct asus_laptop *asus, int value)
ret = write_acpi_int(asus->handle, METHOD_ALS_CONTROL, value);
}
if (ret)
- pr_warning("Error setting light sensor switch\n");
+ pr_warn("Error setting light sensor switch\n");
asus->light_switch = value;
}
diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
index f3f74a9c109e..776868d5e458 100644
--- a/drivers/platform/x86/eeepc-laptop.c
+++ b/drivers/platform/x86/eeepc-laptop.c
@@ -578,7 +578,7 @@ static void eeepc_rfkill_hotplug(struct eeepc_laptop *eeepc, acpi_handle handle)
port = acpi_get_pci_dev(handle);
if (!port) {
- pr_warning("Unable to find port\n");
+ pr_warn("Unable to find port\n");
goto out_unlock;
}
diff --git a/drivers/platform/x86/intel_oaktrail.c b/drivers/platform/x86/intel_oaktrail.c
index 3c0438ba385e..1a09a75bd16d 100644
--- a/drivers/platform/x86/intel_oaktrail.c
+++ b/drivers/platform/x86/intel_oaktrail.c
@@ -243,7 +243,7 @@ static int oaktrail_backlight_init(void)
if (IS_ERR(bd)) {
oaktrail_bl_device = NULL;
- pr_warning("Unable to register backlight device\n");
+ pr_warn("Unable to register backlight device\n");
return PTR_ERR(bd);
}
@@ -313,20 +313,20 @@ static int __init oaktrail_init(void)
ret = platform_driver_register(&oaktrail_driver);
if (ret) {
- pr_warning("Unable to register platform driver\n");
+ pr_warn("Unable to register platform driver\n");
goto err_driver_reg;
}
oaktrail_device = platform_device_alloc(DRIVER_NAME, -1);
if (!oaktrail_device) {
- pr_warning("Unable to allocate platform device\n");
+ pr_warn("Unable to allocate platform device\n");
ret = -ENOMEM;
goto err_device_alloc;
}
ret = platform_device_add(oaktrail_device);
if (ret) {
- pr_warning("Unable to add platform device\n");
+ pr_warn("Unable to add platform device\n");
goto err_device_add;
}
@@ -338,7 +338,7 @@ static int __init oaktrail_init(void)
ret = oaktrail_rfkill_init();
if (ret) {
- pr_warning("Setup rfkill failed\n");
+ pr_warn("Setup rfkill failed\n");
goto err_rfkill;
}
diff --git a/drivers/power/avs/smartreflex.c b/drivers/power/avs/smartreflex.c
index 4684e7df833a..5376f3d22f31 100644
--- a/drivers/power/avs/smartreflex.c
+++ b/drivers/power/avs/smartreflex.c
@@ -905,7 +905,7 @@ static int omap_sr_probe(struct platform_device *pdev)
sr_info->dbg_dir = debugfs_create_dir(sr_info->name, sr_dbg_dir);
debugfs_create_file("autocomp", S_IRUGO | S_IWUSR, sr_info->dbg_dir,
- (void *)sr_info, &pm_sr_fops);
+ sr_info, &pm_sr_fops);
debugfs_create_x32("errweight", S_IRUGO, sr_info->dbg_dir,
&sr_info->err_weight);
debugfs_create_x32("errmaxlimit", S_IRUGO, sr_info->dbg_dir,
diff --git a/drivers/power/reset/at91-reset.c b/drivers/power/reset/at91-reset.c
index 44ca983a49a1..d94e3267c3b6 100644
--- a/drivers/power/reset/at91-reset.c
+++ b/drivers/power/reset/at91-reset.c
@@ -131,7 +131,7 @@ static int at91sam9g45_restart(struct notifier_block *this, unsigned long mode,
static int sama5d3_restart(struct notifier_block *this, unsigned long mode,
void *cmd)
{
- writel(cpu_to_le32(AT91_RSTC_KEY | AT91_RSTC_PERRST | AT91_RSTC_PROCRST),
+ writel(AT91_RSTC_KEY | AT91_RSTC_PERRST | AT91_RSTC_PROCRST,
at91_rstc_base);
return NOTIFY_DONE;
@@ -140,9 +140,7 @@ static int sama5d3_restart(struct notifier_block *this, unsigned long mode,
static int samx7_restart(struct notifier_block *this, unsigned long mode,
void *cmd)
{
- writel(cpu_to_le32(AT91_RSTC_KEY | AT91_RSTC_PROCRST),
- at91_rstc_base);
-
+ writel(AT91_RSTC_KEY | AT91_RSTC_PROCRST, at91_rstc_base);
return NOTIFY_DONE;
}
diff --git a/drivers/power/reset/at91-sama5d2_shdwc.c b/drivers/power/reset/at91-sama5d2_shdwc.c
index e341cc5c0ea6..1c18f465a245 100644
--- a/drivers/power/reset/at91-sama5d2_shdwc.c
+++ b/drivers/power/reset/at91-sama5d2_shdwc.c
@@ -269,6 +269,12 @@ static const struct of_device_id at91_shdwc_of_match[] = {
};
MODULE_DEVICE_TABLE(of, at91_shdwc_of_match);
+static const struct of_device_id at91_pmc_ids[] = {
+ { .compatible = "atmel,sama5d2-pmc" },
+ { .compatible = "microchip,sam9x60-pmc" },
+ { /* Sentinel. */ }
+};
+
static int __init at91_shdwc_probe(struct platform_device *pdev)
{
struct resource *res;
@@ -313,7 +319,7 @@ static int __init at91_shdwc_probe(struct platform_device *pdev)
at91_shdwc_dt_configure(pdev);
- np = of_find_compatible_node(NULL, NULL, "atmel,sama5d2-pmc");
+ np = of_find_matching_node(NULL, at91_pmc_ids);
if (!np) {
ret = -ENODEV;
goto clk_disable;
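
Switching to of_find_matching_node() lets a single lookup accept any compatible in a NULL-terminated table rather than one fixed string. Roughly (table name from the patch; the chained form is illustrative, and node ordering can differ between the two):

	/* One lookup, several acceptable compatibles */
	np = of_find_matching_node(NULL, at91_pmc_ids);

	/* ...roughly equivalent to trying each string in turn: */
	np = of_find_compatible_node(NULL, NULL, "atmel,sama5d2-pmc");
	if (!np)
		np = of_find_compatible_node(NULL, NULL,
					     "microchip,sam9x60-pmc");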
diff --git a/drivers/power/supply/ab8500_btemp.c b/drivers/power/supply/ab8500_btemp.c
index ad8c51ef8b8b..909f0242bacb 100644
--- a/drivers/power/supply/ab8500_btemp.c
+++ b/drivers/power/supply/ab8500_btemp.c
@@ -1099,6 +1099,11 @@ static int ab8500_btemp_probe(struct platform_device *pdev)
/* Register interrupts */
for (i = 0; i < ARRAY_SIZE(ab8500_btemp_irq); i++) {
irq = platform_get_irq_byname(pdev, ab8500_btemp_irq[i].name);
+ if (irq < 0) {
+ ret = irq;
+ goto free_irq;
+ }
+
ret = request_threaded_irq(irq, NULL, ab8500_btemp_irq[i].isr,
IRQF_SHARED | IRQF_NO_SUSPEND,
ab8500_btemp_irq[i].name, di);
@@ -1121,13 +1126,13 @@ static int ab8500_btemp_probe(struct platform_device *pdev)
return ret;
free_irq:
- power_supply_unregister(di->btemp_psy);
-
/* We also have to free all successfully registered irqs */
for (i = i - 1; i >= 0; i--) {
irq = platform_get_irq_byname(pdev, ab8500_btemp_irq[i].name);
free_irq(irq, di);
}
+
+ power_supply_unregister(di->btemp_psy);
free_btemp_wq:
destroy_workqueue(di->btemp_wq);
return ret;
diff --git a/drivers/power/supply/ab8500_charger.c b/drivers/power/supply/ab8500_charger.c
index c053ede47eb2..8a0f9d769690 100644
--- a/drivers/power/supply/ab8500_charger.c
+++ b/drivers/power/supply/ab8500_charger.c
@@ -3393,7 +3393,7 @@ static int ab8500_charger_probe(struct platform_device *pdev)
if (PTR_ERR(di->adc_main_charger_c) == -ENODEV)
return -EPROBE_DEFER;
dev_err(&pdev->dev, "failed to get ADC main charger current\n");
- return PTR_ERR(di->adc_main_charger_v);
+ return PTR_ERR(di->adc_main_charger_c);
}
di->adc_vbus_v = devm_iio_channel_get(&pdev->dev, "vbus_v");
if (IS_ERR(di->adc_vbus_v)) {
@@ -3594,6 +3594,11 @@ static int ab8500_charger_probe(struct platform_device *pdev)
/* Register interrupts */
for (i = 0; i < ARRAY_SIZE(ab8500_charger_irq); i++) {
irq = platform_get_irq_byname(pdev, ab8500_charger_irq[i].name);
+ if (irq < 0) {
+ ret = irq;
+ goto free_irq;
+ }
+
ret = request_threaded_irq(irq, NULL, ab8500_charger_irq[i].isr,
IRQF_SHARED | IRQF_NO_SUSPEND,
ab8500_charger_irq[i].name, di);
diff --git a/drivers/power/supply/ab8500_fg.c b/drivers/power/supply/ab8500_fg.c
index f7909dfd3b61..c3912ee9eb99 100644
--- a/drivers/power/supply/ab8500_fg.c
+++ b/drivers/power/supply/ab8500_fg.c
@@ -3158,6 +3158,11 @@ static int ab8500_fg_probe(struct platform_device *pdev)
/* Register primary interrupt handlers */
for (i = 0; i < ARRAY_SIZE(ab8500_fg_irq_th); i++) {
irq = platform_get_irq_byname(pdev, ab8500_fg_irq_th[i].name);
+ if (irq < 0) {
+ ret = irq;
+ goto free_irq_th;
+ }
+
ret = request_irq(irq, ab8500_fg_irq_th[i].isr,
IRQF_SHARED | IRQF_NO_SUSPEND,
ab8500_fg_irq_th[i].name, di);
@@ -3165,7 +3170,7 @@ static int ab8500_fg_probe(struct platform_device *pdev)
if (ret != 0) {
dev_err(di->dev, "failed to request %s IRQ %d: %d\n",
ab8500_fg_irq_th[i].name, irq, ret);
- goto free_irq;
+ goto free_irq_th;
}
dev_dbg(di->dev, "Requested %s IRQ %d: %d\n",
ab8500_fg_irq_th[i].name, irq, ret);
@@ -3173,6 +3178,11 @@ static int ab8500_fg_probe(struct platform_device *pdev)
/* Register threaded interrupt handler */
irq = platform_get_irq_byname(pdev, ab8500_fg_irq_bh[0].name);
+ if (irq < 0) {
+ ret = irq;
+ goto free_irq_th;
+ }
+
ret = request_threaded_irq(irq, NULL, ab8500_fg_irq_bh[0].isr,
IRQF_SHARED | IRQF_NO_SUSPEND | IRQF_ONESHOT,
ab8500_fg_irq_bh[0].name, di);
@@ -3180,7 +3190,7 @@ static int ab8500_fg_probe(struct platform_device *pdev)
if (ret != 0) {
dev_err(di->dev, "failed to request %s IRQ %d: %d\n",
ab8500_fg_irq_bh[0].name, irq, ret);
- goto free_irq;
+ goto free_irq_th;
}
dev_dbg(di->dev, "Requested %s IRQ %d: %d\n",
ab8500_fg_irq_bh[0].name, irq, ret);
@@ -3219,15 +3229,17 @@ static int ab8500_fg_probe(struct platform_device *pdev)
return ret;
free_irq:
- power_supply_unregister(di->fg_psy);
-
/* We also have to free all registered irqs */
- for (i = 0; i < ARRAY_SIZE(ab8500_fg_irq_th); i++) {
+ irq = platform_get_irq_byname(pdev, ab8500_fg_irq_bh[0].name);
+ free_irq(irq, di);
+free_irq_th:
+ while (--i >= 0) {
+ /* i still holds the index from the primary (_th) handler loop */
irq = platform_get_irq_byname(pdev, ab8500_fg_irq_th[i].name);
free_irq(irq, di);
}
- irq = platform_get_irq_byname(pdev, ab8500_fg_irq_bh[0].name);
- free_irq(irq, di);
+
+ power_supply_unregister(di->fg_psy);
free_inst_curr_wq:
destroy_workqueue(di->fg_wq);
return ret;
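
The reworked error path above follows the standard partial-unwind idiom: on failure at index i, release exactly the entries 0..i-1 that did succeed, newest first, before tearing down anything allocated earlier. A generic sketch with hypothetical do_register()/do_unregister() stand-ins:

	int i, err, n = 4;

	for (i = 0; i < n; i++) {
		err = do_register(i);
		if (err)
			goto unwind;
	}
	return 0;

	unwind:
		while (--i >= 0)	/* frees exactly 0..i-1 */
			do_unregister(i);
		return err;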
diff --git a/drivers/power/supply/abx500_chargalg.c b/drivers/power/supply/abx500_chargalg.c
index 23757fb10479..e6e37d4f20e4 100644
--- a/drivers/power/supply/abx500_chargalg.c
+++ b/drivers/power/supply/abx500_chargalg.c
@@ -354,13 +354,13 @@ static int abx500_chargalg_check_charger_enable(struct abx500_chargalg *di)
if (di->chg_info.charger_type & USB_CHG) {
return di->usb_chg->ops.check_enable(di->usb_chg,
- di->bm->bat_type[di->bm->batt_id].normal_vol_lvl,
- di->bm->bat_type[di->bm->batt_id].normal_cur_lvl);
+ di->bm->bat_type[di->bm->batt_id].normal_vol_lvl,
+ di->bm->bat_type[di->bm->batt_id].normal_cur_lvl);
} else if ((di->chg_info.charger_type & AC_CHG) &&
!(di->ac_chg->external)) {
return di->ac_chg->ops.check_enable(di->ac_chg,
- di->bm->bat_type[di->bm->batt_id].normal_vol_lvl,
- di->bm->bat_type[di->bm->batt_id].normal_cur_lvl);
+ di->bm->bat_type[di->bm->batt_id].normal_vol_lvl,
+ di->bm->bat_type[di->bm->batt_id].normal_cur_lvl);
}
return 0;
}
diff --git a/drivers/power/supply/axp20x_usb_power.c b/drivers/power/supply/axp20x_usb_power.c
index dc4c316eff81..5f0a5722b19e 100644
--- a/drivers/power/supply/axp20x_usb_power.c
+++ b/drivers/power/supply/axp20x_usb_power.c
@@ -48,6 +48,8 @@
#define AXP20X_VBUS_MON_VBUS_VALID BIT(3)
+#define AXP813_BC_EN BIT(0)
+
/*
* Note do not raise the debounce time, we must report Vusb high within
* 100ms otherwise we get Vbus errors in musb.
@@ -495,6 +497,12 @@ static int axp20x_usb_power_probe(struct platform_device *pdev)
return -EINVAL;
}
+ if (power->axp20x_id == AXP813_ID) {
+ /* Enable USB Battery Charging specification detection */
+ regmap_update_bits(axp20x->regmap, AXP288_BC_GLOBAL,
+ AXP813_BC_EN, AXP813_BC_EN);
+ }
+
psy_cfg.of_node = pdev->dev.of_node;
psy_cfg.drv_data = power;
diff --git a/drivers/power/supply/bd70528-charger.c b/drivers/power/supply/bd70528-charger.c
index 1bb32b7226d7..b8e1ec106627 100644
--- a/drivers/power/supply/bd70528-charger.c
+++ b/drivers/power/supply/bd70528-charger.c
@@ -741,3 +741,4 @@ module_platform_driver(bd70528_power);
MODULE_AUTHOR("Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>");
MODULE_DESCRIPTION("BD70528 power-supply driver");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:bd70528-power");
diff --git a/drivers/power/supply/cpcap-battery.c b/drivers/power/supply/cpcap-battery.c
index 61d6447d1966..6e9392901b0a 100644
--- a/drivers/power/supply/cpcap-battery.c
+++ b/drivers/power/supply/cpcap-battery.c
@@ -33,8 +33,6 @@
#include <linux/iio/types.h>
#include <linux/mfd/motorola-cpcap.h>
-#include <asm/div64.h>
-
/*
* Register bit defines for CPCAP_REG_BPEOL. Some of these seem to
* map to MC13783UG.pdf "Table 5-19. Register 13, Power Control 0"
@@ -52,6 +50,26 @@
#define CPCAP_REG_BPEOL_BIT_BATTDETEN BIT(1) /* Enable battery detect */
#define CPCAP_REG_BPEOL_BIT_EOLSEL BIT(0) /* BPDET = 0, EOL = 1 */
+/*
+ * Register bit defines for CPCAP_REG_CCC1. These seem similar to the twl6030
+ * coulomb counter registers rather than the mc13892 registers. Both twl6030
+ * and mc13892 set bits 2 and 1 to reset and clear registers. But mc13892
+ * sets bit 0 to start the coulomb counter while twl6030 sets bit 0 to stop
+ * the coulomb counter like cpcap does. So for now, we use the twl6030 style
+ * naming for the registers.
+ */
+#define CPCAP_REG_CCC1_ACTIVE_MODE1 BIT(4) /* Update rate */
+#define CPCAP_REG_CCC1_ACTIVE_MODE0 BIT(3) /* Update rate */
+#define CPCAP_REG_CCC1_AUTOCLEAR BIT(2) /* Resets sample registers */
+#define CPCAP_REG_CCC1_CAL_EN BIT(1) /* Clears after write in 1s */
+#define CPCAP_REG_CCC1_PAUSE BIT(0) /* Stop counters, allow write */
+#define CPCAP_REG_CCC1_RESET_MASK (CPCAP_REG_CCC1_AUTOCLEAR | \
+ CPCAP_REG_CCC1_CAL_EN)
+
+#define CPCAP_REG_CCCC2_RATE1 BIT(5)
+#define CPCAP_REG_CCCC2_RATE0 BIT(4)
+#define CPCAP_REG_CCCC2_ENABLE BIT(3)
+
#define CPCAP_BATTERY_CC_SAMPLE_PERIOD_MS 250
enum {
@@ -64,6 +82,7 @@ enum {
enum cpcap_battery_irq_action {
CPCAP_BATTERY_IRQ_ACTION_NONE,
+ CPCAP_BATTERY_IRQ_ACTION_CC_CAL_DONE,
CPCAP_BATTERY_IRQ_ACTION_BATTERY_LOW,
CPCAP_BATTERY_IRQ_ACTION_POWEROFF,
};
@@ -76,15 +95,16 @@ struct cpcap_interrupt_desc {
};
struct cpcap_battery_config {
- int ccm;
int cd_factor;
struct power_supply_info info;
+ struct power_supply_battery_info bat;
};
struct cpcap_coulomb_counter_data {
s32 sample; /* 24 or 32 bits */
s32 accumulator;
s16 offset; /* 9 bits */
+ s16 integrator; /* 13 or 16 bits */
};
enum cpcap_battery_state {
@@ -110,6 +130,7 @@ struct cpcap_battery_ddata {
struct power_supply *psy;
struct cpcap_battery_config config;
struct cpcap_battery_state_data state[CPCAP_BATTERY_STATE_NR];
+ u32 cc_lsb; /* μAms per LSB */
atomic_t active;
int status;
u16 vendor;
@@ -217,41 +238,17 @@ static int cpcap_battery_cc_raw_div(struct cpcap_battery_ddata *ddata,
s16 offset, u32 divider)
{
s64 acc;
- u64 tmp;
- int avg_current;
- u32 cc_lsb;
if (!divider)
return 0;
- switch (ddata->vendor) {
- case CPCAP_VENDOR_ST:
- cc_lsb = 95374; /* μAms per LSB */
- break;
- case CPCAP_VENDOR_TI:
- cc_lsb = 91501; /* μAms per LSB */
- break;
- default:
- return -EINVAL;
- }
-
acc = accumulator;
- acc = acc - ((s64)sample * offset);
- cc_lsb = (cc_lsb * ddata->config.cd_factor) / 1000;
+ acc -= (s64)sample * offset;
+ acc *= ddata->cc_lsb;
+ acc *= -1;
+ acc = div_s64(acc, divider);
- if (acc >= 0)
- tmp = acc;
- else
- tmp = acc * -1;
-
- tmp = tmp * cc_lsb;
- do_div(tmp, divider);
- avg_current = tmp;
-
- if (acc >= 0)
- return -avg_current;
- else
- return avg_current;
+ return acc;
}
/* 3600000μAms = 1μAh */
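The rework above precomputes cc_lsb once at probe time (the per-vendor LSB scaled by cd_factor) and replaces the do_div()-plus-manual-sign dance with div_s64(). A standalone sketch of the resulting arithmetic, assuming the ST constants from this file and that divider is the integration window in milliseconds:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Constants as set up in cpcap_battery_probe() (ST variant). */
    const uint32_t cc_lsb = (95374 * 0x3cc) / 1000; /* uAms per LSB, scaled */
    int32_t sample = 4;              /* ST integrates 4 samples, per the code above */
    int16_t offset = 7;              /* calibration offset applied per sample */
    int64_t acc = -1200;             /* raw accumulator (example value) */
    uint32_t divider = sample * 250; /* 250 ms sample period */

    acc -= (int64_t)sample * offset; /* remove calibration bias */
    acc *= cc_lsb;                   /* LSBs -> uAms */
    acc = -acc;                      /* driver sign convention */
    acc /= divider;                  /* uAms / ms -> uA */

    printf("average current: %lld uA\n", (long long)acc);
    return 0;
}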
@@ -293,12 +290,13 @@ static int
cpcap_battery_read_accumulated(struct cpcap_battery_ddata *ddata,
struct cpcap_coulomb_counter_data *ccd)
{
- u16 buf[7]; /* CPCAP_REG_CC1 to CCI */
+ u16 buf[7]; /* CPCAP_REG_CCS1 to CCI */
int error;
ccd->sample = 0;
ccd->accumulator = 0;
ccd->offset = 0;
+ ccd->integrator = 0;
/* Read coulomb counter register range */
error = regmap_bulk_read(ddata->reg, CPCAP_REG_CCS1,
@@ -323,6 +321,12 @@ cpcap_battery_read_accumulated(struct cpcap_battery_ddata *ddata,
ccd->offset = buf[4];
ccd->offset = sign_extend32(ccd->offset, 9);
+ /* Integrator register CPCAP_REG_CCI */
+ if (ddata->vendor == CPCAP_VENDOR_TI)
+ ccd->integrator = sign_extend32(buf[6], 13);
+ else
+ ccd->integrator = (s16)buf[6];
+
return cpcap_battery_cc_to_uah(ddata,
ccd->sample,
ccd->accumulator,
@@ -336,31 +340,28 @@ cpcap_battery_read_accumulated(struct cpcap_battery_ddata *ddata,
static int cpcap_battery_cc_get_avg_current(struct cpcap_battery_ddata *ddata)
{
int value, acc, error;
- s32 sample = 1;
+ s32 sample;
s16 offset;
- if (ddata->vendor == CPCAP_VENDOR_ST)
- sample = 4;
-
/* Coulomb counter integrator */
error = regmap_read(ddata->reg, CPCAP_REG_CCI, &value);
if (error)
return error;
- if ((ddata->vendor == CPCAP_VENDOR_TI) && (value > 0x2000))
- value = value | 0xc000;
-
- acc = (s16)value;
+ if (ddata->vendor == CPCAP_VENDOR_TI) {
+ acc = sign_extend32(value, 13);
+ sample = 1;
+ } else {
+ acc = (s16)value;
+ sample = 4;
+ }
- /* Coulomb counter sample time */
+ /* Coulomb counter calibration offset */
error = regmap_read(ddata->reg, CPCAP_REG_CCM, &value);
if (error)
return error;
- if (value < 0x200)
- offset = value;
- else
- offset = value | 0xfc00;
+ offset = sign_extend32(value, 9);
return cpcap_battery_cc_to_ua(ddata, sample, acc, offset);
}
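Both conversions above now use sign_extend32(), whose second argument is the index of the sign bit, not the field width. A standalone sketch of its semantics (the kernel version lives in include/linux/bitops.h):

#include <stdint.h>
#include <stdio.h>

/* Same shape as the kernel helper: @index is the bit number of the sign bit. */
static int32_t sign_extend32(uint32_t value, int index)
{
    uint8_t shift = 31 - index;

    return (int32_t)(value << shift) >> shift;
}

int main(void)
{
    /* TI integrator: 14-bit field, so bit 13 is the sign bit. */
    printf("%d\n", sign_extend32(0x2001, 13)); /* prints -8191 */
    /* CCM offset: 10-bit field, bit 9 clear stays positive. */
    printf("%d\n", sign_extend32(0x0123, 9));  /* prints 291 */
    return 0;
}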
@@ -369,8 +370,8 @@ static bool cpcap_battery_full(struct cpcap_battery_ddata *ddata)
{
struct cpcap_battery_state_data *state = cpcap_battery_latest(ddata);
- /* Basically anything that measures above 4347000 is full */
- if (state->voltage >= (ddata->config.info.voltage_max_design - 4000))
+ if (state->voltage >=
+ (ddata->config.bat.constant_charge_voltage_max_uv - 18000))
return true;
return false;
@@ -417,6 +418,7 @@ static enum power_supply_property cpcap_battery_props[] = {
POWER_SUPPLY_PROP_VOLTAGE_NOW,
POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE,
POWER_SUPPLY_PROP_CURRENT_AVG,
POWER_SUPPLY_PROP_CURRENT_NOW,
POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
@@ -475,6 +477,9 @@ static int cpcap_battery_get_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
val->intval = ddata->config.info.voltage_min_design;
break;
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+ val->intval = ddata->config.bat.constant_charge_voltage_max_uv;
+ break;
case POWER_SUPPLY_PROP_CURRENT_AVG:
sample = latest->cc.sample - previous->cc.sample;
if (!sample) {
@@ -540,6 +545,69 @@ static int cpcap_battery_get_property(struct power_supply *psy,
return 0;
}
+static int cpcap_battery_update_charger(struct cpcap_battery_ddata *ddata,
+ int const_charge_voltage)
+{
+ union power_supply_propval prop;
+ union power_supply_propval val;
+ struct power_supply *charger;
+ int error;
+
+ charger = power_supply_get_by_name("usb");
+ if (!charger)
+ return -ENODEV;
+
+ error = power_supply_get_property(charger,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE,
+ &prop);
+ if (error)
+ return error;
+
+ /* Only lower the charger constant voltage; leave it alone if already lower */
+ if (const_charge_voltage > prop.intval)
+ return 0;
+
+ val.intval = const_charge_voltage;
+
+ return power_supply_set_property(charger,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE,
+ &val);
+}
+
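power_supply_get_by_name() takes a reference on the supply it returns, and callers ordinarily drop it again with power_supply_put() once they are done. A hedged sketch of the balanced pattern, with a hypothetical helper name, assuming a charger registered as "usb" as in cpcap-charger:

#include <linux/power_supply.h>

/* Hypothetical helper, not part of the patch above. */
static int example_read_charger_voltage(void)
{
    union power_supply_propval prop;
    struct power_supply *charger;
    int error;

    charger = power_supply_get_by_name("usb");
    if (!charger)
        return -ENODEV;

    error = power_supply_get_property(charger,
            POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE,
            &prop);

    power_supply_put(charger); /* drop the reference taken above */

    return error ? error : prop.intval;
}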
+static int cpcap_battery_set_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ const union power_supply_propval *val)
+{
+ struct cpcap_battery_ddata *ddata = power_supply_get_drvdata(psy);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+ if (val->intval < ddata->config.info.voltage_min_design)
+ return -EINVAL;
+ if (val->intval > ddata->config.info.voltage_max_design)
+ return -EINVAL;
+
+ ddata->config.bat.constant_charge_voltage_max_uv = val->intval;
+
+ return cpcap_battery_update_charger(ddata, val->intval);
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int cpcap_battery_property_is_writeable(struct power_supply *psy,
+ enum power_supply_property psp)
+{
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
static irqreturn_t cpcap_battery_irq_thread(int irq, void *data)
{
struct cpcap_battery_ddata *ddata = data;
@@ -560,14 +628,19 @@ static irqreturn_t cpcap_battery_irq_thread(int irq, void *data)
latest = cpcap_battery_latest(ddata);
switch (d->action) {
+ case CPCAP_BATTERY_IRQ_ACTION_CC_CAL_DONE:
+ dev_info(ddata->dev, "Coulomb counter calibration done\n");
+ break;
case CPCAP_BATTERY_IRQ_ACTION_BATTERY_LOW:
if (latest->current_ua >= 0)
- dev_warn(ddata->dev, "Battery low at 3.3V!\n");
+ dev_warn(ddata->dev, "Battery low at %imV!\n",
+ latest->voltage / 1000);
break;
case CPCAP_BATTERY_IRQ_ACTION_POWEROFF:
- if (latest->current_ua >= 0) {
+ if (latest->current_ua >= 0 && latest->voltage <= 3200000) {
dev_emerg(ddata->dev,
- "Battery empty at 3.1V, powering off\n");
+ "Battery empty at %imV, powering off\n",
+ latest->voltage / 1000);
orderly_poweroff(true);
}
break;
@@ -609,7 +682,9 @@ static int cpcap_battery_init_irq(struct platform_device *pdev,
d->name = name;
d->irq = irq;
- if (!strncmp(name, "lowbph", 6))
+ if (!strncmp(name, "cccal", 5))
+ d->action = CPCAP_BATTERY_IRQ_ACTION_CC_CAL_DONE;
+ else if (!strncmp(name, "lowbph", 6))
d->action = CPCAP_BATTERY_IRQ_ACTION_BATTERY_LOW;
else if (!strncmp(name, "lowbpl", 6))
d->action = CPCAP_BATTERY_IRQ_ACTION_POWEROFF;
@@ -635,6 +710,9 @@ static int cpcap_battery_init_interrupts(struct platform_device *pdev,
return error;
}
+ /* Enable the calibration interrupt if available in the dts */
+ cpcap_battery_init_irq(pdev, ddata, "cccal");
+
/* Enable low battery interrupts for 3.3V high and 3.1V low */
error = regmap_update_bits(ddata->reg, CPCAP_REG_BPEOL,
0xffff,
@@ -676,6 +754,60 @@ out_err:
return error;
}
+/* Calibrate coulomb counter */
+static int cpcap_battery_calibrate(struct cpcap_battery_ddata *ddata)
+{
+ int error, ccc1, value;
+ unsigned long timeout;
+
+ error = regmap_read(ddata->reg, CPCAP_REG_CCC1, &ccc1);
+ if (error)
+ return error;
+
+ timeout = jiffies + msecs_to_jiffies(6000);
+
+ /* Start calibration */
+ error = regmap_update_bits(ddata->reg, CPCAP_REG_CCC1,
+ 0xffff,
+ CPCAP_REG_CCC1_CAL_EN);
+ if (error)
+ goto restore;
+
+ while (time_before(jiffies, timeout)) {
+ error = regmap_read(ddata->reg, CPCAP_REG_CCC1, &value);
+ if (error)
+ goto restore;
+
+ if (!(value & CPCAP_REG_CCC1_CAL_EN))
+ break;
+
+ error = regmap_read(ddata->reg, CPCAP_REG_CCM, &value);
+ if (error)
+ goto restore;
+
+ msleep(300);
+ }
+
+ /* Read calibration offset from CCM */
+ error = regmap_read(ddata->reg, CPCAP_REG_CCM, &value);
+ if (error)
+ goto restore;
+
+ dev_info(ddata->dev, "calibration done: 0x%04x\n", value);
+
+restore:
+ if (error)
+ dev_err(ddata->dev, "%s: error %i\n", __func__, error);
+
+ error = regmap_update_bits(ddata->reg, CPCAP_REG_CCC1,
+ 0xffff, ccc1);
+ if (error)
+ dev_err(ddata->dev, "%s: restore error %i\n",
+ __func__, error);
+
+ return error;
+}
+
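The calibration loop above polls CPCAP_REG_CCC1 until CAL_EN self-clears, with an open-coded jiffies timeout. The same wait could also be expressed with the generic regmap polling helper; a minimal sketch assuming the driver's 300 ms poll interval and 6 s budget:

#include <linux/regmap.h>

/* Sketch only: an alternative to the open-coded loop, not the driver code. */
static int cpcap_battery_wait_cal_done(struct cpcap_battery_ddata *ddata)
{
    int value;

    /* Polls every 300 ms and gives up after 6 s with -ETIMEDOUT. */
    return regmap_read_poll_timeout(ddata->reg, CPCAP_REG_CCC1, value,
                                    !(value & CPCAP_REG_CCC1_CAL_EN),
                                    300 * USEC_PER_MSEC,
                                    6000 * USEC_PER_MSEC);
}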
/*
* Based on the values from the Motorola mapphone Linux kernel. In the
* Motorola mapphone Linux kernel tree the value for pm_cd_factor
@@ -687,12 +819,12 @@ out_err:
* at 3078000. The device will die around 2743000.
*/
static const struct cpcap_battery_config cpcap_battery_default_data = {
- .ccm = 0x3ff,
.cd_factor = 0x3cc,
.info.technology = POWER_SUPPLY_TECHNOLOGY_LION,
.info.voltage_max_design = 4351000,
.info.voltage_min_design = 3100000,
.info.charge_full_design = 1740000,
+ .bat.constant_charge_voltage_max_uv = 4200000,
};
#ifdef CONFIG_OF
@@ -741,12 +873,19 @@ static int cpcap_battery_probe(struct platform_device *pdev)
if (error)
return error;
- platform_set_drvdata(pdev, ddata);
+ switch (ddata->vendor) {
+ case CPCAP_VENDOR_ST:
+ ddata->cc_lsb = 95374; /* μAms per LSB */
+ break;
+ case CPCAP_VENDOR_TI:
+ ddata->cc_lsb = 91501; /* μAms per LSB */
+ break;
+ default:
+ return -EINVAL;
+ }
+ ddata->cc_lsb = (ddata->cc_lsb * ddata->config.cd_factor) / 1000;
- error = regmap_update_bits(ddata->reg, CPCAP_REG_CCM,
- 0xffff, ddata->config.ccm);
- if (error)
- return error;
+ platform_set_drvdata(pdev, ddata);
error = cpcap_battery_init_interrupts(pdev, ddata);
if (error)
@@ -760,11 +899,13 @@ static int cpcap_battery_probe(struct platform_device *pdev)
if (!psy_desc)
return -ENOMEM;
- psy_desc->name = "battery",
- psy_desc->type = POWER_SUPPLY_TYPE_BATTERY,
- psy_desc->properties = cpcap_battery_props,
- psy_desc->num_properties = ARRAY_SIZE(cpcap_battery_props),
- psy_desc->get_property = cpcap_battery_get_property,
+ psy_desc->name = "battery";
+ psy_desc->type = POWER_SUPPLY_TYPE_BATTERY;
+ psy_desc->properties = cpcap_battery_props;
+ psy_desc->num_properties = ARRAY_SIZE(cpcap_battery_props);
+ psy_desc->get_property = cpcap_battery_get_property;
+ psy_desc->set_property = cpcap_battery_set_property;
+ psy_desc->property_is_writeable = cpcap_battery_property_is_writeable;
psy_cfg.of_node = pdev->dev.of_node;
psy_cfg.drv_data = ddata;
@@ -779,6 +920,10 @@ static int cpcap_battery_probe(struct platform_device *pdev)
atomic_set(&ddata->active, 1);
+ error = cpcap_battery_calibrate(ddata);
+ if (error)
+ return error;
+
return 0;
}
diff --git a/drivers/power/supply/cpcap-charger.c b/drivers/power/supply/cpcap-charger.c
index 74258c7fe17d..c0d452e3dc8b 100644
--- a/drivers/power/supply/cpcap-charger.c
+++ b/drivers/power/supply/cpcap-charger.c
@@ -120,6 +120,13 @@ enum {
CPCAP_CHARGER_IIO_NR,
};
+enum {
+ CPCAP_CHARGER_DISCONNECTED,
+ CPCAP_CHARGER_DETECTING,
+ CPCAP_CHARGER_CHARGING,
+ CPCAP_CHARGER_DONE,
+};
+
struct cpcap_charger_ddata {
struct device *dev;
struct regmap *reg;
@@ -138,6 +145,8 @@ struct cpcap_charger_ddata {
atomic_t active;
int status;
+ int state;
+ int voltage;
};
struct cpcap_interrupt_desc {
@@ -153,6 +162,7 @@ struct cpcap_charger_ints_state {
bool chrg_se1b;
bool rvrs_mode;
+ bool chrgcurr2;
bool chrgcurr1;
bool vbusvld;
@@ -162,24 +172,26 @@ struct cpcap_charger_ints_state {
static enum power_supply_property cpcap_charger_props[] = {
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE,
POWER_SUPPLY_PROP_VOLTAGE_NOW,
POWER_SUPPLY_PROP_CURRENT_NOW,
};
+/* With no battery present, the temperature channel always reads -40000 */
static bool cpcap_charger_battery_found(struct cpcap_charger_ddata *ddata)
{
struct iio_channel *channel;
- int error, value;
+ int error, temperature;
channel = ddata->channels[CPCAP_CHARGER_IIO_BATTDET];
- error = iio_read_channel_raw(channel, &value);
+ error = iio_read_channel_processed(channel, &temperature);
if (error < 0) {
dev_warn(ddata->dev, "%s failed: %i\n", __func__, error);
return false;
}
- return value == 1;
+ return temperature > -20000 && temperature < 60000;
}
static int cpcap_charger_get_charge_voltage(struct cpcap_charger_ddata *ddata)
@@ -224,6 +236,9 @@ static int cpcap_charger_get_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_STATUS:
val->intval = ddata->status;
break;
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+ val->intval = ddata->voltage;
+ break;
case POWER_SUPPLY_PROP_VOLTAGE_NOW:
if (ddata->status == POWER_SUPPLY_STATUS_CHARGING)
val->intval = cpcap_charger_get_charge_voltage(ddata) *
@@ -248,6 +263,83 @@ static int cpcap_charger_get_property(struct power_supply *psy,
return 0;
}
+static int cpcap_charger_match_voltage(int voltage)
+{
+ switch (voltage) {
+ case 0 ... 4100000 - 1: return 3800000;
+ case 4100000 ... 4120000 - 1: return 4100000;
+ case 4120000 ... 4150000 - 1: return 4120000;
+ case 4150000 ... 4170000 - 1: return 4150000;
+ case 4170000 ... 4200000 - 1: return 4170000;
+ case 4200000 ... 4230000 - 1: return 4200000;
+ case 4230000 ... 4250000 - 1: return 4230000;
+ case 4250000 ... 4270000 - 1: return 4250000;
+ case 4270000 ... 4300000 - 1: return 4270000;
+ case 4300000 ... 4330000 - 1: return 4300000;
+ case 4330000 ... 4350000 - 1: return 4330000;
+ case 4350000 ... 4380000 - 1: return 4350000;
+ case 4380000 ... 4400000 - 1: return 4380000;
+ case 4400000 ... 4420000 - 1: return 4400000;
+ case 4420000 ... 4440000 - 1: return 4420000;
+ case 4440000: return 4440000;
+ default: return 0;
+ }
+}
+
+static int
+cpcap_charger_get_bat_const_charge_voltage(struct cpcap_charger_ddata *ddata)
+{
+ union power_supply_propval prop;
+ struct power_supply *battery;
+ int voltage = ddata->voltage;
+ int error;
+
+ battery = power_supply_get_by_name("battery");
+ if (battery) {
+ error = power_supply_get_property(battery,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE,
+ &prop);
+ if (!error)
+ voltage = prop.intval;
+ }
+
+ return voltage;
+}
+
+static int cpcap_charger_set_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ const union power_supply_propval *val)
+{
+ struct cpcap_charger_ddata *ddata = dev_get_drvdata(psy->dev.parent);
+ int voltage, batvolt;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+ voltage = cpcap_charger_match_voltage(val->intval);
+ batvolt = cpcap_charger_get_bat_const_charge_voltage(ddata);
+ if (voltage > batvolt)
+ voltage = batvolt;
+ ddata->voltage = voltage;
+ schedule_delayed_work(&ddata->detect_work, 0);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int cpcap_charger_property_is_writeable(struct power_supply *psy,
+ enum power_supply_property psp)
+{
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
static void cpcap_charger_set_cable_path(struct cpcap_charger_ddata *ddata,
bool enabled)
{
@@ -422,6 +514,7 @@ static int cpcap_charger_get_ints_state(struct cpcap_charger_ddata *ddata,
s->chrg_se1b = val & BIT(13);
s->rvrs_mode = val & BIT(6);
+ s->chrgcurr2 = val & BIT(5);
s->chrgcurr1 = val & BIT(4);
s->vbusvld = val & BIT(3);
@@ -434,6 +527,79 @@ static int cpcap_charger_get_ints_state(struct cpcap_charger_ddata *ddata,
return 0;
}
+static void cpcap_charger_update_state(struct cpcap_charger_ddata *ddata,
+ int state)
+{
+ const char *status;
+
+ if (state > CPCAP_CHARGER_DONE) {
+ dev_warn(ddata->dev, "unknown state: %i\n", state);
+
+ return;
+ }
+
+ ddata->state = state;
+
+ switch (state) {
+ case CPCAP_CHARGER_DISCONNECTED:
+ status = "DISCONNECTED";
+ break;
+ case CPCAP_CHARGER_DETECTING:
+ status = "DETECTING";
+ break;
+ case CPCAP_CHARGER_CHARGING:
+ status = "CHARGING";
+ break;
+ case CPCAP_CHARGER_DONE:
+ status = "DONE";
+ break;
+ default:
+ return;
+ }
+
+ dev_dbg(ddata->dev, "state: %s\n", status);
+}
+
+static int cpcap_charger_voltage_to_regval(int voltage)
+{
+ int offset;
+
+ switch (voltage) {
+ case 0 ... 4100000 - 1:
+ return 0;
+ case 4100000 ... 4200000 - 1:
+ offset = 1;
+ break;
+ case 4200000 ... 4300000 - 1:
+ offset = 0;
+ break;
+ case 4300000 ... 4380000 - 1:
+ offset = -1;
+ break;
+ case 4380000 ... 4440000:
+ offset = -2;
+ break;
+ default:
+ return 0;
+ }
+
+ return ((voltage - 4100000) / 20000) + offset;
+}
+
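cpcap_charger_match_voltage() snaps a requested voltage down to the nearest supported step, and cpcap_charger_voltage_to_regval() maps that step onto the CRM register; the per-range offsets compensate for the gaps in the otherwise 20 mV ladder. A worked check of a few points through both functions:

/* requested uV      match_voltage()   voltage_to_regval()
 *
 * 4000000        -> 3800000        -> 0
 * 4200000        -> 4200000        -> (4200000 - 4100000) / 20000 + 0 = 5
 * 4350000        -> 4350000        -> (4350000 - 4100000) / 20000 - 1 = 11
 * 4440000        -> 4440000        -> (4440000 - 4100000) / 20000 - 2 = 15
 */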
+static void cpcap_charger_disconnect(struct cpcap_charger_ddata *ddata,
+ int state, unsigned long delay)
+{
+ int error;
+
+ error = cpcap_charger_set_state(ddata, 0, 0, 0);
+ if (error)
+ return;
+
+ cpcap_charger_update_state(ddata, state);
+ power_supply_changed(ddata->usb);
+ schedule_delayed_work(&ddata->detect_work, delay);
+}
+
static void cpcap_usb_detect(struct work_struct *work)
{
struct cpcap_charger_ddata *ddata;
@@ -447,24 +613,67 @@ static void cpcap_usb_detect(struct work_struct *work)
if (error)
return;
+ /* Just initialize the state if a charger is connected but chrg_det is not set */
+ if (!s.chrg_det && s.chrgcurr1 && s.vbusvld) {
+ cpcap_charger_update_state(ddata, CPCAP_CHARGER_DETECTING);
+
+ return;
+ }
+
+ /*
+ * If battery voltage is higher than charge voltage, it may have been
+ * charged to 4.35V by Android. Try again in 10 minutes.
+ */
+ if (cpcap_charger_get_charge_voltage(ddata) > ddata->voltage) {
+ cpcap_charger_disconnect(ddata, CPCAP_CHARGER_DETECTING,
+ HZ * 60 * 10);
+
+ return;
+ }
+
+ /* Throttle chrgcurr2 interrupt for charger done and retry */
+ switch (ddata->state) {
+ case CPCAP_CHARGER_CHARGING:
+ if (s.chrgcurr2)
+ break;
+ if (s.chrgcurr1 && s.vbusvld) {
+ cpcap_charger_disconnect(ddata, CPCAP_CHARGER_DONE,
+ HZ * 5);
+ return;
+ }
+ break;
+ case CPCAP_CHARGER_DONE:
+ if (!s.chrgcurr2)
+ break;
+ cpcap_charger_disconnect(ddata, CPCAP_CHARGER_DETECTING,
+ HZ * 5);
+ return;
+ default:
+ break;
+ }
+
if (!ddata->feeding_vbus && cpcap_charger_vbus_valid(ddata) &&
s.chrgcurr1) {
int max_current;
+ int vchrg;
if (cpcap_charger_battery_found(ddata))
max_current = CPCAP_REG_CRM_ICHRG_1A596;
else
max_current = CPCAP_REG_CRM_ICHRG_0A532;
+ vchrg = cpcap_charger_voltage_to_regval(ddata->voltage);
error = cpcap_charger_set_state(ddata,
- CPCAP_REG_CRM_VCHRG_4V35,
+ CPCAP_REG_CRM_VCHRG(vchrg),
max_current, 0);
if (error)
goto out_err;
+ cpcap_charger_update_state(ddata, CPCAP_CHARGER_CHARGING);
} else {
error = cpcap_charger_set_state(ddata, 0, 0, 0);
if (error)
goto out_err;
+ cpcap_charger_update_state(ddata, CPCAP_CHARGER_DISCONNECTED);
}
power_supply_changed(ddata->usb);
@@ -524,7 +733,7 @@ static const char * const cpcap_charger_irqs[] = {
"chrg_det", "rvrs_chrg",
/* REG_INT1 */
- "chrg_se1b", "se0conn", "rvrs_mode", "chrgcurr1", "vbusvld",
+ "chrg_se1b", "se0conn", "rvrs_mode", "chrgcurr2", "chrgcurr1", "vbusvld",
/* REG_INT_3 */
"battdetb",
@@ -596,6 +805,8 @@ static const struct power_supply_desc cpcap_charger_usb_desc = {
.properties = cpcap_charger_props,
.num_properties = ARRAY_SIZE(cpcap_charger_props),
.get_property = cpcap_charger_get_property,
+ .set_property = cpcap_charger_set_property,
+ .property_is_writeable = cpcap_charger_property_is_writeable,
};
#ifdef CONFIG_OF
@@ -625,6 +836,7 @@ static int cpcap_charger_probe(struct platform_device *pdev)
return -ENOMEM;
ddata->dev = &pdev->dev;
+ ddata->voltage = 4200000;
ddata->reg = dev_get_regmap(ddata->dev->parent, NULL);
if (!ddata->reg)
diff --git a/drivers/power/supply/test_power.c b/drivers/power/supply/test_power.c
index c3cad2b6daba..65c23ef6408d 100644
--- a/drivers/power/supply/test_power.c
+++ b/drivers/power/supply/test_power.c
@@ -33,6 +33,8 @@ static int battery_present = 1; /* true */
static int battery_technology = POWER_SUPPLY_TECHNOLOGY_LION;
static int battery_capacity = 50;
static int battery_voltage = 3300;
+static int battery_charge_counter = -1000;
+static int battery_current = 1600;
static bool module_initialized;
@@ -100,6 +102,9 @@ static int test_power_get_battery_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_CHARGE_NOW:
val->intval = battery_capacity;
break;
+ case POWER_SUPPLY_PROP_CHARGE_COUNTER:
+ val->intval = battery_charge_counter;
+ break;
case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
case POWER_SUPPLY_PROP_CHARGE_FULL:
val->intval = 100;
@@ -114,6 +119,10 @@ static int test_power_get_battery_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_VOLTAGE_NOW:
val->intval = battery_voltage;
break;
+ case POWER_SUPPLY_PROP_CURRENT_AVG:
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ val->intval = battery_current;
+ break;
default:
pr_info("%s: some properties deliberately report errors.\n",
__func__);
@@ -135,6 +144,7 @@ static enum power_supply_property test_power_battery_props[] = {
POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
POWER_SUPPLY_PROP_CHARGE_FULL,
POWER_SUPPLY_PROP_CHARGE_NOW,
+ POWER_SUPPLY_PROP_CHARGE_COUNTER,
POWER_SUPPLY_PROP_CAPACITY,
POWER_SUPPLY_PROP_CAPACITY_LEVEL,
POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG,
@@ -144,6 +154,8 @@ static enum power_supply_property test_power_battery_props[] = {
POWER_SUPPLY_PROP_SERIAL_NUMBER,
POWER_SUPPLY_PROP_TEMP,
POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CURRENT_AVG,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
};
static char *test_power_ac_supplied_to[] = {
@@ -447,6 +459,36 @@ static int param_set_battery_voltage(const char *key,
#define param_get_battery_voltage param_get_int
+static int param_set_battery_charge_counter(const char *key,
+ const struct kernel_param *kp)
+{
+ int tmp;
+
+ if (1 != sscanf(key, "%d", &tmp))
+ return -EINVAL;
+
+ battery_charge_counter = tmp;
+ signal_power_supply_changed(test_power_supplies[TEST_BATTERY]);
+ return 0;
+}
+
+#define param_get_battery_charge_counter param_get_int
+
+static int param_set_battery_current(const char *key,
+ const struct kernel_param *kp)
+{
+ int tmp;
+
+ if (1 != sscanf(key, "%d", &tmp))
+ return -EINVAL;
+
+ battery_current = tmp;
+ signal_power_supply_changed(test_power_supplies[TEST_BATTERY]);
+ return 0;
+}
+
+#define param_get_battery_current param_get_int
+
static const struct kernel_param_ops param_ops_ac_online = {
.set = param_set_ac_online,
.get = param_get_ac_online,
@@ -487,6 +529,16 @@ static const struct kernel_param_ops param_ops_battery_voltage = {
.get = param_get_battery_voltage,
};
+static const struct kernel_param_ops param_ops_battery_charge_counter = {
+ .set = param_set_battery_charge_counter,
+ .get = param_get_battery_charge_counter,
+};
+
+static const struct kernel_param_ops param_ops_battery_current = {
+ .set = param_set_battery_current,
+ .get = param_get_battery_current,
+};
+
#define param_check_ac_online(name, p) __param_check(name, p, void);
#define param_check_usb_online(name, p) __param_check(name, p, void);
#define param_check_battery_status(name, p) __param_check(name, p, void);
@@ -495,6 +547,8 @@ static const struct kernel_param_ops param_ops_battery_voltage = {
#define param_check_battery_health(name, p) __param_check(name, p, void);
#define param_check_battery_capacity(name, p) __param_check(name, p, void);
#define param_check_battery_voltage(name, p) __param_check(name, p, void);
+#define param_check_battery_charge_counter(name, p) __param_check(name, p, void);
+#define param_check_battery_current(name, p) __param_check(name, p, void);
module_param(ac_online, ac_online, 0644);
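module_param(name, type, perm) expands into references to param_ops_<type> and param_check_<type>, which is why each custom parameter here supplies a kernel_param_ops and an (empty) __param_check() macro. A minimal sketch of the same pattern for a hypothetical parameter:

#include <linux/moduleparam.h>

static int my_level;

static int param_set_my_level(const char *key, const struct kernel_param *kp)
{
    int tmp;

    if (sscanf(key, "%d", &tmp) != 1)
        return -EINVAL;

    my_level = tmp;
    return 0;
}

#define param_get_my_level param_get_int

static const struct kernel_param_ops param_ops_my_level = {
    .set = param_set_my_level,
    .get = param_get_my_level,
};

/* The void check disables type checking; the ops above define behaviour. */
#define param_check_my_level(name, p) __param_check(name, p, void);

module_param(my_level, my_level, 0644);
MODULE_PARM_DESC(my_level, "hypothetical custom-typed parameter");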
@@ -525,6 +579,13 @@ MODULE_PARM_DESC(battery_capacity, "battery capacity (percentage)");
module_param(battery_voltage, battery_voltage, 0644);
MODULE_PARM_DESC(battery_voltage, "battery voltage (millivolts)");
+module_param(battery_charge_counter, battery_charge_counter, 0644);
+MODULE_PARM_DESC(battery_charge_counter,
+ "battery charge counter (microampere-hours)");
+
+module_param(battery_current, battery_current, 0644);
+MODULE_PARM_DESC(battery_current, "battery current (milliampere)");
+
MODULE_DESCRIPTION("Power supply driver for testing");
MODULE_AUTHOR("Anton Vorontsov <cbouatmailru@gmail.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c
index 94ddd7d659c8..a67701ed93e8 100644
--- a/drivers/powercap/intel_rapl_common.c
+++ b/drivers/powercap/intel_rapl_common.c
@@ -978,6 +978,8 @@ static const struct x86_cpu_id rapl_ids[] __initconst = {
INTEL_CPU_FAM6(ICELAKE_NNPI, rapl_defaults_core),
INTEL_CPU_FAM6(ICELAKE_X, rapl_defaults_hsw_server),
INTEL_CPU_FAM6(ICELAKE_D, rapl_defaults_hsw_server),
+ INTEL_CPU_FAM6(COMETLAKE_L, rapl_defaults_core),
+ INTEL_CPU_FAM6(COMETLAKE, rapl_defaults_core),
INTEL_CPU_FAM6(ATOM_SILVERMONT, rapl_defaults_byt),
INTEL_CPU_FAM6(ATOM_AIRMONT, rapl_defaults_cht),
diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig
index 0517272a268e..b45d2b86d8ca 100644
--- a/drivers/ptp/Kconfig
+++ b/drivers/ptp/Kconfig
@@ -119,4 +119,16 @@ config PTP_1588_CLOCK_KVM
To compile this driver as a module, choose M here: the module
will be called ptp_kvm.
+config PTP_1588_CLOCK_IDTCM
+ tristate "IDT CLOCKMATRIX as PTP clock"
+ depends on PTP_1588_CLOCK
+ default n
+ help
+ This driver adds support for using IDT CLOCKMATRIX(TM) as a PTP
+ clock. This clock is only useful if your time stamping MAC
+ is connected to the IDT chip.
+
+ To compile this driver as a module, choose M here: the module
+ will be called ptp_clockmatrix.
+
endmenu
diff --git a/drivers/ptp/Makefile b/drivers/ptp/Makefile
index 677d1d178a3e..69a06f86a450 100644
--- a/drivers/ptp/Makefile
+++ b/drivers/ptp/Makefile
@@ -12,3 +12,4 @@ obj-$(CONFIG_PTP_1588_CLOCK_KVM) += ptp_kvm.o
obj-$(CONFIG_PTP_1588_CLOCK_QORIQ) += ptp-qoriq.o
ptp-qoriq-y += ptp_qoriq.o
ptp-qoriq-$(CONFIG_DEBUG_FS) += ptp_qoriq_debugfs.o
+obj-$(CONFIG_PTP_1588_CLOCK_IDTCM) += ptp_clockmatrix.o
\ No newline at end of file
diff --git a/drivers/ptp/idt8a340_reg.h b/drivers/ptp/idt8a340_reg.h
new file mode 100644
index 000000000000..9263bc33b8f4
--- /dev/null
+++ b/drivers/ptp/idt8a340_reg.h
@@ -0,0 +1,659 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* idt8a340_reg.h
+ *
+ * Originally generated by regen.tcl on Thu Feb 14 19:23:44 PST 2019
+ * https://github.com/richardcochran/regen
+ *
+ * Hand modified to include some HW registers.
+ * Based on 4.8.0, SCSR rev C commit a03c7ae5
+ */
+#ifndef HAVE_IDT8A340_REG
+#define HAVE_IDT8A340_REG
+
+#define PAGE_ADDR_BASE 0x0000
+#define PAGE_ADDR 0x00fc
+
+#define HW_REVISION 0x8180
+#define REV_ID 0x007a
+
+#define HW_DPLL_0 (0x8a00)
+#define HW_DPLL_1 (0x8b00)
+#define HW_DPLL_2 (0x8c00)
+#define HW_DPLL_3 (0x8d00)
+
+#define HW_DPLL_TOD_SW_TRIG_ADDR__0 (0x080)
+#define HW_DPLL_TOD_CTRL_1 (0x089)
+#define HW_DPLL_TOD_CTRL_2 (0x08A)
+#define HW_DPLL_TOD_OVR__0 (0x098)
+#define HW_DPLL_TOD_OUT_0__0 (0x0B0)
+
+#define HW_Q0_Q1_CH_SYNC_CTRL_0 (0xa740)
+#define HW_Q0_Q1_CH_SYNC_CTRL_1 (0xa741)
+#define HW_Q2_Q3_CH_SYNC_CTRL_0 (0xa742)
+#define HW_Q2_Q3_CH_SYNC_CTRL_1 (0xa743)
+#define HW_Q4_Q5_CH_SYNC_CTRL_0 (0xa744)
+#define HW_Q4_Q5_CH_SYNC_CTRL_1 (0xa745)
+#define HW_Q6_Q7_CH_SYNC_CTRL_0 (0xa746)
+#define HW_Q6_Q7_CH_SYNC_CTRL_1 (0xa747)
+#define HW_Q8_CH_SYNC_CTRL_0 (0xa748)
+#define HW_Q8_CH_SYNC_CTRL_1 (0xa749)
+#define HW_Q9_CH_SYNC_CTRL_0 (0xa74a)
+#define HW_Q9_CH_SYNC_CTRL_1 (0xa74b)
+#define HW_Q10_CH_SYNC_CTRL_0 (0xa74c)
+#define HW_Q10_CH_SYNC_CTRL_1 (0xa74d)
+#define HW_Q11_CH_SYNC_CTRL_0 (0xa74e)
+#define HW_Q11_CH_SYNC_CTRL_1 (0xa74f)
+
+#define SYNC_SOURCE_DPLL0_TOD_PPS 0x14
+#define SYNC_SOURCE_DPLL1_TOD_PPS 0x15
+#define SYNC_SOURCE_DPLL2_TOD_PPS 0x16
+#define SYNC_SOURCE_DPLL3_TOD_PPS 0x17
+
+#define SYNCTRL1_MASTER_SYNC_RST BIT(7)
+#define SYNCTRL1_MASTER_SYNC_TRIG BIT(5)
+#define SYNCTRL1_TOD_SYNC_TRIG BIT(4)
+#define SYNCTRL1_FBDIV_FRAME_SYNC_TRIG BIT(3)
+#define SYNCTRL1_FBDIV_SYNC_TRIG BIT(2)
+#define SYNCTRL1_Q1_DIV_SYNC_TRIG BIT(1)
+#define SYNCTRL1_Q0_DIV_SYNC_TRIG BIT(0)
+
+#define RESET_CTRL 0xc000
+#define SM_RESET 0x0012
+#define SM_RESET_CMD 0x5A
+
+#define GENERAL_STATUS 0xc014
+#define HW_REV_ID 0x000A
+#define BOND_ID 0x000B
+#define HW_CSR_ID 0x000C
+#define HW_IRQ_ID 0x000E
+
+#define MAJ_REL 0x0010
+#define MIN_REL 0x0011
+#define HOTFIX_REL 0x0012
+
+#define PIPELINE_ID 0x0014
+#define BUILD_ID 0x0018
+
+#define JTAG_DEVICE_ID 0x001c
+#define PRODUCT_ID 0x001e
+
+#define STATUS 0xc03c
+#define USER_GPIO0_TO_7_STATUS 0x008a
+#define USER_GPIO8_TO_15_STATUS 0x008b
+
+#define GPIO_USER_CONTROL 0xc160
+#define GPIO0_TO_7_OUT 0x0000
+#define GPIO8_TO_15_OUT 0x0001
+
+#define STICKY_STATUS_CLEAR 0xc164
+
+#define GPIO_TOD_NOTIFICATION_CLEAR 0xc16c
+
+#define ALERT_CFG 0xc188
+
+#define SYS_DPLL_XO 0xc194
+
+#define SYS_APLL 0xc19c
+
+#define INPUT_0 0xc1b0
+
+#define INPUT_1 0xc1c0
+
+#define INPUT_2 0xc1d0
+
+#define INPUT_3 0xc200
+
+#define INPUT_4 0xc210
+
+#define INPUT_5 0xc220
+
+#define INPUT_6 0xc230
+
+#define INPUT_7 0xc240
+
+#define INPUT_8 0xc250
+
+#define INPUT_9 0xc260
+
+#define INPUT_10 0xc280
+
+#define INPUT_11 0xc290
+
+#define INPUT_12 0xc2a0
+
+#define INPUT_13 0xc2b0
+
+#define INPUT_14 0xc2c0
+
+#define INPUT_15 0xc2d0
+
+#define REF_MON_0 0xc2e0
+
+#define REF_MON_1 0xc2ec
+
+#define REF_MON_2 0xc300
+
+#define REF_MON_3 0xc30c
+
+#define REF_MON_4 0xc318
+
+#define REF_MON_5 0xc324
+
+#define REF_MON_6 0xc330
+
+#define REF_MON_7 0xc33c
+
+#define REF_MON_8 0xc348
+
+#define REF_MON_9 0xc354
+
+#define REF_MON_10 0xc360
+
+#define REF_MON_11 0xc36c
+
+#define REF_MON_12 0xc380
+
+#define REF_MON_13 0xc38c
+
+#define REF_MON_14 0xc398
+
+#define REF_MON_15 0xc3a4
+
+#define DPLL_0 0xc3b0
+#define DPLL_CTRL_REG_0 0x0002
+#define DPLL_CTRL_REG_1 0x0003
+#define DPLL_CTRL_REG_2 0x0004
+#define DPLL_TOD_SYNC_CFG 0x0031
+#define DPLL_COMBO_SLAVE_CFG_0 0x0032
+#define DPLL_COMBO_SLAVE_CFG_1 0x0033
+#define DPLL_SLAVE_REF_CFG 0x0034
+#define DPLL_REF_MODE 0x0035
+#define DPLL_PHASE_MEASUREMENT_CFG 0x0036
+#define DPLL_MODE 0x0037
+
+#define DPLL_1 0xc400
+
+#define DPLL_2 0xc438
+
+#define DPLL_3 0xc480
+
+#define DPLL_4 0xc4b8
+
+#define DPLL_5 0xc500
+
+#define DPLL_6 0xc538
+
+#define DPLL_7 0xc580
+
+#define SYS_DPLL 0xc5b8
+
+#define DPLL_CTRL_0 0xc600
+#define DPLL_CTRL_DPLL_MANU_REF_CFG 0x0001
+
+#define DPLL_CTRL_1 0xc63c
+
+#define DPLL_CTRL_2 0xc680
+
+#define DPLL_CTRL_3 0xc6bc
+
+#define DPLL_CTRL_4 0xc700
+
+#define DPLL_CTRL_5 0xc73c
+
+#define DPLL_CTRL_6 0xc780
+
+#define DPLL_CTRL_7 0xc7bc
+
+#define SYS_DPLL_CTRL 0xc800
+
+#define DPLL_PHASE_0 0xc818
+
+/* Signed 42-bit FFO in units of 2^(-53) */
+#define DPLL_WR_PHASE 0x0000
+
+#define DPLL_PHASE_1 0xc81c
+
+#define DPLL_PHASE_2 0xc820
+
+#define DPLL_PHASE_3 0xc824
+
+#define DPLL_PHASE_4 0xc828
+
+#define DPLL_PHASE_5 0xc82c
+
+#define DPLL_PHASE_6 0xc830
+
+#define DPLL_PHASE_7 0xc834
+
+#define DPLL_FREQ_0 0xc838
+
+/* Signed 42-bit FFO in units of 2^(-53) */
+#define DPLL_WR_FREQ 0x0000
+
+#define DPLL_FREQ_1 0xc840
+
+#define DPLL_FREQ_2 0xc848
+
+#define DPLL_FREQ_3 0xc850
+
+#define DPLL_FREQ_4 0xc858
+
+#define DPLL_FREQ_5 0xc860
+
+#define DPLL_FREQ_6 0xc868
+
+#define DPLL_FREQ_7 0xc870
+
+#define DPLL_PHASE_PULL_IN_0 0xc880
+#define PULL_IN_OFFSET 0x0000 /* Signed 32 bit */
+#define PULL_IN_SLOPE_LIMIT 0x0004 /* Unsigned 24 bit */
+#define PULL_IN_CTRL 0x0007
+
+#define DPLL_PHASE_PULL_IN_1 0xc888
+
+#define DPLL_PHASE_PULL_IN_2 0xc890
+
+#define DPLL_PHASE_PULL_IN_3 0xc898
+
+#define DPLL_PHASE_PULL_IN_4 0xc8a0
+
+#define DPLL_PHASE_PULL_IN_5 0xc8a8
+
+#define DPLL_PHASE_PULL_IN_6 0xc8b0
+
+#define DPLL_PHASE_PULL_IN_7 0xc8b8
+
+#define GPIO_CFG 0xc8c0
+#define GPIO_CFG_GBL 0x0000
+
+#define GPIO_0 0xc8c2
+#define GPIO_DCO_INC_DEC 0x0000
+#define GPIO_OUT_CTRL_0 0x0001
+#define GPIO_OUT_CTRL_1 0x0002
+#define GPIO_TOD_TRIG 0x0003
+#define GPIO_DPLL_INDICATOR 0x0004
+#define GPIO_LOS_INDICATOR 0x0005
+#define GPIO_REF_INPUT_DSQ_0 0x0006
+#define GPIO_REF_INPUT_DSQ_1 0x0007
+#define GPIO_REF_INPUT_DSQ_2 0x0008
+#define GPIO_REF_INPUT_DSQ_3 0x0009
+#define GPIO_MAN_CLK_SEL_0 0x000a
+#define GPIO_MAN_CLK_SEL_1 0x000b
+#define GPIO_MAN_CLK_SEL_2 0x000c
+#define GPIO_SLAVE 0x000d
+#define GPIO_ALERT_OUT_CFG 0x000e
+#define GPIO_TOD_NOTIFICATION_CFG 0x000f
+#define GPIO_CTRL 0x0010
+
+#define GPIO_1 0xc8d4
+
+#define GPIO_2 0xc8e6
+
+#define GPIO_3 0xc900
+
+#define GPIO_4 0xc912
+
+#define GPIO_5 0xc924
+
+#define GPIO_6 0xc936
+
+#define GPIO_7 0xc948
+
+#define GPIO_8 0xc95a
+
+#define GPIO_9 0xc980
+
+#define GPIO_10 0xc992
+
+#define GPIO_11 0xc9a4
+
+#define GPIO_12 0xc9b6
+
+#define GPIO_13 0xc9c8
+
+#define GPIO_14 0xc9da
+
+#define GPIO_15 0xca00
+
+#define OUT_DIV_MUX 0xca12
+
+#define OUTPUT_0 0xca14
+/* FOD frequency output divider value */
+#define OUT_DIV 0x0000
+#define OUT_DUTY_CYCLE_HIGH 0x0004
+#define OUT_CTRL_0 0x0008
+#define OUT_CTRL_1 0x0009
+/* Phase adjustment in FOD cycles */
+#define OUT_PHASE_ADJ 0x000c
+
+#define OUTPUT_1 0xca24
+
+#define OUTPUT_2 0xca34
+
+#define OUTPUT_3 0xca44
+
+#define OUTPUT_4 0xca54
+
+#define OUTPUT_5 0xca64
+
+#define OUTPUT_6 0xca80
+
+#define OUTPUT_7 0xca90
+
+#define OUTPUT_8 0xcaa0
+
+#define OUTPUT_9 0xcab0
+
+#define OUTPUT_10 0xcac0
+
+#define OUTPUT_11 0xcad0
+
+#define SERIAL 0xcae0
+
+#define PWM_ENCODER_0 0xcb00
+
+#define PWM_ENCODER_1 0xcb08
+
+#define PWM_ENCODER_2 0xcb10
+
+#define PWM_ENCODER_3 0xcb18
+
+#define PWM_ENCODER_4 0xcb20
+
+#define PWM_ENCODER_5 0xcb28
+
+#define PWM_ENCODER_6 0xcb30
+
+#define PWM_ENCODER_7 0xcb38
+
+#define PWM_DECODER_0 0xcb40
+
+#define PWM_DECODER_1 0xcb48
+
+#define PWM_DECODER_2 0xcb50
+
+#define PWM_DECODER_3 0xcb58
+
+#define PWM_DECODER_4 0xcb60
+
+#define PWM_DECODER_5 0xcb68
+
+#define PWM_DECODER_6 0xcb70
+
+#define PWM_DECODER_7 0xcb80
+
+#define PWM_DECODER_8 0xcb88
+
+#define PWM_DECODER_9 0xcb90
+
+#define PWM_DECODER_10 0xcb98
+
+#define PWM_DECODER_11 0xcba0
+
+#define PWM_DECODER_12 0xcba8
+
+#define PWM_DECODER_13 0xcbb0
+
+#define PWM_DECODER_14 0xcbb8
+
+#define PWM_DECODER_15 0xcbc0
+
+#define PWM_USER_DATA 0xcbc8
+
+#define TOD_0 0xcbcc
+
+/* Enable TOD counter, output channel sync and even-PPS mode */
+#define TOD_CFG 0x0000
+
+#define TOD_1 0xcbce
+
+#define TOD_2 0xcbd0
+
+#define TOD_3 0xcbd2
+
+
+#define TOD_WRITE_0 0xcc00
+/* 8-bit subns, 32-bit ns, 48-bit seconds */
+#define TOD_WRITE 0x0000
+/* Counter increments after TOD write is completed */
+#define TOD_WRITE_COUNTER 0x000c
+/* TOD write trigger configuration */
+#define TOD_WRITE_SELECT_CFG_0 0x000d
+/* TOD write trigger selection */
+#define TOD_WRITE_CMD 0x000f
+
+#define TOD_WRITE_1 0xcc10
+
+#define TOD_WRITE_2 0xcc20
+
+#define TOD_WRITE_3 0xcc30
+
+#define TOD_READ_PRIMARY_0 0xcc40
+/* 8-bit subns, 32-bit ns, 48-bit seconds */
+#define TOD_READ_PRIMARY 0x0000
+/* Counter increments after TOD write is completed */
+#define TOD_READ_PRIMARY_COUNTER 0x000b
+/* Read trigger configuration */
+#define TOD_READ_PRIMARY_SEL_CFG_0 0x000c
+/* Read trigger selection */
+#define TOD_READ_PRIMARY_CMD 0x000e
+
+#define TOD_READ_PRIMARY_1 0xcc50
+
+#define TOD_READ_PRIMARY_2 0xcc60
+
+#define TOD_READ_PRIMARY_3 0xcc80
+
+#define TOD_READ_SECONDARY_0 0xcc90
+
+#define TOD_READ_SECONDARY_1 0xcca0
+
+#define TOD_READ_SECONDARY_2 0xccb0
+
+#define TOD_READ_SECONDARY_3 0xccc0
+
+#define OUTPUT_TDC_CFG 0xccd0
+
+#define OUTPUT_TDC_0 0xcd00
+
+#define OUTPUT_TDC_1 0xcd08
+
+#define OUTPUT_TDC_2 0xcd10
+
+#define OUTPUT_TDC_3 0xcd18
+
+#define INPUT_TDC 0xcd20
+
+#define SCRATCH 0xcf50
+
+#define EEPROM 0xcf68
+
+#define OTP 0xcf70
+
+#define BYTE 0xcf80
+
+/* Bit definitions for the MAJ_REL register */
+#define MAJOR_SHIFT (1)
+#define MAJOR_MASK (0x7f)
+#define PR_BUILD BIT(0)
+
+/* Bit definitions for the USER_GPIO0_TO_7_STATUS register */
+#define GPIO0_LEVEL BIT(0)
+#define GPIO1_LEVEL BIT(1)
+#define GPIO2_LEVEL BIT(2)
+#define GPIO3_LEVEL BIT(3)
+#define GPIO4_LEVEL BIT(4)
+#define GPIO5_LEVEL BIT(5)
+#define GPIO6_LEVEL BIT(6)
+#define GPIO7_LEVEL BIT(7)
+
+/* Bit definitions for the USER_GPIO8_TO_15_STATUS register */
+#define GPIO8_LEVEL BIT(0)
+#define GPIO9_LEVEL BIT(1)
+#define GPIO10_LEVEL BIT(2)
+#define GPIO11_LEVEL BIT(3)
+#define GPIO12_LEVEL BIT(4)
+#define GPIO13_LEVEL BIT(5)
+#define GPIO14_LEVEL BIT(6)
+#define GPIO15_LEVEL BIT(7)
+
+/* Bit definitions for the GPIO0_TO_7_OUT register */
+#define GPIO0_DRIVE_LEVEL BIT(0)
+#define GPIO1_DRIVE_LEVEL BIT(1)
+#define GPIO2_DRIVE_LEVEL BIT(2)
+#define GPIO3_DRIVE_LEVEL BIT(3)
+#define GPIO4_DRIVE_LEVEL BIT(4)
+#define GPIO5_DRIVE_LEVEL BIT(5)
+#define GPIO6_DRIVE_LEVEL BIT(6)
+#define GPIO7_DRIVE_LEVEL BIT(7)
+
+/* Bit definitions for the GPIO8_TO_15_OUT register */
+#define GPIO8_DRIVE_LEVEL BIT(0)
+#define GPIO9_DRIVE_LEVEL BIT(1)
+#define GPIO10_DRIVE_LEVEL BIT(2)
+#define GPIO11_DRIVE_LEVEL BIT(3)
+#define GPIO12_DRIVE_LEVEL BIT(4)
+#define GPIO13_DRIVE_LEVEL BIT(5)
+#define GPIO14_DRIVE_LEVEL BIT(6)
+#define GPIO15_DRIVE_LEVEL BIT(7)
+
+/* Bit definitions for the DPLL_TOD_SYNC_CFG register */
+#define TOD_SYNC_SOURCE_SHIFT (1)
+#define TOD_SYNC_SOURCE_MASK (0x3)
+#define TOD_SYNC_EN BIT(0)
+
+/* Bit definitions for the DPLL_MODE register */
+#define WRITE_TIMER_MODE BIT(6)
+#define PLL_MODE_SHIFT (3)
+#define PLL_MODE_MASK (0x7)
+#define STATE_MODE_SHIFT (0)
+#define STATE_MODE_MASK (0x7)
+
+/* Bit definitions for the GPIO_CFG_GBL register */
+#define SUPPLY_MODE_SHIFT (0)
+#define SUPPLY_MODE_MASK (0x3)
+
+/* Bit definitions for the GPIO_DCO_INC_DEC register */
+#define INCDEC_DPLL_INDEX_SHIFT (0)
+#define INCDEC_DPLL_INDEX_MASK (0x7)
+
+/* Bit definitions for the GPIO_OUT_CTRL_0 register */
+#define CTRL_OUT_0 BIT(0)
+#define CTRL_OUT_1 BIT(1)
+#define CTRL_OUT_2 BIT(2)
+#define CTRL_OUT_3 BIT(3)
+#define CTRL_OUT_4 BIT(4)
+#define CTRL_OUT_5 BIT(5)
+#define CTRL_OUT_6 BIT(6)
+#define CTRL_OUT_7 BIT(7)
+
+/* Bit definitions for the GPIO_OUT_CTRL_1 register */
+#define CTRL_OUT_8 BIT(0)
+#define CTRL_OUT_9 BIT(1)
+#define CTRL_OUT_10 BIT(2)
+#define CTRL_OUT_11 BIT(3)
+#define CTRL_OUT_12 BIT(4)
+#define CTRL_OUT_13 BIT(5)
+#define CTRL_OUT_14 BIT(6)
+#define CTRL_OUT_15 BIT(7)
+
+/* Bit definitions for the GPIO_TOD_TRIG register */
+#define TOD_TRIG_0 BIT(0)
+#define TOD_TRIG_1 BIT(1)
+#define TOD_TRIG_2 BIT(2)
+#define TOD_TRIG_3 BIT(3)
+
+/* Bit definitions for the GPIO_DPLL_INDICATOR register */
+#define IND_DPLL_INDEX_SHIFT (0)
+#define IND_DPLL_INDEX_MASK (0x7)
+
+/* Bit definitions for the GPIO_LOS_INDICATOR register */
+#define REFMON_INDEX_SHIFT (0)
+#define REFMON_INDEX_MASK (0xf)
+/* Active level of LOS indicator, 0=low 1=high */
+#define ACTIVE_LEVEL BIT(4)
+
+/* Bit definitions for the GPIO_REF_INPUT_DSQ_0 register */
+#define DSQ_INP_0 BIT(0)
+#define DSQ_INP_1 BIT(1)
+#define DSQ_INP_2 BIT(2)
+#define DSQ_INP_3 BIT(3)
+#define DSQ_INP_4 BIT(4)
+#define DSQ_INP_5 BIT(5)
+#define DSQ_INP_6 BIT(6)
+#define DSQ_INP_7 BIT(7)
+
+/* Bit definitions for the GPIO_REF_INPUT_DSQ_1 register */
+#define DSQ_INP_8 BIT(0)
+#define DSQ_INP_9 BIT(1)
+#define DSQ_INP_10 BIT(2)
+#define DSQ_INP_11 BIT(3)
+#define DSQ_INP_12 BIT(4)
+#define DSQ_INP_13 BIT(5)
+#define DSQ_INP_14 BIT(6)
+#define DSQ_INP_15 BIT(7)
+
+/* Bit definitions for the GPIO_REF_INPUT_DSQ_2 register */
+#define DSQ_DPLL_0 BIT(0)
+#define DSQ_DPLL_1 BIT(1)
+#define DSQ_DPLL_2 BIT(2)
+#define DSQ_DPLL_3 BIT(3)
+#define DSQ_DPLL_4 BIT(4)
+#define DSQ_DPLL_5 BIT(5)
+#define DSQ_DPLL_6 BIT(6)
+#define DSQ_DPLL_7 BIT(7)
+
+/* Bit definitions for the GPIO_REF_INPUT_DSQ_3 register */
+#define DSQ_DPLL_SYS BIT(0)
+#define GPIO_DSQ_LEVEL BIT(1)
+
+/* Bit definitions for the GPIO_TOD_NOTIFICATION_CFG register */
+#define DPLL_TOD_SHIFT (0)
+#define DPLL_TOD_MASK (0x3)
+#define TOD_READ_SECONDARY BIT(2)
+#define GPIO_ASSERT_LEVEL BIT(3)
+
+/* Bit definitions for the GPIO_CTRL register */
+#define GPIO_FUNCTION_EN BIT(0)
+#define GPIO_CMOS_OD_MODE BIT(1)
+#define GPIO_CONTROL_DIR BIT(2)
+#define GPIO_PU_PD_MODE BIT(3)
+#define GPIO_FUNCTION_SHIFT (4)
+#define GPIO_FUNCTION_MASK (0xf)
+
+/* Bit definitions for the OUT_CTRL_1 register */
+#define OUT_SYNC_DISABLE BIT(7)
+#define SQUELCH_VALUE BIT(6)
+#define SQUELCH_DISABLE BIT(5)
+#define PAD_VDDO_SHIFT (2)
+#define PAD_VDDO_MASK (0x7)
+#define PAD_CMOSDRV_SHIFT (0)
+#define PAD_CMOSDRV_MASK (0x3)
+
+/* Bit definitions for the TOD_CFG register */
+#define TOD_EVEN_PPS_MODE BIT(2)
+#define TOD_OUT_SYNC_ENABLE BIT(1)
+#define TOD_ENABLE BIT(0)
+
+/* Bit definitions for the TOD_WRITE_SELECT_CFG_0 register */
+#define WR_PWM_DECODER_INDEX_SHIFT (4)
+#define WR_PWM_DECODER_INDEX_MASK (0xf)
+#define WR_REF_INDEX_SHIFT (0)
+#define WR_REF_INDEX_MASK (0xf)
+
+/* Bit definitions for the TOD_WRITE_CMD register */
+#define TOD_WRITE_SELECTION_SHIFT (0)
+#define TOD_WRITE_SELECTION_MASK (0xf)
+
+/* Bit definitions for the TOD_READ_PRIMARY_SEL_CFG_0 register */
+#define RD_PWM_DECODER_INDEX_SHIFT (4)
+#define RD_PWM_DECODER_INDEX_MASK (0xf)
+#define RD_REF_INDEX_SHIFT (0)
+#define RD_REF_INDEX_MASK (0xf)
+
+/* Bit definitions for the TOD_READ_PRIMARY_CMD register */
+#define TOD_READ_TRIGGER_MODE BIT(4)
+#define TOD_READ_TRIGGER_SHIFT (0)
+#define TOD_READ_TRIGGER_MASK (0xf)
+
+#endif
diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c
index 67d0199840fd..9d72ab593f13 100644
--- a/drivers/ptp/ptp_chardev.c
+++ b/drivers/ptp/ptp_chardev.c
@@ -149,11 +149,21 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
err = -EFAULT;
break;
}
- if (((req.extts.flags & ~PTP_EXTTS_VALID_FLAGS) ||
- req.extts.rsv[0] || req.extts.rsv[1]) &&
- cmd == PTP_EXTTS_REQUEST2) {
- err = -EINVAL;
- break;
+ if (cmd == PTP_EXTTS_REQUEST2) {
+ /* Tell the drivers to check the flags carefully. */
+ req.extts.flags |= PTP_STRICT_FLAGS;
+ /* Make sure no reserved bit is set. */
+ if ((req.extts.flags & ~PTP_EXTTS_VALID_FLAGS) ||
+ req.extts.rsv[0] || req.extts.rsv[1]) {
+ err = -EINVAL;
+ break;
+ }
+ /* Ensure one of the rising/falling edge bits is set. */
+ if ((req.extts.flags & PTP_ENABLE_FEATURE) &&
+ (req.extts.flags & PTP_EXTTS_EDGES) == 0) {
+ err = -EINVAL;
+ break;
+ }
} else if (cmd == PTP_EXTTS_REQUEST) {
req.extts.flags &= PTP_EXTTS_V1_VALID_FLAGS;
req.extts.rsv[0] = 0;
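With the change above, PTP_EXTTS_REQUEST2 now stamps PTP_STRICT_FLAGS into the request and rejects enables that select no edge at all. A hedged userspace sketch of a request that passes the new checks, assuming a PTP clock at /dev/ptp0:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/ptp_clock.h>

int main(void)
{
    struct ptp_extts_request req = {
        .index = 0,
        /* At least one of rising/falling must now be set. */
        .flags = PTP_ENABLE_FEATURE | PTP_RISING_EDGE,
    };
    int fd = open("/dev/ptp0", O_RDWR);

    if (fd < 0 || ioctl(fd, PTP_EXTTS_REQUEST2, &req)) {
        perror("PTP_EXTTS_REQUEST2");
        return 1;
    }
    return 0;
}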
diff --git a/drivers/ptp/ptp_clockmatrix.c b/drivers/ptp/ptp_clockmatrix.c
new file mode 100644
index 000000000000..a5110b7b4ece
--- /dev/null
+++ b/drivers/ptp/ptp_clockmatrix.c
@@ -0,0 +1,1427 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * PTP hardware clock driver for the IDT ClockMatrix(TM) family of timing and
+ * synchronization devices.
+ *
+ * Copyright (C) 2019 Integrated Device Technology, Inc., a Renesas Company.
+ */
+#include <linux/firmware.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/timekeeping.h>
+
+#include "ptp_private.h"
+#include "ptp_clockmatrix.h"
+
+MODULE_DESCRIPTION("Driver for IDT ClockMatrix(TM) family");
+MODULE_AUTHOR("Richard Cochran <richardcochran@gmail.com>");
+MODULE_AUTHOR("IDT support-1588 <IDT-support-1588@lm.renesas.com>");
+MODULE_VERSION("1.0");
+MODULE_LICENSE("GPL");
+
+#define SETTIME_CORRECTION (0)
+
+static int char_array_to_timespec(u8 *buf,
+ u8 count,
+ struct timespec64 *ts)
+{
+ u8 i;
+ u64 nsec;
+ time64_t sec;
+
+ if (count < TOD_BYTE_COUNT)
+ return 1;
+
+ /* Sub-nanoseconds are in buf[0]. */
+ nsec = buf[4];
+ for (i = 0; i < 3; i++) {
+ nsec <<= 8;
+ nsec |= buf[3 - i];
+ }
+
+ sec = buf[10];
+ for (i = 0; i < 5; i++) {
+ sec <<= 8;
+ sec |= buf[9 - i];
+ }
+
+ ts->tv_sec = sec;
+ ts->tv_nsec = nsec;
+
+ return 0;
+}
+
+static int timespec_to_char_array(struct timespec64 const *ts,
+ u8 *buf,
+ u8 count)
+{
+ u8 i;
+ s32 nsec;
+ time64_t sec;
+
+ if (count < TOD_BYTE_COUNT)
+ return 1;
+
+ nsec = ts->tv_nsec;
+ sec = ts->tv_sec;
+
+ /* Sub-nanoseconds are in buf[0]. */
+ buf[0] = 0;
+ for (i = 1; i < 5; i++) {
+ buf[i] = nsec & 0xff;
+ nsec >>= 8;
+ }
+
+ for (i = 5; i < TOD_BYTE_COUNT; i++) {
+
+ buf[i] = sec & 0xff;
+ sec >>= 8;
+ }
+
+ return 0;
+}
+
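The two converters above pack the TOD as a little-endian byte array: buf[0] carries sub-nanoseconds, buf[1..4] the 32-bit nanoseconds, and buf[5..10] the 48-bit seconds, which implies TOD_BYTE_COUNT is 11 (1 + 4 + 6). A worked example of the layout, assuming ts = { .tv_sec = 2, .tv_nsec = 5 }:

/* timespec_to_char_array() fills the 11-byte buffer as:
 *
 *   buf[0]     = 0x00                 sub-ns, always written as 0
 *   buf[1..4]  = 05 00 00 00          nanoseconds, LSB first
 *   buf[5..10] = 02 00 00 00 00 00    seconds, LSB first
 */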
+static int idtcm_xfer(struct idtcm *idtcm,
+ u8 regaddr,
+ u8 *buf,
+ u16 count,
+ bool write)
+{
+ struct i2c_client *client = idtcm->client;
+ struct i2c_msg msg[2];
+ int cnt;
+
+ msg[0].addr = client->addr;
+ msg[0].flags = 0;
+ msg[0].len = 1;
+ msg[0].buf = &regaddr;
+
+ msg[1].addr = client->addr;
+ msg[1].flags = write ? 0 : I2C_M_RD;
+ msg[1].len = count;
+ msg[1].buf = buf;
+
+ cnt = i2c_transfer(client->adapter, msg, 2);
+
+ if (cnt < 0) {
+ dev_err(&client->dev, "i2c_transfer returned %d\n", cnt);
+ return cnt;
+ } else if (cnt != 2) {
+ dev_err(&client->dev,
+ "i2c_transfer sent only %d of %d messages\n", cnt, 2);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int idtcm_page_offset(struct idtcm *idtcm, u8 val)
+{
+ u8 buf[4];
+ int err;
+
+ if (idtcm->page_offset == val)
+ return 0;
+
+ buf[0] = 0x0;
+ buf[1] = val;
+ buf[2] = 0x10;
+ buf[3] = 0x20;
+
+ err = idtcm_xfer(idtcm, PAGE_ADDR, buf, sizeof(buf), 1);
+
+ if (err)
+ dev_err(&idtcm->client->dev, "failed to set page offset\n");
+ else
+ idtcm->page_offset = val;
+
+ return err;
+}
+
+static int _idtcm_rdwr(struct idtcm *idtcm,
+ u16 regaddr,
+ u8 *buf,
+ u16 count,
+ bool write)
+{
+ u8 hi;
+ u8 lo;
+ int err;
+
+ hi = (regaddr >> 8) & 0xff;
+ lo = regaddr & 0xff;
+
+ err = idtcm_page_offset(idtcm, hi);
+
+ if (err)
+ goto out;
+
+ err = idtcm_xfer(idtcm, lo, buf, count, write);
+out:
+ return err;
+}
+
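_idtcm_rdwr() splits the 16-bit register address into a page (high byte), programmed through PAGE_ADDR only when it changes, and a one-byte offset used as the actual I2C register address. A worked example using the module layout from idt8a340_reg.h:

/* Reading PRODUCT_ID (module GENERAL_STATUS = 0xc014, offset 0x001e):
 *
 *   regaddr = 0xc014 + 0x001e = 0xc032
 *   hi      = 0xc0  -> written via PAGE_ADDR (0x00fc) if not current
 *   lo      = 0x32  -> register address for the idtcm_xfer() transfer
 */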
+static int idtcm_read(struct idtcm *idtcm,
+ u16 module,
+ u16 regaddr,
+ u8 *buf,
+ u16 count)
+{
+ return _idtcm_rdwr(idtcm, module + regaddr, buf, count, false);
+}
+
+static int idtcm_write(struct idtcm *idtcm,
+ u16 module,
+ u16 regaddr,
+ u8 *buf,
+ u16 count)
+{
+ return _idtcm_rdwr(idtcm, module + regaddr, buf, count, true);
+}
+
+static int _idtcm_gettime(struct idtcm_channel *channel,
+ struct timespec64 *ts)
+{
+ struct idtcm *idtcm = channel->idtcm;
+ u8 buf[TOD_BYTE_COUNT];
+ u8 trigger;
+ int err;
+
+ err = idtcm_read(idtcm, channel->tod_read_primary,
+ TOD_READ_PRIMARY_CMD, &trigger, sizeof(trigger));
+ if (err)
+ return err;
+
+ trigger &= ~(TOD_READ_TRIGGER_MASK << TOD_READ_TRIGGER_SHIFT);
+ trigger |= (1 << TOD_READ_TRIGGER_SHIFT);
+ trigger |= TOD_READ_TRIGGER_MODE;
+
+ err = idtcm_write(idtcm, channel->tod_read_primary,
+ TOD_READ_PRIMARY_CMD, &trigger, sizeof(trigger));
+
+ if (err)
+ return err;
+
+ if (idtcm->calculate_overhead_flag)
+ idtcm->start_time = ktime_get_raw();
+
+ err = idtcm_read(idtcm, channel->tod_read_primary,
+ TOD_READ_PRIMARY, buf, sizeof(buf));
+
+ if (err)
+ return err;
+
+ err = char_array_to_timespec(buf, sizeof(buf), ts);
+
+ return err;
+}
+
+static int _sync_pll_output(struct idtcm *idtcm,
+ u8 pll,
+ u8 sync_src,
+ u8 qn,
+ u8 qn_plus_1)
+{
+ int err;
+ u8 val;
+ u16 sync_ctrl0;
+ u16 sync_ctrl1;
+
+ if ((qn == 0) && (qn_plus_1 == 0))
+ return 0;
+
+ switch (pll) {
+ case 0:
+ sync_ctrl0 = HW_Q0_Q1_CH_SYNC_CTRL_0;
+ sync_ctrl1 = HW_Q0_Q1_CH_SYNC_CTRL_1;
+ break;
+ case 1:
+ sync_ctrl0 = HW_Q2_Q3_CH_SYNC_CTRL_0;
+ sync_ctrl1 = HW_Q2_Q3_CH_SYNC_CTRL_1;
+ break;
+ case 2:
+ sync_ctrl0 = HW_Q4_Q5_CH_SYNC_CTRL_0;
+ sync_ctrl1 = HW_Q4_Q5_CH_SYNC_CTRL_1;
+ break;
+ case 3:
+ sync_ctrl0 = HW_Q6_Q7_CH_SYNC_CTRL_0;
+ sync_ctrl1 = HW_Q6_Q7_CH_SYNC_CTRL_1;
+ break;
+ case 4:
+ sync_ctrl0 = HW_Q8_CH_SYNC_CTRL_0;
+ sync_ctrl1 = HW_Q8_CH_SYNC_CTRL_1;
+ break;
+ case 5:
+ sync_ctrl0 = HW_Q9_CH_SYNC_CTRL_0;
+ sync_ctrl1 = HW_Q9_CH_SYNC_CTRL_1;
+ break;
+ case 6:
+ sync_ctrl0 = HW_Q10_CH_SYNC_CTRL_0;
+ sync_ctrl1 = HW_Q10_CH_SYNC_CTRL_1;
+ break;
+ case 7:
+ sync_ctrl0 = HW_Q11_CH_SYNC_CTRL_0;
+ sync_ctrl1 = HW_Q11_CH_SYNC_CTRL_1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ val = SYNCTRL1_MASTER_SYNC_RST;
+
+ /* Place master sync in reset */
+ err = idtcm_write(idtcm, 0, sync_ctrl1, &val, sizeof(val));
+ if (err)
+ return err;
+
+ err = idtcm_write(idtcm, 0, sync_ctrl0, &sync_src, sizeof(sync_src));
+ if (err)
+ return err;
+
+ /* Set sync trigger mask */
+ val |= SYNCTRL1_FBDIV_FRAME_SYNC_TRIG | SYNCTRL1_FBDIV_SYNC_TRIG;
+
+ if (qn)
+ val |= SYNCTRL1_Q0_DIV_SYNC_TRIG;
+
+ if (qn_plus_1)
+ val |= SYNCTRL1_Q1_DIV_SYNC_TRIG;
+
+ err = idtcm_write(idtcm, 0, sync_ctrl1, &val, sizeof(val));
+ if (err)
+ return err;
+
+ /* Place master sync out of reset */
+ val &= ~(SYNCTRL1_MASTER_SYNC_RST);
+ err = idtcm_write(idtcm, 0, sync_ctrl1, &val, sizeof(val));
+
+ return err;
+}
+
+static int idtcm_sync_pps_output(struct idtcm_channel *channel)
+{
+ struct idtcm *idtcm = channel->idtcm;
+
+ u8 pll;
+ u8 sync_src;
+ u8 qn;
+ u8 qn_plus_1;
+ int err = 0;
+
+ u16 output_mask = channel->output_mask;
+
+ switch (channel->dpll_n) {
+ case DPLL_0:
+ sync_src = SYNC_SOURCE_DPLL0_TOD_PPS;
+ break;
+ case DPLL_1:
+ sync_src = SYNC_SOURCE_DPLL1_TOD_PPS;
+ break;
+ case DPLL_2:
+ sync_src = SYNC_SOURCE_DPLL2_TOD_PPS;
+ break;
+ case DPLL_3:
+ sync_src = SYNC_SOURCE_DPLL3_TOD_PPS;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ for (pll = 0; pll < 8; pll++) {
+
+ qn = output_mask & 0x1;
+ output_mask = output_mask >> 1;
+
+ if (pll < 4) {
+ /* The first 4 PLLs have 2 outputs each */
+ qn_plus_1 = output_mask & 0x1;
+ output_mask = output_mask >> 1;
+ } else {
+ qn_plus_1 = 0;
+ }
+
+ if ((qn != 0) || (qn_plus_1 != 0))
+ err = _sync_pll_output(idtcm, pll, sync_src, qn,
+ qn_plus_1);
+
+ if (err)
+ return err;
+ }
+
+ return err;
+}
+
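The loop above consumes output_mask one output at a time: PLLs 0-3 each own a Qn/Qn+1 pair (two bits), PLLs 4-7 a single output, covering 12 outputs in total. A worked example, assuming output_mask = 0x0041:

/* output_mask = 0x0041 (bits 0 and 6 set):
 *
 *   bits 0,1   -> PLL0 Qn/Qn+1 : Qn set, so PLL0 gets synced
 *   bits 2,3   -> PLL1         : clear, skipped
 *   bits 4,5   -> PLL2         : clear, skipped
 *   bits 6,7   -> PLL3 Qn/Qn+1 : Qn set, so PLL3 gets synced
 *   bits 8..11 -> PLL4..PLL7   : one output each, all clear here
 */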
+static int _idtcm_set_dpll_tod(struct idtcm_channel *channel,
+ struct timespec64 const *ts,
+ enum hw_tod_write_trig_sel wr_trig)
+{
+ struct idtcm *idtcm = channel->idtcm;
+
+ u8 buf[TOD_BYTE_COUNT];
+ u8 cmd;
+ int err;
+ struct timespec64 local_ts = *ts;
+ s64 total_overhead_ns;
+
+ /* Configure HW TOD write trigger. */
+ err = idtcm_read(idtcm, channel->hw_dpll_n, HW_DPLL_TOD_CTRL_1,
+ &cmd, sizeof(cmd));
+
+ if (err)
+ return err;
+
+ cmd &= ~(0x0f);
+ cmd |= wr_trig | 0x08;
+
+ err = idtcm_write(idtcm, channel->hw_dpll_n, HW_DPLL_TOD_CTRL_1,
+ &cmd, sizeof(cmd));
+
+ if (err)
+ return err;
+
+ if (wr_trig != HW_TOD_WR_TRIG_SEL_MSB) {
+
+ err = timespec_to_char_array(&local_ts, buf, sizeof(buf));
+
+ if (err)
+ return err;
+
+ err = idtcm_write(idtcm, channel->hw_dpll_n,
+ HW_DPLL_TOD_OVR__0, buf, sizeof(buf));
+
+ if (err)
+ return err;
+ }
+
+ /* ARM HW TOD write trigger. */
+ cmd &= ~(0x08);
+
+ err = idtcm_write(idtcm, channel->hw_dpll_n, HW_DPLL_TOD_CTRL_1,
+ &cmd, sizeof(cmd));
+
+ if (wr_trig == HW_TOD_WR_TRIG_SEL_MSB) {
+
+ if (idtcm->calculate_overhead_flag) {
+ total_overhead_ns = ktime_to_ns(ktime_get_raw()
+ - idtcm->start_time)
+ + idtcm->tod_write_overhead_ns
+ + SETTIME_CORRECTION;
+
+ timespec64_add_ns(&local_ts, total_overhead_ns);
+
+ idtcm->calculate_overhead_flag = 0;
+ }
+
+ err = timespec_to_char_array(&local_ts, buf, sizeof(buf));
+
+ if (err)
+ return err;
+
+ err = idtcm_write(idtcm, channel->hw_dpll_n,
+ HW_DPLL_TOD_OVR__0, buf, sizeof(buf));
+ }
+
+ return err;
+}
+
+static int _idtcm_settime(struct idtcm_channel *channel,
+ struct timespec64 const *ts,
+ enum hw_tod_write_trig_sel wr_trig)
+{
+ struct idtcm *idtcm = channel->idtcm;
+ s32 retval;
+ int err;
+ int i;
+ u8 trig_sel;
+
+ err = _idtcm_set_dpll_tod(channel, ts, wr_trig);
+
+ if (err)
+ return err;
+
+ /* Wait for the operation to complete. */
+ for (i = 0; i < 10000; i++) {
+ err = idtcm_read(idtcm, channel->hw_dpll_n,
+ HW_DPLL_TOD_CTRL_1, &trig_sel,
+ sizeof(trig_sel));
+
+ if (err)
+ return err;
+
+ if (trig_sel == 0x4a)
+ break;
+
+ err = 1;
+ }
+
+ if (err)
+ return err;
+
+ retval = idtcm_sync_pps_output(channel);
+
+ return retval;
+}
+
+static int idtcm_set_phase_pull_in_offset(struct idtcm_channel *channel,
+ s32 offset_ns)
+{
+ int err;
+ int i;
+ struct idtcm *idtcm = channel->idtcm;
+
+ u8 buf[4];
+
+ for (i = 0; i < 4; i++) {
+ buf[i] = 0xff & (offset_ns);
+ offset_ns >>= 8;
+ }
+
+ err = idtcm_write(idtcm, channel->dpll_phase_pull_in, PULL_IN_OFFSET,
+ buf, sizeof(buf));
+
+ return err;
+}
+
+static int idtcm_set_phase_pull_in_slope_limit(struct idtcm_channel *channel,
+ u32 max_ffo_ppb)
+{
+ int err;
+ u8 i;
+ struct idtcm *idtcm = channel->idtcm;
+
+ u8 buf[3];
+
+ if (max_ffo_ppb & 0xff000000)
+ max_ffo_ppb = 0;
+
+ for (i = 0; i < 3; i++) {
+ buf[i] = 0xff & (max_ffo_ppb);
+ max_ffo_ppb >>= 8;
+ }
+
+ err = idtcm_write(idtcm, channel->dpll_phase_pull_in,
+ PULL_IN_SLOPE_LIMIT, buf, sizeof(buf));
+
+ return err;
+}
+
+static int idtcm_start_phase_pull_in(struct idtcm_channel *channel)
+{
+ int err;
+ struct idtcm *idtcm = channel->idtcm;
+
+ u8 buf;
+
+ err = idtcm_read(idtcm, channel->dpll_phase_pull_in, PULL_IN_CTRL,
+ &buf, sizeof(buf));
+
+ if (err)
+ return err;
+
+ if (buf == 0) {
+ buf = 0x01;
+ err = idtcm_write(idtcm, channel->dpll_phase_pull_in,
+ PULL_IN_CTRL, &buf, sizeof(buf));
+ } else {
+ err = -EBUSY;
+ }
+
+ return err;
+}
+
+static int idtcm_do_phase_pull_in(struct idtcm_channel *channel,
+ s32 offset_ns,
+ u32 max_ffo_ppb)
+{
+ int err;
+
+ err = idtcm_set_phase_pull_in_offset(channel, -offset_ns);
+
+ if (err)
+ return err;
+
+ err = idtcm_set_phase_pull_in_slope_limit(channel, max_ffo_ppb);
+
+ if (err)
+ return err;
+
+ err = idtcm_start_phase_pull_in(channel);
+
+ return err;
+}
+
+static int _idtcm_adjtime(struct idtcm_channel *channel, s64 delta)
+{
+ int err;
+ struct idtcm *idtcm = channel->idtcm;
+ struct timespec64 ts;
+ s64 now;
+
+ if (abs(delta) < PHASE_PULL_IN_THRESHOLD_NS) {
+ err = idtcm_do_phase_pull_in(channel, delta, 0);
+ } else {
+ idtcm->calculate_overhead_flag = 1;
+
+ err = _idtcm_gettime(channel, &ts);
+
+ if (err)
+ return err;
+
+ now = timespec64_to_ns(&ts);
+ now += delta;
+
+ ts = ns_to_timespec64(now);
+
+ err = _idtcm_settime(channel, &ts, HW_TOD_WR_TRIG_SEL_MSB);
+ }
+
+ return err;
+}
+
+static int idtcm_state_machine_reset(struct idtcm *idtcm)
+{
+ int err;
+ u8 byte = SM_RESET_CMD;
+
+ err = idtcm_write(idtcm, RESET_CTRL, SM_RESET, &byte, sizeof(byte));
+
+ if (!err)
+ msleep_interruptible(POST_SM_RESET_DELAY_MS);
+
+ return err;
+}
+
+static int idtcm_read_hw_rev_id(struct idtcm *idtcm, u8 *hw_rev_id)
+{
+ return idtcm_read(idtcm,
+ GENERAL_STATUS,
+ HW_REV_ID,
+ hw_rev_id,
+ sizeof(u8));
+}
+
+static int idtcm_read_bond_id(struct idtcm *idtcm, u8 *bond_id)
+{
+ return idtcm_read(idtcm,
+ GENERAL_STATUS,
+ BOND_ID,
+ bond_id,
+ sizeof(u8));
+}
+
+static int idtcm_read_hw_csr_id(struct idtcm *idtcm, u16 *hw_csr_id)
+{
+ int err;
+ u8 buf[2] = {0};
+
+ err = idtcm_read(idtcm, GENERAL_STATUS, HW_CSR_ID, buf, sizeof(buf));
+
+ *hw_csr_id = (buf[1] << 8) | buf[0];
+
+ return err;
+}
+
+static int idtcm_read_hw_irq_id(struct idtcm *idtcm, u16 *hw_irq_id)
+{
+ int err;
+ u8 buf[2] = {0};
+
+ err = idtcm_read(idtcm, GENERAL_STATUS, HW_IRQ_ID, buf, sizeof(buf));
+
+ *hw_irq_id = (buf[1] << 8) | buf[0];
+
+ return err;
+}
+
+static int idtcm_read_product_id(struct idtcm *idtcm, u16 *product_id)
+{
+ int err;
+ u8 buf[2] = {0};
+
+ err = idtcm_read(idtcm, GENERAL_STATUS, PRODUCT_ID, buf, sizeof(buf));
+
+ *product_id = (buf[1] << 8) | buf[0];
+
+ return err;
+}
+
+static int idtcm_read_major_release(struct idtcm *idtcm, u8 *major)
+{
+ int err;
+ u8 buf = 0;
+
+ err = idtcm_read(idtcm, GENERAL_STATUS, MAJ_REL, &buf, sizeof(buf));
+
+ *major = buf >> 1;
+
+ return err;
+}
+
+static int idtcm_read_minor_release(struct idtcm *idtcm, u8 *minor)
+{
+ return idtcm_read(idtcm, GENERAL_STATUS, MIN_REL, minor, sizeof(u8));
+}
+
+static int idtcm_read_hotfix_release(struct idtcm *idtcm, u8 *hotfix)
+{
+ return idtcm_read(idtcm,
+ GENERAL_STATUS,
+ HOTFIX_REL,
+ hotfix,
+ sizeof(u8));
+}
+
+static int idtcm_read_pipeline(struct idtcm *idtcm, u32 *pipeline)
+{
+ int err;
+ u8 buf[4] = {0};
+
+ err = idtcm_read(idtcm,
+ GENERAL_STATUS,
+ PIPELINE_ID,
+ &buf[0],
+ sizeof(buf));
+
+ *pipeline = (buf[3] << 24) | (buf[2] << 16) | (buf[1] << 8) | buf[0];
+
+ return err;
+}
+
+static int process_pll_mask(struct idtcm *idtcm, u32 addr, u8 val, u8 *mask)
+{
+ int err = 0;
+
+ if (addr == PLL_MASK_ADDR) {
+ if ((val & 0xf0) || !(val & 0xf)) {
+ dev_err(&idtcm->client->dev,
+ "Invalid PLL mask 0x%hhx\n", val);
+ err = -EINVAL;
+ } else {
+ *mask = val;
+ }
+ }
+
+ return err;
+}
+
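+/*
+ * Output masks are 16-bit values that the firmware image writes one
+ * byte at a time, so each PLL gets two cases: the LSB at the base
+ * address and the MSB at base + 1.
+ */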
+static int set_pll_output_mask(struct idtcm *idtcm, u16 addr, u8 val)
+{
+ int err = 0;
+
+ switch (addr) {
+ case OUTPUT_MASK_PLL0_ADDR:
+ SET_U16_LSB(idtcm->channel[0].output_mask, val);
+ break;
+ case OUTPUT_MASK_PLL0_ADDR + 1:
+ SET_U16_MSB(idtcm->channel[0].output_mask, val);
+ break;
+ case OUTPUT_MASK_PLL1_ADDR:
+ SET_U16_LSB(idtcm->channel[1].output_mask, val);
+ break;
+ case OUTPUT_MASK_PLL1_ADDR + 1:
+ SET_U16_MSB(idtcm->channel[1].output_mask, val);
+ break;
+ case OUTPUT_MASK_PLL2_ADDR:
+ SET_U16_LSB(idtcm->channel[2].output_mask, val);
+ break;
+ case OUTPUT_MASK_PLL2_ADDR + 1:
+ SET_U16_MSB(idtcm->channel[2].output_mask, val);
+ break;
+ case OUTPUT_MASK_PLL3_ADDR:
+ SET_U16_LSB(idtcm->channel[3].output_mask, val);
+ break;
+ case OUTPUT_MASK_PLL3_ADDR + 1:
+ SET_U16_MSB(idtcm->channel[3].output_mask, val);
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ return err;
+}
+
+static int check_and_set_masks(struct idtcm *idtcm,
+ u16 regaddr,
+ u8 val)
+{
+ int err = 0;
+
+ if (set_pll_output_mask(idtcm, regaddr, val)) {
+ /* Not an output mask, check for pll mask */
+ err = process_pll_mask(idtcm, regaddr, val, &idtcm->pll_mask);
+ }
+
+ return err;
+}
+
+static void display_pll_and_output_masks(struct idtcm *idtcm)
+{
+ u8 i;
+ u8 mask;
+
+ dev_dbg(&idtcm->client->dev, "pllmask = 0x%02x\n", idtcm->pll_mask);
+
+ for (i = 0; i < MAX_PHC_PLL; i++) {
+ mask = 1 << i;
+
+ if (mask & idtcm->pll_mask)
+ dev_dbg(&idtcm->client->dev,
+ "PLL%d output_mask = 0x%04x\n",
+ i, idtcm->channel[i].output_mask);
+ }
+}
+
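+/*
+ * The firmware image is a flat array of 4-byte records (struct
+ * idtcm_fwrc): high/low register address, value, and a reserved byte
+ * that must be zero.  Records for PLL_MASK_ADDR and the
+ * OUTPUT_MASK_PLL* addresses only update the driver's bookkeeping
+ * masks; read-only register regions are skipped rather than written.
+ */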
+static int idtcm_load_firmware(struct idtcm *idtcm,
+ struct device *dev)
+{
+ const struct firmware *fw;
+ struct idtcm_fwrc *rec;
+ u32 regaddr;
+ int err;
+ s32 len;
+ u8 val;
+ u8 loaddr;
+
+ dev_dbg(&idtcm->client->dev, "requesting firmware '%s'\n", FW_FILENAME);
+
+ err = request_firmware(&fw, FW_FILENAME, dev);
+
+ if (err)
+ return err;
+
+ dev_dbg(&idtcm->client->dev, "firmware size %zu bytes\n", fw->size);
+
+ rec = (struct idtcm_fwrc *) fw->data;
+
+ if (fw->size > 0)
+ idtcm_state_machine_reset(idtcm);
+
+ for (len = fw->size; len > 0; len -= sizeof(*rec)) {
+
+ if (rec->reserved) {
+ dev_err(&idtcm->client->dev,
+ "bad firmware, reserved field non-zero\n");
+ err = -EINVAL;
+ } else {
+ regaddr = rec->hiaddr << 8;
+ regaddr |= rec->loaddr;
+
+ val = rec->value;
+ loaddr = rec->loaddr;
+
+ rec++;
+
+ err = check_and_set_masks(idtcm, regaddr, val);
+ }
+
+ if (err == 0) {
+ /* Top (status registers) and bottom are read-only */
+ if ((regaddr < GPIO_USER_CONTROL)
+ || (regaddr >= SCRATCH))
+ continue;
+
+ /* Page size 128, last 4 bytes of page skipped */
+ if (((loaddr > 0x7b) && (loaddr <= 0x7f))
+ || ((loaddr > 0xfb) && (loaddr <= 0xff)))
+ continue;
+
+ err = idtcm_write(idtcm, regaddr, 0, &val, sizeof(val));
+ }
+
+ if (err)
+ goto out;
+ }
+
+ display_pll_and_output_masks(idtcm);
+
+out:
+ release_firmware(fw);
+ return err;
+}
+
+static int idtcm_pps_enable(struct idtcm_channel *channel, bool enable)
+{
+ struct idtcm *idtcm = channel->idtcm;
+ u32 module;
+ u8 val;
+ int err;
+
+ /*
+ * This assumes that the 1-PPS is on the second of each DPLL's
+ * two outputs. But is this always true?
+ */
+ switch (channel->dpll_n) {
+ case DPLL_0:
+ module = OUTPUT_1;
+ break;
+ case DPLL_1:
+ module = OUTPUT_3;
+ break;
+ case DPLL_2:
+ module = OUTPUT_5;
+ break;
+ case DPLL_3:
+ module = OUTPUT_7;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ err = idtcm_read(idtcm, module, OUT_CTRL_1, &val, sizeof(val));
+
+ if (err)
+ return err;
+
+ if (enable)
+ val |= SQUELCH_DISABLE;
+ else
+ val &= ~SQUELCH_DISABLE;
+
+ err = idtcm_write(idtcm, module, OUT_CTRL_1, &val, sizeof(val));
+
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int idtcm_set_pll_mode(struct idtcm_channel *channel,
+ enum pll_mode pll_mode)
+{
+ struct idtcm *idtcm = channel->idtcm;
+ int err;
+ u8 dpll_mode;
+
+ err = idtcm_read(idtcm, channel->dpll_n, DPLL_MODE,
+ &dpll_mode, sizeof(dpll_mode));
+ if (err)
+ return err;
+
+ dpll_mode &= ~(PLL_MODE_MASK << PLL_MODE_SHIFT);
+
+ dpll_mode |= (pll_mode << PLL_MODE_SHIFT);
+
+ channel->pll_mode = pll_mode;
+
+ err = idtcm_write(idtcm, channel->dpll_n, DPLL_MODE,
+ &dpll_mode, sizeof(dpll_mode));
+ if (err)
+ return err;
+
+ return 0;
+}
+
+/* PTP Hardware Clock interface */
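+/*
+ * The time and frequency operations below serialize hardware access
+ * with idtcm->reg_lock, since each operation spans several transfers
+ * on the paged I2C register map.
+ */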
+
+static int idtcm_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+{
+ struct idtcm_channel *channel =
+ container_of(ptp, struct idtcm_channel, caps);
+ struct idtcm *idtcm = channel->idtcm;
+ u8 i;
+ bool neg_adj = false;
+ int err;
+ u8 buf[6] = {0};
+ s64 fcw;
+
+ if (channel->pll_mode != PLL_MODE_WRITE_FREQUENCY) {
+ err = idtcm_set_pll_mode(channel, PLL_MODE_WRITE_FREQUENCY);
+ if (err)
+ return err;
+ }
+
+ /*
+ * The Frequency Control Word (FCW) unit is 1.11 * 10^-10 ppm.
+ *
+ * adjfreq:
+ * ppb * 10^9
+ * FCW = ----------
+ * 111
+ *
+ * adjfine:
+ * ppm_16 * 5^12
+ * FCW = -------------
+ * 111 * 2^4
+ */
+ if (ppb < 0) {
+ neg_adj = 1;
+ ppb = -ppb;
+ }
+
+ /* 2 ^ -53 = 1.1102230246251565404236316680908e-16 */
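+ /*
+ * The FCW LSB is 2^-53 of the nominal frequency, so
+ * FCW = (ppb * 1e-9) * 2^53 = ppb * 1e12 / (1e21 / 2^53),
+ * and 1e21 / 2^53 ~= 111022.3, hence the divisor below.
+ * For example, ppb = 1000 (1 ppm) corresponds to FCW ~= 9.0072e9.
+ */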
+ fcw = ppb * 1000000000000ULL;
+
+ fcw = div_u64(fcw, 111022);
+
+ if (neg_adj)
+ fcw = -fcw;
+
+ for (i = 0; i < 6; i++) {
+ buf[i] = fcw & 0xff;
+ fcw >>= 8;
+ }
+
+ mutex_lock(&idtcm->reg_lock);
+
+ err = idtcm_write(idtcm, channel->dpll_freq, DPLL_WR_FREQ,
+ buf, sizeof(buf));
+
+ mutex_unlock(&idtcm->reg_lock);
+ return err;
+}
+
+static int idtcm_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
+{
+ struct idtcm_channel *channel =
+ container_of(ptp, struct idtcm_channel, caps);
+ struct idtcm *idtcm = channel->idtcm;
+ int err;
+
+ mutex_lock(&idtcm->reg_lock);
+
+ err = _idtcm_gettime(channel, ts);
+
+ mutex_unlock(&idtcm->reg_lock);
+
+ return err;
+}
+
+static int idtcm_settime(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct idtcm_channel *channel =
+ container_of(ptp, struct idtcm_channel, caps);
+ struct idtcm *idtcm = channel->idtcm;
+ int err;
+
+ mutex_lock(&idtcm->reg_lock);
+
+ err = _idtcm_settime(channel, ts, HW_TOD_WR_TRIG_SEL_MSB);
+
+ mutex_unlock(&idtcm->reg_lock);
+
+ return err;
+}
+
+static int idtcm_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct idtcm_channel *channel =
+ container_of(ptp, struct idtcm_channel, caps);
+ struct idtcm *idtcm = channel->idtcm;
+ int err;
+
+ mutex_lock(&idtcm->reg_lock);
+
+ err = _idtcm_adjtime(channel, delta);
+
+ mutex_unlock(&idtcm->reg_lock);
+
+ return err;
+}
+
+static int idtcm_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ struct idtcm_channel *channel =
+ container_of(ptp, struct idtcm_channel, caps);
+
+ switch (rq->type) {
+ case PTP_CLK_REQ_PEROUT:
+ if (!on)
+ return idtcm_pps_enable(channel, false);
+
+ /* Only accept a 1-PPS aligned to the second. */
+ if (rq->perout.start.nsec || rq->perout.period.sec != 1 ||
+ rq->perout.period.nsec)
+ return -ERANGE;
+
+ return idtcm_pps_enable(channel, true);
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
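+/*
+ * Channel ToD bring-up: squelch the 1-PPS output, set TOD_ENABLE so
+ * the counter starts ticking, then write an epoch of zero as the
+ * initial time.
+ */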
+static int idtcm_enable_tod(struct idtcm_channel *channel)
+{
+ struct idtcm *idtcm = channel->idtcm;
+ struct timespec64 ts = {0, 0};
+ u8 cfg;
+ int err;
+
+ err = idtcm_pps_enable(channel, false);
+ if (err)
+ return err;
+
+ /*
+ * Start the TOD clock ticking.
+ */
+ err = idtcm_read(idtcm, channel->tod_n, TOD_CFG, &cfg, sizeof(cfg));
+ if (err)
+ return err;
+
+ cfg |= TOD_ENABLE;
+
+ err = idtcm_write(idtcm, channel->tod_n, TOD_CFG, &cfg, sizeof(cfg));
+ if (err)
+ return err;
+
+ return _idtcm_settime(channel, &ts, HW_TOD_WR_TRIG_SEL_MSB);
+}
+
+static void idtcm_display_version_info(struct idtcm *idtcm)
+{
+ u8 major;
+ u8 minor;
+ u8 hotfix;
+ u32 pipeline;
+ u16 product_id;
+ u16 csr_id;
+ u16 irq_id;
+ u8 hw_rev_id;
+ u8 bond_id;
+
+ idtcm_read_major_release(idtcm, &major);
+ idtcm_read_minor_release(idtcm, &minor);
+ idtcm_read_hotfix_release(idtcm, &hotfix);
+ idtcm_read_pipeline(idtcm, &pipeline);
+
+ idtcm_read_product_id(idtcm, &product_id);
+ idtcm_read_hw_rev_id(idtcm, &hw_rev_id);
+ idtcm_read_bond_id(idtcm, &bond_id);
+ idtcm_read_hw_csr_id(idtcm, &csr_id);
+ idtcm_read_hw_irq_id(idtcm, &irq_id);
+
+ dev_info(&idtcm->client->dev, "Version: %d.%d.%d, Pipeline %u, "
+ "Product 0x%04x, Rev %d, Bond %d, CSR %d, IRQ %d\n",
+ major, minor, hotfix, pipeline,
+ product_id, hw_rev_id, bond_id, csr_id, irq_id);
+}
+
+static struct ptp_clock_info idtcm_caps = {
+ .owner = THIS_MODULE,
+ .max_adj = 244000,
+ .n_per_out = 1,
+ .adjfreq = &idtcm_adjfreq,
+ .adjtime = &idtcm_adjtime,
+ .gettime64 = &idtcm_gettime,
+ .settime64 = &idtcm_settime,
+ .enable = &idtcm_enable,
+};
+
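+/*
+ * Each PHC channel maps onto a fixed set of per-PLL register banks
+ * (DPLL, ToD, phase pull-in, ...); the switch below only selects the
+ * bank base addresses for the given index.
+ */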
+static int idtcm_enable_channel(struct idtcm *idtcm, u32 index)
+{
+ struct idtcm_channel *channel;
+ int err;
+
+ if (index >= MAX_PHC_PLL)
+ return -EINVAL;
+
+ channel = &idtcm->channel[index];
+
+ switch (index) {
+ case 0:
+ channel->dpll_freq = DPLL_FREQ_0;
+ channel->dpll_n = DPLL_0;
+ channel->tod_read_primary = TOD_READ_PRIMARY_0;
+ channel->tod_write = TOD_WRITE_0;
+ channel->tod_n = TOD_0;
+ channel->hw_dpll_n = HW_DPLL_0;
+ channel->dpll_phase = DPLL_PHASE_0;
+ channel->dpll_ctrl_n = DPLL_CTRL_0;
+ channel->dpll_phase_pull_in = DPLL_PHASE_PULL_IN_0;
+ break;
+ case 1:
+ channel->dpll_freq = DPLL_FREQ_1;
+ channel->dpll_n = DPLL_1;
+ channel->tod_read_primary = TOD_READ_PRIMARY_1;
+ channel->tod_write = TOD_WRITE_1;
+ channel->tod_n = TOD_1;
+ channel->hw_dpll_n = HW_DPLL_1;
+ channel->dpll_phase = DPLL_PHASE_1;
+ channel->dpll_ctrl_n = DPLL_CTRL_1;
+ channel->dpll_phase_pull_in = DPLL_PHASE_PULL_IN_1;
+ break;
+ case 2:
+ channel->dpll_freq = DPLL_FREQ_2;
+ channel->dpll_n = DPLL_2;
+ channel->tod_read_primary = TOD_READ_PRIMARY_2;
+ channel->tod_write = TOD_WRITE_2;
+ channel->tod_n = TOD_2;
+ channel->hw_dpll_n = HW_DPLL_2;
+ channel->dpll_phase = DPLL_PHASE_2;
+ channel->dpll_ctrl_n = DPLL_CTRL_2;
+ channel->dpll_phase_pull_in = DPLL_PHASE_PULL_IN_2;
+ break;
+ case 3:
+ channel->dpll_freq = DPLL_FREQ_3;
+ channel->dpll_n = DPLL_3;
+ channel->tod_read_primary = TOD_READ_PRIMARY_3;
+ channel->tod_write = TOD_WRITE_3;
+ channel->tod_n = TOD_3;
+ channel->hw_dpll_n = HW_DPLL_3;
+ channel->dpll_phase = DPLL_PHASE_3;
+ channel->dpll_ctrl_n = DPLL_CTRL_3;
+ channel->dpll_phase_pull_in = DPLL_PHASE_PULL_IN_3;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ channel->idtcm = idtcm;
+
+ channel->caps = idtcm_caps;
+ snprintf(channel->caps.name, sizeof(channel->caps.name),
+ "IDT CM PLL%u", index);
+
+ err = idtcm_set_pll_mode(channel, PLL_MODE_WRITE_FREQUENCY);
+ if (err)
+ return err;
+
+ err = idtcm_enable_tod(channel);
+ if (err)
+ return err;
+
+ channel->ptp_clock = ptp_clock_register(&channel->caps, NULL);
+
+ if (IS_ERR(channel->ptp_clock)) {
+ err = PTR_ERR(channel->ptp_clock);
+ channel->ptp_clock = NULL;
+ return err;
+ }
+
+ if (!channel->ptp_clock)
+ return -ENOTSUPP;
+
+ dev_info(&idtcm->client->dev, "PLL%d registered as ptp%d\n",
+ index, channel->ptp_clock->index);
+
+ return 0;
+}
+
+static void ptp_clock_unregister_all(struct idtcm *idtcm)
+{
+ u8 i;
+ struct idtcm_channel *channel;
+
+ for (i = 0; i < MAX_PHC_PLL; i++) {
+
+ channel = &idtcm->channel[i];
+
+ if (channel->ptp_clock)
+ ptp_clock_unregister(channel->ptp_clock);
+ }
+}
+
+static void set_default_masks(struct idtcm *idtcm)
+{
+ idtcm->pll_mask = DEFAULT_PLL_MASK;
+
+ idtcm->channel[0].output_mask = DEFAULT_OUTPUT_MASK_PLL0;
+ idtcm->channel[1].output_mask = DEFAULT_OUTPUT_MASK_PLL1;
+ idtcm->channel[2].output_mask = DEFAULT_OUTPUT_MASK_PLL2;
+ idtcm->channel[3].output_mask = DEFAULT_OUTPUT_MASK_PLL3;
+}
+
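+/*
+ * Estimate the cost of one ToD register write by timing
+ * TOD_WRITE_OVERHEAD_COUNT_MAX dummy writes and averaging; the result
+ * is used to compensate the adjtime path for I2C latency.
+ */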
+static int set_tod_write_overhead(struct idtcm *idtcm)
+{
+ int err;
+ u8 i;
+
+ s64 total_ns = 0;
+
+ ktime_t start;
+ ktime_t stop;
+
+ char buf[TOD_BYTE_COUNT] = {0};
+
+ struct idtcm_channel *channel = &idtcm->channel[2];
+
+ /* Set page offset; the return value of this setup write is not checked */
+ idtcm_write(idtcm, channel->hw_dpll_n, HW_DPLL_TOD_OVR__0,
+ buf, sizeof(buf));
+
+ for (i = 0; i < TOD_WRITE_OVERHEAD_COUNT_MAX; i++) {
+
+ start = ktime_get_raw();
+
+ err = idtcm_write(idtcm, channel->hw_dpll_n,
+ HW_DPLL_TOD_OVR__0, buf, sizeof(buf));
+
+ if (err)
+ return err;
+
+ stop = ktime_get_raw();
+
+ total_ns += ktime_to_ns(stop - start);
+ }
+
+ idtcm->tod_write_overhead_ns = div_s64(total_ns,
+ TOD_WRITE_OVERHEAD_COUNT_MAX);
+
+ return err;
+}
+
+static int idtcm_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct idtcm *idtcm;
+ int err;
+ u8 i;
+
+ /* Unused for now */
+ (void)id;
+
+ idtcm = devm_kzalloc(&client->dev, sizeof(struct idtcm), GFP_KERNEL);
+
+ if (!idtcm)
+ return -ENOMEM;
+
+ idtcm->client = client;
+ idtcm->page_offset = 0xff;
+ idtcm->calculate_overhead_flag = 0;
+
+ set_default_masks(idtcm);
+
+ mutex_init(&idtcm->reg_lock);
+ mutex_lock(&idtcm->reg_lock);
+
+ idtcm_display_version_info(idtcm);
+
+ err = set_tod_write_overhead(idtcm);
+
+ if (err) {
+ mutex_unlock(&idtcm->reg_lock);
+ return err;
+ }
+
+ err = idtcm_load_firmware(idtcm, &client->dev);
+
+ if (err)
+ dev_warn(&idtcm->client->dev,
+ "loading firmware failed with %d\n", err);
+
+ if (idtcm->pll_mask) {
+ for (i = 0; i < MAX_PHC_PLL; i++) {
+ if (idtcm->pll_mask & (1 << i)) {
+ err = idtcm_enable_channel(idtcm, i);
+ if (err)
+ break;
+ }
+ }
+ } else {
+ dev_err(&idtcm->client->dev,
+ "no PLLs flagged as PHCs, nothing to do\n");
+ err = -ENODEV;
+ }
+
+ mutex_unlock(&idtcm->reg_lock);
+
+ if (err) {
+ ptp_clock_unregister_all(idtcm);
+ return err;
+ }
+
+ i2c_set_clientdata(client, idtcm);
+
+ return 0;
+}
+
+static int idtcm_remove(struct i2c_client *client)
+{
+ struct idtcm *idtcm = i2c_get_clientdata(client);
+
+ ptp_clock_unregister_all(idtcm);
+
+ mutex_destroy(&idtcm->reg_lock);
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id idtcm_dt_id[] = {
+ { .compatible = "idt,8a34000" },
+ { .compatible = "idt,8a34001" },
+ { .compatible = "idt,8a34002" },
+ { .compatible = "idt,8a34003" },
+ { .compatible = "idt,8a34004" },
+ { .compatible = "idt,8a34005" },
+ { .compatible = "idt,8a34006" },
+ { .compatible = "idt,8a34007" },
+ { .compatible = "idt,8a34008" },
+ { .compatible = "idt,8a34009" },
+ { .compatible = "idt,8a34010" },
+ { .compatible = "idt,8a34011" },
+ { .compatible = "idt,8a34012" },
+ { .compatible = "idt,8a34013" },
+ { .compatible = "idt,8a34014" },
+ { .compatible = "idt,8a34015" },
+ { .compatible = "idt,8a34016" },
+ { .compatible = "idt,8a34017" },
+ { .compatible = "idt,8a34018" },
+ { .compatible = "idt,8a34019" },
+ { .compatible = "idt,8a34040" },
+ { .compatible = "idt,8a34041" },
+ { .compatible = "idt,8a34042" },
+ { .compatible = "idt,8a34043" },
+ { .compatible = "idt,8a34044" },
+ { .compatible = "idt,8a34045" },
+ { .compatible = "idt,8a34046" },
+ { .compatible = "idt,8a34047" },
+ { .compatible = "idt,8a34048" },
+ { .compatible = "idt,8a34049" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, idtcm_dt_id);
+#endif
+
+static const struct i2c_device_id idtcm_i2c_id[] = {
+ { "8a34000" },
+ { "8a34001" },
+ { "8a34002" },
+ { "8a34003" },
+ { "8a34004" },
+ { "8a34005" },
+ { "8a34006" },
+ { "8a34007" },
+ { "8a34008" },
+ { "8a34009" },
+ { "8a34010" },
+ { "8a34011" },
+ { "8a34012" },
+ { "8a34013" },
+ { "8a34014" },
+ { "8a34015" },
+ { "8a34016" },
+ { "8a34017" },
+ { "8a34018" },
+ { "8a34019" },
+ { "8a34040" },
+ { "8a34041" },
+ { "8a34042" },
+ { "8a34043" },
+ { "8a34044" },
+ { "8a34045" },
+ { "8a34046" },
+ { "8a34047" },
+ { "8a34048" },
+ { "8a34049" },
+ {},
+};
+MODULE_DEVICE_TABLE(i2c, idtcm_i2c_id);
+
+static struct i2c_driver idtcm_driver = {
+ .driver = {
+ .of_match_table = of_match_ptr(idtcm_dt_id),
+ .name = "idtcm",
+ },
+ .probe = idtcm_probe,
+ .remove = idtcm_remove,
+ .id_table = idtcm_i2c_id,
+};
+
+module_i2c_driver(idtcm_driver);
diff --git a/drivers/ptp/ptp_clockmatrix.h b/drivers/ptp/ptp_clockmatrix.h
new file mode 100644
index 000000000000..6c1f93ab46f3
--- /dev/null
+++ b/drivers/ptp/ptp_clockmatrix.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * PTP hardware clock driver for the IDT ClockMatrix(TM) family of timing and
+ * synchronization devices.
+ *
+ * Copyright (C) 2019 Integrated Device Technology, Inc., a Renesas Company.
+ */
+#ifndef PTP_IDTCLOCKMATRIX_H
+#define PTP_IDTCLOCKMATRIX_H
+
+#include <linux/ktime.h>
+
+#include "idt8a340_reg.h"
+
+#define FW_FILENAME "idtcm.bin"
+#define MAX_PHC_PLL 4
+
+#define PLL_MASK_ADDR (0xFFA5)
+#define DEFAULT_PLL_MASK (0x04)
+
+#define SET_U16_LSB(orig, val8) (orig = (0xff00 & (orig)) | (val8))
+#define SET_U16_MSB(orig, val8) (orig = (0x00ff & (orig)) | ((val8) << 8))
+
+#define OUTPUT_MASK_PLL0_ADDR (0xFFB0)
+#define OUTPUT_MASK_PLL1_ADDR (0xFFB2)
+#define OUTPUT_MASK_PLL2_ADDR (0xFFB4)
+#define OUTPUT_MASK_PLL3_ADDR (0xFFB6)
+
+#define DEFAULT_OUTPUT_MASK_PLL0 (0x003)
+#define DEFAULT_OUTPUT_MASK_PLL1 (0x00c)
+#define DEFAULT_OUTPUT_MASK_PLL2 (0x030)
+#define DEFAULT_OUTPUT_MASK_PLL3 (0x0c0)
+
+#define POST_SM_RESET_DELAY_MS (3000)
+#define PHASE_PULL_IN_THRESHOLD_NS (150000)
+#define TOD_WRITE_OVERHEAD_COUNT_MAX (5)
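+/* One ToD register image: sub-ns, nanoseconds and seconds fields */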
+#define TOD_BYTE_COUNT (11)
+
+/* Values of DPLL_N.DPLL_MODE.PLL_MODE */
+enum pll_mode {
+ PLL_MODE_MIN = 0,
+ PLL_MODE_NORMAL = PLL_MODE_MIN,
+ PLL_MODE_WRITE_PHASE = 1,
+ PLL_MODE_WRITE_FREQUENCY = 2,
+ PLL_MODE_GPIO_INC_DEC = 3,
+ PLL_MODE_SYNTHESIS = 4,
+ PLL_MODE_PHASE_MEASUREMENT = 5,
+ PLL_MODE_MAX = PLL_MODE_PHASE_MEASUREMENT,
+};
+
+enum hw_tod_write_trig_sel {
+ HW_TOD_WR_TRIG_SEL_MIN = 0,
+ HW_TOD_WR_TRIG_SEL_MSB = HW_TOD_WR_TRIG_SEL_MIN,
+ HW_TOD_WR_TRIG_SEL_RESERVED = 1,
+ HW_TOD_WR_TRIG_SEL_TOD_PPS = 2,
+ HW_TOD_WR_TRIG_SEL_IRIGB_PPS = 3,
+ HW_TOD_WR_TRIG_SEL_PWM_PPS = 4,
+ HW_TOD_WR_TRIG_SEL_GPIO = 5,
+ HW_TOD_WR_TRIG_SEL_FOD_SYNC = 6,
+ WR_TRIG_SEL_MAX = HW_TOD_WR_TRIG_SEL_FOD_SYNC,
+};
+
+struct idtcm;
+
+struct idtcm_channel {
+ struct ptp_clock_info caps;
+ struct ptp_clock *ptp_clock;
+ struct idtcm *idtcm;
+ u16 dpll_phase;
+ u16 dpll_freq;
+ u16 dpll_n;
+ u16 dpll_ctrl_n;
+ u16 dpll_phase_pull_in;
+ u16 tod_read_primary;
+ u16 tod_write;
+ u16 tod_n;
+ u16 hw_dpll_n;
+ enum pll_mode pll_mode;
+ u16 output_mask;
+};
+
+struct idtcm {
+ struct idtcm_channel channel[MAX_PHC_PLL];
+ struct i2c_client *client;
+ u8 page_offset;
+ u8 pll_mask;
+
+ /* Overhead calculation for adjtime */
+ u8 calculate_overhead_flag;
+ s64 tod_write_overhead_ns;
+ ktime_t start_time;
+
+ /* Protects I2C read/modify/write registers from concurrent access */
+ struct mutex reg_lock;
+};
+
+struct idtcm_fwrc {
+ u8 hiaddr;
+ u8 loaddr;
+ u8 value;
+ u8 reserved;
+} __packed;
+
+#endif /* PTP_IDTCLOCKMATRIX_H */
diff --git a/drivers/ptp/ptp_dte.c b/drivers/ptp/ptp_dte.c
index 0dcfdc806f57..82d31ba32690 100644
--- a/drivers/ptp/ptp_dte.c
+++ b/drivers/ptp/ptp_dte.c
@@ -240,14 +240,12 @@ static int ptp_dte_probe(struct platform_device *pdev)
{
struct ptp_dte *ptp_dte;
struct device *dev = &pdev->dev;
- struct resource *res;
ptp_dte = devm_kzalloc(dev, sizeof(struct ptp_dte), GFP_KERNEL);
if (!ptp_dte)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ptp_dte->regs = devm_ioremap_resource(dev, res);
+ ptp_dte->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ptp_dte->regs))
return PTR_ERR(ptp_dte->regs);
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 3ee63531f6d5..74eb5af7295f 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -841,10 +841,10 @@ config REGULATOR_SKY81452
will be called sky81452-regulator.
config REGULATOR_SLG51000
- tristate "Dialog Semiconductor SLG51000 regulators"
- depends on I2C
- select REGMAP_I2C
- help
+ tristate "Dialog Semiconductor SLG51000 regulators"
+ depends on I2C
+ select REGMAP_I2C
+ help
Say y here to support the Dialog Semiconductor SLG51000.
The SLG51000 provides seven compact and customizable low dropout
regulators.
diff --git a/drivers/regulator/ab8500.c b/drivers/regulator/ab8500.c
index efb2f01a9101..f60e1b26c2d2 100644
--- a/drivers/regulator/ab8500.c
+++ b/drivers/regulator/ab8500.c
@@ -953,23 +953,6 @@ static struct ab8500_regulator_info
.update_val_idle = 0x82,
.update_val_normal = 0x02,
},
- [AB8505_LDO_USB] = {
- .desc = {
- .name = "LDO-USB",
- .ops = &ab8500_regulator_mode_ops,
- .type = REGULATOR_VOLTAGE,
- .id = AB8505_LDO_USB,
- .owner = THIS_MODULE,
- .n_voltages = 1,
- .volt_table = fixed_3300000_voltage,
- },
- .update_bank = 0x03,
- .update_reg = 0x82,
- .update_mask = 0x03,
- .update_val = 0x01,
- .update_val_idle = 0x03,
- .update_val_normal = 0x01,
- },
[AB8505_LDO_AUDIO] = {
.desc = {
.name = "LDO-AUDIO",
diff --git a/drivers/regulator/bd70528-regulator.c b/drivers/regulator/bd70528-regulator.c
index 0248a61f1006..ec764022621f 100644
--- a/drivers/regulator/bd70528-regulator.c
+++ b/drivers/regulator/bd70528-regulator.c
@@ -286,3 +286,4 @@ module_platform_driver(bd70528_regulator);
MODULE_AUTHOR("Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>");
MODULE_DESCRIPTION("BD70528 voltage regulator driver");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:bd70528-pmic");
diff --git a/drivers/regulator/bd718x7-regulator.c b/drivers/regulator/bd718x7-regulator.c
index bdab46a5c461..13a43eee2e46 100644
--- a/drivers/regulator/bd718x7-regulator.c
+++ b/drivers/regulator/bd718x7-regulator.c
@@ -1293,3 +1293,4 @@ module_platform_driver(bd718xx_regulator);
MODULE_AUTHOR("Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>");
MODULE_DESCRIPTION("BD71837/BD71847 voltage regulator driver");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:bd718xx-pmic");
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index a46be221dbdc..679ad3d2ed23 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -1403,7 +1403,9 @@ static int set_machine_constraints(struct regulator_dev *rdev,
rdev_err(rdev, "failed to enable\n");
return ret;
}
- rdev->use_count++;
+
+ if (rdev->constraints->always_on)
+ rdev->use_count++;
}
print_constraints(rdev);
@@ -1844,6 +1846,7 @@ struct regulator *_regulator_get(struct device *dev, const char *id,
struct regulator_dev *rdev;
struct regulator *regulator;
const char *devname = dev ? dev_name(dev) : "deviceless";
+ struct device_link *link;
int ret;
if (get_type >= MAX_GET_TYPE) {
@@ -1951,7 +1954,9 @@ struct regulator *_regulator_get(struct device *dev, const char *id,
rdev->use_count = 0;
}
- device_link_add(dev, &rdev->dev, DL_FLAG_STATELESS);
+ link = device_link_add(dev, &rdev->dev, DL_FLAG_STATELESS);
+ if (!IS_ERR_OR_NULL(link))
+ regulator->device_link = true;
return regulator;
}
@@ -2046,7 +2051,8 @@ static void _regulator_put(struct regulator *regulator)
debugfs_remove_recursive(regulator->debugfs);
if (regulator->dev) {
- device_link_remove(regulator->dev, &rdev->dev);
+ if (regulator->device_link)
+ device_link_remove(regulator->dev, &rdev->dev);
/* remove any sysfs entries */
sysfs_remove_link(&rdev->dev.kobj, regulator->supply_name);
@@ -4963,6 +4969,12 @@ static int generic_coupler_attach(struct regulator_coupler *coupler,
return -EPERM;
}
+ if (!rdev->constraints->always_on) {
+ rdev_err(rdev,
+ "Coupling of a non always-on regulator is unimplemented\n");
+ return -ENOTSUPP;
+ }
+
return 0;
}
@@ -5198,6 +5210,7 @@ unset_supplies:
regulator_remove_coupling(rdev);
mutex_unlock(&regulator_list_mutex);
wash:
+ kfree(rdev->coupling_desc.coupled_rdevs);
kfree(rdev->constraints);
mutex_lock(&regulator_list_mutex);
regulator_ena_gpio_free(rdev);
diff --git a/drivers/regulator/da9062-regulator.c b/drivers/regulator/da9062-regulator.c
index 710e67081d53..d3ce0278bfbe 100644
--- a/drivers/regulator/da9062-regulator.c
+++ b/drivers/regulator/da9062-regulator.c
@@ -16,6 +16,7 @@
#include <linux/regulator/of_regulator.h>
#include <linux/mfd/da9062/core.h>
#include <linux/mfd/da9062/registers.h>
+#include <dt-bindings/regulator/dlg,da9063-regulator.h>
/* Regulator IDs */
enum {
@@ -75,14 +76,6 @@ struct da9062_regulators {
struct da9062_regulator regulator[0];
};
-/* BUCK modes */
-enum {
- BUCK_MODE_MANUAL, /* 0 */
- BUCK_MODE_SLEEP, /* 1 */
- BUCK_MODE_SYNC, /* 2 */
- BUCK_MODE_AUTO /* 3 */
-};
-
/* Regulator operations */
/* Current limits array (in uA)
@@ -105,6 +98,20 @@ static const unsigned int da9062_buck_b_limits[] = {
2300000, 2400000, 2500000, 2600000, 2700000, 2800000, 2900000, 3000000
};
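+/* Translate DA9063_BUCK_MODE_* DT constants into REGULATOR_MODE_* */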
+static unsigned int da9062_map_buck_mode(unsigned int mode)
+{
+ switch (mode) {
+ case DA9063_BUCK_MODE_SLEEP:
+ return REGULATOR_MODE_STANDBY;
+ case DA9063_BUCK_MODE_SYNC:
+ return REGULATOR_MODE_FAST;
+ case DA9063_BUCK_MODE_AUTO:
+ return REGULATOR_MODE_NORMAL;
+ default:
+ return REGULATOR_MODE_INVALID;
+ }
+}
+
static int da9062_buck_set_mode(struct regulator_dev *rdev, unsigned mode)
{
struct da9062_regulator *regl = rdev_get_drvdata(rdev);
@@ -112,13 +119,13 @@ static int da9062_buck_set_mode(struct regulator_dev *rdev, unsigned mode)
switch (mode) {
case REGULATOR_MODE_FAST:
- val = BUCK_MODE_SYNC;
+ val = DA9063_BUCK_MODE_SYNC;
break;
case REGULATOR_MODE_NORMAL:
- val = BUCK_MODE_AUTO;
+ val = DA9063_BUCK_MODE_AUTO;
break;
case REGULATOR_MODE_STANDBY:
- val = BUCK_MODE_SLEEP;
+ val = DA9063_BUCK_MODE_SLEEP;
break;
default:
return -EINVAL;
@@ -136,7 +143,7 @@ static int da9062_buck_set_mode(struct regulator_dev *rdev, unsigned mode)
static unsigned da9062_buck_get_mode(struct regulator_dev *rdev)
{
struct da9062_regulator *regl = rdev_get_drvdata(rdev);
- unsigned int val, mode = 0;
+ unsigned int val;
int ret;
ret = regmap_field_read(regl->mode, &val);
@@ -145,15 +152,13 @@ static unsigned da9062_buck_get_mode(struct regulator_dev *rdev)
switch (val) {
default:
- case BUCK_MODE_MANUAL:
- mode = REGULATOR_MODE_FAST | REGULATOR_MODE_STANDBY;
/* Sleep flag bit decides the mode */
break;
- case BUCK_MODE_SLEEP:
+ case DA9063_BUCK_MODE_SLEEP:
return REGULATOR_MODE_STANDBY;
- case BUCK_MODE_SYNC:
+ case DA9063_BUCK_MODE_SYNC:
return REGULATOR_MODE_FAST;
- case BUCK_MODE_AUTO:
+ case DA9063_BUCK_MODE_AUTO:
return REGULATOR_MODE_NORMAL;
}
@@ -162,11 +167,9 @@ static unsigned da9062_buck_get_mode(struct regulator_dev *rdev)
return 0;
if (val)
- mode &= REGULATOR_MODE_STANDBY;
+ return REGULATOR_MODE_STANDBY;
else
- mode &= REGULATOR_MODE_NORMAL | REGULATOR_MODE_FAST;
-
- return mode;
+ return REGULATOR_MODE_FAST;
}
/*
@@ -282,13 +285,13 @@ static int da9062_buck_set_suspend_mode(struct regulator_dev *rdev,
switch (mode) {
case REGULATOR_MODE_FAST:
- val = BUCK_MODE_SYNC;
+ val = DA9063_BUCK_MODE_SYNC;
break;
case REGULATOR_MODE_NORMAL:
- val = BUCK_MODE_AUTO;
+ val = DA9063_BUCK_MODE_AUTO;
break;
case REGULATOR_MODE_STANDBY:
- val = BUCK_MODE_SLEEP;
+ val = DA9063_BUCK_MODE_SLEEP;
break;
default:
return -EINVAL;
@@ -371,6 +374,7 @@ static const struct da9062_regulator_info local_da9061_regulator_info[] = {
.desc.vsel_reg = DA9062AA_VBUCK1_A,
.desc.vsel_mask = DA9062AA_VBUCK1_A_MASK,
.desc.linear_min_sel = 0,
+ .desc.of_map_mode = da9062_map_buck_mode,
.sleep = REG_FIELD(DA9062AA_VBUCK1_A,
__builtin_ffs((int)DA9062AA_BUCK1_SL_A_MASK) - 1,
sizeof(unsigned int) * 8 -
@@ -407,6 +411,7 @@ static const struct da9062_regulator_info local_da9061_regulator_info[] = {
.desc.vsel_reg = DA9062AA_VBUCK3_A,
.desc.vsel_mask = DA9062AA_VBUCK3_A_MASK,
.desc.linear_min_sel = 0,
+ .desc.of_map_mode = da9062_map_buck_mode,
.sleep = REG_FIELD(DA9062AA_VBUCK3_A,
__builtin_ffs((int)DA9062AA_BUCK3_SL_A_MASK) - 1,
sizeof(unsigned int) * 8 -
@@ -443,6 +448,7 @@ static const struct da9062_regulator_info local_da9061_regulator_info[] = {
.desc.vsel_reg = DA9062AA_VBUCK4_A,
.desc.vsel_mask = DA9062AA_VBUCK4_A_MASK,
.desc.linear_min_sel = 0,
+ .desc.of_map_mode = da9062_map_buck_mode,
.sleep = REG_FIELD(DA9062AA_VBUCK4_A,
__builtin_ffs((int)DA9062AA_BUCK4_SL_A_MASK) - 1,
sizeof(unsigned int) * 8 -
@@ -615,6 +621,7 @@ static const struct da9062_regulator_info local_da9062_regulator_info[] = {
.desc.vsel_reg = DA9062AA_VBUCK1_A,
.desc.vsel_mask = DA9062AA_VBUCK1_A_MASK,
.desc.linear_min_sel = 0,
+ .desc.of_map_mode = da9062_map_buck_mode,
.sleep = REG_FIELD(DA9062AA_VBUCK1_A,
__builtin_ffs((int)DA9062AA_BUCK1_SL_A_MASK) - 1,
sizeof(unsigned int) * 8 -
@@ -651,6 +658,7 @@ static const struct da9062_regulator_info local_da9062_regulator_info[] = {
.desc.vsel_reg = DA9062AA_VBUCK2_A,
.desc.vsel_mask = DA9062AA_VBUCK2_A_MASK,
.desc.linear_min_sel = 0,
+ .desc.of_map_mode = da9062_map_buck_mode,
.sleep = REG_FIELD(DA9062AA_VBUCK2_A,
__builtin_ffs((int)DA9062AA_BUCK2_SL_A_MASK) - 1,
sizeof(unsigned int) * 8 -
@@ -687,6 +695,7 @@ static const struct da9062_regulator_info local_da9062_regulator_info[] = {
.desc.vsel_reg = DA9062AA_VBUCK3_A,
.desc.vsel_mask = DA9062AA_VBUCK3_A_MASK,
.desc.linear_min_sel = 0,
+ .desc.of_map_mode = da9062_map_buck_mode,
.sleep = REG_FIELD(DA9062AA_VBUCK3_A,
__builtin_ffs((int)DA9062AA_BUCK3_SL_A_MASK) - 1,
sizeof(unsigned int) * 8 -
@@ -723,6 +732,7 @@ static const struct da9062_regulator_info local_da9062_regulator_info[] = {
.desc.vsel_reg = DA9062AA_VBUCK4_A,
.desc.vsel_mask = DA9062AA_VBUCK4_A_MASK,
.desc.linear_min_sel = 0,
+ .desc.of_map_mode = da9062_map_buck_mode,
.sleep = REG_FIELD(DA9062AA_VBUCK4_A,
__builtin_ffs((int)DA9062AA_BUCK4_SL_A_MASK) - 1,
sizeof(unsigned int) * 8 -
@@ -942,8 +952,7 @@ static int da9062_regulator_probe(struct platform_device *pdev)
regulators->n_regulators = max_regulators;
platform_set_drvdata(pdev, regulators);
- n = 0;
- while (n < regulators->n_regulators) {
+ for (n = 0; n < regulators->n_regulators; n++) {
/* Initialise regulator structure */
regl = &regulators->regulator[n];
regl->hw = chip;
@@ -1002,8 +1011,6 @@ static int da9062_regulator_probe(struct platform_device *pdev)
regl->desc.name);
return PTR_ERR(regl->rdev);
}
-
- n++;
}
/* LDOs overcurrent event support */
diff --git a/drivers/regulator/da9063-regulator.c b/drivers/regulator/da9063-regulator.c
index 28b1b20f45bd..2aceb3b7afc2 100644
--- a/drivers/regulator/da9063-regulator.c
+++ b/drivers/regulator/da9063-regulator.c
@@ -225,7 +225,7 @@ static unsigned da9063_buck_get_mode(struct regulator_dev *rdev)
{
struct da9063_regulator *regl = rdev_get_drvdata(rdev);
struct regmap_field *field;
- unsigned int val, mode = 0;
+ unsigned int val;
int ret;
ret = regmap_field_read(regl->mode, &val);
@@ -235,7 +235,6 @@ static unsigned da9063_buck_get_mode(struct regulator_dev *rdev)
switch (val) {
default:
case BUCK_MODE_MANUAL:
- mode = REGULATOR_MODE_FAST | REGULATOR_MODE_STANDBY;
/* Sleep flag bit decides the mode */
break;
case BUCK_MODE_SLEEP:
@@ -262,11 +261,9 @@ static unsigned da9063_buck_get_mode(struct regulator_dev *rdev)
return 0;
if (val)
- mode &= REGULATOR_MODE_STANDBY;
+ return REGULATOR_MODE_STANDBY;
else
- mode &= REGULATOR_MODE_NORMAL | REGULATOR_MODE_FAST;
-
- return mode;
+ return REGULATOR_MODE_FAST;
}
/*
diff --git a/drivers/regulator/da9211-regulator.c b/drivers/regulator/da9211-regulator.c
index bf80748f1ccc..523dc1b95826 100644
--- a/drivers/regulator/da9211-regulator.c
+++ b/drivers/regulator/da9211-regulator.c
@@ -283,12 +283,12 @@ static struct da9211_pdata *da9211_parse_regulators_dt(
pdata->init_data[n] = da9211_matches[i].init_data;
pdata->reg_node[n] = da9211_matches[i].of_node;
- pdata->gpiod_ren[n] = devm_gpiod_get_from_of_node(dev,
- da9211_matches[i].of_node,
- "enable-gpios",
- 0,
- GPIOD_OUT_HIGH | GPIOD_FLAGS_BIT_NONEXCLUSIVE,
- "da9211-enable");
+ pdata->gpiod_ren[n] = devm_fwnode_gpiod_get(dev,
+ of_fwnode_handle(pdata->reg_node[n]),
+ "enable",
+ GPIOD_OUT_HIGH |
+ GPIOD_FLAGS_BIT_NONEXCLUSIVE,
+ "da9211-enable");
if (IS_ERR(pdata->gpiod_ren[n]))
pdata->gpiod_ren[n] = NULL;
n++;
diff --git a/drivers/regulator/fan53555.c b/drivers/regulator/fan53555.c
index dbe477da4e55..00c83492f774 100644
--- a/drivers/regulator/fan53555.c
+++ b/drivers/regulator/fan53555.c
@@ -83,6 +83,7 @@ enum {
enum {
SILERGY_SYR82X = 8,
+ SILERGY_SYR83X = 9,
};
struct fan53555_device_info {
@@ -302,6 +303,7 @@ static int fan53555_voltages_setup_silergy(struct fan53555_device_info *di)
/* Init voltage range and step */
switch (di->chip_id) {
case SILERGY_SYR82X:
+ case SILERGY_SYR83X:
di->vsel_min = 712500;
di->vsel_step = 12500;
break;
diff --git a/drivers/regulator/fixed.c b/drivers/regulator/fixed.c
index f81533070058..bc0bbd99e98d 100644
--- a/drivers/regulator/fixed.c
+++ b/drivers/regulator/fixed.c
@@ -123,6 +123,7 @@ of_get_fixed_voltage_config(struct device *dev,
config->enabled_at_boot = true;
of_property_read_u32(np, "startup-delay-us", &config->startup_delay);
+ of_property_read_u32(np, "off-on-delay-us", &config->off_on_delay);
if (of_find_property(np, "vin-supply", NULL))
config->input_supply = "vin";
@@ -189,6 +190,7 @@ static int reg_fixed_voltage_probe(struct platform_device *pdev)
}
drvdata->desc.enable_time = config->startup_delay;
+ drvdata->desc.off_on_delay = config->off_on_delay;
if (config->input_supply) {
drvdata->desc.supply_name = devm_kstrdup(&pdev->dev,
diff --git a/drivers/regulator/internal.h b/drivers/regulator/internal.h
index 83ae442f515b..2391b565ef11 100644
--- a/drivers/regulator/internal.h
+++ b/drivers/regulator/internal.h
@@ -36,6 +36,7 @@ struct regulator {
struct list_head list;
unsigned int always_on:1;
unsigned int bypass:1;
+ unsigned int device_link:1;
int uA_load;
unsigned int enable_count;
unsigned int deferred_disables;
diff --git a/drivers/regulator/max77686-regulator.c b/drivers/regulator/max77686-regulator.c
index c8e579e99316..9089ec608fcc 100644
--- a/drivers/regulator/max77686-regulator.c
+++ b/drivers/regulator/max77686-regulator.c
@@ -256,8 +256,9 @@ static int max77686_of_parse_cb(struct device_node *np,
case MAX77686_BUCK8:
case MAX77686_BUCK9:
case MAX77686_LDO20 ... MAX77686_LDO22:
- config->ena_gpiod = gpiod_get_from_of_node(np,
- "maxim,ena-gpios",
+ config->ena_gpiod = fwnode_gpiod_get_index(
+ of_fwnode_handle(np),
+ "maxim,ena",
0,
GPIOD_OUT_HIGH | GPIOD_FLAGS_BIT_NONEXCLUSIVE,
"max77686-regulator");
diff --git a/drivers/regulator/max8907-regulator.c b/drivers/regulator/max8907-regulator.c
index 76152aaa330b..96dc0eea7659 100644
--- a/drivers/regulator/max8907-regulator.c
+++ b/drivers/regulator/max8907-regulator.c
@@ -296,7 +296,10 @@ static int max8907_regulator_probe(struct platform_device *pdev)
memcpy(pmic->desc, max8907_regulators, sizeof(pmic->desc));
/* Backwards compatibility with MAX8907B; SD1 uses different voltages */
- regmap_read(max8907->regmap_gen, MAX8907_REG_II2RR, &val);
+ ret = regmap_read(max8907->regmap_gen, MAX8907_REG_II2RR, &val);
+ if (ret)
+ return ret;
+
if ((val & MAX8907_II2RR_VERSION_MASK) ==
MAX8907_II2RR_VERSION_REV_B) {
pmic->desc[MAX8907_SD1].min_uV = 637500;
@@ -333,14 +336,20 @@ static int max8907_regulator_probe(struct platform_device *pdev)
}
if (pmic->desc[i].ops == &max8907_ldo_ops) {
- regmap_read(config.regmap, pmic->desc[i].enable_reg,
+ ret = regmap_read(config.regmap, pmic->desc[i].enable_reg,
&val);
+ if (ret)
+ return ret;
+
if ((val & MAX8907_MASK_LDO_SEQ) !=
MAX8907_MASK_LDO_SEQ)
pmic->desc[i].ops = &max8907_ldo_hwctl_ops;
} else if (pmic->desc[i].ops == &max8907_out5v_ops) {
- regmap_read(config.regmap, pmic->desc[i].enable_reg,
+ ret = regmap_read(config.regmap, pmic->desc[i].enable_reg,
&val);
+ if (ret)
+ return ret;
+
if ((val & (MAX8907_MASK_OUT5V_VINEN |
MAX8907_MASK_OUT5V_ENSRC)) !=
MAX8907_MASK_OUT5V_ENSRC)
diff --git a/drivers/regulator/pbias-regulator.c b/drivers/regulator/pbias-regulator.c
index 92b41a6a4dc2..bfc15dd3f730 100644
--- a/drivers/regulator/pbias-regulator.c
+++ b/drivers/regulator/pbias-regulator.c
@@ -38,15 +38,6 @@ struct pbias_reg_info {
int n_voltages;
};
-struct pbias_regulator_data {
- struct regulator_desc desc;
- void __iomem *pbias_addr;
- struct regulator_dev *dev;
- struct regmap *syscon;
- const struct pbias_reg_info *info;
- int voltage;
-};
-
struct pbias_of_data {
unsigned int offset;
};
@@ -157,14 +148,13 @@ MODULE_DEVICE_TABLE(of, pbias_of_match);
static int pbias_regulator_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
- struct pbias_regulator_data *drvdata;
struct resource *res;
struct regulator_config cfg = { };
+ struct regulator_desc *desc;
+ struct regulator_dev *rdev;
struct regmap *syscon;
const struct pbias_reg_info *info;
- int ret = 0;
- int count, idx, data_idx = 0;
- const struct of_device_id *match;
+ int ret, count, idx;
const struct pbias_of_data *data;
unsigned int offset;
@@ -173,19 +163,16 @@ static int pbias_regulator_probe(struct platform_device *pdev)
if (count < 0)
return count;
- drvdata = devm_kcalloc(&pdev->dev,
- count, sizeof(struct pbias_regulator_data),
- GFP_KERNEL);
- if (!drvdata)
+ desc = devm_kcalloc(&pdev->dev, count, sizeof(*desc), GFP_KERNEL);
+ if (!desc)
return -ENOMEM;
syscon = syscon_regmap_lookup_by_phandle(np, "syscon");
if (IS_ERR(syscon))
return PTR_ERR(syscon);
- match = of_match_device(of_match_ptr(pbias_of_match), &pdev->dev);
- if (match && match->data) {
- data = match->data;
+ data = of_device_get_match_data(&pdev->dev);
+ if (data) {
offset = data->offset;
} else {
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -200,7 +187,7 @@ static int pbias_regulator_probe(struct platform_device *pdev)
cfg.regmap = syscon;
cfg.dev = &pdev->dev;
- for (idx = 0; idx < PBIAS_NUM_REGS && data_idx < count; idx++) {
+ for (idx = 0; idx < PBIAS_NUM_REGS && count; idx++) {
if (!pbias_matches[idx].init_data ||
!pbias_matches[idx].of_node)
continue;
@@ -209,41 +196,35 @@ static int pbias_regulator_probe(struct platform_device *pdev)
if (!info)
return -ENODEV;
- drvdata[data_idx].syscon = syscon;
- drvdata[data_idx].info = info;
- drvdata[data_idx].desc.name = info->name;
- drvdata[data_idx].desc.owner = THIS_MODULE;
- drvdata[data_idx].desc.type = REGULATOR_VOLTAGE;
- drvdata[data_idx].desc.ops = &pbias_regulator_voltage_ops;
- drvdata[data_idx].desc.volt_table = info->pbias_volt_table;
- drvdata[data_idx].desc.n_voltages = info->n_voltages;
- drvdata[data_idx].desc.enable_time = info->enable_time;
- drvdata[data_idx].desc.vsel_reg = offset;
- drvdata[data_idx].desc.vsel_mask = info->vmode;
- drvdata[data_idx].desc.enable_reg = offset;
- drvdata[data_idx].desc.enable_mask = info->enable_mask;
- drvdata[data_idx].desc.enable_val = info->enable;
- drvdata[data_idx].desc.disable_val = info->disable_val;
+ desc->name = info->name;
+ desc->owner = THIS_MODULE;
+ desc->type = REGULATOR_VOLTAGE;
+ desc->ops = &pbias_regulator_voltage_ops;
+ desc->volt_table = info->pbias_volt_table;
+ desc->n_voltages = info->n_voltages;
+ desc->enable_time = info->enable_time;
+ desc->vsel_reg = offset;
+ desc->vsel_mask = info->vmode;
+ desc->enable_reg = offset;
+ desc->enable_mask = info->enable_mask;
+ desc->enable_val = info->enable;
+ desc->disable_val = info->disable_val;
cfg.init_data = pbias_matches[idx].init_data;
- cfg.driver_data = &drvdata[data_idx];
cfg.of_node = pbias_matches[idx].of_node;
- drvdata[data_idx].dev = devm_regulator_register(&pdev->dev,
- &drvdata[data_idx].desc, &cfg);
- if (IS_ERR(drvdata[data_idx].dev)) {
- ret = PTR_ERR(drvdata[data_idx].dev);
+ rdev = devm_regulator_register(&pdev->dev, desc, &cfg);
+ if (IS_ERR(rdev)) {
+ ret = PTR_ERR(rdev);
dev_err(&pdev->dev,
"Failed to register regulator: %d\n", ret);
- goto err_regulator;
+ return ret;
}
- data_idx++;
+ desc++;
+ count--;
}
- platform_set_drvdata(pdev, drvdata);
-
-err_regulator:
- return ret;
+ return 0;
}
static struct platform_driver pbias_regulator_driver = {
diff --git a/drivers/regulator/pcap-regulator.c b/drivers/regulator/pcap-regulator.c
index c2469263db95..0345f38f6f78 100644
--- a/drivers/regulator/pcap-regulator.c
+++ b/drivers/regulator/pcap-regulator.c
@@ -86,10 +86,6 @@ static const unsigned int SW1_table[] = {
#define SW2_table SW1_table
-static const unsigned int SW3_table[] = {
- 4000000, 4500000, 5000000, 5500000,
-};
-
struct pcap_regulator {
const u8 reg;
const u8 en;
diff --git a/drivers/regulator/qcom-rpmh-regulator.c b/drivers/regulator/qcom-rpmh-regulator.c
index 0246b6f99fb5..c86ad40015ce 100644
--- a/drivers/regulator/qcom-rpmh-regulator.c
+++ b/drivers/regulator/qcom-rpmh-regulator.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-// Copyright (c) 2018, The Linux Foundation. All rights reserved.
+// Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
#define pr_fmt(fmt) "%s: " fmt, __func__
@@ -878,6 +878,58 @@ static const struct rpmh_vreg_init_data pm8009_vreg_data[] = {
{},
};
+static const struct rpmh_vreg_init_data pm6150_vreg_data[] = {
+ RPMH_VREG("smps1", "smp%s1", &pmic5_ftsmps510, "vdd-s1"),
+ RPMH_VREG("smps2", "smp%s2", &pmic5_ftsmps510, "vdd-s2"),
+ RPMH_VREG("smps3", "smp%s3", &pmic5_ftsmps510, "vdd-s3"),
+ RPMH_VREG("smps4", "smp%s4", &pmic5_hfsmps510, "vdd-s4"),
+ RPMH_VREG("smps5", "smp%s5", &pmic5_hfsmps510, "vdd-s5"),
+ RPMH_VREG("ldo1", "ldo%s1", &pmic5_nldo, "vdd-l1"),
+ RPMH_VREG("ldo2", "ldo%s2", &pmic5_nldo, "vdd-l2-l3"),
+ RPMH_VREG("ldo3", "ldo%s3", &pmic5_nldo, "vdd-l2-l3"),
+ RPMH_VREG("ldo4", "ldo%s4", &pmic5_nldo, "vdd-l4-l7-l8"),
+ RPMH_VREG("ldo5", "ldo%s5", &pmic5_pldo, "vdd-l5-l16-l17-l18-l19"),
+ RPMH_VREG("ldo6", "ldo%s6", &pmic5_nldo, "vdd-l6"),
+ RPMH_VREG("ldo7", "ldo%s7", &pmic5_nldo, "vdd-l4-l7-l8"),
+ RPMH_VREG("ldo8", "ldo%s8", &pmic5_nldo, "vdd-l4-l7-l8"),
+ RPMH_VREG("ldo9", "ldo%s9", &pmic5_nldo, "vdd-l9"),
+ RPMH_VREG("ldo10", "ldo%s10", &pmic5_pldo_lv, "vdd-l10-l14-l15"),
+ RPMH_VREG("ldo11", "ldo%s11", &pmic5_pldo_lv, "vdd-l11-l12-l13"),
+ RPMH_VREG("ldo12", "ldo%s12", &pmic5_pldo_lv, "vdd-l11-l12-l13"),
+ RPMH_VREG("ldo13", "ldo%s13", &pmic5_pldo_lv, "vdd-l11-l12-l13"),
+ RPMH_VREG("ldo14", "ldo%s14", &pmic5_pldo_lv, "vdd-l10-l14-l15"),
+ RPMH_VREG("ldo15", "ldo%s15", &pmic5_pldo_lv, "vdd-l10-l14-l15"),
+ RPMH_VREG("ldo16", "ldo%s16", &pmic5_pldo, "vdd-l5-l16-l17-l18-l19"),
+ RPMH_VREG("ldo17", "ldo%s17", &pmic5_pldo, "vdd-l5-l16-l17-l18-l19"),
+ RPMH_VREG("ldo18", "ldo%s18", &pmic5_pldo, "vdd-l5-l16-l17-l18-l19"),
+ RPMH_VREG("ldo19", "ldo%s19", &pmic5_pldo, "vdd-l5-l16-l17-l18-l19"),
+ {},
+};
+
+static const struct rpmh_vreg_init_data pm6150l_vreg_data[] = {
+ RPMH_VREG("smps1", "smp%s1", &pmic5_ftsmps510, "vdd-s1"),
+ RPMH_VREG("smps2", "smp%s2", &pmic5_ftsmps510, "vdd-s2"),
+ RPMH_VREG("smps3", "smp%s3", &pmic5_ftsmps510, "vdd-s3"),
+ RPMH_VREG("smps4", "smp%s4", &pmic5_ftsmps510, "vdd-s4"),
+ RPMH_VREG("smps5", "smp%s5", &pmic5_ftsmps510, "vdd-s5"),
+ RPMH_VREG("smps6", "smp%s6", &pmic5_ftsmps510, "vdd-s6"),
+ RPMH_VREG("smps7", "smp%s7", &pmic5_ftsmps510, "vdd-s7"),
+ RPMH_VREG("smps8", "smp%s8", &pmic5_hfsmps510, "vdd-s8"),
+ RPMH_VREG("ldo1", "ldo%s1", &pmic5_pldo_lv, "vdd-l1-l8"),
+ RPMH_VREG("ldo2", "ldo%s2", &pmic5_nldo, "vdd-l2-l3"),
+ RPMH_VREG("ldo3", "ldo%s3", &pmic5_nldo, "vdd-l2-l3"),
+ RPMH_VREG("ldo4", "ldo%s4", &pmic5_pldo, "vdd-l4-l5-l6"),
+ RPMH_VREG("ldo5", "ldo%s5", &pmic5_pldo, "vdd-l4-l5-l6"),
+ RPMH_VREG("ldo6", "ldo%s6", &pmic5_pldo, "vdd-l4-l5-l6"),
+ RPMH_VREG("ldo7", "ldo%s7", &pmic5_pldo, "vdd-l7-l11"),
+ RPMH_VREG("ldo8", "ldo%s8", &pmic5_pldo, "vdd-l1-l8"),
+ RPMH_VREG("ldo9", "ldo%s9", &pmic5_pldo, "vdd-l9-l10"),
+ RPMH_VREG("ldo10", "ldo%s10", &pmic5_pldo, "vdd-l9-l10"),
+ RPMH_VREG("ldo11", "ldo%s11", &pmic5_pldo, "vdd-l7-l11"),
+ RPMH_VREG("bob", "bob%s1", &pmic5_bob, "vdd-bob"),
+ {},
+};
+
static int rpmh_regulator_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -940,6 +992,14 @@ static const struct of_device_id rpmh_regulator_match_table[] = {
.compatible = "qcom,pmi8998-rpmh-regulators",
.data = pmi8998_vreg_data,
},
+ {
+ .compatible = "qcom,pm6150-rpmh-regulators",
+ .data = pm6150_vreg_data,
+ },
+ {
+ .compatible = "qcom,pm6150l-rpmh-regulators",
+ .data = pm6150l_vreg_data,
+ },
{}
};
MODULE_DEVICE_TABLE(of, rpmh_regulator_match_table);
diff --git a/drivers/regulator/qcom_smd-regulator.c b/drivers/regulator/qcom_smd-regulator.c
index 3b0828c79e2b..fff8d5fdef6a 100644
--- a/drivers/regulator/qcom_smd-regulator.c
+++ b/drivers/regulator/qcom_smd-regulator.c
@@ -338,6 +338,63 @@ static const struct regulator_desc pm8916_buck_hvo_smps = {
.ops = &rpm_smps_ldo_ops,
};
+static const struct regulator_desc pm8950_hfsmps = {
+ .linear_ranges = (struct regulator_linear_range[]) {
+ REGULATOR_LINEAR_RANGE(375000, 0, 95, 12500),
+ REGULATOR_LINEAR_RANGE(1550000, 96, 127, 25000),
+ },
+ .n_linear_ranges = 2,
+ .n_voltages = 128,
+ .ops = &rpm_smps_ldo_ops,
+};
+
+static const struct regulator_desc pm8950_ftsmps2p5 = {
+ .linear_ranges = (struct regulator_linear_range[]) {
+ REGULATOR_LINEAR_RANGE(80000, 0, 255, 5000),
+ REGULATOR_LINEAR_RANGE(160000, 256, 460, 10000),
+ },
+ .n_linear_ranges = 2,
+ .n_voltages = 461,
+ .ops = &rpm_smps_ldo_ops,
+};
+
+static const struct regulator_desc pm8950_ult_nldo = {
+ .linear_ranges = (struct regulator_linear_range[]) {
+ REGULATOR_LINEAR_RANGE(375000, 0, 202, 12500),
+ },
+ .n_linear_ranges = 1,
+ .n_voltages = 203,
+ .ops = &rpm_smps_ldo_ops,
+};
+
+static const struct regulator_desc pm8950_ult_pldo = {
+ .linear_ranges = (struct regulator_linear_range[]) {
+ REGULATOR_LINEAR_RANGE(1750000, 0, 127, 12500),
+ },
+ .n_linear_ranges = 1,
+ .n_voltages = 128,
+ .ops = &rpm_smps_ldo_ops,
+};
+
+static const struct regulator_desc pm8950_pldo_lv = {
+ .linear_ranges = (struct regulator_linear_range[]) {
+ REGULATOR_LINEAR_RANGE(1500000, 0, 16, 25000),
+ },
+ .n_linear_ranges = 1,
+ .n_voltages = 17,
+ .ops = &rpm_smps_ldo_ops,
+};
+
+static const struct regulator_desc pm8950_pldo = {
+ .linear_ranges = (struct regulator_linear_range[]) {
+ REGULATOR_LINEAR_RANGE(975000, 0, 164, 12500),
+ },
+ .n_linear_ranges = 1,
+ .n_voltages = 165,
+ .ops = &rpm_smps_ldo_ops,
+};
+
static const struct regulator_desc pm8994_hfsmps = {
.linear_ranges = (struct regulator_linear_range[]) {
REGULATOR_LINEAR_RANGE( 375000, 0, 95, 12500),
@@ -638,6 +695,40 @@ static const struct rpm_regulator_data rpm_pma8084_regulators[] = {
{}
};
+static const struct rpm_regulator_data rpm_pm8950_regulators[] = {
+ { "s1", QCOM_SMD_RPM_SMPA, 1, &pm8950_hfsmps, "vdd_s1" },
+ { "s2", QCOM_SMD_RPM_SMPA, 2, &pm8950_hfsmps, "vdd_s2" },
+ { "s3", QCOM_SMD_RPM_SMPA, 3, &pm8950_hfsmps, "vdd_s3" },
+ { "s4", QCOM_SMD_RPM_SMPA, 4, &pm8950_hfsmps, "vdd_s4" },
+ { "s5", QCOM_SMD_RPM_SMPA, 5, &pm8950_ftsmps2p5, "vdd_s5" },
+ { "s6", QCOM_SMD_RPM_SMPA, 6, &pm8950_hfsmps, "vdd_s6" },
+
+ { "l1", QCOM_SMD_RPM_LDOA, 1, &pm8950_ult_nldo, "vdd_l1_l19" },
+ { "l2", QCOM_SMD_RPM_LDOA, 2, &pm8950_ult_nldo, "vdd_l2_l23" },
+ { "l3", QCOM_SMD_RPM_LDOA, 3, &pm8950_ult_nldo, "vdd_l3" },
+ { "l4", QCOM_SMD_RPM_LDOA, 4, &pm8950_ult_pldo, "vdd_l4_l5_l6_l7_l16" },
+ { "l5", QCOM_SMD_RPM_LDOA, 5, &pm8950_pldo_lv, "vdd_l4_l5_l6_l7_l16" },
+ { "l6", QCOM_SMD_RPM_LDOA, 6, &pm8950_pldo_lv, "vdd_l4_l5_l6_l7_l16" },
+ { "l7", QCOM_SMD_RPM_LDOA, 7, &pm8950_pldo_lv, "vdd_l4_l5_l6_l7_l16" },
+ { "l8", QCOM_SMD_RPM_LDOA, 8, &pm8950_ult_pldo, "vdd_l8_l11_l12_l17_l22" },
+ { "l9", QCOM_SMD_RPM_LDOA, 9, &pm8950_ult_pldo, "vdd_l9_l10_l13_l14_l15_l18" },
+ { "l10", QCOM_SMD_RPM_LDOA, 10, &pm8950_ult_nldo, "vdd_l9_l10_l13_l14_l15_l18"},
+ { "l11", QCOM_SMD_RPM_LDOA, 11, &pm8950_ult_pldo, "vdd_l8_l11_l12_l17_l22"},
+ { "l12", QCOM_SMD_RPM_LDOA, 12, &pm8950_ult_pldo, "vdd_l8_l11_l12_l17_l22"},
+ { "l13", QCOM_SMD_RPM_LDOA, 13, &pm8950_ult_pldo, "vdd_l9_l10_l13_l14_l15_l18"},
+ { "l14", QCOM_SMD_RPM_LDOA, 14, &pm8950_ult_pldo, "vdd_l9_l10_l13_l14_l15_l18"},
+ { "l15", QCOM_SMD_RPM_LDOA, 15, &pm8950_ult_pldo, "vdd_l9_l10_l13_l14_l15_l18"},
+ { "l16", QCOM_SMD_RPM_LDOA, 16, &pm8950_ult_pldo, "vdd_l4_l5_l6_l7_l16"},
+ { "l17", QCOM_SMD_RPM_LDOA, 17, &pm8950_ult_pldo, "vdd_l8_l11_l12_l17_l22"},
+ { "l18", QCOM_SMD_RPM_LDOA, 18, &pm8950_ult_pldo, "vdd_l9_l10_l13_l14_l15_l18"},
+ { "l19", QCOM_SMD_RPM_LDOA, 18, &pm8950_pldo, "vdd_l1_l19"},
+ { "l20", QCOM_SMD_RPM_LDOA, 18, &pm8950_pldo, "vdd_l20"},
+ { "l21", QCOM_SMD_RPM_LDOA, 18, &pm8950_pldo, "vdd_l21"},
+ { "l22", QCOM_SMD_RPM_LDOA, 18, &pm8950_pldo, "vdd_l8_l11_l12_l17_l22"},
+ { "l23", QCOM_SMD_RPM_LDOA, 18, &pm8950_pldo, "vdd_l2_l23"},
+ {}
+};
+
static const struct rpm_regulator_data rpm_pm8994_regulators[] = {
{ "s1", QCOM_SMD_RPM_SMPA, 1, &pm8994_ftsmps, "vdd_s1" },
{ "s2", QCOM_SMD_RPM_SMPA, 2, &pm8994_ftsmps, "vdd_s2" },
@@ -767,6 +858,7 @@ static const struct of_device_id rpm_of_match[] = {
{ .compatible = "qcom,rpm-pm8841-regulators", .data = &rpm_pm8841_regulators },
{ .compatible = "qcom,rpm-pm8916-regulators", .data = &rpm_pm8916_regulators },
{ .compatible = "qcom,rpm-pm8941-regulators", .data = &rpm_pm8941_regulators },
+ { .compatible = "qcom,rpm-pm8950-regulators", .data = &rpm_pm8950_regulators },
{ .compatible = "qcom,rpm-pm8994-regulators", .data = &rpm_pm8994_regulators },
{ .compatible = "qcom,rpm-pm8998-regulators", .data = &rpm_pm8998_regulators },
{ .compatible = "qcom,rpm-pma8084-regulators", .data = &rpm_pma8084_regulators },
diff --git a/drivers/regulator/qcom_spmi-regulator.c b/drivers/regulator/qcom_spmi-regulator.c
index 7f51c5fc8194..95737e4dd6bb 100644
--- a/drivers/regulator/qcom_spmi-regulator.c
+++ b/drivers/regulator/qcom_spmi-regulator.c
@@ -1869,6 +1869,39 @@ static const struct spmi_regulator_data pm8916_regulators[] = {
{ }
};
+static const struct spmi_regulator_data pm8950_regulators[] = {
+ { "s1", 0x1400, "vdd_s1", },
+ { "s2", 0x1700, "vdd_s2", },
+ { "s3", 0x1a00, "vdd_s3", },
+ { "s4", 0x1d00, "vdd_s4", },
+ { "s5", 0x2000, "vdd_s5", },
+ { "s6", 0x2300, "vdd_s6", },
+ { "l1", 0x4000, "vdd_l1_l19", },
+ { "l2", 0x4100, "vdd_l2_l23", },
+ { "l3", 0x4200, "vdd_l3", },
+ { "l4", 0x4300, "vdd_l4_l5_l6_l7_l16", },
+ { "l5", 0x4400, "vdd_l4_l5_l6_l7_l16", },
+ { "l6", 0x4500, "vdd_l4_l5_l6_l7_l16", },
+ { "l7", 0x4600, "vdd_l4_l5_l6_l7_l16", },
+ { "l8", 0x4700, "vdd_l8_l11_l12_l17_l22", },
+ { "l9", 0x4800, "vdd_l9_l10_l13_l14_l15_l18", },
+ { "l10", 0x4900, "vdd_l9_l10_l13_l14_l15_l18", },
+ { "l11", 0x4a00, "vdd_l8_l11_l12_l17_l22", },
+ { "l12", 0x4b00, "vdd_l8_l11_l12_l17_l22", },
+ { "l13", 0x4c00, "vdd_l9_l10_l13_l14_l15_l18", },
+ { "l14", 0x4d00, "vdd_l9_l10_l13_l14_l15_l18", },
+ { "l15", 0x4e00, "vdd_l9_l10_l13_l14_l15_l18", },
+ { "l16", 0x4f00, "vdd_l4_l5_l6_l7_l16", },
+ { "l17", 0x5000, "vdd_l8_l11_l12_l17_l22", },
+ { "l18", 0x5100, "vdd_l9_l10_l13_l14_l15_l18", },
+ { "l19", 0x5200, "vdd_l1_l19", },
+ { "l20", 0x5300, "vdd_l20", },
+ { "l21", 0x5400, "vdd_l21", },
+ { "l22", 0x5500, "vdd_l8_l11_l12_l17_l22", },
+ { "l23", 0x5600, "vdd_l2_l23", },
+ { }
+};
+
static const struct spmi_regulator_data pm8994_regulators[] = {
{ "s1", 0x1400, "vdd_s1", },
{ "s2", 0x1700, "vdd_s2", },
@@ -1927,6 +1960,12 @@ static const struct spmi_regulator_data pmi8994_regulators[] = {
{ }
};
+static const struct spmi_regulator_data pm8004_regulators[] = {
+ { "s2", 0x1700, "vdd_s2", },
+ { "s5", 0x2000, "vdd_s5", },
+ { }
+};
+
static const struct spmi_regulator_data pm8005_regulators[] = {
{ "s1", 0x1400, "vdd_s1", },
{ "s2", 0x1700, "vdd_s2", },
@@ -1941,10 +1980,12 @@ static const struct spmi_regulator_data pms405_regulators[] = {
};
static const struct of_device_id qcom_spmi_regulator_match[] = {
+ { .compatible = "qcom,pm8004-regulators", .data = &pm8004_regulators },
{ .compatible = "qcom,pm8005-regulators", .data = &pm8005_regulators },
{ .compatible = "qcom,pm8841-regulators", .data = &pm8841_regulators },
{ .compatible = "qcom,pm8916-regulators", .data = &pm8916_regulators },
{ .compatible = "qcom,pm8941-regulators", .data = &pm8941_regulators },
+ { .compatible = "qcom,pm8950-regulators", .data = &pm8950_regulators },
{ .compatible = "qcom,pm8994-regulators", .data = &pm8994_regulators },
{ .compatible = "qcom,pmi8994-regulators", .data = &pmi8994_regulators },
{ .compatible = "qcom,pms405-regulators", .data = &pms405_regulators },
diff --git a/drivers/regulator/rk808-regulator.c b/drivers/regulator/rk808-regulator.c
index 61bd5ef0806c..5b4003226484 100644
--- a/drivers/regulator/rk808-regulator.c
+++ b/drivers/regulator/rk808-regulator.c
@@ -388,7 +388,7 @@ static int rk817_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
break;
default:
dev_warn(&rdev->dev,
- "%s ramp_delay: %d not supported, setting 10000\n",
+ "%s ramp_delay: %d not supported, setting 25000\n",
rdev->desc->name, ramp_delay);
}
@@ -411,21 +411,6 @@ static int rk808_set_suspend_voltage(struct regulator_dev *rdev, int uv)
sel);
}
-static int rk817_set_suspend_voltage(struct regulator_dev *rdev, int uv)
-{
- unsigned int reg;
- int sel = regulator_map_voltage_linear(rdev, uv, uv);
- /* only ldo1~ldo9 */
- if (sel < 0)
- return -EINVAL;
-
- reg = rdev->desc->vsel_reg + RK808_SLP_REG_OFFSET;
-
- return regmap_update_bits(rdev->regmap, reg,
- rdev->desc->vsel_mask,
- sel);
-}
-
static int rk808_set_suspend_voltage_range(struct regulator_dev *rdev, int uv)
{
unsigned int reg;
@@ -686,7 +671,7 @@ static const struct regulator_linear_range rk805_buck_1_2_voltage_ranges[] = {
REGULATOR_LINEAR_RANGE(2300000, 63, 63, 0),
};
-static struct regulator_ops rk809_buck5_ops_range = {
+static const struct regulator_ops rk809_buck5_ops_range = {
.list_voltage = regulator_list_voltage_linear_range,
.map_voltage = regulator_map_voltage_linear_range,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
@@ -700,7 +685,7 @@ static struct regulator_ops rk809_buck5_ops_range = {
.set_suspend_disable = rk817_set_suspend_disable,
};
-static struct regulator_ops rk817_reg_ops = {
+static const struct regulator_ops rk817_reg_ops = {
.list_voltage = regulator_list_voltage_linear,
.map_voltage = regulator_map_voltage_linear,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
@@ -708,12 +693,12 @@ static struct regulator_ops rk817_reg_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = rk8xx_is_enabled_wmsk_regmap,
- .set_suspend_voltage = rk817_set_suspend_voltage,
+ .set_suspend_voltage = rk808_set_suspend_voltage,
.set_suspend_enable = rk817_set_suspend_enable,
.set_suspend_disable = rk817_set_suspend_disable,
};
-static struct regulator_ops rk817_boost_ops = {
+static const struct regulator_ops rk817_boost_ops = {
.list_voltage = regulator_list_voltage_linear,
.map_voltage = regulator_map_voltage_linear,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
@@ -725,7 +710,7 @@ static struct regulator_ops rk817_boost_ops = {
.set_suspend_disable = rk817_set_suspend_disable,
};
-static struct regulator_ops rk817_buck_ops_range = {
+static const struct regulator_ops rk817_buck_ops_range = {
.list_voltage = regulator_list_voltage_linear_range,
.map_voltage = regulator_map_voltage_linear_range,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
@@ -743,7 +728,7 @@ static struct regulator_ops rk817_buck_ops_range = {
.set_suspend_disable = rk817_set_suspend_disable,
};
-static struct regulator_ops rk817_switch_ops = {
+static const struct regulator_ops rk817_switch_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = rk8xx_is_enabled_wmsk_regmap,
diff --git a/drivers/regulator/rn5t618-regulator.c b/drivers/regulator/rn5t618-regulator.c
index eb807a059479..4a91be0ad5ae 100644
--- a/drivers/regulator/rn5t618-regulator.c
+++ b/drivers/regulator/rn5t618-regulator.c
@@ -90,7 +90,7 @@ static const struct regulator_desc rc5t619_regulators[] = {
REG(LDO7, LDOEN1, BIT(6), LDO7DAC, 0x7f, 900000, 3500000, 25000),
REG(LDO8, LDOEN1, BIT(7), LDO8DAC, 0x7f, 900000, 3500000, 25000),
REG(LDO9, LDOEN2, BIT(0), LDO9DAC, 0x7f, 900000, 3500000, 25000),
- REG(LDO10, LDOEN2, BIT(0), LDO10DAC, 0x7f, 900000, 3500000, 25000),
+ REG(LDO10, LDOEN2, BIT(1), LDO10DAC, 0x7f, 900000, 3500000, 25000),
/* LDO RTC */
REG(LDORTC1, LDOEN2, BIT(4), LDORTCDAC, 0x7f, 1700000, 3500000, 25000),
REG(LDORTC2, LDOEN2, BIT(5), LDORTC2DAC, 0x7f, 900000, 3500000, 25000),
diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c
index 5bc00884cf51..4f2dc5ebffdc 100644
--- a/drivers/regulator/s2mps11.c
+++ b/drivers/regulator/s2mps11.c
@@ -844,10 +844,9 @@ static void s2mps14_pmic_dt_parse_ext_control_gpio(struct platform_device *pdev,
if (!rdata[reg].init_data || !rdata[reg].of_node)
continue;
- gpio[reg] = devm_gpiod_get_from_of_node(&pdev->dev,
- rdata[reg].of_node,
- "samsung,ext-control-gpios",
- 0,
+ gpio[reg] = devm_fwnode_gpiod_get(&pdev->dev,
+ of_fwnode_handle(rdata[reg].of_node),
+ "samsung,ext-control",
GPIOD_OUT_HIGH | GPIOD_FLAGS_BIT_NONEXCLUSIVE,
"s2mps11-regulator");
if (PTR_ERR(gpio[reg]) == -ENOENT)
diff --git a/drivers/regulator/s5m8767.c b/drivers/regulator/s5m8767.c
index 6ca27e9d5ef7..bdc07739e9a2 100644
--- a/drivers/regulator/s5m8767.c
+++ b/drivers/regulator/s5m8767.c
@@ -567,11 +567,10 @@ static int s5m8767_pmic_dt_parse_pdata(struct platform_device *pdev,
continue;
}
- rdata->ext_control_gpiod = devm_gpiod_get_from_of_node(
+ rdata->ext_control_gpiod = devm_fwnode_gpiod_get(
&pdev->dev,
- reg_np,
- "s5m8767,pmic-ext-control-gpios",
- 0,
+ of_fwnode_handle(reg_np),
+ "s5m8767,pmic-ext-control",
GPIOD_OUT_HIGH | GPIOD_FLAGS_BIT_NONEXCLUSIVE,
"s5m8767");
if (PTR_ERR(rdata->ext_control_gpiod) == -ENOENT)
diff --git a/drivers/regulator/slg51000-regulator.c b/drivers/regulator/slg51000-regulator.c
index a0565daecace..bf1a3508ebc4 100644
--- a/drivers/regulator/slg51000-regulator.c
+++ b/drivers/regulator/slg51000-regulator.c
@@ -198,17 +198,14 @@ static int slg51000_of_parse_cb(struct device_node *np,
const struct regulator_desc *desc,
struct regulator_config *config)
{
- struct slg51000 *chip = config->driver_data;
struct gpio_desc *ena_gpiod;
- enum gpiod_flags gflags = GPIOD_OUT_LOW | GPIOD_FLAGS_BIT_NONEXCLUSIVE;
- ena_gpiod = devm_gpiod_get_from_of_node(chip->dev, np,
- "enable-gpios", 0,
- gflags, "gpio-en-ldo");
- if (!IS_ERR(ena_gpiod)) {
+ ena_gpiod = fwnode_gpiod_get_index(of_fwnode_handle(np), "enable", 0,
+ GPIOD_OUT_LOW |
+ GPIOD_FLAGS_BIT_NONEXCLUSIVE,
+ "gpio-en-ldo");
+ if (!IS_ERR(ena_gpiod))
config->ena_gpiod = ena_gpiod;
- devm_gpiod_unhinge(chip->dev, config->ena_gpiod);
- }
return 0;
}
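
Note that slg51000 deliberately picks the non-devm fwnode_gpiod_get_index() here. The regulator core takes ownership of whatever descriptor is handed over in config->ena_gpiod and releases it itself, which is why the old code had to devm_gpiod_unhinge() the descriptor from the device to avoid a double release on unbind. Requesting it without devm removes that step entirely; in outline:

	/* The core owns ena_gpiod from registration onward, so the
	 * driver must not also hold a devm reference to it.
	 */
	ena_gpiod = fwnode_gpiod_get_index(of_fwnode_handle(np), "enable", 0,
					   GPIOD_OUT_LOW |
					   GPIOD_FLAGS_BIT_NONEXCLUSIVE,
					   "gpio-en-ldo");
	if (!IS_ERR(ena_gpiod))
		config->ena_gpiod = ena_gpiod;
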
diff --git a/drivers/regulator/stm32-vrefbuf.c b/drivers/regulator/stm32-vrefbuf.c
index 8919a5130bec..bdfaf7edb75a 100644
--- a/drivers/regulator/stm32-vrefbuf.c
+++ b/drivers/regulator/stm32-vrefbuf.c
@@ -181,7 +181,6 @@ static const struct regulator_desc stm32_vrefbuf_regu = {
static int stm32_vrefbuf_probe(struct platform_device *pdev)
{
- struct resource *res;
struct stm32_vrefbuf *priv;
struct regulator_config config = { };
struct regulator_dev *rdev;
@@ -192,8 +191,7 @@ static int stm32_vrefbuf_probe(struct platform_device *pdev)
return -ENOMEM;
priv->dev = &pdev->dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->base = devm_ioremap_resource(&pdev->dev, res);
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
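
devm_platform_ioremap_resource() is a straight fold of the two calls it replaces, so this conversion (and the identical one in uniphier further down) is purely mechanical. For reference, the helper is essentially:

	void __iomem *devm_platform_ioremap_resource(struct platform_device *pdev,
						     unsigned int index)
	{
		struct resource *res;

		res = platform_get_resource(pdev, IORESOURCE_MEM, index);
		return devm_ioremap_resource(&pdev->dev, res);
	}
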
diff --git a/drivers/regulator/stpmic1_regulator.c b/drivers/regulator/stpmic1_regulator.c
index f09061473613..f3d7d007ecbb 100644
--- a/drivers/regulator/stpmic1_regulator.c
+++ b/drivers/regulator/stpmic1_regulator.c
@@ -54,6 +54,8 @@ enum {
/* Enable time worst case is 5000mV/(2250uV/uS) */
#define PMIC_ENABLE_TIME_US 2200
+/* Ramp delay worst case is (2250uV/uS) */
+#define PMIC_RAMP_DELAY 2200
static const struct regulator_linear_range buck1_ranges[] = {
REGULATOR_LINEAR_RANGE(725000, 0, 4, 0),
@@ -208,6 +210,7 @@ static const struct regulator_ops stpmic1_switch_regul_ops = {
.enable_val = 1, \
.disable_val = 0, \
.enable_time = PMIC_ENABLE_TIME_US, \
+ .ramp_delay = PMIC_RAMP_DELAY, \
.supply_name = #base, \
}
@@ -227,6 +230,7 @@ static const struct regulator_ops stpmic1_switch_regul_ops = {
.enable_val = 1, \
.disable_val = 0, \
.enable_time = PMIC_ENABLE_TIME_US, \
+ .ramp_delay = PMIC_RAMP_DELAY, \
.bypass_reg = LDO3_ACTIVE_CR, \
.bypass_mask = LDO_BYPASS_MASK, \
.bypass_val_on = LDO_BYPASS_MASK, \
@@ -248,6 +252,7 @@ static const struct regulator_ops stpmic1_switch_regul_ops = {
.enable_val = 1, \
.disable_val = 0, \
.enable_time = PMIC_ENABLE_TIME_US, \
+ .ramp_delay = PMIC_RAMP_DELAY, \
.supply_name = #base, \
}
@@ -267,6 +272,7 @@ static const struct regulator_ops stpmic1_switch_regul_ops = {
.enable_val = 1, \
.disable_val = 0, \
.enable_time = PMIC_ENABLE_TIME_US, \
+ .ramp_delay = PMIC_RAMP_DELAY, \
.of_map_mode = stpmic1_map_mode, \
.pull_down_reg = ids##_PULL_DOWN_REG, \
.pull_down_mask = ids##_PULL_DOWN_MASK, \
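
With .ramp_delay populated, the regulator core can bound how long a voltage transition takes: consumers calling regulator_set_voltage_time() get a settle time derived from the voltage delta and the ramp rate in uV/us. Roughly, assuming no driver-specific override (a sketch of the core's arithmetic):

	/* Settle time from .ramp_delay (uV/us): */
	int settle_us = DIV_ROUND_UP(abs(new_uV - old_uV),
				     rdev->desc->ramp_delay);

	/* e.g. stepping 1.20V -> 1.35V at 2250 uV/us waits
	 * DIV_ROUND_UP(150000, 2250) = 67 us.
	 */
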
diff --git a/drivers/regulator/tps6105x-regulator.c b/drivers/regulator/tps6105x-regulator.c
index 06059a94f7c6..f8939af0bd2c 100644
--- a/drivers/regulator/tps6105x-regulator.c
+++ b/drivers/regulator/tps6105x-regulator.c
@@ -37,6 +37,7 @@ static struct regulator_ops tps6105x_regulator_ops = {
static const struct regulator_desc tps6105x_regulator_desc = {
.name = "tps6105x-boost",
+ .of_match = of_match_ptr("regulator"),
.ops = &tps6105x_regulator_ops,
.type = REGULATOR_VOLTAGE,
.id = 0,
@@ -71,6 +72,7 @@ static int tps6105x_regulator_probe(struct platform_device *pdev)
config.dev = &tps6105x->client->dev;
config.init_data = pdata->regulator_data;
config.driver_data = tps6105x;
+ config.of_node = pdev->dev.parent->of_node;
config.regmap = tps6105x->regmap;
/* Register regulator with framework */
diff --git a/drivers/regulator/tps65090-regulator.c b/drivers/regulator/tps65090-regulator.c
index 10ea4b5a0f55..f0b660e9f15f 100644
--- a/drivers/regulator/tps65090-regulator.c
+++ b/drivers/regulator/tps65090-regulator.c
@@ -346,16 +346,20 @@ static struct tps65090_platform_data *tps65090_parse_dt_reg_data(
for (idx = 0; idx < ARRAY_SIZE(tps65090_matches); idx++) {
struct regulator_init_data *ri_data;
struct tps65090_regulator_plat_data *rpdata;
+ struct device_node *np;
rpdata = &reg_pdata[idx];
ri_data = tps65090_matches[idx].init_data;
- if (!ri_data || !tps65090_matches[idx].of_node)
+ if (!ri_data)
+ continue;
+
+ np = tps65090_matches[idx].of_node;
+ if (!np)
continue;
rpdata->reg_init_data = ri_data;
- rpdata->enable_ext_control = of_property_read_bool(
- tps65090_matches[idx].of_node,
- "ti,enable-ext-control");
+ rpdata->enable_ext_control = of_property_read_bool(np,
+ "ti,enable-ext-control");
if (rpdata->enable_ext_control) {
enum gpiod_flags gflags;
@@ -366,11 +370,12 @@ static struct tps65090_platform_data *tps65090_parse_dt_reg_data(
gflags = GPIOD_OUT_LOW;
gflags |= GPIOD_FLAGS_BIT_NONEXCLUSIVE;
- rpdata->gpiod = devm_gpiod_get_from_of_node(&pdev->dev,
- tps65090_matches[idx].of_node,
- "dcdc-ext-control-gpios", 0,
- gflags,
- "tps65090");
+ rpdata->gpiod = devm_fwnode_gpiod_get(
+ &pdev->dev,
+ of_fwnode_handle(np),
+ "dcdc-ext-control",
+ gflags,
+ "tps65090");
if (PTR_ERR(rpdata->gpiod) == -ENOENT) {
dev_err(&pdev->dev,
"could not find DCDC external control GPIO\n");
@@ -379,8 +384,7 @@ static struct tps65090_platform_data *tps65090_parse_dt_reg_data(
return ERR_CAST(rpdata->gpiod);
}
- if (of_property_read_u32(tps65090_matches[idx].of_node,
- "ti,overcurrent-wait",
+ if (of_property_read_u32(np, "ti,overcurrent-wait",
&rpdata->overcurrent_wait) == 0)
rpdata->overcurrent_wait_valid = true;
diff --git a/drivers/regulator/tps65132-regulator.c b/drivers/regulator/tps65132-regulator.c
index e302bd01a084..7b0e38f8d627 100644
--- a/drivers/regulator/tps65132-regulator.c
+++ b/drivers/regulator/tps65132-regulator.c
@@ -136,9 +136,10 @@ static int tps65132_of_parse_cb(struct device_node *np,
struct tps65132_reg_pdata *rpdata = &tps->reg_pdata[desc->id];
int ret;
- rpdata->en_gpiod = devm_fwnode_get_index_gpiod_from_child(tps->dev,
- "enable", 0, &np->fwnode, 0, "enable");
- if (IS_ERR_OR_NULL(rpdata->en_gpiod)) {
+ rpdata->en_gpiod = devm_fwnode_gpiod_get(tps->dev, of_fwnode_handle(np),
+ "enable", GPIOD_ASIS,
+ "enable");
+ if (IS_ERR(rpdata->en_gpiod)) {
ret = PTR_ERR(rpdata->en_gpiod);
/* Ignore the error other than probe defer */
@@ -147,10 +148,12 @@ static int tps65132_of_parse_cb(struct device_node *np,
return 0;
}
- rpdata->act_dis_gpiod = devm_fwnode_get_index_gpiod_from_child(
- tps->dev, "active-discharge", 0,
- &np->fwnode, 0, "active-discharge");
- if (IS_ERR_OR_NULL(rpdata->act_dis_gpiod)) {
+ rpdata->act_dis_gpiod = devm_fwnode_gpiod_get(tps->dev,
+ of_fwnode_handle(np),
+ "active-discharge",
+ GPIOD_ASIS,
+ "active-discharge");
+ if (IS_ERR(rpdata->act_dis_gpiod)) {
ret = PTR_ERR(rpdata->act_dis_gpiod);
/* Ignore the error other than probe defer */
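
The switch from IS_ERR_OR_NULL() to IS_ERR() is not cosmetic: devm_fwnode_gpiod_get() never returns NULL, since a missing GPIO comes back as ERR_PTR(-ENOENT), so the NULL half of the old check was dead code that muddied the error handling. The tightened shape, sketched generically:

	desc = devm_fwnode_gpiod_get(dev, fwnode, "enable", GPIOD_ASIS,
				     "enable");
	if (IS_ERR(desc)) {
		if (PTR_ERR(desc) == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		desc = NULL;	/* anything else: treat as "not wired up" */
	}
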
diff --git a/drivers/regulator/uniphier-regulator.c b/drivers/regulator/uniphier-regulator.c
index 2311924c3103..2e02e26b516c 100644
--- a/drivers/regulator/uniphier-regulator.c
+++ b/drivers/regulator/uniphier-regulator.c
@@ -45,7 +45,6 @@ static int uniphier_regulator_probe(struct platform_device *pdev)
struct regulator_config config = { };
struct regulator_dev *rdev;
struct regmap *regmap;
- struct resource *res;
void __iomem *base;
const char *name;
int i, ret, nr;
@@ -58,8 +57,7 @@ static int uniphier_regulator_probe(struct platform_device *pdev)
if (WARN_ON(!priv->data))
return -EINVAL;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/drivers/regulator/vexpress-regulator.c b/drivers/regulator/vexpress-regulator.c
index 1235f46e633e..5d39663efcaa 100644
--- a/drivers/regulator/vexpress-regulator.c
+++ b/drivers/regulator/vexpress-regulator.c
@@ -75,10 +75,7 @@ static int vexpress_regulator_probe(struct platform_device *pdev)
config.of_node = pdev->dev.of_node;
rdev = devm_regulator_register(&pdev->dev, desc, &config);
- if (IS_ERR(rdev))
- return PTR_ERR(rdev);
-
- return 0;
+ return PTR_ERR_OR_ZERO(rdev);
}
static const struct of_device_id vexpress_regulator_of_match[] = {
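
PTR_ERR_OR_ZERO() collapses the common register-and-return tail seen above. It is defined in include/linux/err.h as:

	static inline int __must_check PTR_ERR_OR_ZERO(__force const void *ptr)
	{
		if (IS_ERR(ptr))
			return PTR_ERR(ptr);
		else
			return 0;
	}
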
diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c
index 5542d9eadfe0..7d079154f849 100644
--- a/drivers/s390/block/dasd_genhd.c
+++ b/drivers/s390/block/dasd_genhd.c
@@ -116,7 +116,9 @@ int dasd_scan_partitions(struct dasd_block *block)
return -ENODEV;
}
- rc = blkdev_reread_part(bdev);
+ mutex_lock(&bdev->bd_mutex);
+ rc = bdev_disk_changed(bdev, false);
+ mutex_unlock(&bdev->bd_mutex);
if (rc)
DBF_DEV_EVENT(DBF_ERR, block->base,
"scan partitions error, rc %d", rc);
diff --git a/drivers/s390/cio/Makefile b/drivers/s390/cio/Makefile
index f6a8db04177c..23eae4188876 100644
--- a/drivers/s390/cio/Makefile
+++ b/drivers/s390/cio/Makefile
@@ -5,7 +5,7 @@
# The following is required for define_trace.h to find ./trace.h
CFLAGS_trace.o := -I$(src)
-CFLAGS_vfio_ccw_fsm.o := -I$(src)
+CFLAGS_vfio_ccw_trace.o := -I$(src)
obj-y += airq.o blacklist.o chsc.o cio.o css.o chp.o idset.o isc.o \
fcx.o itcw.o crw.o ccwreq.o trace.o ioasm.o
@@ -21,5 +21,5 @@ qdio-objs := qdio_main.o qdio_thinint.o qdio_debug.o qdio_setup.o
obj-$(CONFIG_QDIO) += qdio.o
vfio_ccw-objs += vfio_ccw_drv.o vfio_ccw_cp.o vfio_ccw_ops.o vfio_ccw_fsm.o \
- vfio_ccw_async.o
+ vfio_ccw_async.o vfio_ccw_trace.o
obj-$(CONFIG_VFIO_CCW) += vfio_ccw.o
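
Both Makefile hunks serve the vfio_ccw tracepoint rework that appears later in this diff. Trace events may be expanded in exactly one translation unit, and define_trace.h re-includes the trace header by name, which is why that one object needs -I$(src) in its CFLAGS. With the expansion point moving out of the FSM code into a dedicated vfio_ccw_trace.o, that unit's entire job is:

	#define CREATE_TRACE_POINTS
	#include "vfio_ccw_trace.h"

Every other file simply includes vfio_ccw_trace.h without the define.
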
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index a58b45df95d7..4b0798472643 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -82,6 +82,7 @@ enum qdio_irq_states {
#define QDIO_SIGA_WRITE 0x00
#define QDIO_SIGA_READ 0x01
#define QDIO_SIGA_SYNC 0x02
+#define QDIO_SIGA_WRITEM 0x03
#define QDIO_SIGA_WRITEQ 0x04
#define QDIO_SIGA_QEBSM_FLAG 0x80
@@ -252,9 +253,6 @@ struct qdio_q {
/* input or output queue */
int is_input_q;
- /* list of thinint input queues */
- struct list_head entry;
-
/* upper-layer program handler */
qdio_handler_t (*handler);
@@ -272,6 +270,7 @@ struct qdio_irq {
struct qib qib;
u32 *dsci; /* address of device state change indicator */
struct ccw_device *cdev;
+ struct list_head entry; /* list of thinint devices */
struct dentry *debugfs_dev;
struct dentry *debugfs_perf;
@@ -317,13 +316,15 @@ struct qdio_irq {
#define qperf(__qdev, __attr) ((__qdev)->perf_stat.(__attr))
-#define qperf_inc(__q, __attr) \
+#define QDIO_PERF_STAT_INC(__irq, __attr) \
({ \
- struct qdio_irq *qdev = (__q)->irq_ptr; \
+ struct qdio_irq *qdev = __irq; \
if (qdev->perf_stat_enabled) \
(qdev->perf_stat.__attr)++; \
})
+#define qperf_inc(__q, __attr) QDIO_PERF_STAT_INC((__q)->irq_ptr, __attr)
+
static inline void account_sbals_error(struct qdio_q *q, int count)
{
q->q_stats.nr_sbal_error += count;
@@ -355,14 +356,10 @@ static inline int multicast_outbound(struct qdio_q *q)
for (i = 0; i < irq_ptr->nr_output_qs && \
({ q = irq_ptr->output_qs[i]; 1; }); i++)
-#define prev_buf(bufnr) \
- ((bufnr + QDIO_MAX_BUFFERS_MASK) & QDIO_MAX_BUFFERS_MASK)
-#define next_buf(bufnr) \
- ((bufnr + 1) & QDIO_MAX_BUFFERS_MASK)
-#define add_buf(bufnr, inc) \
- ((bufnr + inc) & QDIO_MAX_BUFFERS_MASK)
-#define sub_buf(bufnr, dec) \
- ((bufnr - dec) & QDIO_MAX_BUFFERS_MASK)
+#define add_buf(bufnr, inc) QDIO_BUFNR((bufnr) + (inc))
+#define next_buf(bufnr) add_buf(bufnr, 1)
+#define sub_buf(bufnr, dec) QDIO_BUFNR((bufnr) - (dec))
+#define prev_buf(bufnr) sub_buf(bufnr, 1)
#define queue_irqs_enabled(q) \
(test_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state) == 0)
@@ -375,8 +372,8 @@ extern u64 last_ai_time;
void qdio_setup_thinint(struct qdio_irq *irq_ptr);
int qdio_establish_thinint(struct qdio_irq *irq_ptr);
void qdio_shutdown_thinint(struct qdio_irq *irq_ptr);
-void tiqdio_add_input_queues(struct qdio_irq *irq_ptr);
-void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr);
+void tiqdio_add_device(struct qdio_irq *irq_ptr);
+void tiqdio_remove_device(struct qdio_irq *irq_ptr);
void tiqdio_inbound_processing(unsigned long q);
int tiqdio_allocate_memory(void);
void tiqdio_free_memory(void);
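
The buffer-index rework above leans on the ring size being a power of two: QDIO_BUFNR() masks any index into the 0..127 range, so wrap-around needs no modulo and even underflow (prev_buf(0)) comes out right. Assuming the usual asm/qdio.h definitions:

	#define QDIO_MAX_BUFFERS_PER_Q	128
	#define QDIO_MAX_BUFFERS_MASK	(QDIO_MAX_BUFFERS_PER_Q - 1)
	#define QDIO_BUFNR(num)		((num) & QDIO_MAX_BUFFERS_MASK)

	/* prev_buf(0) == QDIO_BUFNR(0 - 1) == 127: clean wrap-around */
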
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 5b63c505a2f7..f8b897b7e78b 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -131,7 +131,7 @@ again:
case 96:
/* not all buffers processed */
qperf_inc(q, eqbs_partial);
- DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS part:%02x",
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "EQBS part:%02x",
tmp_count);
return count - tmp_count;
case 97:
@@ -310,18 +310,19 @@ static inline int qdio_siga_sync_q(struct qdio_q *q)
return qdio_siga_sync(q, q->mask, 0);
}
-static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit,
- unsigned long aob)
+static int qdio_siga_output(struct qdio_q *q, unsigned int count,
+ unsigned int *busy_bit, unsigned long aob)
{
unsigned long schid = *((u32 *) &q->irq_ptr->schid);
unsigned int fc = QDIO_SIGA_WRITE;
u64 start_time = 0;
int retries = 0, cc;
- unsigned long laob = 0;
- if (aob) {
- fc = QDIO_SIGA_WRITEQ;
- laob = aob;
+ if (queue_type(q) == QDIO_IQDIO_QFMT && !multicast_outbound(q)) {
+ if (count > 1)
+ fc = QDIO_SIGA_WRITEM;
+ else if (aob)
+ fc = QDIO_SIGA_WRITEQ;
}
if (is_qebsm(q)) {
@@ -329,7 +330,7 @@ static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit,
fc |= QDIO_SIGA_QEBSM_FLAG;
}
again:
- cc = do_siga_output(schid, q->mask, busy_bit, fc, laob);
+ cc = do_siga_output(schid, q->mask, busy_bit, fc, aob);
/* hipersocket busy condition */
if (unlikely(*busy_bit)) {
@@ -423,9 +424,6 @@ static inline void account_sbals(struct qdio_q *q, unsigned int count)
static void process_buffer_error(struct qdio_q *q, unsigned int start,
int count)
{
- unsigned char state = (q->is_input_q) ? SLSB_P_INPUT_NOT_INIT :
- SLSB_P_OUTPUT_NOT_INIT;
-
q->qdio_error = QDIO_ERROR_SLSB_STATE;
/* special handling for no target buffer empty */
@@ -433,7 +431,7 @@ static void process_buffer_error(struct qdio_q *q, unsigned int start,
q->sbal[start]->element[15].sflags == 0x10) {
qperf_inc(q, target_full);
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x", start);
- goto set;
+ return;
}
DBF_ERROR("%4x BUF ERROR", SCH_NO(q));
@@ -442,13 +440,6 @@ static void process_buffer_error(struct qdio_q *q, unsigned int start,
DBF_ERROR("F14:%2x F15:%2x",
q->sbal[start]->element[14].sflags,
q->sbal[start]->element[15].sflags);
-
-set:
- /*
- * Interrupts may be avoided as long as the error is present
- * so change the buffer state immediately to avoid starvation.
- */
- set_buf_states(q, start, state, count);
}
static inline void inbound_primed(struct qdio_q *q, unsigned int start,
@@ -530,6 +521,11 @@ static int get_inbound_buffer_frontier(struct qdio_q *q, unsigned int start)
return count;
case SLSB_P_INPUT_ERROR:
process_buffer_error(q, start, count);
+ /*
+ * Interrupts may be avoided as long as the error is present
+ * so change the buffer state immediately to avoid starvation.
+ */
+ set_buf_states(q, start, SLSB_P_INPUT_NOT_INIT, count);
if (atomic_sub_return(count, &q->nr_buf_used) == 0)
qperf_inc(q, inbound_queue_full);
if (q->irq_ptr->perf_stat_enabled)
@@ -781,7 +777,8 @@ static inline int qdio_outbound_q_moved(struct qdio_q *q, unsigned int start)
return count;
}
-static int qdio_kick_outbound_q(struct qdio_q *q, unsigned long aob)
+static int qdio_kick_outbound_q(struct qdio_q *q, unsigned int count,
+ unsigned long aob)
{
int retries = 0, cc;
unsigned int busy_bit;
@@ -793,7 +790,7 @@ static int qdio_kick_outbound_q(struct qdio_q *q, unsigned long aob)
retry:
qperf_inc(q, siga_write);
- cc = qdio_siga_output(q, &busy_bit, aob);
+ cc = qdio_siga_output(q, count, &busy_bit, aob);
switch (cc) {
case 0:
break;
@@ -963,7 +960,7 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
/* skip if polling is enabled or already in work */
if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
&q->u.in.queue_irq_state)) {
- qperf_inc(q, int_discarded);
+ QDIO_PERF_STAT_INC(irq_ptr, int_discarded);
continue;
}
q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr,
@@ -1162,7 +1159,7 @@ int qdio_shutdown(struct ccw_device *cdev, int how)
*/
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
- tiqdio_remove_input_queues(irq_ptr);
+ tiqdio_remove_device(irq_ptr);
qdio_shutdown_queues(cdev);
qdio_shutdown_debug_entries(irq_ptr);
@@ -1284,6 +1281,7 @@ int qdio_allocate(struct qdio_initialize *init_data)
init_data->no_output_qs))
goto out_rel;
+ INIT_LIST_HEAD(&irq_ptr->entry);
init_data->cdev->private->qdio_data = irq_ptr;
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
return 0;
@@ -1428,7 +1426,7 @@ int qdio_activate(struct ccw_device *cdev)
}
if (is_thinint_irq(irq_ptr))
- tiqdio_add_input_queues(irq_ptr);
+ tiqdio_add_device(irq_ptr);
/* wait for subchannel to become active */
msleep(5);
@@ -1526,7 +1524,7 @@ set:
* @count: how many buffers are filled
*/
static int handle_outbound(struct qdio_q *q, unsigned int callflags,
- int bufnr, int count)
+ unsigned int bufnr, unsigned int count)
{
const unsigned int scan_threshold = q->irq_ptr->scan_threshold;
unsigned char state = 0;
@@ -1549,13 +1547,10 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags,
if (queue_type(q) == QDIO_IQDIO_QFMT) {
unsigned long phys_aob = 0;
- /* One SIGA-W per buffer required for unicast HSI */
- WARN_ON_ONCE(count > 1 && !multicast_outbound(q));
-
- if (q->u.out.use_cq)
+ if (q->u.out.use_cq && count == 1)
phys_aob = qdio_aob_for_buffer(&q->u.out, bufnr);
- rc = qdio_kick_outbound_q(q, phys_aob);
+ rc = qdio_kick_outbound_q(q, count, phys_aob);
} else if (need_siga_sync(q)) {
rc = qdio_siga_sync_q(q);
} else if (count < QDIO_MAX_BUFFERS_PER_Q &&
@@ -1564,7 +1559,7 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags,
/* The previous buffer is not processed yet, tack on. */
qperf_inc(q, fast_requeue);
} else {
- rc = qdio_kick_outbound_q(q, 0);
+ rc = qdio_kick_outbound_q(q, count, 0);
}
/* Let drivers implement their own completion scanning: */
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index cd164886132f..dc430bd86ade 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -150,7 +150,6 @@ static int __qdio_allocate_qs(struct qdio_q **irq_ptr_qs, int nr_queues)
return -ENOMEM;
}
irq_ptr_qs[i] = q;
- INIT_LIST_HEAD(&q->entry);
}
return 0;
}
@@ -179,7 +178,6 @@ static void setup_queues_misc(struct qdio_q *q, struct qdio_irq *irq_ptr,
q->mask = 1 << (31 - i);
q->nr = i;
q->handler = handler;
- INIT_LIST_HEAD(&q->entry);
}
static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr,
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
index 93ee067c10ca..7c4e4ec08a12 100644
--- a/drivers/s390/cio/qdio_thinint.c
+++ b/drivers/s390/cio/qdio_thinint.c
@@ -39,14 +39,6 @@ struct indicator_t {
static LIST_HEAD(tiq_list);
static DEFINE_MUTEX(tiq_list_lock);
-/* Adapter interrupt definitions */
-static void tiqdio_thinint_handler(struct airq_struct *airq, bool floating);
-
-static struct airq_struct tiqdio_airq = {
- .handler = tiqdio_thinint_handler,
- .isc = QDIO_AIRQ_ISC,
-};
-
static struct indicator_t *q_indicators;
u64 last_ai_time;
@@ -74,26 +66,20 @@ static void put_indicator(u32 *addr)
atomic_dec(&ind->count);
}
-void tiqdio_add_input_queues(struct qdio_irq *irq_ptr)
+void tiqdio_add_device(struct qdio_irq *irq_ptr)
{
mutex_lock(&tiq_list_lock);
- list_add_rcu(&irq_ptr->input_qs[0]->entry, &tiq_list);
+ list_add_rcu(&irq_ptr->entry, &tiq_list);
mutex_unlock(&tiq_list_lock);
}
-void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
+void tiqdio_remove_device(struct qdio_irq *irq_ptr)
{
- struct qdio_q *q;
-
- q = irq_ptr->input_qs[0];
- if (!q)
- return;
-
mutex_lock(&tiq_list_lock);
- list_del_rcu(&q->entry);
+ list_del_rcu(&irq_ptr->entry);
mutex_unlock(&tiq_list_lock);
synchronize_rcu();
- INIT_LIST_HEAD(&q->entry);
+ INIT_LIST_HEAD(&irq_ptr->entry);
}
static inline int has_multiple_inq_on_dsci(struct qdio_irq *irq_ptr)
@@ -154,7 +140,7 @@ static inline void tiqdio_call_inq_handlers(struct qdio_irq *irq)
/* skip if polling is enabled or already in work */
if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
&q->u.in.queue_irq_state)) {
- qperf_inc(q, int_discarded);
+ QDIO_PERF_STAT_INC(irq, int_discarded);
continue;
}
@@ -182,7 +168,7 @@ static inline void tiqdio_call_inq_handlers(struct qdio_irq *irq)
static void tiqdio_thinint_handler(struct airq_struct *airq, bool floating)
{
u32 si_used = clear_shared_ind();
- struct qdio_q *q;
+ struct qdio_irq *irq;
last_ai_time = S390_lowcore.int_clock;
inc_irq_stat(IRQIO_QAI);
@@ -190,12 +176,8 @@ static void tiqdio_thinint_handler(struct airq_struct *airq, bool floating)
/* protect tiq_list entries, only changed in activate or shutdown */
rcu_read_lock();
- /* check for work on all inbound thinint queues */
- list_for_each_entry_rcu(q, &tiq_list, entry) {
- struct qdio_irq *irq;
-
+ list_for_each_entry_rcu(irq, &tiq_list, entry) {
/* only process queues from changed sets */
- irq = q->irq_ptr;
if (unlikely(references_shared_dsci(irq))) {
if (!si_used)
continue;
@@ -204,11 +186,16 @@ static void tiqdio_thinint_handler(struct airq_struct *airq, bool floating)
tiqdio_call_inq_handlers(irq);
- qperf_inc(q, adapter_int);
+ QDIO_PERF_STAT_INC(irq, adapter_int);
}
rcu_read_unlock();
}
+static struct airq_struct tiqdio_airq = {
+ .handler = tiqdio_thinint_handler,
+ .isc = QDIO_AIRQ_ISC,
+};
+
static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset)
{
struct chsc_scssc_area *scssc = (void *)irq_ptr->chsc_page;
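
The thinint rework changes what sits on tiq_list (the qdio_irq device itself rather than its first input queue) but keeps the locking scheme intact: writers serialize on tiq_list_lock, the adapter-interrupt handler walks the list under rcu_read_lock(), and the synchronize_rcu() in tiqdio_remove_device() is what makes re-initializing the list entry safe, since no reader can still hold it once that call returns. In outline:

	/* Writer (activate/shutdown paths): */
	mutex_lock(&tiq_list_lock);
	list_del_rcu(&irq_ptr->entry);
	mutex_unlock(&tiq_list_lock);
	synchronize_rcu();		 /* wait out all current readers */
	INIT_LIST_HEAD(&irq_ptr->entry); /* only now is reuse safe */

	/* Reader (adapter interrupt): */
	rcu_read_lock();
	list_for_each_entry_rcu(irq, &tiq_list, entry)
		tiqdio_call_inq_handlers(irq);
	rcu_read_unlock();
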
diff --git a/drivers/s390/cio/vfio_ccw_cp.h b/drivers/s390/cio/vfio_ccw_cp.h
index 7cdc38049033..ba31240ce965 100644
--- a/drivers/s390/cio/vfio_ccw_cp.h
+++ b/drivers/s390/cio/vfio_ccw_cp.h
@@ -15,6 +15,7 @@
#include <asm/scsw.h>
#include "orb.h"
+#include "vfio_ccw_trace.h"
/*
* Max length for ccw chain.
diff --git a/drivers/s390/cio/vfio_ccw_fsm.c b/drivers/s390/cio/vfio_ccw_fsm.c
index 4a1e727c62d9..23e61aa638e4 100644
--- a/drivers/s390/cio/vfio_ccw_fsm.c
+++ b/drivers/s390/cio/vfio_ccw_fsm.c
@@ -15,9 +15,6 @@
#include "ioasm.h"
#include "vfio_ccw_private.h"
-#define CREATE_TRACE_POINTS
-#include "vfio_ccw_trace.h"
-
static int fsm_io_helper(struct vfio_ccw_private *private)
{
struct subchannel *sch;
@@ -321,8 +318,8 @@ static void fsm_io_request(struct vfio_ccw_private *private,
}
err_out:
- trace_vfio_ccw_io_fctl(scsw->cmd.fctl, schid,
- io_region->ret_code, errstr);
+ trace_vfio_ccw_fsm_io_request(scsw->cmd.fctl, schid,
+ io_region->ret_code, errstr);
}
/*
@@ -344,6 +341,10 @@ static void fsm_async_request(struct vfio_ccw_private *private,
/* should not happen? */
cmd_region->ret_code = -EINVAL;
}
+
+ trace_vfio_ccw_fsm_async_request(get_schid(private),
+ cmd_region->command,
+ cmd_region->ret_code);
}
/*
diff --git a/drivers/s390/cio/vfio_ccw_private.h b/drivers/s390/cio/vfio_ccw_private.h
index bbe9babf767b..9b9bb4982972 100644
--- a/drivers/s390/cio/vfio_ccw_private.h
+++ b/drivers/s390/cio/vfio_ccw_private.h
@@ -135,6 +135,7 @@ extern fsm_func_t *vfio_ccw_jumptable[NR_VFIO_CCW_STATES][NR_VFIO_CCW_EVENTS];
static inline void vfio_ccw_fsm_event(struct vfio_ccw_private *private,
int event)
{
+ trace_vfio_ccw_fsm_event(private->sch->schid, private->state, event);
vfio_ccw_jumptable[private->state][event](private, event);
}
diff --git a/drivers/s390/cio/vfio_ccw_trace.c b/drivers/s390/cio/vfio_ccw_trace.c
new file mode 100644
index 000000000000..8c671d2519f6
--- /dev/null
+++ b/drivers/s390/cio/vfio_ccw_trace.c
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Tracepoint definitions for vfio_ccw
+ *
+ * Copyright IBM Corp. 2019
+ * Author(s): Eric Farman <farman@linux.ibm.com>
+ */
+
+#define CREATE_TRACE_POINTS
+#include "vfio_ccw_trace.h"
+
+EXPORT_TRACEPOINT_SYMBOL(vfio_ccw_fsm_async_request);
+EXPORT_TRACEPOINT_SYMBOL(vfio_ccw_fsm_event);
+EXPORT_TRACEPOINT_SYMBOL(vfio_ccw_fsm_io_request);
diff --git a/drivers/s390/cio/vfio_ccw_trace.h b/drivers/s390/cio/vfio_ccw_trace.h
index b1da53ddec1f..30162a318a8a 100644
--- a/drivers/s390/cio/vfio_ccw_trace.h
+++ b/drivers/s390/cio/vfio_ccw_trace.h
@@ -7,6 +7,8 @@
* Halil Pasic <pasic@linux.vnet.ibm.com>
*/
+#include "cio.h"
+
#undef TRACE_SYSTEM
#define TRACE_SYSTEM vfio_ccw
@@ -15,28 +17,88 @@
#include <linux/tracepoint.h>
-TRACE_EVENT(vfio_ccw_io_fctl,
+TRACE_EVENT(vfio_ccw_fsm_async_request,
+ TP_PROTO(struct subchannel_id schid,
+ int command,
+ int errno),
+ TP_ARGS(schid, command, errno),
+
+ TP_STRUCT__entry(
+ __field(u8, cssid)
+ __field(u8, ssid)
+ __field(u16, sch_no)
+ __field(int, command)
+ __field(int, errno)
+ ),
+
+ TP_fast_assign(
+ __entry->cssid = schid.cssid;
+ __entry->ssid = schid.ssid;
+ __entry->sch_no = schid.sch_no;
+ __entry->command = command;
+ __entry->errno = errno;
+ ),
+
+ TP_printk("schid=%x.%x.%04x command=0x%x errno=%d",
+ __entry->cssid,
+ __entry->ssid,
+ __entry->sch_no,
+ __entry->command,
+ __entry->errno)
+);
+
+TRACE_EVENT(vfio_ccw_fsm_event,
+ TP_PROTO(struct subchannel_id schid, int state, int event),
+ TP_ARGS(schid, state, event),
+
+ TP_STRUCT__entry(
+ __field(u8, cssid)
+ __field(u8, ssid)
+ __field(u16, schno)
+ __field(int, state)
+ __field(int, event)
+ ),
+
+ TP_fast_assign(
+ __entry->cssid = schid.cssid;
+ __entry->ssid = schid.ssid;
+ __entry->schno = schid.sch_no;
+ __entry->state = state;
+ __entry->event = event;
+ ),
+
+ TP_printk("schid=%x.%x.%04x state=%d event=%d",
+ __entry->cssid, __entry->ssid, __entry->schno,
+ __entry->state,
+ __entry->event)
+);
+
+TRACE_EVENT(vfio_ccw_fsm_io_request,
TP_PROTO(int fctl, struct subchannel_id schid, int errno, char *errstr),
TP_ARGS(fctl, schid, errno, errstr),
TP_STRUCT__entry(
+ __field(u8, cssid)
+ __field(u8, ssid)
+ __field(u16, sch_no)
__field(int, fctl)
- __field_struct(struct subchannel_id, schid)
__field(int, errno)
__field(char*, errstr)
),
TP_fast_assign(
+ __entry->cssid = schid.cssid;
+ __entry->ssid = schid.ssid;
+ __entry->sch_no = schid.sch_no;
__entry->fctl = fctl;
- __entry->schid = schid;
__entry->errno = errno;
__entry->errstr = errstr;
),
- TP_printk("schid=%x.%x.%04x fctl=%x errno=%d info=%s",
- __entry->schid.cssid,
- __entry->schid.ssid,
- __entry->schid.sch_no,
+ TP_printk("schid=%x.%x.%04x fctl=0x%x errno=%d info=%s",
+ __entry->cssid,
+ __entry->ssid,
+ __entry->sch_no,
__entry->fctl,
__entry->errno,
__entry->errstr)
diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c
index 9de3d46b3253..d78d77686d7b 100644
--- a/drivers/s390/crypto/pkey_api.c
+++ b/drivers/s390/crypto/pkey_api.c
@@ -715,36 +715,18 @@ out:
static void *_copy_key_from_user(void __user *ukey, size_t keylen)
{
- void *kkey;
-
if (!ukey || keylen < MINKEYBLOBSIZE || keylen > KEYBLOBBUFSIZE)
return ERR_PTR(-EINVAL);
- kkey = kmalloc(keylen, GFP_KERNEL);
- if (!kkey)
- return ERR_PTR(-ENOMEM);
- if (copy_from_user(kkey, ukey, keylen)) {
- kfree(kkey);
- return ERR_PTR(-EFAULT);
- }
- return kkey;
+ return memdup_user(ukey, keylen);
}
static void *_copy_apqns_from_user(void __user *uapqns, size_t nr_apqns)
{
- void *kapqns = NULL;
- size_t nbytes;
-
- if (uapqns && nr_apqns > 0) {
- nbytes = nr_apqns * sizeof(struct pkey_apqn);
- kapqns = kmalloc(nbytes, GFP_KERNEL);
- if (!kapqns)
- return ERR_PTR(-ENOMEM);
- if (copy_from_user(kapqns, uapqns, nbytes))
- return ERR_PTR(-EFAULT);
- }
+ if (!uapqns || nr_apqns == 0)
+ return NULL;
- return kapqns;
+ return memdup_user(uapqns, nr_apqns * sizeof(struct pkey_apqn));
}
static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
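
memdup_user() packages exactly the kmalloc + copy_from_user + unwind sequence that both helpers open-coded, and like those helpers it reports failure through ERR_PTR rather than NULL. Its contract, in brief:

	/* memdup_user() (mm/util.c), sketched:
	 *   allocation failure   -> ERR_PTR(-ENOMEM)
	 *   copy_from_user fault -> buffer freed, ERR_PTR(-EFAULT)
	 *   never NULL, so callers test with IS_ERR()
	 */
	void *kbuf = memdup_user(ubuf, len);

	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);
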
diff --git a/drivers/s390/net/ism.h b/drivers/s390/net/ism.h
index 66eac2b9704d..1901e9c80ed8 100644
--- a/drivers/s390/net/ism.h
+++ b/drivers/s390/net/ism.h
@@ -32,8 +32,6 @@
#define ISM_UNREG_SBA 0x11
#define ISM_UNREG_IEQ 0x12
-#define ISM_ERROR 0xFFFF
-
struct ism_req_hdr {
u32 cmd;
u16 : 16;
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index e4b55f9aa062..293dd99b7fef 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -368,6 +368,7 @@ enum qeth_header_ids {
QETH_HEADER_TYPE_L3_TSO = 0x03,
QETH_HEADER_TYPE_OSN = 0x04,
QETH_HEADER_TYPE_L2_TSO = 0x06,
+ QETH_HEADER_MASK_INVAL = 0x80,
};
/* flags for qeth_hdr.ext_flags */
#define QETH_HDR_EXT_VLAN_FRAME 0x01
@@ -477,12 +478,16 @@ struct qeth_card_stats {
u64 rx_sg_frags;
u64 rx_sg_alloc_page;
+ u64 rx_dropped_nomem;
+ u64 rx_dropped_notsupp;
+
/* rtnl_link_stats64 */
u64 rx_packets;
u64 rx_bytes;
- u64 rx_errors;
- u64 rx_dropped;
u64 rx_multicast;
+ u64 rx_length_errors;
+ u64 rx_frame_errors;
+ u64 rx_fifo_errors;
};
struct qeth_out_q_stats {
@@ -532,6 +537,8 @@ struct qeth_qdio_out_q {
struct timer_list timer;
struct qeth_hdr *prev_hdr;
u8 bulk_start;
+ u8 bulk_count;
+ u8 bulk_max;
};
#define qeth_for_each_output_queue(card, q, i) \
@@ -817,7 +824,6 @@ struct qeth_card {
struct workqueue_struct *event_wq;
struct workqueue_struct *cmd_wq;
wait_queue_head_t wait_q;
- unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
DECLARE_HASHTABLE(mac_htable, 4);
DECLARE_HASHTABLE(ip_htable, 4);
struct mutex ip_lock;
@@ -839,6 +845,7 @@ struct qeth_card {
struct service_level qeth_service_level;
struct qdio_ssqd_desc ssqd;
debug_info_t *debug;
+ struct mutex sbp_lock;
struct mutex conf_mutex;
struct mutex discipline_mutex;
struct napi_struct napi;
@@ -878,6 +885,13 @@ static inline u16 qeth_iqd_translate_txq(struct net_device *dev, u16 txq)
return txq;
}
+static inline bool qeth_iqd_is_mcast_queue(struct qeth_card *card,
+ struct qeth_qdio_out_q *queue)
+{
+ return qeth_iqd_translate_txq(card->dev, queue->queue_no) ==
+ QETH_IQD_MCAST_TXQ;
+}
+
static inline void qeth_scrub_qdio_buffer(struct qdio_buffer *buf,
unsigned int elements)
{
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index dda274351c21..efcbe60220d1 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -901,30 +901,30 @@ static int qeth_get_problem(struct qeth_card *card, struct ccw_device *cdev,
CCW_DEVID(cdev), dstat, cstat);
print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET,
16, 1, irb, 64, 1);
- return 1;
+ return -EIO;
}
if (dstat & DEV_STAT_UNIT_CHECK) {
if (sense[SENSE_RESETTING_EVENT_BYTE] &
SENSE_RESETTING_EVENT_FLAG) {
QETH_CARD_TEXT(card, 2, "REVIND");
- return 1;
+ return -EIO;
}
if (sense[SENSE_COMMAND_REJECT_BYTE] &
SENSE_COMMAND_REJECT_FLAG) {
QETH_CARD_TEXT(card, 2, "CMDREJi");
- return 1;
+ return -EIO;
}
if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) {
QETH_CARD_TEXT(card, 2, "AFFE");
- return 1;
+ return -EIO;
}
if ((!sense[0]) && (!sense[1]) && (!sense[2]) && (!sense[3])) {
QETH_CARD_TEXT(card, 2, "ZEROSEN");
return 0;
}
QETH_CARD_TEXT(card, 2, "DGENCHK");
- return 1;
+ return -EIO;
}
return 0;
}
@@ -1513,7 +1513,6 @@ int qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
rc = qeth_clear_halt_card(card, use_halt);
if (rc)
QETH_CARD_TEXT_(card, 3, "2err%d", rc);
- card->state = CARD_STATE_DOWN;
return rc;
}
EXPORT_SYMBOL_GPL(qeth_qdio_clear_card);
@@ -1957,6 +1956,7 @@ static void qeth_idx_setup_activate_cmd(struct qeth_card *card,
ccw_device_get_id(CARD_DDEV(card), &dev_id);
iob->finalize = qeth_idx_finalize_cmd;
+ port |= QETH_IDX_ACT_INVAL_FRAME;
memcpy(QETH_IDX_ACT_PNO(iob->data), &port, 1);
memcpy(QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
&card->token.issuer_rm_w, QETH_MPC_TOKEN_LENGTH);
@@ -2634,6 +2634,18 @@ static int qeth_init_input_buffer(struct qeth_card *card,
return 0;
}
+static unsigned int qeth_tx_select_bulk_max(struct qeth_card *card,
+ struct qeth_qdio_out_q *queue)
+{
+ if (!IS_IQD(card) ||
+ qeth_iqd_is_mcast_queue(card, queue) ||
+ card->options.cq == QETH_CQ_ENABLED ||
+ qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd))
+ return 1;
+
+ return card->ssqd.mmwc ? card->ssqd.mmwc : 1;
+}
+
int qeth_init_qdio_queues(struct qeth_card *card)
{
unsigned int i;
@@ -2673,6 +2685,8 @@ int qeth_init_qdio_queues(struct qeth_card *card)
queue->do_pack = 0;
queue->prev_hdr = NULL;
queue->bulk_start = 0;
+ queue->bulk_count = 0;
+ queue->bulk_max = qeth_tx_select_bulk_max(card, queue);
atomic_set(&queue->used_buffers, 0);
atomic_set(&queue->set_pci_flags_count, 0);
atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
@@ -3080,7 +3094,7 @@ static int qeth_check_qdio_errors(struct qeth_card *card,
buf->element[14].sflags);
QETH_CARD_TEXT_(card, 2, " qerr=%X", qdio_error);
if ((buf->element[15].sflags) == 0x12) {
- QETH_CARD_STAT_INC(card, rx_dropped);
+ QETH_CARD_STAT_INC(card, rx_fifo_errors);
return 0;
} else
return 1;
@@ -3107,7 +3121,7 @@ static void qeth_queue_input_buffer(struct qeth_card *card, int index)
for (i = queue->next_buf_to_init;
i < queue->next_buf_to_init + count; ++i) {
if (qeth_init_input_buffer(card,
- &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q])) {
+ &queue->bufs[QDIO_BUFNR(i)])) {
break;
} else {
newcount++;
@@ -3149,8 +3163,8 @@ static void qeth_queue_input_buffer(struct qeth_card *card, int index)
if (rc) {
QETH_CARD_TEXT(card, 2, "qinberr");
}
- queue->next_buf_to_init = (queue->next_buf_to_init + count) %
- QDIO_MAX_BUFFERS_PER_Q;
+ queue->next_buf_to_init = QDIO_BUFNR(queue->next_buf_to_init +
+ count);
}
}
@@ -3198,7 +3212,7 @@ static int qeth_prep_flush_pack_buffer(struct qeth_qdio_out_q *queue)
/* it's a packing buffer */
atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
queue->next_buf_to_fill =
- (queue->next_buf_to_fill + 1) % QDIO_MAX_BUFFERS_PER_Q;
+ QDIO_BUFNR(queue->next_buf_to_fill + 1);
return 1;
}
return 0;
@@ -3252,7 +3266,8 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
unsigned int qdio_flags;
for (i = index; i < index + count; ++i) {
- int bidx = i % QDIO_MAX_BUFFERS_PER_Q;
+ unsigned int bidx = QDIO_BUFNR(i);
+
buf = queue->bufs[bidx];
buf->buffer->element[buf->next_element_to_fill - 1].eflags |=
SBAL_EFLAGS_LAST_ENTRY;
@@ -3318,10 +3333,11 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
static void qeth_flush_queue(struct qeth_qdio_out_q *queue)
{
- qeth_flush_buffers(queue, queue->bulk_start, 1);
+ qeth_flush_buffers(queue, queue->bulk_start, queue->bulk_count);
- queue->bulk_start = QDIO_BUFNR(queue->bulk_start + 1);
+ queue->bulk_start = QDIO_BUFNR(queue->bulk_start + queue->bulk_count);
queue->prev_hdr = NULL;
+ queue->bulk_count = 0;
}
static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
@@ -3419,8 +3435,7 @@ static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err,
}
for (i = first_element; i < first_element + count; ++i) {
- int bidx = i % QDIO_MAX_BUFFERS_PER_Q;
- struct qdio_buffer *buffer = cq->qdio_bufs[bidx];
+ struct qdio_buffer *buffer = cq->qdio_bufs[QDIO_BUFNR(i)];
int e = 0;
while ((e < QDIO_MAX_ELEMENTS_PER_BUFFER) &&
@@ -3441,8 +3456,8 @@ static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err,
"QDIO reported an error, rc=%i\n", rc);
QETH_CARD_TEXT(card, 2, "qcqherr");
}
- card->qdio.c_q->next_buf_to_init = (card->qdio.c_q->next_buf_to_init
- + count) % QDIO_MAX_BUFFERS_PER_Q;
+
+ cq->next_buf_to_init = QDIO_BUFNR(cq->next_buf_to_init + count);
}
static void qeth_qdio_input_handler(struct ccw_device *ccwdev,
@@ -3468,7 +3483,6 @@ static void qeth_qdio_output_handler(struct ccw_device *ccwdev,
{
struct qeth_card *card = (struct qeth_card *) card_ptr;
struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue];
- struct qeth_qdio_out_buffer *buffer;
struct net_device *dev = card->dev;
struct netdev_queue *txq;
int i;
@@ -3482,10 +3496,10 @@ static void qeth_qdio_output_handler(struct ccw_device *ccwdev,
}
for (i = first_element; i < (first_element + count); ++i) {
- int bidx = i % QDIO_MAX_BUFFERS_PER_Q;
- buffer = queue->bufs[bidx];
- qeth_handle_send_error(card, buffer, qdio_error);
- qeth_clear_output_buffer(queue, buffer, qdio_error, 0);
+ struct qeth_qdio_out_buffer *buf = queue->bufs[QDIO_BUFNR(i)];
+
+ qeth_handle_send_error(card, buf, qdio_error);
+ qeth_clear_output_buffer(queue, buf, qdio_error, 0);
}
atomic_sub(count, &queue->used_buffers);
@@ -3680,10 +3694,10 @@ check_layout:
}
static bool qeth_iqd_may_bulk(struct qeth_qdio_out_q *queue,
- struct qeth_qdio_out_buffer *buffer,
struct sk_buff *curr_skb,
struct qeth_hdr *curr_hdr)
{
+ struct qeth_qdio_out_buffer *buffer = queue->bufs[queue->bulk_start];
struct qeth_hdr *prev_hdr = queue->prev_hdr;
if (!prev_hdr)
@@ -3803,13 +3817,14 @@ static int __qeth_xmit(struct qeth_card *card, struct qeth_qdio_out_q *queue,
struct qeth_hdr *hdr, unsigned int offset,
unsigned int hd_len)
{
- struct qeth_qdio_out_buffer *buffer = queue->bufs[queue->bulk_start];
unsigned int bytes = qdisc_pkt_len(skb);
+ struct qeth_qdio_out_buffer *buffer;
unsigned int next_element;
struct netdev_queue *txq;
bool stopped = false;
bool flush;
+ buffer = queue->bufs[QDIO_BUFNR(queue->bulk_start + queue->bulk_count)];
txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb));
/* Just a sanity check, the wake/stop logic should ensure that we always
@@ -3818,11 +3833,23 @@ static int __qeth_xmit(struct qeth_card *card, struct qeth_qdio_out_q *queue,
if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
return -EBUSY;
- if ((buffer->next_element_to_fill + elements > queue->max_elements) ||
- !qeth_iqd_may_bulk(queue, buffer, skb, hdr)) {
- atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
- qeth_flush_queue(queue);
- buffer = queue->bufs[queue->bulk_start];
+ flush = !qeth_iqd_may_bulk(queue, skb, hdr);
+
+ if (flush ||
+ (buffer->next_element_to_fill + elements > queue->max_elements)) {
+ if (buffer->next_element_to_fill > 0) {
+ atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
+ queue->bulk_count++;
+ }
+
+ if (queue->bulk_count >= queue->bulk_max)
+ flush = true;
+
+ if (flush)
+ qeth_flush_queue(queue);
+
+ buffer = queue->bufs[QDIO_BUFNR(queue->bulk_start +
+ queue->bulk_count)];
/* Sanity-check again: */
if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
@@ -3848,7 +3875,13 @@ static int __qeth_xmit(struct qeth_card *card, struct qeth_qdio_out_q *queue,
if (flush || next_element >= queue->max_elements) {
atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
- qeth_flush_queue(queue);
+ queue->bulk_count++;
+
+ if (queue->bulk_count >= queue->bulk_max)
+ flush = true;
+
+ if (flush)
+ qeth_flush_queue(queue);
}
if (stopped && !qeth_out_queue_is_full(queue))
@@ -3898,8 +3931,7 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
flush_count++;
queue->next_buf_to_fill =
- (queue->next_buf_to_fill + 1) %
- QDIO_MAX_BUFFERS_PER_Q;
+ QDIO_BUFNR(queue->next_buf_to_fill + 1);
buffer = queue->bufs[queue->next_buf_to_fill];
/* We stepped forward, so sanity-check again: */
@@ -3932,8 +3964,8 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
if (!queue->do_pack || stopped || next_element >= queue->max_elements) {
flush_count++;
atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
- queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) %
- QDIO_MAX_BUFFERS_PER_Q;
+ queue->next_buf_to_fill =
+ QDIO_BUFNR(queue->next_buf_to_fill + 1);
}
if (flush_count)
@@ -4261,7 +4293,6 @@ int qeth_set_access_ctrl_online(struct qeth_card *card, int fallback)
}
return rc;
}
-EXPORT_SYMBOL_GPL(qeth_set_access_ctrl_online);
void qeth_tx_timeout(struct net_device *dev)
{
@@ -4316,7 +4347,9 @@ static int qeth_mdio_read(struct net_device *dev, int phy_id, int regnum)
case MII_NWAYTEST: /* N-way auto-neg test register */
break;
case MII_RERRCOUNTER: /* rx error counter */
- rc = card->stats.rx_errors;
+ rc = card->stats.rx_length_errors +
+ card->stats.rx_frame_errors +
+ card->stats.rx_fifo_errors;
break;
case MII_SREVISION: /* silicon revision */
break;
@@ -4822,7 +4855,6 @@ static void qeth_core_free_card(struct qeth_card *card)
qeth_clean_channel(&card->data);
qeth_put_cmd(card->read_cmd);
destroy_workqueue(card->event_wq);
- qeth_free_qdio_queues(card);
unregister_service_level(&card->qeth_service_level);
dev_set_drvdata(&card->gdev->dev, NULL);
kfree(card);
@@ -4977,6 +5009,15 @@ retriable:
goto out;
}
}
+
+ if (!qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP) ||
+ (card->info.hwtrap && qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM)))
+ card->info.hwtrap = 0;
+
+ rc = qeth_set_access_ctrl_online(card, 0);
+ if (rc)
+ goto out;
+
return 0;
out:
dev_warn(&card->gdev->dev, "The qeth device driver failed to recover "
@@ -5023,13 +5064,14 @@ struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card,
struct qdio_buffer_element *element = *__element;
struct qdio_buffer *buffer = qethbuffer->buffer;
int offset = *__offset;
+ bool use_rx_sg = false;
+ unsigned int headroom;
struct sk_buff *skb;
int skb_len = 0;
void *data_ptr;
int data_len;
- int headroom = 0;
- int use_rx_sg = 0;
+next_packet:
/* qeth_hdr must not cross element boundaries */
while (element->length < offset + sizeof(struct qeth_hdr)) {
if (qeth_is_last_sbale(element))
@@ -5043,27 +5085,45 @@ struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card,
switch ((*hdr)->hdr.l2.id) {
case QETH_HEADER_TYPE_LAYER2:
skb_len = (*hdr)->hdr.l2.pkt_length;
+ headroom = 0;
break;
case QETH_HEADER_TYPE_LAYER3:
skb_len = (*hdr)->hdr.l3.length;
+ if (!IS_LAYER3(card)) {
+ QETH_CARD_STAT_INC(card, rx_dropped_notsupp);
+ skb = NULL;
+ goto walk_packet;
+ }
+
headroom = ETH_HLEN;
break;
case QETH_HEADER_TYPE_OSN:
skb_len = (*hdr)->hdr.osn.pdu_length;
+ if (!IS_OSN(card)) {
+ QETH_CARD_STAT_INC(card, rx_dropped_notsupp);
+ skb = NULL;
+ goto walk_packet;
+ }
+
headroom = sizeof(struct qeth_hdr);
break;
default:
- break;
+ if ((*hdr)->hdr.l2.id & QETH_HEADER_MASK_INVAL)
+ QETH_CARD_STAT_INC(card, rx_frame_errors);
+ else
+ QETH_CARD_STAT_INC(card, rx_dropped_notsupp);
+
+ /* Can't determine packet length, drop the whole buffer. */
+ return NULL;
}
if (!skb_len)
return NULL;
- if (((skb_len >= card->options.rx_sg_cb) &&
- !IS_OSN(card) &&
- (!atomic_read(&card->force_alloc_skb))) ||
- (card->options.cq == QETH_CQ_ENABLED))
- use_rx_sg = 1;
+ use_rx_sg = (card->options.cq == QETH_CQ_ENABLED) ||
+ ((skb_len >= card->options.rx_sg_cb) &&
+ !atomic_read(&card->force_alloc_skb) &&
+ !IS_OSN(card));
if (use_rx_sg && qethbuffer->rx_skb) {
/* QETH_CQ_ENABLED only: */
@@ -5074,15 +5134,18 @@ struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card,
skb = napi_alloc_skb(&card->napi, linear + headroom);
}
+
if (!skb)
- goto no_mem;
- if (headroom)
+ QETH_CARD_STAT_INC(card, rx_dropped_nomem);
+ else if (headroom)
skb_reserve(skb, headroom);
+walk_packet:
data_ptr = element->addr + offset;
while (skb_len) {
data_len = min(skb_len, (int)(element->length - offset));
- if (data_len) {
+
+ if (skb && data_len) {
if (use_rx_sg)
qeth_create_skb_frag(element, skb, offset,
data_len);
@@ -5094,8 +5157,11 @@ struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card,
if (qeth_is_last_sbale(element)) {
QETH_CARD_TEXT(card, 4, "unexeob");
QETH_CARD_HEX(card, 2, buffer, sizeof(void *));
- dev_kfree_skb_any(skb);
- QETH_CARD_STAT_INC(card, rx_errors);
+ if (skb) {
+ dev_kfree_skb_any(skb);
+ QETH_CARD_STAT_INC(card,
+ rx_length_errors);
+ }
return NULL;
}
element++;
@@ -5105,6 +5171,11 @@ struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card,
offset += data_len;
}
}
+
+ /* This packet was skipped, go get another one: */
+ if (!skb)
+ goto next_packet;
+
*__element = element;
*__offset = offset;
if (use_rx_sg) {
@@ -5113,12 +5184,6 @@ struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card,
skb_shinfo(skb)->nr_frags);
}
return skb;
-no_mem:
- if (net_ratelimit()) {
- QETH_CARD_TEXT(card, 2, "noskbmem");
- }
- QETH_CARD_STAT_INC(card, rx_dropped);
- return NULL;
}
EXPORT_SYMBOL_GPL(qeth_core_get_next_skb);
@@ -5165,8 +5230,7 @@ int qeth_poll(struct napi_struct *napi, int budget)
card->rx.b_count--;
if (card->rx.b_count) {
card->rx.b_index =
- (card->rx.b_index + 1) %
- QDIO_MAX_BUFFERS_PER_Q;
+ QDIO_BUFNR(card->rx.b_index + 1);
card->rx.b_element =
&card->qdio.in_q
->bufs[card->rx.b_index]
@@ -5182,9 +5246,9 @@ int qeth_poll(struct napi_struct *napi, int budget)
}
}
- napi_complete_done(napi, work_done);
- if (qdio_start_irq(card->data.ccwdev, 0))
- napi_schedule(&card->napi);
+ if (napi_complete_done(napi, work_done) &&
+ qdio_start_irq(CARD_DDEV(card), 0))
+ napi_schedule(napi);
out:
return work_done;
}
@@ -5703,6 +5767,8 @@ static void qeth_core_remove_device(struct ccwgroup_device *gdev)
qeth_core_free_discipline(card);
}
+ qeth_free_qdio_queues(card);
+
free_netdev(card->dev);
qeth_core_free_card(card);
put_device(&gdev->dev);
@@ -6198,9 +6264,15 @@ void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
stats->rx_packets = card->stats.rx_packets;
stats->rx_bytes = card->stats.rx_bytes;
- stats->rx_errors = card->stats.rx_errors;
- stats->rx_dropped = card->stats.rx_dropped;
+ stats->rx_errors = card->stats.rx_length_errors +
+ card->stats.rx_frame_errors +
+ card->stats.rx_fifo_errors;
+ stats->rx_dropped = card->stats.rx_dropped_nomem +
+ card->stats.rx_dropped_notsupp;
stats->multicast = card->stats.rx_multicast;
+ stats->rx_length_errors = card->stats.rx_length_errors;
+ stats->rx_frame_errors = card->stats.rx_frame_errors;
+ stats->rx_fifo_errors = card->stats.rx_fifo_errors;
for (i = 0; i < card->qdio.no_out_queues; i++) {
queue = card->qdio.out_qs[i];
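
Buried in the qeth_core_main changes is a classic NAPI fix: qeth_poll() used to call napi_complete_done() and then unconditionally re-arm the qdio IRQ. But napi_complete_done() returns false when polling must stay active (for instance while busy polling owns the NAPI context), and in that case re-arming and rescheduling is wrong. The corrected idiom, as in the hunk above:

	/* Re-arm only when NAPI truly completed; qdio_start_irq()
	 * returning nonzero means new work arrived while re-arming,
	 * so polling is kicked off again.
	 */
	if (napi_complete_done(napi, work_done) &&
	    qdio_start_irq(CARD_DDEV(card), 0))
		napi_schedule(napi);
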
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index 6420b58cf42b..53fcf6641154 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -11,6 +11,7 @@
#include <asm/qeth.h>
#include <uapi/linux/if_ether.h>
+#include <uapi/linux/in6.h>
#define IPA_PDU_HEADER_SIZE 0x40
#define QETH_IPA_PDU_LEN_TOTAL(buffer) (buffer + 0x0e)
@@ -365,8 +366,7 @@ struct qeth_ipacmd_setdelip6 {
struct qeth_ipacmd_setdelipm {
__u8 mac[6];
__u8 padding[2];
- __u8 ip6[12];
- __u8 ip4[4];
+ struct in6_addr ip;
} __attribute__ ((packed));
struct qeth_ipacmd_layer2setdelmac {
@@ -900,6 +900,7 @@ extern unsigned char IDX_ACTIVATE_WRITE[];
#define IDX_ACTIVATE_SIZE 0x22
#define QETH_IDX_ACT_PNO(buffer) (buffer+0x0b)
#define QETH_IDX_ACT_ISSUER_RM_TOKEN(buffer) (buffer + 0x0c)
+#define QETH_IDX_ACT_INVAL_FRAME 0x40
#define QETH_IDX_NO_PORTNAME_REQUIRED(buffer) ((buffer)[0x0b] & 0x80)
#define QETH_IDX_ACT_FUNC_LEVEL(buffer) (buffer + 0x10)
#define QETH_IDX_ACT_DATASET_NAME(buffer) (buffer + 0x16)
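
Replacing the raw ip6[12]/ip4[4] split in setdelipm with one struct in6_addr matches the layout the old fields already implied, with an IPv4 address occupying the last four bytes of a 16-byte field, i.e. the v4-mapped position. Assuming callers use the standard helper (an assumption, not shown in this hunk), the fill-in becomes:

	struct in6_addr ip;

	ipv6_addr_set_v4mapped(ipv4_addr, &ip);	/* ::ffff:a.b.c.d */
	/* IPv6 case: copy the in6_addr verbatim instead. */
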
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c
index 9f392497d570..e81170ab6d9a 100644
--- a/drivers/s390/net/qeth_core_sys.c
+++ b/drivers/s390/net/qeth_core_sys.c
@@ -20,8 +20,6 @@ static ssize_t qeth_dev_state_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
switch (card->state) {
case CARD_STATE_DOWN:
@@ -45,8 +43,6 @@ static ssize_t qeth_dev_chpid_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
return sprintf(buf, "%02X\n", card->info.chpid);
}
@@ -57,8 +53,7 @@ static ssize_t qeth_dev_if_name_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
+
return sprintf(buf, "%s\n", QETH_CARD_IFNAME(card));
}
@@ -68,8 +63,6 @@ static ssize_t qeth_dev_card_type_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
return sprintf(buf, "%s\n", qeth_get_cardname_short(card));
}
@@ -94,8 +87,6 @@ static ssize_t qeth_dev_inbuf_size_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
return sprintf(buf, "%s\n", qeth_get_bufsize_str(card));
}
@@ -106,8 +97,6 @@ static ssize_t qeth_dev_portno_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
return sprintf(buf, "%i\n", card->dev->dev_port);
}
@@ -120,9 +109,6 @@ static ssize_t qeth_dev_portno_store(struct device *dev,
unsigned int portno, limit;
int rc = 0;
- if (!card)
- return -EINVAL;
-
mutex_lock(&card->conf_mutex);
if (card->state != CARD_STATE_DOWN) {
rc = -EPERM;
@@ -171,9 +157,6 @@ static ssize_t qeth_dev_prioqing_show(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
-
switch (card->qdio.do_prio_queueing) {
case QETH_PRIO_Q_ING_PREC:
return sprintf(buf, "%s\n", "by precedence");
@@ -195,9 +178,6 @@ static ssize_t qeth_dev_prioqing_store(struct device *dev,
struct qeth_card *card = dev_get_drvdata(dev);
int rc = 0;
- if (!card)
- return -EINVAL;
-
if (IS_IQD(card))
return -EOPNOTSUPP;
@@ -262,9 +242,6 @@ static ssize_t qeth_dev_bufcnt_show(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
-
return sprintf(buf, "%i\n", card->qdio.in_buf_pool.buf_count);
}
@@ -276,9 +253,6 @@ static ssize_t qeth_dev_bufcnt_store(struct device *dev,
int cnt, old_cnt;
int rc = 0;
- if (!card)
- return -EINVAL;
-
mutex_lock(&card->conf_mutex);
if (card->state != CARD_STATE_DOWN) {
rc = -EPERM;
@@ -307,9 +281,6 @@ static ssize_t qeth_dev_recover_store(struct device *dev,
char *tmp;
int i;
- if (!card)
- return -EINVAL;
-
if (!qeth_card_hw_is_reachable(card))
return -EPERM;
@@ -325,11 +296,6 @@ static DEVICE_ATTR(recover, 0200, NULL, qeth_dev_recover_store);
static ssize_t qeth_dev_performance_stats_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct qeth_card *card = dev_get_drvdata(dev);
-
- if (!card)
- return -EINVAL;
-
return sprintf(buf, "1\n");
}
@@ -342,9 +308,6 @@ static ssize_t qeth_dev_performance_stats_store(struct device *dev,
bool reset;
int rc;
- if (!card)
- return -EINVAL;
-
rc = kstrtobool(buf, &reset);
if (rc)
return rc;
@@ -370,9 +333,6 @@ static ssize_t qeth_dev_layer2_show(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
-
return sprintf(buf, "%i\n", card->options.layer);
}
@@ -385,9 +345,6 @@ static ssize_t qeth_dev_layer2_store(struct device *dev,
int i, rc = 0;
enum qeth_discipline_id newdis;
- if (!card)
- return -EINVAL;
-
mutex_lock(&card->discipline_mutex);
if (card->state != CARD_STATE_DOWN) {
rc = -EPERM;
@@ -453,9 +410,6 @@ static ssize_t qeth_dev_isolation_show(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
-
switch (card->options.isolation) {
case ISOLATION_MODE_NONE:
return snprintf(buf, 6, "%s\n", ATTR_QETH_ISOLATION_NONE);
@@ -475,9 +429,6 @@ static ssize_t qeth_dev_isolation_store(struct device *dev,
enum qeth_ipa_isolation_modes isolation;
int rc = 0;
- if (!card)
- return -EINVAL;
-
mutex_lock(&card->conf_mutex);
if (!IS_OSD(card) && !IS_OSX(card)) {
rc = -EOPNOTSUPP;
@@ -522,9 +473,6 @@ static ssize_t qeth_dev_switch_attrs_show(struct device *dev,
struct qeth_switch_info sw_info;
int rc = 0;
- if (!card)
- return -EINVAL;
-
if (!qeth_card_hw_is_reachable(card))
return sprintf(buf, "n/a\n");
@@ -555,8 +503,6 @@ static ssize_t qeth_hw_trap_show(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
if (card->info.hwtrap)
return snprintf(buf, 5, "arm\n");
else
@@ -570,9 +516,6 @@ static ssize_t qeth_hw_trap_store(struct device *dev,
int rc = 0;
int state = 0;
- if (!card)
- return -EINVAL;
-
mutex_lock(&card->conf_mutex);
if (qeth_card_hw_is_reachable(card))
state = 1;
@@ -607,24 +550,12 @@ static ssize_t qeth_hw_trap_store(struct device *dev,
static DEVICE_ATTR(hw_trap, 0644, qeth_hw_trap_show,
qeth_hw_trap_store);
-static ssize_t qeth_dev_blkt_show(char *buf, struct qeth_card *card, int value)
-{
-
- if (!card)
- return -EINVAL;
-
- return sprintf(buf, "%i\n", value);
-}
-
static ssize_t qeth_dev_blkt_store(struct qeth_card *card,
const char *buf, size_t count, int *value, int max_value)
{
char *tmp;
int i, rc = 0;
- if (!card)
- return -EINVAL;
-
mutex_lock(&card->conf_mutex);
if (card->state != CARD_STATE_DOWN) {
rc = -EPERM;
@@ -645,7 +576,7 @@ static ssize_t qeth_dev_blkt_total_show(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- return qeth_dev_blkt_show(buf, card, card->info.blkt.time_total);
+ return sprintf(buf, "%i\n", card->info.blkt.time_total);
}
static ssize_t qeth_dev_blkt_total_store(struct device *dev,
@@ -657,8 +588,6 @@ static ssize_t qeth_dev_blkt_total_store(struct device *dev,
&card->info.blkt.time_total, 5000);
}
-
-
static DEVICE_ATTR(total, 0644, qeth_dev_blkt_total_show,
qeth_dev_blkt_total_store);
@@ -667,7 +596,7 @@ static ssize_t qeth_dev_blkt_inter_show(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- return qeth_dev_blkt_show(buf, card, card->info.blkt.inter_packet);
+ return sprintf(buf, "%i\n", card->info.blkt.inter_packet);
}
static ssize_t qeth_dev_blkt_inter_store(struct device *dev,
@@ -687,8 +616,7 @@ static ssize_t qeth_dev_blkt_inter_jumbo_show(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- return qeth_dev_blkt_show(buf, card,
- card->info.blkt.inter_packet_jumbo);
+ return sprintf(buf, "%i\n", card->info.blkt.inter_packet_jumbo);
}
static ssize_t qeth_dev_blkt_inter_jumbo_store(struct device *dev,
diff --git a/drivers/s390/net/qeth_ethtool.c b/drivers/s390/net/qeth_ethtool.c
index 096698df3886..f7485c6dea25 100644
--- a/drivers/s390/net/qeth_ethtool.c
+++ b/drivers/s390/net/qeth_ethtool.c
@@ -49,6 +49,8 @@ static const struct qeth_stats card_stats[] = {
QETH_CARD_STAT("rx0 SG skbs", rx_sg_skbs),
QETH_CARD_STAT("rx0 SG page frags", rx_sg_frags),
QETH_CARD_STAT("rx0 SG page allocs", rx_sg_alloc_page),
+ QETH_CARD_STAT("rx0 dropped, no memory", rx_dropped_nomem),
+ QETH_CARD_STAT("rx0 dropped, bad format", rx_dropped_notsupp),
};
#define TXQ_STATS_LEN ARRAY_SIZE(txq_stats)
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index bd8143e51747..989935d67b31 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -315,29 +315,19 @@ static int qeth_l2_process_inbound_buffer(struct qeth_card *card,
*done = 1;
break;
}
- switch (hdr->hdr.l2.id) {
- case QETH_HEADER_TYPE_LAYER2:
+
+ if (hdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
skb->protocol = eth_type_trans(skb, skb->dev);
qeth_rx_csum(card, skb, hdr->hdr.l2.flags[1]);
len = skb->len;
napi_gro_receive(&card->napi, skb);
- break;
- case QETH_HEADER_TYPE_OSN:
- if (IS_OSN(card)) {
- skb_push(skb, sizeof(struct qeth_hdr));
- skb_copy_to_linear_data(skb, hdr,
- sizeof(struct qeth_hdr));
- len = skb->len;
- card->osn_info.data_cb(skb);
- break;
- }
- /* Else, fall through */
- default:
- dev_kfree_skb_any(skb);
- QETH_CARD_TEXT(card, 3, "inbunkno");
- QETH_DBF_HEX(CTRL, 3, hdr, sizeof(*hdr));
- continue;
+ } else {
+ skb_push(skb, sizeof(*hdr));
+ skb_copy_to_linear_data(skb, hdr, sizeof(*hdr));
+ len = skb->len;
+ card->osn_info.data_cb(skb);
}
+
work_done++;
budget--;
QETH_CARD_STAT_INC(card, rx_packets);
@@ -467,10 +457,14 @@ static void qeth_l2_set_promisc_mode(struct qeth_card *card)
if (card->info.promisc_mode == enable)
return;
- if (qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE))
+ if (qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE)) {
qeth_setadp_promisc_mode(card, enable);
- else if (card->options.sbp.reflect_promisc)
- qeth_l2_promisc_to_bridge(card, enable);
+ } else {
+ mutex_lock(&card->sbp_lock);
+ if (card->options.sbp.reflect_promisc)
+ qeth_l2_promisc_to_bridge(card, enable);
+ mutex_unlock(&card->sbp_lock);
+ }
}
/* New MAC address is added to the hash table and marked to be written on card
@@ -631,6 +625,7 @@ static int qeth_l2_probe_device(struct ccwgroup_device *gdev)
int rc;
qeth_l2_vnicc_set_defaults(card);
+ mutex_init(&card->sbp_lock);
if (gdev->dev.type == &qeth_generic_devtype) {
rc = qeth_l2_create_device_attributes(&gdev->dev);
@@ -759,14 +754,6 @@ add_napi:
return rc;
}
-static int qeth_l2_start_ipassists(struct qeth_card *card)
-{
- /* configure isolation level */
- if (qeth_set_access_ctrl_online(card, 0))
- return -ENODEV;
- return 0;
-}
-
static void qeth_l2_trace_features(struct qeth_card *card)
{
/* Set BridgePort features */
@@ -797,17 +784,12 @@ static int qeth_l2_set_online(struct ccwgroup_device *gdev)
goto out_remove;
}
- if (qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP)) {
- if (card->info.hwtrap &&
- qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM))
- card->info.hwtrap = 0;
- } else
- card->info.hwtrap = 0;
-
+ mutex_lock(&card->sbp_lock);
qeth_bridgeport_query_support(card);
if (card->options.sbp.supported_funcs)
dev_info(&card->gdev->dev,
"The device represents a Bridge Capable Port\n");
+ mutex_unlock(&card->sbp_lock);
qeth_l2_register_dev_addr(card);
@@ -825,12 +807,6 @@ static int qeth_l2_set_online(struct ccwgroup_device *gdev)
/* softsetup */
QETH_CARD_TEXT(card, 2, "softsetp");
- if (IS_OSD(card) || IS_OSX(card)) {
- rc = qeth_l2_start_ipassists(card);
- if (rc)
- goto out_remove;
- }
-
rc = qeth_init_qdio_queues(card);
if (rc) {
QETH_CARD_TEXT_(card, 2, "6err%d", rc);
@@ -1162,9 +1138,9 @@ static void qeth_bridge_state_change_worker(struct work_struct *work)
/* Role should not change by itself, but if it did, */
/* information from the hardware is authoritative. */
- mutex_lock(&data->card->conf_mutex);
+ mutex_lock(&data->card->sbp_lock);
data->card->options.sbp.role = entry->role;
- mutex_unlock(&data->card->conf_mutex);
+ mutex_unlock(&data->card->sbp_lock);
snprintf(env_locrem, sizeof(env_locrem), "BRIDGEPORT=statechange");
snprintf(env_role, sizeof(env_role), "ROLE=%s",
@@ -1230,9 +1206,9 @@ static void qeth_bridge_host_event_worker(struct work_struct *work)
: (data->hostevs.lost_event_mask == 0x02)
? "Bridge port state change"
: "Unknown reason");
- mutex_lock(&data->card->conf_mutex);
+ mutex_lock(&data->card->sbp_lock);
data->card->options.sbp.hostnotification = 0;
- mutex_unlock(&data->card->conf_mutex);
+ mutex_unlock(&data->card->sbp_lock);
qeth_bridge_emit_host_event(data->card, anev_abort,
0, NULL, NULL);
} else
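The conversion from conf_mutex to a dedicated sbp_lock in the two workers above (and the nested locking added to the sysfs stores below) narrows what the bridgeport code serializes against. A sketch of the resulting lock hierarchy, with illustrative demo_* names:

#include <linux/mutex.h>

struct demo_card {
	struct mutex conf_mutex;	/* broad device configuration */
	struct mutex sbp_lock;		/* bridgeport (sbp) state only */
	int sbp_role;
};

/* Sysfs stores take both locks, in the order conf_mutex -> sbp_lock: */
static void demo_store_role(struct demo_card *card, int role)
{
	mutex_lock(&card->conf_mutex);
	mutex_lock(&card->sbp_lock);
	card->sbp_role = role;
	mutex_unlock(&card->sbp_lock);
	mutex_unlock(&card->conf_mutex);
}

/* Event workers that only touch sbp state take sbp_lock alone, so they
 * no longer contend with unrelated configuration changes:
 */
static void demo_worker_update_role(struct demo_card *card, int hw_role)
{
	mutex_lock(&card->sbp_lock);
	card->sbp_role = hw_role;
	mutex_unlock(&card->sbp_lock);
}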
diff --git a/drivers/s390/net/qeth_l2_sys.c b/drivers/s390/net/qeth_l2_sys.c
index f2c3b127b1e4..f70c7aac2dcc 100644
--- a/drivers/s390/net/qeth_l2_sys.c
+++ b/drivers/s390/net/qeth_l2_sys.c
@@ -18,12 +18,10 @@ static ssize_t qeth_bridge_port_role_state_show(struct device *dev,
int rc = 0;
char *word;
- if (!card)
- return -EINVAL;
-
if (qeth_l2_vnicc_is_in_use(card))
return sprintf(buf, "n/a (VNIC characteristics)\n");
+ mutex_lock(&card->sbp_lock);
if (qeth_card_hw_is_reachable(card) &&
card->options.sbp.supported_funcs)
rc = qeth_bridgeport_query_ports(card,
@@ -57,6 +55,7 @@ static ssize_t qeth_bridge_port_role_state_show(struct device *dev,
else
rc = sprintf(buf, "%s\n", word);
}
+ mutex_unlock(&card->sbp_lock);
return rc;
}
@@ -79,8 +78,6 @@ static ssize_t qeth_bridge_port_role_store(struct device *dev,
int rc = 0;
enum qeth_sbp_roles role;
- if (!card)
- return -EINVAL;
if (sysfs_streq(buf, "primary"))
role = QETH_SBP_ROLE_PRIMARY;
else if (sysfs_streq(buf, "secondary"))
@@ -91,6 +88,7 @@ static ssize_t qeth_bridge_port_role_store(struct device *dev,
return -EINVAL;
mutex_lock(&card->conf_mutex);
+ mutex_lock(&card->sbp_lock);
if (qeth_l2_vnicc_is_in_use(card))
rc = -EBUSY;
@@ -104,6 +102,7 @@ static ssize_t qeth_bridge_port_role_store(struct device *dev,
} else
card->options.sbp.role = role;
+ mutex_unlock(&card->sbp_lock);
mutex_unlock(&card->conf_mutex);
return rc ? rc : count;
@@ -132,9 +131,6 @@ static ssize_t qeth_bridgeport_hostnotification_show(struct device *dev,
struct qeth_card *card = dev_get_drvdata(dev);
int enabled;
- if (!card)
- return -EINVAL;
-
if (qeth_l2_vnicc_is_in_use(card))
return sprintf(buf, "n/a (VNIC characteristics)\n");
@@ -150,14 +146,12 @@ static ssize_t qeth_bridgeport_hostnotification_store(struct device *dev,
bool enable;
int rc;
- if (!card)
- return -EINVAL;
-
rc = kstrtobool(buf, &enable);
if (rc)
return rc;
mutex_lock(&card->conf_mutex);
+ mutex_lock(&card->sbp_lock);
if (qeth_l2_vnicc_is_in_use(card))
rc = -EBUSY;
@@ -168,6 +162,7 @@ static ssize_t qeth_bridgeport_hostnotification_store(struct device *dev,
} else
card->options.sbp.hostnotification = enable;
+ mutex_unlock(&card->sbp_lock);
mutex_unlock(&card->conf_mutex);
return rc ? rc : count;
@@ -183,9 +178,6 @@ static ssize_t qeth_bridgeport_reflect_show(struct device *dev,
struct qeth_card *card = dev_get_drvdata(dev);
char *state;
- if (!card)
- return -EINVAL;
-
if (qeth_l2_vnicc_is_in_use(card))
return sprintf(buf, "n/a (VNIC characteristics)\n");
@@ -207,9 +199,6 @@ static ssize_t qeth_bridgeport_reflect_store(struct device *dev,
int enable, primary;
int rc = 0;
- if (!card)
- return -EINVAL;
-
if (sysfs_streq(buf, "none")) {
enable = 0;
primary = 0;
@@ -223,6 +212,7 @@ static ssize_t qeth_bridgeport_reflect_store(struct device *dev,
return -EINVAL;
mutex_lock(&card->conf_mutex);
+ mutex_lock(&card->sbp_lock);
if (qeth_l2_vnicc_is_in_use(card))
rc = -EBUSY;
@@ -234,6 +224,7 @@ static ssize_t qeth_bridgeport_reflect_store(struct device *dev,
rc = 0;
}
+ mutex_unlock(&card->sbp_lock);
mutex_unlock(&card->conf_mutex);
return rc ? rc : count;
@@ -269,6 +260,8 @@ void qeth_l2_setup_bridgeport_attrs(struct qeth_card *card)
return;
if (!card->options.sbp.supported_funcs)
return;
+
+ mutex_lock(&card->sbp_lock);
if (card->options.sbp.role != QETH_SBP_ROLE_NONE) {
/* Conditional to avoid spurious error messages */
qeth_bridgeport_setrole(card, card->options.sbp.role);
@@ -280,8 +273,10 @@ void qeth_l2_setup_bridgeport_attrs(struct qeth_card *card)
rc = qeth_bridgeport_an_set(card, 1);
if (rc)
card->options.sbp.hostnotification = 0;
- } else
+ } else {
qeth_bridgeport_an_set(card, 0);
+ }
+ mutex_unlock(&card->sbp_lock);
}
/* VNIC CHARS support */
@@ -315,9 +310,6 @@ static ssize_t qeth_vnicc_timeout_show(struct device *dev,
u32 timeout;
int rc;
- if (!card)
- return -EINVAL;
-
rc = qeth_l2_vnicc_get_timeout(card, &timeout);
if (rc == -EBUSY)
return sprintf(buf, "n/a (BridgePort)\n");
@@ -335,9 +327,6 @@ static ssize_t qeth_vnicc_timeout_store(struct device *dev,
u32 timeout;
int rc;
- if (!card)
- return -EINVAL;
-
rc = kstrtou32(buf, 10, &timeout);
if (rc)
return rc;
@@ -357,9 +346,6 @@ static ssize_t qeth_vnicc_char_show(struct device *dev,
u32 vnicc;
int rc;
- if (!card)
- return -EINVAL;
-
vnicc = qeth_l2_vnicc_sysfs_attr_to_char(attr->attr.name);
rc = qeth_l2_vnicc_get_state(card, vnicc, &state);
@@ -380,9 +366,6 @@ static ssize_t qeth_vnicc_char_store(struct device *dev,
u32 vnicc;
int rc;
- if (!card)
- return -EINVAL;
-
if (kstrtobool(buf, &state))
return -EINVAL;
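The "if (!card) return -EINVAL;" checks removed throughout these sysfs handlers were dead code: the driver core only exposes a device's attributes after probe has run, and probe sets the drvdata before the attributes become visible. A sketch of why a show callback can rely on dev_get_drvdata() (demo_* names are illustrative):

#include <linux/kernel.h>
#include <linux/device.h>

struct demo_card {
	int some_flag;
};

static ssize_t demo_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	/* Non-NULL by construction: dev_set_drvdata() ran in probe,
	 * before this attribute was registered for userspace to read.
	 */
	struct demo_card *card = dev_get_drvdata(dev);

	return sprintf(buf, "%i\n", card->some_flag ? 1 : 0);
}
static DEVICE_ATTR(demo, 0444, demo_show, NULL);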
diff --git a/drivers/s390/net/qeth_l3.h b/drivers/s390/net/qeth_l3.h
index 87659cfc9066..5db04fe472c0 100644
--- a/drivers/s390/net/qeth_l3.h
+++ b/drivers/s390/net/qeth_l3.h
@@ -13,8 +13,6 @@
#include "qeth_core.h"
#include <linux/hashtable.h>
-#define QETH_SNIFF_AVAIL 0x0008
-
enum qeth_ip_types {
QETH_IP_TYPE_NORMAL,
QETH_IP_TYPE_VIPA,
@@ -24,7 +22,6 @@ enum qeth_ip_types {
struct qeth_ipaddr {
struct hlist_node hnode;
enum qeth_ip_types type;
- unsigned char mac[ETH_ALEN];
u8 is_multicast:1;
u8 in_progress:1;
u8 disp_flag:2;
@@ -37,7 +34,7 @@ struct qeth_ipaddr {
enum qeth_prot_versions proto;
union {
struct {
- unsigned int addr;
+ __be32 addr;
unsigned int mask;
} a4;
struct {
@@ -55,6 +52,7 @@ static inline void qeth_l3_init_ipaddr(struct qeth_ipaddr *addr,
addr->type = type;
addr->proto = proto;
addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
+ addr->ref_counter = 1;
}
static inline bool qeth_l3_addr_match_ip(struct qeth_ipaddr *a1,
@@ -74,12 +72,10 @@ static inline bool qeth_l3_addr_match_all(struct qeth_ipaddr *a1,
* so 'proto' and 'addr' match for sure.
*
* For ucast:
- * - 'mac' is always 0.
* - 'mask'/'pfxlen' for RXIP/VIPA is always 0. For NORMAL, matching
* values are required to avoid mixups in takeover eligibility.
*
* For mcast,
- * - 'mac' is mapped from the IP, and thus always matches.
* - 'mask'/'pfxlen' is always 0.
*/
if (a1->type != a2->type)
@@ -89,21 +85,12 @@ static inline bool qeth_l3_addr_match_all(struct qeth_ipaddr *a1,
return a1->u.a4.mask == a2->u.a4.mask;
}
-static inline u64 qeth_l3_ipaddr_hash(struct qeth_ipaddr *addr)
+static inline u32 qeth_l3_ipaddr_hash(struct qeth_ipaddr *addr)
{
- u64 ret = 0;
- u8 *point;
-
- if (addr->proto == QETH_PROT_IPV6) {
- point = (u8 *) &addr->u.a6.addr;
- ret = get_unaligned((u64 *)point) ^
- get_unaligned((u64 *) (point + 8));
- }
- if (addr->proto == QETH_PROT_IPV4) {
- point = (u8 *) &addr->u.a4.addr;
- ret = get_unaligned((u32 *) point);
- }
- return ret;
+ if (addr->proto == QETH_PROT_IPV6)
+ return ipv6_addr_hash(&addr->u.a6.addr);
+ else
+ return ipv4_addr_hash(addr->u.a4.addr);
}
struct qeth_ipato_entry {
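The rewritten qeth_l3_ipaddr_hash() above drops the hand-rolled unaligned reads in favor of the stock helpers, which also shrinks the key to the u32 that hash_add() folds anyway. How such a key feeds a kernel hashtable, as a sketch with illustrative demo_* names:

#include <linux/hashtable.h>
#include <net/ipv6.h>		/* ipv6_addr_hash() */
#include <net/addrconf.h>	/* ipv4_addr_hash() */

struct demo_addr {
	struct hlist_node hnode;
	__be32 v4;
};

static DEFINE_HASHTABLE(demo_ip_htable, 4);	/* 2^4 = 16 buckets */

static void demo_add(struct demo_addr *a)
{
	/* ipv4_addr_hash() simply reinterprets the address bits as a
	 * u32 key; hash_add() reduces the key to a bucket index.
	 */
	hash_add(demo_ip_htable, &a->hnode, ipv4_addr_hash(a->v4));
}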
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index d7bfc7a0e4c0..e7ce73b9f016 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -39,7 +39,6 @@
static int qeth_l3_set_offline(struct ccwgroup_device *);
-static void qeth_l3_set_rx_mode(struct net_device *dev);
static int qeth_l3_register_addr_entry(struct qeth_card *,
struct qeth_ipaddr *);
static int qeth_l3_deregister_addr_entry(struct qeth_card *,
@@ -64,19 +63,10 @@ void qeth_l3_ipaddr_to_string(enum qeth_prot_versions proto, const __u8 *addr,
qeth_l3_ipaddr6_to_string(addr, buf);
}
-static struct qeth_ipaddr *qeth_l3_get_addr_buffer(enum qeth_prot_versions prot)
-{
- struct qeth_ipaddr *addr = kmalloc(sizeof(*addr), GFP_ATOMIC);
-
- if (addr)
- qeth_l3_init_ipaddr(addr, QETH_IP_TYPE_NORMAL, prot);
- return addr;
-}
-
static struct qeth_ipaddr *qeth_l3_find_addr_by_ip(struct qeth_card *card,
struct qeth_ipaddr *query)
{
- u64 key = qeth_l3_ipaddr_hash(query);
+ u32 key = qeth_l3_ipaddr_hash(query);
struct qeth_ipaddr *addr;
if (query->is_multicast) {
@@ -217,13 +207,10 @@ static int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
"Registering IP address %s failed\n", buf);
return -EADDRINUSE;
} else {
- addr = qeth_l3_get_addr_buffer(tmp_addr->proto);
+ addr = kmemdup(tmp_addr, sizeof(*tmp_addr), GFP_KERNEL);
if (!addr)
return -ENOMEM;
- memcpy(addr, tmp_addr, sizeof(struct qeth_ipaddr));
- addr->ref_counter = 1;
-
if (qeth_l3_is_addr_covered_by_ipato(card, addr)) {
QETH_CARD_TEXT(card, 2, "tkovaddr");
addr->ipato = 1;
@@ -381,12 +368,13 @@ static int qeth_l3_send_setdelmc(struct qeth_card *card,
if (!iob)
return -ENOMEM;
cmd = __ipa_cmd(iob);
- ether_addr_copy(cmd->data.setdelipm.mac, addr->mac);
- if (addr->proto == QETH_PROT_IPV6)
- memcpy(cmd->data.setdelipm.ip6, &addr->u.a6.addr,
- sizeof(struct in6_addr));
- else
- memcpy(&cmd->data.setdelipm.ip4, &addr->u.a4.addr, 4);
+ if (addr->proto == QETH_PROT_IPV6) {
+ cmd->data.setdelipm.ip = addr->u.a6.addr;
+ ipv6_eth_mc_map(&addr->u.a6.addr, cmd->data.setdelipm.mac);
+ } else {
+ cmd->data.setdelipm.ip.s6_addr32[3] = addr->u.a4.addr;
+ ip_eth_mc_map(addr->u.a4.addr, cmd->data.setdelipm.mac);
+ }
return qeth_send_ipa_cmd(card, iob, qeth_l3_setdelip_cb, NULL);
}
@@ -953,8 +941,6 @@ static int qeth_l3_start_ipassists(struct qeth_card *card)
{
QETH_CARD_TEXT(card, 3, "strtipas");
- if (qeth_set_access_ctrl_online(card, 0))
- return -EIO;
qeth_l3_start_ipa_arp_processing(card); /* go on*/
qeth_l3_start_ipa_source_mac(card); /* go on*/
qeth_l3_start_ipa_vlan(card); /* go on*/
@@ -1115,176 +1101,83 @@ qeth_diags_trace(struct qeth_card *card, enum qeth_diags_trace_cmds diags_cmd)
return qeth_send_ipa_cmd(card, iob, qeth_diags_trace_cb, NULL);
}
-static void
-qeth_l3_add_mc_to_hash(struct qeth_card *card, struct in_device *in4_dev)
+static int qeth_l3_add_mcast_rtnl(struct net_device *dev, int vid, void *arg)
{
+ struct qeth_card *card = arg;
+ struct inet6_dev *in6_dev;
+ struct in_device *in4_dev;
+ struct qeth_ipaddr *ipm;
+ struct qeth_ipaddr tmp;
struct ip_mc_list *im4;
- struct qeth_ipaddr *tmp, *ipm;
+ struct ifmcaddr6 *im6;
QETH_CARD_TEXT(card, 4, "addmc");
- tmp = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
- if (!tmp)
- return;
+ if (!dev || !(dev->flags & IFF_UP))
+ goto out;
+
+ in4_dev = __in_dev_get_rtnl(dev);
+ if (!in4_dev)
+ goto walk_ipv6;
- for (im4 = rcu_dereference(in4_dev->mc_list); im4 != NULL;
- im4 = rcu_dereference(im4->next_rcu)) {
- ip_eth_mc_map(im4->multiaddr, tmp->mac);
- tmp->u.a4.addr = be32_to_cpu(im4->multiaddr);
- tmp->is_multicast = 1;
+ qeth_l3_init_ipaddr(&tmp, QETH_IP_TYPE_NORMAL, QETH_PROT_IPV4);
+ tmp.disp_flag = QETH_DISP_ADDR_ADD;
+ tmp.is_multicast = 1;
- ipm = qeth_l3_find_addr_by_ip(card, tmp);
+ for (im4 = rtnl_dereference(in4_dev->mc_list); im4 != NULL;
+ im4 = rtnl_dereference(im4->next_rcu)) {
+ tmp.u.a4.addr = im4->multiaddr;
+
+ ipm = qeth_l3_find_addr_by_ip(card, &tmp);
if (ipm) {
/* for mcast, by-IP match means full match */
ipm->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
- } else {
- ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
- if (!ipm)
- continue;
- ether_addr_copy(ipm->mac, tmp->mac);
- ipm->u.a4.addr = be32_to_cpu(im4->multiaddr);
- ipm->is_multicast = 1;
- ipm->disp_flag = QETH_DISP_ADDR_ADD;
- hash_add(card->ip_mc_htable,
- &ipm->hnode, qeth_l3_ipaddr_hash(ipm));
+ continue;
}
- }
-
- kfree(tmp);
-}
-/* called with rcu_read_lock */
-static void qeth_l3_add_vlan_mc(struct qeth_card *card)
-{
- struct in_device *in_dev;
- u16 vid;
-
- QETH_CARD_TEXT(card, 4, "addmcvl");
-
- if (!qeth_is_supported(card, IPA_FULL_VLAN))
- return;
-
- for_each_set_bit(vid, card->active_vlans, VLAN_N_VID) {
- struct net_device *netdev;
-
- netdev = __vlan_find_dev_deep_rcu(card->dev, htons(ETH_P_8021Q),
- vid);
- if (netdev == NULL ||
- !(netdev->flags & IFF_UP))
- continue;
- in_dev = __in_dev_get_rcu(netdev);
- if (!in_dev)
+ ipm = kmemdup(&tmp, sizeof(tmp), GFP_KERNEL);
+ if (!ipm)
continue;
- qeth_l3_add_mc_to_hash(card, in_dev);
- }
-}
-static void qeth_l3_add_multicast_ipv4(struct qeth_card *card)
-{
- struct in_device *in4_dev;
-
- QETH_CARD_TEXT(card, 4, "chkmcv4");
-
- rcu_read_lock();
- in4_dev = __in_dev_get_rcu(card->dev);
- if (in4_dev == NULL)
- goto unlock;
- qeth_l3_add_mc_to_hash(card, in4_dev);
- qeth_l3_add_vlan_mc(card);
-unlock:
- rcu_read_unlock();
-}
+ hash_add(card->ip_mc_htable, &ipm->hnode,
+ qeth_l3_ipaddr_hash(ipm));
+ }
-static void qeth_l3_add_mc6_to_hash(struct qeth_card *card,
- struct inet6_dev *in6_dev)
-{
- struct qeth_ipaddr *ipm;
- struct ifmcaddr6 *im6;
- struct qeth_ipaddr *tmp;
+walk_ipv6:
+ if (!qeth_is_supported(card, IPA_IPV6))
+ goto out;
- QETH_CARD_TEXT(card, 4, "addmc6");
+ in6_dev = __in6_dev_get(dev);
+ if (!in6_dev)
+ goto out;
- tmp = qeth_l3_get_addr_buffer(QETH_PROT_IPV6);
- if (!tmp)
- return;
+ qeth_l3_init_ipaddr(&tmp, QETH_IP_TYPE_NORMAL, QETH_PROT_IPV6);
+ tmp.disp_flag = QETH_DISP_ADDR_ADD;
+ tmp.is_multicast = 1;
+ read_lock_bh(&in6_dev->lock);
for (im6 = in6_dev->mc_list; im6 != NULL; im6 = im6->next) {
- ipv6_eth_mc_map(&im6->mca_addr, tmp->mac);
- memcpy(&tmp->u.a6.addr, &im6->mca_addr.s6_addr,
- sizeof(struct in6_addr));
- tmp->is_multicast = 1;
+ tmp.u.a6.addr = im6->mca_addr;
- ipm = qeth_l3_find_addr_by_ip(card, tmp);
+ ipm = qeth_l3_find_addr_by_ip(card, &tmp);
if (ipm) {
/* for mcast, by-IP match means full match */
ipm->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
continue;
}
- ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV6);
+ ipm = kmemdup(&tmp, sizeof(tmp), GFP_ATOMIC);
if (!ipm)
continue;
- ether_addr_copy(ipm->mac, tmp->mac);
- memcpy(&ipm->u.a6.addr, &im6->mca_addr.s6_addr,
- sizeof(struct in6_addr));
- ipm->is_multicast = 1;
- ipm->disp_flag = QETH_DISP_ADDR_ADD;
hash_add(card->ip_mc_htable,
&ipm->hnode, qeth_l3_ipaddr_hash(ipm));
}
- kfree(tmp);
-}
-
-/* called with rcu_read_lock */
-static void qeth_l3_add_vlan_mc6(struct qeth_card *card)
-{
- struct inet6_dev *in_dev;
- u16 vid;
-
- QETH_CARD_TEXT(card, 4, "admc6vl");
-
- if (!qeth_is_supported(card, IPA_FULL_VLAN))
- return;
-
- for_each_set_bit(vid, card->active_vlans, VLAN_N_VID) {
- struct net_device *netdev;
-
- netdev = __vlan_find_dev_deep_rcu(card->dev, htons(ETH_P_8021Q),
- vid);
- if (netdev == NULL ||
- !(netdev->flags & IFF_UP))
- continue;
- in_dev = in6_dev_get(netdev);
- if (!in_dev)
- continue;
- read_lock_bh(&in_dev->lock);
- qeth_l3_add_mc6_to_hash(card, in_dev);
- read_unlock_bh(&in_dev->lock);
- in6_dev_put(in_dev);
- }
-}
-
-static void qeth_l3_add_multicast_ipv6(struct qeth_card *card)
-{
- struct inet6_dev *in6_dev;
-
- QETH_CARD_TEXT(card, 4, "chkmcv6");
-
- if (!qeth_is_supported(card, IPA_IPV6))
- return ;
- in6_dev = in6_dev_get(card->dev);
- if (!in6_dev)
- return;
-
- rcu_read_lock();
- read_lock_bh(&in6_dev->lock);
- qeth_l3_add_mc6_to_hash(card, in6_dev);
- qeth_l3_add_vlan_mc6(card);
read_unlock_bh(&in6_dev->lock);
- rcu_read_unlock();
- in6_dev_put(in6_dev);
+
+out:
+ return 0;
}
static int qeth_l3_vlan_rx_add_vid(struct net_device *dev,
@@ -1292,7 +1185,7 @@ static int qeth_l3_vlan_rx_add_vid(struct net_device *dev,
{
struct qeth_card *card = dev->ml_priv;
- set_bit(vid, card->active_vlans);
+ QETH_CARD_TEXT_(card, 4, "aid:%d", vid);
return 0;
}
@@ -1302,9 +1195,6 @@ static int qeth_l3_vlan_rx_kill_vid(struct net_device *dev,
struct qeth_card *card = dev->ml_priv;
QETH_CARD_TEXT_(card, 4, "kid:%d", vid);
-
- clear_bit(vid, card->active_vlans);
- qeth_l3_set_rx_mode(dev);
return 0;
}
@@ -1372,7 +1262,6 @@ static int qeth_l3_process_inbound_buffer(struct qeth_card *card,
int work_done = 0;
struct sk_buff *skb;
struct qeth_hdr *hdr;
- unsigned int len;
*done = 0;
WARN_ON_ONCE(!budget);
@@ -1384,25 +1273,17 @@ static int qeth_l3_process_inbound_buffer(struct qeth_card *card,
*done = 1;
break;
}
- switch (hdr->hdr.l3.id) {
- case QETH_HEADER_TYPE_LAYER3:
+
+ if (hdr->hdr.l3.id == QETH_HEADER_TYPE_LAYER3)
qeth_l3_rebuild_skb(card, skb, hdr);
- /* fall through */
- case QETH_HEADER_TYPE_LAYER2: /* for HiperSockets sniffer */
- skb->protocol = eth_type_trans(skb, skb->dev);
- len = skb->len;
- napi_gro_receive(&card->napi, skb);
- break;
- default:
- dev_kfree_skb_any(skb);
- QETH_CARD_TEXT(card, 3, "inbunkno");
- QETH_DBF_HEX(CTRL, 3, hdr, sizeof(*hdr));
- continue;
- }
+
+ skb->protocol = eth_type_trans(skb, skb->dev);
+ QETH_CARD_STAT_INC(card, rx_packets);
+ QETH_CARD_STAT_ADD(card, rx_bytes, skb->len);
+
+ napi_gro_receive(&card->napi, skb);
work_done++;
budget--;
- QETH_CARD_STAT_INC(card, rx_packets);
- QETH_CARD_STAT_ADD(card, rx_bytes, len);
}
return work_done;
}
@@ -1468,8 +1349,11 @@ static void qeth_l3_rx_mode_work(struct work_struct *work)
QETH_CARD_TEXT(card, 3, "setmulti");
if (!card->options.sniffer) {
- qeth_l3_add_multicast_ipv4(card);
- qeth_l3_add_multicast_ipv6(card);
+ rtnl_lock();
+ qeth_l3_add_mcast_rtnl(card->dev, 0, card);
+ if (qeth_is_supported(card, IPA_FULL_VLAN))
+ vlan_for_each(card->dev, qeth_l3_add_mcast_rtnl, card);
+ rtnl_unlock();
hash_for_each_safe(card->ip_mc_htable, i, tmp, addr, hnode) {
switch (addr->disp_flag) {
@@ -2313,13 +2197,6 @@ static int qeth_l3_set_online(struct ccwgroup_device *gdev)
goto out_remove;
}
- if (qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP)) {
- if (card->info.hwtrap &&
- qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM))
- card->info.hwtrap = 0;
- } else
- card->info.hwtrap = 0;
-
card->state = CARD_STATE_HARDSETUP;
qeth_print_status_message(card);
@@ -2557,7 +2434,7 @@ static int qeth_l3_ip_event(struct notifier_block *this,
QETH_CARD_TEXT(card, 3, "ipevent");
qeth_l3_init_ipaddr(&addr, QETH_IP_TYPE_NORMAL, QETH_PROT_IPV4);
- addr.u.a4.addr = be32_to_cpu(ifa->ifa_address);
+ addr.u.a4.addr = ifa->ifa_address;
addr.u.a4.mask = be32_to_cpu(ifa->ifa_mask);
return qeth_l3_handle_ip_event(card, &addr, event);
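One idiom recurs in the qeth_l3_main.c hunks above: both qeth_l3_add_ip() and the multicast walk now clone a template address with kmemdup() instead of a bespoke allocate-then-memcpy helper. The idiom in isolation, with illustrative names:

#include <linux/slab.h>
#include <linux/string.h>	/* kmemdup() */

struct demo_addr {
	int type;
	__be32 v4;
};

/* kmemdup(src, len, gfp) is kmalloc(len, gfp) plus memcpy(), returning
 * NULL on allocation failure; the clone inherits every field of the
 * stack-based template, including flags set just beforehand.
 */
static struct demo_addr *demo_clone(const struct demo_addr *tmpl)
{
	return kmemdup(tmpl, sizeof(*tmpl), GFP_KERNEL);
}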
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c
index 2f73b33c9347..f9067ed6c7d3 100644
--- a/drivers/s390/net/qeth_l3_sys.c
+++ b/drivers/s390/net/qeth_l3_sys.c
@@ -60,9 +60,6 @@ static ssize_t qeth_l3_dev_route4_show(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
-
return qeth_l3_dev_route_show(card, &card->options.route4, buf);
}
@@ -109,9 +106,6 @@ static ssize_t qeth_l3_dev_route4_store(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
-
return qeth_l3_dev_route_store(card, &card->options.route4,
QETH_PROT_IPV4, buf, count);
}
@@ -124,9 +118,6 @@ static ssize_t qeth_l3_dev_route6_show(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
-
return qeth_l3_dev_route_show(card, &card->options.route6, buf);
}
@@ -135,9 +126,6 @@ static ssize_t qeth_l3_dev_route6_store(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
-
return qeth_l3_dev_route_store(card, &card->options.route6,
QETH_PROT_IPV6, buf, count);
}
@@ -150,9 +138,6 @@ static ssize_t qeth_l3_dev_fake_broadcast_show(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
-
return sprintf(buf, "%i\n", card->options.fake_broadcast? 1:0);
}
@@ -163,9 +148,6 @@ static ssize_t qeth_l3_dev_fake_broadcast_store(struct device *dev,
char *tmp;
int i, rc = 0;
- if (!card)
- return -EINVAL;
-
mutex_lock(&card->conf_mutex);
if (card->state != CARD_STATE_DOWN) {
rc = -EPERM;
@@ -190,9 +172,6 @@ static ssize_t qeth_l3_dev_sniffer_show(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
-
return sprintf(buf, "%i\n", card->options.sniffer ? 1 : 0);
}
@@ -203,9 +182,6 @@ static ssize_t qeth_l3_dev_sniffer_store(struct device *dev,
int rc = 0;
unsigned long i;
- if (!card)
- return -EINVAL;
-
if (!IS_IQD(card))
return -EPERM;
if (card->options.cq == QETH_CQ_ENABLED)
@@ -228,7 +204,7 @@ static ssize_t qeth_l3_dev_sniffer_store(struct device *dev,
break;
case 1:
qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd);
- if (card->ssqd.qdioac2 & QETH_SNIFF_AVAIL) {
+ if (card->ssqd.qdioac2 & CHSC_AC2_SNIFFER_AVAILABLE) {
card->options.sniffer = i;
if (card->qdio.init_pool.buf_count !=
QETH_IN_BUF_COUNT_MAX)
@@ -248,16 +224,12 @@ out:
static DEVICE_ATTR(sniffer, 0644, qeth_l3_dev_sniffer_show,
qeth_l3_dev_sniffer_store);
-
static ssize_t qeth_l3_dev_hsuid_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct qeth_card *card = dev_get_drvdata(dev);
char tmp_hsuid[9];
- if (!card)
- return -EINVAL;
-
if (!IS_IQD(card))
return -EPERM;
@@ -273,9 +245,6 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev,
char *tmp;
int rc;
- if (!card)
- return -EINVAL;
-
if (!IS_IQD(card))
return -EPERM;
if (card->state != CARD_STATE_DOWN)
@@ -336,9 +305,6 @@ static ssize_t qeth_l3_dev_ipato_enable_show(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
-
return sprintf(buf, "%i\n", card->ipato.enabled? 1:0);
}
@@ -349,9 +315,6 @@ static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev,
bool enable;
int rc = 0;
- if (!card)
- return -EINVAL;
-
mutex_lock(&card->conf_mutex);
if (card->state != CARD_STATE_DOWN) {
rc = -EPERM;
@@ -385,9 +348,6 @@ static ssize_t qeth_l3_dev_ipato_invert4_show(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
-
return sprintf(buf, "%i\n", card->ipato.invert4? 1:0);
}
@@ -399,9 +359,6 @@ static ssize_t qeth_l3_dev_ipato_invert4_store(struct device *dev,
bool invert;
int rc = 0;
- if (!card)
- return -EINVAL;
-
mutex_lock(&card->conf_mutex);
if (sysfs_streq(buf, "toggle")) {
invert = !card->ipato.invert4;
@@ -460,9 +417,6 @@ static ssize_t qeth_l3_dev_ipato_add4_show(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
-
return qeth_l3_dev_ipato_add_show(buf, card, QETH_PROT_IPV4);
}
@@ -528,9 +482,6 @@ static ssize_t qeth_l3_dev_ipato_add4_store(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
-
return qeth_l3_dev_ipato_add_store(buf, count, card, QETH_PROT_IPV4);
}
@@ -558,9 +509,6 @@ static ssize_t qeth_l3_dev_ipato_del4_store(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
-
return qeth_l3_dev_ipato_del_store(buf, count, card, QETH_PROT_IPV4);
}
@@ -572,9 +520,6 @@ static ssize_t qeth_l3_dev_ipato_invert6_show(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
-
return sprintf(buf, "%i\n", card->ipato.invert6? 1:0);
}
@@ -585,9 +530,6 @@ static ssize_t qeth_l3_dev_ipato_invert6_store(struct device *dev,
bool invert;
int rc = 0;
- if (!card)
- return -EINVAL;
-
mutex_lock(&card->conf_mutex);
if (sysfs_streq(buf, "toggle")) {
invert = !card->ipato.invert6;
@@ -617,9 +559,6 @@ static ssize_t qeth_l3_dev_ipato_add6_show(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
-
return qeth_l3_dev_ipato_add_show(buf, card, QETH_PROT_IPV6);
}
@@ -628,9 +567,6 @@ static ssize_t qeth_l3_dev_ipato_add6_store(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
-
return qeth_l3_dev_ipato_add_store(buf, count, card, QETH_PROT_IPV6);
}
@@ -643,9 +579,6 @@ static ssize_t qeth_l3_dev_ipato_del6_store(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
-
return qeth_l3_dev_ipato_del_store(buf, count, card, QETH_PROT_IPV6);
}
@@ -679,9 +612,6 @@ static ssize_t qeth_l3_dev_ip_add_show(struct device *dev, char *buf,
int entry_len; /* length of 1 entry string, differs between v4 and v6 */
int i;
- if (!card)
- return -EINVAL;
-
entry_len = (proto == QETH_PROT_IPV4)? 12 : 40;
entry_len += 2; /* \n + terminator */
mutex_lock(&card->ip_lock);
@@ -741,9 +671,6 @@ static ssize_t qeth_l3_dev_vipa_add4_store(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
-
return qeth_l3_dev_vipa_add_store(buf, count, card, QETH_PROT_IPV4);
}
@@ -771,9 +698,6 @@ static ssize_t qeth_l3_dev_vipa_del4_store(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
-
return qeth_l3_dev_vipa_del_store(buf, count, card, QETH_PROT_IPV4);
}
@@ -793,9 +717,6 @@ static ssize_t qeth_l3_dev_vipa_add6_store(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
-
return qeth_l3_dev_vipa_add_store(buf, count, card, QETH_PROT_IPV6);
}
@@ -808,9 +729,6 @@ static ssize_t qeth_l3_dev_vipa_del6_store(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
-
return qeth_l3_dev_vipa_del_store(buf, count, card, QETH_PROT_IPV6);
}
@@ -884,9 +802,6 @@ static ssize_t qeth_l3_dev_rxip_add4_store(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
-
return qeth_l3_dev_rxip_add_store(buf, count, card, QETH_PROT_IPV4);
}
@@ -914,9 +829,6 @@ static ssize_t qeth_l3_dev_rxip_del4_store(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
-
return qeth_l3_dev_rxip_del_store(buf, count, card, QETH_PROT_IPV4);
}
@@ -936,9 +848,6 @@ static ssize_t qeth_l3_dev_rxip_add6_store(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
-
return qeth_l3_dev_rxip_add_store(buf, count, card, QETH_PROT_IPV6);
}
@@ -951,9 +860,6 @@ static ssize_t qeth_l3_dev_rxip_del6_store(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (!card)
- return -EINVAL;
-
return qeth_l3_dev_rxip_del_store(buf, count, card, QETH_PROT_IPV6);
}
diff --git a/drivers/scsi/a3000.c b/drivers/scsi/a3000.c
index 222c77c9621f..b6a0432f305a 100644
--- a/drivers/scsi/a3000.c
+++ b/drivers/scsi/a3000.c
@@ -39,7 +39,7 @@ static irqreturn_t a3000_intr(int irq, void *data)
spin_unlock_irqrestore(instance->host_lock, flags);
return IRQ_HANDLED;
}
- pr_warning("Non-serviced A3000 SCSI-interrupt? ISTR = %02x\n", status);
+ pr_warn("Non-serviced A3000 SCSI-interrupt? ISTR = %02x\n", status);
return IRQ_NONE;
}
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index 6afad68e5ba2..238240984bc1 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -76,9 +76,11 @@ qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
* ensures no active vp_list traversal while the vport is removed
* from the queue)
*/
- for (i = 0; i < 10 && atomic_read(&vha->vref_count); i++)
- wait_event_timeout(vha->vref_waitq,
- atomic_read(&vha->vref_count), HZ);
+ for (i = 0; i < 10; i++) {
+ if (wait_event_timeout(vha->vref_waitq,
+ !atomic_read(&vha->vref_count), HZ) > 0)
+ break;
+ }
spin_lock_irqsave(&ha->vport_slock, flags);
if (atomic_read(&vha->vref_count)) {
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 337162ac3a77..726ad4cbf4a6 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -1119,9 +1119,11 @@ qla2x00_wait_for_sess_deletion(scsi_qla_host_t *vha)
qla2x00_mark_all_devices_lost(vha, 0);
- for (i = 0; i < 10; i++)
- wait_event_timeout(vha->fcport_waitQ, test_fcport_count(vha),
- HZ);
+ for (i = 0; i < 10; i++) {
+ if (wait_event_timeout(vha->fcport_waitQ,
+ test_fcport_count(vha), HZ) > 0)
+ break;
+ }
flush_workqueue(vha->hw->wq);
}
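Both qla2xxx hunks above fix the same bug: the old loops passed a condition to wait_event_timeout() but ignored its return value (and in qla_mid.c even waited on the wrong polarity of vref_count). The corrected shape, as a standalone sketch with illustrative names:

#include <linux/wait.h>
#include <linux/jiffies.h>
#include <linux/atomic.h>

/* wait_event_timeout() returns 0 on timeout and the remaining jiffies
 * (>= 1) once the condition became true, so a positive return means
 * "done" and the bounded retry loop can stop early. Worst case this
 * waits 10 * HZ jiffies, i.e. roughly ten seconds.
 */
static void demo_wait_for_drain(wait_queue_head_t *wq, atomic_t *refs)
{
	int i;

	for (i = 0; i < 10; i++) {
		if (wait_event_timeout(*wq, !atomic_read(refs), HZ) > 0)
			break;
	}
}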
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 1f5b5c8a7f72..7a1b6c76f263 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -434,8 +434,8 @@ static void scsi_update_vpd_page(struct scsi_device *sdev, u8 page,
return;
mutex_lock(&sdev->inquiry_mutex);
- rcu_swap_protected(*sdev_vpd_buf, vpd_buf,
- lockdep_is_held(&sdev->inquiry_mutex));
+ vpd_buf = rcu_replace_pointer(*sdev_vpd_buf, vpd_buf,
+ lockdep_is_held(&sdev->inquiry_mutex));
mutex_unlock(&sdev->inquiry_mutex);
if (vpd_buf)
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 5447738906ac..91c007d26c1e 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1883,7 +1883,8 @@ int scsi_mq_setup_tags(struct Scsi_Host *shost)
{
unsigned int cmd_size, sgl_size;
- sgl_size = scsi_mq_inline_sgl_size(shost);
+ sgl_size = max_t(unsigned int, sizeof(struct scatterlist),
+ scsi_mq_inline_sgl_size(shost));
cmd_size = sizeof(struct scsi_cmnd) + shost->hostt->cmd_size + sgl_size;
if (scsi_host_get_prot(shost))
cmd_size += sizeof(struct scsi_data_buffer) +
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 6d7362e7367e..cc51f4756077 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -466,10 +466,10 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work)
sdev->request_queue = NULL;
mutex_lock(&sdev->inquiry_mutex);
- rcu_swap_protected(sdev->vpd_pg80, vpd_pg80,
- lockdep_is_held(&sdev->inquiry_mutex));
- rcu_swap_protected(sdev->vpd_pg83, vpd_pg83,
- lockdep_is_held(&sdev->inquiry_mutex));
+ vpd_pg80 = rcu_replace_pointer(sdev->vpd_pg80, vpd_pg80,
+ lockdep_is_held(&sdev->inquiry_mutex));
+ vpd_pg83 = rcu_replace_pointer(sdev->vpd_pg83, vpd_pg83,
+ lockdep_is_held(&sdev->inquiry_mutex));
mutex_unlock(&sdev->inquiry_mutex);
if (vpd_pg83)
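rcu_swap_protected() updated its first argument in place; its replacement rcu_replace_pointer() instead returns the old pointer so the caller assigns it explicitly, hence the "vpd_buf = ..." and "vpd_pg80 = ..." rewrites above. The idiom end to end, as a sketch with illustrative names:

#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct demo {
	struct mutex lock;
	int __rcu *val;
};

static void demo_update(struct demo *d, int *newp)
{
	int *old;

	mutex_lock(&d->lock);
	/* Publish newp and receive the previously published pointer;
	 * lockdep_is_held() documents the lock guarding the update.
	 */
	old = rcu_replace_pointer(d->val, newp, lockdep_is_held(&d->lock));
	mutex_unlock(&d->lock);

	if (old) {
		synchronize_rcu();	/* wait out existing readers */
		kfree(old);
	}
}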
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index ebb40160539f..470ee6dc3f7e 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1291,9 +1291,17 @@ static blk_status_t sd_init_command(struct scsi_cmnd *cmd)
case REQ_OP_WRITE:
return sd_setup_read_write_cmnd(cmd);
case REQ_OP_ZONE_RESET:
- return sd_zbc_setup_reset_cmnd(cmd, false);
+ return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_RESET_WRITE_POINTER,
+ false);
case REQ_OP_ZONE_RESET_ALL:
- return sd_zbc_setup_reset_cmnd(cmd, true);
+ return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_RESET_WRITE_POINTER,
+ true);
+ case REQ_OP_ZONE_OPEN:
+ return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_OPEN_ZONE, false);
+ case REQ_OP_ZONE_CLOSE:
+ return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_CLOSE_ZONE, false);
+ case REQ_OP_ZONE_FINISH:
+ return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_FINISH_ZONE, false);
default:
WARN_ON_ONCE(1);
return BLK_STS_NOTSUPP;
@@ -1961,6 +1969,9 @@ static int sd_done(struct scsi_cmnd *SCpnt)
case REQ_OP_WRITE_SAME:
case REQ_OP_ZONE_RESET:
case REQ_OP_ZONE_RESET_ALL:
+ case REQ_OP_ZONE_OPEN:
+ case REQ_OP_ZONE_CLOSE:
+ case REQ_OP_ZONE_FINISH:
if (!result) {
good_bytes = blk_rq_bytes(req);
scsi_set_resid(SCpnt, 0);
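With REQ_OP_ZONE_OPEN/CLOSE/FINISH now dispatched to sd_zbc_setup_zone_mgmt_cmnd() above, upper layers reach these paths through the block layer's zone-management entry point. A caller-side sketch, assuming the blkdev_zone_mgmt() interface introduced alongside these operations:

#include <linux/blkdev.h>

/* Explicitly open the zone of @zone_sectors sectors starting at
 * @sector; on a SCSI ZBC disk the request resolves to a
 * ZBC_OUT/ZO_OPEN_ZONE CDB via the sd hooks above.
 */
static int demo_open_zone(struct block_device *bdev, sector_t sector,
			  sector_t zone_sectors)
{
	return blkdev_zone_mgmt(bdev, REQ_OP_ZONE_OPEN, sector,
				zone_sectors, GFP_KERNEL);
}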
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 1eab779f812b..42fd3f00e4a5 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -209,11 +209,12 @@ static inline int sd_is_zoned(struct scsi_disk *sdkp)
extern int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buffer);
extern void sd_zbc_print_zones(struct scsi_disk *sdkp);
-extern blk_status_t sd_zbc_setup_reset_cmnd(struct scsi_cmnd *cmd, bool all);
+blk_status_t sd_zbc_setup_zone_mgmt_cmnd(struct scsi_cmnd *cmd,
+ unsigned char op, bool all);
extern void sd_zbc_complete(struct scsi_cmnd *cmd, unsigned int good_bytes,
struct scsi_sense_hdr *sshdr);
-extern int sd_zbc_report_zones(struct gendisk *disk, sector_t sector,
- struct blk_zone *zones, unsigned int *nr_zones);
+int sd_zbc_report_zones(struct gendisk *disk, sector_t sector,
+ unsigned int nr_zones, report_zones_cb cb, void *data);
#else /* CONFIG_BLK_DEV_ZONED */
@@ -225,8 +226,9 @@ static inline int sd_zbc_read_zones(struct scsi_disk *sdkp,
static inline void sd_zbc_print_zones(struct scsi_disk *sdkp) {}
-static inline blk_status_t sd_zbc_setup_reset_cmnd(struct scsi_cmnd *cmd,
- bool all)
+static inline blk_status_t sd_zbc_setup_zone_mgmt_cmnd(struct scsi_cmnd *cmd,
+ unsigned char op,
+ bool all)
{
return BLK_STS_TARGET;
}
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
index de4019dc0f0b..0e5ede48f045 100644
--- a/drivers/scsi/sd_zbc.c
+++ b/drivers/scsi/sd_zbc.c
@@ -19,34 +19,27 @@
#include "sd.h"
-/**
- * sd_zbc_parse_report - Convert a zone descriptor to a struct blk_zone,
- * @sdkp: The disk the report originated from
- * @buf: Address of the report zone descriptor
- * @zone: the destination zone structure
- *
- * All LBA sized values are converted to 512B sectors unit.
- */
-static void sd_zbc_parse_report(struct scsi_disk *sdkp, u8 *buf,
- struct blk_zone *zone)
+static int sd_zbc_parse_report(struct scsi_disk *sdkp, u8 *buf,
+ unsigned int idx, report_zones_cb cb, void *data)
{
struct scsi_device *sdp = sdkp->device;
+ struct blk_zone zone = { 0 };
- memset(zone, 0, sizeof(struct blk_zone));
-
- zone->type = buf[0] & 0x0f;
- zone->cond = (buf[1] >> 4) & 0xf;
+ zone.type = buf[0] & 0x0f;
+ zone.cond = (buf[1] >> 4) & 0xf;
if (buf[1] & 0x01)
- zone->reset = 1;
+ zone.reset = 1;
if (buf[1] & 0x02)
- zone->non_seq = 1;
-
- zone->len = logical_to_sectors(sdp, get_unaligned_be64(&buf[8]));
- zone->start = logical_to_sectors(sdp, get_unaligned_be64(&buf[16]));
- zone->wp = logical_to_sectors(sdp, get_unaligned_be64(&buf[24]));
- if (zone->type != ZBC_ZONE_TYPE_CONV &&
- zone->cond == ZBC_ZONE_COND_FULL)
- zone->wp = zone->start + zone->len;
+ zone.non_seq = 1;
+
+ zone.len = logical_to_sectors(sdp, get_unaligned_be64(&buf[8]));
+ zone.start = logical_to_sectors(sdp, get_unaligned_be64(&buf[16]));
+ zone.wp = logical_to_sectors(sdp, get_unaligned_be64(&buf[24]));
+ if (zone.type != ZBC_ZONE_TYPE_CONV &&
+ zone.cond == ZBC_ZONE_COND_FULL)
+ zone.wp = zone.start + zone.len;
+
+ return cb(&zone, idx, data);
}
/**
@@ -104,11 +97,6 @@ static int sd_zbc_do_report_zones(struct scsi_disk *sdkp, unsigned char *buf,
return 0;
}
-/*
- * Maximum number of zones to get with one report zones command.
- */
-#define SD_ZBC_REPORT_MAX_ZONES 8192U
-
/**
* Allocate a buffer for report zones reply.
* @sdkp: The target disk
@@ -138,82 +126,94 @@ static void *sd_zbc_alloc_report_buffer(struct scsi_disk *sdkp,
* sure that the allocated buffer can always be mapped by limiting the
* number of pages allocated to the HBA max segments limit.
*/
- nr_zones = min(nr_zones, SD_ZBC_REPORT_MAX_ZONES);
- bufsize = roundup((nr_zones + 1) * 64, 512);
+ nr_zones = min(nr_zones, sdkp->nr_zones);
+ bufsize = roundup((nr_zones + 1) * 64, SECTOR_SIZE);
bufsize = min_t(size_t, bufsize,
queue_max_hw_sectors(q) << SECTOR_SHIFT);
bufsize = min_t(size_t, bufsize, queue_max_segments(q) << PAGE_SHIFT);
- buf = vzalloc(bufsize);
- if (buf)
- *buflen = bufsize;
+ while (bufsize >= SECTOR_SIZE) {
+ buf = __vmalloc(bufsize,
+ GFP_KERNEL | __GFP_ZERO | __GFP_NORETRY,
+ PAGE_KERNEL);
+ if (buf) {
+ *buflen = bufsize;
+ return buf;
+ }
+ bufsize >>= 1;
+ }
- return buf;
+ return NULL;
}
/**
- * sd_zbc_report_zones - Disk report zones operation.
- * @disk: The target disk
- * @sector: Start 512B sector of the report
- * @zones: Array of zone descriptors
- * @nr_zones: Number of descriptors in the array
- *
- * Execute a report zones command on the target disk.
+ * sd_zbc_zone_sectors - Get the device zone size in number of 512B sectors.
+ * @sdkp: The target disk
*/
+static inline sector_t sd_zbc_zone_sectors(struct scsi_disk *sdkp)
+{
+ return logical_to_sectors(sdkp->device, sdkp->zone_blocks);
+}
+
int sd_zbc_report_zones(struct gendisk *disk, sector_t sector,
- struct blk_zone *zones, unsigned int *nr_zones)
+ unsigned int nr_zones, report_zones_cb cb, void *data)
{
struct scsi_disk *sdkp = scsi_disk(disk);
- unsigned int i, nrz = *nr_zones;
+ unsigned int nr, i;
unsigned char *buf;
- size_t buflen = 0, offset = 0;
- int ret = 0;
+ size_t offset, buflen = 0;
+ int zone_idx = 0;
+ int ret;
if (!sd_is_zoned(sdkp))
/* Not a zoned device */
return -EOPNOTSUPP;
- buf = sd_zbc_alloc_report_buffer(sdkp, nrz, &buflen);
+ buf = sd_zbc_alloc_report_buffer(sdkp, nr_zones, &buflen);
if (!buf)
return -ENOMEM;
- ret = sd_zbc_do_report_zones(sdkp, buf, buflen,
- sectors_to_logical(sdkp->device, sector), true);
- if (ret)
- goto out;
+ while (zone_idx < nr_zones && sector < get_capacity(disk)) {
+ ret = sd_zbc_do_report_zones(sdkp, buf, buflen,
+ sectors_to_logical(sdkp->device, sector), true);
+ if (ret)
+ goto out;
+
+ offset = 0;
+ nr = min(nr_zones, get_unaligned_be32(&buf[0]) / 64);
+ if (!nr)
+ break;
+
+ for (i = 0; i < nr && zone_idx < nr_zones; i++) {
+ offset += 64;
+ ret = sd_zbc_parse_report(sdkp, buf + offset, zone_idx,
+ cb, data);
+ if (ret)
+ goto out;
+ zone_idx++;
+ }
- nrz = min(nrz, get_unaligned_be32(&buf[0]) / 64);
- for (i = 0; i < nrz; i++) {
- offset += 64;
- sd_zbc_parse_report(sdkp, buf + offset, zones);
- zones++;
+ sector += sd_zbc_zone_sectors(sdkp) * i;
}
- *nr_zones = nrz;
-
+ ret = zone_idx;
out:
kvfree(buf);
-
return ret;
}
/**
- * sd_zbc_zone_sectors - Get the device zone size in number of 512B sectors.
- * @sdkp: The target disk
- */
-static inline sector_t sd_zbc_zone_sectors(struct scsi_disk *sdkp)
-{
- return logical_to_sectors(sdkp->device, sdkp->zone_blocks);
-}
-
-/**
- * sd_zbc_setup_reset_cmnd - Prepare a RESET WRITE POINTER scsi command.
+ * sd_zbc_setup_zone_mgmt_cmnd - Prepare a zone ZBC_OUT command. The operations
+ * can be RESET WRITE POINTER, OPEN, CLOSE or FINISH.
* @cmd: the command to setup
- * @all: Reset all zones control.
+ * @op: Operation to be performed
+ * @all: All zones control
*
- * Called from sd_init_command() for a REQ_OP_ZONE_RESET request.
+ * Called from sd_init_command() for REQ_OP_ZONE_RESET, REQ_OP_ZONE_RESET_ALL,
+ * REQ_OP_ZONE_OPEN, REQ_OP_ZONE_CLOSE or REQ_OP_ZONE_FINISH requests.
*/
-blk_status_t sd_zbc_setup_reset_cmnd(struct scsi_cmnd *cmd, bool all)
+blk_status_t sd_zbc_setup_zone_mgmt_cmnd(struct scsi_cmnd *cmd,
+ unsigned char op, bool all)
{
struct request *rq = cmd->request;
struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
@@ -234,7 +234,7 @@ blk_status_t sd_zbc_setup_reset_cmnd(struct scsi_cmnd *cmd, bool all)
cmd->cmd_len = 16;
memset(cmd->cmnd, 0, cmd->cmd_len);
cmd->cmnd[0] = ZBC_OUT;
- cmd->cmnd[1] = ZO_RESET_WRITE_POINTER;
+ cmd->cmnd[1] = op;
if (all)
cmd->cmnd[14] = 0x1;
else
@@ -263,25 +263,16 @@ void sd_zbc_complete(struct scsi_cmnd *cmd, unsigned int good_bytes,
int result = cmd->result;
struct request *rq = cmd->request;
- switch (req_op(rq)) {
- case REQ_OP_ZONE_RESET:
- case REQ_OP_ZONE_RESET_ALL:
-
- if (result &&
- sshdr->sense_key == ILLEGAL_REQUEST &&
- sshdr->asc == 0x24)
- /*
- * INVALID FIELD IN CDB error: reset of a conventional
- * zone was attempted. Nothing to worry about, so be
- * quiet about the error.
- */
- rq->rq_flags |= RQF_QUIET;
- break;
-
- case REQ_OP_WRITE:
- case REQ_OP_WRITE_ZEROES:
- case REQ_OP_WRITE_SAME:
- break;
+ if (op_is_zone_mgmt(req_op(rq)) &&
+ result &&
+ sshdr->sense_key == ILLEGAL_REQUEST &&
+ sshdr->asc == 0x24) {
+ /*
+ * INVALID FIELD IN CDB error: a zone management command was
+ * attempted on a conventional zone. Nothing to worry about,
+ * so be quiet about the error.
+ */
+ rq->rq_flags |= RQF_QUIET;
}
}
@@ -344,32 +335,18 @@ static int sd_zbc_check_zoned_characteristics(struct scsi_disk *sdkp,
* Returns the zone size in number of blocks upon success or an error code
* upon failure.
*/
-static int sd_zbc_check_zones(struct scsi_disk *sdkp, u32 *zblocks)
+static int sd_zbc_check_zones(struct scsi_disk *sdkp, unsigned char *buf,
+ u32 *zblocks)
{
- size_t bufsize, buflen;
- unsigned int noio_flag;
u64 zone_blocks = 0;
- sector_t max_lba, block = 0;
- unsigned char *buf;
+ sector_t max_lba;
unsigned char *rec;
int ret;
- u8 same;
-
- /* Do all memory allocations as if GFP_NOIO was specified */
- noio_flag = memalloc_noio_save();
-
- /* Get a buffer */
- buf = sd_zbc_alloc_report_buffer(sdkp, SD_ZBC_REPORT_MAX_ZONES,
- &bufsize);
- if (!buf) {
- ret = -ENOMEM;
- goto out;
- }
- /* Do a report zone to get max_lba and the same field */
- ret = sd_zbc_do_report_zones(sdkp, buf, bufsize, 0, false);
+ /* Do a report zone to get max_lba and the size of the first zone */
+ ret = sd_zbc_do_report_zones(sdkp, buf, SD_BUF_SIZE, 0, false);
if (ret)
- goto out_free;
+ return ret;
if (sdkp->rc_basis == 0) {
/* The max_lba field is the capacity of this device */
@@ -384,82 +361,27 @@ static int sd_zbc_check_zones(struct scsi_disk *sdkp, u32 *zblocks)
}
}
- /*
- * Check same field: for any value other than 0, we know that all zones
- * have the same size.
- */
- same = buf[4] & 0x0f;
- if (same > 0) {
- rec = &buf[64];
- zone_blocks = get_unaligned_be64(&rec[8]);
- goto out;
- }
-
- /*
- * Check the size of all zones: all zones must be of
- * equal size, except the last zone which can be smaller
- * than other zones.
- */
- do {
-
- /* Parse REPORT ZONES header */
- buflen = min_t(size_t, get_unaligned_be32(&buf[0]) + 64,
- bufsize);
- rec = buf + 64;
-
- /* Parse zone descriptors */
- while (rec < buf + buflen) {
- u64 this_zone_blocks = get_unaligned_be64(&rec[8]);
-
- if (zone_blocks == 0) {
- zone_blocks = this_zone_blocks;
- } else if (this_zone_blocks != zone_blocks &&
- (block + this_zone_blocks < sdkp->capacity
- || this_zone_blocks > zone_blocks)) {
- zone_blocks = 0;
- goto out;
- }
- block += this_zone_blocks;
- rec += 64;
- }
-
- if (block < sdkp->capacity) {
- ret = sd_zbc_do_report_zones(sdkp, buf, bufsize, block,
- true);
- if (ret)
- goto out_free;
- }
-
- } while (block < sdkp->capacity);
-
-out:
- if (!zone_blocks) {
- if (sdkp->first_scan)
- sd_printk(KERN_NOTICE, sdkp,
- "Devices with non constant zone "
- "size are not supported\n");
- ret = -ENODEV;
- } else if (!is_power_of_2(zone_blocks)) {
+ /* Parse REPORT ZONES header */
+ rec = buf + 64;
+ zone_blocks = get_unaligned_be64(&rec[8]);
+ if (!zone_blocks || !is_power_of_2(zone_blocks)) {
if (sdkp->first_scan)
sd_printk(KERN_NOTICE, sdkp,
"Devices with non power of 2 zone "
"size are not supported\n");
- ret = -ENODEV;
- } else if (logical_to_sectors(sdkp->device, zone_blocks) > UINT_MAX) {
+ return -ENODEV;
+ }
+
+ if (logical_to_sectors(sdkp->device, zone_blocks) > UINT_MAX) {
if (sdkp->first_scan)
sd_printk(KERN_NOTICE, sdkp,
"Zone size too large\n");
- ret = -EFBIG;
- } else {
- *zblocks = zone_blocks;
- ret = 0;
+ return -EFBIG;
}
-out_free:
- memalloc_noio_restore(noio_flag);
- kvfree(buf);
+ *zblocks = zone_blocks;
- return ret;
+ return 0;
}
int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf)
@@ -485,7 +407,7 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf)
* Check zone size: only devices with a constant zone size (except
* an eventual last runt zone) that is a power of 2 are supported.
*/
- ret = sd_zbc_check_zones(sdkp, &zone_blocks);
+ ret = sd_zbc_check_zones(sdkp, buf, &zone_blocks);
if (ret != 0)
goto err;
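The report_zones rework above replaces the fill-an-array contract with a callback: sd_zbc_report_zones() now parses each 64-byte descriptor and hands a struct blk_zone to the caller-supplied report_zones_cb, looping over REPORT ZONES commands until nr_zones are delivered or the capacity is reached. What a consumer of the callback interface looks like, as a sketch with illustrative names:

#include <linux/blkdev.h>

/* Per the report_zones_cb contract, return 0 to continue the walk and
 * non-zero to stop it; @idx counts zones delivered so far.
 */
static int demo_count_exp_open(struct blk_zone *zone, unsigned int idx,
			       void *data)
{
	unsigned int *count = data;

	if (zone->cond == BLK_ZONE_COND_EXP_OPEN)
		(*count)++;
	return 0;
}

/* e.g.: blkdev_report_zones(bdev, 0, nr, demo_count_exp_open, &count); */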
diff --git a/drivers/sh/intc/core.c b/drivers/sh/intc/core.c
index 46f0f322d4d8..8485e812d9b2 100644
--- a/drivers/sh/intc/core.c
+++ b/drivers/sh/intc/core.c
@@ -100,8 +100,8 @@ static void __init intc_register_irq(struct intc_desc *desc,
primary = 1;
if (!data[0] && !data[1])
- pr_warning("missing unique irq mask for irq %d (vect 0x%04x)\n",
- irq, irq2evt(irq));
+ pr_warn("missing unique irq mask for irq %d (vect 0x%04x)\n",
+ irq, irq2evt(irq));
data[0] = data[0] ? data[0] : intc_get_mask_handle(desc, d, enum_id, 1);
data[1] = data[1] ? data[1] : intc_get_prio_handle(desc, d, enum_id, 1);
diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
index bf68d86d80ee..1e164e03410a 100644
--- a/drivers/soc/fsl/qbman/qman.c
+++ b/drivers/soc/fsl/qbman/qman.c
@@ -1749,6 +1749,13 @@ struct qman_portal *qman_get_affine_portal(int cpu)
}
EXPORT_SYMBOL(qman_get_affine_portal);
+int qman_start_using_portal(struct qman_portal *p, struct device *dev)
+{
+ return (!device_link_add(dev, p->config->dev,
+ DL_FLAG_AUTOREMOVE_CONSUMER)) ? -EINVAL : 0;
+}
+EXPORT_SYMBOL(qman_start_using_portal);
+
int qman_p_poll_dqrr(struct qman_portal *p, unsigned int limit)
{
return __poll_portal_fast(p, limit);
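qman_start_using_portal() above leans on the driver-model device link API: device_link_add(consumer, supplier, flags) returns the new (or already existing) link, or NULL on failure, and DL_FLAG_AUTOREMOVE_CONSUMER tears the link down automatically when the consumer unbinds. A minimal consumer-side sketch:

#include <linux/device.h>

static int demo_bind_to_supplier(struct device *consumer,
				 struct device *supplier)
{
	/* Orders PM and unbinding: the supplier stays available at
	 * least as long as the consumer holds the link.
	 */
	if (!device_link_add(consumer, supplier, DL_FLAG_AUTOREMOVE_CONSUMER))
		return -EINVAL;
	return 0;
}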
diff --git a/drivers/soundwire/Kconfig b/drivers/soundwire/Kconfig
index c8c80df090d1..c725d0a8b288 100644
--- a/drivers/soundwire/Kconfig
+++ b/drivers/soundwire/Kconfig
@@ -24,7 +24,7 @@ config SOUNDWIRE_CADENCE
config SOUNDWIRE_INTEL
tristate "Intel SoundWire Master driver"
select SOUNDWIRE_CADENCE
- depends on X86 && ACPI && SND_SOC
+ depends on ACPI && SND_SOC
help
SoundWire Intel Master driver.
If you have an Intel platform which has a SoundWire Master then
diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c
index fc53dbe57f85..be5d437058ed 100644
--- a/drivers/soundwire/bus.c
+++ b/drivers/soundwire/bus.c
@@ -422,10 +422,11 @@ static struct sdw_slave *sdw_get_slave(struct sdw_bus *bus, int i)
static int sdw_compare_devid(struct sdw_slave *slave, struct sdw_slave_id id)
{
- if (slave->id.unique_id != id.unique_id ||
- slave->id.mfg_id != id.mfg_id ||
+ if (slave->id.mfg_id != id.mfg_id ||
slave->id.part_id != id.part_id ||
- slave->id.class_id != id.class_id)
+ slave->id.class_id != id.class_id ||
+ (slave->id.unique_id != SDW_IGNORED_UNIQUE_ID &&
+ slave->id.unique_id != id.unique_id))
return -ENODEV;
return 0;
diff --git a/drivers/soundwire/cadence_master.c b/drivers/soundwire/cadence_master.c
index 502ed4ec8f07..fed21e2b2277 100644
--- a/drivers/soundwire/cadence_master.c
+++ b/drivers/soundwire/cadence_master.c
@@ -183,9 +183,6 @@ MODULE_PARM_DESC(cdns_mcp_int_mask, "Cadence MCP IntMask");
#define CDNS_DEFAULT_SSP_INTERVAL 0x18
#define CDNS_TX_TIMEOUT 2000
-#define CDNS_PCM_PDI_OFFSET 0x2
-#define CDNS_PDM_PDI_OFFSET 0x6
-
#define CDNS_SCP_RX_FIFOLEVEL 0x2
/*
@@ -232,6 +229,22 @@ static int cdns_clear_bit(struct sdw_cdns *cdns, int offset, u32 value)
}
/*
+ * all changes to the MCP_CONFIG, MCP_CONTROL, MCP_CMDCTRL and MCP_PHYCTRL
+ * need to be confirmed with a write to MCP_CONFIG_UPDATE
+ */
+static int cdns_update_config(struct sdw_cdns *cdns)
+{
+ int ret;
+
+ ret = cdns_clear_bit(cdns, CDNS_MCP_CONFIG_UPDATE,
+ CDNS_MCP_CONFIG_UPDATE_BIT);
+ if (ret < 0)
+ dev_err(cdns->dev, "Config update timed out\n");
+
+ return ret;
+}
+
+/*
* debugfs
*/
#ifdef CONFIG_DEBUG_FS
@@ -279,11 +292,7 @@ static int cdns_reg_show(struct seq_file *s, void *data)
ret += scnprintf(buf + ret, RD_BUF - ret,
"\nDPn B0 Registers\n");
- /*
- * in sdw_cdns_pdi_init() we filter out the Bulk PDIs,
- * so the indices need to be corrected again
- */
- num_ports = cdns->num_ports + CDNS_PCM_PDI_OFFSET;
+ num_ports = cdns->num_ports;
for (i = 0; i < num_ports; i++) {
ret += scnprintf(buf + ret, RD_BUF - ret,
@@ -324,6 +333,26 @@ static int cdns_reg_show(struct seq_file *s, void *data)
}
DEFINE_SHOW_ATTRIBUTE(cdns_reg);
+static int cdns_hw_reset(void *data, u64 value)
+{
+ struct sdw_cdns *cdns = data;
+ int ret;
+
+ if (value != 1)
+ return -EINVAL;
+
+ /* Userspace changed the hardware state behind the kernel's back */
+ add_taint(TAINT_USER, LOCKDEP_STILL_OK);
+
+ ret = sdw_cdns_exit_reset(cdns);
+
+ dev_dbg(cdns->dev, "link hw_reset done: %d\n", ret);
+
+ return ret;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(cdns_hw_reset_fops, NULL, cdns_hw_reset, "%llu\n");
+
/**
* sdw_cdns_debugfs_init() - Cadence debugfs init
* @cdns: Cadence instance
@@ -332,6 +361,9 @@ DEFINE_SHOW_ATTRIBUTE(cdns_reg);
void sdw_cdns_debugfs_init(struct sdw_cdns *cdns, struct dentry *root)
{
debugfs_create_file("cdns-registers", 0400, root, cdns, &cdns_reg_fops);
+
+ debugfs_create_file("cdns-hw-reset", 0200, root, cdns,
+ &cdns_hw_reset_fops);
}
EXPORT_SYMBOL_GPL(sdw_cdns_debugfs_init);
@@ -752,14 +784,48 @@ EXPORT_SYMBOL(sdw_cdns_thread);
/*
* init routines
*/
-static int _cdns_enable_interrupt(struct sdw_cdns *cdns)
+
+/**
+ * sdw_cdns_exit_reset() - Program reset parameters and start bus operations
+ * @cdns: Cadence instance
+ */
+int sdw_cdns_exit_reset(struct sdw_cdns *cdns)
{
- u32 mask;
+ /* program maximum length reset to be safe */
+ cdns_updatel(cdns, CDNS_MCP_CONTROL,
+ CDNS_MCP_CONTROL_RST_DELAY,
+ CDNS_MCP_CONTROL_RST_DELAY);
+
+ /* use hardware generated reset */
+ cdns_updatel(cdns, CDNS_MCP_CONTROL,
+ CDNS_MCP_CONTROL_HW_RST,
+ CDNS_MCP_CONTROL_HW_RST);
+
+ /* enable bus operations with clock and data */
+ cdns_updatel(cdns, CDNS_MCP_CONFIG,
+ CDNS_MCP_CONFIG_OP,
+ CDNS_MCP_CONFIG_OP_NORMAL);
+
+ /* commit changes */
+ return cdns_update_config(cdns);
+}
+EXPORT_SYMBOL(sdw_cdns_exit_reset);
- cdns_writel(cdns, CDNS_MCP_SLAVE_INTMASK0,
- CDNS_MCP_SLAVE_INTMASK0_MASK);
- cdns_writel(cdns, CDNS_MCP_SLAVE_INTMASK1,
- CDNS_MCP_SLAVE_INTMASK1_MASK);
+/**
+ * sdw_cdns_enable_interrupt() - Enable SDW interrupts and update config
+ * @cdns: Cadence instance
+ */
+int sdw_cdns_enable_interrupt(struct sdw_cdns *cdns, bool state)
+{
+ u32 slave_intmask0 = 0;
+ u32 slave_intmask1 = 0;
+ u32 mask = 0;
+
+ if (!state)
+ goto update_masks;
+
+ slave_intmask0 = CDNS_MCP_SLAVE_INTMASK0_MASK;
+ slave_intmask1 = CDNS_MCP_SLAVE_INTMASK1_MASK;
/* enable detection of all slave state changes */
mask = CDNS_MCP_INT_SLAVE_MASK;
@@ -782,26 +848,13 @@ static int _cdns_enable_interrupt(struct sdw_cdns *cdns)
if (interrupt_mask) /* parameter override */
mask = interrupt_mask;
+update_masks:
+ cdns_writel(cdns, CDNS_MCP_SLAVE_INTMASK0, slave_intmask0);
+ cdns_writel(cdns, CDNS_MCP_SLAVE_INTMASK1, slave_intmask1);
cdns_writel(cdns, CDNS_MCP_INTMASK, mask);
- return 0;
-}
-
-/**
- * sdw_cdns_enable_interrupt() - Enable SDW interrupts and update config
- * @cdns: Cadence instance
- */
-int sdw_cdns_enable_interrupt(struct sdw_cdns *cdns)
-{
- int ret;
-
- _cdns_enable_interrupt(cdns);
- ret = cdns_clear_bit(cdns, CDNS_MCP_CONFIG_UPDATE,
- CDNS_MCP_CONFIG_UPDATE_BIT);
- if (ret < 0)
- dev_err(cdns->dev, "Config update timedout\n");
-
- return ret;
+ /* commit changes */
+ return cdns_update_config(cdns);
}
EXPORT_SYMBOL(sdw_cdns_enable_interrupt);
@@ -821,7 +874,6 @@ static int cdns_allocate_pdi(struct sdw_cdns *cdns,
for (i = 0; i < num; i++) {
pdi[i].num = i + pdi_offset;
- pdi[i].assigned = false;
}
*stream = pdi;
@@ -838,7 +890,8 @@ int sdw_cdns_pdi_init(struct sdw_cdns *cdns,
struct sdw_cdns_stream_config config)
{
struct sdw_cdns_streams *stream;
- int offset, i, ret;
+ int offset;
+ int ret;
cdns->pcm.num_bd = config.pcm_bd;
cdns->pcm.num_in = config.pcm_in;
@@ -850,11 +903,8 @@ int sdw_cdns_pdi_init(struct sdw_cdns *cdns,
/* Allocate PDIs for PCMs */
stream = &cdns->pcm;
- /* First two PDIs are reserved for bulk transfers */
- if (stream->num_bd < CDNS_PCM_PDI_OFFSET)
- return -EINVAL;
- stream->num_bd -= CDNS_PCM_PDI_OFFSET;
- offset = CDNS_PCM_PDI_OFFSET;
+ /* we allocate PDI0 and PDI1 which are used for Bulk */
+ offset = 0;
ret = cdns_allocate_pdi(cdns, &stream->bd,
stream->num_bd, offset);
@@ -881,7 +931,6 @@ int sdw_cdns_pdi_init(struct sdw_cdns *cdns,
/* Allocate PDIs for PDMs */
stream = &cdns->pdm;
- offset = CDNS_PDM_PDI_OFFSET;
ret = cdns_allocate_pdi(cdns, &stream->bd,
stream->num_bd, offset);
if (ret)
@@ -898,6 +947,9 @@ int sdw_cdns_pdi_init(struct sdw_cdns *cdns,
ret = cdns_allocate_pdi(cdns, &stream->out,
stream->num_out, offset);
+
+ offset += stream->num_out;
+
if (ret)
return ret;
@@ -905,18 +957,6 @@ int sdw_cdns_pdi_init(struct sdw_cdns *cdns,
stream->num_pdi = stream->num_bd + stream->num_in + stream->num_out;
cdns->num_ports += stream->num_pdi;
- cdns->ports = devm_kcalloc(cdns->dev, cdns->num_ports,
- sizeof(*cdns->ports), GFP_KERNEL);
- if (!cdns->ports) {
- ret = -ENOMEM;
- return ret;
- }
-
- for (i = 0; i < cdns->num_ports; i++) {
- cdns->ports[i].assigned = false;
- cdns->ports[i].num = i + 1; /* Port 0 reserved for bulk */
- }
-
return 0;
}
EXPORT_SYMBOL(sdw_cdns_pdi_init);
@@ -939,7 +979,7 @@ static u32 cdns_set_initial_frame_shape(int n_rows, int n_cols)
* sdw_cdns_init() - Cadence initialization
* @cdns: Cadence instance
*/
-int sdw_cdns_init(struct sdw_cdns *cdns)
+int sdw_cdns_init(struct sdw_cdns *cdns, bool clock_stop_exit)
{
struct sdw_bus *bus = &cdns->bus;
struct sdw_master_prop *prop = &bus->prop;
@@ -947,12 +987,13 @@ int sdw_cdns_init(struct sdw_cdns *cdns)
int divider;
int ret;
- /* Exit clock stop */
- ret = cdns_clear_bit(cdns, CDNS_MCP_CONTROL,
- CDNS_MCP_CONTROL_CLK_STOP_CLR);
- if (ret < 0) {
- dev_err(cdns->dev, "Couldn't exit from clock stop\n");
- return ret;
+ if (clock_stop_exit) {
+ ret = cdns_clear_bit(cdns, CDNS_MCP_CONTROL,
+ CDNS_MCP_CONTROL_CLK_STOP_CLR);
+ if (ret < 0) {
+ dev_err(cdns->dev, "Couldn't exit from clock stop\n");
+ return ret;
+ }
}
/* Set clock divider */
@@ -975,6 +1016,10 @@ int sdw_cdns_init(struct sdw_cdns *cdns)
cdns_writel(cdns, CDNS_MCP_SSP_CTRL0, CDNS_DEFAULT_SSP_INTERVAL);
cdns_writel(cdns, CDNS_MCP_SSP_CTRL1, CDNS_DEFAULT_SSP_INTERVAL);
+ /* flush command FIFOs */
+ cdns_updatel(cdns, CDNS_MCP_CONTROL, CDNS_MCP_CONTROL_CMD_RST,
+ CDNS_MCP_CONTROL_CMD_RST);
+
/* Set cmd accept mode */
cdns_updatel(cdns, CDNS_MCP_CONTROL, CDNS_MCP_CONTROL_CMD_ACCEPT,
CDNS_MCP_CONTROL_CMD_ACCEPT);
@@ -997,13 +1042,10 @@ int sdw_cdns_init(struct sdw_cdns *cdns)
/* Set cmd mode for Tx and Rx cmds */
val &= ~CDNS_MCP_CONFIG_CMD;
- /* Set operation to normal */
- val &= ~CDNS_MCP_CONFIG_OP;
- val |= CDNS_MCP_CONFIG_OP_NORMAL;
-
cdns_writel(cdns, CDNS_MCP_CONFIG, val);
- return 0;
+ /* commit changes */
+ return cdns_update_config(cdns);
}
EXPORT_SYMBOL(sdw_cdns_init);
@@ -1185,20 +1227,20 @@ EXPORT_SYMBOL(cdns_set_sdw_stream);
* @num: Number of PDIs
* @pdi: PDI instances
*
- * Find and return a free PDI for a given PDI array
+ * Find the PDI in a given PDI array whose num matches dai_id;
+ * return NULL if there is no match.
*/
static struct sdw_cdns_pdi *cdns_find_pdi(struct sdw_cdns *cdns,
+ unsigned int offset,
unsigned int num,
- struct sdw_cdns_pdi *pdi)
+ struct sdw_cdns_pdi *pdi,
+ int dai_id)
{
int i;
- for (i = 0; i < num; i++) {
- if (pdi[i].assigned)
- continue;
- pdi[i].assigned = true;
- return &pdi[i];
- }
+ for (i = offset; i < offset + num; i++)
+ if (pdi[i].num == dai_id)
+ return &pdi[i];
return NULL;
}
@@ -1207,13 +1249,11 @@ static struct sdw_cdns_pdi *cdns_find_pdi(struct sdw_cdns *cdns,
* sdw_cdns_config_stream: Configure a stream
*
* @cdns: Cadence instance
- * @port: Cadence data port
* @ch: Channel count
* @dir: Data direction
* @pdi: PDI to be used
*/
void sdw_cdns_config_stream(struct sdw_cdns *cdns,
- struct sdw_cdns_port *port,
u32 ch, u32 dir, struct sdw_cdns_pdi *pdi)
{
u32 offset, val = 0;
@@ -1221,113 +1261,51 @@ void sdw_cdns_config_stream(struct sdw_cdns *cdns,
if (dir == SDW_DATA_DIR_RX)
val = CDNS_PORTCTRL_DIRN;
- offset = CDNS_PORTCTRL + port->num * CDNS_PORT_OFFSET;
+ offset = CDNS_PORTCTRL + pdi->num * CDNS_PORT_OFFSET;
cdns_updatel(cdns, offset, CDNS_PORTCTRL_DIRN, val);
- val = port->num;
+ val = pdi->num;
val |= ((1 << ch) - 1) << SDW_REG_SHIFT(CDNS_PDI_CONFIG_CHANNEL);
cdns_writel(cdns, CDNS_PDI_CONFIG(pdi->num), val);
}
EXPORT_SYMBOL(sdw_cdns_config_stream);
/**
- * cdns_get_num_pdi() - Get number of PDIs required
- *
- * @cdns: Cadence instance
- * @pdi: PDI to be used
- * @num: Number of PDIs
- * @ch_count: Channel count
- */
-static int cdns_get_num_pdi(struct sdw_cdns *cdns,
- struct sdw_cdns_pdi *pdi,
- unsigned int num, u32 ch_count)
-{
- int i, pdis = 0;
-
- for (i = 0; i < num; i++) {
- if (pdi[i].assigned)
- continue;
-
- if (pdi[i].ch_count < ch_count)
- ch_count -= pdi[i].ch_count;
- else
- ch_count = 0;
-
- pdis++;
-
- if (!ch_count)
- break;
- }
-
- if (ch_count)
- return 0;
-
- return pdis;
-}
-
-/**
- * sdw_cdns_get_stream() - Get stream information
- *
- * @cdns: Cadence instance
- * @stream: Stream to be allocated
- * @ch: Channel count
- * @dir: Data direction
- */
-int sdw_cdns_get_stream(struct sdw_cdns *cdns,
- struct sdw_cdns_streams *stream,
- u32 ch, u32 dir)
-{
- int pdis = 0;
-
- if (dir == SDW_DATA_DIR_RX)
- pdis = cdns_get_num_pdi(cdns, stream->in, stream->num_in, ch);
- else
- pdis = cdns_get_num_pdi(cdns, stream->out, stream->num_out, ch);
-
- /* check if we found PDI, else find in bi-directional */
- if (!pdis)
- pdis = cdns_get_num_pdi(cdns, stream->bd, stream->num_bd, ch);
-
- return pdis;
-}
-EXPORT_SYMBOL(sdw_cdns_get_stream);
-
-/**
- * sdw_cdns_alloc_stream() - Allocate a stream
+ * sdw_cdns_alloc_pdi() - Allocate a PDI
*
* @cdns: Cadence instance
* @stream: Stream to be allocated
- * @port: Cadence data port
* @ch: Channel count
* @dir: Data direction
*/
-int sdw_cdns_alloc_stream(struct sdw_cdns *cdns,
- struct sdw_cdns_streams *stream,
- struct sdw_cdns_port *port, u32 ch, u32 dir)
+struct sdw_cdns_pdi *sdw_cdns_alloc_pdi(struct sdw_cdns *cdns,
+ struct sdw_cdns_streams *stream,
+ u32 ch, u32 dir, int dai_id)
{
struct sdw_cdns_pdi *pdi = NULL;
if (dir == SDW_DATA_DIR_RX)
- pdi = cdns_find_pdi(cdns, stream->num_in, stream->in);
+ pdi = cdns_find_pdi(cdns, 0, stream->num_in, stream->in,
+ dai_id);
else
- pdi = cdns_find_pdi(cdns, stream->num_out, stream->out);
+ pdi = cdns_find_pdi(cdns, 0, stream->num_out, stream->out,
+ dai_id);
/* check if we found a PDI, else find in bi-directional */
if (!pdi)
- pdi = cdns_find_pdi(cdns, stream->num_bd, stream->bd);
-
- if (!pdi)
- return -EIO;
-
- port->pdi = pdi;
- pdi->l_ch_num = 0;
- pdi->h_ch_num = ch - 1;
- pdi->dir = dir;
- pdi->ch_count = ch;
+ pdi = cdns_find_pdi(cdns, 2, stream->num_bd, stream->bd,
+ dai_id);
+
+ if (pdi) {
+ pdi->l_ch_num = 0;
+ pdi->h_ch_num = ch - 1;
+ pdi->dir = dir;
+ pdi->ch_count = ch;
+ }
- return 0;
+ return pdi;
}
-EXPORT_SYMBOL(sdw_cdns_alloc_stream);
+EXPORT_SYMBOL(sdw_cdns_alloc_pdi);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Cadence Soundwire Library");
diff --git a/drivers/soundwire/cadence_master.h b/drivers/soundwire/cadence_master.h
index 0b72b7094735..001457cbe5ad 100644
--- a/drivers/soundwire/cadence_master.h
+++ b/drivers/soundwire/cadence_master.h
@@ -8,7 +8,6 @@
/**
* struct sdw_cdns_pdi: PDI (Physical Data Interface) instance
*
- * @assigned: pdi assigned
* @num: pdi number
* @intel_alh_id: link identifier
* @l_ch_num: low channel for PDI
@@ -18,7 +17,6 @@
* @type: stream type, PDM or PCM
*/
struct sdw_cdns_pdi {
- bool assigned;
int num;
int intel_alh_id;
int l_ch_num;
@@ -29,23 +27,6 @@ struct sdw_cdns_pdi {
};
/**
- * struct sdw_cdns_port: Cadence port structure
- *
- * @num: port number
- * @assigned: port assigned
- * @ch: channel count
- * @direction: data port direction
- * @pdi: pdi for this port
- */
-struct sdw_cdns_port {
- unsigned int num;
- bool assigned;
- unsigned int ch;
- enum sdw_data_direction direction;
- struct sdw_cdns_pdi *pdi;
-};
-
-/**
* struct sdw_cdns_streams: Cadence stream data structure
*
* @num_bd: number of bidirectional streams
@@ -95,8 +76,8 @@ struct sdw_cdns_stream_config {
* struct sdw_cdns_dma_data: Cadence DMA data
*
* @name: SoundWire stream name
- * @nr_ports: Number of ports
- * @port: Ports
+ * @stream: stream runtime
+ * @pdi: PDI used for this dai
* @bus: Bus handle
* @stream_type: Stream type
* @link_id: Master link id
@@ -104,8 +85,7 @@ struct sdw_cdns_stream_config {
struct sdw_cdns_dma_data {
char *name;
struct sdw_stream_runtime *stream;
- int nr_ports;
- struct sdw_cdns_port **port;
+ struct sdw_cdns_pdi *pdi;
struct sdw_bus *bus;
enum sdw_stream_type stream_type;
int link_id;
@@ -158,10 +138,11 @@ extern struct sdw_master_ops sdw_cdns_master_ops;
irqreturn_t sdw_cdns_irq(int irq, void *dev_id);
irqreturn_t sdw_cdns_thread(int irq, void *dev_id);
-int sdw_cdns_init(struct sdw_cdns *cdns);
+int sdw_cdns_init(struct sdw_cdns *cdns, bool clock_stop_exit);
int sdw_cdns_pdi_init(struct sdw_cdns *cdns,
struct sdw_cdns_stream_config config);
-int sdw_cdns_enable_interrupt(struct sdw_cdns *cdns);
+int sdw_cdns_exit_reset(struct sdw_cdns *cdns);
+int sdw_cdns_enable_interrupt(struct sdw_cdns *cdns, bool state);
#ifdef CONFIG_DEBUG_FS
void sdw_cdns_debugfs_init(struct sdw_cdns *cdns, struct dentry *root);
@@ -170,10 +151,10 @@ void sdw_cdns_debugfs_init(struct sdw_cdns *cdns, struct dentry *root);
int sdw_cdns_get_stream(struct sdw_cdns *cdns,
struct sdw_cdns_streams *stream,
u32 ch, u32 dir);
-int sdw_cdns_alloc_stream(struct sdw_cdns *cdns,
- struct sdw_cdns_streams *stream,
- struct sdw_cdns_port *port, u32 ch, u32 dir);
-void sdw_cdns_config_stream(struct sdw_cdns *cdns, struct sdw_cdns_port *port,
+struct sdw_cdns_pdi *sdw_cdns_alloc_pdi(struct sdw_cdns *cdns,
+ struct sdw_cdns_streams *stream,
+ u32 ch, u32 dir, int dai_id);
+void sdw_cdns_config_stream(struct sdw_cdns *cdns,
u32 ch, u32 dir, struct sdw_cdns_pdi *pdi);
int sdw_cdns_pcm_set_stream(struct snd_soc_dai *dai,
diff --git a/drivers/soundwire/intel.c b/drivers/soundwire/intel.c
index 13c54eac0cc3..99dc61021211 100644
--- a/drivers/soundwire/intel.c
+++ b/drivers/soundwire/intel.c
@@ -10,6 +10,7 @@
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/interrupt.h>
+#include <linux/io.h>
#include <linux/platform_device.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
@@ -479,7 +480,10 @@ intel_pdi_shim_configure(struct sdw_intel *sdw, struct sdw_cdns_pdi *pdi)
unsigned int link_id = sdw->instance;
int pdi_conf = 0;
- pdi->intel_alh_id = (link_id * 16) + pdi->num + 5;
+ /* the Bulk and PCM streams are not contiguous */
+ pdi->intel_alh_id = (link_id * 16) + pdi->num + 3;
+ if (pdi->num >= 2)
+ pdi->intel_alh_id += 2;
/*
* Program stream parameters to stream SHIM register
@@ -508,7 +512,10 @@ intel_pdi_alh_configure(struct sdw_intel *sdw, struct sdw_cdns_pdi *pdi)
unsigned int link_id = sdw->instance;
unsigned int conf;
- pdi->intel_alh_id = (link_id * 16) + pdi->num + 5;
+ /* the Bulk and PCM streams are not contiguous */
+ pdi->intel_alh_id = (link_id * 16) + pdi->num + 3;
+ if (pdi->num >= 2)
+ pdi->intel_alh_id += 2;
/* Program Stream config ALH register */
conf = intel_readl(alh, SDW_ALH_STRMZCFG(pdi->intel_alh_id));
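The same mapping appears in both the SHIM and ALH configuration paths. A worked example for a hypothetical link 1:

    /* (link_id * 16) + pdi->num + 3, plus 2 for PDIs >= 2:
     *   PDI 0 -> 16 + 0 + 3     = 19   (Bulk)
     *   PDI 1 -> 16 + 1 + 3     = 20   (Bulk)
     *   PDI 2 -> 16 + 2 + 3 + 2 = 23   (PCM)
     *   PDI 3 -> 16 + 3 + 3 + 2 = 24   (PCM)
     * ALH stream IDs 21-22 fall in the non-contiguous gap.
     */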
@@ -603,66 +610,6 @@ static int intel_post_bank_switch(struct sdw_bus *bus)
* DAI routines
*/
-static struct sdw_cdns_port *intel_alloc_port(struct sdw_intel *sdw,
- u32 ch, u32 dir, bool pcm)
-{
- struct sdw_cdns *cdns = &sdw->cdns;
- struct sdw_cdns_port *port = NULL;
- int i, ret = 0;
-
- for (i = 0; i < cdns->num_ports; i++) {
- if (cdns->ports[i].assigned)
- continue;
-
- port = &cdns->ports[i];
- port->assigned = true;
- port->direction = dir;
- port->ch = ch;
- break;
- }
-
- if (!port) {
- dev_err(cdns->dev, "Unable to find a free port\n");
- return NULL;
- }
-
- if (pcm) {
- ret = sdw_cdns_alloc_stream(cdns, &cdns->pcm, port, ch, dir);
- if (ret)
- goto out;
-
- intel_pdi_shim_configure(sdw, port->pdi);
- sdw_cdns_config_stream(cdns, port, ch, dir, port->pdi);
-
- intel_pdi_alh_configure(sdw, port->pdi);
-
- } else {
- ret = sdw_cdns_alloc_stream(cdns, &cdns->pdm, port, ch, dir);
- }
-
-out:
- if (ret) {
- port->assigned = false;
- port = NULL;
- }
-
- return port;
-}
-
-static void intel_port_cleanup(struct sdw_cdns_dma_data *dma)
-{
- int i;
-
- for (i = 0; i < dma->nr_ports; i++) {
- if (dma->port[i]) {
- dma->port[i]->pdi->assigned = false;
- dma->port[i]->pdi = NULL;
- dma->port[i]->assigned = false;
- dma->port[i] = NULL;
- }
- }
-}
-
static int intel_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
@@ -670,9 +617,11 @@ static int intel_hw_params(struct snd_pcm_substream *substream,
struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
struct sdw_intel *sdw = cdns_to_intel(cdns);
struct sdw_cdns_dma_data *dma;
+ struct sdw_cdns_pdi *pdi;
struct sdw_stream_config sconfig;
struct sdw_port_config *pconfig;
- int ret, i, ch, dir;
+ int ch, dir;
+ int ret;
bool pcm = true;
dma = snd_soc_dai_get_dma_data(dai, substream);
@@ -685,38 +634,30 @@ static int intel_hw_params(struct snd_pcm_substream *substream,
else
dir = SDW_DATA_DIR_TX;
- if (dma->stream_type == SDW_STREAM_PDM) {
- /* TODO: Check whether PDM decimator is already in use */
- dma->nr_ports = sdw_cdns_get_stream(cdns, &cdns->pdm, ch, dir);
+ if (dma->stream_type == SDW_STREAM_PDM)
pcm = false;
- } else {
- dma->nr_ports = sdw_cdns_get_stream(cdns, &cdns->pcm, ch, dir);
- }
- if (!dma->nr_ports) {
- dev_err(dai->dev, "ports/resources not available\n");
- return -EINVAL;
+ if (pcm)
+ pdi = sdw_cdns_alloc_pdi(cdns, &cdns->pcm, ch, dir, dai->id);
+ else
+ pdi = sdw_cdns_alloc_pdi(cdns, &cdns->pdm, ch, dir, dai->id);
+
+ if (!pdi) {
+ ret = -EINVAL;
+ goto error;
}
- dma->port = kcalloc(dma->nr_ports, sizeof(*dma->port), GFP_KERNEL);
- if (!dma->port)
- return -ENOMEM;
+ /* do run-time configurations for SHIM, ALH and PDI/PORT */
+ intel_pdi_shim_configure(sdw, pdi);
+ intel_pdi_alh_configure(sdw, pdi);
+ sdw_cdns_config_stream(cdns, ch, dir, pdi);
- for (i = 0; i < dma->nr_ports; i++) {
- dma->port[i] = intel_alloc_port(sdw, ch, dir, pcm);
- if (!dma->port[i]) {
- ret = -EINVAL;
- goto port_error;
- }
- }
/* Inform DSP about PDI stream number */
- for (i = 0; i < dma->nr_ports; i++) {
- ret = intel_config_stream(sdw, substream, dai, params,
- dma->port[i]->pdi->intel_alh_id);
- if (ret)
- goto port_error;
- }
+ ret = intel_config_stream(sdw, substream, dai, params,
+ pdi->intel_alh_id);
+ if (ret)
+ goto error;
sconfig.direction = dir;
sconfig.ch_count = ch;
@@ -731,32 +672,22 @@ static int intel_hw_params(struct snd_pcm_substream *substream,
}
/* Port configuration */
- pconfig = kcalloc(dma->nr_ports, sizeof(*pconfig), GFP_KERNEL);
+ pconfig = kcalloc(1, sizeof(*pconfig), GFP_KERNEL);
if (!pconfig) {
ret = -ENOMEM;
- goto port_error;
+ goto error;
}
- for (i = 0; i < dma->nr_ports; i++) {
- pconfig[i].num = dma->port[i]->num;
- pconfig[i].ch_mask = (1 << ch) - 1;
- }
+ pconfig->num = pdi->num;
+ pconfig->ch_mask = (1 << ch) - 1;
ret = sdw_stream_add_master(&cdns->bus, &sconfig,
- pconfig, dma->nr_ports, dma->stream);
- if (ret) {
+ pconfig, 1, dma->stream);
+ if (ret)
dev_err(cdns->dev, "add master to stream failed:%d\n", ret);
- goto stream_error;
- }
kfree(pconfig);
- return ret;
-
-stream_error:
- kfree(pconfig);
-port_error:
- intel_port_cleanup(dma);
- kfree(dma->port);
+error:
return ret;
}
@@ -776,8 +707,6 @@ intel_hw_free(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
dev_err(dai->dev, "remove master from stream %s failed: %d\n",
dma->stream->name, ret);
- intel_port_cleanup(dma);
- kfree(dma->port);
return ret;
}
@@ -842,14 +771,6 @@ static int intel_create_dai(struct sdw_cdns *cdns,
return -ENOMEM;
if (type == INTEL_PDI_BD || type == INTEL_PDI_OUT) {
- dais[i].playback.stream_name =
- kasprintf(GFP_KERNEL, "SDW%d Tx%d",
- cdns->instance, i);
- if (!dais[i].playback.stream_name) {
- kfree(dais[i].name);
- return -ENOMEM;
- }
-
dais[i].playback.channels_min = 1;
dais[i].playback.channels_max = max_ch;
dais[i].playback.rates = SNDRV_PCM_RATE_48000;
@@ -857,23 +778,12 @@ static int intel_create_dai(struct sdw_cdns *cdns,
}
if (type == INTEL_PDI_BD || type == INTEL_PDI_IN) {
- dais[i].capture.stream_name =
- kasprintf(GFP_KERNEL, "SDW%d Rx%d",
- cdns->instance, i);
- if (!dais[i].capture.stream_name) {
- kfree(dais[i].name);
- kfree(dais[i].playback.stream_name);
- return -ENOMEM;
- }
-
dais[i].capture.channels_min = 1;
dais[i].capture.channels_max = max_ch;
dais[i].capture.rates = SNDRV_PCM_RATE_48000;
dais[i].capture.formats = SNDRV_PCM_FMTBIT_S16_LE;
}
- dais[i].id = SDW_DAI_ID_RANGE_START + i;
-
if (pcm)
dais[i].ops = &intel_pcm_dai_ops;
else
@@ -993,6 +903,15 @@ static struct sdw_master_ops sdw_intel_ops = {
.post_bank_switch = intel_post_bank_switch,
};
+static int intel_init(struct sdw_intel *sdw)
+{
+ /* Initialize shim and controller */
+ intel_link_power_up(sdw);
+ intel_shim_init(sdw);
+
+ return sdw_cdns_init(&sdw->cdns, false);
+}
+
/*
* probe and init
*/
@@ -1026,7 +945,7 @@ static int intel_probe(struct platform_device *pdev)
ret = sdw_add_bus_master(&sdw->cdns.bus);
if (ret) {
dev_err(&pdev->dev, "sdw_add_bus_master fail: %d\n", ret);
- goto err_master_reg;
+ return ret;
}
if (sdw->cdns.bus.prop.hw_disabled) {
@@ -1035,16 +954,11 @@ static int intel_probe(struct platform_device *pdev)
return 0;
}
- /* Initialize shim and controller */
- intel_link_power_up(sdw);
- intel_shim_init(sdw);
-
- ret = sdw_cdns_init(&sdw->cdns);
+ /* Initialize shim, controller and Cadence IP */
+ ret = intel_init(sdw);
if (ret)
goto err_init;
- ret = sdw_cdns_enable_interrupt(&sdw->cdns);
-
/* Read the PDI config and initialize cadence PDI */
intel_pdi_init(sdw, &config);
ret = sdw_cdns_pdi_init(&sdw->cdns, config);
@@ -1062,23 +976,35 @@ static int intel_probe(struct platform_device *pdev)
goto err_init;
}
+ ret = sdw_cdns_enable_interrupt(&sdw->cdns, true);
+ if (ret < 0) {
+ dev_err(sdw->cdns.dev, "cannot enable interrupts\n");
+ goto err_init;
+ }
+
+ ret = sdw_cdns_exit_reset(&sdw->cdns);
+ if (ret < 0) {
+ dev_err(sdw->cdns.dev, "unable to exit bus reset sequence\n");
+ goto err_interrupt;
+ }
+
/* Register DAIs */
ret = intel_register_dai(sdw);
if (ret) {
dev_err(sdw->cdns.dev, "DAI registration failed: %d\n", ret);
snd_soc_unregister_component(sdw->cdns.dev);
- goto err_dai;
+ goto err_interrupt;
}
intel_debugfs_init(sdw);
return 0;
-err_dai:
+err_interrupt:
+ sdw_cdns_enable_interrupt(&sdw->cdns, false);
free_irq(sdw->res->irq, sdw);
err_init:
sdw_delete_bus_master(&sdw->cdns.bus);
-err_master_reg:
return ret;
}
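After this rework the probe sequence enables interrupts before the bus leaves reset, and the new err_interrupt label unwinds them. In outline:

    /* intel_init()                  - shim power-up + Cadence IP init
     * intel_pdi_init() + sdw_cdns_pdi_init()
     * sdw_cdns_enable_interrupt(cdns, true)
     * sdw_cdns_exit_reset()         - Slaves may attach from here on
     * intel_register_dai()
     *
     * Any failure past interrupt enable goes through err_interrupt,
     * which calls sdw_cdns_enable_interrupt(cdns, false) before
     * free_irq() and bus teardown.
     */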
@@ -1090,6 +1016,7 @@ static int intel_remove(struct platform_device *pdev)
if (!sdw->cdns.bus.prop.hw_disabled) {
intel_debugfs_exit(sdw);
+ sdw_cdns_enable_interrupt(&sdw->cdns, false);
free_irq(sdw->res->irq, sdw);
snd_soc_unregister_component(sdw->cdns.dev);
}
diff --git a/drivers/soundwire/intel_init.c b/drivers/soundwire/intel_init.c
index b74c2f144962..2a2b4d8df462 100644
--- a/drivers/soundwire/intel_init.c
+++ b/drivers/soundwire/intel_init.c
@@ -9,6 +9,7 @@
#include <linux/acpi.h>
#include <linux/export.h>
+#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/soundwire/sdw_intel.h>
diff --git a/drivers/soundwire/slave.c b/drivers/soundwire/slave.c
index 6473fa602f82..19919975bb6d 100644
--- a/drivers/soundwire/slave.c
+++ b/drivers/soundwire/slave.c
@@ -29,10 +29,17 @@ static int sdw_slave_add(struct sdw_bus *bus,
slave->dev.parent = bus->dev;
slave->dev.fwnode = fwnode;
- /* name shall be sdw:link:mfg:part:class:unique */
- dev_set_name(&slave->dev, "sdw:%x:%x:%x:%x:%x",
- bus->link_id, id->mfg_id, id->part_id,
- id->class_id, id->unique_id);
+ if (id->unique_id == SDW_IGNORED_UNIQUE_ID) {
+ /* name shall be sdw:link:mfg:part:class */
+ dev_set_name(&slave->dev, "sdw:%x:%x:%x:%x",
+ bus->link_id, id->mfg_id, id->part_id,
+ id->class_id);
+ } else {
+ /* name shall be sdw:link:mfg:part:class:unique */
+ dev_set_name(&slave->dev, "sdw:%x:%x:%x:%x:%x",
+ bus->link_id, id->mfg_id, id->part_id,
+ id->class_id, id->unique_id);
+ }
slave->dev.release = sdw_slave_release;
slave->dev.bus = &sdw_bus_type;
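An illustration of the two naming forms, with hypothetical mfg/part IDs: two identical parts on link 0 that report distinct unique_id values 0 and 1 keep the long form,

    sdw:0:25d:711:0:0   and   sdw:0:25d:711:0:1

while a lone part whose firmware reports a meaningless unique_id is registered as

    sdw:0:25d:711:0

once sdw_acpi_find_slaves() (below) has set SDW_IGNORED_UNIQUE_ID.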
@@ -64,6 +71,36 @@ static int sdw_slave_add(struct sdw_bus *bus,
}
#if IS_ENABLED(CONFIG_ACPI)
+
+static bool find_slave(struct sdw_bus *bus,
+ struct acpi_device *adev,
+ struct sdw_slave_id *id)
+{
+ unsigned long long addr;
+ unsigned int link_id;
+ acpi_status status;
+
+ status = acpi_evaluate_integer(adev->handle,
+ METHOD_NAME__ADR, NULL, &addr);
+
+ if (ACPI_FAILURE(status)) {
+ dev_err(bus->dev, "_ADR resolution failed: %x\n",
+ status);
+ return false;
+ }
+
+ /* Extract the link id from _ADR, bits 51 to 48 (inclusive) */
+ link_id = (addr >> 48) & GENMASK(3, 0);
+
+ /* Check for link_id match */
+ if (link_id != bus->link_id)
+ return false;
+
+ sdw_extract_slave_id(bus, addr, id);
+
+ return true;
+}
+
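Only the link id is decoded here; sdw_extract_slave_id() handles the remaining fields. A worked example with an invented _ADR value:

    unsigned long long addr = 0x000130025d070000ULL;      /* hypothetical */
    unsigned int link_id = (addr >> 48) & GENMASK(3, 0);  /* == 0x1 */
    /* sdw_extract_slave_id(bus, addr, &id) then fills in the SoundWire
     * version, mfg_id, part_id, class_id and unique_id from the lower bits.
     */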
/*
* sdw_acpi_find_slaves() - Find Slave devices in Master ACPI node
* @bus: SDW bus instance
@@ -73,6 +110,7 @@ static int sdw_slave_add(struct sdw_bus *bus,
int sdw_acpi_find_slaves(struct sdw_bus *bus)
{
struct acpi_device *adev, *parent;
+ struct acpi_device *adev2, *parent2;
parent = ACPI_COMPANION(bus->dev);
if (!parent) {
@@ -81,28 +119,46 @@ int sdw_acpi_find_slaves(struct sdw_bus *bus)
}
list_for_each_entry(adev, &parent->children, node) {
- unsigned long long addr;
struct sdw_slave_id id;
- unsigned int link_id;
- acpi_status status;
+ struct sdw_slave_id id2;
+ bool ignore_unique_id = true;
- status = acpi_evaluate_integer(adev->handle,
- METHOD_NAME__ADR, NULL, &addr);
+ if (!find_slave(bus, adev, &id))
+ continue;
- if (ACPI_FAILURE(status)) {
- dev_err(bus->dev, "_ADR resolution failed: %x\n",
- status);
- return status;
+ /* brute-force O(N^2) search for duplicates */
+ parent2 = parent;
+ list_for_each_entry(adev2, &parent2->children, node) {
+
+ if (adev == adev2)
+ continue;
+
+ if (!find_slave(bus, adev2, &id2))
+ continue;
+
+ if (id.sdw_version != id2.sdw_version ||
+ id.mfg_id != id2.mfg_id ||
+ id.part_id != id2.part_id ||
+ id.class_id != id2.class_id)
+ continue;
+
+ if (id.unique_id != id2.unique_id) {
+ dev_dbg(bus->dev,
+ "Valid unique IDs %x %x for Slave mfg %x part %d\n",
+ id.unique_id, id2.unique_id,
+ id.mfg_id, id.part_id);
+ ignore_unique_id = false;
+ } else {
+ dev_err(bus->dev,
+ "Invalid unique IDs %x %x for Slave mfg %x part %d\n",
+ id.unique_id, id2.unique_id,
+ id.mfg_id, id.part_id);
+ return -ENODEV;
+ }
}
- /* Extract link id from ADR, Bit 51 to 48 (included) */
- link_id = (addr >> 48) & GENMASK(3, 0);
-
- /* Check for link_id match */
- if (link_id != bus->link_id)
- continue;
-
- sdw_extract_slave_id(bus, addr, &id);
+ if (ignore_unique_id)
+ id.unique_id = SDW_IGNORED_UNIQUE_ID;
/*
* don't error check for sdw_slave_add as we want to continue
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 6f7fdcbb9151..870f7797b56b 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -80,6 +80,7 @@ config SPI_ARMADA_3700
config SPI_ATMEL
tristate "Atmel SPI Controller"
depends on ARCH_AT91 || COMPILE_TEST
+ depends on OF
help
This selects a driver for the Atmel SPI Controller, present on
many AT91 ARM chips.
@@ -143,7 +144,7 @@ config SPI_BCM63XX
tristate "Broadcom BCM63xx SPI controller"
depends on BCM63XX || COMPILE_TEST
help
- Enable support for the SPI controller on the Broadcom BCM63xx SoCs.
+ Enable support for the SPI controller on the Broadcom BCM63xx SoCs.
config SPI_BCM63XX_HSSPI
tristate "Broadcom BCM63XX HS SPI controller driver"
@@ -234,11 +235,11 @@ config SPI_DLN2
tristate "Diolan DLN-2 USB SPI adapter"
depends on MFD_DLN2
help
- If you say yes to this option, support will be included for Diolan
- DLN2, a USB to SPI interface.
+ If you say yes to this option, support will be included for Diolan
+ DLN2, a USB to SPI interface.
- This driver can also be built as a module. If so, the module
- will be called spi-dln2.
+ This driver can also be built as a module. If so, the module
+ will be called spi-dln2.
config SPI_EFM32
tristate "EFM32 SPI controller"
@@ -747,10 +748,10 @@ config SPI_SYNQUACER
It also supports the new dual-bit and quad-bit SPI protocol.
config SPI_MXIC
- tristate "Macronix MX25F0A SPI controller"
- depends on SPI_MASTER
- help
- This selects the Macronix MX25F0A SPI controller driver.
+ tristate "Macronix MX25F0A SPI controller"
+ depends on SPI_MASTER
+ help
+ This selects the Macronix MX25F0A SPI controller driver.
config SPI_MXS
tristate "Freescale MXS SPI controller"
diff --git a/drivers/spi/spi-at91-usart.c b/drivers/spi/spi-at91-usart.c
index a40bb2ef89dc..88033422a42a 100644
--- a/drivers/spi/spi-at91-usart.c
+++ b/drivers/spi/spi-at91-usart.c
@@ -132,7 +132,7 @@ static int at91_usart_spi_configure_dma(struct spi_controller *ctlr,
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
- ctlr->dma_tx = dma_request_slave_channel_reason(dev, "tx");
+ ctlr->dma_tx = dma_request_chan(dev, "tx");
if (IS_ERR_OR_NULL(ctlr->dma_tx)) {
if (IS_ERR(ctlr->dma_tx)) {
err = PTR_ERR(ctlr->dma_tx);
@@ -145,7 +145,7 @@ static int at91_usart_spi_configure_dma(struct spi_controller *ctlr,
goto at91_usart_spi_error_clear;
}
- ctlr->dma_rx = dma_request_slave_channel_reason(dev, "rx");
+ ctlr->dma_rx = dma_request_chan(dev, "rx");
if (IS_ERR_OR_NULL(ctlr->dma_rx)) {
if (IS_ERR(ctlr->dma_rx)) {
err = PTR_ERR(ctlr->dma_rx);
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index acf318e7330c..56f0ca361deb 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -222,37 +222,13 @@
| SPI_BF(name, value))
/* Register access macros */
-#ifdef CONFIG_AVR32
-#define spi_readl(port, reg) \
- __raw_readl((port)->regs + SPI_##reg)
-#define spi_writel(port, reg, value) \
- __raw_writel((value), (port)->regs + SPI_##reg)
-
-#define spi_readw(port, reg) \
- __raw_readw((port)->regs + SPI_##reg)
-#define spi_writew(port, reg, value) \
- __raw_writew((value), (port)->regs + SPI_##reg)
-
-#define spi_readb(port, reg) \
- __raw_readb((port)->regs + SPI_##reg)
-#define spi_writeb(port, reg, value) \
- __raw_writeb((value), (port)->regs + SPI_##reg)
-#else
#define spi_readl(port, reg) \
readl_relaxed((port)->regs + SPI_##reg)
#define spi_writel(port, reg, value) \
writel_relaxed((value), (port)->regs + SPI_##reg)
-
-#define spi_readw(port, reg) \
- readw_relaxed((port)->regs + SPI_##reg)
#define spi_writew(port, reg, value) \
writew_relaxed((value), (port)->regs + SPI_##reg)
-#define spi_readb(port, reg) \
- readb_relaxed((port)->regs + SPI_##reg)
-#define spi_writeb(port, reg, value) \
- writeb_relaxed((value), (port)->regs + SPI_##reg)
-#endif
/* use PIO for small transfers, avoiding DMA setup/teardown overhead and
* cache operations; better heuristics consider wordsize and bitrate.
*/
@@ -299,17 +275,16 @@ struct atmel_spi {
bool use_dma;
bool use_pdc;
- bool use_cs_gpios;
bool keep_cs;
- bool cs_active;
u32 fifo_size;
+ u8 native_cs_free;
+ u8 native_cs_for_gpio;
};
/* Controller-specific per-slave state */
struct atmel_spi_device {
- struct gpio_desc *npcs_pin;
u32 csr;
};
@@ -336,11 +311,9 @@ static bool atmel_spi_is_v2(struct atmel_spi *as)
* transmitted") Not so! Workaround uses nCSx pins as GPIOs; or newer
* controllers have CSAAT and friends.
*
- * Since the CSAAT functionality is a bit weird on newer controllers as
- * well, we use GPIO to control nCSx pins on all controllers, updating
- * MR.PCS to avoid confusing the controller. Using GPIOs also lets us
- * support active-high chipselects despite the controller's belief that
- * only active-low devices/systems exists.
+ * Even on controllers newer than at91rm9200, using GPIOs can make
+ * sense as it lets us support active-high chipselects despite the
+ * controller's belief that only active-low devices/systems exist.
*
* However, at91rm9200 has a second erratum whereby nCS0 doesn't work
* right when driven with GPIO. ("Mode Fault does not allow more than one
@@ -352,30 +325,36 @@ static bool atmel_spi_is_v2(struct atmel_spi *as)
static void cs_activate(struct atmel_spi *as, struct spi_device *spi)
{
struct atmel_spi_device *asd = spi->controller_state;
+ int chip_select;
u32 mr;
+ if (spi->cs_gpiod)
+ chip_select = as->native_cs_for_gpio;
+ else
+ chip_select = spi->chip_select;
+
if (atmel_spi_is_v2(as)) {
- spi_writel(as, CSR0 + 4 * spi->chip_select, asd->csr);
+ spi_writel(as, CSR0 + 4 * chip_select, asd->csr);
/* For the low SPI version, there is an issue that PDC transfer
* on CS1,2,3 needs SPI_CSR0.BITS config as SPI_CSR1,2,3.BITS
*/
spi_writel(as, CSR0, asd->csr);
if (as->caps.has_wdrbt) {
spi_writel(as, MR,
- SPI_BF(PCS, ~(0x01 << spi->chip_select))
+ SPI_BF(PCS, ~(0x01 << chip_select))
| SPI_BIT(WDRBT)
| SPI_BIT(MODFDIS)
| SPI_BIT(MSTR));
} else {
spi_writel(as, MR,
- SPI_BF(PCS, ~(0x01 << spi->chip_select))
+ SPI_BF(PCS, ~(0x01 << chip_select))
| SPI_BIT(MODFDIS)
| SPI_BIT(MSTR));
}
mr = spi_readl(as, MR);
- if (as->use_cs_gpios)
- gpiod_set_value(asd->npcs_pin, 1);
+ if (spi->cs_gpiod)
+ gpiod_set_value(spi->cs_gpiod, 1);
} else {
u32 cpol = (spi->mode & SPI_CPOL) ? SPI_BIT(CPOL) : 0;
int i;
@@ -390,9 +369,9 @@ static void cs_activate(struct atmel_spi *as, struct spi_device *spi)
}
mr = spi_readl(as, MR);
- mr = SPI_BFINS(PCS, ~(1 << spi->chip_select), mr);
- if (as->use_cs_gpios && spi->chip_select != 0)
- gpiod_set_value(asd->npcs_pin, 1);
+ mr = SPI_BFINS(PCS, ~(1 << chip_select), mr);
+ if (spi->cs_gpiod)
+ gpiod_set_value(spi->cs_gpiod, 1);
spi_writel(as, MR, mr);
}
@@ -401,24 +380,29 @@ static void cs_activate(struct atmel_spi *as, struct spi_device *spi)
static void cs_deactivate(struct atmel_spi *as, struct spi_device *spi)
{
- struct atmel_spi_device *asd = spi->controller_state;
+ int chip_select;
u32 mr;
+ if (spi->cs_gpiod)
+ chip_select = as->native_cs_for_gpio;
+ else
+ chip_select = spi->chip_select;
+
/* only deactivate *this* device; sometimes transfers to
* another device may be active when this routine is called.
*/
mr = spi_readl(as, MR);
- if (~SPI_BFEXT(PCS, mr) & (1 << spi->chip_select)) {
+ if (~SPI_BFEXT(PCS, mr) & (1 << chip_select)) {
mr = SPI_BFINS(PCS, 0xf, mr);
spi_writel(as, MR, mr);
}
dev_dbg(&spi->dev, "DEactivate NPCS, mr %08x\n", mr);
- if (!as->use_cs_gpios)
+ if (!spi->cs_gpiod)
spi_writel(as, CR, SPI_BIT(LASTXFER));
- else if (atmel_spi_is_v2(as) || spi->chip_select != 0)
- gpiod_set_value(asd->npcs_pin, 0);
+ else
+ gpiod_set_value(spi->cs_gpiod, 0);
}
static void atmel_spi_lock(struct atmel_spi *as) __acquires(&as->lock)
@@ -527,7 +511,7 @@ static int atmel_spi_configure_dma(struct spi_master *master,
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
- master->dma_tx = dma_request_slave_channel_reason(dev, "tx");
+ master->dma_tx = dma_request_chan(dev, "tx");
if (IS_ERR(master->dma_tx)) {
err = PTR_ERR(master->dma_tx);
if (err == -EPROBE_DEFER) {
@@ -844,6 +828,12 @@ static int atmel_spi_set_xfer_speed(struct atmel_spi *as,
{
u32 scbr, csr;
unsigned long bus_hz;
+ int chip_select;
+
+ if (spi->cs_gpiod)
+ chip_select = as->native_cs_for_gpio;
+ else
+ chip_select = spi->chip_select;
/* v1 chips start out at half the peripheral bus speed. */
bus_hz = as->spi_clk;
@@ -872,9 +862,9 @@ static int atmel_spi_set_xfer_speed(struct atmel_spi *as,
xfer->speed_hz, scbr, bus_hz);
return -EINVAL;
}
- csr = spi_readl(as, CSR0 + 4 * spi->chip_select);
+ csr = spi_readl(as, CSR0 + 4 * chip_select);
csr = SPI_BFINS(SCBR, scbr, csr);
- spi_writel(as, CSR0 + 4 * spi->chip_select, csr);
+ spi_writel(as, CSR0 + 4 * chip_select, csr);
return 0;
}
@@ -1173,40 +1163,105 @@ atmel_spi_pdc_interrupt(int irq, void *dev_id)
return ret;
}
+static int atmel_word_delay_csr(struct spi_device *spi, struct atmel_spi *as)
+{
+ struct spi_delay *delay = &spi->word_delay;
+ u32 value = delay->value;
+
+ switch (delay->unit) {
+ case SPI_DELAY_UNIT_NSECS:
+ value /= 1000;
+ break;
+ case SPI_DELAY_UNIT_USECS:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return (as->spi_clk / 1000000 * value) >> 5;
+}
+
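DLYBCT counts in units of 32 peripheral clock periods, which is what the final shift by 5 accounts for. A worked example with a hypothetical clock:

    /* spi_clk = 84 MHz and a word_delay of 2 us give
     * 84 * 2 = 168 peripheral clock cycles; >> 5 divides by 32,
     * so DLYBCT = 5 and the controller inserts 5 * 32 = 160 cycles
     * (~1.9 us) between words.
     */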
+static void initialize_native_cs_for_gpio(struct atmel_spi *as)
+{
+ int i;
+ struct spi_master *master = platform_get_drvdata(as->pdev);
+
+ if (!as->native_cs_free)
+ return; /* already initialized */
+
+ if (!master->cs_gpiods)
+ return; /* No CS GPIO */
+
+ /*
+ * On the first version of the controller (AT91RM9200), CS0
+ * can't be used in association with a GPIO
+ */
+ if (atmel_spi_is_v2(as))
+ i = 0;
+ else
+ i = 1;
+
+ for (; i < 4; i++)
+ if (master->cs_gpiods[i])
+ as->native_cs_free |= BIT(i);
+
+ if (as->native_cs_free)
+ as->native_cs_for_gpio = ffs(as->native_cs_free);
+}
+
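An illustration of the bookkeeping above, for a hypothetical v2 controller whose device tree routes CS 1 and CS 3 through GPIOs:

    /* cs_gpiods[1] and cs_gpiods[3] are set, so the loop yields
     *   native_cs_free = BIT(1) | BIT(3) = 0x0a
     * and ffs(0x0a) = 2 (ffs() is 1-based), hence
     *   native_cs_for_gpio = 2
     * i.e. CSR2 and MR.PCS bit 2 are programmed whenever a GPIO CS
     * is active, per cs_activate()/cs_deactivate() above.
     */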
static int atmel_spi_setup(struct spi_device *spi)
{
struct atmel_spi *as;
struct atmel_spi_device *asd;
u32 csr;
unsigned int bits = spi->bits_per_word;
+ int chip_select;
+ int word_delay_csr;
as = spi_master_get_devdata(spi->master);
/* see notes above re chipselect */
- if (!atmel_spi_is_v2(as)
- && spi->chip_select == 0
- && (spi->mode & SPI_CS_HIGH)) {
- dev_dbg(&spi->dev, "setup: can't be active-high\n");
+ if (!spi->cs_gpiod && (spi->mode & SPI_CS_HIGH)) {
+ dev_warn(&spi->dev, "setup: non GPIO CS can't be active-high\n");
return -EINVAL;
}
+ /* Setup() is called during spi_register_controller(aka
+ * spi_register_master) but after all members of the cs_gpiod
+ * array have been filled, so we can look for which native
+ * CS will be free for use with a GPIO
+ */
+ initialize_native_cs_for_gpio(as);
+
+ if (spi->cs_gpiod && as->native_cs_free) {
+ dev_err(&spi->dev,
+ "No native CS available to support this GPIO CS\n");
+ return -EBUSY;
+ }
+
+ if (spi->cs_gpiod)
+ chip_select = as->native_cs_for_gpio;
+ else
+ chip_select = spi->chip_select;
+
csr = SPI_BF(BITS, bits - 8);
if (spi->mode & SPI_CPOL)
csr |= SPI_BIT(CPOL);
if (!(spi->mode & SPI_CPHA))
csr |= SPI_BIT(NCPHA);
- if (!as->use_cs_gpios)
- csr |= SPI_BIT(CSAAT);
- /* DLYBS is mostly irrelevant since we manage chipselect using GPIOs.
- */
+ if (!spi->cs_gpiod)
+ csr |= SPI_BIT(CSAAT);
csr |= SPI_BF(DLYBS, 0);
+ word_delay_csr = atmel_word_delay_csr(spi, as);
+ if (word_delay_csr < 0)
+ return word_delay_csr;
+
/* DLYBCT adds delays between words. This is useful for slow devices
* that need a bit of time to setup the next transfer.
*/
- csr |= SPI_BF(DLYBCT,
- (as->spi_clk / 1000000 * spi->word_delay_usecs) >> 5);
+ csr |= SPI_BF(DLYBCT, word_delay_csr);
asd = spi->controller_state;
if (!asd) {
@@ -1214,21 +1269,6 @@ static int atmel_spi_setup(struct spi_device *spi)
if (!asd)
return -ENOMEM;
- /*
- * If use_cs_gpios is true this means that we have "cs-gpios"
- * defined in the device tree node so we should have
- * gotten the GPIO lines from the device tree inside the
- * SPI core. Warn if this is not the case but continue since
- * CS GPIOs are after all optional.
- */
- if (as->use_cs_gpios) {
- if (!spi->cs_gpiod) {
- dev_err(&spi->dev,
- "host claims to use CS GPIOs but no CS found in DT by the SPI core\n");
- }
- asd->npcs_pin = spi->cs_gpiod;
- }
-
spi->controller_state = asd;
}
@@ -1239,7 +1279,7 @@ static int atmel_spi_setup(struct spi_device *spi)
bits, spi->mode, spi->chip_select, csr);
if (!atmel_spi_is_v2(as))
- spi_writel(as, CSR0 + 4 * spi->chip_select, csr);
+ spi_writel(as, CSR0 + 4 * chip_select, csr);
return 0;
}
@@ -1368,19 +1408,16 @@ static int atmel_spi_one_transfer(struct spi_master *master,
&& as->use_pdc)
atmel_spi_dma_unmap_xfer(master, xfer);
- if (xfer->delay_usecs)
- udelay(xfer->delay_usecs);
+ spi_transfer_delay_exec(xfer);
if (xfer->cs_change) {
if (list_is_last(&xfer->transfer_list,
&msg->transfers)) {
as->keep_cs = true;
} else {
- as->cs_active = !as->cs_active;
- if (as->cs_active)
- cs_activate(as, msg->spi);
- else
- cs_deactivate(as, msg->spi);
+ cs_deactivate(as, msg->spi);
+ udelay(10);
+ cs_activate(as, msg->spi);
}
}
@@ -1403,7 +1440,6 @@ static int atmel_spi_transfer_one_message(struct spi_master *master,
atmel_spi_lock(as);
cs_activate(as, spi);
- as->cs_active = true;
as->keep_cs = false;
msg->status = 0;
@@ -1527,7 +1563,7 @@ static int atmel_spi_probe(struct platform_device *pdev)
master->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 16);
master->dev.of_node = pdev->dev.of_node;
master->bus_num = pdev->id;
- master->num_chipselect = master->dev.of_node ? 0 : 4;
+ master->num_chipselect = 4;
master->setup = atmel_spi_setup;
master->flags = (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX);
master->transfer_one_message = atmel_spi_transfer_one_message;
@@ -1555,19 +1591,6 @@ static int atmel_spi_probe(struct platform_device *pdev)
atmel_get_caps(as);
- /*
- * If there are chip selects in the device tree, those will be
- * discovered by the SPI core when registering the SPI master
- * and assigned to each SPI device.
- */
- as->use_cs_gpios = true;
- if (atmel_spi_is_v2(as) &&
- pdev->dev.of_node &&
- !of_get_property(pdev->dev.of_node, "cs-gpios", NULL)) {
- as->use_cs_gpios = false;
- master->num_chipselect = 4;
- }
-
as->use_dma = false;
as->use_pdc = false;
if (as->caps.has_dma_support) {
@@ -1775,20 +1798,18 @@ static const struct dev_pm_ops atmel_spi_pm_ops = {
#define ATMEL_SPI_PM_OPS NULL
#endif
-#if defined(CONFIG_OF)
static const struct of_device_id atmel_spi_dt_ids[] = {
{ .compatible = "atmel,at91rm9200-spi" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, atmel_spi_dt_ids);
-#endif
static struct platform_driver atmel_spi_driver = {
.driver = {
.name = "atmel_spi",
.pm = ATMEL_SPI_PM_OPS,
- .of_match_table = of_match_ptr(atmel_spi_dt_ids),
+ .of_match_table = atmel_spi_dt_ids,
},
.probe = atmel_spi_probe,
.remove = atmel_spi_remove,
diff --git a/drivers/spi/spi-axi-spi-engine.c b/drivers/spi/spi-axi-spi-engine.c
index 74842f6019ed..eb9b78a90dcf 100644
--- a/drivers/spi/spi-axi-spi-engine.c
+++ b/drivers/spi/spi-axi-spi-engine.c
@@ -163,10 +163,21 @@ static void spi_engine_gen_xfer(struct spi_engine_program *p, bool dry,
}
static void spi_engine_gen_sleep(struct spi_engine_program *p, bool dry,
- struct spi_engine *spi_engine, unsigned int clk_div, unsigned int delay)
+ struct spi_engine *spi_engine, unsigned int clk_div,
+ struct spi_transfer *xfer)
{
unsigned int spi_clk = clk_get_rate(spi_engine->ref_clk);
unsigned int t;
+ int delay;
+
+ if (xfer->delay_usecs) {
+ delay = xfer->delay_usecs;
+ } else {
+ delay = spi_delay_to_ns(&xfer->delay, xfer);
+ if (delay < 0)
+ return;
+ delay /= 1000;
+ }
if (delay == 0)
return;
@@ -218,8 +229,7 @@ static int spi_engine_compile_message(struct spi_engine *spi_engine,
spi_engine_gen_cs(p, dry, spi, true);
spi_engine_gen_xfer(p, dry, xfer);
- spi_engine_gen_sleep(p, dry, spi_engine, clk_div,
- xfer->delay_usecs);
+ spi_engine_gen_sleep(p, dry, spi_engine, clk_div, xfer);
cs_change = xfer->cs_change;
if (list_is_last(&xfer->transfer_list, &msg->transfers))
diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c
index 7a3531856491..85bad70f59e3 100644
--- a/drivers/spi/spi-bcm-qspi.c
+++ b/drivers/spi/spi-bcm-qspi.c
@@ -803,7 +803,8 @@ static int bcm_qspi_bspi_exec_mem_op(struct spi_device *spi,
return -EIO;
from = op->addr.val;
- bcm_qspi_chip_select(qspi, spi->chip_select);
+ if (!spi->cs_gpiod)
+ bcm_qspi_chip_select(qspi, spi->chip_select);
bcm_qspi_write(qspi, MSPI, MSPI_WRITE_LOCK, 0);
/*
@@ -882,7 +883,8 @@ static int bcm_qspi_transfer_one(struct spi_master *master,
int slots;
unsigned long timeo = msecs_to_jiffies(100);
- bcm_qspi_chip_select(qspi, spi->chip_select);
+ if (!spi->cs_gpiod)
+ bcm_qspi_chip_select(qspi, spi->chip_select);
qspi->trans_pos.trans = trans;
qspi->trans_pos.byte = 0;
@@ -1234,6 +1236,7 @@ int bcm_qspi_probe(struct platform_device *pdev,
master->cleanup = bcm_qspi_cleanup;
master->dev.of_node = dev->of_node;
master->num_chipselect = NUM_CHIPSELECT;
+ master->use_gpio_descriptors = true;
qspi->big_endian = of_device_is_big_endian(dev->of_node);
diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
index b4070c0de3df..fb61a620effc 100644
--- a/drivers/spi/spi-bcm2835.c
+++ b/drivers/spi/spi-bcm2835.c
@@ -1248,7 +1248,7 @@ static int bcm2835_spi_setup(struct spi_device *spi)
/*
* Retrieve the corresponding GPIO line used for CS.
* The inversion semantics will be handled by the GPIO core
- * code, so we pass GPIOS_OUT_LOW for "unasserted" and
+ * code, so we pass GPIOD_OUT_LOW for "unasserted" and
* the correct flag for inversion semantics. The SPI_CS_HIGH
* on spi->mode cannot be checked for polarity in this case
* as the flag use_gpio_descriptors enforces SPI_CS_HIGH.
diff --git a/drivers/spi/spi-bcm63xx-hsspi.c b/drivers/spi/spi-bcm63xx-hsspi.c
index c6836a931dbf..7327309ea3d5 100644
--- a/drivers/spi/spi-bcm63xx-hsspi.c
+++ b/drivers/spi/spi-bcm63xx-hsspi.c
@@ -291,8 +291,7 @@ static int bcm63xx_hsspi_transfer_one(struct spi_master *master,
msg->actual_length += t->len;
- if (t->delay_usecs)
- udelay(t->delay_usecs);
+ spi_transfer_delay_exec(t);
if (t->cs_change)
bcm63xx_hsspi_set_cs(bs, spi->chip_select, false);
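Many drivers in this series make the same substitution. spi_transfer_delay_exec() (declared in linux/spi/spi.h) is assumed to be roughly equivalent to the following sketch, honouring the legacy delay_usecs field before the new struct spi_delay:

    static inline void spi_transfer_delay_exec(struct spi_transfer *t)
    {
            struct spi_delay d;

            if (t->delay_usecs) {           /* legacy field takes precedence */
                    d.value = t->delay_usecs;
                    d.unit = SPI_DELAY_UNIT_USECS;
                    spi_delay_exec(&d, NULL);
            } else {
                    spi_delay_exec(&t->delay, t);
            }
    }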
diff --git a/drivers/spi/spi-bcm63xx.c b/drivers/spi/spi-bcm63xx.c
index fdd7eaa0b8ed..0f1b10a4ef0c 100644
--- a/drivers/spi/spi-bcm63xx.c
+++ b/drivers/spi/spi-bcm63xx.c
@@ -368,7 +368,7 @@ static int bcm63xx_spi_transfer_one(struct spi_master *master,
}
/* CS will be deasserted directly after transfer */
- if (t->delay_usecs) {
+ if (t->delay_usecs || t->delay.value) {
dev_err(&spi->dev, "unable to keep CS asserted after transfer\n");
status = -EINVAL;
goto exit;
diff --git a/drivers/spi/spi-cavium.c b/drivers/spi/spi-cavium.c
index 5aaf21582cb5..6854c3ce423b 100644
--- a/drivers/spi/spi-cavium.c
+++ b/drivers/spi/spi-cavium.c
@@ -119,8 +119,7 @@ static int octeon_spi_do_transfer(struct octeon_spi *p,
*rx_buf++ = (u8)v;
}
- if (xfer->delay_usecs)
- udelay(xfer->delay_usecs);
+ spi_transfer_delay_exec(xfer);
return xfer->len;
}
diff --git a/drivers/spi/spi-dw-mmio.c b/drivers/spi/spi-dw-mmio.c
index bd46fca3f094..384a3ab6dc2d 100644
--- a/drivers/spi/spi-dw-mmio.c
+++ b/drivers/spi/spi-dw-mmio.c
@@ -9,6 +9,7 @@
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/scatterlist.h>
@@ -193,6 +194,8 @@ static int dw_spi_mmio_probe(struct platform_device *pdev)
goto out;
}
+ pm_runtime_enable(&pdev->dev);
+
ret = dw_spi_add_host(&pdev->dev, dws);
if (ret)
goto out;
@@ -201,6 +204,7 @@ static int dw_spi_mmio_probe(struct platform_device *pdev)
return 0;
out:
+ pm_runtime_disable(&pdev->dev);
clk_disable_unprepare(dwsmmio->pclk);
out_clk:
clk_disable_unprepare(dwsmmio->clk);
@@ -212,6 +216,7 @@ static int dw_spi_mmio_remove(struct platform_device *pdev)
struct dw_spi_mmio *dwsmmio = platform_get_drvdata(pdev);
dw_spi_remove_host(&dwsmmio->dws);
+ pm_runtime_disable(&pdev->dev);
clk_disable_unprepare(dwsmmio->pclk);
clk_disable_unprepare(dwsmmio->clk);
@@ -223,6 +228,7 @@ static const struct of_device_id dw_spi_mmio_of_match[] = {
{ .compatible = "mscc,ocelot-spi", .data = dw_spi_mscc_ocelot_init},
{ .compatible = "mscc,jaguar2-spi", .data = dw_spi_mscc_jaguar2_init},
{ .compatible = "amazon,alpine-dw-apb-ssi", .data = dw_spi_alpine_init},
+ { .compatible = "renesas,rzn1-spi", },
{ /* end of table */}
};
MODULE_DEVICE_TABLE(of, dw_spi_mmio_of_match);
diff --git a/drivers/spi/spi-dw-pci.c b/drivers/spi/spi-dw-pci.c
index 140644913e6c..12c131b5fb4e 100644
--- a/drivers/spi/spi-dw-pci.c
+++ b/drivers/spi/spi-dw-pci.c
@@ -7,6 +7,7 @@
#include <linux/interrupt.h>
#include <linux/pci.h>
+#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/module.h>
@@ -35,7 +36,7 @@ static struct spi_pci_desc spi_pci_mid_desc_2 = {
};
static struct spi_pci_desc spi_pci_ehl_desc = {
- .num_cs = 1,
+ .num_cs = 2,
.bus_num = -1,
.max_freq = 100000000,
};
@@ -57,13 +58,18 @@ static int spi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Get basic io resource and map it */
dws->paddr = pci_resource_start(pdev, pci_bar);
+ pci_set_master(pdev);
ret = pcim_iomap_regions(pdev, 1 << pci_bar, pci_name(pdev));
if (ret)
return ret;
+ ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
+ if (ret < 0)
+ return ret;
+
dws->regs = pcim_iomap_table(pdev)[pci_bar];
- dws->irq = pdev->irq;
+ dws->irq = pci_irq_vector(pdev, 0);
/*
* Specific handling for platforms, like dma setup,
@@ -80,12 +86,15 @@ static int spi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return ret;
}
} else {
+ pci_free_irq_vectors(pdev);
return -ENODEV;
}
ret = dw_spi_add_host(&pdev->dev, dws);
- if (ret)
+ if (ret) {
+ pci_free_irq_vectors(pdev);
return ret;
+ }
/* PCI hook and SPI hook use the same drv data */
pci_set_drvdata(pdev, dws);
@@ -93,6 +102,11 @@ static int spi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_info(&pdev->dev, "found PCI SPI controller(ID: %04x:%04x)\n",
pdev->vendor, pdev->device);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, 1000);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_put_autosuspend(&pdev->dev);
+ pm_runtime_allow(&pdev->dev);
+
return 0;
}
@@ -100,7 +114,11 @@ static void spi_pci_remove(struct pci_dev *pdev)
{
struct dw_spi *dws = pci_get_drvdata(pdev);
+ pm_runtime_forbid(&pdev->dev);
+ pm_runtime_get_noresume(&pdev->dev);
+
dw_spi_remove_host(dws);
+ pci_free_irq_vectors(pdev);
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c
index 9a49e073e8b7..a92aa5cd4fbe 100644
--- a/drivers/spi/spi-dw.c
+++ b/drivers/spi/spi-dw.c
@@ -308,7 +308,8 @@ static int dw_spi_transfer_one(struct spi_controller *master,
cr0 = (transfer->bits_per_word - 1)
| (chip->type << SPI_FRF_OFFSET)
| ((((spi->mode & SPI_CPOL) ? 1 : 0) << SPI_SCOL_OFFSET) |
- (((spi->mode & SPI_CPHA) ? 1 : 0) << SPI_SCPH_OFFSET))
+ (((spi->mode & SPI_CPHA) ? 1 : 0) << SPI_SCPH_OFFSET) |
+ (((spi->mode & SPI_LOOP) ? 1 : 0) << SPI_SRL_OFFSET))
| (chip->tmode << SPI_TMOD_OFFSET);
/*
@@ -493,6 +494,7 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
master->dev.of_node = dev->of_node;
master->dev.fwnode = dev->fwnode;
master->flags = SPI_MASTER_GPIO_SS;
+ master->auto_runtime_pm = true;
if (dws->set_cs)
master->set_cs = dws->set_cs;
diff --git a/drivers/spi/spi-dw.h b/drivers/spi/spi-dw.h
index c9c15881e982..38c7de1f0aa9 100644
--- a/drivers/spi/spi-dw.h
+++ b/drivers/spi/spi-dw.h
@@ -4,7 +4,6 @@
#include <linux/io.h>
#include <linux/scatterlist.h>
-#include <linux/gpio.h>
/* Register offsets */
#define DW_SPI_CTRL0 0x00
diff --git a/drivers/spi/spi-falcon.c b/drivers/spi/spi-falcon.c
index 00f46c816a56..d3336a63f462 100644
--- a/drivers/spi/spi-falcon.c
+++ b/drivers/spi/spi-falcon.c
@@ -377,7 +377,7 @@ static int falcon_sflash_xfer_one(struct spi_master *master,
m->actual_length += t->len;
- WARN_ON(t->delay_usecs || t->cs_change);
+ WARN_ON(t->delay_usecs || t->delay.value || t->cs_change);
spi_flags = 0;
}
diff --git a/drivers/spi/spi-fsl-cpm.c b/drivers/spi/spi-fsl-cpm.c
index 858f0544289e..54ad0ac121e5 100644
--- a/drivers/spi/spi-fsl-cpm.c
+++ b/drivers/spi/spi-fsl-cpm.c
@@ -392,7 +392,8 @@ void fsl_spi_cpm_free(struct mpc8xxx_spi *mspi)
dma_unmap_single(dev, mspi->dma_dummy_rx, SPI_MRBLR, DMA_FROM_DEVICE);
dma_unmap_single(dev, mspi->dma_dummy_tx, PAGE_SIZE, DMA_TO_DEVICE);
cpm_muram_free(cpm_muram_offset(mspi->tx_bd));
- cpm_muram_free(cpm_muram_offset(mspi->pram));
+ if (!(mspi->flags & SPI_CPM1))
+ cpm_muram_free(cpm_muram_offset(mspi->pram));
fsl_spi_free_dummy_rx();
}
EXPORT_SYMBOL_GPL(fsl_spi_cpm_free);
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index bec758e978fb..442cff71a0d2 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -129,6 +129,7 @@ enum dspi_trans_mode {
struct fsl_dspi_devtype_data {
enum dspi_trans_mode trans_mode;
u8 max_clock_factor;
+ bool ptp_sts_supported;
bool xspi_mode;
};
@@ -140,12 +141,14 @@ static const struct fsl_dspi_devtype_data vf610_data = {
static const struct fsl_dspi_devtype_data ls1021a_v1_data = {
.trans_mode = DSPI_TCFQ_MODE,
.max_clock_factor = 8,
+ .ptp_sts_supported = true,
.xspi_mode = true,
};
static const struct fsl_dspi_devtype_data ls2085a_data = {
.trans_mode = DSPI_TCFQ_MODE,
.max_clock_factor = 8,
+ .ptp_sts_supported = true,
};
static const struct fsl_dspi_devtype_data coldfire_data = {
@@ -654,6 +657,9 @@ static int dspi_rxtx(struct fsl_dspi *dspi)
u16 spi_tcnt;
u32 spi_tcr;
+ spi_take_timestamp_post(dspi->ctlr, dspi->cur_transfer,
+ dspi->tx - dspi->bytes_per_word, !dspi->irq);
+
/* Get transfer counter (in number of SPI transfers). It was
* reset to 0 when transfer(s) were started.
*/
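spi_take_timestamp_pre()/spi_take_timestamp_post() bracket the words of interest for PTP system timestamping; the progress argument counts bytes transferred so far, and the final argument indicates whether interrupts are masked. A sketch of the calling convention used in this driver:

    /* before pushing the next word(s) into the FIFO: */
    spi_take_timestamp_pre(dspi->ctlr, dspi->cur_transfer,
                           dspi->tx, !dspi->irq);
    /* ...FIFO fill and completion... */
    /* after draining, pointing one word back: */
    spi_take_timestamp_post(dspi->ctlr, dspi->cur_transfer,
                            dspi->tx - dspi->bytes_per_word, !dspi->irq);
    /* the core keeps the snapshot that overlaps
     * xfer->ptp_sts_word_pre/post and stores it in xfer->ptp_sts */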
@@ -672,6 +678,9 @@ static int dspi_rxtx(struct fsl_dspi *dspi)
/* Success! */
return 0;
+ spi_take_timestamp_pre(dspi->ctlr, dspi->cur_transfer,
+ dspi->tx, !dspi->irq);
+
if (trans_mode == DSPI_EOQ_MODE)
dspi_eoq_write(dspi);
else if (trans_mode == DSPI_TCFQ_MODE)
@@ -707,7 +716,7 @@ static irqreturn_t dspi_interrupt(int irq, void *dev_id)
regmap_read(dspi->regmap, SPI_SR, &spi_sr);
regmap_write(dspi->regmap, SPI_SR, spi_sr);
- if (!(spi_sr & (SPI_SR_EOQF | SPI_SR_TCFQF)))
+ if (!(spi_sr & SPI_SR_EOQF))
return IRQ_NONE;
if (dspi_rxtx(dspi) == 0) {
@@ -779,6 +788,9 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr,
SPI_FRAME_EBITS(transfer->bits_per_word) |
SPI_CTARE_DTCP(1));
+ spi_take_timestamp_pre(dspi->ctlr, dspi->cur_transfer,
+ dspi->tx, !dspi->irq);
+
trans_mode = dspi->devtype_data->trans_mode;
switch (trans_mode) {
case DSPI_EOQ_MODE:
@@ -815,8 +827,7 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr,
dev_err(&dspi->pdev->dev,
"Waiting for transfer to complete failed!\n");
- if (transfer->delay_usecs)
- udelay(transfer->delay_usecs);
+ spi_transfer_delay_exec(transfer);
}
out:
@@ -1006,6 +1017,25 @@ static void dspi_init(struct fsl_dspi *dspi)
SPI_CTARE_FMSZE(0) | SPI_CTARE_DTCP(1));
}
+static int dspi_slave_abort(struct spi_master *master)
+{
+ struct fsl_dspi *dspi = spi_master_get_devdata(master);
+
+ /*
+ * Terminate all pending DMA transactions for the SPI working
+ * in SLAVE mode.
+ */
+ dmaengine_terminate_sync(dspi->dma->chan_rx);
+ dmaengine_terminate_sync(dspi->dma->chan_tx);
+
+ /* Clear the internal DSPI RX and TX FIFO buffers */
+ regmap_update_bits(dspi->regmap, SPI_MCR,
+ SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF,
+ SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF);
+
+ return 0;
+}
+
static int dspi_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
@@ -1030,6 +1060,7 @@ static int dspi_probe(struct platform_device *pdev)
ctlr->dev.of_node = pdev->dev.of_node;
ctlr->cleanup = dspi_cleanup;
+ ctlr->slave_abort = dspi_slave_abort;
ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
pdata = dev_get_platdata(&pdev->dev);
@@ -1114,6 +1145,9 @@ static int dspi_probe(struct platform_device *pdev)
dspi_init(dspi);
+ if (dspi->devtype_data->trans_mode == DSPI_TCFQ_MODE)
+ goto poll_mode;
+
dspi->irq = platform_get_irq(pdev, 0);
if (dspi->irq <= 0) {
dev_info(&pdev->dev,
@@ -1132,6 +1166,7 @@ static int dspi_probe(struct platform_device *pdev)
init_waitqueue_head(&dspi->waitq);
poll_mode:
+
if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) {
ret = dspi_request_dma(dspi, res->start);
if (ret < 0) {
@@ -1143,6 +1178,8 @@ poll_mode:
ctlr->max_speed_hz =
clk_get_rate(dspi->clk) / dspi->devtype_data->max_clock_factor;
+ ctlr->ptp_sts_supported = dspi->devtype_data->ptp_sts_supported;
+
platform_set_drvdata(pdev, ctlr);
ret = spi_register_controller(ctlr);
diff --git a/drivers/spi/spi-fsl-espi.c b/drivers/spi/spi-fsl-espi.c
index f20326714b9d..e60581283a24 100644
--- a/drivers/spi/spi-fsl-espi.c
+++ b/drivers/spi/spi-fsl-espi.c
@@ -427,8 +427,7 @@ static int fsl_espi_trans(struct spi_message *m, struct spi_transfer *trans)
ret = fsl_espi_bufs(spi, trans);
- if (trans->delay_usecs)
- udelay(trans->delay_usecs);
+ spi_transfer_delay_exec(trans);
return ret;
}
@@ -437,6 +436,7 @@ static int fsl_espi_do_one_msg(struct spi_master *master,
struct spi_message *m)
{
unsigned int delay_usecs = 0, rx_nbits = 0;
+ unsigned int delay_nsecs = 0, delay_nsecs1 = 0;
struct spi_transfer *t, trans = {};
int ret;
@@ -445,8 +445,16 @@ static int fsl_espi_do_one_msg(struct spi_master *master,
goto out;
list_for_each_entry(t, &m->transfers, transfer_list) {
- if (t->delay_usecs > delay_usecs)
- delay_usecs = t->delay_usecs;
+ if (t->delay_usecs) {
+ if (t->delay_usecs > delay_usecs) {
+ delay_usecs = t->delay_usecs;
+ delay_nsecs = delay_usecs * 1000;
+ }
+ } else {
+ delay_nsecs1 = spi_delay_to_ns(&t->delay, t);
+ if (delay_nsecs1 > delay_nsecs)
+ delay_nsecs = delay_nsecs1;
+ }
if (t->rx_nbits > rx_nbits)
rx_nbits = t->rx_nbits;
}
@@ -457,7 +465,8 @@ static int fsl_espi_do_one_msg(struct spi_master *master,
trans.len = m->frame_length;
trans.speed_hz = t->speed_hz;
trans.bits_per_word = t->bits_per_word;
- trans.delay_usecs = delay_usecs;
+ trans.delay.value = delay_nsecs;
+ trans.delay.unit = SPI_DELAY_UNIT_NSECS;
trans.rx_nbits = rx_nbits;
if (trans.len)
diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c
index d08e9324140e..2cc0ddb4a988 100644
--- a/drivers/spi/spi-fsl-lpspi.c
+++ b/drivers/spi/spi-fsl-lpspi.c
@@ -675,7 +675,7 @@ static int fsl_lpspi_dma_init(struct device *dev,
int ret;
/* Prepare for TX DMA: */
- controller->dma_tx = dma_request_slave_channel_reason(dev, "tx");
+ controller->dma_tx = dma_request_chan(dev, "tx");
if (IS_ERR(controller->dma_tx)) {
ret = PTR_ERR(controller->dma_tx);
dev_dbg(dev, "can't get the TX DMA channel, error %d!\n", ret);
@@ -684,7 +684,7 @@ static int fsl_lpspi_dma_init(struct device *dev,
}
/* Prepare for RX DMA: */
- controller->dma_rx = dma_request_slave_channel_reason(dev, "rx");
+ controller->dma_rx = dma_request_chan(dev, "rx");
if (IS_ERR(controller->dma_rx)) {
ret = PTR_ERR(controller->dma_rx);
dev_dbg(dev, "can't get the RX DMA channel, error %d\n", ret);
@@ -779,7 +779,7 @@ static irqreturn_t fsl_lpspi_isr(int irq, void *dev_id)
if (temp_SR & SR_FCF && (temp_IER & IER_FCIE)) {
writel(SR_FCF, fsl_lpspi->base + IMX7ULP_SR);
- complete(&fsl_lpspi->xfer_done);
+ complete(&fsl_lpspi->xfer_done);
return IRQ_HANDLED;
}
@@ -938,7 +938,7 @@ static int fsl_lpspi_probe(struct platform_device *pdev)
ret = pm_runtime_get_sync(fsl_lpspi->dev);
if (ret < 0) {
dev_err(fsl_lpspi->dev, "failed to enable clock\n");
- return ret;
+ goto out_controller_put;
}
temp = readl(fsl_lpspi->base + IMX7ULP_PARAM);
diff --git a/drivers/spi/spi-fsl-qspi.c b/drivers/spi/spi-fsl-qspi.c
index c02e24c01136..79b1558b74b8 100644
--- a/drivers/spi/spi-fsl-qspi.c
+++ b/drivers/spi/spi-fsl-qspi.c
@@ -63,6 +63,16 @@
#define QUADSPI_IPCR 0x08
#define QUADSPI_IPCR_SEQID(x) ((x) << 24)
+#define QUADSPI_FLSHCR 0x0c
+#define QUADSPI_FLSHCR_TCSS_MASK GENMASK(3, 0)
+#define QUADSPI_FLSHCR_TCSH_MASK GENMASK(11, 8)
+#define QUADSPI_FLSHCR_TDH_MASK GENMASK(17, 16)
+
+#define QUADSPI_BUF0CR 0x10
+#define QUADSPI_BUF1CR 0x14
+#define QUADSPI_BUF2CR 0x18
+#define QUADSPI_BUFXCR_INVALID_MSTRID 0xe
+
#define QUADSPI_BUF3CR 0x1c
#define QUADSPI_BUF3CR_ALLMST_MASK BIT(31)
#define QUADSPI_BUF3CR_ADATSZ(x) ((x) << 8)
@@ -95,6 +105,9 @@
#define QUADSPI_FR 0x160
#define QUADSPI_FR_TFF_MASK BIT(0)
+#define QUADSPI_RSER 0x164
+#define QUADSPI_RSER_TFIE BIT(0)
+
#define QUADSPI_SPTRCLR 0x16c
#define QUADSPI_SPTRCLR_IPPTRC BIT(8)
#define QUADSPI_SPTRCLR_BFPTRC BIT(0)
@@ -112,9 +125,6 @@
#define QUADSPI_LCKER_LOCK BIT(0)
#define QUADSPI_LCKER_UNLOCK BIT(1)
-#define QUADSPI_RSER 0x164
-#define QUADSPI_RSER_TFIE BIT(0)
-
#define QUADSPI_LUT_BASE 0x310
#define QUADSPI_LUT_OFFSET (SEQID_LUT * 4 * 4)
#define QUADSPI_LUT_REG(idx) \
@@ -181,9 +191,16 @@
*/
#define QUADSPI_QUIRK_BASE_INTERNAL BIT(4)
+/*
+ * Controller uses TDH bits in register QUADSPI_FLSHCR.
+ * They need to be set in accordance with the DDR/SDR mode.
+ */
+#define QUADSPI_QUIRK_USE_TDH_SETTING BIT(5)
+
struct fsl_qspi_devtype_data {
unsigned int rxfifo;
unsigned int txfifo;
+ int invalid_mstrid;
unsigned int ahb_buf_size;
unsigned int quirks;
bool little_endian;
@@ -192,6 +209,7 @@ struct fsl_qspi_devtype_data {
static const struct fsl_qspi_devtype_data vybrid_data = {
.rxfifo = SZ_128,
.txfifo = SZ_64,
+ .invalid_mstrid = QUADSPI_BUFXCR_INVALID_MSTRID,
.ahb_buf_size = SZ_1K,
.quirks = QUADSPI_QUIRK_SWAP_ENDIAN,
.little_endian = true,
@@ -200,6 +218,7 @@ static const struct fsl_qspi_devtype_data vybrid_data = {
static const struct fsl_qspi_devtype_data imx6sx_data = {
.rxfifo = SZ_128,
.txfifo = SZ_512,
+ .invalid_mstrid = QUADSPI_BUFXCR_INVALID_MSTRID,
.ahb_buf_size = SZ_1K,
.quirks = QUADSPI_QUIRK_4X_INT_CLK | QUADSPI_QUIRK_TKT245618,
.little_endian = true,
@@ -208,22 +227,27 @@ static const struct fsl_qspi_devtype_data imx6sx_data = {
static const struct fsl_qspi_devtype_data imx7d_data = {
.rxfifo = SZ_128,
.txfifo = SZ_512,
+ .invalid_mstrid = QUADSPI_BUFXCR_INVALID_MSTRID,
.ahb_buf_size = SZ_1K,
- .quirks = QUADSPI_QUIRK_TKT253890 | QUADSPI_QUIRK_4X_INT_CLK,
+ .quirks = QUADSPI_QUIRK_TKT253890 | QUADSPI_QUIRK_4X_INT_CLK |
+ QUADSPI_QUIRK_USE_TDH_SETTING,
.little_endian = true,
};
static const struct fsl_qspi_devtype_data imx6ul_data = {
.rxfifo = SZ_128,
.txfifo = SZ_512,
+ .invalid_mstrid = QUADSPI_BUFXCR_INVALID_MSTRID,
.ahb_buf_size = SZ_1K,
- .quirks = QUADSPI_QUIRK_TKT253890 | QUADSPI_QUIRK_4X_INT_CLK,
+ .quirks = QUADSPI_QUIRK_TKT253890 | QUADSPI_QUIRK_4X_INT_CLK |
+ QUADSPI_QUIRK_USE_TDH_SETTING,
.little_endian = true,
};
static const struct fsl_qspi_devtype_data ls1021a_data = {
.rxfifo = SZ_128,
.txfifo = SZ_64,
+ .invalid_mstrid = QUADSPI_BUFXCR_INVALID_MSTRID,
.ahb_buf_size = SZ_1K,
.quirks = 0,
.little_endian = false,
@@ -233,6 +257,7 @@ static const struct fsl_qspi_devtype_data ls2080a_data = {
.rxfifo = SZ_128,
.txfifo = SZ_64,
.ahb_buf_size = SZ_1K,
+ .invalid_mstrid = 0x0,
.quirks = QUADSPI_QUIRK_TKT253890 | QUADSPI_QUIRK_BASE_INTERNAL,
.little_endian = true,
};
@@ -275,6 +300,11 @@ static inline int needs_amba_base_offset(struct fsl_qspi *q)
return !(q->devtype_data->quirks & QUADSPI_QUIRK_BASE_INTERNAL);
}
+static inline int needs_tdh_setting(struct fsl_qspi *q)
+{
+ return q->devtype_data->quirks & QUADSPI_QUIRK_USE_TDH_SETTING;
+}
+
/*
* An IC bug makes it necessary to rearrange the 32-bit data.
* Later chips, such as IMX6SLX, have fixed this bug.
@@ -615,6 +645,7 @@ static int fsl_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
void __iomem *base = q->iobase;
u32 addr_offset = 0;
int err = 0;
+ int invalid_mstrid = q->devtype_data->invalid_mstrid;
mutex_lock(&q->lock);
@@ -638,6 +669,10 @@ static int fsl_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
qspi_writel(q, QUADSPI_SPTRCLR_BFPTRC | QUADSPI_SPTRCLR_IPPTRC,
base + QUADSPI_SPTRCLR);
+ qspi_writel(q, invalid_mstrid, base + QUADSPI_BUF0CR);
+ qspi_writel(q, invalid_mstrid, base + QUADSPI_BUF1CR);
+ qspi_writel(q, invalid_mstrid, base + QUADSPI_BUF2CR);
+
fsl_qspi_prepare_lut(q, op);
/*
@@ -710,6 +745,16 @@ static int fsl_qspi_default_setup(struct fsl_qspi *q)
qspi_writel(q, QUADSPI_MCR_MDIS_MASK | QUADSPI_MCR_RESERVED_MASK,
base + QUADSPI_MCR);
+ /*
+ * Previous boot stages (BootROM, bootloader) might have used DDR
+ * mode and did not clear the TDH bits. As we currently use SDR mode
+ * only, clear the TDH bits if necessary.
+ */
+ if (needs_tdh_setting(q))
+ qspi_writel(q, qspi_readl(q, base + QUADSPI_FLSHCR) &
+ ~QUADSPI_FLSHCR_TDH_MASK,
+ base + QUADSPI_FLSHCR);
+
reg = qspi_readl(q, base + QUADSPI_SMPR);
qspi_writel(q, reg & ~(QUADSPI_SMPR_FSDLY_MASK
| QUADSPI_SMPR_FSPHS_MASK
diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c
index 4b80ace1d137..114801a32371 100644
--- a/drivers/spi/spi-fsl-spi.c
+++ b/drivers/spi/spi-fsl-spi.c
@@ -416,8 +416,7 @@ static int fsl_spi_do_one_msg(struct spi_master *master,
}
m->actual_length += t->len;
- if (t->delay_usecs)
- udelay(t->delay_usecs);
+ spi_transfer_delay_exec(t);
if (cs_change) {
ndelay(nsecs);
diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c
index 1d3e23ec20a6..7ceb0ba27b75 100644
--- a/drivers/spi/spi-gpio.c
+++ b/drivers/spi/spi-gpio.c
@@ -362,19 +362,18 @@ static int spi_gpio_probe(struct platform_device *pdev)
struct spi_gpio *spi_gpio;
struct device *dev = &pdev->dev;
struct spi_bitbang *bb;
- const struct of_device_id *of_id;
-
- of_id = of_match_device(spi_gpio_dt_ids, &pdev->dev);
master = spi_alloc_master(dev, sizeof(*spi_gpio));
if (!master)
return -ENOMEM;
status = devm_add_action_or_reset(&pdev->dev, spi_gpio_put, master);
- if (status)
+ if (status) {
+ spi_master_put(master);
return status;
+ }
- if (of_id)
+ if (pdev->dev.of_node)
status = spi_gpio_probe_dt(pdev, master);
else
status = spi_gpio_probe_pdata(pdev, master);
diff --git a/drivers/spi/spi-img-spfi.c b/drivers/spi/spi-img-spfi.c
index 439b01e4a2c8..f4a8f470aecc 100644
--- a/drivers/spi/spi-img-spfi.c
+++ b/drivers/spi/spi-img-spfi.c
@@ -673,6 +673,8 @@ static int img_spfi_probe(struct platform_device *pdev)
dma_release_channel(spfi->tx_ch);
if (spfi->rx_ch)
dma_release_channel(spfi->rx_ch);
+ spfi->tx_ch = NULL;
+ spfi->rx_ch = NULL;
dev_warn(spfi->dev, "Failed to get DMA channels, falling back to PIO mode\n");
} else {
master->dma_tx = spfi->tx_ch;
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index 09c9a1edb2c6..49f0099db0cb 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -1272,7 +1272,7 @@ static int spi_imx_sdma_init(struct device *dev, struct spi_imx_data *spi_imx,
spi_imx->wml = spi_imx->devtype_data->fifo_size / 2;
/* Prepare for TX DMA: */
- master->dma_tx = dma_request_slave_channel_reason(dev, "tx");
+ master->dma_tx = dma_request_chan(dev, "tx");
if (IS_ERR(master->dma_tx)) {
ret = PTR_ERR(master->dma_tx);
dev_dbg(dev, "can't get the TX DMA channel, error %d!\n", ret);
@@ -1281,7 +1281,7 @@ static int spi_imx_sdma_init(struct device *dev, struct spi_imx_data *spi_imx,
}
/* Prepare for RX : */
- master->dma_rx = dma_request_slave_channel_reason(dev, "rx");
+ master->dma_rx = dma_request_chan(dev, "rx");
if (IS_ERR(master->dma_rx)) {
ret = PTR_ERR(master->dma_rx);
dev_dbg(dev, "can't get the RX DMA channel, error %d\n", ret);
diff --git a/drivers/spi/spi-lantiq-ssc.c b/drivers/spi/spi-lantiq-ssc.c
index 9dfe8b04e688..1fd7ee53d451 100644
--- a/drivers/spi/spi-lantiq-ssc.c
+++ b/drivers/spi/spi-lantiq-ssc.c
@@ -797,7 +797,6 @@ static int lantiq_ssc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct spi_master *master;
- struct resource *res;
struct lantiq_ssc_spi *spi;
const struct lantiq_ssc_hwcfg *hwcfg;
const struct of_device_id *match;
@@ -812,12 +811,6 @@ static int lantiq_ssc_probe(struct platform_device *pdev)
}
hwcfg = match->data;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(dev, "failed to get resources\n");
- return -ENXIO;
- }
-
rx_irq = platform_get_irq_byname(pdev, LTQ_SPI_RX_IRQ_NAME);
if (rx_irq < 0)
return -ENXIO;
@@ -839,8 +832,7 @@ static int lantiq_ssc_probe(struct platform_device *pdev)
spi->dev = dev;
spi->hwcfg = hwcfg;
platform_set_drvdata(pdev, spi);
-
- spi->regbase = devm_ioremap_resource(dev, res);
+ spi->regbase = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(spi->regbase)) {
err = PTR_ERR(spi->regbase);
goto err_master_put;
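
devm_platform_ioremap_resource() wraps exactly the boilerplate deleted above; a sketch of the equivalence:

	/* one call ... */
	base = devm_platform_ioremap_resource(pdev, 0);

	/* ... instead of two (devm_ioremap_resource() rejects res == NULL itself) */
	res  = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
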
diff --git a/drivers/spi/spi-loopback-test.c b/drivers/spi/spi-loopback-test.c
index 6f18d4952767..b6d79cd156fb 100644
--- a/drivers/spi/spi-loopback-test.c
+++ b/drivers/spi/spi-loopback-test.c
@@ -298,12 +298,18 @@ static struct spi_test spi_tests[] = {
{
.tx_buf = TX(0),
.rx_buf = RX(0),
- .delay_usecs = 1000,
+ .delay = {
+ .value = 1000,
+ .unit = SPI_DELAY_UNIT_USECS,
+ },
},
{
.tx_buf = TX(0),
.rx_buf = RX(0),
- .delay_usecs = 1000,
+ .delay = {
+ .value = 1000,
+ .unit = SPI_DELAY_UNIT_USECS,
+ },
},
},
},
@@ -537,7 +543,7 @@ static int spi_test_check_elapsed_time(struct spi_device *spi,
unsigned long long nbits = (unsigned long long)BITS_PER_BYTE *
xfer->len;
- delay_usecs += xfer->delay_usecs;
+ delay_usecs += xfer->delay.value;
if (!xfer->speed_hz)
continue;
estimated_time += div_u64(nbits * NSEC_PER_SEC, xfer->speed_hz);
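
For reference, the structure these tests now fill in is defined in include/linux/spi/spi.h by this series as:

	struct spi_delay {
	#define SPI_DELAY_UNIT_USECS	0
	#define SPI_DELAY_UNIT_NSECS	1
	#define SPI_DELAY_UNIT_SCK	2
		u16	value;
		u8	unit;
	};
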
diff --git a/drivers/spi/spi-mem.c b/drivers/spi/spi-mem.c
index 9f0fa9f3116d..e5a46f0eb93b 100644
--- a/drivers/spi/spi-mem.c
+++ b/drivers/spi/spi-mem.c
@@ -286,7 +286,7 @@ int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
if (!spi_mem_internal_supports_op(mem, op))
return -ENOTSUPP;
- if (ctlr->mem_ops) {
+ if (ctlr->mem_ops && !mem->spi->cs_gpiod) {
ret = spi_mem_access_start(mem);
if (ret)
return ret;
diff --git a/drivers/spi/spi-mpc512x-psc.c b/drivers/spi/spi-mpc512x-psc.c
index a337b842ae8c..ea1b07953d38 100644
--- a/drivers/spi/spi-mpc512x-psc.c
+++ b/drivers/spi/spi-mpc512x-psc.c
@@ -311,8 +311,7 @@ static int mpc512x_psc_spi_msg_xfer(struct spi_master *master,
break;
m->actual_length += t->len;
- if (t->delay_usecs)
- udelay(t->delay_usecs);
+ spi_transfer_delay_exec(t);
if (cs_change)
mpc512x_psc_spi_deactivate_cs(spi);
diff --git a/drivers/spi/spi-mpc52xx-psc.c b/drivers/spi/spi-mpc52xx-psc.c
index c7e478b9b586..17935e71b02f 100644
--- a/drivers/spi/spi-mpc52xx-psc.c
+++ b/drivers/spi/spi-mpc52xx-psc.c
@@ -234,8 +234,7 @@ static void mpc52xx_psc_spi_work(struct work_struct *work)
break;
m->actual_length += t->len;
- if (t->delay_usecs)
- udelay(t->delay_usecs);
+ spi_transfer_delay_exec(t);
if (cs_change)
mpc52xx_psc_spi_deactivate_cs(spi);
diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c
index 6888a4dcff6d..6783e12c40c2 100644
--- a/drivers/spi/spi-mt65xx.c
+++ b/drivers/spi/spi-mt65xx.c
@@ -139,7 +139,6 @@ static const struct mtk_spi_compatible mt8183_compat = {
* supplies it.
*/
static const struct mtk_chip_config mtk_default_chip_info = {
- .cs_pol = 0,
.sample_sel = 0,
};
@@ -230,10 +229,12 @@ static int mtk_spi_prepare_message(struct spi_master *master,
#endif
if (mdata->dev_comp->enhance_timing) {
- if (chip_config->cs_pol)
+ /* set CS polarity */
+ if (spi->mode & SPI_CS_HIGH)
reg_val |= SPI_CMD_CS_POL;
else
reg_val &= ~SPI_CMD_CS_POL;
+
if (chip_config->sample_sel)
reg_val |= SPI_CMD_SAMPLE_SEL;
else
@@ -264,6 +265,9 @@ static void mtk_spi_set_cs(struct spi_device *spi, bool enable)
u32 reg_val;
struct mtk_spi *mdata = spi_master_get_devdata(spi->master);
+ if (spi->mode & SPI_CS_HIGH)
+ enable = !enable;
+
reg_val = readl(mdata->base + SPI_CMD_REG);
if (!enable) {
reg_val |= SPI_CMD_PAUSE_EN;
@@ -619,7 +623,6 @@ static int mtk_spi_probe(struct platform_device *pdev)
struct spi_master *master;
struct mtk_spi *mdata;
const struct of_device_id *of_id;
- struct resource *res;
int i, irq, ret, addr_bits;
master = spi_alloc_master(&pdev->dev, sizeof(*mdata));
@@ -647,6 +650,10 @@ static int mtk_spi_probe(struct platform_device *pdev)
mdata = spi_master_get_devdata(master);
mdata->dev_comp = of_id->data;
+
+ if (mdata->dev_comp->enhance_timing)
+ master->mode_bits |= SPI_CS_HIGH;
+
if (mdata->dev_comp->must_tx)
master->flags = SPI_MASTER_MUST_TX;
@@ -682,15 +689,7 @@ static int mtk_spi_probe(struct platform_device *pdev)
}
platform_set_drvdata(pdev, master);
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- ret = -ENODEV;
- dev_err(&pdev->dev, "failed to determine base address\n");
- goto err_put_master;
- }
-
- mdata->base = devm_ioremap_resource(&pdev->dev, res);
+ mdata->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mdata->base)) {
ret = PTR_ERR(mdata->base);
goto err_put_master;
diff --git a/drivers/spi/spi-mxic.c b/drivers/spi/spi-mxic.c
index f48563c09b97..69491f3a515d 100644
--- a/drivers/spi/spi-mxic.c
+++ b/drivers/spi/spi-mxic.c
@@ -145,8 +145,8 @@
#define LWR_SUSP_CTRL_EN BIT(31)
#define DMAS_CTRL 0x9c
-#define DMAS_CTRL_DIR_READ BIT(31)
-#define DMAS_CTRL_EN BIT(30)
+#define DMAS_CTRL_EN BIT(31)
+#define DMAS_CTRL_DIR_READ BIT(30)
#define DATA_STROB 0xa0
#define DATA_STROB_EDO_EN BIT(2)
@@ -275,7 +275,7 @@ static void mxic_spi_hw_init(struct mxic_spi *mxic)
writel(0, mxic->regs + HC_EN);
writel(0, mxic->regs + LRD_CFG);
writel(0, mxic->regs + LRD_CTRL);
- writel(HC_CFG_NIO(1) | HC_CFG_TYPE(0, HC_CFG_TYPE_SPI_NAND) |
+ writel(HC_CFG_NIO(1) | HC_CFG_TYPE(0, HC_CFG_TYPE_SPI_NOR) |
HC_CFG_SLV_ACT(0) | HC_CFG_MAN_CS_EN | HC_CFG_IDLE_SIO_LVL(1),
mxic->regs + HC_CFG);
}
@@ -346,7 +346,7 @@ static bool mxic_spi_mem_supports_op(struct spi_mem *mem,
if (op->addr.nbytes > 7)
return false;
- return true;
+ return spi_mem_default_supports_op(mem, op);
}
static int mxic_spi_mem_exec_op(struct spi_mem *mem,
diff --git a/drivers/spi/spi-npcm-pspi.c b/drivers/spi/spi-npcm-pspi.c
index b191d57d1dc0..fe624731c74c 100644
--- a/drivers/spi/spi-npcm-pspi.c
+++ b/drivers/spi/spi-npcm-pspi.c
@@ -293,7 +293,6 @@ static void npcm_pspi_reset_hw(struct npcm_pspi *priv)
static irqreturn_t npcm_pspi_handler(int irq, void *dev_id)
{
struct npcm_pspi *priv = dev_id;
- u16 val;
u8 stat;
stat = ioread8(priv->base + NPCM_PSPI_STAT);
@@ -303,7 +302,7 @@ static irqreturn_t npcm_pspi_handler(int irq, void *dev_id)
if (priv->tx_buf) {
if (stat & NPCM_PSPI_STAT_RBF) {
- val = ioread8(NPCM_PSPI_DATA + priv->base);
+ ioread8(NPCM_PSPI_DATA + priv->base);
if (priv->tx_bytes == 0) {
npcm_pspi_disable(priv);
complete(&priv->xfer_done);
diff --git a/drivers/spi/spi-nxp-fspi.c b/drivers/spi/spi-nxp-fspi.c
index 501b923f2c27..c36bb1bb464e 100644
--- a/drivers/spi/spi-nxp-fspi.c
+++ b/drivers/spi/spi-nxp-fspi.c
@@ -1027,7 +1027,7 @@ static int nxp_fspi_probe(struct platform_device *pdev)
ctlr->dev.of_node = np;
- ret = spi_register_controller(ctlr);
+ ret = devm_spi_register_controller(&pdev->dev, ctlr);
if (ret)
goto err_destroy_mutex;
diff --git a/drivers/spi/spi-omap-100k.c b/drivers/spi/spi-omap-100k.c
index b955ca8796d2..5c704ba6d8ea 100644
--- a/drivers/spi/spi-omap-100k.c
+++ b/drivers/spi/spi-omap-100k.c
@@ -128,7 +128,7 @@ static void spi100k_write_data(struct spi_master *master, int len, int data)
static int spi100k_read_data(struct spi_master *master, int len)
{
- int dataH, dataL;
+ int dataL;
struct omap1_spi100k *spi100k = spi_master_get_devdata(master);
/* Always do at least 16 bits */
@@ -146,7 +146,7 @@ static int spi100k_read_data(struct spi_master *master, int len)
udelay(1000);
dataL = readw(spi100k->base + SPI_RX_LSB);
- dataH = readw(spi100k->base + SPI_RX_MSB);
+ readw(spi100k->base + SPI_RX_MSB);
spi100k_disable_clock(master);
return dataL;
@@ -321,8 +321,7 @@ static int omap1_spi100k_transfer_one_message(struct spi_master *master,
}
}
- if (t->delay_usecs)
- udelay(t->delay_usecs);
+ spi_transfer_delay_exec(t);
/* ignore the "leave it on after last xfer" hint */
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index 848e03e5f42d..7e2292c11d12 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -397,30 +397,26 @@ static void omap2_mcspi_tx_dma(struct spi_device *spi,
{
struct omap2_mcspi *mcspi;
struct omap2_mcspi_dma *mcspi_dma;
+ struct dma_async_tx_descriptor *tx;
mcspi = spi_master_get_devdata(spi->master);
mcspi_dma = &mcspi->dma_channels[spi->chip_select];
- if (mcspi_dma->dma_tx) {
- struct dma_async_tx_descriptor *tx;
+ dmaengine_slave_config(mcspi_dma->dma_tx, &cfg);
- dmaengine_slave_config(mcspi_dma->dma_tx, &cfg);
-
- tx = dmaengine_prep_slave_sg(mcspi_dma->dma_tx, xfer->tx_sg.sgl,
- xfer->tx_sg.nents,
- DMA_MEM_TO_DEV,
- DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
- if (tx) {
- tx->callback = omap2_mcspi_tx_callback;
- tx->callback_param = spi;
- dmaengine_submit(tx);
- } else {
- /* FIXME: fall back to PIO? */
- }
+ tx = dmaengine_prep_slave_sg(mcspi_dma->dma_tx, xfer->tx_sg.sgl,
+ xfer->tx_sg.nents,
+ DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (tx) {
+ tx->callback = omap2_mcspi_tx_callback;
+ tx->callback_param = spi;
+ dmaengine_submit(tx);
+ } else {
+ /* FIXME: fall back to PIO? */
}
dma_async_issue_pending(mcspi_dma->dma_tx);
omap2_mcspi_set_dma_req(spi, 0, 1);
-
}
static unsigned
@@ -439,6 +435,7 @@ omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer,
int word_len, element_count;
struct omap2_mcspi_cs *cs = spi->controller_state;
void __iomem *chstat_reg = cs->base + OMAP2_MCSPI_CHSTAT0;
+ struct dma_async_tx_descriptor *tx;
mcspi = spi_master_get_devdata(spi->master);
mcspi_dma = &mcspi->dma_channels[spi->chip_select];
@@ -462,55 +459,47 @@ omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer,
else /* word_len <= 32 */
element_count = count >> 2;
- if (mcspi_dma->dma_rx) {
- struct dma_async_tx_descriptor *tx;
- dmaengine_slave_config(mcspi_dma->dma_rx, &cfg);
+ dmaengine_slave_config(mcspi_dma->dma_rx, &cfg);
+ /*
+ * Reduce DMA transfer length by one more if McSPI is
+ * configured in turbo mode.
+ */
+ if ((l & OMAP2_MCSPI_CHCONF_TURBO) && mcspi->fifo_depth == 0)
+ transfer_reduction += es;
+
+ if (transfer_reduction) {
+ /* Split sgl into two. The second sgl won't be used. */
+ sizes[0] = count - transfer_reduction;
+ sizes[1] = transfer_reduction;
+ nb_sizes = 2;
+ } else {
/*
- * Reduce DMA transfer length by one more if McSPI is
- * configured in turbo mode.
+ * Don't bother splitting the sgl. This essentially
+ * clones the original sgl.
*/
- if ((l & OMAP2_MCSPI_CHCONF_TURBO) && mcspi->fifo_depth == 0)
- transfer_reduction += es;
-
- if (transfer_reduction) {
- /* Split sgl into two. The second sgl won't be used. */
- sizes[0] = count - transfer_reduction;
- sizes[1] = transfer_reduction;
- nb_sizes = 2;
- } else {
- /*
- * Don't bother splitting the sgl. This essentially
- * clones the original sgl.
- */
- sizes[0] = count;
- nb_sizes = 1;
- }
+ sizes[0] = count;
+ nb_sizes = 1;
+ }
- ret = sg_split(xfer->rx_sg.sgl, xfer->rx_sg.nents,
- 0, nb_sizes,
- sizes,
- sg_out, out_mapped_nents,
- GFP_KERNEL);
+ ret = sg_split(xfer->rx_sg.sgl, xfer->rx_sg.nents, 0, nb_sizes,
+ sizes, sg_out, out_mapped_nents, GFP_KERNEL);
- if (ret < 0) {
- dev_err(&spi->dev, "sg_split failed\n");
- return 0;
- }
+ if (ret < 0) {
+ dev_err(&spi->dev, "sg_split failed\n");
+ return 0;
+ }
- tx = dmaengine_prep_slave_sg(mcspi_dma->dma_rx,
- sg_out[0],
- out_mapped_nents[0],
- DMA_DEV_TO_MEM,
- DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
- if (tx) {
- tx->callback = omap2_mcspi_rx_callback;
- tx->callback_param = spi;
- dmaengine_submit(tx);
- } else {
- /* FIXME: fall back to PIO? */
- }
+ tx = dmaengine_prep_slave_sg(mcspi_dma->dma_rx, sg_out[0],
+ out_mapped_nents[0], DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (tx) {
+ tx->callback = omap2_mcspi_rx_callback;
+ tx->callback_param = spi;
+ dmaengine_submit(tx);
+ } else {
+ /* FIXME: fall back to PIO? */
}
dma_async_issue_pending(mcspi_dma->dma_rx);
diff --git a/drivers/spi/spi-orion.c b/drivers/spi/spi-orion.c
index 6643ccdc2508..c7266ef295fd 100644
--- a/drivers/spi/spi-orion.c
+++ b/drivers/spi/spi-orion.c
@@ -467,8 +467,7 @@ orion_spi_write_read(struct spi_device *spi, struct spi_transfer *xfer)
if (orion_spi_write_read_8bit(spi, &tx, &rx) < 0)
goto out;
count--;
- if (xfer->word_delay_usecs)
- udelay(xfer->word_delay_usecs);
+ spi_delay_exec(&xfer->word_delay, xfer);
} while (count);
} else if (word_len == 16) {
const u16 *tx = xfer->tx_buf;
@@ -478,8 +477,7 @@ orion_spi_write_read(struct spi_device *spi, struct spi_transfer *xfer)
if (orion_spi_write_read_16bit(spi, &tx, &rx) < 0)
goto out;
count -= 2;
- if (xfer->word_delay_usecs)
- udelay(xfer->word_delay_usecs);
+ spi_delay_exec(&xfer->word_delay, xfer);
} while (count);
}
@@ -772,9 +770,6 @@ static int orion_spi_probe(struct platform_device *pdev)
if (status < 0)
goto out_rel_pm;
- pm_runtime_mark_last_busy(&pdev->dev);
- pm_runtime_put_autosuspend(&pdev->dev);
-
master->dev.of_node = pdev->dev.of_node;
status = spi_register_master(master);
if (status < 0)
diff --git a/drivers/spi/spi-pic32.c b/drivers/spi/spi-pic32.c
index 69f517ec59c6..156961b4ca86 100644
--- a/drivers/spi/spi-pic32.c
+++ b/drivers/spi/spi-pic32.c
@@ -606,25 +606,30 @@ static void pic32_spi_cleanup(struct spi_device *spi)
gpio_direction_output(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
}
-static void pic32_spi_dma_prep(struct pic32_spi *pic32s, struct device *dev)
+static int pic32_spi_dma_prep(struct pic32_spi *pic32s, struct device *dev)
{
struct spi_master *master = pic32s->master;
- dma_cap_mask_t mask;
+ int ret = 0;
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
+ master->dma_rx = dma_request_chan(dev, "spi-rx");
+ if (IS_ERR(master->dma_rx)) {
+ if (PTR_ERR(master->dma_rx) == -EPROBE_DEFER)
+ ret = -EPROBE_DEFER;
+ else
+ dev_warn(dev, "RX channel not found.\n");
- master->dma_rx = dma_request_slave_channel_compat(mask, NULL, NULL,
- dev, "spi-rx");
- if (!master->dma_rx) {
- dev_warn(dev, "RX channel not found.\n");
+ master->dma_rx = NULL;
goto out_err;
}
- master->dma_tx = dma_request_slave_channel_compat(mask, NULL, NULL,
- dev, "spi-tx");
- if (!master->dma_tx) {
- dev_warn(dev, "TX channel not found.\n");
+ master->dma_tx = dma_request_chan(dev, "spi-tx");
+ if (IS_ERR(master->dma_tx)) {
+ if (PTR_ERR(master->dma_tx) == -EPROBE_DEFER)
+ ret = -EPROBE_DEFER;
+ else
+ dev_warn(dev, "TX channel not found.\n");
+
+ master->dma_tx = NULL;
goto out_err;
}
@@ -634,14 +639,20 @@ static void pic32_spi_dma_prep(struct pic32_spi *pic32s, struct device *dev)
/* DMA channels allocated and prepared */
set_bit(PIC32F_DMA_PREP, &pic32s->flags);
- return;
+ return 0;
out_err:
- if (master->dma_rx)
+ if (master->dma_rx) {
dma_release_channel(master->dma_rx);
+ master->dma_rx = NULL;
+ }
- if (master->dma_tx)
+ if (master->dma_tx) {
dma_release_channel(master->dma_tx);
+ master->dma_tx = NULL;
+ }
+
+ return ret;
}
static void pic32_spi_dma_unprep(struct pic32_spi *pic32s)
@@ -776,7 +787,10 @@ static int pic32_spi_probe(struct platform_device *pdev)
master->unprepare_transfer_hardware = pic32_spi_unprepare_hardware;
/* optional DMA support */
- pic32_spi_dma_prep(pic32s, &pdev->dev);
+ ret = pic32_spi_dma_prep(pic32s, &pdev->dev);
+ if (ret)
+ goto err_bailout;
+
if (test_bit(PIC32F_DMA_PREP, &pic32s->flags))
master->can_dma = pic32_spi_can_dma;
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
index 7fedea67159c..66028ebbc336 100644
--- a/drivers/spi/spi-pl022.c
+++ b/drivers/spi/spi-pl022.c
@@ -485,12 +485,11 @@ static void giveback(struct pl022 *pl022)
struct spi_transfer, transfer_list);
/* Delay if requested before any change in chip select */
- if (last_transfer->delay_usecs)
- /*
- * FIXME: This runs in interrupt context.
- * Is this really smart?
- */
- udelay(last_transfer->delay_usecs);
+ /*
+ * FIXME: This runs in interrupt context.
+ * Is this really smart?
+ */
+ spi_transfer_delay_exec(last_transfer);
if (!last_transfer->cs_change) {
struct spi_message *next_msg;
@@ -1159,7 +1158,7 @@ static int pl022_dma_autoprobe(struct pl022 *pl022)
int err;
/* automatically configure DMA channels from platform, normally using DT */
- chan = dma_request_slave_channel_reason(dev, "rx");
+ chan = dma_request_chan(dev, "rx");
if (IS_ERR(chan)) {
err = PTR_ERR(chan);
goto err_no_rxchan;
@@ -1167,7 +1166,7 @@ static int pl022_dma_autoprobe(struct pl022 *pl022)
pl022->dma_rx_channel = chan;
- chan = dma_request_slave_channel_reason(dev, "tx");
+ chan = dma_request_chan(dev, "tx");
if (IS_ERR(chan)) {
err = PTR_ERR(chan);
goto err_no_txchan;
@@ -1401,12 +1400,11 @@ static void pump_transfers(unsigned long data)
previous = list_entry(transfer->transfer_list.prev,
struct spi_transfer,
transfer_list);
- if (previous->delay_usecs)
- /*
- * FIXME: This runs in interrupt context.
- * Is this really smart?
- */
- udelay(previous->delay_usecs);
+ /*
+ * FIXME: This runs in interrupt context.
+ * Is this really smart?
+ */
+ spi_transfer_delay_exec(previous);
/* Reselect chip select only if cs_change was requested */
if (previous->cs_change)
@@ -1520,8 +1518,7 @@ static void do_polling_transfer(struct pl022 *pl022)
previous =
list_entry(transfer->transfer_list.prev,
struct spi_transfer, transfer_list);
- if (previous->delay_usecs)
- udelay(previous->delay_usecs);
+ spi_transfer_delay_exec(previous);
if (previous->cs_change)
pl022_cs_control(pl022, SSP_CHIP_SELECT);
} else {
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index bb6a14d1ab0f..16b6b2ad4e7c 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -4,27 +4,29 @@
* Copyright (C) 2013, Intel Corporation
*/
+#include <linux/acpi.h>
#include <linux/bitops.h>
-#include <linux/init.h>
-#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
#include <linux/device.h>
-#include <linux/ioport.h>
-#include <linux/errno.h>
#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/gpio/consumer.h>
+#include <linux/gpio.h>
+#include <linux/init.h>
#include <linux/interrupt.h>
+#include <linux/ioport.h>
#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/property.h>
+#include <linux/slab.h>
#include <linux/spi/pxa2xx_spi.h>
#include <linux/spi/spi.h>
-#include <linux/delay.h>
-#include <linux/gpio.h>
-#include <linux/gpio/consumer.h>
-#include <linux/slab.h>
-#include <linux/clk.h>
-#include <linux/pm_runtime.h>
-#include <linux/acpi.h>
-#include <linux/of_device.h>
#include "spi-pxa2xx.h"
@@ -1457,6 +1459,10 @@ static const struct pci_device_id pxa2xx_spi_pci_compound_match[] = {
{ PCI_VDEVICE(INTEL, 0x02aa), LPSS_CNL_SSP },
{ PCI_VDEVICE(INTEL, 0x02ab), LPSS_CNL_SSP },
{ PCI_VDEVICE(INTEL, 0x02fb), LPSS_CNL_SSP },
+ /* CML-H */
+ { PCI_VDEVICE(INTEL, 0x06aa), LPSS_CNL_SSP },
+ { PCI_VDEVICE(INTEL, 0x06ab), LPSS_CNL_SSP },
+ { PCI_VDEVICE(INTEL, 0x06fb), LPSS_CNL_SSP },
/* TGL-LP */
{ PCI_VDEVICE(INTEL, 0xa0aa), LPSS_CNL_SSP },
{ PCI_VDEVICE(INTEL, 0xa0ab), LPSS_CNL_SSP },
@@ -1476,11 +1482,13 @@ MODULE_DEVICE_TABLE(of, pxa2xx_spi_of_match);
#ifdef CONFIG_ACPI
-static int pxa2xx_spi_get_port_id(struct acpi_device *adev)
+static int pxa2xx_spi_get_port_id(struct device *dev)
{
+ struct acpi_device *adev;
unsigned int devid;
int port_id = -1;
+ adev = ACPI_COMPANION(dev);
if (adev && adev->pnp.unique_id &&
!kstrtouint(adev->pnp.unique_id, 0, &devid))
port_id = devid;
@@ -1489,7 +1497,7 @@ static int pxa2xx_spi_get_port_id(struct acpi_device *adev)
#else /* !CONFIG_ACPI */
-static int pxa2xx_spi_get_port_id(struct acpi_device *adev)
+static int pxa2xx_spi_get_port_id(struct device *dev)
{
return -1;
}
@@ -1510,34 +1518,22 @@ static struct pxa2xx_spi_controller *
pxa2xx_spi_init_pdata(struct platform_device *pdev)
{
struct pxa2xx_spi_controller *pdata;
- struct acpi_device *adev;
struct ssp_device *ssp;
struct resource *res;
- const struct acpi_device_id *adev_id = NULL;
+ struct device *parent = pdev->dev.parent;
+ struct pci_dev *pcidev = dev_is_pci(parent) ? to_pci_dev(parent) : NULL;
const struct pci_device_id *pcidev_id = NULL;
- const struct of_device_id *of_id = NULL;
enum pxa_ssp_type type;
+ const void *match;
- adev = ACPI_COMPANION(&pdev->dev);
-
- if (pdev->dev.of_node)
- of_id = of_match_device(pdev->dev.driver->of_match_table,
- &pdev->dev);
- else if (dev_is_pci(pdev->dev.parent))
- pcidev_id = pci_match_id(pxa2xx_spi_pci_compound_match,
- to_pci_dev(pdev->dev.parent));
- else if (adev)
- adev_id = acpi_match_device(pdev->dev.driver->acpi_match_table,
- &pdev->dev);
- else
- return NULL;
+ if (pcidev)
+ pcidev_id = pci_match_id(pxa2xx_spi_pci_compound_match, pcidev);
- if (adev_id)
- type = (enum pxa_ssp_type)adev_id->driver_data;
+ match = device_get_match_data(&pdev->dev);
+ if (match)
+ type = (enum pxa_ssp_type)match;
else if (pcidev_id)
type = (enum pxa_ssp_type)pcidev_id->driver_data;
- else if (of_id)
- type = (enum pxa_ssp_type)of_id->data;
else
return NULL;
@@ -1545,32 +1541,36 @@ pxa2xx_spi_init_pdata(struct platform_device *pdev)
if (!pdata)
return NULL;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return NULL;
-
ssp = &pdata->ssp;
- ssp->phys_base = res->start;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
ssp->mmio_base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(ssp->mmio_base))
return NULL;
+ ssp->phys_base = res->start;
+
#ifdef CONFIG_PCI
if (pcidev_id) {
- pdata->tx_param = pdev->dev.parent;
- pdata->rx_param = pdev->dev.parent;
+ pdata->tx_param = parent;
+ pdata->rx_param = parent;
pdata->dma_filter = pxa2xx_spi_idma_filter;
}
#endif
ssp->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(ssp->clk))
+ return NULL;
+
ssp->irq = platform_get_irq(pdev, 0);
+ if (ssp->irq < 0)
+ return NULL;
+
ssp->type = type;
- ssp->pdev = pdev;
- ssp->port_id = pxa2xx_spi_get_port_id(adev);
+ ssp->dev = &pdev->dev;
+ ssp->port_id = pxa2xx_spi_get_port_id(&pdev->dev);
- pdata->is_slave = of_property_read_bool(pdev->dev.of_node, "spi-slave");
+ pdata->is_slave = device_property_read_bool(&pdev->dev, "spi-slave");
pdata->num_chipselect = 1;
pdata->enable_dma = true;
pdata->dma_burst_size = 1;
@@ -1602,6 +1602,11 @@ static int pxa2xx_spi_fw_translate_cs(struct spi_controller *controller,
return cs;
}
+static size_t pxa2xx_spi_max_dma_transfer_size(struct spi_device *spi)
+{
+ return MAX_DMA_LEN;
+}
+
static int pxa2xx_spi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -1707,6 +1712,8 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
} else {
controller->can_dma = pxa2xx_spi_can_dma;
controller->max_dma_len = MAX_DMA_LEN;
+ controller->max_transfer_size =
+ pxa2xx_spi_max_dma_transfer_size;
}
}
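
device_get_match_data() (from linux/property.h) folds the separate of_match_device()/acpi_match_device() table walks into one firmware-agnostic lookup, which is what lets the probe path above shrink:

	const void *match;

	match = device_get_match_data(&pdev->dev);	/* DT or ACPI match data */
	if (match)
		type = (enum pxa_ssp_type)match;
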
diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c
index 2f559e531100..dd3434a407ea 100644
--- a/drivers/spi/spi-qup.c
+++ b/drivers/spi/spi-qup.c
@@ -932,11 +932,11 @@ static int spi_qup_init_dma(struct spi_master *master, resource_size_t base)
int ret;
/* allocate dma resources, if available */
- master->dma_rx = dma_request_slave_channel_reason(dev, "rx");
+ master->dma_rx = dma_request_chan(dev, "rx");
if (IS_ERR(master->dma_rx))
return PTR_ERR(master->dma_rx);
- master->dma_tx = dma_request_slave_channel_reason(dev, "tx");
+ master->dma_tx = dma_request_chan(dev, "tx");
if (IS_ERR(master->dma_tx)) {
ret = PTR_ERR(master->dma_tx);
goto err_tx;
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
index 15f5723d9f95..7222c7689c3c 100644
--- a/drivers/spi/spi-rspi.c
+++ b/drivers/spi/spi-rspi.c
@@ -1257,9 +1257,9 @@ static int rspi_probe(struct platform_device *pdev)
ctlr->flags = ops->flags;
ctlr->dev.of_node = pdev->dev.of_node;
- ret = platform_get_irq_byname(pdev, "rx");
+ ret = platform_get_irq_byname_optional(pdev, "rx");
if (ret < 0) {
- ret = platform_get_irq_byname(pdev, "mux");
+ ret = platform_get_irq_byname_optional(pdev, "mux");
if (ret < 0)
ret = platform_get_irq(pdev, 0);
if (ret >= 0)
@@ -1270,10 +1270,6 @@ static int rspi_probe(struct platform_device *pdev)
if (ret >= 0)
rspi->tx_irq = ret;
}
- if (ret < 0) {
- dev_err(&pdev->dev, "platform_get_irq error\n");
- goto error2;
- }
if (rspi->rx_irq == rspi->tx_irq) {
/* Single multiplexed interrupt */
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index 7b7151ec14c8..cf67ea60dc0e 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -1154,15 +1154,13 @@ static int s3c64xx_spi_probe(struct platform_device *pdev)
if (!is_polling(sdd)) {
/* Acquire DMA channels */
- sdd->rx_dma.ch = dma_request_slave_channel_reason(&pdev->dev,
- "rx");
+ sdd->rx_dma.ch = dma_request_chan(&pdev->dev, "rx");
if (IS_ERR(sdd->rx_dma.ch)) {
dev_err(&pdev->dev, "Failed to get RX DMA channel\n");
ret = PTR_ERR(sdd->rx_dma.ch);
goto err_disable_io_clk;
}
- sdd->tx_dma.ch = dma_request_slave_channel_reason(&pdev->dev,
- "tx");
+ sdd->tx_dma.ch = dma_request_chan(&pdev->dev, "tx");
if (IS_ERR(sdd->tx_dma.ch)) {
dev_err(&pdev->dev, "Failed to get TX DMA channel\n");
ret = PTR_ERR(sdd->tx_dma.ch);
diff --git a/drivers/spi/spi-sc18is602.c b/drivers/spi/spi-sc18is602.c
index 11acddc83304..5497eeb3bf3e 100644
--- a/drivers/spi/spi-sc18is602.c
+++ b/drivers/spi/spi-sc18is602.c
@@ -211,8 +211,7 @@ static int sc18is602_transfer_one(struct spi_master *master,
}
status = 0;
- if (t->delay_usecs)
- udelay(t->delay_usecs);
+ spi_transfer_delay_exec(t);
}
m->status = status;
spi_finalize_current_message(master);
diff --git a/drivers/spi/spi-sh-hspi.c b/drivers/spi/spi-sh-hspi.c
index 7f73f91d412a..a62034e2a7cb 100644
--- a/drivers/spi/spi-sh-hspi.c
+++ b/drivers/spi/spi-sh-hspi.c
@@ -190,8 +190,7 @@ static int hspi_transfer_one_message(struct spi_controller *ctlr,
msg->actual_length += t->len;
- if (t->delay_usecs)
- udelay(t->delay_usecs);
+ spi_transfer_delay_exec(t);
if (cs_change) {
ndelay(nsecs);
diff --git a/drivers/spi/spi-sifive.c b/drivers/spi/spi-sifive.c
index 35254bdc42c4..f7c1e20432e0 100644
--- a/drivers/spi/spi-sifive.c
+++ b/drivers/spi/spi-sifive.c
@@ -357,14 +357,14 @@ static int sifive_spi_probe(struct platform_device *pdev)
if (!cs_bits) {
dev_err(&pdev->dev, "Could not auto probe CS lines\n");
ret = -EINVAL;
- goto put_master;
+ goto disable_clk;
}
num_cs = ilog2(cs_bits) + 1;
if (num_cs > SIFIVE_SPI_MAX_CS) {
dev_err(&pdev->dev, "Invalid number of spi slaves\n");
ret = -EINVAL;
- goto put_master;
+ goto disable_clk;
}
/* Define our master */
@@ -393,7 +393,7 @@ static int sifive_spi_probe(struct platform_device *pdev)
dev_name(&pdev->dev), spi);
if (ret) {
dev_err(&pdev->dev, "Unable to bind to interrupt\n");
- goto put_master;
+ goto disable_clk;
}
dev_info(&pdev->dev, "mapped; irq=%d, cs=%d\n",
@@ -402,11 +402,13 @@ static int sifive_spi_probe(struct platform_device *pdev)
ret = devm_spi_register_master(&pdev->dev, master);
if (ret < 0) {
dev_err(&pdev->dev, "spi_register_master failed\n");
- goto put_master;
+ goto disable_clk;
}
return 0;
+disable_clk:
+ clk_disable_unprepare(spi->clk);
put_master:
spi_master_put(master);
@@ -420,6 +422,7 @@ static int sifive_spi_remove(struct platform_device *pdev)
/* Disable all the interrupts just in case */
sifive_spi_write(spi, SIFIVE_SPI_REG_IE, 0);
+ clk_disable_unprepare(spi->clk);
return 0;
}
diff --git a/drivers/spi/spi-slave-mt27xx.c b/drivers/spi/spi-slave-mt27xx.c
index 61bc43b0fe57..44edaa360405 100644
--- a/drivers/spi/spi-slave-mt27xx.c
+++ b/drivers/spi/spi-slave-mt27xx.c
@@ -368,7 +368,6 @@ static int mtk_spi_slave_probe(struct platform_device *pdev)
{
struct spi_controller *ctlr;
struct mtk_spi_slave *mdata;
- struct resource *res;
int irq, ret;
ctlr = spi_alloc_slave(&pdev->dev, sizeof(*mdata));
@@ -392,17 +391,8 @@ static int mtk_spi_slave_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ctlr);
init_completion(&mdata->xfer_done);
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- ret = -ENODEV;
- dev_err(&pdev->dev, "failed to determine base address\n");
- goto err_put_ctlr;
- }
-
mdata->dev = &pdev->dev;
-
- mdata->base = devm_ioremap_resource(&pdev->dev, res);
+ mdata->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mdata->base)) {
ret = PTR_ERR(mdata->base);
goto err_put_ctlr;
diff --git a/drivers/spi/spi-sprd-adi.c b/drivers/spi/spi-sprd-adi.c
index 9a051286f120..87dadb6b8ebf 100644
--- a/drivers/spi/spi-sprd-adi.c
+++ b/drivers/spi/spi-sprd-adi.c
@@ -77,6 +77,7 @@
/* Bits definitions for register REG_WDG_CTRL */
#define BIT_WDG_RUN BIT(1)
+#define BIT_WDG_NEW BIT(2)
#define BIT_WDG_RST BIT(3)
/* Registers definitions for PMIC */
@@ -383,6 +384,10 @@ static int sprd_adi_restart_handler(struct notifier_block *this,
/* Unlock the watchdog */
sprd_adi_write(sadi, sadi->slave_pbase + REG_WDG_LOCK, WDG_UNLOCK_KEY);
+ sprd_adi_read(sadi, sadi->slave_pbase + REG_WDG_CTRL, &val);
+ val |= BIT_WDG_NEW;
+ sprd_adi_write(sadi, sadi->slave_pbase + REG_WDG_CTRL, val);
+
/* Load the watchdog timeout value, 50ms is always enough. */
sprd_adi_write(sadi, sadi->slave_pbase + REG_WDG_LOAD_LOW,
WDG_LOAD_VAL & WDG_LOAD_MASK);
@@ -393,6 +398,9 @@ static int sprd_adi_restart_handler(struct notifier_block *this,
val |= BIT_WDG_RUN | BIT_WDG_RST;
sprd_adi_write(sadi, sadi->slave_pbase + REG_WDG_CTRL, val);
+ /* Lock the watchdog */
+ sprd_adi_write(sadi, sadi->slave_pbase + REG_WDG_LOCK, ~WDG_UNLOCK_KEY);
+
mdelay(1000);
dev_emerg(sadi->dev, "Unable to restart system\n");
diff --git a/drivers/spi/spi-sprd.c b/drivers/spi/spi-sprd.c
index 8c9021b7f7a9..2ee1feb41681 100644
--- a/drivers/spi/spi-sprd.c
+++ b/drivers/spi/spi-sprd.c
@@ -669,11 +669,15 @@ static void sprd_spi_set_speed(struct sprd_spi *ss, u32 speed_hz)
writel_relaxed(clk_div, ss->base + SPRD_SPI_CLKD);
}
-static void sprd_spi_init_hw(struct sprd_spi *ss, struct spi_transfer *t)
+static int sprd_spi_init_hw(struct sprd_spi *ss, struct spi_transfer *t)
{
+ struct spi_delay *d = &t->word_delay;
u16 word_delay, interval;
u32 val;
+ if (d->unit != SPI_DELAY_UNIT_SCK)
+ return -EINVAL;
+
val = readl_relaxed(ss->base + SPRD_SPI_CTL7);
val &= ~(SPRD_SPI_SCK_REV | SPRD_SPI_NG_TX | SPRD_SPI_NG_RX);
/* Set default chip selection, clock phase and clock polarity */
@@ -686,7 +690,7 @@ static void sprd_spi_init_hw(struct sprd_spi *ss, struct spi_transfer *t)
* formula as below per datasheet:
* interval time (source clock cycles) = interval * 4 + 10.
*/
- word_delay = clamp_t(u16, t->word_delay, SPRD_SPI_MIN_DELAY_CYCLE,
+ word_delay = clamp_t(u16, d->value, SPRD_SPI_MIN_DELAY_CYCLE,
SPRD_SPI_MAX_DELAY_CYCLE);
interval = DIV_ROUND_UP(word_delay - 10, 4);
ss->word_delay = interval * 4 + 10;
@@ -711,6 +715,8 @@ static void sprd_spi_init_hw(struct sprd_spi *ss, struct spi_transfer *t)
val &= ~SPRD_SPI_DATA_LINE2_EN;
writel_relaxed(val, ss->base + SPRD_SPI_CTL7);
+
+ return 0;
}
static int sprd_spi_setup_transfer(struct spi_device *sdev,
@@ -719,13 +725,16 @@ static int sprd_spi_setup_transfer(struct spi_device *sdev,
struct sprd_spi *ss = spi_controller_get_devdata(sdev->controller);
u8 bits_per_word = t->bits_per_word;
u32 val, mode = 0;
+ int ret;
ss->len = t->len;
ss->tx_buf = t->tx_buf;
ss->rx_buf = t->rx_buf;
ss->hw_mode = sdev->mode;
- sprd_spi_init_hw(ss, t);
+ ret = sprd_spi_init_hw(ss, t);
+ if (ret)
+ return ret;
/* Set transfer speed and valid bits */
sprd_spi_set_speed(ss, t->speed_hz);
diff --git a/drivers/spi/spi-st-ssc4.c b/drivers/spi/spi-st-ssc4.c
index 0c24c494f386..77d26d64541a 100644
--- a/drivers/spi/spi-st-ssc4.c
+++ b/drivers/spi/spi-st-ssc4.c
@@ -381,6 +381,7 @@ static int spi_st_probe(struct platform_device *pdev)
return 0;
clk_disable:
+ pm_runtime_disable(&pdev->dev);
clk_disable_unprepare(spi_st->clk);
put_master:
spi_master_put(master);
@@ -392,6 +393,8 @@ static int spi_st_remove(struct platform_device *pdev)
struct spi_master *master = platform_get_drvdata(pdev);
struct spi_st *spi_st = spi_master_get_devdata(master);
+ pm_runtime_disable(&pdev->dev);
+
clk_disable_unprepare(spi_st->clk);
pinctrl_pm_select_sleep_state(&pdev->dev);
diff --git a/drivers/spi/spi-stm32-qspi.c b/drivers/spi/spi-stm32-qspi.c
index 9ac6f9fe13cf..4e726929bb4f 100644
--- a/drivers/spi/spi-stm32-qspi.c
+++ b/drivers/spi/spi-stm32-qspi.c
@@ -528,7 +528,6 @@ static void stm32_qspi_release(struct stm32_qspi *qspi)
stm32_qspi_dma_free(qspi);
mutex_destroy(&qspi->lock);
clk_disable_unprepare(qspi->clk);
- spi_master_put(qspi->ctrl);
}
static int stm32_qspi_probe(struct platform_device *pdev)
@@ -626,6 +625,8 @@ static int stm32_qspi_probe(struct platform_device *pdev)
err:
stm32_qspi_release(qspi);
+ spi_master_put(qspi->ctrl);
+
return ret;
}
diff --git a/drivers/spi/spi-tegra114.c b/drivers/spi/spi-tegra114.c
index 39374c2edcf3..fc40ab146c86 100644
--- a/drivers/spi/spi-tegra114.c
+++ b/drivers/spi/spi-tegra114.c
@@ -666,8 +666,7 @@ static int tegra_spi_init_dma_param(struct tegra_spi_data *tspi,
dma_addr_t dma_phys;
int ret;
- dma_chan = dma_request_slave_channel_reason(tspi->dev,
- dma_to_memory ? "rx" : "tx");
+ dma_chan = dma_request_chan(tspi->dev, dma_to_memory ? "rx" : "tx");
if (IS_ERR(dma_chan)) {
ret = PTR_ERR(dma_chan);
if (ret != -EPROBE_DEFER)
@@ -723,15 +722,31 @@ static void tegra_spi_deinit_dma_param(struct tegra_spi_data *tspi,
dma_release_channel(dma_chan);
}
-static void tegra_spi_set_hw_cs_timing(struct spi_device *spi, u8 setup_dly,
- u8 hold_dly, u8 inactive_dly)
+static int tegra_spi_set_hw_cs_timing(struct spi_device *spi,
+ struct spi_delay *setup,
+ struct spi_delay *hold,
+ struct spi_delay *inactive)
{
struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master);
+ u8 setup_dly, hold_dly, inactive_dly;
u32 setup_hold;
u32 spi_cs_timing;
u32 inactive_cycles;
u8 cs_state;
+ if ((setup && setup->unit != SPI_DELAY_UNIT_SCK) ||
+ (hold && hold->unit != SPI_DELAY_UNIT_SCK) ||
+ (inactive && inactive->unit != SPI_DELAY_UNIT_SCK)) {
+ dev_err(&spi->dev,
+ "Invalid delay unit %d, should be SPI_DELAY_UNIT_SCK\n",
+ SPI_DELAY_UNIT_SCK);
+ return -EINVAL;
+ }
+
+ setup_dly = setup ? setup->value : 0;
+ hold_dly = hold ? hold->value : 0;
+ inactive_dly = inactive ? inactive->value : 0;
+
setup_dly = min_t(u8, setup_dly, MAX_SETUP_HOLD_CYCLES);
hold_dly = min_t(u8, hold_dly, MAX_SETUP_HOLD_CYCLES);
if (setup_dly && hold_dly) {
@@ -758,6 +773,8 @@ static void tegra_spi_set_hw_cs_timing(struct spi_device *spi, u8 setup_dly,
tspi->spi_cs_timing2 = spi_cs_timing;
tegra_spi_writel(tspi, spi_cs_timing, SPI_CS_TIMING2);
}
+
+ return 0;
}
static u32 tegra_spi_setup_transfer_one(struct spi_device *spi,
@@ -984,17 +1001,6 @@ static int tegra_spi_setup(struct spi_device *spi)
return 0;
}
-static void tegra_spi_transfer_delay(int delay)
-{
- if (!delay)
- return;
-
- if (delay >= 1000)
- mdelay(delay / 1000);
-
- udelay(delay % 1000);
-}
-
static void tegra_spi_transfer_end(struct spi_device *spi)
{
struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master);
@@ -1098,7 +1104,7 @@ static int tegra_spi_transfer_one_message(struct spi_master *master,
complete_xfer:
if (ret < 0 || skip) {
tegra_spi_transfer_end(spi);
- tegra_spi_transfer_delay(xfer->delay_usecs);
+ spi_transfer_delay_exec(xfer);
goto exit;
} else if (list_is_last(&xfer->transfer_list,
&msg->transfers)) {
@@ -1106,11 +1112,11 @@ complete_xfer:
tspi->cs_control = spi;
else {
tegra_spi_transfer_end(spi);
- tegra_spi_transfer_delay(xfer->delay_usecs);
+ spi_transfer_delay_exec(xfer);
}
} else if (xfer->cs_change) {
tegra_spi_transfer_end(spi);
- tegra_spi_transfer_delay(xfer->delay_usecs);
+ spi_transfer_delay_exec(xfer);
}
}
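
With the hook now taking spi_delay pointers, a client can request CS timing in SCK cycles, the only unit tegra accepts. A hedged sketch, assuming the spi_set_cs_timing() wrapper converted by this same series (values made up):

	struct spi_delay setup = { .value = 3, .unit = SPI_DELAY_UNIT_SCK };
	struct spi_delay hold  = { .value = 2, .unit = SPI_DELAY_UNIT_SCK };

	ret = spi_set_cs_timing(spi, &setup, &hold, NULL);	/* NULL: leave inactive time alone */
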
diff --git a/drivers/spi/spi-tegra20-sflash.c b/drivers/spi/spi-tegra20-sflash.c
index a841a7250d14..514429379206 100644
--- a/drivers/spi/spi-tegra20-sflash.c
+++ b/drivers/spi/spi-tegra20-sflash.c
@@ -341,10 +341,11 @@ static int tegra_sflash_transfer_one_message(struct spi_master *master,
goto exit;
}
msg->actual_length += xfer->len;
- if (xfer->cs_change && xfer->delay_usecs) {
+ if (xfer->cs_change &&
+ (xfer->delay_usecs || xfer->delay.value)) {
tegra_sflash_writel(tsd, tsd->def_command_reg,
SPI_COMMAND);
- udelay(xfer->delay_usecs);
+ spi_transfer_delay_exec(xfer);
}
}
ret = 0;
diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c
index 111fffc91435..7f4d932dade7 100644
--- a/drivers/spi/spi-tegra20-slink.c
+++ b/drivers/spi/spi-tegra20-slink.c
@@ -599,8 +599,7 @@ static int tegra_slink_init_dma_param(struct tegra_slink_data *tspi,
int ret;
struct dma_slave_config dma_sconfig;
- dma_chan = dma_request_slave_channel_reason(tspi->dev,
- dma_to_memory ? "rx" : "tx");
+ dma_chan = dma_request_chan(tspi->dev, dma_to_memory ? "rx" : "tx");
if (IS_ERR(dma_chan)) {
ret = PTR_ERR(dma_chan);
if (ret != -EPROBE_DEFER)
@@ -1073,7 +1072,7 @@ static int tegra_slink_probe(struct platform_device *pdev)
ret = clk_enable(tspi->clk);
if (ret < 0) {
dev_err(&pdev->dev, "Clock enable failed %d\n", ret);
- goto exit_free_master;
+ goto exit_clk_unprepare;
}
spi_irq = platform_get_irq(pdev, 0);
@@ -1146,6 +1145,8 @@ exit_free_irq:
free_irq(spi_irq, tspi);
exit_clk_disable:
clk_disable(tspi->clk);
+exit_clk_unprepare:
+ clk_unprepare(tspi->clk);
exit_free_master:
spi_master_put(master);
return ret;
@@ -1159,6 +1160,7 @@ static int tegra_slink_remove(struct platform_device *pdev)
free_irq(tspi->irq, tspi);
clk_disable(tspi->clk);
+ clk_unprepare(tspi->clk);
if (tspi->tx_dma_chan)
tegra_slink_deinit_dma_param(tspi, false);
diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c
index f88cbb94ce12..223353fa2d8a 100644
--- a/drivers/spi/spi-topcliff-pch.c
+++ b/drivers/spi/spi-topcliff-pch.c
@@ -1229,12 +1229,7 @@ static void pch_spi_process_messages(struct work_struct *pwork)
"%s:data->current_msg->actual_length=%d\n",
__func__, data->current_msg->actual_length);
- /* check for delay */
- if (data->cur_trans->delay_usecs) {
- dev_dbg(&data->master->dev, "%s:delay in usec=%d\n",
- __func__, data->cur_trans->delay_usecs);
- udelay(data->cur_trans->delay_usecs);
- }
+ spi_transfer_delay_exec(data->cur_trans);
spin_lock(&data->lock);
diff --git a/drivers/spi/spi-txx9.c b/drivers/spi/spi-txx9.c
index 51759d3fd45f..3606232f190f 100644
--- a/drivers/spi/spi-txx9.c
+++ b/drivers/spi/spi-txx9.c
@@ -26,7 +26,8 @@
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/module.h>
-#include <linux/gpio.h>
+#include <linux/gpio/machine.h>
+#include <linux/gpio/consumer.h>
#define SPI_FIFO_SIZE 4
@@ -79,7 +80,7 @@ struct txx9spi {
void __iomem *membase;
int baseclk;
struct clk *clk;
- int last_chipselect;
+ struct gpio_desc *last_chipselect;
int last_chipselect_val;
};
@@ -95,20 +96,22 @@ static void txx9spi_wr(struct txx9spi *c, u32 val, int reg)
static void txx9spi_cs_func(struct spi_device *spi, struct txx9spi *c,
int on, unsigned int cs_delay)
{
- int val = (spi->mode & SPI_CS_HIGH) ? on : !on;
-
+ /*
+ * The GPIO descriptor will track polarity inversion inside
+ * gpiolib.
+ */
if (on) {
/* deselect the chip with cs_change hint in last transfer */
- if (c->last_chipselect >= 0)
- gpio_set_value(c->last_chipselect,
+ if (c->last_chipselect)
+ gpiod_set_value(c->last_chipselect,
!c->last_chipselect_val);
- c->last_chipselect = spi->chip_select;
- c->last_chipselect_val = val;
+ c->last_chipselect = spi->cs_gpiod;
+ c->last_chipselect_val = on;
} else {
- c->last_chipselect = -1;
+ c->last_chipselect = NULL;
ndelay(cs_delay); /* CS Hold Time */
}
- gpio_set_value(spi->chip_select, val);
+ gpiod_set_value(spi->cs_gpiod, on);
ndelay(cs_delay); /* CS Setup Time / CS Recovery Time */
}
@@ -119,12 +122,6 @@ static int txx9spi_setup(struct spi_device *spi)
if (!spi->max_speed_hz)
return -EINVAL;
- if (gpio_direction_output(spi->chip_select,
- !(spi->mode & SPI_CS_HIGH))) {
- dev_err(&spi->dev, "Cannot setup GPIO for chipselect.\n");
- return -EINVAL;
- }
-
/* deselect chip */
spin_lock(&c->lock);
txx9spi_cs_func(spi, c, 0, (NSEC_PER_SEC / 2) / spi->max_speed_hz);
@@ -248,8 +245,7 @@ static void txx9spi_work_one(struct txx9spi *c, struct spi_message *m)
len -= count * wsize;
}
m->actual_length += t->len;
- if (t->delay_usecs)
- udelay(t->delay_usecs);
+ spi_transfer_delay_exec(t);
if (!cs_change)
continue;
@@ -320,6 +316,47 @@ static int txx9spi_transfer(struct spi_device *spi, struct spi_message *m)
return 0;
}
+/*
+ * Chip select uses GPIO only; further, the driver uses the chip select
+ * number (from the device tree "reg" property, and this can only come from
+ * the device tree since this is MIPS and there is no way to pass platform data) as
+ * the GPIO number. As the platform has only one GPIO controller (the txx9 GPIO
+ * chip) it is thus using the chip select number as an offset into that chip.
+ * This chip has a maximum of 16 GPIOs 0..15 and this is what all platforms
+ * register.
+ *
+ * We modernized this behaviour by explicitly converting that offset to an
+ * offset on the GPIO chip using a GPIO descriptor machine table of the same
+ * size as the txx9 GPIO chip with a 1-to-1 mapping of chip select to GPIO
+ * offset.
+ *
+ * This is admittedly a hack, but it is countering the hack of using "reg" to
+ * contain a GPIO offset when it should be using "cs-gpios" as the SPI bindings
+ * state.
+ */
+static struct gpiod_lookup_table txx9spi_cs_gpio_table = {
+ .dev_id = "spi0",
+ .table = {
+ GPIO_LOOKUP_IDX("TXx9", 0, "cs", 0, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("TXx9", 1, "cs", 1, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("TXx9", 2, "cs", 2, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("TXx9", 3, "cs", 3, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("TXx9", 4, "cs", 4, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("TXx9", 5, "cs", 5, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("TXx9", 6, "cs", 6, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("TXx9", 7, "cs", 7, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("TXx9", 8, "cs", 8, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("TXx9", 9, "cs", 9, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("TXx9", 10, "cs", 10, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("TXx9", 11, "cs", 11, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("TXx9", 12, "cs", 12, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("TXx9", 13, "cs", 13, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("TXx9", 14, "cs", 14, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("TXx9", 15, "cs", 15, GPIO_ACTIVE_LOW),
+ { },
+ },
+};
+
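
With use_gpio_descriptors set below, the SPI core resolves chip select n by looking up function "cs" at index n against this table; a hedged sketch of roughly what gpiolib ends up doing per device:

	spi->cs_gpiod = gpiod_get_index(&master->dev, "cs", spi->chip_select,
					GPIOD_OUT_LOW);
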
static int txx9spi_probe(struct platform_device *dev)
{
struct spi_master *master;
@@ -373,12 +410,14 @@ static int txx9spi_probe(struct platform_device *dev)
if (ret)
goto exit;
- c->last_chipselect = -1;
+ c->last_chipselect = NULL;
dev_info(&dev->dev, "at %#llx, irq %d, %dMHz\n",
(unsigned long long)res->start, irq,
(c->baseclk + 500000) / 1000000);
+ gpiod_add_lookup_table(&txx9spi_cs_gpio_table);
+
/* the spi->mode bits understood by this driver: */
master->mode_bits = SPI_CS_HIGH | SPI_CPOL | SPI_CPHA;
@@ -387,6 +426,7 @@ static int txx9spi_probe(struct platform_device *dev)
master->transfer = txx9spi_transfer;
master->num_chipselect = (u16)UINT_MAX; /* any GPIO numbers */
master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
+ master->use_gpio_descriptors = true;
ret = devm_spi_register_master(&dev->dev, master);
if (ret)
diff --git a/drivers/spi/spi-xcomm.c b/drivers/spi/spi-xcomm.c
index a3496c46cc1b..1d9b3f03d986 100644
--- a/drivers/spi/spi-xcomm.c
+++ b/drivers/spi/spi-xcomm.c
@@ -188,8 +188,7 @@ static int spi_xcomm_transfer_one(struct spi_master *master,
}
status = 0;
- if (t->delay_usecs)
- udelay(t->delay_usecs);
+ spi_transfer_delay_exec(t);
is_first = false;
}
diff --git a/drivers/spi/spi-xilinx.c b/drivers/spi/spi-xilinx.c
index d5f9d5fbb3e8..8dd2bb99cb4d 100644
--- a/drivers/spi/spi-xilinx.c
+++ b/drivers/spi/spi-xilinx.c
@@ -391,7 +391,7 @@ static int xilinx_spi_probe(struct platform_device *pdev)
struct xilinx_spi *xspi;
struct xspi_platform_data *pdata;
struct resource *res;
- int ret, num_cs = 0, bits_per_word = 8;
+ int ret, num_cs = 0, bits_per_word;
struct spi_master *master;
u32 tmp;
u8 i;
@@ -403,6 +403,11 @@ static int xilinx_spi_probe(struct platform_device *pdev)
} else {
of_property_read_u32(pdev->dev.of_node, "xlnx,num-ss-bits",
&num_cs);
+ ret = of_property_read_u32(pdev->dev.of_node,
+ "xlnx,num-transfer-bits",
+ &bits_per_word);
+ if (ret)
+ bits_per_word = 8;
}
if (!num_cs) {
diff --git a/drivers/spi/spi-xtensa-xtfpga.c b/drivers/spi/spi-xtensa-xtfpga.c
index 86516eb1e143..fc2b5eb7d614 100644
--- a/drivers/spi/spi-xtensa-xtfpga.c
+++ b/drivers/spi/spi-xtensa-xtfpga.c
@@ -80,7 +80,6 @@ static void xtfpga_spi_chipselect(struct spi_device *spi, int is_on)
static int xtfpga_spi_probe(struct platform_device *pdev)
{
struct xtfpga_spi *xspi;
- struct resource *mem;
int ret;
struct spi_master *master;
@@ -97,14 +96,7 @@ static int xtfpga_spi_probe(struct platform_device *pdev)
xspi->bitbang.master = master;
xspi->bitbang.chipselect = xtfpga_spi_chipselect;
xspi->bitbang.txrx_word[SPI_MODE_0] = xtfpga_spi_txrx_word;
-
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!mem) {
- dev_err(&pdev->dev, "No memory resource\n");
- ret = -ENODEV;
- goto err;
- }
- xspi->regs = devm_ioremap_resource(&pdev->dev, mem);
+ xspi->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(xspi->regs)) {
ret = PTR_ERR(xspi->regs);
goto err;
diff --git a/drivers/spi/spi-zynq-qspi.c b/drivers/spi/spi-zynq-qspi.c
index 5cf6993ddce5..17641157354d 100644
--- a/drivers/spi/spi-zynq-qspi.c
+++ b/drivers/spi/spi-zynq-qspi.c
@@ -7,7 +7,6 @@
#include <linux/clk.h>
#include <linux/delay.h>
-#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
@@ -51,7 +50,6 @@
#define ZYNQ_QSPI_CONFIG_BDRATE_MASK GENMASK(5, 3) /* Baud Rate Mask */
#define ZYNQ_QSPI_CONFIG_CPHA_MASK BIT(2) /* Clock Phase Control */
#define ZYNQ_QSPI_CONFIG_CPOL_MASK BIT(1) /* Clock Polarity Control */
-#define ZYNQ_QSPI_CONFIG_SSCTRL_MASK BIT(10) /* Slave Select Mask */
#define ZYNQ_QSPI_CONFIG_FWIDTH_MASK GENMASK(7, 6) /* FIFO width */
#define ZYNQ_QSPI_CONFIG_MSTREN_MASK BIT(0) /* Master Mode */
@@ -61,9 +59,9 @@
* These are the values used in the calculation of baud rate divisor and
* setting the slave select.
*/
-#define ZYNQ_QSPI_BAUD_DIV_MAX GENMASK(2, 0) /* Baud rate maximum */
-#define ZYNQ_QSPI_BAUD_DIV_SHIFT 3 /* Baud rate divisor shift in CR */
-#define ZYNQ_QSPI_SS_SHIFT 10 /* Slave Select field shift in CR */
+#define ZYNQ_QSPI_CONFIG_BAUD_DIV_MAX GENMASK(2, 0) /* Baud rate maximum */
+#define ZYNQ_QSPI_CONFIG_BAUD_DIV_SHIFT 3 /* Baud rate divisor shift */
+#define ZYNQ_QSPI_CONFIG_PCS BIT(10) /* Peripheral Chip Select */
/*
* QSPI Interrupt Registers bit Masks
@@ -99,9 +97,9 @@
* It is named Linear Configuration but it controls other modes when not in
* linear mode also.
*/
-#define ZYNQ_QSPI_LCFG_TWO_MEM_MASK BIT(30) /* LQSPI Two memories Mask */
-#define ZYNQ_QSPI_LCFG_SEP_BUS_MASK BIT(29) /* LQSPI Separate bus Mask */
-#define ZYNQ_QSPI_LCFG_U_PAGE_MASK BIT(28) /* LQSPI Upper Page Mask */
+#define ZYNQ_QSPI_LCFG_TWO_MEM BIT(30) /* LQSPI Two memories */
+#define ZYNQ_QSPI_LCFG_SEP_BUS BIT(29) /* LQSPI Separate bus */
+#define ZYNQ_QSPI_LCFG_U_PAGE BIT(28) /* LQSPI Upper Page */
#define ZYNQ_QSPI_LCFG_DUMMY_SHIFT 8
@@ -116,8 +114,8 @@
*/
#define ZYNQ_QSPI_MODEBITS (SPI_CPOL | SPI_CPHA)
-/* Default number of chip selects */
-#define ZYNQ_QSPI_DEFAULT_NUM_CS 1
+/* Maximum number of chip selects */
+#define ZYNQ_QSPI_MAX_NUM_CS 2
/**
* struct zynq_qspi - Defines qspi driver instance
@@ -161,6 +159,7 @@ static inline void zynq_qspi_write(struct zynq_qspi *xqspi, u32 offset,
/**
* zynq_qspi_init_hw - Initialize the hardware
* @xqspi: Pointer to the zynq_qspi structure
+ * @num_cs: Number of connected CS (to enable dual memories if needed)
*
* The default settings of the QSPI controller's configurable parameters on
* reset are
@@ -178,7 +177,7 @@ static inline void zynq_qspi_write(struct zynq_qspi *xqspi, u32 offset,
* - Set the little endian mode of TX FIFO and
* - Enable the QSPI controller
*/
-static void zynq_qspi_init_hw(struct zynq_qspi *xqspi)
+static void zynq_qspi_init_hw(struct zynq_qspi *xqspi, unsigned int num_cs)
{
u32 config_reg;
@@ -186,7 +185,12 @@ static void zynq_qspi_init_hw(struct zynq_qspi *xqspi)
zynq_qspi_write(xqspi, ZYNQ_QSPI_IDIS_OFFSET, ZYNQ_QSPI_IXR_ALL_MASK);
/* Disable linear mode as the boot loader may have used it */
- zynq_qspi_write(xqspi, ZYNQ_QSPI_LINEAR_CFG_OFFSET, 0);
+ config_reg = 0;
+ /* At the same time, enable dual mode if more than 1 CS is available */
+ if (num_cs > 1)
+ config_reg |= ZYNQ_QSPI_LCFG_TWO_MEM;
+
+ zynq_qspi_write(xqspi, ZYNQ_QSPI_LINEAR_CFG_OFFSET, config_reg);
/* Clear the RX FIFO */
while (zynq_qspi_read(xqspi, ZYNQ_QSPI_STATUS_OFFSET) &
@@ -284,21 +288,28 @@ static void zynq_qspi_txfifo_op(struct zynq_qspi *xqspi, unsigned int size)
*/
static void zynq_qspi_chipselect(struct spi_device *spi, bool assert)
{
- struct spi_controller *ctrl = spi->master;
- struct zynq_qspi *xqspi = spi_controller_get_devdata(ctrl);
+ struct spi_controller *ctlr = spi->master;
+ struct zynq_qspi *xqspi = spi_controller_get_devdata(ctlr);
u32 config_reg;
- config_reg = zynq_qspi_read(xqspi, ZYNQ_QSPI_CONFIG_OFFSET);
- if (assert) {
- /* Select the slave */
- config_reg &= ~ZYNQ_QSPI_CONFIG_SSCTRL_MASK;
- config_reg |= (((~(BIT(spi->chip_select))) <<
- ZYNQ_QSPI_SS_SHIFT) &
- ZYNQ_QSPI_CONFIG_SSCTRL_MASK);
- } else {
- config_reg |= ZYNQ_QSPI_CONFIG_SSCTRL_MASK;
+ /* Select the lower (CS0) or upper (CS1) memory */
+ if (ctlr->num_chipselect > 1) {
+ config_reg = zynq_qspi_read(xqspi, ZYNQ_QSPI_LINEAR_CFG_OFFSET);
+ if (!spi->chip_select)
+ config_reg &= ~ZYNQ_QSPI_LCFG_U_PAGE;
+ else
+ config_reg |= ZYNQ_QSPI_LCFG_U_PAGE;
+
+ zynq_qspi_write(xqspi, ZYNQ_QSPI_LINEAR_CFG_OFFSET, config_reg);
}
+ /* Ground the line to assert the CS */
+ config_reg = zynq_qspi_read(xqspi, ZYNQ_QSPI_CONFIG_OFFSET);
+ if (assert)
+ config_reg &= ~ZYNQ_QSPI_CONFIG_PCS;
+ else
+ config_reg |= ZYNQ_QSPI_CONFIG_PCS;
+
zynq_qspi_write(xqspi, ZYNQ_QSPI_CONFIG_OFFSET, config_reg);
}
@@ -332,7 +343,7 @@ static int zynq_qspi_config_op(struct zynq_qspi *xqspi, struct spi_device *spi)
* ----------------
* 111 - divide by 256
*/
- while ((baud_rate_val < ZYNQ_QSPI_BAUD_DIV_MAX) &&
+ while ((baud_rate_val < ZYNQ_QSPI_CONFIG_BAUD_DIV_MAX) &&
(clk_get_rate(xqspi->refclk) / (2 << baud_rate_val)) >
spi->max_speed_hz)
baud_rate_val++;
@@ -348,7 +359,7 @@ static int zynq_qspi_config_op(struct zynq_qspi *xqspi, struct spi_device *spi)
config_reg |= ZYNQ_QSPI_CONFIG_CPOL_MASK;
config_reg &= ~ZYNQ_QSPI_CONFIG_BDRATE_MASK;
- config_reg |= (baud_rate_val << ZYNQ_QSPI_BAUD_DIV_SHIFT);
+ config_reg |= (baud_rate_val << ZYNQ_QSPI_CONFIG_BAUD_DIV_SHIFT);
zynq_qspi_write(xqspi, ZYNQ_QSPI_CONFIG_OFFSET, config_reg);
return 0;
@@ -365,10 +376,10 @@ static int zynq_qspi_config_op(struct zynq_qspi *xqspi, struct spi_device *spi)
*/
static int zynq_qspi_setup_op(struct spi_device *spi)
{
- struct spi_controller *ctrl = spi->master;
- struct zynq_qspi *qspi = spi_controller_get_devdata(ctrl);
+ struct spi_controller *ctlr = spi->master;
+ struct zynq_qspi *qspi = spi_controller_get_devdata(ctlr);
- if (ctrl->busy)
+ if (ctlr->busy)
return -EBUSY;
clk_enable(qspi->refclk);
@@ -663,9 +674,6 @@ static int zynq_qspi_probe(struct platform_device *pdev)
goto clk_dis_pclk;
}
- /* QSPI controller initializations */
- zynq_qspi_init_hw(xqspi);
-
xqspi->irq = platform_get_irq(pdev, 0);
if (xqspi->irq <= 0) {
ret = -ENXIO;
@@ -681,10 +689,14 @@ static int zynq_qspi_probe(struct platform_device *pdev)
ret = of_property_read_u32(np, "num-cs",
&num_cs);
- if (ret < 0)
- ctlr->num_chipselect = ZYNQ_QSPI_DEFAULT_NUM_CS;
- else
+ if (ret < 0) {
+ ctlr->num_chipselect = 1;
+ } else if (num_cs > ZYNQ_QSPI_MAX_NUM_CS) {
+ dev_err(&pdev->dev, "only 2 chip selects are available\n");
+ goto remove_master;
+ } else {
ctlr->num_chipselect = num_cs;
+ }
ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD |
SPI_TX_DUAL | SPI_TX_QUAD;
@@ -692,6 +704,10 @@ static int zynq_qspi_probe(struct platform_device *pdev)
ctlr->setup = zynq_qspi_setup_op;
ctlr->max_speed_hz = clk_get_rate(xqspi->refclk) / 2;
ctlr->dev.of_node = np;
+
+ /* QSPI controller initializations */
+ zynq_qspi_init_hw(xqspi, ctlr->num_chipselect);
+
ret = devm_spi_register_controller(&pdev->dev, ctlr);
if (ret) {
dev_err(&pdev->dev, "spi_register_master failed\n");
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index f9502dbbb5c1..5e4c4532f7f3 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -92,7 +92,7 @@ static ssize_t driver_override_store(struct device *dev,
if (len) {
spi->driver_override = driver_override;
} else {
- /* Emptry string, disable driver override */
+ /* Empty string, disable driver override */
spi->driver_override = NULL;
kfree(driver_override);
}
@@ -469,7 +469,7 @@ static LIST_HEAD(board_list);
static LIST_HEAD(spi_controller_list);
/*
- * Used to protect add/del opertion for board_info list and
+ * Used to protect add/del operation for board_info list and
* spi_controller list, and their matching process
* also used to protect object of type struct idr
*/
@@ -775,6 +775,15 @@ int spi_register_board_info(struct spi_board_info const *info, unsigned n)
static void spi_set_cs(struct spi_device *spi, bool enable)
{
+ bool enable1 = enable;
+
+ if (!spi->controller->set_cs_timing) {
+ if (enable1)
+ spi_delay_exec(&spi->controller->cs_setup, NULL);
+ else
+ spi_delay_exec(&spi->controller->cs_hold, NULL);
+ }
+
if (spi->mode & SPI_CS_HIGH)
enable = !enable;
@@ -800,6 +809,11 @@ static void spi_set_cs(struct spi_device *spi, bool enable)
} else if (spi->controller->set_cs) {
spi->controller->set_cs(spi, !enable);
}
+
+ if (!spi->controller->set_cs_timing) {
+ if (!enable1)
+ spi_delay_exec(&spi->controller->cs_inactive, NULL);
+ }
}
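
cs_setup, cs_hold and cs_inactive are new spi_delay members of struct spi_controller; when a controller has no hardware set_cs_timing hook, the core now executes them in software around set_cs(), so a driver only has to fill them in (values below are hypothetical and board-specific):

	ctlr->cs_setup    = (struct spi_delay){ .value = 10, .unit = SPI_DELAY_UNIT_NSECS };
	ctlr->cs_hold     = (struct spi_delay){ .value = 10, .unit = SPI_DELAY_UNIT_NSECS };
	ctlr->cs_inactive = (struct spi_delay){ .value = 50, .unit = SPI_DELAY_UNIT_NSECS };
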
#ifdef CONFIG_HAS_DMA
@@ -1106,42 +1120,79 @@ static void _spi_transfer_delay_ns(u32 ns)
}
}
-static void _spi_transfer_cs_change_delay(struct spi_message *msg,
- struct spi_transfer *xfer)
+int spi_delay_to_ns(struct spi_delay *_delay, struct spi_transfer *xfer)
{
- u32 delay = xfer->cs_change_delay;
- u32 unit = xfer->cs_change_delay_unit;
+ u32 delay = _delay->value;
+ u32 unit = _delay->unit;
u32 hz;
- /* return early on "fast" mode - for everything but USECS */
- if (!delay && unit != SPI_DELAY_UNIT_USECS)
- return;
+ if (!delay)
+ return 0;
switch (unit) {
case SPI_DELAY_UNIT_USECS:
- /* for compatibility use default of 10us */
- if (!delay)
- delay = 10000;
- else
- delay *= 1000;
+ delay *= 1000;
break;
case SPI_DELAY_UNIT_NSECS: /* nothing to do here */
break;
case SPI_DELAY_UNIT_SCK:
+ /* clock cycles need to be obtained from spi_transfer */
+ if (!xfer)
+ return -EINVAL;
/* if there is no effective speed known, then approximate
* by underestimating with half the requested hz
*/
hz = xfer->effective_speed_hz ?: xfer->speed_hz / 2;
+ if (!hz)
+ return -EINVAL;
delay *= DIV_ROUND_UP(1000000000, hz);
break;
default:
+ return -EINVAL;
+ }
+
+ return delay;
+}
+EXPORT_SYMBOL_GPL(spi_delay_to_ns);
+
+int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer)
+{
+ int delay;
+
+ if (!_delay)
+ return -EINVAL;
+
+ delay = spi_delay_to_ns(_delay, xfer);
+ if (delay < 0)
+ return delay;
+
+ _spi_transfer_delay_ns(delay);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(spi_delay_exec);
+
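
A worked example of the conversion (numbers hypothetical): at effective_speed_hz = 10 MHz, one SCK cycle is DIV_ROUND_UP(1000000000, 10000000) = 100 ns, so:

	struct spi_delay d = { .value = 5, .unit = SPI_DELAY_UNIT_SCK };

	int ns  = spi_delay_to_ns(&d, xfer);	/* 5 * 100 = 500 ns */
	int ret = spi_delay_exec(&d, xfer);	/* busy-waits/sleeps ~500 ns */
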
+static void _spi_transfer_cs_change_delay(struct spi_message *msg,
+ struct spi_transfer *xfer)
+{
+ u32 delay = xfer->cs_change_delay.value;
+ u32 unit = xfer->cs_change_delay.unit;
+ int ret;
+
+ /* return early on "fast" mode - for everything but USECS */
+ if (!delay) {
+ if (unit == SPI_DELAY_UNIT_USECS)
+ _spi_transfer_delay_ns(10000);
+ return;
+ }
+
+ ret = spi_delay_exec(&xfer->cs_change_delay, xfer);
+ if (ret) {
dev_err_once(&msg->spi->dev,
"Use of unsupported delay unit %i, using default of 10us\n",
- xfer->cs_change_delay_unit);
- delay = 10000;
+ unit);
+ _spi_transfer_delay_ns(10000);
}
- /* now sleep for the requested amount of time */
- _spi_transfer_delay_ns(delay);
}
/*
@@ -1171,6 +1222,11 @@ static int spi_transfer_one_message(struct spi_controller *ctlr,
spi_statistics_add_transfer_stats(statm, xfer, ctlr);
spi_statistics_add_transfer_stats(stats, xfer, ctlr);
+ if (!ctlr->ptp_sts_supported) {
+ xfer->ptp_sts_word_pre = 0;
+ ptp_read_system_prets(xfer->ptp_sts);
+ }
+
if (xfer->tx_buf || xfer->rx_buf) {
reinit_completion(&ctlr->xfer_completion);
@@ -1197,13 +1253,17 @@ static int spi_transfer_one_message(struct spi_controller *ctlr,
xfer->len);
}
+ if (!ctlr->ptp_sts_supported) {
+ ptp_read_system_postts(xfer->ptp_sts);
+ xfer->ptp_sts_word_post = xfer->len;
+ }
+
trace_spi_transfer_stop(msg, xfer);
if (msg->status != -EINPROGRESS)
goto out;
- if (xfer->delay_usecs)
- _spi_transfer_delay_ns(xfer->delay_usecs * 1000);
+ spi_transfer_delay_exec(xfer);
if (xfer->cs_change) {
if (list_is_last(&xfer->transfer_list,
@@ -1265,6 +1325,7 @@ EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
*/
static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
{
+ struct spi_transfer *xfer;
struct spi_message *msg;
bool was_busy = false;
unsigned long flags;
@@ -1391,6 +1452,13 @@ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
goto out;
}
+ if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ xfer->ptp_sts_word_pre = 0;
+ ptp_read_system_prets(xfer->ptp_sts);
+ }
+ }
+
ret = ctlr->transfer_one_message(ctlr, msg);
if (ret) {
dev_err(&ctlr->dev,
@@ -1419,6 +1487,99 @@ static void spi_pump_messages(struct kthread_work *work)
}
/**
+ * spi_take_timestamp_pre - helper for drivers to collect the beginning of the
+ * TX timestamp for the requested byte from the SPI
+ * transfer. The frequency with which this function
+ * must be called (once per word, once for the whole
+ * transfer, once per batch of words etc) is arbitrary
+ * as long as the @tx buffer offset is greater than or
+ * equal to the requested byte at the time of the
+ * call. The timestamp is only taken once, at the
+ * first such call. It is assumed that the driver
+ * advances its @tx buffer pointer monotonically.
+ * @ctlr: Pointer to the spi_controller structure of the driver
+ * @xfer: Pointer to the transfer being timestamped
+ * @tx: Pointer to the current word within the xfer->tx_buf that the driver is
+ * preparing to transmit right now.
+ * @irqs_off: If true, will disable IRQs and preemption for the duration of the
+ * transfer, for less jitter in time measurement. Only compatible
+ * with PIO drivers. If true, the call must be followed by
+ * spi_take_timestamp_post() or the system will crash.
+ * WARNING: for fully predictable results, the CPU frequency must
+ * also be under control (governor).
+ */
+void spi_take_timestamp_pre(struct spi_controller *ctlr,
+ struct spi_transfer *xfer,
+ const void *tx, bool irqs_off)
+{
+ u8 bytes_per_word = DIV_ROUND_UP(xfer->bits_per_word, 8);
+
+ if (!xfer->ptp_sts)
+ return;
+
+ if (xfer->timestamped_pre)
+ return;
+
+ if (tx < (xfer->tx_buf + xfer->ptp_sts_word_pre * bytes_per_word))
+ return;
+
+ /* Record the word offset, which bounds the timestamp's resolution */
+ xfer->ptp_sts_word_pre = (tx - xfer->tx_buf) / bytes_per_word;
+
+ xfer->timestamped_pre = true;
+
+ if (irqs_off) {
+ local_irq_save(ctlr->irq_flags);
+ preempt_disable();
+ }
+
+ ptp_read_system_prets(xfer->ptp_sts);
+}
+EXPORT_SYMBOL_GPL(spi_take_timestamp_pre);
+
+/**
+ * spi_take_timestamp_post - helper for drivers to collect the end of the
+ * TX timestamp for the requested byte from the SPI
+ * transfer. Can be called with an arbitrary
+ * frequency: only the first call where @tx exceeds
+ * or is equal to the requested word will be
+ * timestamped.
+ * @ctlr: Pointer to the spi_controller structure of the driver
+ * @xfer: Pointer to the transfer being timestamped
+ * @tx: Pointer to the current word within the xfer->tx_buf that the driver has
+ * just transmitted.
+ * @irqs_off: If true, will re-enable IRQs and preemption for the local CPU.
+ */
+void spi_take_timestamp_post(struct spi_controller *ctlr,
+ struct spi_transfer *xfer,
+ const void *tx, bool irqs_off)
+{
+ u8 bytes_per_word = DIV_ROUND_UP(xfer->bits_per_word, 8);
+
+ if (!xfer->ptp_sts)
+ return;
+
+ if (xfer->timestamped_post)
+ return;
+
+ if (tx < (xfer->tx_buf + xfer->ptp_sts_word_post * bytes_per_word))
+ return;
+
+ ptp_read_system_postts(xfer->ptp_sts);
+
+ if (irqs_off) {
+ local_irq_restore(ctlr->irq_flags);
+ preempt_enable();
+ }
+
+ /* Record the word offset, which bounds the timestamp's resolution */
+ xfer->ptp_sts_word_post = (tx - xfer->tx_buf) / bytes_per_word;
+
+ xfer->timestamped_post = true;
+}
+EXPORT_SYMBOL_GPL(spi_take_timestamp_post);
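+
+/*
+ * Usage sketch (illustrative, not part of this patch): an 8-bit PIO TX
+ * loop would bracket its FIFO writes with the two helpers above:
+ *
+ *	const u8 *tx = xfer->tx_buf;
+ *
+ *	for (i = 0; i < xfer->len; i++) {
+ *		spi_take_timestamp_pre(ctlr, xfer, tx, false);
+ *		write_fifo(*tx++);	(driver-specific register write)
+ *		spi_take_timestamp_post(ctlr, xfer, tx, false);
+ *	}
+ */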
+
+/**
* spi_set_thread_rt - set the controller to pump at realtime priority
* @ctlr: controller to boost priority of
*
@@ -1503,6 +1664,7 @@ EXPORT_SYMBOL_GPL(spi_get_next_queued_message);
*/
void spi_finalize_current_message(struct spi_controller *ctlr)
{
+ struct spi_transfer *xfer;
struct spi_message *mesg;
unsigned long flags;
int ret;
@@ -1511,6 +1673,13 @@ void spi_finalize_current_message(struct spi_controller *ctlr)
mesg = ctlr->cur_msg;
spin_unlock_irqrestore(&ctlr->queue_lock, flags);
+ if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
+ list_for_each_entry(xfer, &mesg->transfers, transfer_list) {
+ ptp_read_system_postts(xfer->ptp_sts);
+ xfer->ptp_sts_word_post = xfer->len;
+ }
+ }
+
spi_unmap_msg(ctlr, mesg);
if (ctlr->cur_msg_prepared && ctlr->unprepare_message) {
@@ -1711,15 +1880,7 @@ static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
spi->mode |= SPI_3WIRE;
if (of_property_read_bool(nc, "spi-lsb-first"))
spi->mode |= SPI_LSB_FIRST;
-
- /*
- * For descriptors associated with the device, polarity inversion is
- * handled in the gpiolib, so all chip selects are "active high" in
- * the logical sense, the gpiolib will invert the line if need be.
- */
- if (ctlr->use_gpio_descriptors)
- spi->mode |= SPI_CS_HIGH;
- else if (of_property_read_bool(nc, "spi-cs-high"))
+ if (of_property_read_bool(nc, "spi-cs-high"))
spi->mode |= SPI_CS_HIGH;
/* Device DUAL/QUAD mode */
@@ -1783,6 +1944,15 @@ static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
}
spi->chip_select = value;
+ /*
+ * For descriptors associated with the device, polarity inversion is
+ * handled in the gpiolib, so all gpio chip selects are "active high"
+ * in the logical sense, the gpiolib will invert the line if need be.
+ */
+ if ((ctlr->use_gpio_descriptors) && ctlr->cs_gpiods &&
+ ctlr->cs_gpiods[spi->chip_select])
+ spi->mode |= SPI_CS_HIGH;
+
/* Device speed */
rc = of_property_read_u32(nc, "spi-max-frequency", &value);
if (rc) {
@@ -2871,10 +3041,11 @@ struct spi_replaced_transfers *spi_replace_transfers(
/* add to list */
list_add(&xfer->transfer_list, rxfer->replaced_after);
- /* clear cs_change and delay_usecs for all but the last */
+ /* clear cs_change and delay for all but the last */
if (i) {
xfer->cs_change = false;
xfer->delay_usecs = 0;
+ xfer->delay.value = 0;
}
}
@@ -3091,7 +3262,29 @@ int spi_setup(struct spi_device *spi)
if (spi->controller->setup)
status = spi->controller->setup(spi);
- spi_set_cs(spi, false);
+ if (spi->controller->auto_runtime_pm && spi->controller->set_cs) {
+ status = pm_runtime_get_sync(spi->controller->dev.parent);
+ if (status < 0) {
+ pm_runtime_put_noidle(spi->controller->dev.parent);
+ dev_err(&spi->controller->dev, "Failed to power device: %d\n",
+ status);
+ return status;
+ }
+
+ /*
+ * We do not want to return a positive value from pm_runtime_get_sync();
+ * there are many instances of devices calling spi_setup() and
+ * checking for a non-zero return value instead of a negative
+ * return value.
+ */
+ status = 0;
+
+ spi_set_cs(spi, false);
+ pm_runtime_mark_last_busy(spi->controller->dev.parent);
+ pm_runtime_put_autosuspend(spi->controller->dev.parent);
+ } else {
+ spi_set_cs(spi, false);
+ }
if (spi->rt && !spi->controller->rt) {
spi->controller->rt = true;
@@ -3114,18 +3307,71 @@ EXPORT_SYMBOL_GPL(spi_setup);
/**
* spi_set_cs_timing - configure CS setup, hold, and inactive delays
* @spi: the device that requires specific CS timing configuration
- * @setup: CS setup time in terms of clock count
- * @hold: CS hold time in terms of clock count
- * @inactive_dly: CS inactive delay between transfers in terms of clock count
+ * @setup: CS setup time specified via &struct spi_delay
+ * @hold: CS hold time specified via &struct spi_delay
+ * @inactive: CS inactive delay between transfers specified via &struct spi_delay
+ *
+ * Return: zero on success, else a negative error code.
*/
-void spi_set_cs_timing(struct spi_device *spi, u8 setup, u8 hold,
- u8 inactive_dly)
+int spi_set_cs_timing(struct spi_device *spi, struct spi_delay *setup,
+ struct spi_delay *hold, struct spi_delay *inactive)
{
+ size_t len;
+
if (spi->controller->set_cs_timing)
- spi->controller->set_cs_timing(spi, setup, hold, inactive_dly);
+ return spi->controller->set_cs_timing(spi, setup, hold,
+ inactive);
+
+ if ((setup && setup->unit == SPI_DELAY_UNIT_SCK) ||
+ (hold && hold->unit == SPI_DELAY_UNIT_SCK) ||
+ (inactive && inactive->unit == SPI_DELAY_UNIT_SCK)) {
+ dev_err(&spi->dev,
+ "Clock-cycle delays for CS not supported in SW mode\n");
+ return -ENOTSUPP;
+ }
+
+ len = sizeof(struct spi_delay);
+
+ /* copy delays to controller */
+ if (setup)
+ memcpy(&spi->controller->cs_setup, setup, len);
+ else
+ memset(&spi->controller->cs_setup, 0, len);
+
+ if (hold)
+ memcpy(&spi->controller->cs_hold, hold, len);
+ else
+ memset(&spi->controller->cs_hold, 0, len);
+
+ if (inactive)
+ memcpy(&spi->controller->cs_inactive, inactive, len);
+ else
+ memset(&spi->controller->cs_inactive, 0, len);
+
+ return 0;
}
EXPORT_SYMBOL_GPL(spi_set_cs_timing);
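+
+/*
+ * Usage sketch (illustrative, not part of this patch): a client asking
+ * for 5 us setup/hold times and a 10 us inactive gap would call
+ *
+ *	struct spi_delay setup = { .value = 5, .unit = SPI_DELAY_UNIT_USECS };
+ *	struct spi_delay hold = { .value = 5, .unit = SPI_DELAY_UNIT_USECS };
+ *	struct spi_delay inactive = { .value = 10, .unit = SPI_DELAY_UNIT_USECS };
+ *
+ *	ret = spi_set_cs_timing(spi, &setup, &hold, &inactive);
+ *
+ * SCK-unit delays fail with -ENOTSUPP here unless the controller
+ * implements ->set_cs_timing itself.
+ */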
+static int _spi_xfer_word_delay_update(struct spi_transfer *xfer,
+ struct spi_device *spi)
+{
+ int delay1, delay2;
+
+ delay1 = spi_delay_to_ns(&xfer->word_delay, xfer);
+ if (delay1 < 0)
+ return delay1;
+
+ delay2 = spi_delay_to_ns(&spi->word_delay, xfer);
+ if (delay2 < 0)
+ return delay2;
+
+ if (delay1 < delay2)
+ memcpy(&xfer->word_delay, &spi->word_delay,
+ sizeof(xfer->word_delay));
+
+ return 0;
+}
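+
+/*
+ * Illustrative numbers (not part of this patch): with a per-device word
+ * delay of 2 us (2000 ns) and a per-transfer one of 500 ns, the helper
+ * above copies the device's spi_delay into the transfer, so the larger
+ * of the two delays always wins.
+ */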
+
static int __spi_validate(struct spi_device *spi, struct spi_message *message)
{
struct spi_controller *ctlr = spi->controller;
@@ -3261,8 +3507,8 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
return -EINVAL;
}
- if (xfer->word_delay_usecs < spi->word_delay_usecs)
- xfer->word_delay_usecs = spi->word_delay_usecs;
+ if (_spi_xfer_word_delay_update(xfer, spi))
+ return -EINVAL;
}
message->status = -EINPROGRESS;
@@ -3273,6 +3519,7 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
static int __spi_async(struct spi_device *spi, struct spi_message *message)
{
struct spi_controller *ctlr = spi->controller;
+ struct spi_transfer *xfer;
/*
* Some controllers do not support doing regular SPI transfers. Return
@@ -3288,6 +3535,13 @@ static int __spi_async(struct spi_device *spi, struct spi_message *message)
trace_spi_message_submit(message);
+ if (!ctlr->ptp_sts_supported) {
+ list_for_each_entry(xfer, &message->transfers, transfer_list) {
+ xfer->ptp_sts_word_pre = 0;
+ ptp_read_system_prets(xfer->ptp_sts);
+ }
+ }
+
return ctlr->transfer(spi, message);
}
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index 255786f2e844..1e217e3e9486 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -265,9 +265,11 @@ static int spidev_message(struct spidev_data *spidev,
k_tmp->tx_nbits = u_tmp->tx_nbits;
k_tmp->rx_nbits = u_tmp->rx_nbits;
k_tmp->bits_per_word = u_tmp->bits_per_word;
- k_tmp->delay_usecs = u_tmp->delay_usecs;
+ k_tmp->delay.value = u_tmp->delay_usecs;
+ k_tmp->delay.unit = SPI_DELAY_UNIT_USECS;
k_tmp->speed_hz = u_tmp->speed_hz;
- k_tmp->word_delay_usecs = u_tmp->word_delay_usecs;
+ k_tmp->word_delay.value = u_tmp->word_delay_usecs;
+ k_tmp->word_delay.unit = SPI_DELAY_UNIT_USECS;
if (!k_tmp->speed_hz)
k_tmp->speed_hz = spidev->speed_hz;
#ifdef VERBOSE
@@ -627,6 +629,9 @@ static int spidev_release(struct inode *inode, struct file *filp)
if (dofree)
kfree(spidev);
}
+#ifdef CONFIG_SPI_SLAVE
+ spi_slave_abort(spidev->spi);
+#endif
mutex_unlock(&device_list_lock);
return 0;
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 0f19b086865a..eaf753b70ec5 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -125,7 +125,7 @@ source "drivers/staging/exfat/Kconfig"
source "drivers/staging/qlge/Kconfig"
-source "drivers/staging/vboxsf/Kconfig"
+source "drivers/staging/hp/Kconfig"
source "drivers/staging/wfx/Kconfig"
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index b7c31b9fe044..0a4396c9067b 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -53,5 +53,5 @@ obj-$(CONFIG_UWB) += uwb/
obj-$(CONFIG_USB_WUSB) += wusbcore/
obj-$(CONFIG_EXFAT_FS) += exfat/
obj-$(CONFIG_QLGE) += qlge/
-obj-$(CONFIG_VBOXSF_FS) += vboxsf/
+obj-$(CONFIG_NET_VENDOR_HP) += hp/
obj-$(CONFIG_WFX) += wfx/
diff --git a/drivers/net/ethernet/hp/Kconfig b/drivers/staging/hp/Kconfig
index fb395cfe6b92..fb395cfe6b92 100644
--- a/drivers/net/ethernet/hp/Kconfig
+++ b/drivers/staging/hp/Kconfig
diff --git a/drivers/net/ethernet/hp/Makefile b/drivers/staging/hp/Makefile
index 5ed723bb11e2..5ed723bb11e2 100644
--- a/drivers/net/ethernet/hp/Makefile
+++ b/drivers/staging/hp/Makefile
diff --git a/drivers/net/ethernet/hp/hp100.c b/drivers/staging/hp/hp100.c
index 6ec78f5c602f..6ec78f5c602f 100644
--- a/drivers/net/ethernet/hp/hp100.c
+++ b/drivers/staging/hp/hp100.c
diff --git a/drivers/net/ethernet/hp/hp100.h b/drivers/staging/hp/hp100.h
index 7239b94c9de5..7239b94c9de5 100644
--- a/drivers/net/ethernet/hp/hp100.h
+++ b/drivers/staging/hp/hp100.h
diff --git a/drivers/staging/media/hantro/hantro.h b/drivers/staging/media/hantro/hantro.h
index f670bbde4159..deb90ae37859 100644
--- a/drivers/staging/media/hantro/hantro.h
+++ b/drivers/staging/media/hantro/hantro.h
@@ -26,21 +26,9 @@
#include "hantro_hw.h"
-#define VP8_MB_DIM 16
-#define VP8_MB_WIDTH(w) DIV_ROUND_UP(w, VP8_MB_DIM)
-#define VP8_MB_HEIGHT(h) DIV_ROUND_UP(h, VP8_MB_DIM)
-
-#define H264_MB_DIM 16
-#define H264_MB_WIDTH(w) DIV_ROUND_UP(w, H264_MB_DIM)
-#define H264_MB_HEIGHT(h) DIV_ROUND_UP(h, H264_MB_DIM)
-
-#define MPEG2_MB_DIM 16
-#define MPEG2_MB_WIDTH(w) DIV_ROUND_UP(w, MPEG2_MB_DIM)
-#define MPEG2_MB_HEIGHT(h) DIV_ROUND_UP(h, MPEG2_MB_DIM)
-
-#define JPEG_MB_DIM 16
-#define JPEG_MB_WIDTH(w) DIV_ROUND_UP(w, JPEG_MB_DIM)
-#define JPEG_MB_HEIGHT(h) DIV_ROUND_UP(h, JPEG_MB_DIM)
+#define MB_DIM 16
+#define MB_WIDTH(w) DIV_ROUND_UP(w, MB_DIM)
+#define MB_HEIGHT(h) DIV_ROUND_UP(h, MB_DIM)
struct hantro_ctx;
struct hantro_codec_ops;
@@ -379,7 +367,7 @@ static inline void hantro_reg_write(struct hantro_dev *vpu,
bool hantro_is_encoder_ctx(const struct hantro_ctx *ctx);
void *hantro_get_ctrl(struct hantro_ctx *ctx, u32 id);
-dma_addr_t hantro_get_ref(struct vb2_queue *q, u64 ts);
+dma_addr_t hantro_get_ref(struct hantro_ctx *ctx, u64 ts);
static inline struct vb2_v4l2_buffer *
hantro_get_src_buf(struct hantro_ctx *ctx)
diff --git a/drivers/staging/media/hantro/hantro_drv.c b/drivers/staging/media/hantro/hantro_drv.c
index 6d9d41170832..26108c96b674 100644
--- a/drivers/staging/media/hantro/hantro_drv.c
+++ b/drivers/staging/media/hantro/hantro_drv.c
@@ -43,8 +43,9 @@ void *hantro_get_ctrl(struct hantro_ctx *ctx, u32 id)
return ctrl ? ctrl->p_cur.p : NULL;
}
-dma_addr_t hantro_get_ref(struct vb2_queue *q, u64 ts)
+dma_addr_t hantro_get_ref(struct hantro_ctx *ctx, u64 ts)
{
+ struct vb2_queue *q = v4l2_m2m_get_dst_vq(ctx->fh.m2m_ctx);
struct vb2_buffer *buf;
int index;
@@ -413,20 +414,18 @@ static int hantro_open(struct file *filp)
if (func->id == MEDIA_ENT_F_PROC_VIDEO_ENCODER) {
allowed_codecs = vpu->variant->codec & HANTRO_ENCODERS;
ctx->buf_finish = hantro_enc_buf_finish;
- ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(vpu->m2m_dev, ctx,
- queue_init);
} else if (func->id == MEDIA_ENT_F_PROC_VIDEO_DECODER) {
allowed_codecs = vpu->variant->codec & HANTRO_DECODERS;
ctx->buf_finish = hantro_dec_buf_finish;
- ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(vpu->m2m_dev, ctx,
- queue_init);
} else {
- ctx->fh.m2m_ctx = ERR_PTR(-ENODEV);
+ ret = -ENODEV;
+ goto err_ctx_free;
}
+
+ ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(vpu->m2m_dev, ctx, queue_init);
if (IS_ERR(ctx->fh.m2m_ctx)) {
ret = PTR_ERR(ctx->fh.m2m_ctx);
- kfree(ctx);
- return ret;
+ goto err_ctx_free;
}
v4l2_fh_init(&ctx->fh, vdev);
@@ -447,6 +446,7 @@ static int hantro_open(struct file *filp)
err_fh_free:
v4l2_fh_del(&ctx->fh);
v4l2_fh_exit(&ctx->fh);
+err_ctx_free:
kfree(ctx);
return ret;
}
diff --git a/drivers/staging/media/hantro/hantro_g1_h264_dec.c b/drivers/staging/media/hantro/hantro_g1_h264_dec.c
index 7ab534936843..3cd40a8f0daa 100644
--- a/drivers/staging/media/hantro/hantro_g1_h264_dec.c
+++ b/drivers/staging/media/hantro/hantro_g1_h264_dec.c
@@ -34,9 +34,11 @@ static void set_params(struct hantro_ctx *ctx)
reg = G1_REG_DEC_CTRL0_DEC_AXI_WR_ID(0x0);
if (sps->flags & V4L2_H264_SPS_FLAG_MB_ADAPTIVE_FRAME_FIELD)
reg |= G1_REG_DEC_CTRL0_SEQ_MBAFF_E;
- reg |= G1_REG_DEC_CTRL0_PICORD_COUNT_E;
- if (dec_param->nal_ref_idc)
- reg |= G1_REG_DEC_CTRL0_WRITE_MVS_E;
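+ /*
+ * Note (illustrative, not part of this patch): profile_idc 66 is
+ * Baseline, which has no B slices, so POC ordering and motion
+ * vector storage are only needed for the higher profiles.
+ */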
+ if (sps->profile_idc > 66) {
+ reg |= G1_REG_DEC_CTRL0_PICORD_COUNT_E;
+ if (dec_param->nal_ref_idc)
+ reg |= G1_REG_DEC_CTRL0_WRITE_MVS_E;
+ }
if (!(sps->flags & V4L2_H264_SPS_FLAG_FRAME_MBS_ONLY) &&
(sps->flags & V4L2_H264_SPS_FLAG_MB_ADAPTIVE_FRAME_FIELD ||
@@ -49,8 +51,8 @@ static void set_params(struct hantro_ctx *ctx)
vdpu_write_relaxed(vpu, reg, G1_REG_DEC_CTRL0);
/* Decoder control register 1. */
- reg = G1_REG_DEC_CTRL1_PIC_MB_WIDTH(sps->pic_width_in_mbs_minus1 + 1) |
- G1_REG_DEC_CTRL1_PIC_MB_HEIGHT_P(sps->pic_height_in_map_units_minus1 + 1) |
+ reg = G1_REG_DEC_CTRL1_PIC_MB_WIDTH(MB_WIDTH(ctx->src_fmt.width)) |
+ G1_REG_DEC_CTRL1_PIC_MB_HEIGHT_P(MB_HEIGHT(ctx->src_fmt.height)) |
G1_REG_DEC_CTRL1_REF_FRAMES(sps->max_num_ref_frames);
vdpu_write_relaxed(vpu, reg, G1_REG_DEC_CTRL1);
@@ -61,7 +63,7 @@ static void set_params(struct hantro_ctx *ctx)
/* always use the matrix sent from userspace */
reg |= G1_REG_DEC_CTRL2_TYPE1_QUANT_E;
- if (slices[0].flags & V4L2_H264_SLICE_FLAG_FIELD_PIC)
+ if (!(sps->flags & V4L2_H264_SPS_FLAG_FRAME_MBS_ONLY))
reg |= G1_REG_DEC_CTRL2_FIELDPIC_FLAG_E;
vdpu_write_relaxed(vpu, reg, G1_REG_DEC_CTRL2);
@@ -79,7 +81,7 @@ static void set_params(struct hantro_ctx *ctx)
reg |= G1_REG_DEC_CTRL4_CABAC_E;
if (sps->flags & V4L2_H264_SPS_FLAG_DIRECT_8X8_INFERENCE)
reg |= G1_REG_DEC_CTRL4_DIR_8X8_INFER_E;
- if (sps->chroma_format_idc == 0)
+ if (sps->profile_idc >= 100 && sps->chroma_format_idc == 0)
reg |= G1_REG_DEC_CTRL4_BLACKWHITE_E;
if (pps->flags & V4L2_H264_PPS_FLAG_WEIGHTED_PRED)
reg |= G1_REG_DEC_CTRL4_WEIGHT_PRED_E;
@@ -220,10 +222,9 @@ static void set_ref(struct hantro_ctx *ctx)
/* Set up addresses of DPB buffers. */
for (i = 0; i < HANTRO_H264_DPB_SIZE; i++) {
- struct vb2_buffer *buf = hantro_h264_get_ref_buf(ctx, i);
+ dma_addr_t dma_addr = hantro_h264_get_ref_buf(ctx, i);
- vdpu_write_relaxed(vpu, vb2_dma_contig_plane_dma_addr(buf, 0),
- G1_REG_ADDR_REF(i));
+ vdpu_write_relaxed(vpu, dma_addr, G1_REG_ADDR_REF(i));
}
}
@@ -233,6 +234,7 @@ static void set_buffers(struct hantro_ctx *ctx)
struct vb2_v4l2_buffer *src_buf, *dst_buf;
struct hantro_dev *vpu = ctx->dev;
dma_addr_t src_dma, dst_dma;
+ size_t offset = 0;
src_buf = hantro_get_src_buf(ctx);
dst_buf = hantro_get_dst_buf(ctx);
@@ -243,18 +245,30 @@ static void set_buffers(struct hantro_ctx *ctx)
/* Destination (decoded frame) buffer. */
dst_dma = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
- vdpu_write_relaxed(vpu, dst_dma, G1_REG_ADDR_DST);
+ /* Adjust the DMA address to start at the second line for bottom fields */
+ if (ctrls->slices[0].flags & V4L2_H264_SLICE_FLAG_BOTTOM_FIELD)
+ offset = ALIGN(ctx->src_fmt.width, MB_DIM);
+ vdpu_write_relaxed(vpu, dst_dma + offset, G1_REG_ADDR_DST);
/* Higher profiles require DMV buffer appended to reference frames. */
- if (ctrls->sps->profile_idc > 66) {
- size_t pic_size = ctx->h264_dec.pic_size;
- size_t mv_offset = round_up(pic_size, 8);
-
+ if (ctrls->sps->profile_idc > 66 && ctrls->decode->nal_ref_idc) {
+ unsigned int bytes_per_mb = 384;
+
+ /* DMV buffer for monochrome starts directly after the Y-plane */
+ if (ctrls->sps->profile_idc >= 100 &&
+ ctrls->sps->chroma_format_idc == 0)
+ bytes_per_mb = 256;
+ offset = bytes_per_mb * MB_WIDTH(ctx->src_fmt.width) *
+ MB_HEIGHT(ctx->src_fmt.height);
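+
+ /*
+ * Illustrative numbers (not part of this patch): at 1920x1088,
+ * MB_WIDTH = 120 and MB_HEIGHT = 68, so the DMV area begins
+ * 384 * 120 * 68 = 3133440 bytes past dst_dma, or
+ * 256 * 120 * 68 = 2088960 bytes for monochrome content.
+ */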
+
+ /*
+ * The DMV buffer is split in two for field-encoded frames;
+ * adjust the offset for the bottom field.
+ */
if (ctrls->slices[0].flags & V4L2_H264_SLICE_FLAG_BOTTOM_FIELD)
- mv_offset += 32 * H264_MB_WIDTH(ctx->dst_fmt.width);
-
- vdpu_write_relaxed(vpu, dst_dma + mv_offset,
- G1_REG_ADDR_DIR_MV);
+ offset += 32 * MB_WIDTH(ctx->src_fmt.width) *
+ MB_HEIGHT(ctx->src_fmt.height);
+ vdpu_write_relaxed(vpu, dst_dma + offset, G1_REG_ADDR_DIR_MV);
}
/* Auxiliary buffer prepared in hantro_g1_h264_dec_prepare_table(). */
diff --git a/drivers/staging/media/hantro/hantro_g1_mpeg2_dec.c b/drivers/staging/media/hantro/hantro_g1_mpeg2_dec.c
index 80f0e94f8afa..f3bf67d8a289 100644
--- a/drivers/staging/media/hantro/hantro_g1_mpeg2_dec.c
+++ b/drivers/staging/media/hantro/hantro_g1_mpeg2_dec.c
@@ -105,17 +105,14 @@ hantro_g1_mpeg2_dec_set_buffers(struct hantro_dev *vpu, struct hantro_ctx *ctx,
{
dma_addr_t forward_addr = 0, backward_addr = 0;
dma_addr_t current_addr, addr;
- struct vb2_queue *vq;
-
- vq = v4l2_m2m_get_dst_vq(ctx->fh.m2m_ctx);
switch (picture->picture_coding_type) {
case V4L2_MPEG2_PICTURE_CODING_TYPE_B:
- backward_addr = hantro_get_ref(vq,
+ backward_addr = hantro_get_ref(ctx,
slice_params->backward_ref_ts);
/* fall-through */
case V4L2_MPEG2_PICTURE_CODING_TYPE_P:
- forward_addr = hantro_get_ref(vq,
+ forward_addr = hantro_get_ref(ctx,
slice_params->forward_ref_ts);
}
@@ -207,8 +204,8 @@ void hantro_g1_mpeg2_dec_run(struct hantro_ctx *ctx)
G1_REG_DEC_AXI_WR_ID(0);
vdpu_write_relaxed(vpu, reg, G1_SWREG(3));
- reg = G1_REG_PIC_MB_WIDTH(MPEG2_MB_WIDTH(ctx->dst_fmt.width)) |
- G1_REG_PIC_MB_HEIGHT_P(MPEG2_MB_HEIGHT(ctx->dst_fmt.height)) |
+ reg = G1_REG_PIC_MB_WIDTH(MB_WIDTH(ctx->dst_fmt.width)) |
+ G1_REG_PIC_MB_HEIGHT_P(MB_HEIGHT(ctx->dst_fmt.height)) |
G1_REG_ALT_SCAN_E(picture->alternate_scan) |
G1_REG_TOPFIELDFIRST_E(picture->top_field_first);
vdpu_write_relaxed(vpu, reg, G1_SWREG(4));
diff --git a/drivers/staging/media/hantro/hantro_g1_vp8_dec.c b/drivers/staging/media/hantro/hantro_g1_vp8_dec.c
index 6d99c2be01cf..cad18094fee0 100644
--- a/drivers/staging/media/hantro/hantro_g1_vp8_dec.c
+++ b/drivers/staging/media/hantro/hantro_g1_vp8_dec.c
@@ -370,19 +370,18 @@ static void cfg_tap(struct hantro_ctx *ctx,
static void cfg_ref(struct hantro_ctx *ctx,
const struct v4l2_ctrl_vp8_frame_header *hdr)
{
- struct vb2_queue *cap_q = &ctx->fh.m2m_ctx->cap_q_ctx.q;
struct hantro_dev *vpu = ctx->dev;
struct vb2_v4l2_buffer *vb2_dst;
dma_addr_t ref;
vb2_dst = hantro_get_dst_buf(ctx);
- ref = hantro_get_ref(cap_q, hdr->last_frame_ts);
+ ref = hantro_get_ref(ctx, hdr->last_frame_ts);
if (!ref)
ref = vb2_dma_contig_plane_dma_addr(&vb2_dst->vb2_buf, 0);
vdpu_write_relaxed(vpu, ref, G1_REG_ADDR_REF(0));
- ref = hantro_get_ref(cap_q, hdr->golden_frame_ts);
+ ref = hantro_get_ref(ctx, hdr->golden_frame_ts);
WARN_ON(!ref && hdr->golden_frame_ts);
if (!ref)
ref = vb2_dma_contig_plane_dma_addr(&vb2_dst->vb2_buf, 0);
@@ -390,7 +389,7 @@ static void cfg_ref(struct hantro_ctx *ctx,
ref |= G1_REG_ADDR_REF_TOPC_E;
vdpu_write_relaxed(vpu, ref, G1_REG_ADDR_REF(4));
- ref = hantro_get_ref(cap_q, hdr->alt_frame_ts);
+ ref = hantro_get_ref(ctx, hdr->alt_frame_ts);
WARN_ON(!ref && hdr->alt_frame_ts);
if (!ref)
ref = vb2_dma_contig_plane_dma_addr(&vb2_dst->vb2_buf, 0);
@@ -470,8 +469,8 @@ void hantro_g1_vp8_dec_run(struct hantro_ctx *ctx)
vdpu_write_relaxed(vpu, reg, G1_REG_DEC_CTRL0);
/* Frame dimensions */
- mb_width = VP8_MB_WIDTH(width);
- mb_height = VP8_MB_HEIGHT(height);
+ mb_width = MB_WIDTH(width);
+ mb_height = MB_HEIGHT(height);
reg = G1_REG_DEC_CTRL1_PIC_MB_WIDTH(mb_width) |
G1_REG_DEC_CTRL1_PIC_MB_HEIGHT_P(mb_height) |
G1_REG_DEC_CTRL1_PIC_MB_W_EXT(mb_width >> 9) |
diff --git a/drivers/staging/media/hantro/hantro_h1_jpeg_enc.c b/drivers/staging/media/hantro/hantro_h1_jpeg_enc.c
index ecd34a7db190..938b48d4d3d9 100644
--- a/drivers/staging/media/hantro/hantro_h1_jpeg_enc.c
+++ b/drivers/staging/media/hantro/hantro_h1_jpeg_enc.c
@@ -116,8 +116,8 @@ void hantro_h1_jpeg_enc_run(struct hantro_ctx *ctx)
/* Make sure that all registers are written at this point. */
vepu_write(vpu, reg, H1_REG_AXI_CTRL);
- reg = H1_REG_ENC_CTRL_WIDTH(JPEG_MB_WIDTH(ctx->src_fmt.width))
- | H1_REG_ENC_CTRL_HEIGHT(JPEG_MB_HEIGHT(ctx->src_fmt.height))
+ reg = H1_REG_ENC_CTRL_WIDTH(MB_WIDTH(ctx->src_fmt.width))
+ | H1_REG_ENC_CTRL_HEIGHT(MB_HEIGHT(ctx->src_fmt.height))
| H1_REG_ENC_CTRL_ENC_MODE_JPEG
| H1_REG_ENC_PIC_INTRA
| H1_REG_ENC_CTRL_EN_BIT;
diff --git a/drivers/staging/media/hantro/hantro_h264.c b/drivers/staging/media/hantro/hantro_h264.c
index 0d758e0c0f99..568640eab3a6 100644
--- a/drivers/staging/media/hantro/hantro_h264.c
+++ b/drivers/staging/media/hantro/hantro_h264.c
@@ -20,9 +20,9 @@
/* Size with u32 units. */
#define CABAC_INIT_BUFFER_SIZE (460 * 2)
#define POC_BUFFER_SIZE 34
-#define SCALING_LIST_SIZE (6 * 16 + 6 * 64)
+#define SCALING_LIST_SIZE (6 * 16 + 2 * 64)
-#define POC_CMP(p0, p1) ((p0) < (p1) ? -1 : 1)
+#define HANTRO_CMP(a, b) ((a) < (b) ? -1 : 1)
/* Data structure describing auxiliary buffer format. */
struct hantro_h264_dec_priv_tbl {
@@ -194,23 +194,6 @@ static const u32 h264_cabac_table[] = {
0x1f0c2517, 0x1f261440
};
-/*
- * NOTE: The scaling lists are in zig-zag order, apply inverse scanning process
- * to get the values in matrix order. In addition, the hardware requires bytes
- * swapped within each subsequent 4 bytes. Both arrays below include both
- * transformations.
- */
-static const u32 zig_zag_4x4[] = {
- 3, 2, 7, 11, 6, 1, 0, 5, 10, 15, 14, 9, 4, 8, 13, 12
-};
-
-static const u32 zig_zag_8x8[] = {
- 3, 2, 11, 19, 10, 1, 0, 9, 18, 27, 35, 26, 17, 8, 7, 6,
- 15, 16, 25, 34, 43, 51, 42, 33, 24, 23, 14, 5, 4, 13, 22, 31,
- 32, 41, 50, 59, 58, 49, 40, 39, 30, 21, 12, 20, 29, 38, 47, 48,
- 57, 56, 55, 46, 37, 28, 36, 45, 54, 63, 62, 53, 44, 52, 61, 60
-};
-
static void
reorder_scaling_list(struct hantro_ctx *ctx)
{
@@ -218,33 +201,23 @@ reorder_scaling_list(struct hantro_ctx *ctx)
const struct v4l2_ctrl_h264_scaling_matrix *scaling = ctrls->scaling;
const size_t num_list_4x4 = ARRAY_SIZE(scaling->scaling_list_4x4);
const size_t list_len_4x4 = ARRAY_SIZE(scaling->scaling_list_4x4[0]);
- const size_t num_list_8x8 = ARRAY_SIZE(scaling->scaling_list_8x8);
const size_t list_len_8x8 = ARRAY_SIZE(scaling->scaling_list_8x8[0]);
struct hantro_h264_dec_priv_tbl *tbl = ctx->h264_dec.priv.cpu;
- u8 *dst = tbl->scaling_list;
- const u8 *src;
+ u32 *dst = (u32 *)tbl->scaling_list;
+ const u32 *src;
int i, j;
- BUILD_BUG_ON(ARRAY_SIZE(zig_zag_4x4) != list_len_4x4);
- BUILD_BUG_ON(ARRAY_SIZE(zig_zag_8x8) != list_len_8x8);
- BUILD_BUG_ON(ARRAY_SIZE(tbl->scaling_list) !=
- num_list_4x4 * list_len_4x4 +
- num_list_8x8 * list_len_8x8);
-
- src = &scaling->scaling_list_4x4[0][0];
- for (i = 0; i < num_list_4x4; ++i) {
- for (j = 0; j < list_len_4x4; ++j)
- dst[zig_zag_4x4[j]] = src[j];
- src += list_len_4x4;
- dst += list_len_4x4;
+ for (i = 0; i < num_list_4x4; i++) {
+ src = (u32 *)&scaling->scaling_list_4x4[i];
+ for (j = 0; j < list_len_4x4 / 4; j++)
+ *dst++ = swab32(src[j]);
}
- src = &scaling->scaling_list_8x8[0][0];
- for (i = 0; i < num_list_8x8; ++i) {
- for (j = 0; j < list_len_8x8; ++j)
- dst[zig_zag_8x8[j]] = src[j];
- src += list_len_8x8;
- dst += list_len_8x8;
+ /* Only Intra/Inter Y lists */
+ for (i = 0; i < 2; i++) {
+ src = (u32 *)&scaling->scaling_list_8x8[i];
+ for (j = 0; j < list_len_8x8 / 4; j++)
+ *dst++ = swab32(src[j]);
}
}
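+
+/*
+ * Byte-order note (illustrative, not part of this patch): on a
+ * little-endian CPU the source bytes {0x01, 0x02, 0x03, 0x04} load as
+ * the u32 0x04030201; swab32() gives 0x01020304, which stores back as
+ * {0x04, 0x03, 0x02, 0x01}. Each 4-byte group is thus written in
+ * reversed byte order, the layout the hardware expects.
+ */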
@@ -271,6 +244,7 @@ struct hantro_h264_reflist_builder {
const struct v4l2_h264_dpb_entry *dpb;
s32 pocs[HANTRO_H264_DPB_SIZE];
u8 unordered_reflist[HANTRO_H264_DPB_SIZE];
+ int frame_nums[HANTRO_H264_DPB_SIZE];
s32 curpoc;
u8 num_valid;
};
@@ -294,13 +268,20 @@ static void
init_reflist_builder(struct hantro_ctx *ctx,
struct hantro_h264_reflist_builder *b)
{
+ const struct v4l2_ctrl_h264_slice_params *slice_params;
const struct v4l2_ctrl_h264_decode_params *dec_param;
+ const struct v4l2_ctrl_h264_sps *sps;
struct vb2_v4l2_buffer *buf = hantro_get_dst_buf(ctx);
const struct v4l2_h264_dpb_entry *dpb = ctx->h264_dec.dpb;
struct vb2_queue *cap_q = &ctx->fh.m2m_ctx->cap_q_ctx.q;
+ int cur_frame_num, max_frame_num;
unsigned int i;
dec_param = ctx->h264_dec.ctrls.decode;
+ slice_params = &ctx->h264_dec.ctrls.slices[0];
+ sps = ctx->h264_dec.ctrls.sps;
+ max_frame_num = 1 << (sps->log2_max_frame_num_minus4 + 4);
+ cur_frame_num = slice_params->frame_num;
memset(b, 0, sizeof(*b));
b->dpb = dpb;
@@ -318,6 +299,18 @@ init_reflist_builder(struct hantro_ctx *ctx,
continue;
buf = to_vb2_v4l2_buffer(vb2_get_buffer(cap_q, buf_idx));
+
+ /*
+ * Handle frame_num wraparound as described in section
+ * '8.2.4.1 Decoding process for picture numbers' of the spec.
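+ *
+ * Illustrative (not part of this patch): with log2_max_frame_num_minus4
+ * equal to 0, max_frame_num is 16; if cur_frame_num is 2, a DPB entry
+ * with frame_num 14 is treated as 14 - 16 = -2, i.e. as older than the
+ * current frame.
+ *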
+ * TODO: This logic will have to be adjusted when we start
+ * supporting interlaced content.
+ */
+ if (dpb[i].frame_num > cur_frame_num)
+ b->frame_nums[i] = (int)dpb[i].frame_num - max_frame_num;
+ else
+ b->frame_nums[i] = dpb[i].frame_num;
+
b->pocs[i] = get_poc(buf->field, dpb[i].top_field_order_cnt,
dpb[i].bottom_field_order_cnt);
b->unordered_reflist[b->num_valid] = i;
@@ -353,9 +346,10 @@ static int p_ref_list_cmp(const void *ptra, const void *ptrb, const void *data)
* ascending order.
*/
if (!(a->flags & V4L2_H264_DPB_ENTRY_FLAG_LONG_TERM))
- return b->frame_num - a->frame_num;
+ return HANTRO_CMP(builder->frame_nums[idxb],
+ builder->frame_nums[idxa]);
- return a->pic_num - b->pic_num;
+ return HANTRO_CMP(a->pic_num, b->pic_num);
}
static int b0_ref_list_cmp(const void *ptra, const void *ptrb, const void *data)
@@ -381,7 +375,7 @@ static int b0_ref_list_cmp(const void *ptra, const void *ptrb, const void *data)
/* Long term pics in ascending pic num order. */
if (a->flags & V4L2_H264_DPB_ENTRY_FLAG_LONG_TERM)
- return a->pic_num - b->pic_num;
+ return HANTRO_CMP(a->pic_num, b->pic_num);
poca = builder->pocs[idxa];
pocb = builder->pocs[idxb];
@@ -392,11 +386,11 @@ static int b0_ref_list_cmp(const void *ptra, const void *ptrb, const void *data)
* order.
*/
if ((poca < builder->curpoc) != (pocb < builder->curpoc))
- return POC_CMP(poca, pocb);
+ return HANTRO_CMP(poca, pocb);
else if (poca < builder->curpoc)
- return POC_CMP(pocb, poca);
+ return HANTRO_CMP(pocb, poca);
- return POC_CMP(poca, pocb);
+ return HANTRO_CMP(poca, pocb);
}
static int b1_ref_list_cmp(const void *ptra, const void *ptrb, const void *data)
@@ -422,22 +416,22 @@ static int b1_ref_list_cmp(const void *ptra, const void *ptrb, const void *data)
/* Long term pics in ascending pic num order. */
if (a->flags & V4L2_H264_DPB_ENTRY_FLAG_LONG_TERM)
- return a->pic_num - b->pic_num;
+ return HANTRO_CMP(a->pic_num, b->pic_num);
poca = builder->pocs[idxa];
pocb = builder->pocs[idxb];
/*
* Short term pics with POC > cur POC first in POC ascending order
- * followed by short term pics with POC > cur POC in POC descending
+ * followed by short term pics with POC < cur POC in POC descending
* order.
*/
if ((poca < builder->curpoc) != (pocb < builder->curpoc))
- return POC_CMP(pocb, poca);
+ return HANTRO_CMP(pocb, poca);
else if (poca < builder->curpoc)
- return POC_CMP(pocb, poca);
+ return HANTRO_CMP(pocb, poca);
- return POC_CMP(poca, pocb);
+ return HANTRO_CMP(poca, pocb);
}
static void
@@ -537,22 +531,18 @@ static void update_dpb(struct hantro_ctx *ctx)
}
}
-struct vb2_buffer *hantro_h264_get_ref_buf(struct hantro_ctx *ctx,
- unsigned int dpb_idx)
+dma_addr_t hantro_h264_get_ref_buf(struct hantro_ctx *ctx,
+ unsigned int dpb_idx)
{
- struct vb2_queue *cap_q = &ctx->fh.m2m_ctx->cap_q_ctx.q;
struct v4l2_h264_dpb_entry *dpb = ctx->h264_dec.dpb;
- struct vb2_buffer *buf;
- int buf_idx = -1;
+ dma_addr_t dma_addr = 0;
if (dpb[dpb_idx].flags & V4L2_H264_DPB_ENTRY_FLAG_ACTIVE)
- buf_idx = vb2_find_timestamp(cap_q,
- dpb[dpb_idx].reference_ts, 0);
+ dma_addr = hantro_get_ref(ctx, dpb[dpb_idx].reference_ts);
- if (buf_idx >= 0) {
- buf = vb2_get_buffer(cap_q, buf_idx);
- } else {
+ if (!dma_addr) {
struct vb2_v4l2_buffer *dst_buf;
+ struct vb2_buffer *buf;
/*
* If a DPB entry is unused or invalid, address of current
@@ -560,9 +550,10 @@ struct vb2_buffer *hantro_h264_get_ref_buf(struct hantro_ctx *ctx,
*/
dst_buf = hantro_get_dst_buf(ctx);
buf = &dst_buf->vb2_buf;
+ dma_addr = vb2_dma_contig_plane_dma_addr(buf, 0);
}
- return buf;
+ return dma_addr;
}
int hantro_h264_dec_prepare_run(struct hantro_ctx *ctx)
@@ -627,7 +618,6 @@ int hantro_h264_dec_init(struct hantro_ctx *ctx)
struct hantro_h264_dec_hw_ctx *h264_dec = &ctx->h264_dec;
struct hantro_aux_buf *priv = &h264_dec->priv;
struct hantro_h264_dec_priv_tbl *tbl;
- struct v4l2_pix_format_mplane pix_mp;
priv->cpu = dma_alloc_coherent(vpu->dev, sizeof(*tbl), &priv->dma,
GFP_KERNEL);
@@ -638,9 +628,5 @@ int hantro_h264_dec_init(struct hantro_ctx *ctx)
tbl = priv->cpu;
memcpy(tbl->cabac_table, h264_cabac_table, sizeof(tbl->cabac_table));
- v4l2_fill_pixfmt_mp(&pix_mp, ctx->dst_fmt.pixelformat,
- ctx->dst_fmt.width, ctx->dst_fmt.height);
- h264_dec->pic_size = pix_mp.plane_fmt[0].sizeimage;
-
return 0;
}
diff --git a/drivers/staging/media/hantro/hantro_hw.h b/drivers/staging/media/hantro/hantro_hw.h
index 2fab655bf098..fa91dd1848b7 100644
--- a/drivers/staging/media/hantro/hantro_hw.h
+++ b/drivers/staging/media/hantro/hantro_hw.h
@@ -80,15 +80,12 @@ struct hantro_h264_dec_reflists {
* @dpb: DPB
* @reflists: P/B0/B1 reflists
* @ctrls: V4L2 controls attached to a run
- * @pic_size: Size in bytes of decoded picture, this is needed
- * to pass the location of motion vectors.
*/
struct hantro_h264_dec_hw_ctx {
struct hantro_aux_buf priv;
struct v4l2_h264_dpb_entry dpb[HANTRO_H264_DPB_SIZE];
struct hantro_h264_dec_reflists reflists;
struct hantro_h264_dec_ctrls ctrls;
- size_t pic_size;
};
/**
@@ -158,8 +155,8 @@ void rk3399_vpu_jpeg_enc_run(struct hantro_ctx *ctx);
int hantro_jpeg_enc_init(struct hantro_ctx *ctx);
void hantro_jpeg_enc_exit(struct hantro_ctx *ctx);
-struct vb2_buffer *hantro_h264_get_ref_buf(struct hantro_ctx *ctx,
- unsigned int dpb_idx);
+dma_addr_t hantro_h264_get_ref_buf(struct hantro_ctx *ctx,
+ unsigned int dpb_idx);
int hantro_h264_dec_prepare_run(struct hantro_ctx *ctx);
void hantro_g1_h264_dec_run(struct hantro_ctx *ctx);
int hantro_h264_dec_init(struct hantro_ctx *ctx);
diff --git a/drivers/staging/media/hantro/hantro_v4l2.c b/drivers/staging/media/hantro/hantro_v4l2.c
index 3dae52abb96c..1dae76f20034 100644
--- a/drivers/staging/media/hantro/hantro_v4l2.c
+++ b/drivers/staging/media/hantro/hantro_v4l2.c
@@ -240,14 +240,30 @@ static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f,
v4l2_fill_pixfmt_mp(pix_mp, fmt->fourcc, pix_mp->width,
pix_mp->height);
/*
+ * A decoded 8-bit 4:2:0 NV12 frame may need memory for up to
+ * 448 bytes per macroblock, with an additional 32 bytes on
+ * multi-core variants.
+ *
* The H264 decoder needs extra space on the output buffers
* to store motion vectors. This is needed for reference
* frames.
+ *
+ * Memory layout is as follows:
+ *
+ * +---------------------------+
+ * | Y-plane 256 bytes x MBs |
+ * +---------------------------+
+ * | UV-plane 128 bytes x MBs |
+ * +---------------------------+
+ * | MV buffer 64 bytes x MBs |
+ * +---------------------------+
+ * | MC sync 32 bytes |
+ * +---------------------------+
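+ *
+ * Illustrative numbers (not part of this patch): at 1920x1088,
+ * MB_WIDTH = 120 and MB_HEIGHT = 68, so the extra H264 allocation
+ * below is 64 * 120 * 68 + 32 = 522272 bytes on top of the NV12
+ * sizeimage.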
*/
if (ctx->vpu_src_fmt->fourcc == V4L2_PIX_FMT_H264_SLICE)
pix_mp->plane_fmt[0].sizeimage +=
- 128 * DIV_ROUND_UP(pix_mp->width, 16) *
- DIV_ROUND_UP(pix_mp->height, 16);
+ 64 * MB_WIDTH(pix_mp->width) *
+ MB_HEIGHT(pix_mp->height) + 32;
} else if (!pix_mp->plane_fmt[0].sizeimage) {
/*
* For coded formats the application can specify
@@ -367,20 +383,27 @@ vidioc_s_fmt_out_mplane(struct file *file, void *priv, struct v4l2_format *f)
{
struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
struct hantro_ctx *ctx = fh_to_ctx(priv);
+ struct vb2_queue *vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
const struct hantro_fmt *formats;
unsigned int num_fmts;
- struct vb2_queue *vq;
int ret;
- /* Change not allowed if queue is busy. */
- vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
- if (vb2_is_busy(vq))
- return -EBUSY;
+ ret = vidioc_try_fmt_out_mplane(file, priv, f);
+ if (ret)
+ return ret;
if (!hantro_is_encoder_ctx(ctx)) {
struct vb2_queue *peer_vq;
/*
+ * In order to support dynamic resolution change, the decoder
+ * admits a resolution change as long as the pixelformat stays
+ * the same. This can't be done while streaming.
+ */
+ if (vb2_is_streaming(vq) || (vb2_is_busy(vq) &&
+ pix_mp->pixelformat != ctx->src_fmt.pixelformat))
+ return -EBUSY;
+ /*
* Since format change on the OUTPUT queue will reset
* the CAPTURE queue, we can't allow doing so
* when the CAPTURE queue has buffers allocated.
@@ -389,12 +412,15 @@ vidioc_s_fmt_out_mplane(struct file *file, void *priv, struct v4l2_format *f)
V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
if (vb2_is_busy(peer_vq))
return -EBUSY;
+ } else {
+ /*
+ * The encoder doesn't admit a format change if
+ * there are OUTPUT buffers allocated.
+ */
+ if (vb2_is_busy(vq))
+ return -EBUSY;
}
- ret = vidioc_try_fmt_out_mplane(file, priv, f);
- if (ret)
- return ret;
-
formats = hantro_get_formats(ctx, &num_fmts);
ctx->vpu_src_fmt = hantro_find_format(formats, num_fmts,
pix_mp->pixelformat);
diff --git a/drivers/staging/media/hantro/rk3288_vpu_hw.c b/drivers/staging/media/hantro/rk3288_vpu_hw.c
index 6bfcc47d1e58..f8db6fcaad73 100644
--- a/drivers/staging/media/hantro/rk3288_vpu_hw.c
+++ b/drivers/staging/media/hantro/rk3288_vpu_hw.c
@@ -48,10 +48,10 @@ static const struct hantro_fmt rk3288_vpu_enc_fmts[] = {
.frmsize = {
.min_width = 96,
.max_width = 8192,
- .step_width = JPEG_MB_DIM,
+ .step_width = MB_DIM,
.min_height = 32,
.max_height = 8192,
- .step_height = JPEG_MB_DIM,
+ .step_height = MB_DIM,
},
},
};
@@ -67,11 +67,11 @@ static const struct hantro_fmt rk3288_vpu_dec_fmts[] = {
.max_depth = 2,
.frmsize = {
.min_width = 48,
- .max_width = 3840,
- .step_width = H264_MB_DIM,
+ .max_width = 4096,
+ .step_width = MB_DIM,
.min_height = 48,
- .max_height = 2160,
- .step_height = H264_MB_DIM,
+ .max_height = 2304,
+ .step_height = MB_DIM,
},
},
{
@@ -81,10 +81,10 @@ static const struct hantro_fmt rk3288_vpu_dec_fmts[] = {
.frmsize = {
.min_width = 48,
.max_width = 1920,
- .step_width = MPEG2_MB_DIM,
+ .step_width = MB_DIM,
.min_height = 48,
.max_height = 1088,
- .step_height = MPEG2_MB_DIM,
+ .step_height = MB_DIM,
},
},
{
@@ -94,10 +94,10 @@ static const struct hantro_fmt rk3288_vpu_dec_fmts[] = {
.frmsize = {
.min_width = 48,
.max_width = 3840,
- .step_width = VP8_MB_DIM,
+ .step_width = MB_DIM,
.min_height = 48,
.max_height = 2160,
- .step_height = VP8_MB_DIM,
+ .step_height = MB_DIM,
},
},
};
diff --git a/drivers/staging/media/hantro/rk3399_vpu_hw.c b/drivers/staging/media/hantro/rk3399_vpu_hw.c
index 14d14bc6b12b..9ac1f2cb6a16 100644
--- a/drivers/staging/media/hantro/rk3399_vpu_hw.c
+++ b/drivers/staging/media/hantro/rk3399_vpu_hw.c
@@ -47,10 +47,10 @@ static const struct hantro_fmt rk3399_vpu_enc_fmts[] = {
.frmsize = {
.min_width = 96,
.max_width = 8192,
- .step_width = JPEG_MB_DIM,
+ .step_width = MB_DIM,
.min_height = 32,
.max_height = 8192,
- .step_height = JPEG_MB_DIM,
+ .step_height = MB_DIM,
},
},
};
@@ -67,10 +67,10 @@ static const struct hantro_fmt rk3399_vpu_dec_fmts[] = {
.frmsize = {
.min_width = 48,
.max_width = 1920,
- .step_width = MPEG2_MB_DIM,
+ .step_width = MB_DIM,
.min_height = 48,
.max_height = 1088,
- .step_height = MPEG2_MB_DIM,
+ .step_height = MB_DIM,
},
},
{
@@ -80,10 +80,10 @@ static const struct hantro_fmt rk3399_vpu_dec_fmts[] = {
.frmsize = {
.min_width = 48,
.max_width = 3840,
- .step_width = VP8_MB_DIM,
+ .step_width = MB_DIM,
.min_height = 48,
.max_height = 2160,
- .step_height = VP8_MB_DIM,
+ .step_height = MB_DIM,
},
},
};
diff --git a/drivers/staging/media/hantro/rk3399_vpu_hw_jpeg_enc.c b/drivers/staging/media/hantro/rk3399_vpu_hw_jpeg_enc.c
index 06162f569b5e..067892345b5d 100644
--- a/drivers/staging/media/hantro/rk3399_vpu_hw_jpeg_enc.c
+++ b/drivers/staging/media/hantro/rk3399_vpu_hw_jpeg_enc.c
@@ -149,8 +149,8 @@ void rk3399_vpu_jpeg_enc_run(struct hantro_ctx *ctx)
reg = VEPU_REG_AXI_CTRL_BURST_LEN(16);
vepu_write_relaxed(vpu, reg, VEPU_REG_AXI_CTRL);
- reg = VEPU_REG_MB_WIDTH(JPEG_MB_WIDTH(ctx->src_fmt.width))
- | VEPU_REG_MB_HEIGHT(JPEG_MB_HEIGHT(ctx->src_fmt.height))
+ reg = VEPU_REG_MB_WIDTH(MB_WIDTH(ctx->src_fmt.width))
+ | VEPU_REG_MB_HEIGHT(MB_HEIGHT(ctx->src_fmt.height))
| VEPU_REG_FRAME_TYPE_INTRA
| VEPU_REG_ENCODE_FORMAT_JPEG
| VEPU_REG_ENCODE_ENABLE;
diff --git a/drivers/staging/media/hantro/rk3399_vpu_hw_mpeg2_dec.c b/drivers/staging/media/hantro/rk3399_vpu_hw_mpeg2_dec.c
index e7ba5c0441cc..b40d2cdf832f 100644
--- a/drivers/staging/media/hantro/rk3399_vpu_hw_mpeg2_dec.c
+++ b/drivers/staging/media/hantro/rk3399_vpu_hw_mpeg2_dec.c
@@ -107,17 +107,14 @@ rk3399_vpu_mpeg2_dec_set_buffers(struct hantro_dev *vpu,
{
dma_addr_t forward_addr = 0, backward_addr = 0;
dma_addr_t current_addr, addr;
- struct vb2_queue *vq;
-
- vq = v4l2_m2m_get_dst_vq(ctx->fh.m2m_ctx);
switch (picture->picture_coding_type) {
case V4L2_MPEG2_PICTURE_CODING_TYPE_B:
- backward_addr = hantro_get_ref(vq,
+ backward_addr = hantro_get_ref(ctx,
slice_params->backward_ref_ts);
/* fall-through */
case V4L2_MPEG2_PICTURE_CODING_TYPE_P:
- forward_addr = hantro_get_ref(vq,
+ forward_addr = hantro_get_ref(ctx,
slice_params->forward_ref_ts);
}
@@ -223,8 +220,8 @@ void rk3399_vpu_mpeg2_dec_run(struct hantro_ctx *ctx)
VDPU_REG_DEC_CLK_GATE_E(1);
vdpu_write_relaxed(vpu, reg, VDPU_SWREG(57));
- reg = VDPU_REG_PIC_MB_WIDTH(MPEG2_MB_WIDTH(ctx->dst_fmt.width)) |
- VDPU_REG_PIC_MB_HEIGHT_P(MPEG2_MB_HEIGHT(ctx->dst_fmt.height)) |
+ reg = VDPU_REG_PIC_MB_WIDTH(MB_WIDTH(ctx->dst_fmt.width)) |
+ VDPU_REG_PIC_MB_HEIGHT_P(MB_HEIGHT(ctx->dst_fmt.height)) |
VDPU_REG_ALT_SCAN_E(picture->alternate_scan) |
VDPU_REG_TOPFIELDFIRST_E(picture->top_field_first);
vdpu_write_relaxed(vpu, reg, VDPU_SWREG(120));
diff --git a/drivers/staging/media/hantro/rk3399_vpu_hw_vp8_dec.c b/drivers/staging/media/hantro/rk3399_vpu_hw_vp8_dec.c
index f17e32620b08..76d7ed3fd69a 100644
--- a/drivers/staging/media/hantro/rk3399_vpu_hw_vp8_dec.c
+++ b/drivers/staging/media/hantro/rk3399_vpu_hw_vp8_dec.c
@@ -449,18 +449,16 @@ static void cfg_ref(struct hantro_ctx *ctx,
{
struct hantro_dev *vpu = ctx->dev;
struct vb2_v4l2_buffer *vb2_dst;
- struct vb2_queue *cap_q;
dma_addr_t ref;
- cap_q = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
vb2_dst = hantro_get_dst_buf(ctx);
- ref = hantro_get_ref(cap_q, hdr->last_frame_ts);
+ ref = hantro_get_ref(ctx, hdr->last_frame_ts);
if (!ref)
ref = vb2_dma_contig_plane_dma_addr(&vb2_dst->vb2_buf, 0);
vdpu_write_relaxed(vpu, ref, VDPU_REG_VP8_ADDR_REF0);
- ref = hantro_get_ref(cap_q, hdr->golden_frame_ts);
+ ref = hantro_get_ref(ctx, hdr->golden_frame_ts);
WARN_ON(!ref && hdr->golden_frame_ts);
if (!ref)
ref = vb2_dma_contig_plane_dma_addr(&vb2_dst->vb2_buf, 0);
@@ -468,7 +466,7 @@ static void cfg_ref(struct hantro_ctx *ctx,
ref |= VDPU_REG_VP8_GREF_SIGN_BIAS;
vdpu_write_relaxed(vpu, ref, VDPU_REG_VP8_ADDR_REF2_5(2));
- ref = hantro_get_ref(cap_q, hdr->alt_frame_ts);
+ ref = hantro_get_ref(ctx, hdr->alt_frame_ts);
WARN_ON(!ref && hdr->alt_frame_ts);
if (!ref)
ref = vb2_dma_contig_plane_dma_addr(&vb2_dst->vb2_buf, 0);
@@ -563,8 +561,8 @@ void rk3399_vpu_vp8_dec_run(struct hantro_ctx *ctx)
hantro_reg_write(vpu, &vp8_dec_filter_disable, 1);
/* Frame dimensions */
- mb_width = VP8_MB_WIDTH(width);
- mb_height = VP8_MB_HEIGHT(height);
+ mb_width = MB_WIDTH(width);
+ mb_height = MB_HEIGHT(height);
hantro_reg_write(vpu, &vp8_dec_mb_width, mb_width);
hantro_reg_write(vpu, &vp8_dec_mb_height, mb_height);
diff --git a/drivers/staging/media/imx/imx-ic-prp.c b/drivers/staging/media/imx/imx-ic-prp.c
index 35e60a120dc1..2a4f77e83ed3 100644
--- a/drivers/staging/media/imx/imx-ic-prp.c
+++ b/drivers/staging/media/imx/imx-ic-prp.c
@@ -428,32 +428,19 @@ static int prp_s_frame_interval(struct v4l2_subdev *sd,
return 0;
}
-/*
- * retrieve our pads parsed from the OF graph by the media device
- */
static int prp_registered(struct v4l2_subdev *sd)
{
struct prp_priv *priv = sd_to_priv(sd);
- int i, ret;
u32 code;
- for (i = 0; i < PRP_NUM_PADS; i++) {
- priv->pad[i].flags = (i == PRP_SINK_PAD) ?
- MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;
- }
-
/* init default frame interval */
priv->frame_interval.numerator = 1;
priv->frame_interval.denominator = 30;
/* set a default mbus format */
imx_media_enum_ipu_format(&code, 0, CS_SEL_YUV);
- ret = imx_media_init_mbus_fmt(&priv->format_mbus, 640, 480, code,
- V4L2_FIELD_NONE, NULL);
- if (ret)
- return ret;
-
- return media_entity_pads_init(&sd->entity, PRP_NUM_PADS, priv->pad);
+ return imx_media_init_mbus_fmt(&priv->format_mbus, 640, 480, code,
+ V4L2_FIELD_NONE, NULL);
}
static const struct v4l2_subdev_pad_ops prp_pad_ops = {
@@ -487,6 +474,7 @@ static const struct v4l2_subdev_internal_ops prp_internal_ops = {
static int prp_init(struct imx_ic_priv *ic_priv)
{
struct prp_priv *priv;
+ int i;
priv = devm_kzalloc(ic_priv->ipu_dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -496,7 +484,12 @@ static int prp_init(struct imx_ic_priv *ic_priv)
ic_priv->task_priv = priv;
priv->ic_priv = ic_priv;
- return 0;
+ for (i = 0; i < PRP_NUM_PADS; i++)
+ priv->pad[i].flags = (i == PRP_SINK_PAD) ?
+ MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;
+
+ return media_entity_pads_init(&ic_priv->sd.entity, PRP_NUM_PADS,
+ priv->pad);
}
static void prp_remove(struct imx_ic_priv *ic_priv)
diff --git a/drivers/staging/media/imx/imx-ic-prpencvf.c b/drivers/staging/media/imx/imx-ic-prpencvf.c
index 67ffa46a8e96..09c4e3f33807 100644
--- a/drivers/staging/media/imx/imx-ic-prpencvf.c
+++ b/drivers/staging/media/imx/imx-ic-prpencvf.c
@@ -1240,21 +1240,16 @@ static int prp_s_frame_interval(struct v4l2_subdev *sd,
return 0;
}
-/*
- * retrieve our pads parsed from the OF graph by the media device
- */
static int prp_registered(struct v4l2_subdev *sd)
{
struct prp_priv *priv = sd_to_priv(sd);
+ struct imx_ic_priv *ic_priv = priv->ic_priv;
int i, ret;
u32 code;
+ /* set a default mbus format */
+ imx_media_enum_ipu_format(&code, 0, CS_SEL_YUV);
for (i = 0; i < PRPENCVF_NUM_PADS; i++) {
- priv->pad[i].flags = (i == PRPENCVF_SINK_PAD) ?
- MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;
-
- /* set a default mbus format */
- imx_media_enum_ipu_format(&code, 0, CS_SEL_YUV);
ret = imx_media_init_mbus_fmt(&priv->format_mbus[i],
640, 480, code, V4L2_FIELD_NONE,
&priv->cc[i]);
@@ -1266,22 +1261,26 @@ static int prp_registered(struct v4l2_subdev *sd)
priv->frame_interval.numerator = 1;
priv->frame_interval.denominator = 30;
- ret = media_entity_pads_init(&sd->entity, PRPENCVF_NUM_PADS,
- priv->pad);
- if (ret)
- return ret;
+ priv->vdev = imx_media_capture_device_init(ic_priv->ipu_dev,
+ &ic_priv->sd,
+ PRPENCVF_SRC_PAD);
+ if (IS_ERR(priv->vdev))
+ return PTR_ERR(priv->vdev);
ret = imx_media_capture_device_register(priv->vdev);
if (ret)
- return ret;
+ goto remove_vdev;
ret = prp_init_controls(priv);
if (ret)
- goto unreg;
+ goto unreg_vdev;
return 0;
-unreg:
+
+unreg_vdev:
imx_media_capture_device_unregister(priv->vdev);
+remove_vdev:
+ imx_media_capture_device_remove(priv->vdev);
return ret;
}
@@ -1290,6 +1289,8 @@ static void prp_unregistered(struct v4l2_subdev *sd)
struct prp_priv *priv = sd_to_priv(sd);
imx_media_capture_device_unregister(priv->vdev);
+ imx_media_capture_device_remove(priv->vdev);
+
v4l2_ctrl_handler_free(&priv->ctrl_hdlr);
}
@@ -1325,6 +1326,7 @@ static const struct v4l2_subdev_internal_ops prp_internal_ops = {
static int prp_init(struct imx_ic_priv *ic_priv)
{
struct prp_priv *priv;
+ int i, ret;
priv = devm_kzalloc(ic_priv->ipu_dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -1336,15 +1338,19 @@ static int prp_init(struct imx_ic_priv *ic_priv)
spin_lock_init(&priv->irqlock);
timer_setup(&priv->eof_timeout_timer, prp_eof_timeout, 0);
- priv->vdev = imx_media_capture_device_init(ic_priv->ipu_dev,
- &ic_priv->sd,
- PRPENCVF_SRC_PAD);
- if (IS_ERR(priv->vdev))
- return PTR_ERR(priv->vdev);
-
mutex_init(&priv->lock);
- return 0;
+ for (i = 0; i < PRPENCVF_NUM_PADS; i++) {
+ priv->pad[i].flags = (i == PRPENCVF_SINK_PAD) ?
+ MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;
+ }
+
+ ret = media_entity_pads_init(&ic_priv->sd.entity, PRPENCVF_NUM_PADS,
+ priv->pad);
+ if (ret)
+ mutex_destroy(&priv->lock);
+
+ return ret;
}
static void prp_remove(struct imx_ic_priv *ic_priv)
@@ -1352,7 +1358,6 @@ static void prp_remove(struct imx_ic_priv *ic_priv)
struct prp_priv *priv = ic_priv->task_priv;
mutex_destroy(&priv->lock);
- imx_media_capture_device_remove(priv->vdev);
}
struct imx_ic_ops imx_ic_prpencvf_ops = {
diff --git a/drivers/staging/media/imx/imx-media-capture.c b/drivers/staging/media/imx/imx-media-capture.c
index b33a07bc9105..7712e7be8625 100644
--- a/drivers/staging/media/imx/imx-media-capture.c
+++ b/drivers/staging/media/imx/imx-media-capture.c
@@ -26,6 +26,8 @@
#include <media/imx.h>
#include "imx-media.h"
+#define IMX_CAPTURE_NAME "imx-capture"
+
struct capture_priv {
struct imx_media_video_dev vdev;
@@ -69,8 +71,8 @@ static int vidioc_querycap(struct file *file, void *fh,
{
struct capture_priv *priv = video_drvdata(file);
- strscpy(cap->driver, "imx-media-capture", sizeof(cap->driver));
- strscpy(cap->card, "imx-media-capture", sizeof(cap->card));
+ strscpy(cap->driver, IMX_CAPTURE_NAME, sizeof(cap->driver));
+ strscpy(cap->card, IMX_CAPTURE_NAME, sizeof(cap->card));
snprintf(cap->bus_info, sizeof(cap->bus_info),
"platform:%s", priv->src_sd->name);
@@ -765,13 +767,6 @@ int imx_media_capture_device_register(struct imx_media_video_dev *vdev)
INIT_LIST_HEAD(&priv->ready_q);
- priv->vdev_pad.flags = MEDIA_PAD_FL_SINK;
- ret = media_entity_pads_init(&vfd->entity, 1, &priv->vdev_pad);
- if (ret) {
- v4l2_err(sd, "failed to init dev pad\n");
- goto unreg;
- }
-
/* create the link from the src_sd devnode pad to device node */
ret = media_create_pad_link(&sd->entity, priv->src_sd_pad,
&vfd->entity, 0, 0);
@@ -834,6 +829,7 @@ imx_media_capture_device_init(struct device *dev, struct v4l2_subdev *src_sd,
{
struct capture_priv *priv;
struct video_device *vfd;
+ int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -858,6 +854,13 @@ imx_media_capture_device_init(struct device *dev, struct v4l2_subdev *src_sd,
vfd->queue = &priv->q;
priv->vdev.vfd = vfd;
+ priv->vdev_pad.flags = MEDIA_PAD_FL_SINK;
+ ret = media_entity_pads_init(&vfd->entity, 1, &priv->vdev_pad);
+ if (ret) {
+ video_device_release(vfd);
+ return ERR_PTR(ret);
+ }
+
INIT_LIST_HEAD(&priv->vdev.list);
video_set_drvdata(vfd, priv);
diff --git a/drivers/staging/media/imx/imx-media-csi.c b/drivers/staging/media/imx/imx-media-csi.c
index 367e39f5b382..b60ed4f22f6d 100644
--- a/drivers/staging/media/imx/imx-media-csi.c
+++ b/drivers/staging/media/imx/imx-media-csi.c
@@ -627,8 +627,8 @@ static int csi_idmac_start(struct csi_priv *priv)
}
priv->nfb4eof_irq = ipu_idmac_channel_irq(priv->ipu,
- priv->idmac_ch,
- IPU_IRQ_NFB4EOF);
+ priv->idmac_ch,
+ IPU_IRQ_NFB4EOF);
ret = devm_request_irq(priv->dev, priv->nfb4eof_irq,
csi_idmac_nfb4eof_interrupt, 0,
"imx-smfc-nfb4eof", priv);
@@ -1472,7 +1472,7 @@ static void csi_try_fmt(struct csi_priv *priv,
imx_media_enum_mbus_format(&code, 0,
CS_SEL_ANY, false);
*cc = imx_media_find_mbus_format(code,
- CS_SEL_ANY, false);
+ CS_SEL_ANY, false);
sdformat->format.code = (*cc)->codes[0];
}
@@ -1740,9 +1740,6 @@ static int csi_unsubscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
return v4l2_event_unsubscribe(fh, sub);
}
-/*
- * retrieve our pads parsed from the OF graph by the media device
- */
static int csi_registered(struct v4l2_subdev *sd)
{
struct csi_priv *priv = v4l2_get_subdevdata(sd);
@@ -1759,9 +1756,6 @@ static int csi_registered(struct v4l2_subdev *sd)
priv->csi = csi;
for (i = 0; i < CSI_NUM_PADS; i++) {
- priv->pad[i].flags = (i == CSI_SINK_PAD) ?
- MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;
-
code = 0;
if (i != CSI_SINK_PAD)
imx_media_enum_ipu_format(&code, 0, CS_SEL_YUV);
@@ -1793,16 +1787,22 @@ static int csi_registered(struct v4l2_subdev *sd)
goto put_csi;
}
- ret = media_entity_pads_init(&sd->entity, CSI_NUM_PADS, priv->pad);
- if (ret)
+ priv->vdev = imx_media_capture_device_init(priv->sd.dev,
+ &priv->sd,
+ CSI_SRC_PAD_IDMAC);
+ if (IS_ERR(priv->vdev)) {
+ ret = PTR_ERR(priv->vdev);
goto free_fim;
+ }
ret = imx_media_capture_device_register(priv->vdev);
if (ret)
- goto free_fim;
+ goto remove_vdev;
return 0;
+remove_vdev:
+ imx_media_capture_device_remove(priv->vdev);
free_fim:
if (priv->fim)
imx_media_fim_free(priv->fim);
@@ -1816,6 +1816,7 @@ static void csi_unregistered(struct v4l2_subdev *sd)
struct csi_priv *priv = v4l2_get_subdevdata(sd);
imx_media_capture_device_unregister(priv->vdev);
+ imx_media_capture_device_remove(priv->vdev);
if (priv->fim)
imx_media_fim_free(priv->fim);
@@ -1923,7 +1924,7 @@ static int imx_csi_probe(struct platform_device *pdev)
struct ipu_client_platformdata *pdata;
struct pinctrl *pinctrl;
struct csi_priv *priv;
- int ret;
+ int i, ret;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -1963,10 +1964,14 @@ static int imx_csi_probe(struct platform_device *pdev)
imx_media_grp_id_to_sd_name(priv->sd.name, sizeof(priv->sd.name),
priv->sd.grp_id, ipu_get_num(priv->ipu));
- priv->vdev = imx_media_capture_device_init(priv->sd.dev, &priv->sd,
- CSI_SRC_PAD_IDMAC);
- if (IS_ERR(priv->vdev))
- return PTR_ERR(priv->vdev);
+ for (i = 0; i < CSI_NUM_PADS; i++)
+ priv->pad[i].flags = (i == CSI_SINK_PAD) ?
+ MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;
+
+ ret = media_entity_pads_init(&priv->sd.entity, CSI_NUM_PADS,
+ priv->pad);
+ if (ret)
+ return ret;
mutex_init(&priv->lock);
@@ -1997,7 +2002,6 @@ static int imx_csi_probe(struct platform_device *pdev)
free:
v4l2_ctrl_handler_free(&priv->ctrl_hdlr);
mutex_destroy(&priv->lock);
- imx_media_capture_device_remove(priv->vdev);
return ret;
}
@@ -2008,7 +2012,6 @@ static int imx_csi_remove(struct platform_device *pdev)
v4l2_ctrl_handler_free(&priv->ctrl_hdlr);
mutex_destroy(&priv->lock);
- imx_media_capture_device_remove(priv->vdev);
v4l2_async_unregister_subdev(sd);
media_entity_cleanup(&sd->entity);
diff --git a/drivers/staging/media/imx/imx-media-utils.c b/drivers/staging/media/imx/imx-media-utils.c
index 4cc6a7462ae2..0788a1874557 100644
--- a/drivers/staging/media/imx/imx-media-utils.c
+++ b/drivers/staging/media/imx/imx-media-utils.c
@@ -184,7 +184,15 @@ static const struct imx_media_pixfmt rgb_formats[] = {
.cs = IPUV3_COLORSPACE_RGB,
.bpp = 24,
}, {
- .fourcc = V4L2_PIX_FMT_BGR32,
+ .fourcc = V4L2_PIX_FMT_XBGR32,
+ .cs = IPUV3_COLORSPACE_RGB,
+ .bpp = 32,
+ }, {
+ .fourcc = V4L2_PIX_FMT_BGRX32,
+ .cs = IPUV3_COLORSPACE_RGB,
+ .bpp = 32,
+ }, {
+ .fourcc = V4L2_PIX_FMT_RGBX32,
.cs = IPUV3_COLORSPACE_RGB,
.bpp = 32,
},
diff --git a/drivers/staging/media/imx/imx-media-vdic.c b/drivers/staging/media/imx/imx-media-vdic.c
index cfad65a16917..0d83c2c41606 100644
--- a/drivers/staging/media/imx/imx-media-vdic.c
+++ b/drivers/staging/media/imx/imx-media-vdic.c
@@ -841,9 +841,6 @@ out:
return ret;
}
-/*
- * retrieve our pads parsed from the OF graph by the media device
- */
static int vdic_registered(struct v4l2_subdev *sd)
{
struct vdic_priv *priv = v4l2_get_subdevdata(sd);
@@ -851,9 +848,6 @@ static int vdic_registered(struct v4l2_subdev *sd)
u32 code;
for (i = 0; i < VDIC_NUM_PADS; i++) {
- priv->pad[i].flags = (i == VDIC_SRC_PAD_DIRECT) ?
- MEDIA_PAD_FL_SOURCE : MEDIA_PAD_FL_SINK;
-
code = 0;
if (i != VDIC_SINK_PAD_IDMAC)
imx_media_enum_ipu_format(&code, 0, CS_SEL_YUV);
@@ -874,15 +868,7 @@ static int vdic_registered(struct v4l2_subdev *sd)
priv->active_input_pad = VDIC_SINK_PAD_DIRECT;
- ret = vdic_init_controls(priv);
- if (ret)
- return ret;
-
- ret = media_entity_pads_init(&sd->entity, VDIC_NUM_PADS, priv->pad);
- if (ret)
- v4l2_ctrl_handler_free(&priv->ctrl_hdlr);
-
- return ret;
+ return vdic_init_controls(priv);
}
static void vdic_unregistered(struct v4l2_subdev *sd)
@@ -927,7 +913,7 @@ struct v4l2_subdev *imx_media_vdic_register(struct v4l2_device *v4l2_dev,
u32 grp_id)
{
struct vdic_priv *priv;
- int ret;
+ int i, ret;
priv = devm_kzalloc(ipu_dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -949,6 +935,15 @@ struct v4l2_subdev *imx_media_vdic_register(struct v4l2_device *v4l2_dev,
mutex_init(&priv->lock);
+ for (i = 0; i < VDIC_NUM_PADS; i++)
+ priv->pad[i].flags = (i == VDIC_SRC_PAD_DIRECT) ?
+ MEDIA_PAD_FL_SOURCE : MEDIA_PAD_FL_SINK;
+
+ ret = media_entity_pads_init(&priv->sd.entity, VDIC_NUM_PADS,
+ priv->pad);
+ if (ret)
+ goto free;
+
ret = v4l2_device_register_subdev(v4l2_dev, &priv->sd);
if (ret)
goto free;
diff --git a/drivers/staging/media/imx/imx6-mipi-csi2.c b/drivers/staging/media/imx/imx6-mipi-csi2.c
index bfa4b254c4e4..cd3dd6e33ef0 100644
--- a/drivers/staging/media/imx/imx6-mipi-csi2.c
+++ b/drivers/staging/media/imx/imx6-mipi-csi2.c
@@ -497,26 +497,13 @@ out:
return ret;
}
-/*
- * retrieve our pads parsed from the OF graph by the media device
- */
static int csi2_registered(struct v4l2_subdev *sd)
{
struct csi2_dev *csi2 = sd_to_dev(sd);
- int i, ret;
-
- for (i = 0; i < CSI2_NUM_PADS; i++) {
- csi2->pad[i].flags = (i == CSI2_SINK_PAD) ?
- MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;
- }
/* set a default mbus format */
- ret = imx_media_init_mbus_fmt(&csi2->format_mbus,
+ return imx_media_init_mbus_fmt(&csi2->format_mbus,
640, 480, 0, V4L2_FIELD_NONE, NULL);
- if (ret)
- return ret;
-
- return media_entity_pads_init(&sd->entity, CSI2_NUM_PADS, csi2->pad);
}
static const struct media_entity_operations csi2_entity_ops = {
@@ -573,7 +560,7 @@ static int csi2_probe(struct platform_device *pdev)
unsigned int sink_port = 0;
struct csi2_dev *csi2;
struct resource *res;
- int ret;
+ int i, ret;
csi2 = devm_kzalloc(&pdev->dev, sizeof(*csi2), GFP_KERNEL);
if (!csi2)
@@ -592,6 +579,16 @@ static int csi2_probe(struct platform_device *pdev)
csi2->sd.entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
csi2->sd.grp_id = IMX_MEDIA_GRP_ID_CSI2;
+ for (i = 0; i < CSI2_NUM_PADS; i++) {
+ csi2->pad[i].flags = (i == CSI2_SINK_PAD) ?
+ MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;
+ }
+
+ ret = media_entity_pads_init(&csi2->sd.entity, CSI2_NUM_PADS,
+ csi2->pad);
+ if (ret)
+ return ret;
+
csi2->pllref_clk = devm_clk_get(&pdev->dev, "ref");
if (IS_ERR(csi2->pllref_clk)) {
v4l2_err(&csi2->sd, "failed to get pll reference clock\n");
diff --git a/drivers/staging/media/imx/imx7-media-csi.c b/drivers/staging/media/imx/imx7-media-csi.c
index bfd6b5fbf484..db30e2c70f2f 100644
--- a/drivers/staging/media/imx/imx7-media-csi.c
+++ b/drivers/staging/media/imx/imx7-media-csi.c
@@ -1100,9 +1100,6 @@ static int imx7_csi_registered(struct v4l2_subdev *sd)
int i;
for (i = 0; i < IMX7_CSI_PADS_NUM; i++) {
- csi->pad[i].flags = (i == IMX7_CSI_PAD_SINK) ?
- MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;
-
/* set a default mbus format */
ret = imx_media_init_mbus_fmt(&csi->format_mbus[i],
800, 600, 0, V4L2_FIELD_NONE,
@@ -1115,11 +1112,16 @@ static int imx7_csi_registered(struct v4l2_subdev *sd)
csi->frame_interval[i].denominator = 30;
}
- ret = media_entity_pads_init(&sd->entity, IMX7_CSI_PADS_NUM, csi->pad);
- if (ret < 0)
- return ret;
+ csi->vdev = imx_media_capture_device_init(csi->sd.dev, &csi->sd,
+ IMX7_CSI_PAD_SRC);
+ if (IS_ERR(csi->vdev))
+ return PTR_ERR(csi->vdev);
+
+ ret = imx_media_capture_device_register(csi->vdev);
+ if (ret)
+ imx_media_capture_device_remove(csi->vdev);
- return imx_media_capture_device_register(csi->vdev);
+ return ret;
}
static void imx7_csi_unregistered(struct v4l2_subdev *sd)
@@ -1127,6 +1129,7 @@ static void imx7_csi_unregistered(struct v4l2_subdev *sd)
struct imx7_csi *csi = v4l2_get_subdevdata(sd);
imx_media_capture_device_unregister(csi->vdev);
+ imx_media_capture_device_remove(csi->vdev);
}
static int imx7_csi_init_cfg(struct v4l2_subdev *sd,
@@ -1189,7 +1192,7 @@ static int imx7_csi_probe(struct platform_device *pdev)
struct device_node *node = dev->of_node;
struct imx_media_dev *imxmd;
struct imx7_csi *csi;
- int ret;
+ int i, ret;
csi = devm_kzalloc(&pdev->dev, sizeof(*csi), GFP_KERNEL);
if (!csi)
@@ -1251,14 +1254,18 @@ static int imx7_csi_probe(struct platform_device *pdev)
csi->sd.grp_id = IMX_MEDIA_GRP_ID_CSI;
snprintf(csi->sd.name, sizeof(csi->sd.name), "csi");
- csi->vdev = imx_media_capture_device_init(csi->sd.dev, &csi->sd,
- IMX7_CSI_PAD_SRC);
- if (IS_ERR(csi->vdev))
- return PTR_ERR(csi->vdev);
-
v4l2_ctrl_handler_init(&csi->ctrl_hdlr, 0);
csi->sd.ctrl_handler = &csi->ctrl_hdlr;
+ for (i = 0; i < IMX7_CSI_PADS_NUM; i++)
+ csi->pad[i].flags = (i == IMX7_CSI_PAD_SINK) ?
+ MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;
+
+ ret = media_entity_pads_init(&csi->sd.entity, IMX7_CSI_PADS_NUM,
+ csi->pad);
+ if (ret < 0)
+ goto free;
+
ret = v4l2_async_register_fwnode_subdev(&csi->sd,
sizeof(struct v4l2_async_subdev),
NULL, 0,
@@ -1269,8 +1276,6 @@ static int imx7_csi_probe(struct platform_device *pdev)
return 0;
free:
- imx_media_capture_device_unregister(csi->vdev);
- imx_media_capture_device_remove(csi->vdev);
v4l2_ctrl_handler_free(&csi->ctrl_hdlr);
cleanup:
@@ -1298,9 +1303,6 @@ static int imx7_csi_remove(struct platform_device *pdev)
v4l2_device_unregister(&imxmd->v4l2_dev);
media_device_cleanup(&imxmd->md);
- imx_media_capture_device_unregister(csi->vdev);
- imx_media_capture_device_remove(csi->vdev);
-
v4l2_async_unregister_subdev(sd);
v4l2_ctrl_handler_free(&csi->ctrl_hdlr);
diff --git a/drivers/staging/media/imx/imx7-mipi-csis.c b/drivers/staging/media/imx/imx7-mipi-csis.c
index 73d8354e618c..99166afca071 100644
--- a/drivers/staging/media/imx/imx7-mipi-csis.c
+++ b/drivers/staging/media/imx/imx7-mipi-csis.c
@@ -293,7 +293,7 @@ static int mipi_csis_dump_regs(struct csi_state *state)
struct device *dev = &state->pdev->dev;
unsigned int i;
u32 cfg;
- struct {
+ static const struct {
u32 offset;
const char * const name;
} registers[] = {
@@ -350,6 +350,8 @@ static void mipi_csis_sw_reset(struct csi_state *state)
static int mipi_csis_phy_init(struct csi_state *state)
{
state->mipi_phy_regulator = devm_regulator_get(state->dev, "phy");
+ if (IS_ERR(state->mipi_phy_regulator))
+ return PTR_ERR(state->mipi_phy_regulator);
return regulator_set_voltage(state->mipi_phy_regulator, 1000000,
1000000);
@@ -780,17 +782,6 @@ static irqreturn_t mipi_csis_irq_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int mipi_csis_registered(struct v4l2_subdev *mipi_sd)
-{
- struct csi_state *state = mipi_sd_to_csis_state(mipi_sd);
-
- state->pads[CSIS_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
- state->pads[CSIS_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
-
- return media_entity_pads_init(&state->mipi_sd.entity, CSIS_PADS_NUM,
- state->pads);
-}
-
static const struct v4l2_subdev_core_ops mipi_csis_core_ops = {
.log_status = mipi_csis_log_status,
};
@@ -816,10 +807,6 @@ static const struct v4l2_subdev_ops mipi_csis_subdev_ops = {
.pad = &mipi_csis_pad_ops,
};
-static const struct v4l2_subdev_internal_ops mipi_csis_internal_ops = {
- .registered = mipi_csis_registered,
-};
-
static int mipi_csis_parse_dt(struct platform_device *pdev,
struct csi_state *state)
{
@@ -880,7 +867,6 @@ static int mipi_csis_subdev_init(struct v4l2_subdev *mipi_sd,
mipi_sd->entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
mipi_sd->entity.ops = &mipi_csis_entity_ops;
- mipi_sd->internal_ops = &mipi_csis_internal_ops;
mipi_sd->dev = &pdev->dev;
@@ -892,6 +878,13 @@ static int mipi_csis_subdev_init(struct v4l2_subdev *mipi_sd,
v4l2_set_subdevdata(mipi_sd, &pdev->dev);
+ state->pads[CSIS_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ state->pads[CSIS_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_pads_init(&mipi_sd->entity, CSIS_PADS_NUM,
+ state->pads);
+ if (ret)
+ return ret;
+
ret = v4l2_async_register_fwnode_subdev(mipi_sd,
sizeof(struct v4l2_async_subdev),
&sink_port, 1,
@@ -947,7 +940,6 @@ static void mipi_csis_debugfs_exit(struct csi_state *state)
static int mipi_csis_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct resource *mem_res;
struct csi_state *state;
int ret;
@@ -966,11 +958,13 @@ static int mipi_csis_probe(struct platform_device *pdev)
return ret;
}
- mipi_csis_phy_init(state);
+ ret = mipi_csis_phy_init(state);
+ if (ret < 0)
+ return ret;
+
mipi_csis_phy_reset(state);
- mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- state->regs = devm_ioremap_resource(dev, mem_res);
+ state->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(state->regs))
return PTR_ERR(state->regs);
diff --git a/drivers/staging/media/ipu3/Makefile b/drivers/staging/media/ipu3/Makefile
index cc288ae6d5f2..9def80ef28f3 100644
--- a/drivers/staging/media/ipu3/Makefile
+++ b/drivers/staging/media/ipu3/Makefile
@@ -10,9 +10,3 @@ ipu3-imgu-objs += \
ipu3-css.o ipu3-v4l2.o ipu3.o
obj-$(CONFIG_VIDEO_IPU3_IMGU) += ipu3-imgu.o
-
-# HACK! While this driver is in bad shape, don't enable several warnings
-# that would be otherwise enabled with W=1
-ccflags-y += $(call cc-disable-warning, packed-not-aligned)
-ccflags-y += $(call cc-disable-warning, type-limits)
-ccflags-y += $(call cc-disable-warning, unused-const-variable)
diff --git a/drivers/staging/media/ipu3/TODO b/drivers/staging/media/ipu3/TODO
index 5e55baeaea1a..b44bb4a72ca7 100644
--- a/drivers/staging/media/ipu3/TODO
+++ b/drivers/staging/media/ipu3/TODO
@@ -9,12 +9,9 @@ staging directory.
relevant. (Sakari)
- IPU3 driver documentation (Laurent)
- Add diagram in driver rst to describe output capability.
Comments on configuring v4l2 subdevs for CIO2 and ImgU.
- uAPI documentation:
- Further clarification on some ambiguities such as data type conversion of
- IEFD CU inputs. (Sakari)
Move acronyms to doc-rst file. (Mauro)
- Switch to yavta from v4l2n in driver docs.
@@ -27,5 +24,3 @@ staging directory.
- Document different operation modes, and which buffer queues are relevant
in each mode. To process an image, which queues require a buffer and in
which ones is it optional?
-
-- Make sure it builds fine with no warnings with W=1
diff --git a/drivers/staging/media/ipu3/include/intel-ipu3.h b/drivers/staging/media/ipu3/include/intel-ipu3.h
index c7cd27efac8a..08eaa0bad0de 100644
--- a/drivers/staging/media/ipu3/include/intel-ipu3.h
+++ b/drivers/staging/media/ipu3/include/intel-ipu3.h
@@ -1217,6 +1217,11 @@ struct ipu3_uapi_shd_config {
*
* All CU inputs are unsigned; they will be converted to signed when written
* to a register, i.e. a01 will be written to a 9-bit register in s4.4 format.
+ * The data precision s4.4 means 4 bits for the integer part and 4 bits for
+ * the fractional part, with the leading bit indicating the sign.
+ * For userspace software (commonly the imaging library), the computation of
+ * the CU slope values should be based on the slope resolution of 1/16 (binary
+ * 0.0001, the minimal interval value); the slope value range is [-256, +255].
* This applies to &ipu3_uapi_iefd_cux6_ed, &ipu3_uapi_iefd_cux2_1,
* &ipu3_uapi_iefd_cux2_1, &ipu3_uapi_iefd_cux4 and &ipu3_uapi_iefd_cux6_rad.
*/
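
As a concrete reading of the s4.4 description above, a userspace helper could quantize a slope to the register encoding as follows (a sketch only, not part of this patch; the helper name is hypothetical):

	#include <math.h>

	/* Quantize a CU slope to the s4.4 encoding described above:
	 * 1 LSB == 1/16, and the signed 9-bit register value must stay
	 * within [-256, +255]. */
	static int slope_to_s44(double slope)
	{
		long v = lround(slope * 16.0);

		if (v < -256)
			v = -256;
		else if (v > 255)
			v = 255;

		return (int)v;
	}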
diff --git a/drivers/staging/media/omap4iss/iss.c b/drivers/staging/media/omap4iss/iss.c
index 1a966cb2f3a6..6fb60b58447a 100644
--- a/drivers/staging/media/omap4iss/iss.c
+++ b/drivers/staging/media/omap4iss/iss.c
@@ -908,11 +908,7 @@ static int iss_map_mem_resource(struct platform_device *pdev,
struct iss_device *iss,
enum iss_mem_resources res)
{
- struct resource *mem;
-
- mem = platform_get_resource(pdev, IORESOURCE_MEM, res);
-
- iss->regs[res] = devm_ioremap_resource(iss->dev, mem);
+ iss->regs[res] = devm_platform_ioremap_resource(pdev, res);
return PTR_ERR_OR_ZERO(iss->regs[res]);
}
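
This is the same mechanical conversion applied to imx7-mipi-csis.c above and to cedrus_hw.c below: devm_platform_ioremap_resource() folds the resource lookup and the mapping into a single call. The equivalence, spelled out:

	/* Before: two calls. */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);

	/* After: one helper performing the same lookup and mapping. */
	base = devm_platform_ioremap_resource(pdev, 0);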
diff --git a/drivers/staging/media/omap4iss/iss_video.c b/drivers/staging/media/omap4iss/iss_video.c
index 54144dc9f509..673aa3a5f2bd 100644
--- a/drivers/staging/media/omap4iss/iss_video.c
+++ b/drivers/staging/media/omap4iss/iss_video.c
@@ -671,7 +671,7 @@ iss_video_get_selection(struct file *file, void *fh, struct v4l2_selection *sel)
return -EINVAL;
}
subdev = iss_video_remote_subdev(video, &pad);
- if (subdev == NULL)
+ if (!subdev)
return -EINVAL;
/*
@@ -726,7 +726,7 @@ iss_video_set_selection(struct file *file, void *fh, struct v4l2_selection *sel)
return -EINVAL;
}
subdev = iss_video_remote_subdev(video, &pad);
- if (subdev == NULL)
+ if (!subdev)
return -EINVAL;
sdsel.pad = pad;
diff --git a/drivers/staging/media/sunxi/cedrus/Makefile b/drivers/staging/media/sunxi/cedrus/Makefile
index c85ac6db0302..1bce49d3e7e2 100644
--- a/drivers/staging/media/sunxi/cedrus/Makefile
+++ b/drivers/staging/media/sunxi/cedrus/Makefile
@@ -2,4 +2,4 @@
obj-$(CONFIG_VIDEO_SUNXI_CEDRUS) += sunxi-cedrus.o
sunxi-cedrus-y = cedrus.o cedrus_video.o cedrus_hw.o cedrus_dec.o \
- cedrus_mpeg2.o cedrus_h264.o
+ cedrus_mpeg2.o cedrus_h264.o cedrus_h265.o
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus.c b/drivers/staging/media/sunxi/cedrus/cedrus.c
index 2d3ea8b74dfd..c6ddd46eff82 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus.c
@@ -95,6 +95,45 @@ static const struct cedrus_control cedrus_controls[] = {
.codec = CEDRUS_CODEC_H264,
.required = false,
},
+ {
+ .cfg = {
+ .id = V4L2_CID_MPEG_VIDEO_HEVC_SPS,
+ },
+ .codec = CEDRUS_CODEC_H265,
+ .required = true,
+ },
+ {
+ .cfg = {
+ .id = V4L2_CID_MPEG_VIDEO_HEVC_PPS,
+ },
+ .codec = CEDRUS_CODEC_H265,
+ .required = true,
+ },
+ {
+ .cfg = {
+ .id = V4L2_CID_MPEG_VIDEO_HEVC_SLICE_PARAMS,
+ },
+ .codec = CEDRUS_CODEC_H265,
+ .required = true,
+ },
+ {
+ .cfg = {
+ .id = V4L2_CID_MPEG_VIDEO_HEVC_DECODE_MODE,
+ .max = V4L2_MPEG_VIDEO_HEVC_DECODE_MODE_SLICE_BASED,
+ .def = V4L2_MPEG_VIDEO_HEVC_DECODE_MODE_SLICE_BASED,
+ },
+ .codec = CEDRUS_CODEC_H265,
+ .required = false,
+ },
+ {
+ .cfg = {
+ .id = V4L2_CID_MPEG_VIDEO_HEVC_START_CODE,
+ .max = V4L2_MPEG_VIDEO_HEVC_START_CODE_NONE,
+ .def = V4L2_MPEG_VIDEO_HEVC_START_CODE_NONE,
+ },
+ .codec = CEDRUS_CODEC_H265,
+ .required = false,
+ },
};
#define CEDRUS_CONTROLS_COUNT ARRAY_SIZE(cedrus_controls)
@@ -241,6 +280,16 @@ static int cedrus_open(struct file *file)
ret = PTR_ERR(ctx->fh.m2m_ctx);
goto err_ctrls;
}
+ ctx->dst_fmt.pixelformat = V4L2_PIX_FMT_SUNXI_TILED_NV12;
+ cedrus_prepare_format(&ctx->dst_fmt);
+ ctx->src_fmt.pixelformat = V4L2_PIX_FMT_MPEG2_SLICE;
+ /*
+ * TILED_NV12 has stricter requirements, so copy the width and
+ * height to src_fmt to ensure that it matches the dst_fmt resolution.
+ */
+ ctx->src_fmt.width = ctx->dst_fmt.width;
+ ctx->src_fmt.height = ctx->dst_fmt.height;
+ cedrus_prepare_format(&ctx->src_fmt);
v4l2_fh_add(&ctx->fh);
@@ -330,6 +379,7 @@ static int cedrus_probe(struct platform_device *pdev)
dev->dec_ops[CEDRUS_CODEC_MPEG2] = &cedrus_dec_ops_mpeg2;
dev->dec_ops[CEDRUS_CODEC_H264] = &cedrus_dec_ops_h264;
+ dev->dec_ops[CEDRUS_CODEC_H265] = &cedrus_dec_ops_h265;
mutex_init(&dev->dev_mutex);
@@ -357,6 +407,8 @@ static int cedrus_probe(struct platform_device *pdev)
dev->mdev.dev = &pdev->dev;
strscpy(dev->mdev.model, CEDRUS_NAME, sizeof(dev->mdev.model));
+ strscpy(dev->mdev.bus_info, "platform:" CEDRUS_NAME,
+ sizeof(dev->mdev.bus_info));
media_device_init(&dev->mdev);
dev->mdev.ops = &cedrus_m2m_media_ops;
@@ -438,22 +490,26 @@ static const struct cedrus_variant sun8i_a33_cedrus_variant = {
};
static const struct cedrus_variant sun8i_h3_cedrus_variant = {
- .capabilities = CEDRUS_CAPABILITY_UNTILED,
+ .capabilities = CEDRUS_CAPABILITY_UNTILED |
+ CEDRUS_CAPABILITY_H265_DEC,
.mod_rate = 402000000,
};
static const struct cedrus_variant sun50i_a64_cedrus_variant = {
- .capabilities = CEDRUS_CAPABILITY_UNTILED,
+ .capabilities = CEDRUS_CAPABILITY_UNTILED |
+ CEDRUS_CAPABILITY_H265_DEC,
.mod_rate = 402000000,
};
static const struct cedrus_variant sun50i_h5_cedrus_variant = {
- .capabilities = CEDRUS_CAPABILITY_UNTILED,
+ .capabilities = CEDRUS_CAPABILITY_UNTILED |
+ CEDRUS_CAPABILITY_H265_DEC,
.mod_rate = 402000000,
};
static const struct cedrus_variant sun50i_h6_cedrus_variant = {
- .capabilities = CEDRUS_CAPABILITY_UNTILED,
+ .capabilities = CEDRUS_CAPABILITY_UNTILED |
+ CEDRUS_CAPABILITY_H265_DEC,
.quirks = CEDRUS_QUIRK_NO_DMA_OFFSET,
.mod_rate = 600000000,
};
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus.h b/drivers/staging/media/sunxi/cedrus/cedrus.h
index 2f017a651848..96765555ab8a 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus.h
+++ b/drivers/staging/media/sunxi/cedrus/cedrus.h
@@ -27,12 +27,14 @@
#define CEDRUS_NAME "cedrus"
#define CEDRUS_CAPABILITY_UNTILED BIT(0)
+#define CEDRUS_CAPABILITY_H265_DEC BIT(1)
#define CEDRUS_QUIRK_NO_DMA_OFFSET BIT(0)
enum cedrus_codec {
CEDRUS_CODEC_MPEG2,
CEDRUS_CODEC_H264,
+ CEDRUS_CODEC_H265,
CEDRUS_CODEC_LAST,
};
@@ -67,6 +69,12 @@ struct cedrus_mpeg2_run {
const struct v4l2_ctrl_mpeg2_quantization *quantization;
};
+struct cedrus_h265_run {
+ const struct v4l2_ctrl_hevc_sps *sps;
+ const struct v4l2_ctrl_hevc_pps *pps;
+ const struct v4l2_ctrl_hevc_slice_params *slice_params;
+};
+
struct cedrus_run {
struct vb2_v4l2_buffer *src;
struct vb2_v4l2_buffer *dst;
@@ -74,6 +82,7 @@ struct cedrus_run {
union {
struct cedrus_h264_run h264;
struct cedrus_mpeg2_run mpeg2;
+ struct cedrus_h265_run h265;
};
};
@@ -107,9 +116,24 @@ struct cedrus_ctx {
ssize_t mv_col_buf_size;
void *pic_info_buf;
dma_addr_t pic_info_buf_dma;
+ ssize_t pic_info_buf_size;
void *neighbor_info_buf;
dma_addr_t neighbor_info_buf_dma;
+ void *deblk_buf;
+ dma_addr_t deblk_buf_dma;
+ ssize_t deblk_buf_size;
+ void *intra_pred_buf;
+ dma_addr_t intra_pred_buf_dma;
+ ssize_t intra_pred_buf_size;
} h264;
+ struct {
+ void *mv_col_buf;
+ dma_addr_t mv_col_buf_addr;
+ ssize_t mv_col_buf_size;
+ ssize_t mv_col_buf_unit_size;
+ void *neighbor_info_buf;
+ dma_addr_t neighbor_info_buf_addr;
+ } h265;
} codec;
};
@@ -155,6 +179,7 @@ struct cedrus_dev {
extern struct cedrus_dec_ops cedrus_dec_ops_mpeg2;
extern struct cedrus_dec_ops cedrus_dec_ops_h264;
+extern struct cedrus_dec_ops cedrus_dec_ops_h265;
static inline void cedrus_write(struct cedrus_dev *dev, u32 reg, u32 val)
{
@@ -179,12 +204,16 @@ static inline dma_addr_t cedrus_buf_addr(struct vb2_buffer *buf,
static inline dma_addr_t cedrus_dst_buf_addr(struct cedrus_ctx *ctx,
int index, unsigned int plane)
{
- struct vb2_buffer *buf;
+ struct vb2_buffer *buf = NULL;
+ struct vb2_queue *vq;
if (index < 0)
return 0;
- buf = ctx->fh.m2m_ctx->cap_q_ctx.q.bufs[index];
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ if (vq)
+ buf = vb2_get_buffer(vq, index);
+
return buf ? cedrus_buf_addr(buf, &ctx->dst_fmt, plane) : 0;
}
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_dec.c b/drivers/staging/media/sunxi/cedrus/cedrus_dec.c
index 56ca4c9ad01c..4a2fc33a1d79 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_dec.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_dec.c
@@ -59,6 +59,15 @@ void cedrus_device_run(void *priv)
V4L2_CID_MPEG_VIDEO_H264_SPS);
break;
+ case V4L2_PIX_FMT_HEVC_SLICE:
+ run.h265.sps = cedrus_find_control_data(ctx,
+ V4L2_CID_MPEG_VIDEO_HEVC_SPS);
+ run.h265.pps = cedrus_find_control_data(ctx,
+ V4L2_CID_MPEG_VIDEO_HEVC_PPS);
+ run.h265.slice_params = cedrus_find_control_data(ctx,
+ V4L2_CID_MPEG_VIDEO_HEVC_SLICE_PARAMS);
+ break;
+
default:
break;
}
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_h264.c b/drivers/staging/media/sunxi/cedrus/cedrus_h264.c
index d6a782703c9b..bfb4a4820a67 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_h264.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_h264.c
@@ -6,6 +6,7 @@
* Copyright (c) 2018 Bootlin
*/
+#include <linux/delay.h>
#include <linux/types.h>
#include <media/videobuf2-dma-contig.h>
@@ -38,7 +39,7 @@ struct cedrus_h264_sram_ref_pic {
#define CEDRUS_H264_FRAME_NUM 18
#define CEDRUS_NEIGHBOR_INFO_BUF_SIZE (16 * SZ_1K)
-#define CEDRUS_PIC_INFO_BUF_SIZE (128 * SZ_1K)
+#define CEDRUS_MIN_PIC_INFO_BUF_SIZE (130 * SZ_1K)
static void cedrus_h264_write_sram(struct cedrus_dev *dev,
enum cedrus_h264_sram_off off,
@@ -96,7 +97,7 @@ static void cedrus_write_frame_list(struct cedrus_ctx *ctx,
const struct v4l2_ctrl_h264_decode_params *decode = run->h264.decode_params;
const struct v4l2_ctrl_h264_slice_params *slice = run->h264.slice_params;
const struct v4l2_ctrl_h264_sps *sps = run->h264.sps;
- struct vb2_queue *cap_q = &ctx->fh.m2m_ctx->cap_q_ctx.q;
+ struct vb2_queue *cap_q;
struct cedrus_buffer *output_buf;
struct cedrus_dev *dev = ctx->dev;
unsigned long used_dpbs = 0;
@@ -104,6 +105,8 @@ static void cedrus_write_frame_list(struct cedrus_ctx *ctx,
unsigned int output = 0;
unsigned int i;
+ cap_q = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+
memset(pic_list, 0, sizeof(pic_list));
for (i = 0; i < ARRAY_SIZE(decode->dpb); i++) {
@@ -167,12 +170,14 @@ static void _cedrus_write_ref_list(struct cedrus_ctx *ctx,
enum cedrus_h264_sram_off sram)
{
const struct v4l2_ctrl_h264_decode_params *decode = run->h264.decode_params;
- struct vb2_queue *cap_q = &ctx->fh.m2m_ctx->cap_q_ctx.q;
+ struct vb2_queue *cap_q;
struct cedrus_dev *dev = ctx->dev;
u8 sram_array[CEDRUS_MAX_REF_IDX];
unsigned int i;
size_t size;
+ cap_q = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+
memset(sram_array, 0, sizeof(sram_array));
for (i = 0; i < num_ref; i++) {
@@ -240,8 +245,8 @@ static void cedrus_write_scaling_lists(struct cedrus_ctx *ctx,
sizeof(scaling->scaling_list_8x8[0]));
cedrus_h264_write_sram(dev, CEDRUS_SRAM_H264_SCALING_LIST_8x8_1,
- scaling->scaling_list_8x8[3],
- sizeof(scaling->scaling_list_8x8[3]));
+ scaling->scaling_list_8x8[1],
+ sizeof(scaling->scaling_list_8x8[1]));
cedrus_h264_write_sram(dev, CEDRUS_SRAM_H264_SCALING_LIST_4x4,
scaling->scaling_list_4x4,
@@ -289,6 +294,28 @@ static void cedrus_write_pred_weight_table(struct cedrus_ctx *ctx,
}
}
+/*
+ * It turns out that using VE_H264_VLD_OFFSET to skip bits is not reliable. In
+ * rare cases the frame is not decoded correctly. However, setting the offset
+ * to 0 and skipping the appropriate number of bits with the flush-bits trigger
+ * always works.
+ */
+static void cedrus_skip_bits(struct cedrus_dev *dev, int num)
+{
+ int count = 0;
+
+ while (count < num) {
+ int tmp = min(num - count, 32);
+
+ cedrus_write(dev, VE_H264_TRIGGER_TYPE,
+ VE_H264_TRIGGER_TYPE_FLUSH_BITS |
+ VE_H264_TRIGGER_TYPE_N_BITS(tmp));
+ while (cedrus_read(dev, VE_H264_STATUS) & VE_H264_STATUS_VLD_BUSY)
+ udelay(1);
+
+ count += tmp;
+ }
+}
+
static void cedrus_set_params(struct cedrus_ctx *ctx,
struct cedrus_run *run)
{
@@ -299,12 +326,13 @@ static void cedrus_set_params(struct cedrus_ctx *ctx,
struct vb2_buffer *src_buf = &run->src->vb2_buf;
struct cedrus_dev *dev = ctx->dev;
dma_addr_t src_buf_addr;
- u32 offset = slice->header_bit_size;
- u32 len = (slice->size * 8) - offset;
+ u32 len = slice->size * 8;
+ unsigned int pic_width_in_mbs;
+ bool mbaff_pic;
u32 reg;
cedrus_write(dev, VE_H264_VLD_LEN, len);
- cedrus_write(dev, VE_H264_VLD_OFFSET, offset);
+ cedrus_write(dev, VE_H264_VLD_OFFSET, 0);
src_buf_addr = vb2_dma_contig_plane_dma_addr(src_buf, 0);
cedrus_write(dev, VE_H264_VLD_END,
@@ -314,6 +342,20 @@ static void cedrus_set_params(struct cedrus_ctx *ctx,
VE_H264_VLD_ADDR_FIRST | VE_H264_VLD_ADDR_VALID |
VE_H264_VLD_ADDR_LAST);
+ if (ctx->src_fmt.width > 2048) {
+ cedrus_write(dev, VE_BUF_CTRL,
+ VE_BUF_CTRL_INTRAPRED_MIXED_RAM |
+ VE_BUF_CTRL_DBLK_MIXED_RAM);
+ cedrus_write(dev, VE_DBLK_DRAM_BUF_ADDR,
+ ctx->codec.h264.deblk_buf_dma);
+ cedrus_write(dev, VE_INTRAPRED_DRAM_BUF_ADDR,
+ ctx->codec.h264.intra_pred_buf_dma);
+ } else {
+ cedrus_write(dev, VE_BUF_CTRL,
+ VE_BUF_CTRL_INTRAPRED_INT_SRAM |
+ VE_BUF_CTRL_DBLK_INT_SRAM);
+ }
+
/*
* FIXME: Since the bitstream parsing is done in software, and
* in userspace, this shouldn't be needed anymore. But it
@@ -323,6 +365,8 @@ static void cedrus_set_params(struct cedrus_ctx *ctx,
cedrus_write(dev, VE_H264_TRIGGER_TYPE,
VE_H264_TRIGGER_TYPE_INIT_SWDEC);
+ cedrus_skip_bits(dev, slice->header_bit_size);
+
if (((pps->flags & V4L2_H264_PPS_FLAG_WEIGHTED_PRED) &&
(slice->slice_type == V4L2_H264_SLICE_TYPE_P ||
slice->slice_type == V4L2_H264_SLICE_TYPE_SP)) ||
@@ -370,12 +414,20 @@ static void cedrus_set_params(struct cedrus_ctx *ctx,
reg |= VE_H264_SPS_DIRECT_8X8_INFERENCE;
cedrus_write(dev, VE_H264_SPS, reg);
+ mbaff_pic = !(slice->flags & V4L2_H264_SLICE_FLAG_FIELD_PIC) &&
+ (sps->flags & V4L2_H264_SPS_FLAG_MB_ADAPTIVE_FRAME_FIELD);
+ pic_width_in_mbs = sps->pic_width_in_mbs_minus1 + 1;
+
// slice parameters
reg = 0;
+ reg |= ((slice->first_mb_in_slice % pic_width_in_mbs) & 0xff) << 24;
+ reg |= (((slice->first_mb_in_slice / pic_width_in_mbs) *
+ (mbaff_pic + 1)) & 0xff) << 16;
reg |= decode->nal_ref_idc ? BIT(12) : 0;
reg |= (slice->slice_type & 0xf) << 8;
reg |= slice->cabac_init_idc & 0x3;
- reg |= VE_H264_SHS_FIRST_SLICE_IN_PIC;
+ if (ctx->fh.m2m_ctx->new_frame)
+ reg |= VE_H264_SHS_FIRST_SLICE_IN_PIC;
if (slice->flags & V4L2_H264_SLICE_FLAG_FIELD_PIC)
reg |= VE_H264_SHS_FIELD_PIC;
if (slice->flags & V4L2_H264_SLICE_FLAG_BOTTOM_FIELD)
@@ -447,7 +499,7 @@ static void cedrus_h264_setup(struct cedrus_ctx *ctx,
{
struct cedrus_dev *dev = ctx->dev;
- cedrus_engine_enable(dev, CEDRUS_CODEC_H264);
+ cedrus_engine_enable(ctx, CEDRUS_CODEC_H264);
cedrus_write(dev, VE_H264_SDROT_CTRL, 0);
cedrus_write(dev, VE_H264_EXTRA_BUFFER1,
@@ -464,18 +516,30 @@ static void cedrus_h264_setup(struct cedrus_ctx *ctx,
static int cedrus_h264_start(struct cedrus_ctx *ctx)
{
struct cedrus_dev *dev = ctx->dev;
+ unsigned int pic_info_size;
unsigned int field_size;
unsigned int mv_col_size;
int ret;
+ /* The formula for the picture buffer size is taken from the CedarX source. */
+
+ if (ctx->src_fmt.width > 2048)
+ pic_info_size = CEDRUS_H264_FRAME_NUM * 0x4000;
+ else
+ pic_info_size = CEDRUS_H264_FRAME_NUM * 0x1000;
+
/*
- * FIXME: It seems that the H6 cedarX code is using a formula
- * here based on the size of the frame, while all the older
- * code is using a fixed size, so that might need to be
- * changed at some point.
+ * FIXME: If V4L2_H264_SPS_FLAG_FRAME_MBS_ONLY is set,
+ * there is no need to multiply by 2.
*/
+ pic_info_size += ctx->src_fmt.height * 2 * 64;
+
+ if (pic_info_size < CEDRUS_MIN_PIC_INFO_BUF_SIZE)
+ pic_info_size = CEDRUS_MIN_PIC_INFO_BUF_SIZE;
+
+ ctx->codec.h264.pic_info_buf_size = pic_info_size;
ctx->codec.h264.pic_info_buf =
- dma_alloc_coherent(dev->dev, CEDRUS_PIC_INFO_BUF_SIZE,
+ dma_alloc_coherent(dev->dev, ctx->codec.h264.pic_info_buf_size,
&ctx->codec.h264.pic_info_buf_dma,
GFP_KERNEL);
if (!ctx->codec.h264.pic_info_buf)
@@ -528,15 +592,56 @@ static int cedrus_h264_start(struct cedrus_ctx *ctx)
goto err_neighbor_buf;
}
+ if (ctx->src_fmt.width > 2048) {
+ /*
+ * Formulas for the deblock and intra-prediction buffer sizes
+ * are taken from the CedarX source.
+ */
+
+ ctx->codec.h264.deblk_buf_size =
+ ALIGN(ctx->src_fmt.width, 32) * 12;
+ ctx->codec.h264.deblk_buf =
+ dma_alloc_coherent(dev->dev,
+ ctx->codec.h264.deblk_buf_size,
+ &ctx->codec.h264.deblk_buf_dma,
+ GFP_KERNEL);
+ if (!ctx->codec.h264.deblk_buf) {
+ ret = -ENOMEM;
+ goto err_mv_col_buf;
+ }
+
+ ctx->codec.h264.intra_pred_buf_size =
+ ALIGN(ctx->src_fmt.width, 64) * 5;
+ ctx->codec.h264.intra_pred_buf =
+ dma_alloc_coherent(dev->dev,
+ ctx->codec.h264.intra_pred_buf_size,
+ &ctx->codec.h264.intra_pred_buf_dma,
+ GFP_KERNEL);
+ if (!ctx->codec.h264.intra_pred_buf) {
+ ret = -ENOMEM;
+ goto err_deblk_buf;
+ }
+ }
+
return 0;
+err_deblk_buf:
+ dma_free_coherent(dev->dev, ctx->codec.h264.deblk_buf_size,
+ ctx->codec.h264.deblk_buf,
+ ctx->codec.h264.deblk_buf_dma);
+
+err_mv_col_buf:
+ dma_free_coherent(dev->dev, ctx->codec.h264.mv_col_buf_size,
+ ctx->codec.h264.mv_col_buf,
+ ctx->codec.h264.mv_col_buf_dma);
+
err_neighbor_buf:
dma_free_coherent(dev->dev, CEDRUS_NEIGHBOR_INFO_BUF_SIZE,
ctx->codec.h264.neighbor_info_buf,
ctx->codec.h264.neighbor_info_buf_dma);
err_pic_buf:
- dma_free_coherent(dev->dev, CEDRUS_PIC_INFO_BUF_SIZE,
+ dma_free_coherent(dev->dev, ctx->codec.h264.pic_info_buf_size,
ctx->codec.h264.pic_info_buf,
ctx->codec.h264.pic_info_buf_dma);
return ret;
@@ -552,9 +657,17 @@ static void cedrus_h264_stop(struct cedrus_ctx *ctx)
dma_free_coherent(dev->dev, CEDRUS_NEIGHBOR_INFO_BUF_SIZE,
ctx->codec.h264.neighbor_info_buf,
ctx->codec.h264.neighbor_info_buf_dma);
- dma_free_coherent(dev->dev, CEDRUS_PIC_INFO_BUF_SIZE,
+ dma_free_coherent(dev->dev, ctx->codec.h264.pic_info_buf_size,
ctx->codec.h264.pic_info_buf,
ctx->codec.h264.pic_info_buf_dma);
+ if (ctx->codec.h264.deblk_buf_size)
+ dma_free_coherent(dev->dev, ctx->codec.h264.deblk_buf_size,
+ ctx->codec.h264.deblk_buf,
+ ctx->codec.h264.deblk_buf_dma);
+ if (ctx->codec.h264.intra_pred_buf_size)
+ dma_free_coherent(dev->dev, ctx->codec.h264.intra_pred_buf_size,
+ ctx->codec.h264.intra_pred_buf,
+ ctx->codec.h264.intra_pred_buf_dma);
}
static void cedrus_h264_trigger(struct cedrus_ctx *ctx)
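
The new slice-position fields written above decompose first_mb_in_slice into macroblock coordinates for the hardware. A worked example with illustrative numbers (not taken from the patch): a 1280-pixel-wide frame has 80 macroblocks per row, so first_mb_in_slice = 245 decomposes as:

	unsigned int pic_width_in_mbs = 1280 / 16;	/* 80 */
	unsigned int mb_x = 245 % pic_width_in_mbs;	/* 5, written to bits 31:24 */
	unsigned int mb_y = 245 / pic_width_in_mbs;	/* 3, written to bits 23:16 */

	/* In MBAFF pictures each macroblock pair spans two rows, hence the
	 * (mbaff_pic + 1) multiplier above: mb_y would be doubled to 6. */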
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_h265.c b/drivers/staging/media/sunxi/cedrus/cedrus_h265.c
new file mode 100644
index 000000000000..6945dc74e1d7
--- /dev/null
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_h265.c
@@ -0,0 +1,616 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Cedrus VPU driver
+ *
+ * Copyright (C) 2013 Jens Kuske <jenskuske@gmail.com>
+ * Copyright (C) 2018 Paul Kocialkowski <paul.kocialkowski@bootlin.com>
+ * Copyright (C) 2018 Bootlin
+ */
+
+#include <linux/types.h>
+
+#include <media/videobuf2-dma-contig.h>
+
+#include "cedrus.h"
+#include "cedrus_hw.h"
+#include "cedrus_regs.h"
+
+/*
+ * These are the sizes for side buffers required by the hardware for storing
+ * internal decoding metadata. They match the values used by the early BSP
+ * implementations, which were initially exposed in libvdpau-sunxi.
+ * Subsequent BSP implementations seem to double the neighbor info buffer size
+ * for the H6 SoC, which may be related to 10-bit H265 support.
+ */
+#define CEDRUS_H265_NEIGHBOR_INFO_BUF_SIZE (397 * SZ_1K)
+#define CEDRUS_H265_ENTRY_POINTS_BUF_SIZE (4 * SZ_1K)
+#define CEDRUS_H265_MV_COL_BUF_UNIT_CTB_SIZE 160
+
+struct cedrus_h265_sram_frame_info {
+ __le32 top_pic_order_cnt;
+ __le32 bottom_pic_order_cnt;
+ __le32 top_mv_col_buf_addr;
+ __le32 bottom_mv_col_buf_addr;
+ __le32 luma_addr;
+ __le32 chroma_addr;
+} __packed;
+
+struct cedrus_h265_sram_pred_weight {
+ __s8 delta_weight;
+ __s8 offset;
+} __packed;
+
+static enum cedrus_irq_status cedrus_h265_irq_status(struct cedrus_ctx *ctx)
+{
+ struct cedrus_dev *dev = ctx->dev;
+ u32 reg;
+
+ reg = cedrus_read(dev, VE_DEC_H265_STATUS);
+ reg &= VE_DEC_H265_STATUS_CHECK_MASK;
+
+ if (reg & VE_DEC_H265_STATUS_CHECK_ERROR ||
+ !(reg & VE_DEC_H265_STATUS_SUCCESS))
+ return CEDRUS_IRQ_ERROR;
+
+ return CEDRUS_IRQ_OK;
+}
+
+static void cedrus_h265_irq_clear(struct cedrus_ctx *ctx)
+{
+ struct cedrus_dev *dev = ctx->dev;
+
+ cedrus_write(dev, VE_DEC_H265_STATUS, VE_DEC_H265_STATUS_CHECK_MASK);
+}
+
+static void cedrus_h265_irq_disable(struct cedrus_ctx *ctx)
+{
+ struct cedrus_dev *dev = ctx->dev;
+ u32 reg = cedrus_read(dev, VE_DEC_H265_CTRL);
+
+ reg &= ~VE_DEC_H265_CTRL_IRQ_MASK;
+
+ cedrus_write(dev, VE_DEC_H265_CTRL, reg);
+}
+
+static void cedrus_h265_sram_write_offset(struct cedrus_dev *dev, u32 offset)
+{
+ cedrus_write(dev, VE_DEC_H265_SRAM_OFFSET, offset);
+}
+
+static void cedrus_h265_sram_write_data(struct cedrus_dev *dev, void *data,
+ unsigned int size)
+{
+ u32 *word = data;
+
+ while (size >= sizeof(u32)) {
+ cedrus_write(dev, VE_DEC_H265_SRAM_DATA, *word++);
+ size -= sizeof(u32);
+ }
+}
+
+static inline dma_addr_t
+cedrus_h265_frame_info_mv_col_buf_addr(struct cedrus_ctx *ctx,
+ unsigned int index, unsigned int field)
+{
+ return ctx->codec.h265.mv_col_buf_addr + index *
+ ctx->codec.h265.mv_col_buf_unit_size +
+ field * ctx->codec.h265.mv_col_buf_unit_size / 2;
+}
+
+static void cedrus_h265_frame_info_write_single(struct cedrus_ctx *ctx,
+ unsigned int index,
+ bool field_pic,
+ u32 pic_order_cnt[],
+ int buffer_index)
+{
+ struct cedrus_dev *dev = ctx->dev;
+ dma_addr_t dst_luma_addr = cedrus_dst_buf_addr(ctx, buffer_index, 0);
+ dma_addr_t dst_chroma_addr = cedrus_dst_buf_addr(ctx, buffer_index, 1);
+ dma_addr_t mv_col_buf_addr[2] = {
+ cedrus_h265_frame_info_mv_col_buf_addr(ctx, buffer_index, 0),
+ cedrus_h265_frame_info_mv_col_buf_addr(ctx, buffer_index,
+ field_pic ? 1 : 0)
+ };
+ u32 offset = VE_DEC_H265_SRAM_OFFSET_FRAME_INFO +
+ VE_DEC_H265_SRAM_OFFSET_FRAME_INFO_UNIT * index;
+ struct cedrus_h265_sram_frame_info frame_info = {
+ .top_pic_order_cnt = cpu_to_le32(pic_order_cnt[0]),
+ .bottom_pic_order_cnt = cpu_to_le32(field_pic ?
+ pic_order_cnt[1] :
+ pic_order_cnt[0]),
+ .top_mv_col_buf_addr =
+ cpu_to_le32(VE_DEC_H265_SRAM_DATA_ADDR_BASE(mv_col_buf_addr[0])),
+ .bottom_mv_col_buf_addr = cpu_to_le32(field_pic ?
+ VE_DEC_H265_SRAM_DATA_ADDR_BASE(mv_col_buf_addr[1]) :
+ VE_DEC_H265_SRAM_DATA_ADDR_BASE(mv_col_buf_addr[0])),
+ .luma_addr = cpu_to_le32(VE_DEC_H265_SRAM_DATA_ADDR_BASE(dst_luma_addr)),
+ .chroma_addr = cpu_to_le32(VE_DEC_H265_SRAM_DATA_ADDR_BASE(dst_chroma_addr)),
+ };
+
+ cedrus_h265_sram_write_offset(dev, offset);
+ cedrus_h265_sram_write_data(dev, &frame_info, sizeof(frame_info));
+}
+
+static void cedrus_h265_frame_info_write_dpb(struct cedrus_ctx *ctx,
+ const struct v4l2_hevc_dpb_entry *dpb,
+ u8 num_active_dpb_entries)
+{
+ struct vb2_queue *vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ unsigned int i;
+
+ for (i = 0; i < num_active_dpb_entries; i++) {
+ int buffer_index = vb2_find_timestamp(vq, dpb[i].timestamp, 0);
+ u32 pic_order_cnt[2] = {
+ dpb[i].pic_order_cnt[0],
+ dpb[i].pic_order_cnt[1]
+ };
+
+ cedrus_h265_frame_info_write_single(ctx, i, dpb[i].field_pic,
+ pic_order_cnt,
+ buffer_index);
+ }
+}
+
+static void cedrus_h265_ref_pic_list_write(struct cedrus_dev *dev,
+ const struct v4l2_hevc_dpb_entry *dpb,
+ const u8 list[],
+ u8 num_ref_idx_active,
+ u32 sram_offset)
+{
+ unsigned int i;
+ u32 word = 0;
+
+ cedrus_h265_sram_write_offset(dev, sram_offset);
+
+ for (i = 0; i < num_ref_idx_active; i++) {
+ unsigned int shift = (i % 4) * 8;
+ unsigned int index = list[i];
+ u8 value = list[i];
+
+ if (dpb[index].rps == V4L2_HEVC_DPB_ENTRY_RPS_LT_CURR)
+ value |= VE_DEC_H265_SRAM_REF_PIC_LIST_LT_REF;
+
+ /* Each SRAM word gathers up to 4 references. */
+ word |= value << shift;
+
+ /* Write the word to SRAM and clear it for the next batch. */
+ if ((i % 4) == 3 || i == (num_ref_idx_active - 1)) {
+ cedrus_h265_sram_write_data(dev, &word, sizeof(word));
+ word = 0;
+ }
+ }
+}
+
+static void cedrus_h265_pred_weight_write(struct cedrus_dev *dev,
+ const s8 delta_luma_weight[],
+ const s8 luma_offset[],
+ const s8 delta_chroma_weight[][2],
+ const s8 chroma_offset[][2],
+ u8 num_ref_idx_active,
+ u32 sram_luma_offset,
+ u32 sram_chroma_offset)
+{
+ struct cedrus_h265_sram_pred_weight pred_weight[2] = { { 0 } };
+ unsigned int i, j;
+
+ cedrus_h265_sram_write_offset(dev, sram_luma_offset);
+
+ for (i = 0; i < num_ref_idx_active; i++) {
+ unsigned int index = i % 2;
+
+ pred_weight[index].delta_weight = delta_luma_weight[i];
+ pred_weight[index].offset = luma_offset[i];
+
+ if (index == 1 || i == (num_ref_idx_active - 1))
+ cedrus_h265_sram_write_data(dev, (u32 *)&pred_weight,
+ sizeof(pred_weight));
+ }
+
+ cedrus_h265_sram_write_offset(dev, sram_chroma_offset);
+
+ for (i = 0; i < num_ref_idx_active; i++) {
+ for (j = 0; j < 2; j++) {
+ pred_weight[j].delta_weight = delta_chroma_weight[i][j];
+ pred_weight[j].offset = chroma_offset[i][j];
+ }
+
+ cedrus_h265_sram_write_data(dev, &pred_weight,
+ sizeof(pred_weight));
+ }
+}
+
+static void cedrus_h265_setup(struct cedrus_ctx *ctx,
+ struct cedrus_run *run)
+{
+ struct cedrus_dev *dev = ctx->dev;
+ const struct v4l2_ctrl_hevc_sps *sps;
+ const struct v4l2_ctrl_hevc_pps *pps;
+ const struct v4l2_ctrl_hevc_slice_params *slice_params;
+ const struct v4l2_hevc_pred_weight_table *pred_weight_table;
+ dma_addr_t src_buf_addr;
+ dma_addr_t src_buf_end_addr;
+ u32 chroma_log2_weight_denom;
+ u32 output_pic_list_index;
+ u32 pic_order_cnt[2];
+ u32 reg;
+
+ sps = run->h265.sps;
+ pps = run->h265.pps;
+ slice_params = run->h265.slice_params;
+ pred_weight_table = &slice_params->pred_weight_table;
+
+ /* MV column buffer size and allocation. */
+ if (!ctx->codec.h265.mv_col_buf_size) {
+ unsigned int num_buffers =
+ run->dst->vb2_buf.vb2_queue->num_buffers;
+ unsigned int log2_max_luma_coding_block_size =
+ sps->log2_min_luma_coding_block_size_minus3 + 3 +
+ sps->log2_diff_max_min_luma_coding_block_size;
+ unsigned int ctb_size_luma =
+ 1UL << log2_max_luma_coding_block_size;
+
+ /*
+ * Each CTB requires an MV col buffer with a specific unit size.
+ * Since the address is given with its LSBs missing, 1 KiB is
+ * added to each buffer to ensure proper alignment.
+ */
+ ctx->codec.h265.mv_col_buf_unit_size =
+ DIV_ROUND_UP(ctx->src_fmt.width, ctb_size_luma) *
+ DIV_ROUND_UP(ctx->src_fmt.height, ctb_size_luma) *
+ CEDRUS_H265_MV_COL_BUF_UNIT_CTB_SIZE + SZ_1K;
+
+ ctx->codec.h265.mv_col_buf_size = num_buffers *
+ ctx->codec.h265.mv_col_buf_unit_size;
+
+ ctx->codec.h265.mv_col_buf =
+ dma_alloc_coherent(dev->dev,
+ ctx->codec.h265.mv_col_buf_size,
+ &ctx->codec.h265.mv_col_buf_addr,
+ GFP_KERNEL);
+ if (!ctx->codec.h265.mv_col_buf) {
+ ctx->codec.h265.mv_col_buf_size = 0;
+ // TODO: Abort the process here.
+ return;
+ }
+ }
+
+ /* Activate H265 engine. */
+ cedrus_engine_enable(ctx, CEDRUS_CODEC_H265);
+
+ /* Source offset and length in bits. */
+
+ reg = slice_params->data_bit_offset;
+ cedrus_write(dev, VE_DEC_H265_BITS_OFFSET, reg);
+
+ reg = slice_params->bit_size - slice_params->data_bit_offset;
+ cedrus_write(dev, VE_DEC_H265_BITS_LEN, reg);
+
+ /* Source beginning and end addresses. */
+
+ src_buf_addr = vb2_dma_contig_plane_dma_addr(&run->src->vb2_buf, 0);
+
+ reg = VE_DEC_H265_BITS_ADDR_BASE(src_buf_addr);
+ reg |= VE_DEC_H265_BITS_ADDR_VALID_SLICE_DATA;
+ reg |= VE_DEC_H265_BITS_ADDR_LAST_SLICE_DATA;
+ reg |= VE_DEC_H265_BITS_ADDR_FIRST_SLICE_DATA;
+
+ cedrus_write(dev, VE_DEC_H265_BITS_ADDR, reg);
+
+ src_buf_end_addr = src_buf_addr +
+ DIV_ROUND_UP(slice_params->bit_size, 8);
+
+ reg = VE_DEC_H265_BITS_END_ADDR_BASE(src_buf_end_addr);
+ cedrus_write(dev, VE_DEC_H265_BITS_END_ADDR, reg);
+
+ /* Coding tree block address: start at the beginning. */
+ reg = VE_DEC_H265_DEC_CTB_ADDR_X(0) | VE_DEC_H265_DEC_CTB_ADDR_Y(0);
+ cedrus_write(dev, VE_DEC_H265_DEC_CTB_ADDR, reg);
+
+ cedrus_write(dev, VE_DEC_H265_TILE_START_CTB, 0);
+ cedrus_write(dev, VE_DEC_H265_TILE_END_CTB, 0);
+
+ /* Clear the number of correctly-decoded coding tree blocks. */
+ cedrus_write(dev, VE_DEC_H265_DEC_CTB_NUM, 0);
+
+ /* Initialize bitstream access. */
+ cedrus_write(dev, VE_DEC_H265_TRIGGER, VE_DEC_H265_TRIGGER_INIT_SWDEC);
+
+ /* Bitstream parameters. */
+
+ reg = VE_DEC_H265_DEC_NAL_HDR_NAL_UNIT_TYPE(slice_params->nal_unit_type) |
+ VE_DEC_H265_DEC_NAL_HDR_NUH_TEMPORAL_ID_PLUS1(slice_params->nuh_temporal_id_plus1);
+
+ cedrus_write(dev, VE_DEC_H265_DEC_NAL_HDR, reg);
+
+ /* SPS. */
+
+ reg = VE_DEC_H265_DEC_SPS_HDR_MAX_TRANSFORM_HIERARCHY_DEPTH_INTRA(sps->max_transform_hierarchy_depth_intra) |
+ VE_DEC_H265_DEC_SPS_HDR_MAX_TRANSFORM_HIERARCHY_DEPTH_INTER(sps->max_transform_hierarchy_depth_inter) |
+ VE_DEC_H265_DEC_SPS_HDR_LOG2_DIFF_MAX_MIN_TRANSFORM_BLOCK_SIZE(sps->log2_diff_max_min_luma_transform_block_size) |
+ VE_DEC_H265_DEC_SPS_HDR_LOG2_MIN_TRANSFORM_BLOCK_SIZE_MINUS2(sps->log2_min_luma_transform_block_size_minus2) |
+ VE_DEC_H265_DEC_SPS_HDR_LOG2_DIFF_MAX_MIN_LUMA_CODING_BLOCK_SIZE(sps->log2_diff_max_min_luma_coding_block_size) |
+ VE_DEC_H265_DEC_SPS_HDR_LOG2_MIN_LUMA_CODING_BLOCK_SIZE_MINUS3(sps->log2_min_luma_coding_block_size_minus3) |
+ VE_DEC_H265_DEC_SPS_HDR_BIT_DEPTH_CHROMA_MINUS8(sps->bit_depth_chroma_minus8) |
+ VE_DEC_H265_DEC_SPS_HDR_CHROMA_FORMAT_IDC(sps->chroma_format_idc);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_SPS_HDR_FLAG_STRONG_INTRA_SMOOTHING_ENABLE,
+ V4L2_HEVC_SPS_FLAG_STRONG_INTRA_SMOOTHING_ENABLED,
+ sps->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_SPS_HDR_FLAG_SPS_TEMPORAL_MVP_ENABLED,
+ V4L2_HEVC_SPS_FLAG_SPS_TEMPORAL_MVP_ENABLED,
+ sps->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_SPS_HDR_FLAG_SAMPLE_ADAPTIVE_OFFSET_ENABLED,
+ V4L2_HEVC_SPS_FLAG_SAMPLE_ADAPTIVE_OFFSET,
+ sps->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_SPS_HDR_FLAG_AMP_ENABLED,
+ V4L2_HEVC_SPS_FLAG_AMP_ENABLED, sps->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_SPS_HDR_FLAG_SEPARATE_COLOUR_PLANE,
+ V4L2_HEVC_SPS_FLAG_SEPARATE_COLOUR_PLANE,
+ sps->flags);
+
+ cedrus_write(dev, VE_DEC_H265_DEC_SPS_HDR, reg);
+
+ reg = VE_DEC_H265_DEC_PCM_CTRL_LOG2_DIFF_MAX_MIN_PCM_LUMA_CODING_BLOCK_SIZE(sps->log2_diff_max_min_pcm_luma_coding_block_size) |
+ VE_DEC_H265_DEC_PCM_CTRL_LOG2_MIN_PCM_LUMA_CODING_BLOCK_SIZE_MINUS3(sps->log2_min_pcm_luma_coding_block_size_minus3) |
+ VE_DEC_H265_DEC_PCM_CTRL_PCM_SAMPLE_BIT_DEPTH_CHROMA_MINUS1(sps->pcm_sample_bit_depth_chroma_minus1) |
+ VE_DEC_H265_DEC_PCM_CTRL_PCM_SAMPLE_BIT_DEPTH_LUMA_MINUS1(sps->pcm_sample_bit_depth_luma_minus1);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_PCM_CTRL_FLAG_PCM_ENABLED,
+ V4L2_HEVC_SPS_FLAG_PCM_ENABLED, sps->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_PCM_CTRL_FLAG_PCM_LOOP_FILTER_DISABLED,
+ V4L2_HEVC_SPS_FLAG_PCM_LOOP_FILTER_DISABLED,
+ sps->flags);
+
+ cedrus_write(dev, VE_DEC_H265_DEC_PCM_CTRL, reg);
+
+ /* PPS. */
+
+ reg = VE_DEC_H265_DEC_PPS_CTRL0_PPS_CR_QP_OFFSET(pps->pps_cr_qp_offset) |
+ VE_DEC_H265_DEC_PPS_CTRL0_PPS_CB_QP_OFFSET(pps->pps_cb_qp_offset) |
+ VE_DEC_H265_DEC_PPS_CTRL0_INIT_QP_MINUS26(pps->init_qp_minus26) |
+ VE_DEC_H265_DEC_PPS_CTRL0_DIFF_CU_QP_DELTA_DEPTH(pps->diff_cu_qp_delta_depth);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_PPS_CTRL0_FLAG_CU_QP_DELTA_ENABLED,
+ V4L2_HEVC_PPS_FLAG_CU_QP_DELTA_ENABLED,
+ pps->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_PPS_CTRL0_FLAG_TRANSFORM_SKIP_ENABLED,
+ V4L2_HEVC_PPS_FLAG_TRANSFORM_SKIP_ENABLED,
+ pps->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_PPS_CTRL0_FLAG_CONSTRAINED_INTRA_PRED,
+ V4L2_HEVC_PPS_FLAG_CONSTRAINED_INTRA_PRED,
+ pps->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_PPS_CTRL0_FLAG_SIGN_DATA_HIDING_ENABLED,
+ V4L2_HEVC_PPS_FLAG_SIGN_DATA_HIDING_ENABLED,
+ pps->flags);
+
+ cedrus_write(dev, VE_DEC_H265_DEC_PPS_CTRL0, reg);
+
+ reg = VE_DEC_H265_DEC_PPS_CTRL1_LOG2_PARALLEL_MERGE_LEVEL_MINUS2(pps->log2_parallel_merge_level_minus2);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_PPS_CTRL1_FLAG_PPS_LOOP_FILTER_ACROSS_SLICES_ENABLED,
+ V4L2_HEVC_PPS_FLAG_PPS_LOOP_FILTER_ACROSS_SLICES_ENABLED,
+ pps->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_PPS_CTRL1_FLAG_LOOP_FILTER_ACROSS_TILES_ENABLED,
+ V4L2_HEVC_PPS_FLAG_LOOP_FILTER_ACROSS_TILES_ENABLED,
+ pps->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_PPS_CTRL1_FLAG_ENTROPY_CODING_SYNC_ENABLED,
+ V4L2_HEVC_PPS_FLAG_ENTROPY_CODING_SYNC_ENABLED,
+ pps->flags);
+
+ /* TODO: VE_DEC_H265_DEC_PPS_CTRL1_FLAG_TILES_ENABLED */
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_PPS_CTRL1_FLAG_TRANSQUANT_BYPASS_ENABLED,
+ V4L2_HEVC_PPS_FLAG_TRANSQUANT_BYPASS_ENABLED,
+ pps->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_PPS_CTRL1_FLAG_WEIGHTED_BIPRED,
+ V4L2_HEVC_PPS_FLAG_WEIGHTED_BIPRED, pps->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_PPS_CTRL1_FLAG_WEIGHTED_PRED,
+ V4L2_HEVC_PPS_FLAG_WEIGHTED_PRED, pps->flags);
+
+ cedrus_write(dev, VE_DEC_H265_DEC_PPS_CTRL1, reg);
+
+ /* Slice Parameters. */
+
+ reg = VE_DEC_H265_DEC_SLICE_HDR_INFO0_PICTURE_TYPE(slice_params->pic_struct) |
+ VE_DEC_H265_DEC_SLICE_HDR_INFO0_FIVE_MINUS_MAX_NUM_MERGE_CAND(slice_params->five_minus_max_num_merge_cand) |
+ VE_DEC_H265_DEC_SLICE_HDR_INFO0_NUM_REF_IDX_L1_ACTIVE_MINUS1(slice_params->num_ref_idx_l1_active_minus1) |
+ VE_DEC_H265_DEC_SLICE_HDR_INFO0_NUM_REF_IDX_L0_ACTIVE_MINUS1(slice_params->num_ref_idx_l0_active_minus1) |
+ VE_DEC_H265_DEC_SLICE_HDR_INFO0_COLLOCATED_REF_IDX(slice_params->collocated_ref_idx) |
+ VE_DEC_H265_DEC_SLICE_HDR_INFO0_COLOUR_PLANE_ID(slice_params->colour_plane_id) |
+ VE_DEC_H265_DEC_SLICE_HDR_INFO0_SLICE_TYPE(slice_params->slice_type);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_COLLOCATED_FROM_L0,
+ V4L2_HEVC_SLICE_PARAMS_FLAG_COLLOCATED_FROM_L0,
+ slice_params->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_CABAC_INIT,
+ V4L2_HEVC_SLICE_PARAMS_FLAG_CABAC_INIT,
+ slice_params->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_MVD_L1_ZERO,
+ V4L2_HEVC_SLICE_PARAMS_FLAG_MVD_L1_ZERO,
+ slice_params->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_SLICE_SAO_CHROMA,
+ V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_SAO_CHROMA,
+ slice_params->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_SLICE_SAO_LUMA,
+ V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_SAO_LUMA,
+ slice_params->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_SLICE_TEMPORAL_MVP_ENABLE,
+ V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_TEMPORAL_MVP_ENABLED,
+ slice_params->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_DEPENDENT_SLICE_SEGMENT,
+ V4L2_HEVC_PPS_FLAG_DEPENDENT_SLICE_SEGMENT,
+ pps->flags);
+
+ /* FIXME: For multi-slice support. */
+ reg |= VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_FIRST_SLICE_SEGMENT_IN_PIC;
+
+ cedrus_write(dev, VE_DEC_H265_DEC_SLICE_HDR_INFO0, reg);
+
+ reg = VE_DEC_H265_DEC_SLICE_HDR_INFO1_SLICE_TC_OFFSET_DIV2(slice_params->slice_tc_offset_div2) |
+ VE_DEC_H265_DEC_SLICE_HDR_INFO1_SLICE_BETA_OFFSET_DIV2(slice_params->slice_beta_offset_div2) |
+ VE_DEC_H265_DEC_SLICE_HDR_INFO1_SLICE_POC_BIGEST_IN_RPS_ST(slice_params->num_rps_poc_st_curr_after == 0) |
+ VE_DEC_H265_DEC_SLICE_HDR_INFO1_SLICE_CR_QP_OFFSET(slice_params->slice_cr_qp_offset) |
+ VE_DEC_H265_DEC_SLICE_HDR_INFO1_SLICE_CB_QP_OFFSET(slice_params->slice_cb_qp_offset) |
+ VE_DEC_H265_DEC_SLICE_HDR_INFO1_SLICE_QP_DELTA(slice_params->slice_qp_delta);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_SLICE_HDR_INFO1_FLAG_SLICE_DEBLOCKING_FILTER_DISABLED,
+ V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_DEBLOCKING_FILTER_DISABLED,
+ slice_params->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_SLICE_HDR_INFO1_FLAG_SLICE_LOOP_FILTER_ACROSS_SLICES_ENABLED,
+ V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_LOOP_FILTER_ACROSS_SLICES_ENABLED,
+ slice_params->flags);
+
+ cedrus_write(dev, VE_DEC_H265_DEC_SLICE_HDR_INFO1, reg);
+
+ chroma_log2_weight_denom = pred_weight_table->luma_log2_weight_denom +
+ pred_weight_table->delta_chroma_log2_weight_denom;
+ reg = VE_DEC_H265_DEC_SLICE_HDR_INFO2_NUM_ENTRY_POINT_OFFSETS(0) |
+ VE_DEC_H265_DEC_SLICE_HDR_INFO2_CHROMA_LOG2_WEIGHT_DENOM(chroma_log2_weight_denom) |
+ VE_DEC_H265_DEC_SLICE_HDR_INFO2_LUMA_LOG2_WEIGHT_DENOM(pred_weight_table->luma_log2_weight_denom);
+
+ cedrus_write(dev, VE_DEC_H265_DEC_SLICE_HDR_INFO2, reg);
+
+ /* Decoded picture size. */
+
+ reg = VE_DEC_H265_DEC_PIC_SIZE_WIDTH(ctx->src_fmt.width) |
+ VE_DEC_H265_DEC_PIC_SIZE_HEIGHT(ctx->src_fmt.height);
+
+ cedrus_write(dev, VE_DEC_H265_DEC_PIC_SIZE, reg);
+
+ /* Scaling list. */
+
+ reg = VE_DEC_H265_SCALING_LIST_CTRL0_DEFAULT;
+ cedrus_write(dev, VE_DEC_H265_SCALING_LIST_CTRL0, reg);
+
+ /* Neighbor information address. */
+ reg = VE_DEC_H265_NEIGHBOR_INFO_ADDR_BASE(ctx->codec.h265.neighbor_info_buf_addr);
+ cedrus_write(dev, VE_DEC_H265_NEIGHBOR_INFO_ADDR, reg);
+
+ /* Write decoded picture buffer in pic list. */
+ cedrus_h265_frame_info_write_dpb(ctx, slice_params->dpb,
+ slice_params->num_active_dpb_entries);
+
+ /* Output frame. */
+
+ output_pic_list_index = V4L2_HEVC_DPB_ENTRIES_NUM_MAX;
+ pic_order_cnt[0] = slice_params->slice_pic_order_cnt;
+ pic_order_cnt[1] = slice_params->slice_pic_order_cnt;
+
+ cedrus_h265_frame_info_write_single(ctx, output_pic_list_index,
+ slice_params->pic_struct != 0,
+ pic_order_cnt,
+ run->dst->vb2_buf.index);
+
+ cedrus_write(dev, VE_DEC_H265_OUTPUT_FRAME_IDX, output_pic_list_index);
+
+ /* Reference picture list 0 (for P/B frames). */
+ if (slice_params->slice_type != V4L2_HEVC_SLICE_TYPE_I) {
+ cedrus_h265_ref_pic_list_write(dev, slice_params->dpb,
+ slice_params->ref_idx_l0,
+ slice_params->num_ref_idx_l0_active_minus1 + 1,
+ VE_DEC_H265_SRAM_OFFSET_REF_PIC_LIST0);
+
+ if ((pps->flags & V4L2_HEVC_PPS_FLAG_WEIGHTED_PRED) ||
+ (pps->flags & V4L2_HEVC_PPS_FLAG_WEIGHTED_BIPRED))
+ cedrus_h265_pred_weight_write(dev,
+ pred_weight_table->delta_luma_weight_l0,
+ pred_weight_table->luma_offset_l0,
+ pred_weight_table->delta_chroma_weight_l0,
+ pred_weight_table->chroma_offset_l0,
+ slice_params->num_ref_idx_l0_active_minus1 + 1,
+ VE_DEC_H265_SRAM_OFFSET_PRED_WEIGHT_LUMA_L0,
+ VE_DEC_H265_SRAM_OFFSET_PRED_WEIGHT_CHROMA_L0);
+ }
+
+ /* Reference picture list 1 (for B frames). */
+ if (slice_params->slice_type == V4L2_HEVC_SLICE_TYPE_B) {
+ cedrus_h265_ref_pic_list_write(dev, slice_params->dpb,
+ slice_params->ref_idx_l1,
+ slice_params->num_ref_idx_l1_active_minus1 + 1,
+ VE_DEC_H265_SRAM_OFFSET_REF_PIC_LIST1);
+
+ if (pps->flags & V4L2_HEVC_PPS_FLAG_WEIGHTED_BIPRED)
+ cedrus_h265_pred_weight_write(dev,
+ pred_weight_table->delta_luma_weight_l1,
+ pred_weight_table->luma_offset_l1,
+ pred_weight_table->delta_chroma_weight_l1,
+ pred_weight_table->chroma_offset_l1,
+ slice_params->num_ref_idx_l1_active_minus1 + 1,
+ VE_DEC_H265_SRAM_OFFSET_PRED_WEIGHT_LUMA_L1,
+ VE_DEC_H265_SRAM_OFFSET_PRED_WEIGHT_CHROMA_L1);
+ }
+
+ /* Enable the appropriate interrupts. */
+ cedrus_write(dev, VE_DEC_H265_CTRL, VE_DEC_H265_CTRL_IRQ_MASK);
+}
+
+static int cedrus_h265_start(struct cedrus_ctx *ctx)
+{
+ struct cedrus_dev *dev = ctx->dev;
+
+ /* The buffer size is calculated at setup time. */
+ ctx->codec.h265.mv_col_buf_size = 0;
+
+ ctx->codec.h265.neighbor_info_buf =
+ dma_alloc_coherent(dev->dev, CEDRUS_H265_NEIGHBOR_INFO_BUF_SIZE,
+ &ctx->codec.h265.neighbor_info_buf_addr,
+ GFP_KERNEL);
+ if (!ctx->codec.h265.neighbor_info_buf)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void cedrus_h265_stop(struct cedrus_ctx *ctx)
+{
+ struct cedrus_dev *dev = ctx->dev;
+
+ if (ctx->codec.h265.mv_col_buf_size > 0) {
+ dma_free_coherent(dev->dev, ctx->codec.h265.mv_col_buf_size,
+ ctx->codec.h265.mv_col_buf,
+ ctx->codec.h265.mv_col_buf_addr);
+
+ ctx->codec.h265.mv_col_buf_size = 0;
+ }
+
+ dma_free_coherent(dev->dev, CEDRUS_H265_NEIGHBOR_INFO_BUF_SIZE,
+ ctx->codec.h265.neighbor_info_buf,
+ ctx->codec.h265.neighbor_info_buf_addr);
+}
+
+static void cedrus_h265_trigger(struct cedrus_ctx *ctx)
+{
+ struct cedrus_dev *dev = ctx->dev;
+
+ cedrus_write(dev, VE_DEC_H265_TRIGGER, VE_DEC_H265_TRIGGER_DEC_SLICE);
+}
+
+struct cedrus_dec_ops cedrus_dec_ops_h265 = {
+ .irq_clear = cedrus_h265_irq_clear,
+ .irq_disable = cedrus_h265_irq_disable,
+ .irq_status = cedrus_h265_irq_status,
+ .setup = cedrus_h265_setup,
+ .start = cedrus_h265_start,
+ .stop = cedrus_h265_stop,
+ .trigger = cedrus_h265_trigger,
+};
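
For context, a slice-based userspace decoder drives these ops through the standard stateless flow: it fills the three controls marked .required in cedrus_controls[] and applies them before queueing each slice. A rough sketch, with error handling and the media-request plumbing omitted (the v4l2_ctrl_hevc_* structures live in the staging hevc-ctrls.h uAPI header at this point, so this is illustrative only):

	#include <sys/ioctl.h>
	#include <linux/videodev2.h>

	static int apply_hevc_controls(int fd,
				       struct v4l2_ctrl_hevc_sps *sps,
				       struct v4l2_ctrl_hevc_pps *pps,
				       struct v4l2_ctrl_hevc_slice_params *slice)
	{
		struct v4l2_ext_control ctrl[3] = {
			{ .id = V4L2_CID_MPEG_VIDEO_HEVC_SPS,
			  .size = sizeof(*sps), .ptr = sps },
			{ .id = V4L2_CID_MPEG_VIDEO_HEVC_PPS,
			  .size = sizeof(*pps), .ptr = pps },
			{ .id = V4L2_CID_MPEG_VIDEO_HEVC_SLICE_PARAMS,
			  .size = sizeof(*slice), .ptr = slice },
		};
		struct v4l2_ext_controls ctrls = {
			.count = 3,
			.controls = ctrl,
		};

		return ioctl(fd, VIDIOC_S_EXT_CTRLS, &ctrls);
	}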
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_hw.c b/drivers/staging/media/sunxi/cedrus/cedrus_hw.c
index a942cd9bed57..daf5f244f93b 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_hw.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_hw.c
@@ -30,7 +30,7 @@
#include "cedrus_hw.h"
#include "cedrus_regs.h"
-int cedrus_engine_enable(struct cedrus_dev *dev, enum cedrus_codec codec)
+int cedrus_engine_enable(struct cedrus_ctx *ctx, enum cedrus_codec codec)
{
u32 reg = 0;
@@ -50,11 +50,20 @@ int cedrus_engine_enable(struct cedrus_dev *dev, enum cedrus_codec codec)
reg |= VE_MODE_DEC_H264;
break;
+ case CEDRUS_CODEC_H265:
+ reg |= VE_MODE_DEC_H265;
+ break;
+
default:
return -EINVAL;
}
- cedrus_write(dev, VE_MODE, reg);
+ if (ctx->src_fmt.width == 4096)
+ reg |= VE_MODE_PIC_WIDTH_IS_4096;
+ if (ctx->src_fmt.width > 2048)
+ reg |= VE_MODE_PIC_WIDTH_MORE_2048;
+
+ cedrus_write(ctx->dev, VE_MODE, reg);
return 0;
}
@@ -103,7 +112,6 @@ static irqreturn_t cedrus_irq(int irq, void *data)
{
struct cedrus_dev *dev = data;
struct cedrus_ctx *ctx;
- struct vb2_v4l2_buffer *src_buf, *dst_buf;
enum vb2_buffer_state state;
enum cedrus_irq_status status;
@@ -121,24 +129,13 @@ static irqreturn_t cedrus_irq(int irq, void *data)
dev->dec_ops[ctx->current_codec]->irq_disable(ctx);
dev->dec_ops[ctx->current_codec]->irq_clear(ctx);
- src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
- dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
-
- if (!src_buf || !dst_buf) {
- v4l2_err(&dev->v4l2_dev,
- "Missing source and/or destination buffers\n");
- return IRQ_HANDLED;
- }
-
if (status == CEDRUS_IRQ_ERROR)
state = VB2_BUF_STATE_ERROR;
else
state = VB2_BUF_STATE_DONE;
- v4l2_m2m_buf_done(src_buf, state);
- v4l2_m2m_buf_done(dst_buf, state);
-
- v4l2_m2m_job_finish(ctx->dev->m2m_dev, ctx->fh.m2m_ctx);
+ v4l2_m2m_buf_done_and_job_finish(ctx->dev->m2m_dev, ctx->fh.m2m_ctx,
+ state);
return IRQ_HANDLED;
}
@@ -146,7 +143,6 @@ static irqreturn_t cedrus_irq(int irq, void *data)
int cedrus_hw_probe(struct cedrus_dev *dev)
{
const struct cedrus_variant *variant;
- struct resource *res;
int irq_dec;
int ret;
@@ -225,8 +221,7 @@ int cedrus_hw_probe(struct cedrus_dev *dev)
goto err_sram;
}
- res = platform_get_resource(dev->pdev, IORESOURCE_MEM, 0);
- dev->base = devm_ioremap_resource(dev->dev, res);
+ dev->base = devm_platform_ioremap_resource(dev->pdev, 0);
if (IS_ERR(dev->base)) {
dev_err(dev->dev, "Failed to map registers\n");
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_hw.h b/drivers/staging/media/sunxi/cedrus/cedrus_hw.h
index 27d0882397aa..604ff932fbf5 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_hw.h
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_hw.h
@@ -16,7 +16,7 @@
#ifndef _CEDRUS_HW_H_
#define _CEDRUS_HW_H_
-int cedrus_engine_enable(struct cedrus_dev *dev, enum cedrus_codec codec);
+int cedrus_engine_enable(struct cedrus_ctx *ctx, enum cedrus_codec codec);
void cedrus_engine_disable(struct cedrus_dev *dev);
void cedrus_dst_format_set(struct cedrus_dev *dev,
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_mpeg2.c b/drivers/staging/media/sunxi/cedrus/cedrus_mpeg2.c
index 13c34927bad5..8bcd6b8f9e2d 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_mpeg2.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_mpeg2.c
@@ -96,7 +96,7 @@ static void cedrus_mpeg2_setup(struct cedrus_ctx *ctx, struct cedrus_run *run)
quantization = run->mpeg2.quantization;
/* Activate MPEG engine. */
- cedrus_engine_enable(dev, CEDRUS_CODEC_MPEG2);
+ cedrus_engine_enable(ctx, CEDRUS_CODEC_MPEG2);
/* Set intra quantization matrix. */
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_regs.h b/drivers/staging/media/sunxi/cedrus/cedrus_regs.h
index ddd29788d685..7beb03d3bb39 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_regs.h
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_regs.h
@@ -10,6 +10,9 @@
#ifndef _CEDRUS_REGS_H_
#define _CEDRUS_REGS_H_
+#define SHIFT_AND_MASK_BITS(v, h, l) \
+ (((unsigned long)(v) << (l)) & GENMASK(h, l))
+
/*
* Common acronyms and contractions used in register descriptions:
* * VLD : Variable-Length Decoder
@@ -18,13 +21,22 @@
* * MC: Motion Compensation
* * STCD: Start Code Detect
* * SDRT: Scale Down and Rotate
+ * * WB: Writeback
+ * * BITS/BS: Bitstream
+ * * MB: Macroblock
+ * * CTU: Coding Tree Unit
+ * * CTB: Coding Tree Block
+ * * IDX: Index
*/
#define VE_ENGINE_DEC_MPEG 0x100
#define VE_ENGINE_DEC_H264 0x200
+#define VE_ENGINE_DEC_H265 0x500
#define VE_MODE 0x00
+#define VE_MODE_PIC_WIDTH_IS_4096 BIT(22)
+#define VE_MODE_PIC_WIDTH_MORE_2048 BIT(21)
#define VE_MODE_REC_WR_MODE_2MB (0x01 << 20)
#define VE_MODE_REC_WR_MODE_1MB (0x00 << 20)
#define VE_MODE_DDR_MODE_BW_128 (0x03 << 16)
@@ -34,11 +46,22 @@
#define VE_MODE_DEC_H264 (0x01 << 0)
#define VE_MODE_DEC_MPEG (0x00 << 0)
+#define VE_BUF_CTRL 0x50
+
+#define VE_BUF_CTRL_INTRAPRED_EXT_RAM (0x02 << 2)
+#define VE_BUF_CTRL_INTRAPRED_MIXED_RAM (0x01 << 2)
+#define VE_BUF_CTRL_INTRAPRED_INT_SRAM (0x00 << 2)
+#define VE_BUF_CTRL_DBLK_EXT_RAM (0x02 << 0)
+#define VE_BUF_CTRL_DBLK_MIXED_RAM (0x01 << 0)
+#define VE_BUF_CTRL_DBLK_INT_SRAM (0x00 << 0)
+
+#define VE_DBLK_DRAM_BUF_ADDR 0x54
+#define VE_INTRAPRED_DRAM_BUF_ADDR 0x58
#define VE_PRIMARY_CHROMA_BUF_LEN 0xc4
#define VE_PRIMARY_FB_LINE_STRIDE 0xc8
-#define VE_PRIMARY_FB_LINE_STRIDE_CHROMA(s) (((s) << 16) & GENMASK(31, 16))
-#define VE_PRIMARY_FB_LINE_STRIDE_LUMA(s) (((s) << 0) & GENMASK(15, 0))
+#define VE_PRIMARY_FB_LINE_STRIDE_CHROMA(s) SHIFT_AND_MASK_BITS(s, 31, 16)
+#define VE_PRIMARY_FB_LINE_STRIDE_LUMA(s) SHIFT_AND_MASK_BITS(s, 15, 0)
#define VE_CHROMA_BUF_LEN 0xe8
@@ -46,7 +69,7 @@
#define VE_SECONDARY_OUT_FMT_EXT (0x01 << 30)
#define VE_SECONDARY_OUT_FMT_YU12 (0x02 << 30)
#define VE_SECONDARY_OUT_FMT_YV12 (0x03 << 30)
-#define VE_CHROMA_BUF_LEN_SDRT(l) ((l) & GENMASK(27, 0))
+#define VE_CHROMA_BUF_LEN_SDRT(l) SHIFT_AND_MASK_BITS(l, 27, 0)
#define VE_PRIMARY_OUT_FMT 0xec
@@ -69,15 +92,15 @@
#define VE_DEC_MPEG_MP12HDR (VE_ENGINE_DEC_MPEG + 0x00)
-#define VE_DEC_MPEG_MP12HDR_SLICE_TYPE(t) (((t) << 28) & GENMASK(30, 28))
+#define VE_DEC_MPEG_MP12HDR_SLICE_TYPE(t) SHIFT_AND_MASK_BITS(t, 30, 28)
#define VE_DEC_MPEG_MP12HDR_F_CODE_SHIFT(x, y) (24 - 4 * (y) - 8 * (x))
#define VE_DEC_MPEG_MP12HDR_F_CODE(__x, __y, __v) \
- (((__v) & GENMASK(3, 0)) << VE_DEC_MPEG_MP12HDR_F_CODE_SHIFT(__x, __y))
+ (((unsigned long)(__v) & GENMASK(3, 0)) << VE_DEC_MPEG_MP12HDR_F_CODE_SHIFT(__x, __y))
#define VE_DEC_MPEG_MP12HDR_INTRA_DC_PRECISION(p) \
- (((p) << 10) & GENMASK(11, 10))
+ SHIFT_AND_MASK_BITS(p, 11, 10)
#define VE_DEC_MPEG_MP12HDR_INTRA_PICTURE_STRUCTURE(s) \
- (((s) << 8) & GENMASK(9, 8))
+ SHIFT_AND_MASK_BITS(s, 9, 8)
#define VE_DEC_MPEG_MP12HDR_TOP_FIELD_FIRST(v) \
((v) ? BIT(7) : 0)
#define VE_DEC_MPEG_MP12HDR_FRAME_PRED_FRAME_DCT(v) \
@@ -98,19 +121,19 @@
#define VE_DEC_MPEG_PICCODEDSIZE (VE_ENGINE_DEC_MPEG + 0x08)
#define VE_DEC_MPEG_PICCODEDSIZE_WIDTH(w) \
- ((DIV_ROUND_UP((w), 16) << 8) & GENMASK(15, 8))
+ SHIFT_AND_MASK_BITS(DIV_ROUND_UP(w, 16), 15, 8)
#define VE_DEC_MPEG_PICCODEDSIZE_HEIGHT(h) \
- ((DIV_ROUND_UP((h), 16) << 0) & GENMASK(7, 0))
+ SHIFT_AND_MASK_BITS(DIV_ROUND_UP(h, 16), 7, 0)
#define VE_DEC_MPEG_PICBOUNDSIZE (VE_ENGINE_DEC_MPEG + 0x0c)
-#define VE_DEC_MPEG_PICBOUNDSIZE_WIDTH(w) (((w) << 16) & GENMASK(27, 16))
-#define VE_DEC_MPEG_PICBOUNDSIZE_HEIGHT(h) (((h) << 0) & GENMASK(11, 0))
+#define VE_DEC_MPEG_PICBOUNDSIZE_WIDTH(w) SHIFT_AND_MASK_BITS(w, 27, 16)
+#define VE_DEC_MPEG_PICBOUNDSIZE_HEIGHT(h) SHIFT_AND_MASK_BITS(h, 11, 0)
#define VE_DEC_MPEG_MBADDR (VE_ENGINE_DEC_MPEG + 0x10)
-#define VE_DEC_MPEG_MBADDR_X(w) (((w) << 8) & GENMASK(15, 8))
-#define VE_DEC_MPEG_MBADDR_Y(h) (((h) << 0) & GENMASK(7, 0))
+#define VE_DEC_MPEG_MBADDR_X(w) SHIFT_AND_MASK_BITS(w, 15, 8)
+#define VE_DEC_MPEG_MBADDR_Y(h) SHIFT_AND_MASK_BITS(h, 7, 0)
#define VE_DEC_MPEG_CTRL (VE_ENGINE_DEC_MPEG + 0x14)
@@ -225,13 +248,277 @@
#define VE_DEC_MPEG_IQMINPUT_FLAG_INTRA (0x01 << 14)
#define VE_DEC_MPEG_IQMINPUT_FLAG_NON_INTRA (0x00 << 14)
#define VE_DEC_MPEG_IQMINPUT_WEIGHT(i, v) \
- (((v) & GENMASK(7, 0)) | (((i) << 8) & GENMASK(13, 8)))
+ (SHIFT_AND_MASK_BITS(i, 13, 8) | SHIFT_AND_MASK_BITS(v, 7, 0))
#define VE_DEC_MPEG_ERROR (VE_ENGINE_DEC_MPEG + 0xc4)
#define VE_DEC_MPEG_CRTMBADDR (VE_ENGINE_DEC_MPEG + 0xc8)
#define VE_DEC_MPEG_ROT_LUMA (VE_ENGINE_DEC_MPEG + 0xcc)
#define VE_DEC_MPEG_ROT_CHROMA (VE_ENGINE_DEC_MPEG + 0xd0)
+#define VE_DEC_H265_DEC_NAL_HDR (VE_ENGINE_DEC_H265 + 0x00)
+
+#define VE_DEC_H265_DEC_NAL_HDR_NUH_TEMPORAL_ID_PLUS1(v) \
+ SHIFT_AND_MASK_BITS(v, 8, 6)
+#define VE_DEC_H265_DEC_NAL_HDR_NAL_UNIT_TYPE(v) \
+ SHIFT_AND_MASK_BITS(v, 5, 0)
+
+#define VE_DEC_H265_FLAG(reg_flag, ctrl_flag, flags) \
+ (((flags) & (ctrl_flag)) ? reg_flag : 0)
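+/*
+ * Illustrative use (the V4L2 control flag name is an example from the
+ * HEVC stateless API, not defined in this header):
+ *
+ *   reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_SPS_HDR_FLAG_AMP_ENABLED,
+ *                           V4L2_HEVC_SPS_FLAG_AMP_ENABLED, sps->flags);
+ */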
+
+#define VE_DEC_H265_DEC_SPS_HDR (VE_ENGINE_DEC_H265 + 0x04)
+
+#define VE_DEC_H265_DEC_SPS_HDR_FLAG_STRONG_INTRA_SMOOTHING_ENABLE BIT(26)
+#define VE_DEC_H265_DEC_SPS_HDR_FLAG_SPS_TEMPORAL_MVP_ENABLED BIT(25)
+#define VE_DEC_H265_DEC_SPS_HDR_FLAG_SAMPLE_ADAPTIVE_OFFSET_ENABLED BIT(24)
+#define VE_DEC_H265_DEC_SPS_HDR_FLAG_AMP_ENABLED BIT(23)
+#define VE_DEC_H265_DEC_SPS_HDR_FLAG_SEPARATE_COLOUR_PLANE BIT(2)
+
+#define VE_DEC_H265_DEC_SPS_HDR_MAX_TRANSFORM_HIERARCHY_DEPTH_INTRA(v) \
+ SHIFT_AND_MASK_BITS(v, 22, 20)
+#define VE_DEC_H265_DEC_SPS_HDR_MAX_TRANSFORM_HIERARCHY_DEPTH_INTER(v) \
+ SHIFT_AND_MASK_BITS(v, 19, 17)
+#define VE_DEC_H265_DEC_SPS_HDR_LOG2_DIFF_MAX_MIN_TRANSFORM_BLOCK_SIZE(v) \
+ SHIFT_AND_MASK_BITS(v, 16, 15)
+#define VE_DEC_H265_DEC_SPS_HDR_LOG2_MIN_TRANSFORM_BLOCK_SIZE_MINUS2(v) \
+ SHIFT_AND_MASK_BITS(v, 14, 13)
+#define VE_DEC_H265_DEC_SPS_HDR_LOG2_DIFF_MAX_MIN_LUMA_CODING_BLOCK_SIZE(v) \
+ SHIFT_AND_MASK_BITS(v, 12, 11)
+#define VE_DEC_H265_DEC_SPS_HDR_LOG2_MIN_LUMA_CODING_BLOCK_SIZE_MINUS3(v) \
+ SHIFT_AND_MASK_BITS(v, 10, 9)
+#define VE_DEC_H265_DEC_SPS_HDR_BIT_DEPTH_CHROMA_MINUS8(v) \
+ SHIFT_AND_MASK_BITS(v, 8, 6)
+#define VE_DEC_H265_DEC_SPS_HDR_BIT_DEPTH_LUMA_MINUS8(v) \
+ SHIFT_AND_MASK_BITS(v, 5, 3)
+#define VE_DEC_H265_DEC_SPS_HDR_CHROMA_FORMAT_IDC(v) \
+ SHIFT_AND_MASK_BITS(v, 1, 0)
+
+#define VE_DEC_H265_DEC_PIC_SIZE (VE_ENGINE_DEC_H265 + 0x08)
+
+#define VE_DEC_H265_DEC_PIC_SIZE_WIDTH(w) (((w) << 0) & GENMASK(13, 0))
+#define VE_DEC_H265_DEC_PIC_SIZE_HEIGHT(h) (((h) << 16) & GENMASK(29, 16))
+
+#define VE_DEC_H265_DEC_PCM_CTRL (VE_ENGINE_DEC_H265 + 0x0c)
+
+#define VE_DEC_H265_DEC_PCM_CTRL_FLAG_PCM_ENABLED BIT(15)
+#define VE_DEC_H265_DEC_PCM_CTRL_FLAG_PCM_LOOP_FILTER_DISABLED BIT(14)
+
+#define VE_DEC_H265_DEC_PCM_CTRL_LOG2_DIFF_MAX_MIN_PCM_LUMA_CODING_BLOCK_SIZE(v) \
+ SHIFT_AND_MASK_BITS(v, 11, 10)
+#define VE_DEC_H265_DEC_PCM_CTRL_LOG2_MIN_PCM_LUMA_CODING_BLOCK_SIZE_MINUS3(v) \
+ SHIFT_AND_MASK_BITS(v, 9, 8)
+#define VE_DEC_H265_DEC_PCM_CTRL_PCM_SAMPLE_BIT_DEPTH_CHROMA_MINUS1(v) \
+ SHIFT_AND_MASK_BITS(v, 7, 4)
+#define VE_DEC_H265_DEC_PCM_CTRL_PCM_SAMPLE_BIT_DEPTH_LUMA_MINUS1(v) \
+ SHIFT_AND_MASK_BITS(v, 3, 0)
+
+#define VE_DEC_H265_DEC_PPS_CTRL0 (VE_ENGINE_DEC_H265 + 0x10)
+
+#define VE_DEC_H265_DEC_PPS_CTRL0_FLAG_CU_QP_DELTA_ENABLED BIT(3)
+#define VE_DEC_H265_DEC_PPS_CTRL0_FLAG_TRANSFORM_SKIP_ENABLED BIT(2)
+#define VE_DEC_H265_DEC_PPS_CTRL0_FLAG_CONSTRAINED_INTRA_PRED BIT(1)
+#define VE_DEC_H265_DEC_PPS_CTRL0_FLAG_SIGN_DATA_HIDING_ENABLED BIT(0)
+
+#define VE_DEC_H265_DEC_PPS_CTRL0_PPS_CR_QP_OFFSET(v) \
+ SHIFT_AND_MASK_BITS(v, 29, 24)
+#define VE_DEC_H265_DEC_PPS_CTRL0_PPS_CB_QP_OFFSET(v) \
+ SHIFT_AND_MASK_BITS(v, 21, 16)
+#define VE_DEC_H265_DEC_PPS_CTRL0_INIT_QP_MINUS26(v) \
+ SHIFT_AND_MASK_BITS(v, 14, 8)
+#define VE_DEC_H265_DEC_PPS_CTRL0_DIFF_CU_QP_DELTA_DEPTH(v) \
+ SHIFT_AND_MASK_BITS(v, 5, 4)
+
+#define VE_DEC_H265_DEC_PPS_CTRL1 (VE_ENGINE_DEC_H265 + 0x14)
+
+#define VE_DEC_H265_DEC_PPS_CTRL1_FLAG_PPS_LOOP_FILTER_ACROSS_SLICES_ENABLED BIT(6)
+#define VE_DEC_H265_DEC_PPS_CTRL1_FLAG_LOOP_FILTER_ACROSS_TILES_ENABLED BIT(5)
+#define VE_DEC_H265_DEC_PPS_CTRL1_FLAG_ENTROPY_CODING_SYNC_ENABLED BIT(4)
+#define VE_DEC_H265_DEC_PPS_CTRL1_FLAG_TILES_ENABLED BIT(3)
+#define VE_DEC_H265_DEC_PPS_CTRL1_FLAG_TRANSQUANT_BYPASS_ENABLED BIT(2)
+#define VE_DEC_H265_DEC_PPS_CTRL1_FLAG_WEIGHTED_BIPRED BIT(1)
+#define VE_DEC_H265_DEC_PPS_CTRL1_FLAG_WEIGHTED_PRED BIT(0)
+
+#define VE_DEC_H265_DEC_PPS_CTRL1_LOG2_PARALLEL_MERGE_LEVEL_MINUS2(v) \
+ SHIFT_AND_MASK_BITS(v, 10, 8)
+
+#define VE_DEC_H265_SCALING_LIST_CTRL0 (VE_ENGINE_DEC_H265 + 0x18)
+
+#define VE_DEC_H265_SCALING_LIST_CTRL0_FLAG_ENABLED BIT(31)
+
+#define VE_DEC_H265_SCALING_LIST_CTRL0_SRAM (0 << 30)
+#define VE_DEC_H265_SCALING_LIST_CTRL0_DEFAULT (1 << 30)
+
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO0 (VE_ENGINE_DEC_H265 + 0x20)
+
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_COLLOCATED_FROM_L0 BIT(11)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_CABAC_INIT BIT(10)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_MVD_L1_ZERO BIT(9)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_SLICE_SAO_CHROMA BIT(8)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_SLICE_SAO_LUMA BIT(7)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_SLICE_TEMPORAL_MVP_ENABLE BIT(6)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_DEPENDENT_SLICE_SEGMENT BIT(1)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_FIRST_SLICE_SEGMENT_IN_PIC BIT(0)
+
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO0_PICTURE_TYPE(v) \
+ SHIFT_AND_MASK_BITS(v, 29, 28)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO0_FIVE_MINUS_MAX_NUM_MERGE_CAND(v) \
+ SHIFT_AND_MASK_BITS(v, 26, 24)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO0_NUM_REF_IDX_L1_ACTIVE_MINUS1(v) \
+ SHIFT_AND_MASK_BITS(v, 23, 20)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO0_NUM_REF_IDX_L0_ACTIVE_MINUS1(v) \
+ SHIFT_AND_MASK_BITS(v, 19, 16)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO0_COLLOCATED_REF_IDX(v) \
+ SHIFT_AND_MASK_BITS(v, 15, 12)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO0_COLOUR_PLANE_ID(v) \
+ SHIFT_AND_MASK_BITS(v, 5, 4)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO0_SLICE_TYPE(v) \
+ SHIFT_AND_MASK_BITS(v, 3, 2)
+
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO1 (VE_ENGINE_DEC_H265 + 0x24)
+
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO1_FLAG_SLICE_DEBLOCKING_FILTER_DISABLED BIT(23)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO1_FLAG_SLICE_LOOP_FILTER_ACROSS_SLICES_ENABLED BIT(22)
+
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO1_SLICE_TC_OFFSET_DIV2(v) \
+ SHIFT_AND_MASK_BITS(v, 31, 28)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO1_SLICE_BETA_OFFSET_DIV2(v) \
+ SHIFT_AND_MASK_BITS(v, 27, 24)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO1_SLICE_POC_BIGEST_IN_RPS_ST(v) \
+ ((v) ? BIT(21) : 0)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO1_SLICE_CR_QP_OFFSET(v) \
+ SHIFT_AND_MASK_BITS(v, 20, 16)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO1_SLICE_CB_QP_OFFSET(v) \
+ SHIFT_AND_MASK_BITS(v, 12, 8)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO1_SLICE_QP_DELTA(v) \
+ SHIFT_AND_MASK_BITS(v, 6, 0)
+
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO2 (VE_ENGINE_DEC_H265 + 0x28)
+
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO2_NUM_ENTRY_POINT_OFFSETS(v) \
+ SHIFT_AND_MASK_BITS(v, 21, 8)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO2_CHROMA_LOG2_WEIGHT_DENOM(v) \
+ SHIFT_AND_MASK_BITS(v, 6, 4)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO2_LUMA_LOG2_WEIGHT_DENOM(v) \
+ SHIFT_AND_MASK_BITS(v, 2, 0)
+
+#define VE_DEC_H265_DEC_CTB_ADDR (VE_ENGINE_DEC_H265 + 0x2c)
+
+#define VE_DEC_H265_DEC_CTB_ADDR_Y(y) SHIFT_AND_MASK_BITS(y, 25, 16)
+#define VE_DEC_H265_DEC_CTB_ADDR_X(x) SHIFT_AND_MASK_BITS(x, 9, 0)
+
+#define VE_DEC_H265_CTRL (VE_ENGINE_DEC_H265 + 0x30)
+
+#define VE_DEC_H265_CTRL_DDR_CONSISTENCY_EN BIT(31)
+#define VE_DEC_H265_CTRL_STCD_EN BIT(25)
+#define VE_DEC_H265_CTRL_EPTB_DEC_BYPASS_EN BIT(24)
+#define VE_DEC_H265_CTRL_TQ_BYPASS_EN BIT(12)
+#define VE_DEC_H265_CTRL_VLD_BYPASS_EN BIT(11)
+#define VE_DEC_H265_CTRL_NCRI_CACHE_DISABLE BIT(10)
+#define VE_DEC_H265_CTRL_ROTATE_SCALE_OUT_EN BIT(9)
+#define VE_DEC_H265_CTRL_MC_NO_WRITEBACK BIT(8)
+#define VE_DEC_H265_CTRL_VLD_DATA_REQ_IRQ_EN BIT(2)
+#define VE_DEC_H265_CTRL_ERROR_IRQ_EN BIT(1)
+#define VE_DEC_H265_CTRL_FINISH_IRQ_EN BIT(0)
+#define VE_DEC_H265_CTRL_IRQ_MASK \
+ (VE_DEC_H265_CTRL_FINISH_IRQ_EN | VE_DEC_H265_CTRL_ERROR_IRQ_EN | \
+ VE_DEC_H265_CTRL_VLD_DATA_REQ_IRQ_EN)
+
+#define VE_DEC_H265_TRIGGER (VE_ENGINE_DEC_H265 + 0x34)
+
+#define VE_DEC_H265_TRIGGER_STCD_VC1 (0x02 << 4)
+#define VE_DEC_H265_TRIGGER_STCD_AVS (0x01 << 4)
+#define VE_DEC_H265_TRIGGER_STCD_HEVC (0x00 << 4)
+#define VE_DEC_H265_TRIGGER_DEC_SLICE (0x08 << 0)
+#define VE_DEC_H265_TRIGGER_INIT_SWDEC (0x07 << 0)
+#define VE_DEC_H265_TRIGGER_BYTE_ALIGN (0x06 << 0)
+#define VE_DEC_H265_TRIGGER_GET_VLCUE (0x05 << 0)
+#define VE_DEC_H265_TRIGGER_GET_VLCSE (0x04 << 0)
+#define VE_DEC_H265_TRIGGER_FLUSH_BITS (0x03 << 0)
+#define VE_DEC_H265_TRIGGER_GET_BITS (0x02 << 0)
+#define VE_DEC_H265_TRIGGER_SHOW_BITS (0x01 << 0)
+
+#define VE_DEC_H265_STATUS (VE_ENGINE_DEC_H265 + 0x38)
+
+#define VE_DEC_H265_STATUS_STCD BIT(24)
+#define VE_DEC_H265_STATUS_STCD_BUSY BIT(21)
+#define VE_DEC_H265_STATUS_WB_BUSY BIT(20)
+#define VE_DEC_H265_STATUS_BS_DMA_BUSY BIT(19)
+#define VE_DEC_H265_STATUS_IQIT_BUSY BIT(18)
+#define VE_DEC_H265_STATUS_INTER_BUSY BIT(17)
+#define VE_DEC_H265_STATUS_MORE_DATA BIT(16)
+#define VE_DEC_H265_STATUS_VLD_BUSY BIT(14)
+#define VE_DEC_H265_STATUS_DEBLOCKING_BUSY BIT(13)
+#define VE_DEC_H265_STATUS_DEBLOCKING_DRAM_BUSY BIT(12)
+#define VE_DEC_H265_STATUS_INTRA_BUSY BIT(11)
+#define VE_DEC_H265_STATUS_SAO_BUSY BIT(10)
+#define VE_DEC_H265_STATUS_MVP_BUSY BIT(9)
+#define VE_DEC_H265_STATUS_SWDEC_BUSY BIT(8)
+#define VE_DEC_H265_STATUS_OVER_TIME BIT(3)
+#define VE_DEC_H265_STATUS_VLD_DATA_REQ BIT(2)
+#define VE_DEC_H265_STATUS_ERROR BIT(1)
+#define VE_DEC_H265_STATUS_SUCCESS BIT(0)
+#define VE_DEC_H265_STATUS_STCD_TYPE_MASK GENMASK(23, 22)
+#define VE_DEC_H265_STATUS_CHECK_MASK \
+ (VE_DEC_H265_STATUS_SUCCESS | VE_DEC_H265_STATUS_ERROR | \
+ VE_DEC_H265_STATUS_VLD_DATA_REQ)
+#define VE_DEC_H265_STATUS_CHECK_ERROR \
+ (VE_DEC_H265_STATUS_ERROR | VE_DEC_H265_STATUS_VLD_DATA_REQ)
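+/*
+ * Sketch of how an IRQ handler might use the masks above (illustrative;
+ * cedrus_read() and the CEDRUS_IRQ_* values live in the driver core, not
+ * in this header):
+ *
+ *   u32 status = cedrus_read(dev, VE_DEC_H265_STATUS);
+ *
+ *   if (!(status & VE_DEC_H265_STATUS_CHECK_MASK))
+ *           return CEDRUS_IRQ_NONE;
+ *   if (status & VE_DEC_H265_STATUS_CHECK_ERROR)
+ *           return CEDRUS_IRQ_ERROR;
+ *   return CEDRUS_IRQ_OK;
+ */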
+
+#define VE_DEC_H265_DEC_CTB_NUM (VE_ENGINE_DEC_H265 + 0x3c)
+
+#define VE_DEC_H265_BITS_ADDR (VE_ENGINE_DEC_H265 + 0x40)
+
+#define VE_DEC_H265_BITS_ADDR_FIRST_SLICE_DATA BIT(30)
+#define VE_DEC_H265_BITS_ADDR_LAST_SLICE_DATA BIT(29)
+#define VE_DEC_H265_BITS_ADDR_VALID_SLICE_DATA BIT(28)
+#define VE_DEC_H265_BITS_ADDR_BASE(a) (((a) >> 8) & GENMASK(27, 0))
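+/*
+ * Addresses handed to the engine are programmed in 256-byte units (hence
+ * the >> 8 in the *_ADDR_BASE() helpers here and below), which presumes
+ * the underlying buffers are at least 256-byte aligned.
+ */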
+
+#define VE_DEC_H265_BITS_OFFSET (VE_ENGINE_DEC_H265 + 0x44)
+#define VE_DEC_H265_BITS_LEN (VE_ENGINE_DEC_H265 + 0x48)
+
+#define VE_DEC_H265_BITS_END_ADDR (VE_ENGINE_DEC_H265 + 0x4c)
+
+#define VE_DEC_H265_BITS_END_ADDR_BASE(a) ((a) >> 8)
+
+#define VE_DEC_H265_SDRT_CTRL (VE_ENGINE_DEC_H265 + 0x50)
+#define VE_DEC_H265_SDRT_LUMA_ADDR (VE_ENGINE_DEC_H265 + 0x54)
+#define VE_DEC_H265_SDRT_CHROMA_ADDR (VE_ENGINE_DEC_H265 + 0x58)
+
+#define VE_DEC_H265_OUTPUT_FRAME_IDX (VE_ENGINE_DEC_H265 + 0x5c)
+
+#define VE_DEC_H265_NEIGHBOR_INFO_ADDR (VE_ENGINE_DEC_H265 + 0x60)
+
+#define VE_DEC_H265_NEIGHBOR_INFO_ADDR_BASE(a) ((a) >> 8)
+
+#define VE_DEC_H265_ENTRY_POINT_OFFSET_ADDR (VE_ENGINE_DEC_H265 + 0x64)
+#define VE_DEC_H265_TILE_START_CTB (VE_ENGINE_DEC_H265 + 0x68)
+#define VE_DEC_H265_TILE_END_CTB (VE_ENGINE_DEC_H265 + 0x6c)
+
+#define VE_DEC_H265_LOW_ADDR (VE_ENGINE_DEC_H265 + 0x80)
+
+#define VE_DEC_H265_LOW_ADDR_PRIMARY_CHROMA(a) \
+ SHIFT_AND_MASK_BITS(a, 31, 24)
+#define VE_DEC_H265_LOW_ADDR_SECONDARY_CHROMA(a) \
+ SHIFT_AND_MASK_BITS(a, 23, 16)
+#define VE_DEC_H265_LOW_ADDR_ENTRY_POINTS_BUF(a) \
+ SHIFT_AND_MASK_BITS(a, 7, 0)
+
+#define VE_DEC_H265_SRAM_OFFSET (VE_ENGINE_DEC_H265 + 0xe0)
+
+#define VE_DEC_H265_SRAM_OFFSET_PRED_WEIGHT_LUMA_L0 0x00
+#define VE_DEC_H265_SRAM_OFFSET_PRED_WEIGHT_CHROMA_L0 0x20
+#define VE_DEC_H265_SRAM_OFFSET_PRED_WEIGHT_LUMA_L1 0x60
+#define VE_DEC_H265_SRAM_OFFSET_PRED_WEIGHT_CHROMA_L1 0x80
+#define VE_DEC_H265_SRAM_OFFSET_FRAME_INFO 0x400
+#define VE_DEC_H265_SRAM_OFFSET_FRAME_INFO_UNIT 0x20
+#define VE_DEC_H265_SRAM_OFFSET_SCALING_LISTS 0x800
+#define VE_DEC_H265_SRAM_OFFSET_REF_PIC_LIST0 0xc00
+#define VE_DEC_H265_SRAM_OFFSET_REF_PIC_LIST1 0xc10
+
+#define VE_DEC_H265_SRAM_DATA (VE_ENGINE_DEC_H265 + 0xe4)
+
+#define VE_DEC_H265_SRAM_DATA_ADDR_BASE(a) ((a) >> 8)
+#define VE_DEC_H265_SRAM_REF_PIC_LIST_LT_REF BIT(7)
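+/*
+ * The SRAM areas above are reached indirectly: write one of the offsets
+ * to VE_DEC_H265_SRAM_OFFSET, then push 32-bit words through
+ * VE_DEC_H265_SRAM_DATA, which is assumed to auto-increment. A sketch:
+ *
+ *   cedrus_write(dev, VE_DEC_H265_SRAM_OFFSET,
+ *                VE_DEC_H265_SRAM_OFFSET_REF_PIC_LIST0);
+ *   for (i = 0; i < count; i++)
+ *           cedrus_write(dev, VE_DEC_H265_SRAM_DATA, data[i]);
+ */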
+
#define VE_H264_SPS 0x200
#define VE_H264_SPS_MBS_ONLY BIT(18)
#define VE_H264_SPS_MB_ADAPTIVE_FRAME_FIELD BIT(17)
@@ -267,13 +554,16 @@
VE_H264_CTRL_SLICE_DECODE_INT)
#define VE_H264_TRIGGER_TYPE 0x224
+#define VE_H264_TRIGGER_TYPE_N_BITS(x) (((x) & 0x3f) << 8)
#define VE_H264_TRIGGER_TYPE_AVC_SLICE_DECODE (8 << 0)
#define VE_H264_TRIGGER_TYPE_INIT_SWDEC (7 << 0)
+#define VE_H264_TRIGGER_TYPE_FLUSH_BITS (3 << 0)
#define VE_H264_STATUS 0x228
#define VE_H264_STATUS_VLD_DATA_REQ_INT VE_H264_CTRL_VLD_DATA_REQ_INT
#define VE_H264_STATUS_DECODE_ERR_INT VE_H264_CTRL_DECODE_ERR_INT
#define VE_H264_STATUS_SLICE_DECODE_INT VE_H264_CTRL_SLICE_DECODE_INT
+#define VE_H264_STATUS_VLD_BUSY BIT(8)
#define VE_H264_STATUS_INT_MASK VE_H264_CTRL_INT_MASK
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_video.c b/drivers/staging/media/sunxi/cedrus/cedrus_video.c
index eeee3efd247b..15cf1f10221b 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_video.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_video.c
@@ -29,8 +29,8 @@
#define CEDRUS_MIN_WIDTH 16U
#define CEDRUS_MIN_HEIGHT 16U
-#define CEDRUS_MAX_WIDTH 3840U
-#define CEDRUS_MAX_HEIGHT 2160U
+#define CEDRUS_MAX_WIDTH 4096U
+#define CEDRUS_MAX_HEIGHT 2304U
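+/*
+ * 4096x2304 presumably matches the 4K capability of the H265 engine
+ * (cf. VE_MODE_PIC_WIDTH_IS_4096 in cedrus_regs.h).
+ */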
static struct cedrus_format cedrus_formats[] = {
{
@@ -42,6 +42,11 @@ static struct cedrus_format cedrus_formats[] = {
.directions = CEDRUS_DECODE_SRC,
},
{
+ .pixelformat = V4L2_PIX_FMT_HEVC_SLICE,
+ .directions = CEDRUS_DECODE_SRC,
+ .capabilities = CEDRUS_CAPABILITY_H265_DEC,
+ },
+ {
.pixelformat = V4L2_PIX_FMT_SUNXI_TILED_NV12,
.directions = CEDRUS_DECODE_DST,
},
@@ -62,34 +67,31 @@ static inline struct cedrus_ctx *cedrus_file2ctx(struct file *file)
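+/*
+ * Behaviour note: the lookup below now doubles as a fallback. When the
+ * requested pixelformat is not usable, the first format matching both
+ * the direction and the device capabilities is returned instead of
+ * NULL, letting try_fmt adjust the format rather than fail.
+ */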
static struct cedrus_format *cedrus_find_format(u32 pixelformat, u32 directions,
unsigned int capabilities)
{
+ struct cedrus_format *first_valid_fmt = NULL;
struct cedrus_format *fmt;
unsigned int i;
for (i = 0; i < CEDRUS_FORMATS_COUNT; i++) {
fmt = &cedrus_formats[i];
- if (fmt->capabilities && (fmt->capabilities & capabilities) !=
- fmt->capabilities)
+ if ((fmt->capabilities & capabilities) != fmt->capabilities ||
+ !(fmt->directions & directions))
continue;
- if (fmt->pixelformat == pixelformat &&
- (fmt->directions & directions) != 0)
+ if (fmt->pixelformat == pixelformat)
break;
+
+ if (!first_valid_fmt)
+ first_valid_fmt = fmt;
}
if (i == CEDRUS_FORMATS_COUNT)
- return NULL;
+ return first_valid_fmt;
return &cedrus_formats[i];
}
-static bool cedrus_check_format(u32 pixelformat, u32 directions,
- unsigned int capabilities)
-{
- return cedrus_find_format(pixelformat, directions, capabilities);
-}
-
-static void cedrus_prepare_format(struct v4l2_pix_format *pix_fmt)
+void cedrus_prepare_format(struct v4l2_pix_format *pix_fmt)
{
unsigned int width = pix_fmt->width;
unsigned int height = pix_fmt->height;
@@ -105,9 +107,11 @@ static void cedrus_prepare_format(struct v4l2_pix_format *pix_fmt)
switch (pix_fmt->pixelformat) {
case V4L2_PIX_FMT_MPEG2_SLICE:
case V4L2_PIX_FMT_H264_SLICE:
+ case V4L2_PIX_FMT_HEVC_SLICE:
/* Zero bytes per line for encoded source. */
bytesperline = 0;
-
+ /* Choose some minimum size since this can't be 0 */
+ sizeimage = max_t(u32, SZ_1K, sizeimage);
break;
case V4L2_PIX_FMT_SUNXI_TILED_NV12:
@@ -214,16 +218,7 @@ static int cedrus_g_fmt_vid_cap(struct file *file, void *priv,
{
struct cedrus_ctx *ctx = cedrus_file2ctx(file);
- /* Fall back to a dummy default for lack of hardware configuration. */
- if (!ctx->dst_fmt.width || !ctx->dst_fmt.height) {
- f->fmt.pix.pixelformat = V4L2_PIX_FMT_SUNXI_TILED_NV12;
- cedrus_prepare_format(&f->fmt.pix);
-
- return 0;
- }
-
f->fmt.pix = ctx->dst_fmt;
-
return 0;
}
@@ -232,17 +227,7 @@ static int cedrus_g_fmt_vid_out(struct file *file, void *priv,
{
struct cedrus_ctx *ctx = cedrus_file2ctx(file);
- /* Fall back to a dummy default for lack of hardware configuration. */
- if (!ctx->dst_fmt.width || !ctx->dst_fmt.height) {
- f->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG2_SLICE;
- f->fmt.pix.sizeimage = SZ_1K;
- cedrus_prepare_format(&f->fmt.pix);
-
- return 0;
- }
-
f->fmt.pix = ctx->src_fmt;
-
return 0;
}
@@ -252,11 +237,14 @@ static int cedrus_try_fmt_vid_cap(struct file *file, void *priv,
struct cedrus_ctx *ctx = cedrus_file2ctx(file);
struct cedrus_dev *dev = ctx->dev;
struct v4l2_pix_format *pix_fmt = &f->fmt.pix;
+ struct cedrus_format *fmt =
+ cedrus_find_format(pix_fmt->pixelformat, CEDRUS_DECODE_DST,
+ dev->capabilities);
- if (!cedrus_check_format(pix_fmt->pixelformat, CEDRUS_DECODE_DST,
- dev->capabilities))
+ if (!fmt)
return -EINVAL;
+ pix_fmt->pixelformat = fmt->pixelformat;
cedrus_prepare_format(pix_fmt);
return 0;
@@ -268,15 +256,14 @@ static int cedrus_try_fmt_vid_out(struct file *file, void *priv,
struct cedrus_ctx *ctx = cedrus_file2ctx(file);
struct cedrus_dev *dev = ctx->dev;
struct v4l2_pix_format *pix_fmt = &f->fmt.pix;
+ struct cedrus_format *fmt =
+ cedrus_find_format(pix_fmt->pixelformat, CEDRUS_DECODE_SRC,
+ dev->capabilities);
- if (!cedrus_check_format(pix_fmt->pixelformat, CEDRUS_DECODE_SRC,
- dev->capabilities))
- return -EINVAL;
-
- /* Source image size has to be provided by userspace. */
- if (pix_fmt->sizeimage == 0)
+ if (!fmt)
return -EINVAL;
+ pix_fmt->pixelformat = fmt->pixelformat;
cedrus_prepare_format(pix_fmt);
return 0;
@@ -322,6 +309,17 @@ static int cedrus_s_fmt_vid_out(struct file *file, void *priv,
ctx->src_fmt = f->fmt.pix;
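+	/*
+	 * Only H264 slices opt in to V4L2's "hold capture buffer" flag:
+	 * several output (slice) buffers may make up one capture frame,
+	 * so the capture buffer must not complete after every slice.
+	 */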
+ switch (ctx->src_fmt.pixelformat) {
+ case V4L2_PIX_FMT_H264_SLICE:
+ vq->subsystem_flags |=
+ VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF;
+ break;
+ default:
+ vq->subsystem_flags &=
+ ~VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF;
+ break;
+ }
+
/* Propagate colorspace information to capture. */
ctx->dst_fmt.colorspace = f->fmt.pix.colorspace;
ctx->dst_fmt.xfer_func = f->fmt.pix.xfer_func;
@@ -355,6 +353,9 @@ const struct v4l2_ioctl_ops cedrus_ioctl_ops = {
.vidioc_streamon = v4l2_m2m_ioctl_streamon,
.vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
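+	/*
+	 * The stateless decoder-cmd helpers below service V4L2_DEC_CMD_FLUSH,
+	 * which pairs with the hold-capture-buffer handling in s_fmt to
+	 * flush a held capture buffer once all slices of a frame are queued.
+	 */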
+ .vidioc_try_decoder_cmd = v4l2_m2m_ioctl_stateless_try_decoder_cmd,
+ .vidioc_decoder_cmd = v4l2_m2m_ioctl_stateless_decoder_cmd,
+
.vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
@@ -364,21 +365,12 @@ static int cedrus_queue_setup(struct vb2_queue *vq, unsigned int *nbufs,
struct device *alloc_devs[])
{
struct cedrus_ctx *ctx = vb2_get_drv_priv(vq);
- struct cedrus_dev *dev = ctx->dev;
struct v4l2_pix_format *pix_fmt;
- u32 directions;
- if (V4L2_TYPE_IS_OUTPUT(vq->type)) {
- directions = CEDRUS_DECODE_SRC;
+ if (V4L2_TYPE_IS_OUTPUT(vq->type))
pix_fmt = &ctx->src_fmt;
- } else {
- directions = CEDRUS_DECODE_DST;
+ else
pix_fmt = &ctx->dst_fmt;
- }
-
- if (!cedrus_check_format(pix_fmt->pixelformat, directions,
- dev->capabilities))
- return -EINVAL;
if (*nplanes) {
if (sizes[0] < pix_fmt->sizeimage)
@@ -453,6 +445,10 @@ static int cedrus_start_streaming(struct vb2_queue *vq, unsigned int count)
ctx->current_codec = CEDRUS_CODEC_H264;
break;
+ case V4L2_PIX_FMT_HEVC_SLICE:
+ ctx->current_codec = CEDRUS_CODEC_H265;
+ break;
+
default:
return -EINVAL;
}
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_video.h b/drivers/staging/media/sunxi/cedrus/cedrus_video.h
index 0e4f7a8cccf2..05050c0a0921 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_video.h
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_video.h
@@ -26,5 +26,6 @@ extern const struct v4l2_ioctl_ops cedrus_ioctl_ops;
int cedrus_queue_init(void *priv, struct vb2_queue *src_vq,
struct vb2_queue *dst_vq);
+void cedrus_prepare_format(struct v4l2_pix_format *pix_fmt);
#endif
diff --git a/drivers/staging/most/sound/sound.c b/drivers/staging/most/sound/sound.c
index 1359f289ffd5..723d0bd1cc21 100644
--- a/drivers/staging/most/sound/sound.c
+++ b/drivers/staging/most/sound/sound.c
@@ -344,8 +344,7 @@ static int pcm_hw_params(struct snd_pcm_substream *substream,
pr_err("Requested number of channels not supported.\n");
return -EINVAL;
}
- return snd_pcm_lib_alloc_vmalloc_buffer(substream,
- params_buffer_bytes(hw_params));
+ return snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params));
}
/**
@@ -359,7 +358,7 @@ static int pcm_hw_params(struct snd_pcm_substream *substream,
*/
static int pcm_hw_free(struct snd_pcm_substream *substream)
{
- return snd_pcm_lib_free_vmalloc_buffer(substream);
+ return snd_pcm_lib_free_pages(substream);
}
/**
@@ -469,7 +468,6 @@ static const struct snd_pcm_ops pcm_ops = {
.prepare = pcm_prepare,
.trigger = pcm_trigger,
.pointer = pcm_pointer,
- .page = snd_pcm_lib_get_vmalloc_page,
};
static int split_arg_list(char *buf, u16 *ch_num, char **sample_res)
@@ -663,6 +661,8 @@ skip_adpt_alloc:
pcm->private_data = channel;
strscpy(pcm->name, device_name, sizeof(pcm->name));
snd_pcm_set_ops(pcm, direction, &pcm_ops);
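+	/*
+	 * Replaces the removed snd_pcm_lib_*_vmalloc_buffer() helpers: with
+	 * SNDRV_DMA_TYPE_VMALLOC preallocation (size 0, max 0), the PCM core
+	 * allocates the vmalloc buffer in hw_params via
+	 * snd_pcm_lib_malloc_pages() and provides the .page callback itself.
+	 */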
+ snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_VMALLOC,
+ NULL, 0, 0);
return 0;
diff --git a/drivers/staging/vboxsf/Kconfig b/drivers/staging/vboxsf/Kconfig
deleted file mode 100644
index b84586ae08b3..000000000000
--- a/drivers/staging/vboxsf/Kconfig
+++ /dev/null
@@ -1,10 +0,0 @@
-config VBOXSF_FS
- tristate "VirtualBox guest shared folder (vboxsf) support"
- depends on X86 && VBOXGUEST
- select NLS
- help
- VirtualBox hosts can share folders with guests; this driver
- implements the Linux-guest side of this, allowing folders exported
- by the host to be mounted under Linux.
-
- If you want to use shared folders in VirtualBox guests, answer Y or M.
diff --git a/drivers/staging/vboxsf/Makefile b/drivers/staging/vboxsf/Makefile
deleted file mode 100644
index 9e4328e79623..000000000000
--- a/drivers/staging/vboxsf/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-# SPDX-License-Identifier: MIT
-
-obj-$(CONFIG_VBOXSF_FS) += vboxsf.o
-
-vboxsf-y := dir.o file.o utils.o vboxsf_wrappers.o super.o
diff --git a/drivers/staging/vboxsf/TODO b/drivers/staging/vboxsf/TODO
deleted file mode 100644
index 8b9193d0d4f0..000000000000
--- a/drivers/staging/vboxsf/TODO
+++ /dev/null
@@ -1,7 +0,0 @@
-TODO:
-- Find a file-system developer to review this and give their Reviewed-By
-- Address any items coming up during review
-- Move to fs/vboxfs
-
-Please send any patches to Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-and Hans de Goede <hdegoede@redhat.com>
diff --git a/drivers/staging/vboxsf/dir.c b/drivers/staging/vboxsf/dir.c
deleted file mode 100644
index f260b5cc1646..000000000000
--- a/drivers/staging/vboxsf/dir.c
+++ /dev/null
@@ -1,418 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * VirtualBox Guest Shared Folders support: Directory inode and file operations
- *
- * Copyright (C) 2006-2018 Oracle Corporation
- */
-
-#include <linux/namei.h>
-#include <linux/vbox_utils.h>
-#include "vfsmod.h"
-
-static int vboxsf_dir_open(struct inode *inode, struct file *file)
-{
- struct vboxsf_sbi *sbi = VBOXSF_SBI(inode->i_sb);
- struct shfl_createparms params = {};
- struct vboxsf_dir_info *sf_d;
- int err;
-
- sf_d = vboxsf_dir_info_alloc();
- if (!sf_d)
- return -ENOMEM;
-
- params.handle = SHFL_HANDLE_NIL;
- params.create_flags = SHFL_CF_DIRECTORY | SHFL_CF_ACT_OPEN_IF_EXISTS |
- SHFL_CF_ACT_FAIL_IF_NEW | SHFL_CF_ACCESS_READ;
-
- err = vboxsf_create_at_dentry(file_dentry(file), &params);
- if (err)
- goto err_free_dir_info;
-
- if (params.result != SHFL_FILE_EXISTS) {
- err = -ENOENT;
- goto err_close;
- }
-
- err = vboxsf_dir_read_all(sbi, sf_d, params.handle);
- if (err)
- goto err_close;
-
- vboxsf_close(sbi->root, params.handle);
- file->private_data = sf_d;
- return 0;
-
-err_close:
- vboxsf_close(sbi->root, params.handle);
-err_free_dir_info:
- vboxsf_dir_info_free(sf_d);
- return err;
-}
-
-static int vboxsf_dir_release(struct inode *inode, struct file *file)
-{
- if (file->private_data)
- vboxsf_dir_info_free(file->private_data);
-
- return 0;
-}
-
-static unsigned int vboxsf_get_d_type(u32 mode)
-{
- unsigned int d_type;
-
- switch (mode & SHFL_TYPE_MASK) {
- case SHFL_TYPE_FIFO:
- d_type = DT_FIFO;
- break;
- case SHFL_TYPE_DEV_CHAR:
- d_type = DT_CHR;
- break;
- case SHFL_TYPE_DIRECTORY:
- d_type = DT_DIR;
- break;
- case SHFL_TYPE_DEV_BLOCK:
- d_type = DT_BLK;
- break;
- case SHFL_TYPE_FILE:
- d_type = DT_REG;
- break;
- case SHFL_TYPE_SYMLINK:
- d_type = DT_LNK;
- break;
- case SHFL_TYPE_SOCKET:
- d_type = DT_SOCK;
- break;
- case SHFL_TYPE_WHITEOUT:
- d_type = DT_WHT;
- break;
- default:
- d_type = DT_UNKNOWN;
- break;
- }
- return d_type;
-}
-
-static bool vboxsf_dir_emit(struct file *dir, struct dir_context *ctx)
-{
- struct vboxsf_sbi *sbi = VBOXSF_SBI(file_inode(dir)->i_sb);
- struct vboxsf_dir_info *sf_d = dir->private_data;
- struct shfl_dirinfo *info;
- struct vboxsf_dir_buf *b;
- unsigned int d_type;
- loff_t i, cur = 0;
- ino_t fake_ino;
- size_t size;
- int err;
-
- list_for_each_entry(b, &sf_d->info_list, head) {
-try_next_entry:
- if (ctx->pos >= cur + b->entries) {
- cur += b->entries;
- continue;
- }
-
- /*
- * Note the vboxsf_dir_info objects we are iterating over here
- * are variable sized, so the info pointer may end up being
- * unaligned. This is how we get the data from the host.
- * Since vboxsf is only supported on x86 machines this is not
- * a problem.
- */
- for (i = 0, info = b->buf; i < ctx->pos - cur; i++) {
- size = offsetof(struct shfl_dirinfo, name.string) +
- info->name.size;
- info = (struct shfl_dirinfo *)((uintptr_t)info + size);
- }
-
- /* Info now points to the right entry, emit it. */
- d_type = vboxsf_get_d_type(info->info.attr.mode);
-
- /*
- * On 32 bit systems pos is 64 bit signed, while ino is 32 bit
- * unsigned so fake_ino may overflow, check for this.
- */
- if ((ino_t)(ctx->pos + 1) != (u64)(ctx->pos + 1)) {
- vbg_err("vboxsf: fake ino overflow, truncating dir\n");
- return false;
- }
- fake_ino = ctx->pos + 1;
-
- if (sbi->nls) {
- char d_name[NAME_MAX];
-
- err = vboxsf_nlscpy(sbi, d_name, NAME_MAX,
- info->name.string.utf8,
- info->name.length);
- if (err) {
- /* skip erroneous entry and proceed */
- ctx->pos += 1;
- goto try_next_entry;
- }
-
- return dir_emit(ctx, d_name, strlen(d_name),
- fake_ino, d_type);
- }
-
- return dir_emit(ctx, info->name.string.utf8, info->name.length,
- fake_ino, d_type);
- }
-
- return false;
-}
-
-static int vboxsf_dir_iterate(struct file *dir, struct dir_context *ctx)
-{
- bool keep_iterating;
-
- for (keep_iterating = true; keep_iterating; ctx->pos += 1)
- keep_iterating = vboxsf_dir_emit(dir, ctx);
-
- return 0;
-}
-
-const struct file_operations vboxsf_dir_fops = {
- .open = vboxsf_dir_open,
- .iterate = vboxsf_dir_iterate,
- .release = vboxsf_dir_release,
- .read = generic_read_dir,
- .llseek = generic_file_llseek,
-};
-
-/*
- * This is called during name resolution/lookup to check if the @dentry in
- * the cache is still valid. The job is handled by vboxsf_inode_revalidate.
- */
-static int vboxsf_dentry_revalidate(struct dentry *dentry, unsigned int flags)
-{
- if (flags & LOOKUP_RCU)
- return -ECHILD;
-
- if (d_really_is_positive(dentry))
- return vboxsf_inode_revalidate(dentry) == 0;
- else
- return vboxsf_stat_dentry(dentry, NULL) == -ENOENT;
-}
-
-const struct dentry_operations vboxsf_dentry_ops = {
- .d_revalidate = vboxsf_dentry_revalidate
-};
-
-/* iops */
-
-static struct dentry *vboxsf_dir_lookup(struct inode *parent,
- struct dentry *dentry,
- unsigned int flags)
-{
- struct vboxsf_sbi *sbi = VBOXSF_SBI(parent->i_sb);
- struct shfl_fsobjinfo fsinfo;
- struct inode *inode;
- int err;
-
- dentry->d_time = jiffies;
-
- err = vboxsf_stat_dentry(dentry, &fsinfo);
- if (err) {
- inode = (err == -ENOENT) ? NULL : ERR_PTR(err);
- } else {
- inode = vboxsf_new_inode(parent->i_sb);
- if (!IS_ERR(inode))
- vboxsf_init_inode(sbi, inode, &fsinfo);
- }
-
- return d_splice_alias(inode, dentry);
-}
-
-static int vboxsf_dir_instantiate(struct inode *parent, struct dentry *dentry,
- struct shfl_fsobjinfo *info)
-{
- struct vboxsf_sbi *sbi = VBOXSF_SBI(parent->i_sb);
- struct vboxsf_inode *sf_i;
- struct inode *inode;
-
- inode = vboxsf_new_inode(parent->i_sb);
- if (IS_ERR(inode))
- return PTR_ERR(inode);
-
- sf_i = VBOXSF_I(inode);
- /* The host may have given us different attr than requested */
- sf_i->force_restat = 1;
- vboxsf_init_inode(sbi, inode, info);
-
- d_instantiate(dentry, inode);
-
- return 0;
-}
-
-static int vboxsf_dir_create(struct inode *parent, struct dentry *dentry,
- umode_t mode, int is_dir)
-{
- struct vboxsf_inode *sf_parent_i = VBOXSF_I(parent);
- struct vboxsf_sbi *sbi = VBOXSF_SBI(parent->i_sb);
- struct shfl_createparms params = {};
- int err;
-
- params.handle = SHFL_HANDLE_NIL;
- params.create_flags = SHFL_CF_ACT_CREATE_IF_NEW |
- SHFL_CF_ACT_FAIL_IF_EXISTS |
- SHFL_CF_ACCESS_READWRITE |
- (is_dir ? SHFL_CF_DIRECTORY : 0);
- params.info.attr.mode = (mode & 0777) |
- (is_dir ? SHFL_TYPE_DIRECTORY : SHFL_TYPE_FILE);
- params.info.attr.additional = SHFLFSOBJATTRADD_NOTHING;
-
- err = vboxsf_create_at_dentry(dentry, &params);
- if (err)
- return err;
-
- if (params.result != SHFL_FILE_CREATED)
- return -EPERM;
-
- vboxsf_close(sbi->root, params.handle);
-
- err = vboxsf_dir_instantiate(parent, dentry, &params.info);
- if (err)
- return err;
-
- /* parent directory access/change time changed */
- sf_parent_i->force_restat = 1;
-
- return 0;
-}
-
-static int vboxsf_dir_mkfile(struct inode *parent, struct dentry *dentry,
- umode_t mode, bool excl)
-{
- return vboxsf_dir_create(parent, dentry, mode, 0);
-}
-
-static int vboxsf_dir_mkdir(struct inode *parent, struct dentry *dentry,
- umode_t mode)
-{
- return vboxsf_dir_create(parent, dentry, mode, 1);
-}
-
-static int vboxsf_dir_unlink(struct inode *parent, struct dentry *dentry)
-{
- struct vboxsf_sbi *sbi = VBOXSF_SBI(parent->i_sb);
- struct vboxsf_inode *sf_parent_i = VBOXSF_I(parent);
- struct inode *inode = d_inode(dentry);
- struct shfl_string *path;
- u32 flags;
- int err;
-
- if (S_ISDIR(inode->i_mode))
- flags = SHFL_REMOVE_DIR;
- else
- flags = SHFL_REMOVE_FILE;
-
- if (S_ISLNK(inode->i_mode))
- flags |= SHFL_REMOVE_SYMLINK;
-
- path = vboxsf_path_from_dentry(sbi, dentry);
- if (IS_ERR(path))
- return PTR_ERR(path);
-
- err = vboxsf_remove(sbi->root, path, flags);
- __putname(path);
- if (err)
- return err;
-
- /* parent directory access/change time changed */
- sf_parent_i->force_restat = 1;
-
- return 0;
-}
-
-static int vboxsf_dir_rename(struct inode *old_parent,
- struct dentry *old_dentry,
- struct inode *new_parent,
- struct dentry *new_dentry,
- unsigned int flags)
-{
- struct vboxsf_sbi *sbi = VBOXSF_SBI(old_parent->i_sb);
- struct vboxsf_inode *sf_old_parent_i = VBOXSF_I(old_parent);
- struct vboxsf_inode *sf_new_parent_i = VBOXSF_I(new_parent);
- u32 shfl_flags = SHFL_RENAME_FILE | SHFL_RENAME_REPLACE_IF_EXISTS;
- struct shfl_string *old_path, *new_path;
- int err;
-
- if (flags)
- return -EINVAL;
-
- old_path = vboxsf_path_from_dentry(sbi, old_dentry);
- if (IS_ERR(old_path))
- return PTR_ERR(old_path);
-
- new_path = vboxsf_path_from_dentry(sbi, new_dentry);
- if (IS_ERR(new_path)) {
- err = PTR_ERR(new_path);
- goto err_put_old_path;
- }
-
- if (d_inode(old_dentry)->i_mode & S_IFDIR)
- shfl_flags = 0;
-
- err = vboxsf_rename(sbi->root, old_path, new_path, shfl_flags);
- if (err == 0) {
- /* parent directories access/change time changed */
- sf_new_parent_i->force_restat = 1;
- sf_old_parent_i->force_restat = 1;
- }
-
- __putname(new_path);
-err_put_old_path:
- __putname(old_path);
- return err;
-}
-
-static int vboxsf_dir_symlink(struct inode *parent, struct dentry *dentry,
- const char *symname)
-{
- struct vboxsf_inode *sf_parent_i = VBOXSF_I(parent);
- struct vboxsf_sbi *sbi = VBOXSF_SBI(parent->i_sb);
- int symname_size = strlen(symname) + 1;
- struct shfl_string *path, *ssymname;
- struct shfl_fsobjinfo info;
- int err;
-
- path = vboxsf_path_from_dentry(sbi, dentry);
- if (IS_ERR(path))
- return PTR_ERR(path);
-
- ssymname = kmalloc(SHFLSTRING_HEADER_SIZE + symname_size, GFP_KERNEL);
- if (!ssymname) {
- __putname(path);
- return -ENOMEM;
- }
- ssymname->length = symname_size - 1;
- ssymname->size = symname_size;
- memcpy(ssymname->string.utf8, symname, symname_size);
-
- err = vboxsf_symlink(sbi->root, path, ssymname, &info);
- kfree(ssymname);
- __putname(path);
- if (err) {
- /* -EROFS means symlinks are not supported -> -EPERM */
- return (err == -EROFS) ? -EPERM : err;
- }
-
- err = vboxsf_dir_instantiate(parent, dentry, &info);
- if (err)
- return err;
-
- /* parent directory access/change time changed */
- sf_parent_i->force_restat = 1;
- return 0;
-}
-
-const struct inode_operations vboxsf_dir_iops = {
- .lookup = vboxsf_dir_lookup,
- .create = vboxsf_dir_mkfile,
- .mkdir = vboxsf_dir_mkdir,
- .rmdir = vboxsf_dir_unlink,
- .unlink = vboxsf_dir_unlink,
- .rename = vboxsf_dir_rename,
- .symlink = vboxsf_dir_symlink,
- .getattr = vboxsf_getattr,
- .setattr = vboxsf_setattr,
-};
diff --git a/drivers/staging/vboxsf/file.c b/drivers/staging/vboxsf/file.c
deleted file mode 100644
index 4b61ccf83fca..000000000000
--- a/drivers/staging/vboxsf/file.c
+++ /dev/null
@@ -1,370 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * VirtualBox Guest Shared Folders support: Regular file inode and file ops.
- *
- * Copyright (C) 2006-2018 Oracle Corporation
- */
-
-#include <linux/mm.h>
-#include <linux/page-flags.h>
-#include <linux/pagemap.h>
-#include <linux/highmem.h>
-#include <linux/sizes.h>
-#include "vfsmod.h"
-
-struct vboxsf_handle {
- u64 handle;
- u32 root;
- u32 access_flags;
- struct kref refcount;
- struct list_head head;
-};
-
-static int vboxsf_file_open(struct inode *inode, struct file *file)
-{
- struct vboxsf_inode *sf_i = VBOXSF_I(inode);
- struct shfl_createparms params = {};
- struct vboxsf_handle *sf_handle;
- u32 access_flags = 0;
- int err;
-
- sf_handle = kmalloc(sizeof(*sf_handle), GFP_KERNEL);
- if (!sf_handle)
- return -ENOMEM;
-
- /*
- * We check the value of params.handle afterwards to find out if
- * the call succeeded or failed, as the API does not seem to cleanly
- * distinguish error and informational messages.
- *
- * Furthermore, we must set params.handle to SHFL_HANDLE_NIL to
- * make the shared folders host service use our mode parameter.
- */
- params.handle = SHFL_HANDLE_NIL;
- if (file->f_flags & O_CREAT) {
- params.create_flags |= SHFL_CF_ACT_CREATE_IF_NEW;
- /*
- * We ignore O_EXCL, as the Linux kernel seems to call create
- * beforehand itself, so O_EXCL should always fail.
- */
- if (file->f_flags & O_TRUNC)
- params.create_flags |= SHFL_CF_ACT_OVERWRITE_IF_EXISTS;
- else
- params.create_flags |= SHFL_CF_ACT_OPEN_IF_EXISTS;
- } else {
- params.create_flags |= SHFL_CF_ACT_FAIL_IF_NEW;
- if (file->f_flags & O_TRUNC)
- params.create_flags |= SHFL_CF_ACT_OVERWRITE_IF_EXISTS;
- }
-
- switch (file->f_flags & O_ACCMODE) {
- case O_RDONLY:
- access_flags |= SHFL_CF_ACCESS_READ;
- break;
-
- case O_WRONLY:
- access_flags |= SHFL_CF_ACCESS_WRITE;
- break;
-
- case O_RDWR:
- access_flags |= SHFL_CF_ACCESS_READWRITE;
- break;
-
- default:
- WARN_ON(1);
- }
-
- if (file->f_flags & O_APPEND)
- access_flags |= SHFL_CF_ACCESS_APPEND;
-
- params.create_flags |= access_flags;
- params.info.attr.mode = inode->i_mode;
-
- err = vboxsf_create_at_dentry(file_dentry(file), &params);
- if (err == 0 && params.handle == SHFL_HANDLE_NIL)
- err = (params.result == SHFL_FILE_EXISTS) ? -EEXIST : -ENOENT;
- if (err) {
- kfree(sf_handle);
- return err;
- }
-
- /* the host may have given us different attr than requested */
- sf_i->force_restat = 1;
-
- /* init our handle struct and add it to the inode's handles list */
- sf_handle->handle = params.handle;
- sf_handle->root = VBOXSF_SBI(inode->i_sb)->root;
- sf_handle->access_flags = access_flags;
- kref_init(&sf_handle->refcount);
-
- mutex_lock(&sf_i->handle_list_mutex);
- list_add(&sf_handle->head, &sf_i->handle_list);
- mutex_unlock(&sf_i->handle_list_mutex);
-
- file->private_data = sf_handle;
- return 0;
-}
-
-static void vboxsf_handle_release(struct kref *refcount)
-{
- struct vboxsf_handle *sf_handle =
- container_of(refcount, struct vboxsf_handle, refcount);
-
- vboxsf_close(sf_handle->root, sf_handle->handle);
- kfree(sf_handle);
-}
-
-static int vboxsf_file_release(struct inode *inode, struct file *file)
-{
- struct vboxsf_inode *sf_i = VBOXSF_I(inode);
- struct vboxsf_handle *sf_handle = file->private_data;
-
- /*
- * When a file is closed on our (the guest) side, we want any subsequent
- * accesses done on the host side to see all changes done from our side.
- */
- filemap_write_and_wait(inode->i_mapping);
-
- mutex_lock(&sf_i->handle_list_mutex);
- list_del(&sf_handle->head);
- mutex_unlock(&sf_i->handle_list_mutex);
-
- kref_put(&sf_handle->refcount, vboxsf_handle_release);
- return 0;
-}
-
-/*
- * Write back dirty pages now, because there may not be any suitable
- * open files later
- */
-static void vboxsf_vma_close(struct vm_area_struct *vma)
-{
- filemap_write_and_wait(vma->vm_file->f_mapping);
-}
-
-static const struct vm_operations_struct vboxsf_file_vm_ops = {
- .close = vboxsf_vma_close,
- .fault = filemap_fault,
- .map_pages = filemap_map_pages,
-};
-
-static int vboxsf_file_mmap(struct file *file, struct vm_area_struct *vma)
-{
- int err;
-
- err = generic_file_mmap(file, vma);
- if (!err)
- vma->vm_ops = &vboxsf_file_vm_ops;
-
- return err;
-}
-
-/*
- * Note that since we are accessing files on the host's filesystem, files
- * may always be changed underneath us by the host!
- *
- * The vboxsf API between the guest and the host does not offer any functions
- * to deal with this. There is no inode-generation to check for changes, no
- * events / callback on changes and no way to lock files.
- *
- * To avoid returning stale data when a file gets *opened* on our (the guest)
- * side, we do a "stat" on the host side, then compare the mtime with the
- * last known mtime and invalidate the page-cache if they differ.
- * This is done from vboxsf_inode_revalidate().
- *
- * When reads are done through the read_iter fop, it is possible to do
- * further cache revalidation then; there are 3 options to deal with this:
- *
- * 1) Rely solely on the revalidation done at open time
- * 2) Do another "stat" and compare mtime again. Unfortunately the vboxsf
- * host API does not allow stat on handles, so we would need to use
- * file->f_path.dentry and the stat will then fail if the file was unlinked
- * or renamed (and there is nothing like NFS' silly-rename). So we get:
- * 2a) "stat" and compare mtime, on stat failure invalidate the cache
- * 2b) "stat" and compare mtime, on stat failure do nothing
- * 3) Simply always call invalidate_inode_pages2_range on the range of the read
- *
- * Currently we are keeping things KISS and using option 1. This allows
- * directly using generic_file_read_iter without wrapping it.
- *
- * This means that only data written on the host side before open() on
- * the guest side is guaranteed to be seen by the guest. If necessary
- * we may provide other read-cache strategies in the future and make this
- * configurable through a mount option.
- */
-const struct file_operations vboxsf_reg_fops = {
- .llseek = generic_file_llseek,
- .read_iter = generic_file_read_iter,
- .write_iter = generic_file_write_iter,
- .mmap = vboxsf_file_mmap,
- .open = vboxsf_file_open,
- .release = vboxsf_file_release,
- .fsync = noop_fsync,
- .splice_read = generic_file_splice_read,
-};
-
-const struct inode_operations vboxsf_reg_iops = {
- .getattr = vboxsf_getattr,
- .setattr = vboxsf_setattr
-};
-
-static int vboxsf_readpage(struct file *file, struct page *page)
-{
- struct vboxsf_handle *sf_handle = file->private_data;
- loff_t off = page_offset(page);
- u32 nread = PAGE_SIZE;
- u8 *buf;
- int err;
-
- buf = kmap(page);
-
- err = vboxsf_read(sf_handle->root, sf_handle->handle, off, &nread, buf);
- if (err == 0) {
- memset(&buf[nread], 0, PAGE_SIZE - nread);
- flush_dcache_page(page);
- SetPageUptodate(page);
- } else {
- SetPageError(page);
- }
-
- kunmap(page);
- unlock_page(page);
- return err;
-}
-
-static struct vboxsf_handle *vboxsf_get_write_handle(struct vboxsf_inode *sf_i)
-{
- struct vboxsf_handle *h, *sf_handle = NULL;
-
- mutex_lock(&sf_i->handle_list_mutex);
- list_for_each_entry(h, &sf_i->handle_list, head) {
- if (h->access_flags == SHFL_CF_ACCESS_WRITE ||
- h->access_flags == SHFL_CF_ACCESS_READWRITE) {
- kref_get(&h->refcount);
- sf_handle = h;
- break;
- }
- }
- mutex_unlock(&sf_i->handle_list_mutex);
-
- return sf_handle;
-}
-
-static int vboxsf_writepage(struct page *page, struct writeback_control *wbc)
-{
- struct inode *inode = page->mapping->host;
- struct vboxsf_inode *sf_i = VBOXSF_I(inode);
- struct vboxsf_handle *sf_handle;
- loff_t off = page_offset(page);
- loff_t size = i_size_read(inode);
- u32 nwrite = PAGE_SIZE;
- u8 *buf;
- int err;
-
- if (off + PAGE_SIZE > size)
- nwrite = size & ~PAGE_MASK;
-
- sf_handle = vboxsf_get_write_handle(sf_i);
- if (!sf_handle)
- return -EBADF;
-
- buf = kmap(page);
- err = vboxsf_write(sf_handle->root, sf_handle->handle,
- off, &nwrite, buf);
- kunmap(page);
-
- kref_put(&sf_handle->refcount, vboxsf_handle_release);
-
- if (err == 0) {
- ClearPageError(page);
- /* mtime changed */
- sf_i->force_restat = 1;
- } else {
- ClearPageUptodate(page);
- }
-
- unlock_page(page);
- return err;
-}
-
-static int vboxsf_write_end(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned int len, unsigned int copied,
- struct page *page, void *fsdata)
-{
- struct inode *inode = mapping->host;
- struct vboxsf_handle *sf_handle = file->private_data;
- unsigned int from = pos & ~PAGE_MASK;
- u32 nwritten = len;
- u8 *buf;
- int err;
-
- buf = kmap(page);
- err = vboxsf_write(sf_handle->root, sf_handle->handle,
- pos, &nwritten, buf + from);
- kunmap(page);
-
- if (err) {
- nwritten = 0;
- goto out;
- }
-
- /* mtime changed */
- VBOXSF_I(inode)->force_restat = 1;
-
- if (!PageUptodate(page) && nwritten == PAGE_SIZE)
- SetPageUptodate(page);
-
- pos += nwritten;
- if (pos > inode->i_size)
- i_size_write(inode, pos);
-
-out:
- unlock_page(page);
- put_page(page);
-
- return nwritten;
-}
-
-const struct address_space_operations vboxsf_reg_aops = {
- .readpage = vboxsf_readpage,
- .writepage = vboxsf_writepage,
- .set_page_dirty = __set_page_dirty_nobuffers,
- .write_begin = simple_write_begin,
- .write_end = vboxsf_write_end,
-};
-
-static const char *vboxsf_get_link(struct dentry *dentry, struct inode *inode,
- struct delayed_call *done)
-{
- struct vboxsf_sbi *sbi = VBOXSF_SBI(inode->i_sb);
- struct shfl_string *path;
- char *link;
- int err;
-
- if (!dentry)
- return ERR_PTR(-ECHILD);
-
- path = vboxsf_path_from_dentry(sbi, dentry);
- if (IS_ERR(path))
- return (char *)path;
-
- link = kzalloc(PATH_MAX, GFP_KERNEL);
- if (!link) {
- __putname(path);
- return ERR_PTR(-ENOMEM);
- }
-
- err = vboxsf_readlink(sbi->root, path, PATH_MAX, link);
- __putname(path);
- if (err) {
- kfree(link);
- return ERR_PTR(err);
- }
-
- set_delayed_call(done, kfree_link, link);
- return link;
-}
-
-const struct inode_operations vboxsf_lnk_iops = {
- .get_link = vboxsf_get_link
-};
diff --git a/drivers/staging/vboxsf/shfl_hostintf.h b/drivers/staging/vboxsf/shfl_hostintf.h
deleted file mode 100644
index aca829062c12..000000000000
--- a/drivers/staging/vboxsf/shfl_hostintf.h
+++ /dev/null
@@ -1,901 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * VirtualBox Shared Folders: host interface definition.
- *
- * Copyright (C) 2006-2018 Oracle Corporation
- */
-
-#ifndef SHFL_HOSTINTF_H
-#define SHFL_HOSTINTF_H
-
-#include <linux/vbox_vmmdev_types.h>
-
-/* The max in/out buffer size for a FN_READ or FN_WRITE call */
-#define SHFL_MAX_RW_COUNT (16 * SZ_1M)
-
-/*
- * Structures shared between guest and the service
- * can be relocated and use offsets to point to variable
- * length parts.
- *
- * Shared folders protocol works with handles.
- * Before doing any action on a file system object,
- * one has to obtain the object handle via a SHFL_FN_CREATE
- * request. A handle must be closed with SHFL_FN_CLOSE.
- */
-
-enum {
- SHFL_FN_QUERY_MAPPINGS = 1, /* Query mappings changes. */
- SHFL_FN_QUERY_MAP_NAME = 2, /* Query map name. */
- SHFL_FN_CREATE = 3, /* Open/create object. */
- SHFL_FN_CLOSE = 4, /* Close object handle. */
- SHFL_FN_READ = 5, /* Read object content. */
- SHFL_FN_WRITE = 6, /* Write new object content. */
- SHFL_FN_LOCK = 7, /* Lock/unlock a range in the object. */
- SHFL_FN_LIST = 8, /* List object content. */
- SHFL_FN_INFORMATION = 9, /* Query/set object information. */
- /* Note function number 10 is not used! */
- SHFL_FN_REMOVE = 11, /* Remove object */
- SHFL_FN_MAP_FOLDER_OLD = 12, /* Map folder (legacy) */
- SHFL_FN_UNMAP_FOLDER = 13, /* Unmap folder */
- SHFL_FN_RENAME = 14, /* Rename object */
- SHFL_FN_FLUSH = 15, /* Flush file */
- SHFL_FN_SET_UTF8 = 16, /* Select UTF8 filename encoding */
- SHFL_FN_MAP_FOLDER = 17, /* Map folder */
- SHFL_FN_READLINK = 18, /* Read symlink dest (as of VBox 4.0) */
- SHFL_FN_SYMLINK = 19, /* Create symlink (as of VBox 4.0) */
- SHFL_FN_SET_SYMLINKS = 20, /* Ask host to show symlinks (4.0+) */
-};
-
-/* Root handles for a mapping are of type u32; root handles are unique. */
-#define SHFL_ROOT_NIL UINT_MAX
-
-/* Shared folder handles for an opened object are of type u64. */
-#define SHFL_HANDLE_NIL ULLONG_MAX
-
-/* Hardcoded maximum length (in chars) of a shared folder name. */
-#define SHFL_MAX_LEN (256)
-/* Hardcoded maximum number of shared folder mappings available to the guest. */
-#define SHFL_MAX_MAPPINGS (64)
-
-/** Shared folder string buffer structure. */
-struct shfl_string {
- /** Allocated size of the string member in bytes. */
- u16 size;
-
- /** Length of string without trailing nul in bytes. */
- u16 length;
-
- /** UTF-8 or UTF-16 string. Nul terminated. */
- union {
- u8 utf8[2];
- u16 utf16[1];
- u16 ucs2[1]; /* misnomer, use utf16. */
- } string;
-};
-VMMDEV_ASSERT_SIZE(shfl_string, 6);
-
-/* The size of shfl_string w/o the string part. */
-#define SHFLSTRING_HEADER_SIZE 4
-
-/* Calculate size of the string. */
-static inline u32 shfl_string_buf_size(const struct shfl_string *string)
-{
- return string ? SHFLSTRING_HEADER_SIZE + string->size : 0;
-}
-
-/* Set user id on execution (S_ISUID). */
-#define SHFL_UNIX_ISUID 0004000U
-/* Set group id on execution (S_ISGID). */
-#define SHFL_UNIX_ISGID 0002000U
-/* Sticky bit (S_ISVTX / S_ISTXT). */
-#define SHFL_UNIX_ISTXT 0001000U
-
-/* Owner readable (S_IRUSR). */
-#define SHFL_UNIX_IRUSR 0000400U
-/* Owner writable (S_IWUSR). */
-#define SHFL_UNIX_IWUSR 0000200U
-/* Owner executable (S_IXUSR). */
-#define SHFL_UNIX_IXUSR 0000100U
-
-/* Group readable (S_IRGRP). */
-#define SHFL_UNIX_IRGRP 0000040U
-/* Group writable (S_IWGRP). */
-#define SHFL_UNIX_IWGRP 0000020U
-/* Group executable (S_IXGRP). */
-#define SHFL_UNIX_IXGRP 0000010U
-
-/* Other readable (S_IROTH). */
-#define SHFL_UNIX_IROTH 0000004U
-/* Other writable (S_IWOTH). */
-#define SHFL_UNIX_IWOTH 0000002U
-/* Other executable (S_IXOTH). */
-#define SHFL_UNIX_IXOTH 0000001U
-
-/* Named pipe (fifo) (S_IFIFO). */
-#define SHFL_TYPE_FIFO 0010000U
-/* Character device (S_IFCHR). */
-#define SHFL_TYPE_DEV_CHAR 0020000U
-/* Directory (S_IFDIR). */
-#define SHFL_TYPE_DIRECTORY 0040000U
-/* Block device (S_IFBLK). */
-#define SHFL_TYPE_DEV_BLOCK 0060000U
-/* Regular file (S_IFREG). */
-#define SHFL_TYPE_FILE 0100000U
-/* Symbolic link (S_IFLNK). */
-#define SHFL_TYPE_SYMLINK 0120000U
-/* Socket (S_IFSOCK). */
-#define SHFL_TYPE_SOCKET 0140000U
-/* Whiteout (S_IFWHT). */
-#define SHFL_TYPE_WHITEOUT 0160000U
-/* Type mask (S_IFMT). */
-#define SHFL_TYPE_MASK 0170000U
-
-/* Checks the mode flags indicate a directory (S_ISDIR). */
-#define SHFL_IS_DIRECTORY(m) (((m) & SHFL_TYPE_MASK) == SHFL_TYPE_DIRECTORY)
-/* Checks the mode flags indicate a symbolic link (S_ISLNK). */
-#define SHFL_IS_SYMLINK(m) (((m) & SHFL_TYPE_MASK) == SHFL_TYPE_SYMLINK)
-
-/** The available additional information in a shfl_fsobjattr object. */
-enum shfl_fsobjattr_add {
- /** No additional information is available / requested. */
- SHFLFSOBJATTRADD_NOTHING = 1,
- /**
- * The additional unix attributes (shfl_fsobjattr::u::unix_attr) are
- * available / requested.
- */
- SHFLFSOBJATTRADD_UNIX,
- /**
- * The additional extended attribute size (shfl_fsobjattr::u::size) is
- * available / requested.
- */
- SHFLFSOBJATTRADD_EASIZE,
- /**
- * The last valid item (inclusive).
- * The valid range is SHFLFSOBJATTRADD_NOTHING thru
- * SHFLFSOBJATTRADD_LAST.
- */
- SHFLFSOBJATTRADD_LAST = SHFLFSOBJATTRADD_EASIZE,
-
- /** The usual 32-bit hack. */
- SHFLFSOBJATTRADD_32BIT_SIZE_HACK = 0x7fffffff
-};
-
-/**
- * Additional unix Attributes, these are available when
- * shfl_fsobjattr.additional == SHFLFSOBJATTRADD_UNIX.
- */
-struct shfl_fsobjattr_unix {
- /**
- * The user owning the filesystem object (st_uid).
- * This field is ~0U if not supported.
- */
- u32 uid;
-
- /**
- * The group the filesystem object is assigned (st_gid).
- * This field is ~0U if not supported.
- */
- u32 gid;
-
- /**
- * Number of hard links to this filesystem object (st_nlink).
- * This field is 1 if the filesystem doesn't support hardlinking or
- * the information isn't available.
- */
- u32 hardlinks;
-
- /**
- * The device number of the device which this filesystem object resides
- * on (st_dev). This field is 0 if this information is not available.
- */
- u32 inode_id_device;
-
- /**
- * The unique identifier (within the filesystem) of this filesystem
- * object (st_ino). Together with inode_id_device, this field can be
- * used as an OS wide unique id, when both their values are not 0.
- * This field is 0 if the information is not available.
- */
- u64 inode_id;
-
- /**
- * User flags (st_flags).
- * This field is 0 if this information is not available.
- */
- u32 flags;
-
- /**
- * The current generation number (st_gen).
- * This field is 0 if this information is not available.
- */
- u32 generation_id;
-
- /**
- * The device number of a char. or block device type object (st_rdev).
- * This field is 0 if the file isn't a char. or block device or when
- * the OS doesn't use the major+minor device identification scheme.
- */
- u32 device;
-} __packed;
-
-/** Extended attribute size. */
-struct shfl_fsobjattr_easize {
- /** Size of EAs. */
- s64 cb;
-} __packed;
-
-/** Shared folder filesystem object attributes. */
-struct shfl_fsobjattr {
- /** Mode flags (st_mode). SHFL_UNIX_*, SHFL_TYPE_*, and SHFL_DOS_*. */
- u32 mode;
-
- /** The additional attributes available. */
- enum shfl_fsobjattr_add additional;
-
- /**
- * Additional attributes.
- *
- * Unless explicitly specified to an API, the API can provide additional
- * data as it is provided by the underlying OS.
- */
- union {
- struct shfl_fsobjattr_unix unix_attr;
- struct shfl_fsobjattr_easize size;
- } __packed u;
-} __packed;
-VMMDEV_ASSERT_SIZE(shfl_fsobjattr, 44);
-
-struct shfl_timespec {
- s64 ns_relative_to_unix_epoch;
-};
-
-/** Filesystem object information structure. */
-struct shfl_fsobjinfo {
- /**
- * Logical size (st_size).
- * For normal files this is the size of the file.
- * For symbolic links, this is the length of the path name contained
- * in the symbolic link.
- * For other objects this field needs to be specified.
- */
- s64 size;
-
- /** Disk allocation size (st_blocks * DEV_BSIZE). */
- s64 allocated;
-
- /** Time of last access (st_atime). */
- struct shfl_timespec access_time;
-
- /** Time of last data modification (st_mtime). */
- struct shfl_timespec modification_time;
-
- /**
- * Time of last status change (st_ctime).
- * If not available this is set to modification_time.
- */
- struct shfl_timespec change_time;
-
- /**
- * Time of file birth (st_birthtime).
- * If not available this is set to change_time.
- */
- struct shfl_timespec birth_time;
-
- /** Attributes. */
- struct shfl_fsobjattr attr;
-
-} __packed;
-VMMDEV_ASSERT_SIZE(shfl_fsobjinfo, 92);
-
-/**
- * Result of an open/create request.
- * Along with the handle value, the result code
- * identifies what has happened while
- * trying to open the object.
- */
-enum shfl_create_result {
- SHFL_NO_RESULT,
- /** Specified path does not exist. */
- SHFL_PATH_NOT_FOUND,
- /** Path to file exists, but the last component does not. */
- SHFL_FILE_NOT_FOUND,
- /** File already exists and either has been opened or not. */
- SHFL_FILE_EXISTS,
- /** New file was created. */
- SHFL_FILE_CREATED,
- /** Existing file was replaced or overwritten. */
- SHFL_FILE_REPLACED
-};
-
-/* No flags. Initialization value. */
-#define SHFL_CF_NONE (0x00000000)
-
-/*
- * Only lookup the object, do not return a handle. When this is set all other
- * flags are ignored.
- */
-#define SHFL_CF_LOOKUP (0x00000001)
-
-/*
- * Open parent directory of specified object.
- * Useful for the corresponding Windows FSD flag
- * and for opening paths like \\dir\\*.* to search the 'dir'.
- */
-#define SHFL_CF_OPEN_TARGET_DIRECTORY (0x00000002)
-
-/* Create/open a directory. */
-#define SHFL_CF_DIRECTORY (0x00000004)
-
-/*
- * Open/create action to do if object exists
- * and if the object does not exist.
- * REPLACE file means atomically DELETE and CREATE.
- * OVERWRITE file means truncating the file to 0 and
- * setting new size.
- * When opening an existing directory REPLACE and OVERWRITE
- * actions are considered invalid, and cause FILE_EXISTS to be
- * returned with a NIL handle.
- */
-#define SHFL_CF_ACT_MASK_IF_EXISTS (0x000000f0)
-#define SHFL_CF_ACT_MASK_IF_NEW (0x00000f00)
-
-/* What to do if object exists. */
-#define SHFL_CF_ACT_OPEN_IF_EXISTS (0x00000000)
-#define SHFL_CF_ACT_FAIL_IF_EXISTS (0x00000010)
-#define SHFL_CF_ACT_REPLACE_IF_EXISTS (0x00000020)
-#define SHFL_CF_ACT_OVERWRITE_IF_EXISTS (0x00000030)
-
-/* What to do if object does not exist. */
-#define SHFL_CF_ACT_CREATE_IF_NEW (0x00000000)
-#define SHFL_CF_ACT_FAIL_IF_NEW (0x00000100)
-
-/* Read/write requested access for the object. */
-#define SHFL_CF_ACCESS_MASK_RW (0x00003000)
-
-/* No access requested. */
-#define SHFL_CF_ACCESS_NONE (0x00000000)
-/* Read access requested. */
-#define SHFL_CF_ACCESS_READ (0x00001000)
-/* Write access requested. */
-#define SHFL_CF_ACCESS_WRITE (0x00002000)
-/* Read/Write access requested. */
-#define SHFL_CF_ACCESS_READWRITE (0x00003000)
-
-/* Requested share access for the object. */
-#define SHFL_CF_ACCESS_MASK_DENY (0x0000c000)
-
-/* Allow any access. */
-#define SHFL_CF_ACCESS_DENYNONE (0x00000000)
-/* Do not allow read. */
-#define SHFL_CF_ACCESS_DENYREAD (0x00004000)
-/* Do not allow write. */
-#define SHFL_CF_ACCESS_DENYWRITE (0x00008000)
-/* Do not allow access. */
-#define SHFL_CF_ACCESS_DENYALL (0x0000c000)
-
-/* Requested access to attributes of the object. */
-#define SHFL_CF_ACCESS_MASK_ATTR (0x00030000)
-
-/* No access requested. */
-#define SHFL_CF_ACCESS_ATTR_NONE (0x00000000)
-/* Read access requested. */
-#define SHFL_CF_ACCESS_ATTR_READ (0x00010000)
-/* Write access requested. */
-#define SHFL_CF_ACCESS_ATTR_WRITE (0x00020000)
-/* Read/Write access requested. */
-#define SHFL_CF_ACCESS_ATTR_READWRITE (0x00030000)
-
-/*
- * The file is opened in append mode.
- * Ignored if SHFL_CF_ACCESS_WRITE is not set.
- */
-#define SHFL_CF_ACCESS_APPEND (0x00040000)
-
-/** Create parameters buffer struct for SHFL_FN_CREATE call */
-struct shfl_createparms {
- /** Returned handle of opened object. */
- u64 handle;
-
- /** Returned result of the operation */
- enum shfl_create_result result;
-
- /** SHFL_CF_* */
- u32 create_flags;
-
- /**
- * Attributes of object to create and
- * returned actual attributes of opened/created object.
- */
- struct shfl_fsobjinfo info;
-} __packed;
-
-/** Shared Folder directory information */
-struct shfl_dirinfo {
- /** Full information about the object. */
- struct shfl_fsobjinfo info;
- /**
- * The length of the short name field (number of UTF16 chars).
- * It is 16-bit for reasons of alignment.
- */
- u16 short_name_len;
- /**
- * The short name for 8.3 compatibility.
- * Empty string if not available.
- */
- u16 short_name[14];
- struct shfl_string name;
-};
-
-/** Shared folder filesystem properties. */
-struct shfl_fsproperties {
- /**
- * The maximum size of a filesystem object name.
- * This does not include the '\\0'.
- */
- u32 max_component_len;
-
- /**
- * True if the filesystem is remote.
- * False if the filesystem is local.
- */
- bool remote;
-
- /**
- * True if the filesystem is case sensitive.
- * False if the filesystem is case insensitive.
- */
- bool case_sensitive;
-
- /**
- * True if the filesystem is mounted read only.
- * False if the filesystem is mounted read write.
- */
- bool read_only;
-
- /**
- * True if the filesystem can encode unicode object names.
- * False if it can't.
- */
- bool supports_unicode;
-
- /**
	 * True if the filesystem is compressed.
- * False if it isn't or we don't know.
- */
- bool compressed;
-
- /**
	 * True if the filesystem supports compression of individual files.
- * False if it doesn't or we don't know.
- */
- bool file_compression;
-};
-VMMDEV_ASSERT_SIZE(shfl_fsproperties, 12);
-
-struct shfl_volinfo {
- s64 total_allocation_bytes;
- s64 available_allocation_bytes;
- u32 bytes_per_allocation_unit;
- u32 bytes_per_sector;
- u32 serial;
- struct shfl_fsproperties properties;
-};
-
-
-/** SHFL_FN_MAP_FOLDER Parameters structure. */
-struct shfl_map_folder {
- /**
- * pointer, in:
- * Points to struct shfl_string buffer.
- */
- struct vmmdev_hgcm_function_parameter path;
-
- /**
- * pointer, out: SHFLROOT (u32)
	 * Root handle of the newly mapped folder.
- */
- struct vmmdev_hgcm_function_parameter root;
-
- /**
- * pointer, in: UTF16
- * Path delimiter
- */
- struct vmmdev_hgcm_function_parameter delimiter;
-
- /**
- * pointer, in: SHFLROOT (u32)
	 * Case sensitive flag
- */
- struct vmmdev_hgcm_function_parameter case_sensitive;
-
-};
-
-/* Number of parameters */
-#define SHFL_CPARMS_MAP_FOLDER (4)
-
-
-/** SHFL_FN_UNMAP_FOLDER Parameters structure. */
-struct shfl_unmap_folder {
- /**
- * pointer, in: SHFLROOT (u32)
	 * Root handle of the mapping to unmap.
- */
- struct vmmdev_hgcm_function_parameter root;
-
-};
-
-/* Number of parameters */
-#define SHFL_CPARMS_UNMAP_FOLDER (1)
-
-
-/** SHFL_FN_CREATE Parameters structure. */
-struct shfl_create {
- /**
- * pointer, in: SHFLROOT (u32)
	 * Root handle of the shared folder mapping.
- */
- struct vmmdev_hgcm_function_parameter root;
-
- /**
- * pointer, in:
- * Points to struct shfl_string buffer.
- */
- struct vmmdev_hgcm_function_parameter path;
-
- /**
- * pointer, in/out:
- * Points to struct shfl_createparms buffer.
- */
- struct vmmdev_hgcm_function_parameter parms;
-
-};
-
-/* Number of parameters */
-#define SHFL_CPARMS_CREATE (3)
-
-
-/** SHFL_FN_CLOSE Parameters structure. */
-struct shfl_close {
- /**
- * pointer, in: SHFLROOT (u32)
	 * Root handle of the shared folder mapping.
- */
- struct vmmdev_hgcm_function_parameter root;
-
- /**
- * value64, in:
- * SHFLHANDLE (u64) of object to close.
- */
- struct vmmdev_hgcm_function_parameter handle;
-
-};
-
-/* Number of parameters */
-#define SHFL_CPARMS_CLOSE (2)
-
-
-/** SHFL_FN_READ Parameters structure. */
-struct shfl_read {
- /**
- * pointer, in: SHFLROOT (u32)
	 * Root handle of the shared folder mapping.
- */
- struct vmmdev_hgcm_function_parameter root;
-
- /**
- * value64, in:
- * SHFLHANDLE (u64) of object to read from.
- */
- struct vmmdev_hgcm_function_parameter handle;
-
- /**
- * value64, in:
- * Offset to read from.
- */
- struct vmmdev_hgcm_function_parameter offset;
-
- /**
- * value64, in/out:
- * Bytes to read/How many were read.
- */
- struct vmmdev_hgcm_function_parameter cb;
-
- /**
- * pointer, out:
- * Buffer to place data to.
- */
- struct vmmdev_hgcm_function_parameter buffer;
-
-};
-
-/* Number of parameters */
-#define SHFL_CPARMS_READ (5)
-
-
-/** SHFL_FN_WRITE Parameters structure. */
-struct shfl_write {
- /**
- * pointer, in: SHFLROOT (u32)
	 * Root handle of the shared folder mapping.
- */
- struct vmmdev_hgcm_function_parameter root;
-
- /**
- * value64, in:
- * SHFLHANDLE (u64) of object to write to.
- */
- struct vmmdev_hgcm_function_parameter handle;
-
- /**
- * value64, in:
- * Offset to write to.
- */
- struct vmmdev_hgcm_function_parameter offset;
-
- /**
- * value64, in/out:
- * Bytes to write/How many were written.
- */
- struct vmmdev_hgcm_function_parameter cb;
-
- /**
- * pointer, in:
- * Data to write.
- */
- struct vmmdev_hgcm_function_parameter buffer;
-
-};
-
-/* Number of parameters */
-#define SHFL_CPARMS_WRITE (5)
-
-
-/*
- * SHFL_FN_LIST
- * Listing information includes variable length RTDIRENTRY[EX] structures.
- */
-
-#define SHFL_LIST_NONE 0
-#define SHFL_LIST_RETURN_ONE 1
-
-/** SHFL_FN_LIST Parameters structure. */
-struct shfl_list {
- /**
- * pointer, in: SHFLROOT (u32)
	 * Root handle of the shared folder mapping.
- */
- struct vmmdev_hgcm_function_parameter root;
-
- /**
- * value64, in:
- * SHFLHANDLE (u64) of object to be listed.
- */
- struct vmmdev_hgcm_function_parameter handle;
-
- /**
- * value32, in:
- * List flags SHFL_LIST_*.
- */
- struct vmmdev_hgcm_function_parameter flags;
-
- /**
- * value32, in/out:
- * Bytes to be used for listing information/How many bytes were used.
- */
- struct vmmdev_hgcm_function_parameter cb;
-
- /**
- * pointer, in/optional
	 * pointer, in (optional):
- */
- struct vmmdev_hgcm_function_parameter path;
-
- /**
- * pointer, out:
- * Buffer to place listing information to. (struct shfl_dirinfo)
- */
- struct vmmdev_hgcm_function_parameter buffer;
-
- /**
- * value32, in/out:
- * Indicates a key where the listing must be resumed.
	 * in: 0 means start from the beginning of the object.
- * out: 0 means listing completed.
- */
- struct vmmdev_hgcm_function_parameter resume_point;
-
- /**
- * pointer, out:
- * Number of files returned
- */
- struct vmmdev_hgcm_function_parameter file_count;
-};
-
-/* Number of parameters */
-#define SHFL_CPARMS_LIST (8)
-
-
-/** SHFL_FN_READLINK Parameters structure. */
-struct shfl_readLink {
- /**
- * pointer, in: SHFLROOT (u32)
	 * Root handle of the shared folder mapping.
- */
- struct vmmdev_hgcm_function_parameter root;
-
- /**
- * pointer, in:
- * Points to struct shfl_string buffer.
- */
- struct vmmdev_hgcm_function_parameter path;
-
- /**
- * pointer, out:
- * Buffer to place data to.
- */
- struct vmmdev_hgcm_function_parameter buffer;
-
-};
-
-/* Number of parameters */
-#define SHFL_CPARMS_READLINK (3)
-
-
-/* SHFL_FN_INFORMATION */
-
-/* Mask of Set/Get bit. */
-#define SHFL_INFO_MODE_MASK (0x1)
-/* Get information */
-#define SHFL_INFO_GET (0x0)
-/* Set information */
-#define SHFL_INFO_SET (0x1)
-
-/* Get name of the object. */
-#define SHFL_INFO_NAME (0x2)
-/* Set size of object (extend/truncate); only applies to file objects */
-#define SHFL_INFO_SIZE (0x4)
-/* Get/Set file object info. */
-#define SHFL_INFO_FILE (0x8)
-/* Get volume information. */
-#define SHFL_INFO_VOLUME (0x10)
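These get/set and subject bits are OR'ed together; for example, the code later
in this patch uses SHFL_INFO_GET | SHFL_INFO_VOLUME in vboxsf_statfs() and
SHFL_INFO_SET | SHFL_INFO_SIZE in vboxsf_setattr().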
-
-/** SHFL_FN_INFORMATION Parameters structure. */
-struct shfl_information {
- /**
- * pointer, in: SHFLROOT (u32)
	 * Root handle of the shared folder mapping.
- */
- struct vmmdev_hgcm_function_parameter root;
-
- /**
- * value64, in:
	 * SHFLHANDLE (u64) of object to get/set information about.
- */
- struct vmmdev_hgcm_function_parameter handle;
-
- /**
- * value32, in:
- * SHFL_INFO_*
- */
- struct vmmdev_hgcm_function_parameter flags;
-
- /**
- * value32, in/out:
- * Bytes to be used for information/How many bytes were used.
- */
- struct vmmdev_hgcm_function_parameter cb;
-
- /**
- * pointer, in/out:
- * Information to be set/get (shfl_fsobjinfo or shfl_string). Do not
- * forget to set the shfl_fsobjinfo::attr::additional for a get
- * operation as well.
- */
- struct vmmdev_hgcm_function_parameter info;
-
-};
-
-/* Number of parameters */
-#define SHFL_CPARMS_INFORMATION (5)
-
-
-/* SHFL_FN_REMOVE */
-
-#define SHFL_REMOVE_FILE (0x1)
-#define SHFL_REMOVE_DIR (0x2)
-#define SHFL_REMOVE_SYMLINK (0x4)
-
-/** SHFL_FN_REMOVE Parameters structure. */
-struct shfl_remove {
- /**
- * pointer, in: SHFLROOT (u32)
	 * Root handle of the shared folder mapping.
- */
- struct vmmdev_hgcm_function_parameter root;
-
- /**
- * pointer, in:
- * Points to struct shfl_string buffer.
- */
- struct vmmdev_hgcm_function_parameter path;
-
- /**
- * value32, in:
- * remove flags (file/directory)
- */
- struct vmmdev_hgcm_function_parameter flags;
-
-};
-
-#define SHFL_CPARMS_REMOVE (3)
-
-
-/* SHFL_FN_RENAME */
-
-#define SHFL_RENAME_FILE (0x1)
-#define SHFL_RENAME_DIR (0x2)
-#define SHFL_RENAME_REPLACE_IF_EXISTS (0x4)
-
-/** SHFL_FN_RENAME Parameters structure. */
-struct shfl_rename {
- /**
- * pointer, in: SHFLROOT (u32)
	 * Root handle of the shared folder mapping.
- */
- struct vmmdev_hgcm_function_parameter root;
-
- /**
- * pointer, in:
- * Points to struct shfl_string src.
- */
- struct vmmdev_hgcm_function_parameter src;
-
- /**
- * pointer, in:
- * Points to struct shfl_string dest.
- */
- struct vmmdev_hgcm_function_parameter dest;
-
- /**
- * value32, in:
- * rename flags (file/directory)
- */
- struct vmmdev_hgcm_function_parameter flags;
-
-};
-
-#define SHFL_CPARMS_RENAME (4)
-
-
-/** SHFL_FN_SYMLINK Parameters structure. */
-struct shfl_symlink {
- /**
- * pointer, in: SHFLROOT (u32)
	 * Root handle of the shared folder mapping.
- */
- struct vmmdev_hgcm_function_parameter root;
-
- /**
- * pointer, in:
- * Points to struct shfl_string of path for the new symlink.
- */
- struct vmmdev_hgcm_function_parameter new_path;
-
- /**
- * pointer, in:
- * Points to struct shfl_string of destination for symlink.
- */
- struct vmmdev_hgcm_function_parameter old_path;
-
- /**
- * pointer, out:
- * Information about created symlink.
- */
- struct vmmdev_hgcm_function_parameter info;
-
-};
-
-#define SHFL_CPARMS_SYMLINK (4)
-
-#endif
diff --git a/drivers/staging/vboxsf/super.c b/drivers/staging/vboxsf/super.c
deleted file mode 100644
index 0bf4d724aefd..000000000000
--- a/drivers/staging/vboxsf/super.c
+++ /dev/null
@@ -1,501 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * VirtualBox Guest Shared Folders support: Virtual File System.
- *
- * Module initialization/finalization
- * File system registration/deregistration
- * Superblock reading
- * Few utility functions
- *
- * Copyright (C) 2006-2018 Oracle Corporation
- */
-
-#include <linux/idr.h>
-#include <linux/fs_parser.h>
-#include <linux/magic.h>
-#include <linux/module.h>
-#include <linux/nls.h>
-#include <linux/statfs.h>
-#include <linux/vbox_utils.h>
-#include "vfsmod.h"
-
-#define VBOXSF_SUPER_MAGIC 0x786f4256 /* 'VBox' little endian */
-
-#define VBSF_MOUNT_SIGNATURE_BYTE_0 ('\000')
-#define VBSF_MOUNT_SIGNATURE_BYTE_1 ('\377')
-#define VBSF_MOUNT_SIGNATURE_BYTE_2 ('\376')
-#define VBSF_MOUNT_SIGNATURE_BYTE_3 ('\375')
-
-static int follow_symlinks;
-module_param(follow_symlinks, int, 0444);
-MODULE_PARM_DESC(follow_symlinks,
- "Let host resolve symlinks rather than showing them");
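(Being a 0444 module parameter this is read-only at runtime; it would be set at
load time, e.g. "modprobe vboxsf follow_symlinks=1".)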
-
-static DEFINE_IDA(vboxsf_bdi_ida);
-static DEFINE_MUTEX(vboxsf_setup_mutex);
-static bool vboxsf_setup_done;
-static struct super_operations vboxsf_super_ops; /* forward declaration */
-static struct kmem_cache *vboxsf_inode_cachep;
-
-static char * const vboxsf_default_nls = CONFIG_NLS_DEFAULT;
-
-enum { opt_nls, opt_uid, opt_gid, opt_ttl, opt_dmode, opt_fmode,
- opt_dmask, opt_fmask };
-
-static const struct fs_parameter_spec vboxsf_param_specs[] = {
- fsparam_string ("nls", opt_nls),
- fsparam_u32 ("uid", opt_uid),
- fsparam_u32 ("gid", opt_gid),
- fsparam_u32 ("ttl", opt_ttl),
- fsparam_u32oct ("dmode", opt_dmode),
- fsparam_u32oct ("fmode", opt_fmode),
- fsparam_u32oct ("dmask", opt_dmask),
- fsparam_u32oct ("fmask", opt_fmask),
- {}
-};
-
-static const struct fs_parameter_description vboxsf_fs_parameters = {
- .name = "vboxsf",
- .specs = vboxsf_param_specs,
-};
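For reference, these parameters correspond one-to-one to mount options;
assuming a hypothetical host share named "work" and mount point /mnt/work, a
typical invocation would look like:

	mount -t vboxsf -o uid=1000,gid=1000,ttl=200,dmode=0755,fmode=0644 work /mnt/work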
-
-static int vboxsf_parse_param(struct fs_context *fc, struct fs_parameter *param)
-{
- struct vboxsf_fs_context *ctx = fc->fs_private;
- struct fs_parse_result result;
- kuid_t uid;
- kgid_t gid;
- int opt;
-
- opt = fs_parse(fc, &vboxsf_fs_parameters, param, &result);
- if (opt < 0)
- return opt;
-
- switch (opt) {
- case opt_nls:
- if (fc->purpose != FS_CONTEXT_FOR_MOUNT) {
- vbg_err("vboxsf: Cannot reconfigure nls option\n");
- return -EINVAL;
- }
- ctx->nls_name = param->string;
- param->string = NULL;
- break;
- case opt_uid:
- uid = make_kuid(current_user_ns(), result.uint_32);
- if (!uid_valid(uid))
- return -EINVAL;
- ctx->o.uid = uid;
- break;
- case opt_gid:
- gid = make_kgid(current_user_ns(), result.uint_32);
- if (!gid_valid(gid))
- return -EINVAL;
- ctx->o.gid = gid;
- break;
- case opt_ttl:
- ctx->o.ttl = msecs_to_jiffies(result.uint_32);
- break;
- case opt_dmode:
- if (result.uint_32 & ~0777)
- return -EINVAL;
- ctx->o.dmode = result.uint_32;
- ctx->o.dmode_set = true;
- break;
- case opt_fmode:
- if (result.uint_32 & ~0777)
- return -EINVAL;
- ctx->o.fmode = result.uint_32;
- ctx->o.fmode_set = true;
- break;
- case opt_dmask:
- if (result.uint_32 & ~07777)
- return -EINVAL;
- ctx->o.dmask = result.uint_32;
- break;
- case opt_fmask:
- if (result.uint_32 & ~07777)
- return -EINVAL;
- ctx->o.fmask = result.uint_32;
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int vboxsf_fill_super(struct super_block *sb, struct fs_context *fc)
-{
- struct vboxsf_fs_context *ctx = fc->fs_private;
- struct shfl_string *folder_name, root_path;
- struct vboxsf_sbi *sbi;
- struct dentry *droot;
- struct inode *iroot;
- char *nls_name;
- size_t size;
- int err;
-
- if (!fc->source)
- return -EINVAL;
-
- sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
- if (!sbi)
- return -ENOMEM;
-
- sbi->o = ctx->o;
- idr_init(&sbi->ino_idr);
- spin_lock_init(&sbi->ino_idr_lock);
- sbi->next_generation = 1;
- sbi->bdi_id = -1;
-
- /* Load nls if not utf8 */
- nls_name = ctx->nls_name ? ctx->nls_name : vboxsf_default_nls;
- if (strcmp(nls_name, "utf8") != 0) {
- if (nls_name == vboxsf_default_nls)
- sbi->nls = load_nls_default();
- else
- sbi->nls = load_nls(nls_name);
-
- if (!sbi->nls) {
			vbg_err("vboxsf: Could not load '%s' nls\n", nls_name);
- err = -EINVAL;
- goto fail_free;
- }
- }
-
- sbi->bdi_id = ida_simple_get(&vboxsf_bdi_ida, 0, 0, GFP_KERNEL);
- if (sbi->bdi_id < 0) {
- err = sbi->bdi_id;
- goto fail_free;
- }
-
- err = super_setup_bdi_name(sb, "vboxsf-%s.%d", fc->source, sbi->bdi_id);
- if (err)
- goto fail_free;
-
- /* Turn source into a shfl_string and map the folder */
- size = strlen(fc->source) + 1;
- folder_name = kmalloc(SHFLSTRING_HEADER_SIZE + size, GFP_KERNEL);
- if (!folder_name) {
- err = -ENOMEM;
- goto fail_free;
- }
- folder_name->size = size;
- folder_name->length = size - 1;
- strlcpy(folder_name->string.utf8, fc->source, size);
- err = vboxsf_map_folder(folder_name, &sbi->root);
- kfree(folder_name);
- if (err) {
- vbg_err("vboxsf: Host rejected mount of '%s' with error %d\n",
- fc->source, err);
- goto fail_free;
- }
-
- root_path.length = 1;
- root_path.size = 2;
- root_path.string.utf8[0] = '/';
- root_path.string.utf8[1] = 0;
- err = vboxsf_stat(sbi, &root_path, &sbi->root_info);
- if (err)
- goto fail_unmap;
-
- sb->s_magic = VBOXSF_SUPER_MAGIC;
- sb->s_blocksize = 1024;
- sb->s_maxbytes = MAX_LFS_FILESIZE;
- sb->s_op = &vboxsf_super_ops;
- sb->s_d_op = &vboxsf_dentry_ops;
-
- iroot = iget_locked(sb, 0);
- if (!iroot) {
- err = -ENOMEM;
- goto fail_unmap;
- }
- vboxsf_init_inode(sbi, iroot, &sbi->root_info);
- unlock_new_inode(iroot);
-
- droot = d_make_root(iroot);
- if (!droot) {
- err = -ENOMEM;
- goto fail_unmap;
- }
-
- sb->s_root = droot;
- sb->s_fs_info = sbi;
- return 0;
-
-fail_unmap:
- vboxsf_unmap_folder(sbi->root);
-fail_free:
- if (sbi->bdi_id >= 0)
- ida_simple_remove(&vboxsf_bdi_ida, sbi->bdi_id);
- if (sbi->nls)
- unload_nls(sbi->nls);
- idr_destroy(&sbi->ino_idr);
- kfree(sbi);
- return err;
-}
-
-static void vboxsf_inode_init_once(void *data)
-{
- struct vboxsf_inode *sf_i = data;
-
- mutex_init(&sf_i->handle_list_mutex);
- inode_init_once(&sf_i->vfs_inode);
-}
-
-static struct inode *vboxsf_alloc_inode(struct super_block *sb)
-{
- struct vboxsf_inode *sf_i;
-
- sf_i = kmem_cache_alloc(vboxsf_inode_cachep, GFP_NOFS);
- if (!sf_i)
- return NULL;
-
- sf_i->force_restat = 0;
- INIT_LIST_HEAD(&sf_i->handle_list);
-
- return &sf_i->vfs_inode;
-}
-
-static void vboxsf_free_inode(struct inode *inode)
-{
- struct vboxsf_sbi *sbi = VBOXSF_SBI(inode->i_sb);
- unsigned long flags;
-
- spin_lock_irqsave(&sbi->ino_idr_lock, flags);
- idr_remove(&sbi->ino_idr, inode->i_ino);
- spin_unlock_irqrestore(&sbi->ino_idr_lock, flags);
- kmem_cache_free(vboxsf_inode_cachep, VBOXSF_I(inode));
-}
-
-static void vboxsf_put_super(struct super_block *sb)
-{
- struct vboxsf_sbi *sbi = VBOXSF_SBI(sb);
-
- vboxsf_unmap_folder(sbi->root);
- if (sbi->bdi_id >= 0)
- ida_simple_remove(&vboxsf_bdi_ida, sbi->bdi_id);
- if (sbi->nls)
- unload_nls(sbi->nls);
-
- /*
	 * vboxsf_free_inode uses the idr; make sure all delayed RCU-freed
- * inodes are flushed.
- */
- rcu_barrier();
- idr_destroy(&sbi->ino_idr);
- kfree(sbi);
-}
-
-static int vboxsf_statfs(struct dentry *dentry, struct kstatfs *stat)
-{
- struct super_block *sb = dentry->d_sb;
- struct shfl_volinfo shfl_volinfo;
- struct vboxsf_sbi *sbi;
- u32 buf_len;
- int err;
-
- sbi = VBOXSF_SBI(sb);
- buf_len = sizeof(shfl_volinfo);
- err = vboxsf_fsinfo(sbi->root, 0, SHFL_INFO_GET | SHFL_INFO_VOLUME,
- &buf_len, &shfl_volinfo);
- if (err)
- return err;
-
- stat->f_type = VBOXSF_SUPER_MAGIC;
- stat->f_bsize = shfl_volinfo.bytes_per_allocation_unit;
-
- do_div(shfl_volinfo.total_allocation_bytes,
- shfl_volinfo.bytes_per_allocation_unit);
- stat->f_blocks = shfl_volinfo.total_allocation_bytes;
-
- do_div(shfl_volinfo.available_allocation_bytes,
- shfl_volinfo.bytes_per_allocation_unit);
- stat->f_bfree = shfl_volinfo.available_allocation_bytes;
- stat->f_bavail = shfl_volinfo.available_allocation_bytes;
-
- stat->f_files = 1000;
- /*
- * Don't return 0 here since the guest may then think that it is not
- * possible to create any more files.
- */
- stat->f_ffree = 1000000;
- stat->f_fsid.val[0] = 0;
- stat->f_fsid.val[1] = 0;
- stat->f_namelen = 255;
- return 0;
-}
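As a worked example of the do_div() conversions above: for a hypothetical share
reporting total_allocation_bytes = 1073741824 (1 GiB) and
bytes_per_allocation_unit = 4096, userspace statfs() would see f_bsize = 4096
and f_blocks = 1073741824 / 4096 = 262144.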
-
-static struct super_operations vboxsf_super_ops = {
- .alloc_inode = vboxsf_alloc_inode,
- .free_inode = vboxsf_free_inode,
- .put_super = vboxsf_put_super,
- .statfs = vboxsf_statfs,
-};
-
-static int vboxsf_setup(void)
-{
- int err;
-
- mutex_lock(&vboxsf_setup_mutex);
-
- if (vboxsf_setup_done)
- goto success;
-
- vboxsf_inode_cachep =
- kmem_cache_create("vboxsf_inode_cache",
- sizeof(struct vboxsf_inode), 0,
- (SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD |
- SLAB_ACCOUNT),
- vboxsf_inode_init_once);
- if (!vboxsf_inode_cachep) {
- err = -ENOMEM;
- goto fail_nomem;
- }
-
- err = vboxsf_connect();
- if (err) {
- vbg_err("vboxsf: err %d connecting to guest PCI-device\n", err);
- vbg_err("vboxsf: make sure you are inside a VirtualBox VM\n");
- vbg_err("vboxsf: and check dmesg for vboxguest errors\n");
- goto fail_free_cache;
- }
-
- err = vboxsf_set_utf8();
- if (err) {
		vbg_err("vboxsf_set_utf8 error %d\n", err);
- goto fail_disconnect;
- }
-
- if (!follow_symlinks) {
- err = vboxsf_set_symlinks();
- if (err)
- vbg_warn("vboxsf: Unable to show symlinks: %d\n", err);
- }
-
- vboxsf_setup_done = true;
-success:
- mutex_unlock(&vboxsf_setup_mutex);
- return 0;
-
-fail_disconnect:
- vboxsf_disconnect();
-fail_free_cache:
- kmem_cache_destroy(vboxsf_inode_cachep);
-fail_nomem:
- mutex_unlock(&vboxsf_setup_mutex);
- return err;
-}
-
-static int vboxsf_parse_monolithic(struct fs_context *fc, void *data)
-{
- char *options = data;
-
- if (options && options[0] == VBSF_MOUNT_SIGNATURE_BYTE_0 &&
- options[1] == VBSF_MOUNT_SIGNATURE_BYTE_1 &&
- options[2] == VBSF_MOUNT_SIGNATURE_BYTE_2 &&
- options[3] == VBSF_MOUNT_SIGNATURE_BYTE_3) {
- vbg_err("vboxsf: Old binary mount data not supported, remove obsolete mount.vboxsf and/or update your VBoxService.\n");
- return -EINVAL;
- }
-
- return generic_parse_monolithic(fc, data);
-}
-
-static int vboxsf_get_tree(struct fs_context *fc)
-{
- int err;
-
- err = vboxsf_setup();
- if (err)
- return err;
-
- return vfs_get_super(fc, vfs_get_independent_super, vboxsf_fill_super);
-}
-
-static int vboxsf_reconfigure(struct fs_context *fc)
-{
- struct vboxsf_sbi *sbi = VBOXSF_SBI(fc->root->d_sb);
- struct vboxsf_fs_context *ctx = fc->fs_private;
- struct inode *iroot;
-
- iroot = ilookup(fc->root->d_sb, 0);
- if (!iroot)
- return -ENOENT;
-
- /* Apply changed options to the root inode */
- sbi->o = ctx->o;
- vboxsf_init_inode(sbi, iroot, &sbi->root_info);
-
- return 0;
-}
-
-static void vboxsf_free_fc(struct fs_context *fc)
-{
- struct vboxsf_fs_context *ctx = fc->fs_private;
-
- kfree(ctx->nls_name);
- kfree(ctx);
-}
-
-static const struct fs_context_operations vboxsf_context_ops = {
- .free = vboxsf_free_fc,
- .parse_param = vboxsf_parse_param,
- .parse_monolithic = vboxsf_parse_monolithic,
- .get_tree = vboxsf_get_tree,
- .reconfigure = vboxsf_reconfigure,
-};
-
-static int vboxsf_init_fs_context(struct fs_context *fc)
-{
- struct vboxsf_fs_context *ctx;
-
- ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
-
- current_uid_gid(&ctx->o.uid, &ctx->o.gid);
-
- fc->fs_private = ctx;
- fc->ops = &vboxsf_context_ops;
- return 0;
-}
-
-static struct file_system_type vboxsf_fs_type = {
- .owner = THIS_MODULE,
- .name = "vboxsf",
- .init_fs_context = vboxsf_init_fs_context,
- .parameters = &vboxsf_fs_parameters,
- .kill_sb = kill_anon_super
-};
-
-/* Module initialization/finalization handlers */
-static int __init vboxsf_init(void)
-{
- return register_filesystem(&vboxsf_fs_type);
-}
-
-static void __exit vboxsf_fini(void)
-{
- unregister_filesystem(&vboxsf_fs_type);
-
- mutex_lock(&vboxsf_setup_mutex);
- if (vboxsf_setup_done) {
- vboxsf_disconnect();
- /*
- * Make sure all delayed rcu free inodes are flushed
- * before we destroy the cache.
- */
- rcu_barrier();
- kmem_cache_destroy(vboxsf_inode_cachep);
- }
- mutex_unlock(&vboxsf_setup_mutex);
-}
-
-module_init(vboxsf_init);
-module_exit(vboxsf_fini);
-
-MODULE_DESCRIPTION("Oracle VM VirtualBox Module for Host File System Access");
-MODULE_AUTHOR("Oracle Corporation");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS_FS("vboxsf");
diff --git a/drivers/staging/vboxsf/utils.c b/drivers/staging/vboxsf/utils.c
deleted file mode 100644
index 34a49e6f74fc..000000000000
--- a/drivers/staging/vboxsf/utils.c
+++ /dev/null
@@ -1,551 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * VirtualBox Guest Shared Folders support: Utility functions.
- * Mainly conversion from/to VirtualBox/Linux data structures.
- *
- * Copyright (C) 2006-2018 Oracle Corporation
- */
-
-#include <linux/namei.h>
-#include <linux/nls.h>
-#include <linux/sizes.h>
-#include <linux/vfs.h>
-#include "vfsmod.h"
-
-struct inode *vboxsf_new_inode(struct super_block *sb)
-{
- struct vboxsf_sbi *sbi = VBOXSF_SBI(sb);
- struct inode *inode;
- unsigned long flags;
- int cursor, ret;
- u32 gen;
-
- inode = new_inode(sb);
- if (!inode)
- return ERR_PTR(-ENOMEM);
-
- idr_preload(GFP_KERNEL);
- spin_lock_irqsave(&sbi->ino_idr_lock, flags);
- cursor = idr_get_cursor(&sbi->ino_idr);
- ret = idr_alloc_cyclic(&sbi->ino_idr, inode, 1, 0, GFP_ATOMIC);
- if (ret >= 0 && ret < cursor)
- sbi->next_generation++;
- gen = sbi->next_generation;
- spin_unlock_irqrestore(&sbi->ino_idr_lock, flags);
- idr_preload_end();
-
- if (ret < 0) {
- iput(inode);
- return ERR_PTR(ret);
- }
-
- inode->i_ino = ret;
- inode->i_generation = gen;
- return inode;
-}
-
-/* set [inode] attributes based on [info], uid/gid based on [sbi] */
-void vboxsf_init_inode(struct vboxsf_sbi *sbi, struct inode *inode,
- const struct shfl_fsobjinfo *info)
-{
- const struct shfl_fsobjattr *attr;
- s64 allocated;
- int mode;
-
- attr = &info->attr;
-
-#define mode_set(r) ((attr->mode & (SHFL_UNIX_##r)) ? (S_##r) : 0)
-
- mode = mode_set(IRUSR);
- mode |= mode_set(IWUSR);
- mode |= mode_set(IXUSR);
-
- mode |= mode_set(IRGRP);
- mode |= mode_set(IWGRP);
- mode |= mode_set(IXGRP);
-
- mode |= mode_set(IROTH);
- mode |= mode_set(IWOTH);
- mode |= mode_set(IXOTH);
-
-#undef mode_set
-
- /* We use the host-side values for these */
- inode->i_flags |= S_NOATIME | S_NOCMTIME;
- inode->i_mapping->a_ops = &vboxsf_reg_aops;
-
- if (SHFL_IS_DIRECTORY(attr->mode)) {
- inode->i_mode = sbi->o.dmode_set ? sbi->o.dmode : mode;
- inode->i_mode &= ~sbi->o.dmask;
- inode->i_mode |= S_IFDIR;
- inode->i_op = &vboxsf_dir_iops;
- inode->i_fop = &vboxsf_dir_fops;
- /*
- * XXX: this probably should be set to the number of entries
- * in the directory plus two (. ..)
- */
- set_nlink(inode, 1);
- } else if (SHFL_IS_SYMLINK(attr->mode)) {
- inode->i_mode = sbi->o.fmode_set ? sbi->o.fmode : mode;
- inode->i_mode &= ~sbi->o.fmask;
- inode->i_mode |= S_IFLNK;
- inode->i_op = &vboxsf_lnk_iops;
- set_nlink(inode, 1);
- } else {
- inode->i_mode = sbi->o.fmode_set ? sbi->o.fmode : mode;
- inode->i_mode &= ~sbi->o.fmask;
- inode->i_mode |= S_IFREG;
- inode->i_op = &vboxsf_reg_iops;
- inode->i_fop = &vboxsf_reg_fops;
- set_nlink(inode, 1);
- }
-
- inode->i_uid = sbi->o.uid;
- inode->i_gid = sbi->o.gid;
-
- inode->i_size = info->size;
- inode->i_blkbits = 12;
- /* i_blocks always in units of 512 bytes! */
- allocated = info->allocated + 511;
- do_div(allocated, 512);
- inode->i_blocks = allocated;
-
- inode->i_atime = ns_to_timespec64(
- info->access_time.ns_relative_to_unix_epoch);
- inode->i_ctime = ns_to_timespec64(
- info->change_time.ns_relative_to_unix_epoch);
- inode->i_mtime = ns_to_timespec64(
- info->modification_time.ns_relative_to_unix_epoch);
-}
-
-int vboxsf_create_at_dentry(struct dentry *dentry,
- struct shfl_createparms *params)
-{
- struct vboxsf_sbi *sbi = VBOXSF_SBI(dentry->d_sb);
- struct shfl_string *path;
- int err;
-
- path = vboxsf_path_from_dentry(sbi, dentry);
- if (IS_ERR(path))
- return PTR_ERR(path);
-
- err = vboxsf_create(sbi->root, path, params);
- __putname(path);
-
- return err;
-}
-
-int vboxsf_stat(struct vboxsf_sbi *sbi, struct shfl_string *path,
- struct shfl_fsobjinfo *info)
-{
- struct shfl_createparms params = {};
- int err;
-
- params.handle = SHFL_HANDLE_NIL;
- params.create_flags = SHFL_CF_LOOKUP | SHFL_CF_ACT_FAIL_IF_NEW;
-
- err = vboxsf_create(sbi->root, path, &params);
- if (err)
- return err;
-
- if (params.result != SHFL_FILE_EXISTS)
- return -ENOENT;
-
- if (info)
- *info = params.info;
-
- return 0;
-}
-
-int vboxsf_stat_dentry(struct dentry *dentry, struct shfl_fsobjinfo *info)
-{
- struct vboxsf_sbi *sbi = VBOXSF_SBI(dentry->d_sb);
- struct shfl_string *path;
- int err;
-
- path = vboxsf_path_from_dentry(sbi, dentry);
- if (IS_ERR(path))
- return PTR_ERR(path);
-
- err = vboxsf_stat(sbi, path, info);
- __putname(path);
- return err;
-}
-
-int vboxsf_inode_revalidate(struct dentry *dentry)
-{
- struct vboxsf_sbi *sbi;
- struct vboxsf_inode *sf_i;
- struct shfl_fsobjinfo info;
- struct timespec64 prev_mtime;
- struct inode *inode;
- int err;
-
- if (!dentry || !d_really_is_positive(dentry))
- return -EINVAL;
-
- inode = d_inode(dentry);
- prev_mtime = inode->i_mtime;
- sf_i = VBOXSF_I(inode);
- sbi = VBOXSF_SBI(dentry->d_sb);
- if (!sf_i->force_restat) {
- if (time_before(jiffies, dentry->d_time + sbi->o.ttl))
- return 0;
- }
-
- err = vboxsf_stat_dentry(dentry, &info);
- if (err)
- return err;
-
- dentry->d_time = jiffies;
- sf_i->force_restat = 0;
- vboxsf_init_inode(sbi, inode, &info);
-
- /*
- * If the file was changed on the host side we need to invalidate the
- * page-cache for it. Note this also gets triggered by our own writes,
- * this is unavoidable.
- */
- if (timespec64_compare(&inode->i_mtime, &prev_mtime) > 0)
- invalidate_inode_pages2(inode->i_mapping);
-
- return 0;
-}
-
-int vboxsf_getattr(const struct path *path, struct kstat *kstat,
- u32 request_mask, unsigned int flags)
-{
- int err;
- struct dentry *dentry = path->dentry;
- struct inode *inode = d_inode(dentry);
- struct vboxsf_inode *sf_i = VBOXSF_I(inode);
-
- switch (flags & AT_STATX_SYNC_TYPE) {
- case AT_STATX_DONT_SYNC:
- err = 0;
- break;
- case AT_STATX_FORCE_SYNC:
- sf_i->force_restat = 1;
- /* fall-through */
- default:
- err = vboxsf_inode_revalidate(dentry);
- }
- if (err)
- return err;
-
- generic_fillattr(d_inode(dentry), kstat);
- return 0;
-}
-
-int vboxsf_setattr(struct dentry *dentry, struct iattr *iattr)
-{
- struct vboxsf_inode *sf_i = VBOXSF_I(d_inode(dentry));
- struct vboxsf_sbi *sbi = VBOXSF_SBI(dentry->d_sb);
- struct shfl_createparms params = {};
- struct shfl_fsobjinfo info = {};
- u32 buf_len;
- int err;
-
- params.handle = SHFL_HANDLE_NIL;
- params.create_flags = SHFL_CF_ACT_OPEN_IF_EXISTS |
- SHFL_CF_ACT_FAIL_IF_NEW |
- SHFL_CF_ACCESS_ATTR_WRITE;
-
	/* this is at least required for POSIX hosts */
- if (iattr->ia_valid & ATTR_SIZE)
- params.create_flags |= SHFL_CF_ACCESS_WRITE;
-
- err = vboxsf_create_at_dentry(dentry, &params);
- if (err || params.result != SHFL_FILE_EXISTS)
- return err ? err : -ENOENT;
-
-#define mode_set(r) ((iattr->ia_mode & (S_##r)) ? SHFL_UNIX_##r : 0)
-
- /*
- * Setting the file size and setting the other attributes has to
- * be handled separately.
- */
- if (iattr->ia_valid & (ATTR_MODE | ATTR_ATIME | ATTR_MTIME)) {
- if (iattr->ia_valid & ATTR_MODE) {
- info.attr.mode = mode_set(IRUSR);
- info.attr.mode |= mode_set(IWUSR);
- info.attr.mode |= mode_set(IXUSR);
- info.attr.mode |= mode_set(IRGRP);
- info.attr.mode |= mode_set(IWGRP);
- info.attr.mode |= mode_set(IXGRP);
- info.attr.mode |= mode_set(IROTH);
- info.attr.mode |= mode_set(IWOTH);
- info.attr.mode |= mode_set(IXOTH);
-
- if (iattr->ia_mode & S_IFDIR)
- info.attr.mode |= SHFL_TYPE_DIRECTORY;
- else
- info.attr.mode |= SHFL_TYPE_FILE;
- }
-
- if (iattr->ia_valid & ATTR_ATIME)
- info.access_time.ns_relative_to_unix_epoch =
- timespec64_to_ns(&iattr->ia_atime);
-
- if (iattr->ia_valid & ATTR_MTIME)
- info.modification_time.ns_relative_to_unix_epoch =
- timespec64_to_ns(&iattr->ia_mtime);
-
- /*
- * Ignore ctime (inode change time) as it can't be set
- * from userland anyway.
- */
-
- buf_len = sizeof(info);
- err = vboxsf_fsinfo(sbi->root, params.handle,
- SHFL_INFO_SET | SHFL_INFO_FILE, &buf_len,
- &info);
- if (err) {
- vboxsf_close(sbi->root, params.handle);
- return err;
- }
-
		/* the host may have given us different attributes than requested */
- sf_i->force_restat = 1;
- }
-
-#undef mode_set
-
- if (iattr->ia_valid & ATTR_SIZE) {
- memset(&info, 0, sizeof(info));
- info.size = iattr->ia_size;
- buf_len = sizeof(info);
- err = vboxsf_fsinfo(sbi->root, params.handle,
- SHFL_INFO_SET | SHFL_INFO_SIZE, &buf_len,
- &info);
- if (err) {
- vboxsf_close(sbi->root, params.handle);
- return err;
- }
-
		/* the host may have given us different attributes than requested */
- sf_i->force_restat = 1;
- }
-
- vboxsf_close(sbi->root, params.handle);
-
- /* Update the inode with what the host has actually given us. */
- if (sf_i->force_restat)
- vboxsf_inode_revalidate(dentry);
-
- return 0;
-}
-
-/*
- * [dentry] contains a string encoded in the coding system that
- * corresponds to [sbi]->nls; we must convert it to UTF-8 here.
- * Returns a shfl_string allocated through __getname (must be freed using
- * __putname), or an ERR_PTR on error.
- */
-struct shfl_string *vboxsf_path_from_dentry(struct vboxsf_sbi *sbi,
- struct dentry *dentry)
-{
- struct shfl_string *shfl_path;
- int path_len, out_len, nb;
- char *buf, *path;
- wchar_t uni;
- u8 *out;
-
- buf = __getname();
- if (!buf)
- return ERR_PTR(-ENOMEM);
-
- path = dentry_path_raw(dentry, buf, PATH_MAX);
- if (IS_ERR(path)) {
- __putname(buf);
- return (struct shfl_string *)path;
- }
- path_len = strlen(path);
-
- if (sbi->nls) {
- shfl_path = __getname();
- if (!shfl_path) {
- __putname(buf);
- return ERR_PTR(-ENOMEM);
- }
-
- out = shfl_path->string.utf8;
- out_len = PATH_MAX - SHFLSTRING_HEADER_SIZE - 1;
-
- while (path_len) {
- nb = sbi->nls->char2uni(path, path_len, &uni);
- if (nb < 0) {
- __putname(shfl_path);
- __putname(buf);
- return ERR_PTR(-EINVAL);
- }
- path += nb;
- path_len -= nb;
-
- nb = utf32_to_utf8(uni, out, out_len);
- if (nb < 0) {
- __putname(shfl_path);
- __putname(buf);
- return ERR_PTR(-ENAMETOOLONG);
- }
- out += nb;
- out_len -= nb;
- }
- *out = 0;
- shfl_path->length = out - shfl_path->string.utf8;
- shfl_path->size = shfl_path->length + 1;
- __putname(buf);
- } else {
- if ((SHFLSTRING_HEADER_SIZE + path_len + 1) > PATH_MAX) {
- __putname(buf);
- return ERR_PTR(-ENAMETOOLONG);
- }
- /*
- * dentry_path stores the name at the end of buf, but the
		 * shfl_string we return must be properly aligned.
- */
- shfl_path = (struct shfl_string *)buf;
- memmove(shfl_path->string.utf8, path, path_len);
- shfl_path->string.utf8[path_len] = 0;
- shfl_path->length = path_len;
- shfl_path->size = path_len + 1;
- }
-
- return shfl_path;
-}
-
-int vboxsf_nlscpy(struct vboxsf_sbi *sbi, char *name, size_t name_bound_len,
- const unsigned char *utf8_name, size_t utf8_len)
-{
- const char *in;
- char *out;
- size_t out_len;
- size_t out_bound_len;
- size_t in_bound_len;
-
- in = utf8_name;
- in_bound_len = utf8_len;
-
- out = name;
- out_len = 0;
- /* Reserve space for terminating 0 */
- out_bound_len = name_bound_len - 1;
-
- while (in_bound_len) {
- int nb;
- unicode_t uni;
-
- nb = utf8_to_utf32(in, in_bound_len, &uni);
- if (nb < 0)
- return -EINVAL;
-
- in += nb;
- in_bound_len -= nb;
-
- nb = sbi->nls->uni2char(uni, out, out_bound_len);
- if (nb < 0)
- return nb;
-
- out += nb;
- out_bound_len -= nb;
- out_len += nb;
- }
-
- *out = 0;
-
- return 0;
-}
-
-static struct vboxsf_dir_buf *vboxsf_dir_buf_alloc(struct list_head *list)
-{
- struct vboxsf_dir_buf *b;
-
- b = kmalloc(sizeof(*b), GFP_KERNEL);
- if (!b)
- return NULL;
-
- b->buf = kmalloc(DIR_BUFFER_SIZE, GFP_KERNEL);
- if (!b->buf) {
- kfree(b);
- return NULL;
- }
-
- b->entries = 0;
- b->used = 0;
- b->free = DIR_BUFFER_SIZE;
- list_add(&b->head, list);
-
- return b;
-}
-
-static void vboxsf_dir_buf_free(struct vboxsf_dir_buf *b)
-{
- list_del(&b->head);
- kfree(b->buf);
- kfree(b);
-}
-
-struct vboxsf_dir_info *vboxsf_dir_info_alloc(void)
-{
- struct vboxsf_dir_info *p;
-
- p = kmalloc(sizeof(*p), GFP_KERNEL);
- if (!p)
- return NULL;
-
- INIT_LIST_HEAD(&p->info_list);
- return p;
-}
-
-void vboxsf_dir_info_free(struct vboxsf_dir_info *p)
-{
- struct list_head *list, *pos, *tmp;
-
- list = &p->info_list;
- list_for_each_safe(pos, tmp, list) {
- struct vboxsf_dir_buf *b;
-
- b = list_entry(pos, struct vboxsf_dir_buf, head);
- vboxsf_dir_buf_free(b);
- }
- kfree(p);
-}
-
-int vboxsf_dir_read_all(struct vboxsf_sbi *sbi, struct vboxsf_dir_info *sf_d,
- u64 handle)
-{
- struct vboxsf_dir_buf *b;
- u32 entries, size;
- int err = 0;
- void *buf;
-
- /* vboxsf_dirinfo returns 1 on end of dir */
- while (err == 0) {
- b = vboxsf_dir_buf_alloc(&sf_d->info_list);
- if (!b) {
- err = -ENOMEM;
- break;
- }
-
- buf = b->buf;
- size = b->free;
-
- err = vboxsf_dirinfo(sbi->root, handle, NULL, 0, 0,
- &size, buf, &entries);
- if (err < 0)
- break;
-
- b->entries += entries;
- b->free -= size;
- b->used += size;
- }
-
- if (b && b->used == 0)
- vboxsf_dir_buf_free(b);
-
- /* -EILSEQ means the host could not translate a filename, ignore */
- if (err > 0 || err == -EILSEQ)
- err = 0;
-
- return err;
-}
diff --git a/drivers/staging/vboxsf/vboxsf_wrappers.c b/drivers/staging/vboxsf/vboxsf_wrappers.c
deleted file mode 100644
index bfc78a097dae..000000000000
--- a/drivers/staging/vboxsf/vboxsf_wrappers.c
+++ /dev/null
@@ -1,371 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Wrapper functions for the shfl host calls.
- *
- * Copyright (C) 2006-2018 Oracle Corporation
- */
-
-#include <linux/mm.h>
-#include <linux/slab.h>
-#include <linux/vbox_err.h>
-#include <linux/vbox_utils.h>
-#include "vfsmod.h"
-
-#define SHFL_REQUEST \
- (VMMDEV_REQUESTOR_KERNEL | VMMDEV_REQUESTOR_USR_DRV_OTHER | \
- VMMDEV_REQUESTOR_CON_DONT_KNOW | VMMDEV_REQUESTOR_TRUST_NOT_GIVEN)
-
-static u32 vboxsf_client_id;
-
-int vboxsf_connect(void)
-{
- struct vbg_dev *gdev;
- struct vmmdev_hgcm_service_location loc;
- int err, vbox_status;
-
- loc.type = VMMDEV_HGCM_LOC_LOCALHOST_EXISTING;
- strcpy(loc.u.localhost.service_name, "VBoxSharedFolders");
-
- gdev = vbg_get_gdev();
- if (IS_ERR(gdev))
- return -ENODEV; /* No guest-device */
-
- err = vbg_hgcm_connect(gdev, SHFL_REQUEST, &loc,
- &vboxsf_client_id, &vbox_status);
- vbg_put_gdev(gdev);
-
- return err ? err : vbg_status_code_to_errno(vbox_status);
-}
-
-void vboxsf_disconnect(void)
-{
- struct vbg_dev *gdev;
- int vbox_status;
-
- gdev = vbg_get_gdev();
- if (IS_ERR(gdev))
- return; /* guest-device is gone, already disconnected */
-
- vbg_hgcm_disconnect(gdev, SHFL_REQUEST, vboxsf_client_id, &vbox_status);
- vbg_put_gdev(gdev);
-}
-
-static int vboxsf_call(u32 function, void *parms, u32 parm_count, int *status)
-{
- struct vbg_dev *gdev;
- int err, vbox_status;
-
- gdev = vbg_get_gdev();
- if (IS_ERR(gdev))
- return -ESHUTDOWN; /* guest-dev removed underneath us */
-
- err = vbg_hgcm_call(gdev, SHFL_REQUEST, vboxsf_client_id, function,
- U32_MAX, parms, parm_count, &vbox_status);
- vbg_put_gdev(gdev);
-
- if (err < 0)
- return err;
-
- if (status)
- *status = vbox_status;
-
- return vbg_status_code_to_errno(vbox_status);
-}
-
-int vboxsf_map_folder(struct shfl_string *folder_name, u32 *root)
-{
- struct shfl_map_folder parms;
- int err, status;
-
- parms.path.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL;
- parms.path.u.pointer.size = shfl_string_buf_size(folder_name);
- parms.path.u.pointer.u.linear_addr = (uintptr_t)folder_name;
-
- parms.root.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
- parms.root.u.value32 = 0;
-
- parms.delimiter.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
- parms.delimiter.u.value32 = '/';
-
- parms.case_sensitive.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
- parms.case_sensitive.u.value32 = 1;
-
- err = vboxsf_call(SHFL_FN_MAP_FOLDER, &parms, SHFL_CPARMS_MAP_FOLDER,
- &status);
- if (err == -ENOSYS && status == VERR_NOT_IMPLEMENTED)
		vbg_err("%s: Error: host is too old\n", __func__);
-
- *root = parms.root.u.value32;
- return err;
-}
-
-int vboxsf_unmap_folder(u32 root)
-{
- struct shfl_unmap_folder parms;
-
- parms.root.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
- parms.root.u.value32 = root;
-
- return vboxsf_call(SHFL_FN_UNMAP_FOLDER, &parms,
- SHFL_CPARMS_UNMAP_FOLDER, NULL);
-}
-
-/**
- * vboxsf_create - Create a new file or folder
- * @root: Root of the shared folder in which to create the file
- * @parsed_path: The path of the file or folder relative to the shared folder
- * @create_parms: Parameters for file/folder creation.
- *
- * Create a new file or folder or open an existing one in a shared folder.
- * Note this function always returns 0 / success unless an exceptional condition
- * occurs - out of memory, invalid arguments, etc. If the file or folder could
- * not be opened or created, create_parms->handle will be set to
- * SHFL_HANDLE_NIL on return. In this case the value in create_parms->result
- * provides information as to why (e.g. SHFL_FILE_EXISTS), create_parms->result
- * is also set on success as additional information.
- *
- * Returns:
- * 0 or negative errno value.
- */
-int vboxsf_create(u32 root, struct shfl_string *parsed_path,
- struct shfl_createparms *create_parms)
-{
- struct shfl_create parms;
-
- parms.root.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
- parms.root.u.value32 = root;
-
- parms.path.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL;
- parms.path.u.pointer.size = shfl_string_buf_size(parsed_path);
- parms.path.u.pointer.u.linear_addr = (uintptr_t)parsed_path;
-
- parms.parms.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL;
- parms.parms.u.pointer.size = sizeof(struct shfl_createparms);
- parms.parms.u.pointer.u.linear_addr = (uintptr_t)create_parms;
-
- return vboxsf_call(SHFL_FN_CREATE, &parms, SHFL_CPARMS_CREATE, NULL);
-}
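A sketch of the resulting open-then-close pattern (this mirrors what
vboxsf_setattr() in utils.c does; root and path are assumed to come from the
caller):

	struct shfl_createparms params = {};
	int err;

	params.handle = SHFL_HANDLE_NIL;
	params.create_flags = SHFL_CF_ACT_OPEN_IF_EXISTS |
			      SHFL_CF_ACT_FAIL_IF_NEW |
			      SHFL_CF_ACCESS_ATTR_WRITE;

	err = vboxsf_create(root, path, &params);
	if (err || params.result != SHFL_FILE_EXISTS)
		return err ? err : -ENOENT;

	/* ... use params.handle, e.g. with vboxsf_fsinfo() ... */

	vboxsf_close(root, params.handle);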
-
-int vboxsf_close(u32 root, u64 handle)
-{
- struct shfl_close parms;
-
- parms.root.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
- parms.root.u.value32 = root;
-
- parms.handle.type = VMMDEV_HGCM_PARM_TYPE_64BIT;
- parms.handle.u.value64 = handle;
-
- return vboxsf_call(SHFL_FN_CLOSE, &parms, SHFL_CPARMS_CLOSE, NULL);
-}
-
-int vboxsf_remove(u32 root, struct shfl_string *parsed_path, u32 flags)
-{
- struct shfl_remove parms;
-
- parms.root.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
- parms.root.u.value32 = root;
-
- parms.path.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN;
- parms.path.u.pointer.size = shfl_string_buf_size(parsed_path);
- parms.path.u.pointer.u.linear_addr = (uintptr_t)parsed_path;
-
- parms.flags.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
- parms.flags.u.value32 = flags;
-
- return vboxsf_call(SHFL_FN_REMOVE, &parms, SHFL_CPARMS_REMOVE, NULL);
-}
-
-int vboxsf_rename(u32 root, struct shfl_string *src_path,
- struct shfl_string *dest_path, u32 flags)
-{
- struct shfl_rename parms;
-
- parms.root.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
- parms.root.u.value32 = root;
-
- parms.src.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN;
- parms.src.u.pointer.size = shfl_string_buf_size(src_path);
- parms.src.u.pointer.u.linear_addr = (uintptr_t)src_path;
-
- parms.dest.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN;
- parms.dest.u.pointer.size = shfl_string_buf_size(dest_path);
- parms.dest.u.pointer.u.linear_addr = (uintptr_t)dest_path;
-
- parms.flags.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
- parms.flags.u.value32 = flags;
-
- return vboxsf_call(SHFL_FN_RENAME, &parms, SHFL_CPARMS_RENAME, NULL);
-}
-
-int vboxsf_read(u32 root, u64 handle, u64 offset, u32 *buf_len, u8 *buf)
-{
- struct shfl_read parms;
- int err;
-
- parms.root.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
- parms.root.u.value32 = root;
-
- parms.handle.type = VMMDEV_HGCM_PARM_TYPE_64BIT;
- parms.handle.u.value64 = handle;
- parms.offset.type = VMMDEV_HGCM_PARM_TYPE_64BIT;
- parms.offset.u.value64 = offset;
- parms.cb.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
- parms.cb.u.value32 = *buf_len;
- parms.buffer.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT;
- parms.buffer.u.pointer.size = *buf_len;
- parms.buffer.u.pointer.u.linear_addr = (uintptr_t)buf;
-
- err = vboxsf_call(SHFL_FN_READ, &parms, SHFL_CPARMS_READ, NULL);
-
- *buf_len = parms.cb.u.value32;
- return err;
-}
-
-int vboxsf_write(u32 root, u64 handle, u64 offset, u32 *buf_len, u8 *buf)
-{
- struct shfl_write parms;
- int err;
-
- parms.root.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
- parms.root.u.value32 = root;
-
- parms.handle.type = VMMDEV_HGCM_PARM_TYPE_64BIT;
- parms.handle.u.value64 = handle;
- parms.offset.type = VMMDEV_HGCM_PARM_TYPE_64BIT;
- parms.offset.u.value64 = offset;
- parms.cb.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
- parms.cb.u.value32 = *buf_len;
- parms.buffer.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN;
- parms.buffer.u.pointer.size = *buf_len;
- parms.buffer.u.pointer.u.linear_addr = (uintptr_t)buf;
-
- err = vboxsf_call(SHFL_FN_WRITE, &parms, SHFL_CPARMS_WRITE, NULL);
-
- *buf_len = parms.cb.u.value32;
- return err;
-}
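Note that *buf_len is in/out in both wrappers above: callers pass in the buffer
size and get back the number of bytes actually transferred, so a short read or
write shows up as a reduced *buf_len rather than as an error.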
-
-/* Returns 0 on success, 1 on end-of-dir, negative errno otherwise */
-int vboxsf_dirinfo(u32 root, u64 handle,
- struct shfl_string *parsed_path, u32 flags, u32 index,
- u32 *buf_len, struct shfl_dirinfo *buf, u32 *file_count)
-{
- struct shfl_list parms;
- int err, status;
-
- parms.root.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
- parms.root.u.value32 = root;
-
- parms.handle.type = VMMDEV_HGCM_PARM_TYPE_64BIT;
- parms.handle.u.value64 = handle;
- parms.flags.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
- parms.flags.u.value32 = flags;
- parms.cb.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
- parms.cb.u.value32 = *buf_len;
- if (parsed_path) {
- parms.path.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN;
- parms.path.u.pointer.size = shfl_string_buf_size(parsed_path);
- parms.path.u.pointer.u.linear_addr = (uintptr_t)parsed_path;
- } else {
- parms.path.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_IN;
- parms.path.u.pointer.size = 0;
- parms.path.u.pointer.u.linear_addr = 0;
- }
-
- parms.buffer.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT;
- parms.buffer.u.pointer.size = *buf_len;
- parms.buffer.u.pointer.u.linear_addr = (uintptr_t)buf;
-
- parms.resume_point.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
- parms.resume_point.u.value32 = index;
- parms.file_count.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
- parms.file_count.u.value32 = 0; /* out parameter only */
-
- err = vboxsf_call(SHFL_FN_LIST, &parms, SHFL_CPARMS_LIST, &status);
- if (err == -ENODATA && status == VERR_NO_MORE_FILES)
- err = 1;
-
- *buf_len = parms.cb.u.value32;
- *file_count = parms.file_count.u.value32;
- return err;
-}
-
-int vboxsf_fsinfo(u32 root, u64 handle, u32 flags,
- u32 *buf_len, void *buf)
-{
- struct shfl_information parms;
- int err;
-
- parms.root.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
- parms.root.u.value32 = root;
-
- parms.handle.type = VMMDEV_HGCM_PARM_TYPE_64BIT;
- parms.handle.u.value64 = handle;
- parms.flags.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
- parms.flags.u.value32 = flags;
- parms.cb.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
- parms.cb.u.value32 = *buf_len;
- parms.info.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL;
- parms.info.u.pointer.size = *buf_len;
- parms.info.u.pointer.u.linear_addr = (uintptr_t)buf;
-
- err = vboxsf_call(SHFL_FN_INFORMATION, &parms, SHFL_CPARMS_INFORMATION,
- NULL);
-
- *buf_len = parms.cb.u.value32;
- return err;
-}
-
-int vboxsf_readlink(u32 root, struct shfl_string *parsed_path,
- u32 buf_len, u8 *buf)
-{
- struct shfl_readLink parms;
-
- parms.root.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
- parms.root.u.value32 = root;
-
- parms.path.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN;
- parms.path.u.pointer.size = shfl_string_buf_size(parsed_path);
- parms.path.u.pointer.u.linear_addr = (uintptr_t)parsed_path;
-
- parms.buffer.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT;
- parms.buffer.u.pointer.size = buf_len;
- parms.buffer.u.pointer.u.linear_addr = (uintptr_t)buf;
-
- return vboxsf_call(SHFL_FN_READLINK, &parms, SHFL_CPARMS_READLINK,
- NULL);
-}
-
-int vboxsf_symlink(u32 root, struct shfl_string *new_path,
- struct shfl_string *old_path, struct shfl_fsobjinfo *buf)
-{
- struct shfl_symlink parms;
-
- parms.root.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
- parms.root.u.value32 = root;
-
- parms.new_path.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN;
- parms.new_path.u.pointer.size = shfl_string_buf_size(new_path);
- parms.new_path.u.pointer.u.linear_addr = (uintptr_t)new_path;
-
- parms.old_path.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN;
- parms.old_path.u.pointer.size = shfl_string_buf_size(old_path);
- parms.old_path.u.pointer.u.linear_addr = (uintptr_t)old_path;
-
- parms.info.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT;
- parms.info.u.pointer.size = sizeof(struct shfl_fsobjinfo);
- parms.info.u.pointer.u.linear_addr = (uintptr_t)buf;
-
- return vboxsf_call(SHFL_FN_SYMLINK, &parms, SHFL_CPARMS_SYMLINK, NULL);
-}
-
-int vboxsf_set_utf8(void)
-{
- return vboxsf_call(SHFL_FN_SET_UTF8, NULL, 0, NULL);
-}
-
-int vboxsf_set_symlinks(void)
-{
- return vboxsf_call(SHFL_FN_SET_SYMLINKS, NULL, 0, NULL);
-}
diff --git a/drivers/staging/vboxsf/vfsmod.h b/drivers/staging/vboxsf/vfsmod.h
deleted file mode 100644
index 18f95b00fc33..000000000000
--- a/drivers/staging/vboxsf/vfsmod.h
+++ /dev/null
@@ -1,137 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * VirtualBox Guest Shared Folders support: module header.
- *
- * Copyright (C) 2006-2018 Oracle Corporation
- */
-
-#ifndef VFSMOD_H
-#define VFSMOD_H
-
-#include <linux/backing-dev.h>
-#include <linux/idr.h>
-#include "shfl_hostintf.h"
-
-#define DIR_BUFFER_SIZE SZ_16K
-
-/* The cast is to prevent assignment of void * to pointers of arbitrary type */
-#define VBOXSF_SBI(sb) ((struct vboxsf_sbi *)(sb)->s_fs_info)
-#define VBOXSF_I(i) container_of(i, struct vboxsf_inode, vfs_inode)
-
-struct vboxsf_options {
- unsigned long ttl;
- kuid_t uid;
- kgid_t gid;
- bool dmode_set;
- bool fmode_set;
- umode_t dmode;
- umode_t fmode;
- umode_t dmask;
- umode_t fmask;
-};
-
-struct vboxsf_fs_context {
- struct vboxsf_options o;
- char *nls_name;
-};
-
-/* per-shared folder information */
-struct vboxsf_sbi {
- struct vboxsf_options o;
- struct shfl_fsobjinfo root_info;
- struct idr ino_idr;
- spinlock_t ino_idr_lock; /* This protects ino_idr */
- struct nls_table *nls;
- u32 next_generation;
- u32 root;
- int bdi_id;
-};
-
-/* per-inode information */
-struct vboxsf_inode {
- /* some information was changed, update data on next revalidate */
- int force_restat;
- /* list of open handles for this inode + lock protecting it */
- struct list_head handle_list;
- /* This mutex protects handle_list accesses */
- struct mutex handle_list_mutex;
- /* The VFS inode struct */
- struct inode vfs_inode;
-};
-
-struct vboxsf_dir_info {
- struct list_head info_list;
-};
-
-struct vboxsf_dir_buf {
- size_t entries;
- size_t free;
- size_t used;
- void *buf;
- struct list_head head;
-};
-
-/* globals */
-extern const struct inode_operations vboxsf_dir_iops;
-extern const struct inode_operations vboxsf_lnk_iops;
-extern const struct inode_operations vboxsf_reg_iops;
-extern const struct file_operations vboxsf_dir_fops;
-extern const struct file_operations vboxsf_reg_fops;
-extern const struct address_space_operations vboxsf_reg_aops;
-extern const struct dentry_operations vboxsf_dentry_ops;
-
-/* from utils.c */
-struct inode *vboxsf_new_inode(struct super_block *sb);
-void vboxsf_init_inode(struct vboxsf_sbi *sbi, struct inode *inode,
- const struct shfl_fsobjinfo *info);
-int vboxsf_create_at_dentry(struct dentry *dentry,
- struct shfl_createparms *params);
-int vboxsf_stat(struct vboxsf_sbi *sbi, struct shfl_string *path,
- struct shfl_fsobjinfo *info);
-int vboxsf_stat_dentry(struct dentry *dentry, struct shfl_fsobjinfo *info);
-int vboxsf_inode_revalidate(struct dentry *dentry);
-int vboxsf_getattr(const struct path *path, struct kstat *kstat,
- u32 request_mask, unsigned int query_flags);
-int vboxsf_setattr(struct dentry *dentry, struct iattr *iattr);
-struct shfl_string *vboxsf_path_from_dentry(struct vboxsf_sbi *sbi,
- struct dentry *dentry);
-int vboxsf_nlscpy(struct vboxsf_sbi *sbi, char *name, size_t name_bound_len,
- const unsigned char *utf8_name, size_t utf8_len);
-struct vboxsf_dir_info *vboxsf_dir_info_alloc(void);
-void vboxsf_dir_info_free(struct vboxsf_dir_info *p);
-int vboxsf_dir_read_all(struct vboxsf_sbi *sbi, struct vboxsf_dir_info *sf_d,
- u64 handle);
-
-/* from vboxsf_wrappers.c */
-int vboxsf_connect(void);
-void vboxsf_disconnect(void);
-
-int vboxsf_create(u32 root, struct shfl_string *parsed_path,
- struct shfl_createparms *create_parms);
-
-int vboxsf_close(u32 root, u64 handle);
-int vboxsf_remove(u32 root, struct shfl_string *parsed_path, u32 flags);
-int vboxsf_rename(u32 root, struct shfl_string *src_path,
- struct shfl_string *dest_path, u32 flags);
-
-int vboxsf_read(u32 root, u64 handle, u64 offset, u32 *buf_len, u8 *buf);
-int vboxsf_write(u32 root, u64 handle, u64 offset, u32 *buf_len, u8 *buf);
-
-int vboxsf_dirinfo(u32 root, u64 handle,
- struct shfl_string *parsed_path, u32 flags, u32 index,
- u32 *buf_len, struct shfl_dirinfo *buf, u32 *file_count);
-int vboxsf_fsinfo(u32 root, u64 handle, u32 flags,
- u32 *buf_len, void *buf);
-
-int vboxsf_map_folder(struct shfl_string *folder_name, u32 *root);
-int vboxsf_unmap_folder(u32 root);
-
-int vboxsf_readlink(u32 root, struct shfl_string *parsed_path,
- u32 buf_len, u8 *buf);
-int vboxsf_symlink(u32 root, struct shfl_string *new_path,
- struct shfl_string *old_path, struct shfl_fsobjinfo *buf);
-
-int vboxsf_set_utf8(void);
-int vboxsf_set_symlinks(void);
-
-#endif
diff --git a/drivers/thunderbolt/cap.c b/drivers/thunderbolt/cap.c
index 8bf8e031f0bc..fdd77bb4628d 100644
--- a/drivers/thunderbolt/cap.c
+++ b/drivers/thunderbolt/cap.c
@@ -33,9 +33,9 @@ static int tb_port_enable_tmu(struct tb_port *port, bool enable)
* Legacy devices need to have TMU access enabled before port
* space can be fully accessed.
*/
- if (tb_switch_is_lr(sw))
+ if (tb_switch_is_light_ridge(sw))
offset = 0x26;
- else if (tb_switch_is_er(sw))
+ else if (tb_switch_is_eagle_ridge(sw))
offset = 0x2a;
else
return 0;
@@ -60,7 +60,7 @@ static void tb_port_dummy_read(struct tb_port *port)
* reading stale data on next read perform one dummy read after
* port capabilities are walked.
*/
- if (tb_switch_is_lr(port->sw)) {
+ if (tb_switch_is_light_ridge(port->sw)) {
u32 dummy;
tb_port_read(port, &dummy, TB_CFG_PORT, 0, 1);
diff --git a/drivers/thunderbolt/ctl.c b/drivers/thunderbolt/ctl.c
index 2ec1af8f7968..d97813e80e5f 100644
--- a/drivers/thunderbolt/ctl.c
+++ b/drivers/thunderbolt/ctl.c
@@ -962,8 +962,8 @@ int tb_cfg_read(struct tb_ctl *ctl, void *buffer, u64 route, u32 port,
return tb_cfg_get_error(ctl, space, &res);
case -ETIMEDOUT:
- tb_ctl_warn(ctl, "timeout reading config space %u from %#x\n",
- space, offset);
+ tb_ctl_warn(ctl, "%llx: timeout reading config space %u from %#x\n",
+ route, space, offset);
break;
default:
@@ -988,8 +988,8 @@ int tb_cfg_write(struct tb_ctl *ctl, const void *buffer, u64 route, u32 port,
return tb_cfg_get_error(ctl, space, &res);
case -ETIMEDOUT:
- tb_ctl_warn(ctl, "timeout writing config space %u to %#x\n",
- space, offset);
+ tb_ctl_warn(ctl, "%llx: timeout writing config space %u to %#x\n",
+ route, space, offset);
break;
default:
diff --git a/drivers/thunderbolt/eeprom.c b/drivers/thunderbolt/eeprom.c
index ee5196479854..8dd7de0cc826 100644
--- a/drivers/thunderbolt/eeprom.c
+++ b/drivers/thunderbolt/eeprom.c
@@ -514,17 +514,6 @@ int tb_drom_read(struct tb_switch *sw)
* no entries). Hardcode the configuration here.
*/
tb_drom_read_uid_only(sw, &sw->uid);
-
- sw->ports[1].link_nr = 0;
- sw->ports[2].link_nr = 1;
- sw->ports[1].dual_link_port = &sw->ports[2];
- sw->ports[2].dual_link_port = &sw->ports[1];
-
- sw->ports[3].link_nr = 0;
- sw->ports[4].link_nr = 1;
- sw->ports[3].dual_link_port = &sw->ports[4];
- sw->ports[4].dual_link_port = &sw->ports[3];
-
return 0;
}
diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c
index 245588f691e7..13e88109742e 100644
--- a/drivers/thunderbolt/icm.c
+++ b/drivers/thunderbolt/icm.c
@@ -11,6 +11,7 @@
#include <linux/delay.h>
#include <linux/mutex.h>
+#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/platform_data/x86/apple.h>
@@ -43,6 +44,10 @@
#define ICM_APPROVE_TIMEOUT 10000 /* ms */
#define ICM_MAX_LINK 4
+static bool start_icm;
+module_param(start_icm, bool, 0444);
+MODULE_PARM_DESC(start_icm, "start ICM firmware if it is not running (default: false)");
+
/**
* struct icm - Internal connection manager private data
 * @request_lock: Makes sure only one message is sent to the ICM at a time
@@ -147,6 +152,17 @@ static const struct intel_vss *parse_intel_vss(const void *ep_name, size_t size)
return NULL;
}
+static bool intel_vss_is_rtd3(const void *ep_name, size_t size)
+{
+ const struct intel_vss *vss;
+
+ vss = parse_intel_vss(ep_name, size);
+ if (vss)
+ return !!(vss->flags & INTEL_VSS_FLAGS_RTD3);
+
+ return false;
+}
+
static inline struct tb *icm_to_tb(struct icm *icm)
{
return ((void *)icm - sizeof(struct tb));
@@ -339,6 +355,14 @@ static void icm_veto_end(struct tb *tb)
}
}
+static bool icm_firmware_running(const struct tb_nhi *nhi)
+{
+ u32 val;
+
+ val = ioread32(nhi->iobase + REG_FW_STS);
+ return !!(val & REG_FW_STS_ICM_EN);
+}
+
static bool icm_fr_is_supported(struct tb *tb)
{
return !x86_apple_machine;
@@ -562,58 +586,42 @@ static int icm_fr_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
return 0;
}
-static struct tb_switch *add_switch(struct tb_switch *parent_sw, u64 route,
- const uuid_t *uuid, const u8 *ep_name,
- size_t ep_name_size, u8 connection_id,
- u8 connection_key, u8 link, u8 depth,
- enum tb_security_level security_level,
- bool authorized, bool boot)
+static struct tb_switch *alloc_switch(struct tb_switch *parent_sw, u64 route,
+ const uuid_t *uuid)
{
- const struct intel_vss *vss;
+ struct tb *tb = parent_sw->tb;
struct tb_switch *sw;
- int ret;
-
- pm_runtime_get_sync(&parent_sw->dev);
- sw = tb_switch_alloc(parent_sw->tb, &parent_sw->dev, route);
- if (IS_ERR(sw))
- goto out;
+ sw = tb_switch_alloc(tb, &parent_sw->dev, route);
+ if (IS_ERR(sw)) {
+ tb_warn(tb, "failed to allocate switch at %llx\n", route);
+ return sw;
+ }
sw->uuid = kmemdup(uuid, sizeof(*uuid), GFP_KERNEL);
if (!sw->uuid) {
- tb_sw_warn(sw, "cannot allocate memory for switch\n");
tb_switch_put(sw);
- goto out;
+ return ERR_PTR(-ENOMEM);
}
- sw->connection_id = connection_id;
- sw->connection_key = connection_key;
- sw->link = link;
- sw->depth = depth;
- sw->authorized = authorized;
- sw->security_level = security_level;
- sw->boot = boot;
+
init_completion(&sw->rpm_complete);
+ return sw;
+}
- vss = parse_intel_vss(ep_name, ep_name_size);
- if (vss)
- sw->rpm = !!(vss->flags & INTEL_VSS_FLAGS_RTD3);
+static int add_switch(struct tb_switch *parent_sw, struct tb_switch *sw)
+{
+ u64 route = tb_route(sw);
+ int ret;
/* Link the two switches now */
tb_port_at(route, parent_sw)->remote = tb_upstream_port(sw);
tb_upstream_port(sw)->remote = tb_port_at(route, parent_sw);
ret = tb_switch_add(sw);
- if (ret) {
+ if (ret)
tb_port_at(tb_route(sw), parent_sw)->remote = NULL;
- tb_switch_put(sw);
- sw = ERR_PTR(ret);
- }
-
-out:
- pm_runtime_mark_last_busy(&parent_sw->dev);
- pm_runtime_put_autosuspend(&parent_sw->dev);
- return sw;
+ return ret;
}
static void update_switch(struct tb_switch *parent_sw, struct tb_switch *sw,
@@ -697,11 +705,11 @@ icm_fr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
(const struct icm_fr_event_device_connected *)hdr;
enum tb_security_level security_level;
struct tb_switch *sw, *parent_sw;
+ bool boot, dual_lane, speed_gen3;
struct icm *icm = tb_priv(tb);
bool authorized = false;
struct tb_xdomain *xd;
u8 link, depth;
- bool boot;
u64 route;
int ret;
@@ -714,6 +722,8 @@ icm_fr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
security_level = (pkg->hdr.flags & ICM_FLAGS_SLEVEL_MASK) >>
ICM_FLAGS_SLEVEL_SHIFT;
boot = pkg->link_info & ICM_LINK_INFO_BOOT;
+ dual_lane = pkg->hdr.flags & ICM_FLAGS_DUAL_LANE;
+ speed_gen3 = pkg->hdr.flags & ICM_FLAGS_SPEED_GEN3;
if (pkg->link_info & ICM_LINK_INFO_REJECTED) {
tb_info(tb, "switch at %u.%u was rejected by ICM firmware because topology limit exceeded\n",
@@ -811,10 +821,27 @@ icm_fr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
return;
}
- add_switch(parent_sw, route, &pkg->ep_uuid, (const u8 *)pkg->ep_name,
- sizeof(pkg->ep_name), pkg->connection_id,
- pkg->connection_key, link, depth, security_level,
- authorized, boot);
+ pm_runtime_get_sync(&parent_sw->dev);
+
+ sw = alloc_switch(parent_sw, route, &pkg->ep_uuid);
+ if (!IS_ERR(sw)) {
+ sw->connection_id = pkg->connection_id;
+ sw->connection_key = pkg->connection_key;
+ sw->link = link;
+ sw->depth = depth;
+ sw->authorized = authorized;
+ sw->security_level = security_level;
+ sw->boot = boot;
+ sw->link_speed = speed_gen3 ? 20 : 10;
+ sw->link_width = dual_lane ? 2 : 1;
+ sw->rpm = intel_vss_is_rtd3(pkg->ep_name, sizeof(pkg->ep_name));
+
+ if (add_switch(parent_sw, sw))
+ tb_switch_put(sw);
+ }
+
+ pm_runtime_mark_last_busy(&parent_sw->dev);
+ pm_runtime_put_autosuspend(&parent_sw->dev);
tb_switch_put(parent_sw);
}
@@ -1142,10 +1169,10 @@ __icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr,
{
const struct icm_tr_event_device_connected *pkg =
(const struct icm_tr_event_device_connected *)hdr;
+ bool authorized, boot, dual_lane, speed_gen3;
enum tb_security_level security_level;
struct tb_switch *sw, *parent_sw;
struct tb_xdomain *xd;
- bool authorized, boot;
u64 route;
icm_postpone_rescan(tb);
@@ -1163,6 +1190,8 @@ __icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr,
security_level = (pkg->hdr.flags & ICM_FLAGS_SLEVEL_MASK) >>
ICM_FLAGS_SLEVEL_SHIFT;
boot = pkg->link_info & ICM_LINK_INFO_BOOT;
+ dual_lane = pkg->hdr.flags & ICM_FLAGS_DUAL_LANE;
+ speed_gen3 = pkg->hdr.flags & ICM_FLAGS_SPEED_GEN3;
if (pkg->link_info & ICM_LINK_INFO_REJECTED) {
tb_info(tb, "switch at %llx was rejected by ICM firmware because topology limit exceeded\n",
@@ -1205,11 +1234,27 @@ __icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr,
return;
}
- sw = add_switch(parent_sw, route, &pkg->ep_uuid, (const u8 *)pkg->ep_name,
- sizeof(pkg->ep_name), pkg->connection_id, 0, 0, 0,
- security_level, authorized, boot);
- if (!IS_ERR(sw) && force_rtd3)
- sw->rpm = true;
+ pm_runtime_get_sync(&parent_sw->dev);
+
+ sw = alloc_switch(parent_sw, route, &pkg->ep_uuid);
+ if (!IS_ERR(sw)) {
+ sw->connection_id = pkg->connection_id;
+ sw->authorized = authorized;
+ sw->security_level = security_level;
+ sw->boot = boot;
+ sw->link_speed = speed_gen3 ? 20 : 10;
+ sw->link_width = dual_lane ? 2 : 1;
+ sw->rpm = force_rtd3;
+ if (!sw->rpm)
+ sw->rpm = intel_vss_is_rtd3(pkg->ep_name,
+ sizeof(pkg->ep_name));
+
+ if (add_switch(parent_sw, sw))
+ tb_switch_put(sw);
+ }
+
+ pm_runtime_mark_last_busy(&parent_sw->dev);
+ pm_runtime_put_autosuspend(&parent_sw->dev);
tb_switch_put(parent_sw);
}
@@ -1349,9 +1394,12 @@ static bool icm_ar_is_supported(struct tb *tb)
/*
* Starting from Alpine Ridge we can use ICM on Apple machines
* as well. We just need to reset and re-enable it first.
+ * However, only start it if explicitly asked by the user.
*/
- if (!x86_apple_machine)
+ if (icm_firmware_running(tb->nhi))
return true;
+ if (!start_icm)
+ return false;
/*
* Find the upstream PCIe port in case we need to do reset
@@ -1704,8 +1752,7 @@ static int icm_firmware_start(struct tb *tb, struct tb_nhi *nhi)
u32 val;
/* Check if the ICM firmware is already running */
- val = ioread32(nhi->iobase + REG_FW_STS);
- if (val & REG_FW_STS_ICM_EN)
+ if (icm_firmware_running(nhi))
return 0;
dev_dbg(&nhi->pdev->dev, "starting ICM firmware\n");
@@ -1893,14 +1940,12 @@ static int icm_suspend(struct tb *tb)
*/
static void icm_unplug_children(struct tb_switch *sw)
{
- unsigned int i;
+ struct tb_port *port;
if (tb_route(sw))
sw->is_unplugged = true;
- for (i = 1; i <= sw->config.max_port_number; i++) {
- struct tb_port *port = &sw->ports[i];
-
+ tb_switch_for_each_port(sw, port) {
if (port->xdomain)
port->xdomain->is_unplugged = true;
else if (tb_port_has_remote(port))
@@ -1936,11 +1981,9 @@ static void remove_unplugged_switch(struct tb_switch *sw)
static void icm_free_unplugged_children(struct tb_switch *sw)
{
- unsigned int i;
-
- for (i = 1; i <= sw->config.max_port_number; i++) {
- struct tb_port *port = &sw->ports[i];
+ struct tb_port *port;
+ tb_switch_for_each_port(sw, port) {
if (port->xdomain && port->xdomain->is_unplugged) {
tb_xdomain_remove(port->xdomain);
port->xdomain = NULL;
@@ -2216,7 +2259,7 @@ struct tb *icm_probe(struct tb_nhi *nhi)
case PCI_DEVICE_ID_INTEL_ICL_NHI0:
case PCI_DEVICE_ID_INTEL_ICL_NHI1:
- icm->is_supported = icm_ar_is_supported;
+ icm->is_supported = icm_fr_is_supported;
icm->driver_ready = icm_icl_driver_ready;
icm->set_uuid = icm_icl_set_uuid;
icm->device_connected = icm_icl_device_connected;
diff --git a/drivers/thunderbolt/lc.c b/drivers/thunderbolt/lc.c
index ae1e92611c3e..bd44d50246d2 100644
--- a/drivers/thunderbolt/lc.c
+++ b/drivers/thunderbolt/lc.c
@@ -94,7 +94,7 @@ int tb_lc_configure_link(struct tb_switch *sw)
struct tb_port *up, *down;
int ret;
- if (!sw->config.enabled || !tb_route(sw))
+ if (!tb_route(sw) || tb_switch_is_icm(sw))
return 0;
up = tb_upstream_port(sw);
@@ -124,7 +124,7 @@ void tb_lc_unconfigure_link(struct tb_switch *sw)
{
struct tb_port *up, *down;
- if (sw->is_unplugged || !sw->config.enabled || !tb_route(sw))
+ if (sw->is_unplugged || !tb_route(sw) || tb_switch_is_icm(sw))
return;
up = tb_upstream_port(sw);
@@ -177,3 +177,192 @@ int tb_lc_set_sleep(struct tb_switch *sw)
return 0;
}
+
+/**
+ * tb_lc_lane_bonding_possible() - Is lane bonding possible towards switch
+ * @sw: Switch to check
+ *
+ * Checks whether lane bonding is possible on the link between the
+ * parent switch and @sw.
+ */
+bool tb_lc_lane_bonding_possible(struct tb_switch *sw)
+{
+ struct tb_port *up;
+ int cap, ret;
+ u32 val;
+
+ if (sw->generation < 2)
+ return false;
+
+ up = tb_upstream_port(sw);
+ cap = find_port_lc_cap(up);
+ if (cap < 0)
+ return false;
+
+ ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, cap + TB_LC_PORT_ATTR, 1);
+ if (ret)
+ return false;
+
+ return !!(val & TB_LC_PORT_ATTR_BE);
+}
+
+static int tb_lc_dp_sink_from_port(const struct tb_switch *sw,
+ struct tb_port *in)
+{
+ struct tb_port *port;
+
+ /* The first DP IN port is sink 0 and second is sink 1 */
+ tb_switch_for_each_port(sw, port) {
+ if (tb_port_is_dpin(port))
+ return in != port;
+ }
+
+ return -EINVAL;
+}
+
+static int tb_lc_dp_sink_available(struct tb_switch *sw, int sink)
+{
+ u32 val, alloc;
+ int ret;
+
+ ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
+ sw->cap_lc + TB_LC_SNK_ALLOCATION, 1);
+ if (ret)
+ return ret;
+
+ /*
+ * Sink is available for CM/SW to use if the allocation value is
+ * either 0 or 1.
+ */
+ if (!sink) {
+ alloc = val & TB_LC_SNK_ALLOCATION_SNK0_MASK;
+ if (!alloc || alloc == TB_LC_SNK_ALLOCATION_SNK0_CM)
+ return 0;
+ } else {
+ alloc = (val & TB_LC_SNK_ALLOCATION_SNK1_MASK) >>
+ TB_LC_SNK_ALLOCATION_SNK1_SHIFT;
+ if (!alloc || alloc == TB_LC_SNK_ALLOCATION_SNK1_CM)
+ return 0;
+ }
+
+ return -EBUSY;
+}
+
+/**
+ * tb_lc_dp_sink_query() - Is DP sink available for DP IN port
+ * @sw: Switch whose DP sink is queried
+ * @in: DP IN port to check
+ *
+ * Queries through LC SNK_ALLOCATION registers whether the DP sink is
+ * available for the given DP IN port.
+ */
+bool tb_lc_dp_sink_query(struct tb_switch *sw, struct tb_port *in)
+{
+ int sink;
+
+ /*
+ * For older generations sink is always available as there is no
+ * allocation mechanism.
+ */
+ if (sw->generation < 3)
+ return true;
+
+ sink = tb_lc_dp_sink_from_port(sw, in);
+ if (sink < 0)
+ return false;
+
+ return !tb_lc_dp_sink_available(sw, sink);
+}
+
+/**
+ * tb_lc_dp_sink_alloc() - Allocate DP sink
+ * @sw: Switch whose DP sink is allocated
+ * @in: DP IN port the DP sink is allocated for
+ *
+ * Allocate DP sink for @in via LC SNK_ALLOCATION registers. If the
+ * resource is available and the allocation succeeds, returns %0. In all
+ * other cases returns a negative errno. In particular %-EBUSY is returned if
+ * the resource was not available.
+ */
+int tb_lc_dp_sink_alloc(struct tb_switch *sw, struct tb_port *in)
+{
+ int ret, sink;
+ u32 val;
+
+ if (sw->generation < 3)
+ return 0;
+
+ sink = tb_lc_dp_sink_from_port(sw, in);
+ if (sink < 0)
+ return sink;
+
+ ret = tb_lc_dp_sink_available(sw, sink);
+ if (ret)
+ return ret;
+
+ ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
+ sw->cap_lc + TB_LC_SNK_ALLOCATION, 1);
+ if (ret)
+ return ret;
+
+ if (!sink) {
+ val &= ~TB_LC_SNK_ALLOCATION_SNK0_MASK;
+ val |= TB_LC_SNK_ALLOCATION_SNK0_CM;
+ } else {
+ val &= ~TB_LC_SNK_ALLOCATION_SNK1_MASK;
+ val |= TB_LC_SNK_ALLOCATION_SNK1_CM <<
+ TB_LC_SNK_ALLOCATION_SNK1_SHIFT;
+ }
+
+ ret = tb_sw_write(sw, &val, TB_CFG_SWITCH,
+ sw->cap_lc + TB_LC_SNK_ALLOCATION, 1);
+ if (ret)
+ return ret;
+
+ tb_port_dbg(in, "sink %d allocated\n", sink);
+ return 0;
+}
+
+/**
+ * tb_lc_dp_sink_dealloc() - De-allocate DP sink
+ * @sw: Switch whose DP sink is de-allocated
+ * @in: DP IN port whose DP sink is de-allocated
+ *
+ * De-allocate DP sink from @in using LC SNK_ALLOCATION registers.
+ */
+int tb_lc_dp_sink_dealloc(struct tb_switch *sw, struct tb_port *in)
+{
+ int ret, sink;
+ u32 val;
+
+ if (sw->generation < 3)
+ return 0;
+
+ sink = tb_lc_dp_sink_from_port(sw, in);
+ if (sink < 0)
+ return sink;
+
+ /* Needs to be owned by CM/SW */
+ ret = tb_lc_dp_sink_available(sw, sink);
+ if (ret)
+ return ret;
+
+ ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
+ sw->cap_lc + TB_LC_SNK_ALLOCATION, 1);
+ if (ret)
+ return ret;
+
+ if (!sink)
+ val &= ~TB_LC_SNK_ALLOCATION_SNK0_MASK;
+ else
+ val &= ~TB_LC_SNK_ALLOCATION_SNK1_MASK;
+
+ ret = tb_sw_write(sw, &val, TB_CFG_SWITCH,
+ sw->cap_lc + TB_LC_SNK_ALLOCATION, 1);
+ if (ret)
+ return ret;
+
+ tb_port_dbg(in, "sink %d de-allocated\n", sink);
+ return 0;
+}
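Taken together, tb_lc_dp_sink_query(), tb_lc_dp_sink_alloc() and tb_lc_dp_sink_dealloc() give the software connection manager a claim/release protocol for DP sinks. A rough sketch of the intended calling sequence (illustrative only; error handling is trimmed and the actual tunnel setup is elided):

/* Illustrative CM flow for a DP IN port @in on switch @sw. */
static int example_claim_dp_sink(struct tb_switch *sw, struct tb_port *in)
{
        int ret;

        if (!tb_lc_dp_sink_query(sw, in))       /* sink already taken? */
                return -EBUSY;

        ret = tb_lc_dp_sink_alloc(sw, in);      /* claim it for the SW CM */
        if (ret)
                return ret;

        /* ... set up the DP tunnel; on teardown release the sink ... */

        return tb_lc_dp_sink_dealloc(sw, in);
}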
diff --git a/drivers/thunderbolt/path.c b/drivers/thunderbolt/path.c
index afe5f8391ebf..ad58559ea88e 100644
--- a/drivers/thunderbolt/path.c
+++ b/drivers/thunderbolt/path.c
@@ -220,7 +220,8 @@ err:
* Creates path between two ports starting with given @src_hopid. Reserves
* HopIDs for each port (they can be different from @src_hopid depending on
* how many HopIDs each port already has reserved). If there are dual
- * links on the path, prioritizes using @link_nr.
+ * links on the path, prioritizes using @link_nr but takes into account
+ * that the lanes may be bonded.
*
* Return: Returns a tb_path on success or NULL on failure.
*/
@@ -259,7 +260,9 @@ struct tb_path *tb_path_alloc(struct tb *tb, struct tb_port *src, int src_hopid,
if (!in_port)
goto err;
- if (in_port->dual_link_port && in_port->link_nr != link_nr)
+ /* When lanes are bonded primary link must be used */
+ if (!in_port->bonded && in_port->dual_link_port &&
+ in_port->link_nr != link_nr)
in_port = in_port->dual_link_port;
ret = tb_port_alloc_in_hopid(in_port, in_hopid, in_hopid);
@@ -271,8 +274,27 @@ struct tb_path *tb_path_alloc(struct tb *tb, struct tb_port *src, int src_hopid,
if (!out_port)
goto err;
- if (out_port->dual_link_port && out_port->link_nr != link_nr)
- out_port = out_port->dual_link_port;
+ /*
+ * Pick up right port when going from non-bonded to
+ * bonded or from bonded to non-bonded.
+ */
+ if (out_port->dual_link_port) {
+ if (!in_port->bonded && out_port->bonded &&
+ out_port->link_nr) {
+ /*
+ * Use primary link when going from
+ * non-bonded to bonded.
+ */
+ out_port = out_port->dual_link_port;
+ } else if (!out_port->bonded &&
+ out_port->link_nr != link_nr) {
+ /*
+ * If out port is not bonded follow
+ * link_nr.
+ */
+ out_port = out_port->dual_link_port;
+ }
+ }
if (i == num_hops - 1)
ret = tb_port_alloc_out_hopid(out_port, dst_hopid,
@@ -535,3 +557,25 @@ bool tb_path_is_invalid(struct tb_path *path)
}
return false;
}
+
+/**
+ * tb_path_switch_on_path() - Does the path go through certain switch
+ * @path: Path to check
+ * @sw: Switch to check
+ *
+ * Goes over all hops on @path and checks whether @sw is one of them.
+ * Direction does not matter.
+ */
+bool tb_path_switch_on_path(const struct tb_path *path,
+ const struct tb_switch *sw)
+{
+ int i;
+
+ for (i = 0; i < path->path_length; i++) {
+ if (path->hops[i].in_port->sw == sw ||
+ path->hops[i].out_port->sw == sw)
+ return true;
+ }
+
+ return false;
+}
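One intended use of tb_path_switch_on_path() is bandwidth accounting: a tunnel consumes bandwidth on a switch only if one of its paths traverses it. A hedged sketch of such a wrapper, assuming the paths/npaths fields of struct tb_tunnel (the tb_tunnel_switch_on_path() used later in this series is expected to look roughly like this):

/* Sketch: does any path of @tunnel go through @sw? */
static bool example_tunnel_switch_on_path(const struct tb_tunnel *tunnel,
                                          const struct tb_switch *sw)
{
        int i;

        for (i = 0; i < tunnel->npaths; i++) {
                if (tunnel->paths[i] &&
                    tb_path_switch_on_path(tunnel->paths[i], sw))
                        return true;
        }

        return false;
}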
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index 5ea8db667e83..ca86a8e09c77 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -168,7 +168,7 @@ static int nvm_validate_and_write(struct tb_switch *sw)
static int nvm_authenticate_host(struct tb_switch *sw)
{
- int ret;
+ int ret = 0;
/*
* Root switch NVM upgrade requires that we disconnect the
@@ -176,6 +176,8 @@ static int nvm_authenticate_host(struct tb_switch *sw)
* already).
*/
if (!sw->safe_mode) {
+ u32 status;
+
ret = tb_domain_disconnect_all_paths(sw->tb);
if (ret)
return ret;
@@ -184,7 +186,16 @@ static int nvm_authenticate_host(struct tb_switch *sw)
* everything goes well so getting timeout is expected.
*/
ret = dma_port_flash_update_auth(sw->dma_port);
- return ret == -ETIMEDOUT ? 0 : ret;
+ if (!ret || ret == -ETIMEDOUT)
+ return 0;
+
+ /*
+ * Any error from update auth operation requires power
+ * cycling of the host router.
+ */
+ tb_sw_warn(sw, "failed to authenticate NVM, power cycling\n");
+ if (dma_port_flash_update_auth_status(sw->dma_port, &status) > 0)
+ nvm_set_auth_status(sw, status);
}
/*
@@ -192,7 +203,7 @@ static int nvm_authenticate_host(struct tb_switch *sw)
* switch.
*/
dma_port_power_cycle(sw->dma_port);
- return 0;
+ return ret;
}
static int nvm_authenticate_device(struct tb_switch *sw)
@@ -200,8 +211,16 @@ static int nvm_authenticate_device(struct tb_switch *sw)
int ret, retries = 10;
ret = dma_port_flash_update_auth(sw->dma_port);
- if (ret && ret != -ETIMEDOUT)
+ switch (ret) {
+ case 0:
+ case -ETIMEDOUT:
+ case -EACCES:
+ case -EINVAL:
+ /* Power cycle is required */
+ break;
+ default:
return ret;
+ }
/*
* Poll here for the authentication status. It takes some time
@@ -553,17 +572,17 @@ int tb_port_add_nfc_credits(struct tb_port *port, int credits)
if (credits == 0 || port->sw->is_unplugged)
return 0;
- nfc_credits = port->config.nfc_credits & TB_PORT_NFC_CREDITS_MASK;
+ nfc_credits = port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK;
nfc_credits += credits;
- tb_port_dbg(port, "adding %d NFC credits to %lu",
- credits, port->config.nfc_credits & TB_PORT_NFC_CREDITS_MASK);
+ tb_port_dbg(port, "adding %d NFC credits to %lu", credits,
+ port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK);
- port->config.nfc_credits &= ~TB_PORT_NFC_CREDITS_MASK;
+ port->config.nfc_credits &= ~ADP_CS_4_NFC_BUFFERS_MASK;
port->config.nfc_credits |= nfc_credits;
return tb_port_write(port, &port->config.nfc_credits,
- TB_CFG_PORT, 4, 1);
+ TB_CFG_PORT, ADP_CS_4, 1);
}
/**
@@ -578,14 +597,14 @@ int tb_port_set_initial_credits(struct tb_port *port, u32 credits)
u32 data;
int ret;
- ret = tb_port_read(port, &data, TB_CFG_PORT, 5, 1);
+ ret = tb_port_read(port, &data, TB_CFG_PORT, ADP_CS_5, 1);
if (ret)
return ret;
- data &= ~TB_PORT_LCA_MASK;
- data |= (credits << TB_PORT_LCA_SHIFT) & TB_PORT_LCA_MASK;
+ data &= ~ADP_CS_5_LCA_MASK;
+ data |= (credits << ADP_CS_5_LCA_SHIFT) & ADP_CS_5_LCA_MASK;
- return tb_port_write(port, &data, TB_CFG_PORT, 5, 1);
+ return tb_port_write(port, &data, TB_CFG_PORT, ADP_CS_5, 1);
}
/**
@@ -645,6 +664,7 @@ static int tb_init_port(struct tb_port *port)
ida_init(&port->out_hopids);
}
+ INIT_LIST_HEAD(&port->list);
return 0;
}
@@ -775,6 +795,132 @@ struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end,
return next;
}
+static int tb_port_get_link_speed(struct tb_port *port)
+{
+ u32 val, speed;
+ int ret;
+
+ if (!port->cap_phy)
+ return -EINVAL;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_phy + LANE_ADP_CS_1, 1);
+ if (ret)
+ return ret;
+
+ speed = (val & LANE_ADP_CS_1_CURRENT_SPEED_MASK) >>
+ LANE_ADP_CS_1_CURRENT_SPEED_SHIFT;
+ return speed == LANE_ADP_CS_1_CURRENT_SPEED_GEN3 ? 20 : 10;
+}
+
+static int tb_port_get_link_width(struct tb_port *port)
+{
+ u32 val;
+ int ret;
+
+ if (!port->cap_phy)
+ return -EINVAL;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_phy + LANE_ADP_CS_1, 1);
+ if (ret)
+ return ret;
+
+ return (val & LANE_ADP_CS_1_CURRENT_WIDTH_MASK) >>
+ LANE_ADP_CS_1_CURRENT_WIDTH_SHIFT;
+}
+
+static bool tb_port_is_width_supported(struct tb_port *port, int width)
+{
+ u32 phy, widths;
+ int ret;
+
+ if (!port->cap_phy)
+ return false;
+
+ ret = tb_port_read(port, &phy, TB_CFG_PORT,
+ port->cap_phy + LANE_ADP_CS_0, 1);
+ if (ret)
+ return false;
+
+ widths = (phy & LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK) >>
+ LANE_ADP_CS_0_SUPPORTED_WIDTH_SHIFT;
+
+ return !!(widths & width);
+}
+
+static int tb_port_set_link_width(struct tb_port *port, unsigned int width)
+{
+ u32 val;
+ int ret;
+
+ if (!port->cap_phy)
+ return -EINVAL;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_phy + LANE_ADP_CS_1, 1);
+ if (ret)
+ return ret;
+
+ val &= ~LANE_ADP_CS_1_TARGET_WIDTH_MASK;
+ switch (width) {
+ case 1:
+ val |= LANE_ADP_CS_1_TARGET_WIDTH_SINGLE <<
+ LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
+ break;
+ case 2:
+ val |= LANE_ADP_CS_1_TARGET_WIDTH_DUAL <<
+ LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ val |= LANE_ADP_CS_1_LB;
+
+ return tb_port_write(port, &val, TB_CFG_PORT,
+ port->cap_phy + LANE_ADP_CS_1, 1);
+}
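A note on the encoding, inferred from the defines rather than stated in the patch:

/*
 * Assumption: the target width field reads as a lane bitmask, so
 * LANE_ADP_CS_1_TARGET_WIDTH_SINGLE (0x1) enables lane 0 only while
 * LANE_ADP_CS_1_TARGET_WIDTH_DUAL (0x3) enables both lanes; setting
 * LANE_ADP_CS_1_LB then asks the lane adapter to apply the new width.
 */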
+
+static int tb_port_lane_bonding_enable(struct tb_port *port)
+{
+ int ret;
+
+ /*
+ * Enable lane bonding for both links if not already enabled by,
+ * for example, the boot firmware.
+ */
+ ret = tb_port_get_link_width(port);
+ if (ret == 1) {
+ ret = tb_port_set_link_width(port, 2);
+ if (ret)
+ return ret;
+ }
+
+ ret = tb_port_get_link_width(port->dual_link_port);
+ if (ret == 1) {
+ ret = tb_port_set_link_width(port->dual_link_port, 2);
+ if (ret) {
+ tb_port_set_link_width(port, 1);
+ return ret;
+ }
+ }
+
+ port->bonded = true;
+ port->dual_link_port->bonded = true;
+
+ return 0;
+}
+
+static void tb_port_lane_bonding_disable(struct tb_port *port)
+{
+ port->dual_link_port->bonded = false;
+ port->bonded = false;
+
+ tb_port_set_link_width(port->dual_link_port, 1);
+ tb_port_set_link_width(port, 1);
+}
+
/**
* tb_port_is_enabled() - Is the adapter port enabled
* @port: Port to check
@@ -803,10 +949,11 @@ bool tb_pci_port_is_enabled(struct tb_port *port)
{
u32 data;
- if (tb_port_read(port, &data, TB_CFG_PORT, port->cap_adap, 1))
+ if (tb_port_read(port, &data, TB_CFG_PORT,
+ port->cap_adap + ADP_PCIE_CS_0, 1))
return false;
- return !!(data & TB_PCI_EN);
+ return !!(data & ADP_PCIE_CS_0_PE);
}
/**
@@ -816,10 +963,11 @@ bool tb_pci_port_is_enabled(struct tb_port *port)
*/
int tb_pci_port_enable(struct tb_port *port, bool enable)
{
- u32 word = enable ? TB_PCI_EN : 0x0;
+ u32 word = enable ? ADP_PCIE_CS_0_PE : 0x0;
if (!port->cap_adap)
return -ENXIO;
- return tb_port_write(port, &word, TB_CFG_PORT, port->cap_adap, 1);
+ return tb_port_write(port, &word, TB_CFG_PORT,
+ port->cap_adap + ADP_PCIE_CS_0, 1);
}
/**
@@ -833,11 +981,12 @@ int tb_dp_port_hpd_is_active(struct tb_port *port)
u32 data;
int ret;
- ret = tb_port_read(port, &data, TB_CFG_PORT, port->cap_adap + 2, 1);
+ ret = tb_port_read(port, &data, TB_CFG_PORT,
+ port->cap_adap + ADP_DP_CS_2, 1);
if (ret)
return ret;
- return !!(data & TB_DP_HDP);
+ return !!(data & ADP_DP_CS_2_HDP);
}
/**
@@ -851,12 +1000,14 @@ int tb_dp_port_hpd_clear(struct tb_port *port)
u32 data;
int ret;
- ret = tb_port_read(port, &data, TB_CFG_PORT, port->cap_adap + 3, 1);
+ ret = tb_port_read(port, &data, TB_CFG_PORT,
+ port->cap_adap + ADP_DP_CS_3, 1);
if (ret)
return ret;
- data |= TB_DP_HPDC;
- return tb_port_write(port, &data, TB_CFG_PORT, port->cap_adap + 3, 1);
+ data |= ADP_DP_CS_3_HDPC;
+ return tb_port_write(port, &data, TB_CFG_PORT,
+ port->cap_adap + ADP_DP_CS_3, 1);
}
/**
@@ -874,20 +1025,23 @@ int tb_dp_port_set_hops(struct tb_port *port, unsigned int video,
u32 data[2];
int ret;
- ret = tb_port_read(port, data, TB_CFG_PORT, port->cap_adap,
- ARRAY_SIZE(data));
+ ret = tb_port_read(port, data, TB_CFG_PORT,
+ port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
if (ret)
return ret;
- data[0] &= ~TB_DP_VIDEO_HOPID_MASK;
- data[1] &= ~(TB_DP_AUX_RX_HOPID_MASK | TB_DP_AUX_TX_HOPID_MASK);
+ data[0] &= ~ADP_DP_CS_0_VIDEO_HOPID_MASK;
+ data[1] &= ~ADP_DP_CS_1_AUX_RX_HOPID_MASK;
+ data[1] &= ~ADP_DP_CS_1_AUX_TX_HOPID_MASK;
- data[0] |= (video << TB_DP_VIDEO_HOPID_SHIFT) & TB_DP_VIDEO_HOPID_MASK;
- data[1] |= aux_tx & TB_DP_AUX_TX_HOPID_MASK;
- data[1] |= (aux_rx << TB_DP_AUX_RX_HOPID_SHIFT) & TB_DP_AUX_RX_HOPID_MASK;
+ data[0] |= (video << ADP_DP_CS_0_VIDEO_HOPID_SHIFT) &
+ ADP_DP_CS_0_VIDEO_HOPID_MASK;
+ data[1] |= aux_tx & ADP_DP_CS_1_AUX_TX_HOPID_MASK;
+ data[1] |= (aux_rx << ADP_DP_CS_1_AUX_RX_HOPID_SHIFT) &
+ ADP_DP_CS_1_AUX_RX_HOPID_MASK;
- return tb_port_write(port, data, TB_CFG_PORT, port->cap_adap,
- ARRAY_SIZE(data));
+ return tb_port_write(port, data, TB_CFG_PORT,
+ port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
}
/**
@@ -898,11 +1052,11 @@ bool tb_dp_port_is_enabled(struct tb_port *port)
{
u32 data[2];
- if (tb_port_read(port, data, TB_CFG_PORT, port->cap_adap,
+ if (tb_port_read(port, data, TB_CFG_PORT, port->cap_adap + ADP_DP_CS_0,
ARRAY_SIZE(data)))
return false;
- return !!(data[0] & (TB_DP_VIDEO_EN | TB_DP_AUX_EN));
+ return !!(data[0] & (ADP_DP_CS_0_VE | ADP_DP_CS_0_AE));
}
/**
@@ -918,18 +1072,18 @@ int tb_dp_port_enable(struct tb_port *port, bool enable)
u32 data[2];
int ret;
- ret = tb_port_read(port, data, TB_CFG_PORT, port->cap_adap,
- ARRAY_SIZE(data));
+ ret = tb_port_read(port, data, TB_CFG_PORT,
+ port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
if (ret)
return ret;
if (enable)
- data[0] |= TB_DP_VIDEO_EN | TB_DP_AUX_EN;
+ data[0] |= ADP_DP_CS_0_VE | ADP_DP_CS_0_AE;
else
- data[0] &= ~(TB_DP_VIDEO_EN | TB_DP_AUX_EN);
+ data[0] &= ~(ADP_DP_CS_0_VE | ADP_DP_CS_0_AE);
- return tb_port_write(port, data, TB_CFG_PORT, port->cap_adap,
- ARRAY_SIZE(data));
+ return tb_port_write(port, data, TB_CFG_PORT,
+ port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
}
/* switch utility functions */
@@ -986,7 +1140,7 @@ static int tb_plug_events_active(struct tb_switch *sw, bool active)
u32 data;
int res;
- if (!sw->config.enabled)
+ if (tb_switch_is_icm(sw))
return 0;
sw->config.plug_events_delay = 0xff;
@@ -1114,6 +1268,15 @@ device_name_show(struct device *dev, struct device_attribute *attr, char *buf)
}
static DEVICE_ATTR_RO(device_name);
+static ssize_t
+generation_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct tb_switch *sw = tb_to_switch(dev);
+
+ return sprintf(buf, "%u\n", sw->generation);
+}
+static DEVICE_ATTR_RO(generation);
+
static ssize_t key_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -1166,6 +1329,36 @@ static ssize_t key_store(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR(key, 0600, key_show, key_store);
+static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct tb_switch *sw = tb_to_switch(dev);
+
+ return sprintf(buf, "%u.0 Gb/s\n", sw->link_speed);
+}
+
+/*
+ * Currently all lanes must run at the same speed, but we expose both
+ * directions to allow possible asymmetric links in the future.
+ */
+static DEVICE_ATTR(rx_speed, 0444, speed_show, NULL);
+static DEVICE_ATTR(tx_speed, 0444, speed_show, NULL);
+
+static ssize_t lanes_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct tb_switch *sw = tb_to_switch(dev);
+
+ return sprintf(buf, "%u\n", sw->link_width);
+}
+
+/*
+ * Currently the link has the same number of lanes in both directions
+ * (1 or 2), but expose them separately to allow possible asymmetric
+ * links in the future.
+ */
+static DEVICE_ATTR(rx_lanes, 0444, lanes_show, NULL);
+static DEVICE_ATTR(tx_lanes, 0444, lanes_show, NULL);
+
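With these attributes and the visibility rule added below, userspace should see something like the following for a device connected over a bonded Gen3 link (device path and values are illustrative):

/*
 * Expected sysfs view, assuming link_speed = 20 and link_width = 2:
 *
 *   $ cat /sys/bus/thunderbolt/devices/0-1/tx_speed
 *   20.0 Gb/s
 *   $ cat /sys/bus/thunderbolt/devices/0-1/rx_lanes
 *   2
 */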
static void nvm_authenticate_start(struct tb_switch *sw)
{
struct pci_dev *root_port;
@@ -1246,8 +1439,6 @@ static ssize_t nvm_authenticate_store(struct device *dev,
*/
nvm_authenticate_start(sw);
ret = nvm_authenticate_host(sw);
- if (ret)
- nvm_authenticate_complete(sw);
} else {
ret = nvm_authenticate_device(sw);
}
@@ -1319,9 +1510,14 @@ static struct attribute *switch_attrs[] = {
&dev_attr_boot.attr,
&dev_attr_device.attr,
&dev_attr_device_name.attr,
+ &dev_attr_generation.attr,
&dev_attr_key.attr,
&dev_attr_nvm_authenticate.attr,
&dev_attr_nvm_version.attr,
+ &dev_attr_rx_speed.attr,
+ &dev_attr_rx_lanes.attr,
+ &dev_attr_tx_speed.attr,
+ &dev_attr_tx_lanes.attr,
&dev_attr_vendor.attr,
&dev_attr_vendor_name.attr,
&dev_attr_unique_id.attr,
@@ -1352,6 +1548,13 @@ static umode_t switch_attr_is_visible(struct kobject *kobj,
sw->security_level == TB_SECURITY_SECURE)
return attr->mode;
return 0;
+ } else if (attr == &dev_attr_rx_speed.attr ||
+ attr == &dev_attr_rx_lanes.attr ||
+ attr == &dev_attr_tx_speed.attr ||
+ attr == &dev_attr_tx_lanes.attr) {
+ if (tb_route(sw))
+ return attr->mode;
+ return 0;
} else if (attr == &dev_attr_nvm_authenticate.attr) {
if (sw->dma_port && !sw->no_nvm_upgrade)
return attr->mode;
@@ -1382,14 +1585,14 @@ static const struct attribute_group *switch_groups[] = {
static void tb_switch_release(struct device *dev)
{
struct tb_switch *sw = tb_to_switch(dev);
- int i;
+ struct tb_port *port;
dma_port_free(sw->dma_port);
- for (i = 1; i <= sw->config.max_port_number; i++) {
- if (!sw->ports[i].disabled) {
- ida_destroy(&sw->ports[i].in_hopids);
- ida_destroy(&sw->ports[i].out_hopids);
+ tb_switch_for_each_port(sw, port) {
+ if (!port->disabled) {
+ ida_destroy(&port->in_hopids);
+ ida_destroy(&port->out_hopids);
}
}
@@ -1690,13 +1893,16 @@ static int tb_switch_add_dma_port(struct tb_switch *sw)
int ret;
switch (sw->generation) {
- case 3:
- break;
-
case 2:
/* Only root switch can be upgraded */
if (tb_route(sw))
return 0;
+
+ /* fallthrough */
+ case 3:
+ ret = tb_switch_set_uuid(sw);
+ if (ret)
+ return ret;
break;
default:
@@ -1710,7 +1916,7 @@ static int tb_switch_add_dma_port(struct tb_switch *sw)
}
/* Root switch DMA port requires running firmware */
- if (!tb_route(sw) && sw->config.enabled)
+ if (!tb_route(sw) && !tb_switch_is_icm(sw))
return 0;
sw->dma_port = dma_port_alloc(sw);
@@ -1721,6 +1927,19 @@ static int tb_switch_add_dma_port(struct tb_switch *sw)
return 0;
/*
+ * If a status is already set then authentication failed when
+ * dma_port_flash_update_auth() returned. Power cycling is not
+ * needed (it was done already) so the only thing we do here is
+ * unblock runtime PM of the root port.
+ */
+ nvm_get_auth_status(sw, &status);
+ if (status) {
+ if (!tb_route(sw))
+ nvm_authenticate_complete(sw);
+ return 0;
+ }
+
+ /*
* Check status of the previous flash authentication. If there
* is one we need to power cycle the switch in any case to make
* it functional again.
@@ -1735,9 +1954,6 @@ static int tb_switch_add_dma_port(struct tb_switch *sw)
if (status) {
tb_sw_info(sw, "switch flash authentication failed\n");
- ret = tb_switch_set_uuid(sw);
- if (ret)
- return ret;
nvm_set_auth_status(sw, status);
}
@@ -1751,6 +1967,153 @@ static int tb_switch_add_dma_port(struct tb_switch *sw)
return -ESHUTDOWN;
}
+static void tb_switch_default_link_ports(struct tb_switch *sw)
+{
+ int i;
+
+ for (i = 1; i <= sw->config.max_port_number; i += 2) {
+ struct tb_port *port = &sw->ports[i];
+ struct tb_port *subordinate;
+
+ if (!tb_port_is_null(port))
+ continue;
+
+ /* Check for the subordinate port */
+ if (i == sw->config.max_port_number ||
+ !tb_port_is_null(&sw->ports[i + 1]))
+ continue;
+
+ /* Link them if not already done so (by DROM) */
+ subordinate = &sw->ports[i + 1];
+ if (!port->dual_link_port && !subordinate->dual_link_port) {
+ port->link_nr = 0;
+ port->dual_link_port = subordinate;
+ subordinate->link_nr = 1;
+ subordinate->dual_link_port = port;
+
+ tb_sw_dbg(sw, "linked ports %d <-> %d\n",
+ port->port, subordinate->port);
+ }
+ }
+}
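The loop visits odd-numbered ports only (i starts at 1 and steps by 2), reflecting the convention that the two lane adapters of a link occupy adjacent port numbers. A worked example under that assumption:

/*
 * Assumed layout: ports 1..4 of @sw are lane (NULL) adapters, so the
 * loop visits i = 1 and i = 3 and links
 *
 *   port 1 (link_nr 0) <-> port 2 (link_nr 1)
 *   port 3 (link_nr 0) <-> port 4 (link_nr 1)
 *
 * Pairs already linked by the DROM are left untouched.
 */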
+
+static bool tb_switch_lane_bonding_possible(struct tb_switch *sw)
+{
+ const struct tb_port *up = tb_upstream_port(sw);
+
+ if (!up->dual_link_port || !up->dual_link_port->remote)
+ return false;
+
+ return tb_lc_lane_bonding_possible(sw);
+}
+
+static int tb_switch_update_link_attributes(struct tb_switch *sw)
+{
+ struct tb_port *up;
+ bool change = false;
+ int ret;
+
+ if (!tb_route(sw) || tb_switch_is_icm(sw))
+ return 0;
+
+ up = tb_upstream_port(sw);
+
+ ret = tb_port_get_link_speed(up);
+ if (ret < 0)
+ return ret;
+ if (sw->link_speed != ret)
+ change = true;
+ sw->link_speed = ret;
+
+ ret = tb_port_get_link_width(up);
+ if (ret < 0)
+ return ret;
+ if (sw->link_width != ret)
+ change = true;
+ sw->link_width = ret;
+
+ /* Notify userspace that there is a possible link attribute change */
+ if (device_is_registered(&sw->dev) && change)
+ kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE);
+
+ return 0;
+}
+
+/**
+ * tb_switch_lane_bonding_enable() - Enable lane bonding
+ * @sw: Switch to enable lane bonding
+ *
+ * Connection manager can call this function to enable lane bonding of a
+ * switch. If conditions are correct and both switches support the feature,
+ * lanes are bonded. It is safe to call this for any switch.
+ */
+int tb_switch_lane_bonding_enable(struct tb_switch *sw)
+{
+ struct tb_switch *parent = tb_to_switch(sw->dev.parent);
+ struct tb_port *up, *down;
+ u64 route = tb_route(sw);
+ int ret;
+
+ if (!route)
+ return 0;
+
+ if (!tb_switch_lane_bonding_possible(sw))
+ return 0;
+
+ up = tb_upstream_port(sw);
+ down = tb_port_at(route, parent);
+
+ if (!tb_port_is_width_supported(up, 2) ||
+ !tb_port_is_width_supported(down, 2))
+ return 0;
+
+ ret = tb_port_lane_bonding_enable(up);
+ if (ret) {
+ tb_port_warn(up, "failed to enable lane bonding\n");
+ return ret;
+ }
+
+ ret = tb_port_lane_bonding_enable(down);
+ if (ret) {
+ tb_port_warn(down, "failed to enable lane bonding\n");
+ tb_port_lane_bonding_disable(up);
+ return ret;
+ }
+
+ tb_switch_update_link_attributes(sw);
+
+ tb_sw_dbg(sw, "lane bonding enabled\n");
+ return ret;
+}
+
+/**
+ * tb_switch_lane_bonding_disable() - Disable lane bonding
+ * @sw: Switch whose lane bonding to disable
+ *
+ * Disables lane bonding between @sw and parent. This can be called even
+ * if lanes were not bonded originally.
+ */
+void tb_switch_lane_bonding_disable(struct tb_switch *sw)
+{
+ struct tb_switch *parent = tb_to_switch(sw->dev.parent);
+ struct tb_port *up, *down;
+
+ if (!tb_route(sw))
+ return;
+
+ up = tb_upstream_port(sw);
+ if (!up->bonded)
+ return;
+
+ down = tb_port_at(tb_route(sw), parent);
+
+ tb_port_lane_bonding_disable(up);
+ tb_port_lane_bonding_disable(down);
+
+ tb_switch_update_link_attributes(sw);
+ tb_sw_dbg(sw, "lane bonding disabled\n");
+}
+
/**
* tb_switch_add() - Add a switch to the domain
* @sw: Switch to add
@@ -1775,21 +2138,25 @@ int tb_switch_add(struct tb_switch *sw)
* configuration based mailbox.
*/
ret = tb_switch_add_dma_port(sw);
- if (ret)
+ if (ret) {
+ dev_err(&sw->dev, "failed to add DMA port\n");
return ret;
+ }
if (!sw->safe_mode) {
/* read drom */
ret = tb_drom_read(sw);
if (ret) {
- tb_sw_warn(sw, "tb_eeprom_read_rom failed\n");
+ dev_err(&sw->dev, "reading DROM failed\n");
return ret;
}
tb_sw_dbg(sw, "uid: %#llx\n", sw->uid);
ret = tb_switch_set_uuid(sw);
- if (ret)
+ if (ret) {
+ dev_err(&sw->dev, "failed to set UUID\n");
return ret;
+ }
for (i = 0; i <= sw->config.max_port_number; i++) {
if (sw->ports[i].disabled) {
@@ -1797,14 +2164,24 @@ int tb_switch_add(struct tb_switch *sw)
continue;
}
ret = tb_init_port(&sw->ports[i]);
- if (ret)
+ if (ret) {
+ dev_err(&sw->dev, "failed to initialize port %d\n", i);
return ret;
+ }
}
+
+ tb_switch_default_link_ports(sw);
+
+ ret = tb_switch_update_link_attributes(sw);
+ if (ret)
+ return ret;
}
ret = device_add(&sw->dev);
- if (ret)
+ if (ret) {
+ dev_err(&sw->dev, "failed to add device: %d\n", ret);
return ret;
+ }
if (tb_route(sw)) {
dev_info(&sw->dev, "new device found, vendor=%#x device=%#x\n",
@@ -1816,6 +2193,7 @@ int tb_switch_add(struct tb_switch *sw)
ret = tb_switch_nvm_add(sw);
if (ret) {
+ dev_err(&sw->dev, "failed to add NVM devices\n");
device_del(&sw->dev);
return ret;
}
@@ -1842,7 +2220,7 @@ int tb_switch_add(struct tb_switch *sw)
*/
void tb_switch_remove(struct tb_switch *sw)
{
- int i;
+ struct tb_port *port;
if (sw->rpm) {
pm_runtime_get_sync(&sw->dev);
@@ -1850,13 +2228,13 @@ void tb_switch_remove(struct tb_switch *sw)
}
/* port 0 is the switch itself and never has a remote */
- for (i = 1; i <= sw->config.max_port_number; i++) {
- if (tb_port_has_remote(&sw->ports[i])) {
- tb_switch_remove(sw->ports[i].remote->sw);
- sw->ports[i].remote = NULL;
- } else if (sw->ports[i].xdomain) {
- tb_xdomain_remove(sw->ports[i].xdomain);
- sw->ports[i].xdomain = NULL;
+ tb_switch_for_each_port(sw, port) {
+ if (tb_port_has_remote(port)) {
+ tb_switch_remove(port->remote->sw);
+ port->remote = NULL;
+ } else if (port->xdomain) {
+ tb_xdomain_remove(port->xdomain);
+ port->xdomain = NULL;
}
}
@@ -1876,7 +2254,8 @@ void tb_switch_remove(struct tb_switch *sw)
*/
void tb_sw_set_unplugged(struct tb_switch *sw)
{
- int i;
+ struct tb_port *port;
+
if (sw == sw->tb->root_switch) {
tb_sw_WARN(sw, "cannot unplug root switch\n");
return;
@@ -1886,17 +2265,19 @@ void tb_sw_set_unplugged(struct tb_switch *sw)
return;
}
sw->is_unplugged = true;
- for (i = 0; i <= sw->config.max_port_number; i++) {
- if (tb_port_has_remote(&sw->ports[i]))
- tb_sw_set_unplugged(sw->ports[i].remote->sw);
- else if (sw->ports[i].xdomain)
- sw->ports[i].xdomain->is_unplugged = true;
+ tb_switch_for_each_port(sw, port) {
+ if (tb_port_has_remote(port))
+ tb_sw_set_unplugged(port->remote->sw);
+ else if (port->xdomain)
+ port->xdomain->is_unplugged = true;
}
}
int tb_switch_resume(struct tb_switch *sw)
{
- int i, err;
+ struct tb_port *port;
+ int err;
+
tb_sw_dbg(sw, "resuming switch\n");
/*
@@ -1944,9 +2325,7 @@ int tb_switch_resume(struct tb_switch *sw)
return err;
/* check for surviving downstream switches */
- for (i = 1; i <= sw->config.max_port_number; i++) {
- struct tb_port *port = &sw->ports[i];
-
+ tb_switch_for_each_port(sw, port) {
if (!tb_port_has_remote(port) && !port->xdomain)
continue;
@@ -1970,19 +2349,64 @@ int tb_switch_resume(struct tb_switch *sw)
void tb_switch_suspend(struct tb_switch *sw)
{
- int i, err;
+ struct tb_port *port;
+ int err;
+
err = tb_plug_events_active(sw, false);
if (err)
return;
- for (i = 1; i <= sw->config.max_port_number; i++) {
- if (tb_port_has_remote(&sw->ports[i]))
- tb_switch_suspend(sw->ports[i].remote->sw);
+ tb_switch_for_each_port(sw, port) {
+ if (tb_port_has_remote(port))
+ tb_switch_suspend(port->remote->sw);
}
tb_lc_set_sleep(sw);
}
+/**
+ * tb_switch_query_dp_resource() - Query availability of DP resource
+ * @sw: Switch whose DP resource is queried
+ * @in: DP IN port
+ *
+ * Queries availability of DP resource for DP tunneling using switch
+ * specific means. Returns %true if the resource is available.
+ */
+bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in)
+{
+ return tb_lc_dp_sink_query(sw, in);
+}
+
+/**
+ * tb_switch_alloc_dp_resource() - Allocate available DP resource
+ * @sw: Switch whose DP resource is allocated
+ * @in: DP IN port
+ *
+ * Allocates DP resource for DP tunneling. The resource must be
+ * available for this to succeed (see tb_switch_query_dp_resource()).
+ * Returns %0 on success and a negative errno otherwise.
+ */
+int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
+{
+ return tb_lc_dp_sink_alloc(sw, in);
+}
+
+/**
+ * tb_switch_dealloc_dp_resource() - De-allocate DP resource
+ * @sw: Switch whose DP resource is de-allocated
+ * @in: DP IN port
+ *
+ * De-allocates DP resource that was previously allocated for DP
+ * tunneling.
+ */
+void tb_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
+{
+ if (tb_lc_dp_sink_dealloc(sw, in)) {
+ tb_sw_warn(sw, "failed to de-allocate DP resource for port %d\n",
+ in->port);
+ }
+}
+
struct tb_sw_lookup {
struct tb *tb;
u8 link;
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
index 1f7a9e1cc09c..ea8727f769d6 100644
--- a/drivers/thunderbolt/tb.c
+++ b/drivers/thunderbolt/tb.c
@@ -9,7 +9,6 @@
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/delay.h>
-#include <linux/platform_data/x86/apple.h>
#include "tb.h"
#include "tb_regs.h"
@@ -18,6 +17,7 @@
/**
* struct tb_cm - Simple Thunderbolt connection manager
* @tunnel_list: List of active tunnels
+ * @dp_resources: List of available DP resources for DP tunneling
* @hotplug_active: tb_handle_hotplug will stop progressing plug
* events and exit if this is not set (it needs to
* acquire the lock one more time). Used to drain wq
@@ -25,6 +25,7 @@
*/
struct tb_cm {
struct list_head tunnel_list;
+ struct list_head dp_resources;
bool hotplug_active;
};
@@ -56,17 +57,51 @@ static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
/* enumeration & hot plug handling */
+static void tb_add_dp_resources(struct tb_switch *sw)
+{
+ struct tb_cm *tcm = tb_priv(sw->tb);
+ struct tb_port *port;
+
+ tb_switch_for_each_port(sw, port) {
+ if (!tb_port_is_dpin(port))
+ continue;
+
+ if (!tb_switch_query_dp_resource(sw, port))
+ continue;
+
+ list_add_tail(&port->list, &tcm->dp_resources);
+ tb_port_dbg(port, "DP IN resource available\n");
+ }
+}
+
+static void tb_remove_dp_resources(struct tb_switch *sw)
+{
+ struct tb_cm *tcm = tb_priv(sw->tb);
+ struct tb_port *port, *tmp;
+
+ /* Clear children resources first */
+ tb_switch_for_each_port(sw, port) {
+ if (tb_port_has_remote(port))
+ tb_remove_dp_resources(port->remote->sw);
+ }
+
+ list_for_each_entry_safe(port, tmp, &tcm->dp_resources, list) {
+ if (port->sw == sw) {
+ tb_port_dbg(port, "DP %s resource unavailable\n",
+ tb_port_is_dpin(port) ? "IN" : "OUT");
+ list_del_init(&port->list);
+ }
+ }
+}
+
static void tb_discover_tunnels(struct tb_switch *sw)
{
struct tb *tb = sw->tb;
struct tb_cm *tcm = tb_priv(tb);
struct tb_port *port;
- int i;
- for (i = 1; i <= sw->config.max_port_number; i++) {
+ tb_switch_for_each_port(sw, port) {
struct tb_tunnel *tunnel = NULL;
- port = &sw->ports[i];
switch (port->config.type) {
case TB_TYPE_DP_HDMI_IN:
tunnel = tb_tunnel_discover_dp(tb, port);
@@ -95,9 +130,9 @@ static void tb_discover_tunnels(struct tb_switch *sw)
list_add_tail(&tunnel->list, &tcm->tunnel_list);
}
- for (i = 1; i <= sw->config.max_port_number; i++) {
- if (tb_port_has_remote(&sw->ports[i]))
- tb_discover_tunnels(sw->ports[i].remote->sw);
+ tb_switch_for_each_port(sw, port) {
+ if (tb_port_has_remote(port))
+ tb_discover_tunnels(port->remote->sw);
}
}
@@ -130,9 +165,10 @@ static void tb_scan_port(struct tb_port *port);
*/
static void tb_scan_switch(struct tb_switch *sw)
{
- int i;
- for (i = 1; i <= sw->config.max_port_number; i++)
- tb_scan_port(&sw->ports[i]);
+ struct tb_port *port;
+
+ tb_switch_for_each_port(sw, port)
+ tb_scan_port(port);
}
/**
@@ -217,11 +253,16 @@ static void tb_scan_port(struct tb_port *port)
upstream_port->dual_link_port->remote = port->dual_link_port;
}
+ /* Enable lane bonding if supported */
+ if (tb_switch_lane_bonding_enable(sw))
+ tb_sw_warn(sw, "failed to enable lane bonding\n");
+
tb_scan_switch(sw);
}
-static int tb_free_tunnel(struct tb *tb, enum tb_tunnel_type type,
- struct tb_port *src_port, struct tb_port *dst_port)
+static struct tb_tunnel *tb_find_tunnel(struct tb *tb, enum tb_tunnel_type type,
+ struct tb_port *src_port,
+ struct tb_port *dst_port)
{
struct tb_cm *tcm = tb_priv(tb);
struct tb_tunnel *tunnel;
@@ -230,14 +271,32 @@ static int tb_free_tunnel(struct tb *tb, enum tb_tunnel_type type,
if (tunnel->type == type &&
((src_port && src_port == tunnel->src_port) ||
(dst_port && dst_port == tunnel->dst_port))) {
- tb_tunnel_deactivate(tunnel);
- list_del(&tunnel->list);
- tb_tunnel_free(tunnel);
- return 0;
+ return tunnel;
}
}
- return -ENODEV;
+ return NULL;
+}
+
+static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
+{
+ if (!tunnel)
+ return;
+
+ tb_tunnel_deactivate(tunnel);
+ list_del(&tunnel->list);
+
+ /*
+ * In case of DP tunnel make sure the DP IN resource is deallocated
+ * properly.
+ */
+ if (tb_tunnel_is_dp(tunnel)) {
+ struct tb_port *in = tunnel->src_port;
+
+ tb_switch_dealloc_dp_resource(in->sw, in);
+ }
+
+ tb_tunnel_free(tunnel);
}
/**
@@ -250,11 +309,8 @@ static void tb_free_invalid_tunnels(struct tb *tb)
struct tb_tunnel *n;
list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
- if (tb_tunnel_is_invalid(tunnel)) {
- tb_tunnel_deactivate(tunnel);
- list_del(&tunnel->list);
- tb_tunnel_free(tunnel);
- }
+ if (tb_tunnel_is_invalid(tunnel))
+ tb_deactivate_and_free_tunnel(tunnel);
}
}
@@ -263,14 +319,15 @@ static void tb_free_invalid_tunnels(struct tb *tb)
*/
static void tb_free_unplugged_children(struct tb_switch *sw)
{
- int i;
- for (i = 1; i <= sw->config.max_port_number; i++) {
- struct tb_port *port = &sw->ports[i];
+ struct tb_port *port;
+ tb_switch_for_each_port(sw, port) {
if (!tb_port_has_remote(port))
continue;
if (port->remote->sw->is_unplugged) {
+ tb_remove_dp_resources(port->remote->sw);
+ tb_switch_lane_bonding_disable(port->remote->sw);
tb_switch_remove(port->remote->sw);
port->remote = NULL;
if (port->dual_link_port)
@@ -289,10 +346,13 @@ static void tb_free_unplugged_children(struct tb_switch *sw)
static struct tb_port *tb_find_port(struct tb_switch *sw,
enum tb_port_type type)
{
- int i;
- for (i = 1; i <= sw->config.max_port_number; i++)
- if (sw->ports[i].config.type == type)
- return &sw->ports[i];
+ struct tb_port *port;
+
+ tb_switch_for_each_port(sw, port) {
+ if (port->config.type == type)
+ return port;
+ }
+
return NULL;
}
@@ -304,18 +364,18 @@ static struct tb_port *tb_find_port(struct tb_switch *sw,
static struct tb_port *tb_find_unused_port(struct tb_switch *sw,
enum tb_port_type type)
{
- int i;
+ struct tb_port *port;
- for (i = 1; i <= sw->config.max_port_number; i++) {
- if (tb_is_upstream_port(&sw->ports[i]))
+ tb_switch_for_each_port(sw, port) {
+ if (tb_is_upstream_port(port))
continue;
- if (sw->ports[i].config.type != type)
+ if (port->config.type != type)
continue;
- if (!sw->ports[i].cap_adap)
+ if (port->cap_adap)
continue;
- if (tb_port_is_enabled(&sw->ports[i]))
+ if (tb_port_is_enabled(port))
continue;
- return &sw->ports[i];
+ return port;
}
return NULL;
}
@@ -336,10 +396,13 @@ static struct tb_port *tb_find_pcie_down(struct tb_switch *sw,
* Hard-coded Thunderbolt port to PCIe down port mapping
* per controller.
*/
- if (tb_switch_is_cr(sw))
+ if (tb_switch_is_cactus_ridge(sw) ||
+ tb_switch_is_alpine_ridge(sw))
index = !phy_port ? 6 : 7;
- else if (tb_switch_is_fr(sw))
+ else if (tb_switch_is_falcon_ridge(sw))
index = !phy_port ? 6 : 8;
+ else if (tb_switch_is_titan_ridge(sw))
+ index = !phy_port ? 8 : 9;
else
goto out;
@@ -358,42 +421,162 @@ out:
return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN);
}
-static int tb_tunnel_dp(struct tb *tb, struct tb_port *out)
+static int tb_available_bw(struct tb_cm *tcm, struct tb_port *in,
+ struct tb_port *out)
{
- struct tb_cm *tcm = tb_priv(tb);
struct tb_switch *sw = out->sw;
struct tb_tunnel *tunnel;
- struct tb_port *in;
+ int bw, available_bw = 40000;
- if (tb_port_is_enabled(out))
- return 0;
+ while (sw && sw != in->sw) {
+ bw = sw->link_speed * sw->link_width * 1000; /* Mb/s */
+ /* Leave 10% guard band */
+ bw -= bw / 10;
+
+ /*
+ * Check for any active DP tunnels that go through this
+ * switch and subtract their consumed bandwidth from the
+ * available bandwidth.
+ */
+ list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
+ int consumed_bw;
+
+ if (!tb_tunnel_switch_on_path(tunnel, sw))
+ continue;
+
+ consumed_bw = tb_tunnel_consumed_bandwidth(tunnel);
+ if (consumed_bw < 0)
+ return consumed_bw;
+
+ bw -= consumed_bw;
+ }
- do {
- sw = tb_to_switch(sw->dev.parent);
- if (!sw)
- return 0;
- in = tb_find_unused_port(sw, TB_TYPE_DP_HDMI_IN);
- } while (!in);
+ if (bw < available_bw)
+ available_bw = bw;
- tunnel = tb_tunnel_alloc_dp(tb, in, out);
+ sw = tb_switch_parent(sw);
+ }
+
+ return available_bw;
+}
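The arithmetic in tb_available_bw() is easiest to follow with concrete numbers (the figures below are assumptions for illustration, not from the patch):

/*
 * Example: a bonded Gen3 hop has link_speed = 20 and link_width = 2, so
 *
 *   bw  = 20 * 2 * 1000 = 40000 Mb/s
 *   bw -= bw / 10       -> 36000 Mb/s after the 10% guard band
 *
 * If an existing DP tunnel on the same hop consumes, say, 8540 Mb/s,
 * 27460 Mb/s remain. The minimum across every hop between @in and
 * @out, capped at the initial 40000 Mb/s, is what gets returned.
 */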
+
+static void tb_tunnel_dp(struct tb *tb)
+{
+ struct tb_cm *tcm = tb_priv(tb);
+ struct tb_port *port, *in, *out;
+ struct tb_tunnel *tunnel;
+ int available_bw;
+
+ /*
+ * Find a pair of inactive DP IN and DP OUT adapters and then
+ * establish a DP tunnel between them.
+ */
+ tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n");
+
+ in = NULL;
+ out = NULL;
+ list_for_each_entry(port, &tcm->dp_resources, list) {
+ if (tb_port_is_enabled(port)) {
+ tb_port_dbg(port, "in use\n");
+ continue;
+ }
+
+ tb_port_dbg(port, "available\n");
+
+ if (!in && tb_port_is_dpin(port))
+ in = port;
+ else if (!out && tb_port_is_dpout(port))
+ out = port;
+ }
+
+ if (!in) {
+ tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n");
+ return;
+ }
+ if (!out) {
+ tb_dbg(tb, "no suitable DP OUT adapter available, not tunneling\n");
+ return;
+ }
+
+ if (tb_switch_alloc_dp_resource(in->sw, in)) {
+ tb_port_dbg(in, "no resource available for DP IN, not tunneling\n");
+ return;
+ }
+
+ /* Calculate available bandwidth between in and out */
+ available_bw = tb_available_bw(tcm, in, out);
+ if (available_bw < 0) {
+ tb_warn(tb, "failed to determine available bandwidth\n");
+ goto dealloc_dp;
+ }
+
+ tb_dbg(tb, "available bandwidth for new DP tunnel %u Mb/s\n",
+ available_bw);
+
+ tunnel = tb_tunnel_alloc_dp(tb, in, out, available_bw);
if (!tunnel) {
- tb_port_dbg(out, "DP tunnel allocation failed\n");
- return -ENOMEM;
+ tb_port_dbg(out, "could not allocate DP tunnel\n");
+ goto dealloc_dp;
}
if (tb_tunnel_activate(tunnel)) {
tb_port_info(out, "DP tunnel activation failed, aborting\n");
tb_tunnel_free(tunnel);
- return -EIO;
+ goto dealloc_dp;
}
list_add_tail(&tunnel->list, &tcm->tunnel_list);
- return 0;
+ return;
+
+dealloc_dp:
+ tb_switch_dealloc_dp_resource(in->sw, in);
}
-static void tb_teardown_dp(struct tb *tb, struct tb_port *out)
+static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
{
- tb_free_tunnel(tb, TB_TUNNEL_DP, NULL, out);
+ struct tb_port *in, *out;
+ struct tb_tunnel *tunnel;
+
+ if (tb_port_is_dpin(port)) {
+ tb_port_dbg(port, "DP IN resource unavailable\n");
+ in = port;
+ out = NULL;
+ } else {
+ tb_port_dbg(port, "DP OUT resource unavailable\n");
+ in = NULL;
+ out = port;
+ }
+
+ tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, out);
+ tb_deactivate_and_free_tunnel(tunnel);
+ list_del_init(&port->list);
+
+ /*
+ * See if there is another DP OUT port that can be used to
+ * create another tunnel.
+ */
+ tb_tunnel_dp(tb);
+}
+
+static void tb_dp_resource_available(struct tb *tb, struct tb_port *port)
+{
+ struct tb_cm *tcm = tb_priv(tb);
+ struct tb_port *p;
+
+ if (tb_port_is_enabled(port))
+ return;
+
+ list_for_each_entry(p, &tcm->dp_resources, list) {
+ if (p == port)
+ return;
+ }
+
+ tb_port_dbg(port, "DP %s resource available\n",
+ tb_port_is_dpin(port) ? "IN" : "OUT");
+ list_add_tail(&port->list, &tcm->dp_resources);
+
+ /* Look for suitable DP IN <-> DP OUT pairs now */
+ tb_tunnel_dp(tb);
}
static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
@@ -468,6 +651,7 @@ static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
struct tb_port *dst_port;
+ struct tb_tunnel *tunnel;
struct tb_switch *sw;
sw = tb_to_switch(xd->dev.parent);
@@ -478,7 +662,8 @@ static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
* case of cable disconnect) so it is fine if we cannot find it
* here anymore.
*/
- tb_free_tunnel(tb, TB_TUNNEL_DMA, NULL, dst_port);
+ tunnel = tb_find_tunnel(tb, TB_TUNNEL_DMA, NULL, dst_port);
+ tb_deactivate_and_free_tunnel(tunnel);
}
static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
@@ -533,10 +718,14 @@ static void tb_handle_hotplug(struct work_struct *work)
tb_port_dbg(port, "switch unplugged\n");
tb_sw_set_unplugged(port->remote->sw);
tb_free_invalid_tunnels(tb);
+ tb_remove_dp_resources(port->remote->sw);
+ tb_switch_lane_bonding_disable(port->remote->sw);
tb_switch_remove(port->remote->sw);
port->remote = NULL;
if (port->dual_link_port)
port->dual_link_port->remote = NULL;
+ /* Maybe we can create another DP tunnel */
+ tb_tunnel_dp(tb);
} else if (port->xdomain) {
struct tb_xdomain *xd = tb_xdomain_get(port->xdomain);
@@ -553,8 +742,8 @@ static void tb_handle_hotplug(struct work_struct *work)
port->xdomain = NULL;
__tb_disconnect_xdomain_paths(tb, xd);
tb_xdomain_put(xd);
- } else if (tb_port_is_dpout(port)) {
- tb_teardown_dp(tb, port);
+ } else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
+ tb_dp_resource_unavailable(tb, port);
} else {
tb_port_dbg(port,
"got unplug event for disconnected port, ignoring\n");
@@ -567,8 +756,8 @@ static void tb_handle_hotplug(struct work_struct *work)
tb_scan_port(port);
if (!port->remote)
tb_port_dbg(port, "hotplug: no switch found\n");
- } else if (tb_port_is_dpout(port)) {
- tb_tunnel_dp(tb, port);
+ } else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
+ tb_dp_resource_available(tb, port);
}
}
@@ -681,6 +870,8 @@ static int tb_start(struct tb *tb)
tb_scan_switch(tb->root_switch);
/* Find out tunnels created by the boot firmware */
tb_discover_tunnels(tb->root_switch);
+ /* Add DP IN resources for the root switch */
+ tb_add_dp_resources(tb->root_switch);
/* Make the discovered switches available to the userspace */
device_for_each_child(&tb->root_switch->dev, NULL,
tb_scan_finalize_switch);
@@ -702,6 +893,21 @@ static int tb_suspend_noirq(struct tb *tb)
return 0;
}
+static void tb_restore_children(struct tb_switch *sw)
+{
+ struct tb_port *port;
+
+ tb_switch_for_each_port(sw, port) {
+ if (!tb_port_has_remote(port))
+ continue;
+
+ if (tb_switch_lane_bonding_enable(port->remote->sw))
+ dev_warn(&sw->dev, "failed to restore lane bonding\n");
+
+ tb_restore_children(port->remote->sw);
+ }
+}
+
static int tb_resume_noirq(struct tb *tb)
{
struct tb_cm *tcm = tb_priv(tb);
@@ -715,6 +921,7 @@ static int tb_resume_noirq(struct tb *tb)
tb_switch_resume(tb->root_switch);
tb_free_invalid_tunnels(tb);
tb_free_unplugged_children(tb->root_switch);
+ tb_restore_children(tb->root_switch);
list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
tb_tunnel_restart(tunnel);
if (!list_empty(&tcm->tunnel_list)) {
@@ -734,11 +941,10 @@ static int tb_resume_noirq(struct tb *tb)
static int tb_free_unplugged_xdomains(struct tb_switch *sw)
{
- int i, ret = 0;
-
- for (i = 1; i <= sw->config.max_port_number; i++) {
- struct tb_port *port = &sw->ports[i];
+ struct tb_port *port;
+ int ret = 0;
+ tb_switch_for_each_port(sw, port) {
if (tb_is_upstream_port(port))
continue;
if (port->xdomain && port->xdomain->is_unplugged) {
@@ -783,9 +989,6 @@ struct tb *tb_probe(struct tb_nhi *nhi)
struct tb_cm *tcm;
struct tb *tb;
- if (!x86_apple_machine)
- return NULL;
-
tb = tb_domain_alloc(nhi, sizeof(*tcm));
if (!tb)
return NULL;
@@ -795,6 +998,7 @@ struct tb *tb_probe(struct tb_nhi *nhi)
tcm = tb_priv(tb);
INIT_LIST_HEAD(&tcm->tunnel_list);
+ INIT_LIST_HEAD(&tcm->dp_resources);
return tb;
}
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
index 6407d529871d..ec851f20c571 100644
--- a/drivers/thunderbolt/tb.h
+++ b/drivers/thunderbolt/tb.h
@@ -61,6 +61,8 @@ struct tb_switch_nvm {
* @device: Device ID of the switch
* @vendor_name: Name of the vendor (or %NULL if not known)
* @device_name: Name of the device (or %NULL if not known)
+ * @link_speed: Speed of the link in Gb/s
+ * @link_width: Width of the link (1 or 2)
* @generation: Switch Thunderbolt generation
* @cap_plug_events: Offset to the plug events capability (%0 if not found)
* @cap_lc: Offset to the link controller capability (%0 if not found)
@@ -97,6 +99,8 @@ struct tb_switch {
u16 device;
const char *vendor_name;
const char *device_name;
+ unsigned int link_speed;
+ unsigned int link_width;
unsigned int generation;
int cap_plug_events;
int cap_lc;
@@ -127,11 +131,13 @@ struct tb_switch {
* @cap_adap: Offset of the adapter specific capability (%0 if not present)
* @port: Port number on switch
* @disabled: Disabled by eeprom
+ * @bonded: true if the port is bonded (two lanes combined as one)
* @dual_link_port: If the switch is connected using two ports, points
* to the other port.
* @link_nr: Is this primary or secondary port on the dual_link.
* @in_hopids: Currently allocated input HopIDs
* @out_hopids: Currently allocated output HopIDs
+ * @list: Used to link ports to DP resources list
*/
struct tb_port {
struct tb_regs_port_header config;
@@ -142,10 +148,12 @@ struct tb_port {
int cap_adap;
u8 port;
bool disabled;
+ bool bonded;
struct tb_port *dual_link_port;
u8 link_nr:1;
struct ida in_hopids;
struct ida out_hopids;
+ struct list_head list;
};
/**
@@ -399,7 +407,7 @@ static inline int tb_sw_read(struct tb_switch *sw, void *buffer,
length);
}
-static inline int tb_sw_write(struct tb_switch *sw, void *buffer,
+static inline int tb_sw_write(struct tb_switch *sw, const void *buffer,
enum tb_cfg_space space, u32 offset, u32 length)
{
if (sw->is_unplugged)
@@ -530,6 +538,17 @@ struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link,
struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid);
struct tb_switch *tb_switch_find_by_route(struct tb *tb, u64 route);
+/**
+ * tb_switch_for_each_port() - Iterate over each switch port
+ * @sw: Switch whose ports to iterate
+ * @p: Port used as iterator
+ *
+ * Iterates over each switch port, skipping the control port (port %0).
+ */
+#define tb_switch_for_each_port(sw, p) \
+ for ((p) = &(sw)->ports[1]; \
+ (p) <= &(sw)->ports[(sw)->config.max_port_number]; (p)++)
+
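The iterator replaces the open-coded for (i = 1; i <= sw->config.max_port_number; i++) loops converted throughout this patch. A minimal usage sketch (hypothetical helper, shown only to illustrate the macro):

/* Count lane adapters on @sw using the new iterator. */
static int example_count_lane_adapters(struct tb_switch *sw)
{
        struct tb_port *port;
        int n = 0;

        tb_switch_for_each_port(sw, port) {
                if (tb_port_is_null(port))
                        n++;
        }

        return n;
}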
static inline struct tb_switch *tb_switch_get(struct tb_switch *sw)
{
if (sw)
@@ -559,17 +578,17 @@ static inline struct tb_switch *tb_switch_parent(struct tb_switch *sw)
return tb_to_switch(sw->dev.parent);
}
-static inline bool tb_switch_is_lr(const struct tb_switch *sw)
+static inline bool tb_switch_is_light_ridge(const struct tb_switch *sw)
{
return sw->config.device_id == PCI_DEVICE_ID_INTEL_LIGHT_RIDGE;
}
-static inline bool tb_switch_is_er(const struct tb_switch *sw)
+static inline bool tb_switch_is_eagle_ridge(const struct tb_switch *sw)
{
return sw->config.device_id == PCI_DEVICE_ID_INTEL_EAGLE_RIDGE;
}
-static inline bool tb_switch_is_cr(const struct tb_switch *sw)
+static inline bool tb_switch_is_cactus_ridge(const struct tb_switch *sw)
{
switch (sw->config.device_id) {
case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C:
@@ -580,7 +599,7 @@ static inline bool tb_switch_is_cr(const struct tb_switch *sw)
}
}
-static inline bool tb_switch_is_fr(const struct tb_switch *sw)
+static inline bool tb_switch_is_falcon_ridge(const struct tb_switch *sw)
{
switch (sw->config.device_id) {
case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE:
@@ -591,6 +610,52 @@ static inline bool tb_switch_is_fr(const struct tb_switch *sw)
}
}
+static inline bool tb_switch_is_alpine_ridge(const struct tb_switch *sw)
+{
+ switch (sw->config.device_id) {
+ case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE:
+ case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE:
+ case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE:
+ case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static inline bool tb_switch_is_titan_ridge(const struct tb_switch *sw)
+{
+ switch (sw->config.device_id) {
+ case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE:
+ case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE:
+ case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * tb_switch_is_icm() - Is the switch handled by ICM firmware
+ * @sw: Switch to check
+ *
+ * Call this function when there is a need to differentiate whether the
+ * ICM firmware or the SW CM is handling @sw. It is valid to call this
+ * after tb_switch_alloc() and tb_switch_configure() have been called
+ * (the latter only in the SW CM case).
+ */
+static inline bool tb_switch_is_icm(const struct tb_switch *sw)
+{
+ return !sw->config.enabled;
+}
+
+int tb_switch_lane_bonding_enable(struct tb_switch *sw);
+void tb_switch_lane_bonding_disable(struct tb_switch *sw);
+
+bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in);
+int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in);
+void tb_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in);
+
int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged);
int tb_port_add_nfc_credits(struct tb_port *port, int credits);
int tb_port_set_initial_credits(struct tb_port *port, u32 credits);
@@ -626,6 +691,8 @@ void tb_path_free(struct tb_path *path);
int tb_path_activate(struct tb_path *path);
void tb_path_deactivate(struct tb_path *path);
bool tb_path_is_invalid(struct tb_path *path);
+bool tb_path_switch_on_path(const struct tb_path *path,
+ const struct tb_switch *sw);
int tb_drom_read(struct tb_switch *sw);
int tb_drom_read_uid_only(struct tb_switch *sw, u64 *uid);
@@ -634,6 +701,10 @@ int tb_lc_read_uuid(struct tb_switch *sw, u32 *uuid);
int tb_lc_configure_link(struct tb_switch *sw);
void tb_lc_unconfigure_link(struct tb_switch *sw);
int tb_lc_set_sleep(struct tb_switch *sw);
+bool tb_lc_lane_bonding_possible(struct tb_switch *sw);
+bool tb_lc_dp_sink_query(struct tb_switch *sw, struct tb_port *in);
+int tb_lc_dp_sink_alloc(struct tb_switch *sw, struct tb_port *in);
+int tb_lc_dp_sink_dealloc(struct tb_switch *sw, struct tb_port *in);
static inline int tb_route_length(u64 route)
{
diff --git a/drivers/thunderbolt/tb_msgs.h b/drivers/thunderbolt/tb_msgs.h
index 4b641e4ee0c5..3705057723b6 100644
--- a/drivers/thunderbolt/tb_msgs.h
+++ b/drivers/thunderbolt/tb_msgs.h
@@ -122,6 +122,8 @@ struct icm_pkg_header {
#define ICM_FLAGS_NO_KEY BIT(1)
#define ICM_FLAGS_SLEVEL_SHIFT 3
#define ICM_FLAGS_SLEVEL_MASK GENMASK(4, 3)
+#define ICM_FLAGS_DUAL_LANE BIT(5)
+#define ICM_FLAGS_SPEED_GEN3 BIT(7)
#define ICM_FLAGS_WRITE BIT(7)
struct icm_pkg_driver_ready {
diff --git a/drivers/thunderbolt/tb_regs.h b/drivers/thunderbolt/tb_regs.h
index deb9d4a977b9..7ee45b73c7f7 100644
--- a/drivers/thunderbolt/tb_regs.h
+++ b/drivers/thunderbolt/tb_regs.h
@@ -211,37 +211,71 @@ struct tb_regs_port_header {
} __packed;
-/* DWORD 4 */
-#define TB_PORT_NFC_CREDITS_MASK GENMASK(19, 0)
-#define TB_PORT_MAX_CREDITS_SHIFT 20
-#define TB_PORT_MAX_CREDITS_MASK GENMASK(26, 20)
-/* DWORD 5 */
-#define TB_PORT_LCA_SHIFT 22
-#define TB_PORT_LCA_MASK GENMASK(28, 22)
+/* Basic adapter configuration registers */
+#define ADP_CS_4 0x04
+#define ADP_CS_4_NFC_BUFFERS_MASK GENMASK(9, 0)
+#define ADP_CS_4_TOTAL_BUFFERS_MASK GENMASK(29, 20)
+#define ADP_CS_4_TOTAL_BUFFERS_SHIFT 20
+#define ADP_CS_5 0x05
+#define ADP_CS_5_LCA_MASK GENMASK(28, 22)
+#define ADP_CS_5_LCA_SHIFT 22
+
+/* Lane adapter registers */
+#define LANE_ADP_CS_0 0x00
+#define LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK GENMASK(25, 20)
+#define LANE_ADP_CS_0_SUPPORTED_WIDTH_SHIFT 20
+#define LANE_ADP_CS_1 0x01
+#define LANE_ADP_CS_1_TARGET_WIDTH_MASK GENMASK(9, 4)
+#define LANE_ADP_CS_1_TARGET_WIDTH_SHIFT 4
+#define LANE_ADP_CS_1_TARGET_WIDTH_SINGLE 0x1
+#define LANE_ADP_CS_1_TARGET_WIDTH_DUAL 0x3
+#define LANE_ADP_CS_1_LB BIT(15)
+#define LANE_ADP_CS_1_CURRENT_SPEED_MASK GENMASK(19, 16)
+#define LANE_ADP_CS_1_CURRENT_SPEED_SHIFT 16
+#define LANE_ADP_CS_1_CURRENT_SPEED_GEN2 0x8
+#define LANE_ADP_CS_1_CURRENT_SPEED_GEN3 0x4
+#define LANE_ADP_CS_1_CURRENT_WIDTH_MASK GENMASK(25, 20)
+#define LANE_ADP_CS_1_CURRENT_WIDTH_SHIFT 20
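A sketch of how the lane adapter fields are meant to be read (assuming @port is a lane adapter whose PHY capability offset is stored in port->cap_phy, as elsewhere in the driver):

	u32 val, width;
	int ret;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (!ret)
		width = (val & LANE_ADP_CS_1_CURRENT_WIDTH_MASK) >>
			LANE_ADP_CS_1_CURRENT_WIDTH_SHIFT;
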
/* Display Port adapter registers */
-
-/* DWORD 0 */
-#define TB_DP_VIDEO_HOPID_SHIFT 16
-#define TB_DP_VIDEO_HOPID_MASK GENMASK(26, 16)
-#define TB_DP_AUX_EN BIT(30)
-#define TB_DP_VIDEO_EN BIT(31)
-/* DWORD 1 */
-#define TB_DP_AUX_TX_HOPID_MASK GENMASK(10, 0)
-#define TB_DP_AUX_RX_HOPID_SHIFT 11
-#define TB_DP_AUX_RX_HOPID_MASK GENMASK(21, 11)
-/* DWORD 2 */
-#define TB_DP_HDP BIT(6)
-/* DWORD 3 */
-#define TB_DP_HPDC BIT(9)
-/* DWORD 4 */
-#define TB_DP_LOCAL_CAP 0x4
-/* DWORD 5 */
-#define TB_DP_REMOTE_CAP 0x5
+#define ADP_DP_CS_0 0x00
+#define ADP_DP_CS_0_VIDEO_HOPID_MASK GENMASK(26, 16)
+#define ADP_DP_CS_0_VIDEO_HOPID_SHIFT 16
+#define ADP_DP_CS_0_AE BIT(30)
+#define ADP_DP_CS_0_VE BIT(31)
+#define ADP_DP_CS_1_AUX_TX_HOPID_MASK GENMASK(10, 0)
+#define ADP_DP_CS_1_AUX_RX_HOPID_MASK GENMASK(21, 11)
+#define ADP_DP_CS_1_AUX_RX_HOPID_SHIFT 11
+#define ADP_DP_CS_2 0x02
+#define ADP_DP_CS_2_HDP BIT(6)
+#define ADP_DP_CS_3 0x03
+#define ADP_DP_CS_3_HDPC BIT(9)
+#define DP_LOCAL_CAP 0x04
+#define DP_REMOTE_CAP 0x05
+#define DP_STATUS_CTRL 0x06
+#define DP_STATUS_CTRL_CMHS BIT(25)
+#define DP_STATUS_CTRL_UF BIT(26)
+#define DP_COMMON_CAP 0x07
+/*
+ * DP_COMMON_CAP offsets work also for DP_LOCAL_CAP and DP_REMOTE_CAP
+ * with the exception of DPRX done.
+ */
+#define DP_COMMON_CAP_RATE_MASK GENMASK(11, 8)
+#define DP_COMMON_CAP_RATE_SHIFT 8
+#define DP_COMMON_CAP_RATE_RBR 0x0
+#define DP_COMMON_CAP_RATE_HBR 0x1
+#define DP_COMMON_CAP_RATE_HBR2 0x2
+#define DP_COMMON_CAP_RATE_HBR3 0x3
+#define DP_COMMON_CAP_LANES_MASK GENMASK(14, 12)
+#define DP_COMMON_CAP_LANES_SHIFT 12
+#define DP_COMMON_CAP_1_LANE 0x0
+#define DP_COMMON_CAP_2_LANES 0x1
+#define DP_COMMON_CAP_4_LANES 0x2
+#define DP_COMMON_CAP_DPRX_DONE BIT(31)
/* PCIe adapter registers */
-
-#define TB_PCI_EN BIT(31)
+#define ADP_PCIE_CS_0 0x00
+#define ADP_PCIE_CS_0_PE BIT(31)
/* Hop register from TB_CFG_HOPS. 8 byte per entry. */
struct tb_regs_hop {
@@ -278,8 +312,17 @@ struct tb_regs_hop {
#define TB_LC_DESC_PORT_SIZE_SHIFT 16
#define TB_LC_DESC_PORT_SIZE_MASK GENMASK(27, 16)
#define TB_LC_FUSE 0x03
+#define TB_LC_SNK_ALLOCATION 0x10
+#define TB_LC_SNK_ALLOCATION_SNK0_MASK GENMASK(3, 0)
+#define TB_LC_SNK_ALLOCATION_SNK0_CM 0x1
+#define TB_LC_SNK_ALLOCATION_SNK1_SHIFT 4
+#define TB_LC_SNK_ALLOCATION_SNK1_MASK GENMASK(7, 4)
+#define TB_LC_SNK_ALLOCATION_SNK1_CM 0x1
/* Link controller registers */
+#define TB_LC_PORT_ATTR 0x8d
+#define TB_LC_PORT_ATTR_BE BIT(12)
+
#define TB_LC_SX_CTRL 0x96
#define TB_LC_SX_CTRL_L1C BIT(16)
#define TB_LC_SX_CTRL_L2C BIT(20)
diff --git a/drivers/thunderbolt/tunnel.c b/drivers/thunderbolt/tunnel.c
index 5a99234826e7..0d3463c4e24a 100644
--- a/drivers/thunderbolt/tunnel.c
+++ b/drivers/thunderbolt/tunnel.c
@@ -6,6 +6,7 @@
* Copyright (C) 2019, Intel Corporation
*/
+#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/list.h>
@@ -90,6 +91,22 @@ static int tb_pci_activate(struct tb_tunnel *tunnel, bool activate)
return 0;
}
+static int tb_initial_credits(const struct tb_switch *sw)
+{
+ /* If the path is complete, sw is not NULL */
+ if (sw) {
+ /* More credits for faster link */
+ switch (sw->link_speed * sw->link_width) {
+ case 40:
+ return 32;
+ case 20:
+ return 24;
+ }
+ }
+
+ return 16;
+}
+
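That is, a bonded 20 Gb/s link (2 x 20 = 40) gets 32 initial credits, a single 20 Gb/s lane or a bonded 10 Gb/s link (20) gets 24, and everything else, including incomplete paths, falls back to the previous fixed value of 16.
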
static void tb_pci_init_path(struct tb_path *path)
{
path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
@@ -101,7 +118,8 @@ static void tb_pci_init_path(struct tb_path *path)
path->drop_packages = 0;
path->nfc_credits = 0;
path->hops[0].initial_credits = 7;
- path->hops[1].initial_credits = 16;
+ path->hops[1].initial_credits =
+ tb_initial_credits(path->hops[1].in_port->sw);
}
/**
@@ -225,11 +243,174 @@ struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
return tunnel;
}
+static int tb_dp_cm_handshake(struct tb_port *in, struct tb_port *out)
+{
+ int timeout = 10;
+ u32 val;
+ int ret;
+
+ /* Both ends need to support this */
+ if (!tb_switch_is_titan_ridge(in->sw) ||
+ !tb_switch_is_titan_ridge(out->sw))
+ return 0;
+
+ ret = tb_port_read(out, &val, TB_CFG_PORT,
+ out->cap_adap + DP_STATUS_CTRL, 1);
+ if (ret)
+ return ret;
+
+ val |= DP_STATUS_CTRL_UF | DP_STATUS_CTRL_CMHS;
+
+ ret = tb_port_write(out, &val, TB_CFG_PORT,
+ out->cap_adap + DP_STATUS_CTRL, 1);
+ if (ret)
+ return ret;
+
+ do {
+ ret = tb_port_read(out, &val, TB_CFG_PORT,
+ out->cap_adap + DP_STATUS_CTRL, 1);
+ if (ret)
+ return ret;
+ if (!(val & DP_STATUS_CTRL_CMHS))
+ return 0;
+ usleep_range(10, 100);
+ } while (timeout--);
+
+ return -ETIMEDOUT;
+}
+
+static inline u32 tb_dp_cap_get_rate(u32 val)
+{
+ u32 rate = (val & DP_COMMON_CAP_RATE_MASK) >> DP_COMMON_CAP_RATE_SHIFT;
+
+ switch (rate) {
+ case DP_COMMON_CAP_RATE_RBR:
+ return 1620;
+ case DP_COMMON_CAP_RATE_HBR:
+ return 2700;
+ case DP_COMMON_CAP_RATE_HBR2:
+ return 5400;
+ case DP_COMMON_CAP_RATE_HBR3:
+ return 8100;
+ default:
+ return 0;
+ }
+}
+
+static inline u32 tb_dp_cap_set_rate(u32 val, u32 rate)
+{
+ val &= ~DP_COMMON_CAP_RATE_MASK;
+ switch (rate) {
+ default:
+ WARN(1, "invalid rate %u passed, defaulting to 1620 MB/s\n", rate);
+ /* Fallthrough */
+ case 1620:
+ val |= DP_COMMON_CAP_RATE_RBR << DP_COMMON_CAP_RATE_SHIFT;
+ break;
+ case 2700:
+ val |= DP_COMMON_CAP_RATE_HBR << DP_COMMON_CAP_RATE_SHIFT;
+ break;
+ case 5400:
+ val |= DP_COMMON_CAP_RATE_HBR2 << DP_COMMON_CAP_RATE_SHIFT;
+ break;
+ case 8100:
+ val |= DP_COMMON_CAP_RATE_HBR3 << DP_COMMON_CAP_RATE_SHIFT;
+ break;
+ }
+ return val;
+}
+
+static inline u32 tb_dp_cap_get_lanes(u32 val)
+{
+ u32 lanes = (val & DP_COMMON_CAP_LANES_MASK) >> DP_COMMON_CAP_LANES_SHIFT;
+
+ switch (lanes) {
+ case DP_COMMON_CAP_1_LANE:
+ return 1;
+ case DP_COMMON_CAP_2_LANES:
+ return 2;
+ case DP_COMMON_CAP_4_LANES:
+ return 4;
+ default:
+ return 0;
+ }
+}
+
+static inline u32 tb_dp_cap_set_lanes(u32 val, u32 lanes)
+{
+ val &= ~DP_COMMON_CAP_LANES_MASK;
+ switch (lanes) {
+ default:
+ WARN(1, "invalid number of lanes %u passed, defaulting to 1\n",
+ lanes);
+ /* Fallthrough */
+ case 1:
+ val |= DP_COMMON_CAP_1_LANE << DP_COMMON_CAP_LANES_SHIFT;
+ break;
+ case 2:
+ val |= DP_COMMON_CAP_2_LANES << DP_COMMON_CAP_LANES_SHIFT;
+ break;
+ case 4:
+ val |= DP_COMMON_CAP_4_LANES << DP_COMMON_CAP_LANES_SHIFT;
+ break;
+ }
+ return val;
+}
+
+static unsigned int tb_dp_bandwidth(unsigned int rate, unsigned int lanes)
+{
+ /* Tunneling removes the DP 8b/10b encoding */
+ return rate * lanes * 8 / 10;
+}
+
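For example, a capability word encoding HBR2 (0x2 in the rate field) and four lanes (0x2 in the lanes field) decodes to 5400 Mb/s x 4, and tb_dp_bandwidth(5400, 4) = 5400 * 4 * 8 / 10 = 17280 Mb/s of actual tunneled bandwidth.
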
+static int tb_dp_reduce_bandwidth(int max_bw, u32 in_rate, u32 in_lanes,
+ u32 out_rate, u32 out_lanes, u32 *new_rate,
+ u32 *new_lanes)
+{
+ static const u32 dp_bw[][2] = {
+ /* Mb/s, lanes */
+ { 8100, 4 }, /* 25920 Mb/s */
+ { 5400, 4 }, /* 17280 Mb/s */
+ { 8100, 2 }, /* 12960 Mb/s */
+ { 2700, 4 }, /* 8640 Mb/s */
+ { 5400, 2 }, /* 8640 Mb/s */
+ { 8100, 1 }, /* 6480 Mb/s */
+ { 1620, 4 }, /* 5184 Mb/s */
+ { 5400, 1 }, /* 4320 Mb/s */
+ { 2700, 2 }, /* 4320 Mb/s */
+ { 1620, 2 }, /* 2592 Mb/s */
+ { 2700, 1 }, /* 2160 Mb/s */
+ { 1620, 1 }, /* 1296 Mb/s */
+ };
+ unsigned int i;
+
+ /*
+ * Find a combination that can fit into max_bw and does not
+ * exceed the maximum rate and lanes supported by the DP OUT and
+ * DP IN adapters.
+ */
+ for (i = 0; i < ARRAY_SIZE(dp_bw); i++) {
+ if (dp_bw[i][0] > out_rate || dp_bw[i][1] > out_lanes)
+ continue;
+
+ if (dp_bw[i][0] > in_rate || dp_bw[i][1] > in_lanes)
+ continue;
+
+ if (tb_dp_bandwidth(dp_bw[i][0], dp_bw[i][1]) <= max_bw) {
+ *new_rate = dp_bw[i][0];
+ *new_lanes = dp_bw[i][1];
+ return 0;
+ }
+ }
+
+ return -ENOSR;
+}
+
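Worked example: with max_bw = 9000 Mb/s, a DP IN capable of 8100 Mb/s x 4 and a DP OUT capable of 5400 Mb/s x 2, the walk skips every entry exceeding the OUT limits and settles on { 5400, 2 } = 8640 Mb/s, the highest combination that still fits.
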
static int tb_dp_xchg_caps(struct tb_tunnel *tunnel)
{
+ u32 out_dp_cap, out_rate, out_lanes, in_dp_cap, in_rate, in_lanes, bw;
struct tb_port *out = tunnel->dst_port;
struct tb_port *in = tunnel->src_port;
- u32 in_dp_cap, out_dp_cap;
int ret;
/*
@@ -239,25 +420,71 @@ static int tb_dp_xchg_caps(struct tb_tunnel *tunnel)
if (in->sw->generation < 2 || out->sw->generation < 2)
return 0;
+ /*
+ * Perform connection manager handshake between IN and OUT ports
+ * before capabilities exchange can take place.
+ */
+ ret = tb_dp_cm_handshake(in, out);
+ if (ret)
+ return ret;
+
/* Read both DP_LOCAL_CAP registers */
ret = tb_port_read(in, &in_dp_cap, TB_CFG_PORT,
- in->cap_adap + TB_DP_LOCAL_CAP, 1);
+ in->cap_adap + DP_LOCAL_CAP, 1);
if (ret)
return ret;
ret = tb_port_read(out, &out_dp_cap, TB_CFG_PORT,
- out->cap_adap + TB_DP_LOCAL_CAP, 1);
+ out->cap_adap + DP_LOCAL_CAP, 1);
if (ret)
return ret;
/* Write IN local caps to OUT remote caps */
ret = tb_port_write(out, &in_dp_cap, TB_CFG_PORT,
- out->cap_adap + TB_DP_REMOTE_CAP, 1);
+ out->cap_adap + DP_REMOTE_CAP, 1);
if (ret)
return ret;
+ in_rate = tb_dp_cap_get_rate(in_dp_cap);
+ in_lanes = tb_dp_cap_get_lanes(in_dp_cap);
+ tb_port_dbg(in, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
+ in_rate, in_lanes, tb_dp_bandwidth(in_rate, in_lanes));
+
+ /*
+ * If the tunnel bandwidth is limited (max_bw is set) then see
+ * if we need to reduce bandwidth to fit there.
+ */
+ out_rate = tb_dp_cap_get_rate(out_dp_cap);
+ out_lanes = tb_dp_cap_get_lanes(out_dp_cap);
+ bw = tb_dp_bandwidth(out_rate, out_lanes);
+ tb_port_dbg(out, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
+ out_rate, out_lanes, bw);
+
+ if (tunnel->max_bw && bw > tunnel->max_bw) {
+ u32 new_rate, new_lanes, new_bw;
+
+ ret = tb_dp_reduce_bandwidth(tunnel->max_bw, in_rate, in_lanes,
+ out_rate, out_lanes, &new_rate,
+ &new_lanes);
+ if (ret) {
+ tb_port_info(out, "not enough bandwidth for DP tunnel\n");
+ return ret;
+ }
+
+ new_bw = tb_dp_bandwidth(new_rate, new_lanes);
+ tb_port_dbg(out, "bandwidth reduced to %u Mb/s x%u = %u Mb/s\n",
+ new_rate, new_lanes, new_bw);
+
+ /*
+ * Set new rate and number of lanes before writing it to
+ * the IN port remote caps.
+ */
+ out_dp_cap = tb_dp_cap_set_rate(out_dp_cap, new_rate);
+ out_dp_cap = tb_dp_cap_set_lanes(out_dp_cap, new_lanes);
+ }
+
return tb_port_write(in, &out_dp_cap, TB_CFG_PORT,
- in->cap_adap + TB_DP_REMOTE_CAP, 1);
+ in->cap_adap + DP_REMOTE_CAP, 1);
}
static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
@@ -297,6 +524,56 @@ static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
return 0;
}
+static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel)
+{
+ struct tb_port *in = tunnel->src_port;
+ const struct tb_switch *sw = in->sw;
+ u32 val, rate = 0, lanes = 0;
+ int ret;
+
+ if (tb_switch_is_titan_ridge(sw)) {
+ int timeout = 10;
+
+ /*
+ * Wait for DPRX done. Normally it should be already set
+ * for active tunnel.
+ */
+ do {
+ ret = tb_port_read(in, &val, TB_CFG_PORT,
+ in->cap_adap + DP_COMMON_CAP, 1);
+ if (ret)
+ return ret;
+
+ if (val & DP_COMMON_CAP_DPRX_DONE) {
+ rate = tb_dp_cap_get_rate(val);
+ lanes = tb_dp_cap_get_lanes(val);
+ break;
+ }
+ msleep(250);
+ } while (timeout--);
+
+ if (!timeout)
+ return -ETIMEDOUT;
+ } else if (sw->generation >= 2) {
+ /*
+ * Read from the copied remote cap so that we take into
+ * account if capabilities were reduced during exchange.
+ */
+ ret = tb_port_read(in, &val, TB_CFG_PORT,
+ in->cap_adap + DP_REMOTE_CAP, 1);
+ if (ret)
+ return ret;
+
+ rate = tb_dp_cap_get_rate(val);
+ lanes = tb_dp_cap_get_lanes(val);
+ } else {
+ /* No bandwidth management for legacy devices */
+ return 0;
+ }
+
+ return tb_dp_bandwidth(rate, lanes);
+}
+
static void tb_dp_init_aux_path(struct tb_path *path)
{
int i;
@@ -324,12 +601,12 @@ static void tb_dp_init_video_path(struct tb_path *path, bool discover)
path->weight = 1;
if (discover) {
- path->nfc_credits = nfc_credits & TB_PORT_NFC_CREDITS_MASK;
+ path->nfc_credits = nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK;
} else {
u32 max_credits;
- max_credits = (nfc_credits & TB_PORT_MAX_CREDITS_MASK) >>
- TB_PORT_MAX_CREDITS_SHIFT;
+ max_credits = (nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
+ ADP_CS_4_TOTAL_BUFFERS_SHIFT;
/* Leave some credits for AUX path */
path->nfc_credits = min(max_credits - 2, 12U);
}
@@ -361,6 +638,7 @@ struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in)
tunnel->init = tb_dp_xchg_caps;
tunnel->activate = tb_dp_activate;
+ tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
tunnel->src_port = in;
path = tb_path_discover(in, TB_DP_VIDEO_HOPID, NULL, -1,
@@ -419,6 +697,7 @@ err_free:
* @tb: Pointer to the domain structure
* @in: DP in adapter port
* @out: DP out adapter port
+ * @max_bw: Maximum available bandwidth for the DP tunnel (%0 if not limited)
*
* Allocates a tunnel between @in and @out that is capable of tunneling
* Display Port traffic.
@@ -426,7 +705,7 @@ err_free:
* Return: Returns a tb_tunnel on success or NULL on failure.
*/
struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
- struct tb_port *out)
+ struct tb_port *out, int max_bw)
{
struct tb_tunnel *tunnel;
struct tb_path **paths;
@@ -441,8 +720,10 @@ struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
tunnel->init = tb_dp_xchg_caps;
tunnel->activate = tb_dp_activate;
+ tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
tunnel->src_port = in;
tunnel->dst_port = out;
+ tunnel->max_bw = max_bw;
paths = tunnel->paths;
@@ -478,8 +759,8 @@ static u32 tb_dma_credits(struct tb_port *nhi)
{
u32 max_credits;
- max_credits = (nhi->config.nfc_credits & TB_PORT_MAX_CREDITS_MASK) >>
- TB_PORT_MAX_CREDITS_SHIFT;
+ max_credits = (nhi->config.nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
+ ADP_CS_4_TOTAL_BUFFERS_SHIFT;
return min(max_credits, 13U);
}
@@ -689,3 +970,62 @@ void tb_tunnel_deactivate(struct tb_tunnel *tunnel)
tb_path_deactivate(tunnel->paths[i]);
}
}
+
+/**
+ * tb_tunnel_switch_on_path() - Does the tunnel go through switch
+ * @tunnel: Tunnel to check
+ * @sw: Switch to check
+ *
+ * Returns true if @tunnel goes through @sw (direction does not matter),
+ * false otherwise.
+ */
+bool tb_tunnel_switch_on_path(const struct tb_tunnel *tunnel,
+ const struct tb_switch *sw)
+{
+ int i;
+
+ for (i = 0; i < tunnel->npaths; i++) {
+ if (!tunnel->paths[i])
+ continue;
+ if (tb_path_switch_on_path(tunnel->paths[i], sw))
+ return true;
+ }
+
+ return false;
+}
+
+static bool tb_tunnel_is_active(const struct tb_tunnel *tunnel)
+{
+ int i;
+
+ for (i = 0; i < tunnel->npaths; i++) {
+ if (!tunnel->paths[i])
+ return false;
+ if (!tunnel->paths[i]->activated)
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * tb_tunnel_consumed_bandwidth() - Return bandwidth consumed by the tunnel
+ * @tunnel: Tunnel to check
+ *
+ * Returns bandwidth currently consumed by @tunnel and %0 if the @tunnel
+ * is not active or does not consume bandwidth.
+ */
+int tb_tunnel_consumed_bandwidth(struct tb_tunnel *tunnel)
+{
+ if (!tb_tunnel_is_active(tunnel))
+ return 0;
+
+ if (tunnel->consumed_bandwidth) {
+ int ret = tunnel->consumed_bandwidth(tunnel);
+
+ tb_tunnel_dbg(tunnel, "consumed bandwidth %d Mb/s\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
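
A sketch of the intended caller side (hypothetical; assumes a software connection manager that keeps its tunnels on a list the way tb.c does):

	struct tb_tunnel *tunnel;
	int consumed = 0, ret;

	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		ret = tb_tunnel_consumed_bandwidth(tunnel);
		if (ret > 0)	/* skip errors and idle tunnels */
			consumed += ret;
	}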
diff --git a/drivers/thunderbolt/tunnel.h b/drivers/thunderbolt/tunnel.h
index c68bbcd3a62c..ba888da005f5 100644
--- a/drivers/thunderbolt/tunnel.h
+++ b/drivers/thunderbolt/tunnel.h
@@ -27,8 +27,11 @@ enum tb_tunnel_type {
* @npaths: Number of paths in @paths
* @init: Optional tunnel specific initialization
* @activate: Optional tunnel specific activation/deactivation
+ * @consumed_bandwidth: Return how much bandwidth the tunnel consumes
* @list: Tunnels are linked using this field
* @type: Type of the tunnel
+ * @max_bw: Maximum bandwidth (Mb/s) available for the tunnel (only for DP).
+ * Only set if the bandwidth needs to be limited.
*/
struct tb_tunnel {
struct tb *tb;
@@ -38,8 +41,10 @@ struct tb_tunnel {
size_t npaths;
int (*init)(struct tb_tunnel *tunnel);
int (*activate)(struct tb_tunnel *tunnel, bool activate);
+ int (*consumed_bandwidth)(struct tb_tunnel *tunnel);
struct list_head list;
enum tb_tunnel_type type;
+ unsigned int max_bw;
};
struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down);
@@ -47,7 +52,7 @@ struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
struct tb_port *down);
struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in);
struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
- struct tb_port *out);
+ struct tb_port *out, int max_bw);
struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
struct tb_port *dst, int transmit_ring,
int transmit_path, int receive_ring,
@@ -58,6 +63,9 @@ int tb_tunnel_activate(struct tb_tunnel *tunnel);
int tb_tunnel_restart(struct tb_tunnel *tunnel);
void tb_tunnel_deactivate(struct tb_tunnel *tunnel);
bool tb_tunnel_is_invalid(struct tb_tunnel *tunnel);
+bool tb_tunnel_switch_on_path(const struct tb_tunnel *tunnel,
+ const struct tb_switch *sw);
+int tb_tunnel_consumed_bandwidth(struct tb_tunnel *tunnel);
static inline bool tb_tunnel_is_pci(const struct tb_tunnel *tunnel)
{
diff --git a/drivers/thunderbolt/xdomain.c b/drivers/thunderbolt/xdomain.c
index 4e17a7c7bf0a..880d784398a3 100644
--- a/drivers/thunderbolt/xdomain.c
+++ b/drivers/thunderbolt/xdomain.c
@@ -1404,10 +1404,9 @@ struct tb_xdomain_lookup {
static struct tb_xdomain *switch_find_xdomain(struct tb_switch *sw,
const struct tb_xdomain_lookup *lookup)
{
- int i;
+ struct tb_port *port;
- for (i = 1; i <= sw->config.max_port_number; i++) {
- struct tb_port *port = &sw->ports[i];
+ tb_switch_for_each_port(sw, port) {
struct tb_xdomain *xd;
if (port->xdomain) {
diff --git a/drivers/tty/serial/8250/8250_men_mcb.c b/drivers/tty/serial/8250/8250_men_mcb.c
index 8df89e9cd254..e985f344b2dd 100644
--- a/drivers/tty/serial/8250/8250_men_mcb.c
+++ b/drivers/tty/serial/8250/8250_men_mcb.c
@@ -174,3 +174,4 @@ MODULE_AUTHOR("Michael Moese <michael.moese@men.de");
MODULE_ALIAS("mcb:16z125");
MODULE_ALIAS("mcb:16z025");
MODULE_ALIAS("mcb:16z057");
+MODULE_IMPORT_NS(MCB);
diff --git a/drivers/tty/serial/men_z135_uart.c b/drivers/tty/serial/men_z135_uart.c
index e5d3ebab6dae..4f53a4caabf6 100644
--- a/drivers/tty/serial/men_z135_uart.c
+++ b/drivers/tty/serial/men_z135_uart.c
@@ -930,3 +930,4 @@ MODULE_AUTHOR("Johannes Thumshirn <johannes.thumshirn@men.de>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MEN 16z135 High Speed UART");
MODULE_ALIAS("mcb:16z135");
+MODULE_IMPORT_NS(MCB);
diff --git a/drivers/tty/tty_ldsem.c b/drivers/tty/tty_ldsem.c
index 60ff236a3d63..ce8291053af3 100644
--- a/drivers/tty/tty_ldsem.c
+++ b/drivers/tty/tty_ldsem.c
@@ -303,7 +303,7 @@ static int __ldsem_down_read_nested(struct ld_semaphore *sem,
if (count <= 0) {
lock_contended(&sem->dep_map, _RET_IP_);
if (!down_read_failed(sem, count, timeout)) {
- rwsem_release(&sem->dep_map, 1, _RET_IP_);
+ rwsem_release(&sem->dep_map, _RET_IP_);
return 0;
}
}
@@ -322,7 +322,7 @@ static int __ldsem_down_write_nested(struct ld_semaphore *sem,
if ((count & LDSEM_ACTIVE_MASK) != LDSEM_ACTIVE_BIAS) {
lock_contended(&sem->dep_map, _RET_IP_);
if (!down_write_failed(sem, count, timeout)) {
- rwsem_release(&sem->dep_map, 1, _RET_IP_);
+ rwsem_release(&sem->dep_map, _RET_IP_);
return 0;
}
}
@@ -390,7 +390,7 @@ void ldsem_up_read(struct ld_semaphore *sem)
{
long count;
- rwsem_release(&sem->dep_map, 1, _RET_IP_);
+ rwsem_release(&sem->dep_map, _RET_IP_);
count = atomic_long_add_return(-LDSEM_READ_BIAS, &sem->count);
if (count < 0 && (count & LDSEM_ACTIVE_MASK) == 0)
@@ -404,7 +404,7 @@ void ldsem_up_write(struct ld_semaphore *sem)
{
long count;
- rwsem_release(&sem->dep_map, 1, _RET_IP_);
+ rwsem_release(&sem->dep_map, _RET_IP_);
count = atomic_long_add_return(-LDSEM_WRITE_BIAS, &sem->count);
if (count < 0)
diff --git a/drivers/uio/uio_dmem_genirq.c b/drivers/uio/uio_dmem_genirq.c
index ebcf1434e296..81c88f7bbbcb 100644
--- a/drivers/uio/uio_dmem_genirq.c
+++ b/drivers/uio/uio_dmem_genirq.c
@@ -151,8 +151,6 @@ static int uio_dmem_genirq_probe(struct platform_device *pdev)
int i;
if (pdev->dev.of_node) {
- int irq;
-
/* alloc uioinfo for one device */
uioinfo = kzalloc(sizeof(*uioinfo), GFP_KERNEL);
if (!uioinfo) {
@@ -163,13 +161,6 @@ static int uio_dmem_genirq_probe(struct platform_device *pdev)
uioinfo->name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%pOFn",
pdev->dev.of_node);
uioinfo->version = "devicetree";
-
- /* Multiple IRQs are not supported */
- irq = platform_get_irq(pdev, 0);
- if (irq == -ENXIO)
- uioinfo->irq = UIO_IRQ_NONE;
- else
- uioinfo->irq = irq;
}
if (!uioinfo || !uioinfo->name || !uioinfo->version) {
@@ -199,8 +190,11 @@ static int uio_dmem_genirq_probe(struct platform_device *pdev)
mutex_init(&priv->alloc_lock);
if (!uioinfo->irq) {
+ /* Multiple IRQs are not supported */
ret = platform_get_irq(pdev, 0);
- if (ret < 0)
+ if (ret == -ENXIO && pdev->dev.of_node)
+ ret = UIO_IRQ_NONE;
+ else if (ret < 0)
goto bad1;
uioinfo->irq = ret;
}
diff --git a/drivers/usb/cdns3/Kconfig b/drivers/usb/cdns3/Kconfig
index d0331613a355..2a1e89d12ed9 100644
--- a/drivers/usb/cdns3/Kconfig
+++ b/drivers/usb/cdns3/Kconfig
@@ -43,4 +43,14 @@ config USB_CDNS3_PCI_WRAP
If you choose to build this driver as module it will
be dynamically linked and module will be called cdns3-pci.ko
+config USB_CDNS3_TI
+ tristate "Cadence USB3 support on TI platforms"
+ depends on ARCH_K3 || COMPILE_TEST
+ default USB_CDNS3
+ help
+ Say 'Y' or 'M' here if you are building for Texas Instruments
+ platforms that contain the Cadence USB3 controller core,
+ e.g. the J721e.
+
endif
diff --git a/drivers/usb/cdns3/Makefile b/drivers/usb/cdns3/Makefile
index a703547350bb..948e6b88d1a9 100644
--- a/drivers/usb/cdns3/Makefile
+++ b/drivers/usb/cdns3/Makefile
@@ -14,3 +14,4 @@ endif
cdns3-$(CONFIG_USB_CDNS3_HOST) += host.o
obj-$(CONFIG_USB_CDNS3_PCI_WRAP) += cdns3-pci-wrap.o
+obj-$(CONFIG_USB_CDNS3_TI) += cdns3-ti.o
diff --git a/drivers/usb/cdns3/cdns3-ti.c b/drivers/usb/cdns3/cdns3-ti.c
new file mode 100644
index 000000000000..c6a79ca15858
--- /dev/null
+++ b/drivers/usb/cdns3/cdns3-ti.c
@@ -0,0 +1,236 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * cdns3-ti.c - TI-specific glue layer for the Cadence USB3 controller
+ *
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ */
+
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/of_platform.h>
+#include <linux/pm_runtime.h>
+
+/* USB Wrapper register offsets */
+#define USBSS_PID 0x0
+#define USBSS_W1 0x4
+#define USBSS_STATIC_CONFIG 0x8
+#define USBSS_PHY_TEST 0xc
+#define USBSS_DEBUG_CTRL 0x10
+#define USBSS_DEBUG_INFO 0x14
+#define USBSS_DEBUG_LINK_STATE 0x18
+#define USBSS_DEVICE_CTRL 0x1c
+
+/* Wrapper 1 register bits */
+#define USBSS_W1_PWRUP_RST BIT(0)
+#define USBSS_W1_OVERCURRENT_SEL BIT(8)
+#define USBSS_W1_MODESTRAP_SEL BIT(9)
+#define USBSS_W1_OVERCURRENT BIT(16)
+#define USBSS_W1_MODESTRAP_MASK GENMASK(18, 17)
+#define USBSS_W1_MODESTRAP_SHIFT 17
+#define USBSS_W1_USB2_ONLY BIT(19)
+
+/* Static config register bits */
+#define USBSS1_STATIC_PLL_REF_SEL_MASK GENMASK(8, 5)
+#define USBSS1_STATIC_PLL_REF_SEL_SHIFT 5
+#define USBSS1_STATIC_LOOPBACK_MODE_MASK GENMASK(4, 3)
+#define USBSS1_STATIC_LOOPBACK_MODE_SHIFT 3
+#define USBSS1_STATIC_VBUS_SEL_MASK GENMASK(2, 1)
+#define USBSS1_STATIC_VBUS_SEL_SHIFT 1
+#define USBSS1_STATIC_LANE_REVERSE BIT(0)
+
+/* Modestrap modes */
+enum modestrap_mode {
+ USBSS_MODESTRAP_MODE_NONE,
+ USBSS_MODESTRAP_MODE_HOST,
+ USBSS_MODESTRAP_MODE_PERIPHERAL,
+};
+
+struct cdns_ti {
+ struct device *dev;
+ void __iomem *usbss;
+ unsigned int usb2_only:1;
+ unsigned int vbus_divider:1;
+ struct clk *usb2_refclk;
+ struct clk *lpm_clk;
+};
+
+static const int cdns_ti_rate_table[] = { /* in kHz */
+ 9600,
+ 10000,
+ 12000,
+ 19200,
+ 20000,
+ 24000,
+ 25000,
+ 26000,
+ 38400,
+ 40000,
+ 58000,
+ 50000,
+ 52000,
+};
+
+static inline u32 cdns_ti_readl(struct cdns_ti *data, u32 offset)
+{
+ return readl(data->usbss + offset);
+}
+
+static inline void cdns_ti_writel(struct cdns_ti *data, u32 offset, u32 value)
+{
+ writel(value, data->usbss + offset);
+}
+
+static int cdns_ti_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = pdev->dev.of_node;
+ struct cdns_ti *data;
+ int error;
+ u32 reg;
+ int rate_code, i;
+ unsigned long rate;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, data);
+
+ data->dev = dev;
+
+ data->usbss = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(data->usbss)) {
+ dev_err(dev, "can't map IOMEM resource\n");
+ return PTR_ERR(data->usbss);
+ }
+
+ data->usb2_refclk = devm_clk_get(dev, "ref");
+ if (IS_ERR(data->usb2_refclk)) {
+ dev_err(dev, "can't get usb2_refclk\n");
+ return PTR_ERR(data->usb2_refclk);
+ }
+
+ data->lpm_clk = devm_clk_get(dev, "lpm");
+ if (IS_ERR(data->lpm_clk)) {
+ dev_err(dev, "can't get lpm_clk\n");
+ return PTR_ERR(data->lpm_clk);
+ }
+
+ rate = clk_get_rate(data->usb2_refclk);
+ rate /= 1000; /* to kHz */
+ for (i = 0; i < ARRAY_SIZE(cdns_ti_rate_table); i++) {
+ if (cdns_ti_rate_table[i] == rate)
+ break;
+ }
+
+ if (i == ARRAY_SIZE(cdns_ti_rate_table)) {
+ dev_err(dev, "unsupported usb2_refclk rate: %lu KHz\n", rate);
+ return -EINVAL;
+ }
+
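+ /*
+ * Example: a 19.2 MHz "ref" clock is 19200 kHz, which matches index 3
+ * of cdns_ti_rate_table, so rate_code 3 ends up in the PLL_REF_SEL
+ * field programmed below.
+ */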
+ rate_code = i;
+
+ pm_runtime_enable(dev);
+ error = pm_runtime_get_sync(dev);
+ if (error < 0) {
+ dev_err(dev, "pm_runtime_get_sync failed: %d\n", error);
+ goto err_get;
+ }
+
+ /* assert RESET */
+ reg = cdns_ti_readl(data, USBSS_W1);
+ reg &= ~USBSS_W1_PWRUP_RST;
+ cdns_ti_writel(data, USBSS_W1, reg);
+
+ /* set static config */
+ reg = cdns_ti_readl(data, USBSS_STATIC_CONFIG);
+ reg &= ~USBSS1_STATIC_PLL_REF_SEL_MASK;
+ reg |= rate_code << USBSS1_STATIC_PLL_REF_SEL_SHIFT;
+
+ reg &= ~USBSS1_STATIC_VBUS_SEL_MASK;
+ data->vbus_divider = device_property_read_bool(dev, "ti,vbus-divider");
+ if (data->vbus_divider)
+ reg |= 1 << USBSS1_STATIC_VBUS_SEL_SHIFT;
+
+ cdns_ti_writel(data, USBSS_STATIC_CONFIG, reg);
+ reg = cdns_ti_readl(data, USBSS_STATIC_CONFIG);
+
+ /* set USB2_ONLY mode if requested */
+ reg = cdns_ti_readl(data, USBSS_W1);
+ data->usb2_only = device_property_read_bool(dev, "ti,usb2-only");
+ if (data->usb2_only)
+ reg |= USBSS_W1_USB2_ONLY;
+
+ /* set default modestrap */
+ reg |= USBSS_W1_MODESTRAP_SEL;
+ reg &= ~USBSS_W1_MODESTRAP_MASK;
+ reg |= USBSS_MODESTRAP_MODE_NONE << USBSS_W1_MODESTRAP_SHIFT;
+ cdns_ti_writel(data, USBSS_W1, reg);
+
+ /* de-assert RESET */
+ reg |= USBSS_W1_PWRUP_RST;
+ cdns_ti_writel(data, USBSS_W1, reg);
+
+ error = of_platform_populate(node, NULL, NULL, dev);
+ if (error) {
+ dev_err(dev, "failed to create children: %d\n", error);
+ goto err;
+ }
+
+ return 0;
+
+err:
+ pm_runtime_put_sync(data->dev);
+err_get:
+ pm_runtime_disable(data->dev);
+
+ return error;
+}
+
+static int cdns_ti_remove_core(struct device *dev, void *c)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+
+ platform_device_unregister(pdev);
+
+ return 0;
+}
+
+static int cdns_ti_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+
+ device_for_each_child(dev, NULL, cdns_ti_remove_core);
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
+
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static const struct of_device_id cdns_ti_of_match[] = {
+ { .compatible = "ti,j721e-usb", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, cdns_ti_of_match);
+
+static struct platform_driver cdns_ti_driver = {
+ .probe = cdns_ti_probe,
+ .remove = cdns_ti_remove,
+ .driver = {
+ .name = "cdns3-ti",
+ .of_match_table = cdns_ti_of_match,
+ },
+};
+
+module_platform_driver(cdns_ti_driver);
+
+MODULE_ALIAS("platform:cdns3-ti");
+MODULE_AUTHOR("Roger Quadros <rogerq@ti.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Cadence USB3 TI Glue Layer");
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
index df8812c30640..d8e7eb2f97b9 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.c
+++ b/drivers/usb/chipidea/ci_hdrc_imx.c
@@ -274,11 +274,14 @@ static int ci_hdrc_imx_notify_event(struct ci_hdrc *ci, unsigned int event)
switch (event) {
case CI_HDRC_IMX_HSIC_ACTIVE_EVENT:
- ret = pinctrl_select_state(data->pinctrl,
- data->pinctrl_hsic_active);
- if (ret)
- dev_err(dev, "hsic_active select failed, err=%d\n",
- ret);
+ if (data->pinctrl) {
+ ret = pinctrl_select_state(data->pinctrl,
+ data->pinctrl_hsic_active);
+ if (ret)
+ dev_err(dev,
+ "hsic_active select failed, err=%d\n",
+ ret);
+ }
break;
case CI_HDRC_IMX_HSIC_SUSPEND_EVENT:
ret = imx_usbmisc_hsic_set_connect(data->usbmisc_data);
@@ -306,7 +309,6 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
const struct ci_hdrc_imx_platform_flag *imx_platform_flag;
struct device_node *np = pdev->dev.of_node;
struct device *dev = &pdev->dev;
- struct pinctrl_state *pinctrl_hsic_idle;
of_id = of_match_device(ci_hdrc_imx_dt_ids, dev);
if (!of_id)
@@ -330,12 +332,42 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
pdata.flags |= CI_HDRC_IMX_IS_HSIC;
data->usbmisc_data->hsic = 1;
data->pinctrl = devm_pinctrl_get(dev);
- if (IS_ERR(data->pinctrl)) {
- dev_err(dev, "pinctrl get failed, err=%ld\n",
+ if (PTR_ERR(data->pinctrl) == -ENODEV)
+ data->pinctrl = NULL;
+ else if (IS_ERR(data->pinctrl)) {
+ if (PTR_ERR(data->pinctrl) != -EPROBE_DEFER)
+ dev_err(dev, "pinctrl get failed, err=%ld\n",
PTR_ERR(data->pinctrl));
return PTR_ERR(data->pinctrl);
}
+ data->hsic_pad_regulator =
+ devm_regulator_get_optional(dev, "hsic");
+ if (PTR_ERR(data->hsic_pad_regulator) == -ENODEV) {
+ /* no pad regulator is needed */
+ data->hsic_pad_regulator = NULL;
+ } else if (IS_ERR(data->hsic_pad_regulator)) {
+ if (PTR_ERR(data->hsic_pad_regulator) != -EPROBE_DEFER)
+ dev_err(dev,
+ "Get HSIC pad regulator error: %ld\n",
+ PTR_ERR(data->hsic_pad_regulator));
+ return PTR_ERR(data->hsic_pad_regulator);
+ }
+
+ if (data->hsic_pad_regulator) {
+ ret = regulator_enable(data->hsic_pad_regulator);
+ if (ret) {
+ dev_err(dev,
+ "Failed to enable HSIC pad regulator\n");
+ return ret;
+ }
+ }
+ }
+
+ /* HSIC pinctrl handling */
+ if (data->pinctrl) {
+ struct pinctrl_state *pinctrl_hsic_idle;
+
pinctrl_hsic_idle = pinctrl_lookup_state(data->pinctrl, "idle");
if (IS_ERR(pinctrl_hsic_idle)) {
dev_err(dev,
@@ -358,27 +390,6 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
PTR_ERR(data->pinctrl_hsic_active));
return PTR_ERR(data->pinctrl_hsic_active);
}
-
- data->hsic_pad_regulator = devm_regulator_get(dev, "hsic");
- if (PTR_ERR(data->hsic_pad_regulator) == -EPROBE_DEFER) {
- return -EPROBE_DEFER;
- } else if (PTR_ERR(data->hsic_pad_regulator) == -ENODEV) {
- /* no pad regualator is needed */
- data->hsic_pad_regulator = NULL;
- } else if (IS_ERR(data->hsic_pad_regulator)) {
- dev_err(dev, "Get HSIC pad regulator error: %ld\n",
- PTR_ERR(data->hsic_pad_regulator));
- return PTR_ERR(data->hsic_pad_regulator);
- }
-
- if (data->hsic_pad_regulator) {
- ret = regulator_enable(data->hsic_pad_regulator);
- if (ret) {
- dev_err(dev,
- "Failed to enable HSIC pad regulator\n");
- return ret;
- }
- }
}
if (pdata.flags & CI_HDRC_PMQOS)
@@ -433,6 +444,16 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
goto err_clk;
}
+ if (data->usbmisc_data) {
+ if (!IS_ERR(pdata.id_extcon.edev) ||
+ of_property_read_bool(np, "usb-role-switch"))
+ data->usbmisc_data->ext_id = 1;
+
+ if (!IS_ERR(pdata.vbus_extcon.edev) ||
+ of_property_read_bool(np, "usb-role-switch"))
+ data->usbmisc_data->ext_vbus = 1;
+ }
+
ret = imx_usbmisc_init_post(data->usbmisc_data);
if (ret) {
dev_err(dev, "usbmisc post failed, ret=%d\n", ret);
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.h b/drivers/usb/chipidea/ci_hdrc_imx.h
index c842e03f8767..de2aac9a2868 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.h
+++ b/drivers/usb/chipidea/ci_hdrc_imx.h
@@ -22,6 +22,8 @@ struct imx_usbmisc_data {
unsigned int evdo:1; /* set external vbus divider option */
unsigned int ulpi:1; /* connected to an ULPI phy */
unsigned int hsic:1; /* HSIC controlller */
+ unsigned int ext_id:1; /* ID from external event */
+ unsigned int ext_vbus:1; /* Vbus from external event */
};
int imx_usbmisc_init(struct imx_usbmisc_data *data);
diff --git a/drivers/usb/chipidea/ci_hdrc_tegra.c b/drivers/usb/chipidea/ci_hdrc_tegra.c
index 12025358bb3c..0c9911d44ee5 100644
--- a/drivers/usb/chipidea/ci_hdrc_tegra.c
+++ b/drivers/usb/chipidea/ci_hdrc_tegra.c
@@ -24,35 +24,23 @@ struct tegra_udc_soc_info {
unsigned long flags;
};
-static const struct tegra_udc_soc_info tegra20_udc_soc_info = {
- .flags = CI_HDRC_REQUIRES_ALIGNED_DMA,
-};
-
-static const struct tegra_udc_soc_info tegra30_udc_soc_info = {
- .flags = CI_HDRC_REQUIRES_ALIGNED_DMA,
-};
-
-static const struct tegra_udc_soc_info tegra114_udc_soc_info = {
- .flags = CI_HDRC_REQUIRES_ALIGNED_DMA,
-};
-
-static const struct tegra_udc_soc_info tegra124_udc_soc_info = {
+static const struct tegra_udc_soc_info tegra_udc_soc_info = {
.flags = CI_HDRC_REQUIRES_ALIGNED_DMA,
};
static const struct of_device_id tegra_udc_of_match[] = {
{
.compatible = "nvidia,tegra20-udc",
- .data = &tegra20_udc_soc_info,
+ .data = &tegra_udc_soc_info,
}, {
.compatible = "nvidia,tegra30-udc",
- .data = &tegra30_udc_soc_info,
+ .data = &tegra_udc_soc_info,
}, {
.compatible = "nvidia,tegra114-udc",
- .data = &tegra114_udc_soc_info,
+ .data = &tegra_udc_soc_info,
}, {
.compatible = "nvidia,tegra124-udc",
- .data = &tegra124_udc_soc_info,
+ .data = &tegra_udc_soc_info,
}, {
/* sentinel */
}
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index 98ee575ee500..dce5db41501c 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -683,7 +683,7 @@ static int ci_get_platdata(struct device *dev,
if (platdata->dr_mode != USB_DR_MODE_PERIPHERAL) {
/* Get the vbus regulator */
- platdata->reg_vbus = devm_regulator_get(dev, "vbus");
+ platdata->reg_vbus = devm_regulator_get_optional(dev, "vbus");
if (PTR_ERR(platdata->reg_vbus) == -EPROBE_DEFER) {
return -EPROBE_DEFER;
} else if (PTR_ERR(platdata->reg_vbus) == -ENODEV) {
diff --git a/drivers/usb/chipidea/debug.c b/drivers/usb/chipidea/debug.c
index fcc91a338875..e0376ee646ad 100644
--- a/drivers/usb/chipidea/debug.c
+++ b/drivers/usb/chipidea/debug.c
@@ -342,7 +342,7 @@ DEFINE_SHOW_ATTRIBUTE(ci_registers);
*/
void dbg_create_files(struct ci_hdrc *ci)
{
- ci->debugfs = debugfs_create_dir(dev_name(ci->dev), NULL);
+ ci->debugfs = debugfs_create_dir(dev_name(ci->dev), usb_debug_root);
debugfs_create_file("device", S_IRUGO, ci->debugfs, ci,
&ci_device_fops);
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index 8f18e7b6cadf..ffaf46f5d062 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -1524,42 +1524,53 @@ static const struct usb_ep_ops usb_ep_ops = {
/******************************************************************************
* GADGET block
*****************************************************************************/
+/**
+ * ci_hdrc_gadget_connect: caller makes sure gadget driver is bound
+ */
+static void ci_hdrc_gadget_connect(struct usb_gadget *_gadget, int is_active)
+{
+ struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);
+ unsigned long flags;
+
+ if (is_active) {
+ pm_runtime_get_sync(&_gadget->dev);
+ hw_device_reset(ci);
+ spin_lock_irqsave(&ci->lock, flags);
+ if (ci->driver) {
+ hw_device_state(ci, ci->ep0out->qh.dma);
+ usb_gadget_set_state(_gadget, USB_STATE_POWERED);
+ usb_udc_vbus_handler(_gadget, true);
+ }
+ spin_unlock_irqrestore(&ci->lock, flags);
+ } else {
+ usb_udc_vbus_handler(_gadget, false);
+ if (ci->driver)
+ ci->driver->disconnect(&ci->gadget);
+ hw_device_state(ci, 0);
+ if (ci->platdata->notify_event)
+ ci->platdata->notify_event(ci,
+ CI_HDRC_CONTROLLER_STOPPED_EVENT);
+ _gadget_stop_activity(&ci->gadget);
+ pm_runtime_put_sync(&_gadget->dev);
+ usb_gadget_set_state(_gadget, USB_STATE_NOTATTACHED);
+ }
+}
+
static int ci_udc_vbus_session(struct usb_gadget *_gadget, int is_active)
{
struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);
unsigned long flags;
- int gadget_ready = 0;
spin_lock_irqsave(&ci->lock, flags);
ci->vbus_active = is_active;
- if (ci->driver)
- gadget_ready = 1;
spin_unlock_irqrestore(&ci->lock, flags);
if (ci->usb_phy)
usb_phy_set_charger_state(ci->usb_phy, is_active ?
USB_CHARGER_PRESENT : USB_CHARGER_ABSENT);
- if (gadget_ready) {
- if (is_active) {
- pm_runtime_get_sync(&_gadget->dev);
- hw_device_reset(ci);
- hw_device_state(ci, ci->ep0out->qh.dma);
- usb_gadget_set_state(_gadget, USB_STATE_POWERED);
- usb_udc_vbus_handler(_gadget, true);
- } else {
- usb_udc_vbus_handler(_gadget, false);
- if (ci->driver)
- ci->driver->disconnect(&ci->gadget);
- hw_device_state(ci, 0);
- if (ci->platdata->notify_event)
- ci->platdata->notify_event(ci,
- CI_HDRC_CONTROLLER_STOPPED_EVENT);
- _gadget_stop_activity(&ci->gadget);
- pm_runtime_put_sync(&_gadget->dev);
- usb_gadget_set_state(_gadget, USB_STATE_NOTATTACHED);
- }
- }
+ if (ci->driver)
+ ci_hdrc_gadget_connect(_gadget, is_active);
return 0;
}
@@ -1612,7 +1623,7 @@ static int ci_udc_selfpowered(struct usb_gadget *_gadget, int is_on)
}
/* Change Data+ pullup status
- * this func is used by usb_gadget_connect/disconnet
+ * this func is used by usb_gadget_connect/disconnect
*/
static int ci_udc_pullup(struct usb_gadget *_gadget, int is_on)
{
@@ -1785,18 +1796,10 @@ static int ci_udc_start(struct usb_gadget *gadget,
return retval;
}
- pm_runtime_get_sync(&ci->gadget.dev);
- if (ci->vbus_active) {
- hw_device_reset(ci);
- } else {
+ if (ci->vbus_active)
+ ci_hdrc_gadget_connect(gadget, 1);
+ else
usb_udc_vbus_handler(&ci->gadget, false);
- pm_runtime_put_sync(&ci->gadget.dev);
- return retval;
- }
-
- retval = hw_device_state(ci, ci->ep0out->qh.dma);
- if (retval)
- pm_runtime_put_sync(&ci->gadget.dev);
return retval;
}
@@ -1826,6 +1829,7 @@ static int ci_udc_stop(struct usb_gadget *gadget)
unsigned long flags;
spin_lock_irqsave(&ci->lock, flags);
+ ci->driver = NULL;
if (ci->vbus_active) {
hw_device_state(ci, 0);
@@ -1838,7 +1842,6 @@ static int ci_udc_stop(struct usb_gadget *gadget)
pm_runtime_put(&ci->gadget.dev);
}
- ci->driver = NULL;
spin_unlock_irqrestore(&ci->lock, flags);
ci_udc_stop_for_otg_fsm(ci);
diff --git a/drivers/usb/chipidea/usbmisc_imx.c b/drivers/usb/chipidea/usbmisc_imx.c
index 078c1fdce493..e81e33c26e6c 100644
--- a/drivers/usb/chipidea/usbmisc_imx.c
+++ b/drivers/usb/chipidea/usbmisc_imx.c
@@ -100,6 +100,9 @@
#define MX7D_USB_VBUS_WAKEUP_SOURCE_BVALID MX7D_USB_VBUS_WAKEUP_SOURCE(2)
#define MX7D_USB_VBUS_WAKEUP_SOURCE_SESS_END MX7D_USB_VBUS_WAKEUP_SOURCE(3)
+#define MX6_USB_OTG_WAKEUP_BITS (MX6_BM_WAKEUP_ENABLE | MX6_BM_VBUS_WAKEUP | \
+ MX6_BM_ID_WAKEUP)
+
struct usbmisc_ops {
/* It's called once when probe a usb device */
int (*init)(struct imx_usbmisc_data *data);
@@ -330,14 +333,25 @@ static int usbmisc_imx53_init(struct imx_usbmisc_data *data)
return 0;
}
+static u32 usbmisc_wakeup_setting(struct imx_usbmisc_data *data)
+{
+ u32 wakeup_setting = MX6_USB_OTG_WAKEUP_BITS;
+
+ if (data->ext_id)
+ wakeup_setting &= ~MX6_BM_ID_WAKEUP;
+
+ if (data->ext_vbus)
+ wakeup_setting &= ~MX6_BM_VBUS_WAKEUP;
+
+ return wakeup_setting;
+}
+
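For example, when both @ext_id and @ext_vbus are set (ID and VBUS events delivered via extcon or usb-role-switch, per the ci_hdrc_imx.c change above), only MX6_BM_WAKEUP_ENABLE remains in the returned setting, so stale ID/VBUS pin levels cannot trigger spurious wakeups.
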
static int usbmisc_imx6q_set_wakeup
(struct imx_usbmisc_data *data, bool enabled)
{
struct imx_usbmisc *usbmisc = dev_get_drvdata(data->dev);
unsigned long flags;
u32 val;
- u32 wakeup_setting = (MX6_BM_WAKEUP_ENABLE |
- MX6_BM_VBUS_WAKEUP | MX6_BM_ID_WAKEUP);
int ret = 0;
if (data->index > 3)
@@ -346,11 +360,12 @@ static int usbmisc_imx6q_set_wakeup
spin_lock_irqsave(&usbmisc->lock, flags);
val = readl(usbmisc->base + data->index * 4);
if (enabled) {
- val |= wakeup_setting;
+ val &= ~MX6_USB_OTG_WAKEUP_BITS;
+ val |= usbmisc_wakeup_setting(data);
} else {
if (val & MX6_BM_WAKEUP_INTR)
pr_debug("wakeup int at ci_hdrc.%d\n", data->index);
- val &= ~wakeup_setting;
+ val &= ~MX6_USB_OTG_WAKEUP_BITS;
}
writel(val, usbmisc->base + data->index * 4);
spin_unlock_irqrestore(&usbmisc->lock, flags);
@@ -547,17 +562,17 @@ static int usbmisc_imx7d_set_wakeup
struct imx_usbmisc *usbmisc = dev_get_drvdata(data->dev);
unsigned long flags;
u32 val;
- u32 wakeup_setting = (MX6_BM_WAKEUP_ENABLE |
- MX6_BM_VBUS_WAKEUP | MX6_BM_ID_WAKEUP);
spin_lock_irqsave(&usbmisc->lock, flags);
val = readl(usbmisc->base);
if (enabled) {
- writel(val | wakeup_setting, usbmisc->base);
+ val &= ~MX6_USB_OTG_WAKEUP_BITS;
+ val |= usbmisc_wakeup_setting(data);
+ writel(val, usbmisc->base);
} else {
if (val & MX6_BM_WAKEUP_INTR)
dev_dbg(data->dev, "wakeup int\n");
- writel(val & ~wakeup_setting, usbmisc->base);
+ writel(val & ~MX6_USB_OTG_WAKEUP_BITS, usbmisc->base);
}
spin_unlock_irqrestore(&usbmisc->lock, flags);
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index 1ac1095bfeac..5f40117e68e7 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -805,10 +805,10 @@ int usb_get_configuration(struct usb_device *dev)
{
struct device *ddev = &dev->dev;
int ncfg = dev->descriptor.bNumConfigurations;
- int result = -ENOMEM;
unsigned int cfgno, length;
unsigned char *bigbuffer;
struct usb_config_descriptor *desc;
+ int result;
if (ncfg > USB_MAXCONFIG) {
dev_warn(ddev, "too many configurations: %d, "
@@ -824,16 +824,16 @@ int usb_get_configuration(struct usb_device *dev)
length = ncfg * sizeof(struct usb_host_config);
dev->config = kzalloc(length, GFP_KERNEL);
if (!dev->config)
- goto err2;
+ return -ENOMEM;
length = ncfg * sizeof(char *);
dev->rawdescriptors = kzalloc(length, GFP_KERNEL);
if (!dev->rawdescriptors)
- goto err2;
+ return -ENOMEM;
desc = kmalloc(USB_DT_CONFIG_SIZE, GFP_KERNEL);
if (!desc)
- goto err2;
+ return -ENOMEM;
for (cfgno = 0; cfgno < ncfg; cfgno++) {
/* We grab just the first descriptor so we know how long
@@ -895,9 +895,7 @@ int usb_get_configuration(struct usb_device *dev)
err:
kfree(desc);
dev->descriptor.bNumConfigurations = cfgno;
-err2:
- if (result == -ENOMEM)
- dev_err(ddev, "out of memory\n");
+
return result;
}
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 3f899552f6e3..879d03f5127c 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -764,8 +764,15 @@ static int claimintf(struct usb_dev_state *ps, unsigned int ifnum)
intf = usb_ifnum_to_if(dev, ifnum);
if (!intf)
err = -ENOENT;
- else
+ else {
+ unsigned int old_suppress;
+
+ /* suppress uevents while claiming interface */
+ old_suppress = dev_get_uevent_suppress(&intf->dev);
+ dev_set_uevent_suppress(&intf->dev, 1);
err = usb_driver_claim_interface(&usbfs_driver, intf, ps);
+ dev_set_uevent_suppress(&intf->dev, old_suppress);
+ }
if (err == 0)
set_bit(ifnum, &ps->ifclaimed);
return err;
@@ -785,7 +792,13 @@ static int releaseintf(struct usb_dev_state *ps, unsigned int ifnum)
if (!intf)
err = -ENOENT;
else if (test_and_clear_bit(ifnum, &ps->ifclaimed)) {
+ unsigned int old_suppress;
+
+ /* suppress uevents while releasing interface */
+ old_suppress = dev_get_uevent_suppress(&intf->dev);
+ dev_set_uevent_suppress(&intf->dev, 1);
usb_driver_release_interface(&usbfs_driver, intf);
+ dev_set_uevent_suppress(&intf->dev, old_suppress);
err = 0;
}
return err;
@@ -1550,10 +1563,10 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
uurb->buffer_length = le16_to_cpu(dr->wLength);
uurb->buffer += 8;
if ((dr->bRequestType & USB_DIR_IN) && uurb->buffer_length) {
- is_in = 1;
+ is_in = true;
uurb->endpoint |= USB_DIR_IN;
} else {
- is_in = 0;
+ is_in = false;
uurb->endpoint &= ~USB_DIR_IN;
}
if (is_in)
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 236313f41f4a..1709895387b9 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -4930,6 +4930,91 @@ hub_power_remaining(struct usb_hub *hub)
return remaining;
}
+
+static int descriptors_changed(struct usb_device *udev,
+ struct usb_device_descriptor *old_device_descriptor,
+ struct usb_host_bos *old_bos)
+{
+ int changed = 0;
+ unsigned index;
+ unsigned serial_len = 0;
+ unsigned len;
+ unsigned old_length;
+ int length;
+ char *buf;
+
+ if (memcmp(&udev->descriptor, old_device_descriptor,
+ sizeof(*old_device_descriptor)) != 0)
+ return 1;
+
+ if ((old_bos && !udev->bos) || (!old_bos && udev->bos))
+ return 1;
+ if (udev->bos) {
+ len = le16_to_cpu(udev->bos->desc->wTotalLength);
+ if (len != le16_to_cpu(old_bos->desc->wTotalLength))
+ return 1;
+ if (memcmp(udev->bos->desc, old_bos->desc, len))
+ return 1;
+ }
+
+ /* Since the idVendor, idProduct, and bcdDevice values in the
+ * device descriptor haven't changed, we will assume the
+ * Manufacturer and Product strings haven't changed either.
+ * But the SerialNumber string could be different (e.g., a
+ * different flash card of the same brand).
+ */
+ if (udev->serial)
+ serial_len = strlen(udev->serial) + 1;
+
+ len = serial_len;
+ for (index = 0; index < udev->descriptor.bNumConfigurations; index++) {
+ old_length = le16_to_cpu(udev->config[index].desc.wTotalLength);
+ len = max(len, old_length);
+ }
+
+ buf = kmalloc(len, GFP_NOIO);
+ if (!buf)
+ /* assume the worst */
+ return 1;
+
+ for (index = 0; index < udev->descriptor.bNumConfigurations; index++) {
+ old_length = le16_to_cpu(udev->config[index].desc.wTotalLength);
+ length = usb_get_descriptor(udev, USB_DT_CONFIG, index, buf,
+ old_length);
+ if (length != old_length) {
+ dev_dbg(&udev->dev, "config index %d, error %d\n",
+ index, length);
+ changed = 1;
+ break;
+ }
+ if (memcmp(buf, udev->rawdescriptors[index], old_length)
+ != 0) {
+ dev_dbg(&udev->dev, "config index %d changed (#%d)\n",
+ index,
+ ((struct usb_config_descriptor *) buf)->
+ bConfigurationValue);
+ changed = 1;
+ break;
+ }
+ }
+
+ if (!changed && serial_len) {
+ length = usb_string(udev, udev->descriptor.iSerialNumber,
+ buf, serial_len);
+ if (length + 1 != serial_len) {
+ dev_dbg(&udev->dev, "serial string error %d\n",
+ length);
+ changed = 1;
+ } else if (memcmp(buf, udev->serial, length) != 0) {
+ dev_dbg(&udev->dev, "serial string changed\n");
+ changed = 1;
+ }
+ }
+
+ kfree(buf);
+ return changed;
+}
+
static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
u16 portchange)
{
@@ -5167,7 +5252,9 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
{
struct usb_port *port_dev = hub->ports[port1 - 1];
struct usb_device *udev = port_dev->child;
+ struct usb_device_descriptor descriptor;
int status = -ENODEV;
+ int retval;
dev_dbg(&port_dev->dev, "status %04x, change %04x, %s\n", portstatus,
portchange, portspeed(hub, portstatus));
@@ -5188,7 +5275,30 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
if ((portstatus & USB_PORT_STAT_CONNECTION) && udev &&
udev->state != USB_STATE_NOTATTACHED) {
if (portstatus & USB_PORT_STAT_ENABLE) {
- status = 0; /* Nothing to do */
+ /*
+ * USB-3 connections are initialized automatically by
+ * the hostcontroller hardware. Therefore check for
+ * changed device descriptors before resuscitating the
+ * device.
+ */
+ descriptor = udev->descriptor;
+ retval = usb_get_device_descriptor(udev,
+ sizeof(udev->descriptor));
+ if (retval < 0) {
+ dev_dbg(&udev->dev,
+ "can't read device descriptor %d\n",
+ retval);
+ } else {
+ if (descriptors_changed(udev, &descriptor,
+ udev->bos)) {
+ dev_dbg(&udev->dev,
+ "device descriptor has changed\n");
+ /* for disconnect() calls */
+ udev->descriptor = descriptor;
+ } else {
+ status = 0; /* Nothing to do */
+ }
+ }
#ifdef CONFIG_PM
} else if (udev->state == USB_STATE_SUSPENDED &&
udev->persist_enabled) {
@@ -5550,90 +5660,6 @@ void usb_hub_cleanup(void)
usb_deregister(&hub_driver);
} /* usb_hub_cleanup() */
-static int descriptors_changed(struct usb_device *udev,
- struct usb_device_descriptor *old_device_descriptor,
- struct usb_host_bos *old_bos)
-{
- int changed = 0;
- unsigned index;
- unsigned serial_len = 0;
- unsigned len;
- unsigned old_length;
- int length;
- char *buf;
-
- if (memcmp(&udev->descriptor, old_device_descriptor,
- sizeof(*old_device_descriptor)) != 0)
- return 1;
-
- if ((old_bos && !udev->bos) || (!old_bos && udev->bos))
- return 1;
- if (udev->bos) {
- len = le16_to_cpu(udev->bos->desc->wTotalLength);
- if (len != le16_to_cpu(old_bos->desc->wTotalLength))
- return 1;
- if (memcmp(udev->bos->desc, old_bos->desc, len))
- return 1;
- }
-
- /* Since the idVendor, idProduct, and bcdDevice values in the
- * device descriptor haven't changed, we will assume the
- * Manufacturer and Product strings haven't changed either.
- * But the SerialNumber string could be different (e.g., a
- * different flash card of the same brand).
- */
- if (udev->serial)
- serial_len = strlen(udev->serial) + 1;
-
- len = serial_len;
- for (index = 0; index < udev->descriptor.bNumConfigurations; index++) {
- old_length = le16_to_cpu(udev->config[index].desc.wTotalLength);
- len = max(len, old_length);
- }
-
- buf = kmalloc(len, GFP_NOIO);
- if (!buf)
- /* assume the worst */
- return 1;
-
- for (index = 0; index < udev->descriptor.bNumConfigurations; index++) {
- old_length = le16_to_cpu(udev->config[index].desc.wTotalLength);
- length = usb_get_descriptor(udev, USB_DT_CONFIG, index, buf,
- old_length);
- if (length != old_length) {
- dev_dbg(&udev->dev, "config index %d, error %d\n",
- index, length);
- changed = 1;
- break;
- }
- if (memcmp(buf, udev->rawdescriptors[index], old_length)
- != 0) {
- dev_dbg(&udev->dev, "config index %d changed (#%d)\n",
- index,
- ((struct usb_config_descriptor *) buf)->
- bConfigurationValue);
- changed = 1;
- break;
- }
- }
-
- if (!changed && serial_len) {
- length = usb_string(udev, udev->descriptor.iSerialNumber,
- buf, serial_len);
- if (length + 1 != serial_len) {
- dev_dbg(&udev->dev, "serial string error %d\n",
- length);
- changed = 1;
- } else if (memcmp(buf, udev->serial, length) != 0) {
- dev_dbg(&udev->dev, "serial string changed\n");
- changed = 1;
- }
- }
-
- kfree(buf);
- return changed;
-}
-
/**
* usb_reset_and_verify_device - perform a USB port reset to reinitialize a device
* @udev: device to reset (not in SUSPENDED or NOTATTACHED state)
@@ -5814,7 +5840,7 @@ re_enumerate_no_bos:
/**
* usb_reset_device - warn interface drivers and perform a USB port reset
- * @udev: device to reset (not in SUSPENDED or NOTATTACHED state)
+ * @udev: device to reset (not in NOTATTACHED state)
*
* Warns all drivers bound to registered interfaces (using their pre_reset
* method), performs the port reset, and then lets the drivers know that
@@ -5842,8 +5868,7 @@ int usb_reset_device(struct usb_device *udev)
struct usb_host_config *config = udev->actconfig;
struct usb_hub *hub = usb_hub_to_struct_hub(udev->parent);
- if (udev->state == USB_STATE_NOTATTACHED ||
- udev->state == USB_STATE_SUSPENDED) {
+ if (udev->state == USB_STATE_NOTATTACHED) {
dev_dbg(&udev->dev, "device reset not allowed in state %d\n",
udev->state);
return -EINVAL;
diff --git a/drivers/usb/dwc2/core.c b/drivers/usb/dwc2/core.c
index 8e41d70fd298..78a4925aa118 100644
--- a/drivers/usb/dwc2/core.c
+++ b/drivers/usb/dwc2/core.c
@@ -524,7 +524,7 @@ int dwc2_core_reset(struct dwc2_hsotg *hsotg, bool skip_wait)
greset |= GRSTCTL_CSFTRST;
dwc2_writel(hsotg, greset, GRSTCTL);
- if (dwc2_hsotg_wait_bit_clear(hsotg, GRSTCTL, GRSTCTL_CSFTRST, 50)) {
+ if (dwc2_hsotg_wait_bit_clear(hsotg, GRSTCTL, GRSTCTL_CSFTRST, 10000)) {
dev_warn(hsotg->dev, "%s: HANG! Soft Reset timeout GRSTCTL GRSTCTL_CSFTRST\n",
__func__);
return -EBUSY;
diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
index d08d070a0fb6..968e03b89d04 100644
--- a/drivers/usb/dwc2/core.h
+++ b/drivers/usb/dwc2/core.h
@@ -134,7 +134,7 @@ struct dwc2_hsotg_req;
* @target_frame: Targeted frame num to setup next ISOC transfer
* @frame_overrun: Indicates SOF number overrun in DSTS
*
- * This is the driver's state for each registered enpoint, allowing it
+ * This is the driver's state for each registered endpoint, allowing it
* to keep track of transactions that need doing. Each endpoint has a
* lock to protect the state, to try and avoid using an overall lock
* for the host controller as much as possible.
diff --git a/drivers/usb/dwc2/debugfs.c b/drivers/usb/dwc2/debugfs.c
index 7f62f4cdc265..b8f2790abf91 100644
--- a/drivers/usb/dwc2/debugfs.c
+++ b/drivers/usb/dwc2/debugfs.c
@@ -770,7 +770,7 @@ int dwc2_debugfs_init(struct dwc2_hsotg *hsotg)
int ret;
struct dentry *root;
- root = debugfs_create_dir(dev_name(hsotg->dev), NULL);
+ root = debugfs_create_dir(dev_name(hsotg->dev), usb_debug_root);
hsotg->debug_root = root;
debugfs_create_file("params", 0444, root, hsotg, &params_fops);
diff --git a/drivers/usb/dwc3/Kconfig b/drivers/usb/dwc3/Kconfig
index 556a876c7896..206caa0ea1c6 100644
--- a/drivers/usb/dwc3/Kconfig
+++ b/drivers/usb/dwc3/Kconfig
@@ -97,24 +97,24 @@ config USB_DWC3_KEYSTONE
Say 'Y' or 'M' here if you have one such device
config USB_DWC3_MESON_G12A
- tristate "Amlogic Meson G12A Platforms"
- depends on OF && COMMON_CLK
- depends on ARCH_MESON || COMPILE_TEST
- default USB_DWC3
- select USB_ROLE_SWITCH
+ tristate "Amlogic Meson G12A Platforms"
+ depends on OF && COMMON_CLK
+ depends on ARCH_MESON || COMPILE_TEST
+ default USB_DWC3
+ select USB_ROLE_SWITCH
select REGMAP_MMIO
- help
- Support USB2/3 functionality in Amlogic G12A platforms.
- Say 'Y' or 'M' if you have one such device.
+ help
+ Support USB2/3 functionality in Amlogic G12A platforms.
+ Say 'Y' or 'M' if you have one such device.
config USB_DWC3_OF_SIMPLE
- tristate "Generic OF Simple Glue Layer"
- depends on OF && COMMON_CLK
- default USB_DWC3
- help
- Support USB2/3 functionality in simple SoC integrations.
- Currently supports Xilinx and Qualcomm DWC USB3 IP.
- Say 'Y' or 'M' if you have one such device.
+ tristate "Generic OF Simple Glue Layer"
+ depends on OF && COMMON_CLK
+ default USB_DWC3
+ help
+ Support USB2/3 functionality in simple SoC integrations.
+ Currently supports Xilinx and Qualcomm DWC USB3 IP.
+ Say 'Y' or 'M' if you have one such device.
config USB_DWC3_ST
tristate "STMicroelectronics Platforms"
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 97d6ae3c4df2..f561c6c9e8a9 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -566,8 +566,11 @@ static int dwc3_core_ulpi_init(struct dwc3 *dwc)
*/
static int dwc3_phy_setup(struct dwc3 *dwc)
{
+ unsigned int hw_mode;
u32 reg;
+ hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0);
+
reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
/*
@@ -585,6 +588,14 @@ static int dwc3_phy_setup(struct dwc3 *dwc)
if (dwc->revision > DWC3_REVISION_194A)
reg |= DWC3_GUSB3PIPECTL_SUSPHY;
+ /*
+ * For DRD controllers, GUSB3PIPECTL.SUSPENDENABLE must be cleared after
+ * power-on reset, and it can be set after core initialization, which is
+ * after device soft-reset during initialization.
+ */
+ if (hw_mode == DWC3_GHWPARAMS0_MODE_DRD)
+ reg &= ~DWC3_GUSB3PIPECTL_SUSPHY;
+
if (dwc->u2ss_inp3_quirk)
reg |= DWC3_GUSB3PIPECTL_U2SSINP3OK;
@@ -668,6 +679,14 @@ static int dwc3_phy_setup(struct dwc3 *dwc)
if (dwc->revision > DWC3_REVISION_194A)
reg |= DWC3_GUSB2PHYCFG_SUSPHY;
+ /*
+ * For DRD controllers, GUSB2PHYCFG.SUSPHY must be cleared after
+ * power-on reset, and it can be set after core initialization, which is
+ * after device soft-reset during initialization.
+ */
+ if (hw_mode == DWC3_GHWPARAMS0_MODE_DRD)
+ reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
+
if (dwc->dis_u2_susphy_quirk)
reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
@@ -902,9 +921,12 @@ static void dwc3_set_incr_burst_type(struct dwc3 *dwc)
*/
static int dwc3_core_init(struct dwc3 *dwc)
{
+ unsigned int hw_mode;
u32 reg;
int ret;
+ hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0);
+
/*
* Write Linux Version Code to our GUID register so it's easy to figure
* out which kernel version a bug was found.
@@ -940,6 +962,21 @@ static int dwc3_core_init(struct dwc3 *dwc)
if (ret)
goto err0a;
+ if (hw_mode == DWC3_GHWPARAMS0_MODE_DRD &&
+ dwc->revision > DWC3_REVISION_194A) {
+ if (!dwc->dis_u3_susphy_quirk) {
+ reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
+ reg |= DWC3_GUSB3PIPECTL_SUSPHY;
+ dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg);
+ }
+
+ if (!dwc->dis_u2_susphy_quirk) {
+ reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
+ reg |= DWC3_GUSB2PHYCFG_SUSPHY;
+ dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
+ }
+ }
+
dwc3_core_setup_global_control(dwc);
dwc3_core_num_eps(dwc);
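[Note: taken together, the hunks above give DRD controllers a fixed SUSPHY ordering; both suspend-PHY bits are forced off in dwc3_phy_setup(), right after power-on reset, and re-enabled in dwc3_core_init() only once the device soft-reset has completed. A condensed sketch of the resulting sequence for the USB2 bit (illustrative only; the driver does the same for DWC3_GUSB3PIPECTL_SUSPHY):

	u32 reg;

	/* dwc3_phy_setup(): SUSPHY must stay clear across power-on reset */
	reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
	reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
	dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);

	/* dwc3_core_init(), after the soft-reset: switch it back on, unless
	 * the dis_u2_susphy_quirk asked for it to stay off */
	reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
	reg |= DWC3_GUSB2PHYCFG_SUSPHY;
	dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
]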
diff --git a/drivers/usb/dwc3/debug.h b/drivers/usb/dwc3/debug.h
index 9baabed87d61..e56beb9d1e36 100644
--- a/drivers/usb/dwc3/debug.h
+++ b/drivers/usb/dwc3/debug.h
@@ -112,7 +112,7 @@ dwc3_gadget_link_string(enum dwc3_link_state link_state)
case DWC3_LINK_STATE_RESUME:
return "Resume";
default:
- return "UNKNOWN link state\n";
+ return "UNKNOWN link state";
}
}
@@ -141,7 +141,7 @@ dwc3_gadget_hs_link_string(enum dwc3_link_state link_state)
case DWC3_LINK_STATE_RESUME:
return "Resume";
default:
- return "UNKNOWN link state\n";
+ return "UNKNOWN link state";
}
}
diff --git a/drivers/usb/dwc3/debugfs.c b/drivers/usb/dwc3/debugfs.c
index 1c792710348f..4fe8b1e1485c 100644
--- a/drivers/usb/dwc3/debugfs.c
+++ b/drivers/usb/dwc3/debugfs.c
@@ -916,7 +916,7 @@ void dwc3_debugfs_init(struct dwc3 *dwc)
dwc->regset->nregs = ARRAY_SIZE(dwc3_regs);
dwc->regset->base = dwc->regs - DWC3_GLOBALS_REGS_START;
- root = debugfs_create_dir(dev_name(dwc->dev), NULL);
+ root = debugfs_create_dir(dev_name(dwc->dev), usb_debug_root);
dwc->root = root;
debugfs_create_regset32("regdump", S_IRUGO, root, dwc->regset);
diff --git a/drivers/usb/dwc3/dwc3-of-simple.c b/drivers/usb/dwc3/dwc3-of-simple.c
index bdac3e7d7b18..e64754be47b4 100644
--- a/drivers/usb/dwc3/dwc3-of-simple.c
+++ b/drivers/usb/dwc3/dwc3-of-simple.c
@@ -110,12 +110,9 @@ err_resetc_put:
return ret;
}
-static int dwc3_of_simple_remove(struct platform_device *pdev)
+static void __dwc3_of_simple_teardown(struct dwc3_of_simple *simple)
{
- struct dwc3_of_simple *simple = platform_get_drvdata(pdev);
- struct device *dev = &pdev->dev;
-
- of_platform_depopulate(dev);
+ of_platform_depopulate(simple->dev);
clk_bulk_disable_unprepare(simple->num_clocks, simple->clks);
clk_bulk_put_all(simple->num_clocks, simple->clks);
@@ -126,13 +123,27 @@ static int dwc3_of_simple_remove(struct platform_device *pdev)
reset_control_put(simple->resets);
- pm_runtime_disable(dev);
- pm_runtime_put_noidle(dev);
- pm_runtime_set_suspended(dev);
+ pm_runtime_disable(simple->dev);
+ pm_runtime_put_noidle(simple->dev);
+ pm_runtime_set_suspended(simple->dev);
+}
+
+static int dwc3_of_simple_remove(struct platform_device *pdev)
+{
+ struct dwc3_of_simple *simple = platform_get_drvdata(pdev);
+
+ __dwc3_of_simple_teardown(simple);
return 0;
}
+static void dwc3_of_simple_shutdown(struct platform_device *pdev)
+{
+ struct dwc3_of_simple *simple = platform_get_drvdata(pdev);
+
+ __dwc3_of_simple_teardown(simple);
+}
+
static int __maybe_unused dwc3_of_simple_runtime_suspend(struct device *dev)
{
struct dwc3_of_simple *simple = dev_get_drvdata(dev);
@@ -190,6 +201,7 @@ MODULE_DEVICE_TABLE(of, of_dwc3_simple_match);
static struct platform_driver dwc3_of_simple_driver = {
.probe = dwc3_of_simple_probe,
.remove = dwc3_of_simple_remove,
+ .shutdown = dwc3_of_simple_shutdown,
.driver = {
.name = "dwc3-of-simple",
.of_match_table = of_dwc3_simple_match,
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 5ec54b69c29c..3b4f67000315 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -794,9 +794,9 @@ static int set_config(struct usb_composite_dev *cdev,
result = 0;
}
- INFO(cdev, "%s config #%d: %s\n",
- usb_speed_string(gadget->speed),
- number, c ? c->label : "unconfigured");
+ DBG(cdev, "%s config #%d: %s\n",
+ usb_speed_string(gadget->speed),
+ number, c ? c->label : "unconfigured");
if (!c)
goto done;
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index 33852c2b29d1..ab9ac48a751a 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -1544,6 +1544,7 @@ static struct config_group *gadgets_make(
gi->composite.resume = NULL;
gi->composite.max_speed = USB_SPEED_SUPER;
+ spin_lock_init(&gi->spinlock);
mutex_init(&gi->lock);
INIT_LIST_HEAD(&gi->string_list);
INIT_LIST_HEAD(&gi->available_func);
diff --git a/drivers/usb/gadget/function/f_acm.c b/drivers/usb/gadget/function/f_acm.c
index 9fc98de83624..7c152c28b26c 100644
--- a/drivers/usb/gadget/function/f_acm.c
+++ b/drivers/usb/gadget/function/f_acm.c
@@ -771,6 +771,24 @@ static struct configfs_item_operations acm_item_ops = {
.release = acm_attr_release,
};
+#ifdef CONFIG_U_SERIAL_CONSOLE
+
+static ssize_t f_acm_console_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ return gserial_set_console(to_f_serial_opts(item)->port_num,
+ page, count);
+}
+
+static ssize_t f_acm_console_show(struct config_item *item, char *page)
+{
+ return gserial_get_console(to_f_serial_opts(item)->port_num, page);
+}
+
+CONFIGFS_ATTR(f_acm_, console);
+
+#endif /* CONFIG_U_SERIAL_CONSOLE */
+
static ssize_t f_acm_port_num_show(struct config_item *item, char *page)
{
return sprintf(page, "%u\n", to_f_serial_opts(item)->port_num);
@@ -779,6 +797,9 @@ static ssize_t f_acm_port_num_show(struct config_item *item, char *page)
CONFIGFS_ATTR_RO(f_acm_, port_num);
static struct configfs_attribute *acm_attrs[] = {
+#ifdef CONFIG_U_SERIAL_CONSOLE
+ &f_acm_attr_console,
+#endif
&f_acm_attr_port_num,
NULL,
};
diff --git a/drivers/usb/gadget/function/f_obex.c b/drivers/usb/gadget/function/f_obex.c
index 55b7f57d2dc7..ab26d84ed95e 100644
--- a/drivers/usb/gadget/function/f_obex.c
+++ b/drivers/usb/gadget/function/f_obex.c
@@ -432,7 +432,7 @@ static struct usb_function_instance *obex_alloc_inst(void)
return ERR_PTR(-ENOMEM);
opts->func_inst.free_func_inst = obex_free_inst;
- ret = gserial_alloc_line(&opts->port_num);
+ ret = gserial_alloc_line_no_console(&opts->port_num);
if (ret) {
kfree(opts);
return ERR_PTR(ret);
diff --git a/drivers/usb/gadget/function/f_serial.c b/drivers/usb/gadget/function/f_serial.c
index c860f30a0ea2..1406255d0865 100644
--- a/drivers/usb/gadget/function/f_serial.c
+++ b/drivers/usb/gadget/function/f_serial.c
@@ -266,6 +266,24 @@ static struct configfs_item_operations serial_item_ops = {
.release = serial_attr_release,
};
+#ifdef CONFIG_U_SERIAL_CONSOLE
+
+static ssize_t f_serial_console_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ return gserial_set_console(to_f_serial_opts(item)->port_num,
+ page, count);
+}
+
+static ssize_t f_serial_console_show(struct config_item *item, char *page)
+{
+ return gserial_get_console(to_f_serial_opts(item)->port_num, page);
+}
+
+CONFIGFS_ATTR(f_serial_, console);
+
+#endif /* CONFIG_U_SERIAL_CONSOLE */
+
static ssize_t f_serial_port_num_show(struct config_item *item, char *page)
{
return sprintf(page, "%u\n", to_f_serial_opts(item)->port_num);
@@ -274,6 +292,9 @@ static ssize_t f_serial_port_num_show(struct config_item *item, char *page)
CONFIGFS_ATTR_RO(f_serial_, port_num);
static struct configfs_attribute *acm_attrs[] = {
+#ifdef CONFIG_U_SERIAL_CONSOLE
+ &f_serial_attr_console,
+#endif
&f_serial_attr_port_num,
NULL,
};
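[Note: with the console attribute wired up for both f_acm and f_serial, the gadget console becomes switchable per function instance from configfs; the store/show handlers simply forward to gserial_set_console()/gserial_get_console() on the instance's port number. Presumably, under the usual gadget-configfs layout, something like `echo 1 > functions/acm.GS0/console` enables it at runtime and reading the file back reports 0 or 1 (the exact instance path depends on how the gadget was composed).]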
diff --git a/drivers/usb/gadget/function/f_tcm.c b/drivers/usb/gadget/function/f_tcm.c
index 7f01f78b1d23..36504931b2d1 100644
--- a/drivers/usb/gadget/function/f_tcm.c
+++ b/drivers/usb/gadget/function/f_tcm.c
@@ -846,7 +846,7 @@ static void uasp_set_alt(struct f_uas *fu)
fu->flags = USBG_IS_UAS;
- if (gadget->speed == USB_SPEED_SUPER)
+ if (gadget->speed >= USB_SPEED_SUPER)
fu->flags |= USBG_USE_STREAMS;
config_ep_by_speed(gadget, f, fu->ep_in);
@@ -2093,6 +2093,16 @@ static void tcm_delayed_set_alt(struct work_struct *wq)
usb_composite_setup_continue(fu->function.config->cdev);
}
+static int tcm_get_alt(struct usb_function *f, unsigned intf)
+{
+ if (intf == bot_intf_desc.bInterfaceNumber)
+ return USB_G_ALT_INT_BBB;
+ if (intf == uasp_intf_desc.bInterfaceNumber)
+ return USB_G_ALT_INT_UAS;
+
+ return -EOPNOTSUPP;
+}
+
static int tcm_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
{
struct f_uas *fu = to_f_uas(f);
@@ -2300,6 +2310,7 @@ static struct usb_function *tcm_alloc(struct usb_function_instance *fi)
fu->function.bind = tcm_bind;
fu->function.unbind = tcm_unbind;
fu->function.set_alt = tcm_set_alt;
+ fu->function.get_alt = tcm_get_alt;
fu->function.setup = tcm_setup;
fu->function.disable = tcm_disable;
fu->function.free_func = tcm_free;
diff --git a/drivers/usb/gadget/function/u_audio.c b/drivers/usb/gadget/function/u_audio.c
index 56906d15fb55..7ec6a996af26 100644
--- a/drivers/usb/gadget/function/u_audio.c
+++ b/drivers/usb/gadget/function/u_audio.c
@@ -585,7 +585,7 @@ int g_audio_setup(struct g_audio *g_audio, const char *pcm_name,
sprintf(card->longname, "%s %i", card_name, card->dev->id);
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS,
- snd_dma_continuous_data(GFP_KERNEL), 0, BUFF_SIZE_MAX);
+ NULL, 0, BUFF_SIZE_MAX);
err = snd_card_register(card);
diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
index 65f634ec7fc2..f986e5c55974 100644
--- a/drivers/usb/gadget/function/u_serial.c
+++ b/drivers/usb/gadget/function/u_serial.c
@@ -82,14 +82,13 @@
#define GS_CONSOLE_BUF_SIZE 8192
/* console info */
-struct gscons_info {
- struct gs_port *port;
- struct task_struct *console_thread;
- struct kfifo con_buf;
- /* protect the buf and busy flag */
- spinlock_t con_lock;
- int req_busy;
- struct usb_request *console_req;
+struct gs_console {
+ struct console console;
+ struct work_struct work;
+ spinlock_t lock;
+ struct usb_request *req;
+ struct kfifo buf;
+ size_t missed;
};
/*
@@ -101,8 +100,10 @@ struct gs_port {
spinlock_t port_lock; /* guard port_* access */
struct gserial *port_usb;
+#ifdef CONFIG_U_SERIAL_CONSOLE
+ struct gs_console *console;
+#endif
- bool openclose; /* open/close in progress */
u8 port_num;
struct list_head read_pool;
@@ -586,82 +587,45 @@ static int gs_open(struct tty_struct *tty, struct file *file)
{
int port_num = tty->index;
struct gs_port *port;
- int status;
-
- do {
- mutex_lock(&ports[port_num].lock);
- port = ports[port_num].port;
- if (!port)
- status = -ENODEV;
- else {
- spin_lock_irq(&port->port_lock);
-
- /* already open? Great. */
- if (port->port.count) {
- status = 0;
- port->port.count++;
-
- /* currently opening/closing? wait ... */
- } else if (port->openclose) {
- status = -EBUSY;
-
- /* ... else we do the work */
- } else {
- status = -EAGAIN;
- port->openclose = true;
- }
- spin_unlock_irq(&port->port_lock);
- }
- mutex_unlock(&ports[port_num].lock);
+ int status = 0;
- switch (status) {
- default:
- /* fully handled */
- return status;
- case -EAGAIN:
- /* must do the work */
- break;
- case -EBUSY:
- /* wait for EAGAIN task to finish */
- msleep(1);
- /* REVISIT could have a waitchannel here, if
- * concurrent open performance is important
- */
- break;
- }
- } while (status != -EAGAIN);
+ mutex_lock(&ports[port_num].lock);
+ port = ports[port_num].port;
+ if (!port) {
+ status = -ENODEV;
+ goto out;
+ }
- /* Do the "real open" */
spin_lock_irq(&port->port_lock);
/* allocate circular buffer on first open */
if (!kfifo_initialized(&port->port_write_buf)) {
spin_unlock_irq(&port->port_lock);
+
+ /*
+ * portmaster's mutex still protects from simultaneous open(),
+ * and close() can't happen, yet.
+ */
+
status = kfifo_alloc(&port->port_write_buf,
WRITE_BUF_SIZE, GFP_KERNEL);
- spin_lock_irq(&port->port_lock);
-
if (status) {
pr_debug("gs_open: ttyGS%d (%p,%p) no buffer\n",
- port->port_num, tty, file);
- port->openclose = false;
- goto exit_unlock_port;
+ port_num, tty, file);
+ goto out;
}
- }
- /* REVISIT if REMOVED (ports[].port NULL), abort the open
- * to let rmmod work faster (but this way isn't wrong).
- */
+ spin_lock_irq(&port->port_lock);
+ }
- /* REVISIT maybe wait for "carrier detect" */
+ /* already open? Great. */
+ if (port->port.count++)
+ goto exit_unlock_port;
tty->driver_data = port;
port->port.tty = tty;
- port->port.count = 1;
- port->openclose = false;
-
/* if connected, start the I/O stream */
if (port->port_usb) {
struct gserial *gser = port->port_usb;
@@ -675,20 +639,21 @@ static int gs_open(struct tty_struct *tty, struct file *file)
pr_debug("gs_open: ttyGS%d (%p,%p)\n", port->port_num, tty, file);
- status = 0;
-
exit_unlock_port:
spin_unlock_irq(&port->port_lock);
+out:
+ mutex_unlock(&ports[port_num].lock);
return status;
}
-static int gs_writes_finished(struct gs_port *p)
+static int gs_close_flush_done(struct gs_port *p)
{
int cond;
- /* return true on disconnect or empty buffer */
+ /* return true on disconnect or empty buffer or if raced with open() */
spin_lock_irq(&p->port_lock);
- cond = (p->port_usb == NULL) || !kfifo_len(&p->port_write_buf);
+ cond = p->port_usb == NULL || !kfifo_len(&p->port_write_buf) ||
+ p->port.count > 1;
spin_unlock_irq(&p->port_lock);
return cond;
@@ -702,6 +667,7 @@ static void gs_close(struct tty_struct *tty, struct file *file)
spin_lock_irq(&port->port_lock);
if (port->port.count != 1) {
+raced_with_open:
if (port->port.count == 0)
WARN_ON(1);
else
@@ -711,12 +677,6 @@ static void gs_close(struct tty_struct *tty, struct file *file)
pr_debug("gs_close: ttyGS%d (%p,%p) ...\n", port->port_num, tty, file);
- /* mark port as closing but in use; we can drop port lock
- * and sleep if necessary
- */
- port->openclose = true;
- port->port.count = 0;
-
gser = port->port_usb;
if (gser && gser->disconnect)
gser->disconnect(gser);
@@ -727,9 +687,13 @@ static void gs_close(struct tty_struct *tty, struct file *file)
if (kfifo_len(&port->port_write_buf) > 0 && gser) {
spin_unlock_irq(&port->port_lock);
wait_event_interruptible_timeout(port->drain_wait,
- gs_writes_finished(port),
+ gs_close_flush_done(port),
GS_CLOSE_TIMEOUT * HZ);
spin_lock_irq(&port->port_lock);
+
+ if (port->port.count != 1)
+ goto raced_with_open;
+
gser = port->port_usb;
}
@@ -742,10 +706,9 @@ static void gs_close(struct tty_struct *tty, struct file *file)
else
kfifo_reset(&port->port_write_buf);
+ port->port.count = 0;
port->port.tty = NULL;
- port->openclose = false;
-
pr_debug("gs_close: ttyGS%d (%p,%p) done!\n",
port->port_num, tty, file);
@@ -889,36 +852,9 @@ static struct tty_driver *gs_tty_driver;
#ifdef CONFIG_U_SERIAL_CONSOLE
-static struct gscons_info gscons_info;
-static struct console gserial_cons;
-
-static struct usb_request *gs_request_new(struct usb_ep *ep)
-{
- struct usb_request *req = usb_ep_alloc_request(ep, GFP_ATOMIC);
- if (!req)
- return NULL;
-
- req->buf = kmalloc(ep->maxpacket, GFP_ATOMIC);
- if (!req->buf) {
- usb_ep_free_request(ep, req);
- return NULL;
- }
-
- return req;
-}
-
-static void gs_request_free(struct usb_request *req, struct usb_ep *ep)
+static void gs_console_complete_out(struct usb_ep *ep, struct usb_request *req)
{
- if (!req)
- return;
-
- kfree(req->buf);
- usb_ep_free_request(ep, req);
-}
-
-static void gs_complete_out(struct usb_ep *ep, struct usb_request *req)
-{
- struct gscons_info *info = &gscons_info;
+ struct gs_console *cons = req->context;
switch (req->status) {
default:
@@ -927,12 +863,12 @@ static void gs_complete_out(struct usb_ep *ep, struct usb_request *req)
/* fall through */
case 0:
/* normal completion */
- spin_lock(&info->con_lock);
- info->req_busy = 0;
- spin_unlock(&info->con_lock);
-
- wake_up_process(info->console_thread);
+ spin_lock(&cons->lock);
+ req->length = 0;
+ schedule_work(&cons->work);
+ spin_unlock(&cons->lock);
break;
+ case -ECONNRESET:
case -ESHUTDOWN:
/* disconnect */
pr_vdebug("%s: %s shutdown\n", __func__, ep->name);
@@ -940,190 +876,250 @@ static void gs_complete_out(struct usb_ep *ep, struct usb_request *req)
}
}
-static int gs_console_connect(int port_num)
+static void __gs_console_push(struct gs_console *cons)
{
- struct gscons_info *info = &gscons_info;
- struct gs_port *port;
+ struct usb_request *req = cons->req;
struct usb_ep *ep;
+ size_t size;
- if (port_num != gserial_cons.index) {
- pr_err("%s: port num [%d] is not support console\n",
- __func__, port_num);
- return -ENXIO;
- }
+ if (!req)
+ return; /* disconnected */
- port = ports[port_num].port;
- ep = port->port_usb->in;
- if (!info->console_req) {
- info->console_req = gs_request_new(ep);
- if (!info->console_req)
- return -ENOMEM;
- info->console_req->complete = gs_complete_out;
+ if (req->length)
+ return; /* busy */
+
+ ep = cons->console.data;
+ size = kfifo_out(&cons->buf, req->buf, ep->maxpacket);
+ if (!size)
+ return;
+
+ if (cons->missed && ep->maxpacket >= 64) {
+ char buf[64];
+ size_t len;
+
+ len = sprintf(buf, "\n[missed %zu bytes]\n", cons->missed);
+ kfifo_in(&cons->buf, buf, len);
+ cons->missed = 0;
}
- info->port = port;
- spin_lock(&info->con_lock);
- info->req_busy = 0;
- spin_unlock(&info->con_lock);
- pr_vdebug("port[%d] console connect!\n", port_num);
- return 0;
+ req->length = size;
+ if (usb_ep_queue(ep, req, GFP_ATOMIC))
+ req->length = 0;
}
-static void gs_console_disconnect(struct usb_ep *ep)
+static void gs_console_work(struct work_struct *work)
{
- struct gscons_info *info = &gscons_info;
- struct usb_request *req = info->console_req;
+ struct gs_console *cons = container_of(work, struct gs_console, work);
+
+ spin_lock_irq(&cons->lock);
- gs_request_free(req, ep);
- info->console_req = NULL;
+ __gs_console_push(cons);
+
+ spin_unlock_irq(&cons->lock);
}
-static int gs_console_thread(void *data)
+static void gs_console_write(struct console *co,
+ const char *buf, unsigned count)
{
- struct gscons_info *info = &gscons_info;
- struct gs_port *port;
+ struct gs_console *cons = container_of(co, struct gs_console, console);
+ unsigned long flags;
+ size_t n;
+
+ spin_lock_irqsave(&cons->lock, flags);
+
+ n = kfifo_in(&cons->buf, buf, count);
+ if (n < count)
+ cons->missed += count - n;
+
+ if (cons->req && !cons->req->length)
+ schedule_work(&cons->work);
+
+ spin_unlock_irqrestore(&cons->lock, flags);
+}
+
+static struct tty_driver *gs_console_device(struct console *co, int *index)
+{
+ *index = co->index;
+ return gs_tty_driver;
+}
+
+static int gs_console_connect(struct gs_port *port)
+{
+ struct gs_console *cons = port->console;
struct usb_request *req;
struct usb_ep *ep;
- int xfer, ret, count, size;
-
- do {
- port = info->port;
- set_current_state(TASK_INTERRUPTIBLE);
- if (!port || !port->port_usb
- || !port->port_usb->in || !info->console_req)
- goto sched;
-
- req = info->console_req;
- ep = port->port_usb->in;
-
- spin_lock_irq(&info->con_lock);
- count = kfifo_len(&info->con_buf);
- size = ep->maxpacket;
-
- if (count > 0 && !info->req_busy) {
- set_current_state(TASK_RUNNING);
- if (count < size)
- size = count;
-
- xfer = kfifo_out(&info->con_buf, req->buf, size);
- req->length = xfer;
-
- spin_unlock(&info->con_lock);
- ret = usb_ep_queue(ep, req, GFP_ATOMIC);
- spin_lock(&info->con_lock);
- if (ret < 0)
- info->req_busy = 0;
- else
- info->req_busy = 1;
-
- spin_unlock_irq(&info->con_lock);
- } else {
- spin_unlock_irq(&info->con_lock);
-sched:
- if (kthread_should_stop()) {
- set_current_state(TASK_RUNNING);
- break;
- }
- schedule();
- }
- } while (1);
+
+ if (!cons)
+ return 0;
+
+ ep = port->port_usb->in;
+ req = gs_alloc_req(ep, ep->maxpacket, GFP_ATOMIC);
+ if (!req)
+ return -ENOMEM;
+ req->complete = gs_console_complete_out;
+ req->context = cons;
+ req->length = 0;
+
+ spin_lock(&cons->lock);
+ cons->req = req;
+ cons->console.data = ep;
+ spin_unlock(&cons->lock);
+
+ pr_debug("ttyGS%d: console connected!\n", port->port_num);
+
+ schedule_work(&cons->work);
return 0;
}
-static int gs_console_setup(struct console *co, char *options)
+static void gs_console_disconnect(struct gs_port *port)
{
- struct gscons_info *info = &gscons_info;
- int status;
+ struct gs_console *cons = port->console;
+ struct usb_request *req;
+ struct usb_ep *ep;
+
+ if (!cons)
+ return;
- info->port = NULL;
- info->console_req = NULL;
- info->req_busy = 0;
- spin_lock_init(&info->con_lock);
+ spin_lock(&cons->lock);
- status = kfifo_alloc(&info->con_buf, GS_CONSOLE_BUF_SIZE, GFP_KERNEL);
- if (status) {
- pr_err("%s: allocate console buffer failed\n", __func__);
- return status;
- }
+ req = cons->req;
+ ep = cons->console.data;
+ cons->req = NULL;
+
+ spin_unlock(&cons->lock);
- info->console_thread = kthread_create(gs_console_thread,
- co, "gs_console");
- if (IS_ERR(info->console_thread)) {
- pr_err("%s: cannot create console thread\n", __func__);
- kfifo_free(&info->con_buf);
- return PTR_ERR(info->console_thread);
+ if (!req)
+ return;
+
+ usb_ep_dequeue(ep, req);
+ gs_free_req(ep, req);
+}
+
+static int gs_console_init(struct gs_port *port)
+{
+ struct gs_console *cons;
+ int err;
+
+ if (port->console)
+ return 0;
+
+ cons = kzalloc(sizeof(*port->console), GFP_KERNEL);
+ if (!cons)
+ return -ENOMEM;
+
+ strcpy(cons->console.name, "ttyGS");
+ cons->console.write = gs_console_write;
+ cons->console.device = gs_console_device;
+ cons->console.flags = CON_PRINTBUFFER;
+ cons->console.index = port->port_num;
+
+ INIT_WORK(&cons->work, gs_console_work);
+ spin_lock_init(&cons->lock);
+
+ err = kfifo_alloc(&cons->buf, GS_CONSOLE_BUF_SIZE, GFP_KERNEL);
+ if (err) {
+ pr_err("ttyGS%d: allocate console buffer failed\n", port->port_num);
+ kfree(cons);
+ return err;
}
- wake_up_process(info->console_thread);
+
+ port->console = cons;
+ register_console(&cons->console);
+
+ spin_lock_irq(&port->port_lock);
+ if (port->port_usb)
+ gs_console_connect(port);
+ spin_unlock_irq(&port->port_lock);
return 0;
}
-static void gs_console_write(struct console *co,
- const char *buf, unsigned count)
+static void gs_console_exit(struct gs_port *port)
{
- struct gscons_info *info = &gscons_info;
- unsigned long flags;
+ struct gs_console *cons = port->console;
+
+ if (!cons)
+ return;
+
+ unregister_console(&cons->console);
- spin_lock_irqsave(&info->con_lock, flags);
- kfifo_in(&info->con_buf, buf, count);
- spin_unlock_irqrestore(&info->con_lock, flags);
+ spin_lock_irq(&port->port_lock);
+ if (cons->req)
+ gs_console_disconnect(port);
+ spin_unlock_irq(&port->port_lock);
- wake_up_process(info->console_thread);
+ cancel_work_sync(&cons->work);
+ kfifo_free(&cons->buf);
+ kfree(cons);
+ port->console = NULL;
}
-static struct tty_driver *gs_console_device(struct console *co, int *index)
+ssize_t gserial_set_console(unsigned char port_num, const char *page, size_t count)
{
- struct tty_driver **p = (struct tty_driver **)co->data;
+ struct gs_port *port;
+ bool enable;
+ int ret;
- if (!*p)
- return NULL;
+ ret = strtobool(page, &enable);
+ if (ret)
+ return ret;
- *index = co->index;
- return *p;
-}
+ mutex_lock(&ports[port_num].lock);
+ port = ports[port_num].port;
-static struct console gserial_cons = {
- .name = "ttyGS",
- .write = gs_console_write,
- .device = gs_console_device,
- .setup = gs_console_setup,
- .flags = CON_PRINTBUFFER,
- .index = -1,
- .data = &gs_tty_driver,
-};
+ if (WARN_ON(port == NULL)) {
+ ret = -ENXIO;
+ goto out;
+ }
-static void gserial_console_init(void)
-{
- register_console(&gserial_cons);
+ if (enable)
+ ret = gs_console_init(port);
+ else
+ gs_console_exit(port);
+out:
+ mutex_unlock(&ports[port_num].lock);
+
+ return ret < 0 ? ret : count;
}
+EXPORT_SYMBOL_GPL(gserial_set_console);
-static void gserial_console_exit(void)
+ssize_t gserial_get_console(unsigned char port_num, char *page)
{
- struct gscons_info *info = &gscons_info;
+ struct gs_port *port;
+ ssize_t ret;
- unregister_console(&gserial_cons);
- if (!IS_ERR_OR_NULL(info->console_thread))
- kthread_stop(info->console_thread);
- kfifo_free(&info->con_buf);
+ mutex_lock(&ports[port_num].lock);
+ port = ports[port_num].port;
+
+ if (WARN_ON(port == NULL))
+ ret = -ENXIO;
+ else
+ ret = sprintf(page, "%u\n", !!port->console);
+
+ mutex_unlock(&ports[port_num].lock);
+
+ return ret;
}
+EXPORT_SYMBOL_GPL(gserial_get_console);
#else
-static int gs_console_connect(int port_num)
+static int gs_console_connect(struct gs_port *port)
{
return 0;
}
-static void gs_console_disconnect(struct usb_ep *ep)
+static void gs_console_disconnect(struct gs_port *port)
{
}
-static void gserial_console_init(void)
+static int gs_console_init(struct gs_port *port)
{
+ return -ENOSYS;
}
-static void gserial_console_exit(void)
+static void gs_console_exit(struct gs_port *port)
{
}
@@ -1172,8 +1168,9 @@ static int gs_closed(struct gs_port *port)
int cond;
spin_lock_irq(&port->port_lock);
- cond = (port->port.count == 0) && !port->openclose;
+ cond = port->port.count == 0;
spin_unlock_irq(&port->port_lock);
+
return cond;
}
@@ -1197,18 +1194,19 @@ void gserial_free_line(unsigned char port_num)
return;
}
port = ports[port_num].port;
+ gs_console_exit(port);
ports[port_num].port = NULL;
mutex_unlock(&ports[port_num].lock);
gserial_free_port(port);
tty_unregister_device(gs_tty_driver, port_num);
- gserial_console_exit();
}
EXPORT_SYMBOL_GPL(gserial_free_line);
-int gserial_alloc_line(unsigned char *line_num)
+int gserial_alloc_line_no_console(unsigned char *line_num)
{
struct usb_cdc_line_coding coding;
+ struct gs_port *port;
struct device *tty_dev;
int ret;
int port_num;
@@ -1231,24 +1229,35 @@ int gserial_alloc_line(unsigned char *line_num)
/* ... and sysfs class devices, so mdev/udev make /dev/ttyGS* */
- tty_dev = tty_port_register_device(&ports[port_num].port->port,
+ port = ports[port_num].port;
+ tty_dev = tty_port_register_device(&port->port,
gs_tty_driver, port_num, NULL);
if (IS_ERR(tty_dev)) {
- struct gs_port *port;
pr_err("%s: failed to register tty for port %d, err %ld\n",
__func__, port_num, PTR_ERR(tty_dev));
ret = PTR_ERR(tty_dev);
- port = ports[port_num].port;
+ mutex_lock(&ports[port_num].lock);
ports[port_num].port = NULL;
+ mutex_unlock(&ports[port_num].lock);
gserial_free_port(port);
goto err;
}
*line_num = port_num;
- gserial_console_init();
err:
return ret;
}
+EXPORT_SYMBOL_GPL(gserial_alloc_line_no_console);
+
+int gserial_alloc_line(unsigned char *line_num)
+{
+ int ret = gserial_alloc_line_no_console(line_num);
+
+ if (!ret && !*line_num)
+ gs_console_init(ports[*line_num].port);
+
+ return ret;
+}
EXPORT_SYMBOL_GPL(gserial_alloc_line);
/**
@@ -1327,7 +1336,7 @@ int gserial_connect(struct gserial *gser, u8 port_num)
gser->disconnect(gser);
}
- status = gs_console_connect(port_num);
+ status = gs_console_connect(port);
spin_unlock_irqrestore(&port->port_lock, flags);
return status;
@@ -1359,12 +1368,14 @@ void gserial_disconnect(struct gserial *gser)
/* tell the TTY glue not to do I/O here any more */
spin_lock_irqsave(&port->port_lock, flags);
+ gs_console_disconnect(port);
+
/* REVISIT as above: how best to track this? */
port->port_line_coding = gser->port_line_coding;
port->port_usb = NULL;
gser->ioport = NULL;
- if (port->port.count > 0 || port->openclose) {
+ if (port->port.count > 0) {
wake_up_interruptible(&port->drain_wait);
if (port->port.tty)
tty_hangup(port->port.tty);
@@ -1377,7 +1388,7 @@ void gserial_disconnect(struct gserial *gser)
/* finally, free any unused/unusable I/O buffers */
spin_lock_irqsave(&port->port_lock, flags);
- if (port->port.count == 0 && !port->openclose)
+ if (port->port.count == 0)
kfifo_free(&port->port_write_buf);
gs_free_requests(gser->out, &port->read_pool, NULL);
gs_free_requests(gser->out, &port->read_queue, NULL);
@@ -1386,7 +1397,6 @@ void gserial_disconnect(struct gserial *gser)
port->read_allocated = port->read_started =
port->write_allocated = port->write_started = 0;
- gs_console_disconnect(gser->in);
spin_unlock_irqrestore(&port->port_lock, flags);
}
EXPORT_SYMBOL_GPL(gserial_disconnect);
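[Note: the console rework above trades the old kthread-plus-req_busy design for a single preallocated request driven by a work item, with cons->lock guarding both the kfifo and the request state. A condensed sketch of the producer side, under the assumptions visible in the diff:

	/* console ->write hook: callable from any context */
	static void sketch_console_write(struct gs_console *cons,
					 const char *buf, unsigned int count)
	{
		unsigned long flags;
		unsigned int n;

		spin_lock_irqsave(&cons->lock, flags);
		n = kfifo_in(&cons->buf, buf, count);
		if (n < count)
			cons->missed += count - n;	/* dropped bytes, reported later */
		if (cons->req && !cons->req->length)	/* request idle: kick the worker */
			schedule_work(&cons->work);
		spin_unlock_irqrestore(&cons->lock, flags);
	}

On the consumer side, gs_console_work() drains the FIFO into req->buf and calls usb_ep_queue(); the completion handler zeroes req->length and reschedules the work, so one request cycles until the FIFO is empty. Note also that gserial_alloc_line() now auto-creates a console only for port 0 (the !*line_num check); other ports opt in through the new configfs attribute.]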
diff --git a/drivers/usb/gadget/function/u_serial.h b/drivers/usb/gadget/function/u_serial.h
index 9acaac1cbb75..e5b08ab8cf7a 100644
--- a/drivers/usb/gadget/function/u_serial.h
+++ b/drivers/usb/gadget/function/u_serial.h
@@ -54,9 +54,17 @@ struct usb_request *gs_alloc_req(struct usb_ep *ep, unsigned len, gfp_t flags);
void gs_free_req(struct usb_ep *, struct usb_request *req);
/* management of individual TTY ports */
+int gserial_alloc_line_no_console(unsigned char *port_line);
int gserial_alloc_line(unsigned char *port_line);
void gserial_free_line(unsigned char port_line);
+#ifdef CONFIG_U_SERIAL_CONSOLE
+
+ssize_t gserial_set_console(unsigned char port_num, const char *page, size_t count);
+ssize_t gserial_get_console(unsigned char port_num, char *page);
+
+#endif /* CONFIG_U_SERIAL_CONSOLE */
+
/* connect/disconnect is handled by individual functions */
int gserial_connect(struct gserial *, u8 port_num);
void gserial_disconnect(struct gserial *);
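[Note: a hypothetical in-kernel caller of the new pair, for illustration only; the port must already exist and error handling is elided:

	char page[8];
	ssize_t ret;

	ret = gserial_set_console(0, "1\n", 2);		/* enable console on ttyGS0 */
	if (ret >= 0)
		ret = gserial_get_console(0, page);	/* page now holds "1\n" */
]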
diff --git a/drivers/usb/gadget/legacy/Kconfig b/drivers/usb/gadget/legacy/Kconfig
index 69ff7f8c86f5..119a4e47681f 100644
--- a/drivers/usb/gadget/legacy/Kconfig
+++ b/drivers/usb/gadget/legacy/Kconfig
@@ -149,21 +149,21 @@ config USB_ETH_RNDIS
is given in comments found in that info file.
config USB_ETH_EEM
- bool "Ethernet Emulation Model (EEM) support"
- depends on USB_ETH
+ bool "Ethernet Emulation Model (EEM) support"
+ depends on USB_ETH
select USB_LIBCOMPOSITE
select USB_F_EEM
- help
- CDC EEM is a newer USB standard that is somewhat simpler than CDC ECM
- and therefore can be supported by more hardware. Technically ECM and
- EEM are designed for different applications. The ECM model extends
- the network interface to the target (e.g. a USB cable modem), and the
- EEM model is for mobile devices to communicate with hosts using
- ethernet over USB. For Linux gadgets, however, the interface with
- the host is the same (a usbX device), so the differences are minimal.
-
- If you say "y" here, the Ethernet gadget driver will use the EEM
- protocol rather than ECM. If unsure, say "n".
+ help
+ CDC EEM is a newer USB standard that is somewhat simpler than CDC ECM
+ and therefore can be supported by more hardware. Technically ECM and
+ EEM are designed for different applications. The ECM model extends
+ the network interface to the target (e.g. a USB cable modem), and the
+ EEM model is for mobile devices to communicate with hosts using
+ ethernet over USB. For Linux gadgets, however, the interface with
+ the host is the same (a usbX device), so the differences are minimal.
+
+ If you say "y" here, the Ethernet gadget driver will use the EEM
+ protocol rather than ECM. If unsure, say "n".
config USB_G_NCM
tristate "Network Control Model (NCM) support"
diff --git a/drivers/usb/gadget/legacy/acm_ms.c b/drivers/usb/gadget/legacy/acm_ms.c
index af16672d5118..59be2d8417c9 100644
--- a/drivers/usb/gadget/legacy/acm_ms.c
+++ b/drivers/usb/gadget/legacy/acm_ms.c
@@ -105,7 +105,6 @@ static struct usb_function *f_msg;
*/
static int acm_ms_do_config(struct usb_configuration *c)
{
- struct fsg_opts *opts;
int status;
if (gadget_is_otg(c->cdev->gadget)) {
@@ -113,8 +112,6 @@ static int acm_ms_do_config(struct usb_configuration *c)
c->bmAttributes |= USB_CONFIG_ATT_WAKEUP;
}
- opts = fsg_opts_from_func_inst(fi_msg);
-
f_acm = usb_get_function(f_acm_inst);
if (IS_ERR(f_acm))
return PTR_ERR(f_acm);
diff --git a/drivers/usb/gadget/legacy/mass_storage.c b/drivers/usb/gadget/legacy/mass_storage.c
index fd5595ac5bf7..f18f77584fc2 100644
--- a/drivers/usb/gadget/legacy/mass_storage.c
+++ b/drivers/usb/gadget/legacy/mass_storage.c
@@ -105,7 +105,6 @@ FSG_MODULE_PARAMETERS(/* no prefix */, mod_data);
static int msg_do_config(struct usb_configuration *c)
{
- struct fsg_opts *opts;
int ret;
if (gadget_is_otg(c->cdev->gadget)) {
@@ -113,8 +112,6 @@ static int msg_do_config(struct usb_configuration *c)
c->bmAttributes |= USB_CONFIG_ATT_WAKEUP;
}
- opts = fsg_opts_from_func_inst(fi_msg);
-
f_msg = usb_get_function(fi_msg);
if (IS_ERR(f_msg))
return PTR_ERR(f_msg);
diff --git a/drivers/usb/gadget/legacy/serial.c b/drivers/usb/gadget/legacy/serial.c
index de30d7628eef..da44f89f5e73 100644
--- a/drivers/usb/gadget/legacy/serial.c
+++ b/drivers/usb/gadget/legacy/serial.c
@@ -97,6 +97,36 @@ static unsigned n_ports = 1;
module_param(n_ports, uint, 0);
MODULE_PARM_DESC(n_ports, "number of ports to create, default=1");
+static bool enable = true;
+
+static int switch_gserial_enable(bool do_enable);
+
+static int enable_set(const char *s, const struct kernel_param *kp)
+{
+ bool do_enable;
+ int ret;
+
+ if (!s) /* called for no-arg enable == default */
+ return 0;
+
+ ret = strtobool(s, &do_enable);
+ if (ret || enable == do_enable)
+ return ret;
+
+ ret = switch_gserial_enable(do_enable);
+ if (!ret)
+ enable = do_enable;
+
+ return ret;
+}
+
+static const struct kernel_param_ops enable_ops = {
+ .set = enable_set,
+ .get = param_get_bool,
+};
+
+module_param_cb(enable, &enable_ops, &enable, 0644);
+
/*-------------------------------------------------------------------------*/
static struct usb_configuration serial_config_driver = {
@@ -240,6 +270,19 @@ static struct usb_composite_driver gserial_driver = {
.unbind = gs_unbind,
};
+static int switch_gserial_enable(bool do_enable)
+{
+ if (!serial_config_driver.label)
+ /* init() was not called, yet */
+ return 0;
+
+ if (do_enable)
+ return usb_composite_probe(&gserial_driver);
+
+ usb_composite_unregister(&gserial_driver);
+ return 0;
+}
+
static int __init init(void)
{
/* We *could* export two configs; that'd be much cleaner...
@@ -266,12 +309,16 @@ static int __init init(void)
}
strings_dev[STRING_DESCRIPTION_IDX].s = serial_config_driver.label;
+ if (!enable)
+ return 0;
+
return usb_composite_probe(&gserial_driver);
}
module_init(init);
static void __exit cleanup(void)
{
- usb_composite_unregister(&gserial_driver);
+ if (enable)
+ usb_composite_unregister(&gserial_driver);
}
module_exit(cleanup);
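[Note: the new enable parameter lets the legacy module load without immediately registering the composite driver: presumably `modprobe g_serial enable=0` defers binding, and since module_param_cb() creates the attribute with mode 0644, a later write to /sys/module/g_serial/parameters/enable invokes switch_gserial_enable() to register or unregister gserial_driver on the fly (a no-op before init() has filled in serial_config_driver.label).]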
diff --git a/drivers/usb/gadget/udc/Kconfig b/drivers/usb/gadget/udc/Kconfig
index d354036ff6c8..ae70ce29d5e4 100644
--- a/drivers/usb/gadget/udc/Kconfig
+++ b/drivers/usb/gadget/udc/Kconfig
@@ -120,10 +120,10 @@ config USB_FOTG210_UDC
dynamically linked module called "fotg210_udc".
config USB_GR_UDC
- tristate "Aeroflex Gaisler GRUSBDC USB Peripheral Controller Driver"
- depends on HAS_DMA
- help
- Select this to support Aeroflex Gaisler GRUSBDC cores from the GRLIB
+ tristate "Aeroflex Gaisler GRUSBDC USB Peripheral Controller Driver"
+ depends on HAS_DMA
+ help
+ Select this to support Aeroflex Gaisler GRUSBDC cores from the GRLIB
VHDL IP core library.
config USB_OMAP
@@ -441,6 +441,17 @@ config USB_GADGET_XILINX
dynamically linked module called "udc-xilinx" and force all
gadget drivers to also be dynamically linked.
+config USB_TEGRA_XUDC
+ tristate "NVIDIA Tegra Superspeed USB 3.0 Device Controller"
+ depends on ARCH_TEGRA || COMPILE_TEST
+ depends on PHY_TEGRA_XUSB
+ help
+ Enables NVIDIA Tegra USB 3.0 device mode controller driver.
+
+ Say "y" to link the driver statically, or "m" to build a
+ dynamically linked module called "tegra_xudc" and force all
+ gadget drivers to also be dynamically linked.
+
source "drivers/usb/gadget/udc/aspeed-vhub/Kconfig"
#
diff --git a/drivers/usb/gadget/udc/Makefile b/drivers/usb/gadget/udc/Makefile
index 897f648f3cf1..f6777e654a8e 100644
--- a/drivers/usb/gadget/udc/Makefile
+++ b/drivers/usb/gadget/udc/Makefile
@@ -24,6 +24,7 @@ obj-$(CONFIG_USB_BCM63XX_UDC) += bcm63xx_udc.o
obj-$(CONFIG_USB_FSL_USB2) += fsl_usb2_udc.o
fsl_usb2_udc-y := fsl_udc_core.o
fsl_usb2_udc-$(CONFIG_ARCH_MXC) += fsl_mxc_udc.o
+obj-$(CONFIG_USB_TEGRA_XUDC) += tegra-xudc.o
obj-$(CONFIG_USB_M66592) += m66592-udc.o
obj-$(CONFIG_USB_R8A66597) += r8a66597-udc.o
obj-$(CONFIG_USB_RENESAS_USB3) += renesas_usb3.o
diff --git a/drivers/usb/gadget/udc/at91_udc.c b/drivers/usb/gadget/udc/at91_udc.c
index 194ffb1ed462..1b2b548c59a0 100644
--- a/drivers/usb/gadget/udc/at91_udc.c
+++ b/drivers/usb/gadget/udc/at91_udc.c
@@ -1808,7 +1808,6 @@ static int at91udc_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct at91_udc *udc;
int retval;
- struct resource *res;
struct at91_ep *ep;
int i;
@@ -1839,8 +1838,7 @@ static int at91udc_probe(struct platform_device *pdev)
ep->is_pingpong = 1;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- udc->udp_baseaddr = devm_ioremap_resource(dev, res);
+ udc->udp_baseaddr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(udc->udp_baseaddr))
return PTR_ERR(udc->udp_baseaddr);
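[Note: this and the following probe() cleanups lean on devm_platform_ioremap_resource(), which folds the get-resource/ioremap pair into one call. The equivalence these conversions assume, as a sketch:

	void __iomem *base;
	struct resource *res;

	/* before */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);

	/* after */
	base = devm_platform_ioremap_resource(pdev, 0);
]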
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
index 1d0d8952a74b..8a42768e3213 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.c
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
@@ -18,6 +18,7 @@
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/ctype.h>
+#include <linux/usb.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/delay.h>
@@ -226,7 +227,7 @@ static void usba_init_debugfs(struct usba_udc *udc)
struct dentry *root;
struct resource *regs_resource;
- root = debugfs_create_dir(udc->gadget.name, NULL);
+ root = debugfs_create_dir(udc->gadget.name, usb_debug_root);
udc->debugfs_root = root;
regs_resource = platform_get_resource(udc->pdev, IORESOURCE_MEM,
diff --git a/drivers/usb/gadget/udc/bcm63xx_udc.c b/drivers/usb/gadget/udc/bcm63xx_udc.c
index 97b16463f3ef..54501814dc3f 100644
--- a/drivers/usb/gadget/udc/bcm63xx_udc.c
+++ b/drivers/usb/gadget/udc/bcm63xx_udc.c
@@ -2248,7 +2248,7 @@ static void bcm63xx_udc_init_debugfs(struct bcm63xx_udc *udc)
if (!IS_ENABLED(CONFIG_USB_GADGET_DEBUG_FS))
return;
- root = debugfs_create_dir(udc->gadget.name, NULL);
+ root = debugfs_create_dir(udc->gadget.name, usb_debug_root);
udc->debugfs_root = root;
debugfs_create_file("usbd", 0400, root, udc, &bcm63xx_usbd_dbg_fops);
@@ -2282,7 +2282,6 @@ static int bcm63xx_udc_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct bcm63xx_usbd_platform_data *pd = dev_get_platdata(dev);
struct bcm63xx_udc *udc;
- struct resource *res;
int rc = -ENOMEM, i, irq;
udc = devm_kzalloc(dev, sizeof(*udc), GFP_KERNEL);
@@ -2298,13 +2297,11 @@ static int bcm63xx_udc_probe(struct platform_device *pdev)
return -EINVAL;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- udc->usbd_regs = devm_ioremap_resource(dev, res);
+ udc->usbd_regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(udc->usbd_regs))
return PTR_ERR(udc->usbd_regs);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- udc->iudma_regs = devm_ioremap_resource(dev, res);
+ udc->iudma_regs = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(udc->iudma_regs))
return PTR_ERR(udc->iudma_regs);
diff --git a/drivers/usb/gadget/udc/bdc/bdc_core.c b/drivers/usb/gadget/udc/bdc/bdc_core.c
index cc4a16e253ac..02a3a774670b 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_core.c
+++ b/drivers/usb/gadget/udc/bdc/bdc_core.c
@@ -480,7 +480,6 @@ static void bdc_phy_exit(struct bdc *bdc)
static int bdc_probe(struct platform_device *pdev)
{
struct bdc *bdc;
- struct resource *res;
int ret = -ENOMEM;
int irq;
u32 temp;
@@ -508,8 +507,7 @@ static int bdc_probe(struct platform_device *pdev)
bdc->clk = clk;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- bdc->regs = devm_ioremap_resource(dev, res);
+ bdc->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(bdc->regs)) {
dev_err(dev, "ioremap error\n");
return -ENOMEM;
diff --git a/drivers/usb/gadget/udc/bdc/bdc_udc.c b/drivers/usb/gadget/udc/bdc/bdc_udc.c
index 7bfd58c846f7..248426a3e88a 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_udc.c
+++ b/drivers/usb/gadget/udc/bdc/bdc_udc.c
@@ -195,7 +195,7 @@ static void handle_link_state_change(struct bdc *bdc, u32 uspc)
break;
case BDC_LINK_STATE_U0:
if (bdc->devstatus & REMOTE_WAKEUP_ISSUED) {
- bdc->devstatus &= ~REMOTE_WAKEUP_ISSUED;
+ bdc->devstatus &= ~REMOTE_WAKEUP_ISSUED;
if (bdc->gadget.speed == USB_SPEED_SUPER) {
bdc_function_wake_fh(bdc, 0);
bdc->devstatus |= FUNC_WAKE_ISSUED;
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
index 3d499d93c083..4c9d1e49d5ed 100644
--- a/drivers/usb/gadget/udc/dummy_hcd.c
+++ b/drivers/usb/gadget/udc/dummy_hcd.c
@@ -1321,7 +1321,7 @@ static int dummy_perform_transfer(struct urb *urb, struct dummy_request *req,
u32 this_sg;
bool next_sg;
- to_host = usb_pipein(urb->pipe);
+ to_host = usb_urb_dir_in(urb);
rbuf = req->req.buf + req->req.actual;
if (!urb->num_sgs) {
@@ -1409,7 +1409,7 @@ top:
/* FIXME update emulated data toggle too */
- to_host = usb_pipein(urb->pipe);
+ to_host = usb_urb_dir_in(urb);
if (unlikely(len == 0))
is_short = 1;
else {
@@ -1830,7 +1830,7 @@ restart:
/* find the gadget's ep for this request (if configured) */
address = usb_pipeendpoint (urb->pipe);
- if (usb_pipein(urb->pipe))
+ if (usb_urb_dir_in(urb))
address |= USB_DIR_IN;
ep = find_endpoint(dum, address);
if (!ep) {
@@ -2385,7 +2385,7 @@ static inline ssize_t show_urb(char *buf, size_t size, struct urb *urb)
s = "?";
break;
} s; }),
- ep, ep ? (usb_pipein(urb->pipe) ? "in" : "out") : "",
+ ep, ep ? (usb_urb_dir_in(urb) ? "in" : "out") : "",
({ char *s; \
switch (usb_pipetype(urb->pipe)) { \
case PIPE_CONTROL: \
@@ -2725,7 +2725,7 @@ static struct platform_driver dummy_hcd_driver = {
};
/*-------------------------------------------------------------------------*/
-#define MAX_NUM_UDC 2
+#define MAX_NUM_UDC 32
static struct platform_device *the_udc_pdev[MAX_NUM_UDC];
static struct platform_device *the_hcd_pdev[MAX_NUM_UDC];
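[Note: two independent tweaks in dummy_hcd: usb_urb_dir_in() replaces usb_pipein() so the reported direction follows the URB (for control transfers, the direction of the data stage fixed at submit time) rather than the static pipe encoding, and MAX_NUM_UDC is raised so up to 32 emulated HCD/UDC pairs can be created via the module's num parameter. Minimal illustration of the helper swap (sketch):

	/* usb_pipein() decodes urb->pipe; usb_urb_dir_in() checks
	 * urb->transfer_flags & URB_DIR_MASK instead */
	bool to_host = usb_urb_dir_in(urb);
]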
diff --git a/drivers/usb/gadget/udc/fsl_qe_udc.h b/drivers/usb/gadget/udc/fsl_qe_udc.h
index 2c537a904ee7..53ca0ff7c2cb 100644
--- a/drivers/usb/gadget/udc/fsl_qe_udc.h
+++ b/drivers/usb/gadget/udc/fsl_qe_udc.h
@@ -333,8 +333,8 @@ struct qe_udc {
u32 resume_state; /* USB state to resume*/
u32 usb_state; /* USB current state */
u32 usb_next_state; /* USB next state */
- u32 ep0_state; /* Enpoint zero state */
- u32 ep0_dir; /* Enpoint zero direction: can be
+ u32 ep0_state; /* Endpoint zero state */
+ u32 ep0_dir; /* Endpoint zero direction: can be
USB_DIR_IN or USB_DIR_OUT*/
u32 usb_sof_count; /* SOF count */
u32 errors; /* USB ERRORs count */
diff --git a/drivers/usb/gadget/udc/fsl_udc_core.c b/drivers/usb/gadget/udc/fsl_udc_core.c
index 9a05863b2876..ec6eda426223 100644
--- a/drivers/usb/gadget/udc/fsl_udc_core.c
+++ b/drivers/usb/gadget/udc/fsl_udc_core.c
@@ -1052,10 +1052,11 @@ static int fsl_ep_fifo_status(struct usb_ep *_ep)
u32 bitmask;
struct ep_queue_head *qh;
- ep = container_of(_ep, struct fsl_ep, ep);
- if (!_ep || (!ep->ep.desc && ep_index(ep) != 0))
+ if (!_ep || !_ep->desc || !(_ep->desc->bEndpointAddress&0xF))
return -ENODEV;
+ ep = container_of(_ep, struct fsl_ep, ep);
+
udc = (struct fsl_udc *)ep->udc;
if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
@@ -1208,7 +1209,7 @@ static int fsl_vbus_draw(struct usb_gadget *gadget, unsigned mA)
}
/* Change Data+ pullup status
- * this func is used by usb_gadget_connect/disconnet
+ * this func is used by usb_gadget_connect/disconnect
*/
static int fsl_pullup(struct usb_gadget *gadget, int is_on)
{
@@ -1595,14 +1596,13 @@ static int process_ep_req(struct fsl_udc *udc, int pipe,
struct fsl_req *curr_req)
{
struct ep_td_struct *curr_td;
- int td_complete, actual, remaining_length, j, tmp;
+ int actual, remaining_length, j, tmp;
int status = 0;
int errors = 0;
struct ep_queue_head *curr_qh = &udc->ep_qh[pipe];
int direction = pipe % 2;
curr_td = curr_req->head;
- td_complete = 0;
actual = curr_req->req.length;
for (j = 0; j < curr_req->dtd_count; j++) {
@@ -1647,11 +1647,9 @@ static int process_ep_req(struct fsl_udc *udc, int pipe,
status = -EPROTO;
break;
} else {
- td_complete++;
break;
}
} else {
- td_complete++;
VDBG("dTD transmitted successful");
}
diff --git a/drivers/usb/gadget/udc/gr_udc.c b/drivers/usb/gadget/udc/gr_udc.c
index 7a0e9a58c2d8..64d80c65bb96 100644
--- a/drivers/usb/gadget/udc/gr_udc.c
+++ b/drivers/usb/gadget/udc/gr_udc.c
@@ -29,6 +29,7 @@
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/device.h>
+#include <linux/usb.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/dma-mapping.h>
@@ -208,7 +209,7 @@ static void gr_dfs_create(struct gr_udc *dev)
{
const char *name = "gr_udc_state";
- dev->dfs_root = debugfs_create_dir(dev_name(dev->dev), NULL);
+ dev->dfs_root = debugfs_create_dir(dev_name(dev->dev), usb_debug_root);
debugfs_create_file(name, 0444, dev->dfs_root, dev, &gr_dfs_fops);
}
@@ -2118,7 +2119,6 @@ static int gr_request_irq(struct gr_udc *dev, int irq)
static int gr_probe(struct platform_device *pdev)
{
struct gr_udc *dev;
- struct resource *res;
struct gr_regs __iomem *regs;
int retval;
u32 status;
@@ -2128,8 +2128,7 @@ static int gr_probe(struct platform_device *pdev)
return -ENOMEM;
dev->dev = &pdev->dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- regs = devm_ioremap_resource(dev->dev, res);
+ regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(regs))
return PTR_ERR(regs);
diff --git a/drivers/usb/gadget/udc/lpc32xx_udc.c b/drivers/usb/gadget/udc/lpc32xx_udc.c
index bf6c81e2f8cc..d14b2bb3f67c 100644
--- a/drivers/usb/gadget/udc/lpc32xx_udc.c
+++ b/drivers/usb/gadget/udc/lpc32xx_udc.c
@@ -3000,7 +3000,6 @@ static int lpc32xx_udc_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct lpc32xx_udc *udc;
int retval, i;
- struct resource *res;
dma_addr_t dma_handle;
struct device_node *isp1301_node;
@@ -3048,9 +3047,6 @@ static int lpc32xx_udc_probe(struct platform_device *pdev)
* IORESOURCE_IRQ, USB device interrupt number
* IORESOURCE_IRQ, USB transceiver interrupt number
*/
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENXIO;
spin_lock_init(&udc->lock);
@@ -3061,7 +3057,7 @@ static int lpc32xx_udc_probe(struct platform_device *pdev)
return udc->udp_irq[i];
}
- udc->udp_baseaddr = devm_ioremap_resource(dev, res);
+ udc->udp_baseaddr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(udc->udp_baseaddr)) {
dev_err(udc->dev, "IO map failure\n");
return PTR_ERR(udc->udp_baseaddr);
diff --git a/drivers/usb/gadget/udc/mv_u3d.h b/drivers/usb/gadget/udc/mv_u3d.h
index 982625b7197a..66b84f792f64 100644
--- a/drivers/usb/gadget/udc/mv_u3d.h
+++ b/drivers/usb/gadget/udc/mv_u3d.h
@@ -138,7 +138,7 @@ struct mv_u3d_op_regs {
u32 doorbell; /* doorbell register */
};
-/* control enpoint enable registers */
+/* control endpoint enable registers */
struct epxcr {
u32 epxoutcr0; /* ep out control 0 register */
u32 epxoutcr1; /* ep out control 1 register */
diff --git a/drivers/usb/gadget/udc/pch_udc.c b/drivers/usb/gadget/udc/pch_udc.c
index 265dab2bbfac..3344fb8c4181 100644
--- a/drivers/usb/gadget/udc/pch_udc.c
+++ b/drivers/usb/gadget/udc/pch_udc.c
@@ -1519,7 +1519,6 @@ static void pch_udc_free_dma_chain(struct pch_udc_dev *dev,
td = phys_to_virt(addr);
addr2 = (dma_addr_t)td->next;
dma_pool_free(dev->data_requests, td, addr);
- td->next = 0x00;
addr = addr2;
}
req->chain_len = 1;
diff --git a/drivers/usb/gadget/udc/pxa25x_udc.c b/drivers/usb/gadget/udc/pxa25x_udc.c
index d4be53559f2e..cfafdd92c2a8 100644
--- a/drivers/usb/gadget/udc/pxa25x_udc.c
+++ b/drivers/usb/gadget/udc/pxa25x_udc.c
@@ -2321,7 +2321,6 @@ static int pxa25x_udc_probe(struct platform_device *pdev)
struct pxa25x_udc *dev = &memory;
int retval, irq;
u32 chiprev;
- struct resource *res;
pr_info("%s: version %s\n", driver_name, DRIVER_VERSION);
@@ -2367,8 +2366,7 @@ static int pxa25x_udc_probe(struct platform_device *pdev)
if (irq < 0)
return -ENODEV;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dev->regs = devm_ioremap_resource(&pdev->dev, res);
+ dev->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dev->regs))
return PTR_ERR(dev->regs);
diff --git a/drivers/usb/gadget/udc/pxa27x_udc.c b/drivers/usb/gadget/udc/pxa27x_udc.c
index 014233252299..78902d13fc27 100644
--- a/drivers/usb/gadget/udc/pxa27x_udc.c
+++ b/drivers/usb/gadget/udc/pxa27x_udc.c
@@ -207,7 +207,7 @@ static void pxa_init_debugfs(struct pxa_udc *udc)
{
struct dentry *root;
- root = debugfs_create_dir(udc->gadget.name, NULL);
+ root = debugfs_create_dir(udc->gadget.name, usb_debug_root);
udc->debugfs_root = root;
debugfs_create_file("udcstate", 0400, root, udc, &state_dbg_fops);
@@ -2356,7 +2356,6 @@ MODULE_DEVICE_TABLE(of, udc_pxa_dt_ids);
*/
static int pxa_udc_probe(struct platform_device *pdev)
{
- struct resource *regs;
struct pxa_udc *udc = &memory;
int retval = 0, gpio;
struct pxa2xx_udc_mach_info *mach = dev_get_platdata(&pdev->dev);
@@ -2378,8 +2377,7 @@ static int pxa_udc_probe(struct platform_device *pdev)
udc->gpiod = devm_gpiod_get(&pdev->dev, NULL, GPIOD_ASIS);
}
- regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- udc->regs = devm_ioremap_resource(&pdev->dev, regs);
+ udc->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(udc->regs))
return PTR_ERR(udc->regs);
udc->irq = platform_get_irq(pdev, 0);
diff --git a/drivers/usb/gadget/udc/r8a66597-udc.c b/drivers/usb/gadget/udc/r8a66597-udc.c
index 11e25a3f4f1f..582a16165ea9 100644
--- a/drivers/usb/gadget/udc/r8a66597-udc.c
+++ b/drivers/usb/gadget/udc/r8a66597-udc.c
@@ -1838,7 +1838,7 @@ static int r8a66597_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
char clk_name[8];
- struct resource *res, *ires;
+ struct resource *ires;
int irq;
void __iomem *reg = NULL;
struct r8a66597 *r8a66597 = NULL;
@@ -1846,8 +1846,7 @@ static int r8a66597_probe(struct platform_device *pdev)
int i;
unsigned long irq_trigger;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- reg = devm_ioremap_resource(&pdev->dev, res);
+ reg = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(reg))
return PTR_ERR(reg);
diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
index 33703140233a..c5c3c14df67a 100644
--- a/drivers/usb/gadget/udc/renesas_usb3.c
+++ b/drivers/usb/gadget/udc/renesas_usb3.c
@@ -775,6 +775,18 @@ static void usb3_irq_epc_int_1_resume(struct renesas_usb3 *usb3)
usb3_transition_to_default_state(usb3, false);
}
+static void usb3_irq_epc_int_1_suspend(struct renesas_usb3 *usb3)
+{
+ usb3_disable_irq_1(usb3, USB_INT_1_B2_SPND);
+
+ if (usb3->gadget.speed != USB_SPEED_UNKNOWN &&
+ usb3->gadget.state != USB_STATE_NOTATTACHED) {
+ if (usb3->driver && usb3->driver->suspend)
+ usb3->driver->suspend(&usb3->gadget);
+ usb_gadget_set_state(&usb3->gadget, USB_STATE_SUSPENDED);
+ }
+}
+
static void usb3_irq_epc_int_1_disable(struct renesas_usb3 *usb3)
{
usb3_stop_usb3_connection(usb3);
@@ -860,6 +872,9 @@ static void usb3_irq_epc_int_1(struct renesas_usb3 *usb3, u32 int_sta_1)
if (int_sta_1 & USB_INT_1_B2_RSUM)
usb3_irq_epc_int_1_resume(usb3);
+ if (int_sta_1 & USB_INT_1_B2_SPND)
+ usb3_irq_epc_int_1_suspend(usb3);
+
if (int_sta_1 & USB_INT_1_SPEED)
usb3_irq_epc_int_1_speed(usb3);
@@ -2536,7 +2551,7 @@ static const struct file_operations renesas_usb3_b_device_fops = {
static void renesas_usb3_debugfs_init(struct renesas_usb3 *usb3,
struct device *dev)
{
- usb3->dentry = debugfs_create_dir(dev_name(dev), NULL);
+ usb3->dentry = debugfs_create_dir(dev_name(dev), usb_debug_root);
debugfs_create_file("b_device", 0644, usb3->dentry, usb3,
&renesas_usb3_b_device_fops);
@@ -2733,7 +2748,6 @@ static struct usb_role_switch_desc renesas_usb3_role_switch_desc = {
static int renesas_usb3_probe(struct platform_device *pdev)
{
struct renesas_usb3 *usb3;
- struct resource *res;
int irq, ret;
const struct renesas_usb3_priv *priv;
const struct soc_device_attribute *attr;
@@ -2752,8 +2766,7 @@ static int renesas_usb3_probe(struct platform_device *pdev)
if (!usb3)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- usb3->reg = devm_ioremap_resource(&pdev->dev, res);
+ usb3->reg = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(usb3->reg))
return PTR_ERR(usb3->reg);
diff --git a/drivers/usb/gadget/udc/s3c-hsudc.c b/drivers/usb/gadget/udc/s3c-hsudc.c
index 858993c73442..21252fbc0319 100644
--- a/drivers/usb/gadget/udc/s3c-hsudc.c
+++ b/drivers/usb/gadget/udc/s3c-hsudc.c
@@ -1263,7 +1263,6 @@ static const struct usb_gadget_ops s3c_hsudc_gadget_ops = {
static int s3c_hsudc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct resource *res;
struct s3c_hsudc *hsudc;
struct s3c24xx_hsudc_platdata *pd = dev_get_platdata(&pdev->dev);
int ret, i;
@@ -1290,9 +1289,7 @@ static int s3c_hsudc_probe(struct platform_device *pdev)
goto err_supplies;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- hsudc->regs = devm_ioremap_resource(&pdev->dev, res);
+ hsudc->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(hsudc->regs)) {
ret = PTR_ERR(hsudc->regs);
goto err_res;
diff --git a/drivers/usb/gadget/udc/s3c2410_udc.c b/drivers/usb/gadget/udc/s3c2410_udc.c
index f82208fbc249..0507a2ca0f55 100644
--- a/drivers/usb/gadget/udc/s3c2410_udc.c
+++ b/drivers/usb/gadget/udc/s3c2410_udc.c
@@ -1978,7 +1978,8 @@ static int __init udc_init(void)
dprintk(DEBUG_NORMAL, "%s\n", gadget_name);
- s3c2410_udc_debugfs_root = debugfs_create_dir(gadget_name, NULL);
+ s3c2410_udc_debugfs_root = debugfs_create_dir(gadget_name,
+ usb_debug_root);
retval = platform_driver_register(&udc_driver_24x0);
if (retval)
diff --git a/drivers/usb/gadget/udc/tegra-xudc.c b/drivers/usb/gadget/udc/tegra-xudc.c
new file mode 100644
index 000000000000..634c2c19a176
--- /dev/null
+++ b/drivers/usb/gadget/udc/tegra-xudc.c
@@ -0,0 +1,3810 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * NVIDIA Tegra XUSB device mode controller
+ *
+ * Copyright (c) 2013-2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2015, Google Inc.
+ */
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/phy/phy.h>
+#include <linux/phy/tegra/xusb.h>
+#include <linux/pm_domain.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/role.h>
+#include <linux/workqueue.h>
+
+/* XUSB_DEV registers */
+#define SPARAM 0x000
+#define SPARAM_ERSTMAX_MASK GENMASK(20, 16)
+#define SPARAM_ERSTMAX(x) (((x) << 16) & SPARAM_ERSTMAX_MASK)
+#define DB 0x004
+#define DB_TARGET_MASK GENMASK(15, 8)
+#define DB_TARGET(x) (((x) << 8) & DB_TARGET_MASK)
+#define DB_STREAMID_MASK GENMASK(31, 16)
+#define DB_STREAMID(x) (((x) << 16) & DB_STREAMID_MASK)
+#define ERSTSZ 0x008
+#define ERSTSZ_ERSTXSZ_SHIFT(x) ((x) * 16)
+#define ERSTSZ_ERSTXSZ_MASK GENMASK(15, 0)
+#define ERSTXBALO(x) (0x010 + 8 * (x))
+#define ERSTXBAHI(x) (0x014 + 8 * (x))
+#define ERDPLO 0x020
+#define ERDPLO_EHB BIT(3)
+#define ERDPHI 0x024
+#define EREPLO 0x028
+#define EREPLO_ECS BIT(0)
+#define EREPLO_SEGI BIT(1)
+#define EREPHI 0x02c
+#define CTRL 0x030
+#define CTRL_RUN BIT(0)
+#define CTRL_LSE BIT(1)
+#define CTRL_IE BIT(4)
+#define CTRL_SMI_EVT BIT(5)
+#define CTRL_SMI_DSE BIT(6)
+#define CTRL_EWE BIT(7)
+#define CTRL_DEVADDR_MASK GENMASK(30, 24)
+#define CTRL_DEVADDR(x) (((x) << 24) & CTRL_DEVADDR_MASK)
+#define CTRL_ENABLE BIT(31)
+#define ST 0x034
+#define ST_RC BIT(0)
+#define ST_IP BIT(4)
+#define RT_IMOD 0x038
+#define RT_IMOD_IMODI_MASK GENMASK(15, 0)
+#define RT_IMOD_IMODI(x) ((x) & RT_IMOD_IMODI_MASK)
+#define RT_IMOD_IMODC_MASK GENMASK(31, 16)
+#define RT_IMOD_IMODC(x) (((x) << 16) & RT_IMOD_IMODC_MASK)
+#define PORTSC 0x03c
+#define PORTSC_CCS BIT(0)
+#define PORTSC_PED BIT(1)
+#define PORTSC_PR BIT(4)
+#define PORTSC_PLS_SHIFT 5
+#define PORTSC_PLS_MASK GENMASK(8, 5)
+#define PORTSC_PLS_U0 0x0
+#define PORTSC_PLS_U2 0x2
+#define PORTSC_PLS_U3 0x3
+#define PORTSC_PLS_DISABLED 0x4
+#define PORTSC_PLS_RXDETECT 0x5
+#define PORTSC_PLS_INACTIVE 0x6
+#define PORTSC_PLS_RESUME 0xf
+#define PORTSC_PLS(x) (((x) << PORTSC_PLS_SHIFT) & PORTSC_PLS_MASK)
+#define PORTSC_PS_SHIFT 10
+#define PORTSC_PS_MASK GENMASK(13, 10)
+#define PORTSC_PS_UNDEFINED 0x0
+#define PORTSC_PS_FS 0x1
+#define PORTSC_PS_LS 0x2
+#define PORTSC_PS_HS 0x3
+#define PORTSC_PS_SS 0x4
+#define PORTSC_LWS BIT(16)
+#define PORTSC_CSC BIT(17)
+#define PORTSC_WRC BIT(19)
+#define PORTSC_PRC BIT(21)
+#define PORTSC_PLC BIT(22)
+#define PORTSC_CEC BIT(23)
+#define PORTSC_WPR BIT(30)
+#define PORTSC_CHANGE_MASK (PORTSC_CSC | PORTSC_WRC | PORTSC_PRC | \
+ PORTSC_PLC | PORTSC_CEC)
+#define ECPLO 0x040
+#define ECPHI 0x044
+#define MFINDEX 0x048
+#define MFINDEX_FRAME_SHIFT 3
+#define MFINDEX_FRAME_MASK GENMASK(13, 3)
+#define PORTPM 0x04c
+#define PORTPM_L1S_MASK GENMASK(1, 0)
+#define PORTPM_L1S_DROP 0x0
+#define PORTPM_L1S_ACCEPT 0x1
+#define PORTPM_L1S_NYET 0x2
+#define PORTPM_L1S_STALL 0x3
+#define PORTPM_L1S(x) ((x) & PORTPM_L1S_MASK)
+#define PORTPM_RWE BIT(3)
+#define PORTPM_U2TIMEOUT_MASK GENMASK(15, 8)
+#define PORTPM_U1TIMEOUT_MASK GENMASK(23, 16)
+#define PORTPM_FLA BIT(24)
+#define PORTPM_VBA BIT(25)
+#define PORTPM_WOC BIT(26)
+#define PORTPM_WOD BIT(27)
+#define PORTPM_U1E BIT(28)
+#define PORTPM_U2E BIT(29)
+#define PORTPM_FRWE BIT(30)
+#define PORTPM_PNG_CYA BIT(31)
+#define EP_HALT 0x050
+#define EP_PAUSE 0x054
+#define EP_RELOAD 0x058
+#define EP_STCHG 0x05c
+#define DEVNOTIF_LO 0x064
+#define DEVNOTIF_LO_TRIG BIT(0)
+#define DEVNOTIF_LO_TYPE_MASK GENMASK(7, 4)
+#define DEVNOTIF_LO_TYPE(x) (((x) << 4) & DEVNOTIF_LO_TYPE_MASK)
+#define DEVNOTIF_LO_TYPE_FUNCTION_WAKE 0x1
+#define DEVNOTIF_HI 0x068
+#define PORTHALT 0x06c
+#define PORTHALT_HALT_LTSSM BIT(0)
+#define PORTHALT_HALT_REJECT BIT(1)
+#define PORTHALT_STCHG_REQ BIT(20)
+#define PORTHALT_STCHG_INTR_EN BIT(24)
+#define PORT_TM 0x070
+#define EP_THREAD_ACTIVE 0x074
+#define EP_STOPPED 0x078
+#define HSFSPI_COUNT0 0x100
+#define HSFSPI_COUNT13 0x134
+#define HSFSPI_COUNT13_U2_RESUME_K_DURATION_MASK GENMASK(29, 0)
+#define HSFSPI_COUNT13_U2_RESUME_K_DURATION(x) ((x) & \
+ HSFSPI_COUNT13_U2_RESUME_K_DURATION_MASK)
+#define BLCG 0x840
+#define SSPX_CORE_CNT0 0x610
+#define SSPX_CORE_CNT0_PING_TBURST_MASK GENMASK(7, 0)
+#define SSPX_CORE_CNT0_PING_TBURST(x) ((x) & SSPX_CORE_CNT0_PING_TBURST_MASK)
+#define SSPX_CORE_CNT30 0x688
+#define SSPX_CORE_CNT30_LMPITP_TIMER_MASK GENMASK(19, 0)
+#define SSPX_CORE_CNT30_LMPITP_TIMER(x) ((x) & \
+ SSPX_CORE_CNT30_LMPITP_TIMER_MASK)
+#define SSPX_CORE_CNT32 0x690
+#define SSPX_CORE_CNT32_POLL_TBURST_MAX_MASK GENMASK(7, 0)
+#define SSPX_CORE_CNT32_POLL_TBURST_MAX(x) ((x) & \
+ SSPX_CORE_CNT32_POLL_TBURST_MAX_MASK)
+#define SSPX_CORE_PADCTL4 0x750
+#define SSPX_CORE_PADCTL4_RXDAT_VLD_TIMEOUT_U3_MASK GENMASK(19, 0)
+#define SSPX_CORE_PADCTL4_RXDAT_VLD_TIMEOUT_U3(x) ((x) & \
+ SSPX_CORE_PADCTL4_RXDAT_VLD_TIMEOUT_U3_MASK)
+#define BLCG_DFPCI BIT(0)
+#define BLCG_UFPCI BIT(1)
+#define BLCG_FE BIT(2)
+#define BLCG_COREPLL_PWRDN BIT(8)
+#define BLCG_IOPLL_0_PWRDN BIT(9)
+#define BLCG_IOPLL_1_PWRDN BIT(10)
+#define BLCG_IOPLL_2_PWRDN BIT(11)
+#define BLCG_ALL 0x1ff
+#define CFG_DEV_SSPI_XFER 0x858
+#define CFG_DEV_SSPI_XFER_ACKTIMEOUT_MASK GENMASK(31, 0)
+#define CFG_DEV_SSPI_XFER_ACKTIMEOUT(x) ((x) & \
+ CFG_DEV_SSPI_XFER_ACKTIMEOUT_MASK)
+#define CFG_DEV_FE 0x85c
+#define CFG_DEV_FE_PORTREGSEL_MASK GENMASK(1, 0)
+#define CFG_DEV_FE_PORTREGSEL_SS_PI 1
+#define CFG_DEV_FE_PORTREGSEL_HSFS_PI 2
+#define CFG_DEV_FE_PORTREGSEL(x) ((x) & CFG_DEV_FE_PORTREGSEL_MASK)
+#define CFG_DEV_FE_INFINITE_SS_RETRY BIT(29)
+
+/* FPCI registers */
+#define XUSB_DEV_CFG_1 0x004
+#define XUSB_DEV_CFG_1_IO_SPACE_EN BIT(0)
+#define XUSB_DEV_CFG_1_MEMORY_SPACE_EN BIT(1)
+#define XUSB_DEV_CFG_1_BUS_MASTER_EN BIT(2)
+#define XUSB_DEV_CFG_4 0x010
+#define XUSB_DEV_CFG_4_BASE_ADDR_MASK GENMASK(31, 15)
+#define XUSB_DEV_CFG_5 0x014
+
+/* IPFS registers */
+#define XUSB_DEV_CONFIGURATION_0 0x180
+#define XUSB_DEV_CONFIGURATION_0_EN_FPCI BIT(0)
+#define XUSB_DEV_INTR_MASK_0 0x188
+#define XUSB_DEV_INTR_MASK_0_IP_INT_MASK BIT(16)
+
+struct tegra_xudc_ep_context {
+ __le32 info0;
+ __le32 info1;
+ __le32 deq_lo;
+ __le32 deq_hi;
+ __le32 tx_info;
+ __le32 rsvd[11];
+};
+
+#define EP_STATE_DISABLED 0
+#define EP_STATE_RUNNING 1
+#define EP_STATE_HALTED 2
+#define EP_STATE_STOPPED 3
+#define EP_STATE_ERROR 4
+
+#define EP_TYPE_INVALID 0
+#define EP_TYPE_ISOCH_OUT 1
+#define EP_TYPE_BULK_OUT 2
+#define EP_TYPE_INTERRUPT_OUT 3
+#define EP_TYPE_CONTROL 4
+#define EP_TYPE_ISOCH_IN 5
+#define EP_TYPE_BULK_IN 6
+#define EP_TYPE_INTERRUPT_IN 7
+
+#define BUILD_EP_CONTEXT_RW(name, member, shift, mask) \
+static inline u32 ep_ctx_read_##name(struct tegra_xudc_ep_context *ctx) \
+{ \
+ return (le32_to_cpu(ctx->member) >> (shift)) & (mask); \
+} \
+static inline void \
+ep_ctx_write_##name(struct tegra_xudc_ep_context *ctx, u32 val) \
+{ \
+ u32 tmp; \
+ \
+ tmp = le32_to_cpu(ctx->member) & ~((mask) << (shift)); \
+ tmp |= (val & (mask)) << (shift); \
+ ctx->member = cpu_to_le32(tmp); \
+}
+
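+/*
+ * For illustration, the invocation BUILD_EP_CONTEXT_RW(state, info0, 0, 0x7)
+ * below expands to a pair of helpers equivalent to:
+ *
+ *   u32 ep_ctx_read_state(ctx);       // (le32_to_cpu(ctx->info0) >> 0) & 0x7
+ *   void ep_ctx_write_state(ctx, v);  // read-modify-write of bits 2:0
+ *
+ * so every endpoint-context field stays little-endian in memory.
+ */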
+BUILD_EP_CONTEXT_RW(state, info0, 0, 0x7)
+BUILD_EP_CONTEXT_RW(mult, info0, 8, 0x3)
+BUILD_EP_CONTEXT_RW(max_pstreams, info0, 10, 0x1f)
+BUILD_EP_CONTEXT_RW(lsa, info0, 15, 0x1)
+BUILD_EP_CONTEXT_RW(interval, info0, 16, 0xff)
+BUILD_EP_CONTEXT_RW(cerr, info1, 1, 0x3)
+BUILD_EP_CONTEXT_RW(type, info1, 3, 0x7)
+BUILD_EP_CONTEXT_RW(hid, info1, 7, 0x1)
+BUILD_EP_CONTEXT_RW(max_burst_size, info1, 8, 0xff)
+BUILD_EP_CONTEXT_RW(max_packet_size, info1, 16, 0xffff)
+BUILD_EP_CONTEXT_RW(dcs, deq_lo, 0, 0x1)
+BUILD_EP_CONTEXT_RW(deq_lo, deq_lo, 4, 0xfffffff)
+BUILD_EP_CONTEXT_RW(deq_hi, deq_hi, 0, 0xffffffff)
+BUILD_EP_CONTEXT_RW(avg_trb_len, tx_info, 0, 0xffff)
+BUILD_EP_CONTEXT_RW(max_esit_payload, tx_info, 16, 0xffff)
+BUILD_EP_CONTEXT_RW(edtla, rsvd[0], 0, 0xffffff)
+BUILD_EP_CONTEXT_RW(seq_num, rsvd[0], 24, 0xff)
+BUILD_EP_CONTEXT_RW(partial_td, rsvd[0], 25, 0x1)
+BUILD_EP_CONTEXT_RW(cerrcnt, rsvd[1], 18, 0x3)
+BUILD_EP_CONTEXT_RW(data_offset, rsvd[2], 0, 0x1ffff)
+BUILD_EP_CONTEXT_RW(numtrbs, rsvd[2], 22, 0x1f)
+BUILD_EP_CONTEXT_RW(devaddr, rsvd[6], 0, 0x7f)
+
+static inline u64 ep_ctx_read_deq_ptr(struct tegra_xudc_ep_context *ctx)
+{
+ return ((u64)ep_ctx_read_deq_hi(ctx) << 32) |
+ (ep_ctx_read_deq_lo(ctx) << 4);
+}
+
+static inline void
+ep_ctx_write_deq_ptr(struct tegra_xudc_ep_context *ctx, u64 addr)
+{
+ ep_ctx_write_deq_lo(ctx, lower_32_bits(addr) >> 4);
+ ep_ctx_write_deq_hi(ctx, upper_32_bits(addr));
+}
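+
+/*
+ * TRBs are 16 bytes, so the dequeue pointer is always 16-byte aligned and
+ * bits 3:0 of deq_lo are free to carry other state; bit 0 holds the
+ * dequeue cycle state (DCS), hence the shift by 4 in the helpers above.
+ */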
+
+struct tegra_xudc_trb {
+ __le32 data_lo;
+ __le32 data_hi;
+ __le32 status;
+ __le32 control;
+};
+
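+/*
+ * The TRB type and completion-code values below follow the xHCI encoding;
+ * completion codes 219-223 fall in the range xHCI reserves for
+ * vendor-defined errors (192-223) and appear to be XUSB-specific.
+ */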
+#define TRB_TYPE_RSVD 0
+#define TRB_TYPE_NORMAL 1
+#define TRB_TYPE_SETUP_STAGE 2
+#define TRB_TYPE_DATA_STAGE 3
+#define TRB_TYPE_STATUS_STAGE 4
+#define TRB_TYPE_ISOCH 5
+#define TRB_TYPE_LINK 6
+#define TRB_TYPE_TRANSFER_EVENT 32
+#define TRB_TYPE_PORT_STATUS_CHANGE_EVENT 34
+#define TRB_TYPE_STREAM 48
+#define TRB_TYPE_SETUP_PACKET_EVENT 63
+
+#define TRB_CMPL_CODE_INVALID 0
+#define TRB_CMPL_CODE_SUCCESS 1
+#define TRB_CMPL_CODE_DATA_BUFFER_ERR 2
+#define TRB_CMPL_CODE_BABBLE_DETECTED_ERR 3
+#define TRB_CMPL_CODE_USB_TRANS_ERR 4
+#define TRB_CMPL_CODE_TRB_ERR 5
+#define TRB_CMPL_CODE_STALL 6
+#define TRB_CMPL_CODE_INVALID_STREAM_TYPE_ERR 10
+#define TRB_CMPL_CODE_SHORT_PACKET 13
+#define TRB_CMPL_CODE_RING_UNDERRUN 14
+#define TRB_CMPL_CODE_RING_OVERRUN 15
+#define TRB_CMPL_CODE_EVENT_RING_FULL_ERR 21
+#define TRB_CMPL_CODE_STOPPED 26
+#define TRB_CMPL_CODE_ISOCH_BUFFER_OVERRUN 31
+#define TRB_CMPL_CODE_STREAM_NUMP_ERROR 219
+#define TRB_CMPL_CODE_PRIME_PIPE_RECEIVED 220
+#define TRB_CMPL_CODE_HOST_REJECTED 221
+#define TRB_CMPL_CODE_CTRL_DIR_ERR 222
+#define TRB_CMPL_CODE_CTRL_SEQNUM_ERR 223
+
+#define BUILD_TRB_RW(name, member, shift, mask) \
+static inline u32 trb_read_##name(struct tegra_xudc_trb *trb) \
+{ \
+ return (le32_to_cpu(trb->member) >> (shift)) & (mask); \
+} \
+static inline void \
+trb_write_##name(struct tegra_xudc_trb *trb, u32 val) \
+{ \
+ u32 tmp; \
+ \
+ tmp = le32_to_cpu(trb->member) & ~((mask) << (shift)); \
+ tmp |= (val & (mask)) << (shift); \
+ trb->member = cpu_to_le32(tmp); \
+}
+
+BUILD_TRB_RW(data_lo, data_lo, 0, 0xffffffff)
+BUILD_TRB_RW(data_hi, data_hi, 0, 0xffffffff)
+BUILD_TRB_RW(seq_num, status, 0, 0xffff)
+BUILD_TRB_RW(transfer_len, status, 0, 0xffffff)
+BUILD_TRB_RW(td_size, status, 17, 0x1f)
+BUILD_TRB_RW(cmpl_code, status, 24, 0xff)
+BUILD_TRB_RW(cycle, control, 0, 0x1)
+BUILD_TRB_RW(toggle_cycle, control, 1, 0x1)
+BUILD_TRB_RW(isp, control, 2, 0x1)
+BUILD_TRB_RW(chain, control, 4, 0x1)
+BUILD_TRB_RW(ioc, control, 5, 0x1)
+BUILD_TRB_RW(type, control, 10, 0x3f)
+BUILD_TRB_RW(stream_id, control, 16, 0xffff)
+BUILD_TRB_RW(endpoint_id, control, 16, 0x1f)
+BUILD_TRB_RW(tlbpc, control, 16, 0xf)
+BUILD_TRB_RW(data_stage_dir, control, 16, 0x1)
+BUILD_TRB_RW(frame_id, control, 20, 0x7ff)
+BUILD_TRB_RW(sia, control, 31, 0x1)
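+
+/*
+ * Note that stream_id, endpoint_id, tlbpc and data_stage_dir all overlay
+ * bits 16 and up of the control word; which field is meaningful depends on
+ * the TRB type, as in xHCI.
+ */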
+
+static inline u64 trb_read_data_ptr(struct tegra_xudc_trb *trb)
+{
+ return ((u64)trb_read_data_hi(trb) << 32) |
+ trb_read_data_lo(trb);
+}
+
+static inline void trb_write_data_ptr(struct tegra_xudc_trb *trb, u64 addr)
+{
+ trb_write_data_lo(trb, lower_32_bits(addr));
+ trb_write_data_hi(trb, upper_32_bits(addr));
+}
+
+struct tegra_xudc_request {
+ struct usb_request usb_req;
+
+ size_t buf_queued;
+ unsigned int trbs_queued;
+ unsigned int trbs_needed;
+ bool need_zlp;
+
+ struct tegra_xudc_trb *first_trb;
+ struct tegra_xudc_trb *last_trb;
+
+ struct list_head list;
+};
+
+struct tegra_xudc_ep {
+ struct tegra_xudc *xudc;
+ struct usb_ep usb_ep;
+ unsigned int index;
+ char name[8];
+
+ struct tegra_xudc_ep_context *context;
+
+#define XUDC_TRANSFER_RING_SIZE 64
+ struct tegra_xudc_trb *transfer_ring;
+ dma_addr_t transfer_ring_phys;
+
+ unsigned int enq_ptr;
+ unsigned int deq_ptr;
+ bool pcs;
+ bool ring_full;
+ bool stream_rejected;
+
+ struct list_head queue;
+ const struct usb_endpoint_descriptor *desc;
+ const struct usb_ss_ep_comp_descriptor *comp_desc;
+};
+
+struct tegra_xudc_sel_timing {
+ __u8 u1sel;
+ __u8 u1pel;
+ __le16 u2sel;
+ __le16 u2pel;
+};
+
+enum tegra_xudc_setup_state {
+ WAIT_FOR_SETUP,
+ DATA_STAGE_XFER,
+ DATA_STAGE_RECV,
+ STATUS_STAGE_XFER,
+ STATUS_STAGE_RECV,
+};
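+
+/*
+ * Control transfers move from WAIT_FOR_SETUP to DATA_STAGE_XFER (IN data)
+ * or DATA_STAGE_RECV (OUT data) when wLength != 0 and then to the
+ * opposite-direction status stage; zero-length requests go straight to
+ * STATUS_STAGE_XFER. See tegra_xudc_handle_ep0_setup_packet().
+ */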
+
+struct tegra_xudc_setup_packet {
+ struct usb_ctrlrequest ctrl_req;
+ unsigned int seq_num;
+};
+
+struct tegra_xudc_save_regs {
+ u32 ctrl;
+ u32 portpm;
+};
+
+struct tegra_xudc {
+ struct device *dev;
+ const struct tegra_xudc_soc *soc;
+ struct tegra_xusb_padctl *padctl;
+
+ spinlock_t lock;
+
+ struct usb_gadget gadget;
+ struct usb_gadget_driver *driver;
+
+#define XUDC_NR_EVENT_RINGS 2
+#define XUDC_EVENT_RING_SIZE 4096
+ struct tegra_xudc_trb *event_ring[XUDC_NR_EVENT_RINGS];
+ dma_addr_t event_ring_phys[XUDC_NR_EVENT_RINGS];
+ unsigned int event_ring_index;
+ unsigned int event_ring_deq_ptr;
+ bool ccs;
+
+#define XUDC_NR_EPS 32
+ struct tegra_xudc_ep ep[XUDC_NR_EPS];
+ struct tegra_xudc_ep_context *ep_context;
+ dma_addr_t ep_context_phys;
+
+ struct device *genpd_dev_device;
+ struct device *genpd_dev_ss;
+ struct device_link *genpd_dl_device;
+ struct device_link *genpd_dl_ss;
+
+ struct dma_pool *transfer_ring_pool;
+
+ bool queued_setup_packet;
+ struct tegra_xudc_setup_packet setup_packet;
+ enum tegra_xudc_setup_state setup_state;
+ u16 setup_seq_num;
+
+ u16 dev_addr;
+ u16 isoch_delay;
+ struct tegra_xudc_sel_timing sel_timing;
+ u8 test_mode_pattern;
+ u16 status_buf;
+ struct tegra_xudc_request *ep0_req;
+
+ bool pullup;
+
+ unsigned int nr_enabled_eps;
+ unsigned int nr_isoch_eps;
+
+ unsigned int device_state;
+ unsigned int resume_state;
+
+ int irq;
+
+ void __iomem *base;
+ resource_size_t phys_base;
+ void __iomem *ipfs;
+ void __iomem *fpci;
+
+ struct regulator_bulk_data *supplies;
+
+ struct clk_bulk_data *clks;
+
+ enum usb_role device_mode;
+ struct usb_role_switch *usb_role_sw;
+ struct work_struct usb_role_sw_work;
+
+ struct phy *usb3_phy;
+ struct phy *utmi_phy;
+
+ struct tegra_xudc_save_regs saved_regs;
+ bool suspended;
+ bool powergated;
+
+ struct completion disconnect_complete;
+
+ bool selfpowered;
+
+#define TOGGLE_VBUS_WAIT_MS 100
+ struct delayed_work plc_reset_work;
+ bool wait_csc;
+
+ struct delayed_work port_reset_war_work;
+ bool wait_for_sec_prc;
+};
+
+#define XUDC_TRB_MAX_BUFFER_SIZE 65536
+#define XUDC_MAX_ISOCH_EPS 4
+#define XUDC_INTERRUPT_MODERATION_US 0
+
+static struct usb_endpoint_descriptor tegra_xudc_ep0_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = 0,
+ .bmAttributes = USB_ENDPOINT_XFER_CONTROL,
+ .wMaxPacketSize = cpu_to_le16(64),
+};
+
+struct tegra_xudc_soc {
+ const char * const *supply_names;
+ unsigned int num_supplies;
+ const char * const *clock_names;
+ unsigned int num_clks;
+ bool u1_enable;
+ bool u2_enable;
+ bool lpm_enable;
+ bool invalid_seq_num;
+ bool pls_quirk;
+ bool port_reset_quirk;
+ bool has_ipfs;
+};
+
+static inline u32 fpci_readl(struct tegra_xudc *xudc, unsigned int offset)
+{
+ return readl(xudc->fpci + offset);
+}
+
+static inline void fpci_writel(struct tegra_xudc *xudc, u32 val,
+ unsigned int offset)
+{
+ writel(val, xudc->fpci + offset);
+}
+
+static inline u32 ipfs_readl(struct tegra_xudc *xudc, unsigned int offset)
+{
+ return readl(xudc->ipfs + offset);
+}
+
+static inline void ipfs_writel(struct tegra_xudc *xudc, u32 val,
+ unsigned int offset)
+{
+ writel(val, xudc->ipfs + offset);
+}
+
+static inline u32 xudc_readl(struct tegra_xudc *xudc, unsigned int offset)
+{
+ return readl(xudc->base + offset);
+}
+
+static inline void xudc_writel(struct tegra_xudc *xudc, u32 val,
+ unsigned int offset)
+{
+ writel(val, xudc->base + offset);
+}
+
+static inline int xudc_readl_poll(struct tegra_xudc *xudc,
+ unsigned int offset, u32 mask, u32 val)
+{
+ u32 regval;
+
+ return readl_poll_timeout_atomic(xudc->base + offset, regval,
+ (regval & mask) == val, 1, 100);
+}
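+
+/*
+ * Note: this polls in atomic context, once per microsecond with a 100 us
+ * timeout, so it is safe under xudc->lock but should only be used for
+ * registers that settle quickly (e.g. EP_RELOAD, EP_STCHG).
+ */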
+
+static inline struct tegra_xudc *to_xudc(struct usb_gadget *gadget)
+{
+ return container_of(gadget, struct tegra_xudc, gadget);
+}
+
+static inline struct tegra_xudc_ep *to_xudc_ep(struct usb_ep *ep)
+{
+ return container_of(ep, struct tegra_xudc_ep, usb_ep);
+}
+
+static inline struct tegra_xudc_request *to_xudc_req(struct usb_request *req)
+{
+ return container_of(req, struct tegra_xudc_request, usb_req);
+}
+
+static inline void dump_trb(struct tegra_xudc *xudc, const char *type,
+ struct tegra_xudc_trb *trb)
+{
+ dev_dbg(xudc->dev,
+ "%s: %p, lo = %#x, hi = %#x, status = %#x, control = %#x\n",
+ type, trb, trb->data_lo, trb->data_hi, trb->status,
+ trb->control);
+}
+
+static void tegra_xudc_device_mode_on(struct tegra_xudc *xudc)
+{
+ int err;
+
+ pm_runtime_get_sync(xudc->dev);
+
+ err = phy_power_on(xudc->utmi_phy);
+ if (err < 0)
+ dev_err(xudc->dev, "utmi power on failed %d\n", err);
+
+ err = phy_power_on(xudc->usb3_phy);
+ if (err < 0)
+ dev_err(xudc->dev, "usb3 phy power on failed %d\n", err);
+
+ dev_dbg(xudc->dev, "device mode on\n");
+
+ tegra_xusb_padctl_set_vbus_override(xudc->padctl, true);
+
+ xudc->device_mode = USB_ROLE_DEVICE;
+}
+
+static void tegra_xudc_device_mode_off(struct tegra_xudc *xudc)
+{
+ bool connected = false;
+ u32 pls, val;
+ int err;
+
+ dev_dbg(xudc->dev, "device mode off\n");
+
+ connected = !!(xudc_readl(xudc, PORTSC) & PORTSC_CCS);
+
+ reinit_completion(&xudc->disconnect_complete);
+
+ tegra_xusb_padctl_set_vbus_override(xudc->padctl, false);
+
+ pls = (xudc_readl(xudc, PORTSC) & PORTSC_PLS_MASK) >>
+ PORTSC_PLS_SHIFT;
+
+ /* Direct link to U0 if disconnected in RESUME or U2. */
+ if (xudc->soc->pls_quirk && xudc->gadget.speed == USB_SPEED_SUPER &&
+ (pls == PORTSC_PLS_RESUME || pls == PORTSC_PLS_U2)) {
+ val = xudc_readl(xudc, PORTPM);
+ val |= PORTPM_FRWE;
+ xudc_writel(xudc, val, PORTPM);
+
+ val = xudc_readl(xudc, PORTSC);
+ val &= ~(PORTSC_CHANGE_MASK | PORTSC_PLS_MASK);
+ val |= PORTSC_LWS | PORTSC_PLS(PORTSC_PLS_U0);
+ xudc_writel(xudc, val, PORTSC);
+ }
+
+ xudc->device_mode = USB_ROLE_NONE;
+
+ /* Wait for disconnect event. */
+ if (connected)
+ wait_for_completion(&xudc->disconnect_complete);
+
+ /* Make sure interrupt handler has completed before powergating. */
+ synchronize_irq(xudc->irq);
+
+ err = phy_power_off(xudc->utmi_phy);
+ if (err < 0)
+ dev_err(xudc->dev, "utmi_phy power off failed %d\n", err);
+
+ err = phy_power_off(xudc->usb3_phy);
+ if (err < 0)
+ dev_err(xudc->dev, "usb3_phy power off failed %d\n", err);
+
+ pm_runtime_put(xudc->dev);
+}
+
+static void tegra_xudc_usb_role_sw_work(struct work_struct *work)
+{
+ struct tegra_xudc *xudc = container_of(work, struct tegra_xudc,
+ usb_role_sw_work);
+
+ if (!xudc->usb_role_sw ||
+ usb_role_switch_get_role(xudc->usb_role_sw) == USB_ROLE_DEVICE)
+ tegra_xudc_device_mode_on(xudc);
+ else
+ tegra_xudc_device_mode_off(xudc);
+}
+
+static int tegra_xudc_usb_role_sw_set(struct device *dev, enum usb_role role)
+{
+ struct tegra_xudc *xudc = dev_get_drvdata(dev);
+ unsigned long flags;
+
+ dev_dbg(dev, "%s role is %d\n", __func__, role);
+
+ spin_lock_irqsave(&xudc->lock, flags);
+
+ if (!xudc->suspended)
+ schedule_work(&xudc->usb_role_sw_work);
+
+ spin_unlock_irqrestore(&xudc->lock, flags);
+
+ return 0;
+}
+
+static void tegra_xudc_plc_reset_work(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct tegra_xudc *xudc = container_of(dwork, struct tegra_xudc,
+ plc_reset_work);
+ unsigned long flags;
+
+ spin_lock_irqsave(&xudc->lock, flags);
+
+ if (xudc->wait_csc) {
+ u32 pls = (xudc_readl(xudc, PORTSC) & PORTSC_PLS_MASK) >>
+ PORTSC_PLS_SHIFT;
+
+ if (pls == PORTSC_PLS_INACTIVE) {
+ dev_info(xudc->dev, "PLS = Inactive. Toggle VBUS\n");
+ tegra_xusb_padctl_set_vbus_override(xudc->padctl,
+ false);
+ tegra_xusb_padctl_set_vbus_override(xudc->padctl, true);
+ xudc->wait_csc = false;
+ }
+ }
+
+ spin_unlock_irqrestore(&xudc->lock, flags);
+}
+
+static void tegra_xudc_port_reset_war_work(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct tegra_xudc *xudc =
+ container_of(dwork, struct tegra_xudc, port_reset_war_work);
+ unsigned long flags;
+ u32 pls;
+ int ret;
+
+ spin_lock_irqsave(&xudc->lock, flags);
+
+ if (xudc->device_mode == USB_ROLE_DEVICE &&
+ xudc->wait_for_sec_prc) {
+ pls = (xudc_readl(xudc, PORTSC) & PORTSC_PLS_MASK) >>
+ PORTSC_PLS_SHIFT;
+ dev_dbg(xudc->dev, "pls = %x\n", pls);
+
+ if (pls == PORTSC_PLS_DISABLED) {
+ dev_dbg(xudc->dev, "toggle vbus\n");
+ /* A port reset did not complete within 100 ms; toggle VBUS */
+ ret = tegra_phy_xusb_utmi_port_reset(xudc->utmi_phy);
+ if (ret == 1)
+ xudc->wait_for_sec_prc = false;
+ }
+ }
+
+ spin_unlock_irqrestore(&xudc->lock, flags);
+}
+
+static dma_addr_t trb_virt_to_phys(struct tegra_xudc_ep *ep,
+ struct tegra_xudc_trb *trb)
+{
+ unsigned int index;
+
+ index = trb - ep->transfer_ring;
+
+ if (WARN_ON(index >= XUDC_TRANSFER_RING_SIZE))
+ return 0;
+
+ return (ep->transfer_ring_phys + index * sizeof(*trb));
+}
+
+static struct tegra_xudc_trb *trb_phys_to_virt(struct tegra_xudc_ep *ep,
+ dma_addr_t addr)
+{
+ struct tegra_xudc_trb *trb;
+ unsigned int index;
+
+ index = (addr - ep->transfer_ring_phys) / sizeof(*trb);
+
+ if (WARN_ON(index >= XUDC_TRANSFER_RING_SIZE))
+ return NULL;
+
+ trb = &ep->transfer_ring[index];
+
+ return trb;
+}
+
+static void ep_reload(struct tegra_xudc *xudc, unsigned int ep)
+{
+ xudc_writel(xudc, BIT(ep), EP_RELOAD);
+ xudc_readl_poll(xudc, EP_RELOAD, BIT(ep), 0);
+}
+
+static void ep_pause(struct tegra_xudc *xudc, unsigned int ep)
+{
+ u32 val;
+
+ val = xudc_readl(xudc, EP_PAUSE);
+ if (val & BIT(ep))
+ return;
+ val |= BIT(ep);
+
+ xudc_writel(xudc, val, EP_PAUSE);
+
+ xudc_readl_poll(xudc, EP_STCHG, BIT(ep), BIT(ep));
+
+ xudc_writel(xudc, BIT(ep), EP_STCHG);
+}
+
+static void ep_unpause(struct tegra_xudc *xudc, unsigned int ep)
+{
+ u32 val;
+
+ val = xudc_readl(xudc, EP_PAUSE);
+ if (!(val & BIT(ep)))
+ return;
+ val &= ~BIT(ep);
+
+ xudc_writel(xudc, val, EP_PAUSE);
+
+ xudc_readl_poll(xudc, EP_STCHG, BIT(ep), BIT(ep));
+
+ xudc_writel(xudc, BIT(ep), EP_STCHG);
+}
+
+static void ep_unpause_all(struct tegra_xudc *xudc)
+{
+ u32 val;
+
+ val = xudc_readl(xudc, EP_PAUSE);
+
+ xudc_writel(xudc, 0, EP_PAUSE);
+
+ xudc_readl_poll(xudc, EP_STCHG, val, val);
+
+ xudc_writel(xudc, val, EP_STCHG);
+}
+
+static void ep_halt(struct tegra_xudc *xudc, unsigned int ep)
+{
+ u32 val;
+
+ val = xudc_readl(xudc, EP_HALT);
+ if (val & BIT(ep))
+ return;
+ val |= BIT(ep);
+ xudc_writel(xudc, val, EP_HALT);
+
+ xudc_readl_poll(xudc, EP_STCHG, BIT(ep), BIT(ep));
+
+ xudc_writel(xudc, BIT(ep), EP_STCHG);
+}
+
+static void ep_unhalt(struct tegra_xudc *xudc, unsigned int ep)
+{
+ u32 val;
+
+ val = xudc_readl(xudc, EP_HALT);
+ if (!(val & BIT(ep)))
+ return;
+ val &= ~BIT(ep);
+ xudc_writel(xudc, val, EP_HALT);
+
+ xudc_readl_poll(xudc, EP_STCHG, BIT(ep), BIT(ep));
+
+ xudc_writel(xudc, BIT(ep), EP_STCHG);
+}
+
+static void ep_unhalt_all(struct tegra_xudc *xudc)
+{
+ u32 val;
+
+ val = xudc_readl(xudc, EP_HALT);
+ if (!val)
+ return;
+ xudc_writel(xudc, 0, EP_HALT);
+
+ xudc_readl_poll(xudc, EP_STCHG, val, val);
+
+ xudc_writel(xudc, val, EP_STCHG);
+}
+
+static void ep_wait_for_stopped(struct tegra_xudc *xudc, unsigned int ep)
+{
+ xudc_readl_poll(xudc, EP_STOPPED, BIT(ep), BIT(ep));
+ xudc_writel(xudc, BIT(ep), EP_STOPPED);
+}
+
+static void ep_wait_for_inactive(struct tegra_xudc *xudc, unsigned int ep)
+{
+ xudc_readl_poll(xudc, EP_THREAD_ACTIVE, BIT(ep), 0);
+}
+
+static void tegra_xudc_req_done(struct tegra_xudc_ep *ep,
+ struct tegra_xudc_request *req, int status)
+{
+ struct tegra_xudc *xudc = ep->xudc;
+
+ dev_dbg(xudc->dev, "completing request %p on EP %u with status %d\n",
+ req, ep->index, status);
+
+ if (likely(req->usb_req.status == -EINPROGRESS))
+ req->usb_req.status = status;
+
+ list_del_init(&req->list);
+
+ if (usb_endpoint_xfer_control(ep->desc)) {
+ usb_gadget_unmap_request(&xudc->gadget, &req->usb_req,
+ (xudc->setup_state ==
+ DATA_STAGE_XFER));
+ } else {
+ usb_gadget_unmap_request(&xudc->gadget, &req->usb_req,
+ usb_endpoint_dir_in(ep->desc));
+ }
+
+ spin_unlock(&xudc->lock);
+ usb_gadget_giveback_request(&ep->usb_ep, &req->usb_req);
+ spin_lock(&xudc->lock);
+}
+
+static void tegra_xudc_ep_nuke(struct tegra_xudc_ep *ep, int status)
+{
+ struct tegra_xudc_request *req;
+
+ while (!list_empty(&ep->queue)) {
+ req = list_first_entry(&ep->queue, struct tegra_xudc_request,
+ list);
+ tegra_xudc_req_done(ep, req, status);
+ }
+}
+
+static unsigned int ep_available_trbs(struct tegra_xudc_ep *ep)
+{
+ if (ep->ring_full)
+ return 0;
+
+ if (ep->deq_ptr > ep->enq_ptr)
+ return ep->deq_ptr - ep->enq_ptr - 1;
+
+ return XUDC_TRANSFER_RING_SIZE - (ep->enq_ptr - ep->deq_ptr) - 2;
+}
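+
+/*
+ * The arithmetic above reserves two slots in the unwrapped case: one for
+ * the link TRB in the last ring slot and one empty slot so that
+ * enq_ptr == deq_ptr unambiguously means "ring empty". For example, with
+ * enq_ptr = 10 and deq_ptr = 4, 64 - (10 - 4) - 2 = 56 TRBs are free.
+ */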
+
+static void tegra_xudc_queue_one_trb(struct tegra_xudc_ep *ep,
+ struct tegra_xudc_request *req,
+ struct tegra_xudc_trb *trb,
+ bool ioc)
+{
+ struct tegra_xudc *xudc = ep->xudc;
+ dma_addr_t buf_addr;
+ size_t len;
+
+ len = min_t(size_t, XUDC_TRB_MAX_BUFFER_SIZE, req->usb_req.length -
+ req->buf_queued);
+ if (len > 0)
+ buf_addr = req->usb_req.dma + req->buf_queued;
+ else
+ buf_addr = 0;
+
+ trb_write_data_ptr(trb, buf_addr);
+
+ trb_write_transfer_len(trb, len);
+ trb_write_td_size(trb, req->trbs_needed - req->trbs_queued - 1);
+
+ if (req->trbs_queued == req->trbs_needed - 1 ||
+ (req->need_zlp && req->trbs_queued == req->trbs_needed - 2))
+ trb_write_chain(trb, 0);
+ else
+ trb_write_chain(trb, 1);
+
+ trb_write_ioc(trb, ioc);
+
+ if (usb_endpoint_dir_out(ep->desc) ||
+ (usb_endpoint_xfer_control(ep->desc) &&
+ (xudc->setup_state == DATA_STAGE_RECV)))
+ trb_write_isp(trb, 1);
+ else
+ trb_write_isp(trb, 0);
+
+ if (usb_endpoint_xfer_control(ep->desc)) {
+ if (xudc->setup_state == DATA_STAGE_XFER ||
+ xudc->setup_state == DATA_STAGE_RECV)
+ trb_write_type(trb, TRB_TYPE_DATA_STAGE);
+ else
+ trb_write_type(trb, TRB_TYPE_STATUS_STAGE);
+
+ if (xudc->setup_state == DATA_STAGE_XFER ||
+ xudc->setup_state == STATUS_STAGE_XFER)
+ trb_write_data_stage_dir(trb, 1);
+ else
+ trb_write_data_stage_dir(trb, 0);
+ } else if (usb_endpoint_xfer_isoc(ep->desc)) {
+ trb_write_type(trb, TRB_TYPE_ISOCH);
+ trb_write_sia(trb, 1);
+ trb_write_frame_id(trb, 0);
+ trb_write_tlbpc(trb, 0);
+ } else if (usb_ss_max_streams(ep->comp_desc)) {
+ trb_write_type(trb, TRB_TYPE_STREAM);
+ trb_write_stream_id(trb, req->usb_req.stream_id);
+ } else {
+ trb_write_type(trb, TRB_TYPE_NORMAL);
+ trb_write_stream_id(trb, 0);
+ }
+
+ trb_write_cycle(trb, ep->pcs);
+
+ req->trbs_queued++;
+ req->buf_queued += len;
+
+ dump_trb(xudc, "TRANSFER", trb);
+}
+
+static unsigned int tegra_xudc_queue_trbs(struct tegra_xudc_ep *ep,
+ struct tegra_xudc_request *req)
+{
+ unsigned int i, count, available;
+ bool wait_td = false;
+
+ available = ep_available_trbs(ep);
+ count = req->trbs_needed - req->trbs_queued;
+ if (available < count) {
+ count = available;
+ ep->ring_full = true;
+ }
+
+ /*
+ * To generate a zero-length packet on the USB bus, software must
+ * schedule a standalone zero-length TD. The hardware requires the
+ * TDs to be scheduled differently for different endpoint types.
+ *
+ * For control endpoints:
+ * - Data stage TD (IOC = 1, CH = 0)
+ * - Ring doorbell and wait for the transfer event
+ * - Data stage TD for the ZLP (IOC = 1, CH = 0)
+ * - Ring doorbell
+ *
+ * For bulk and interrupt endpoints:
+ * - Normal transfer TD (IOC = 0, CH = 0)
+ * - Normal transfer TD for the ZLP (IOC = 1, CH = 0)
+ * - Ring doorbell
+ */
+
+ if (req->need_zlp && usb_endpoint_xfer_control(ep->desc) && count > 1)
+ wait_td = true;
+
+ if (!req->first_trb)
+ req->first_trb = &ep->transfer_ring[ep->enq_ptr];
+
+ for (i = 0; i < count; i++) {
+ struct tegra_xudc_trb *trb = &ep->transfer_ring[ep->enq_ptr];
+ bool ioc = false;
+
+ if ((i == count - 1) || (wait_td && i == count - 2))
+ ioc = true;
+
+ tegra_xudc_queue_one_trb(ep, req, trb, ioc);
+ req->last_trb = trb;
+
+ ep->enq_ptr++;
+ if (ep->enq_ptr == XUDC_TRANSFER_RING_SIZE - 1) {
+ trb = &ep->transfer_ring[ep->enq_ptr];
+ trb_write_cycle(trb, ep->pcs);
+ ep->pcs = !ep->pcs;
+ ep->enq_ptr = 0;
+ }
+
+ if (ioc)
+ break;
+ }
+
+ return count;
+}
+
+static void tegra_xudc_ep_ring_doorbell(struct tegra_xudc_ep *ep)
+{
+ struct tegra_xudc *xudc = ep->xudc;
+ u32 val;
+
+ if (list_empty(&ep->queue))
+ return;
+
+ val = DB_TARGET(ep->index);
+ if (usb_endpoint_xfer_control(ep->desc)) {
+ val |= DB_STREAMID(xudc->setup_seq_num);
+ } else if (usb_ss_max_streams(ep->comp_desc) > 0) {
+ struct tegra_xudc_request *req;
+
+ /* Don't ring doorbell if the stream has been rejected. */
+ if (ep->stream_rejected)
+ return;
+
+ req = list_first_entry(&ep->queue, struct tegra_xudc_request,
+ list);
+ val |= DB_STREAMID(req->usb_req.stream_id);
+ }
+
+ dev_dbg(xudc->dev, "ring doorbell: %#x\n", val);
+ xudc_writel(xudc, val, DB);
+}
+
+static void tegra_xudc_ep_kick_queue(struct tegra_xudc_ep *ep)
+{
+ struct tegra_xudc_request *req;
+ bool trbs_queued = false;
+
+ list_for_each_entry(req, &ep->queue, list) {
+ if (ep->ring_full)
+ break;
+
+ if (tegra_xudc_queue_trbs(ep, req) > 0)
+ trbs_queued = true;
+ }
+
+ if (trbs_queued)
+ tegra_xudc_ep_ring_doorbell(ep);
+}
+
+static int
+__tegra_xudc_ep_queue(struct tegra_xudc_ep *ep, struct tegra_xudc_request *req)
+{
+ struct tegra_xudc *xudc = ep->xudc;
+ int err;
+
+ if (usb_endpoint_xfer_control(ep->desc) && !list_empty(&ep->queue)) {
+ dev_err(xudc->dev, "control EP has pending transfers\n");
+ return -EINVAL;
+ }
+
+ if (usb_endpoint_xfer_control(ep->desc)) {
+ err = usb_gadget_map_request(&xudc->gadget, &req->usb_req,
+ (xudc->setup_state ==
+ DATA_STAGE_XFER));
+ } else {
+ err = usb_gadget_map_request(&xudc->gadget, &req->usb_req,
+ usb_endpoint_dir_in(ep->desc));
+ }
+
+ if (err < 0) {
+ dev_err(xudc->dev, "failed to map request: %d\n", err);
+ return err;
+ }
+
+ req->first_trb = NULL;
+ req->last_trb = NULL;
+ req->buf_queued = 0;
+ req->trbs_queued = 0;
+ req->need_zlp = false;
+ req->trbs_needed = DIV_ROUND_UP(req->usb_req.length,
+ XUDC_TRB_MAX_BUFFER_SIZE);
+ if (req->usb_req.length == 0)
+ req->trbs_needed++;
+
+ if (!usb_endpoint_xfer_isoc(ep->desc) &&
+ req->usb_req.zero && req->usb_req.length &&
+ ((req->usb_req.length % ep->usb_ep.maxpacket) == 0)) {
+ req->trbs_needed++;
+ req->need_zlp = true;
+ }
+
+ req->usb_req.status = -EINPROGRESS;
+ req->usb_req.actual = 0;
+
+ list_add_tail(&req->list, &ep->queue);
+
+ tegra_xudc_ep_kick_queue(ep);
+
+ return 0;
+}
+
+static int
+tegra_xudc_ep_queue(struct usb_ep *usb_ep, struct usb_request *usb_req,
+ gfp_t gfp)
+{
+ struct tegra_xudc_request *req;
+ struct tegra_xudc_ep *ep;
+ struct tegra_xudc *xudc;
+ unsigned long flags;
+ int ret;
+
+ if (!usb_ep || !usb_req)
+ return -EINVAL;
+
+ ep = to_xudc_ep(usb_ep);
+ req = to_xudc_req(usb_req);
+ xudc = ep->xudc;
+
+ spin_lock_irqsave(&xudc->lock, flags);
+ if (xudc->powergated || !ep->desc) {
+ ret = -ESHUTDOWN;
+ goto unlock;
+ }
+
+ ret = __tegra_xudc_ep_queue(ep, req);
+unlock:
+ spin_unlock_irqrestore(&xudc->lock, flags);
+
+ return ret;
+}
+
+static void squeeze_transfer_ring(struct tegra_xudc_ep *ep,
+ struct tegra_xudc_request *req)
+{
+ struct tegra_xudc_trb *trb = req->first_trb;
+ bool pcs_enq = trb_read_cycle(trb);
+ bool pcs;
+
+ /*
+ * Clear out all the TRBs that are part of, or follow, the cancelled
+ * request, restoring each TRB's cycle bit to its un-enqueued state.
+ */
+ while (trb != &ep->transfer_ring[ep->enq_ptr]) {
+ pcs = trb_read_cycle(trb);
+ memset(trb, 0, sizeof(*trb));
+ trb_write_cycle(trb, !pcs);
+ trb++;
+
+ if (trb_read_type(trb) == TRB_TYPE_LINK)
+ trb = ep->transfer_ring;
+ }
+
+ /* Requests will be re-queued at the start of the cancelled request. */
+ ep->enq_ptr = req->first_trb - ep->transfer_ring;
+ /*
+ * Retrieve the correct cycle bit state from the first trb of
+ * the cancelled request.
+ */
+ ep->pcs = pcs_enq;
+ ep->ring_full = false;
+ list_for_each_entry_continue(req, &ep->queue, list) {
+ req->usb_req.status = -EINPROGRESS;
+ req->usb_req.actual = 0;
+
+ req->first_trb = NULL;
+ req->last_trb = NULL;
+ req->buf_queued = 0;
+ req->trbs_queued = 0;
+ }
+}
+
+/*
+ * Determine if the given TRB is in the range [first trb, last trb] for the
+ * given request.
+ */
+static bool trb_in_request(struct tegra_xudc_ep *ep,
+ struct tegra_xudc_request *req,
+ struct tegra_xudc_trb *trb)
+{
+ dev_dbg(ep->xudc->dev, "%s: request %p -> %p; trb %p\n", __func__,
+ req->first_trb, req->last_trb, trb);
+
+ if (trb >= req->first_trb && (trb <= req->last_trb ||
+ req->last_trb < req->first_trb))
+ return true;
+
+ if (trb < req->first_trb && trb <= req->last_trb &&
+ req->last_trb < req->first_trb)
+ return true;
+
+ return false;
+}
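+
+/*
+ * The second check above handles requests that wrap around the end of the
+ * ring: in that case last_trb < first_trb, and a TRB below first_trb is
+ * still part of the request as long as it has not passed last_trb.
+ */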
+
+/*
+ * Determine if the given TRB is in the range [EP enqueue pointer, first TRB)
+ * for the given endpoint and request.
+ */
+static bool trb_before_request(struct tegra_xudc_ep *ep,
+ struct tegra_xudc_request *req,
+ struct tegra_xudc_trb *trb)
+{
+ struct tegra_xudc_trb *enq_trb = &ep->transfer_ring[ep->enq_ptr];
+
+ dev_dbg(ep->xudc->dev, "%s: request %p -> %p; enq ptr: %p; trb %p\n",
+ __func__, req->first_trb, req->last_trb, enq_trb, trb);
+
+ if (trb < req->first_trb && (enq_trb <= trb ||
+ req->first_trb < enq_trb))
+ return true;
+
+ if (trb > req->first_trb && req->first_trb < enq_trb && enq_trb <= trb)
+ return true;
+
+ return false;
+}
+
+static int
+__tegra_xudc_ep_dequeue(struct tegra_xudc_ep *ep,
+ struct tegra_xudc_request *req)
+{
+ struct tegra_xudc *xudc = ep->xudc;
+ struct tegra_xudc_request *r;
+ struct tegra_xudc_trb *deq_trb;
+ bool busy, kick_queue = false;
+ int ret = 0;
+
+ /* Make sure the request is actually queued to this endpoint. */
+ list_for_each_entry(r, &ep->queue, list) {
+ if (r == req)
+ break;
+ }
+
+ if (r != req)
+ return -EINVAL;
+
+ /* Request hasn't been queued in the transfer ring yet. */
+ if (!req->trbs_queued) {
+ tegra_xudc_req_done(ep, req, -ECONNRESET);
+ return 0;
+ }
+
+ /* Halt DMA for this endpoint. */
+ if (ep_ctx_read_state(ep->context) == EP_STATE_RUNNING) {
+ ep_pause(xudc, ep->index);
+ ep_wait_for_inactive(xudc, ep->index);
+ }
+
+ deq_trb = trb_phys_to_virt(ep, ep_ctx_read_deq_ptr(ep->context));
+ /* Is the hardware processing the TRB at the dequeue pointer? */
+ busy = (trb_read_cycle(deq_trb) == ep_ctx_read_dcs(ep->context));
+
+ if (trb_in_request(ep, req, deq_trb) && busy) {
+ /*
+ * Request has been partially completed or it hasn't
+ * started processing yet.
+ */
+ dma_addr_t deq_ptr;
+
+ squeeze_transfer_ring(ep, req);
+
+ req->usb_req.actual = ep_ctx_read_edtla(ep->context);
+ tegra_xudc_req_done(ep, req, -ECONNRESET);
+ kick_queue = true;
+
+ /* EDTLA is > 0: request has been partially completed */
+ if (req->usb_req.actual > 0) {
+ /*
+ * Abort the pending transfer and update the dequeue
+ * pointer
+ */
+ ep_ctx_write_edtla(ep->context, 0);
+ ep_ctx_write_partial_td(ep->context, 0);
+ ep_ctx_write_data_offset(ep->context, 0);
+
+ deq_ptr = trb_virt_to_phys(ep,
+ &ep->transfer_ring[ep->enq_ptr]);
+
+ if (dma_mapping_error(xudc->dev, deq_ptr)) {
+ ret = -EINVAL;
+ } else {
+ ep_ctx_write_deq_ptr(ep->context, deq_ptr);
+ ep_ctx_write_dcs(ep->context, ep->pcs);
+ ep_reload(xudc, ep->index);
+ }
+ }
+ } else if (trb_before_request(ep, req, deq_trb) && busy) {
+ /* Request hasn't started processing yet. */
+ squeeze_transfer_ring(ep, req);
+
+ tegra_xudc_req_done(ep, req, -ECONNRESET);
+ kick_queue = true;
+ } else {
+ /*
+ * Request has completed, but we haven't processed the
+ * completion event yet.
+ */
+ tegra_xudc_req_done(ep, req, -ECONNRESET);
+ ret = -EINVAL;
+ }
+
+ /* Resume the endpoint. */
+ ep_unpause(xudc, ep->index);
+
+ if (kick_queue)
+ tegra_xudc_ep_kick_queue(ep);
+
+ return ret;
+}
+
+static int
+tegra_xudc_ep_dequeue(struct usb_ep *usb_ep, struct usb_request *usb_req)
+{
+ struct tegra_xudc_request *req;
+ struct tegra_xudc_ep *ep;
+ struct tegra_xudc *xudc;
+ unsigned long flags;
+ int ret;
+
+ if (!usb_ep || !usb_req)
+ return -EINVAL;
+
+ ep = to_xudc_ep(usb_ep);
+ req = to_xudc_req(usb_req);
+ xudc = ep->xudc;
+
+ spin_lock_irqsave(&xudc->lock, flags);
+
+ if (xudc->powergated || !ep->desc) {
+ ret = -ESHUTDOWN;
+ goto unlock;
+ }
+
+ ret = __tegra_xudc_ep_dequeue(ep, req);
+unlock:
+ spin_unlock_irqrestore(&xudc->lock, flags);
+
+ return ret;
+}
+
+static int __tegra_xudc_ep_set_halt(struct tegra_xudc_ep *ep, bool halt)
+{
+ struct tegra_xudc *xudc = ep->xudc;
+
+ if (!ep->desc)
+ return -EINVAL;
+
+ if (usb_endpoint_xfer_isoc(ep->desc)) {
+ dev_err(xudc->dev, "can't halt isoc EP\n");
+ return -ENOTSUPP;
+ }
+
+ if (!!(xudc_readl(xudc, EP_HALT) & BIT(ep->index)) == halt) {
+ dev_dbg(xudc->dev, "EP %u already %s\n", ep->index,
+ halt ? "halted" : "not halted");
+ return 0;
+ }
+
+ if (halt) {
+ ep_halt(xudc, ep->index);
+ } else {
+ ep_ctx_write_state(ep->context, EP_STATE_DISABLED);
+
+ ep_reload(xudc, ep->index);
+
+ ep_ctx_write_state(ep->context, EP_STATE_RUNNING);
+ ep_ctx_write_seq_num(ep->context, 0);
+
+ ep_reload(xudc, ep->index);
+ ep_unpause(xudc, ep->index);
+ ep_unhalt(xudc, ep->index);
+
+ tegra_xudc_ep_ring_doorbell(ep);
+ }
+
+ return 0;
+}
+
+static int tegra_xudc_ep_set_halt(struct usb_ep *usb_ep, int value)
+{
+ struct tegra_xudc_ep *ep;
+ struct tegra_xudc *xudc;
+ unsigned long flags;
+ int ret;
+
+ if (!usb_ep)
+ return -EINVAL;
+
+ ep = to_xudc_ep(usb_ep);
+ xudc = ep->xudc;
+
+ spin_lock_irqsave(&xudc->lock, flags);
+ if (xudc->powergated) {
+ ret = -ESHUTDOWN;
+ goto unlock;
+ }
+
+ if (value && usb_endpoint_dir_in(ep->desc) &&
+ !list_empty(&ep->queue)) {
+ dev_err(xudc->dev, "can't halt EP with requests pending\n");
+ ret = -EAGAIN;
+ goto unlock;
+ }
+
+ ret = __tegra_xudc_ep_set_halt(ep, value);
+unlock:
+ spin_unlock_irqrestore(&xudc->lock, flags);
+
+ return ret;
+}
+
+static void tegra_xudc_ep_context_setup(struct tegra_xudc_ep *ep)
+{
+ const struct usb_endpoint_descriptor *desc = ep->desc;
+ const struct usb_ss_ep_comp_descriptor *comp_desc = ep->comp_desc;
+ struct tegra_xudc *xudc = ep->xudc;
+ u16 maxpacket, maxburst = 0, esit = 0;
+ u32 val;
+
+ maxpacket = usb_endpoint_maxp(desc) & 0x7ff;
+ if (xudc->gadget.speed == USB_SPEED_SUPER) {
+ if (!usb_endpoint_xfer_control(desc))
+ maxburst = comp_desc->bMaxBurst;
+
+ if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc))
+ esit = le16_to_cpu(comp_desc->wBytesPerInterval);
+ } else if ((xudc->gadget.speed < USB_SPEED_SUPER) &&
+ (usb_endpoint_xfer_int(desc) ||
+ usb_endpoint_xfer_isoc(desc))) {
+ if (xudc->gadget.speed == USB_SPEED_HIGH) {
+ maxburst = (usb_endpoint_maxp(desc) >> 11) & 0x3;
+ if (maxburst == 0x3) {
+ dev_warn(xudc->dev,
+ "invalid endpoint maxburst\n");
+ maxburst = 0x2;
+ }
+ }
+ esit = maxpacket * (maxburst + 1);
+ }
+
+ memset(ep->context, 0, sizeof(*ep->context));
+
+ ep_ctx_write_state(ep->context, EP_STATE_RUNNING);
+ ep_ctx_write_interval(ep->context, desc->bInterval);
+ if (xudc->gadget.speed == USB_SPEED_SUPER) {
+ if (usb_endpoint_xfer_isoc(desc)) {
+ ep_ctx_write_mult(ep->context,
+ comp_desc->bmAttributes & 0x3);
+ }
+
+ if (usb_endpoint_xfer_bulk(desc)) {
+ ep_ctx_write_max_pstreams(ep->context,
+ comp_desc->bmAttributes &
+ 0x1f);
+ ep_ctx_write_lsa(ep->context, 1);
+ }
+ }
+
+ if (!usb_endpoint_xfer_control(desc) && usb_endpoint_dir_out(desc))
+ val = usb_endpoint_type(desc);
+ else
+ val = usb_endpoint_type(desc) + EP_TYPE_CONTROL;
+
+ ep_ctx_write_type(ep->context, val);
+ ep_ctx_write_cerr(ep->context, 0x3);
+ ep_ctx_write_max_packet_size(ep->context, maxpacket);
+ ep_ctx_write_max_burst_size(ep->context, maxburst);
+
+ ep_ctx_write_deq_ptr(ep->context, ep->transfer_ring_phys);
+ ep_ctx_write_dcs(ep->context, ep->pcs);
+
+ /* Select a reasonable average TRB length based on endpoint type. */
+ switch (usb_endpoint_type(desc)) {
+ case USB_ENDPOINT_XFER_CONTROL:
+ val = 8;
+ break;
+ case USB_ENDPOINT_XFER_INT:
+ val = 1024;
+ break;
+ case USB_ENDPOINT_XFER_BULK:
+ case USB_ENDPOINT_XFER_ISOC:
+ default:
+ val = 3072;
+ break;
+ }
+
+ ep_ctx_write_avg_trb_len(ep->context, val);
+ ep_ctx_write_max_esit_payload(ep->context, esit);
+
+ ep_ctx_write_cerrcnt(ep->context, 0x3);
+}
+
+static void setup_link_trb(struct tegra_xudc_ep *ep,
+ struct tegra_xudc_trb *trb)
+{
+ trb_write_data_ptr(trb, ep->transfer_ring_phys);
+ trb_write_type(trb, TRB_TYPE_LINK);
+ trb_write_toggle_cycle(trb, 1);
+}
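+
+/*
+ * The link TRB occupies the last slot of the transfer ring and points back
+ * to the ring's base; toggle_cycle tells the controller to invert its
+ * consumer cycle state every time it follows the link, matching the
+ * producer-side pcs flip in tegra_xudc_queue_trbs().
+ */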
+
+static int __tegra_xudc_ep_disable(struct tegra_xudc_ep *ep)
+{
+ struct tegra_xudc *xudc = ep->xudc;
+
+ if (ep_ctx_read_state(ep->context) == EP_STATE_DISABLED) {
+ dev_err(xudc->dev, "endpoint %u already disabled\n",
+ ep->index);
+ return -EINVAL;
+ }
+
+ ep_ctx_write_state(ep->context, EP_STATE_DISABLED);
+
+ ep_reload(xudc, ep->index);
+
+ tegra_xudc_ep_nuke(ep, -ESHUTDOWN);
+
+ xudc->nr_enabled_eps--;
+ if (usb_endpoint_xfer_isoc(ep->desc))
+ xudc->nr_isoch_eps--;
+
+ ep->desc = NULL;
+ ep->comp_desc = NULL;
+
+ memset(ep->context, 0, sizeof(*ep->context));
+
+ ep_unpause(xudc, ep->index);
+ ep_unhalt(xudc, ep->index);
+ if (xudc_readl(xudc, EP_STOPPED) & BIT(ep->index))
+ xudc_writel(xudc, BIT(ep->index), EP_STOPPED);
+
+ /*
+ * If this is the last endpoint disabled in a de-configure request,
+ * switch back to address state.
+ */
+ if ((xudc->device_state == USB_STATE_CONFIGURED) &&
+ (xudc->nr_enabled_eps == 1)) {
+ u32 val;
+
+ xudc->device_state = USB_STATE_ADDRESS;
+ usb_gadget_set_state(&xudc->gadget, xudc->device_state);
+
+ val = xudc_readl(xudc, CTRL);
+ val &= ~CTRL_RUN;
+ xudc_writel(xudc, val, CTRL);
+ }
+
+ dev_info(xudc->dev, "ep %u disabled\n", ep->index);
+
+ return 0;
+}
+
+static int tegra_xudc_ep_disable(struct usb_ep *usb_ep)
+{
+ struct tegra_xudc_ep *ep;
+ struct tegra_xudc *xudc;
+ unsigned long flags;
+ int ret;
+
+ if (!usb_ep)
+ return -EINVAL;
+
+ ep = to_xudc_ep(usb_ep);
+ xudc = ep->xudc;
+
+ spin_lock_irqsave(&xudc->lock, flags);
+ if (xudc->powergated) {
+ ret = -ESHUTDOWN;
+ goto unlock;
+ }
+
+ ret = __tegra_xudc_ep_disable(ep);
+unlock:
+ spin_unlock_irqrestore(&xudc->lock, flags);
+
+ return ret;
+}
+
+static int __tegra_xudc_ep_enable(struct tegra_xudc_ep *ep,
+ const struct usb_endpoint_descriptor *desc)
+{
+ struct tegra_xudc *xudc = ep->xudc;
+ unsigned int i;
+ u32 val;
+
+ if (xudc->gadget.speed == USB_SPEED_SUPER &&
+ !usb_endpoint_xfer_control(desc) && !ep->usb_ep.comp_desc)
+ return -EINVAL;
+
+ /* Disable the EP if it is not disabled */
+ if (ep_ctx_read_state(ep->context) != EP_STATE_DISABLED)
+ __tegra_xudc_ep_disable(ep);
+
+ ep->desc = desc;
+ ep->comp_desc = ep->usb_ep.comp_desc;
+
+ if (usb_endpoint_xfer_isoc(desc)) {
+ if (xudc->nr_isoch_eps > XUDC_MAX_ISOCH_EPS) {
+ dev_err(xudc->dev, "too many isoch endpoints\n");
+ return -EBUSY;
+ }
+ xudc->nr_isoch_eps++;
+ }
+
+ memset(ep->transfer_ring, 0, XUDC_TRANSFER_RING_SIZE *
+ sizeof(*ep->transfer_ring));
+ setup_link_trb(ep, &ep->transfer_ring[XUDC_TRANSFER_RING_SIZE - 1]);
+
+ ep->enq_ptr = 0;
+ ep->deq_ptr = 0;
+ ep->pcs = true;
+ ep->ring_full = false;
+ xudc->nr_enabled_eps++;
+
+ tegra_xudc_ep_context_setup(ep);
+
+ /*
+ * No need to reload and un-halt EP0. This will be done automatically
+ * once a valid SETUP packet is received.
+ */
+ if (usb_endpoint_xfer_control(desc))
+ goto out;
+
+ /*
+ * Transition to configured state once the first non-control
+ * endpoint is enabled.
+ */
+ if (xudc->device_state == USB_STATE_ADDRESS) {
+ val = xudc_readl(xudc, CTRL);
+ val |= CTRL_RUN;
+ xudc_writel(xudc, val, CTRL);
+
+ xudc->device_state = USB_STATE_CONFIGURED;
+ usb_gadget_set_state(&xudc->gadget, xudc->device_state);
+ }
+
+ if (usb_endpoint_xfer_isoc(desc)) {
+ /*
+ * Pause all bulk endpoints when enabling an isoch endpoint
+ * to ensure the isoch endpoint is allocated enough bandwidth.
+ */
+ for (i = 0; i < ARRAY_SIZE(xudc->ep); i++) {
+ if (xudc->ep[i].desc &&
+ usb_endpoint_xfer_bulk(xudc->ep[i].desc))
+ ep_pause(xudc, i);
+ }
+ }
+
+ ep_reload(xudc, ep->index);
+ ep_unpause(xudc, ep->index);
+ ep_unhalt(xudc, ep->index);
+
+ if (usb_endpoint_xfer_isoc(desc)) {
+ for (i = 0; i < ARRAY_SIZE(xudc->ep); i++) {
+ if (xudc->ep[i].desc &&
+ usb_endpoint_xfer_bulk(xudc->ep[i].desc))
+ ep_unpause(xudc, i);
+ }
+ }
+
+out:
+ dev_info(xudc->dev, "EP %u (type: %s, dir: %s) enabled\n", ep->index,
+ usb_ep_type_string(usb_endpoint_type(ep->desc)),
+ usb_endpoint_dir_in(ep->desc) ? "in" : "out");
+
+ return 0;
+}
+
+static int tegra_xudc_ep_enable(struct usb_ep *usb_ep,
+ const struct usb_endpoint_descriptor *desc)
+{
+ struct tegra_xudc_ep *ep;
+ struct tegra_xudc *xudc;
+ unsigned long flags;
+ int ret;
+
+ if (!usb_ep || !desc || (desc->bDescriptorType != USB_DT_ENDPOINT))
+ return -EINVAL;
+
+ ep = to_xudc_ep(usb_ep);
+ xudc = ep->xudc;
+
+ spin_lock_irqsave(&xudc->lock, flags);
+ if (xudc->powergated) {
+ ret = -ESHUTDOWN;
+ goto unlock;
+ }
+
+ ret = __tegra_xudc_ep_enable(ep, desc);
+unlock:
+ spin_unlock_irqrestore(&xudc->lock, flags);
+
+ return ret;
+}
+
+static struct usb_request *
+tegra_xudc_ep_alloc_request(struct usb_ep *usb_ep, gfp_t gfp)
+{
+ struct tegra_xudc_request *req;
+
+ req = kzalloc(sizeof(*req), gfp);
+ if (!req)
+ return NULL;
+
+ INIT_LIST_HEAD(&req->list);
+
+ return &req->usb_req;
+}
+
+static void tegra_xudc_ep_free_request(struct usb_ep *usb_ep,
+ struct usb_request *usb_req)
+{
+ struct tegra_xudc_request *req = to_xudc_req(usb_req);
+
+ kfree(req);
+}
+
+static struct usb_ep_ops tegra_xudc_ep_ops = {
+ .enable = tegra_xudc_ep_enable,
+ .disable = tegra_xudc_ep_disable,
+ .alloc_request = tegra_xudc_ep_alloc_request,
+ .free_request = tegra_xudc_ep_free_request,
+ .queue = tegra_xudc_ep_queue,
+ .dequeue = tegra_xudc_ep_dequeue,
+ .set_halt = tegra_xudc_ep_set_halt,
+};
+
+static int tegra_xudc_ep0_enable(struct usb_ep *usb_ep,
+ const struct usb_endpoint_descriptor *desc)
+{
+ return -EBUSY;
+}
+
+static int tegra_xudc_ep0_disable(struct usb_ep *usb_ep)
+{
+ return -EBUSY;
+}
+
+static struct usb_ep_ops tegra_xudc_ep0_ops = {
+ .enable = tegra_xudc_ep0_enable,
+ .disable = tegra_xudc_ep0_disable,
+ .alloc_request = tegra_xudc_ep_alloc_request,
+ .free_request = tegra_xudc_ep_free_request,
+ .queue = tegra_xudc_ep_queue,
+ .dequeue = tegra_xudc_ep_dequeue,
+ .set_halt = tegra_xudc_ep_set_halt,
+};
+
+static int tegra_xudc_gadget_get_frame(struct usb_gadget *gadget)
+{
+ struct tegra_xudc *xudc = to_xudc(gadget);
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&xudc->lock, flags);
+ if (xudc->powergated) {
+ ret = -ESHUTDOWN;
+ goto unlock;
+ }
+
+ ret = (xudc_readl(xudc, MFINDEX) & MFINDEX_FRAME_MASK) >>
+ MFINDEX_FRAME_SHIFT;
+unlock:
+ spin_unlock_irqrestore(&xudc->lock, flags);
+
+ return ret;
+}
+
+static void tegra_xudc_resume_device_state(struct tegra_xudc *xudc)
+{
+ unsigned int i;
+ u32 val;
+
+ ep_unpause_all(xudc);
+
+ /* Direct link to U0. */
+ val = xudc_readl(xudc, PORTSC);
+ if (((val & PORTSC_PLS_MASK) >> PORTSC_PLS_SHIFT) != PORTSC_PLS_U0) {
+ val &= ~(PORTSC_CHANGE_MASK | PORTSC_PLS_MASK);
+ val |= PORTSC_LWS | PORTSC_PLS(PORTSC_PLS_U0);
+ xudc_writel(xudc, val, PORTSC);
+ }
+
+ if (xudc->device_state == USB_STATE_SUSPENDED) {
+ xudc->device_state = xudc->resume_state;
+ usb_gadget_set_state(&xudc->gadget, xudc->device_state);
+ xudc->resume_state = 0;
+ }
+
+ /*
+ * Doorbells may be dropped if they are sent too soon (< ~200ns)
+ * after unpausing the endpoint. Wait for 500ns just to be safe.
+ */
+ ndelay(500);
+ for (i = 0; i < ARRAY_SIZE(xudc->ep); i++)
+ tegra_xudc_ep_ring_doorbell(&xudc->ep[i]);
+}
+
+static int tegra_xudc_gadget_wakeup(struct usb_gadget *gadget)
+{
+ struct tegra_xudc *xudc = to_xudc(gadget);
+ unsigned long flags;
+ int ret = 0;
+ u32 val;
+
+ spin_lock_irqsave(&xudc->lock, flags);
+
+ if (xudc->powergated) {
+ ret = -ESHUTDOWN;
+ goto unlock;
+ }
+ val = xudc_readl(xudc, PORTPM);
+ dev_dbg(xudc->dev, "%s: PORTPM=%#x, speed=%x\n", __func__,
+ val, gadget->speed);
+
+ if (((xudc->gadget.speed <= USB_SPEED_HIGH) &&
+ (val & PORTPM_RWE)) ||
+ ((xudc->gadget.speed == USB_SPEED_SUPER) &&
+ (val & PORTPM_FRWE))) {
+ tegra_xudc_resume_device_state(xudc);
+
+ /* Send Device Notification packet. */
+ if (xudc->gadget.speed == USB_SPEED_SUPER) {
+ val = DEVNOTIF_LO_TYPE(DEVNOTIF_LO_TYPE_FUNCTION_WAKE)
+ | DEVNOTIF_LO_TRIG;
+ xudc_writel(xudc, 0, DEVNOTIF_HI);
+ xudc_writel(xudc, val, DEVNOTIF_LO);
+ }
+ }
+
+unlock:
+ dev_dbg(xudc->dev, "%s: ret value is %d", __func__, ret);
+ spin_unlock_irqrestore(&xudc->lock, flags);
+
+ return ret;
+}
+
+static int tegra_xudc_gadget_pullup(struct usb_gadget *gadget, int is_on)
+{
+ struct tegra_xudc *xudc = to_xudc(gadget);
+ unsigned long flags;
+ u32 val;
+
+ pm_runtime_get_sync(xudc->dev);
+
+ spin_lock_irqsave(&xudc->lock, flags);
+
+ if (is_on != xudc->pullup) {
+ val = xudc_readl(xudc, CTRL);
+ if (is_on)
+ val |= CTRL_ENABLE;
+ else
+ val &= ~CTRL_ENABLE;
+ xudc_writel(xudc, val, CTRL);
+ }
+
+ xudc->pullup = is_on;
+ dev_dbg(xudc->dev, "%s: pullup:%d", __func__, is_on);
+
+ spin_unlock_irqrestore(&xudc->lock, flags);
+
+ pm_runtime_put(xudc->dev);
+
+ return 0;
+}
+
+static int tegra_xudc_gadget_start(struct usb_gadget *gadget,
+ struct usb_gadget_driver *driver)
+{
+ struct tegra_xudc *xudc = to_xudc(gadget);
+ unsigned long flags;
+ u32 val;
+ int ret;
+
+ if (!driver)
+ return -EINVAL;
+
+ pm_runtime_get_sync(xudc->dev);
+
+ spin_lock_irqsave(&xudc->lock, flags);
+
+ if (xudc->driver) {
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ xudc->setup_state = WAIT_FOR_SETUP;
+ xudc->device_state = USB_STATE_DEFAULT;
+ usb_gadget_set_state(&xudc->gadget, xudc->device_state);
+
+ ret = __tegra_xudc_ep_enable(&xudc->ep[0], &tegra_xudc_ep0_desc);
+ if (ret < 0)
+ goto unlock;
+
+ val = xudc_readl(xudc, CTRL);
+ val |= CTRL_IE | CTRL_LSE;
+ xudc_writel(xudc, val, CTRL);
+
+ val = xudc_readl(xudc, PORTHALT);
+ val |= PORTHALT_STCHG_INTR_EN;
+ xudc_writel(xudc, val, PORTHALT);
+
+ if (xudc->pullup) {
+ val = xudc_readl(xudc, CTRL);
+ val |= CTRL_ENABLE;
+ xudc_writel(xudc, val, CTRL);
+ }
+
+ xudc->driver = driver;
+unlock:
+ dev_dbg(xudc->dev, "%s: ret value is %d", __func__, ret);
+ spin_unlock_irqrestore(&xudc->lock, flags);
+
+ pm_runtime_put(xudc->dev);
+
+ return ret;
+}
+
+static int tegra_xudc_gadget_stop(struct usb_gadget *gadget)
+{
+ struct tegra_xudc *xudc = to_xudc(gadget);
+ unsigned long flags;
+ u32 val;
+
+ pm_runtime_get_sync(xudc->dev);
+
+ spin_lock_irqsave(&xudc->lock, flags);
+
+ val = xudc_readl(xudc, CTRL);
+ val &= ~(CTRL_IE | CTRL_ENABLE);
+ xudc_writel(xudc, val, CTRL);
+
+ __tegra_xudc_ep_disable(&xudc->ep[0]);
+
+ xudc->driver = NULL;
+ dev_dbg(xudc->dev, "Gadget stopped");
+
+ spin_unlock_irqrestore(&xudc->lock, flags);
+
+ pm_runtime_put(xudc->dev);
+
+ return 0;
+}
+
+static int tegra_xudc_set_selfpowered(struct usb_gadget *gadget, int is_on)
+{
+ struct tegra_xudc *xudc = to_xudc(gadget);
+
+ dev_dbg(xudc->dev, "%s: %d\n", __func__, is_on);
+ xudc->selfpowered = !!is_on;
+
+ return 0;
+}
+
+static struct usb_gadget_ops tegra_xudc_gadget_ops = {
+ .get_frame = tegra_xudc_gadget_get_frame,
+ .wakeup = tegra_xudc_gadget_wakeup,
+ .pullup = tegra_xudc_gadget_pullup,
+ .udc_start = tegra_xudc_gadget_start,
+ .udc_stop = tegra_xudc_gadget_stop,
+ .set_selfpowered = tegra_xudc_set_selfpowered,
+};
+
+static void no_op_complete(struct usb_ep *ep, struct usb_request *req)
+{
+}
+
+static int
+tegra_xudc_ep0_queue_status(struct tegra_xudc *xudc,
+ void (*cmpl)(struct usb_ep *, struct usb_request *))
+{
+ xudc->ep0_req->usb_req.buf = NULL;
+ xudc->ep0_req->usb_req.dma = 0;
+ xudc->ep0_req->usb_req.length = 0;
+ xudc->ep0_req->usb_req.complete = cmpl;
+ xudc->ep0_req->usb_req.context = xudc;
+
+ return __tegra_xudc_ep_queue(&xudc->ep[0], xudc->ep0_req);
+}
+
+static int
+tegra_xudc_ep0_queue_data(struct tegra_xudc *xudc, void *buf, size_t len,
+ void (*cmpl)(struct usb_ep *, struct usb_request *))
+{
+ xudc->ep0_req->usb_req.buf = buf;
+ xudc->ep0_req->usb_req.length = len;
+ xudc->ep0_req->usb_req.complete = cmpl;
+ xudc->ep0_req->usb_req.context = xudc;
+
+ return __tegra_xudc_ep_queue(&xudc->ep[0], xudc->ep0_req);
+}
+
+static void tegra_xudc_ep0_req_done(struct tegra_xudc *xudc)
+{
+ switch (xudc->setup_state) {
+ case DATA_STAGE_XFER:
+ xudc->setup_state = STATUS_STAGE_RECV;
+ tegra_xudc_ep0_queue_status(xudc, no_op_complete);
+ break;
+ case DATA_STAGE_RECV:
+ xudc->setup_state = STATUS_STAGE_XFER;
+ tegra_xudc_ep0_queue_status(xudc, no_op_complete);
+ break;
+ default:
+ xudc->setup_state = WAIT_FOR_SETUP;
+ break;
+ }
+}
+
+static int tegra_xudc_ep0_delegate_req(struct tegra_xudc *xudc,
+ struct usb_ctrlrequest *ctrl)
+{
+ int ret;
+
+ spin_unlock(&xudc->lock);
+ ret = xudc->driver->setup(&xudc->gadget, ctrl);
+ spin_lock(&xudc->lock);
+
+ return ret;
+}
+
+static void set_feature_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct tegra_xudc *xudc = req->context;
+
+ if (xudc->test_mode_pattern) {
+ xudc_writel(xudc, xudc->test_mode_pattern, PORT_TM);
+ xudc->test_mode_pattern = 0;
+ }
+}
+
+static int tegra_xudc_ep0_set_feature(struct tegra_xudc *xudc,
+ struct usb_ctrlrequest *ctrl)
+{
+ bool set = (ctrl->bRequest == USB_REQ_SET_FEATURE);
+ u32 feature = le16_to_cpu(ctrl->wValue);
+ u32 index = le16_to_cpu(ctrl->wIndex);
+ u32 val, ep;
+ int ret;
+
+ if (le16_to_cpu(ctrl->wLength) != 0)
+ return -EINVAL;
+
+ switch (ctrl->bRequestType & USB_RECIP_MASK) {
+ case USB_RECIP_DEVICE:
+ switch (feature) {
+ case USB_DEVICE_REMOTE_WAKEUP:
+ if ((xudc->gadget.speed == USB_SPEED_SUPER) ||
+ (xudc->device_state == USB_STATE_DEFAULT))
+ return -EINVAL;
+
+ val = xudc_readl(xudc, PORTPM);
+ if (set)
+ val |= PORTPM_RWE;
+ else
+ val &= ~PORTPM_RWE;
+
+ xudc_writel(xudc, val, PORTPM);
+ break;
+ case USB_DEVICE_U1_ENABLE:
+ case USB_DEVICE_U2_ENABLE:
+ if ((xudc->device_state != USB_STATE_CONFIGURED) ||
+ (xudc->gadget.speed != USB_SPEED_SUPER))
+ return -EINVAL;
+
+ val = xudc_readl(xudc, PORTPM);
+ if ((feature == USB_DEVICE_U1_ENABLE) &&
+ xudc->soc->u1_enable) {
+ if (set)
+ val |= PORTPM_U1E;
+ else
+ val &= ~PORTPM_U1E;
+ }
+
+ if ((feature == USB_DEVICE_U2_ENABLE) &&
+ xudc->soc->u2_enable) {
+ if (set)
+ val |= PORTPM_U2E;
+ else
+ val &= ~PORTPM_U2E;
+ }
+
+ xudc_writel(xudc, val, PORTPM);
+ break;
+ case USB_DEVICE_TEST_MODE:
+ if (xudc->gadget.speed != USB_SPEED_HIGH)
+ return -EINVAL;
+
+ if (!set)
+ return -EINVAL;
+
+ xudc->test_mode_pattern = index >> 8;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ break;
+ case USB_RECIP_INTERFACE:
+ if (xudc->device_state != USB_STATE_CONFIGURED)
+ return -EINVAL;
+
+ switch (feature) {
+ case USB_INTRF_FUNC_SUSPEND:
+ if (set) {
+ val = xudc_readl(xudc, PORTPM);
+
+ if (index & USB_INTRF_FUNC_SUSPEND_RW)
+ val |= PORTPM_FRWE;
+ else
+ val &= ~PORTPM_FRWE;
+
+ xudc_writel(xudc, val, PORTPM);
+ }
+
+ return tegra_xudc_ep0_delegate_req(xudc, ctrl);
+ default:
+ return -EINVAL;
+ }
+
+ break;
+ case USB_RECIP_ENDPOINT:
+ ep = (index & USB_ENDPOINT_NUMBER_MASK) * 2 +
+ ((index & USB_DIR_IN) ? 1 : 0);
+
+ if ((xudc->device_state == USB_STATE_DEFAULT) ||
+ ((xudc->device_state == USB_STATE_ADDRESS) &&
+ (index != 0)))
+ return -EINVAL;
+
+ ret = __tegra_xudc_ep_set_halt(&xudc->ep[ep], set);
+ if (ret < 0)
+ return ret;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return tegra_xudc_ep0_queue_status(xudc, set_feature_complete);
+}
+
+static int tegra_xudc_ep0_get_status(struct tegra_xudc *xudc,
+ struct usb_ctrlrequest *ctrl)
+{
+ struct tegra_xudc_ep_context *ep_ctx;
+ u32 val, ep, index = le16_to_cpu(ctrl->wIndex);
+ u16 status = 0;
+
+ if (!(ctrl->bRequestType & USB_DIR_IN))
+ return -EINVAL;
+
+ if ((le16_to_cpu(ctrl->wValue) != 0) ||
+ (le16_to_cpu(ctrl->wLength) != 2))
+ return -EINVAL;
+
+ switch (ctrl->bRequestType & USB_RECIP_MASK) {
+ case USB_RECIP_DEVICE:
+ val = xudc_readl(xudc, PORTPM);
+
+ if (xudc->selfpowered)
+ status |= BIT(USB_DEVICE_SELF_POWERED);
+
+ if ((xudc->gadget.speed < USB_SPEED_SUPER) &&
+ (val & PORTPM_RWE))
+ status |= BIT(USB_DEVICE_REMOTE_WAKEUP);
+
+ if (xudc->gadget.speed == USB_SPEED_SUPER) {
+ if (val & PORTPM_U1E)
+ status |= BIT(USB_DEV_STAT_U1_ENABLED);
+ if (val & PORTPM_U2E)
+ status |= BIT(USB_DEV_STAT_U2_ENABLED);
+ }
+ break;
+ case USB_RECIP_INTERFACE:
+ if (xudc->gadget.speed == USB_SPEED_SUPER) {
+ status |= USB_INTRF_STAT_FUNC_RW_CAP;
+ val = xudc_readl(xudc, PORTPM);
+ if (val & PORTPM_FRWE)
+ status |= USB_INTRF_STAT_FUNC_RW;
+ }
+ break;
+ case USB_RECIP_ENDPOINT:
+ ep = (index & USB_ENDPOINT_NUMBER_MASK) * 2 +
+ ((index & USB_DIR_IN) ? 1 : 0);
+ ep_ctx = &xudc->ep_context[ep];
+
+ if ((xudc->device_state != USB_STATE_CONFIGURED) &&
+ ((xudc->device_state != USB_STATE_ADDRESS) || (ep != 0)))
+ return -EINVAL;
+
+ if (ep_ctx_read_state(ep_ctx) == EP_STATE_DISABLED)
+ return -EINVAL;
+
+ if (xudc_readl(xudc, EP_HALT) & BIT(ep))
+ status |= BIT(USB_ENDPOINT_HALT);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ xudc->status_buf = cpu_to_le16(status);
+ return tegra_xudc_ep0_queue_data(xudc, &xudc->status_buf,
+ sizeof(xudc->status_buf),
+ no_op_complete);
+}
+
+static void set_sel_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ /* No action needed for the SEL values. */
+}
+
+static int tegra_xudc_ep0_set_sel(struct tegra_xudc *xudc,
+ struct usb_ctrlrequest *ctrl)
+{
+ if (ctrl->bRequestType != (USB_DIR_OUT | USB_RECIP_DEVICE |
+ USB_TYPE_STANDARD))
+ return -EINVAL;
+
+ if (xudc->device_state == USB_STATE_DEFAULT)
+ return -EINVAL;
+
+ if ((le16_to_cpu(ctrl->wIndex) != 0) ||
+ (le16_to_cpu(ctrl->wValue) != 0) ||
+ (le16_to_cpu(ctrl->wLength) != 6))
+ return -EINVAL;
+
+ return tegra_xudc_ep0_queue_data(xudc, &xudc->sel_timing,
+ sizeof(xudc->sel_timing),
+ set_sel_complete);
+}
+
+static void set_isoch_delay_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ /* No action needed for the isochronous delay. */
+}
+
+static int tegra_xudc_ep0_set_isoch_delay(struct tegra_xudc *xudc,
+ struct usb_ctrlrequest *ctrl)
+{
+ u32 delay = le16_to_cpu(ctrl->wValue);
+
+ if (ctrl->bRequestType != (USB_DIR_OUT | USB_RECIP_DEVICE |
+ USB_TYPE_STANDARD))
+ return -EINVAL;
+
+ if ((delay > 65535) || (le16_to_cpu(ctrl->wIndex) != 0) ||
+ (le16_to_cpu(ctrl->wLength) != 0))
+ return -EINVAL;
+
+ xudc->isoch_delay = delay;
+
+ return tegra_xudc_ep0_queue_status(xudc, set_isoch_delay_complete);
+}
+
+static void set_address_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct tegra_xudc *xudc = req->context;
+
+ if ((xudc->device_state == USB_STATE_DEFAULT) &&
+ (xudc->dev_addr != 0)) {
+ xudc->device_state = USB_STATE_ADDRESS;
+ usb_gadget_set_state(&xudc->gadget, xudc->device_state);
+ } else if ((xudc->device_state == USB_STATE_ADDRESS) &&
+ (xudc->dev_addr == 0)) {
+ xudc->device_state = USB_STATE_DEFAULT;
+ usb_gadget_set_state(&xudc->gadget, xudc->device_state);
+ }
+}
+
+static int tegra_xudc_ep0_set_address(struct tegra_xudc *xudc,
+ struct usb_ctrlrequest *ctrl)
+{
+ struct tegra_xudc_ep *ep0 = &xudc->ep[0];
+ u32 val, addr = le16_to_cpu(ctrl->wValue);
+
+ if (ctrl->bRequestType != (USB_DIR_OUT | USB_RECIP_DEVICE |
+ USB_TYPE_STANDARD))
+ return -EINVAL;
+
+ if ((addr > 127) || (le16_to_cpu(ctrl->wIndex) != 0) ||
+ (le16_to_cpu(ctrl->wLength) != 0))
+ return -EINVAL;
+
+ if (xudc->device_state == USB_STATE_CONFIGURED)
+ return -EINVAL;
+
+ dev_dbg(xudc->dev, "set address: %u\n", addr);
+
+ xudc->dev_addr = addr;
+ val = xudc_readl(xudc, CTRL);
+ val &= ~(CTRL_DEVADDR_MASK);
+ val |= CTRL_DEVADDR(addr);
+ xudc_writel(xudc, val, CTRL);
+
+ ep_ctx_write_devaddr(ep0->context, addr);
+
+ return tegra_xudc_ep0_queue_status(xudc, set_address_complete);
+}
+
+static int tegra_xudc_ep0_standard_req(struct tegra_xudc *xudc,
+ struct usb_ctrlrequest *ctrl)
+{
+ int ret;
+
+ switch (ctrl->bRequest) {
+ case USB_REQ_GET_STATUS:
+ dev_dbg(xudc->dev, "USB_REQ_GET_STATUS\n");
+ ret = tegra_xudc_ep0_get_status(xudc, ctrl);
+ break;
+ case USB_REQ_SET_ADDRESS:
+ dev_dbg(xudc->dev, "USB_REQ_SET_ADDRESS\n");
+ ret = tegra_xudc_ep0_set_address(xudc, ctrl);
+ break;
+ case USB_REQ_SET_SEL:
+ dev_dbg(xudc->dev, "USB_REQ_SET_SEL\n");
+ ret = tegra_xudc_ep0_set_sel(xudc, ctrl);
+ break;
+ case USB_REQ_SET_ISOCH_DELAY:
+ dev_dbg(xudc->dev, "USB_REQ_SET_ISOCH_DELAY\n");
+ ret = tegra_xudc_ep0_set_isoch_delay(xudc, ctrl);
+ break;
+ case USB_REQ_CLEAR_FEATURE:
+ case USB_REQ_SET_FEATURE:
+ dev_dbg(xudc->dev, "USB_REQ_CLEAR/SET_FEATURE\n");
+ ret = tegra_xudc_ep0_set_feature(xudc, ctrl);
+ break;
+ case USB_REQ_SET_CONFIGURATION:
+ dev_dbg(xudc->dev, "USB_REQ_SET_CONFIGURATION\n");
+ /*
+ * In theory we need to clear the RUN bit before the status stage
+ * of a deconfigure request is sent, but doing so seems to cause
+ * problems. Clear RUN once all endpoints are disabled instead.
+ */
+ fallthrough;
+ default:
+ ret = tegra_xudc_ep0_delegate_req(xudc, ctrl);
+ break;
+ }
+
+ return ret;
+}
+
+static void tegra_xudc_handle_ep0_setup_packet(struct tegra_xudc *xudc,
+ struct usb_ctrlrequest *ctrl,
+ u16 seq_num)
+{
+ int ret;
+
+ xudc->setup_seq_num = seq_num;
+
+ /* Ensure EP0 is unhalted. */
+ ep_unhalt(xudc, 0);
+
+ /*
+ * On Tegra210, setup packets with sequence numbers 0xfffe or 0xffff
+ * are invalid. Halt EP0 until we get a valid packet.
+ */
+ if (xudc->soc->invalid_seq_num &&
+ (seq_num == 0xfffe || seq_num == 0xffff)) {
+ dev_warn(xudc->dev, "invalid sequence number detected\n");
+ ep_halt(xudc, 0);
+ return;
+ }
+
+ if (ctrl->wLength)
+ xudc->setup_state = (ctrl->bRequestType & USB_DIR_IN) ?
+ DATA_STAGE_XFER : DATA_STAGE_RECV;
+ else
+ xudc->setup_state = STATUS_STAGE_XFER;
+
+ if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD)
+ ret = tegra_xudc_ep0_standard_req(xudc, ctrl);
+ else
+ ret = tegra_xudc_ep0_delegate_req(xudc, ctrl);
+
+ if (ret < 0) {
+ dev_warn(xudc->dev, "setup request failed: %d\n", ret);
+ xudc->setup_state = WAIT_FOR_SETUP;
+ ep_halt(xudc, 0);
+ }
+}
+
+static void tegra_xudc_handle_ep0_event(struct tegra_xudc *xudc,
+ struct tegra_xudc_trb *event)
+{
+ struct usb_ctrlrequest *ctrl = (struct usb_ctrlrequest *)event;
+ u16 seq_num = trb_read_seq_num(event);
+
+ if (xudc->setup_state != WAIT_FOR_SETUP) {
+ /*
+ * The controller is in the process of handling another
+ * setup request. Queue subsequent requests and handle
+ * the last one once the controller reports a sequence
+ * number error.
+ */
+ memcpy(&xudc->setup_packet.ctrl_req, ctrl, sizeof(*ctrl));
+ xudc->setup_packet.seq_num = seq_num;
+ xudc->queued_setup_packet = true;
+ } else {
+ tegra_xudc_handle_ep0_setup_packet(xudc, ctrl, seq_num);
+ }
+}
+
+static struct tegra_xudc_request *
+trb_to_request(struct tegra_xudc_ep *ep, struct tegra_xudc_trb *trb)
+{
+ struct tegra_xudc_request *req;
+
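+ /* Requests complete in order; stop at the first one with no TRBs queued. */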
+ list_for_each_entry(req, &ep->queue, list) {
+ if (!req->trbs_queued)
+ break;
+
+ if (trb_in_request(ep, req, trb))
+ return req;
+ }
+
+ return NULL;
+}
+
+static void tegra_xudc_handle_transfer_completion(struct tegra_xudc *xudc,
+ struct tegra_xudc_ep *ep,
+ struct tegra_xudc_trb *event)
+{
+ struct tegra_xudc_request *req;
+ struct tegra_xudc_trb *trb;
+ bool short_packet;
+
+ short_packet = (trb_read_cmpl_code(event) ==
+ TRB_CMPL_CODE_SHORT_PACKET);
+
+ trb = trb_phys_to_virt(ep, trb_read_data_ptr(event));
+ req = trb_to_request(ep, trb);
+
+ /*
+ * TDs are complete on short packet or when the completed TRB is the
+ * last TRB in the TD (the CHAIN bit is unset).
+ */
+ if (req && (short_packet || (!trb_read_chain(trb) &&
+ (req->trbs_needed == req->trbs_queued)))) {
+ struct tegra_xudc_trb *last = req->last_trb;
+ unsigned int residual;
+
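+ /* The transfer event reports how many bytes were left untransferred. */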
+ residual = trb_read_transfer_len(event);
+ req->usb_req.actual = req->usb_req.length - residual;
+
+ dev_dbg(xudc->dev, "bytes transferred %u / %u\n",
+ req->usb_req.actual, req->usb_req.length);
+
+ tegra_xudc_req_done(ep, req, 0);
+
+ if (ep->desc && usb_endpoint_xfer_control(ep->desc))
+ tegra_xudc_ep0_req_done(xudc);
+
+ /*
+ * Advance the dequeue pointer past the end of the current TD
+ * on short packet completion.
+ */
+ if (short_packet) {
+ ep->deq_ptr = (last - ep->transfer_ring) + 1;
+ if (ep->deq_ptr == XUDC_TRANSFER_RING_SIZE - 1)
+ ep->deq_ptr = 0;
+ }
+ } else if (!req) {
+ dev_warn(xudc->dev, "transfer event on dequeued request\n");
+ }
+
+ if (ep->desc)
+ tegra_xudc_ep_kick_queue(ep);
+}
+
+static void tegra_xudc_handle_transfer_event(struct tegra_xudc *xudc,
+ struct tegra_xudc_trb *event)
+{
+ unsigned int ep_index = trb_read_endpoint_id(event);
+ struct tegra_xudc_ep *ep = &xudc->ep[ep_index];
+ struct tegra_xudc_trb *trb;
+ u16 comp_code;
+
+ if (ep_ctx_read_state(ep->context) == EP_STATE_DISABLED) {
+ dev_warn(xudc->dev, "transfer event on disabled EP %u\n",
+ ep_index);
+ return;
+ }
+
+ /* Update transfer ring dequeue pointer. */
+ trb = trb_phys_to_virt(ep, trb_read_data_ptr(event));
+ comp_code = trb_read_cmpl_code(event);
+ if (comp_code != TRB_CMPL_CODE_BABBLE_DETECTED_ERR) {
+ ep->deq_ptr = (trb - ep->transfer_ring) + 1;
+
+ if (ep->deq_ptr == XUDC_TRANSFER_RING_SIZE - 1)
+ ep->deq_ptr = 0;
+ ep->ring_full = false;
+ }
+
+ switch (comp_code) {
+ case TRB_CMPL_CODE_SUCCESS:
+ case TRB_CMPL_CODE_SHORT_PACKET:
+ tegra_xudc_handle_transfer_completion(xudc, ep, event);
+ break;
+ case TRB_CMPL_CODE_HOST_REJECTED:
+ dev_info(xudc->dev, "stream rejected on EP %u\n", ep_index);
+
+ ep->stream_rejected = true;
+ break;
+ case TRB_CMPL_CODE_PRIME_PIPE_RECEIVED:
+ dev_info(xudc->dev, "prime pipe received on EP %u\n", ep_index);
+
+ if (ep->stream_rejected) {
+ ep->stream_rejected = false;
+ /*
+ * An EP is stopped when a stream is rejected. Wait
+ * for the EP to report that it is stopped and then
+ * un-stop it.
+ */
+ ep_wait_for_stopped(xudc, ep_index);
+ }
+ tegra_xudc_ep_ring_doorbell(ep);
+ break;
+ case TRB_CMPL_CODE_BABBLE_DETECTED_ERR:
+ /*
+ * Wait for the EP to be stopped so the controller stops
+ * processing doorbells.
+ */
+ ep_wait_for_stopped(xudc, ep_index);
+ ep->enq_ptr = ep->deq_ptr;
+ tegra_xudc_ep_nuke(ep, -EIO);
+ fallthrough;
+ case TRB_CMPL_CODE_STREAM_NUMP_ERROR:
+ case TRB_CMPL_CODE_CTRL_DIR_ERR:
+ case TRB_CMPL_CODE_INVALID_STREAM_TYPE_ERR:
+ case TRB_CMPL_CODE_RING_UNDERRUN:
+ case TRB_CMPL_CODE_RING_OVERRUN:
+ case TRB_CMPL_CODE_ISOCH_BUFFER_OVERRUN:
+ case TRB_CMPL_CODE_USB_TRANS_ERR:
+ case TRB_CMPL_CODE_TRB_ERR:
+ dev_err(xudc->dev, "completion error %#x on EP %u\n",
+ comp_code, ep_index);
+
+ ep_halt(xudc, ep_index);
+ break;
+ case TRB_CMPL_CODE_CTRL_SEQNUM_ERR:
+ dev_info(xudc->dev, "sequence number error\n");
+
+ /*
+ * Kill any queued control request and skip to the last
+ * setup packet we received.
+ */
+ tegra_xudc_ep_nuke(ep, -EINVAL);
+ xudc->setup_state = WAIT_FOR_SETUP;
+ if (!xudc->queued_setup_packet)
+ break;
+
+ tegra_xudc_handle_ep0_setup_packet(xudc,
+ &xudc->setup_packet.ctrl_req,
+ xudc->setup_packet.seq_num);
+ xudc->queued_setup_packet = false;
+ break;
+ case TRB_CMPL_CODE_STOPPED:
+ dev_dbg(xudc->dev, "stop completion code on EP %u\n",
+ ep_index);
+
+ /* Disconnected. */
+ tegra_xudc_ep_nuke(ep, -ECONNREFUSED);
+ break;
+ default:
+ dev_dbg(xudc->dev, "completion event %#x on EP %u\n",
+ comp_code, ep_index);
+ break;
+ }
+}
+
+static void tegra_xudc_reset(struct tegra_xudc *xudc)
+{
+ struct tegra_xudc_ep *ep0 = &xudc->ep[0];
+ dma_addr_t deq_ptr;
+ unsigned int i;
+
+ xudc->setup_state = WAIT_FOR_SETUP;
+ xudc->device_state = USB_STATE_DEFAULT;
+ usb_gadget_set_state(&xudc->gadget, xudc->device_state);
+
+ ep_unpause_all(xudc);
+
+ for (i = 0; i < ARRAY_SIZE(xudc->ep); i++)
+ tegra_xudc_ep_nuke(&xudc->ep[i], -ESHUTDOWN);
+
+ /*
+ * Reset sequence number and dequeue pointer to flush the transfer
+ * ring.
+ */
+ ep0->deq_ptr = ep0->enq_ptr;
+ ep0->ring_full = false;
+
+ xudc->setup_seq_num = 0;
+ xudc->queued_setup_packet = false;
+
+ ep_ctx_write_seq_num(ep0->context, xudc->setup_seq_num);
+
+ deq_ptr = trb_virt_to_phys(ep0, &ep0->transfer_ring[ep0->deq_ptr]);
+
+ if (!dma_mapping_error(xudc->dev, deq_ptr)) {
+ ep_ctx_write_deq_ptr(ep0->context, deq_ptr);
+ ep_ctx_write_dcs(ep0->context, ep0->pcs);
+ }
+
+ ep_unhalt_all(xudc);
+ ep_reload(xudc, 0);
+ ep_unpause(xudc, 0);
+}
+
+static void tegra_xudc_port_connect(struct tegra_xudc *xudc)
+{
+ struct tegra_xudc_ep *ep0 = &xudc->ep[0];
+ u16 maxpacket;
+ u32 val;
+
+ val = (xudc_readl(xudc, PORTSC) & PORTSC_PS_MASK) >> PORTSC_PS_SHIFT;
+ switch (val) {
+ case PORTSC_PS_LS:
+ xudc->gadget.speed = USB_SPEED_LOW;
+ break;
+ case PORTSC_PS_FS:
+ xudc->gadget.speed = USB_SPEED_FULL;
+ break;
+ case PORTSC_PS_HS:
+ xudc->gadget.speed = USB_SPEED_HIGH;
+ break;
+ case PORTSC_PS_SS:
+ xudc->gadget.speed = USB_SPEED_SUPER;
+ break;
+ default:
+ xudc->gadget.speed = USB_SPEED_UNKNOWN;
+ break;
+ }
+
+ xudc->device_state = USB_STATE_DEFAULT;
+ usb_gadget_set_state(&xudc->gadget, xudc->device_state);
+
+ xudc->setup_state = WAIT_FOR_SETUP;
+
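+ /* EP0 max packet size is fixed at 512 bytes for SuperSpeed, 64 otherwise. */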
+ if (xudc->gadget.speed == USB_SPEED_SUPER)
+ maxpacket = 512;
+ else
+ maxpacket = 64;
+
+ ep_ctx_write_max_packet_size(ep0->context, maxpacket);
+ tegra_xudc_ep0_desc.wMaxPacketSize = cpu_to_le16(maxpacket);
+ usb_ep_set_maxpacket_limit(&ep0->usb_ep, maxpacket);
+
+ if (!xudc->soc->u1_enable) {
+ val = xudc_readl(xudc, PORTPM);
+ val &= ~(PORTPM_U1TIMEOUT_MASK);
+ xudc_writel(xudc, val, PORTPM);
+ }
+
+ if (!xudc->soc->u2_enable) {
+ val = xudc_readl(xudc, PORTPM);
+ val &= ~(PORTPM_U2TIMEOUT_MASK);
+ xudc_writel(xudc, val, PORTPM);
+ }
+
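+ /* For HS/FS, accept LPM L1 requests if enabled, otherwise reply NYET. */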
+ if (xudc->gadget.speed <= USB_SPEED_HIGH) {
+ val = xudc_readl(xudc, PORTPM);
+ val &= ~(PORTPM_L1S_MASK);
+ if (xudc->soc->lpm_enable)
+ val |= PORTPM_L1S(PORTPM_L1S_ACCEPT);
+ else
+ val |= PORTPM_L1S(PORTPM_L1S_NYET);
+ xudc_writel(xudc, val, PORTPM);
+ }
+
+ val = xudc_readl(xudc, ST);
+ if (val & ST_RC)
+ xudc_writel(xudc, ST_RC, ST);
+}
+
+static void tegra_xudc_port_disconnect(struct tegra_xudc *xudc)
+{
+ tegra_xudc_reset(xudc);
+
+ if (xudc->driver && xudc->driver->disconnect) {
+ spin_unlock(&xudc->lock);
+ xudc->driver->disconnect(&xudc->gadget);
+ spin_lock(&xudc->lock);
+ }
+
+ xudc->device_state = USB_STATE_NOTATTACHED;
+ usb_gadget_set_state(&xudc->gadget, xudc->device_state);
+
+ complete(&xudc->disconnect_complete);
+}
+
+static void tegra_xudc_port_reset(struct tegra_xudc *xudc)
+{
+ tegra_xudc_reset(xudc);
+
+ if (xudc->driver) {
+ spin_unlock(&xudc->lock);
+ usb_gadget_udc_reset(&xudc->gadget, xudc->driver);
+ spin_lock(&xudc->lock);
+ }
+
+ tegra_xudc_port_connect(xudc);
+}
+
+static void tegra_xudc_port_suspend(struct tegra_xudc *xudc)
+{
+ dev_dbg(xudc->dev, "port suspend\n");
+
+ xudc->resume_state = xudc->device_state;
+ xudc->device_state = USB_STATE_SUSPENDED;
+ usb_gadget_set_state(&xudc->gadget, xudc->device_state);
+
+ if (xudc->driver->suspend) {
+ spin_unlock(&xudc->lock);
+ xudc->driver->suspend(&xudc->gadget);
+ spin_lock(&xudc->lock);
+ }
+}
+
+static void tegra_xudc_port_resume(struct tegra_xudc *xudc)
+{
+ dev_dbg(xudc->dev, "port resume\n");
+
+ tegra_xudc_resume_device_state(xudc);
+
+ if (xudc->driver->resume) {
+ spin_unlock(&xudc->lock);
+ xudc->driver->resume(&xudc->gadget);
+ spin_lock(&xudc->lock);
+ }
+}
+
+static inline void clear_port_change(struct tegra_xudc *xudc, u32 flag)
+{
+ u32 val;
+
+ val = xudc_readl(xudc, PORTSC);
+ val &= ~PORTSC_CHANGE_MASK;
+ val |= flag;
+ xudc_writel(xudc, val, PORTSC);
+}
+
+static void __tegra_xudc_handle_port_status(struct tegra_xudc *xudc)
+{
+ u32 portsc, porthalt;
+
+ porthalt = xudc_readl(xudc, PORTHALT);
+ if ((porthalt & PORTHALT_STCHG_REQ) &&
+ (porthalt & PORTHALT_HALT_LTSSM)) {
+ dev_dbg(xudc->dev, "STCHG_REQ, PORTHALT = %#x\n", porthalt);
+ porthalt &= ~PORTHALT_HALT_LTSSM;
+ xudc_writel(xudc, porthalt, PORTHALT);
+ }
+
+ portsc = xudc_readl(xudc, PORTSC);
+ if ((portsc & PORTSC_PRC) && (portsc & PORTSC_PR)) {
+ dev_dbg(xudc->dev, "PRC, PR, PORTSC = %#x\n", portsc);
+ clear_port_change(xudc, PORTSC_PRC | PORTSC_PED);
+#define TOGGLE_VBUS_WAIT_MS 100
+ if (xudc->soc->port_reset_quirk) {
+ schedule_delayed_work(&xudc->port_reset_war_work,
+ msecs_to_jiffies(TOGGLE_VBUS_WAIT_MS));
+ xudc->wait_for_sec_prc = 1;
+ }
+ }
+
+ if ((portsc & PORTSC_PRC) && !(portsc & PORTSC_PR)) {
+ dev_dbg(xudc->dev, "PRC, Not PR, PORTSC = %#x\n", portsc);
+ clear_port_change(xudc, PORTSC_PRC | PORTSC_PED);
+ tegra_xudc_port_reset(xudc);
+ cancel_delayed_work(&xudc->port_reset_war_work);
+ xudc->wait_for_sec_prc = 0;
+ }
+
+ portsc = xudc_readl(xudc, PORTSC);
+ if (portsc & PORTSC_WRC) {
+ dev_dbg(xudc->dev, "WRC, PORTSC = %#x\n", portsc);
+ clear_port_change(xudc, PORTSC_WRC | PORTSC_PED);
+ if (!(xudc_readl(xudc, PORTSC) & PORTSC_WPR))
+ tegra_xudc_port_reset(xudc);
+ }
+
+ portsc = xudc_readl(xudc, PORTSC);
+ if (portsc & PORTSC_CSC) {
+ dev_dbg(xudc->dev, "CSC, PORTSC = %#x\n", portsc);
+ clear_port_change(xudc, PORTSC_CSC);
+
+ if (portsc & PORTSC_CCS)
+ tegra_xudc_port_connect(xudc);
+ else
+ tegra_xudc_port_disconnect(xudc);
+
+ if (xudc->wait_csc) {
+ cancel_delayed_work(&xudc->plc_reset_work);
+ xudc->wait_csc = false;
+ }
+ }
+
+ portsc = xudc_readl(xudc, PORTSC);
+ if (portsc & PORTSC_PLC) {
+ u32 pls = (portsc & PORTSC_PLS_MASK) >> PORTSC_PLS_SHIFT;
+
+ dev_dbg(xudc->dev, "PLC, PORTSC = %#x\n", portsc);
+ clear_port_change(xudc, PORTSC_PLC);
+ switch (pls) {
+ case PORTSC_PLS_U3:
+ tegra_xudc_port_suspend(xudc);
+ break;
+ case PORTSC_PLS_U0:
+ if (xudc->gadget.speed < USB_SPEED_SUPER)
+ tegra_xudc_port_resume(xudc);
+ break;
+ case PORTSC_PLS_RESUME:
+ if (xudc->gadget.speed == USB_SPEED_SUPER)
+ tegra_xudc_port_resume(xudc);
+ break;
+ case PORTSC_PLS_INACTIVE:
+ schedule_delayed_work(&xudc->plc_reset_work,
+ msecs_to_jiffies(TOGGLE_VBUS_WAIT_MS));
+ xudc->wait_csc = true;
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (portsc & PORTSC_CEC) {
+ dev_warn(xudc->dev, "CEC, PORTSC = %#x\n", portsc);
+ clear_port_change(xudc, PORTSC_CEC);
+ }
+
+ dev_dbg(xudc->dev, "PORTSC = %#x\n", xudc_readl(xudc, PORTSC));
+}
+
+static void tegra_xudc_handle_port_status(struct tegra_xudc *xudc)
+{
+ while ((xudc_readl(xudc, PORTSC) & PORTSC_CHANGE_MASK) ||
+ (xudc_readl(xudc, PORTHALT) & PORTHALT_STCHG_REQ))
+ __tegra_xudc_handle_port_status(xudc);
+}
+
+static void tegra_xudc_handle_event(struct tegra_xudc *xudc,
+ struct tegra_xudc_trb *event)
+{
+ u32 type = trb_read_type(event);
+
+ dump_trb(xudc, "EVENT", event);
+
+ switch (type) {
+ case TRB_TYPE_PORT_STATUS_CHANGE_EVENT:
+ tegra_xudc_handle_port_status(xudc);
+ break;
+ case TRB_TYPE_TRANSFER_EVENT:
+ tegra_xudc_handle_transfer_event(xudc, event);
+ break;
+ case TRB_TYPE_SETUP_PACKET_EVENT:
+ tegra_xudc_handle_ep0_event(xudc, event);
+ break;
+ default:
+ dev_info(xudc->dev, "Unrecognized TRB type = %#x\n", type);
+ break;
+ }
+}
+
+static void tegra_xudc_process_event_ring(struct tegra_xudc *xudc)
+{
+ struct tegra_xudc_trb *event;
+ dma_addr_t erdp;
+
+ while (true) {
+ event = xudc->event_ring[xudc->event_ring_index] +
+ xudc->event_ring_deq_ptr;
+
+ if (trb_read_cycle(event) != xudc->ccs)
+ break;
+
+ tegra_xudc_handle_event(xudc, event);
+
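+ /* Advance across segments, toggling the consumer cycle state at wrap. */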
+ xudc->event_ring_deq_ptr++;
+ if (xudc->event_ring_deq_ptr == XUDC_EVENT_RING_SIZE) {
+ xudc->event_ring_deq_ptr = 0;
+ xudc->event_ring_index++;
+ }
+
+ if (xudc->event_ring_index == XUDC_NR_EVENT_RINGS) {
+ xudc->event_ring_index = 0;
+ xudc->ccs = !xudc->ccs;
+ }
+ }
+
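+ /* Write back the dequeue pointer and clear the Event Handler Busy bit. */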
+ erdp = xudc->event_ring_phys[xudc->event_ring_index] +
+ xudc->event_ring_deq_ptr * sizeof(*event);
+
+ xudc_writel(xudc, upper_32_bits(erdp), ERDPHI);
+ xudc_writel(xudc, lower_32_bits(erdp) | ERDPLO_EHB, ERDPLO);
+}
+
+static irqreturn_t tegra_xudc_irq(int irq, void *data)
+{
+ struct tegra_xudc *xudc = data;
+ unsigned long flags;
+ u32 val;
+
+ val = xudc_readl(xudc, ST);
+ if (!(val & ST_IP))
+ return IRQ_NONE;
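+ /* The interrupt pending bit is write-1-to-clear. */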
+ xudc_writel(xudc, ST_IP, ST);
+
+ spin_lock_irqsave(&xudc->lock, flags);
+ tegra_xudc_process_event_ring(xudc);
+ spin_unlock_irqrestore(&xudc->lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static int tegra_xudc_alloc_ep(struct tegra_xudc *xudc, unsigned int index)
+{
+ struct tegra_xudc_ep *ep = &xudc->ep[index];
+
+ ep->xudc = xudc;
+ ep->index = index;
+ ep->context = &xudc->ep_context[index];
+ INIT_LIST_HEAD(&ep->queue);
+
+ /*
+ * EP1 would be the input endpoint corresponding to EP0, but since
+ * EP0 is bi-directional, EP1 is unused.
+ */
+ if (index == 1)
+ return 0;
+
+ ep->transfer_ring = dma_pool_alloc(xudc->transfer_ring_pool,
+ GFP_KERNEL,
+ &ep->transfer_ring_phys);
+ if (!ep->transfer_ring)
+ return -ENOMEM;
+
+ if (index) {
+ snprintf(ep->name, sizeof(ep->name), "ep%u%s", index / 2,
+ (index % 2 == 0) ? "out" : "in");
+ ep->usb_ep.name = ep->name;
+ usb_ep_set_maxpacket_limit(&ep->usb_ep, 1024);
+ ep->usb_ep.max_streams = 16;
+ ep->usb_ep.ops = &tegra_xudc_ep_ops;
+ ep->usb_ep.caps.type_bulk = true;
+ ep->usb_ep.caps.type_int = true;
+ if (index & 1)
+ ep->usb_ep.caps.dir_in = true;
+ else
+ ep->usb_ep.caps.dir_out = true;
+ list_add_tail(&ep->usb_ep.ep_list, &xudc->gadget.ep_list);
+ } else {
+ strscpy(ep->name, "ep0", sizeof(ep->name));
+ ep->usb_ep.name = ep->name;
+ usb_ep_set_maxpacket_limit(&ep->usb_ep, 512);
+ ep->usb_ep.ops = &tegra_xudc_ep0_ops;
+ ep->usb_ep.caps.type_control = true;
+ ep->usb_ep.caps.dir_in = true;
+ ep->usb_ep.caps.dir_out = true;
+ }
+
+ return 0;
+}
+
+static void tegra_xudc_free_ep(struct tegra_xudc *xudc, unsigned int index)
+{
+ struct tegra_xudc_ep *ep = &xudc->ep[index];
+
+ /*
+ * EP1 would be the input endpoint corresponding to EP0, but since
+ * EP0 is bi-directional, EP1 is unused.
+ */
+ if (index == 1)
+ return;
+
+ dma_pool_free(xudc->transfer_ring_pool, ep->transfer_ring,
+ ep->transfer_ring_phys);
+}
+
+static int tegra_xudc_alloc_eps(struct tegra_xudc *xudc)
+{
+ struct usb_request *req;
+ unsigned int i;
+ int err;
+
+ xudc->ep_context =
+ dma_alloc_coherent(xudc->dev, XUDC_NR_EPS *
+ sizeof(*xudc->ep_context),
+ &xudc->ep_context_phys, GFP_KERNEL);
+ if (!xudc->ep_context)
+ return -ENOMEM;
+
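+ /* Transfer rings come from a DMA pool sized and aligned to whole TRBs. */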
+ xudc->transfer_ring_pool =
+ dmam_pool_create(dev_name(xudc->dev), xudc->dev,
+ XUDC_TRANSFER_RING_SIZE *
+ sizeof(struct tegra_xudc_trb),
+ sizeof(struct tegra_xudc_trb), 0);
+ if (!xudc->transfer_ring_pool) {
+ err = -ENOMEM;
+ goto free_ep_context;
+ }
+
+ INIT_LIST_HEAD(&xudc->gadget.ep_list);
+ for (i = 0; i < ARRAY_SIZE(xudc->ep); i++) {
+ err = tegra_xudc_alloc_ep(xudc, i);
+ if (err < 0)
+ goto free_eps;
+ }
+
+ req = tegra_xudc_ep_alloc_request(&xudc->ep[0].usb_ep, GFP_KERNEL);
+ if (!req) {
+ err = -ENOMEM;
+ goto free_eps;
+ }
+ xudc->ep0_req = to_xudc_req(req);
+
+ return 0;
+
+free_eps:
+ for (; i > 0; i--)
+ tegra_xudc_free_ep(xudc, i - 1);
+free_ep_context:
+ dma_free_coherent(xudc->dev, XUDC_NR_EPS * sizeof(*xudc->ep_context),
+ xudc->ep_context, xudc->ep_context_phys);
+ return err;
+}
+
+static void tegra_xudc_init_eps(struct tegra_xudc *xudc)
+{
+ xudc_writel(xudc, lower_32_bits(xudc->ep_context_phys), ECPLO);
+ xudc_writel(xudc, upper_32_bits(xudc->ep_context_phys), ECPHI);
+}
+
+static void tegra_xudc_free_eps(struct tegra_xudc *xudc)
+{
+ unsigned int i;
+
+ tegra_xudc_ep_free_request(&xudc->ep[0].usb_ep,
+ &xudc->ep0_req->usb_req);
+
+ for (i = 0; i < ARRAY_SIZE(xudc->ep); i++)
+ tegra_xudc_free_ep(xudc, i);
+
+ dma_free_coherent(xudc->dev, XUDC_NR_EPS * sizeof(*xudc->ep_context),
+ xudc->ep_context, xudc->ep_context_phys);
+}
+
+static int tegra_xudc_alloc_event_ring(struct tegra_xudc *xudc)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(xudc->event_ring); i++) {
+ xudc->event_ring[i] =
+ dma_alloc_coherent(xudc->dev, XUDC_EVENT_RING_SIZE *
+ sizeof(*xudc->event_ring[i]),
+ &xudc->event_ring_phys[i],
+ GFP_KERNEL);
+ if (!xudc->event_ring[i])
+ goto free_dma;
+ }
+
+ return 0;
+
+free_dma:
+ for (; i > 0; i--) {
+ dma_free_coherent(xudc->dev, XUDC_EVENT_RING_SIZE *
+ sizeof(*xudc->event_ring[i - 1]),
+ xudc->event_ring[i - 1],
+ xudc->event_ring_phys[i - 1]);
+ }
+ return -ENOMEM;
+}
+
+static void tegra_xudc_init_event_ring(struct tegra_xudc *xudc)
+{
+ unsigned int i;
+ u32 val;
+
+ val = xudc_readl(xudc, SPARAM);
+ val &= ~(SPARAM_ERSTMAX_MASK);
+ val |= SPARAM_ERSTMAX(XUDC_NR_EVENT_RINGS);
+ xudc_writel(xudc, val, SPARAM);
+
+ for (i = 0; i < ARRAY_SIZE(xudc->event_ring); i++) {
+ memset(xudc->event_ring[i], 0, XUDC_EVENT_RING_SIZE *
+ sizeof(*xudc->event_ring[i]));
+
+ val = xudc_readl(xudc, ERSTSZ);
+ val &= ~(ERSTSZ_ERSTXSZ_MASK << ERSTSZ_ERSTXSZ_SHIFT(i));
+ val |= XUDC_EVENT_RING_SIZE << ERSTSZ_ERSTXSZ_SHIFT(i);
+ xudc_writel(xudc, val, ERSTSZ);
+
+ xudc_writel(xudc, lower_32_bits(xudc->event_ring_phys[i]),
+ ERSTXBALO(i));
+ xudc_writel(xudc, upper_32_bits(xudc->event_ring_phys[i]),
+ ERSTXBAHI(i));
+ }
+
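+ /* Point dequeue (ERDP) and enqueue (EREP) at the first segment; ECS seeds the producer cycle state. */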
+ val = lower_32_bits(xudc->event_ring_phys[0]);
+ xudc_writel(xudc, val, ERDPLO);
+ val |= EREPLO_ECS;
+ xudc_writel(xudc, val, EREPLO);
+
+ val = upper_32_bits(xudc->event_ring_phys[0]);
+ xudc_writel(xudc, val, ERDPHI);
+ xudc_writel(xudc, val, EREPHI);
+
+ xudc->ccs = true;
+ xudc->event_ring_index = 0;
+ xudc->event_ring_deq_ptr = 0;
+}
+
+static void tegra_xudc_free_event_ring(struct tegra_xudc *xudc)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(xudc->event_ring); i++) {
+ dma_free_coherent(xudc->dev, XUDC_EVENT_RING_SIZE *
+ sizeof(*xudc->event_ring[i]),
+ xudc->event_ring[i],
+ xudc->event_ring_phys[i]);
+ }
+}
+
+static void tegra_xudc_fpci_ipfs_init(struct tegra_xudc *xudc)
+{
+ u32 val;
+
+ if (xudc->soc->has_ipfs) {
+ val = ipfs_readl(xudc, XUSB_DEV_CONFIGURATION_0);
+ val |= XUSB_DEV_CONFIGURATION_0_EN_FPCI;
+ ipfs_writel(xudc, val, XUSB_DEV_CONFIGURATION_0);
+ usleep_range(10, 15);
+ }
+
+ /* Enable bus master */
+ val = XUSB_DEV_CFG_1_IO_SPACE_EN | XUSB_DEV_CFG_1_MEMORY_SPACE_EN |
+ XUSB_DEV_CFG_1_BUS_MASTER_EN;
+ fpci_writel(xudc, val, XUSB_DEV_CFG_1);
+
+ /* Program BAR0 space */
+ val = fpci_readl(xudc, XUSB_DEV_CFG_4);
+ val &= ~(XUSB_DEV_CFG_4_BASE_ADDR_MASK);
+ val |= xudc->phys_base & (XUSB_DEV_CFG_4_BASE_ADDR_MASK);
+
+ fpci_writel(xudc, val, XUSB_DEV_CFG_4);
+ fpci_writel(xudc, upper_32_bits(xudc->phys_base), XUSB_DEV_CFG_5);
+
+ usleep_range(100, 200);
+
+ if (xudc->soc->has_ipfs) {
+ /* Enable interrupt assertion */
+ val = ipfs_readl(xudc, XUSB_DEV_INTR_MASK_0);
+ val |= XUSB_DEV_INTR_MASK_0_IP_INT_MASK;
+ ipfs_writel(xudc, val, XUSB_DEV_INTR_MASK_0);
+ }
+}
+
+static void tegra_xudc_device_params_init(struct tegra_xudc *xudc)
+{
+ u32 val, imod;
+
+ if (xudc->soc->has_ipfs) {
+ val = xudc_readl(xudc, BLCG);
+ val |= BLCG_ALL;
+ val &= ~(BLCG_DFPCI | BLCG_UFPCI | BLCG_FE |
+ BLCG_COREPLL_PWRDN);
+ val |= BLCG_IOPLL_0_PWRDN;
+ val |= BLCG_IOPLL_1_PWRDN;
+ val |= BLCG_IOPLL_2_PWRDN;
+
+ xudc_writel(xudc, val, BLCG);
+ }
+
+ /* Set a reasonable U3 exit timer value. */
+ val = xudc_readl(xudc, SSPX_CORE_PADCTL4);
+ val &= ~(SSPX_CORE_PADCTL4_RXDAT_VLD_TIMEOUT_U3_MASK);
+ val |= SSPX_CORE_PADCTL4_RXDAT_VLD_TIMEOUT_U3(0x5dc0);
+ xudc_writel(xudc, val, SSPX_CORE_PADCTL4);
+
+ /* Default ping LFPS tBurst is too large. */
+ val = xudc_readl(xudc, SSPX_CORE_CNT0);
+ val &= ~(SSPX_CORE_CNT0_PING_TBURST_MASK);
+ val |= SSPX_CORE_CNT0_PING_TBURST(0xa);
+ xudc_writel(xudc, val, SSPX_CORE_CNT0);
+
+ /* Default tPortConfiguration timeout is too small. */
+ val = xudc_readl(xudc, SSPX_CORE_CNT30);
+ val &= ~(SSPX_CORE_CNT30_LMPITP_TIMER_MASK);
+ val |= SSPX_CORE_CNT30_LMPITP_TIMER(0x978);
+ xudc_writel(xudc, val, SSPX_CORE_CNT30);
+
+ if (xudc->soc->lpm_enable) {
+ /* Set L1 resume duration to 95 us. */
+ val = xudc_readl(xudc, HSFSPI_COUNT13);
+ val &= ~(HSFSPI_COUNT13_U2_RESUME_K_DURATION_MASK);
+ val |= HSFSPI_COUNT13_U2_RESUME_K_DURATION(0x2c88);
+ xudc_writel(xudc, val, HSFSPI_COUNT13);
+ }
+
+ /*
+ * The compliance suite appears to violate the polling LFPS tBurst
+ * maximum of 1.4us. Send 1.45us instead.
+ */
+ val = xudc_readl(xudc, SSPX_CORE_CNT32);
+ val &= ~(SSPX_CORE_CNT32_POLL_TBURST_MAX_MASK);
+ val |= SSPX_CORE_CNT32_POLL_TBURST_MAX(0xb0);
+ xudc_writel(xudc, val, SSPX_CORE_CNT32);
+
+ /* Direct HS/FS port instance to RxDetect. */
+ val = xudc_readl(xudc, CFG_DEV_FE);
+ val &= ~(CFG_DEV_FE_PORTREGSEL_MASK);
+ val |= CFG_DEV_FE_PORTREGSEL(CFG_DEV_FE_PORTREGSEL_HSFS_PI);
+ xudc_writel(xudc, val, CFG_DEV_FE);
+
+ val = xudc_readl(xudc, PORTSC);
+ val &= ~(PORTSC_CHANGE_MASK | PORTSC_PLS_MASK);
+ val |= PORTSC_LWS | PORTSC_PLS(PORTSC_PLS_RXDETECT);
+ xudc_writel(xudc, val, PORTSC);
+
+ /* Direct SS port instance to RxDetect. */
+ val = xudc_readl(xudc, CFG_DEV_FE);
+ val &= ~(CFG_DEV_FE_PORTREGSEL_MASK);
+ val |= CFG_DEV_FE_PORTREGSEL_SS_PI & CFG_DEV_FE_PORTREGSEL_MASK;
+ xudc_writel(xudc, val, CFG_DEV_FE);
+
+ val = xudc_readl(xudc, PORTSC);
+ val &= ~(PORTSC_CHANGE_MASK | PORTSC_PLS_MASK);
+ val |= PORTSC_LWS | PORTSC_PLS(PORTSC_PLS_RXDETECT);
+ xudc_writel(xudc, val, PORTSC);
+
+ /* Restore port instance. */
+ val = xudc_readl(xudc, CFG_DEV_FE);
+ val &= ~(CFG_DEV_FE_PORTREGSEL_MASK);
+ xudc_writel(xudc, val, CFG_DEV_FE);
+
+ /*
+ * Enable INFINITE_SS_RETRY to prevent device from entering
+ * Disabled.Error when attached to buggy SuperSpeed hubs.
+ */
+ val = xudc_readl(xudc, CFG_DEV_FE);
+ val |= CFG_DEV_FE_INFINITE_SS_RETRY;
+ xudc_writel(xudc, val, CFG_DEV_FE);
+
+ /* Set interrupt moderation. */
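+ /* The moderation counters are in 250 ns units, hence the multiply by 4. */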
+ imod = XUDC_INTERRUPT_MODERATION_US * 4;
+ val = xudc_readl(xudc, RT_IMOD);
+ val &= ~((RT_IMOD_IMODI_MASK) | (RT_IMOD_IMODC_MASK));
+ val |= (RT_IMOD_IMODI(imod) | RT_IMOD_IMODC(imod));
+ xudc_writel(xudc, val, RT_IMOD);
+
+ /* increase SSPI transaction timeout from 32us to 512us */
+ val = xudc_readl(xudc, CFG_DEV_SSPI_XFER);
+ val &= ~(CFG_DEV_SSPI_XFER_ACKTIMEOUT_MASK);
+ val |= CFG_DEV_SSPI_XFER_ACKTIMEOUT(0xf000);
+ xudc_writel(xudc, val, CFG_DEV_SSPI_XFER);
+}
+
+static int tegra_xudc_phy_init(struct tegra_xudc *xudc)
+{
+ int err;
+
+ err = phy_init(xudc->utmi_phy);
+ if (err < 0) {
+ dev_err(xudc->dev, "utmi phy init failed: %d\n", err);
+ return err;
+ }
+
+ err = phy_init(xudc->usb3_phy);
+ if (err < 0) {
+ dev_err(xudc->dev, "usb3 phy init failed: %d\n", err);
+ goto exit_utmi_phy;
+ }
+
+ return 0;
+
+exit_utmi_phy:
+ phy_exit(xudc->utmi_phy);
+ return err;
+}
+
+static void tegra_xudc_phy_exit(struct tegra_xudc *xudc)
+{
+ phy_exit(xudc->usb3_phy);
+ phy_exit(xudc->utmi_phy);
+}
+
+static const char * const tegra210_xudc_supply_names[] = {
+ "hvdd-usb",
+ "avddio-usb",
+};
+
+static const char * const tegra210_xudc_clock_names[] = {
+ "dev",
+ "ss",
+ "ss_src",
+ "hs_src",
+ "fs_src",
+};
+
+static const char * const tegra186_xudc_clock_names[] = {
+ "dev",
+ "ss",
+ "ss_src",
+ "fs_src",
+};
+
+static struct tegra_xudc_soc tegra210_xudc_soc_data = {
+ .supply_names = tegra210_xudc_supply_names,
+ .num_supplies = ARRAY_SIZE(tegra210_xudc_supply_names),
+ .clock_names = tegra210_xudc_clock_names,
+ .num_clks = ARRAY_SIZE(tegra210_xudc_clock_names),
+ .u1_enable = false,
+ .u2_enable = true,
+ .lpm_enable = false,
+ .invalid_seq_num = true,
+ .pls_quirk = true,
+ .port_reset_quirk = true,
+ .has_ipfs = true,
+};
+
+static struct tegra_xudc_soc tegra186_xudc_soc_data = {
+ .clock_names = tegra186_xudc_clock_names,
+ .num_clks = ARRAY_SIZE(tegra186_xudc_clock_names),
+ .u1_enable = true,
+ .u2_enable = true,
+ .lpm_enable = false,
+ .invalid_seq_num = false,
+ .pls_quirk = false,
+ .port_reset_quirk = false,
+ .has_ipfs = false,
+};
+
+static const struct of_device_id tegra_xudc_of_match[] = {
+ {
+ .compatible = "nvidia,tegra210-xudc",
+ .data = &tegra210_xudc_soc_data
+ },
+ {
+ .compatible = "nvidia,tegra186-xudc",
+ .data = &tegra186_xudc_soc_data
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, tegra_xudc_of_match);
+
+static void tegra_xudc_powerdomain_remove(struct tegra_xudc *xudc)
+{
+ if (xudc->genpd_dl_ss)
+ device_link_del(xudc->genpd_dl_ss);
+ if (xudc->genpd_dl_device)
+ device_link_del(xudc->genpd_dl_device);
+ if (xudc->genpd_dev_ss)
+ dev_pm_domain_detach(xudc->genpd_dev_ss, true);
+ if (xudc->genpd_dev_device)
+ dev_pm_domain_detach(xudc->genpd_dev_device, true);
+}
+
+static int tegra_xudc_powerdomain_init(struct tegra_xudc *xudc)
+{
+ struct device *dev = xudc->dev;
+ int err;
+
+ xudc->genpd_dev_device = dev_pm_domain_attach_by_name(dev,
+ "dev");
+ if (IS_ERR(xudc->genpd_dev_device)) {
+ err = PTR_ERR(xudc->genpd_dev_device);
+ dev_err(dev, "failed to get dev pm-domain: %d\n", err);
+ return err;
+ }
+
+ xudc->genpd_dev_ss = dev_pm_domain_attach_by_name(dev, "ss");
+ if (IS_ERR(xudc->genpd_dev_ss)) {
+ err = PTR_ERR(xudc->genpd_dev_ss);
+ dev_err(dev, "failed to get superspeed pm-domain: %d\n", err);
+ return err;
+ }
+
+ xudc->genpd_dl_device = device_link_add(dev, xudc->genpd_dev_device,
+ DL_FLAG_PM_RUNTIME |
+ DL_FLAG_STATELESS);
+ if (!xudc->genpd_dl_device) {
+ dev_err(dev, "adding usb device device link failed!\n");
+ return -ENODEV;
+ }
+
+ xudc->genpd_dl_ss = device_link_add(dev, xudc->genpd_dev_ss,
+ DL_FLAG_PM_RUNTIME |
+ DL_FLAG_STATELESS);
+ if (!xudc->genpd_dl_ss) {
+ dev_err(dev, "adding superspeed device link failed!\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int tegra_xudc_probe(struct platform_device *pdev)
+{
+ struct tegra_xudc *xudc;
+ struct resource *res;
+ struct usb_role_switch_desc role_sx_desc = { 0 };
+ unsigned int i;
+ int err;
+
+ xudc = devm_kzalloc(&pdev->dev, sizeof(*xudc), GFP_KERNEL);
+ if (!xudc)
+ return -ENOMEM;
+
+ xudc->dev = &pdev->dev;
+ platform_set_drvdata(pdev, xudc);
+
+ xudc->soc = of_device_get_match_data(&pdev->dev);
+ if (!xudc->soc)
+ return -ENODEV;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
+ xudc->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(xudc->base))
+ return PTR_ERR(xudc->base);
+ xudc->phys_base = res->start;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fpci");
+ xudc->fpci = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(xudc->fpci))
+ return PTR_ERR(xudc->fpci);
+
+ if (xudc->soc->has_ipfs) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "ipfs");
+ xudc->ipfs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(xudc->ipfs))
+ return PTR_ERR(xudc->ipfs);
+ }
+
+ xudc->irq = platform_get_irq(pdev, 0);
+ if (xudc->irq < 0) {
+ dev_err(xudc->dev, "failed to get IRQ: %d\n",
+ xudc->irq);
+ return xudc->irq;
+ }
+
+ err = devm_request_irq(&pdev->dev, xudc->irq, tegra_xudc_irq, 0,
+ dev_name(&pdev->dev), xudc);
+ if (err < 0) {
+ dev_err(xudc->dev, "failed to claim IRQ#%u: %d\n", xudc->irq,
+ err);
+ return err;
+ }
+
+ xudc->clks = devm_kcalloc(&pdev->dev, xudc->soc->num_clks,
+ sizeof(*xudc->clks), GFP_KERNEL);
+ if (!xudc->clks)
+ return -ENOMEM;
+
+ for (i = 0; i < xudc->soc->num_clks; i++)
+ xudc->clks[i].id = xudc->soc->clock_names[i];
+
+ err = devm_clk_bulk_get(&pdev->dev, xudc->soc->num_clks,
+ xudc->clks);
+ if (err) {
+ dev_err(xudc->dev, "failed to request clks %d\n", err);
+ return err;
+ }
+
+ xudc->supplies = devm_kcalloc(&pdev->dev, xudc->soc->num_supplies,
+ sizeof(*xudc->supplies), GFP_KERNEL);
+ if (!xudc->supplies)
+ return -ENOMEM;
+
+ for (i = 0; i < xudc->soc->num_supplies; i++)
+ xudc->supplies[i].supply = xudc->soc->supply_names[i];
+
+ err = devm_regulator_bulk_get(&pdev->dev, xudc->soc->num_supplies,
+ xudc->supplies);
+ if (err) {
+ dev_err(xudc->dev, "failed to request regulators %d\n", err);
+ return err;
+ }
+
+ xudc->padctl = tegra_xusb_padctl_get(&pdev->dev);
+ if (IS_ERR(xudc->padctl))
+ return PTR_ERR(xudc->padctl);
+
+ err = regulator_bulk_enable(xudc->soc->num_supplies, xudc->supplies);
+ if (err) {
+ dev_err(xudc->dev, "failed to enable regulators %d\n", err);
+ goto put_padctl;
+ }
+
+ xudc->usb3_phy = devm_phy_optional_get(&pdev->dev, "usb3");
+ if (IS_ERR(xudc->usb3_phy)) {
+ err = PTR_ERR(xudc->usb3_phy);
+ dev_err(xudc->dev, "failed to get usb3 phy: %d\n", err);
+ goto disable_regulator;
+ }
+
+ xudc->utmi_phy = devm_phy_optional_get(&pdev->dev, "usb2");
+ if (IS_ERR(xudc->utmi_phy)) {
+ err = PTR_ERR(xudc->utmi_phy);
+ dev_err(xudc->dev, "failed to get usb2 phy: %d\n", err);
+ goto disable_regulator;
+ }
+
+ err = tegra_xudc_powerdomain_init(xudc);
+ if (err)
+ goto put_powerdomains;
+
+ err = tegra_xudc_phy_init(xudc);
+ if (err)
+ goto put_powerdomains;
+
+ err = tegra_xudc_alloc_event_ring(xudc);
+ if (err)
+ goto disable_phy;
+
+ err = tegra_xudc_alloc_eps(xudc);
+ if (err)
+ goto free_event_ring;
+
+ spin_lock_init(&xudc->lock);
+
+ init_completion(&xudc->disconnect_complete);
+
+ INIT_WORK(&xudc->usb_role_sw_work, tegra_xudc_usb_role_sw_work);
+
+ INIT_DELAYED_WORK(&xudc->plc_reset_work, tegra_xudc_plc_reset_work);
+
+ INIT_DELAYED_WORK(&xudc->port_reset_war_work,
+ tegra_xudc_port_reset_war_work);
+
+ if (of_property_read_bool(xudc->dev->of_node, "usb-role-switch")) {
+ role_sx_desc.set = tegra_xudc_usb_role_sw_set;
+ role_sx_desc.fwnode = dev_fwnode(xudc->dev);
+
+ xudc->usb_role_sw = usb_role_switch_register(xudc->dev,
+ &role_sx_desc);
+ if (IS_ERR(xudc->usb_role_sw)) {
+ err = PTR_ERR(xudc->usb_role_sw);
+ dev_err(xudc->dev, "Failed to register USB role SW: %d",
+ err);
+ goto free_eps;
+ }
+ } else {
+ /* Default to device mode; this keeps the PHY always on. */
+ dev_info(xudc->dev, "setting USB role to device mode\n");
+ schedule_work(&xudc->usb_role_sw_work);
+ }
+
+ pm_runtime_enable(&pdev->dev);
+
+ xudc->gadget.ops = &tegra_xudc_gadget_ops;
+ xudc->gadget.ep0 = &xudc->ep[0].usb_ep;
+ xudc->gadget.name = "tegra-xudc";
+ xudc->gadget.max_speed = USB_SPEED_SUPER;
+
+ err = usb_add_gadget_udc(&pdev->dev, &xudc->gadget);
+ if (err) {
+ dev_err(&pdev->dev, "failed to add USB gadget: %d\n", err);
+ goto free_eps;
+ }
+
+ return 0;
+
+free_eps:
+ tegra_xudc_free_eps(xudc);
+free_event_ring:
+ tegra_xudc_free_event_ring(xudc);
+disable_phy:
+ tegra_xudc_phy_exit(xudc);
+put_powerdomains:
+ tegra_xudc_powerdomain_remove(xudc);
+disable_regulator:
+ regulator_bulk_disable(xudc->soc->num_supplies, xudc->supplies);
+put_padctl:
+ tegra_xusb_padctl_put(xudc->padctl);
+
+ return err;
+}
+
+static int tegra_xudc_remove(struct platform_device *pdev)
+{
+ struct tegra_xudc *xudc = platform_get_drvdata(pdev);
+
+ pm_runtime_get_sync(xudc->dev);
+
+ cancel_delayed_work(&xudc->plc_reset_work);
+
+ if (xudc->usb_role_sw) {
+ usb_role_switch_unregister(xudc->usb_role_sw);
+ cancel_work_sync(&xudc->usb_role_sw_work);
+ }
+
+ usb_del_gadget_udc(&xudc->gadget);
+
+ tegra_xudc_free_eps(xudc);
+ tegra_xudc_free_event_ring(xudc);
+
+ tegra_xudc_powerdomain_remove(xudc);
+
+ regulator_bulk_disable(xudc->soc->num_supplies, xudc->supplies);
+
+ phy_power_off(xudc->utmi_phy);
+ phy_power_off(xudc->usb3_phy);
+
+ tegra_xudc_phy_exit(xudc);
+
+ pm_runtime_disable(xudc->dev);
+ pm_runtime_put(xudc->dev);
+
+ tegra_xusb_padctl_put(xudc->padctl);
+
+ return 0;
+}
+
+static int __maybe_unused tegra_xudc_powergate(struct tegra_xudc *xudc)
+{
+ unsigned long flags;
+
+ dev_dbg(xudc->dev, "entering ELPG\n");
+
+ spin_lock_irqsave(&xudc->lock, flags);
+
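+ /* Save registers that are lost across ELPG, then stop the controller. */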
+ xudc->powergated = true;
+ xudc->saved_regs.ctrl = xudc_readl(xudc, CTRL);
+ xudc->saved_regs.portpm = xudc_readl(xudc, PORTPM);
+ xudc_writel(xudc, 0, CTRL);
+
+ spin_unlock_irqrestore(&xudc->lock, flags);
+
+ clk_bulk_disable_unprepare(xudc->soc->num_clks, xudc->clks);
+
+ regulator_bulk_disable(xudc->soc->num_supplies, xudc->supplies);
+
+ dev_dbg(xudc->dev, "entering ELPG done\n");
+ return 0;
+}
+
+static int __maybe_unused tegra_xudc_unpowergate(struct tegra_xudc *xudc)
+{
+ unsigned long flags;
+ int err;
+
+ dev_dbg(xudc->dev, "exiting ELPG\n");
+
+ err = regulator_bulk_enable(xudc->soc->num_supplies,
+ xudc->supplies);
+ if (err < 0)
+ return err;
+
+ err = clk_bulk_prepare_enable(xudc->soc->num_clks, xudc->clks);
+ if (err < 0)
+ return err;
+
+ tegra_xudc_fpci_ipfs_init(xudc);
+
+ tegra_xudc_device_params_init(xudc);
+
+ tegra_xudc_init_event_ring(xudc);
+
+ tegra_xudc_init_eps(xudc);
+
+ xudc_writel(xudc, xudc->saved_regs.portpm, PORTPM);
+ xudc_writel(xudc, xudc->saved_regs.ctrl, CTRL);
+
+ spin_lock_irqsave(&xudc->lock, flags);
+ xudc->powergated = false;
+ spin_unlock_irqrestore(&xudc->lock, flags);
+
+ dev_dbg(xudc->dev, "exiting ELPG done\n");
+ return 0;
+}
+
+static int __maybe_unused tegra_xudc_suspend(struct device *dev)
+{
+ struct tegra_xudc *xudc = dev_get_drvdata(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&xudc->lock, flags);
+ xudc->suspended = true;
+ spin_unlock_irqrestore(&xudc->lock, flags);
+
+ flush_work(&xudc->usb_role_sw_work);
+
+ /* Forcibly disconnect before powergating. */
+ tegra_xudc_device_mode_off(xudc);
+
+ if (!pm_runtime_status_suspended(dev))
+ tegra_xudc_powergate(xudc);
+
+ pm_runtime_disable(dev);
+
+ return 0;
+}
+
+static int __maybe_unused tegra_xudc_resume(struct device *dev)
+{
+ struct tegra_xudc *xudc = dev_get_drvdata(dev);
+ unsigned long flags;
+ int err;
+
+ err = tegra_xudc_unpowergate(xudc);
+ if (err < 0)
+ return err;
+
+ spin_lock_irqsave(&xudc->lock, flags);
+ xudc->suspended = false;
+ spin_unlock_irqrestore(&xudc->lock, flags);
+
+ schedule_work(&xudc->usb_role_sw_work);
+
+ pm_runtime_enable(dev);
+
+ return 0;
+}
+
+static int __maybe_unused tegra_xudc_runtime_suspend(struct device *dev)
+{
+ struct tegra_xudc *xudc = dev_get_drvdata(dev);
+
+ return tegra_xudc_powergate(xudc);
+}
+
+static int __maybe_unused tegra_xudc_runtime_resume(struct device *dev)
+{
+ struct tegra_xudc *xudc = dev_get_drvdata(dev);
+
+ return tegra_xudc_unpowergate(xudc);
+}
+
+static const struct dev_pm_ops tegra_xudc_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(tegra_xudc_suspend, tegra_xudc_resume)
+ SET_RUNTIME_PM_OPS(tegra_xudc_runtime_suspend,
+ tegra_xudc_runtime_resume, NULL)
+};
+
+static struct platform_driver tegra_xudc_driver = {
+ .probe = tegra_xudc_probe,
+ .remove = tegra_xudc_remove,
+ .driver = {
+ .name = "tegra-xudc",
+ .pm = &tegra_xudc_pm_ops,
+ .of_match_table = tegra_xudc_of_match,
+ },
+};
+module_platform_driver(tegra_xudc_driver);
+
+MODULE_DESCRIPTION("NVIDIA Tegra XUSB Device Controller");
+MODULE_AUTHOR("Andrew Bresticker <abrestic@chromium.org>");
+MODULE_AUTHOR("Hui Fu <hfu@nvidia.com>");
+MODULE_AUTHOR("Nagarjuna Kristam <nkristam@nvidia.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 79b2e79dddd0..8d730180db06 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -38,9 +38,9 @@ config USB_XHCI_DBGCAP
before enabling this option. If unsure, say 'N'.
config USB_XHCI_PCI
- tristate
- depends on USB_PCI
- default y
+ tristate
+ depends on USB_PCI
+ default y
config USB_XHCI_PLATFORM
tristate "Generic xHCI driver for a platform device"
@@ -220,12 +220,12 @@ config USB_EHCI_HCD_ORION
Marvell PXA/MMP USB controller" for those.
config USB_EHCI_HCD_SPEAR
- tristate "Support for ST SPEAr on-chip EHCI USB controller"
- depends on USB_EHCI_HCD && PLAT_SPEAR
- default y
- ---help---
- Enables support for the on-chip EHCI controller on
- ST SPEAr chips.
+ tristate "Support for ST SPEAr on-chip EHCI USB controller"
+ depends on USB_EHCI_HCD && PLAT_SPEAR
+ default y
+ ---help---
+ Enables support for the on-chip EHCI controller on
+ ST SPEAr chips.
config USB_EHCI_HCD_STI
tristate "Support for ST STiHxxx on-chip EHCI USB controller"
@@ -237,21 +237,21 @@ config USB_EHCI_HCD_STI
STMicroelectronics consumer electronics SoC's.
config USB_EHCI_HCD_AT91
- tristate "Support for Atmel on-chip EHCI USB controller"
- depends on USB_EHCI_HCD && ARCH_AT91
- default y
- ---help---
- Enables support for the on-chip EHCI controller on
- Atmel chips.
+ tristate "Support for Atmel on-chip EHCI USB controller"
+ depends on USB_EHCI_HCD && ARCH_AT91
+ default y
+ ---help---
+ Enables support for the on-chip EHCI controller on
+ Atmel chips.
config USB_EHCI_TEGRA
- tristate "NVIDIA Tegra HCD support"
- depends on ARCH_TEGRA
- select USB_EHCI_ROOT_HUB_TT
- select USB_TEGRA_PHY
- help
- This driver enables support for the internal USB Host Controllers
- found in NVIDIA Tegra SoCs. The controllers are EHCI compliant.
+ tristate "NVIDIA Tegra HCD support"
+ depends on ARCH_TEGRA
+ select USB_EHCI_ROOT_HUB_TT
+ select USB_TEGRA_PHY
+ help
+ This driver enables support for the internal USB Host Controllers
+ found in NVIDIA Tegra SoCs. The controllers are EHCI compliant.
config USB_EHCI_HCD_PPC_OF
bool "EHCI support for PPC USB controller on OF platform bus"
@@ -269,10 +269,10 @@ config USB_EHCI_SH
If you use the PCI EHCI controller, this option is not necessary.
config USB_EHCI_EXYNOS
- tristate "EHCI support for Samsung S5P/EXYNOS SoC Series"
- depends on ARCH_S5PV210 || ARCH_EXYNOS
- help
- Enable support for the Samsung Exynos SOC's on-chip EHCI controller.
+ tristate "EHCI support for Samsung S5P/EXYNOS SoC Series"
+ depends on ARCH_S5PV210 || ARCH_EXYNOS
+ help
+ Enable support for the Samsung Exynos SOC's on-chip EHCI controller.
config USB_EHCI_MV
tristate "EHCI support for Marvell PXA/MMP USB controller"
@@ -409,12 +409,12 @@ config USB_OHCI_HCD_OMAP1
Enables support for the OHCI controller on OMAP1/2 chips.
config USB_OHCI_HCD_SPEAR
- tristate "Support for ST SPEAr on-chip OHCI USB controller"
- depends on USB_OHCI_HCD && PLAT_SPEAR
- default y
- ---help---
- Enables support for the on-chip OHCI controller on
- ST SPEAr chips.
+ tristate "Support for ST SPEAr on-chip OHCI USB controller"
+ depends on USB_OHCI_HCD && PLAT_SPEAR
+ default y
+ ---help---
+ Enables support for the on-chip OHCI controller on
+ ST SPEAr chips.
config USB_OHCI_HCD_STI
tristate "Support for ST STiHxxx on-chip OHCI USB controller"
@@ -426,12 +426,12 @@ config USB_OHCI_HCD_STI
STMicroelectronics consumer electronics SoC's.
config USB_OHCI_HCD_S3C2410
- tristate "OHCI support for Samsung S3C24xx/S3C64xx SoC series"
- depends on USB_OHCI_HCD && (ARCH_S3C24XX || ARCH_S3C64XX)
- default y
- ---help---
- Enables support for the on-chip OHCI controller on
- S3C24xx/S3C64xx chips.
+ tristate "OHCI support for Samsung S3C24xx/S3C64xx SoC series"
+ depends on USB_OHCI_HCD && (ARCH_S3C24XX || ARCH_S3C64XX)
+ default y
+ ---help---
+ Enables support for the on-chip OHCI controller on
+ S3C24xx/S3C64xx chips.
config USB_OHCI_HCD_LPC32XX
tristate "Support for LPC on-chip OHCI USB controller"
@@ -440,8 +440,8 @@ config USB_OHCI_HCD_LPC32XX
depends on USB_ISP1301
default y
---help---
- Enables support for the on-chip OHCI controller on
- NXP chips.
+ Enables support for the on-chip OHCI controller on
+ NXP chips.
config USB_OHCI_HCD_PXA27X
tristate "Support for PXA27X/PXA3XX on-chip OHCI USB controller"
@@ -456,8 +456,8 @@ config USB_OHCI_HCD_AT91
depends on USB_OHCI_HCD && ARCH_AT91 && OF
default y
---help---
- Enables support for the on-chip OHCI controller on
- Atmel chips.
+ Enables support for the on-chip OHCI controller on
+ Atmel chips.
config USB_OHCI_HCD_OMAP3
tristate "OHCI support for OMAP3 and later chips"
@@ -545,7 +545,7 @@ config USB_OHCI_EXYNOS
tristate "OHCI support for Samsung S5P/EXYNOS SoC Series"
depends on ARCH_S5PV210 || ARCH_EXYNOS
help
- Enable support for the Samsung Exynos SOC's on-chip OHCI controller.
+ Enable support for the Samsung Exynos SOC's on-chip OHCI controller.
config USB_CNS3XXX_OHCI
bool "Cavium CNS3XXX OHCI Module (DEPRECATED)"
@@ -609,8 +609,8 @@ config USB_UHCI_PLATFORM
default y if (ARCH_VT8500 || ARCH_ASPEED)
config USB_UHCI_ASPEED
- bool
- default y if ARCH_ASPEED
+ bool
+ default y if ARCH_ASPEED
config USB_FHCI_HCD
tristate "Freescale QE USB Host Controller support"
@@ -713,14 +713,14 @@ config USB_RENESAS_USBHS_HCD
module will be called renesas-usbhs.
config USB_IMX21_HCD
- tristate "i.MX21 HCD support"
- depends on ARM && ARCH_MXC
- help
- This driver enables support for the on-chip USB host in the
- i.MX21 processor.
-
- To compile this driver as a module, choose M here: the
- module will be called "imx21-hcd".
+ tristate "i.MX21 HCD support"
+ depends on ARM && ARCH_MXC
+ help
+ This driver enables support for the on-chip USB host in the
+ i.MX21 processor.
+
+ To compile this driver as a module, choose M here: the
+ module will be called "imx21-hcd".
config USB_HCD_BCMA
tristate "BCMA usb host driver"
diff --git a/drivers/usb/host/bcma-hcd.c b/drivers/usb/host/bcma-hcd.c
index 2400a826397a..652fa29beb27 100644
--- a/drivers/usb/host/bcma-hcd.c
+++ b/drivers/usb/host/bcma-hcd.c
@@ -406,9 +406,12 @@ static int bcma_hcd_probe(struct bcma_device *core)
return -ENOMEM;
usb_dev->core = core;
- if (core->dev.of_node)
+ if (core->dev.of_node) {
usb_dev->gpio_desc = devm_gpiod_get(&core->dev, "vcc",
GPIOD_OUT_HIGH);
+ if (IS_ERR(usb_dev->gpio_desc))
+ return PTR_ERR(usb_dev->gpio_desc);
+ }
switch (core->id.id) {
case BCMA_CORE_USB20_HOST:
diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c
index 9e0c98d6bdb0..f967adf2d8df 100644
--- a/drivers/usb/host/fotg210-hcd.c
+++ b/drivers/usb/host/fotg210-hcd.c
@@ -5646,8 +5646,10 @@ static int fotg210_hcd_probe(struct platform_device *pdev)
return retval;
failed_dis_clk:
- if (!IS_ERR(fotg210->pclk))
+ if (!IS_ERR(fotg210->pclk)) {
clk_disable_unprepare(fotg210->pclk);
+ clk_put(fotg210->pclk);
+ }
failed_put_hcd:
usb_put_hcd(hcd);
fail_create_hcd:
@@ -5665,8 +5667,10 @@ static int fotg210_hcd_remove(struct platform_device *pdev)
struct usb_hcd *hcd = platform_get_drvdata(pdev);
struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
- if (!IS_ERR(fotg210->pclk))
+ if (!IS_ERR(fotg210->pclk)) {
clk_disable_unprepare(fotg210->pclk);
+ clk_put(fotg210->pclk);
+ }
usb_remove_hcd(hcd);
usb_put_hcd(hcd);
diff --git a/drivers/usb/host/imx21-dbg.c b/drivers/usb/host/imx21-dbg.c
index 7fcf1d9dd7f3..02a1344fbd6a 100644
--- a/drivers/usb/host/imx21-dbg.c
+++ b/drivers/usb/host/imx21-dbg.c
@@ -419,7 +419,7 @@ static void create_debug_files(struct imx21 *imx21)
{
struct dentry *root;
- root = debugfs_create_dir(dev_name(imx21->dev), NULL);
+ root = debugfs_create_dir(dev_name(imx21->dev), usb_debug_root);
imx21->debug_root = root;
debugfs_create_file("status", S_IRUGO, root, imx21, &debug_status_fops);
diff --git a/drivers/usb/host/isp1362-hcd.c b/drivers/usb/host/isp1362-hcd.c
index 96f8daa11f25..4a3a2852523f 100644
--- a/drivers/usb/host/isp1362-hcd.c
+++ b/drivers/usb/host/isp1362-hcd.c
@@ -2627,7 +2627,7 @@ static int isp1362_probe(struct platform_device *pdev)
{
struct usb_hcd *hcd;
struct isp1362_hcd *isp1362_hcd;
- struct resource *addr, *data, *irq_res;
+ struct resource *data, *irq_res;
void __iomem *addr_reg;
void __iomem *data_reg;
int irq;
@@ -2651,8 +2651,7 @@ static int isp1362_probe(struct platform_device *pdev)
irq = irq_res->start;
- addr = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- addr_reg = devm_ioremap_resource(&pdev->dev, addr);
+ addr_reg = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(addr_reg))
return PTR_ERR(addr_reg);
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
index fc35a7993b7b..b635c6a1b1a9 100644
--- a/drivers/usb/host/ohci-at91.c
+++ b/drivers/usb/host/ohci-at91.c
@@ -115,7 +115,6 @@ static void at91_start_hc(struct platform_device *pdev)
static void at91_stop_hc(struct platform_device *pdev)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
- struct ohci_regs __iomem *regs = hcd->regs;
struct ohci_at91_priv *ohci_at91 = hcd_to_ohci_at91_priv(hcd);
dev_dbg(&pdev->dev, "stop\n");
@@ -123,7 +122,7 @@ static void at91_stop_hc(struct platform_device *pdev)
/*
* Put the USB host controller into reset.
*/
- writel(0, &regs->control);
+ usb_hcd_platform_shutdown(pdev);
/*
* Stop the USB clocks.
@@ -628,6 +627,7 @@ ohci_hcd_at91_drv_suspend(struct device *dev)
/* flush the writes */
(void) ohci_readl (ohci, &ohci->regs->control);
+ msleep(1);
at91_stop_clock(ohci_at91);
}
@@ -642,8 +642,8 @@ ohci_hcd_at91_drv_resume(struct device *dev)
if (ohci_at91->wakeup)
disable_irq_wake(hcd->irq);
-
- at91_start_clock(ohci_at91);
+ else
+ at91_start_clock(ohci_at91);
ohci_resume(hcd, false);
diff --git a/drivers/usb/host/ohci-nxp.c b/drivers/usb/host/ohci-nxp.c
index c561881d0e79..85878e8ad331 100644
--- a/drivers/usb/host/ohci-nxp.c
+++ b/drivers/usb/host/ohci-nxp.c
@@ -150,7 +150,7 @@ static void ohci_nxp_stop_hc(void)
static int ohci_hcd_nxp_probe(struct platform_device *pdev)
{
- struct usb_hcd *hcd = 0;
+ struct usb_hcd *hcd = NULL;
const struct hc_driver *driver = &ohci_nxp_hc_driver;
struct resource *res;
int ret = 0, irq;
diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c
index e67242e437ed..fe09b8626329 100644
--- a/drivers/usb/host/oxu210hp-hcd.c
+++ b/drivers/usb/host/oxu210hp-hcd.c
@@ -676,12 +676,12 @@ static int oxu_hub_control(struct usb_hcd *hcd,
*/
/* Low level read/write registers functions */
-static inline u32 oxu_readl(void *base, u32 reg)
+static inline u32 oxu_readl(void __iomem *base, u32 reg)
{
return readl(base + reg);
}
-static inline void oxu_writel(void *base, u32 reg, u32 val)
+static inline void oxu_writel(void __iomem *base, u32 reg, u32 val)
{
writel(val, base + reg);
}
@@ -4063,7 +4063,7 @@ static const struct hc_driver oxu_hc_driver = {
* Module stuff
*/
-static void oxu_configuration(struct platform_device *pdev, void *base)
+static void oxu_configuration(struct platform_device *pdev, void __iomem *base)
{
u32 tmp;
@@ -4093,7 +4093,7 @@ static void oxu_configuration(struct platform_device *pdev, void *base)
oxu_writel(base, OXU_CHIPIRQEN_SET, OXU_USBSPHLPWUI | OXU_USBOTGLPWUI);
}
-static int oxu_verify_id(struct platform_device *pdev, void *base)
+static int oxu_verify_id(struct platform_device *pdev, void __iomem *base)
{
u32 id;
static const char * const bo[] = {
@@ -4121,7 +4121,7 @@ static int oxu_verify_id(struct platform_device *pdev, void *base)
static const struct hc_driver oxu_hc_driver;
static struct usb_hcd *oxu_create(struct platform_device *pdev,
unsigned long memstart, unsigned long memlen,
- void *base, int irq, int otg)
+ void __iomem *base, int irq, int otg)
{
struct device *dev = &pdev->dev;
@@ -4158,7 +4158,7 @@ static struct usb_hcd *oxu_create(struct platform_device *pdev,
static int oxu_init(struct platform_device *pdev,
unsigned long memstart, unsigned long memlen,
- void *base, int irq)
+ void __iomem *base, int irq)
{
struct oxu_info *info = platform_get_drvdata(pdev);
struct usb_hcd *hcd;
@@ -4207,7 +4207,7 @@ error_create_otg:
static int oxu_drv_probe(struct platform_device *pdev)
{
struct resource *res;
- void *base;
+ void __iomem *base;
unsigned long memstart, memlen;
int irq, ret;
struct oxu_info *info;
diff --git a/drivers/usb/host/u132-hcd.c b/drivers/usb/host/u132-hcd.c
index 4efee34f154f..e9209e3e6248 100644
--- a/drivers/usb/host/u132-hcd.c
+++ b/drivers/usb/host/u132-hcd.c
@@ -71,7 +71,7 @@ INT_MODULE_PARM(testing, 0);
/* Some boards misreport power switching/overcurrent*/
static bool distrust_firmware = true;
module_param(distrust_firmware, bool, 0);
-MODULE_PARM_DESC(distrust_firmware, "true to distrust firmware power/overcurren"
- "t setup");
+MODULE_PARM_DESC(distrust_firmware,
+ "true to distrust firmware power/overcurrent setup");
static DECLARE_WAIT_QUEUE_HEAD(u132_hcd_wait);
/*
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 1e0236e90687..a0025d23b257 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -48,6 +48,7 @@
#define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_XHCI 0x15e9
#define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_XHCI 0x15ec
#define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_XHCI 0x15f0
+#define PCI_DEVICE_ID_INTEL_ICE_LAKE_XHCI 0x8a13
#define PCI_DEVICE_ID_AMD_PROMONTORYA_4 0x43b9
#define PCI_DEVICE_ID_AMD_PROMONTORYA_3 0x43ba
@@ -212,7 +213,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
pdev->device == PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_XHCI ||
- pdev->device == PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_XHCI))
+ pdev->device == PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_XHCI ||
+ pdev->device == PCI_DEVICE_ID_INTEL_ICE_LAKE_XHCI))
xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW;
if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index e7aab31fd9a5..6475c3d3b43b 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -280,6 +280,9 @@ void xhci_ring_cmd_db(struct xhci_hcd *xhci)
return;
xhci_dbg(xhci, "// Ding dong!\n");
+
+ trace_xhci_ring_host_doorbell(0, DB_VALUE_HOST);
+
writel(DB_VALUE_HOST, &xhci->dba->doorbell[0]);
/* Flush PCI posted writes */
readl(&xhci->dba->doorbell[0]);
@@ -401,6 +404,9 @@ void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
if ((ep_state & EP_STOP_CMD_PENDING) || (ep_state & SET_DEQ_PENDING) ||
(ep_state & EP_HALTED) || (ep_state & EP_CLEARING_TT))
return;
+
+ trace_xhci_ring_ep_doorbell(slot_id, DB_VALUE(ep_index, stream_id));
+
writel(DB_VALUE(ep_index, stream_id), db_addr);
/* The CPU has better things to do at this point than wait for a
* write-posting flush. It'll get there soon enough.
@@ -651,10 +657,8 @@ static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
}
xhci_urb_free_priv(urb_priv);
usb_hcd_unlink_urb_from_ep(hcd, urb);
- spin_unlock(&xhci->lock);
trace_xhci_urb_giveback(urb);
usb_hcd_giveback_urb(hcd, urb, status);
- spin_lock(&xhci->lock);
}
static void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci,
@@ -2741,6 +2745,42 @@ static int xhci_handle_event(struct xhci_hcd *xhci)
}
/*
+ * Update Event Ring Dequeue Pointer:
+ * - When all events have finished
+ * - To avoid "Event Ring Full Error" condition
+ */
+static void xhci_update_erst_dequeue(struct xhci_hcd *xhci,
+ union xhci_trb *event_ring_deq)
+{
+ u64 temp_64;
+ dma_addr_t deq;
+
+ temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
+ /* If necessary, update the HW's version of the event ring deq ptr. */
+ if (event_ring_deq != xhci->event_ring->dequeue) {
+ deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
+ xhci->event_ring->dequeue);
+ if (deq == 0)
+ xhci_warn(xhci, "WARN something wrong with SW event ring dequeue ptr\n");
+ /*
+ * Per 4.9.4, Software writes to the ERDP register shall
+ * always advance the Event Ring Dequeue Pointer value.
+ */
+ if ((temp_64 & (u64) ~ERST_PTR_MASK) ==
+ ((u64) deq & (u64) ~ERST_PTR_MASK))
+ return;
+
+ /* Update HC event ring dequeue pointer */
+ temp_64 &= ERST_PTR_MASK;
+ temp_64 |= ((u64) deq & (u64) ~ERST_PTR_MASK);
+ }
+
+ /* Clear the event handler busy flag (RW1C) */
+ temp_64 |= ERST_EHB;
+ xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue);
+}
+
+/*
* xHCI spec says we can get an interrupt, and if the HC has an error condition,
* we might get bad data out of the event ring. Section 4.10.2.7 has a list of
* indicators of an event TRB error, but we check the status *first* to be safe.
@@ -2751,9 +2791,9 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
union xhci_trb *event_ring_deq;
irqreturn_t ret = IRQ_NONE;
unsigned long flags;
- dma_addr_t deq;
u64 temp_64;
u32 status;
+ int event_loop = 0;
spin_lock_irqsave(&xhci->lock, flags);
/* Check if the xHC generated the interrupt, or the irq is shared */
@@ -2807,24 +2847,14 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
/* FIXME this should be a delayed service routine
* that clears the EHB.
*/
- while (xhci_handle_event(xhci) > 0) {}
-
- temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
- /* If necessary, update the HW's version of the event ring deq ptr. */
- if (event_ring_deq != xhci->event_ring->dequeue) {
- deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
- xhci->event_ring->dequeue);
- if (deq == 0)
- xhci_warn(xhci, "WARN something wrong with SW event "
- "ring dequeue ptr.\n");
- /* Update HC event ring dequeue pointer */
- temp_64 &= ERST_PTR_MASK;
- temp_64 |= ((u64) deq & (u64) ~ERST_PTR_MASK);
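+ /* Update the ERDP every half segment so a busy event ring cannot fill up. */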
+ while (xhci_handle_event(xhci) > 0) {
+ if (event_loop++ < TRBS_PER_SEGMENT / 2)
+ continue;
+ xhci_update_erst_dequeue(xhci, event_ring_deq);
+ event_loop = 0;
}
- /* Clear the event handler busy flag (RW1C); event ring is empty. */
- temp_64 |= ERST_EHB;
- xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue);
+ xhci_update_erst_dequeue(xhci, event_ring_deq);
ret = IRQ_HANDLED;
out:
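
The hunk above bounds how much event-ring work happens between hardware writebacks: the dequeue pointer is now pushed to the controller every TRBS_PER_SEGMENT/2 handled events instead of only once after the loop, so a long burst of events cannot fill the ring before the controller learns that entries have been consumed. A minimal sketch of the pattern, with hypothetical demo_* names (not xhci code):

	/* Drain a producer/consumer ring, publishing progress to the
	 * hardware every FLUSH_INTERVAL events so the producer side
	 * never observes a full ring during a long burst.
	 */
	#define FLUSH_INTERVAL	128	/* e.g. TRBS_PER_SEGMENT / 2 */

	static void demo_drain_ring(struct demo_ring *ring)
	{
		unsigned int handled = 0;

		while (demo_handle_event(ring) > 0) {
			if (++handled < FLUSH_INTERVAL)
				continue;
			demo_write_hw_dequeue(ring);	/* mid-burst update */
			handled = 0;
		}
		demo_write_hw_dequeue(ring);	/* final update; in xhci this also clears EHB */
	}
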
diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c
index 2ff7c911fbd0..bf9065438320 100644
--- a/drivers/usb/host/xhci-tegra.c
+++ b/drivers/usb/host/xhci-tegra.c
@@ -42,19 +42,18 @@
#define XUSB_CFG_CSB_BASE_ADDR 0x800
/* FPCI mailbox registers */
-#define XUSB_CFG_ARU_MBOX_CMD 0x0e4
+/* XUSB_CFG_ARU_MBOX_CMD */
#define MBOX_DEST_FALC BIT(27)
#define MBOX_DEST_PME BIT(28)
#define MBOX_DEST_SMI BIT(29)
#define MBOX_DEST_XHCI BIT(30)
#define MBOX_INT_EN BIT(31)
-#define XUSB_CFG_ARU_MBOX_DATA_IN 0x0e8
+/* XUSB_CFG_ARU_MBOX_DATA_IN and XUSB_CFG_ARU_MBOX_DATA_OUT */
#define CMD_DATA_SHIFT 0
#define CMD_DATA_MASK 0xffffff
#define CMD_TYPE_SHIFT 24
#define CMD_TYPE_MASK 0xff
-#define XUSB_CFG_ARU_MBOX_DATA_OUT 0x0ec
-#define XUSB_CFG_ARU_MBOX_OWNER 0x0f0
+/* XUSB_CFG_ARU_MBOX_OWNER */
#define MBOX_OWNER_NONE 0
#define MBOX_OWNER_FW 1
#define MBOX_OWNER_SW 2
@@ -146,6 +145,13 @@ struct tegra_xusb_phy_type {
unsigned int num;
};
+struct tegra_xusb_mbox_regs {
+ u16 cmd;
+ u16 data_in;
+ u16 data_out;
+ u16 owner;
+};
+
struct tegra_xusb_soc {
const char *firmware;
const char * const *supply_names;
@@ -160,6 +166,8 @@ struct tegra_xusb_soc {
} usb2, ulpi, hsic, usb3;
} ports;
+ struct tegra_xusb_mbox_regs mbox;
+
bool scale_ss_clock;
bool has_ipfs;
};
@@ -395,15 +403,15 @@ static int tegra_xusb_mbox_send(struct tegra_xusb *tegra,
* ACK/NAK messages.
*/
if (!(msg->cmd == MBOX_CMD_ACK || msg->cmd == MBOX_CMD_NAK)) {
- value = fpci_readl(tegra, XUSB_CFG_ARU_MBOX_OWNER);
+ value = fpci_readl(tegra, tegra->soc->mbox.owner);
if (value != MBOX_OWNER_NONE) {
dev_err(tegra->dev, "mailbox is busy\n");
return -EBUSY;
}
- fpci_writel(tegra, MBOX_OWNER_SW, XUSB_CFG_ARU_MBOX_OWNER);
+ fpci_writel(tegra, MBOX_OWNER_SW, tegra->soc->mbox.owner);
- value = fpci_readl(tegra, XUSB_CFG_ARU_MBOX_OWNER);
+ value = fpci_readl(tegra, tegra->soc->mbox.owner);
if (value != MBOX_OWNER_SW) {
dev_err(tegra->dev, "failed to acquire mailbox\n");
return -EBUSY;
@@ -413,17 +421,17 @@ static int tegra_xusb_mbox_send(struct tegra_xusb *tegra,
}
value = tegra_xusb_mbox_pack(msg);
- fpci_writel(tegra, value, XUSB_CFG_ARU_MBOX_DATA_IN);
+ fpci_writel(tegra, value, tegra->soc->mbox.data_in);
- value = fpci_readl(tegra, XUSB_CFG_ARU_MBOX_CMD);
+ value = fpci_readl(tegra, tegra->soc->mbox.cmd);
value |= MBOX_INT_EN | MBOX_DEST_FALC;
- fpci_writel(tegra, value, XUSB_CFG_ARU_MBOX_CMD);
+ fpci_writel(tegra, value, tegra->soc->mbox.cmd);
if (wait_for_idle) {
unsigned long timeout = jiffies + msecs_to_jiffies(250);
while (time_before(jiffies, timeout)) {
- value = fpci_readl(tegra, XUSB_CFG_ARU_MBOX_OWNER);
+ value = fpci_readl(tegra, tegra->soc->mbox.owner);
if (value == MBOX_OWNER_NONE)
break;
@@ -431,7 +439,7 @@ static int tegra_xusb_mbox_send(struct tegra_xusb *tegra,
}
if (time_after(jiffies, timeout))
- value = fpci_readl(tegra, XUSB_CFG_ARU_MBOX_OWNER);
+ value = fpci_readl(tegra, tegra->soc->mbox.owner);
if (value != MBOX_OWNER_NONE)
return -ETIMEDOUT;
@@ -598,16 +606,16 @@ static irqreturn_t tegra_xusb_mbox_thread(int irq, void *data)
mutex_lock(&tegra->lock);
- value = fpci_readl(tegra, XUSB_CFG_ARU_MBOX_DATA_OUT);
+ value = fpci_readl(tegra, tegra->soc->mbox.data_out);
tegra_xusb_mbox_unpack(&msg, value);
- value = fpci_readl(tegra, XUSB_CFG_ARU_MBOX_CMD);
+ value = fpci_readl(tegra, tegra->soc->mbox.cmd);
value &= ~MBOX_DEST_SMI;
- fpci_writel(tegra, value, XUSB_CFG_ARU_MBOX_CMD);
+ fpci_writel(tegra, value, tegra->soc->mbox.cmd);
/* clear mailbox owner if no ACK/NAK is required */
if (!tegra_xusb_mbox_cmd_requires_ack(msg.cmd))
- fpci_writel(tegra, MBOX_OWNER_NONE, XUSB_CFG_ARU_MBOX_OWNER);
+ fpci_writel(tegra, MBOX_OWNER_NONE, tegra->soc->mbox.owner);
tegra_xusb_mbox_handle(tegra, &msg);
@@ -755,7 +763,6 @@ static int tegra_xusb_runtime_suspend(struct device *dev)
{
struct tegra_xusb *tegra = dev_get_drvdata(dev);
- tegra_xusb_phy_disable(tegra);
regulator_bulk_disable(tegra->soc->num_supplies, tegra->supplies);
tegra_xusb_clk_disable(tegra);
@@ -779,16 +786,8 @@ static int tegra_xusb_runtime_resume(struct device *dev)
goto disable_clk;
}
- err = tegra_xusb_phy_enable(tegra);
- if (err < 0) {
- dev_err(dev, "failed to enable PHYs: %d\n", err);
- goto disable_regulator;
- }
-
return 0;
-disable_regulator:
- regulator_bulk_disable(tegra->soc->num_supplies, tegra->supplies);
disable_clk:
tegra_xusb_clk_disable(tegra);
return err;
@@ -970,7 +969,7 @@ static int tegra_xusb_powerdomain_init(struct device *dev,
static int tegra_xusb_probe(struct platform_device *pdev)
{
struct tegra_xusb_mbox_msg msg;
- struct resource *res, *regs;
+ struct resource *regs;
struct tegra_xusb *tegra;
struct xhci_hcd *xhci;
unsigned int i, j, k;
@@ -992,14 +991,12 @@ static int tegra_xusb_probe(struct platform_device *pdev)
if (IS_ERR(tegra->regs))
return PTR_ERR(tegra->regs);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- tegra->fpci_base = devm_ioremap_resource(&pdev->dev, res);
+ tegra->fpci_base = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(tegra->fpci_base))
return PTR_ERR(tegra->fpci_base);
if (tegra->soc->has_ipfs) {
- res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
- tegra->ipfs_base = devm_ioremap_resource(&pdev->dev, res);
+ tegra->ipfs_base = devm_platform_ioremap_resource(pdev, 2);
if (IS_ERR(tegra->ipfs_base))
return PTR_ERR(tegra->ipfs_base);
}
@@ -1128,8 +1125,9 @@ static int tegra_xusb_probe(struct platform_device *pdev)
goto put_powerdomains;
}
- for (i = 0; i < tegra->soc->num_supplies; i++)
- tegra->supplies[i].supply = tegra->soc->supply_names[i];
+ regulator_bulk_set_supply_names(tegra->supplies,
+ tegra->soc->supply_names,
+ tegra->soc->num_supplies);
err = devm_regulator_bulk_get(&pdev->dev, tegra->soc->num_supplies,
tegra->supplies);
@@ -1181,6 +1179,12 @@ static int tegra_xusb_probe(struct platform_device *pdev)
*/
platform_set_drvdata(pdev, tegra);
+ err = tegra_xusb_phy_enable(tegra);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to enable PHYs: %d\n", err);
+ goto put_hcd;
+ }
+
pm_runtime_enable(&pdev->dev);
if (pm_runtime_enabled(&pdev->dev))
err = pm_runtime_get_sync(&pdev->dev);
@@ -1189,7 +1193,7 @@ static int tegra_xusb_probe(struct platform_device *pdev)
if (err < 0) {
dev_err(&pdev->dev, "failed to enable device: %d\n", err);
- goto disable_rpm;
+ goto disable_phy;
}
tegra_xusb_config(tegra, regs);
@@ -1275,9 +1279,11 @@ remove_usb2:
put_rpm:
if (!pm_runtime_status_suspended(&pdev->dev))
tegra_xusb_runtime_suspend(&pdev->dev);
-disable_rpm:
- pm_runtime_disable(&pdev->dev);
+put_hcd:
usb_put_hcd(tegra->hcd);
+disable_phy:
+ tegra_xusb_phy_disable(tegra);
+ pm_runtime_disable(&pdev->dev);
put_powerdomains:
if (!of_property_read_bool(pdev->dev.of_node, "power-domains")) {
tegra_powergate_power_off(TEGRA_POWERGATE_XUSBC);
@@ -1314,6 +1320,8 @@ static int tegra_xusb_remove(struct platform_device *pdev)
tegra_xusb_powerdomain_remove(&pdev->dev, tegra);
}
+ tegra_xusb_phy_disable(tegra);
+
tegra_xusb_padctl_put(tegra->padctl);
return 0;
@@ -1375,6 +1383,12 @@ static const struct tegra_xusb_soc tegra124_soc = {
},
.scale_ss_clock = true,
.has_ipfs = true,
+ .mbox = {
+ .cmd = 0xe4,
+ .data_in = 0xe8,
+ .data_out = 0xec,
+ .owner = 0xf0,
+ },
};
MODULE_FIRMWARE("nvidia/tegra124/xusb.bin");
@@ -1407,6 +1421,12 @@ static const struct tegra_xusb_soc tegra210_soc = {
},
.scale_ss_clock = false,
.has_ipfs = true,
+ .mbox = {
+ .cmd = 0xe4,
+ .data_in = 0xe8,
+ .data_out = 0xec,
+ .owner = 0xf0,
+ },
};
MODULE_FIRMWARE("nvidia/tegra210/xusb.bin");
@@ -1432,12 +1452,48 @@ static const struct tegra_xusb_soc tegra186_soc = {
},
.scale_ss_clock = false,
.has_ipfs = false,
+ .mbox = {
+ .cmd = 0xe4,
+ .data_in = 0xe8,
+ .data_out = 0xec,
+ .owner = 0xf0,
+ },
+};
+
+static const char * const tegra194_supply_names[] = {
+};
+
+static const struct tegra_xusb_phy_type tegra194_phy_types[] = {
+ { .name = "usb3", .num = 4, },
+ { .name = "usb2", .num = 4, },
+};
+
+static const struct tegra_xusb_soc tegra194_soc = {
+ .firmware = "nvidia/tegra194/xusb.bin",
+ .supply_names = tegra194_supply_names,
+ .num_supplies = ARRAY_SIZE(tegra194_supply_names),
+ .phy_types = tegra194_phy_types,
+ .num_types = ARRAY_SIZE(tegra194_phy_types),
+ .ports = {
+ .usb3 = { .offset = 0, .count = 4, },
+ .usb2 = { .offset = 4, .count = 4, },
+ },
+ .scale_ss_clock = false,
+ .has_ipfs = false,
+ .mbox = {
+ .cmd = 0x68,
+ .data_in = 0x6c,
+ .data_out = 0x70,
+ .owner = 0x74,
+ },
};
+MODULE_FIRMWARE("nvidia/tegra194/xusb.bin");
static const struct of_device_id tegra_xusb_of_match[] = {
{ .compatible = "nvidia,tegra124-xusb", .data = &tegra124_soc },
{ .compatible = "nvidia,tegra210-xusb", .data = &tegra210_soc },
{ .compatible = "nvidia,tegra186-xusb", .data = &tegra186_soc },
+ { .compatible = "nvidia,tegra194-xusb", .data = &tegra194_soc },
{ },
};
MODULE_DEVICE_TABLE(of, tegra_xusb_of_match);
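
The mailbox change above replaces compile-time register offsets with per-SoC match data; Tegra124/210/186 keep the historical 0xe4..0xf0 block while Tegra194 moves it to 0x68..0x74. A hedged sketch of how such match data is typically retrieved at probe time (everything except of_device_get_match_data() is illustrative):

	static int demo_probe(struct platform_device *pdev)
	{
		const struct tegra_xusb_soc *soc;

		/* The compatible string selects the SoC description,
		 * which carries the mailbox register offsets.
		 */
		soc = of_device_get_match_data(&pdev->dev);
		if (!soc)
			return -ENODEV;

		/* soc->mbox.cmd is 0xe4 on Tegra124/210/186, 0x68 on Tegra194 */
		return 0;
	}
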
diff --git a/drivers/usb/host/xhci-trace.h b/drivers/usb/host/xhci-trace.h
index 052a269d86f2..56eb867803a6 100644
--- a/drivers/usb/host/xhci-trace.h
+++ b/drivers/usb/host/xhci-trace.h
@@ -560,6 +560,32 @@ DEFINE_EVENT(xhci_log_portsc, xhci_hub_status_data,
TP_ARGS(portnum, portsc)
);
+DECLARE_EVENT_CLASS(xhci_log_doorbell,
+ TP_PROTO(u32 slot, u32 doorbell),
+ TP_ARGS(slot, doorbell),
+ TP_STRUCT__entry(
+ __field(u32, slot)
+ __field(u32, doorbell)
+ ),
+ TP_fast_assign(
+ __entry->slot = slot;
+ __entry->doorbell = doorbell;
+ ),
+ TP_printk("Ring doorbell for %s",
+ xhci_decode_doorbell(__entry->slot, __entry->doorbell)
+ )
+);
+
+DEFINE_EVENT(xhci_log_doorbell, xhci_ring_ep_doorbell,
+ TP_PROTO(u32 slot, u32 doorbell),
+ TP_ARGS(slot, doorbell)
+);
+
+DEFINE_EVENT(xhci_log_doorbell, xhci_ring_host_doorbell,
+ TP_PROTO(u32 slot, u32 doorbell),
+ TP_ARGS(slot, doorbell)
+);
+
DECLARE_EVENT_CLASS(xhci_dbc_log_request,
TP_PROTO(struct dbc_request *req),
TP_ARGS(req),
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 6c17e3fe181a..6721d059f58a 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -5301,7 +5301,8 @@ static const struct hc_driver xhci_hc_driver = {
* generic hardware linkage
*/
.irq = xhci_irq,
- .flags = HCD_MEMORY | HCD_DMA | HCD_USB3 | HCD_SHARED,
+ .flags = HCD_MEMORY | HCD_DMA | HCD_USB3 | HCD_SHARED |
+ HCD_BH,
/*
* basic lifecycle operations
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index f9f88626a57a..dc6f62a4b197 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -2580,6 +2580,35 @@ static inline const char *xhci_decode_portsc(u32 portsc)
return str;
}
+static inline const char *xhci_decode_doorbell(u32 slot, u32 doorbell)
+{
+ static char str[256];
+ u8 ep;
+ u16 stream;
+ int ret;
+
+ ep = (doorbell & 0xff);
+ stream = doorbell >> 16;
+
+ if (slot == 0) {
+ sprintf(str, "Command Ring %d", doorbell);
+ return str;
+ }
+ ret = sprintf(str, "Slot %d ", slot);
+ if (ep > 0 && ep < 32)
+ ret = sprintf(str + ret, "ep%d%s",
+ ep / 2,
+ ep % 2 ? "in" : "out");
+ else if (ep == 0 || ep < 248)
+ ret = sprintf(str + ret, "Reserved %d", ep);
+ else
+ ret = sprintf(str + ret, "Vendor Defined %d", ep);
+ if (stream)
+ ret = sprintf(str + ret, " Stream %d", stream);
+
+ return str;
+}
+
static inline const char *xhci_ep_state_string(u8 state)
{
switch (state) {
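
For reference, the decoder above assumes the doorbell register layout from the xHCI specification (section 5.6): bits 7:0 hold the DB Target (0 addresses the command ring, 1-31 address device endpoints) and bits 31:16 hold the DB Stream ID. A worked example, assuming the usual DB_VALUE() encoding of ((ep_index + 1) | (stream_id << 16)):

	/* DB_VALUE(ep_index = 2, stream_id = 1) == 0x00010003:
	 *   target = 3 -> ep 3/2 = 1, odd -> "in"
	 *   stream = 1
	 * so xhci_decode_doorbell(5, 0x00010003) yields
	 * "Slot 5 ep1in Stream 1".
	 */
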
diff --git a/drivers/usb/image/microtek.c b/drivers/usb/image/microtek.c
index 7a6b122c833f..360416680e82 100644
--- a/drivers/usb/image/microtek.c
+++ b/drivers/usb/image/microtek.c
@@ -566,7 +566,6 @@ static int
mts_scsi_queuecommand_lck(struct scsi_cmnd *srb, mts_scsi_cmnd_callback callback)
{
struct mts_desc* desc = (struct mts_desc*)(srb->device->host->hostdata[0]);
- int err = 0;
int res;
MTS_DEBUG_GOT_HERE();
@@ -613,7 +612,7 @@ mts_scsi_queuecommand_lck(struct scsi_cmnd *srb, mts_scsi_cmnd_callback callback
}
out:
- return err;
+ return 0;
}
static DEF_SCSI_QCMD(mts_scsi_queuecommand)
diff --git a/drivers/usb/isp1760/isp1760-hcd.c b/drivers/usb/isp1760/isp1760-hcd.c
index 320fc4739835..579a21bd70ad 100644
--- a/drivers/usb/isp1760/isp1760-hcd.c
+++ b/drivers/usb/isp1760/isp1760-hcd.c
@@ -1032,8 +1032,6 @@ static int check_atl_transfer(struct usb_hcd *hcd, struct ptd *ptd,
urb->status = -EOVERFLOW;
else if (FROM_DW3_CERR(ptd->dw3))
urb->status = -EPIPE; /* Stall */
- else if (ptd->dw3 & DW3_ERROR_BIT)
- urb->status = -EPROTO; /* XactErr */
else
urb->status = -EPROTO; /* Unknown */
/*
diff --git a/drivers/usb/misc/Kconfig b/drivers/usb/misc/Kconfig
index 9bce583aada3..834b2494da73 100644
--- a/drivers/usb/misc/Kconfig
+++ b/drivers/usb/misc/Kconfig
@@ -181,8 +181,8 @@ config USB_TEST
including sample test device firmware and "how to use it".
config USB_EHSET_TEST_FIXTURE
- tristate "USB EHSET Test Fixture driver"
- help
+ tristate "USB EHSET Test Fixture driver"
+ help
Say Y here if you want to support the special test fixture device
used for the USB-IF Embedded Host High-Speed Electrical Test procedure.
@@ -233,17 +233,17 @@ config USB_HUB_USB251XB
Say Y or M here if you need to configure such a device via SMBus.
config USB_HSIC_USB3503
- tristate "USB3503 HSIC to USB20 Driver"
- depends on I2C
- select REGMAP_I2C
- help
- This option enables support for SMSC USB3503 HSIC to USB 2.0 Driver.
+ tristate "USB3503 HSIC to USB20 Driver"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ This option enables support for SMSC USB3503 HSIC to USB 2.0 Driver.
config USB_HSIC_USB4604
- tristate "USB4604 HSIC to USB20 Driver"
- depends on I2C
- help
- This option enables support for SMSC USB4604 HSIC to USB 2.0 Driver.
+ tristate "USB4604 HSIC to USB20 Driver"
+ depends on I2C
+ help
+ This option enables support for SMSC USB4604 HSIC to USB 2.0 Driver.
config USB_LINK_LAYER_TEST
tristate "USB Link Layer Test driver"
diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
index ac92725458b5..ba1eaabc7796 100644
--- a/drivers/usb/misc/appledisplay.c
+++ b/drivers/usb/misc/appledisplay.c
@@ -164,7 +164,12 @@ static int appledisplay_bl_get_brightness(struct backlight_device *bd)
0,
pdata->msgdata, 2,
ACD_USB_TIMEOUT);
- brightness = pdata->msgdata[1];
+ if (retval < 2) {
+ if (retval >= 0)
+ retval = -EMSGSIZE;
+ } else {
+ brightness = pdata->msgdata[1];
+ }
mutex_unlock(&pdata->sysfslock);
if (retval < 0)
@@ -299,6 +304,7 @@ error:
if (pdata) {
if (pdata->urb) {
usb_kill_urb(pdata->urb);
+ cancel_delayed_work_sync(&pdata->work);
if (pdata->urbdata)
usb_free_coherent(pdata->udev, ACD_URB_BUFFER_LEN,
pdata->urbdata, pdata->urb->transfer_dma);
diff --git a/drivers/usb/misc/chaoskey.c b/drivers/usb/misc/chaoskey.c
index 34e6cd6f40d3..87067c3d6109 100644
--- a/drivers/usb/misc/chaoskey.c
+++ b/drivers/usb/misc/chaoskey.c
@@ -384,13 +384,17 @@ static int _chaoskey_fill(struct chaoskey *dev)
!dev->reading,
(started ? NAK_TIMEOUT : ALEA_FIRST_TIMEOUT) );
- if (result < 0)
+ if (result < 0) {
+ usb_kill_urb(dev->urb);
goto out;
+ }
- if (result == 0)
+ if (result == 0) {
result = -ETIMEDOUT;
- else
+ usb_kill_urb(dev->urb);
+ } else {
result = dev->valid;
+ }
out:
/* Let the device go back to sleep eventually */
usb_autopm_put_interface(dev->interface);
@@ -526,7 +530,21 @@ static int chaoskey_suspend(struct usb_interface *interface,
static int chaoskey_resume(struct usb_interface *interface)
{
+ struct chaoskey *dev;
+ struct usb_device *udev = interface_to_usbdev(interface);
+
usb_dbg(interface, "resume");
+ dev = usb_get_intfdata(interface);
+
+ /*
+ * We may have lost power.
+ * In that case, a device that needs a long time
+ * for its first requests needs the extended
+ * timeout again.
+ */
+ if (le16_to_cpu(udev->descriptor.idVendor) == ALEA_VENDOR_ID)
+ dev->reads_started = false;
+
return 0;
}
#else
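
Both chaoskey changes above enforce the same rule: when a wait for URB completion is interrupted or times out, the in-flight URB must be cancelled before the function returns, otherwise its completion handler can still fire after the caller has moved on. A sketch of the rule with assumed field names:

	/* wait_event_interruptible_timeout() returns <0 on a signal,
	 * 0 on timeout and >0 on success; the first two leave the URB
	 * in flight and require usb_kill_urb() before returning.
	 */
	static int demo_wait_for_urb(struct demo_dev *dev)
	{
		long ret = wait_event_interruptible_timeout(dev->wait,
							    dev->done, HZ);
		if (ret <= 0) {
			usb_kill_urb(dev->urb);	/* cancel and wait for the callback */
			return ret ? ret : -ETIMEDOUT;
		}
		return 0;
	}
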
diff --git a/drivers/usb/misc/ftdi-elan.c b/drivers/usb/misc/ftdi-elan.c
index cdee3af33ad7..8a3d9c0c8d8b 100644
--- a/drivers/usb/misc/ftdi-elan.c
+++ b/drivers/usb/misc/ftdi-elan.c
@@ -333,7 +333,8 @@ static void ftdi_elan_abandon_completions(struct usb_ftdi *ftdi)
*respond->result = -ESHUTDOWN;
*respond->value = 0;
complete(&respond->wait_completion);
- } mutex_unlock(&ftdi->u132_lock);
+ }
+ mutex_unlock(&ftdi->u132_lock);
}
static void ftdi_elan_abandon_targets(struct usb_ftdi *ftdi)
@@ -763,7 +764,8 @@ static int ftdi_elan_total_command_size(struct usb_ftdi *ftdi, int command_size)
struct u132_command *command = &ftdi->command[COMMAND_MASK &
i++];
total_size += 5 + command->follows;
- } return total_size;
+ }
+ return total_size;
}
static int ftdi_elan_command_engine(struct usb_ftdi *ftdi)
diff --git a/drivers/usb/misc/idmouse.c b/drivers/usb/misc/idmouse.c
index 20b0f91a5d9b..4afb5ddfd361 100644
--- a/drivers/usb/misc/idmouse.c
+++ b/drivers/usb/misc/idmouse.c
@@ -56,11 +56,10 @@ static const struct usb_device_id idmouse_table[] = {
#define FTIP_SCROLL 0x24
#define ftip_command(dev, command, value, index) \
- usb_control_msg (dev->udev, usb_sndctrlpipe (dev->udev, 0), command, \
+ usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), command, \
USB_TYPE_VENDOR | USB_RECIP_ENDPOINT | USB_DIR_OUT, value, index, NULL, 0, 1000)
MODULE_DEVICE_TABLE(usb, idmouse_table);
-static DEFINE_MUTEX(open_disc_mutex);
/* structure to hold all of our device specific stuff */
struct usb_idmouse {
@@ -158,8 +157,8 @@ static int idmouse_create_image(struct usb_idmouse *dev)
/* loop over a blocking bulk read to get data from the device */
while (bytes_read < IMGSIZE) {
- result = usb_bulk_msg (dev->udev,
- usb_rcvbulkpipe (dev->udev, dev->bulk_in_endpointAddr),
+ result = usb_bulk_msg(dev->udev,
+ usb_rcvbulkpipe(dev->udev, dev->bulk_in_endpointAddr),
dev->bulk_in_buffer + bytes_read,
dev->bulk_in_size, &bulk_read, 5000);
if (result < 0) {
@@ -223,21 +222,17 @@ static int idmouse_open(struct inode *inode, struct file *file)
int result;
/* get the interface from minor number and driver information */
- interface = usb_find_interface (&idmouse_driver, iminor (inode));
+ interface = usb_find_interface(&idmouse_driver, iminor(inode));
if (!interface)
return -ENODEV;
- mutex_lock(&open_disc_mutex);
/* get the device information block from the interface */
dev = usb_get_intfdata(interface);
- if (!dev) {
- mutex_unlock(&open_disc_mutex);
+ if (!dev)
return -ENODEV;
- }
/* lock this device */
mutex_lock(&dev->lock);
- mutex_unlock(&open_disc_mutex);
/* check if already open */
if (dev->open) {
@@ -251,7 +246,7 @@ static int idmouse_open(struct inode *inode, struct file *file)
result = usb_autopm_get_interface(interface);
if (result)
goto error;
- result = idmouse_create_image (dev);
+ result = idmouse_create_image(dev);
usb_autopm_put_interface(interface);
if (result)
goto error;
@@ -280,27 +275,17 @@ static int idmouse_release(struct inode *inode, struct file *file)
if (dev == NULL)
return -ENODEV;
- mutex_lock(&open_disc_mutex);
/* lock our device */
mutex_lock(&dev->lock);
- /* are we really open? */
- if (dev->open <= 0) {
- mutex_unlock(&dev->lock);
- mutex_unlock(&open_disc_mutex);
- return -ENODEV;
- }
-
--dev->open;
if (!dev->present) {
/* the device was unplugged before the file was released */
mutex_unlock(&dev->lock);
- mutex_unlock(&open_disc_mutex);
idmouse_delete(dev);
} else {
mutex_unlock(&dev->lock);
- mutex_unlock(&open_disc_mutex);
}
return 0;
}
@@ -379,7 +364,6 @@ static int idmouse_probe(struct usb_interface *interface,
if (result) {
/* something prevented us from registering this device */
dev_err(&interface->dev, "Unable to allocate minor number.\n");
- usb_set_intfdata(interface, NULL);
idmouse_delete(dev);
return result;
}
@@ -392,19 +376,13 @@ static int idmouse_probe(struct usb_interface *interface,
static void idmouse_disconnect(struct usb_interface *interface)
{
- struct usb_idmouse *dev;
-
- /* get device structure */
- dev = usb_get_intfdata(interface);
+ struct usb_idmouse *dev = usb_get_intfdata(interface);
/* give back our minor */
usb_deregister_dev(interface, &idmouse_class);
- mutex_lock(&open_disc_mutex);
- usb_set_intfdata(interface, NULL);
/* lock the device */
mutex_lock(&dev->lock);
- mutex_unlock(&open_disc_mutex);
/* prevent device read, write and ioctl */
dev->present = 0;
diff --git a/drivers/usb/misc/legousbtower.c b/drivers/usb/misc/legousbtower.c
index 23061f1526b4..ab4b98b04115 100644
--- a/drivers/usb/misc/legousbtower.c
+++ b/drivers/usb/misc/legousbtower.c
@@ -157,19 +157,19 @@ MODULE_PARM_DESC(interrupt_out_interval, "Interrupt out interval in ms");
#define LEGO_USB_TOWER_REQUEST_GET_VERSION 0xFD
struct tower_reset_reply {
- __le16 size; /* little-endian */
+ __le16 size;
__u8 err_code;
__u8 spare;
-} __attribute__ ((packed));
+};
struct tower_get_version_reply {
- __le16 size; /* little-endian */
+ __le16 size;
__u8 err_code;
__u8 spare;
__u8 major;
__u8 minor;
- __le16 build_no; /* little-endian */
-} __attribute__ ((packed));
+ __le16 build_no;
+};
/* table of devices that work with this driver */
@@ -178,7 +178,7 @@ static const struct usb_device_id tower_table[] = {
{ } /* Terminating entry */
};
-MODULE_DEVICE_TABLE (usb, tower_table);
+MODULE_DEVICE_TABLE(usb, tower_table);
#define LEGO_USB_TOWER_MINOR_BASE 160
@@ -186,13 +186,13 @@ MODULE_DEVICE_TABLE (usb, tower_table);
/* Structure to hold all of our device specific stuff */
struct lego_usb_tower {
struct mutex lock; /* locks this structure */
- struct usb_device* udev; /* save off the usb device pointer */
+ struct usb_device *udev; /* save off the usb device pointer */
unsigned char minor; /* the starting minor number for this device */
int open_count; /* number of times this port has been opened */
unsigned long disconnected:1;
- char* read_buffer;
+ char *read_buffer;
size_t read_buffer_length; /* this much came in */
size_t read_packet_length; /* this much will be returned on read */
spinlock_t read_buffer_lock;
@@ -202,16 +202,15 @@ struct lego_usb_tower {
wait_queue_head_t read_wait;
wait_queue_head_t write_wait;
- char* interrupt_in_buffer;
- struct usb_endpoint_descriptor* interrupt_in_endpoint;
- struct urb* interrupt_in_urb;
+ char *interrupt_in_buffer;
+ struct usb_endpoint_descriptor *interrupt_in_endpoint;
+ struct urb *interrupt_in_urb;
int interrupt_in_interval;
- int interrupt_in_running;
int interrupt_in_done;
- char* interrupt_out_buffer;
- struct usb_endpoint_descriptor* interrupt_out_endpoint;
- struct urb* interrupt_out_urb;
+ char *interrupt_out_buffer;
+ struct usb_endpoint_descriptor *interrupt_out_endpoint;
+ struct urb *interrupt_out_urb;
int interrupt_out_interval;
int interrupt_out_busy;
@@ -219,21 +218,20 @@ struct lego_usb_tower {
/* local function prototypes */
-static ssize_t tower_read (struct file *file, char __user *buffer, size_t count, loff_t *ppos);
-static ssize_t tower_write (struct file *file, const char __user *buffer, size_t count, loff_t *ppos);
-static inline void tower_delete (struct lego_usb_tower *dev);
-static int tower_open (struct inode *inode, struct file *file);
-static int tower_release (struct inode *inode, struct file *file);
-static __poll_t tower_poll (struct file *file, poll_table *wait);
-static loff_t tower_llseek (struct file *file, loff_t off, int whence);
+static ssize_t tower_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos);
+static ssize_t tower_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos);
+static inline void tower_delete(struct lego_usb_tower *dev);
+static int tower_open(struct inode *inode, struct file *file);
+static int tower_release(struct inode *inode, struct file *file);
+static __poll_t tower_poll(struct file *file, poll_table *wait);
+static loff_t tower_llseek(struct file *file, loff_t off, int whence);
-static void tower_abort_transfers (struct lego_usb_tower *dev);
-static void tower_check_for_read_packet (struct lego_usb_tower *dev);
-static void tower_interrupt_in_callback (struct urb *urb);
-static void tower_interrupt_out_callback (struct urb *urb);
+static void tower_check_for_read_packet(struct lego_usb_tower *dev);
+static void tower_interrupt_in_callback(struct urb *urb);
+static void tower_interrupt_out_callback(struct urb *urb);
-static int tower_probe (struct usb_interface *interface, const struct usb_device_id *id);
-static void tower_disconnect (struct usb_interface *interface);
+static int tower_probe(struct usb_interface *interface, const struct usb_device_id *id);
+static void tower_disconnect(struct usb_interface *interface);
/* file operations needed when we register this driver */
@@ -288,23 +286,23 @@ static inline void lego_usb_tower_debug_data(struct device *dev,
/**
* tower_delete
*/
-static inline void tower_delete (struct lego_usb_tower *dev)
+static inline void tower_delete(struct lego_usb_tower *dev)
{
/* free data structures */
usb_free_urb(dev->interrupt_in_urb);
usb_free_urb(dev->interrupt_out_urb);
- kfree (dev->read_buffer);
- kfree (dev->interrupt_in_buffer);
- kfree (dev->interrupt_out_buffer);
+ kfree(dev->read_buffer);
+ kfree(dev->interrupt_in_buffer);
+ kfree(dev->interrupt_out_buffer);
usb_put_dev(dev->udev);
- kfree (dev);
+ kfree(dev);
}
/**
* tower_open
*/
-static int tower_open (struct inode *inode, struct file *file)
+static int tower_open(struct inode *inode, struct file *file)
{
struct lego_usb_tower *dev = NULL;
int subminor;
@@ -314,7 +312,6 @@ static int tower_open (struct inode *inode, struct file *file)
int result;
reset_reply = kmalloc(sizeof(*reset_reply), GFP_KERNEL);
-
if (!reset_reply) {
retval = -ENOMEM;
goto exit;
@@ -323,8 +320,7 @@ static int tower_open (struct inode *inode, struct file *file)
nonseekable_open(inode, file);
subminor = iminor(inode);
- interface = usb_find_interface (&tower_driver, subminor);
-
+ interface = usb_find_interface(&tower_driver, subminor);
if (!interface) {
pr_err("error, can't find device for minor %d\n", subminor);
retval = -ENODEV;
@@ -351,15 +347,15 @@ static int tower_open (struct inode *inode, struct file *file)
}
/* reset the tower */
- result = usb_control_msg (dev->udev,
- usb_rcvctrlpipe(dev->udev, 0),
- LEGO_USB_TOWER_REQUEST_RESET,
- USB_TYPE_VENDOR | USB_DIR_IN | USB_RECIP_DEVICE,
- 0,
- 0,
- reset_reply,
- sizeof(*reset_reply),
- 1000);
+ result = usb_control_msg(dev->udev,
+ usb_rcvctrlpipe(dev->udev, 0),
+ LEGO_USB_TOWER_REQUEST_RESET,
+ USB_TYPE_VENDOR | USB_DIR_IN | USB_RECIP_DEVICE,
+ 0,
+ 0,
+ reset_reply,
+ sizeof(*reset_reply),
+ 1000);
if (result < 0) {
dev_err(&dev->udev->dev,
"LEGO USB Tower reset control request failed\n");
@@ -370,24 +366,22 @@ static int tower_open (struct inode *inode, struct file *file)
/* initialize in direction */
dev->read_buffer_length = 0;
dev->read_packet_length = 0;
- usb_fill_int_urb (dev->interrupt_in_urb,
- dev->udev,
- usb_rcvintpipe(dev->udev, dev->interrupt_in_endpoint->bEndpointAddress),
- dev->interrupt_in_buffer,
- usb_endpoint_maxp(dev->interrupt_in_endpoint),
- tower_interrupt_in_callback,
- dev,
- dev->interrupt_in_interval);
-
- dev->interrupt_in_running = 1;
+ usb_fill_int_urb(dev->interrupt_in_urb,
+ dev->udev,
+ usb_rcvintpipe(dev->udev, dev->interrupt_in_endpoint->bEndpointAddress),
+ dev->interrupt_in_buffer,
+ usb_endpoint_maxp(dev->interrupt_in_endpoint),
+ tower_interrupt_in_callback,
+ dev,
+ dev->interrupt_in_interval);
+
dev->interrupt_in_done = 0;
mb();
- retval = usb_submit_urb (dev->interrupt_in_urb, GFP_KERNEL);
+ retval = usb_submit_urb(dev->interrupt_in_urb, GFP_KERNEL);
if (retval) {
dev_err(&dev->udev->dev,
"Couldn't submit interrupt_in_urb %d\n", retval);
- dev->interrupt_in_running = 0;
goto unlock_exit;
}
@@ -407,13 +401,12 @@ exit:
/**
* tower_release
*/
-static int tower_release (struct inode *inode, struct file *file)
+static int tower_release(struct inode *inode, struct file *file)
{
struct lego_usb_tower *dev;
int retval = 0;
dev = file->private_data;
-
if (dev == NULL) {
retval = -ENODEV;
goto exit;
@@ -421,56 +414,32 @@ static int tower_release (struct inode *inode, struct file *file)
mutex_lock(&dev->lock);
- if (dev->open_count != 1) {
- dev_dbg(&dev->udev->dev, "%s: device not opened exactly once\n",
- __func__);
- retval = -ENODEV;
- goto unlock_exit;
- }
-
if (dev->disconnected) {
/* the device was unplugged before the file was released */
/* unlock here as tower_delete frees dev */
mutex_unlock(&dev->lock);
- tower_delete (dev);
+ tower_delete(dev);
goto exit;
}
/* wait until write transfer is finished */
if (dev->interrupt_out_busy) {
- wait_event_interruptible_timeout (dev->write_wait, !dev->interrupt_out_busy, 2 * HZ);
+ wait_event_interruptible_timeout(dev->write_wait, !dev->interrupt_out_busy,
+ 2 * HZ);
}
- tower_abort_transfers (dev);
+
+ /* shutdown transfers */
+ usb_kill_urb(dev->interrupt_in_urb);
+ usb_kill_urb(dev->interrupt_out_urb);
+
dev->open_count = 0;
-unlock_exit:
mutex_unlock(&dev->lock);
exit:
return retval;
}
-
-/**
- * tower_abort_transfers
- * aborts transfers and frees associated data structures
- */
-static void tower_abort_transfers (struct lego_usb_tower *dev)
-{
- if (dev == NULL)
- return;
-
- /* shutdown transfer */
- if (dev->interrupt_in_running) {
- dev->interrupt_in_running = 0;
- mb();
- usb_kill_urb(dev->interrupt_in_urb);
- }
- if (dev->interrupt_out_busy)
- usb_kill_urb(dev->interrupt_out_urb);
-}
-
-
/**
* tower_check_for_read_packet
*
@@ -479,23 +448,23 @@ static void tower_abort_transfers (struct lego_usb_tower *dev)
* until it has been there unchanged for at least
* dev->packet_timeout_jiffies, or until the buffer is full.
*/
-static void tower_check_for_read_packet (struct lego_usb_tower *dev)
+static void tower_check_for_read_packet(struct lego_usb_tower *dev)
{
- spin_lock_irq (&dev->read_buffer_lock);
+ spin_lock_irq(&dev->read_buffer_lock);
if (!packet_timeout
|| time_after(jiffies, dev->read_last_arrival + dev->packet_timeout_jiffies)
|| dev->read_buffer_length == read_buffer_size) {
dev->read_packet_length = dev->read_buffer_length;
}
dev->interrupt_in_done = 0;
- spin_unlock_irq (&dev->read_buffer_lock);
+ spin_unlock_irq(&dev->read_buffer_lock);
}
/**
* tower_poll
*/
-static __poll_t tower_poll (struct file *file, poll_table *wait)
+static __poll_t tower_poll(struct file *file, poll_table *wait)
{
struct lego_usb_tower *dev;
__poll_t mask = 0;
@@ -509,12 +478,10 @@ static __poll_t tower_poll (struct file *file, poll_table *wait)
poll_wait(file, &dev->write_wait, wait);
tower_check_for_read_packet(dev);
- if (dev->read_packet_length > 0) {
+ if (dev->read_packet_length > 0)
mask |= EPOLLIN | EPOLLRDNORM;
- }
- if (!dev->interrupt_out_busy) {
+ if (!dev->interrupt_out_busy)
mask |= EPOLLOUT | EPOLLWRNORM;
- }
return mask;
}
@@ -523,7 +490,7 @@ static __poll_t tower_poll (struct file *file, poll_table *wait)
/**
* tower_llseek
*/
-static loff_t tower_llseek (struct file *file, loff_t off, int whence)
+static loff_t tower_llseek(struct file *file, loff_t off, int whence)
{
return -ESPIPE; /* unseekable */
}
@@ -532,7 +499,7 @@ static loff_t tower_llseek (struct file *file, loff_t off, int whence)
/**
* tower_read
*/
-static ssize_t tower_read (struct file *file, char __user *buffer, size_t count, loff_t *ppos)
+static ssize_t tower_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos)
{
struct lego_usb_tower *dev;
size_t bytes_to_read;
@@ -551,7 +518,6 @@ static ssize_t tower_read (struct file *file, char __user *buffer, size_t count,
/* verify that the device wasn't unplugged */
if (dev->disconnected) {
retval = -ENODEV;
- pr_err("No device or device unplugged %d\n", retval);
goto unlock_exit;
}
@@ -561,21 +527,19 @@ static ssize_t tower_read (struct file *file, char __user *buffer, size_t count,
goto unlock_exit;
}
- if (read_timeout) {
+ if (read_timeout)
timeout = jiffies + msecs_to_jiffies(read_timeout);
- }
/* wait for data */
- tower_check_for_read_packet (dev);
+ tower_check_for_read_packet(dev);
while (dev->read_packet_length == 0) {
if (file->f_flags & O_NONBLOCK) {
retval = -EAGAIN;
goto unlock_exit;
}
retval = wait_event_interruptible_timeout(dev->read_wait, dev->interrupt_in_done, dev->packet_timeout_jiffies);
- if (retval < 0) {
+ if (retval < 0)
goto unlock_exit;
- }
/* reset read timeout during read or write activity */
if (read_timeout
@@ -583,28 +547,27 @@ static ssize_t tower_read (struct file *file, char __user *buffer, size_t count,
timeout = jiffies + msecs_to_jiffies(read_timeout);
}
/* check for read timeout */
- if (read_timeout && time_after (jiffies, timeout)) {
+ if (read_timeout && time_after(jiffies, timeout)) {
retval = -ETIMEDOUT;
goto unlock_exit;
}
- tower_check_for_read_packet (dev);
+ tower_check_for_read_packet(dev);
}
/* copy the data from read_buffer into userspace */
bytes_to_read = min(count, dev->read_packet_length);
- if (copy_to_user (buffer, dev->read_buffer, bytes_to_read)) {
+ if (copy_to_user(buffer, dev->read_buffer, bytes_to_read)) {
retval = -EFAULT;
goto unlock_exit;
}
- spin_lock_irq (&dev->read_buffer_lock);
+ spin_lock_irq(&dev->read_buffer_lock);
dev->read_buffer_length -= bytes_to_read;
dev->read_packet_length -= bytes_to_read;
- for (i=0; i<dev->read_buffer_length; i++) {
+ for (i = 0; i < dev->read_buffer_length; i++)
dev->read_buffer[i] = dev->read_buffer[i+bytes_to_read];
- }
- spin_unlock_irq (&dev->read_buffer_lock);
+ spin_unlock_irq(&dev->read_buffer_lock);
retval = bytes_to_read;
@@ -620,7 +583,7 @@ exit:
/**
* tower_write
*/
-static ssize_t tower_write (struct file *file, const char __user *buffer, size_t count, loff_t *ppos)
+static ssize_t tower_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos)
{
struct lego_usb_tower *dev;
size_t bytes_to_write;
@@ -637,7 +600,6 @@ static ssize_t tower_write (struct file *file, const char __user *buffer, size_t
/* verify that the device wasn't unplugged */
if (dev->disconnected) {
retval = -ENODEV;
- pr_err("No device or device unplugged %d\n", retval);
goto unlock_exit;
}
@@ -653,10 +615,10 @@ static ssize_t tower_write (struct file *file, const char __user *buffer, size_t
retval = -EAGAIN;
goto unlock_exit;
}
- retval = wait_event_interruptible (dev->write_wait, !dev->interrupt_out_busy);
- if (retval) {
+ retval = wait_event_interruptible(dev->write_wait,
+ !dev->interrupt_out_busy);
+ if (retval)
goto unlock_exit;
- }
}
/* write the data into interrupt_out_buffer from userspace */
@@ -664,7 +626,7 @@ static ssize_t tower_write (struct file *file, const char __user *buffer, size_t
dev_dbg(&dev->udev->dev, "%s: count = %zd, bytes_to_write = %zd\n",
__func__, count, bytes_to_write);
- if (copy_from_user (dev->interrupt_out_buffer, buffer, bytes_to_write)) {
+ if (copy_from_user(dev->interrupt_out_buffer, buffer, bytes_to_write)) {
retval = -EFAULT;
goto unlock_exit;
}
@@ -682,7 +644,7 @@ static ssize_t tower_write (struct file *file, const char __user *buffer, size_t
dev->interrupt_out_busy = 1;
wmb();
- retval = usb_submit_urb (dev->interrupt_out_urb, GFP_KERNEL);
+ retval = usb_submit_urb(dev->interrupt_out_urb, GFP_KERNEL);
if (retval) {
dev->interrupt_out_busy = 0;
dev_err(&dev->udev->dev,
@@ -703,7 +665,7 @@ exit:
/**
* tower_interrupt_in_callback
*/
-static void tower_interrupt_in_callback (struct urb *urb)
+static void tower_interrupt_in_callback(struct urb *urb)
{
struct lego_usb_tower *dev = urb->context;
int status = urb->status;
@@ -729,9 +691,9 @@ static void tower_interrupt_in_callback (struct urb *urb)
if (urb->actual_length > 0) {
spin_lock_irqsave(&dev->read_buffer_lock, flags);
if (dev->read_buffer_length + urb->actual_length < read_buffer_size) {
- memcpy (dev->read_buffer + dev->read_buffer_length,
- dev->interrupt_in_buffer,
- urb->actual_length);
+ memcpy(dev->read_buffer + dev->read_buffer_length,
+ dev->interrupt_in_buffer,
+ urb->actual_length);
dev->read_buffer_length += urb->actual_length;
dev->read_last_arrival = jiffies;
dev_dbg(&dev->udev->dev, "%s: received %d bytes\n",
@@ -744,25 +706,21 @@ static void tower_interrupt_in_callback (struct urb *urb)
}
resubmit:
- /* resubmit if we're still running */
- if (dev->interrupt_in_running) {
- retval = usb_submit_urb (dev->interrupt_in_urb, GFP_ATOMIC);
- if (retval)
- dev_err(&dev->udev->dev,
- "%s: usb_submit_urb failed (%d)\n",
- __func__, retval);
+ retval = usb_submit_urb(dev->interrupt_in_urb, GFP_ATOMIC);
+ if (retval) {
+ dev_err(&dev->udev->dev, "%s: usb_submit_urb failed (%d)\n",
+ __func__, retval);
}
-
exit:
dev->interrupt_in_done = 1;
- wake_up_interruptible (&dev->read_wait);
+ wake_up_interruptible(&dev->read_wait);
}
/**
* tower_interrupt_out_callback
*/
-static void tower_interrupt_out_callback (struct urb *urb)
+static void tower_interrupt_out_callback(struct urb *urb)
{
struct lego_usb_tower *dev = urb->context;
int status = urb->status;
@@ -790,48 +748,27 @@ static void tower_interrupt_out_callback (struct urb *urb)
* Called by the usb core when a new device is connected that it thinks
* this driver might be interested in.
*/
-static int tower_probe (struct usb_interface *interface, const struct usb_device_id *id)
+static int tower_probe(struct usb_interface *interface, const struct usb_device_id *id)
{
struct device *idev = &interface->dev;
struct usb_device *udev = interface_to_usbdev(interface);
- struct lego_usb_tower *dev = NULL;
+ struct lego_usb_tower *dev;
struct tower_get_version_reply *get_version_reply = NULL;
int retval = -ENOMEM;
int result;
/* allocate memory for our device state and initialize it */
-
- dev = kmalloc (sizeof(struct lego_usb_tower), GFP_KERNEL);
-
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
goto exit;
mutex_init(&dev->lock);
-
dev->udev = usb_get_dev(udev);
- dev->open_count = 0;
- dev->disconnected = 0;
-
- dev->read_buffer = NULL;
- dev->read_buffer_length = 0;
- dev->read_packet_length = 0;
- spin_lock_init (&dev->read_buffer_lock);
+ spin_lock_init(&dev->read_buffer_lock);
dev->packet_timeout_jiffies = msecs_to_jiffies(packet_timeout);
dev->read_last_arrival = jiffies;
-
- init_waitqueue_head (&dev->read_wait);
- init_waitqueue_head (&dev->write_wait);
-
- dev->interrupt_in_buffer = NULL;
- dev->interrupt_in_endpoint = NULL;
- dev->interrupt_in_urb = NULL;
- dev->interrupt_in_running = 0;
- dev->interrupt_in_done = 0;
-
- dev->interrupt_out_buffer = NULL;
- dev->interrupt_out_endpoint = NULL;
- dev->interrupt_out_urb = NULL;
- dev->interrupt_out_busy = 0;
+ init_waitqueue_head(&dev->read_wait);
+ init_waitqueue_head(&dev->write_wait);
result = usb_find_common_endpoints_reverse(interface->cur_altsetting,
NULL, NULL,
@@ -843,16 +780,16 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device
goto error;
}
- dev->read_buffer = kmalloc (read_buffer_size, GFP_KERNEL);
+ dev->read_buffer = kmalloc(read_buffer_size, GFP_KERNEL);
if (!dev->read_buffer)
goto error;
- dev->interrupt_in_buffer = kmalloc (usb_endpoint_maxp(dev->interrupt_in_endpoint), GFP_KERNEL);
+ dev->interrupt_in_buffer = kmalloc(usb_endpoint_maxp(dev->interrupt_in_endpoint), GFP_KERNEL);
if (!dev->interrupt_in_buffer)
goto error;
dev->interrupt_in_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!dev->interrupt_in_urb)
goto error;
- dev->interrupt_out_buffer = kmalloc (write_buffer_size, GFP_KERNEL);
+ dev->interrupt_out_buffer = kmalloc(write_buffer_size, GFP_KERNEL);
if (!dev->interrupt_out_buffer)
goto error;
dev->interrupt_out_urb = usb_alloc_urb(0, GFP_KERNEL);
@@ -862,22 +799,21 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device
dev->interrupt_out_interval = interrupt_out_interval ? interrupt_out_interval : dev->interrupt_out_endpoint->bInterval;
get_version_reply = kmalloc(sizeof(*get_version_reply), GFP_KERNEL);
-
if (!get_version_reply) {
retval = -ENOMEM;
goto error;
}
/* get the firmware version and log it */
- result = usb_control_msg (udev,
- usb_rcvctrlpipe(udev, 0),
- LEGO_USB_TOWER_REQUEST_GET_VERSION,
- USB_TYPE_VENDOR | USB_DIR_IN | USB_RECIP_DEVICE,
- 0,
- 0,
- get_version_reply,
- sizeof(*get_version_reply),
- 1000);
+ result = usb_control_msg(udev,
+ usb_rcvctrlpipe(udev, 0),
+ LEGO_USB_TOWER_REQUEST_GET_VERSION,
+ USB_TYPE_VENDOR | USB_DIR_IN | USB_RECIP_DEVICE,
+ 0,
+ 0,
+ get_version_reply,
+ sizeof(*get_version_reply),
+ 1000);
if (result != sizeof(*get_version_reply)) {
if (result >= 0)
result = -EIO;
@@ -892,10 +828,9 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device
le16_to_cpu(get_version_reply->build_no));
/* we can register the device now, as it is ready */
- usb_set_intfdata (interface, dev);
-
- retval = usb_register_dev (interface, &tower_class);
+ usb_set_intfdata(interface, dev);
+ retval = usb_register_dev(interface, &tower_class);
if (retval) {
/* something prevented us from registering this driver */
dev_err(idev, "Not able to get a minor for this device.\n");
@@ -924,17 +859,17 @@ error:
*
* Called by the usb core when the device is removed from the system.
*/
-static void tower_disconnect (struct usb_interface *interface)
+static void tower_disconnect(struct usb_interface *interface)
{
struct lego_usb_tower *dev;
int minor;
- dev = usb_get_intfdata (interface);
+ dev = usb_get_intfdata(interface);
minor = dev->minor;
/* give back our minor and prevent further open() */
- usb_deregister_dev (interface, &tower_class);
+ usb_deregister_dev(interface, &tower_class);
/* stop I/O */
usb_poison_urb(dev->interrupt_in_urb);
@@ -945,7 +880,7 @@ static void tower_disconnect (struct usb_interface *interface)
/* if the device is not opened, then we clean up right now */
if (!dev->open_count) {
mutex_unlock(&dev->lock);
- tower_delete (dev);
+ tower_delete(dev);
} else {
dev->disconnected = 1;
/* wake up pollers */
@@ -962,6 +897,4 @@ module_usb_driver(tower_driver);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
-#ifdef MODULE_LICENSE
MODULE_LICENSE("GPL");
-#endif
diff --git a/drivers/usb/misc/sisusbvga/Kconfig b/drivers/usb/misc/sisusbvga/Kconfig
index 9b632ab24f03..c16121276a21 100644
--- a/drivers/usb/misc/sisusbvga/Kconfig
+++ b/drivers/usb/misc/sisusbvga/Kconfig
@@ -4,7 +4,7 @@ config USB_SISUSBVGA
tristate "USB 2.0 SVGA dongle support (Net2280/SiS315)"
depends on (USB_MUSB_HDRC || USB_EHCI_HCD)
select FONT_SUPPORT if USB_SISUSBVGA_CON
- ---help---
+ ---help---
Say Y here if you intend to attach a USB2VGA dongle based on a
Net2280 and a SiS315 chip.
diff --git a/drivers/usb/misc/usb251xb.c b/drivers/usb/misc/usb251xb.c
index 6ca9111d150a..10c9e7f6273e 100644
--- a/drivers/usb/misc/usb251xb.c
+++ b/drivers/usb/misc/usb251xb.c
@@ -17,6 +17,7 @@
#include <linux/module.h>
#include <linux/nls.h>
#include <linux/of_device.h>
+#include <linux/regulator/consumer.h>
#include <linux/slab.h>
/* Internal Register Set Addresses & Default Values acc. to DS00001692C */
@@ -26,10 +27,6 @@
#define USB251XB_ADDR_PRODUCT_ID_LSB 0x02
#define USB251XB_ADDR_PRODUCT_ID_MSB 0x03
-#define USB251XB_DEF_PRODUCT_ID_12 0x2512 /* USB2512B/12Bi */
-#define USB251XB_DEF_PRODUCT_ID_13 0x2513 /* USB2513B/13Bi */
-#define USB251XB_DEF_PRODUCT_ID_14 0x2514 /* USB2514B/14Bi */
-#define USB251XB_DEF_PRODUCT_ID_17 0x2517 /* USB2517/17i */
#define USB251XB_ADDR_DEVICE_ID_LSB 0x04
#define USB251XB_ADDR_DEVICE_ID_MSB 0x05
@@ -74,7 +71,6 @@
#define USB251XB_ADDR_PRODUCT_STRING_LEN 0x14
#define USB251XB_ADDR_PRODUCT_STRING 0x54
-#define USB251XB_DEF_PRODUCT_STRING "USB251xB/xBi/7i"
#define USB251XB_ADDR_SERIAL_STRING_LEN 0x15
#define USB251XB_ADDR_SERIAL_STRING 0x92
@@ -116,6 +112,7 @@
struct usb251xb {
struct device *dev;
struct i2c_client *i2c;
+ struct regulator *vdd;
u8 skip_config;
struct gpio_desc *gpio_reset;
u16 vendor_id;
@@ -159,6 +156,14 @@ struct usb251xb_data {
char product_str[USB251XB_STRING_BUFSIZE / 2]; /* ASCII string */
};
+static const struct usb251xb_data usb2422_data = {
+ .product_id = 0x2422,
+ .port_cnt = 2,
+ .led_support = false,
+ .bat_support = true,
+ .product_str = "USB2422",
+};
+
static const struct usb251xb_data usb2512b_data = {
.product_id = 0x2512,
.port_cnt = 2,
@@ -261,20 +266,19 @@ static int usb251x_check_gpio_chip(struct usb251xb *hub)
}
#endif
-static void usb251xb_reset(struct usb251xb *hub, int state)
+static void usb251xb_reset(struct usb251xb *hub)
{
if (!hub->gpio_reset)
return;
i2c_lock_bus(hub->i2c->adapter, I2C_LOCK_SEGMENT);
- gpiod_set_value_cansleep(hub->gpio_reset, state);
+ gpiod_set_value_cansleep(hub->gpio_reset, 1);
+ usleep_range(1, 10); /* >=1us RESET_N asserted */
+ gpiod_set_value_cansleep(hub->gpio_reset, 0);
/* wait for hub recovery/stabilization */
- if (!state)
- usleep_range(500, 750); /* >=500us at power on */
- else
- usleep_range(1, 10); /* >=1us at power down */
+ usleep_range(500, 750); /* >=500us after RESET_N deasserted */
i2c_unlock_bus(hub->i2c->adapter, I2C_LOCK_SEGMENT);
}
@@ -292,7 +296,7 @@ static int usb251xb_connect(struct usb251xb *hub)
i2c_wb[0] = 0x01;
i2c_wb[1] = USB251XB_STATUS_COMMAND_ATTACH;
- usb251xb_reset(hub, 0);
+ usb251xb_reset(hub);
err = i2c_smbus_write_i2c_block_data(hub->i2c,
USB251XB_ADDR_STATUS_COMMAND, 2, i2c_wb);
@@ -342,7 +346,7 @@ static int usb251xb_connect(struct usb251xb *hub)
i2c_wb[USB251XB_ADDR_PORT_MAP_7] = hub->port_map7;
i2c_wb[USB251XB_ADDR_STATUS_COMMAND] = USB251XB_STATUS_COMMAND_ATTACH;
- usb251xb_reset(hub, 0);
+ usb251xb_reset(hub);
/* write registers */
for (i = 0; i < (USB251XB_I2C_REG_SZ / USB251XB_I2C_WRITE_SZ); i++) {
@@ -420,6 +424,10 @@ static int usb251xb_get_ofdata(struct usb251xb *hub,
return err;
}
+ hub->vdd = devm_regulator_get(dev, "vdd");
+ if (IS_ERR(hub->vdd))
+ return PTR_ERR(hub->vdd);
+
if (of_property_read_u16_array(np, "vendor-id", &hub->vendor_id, 1))
hub->vendor_id = USB251XB_DEF_VENDOR_ID;
@@ -593,6 +601,9 @@ static int usb251xb_get_ofdata(struct usb251xb *hub,
static const struct of_device_id usb251xb_of_match[] = {
{
+ .compatible = "microchip,usb2422",
+ .data = &usb2422_data,
+ }, {
.compatible = "microchip,usb2512b",
.data = &usb2512b_data,
}, {
@@ -665,6 +676,10 @@ static int usb251xb_probe(struct usb251xb *hub)
if (err)
return err;
+ err = regulator_enable(hub->vdd);
+ if (err)
+ return err;
+
err = usb251xb_connect(hub);
if (err) {
dev_err(dev, "Failed to connect hub (%d)\n", err);
@@ -692,7 +707,31 @@ static int usb251xb_i2c_probe(struct i2c_client *i2c,
return usb251xb_probe(hub);
}
+static int __maybe_unused usb251xb_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct usb251xb *hub = i2c_get_clientdata(client);
+
+ return regulator_disable(hub->vdd);
+}
+
+static int __maybe_unused usb251xb_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct usb251xb *hub = i2c_get_clientdata(client);
+ int err;
+
+ err = regulator_enable(hub->vdd);
+ if (err)
+ return err;
+
+ return usb251xb_connect(hub);
+}
+
+static SIMPLE_DEV_PM_OPS(usb251xb_pm_ops, usb251xb_suspend, usb251xb_resume);
+
static const struct i2c_device_id usb251xb_id[] = {
+ { "usb2422", 0 },
{ "usb2512b", 0 },
{ "usb2512bi", 0 },
{ "usb2513b", 0 },
@@ -709,6 +748,7 @@ static struct i2c_driver usb251xb_i2c_driver = {
.driver = {
.name = DRIVER_NAME,
.of_match_table = of_match_ptr(usb251xb_of_match),
+ .pm = &usb251xb_pm_ops,
},
.probe = usb251xb_i2c_probe,
.id_table = usb251xb_id,
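
The new PM callbacks above encode a simple contract: the vdd rail is dropped across suspend, so resume must re-enable it and then replay the full register configuration, because the hub has lost its I2C-programmed state. A hedged sketch of the same shape (the demo_* names are illustrative):

	static int __maybe_unused demo_resume(struct device *dev)
	{
		struct demo_hub *hub = dev_get_drvdata(dev);
		int err;

		err = regulator_enable(hub->vdd);	/* power the hub back up */
		if (err)
			return err;
		return demo_configure(hub);	/* re-program registers over I2C */
	}
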
diff --git a/drivers/usb/mtu3/mtu3_gadget_ep0.c b/drivers/usb/mtu3/mtu3_gadget_ep0.c
index 4da216c99726..2be182bd793a 100644
--- a/drivers/usb/mtu3/mtu3_gadget_ep0.c
+++ b/drivers/usb/mtu3/mtu3_gadget_ep0.c
@@ -153,6 +153,15 @@ static void ep0_stall_set(struct mtu3_ep *mep0, bool set, u32 pktrdy)
set ? "SEND" : "CLEAR", decode_ep0_state(mtu));
}
+static void ep0_do_status_stage(struct mtu3 *mtu)
+{
+ void __iomem *mbase = mtu->mac_base;
+ u32 value;
+
+ value = mtu3_readl(mbase, U3D_EP0CSR) & EP0_W1C_BITS;
+ mtu3_writel(mbase, U3D_EP0CSR, value | EP0_SETUPPKTRDY | EP0_DATAEND);
+}
+
static int ep0_queue(struct mtu3_ep *mep0, struct mtu3_request *mreq);
static void ep0_dummy_complete(struct usb_ep *ep, struct usb_request *req)
@@ -297,8 +306,7 @@ static int handle_test_mode(struct mtu3 *mtu, struct usb_ctrlrequest *setup)
ep0_load_test_packet(mtu);
/* send status before entering test mode. */
- value = mtu3_readl(mbase, U3D_EP0CSR) & EP0_W1C_BITS;
- mtu3_writel(mbase, U3D_EP0CSR, value | EP0_SETUPPKTRDY | EP0_DATAEND);
+ ep0_do_status_stage(mtu);
/* wait for ACK status sent by host */
readl_poll_timeout_atomic(mbase + U3D_EP0CSR, value,
@@ -632,7 +640,6 @@ __acquires(mtu->lock)
{
struct usb_ctrlrequest setup;
struct mtu3_request *mreq;
- void __iomem *mbase = mtu->mac_base;
int handled = 0;
ep0_read_setup(mtu, &setup);
@@ -664,14 +671,19 @@ finish:
if (mtu->test_mode) {
; /* nothing to do */
} else if (handled == USB_GADGET_DELAYED_STATUS) {
- /* handle the delay STATUS phase till receive ep_queue on ep0 */
- mtu->delayed_status = true;
- } else if (le16_to_cpu(setup.wLength) == 0) { /* no data stage */
- mtu3_writel(mbase, U3D_EP0CSR,
- (mtu3_readl(mbase, U3D_EP0CSR) & EP0_W1C_BITS)
- | EP0_SETUPPKTRDY | EP0_DATAEND);
+ mreq = next_ep0_request(mtu);
+ if (mreq) {
+ /* the gadget already queued a request to continue the delayed status */
+ ep0_do_status_stage(mtu);
+ ep0_req_giveback(mtu, &mreq->request);
+ } else {
+ /* defer the STATUS stage until ep0_queue() is called */
+ mtu->delayed_status = true;
+ }
+ } else if (le16_to_cpu(setup.wLength) == 0) { /* no data stage */
+ ep0_do_status_stage(mtu);
/* complete zlp request directly */
mreq = next_ep0_request(mtu);
if (mreq && !mreq->request.length)
@@ -802,12 +814,9 @@ static int ep0_queue(struct mtu3_ep *mep, struct mtu3_request *mreq)
}
if (mtu->delayed_status) {
- u32 csr;
mtu->delayed_status = false;
- csr = mtu3_readl(mtu->mac_base, U3D_EP0CSR) & EP0_W1C_BITS;
- csr |= EP0_SETUPPKTRDY | EP0_DATAEND;
- mtu3_writel(mtu->mac_base, U3D_EP0CSR, csr);
+ ep0_do_status_stage(mtu);
/* needn't giveback the request for handling delay STATUS */
return 0;
}
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index bd63450af76a..15cca912c53e 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -2431,14 +2431,12 @@ static int musb_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
int irq = platform_get_irq_byname(pdev, "mc");
- struct resource *iomem;
void __iomem *base;
if (irq <= 0)
return -ENODEV;
- iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(dev, iomem);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/drivers/usb/musb/musb_debugfs.c b/drivers/usb/musb/musb_debugfs.c
index f42858e2b54c..7b6281ab62ed 100644
--- a/drivers/usb/musb/musb_debugfs.c
+++ b/drivers/usb/musb/musb_debugfs.c
@@ -325,7 +325,7 @@ void musb_init_debugfs(struct musb *musb)
{
struct dentry *root;
- root = debugfs_create_dir(dev_name(musb->controller), NULL);
+ root = debugfs_create_dir(dev_name(musb->controller), usb_debug_root);
musb->debugfs_root = root;
debugfs_create_file("regdump", S_IRUGO, root, musb, &musb_regdump_fops);
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
index 327d4f7baaf7..88923175f71e 100644
--- a/drivers/usb/musb/musb_dsps.c
+++ b/drivers/usb/musb/musb_dsps.c
@@ -411,7 +411,7 @@ static int dsps_musb_dbg_init(struct musb *musb, struct dsps_glue *glue)
char buf[128];
sprintf(buf, "%s.dsps", dev_name(musb->controller));
- root = debugfs_create_dir(buf, NULL);
+ root = debugfs_create_dir(buf, usb_debug_root);
glue->dbgfs_root = root;
glue->regset.regs = dsps_musb_regs;
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index ffe462a657b1..f62ffaede1ab 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -1085,7 +1085,6 @@ static int musb_gadget_disable(struct usb_ep *ep)
u8 epnum;
struct musb_ep *musb_ep;
void __iomem *epio;
- int status = 0;
musb_ep = to_musb_ep(ep);
musb = musb_ep->musb;
@@ -1118,7 +1117,7 @@ static int musb_gadget_disable(struct usb_ep *ep)
musb_dbg(musb, "%s", musb_ep->end_point.name);
- return status;
+ return 0;
}
/*
@@ -1316,7 +1315,7 @@ done:
}
/*
- * Set or clear the halt bit of an endpoint. A halted enpoint won't tx/rx any
+ * Set or clear the halt bit of an endpoint. A halted endpoint won't tx/rx any
* data but will queue requests.
*
* exported to ep0 code
diff --git a/drivers/usb/phy/phy-keystone.c b/drivers/usb/phy/phy-keystone.c
index 19871266312d..110e6e9ad621 100644
--- a/drivers/usb/phy/phy-keystone.c
+++ b/drivers/usb/phy/phy-keystone.c
@@ -66,15 +66,13 @@ static int keystone_usbphy_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct keystone_usbphy *k_phy;
- struct resource *res;
int ret;
k_phy = devm_kzalloc(dev, sizeof(*k_phy), GFP_KERNEL);
if (!k_phy)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- k_phy->phy_ctrl = devm_ioremap_resource(dev, res);
+ k_phy->phy_ctrl = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(k_phy->phy_ctrl))
return PTR_ERR(k_phy->phy_ctrl);
diff --git a/drivers/usb/phy/phy-mxs-usb.c b/drivers/usb/phy/phy-mxs-usb.c
index 70b8c8248caf..67b39dc62b37 100644
--- a/drivers/usb/phy/phy-mxs-usb.c
+++ b/drivers/usb/phy/phy-mxs-usb.c
@@ -710,7 +710,6 @@ static enum usb_charger_type mxs_phy_charger_detect(struct usb_phy *phy)
static int mxs_phy_probe(struct platform_device *pdev)
{
- struct resource *res;
void __iomem *base;
struct clk *clk;
struct mxs_phy *mxs_phy;
@@ -723,8 +722,7 @@ static int mxs_phy_probe(struct platform_device *pdev)
if (!of_id)
return -ENODEV;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c
index a3c30b609433..d438b7871446 100644
--- a/drivers/usb/renesas_usbhs/common.c
+++ b/drivers/usb/renesas_usbhs/common.c
@@ -590,7 +590,7 @@ static int usbhs_probe(struct platform_device *pdev)
{
const struct renesas_usbhs_platform_info *info;
struct usbhs_priv *priv;
- struct resource *res, *irq_res;
+ struct resource *irq_res;
struct device *dev = &pdev->dev;
int ret, gpio;
u32 tmp;
@@ -619,8 +619,7 @@ static int usbhs_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->base = devm_ioremap_resource(&pdev->dev, res);
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
diff --git a/drivers/usb/renesas_usbhs/common.h b/drivers/usb/renesas_usbhs/common.h
index 0824099b905e..ef1735d014da 100644
--- a/drivers/usb/renesas_usbhs/common.h
+++ b/drivers/usb/renesas_usbhs/common.h
@@ -161,11 +161,12 @@ struct usbhs_priv;
#define VBSTS (1 << 7) /* VBUS_0 and VBUSIN_0 Input Status */
#define VALID (1 << 3) /* USB Request Receive */
-#define DVSQ_MASK (0x3 << 4) /* Device State */
+#define DVSQ_MASK (0x7 << 4) /* Device State */
#define POWER_STATE (0 << 4)
#define DEFAULT_STATE (1 << 4)
#define ADDRESS_STATE (2 << 4)
#define CONFIGURATION_STATE (3 << 4)
+#define SUSPENDED_STATE (4 << 4)
#define CTSQ_MASK (0x7) /* Control Transfer Stage */
#define IDLE_SETUP_STAGE 0 /* Idle stage or setup stage */
diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
index 86637cd066cf..01c6a48c41bc 100644
--- a/drivers/usb/renesas_usbhs/fifo.c
+++ b/drivers/usb/renesas_usbhs/fifo.c
@@ -1273,11 +1273,11 @@ static void usbhsf_dma_init_dt(struct device *dev, struct usbhs_fifo *fifo,
*/
snprintf(name, sizeof(name), "ch%d", channel);
if (channel & 1) {
- fifo->tx_chan = dma_request_slave_channel_reason(dev, name);
+ fifo->tx_chan = dma_request_chan(dev, name);
if (IS_ERR(fifo->tx_chan))
fifo->tx_chan = NULL;
} else {
- fifo->rx_chan = dma_request_slave_channel_reason(dev, name);
+ fifo->rx_chan = dma_request_chan(dev, name);
if (IS_ERR(fifo->rx_chan))
fifo->rx_chan = NULL;
}
diff --git a/drivers/usb/renesas_usbhs/mod.c b/drivers/usb/renesas_usbhs/mod.c
index 10fc65596014..b98112cefaa4 100644
--- a/drivers/usb/renesas_usbhs/mod.c
+++ b/drivers/usb/renesas_usbhs/mod.c
@@ -169,17 +169,7 @@ void usbhs_mod_remove(struct usbhs_priv *priv)
*/
int usbhs_status_get_device_state(struct usbhs_irq_state *irq_state)
{
- int state = irq_state->intsts0 & DVSQ_MASK;
-
- switch (state) {
- case POWER_STATE:
- case DEFAULT_STATE:
- case ADDRESS_STATE:
- case CONFIGURATION_STATE:
- return state;
- }
-
- return -EIO;
+ return (int)irq_state->intsts0 & DVSQ_MASK;
}
int usbhs_status_get_ctrl_stage(struct usbhs_irq_state *irq_state)
@@ -348,10 +338,6 @@ void usbhs_irq_callback_update(struct usbhs_priv *priv, struct usbhs_mod *mod)
* usbhs_interrupt
*/
- /*
- * it don't enable DVSE (intenb0) here
- * but "mod->irq_dev_state" will be called.
- */
if (info->irq_vbus)
intenb0 |= VBSE;
@@ -362,6 +348,9 @@ void usbhs_irq_callback_update(struct usbhs_priv *priv, struct usbhs_mod *mod)
if (mod->irq_ctrl_stage)
intenb0 |= CTRE;
+ if (mod->irq_dev_state)
+ intenb0 |= DVSE;
+
if (mod->irq_empty && mod->irq_bempsts) {
usbhs_write(priv, BEMPENB, mod->irq_bempsts);
intenb0 |= BEMPE;
diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
index cd38d74b3223..53489cafecc1 100644
--- a/drivers/usb/renesas_usbhs/mod_gadget.c
+++ b/drivers/usb/renesas_usbhs/mod_gadget.c
@@ -457,12 +457,18 @@ static int usbhsg_irq_dev_state(struct usbhs_priv *priv,
{
struct usbhsg_gpriv *gpriv = usbhsg_priv_to_gpriv(priv);
struct device *dev = usbhsg_gpriv_to_dev(gpriv);
+ int state = usbhs_status_get_device_state(irq_state);
gpriv->gadget.speed = usbhs_bus_get_speed(priv);
- dev_dbg(dev, "state = %x : speed : %d\n",
- usbhs_status_get_device_state(irq_state),
- gpriv->gadget.speed);
+ dev_dbg(dev, "state = %x : speed : %d\n", state, gpriv->gadget.speed);
+
+ if (gpriv->gadget.speed != USB_SPEED_UNKNOWN &&
+ (state & SUSPENDED_STATE)) {
+ if (gpriv->driver && gpriv->driver->suspend)
+ gpriv->driver->suspend(&gpriv->gadget);
+ usb_gadget_set_state(&gpriv->gadget, USB_STATE_SUSPENDED);
+ }
return 0;
}
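
The three renesas_usbhs hunks above form one feature: DVSE is now enabled whenever a mode registers an irq_dev_state handler, usbhs_status_get_device_state() returns the raw three-bit state instead of rejecting anything above CONFIGURATION_STATE, and the gadget handler forwards suspend to the composite layer. Note that the suspend check is a bit test, not an equality test: assuming the usual DVSQ layout, every suspended encoding (4 << 4 through 7 << 4, the low bits recording the state before suspend) has bit 6 set, so the intent is roughly this sketch:

	/* DVSQ: 0..3 = powered/default/address/configured,
	 *       4..7 = suspended from one of those states. */
	static bool usbhs_state_is_suspended(int state)
	{
		return state & SUSPENDED_STATE;	/* bit 6 */
	}
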
diff --git a/drivers/usb/roles/class.c b/drivers/usb/roles/class.c
index 94b4e7db2b94..8273126ffdf4 100644
--- a/drivers/usb/roles/class.c
+++ b/drivers/usb/roles/class.c
@@ -175,6 +175,27 @@ void usb_role_switch_put(struct usb_role_switch *sw)
}
EXPORT_SYMBOL_GPL(usb_role_switch_put);
+/**
+ * usb_role_switch_find_by_fwnode - Find USB role switch with its fwnode
+ * @fwnode: fwnode of the USB Role Switch
+ *
+ * Finds and returns the role switch matching @fwnode. The reference count for the
+ * found switch is incremented.
+ */
+struct usb_role_switch *
+usb_role_switch_find_by_fwnode(const struct fwnode_handle *fwnode)
+{
+ struct device *dev;
+
+ if (!fwnode)
+ return NULL;
+
+ dev = class_find_device_by_fwnode(role_class, fwnode);
+
+ return dev ? to_role_switch(dev) : NULL;
+}
+EXPORT_SYMBOL_GPL(usb_role_switch_find_by_fwnode);
+
static umode_t
usb_role_switch_is_visible(struct kobject *kobj, struct attribute *attr, int n)
{
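
class_find_device_by_fwnode() takes a reference on the matched device, so callers of the new lookup must balance it with usb_role_switch_put() when done. A hedged usage sketch (the fwnode source and error policy here are illustrative, not from this patch):

	struct usb_role_switch *sw;

	sw = usb_role_switch_find_by_fwnode(fwnode);	/* gets a reference */
	if (!sw)
		return -EPROBE_DEFER;	/* switch not registered yet */

	usb_role_switch_set_role(sw, USB_ROLE_DEVICE);
	usb_role_switch_put(sw);	/* drop the reference */
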
diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig
index 67279c6bce33..ed4a18b435a0 100644
--- a/drivers/usb/serial/Kconfig
+++ b/drivers/usb/serial/Kconfig
@@ -269,19 +269,19 @@ config USB_SERIAL_F8153X
config USB_SERIAL_GARMIN
- tristate "USB Garmin GPS driver"
- help
- Say Y here if you want to connect to your Garmin GPS.
- Should work with most Garmin GPS devices which have a native USB port.
+ tristate "USB Garmin GPS driver"
+ help
+ Say Y here if you want to connect to your Garmin GPS.
+ Should work with most Garmin GPS devices which have a native USB port.
- See <http://sourceforge.net/projects/garmin-gps> for the latest
- version of the driver.
+ See <http://sourceforge.net/projects/garmin-gps> for the latest
+ version of the driver.
- To compile this driver as a module, choose M here: the
- module will be called garmin_gps.
+ To compile this driver as a module, choose M here: the
+ module will be called garmin_gps.
config USB_SERIAL_IPW
- tristate "USB IPWireless (3G UMTS TDD) Driver"
+ tristate "USB IPWireless (3G UMTS TDD) Driver"
select USB_SERIAL_WWAN
help
Say Y here if you want to use an IPWireless USB modem such as
@@ -341,20 +341,20 @@ config USB_SERIAL_KLSI
module will be called kl5kusb105.
config USB_SERIAL_KOBIL_SCT
- tristate "USB KOBIL chipcard reader"
- ---help---
- Say Y here if you want to use one of the following KOBIL USB chipcard
- readers:
-
- - USB TWIN
- - KAAN Standard Plus
- - KAAN SIM
- - SecOVID Reader Plus
- - B1 Professional
- - KAAN Professional
-
- Note that you need a current CT-API.
- To compile this driver as a module, choose M here: the
+ tristate "USB KOBIL chipcard reader"
+ ---help---
+ Say Y here if you want to use one of the following KOBIL USB chipcard
+ readers:
+
+ - USB TWIN
+ - KAAN Standard Plus
+ - KAAN SIM
+ - SecOVID Reader Plus
+ - B1 Professional
+ - KAAN Professional
+
+ Note that you need a current CT-API.
+ To compile this driver as a module, choose M here: the
module will be called kobil_sct.
config USB_SERIAL_MCT_U232
@@ -458,7 +458,7 @@ config USB_SERIAL_OTI6858
tristate "USB Ours Technology Inc. OTi-6858 USB To RS232 Bridge Controller"
help
Say Y here if you want to use the OTi-6858 single port USB to serial
- converter device.
+ converter device.
To compile this driver as a module, choose M here: the
module will be called oti6858.
diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
index 3bb1fff02bed..df582fe855f0 100644
--- a/drivers/usb/serial/ch341.c
+++ b/drivers/usb/serial/ch341.c
@@ -48,12 +48,6 @@
#define CH341_BIT_DCD 0x08
#define CH341_BITS_MODEM_STAT 0x0f /* all bits */
-/*******************************/
-/* baudrate calculation factor */
-/*******************************/
-#define CH341_BAUDBASE_FACTOR 1532620800
-#define CH341_BAUDBASE_DIVMAX 3
-
/* Break support - the information used to implement this was gleaned from
* the Net/FreeBSD uchcom.c driver by Takanori Watanabe. Domo arigato.
*/
@@ -144,37 +138,96 @@ static int ch341_control_in(struct usb_device *dev,
return 0;
}
+#define CH341_CLKRATE 48000000
+#define CH341_CLK_DIV(ps, fact) (1 << (12 - 3 * (ps) - (fact)))
+#define CH341_MIN_RATE(ps) (CH341_CLKRATE / (CH341_CLK_DIV((ps), 1) * 512))
+
+static const speed_t ch341_min_rates[] = {
+ CH341_MIN_RATE(0),
+ CH341_MIN_RATE(1),
+ CH341_MIN_RATE(2),
+ CH341_MIN_RATE(3),
+};
+
+/*
+ * The device line speed is given by the following equation:
+ *
+ * baudrate = 48000000 / (2^(12 - 3 * ps - fact) * div), where
+ *
+ * 0 <= ps <= 3,
+ * 0 <= fact <= 1,
+ * 2 <= div <= 256 if fact = 0, or
+ * 9 <= div <= 256 if fact = 1
+ */
+static int ch341_get_divisor(speed_t speed)
+{
+ unsigned int fact, div, clk_div;
+ int ps;
+
+ /*
+ * Clamp to the supported range; this makes the (ps < 0) and (div < 2)
+ * sanity checks below redundant.
+ */
+ speed = clamp(speed, 46U, 3000000U);
+
+ /*
+ * Start with highest possible base clock (fact = 1) that will give a
+ * divisor strictly less than 512.
+ */
+ fact = 1;
+ for (ps = 3; ps >= 0; ps--) {
+ if (speed > ch341_min_rates[ps])
+ break;
+ }
+
+ if (ps < 0)
+ return -EINVAL;
+
+ /* Determine corresponding divisor, rounding down. */
+ clk_div = CH341_CLK_DIV(ps, fact);
+ div = CH341_CLKRATE / (clk_div * speed);
+
+ /* Halve base clock (fact = 0) if required. */
+ if (div < 9 || div > 255) {
+ div /= 2;
+ clk_div *= 2;
+ fact = 0;
+ }
+
+ if (div < 2)
+ return -EINVAL;
+
+ /*
+ * Pick next divisor if resulting rate is closer to the requested one,
+ * scale up to avoid rounding errors on low rates.
+ */
+ if (16 * CH341_CLKRATE / (clk_div * div) - 16 * speed >=
+ 16 * speed - 16 * CH341_CLKRATE / (clk_div * (div + 1)))
+ div++;
+
+ return (0x100 - div) << 8 | fact << 2 | ps;
+}
+
static int ch341_set_baudrate_lcr(struct usb_device *dev,
struct ch341_private *priv, u8 lcr)
{
- short a;
+ int val;
int r;
- unsigned long factor;
- short divisor;
if (!priv->baud_rate)
return -EINVAL;
- factor = (CH341_BAUDBASE_FACTOR / priv->baud_rate);
- divisor = CH341_BAUDBASE_DIVMAX;
-
- while ((factor > 0xfff0) && divisor) {
- factor >>= 3;
- divisor--;
- }
- if (factor > 0xfff0)
+ val = ch341_get_divisor(priv->baud_rate);
+ if (val < 0)
return -EINVAL;
- factor = 0x10000 - factor;
- a = (factor & 0xff00) | divisor;
-
/*
* CH341A buffers data until a full endpoint-size packet (32 bytes)
* has been received unless bit 7 is set.
*/
- a |= BIT(7);
+ val |= BIT(7);
- r = ch341_control_out(dev, CH341_REQ_WRITE_REG, 0x1312, a);
+ r = ch341_control_out(dev, CH341_REQ_WRITE_REG, 0x1312, val);
if (r)
return r;
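
Working the new ch341 equation for one common rate makes the register layout concrete. For 9600 baud the loop settles on ps = 2 (9600 exceeds ch341_min_rates[2], about 2930, but not ch341_min_rates[3]) with fact = 1; the rest follows. The block below is a worked example, not extra driver code:

	/* 9600 baud:
	 *   clk_div = 1 << (12 - 3*2 - 1)        = 32
	 *   div     = 48000000 / (32 * 9600)     = 156
	 *   rate(156) = 9615, rate(157) = 9554   -> keep 156 (closer)
	 *   reg     = (0x100 - 156) << 8 | 1 << 2 | 2 = 0x6406
	 * giving an actual rate of about 9615 baud (~0.16% high).
	 */
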
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 979bef9bfb6b..f5143eedbc48 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -125,6 +125,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */
{ USB_DEVICE(0x10C4, 0x8382) }, /* Cygnal Integrated Products, Inc. */
{ USB_DEVICE(0x10C4, 0x83A8) }, /* Amber Wireless AMB2560 */
+ { USB_DEVICE(0x10C4, 0x83AA) }, /* Mark-10 Digital Force Gauge */
{ USB_DEVICE(0x10C4, 0x83D8) }, /* DekTec DTA Plus VHF/UHF Booster/Attenuator */
{ USB_DEVICE(0x10C4, 0x8411) }, /* Kyocera GPS Module */
{ USB_DEVICE(0x10C4, 0x8418) }, /* IRZ Automation Teleport SG-10 GSM/GPRS Modem */
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 25e81faf4c24..9ad44a96dfe3 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1033,6 +1033,9 @@ static const struct usb_device_id id_table_combined[] = {
/* Sienna devices */
{ USB_DEVICE(FTDI_VID, FTDI_SIENNA_PID) },
{ USB_DEVICE(ECHELON_VID, ECHELON_U20_PID) },
+ /* U-Blox devices */
+ { USB_DEVICE(UBLOX_VID, UBLOX_C099F9P_ZED_PID) },
+ { USB_DEVICE(UBLOX_VID, UBLOX_C099F9P_ODIN_PID) },
{ } /* Terminating entry */
};
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 22d66217cb41..e8373528264c 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -1558,3 +1558,10 @@
*/
#define UNJO_VID 0x22B7
#define UNJO_ISODEBUG_V1_PID 0x150D
+
+/*
+ * U-Blox products (http://www.u-blox.com).
+ */
+#define UBLOX_VID 0x1546
+#define UBLOX_C099F9P_ZED_PID 0x0502
+#define UBLOX_C099F9P_ODIN_PID 0x0503
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index 18110225d506..2ec4eeacebc7 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -1833,10 +1833,6 @@ static int mos7720_startup(struct usb_serial *serial)
product = le16_to_cpu(serial->dev->descriptor.idProduct);
dev = serial->dev;
- /* setting configuration feature to one */
- usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
- (__u8)0x03, 0x00, 0x01, 0x00, NULL, 0x00, 5000);
-
if (product == MOSCHIP_DEVICE_ID_7715) {
struct urb *urb = serial->port[0]->interrupt_in_urb;
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index a698d46ba773..23f91d658cb4 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -89,17 +89,10 @@
/* For higher baud Rates use TIOCEXBAUD */
#define TIOCEXBAUD 0x5462
-/* vendor id and device id defines */
-
-/* The native mos7840/7820 component */
-#define USB_VENDOR_ID_MOSCHIP 0x9710
-#define MOSCHIP_DEVICE_ID_7840 0x7840
-#define MOSCHIP_DEVICE_ID_7843 0x7843
-#define MOSCHIP_DEVICE_ID_7820 0x7820
-#define MOSCHIP_DEVICE_ID_7810 0x7810
-/* The native component can have its vendor/device id's overridden
- * in vendor-specific implementations. Such devices can be handled
- * by making a change here, in id_table.
+/*
+ * Vendor id and device id defines
+ *
+ * NOTE: Do not add new defines, add entries directly to the id_table instead.
*/
#define USB_VENDOR_ID_BANDB 0x0856
#define BANDB_DEVICE_ID_USO9ML2_2 0xAC22
@@ -116,14 +109,6 @@
#define BANDB_DEVICE_ID_USOPTL4_4P 0xBC03
#define BANDB_DEVICE_ID_USOPTL2_4 0xAC24
-/* This driver also supports
- * ATEN UC2324 device using Moschip MCS7840
- * ATEN UC2322 device using Moschip MCS7820
- */
-#define USB_VENDOR_ID_ATENINTL 0x0557
-#define ATENINTL_DEVICE_ID_UC2324 0x2011
-#define ATENINTL_DEVICE_ID_UC2322 0x7820
-
/* Interrupt Routine Defines */
#define SERIAL_IIR_RLS 0x06
@@ -171,30 +156,37 @@
#define LED_OFF_MS 500
enum mos7840_flag {
- MOS7840_FLAG_CTRL_BUSY,
MOS7840_FLAG_LED_BUSY,
};
+#define MCS_PORT_MASK GENMASK(2, 0)
+#define MCS_PORTS(nr) ((nr) & MCS_PORT_MASK)
+#define MCS_LED BIT(3)
+
+#define MCS_DEVICE(vid, pid, flags) \
+ USB_DEVICE((vid), (pid)), .driver_info = (flags)
+
static const struct usb_device_id id_table[] = {
- {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)},
- {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7843)},
- {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)},
- {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7810)},
- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2)},
- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2P)},
- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4)},
- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4P)},
- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_2)},
- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_4)},
- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_2)},
- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_4)},
- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)},
- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2P)},
- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)},
- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4P)},
- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL2_4)},
- {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)},
- {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)},
+ { MCS_DEVICE(0x0557, 0x2011, MCS_PORTS(4)) }, /* ATEN UC2324 */
+ { MCS_DEVICE(0x0557, 0x7820, MCS_PORTS(2)) }, /* ATEN UC2322 */
+ { MCS_DEVICE(0x110a, 0x2210, MCS_PORTS(2)) }, /* Moxa UPort 2210 */
+ { MCS_DEVICE(0x9710, 0x7810, MCS_PORTS(1) | MCS_LED) }, /* ASIX MCS7810 */
+ { MCS_DEVICE(0x9710, 0x7820, MCS_PORTS(2)) }, /* MosChip MCS7820 */
+ { MCS_DEVICE(0x9710, 0x7840, MCS_PORTS(4)) }, /* MosChip MCS7840 */
+ { MCS_DEVICE(0x9710, 0x7843, MCS_PORTS(3)) }, /* ASIX MCS7840 3 port */
+ { USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2) },
+ { USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2P) },
+ { USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4) },
+ { USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4P) },
+ { USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_2) },
+ { USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_4) },
+ { USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_2) },
+ { USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_4) },
+ { USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2) },
+ { USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2P) },
+ { USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4) },
+ { USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4P) },
+ { USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL2_4) },
{} /* terminating entry */
};
MODULE_DEVICE_TABLE(usb, id_table);
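
The driver_info word replaces the per-product defines: the low three bits encode the port count and bit 3 marks the single-port MCS7810's activity LED, so one table entry now fully describes a device. Decoding, as the probe path below does it:

	unsigned long flags = id->driver_info;
	int num_ports = MCS_PORTS(flags);	/* flags & GENMASK(2, 0) */
	bool has_led = flags & MCS_LED;		/* BIT(3) */
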
@@ -206,19 +198,12 @@ struct moschip_port {
struct urb *read_urb; /* read URB for this port */
__u8 shadowLCR; /* last LCR value received */
__u8 shadowMCR; /* last MCR value received */
- char open;
- char open_ports;
struct usb_serial_port *port; /* loop back to the owner of this object */
/* Offsets */
__u8 SpRegOffset;
__u8 ControlRegOffset;
__u8 DcrRegOffset;
- /* for processing control URBS in interrupt context */
- struct urb *control_urb;
- struct usb_ctrlrequest *dr;
- char *ctrl_buf;
- int MsrLsr;
spinlock_t pool_lock;
struct urb *write_urb_pool[NUM_URBS];
@@ -360,150 +345,11 @@ static void mos7840_dump_serial_port(struct usb_serial_port *port,
/************************************************************************/
/************************************************************************/
-/* I N T E R F A C E F U N C T I O N S */
-/* I N T E R F A C E F U N C T I O N S */
-/************************************************************************/
-/************************************************************************/
-
-static inline void mos7840_set_port_private(struct usb_serial_port *port,
- struct moschip_port *data)
-{
- usb_set_serial_port_data(port, (void *)data);
-}
-
-static inline struct moschip_port *mos7840_get_port_private(struct
- usb_serial_port
- *port)
-{
- return (struct moschip_port *)usb_get_serial_port_data(port);
-}
-
-static void mos7840_handle_new_msr(struct moschip_port *port, __u8 new_msr)
-{
- struct moschip_port *mos7840_port;
- struct async_icount *icount;
- mos7840_port = port;
- if (new_msr &
- (MOS_MSR_DELTA_CTS | MOS_MSR_DELTA_DSR | MOS_MSR_DELTA_RI |
- MOS_MSR_DELTA_CD)) {
- icount = &mos7840_port->port->icount;
-
- /* update input line counters */
- if (new_msr & MOS_MSR_DELTA_CTS)
- icount->cts++;
- if (new_msr & MOS_MSR_DELTA_DSR)
- icount->dsr++;
- if (new_msr & MOS_MSR_DELTA_CD)
- icount->dcd++;
- if (new_msr & MOS_MSR_DELTA_RI)
- icount->rng++;
-
- wake_up_interruptible(&port->port->port.delta_msr_wait);
- }
-}
-
-static void mos7840_handle_new_lsr(struct moschip_port *port, __u8 new_lsr)
-{
- struct async_icount *icount;
-
- if (new_lsr & SERIAL_LSR_BI) {
- /*
- * Parity and Framing errors only count if they
- * occur exclusive of a break being
- * received.
- */
- new_lsr &= (__u8) (SERIAL_LSR_OE | SERIAL_LSR_BI);
- }
-
- /* update input line counters */
- icount = &port->port->icount;
- if (new_lsr & SERIAL_LSR_BI)
- icount->brk++;
- if (new_lsr & SERIAL_LSR_OE)
- icount->overrun++;
- if (new_lsr & SERIAL_LSR_PE)
- icount->parity++;
- if (new_lsr & SERIAL_LSR_FE)
- icount->frame++;
-}
-
-/************************************************************************/
-/************************************************************************/
/* U S B C A L L B A C K F U N C T I O N S */
/* U S B C A L L B A C K F U N C T I O N S */
/************************************************************************/
/************************************************************************/
-static void mos7840_control_callback(struct urb *urb)
-{
- unsigned char *data;
- struct moschip_port *mos7840_port;
- struct device *dev = &urb->dev->dev;
- __u8 regval = 0x0;
- int status = urb->status;
-
- mos7840_port = urb->context;
-
- switch (status) {
- case 0:
- /* success */
- break;
- case -ECONNRESET:
- case -ENOENT:
- case -ESHUTDOWN:
- /* this urb is terminated, clean up */
- dev_dbg(dev, "%s - urb shutting down with status: %d\n", __func__, status);
- goto out;
- default:
- dev_dbg(dev, "%s - nonzero urb status received: %d\n", __func__, status);
- goto out;
- }
-
- dev_dbg(dev, "%s urb buffer size is %d\n", __func__, urb->actual_length);
- if (urb->actual_length < 1)
- goto out;
-
- dev_dbg(dev, "%s mos7840_port->MsrLsr is %d port %d\n", __func__,
- mos7840_port->MsrLsr, mos7840_port->port_num);
- data = urb->transfer_buffer;
- regval = (__u8) data[0];
- dev_dbg(dev, "%s data is %x\n", __func__, regval);
- if (mos7840_port->MsrLsr == 0)
- mos7840_handle_new_msr(mos7840_port, regval);
- else if (mos7840_port->MsrLsr == 1)
- mos7840_handle_new_lsr(mos7840_port, regval);
-out:
- clear_bit_unlock(MOS7840_FLAG_CTRL_BUSY, &mos7840_port->flags);
-}
-
-static int mos7840_get_reg(struct moschip_port *mcs, __u16 Wval, __u16 reg,
- __u16 *val)
-{
- struct usb_device *dev = mcs->port->serial->dev;
- struct usb_ctrlrequest *dr = mcs->dr;
- unsigned char *buffer = mcs->ctrl_buf;
- int ret;
-
- if (test_and_set_bit_lock(MOS7840_FLAG_CTRL_BUSY, &mcs->flags))
- return -EBUSY;
-
- dr->bRequestType = MCS_RD_RTYPE;
- dr->bRequest = MCS_RDREQ;
- dr->wValue = cpu_to_le16(Wval); /* 0 */
- dr->wIndex = cpu_to_le16(reg);
- dr->wLength = cpu_to_le16(2);
-
- usb_fill_control_urb(mcs->control_urb, dev, usb_rcvctrlpipe(dev, 0),
- (unsigned char *)dr, buffer, 2,
- mos7840_control_callback, mcs);
- mcs->control_urb->transfer_buffer_length = 2;
- ret = usb_submit_urb(mcs->control_urb, GFP_ATOMIC);
- if (ret)
- clear_bit_unlock(MOS7840_FLAG_CTRL_BUSY, &mcs->flags);
-
- return ret;
-}
-
static void mos7840_set_led_callback(struct urb *urb)
{
switch (urb->status) {
@@ -580,146 +426,6 @@ static void mos7840_led_activity(struct usb_serial_port *port)
}
/*****************************************************************************
- * mos7840_interrupt_callback
- * this is the callback function for when we have received data on the
- * interrupt endpoint.
- *****************************************************************************/
-
-static void mos7840_interrupt_callback(struct urb *urb)
-{
- int result;
- int length;
- struct moschip_port *mos7840_port;
- struct usb_serial *serial;
- __u16 Data;
- unsigned char *data;
- __u8 sp[5];
- int i, rv = 0;
- __u16 wval, wreg = 0;
- int status = urb->status;
-
- switch (status) {
- case 0:
- /* success */
- break;
- case -ECONNRESET:
- case -ENOENT:
- case -ESHUTDOWN:
- /* this urb is terminated, clean up */
- dev_dbg(&urb->dev->dev, "%s - urb shutting down with status: %d\n",
- __func__, status);
- return;
- default:
- dev_dbg(&urb->dev->dev, "%s - nonzero urb status received: %d\n",
- __func__, status);
- goto exit;
- }
-
- length = urb->actual_length;
- data = urb->transfer_buffer;
-
- serial = urb->context;
-
- /* Moschip get 5 bytes
- * Byte 1 IIR Port 1 (port.number is 0)
- * Byte 2 IIR Port 2 (port.number is 1)
- * Byte 3 IIR Port 3 (port.number is 2)
- * Byte 4 IIR Port 4 (port.number is 3)
- * Byte 5 FIFO status for both */
-
- if (length > 5) {
- dev_dbg(&urb->dev->dev, "%s", "Wrong data !!!\n");
- return;
- }
-
- sp[0] = (__u8) data[0];
- sp[1] = (__u8) data[1];
- sp[2] = (__u8) data[2];
- sp[3] = (__u8) data[3];
-
- for (i = 0; i < serial->num_ports; i++) {
- mos7840_port = mos7840_get_port_private(serial->port[i]);
- wval = ((__u16)serial->port[i]->port_number + 1) << 8;
- if (mos7840_port->open) {
- if (sp[i] & 0x01) {
- dev_dbg(&urb->dev->dev, "SP%d No Interrupt !!!\n", i);
- } else {
- switch (sp[i] & 0x0f) {
- case SERIAL_IIR_RLS:
- dev_dbg(&urb->dev->dev, "Serial Port %d: Receiver status error or \n", i);
- dev_dbg(&urb->dev->dev, "address bit detected in 9-bit mode\n");
- mos7840_port->MsrLsr = 1;
- wreg = LINE_STATUS_REGISTER;
- break;
- case SERIAL_IIR_MS:
- dev_dbg(&urb->dev->dev, "Serial Port %d: Modem status change\n", i);
- mos7840_port->MsrLsr = 0;
- wreg = MODEM_STATUS_REGISTER;
- break;
- }
- rv = mos7840_get_reg(mos7840_port, wval, wreg, &Data);
- }
- }
- }
- if (!(rv < 0))
- /* the completion handler for the control urb will resubmit */
- return;
-exit:
- result = usb_submit_urb(urb, GFP_ATOMIC);
- if (result) {
- dev_err(&urb->dev->dev,
- "%s - Error %d submitting interrupt urb\n",
- __func__, result);
- }
-}
-
-static int mos7840_port_paranoia_check(struct usb_serial_port *port,
- const char *function)
-{
- if (!port) {
- pr_debug("%s - port == NULL\n", function);
- return -1;
- }
- if (!port->serial) {
- pr_debug("%s - port->serial == NULL\n", function);
- return -1;
- }
-
- return 0;
-}
-
-/* Inline functions to check the sanity of a pointer that is passed to us */
-static int mos7840_serial_paranoia_check(struct usb_serial *serial,
- const char *function)
-{
- if (!serial) {
- pr_debug("%s - serial == NULL\n", function);
- return -1;
- }
- if (!serial->type) {
- pr_debug("%s - serial->type == NULL!\n", function);
- return -1;
- }
-
- return 0;
-}
-
-static struct usb_serial *mos7840_get_usb_serial(struct usb_serial_port *port,
- const char *function)
-{
- /* if no port was specified, or it fails a paranoia check */
- if (!port ||
- mos7840_port_paranoia_check(port, function) ||
- mos7840_serial_paranoia_check(port->serial, function)) {
- /* then say that we don't have a valid usb_serial thing,
- * which will end up genrating -ENODEV return values */
- return NULL;
- }
-
- return port->serial;
-}
-
-/*****************************************************************************
* mos7840_bulk_in_callback
* this is the callback function for when we have received data on the
* bulk in endpoint.
@@ -727,35 +433,18 @@ static struct usb_serial *mos7840_get_usb_serial(struct usb_serial_port *port,
static void mos7840_bulk_in_callback(struct urb *urb)
{
+ struct moschip_port *mos7840_port = urb->context;
+ struct usb_serial_port *port = mos7840_port->port;
int retval;
unsigned char *data;
- struct usb_serial *serial;
- struct usb_serial_port *port;
- struct moschip_port *mos7840_port;
int status = urb->status;
- mos7840_port = urb->context;
- if (!mos7840_port)
- return;
-
if (status) {
dev_dbg(&urb->dev->dev, "nonzero read bulk status received: %d\n", status);
mos7840_port->read_urb_busy = false;
return;
}
- port = mos7840_port->port;
- if (mos7840_port_paranoia_check(port, __func__)) {
- mos7840_port->read_urb_busy = false;
- return;
- }
-
- serial = mos7840_get_usb_serial(port, __func__);
- if (!serial) {
- mos7840_port->read_urb_busy = false;
- return;
- }
-
data = urb->transfer_buffer;
usb_serial_debug_data(&port->dev, __func__, urb->actual_length, data);
@@ -767,12 +456,6 @@ static void mos7840_bulk_in_callback(struct urb *urb)
dev_dbg(&port->dev, "icount.rx is %d:\n", port->icount.rx);
}
- if (!mos7840_port->read_urb) {
- dev_dbg(&port->dev, "%s", "URB KILLED !!!\n");
- mos7840_port->read_urb_busy = false;
- return;
- }
-
if (mos7840_port->has_led)
mos7840_led_activity(port);
@@ -793,14 +476,12 @@ static void mos7840_bulk_in_callback(struct urb *urb)
static void mos7840_bulk_out_data_callback(struct urb *urb)
{
- struct moschip_port *mos7840_port;
- struct usb_serial_port *port;
+ struct moschip_port *mos7840_port = urb->context;
+ struct usb_serial_port *port = mos7840_port->port;
int status = urb->status;
unsigned long flags;
int i;
- mos7840_port = urb->context;
- port = mos7840_port->port;
spin_lock_irqsave(&mos7840_port->pool_lock, flags);
for (i = 0; i < NUM_URBS; i++) {
if (urb == mos7840_port->write_urb_pool[i]) {
@@ -815,11 +496,7 @@ static void mos7840_bulk_out_data_callback(struct urb *urb)
return;
}
- if (mos7840_port_paranoia_check(port, __func__))
- return;
-
- if (mos7840_port->open)
- tty_port_tty_wakeup(&port->port);
+ tty_port_tty_wakeup(&port->port);
}
@@ -836,32 +513,16 @@ static void mos7840_bulk_out_data_callback(struct urb *urb)
static int mos7840_open(struct tty_struct *tty, struct usb_serial_port *port)
{
+ struct moschip_port *mos7840_port = usb_get_serial_port_data(port);
+ struct usb_serial *serial = port->serial;
int response;
int j;
- struct usb_serial *serial;
struct urb *urb;
__u16 Data;
int status;
- struct moschip_port *mos7840_port;
- struct moschip_port *port0;
-
- if (mos7840_port_paranoia_check(port, __func__))
- return -ENODEV;
-
- serial = port->serial;
-
- if (mos7840_serial_paranoia_check(serial, __func__))
- return -ENODEV;
-
- mos7840_port = mos7840_get_port_private(port);
- port0 = mos7840_get_port_private(serial->port[0]);
-
- if (mos7840_port == NULL || port0 == NULL)
- return -ENODEV;
usb_clear_halt(serial->dev, port->write_urb->pipe);
usb_clear_halt(serial->dev, port->read_urb->pipe);
- port0->open_ports++;
/* Initialising the write urb pool */
for (j = 0; j < NUM_URBS; ++j) {
@@ -1012,41 +673,6 @@ static int mos7840_open(struct tty_struct *tty, struct usb_serial_port *port)
status = mos7840_set_reg_sync(port, mos7840_port->ControlRegOffset,
Data);
- /* Check to see if we've set up our endpoint info yet *
- * (can't set it up in mos7840_startup as the structures *
- * were not set up at that time.) */
- if (port0->open_ports == 1) {
- /* FIXME: Buffer never NULL, so URB is not submitted. */
- if (serial->port[0]->interrupt_in_buffer == NULL) {
- /* set up interrupt urb */
- usb_fill_int_urb(serial->port[0]->interrupt_in_urb,
- serial->dev,
- usb_rcvintpipe(serial->dev,
- serial->port[0]->interrupt_in_endpointAddress),
- serial->port[0]->interrupt_in_buffer,
- serial->port[0]->interrupt_in_urb->
- transfer_buffer_length,
- mos7840_interrupt_callback,
- serial,
- serial->port[0]->interrupt_in_urb->interval);
-
- /* start interrupt read for mos7840 */
- response =
- usb_submit_urb(serial->port[0]->interrupt_in_urb,
- GFP_KERNEL);
- if (response) {
- dev_err(&port->dev, "%s - Error %d submitting "
- "interrupt urb\n", __func__, response);
- }
-
- }
-
- }
-
- /* see if we've set up our endpoint info yet *
- * (can't set it up in mos7840_startup as the *
- * structures were not set up at that time.) */
-
dev_dbg(&port->dev, "port number is %d\n", port->port_number);
dev_dbg(&port->dev, "minor number is %d\n", port->minor);
dev_dbg(&port->dev, "Bulkin endpoint is %d\n", port->bulk_in_endpointAddress);
@@ -1086,9 +712,6 @@ static int mos7840_open(struct tty_struct *tty, struct usb_serial_port *port)
/* initialize our port settings */
/* Must set to enable ints! */
mos7840_port->shadowMCR = MCR_MASTER_IE;
- /* send a open port command */
- mos7840_port->open = 1;
- /* mos7840_change_port_settings(mos7840_port,old_termios); */
return 0;
err:
@@ -1115,17 +738,10 @@ err:
static int mos7840_chars_in_buffer(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
+ struct moschip_port *mos7840_port = usb_get_serial_port_data(port);
int i;
int chars = 0;
unsigned long flags;
- struct moschip_port *mos7840_port;
-
- if (mos7840_port_paranoia_check(port, __func__))
- return 0;
-
- mos7840_port = mos7840_get_port_private(port);
- if (mos7840_port == NULL)
- return 0;
spin_lock_irqsave(&mos7840_port->pool_lock, flags);
for (i = 0; i < NUM_URBS; ++i) {
@@ -1147,25 +763,10 @@ static int mos7840_chars_in_buffer(struct tty_struct *tty)
static void mos7840_close(struct usb_serial_port *port)
{
- struct usb_serial *serial;
- struct moschip_port *mos7840_port;
- struct moschip_port *port0;
+ struct moschip_port *mos7840_port = usb_get_serial_port_data(port);
int j;
__u16 Data;
- if (mos7840_port_paranoia_check(port, __func__))
- return;
-
- serial = mos7840_get_usb_serial(port, __func__);
- if (!serial)
- return;
-
- mos7840_port = mos7840_get_port_private(port);
- port0 = mos7840_get_port_private(serial->port[0]);
-
- if (mos7840_port == NULL || port0 == NULL)
- return;
-
for (j = 0; j < NUM_URBS; ++j)
usb_kill_urb(mos7840_port->write_urb_pool[j]);
@@ -1180,22 +781,11 @@ static void mos7840_close(struct usb_serial_port *port)
usb_kill_urb(mos7840_port->read_urb);
mos7840_port->read_urb_busy = false;
- port0->open_ports--;
- dev_dbg(&port->dev, "%s in close%d\n", __func__, port0->open_ports);
- if (port0->open_ports == 0) {
- if (serial->port[0]->interrupt_in_urb) {
- dev_dbg(&port->dev, "Shutdown interrupt_in_urb\n");
- usb_kill_urb(serial->port[0]->interrupt_in_urb);
- }
- }
-
Data = 0x0;
mos7840_set_uart_reg(port, MODEM_CONTROL_REGISTER, Data);
Data = 0x00;
mos7840_set_uart_reg(port, INTERRUPT_ENABLE_REGISTER, Data);
-
- mos7840_port->open = 0;
}
/*****************************************************************************
@@ -1205,21 +795,8 @@ static void mos7840_close(struct usb_serial_port *port)
static void mos7840_break(struct tty_struct *tty, int break_state)
{
struct usb_serial_port *port = tty->driver_data;
+ struct moschip_port *mos7840_port = usb_get_serial_port_data(port);
unsigned char data;
- struct usb_serial *serial;
- struct moschip_port *mos7840_port;
-
- if (mos7840_port_paranoia_check(port, __func__))
- return;
-
- serial = mos7840_get_usb_serial(port, __func__);
- if (!serial)
- return;
-
- mos7840_port = mos7840_get_port_private(port);
-
- if (mos7840_port == NULL)
- return;
if (break_state == -1)
data = mos7840_port->shadowLCR | LCR_SET_BREAK;
@@ -1244,17 +821,10 @@ static void mos7840_break(struct tty_struct *tty, int break_state)
static int mos7840_write_room(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
+ struct moschip_port *mos7840_port = usb_get_serial_port_data(port);
int i;
int room = 0;
unsigned long flags;
- struct moschip_port *mos7840_port;
-
- if (mos7840_port_paranoia_check(port, __func__))
- return -1;
-
- mos7840_port = mos7840_get_port_private(port);
- if (mos7840_port == NULL)
- return -1;
spin_lock_irqsave(&mos7840_port->pool_lock, flags);
for (i = 0; i < NUM_URBS; ++i) {
@@ -1280,29 +850,17 @@ static int mos7840_write_room(struct tty_struct *tty)
static int mos7840_write(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *data, int count)
{
+ struct moschip_port *mos7840_port = usb_get_serial_port_data(port);
+ struct usb_serial *serial = port->serial;
int status;
int i;
int bytes_sent = 0;
int transfer_size;
unsigned long flags;
-
- struct moschip_port *mos7840_port;
- struct usb_serial *serial;
struct urb *urb;
/* __u16 Data; */
const unsigned char *current_position = data;
- if (mos7840_port_paranoia_check(port, __func__))
- return -1;
-
- serial = port->serial;
- if (mos7840_serial_paranoia_check(serial, __func__))
- return -1;
-
- mos7840_port = mos7840_get_port_private(port);
- if (mos7840_port == NULL)
- return -1;
-
/* try to find a free urb in the list */
urb = NULL;
@@ -1383,22 +941,9 @@ exit:
static void mos7840_throttle(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
- struct moschip_port *mos7840_port;
+ struct moschip_port *mos7840_port = usb_get_serial_port_data(port);
int status;
- if (mos7840_port_paranoia_check(port, __func__))
- return;
-
- mos7840_port = mos7840_get_port_private(port);
-
- if (mos7840_port == NULL)
- return;
-
- if (!mos7840_port->open) {
- dev_dbg(&port->dev, "%s", "port not opened\n");
- return;
- }
-
/* if we are implementing XON/XOFF, send the stop character */
if (I_IXOFF(tty)) {
unsigned char stop_char = STOP_CHAR(tty);
@@ -1425,19 +970,8 @@ static void mos7840_throttle(struct tty_struct *tty)
static void mos7840_unthrottle(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
+ struct moschip_port *mos7840_port = usb_get_serial_port_data(port);
int status;
- struct moschip_port *mos7840_port = mos7840_get_port_private(port);
-
- if (mos7840_port_paranoia_check(port, __func__))
- return;
-
- if (mos7840_port == NULL)
- return;
-
- if (!mos7840_port->open) {
- dev_dbg(&port->dev, "%s - port not opened\n", __func__);
- return;
- }
/* if we are implementing XON/XOFF, send the start character */
if (I_IXOFF(tty)) {
@@ -1460,15 +994,10 @@ static void mos7840_unthrottle(struct tty_struct *tty)
static int mos7840_tiocmget(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
- struct moschip_port *mos7840_port;
unsigned int result;
__u16 msr;
__u16 mcr;
int status;
- mos7840_port = mos7840_get_port_private(port);
-
- if (mos7840_port == NULL)
- return -ENODEV;
status = mos7840_get_uart_reg(port, MODEM_STATUS_REGISTER, &msr);
if (status < 0)
@@ -1493,15 +1022,10 @@ static int mos7840_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear)
{
struct usb_serial_port *port = tty->driver_data;
- struct moschip_port *mos7840_port;
+ struct moschip_port *mos7840_port = usb_get_serial_port_data(port);
unsigned int mcr;
int status;
- mos7840_port = mos7840_get_port_private(port);
-
- if (mos7840_port == NULL)
- return -ENODEV;
-
/* FIXME: What locks the port registers ? */
mcr = mos7840_port->shadowMCR;
if (clear & TIOCM_RTS)
@@ -1578,21 +1102,11 @@ static int mos7840_calc_baud_rate_divisor(struct usb_serial_port *port,
static int mos7840_send_cmd_write_baud_rate(struct moschip_port *mos7840_port,
int baudRate)
{
+ struct usb_serial_port *port = mos7840_port->port;
int divisor = 0;
int status;
__u16 Data;
__u16 clk_sel_val;
- struct usb_serial_port *port;
-
- if (mos7840_port == NULL)
- return -1;
-
- port = mos7840_port->port;
- if (mos7840_port_paranoia_check(port, __func__))
- return -1;
-
- if (mos7840_serial_paranoia_check(port->serial, __func__))
- return -1;
dev_dbg(&port->dev, "%s - baud = %d\n", __func__, baudRate);
/* reset clk_uart_sel in spregOffset */
@@ -1681,6 +1195,7 @@ static int mos7840_send_cmd_write_baud_rate(struct moschip_port *mos7840_port,
static void mos7840_change_port_settings(struct tty_struct *tty,
struct moschip_port *mos7840_port, struct ktermios *old_termios)
{
+ struct usb_serial_port *port = mos7840_port->port;
int baud;
unsigned cflag;
__u8 lData;
@@ -1688,23 +1203,6 @@ static void mos7840_change_port_settings(struct tty_struct *tty,
__u8 lStop;
int status;
__u16 Data;
- struct usb_serial_port *port;
-
- if (mos7840_port == NULL)
- return;
-
- port = mos7840_port->port;
-
- if (mos7840_port_paranoia_check(port, __func__))
- return;
-
- if (mos7840_serial_paranoia_check(port->serial, __func__))
- return;
-
- if (!mos7840_port->open) {
- dev_dbg(&port->dev, "%s - port not opened\n", __func__);
- return;
- }
lData = LCR_BITS_8;
lStop = LCR_STOP_1;
@@ -1839,37 +1337,13 @@ static void mos7840_set_termios(struct tty_struct *tty,
struct usb_serial_port *port,
struct ktermios *old_termios)
{
+ struct moschip_port *mos7840_port = usb_get_serial_port_data(port);
int status;
- struct usb_serial *serial;
- struct moschip_port *mos7840_port;
-
- if (mos7840_port_paranoia_check(port, __func__))
- return;
-
- serial = port->serial;
-
- if (mos7840_serial_paranoia_check(serial, __func__))
- return;
-
- mos7840_port = mos7840_get_port_private(port);
-
- if (mos7840_port == NULL)
- return;
-
- if (!mos7840_port->open) {
- dev_dbg(&port->dev, "%s - port not opened\n", __func__);
- return;
- }
/* change the port settings to the new ones specified */
mos7840_change_port_settings(tty, mos7840_port, old_termios);
- if (!mos7840_port->read_urb) {
- dev_dbg(&port->dev, "%s", "URB KILLED !!!!!\n");
- return;
- }
-
if (!mos7840_port->read_urb_busy) {
mos7840_port->read_urb_busy = true;
status = usb_submit_urb(mos7840_port->read_urb, GFP_KERNEL);
@@ -1916,7 +1390,7 @@ static int mos7840_get_serial_info(struct tty_struct *tty,
struct serial_struct *ss)
{
struct usb_serial_port *port = tty->driver_data;
- struct moschip_port *mos7840_port = mos7840_get_port_private(port);
+ struct moschip_port *mos7840_port = usb_get_serial_port_data(port);
ss->type = PORT_16550A;
ss->line = mos7840_port->port->minor;
@@ -1939,15 +1413,6 @@ static int mos7840_ioctl(struct tty_struct *tty,
{
struct usb_serial_port *port = tty->driver_data;
void __user *argp = (void __user *)arg;
- struct moschip_port *mos7840_port;
-
- if (mos7840_port_paranoia_check(port, __func__))
- return -1;
-
- mos7840_port = mos7840_get_port_private(port);
-
- if (mos7840_port == NULL)
- return -1;
switch (cmd) {
/* return number of bytes available */
@@ -1962,6 +1427,13 @@ static int mos7840_ioctl(struct tty_struct *tty,
return -ENOIOCTLCMD;
}
+/*
+ * Check if GPO (pin 42) is connected to GPI (pin 33) as recommended by ASIX
+ * for MCS7810 by bit-banging a 16-bit word.
+ *
+ * Note that GPO is really RTS of the third port so this will toggle RTS of
+ * port two or three on two- and four-port devices.
+ */
static int mos7810_check(struct usb_serial *serial)
{
int i, pass_count = 0;
@@ -2019,16 +1491,12 @@ static int mos7810_check(struct usb_serial *serial)
static int mos7840_probe(struct usb_serial *serial,
const struct usb_device_id *id)
{
- u16 product = le16_to_cpu(serial->dev->descriptor.idProduct);
+ unsigned long device_flags = id->driver_info;
u8 *buf;
- int device_type;
- if (product == MOSCHIP_DEVICE_ID_7810 ||
- product == MOSCHIP_DEVICE_ID_7820 ||
- product == MOSCHIP_DEVICE_ID_7843) {
- device_type = product;
+ /* Skip device-type detection if we already have device flags. */
+ if (device_flags)
goto out;
- }
buf = kzalloc(VENDOR_READ_LENGTH, GFP_KERNEL);
if (!buf)
@@ -2040,15 +1508,15 @@ static int mos7840_probe(struct usb_serial *serial,
/* For a MCS7840 device GPIO0 must be set to 1 */
if (buf[0] & 0x01)
- device_type = MOSCHIP_DEVICE_ID_7840;
+ device_flags = MCS_PORTS(4);
else if (mos7810_check(serial))
- device_type = MOSCHIP_DEVICE_ID_7810;
+ device_flags = MCS_PORTS(1) | MCS_LED;
else
- device_type = MOSCHIP_DEVICE_ID_7820;
+ device_flags = MCS_PORTS(2);
kfree(buf);
out:
- usb_set_serial_data(serial, (void *)(unsigned long)device_type);
+ usb_set_serial_data(serial, (void *)device_flags);
return 0;
}
@@ -2056,19 +1524,10 @@ out:
static int mos7840_calc_num_ports(struct usb_serial *serial,
struct usb_serial_endpoints *epds)
{
- int device_type = (unsigned long)usb_get_serial_data(serial);
- int num_ports;
+ unsigned long device_flags = (unsigned long)usb_get_serial_data(serial);
+ int num_ports = MCS_PORTS(device_flags);
- if (device_type == MOSCHIP_DEVICE_ID_7843)
- num_ports = 3;
- else
- num_ports = (device_type >> 4) & 0x000F;
-
- /*
- * num_ports is currently never zero as device_type is one of
- * MOSCHIP_DEVICE_ID_78{1,2,4}0.
- */
- if (num_ports == 0)
+ if (num_ports == 0 || num_ports > 4)
return -ENODEV;
if (epds->num_bulk_in < num_ports || epds->num_bulk_out < num_ports) {
@@ -2079,10 +1538,27 @@ static int mos7840_calc_num_ports(struct usb_serial *serial,
return num_ports;
}
+static int mos7840_attach(struct usb_serial *serial)
+{
+ struct device *dev = &serial->interface->dev;
+ int status;
+ u16 val;
+
+ /* Zero Length flag enable */
+ val = 0x0f;
+ status = mos7840_set_reg_sync(serial->port[0], ZLP_REG5, val);
+ if (status < 0)
+ dev_dbg(dev, "Writing ZLP_REG5 failed status-0x%x\n", status);
+ else
+ dev_dbg(dev, "ZLP_REG5 Writing success status%d\n", status);
+
+ return status;
+}
+
static int mos7840_port_probe(struct usb_serial_port *port)
{
struct usb_serial *serial = port->serial;
- int device_type = (unsigned long)usb_get_serial_data(serial);
+ unsigned long device_flags = (unsigned long)usb_get_serial_data(serial);
struct moschip_port *mos7840_port;
int status;
int pnum;
@@ -2103,7 +1579,6 @@ static int mos7840_port_probe(struct usb_serial_port *port)
* common to all port */
mos7840_port->port = port;
- mos7840_set_port_private(port, mos7840_port);
spin_lock_init(&mos7840_port->pool_lock);
/* minor is not initialised until later by
@@ -2129,14 +1604,14 @@ static int mos7840_port_probe(struct usb_serial_port *port)
mos7840_port->DcrRegOffset = 0x16 + 3 * (phy_num - 2);
}
mos7840_dump_serial_port(port, mos7840_port);
- mos7840_set_port_private(port, mos7840_port);
+ usb_set_serial_port_data(port, mos7840_port);
/* enable rx_disable bit in control register */
status = mos7840_get_reg_sync(port,
mos7840_port->ControlRegOffset, &Data);
if (status < 0) {
dev_dbg(&port->dev, "Reading ControlReg failed status-0x%x\n", status);
- goto out;
+ goto error;
} else
dev_dbg(&port->dev, "ControlReg Reading success val is %x, status%d\n", Data, status);
Data |= 0x08; /* setting driver done bit */
@@ -2148,7 +1623,7 @@ static int mos7840_port_probe(struct usb_serial_port *port)
mos7840_port->ControlRegOffset, Data);
if (status < 0) {
dev_dbg(&port->dev, "Writing ControlReg failed(rx_disable) status-0x%x\n", status);
- goto out;
+ goto error;
} else
dev_dbg(&port->dev, "ControlReg Writing success(rx_disable) status%d\n", status);
@@ -2159,7 +1634,7 @@ static int mos7840_port_probe(struct usb_serial_port *port)
(__u16) (mos7840_port->DcrRegOffset + 0), Data);
if (status < 0) {
dev_dbg(&port->dev, "Writing DCR0 failed status-0x%x\n", status);
- goto out;
+ goto error;
} else
dev_dbg(&port->dev, "DCR0 Writing success status%d\n", status);
@@ -2168,7 +1643,7 @@ static int mos7840_port_probe(struct usb_serial_port *port)
(__u16) (mos7840_port->DcrRegOffset + 1), Data);
if (status < 0) {
dev_dbg(&port->dev, "Writing DCR1 failed status-0x%x\n", status);
- goto out;
+ goto error;
} else
dev_dbg(&port->dev, "DCR1 Writing success status%d\n", status);
@@ -2177,7 +1652,7 @@ static int mos7840_port_probe(struct usb_serial_port *port)
(__u16) (mos7840_port->DcrRegOffset + 2), Data);
if (status < 0) {
dev_dbg(&port->dev, "Writing DCR2 failed status-0x%x\n", status);
- goto out;
+ goto error;
} else
dev_dbg(&port->dev, "DCR2 Writing success status%d\n", status);
@@ -2186,7 +1661,7 @@ static int mos7840_port_probe(struct usb_serial_port *port)
status = mos7840_set_reg_sync(port, CLK_START_VALUE_REGISTER, Data);
if (status < 0) {
dev_dbg(&port->dev, "Writing CLK_START_VALUE_REGISTER failed status-0x%x\n", status);
- goto out;
+ goto error;
} else
dev_dbg(&port->dev, "CLK_START_VALUE_REGISTER Writing success status%d\n", status);
@@ -2203,7 +1678,7 @@ static int mos7840_port_probe(struct usb_serial_port *port)
status = mos7840_set_uart_reg(port, SCRATCH_PAD_REGISTER, Data);
if (status < 0) {
dev_dbg(&port->dev, "Writing SCRATCH_PAD_REGISTER failed status-0x%x\n", status);
- goto out;
+ goto error;
} else
dev_dbg(&port->dev, "SCRATCH_PAD_REGISTER Writing success status%d\n", status);
@@ -2217,7 +1692,7 @@ static int mos7840_port_probe(struct usb_serial_port *port)
(__u16)(ZLP_REG1 + ((__u16) mos7840_port->port_num)));
if (status < 0) {
dev_dbg(&port->dev, "Writing ZLP_REG%d failed status-0x%x\n", pnum + 2, status);
- goto out;
+ goto error;
} else
dev_dbg(&port->dev, "ZLP_REG%d Writing success status%d\n", pnum + 2, status);
} else {
@@ -2229,27 +1704,16 @@ static int mos7840_port_probe(struct usb_serial_port *port)
(__u16)(ZLP_REG1 + ((__u16) mos7840_port->port_num) - 0x1));
if (status < 0) {
dev_dbg(&port->dev, "Writing ZLP_REG%d failed status-0x%x\n", pnum + 1, status);
- goto out;
+ goto error;
} else
dev_dbg(&port->dev, "ZLP_REG%d Writing success status%d\n", pnum + 1, status);
}
- mos7840_port->control_urb = usb_alloc_urb(0, GFP_KERNEL);
- mos7840_port->ctrl_buf = kmalloc(16, GFP_KERNEL);
- mos7840_port->dr = kmalloc(sizeof(struct usb_ctrlrequest),
- GFP_KERNEL);
- if (!mos7840_port->control_urb || !mos7840_port->ctrl_buf ||
- !mos7840_port->dr) {
- status = -ENOMEM;
- goto error;
- }
- mos7840_port->has_led = false;
+ mos7840_port->has_led = device_flags & MCS_LED;
/* Initialize LED timers */
- if (device_type == MOSCHIP_DEVICE_ID_7810) {
- mos7840_port->has_led = true;
-
+ if (mos7840_port->has_led) {
mos7840_port->led_urb = usb_alloc_urb(0, GFP_KERNEL);
mos7840_port->led_dr = kmalloc(sizeof(*mos7840_port->led_dr),
GFP_KERNEL);
@@ -2269,29 +1733,11 @@ static int mos7840_port_probe(struct usb_serial_port *port)
/* Turn off LED */
mos7840_set_led_sync(port, MODEM_CONTROL_REGISTER, 0x0300);
}
-out:
- if (pnum == serial->num_ports - 1) {
- /* Zero Length flag enable */
- Data = 0x0f;
- status = mos7840_set_reg_sync(serial->port[0], ZLP_REG5, Data);
- if (status < 0) {
- dev_dbg(&port->dev, "Writing ZLP_REG5 failed status-0x%x\n", status);
- goto error;
- } else
- dev_dbg(&port->dev, "ZLP_REG5 Writing success status%d\n", status);
- /* setting configuration feature to one */
- usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
- 0x03, 0x00, 0x01, 0x00, NULL, 0x00,
- MOS_WDR_TIMEOUT);
- }
return 0;
error:
kfree(mos7840_port->led_dr);
usb_free_urb(mos7840_port->led_urb);
- kfree(mos7840_port->dr);
- kfree(mos7840_port->ctrl_buf);
- usb_free_urb(mos7840_port->control_urb);
kfree(mos7840_port);
return status;
@@ -2299,9 +1745,7 @@ error:
static int mos7840_port_remove(struct usb_serial_port *port)
{
- struct moschip_port *mos7840_port;
-
- mos7840_port = mos7840_get_port_private(port);
+ struct moschip_port *mos7840_port = usb_get_serial_port_data(port);
if (mos7840_port->has_led) {
/* Turn off LED */
@@ -2314,10 +1758,7 @@ static int mos7840_port_remove(struct usb_serial_port *port)
usb_free_urb(mos7840_port->led_urb);
kfree(mos7840_port->led_dr);
}
- usb_kill_urb(mos7840_port->control_urb);
- usb_free_urb(mos7840_port->control_urb);
- kfree(mos7840_port->ctrl_buf);
- kfree(mos7840_port->dr);
+
kfree(mos7840_port);
return 0;
@@ -2340,18 +1781,17 @@ static struct usb_serial_driver moschip7840_4port_device = {
.unthrottle = mos7840_unthrottle,
.calc_num_ports = mos7840_calc_num_ports,
.probe = mos7840_probe,
+ .attach = mos7840_attach,
.ioctl = mos7840_ioctl,
.get_serial = mos7840_get_serial_info,
.set_termios = mos7840_set_termios,
.break_ctl = mos7840_break,
.tiocmget = mos7840_tiocmget,
.tiocmset = mos7840_tiocmset,
- .tiocmiwait = usb_serial_generic_tiocmiwait,
.get_icount = usb_serial_generic_get_icount,
.port_probe = mos7840_port_probe,
.port_remove = mos7840_port_remove,
.read_bulk_callback = mos7840_bulk_in_callback,
- .read_int_callback = mos7840_interrupt_callback,
};
static struct usb_serial_driver * const serial_drivers[] = {
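
Moving the ZLP_REG5 write out of mos7840_port_probe() and into the new attach callback drops the "is this the last port?" bookkeeping: in the usb-serial core, attach() runs once per interface, after probe() and calc_num_ports() and before the individual ports are registered, which is a natural single point for interface-wide register setup. Rough call order (a sketch of the core's sequencing, not code from this patch):

	/* usb_serial_probe(), roughly:
	 *   mos7840_probe()           - once, determines device_flags
	 *   mos7840_calc_num_ports()  - once, MCS_PORTS(device_flags)
	 *   mos7840_attach()          - once, ZLP_REG5 write
	 *   mos7840_port_probe()      - per port, per-port registers
	 */
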
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 06ab016be0b6..e9491d400a24 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -197,6 +197,7 @@ static void option_instat_callback(struct urb *urb);
#define DELL_PRODUCT_5804_MINICARD_ATT 0x819b /* Novatel E371 */
#define DELL_PRODUCT_5821E 0x81d7
+#define DELL_PRODUCT_5821E_ESIM 0x81e0
#define KYOCERA_VENDOR_ID 0x0c88
#define KYOCERA_PRODUCT_KPC650 0x17da
@@ -1044,6 +1045,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5804_MINICARD_ATT, 0xff, 0xff, 0xff) },
{ USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5821E),
.driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
+ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5821E_ESIM),
+ .driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) }, /* ADU-E100, ADU-310 */
{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) },
{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) },
@@ -1990,6 +1993,10 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x13) },
{ USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x14) },
{ USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x1b) },
+ { USB_DEVICE(0x0489, 0xe0b4), /* Foxconn T77W968 */
+ .driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
+ { USB_DEVICE(0x0489, 0xe0b5), /* Foxconn T77W968 ESIM */
+ .driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
{ USB_DEVICE(0x1508, 0x1001), /* Fibocom NL668 */
.driver_info = RSVD(4) | RSVD(5) | RSVD(6) },
{ USB_DEVICE(0x2cb7, 0x0104), /* Fibocom NL678 series */
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 9d27b76c5c6e..aab737e1e7b6 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -47,6 +47,12 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_MOTOROLA) },
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_ZTEK) },
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_TB) },
+ { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_GC) },
+ { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_GB) },
+ { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_GT) },
+ { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_GL) },
+ { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_GE) },
+ { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_GS) },
{ USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID) },
{ USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) },
{ USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID),
@@ -130,9 +136,11 @@ MODULE_DEVICE_TABLE(usb, id_table);
#define VENDOR_WRITE_REQUEST_TYPE 0x40
#define VENDOR_WRITE_REQUEST 0x01
+#define VENDOR_WRITE_NREQUEST 0x80
#define VENDOR_READ_REQUEST_TYPE 0xc0
#define VENDOR_READ_REQUEST 0x01
+#define VENDOR_READ_NREQUEST 0x81
#define UART_STATE_INDEX 8
#define UART_STATE_MSR_MASK 0x8b
@@ -148,11 +156,24 @@ MODULE_DEVICE_TABLE(usb, id_table);
#define PL2303_FLOWCTRL_MASK 0xf0
+#define PL2303_READ_TYPE_HX_STATUS 0x8080
+
+#define PL2303_HXN_RESET_REG 0x07
+#define PL2303_HXN_RESET_UPSTREAM_PIPE 0x02
+#define PL2303_HXN_RESET_DOWNSTREAM_PIPE 0x01
+
+#define PL2303_HXN_FLOWCTRL_REG 0x0a
+#define PL2303_HXN_FLOWCTRL_MASK 0x1c
+#define PL2303_HXN_FLOWCTRL_NONE 0x1c
+#define PL2303_HXN_FLOWCTRL_RTS_CTS 0x18
+#define PL2303_HXN_FLOWCTRL_XON_XOFF 0x0c
+
static void pl2303_set_break(struct usb_serial_port *port, bool enable);
enum pl2303_type {
TYPE_01, /* Type 0 and 1 (difference unknown) */
TYPE_HX, /* HX version of the pl2303 chip */
+ TYPE_HXN, /* HXN version of the pl2303 chip */
TYPE_COUNT
};
@@ -184,16 +205,26 @@ static const struct pl2303_type_data pl2303_type_data[TYPE_COUNT] = {
[TYPE_HX] = {
.max_baud_rate = 12000000,
},
+ [TYPE_HXN] = {
+ .max_baud_rate = 12000000,
+ },
};
static int pl2303_vendor_read(struct usb_serial *serial, u16 value,
unsigned char buf[1])
{
+ struct pl2303_serial_private *spriv = usb_get_serial_data(serial);
struct device *dev = &serial->interface->dev;
+ u8 request;
int res;
+ if (spriv->type == &pl2303_type_data[TYPE_HXN])
+ request = VENDOR_READ_NREQUEST;
+ else
+ request = VENDOR_READ_REQUEST;
+
res = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
- VENDOR_READ_REQUEST, VENDOR_READ_REQUEST_TYPE,
+ request, VENDOR_READ_REQUEST_TYPE,
value, 0, buf, 1, 100);
if (res != 1) {
dev_err(dev, "%s - failed to read [%04x]: %d\n", __func__,
@@ -211,13 +242,20 @@ static int pl2303_vendor_read(struct usb_serial *serial, u16 value,
static int pl2303_vendor_write(struct usb_serial *serial, u16 value, u16 index)
{
+ struct pl2303_serial_private *spriv = usb_get_serial_data(serial);
struct device *dev = &serial->interface->dev;
+ u8 request;
int res;
dev_dbg(dev, "%s - [%04x] = %02x\n", __func__, value, index);
+ if (spriv->type == &pl2303_type_data[TYPE_HXN])
+ request = VENDOR_WRITE_NREQUEST;
+ else
+ request = VENDOR_WRITE_REQUEST;
+
res = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
- VENDOR_WRITE_REQUEST, VENDOR_WRITE_REQUEST_TYPE,
+ request, VENDOR_WRITE_REQUEST_TYPE,
value, index, NULL, 0, 100);
if (res) {
dev_err(dev, "%s - failed to write [%04x]: %d\n", __func__,
@@ -230,6 +268,7 @@ static int pl2303_vendor_write(struct usb_serial *serial, u16 value, u16 index)
static int pl2303_update_reg(struct usb_serial *serial, u8 reg, u8 mask, u8 val)
{
+ struct pl2303_serial_private *spriv = usb_get_serial_data(serial);
int ret = 0;
u8 *buf;
@@ -237,7 +276,11 @@ static int pl2303_update_reg(struct usb_serial *serial, u8 reg, u8 mask, u8 val)
if (!buf)
return -ENOMEM;
- ret = pl2303_vendor_read(serial, reg | 0x80, buf);
+ if (spriv->type == &pl2303_type_data[TYPE_HXN])
+ ret = pl2303_vendor_read(serial, reg, buf);
+ else
+ ret = pl2303_vendor_read(serial, reg | 0x80, buf);
+
if (ret)
goto out_free;
@@ -320,6 +363,7 @@ static int pl2303_startup(struct usb_serial *serial)
struct pl2303_serial_private *spriv;
enum pl2303_type type = TYPE_01;
unsigned char *buf;
+ int res;
spriv = kzalloc(sizeof(*spriv), GFP_KERNEL);
if (!spriv)
@@ -341,26 +385,37 @@ static int pl2303_startup(struct usb_serial *serial)
type = TYPE_01; /* type 1 */
dev_dbg(&serial->interface->dev, "device type: %d\n", type);
+ if (type == TYPE_HX) {
+ res = usb_control_msg(serial->dev,
+ usb_rcvctrlpipe(serial->dev, 0),
+ VENDOR_READ_REQUEST, VENDOR_READ_REQUEST_TYPE,
+ PL2303_READ_TYPE_HX_STATUS, 0, buf, 1, 100);
+ if (res != 1)
+ type = TYPE_HXN;
+ }
+
spriv->type = &pl2303_type_data[type];
spriv->quirks = (unsigned long)usb_get_serial_data(serial);
spriv->quirks |= spriv->type->quirks;
usb_set_serial_data(serial, spriv);
- pl2303_vendor_read(serial, 0x8484, buf);
- pl2303_vendor_write(serial, 0x0404, 0);
- pl2303_vendor_read(serial, 0x8484, buf);
- pl2303_vendor_read(serial, 0x8383, buf);
- pl2303_vendor_read(serial, 0x8484, buf);
- pl2303_vendor_write(serial, 0x0404, 1);
- pl2303_vendor_read(serial, 0x8484, buf);
- pl2303_vendor_read(serial, 0x8383, buf);
- pl2303_vendor_write(serial, 0, 1);
- pl2303_vendor_write(serial, 1, 0);
- if (spriv->quirks & PL2303_QUIRK_LEGACY)
- pl2303_vendor_write(serial, 2, 0x24);
- else
- pl2303_vendor_write(serial, 2, 0x44);
+ if (type != TYPE_HXN) {
+ pl2303_vendor_read(serial, 0x8484, buf);
+ pl2303_vendor_write(serial, 0x0404, 0);
+ pl2303_vendor_read(serial, 0x8484, buf);
+ pl2303_vendor_read(serial, 0x8383, buf);
+ pl2303_vendor_read(serial, 0x8484, buf);
+ pl2303_vendor_write(serial, 0x0404, 1);
+ pl2303_vendor_read(serial, 0x8484, buf);
+ pl2303_vendor_read(serial, 0x8383, buf);
+ pl2303_vendor_write(serial, 0, 1);
+ pl2303_vendor_write(serial, 1, 0);
+ if (spriv->quirks & PL2303_QUIRK_LEGACY)
+ pl2303_vendor_write(serial, 2, 0x24);
+ else
+ pl2303_vendor_write(serial, 2, 0x44);
+ }
kfree(buf);
@@ -719,14 +774,31 @@ static void pl2303_set_termios(struct tty_struct *tty,
}
if (C_CRTSCTS(tty)) {
- if (spriv->quirks & PL2303_QUIRK_LEGACY)
+ if (spriv->quirks & PL2303_QUIRK_LEGACY) {
pl2303_update_reg(serial, 0, PL2303_FLOWCTRL_MASK, 0x40);
- else
+ } else if (spriv->type == &pl2303_type_data[TYPE_HXN]) {
+ pl2303_update_reg(serial, PL2303_HXN_FLOWCTRL_REG,
+ PL2303_HXN_FLOWCTRL_MASK,
+ PL2303_HXN_FLOWCTRL_RTS_CTS);
+ } else {
pl2303_update_reg(serial, 0, PL2303_FLOWCTRL_MASK, 0x60);
+ }
} else if (pl2303_enable_xonxoff(tty, spriv->type)) {
- pl2303_update_reg(serial, 0, PL2303_FLOWCTRL_MASK, 0xc0);
+ if (spriv->type == &pl2303_type_data[TYPE_HXN]) {
+ pl2303_update_reg(serial, PL2303_HXN_FLOWCTRL_REG,
+ PL2303_HXN_FLOWCTRL_MASK,
+ PL2303_HXN_FLOWCTRL_XON_XOFF);
+ } else {
+ pl2303_update_reg(serial, 0, PL2303_FLOWCTRL_MASK, 0xc0);
+ }
} else {
- pl2303_update_reg(serial, 0, PL2303_FLOWCTRL_MASK, 0);
+ if (spriv->type == &pl2303_type_data[TYPE_HXN]) {
+ pl2303_update_reg(serial, PL2303_HXN_FLOWCTRL_REG,
+ PL2303_HXN_FLOWCTRL_MASK,
+ PL2303_HXN_FLOWCTRL_NONE);
+ } else {
+ pl2303_update_reg(serial, 0, PL2303_FLOWCTRL_MASK, 0);
+ }
}
kfree(buf);
@@ -767,8 +839,14 @@ static int pl2303_open(struct tty_struct *tty, struct usb_serial_port *port)
usb_clear_halt(serial->dev, port->read_urb->pipe);
} else {
/* reset upstream data pipes */
- pl2303_vendor_write(serial, 8, 0);
- pl2303_vendor_write(serial, 9, 0);
+ if (spriv->type == &pl2303_type_data[TYPE_HXN]) {
+ pl2303_vendor_write(serial, PL2303_HXN_RESET_REG,
+ PL2303_HXN_RESET_UPSTREAM_PIPE |
+ PL2303_HXN_RESET_DOWNSTREAM_PIPE);
+ } else {
+ pl2303_vendor_write(serial, 8, 0);
+ pl2303_vendor_write(serial, 9, 0);
+ }
}
/* Setup termios */
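
The HXN silicon takes a different vendor request pair (VENDOR_READ_NREQUEST/VENDOR_WRITE_NREQUEST, 0x81/0x80) and plain register addresses, where the older types encode "read" by setting bit 7 of the register value; pl2303_update_reg() hides that behind a read-modify-write helper. What an update boils down to, as a sketch:

	u8 cur;

	pl2303_vendor_read(serial, reg | 0x80, &cur);	/* HXN: just reg */
	cur = (cur & ~mask) | (val & mask);
	pl2303_vendor_write(serial, reg, cur);
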
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
index b0175f17d1a2..a019ea7e6e0e 100644
--- a/drivers/usb/serial/pl2303.h
+++ b/drivers/usb/serial/pl2303.h
@@ -9,6 +9,12 @@
#define PL2303_VENDOR_ID 0x067b
#define PL2303_PRODUCT_ID 0x2303
#define PL2303_PRODUCT_ID_TB 0x2304
+#define PL2303_PRODUCT_ID_GC 0x23a3
+#define PL2303_PRODUCT_ID_GB 0x23b3
+#define PL2303_PRODUCT_ID_GT 0x23c3
+#define PL2303_PRODUCT_ID_GL 0x23d3
+#define PL2303_PRODUCT_ID_GE 0x23e3
+#define PL2303_PRODUCT_ID_GS 0x23f3
#define PL2303_PRODUCT_ID_RSAQ2 0x04bb
#define PL2303_PRODUCT_ID_DCU11 0x1234
#define PL2303_PRODUCT_ID_PHAROS 0xaaa0
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index 54a3c8195c96..66a4dcbbb1fc 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -369,8 +369,8 @@ static int queuecommand_lck(struct scsi_cmnd *srb,
/* check for state-transition errors */
if (us->srb != NULL) {
- printk(KERN_ERR "usb-storage: Error in %s: us->srb = %p\n",
- __func__, us->srb);
+ dev_err(&us->pusb_intf->dev,
+ "Error in %s: us->srb = %p\n", __func__, us->srb);
return SCSI_MLQUEUE_HOST_BUSY;
}
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index 34538253f12c..475b9c692827 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -825,6 +825,10 @@ static int uas_slave_configure(struct scsi_device *sdev)
sdev->wce_default_on = 1;
}
+ /* Some disks cannot handle READ_CAPACITY_16 */
+ if (devinfo->flags & US_FL_NO_READ_CAPACITY_16)
+ sdev->no_read_capacity_16 = 1;
+
/*
* Some disks return the total number of blocks in response
* to READ CAPACITY rather than the highest block number.
@@ -834,6 +838,12 @@ static int uas_slave_configure(struct scsi_device *sdev)
sdev->fix_capacity = 1;
/*
+ * In some cases we have to guess the capacity.
+ */
+ if (devinfo->flags & US_FL_CAPACITY_HEURISTICS)
+ sdev->guess_capacity = 1;
+
+ /*
* Some devices don't like MODE SENSE with page=0x3f,
* which is the command used for checking if a device
* is write-protected. Now that we tell the sd driver
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
index d0bdebd87ce3..1b23741036ee 100644
--- a/drivers/usb/storage/unusual_uas.h
+++ b/drivers/usb/storage/unusual_uas.h
@@ -87,12 +87,15 @@ UNUSUAL_DEV(0x2537, 0x1068, 0x0000, 0x9999,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_IGNORE_UAS),
-/* Reported-by: Takeo Nakayama <javhera@gmx.com> */
+/*
+ * Initially Reported-by: Takeo Nakayama <javhera@gmx.com>
+ * UAS Ignore Reported-by: Steven Ellis <sellis@redhat.com>
+ */
UNUSUAL_DEV(0x357d, 0x7788, 0x0000, 0x9999,
"JMicron",
"JMS566",
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
- US_FL_NO_REPORT_OPCODES),
+ US_FL_NO_REPORT_OPCODES | US_FL_IGNORE_UAS),
/* Reported-by: Hans de Goede <hdegoede@redhat.com> */
UNUSUAL_DEV(0x4971, 0x1012, 0x0000, 0x9999,
diff --git a/drivers/usb/typec/Kconfig b/drivers/usb/typec/Kconfig
index 895e2418de53..b4f2aac7ae8a 100644
--- a/drivers/usb/typec/Kconfig
+++ b/drivers/usb/typec/Kconfig
@@ -50,6 +50,17 @@ source "drivers/usb/typec/tcpm/Kconfig"
source "drivers/usb/typec/ucsi/Kconfig"
+config TYPEC_HD3SS3220
+ tristate "TI HD3SS3220 Type-C DRP Port controller driver"
+ depends on I2C
+ depends on USB_ROLE_SWITCH
+ help
+ Say Y or M here if your system has a TI HD3SS3220 Type-C DRP Port
+ controller.
+
+ If you choose to build this driver as a dynamically linked module, the
+ module will be called hd3ss3220.ko.
+
config TYPEC_TPS6598X
tristate "TI TPS6598x USB Power Delivery controller driver"
depends on I2C
diff --git a/drivers/usb/typec/Makefile b/drivers/usb/typec/Makefile
index 6696b7263d61..7753a5c3cd46 100644
--- a/drivers/usb/typec/Makefile
+++ b/drivers/usb/typec/Makefile
@@ -4,5 +4,6 @@ typec-y := class.o mux.o bus.o
obj-$(CONFIG_TYPEC) += altmodes/
obj-$(CONFIG_TYPEC_TCPM) += tcpm/
obj-$(CONFIG_TYPEC_UCSI) += ucsi/
+obj-$(CONFIG_TYPEC_HD3SS3220) += hd3ss3220.o
obj-$(CONFIG_TYPEC_TPS6598X) += tps6598x.o
obj-$(CONFIG_TYPEC) += mux/
diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c
index 94a3eda62add..7ece6ca6e690 100644
--- a/drivers/usb/typec/class.c
+++ b/drivers/usb/typec/class.c
@@ -53,6 +53,7 @@ struct typec_port {
struct typec_mux *mux;
const struct typec_capability *cap;
+ const struct typec_operations *ops;
};
#define to_typec_port(_dev_) container_of(_dev_, struct typec_port, dev)
@@ -955,7 +956,7 @@ preferred_role_store(struct device *dev, struct device_attribute *attr,
return -EOPNOTSUPP;
}
- if (!port->cap->try_role) {
+ if (!port->ops || !port->ops->try_role) {
dev_dbg(dev, "Setting preferred role not supported\n");
return -EOPNOTSUPP;
}
@@ -968,7 +969,7 @@ preferred_role_store(struct device *dev, struct device_attribute *attr,
return -EINVAL;
}
- ret = port->cap->try_role(port->cap, role);
+ ret = port->ops->try_role(port, role);
if (ret)
return ret;
@@ -999,7 +1000,7 @@ static ssize_t data_role_store(struct device *dev,
struct typec_port *port = to_typec_port(dev);
int ret;
- if (!port->cap->dr_set) {
+ if (!port->ops || !port->ops->dr_set) {
dev_dbg(dev, "data role swapping not supported\n");
return -EOPNOTSUPP;
}
@@ -1014,7 +1015,7 @@ static ssize_t data_role_store(struct device *dev,
goto unlock_and_ret;
}
- ret = port->cap->dr_set(port->cap, ret);
+ ret = port->ops->dr_set(port, ret);
if (ret)
goto unlock_and_ret;
@@ -1049,7 +1050,7 @@ static ssize_t power_role_store(struct device *dev,
return -EOPNOTSUPP;
}
- if (!port->cap->pr_set) {
+ if (!port->ops || !port->ops->pr_set) {
dev_dbg(dev, "power role swapping not supported\n");
return -EOPNOTSUPP;
}
@@ -1071,7 +1072,7 @@ static ssize_t power_role_store(struct device *dev,
goto unlock_and_ret;
}
- ret = port->cap->pr_set(port->cap, ret);
+ ret = port->ops->pr_set(port, ret);
if (ret)
goto unlock_and_ret;
@@ -1102,7 +1103,8 @@ port_type_store(struct device *dev, struct device_attribute *attr,
int ret;
enum typec_port_type type;
- if (!port->cap->port_type_set || port->cap->type != TYPEC_PORT_DRP) {
+ if (port->cap->type != TYPEC_PORT_DRP ||
+ !port->ops || !port->ops->port_type_set) {
dev_dbg(dev, "changing port type not supported\n");
return -EOPNOTSUPP;
}
@@ -1119,7 +1121,7 @@ port_type_store(struct device *dev, struct device_attribute *attr,
goto unlock_and_ret;
}
- ret = port->cap->port_type_set(port->cap, type);
+ ret = port->ops->port_type_set(port, type);
if (ret)
goto unlock_and_ret;
@@ -1175,7 +1177,7 @@ static ssize_t vconn_source_store(struct device *dev,
return -EOPNOTSUPP;
}
- if (!port->cap->vconn_set) {
+ if (!port->ops || !port->ops->vconn_set) {
dev_dbg(dev, "VCONN swapping not supported\n");
return -EOPNOTSUPP;
}
@@ -1184,7 +1186,7 @@ static ssize_t vconn_source_store(struct device *dev,
if (ret)
return ret;
- ret = port->cap->vconn_set(port->cap, (enum typec_role)source);
+ ret = port->ops->vconn_set(port, (enum typec_role)source);
if (ret)
return ret;
@@ -1278,6 +1280,7 @@ static void typec_release(struct device *dev)
ida_destroy(&port->mode_ids);
typec_switch_put(port->sw);
typec_mux_put(port->mux);
+ kfree(port->cap);
kfree(port);
}
@@ -1487,6 +1490,16 @@ EXPORT_SYMBOL_GPL(typec_set_mode);
/* --------------------------------------- */
/**
+ * typec_get_drvdata - Return private driver data pointer
+ * @port: USB Type-C port
+ */
+void *typec_get_drvdata(struct typec_port *port)
+{
+ return dev_get_drvdata(&port->dev);
+}
+EXPORT_SYMBOL_GPL(typec_get_drvdata);
+
+/**
* typec_port_register_altmode - Register USB Type-C Port Alternate Mode
* @port: USB Type-C Port that supports the alternate mode
* @desc: Description of the alternate mode
@@ -1579,7 +1592,7 @@ struct typec_port *typec_register_port(struct device *parent,
mutex_init(&port->port_type_lock);
port->id = id;
- port->cap = cap;
+ port->ops = cap->ops;
port->port_type = cap->type;
port->prefer_role = cap->prefer_role;
@@ -1589,6 +1602,13 @@ struct typec_port *typec_register_port(struct device *parent,
port->dev.fwnode = cap->fwnode;
port->dev.type = &typec_port_dev_type;
dev_set_name(&port->dev, "port%d", id);
+ dev_set_drvdata(&port->dev, cap->driver_data);
+
+ port->cap = kmemdup(cap, sizeof(*cap), GFP_KERNEL);
+ if (!port->cap) {
+ put_device(&port->dev);
+ return ERR_PTR(-ENOMEM);
+ }
port->sw = typec_switch_get(&port->dev);
if (IS_ERR(port->sw)) {
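
The class.c changes above complete a small API migration: the role-swap callbacks
move out of struct typec_capability into a shared const struct typec_operations,
driver state is attached through cap->driver_data and retrieved with
typec_get_drvdata(), and the core now kmemdup()s the capability, so callers may
build it on the stack (as the tps6598x conversion below does). A minimal sketch of
a port driver under the new scheme; the my_* names and struct my_port are
hypothetical:

#include <linux/err.h>
#include <linux/usb/typec.h>

struct my_port {
	struct typec_port *port;
	/* driver-private state ... */
};

static int my_dr_set(struct typec_port *port, enum typec_data_role role)
{
	struct my_port *mp = typec_get_drvdata(port);

	/* program the hardware for @role using @mp ... */
	return 0;
}

static const struct typec_operations my_ops = {
	.dr_set = my_dr_set,
};

static int my_register(struct device *dev, struct my_port *mp)
{
	struct typec_capability cap = { };	/* stack-local: the core copies it */

	cap.type = TYPEC_PORT_DRP;
	cap.data = TYPEC_PORT_DRD;
	cap.ops = &my_ops;
	cap.driver_data = mp;

	mp->port = typec_register_port(dev, &cap);
	return PTR_ERR_OR_ZERO(mp->port);
}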
diff --git a/drivers/usb/typec/hd3ss3220.c b/drivers/usb/typec/hd3ss3220.c
new file mode 100644
index 000000000000..323dfa8160ab
--- /dev/null
+++ b/drivers/usb/typec/hd3ss3220.c
@@ -0,0 +1,269 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * TI HD3SS3220 Type-C DRP Port Controller Driver
+ *
+ * Copyright (C) 2019 Renesas Electronics Corp.
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/usb/role.h>
+#include <linux/irqreturn.h>
+#include <linux/interrupt.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/usb/typec.h>
+#include <linux/delay.h>
+
+#define HD3SS3220_REG_CN_STAT_CTRL 0x09
+#define HD3SS3220_REG_GEN_CTRL 0x0A
+#define HD3SS3220_REG_DEV_REV 0xA0
+
+/* Register HD3SS3220_REG_CN_STAT_CTRL */
+#define HD3SS3220_REG_CN_STAT_CTRL_ATTACHED_STATE_MASK (BIT(7) | BIT(6))
+#define HD3SS3220_REG_CN_STAT_CTRL_AS_DFP BIT(6)
+#define HD3SS3220_REG_CN_STAT_CTRL_AS_UFP BIT(7)
+#define HD3SS3220_REG_CN_STAT_CTRL_TO_ACCESSORY (BIT(7) | BIT(6))
+#define HD3SS3220_REG_CN_STAT_CTRL_INT_STATUS BIT(4)
+
+/* Register HD3SS3220_REG_GEN_CTRL */
+#define HD3SS3220_REG_GEN_CTRL_SRC_PREF_MASK (BIT(2) | BIT(1))
+#define HD3SS3220_REG_GEN_CTRL_SRC_PREF_DRP_DEFAULT 0x00
+#define HD3SS3220_REG_GEN_CTRL_SRC_PREF_DRP_TRY_SNK BIT(1)
+#define HD3SS3220_REG_GEN_CTRL_SRC_PREF_DRP_TRY_SRC (BIT(2) | BIT(1))
+
+struct hd3ss3220 {
+ struct device *dev;
+ struct regmap *regmap;
+ struct usb_role_switch *role_sw;
+ struct typec_port *port;
+};
+
+static int hd3ss3220_set_source_pref(struct hd3ss3220 *hd3ss3220, int src_pref)
+{
+ return regmap_update_bits(hd3ss3220->regmap, HD3SS3220_REG_GEN_CTRL,
+ HD3SS3220_REG_GEN_CTRL_SRC_PREF_MASK,
+ src_pref);
+}
+
+static enum usb_role hd3ss3220_get_attached_state(struct hd3ss3220 *hd3ss3220)
+{
+ unsigned int reg_val;
+ enum usb_role attached_state;
+ int ret;
+
+ ret = regmap_read(hd3ss3220->regmap, HD3SS3220_REG_CN_STAT_CTRL,
+ &reg_val);
+ if (ret < 0)
+ return ret;
+
+ switch (reg_val & HD3SS3220_REG_CN_STAT_CTRL_ATTACHED_STATE_MASK) {
+ case HD3SS3220_REG_CN_STAT_CTRL_AS_DFP:
+ attached_state = USB_ROLE_HOST;
+ break;
+ case HD3SS3220_REG_CN_STAT_CTRL_AS_UFP:
+ attached_state = USB_ROLE_DEVICE;
+ break;
+ default:
+ attached_state = USB_ROLE_NONE;
+ break;
+ }
+
+ return attached_state;
+}
+
+static int hd3ss3220_dr_set(struct typec_port *port, enum typec_data_role role)
+{
+ struct hd3ss3220 *hd3ss3220 = typec_get_drvdata(port);
+ enum usb_role role_val;
+ int pref, ret = 0;
+
+ if (role == TYPEC_HOST) {
+ role_val = USB_ROLE_HOST;
+ pref = HD3SS3220_REG_GEN_CTRL_SRC_PREF_DRP_TRY_SRC;
+ } else {
+ role_val = USB_ROLE_DEVICE;
+ pref = HD3SS3220_REG_GEN_CTRL_SRC_PREF_DRP_TRY_SNK;
+ }
+
+ ret = hd3ss3220_set_source_pref(hd3ss3220, pref);
+ usleep_range(10, 100);
+
+ usb_role_switch_set_role(hd3ss3220->role_sw, role_val);
+ typec_set_data_role(hd3ss3220->port, role);
+
+ return ret;
+}
+
+static const struct typec_operations hd3ss3220_ops = {
+ .dr_set = hd3ss3220_dr_set
+};
+
+static void hd3ss3220_set_role(struct hd3ss3220 *hd3ss3220)
+{
+ enum usb_role role_state = hd3ss3220_get_attached_state(hd3ss3220);
+
+ usb_role_switch_set_role(hd3ss3220->role_sw, role_state);
+ if (role_state == USB_ROLE_NONE)
+ hd3ss3220_set_source_pref(hd3ss3220,
+ HD3SS3220_REG_GEN_CTRL_SRC_PREF_DRP_DEFAULT);
+
+ switch (role_state) {
+ case USB_ROLE_HOST:
+ typec_set_data_role(hd3ss3220->port, TYPEC_HOST);
+ break;
+ case USB_ROLE_DEVICE:
+ typec_set_data_role(hd3ss3220->port, TYPEC_DEVICE);
+ break;
+ default:
+ break;
+ }
+}
+
+static irqreturn_t hd3ss3220_irq(struct hd3ss3220 *hd3ss3220)
+{
+ int err;
+
+ hd3ss3220_set_role(hd3ss3220);
+ err = regmap_update_bits_base(hd3ss3220->regmap,
+ HD3SS3220_REG_CN_STAT_CTRL,
+ HD3SS3220_REG_CN_STAT_CTRL_INT_STATUS,
+ HD3SS3220_REG_CN_STAT_CTRL_INT_STATUS,
+ NULL, false, true);
+ if (err < 0)
+ return IRQ_NONE;
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t hd3ss3220_irq_handler(int irq, void *data)
+{
+ struct i2c_client *client = to_i2c_client(data);
+ struct hd3ss3220 *hd3ss3220 = i2c_get_clientdata(client);
+
+ return hd3ss3220_irq(hd3ss3220);
+}
+
+static const struct regmap_config config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0x0A,
+};
+
+static int hd3ss3220_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct typec_capability typec_cap = { };
+ struct hd3ss3220 *hd3ss3220;
+ struct fwnode_handle *connector;
+ int ret;
+ unsigned int data;
+
+ hd3ss3220 = devm_kzalloc(&client->dev, sizeof(struct hd3ss3220),
+ GFP_KERNEL);
+ if (!hd3ss3220)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, hd3ss3220);
+
+ hd3ss3220->dev = &client->dev;
+ hd3ss3220->regmap = devm_regmap_init_i2c(client, &config);
+ if (IS_ERR(hd3ss3220->regmap))
+ return PTR_ERR(hd3ss3220->regmap);
+
+ hd3ss3220_set_source_pref(hd3ss3220,
+ HD3SS3220_REG_GEN_CTRL_SRC_PREF_DRP_DEFAULT);
+ connector = device_get_named_child_node(hd3ss3220->dev, "connector");
+ if (!connector)
+ return -ENODEV;
+
+ hd3ss3220->role_sw = fwnode_usb_role_switch_get(connector);
+ if (IS_ERR(hd3ss3220->role_sw)) {
+ ret = PTR_ERR(hd3ss3220->role_sw);
+ goto err_put_fwnode;
+ }
+
+ typec_cap.prefer_role = TYPEC_NO_PREFERRED_ROLE;
+ typec_cap.driver_data = hd3ss3220;
+ typec_cap.type = TYPEC_PORT_DRP;
+ typec_cap.data = TYPEC_PORT_DRD;
+ typec_cap.ops = &hd3ss3220_ops;
+ typec_cap.fwnode = connector;
+
+ hd3ss3220->port = typec_register_port(&client->dev, &typec_cap);
+ if (IS_ERR(hd3ss3220->port)) {
+ ret = PTR_ERR(hd3ss3220->port);
+ goto err_put_role;
+ }
+
+ hd3ss3220_set_role(hd3ss3220);
+ ret = regmap_read(hd3ss3220->regmap, HD3SS3220_REG_CN_STAT_CTRL, &data);
+ if (ret < 0)
+ goto err_unreg_port;
+
+ if (data & HD3SS3220_REG_CN_STAT_CTRL_INT_STATUS) {
+ ret = regmap_write(hd3ss3220->regmap,
+ HD3SS3220_REG_CN_STAT_CTRL,
+ data | HD3SS3220_REG_CN_STAT_CTRL_INT_STATUS);
+ if (ret < 0)
+ goto err_unreg_port;
+ }
+
+ if (client->irq > 0) {
+ ret = devm_request_threaded_irq(&client->dev, client->irq, NULL,
+ hd3ss3220_irq_handler,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ "hd3ss3220", &client->dev);
+ if (ret)
+ goto err_unreg_port;
+ }
+
+ ret = i2c_smbus_read_byte_data(client, HD3SS3220_REG_DEV_REV);
+ if (ret < 0)
+ goto err_unreg_port;
+
+ fwnode_handle_put(connector);
+
+ dev_info(&client->dev, "probed revision=0x%x\n", ret);
+
+ return 0;
+err_unreg_port:
+ typec_unregister_port(hd3ss3220->port);
+err_put_role:
+ usb_role_switch_put(hd3ss3220->role_sw);
+err_put_fwnode:
+ fwnode_handle_put(connector);
+
+ return ret;
+}
+
+static int hd3ss3220_remove(struct i2c_client *client)
+{
+ struct hd3ss3220 *hd3ss3220 = i2c_get_clientdata(client);
+
+ typec_unregister_port(hd3ss3220->port);
+ usb_role_switch_put(hd3ss3220->role_sw);
+
+ return 0;
+}
+
+static const struct of_device_id dev_ids[] = {
+ { .compatible = "ti,hd3ss3220"},
+ {}
+};
+MODULE_DEVICE_TABLE(of, dev_ids);
+
+static struct i2c_driver hd3ss3220_driver = {
+ .driver = {
+ .name = "hd3ss3220",
+ .of_match_table = of_match_ptr(dev_ids),
+ },
+ .probe = hd3ss3220_probe,
+ .remove = hd3ss3220_remove,
+};
+
+module_i2c_driver(hd3ss3220_driver);
+
+MODULE_AUTHOR("Biju Das <biju.das@bp.renesas.com>");
+MODULE_DESCRIPTION("TI HD3SS3220 DRP Port Controller Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index 5f61d9977a15..56fc356bc55c 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -380,9 +380,6 @@ static enum tcpm_state tcpm_default_state(struct tcpm_port *port)
return SNK_UNATTACHED;
else if (port->try_role == TYPEC_SOURCE)
return SRC_UNATTACHED;
- else if (port->tcpc->config &&
- port->tcpc->config->default_role == TYPEC_SINK)
- return SNK_UNATTACHED;
/* Fall through to return SRC_UNATTACHED */
} else if (port->port_type == TYPEC_PORT_SNK) {
return SNK_UNATTACHED;
@@ -390,12 +387,6 @@ static enum tcpm_state tcpm_default_state(struct tcpm_port *port)
return SRC_UNATTACHED;
}
-static inline
-struct tcpm_port *typec_cap_to_tcpm(const struct typec_capability *cap)
-{
- return container_of(cap, struct tcpm_port, typec_caps);
-}
-
static bool tcpm_port_is_disconnected(struct tcpm_port *port)
{
return (!port->attached && port->cc1 == TYPEC_CC_OPEN &&
@@ -3970,10 +3961,9 @@ void tcpm_pd_hard_reset(struct tcpm_port *port)
}
EXPORT_SYMBOL_GPL(tcpm_pd_hard_reset);
-static int tcpm_dr_set(const struct typec_capability *cap,
- enum typec_data_role data)
+static int tcpm_dr_set(struct typec_port *p, enum typec_data_role data)
{
- struct tcpm_port *port = typec_cap_to_tcpm(cap);
+ struct tcpm_port *port = typec_get_drvdata(p);
int ret;
mutex_lock(&port->swap_lock);
@@ -4038,10 +4028,9 @@ swap_unlock:
return ret;
}
-static int tcpm_pr_set(const struct typec_capability *cap,
- enum typec_role role)
+static int tcpm_pr_set(struct typec_port *p, enum typec_role role)
{
- struct tcpm_port *port = typec_cap_to_tcpm(cap);
+ struct tcpm_port *port = typec_get_drvdata(p);
int ret;
mutex_lock(&port->swap_lock);
@@ -4082,10 +4071,9 @@ swap_unlock:
return ret;
}
-static int tcpm_vconn_set(const struct typec_capability *cap,
- enum typec_role role)
+static int tcpm_vconn_set(struct typec_port *p, enum typec_role role)
{
- struct tcpm_port *port = typec_cap_to_tcpm(cap);
+ struct tcpm_port *port = typec_get_drvdata(p);
int ret;
mutex_lock(&port->swap_lock);
@@ -4122,16 +4110,16 @@ swap_unlock:
return ret;
}
-static int tcpm_try_role(const struct typec_capability *cap, int role)
+static int tcpm_try_role(struct typec_port *p, int role)
{
- struct tcpm_port *port = typec_cap_to_tcpm(cap);
+ struct tcpm_port *port = typec_get_drvdata(p);
struct tcpc_dev *tcpc = port->tcpc;
int ret = 0;
mutex_lock(&port->lock);
if (tcpc->try_role)
ret = tcpc->try_role(tcpc, role);
- if (!ret && (!tcpc->config || !tcpc->config->try_role_hw))
+ if (!ret)
port->try_role = role;
port->try_src_count = 0;
port->try_snk_count = 0;
@@ -4331,10 +4319,9 @@ static void tcpm_init(struct tcpm_port *port)
tcpm_set_state(port, PORT_RESET, 0);
}
-static int tcpm_port_type_set(const struct typec_capability *cap,
- enum typec_port_type type)
+static int tcpm_port_type_set(struct typec_port *p, enum typec_port_type type)
{
- struct tcpm_port *port = typec_cap_to_tcpm(cap);
+ struct tcpm_port *port = typec_get_drvdata(p);
mutex_lock(&port->lock);
if (type == port->port_type)
@@ -4359,6 +4346,14 @@ port_unlock:
return 0;
}
+static const struct typec_operations tcpm_ops = {
+ .try_role = tcpm_try_role,
+ .dr_set = tcpm_dr_set,
+ .pr_set = tcpm_pr_set,
+ .vconn_set = tcpm_vconn_set,
+ .port_type_set = tcpm_port_type_set
+};
+
void tcpm_tcpc_reset(struct tcpm_port *port)
{
mutex_lock(&port->lock);
@@ -4368,34 +4363,6 @@ void tcpm_tcpc_reset(struct tcpm_port *port)
}
EXPORT_SYMBOL_GPL(tcpm_tcpc_reset);
-static int tcpm_copy_pdos(u32 *dest_pdo, const u32 *src_pdo,
- unsigned int nr_pdo)
-{
- unsigned int i;
-
- if (nr_pdo > PDO_MAX_OBJECTS)
- nr_pdo = PDO_MAX_OBJECTS;
-
- for (i = 0; i < nr_pdo; i++)
- dest_pdo[i] = src_pdo[i];
-
- return nr_pdo;
-}
-
-static int tcpm_copy_vdos(u32 *dest_vdo, const u32 *src_vdo,
- unsigned int nr_vdo)
-{
- unsigned int i;
-
- if (nr_vdo > VDO_MAX_OBJECTS)
- nr_vdo = VDO_MAX_OBJECTS;
-
- for (i = 0; i < nr_vdo; i++)
- dest_vdo[i] = src_vdo[i];
-
- return nr_vdo;
-}
-
static int tcpm_fw_get_caps(struct tcpm_port *port,
struct fwnode_handle *fwnode)
{
@@ -4698,35 +4665,10 @@ static int devm_tcpm_psy_register(struct tcpm_port *port)
return PTR_ERR_OR_ZERO(port->psy);
}
-static int tcpm_copy_caps(struct tcpm_port *port,
- const struct tcpc_config *tcfg)
-{
- if (tcpm_validate_caps(port, tcfg->src_pdo, tcfg->nr_src_pdo) ||
- tcpm_validate_caps(port, tcfg->snk_pdo, tcfg->nr_snk_pdo))
- return -EINVAL;
-
- port->nr_src_pdo = tcpm_copy_pdos(port->src_pdo, tcfg->src_pdo,
- tcfg->nr_src_pdo);
- port->nr_snk_pdo = tcpm_copy_pdos(port->snk_pdo, tcfg->snk_pdo,
- tcfg->nr_snk_pdo);
-
- port->nr_snk_vdo = tcpm_copy_vdos(port->snk_vdo, tcfg->snk_vdo,
- tcfg->nr_snk_vdo);
-
- port->operating_snk_mw = tcfg->operating_snk_mw;
-
- port->typec_caps.prefer_role = tcfg->default_role;
- port->typec_caps.type = tcfg->type;
- port->typec_caps.data = tcfg->data;
- port->self_powered = tcfg->self_powered;
-
- return 0;
-}
-
struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
{
struct tcpm_port *port;
- int i, err;
+ int err;
if (!dev || !tcpc ||
!tcpc->get_vbus || !tcpc->set_cc || !tcpc->get_cc ||
@@ -4759,24 +4701,16 @@ struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
tcpm_debugfs_init(port);
err = tcpm_fw_get_caps(port, tcpc->fwnode);
- if ((err < 0) && tcpc->config)
- err = tcpm_copy_caps(port, tcpc->config);
if (err < 0)
goto out_destroy_wq;
- if (!tcpc->config || !tcpc->config->try_role_hw)
- port->try_role = port->typec_caps.prefer_role;
- else
- port->try_role = TYPEC_NO_PREFERRED_ROLE;
+ port->try_role = port->typec_caps.prefer_role;
port->typec_caps.fwnode = tcpc->fwnode;
port->typec_caps.revision = 0x0120; /* Type-C spec release 1.2 */
port->typec_caps.pd_revision = 0x0300; /* USB-PD spec release 3.0 */
- port->typec_caps.dr_set = tcpm_dr_set;
- port->typec_caps.pr_set = tcpm_pr_set;
- port->typec_caps.vconn_set = tcpm_vconn_set;
- port->typec_caps.try_role = tcpm_try_role;
- port->typec_caps.port_type_set = tcpm_port_type_set;
+ port->typec_caps.driver_data = port;
+ port->typec_caps.ops = &tcpm_ops;
port->partner_desc.identity = &port->partner_ident;
port->port_type = port->typec_caps.type;
@@ -4797,29 +4731,6 @@ struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
goto out_role_sw_put;
}
- if (tcpc->config && tcpc->config->alt_modes) {
- const struct typec_altmode_desc *paltmode = tcpc->config->alt_modes;
-
- i = 0;
- while (paltmode->svid && i < ARRAY_SIZE(port->port_altmode)) {
- struct typec_altmode *alt;
-
- alt = typec_port_register_altmode(port->typec_port,
- paltmode);
- if (IS_ERR(alt)) {
- tcpm_log(port,
- "%s: failed to register port alternate mode 0x%x",
- dev_name(dev), paltmode->svid);
- break;
- }
- typec_altmode_set_drvdata(alt, port);
- alt->ops = &tcpm_altmode_ops;
- port->port_altmode[i] = alt;
- i++;
- paltmode++;
- }
- }
-
mutex_lock(&port->lock);
tcpm_init(port);
mutex_unlock(&port->lock);
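
With struct tcpc_config and its copy helpers removed above, tcpm takes every
capability (source/sink PDOs, preferred role, port type, operating power) from the
connector fwnode via tcpm_fw_get_caps(), and a low-level TCPC driver only fills in
struct tcpc_dev. A rough sketch of the registration side under the new model;
struct my_chip and the my_* accessors are hypothetical and error handling is
trimmed:

#include <linux/err.h>
#include <linux/property.h>
#include <linux/usb/tcpm.h>

struct my_chip {
	struct tcpc_dev tcpc;
	struct tcpm_port *port;
};

static int my_chip_register(struct device *dev, struct my_chip *chip)
{
	chip->tcpc.fwnode = device_get_named_child_node(dev, "connector");
	if (!chip->tcpc.fwnode)
		return -ENODEV;

	/* The core validates these in tcpm_register_port(), as above. */
	chip->tcpc.get_vbus = my_get_vbus;
	chip->tcpc.set_cc = my_set_cc;
	chip->tcpc.get_cc = my_get_cc;
	/* ... remaining mandatory tcpc_dev ops ... */

	/* No tcpc->config fallback anymore: all caps come from the fwnode. */
	chip->port = tcpm_register_port(dev, &chip->tcpc);
	return PTR_ERR_OR_ZERO(chip->port);
}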
diff --git a/drivers/usb/typec/tps6598x.c b/drivers/usb/typec/tps6598x.c
index a38d1409f15b..0698addd1185 100644
--- a/drivers/usb/typec/tps6598x.c
+++ b/drivers/usb/typec/tps6598x.c
@@ -94,7 +94,6 @@ struct tps6598x {
struct typec_port *port;
struct typec_partner *partner;
struct usb_pd_identity partner_identity;
- struct typec_capability typec_cap;
};
/*
@@ -307,11 +306,10 @@ static int tps6598x_exec_cmd(struct tps6598x *tps, const char *cmd,
return 0;
}
-static int
-tps6598x_dr_set(const struct typec_capability *cap, enum typec_data_role role)
+static int tps6598x_dr_set(struct typec_port *port, enum typec_data_role role)
{
- struct tps6598x *tps = container_of(cap, struct tps6598x, typec_cap);
const char *cmd = (role == TYPEC_DEVICE) ? "SWUF" : "SWDF";
+ struct tps6598x *tps = typec_get_drvdata(port);
u32 status;
int ret;
@@ -338,11 +336,10 @@ out_unlock:
return ret;
}
-static int
-tps6598x_pr_set(const struct typec_capability *cap, enum typec_role role)
+static int tps6598x_pr_set(struct typec_port *port, enum typec_role role)
{
- struct tps6598x *tps = container_of(cap, struct tps6598x, typec_cap);
const char *cmd = (role == TYPEC_SINK) ? "SWSk" : "SWSr";
+ struct tps6598x *tps = typec_get_drvdata(port);
u32 status;
int ret;
@@ -369,6 +366,11 @@ out_unlock:
return ret;
}
+static const struct typec_operations tps6598x_ops = {
+ .dr_set = tps6598x_dr_set,
+ .pr_set = tps6598x_pr_set,
+};
+
static irqreturn_t tps6598x_interrupt(int irq, void *data)
{
struct tps6598x *tps = data;
@@ -448,6 +450,7 @@ static const struct regmap_config tps6598x_regmap_config = {
static int tps6598x_probe(struct i2c_client *client)
{
+ struct typec_capability typec_cap = { };
struct tps6598x *tps;
u32 status;
u32 conf;
@@ -492,40 +495,40 @@ static int tps6598x_probe(struct i2c_client *client)
if (ret < 0)
return ret;
- tps->typec_cap.revision = USB_TYPEC_REV_1_2;
- tps->typec_cap.pd_revision = 0x200;
- tps->typec_cap.prefer_role = TYPEC_NO_PREFERRED_ROLE;
- tps->typec_cap.pr_set = tps6598x_pr_set;
- tps->typec_cap.dr_set = tps6598x_dr_set;
+ typec_cap.revision = USB_TYPEC_REV_1_2;
+ typec_cap.pd_revision = 0x200;
+ typec_cap.prefer_role = TYPEC_NO_PREFERRED_ROLE;
+ typec_cap.driver_data = tps;
+ typec_cap.ops = &tps6598x_ops;
switch (TPS_SYSCONF_PORTINFO(conf)) {
case TPS_PORTINFO_SINK_ACCESSORY:
case TPS_PORTINFO_SINK:
- tps->typec_cap.type = TYPEC_PORT_SNK;
- tps->typec_cap.data = TYPEC_PORT_UFP;
+ typec_cap.type = TYPEC_PORT_SNK;
+ typec_cap.data = TYPEC_PORT_UFP;
break;
case TPS_PORTINFO_DRP_UFP_DRD:
case TPS_PORTINFO_DRP_DFP_DRD:
- tps->typec_cap.type = TYPEC_PORT_DRP;
- tps->typec_cap.data = TYPEC_PORT_DRD;
+ typec_cap.type = TYPEC_PORT_DRP;
+ typec_cap.data = TYPEC_PORT_DRD;
break;
case TPS_PORTINFO_DRP_UFP:
- tps->typec_cap.type = TYPEC_PORT_DRP;
- tps->typec_cap.data = TYPEC_PORT_UFP;
+ typec_cap.type = TYPEC_PORT_DRP;
+ typec_cap.data = TYPEC_PORT_UFP;
break;
case TPS_PORTINFO_DRP_DFP:
- tps->typec_cap.type = TYPEC_PORT_DRP;
- tps->typec_cap.data = TYPEC_PORT_DFP;
+ typec_cap.type = TYPEC_PORT_DRP;
+ typec_cap.data = TYPEC_PORT_DFP;
break;
case TPS_PORTINFO_SOURCE:
- tps->typec_cap.type = TYPEC_PORT_SRC;
- tps->typec_cap.data = TYPEC_PORT_DFP;
+ typec_cap.type = TYPEC_PORT_SRC;
+ typec_cap.data = TYPEC_PORT_DFP;
break;
default:
return -ENODEV;
}
- tps->port = typec_register_port(&client->dev, &tps->typec_cap);
+ tps->port = typec_register_port(&client->dev, &typec_cap);
if (IS_ERR(tps->port))
return PTR_ERR(tps->port);
diff --git a/drivers/usb/typec/ucsi/displayport.c b/drivers/usb/typec/ucsi/displayport.c
index d99700cb4dca..d4d5189edfb8 100644
--- a/drivers/usb/typec/ucsi/displayport.c
+++ b/drivers/usb/typec/ucsi/displayport.c
@@ -48,7 +48,8 @@ struct ucsi_dp {
static int ucsi_displayport_enter(struct typec_altmode *alt)
{
struct ucsi_dp *dp = typec_altmode_get_drvdata(alt);
- struct ucsi_control ctrl;
+ struct ucsi *ucsi = dp->con->ucsi;
+ u64 command;
u8 cur = 0;
int ret;
@@ -59,25 +60,21 @@ static int ucsi_displayport_enter(struct typec_altmode *alt)
dev_warn(&p->dev,
"firmware doesn't support alternate mode overriding\n");
- mutex_unlock(&dp->con->lock);
- return -EOPNOTSUPP;
+ ret = -EOPNOTSUPP;
+ goto err_unlock;
}
- UCSI_CMD_GET_CURRENT_CAM(ctrl, dp->con->num);
- ret = ucsi_send_command(dp->con->ucsi, &ctrl, &cur, sizeof(cur));
+ command = UCSI_GET_CURRENT_CAM | UCSI_CONNECTOR_NUMBER(dp->con->num);
+ ret = ucsi_send_command(ucsi, command, &cur, sizeof(cur));
if (ret < 0) {
- if (dp->con->ucsi->ppm->data->version > 0x0100) {
- mutex_unlock(&dp->con->lock);
- return ret;
- }
+ if (ucsi->version > 0x0100)
+ goto err_unlock;
cur = 0xff;
}
if (cur != 0xff) {
- mutex_unlock(&dp->con->lock);
- if (dp->con->port_altmode[cur] == alt)
- return 0;
- return -EBUSY;
+ ret = dp->con->port_altmode[cur] == alt ? 0 : -EBUSY;
+ goto err_unlock;
}
/*
@@ -94,16 +91,17 @@ static int ucsi_displayport_enter(struct typec_altmode *alt)
dp->vdo_size = 1;
schedule_work(&dp->work);
-
+ ret = 0;
+err_unlock:
mutex_unlock(&dp->con->lock);
- return 0;
+ return ret;
}
static int ucsi_displayport_exit(struct typec_altmode *alt)
{
struct ucsi_dp *dp = typec_altmode_get_drvdata(alt);
- struct ucsi_control ctrl;
+ u64 command;
int ret = 0;
mutex_lock(&dp->con->lock);
@@ -117,8 +115,8 @@ static int ucsi_displayport_exit(struct typec_altmode *alt)
goto out_unlock;
}
- ctrl.raw_cmd = UCSI_CMD_SET_NEW_CAM(dp->con->num, 0, dp->offset, 0);
- ret = ucsi_send_command(dp->con->ucsi, &ctrl, NULL, 0);
+ command = UCSI_CMD_SET_NEW_CAM(dp->con->num, 0, dp->offset, 0);
+ ret = ucsi_send_command(dp->con->ucsi, command, NULL, 0);
if (ret < 0)
goto out_unlock;
@@ -172,14 +170,14 @@ static int ucsi_displayport_status_update(struct ucsi_dp *dp)
static int ucsi_displayport_configure(struct ucsi_dp *dp)
{
u32 pins = DP_CONF_GET_PIN_ASSIGN(dp->data.conf);
- struct ucsi_control ctrl;
+ u64 command;
if (!dp->override)
return 0;
- ctrl.raw_cmd = UCSI_CMD_SET_NEW_CAM(dp->con->num, 1, dp->offset, pins);
+ command = UCSI_CMD_SET_NEW_CAM(dp->con->num, 1, dp->offset, pins);
- return ucsi_send_command(dp->con->ucsi, &ctrl, NULL, 0);
+ return ucsi_send_command(dp->con->ucsi, command, NULL, 0);
}
static int ucsi_displayport_vdm(struct typec_altmode *alt,
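
The displayport conversion above shows the command convention used throughout the
rest of this series: instead of filling a struct ucsi_control, callers build a
plain 64-bit command word by OR-ing the command code with field macros and hand it
to ucsi_send_command(), which returns the MESSAGE_IN length on success. A small
illustration using the values from the hunk above:

	u64 command;
	u8 cur;
	int ret;

	/* GET_CURRENT_CAM for one connector, encoded as a single u64 */
	command = UCSI_GET_CURRENT_CAM | UCSI_CONNECTOR_NUMBER(con->num);
	ret = ucsi_send_command(ucsi, command, &cur, sizeof(cur));
	if (ret < 0)
		return ret;	/* negative errno, or data length if >= 0 */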
diff --git a/drivers/usb/typec/ucsi/trace.c b/drivers/usb/typec/ucsi/trace.c
index 1dabafb74320..48ad1dc1b1b2 100644
--- a/drivers/usb/typec/ucsi/trace.c
+++ b/drivers/usb/typec/ucsi/trace.c
@@ -33,17 +33,6 @@ const char *ucsi_cmd_str(u64 raw_cmd)
return ucsi_cmd_strs[(cmd >= ARRAY_SIZE(ucsi_cmd_strs)) ? 0 : cmd];
}
-static const char * const ucsi_ack_strs[] = {
- [0] = "",
- [UCSI_ACK_EVENT] = "event",
- [UCSI_ACK_CMD] = "command",
-};
-
-const char *ucsi_ack_str(u8 ack)
-{
- return ucsi_ack_strs[(ack >= ARRAY_SIZE(ucsi_ack_strs)) ? 0 : ack];
-}
-
const char *ucsi_cci_str(u32 cci)
{
if (cci & GENMASK(7, 0)) {
diff --git a/drivers/usb/typec/ucsi/trace.h b/drivers/usb/typec/ucsi/trace.h
index 783ec9c72055..a0d3a934d3d9 100644
--- a/drivers/usb/typec/ucsi/trace.h
+++ b/drivers/usb/typec/ucsi/trace.h
@@ -10,54 +10,18 @@
#include <linux/usb/typec_altmode.h>
const char *ucsi_cmd_str(u64 raw_cmd);
-const char *ucsi_ack_str(u8 ack);
const char *ucsi_cci_str(u32 cci);
const char *ucsi_recipient_str(u8 recipient);
-DECLARE_EVENT_CLASS(ucsi_log_ack,
- TP_PROTO(u8 ack),
- TP_ARGS(ack),
- TP_STRUCT__entry(
- __field(u8, ack)
- ),
- TP_fast_assign(
- __entry->ack = ack;
- ),
- TP_printk("ACK %s", ucsi_ack_str(__entry->ack))
-);
-
-DEFINE_EVENT(ucsi_log_ack, ucsi_ack,
- TP_PROTO(u8 ack),
- TP_ARGS(ack)
-);
-
-DECLARE_EVENT_CLASS(ucsi_log_control,
- TP_PROTO(struct ucsi_control *ctrl),
- TP_ARGS(ctrl),
- TP_STRUCT__entry(
- __field(u64, ctrl)
- ),
- TP_fast_assign(
- __entry->ctrl = ctrl->raw_cmd;
- ),
- TP_printk("control=%08llx (%s)", __entry->ctrl,
- ucsi_cmd_str(__entry->ctrl))
-);
-
-DEFINE_EVENT(ucsi_log_control, ucsi_command,
- TP_PROTO(struct ucsi_control *ctrl),
- TP_ARGS(ctrl)
-);
-
DECLARE_EVENT_CLASS(ucsi_log_command,
- TP_PROTO(struct ucsi_control *ctrl, int ret),
- TP_ARGS(ctrl, ret),
+ TP_PROTO(u64 command, int ret),
+ TP_ARGS(command, ret),
TP_STRUCT__entry(
__field(u64, ctrl)
__field(int, ret)
),
TP_fast_assign(
- __entry->ctrl = ctrl->raw_cmd;
+ __entry->ctrl = command;
__entry->ret = ret;
),
TP_printk("%s -> %s (err=%d)", ucsi_cmd_str(__entry->ctrl),
@@ -66,30 +30,13 @@ DECLARE_EVENT_CLASS(ucsi_log_command,
);
DEFINE_EVENT(ucsi_log_command, ucsi_run_command,
- TP_PROTO(struct ucsi_control *ctrl, int ret),
- TP_ARGS(ctrl, ret)
+ TP_PROTO(u64 command, int ret),
+ TP_ARGS(command, ret)
);
DEFINE_EVENT(ucsi_log_command, ucsi_reset_ppm,
- TP_PROTO(struct ucsi_control *ctrl, int ret),
- TP_ARGS(ctrl, ret)
-);
-
-DECLARE_EVENT_CLASS(ucsi_log_cci,
- TP_PROTO(u32 cci),
- TP_ARGS(cci),
- TP_STRUCT__entry(
- __field(u32, cci)
- ),
- TP_fast_assign(
- __entry->cci = cci;
- ),
- TP_printk("CCI=%08x %s", __entry->cci, ucsi_cci_str(__entry->cci))
-);
-
-DEFINE_EVENT(ucsi_log_cci, ucsi_notify,
- TP_PROTO(u32 cci),
- TP_ARGS(cci)
+ TP_PROTO(u64 command, int ret),
+ TP_ARGS(command, ret)
);
DECLARE_EVENT_CLASS(ucsi_log_connector_status,
@@ -109,13 +56,13 @@ DECLARE_EVENT_CLASS(ucsi_log_connector_status,
TP_fast_assign(
__entry->port = port - 1;
__entry->change = status->change;
- __entry->opmode = status->pwr_op_mode;
- __entry->connected = status->connected;
- __entry->pwr_dir = status->pwr_dir;
- __entry->partner_flags = status->partner_flags;
- __entry->partner_type = status->partner_type;
+ __entry->opmode = UCSI_CONSTAT_PWR_OPMODE(status->flags);
+ __entry->connected = !!(status->flags & UCSI_CONSTAT_CONNECTED);
+ __entry->pwr_dir = !!(status->flags & UCSI_CONSTAT_PWR_DIR);
+ __entry->partner_flags = UCSI_CONSTAT_PARTNER_FLAGS(status->flags);
+ __entry->partner_type = UCSI_CONSTAT_PARTNER_TYPE(status->flags);
__entry->request_data_obj = status->request_data_obj;
- __entry->bc_status = status->bc_status;
+ __entry->bc_status = UCSI_CONSTAT_BC_STATUS(status->pwr_status);
),
TP_printk("port%d status: change=%04x, opmode=%x, connected=%d, "
"sourcing=%d, partner_flags=%x, partner_type=%x, "
diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
index ba288b964dc8..4459bc68aa33 100644
--- a/drivers/usb/typec/ucsi/ucsi.c
+++ b/drivers/usb/typec/ucsi/ucsi.c
@@ -17,9 +17,6 @@
#include "ucsi.h"
#include "trace.h"
-#define to_ucsi_connector(_cap_) container_of(_cap_, struct ucsi_connector, \
- typec_cap)
-
/*
* UCSI_TIMEOUT_MS - PPM communication timeout
*
@@ -39,169 +36,148 @@
*/
#define UCSI_SWAP_TIMEOUT_MS 5000
-static inline int ucsi_sync(struct ucsi *ucsi)
+static int ucsi_acknowledge_command(struct ucsi *ucsi)
{
- if (ucsi->ppm && ucsi->ppm->sync)
- return ucsi->ppm->sync(ucsi->ppm);
- return 0;
+ u64 ctrl;
+
+ ctrl = UCSI_ACK_CC_CI;
+ ctrl |= UCSI_ACK_COMMAND_COMPLETE;
+
+ return ucsi->ops->sync_write(ucsi, UCSI_CONTROL, &ctrl, sizeof(ctrl));
+}
+
+static int ucsi_acknowledge_connector_change(struct ucsi *ucsi)
+{
+ u64 ctrl;
+
+ ctrl = UCSI_ACK_CC_CI;
+ ctrl |= UCSI_ACK_CONNECTOR_CHANGE;
+
+ return ucsi->ops->async_write(ucsi, UCSI_CONTROL, &ctrl, sizeof(ctrl));
}
-static int ucsi_command(struct ucsi *ucsi, struct ucsi_control *ctrl)
+static int ucsi_exec_command(struct ucsi *ucsi, u64 command);
+
+static int ucsi_read_error(struct ucsi *ucsi)
{
+ u16 error;
int ret;
- trace_ucsi_command(ctrl);
+ /* Acknowledge the command that failed */
+ ret = ucsi_acknowledge_command(ucsi);
+ if (ret)
+ return ret;
- set_bit(COMMAND_PENDING, &ucsi->flags);
+ ret = ucsi_exec_command(ucsi, UCSI_GET_ERROR_STATUS);
+ if (ret < 0)
+ return ret;
- ret = ucsi->ppm->cmd(ucsi->ppm, ctrl);
+ ret = ucsi->ops->read(ucsi, UCSI_MESSAGE_IN, &error, sizeof(error));
if (ret)
- goto err_clear_flag;
+ return ret;
- if (!wait_for_completion_timeout(&ucsi->complete,
- msecs_to_jiffies(UCSI_TIMEOUT_MS))) {
- dev_warn(ucsi->dev, "PPM NOT RESPONDING\n");
- ret = -ETIMEDOUT;
+ switch (error) {
+ case UCSI_ERROR_INCOMPATIBLE_PARTNER:
+ return -EOPNOTSUPP;
+ case UCSI_ERROR_CC_COMMUNICATION_ERR:
+ return -ECOMM;
+ case UCSI_ERROR_CONTRACT_NEGOTIATION_FAIL:
+ return -EPROTO;
+ case UCSI_ERROR_DEAD_BATTERY:
+ dev_warn(ucsi->dev, "Dead battery condition!\n");
+ return -EPERM;
+ case UCSI_ERROR_INVALID_CON_NUM:
+ case UCSI_ERROR_UNREGONIZED_CMD:
+ case UCSI_ERROR_INVALID_CMD_ARGUMENT:
+ dev_err(ucsi->dev, "possible UCSI driver bug %u\n", error);
+ return -EINVAL;
+ case UCSI_ERROR_OVERCURRENT:
+ dev_warn(ucsi->dev, "Overcurrent condition\n");
+ break;
+ case UCSI_ERROR_PARTNER_REJECTED_SWAP:
+ dev_warn(ucsi->dev, "Partner rejected swap\n");
+ break;
+ case UCSI_ERROR_HARD_RESET:
+ dev_warn(ucsi->dev, "Hard reset occurred\n");
+ break;
+ case UCSI_ERROR_PPM_POLICY_CONFLICT:
+ dev_warn(ucsi->dev, "PPM Policy conflict\n");
+ break;
+ case UCSI_ERROR_SWAP_REJECTED:
+ dev_warn(ucsi->dev, "Swap rejected\n");
+ break;
+ case UCSI_ERROR_UNDEFINED:
+ default:
+ dev_err(ucsi->dev, "unknown error %u\n", error);
+ break;
}
-err_clear_flag:
- clear_bit(COMMAND_PENDING, &ucsi->flags);
-
- return ret;
+ return -EIO;
}
-static int ucsi_ack(struct ucsi *ucsi, u8 ack)
+static int ucsi_exec_command(struct ucsi *ucsi, u64 cmd)
{
- struct ucsi_control ctrl;
+ u32 cci;
int ret;
- trace_ucsi_ack(ack);
-
- set_bit(ACK_PENDING, &ucsi->flags);
+ ret = ucsi->ops->sync_write(ucsi, UCSI_CONTROL, &cmd, sizeof(cmd));
+ if (ret)
+ return ret;
- UCSI_CMD_ACK(ctrl, ack);
- ret = ucsi->ppm->cmd(ucsi->ppm, &ctrl);
+ ret = ucsi->ops->read(ucsi, UCSI_CCI, &cci, sizeof(cci));
if (ret)
- goto out_clear_bit;
+ return ret;
- /* Waiting for ACK with ACK CMD, but not with EVENT for now */
- if (ack == UCSI_ACK_EVENT)
- goto out_clear_bit;
+ if (cci & UCSI_CCI_BUSY)
+ return -EBUSY;
- if (!wait_for_completion_timeout(&ucsi->complete,
- msecs_to_jiffies(UCSI_TIMEOUT_MS)))
- ret = -ETIMEDOUT;
+ if (!(cci & UCSI_CCI_COMMAND_COMPLETE))
+ return -EIO;
-out_clear_bit:
- clear_bit(ACK_PENDING, &ucsi->flags);
+ if (cci & UCSI_CCI_NOT_SUPPORTED)
+ return -EOPNOTSUPP;
- if (ret)
- dev_err(ucsi->dev, "%s: failed\n", __func__);
+ if (cci & UCSI_CCI_ERROR) {
+ if (cmd == UCSI_GET_ERROR_STATUS)
+ return -EIO;
+ return ucsi_read_error(ucsi);
+ }
- return ret;
+ return UCSI_CCI_LENGTH(cci);
}
-static int ucsi_run_command(struct ucsi *ucsi, struct ucsi_control *ctrl,
+static int ucsi_run_command(struct ucsi *ucsi, u64 command,
void *data, size_t size)
{
- struct ucsi_control _ctrl;
- u8 data_length;
- u16 error;
+ u8 length;
int ret;
- ret = ucsi_command(ucsi, ctrl);
- if (ret)
- goto err;
-
- switch (ucsi->status) {
- case UCSI_IDLE:
- ret = ucsi_sync(ucsi);
- if (ret)
- dev_warn(ucsi->dev, "%s: sync failed\n", __func__);
-
- if (data)
- memcpy(data, ucsi->ppm->data->message_in, size);
-
- data_length = ucsi->ppm->data->cci.data_length;
-
- ret = ucsi_ack(ucsi, UCSI_ACK_CMD);
- if (!ret)
- ret = data_length;
- break;
- case UCSI_BUSY:
- /* The caller decides whether to cancel or not */
- ret = -EBUSY;
- break;
- case UCSI_ERROR:
- ret = ucsi_ack(ucsi, UCSI_ACK_CMD);
- if (ret)
- break;
-
- _ctrl.raw_cmd = 0;
- _ctrl.cmd.cmd = UCSI_GET_ERROR_STATUS;
- ret = ucsi_command(ucsi, &_ctrl);
- if (ret) {
- dev_err(ucsi->dev, "reading error failed!\n");
- break;
- }
+ ret = ucsi_exec_command(ucsi, command);
+ if (ret < 0)
+ return ret;
- memcpy(&error, ucsi->ppm->data->message_in, sizeof(error));
+ length = ret;
- /* Something has really gone wrong */
- if (WARN_ON(ucsi->status == UCSI_ERROR)) {
- ret = -ENODEV;
- break;
- }
-
- ret = ucsi_ack(ucsi, UCSI_ACK_CMD);
+ if (data) {
+ ret = ucsi->ops->read(ucsi, UCSI_MESSAGE_IN, data, size);
if (ret)
- break;
-
- switch (error) {
- case UCSI_ERROR_INCOMPATIBLE_PARTNER:
- ret = -EOPNOTSUPP;
- break;
- case UCSI_ERROR_CC_COMMUNICATION_ERR:
- ret = -ECOMM;
- break;
- case UCSI_ERROR_CONTRACT_NEGOTIATION_FAIL:
- ret = -EPROTO;
- break;
- case UCSI_ERROR_DEAD_BATTERY:
- dev_warn(ucsi->dev, "Dead battery condition!\n");
- ret = -EPERM;
- break;
- /* The following mean a bug in this driver */
- case UCSI_ERROR_INVALID_CON_NUM:
- case UCSI_ERROR_UNREGONIZED_CMD:
- case UCSI_ERROR_INVALID_CMD_ARGUMENT:
- dev_warn(ucsi->dev,
- "%s: possible UCSI driver bug - error 0x%x\n",
- __func__, error);
- ret = -EINVAL;
- break;
- default:
- dev_warn(ucsi->dev,
- "%s: error without status\n", __func__);
- ret = -EIO;
- break;
- }
- break;
+ return ret;
}
-err:
- trace_ucsi_run_command(ctrl, ret);
+ ret = ucsi_acknowledge_command(ucsi);
+ if (ret)
+ return ret;
- return ret;
+ return length;
}
-int ucsi_send_command(struct ucsi *ucsi, struct ucsi_control *ctrl,
+int ucsi_send_command(struct ucsi *ucsi, u64 command,
void *retval, size_t size)
{
int ret;
mutex_lock(&ucsi->ppm_lock);
- ret = ucsi_run_command(ucsi, ctrl, retval, size);
+ ret = ucsi_run_command(ucsi, command, retval, size);
mutex_unlock(&ucsi->ppm_lock);
return ret;
@@ -210,11 +186,12 @@ EXPORT_SYMBOL_GPL(ucsi_send_command);
int ucsi_resume(struct ucsi *ucsi)
{
- struct ucsi_control ctrl;
+ u64 command;
/* Restore UCSI notification enable mask after system resume */
- UCSI_CMD_SET_NTFY_ENABLE(ctrl, UCSI_ENABLE_NTFY_ALL);
- return ucsi_send_command(ucsi, &ctrl, NULL, 0);
+ command = UCSI_SET_NOTIFICATION_ENABLE | UCSI_ENABLE_NTFY_ALL;
+
+ return ucsi_send_command(ucsi, command, NULL, 0);
}
EXPORT_SYMBOL_GPL(ucsi_resume);
/* -------------------------------------------------------------------------- */
@@ -222,15 +199,15 @@ EXPORT_SYMBOL_GPL(ucsi_resume);
void ucsi_altmode_update_active(struct ucsi_connector *con)
{
const struct typec_altmode *altmode = NULL;
- struct ucsi_control ctrl;
+ u64 command;
int ret;
u8 cur;
int i;
- UCSI_CMD_GET_CURRENT_CAM(ctrl, con->num);
- ret = ucsi_run_command(con->ucsi, &ctrl, &cur, sizeof(cur));
+ command = UCSI_GET_CURRENT_CAM | UCSI_CONNECTOR_NUMBER(con->num);
+ ret = ucsi_run_command(con->ucsi, command, &cur, sizeof(cur));
if (ret < 0) {
- if (con->ucsi->ppm->data->version > 0x0100) {
+ if (con->ucsi->version > 0x0100) {
dev_err(con->ucsi->dev,
"GET_CURRENT_CAM command failed\n");
return;
@@ -346,7 +323,7 @@ static int ucsi_register_altmodes(struct ucsi_connector *con, u8 recipient)
int max_altmodes = UCSI_MAX_ALTMODES;
struct typec_altmode_desc desc;
struct ucsi_altmode alt[2];
- struct ucsi_control ctrl;
+ u64 command;
int num = 1;
int ret;
int len;
@@ -364,8 +341,11 @@ static int ucsi_register_altmodes(struct ucsi_connector *con, u8 recipient)
for (i = 0; i < max_altmodes;) {
memset(alt, 0, sizeof(alt));
- UCSI_CMD_GET_ALTERNATE_MODES(ctrl, recipient, con->num, i, 1);
- len = ucsi_run_command(con->ucsi, &ctrl, alt, sizeof(alt));
+ command = UCSI_GET_ALTERNATE_MODES;
+ command |= UCSI_GET_ALTMODE_RECIPIENT(recipient);
+ command |= UCSI_GET_ALTMODE_CONNECTOR_NUMBER(con->num);
+ command |= UCSI_GET_ALTMODE_OFFSET(i);
+ len = ucsi_run_command(con->ucsi, command, alt, sizeof(alt));
if (len <= 0)
return len;
@@ -427,7 +407,7 @@ static void ucsi_unregister_altmodes(struct ucsi_connector *con, u8 recipient)
static void ucsi_pwr_opmode_change(struct ucsi_connector *con)
{
- switch (con->status.pwr_op_mode) {
+ switch (UCSI_CONSTAT_PWR_OPMODE(con->status.flags)) {
case UCSI_CONSTAT_PWR_OPMODE_PD:
typec_set_pwr_opmode(con->port, TYPEC_PWR_MODE_PD);
break;
@@ -445,6 +425,7 @@ static void ucsi_pwr_opmode_change(struct ucsi_connector *con)
static int ucsi_register_partner(struct ucsi_connector *con)
{
+ u8 pwr_opmode = UCSI_CONSTAT_PWR_OPMODE(con->status.flags);
struct typec_partner_desc desc;
struct typec_partner *partner;
@@ -453,7 +434,7 @@ static int ucsi_register_partner(struct ucsi_connector *con)
memset(&desc, 0, sizeof(desc));
- switch (con->status.partner_type) {
+ switch (UCSI_CONSTAT_PARTNER_TYPE(con->status.flags)) {
case UCSI_CONSTAT_PARTNER_TYPE_DEBUG:
desc.accessory = TYPEC_ACCESSORY_DEBUG;
break;
@@ -464,7 +445,7 @@ static int ucsi_register_partner(struct ucsi_connector *con)
break;
}
- desc.usb_pd = con->status.pwr_op_mode == UCSI_CONSTAT_PWR_OPMODE_PD;
+ desc.usb_pd = pwr_opmode == UCSI_CONSTAT_PWR_OPMODE_PD;
partner = typec_register_partner(con->port, &desc);
if (IS_ERR(partner)) {
@@ -496,7 +477,7 @@ static void ucsi_partner_change(struct ucsi_connector *con)
if (!con->partner)
return;
- switch (con->status.partner_type) {
+ switch (UCSI_CONSTAT_PARTNER_TYPE(con->status.flags)) {
case UCSI_CONSTAT_PARTNER_TYPE_UFP:
typec_set_data_role(con->port, TYPEC_HOST);
break;
@@ -521,29 +502,33 @@ static void ucsi_partner_change(struct ucsi_connector *con)
ucsi_altmode_update_active(con);
}
-static void ucsi_connector_change(struct work_struct *work)
+static void ucsi_handle_connector_change(struct work_struct *work)
{
struct ucsi_connector *con = container_of(work, struct ucsi_connector,
work);
struct ucsi *ucsi = con->ucsi;
- struct ucsi_control ctrl;
+ enum typec_role role;
+ u64 command;
int ret;
mutex_lock(&con->lock);
- UCSI_CMD_GET_CONNECTOR_STATUS(ctrl, con->num);
- ret = ucsi_send_command(ucsi, &ctrl, &con->status, sizeof(con->status));
+ command = UCSI_GET_CONNECTOR_STATUS | UCSI_CONNECTOR_NUMBER(con->num);
+ ret = ucsi_send_command(ucsi, command, &con->status,
+ sizeof(con->status));
if (ret < 0) {
dev_err(ucsi->dev, "%s: GET_CONNECTOR_STATUS failed (%d)\n",
__func__, ret);
goto out_unlock;
}
+ role = !!(con->status.flags & UCSI_CONSTAT_PWR_DIR);
+
if (con->status.change & UCSI_CONSTAT_POWER_OPMODE_CHANGE)
ucsi_pwr_opmode_change(con);
if (con->status.change & UCSI_CONSTAT_POWER_DIR_CHANGE) {
- typec_set_pwr_role(con->port, con->status.pwr_dir);
+ typec_set_pwr_role(con->port, role);
/* Complete pending power role swap */
if (!completion_done(&con->complete))
@@ -551,9 +536,9 @@ static void ucsi_connector_change(struct work_struct *work)
}
if (con->status.change & UCSI_CONSTAT_CONNECT_CHANGE) {
- typec_set_pwr_role(con->port, con->status.pwr_dir);
+ typec_set_pwr_role(con->port, role);
- switch (con->status.partner_type) {
+ switch (UCSI_CONSTAT_PARTNER_TYPE(con->status.flags)) {
case UCSI_CONSTAT_PARTNER_TYPE_UFP:
typec_set_data_role(con->port, TYPEC_HOST);
break;
@@ -564,7 +549,7 @@ static void ucsi_connector_change(struct work_struct *work)
break;
}
- if (con->status.connected)
+ if (con->status.flags & UCSI_CONSTAT_CONNECTED)
ucsi_register_partner(con);
else
ucsi_unregister_partner(con);
@@ -576,14 +561,15 @@ static void ucsi_connector_change(struct work_struct *work)
* Running GET_CAM_SUPPORTED command just to make sure the PPM
* does not get stuck in case it assumes we do so.
*/
- UCSI_CMD_GET_CAM_SUPPORTED(ctrl, con->num);
- ucsi_run_command(con->ucsi, &ctrl, NULL, 0);
+ command = UCSI_GET_CAM_SUPPORTED;
+ command |= UCSI_CONNECTOR_NUMBER(con->num);
+ ucsi_run_command(con->ucsi, command, NULL, 0);
}
if (con->status.change & UCSI_CONSTAT_PARTNER_CHANGE)
ucsi_partner_change(con);
- ret = ucsi_ack(ucsi, UCSI_ACK_EVENT);
+ ret = ucsi_acknowledge_connector_change(ucsi);
if (ret)
dev_err(ucsi->dev, "%s: ACK failed (%d)", __func__, ret);
@@ -595,117 +581,83 @@ out_unlock:
}
/**
- * ucsi_notify - PPM notification handler
- * @ucsi: Source UCSI Interface for the notifications
- *
- * Handle notifications from PPM of @ucsi.
+ * ucsi_connector_change - Process Connector Change Event
+ * @ucsi: UCSI Interface
+ * @num: Connector number
*/
-void ucsi_notify(struct ucsi *ucsi)
+void ucsi_connector_change(struct ucsi *ucsi, u8 num)
{
- struct ucsi_cci *cci;
-
- /* There is no requirement to sync here, but no harm either. */
- ucsi_sync(ucsi);
-
- cci = &ucsi->ppm->data->cci;
-
- if (cci->error)
- ucsi->status = UCSI_ERROR;
- else if (cci->busy)
- ucsi->status = UCSI_BUSY;
- else
- ucsi->status = UCSI_IDLE;
+ struct ucsi_connector *con = &ucsi->connector[num - 1];
- if (cci->cmd_complete && test_bit(COMMAND_PENDING, &ucsi->flags)) {
- complete(&ucsi->complete);
- } else if (cci->ack_complete && test_bit(ACK_PENDING, &ucsi->flags)) {
- complete(&ucsi->complete);
- } else if (cci->connector_change) {
- struct ucsi_connector *con;
-
- con = &ucsi->connector[cci->connector_change - 1];
-
- if (!test_and_set_bit(EVENT_PENDING, &ucsi->flags))
- schedule_work(&con->work);
- }
-
- trace_ucsi_notify(ucsi->ppm->data->raw_cci);
+ if (!test_and_set_bit(EVENT_PENDING, &ucsi->flags))
+ schedule_work(&con->work);
}
-EXPORT_SYMBOL_GPL(ucsi_notify);
+EXPORT_SYMBOL_GPL(ucsi_connector_change);
/* -------------------------------------------------------------------------- */
static int ucsi_reset_connector(struct ucsi_connector *con, bool hard)
{
- struct ucsi_control ctrl;
+ u64 command;
- UCSI_CMD_CONNECTOR_RESET(ctrl, con, hard);
+ command = UCSI_CONNECTOR_RESET | UCSI_CONNECTOR_NUMBER(con->num);
+ command |= hard ? UCSI_CONNECTOR_RESET_HARD : 0;
- return ucsi_send_command(con->ucsi, &ctrl, NULL, 0);
+ return ucsi_send_command(con->ucsi, command, NULL, 0);
}
static int ucsi_reset_ppm(struct ucsi *ucsi)
{
- struct ucsi_control ctrl;
+ u64 command = UCSI_PPM_RESET;
unsigned long tmo;
+ u32 cci;
int ret;
- ctrl.raw_cmd = 0;
- ctrl.cmd.cmd = UCSI_PPM_RESET;
- trace_ucsi_command(&ctrl);
- ret = ucsi->ppm->cmd(ucsi->ppm, &ctrl);
- if (ret)
- goto err;
+ ret = ucsi->ops->async_write(ucsi, UCSI_CONTROL, &command,
+ sizeof(command));
+ if (ret < 0)
+ return ret;
tmo = jiffies + msecs_to_jiffies(UCSI_TIMEOUT_MS);
do {
- /* Here sync is critical. */
- ret = ucsi_sync(ucsi);
- if (ret)
- goto err;
+ if (time_is_before_jiffies(tmo))
+ return -ETIMEDOUT;
- if (ucsi->ppm->data->cci.reset_complete)
- break;
+ ret = ucsi->ops->read(ucsi, UCSI_CCI, &cci, sizeof(cci));
+ if (ret)
+ return ret;
/* If the PPM is still doing something else, reset it again. */
- if (ucsi->ppm->data->raw_cci) {
- dev_warn_ratelimited(ucsi->dev,
- "Failed to reset PPM! Trying again..\n");
-
- trace_ucsi_command(&ctrl);
- ret = ucsi->ppm->cmd(ucsi->ppm, &ctrl);
- if (ret)
- goto err;
+ if (cci & ~UCSI_CCI_RESET_COMPLETE) {
+ ret = ucsi->ops->async_write(ucsi, UCSI_CONTROL,
+ &command,
+ sizeof(command));
+ if (ret < 0)
+ return ret;
}
- /* Letting the PPM settle down. */
msleep(20);
+ } while (!(cci & UCSI_CCI_RESET_COMPLETE));
- ret = -ETIMEDOUT;
- } while (time_is_after_jiffies(tmo));
-
-err:
- trace_ucsi_reset_ppm(&ctrl, ret);
-
- return ret;
+ return 0;
}
-static int ucsi_role_cmd(struct ucsi_connector *con, struct ucsi_control *ctrl)
+static int ucsi_role_cmd(struct ucsi_connector *con, u64 command)
{
int ret;
- ret = ucsi_send_command(con->ucsi, ctrl, NULL, 0);
+ ret = ucsi_send_command(con->ucsi, command, NULL, 0);
if (ret == -ETIMEDOUT) {
- struct ucsi_control c;
+ u64 c;
/* PPM most likely stopped responding. Resetting everything. */
mutex_lock(&con->ucsi->ppm_lock);
ucsi_reset_ppm(con->ucsi);
mutex_unlock(&con->ucsi->ppm_lock);
- UCSI_CMD_SET_NTFY_ENABLE(c, UCSI_ENABLE_NTFY_ALL);
- ucsi_send_command(con->ucsi, &c, NULL, 0);
+ c = UCSI_SET_NOTIFICATION_ENABLE | UCSI_ENABLE_NTFY_ALL;
+ ucsi_send_command(con->ucsi, c, NULL, 0);
ucsi_reset_connector(con, true);
}
@@ -713,11 +665,11 @@ static int ucsi_role_cmd(struct ucsi_connector *con, struct ucsi_control *ctrl)
return ret;
}
-static int
-ucsi_dr_swap(const struct typec_capability *cap, enum typec_data_role role)
+static int ucsi_dr_swap(struct typec_port *port, enum typec_data_role role)
{
- struct ucsi_connector *con = to_ucsi_connector(cap);
- struct ucsi_control ctrl;
+ struct ucsi_connector *con = typec_get_drvdata(port);
+ u8 partner_type;
+ u64 command;
int ret = 0;
mutex_lock(&con->lock);
@@ -727,14 +679,17 @@ ucsi_dr_swap(const struct typec_capability *cap, enum typec_data_role role)
goto out_unlock;
}
- if ((con->status.partner_type == UCSI_CONSTAT_PARTNER_TYPE_DFP &&
+ partner_type = UCSI_CONSTAT_PARTNER_TYPE(con->status.flags);
+ if ((partner_type == UCSI_CONSTAT_PARTNER_TYPE_DFP &&
role == TYPEC_DEVICE) ||
- (con->status.partner_type == UCSI_CONSTAT_PARTNER_TYPE_UFP &&
+ (partner_type == UCSI_CONSTAT_PARTNER_TYPE_UFP &&
role == TYPEC_HOST))
goto out_unlock;
- UCSI_CMD_SET_UOR(ctrl, con, role);
- ret = ucsi_role_cmd(con, &ctrl);
+ command = UCSI_SET_UOR | UCSI_CONNECTOR_NUMBER(con->num);
+ command |= UCSI_SET_UOR_ROLE(role);
+ command |= UCSI_SET_UOR_ACCEPT_ROLE_SWAPS;
+ ret = ucsi_role_cmd(con, command);
if (ret < 0)
goto out_unlock;
@@ -748,11 +703,11 @@ out_unlock:
return ret < 0 ? ret : 0;
}
-static int
-ucsi_pr_swap(const struct typec_capability *cap, enum typec_role role)
+static int ucsi_pr_swap(struct typec_port *port, enum typec_role role)
{
- struct ucsi_connector *con = to_ucsi_connector(cap);
- struct ucsi_control ctrl;
+ struct ucsi_connector *con = typec_get_drvdata(port);
+ enum typec_role cur_role;
+ u64 command;
int ret = 0;
mutex_lock(&con->lock);
@@ -762,11 +717,15 @@ ucsi_pr_swap(const struct typec_capability *cap, enum typec_role role)
goto out_unlock;
}
- if (con->status.pwr_dir == role)
+ cur_role = !!(con->status.flags & UCSI_CONSTAT_PWR_DIR);
+
+ if (cur_role == role)
goto out_unlock;
- UCSI_CMD_SET_PDR(ctrl, con, role);
- ret = ucsi_role_cmd(con, &ctrl);
+ command = UCSI_SET_PDR | UCSI_CONNECTOR_NUMBER(con->num);
+ command |= UCSI_SET_PDR_ROLE(role);
+ command |= UCSI_SET_PDR_ACCEPT_ROLE_SWAPS;
+ ret = ucsi_role_cmd(con, command);
if (ret < 0)
goto out_unlock;
@@ -777,7 +736,8 @@ ucsi_pr_swap(const struct typec_capability *cap, enum typec_role role)
}
/* Something has gone wrong while swapping the role */
- if (con->status.pwr_op_mode != UCSI_CONSTAT_PWR_OPMODE_PD) {
+ if (UCSI_CONSTAT_PWR_OPMODE(con->status.flags) !=
+ UCSI_CONSTAT_PWR_OPMODE_PD) {
ucsi_reset_connector(con, true);
ret = -EPROTO;
}
@@ -788,6 +748,11 @@ out_unlock:
return ret;
}
+static const struct typec_operations ucsi_ops = {
+ .dr_set = ucsi_dr_swap,
+ .pr_set = ucsi_pr_swap
+};
+
static struct fwnode_handle *ucsi_find_fwnode(struct ucsi_connector *con)
{
struct fwnode_handle *fwnode;
@@ -804,18 +769,19 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
struct ucsi_connector *con = &ucsi->connector[index];
struct typec_capability *cap = &con->typec_cap;
enum typec_accessory *accessory = cap->accessory;
- struct ucsi_control ctrl;
+ u64 command;
int ret;
- INIT_WORK(&con->work, ucsi_connector_change);
+ INIT_WORK(&con->work, ucsi_handle_connector_change);
init_completion(&con->complete);
mutex_init(&con->lock);
con->num = index + 1;
con->ucsi = ucsi;
/* Get connector capability */
- UCSI_CMD_GET_CONNECTOR_CAPABILITY(ctrl, con->num);
- ret = ucsi_run_command(ucsi, &ctrl, &con->cap, sizeof(con->cap));
+ command = UCSI_GET_CONNECTOR_CAPABILITY;
+ command |= UCSI_CONNECTOR_NUMBER(con->num);
+ ret = ucsi_run_command(ucsi, command, &con->cap, sizeof(con->cap));
if (ret < 0)
return ret;
@@ -826,11 +792,12 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
else if (con->cap.op_mode & UCSI_CONCAP_OPMODE_UFP)
cap->data = TYPEC_PORT_UFP;
- if (con->cap.provider && con->cap.consumer)
+ if ((con->cap.flags & UCSI_CONCAP_FLAG_PROVIDER) &&
+ (con->cap.flags & UCSI_CONCAP_FLAG_CONSUMER))
cap->type = TYPEC_PORT_DRP;
- else if (con->cap.provider)
+ else if (con->cap.flags & UCSI_CONCAP_FLAG_PROVIDER)
cap->type = TYPEC_PORT_SRC;
- else if (con->cap.consumer)
+ else if (con->cap.flags & UCSI_CONCAP_FLAG_CONSUMER)
cap->type = TYPEC_PORT_SNK;
cap->revision = ucsi->cap.typec_version;
@@ -843,8 +810,8 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
*accessory = TYPEC_ACCESSORY_DEBUG;
cap->fwnode = ucsi_find_fwnode(con);
- cap->dr_set = ucsi_dr_swap;
- cap->pr_set = ucsi_pr_swap;
+ cap->driver_data = con;
+ cap->ops = &ucsi_ops;
/* Register the connector */
con->port = typec_register_port(ucsi->dev, cap);
@@ -858,17 +825,15 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
con->num);
/* Get the status */
- UCSI_CMD_GET_CONNECTOR_STATUS(ctrl, con->num);
- ret = ucsi_run_command(ucsi, &ctrl, &con->status, sizeof(con->status));
+ command = UCSI_GET_CONNECTOR_STATUS | UCSI_CONNECTOR_NUMBER(con->num);
+ ret = ucsi_run_command(ucsi, command, &con->status,
+ sizeof(con->status));
if (ret < 0) {
dev_err(ucsi->dev, "con%d: failed to get status\n", con->num);
return 0;
}
- ucsi_pwr_opmode_change(con);
- typec_set_pwr_role(con->port, con->status.pwr_dir);
-
- switch (con->status.partner_type) {
+ switch (UCSI_CONSTAT_PARTNER_TYPE(con->status.flags)) {
case UCSI_CONSTAT_PARTNER_TYPE_UFP:
typec_set_data_role(con->port, TYPEC_HOST);
break;
@@ -880,8 +845,12 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
}
/* Check if there is already something connected */
- if (con->status.connected)
+ if (con->status.flags & UCSI_CONSTAT_CONNECTED) {
+ typec_set_pwr_role(con->port,
+ !!(con->status.flags & UCSI_CONSTAT_PWR_DIR));
+ ucsi_pwr_opmode_change(con);
ucsi_register_partner(con);
+ }
if (con->partner) {
ret = ucsi_register_altmodes(con, UCSI_RECIPIENT_SOP);
@@ -898,11 +867,16 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
return 0;
}
-static void ucsi_init(struct work_struct *work)
+/**
+ * ucsi_init - Initialize UCSI interface
+ * @ucsi: UCSI to be initialized
+ *
+ * Registers all ports @ucsi has and enables all notification events.
+ */
+int ucsi_init(struct ucsi *ucsi)
{
- struct ucsi *ucsi = container_of(work, struct ucsi, work);
struct ucsi_connector *con;
- struct ucsi_control ctrl;
+ u64 command;
int ret;
int i;
@@ -916,15 +890,15 @@ static void ucsi_init(struct work_struct *work)
}
/* Enable basic notifications */
- UCSI_CMD_SET_NTFY_ENABLE(ctrl, UCSI_ENABLE_NTFY_CMD_COMPLETE |
- UCSI_ENABLE_NTFY_ERROR);
- ret = ucsi_run_command(ucsi, &ctrl, NULL, 0);
+ command = UCSI_SET_NOTIFICATION_ENABLE;
+ command |= UCSI_ENABLE_NTFY_CMD_COMPLETE | UCSI_ENABLE_NTFY_ERROR;
+ ret = ucsi_run_command(ucsi, command, NULL, 0);
if (ret < 0)
goto err_reset;
/* Get PPM capabilities */
- UCSI_CMD_GET_CAPABILITY(ctrl);
- ret = ucsi_run_command(ucsi, &ctrl, &ucsi->cap, sizeof(ucsi->cap));
+ command = UCSI_GET_CAPABILITY;
+ ret = ucsi_run_command(ucsi, command, &ucsi->cap, sizeof(ucsi->cap));
if (ret < 0)
goto err_reset;
@@ -949,14 +923,14 @@ static void ucsi_init(struct work_struct *work)
}
/* Enable all notifications */
- UCSI_CMD_SET_NTFY_ENABLE(ctrl, UCSI_ENABLE_NTFY_ALL);
- ret = ucsi_run_command(ucsi, &ctrl, NULL, 0);
+ command = UCSI_SET_NOTIFICATION_ENABLE | UCSI_ENABLE_NTFY_ALL;
+ ret = ucsi_run_command(ucsi, command, NULL, 0);
if (ret < 0)
goto err_unregister;
mutex_unlock(&ucsi->ppm_lock);
- return;
+ return 0;
err_unregister:
for (con = ucsi->connector; con->port; con++) {
@@ -970,59 +944,115 @@ err_reset:
ucsi_reset_ppm(ucsi);
err:
mutex_unlock(&ucsi->ppm_lock);
- dev_err(ucsi->dev, "PPM init failed (%d)\n", ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ucsi_init);
+
+static void ucsi_init_work(struct work_struct *work)
+{
+ struct ucsi *ucsi = container_of(work, struct ucsi, work);
+ int ret;
+
+ ret = ucsi_init(ucsi);
+ if (ret)
+ dev_err(ucsi->dev, "PPM init failed (%d)\n", ret);
}
/**
- * ucsi_register_ppm - Register UCSI PPM Interface
- * @dev: Device interface to the PPM
- * @ppm: The PPM interface
- *
- * Allocates UCSI instance, associates it with @ppm and returns it to the
- * caller, and schedules initialization of the interface.
+ * ucsi_get_drvdata - Return private driver data pointer
+ * @ucsi: UCSI interface
+ */
+void *ucsi_get_drvdata(struct ucsi *ucsi)
+{
+ return ucsi->driver_data;
+}
+EXPORT_SYMBOL_GPL(ucsi_get_drvdata);
+
+/**
+ * ucsi_set_drvdata - Assign private driver data pointer
+ * @ucsi: UCSI interface
+ * @data: Private data pointer
*/
-struct ucsi *ucsi_register_ppm(struct device *dev, struct ucsi_ppm *ppm)
+void ucsi_set_drvdata(struct ucsi *ucsi, void *data)
+{
+ ucsi->driver_data = data;
+}
+EXPORT_SYMBOL_GPL(ucsi_set_drvdata);
+
+/**
+ * ucsi_create - Allocate UCSI instance
+ * @dev: Device interface to the PPM (Platform Policy Manager)
+ * @ops: I/O routines
+ */
+struct ucsi *ucsi_create(struct device *dev, const struct ucsi_operations *ops)
{
struct ucsi *ucsi;
+ if (!ops || !ops->read || !ops->sync_write || !ops->async_write)
+ return ERR_PTR(-EINVAL);
+
ucsi = kzalloc(sizeof(*ucsi), GFP_KERNEL);
if (!ucsi)
return ERR_PTR(-ENOMEM);
- INIT_WORK(&ucsi->work, ucsi_init);
- init_completion(&ucsi->complete);
+ INIT_WORK(&ucsi->work, ucsi_init_work);
mutex_init(&ucsi->ppm_lock);
-
ucsi->dev = dev;
- ucsi->ppm = ppm;
+ ucsi->ops = ops;
+
+ return ucsi;
+}
+EXPORT_SYMBOL_GPL(ucsi_create);
+
+/**
+ * ucsi_destroy - Free UCSI instance
+ * @ucsi: UCSI instance to be freed
+ */
+void ucsi_destroy(struct ucsi *ucsi)
+{
+ kfree(ucsi);
+}
+EXPORT_SYMBOL_GPL(ucsi_destroy);
+
+/**
+ * ucsi_register - Register UCSI interface
+ * @ucsi: UCSI instance
+ */
+int ucsi_register(struct ucsi *ucsi)
+{
+ int ret;
+
+ ret = ucsi->ops->read(ucsi, UCSI_VERSION, &ucsi->version,
+ sizeof(ucsi->version));
+ if (ret)
+ return ret;
+
+ if (!ucsi->version)
+ return -ENODEV;
- /*
- * Communication with the PPM takes a lot of time. It is not reasonable
- * to initialize the driver here. Using a work for now.
- */
queue_work(system_long_wq, &ucsi->work);
- return ucsi;
+ return 0;
}
-EXPORT_SYMBOL_GPL(ucsi_register_ppm);
+EXPORT_SYMBOL_GPL(ucsi_register);
/**
- * ucsi_unregister_ppm - Unregister UCSI PPM Interface
- * @ucsi: struct ucsi associated with the PPM
+ * ucsi_unregister - Unregister UCSI interface
+ * @ucsi: UCSI interface to be unregistered
*
- * Unregister UCSI PPM that was created with ucsi_register().
+ * Unregister UCSI interface that was registered with ucsi_register().
*/
-void ucsi_unregister_ppm(struct ucsi *ucsi)
+void ucsi_unregister(struct ucsi *ucsi)
{
- struct ucsi_control ctrl;
+ u64 cmd = UCSI_SET_NOTIFICATION_ENABLE;
int i;
/* Make sure that we are not in the middle of driver initialization */
cancel_work_sync(&ucsi->work);
- /* Disable everything except command complete notification */
- UCSI_CMD_SET_NTFY_ENABLE(ctrl, UCSI_ENABLE_NTFY_CMD_COMPLETE)
- ucsi_send_command(ucsi, &ctrl, NULL, 0);
+ /* Disable notifications */
+ ucsi->ops->async_write(ucsi, UCSI_CONTROL, &cmd, sizeof(cmd));
for (i = 0; i < ucsi->cap.num_connectors; i++) {
cancel_work_sync(&ucsi->connector[i].work);
@@ -1032,12 +1062,9 @@ void ucsi_unregister_ppm(struct ucsi *ucsi)
typec_unregister_port(ucsi->connector[i].port);
}
- ucsi_reset_ppm(ucsi);
-
kfree(ucsi->connector);
- kfree(ucsi);
}
-EXPORT_SYMBOL_GPL(ucsi_unregister_ppm);
+EXPORT_SYMBOL_GPL(ucsi_unregister);
MODULE_AUTHOR("Heikki Krogerus <heikki.krogerus@linux.intel.com>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h
index de87d0b8319d..8569bbd3762f 100644
--- a/drivers/usb/typec/ucsi/ucsi.h
+++ b/drivers/usb/typec/ucsi/ucsi.h
@@ -10,177 +10,55 @@
/* -------------------------------------------------------------------------- */
-/* Command Status and Connector Change Indication (CCI) data structure */
-struct ucsi_cci {
- u8:1; /* reserved */
- u8 connector_change:7;
- u8 data_length;
- u16:9; /* reserved */
- u16 not_supported:1;
- u16 cancel_complete:1;
- u16 reset_complete:1;
- u16 busy:1;
- u16 ack_complete:1;
- u16 error:1;
- u16 cmd_complete:1;
-} __packed;
-
-/* Default fields in CONTROL data structure */
-struct ucsi_command {
- u8 cmd;
- u8 length;
- u64 data:48;
-} __packed;
-
-/* ACK Command structure */
-struct ucsi_ack_cmd {
- u8 cmd;
- u8 length;
- u8 cci_ack:1;
- u8 cmd_ack:1;
- u8:6; /* reserved */
-} __packed;
-
-/* Connector Reset Command structure */
-struct ucsi_con_rst {
- u8 cmd;
- u8 length;
- u8 con_num:7;
- u8 hard_reset:1;
-} __packed;
-
-/* Set USB Operation Mode Command structure */
-struct ucsi_uor_cmd {
- u8 cmd;
- u8 length;
- u16 con_num:7;
- u16 role:3;
-#define UCSI_UOR_ROLE_DFP BIT(0)
-#define UCSI_UOR_ROLE_UFP BIT(1)
-#define UCSI_UOR_ROLE_DRP BIT(2)
- u16:6; /* reserved */
-} __packed;
-
-/* Get Alternate Modes Command structure */
-struct ucsi_altmode_cmd {
- u8 cmd;
- u8 length;
- u8 recipient;
-#define UCSI_RECIPIENT_CON 0
-#define UCSI_RECIPIENT_SOP 1
-#define UCSI_RECIPIENT_SOP_P 2
-#define UCSI_RECIPIENT_SOP_PP 3
- u8 con_num;
- u8 offset;
- u8 num_altmodes;
-} __packed;
+struct ucsi;
-struct ucsi_control {
- union {
- u64 raw_cmd;
- struct ucsi_command cmd;
- struct ucsi_uor_cmd uor;
- struct ucsi_ack_cmd ack;
- struct ucsi_con_rst con_rst;
- struct ucsi_altmode_cmd alt;
- };
+/* UCSI offsets (Bytes) */
+#define UCSI_VERSION 0
+#define UCSI_CCI 4
+#define UCSI_CONTROL 8
+#define UCSI_MESSAGE_IN 16
+#define UCSI_MESSAGE_OUT 32
+
+/* Command Status and Connector Change Indication (CCI) bits */
+#define UCSI_CCI_CONNECTOR(_c_) (((_c_) & GENMASK(7, 0)) >> 1)
+#define UCSI_CCI_LENGTH(_c_) (((_c_) & GENMASK(15, 8)) >> 8)
+#define UCSI_CCI_NOT_SUPPORTED BIT(25)
+#define UCSI_CCI_CANCEL_COMPLETE BIT(26)
+#define UCSI_CCI_RESET_COMPLETE BIT(27)
+#define UCSI_CCI_BUSY BIT(28)
+#define UCSI_CCI_ACK_COMPLETE BIT(29)
+#define UCSI_CCI_ERROR BIT(30)
+#define UCSI_CCI_COMMAND_COMPLETE BIT(31)
+
+/**
+ * struct ucsi_operations - UCSI I/O operations
+ * @read: Read operation
+ * @sync_write: Blocking write operation
+ * @async_write: Non-blocking write operation
+ *
+ * Read and write routines for UCSI interface. @sync_write must wait for the
+ * Command Completion Event from the PPM before returning, and @async_write must
+ * return immediately after sending the data to the PPM.
+ */
+struct ucsi_operations {
+ int (*read)(struct ucsi *ucsi, unsigned int offset,
+ void *val, size_t val_len);
+ int (*sync_write)(struct ucsi *ucsi, unsigned int offset,
+ const void *val, size_t val_len);
+ int (*async_write)(struct ucsi *ucsi, unsigned int offset,
+ const void *val, size_t val_len);
};
-#define __UCSI_CMD(_ctrl_, _cmd_) \
-{ \
- (_ctrl_).raw_cmd = 0; \
- (_ctrl_).cmd.cmd = _cmd_; \
-}
-
-/* Helper for preparing ucsi_control for CONNECTOR_RESET command. */
-#define UCSI_CMD_CONNECTOR_RESET(_ctrl_, _con_, _hard_) \
-{ \
- __UCSI_CMD(_ctrl_, UCSI_CONNECTOR_RESET) \
- (_ctrl_).con_rst.con_num = (_con_)->num; \
- (_ctrl_).con_rst.hard_reset = _hard_; \
-}
-
-/* Helper for preparing ucsi_control for ACK_CC_CI command. */
-#define UCSI_CMD_ACK(_ctrl_, _ack_) \
-{ \
- __UCSI_CMD(_ctrl_, UCSI_ACK_CC_CI) \
- (_ctrl_).ack.cci_ack = ((_ack_) == UCSI_ACK_EVENT); \
- (_ctrl_).ack.cmd_ack = ((_ack_) == UCSI_ACK_CMD); \
-}
-
-/* Helper for preparing ucsi_control for SET_NOTIFY_ENABLE command. */
-#define UCSI_CMD_SET_NTFY_ENABLE(_ctrl_, _ntfys_) \
-{ \
- __UCSI_CMD(_ctrl_, UCSI_SET_NOTIFICATION_ENABLE) \
- (_ctrl_).cmd.data = _ntfys_; \
-}
-
-/* Helper for preparing ucsi_control for GET_CAPABILITY command. */
-#define UCSI_CMD_GET_CAPABILITY(_ctrl_) \
-{ \
- __UCSI_CMD(_ctrl_, UCSI_GET_CAPABILITY) \
-}
-
-/* Helper for preparing ucsi_control for GET_CONNECTOR_CAPABILITY command. */
-#define UCSI_CMD_GET_CONNECTOR_CAPABILITY(_ctrl_, _con_) \
-{ \
- __UCSI_CMD(_ctrl_, UCSI_GET_CONNECTOR_CAPABILITY) \
- (_ctrl_).cmd.data = _con_; \
-}
+struct ucsi *ucsi_create(struct device *dev, const struct ucsi_operations *ops);
+void ucsi_destroy(struct ucsi *ucsi);
+int ucsi_register(struct ucsi *ucsi);
+void ucsi_unregister(struct ucsi *ucsi);
+void *ucsi_get_drvdata(struct ucsi *ucsi);
+void ucsi_set_drvdata(struct ucsi *ucsi, void *data);
-/* Helper for preparing ucsi_control for GET_ALTERNATE_MODES command. */
-#define UCSI_CMD_GET_ALTERNATE_MODES(_ctrl_, _r_, _con_num_, _o_, _num_)\
-{ \
- __UCSI_CMD((_ctrl_), UCSI_GET_ALTERNATE_MODES) \
- _ctrl_.alt.recipient = (_r_); \
- _ctrl_.alt.con_num = (_con_num_); \
- _ctrl_.alt.offset = (_o_); \
- _ctrl_.alt.num_altmodes = (_num_) - 1; \
-}
+void ucsi_connector_change(struct ucsi *ucsi, u8 num);
-/* Helper for preparing ucsi_control for GET_CAM_SUPPORTED command. */
-#define UCSI_CMD_GET_CAM_SUPPORTED(_ctrl_, _con_) \
-{ \
- __UCSI_CMD((_ctrl_), UCSI_GET_CAM_SUPPORTED) \
- _ctrl_.cmd.data = (_con_); \
-}
-
-/* Helper for preparing ucsi_control for GET_CAM_SUPPORTED command. */
-#define UCSI_CMD_GET_CURRENT_CAM(_ctrl_, _con_) \
-{ \
- __UCSI_CMD((_ctrl_), UCSI_GET_CURRENT_CAM) \
- _ctrl_.cmd.data = (_con_); \
-}
-
-/* Helper for preparing ucsi_control for GET_CONNECTOR_STATUS command. */
-#define UCSI_CMD_GET_CONNECTOR_STATUS(_ctrl_, _con_) \
-{ \
- __UCSI_CMD(_ctrl_, UCSI_GET_CONNECTOR_STATUS) \
- (_ctrl_).cmd.data = _con_; \
-}
-
-#define __UCSI_ROLE(_ctrl_, _cmd_, _con_num_) \
-{ \
- __UCSI_CMD(_ctrl_, _cmd_) \
- (_ctrl_).uor.con_num = _con_num_; \
- (_ctrl_).uor.role = UCSI_UOR_ROLE_DRP; \
-}
-
-/* Helper for preparing ucsi_control for SET_UOR command. */
-#define UCSI_CMD_SET_UOR(_ctrl_, _con_, _role_) \
-{ \
- __UCSI_ROLE(_ctrl_, UCSI_SET_UOR, (_con_)->num) \
- (_ctrl_).uor.role |= (_role_) == TYPEC_HOST ? UCSI_UOR_ROLE_DFP : \
- UCSI_UOR_ROLE_UFP; \
-}
-
-/* Helper for preparing ucsi_control for SET_PDR command. */
-#define UCSI_CMD_SET_PDR(_ctrl_, _con_, _role_) \
-{ \
- __UCSI_ROLE(_ctrl_, UCSI_SET_PDR, (_con_)->num) \
- (_ctrl_).uor.role |= (_role_) == TYPEC_SOURCE ? UCSI_UOR_ROLE_DFP : \
- UCSI_UOR_ROLE_UFP; \
-}
+/* -------------------------------------------------------------------------- */
/* Commands */
#define UCSI_PPM_RESET 0x01
@@ -203,24 +81,49 @@ struct ucsi_control {
#define UCSI_GET_CONNECTOR_STATUS 0x12
#define UCSI_GET_ERROR_STATUS 0x13
-/* ACK_CC_CI commands */
-#define UCSI_ACK_EVENT 1
-#define UCSI_ACK_CMD 2
-
-/* Bits for SET_NOTIFICATION_ENABLE command */
-#define UCSI_ENABLE_NTFY_CMD_COMPLETE BIT(0)
-#define UCSI_ENABLE_NTFY_EXT_PWR_SRC_CHANGE BIT(1)
-#define UCSI_ENABLE_NTFY_PWR_OPMODE_CHANGE BIT(2)
-#define UCSI_ENABLE_NTFY_CAP_CHANGE BIT(5)
-#define UCSI_ENABLE_NTFY_PWR_LEVEL_CHANGE BIT(6)
-#define UCSI_ENABLE_NTFY_PD_RESET_COMPLETE BIT(7)
-#define UCSI_ENABLE_NTFY_CAM_CHANGE BIT(8)
-#define UCSI_ENABLE_NTFY_BAT_STATUS_CHANGE BIT(9)
-#define UCSI_ENABLE_NTFY_PARTNER_CHANGE BIT(11)
-#define UCSI_ENABLE_NTFY_PWR_DIR_CHANGE BIT(12)
-#define UCSI_ENABLE_NTFY_CONNECTOR_CHANGE BIT(14)
-#define UCSI_ENABLE_NTFY_ERROR BIT(15)
-#define UCSI_ENABLE_NTFY_ALL 0xdbe7
+#define UCSI_CONNECTOR_NUMBER(_num_) ((u64)(_num_) << 16)
+
+/* CONNECTOR_RESET command bits */
+#define UCSI_CONNECTOR_RESET_HARD BIT(23) /* Deprecated in v1.1 */
+
+/* ACK_CC_CI bits */
+#define UCSI_ACK_CONNECTOR_CHANGE BIT(16)
+#define UCSI_ACK_COMMAND_COMPLETE BIT(17)
+
+/* SET_NOTIFICATION_ENABLE command bits */
+#define UCSI_ENABLE_NTFY_CMD_COMPLETE BIT(16)
+#define UCSI_ENABLE_NTFY_EXT_PWR_SRC_CHANGE BIT(17)
+#define UCSI_ENABLE_NTFY_PWR_OPMODE_CHANGE BIT(18)
+#define UCSI_ENABLE_NTFY_CAP_CHANGE BIT(19)
+#define UCSI_ENABLE_NTFY_PWR_LEVEL_CHANGE BIT(20)
+#define UCSI_ENABLE_NTFY_PD_RESET_COMPLETE BIT(21)
+#define UCSI_ENABLE_NTFY_CAM_CHANGE BIT(22)
+#define UCSI_ENABLE_NTFY_BAT_STATUS_CHANGE BIT(23)
+#define UCSI_ENABLE_NTFY_PARTNER_CHANGE BIT(24)
+#define UCSI_ENABLE_NTFY_PWR_DIR_CHANGE BIT(25)
+#define UCSI_ENABLE_NTFY_CONNECTOR_CHANGE BIT(26)
+#define UCSI_ENABLE_NTFY_ERROR BIT(27)
+#define UCSI_ENABLE_NTFY_ALL 0xdbe70000
+
+/* SET_UOR command bits */
+#define UCSI_SET_UOR_ROLE(_r_) (((_r_) == TYPEC_HOST ? 1 : 2) << 23)
+#define UCSI_SET_UOR_ACCEPT_ROLE_SWAPS BIT(25)
+
+/* SET_PDR command bits */
+#define UCSI_SET_PDR_ROLE(_r_) (((_r_) == TYPEC_SOURCE ? 1 : 2) << 23)
+#define UCSI_SET_PDR_ACCEPT_ROLE_SWAPS BIT(25)
+
+/* GET_ALTERNATE_MODES command bits */
+#define UCSI_GET_ALTMODE_RECIPIENT(_r_) ((u64)(_r_) << 16)
+#define UCSI_RECIPIENT_CON 0
+#define UCSI_RECIPIENT_SOP 1
+#define UCSI_RECIPIENT_SOP_P 2
+#define UCSI_RECIPIENT_SOP_PP 3
+#define UCSI_GET_ALTMODE_CONNECTOR_NUMBER(_r_) ((u64)(_r_) << 24)
+#define UCSI_GET_ALTMODE_OFFSET(_r_) ((u64)(_r_) << 32)
+#define UCSI_GET_ALTMODE_NUM_ALTMODES(_r_) ((u64)(_r_) << 40)
+
+/* -------------------------------------------------------------------------- */
/* Error information returned by PPM in response to GET_ERROR_STATUS command. */
#define UCSI_ERROR_UNREGONIZED_CMD BIT(0)
@@ -230,6 +133,12 @@ struct ucsi_control {
#define UCSI_ERROR_CC_COMMUNICATION_ERR BIT(4)
#define UCSI_ERROR_DEAD_BATTERY BIT(5)
#define UCSI_ERROR_CONTRACT_NEGOTIATION_FAIL BIT(6)
+#define UCSI_ERROR_OVERCURRENT BIT(7)
+#define UCSI_ERROR_UNDEFINED BIT(8)
+#define UCSI_ERROR_PARTNER_REJECTED_SWAP BIT(9)
+#define UCSI_ERROR_HARD_RESET BIT(10)
+#define UCSI_ERROR_PPM_POLICY_CONFLICT BIT(11)
+#define UCSI_ERROR_SWAP_REJECTED BIT(12)
/* Data structure filled by PPM in response to GET_CAPABILITY command. */
struct ucsi_capability {
@@ -241,8 +150,8 @@ struct ucsi_capability {
#define UCSI_CAP_ATTR_POWER_AC_SUPPLY BIT(8)
#define UCSI_CAP_ATTR_POWER_OTHER BIT(10)
#define UCSI_CAP_ATTR_POWER_VBUS BIT(14)
- u32 num_connectors:8;
- u32 features:24;
+ u8 num_connectors;
+ u8 features;
#define UCSI_CAP_SET_UOM BIT(0)
#define UCSI_CAP_SET_PDM BIT(1)
#define UCSI_CAP_ALT_MODE_DETAILS BIT(2)
@@ -251,8 +160,9 @@ struct ucsi_capability {
#define UCSI_CAP_CABLE_DETAILS BIT(5)
#define UCSI_CAP_EXT_SUPPLY_NOTIFICATIONS BIT(6)
#define UCSI_CAP_PD_RESET BIT(7)
+ u16 reserved_1;
u8 num_alt_modes;
- u8 reserved;
+ u8 reserved_2;
u16 bc_version;
u16 pd_version;
u16 typec_version;
@@ -269,9 +179,9 @@ struct ucsi_connector_capability {
#define UCSI_CONCAP_OPMODE_USB2 BIT(5)
#define UCSI_CONCAP_OPMODE_USB3 BIT(6)
#define UCSI_CONCAP_OPMODE_ALT_MODE BIT(7)
- u8 provider:1;
- u8 consumer:1;
- u8:6; /* reserved */
+ u8 flags;
+#define UCSI_CONCAP_FLAG_PROVIDER BIT(0)
+#define UCSI_CONCAP_FLAG_CONSUMER BIT(1)
} __packed;
struct ucsi_altmode {
@@ -283,18 +193,17 @@ struct ucsi_altmode {
struct ucsi_cable_property {
u16 speed_supported;
u8 current_capability;
- u8 vbus_in_cable:1;
- u8 active_cable:1;
- u8 directionality:1;
- u8 plug_type:2;
-#define UCSI_CABLE_PROPERTY_PLUG_TYPE_A 0
-#define UCSI_CABLE_PROPERTY_PLUG_TYPE_B 1
-#define UCSI_CABLE_PROPERTY_PLUG_TYPE_C 2
-#define UCSI_CABLE_PROPERTY_PLUG_OTHER 3
- u8 mode_support:1;
- u8:2; /* reserved */
- u8 latency:4;
- u8:4; /* reserved */
+ u8 flags;
+#define UCSI_CABLE_PROP_FLAG_VBUS_IN_CABLE BIT(0)
+#define UCSI_CABLE_PROP_FLAG_ACTIVE_CABLE BIT(1)
+#define UCSI_CABLE_PROP_FLAG_DIRECTIONALITY BIT(2)
+#define UCSI_CABLE_PROP_FLAG_PLUG_TYPE(_f_) ((_f_) & GENMASK(3, 0))
+#define UCSI_CABLE_PROPERTY_PLUG_TYPE_A 0
+#define UCSI_CABLE_PROPERTY_PLUG_TYPE_B 1
+#define UCSI_CABLE_PROPERTY_PLUG_TYPE_C 2
+#define UCSI_CABLE_PROPERTY_PLUG_OTHER 3
+#define UCSI_CABLE_PROP_MODE_SUPPORT BIT(5)
+ u8 latency;
} __packed;
/* Data structure filled by PPM in response to GET_CONNECTOR_STATUS command. */
@@ -311,83 +220,47 @@ struct ucsi_connector_status {
#define UCSI_CONSTAT_POWER_DIR_CHANGE BIT(12)
#define UCSI_CONSTAT_CONNECT_CHANGE BIT(14)
#define UCSI_CONSTAT_ERROR BIT(15)
- u16 pwr_op_mode:3;
-#define UCSI_CONSTAT_PWR_OPMODE_NONE 0
-#define UCSI_CONSTAT_PWR_OPMODE_DEFAULT 1
-#define UCSI_CONSTAT_PWR_OPMODE_BC 2
-#define UCSI_CONSTAT_PWR_OPMODE_PD 3
-#define UCSI_CONSTAT_PWR_OPMODE_TYPEC1_5 4
-#define UCSI_CONSTAT_PWR_OPMODE_TYPEC3_0 5
- u16 connected:1;
- u16 pwr_dir:1;
- u16 partner_flags:8;
-#define UCSI_CONSTAT_PARTNER_FLAG_USB BIT(0)
-#define UCSI_CONSTAT_PARTNER_FLAG_ALT_MODE BIT(1)
- u16 partner_type:3;
-#define UCSI_CONSTAT_PARTNER_TYPE_DFP 1
-#define UCSI_CONSTAT_PARTNER_TYPE_UFP 2
-#define UCSI_CONSTAT_PARTNER_TYPE_CABLE 3 /* Powered Cable */
-#define UCSI_CONSTAT_PARTNER_TYPE_CABLE_AND_UFP 4 /* Powered Cable */
-#define UCSI_CONSTAT_PARTNER_TYPE_DEBUG 5
-#define UCSI_CONSTAT_PARTNER_TYPE_AUDIO 6
+ u16 flags;
+#define UCSI_CONSTAT_PWR_OPMODE(_f_) ((_f_) & GENMASK(2, 0))
+#define UCSI_CONSTAT_PWR_OPMODE_NONE 0
+#define UCSI_CONSTAT_PWR_OPMODE_DEFAULT 1
+#define UCSI_CONSTAT_PWR_OPMODE_BC 2
+#define UCSI_CONSTAT_PWR_OPMODE_PD 3
+#define UCSI_CONSTAT_PWR_OPMODE_TYPEC1_5 4
+#define UCSI_CONSTAT_PWR_OPMODE_TYPEC3_0 5
+#define UCSI_CONSTAT_CONNECTED BIT(3)
+#define UCSI_CONSTAT_PWR_DIR BIT(4)
+#define UCSI_CONSTAT_PARTNER_FLAGS(_f_) (((_f_) & GENMASK(12, 5)) >> 5)
+#define UCSI_CONSTAT_PARTNER_FLAG_USB 1
+#define UCSI_CONSTAT_PARTNER_FLAG_ALT_MODE 2
+#define UCSI_CONSTAT_PARTNER_TYPE(_f_) (((_f_) & GENMASK(15, 13)) >> 13)
+#define UCSI_CONSTAT_PARTNER_TYPE_DFP 1
+#define UCSI_CONSTAT_PARTNER_TYPE_UFP 2
+#define UCSI_CONSTAT_PARTNER_TYPE_CABLE 3 /* Powered Cable */
+#define UCSI_CONSTAT_PARTNER_TYPE_CABLE_AND_UFP 4 /* Powered Cable */
+#define UCSI_CONSTAT_PARTNER_TYPE_DEBUG 5
+#define UCSI_CONSTAT_PARTNER_TYPE_AUDIO 6
u32 request_data_obj;
- u8 bc_status:2;
-#define UCSI_CONSTAT_BC_NOT_CHARGING 0
-#define UCSI_CONSTAT_BC_NOMINAL_CHARGING 1
-#define UCSI_CONSTAT_BC_SLOW_CHARGING 2
-#define UCSI_CONSTAT_BC_TRICKLE_CHARGING 3
- u8 provider_cap_limit_reason:4;
-#define UCSI_CONSTAT_CAP_PWR_LOWERED 0
-#define UCSI_CONSTAT_CAP_PWR_BUDGET_LIMIT 1
- u8:2; /* reserved */
+ u8 pwr_status;
+#define UCSI_CONSTAT_BC_STATUS(_p_) ((_p_) & GENMASK(2, 0))
+#define UCSI_CONSTAT_BC_NOT_CHARGING 0
+#define UCSI_CONSTAT_BC_NOMINAL_CHARGING 1
+#define UCSI_CONSTAT_BC_SLOW_CHARGING 2
+#define UCSI_CONSTAT_BC_TRICKLE_CHARGING 3
+#define UCSI_CONSTAT_PROVIDER_CAP_LIMIT(_p_) (((_p_) & GENMASK(6, 3)) >> 3)
+#define UCSI_CONSTAT_CAP_PWR_LOWERED 0
+#define UCSI_CONSTAT_CAP_PWR_BUDGET_LIMIT 1
} __packed;
/* -------------------------------------------------------------------------- */
-struct ucsi;
-
-struct ucsi_data {
- u16 version;
- u16 reserved;
- union {
- u32 raw_cci;
- struct ucsi_cci cci;
- };
- struct ucsi_control ctrl;
- u32 message_in[4];
- u32 message_out[4];
-} __packed;
-
-/*
- * struct ucsi_ppm - Interface to UCSI Platform Policy Manager
- * @data: memory location to the UCSI data structures
- * @cmd: UCSI command execution routine
- * @sync: Refresh UCSI mailbox (the data structures)
- */
-struct ucsi_ppm {
- struct ucsi_data *data;
- int (*cmd)(struct ucsi_ppm *, struct ucsi_control *);
- int (*sync)(struct ucsi_ppm *);
-};
-
-struct ucsi *ucsi_register_ppm(struct device *dev, struct ucsi_ppm *ppm);
-void ucsi_unregister_ppm(struct ucsi *ucsi);
-void ucsi_notify(struct ucsi *ucsi);
-
-/* -------------------------------------------------------------------------- */
-
-enum ucsi_status {
- UCSI_IDLE = 0,
- UCSI_BUSY,
- UCSI_ERROR,
-};
-
struct ucsi {
+ u16 version;
struct device *dev;
- struct ucsi_ppm *ppm;
+ struct driver_data *driver_data;
+
+ const struct ucsi_operations *ops;
- enum ucsi_status status;
- struct completion complete;
struct ucsi_capability cap;
struct ucsi_connector *connector;
@@ -426,7 +299,7 @@ struct ucsi_connector {
struct ucsi_connector_capability cap;
};
-int ucsi_send_command(struct ucsi *ucsi, struct ucsi_control *ctrl,
+int ucsi_send_command(struct ucsi *ucsi, u64 command,
void *retval, size_t size);
void ucsi_altmode_update_active(struct ucsi_connector *con);
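
With struct ucsi_control and the UCSI_CMD_*() helper macros gone, a command is now a bare u64 assembled by OR-ing the command code with shifted argument fields, and the CCI is a raw u32 decoded with the accessors above. An illustrative sketch, assuming a hypothetical caller that has already read the CCI dword:

static void example_handle_cci(struct ucsi *ucsi, u32 cci)
{
	/* Command code in the low byte; UCSI_CONNECTOR_NUMBER() shifts
	 * the connector index into bits 23:16 of the CONTROL word.
	 */
	u64 command = UCSI_GET_CONNECTOR_STATUS | UCSI_CONNECTOR_NUMBER(2);

	(void)command;	/* would be handed to ucsi_send_command() */

	if (cci & UCSI_CCI_BUSY)
		return;		/* PPM is still processing */

	if (cci & UCSI_CCI_COMMAND_COMPLETE)
		pr_info("%u bytes ready in MESSAGE_IN\n", UCSI_CCI_LENGTH(cci));

	if (UCSI_CCI_CONNECTOR(cci))	/* change indicator in bits 7:1 */
		ucsi_connector_change(ucsi, UCSI_CCI_CONNECTOR(cci));
}
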
diff --git a/drivers/usb/typec/ucsi/ucsi_acpi.c b/drivers/usb/typec/ucsi/ucsi_acpi.c
index a18112a83fae..3f1786170098 100644
--- a/drivers/usb/typec/ucsi/ucsi_acpi.c
+++ b/drivers/usb/typec/ucsi/ucsi_acpi.c
@@ -19,7 +19,9 @@
struct ucsi_acpi {
struct device *dev;
struct ucsi *ucsi;
- struct ucsi_ppm ppm;
+ void __iomem *base;
+ struct completion complete;
+ unsigned long flags;
guid_t guid;
};
@@ -39,27 +41,73 @@ static int ucsi_acpi_dsm(struct ucsi_acpi *ua, int func)
return 0;
}
-static int ucsi_acpi_cmd(struct ucsi_ppm *ppm, struct ucsi_control *ctrl)
+static int ucsi_acpi_read(struct ucsi *ucsi, unsigned int offset,
+ void *val, size_t val_len)
{
- struct ucsi_acpi *ua = container_of(ppm, struct ucsi_acpi, ppm);
+ struct ucsi_acpi *ua = ucsi_get_drvdata(ucsi);
+ int ret;
+
+ ret = ucsi_acpi_dsm(ua, UCSI_DSM_FUNC_READ);
+ if (ret)
+ return ret;
+
+ memcpy(val, (const void __force *)(ua->base + offset), val_len);
+
+ return 0;
+}
+
+static int ucsi_acpi_async_write(struct ucsi *ucsi, unsigned int offset,
+ const void *val, size_t val_len)
+{
+ struct ucsi_acpi *ua = ucsi_get_drvdata(ucsi);
- ppm->data->ctrl.raw_cmd = ctrl->raw_cmd;
+ memcpy((void __force *)(ua->base + offset), val, val_len);
return ucsi_acpi_dsm(ua, UCSI_DSM_FUNC_WRITE);
}
-static int ucsi_acpi_sync(struct ucsi_ppm *ppm)
+static int ucsi_acpi_sync_write(struct ucsi *ucsi, unsigned int offset,
+ const void *val, size_t val_len)
{
- struct ucsi_acpi *ua = container_of(ppm, struct ucsi_acpi, ppm);
+ struct ucsi_acpi *ua = ucsi_get_drvdata(ucsi);
+ int ret;
+
+ set_bit(COMMAND_PENDING, &ua->flags);
+
+ ret = ucsi_acpi_async_write(ucsi, offset, val, val_len);
+ if (ret)
+ goto out_clear_bit;
- return ucsi_acpi_dsm(ua, UCSI_DSM_FUNC_READ);
+ if (!wait_for_completion_timeout(&ua->complete, msecs_to_jiffies(5000)))
+ ret = -ETIMEDOUT;
+
+out_clear_bit:
+ clear_bit(COMMAND_PENDING, &ua->flags);
+
+ return ret;
}
+static const struct ucsi_operations ucsi_acpi_ops = {
+ .read = ucsi_acpi_read,
+ .sync_write = ucsi_acpi_sync_write,
+ .async_write = ucsi_acpi_async_write
+};
+
static void ucsi_acpi_notify(acpi_handle handle, u32 event, void *data)
{
struct ucsi_acpi *ua = data;
+ u32 cci;
+ int ret;
+
+ ret = ucsi_acpi_read(ua->ucsi, UCSI_CCI, &cci, sizeof(cci));
+ if (ret)
+ return;
- ucsi_notify(ua->ucsi);
+ if (test_bit(COMMAND_PENDING, &ua->flags) &&
+ cci & (UCSI_CCI_ACK_COMPLETE | UCSI_CCI_COMMAND_COMPLETE))
+ complete(&ua->complete);
+ else if (UCSI_CCI_CONNECTOR(cci))
+ ucsi_connector_change(ua->ucsi, UCSI_CCI_CONNECTOR(cci));
}
static int ucsi_acpi_probe(struct platform_device *pdev)
@@ -90,35 +138,39 @@ static int ucsi_acpi_probe(struct platform_device *pdev)
* it can not be requested here, and we can not use
* devm_ioremap_resource().
*/
- ua->ppm.data = devm_ioremap(&pdev->dev, res->start, resource_size(res));
- if (!ua->ppm.data)
+ ua->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ if (!ua->base)
return -ENOMEM;
- if (!ua->ppm.data->version)
- return -ENODEV;
-
ret = guid_parse(UCSI_DSM_UUID, &ua->guid);
if (ret)
return ret;
- ua->ppm.cmd = ucsi_acpi_cmd;
- ua->ppm.sync = ucsi_acpi_sync;
+ init_completion(&ua->complete);
ua->dev = &pdev->dev;
+ ua->ucsi = ucsi_create(&pdev->dev, &ucsi_acpi_ops);
+ if (IS_ERR(ua->ucsi))
+ return PTR_ERR(ua->ucsi);
+
+ ucsi_set_drvdata(ua->ucsi, ua);
+
status = acpi_install_notify_handler(ACPI_HANDLE(&pdev->dev),
ACPI_DEVICE_NOTIFY,
ucsi_acpi_notify, ua);
if (ACPI_FAILURE(status)) {
dev_err(&pdev->dev, "failed to install notify handler\n");
+ ucsi_destroy(ua->ucsi);
return -ENODEV;
}
- ua->ucsi = ucsi_register_ppm(&pdev->dev, &ua->ppm);
- if (IS_ERR(ua->ucsi)) {
+ ret = ucsi_register(ua->ucsi);
+ if (ret) {
acpi_remove_notify_handler(ACPI_HANDLE(&pdev->dev),
ACPI_DEVICE_NOTIFY,
ucsi_acpi_notify);
- return PTR_ERR(ua->ucsi);
+ ucsi_destroy(ua->ucsi);
+ return ret;
}
platform_set_drvdata(pdev, ua);
@@ -130,7 +182,8 @@ static int ucsi_acpi_remove(struct platform_device *pdev)
{
struct ucsi_acpi *ua = platform_get_drvdata(pdev);
- ucsi_unregister_ppm(ua->ucsi);
+ ucsi_unregister(ua->ucsi);
+ ucsi_destroy(ua->ucsi);
acpi_remove_notify_handler(ACPI_HANDLE(&pdev->dev), ACPI_DEVICE_NOTIFY,
ucsi_acpi_notify);
diff --git a/drivers/usb/typec/ucsi/ucsi_ccg.c b/drivers/usb/typec/ucsi/ucsi_ccg.c
index d772fce51905..3370b3fc37b1 100644
--- a/drivers/usb/typec/ucsi/ucsi_ccg.c
+++ b/drivers/usb/typec/ucsi/ucsi_ccg.c
@@ -176,8 +176,8 @@ struct ccg_resp {
struct ucsi_ccg {
struct device *dev;
struct ucsi *ucsi;
- struct ucsi_ppm ppm;
struct i2c_client *client;
+
struct ccg_dev_info info;
/* version info for boot, primary and secondary */
struct version_info version[FW2 + 1];
@@ -196,6 +196,8 @@ struct ucsi_ccg {
/* fw build with vendor information */
u16 fw_build;
struct work_struct pm_work;
+
+ struct completion complete;
};
static int ccg_read(struct ucsi_ccg *uc, u16 rab, u8 *data, u32 len)
@@ -243,7 +245,7 @@ static int ccg_read(struct ucsi_ccg *uc, u16 rab, u8 *data, u32 len)
return 0;
}
-static int ccg_write(struct ucsi_ccg *uc, u16 rab, u8 *data, u32 len)
+static int ccg_write(struct ucsi_ccg *uc, u16 rab, const u8 *data, u32 len)
{
struct i2c_client *client = uc->client;
unsigned char *buf;
@@ -317,88 +319,85 @@ static int ucsi_ccg_init(struct ucsi_ccg *uc)
return -ETIMEDOUT;
}
-static int ucsi_ccg_send_data(struct ucsi_ccg *uc)
+static int ucsi_ccg_read(struct ucsi *ucsi, unsigned int offset,
+ void *val, size_t val_len)
{
- u8 *ppm = (u8 *)uc->ppm.data;
- int status;
- u16 rab;
+ u16 reg = CCGX_RAB_UCSI_DATA_BLOCK(offset);
- rab = CCGX_RAB_UCSI_DATA_BLOCK(offsetof(struct ucsi_data, message_out));
- status = ccg_write(uc, rab, ppm +
- offsetof(struct ucsi_data, message_out),
- sizeof(uc->ppm.data->message_out));
- if (status < 0)
- return status;
-
- rab = CCGX_RAB_UCSI_DATA_BLOCK(offsetof(struct ucsi_data, ctrl));
- return ccg_write(uc, rab, ppm + offsetof(struct ucsi_data, ctrl),
- sizeof(uc->ppm.data->ctrl));
+ return ccg_read(ucsi_get_drvdata(ucsi), reg, val, val_len);
}
-static int ucsi_ccg_recv_data(struct ucsi_ccg *uc)
+static int ucsi_ccg_async_write(struct ucsi *ucsi, unsigned int offset,
+ const void *val, size_t val_len)
{
- u8 *ppm = (u8 *)uc->ppm.data;
- int status;
- u16 rab;
+ u16 reg = CCGX_RAB_UCSI_DATA_BLOCK(offset);
- rab = CCGX_RAB_UCSI_DATA_BLOCK(offsetof(struct ucsi_data, cci));
- status = ccg_read(uc, rab, ppm + offsetof(struct ucsi_data, cci),
- sizeof(uc->ppm.data->cci));
- if (status < 0)
- return status;
-
- rab = CCGX_RAB_UCSI_DATA_BLOCK(offsetof(struct ucsi_data, message_in));
- return ccg_read(uc, rab, ppm + offsetof(struct ucsi_data, message_in),
- sizeof(uc->ppm.data->message_in));
+ return ccg_write(ucsi_get_drvdata(ucsi), reg, val, val_len);
}
-static int ucsi_ccg_ack_interrupt(struct ucsi_ccg *uc)
+static int ucsi_ccg_sync_write(struct ucsi *ucsi, unsigned int offset,
+ const void *val, size_t val_len)
{
- int status;
- unsigned char data;
+ struct ucsi_ccg *uc = ucsi_get_drvdata(ucsi);
+ int ret;
- status = ccg_read(uc, CCGX_RAB_INTR_REG, &data, sizeof(data));
- if (status < 0)
- return status;
+ mutex_lock(&uc->lock);
+ pm_runtime_get_sync(uc->dev);
+ set_bit(DEV_CMD_PENDING, &uc->flags);
- return ccg_write(uc, CCGX_RAB_INTR_REG, &data, sizeof(data));
-}
+ ret = ucsi_ccg_async_write(ucsi, offset, val, val_len);
+ if (ret)
+ goto err_clear_bit;
-static int ucsi_ccg_sync(struct ucsi_ppm *ppm)
-{
- struct ucsi_ccg *uc = container_of(ppm, struct ucsi_ccg, ppm);
- int status;
+ if (!wait_for_completion_timeout(&uc->complete, msecs_to_jiffies(5000)))
+ ret = -ETIMEDOUT;
- status = ucsi_ccg_recv_data(uc);
- if (status < 0)
- return status;
+err_clear_bit:
+ clear_bit(DEV_CMD_PENDING, &uc->flags);
+ pm_runtime_put_sync(uc->dev);
+ mutex_unlock(&uc->lock);
- /* ack interrupt to allow next command to run */
- return ucsi_ccg_ack_interrupt(uc);
+ return ret;
}
-static int ucsi_ccg_cmd(struct ucsi_ppm *ppm, struct ucsi_control *ctrl)
-{
- struct ucsi_ccg *uc = container_of(ppm, struct ucsi_ccg, ppm);
-
- ppm->data->ctrl.raw_cmd = ctrl->raw_cmd;
- return ucsi_ccg_send_data(uc);
-}
+static const struct ucsi_operations ucsi_ccg_ops = {
+ .read = ucsi_ccg_read,
+ .sync_write = ucsi_ccg_sync_write,
+ .async_write = ucsi_ccg_async_write
+};
static irqreturn_t ccg_irq_handler(int irq, void *data)
{
+ u16 reg = CCGX_RAB_UCSI_DATA_BLOCK(UCSI_CCI);
struct ucsi_ccg *uc = data;
+ u8 intr_reg;
+ u32 cci;
+ int ret;
+
+ ret = ccg_read(uc, CCGX_RAB_INTR_REG, &intr_reg, sizeof(intr_reg));
+ if (ret)
+ return ret;
+
+ ret = ccg_read(uc, reg, (void *)&cci, sizeof(cci));
+ if (ret)
+ goto err_clear_irq;
+
+ if (UCSI_CCI_CONNECTOR(cci))
+ ucsi_connector_change(uc->ucsi, UCSI_CCI_CONNECTOR(cci));
+
+ if (test_bit(DEV_CMD_PENDING, &uc->flags) &&
+ cci & (UCSI_CCI_ACK_COMPLETE | UCSI_CCI_COMMAND_COMPLETE))
+ complete(&uc->complete);
- ucsi_notify(uc->ucsi);
+err_clear_irq:
+ ccg_write(uc, CCGX_RAB_INTR_REG, &intr_reg, sizeof(intr_reg));
return IRQ_HANDLED;
}
static void ccg_pm_workaround_work(struct work_struct *pm_work)
{
- struct ucsi_ccg *uc = container_of(pm_work, struct ucsi_ccg, pm_work);
-
- ucsi_notify(uc->ucsi);
+ ccg_irq_handler(0, container_of(pm_work, struct ucsi_ccg, pm_work));
}
static int get_fw_info(struct ucsi_ccg *uc)
@@ -1027,10 +1026,10 @@ static int ccg_restart(struct ucsi_ccg *uc)
return status;
}
- uc->ucsi = ucsi_register_ppm(dev, &uc->ppm);
- if (IS_ERR(uc->ucsi)) {
- dev_err(uc->dev, "ucsi_register_ppm failed\n");
- return PTR_ERR(uc->ucsi);
+ status = ucsi_register(uc->ucsi);
+ if (status) {
+ dev_err(uc->dev, "failed to register the interface\n");
+ return status;
}
return 0;
@@ -1047,7 +1046,7 @@ static void ccg_update_firmware(struct work_struct *work)
return;
if (flash_mode != FLASH_NOT_NEEDED) {
- ucsi_unregister_ppm(uc->ucsi);
+ ucsi_unregister(uc->ucsi);
free_irq(uc->irq, uc);
ccg_fw_update(uc, flash_mode);
@@ -1091,21 +1090,15 @@ static int ucsi_ccg_probe(struct i2c_client *client,
struct device *dev = &client->dev;
struct ucsi_ccg *uc;
int status;
- u16 rab;
uc = devm_kzalloc(dev, sizeof(*uc), GFP_KERNEL);
if (!uc)
return -ENOMEM;
- uc->ppm.data = devm_kzalloc(dev, sizeof(struct ucsi_data), GFP_KERNEL);
- if (!uc->ppm.data)
- return -ENOMEM;
-
- uc->ppm.cmd = ucsi_ccg_cmd;
- uc->ppm.sync = ucsi_ccg_sync;
uc->dev = dev;
uc->client = client;
mutex_init(&uc->lock);
+ init_completion(&uc->complete);
INIT_WORK(&uc->work, ccg_update_firmware);
INIT_WORK(&uc->pm_work, ccg_pm_workaround_work);
@@ -1133,30 +1126,25 @@ static int ucsi_ccg_probe(struct i2c_client *client,
if (uc->info.mode & CCG_DEVINFO_PDPORTS_MASK)
uc->port_num++;
+ uc->ucsi = ucsi_create(dev, &ucsi_ccg_ops);
+ if (IS_ERR(uc->ucsi))
+ return PTR_ERR(uc->ucsi);
+
+ ucsi_set_drvdata(uc->ucsi, uc);
+
status = request_threaded_irq(client->irq, NULL, ccg_irq_handler,
IRQF_ONESHOT | IRQF_TRIGGER_HIGH,
dev_name(dev), uc);
if (status < 0) {
dev_err(uc->dev, "request_threaded_irq failed - %d\n", status);
- return status;
+ goto out_ucsi_destroy;
}
uc->irq = client->irq;
- uc->ucsi = ucsi_register_ppm(dev, &uc->ppm);
- if (IS_ERR(uc->ucsi)) {
- dev_err(uc->dev, "ucsi_register_ppm failed\n");
- return PTR_ERR(uc->ucsi);
- }
-
- rab = CCGX_RAB_UCSI_DATA_BLOCK(offsetof(struct ucsi_data, version));
- status = ccg_read(uc, rab, (u8 *)(uc->ppm.data) +
- offsetof(struct ucsi_data, version),
- sizeof(uc->ppm.data->version));
- if (status < 0) {
- ucsi_unregister_ppm(uc->ucsi);
- return status;
- }
+ status = ucsi_register(uc->ucsi);
+ if (status)
+ goto out_free_irq;
i2c_set_clientdata(client, uc);
@@ -1167,6 +1155,13 @@ static int ucsi_ccg_probe(struct i2c_client *client,
pm_runtime_idle(uc->dev);
return 0;
+
+out_free_irq:
+ free_irq(uc->irq, uc);
+out_ucsi_destroy:
+ ucsi_destroy(uc->ucsi);
+
+ return status;
}
static int ucsi_ccg_remove(struct i2c_client *client)
@@ -1175,8 +1170,9 @@ static int ucsi_ccg_remove(struct i2c_client *client)
cancel_work_sync(&uc->pm_work);
cancel_work_sync(&uc->work);
- ucsi_unregister_ppm(uc->ucsi);
pm_runtime_disable(uc->dev);
+ ucsi_unregister(uc->ucsi);
+ ucsi_destroy(uc->ucsi);
free_irq(uc->irq, uc);
return 0;
diff --git a/drivers/usb/usbip/Kconfig b/drivers/usb/usbip/Kconfig
index 2f86b28fa3da..7bbae7a08642 100644
--- a/drivers/usb/usbip/Kconfig
+++ b/drivers/usb/usbip/Kconfig
@@ -4,6 +4,7 @@ config USBIP_CORE
tristate "USB/IP support"
depends on NET
select USB_COMMON
+ select SGL_ALLOC
---help---
This enables pushing USB packets over IP to allow remote
machines direct access to USB devices. It provides the
diff --git a/drivers/usb/usbip/stub_rx.c b/drivers/usb/usbip/stub_rx.c
index 66edfeea68fe..e2b019532234 100644
--- a/drivers/usb/usbip/stub_rx.c
+++ b/drivers/usb/usbip/stub_rx.c
@@ -470,18 +470,50 @@ static void stub_recv_cmd_submit(struct stub_device *sdev,
if (pipe == -1)
return;
+ /*
+ * Smatch reported the error case where use_sg is true and buf_len is 0.
+ * In this case, it adds SDEV_EVENT_ERROR_MALLOC, stub_priv is
+ * released by the stub event handler, and the connection is shut down.
+ */
priv = stub_priv_alloc(sdev, pdu);
if (!priv)
return;
buf_len = (unsigned long long)pdu->u.cmd_submit.transfer_buffer_length;
+ if (use_sg && !buf_len) {
+ dev_err(&udev->dev, "sg buffer with zero length\n");
+ goto err_malloc;
+ }
+
/* allocate urb transfer buffer, if needed */
if (buf_len) {
if (use_sg) {
sgl = sgl_alloc(buf_len, GFP_KERNEL, &nents);
if (!sgl)
goto err_malloc;
+
+ /* Check if the server's HCD supports SG */
+ if (!udev->bus->sg_tablesize) {
+ /*
+ * If the server's HCD doesn't support SG, break
+ * a single SG request into several URBs and map
+ * each SG list entry to the corresponding URB
+ * buffer. The previously allocated SG list is
+ * stored in priv->sgl (if the server's HCD
+ * supports SG, the SG list is stored only in
+ * urb->sg) and is used as an indicator that
+ * the server split a single SG request into
+ * several URBs. Later, priv->sgl is used by
+ * stub_complete() and stub_send_ret_submit() to
+ * reassemble the divided URBs.
+ */
+ support_sg = 0;
+ num_urbs = nents;
+ priv->completed_urbs = 0;
+ pdu->u.cmd_submit.transfer_flags &=
+ ~URB_DMA_MAP_SG;
+ }
} else {
buffer = kzalloc(buf_len, GFP_KERNEL);
if (!buffer)
@@ -489,24 +521,6 @@ static void stub_recv_cmd_submit(struct stub_device *sdev,
}
}
- /* Check if the server's HCD supports SG */
- if (use_sg && !udev->bus->sg_tablesize) {
- /*
- * If the server's HCD doesn't support SG, break a single SG
- * request into several URBs and map each SG list entry to
- * corresponding URB buffer. The previously allocated SG
- * list is stored in priv->sgl (If the server's HCD support SG,
- * SG list is stored only in urb->sg) and it is used as an
- * indicator that the server split single SG request into
- * several URBs. Later, priv->sgl is used by stub_complete() and
- * stub_send_ret_submit() to reassemble the divied URBs.
- */
- support_sg = 0;
- num_urbs = nents;
- priv->completed_urbs = 0;
- pdu->u.cmd_submit.transfer_flags &= ~URB_DMA_MAP_SG;
- }
-
/* allocate urb array */
priv->num_urbs = num_urbs;
priv->urbs = kmalloc_array(num_urbs, sizeof(*priv->urbs), GFP_KERNEL);
diff --git a/drivers/usb/usbip/stub_tx.c b/drivers/usb/usbip/stub_tx.c
index 36010a82b359..b1c2f6781cb3 100644
--- a/drivers/usb/usbip/stub_tx.c
+++ b/drivers/usb/usbip/stub_tx.c
@@ -291,7 +291,7 @@ static int stub_send_ret_submit(struct stub_device *sdev)
kfree(iov);
usbip_event_add(&sdev->ud,
SDEV_EVENT_ERROR_TCP);
- return -1;
+ return -1;
}
}
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index 9f57736fe15e..dde392b91bb3 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -384,6 +384,49 @@ static bool vhost_vsock_more_replies(struct vhost_vsock *vsock)
return val < vq->num;
}
+static struct virtio_transport vhost_transport = {
+ .transport = {
+ .module = THIS_MODULE,
+
+ .get_local_cid = vhost_transport_get_local_cid,
+
+ .init = virtio_transport_do_socket_init,
+ .destruct = virtio_transport_destruct,
+ .release = virtio_transport_release,
+ .connect = virtio_transport_connect,
+ .shutdown = virtio_transport_shutdown,
+ .cancel_pkt = vhost_transport_cancel_pkt,
+
+ .dgram_enqueue = virtio_transport_dgram_enqueue,
+ .dgram_dequeue = virtio_transport_dgram_dequeue,
+ .dgram_bind = virtio_transport_dgram_bind,
+ .dgram_allow = virtio_transport_dgram_allow,
+
+ .stream_enqueue = virtio_transport_stream_enqueue,
+ .stream_dequeue = virtio_transport_stream_dequeue,
+ .stream_has_data = virtio_transport_stream_has_data,
+ .stream_has_space = virtio_transport_stream_has_space,
+ .stream_rcvhiwat = virtio_transport_stream_rcvhiwat,
+ .stream_is_active = virtio_transport_stream_is_active,
+ .stream_allow = virtio_transport_stream_allow,
+
+ .notify_poll_in = virtio_transport_notify_poll_in,
+ .notify_poll_out = virtio_transport_notify_poll_out,
+ .notify_recv_init = virtio_transport_notify_recv_init,
+ .notify_recv_pre_block = virtio_transport_notify_recv_pre_block,
+ .notify_recv_pre_dequeue = virtio_transport_notify_recv_pre_dequeue,
+ .notify_recv_post_dequeue = virtio_transport_notify_recv_post_dequeue,
+ .notify_send_init = virtio_transport_notify_send_init,
+ .notify_send_pre_block = virtio_transport_notify_send_pre_block,
+ .notify_send_pre_enqueue = virtio_transport_notify_send_pre_enqueue,
+ .notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue,
+ .notify_buffer_size = virtio_transport_notify_buffer_size,
+
+ },
+
+ .send_pkt = vhost_transport_send_pkt,
+};
+
static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
{
struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
@@ -438,7 +481,7 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
/* Only accept correctly addressed packets */
if (le64_to_cpu(pkt->hdr.src_cid) == vsock->guest_cid)
- virtio_transport_recv_pkt(pkt);
+ virtio_transport_recv_pkt(&vhost_transport, pkt);
else
virtio_transport_free_pkt(pkt);
@@ -675,6 +718,12 @@ static int vhost_vsock_set_cid(struct vhost_vsock *vsock, u64 guest_cid)
if (guest_cid > U32_MAX)
return -EINVAL;
+ /* Refuse if CID is assigned to the guest->host transport (i.e. nested
+ * VM), to make the loopback work.
+ */
+ if (vsock_find_cid(guest_cid))
+ return -EADDRINUSE;
+
/* Refuse if CID is already in use */
mutex_lock(&vhost_vsock_mutex);
other = vhost_vsock_get(guest_cid);
@@ -786,57 +835,12 @@ static struct miscdevice vhost_vsock_misc = {
.fops = &vhost_vsock_fops,
};
-static struct virtio_transport vhost_transport = {
- .transport = {
- .get_local_cid = vhost_transport_get_local_cid,
-
- .init = virtio_transport_do_socket_init,
- .destruct = virtio_transport_destruct,
- .release = virtio_transport_release,
- .connect = virtio_transport_connect,
- .shutdown = virtio_transport_shutdown,
- .cancel_pkt = vhost_transport_cancel_pkt,
-
- .dgram_enqueue = virtio_transport_dgram_enqueue,
- .dgram_dequeue = virtio_transport_dgram_dequeue,
- .dgram_bind = virtio_transport_dgram_bind,
- .dgram_allow = virtio_transport_dgram_allow,
-
- .stream_enqueue = virtio_transport_stream_enqueue,
- .stream_dequeue = virtio_transport_stream_dequeue,
- .stream_has_data = virtio_transport_stream_has_data,
- .stream_has_space = virtio_transport_stream_has_space,
- .stream_rcvhiwat = virtio_transport_stream_rcvhiwat,
- .stream_is_active = virtio_transport_stream_is_active,
- .stream_allow = virtio_transport_stream_allow,
-
- .notify_poll_in = virtio_transport_notify_poll_in,
- .notify_poll_out = virtio_transport_notify_poll_out,
- .notify_recv_init = virtio_transport_notify_recv_init,
- .notify_recv_pre_block = virtio_transport_notify_recv_pre_block,
- .notify_recv_pre_dequeue = virtio_transport_notify_recv_pre_dequeue,
- .notify_recv_post_dequeue = virtio_transport_notify_recv_post_dequeue,
- .notify_send_init = virtio_transport_notify_send_init,
- .notify_send_pre_block = virtio_transport_notify_send_pre_block,
- .notify_send_pre_enqueue = virtio_transport_notify_send_pre_enqueue,
- .notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue,
-
- .set_buffer_size = virtio_transport_set_buffer_size,
- .set_min_buffer_size = virtio_transport_set_min_buffer_size,
- .set_max_buffer_size = virtio_transport_set_max_buffer_size,
- .get_buffer_size = virtio_transport_get_buffer_size,
- .get_min_buffer_size = virtio_transport_get_min_buffer_size,
- .get_max_buffer_size = virtio_transport_get_max_buffer_size,
- },
-
- .send_pkt = vhost_transport_send_pkt,
-};
-
static int __init vhost_vsock_init(void)
{
int ret;
- ret = vsock_core_init(&vhost_transport.transport);
+ ret = vsock_core_register(&vhost_transport.transport,
+ VSOCK_TRANSPORT_F_H2G);
if (ret < 0)
return ret;
return misc_register(&vhost_vsock_misc);
@@ -845,7 +849,7 @@ static int __init vhost_vsock_init(void)
static void __exit vhost_vsock_exit(void)
{
misc_deregister(&vhost_vsock_misc);
- vsock_core_exit();
+ vsock_core_unregister(&vhost_transport.transport);
};
module_init(vhost_vsock_init);
diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
index c6b3bdbbdbc9..de7b8382aba9 100644
--- a/drivers/video/console/vgacon.c
+++ b/drivers/video/console/vgacon.c
@@ -113,9 +113,9 @@ static int __init text_mode(char *str)
{
vgacon_text_mode_force = true;
- pr_warning("You have booted with nomodeset. This means your GPU drivers are DISABLED\n");
- pr_warning("Any video related functionality will be severely degraded, and you may not even be able to suspend the system properly\n");
- pr_warning("Unless you actually understand what nomodeset does, you should reboot without enabling it\n");
+ pr_warn("You have booted with nomodeset. This means your GPU drivers are DISABLED\n");
+ pr_warn("Any video related functionality will be severely degraded, and you may not even be able to suspend the system properly\n");
+ pr_warn("Unless you actually understand what nomodeset does, you should reboot without enabling it\n");
return 1;
}
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 226fbb995fb0..e05679c478e2 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -772,6 +772,13 @@ static unsigned long shrink_free_pages(struct virtio_balloon *vb,
return blocks_freed << VIRTIO_BALLOON_FREE_PAGE_ORDER;
}
+static unsigned long leak_balloon_pages(struct virtio_balloon *vb,
+ unsigned long pages_to_free)
+{
+ return leak_balloon(vb, pages_to_free * VIRTIO_BALLOON_PAGES_PER_PAGE) /
+ VIRTIO_BALLOON_PAGES_PER_PAGE;
+}
+
static unsigned long shrink_balloon_pages(struct virtio_balloon *vb,
unsigned long pages_to_free)
{
@@ -782,11 +789,10 @@ static unsigned long shrink_balloon_pages(struct virtio_balloon *vb,
* VIRTIO_BALLOON_ARRAY_PFNS_MAX balloon pages, so we call it
* multiple times to deflate pages till reaching pages_to_free.
*/
- while (vb->num_pages && pages_to_free) {
- pages_freed += leak_balloon(vb, pages_to_free) /
- VIRTIO_BALLOON_PAGES_PER_PAGE;
- pages_to_free -= pages_freed;
- }
+ while (vb->num_pages && pages_freed < pages_to_free)
+ pages_freed += leak_balloon_pages(vb,
+ pages_to_free - pages_freed);
+
update_balloon_size(vb);
return pages_freed;
@@ -799,7 +805,7 @@ static unsigned long virtio_balloon_shrinker_scan(struct shrinker *shrinker,
struct virtio_balloon *vb = container_of(shrinker,
struct virtio_balloon, shrinker);
- pages_to_free = sc->nr_to_scan * VIRTIO_BALLOON_PAGES_PER_PAGE;
+ pages_to_free = sc->nr_to_scan;
if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
pages_freed = shrink_free_pages(vb, pages_to_free);
@@ -820,7 +826,7 @@ static unsigned long virtio_balloon_shrinker_count(struct shrinker *shrinker,
unsigned long count;
count = vb->num_pages / VIRTIO_BALLOON_PAGES_PER_PAGE;
- count += vb->num_free_page_blocks >> VIRTIO_BALLOON_FREE_PAGE_ORDER;
+ count += vb->num_free_page_blocks << VIRTIO_BALLOON_FREE_PAGE_ORDER;
return count;
}
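
A worked unit check for the two shrinker hunks above; the 64KiB page size is assumed purely for illustration, since on 4KiB-page systems the conversion factor collapses to 1:

/*
 * The balloon protocol always counts 4KiB pages; the shrinker counts
 * kernel pages. With 64KiB kernel pages:
 *
 *   VIRTIO_BALLOON_PAGES_PER_PAGE = PAGE_SIZE >> VIRTIO_BALLOON_PFN_SHIFT
 *                                 = 65536 >> 12 = 16
 *
 * so vb->num_pages / 16 gives inflated kernel pages, and each free
 * page block covers 1 << VIRTIO_BALLOON_FREE_PAGE_ORDER kernel pages,
 * hence the shift left in virtio_balloon_shrinker_count(). The old
 * '>>' under-reported those blocks by roughly a factor of
 * 1 << (2 * VIRTIO_BALLOON_FREE_PAGE_ORDER).
 */
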
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index a8041e451e9e..867c7ebd3f10 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -583,7 +583,7 @@ unmap_release:
kfree(desc);
END_USE(vq);
- return -EIO;
+ return -ENOMEM;
}
static bool virtqueue_kick_prepare_split(struct virtqueue *_vq)
@@ -1085,7 +1085,7 @@ unmap_release:
kfree(desc);
END_USE(vq);
- return -EIO;
+ return -ENOMEM;
}
static inline int virtqueue_add_packed(struct virtqueue *_vq,
diff --git a/drivers/w1/masters/sgi_w1.c b/drivers/w1/masters/sgi_w1.c
index 1b2d96b945be..e8c7fa68d3cc 100644
--- a/drivers/w1/masters/sgi_w1.c
+++ b/drivers/w1/masters/sgi_w1.c
@@ -77,15 +77,13 @@ static int sgi_w1_probe(struct platform_device *pdev)
{
struct sgi_w1_device *sdev;
struct sgi_w1_platform_data *pdata;
- struct resource *res;
sdev = devm_kzalloc(&pdev->dev, sizeof(struct sgi_w1_device),
GFP_KERNEL);
if (!sdev)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- sdev->mcr = devm_ioremap_resource(&pdev->dev, res);
+ sdev->mcr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(sdev->mcr))
return PTR_ERR(sdev->mcr);
diff --git a/drivers/w1/slaves/Kconfig b/drivers/w1/slaves/Kconfig
index b7847636501d..687753889c34 100644
--- a/drivers/w1/slaves/Kconfig
+++ b/drivers/w1/slaves/Kconfig
@@ -74,6 +74,14 @@ config W1_SLAVE_DS2805
organized as 7 pages of 16 bytes each with 64bit
unique number. Requires OverDrive Speed to talk to.
+config W1_SLAVE_DS2430
+ tristate "256b EEPROM family support (DS2430)"
+ help
+ Say Y here if you want to use a 1-wire 256bit EEPROM
+ family device (DS2430).
+ This EEPROM is organized as one page of 32 bytes for random
+ access.
+
config W1_SLAVE_DS2431
tristate "1kb EEPROM family support (DS2431)"
help
diff --git a/drivers/w1/slaves/Makefile b/drivers/w1/slaves/Makefile
index 8e9655eaa478..278bcf2a9bfd 100644
--- a/drivers/w1/slaves/Makefile
+++ b/drivers/w1/slaves/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_W1_SLAVE_DS2408) += w1_ds2408.o
obj-$(CONFIG_W1_SLAVE_DS2413) += w1_ds2413.o
obj-$(CONFIG_W1_SLAVE_DS2406) += w1_ds2406.o
obj-$(CONFIG_W1_SLAVE_DS2423) += w1_ds2423.o
+obj-$(CONFIG_W1_SLAVE_DS2430) += w1_ds2430.o
obj-$(CONFIG_W1_SLAVE_DS2431) += w1_ds2431.o
obj-$(CONFIG_W1_SLAVE_DS2805) += w1_ds2805.o
obj-$(CONFIG_W1_SLAVE_DS2433) += w1_ds2433.o
diff --git a/drivers/w1/slaves/w1_ds2430.c b/drivers/w1/slaves/w1_ds2430.c
new file mode 100644
index 000000000000..6fb0563fb2ae
--- /dev/null
+++ b/drivers/w1/slaves/w1_ds2430.c
@@ -0,0 +1,295 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * w1_ds2430.c - w1 family 14 (DS2430) driver
+ *
+ * Copyright (c) 2019 Angelo Dureghello <angelo.dureghello@timesys.com>
+ *
+ * Cloned and modified from ds2431
+ * Copyright (c) 2008 Bernhard Weirich <bernhard.weirich@riedel.net>
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/device.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+
+#include <linux/w1.h>
+
+#define W1_EEPROM_DS2430 0x14
+
+#define W1_F14_EEPROM_SIZE 32
+#define W1_F14_PAGE_COUNT 1
+#define W1_F14_PAGE_BITS 5
+#define W1_F14_PAGE_SIZE (1 << W1_F14_PAGE_BITS)
+#define W1_F14_PAGE_MASK 0x1F
+
+#define W1_F14_SCRATCH_BITS 5
+#define W1_F14_SCRATCH_SIZE (1 << W1_F14_SCRATCH_BITS)
+#define W1_F14_SCRATCH_MASK (W1_F14_SCRATCH_SIZE-1)
+
+#define W1_F14_READ_EEPROM 0xF0
+#define W1_F14_WRITE_SCRATCH 0x0F
+#define W1_F14_READ_SCRATCH 0xAA
+#define W1_F14_COPY_SCRATCH 0x55
+#define W1_F14_VALIDATION_KEY 0xa5
+
+#define W1_F14_TPROG_MS 11
+#define W1_F14_READ_RETRIES 10
+#define W1_F14_READ_MAXLEN W1_F14_SCRATCH_SIZE
+
+/*
+ * Checks the file size bounds and adjusts count as needed.
+ * This would not be needed if the file size didn't reset to 0 after a write.
+ */
+static inline size_t w1_f14_fix_count(loff_t off, size_t count, size_t size)
+{
+ if (off > size)
+ return 0;
+
+ if ((off + count) > size)
+ return size - off;
+
+ return count;
+}
+
+/*
+ * Reads a block from the W1 ROM twice and compares the results.
+ * If the two reads match, the data is returned; otherwise the read
+ * is repeated up to W1_F14_READ_RETRIES times.
+ *
+ * count must not exceed W1_F14_READ_MAXLEN.
+ */
+static int w1_f14_readblock(struct w1_slave *sl, int off, int count, char *buf)
+{
+ u8 wrbuf[2];
+ u8 cmp[W1_F14_READ_MAXLEN];
+ int tries = W1_F14_READ_RETRIES;
+
+ do {
+ wrbuf[0] = W1_F14_READ_EEPROM;
+ wrbuf[1] = off & 0xff;
+
+ if (w1_reset_select_slave(sl))
+ return -1;
+
+ w1_write_block(sl->master, wrbuf, 2);
+ w1_read_block(sl->master, buf, count);
+
+ if (w1_reset_select_slave(sl))
+ return -1;
+
+ w1_write_block(sl->master, wrbuf, 2);
+ w1_read_block(sl->master, cmp, count);
+
+ if (!memcmp(cmp, buf, count))
+ return 0;
+ } while (--tries);
+
+ dev_err(&sl->dev, "proof reading failed %d times\n",
+ W1_F14_READ_RETRIES);
+
+ return -1;
+}
+
+static ssize_t eeprom_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t off, size_t count)
+{
+ struct w1_slave *sl = kobj_to_w1_slave(kobj);
+ int todo = count;
+
+ count = w1_f14_fix_count(off, count, W1_F14_EEPROM_SIZE);
+ if (count == 0)
+ return 0;
+
+ mutex_lock(&sl->master->bus_mutex);
+
+ /* read directly from the EEPROM in chunks of W1_F14_READ_MAXLEN */
+ while (todo > 0) {
+ int block_read;
+
+ if (todo >= W1_F14_READ_MAXLEN)
+ block_read = W1_F14_READ_MAXLEN;
+ else
+ block_read = todo;
+
+ if (w1_f14_readblock(sl, off, block_read, buf) < 0)
+ count = -EIO;
+
+ todo -= W1_F14_READ_MAXLEN;
+ buf += W1_F14_READ_MAXLEN;
+ off += W1_F14_READ_MAXLEN;
+ }
+
+ mutex_unlock(&sl->master->bus_mutex);
+
+ return count;
+}
+
+/*
+ * Writes to the scratchpad and reads it back for verification.
+ * Then copies the scratchpad to EEPROM.
+ * The data must be aligned at W1_F14_SCRATCH_SIZE bytes and
+ * must be W1_F14_SCRATCH_SIZE bytes long.
+ * The master must be locked.
+ *
+ * @param sl The slave structure
+ * @param addr Address for the write
+ * @param len length must be <= (W1_F14_PAGE_SIZE - (addr & W1_F14_PAGE_MASK))
+ * @param data The data to write
+ * @return 0=Success -1=failure
+ */
+static int w1_f14_write(struct w1_slave *sl, int addr, int len, const u8 *data)
+{
+ int tries = W1_F14_READ_RETRIES;
+ u8 wrbuf[2];
+ u8 rdbuf[W1_F14_SCRATCH_SIZE + 3];
+
+retry:
+
+ /* Write the data to the scratchpad */
+ if (w1_reset_select_slave(sl))
+ return -1;
+
+ wrbuf[0] = W1_F14_WRITE_SCRATCH;
+ wrbuf[1] = addr & 0xff;
+
+ w1_write_block(sl->master, wrbuf, 2);
+ w1_write_block(sl->master, data, len);
+
+ /* Read the scratchpad and verify */
+ if (w1_reset_select_slave(sl))
+ return -1;
+
+ w1_write_8(sl->master, W1_F14_READ_SCRATCH);
+ w1_read_block(sl->master, rdbuf, len + 2);
+
+ /*
+ * Compare what was read against the data written.
+ * Note: on a read-scratchpad command, the device returns two
+ * leading 0xff bytes, which must be discarded.
+ */
+ if ((memcmp(data, &rdbuf[2], len) != 0)) {
+
+ if (--tries)
+ goto retry;
+
+ dev_err(&sl->dev,
+ "could not write to eeprom, scratchpad compare failed %d times\n",
+ W1_F14_READ_RETRIES);
+
+ return -1;
+ }
+
+ /* Copy the scratchpad to EEPROM */
+ if (w1_reset_select_slave(sl))
+ return -1;
+
+ wrbuf[0] = W1_F14_COPY_SCRATCH;
+ wrbuf[1] = W1_F14_VALIDATION_KEY;
+ w1_write_block(sl->master, wrbuf, 2);
+
+ /* Sleep for tprog ms to wait for the write to complete */
+ msleep(W1_F14_TPROG_MS);
+
+ /* Reset the bus to wake up the EEPROM */
+ w1_reset_bus(sl->master);
+
+ return 0;
+}
+
+static ssize_t eeprom_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t off, size_t count)
+{
+ struct w1_slave *sl = kobj_to_w1_slave(kobj);
+ int addr, len;
+ int copy;
+
+ count = w1_f14_fix_count(off, count, W1_F14_EEPROM_SIZE);
+ if (count == 0)
+ return 0;
+
+ mutex_lock(&sl->master->bus_mutex);
+
+ /* Can only write data in blocks of the size of the scratchpad */
+ addr = off;
+ len = count;
+ while (len > 0) {
+
+ /* if len too short or addr not aligned */
+ if (len < W1_F14_SCRATCH_SIZE || addr & W1_F14_SCRATCH_MASK) {
+ char tmp[W1_F14_SCRATCH_SIZE];
+
+ /* read the block and update the parts to be written */
+ if (w1_f14_readblock(sl, addr & ~W1_F14_SCRATCH_MASK,
+ W1_F14_SCRATCH_SIZE, tmp)) {
+ count = -EIO;
+ goto out_up;
+ }
+
+ /* copy at most to the scratchpad boundary or len */
+ copy = W1_F14_SCRATCH_SIZE -
+ (addr & W1_F14_SCRATCH_MASK);
+
+ if (copy > len)
+ copy = len;
+
+ memcpy(&tmp[addr & W1_F14_SCRATCH_MASK], buf, copy);
+ if (w1_f14_write(sl, addr & ~W1_F14_SCRATCH_MASK,
+ W1_F14_SCRATCH_SIZE, tmp) < 0) {
+ count = -EIO;
+ goto out_up;
+ }
+ } else {
+
+ copy = W1_F14_SCRATCH_SIZE;
+ if (w1_f14_write(sl, addr, copy, buf) < 0) {
+ count = -EIO;
+ goto out_up;
+ }
+ }
+ buf += copy;
+ addr += copy;
+ len -= copy;
+ }
+
+out_up:
+ mutex_unlock(&sl->master->bus_mutex);
+
+ return count;
+}
+
+static BIN_ATTR_RW(eeprom, W1_F14_EEPROM_SIZE);
+
+static struct bin_attribute *w1_f14_bin_attrs[] = {
+ &bin_attr_eeprom,
+ NULL,
+};
+
+static const struct attribute_group w1_f14_group = {
+ .bin_attrs = w1_f14_bin_attrs,
+};
+
+static const struct attribute_group *w1_f14_groups[] = {
+ &w1_f14_group,
+ NULL,
+};
+
+static struct w1_family_ops w1_f14_fops = {
+ .groups = w1_f14_groups,
+};
+
+static struct w1_family w1_family_14 = {
+ .fid = W1_EEPROM_DS2430,
+ .fops = &w1_f14_fops,
+};
+module_w1_family(w1_family_14);
+
+MODULE_AUTHOR("Angelo Dureghello <angelo.dureghello@timesys.com>");
+MODULE_DESCRIPTION("w1 family 14 driver for DS2430, 256kb EEPROM");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("w1-family-" __stringify(W1_EEPROM_DS2430));
diff --git a/drivers/watchdog/menz69_wdt.c b/drivers/watchdog/menz69_wdt.c
index ed18238c5407..8973f98bc6a5 100644
--- a/drivers/watchdog/menz69_wdt.c
+++ b/drivers/watchdog/menz69_wdt.c
@@ -168,3 +168,4 @@ module_mcb_driver(men_z069_driver);
MODULE_AUTHOR("Johannes Thumshirn <jth@kernel.org>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("mcb:16z069");
+MODULE_IMPORT_NS(MCB);
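
MODULE_IMPORT_NS() is the consumer half of the kernel's symbol-namespace mechanism: the MCB core exports its symbols into the MCB namespace, and a module using them must declare the import or modpost warns at build time. A hedged sketch with a placeholder symbol name, since the actual exported MCB symbols are not shown in this diff:

/* Provider side (placeholder symbol name): export into the MCB
 * namespace rather than the global one.
 */
EXPORT_SYMBOL_NS_GPL(my_mcb_helper, MCB);

/* Consumer side, as in the hunk above: declare the dependency so
 * modpost accepts the reference.
 */
MODULE_IMPORT_NS(MCB);
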
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index 79cc75096f42..61212fc7f0c7 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -106,27 +106,27 @@ config XENFS
If in doubt, say yes.
config XEN_COMPAT_XENFS
- bool "Create compatibility mount point /proc/xen"
- depends on XENFS
- default y
- help
- The old xenstore userspace tools expect to find "xenbus"
- under /proc/xen, but "xenbus" is now found at the root of the
- xenfs filesystem. Selecting this causes the kernel to create
- the compatibility mount point /proc/xen if it is running on
- a xen platform.
- If in doubt, say yes.
+ bool "Create compatibility mount point /proc/xen"
+ depends on XENFS
+ default y
+ help
+ The old xenstore userspace tools expect to find "xenbus"
+ under /proc/xen, but "xenbus" is now found at the root of the
+ xenfs filesystem. Selecting this causes the kernel to create
+ the compatibility mount point /proc/xen if it is running on
+ a xen platform.
+ If in doubt, say yes.
config XEN_SYS_HYPERVISOR
- bool "Create xen entries under /sys/hypervisor"
- depends on SYSFS
- select SYS_HYPERVISOR
- default y
- help
- Create entries under /sys/hypervisor describing the Xen
- hypervisor environment. When running native or in another
- virtual environment, /sys/hypervisor will still be present,
- but will have no xen contents.
+ bool "Create xen entries under /sys/hypervisor"
+ depends on SYSFS
+ select SYS_HYPERVISOR
+ default y
+ help
+ Create entries under /sys/hypervisor describing the Xen
+ hypervisor environment. When running native or in another
+ virtual environment, /sys/hypervisor will still be present,
+ but will have no xen contents.
config XEN_XENBUS_FRONTEND
tristate
@@ -141,7 +141,8 @@ config XEN_GNTDEV
config XEN_GNTDEV_DMABUF
bool "Add support for dma-buf grant access device driver extension"
- depends on XEN_GNTDEV && XEN_GRANT_DMA_ALLOC && DMA_SHARED_BUFFER
+ depends on XEN_GNTDEV && XEN_GRANT_DMA_ALLOC
+ select DMA_SHARED_BUFFER
help
Allows userspace processes and kernel modules to use Xen backed
dma-buf implementation. With this extension grant references to
@@ -270,7 +271,7 @@ config XEN_ACPI_PROCESSOR
depends on XEN && XEN_DOM0 && X86 && ACPI_PROCESSOR && CPU_FREQ
default m
help
- This ACPI processor uploads Power Management information to the Xen
+ This ACPI processor uploads Power Management information to the Xen
hypervisor.
To do that the driver parses the Power Management data and uploads
@@ -279,19 +280,19 @@ config XEN_ACPI_PROCESSOR
SMM so that other drivers (such as ACPI cpufreq scaling driver) will
not load.
- To compile this driver as a module, choose M here: the module will be
+ To compile this driver as a module, choose M here: the module will be
called xen_acpi_processor If you do not know what to choose, select
M here. If the CPUFREQ drivers are built in, select Y here.
config XEN_MCE_LOG
bool "Xen platform mcelog"
- depends on XEN_DOM0 && X86_64 && X86_MCE
+ depends on XEN_DOM0 && X86_MCE
help
Allow kernel fetching MCE error from Xen platform and
converting it into Linux mcelog format for mcelog tools
config XEN_HAVE_PVMMU
- bool
+ bool
config XEN_EFI
def_bool y
@@ -308,15 +309,15 @@ config XEN_ACPI
depends on X86 && ACPI
config XEN_SYMS
- bool "Xen symbols"
- depends on X86 && XEN_DOM0 && XENFS
- default y if KALLSYMS
- help
- Exports hypervisor symbols (along with their types and addresses) via
- /proc/xen/xensyms file, similar to /proc/kallsyms
+ bool "Xen symbols"
+ depends on X86 && XEN_DOM0 && XENFS
+ default y if KALLSYMS
+ help
+ Exports hypervisor symbols (along with their types and addresses) via
+ /proc/xen/xensyms file, similar to /proc/kallsyms
config XEN_HAVE_VPMU
- bool
+ bool
config XEN_FRONT_PGDIR_SHBUF
tristate
diff --git a/drivers/xen/mcelog.c b/drivers/xen/mcelog.c
index b8bf61abb65b..e9ac3b8c4167 100644
--- a/drivers/xen/mcelog.c
+++ b/drivers/xen/mcelog.c
@@ -222,7 +222,7 @@ static int convert_log(struct mc_info *mi)
struct mcinfo_global *mc_global;
struct mcinfo_bank *mc_bank;
struct xen_mce m;
- uint32_t i;
+ unsigned int i, j;
mic = NULL;
x86_mcinfo_lookup(&mic, mi, MC_TYPE_GLOBAL);
@@ -248,7 +248,17 @@ static int convert_log(struct mc_info *mi)
m.socketid = g_physinfo[i].mc_chipid;
m.cpu = m.extcpu = g_physinfo[i].mc_cpunr;
m.cpuvendor = (__u8)g_physinfo[i].mc_vendor;
- m.mcgcap = g_physinfo[i].mc_msrvalues[__MC_MSR_MCGCAP].value;
+ for (j = 0; j < g_physinfo[i].mc_nmsrvals; ++j)
+ switch (g_physinfo[i].mc_msrvalues[j].reg) {
+ case MSR_IA32_MCG_CAP:
+ m.mcgcap = g_physinfo[i].mc_msrvalues[j].value;
+ break;
+
+ case MSR_PPIN:
+ case MSR_AMD_PPIN:
+ m.ppin = g_physinfo[i].mc_msrvalues[j].value;
+ break;
+ }
mic = NULL;
x86_mcinfo_lookup(&mic, mi, MC_TYPE_BANK);
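The hunk above replaces a fixed-index lookup of MCG_CAP with a scan of the per-CPU MSR value array, which also lets the converter pick up the Intel and AMD PPIN registers. A minimal userspace sketch of that scan pattern, assuming stand-in struct, constant, and function names (these are not the Xen/mcelog definitions):

#include <stdint.h>
#include <stdio.h>

#define MSR_IA32_MCG_CAP 0x179
#define MSR_PPIN         0x4f

struct msr_val { uint32_t reg; uint64_t value; };

/* Walk the (register, value) pairs and match on the register number,
 * instead of assuming MCG_CAP sits at a fixed slot. */
static void pick_msrs(const struct msr_val *v, unsigned int n,
		      uint64_t *mcgcap, uint64_t *ppin)
{
	unsigned int i;

	for (i = 0; i < n; i++) {
		switch (v[i].reg) {
		case MSR_IA32_MCG_CAP:
			*mcgcap = v[i].value;
			break;
		case MSR_PPIN:
			*ppin = v[i].value;
			break;
		}
	}
}

int main(void)
{
	struct msr_val vals[] = {
		{ MSR_PPIN, 0x1234 },
		{ MSR_IA32_MCG_CAP, 0xc09 },
	};
	uint64_t mcgcap = 0, ppin = 0;

	pick_msrs(vals, 2, &mcgcap, &ppin);
	printf("mcgcap=%llx ppin=%llx\n",
	       (unsigned long long)mcgcap, (unsigned long long)ppin);
	return 0;
}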
diff --git a/fs/Kconfig b/fs/Kconfig
index 2501e6f1f965..7b623e9fc1b0 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -322,4 +322,7 @@ source "fs/nls/Kconfig"
source "fs/dlm/Kconfig"
source "fs/unicode/Kconfig"
+config IO_WQ
+ bool
+
endmenu
diff --git a/fs/Makefile b/fs/Makefile
index 14231b4cf383..1148c555c4d3 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -32,6 +32,7 @@ obj-$(CONFIG_EVENTFD) += eventfd.o
obj-$(CONFIG_USERFAULTFD) += userfaultfd.o
obj-$(CONFIG_AIO) += aio.o
obj-$(CONFIG_IO_URING) += io_uring.o
+obj-$(CONFIG_IO_WQ) += io-wq.o
obj-$(CONFIG_FS_DAX) += dax.o
obj-$(CONFIG_FS_ENCRYPTION) += crypto/
obj-$(CONFIG_FS_VERITY) += verity/
diff --git a/fs/affs/affs.h b/fs/affs/affs.h
index a92eb6ae2ae2..a755bef7c4c7 100644
--- a/fs/affs/affs.h
+++ b/fs/affs/affs.h
@@ -43,8 +43,8 @@ struct affs_ext_key {
*/
struct affs_inode_info {
atomic_t i_opencnt;
- struct semaphore i_link_lock; /* Protects internal inode access. */
- struct semaphore i_ext_lock; /* Protects internal inode access. */
+ struct mutex i_link_lock; /* Protects internal inode access. */
+ struct mutex i_ext_lock; /* Protects internal inode access. */
#define i_hash_lock i_ext_lock
u32 i_blkcnt; /* block count */
u32 i_extcnt; /* extended block count */
@@ -293,30 +293,30 @@ affs_adjust_bitmapchecksum(struct buffer_head *bh, u32 val)
static inline void
affs_lock_link(struct inode *inode)
{
- down(&AFFS_I(inode)->i_link_lock);
+ mutex_lock(&AFFS_I(inode)->i_link_lock);
}
static inline void
affs_unlock_link(struct inode *inode)
{
- up(&AFFS_I(inode)->i_link_lock);
+ mutex_unlock(&AFFS_I(inode)->i_link_lock);
}
static inline void
affs_lock_dir(struct inode *inode)
{
- down(&AFFS_I(inode)->i_hash_lock);
+ mutex_lock_nested(&AFFS_I(inode)->i_hash_lock, SINGLE_DEPTH_NESTING);
}
static inline void
affs_unlock_dir(struct inode *inode)
{
- up(&AFFS_I(inode)->i_hash_lock);
+ mutex_unlock(&AFFS_I(inode)->i_hash_lock);
}
static inline void
affs_lock_ext(struct inode *inode)
{
- down(&AFFS_I(inode)->i_ext_lock);
+ mutex_lock(&AFFS_I(inode)->i_ext_lock);
}
static inline void
affs_unlock_ext(struct inode *inode)
{
- up(&AFFS_I(inode)->i_ext_lock);
+ mutex_unlock(&AFFS_I(inode)->i_ext_lock);
}
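The affs hunks above convert two binary semaphores used purely for mutual exclusion into mutexes, which gain owner checking and lockdep coverage. Because i_hash_lock aliases i_ext_lock, the dir-lock path takes it with mutex_lock_nested(SINGLE_DEPTH_NESTING), which only tells lockdep that same-class nesting is intentional. A userspace analogue of the conversion, with pthread primitives standing in for the kernel ones:

#include <pthread.h>
#include <stdio.h>

struct inode_info {
	pthread_mutex_t link_lock;	/* was: a semaphore initialised to 1 */
	pthread_mutex_t ext_lock;	/* was: a semaphore initialised to 1 */
	unsigned int blkcnt;
};

static void info_init(struct inode_info *ei)
{
	pthread_mutex_init(&ei->link_lock, NULL);	/* was: sema_init(..., 1) */
	pthread_mutex_init(&ei->ext_lock, NULL);
}

static void lock_ext(struct inode_info *ei)
{
	pthread_mutex_lock(&ei->ext_lock);		/* was: down() */
}

static void unlock_ext(struct inode_info *ei)
{
	pthread_mutex_unlock(&ei->ext_lock);		/* was: up() */
}

int main(void)
{
	struct inode_info ei = { .blkcnt = 0 };

	info_init(&ei);
	lock_ext(&ei);
	ei.blkcnt++;		/* the protected update */
	unlock_ext(&ei);
	printf("blkcnt=%u\n", ei.blkcnt);
	return 0;
}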
diff --git a/fs/affs/super.c b/fs/affs/super.c
index cc463ae47c12..47107c6712a6 100644
--- a/fs/affs/super.c
+++ b/fs/affs/super.c
@@ -121,8 +121,8 @@ static void init_once(void *foo)
{
struct affs_inode_info *ei = (struct affs_inode_info *) foo;
- sema_init(&ei->i_link_lock, 1);
- sema_init(&ei->i_ext_lock, 1);
+ mutex_init(&ei->i_link_lock);
+ mutex_init(&ei->i_ext_lock);
inode_init_once(&ei->vfs_inode);
}
@@ -561,14 +561,9 @@ affs_remount(struct super_block *sb, int *flags, char *data)
int root_block;
unsigned long mount_flags;
int res = 0;
- char *new_opts;
char volume[32];
char *prefix = NULL;
- new_opts = kstrdup(data, GFP_KERNEL);
- if (data && !new_opts)
- return -ENOMEM;
-
pr_debug("%s(flags=0x%x,opts=\"%s\")\n", __func__, *flags, data);
sync_filesystem(sb);
@@ -579,7 +574,6 @@ affs_remount(struct super_block *sb, int *flags, char *data)
&blocksize, &prefix, volume,
&mount_flags)) {
kfree(prefix);
- kfree(new_opts);
return -EINVAL;
}
diff --git a/fs/afs/callback.c b/fs/afs/callback.c
index 6cdd7047c809..2dca8df1a18d 100644
--- a/fs/afs/callback.c
+++ b/fs/afs/callback.c
@@ -312,7 +312,6 @@ void afs_break_callbacks(struct afs_server *server, size_t count,
_enter("%p,%zu,", server, count);
ASSERT(server != NULL);
- ASSERTCMP(count, <=, AFSCBMAX);
/* TODO: Sort the callback break list by volume ID */
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index cc12772d0a4d..497f979018c2 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -803,7 +803,12 @@ success:
continue;
if (cookie->inodes[i]) {
- afs_vnode_commit_status(&fc, AFS_FS_I(cookie->inodes[i]),
+ struct afs_vnode *iv = AFS_FS_I(cookie->inodes[i]);
+
+ if (test_bit(AFS_VNODE_UNSET, &iv->flags))
+ continue;
+
+ afs_vnode_commit_status(&fc, iv,
scb->cb_break, NULL, scb);
continue;
}
diff --git a/fs/afs/flock.c b/fs/afs/flock.c
index d5e5a6ddc847..0f2a94ba73cb 100644
--- a/fs/afs/flock.c
+++ b/fs/afs/flock.c
@@ -346,8 +346,8 @@ again:
if (ret < 0) {
trace_afs_flock_ev(vnode, NULL, afs_flock_extend_fail,
ret);
- pr_warning("AFS: Failed to extend lock on {%llx:%llx} error %d\n",
- vnode->fid.vid, vnode->fid.vnode, ret);
+ pr_warn("AFS: Failed to extend lock on {%llx:%llx} error %d\n",
+ vnode->fid.vid, vnode->fid.vnode, ret);
}
spin_lock(&vnode->lock);
diff --git a/fs/afs/inode.c b/fs/afs/inode.c
index 46d2d7cb461d..281470fe1183 100644
--- a/fs/afs/inode.c
+++ b/fs/afs/inode.c
@@ -34,8 +34,7 @@ static noinline void dump_vnode(struct afs_vnode *vnode, struct afs_vnode *paren
{
static unsigned long once_only;
- pr_warn("kAFS: AFS vnode with undefined type %u\n",
- vnode->status.type);
+ pr_warn("kAFS: AFS vnode with undefined type %u\n", vnode->status.type);
pr_warn("kAFS: A=%d m=%o s=%llx v=%llx\n",
vnode->status.abort_code,
vnode->status.mode,
@@ -175,11 +174,11 @@ static void afs_apply_status(struct afs_fs_cursor *fc,
BUG_ON(test_bit(AFS_VNODE_UNSET, &vnode->flags));
if (status->type != vnode->status.type) {
- pr_warning("Vnode %llx:%llx:%x changed type %u to %u\n",
- vnode->fid.vid,
- vnode->fid.vnode,
- vnode->fid.unique,
- status->type, vnode->status.type);
+ pr_warn("Vnode %llx:%llx:%x changed type %u to %u\n",
+ vnode->fid.vid,
+ vnode->fid.vnode,
+ vnode->fid.unique,
+ status->type, vnode->status.type);
afs_protocol_error(NULL, -EBADMSG, afs_eproto_bad_status);
return;
}
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
index 0e5269374ac1..61498d9f06ef 100644
--- a/fs/afs/rxrpc.c
+++ b/fs/afs/rxrpc.c
@@ -637,6 +637,7 @@ long afs_wait_for_call_to_complete(struct afs_call *call,
call->need_attention = false;
__set_current_state(TASK_RUNNING);
afs_deliver_to_call(call);
+ timeout = rtt2;
continue;
}
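The one-line addition restores the usual progress-rearms-the-timer pattern: after delivering received data to the call, the wait timeout goes back to the full RTT-derived budget instead of continuing to count down. A toy model of the behaviour (the values are made up for illustration):

#include <stdio.h>

int main(void)
{
	int rtt2 = 10;			/* full timeout budget, in ticks */
	int timeout = rtt2;
	int work_at[] = { 3, 7 };	/* ticks at which progress happens */
	int w = 0;

	for (int tick = 0; timeout > 0; tick++, timeout--) {
		if (w < 2 && tick == work_at[w]) {
			w++;
			timeout = rtt2;	/* the fix: progress rearms the timer */
		}
	}
	printf("timed out after rearming %d times\n", w);
	return 0;
}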
diff --git a/fs/afs/super.c b/fs/afs/super.c
index f18911e8d770..488641b1a418 100644
--- a/fs/afs/super.c
+++ b/fs/afs/super.c
@@ -435,6 +435,7 @@ static int afs_fill_super(struct super_block *sb, struct afs_fs_context *ctx)
/* fill in the superblock */
sb->s_blocksize = PAGE_SIZE;
sb->s_blocksize_bits = PAGE_SHIFT;
+ sb->s_maxbytes = MAX_LFS_FILESIZE;
sb->s_magic = AFS_FS_MAGIC;
sb->s_op = &afs_super_ops;
if (!as->dyn_root)
diff --git a/fs/afs/vl_list.c b/fs/afs/vl_list.c
index 21eb0c0be912..8fea54eba0c2 100644
--- a/fs/afs/vl_list.c
+++ b/fs/afs/vl_list.c
@@ -279,8 +279,8 @@ struct afs_vlserver_list *afs_extract_vlserver_list(struct afs_cell *cell,
struct afs_addr_list *old = addrs;
write_lock(&server->lock);
- rcu_swap_protected(server->addresses, old,
- lockdep_is_held(&server->lock));
+ old = rcu_replace_pointer(server->addresses, old,
+ lockdep_is_held(&server->lock));
write_unlock(&server->lock);
afs_put_addrlist(old);
}
diff --git a/fs/afs/yfsclient.c b/fs/afs/yfsclient.c
index 3ee7abf4b2d0..9ac035c17dc4 100644
--- a/fs/afs/yfsclient.c
+++ b/fs/afs/yfsclient.c
@@ -152,8 +152,8 @@ static void yfs_check_req(struct afs_call *call, __be32 *bp)
pr_err("kAFS: %s: Request buffer overflow (%zu>%u)\n",
call->type->name, len, call->request_size);
else if (len < call->request_size)
- pr_warning("kAFS: %s: Request buffer underflow (%zu<%u)\n",
- call->type->name, len, call->request_size);
+ pr_warn("kAFS: %s: Request buffer underflow (%zu<%u)\n",
+ call->type->name, len, call->request_size);
}
/*
diff --git a/fs/aio.c b/fs/aio.c
index 01e0fb9ae45a..0d9a559d488c 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -2179,7 +2179,7 @@ SYSCALL_DEFINE5(io_getevents_time32, __u32, ctx_id,
#ifdef CONFIG_COMPAT
struct __compat_aio_sigset {
- compat_sigset_t __user *sigmask;
+ compat_uptr_t sigmask;
compat_size_t sigsetsize;
};
@@ -2193,7 +2193,7 @@ COMPAT_SYSCALL_DEFINE6(io_pgetevents,
struct old_timespec32 __user *, timeout,
const struct __compat_aio_sigset __user *, usig)
{
- struct __compat_aio_sigset ksig = { NULL, };
+ struct __compat_aio_sigset ksig = { 0, };
struct timespec64 t;
bool interrupted;
int ret;
@@ -2204,7 +2204,7 @@ COMPAT_SYSCALL_DEFINE6(io_pgetevents,
if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))
return -EFAULT;
- ret = set_compat_user_sigmask(ksig.sigmask, ksig.sigsetsize);
+ ret = set_compat_user_sigmask(compat_ptr(ksig.sigmask), ksig.sigsetsize);
if (ret)
return ret;
@@ -2228,7 +2228,7 @@ COMPAT_SYSCALL_DEFINE6(io_pgetevents_time64,
struct __kernel_timespec __user *, timeout,
const struct __compat_aio_sigset __user *, usig)
{
- struct __compat_aio_sigset ksig = { NULL, };
+ struct __compat_aio_sigset ksig = { 0, };
struct timespec64 t;
bool interrupted;
int ret;
@@ -2239,7 +2239,7 @@ COMPAT_SYSCALL_DEFINE6(io_pgetevents_time64,
if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))
return -EFAULT;
- ret = set_compat_user_sigmask(ksig.sigmask, ksig.sigsetsize);
+ ret = set_compat_user_sigmask(compat_ptr(ksig.sigmask), ksig.sigsetsize);
if (ret)
return ret;
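The aio compat fix stores the sigmask as a compat_uptr_t (a 32-bit integer) rather than a native pointer: on a 64-bit kernel, a struct embedding a real pointer does not match the layout 32-bit userspace passes in, and compat_ptr() widens the value back only when it is used. A standalone sketch of the layout difference, using illustrative types rather than the kernel's:

#include <stdint.h>
#include <stdio.h>

struct aio_sigset_bad {		/* native pointer: 8 bytes on LP64 */
	void *sigmask;
	uint64_t sigsetsize;
};

struct aio_sigset_compat {	/* what 32-bit userspace actually passes */
	uint32_t sigmask;	/* user pointer kept as a 32-bit integer */
	uint32_t sigsetsize;
};

static void *compat_ptr(uint32_t uptr)
{
	return (void *)(uintptr_t)uptr;	/* widen back to a full pointer */
}

int main(void)
{
	printf("native layout: %zu bytes, compat layout: %zu bytes\n",
	       sizeof(struct aio_sigset_bad),
	       sizeof(struct aio_sigset_compat));
	printf("compat_ptr(0x1000) = %p\n", compat_ptr(0x1000));
	return 0;
}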
diff --git a/fs/autofs/expire.c b/fs/autofs/expire.c
index 2866fabf497f..91f5787dae7c 100644
--- a/fs/autofs/expire.c
+++ b/fs/autofs/expire.c
@@ -459,9 +459,10 @@ static struct dentry *autofs_expire_indirect(struct super_block *sb,
*/
how &= ~AUTOFS_EXP_LEAVES;
found = should_expire(expired, mnt, timeout, how);
- if (!found || found != expired)
- /* Something has changed, continue */
+ if (found != expired) { // something has changed, continue
+ dput(found);
goto next;
+ }
if (expired != dentry)
dput(dentry);
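The autofs hunk fixes a dentry reference leak: should_expire() returns a counted reference, and the old code jumped to the next candidate without releasing it whenever the result differed from the expected dentry. The refcount rule in miniature, with get/put stand-ins for dget/dput:

#include <stdio.h>

struct obj { int refcount; };

static struct obj *obj_get(struct obj *o) { o->refcount++; return o; }
static void obj_put(struct obj *o) { o->refcount--; }

int main(void)
{
	struct obj expired = { .refcount = 1 };
	struct obj other = { .refcount = 1 };

	struct obj *found = obj_get(&other);	/* should_expire() took a ref */
	if (found != &expired)
		obj_put(found);		/* the fix: balance it before moving on */

	printf("other.refcount=%d (leaked if > 1)\n", other.refcount);
	return 0;
}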
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 9c073dbdc1b0..ee63c2732fa2 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -1403,11 +1403,7 @@ static void flush_disk(struct block_device *bdev, bool kill_dirty)
"resized disk %s\n",
bdev->bd_disk ? bdev->bd_disk->disk_name : "");
}
-
- if (!bdev->bd_disk)
- return;
- if (disk_part_scan_enabled(bdev->bd_disk))
- bdev->bd_invalidated = 1;
+ bdev->bd_invalidated = 1;
}
/**
@@ -1420,8 +1416,8 @@ static void flush_disk(struct block_device *bdev, bool kill_dirty)
* and adjusts it if it differs. When shrinking the bdev size, all its caches
* are freed.
*/
-void check_disk_size_change(struct gendisk *disk, struct block_device *bdev,
- bool verbose)
+static void check_disk_size_change(struct gendisk *disk,
+ struct block_device *bdev, bool verbose)
{
loff_t disk_size, bdev_size;
@@ -1437,6 +1433,7 @@ void check_disk_size_change(struct gendisk *disk, struct block_device *bdev,
if (bdev_size > disk_size)
flush_disk(bdev, false);
}
+ bdev->bd_invalidated = 0;
}
/**
@@ -1466,7 +1463,6 @@ int revalidate_disk(struct gendisk *disk)
mutex_lock(&bdev->bd_mutex);
check_disk_size_change(disk, bdev, ret == 0);
- bdev->bd_invalidated = 0;
mutex_unlock(&bdev->bd_mutex);
bdput(bdev);
}
@@ -1512,6 +1508,45 @@ EXPORT_SYMBOL(bd_set_size);
static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part);
+int bdev_disk_changed(struct block_device *bdev, bool invalidate)
+{
+ struct gendisk *disk = bdev->bd_disk;
+ int ret;
+
+ lockdep_assert_held(&bdev->bd_mutex);
+
+rescan:
+ ret = blk_drop_partitions(disk, bdev);
+ if (ret)
+ return ret;
+
+ if (invalidate)
+ set_capacity(disk, 0);
+ else if (disk->fops->revalidate_disk)
+ disk->fops->revalidate_disk(disk);
+
+ check_disk_size_change(disk, bdev, !invalidate);
+
+ if (get_capacity(disk)) {
+ ret = blk_add_partitions(disk, bdev);
+ if (ret == -EAGAIN)
+ goto rescan;
+ } else {
+ /*
+ * Tell userspace that the media / partition table may have
+ * changed.
+ */
+ kobject_uevent(&disk_to_dev(disk)->kobj, KOBJ_CHANGE);
+ }
+
+ return ret;
+}
+/*
+ * Only exported for loop and dasd for historic reasons. Don't use in new
+ * code!
+ */
+EXPORT_SYMBOL_GPL(bdev_disk_changed);
+
/*
* bd_mutex locking:
*
@@ -1594,12 +1629,9 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
* The latter is necessary to prevent ghost
* partitions on a removed medium.
*/
- if (bdev->bd_invalidated) {
- if (!ret)
- rescan_partitions(disk, bdev);
- else if (ret == -ENOMEDIUM)
- invalidate_partitions(disk, bdev);
- }
+ if (bdev->bd_invalidated &&
+ (!ret || ret == -ENOMEDIUM))
+ bdev_disk_changed(bdev, ret == -ENOMEDIUM);
if (ret)
goto out_clear;
@@ -1632,12 +1664,9 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
if (bdev->bd_disk->fops->open)
ret = bdev->bd_disk->fops->open(bdev, mode);
/* the same as first opener case, read comment there */
- if (bdev->bd_invalidated) {
- if (!ret)
- rescan_partitions(bdev->bd_disk, bdev);
- else if (ret == -ENOMEDIUM)
- invalidate_partitions(bdev->bd_disk, bdev);
- }
+ if (bdev->bd_invalidated &&
+ (!ret || ret == -ENOMEDIUM))
+ bdev_disk_changed(bdev, ret == -ENOMEDIUM);
if (ret)
goto out_unlock_bdev;
}
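Both __blkdev_get() call sites now funnel into the new bdev_disk_changed() helper, which drops stale partitions, refreshes capacity, rescans, and loops if the scan observed a concurrent change (-EAGAIN). A standalone sketch of that control flow; the helpers here are stubs of the same shape as the block-layer calls, not the real API:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static int drop_partitions(void) { return 0; }
static int add_partitions(void)
{
	static int racing = 1;
	return racing-- > 0 ? -EAGAIN : 0;	/* pretend one race occurs */
}

static int disk_changed(bool invalidate)
{
	int ret;

rescan:
	ret = drop_partitions();
	if (ret)
		return ret;
	if (!invalidate) {			/* media present: rescan */
		ret = add_partitions();
		if (ret == -EAGAIN)
			goto rescan;	/* table changed under us, retry */
	}
	return ret;
}

int main(void)
{
	printf("disk_changed(false) = %d\n", disk_changed(false));
	return 0;
}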
diff --git a/fs/btrfs/Kconfig b/fs/btrfs/Kconfig
index 38651fae7f21..75b6d10c9845 100644
--- a/fs/btrfs/Kconfig
+++ b/fs/btrfs/Kconfig
@@ -5,6 +5,8 @@ config BTRFS_FS
select CRYPTO
select CRYPTO_CRC32C
select LIBCRC32C
+ select CRYPTO_XXHASH
+ select CRYPTO_SHA256
select ZLIB_INFLATE
select ZLIB_DEFLATE
select LZO_COMPRESS
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index 2e9e13ffbd08..1d32a07bb2d1 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -53,24 +53,12 @@ struct btrfs_workqueue {
struct __btrfs_workqueue *high;
};
-static void normal_work_helper(struct btrfs_work *work);
-
-#define BTRFS_WORK_HELPER(name) \
-noinline_for_stack void btrfs_##name(struct work_struct *arg) \
-{ \
- struct btrfs_work *work = container_of(arg, struct btrfs_work, \
- normal_work); \
- normal_work_helper(work); \
-}
-
-struct btrfs_fs_info *
-btrfs_workqueue_owner(const struct __btrfs_workqueue *wq)
+struct btrfs_fs_info * __pure btrfs_workqueue_owner(const struct __btrfs_workqueue *wq)
{
return wq->fs_info;
}
-struct btrfs_fs_info *
-btrfs_work_owner(const struct btrfs_work *work)
+struct btrfs_fs_info * __pure btrfs_work_owner(const struct btrfs_work *work)
{
return work->wq->fs_info;
}
@@ -89,29 +77,6 @@ bool btrfs_workqueue_normal_congested(const struct btrfs_workqueue *wq)
return atomic_read(&wq->normal->pending) > wq->normal->thresh * 2;
}
-BTRFS_WORK_HELPER(worker_helper);
-BTRFS_WORK_HELPER(delalloc_helper);
-BTRFS_WORK_HELPER(flush_delalloc_helper);
-BTRFS_WORK_HELPER(cache_helper);
-BTRFS_WORK_HELPER(submit_helper);
-BTRFS_WORK_HELPER(fixup_helper);
-BTRFS_WORK_HELPER(endio_helper);
-BTRFS_WORK_HELPER(endio_meta_helper);
-BTRFS_WORK_HELPER(endio_meta_write_helper);
-BTRFS_WORK_HELPER(endio_raid56_helper);
-BTRFS_WORK_HELPER(endio_repair_helper);
-BTRFS_WORK_HELPER(rmw_helper);
-BTRFS_WORK_HELPER(endio_write_helper);
-BTRFS_WORK_HELPER(freespace_write_helper);
-BTRFS_WORK_HELPER(delayed_meta_helper);
-BTRFS_WORK_HELPER(readahead_helper);
-BTRFS_WORK_HELPER(qgroup_rescan_helper);
-BTRFS_WORK_HELPER(extent_refs_helper);
-BTRFS_WORK_HELPER(scrub_helper);
-BTRFS_WORK_HELPER(scrubwrc_helper);
-BTRFS_WORK_HELPER(scrubnc_helper);
-BTRFS_WORK_HELPER(scrubparity_helper);
-
static struct __btrfs_workqueue *
__btrfs_alloc_workqueue(struct btrfs_fs_info *fs_info, const char *name,
unsigned int flags, int limit_active, int thresh)
@@ -252,16 +217,16 @@ out:
}
}
-static void run_ordered_work(struct __btrfs_workqueue *wq)
+static void run_ordered_work(struct __btrfs_workqueue *wq,
+ struct btrfs_work *self)
{
struct list_head *list = &wq->ordered_list;
struct btrfs_work *work;
spinlock_t *lock = &wq->list_lock;
unsigned long flags;
+ bool free_self = false;
while (1) {
- void *wtag;
-
spin_lock_irqsave(lock, flags);
if (list_empty(list))
break;
@@ -287,22 +252,53 @@ static void run_ordered_work(struct __btrfs_workqueue *wq)
list_del(&work->ordered_list);
spin_unlock_irqrestore(lock, flags);
- /*
- * We don't want to call the ordered free functions with the
- * lock held though. Save the work as tag for the trace event,
- * because the callback could free the structure.
- */
- wtag = work;
- work->ordered_free(work);
- trace_btrfs_all_work_done(wq->fs_info, wtag);
+ if (work == self) {
+ /*
+ * This is the work item that the worker is currently
+ * executing.
+ *
+ * The kernel workqueue code guarantees non-reentrancy
+ * of work items. I.e., if a work item with the same
+ * address and work function is queued twice, the second
+ * execution is blocked until the first one finishes. A
+ * work item may be freed and recycled with the same
+ * work function; the workqueue code assumes that the
+ * original work item cannot depend on the recycled work
+ * item in that case (see find_worker_executing_work()).
+ *
+ * Note that different types of Btrfs work can depend on
+ * each other, and one type of work on one Btrfs
+ * filesystem may even depend on the same type of work
+ * on another Btrfs filesystem via, e.g., a loop device.
+ * Therefore, we must not allow the current work item to
+ * be recycled until we are really done, otherwise we
+ * break the above assumption and can deadlock.
+ */
+ free_self = true;
+ } else {
+ /*
+ * We don't want to call the ordered free functions with
+ * the lock held.
+ */
+ work->ordered_free(work);
+ /* NB: work must not be dereferenced past this point. */
+ trace_btrfs_all_work_done(wq->fs_info, work);
+ }
}
spin_unlock_irqrestore(lock, flags);
+
+ if (free_self) {
+ self->ordered_free(self);
+ /* NB: self must not be dereferenced past this point. */
+ trace_btrfs_all_work_done(wq->fs_info, self);
+ }
}
-static void normal_work_helper(struct btrfs_work *work)
+static void btrfs_work_helper(struct work_struct *normal_work)
{
+ struct btrfs_work *work = container_of(normal_work, struct btrfs_work,
+ normal_work);
struct __btrfs_workqueue *wq;
- void *wtag;
int need_order = 0;
/*
@@ -316,29 +312,26 @@ static void normal_work_helper(struct btrfs_work *work)
if (work->ordered_func)
need_order = 1;
wq = work->wq;
- /* Safe for tracepoints in case work gets freed by the callback */
- wtag = work;
trace_btrfs_work_sched(work);
thresh_exec_hook(wq);
work->func(work);
if (need_order) {
set_bit(WORK_DONE_BIT, &work->flags);
- run_ordered_work(wq);
+ run_ordered_work(wq, work);
+ } else {
+ /* NB: work must not be dereferenced past this point. */
+ trace_btrfs_all_work_done(wq->fs_info, work);
}
- if (!need_order)
- trace_btrfs_all_work_done(wq->fs_info, wtag);
}
-void btrfs_init_work(struct btrfs_work *work, btrfs_work_func_t uniq_func,
- btrfs_func_t func,
- btrfs_func_t ordered_func,
- btrfs_func_t ordered_free)
+void btrfs_init_work(struct btrfs_work *work, btrfs_func_t func,
+ btrfs_func_t ordered_func, btrfs_func_t ordered_free)
{
work->func = func;
work->ordered_func = ordered_func;
work->ordered_free = ordered_free;
- INIT_WORK(&work->normal_work, uniq_func);
+ INIT_WORK(&work->normal_work, btrfs_work_helper);
INIT_LIST_HEAD(&work->ordered_list);
work->flags = 0;
}
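The run_ordered_work() rework defers freeing the work item the worker is currently executing until the ordered list has been drained; freeing it earlier would let it be recycled while the workqueue code still considers it running, breaking the non-reentrancy assumption spelled out in the comment above. The shape of that deferral, as a simplified single-threaded model with stand-in types:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct work { struct work *next; };

static void drain(struct work **list, struct work *self)
{
	bool free_self = false;

	while (*list) {
		struct work *w = *list;

		*list = w->next;
		if (w == self)
			free_self = true;	/* defer: we are still "inside" it */
		else
			free(w);
	}
	if (free_self)
		free(self);	/* safe now: nothing left can alias it */
}

int main(void)
{
	struct work *self = malloc(sizeof(*self));
	struct work *other = malloc(sizeof(*other));
	struct work *head;

	self->next = NULL;
	other->next = self;
	head = other;		/* list: other -> self */
	drain(&head, self);
	puts("drained");
	return 0;
}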
diff --git a/fs/btrfs/async-thread.h b/fs/btrfs/async-thread.h
index 7861c9feba5f..a4434301d84d 100644
--- a/fs/btrfs/async-thread.h
+++ b/fs/btrfs/async-thread.h
@@ -29,49 +29,20 @@ struct btrfs_work {
unsigned long flags;
};
-#define BTRFS_WORK_HELPER_PROTO(name) \
-void btrfs_##name(struct work_struct *arg)
-
-BTRFS_WORK_HELPER_PROTO(worker_helper);
-BTRFS_WORK_HELPER_PROTO(delalloc_helper);
-BTRFS_WORK_HELPER_PROTO(flush_delalloc_helper);
-BTRFS_WORK_HELPER_PROTO(cache_helper);
-BTRFS_WORK_HELPER_PROTO(submit_helper);
-BTRFS_WORK_HELPER_PROTO(fixup_helper);
-BTRFS_WORK_HELPER_PROTO(endio_helper);
-BTRFS_WORK_HELPER_PROTO(endio_meta_helper);
-BTRFS_WORK_HELPER_PROTO(endio_meta_write_helper);
-BTRFS_WORK_HELPER_PROTO(endio_raid56_helper);
-BTRFS_WORK_HELPER_PROTO(endio_repair_helper);
-BTRFS_WORK_HELPER_PROTO(rmw_helper);
-BTRFS_WORK_HELPER_PROTO(endio_write_helper);
-BTRFS_WORK_HELPER_PROTO(freespace_write_helper);
-BTRFS_WORK_HELPER_PROTO(delayed_meta_helper);
-BTRFS_WORK_HELPER_PROTO(readahead_helper);
-BTRFS_WORK_HELPER_PROTO(qgroup_rescan_helper);
-BTRFS_WORK_HELPER_PROTO(extent_refs_helper);
-BTRFS_WORK_HELPER_PROTO(scrub_helper);
-BTRFS_WORK_HELPER_PROTO(scrubwrc_helper);
-BTRFS_WORK_HELPER_PROTO(scrubnc_helper);
-BTRFS_WORK_HELPER_PROTO(scrubparity_helper);
-
-
struct btrfs_workqueue *btrfs_alloc_workqueue(struct btrfs_fs_info *fs_info,
const char *name,
unsigned int flags,
int limit_active,
int thresh);
-void btrfs_init_work(struct btrfs_work *work, btrfs_work_func_t helper,
- btrfs_func_t func,
- btrfs_func_t ordered_func,
- btrfs_func_t ordered_free);
+void btrfs_init_work(struct btrfs_work *work, btrfs_func_t func,
+ btrfs_func_t ordered_func, btrfs_func_t ordered_free);
void btrfs_queue_work(struct btrfs_workqueue *wq,
struct btrfs_work *work);
void btrfs_destroy_workqueue(struct btrfs_workqueue *wq);
void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int max);
void btrfs_set_work_high_priority(struct btrfs_work *work);
-struct btrfs_fs_info *btrfs_work_owner(const struct btrfs_work *work);
-struct btrfs_fs_info *btrfs_workqueue_owner(const struct __btrfs_workqueue *wq);
+struct btrfs_fs_info * __pure btrfs_work_owner(const struct btrfs_work *work);
+struct btrfs_fs_info * __pure btrfs_workqueue_owner(const struct __btrfs_workqueue *wq);
bool btrfs_workqueue_normal_congested(const struct btrfs_workqueue *wq);
#endif
diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
index 670700cb1110..6934a5b8708f 100644
--- a/fs/btrfs/block-group.c
+++ b/fs/btrfs/block-group.c
@@ -120,12 +120,12 @@ u64 btrfs_get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags)
return get_alloc_profile(fs_info, orig_flags);
}
-void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
+void btrfs_get_block_group(struct btrfs_block_group *cache)
{
atomic_inc(&cache->count);
}
-void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
+void btrfs_put_block_group(struct btrfs_block_group *cache)
{
if (atomic_dec_and_test(&cache->count)) {
WARN_ON(cache->pinned > 0);
@@ -149,22 +149,21 @@ void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
* This adds the block group to the fs_info rb tree for the block group cache
*/
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
- struct btrfs_block_group_cache *block_group)
+ struct btrfs_block_group *block_group)
{
struct rb_node **p;
struct rb_node *parent = NULL;
- struct btrfs_block_group_cache *cache;
+ struct btrfs_block_group *cache;
spin_lock(&info->block_group_cache_lock);
p = &info->block_group_cache_tree.rb_node;
while (*p) {
parent = *p;
- cache = rb_entry(parent, struct btrfs_block_group_cache,
- cache_node);
- if (block_group->key.objectid < cache->key.objectid) {
+ cache = rb_entry(parent, struct btrfs_block_group, cache_node);
+ if (block_group->start < cache->start) {
p = &(*p)->rb_left;
- } else if (block_group->key.objectid > cache->key.objectid) {
+ } else if (block_group->start > cache->start) {
p = &(*p)->rb_right;
} else {
spin_unlock(&info->block_group_cache_lock);
@@ -176,8 +175,8 @@ static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
rb_insert_color(&block_group->cache_node,
&info->block_group_cache_tree);
- if (info->first_logical_byte > block_group->key.objectid)
- info->first_logical_byte = block_group->key.objectid;
+ if (info->first_logical_byte > block_group->start)
+ info->first_logical_byte = block_group->start;
spin_unlock(&info->block_group_cache_lock);
@@ -188,10 +187,10 @@ static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
* This will return the block group at or after bytenr if contains is 0, else
* it will return the block group that contains the bytenr
*/
-static struct btrfs_block_group_cache *block_group_cache_tree_search(
+static struct btrfs_block_group *block_group_cache_tree_search(
struct btrfs_fs_info *info, u64 bytenr, int contains)
{
- struct btrfs_block_group_cache *cache, *ret = NULL;
+ struct btrfs_block_group *cache, *ret = NULL;
struct rb_node *n;
u64 end, start;
@@ -199,13 +198,12 @@ static struct btrfs_block_group_cache *block_group_cache_tree_search(
n = info->block_group_cache_tree.rb_node;
while (n) {
- cache = rb_entry(n, struct btrfs_block_group_cache,
- cache_node);
- end = cache->key.objectid + cache->key.offset - 1;
- start = cache->key.objectid;
+ cache = rb_entry(n, struct btrfs_block_group, cache_node);
+ end = cache->start + cache->length - 1;
+ start = cache->start;
if (bytenr < start) {
- if (!contains && (!ret || start < ret->key.objectid))
+ if (!contains && (!ret || start < ret->start))
ret = cache;
n = n->rb_left;
} else if (bytenr > start) {
@@ -221,8 +219,8 @@ static struct btrfs_block_group_cache *block_group_cache_tree_search(
}
if (ret) {
btrfs_get_block_group(ret);
- if (bytenr == 0 && info->first_logical_byte > ret->key.objectid)
- info->first_logical_byte = ret->key.objectid;
+ if (bytenr == 0 && info->first_logical_byte > ret->start)
+ info->first_logical_byte = ret->start;
}
spin_unlock(&info->block_group_cache_lock);
@@ -232,7 +230,7 @@ static struct btrfs_block_group_cache *block_group_cache_tree_search(
/*
* Return the block group that starts at or after bytenr
*/
-struct btrfs_block_group_cache *btrfs_lookup_first_block_group(
+struct btrfs_block_group *btrfs_lookup_first_block_group(
struct btrfs_fs_info *info, u64 bytenr)
{
return block_group_cache_tree_search(info, bytenr, 0);
@@ -241,14 +239,14 @@ struct btrfs_block_group_cache *btrfs_lookup_first_block_group(
/*
* Return the block group that contains the given bytenr
*/
-struct btrfs_block_group_cache *btrfs_lookup_block_group(
+struct btrfs_block_group *btrfs_lookup_block_group(
struct btrfs_fs_info *info, u64 bytenr)
{
return block_group_cache_tree_search(info, bytenr, 1);
}
-struct btrfs_block_group_cache *btrfs_next_block_group(
- struct btrfs_block_group_cache *cache)
+struct btrfs_block_group *btrfs_next_block_group(
+ struct btrfs_block_group *cache)
{
struct btrfs_fs_info *fs_info = cache->fs_info;
struct rb_node *node;
@@ -257,7 +255,7 @@ struct btrfs_block_group_cache *btrfs_next_block_group(
/* If our block group was removed, we need a full search. */
if (RB_EMPTY_NODE(&cache->cache_node)) {
- const u64 next_bytenr = cache->key.objectid + cache->key.offset;
+ const u64 next_bytenr = cache->start + cache->length;
spin_unlock(&fs_info->block_group_cache_lock);
btrfs_put_block_group(cache);
@@ -266,8 +264,7 @@ struct btrfs_block_group_cache *btrfs_next_block_group(
node = rb_next(&cache->cache_node);
btrfs_put_block_group(cache);
if (node) {
- cache = rb_entry(node, struct btrfs_block_group_cache,
- cache_node);
+ cache = rb_entry(node, struct btrfs_block_group, cache_node);
btrfs_get_block_group(cache);
} else
cache = NULL;
@@ -277,7 +274,7 @@ struct btrfs_block_group_cache *btrfs_next_block_group(
bool btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr)
{
- struct btrfs_block_group_cache *bg;
+ struct btrfs_block_group *bg;
bool ret = true;
bg = btrfs_lookup_block_group(fs_info, bytenr);
@@ -300,7 +297,7 @@ bool btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr)
void btrfs_dec_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr)
{
- struct btrfs_block_group_cache *bg;
+ struct btrfs_block_group *bg;
bg = btrfs_lookup_block_group(fs_info, bytenr);
ASSERT(bg);
@@ -314,7 +311,7 @@ void btrfs_dec_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr)
btrfs_put_block_group(bg);
}
-void btrfs_wait_nocow_writers(struct btrfs_block_group_cache *bg)
+void btrfs_wait_nocow_writers(struct btrfs_block_group *bg)
{
wait_var_event(&bg->nocow_writers, !atomic_read(&bg->nocow_writers));
}
@@ -322,7 +319,7 @@ void btrfs_wait_nocow_writers(struct btrfs_block_group_cache *bg)
void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info,
const u64 start)
{
- struct btrfs_block_group_cache *bg;
+ struct btrfs_block_group *bg;
bg = btrfs_lookup_block_group(fs_info, start);
ASSERT(bg);
@@ -331,7 +328,7 @@ void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info,
btrfs_put_block_group(bg);
}
-void btrfs_wait_block_group_reservations(struct btrfs_block_group_cache *bg)
+void btrfs_wait_block_group_reservations(struct btrfs_block_group *bg)
{
struct btrfs_space_info *space_info = bg->space_info;
@@ -357,7 +354,7 @@ void btrfs_wait_block_group_reservations(struct btrfs_block_group_cache *bg)
}
struct btrfs_caching_control *btrfs_get_caching_control(
- struct btrfs_block_group_cache *cache)
+ struct btrfs_block_group *cache)
{
struct btrfs_caching_control *ctl;
@@ -392,7 +389,7 @@ void btrfs_put_caching_control(struct btrfs_caching_control *ctl)
* Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using
* any of the information in this block group.
*/
-void btrfs_wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
+void btrfs_wait_block_group_cache_progress(struct btrfs_block_group *cache,
u64 num_bytes)
{
struct btrfs_caching_control *caching_ctl;
@@ -401,13 +398,13 @@ void btrfs_wait_block_group_cache_progress(struct btrfs_block_group_cache *cache
if (!caching_ctl)
return;
- wait_event(caching_ctl->wait, btrfs_block_group_cache_done(cache) ||
+ wait_event(caching_ctl->wait, btrfs_block_group_done(cache) ||
(cache->free_space_ctl->free_space >= num_bytes));
btrfs_put_caching_control(caching_ctl);
}
-int btrfs_wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
+int btrfs_wait_block_group_cache_done(struct btrfs_block_group *cache)
{
struct btrfs_caching_control *caching_ctl;
int ret = 0;
@@ -416,7 +413,7 @@ int btrfs_wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
if (!caching_ctl)
return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0;
- wait_event(caching_ctl->wait, btrfs_block_group_cache_done(cache));
+ wait_event(caching_ctl->wait, btrfs_block_group_done(cache));
if (cache->cached == BTRFS_CACHE_ERROR)
ret = -EIO;
btrfs_put_caching_control(caching_ctl);
@@ -424,11 +421,11 @@ int btrfs_wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
}
#ifdef CONFIG_BTRFS_DEBUG
-static void fragment_free_space(struct btrfs_block_group_cache *block_group)
+static void fragment_free_space(struct btrfs_block_group *block_group)
{
struct btrfs_fs_info *fs_info = block_group->fs_info;
- u64 start = block_group->key.objectid;
- u64 len = block_group->key.offset;
+ u64 start = block_group->start;
+ u64 len = block_group->length;
u64 chunk = block_group->flags & BTRFS_BLOCK_GROUP_METADATA ?
fs_info->nodesize : fs_info->sectorsize;
u64 step = chunk << 1;
@@ -450,8 +447,7 @@ static void fragment_free_space(struct btrfs_block_group_cache *block_group)
* used yet since their free space will be released as soon as the transaction
* commits.
*/
-u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
- u64 start, u64 end)
+u64 add_new_free_space(struct btrfs_block_group *block_group, u64 start, u64 end)
{
struct btrfs_fs_info *info = block_group->fs_info;
u64 extent_start, extent_end, size, total_added = 0;
@@ -491,7 +487,7 @@ u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
static int load_extent_tree_free(struct btrfs_caching_control *caching_ctl)
{
- struct btrfs_block_group_cache *block_group = caching_ctl->block_group;
+ struct btrfs_block_group *block_group = caching_ctl->block_group;
struct btrfs_fs_info *fs_info = block_group->fs_info;
struct btrfs_root *extent_root = fs_info->extent_root;
struct btrfs_path *path;
@@ -507,7 +503,7 @@ static int load_extent_tree_free(struct btrfs_caching_control *caching_ctl)
if (!path)
return -ENOMEM;
- last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);
+ last = max_t(u64, block_group->start, BTRFS_SUPER_INFO_OFFSET);
#ifdef CONFIG_BTRFS_DEBUG
/*
@@ -587,13 +583,12 @@ next:
goto next;
}
- if (key.objectid < block_group->key.objectid) {
+ if (key.objectid < block_group->start) {
path->slots[0]++;
continue;
}
- if (key.objectid >= block_group->key.objectid +
- block_group->key.offset)
+ if (key.objectid >= block_group->start + block_group->length)
break;
if (key.type == BTRFS_EXTENT_ITEM_KEY ||
@@ -617,8 +612,7 @@ next:
ret = 0;
total_found += add_new_free_space(block_group, last,
- block_group->key.objectid +
- block_group->key.offset);
+ block_group->start + block_group->length);
caching_ctl->progress = (u64)-1;
out:
@@ -628,7 +622,7 @@ out:
static noinline void caching_thread(struct btrfs_work *work)
{
- struct btrfs_block_group_cache *block_group;
+ struct btrfs_block_group *block_group;
struct btrfs_fs_info *fs_info;
struct btrfs_caching_control *caching_ctl;
int ret;
@@ -656,8 +650,7 @@ static noinline void caching_thread(struct btrfs_work *work)
spin_lock(&block_group->space_info->lock);
spin_lock(&block_group->lock);
- bytes_used = block_group->key.offset -
- btrfs_block_group_used(&block_group->item);
+ bytes_used = block_group->length - block_group->used;
block_group->space_info->bytes_used += bytes_used >> 1;
spin_unlock(&block_group->lock);
spin_unlock(&block_group->space_info->lock);
@@ -677,8 +670,7 @@ static noinline void caching_thread(struct btrfs_work *work)
btrfs_put_block_group(block_group);
}
-int btrfs_cache_block_group(struct btrfs_block_group_cache *cache,
- int load_cache_only)
+int btrfs_cache_block_group(struct btrfs_block_group *cache, int load_cache_only)
{
DEFINE_WAIT(wait);
struct btrfs_fs_info *fs_info = cache->fs_info;
@@ -693,10 +685,9 @@ int btrfs_cache_block_group(struct btrfs_block_group_cache *cache,
mutex_init(&caching_ctl->mutex);
init_waitqueue_head(&caching_ctl->wait);
caching_ctl->block_group = cache;
- caching_ctl->progress = cache->key.objectid;
+ caching_ctl->progress = cache->start;
refcount_set(&caching_ctl->count, 1);
- btrfs_init_work(&caching_ctl->work, btrfs_cache_helper,
- caching_thread, NULL, NULL);
+ btrfs_init_work(&caching_ctl->work, caching_thread, NULL, NULL);
spin_lock(&cache->lock);
/*
@@ -763,8 +754,7 @@ int btrfs_cache_block_group(struct btrfs_block_group_cache *cache,
spin_lock(&cache->space_info->lock);
spin_lock(&cache->lock);
- bytes_used = cache->key.offset -
- btrfs_block_group_used(&cache->item);
+ bytes_used = cache->length - cache->used;
cache->space_info->bytes_used += bytes_used >> 1;
spin_unlock(&cache->lock);
spin_unlock(&cache->space_info->lock);
@@ -833,27 +823,36 @@ static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
*
* - RAID56 - in case there's neither RAID5 nor RAID6 profile block group
* in the whole filesystem
+ *
+ * - RAID1C34 - same as above for RAID1C3 and RAID1C4 block groups
*/
static void clear_incompat_bg_bits(struct btrfs_fs_info *fs_info, u64 flags)
{
- if (flags & BTRFS_BLOCK_GROUP_RAID56_MASK) {
+ bool found_raid56 = false;
+ bool found_raid1c34 = false;
+
+ if ((flags & BTRFS_BLOCK_GROUP_RAID56_MASK) ||
+ (flags & BTRFS_BLOCK_GROUP_RAID1C3) ||
+ (flags & BTRFS_BLOCK_GROUP_RAID1C4)) {
struct list_head *head = &fs_info->space_info;
struct btrfs_space_info *sinfo;
list_for_each_entry_rcu(sinfo, head, list) {
- bool found = false;
-
down_read(&sinfo->groups_sem);
if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID5]))
- found = true;
+ found_raid56 = true;
if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID6]))
- found = true;
+ found_raid56 = true;
+ if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID1C3]))
+ found_raid1c34 = true;
+ if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID1C4]))
+ found_raid1c34 = true;
up_read(&sinfo->groups_sem);
-
- if (found)
- return;
}
- btrfs_clear_fs_incompat(fs_info, RAID56);
+ if (found_raid56)
+ btrfs_clear_fs_incompat(fs_info, RAID56);
+ if (found_raid1c34)
+ btrfs_clear_fs_incompat(fs_info, RAID1C34);
}
}
@@ -863,7 +862,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_root *root = fs_info->extent_root;
struct btrfs_path *path;
- struct btrfs_block_group_cache *block_group;
+ struct btrfs_block_group *block_group;
struct btrfs_free_cluster *cluster;
struct btrfs_root *tree_root = fs_info->tree_root;
struct btrfs_key key;
@@ -886,10 +885,9 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
* remove it.
*/
btrfs_free_excluded_extents(block_group);
- btrfs_free_ref_tree_range(fs_info, block_group->key.objectid,
- block_group->key.offset);
+ btrfs_free_ref_tree_range(fs_info, block_group->start,
+ block_group->length);
- memcpy(&key, &block_group->key, sizeof(key));
index = btrfs_bg_flags_to_raid_index(block_group->flags);
factor = btrfs_bg_type_to_factor(block_group->flags);
@@ -967,8 +965,8 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
}
key.objectid = BTRFS_FREE_SPACE_OBJECTID;
- key.offset = block_group->key.objectid;
key.type = 0;
+ key.offset = block_group->start;
ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
if (ret < 0)
@@ -987,7 +985,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
&fs_info->block_group_cache_tree);
RB_CLEAR_NODE(&block_group->cache_node);
- if (fs_info->first_logical_byte == block_group->key.objectid)
+ if (fs_info->first_logical_byte == block_group->start)
fs_info->first_logical_byte = (u64)-1;
spin_unlock(&fs_info->block_group_cache_lock);
@@ -1048,19 +1046,21 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
WARN_ON(block_group->space_info->total_bytes
- < block_group->key.offset);
+ < block_group->length);
WARN_ON(block_group->space_info->bytes_readonly
- < block_group->key.offset);
+ < block_group->length);
WARN_ON(block_group->space_info->disk_total
- < block_group->key.offset * factor);
+ < block_group->length * factor);
}
- block_group->space_info->total_bytes -= block_group->key.offset;
- block_group->space_info->bytes_readonly -= block_group->key.offset;
- block_group->space_info->disk_total -= block_group->key.offset * factor;
+ block_group->space_info->total_bytes -= block_group->length;
+ block_group->space_info->bytes_readonly -= block_group->length;
+ block_group->space_info->disk_total -= block_group->length * factor;
spin_unlock(&block_group->space_info->lock);
- memcpy(&key, &block_group->key, sizeof(key));
+ key.objectid = block_group->start;
+ key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
+ key.offset = block_group->length;
mutex_lock(&fs_info->chunk_mutex);
spin_lock(&block_group->lock);
@@ -1180,7 +1180,7 @@ struct btrfs_trans_handle *btrfs_start_trans_remove_block_group(
* data in this block group. That check should be done by relocation routine,
* not this function.
*/
-static int inc_block_group_ro(struct btrfs_block_group_cache *cache, int force)
+static int inc_block_group_ro(struct btrfs_block_group *cache, int force)
{
struct btrfs_space_info *sinfo = cache->space_info;
u64 num_bytes;
@@ -1209,8 +1209,8 @@ static int inc_block_group_ro(struct btrfs_block_group_cache *cache, int force)
goto out;
}
- num_bytes = cache->key.offset - cache->reserved - cache->pinned -
- cache->bytes_super - btrfs_block_group_used(&cache->item);
+ num_bytes = cache->length - cache->reserved - cache->pinned -
+ cache->bytes_super - cache->used;
sinfo_used = btrfs_space_info_used(sinfo, true);
/*
@@ -1231,8 +1231,7 @@ out:
spin_unlock(&sinfo->lock);
if (ret == -ENOSPC && btrfs_test_opt(cache->fs_info, ENOSPC_DEBUG)) {
btrfs_info(cache->fs_info,
- "unable to make block group %llu ro",
- cache->key.objectid);
+ "unable to make block group %llu ro", cache->start);
btrfs_info(cache->fs_info,
"sinfo_used=%llu bg_num_bytes=%llu min_allocable=%llu",
sinfo_used, num_bytes, min_allocable_bytes);
@@ -1247,7 +1246,7 @@ out:
*/
void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
{
- struct btrfs_block_group_cache *block_group;
+ struct btrfs_block_group *block_group;
struct btrfs_space_info *space_info;
struct btrfs_trans_handle *trans;
int ret = 0;
@@ -1261,7 +1260,7 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
int trimming;
block_group = list_first_entry(&fs_info->unused_bgs,
- struct btrfs_block_group_cache,
+ struct btrfs_block_group,
bg_list);
list_del_init(&block_group->bg_list);
@@ -1279,8 +1278,7 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
down_write(&space_info->groups_sem);
spin_lock(&block_group->lock);
if (block_group->reserved || block_group->pinned ||
- btrfs_block_group_used(&block_group->item) ||
- block_group->ro ||
+ block_group->used || block_group->ro ||
list_is_singular(&block_group->list)) {
/*
* We want to bail if we made new allocations or have
@@ -1308,7 +1306,7 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
* properly if we fail to join the transaction.
*/
trans = btrfs_start_trans_remove_block_group(fs_info,
- block_group->key.objectid);
+ block_group->start);
if (IS_ERR(trans)) {
btrfs_dec_block_group_ro(block_group);
ret = PTR_ERR(trans);
@@ -1319,8 +1317,8 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
* We could have pending pinned extents for this block group,
* just delete them, we don't care about them anymore.
*/
- start = block_group->key.objectid;
- end = start + block_group->key.offset - 1;
+ start = block_group->start;
+ end = start + block_group->length - 1;
/*
* Hold the unused_bg_unpin_mutex lock to avoid racing with
* btrfs_finish_extent_commit(). If we are at transaction N,
@@ -1375,7 +1373,7 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
* Btrfs_remove_chunk will abort the transaction if things go
* horribly wrong.
*/
- ret = btrfs_remove_chunk(trans, block_group->key.objectid);
+ ret = btrfs_remove_chunk(trans, block_group->start);
if (ret) {
if (trimming)
@@ -1410,7 +1408,7 @@ next:
spin_unlock(&fs_info->unused_bgs_lock);
}
-void btrfs_mark_bg_unused(struct btrfs_block_group_cache *bg)
+void btrfs_mark_bg_unused(struct btrfs_block_group *bg)
{
struct btrfs_fs_info *fs_info = bg->fs_info;
@@ -1478,7 +1476,7 @@ static int find_first_block_group(struct btrfs_fs_info *fs_info,
read_extent_buffer(leaf, &bg,
btrfs_item_ptr_offset(leaf, slot),
sizeof(bg));
- flags = btrfs_block_group_flags(&bg) &
+ flags = btrfs_stack_block_group_flags(&bg) &
BTRFS_BLOCK_GROUP_TYPE_MASK;
if (flags != (em->map_lookup->type &
@@ -1518,7 +1516,7 @@ static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
write_sequnlock(&fs_info->profiles_lock);
}
-static int exclude_super_stripes(struct btrfs_block_group_cache *cache)
+static int exclude_super_stripes(struct btrfs_block_group *cache)
{
struct btrfs_fs_info *fs_info = cache->fs_info;
u64 bytenr;
@@ -1526,10 +1524,10 @@ static int exclude_super_stripes(struct btrfs_block_group_cache *cache)
int stripe_len;
int i, nr, ret;
- if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
- stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
+ if (cache->start < BTRFS_SUPER_INFO_OFFSET) {
+ stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->start;
cache->bytes_super += stripe_len;
- ret = btrfs_add_excluded_extent(fs_info, cache->key.objectid,
+ ret = btrfs_add_excluded_extent(fs_info, cache->start,
stripe_len);
if (ret)
return ret;
@@ -1537,7 +1535,7 @@ static int exclude_super_stripes(struct btrfs_block_group_cache *cache)
for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
bytenr = btrfs_sb_offset(i);
- ret = btrfs_rmap_block(fs_info, cache->key.objectid,
+ ret = btrfs_rmap_block(fs_info, cache->start,
bytenr, &logical, &nr, &stripe_len);
if (ret)
return ret;
@@ -1545,21 +1543,19 @@ static int exclude_super_stripes(struct btrfs_block_group_cache *cache)
while (nr--) {
u64 start, len;
- if (logical[nr] > cache->key.objectid +
- cache->key.offset)
+ if (logical[nr] > cache->start + cache->length)
continue;
- if (logical[nr] + stripe_len <= cache->key.objectid)
+ if (logical[nr] + stripe_len <= cache->start)
continue;
start = logical[nr];
- if (start < cache->key.objectid) {
- start = cache->key.objectid;
+ if (start < cache->start) {
+ start = cache->start;
len = (logical[nr] + stripe_len) - start;
} else {
len = min_t(u64, stripe_len,
- cache->key.objectid +
- cache->key.offset - start);
+ cache->start + cache->length - start);
}
cache->bytes_super += len;
@@ -1575,7 +1571,7 @@ static int exclude_super_stripes(struct btrfs_block_group_cache *cache)
return 0;
}
-static void link_block_group(struct btrfs_block_group_cache *cache)
+static void link_block_group(struct btrfs_block_group *cache)
{
struct btrfs_space_info *space_info = cache->space_info;
int index = btrfs_bg_flags_to_raid_index(cache->flags);
@@ -1591,10 +1587,10 @@ static void link_block_group(struct btrfs_block_group_cache *cache)
btrfs_sysfs_add_block_group_type(cache);
}
-static struct btrfs_block_group_cache *btrfs_create_block_group_cache(
+static struct btrfs_block_group *btrfs_create_block_group_cache(
struct btrfs_fs_info *fs_info, u64 start, u64 size)
{
- struct btrfs_block_group_cache *cache;
+ struct btrfs_block_group *cache;
cache = kzalloc(sizeof(*cache), GFP_NOFS);
if (!cache)
@@ -1607,9 +1603,8 @@ static struct btrfs_block_group_cache *btrfs_create_block_group_cache(
return NULL;
}
- cache->key.objectid = start;
- cache->key.offset = size;
- cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
+ cache->start = start;
+ cache->length = size;
cache->fs_info = fs_info;
cache->full_stripe_len = btrfs_full_stripe_len(fs_info, start);
@@ -1640,7 +1635,7 @@ static int check_chunk_block_group_mappings(struct btrfs_fs_info *fs_info)
{
struct extent_map_tree *map_tree = &fs_info->mapping_tree;
struct extent_map *em;
- struct btrfs_block_group_cache *bg;
+ struct btrfs_block_group *bg;
u64 start = 0;
int ret = 0;
@@ -1665,15 +1660,14 @@ static int check_chunk_block_group_mappings(struct btrfs_fs_info *fs_info)
free_extent_map(em);
break;
}
- if (bg->key.objectid != em->start ||
- bg->key.offset != em->len ||
+ if (bg->start != em->start || bg->length != em->len ||
(bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK) !=
(em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK)) {
btrfs_err(fs_info,
"chunk start=%llu len=%llu flags=0x%llx doesn't match block group start=%llu len=%llu flags=0x%llx",
em->start, em->len,
em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK,
- bg->key.objectid, bg->key.offset,
+ bg->start, bg->length,
bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK);
ret = -EUCLEAN;
free_extent_map(em);
@@ -1687,22 +1681,117 @@ static int check_chunk_block_group_mappings(struct btrfs_fs_info *fs_info)
return ret;
}
+static int read_one_block_group(struct btrfs_fs_info *info,
+ struct btrfs_path *path,
+ const struct btrfs_key *key,
+ int need_clear)
+{
+ struct extent_buffer *leaf = path->nodes[0];
+ struct btrfs_block_group *cache;
+ struct btrfs_space_info *space_info;
+ struct btrfs_block_group_item bgi;
+ const bool mixed = btrfs_fs_incompat(info, MIXED_GROUPS);
+ int slot = path->slots[0];
+ int ret;
+
+ ASSERT(key->type == BTRFS_BLOCK_GROUP_ITEM_KEY);
+
+ cache = btrfs_create_block_group_cache(info, key->objectid, key->offset);
+ if (!cache)
+ return -ENOMEM;
+
+ if (need_clear) {
+ /*
+ * When we mount with old space cache, we need to
+ * set BTRFS_DC_CLEAR and set dirty flag.
+ *
+ * a) Setting 'BTRFS_DC_CLEAR' makes sure that we
+ * truncate the old free space cache inode and
+ * setup a new one.
+ * b) Setting 'dirty flag' makes sure that we flush
+ * the new space cache info onto disk.
+ */
+ if (btrfs_test_opt(info, SPACE_CACHE))
+ cache->disk_cache_state = BTRFS_DC_CLEAR;
+ }
+ read_extent_buffer(leaf, &bgi, btrfs_item_ptr_offset(leaf, slot),
+ sizeof(bgi));
+ cache->used = btrfs_stack_block_group_used(&bgi);
+ cache->flags = btrfs_stack_block_group_flags(&bgi);
+ if (!mixed && ((cache->flags & BTRFS_BLOCK_GROUP_METADATA) &&
+ (cache->flags & BTRFS_BLOCK_GROUP_DATA))) {
+ btrfs_err(info,
+"bg %llu is a mixed block group but filesystem hasn't enabled mixed block groups",
+ cache->start);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ /*
+ * We need to exclude the super stripes now so that the space info has
+ * super bytes accounted for, otherwise we'll think we have more space
+ * than we actually do.
+ */
+ ret = exclude_super_stripes(cache);
+ if (ret) {
+ /* We may have excluded something, so call this just in case. */
+ btrfs_free_excluded_extents(cache);
+ goto error;
+ }
+
+ /*
+ * Check for two cases, either we are full, and therefore don't need
+ * to bother with the caching work since we won't find any space, or we
+ * are empty, and we can just add all the space in and be done with it.
+ * This saves us _a_lot_ of time, particularly in the full case.
+ */
+ if (key->offset == cache->used) {
+ cache->last_byte_to_unpin = (u64)-1;
+ cache->cached = BTRFS_CACHE_FINISHED;
+ btrfs_free_excluded_extents(cache);
+ } else if (cache->used == 0) {
+ cache->last_byte_to_unpin = (u64)-1;
+ cache->cached = BTRFS_CACHE_FINISHED;
+ add_new_free_space(cache, key->objectid,
+ key->objectid + key->offset);
+ btrfs_free_excluded_extents(cache);
+ }
+
+ ret = btrfs_add_block_group_cache(info, cache);
+ if (ret) {
+ btrfs_remove_free_space_cache(cache);
+ goto error;
+ }
+ trace_btrfs_add_block_group(info, cache, 0);
+ btrfs_update_space_info(info, cache->flags, key->offset,
+ cache->used, cache->bytes_super, &space_info);
+
+ cache->space_info = space_info;
+
+ link_block_group(cache);
+
+ set_avail_alloc_bits(info, cache->flags);
+ if (btrfs_chunk_readonly(info, cache->start)) {
+ inc_block_group_ro(cache, 1);
+ } else if (cache->used == 0) {
+ ASSERT(list_empty(&cache->bg_list));
+ btrfs_mark_bg_unused(cache);
+ }
+ return 0;
+error:
+ btrfs_put_block_group(cache);
+ return ret;
+}
+
int btrfs_read_block_groups(struct btrfs_fs_info *info)
{
struct btrfs_path *path;
int ret;
- struct btrfs_block_group_cache *cache;
+ struct btrfs_block_group *cache;
struct btrfs_space_info *space_info;
struct btrfs_key key;
- struct btrfs_key found_key;
- struct extent_buffer *leaf;
int need_clear = 0;
u64 cache_gen;
- u64 feature;
- int mixed;
-
- feature = btrfs_super_incompat_flags(info->super_copy);
- mixed = !!(feature & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS);
key.objectid = 0;
key.offset = 0;
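read_one_block_group() also reflects the wider change in this series: struct btrfs_block_group no longer embeds the on-disk item and key, keeping flat start/length/used/flags fields and converting to the stack item only at the tree boundary. A stand-in sketch of that split; the types below are illustrative, not the btrfs definitions:

#include <stdint.h>
#include <stdio.h>

struct disk_bg_item {	/* on-disk format (packed in the real code) */
	uint64_t used;
	uint64_t chunk_objectid;
	uint64_t flags;
};

struct block_group {	/* in-memory: key fields flattened */
	uint64_t start;		/* was key.objectid */
	uint64_t length;	/* was key.offset */
	uint64_t used;		/* was item.used */
	uint64_t flags;
};

static void read_one(struct block_group *bg, uint64_t objectid,
		     uint64_t offset, const struct disk_bg_item *item)
{
	bg->start = objectid;
	bg->length = offset;
	bg->used = item->used;
	bg->flags = item->flags;
}

int main(void)
{
	struct disk_bg_item item = { .used = 4096, .flags = 1 };
	struct block_group bg;

	read_one(&bg, 1024 * 1024, 256 * 1024 * 1024, &item);
	printf("bg: start=%llu len=%llu used=%llu\n",
	       (unsigned long long)bg.start,
	       (unsigned long long)bg.length,
	       (unsigned long long)bg.used);
	return 0;
}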
@@ -1726,108 +1815,13 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info)
if (ret != 0)
goto error;
- leaf = path->nodes[0];
- btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
-
- cache = btrfs_create_block_group_cache(info, found_key.objectid,
- found_key.offset);
- if (!cache) {
- ret = -ENOMEM;
- goto error;
- }
-
- if (need_clear) {
- /*
- * When we mount with old space cache, we need to
- * set BTRFS_DC_CLEAR and set dirty flag.
- *
- * a) Setting 'BTRFS_DC_CLEAR' makes sure that we
- * truncate the old free space cache inode and
- * setup a new one.
- * b) Setting 'dirty flag' makes sure that we flush
- * the new space cache info onto disk.
- */
- if (btrfs_test_opt(info, SPACE_CACHE))
- cache->disk_cache_state = BTRFS_DC_CLEAR;
- }
-
- read_extent_buffer(leaf, &cache->item,
- btrfs_item_ptr_offset(leaf, path->slots[0]),
- sizeof(cache->item));
- cache->flags = btrfs_block_group_flags(&cache->item);
- if (!mixed &&
- ((cache->flags & BTRFS_BLOCK_GROUP_METADATA) &&
- (cache->flags & BTRFS_BLOCK_GROUP_DATA))) {
- btrfs_err(info,
-"bg %llu is a mixed block group but filesystem hasn't enabled mixed block groups",
- cache->key.objectid);
- btrfs_put_block_group(cache);
- ret = -EINVAL;
+ btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
+ ret = read_one_block_group(info, path, &key, need_clear);
+ if (ret < 0)
goto error;
- }
-
- key.objectid = found_key.objectid + found_key.offset;
+ key.objectid += key.offset;
+ key.offset = 0;
btrfs_release_path(path);
-
- /*
- * We need to exclude the super stripes now so that the space
- * info has super bytes accounted for, otherwise we'll think
- * we have more space than we actually do.
- */
- ret = exclude_super_stripes(cache);
- if (ret) {
- /*
- * We may have excluded something, so call this just in
- * case.
- */
- btrfs_free_excluded_extents(cache);
- btrfs_put_block_group(cache);
- goto error;
- }
-
- /*
- * Check for two cases, either we are full, and therefore
- * don't need to bother with the caching work since we won't
- * find any space, or we are empty, and we can just add all
- * the space in and be done with it. This saves us _a_lot_ of
- * time, particularly in the full case.
- */
- if (found_key.offset == btrfs_block_group_used(&cache->item)) {
- cache->last_byte_to_unpin = (u64)-1;
- cache->cached = BTRFS_CACHE_FINISHED;
- btrfs_free_excluded_extents(cache);
- } else if (btrfs_block_group_used(&cache->item) == 0) {
- cache->last_byte_to_unpin = (u64)-1;
- cache->cached = BTRFS_CACHE_FINISHED;
- add_new_free_space(cache, found_key.objectid,
- found_key.objectid +
- found_key.offset);
- btrfs_free_excluded_extents(cache);
- }
-
- ret = btrfs_add_block_group_cache(info, cache);
- if (ret) {
- btrfs_remove_free_space_cache(cache);
- btrfs_put_block_group(cache);
- goto error;
- }
-
- trace_btrfs_add_block_group(info, cache, 0);
- btrfs_update_space_info(info, cache->flags, found_key.offset,
- btrfs_block_group_used(&cache->item),
- cache->bytes_super, &space_info);
-
- cache->space_info = space_info;
-
- link_block_group(cache);
-
- set_avail_alloc_bits(info, cache->flags);
- if (btrfs_chunk_readonly(info, cache->key.objectid)) {
- inc_block_group_ro(cache, 1);
- } else if (btrfs_block_group_used(&cache->item) == 0) {
- ASSERT(list_empty(&cache->bg_list));
- btrfs_mark_bg_unused(cache);
- }
}
list_for_each_entry_rcu(space_info, &info->space_info, list) {
@@ -1861,7 +1855,7 @@ error:
void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
- struct btrfs_block_group_cache *block_group;
+ struct btrfs_block_group *block_group;
struct btrfs_root *extent_root = fs_info->extent_root;
struct btrfs_block_group_item item;
struct btrfs_key key;
@@ -1872,14 +1866,19 @@ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans)
while (!list_empty(&trans->new_bgs)) {
block_group = list_first_entry(&trans->new_bgs,
- struct btrfs_block_group_cache,
+ struct btrfs_block_group,
bg_list);
if (ret)
goto next;
spin_lock(&block_group->lock);
- memcpy(&item, &block_group->item, sizeof(item));
- memcpy(&key, &block_group->key, sizeof(key));
+ btrfs_set_stack_block_group_used(&item, block_group->used);
+ btrfs_set_stack_block_group_chunk_objectid(&item,
+ BTRFS_FIRST_CHUNK_TREE_OBJECTID);
+ btrfs_set_stack_block_group_flags(&item, block_group->flags);
+ key.objectid = block_group->start;
+ key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
+ key.offset = block_group->length;
spin_unlock(&block_group->lock);
ret = btrfs_insert_item(trans, extent_root, &key, &item,
@@ -1902,7 +1901,7 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used,
u64 type, u64 chunk_offset, u64 size)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
- struct btrfs_block_group_cache *cache;
+ struct btrfs_block_group *cache;
int ret;
btrfs_set_log_full_commit(trans);
@@ -1911,11 +1910,7 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used,
if (!cache)
return -ENOMEM;
- btrfs_set_block_group_used(&cache->item, bytes_used);
- btrfs_set_block_group_chunk_objectid(&cache->item,
- BTRFS_FIRST_CHUNK_TREE_OBJECTID);
- btrfs_set_block_group_flags(&cache->item, type);
-
+ cache->used = bytes_used;
cache->flags = type;
cache->last_byte_to_unpin = (u64)-1;
cache->cached = BTRFS_CACHE_FINISHED;
@@ -2022,8 +2017,17 @@ static u64 update_block_group_flags(struct btrfs_fs_info *fs_info, u64 flags)
return flags;
}
-int btrfs_inc_block_group_ro(struct btrfs_block_group_cache *cache)
-
+/*
+ * Mark one block group RO, can be called several times for the same block
+ * group.
+ *
+ * @cache: the destination block group
+ * @do_chunk_alloc: whether we need to do chunk pre-allocation; this is to
+ * ensure we still have some free space after marking this
+ * block group RO.
+ */
+int btrfs_inc_block_group_ro(struct btrfs_block_group *cache,
+ bool do_chunk_alloc)
{
struct btrfs_fs_info *fs_info = cache->fs_info;
struct btrfs_trans_handle *trans;
@@ -2053,25 +2057,29 @@ again:
goto again;
}
- /*
- * if we are changing raid levels, try to allocate a corresponding
- * block group with the new raid level.
- */
- alloc_flags = update_block_group_flags(fs_info, cache->flags);
- if (alloc_flags != cache->flags) {
- ret = btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE);
+ if (do_chunk_alloc) {
/*
- * ENOSPC is allowed here, we may have enough space
- * already allocated at the new raid level to
- * carry on
+ * If we are changing raid levels, try to allocate a
+ * corresponding block group with the new raid level.
*/
- if (ret == -ENOSPC)
- ret = 0;
- if (ret < 0)
- goto out;
+ alloc_flags = update_block_group_flags(fs_info, cache->flags);
+ if (alloc_flags != cache->flags) {
+ ret = btrfs_chunk_alloc(trans, alloc_flags,
+ CHUNK_ALLOC_FORCE);
+ /*
+ * ENOSPC is allowed here, we may have enough space
+ * already allocated at the new raid level to carry on
+ */
+ if (ret == -ENOSPC)
+ ret = 0;
+ if (ret < 0)
+ goto out;
+ }
}
- ret = inc_block_group_ro(cache, 0);
+ ret = inc_block_group_ro(cache, !do_chunk_alloc);
+ if (!do_chunk_alloc)
+ goto unlock_out;
if (!ret)
goto out;
alloc_flags = btrfs_get_alloc_profile(fs_info, cache->space_info->flags);
@@ -2086,13 +2094,14 @@ out:
check_system_chunk(trans, alloc_flags);
mutex_unlock(&fs_info->chunk_mutex);
}
+unlock_out:
mutex_unlock(&fs_info->ro_block_group_mutex);
btrfs_end_transaction(trans);
return ret;
}
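
The new do_chunk_alloc flag splits callers into two modes: one pre-allocates a
chunk so some free space remains once the group goes read-only, the other
skips that and forces the flip. A rough user-space sketch of the resulting
control flow, with stand-in helpers in place of the kernel API:

#include <stdbool.h>
#include <stdio.h>

#define ENOSPC 28

/* stand-ins for btrfs_chunk_alloc() and inc_block_group_ro() */
static int chunk_alloc(void) { return -ENOSPC; }
static int mark_ro(bool force) { return force ? 0 : -ENOSPC; }

static int inc_block_group_ro(bool do_chunk_alloc)
{
	int ret;

	if (do_chunk_alloc) {
		ret = chunk_alloc();
		/* ENOSPC is tolerated: enough space may already exist at
		 * the target raid level to carry on */
		if (ret == -ENOSPC)
			ret = 0;
		if (ret < 0)
			return ret;
	}
	/* when pre-allocation was skipped, force the RO flip */
	return mark_ro(!do_chunk_alloc);
}

int main(void)
{
	printf("forced flip: %d\n", inc_block_group_ro(false));
	printf("with pre-alloc: %d\n", inc_block_group_ro(true));
	return 0;
}
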
-void btrfs_dec_block_group_ro(struct btrfs_block_group_cache *cache)
+void btrfs_dec_block_group_ro(struct btrfs_block_group *cache)
{
struct btrfs_space_info *sinfo = cache->space_info;
u64 num_bytes;
@@ -2102,9 +2111,8 @@ void btrfs_dec_block_group_ro(struct btrfs_block_group_cache *cache)
spin_lock(&sinfo->lock);
spin_lock(&cache->lock);
if (!--cache->ro) {
- num_bytes = cache->key.offset - cache->reserved -
- cache->pinned - cache->bytes_super -
- btrfs_block_group_used(&cache->item);
+ num_bytes = cache->length - cache->reserved -
+ cache->pinned - cache->bytes_super - cache->used;
sinfo->bytes_readonly -= num_bytes;
list_del_init(&cache->ro_list);
}
@@ -2114,15 +2122,21 @@ void btrfs_dec_block_group_ro(struct btrfs_block_group_cache *cache)
static int write_one_cache_group(struct btrfs_trans_handle *trans,
struct btrfs_path *path,
- struct btrfs_block_group_cache *cache)
+ struct btrfs_block_group *cache)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
int ret;
struct btrfs_root *extent_root = fs_info->extent_root;
unsigned long bi;
struct extent_buffer *leaf;
+ struct btrfs_block_group_item bgi;
+ struct btrfs_key key;
+
+ key.objectid = cache->start;
+ key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
+ key.offset = cache->length;
- ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
+ ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 1);
if (ret) {
if (ret > 0)
ret = -ENOENT;
@@ -2131,7 +2145,11 @@ static int write_one_cache_group(struct btrfs_trans_handle *trans,
leaf = path->nodes[0];
bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
- write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
+ btrfs_set_stack_block_group_used(&bgi, cache->used);
+ btrfs_set_stack_block_group_chunk_objectid(&bgi,
+ BTRFS_FIRST_CHUNK_TREE_OBJECTID);
+ btrfs_set_stack_block_group_flags(&bgi, cache->flags);
+ write_extent_buffer(leaf, &bgi, bi, sizeof(bgi));
btrfs_mark_buffer_dirty(leaf);
fail:
btrfs_release_path(path);
@@ -2139,7 +2157,7 @@ fail:
}
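
With the key and item members gone from the in-memory structure, the on-disk
key and block group item are now assembled on the stack from start, length,
used and flags at write time. A standalone sketch of that mapping; the struct
layouts and constants below are simplified stand-ins for the real on-disk
format:

#include <stdint.h>
#include <stdio.h>

#define BLOCK_GROUP_ITEM_KEY      192  /* illustrative */
#define FIRST_CHUNK_TREE_OBJECTID 256  /* illustrative */

struct bg { uint64_t start, length, used, flags; };
struct key { uint64_t objectid; uint8_t type; uint64_t offset; };
struct bg_item { uint64_t used, chunk_objectid, flags; };

static void build_item(const struct bg *bg, struct key *key,
		       struct bg_item *bgi)
{
	key->objectid = bg->start;    /* was cache->key.objectid */
	key->type = BLOCK_GROUP_ITEM_KEY;
	key->offset = bg->length;     /* was cache->key.offset */

	bgi->used = bg->used;         /* was btrfs_block_group_used() */
	bgi->chunk_objectid = FIRST_CHUNK_TREE_OBJECTID;
	bgi->flags = bg->flags;
}

int main(void)
{
	struct bg bg = { .start = 1 << 20, .length = 1 << 30, .used = 4096 };
	struct key key;
	struct bg_item bgi;

	build_item(&bg, &key, &bgi);
	printf("key %llu/%llu used %llu\n",
	       (unsigned long long)key.objectid,
	       (unsigned long long)key.offset,
	       (unsigned long long)bgi.used);
	return 0;
}
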
-static int cache_save_setup(struct btrfs_block_group_cache *block_group,
+static int cache_save_setup(struct btrfs_block_group *block_group,
struct btrfs_trans_handle *trans,
struct btrfs_path *path)
{
@@ -2157,7 +2175,7 @@ static int cache_save_setup(struct btrfs_block_group_cache *block_group,
* If this block group is smaller than 100 megs don't bother caching the
* block group.
*/
- if (block_group->key.offset < (100 * SZ_1M)) {
+ if (block_group->length < (100 * SZ_1M)) {
spin_lock(&block_group->lock);
block_group->disk_cache_state = BTRFS_DC_WRITTEN;
spin_unlock(&block_group->lock);
@@ -2258,7 +2276,7 @@ again:
* taking up quite a bit since it's not folded into the other space
* cache.
*/
- num_pages = div_u64(block_group->key.offset, SZ_256M);
+ num_pages = div_u64(block_group->length, SZ_256M);
if (!num_pages)
num_pages = 1;
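
Only the length source changed here; the sizing rule itself is untouched: the
free space cache gets one page per 256MiB of block group, with a floor of one
page. A standalone model of the computation:

#include <stdint.h>
#include <stdio.h>

#define SZ_256M (256ULL * 1024 * 1024)

static uint64_t cache_pages(uint64_t length)
{
	uint64_t num_pages = length / SZ_256M;

	return num_pages ? num_pages : 1;   /* never less than one page */
}

int main(void)
{
	printf("small group: %llu pages, 1GiB group: %llu pages\n",
	       (unsigned long long)cache_pages(SZ_256M / 2),
	       (unsigned long long)cache_pages(4 * SZ_256M));
	return 0;
}
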
@@ -2303,7 +2321,7 @@ out:
int btrfs_setup_space_cache(struct btrfs_trans_handle *trans)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
- struct btrfs_block_group_cache *cache, *tmp;
+ struct btrfs_block_group *cache, *tmp;
struct btrfs_transaction *cur_trans = trans->transaction;
struct btrfs_path *path;
@@ -2341,7 +2359,7 @@ int btrfs_setup_space_cache(struct btrfs_trans_handle *trans)
int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
- struct btrfs_block_group_cache *cache;
+ struct btrfs_block_group *cache;
struct btrfs_transaction *cur_trans = trans->transaction;
int ret = 0;
int should_put;
@@ -2378,8 +2396,7 @@ again:
while (!list_empty(&dirty)) {
bool drop_reserve = true;
- cache = list_first_entry(&dirty,
- struct btrfs_block_group_cache,
+ cache = list_first_entry(&dirty, struct btrfs_block_group,
dirty_list);
/*
* This can happen if something re-dirties a block group that
@@ -2504,7 +2521,7 @@ again:
int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
- struct btrfs_block_group_cache *cache;
+ struct btrfs_block_group *cache;
struct btrfs_transaction *cur_trans = trans->transaction;
int ret = 0;
int should_put;
@@ -2534,7 +2551,7 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans)
spin_lock(&cur_trans->dirty_bgs_lock);
while (!list_empty(&cur_trans->dirty_bgs)) {
cache = list_first_entry(&cur_trans->dirty_bgs,
- struct btrfs_block_group_cache,
+ struct btrfs_block_group,
dirty_list);
/*
@@ -2616,7 +2633,7 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans)
* to use it without any locking
*/
while (!list_empty(io)) {
- cache = list_first_entry(io, struct btrfs_block_group_cache,
+ cache = list_first_entry(io, struct btrfs_block_group,
io_list);
list_del_init(&cache->io_list);
btrfs_wait_cache_io(trans, cache, path);
@@ -2631,7 +2648,7 @@ int btrfs_update_block_group(struct btrfs_trans_handle *trans,
u64 bytenr, u64 num_bytes, int alloc)
{
struct btrfs_fs_info *info = trans->fs_info;
- struct btrfs_block_group_cache *cache = NULL;
+ struct btrfs_block_group *cache = NULL;
u64 total = num_bytes;
u64 old_val;
u64 byte_in_group;
@@ -2662,11 +2679,11 @@ int btrfs_update_block_group(struct btrfs_trans_handle *trans,
* is because we need the unpinning stage to actually add the
* space back to the block group, otherwise we will leak space.
*/
- if (!alloc && cache->cached == BTRFS_CACHE_NO)
+ if (!alloc && !btrfs_block_group_done(cache))
btrfs_cache_block_group(cache, 1);
- byte_in_group = bytenr - cache->key.objectid;
- WARN_ON(byte_in_group > cache->key.offset);
+ byte_in_group = bytenr - cache->start;
+ WARN_ON(byte_in_group > cache->length);
spin_lock(&cache->space_info->lock);
spin_lock(&cache->lock);
@@ -2675,11 +2692,11 @@ int btrfs_update_block_group(struct btrfs_trans_handle *trans,
cache->disk_cache_state < BTRFS_DC_CLEAR)
cache->disk_cache_state = BTRFS_DC_CLEAR;
- old_val = btrfs_block_group_used(&cache->item);
- num_bytes = min(total, cache->key.offset - byte_in_group);
+ old_val = cache->used;
+ num_bytes = min(total, cache->length - byte_in_group);
if (alloc) {
old_val += num_bytes;
- btrfs_set_block_group_used(&cache->item, old_val);
+ cache->used = old_val;
cache->reserved -= num_bytes;
cache->space_info->bytes_reserved -= num_bytes;
cache->space_info->bytes_used += num_bytes;
@@ -2688,7 +2705,7 @@ int btrfs_update_block_group(struct btrfs_trans_handle *trans,
spin_unlock(&cache->space_info->lock);
} else {
old_val -= num_bytes;
- btrfs_set_block_group_used(&cache->item, old_val);
+ cache->used = old_val;
cache->pinned += num_bytes;
btrfs_space_info_update_bytes_pinned(info,
cache->space_info, num_bytes);
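
byte_in_group and num_bytes now derive from start and length directly, and
the update still clamps each pass to the bytes left in the current group, so
a range spanning several block groups is accounted piecewise. A user-space
model of one pass, with the space_info bookkeeping omitted:

#include <stdint.h>
#include <stdio.h>

struct bg { uint64_t start, length, used; };

static uint64_t account(struct bg *bg, uint64_t bytenr, uint64_t total,
			int alloc)
{
	uint64_t byte_in_group = bytenr - bg->start;
	uint64_t left = bg->length - byte_in_group;
	uint64_t num_bytes = total < left ? total : left;  /* clamp */

	if (alloc)
		bg->used += num_bytes;
	else
		bg->used -= num_bytes;
	return num_bytes;  /* caller subtracts this and moves to next group */
}

int main(void)
{
	struct bg bg = { .start = 1024, .length = 4096, .used = 0 };

	/* 8192 bytes requested, only 3072 fit in this group */
	printf("accounted %llu, used now %llu\n",
	       (unsigned long long)account(&bg, 2048, 8192, 1),
	       (unsigned long long)bg.used);
	return 0;
}
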
@@ -2746,7 +2763,7 @@ int btrfs_update_block_group(struct btrfs_trans_handle *trans,
* reservation and the block group has become read only we cannot make the
* reservation and return -EAGAIN, otherwise this function always succeeds.
*/
-int btrfs_add_reserved_bytes(struct btrfs_block_group_cache *cache,
+int btrfs_add_reserved_bytes(struct btrfs_block_group *cache,
u64 ram_bytes, u64 num_bytes, int delalloc)
{
struct btrfs_space_info *space_info = cache->space_info;
@@ -2782,7 +2799,7 @@ int btrfs_add_reserved_bytes(struct btrfs_block_group_cache *cache,
* A and before transaction A commits you free that leaf, you call this with
* reserve set to 0 in order to clear the reservation.
*/
-void btrfs_free_reserved_bytes(struct btrfs_block_group_cache *cache,
+void btrfs_free_reserved_bytes(struct btrfs_block_group *cache,
u64 num_bytes, int delalloc)
{
struct btrfs_space_info *space_info = cache->space_info;
@@ -2988,9 +3005,7 @@ static u64 get_profile_num_devs(struct btrfs_fs_info *fs_info, u64 type)
}
/*
- * If @is_allocation is true, reserve space in the system space info necessary
- * for allocating a chunk, otherwise if it's false, reserve space necessary for
- * removing a chunk.
+ * Reserve space in the system space for allocating or removing a chunk
*/
void check_system_chunk(struct btrfs_trans_handle *trans, u64 type)
{
@@ -3047,7 +3062,7 @@ void check_system_chunk(struct btrfs_trans_handle *trans, u64 type)
void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
{
- struct btrfs_block_group_cache *block_group;
+ struct btrfs_block_group *block_group;
u64 last = 0;
while (1) {
@@ -3075,7 +3090,7 @@ void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
spin_unlock(&block_group->lock);
ASSERT(block_group->io_ctl.inode == NULL);
iput(inode);
- last = block_group->key.objectid + block_group->key.offset;
+ last = block_group->start + block_group->length;
btrfs_put_block_group(block_group);
}
}
@@ -3087,7 +3102,7 @@ void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
*/
int btrfs_free_block_groups(struct btrfs_fs_info *info)
{
- struct btrfs_block_group_cache *block_group;
+ struct btrfs_block_group *block_group;
struct btrfs_space_info *space_info;
struct btrfs_caching_control *caching_ctl;
struct rb_node *n;
@@ -3104,7 +3119,7 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info)
spin_lock(&info->unused_bgs_lock);
while (!list_empty(&info->unused_bgs)) {
block_group = list_first_entry(&info->unused_bgs,
- struct btrfs_block_group_cache,
+ struct btrfs_block_group,
bg_list);
list_del_init(&block_group->bg_list);
btrfs_put_block_group(block_group);
@@ -3113,7 +3128,7 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info)
spin_lock(&info->block_group_cache_lock);
while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
- block_group = rb_entry(n, struct btrfs_block_group_cache,
+ block_group = rb_entry(n, struct btrfs_block_group,
cache_node);
rb_erase(&block_group->cache_node,
&info->block_group_cache_tree);
diff --git a/fs/btrfs/block-group.h b/fs/btrfs/block-group.h
index c391800388dd..9b409676c4b2 100644
--- a/fs/btrfs/block-group.h
+++ b/fs/btrfs/block-group.h
@@ -34,7 +34,7 @@ struct btrfs_caching_control {
struct mutex mutex;
wait_queue_head_t wait;
struct btrfs_work work;
- struct btrfs_block_group_cache *block_group;
+ struct btrfs_block_group *block_group;
u64 progress;
refcount_t count;
};
@@ -42,14 +42,15 @@ struct btrfs_caching_control {
/* Once caching_thread() finds this much free space, it will wake up waiters. */
#define CACHING_CTL_WAKE_UP SZ_2M
-struct btrfs_block_group_cache {
- struct btrfs_key key;
- struct btrfs_block_group_item item;
+struct btrfs_block_group {
struct btrfs_fs_info *fs_info;
struct inode *inode;
spinlock_t lock;
+ u64 start;
+ u64 length;
u64 pinned;
u64 reserved;
+ u64 used;
u64 delalloc_bytes;
u64 bytes_super;
u64 flags;
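
The structure change boils down to a plain field mapping: cache->key.objectid
becomes cache->start, cache->key.offset becomes cache->length, and
btrfs_block_group_used(&cache->item) becomes cache->used. A small sketch with
a hypothetical bg_end() helper showing the new idiom for the end offset:

#include <stdint.h>
#include <stdio.h>

struct bg { uint64_t start, length, used; };

static uint64_t bg_end(const struct bg *bg)
{
	return bg->start + bg->length;   /* was key.objectid + key.offset */
}

int main(void)
{
	struct bg bg = { .start = 4096, .length = 8192 };

	printf("end=%llu\n", (unsigned long long)bg_end(&bg));
	return 0;
}
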
@@ -159,7 +160,7 @@ struct btrfs_block_group_cache {
#ifdef CONFIG_BTRFS_DEBUG
static inline int btrfs_should_fragment_free_space(
- struct btrfs_block_group_cache *block_group)
+ struct btrfs_block_group *block_group)
{
struct btrfs_fs_info *fs_info = block_group->fs_info;
@@ -170,29 +171,29 @@ static inline int btrfs_should_fragment_free_space(
}
#endif
-struct btrfs_block_group_cache *btrfs_lookup_first_block_group(
+struct btrfs_block_group *btrfs_lookup_first_block_group(
struct btrfs_fs_info *info, u64 bytenr);
-struct btrfs_block_group_cache *btrfs_lookup_block_group(
+struct btrfs_block_group *btrfs_lookup_block_group(
struct btrfs_fs_info *info, u64 bytenr);
-struct btrfs_block_group_cache *btrfs_next_block_group(
- struct btrfs_block_group_cache *cache);
-void btrfs_get_block_group(struct btrfs_block_group_cache *cache);
-void btrfs_put_block_group(struct btrfs_block_group_cache *cache);
+struct btrfs_block_group *btrfs_next_block_group(
+ struct btrfs_block_group *cache);
+void btrfs_get_block_group(struct btrfs_block_group *cache);
+void btrfs_put_block_group(struct btrfs_block_group *cache);
void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info,
const u64 start);
-void btrfs_wait_block_group_reservations(struct btrfs_block_group_cache *bg);
+void btrfs_wait_block_group_reservations(struct btrfs_block_group *bg);
bool btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr);
void btrfs_dec_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr);
-void btrfs_wait_nocow_writers(struct btrfs_block_group_cache *bg);
-void btrfs_wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
+void btrfs_wait_nocow_writers(struct btrfs_block_group *bg);
+void btrfs_wait_block_group_cache_progress(struct btrfs_block_group *cache,
u64 num_bytes);
-int btrfs_wait_block_group_cache_done(struct btrfs_block_group_cache *cache);
-int btrfs_cache_block_group(struct btrfs_block_group_cache *cache,
+int btrfs_wait_block_group_cache_done(struct btrfs_block_group *cache);
+int btrfs_cache_block_group(struct btrfs_block_group *cache,
int load_cache_only);
void btrfs_put_caching_control(struct btrfs_caching_control *ctl);
struct btrfs_caching_control *btrfs_get_caching_control(
- struct btrfs_block_group_cache *cache);
-u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
+ struct btrfs_block_group *cache);
+u64 add_new_free_space(struct btrfs_block_group *block_group,
u64 start, u64 end);
struct btrfs_trans_handle *btrfs_start_trans_remove_block_group(
struct btrfs_fs_info *fs_info,
@@ -200,21 +201,22 @@ struct btrfs_trans_handle *btrfs_start_trans_remove_block_group(
int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
u64 group_start, struct extent_map *em);
void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info);
-void btrfs_mark_bg_unused(struct btrfs_block_group_cache *bg);
+void btrfs_mark_bg_unused(struct btrfs_block_group *bg);
int btrfs_read_block_groups(struct btrfs_fs_info *info);
int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used,
u64 type, u64 chunk_offset, u64 size);
void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans);
-int btrfs_inc_block_group_ro(struct btrfs_block_group_cache *cache);
-void btrfs_dec_block_group_ro(struct btrfs_block_group_cache *cache);
+int btrfs_inc_block_group_ro(struct btrfs_block_group *cache,
+ bool do_chunk_alloc);
+void btrfs_dec_block_group_ro(struct btrfs_block_group *cache);
int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans);
int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans);
int btrfs_setup_space_cache(struct btrfs_trans_handle *trans);
int btrfs_update_block_group(struct btrfs_trans_handle *trans,
u64 bytenr, u64 num_bytes, int alloc);
-int btrfs_add_reserved_bytes(struct btrfs_block_group_cache *cache,
+int btrfs_add_reserved_bytes(struct btrfs_block_group *cache,
u64 ram_bytes, u64 num_bytes, int delalloc);
-void btrfs_free_reserved_bytes(struct btrfs_block_group_cache *cache,
+void btrfs_free_reserved_bytes(struct btrfs_block_group *cache,
u64 num_bytes, int delalloc);
int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
enum btrfs_chunk_alloc_enum force);
@@ -239,8 +241,7 @@ static inline u64 btrfs_system_alloc_profile(struct btrfs_fs_info *fs_info)
return btrfs_get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
}
-static inline int btrfs_block_group_cache_done(
- struct btrfs_block_group_cache *cache)
+static inline int btrfs_block_group_done(struct btrfs_block_group *cache)
{
smp_mb();
return cache->cached == BTRFS_CACHE_FINISHED ||
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index f853835c409c..4e12a477d32e 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -63,9 +63,6 @@ struct btrfs_inode {
/* held while logging the inode in tree-log.c */
struct mutex log_mutex;
- /* held while doing delalloc reservations */
- struct mutex delalloc_mutex;
-
/* used to order data wrt metadata */
struct btrfs_ordered_inode_tree ordered_tree;
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index b05b361e2062..ee834ef7beb4 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -29,6 +29,41 @@
#include "extent_io.h"
#include "extent_map.h"
+int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
+ u64 start, struct page **pages, unsigned long *out_pages,
+ unsigned long *total_in, unsigned long *total_out);
+int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb);
+int zlib_decompress(struct list_head *ws, unsigned char *data_in,
+ struct page *dest_page, unsigned long start_byte, size_t srclen,
+ size_t destlen);
+struct list_head *zlib_alloc_workspace(unsigned int level);
+void zlib_free_workspace(struct list_head *ws);
+struct list_head *zlib_get_workspace(unsigned int level);
+
+int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
+ u64 start, struct page **pages, unsigned long *out_pages,
+ unsigned long *total_in, unsigned long *total_out);
+int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb);
+int lzo_decompress(struct list_head *ws, unsigned char *data_in,
+ struct page *dest_page, unsigned long start_byte, size_t srclen,
+ size_t destlen);
+struct list_head *lzo_alloc_workspace(unsigned int level);
+void lzo_free_workspace(struct list_head *ws);
+
+int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
+ u64 start, struct page **pages, unsigned long *out_pages,
+ unsigned long *total_in, unsigned long *total_out);
+int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb);
+int zstd_decompress(struct list_head *ws, unsigned char *data_in,
+ struct page *dest_page, unsigned long start_byte, size_t srclen,
+ size_t destlen);
+void zstd_init_workspace_manager(void);
+void zstd_cleanup_workspace_manager(void);
+struct list_head *zstd_alloc_workspace(unsigned int level);
+void zstd_free_workspace(struct list_head *ws);
+struct list_head *zstd_get_workspace(unsigned int level);
+void zstd_put_workspace(struct list_head *ws);
+
static const char* const btrfs_compress_types[] = { "", "zlib", "lzo", "zstd" };
const char* btrfs_compress_type2str(enum btrfs_compression_type type)
@@ -39,6 +74,8 @@ const char* btrfs_compress_type2str(enum btrfs_compression_type type)
case BTRFS_COMPRESS_ZSTD:
case BTRFS_COMPRESS_NONE:
return btrfs_compress_types[type];
+ default:
+ break;
}
return NULL;
@@ -60,6 +97,70 @@ bool btrfs_compress_is_valid_type(const char *str, size_t len)
return false;
}
+static int compression_compress_pages(int type, struct list_head *ws,
+ struct address_space *mapping, u64 start, struct page **pages,
+ unsigned long *out_pages, unsigned long *total_in,
+ unsigned long *total_out)
+{
+ switch (type) {
+ case BTRFS_COMPRESS_ZLIB:
+ return zlib_compress_pages(ws, mapping, start, pages,
+ out_pages, total_in, total_out);
+ case BTRFS_COMPRESS_LZO:
+ return lzo_compress_pages(ws, mapping, start, pages,
+ out_pages, total_in, total_out);
+ case BTRFS_COMPRESS_ZSTD:
+ return zstd_compress_pages(ws, mapping, start, pages,
+ out_pages, total_in, total_out);
+ case BTRFS_COMPRESS_NONE:
+ default:
+ /*
+ * This can't happen, the type is validated several times
+ * before we get here. As a sane fallback, return what the
+ * callers will understand as 'no compression happened'.
+ */
+ return -E2BIG;
+ }
+}
+
+static int compression_decompress_bio(int type, struct list_head *ws,
+ struct compressed_bio *cb)
+{
+ switch (type) {
+ case BTRFS_COMPRESS_ZLIB: return zlib_decompress_bio(ws, cb);
+ case BTRFS_COMPRESS_LZO: return lzo_decompress_bio(ws, cb);
+ case BTRFS_COMPRESS_ZSTD: return zstd_decompress_bio(ws, cb);
+ case BTRFS_COMPRESS_NONE:
+ default:
+ /*
+ * This can't happen, the type is validated several times
+ * before we get here.
+ */
+ BUG();
+ }
+}
+
+static int compression_decompress(int type, struct list_head *ws,
+ unsigned char *data_in, struct page *dest_page,
+ unsigned long start_byte, size_t srclen, size_t destlen)
+{
+ switch (type) {
+ case BTRFS_COMPRESS_ZLIB: return zlib_decompress(ws, data_in, dest_page,
+ start_byte, srclen, destlen);
+ case BTRFS_COMPRESS_LZO: return lzo_decompress(ws, data_in, dest_page,
+ start_byte, srclen, destlen);
+ case BTRFS_COMPRESS_ZSTD: return zstd_decompress(ws, data_in, dest_page,
+ start_byte, srclen, destlen);
+ case BTRFS_COMPRESS_NONE:
+ default:
+ /*
+ * This can't happen, the type is validated several times
+ * before we get here.
+ */
+ BUG();
+ }
+}
+
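
All three operations now dispatch with a switch on the compression type
instead of indirecting through per-type function pointers, a direct call the
compiler can see through. A standalone model of the pattern, with stub
compressors standing in for the real back ends:

#include <errno.h>
#include <stdio.h>

enum ctype { NONE, ZLIB, LZO, ZSTD };

static int zlib_compress(void) { return 0; }
static int lzo_compress(void)  { return 0; }
static int zstd_compress(void) { return 0; }

static int compress(enum ctype type)
{
	switch (type) {
	case ZLIB: return zlib_compress();
	case LZO:  return lzo_compress();
	case ZSTD: return zstd_compress();
	case NONE:
	default:
		/* callers read this as 'no compression happened' */
		return -E2BIG;
	}
}

int main(void)
{
	printf("zlib: %d, none: %d\n", compress(ZLIB), compress(NONE));
	return 0;
}
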
static int btrfs_decompress_bio(struct compressed_bio *cb);
static inline int compressed_bio_size(struct btrfs_fs_info *fs_info,
@@ -311,7 +412,8 @@ blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
unsigned long compressed_len,
struct page **compressed_pages,
unsigned long nr_pages,
- unsigned int write_flags)
+ unsigned int write_flags,
+ struct cgroup_subsys_state *blkcg_css)
{
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct bio *bio = NULL;
@@ -320,7 +422,6 @@ blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
int pg_index = 0;
struct page *page;
u64 first_byte = disk_start;
- struct block_device *bdev;
blk_status_t ret;
int skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
@@ -339,13 +440,15 @@ blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
cb->orig_bio = NULL;
cb->nr_pages = nr_pages;
- bdev = fs_info->fs_devices->latest_bdev;
-
bio = btrfs_bio_alloc(first_byte);
- bio_set_dev(bio, bdev);
bio->bi_opf = REQ_OP_WRITE | write_flags;
bio->bi_private = cb;
bio->bi_end_io = end_compressed_bio_write;
+
+ if (blkcg_css) {
+ bio->bi_opf |= REQ_CGROUP_PUNT;
+ bio_associate_blkg_from_css(bio, blkcg_css);
+ }
refcount_set(&cb->pending_bios, 1);
/* create and submit bios for the compressed pages */
@@ -378,14 +481,13 @@ blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
BUG_ON(ret); /* -ENOMEM */
}
- ret = btrfs_map_bio(fs_info, bio, 0, 1);
+ ret = btrfs_map_bio(fs_info, bio, 0);
if (ret) {
bio->bi_status = ret;
bio_endio(bio);
}
bio = btrfs_bio_alloc(first_byte);
- bio_set_dev(bio, bdev);
bio->bi_opf = REQ_OP_WRITE | write_flags;
bio->bi_private = cb;
bio->bi_end_io = end_compressed_bio_write;
@@ -409,7 +511,7 @@ blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
BUG_ON(ret); /* -ENOMEM */
}
- ret = btrfs_map_bio(fs_info, bio, 0, 1);
+ ret = btrfs_map_bio(fs_info, bio, 0);
if (ret) {
bio->bi_status = ret;
bio_endio(bio);
@@ -553,7 +655,6 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
unsigned long nr_pages;
unsigned long pg_index;
struct page *page;
- struct block_device *bdev;
struct bio *comp_bio;
u64 cur_disk_byte = (u64)bio->bi_iter.bi_sector << 9;
u64 em_len;
@@ -604,8 +705,6 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
if (!cb->compressed_pages)
goto fail1;
- bdev = fs_info->fs_devices->latest_bdev;
-
for (pg_index = 0; pg_index < nr_pages; pg_index++) {
cb->compressed_pages[pg_index] = alloc_page(GFP_NOFS |
__GFP_HIGHMEM);
@@ -624,7 +723,6 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
cb->len = bio->bi_iter.bi_size;
comp_bio = btrfs_bio_alloc(cur_disk_byte);
- bio_set_dev(comp_bio, bdev);
comp_bio->bi_opf = REQ_OP_READ;
comp_bio->bi_private = cb;
comp_bio->bi_end_io = end_compressed_bio_read;
@@ -668,14 +766,13 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
fs_info->sectorsize);
sums += csum_size * nr_sectors;
- ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0);
+ ret = btrfs_map_bio(fs_info, comp_bio, mirror_num);
if (ret) {
comp_bio->bi_status = ret;
bio_endio(comp_bio);
}
comp_bio = btrfs_bio_alloc(cur_disk_byte);
- bio_set_dev(comp_bio, bdev);
comp_bio->bi_opf = REQ_OP_READ;
comp_bio->bi_private = cb;
comp_bio->bi_end_io = end_compressed_bio_read;
@@ -693,7 +790,7 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
BUG_ON(ret); /* -ENOMEM */
}
- ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0);
+ ret = btrfs_map_bio(fs_info, comp_bio, mirror_num);
if (ret) {
comp_bio->bi_status = ret;
bio_endio(comp_bio);
@@ -764,26 +861,6 @@ struct heuristic_ws {
static struct workspace_manager heuristic_wsm;
-static void heuristic_init_workspace_manager(void)
-{
- btrfs_init_workspace_manager(&heuristic_wsm, &btrfs_heuristic_compress);
-}
-
-static void heuristic_cleanup_workspace_manager(void)
-{
- btrfs_cleanup_workspace_manager(&heuristic_wsm);
-}
-
-static struct list_head *heuristic_get_workspace(unsigned int level)
-{
- return btrfs_get_workspace(&heuristic_wsm, level);
-}
-
-static void heuristic_put_workspace(struct list_head *ws)
-{
- btrfs_put_workspace(&heuristic_wsm, ws);
-}
-
static void free_heuristic_ws(struct list_head *ws)
{
struct heuristic_ws *workspace;
@@ -824,12 +901,7 @@ fail:
}
const struct btrfs_compress_op btrfs_heuristic_compress = {
- .init_workspace_manager = heuristic_init_workspace_manager,
- .cleanup_workspace_manager = heuristic_cleanup_workspace_manager,
- .get_workspace = heuristic_get_workspace,
- .put_workspace = heuristic_put_workspace,
- .alloc_workspace = alloc_heuristic_ws,
- .free_workspace = free_heuristic_ws,
+ .workspace_manager = &heuristic_wsm,
};
static const struct btrfs_compress_op * const btrfs_compress_op[] = {
@@ -840,13 +912,44 @@ static const struct btrfs_compress_op * const btrfs_compress_op[] = {
&btrfs_zstd_compress,
};
-void btrfs_init_workspace_manager(struct workspace_manager *wsm,
- const struct btrfs_compress_op *ops)
+static struct list_head *alloc_workspace(int type, unsigned int level)
{
- struct list_head *workspace;
+ switch (type) {
+ case BTRFS_COMPRESS_NONE: return alloc_heuristic_ws(level);
+ case BTRFS_COMPRESS_ZLIB: return zlib_alloc_workspace(level);
+ case BTRFS_COMPRESS_LZO: return lzo_alloc_workspace(level);
+ case BTRFS_COMPRESS_ZSTD: return zstd_alloc_workspace(level);
+ default:
+ /*
+ * This can't happen, the type is validated several times
+ * before we get here.
+ */
+ BUG();
+ }
+}
- wsm->ops = ops;
+static void free_workspace(int type, struct list_head *ws)
+{
+ switch (type) {
+ case BTRFS_COMPRESS_NONE: return free_heuristic_ws(ws);
+ case BTRFS_COMPRESS_ZLIB: return zlib_free_workspace(ws);
+ case BTRFS_COMPRESS_LZO: return lzo_free_workspace(ws);
+ case BTRFS_COMPRESS_ZSTD: return zstd_free_workspace(ws);
+ default:
+ /*
+ * This can't happen, the type is validated several times
+ * before we get here.
+ */
+ BUG();
+ }
+}
+
+static void btrfs_init_workspace_manager(int type)
+{
+ struct workspace_manager *wsm;
+ struct list_head *workspace;
+ wsm = btrfs_compress_op[type]->workspace_manager;
INIT_LIST_HEAD(&wsm->idle_ws);
spin_lock_init(&wsm->ws_lock);
atomic_set(&wsm->total_ws, 0);
@@ -856,7 +959,7 @@ void btrfs_init_workspace_manager(struct workspace_manager *wsm,
* Preallocate one workspace for each compression type so we can
* guarantee forward progress in the worst case
*/
- workspace = wsm->ops->alloc_workspace(0);
+ workspace = alloc_workspace(type, 0);
if (IS_ERR(workspace)) {
pr_warn(
"BTRFS: cannot preallocate compression workspace, will try later\n");
@@ -867,14 +970,16 @@ void btrfs_init_workspace_manager(struct workspace_manager *wsm,
}
}
-void btrfs_cleanup_workspace_manager(struct workspace_manager *wsman)
+static void btrfs_cleanup_workspace_manager(int type)
{
+ struct workspace_manager *wsman;
struct list_head *ws;
+ wsman = btrfs_compress_op[type]->workspace_manager;
while (!list_empty(&wsman->idle_ws)) {
ws = wsman->idle_ws.next;
list_del(ws);
- wsman->ops->free_workspace(ws);
+ free_workspace(type, ws);
atomic_dec(&wsman->total_ws);
}
}
@@ -885,9 +990,9 @@ void btrfs_cleanup_workspace_manager(struct workspace_manager *wsman)
* Preallocation makes forward progress guarantees and we do not return
* errors.
*/
-struct list_head *btrfs_get_workspace(struct workspace_manager *wsm,
- unsigned int level)
+struct list_head *btrfs_get_workspace(int type, unsigned int level)
{
+ struct workspace_manager *wsm;
struct list_head *workspace;
int cpus = num_online_cpus();
unsigned nofs_flag;
@@ -897,6 +1002,7 @@ struct list_head *btrfs_get_workspace(struct workspace_manager *wsm,
wait_queue_head_t *ws_wait;
int *free_ws;
+ wsm = btrfs_compress_op[type]->workspace_manager;
idle_ws = &wsm->idle_ws;
ws_lock = &wsm->ws_lock;
total_ws = &wsm->total_ws;
@@ -932,7 +1038,7 @@ again:
* context of btrfs_compress_bio/btrfs_compress_pages
*/
nofs_flag = memalloc_nofs_save();
- workspace = wsm->ops->alloc_workspace(level);
+ workspace = alloc_workspace(type, level);
memalloc_nofs_restore(nofs_flag);
if (IS_ERR(workspace)) {
@@ -965,21 +1071,34 @@ again:
static struct list_head *get_workspace(int type, int level)
{
- return btrfs_compress_op[type]->get_workspace(level);
+ switch (type) {
+ case BTRFS_COMPRESS_NONE: return btrfs_get_workspace(type, level);
+ case BTRFS_COMPRESS_ZLIB: return zlib_get_workspace(level);
+ case BTRFS_COMPRESS_LZO: return btrfs_get_workspace(type, level);
+ case BTRFS_COMPRESS_ZSTD: return zstd_get_workspace(level);
+ default:
+ /*
+ * This can't happen, the type is validated several times
+ * before we get here.
+ */
+ BUG();
+ }
}
/*
* put a workspace struct back on the list or free it if we have enough
* idle ones sitting around
*/
-void btrfs_put_workspace(struct workspace_manager *wsm, struct list_head *ws)
+void btrfs_put_workspace(int type, struct list_head *ws)
{
+ struct workspace_manager *wsm;
struct list_head *idle_ws;
spinlock_t *ws_lock;
atomic_t *total_ws;
wait_queue_head_t *ws_wait;
int *free_ws;
+ wsm = btrfs_compress_op[type]->workspace_manager;
idle_ws = &wsm->idle_ws;
ws_lock = &wsm->ws_lock;
total_ws = &wsm->total_ws;
@@ -995,7 +1114,7 @@ void btrfs_put_workspace(struct workspace_manager *wsm, struct list_head *ws)
}
spin_unlock(ws_lock);
- wsm->ops->free_workspace(ws);
+ free_workspace(type, ws);
atomic_dec(total_ws);
wake:
cond_wake_up(ws_wait);
@@ -1003,7 +1122,18 @@ wake:
static void put_workspace(int type, struct list_head *ws)
{
- return btrfs_compress_op[type]->put_workspace(ws);
+ switch (type) {
+ case BTRFS_COMPRESS_NONE: return btrfs_put_workspace(type, ws);
+ case BTRFS_COMPRESS_ZLIB: return btrfs_put_workspace(type, ws);
+ case BTRFS_COMPRESS_LZO: return btrfs_put_workspace(type, ws);
+ case BTRFS_COMPRESS_ZSTD: return zstd_put_workspace(ws);
+ default:
+ /*
+ * This can't happen, the type is validated several times
+ * before we get here.
+ */
+ BUG();
+ }
}
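
btrfs_get_workspace() and btrfs_put_workspace() keep one idle list per type:
get reuses a parked workspace when one exists and allocates otherwise, and
put parks the workspace again. A single-threaded user-space model of that
lifecycle; the kernel version adds the lock, the cpu-count cap and the wait
queue:

#include <stdio.h>
#include <stdlib.h>

struct ws { struct ws *next; };
struct manager { struct ws *idle; int total; };

static struct ws *ws_get(struct manager *m)
{
	if (m->idle) {                    /* reuse an idle workspace */
		struct ws *w = m->idle;

		m->idle = w->next;
		return w;
	}
	m->total++;                       /* none idle: allocate fresh */
	return calloc(1, sizeof(struct ws));
}

static void ws_put(struct manager *m, struct ws *w)
{
	w->next = m->idle;                /* park it for the next caller */
	m->idle = w;
}

int main(void)
{
	struct manager m = { NULL, 0 };
	struct ws *w = ws_get(&m);

	ws_put(&m, w);
	ws_get(&m);                       /* reuses w, total stays 1 */
	printf("total allocated: %d\n", m.total);
	free(w);
	return 0;
}
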
/*
@@ -1042,10 +1172,8 @@ int btrfs_compress_pages(unsigned int type_level, struct address_space *mapping,
level = btrfs_compress_set_level(type, level);
workspace = get_workspace(type, level);
- ret = btrfs_compress_op[type]->compress_pages(workspace, mapping,
- start, pages,
- out_pages,
- total_in, total_out);
+ ret = compression_compress_pages(type, workspace, mapping, start, pages,
+ out_pages, total_in, total_out);
put_workspace(type, workspace);
return ret;
}
@@ -1071,7 +1199,7 @@ static int btrfs_decompress_bio(struct compressed_bio *cb)
int type = cb->compress_type;
workspace = get_workspace(type, 0);
- ret = btrfs_compress_op[type]->decompress_bio(workspace, cb);
+ ret = compression_decompress_bio(type, workspace, cb);
put_workspace(type, workspace);
return ret;
@@ -1089,9 +1217,8 @@ int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
int ret;
workspace = get_workspace(type, 0);
- ret = btrfs_compress_op[type]->decompress(workspace, data_in,
- dest_page, start_byte,
- srclen, destlen);
+ ret = compression_decompress(type, workspace, data_in, dest_page,
+ start_byte, srclen, destlen);
put_workspace(type, workspace);
return ret;
@@ -1099,18 +1226,18 @@ int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
void __init btrfs_init_compress(void)
{
- int i;
-
- for (i = 0; i < BTRFS_NR_WORKSPACE_MANAGERS; i++)
- btrfs_compress_op[i]->init_workspace_manager();
+ btrfs_init_workspace_manager(BTRFS_COMPRESS_NONE);
+ btrfs_init_workspace_manager(BTRFS_COMPRESS_ZLIB);
+ btrfs_init_workspace_manager(BTRFS_COMPRESS_LZO);
+ zstd_init_workspace_manager();
}
void __cold btrfs_exit_compress(void)
{
- int i;
-
- for (i = 0; i < BTRFS_NR_WORKSPACE_MANAGERS; i++)
- btrfs_compress_op[i]->cleanup_workspace_manager();
+ btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_NONE);
+ btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_ZLIB);
+ btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_LZO);
+ zstd_cleanup_workspace_manager();
}
/*
diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h
index 4cb8be9ff88b..d253f7aa8ed5 100644
--- a/fs/btrfs/compression.h
+++ b/fs/btrfs/compression.h
@@ -93,7 +93,8 @@ blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
unsigned long compressed_len,
struct page **compressed_pages,
unsigned long nr_pages,
- unsigned int write_flags);
+ unsigned int write_flags,
+ struct cgroup_subsys_state *blkcg_css);
blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
int mirror_num, unsigned long bio_flags);
@@ -104,11 +105,10 @@ enum btrfs_compression_type {
BTRFS_COMPRESS_ZLIB = 1,
BTRFS_COMPRESS_LZO = 2,
BTRFS_COMPRESS_ZSTD = 3,
- BTRFS_COMPRESS_TYPES = 3,
+ BTRFS_NR_COMPRESS_TYPES = 4,
};
struct workspace_manager {
- const struct btrfs_compress_op *ops;
struct list_head idle_ws;
spinlock_t ws_lock;
/* Number of free workspaces */
@@ -119,50 +119,18 @@ struct workspace_manager {
wait_queue_head_t ws_wait;
};
-void btrfs_init_workspace_manager(struct workspace_manager *wsm,
- const struct btrfs_compress_op *ops);
-struct list_head *btrfs_get_workspace(struct workspace_manager *wsm,
- unsigned int level);
-void btrfs_put_workspace(struct workspace_manager *wsm, struct list_head *ws);
-void btrfs_cleanup_workspace_manager(struct workspace_manager *wsm);
+struct list_head *btrfs_get_workspace(int type, unsigned int level);
+void btrfs_put_workspace(int type, struct list_head *ws);
struct btrfs_compress_op {
- void (*init_workspace_manager)(void);
-
- void (*cleanup_workspace_manager)(void);
-
- struct list_head *(*get_workspace)(unsigned int level);
-
- void (*put_workspace)(struct list_head *ws);
-
- struct list_head *(*alloc_workspace)(unsigned int level);
-
- void (*free_workspace)(struct list_head *workspace);
-
- int (*compress_pages)(struct list_head *workspace,
- struct address_space *mapping,
- u64 start,
- struct page **pages,
- unsigned long *out_pages,
- unsigned long *total_in,
- unsigned long *total_out);
-
- int (*decompress_bio)(struct list_head *workspace,
- struct compressed_bio *cb);
-
- int (*decompress)(struct list_head *workspace,
- unsigned char *data_in,
- struct page *dest_page,
- unsigned long start_byte,
- size_t srclen, size_t destlen);
-
+ struct workspace_manager *workspace_manager;
/* Maximum level supported by the compression algorithm */
unsigned int max_level;
unsigned int default_level;
};
/* The heuristic workspaces are managed via the 0th workspace manager */
-#define BTRFS_NR_WORKSPACE_MANAGERS (BTRFS_COMPRESS_TYPES + 1)
+#define BTRFS_NR_WORKSPACE_MANAGERS BTRFS_NR_COMPRESS_TYPES
extern const struct btrfs_compress_op btrfs_heuristic_compress;
extern const struct btrfs_compress_op btrfs_zlib_compress;
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index e59cde204b2f..5b6e86aaf2e1 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -32,8 +32,13 @@ static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
static const struct btrfs_csums {
u16 size;
const char *name;
+ const char *driver;
} btrfs_csums[] = {
[BTRFS_CSUM_TYPE_CRC32] = { .size = 4, .name = "crc32c" },
+ [BTRFS_CSUM_TYPE_XXHASH] = { .size = 8, .name = "xxhash64" },
+ [BTRFS_CSUM_TYPE_SHA256] = { .size = 32, .name = "sha256" },
+ [BTRFS_CSUM_TYPE_BLAKE2] = { .size = 32, .name = "blake2b",
+ .driver = "blake2b-256" },
};
int btrfs_super_csum_size(const struct btrfs_super_block *s)
@@ -51,34 +56,25 @@ const char *btrfs_super_csum_name(u16 csum_type)
return btrfs_csums[csum_type].name;
}
-struct btrfs_path *btrfs_alloc_path(void)
+/*
+ * Return the driver name if defined, otherwise fall back to the checksum
+ * name, which is also a valid driver name
+ */
+const char *btrfs_super_csum_driver(u16 csum_type)
{
- return kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
+ /* csum type is validated at mount time */
+ return btrfs_csums[csum_type].driver ?:
+ btrfs_csums[csum_type].name;
}
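
The '?:' form is the GNU "Elvis" operator: it evaluates to the left operand
when that operand is non-NULL. A portable standalone equivalent of the driver
name fallback, with illustrative table entries:

#include <stdio.h>

struct csum_entry { const char *name; const char *driver; };

static const char *csum_driver(const struct csum_entry *e)
{
	/* same as the kernel's 'e->driver ?: e->name' */
	return e->driver ? e->driver : e->name;
}

int main(void)
{
	struct csum_entry blake2 = { "blake2b", "blake2b-256" };
	struct csum_entry crc32  = { "crc32c", NULL };

	printf("%s %s\n", csum_driver(&blake2), csum_driver(&crc32));
	return 0;
}
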
-/*
- * set all locked nodes in the path to blocking locks. This should
- * be done before scheduling
- */
-noinline void btrfs_set_path_blocking(struct btrfs_path *p)
+size_t __const btrfs_get_num_csums(void)
{
- int i;
- for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
- if (!p->nodes[i] || !p->locks[i])
- continue;
- /*
- * If we currently have a spinning reader or writer lock this
- * will bump the count of blocking holders and drop the
- * spinlock.
- */
- if (p->locks[i] == BTRFS_READ_LOCK) {
- btrfs_set_lock_blocking_read(p->nodes[i]);
- p->locks[i] = BTRFS_READ_LOCK_BLOCKING;
- } else if (p->locks[i] == BTRFS_WRITE_LOCK) {
- btrfs_set_lock_blocking_write(p->nodes[i]);
- p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING;
- }
- }
+ return ARRAY_SIZE(btrfs_csums);
+}
+
+struct btrfs_path *btrfs_alloc_path(void)
+{
+ return kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
}
/* this also releases the path */
@@ -1125,7 +1121,7 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
parent_start = buf->start;
- extent_buffer_get(cow);
+ atomic_inc(&cow->refs);
ret = tree_mod_log_insert_root(root->node, cow, 1);
BUG_ON(ret < 0);
rcu_assign_pointer(root->node, cow);
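
extent_buffer_get() was a one-line wrapper, so its call sites now open-code
the reference bump. A user-space model of what the removed helper amounted to:

#include <stdatomic.h>
#include <stdio.h>

struct extent_buffer { atomic_int refs; };

static void extent_buffer_get(struct extent_buffer *eb)
{
	atomic_fetch_add(&eb->refs, 1);   /* the entire helper */
}

int main(void)
{
	struct extent_buffer eb = { 1 };

	extent_buffer_get(&eb);           /* == atomic_inc(&eb->refs) */
	printf("refs=%d\n", atomic_load(&eb.refs));
	return 0;
}
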
@@ -1563,7 +1559,7 @@ static int comp_keys(const struct btrfs_disk_key *disk,
/*
* same as comp_keys only with two btrfs_key's
*/
-int btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2)
+int __pure btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2)
{
if (k1->objectid > k2->objectid)
return 1;
@@ -2036,7 +2032,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
/* update the path */
if (left) {
if (btrfs_header_nritems(left) > orig_slot) {
- extent_buffer_get(left);
+ atomic_inc(&left->refs);
/* left was locked after cow */
path->nodes[level] = left;
path->slots[level + 1] -= 1;
@@ -2379,32 +2375,6 @@ static noinline void unlock_up(struct btrfs_path *path, int level,
}
/*
- * This releases any locks held in the path starting at level and
- * going all the way up to the root.
- *
- * btrfs_search_slot will keep the lock held on higher nodes in a few
- * corner cases, such as COW of the block at slot zero in the node. This
- * ignores those rules, and it should only be called when there are no
- * more updates to be done higher up in the tree.
- */
-noinline void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
-{
- int i;
-
- if (path->keep_locks)
- return;
-
- for (i = level; i < BTRFS_MAX_LEVEL; i++) {
- if (!path->nodes[i])
- continue;
- if (!path->locks[i])
- continue;
- btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
- path->locks[i] = 0;
- }
-}
-
-/*
* helper function for btrfs_search_slot. The goal is to find a block
* in cache without setting the path to blocking. If we find the block
* we return zero and the path is unchanged.
@@ -2652,7 +2622,7 @@ static struct extent_buffer *btrfs_search_slot_get_root(struct btrfs_root *root,
} else {
b = root->commit_root;
- extent_buffer_get(b);
+ atomic_inc(&b->refs);
}
level = btrfs_header_level(b);
/*
@@ -2785,12 +2755,10 @@ again:
}
while (b) {
+ int dec = 0;
+
level = btrfs_header_level(b);
- /*
- * setup the path here so we can release it under lock
- * contention with the cow code
- */
if (cow) {
bool last_level = (level == (BTRFS_MAX_LEVEL - 1));
@@ -2861,73 +2829,7 @@ cow_done:
if (ret < 0)
goto done;
- if (level != 0) {
- int dec = 0;
- if (ret && slot > 0) {
- dec = 1;
- slot -= 1;
- }
- p->slots[level] = slot;
- err = setup_nodes_for_search(trans, root, p, b, level,
- ins_len, &write_lock_level);
- if (err == -EAGAIN)
- goto again;
- if (err) {
- ret = err;
- goto done;
- }
- b = p->nodes[level];
- slot = p->slots[level];
-
- /*
- * slot 0 is special, if we change the key
- * we have to update the parent pointer
- * which means we must have a write lock
- * on the parent
- */
- if (slot == 0 && ins_len &&
- write_lock_level < level + 1) {
- write_lock_level = level + 1;
- btrfs_release_path(p);
- goto again;
- }
-
- unlock_up(p, level, lowest_unlock,
- min_write_lock_level, &write_lock_level);
-
- if (level == lowest_level) {
- if (dec)
- p->slots[level]++;
- goto done;
- }
-
- err = read_block_for_search(root, p, &b, level,
- slot, key);
- if (err == -EAGAIN)
- goto again;
- if (err) {
- ret = err;
- goto done;
- }
-
- if (!p->skip_locking) {
- level = btrfs_header_level(b);
- if (level <= write_lock_level) {
- if (!btrfs_try_tree_write_lock(b)) {
- btrfs_set_path_blocking(p);
- btrfs_tree_lock(b);
- }
- p->locks[level] = BTRFS_WRITE_LOCK;
- } else {
- if (!btrfs_tree_read_lock_atomic(b)) {
- btrfs_set_path_blocking(p);
- btrfs_tree_read_lock(b);
- }
- p->locks[level] = BTRFS_READ_LOCK;
- }
- p->nodes[level] = b;
- }
- } else {
+ if (level == 0) {
p->slots[level] = slot;
if (ins_len > 0 &&
btrfs_leaf_free_space(b) < ins_len) {
@@ -2952,6 +2854,67 @@ cow_done:
min_write_lock_level, NULL);
goto done;
}
+ if (ret && slot > 0) {
+ dec = 1;
+ slot--;
+ }
+ p->slots[level] = slot;
+ err = setup_nodes_for_search(trans, root, p, b, level, ins_len,
+ &write_lock_level);
+ if (err == -EAGAIN)
+ goto again;
+ if (err) {
+ ret = err;
+ goto done;
+ }
+ b = p->nodes[level];
+ slot = p->slots[level];
+
+ /*
+ * Slot 0 is special, if we change the key we have to update
+ * the parent pointer which means we must have a write lock on
+ * the parent
+ */
+ if (slot == 0 && ins_len && write_lock_level < level + 1) {
+ write_lock_level = level + 1;
+ btrfs_release_path(p);
+ goto again;
+ }
+
+ unlock_up(p, level, lowest_unlock, min_write_lock_level,
+ &write_lock_level);
+
+ if (level == lowest_level) {
+ if (dec)
+ p->slots[level]++;
+ goto done;
+ }
+
+ err = read_block_for_search(root, p, &b, level, slot, key);
+ if (err == -EAGAIN)
+ goto again;
+ if (err) {
+ ret = err;
+ goto done;
+ }
+
+ if (!p->skip_locking) {
+ level = btrfs_header_level(b);
+ if (level <= write_lock_level) {
+ if (!btrfs_try_tree_write_lock(b)) {
+ btrfs_set_path_blocking(p);
+ btrfs_tree_lock(b);
+ }
+ p->locks[level] = BTRFS_WRITE_LOCK;
+ } else {
+ if (!btrfs_tree_read_lock_atomic(b)) {
+ btrfs_set_path_blocking(p);
+ btrfs_tree_read_lock(b);
+ }
+ p->locks[level] = BTRFS_READ_LOCK;
+ }
+ p->nodes[level] = b;
+ }
}
ret = 1;
done:
@@ -3008,6 +2971,8 @@ again:
p->locks[level] = BTRFS_READ_LOCK;
while (b) {
+ int dec = 0;
+
level = btrfs_header_level(b);
p->nodes[level] = b;
@@ -3028,47 +2993,45 @@ again:
if (ret < 0)
goto done;
- if (level != 0) {
- int dec = 0;
- if (ret && slot > 0) {
- dec = 1;
- slot -= 1;
- }
+ if (level == 0) {
p->slots[level] = slot;
unlock_up(p, level, lowest_unlock, 0, NULL);
+ goto done;
+ }
- if (level == lowest_level) {
- if (dec)
- p->slots[level]++;
- goto done;
- }
+ if (ret && slot > 0) {
+ dec = 1;
+ slot--;
+ }
+ p->slots[level] = slot;
+ unlock_up(p, level, lowest_unlock, 0, NULL);
- err = read_block_for_search(root, p, &b, level,
- slot, key);
- if (err == -EAGAIN)
- goto again;
- if (err) {
- ret = err;
- goto done;
- }
+ if (level == lowest_level) {
+ if (dec)
+ p->slots[level]++;
+ goto done;
+ }
- level = btrfs_header_level(b);
- if (!btrfs_tree_read_lock_atomic(b)) {
- btrfs_set_path_blocking(p);
- btrfs_tree_read_lock(b);
- }
- b = tree_mod_log_rewind(fs_info, p, b, time_seq);
- if (!b) {
- ret = -ENOMEM;
- goto done;
- }
- p->locks[level] = BTRFS_READ_LOCK;
- p->nodes[level] = b;
- } else {
- p->slots[level] = slot;
- unlock_up(p, level, lowest_unlock, 0, NULL);
+ err = read_block_for_search(root, p, &b, level, slot, key);
+ if (err == -EAGAIN)
+ goto again;
+ if (err) {
+ ret = err;
+ goto done;
+ }
+
+ level = btrfs_header_level(b);
+ if (!btrfs_tree_read_lock_atomic(b)) {
+ btrfs_set_path_blocking(p);
+ btrfs_tree_read_lock(b);
+ }
+ b = tree_mod_log_rewind(fs_info, p, b, time_seq);
+ if (!b) {
+ ret = -ENOMEM;
goto done;
}
+ p->locks[level] = BTRFS_READ_LOCK;
+ p->nodes[level] = b;
}
ret = 1;
done:
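
Both search loops get the same reshaping: the leaf (level == 0) case is
handled first and jumps out, so the node path runs one indentation level
shallower, with dec hoisted to the top of the loop. A trivial standalone
rendering of the de-nesting pattern:

#include <stdio.h>

static int walk(int level)
{
	while (level >= 0) {
		if (level == 0) {         /* leaf: handle it and leave */
			puts("reached leaf");
			return 0;
		}
		/* node path, formerly nested under 'if (level != 0)' */
		level--;                  /* descend one level */
	}
	return 1;
}

int main(void)
{
	return walk(3);
}
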
@@ -3433,7 +3396,7 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans,
free_extent_buffer(old);
add_root_to_dirty_list(root);
- extent_buffer_get(c);
+ atomic_inc(&c->refs);
path->nodes[level] = c;
path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
path->slots[level] = 0;
@@ -4966,7 +4929,7 @@ static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
root_sub_used(root, leaf->len);
- extent_buffer_get(leaf);
+ atomic_inc(&leaf->refs);
btrfs_free_tree_block(trans, root, leaf, 0, 1);
free_extent_buffer_stale(leaf);
}
@@ -5047,7 +5010,7 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
* for possible call to del_ptr below
*/
slot = path->slots[1];
- extent_buffer_get(leaf);
+ atomic_inc(&leaf->refs);
btrfs_set_path_blocking(path);
wret = push_leaf_left(trans, root, path, 1, 1,
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index fe2b8765d9e6..b2e8fd8a8e59 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -28,6 +28,7 @@
#include <linux/dynamic_debug.h>
#include <linux/refcount.h>
#include <linux/crc32c.h>
+#include "extent-io-tree.h"
#include "extent_io.h"
#include "extent_map.h"
#include "async-thread.h"
@@ -38,7 +39,7 @@ struct btrfs_transaction;
struct btrfs_pending_snapshot;
struct btrfs_delayed_ref_root;
struct btrfs_space_info;
-struct btrfs_block_group_cache;
+struct btrfs_block_group;
extern struct kmem_cache *btrfs_trans_handle_cachep;
extern struct kmem_cache *btrfs_bit_radix_cachep;
extern struct kmem_cache *btrfs_path_cachep;
@@ -56,9 +57,9 @@ struct btrfs_ref;
* filesystem data as well that can be used to read data in order to repair
* read errors on other disks.
*
- * Current value is derived from RAID1 with 2 copies.
+ * Current value is derived from RAID1C4 with 4 copies.
*/
-#define BTRFS_MAX_MIRRORS (2 + 1)
+#define BTRFS_MAX_MIRRORS (4 + 1)
#define BTRFS_MAX_LEVEL 8
@@ -291,7 +292,8 @@ struct btrfs_super_block {
BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF | \
BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA | \
BTRFS_FEATURE_INCOMPAT_NO_HOLES | \
- BTRFS_FEATURE_INCOMPAT_METADATA_UUID)
+ BTRFS_FEATURE_INCOMPAT_METADATA_UUID | \
+ BTRFS_FEATURE_INCOMPAT_RAID1C34)
#define BTRFS_FEATURE_INCOMPAT_SAFE_SET \
(BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF)
@@ -413,7 +415,7 @@ struct btrfs_free_cluster {
/* We did a full search and couldn't create a cluster */
bool fragmented;
- struct btrfs_block_group_cache *block_group;
+ struct btrfs_block_group *block_group;
/*
* when a cluster is allocated from a block group, we put the
* cluster onto a list in the block group so that it can
@@ -476,8 +478,8 @@ struct btrfs_swapfile_pin {
void *ptr;
struct inode *inode;
/*
- * If true, ptr points to a struct btrfs_block_group_cache. Otherwise,
- * ptr points to a struct btrfs_device.
+ * If true, ptr points to a struct btrfs_block_group. Otherwise, ptr
+ * points to a struct btrfs_device.
*/
bool is_block_group;
};
@@ -722,7 +724,6 @@ struct btrfs_fs_info {
struct btrfs_workqueue *endio_meta_write_workers;
struct btrfs_workqueue *endio_write_workers;
struct btrfs_workqueue *endio_freespace_worker;
- struct btrfs_workqueue *submit_workers;
struct btrfs_workqueue *caching_workers;
struct btrfs_workqueue *readahead_workers;
@@ -1519,18 +1520,18 @@ static inline u64 btrfs_stripe_devid_nr(struct extent_buffer *eb,
}
/* struct btrfs_block_group_item */
-BTRFS_SETGET_STACK_FUNCS(block_group_used, struct btrfs_block_group_item,
+BTRFS_SETGET_STACK_FUNCS(stack_block_group_used, struct btrfs_block_group_item,
used, 64);
-BTRFS_SETGET_FUNCS(disk_block_group_used, struct btrfs_block_group_item,
+BTRFS_SETGET_FUNCS(block_group_used, struct btrfs_block_group_item,
used, 64);
-BTRFS_SETGET_STACK_FUNCS(block_group_chunk_objectid,
+BTRFS_SETGET_STACK_FUNCS(stack_block_group_chunk_objectid,
struct btrfs_block_group_item, chunk_objectid, 64);
-BTRFS_SETGET_FUNCS(disk_block_group_chunk_objectid,
+BTRFS_SETGET_FUNCS(block_group_chunk_objectid,
struct btrfs_block_group_item, chunk_objectid, 64);
-BTRFS_SETGET_FUNCS(disk_block_group_flags,
+BTRFS_SETGET_FUNCS(block_group_flags,
struct btrfs_block_group_item, flags, 64);
-BTRFS_SETGET_STACK_FUNCS(block_group_flags,
+BTRFS_SETGET_STACK_FUNCS(stack_block_group_flags,
struct btrfs_block_group_item, flags, 64);
/* struct btrfs_free_space_info */
@@ -2163,6 +2164,9 @@ BTRFS_SETGET_STACK_FUNCS(super_uuid_tree_generation, struct btrfs_super_block,
int btrfs_super_csum_size(const struct btrfs_super_block *s);
const char *btrfs_super_csum_name(u16 csum_type);
+const char *btrfs_super_csum_driver(u16 csum_type);
+size_t __const btrfs_get_num_csums(void);
+
/*
* The leaf data grows from end-to-front in the node.
@@ -2397,7 +2401,7 @@ static inline u64 btrfs_calc_metadata_size(struct btrfs_fs_info *fs_info,
int btrfs_add_excluded_extent(struct btrfs_fs_info *fs_info,
u64 start, u64 num_bytes);
-void btrfs_free_excluded_extents(struct btrfs_block_group_cache *cache);
+void btrfs_free_excluded_extents(struct btrfs_block_group *cache);
int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
unsigned long count);
void btrfs_cleanup_ref_head_accounting(struct btrfs_fs_info *fs_info,
@@ -2453,8 +2457,8 @@ int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
struct btrfs_ref *generic_ref);
int btrfs_extent_readonly(struct btrfs_fs_info *fs_info, u64 bytenr);
-void btrfs_get_block_group_trimming(struct btrfs_block_group_cache *cache);
-void btrfs_put_block_group_trimming(struct btrfs_block_group_cache *cache);
+void btrfs_get_block_group_trimming(struct btrfs_block_group *cache);
+void btrfs_put_block_group_trimming(struct btrfs_block_group *cache);
void btrfs_clear_space_info_full(struct btrfs_fs_info *info);
enum btrfs_reserve_flush_enum {
@@ -2507,7 +2511,7 @@ void btrfs_wait_for_snapshot_creation(struct btrfs_root *root);
/* ctree.c */
int btrfs_bin_search(struct extent_buffer *eb, const struct btrfs_key *key,
int level, int *slot);
-int btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2);
+int __pure btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2);
int btrfs_previous_item(struct btrfs_root *root,
struct btrfs_path *path, u64 min_objectid,
int type);
@@ -2567,8 +2571,6 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
void btrfs_release_path(struct btrfs_path *p);
struct btrfs_path *btrfs_alloc_path(void);
void btrfs_free_path(struct btrfs_path *p);
-void btrfs_set_path_blocking(struct btrfs_path *p);
-void btrfs_unlock_up_safe(struct btrfs_path *p, int level);
int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
struct btrfs_path *path, int slot, int nr);
@@ -2870,10 +2872,9 @@ int btrfs_drop_inode(struct inode *inode);
int __init btrfs_init_cachep(void);
void __cold btrfs_destroy_cachep(void);
struct inode *btrfs_iget_path(struct super_block *s, struct btrfs_key *location,
- struct btrfs_root *root, int *new,
- struct btrfs_path *path);
+ struct btrfs_root *root, struct btrfs_path *path);
struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
- struct btrfs_root *root, int *was_new);
+ struct btrfs_root *root);
struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
struct page *page, size_t pg_offset,
u64 start, u64 end, int create);
@@ -2909,7 +2910,7 @@ long btrfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
long btrfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
int btrfs_ioctl_get_supported_features(void __user *arg);
void btrfs_sync_inode_flags_to_i_flags(struct inode *inode);
-int btrfs_is_empty_uuid(u8 *uuid);
+int __pure btrfs_is_empty_uuid(u8 *uuid);
int btrfs_defrag_file(struct inode *inode, struct file *file,
struct btrfs_ioctl_defrag_range_args *range,
u64 newer_than, unsigned long max_pages);
@@ -3143,7 +3144,7 @@ __cold
void __btrfs_handle_fs_error(struct btrfs_fs_info *fs_info, const char *function,
unsigned int line, int errno, const char *fmt, ...);
-const char *btrfs_decode_error(int errno);
+const char * __attribute_const__ btrfs_decode_error(int errno);
__cold
void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
diff --git a/fs/btrfs/delalloc-space.c b/fs/btrfs/delalloc-space.c
index db9f2c58eb4a..4cdac4d834f5 100644
--- a/fs/btrfs/delalloc-space.c
+++ b/fs/btrfs/delalloc-space.c
@@ -307,7 +307,6 @@ int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes)
unsigned nr_extents;
enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL;
int ret = 0;
- bool delalloc_lock = true;
/*
* If we are a free space inode we need to not flush since we will be in
@@ -320,7 +319,6 @@ int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes)
*/
if (btrfs_is_free_space_inode(inode)) {
flush = BTRFS_RESERVE_NO_FLUSH;
- delalloc_lock = false;
} else {
if (current->journal_info)
flush = BTRFS_RESERVE_FLUSH_LIMIT;
@@ -329,9 +327,6 @@ int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes)
schedule_timeout(1);
}
- if (delalloc_lock)
- mutex_lock(&inode->delalloc_mutex);
-
num_bytes = ALIGN(num_bytes, fs_info->sectorsize);
/*
@@ -348,10 +343,12 @@ int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes)
&qgroup_reserve);
ret = btrfs_qgroup_reserve_meta_prealloc(root, qgroup_reserve, true);
if (ret)
- goto out_fail;
+ return ret;
ret = btrfs_reserve_metadata_bytes(root, block_rsv, meta_reserve, flush);
- if (ret)
- goto out_qgroup;
+ if (ret) {
+ btrfs_qgroup_free_meta_prealloc(root, qgroup_reserve);
+ return ret;
+ }
/*
* Now we need to update our outstanding extents and csum bytes _first_
@@ -375,15 +372,7 @@ int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes)
block_rsv->qgroup_rsv_reserved += qgroup_reserve;
spin_unlock(&block_rsv->lock);
- if (delalloc_lock)
- mutex_unlock(&inode->delalloc_mutex);
return 0;
-out_qgroup:
- btrfs_qgroup_free_meta_prealloc(root, qgroup_reserve);
-out_fail:
- if (delalloc_lock)
- mutex_unlock(&inode->delalloc_mutex);
- return ret;
}
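
With the delalloc mutex gone there is only one resource left to unwind, so
the out_fail/out_qgroup labels collapse into direct returns. The shape of the
simplification, with stubs standing in for the reservation calls:

#include <stdio.h>

static int qgroup_reserve(void)   { return 0; }
static int metadata_reserve(void) { return 0; }
static void qgroup_free(void)     { }

static int reserve(void)
{
	int ret;

	ret = qgroup_reserve();
	if (ret)
		return ret;            /* was: goto out_fail */

	ret = metadata_reserve();
	if (ret) {
		qgroup_free();         /* was: goto out_qgroup */
		return ret;
	}
	return 0;
}

int main(void)
{
	printf("reserve: %d\n", reserve());
	return 0;
}
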
/**
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index 1f7f39b10bd0..d3e15e1d4a91 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -12,6 +12,7 @@
#include "transaction.h"
#include "ctree.h"
#include "qgroup.h"
+#include "locking.h"
#define BTRFS_DELAYED_WRITEBACK 512
#define BTRFS_DELAYED_BACKGROUND 128
@@ -1367,8 +1368,8 @@ static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
return -ENOMEM;
async_work->delayed_root = delayed_root;
- btrfs_init_work(&async_work->work, btrfs_delayed_meta_helper,
- btrfs_async_run_delayed_root, NULL, NULL);
+ btrfs_init_work(&async_work->work, btrfs_async_run_delayed_root, NULL,
+ NULL);
async_work->nr = nr;
btrfs_queue_work(fs_info->delayed_workers, &async_work->work);
@@ -1949,12 +1950,19 @@ void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
}
inode_id = delayed_nodes[n - 1]->inode_id + 1;
-
- for (i = 0; i < n; i++)
- refcount_inc(&delayed_nodes[i]->refs);
+ for (i = 0; i < n; i++) {
+ /*
+ * Don't increase refs in case the node is dead and
+ * about to be removed from the tree in the loop below
+ */
+ if (!refcount_inc_not_zero(&delayed_nodes[i]->refs))
+ delayed_nodes[i] = NULL;
+ }
spin_unlock(&root->inode_lock);
for (i = 0; i < n; i++) {
+ if (!delayed_nodes[i])
+ continue;
__btrfs_kill_delayed_node(delayed_nodes[i]);
btrfs_release_delayed_node(delayed_nodes[i]);
}
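
refcount_inc_not_zero() takes a reference only while the count is still
non-zero, so a node whose last reference is already gone is skipped rather
than resurrected. A user-space model of the primitive built on a CAS loop:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct node { atomic_int refs; };

static bool get_node(struct node *n)
{
	int old = atomic_load(&n->refs);

	while (old != 0) {
		/* bump the count only if it has not reached zero */
		if (atomic_compare_exchange_weak(&n->refs, &old, old + 1))
			return true;
	}
	return false;   /* node is dead, the caller must skip it */
}

int main(void)
{
	struct node live = { 1 }, dead = { 0 };

	printf("live: %d, dead: %d\n", get_node(&live), get_node(&dead));
	return 0;
}
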
diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
index 48890826b5e6..f639dde2a679 100644
--- a/fs/btrfs/dev-replace.c
+++ b/fs/btrfs/dev-replace.c
@@ -986,7 +986,7 @@ static int btrfs_dev_replace_kthread(void *data)
return 0;
}
-int btrfs_dev_replace_is_ongoing(struct btrfs_dev_replace *dev_replace)
+int __pure btrfs_dev_replace_is_ongoing(struct btrfs_dev_replace *dev_replace)
{
if (!dev_replace->is_valid)
return 0;
diff --git a/fs/btrfs/dev-replace.h b/fs/btrfs/dev-replace.h
index 78c5d8f1adda..60b70dacc299 100644
--- a/fs/btrfs/dev-replace.h
+++ b/fs/btrfs/dev-replace.h
@@ -17,6 +17,6 @@ void btrfs_dev_replace_status(struct btrfs_fs_info *fs_info,
int btrfs_dev_replace_cancel(struct btrfs_fs_info *fs_info);
void btrfs_dev_replace_suspend_for_unmount(struct btrfs_fs_info *fs_info);
int btrfs_resume_dev_replace_async(struct btrfs_fs_info *fs_info);
-int btrfs_dev_replace_is_ongoing(struct btrfs_dev_replace *dev_replace);
+int __pure btrfs_dev_replace_is_ongoing(struct btrfs_dev_replace *dev_replace);
#endif
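__pure here is the kernel's wrapper around the compiler's pure attribute: the result depends only on the arguments and readable memory, and there are no side effects, so the compiler may merge repeated calls. A small sketch, assuming only the GCC/Clang attribute:

#include <stdio.h>

#define __pure __attribute__((pure))

struct dev_replace { int is_valid; int state; };

/*
 * No side effects and a result that depends only on the argument: the
 * compiler is allowed to fold the two identical calls below into one.
 */
static __pure int replace_is_ongoing(const struct dev_replace *dr)
{
        return dr->is_valid && dr->state != 0;
}

int main(void)
{
        struct dev_replace dr = { 1, 2 };

        if (replace_is_ongoing(&dr) && replace_is_ongoing(&dr))
                printf("ongoing\n");
        return 0;
}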
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 402b61bf345c..e0edfdc9c82b 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -205,7 +205,6 @@ struct extent_map *btree_get_extent(struct btrfs_inode *inode,
struct page *page, size_t pg_offset, u64 start, u64 len,
int create)
{
- struct btrfs_fs_info *fs_info = inode->root->fs_info;
struct extent_map_tree *em_tree = &inode->extent_tree;
struct extent_map *em;
int ret;
@@ -213,7 +212,6 @@ struct extent_map *btree_get_extent(struct btrfs_inode *inode,
read_lock(&em_tree->lock);
em = lookup_extent_mapping(em_tree, start, len);
if (em) {
- em->bdev = fs_info->fs_devices->latest_bdev;
read_unlock(&em_tree->lock);
goto out;
}
@@ -228,7 +226,6 @@ struct extent_map *btree_get_extent(struct btrfs_inode *inode,
em->len = (u64)-1;
em->block_len = (u64)-1;
em->block_start = 0;
- em->bdev = fs_info->fs_devices->latest_bdev;
write_lock(&em_tree->lock);
ret = add_extent_mapping(em_tree, em, 0);
@@ -352,6 +349,9 @@ static bool btrfs_supported_super_csum(u16 csum_type)
{
switch (csum_type) {
case BTRFS_CSUM_TYPE_CRC32:
+ case BTRFS_CSUM_TYPE_XXHASH:
+ case BTRFS_CSUM_TYPE_SHA256:
+ case BTRFS_CSUM_TYPE_BLAKE2:
return true;
default:
return false;
@@ -545,9 +545,11 @@ static int csum_dirty_buffer(struct btrfs_fs_info *fs_info, struct page *page)
ret = btrfs_check_leaf_full(eb);
if (ret < 0) {
+ btrfs_print_tree(eb, 0);
btrfs_err(fs_info,
"block=%llu write time tree block corruption detected",
eb->start);
+ WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
return ret;
}
write_extent_buffer(eb, result, 0, csum_size);
@@ -608,7 +610,7 @@ static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
/* the pending IO might have been the only thing that kept this buffer
* in memory. Make sure we have a ref for all this other checks
*/
- extent_buffer_get(eb);
+ atomic_inc(&eb->refs);
reads_done = atomic_dec_and_test(&eb->io_pages);
if (!reads_done)
@@ -706,43 +708,31 @@ static void end_workqueue_bio(struct bio *bio)
struct btrfs_end_io_wq *end_io_wq = bio->bi_private;
struct btrfs_fs_info *fs_info;
struct btrfs_workqueue *wq;
- btrfs_work_func_t func;
fs_info = end_io_wq->info;
end_io_wq->status = bio->bi_status;
if (bio_op(bio) == REQ_OP_WRITE) {
- if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA) {
+ if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA)
wq = fs_info->endio_meta_write_workers;
- func = btrfs_endio_meta_write_helper;
- } else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE) {
+ else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE)
wq = fs_info->endio_freespace_worker;
- func = btrfs_freespace_write_helper;
- } else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) {
+ else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56)
wq = fs_info->endio_raid56_workers;
- func = btrfs_endio_raid56_helper;
- } else {
+ else
wq = fs_info->endio_write_workers;
- func = btrfs_endio_write_helper;
- }
} else {
- if (unlikely(end_io_wq->metadata ==
- BTRFS_WQ_ENDIO_DIO_REPAIR)) {
+ if (unlikely(end_io_wq->metadata == BTRFS_WQ_ENDIO_DIO_REPAIR))
wq = fs_info->endio_repair_workers;
- func = btrfs_endio_repair_helper;
- } else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) {
+ else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56)
wq = fs_info->endio_raid56_workers;
- func = btrfs_endio_raid56_helper;
- } else if (end_io_wq->metadata) {
+ else if (end_io_wq->metadata)
wq = fs_info->endio_meta_workers;
- func = btrfs_endio_meta_helper;
- } else {
+ else
wq = fs_info->endio_workers;
- func = btrfs_endio_helper;
- }
}
- btrfs_init_work(&end_io_wq->work, func, end_workqueue_fn, NULL, NULL);
+ btrfs_init_work(&end_io_wq->work, end_workqueue_fn, NULL, NULL);
btrfs_queue_work(wq, &end_io_wq->work);
}
@@ -803,8 +793,13 @@ static void run_one_async_done(struct btrfs_work *work)
return;
}
- ret = btrfs_map_bio(btrfs_sb(inode->i_sb), async->bio,
- async->mirror_num, 1);
+ /*
+ * All of the bios that pass through here are from async helpers.
+ * Use REQ_CGROUP_PUNT to issue them from the owning cgroup's context.
+ * This changes nothing when cgroups aren't in use.
+ */
+ async->bio->bi_opf |= REQ_CGROUP_PUNT;
+ ret = btrfs_map_bio(btrfs_sb(inode->i_sb), async->bio, async->mirror_num);
if (ret) {
async->bio->bi_status = ret;
bio_endio(async->bio);
@@ -835,8 +830,8 @@ blk_status_t btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
async->mirror_num = mirror_num;
async->submit_bio_start = submit_bio_start;
- btrfs_init_work(&async->work, btrfs_worker_helper, run_one_async_start,
- run_one_async_done, run_one_async_free);
+ btrfs_init_work(&async->work, run_one_async_start, run_one_async_done,
+ run_one_async_free);
async->bio_offset = bio_offset;
@@ -904,12 +899,12 @@ static blk_status_t btree_submit_bio_hook(struct inode *inode, struct bio *bio,
BTRFS_WQ_ENDIO_METADATA);
if (ret)
goto out_w_error;
- ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
+ ret = btrfs_map_bio(fs_info, bio, mirror_num);
} else if (!async) {
ret = btree_csum_one_bio(bio);
if (ret)
goto out_w_error;
- ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
+ ret = btrfs_map_bio(fs_info, bio, mirror_num);
} else {
/*
* kthread helpers are used to submit writes so that
@@ -1657,8 +1652,8 @@ static void end_workqueue_fn(struct btrfs_work *work)
bio->bi_status = end_io_wq->status;
bio->bi_private = end_io_wq->private;
bio->bi_end_io = end_io_wq->end_io;
- kmem_cache_free(btrfs_end_io_wq_cache, end_io_wq);
bio_endio(bio);
+ kmem_cache_free(btrfs_end_io_wq_cache, end_io_wq);
}
static int cleaner_kthread(void *arg)
@@ -1753,7 +1748,7 @@ static int transaction_kthread(void *arg)
}
now = ktime_get_seconds();
- if (cur->state < TRANS_STATE_BLOCKED &&
+ if (cur->state < TRANS_STATE_COMMIT_START &&
!test_bit(BTRFS_FS_NEED_ASYNC_COMMIT, &fs_info->flags) &&
(now < cur->start_time ||
now - cur->start_time < fs_info->commit_interval)) {
@@ -1792,18 +1787,18 @@ sleep:
}
/*
- * this will find the highest generation in the array of
- * root backups. The index of the highest array is returned,
- * or -1 if we can't find anything.
+ * This will find the highest generation in the array of root backups. The
+ * index of the newest entry is returned, or -EINVAL if we can't find
+ * anything.
*
* We check to make sure the array is valid by comparing the
* generation of the latest root in the array with the generation
* in the super block. If they don't match we pitch it.
*/
-static int find_newest_super_backup(struct btrfs_fs_info *info, u64 newest_gen)
+static int find_newest_super_backup(struct btrfs_fs_info *info)
{
+ const u64 newest_gen = btrfs_super_generation(info->super_copy);
u64 cur;
- int newest_index = -1;
struct btrfs_root_backup *root_backup;
int i;
@@ -1811,37 +1806,10 @@ static int find_newest_super_backup(struct btrfs_fs_info *info, u64 newest_gen)
root_backup = info->super_copy->super_roots + i;
cur = btrfs_backup_tree_root_gen(root_backup);
if (cur == newest_gen)
- newest_index = i;
+ return i;
}
- /* check to see if we actually wrapped around */
- if (newest_index == BTRFS_NUM_BACKUP_ROOTS - 1) {
- root_backup = info->super_copy->super_roots;
- cur = btrfs_backup_tree_root_gen(root_backup);
- if (cur == newest_gen)
- newest_index = 0;
- }
- return newest_index;
-}
-
-
-/*
- * find the oldest backup so we know where to store new entries
- * in the backup array. This will set the backup_root_index
- * field in the fs_info struct
- */
-static void find_oldest_super_backup(struct btrfs_fs_info *info,
- u64 newest_gen)
-{
- int newest_index = -1;
-
- newest_index = find_newest_super_backup(info, newest_gen);
- /* if there was garbage in there, just move along */
- if (newest_index == -1) {
- info->backup_root_index = 0;
- } else {
- info->backup_root_index = (newest_index + 1) % BTRFS_NUM_BACKUP_ROOTS;
- }
+ return -EINVAL;
}
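After the rewrite, the function is a straight scan that returns the first slot whose generation matches the superblock, or -EINVAL when the array is garbage. The same shape as a standalone sketch (array size and types are illustrative):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_BACKUP_ROOTS 4

static int find_newest_backup(const uint64_t *gens, uint64_t newest_gen)
{
        int i;

        for (i = 0; i < NUM_BACKUP_ROOTS; i++)
                if (gens[i] == newest_gen)
                        return i;
        return -EINVAL; /* array contents don't match the superblock */
}

int main(void)
{
        uint64_t gens[NUM_BACKUP_ROOTS] = { 7, 8, 9, 6 };

        printf("newest index: %d\n", find_newest_backup(gens, 9));
        return 0;
}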
/*
@@ -1851,22 +1819,8 @@ static void find_oldest_super_backup(struct btrfs_fs_info *info,
*/
static void backup_super_roots(struct btrfs_fs_info *info)
{
- int next_backup;
+ const int next_backup = info->backup_root_index;
struct btrfs_root_backup *root_backup;
- int last_backup;
-
- next_backup = info->backup_root_index;
- last_backup = (next_backup + BTRFS_NUM_BACKUP_ROOTS - 1) %
- BTRFS_NUM_BACKUP_ROOTS;
-
- /*
- * just overwrite the last backup if we're at the same generation
- * this happens only at umount
- */
- root_backup = info->super_for_commit->super_roots + last_backup;
- if (btrfs_backup_tree_root_gen(root_backup) ==
- btrfs_header_generation(info->tree_root->node))
- next_backup = last_backup;
root_backup = info->super_for_commit->super_roots + next_backup;
@@ -1939,40 +1893,31 @@ static void backup_super_roots(struct btrfs_fs_info *info)
}
/*
- * this copies info out of the root backup array and back into
- * the in-memory super block. It is meant to help iterate through
- * the array, so you send it the number of backups you've already
- * tried and the last backup index you used.
+ * read_backup_root - Reads a backup root based on the passed priority. Prio 0
+ * is the newest, prios 1/2/3 are the 2nd, 3rd and 4th (oldest) backup roots
*
- * this returns -1 when it has tried all the backups
+ * fs_info - filesystem whose backup roots need to be read
+ * priority - priority of backup root required
+ *
+ * Returns backup root index on success and -EINVAL otherwise.
*/
-static noinline int next_root_backup(struct btrfs_fs_info *info,
- struct btrfs_super_block *super,
- int *num_backups_tried, int *backup_index)
+static int read_backup_root(struct btrfs_fs_info *fs_info, u8 priority)
{
+ int backup_index = find_newest_super_backup(fs_info);
+ struct btrfs_super_block *super = fs_info->super_copy;
struct btrfs_root_backup *root_backup;
- int newest = *backup_index;
-
- if (*num_backups_tried == 0) {
- u64 gen = btrfs_super_generation(super);
- newest = find_newest_super_backup(info, gen);
- if (newest == -1)
- return -1;
+ if (priority < BTRFS_NUM_BACKUP_ROOTS && backup_index >= 0) {
+ if (priority == 0)
+ return backup_index;
- *backup_index = newest;
- *num_backups_tried = 1;
- } else if (*num_backups_tried == BTRFS_NUM_BACKUP_ROOTS) {
- /* we've tried all the backups, all done */
- return -1;
+ backup_index = backup_index + BTRFS_NUM_BACKUP_ROOTS - priority;
+ backup_index %= BTRFS_NUM_BACKUP_ROOTS;
} else {
- /* jump to the next oldest backup */
- newest = (*backup_index + BTRFS_NUM_BACKUP_ROOTS - 1) %
- BTRFS_NUM_BACKUP_ROOTS;
- *backup_index = newest;
- *num_backups_tried += 1;
+ return -EINVAL;
}
- root_backup = super->super_roots + newest;
+
+ root_backup = super->super_roots + backup_index;
btrfs_set_super_generation(super,
btrfs_backup_tree_root_gen(root_backup));
@@ -1982,12 +1927,13 @@ static noinline int next_root_backup(struct btrfs_fs_info *info,
btrfs_set_super_bytes_used(super, btrfs_backup_bytes_used(root_backup));
/*
- * fixme: the total bytes and num_devices need to match or we should
+ * Fixme: the total bytes and num_devices need to match, or else we
* need a fsck
*/
btrfs_set_super_total_bytes(super, btrfs_backup_total_bytes(root_backup));
btrfs_set_super_num_devices(super, btrfs_backup_num_devices(root_backup));
- return 0;
+
+ return backup_index;
}
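The priority-to-slot mapping is plain ring arithmetic: priority p picks the slot p entries behind the newest one, wrapping modulo the ring size. A small standalone demonstration:

#include <stdio.h>

#define NUM_BACKUP_ROOTS 4

/* Slot for priority p, where slot 'newest' holds the newest backup. */
static int backup_slot(int newest, int priority)
{
        return (newest + NUM_BACKUP_ROOTS - priority) % NUM_BACKUP_ROOTS;
}

int main(void)
{
        int p;

        for (p = 0; p < NUM_BACKUP_ROOTS; p++)
                printf("priority %d -> slot %d\n", p, backup_slot(2, p));
        /* with newest == 2 this prints slots 2, 1, 0, 3 */
        return 0;
}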
/* helper to cleanup workers */
@@ -2002,7 +1948,6 @@ static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info)
btrfs_destroy_workqueue(fs_info->rmw_workers);
btrfs_destroy_workqueue(fs_info->endio_write_workers);
btrfs_destroy_workqueue(fs_info->endio_freespace_worker);
- btrfs_destroy_workqueue(fs_info->submit_workers);
btrfs_destroy_workqueue(fs_info->delayed_workers);
btrfs_destroy_workqueue(fs_info->caching_workers);
btrfs_destroy_workqueue(fs_info->readahead_workers);
@@ -2028,7 +1973,7 @@ static void free_root_extent_buffers(struct btrfs_root *root)
}
/* helper to cleanup tree roots */
-static void free_root_pointers(struct btrfs_fs_info *info, int chunk_root)
+static void free_root_pointers(struct btrfs_fs_info *info, bool free_chunk_root)
{
free_root_extent_buffers(info->tree_root);
@@ -2037,7 +1982,7 @@ static void free_root_pointers(struct btrfs_fs_info *info, int chunk_root)
free_root_extent_buffers(info->csum_root);
free_root_extent_buffers(info->quota_root);
free_root_extent_buffers(info->uuid_root);
- if (chunk_root)
+ if (free_chunk_root)
free_root_extent_buffers(info->chunk_root);
free_root_extent_buffers(info->free_space_root);
}
@@ -2167,16 +2112,6 @@ static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info,
fs_info->caching_workers =
btrfs_alloc_workqueue(fs_info, "cache", flags, max_active, 0);
- /*
- * a higher idle thresh on the submit workers makes it much more
- * likely that bios will be send down in a sane order to the
- * devices
- */
- fs_info->submit_workers =
- btrfs_alloc_workqueue(fs_info, "submit", flags,
- min_t(u64, fs_devices->num_devices,
- max_active), 64);
-
fs_info->fixup_workers =
btrfs_alloc_workqueue(fs_info, "fixup", flags, 1, 0);
@@ -2215,7 +2150,7 @@ static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info,
btrfs_alloc_workqueue(fs_info, "qgroup-rescan", flags, 1, 0);
if (!(fs_info->workers && fs_info->delalloc_workers &&
- fs_info->submit_workers && fs_info->flush_workers &&
+ fs_info->flush_workers &&
fs_info->endio_workers && fs_info->endio_meta_workers &&
fs_info->endio_meta_write_workers &&
fs_info->endio_repair_workers &&
@@ -2233,13 +2168,13 @@ static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info,
static int btrfs_init_csum_hash(struct btrfs_fs_info *fs_info, u16 csum_type)
{
struct crypto_shash *csum_shash;
- const char *csum_name = btrfs_super_csum_name(csum_type);
+ const char *csum_driver = btrfs_super_csum_driver(csum_type);
- csum_shash = crypto_alloc_shash(csum_name, 0, 0);
+ csum_shash = crypto_alloc_shash(csum_driver, 0, 0);
if (IS_ERR(csum_shash)) {
btrfs_err(fs_info, "error allocating %s hash for checksum",
- csum_name);
+ csum_driver);
return PTR_ERR(csum_shash);
}
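The rename from csum_name to csum_driver reflects that crypto_alloc_shash() is handed a crypto driver string, which need not match the checksum's user-visible name. A hedged userspace sketch of such a table; the concrete name/driver strings below are assumptions for illustration, not taken from this diff:

#include <stdio.h>

/*
 * Hypothetical csum-type table: name is what users see, driver is what
 * the crypto layer is asked for. The strings are illustrative only.
 */
struct csum_desc { const char *name; const char *driver; };

static const struct csum_desc csums[] = {
        { "crc32c",   "crc32c" },
        { "xxhash64", "xxhash64" },
        { "sha256",   "sha256" },
        { "blake2",   "blake2b-256" }, /* name and driver differ */
};

static const char *csum_driver(unsigned int type)
{
        return csums[type].driver;
}

int main(void)
{
        printf("driver for type 3: %s\n", csum_driver(3));
        return 0;
}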
@@ -2589,7 +2524,101 @@ out:
return ret;
}
-int open_ctree(struct super_block *sb,
+static int __cold init_tree_roots(struct btrfs_fs_info *fs_info)
+{
+ int backup_index = find_newest_super_backup(fs_info);
+ struct btrfs_super_block *sb = fs_info->super_copy;
+ struct btrfs_root *tree_root = fs_info->tree_root;
+ bool handle_error = false;
+ int ret = 0;
+ int i;
+
+ for (i = 0; i < BTRFS_NUM_BACKUP_ROOTS; i++) {
+ u64 generation;
+ int level;
+
+ if (handle_error) {
+ if (!IS_ERR(tree_root->node))
+ free_extent_buffer(tree_root->node);
+ tree_root->node = NULL;
+
+ if (!btrfs_test_opt(fs_info, USEBACKUPROOT))
+ break;
+
+ free_root_pointers(fs_info, 0);
+
+ /*
+ * Don't use the log in recovery mode; it won't be
+ * valid
+ */
+ btrfs_set_super_log_root(sb, 0);
+
+ /* We can't trust the free space cache either */
+ btrfs_set_opt(fs_info->mount_opt, CLEAR_CACHE);
+
+ ret = read_backup_root(fs_info, i);
+ backup_index = ret;
+ if (ret < 0)
+ return ret;
+ }
+ generation = btrfs_super_generation(sb);
+ level = btrfs_super_root_level(sb);
+ tree_root->node = read_tree_block(fs_info, btrfs_super_root(sb),
+ generation, level, NULL);
+ if (IS_ERR(tree_root->node) ||
+ !extent_buffer_uptodate(tree_root->node)) {
+ handle_error = true;
+
+ if (IS_ERR(tree_root->node))
+ ret = PTR_ERR(tree_root->node);
+ else if (!extent_buffer_uptodate(tree_root->node))
+ ret = -EUCLEAN;
+
+ btrfs_warn(fs_info, "failed to read tree root");
+ continue;
+ }
+
+ btrfs_set_root_node(&tree_root->root_item, tree_root->node);
+ tree_root->commit_root = btrfs_root_node(tree_root);
+ btrfs_set_root_refs(&tree_root->root_item, 1);
+
+ /*
+ * No need to hold btrfs_root::objectid_mutex since the fs
+ * hasn't been fully initialised and we are the only user
+ */
+ ret = btrfs_find_highest_objectid(tree_root,
+ &tree_root->highest_objectid);
+ if (ret < 0) {
+ handle_error = true;
+ continue;
+ }
+
+ ASSERT(tree_root->highest_objectid <= BTRFS_LAST_FREE_OBJECTID);
+
+ ret = btrfs_read_roots(fs_info);
+ if (ret < 0) {
+ handle_error = true;
+ continue;
+ }
+
+ /* All successful */
+ fs_info->generation = generation;
+ fs_info->last_trans_committed = generation;
+
+ /* Always begin writing backup roots after the one being used */
+ if (backup_index < 0) {
+ fs_info->backup_root_index = 0;
+ } else {
+ fs_info->backup_root_index = backup_index + 1;
+ fs_info->backup_root_index %= BTRFS_NUM_BACKUP_ROOTS;
+ }
+ break;
+ }
+
+ return ret;
+}
+
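init_tree_roots() folds the old retry_root_backup goto dance into one bounded loop: the first pass uses the superblock as-is, and every later pass first rolls state back and switches to the next backup root. A compilable sketch of just the control flow (both helpers are stand-ins that fake two failed attempts):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define NUM_BACKUP_ROOTS 4

static int load_root(int attempt) { return attempt < 2 ? -EIO : 0; }
static int load_backup(int prio) { return prio; }

static int init_roots(bool use_backup_root)
{
        bool handle_error = false;
        int ret = 0, i;

        for (i = 0; i < NUM_BACKUP_ROOTS; i++) {
                if (handle_error) {
                        if (!use_backup_root)
                                break;          /* give up with last error */
                        ret = load_backup(i);   /* switch to backup i */
                        if (ret < 0)
                                return ret;
                }
                ret = load_root(i);
                if (ret < 0) {
                        handle_error = true;    /* retry with next backup */
                        continue;
                }
                break;                          /* all successful */
        }
        return ret;
}

int main(void)
{
        printf("ret=%d\n", init_roots(true));
        return 0;
}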
+int __cold open_ctree(struct super_block *sb,
struct btrfs_fs_devices *fs_devices,
char *options)
{
@@ -2607,8 +2636,6 @@ int open_ctree(struct super_block *sb,
struct btrfs_root *chunk_root;
int ret;
int err = -EINVAL;
- int num_backups_tried = 0;
- int backup_index = 0;
int clear_free_space_tree = 0;
int level;
@@ -2879,13 +2906,6 @@ int open_ctree(struct super_block *sb,
set_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state);
/*
- * run through our array of backup supers and setup
- * our ring pointer to the oldest one
- */
- generation = btrfs_super_generation(disk_super);
- find_oldest_super_backup(fs_info, generation);
-
- /*
* In the long term, we'll store the compression type in the super
* block, and it'll be used for per file compression control.
*/
@@ -3031,44 +3051,9 @@ int open_ctree(struct super_block *sb,
goto fail_tree_roots;
}
-retry_root_backup:
- generation = btrfs_super_generation(disk_super);
- level = btrfs_super_root_level(disk_super);
-
- tree_root->node = read_tree_block(fs_info,
- btrfs_super_root(disk_super),
- generation, level, NULL);
- if (IS_ERR(tree_root->node) ||
- !extent_buffer_uptodate(tree_root->node)) {
- btrfs_warn(fs_info, "failed to read tree root");
- if (!IS_ERR(tree_root->node))
- free_extent_buffer(tree_root->node);
- tree_root->node = NULL;
- goto recovery_tree_root;
- }
-
- btrfs_set_root_node(&tree_root->root_item, tree_root->node);
- tree_root->commit_root = btrfs_root_node(tree_root);
- btrfs_set_root_refs(&tree_root->root_item, 1);
-
- mutex_lock(&tree_root->objectid_mutex);
- ret = btrfs_find_highest_objectid(tree_root,
- &tree_root->highest_objectid);
- if (ret) {
- mutex_unlock(&tree_root->objectid_mutex);
- goto recovery_tree_root;
- }
-
- ASSERT(tree_root->highest_objectid <= BTRFS_LAST_FREE_OBJECTID);
-
- mutex_unlock(&tree_root->objectid_mutex);
-
- ret = btrfs_read_roots(fs_info);
+ ret = init_tree_roots(fs_info);
if (ret)
- goto recovery_tree_root;
-
- fs_info->generation = generation;
- fs_info->last_trans_committed = generation;
+ goto fail_tree_roots;
ret = btrfs_verify_dev_extents(fs_info);
if (ret) {
@@ -3336,7 +3321,7 @@ fail_block_groups:
btrfs_put_block_group_cache(fs_info);
fail_tree_roots:
- free_root_pointers(fs_info, 1);
+ free_root_pointers(fs_info, true);
invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
fail_sb_buffer:
@@ -3363,24 +3348,6 @@ fail:
btrfs_free_stripe_hash_table(fs_info);
btrfs_close_devices(fs_info->fs_devices);
return err;
-
-recovery_tree_root:
- if (!btrfs_test_opt(fs_info, USEBACKUPROOT))
- goto fail_tree_roots;
-
- free_root_pointers(fs_info, 0);
-
- /* don't use the log in recovery mode, it won't be valid */
- btrfs_set_super_log_root(disk_super, 0);
-
- /* we can't trust the free space cache either */
- btrfs_set_opt(fs_info->mount_opt, CLEAR_CACHE);
-
- ret = next_root_backup(fs_info, fs_info->super_copy,
- &num_backups_tried, &backup_index);
- if (ret == -1)
- goto fail_block_groups;
- goto retry_root_backup;
}
ALLOW_ERROR_INJECTION(open_ctree, ERRNO);
@@ -3974,7 +3941,7 @@ int btrfs_commit_super(struct btrfs_fs_info *fs_info)
return btrfs_commit_transaction(trans);
}
-void close_ctree(struct btrfs_fs_info *fs_info)
+void __cold close_ctree(struct btrfs_fs_info *fs_info)
{
int ret;
@@ -4062,7 +4029,7 @@ void close_ctree(struct btrfs_fs_info *fs_info)
btrfs_free_block_groups(fs_info);
clear_bit(BTRFS_FS_OPEN, &fs_info->flags);
- free_root_pointers(fs_info, 1);
+ free_root_pointers(fs_info, true);
iput(fs_info->btree_inode);
@@ -4439,7 +4406,7 @@ again:
return 0;
}
-static void btrfs_cleanup_bg_io(struct btrfs_block_group_cache *cache)
+static void btrfs_cleanup_bg_io(struct btrfs_block_group *cache)
{
struct inode *inode;
@@ -4456,12 +4423,12 @@ static void btrfs_cleanup_bg_io(struct btrfs_block_group_cache *cache)
void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *cur_trans,
struct btrfs_fs_info *fs_info)
{
- struct btrfs_block_group_cache *cache;
+ struct btrfs_block_group *cache;
spin_lock(&cur_trans->dirty_bgs_lock);
while (!list_empty(&cur_trans->dirty_bgs)) {
cache = list_first_entry(&cur_trans->dirty_bgs,
- struct btrfs_block_group_cache,
+ struct btrfs_block_group,
dirty_list);
if (!list_empty(&cache->io_list)) {
@@ -4489,7 +4456,7 @@ void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *cur_trans,
*/
while (!list_empty(&cur_trans->io_bgs)) {
cache = list_first_entry(&cur_trans->io_bgs,
- struct btrfs_block_group_cache,
+ struct btrfs_block_group,
io_list);
list_del_init(&cache->io_list);
diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h
index a6958103d87e..76f123ebb292 100644
--- a/fs/btrfs/disk-io.h
+++ b/fs/btrfs/disk-io.h
@@ -49,10 +49,10 @@ struct extent_buffer *btrfs_find_create_tree_block(
struct btrfs_fs_info *fs_info,
u64 bytenr);
void btrfs_clean_tree_block(struct extent_buffer *buf);
-int open_ctree(struct super_block *sb,
+int __cold open_ctree(struct super_block *sb,
struct btrfs_fs_devices *fs_devices,
char *options);
-void close_ctree(struct btrfs_fs_info *fs_info);
+void __cold close_ctree(struct btrfs_fs_info *fs_info);
int write_all_supers(struct btrfs_fs_info *fs_info, int max_mirrors);
struct buffer_head *btrfs_read_dev_super(struct block_device *bdev);
int btrfs_read_dev_one_super(struct block_device *bdev, int copy_num,
diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c
index ddf28ecf17f9..72e312cae69d 100644
--- a/fs/btrfs/export.c
+++ b/fs/btrfs/export.c
@@ -87,7 +87,7 @@ static struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid,
key.type = BTRFS_INODE_ITEM_KEY;
key.offset = 0;
- inode = btrfs_iget(sb, &key, root, NULL);
+ inode = btrfs_iget(sb, &key, root);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
goto fail;
@@ -214,7 +214,7 @@ static struct dentry *btrfs_get_parent(struct dentry *child)
key.type = BTRFS_INODE_ITEM_KEY;
key.offset = 0;
- return d_obtain_alias(btrfs_iget(fs_info->sb, &key, root, NULL));
+ return d_obtain_alias(btrfs_iget(fs_info->sb, &key, root));
fail:
btrfs_free_path(path);
return ERR_PTR(ret);
diff --git a/fs/btrfs/extent-io-tree.h b/fs/btrfs/extent-io-tree.h
new file mode 100644
index 000000000000..a3febe746c79
--- /dev/null
+++ b/fs/btrfs/extent-io-tree.h
@@ -0,0 +1,248 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef BTRFS_EXTENT_IO_TREE_H
+#define BTRFS_EXTENT_IO_TREE_H
+
+struct extent_changeset;
+struct io_failure_record;
+
+/* Bits for the extent state */
+#define EXTENT_DIRTY (1U << 0)
+#define EXTENT_UPTODATE (1U << 1)
+#define EXTENT_LOCKED (1U << 2)
+#define EXTENT_NEW (1U << 3)
+#define EXTENT_DELALLOC (1U << 4)
+#define EXTENT_DEFRAG (1U << 5)
+#define EXTENT_BOUNDARY (1U << 6)
+#define EXTENT_NODATASUM (1U << 7)
+#define EXTENT_CLEAR_META_RESV (1U << 8)
+#define EXTENT_NEED_WAIT (1U << 9)
+#define EXTENT_DAMAGED (1U << 10)
+#define EXTENT_NORESERVE (1U << 11)
+#define EXTENT_QGROUP_RESERVED (1U << 12)
+#define EXTENT_CLEAR_DATA_RESV (1U << 13)
+#define EXTENT_DELALLOC_NEW (1U << 14)
+#define EXTENT_DO_ACCOUNTING (EXTENT_CLEAR_META_RESV | \
+ EXTENT_CLEAR_DATA_RESV)
+#define EXTENT_CTLBITS (EXTENT_DO_ACCOUNTING)
+
+/*
+ * The bits redefined above are used only in the device allocation tree.
+ * They shouldn't reuse EXTENT_LOCKED / EXTENT_BOUNDARY / EXTENT_CLEAR_META_RESV
+ * / EXTENT_CLEAR_DATA_RESV because those bits have special meaning to the bit
+ * manipulation functions.
+ */
+#define CHUNK_ALLOCATED EXTENT_DIRTY
+#define CHUNK_TRIMMED EXTENT_DEFRAG
+
+enum {
+ IO_TREE_FS_INFO_FREED_EXTENTS0,
+ IO_TREE_FS_INFO_FREED_EXTENTS1,
+ IO_TREE_INODE_IO,
+ IO_TREE_INODE_IO_FAILURE,
+ IO_TREE_RELOC_BLOCKS,
+ IO_TREE_TRANS_DIRTY_PAGES,
+ IO_TREE_ROOT_DIRTY_LOG_PAGES,
+ IO_TREE_SELFTEST,
+};
+
+struct extent_io_tree {
+ struct rb_root state;
+ struct btrfs_fs_info *fs_info;
+ void *private_data;
+ u64 dirty_bytes;
+ bool track_uptodate;
+
+ /* Who owns this io tree, should be one of IO_TREE_* */
+ u8 owner;
+
+ spinlock_t lock;
+ const struct extent_io_ops *ops;
+};
+
+struct extent_state {
+ u64 start;
+ u64 end; /* inclusive */
+ struct rb_node rb_node;
+
+ /* ADD NEW ELEMENTS AFTER THIS */
+ wait_queue_head_t wq;
+ refcount_t refs;
+ unsigned state;
+
+ struct io_failure_record *failrec;
+
+#ifdef CONFIG_BTRFS_DEBUG
+ struct list_head leak_list;
+#endif
+};
+
+int __init extent_state_cache_init(void);
+void __cold extent_state_cache_exit(void);
+
+void extent_io_tree_init(struct btrfs_fs_info *fs_info,
+ struct extent_io_tree *tree, unsigned int owner,
+ void *private_data);
+void extent_io_tree_release(struct extent_io_tree *tree);
+
+int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
+ struct extent_state **cached);
+
+static inline int lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
+{
+ return lock_extent_bits(tree, start, end, NULL);
+}
+
+int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end);
+
+int __init extent_io_init(void);
+void __cold extent_io_exit(void);
+
+u64 count_range_bits(struct extent_io_tree *tree,
+ u64 *start, u64 search_end,
+ u64 max_bytes, unsigned bits, int contig);
+
+void free_extent_state(struct extent_state *state);
+int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
+ unsigned bits, int filled,
+ struct extent_state *cached_state);
+int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
+ unsigned bits, struct extent_changeset *changeset);
+int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
+ unsigned bits, int wake, int delete,
+ struct extent_state **cached);
+int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
+ unsigned bits, int wake, int delete,
+ struct extent_state **cached, gfp_t mask,
+ struct extent_changeset *changeset);
+
+static inline int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end)
+{
+ return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL);
+}
+
+static inline int unlock_extent_cached(struct extent_io_tree *tree, u64 start,
+ u64 end, struct extent_state **cached)
+{
+ return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
+ GFP_NOFS, NULL);
+}
+
+static inline int unlock_extent_cached_atomic(struct extent_io_tree *tree,
+ u64 start, u64 end, struct extent_state **cached)
+{
+ return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
+ GFP_ATOMIC, NULL);
+}
+
+static inline int clear_extent_bits(struct extent_io_tree *tree, u64 start,
+ u64 end, unsigned bits)
+{
+ int wake = 0;
+
+ if (bits & EXTENT_LOCKED)
+ wake = 1;
+
+ return clear_extent_bit(tree, start, end, bits, wake, 0, NULL);
+}
+
+int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
+ unsigned bits, struct extent_changeset *changeset);
+int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
+ unsigned bits, u64 *failed_start,
+ struct extent_state **cached_state, gfp_t mask);
+int set_extent_bits_nowait(struct extent_io_tree *tree, u64 start, u64 end,
+ unsigned bits);
+
+static inline int set_extent_bits(struct extent_io_tree *tree, u64 start,
+ u64 end, unsigned bits)
+{
+ return set_extent_bit(tree, start, end, bits, NULL, NULL, GFP_NOFS);
+}
+
+static inline int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
+ u64 end, struct extent_state **cached_state)
+{
+ return __clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0,
+ cached_state, GFP_NOFS, NULL);
+}
+
+static inline int set_extent_dirty(struct extent_io_tree *tree, u64 start,
+ u64 end, gfp_t mask)
+{
+ return set_extent_bit(tree, start, end, EXTENT_DIRTY, NULL,
+ NULL, mask);
+}
+
+static inline int clear_extent_dirty(struct extent_io_tree *tree, u64 start,
+ u64 end, struct extent_state **cached)
+{
+ return clear_extent_bit(tree, start, end,
+ EXTENT_DIRTY | EXTENT_DELALLOC |
+ EXTENT_DO_ACCOUNTING, 0, 0, cached);
+}
+
+int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
+ unsigned bits, unsigned clear_bits,
+ struct extent_state **cached_state);
+
+static inline int set_extent_delalloc(struct extent_io_tree *tree, u64 start,
+ u64 end, unsigned int extra_bits,
+ struct extent_state **cached_state)
+{
+ return set_extent_bit(tree, start, end,
+ EXTENT_DELALLOC | EXTENT_UPTODATE | extra_bits,
+ NULL, cached_state, GFP_NOFS);
+}
+
+static inline int set_extent_defrag(struct extent_io_tree *tree, u64 start,
+ u64 end, struct extent_state **cached_state)
+{
+ return set_extent_bit(tree, start, end,
+ EXTENT_DELALLOC | EXTENT_UPTODATE | EXTENT_DEFRAG,
+ NULL, cached_state, GFP_NOFS);
+}
+
+static inline int set_extent_new(struct extent_io_tree *tree, u64 start,
+ u64 end)
+{
+ return set_extent_bit(tree, start, end, EXTENT_NEW, NULL, NULL,
+ GFP_NOFS);
+}
+
+static inline int set_extent_uptodate(struct extent_io_tree *tree, u64 start,
+ u64 end, struct extent_state **cached_state, gfp_t mask)
+{
+ return set_extent_bit(tree, start, end, EXTENT_UPTODATE, NULL,
+ cached_state, mask);
+}
+
+int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
+ u64 *start_ret, u64 *end_ret, unsigned bits,
+ struct extent_state **cached_state);
+void find_first_clear_extent_bit(struct extent_io_tree *tree, u64 start,
+ u64 *start_ret, u64 *end_ret, unsigned bits);
+int extent_invalidatepage(struct extent_io_tree *tree,
+ struct page *page, unsigned long offset);
+bool btrfs_find_delalloc_range(struct extent_io_tree *tree, u64 *start,
+ u64 *end, u64 max_bytes,
+ struct extent_state **cached_state);
+
+/* This should be reworked in the future and put elsewhere. */
+int get_state_failrec(struct extent_io_tree *tree, u64 start,
+ struct io_failure_record **failrec);
+int set_state_failrec(struct extent_io_tree *tree, u64 start,
+ struct io_failure_record *failrec);
+void btrfs_free_io_failure_record(struct btrfs_inode *inode, u64 start,
+ u64 end);
+int btrfs_get_io_failure_record(struct inode *inode, u64 start, u64 end,
+ struct io_failure_record **failrec_ret);
+int free_io_failure(struct extent_io_tree *failure_tree,
+ struct extent_io_tree *io_tree,
+ struct io_failure_record *rec);
+int clean_io_failure(struct btrfs_fs_info *fs_info,
+ struct extent_io_tree *failure_tree,
+ struct extent_io_tree *io_tree, u64 start,
+ struct page *page, u64 ino, unsigned int pg_offset);
+
+#endif /* BTRFS_EXTENT_IO_TREE_H */
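Worth noting in the new header: EXTENT_DO_ACCOUNTING is a composite of the two reservation bits, and the chunk allocation tree reuses harmless bits under its own CHUNK_* names. A standalone sketch of how such composite masks behave:

#include <stdio.h>

#define EXTENT_DIRTY            (1U << 0)
#define EXTENT_DEFRAG           (1U << 5)
#define EXTENT_CLEAR_META_RESV  (1U << 8)
#define EXTENT_CLEAR_DATA_RESV  (1U << 13)
#define EXTENT_DO_ACCOUNTING    (EXTENT_CLEAR_META_RESV | \
                                 EXTENT_CLEAR_DATA_RESV)

/* The chunk tree reuses harmless bits under clearer names. */
#define CHUNK_ALLOCATED         EXTENT_DIRTY
#define CHUNK_TRIMMED           EXTENT_DEFRAG

int main(void)
{
        unsigned int state = EXTENT_DO_ACCOUNTING;

        /* A composite mask tests both reservation bits at once. */
        printf("meta resv set: %d\n", !!(state & EXTENT_CLEAR_META_RESV));
        printf("full accounting: %d\n",
               (state & EXTENT_DO_ACCOUNTING) == EXTENT_DO_ACCOUNTING);
        return 0;
}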
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 49cb26fa7c63..153f71a5bba9 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -54,7 +54,7 @@ static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
static int find_next_key(struct btrfs_path *path, int level,
struct btrfs_key *key);
-static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
+static int block_group_bits(struct btrfs_block_group *cache, u64 bits)
{
return (cache->flags & bits) == bits;
}
@@ -70,13 +70,13 @@ int btrfs_add_excluded_extent(struct btrfs_fs_info *fs_info,
return 0;
}
-void btrfs_free_excluded_extents(struct btrfs_block_group_cache *cache)
+void btrfs_free_excluded_extents(struct btrfs_block_group *cache)
{
struct btrfs_fs_info *fs_info = cache->fs_info;
u64 start, end;
- start = cache->key.objectid;
- end = start + cache->key.offset - 1;
+ start = cache->start;
+ end = start + cache->length - 1;
clear_extent_bits(&fs_info->freed_extents[0],
start, end, EXTENT_UPTODATE);
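Throughout this file the key.objectid/key.offset pairs give way to dedicated start/length members, so range math reads directly off the block group instead of going through on-disk key fields. A before/after sketch (both struct layouts are illustrative):

#include <stdint.h>
#include <stdio.h>

struct key { uint64_t objectid, offset; };
struct block_group_old { struct key key; };
struct block_group { uint64_t start, length; };

int main(void)
{
        struct block_group_old o = { { 4096, 1024 } };
        struct block_group n = { 4096, 1024 };

        /* The range end used to be spelled via on-disk key fields ... */
        printf("old end: %llu\n",
               (unsigned long long)(o.key.objectid + o.key.offset));
        /* ... and is now spelled via self-describing members. */
        printf("new end: %llu\n",
               (unsigned long long)(n.start + n.length));
        return 0;
}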
@@ -1306,8 +1306,10 @@ static int btrfs_issue_discard(struct block_device *bdev, u64 start, u64 len,
int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr,
u64 num_bytes, u64 *actual_bytes)
{
- int ret;
+ int ret = 0;
u64 discarded_bytes = 0;
+ u64 end = bytenr + num_bytes;
+ u64 cur = bytenr;
struct btrfs_bio *bbio = NULL;
@@ -1316,15 +1318,23 @@ int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr,
* associated to its stripes that don't go away while we are discarding.
*/
btrfs_bio_counter_inc_blocked(fs_info);
- /* Tell the block device(s) that the sectors can be discarded */
- ret = btrfs_map_block(fs_info, BTRFS_MAP_DISCARD, bytenr, &num_bytes,
- &bbio, 0);
- /* Error condition is -ENOMEM */
- if (!ret) {
- struct btrfs_bio_stripe *stripe = bbio->stripes;
+ while (cur < end) {
+ struct btrfs_bio_stripe *stripe;
int i;
+ num_bytes = end - cur;
+ /* Tell the block device(s) that the sectors can be discarded */
+ ret = btrfs_map_block(fs_info, BTRFS_MAP_DISCARD, cur,
+ &num_bytes, &bbio, 0);
+ /*
+ * Error can be -ENOMEM, -ENOENT (no such chunk mapping) or
+ * -EOPNOTSUPP. For any such error, @num_bytes is not updated,
+ * thus we can't continue anyway.
+ */
+ if (ret < 0)
+ goto out;
+ stripe = bbio->stripes;
for (i = 0; i < bbio->num_stripes; i++, stripe++) {
u64 bytes;
struct request_queue *req_q;
@@ -1341,10 +1351,19 @@ int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr,
stripe->physical,
stripe->length,
&bytes);
- if (!ret)
+ if (!ret) {
discarded_bytes += bytes;
- else if (ret != -EOPNOTSUPP)
- break; /* Logic errors or -ENOMEM, or -EIO but I don't know how that could happen JDM */
+ } else if (ret != -EOPNOTSUPP) {
+ /*
+ * Logic errors or -ENOMEM, or -EIO, but
+ * unlikely to happen.
+ *
+ * And since there are two loops, explicitly
+ * go to out to avoid confusion.
+ */
+ btrfs_put_bbio(bbio);
+ goto out;
+ }
/*
* Just in case we get back EOPNOTSUPP for some reason,
@@ -1354,7 +1373,9 @@ int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr,
ret = 0;
}
btrfs_put_bbio(bbio);
+ cur += num_bytes;
}
+out:
btrfs_bio_counter_dec(fs_info);
if (actual_bytes)
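The rewrite turns the single btrfs_map_block() call into a loop over [bytenr, bytenr + num_bytes): each mapping call may shrink num_bytes to the length it could actually map, and the loop advances by that amount. The shape as a standalone sketch (map_chunk is a stand-in that maps at most 64 bytes per call):

#include <stdint.h>
#include <stdio.h>

/* Stand-in: maps at most 64 bytes per call, reports the mapped length. */
static int map_chunk(uint64_t cur, uint64_t *len)
{
        if (*len > 64)
                *len = 64;
        return 0;
}

static int discard_range(uint64_t start, uint64_t num_bytes)
{
        uint64_t end = start + num_bytes;
        uint64_t cur = start;
        int ret = 0;

        while (cur < end) {
                uint64_t len = end - cur;

                ret = map_chunk(cur, &len);     /* may shorten len */
                if (ret < 0)
                        break;  /* len not updated, can't continue */
                printf("discard [%llu, %llu)\n",
                       (unsigned long long)cur,
                       (unsigned long long)(cur + len));
                cur += len;     /* advance by what was actually mapped */
        }
        return ret;
}

int main(void)
{
        return discard_range(0, 150);
}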
@@ -2516,7 +2537,7 @@ int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
int btrfs_extent_readonly(struct btrfs_fs_info *fs_info, u64 bytenr)
{
- struct btrfs_block_group_cache *block_group;
+ struct btrfs_block_group *block_group;
int readonly = 0;
block_group = btrfs_lookup_block_group(fs_info, bytenr);
@@ -2546,7 +2567,7 @@ static u64 get_alloc_profile_by_root(struct btrfs_root *root, int data)
static u64 first_logical_byte(struct btrfs_fs_info *fs_info, u64 search_start)
{
- struct btrfs_block_group_cache *cache;
+ struct btrfs_block_group *cache;
u64 bytenr;
spin_lock(&fs_info->block_group_cache_lock);
@@ -2560,13 +2581,13 @@ static u64 first_logical_byte(struct btrfs_fs_info *fs_info, u64 search_start)
if (!cache)
return 0;
- bytenr = cache->key.objectid;
+ bytenr = cache->start;
btrfs_put_block_group(cache);
return bytenr;
}
-static int pin_down_extent(struct btrfs_block_group_cache *cache,
+static int pin_down_extent(struct btrfs_block_group *cache,
u64 bytenr, u64 num_bytes, int reserved)
{
struct btrfs_fs_info *fs_info = cache->fs_info;
@@ -2590,13 +2611,12 @@ static int pin_down_extent(struct btrfs_block_group_cache *cache,
return 0;
}
-/*
- * this function must be called within transaction
- */
int btrfs_pin_extent(struct btrfs_fs_info *fs_info,
u64 bytenr, u64 num_bytes, int reserved)
{
- struct btrfs_block_group_cache *cache;
+ struct btrfs_block_group *cache;
+
+ ASSERT(fs_info->running_transaction);
cache = btrfs_lookup_block_group(fs_info, bytenr);
BUG_ON(!cache); /* Logic error */
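Replacing the old "must be called within transaction" comment with ASSERT(fs_info->running_transaction) turns the calling convention into something debug builds actually verify. The same move in miniature, with assert() standing in for the kernel's ASSERT:

#include <assert.h>
#include <stdio.h>

struct fs_info { void *running_transaction; };

/* The precondition is now enforced, not just documented. */
static void pin_extent(struct fs_info *fs)
{
        assert(fs->running_transaction);
        printf("pinned\n");
}

int main(void)
{
        int trans = 1;
        struct fs_info fs = { &trans };

        pin_extent(&fs);        /* fine; a NULL transaction would trip it */
        return 0;
}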
@@ -2613,7 +2633,7 @@ int btrfs_pin_extent(struct btrfs_fs_info *fs_info,
int btrfs_pin_extent_for_log_replay(struct btrfs_fs_info *fs_info,
u64 bytenr, u64 num_bytes)
{
- struct btrfs_block_group_cache *cache;
+ struct btrfs_block_group *cache;
int ret;
cache = btrfs_lookup_block_group(fs_info, bytenr);
@@ -2640,7 +2660,7 @@ static int __exclude_logged_extent(struct btrfs_fs_info *fs_info,
u64 start, u64 num_bytes)
{
int ret;
- struct btrfs_block_group_cache *block_group;
+ struct btrfs_block_group *block_group;
struct btrfs_caching_control *caching_ctl;
block_group = btrfs_lookup_block_group(fs_info, start);
@@ -2652,7 +2672,7 @@ static int __exclude_logged_extent(struct btrfs_fs_info *fs_info,
if (!caching_ctl) {
/* Logic error */
- BUG_ON(!btrfs_block_group_cache_done(block_group));
+ BUG_ON(!btrfs_block_group_done(block_group));
ret = btrfs_remove_free_space(block_group, start, num_bytes);
} else {
mutex_lock(&caching_ctl->mutex);
@@ -2717,7 +2737,7 @@ int btrfs_exclude_logged_extents(struct extent_buffer *eb)
}
static void
-btrfs_inc_block_group_reservations(struct btrfs_block_group_cache *bg)
+btrfs_inc_block_group_reservations(struct btrfs_block_group *bg)
{
atomic_inc(&bg->reservations);
}
@@ -2726,14 +2746,14 @@ void btrfs_prepare_extent_commit(struct btrfs_fs_info *fs_info)
{
struct btrfs_caching_control *next;
struct btrfs_caching_control *caching_ctl;
- struct btrfs_block_group_cache *cache;
+ struct btrfs_block_group *cache;
down_write(&fs_info->commit_root_sem);
list_for_each_entry_safe(caching_ctl, next,
&fs_info->caching_block_groups, list) {
cache = caching_ctl->block_group;
- if (btrfs_block_group_cache_done(cache)) {
+ if (btrfs_block_group_done(cache)) {
cache->last_byte_to_unpin = (u64)-1;
list_del_init(&caching_ctl->list);
btrfs_put_caching_control(caching_ctl);
@@ -2785,7 +2805,7 @@ static int unpin_extent_range(struct btrfs_fs_info *fs_info,
u64 start, u64 end,
const bool return_free_space)
{
- struct btrfs_block_group_cache *cache = NULL;
+ struct btrfs_block_group *cache = NULL;
struct btrfs_space_info *space_info;
struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
struct btrfs_free_cluster *cluster = NULL;
@@ -2797,7 +2817,7 @@ static int unpin_extent_range(struct btrfs_fs_info *fs_info,
while (start <= end) {
readonly = false;
if (!cache ||
- start >= cache->key.objectid + cache->key.offset) {
+ start >= cache->start + cache->length) {
if (cache)
btrfs_put_block_group(cache);
total_unpinned = 0;
@@ -2810,7 +2830,7 @@ static int unpin_extent_range(struct btrfs_fs_info *fs_info,
empty_cluster <<= 1;
}
- len = cache->key.objectid + cache->key.offset - start;
+ len = cache->start + cache->length - start;
len = min(len, end + 1 - start);
if (start < cache->last_byte_to_unpin) {
@@ -2880,7 +2900,7 @@ static int unpin_extent_range(struct btrfs_fs_info *fs_info,
int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
- struct btrfs_block_group_cache *block_group, *tmp;
+ struct btrfs_block_group *block_group, *tmp;
struct list_head *deleted_bgs;
struct extent_io_tree *unpin;
u64 start;
@@ -2926,8 +2946,8 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans)
ret = -EROFS;
if (!trans->aborted)
ret = btrfs_discard_extent(fs_info,
- block_group->key.objectid,
- block_group->key.offset,
+ block_group->start,
+ block_group->length,
&trimmed);
list_del_init(&block_group->bg_list);
@@ -3262,7 +3282,7 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
}
if (last_ref && btrfs_header_generation(buf) == trans->transid) {
- struct btrfs_block_group_cache *cache;
+ struct btrfs_block_group *cache;
if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
ret = check_ref_cleanup(trans, buf->start);
@@ -3349,15 +3369,14 @@ enum btrfs_loop_type {
};
static inline void
-btrfs_lock_block_group(struct btrfs_block_group_cache *cache,
+btrfs_lock_block_group(struct btrfs_block_group *cache,
int delalloc)
{
if (delalloc)
down_read(&cache->data_rwsem);
}
-static inline void
-btrfs_grab_block_group(struct btrfs_block_group_cache *cache,
+static inline void btrfs_grab_block_group(struct btrfs_block_group *cache,
int delalloc)
{
btrfs_get_block_group(cache);
@@ -3365,12 +3384,12 @@ btrfs_grab_block_group(struct btrfs_block_group_cache *cache,
down_read(&cache->data_rwsem);
}
-static struct btrfs_block_group_cache *
-btrfs_lock_cluster(struct btrfs_block_group_cache *block_group,
+static struct btrfs_block_group *btrfs_lock_cluster(
+ struct btrfs_block_group *block_group,
struct btrfs_free_cluster *cluster,
int delalloc)
{
- struct btrfs_block_group_cache *used_bg = NULL;
+ struct btrfs_block_group *used_bg = NULL;
spin_lock(&cluster->refill_lock);
while (1) {
@@ -3404,7 +3423,7 @@ btrfs_lock_cluster(struct btrfs_block_group_cache *block_group,
}
static inline void
-btrfs_release_block_group(struct btrfs_block_group_cache *cache,
+btrfs_release_block_group(struct btrfs_block_group *cache,
int delalloc)
{
if (delalloc)
@@ -3475,12 +3494,12 @@ struct find_free_extent_ctl {
* Return >0 to inform the caller that we found nothing
* Return 0 means we have found a location and set ffe_ctl->found_offset.
*/
-static int find_free_extent_clustered(struct btrfs_block_group_cache *bg,
+static int find_free_extent_clustered(struct btrfs_block_group *bg,
struct btrfs_free_cluster *last_ptr,
struct find_free_extent_ctl *ffe_ctl,
- struct btrfs_block_group_cache **cluster_bg_ret)
+ struct btrfs_block_group **cluster_bg_ret)
{
- struct btrfs_block_group_cache *cluster_bg;
+ struct btrfs_block_group *cluster_bg;
u64 aligned_cluster;
u64 offset;
int ret;
@@ -3493,7 +3512,7 @@ static int find_free_extent_clustered(struct btrfs_block_group_cache *bg,
goto release_cluster;
offset = btrfs_alloc_from_cluster(cluster_bg, last_ptr,
- ffe_ctl->num_bytes, cluster_bg->key.objectid,
+ ffe_ctl->num_bytes, cluster_bg->start,
&ffe_ctl->max_extent_size);
if (offset) {
/* We have a block, we're done */
@@ -3579,7 +3598,7 @@ refill_cluster:
* Return 0 when we found a free extent and set ffe_ctl->found_offset
* Return -EAGAIN to inform the caller that we need to re-search this block group
*/
-static int find_free_extent_unclustered(struct btrfs_block_group_cache *bg,
+static int find_free_extent_unclustered(struct btrfs_block_group *bg,
struct btrfs_free_cluster *last_ptr,
struct find_free_extent_ctl *ffe_ctl)
{
@@ -3781,7 +3800,7 @@ static noinline int find_free_extent(struct btrfs_fs_info *fs_info,
{
int ret = 0;
struct btrfs_free_cluster *last_ptr = NULL;
- struct btrfs_block_group_cache *block_group = NULL;
+ struct btrfs_block_group *block_group = NULL;
struct find_free_extent_ctl ffe_ctl = {0};
struct btrfs_space_info *space_info;
bool use_cluster = true;
@@ -3904,7 +3923,7 @@ search:
continue;
btrfs_grab_block_group(block_group, delalloc);
- ffe_ctl.search_start = block_group->key.objectid;
+ ffe_ctl.search_start = block_group->start;
/*
* this can happen if we end up cycling through all the
@@ -3935,7 +3954,7 @@ search:
}
have_block_group:
- ffe_ctl.cached = btrfs_block_group_cache_done(block_group);
+ ffe_ctl.cached = btrfs_block_group_done(block_group);
if (unlikely(!ffe_ctl.cached)) {
ffe_ctl.have_caching_bg = true;
ret = btrfs_cache_block_group(block_group, 0);
@@ -3951,7 +3970,7 @@ have_block_group:
* lets look there
*/
if (last_ptr && use_cluster) {
- struct btrfs_block_group_cache *cluster_bg = NULL;
+ struct btrfs_block_group *cluster_bg = NULL;
ret = find_free_extent_clustered(block_group, last_ptr,
&ffe_ctl, &cluster_bg);
@@ -3984,7 +4003,7 @@ checks:
/* move on to the next group */
if (ffe_ctl.search_start + num_bytes >
- block_group->key.objectid + block_group->key.offset) {
+ block_group->start + block_group->length) {
btrfs_add_free_space(block_group, ffe_ctl.found_offset,
num_bytes);
goto loop;
@@ -4133,7 +4152,7 @@ static int __btrfs_free_reserved_extent(struct btrfs_fs_info *fs_info,
u64 start, u64 len,
int pin, int delalloc)
{
- struct btrfs_block_group_cache *cache;
+ struct btrfs_block_group *cache;
int ret = 0;
cache = btrfs_lookup_block_group(fs_info, start);
@@ -4366,7 +4385,7 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
{
struct btrfs_fs_info *fs_info = trans->fs_info;
int ret;
- struct btrfs_block_group_cache *block_group;
+ struct btrfs_block_group *block_group;
struct btrfs_space_info *space_info;
/*
@@ -5436,7 +5455,7 @@ int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
btrfs_assert_tree_locked(parent);
parent_level = btrfs_header_level(parent);
- extent_buffer_get(parent);
+ atomic_inc(&parent->refs);
path->nodes[parent_level] = parent;
path->slots[parent_level] = btrfs_header_nritems(parent);
@@ -5480,7 +5499,7 @@ int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
*/
u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
{
- struct btrfs_block_group_cache *block_group;
+ struct btrfs_block_group *block_group;
u64 free_bytes = 0;
int factor;
@@ -5498,9 +5517,8 @@ u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
}
factor = btrfs_bg_type_to_factor(block_group->flags);
- free_bytes += (block_group->key.offset -
- btrfs_block_group_used(&block_group->item)) *
- factor;
+ free_bytes += (block_group->length -
+ block_group->used) * factor;
spin_unlock(&block_group->lock);
}
@@ -5623,7 +5641,7 @@ static int btrfs_trim_free_extents(struct btrfs_device *device, u64 *trimmed)
*/
int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
{
- struct btrfs_block_group_cache *cache = NULL;
+ struct btrfs_block_group *cache = NULL;
struct btrfs_device *device;
struct list_head *devices;
u64 group_trimmed;
@@ -5647,16 +5665,16 @@ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
cache = btrfs_lookup_first_block_group(fs_info, range->start);
for (; cache; cache = btrfs_next_block_group(cache)) {
- if (cache->key.objectid >= range_end) {
+ if (cache->start >= range_end) {
btrfs_put_block_group(cache);
break;
}
- start = max(range->start, cache->key.objectid);
- end = min(range_end, cache->key.objectid + cache->key.offset);
+ start = max(range->start, cache->start);
+ end = min(range_end, cache->start + cache->length);
if (end - start >= range->minlen) {
- if (!btrfs_block_group_cache_done(cache)) {
+ if (!btrfs_block_group_done(cache)) {
ret = btrfs_cache_block_group(cache, 0);
if (ret) {
bg_failed++;
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index cceaf05aada2..eb8bd0258360 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -14,6 +14,7 @@
#include <linux/prefetch.h>
#include <linux/cleancache.h>
#include "extent_io.h"
+#include "extent-io-tree.h"
#include "extent_map.h"
#include "ctree.h"
#include "btrfs_inode.h"
@@ -59,12 +60,23 @@ void btrfs_leak_debug_del(struct list_head *entry)
spin_unlock_irqrestore(&leak_lock, flags);
}
-static inline
-void btrfs_leak_debug_check(void)
+static inline void btrfs_extent_buffer_leak_debug_check(void)
{
- struct extent_state *state;
struct extent_buffer *eb;
+ while (!list_empty(&buffers)) {
+ eb = list_entry(buffers.next, struct extent_buffer, leak_list);
+ pr_err("BTRFS: buffer leak start %llu len %lu refs %d bflags %lu\n",
+ eb->start, eb->len, atomic_read(&eb->refs), eb->bflags);
+ list_del(&eb->leak_list);
+ kmem_cache_free(extent_buffer_cache, eb);
+ }
+}
+
+static inline void btrfs_extent_state_leak_debug_check(void)
+{
+ struct extent_state *state;
+
while (!list_empty(&states)) {
state = list_entry(states.next, struct extent_state, leak_list);
pr_err("BTRFS: state leak: start %llu end %llu state %u in tree %d refs %d\n",
@@ -74,14 +86,6 @@ void btrfs_leak_debug_check(void)
list_del(&state->leak_list);
kmem_cache_free(extent_state_cache, state);
}
-
- while (!list_empty(&buffers)) {
- eb = list_entry(buffers.next, struct extent_buffer, leak_list);
- pr_err("BTRFS: buffer leak start %llu len %lu refs %d bflags %lu\n",
- eb->start, eb->len, atomic_read(&eb->refs), eb->bflags);
- list_del(&eb->leak_list);
- kmem_cache_free(extent_buffer_cache, eb);
- }
}
#define btrfs_debug_check_extent_io_range(tree, start, end) \
@@ -105,7 +109,8 @@ static inline void __btrfs_debug_check_extent_io_range(const char *caller,
#else
#define btrfs_leak_debug_add(new, head) do {} while (0)
#define btrfs_leak_debug_del(entry) do {} while (0)
-#define btrfs_leak_debug_check() do {} while (0)
+#define btrfs_extent_buffer_leak_debug_check() do {} while (0)
+#define btrfs_extent_state_leak_debug_check() do {} while (0)
#define btrfs_debug_check_extent_io_range(c, s, e) do {} while (0)
#endif
@@ -196,19 +201,23 @@ static int __must_check flush_write_bio(struct extent_page_data *epd)
return ret;
}
-int __init extent_io_init(void)
+int __init extent_state_cache_init(void)
{
extent_state_cache = kmem_cache_create("btrfs_extent_state",
sizeof(struct extent_state), 0,
SLAB_MEM_SPREAD, NULL);
if (!extent_state_cache)
return -ENOMEM;
+ return 0;
+}
+int __init extent_io_init(void)
+{
extent_buffer_cache = kmem_cache_create("btrfs_extent_buffer",
sizeof(struct extent_buffer), 0,
SLAB_MEM_SPREAD, NULL);
if (!extent_buffer_cache)
- goto free_state_cache;
+ return -ENOMEM;
if (bioset_init(&btrfs_bioset, BIO_POOL_SIZE,
offsetof(struct btrfs_io_bio, bio),
@@ -226,23 +235,24 @@ free_bioset:
free_buffer_cache:
kmem_cache_destroy(extent_buffer_cache);
extent_buffer_cache = NULL;
+ return -ENOMEM;
+}
-free_state_cache:
+void __cold extent_state_cache_exit(void)
+{
+ btrfs_extent_state_leak_debug_check();
kmem_cache_destroy(extent_state_cache);
- extent_state_cache = NULL;
- return -ENOMEM;
}
void __cold extent_io_exit(void)
{
- btrfs_leak_debug_check();
+ btrfs_extent_buffer_leak_debug_check();
/*
* Make sure all delayed rcu free are flushed before we
* destroy caches.
*/
rcu_barrier();
- kmem_cache_destroy(extent_state_cache);
kmem_cache_destroy(extent_buffer_cache);
bioset_exit(&btrfs_bioset);
}
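Splitting extent_state_cache_init() out of extent_io_init() leaves each init function owning exactly one cache, so a failure no longer has to unwind a sibling's allocation; the caller pairs inits with exits instead. A minimal sketch of that split (malloc stands in for kmem_cache_create):

#include <stdlib.h>

static void *state_cache, *buffer_cache;

/* Each init owns one resource; no cross-cleanup on failure. */
static int state_cache_init(void)
{
        state_cache = malloc(64);
        return state_cache ? 0 : -1;
}

static int io_init(void)
{
        buffer_cache = malloc(64);
        return buffer_cache ? 0 : -1;
}

static void state_cache_exit(void) { free(state_cache); }
static void io_exit(void) { free(buffer_cache); }

int main(void)
{
        if (state_cache_init())
                return 1;
        if (io_init()) {
                state_cache_exit();     /* caller pairs inits with exits */
                return 1;
        }
        io_exit();
        state_cache_exit();
        return 0;
}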
@@ -1676,9 +1686,9 @@ out:
*
* true is returned if we find something, false if nothing was in the tree
*/
-static noinline bool find_delalloc_range(struct extent_io_tree *tree,
- u64 *start, u64 *end, u64 max_bytes,
- struct extent_state **cached_state)
+bool btrfs_find_delalloc_range(struct extent_io_tree *tree, u64 *start,
+ u64 *end, u64 max_bytes,
+ struct extent_state **cached_state)
{
struct rb_node *node;
struct extent_state *state;
@@ -1796,8 +1806,8 @@ again:
/* step one, find a bunch of delalloc bytes starting at start */
delalloc_start = *start;
delalloc_end = 0;
- found = find_delalloc_range(tree, &delalloc_start, &delalloc_end,
- max_bytes, &cached_state);
+ found = btrfs_find_delalloc_range(tree, &delalloc_start, &delalloc_end,
+ max_bytes, &cached_state);
if (!found || delalloc_end <= *start) {
*start = delalloc_start;
*end = delalloc_end;
@@ -1899,7 +1909,7 @@ static int __process_pages_contig(struct address_space *mapping,
if (page_ops & PAGE_SET_PRIVATE2)
SetPagePrivate2(pages[i]);
- if (pages[i] == locked_page) {
+ if (locked_page && pages[i] == locked_page) {
put_page(pages[i]);
pages_locked++;
continue;
@@ -2014,8 +2024,8 @@ out:
* set the private field for a given byte offset in the tree. If there isn't
* an extent_state there already, this does nothing.
*/
-static noinline int set_state_failrec(struct extent_io_tree *tree, u64 start,
- struct io_failure_record *failrec)
+int set_state_failrec(struct extent_io_tree *tree, u64 start,
+ struct io_failure_record *failrec)
{
struct rb_node *node;
struct extent_state *state;
@@ -2042,8 +2052,8 @@ out:
return ret;
}
-static noinline int get_state_failrec(struct extent_io_tree *tree, u64 start,
- struct io_failure_record **failrec)
+int get_state_failrec(struct extent_io_tree *tree, u64 start,
+ struct io_failure_record **failrec)
{
struct rb_node *node;
struct extent_state *state;
@@ -2534,7 +2544,6 @@ struct bio *btrfs_create_repair_bio(struct inode *inode, struct bio *failed_bio,
bio = btrfs_io_bio_alloc(1);
bio->bi_end_io = endio_func;
bio->bi_iter.bi_sector = failrec->logical >> 9;
- bio_set_dev(bio, fs_info->fs_devices->latest_bdev);
bio->bi_iter.bi_size = 0;
bio->bi_private = data;
@@ -2920,7 +2929,6 @@ struct bio *btrfs_bio_clone_partial(struct bio *orig, int offset, int size)
* a contiguous page to the previous one
* @size: portion of page that we want to write
* @offset: starting offset in the page
- * @bdev: attach newly created bios to this bdev
* @bio_ret: must be valid pointer, newly allocated bio will be stored there
* @end_io_func: end_io callback for new bio
* @mirror_num: desired mirror to read/write
@@ -2931,7 +2939,6 @@ static int submit_extent_page(unsigned int opf, struct extent_io_tree *tree,
struct writeback_control *wbc,
struct page *page, u64 offset,
size_t size, unsigned long pg_offset,
- struct block_device *bdev,
struct bio **bio_ret,
bio_end_io_t end_io_func,
int mirror_num,
@@ -2977,13 +2984,16 @@ static int submit_extent_page(unsigned int opf, struct extent_io_tree *tree,
}
bio = btrfs_bio_alloc(offset);
- bio_set_dev(bio, bdev);
bio_add_page(bio, page, page_size, pg_offset);
bio->bi_end_io = end_io_func;
bio->bi_private = tree;
bio->bi_write_hint = page->mapping->host->i_write_hint;
bio->bi_opf = opf;
if (wbc) {
+ struct block_device *bdev;
+
+ bdev = BTRFS_I(page->mapping->host)->root->fs_info->fs_devices->latest_bdev;
+ bio_set_dev(bio, bdev);
wbc_init_bio(wbc, bio);
wbc_account_cgroup_owner(wbc, page, page_size);
}
@@ -3065,7 +3075,6 @@ static int __do_readpage(struct extent_io_tree *tree,
u64 block_start;
u64 cur_end;
struct extent_map *em;
- struct block_device *bdev;
int ret = 0;
int nr = 0;
size_t pg_offset = 0;
@@ -3142,7 +3151,6 @@ static int __do_readpage(struct extent_io_tree *tree,
offset = em->block_start + extent_offset;
disk_io_size = iosize;
}
- bdev = em->bdev;
block_start = em->block_start;
if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
block_start = EXTENT_MAP_HOLE;
@@ -3232,7 +3240,7 @@ static int __do_readpage(struct extent_io_tree *tree,
ret = submit_extent_page(REQ_OP_READ | read_flags, tree, NULL,
page, offset, disk_io_size,
- pg_offset, bdev, bio,
+ pg_offset, bio,
end_bio_extent_readpage, mirror_num,
*bio_flags,
this_bio_flag,
@@ -3409,7 +3417,7 @@ static noinline_for_stack int __extent_writepage_io(struct inode *inode,
struct extent_page_data *epd,
loff_t i_size,
unsigned long nr_written,
- unsigned int write_flags, int *nr_ret)
+ int *nr_ret)
{
struct extent_io_tree *tree = epd->tree;
u64 start = page_offset(page);
@@ -3420,11 +3428,11 @@ static noinline_for_stack int __extent_writepage_io(struct inode *inode,
u64 block_start;
u64 iosize;
struct extent_map *em;
- struct block_device *bdev;
size_t pg_offset = 0;
size_t blocksize;
int ret = 0;
int nr = 0;
+ const unsigned int write_flags = wbc_to_write_flags(wbc);
bool compressed;
ret = btrfs_writepage_cow_fixup(page, start, page_end);
@@ -3478,7 +3486,6 @@ static noinline_for_stack int __extent_writepage_io(struct inode *inode,
iosize = min(em_end - cur, end - cur + 1);
iosize = ALIGN(iosize, blocksize);
offset = em->block_start + extent_offset;
- bdev = em->bdev;
block_start = em->block_start;
compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
free_extent_map(em);
@@ -3520,7 +3527,7 @@ static noinline_for_stack int __extent_writepage_io(struct inode *inode,
ret = submit_extent_page(REQ_OP_WRITE | write_flags, tree, wbc,
page, offset, iosize, pg_offset,
- bdev, &epd->bio,
+ &epd->bio,
end_bio_extent_writepage,
0, 0, 0, false);
if (ret) {
@@ -3558,11 +3565,8 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
size_t pg_offset = 0;
loff_t i_size = i_size_read(inode);
unsigned long end_index = i_size >> PAGE_SHIFT;
- unsigned int write_flags = 0;
unsigned long nr_written = 0;
- write_flags = wbc_to_write_flags(wbc);
-
trace___extent_writepage(page, inode, wbc);
WARN_ON(!PageLocked(page));
@@ -3600,7 +3604,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
}
ret = __extent_writepage_io(inode, page, wbc, epd,
- i_size, nr_written, write_flags, &nr);
+ i_size, nr_written, &nr);
if (ret == 1)
goto done_unlocked;
@@ -3849,7 +3853,6 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
struct extent_page_data *epd)
{
struct btrfs_fs_info *fs_info = eb->fs_info;
- struct block_device *bdev = fs_info->fs_devices->latest_bdev;
struct extent_io_tree *tree = &BTRFS_I(fs_info->btree_inode)->io_tree;
u64 offset = eb->start;
u32 nritems;
@@ -3884,7 +3887,7 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
clear_page_dirty_for_io(p);
set_page_writeback(p);
ret = submit_extent_page(REQ_OP_WRITE | write_flags, tree, wbc,
- p, offset, PAGE_SIZE, 0, bdev,
+ p, offset, PAGE_SIZE, 0,
&epd->bio,
end_bio_extent_buffer_writepage,
0, 0, 0, false);
@@ -4121,7 +4124,7 @@ retry:
for (i = 0; i < nr_pages; i++) {
struct page *page = pvec.pages[i];
- done_index = page->index;
+ done_index = page->index + 1;
/*
* At this point we hold neither the i_pages lock nor
* the page lock: the page may be truncated or
@@ -4156,16 +4159,6 @@ retry:
ret = __extent_writepage(page, wbc, epd);
if (ret < 0) {
- /*
- * done_index is set past this page,
- * so media errors will not choke
- * background writeout for the entire
- * file. This has consequences for
- * range_cyclic semantics (ie. it may
- * not be suitable for data integrity
- * writeout).
- */
- done_index = page->index + 1;
done = 1;
break;
}
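The hunk above replaces the error-path-only advance of done_index with an unconditional one at the top of the loop, so the resume point is always the page after the one just taken. A minimal sketch of the resulting loop shape (illustrative names, not a verbatim excerpt):

	for (i = 0; i < nr_pages; i++) {
		struct page *page = pvec.pages[i];

		/*
		 * Resume point for the next pass: always past this page,
		 * so a media error on one page cannot stall background
		 * writeout of the rest of the file.
		 */
		done_index = page->index + 1;

		ret = __extent_writepage(page, wbc, epd);
		if (ret < 0) {
			done = 1;
			break;
		}
	}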
@@ -4240,8 +4233,12 @@ int extent_write_locked_range(struct inode *inode, u64 start, u64 end,
.nr_to_write = nr_pages * 2,
.range_start = start,
.range_end = end + 1,
+ /* We're called from an async helper function */
+ .punt_to_cgroup = 1,
+ .no_cgroup_owner = 1,
};
+ wbc_attach_fdatawrite_inode(&wbc_writepages, inode);
while (start <= end) {
page = find_get_page(mapping, start >> PAGE_SHIFT);
if (clear_page_dirty_for_io(page))
@@ -4256,11 +4253,12 @@ int extent_write_locked_range(struct inode *inode, u64 start, u64 end,
}
ASSERT(ret <= 0);
- if (ret < 0) {
+ if (ret == 0)
+ ret = flush_write_bio(&epd);
+ else
end_write_bio(&epd, ret);
- return ret;
- }
- ret = flush_write_bio(&epd);
+
+ wbc_detach_inode(&wbc_writepages);
return ret;
}
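extent_write_locked_range() runs from async (compression) helpers, where the submitting thread is not the cgroup that dirtied the pages. Attaching a writeback_control to the inode and setting punt_to_cgroup/no_cgroup_owner lets the bios be charged to the right blkcg. A sketch of the discipline the function now follows (error handling elided):

	struct writeback_control wbc = {
		.sync_mode	 = WB_SYNC_ALL,
		/* submitted from a helper thread, not the dirtier */
		.punt_to_cgroup	 = 1,
		.no_cgroup_owner = 1,
	};

	wbc_attach_fdatawrite_inode(&wbc, inode);
	/*
	 * ... write the locked pages; each bio inherits the inode's
	 * writeback blkcg via the attached wbc ...
	 */
	wbc_detach_inode(&wbc);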
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index cf3424d58fec..a8551a1f56e2 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -7,35 +7,6 @@
#include <linux/refcount.h>
#include "ulist.h"
-/* bits for the extent state */
-#define EXTENT_DIRTY (1U << 0)
-#define EXTENT_UPTODATE (1U << 1)
-#define EXTENT_LOCKED (1U << 2)
-#define EXTENT_NEW (1U << 3)
-#define EXTENT_DELALLOC (1U << 4)
-#define EXTENT_DEFRAG (1U << 5)
-#define EXTENT_BOUNDARY (1U << 6)
-#define EXTENT_NODATASUM (1U << 7)
-#define EXTENT_CLEAR_META_RESV (1U << 8)
-#define EXTENT_NEED_WAIT (1U << 9)
-#define EXTENT_DAMAGED (1U << 10)
-#define EXTENT_NORESERVE (1U << 11)
-#define EXTENT_QGROUP_RESERVED (1U << 12)
-#define EXTENT_CLEAR_DATA_RESV (1U << 13)
-#define EXTENT_DELALLOC_NEW (1U << 14)
-#define EXTENT_DO_ACCOUNTING (EXTENT_CLEAR_META_RESV | \
- EXTENT_CLEAR_DATA_RESV)
-#define EXTENT_CTLBITS (EXTENT_DO_ACCOUNTING)
-
-/*
- * Redefined bits above which are used only in the device allocation tree,
- * shouldn't be using EXTENT_LOCKED / EXTENT_BOUNDARY / EXTENT_CLEAR_META_RESV
- * / EXTENT_CLEAR_DATA_RESV because they have special meaning to the bit
- * manipulation functions
- */
-#define CHUNK_ALLOCATED EXTENT_DIRTY
-#define CHUNK_TRIMMED EXTENT_DEFRAG
-
/*
* flags for bio submission. The high bits indicate the compression
* type for this bio
@@ -89,12 +60,11 @@ enum {
#define BITMAP_LAST_BYTE_MASK(nbits) \
(BYTE_MASK >> (-(nbits) & (BITS_PER_BYTE - 1)))
-struct extent_state;
struct btrfs_root;
struct btrfs_inode;
struct btrfs_io_bio;
struct io_failure_record;
-
+struct extent_io_tree;
typedef blk_status_t (extent_submit_bio_start_t)(void *private_data,
struct bio *bio, u64 bio_offset);
@@ -111,47 +81,6 @@ struct extent_io_ops {
int mirror);
};
-enum {
- IO_TREE_FS_INFO_FREED_EXTENTS0,
- IO_TREE_FS_INFO_FREED_EXTENTS1,
- IO_TREE_INODE_IO,
- IO_TREE_INODE_IO_FAILURE,
- IO_TREE_RELOC_BLOCKS,
- IO_TREE_TRANS_DIRTY_PAGES,
- IO_TREE_ROOT_DIRTY_LOG_PAGES,
- IO_TREE_SELFTEST,
-};
-
-struct extent_io_tree {
- struct rb_root state;
- struct btrfs_fs_info *fs_info;
- void *private_data;
- u64 dirty_bytes;
- bool track_uptodate;
-
- /* Who owns this io tree, should be one of IO_TREE_* */
- u8 owner;
-
- spinlock_t lock;
- const struct extent_io_ops *ops;
-};
-
-struct extent_state {
- u64 start;
- u64 end; /* inclusive */
- struct rb_node rb_node;
-
- /* ADD NEW ELEMENTS AFTER THIS */
- wait_queue_head_t wq;
- refcount_t refs;
- unsigned state;
-
- struct io_failure_record *failrec;
-
-#ifdef CONFIG_BTRFS_DEBUG
- struct list_head leak_list;
-#endif
-};
#define INLINE_EXTENT_BUFFER_PAGES 16
#define MAX_INLINE_EXTENT_BUFFER_SIZE (INLINE_EXTENT_BUFFER_PAGES * PAGE_SIZE)
@@ -259,152 +188,11 @@ typedef struct extent_map *(get_extent_t)(struct btrfs_inode *inode,
u64 start, u64 len,
int create);
-void extent_io_tree_init(struct btrfs_fs_info *fs_info,
- struct extent_io_tree *tree, unsigned int owner,
- void *private_data);
-void extent_io_tree_release(struct extent_io_tree *tree);
int try_release_extent_mapping(struct page *page, gfp_t mask);
int try_release_extent_buffer(struct page *page);
-int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
- struct extent_state **cached);
-static inline int lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
-{
- return lock_extent_bits(tree, start, end, NULL);
-}
-
-int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end);
int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
get_extent_t *get_extent, int mirror_num);
-int __init extent_io_init(void);
-void __cold extent_io_exit(void);
-
-u64 count_range_bits(struct extent_io_tree *tree,
- u64 *start, u64 search_end,
- u64 max_bytes, unsigned bits, int contig);
-
-void free_extent_state(struct extent_state *state);
-int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
- unsigned bits, int filled,
- struct extent_state *cached_state);
-int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
- unsigned bits, struct extent_changeset *changeset);
-int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
- unsigned bits, int wake, int delete,
- struct extent_state **cached);
-int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
- unsigned bits, int wake, int delete,
- struct extent_state **cached, gfp_t mask,
- struct extent_changeset *changeset);
-
-static inline int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end)
-{
- return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL);
-}
-
-static inline int unlock_extent_cached(struct extent_io_tree *tree, u64 start,
- u64 end, struct extent_state **cached)
-{
- return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
- GFP_NOFS, NULL);
-}
-
-static inline int unlock_extent_cached_atomic(struct extent_io_tree *tree,
- u64 start, u64 end, struct extent_state **cached)
-{
- return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
- GFP_ATOMIC, NULL);
-}
-
-static inline int clear_extent_bits(struct extent_io_tree *tree, u64 start,
- u64 end, unsigned bits)
-{
- int wake = 0;
-
- if (bits & EXTENT_LOCKED)
- wake = 1;
-
- return clear_extent_bit(tree, start, end, bits, wake, 0, NULL);
-}
-
-int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
- unsigned bits, struct extent_changeset *changeset);
-int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
- unsigned bits, u64 *failed_start,
- struct extent_state **cached_state, gfp_t mask);
-int set_extent_bits_nowait(struct extent_io_tree *tree, u64 start, u64 end,
- unsigned bits);
-
-static inline int set_extent_bits(struct extent_io_tree *tree, u64 start,
- u64 end, unsigned bits)
-{
- return set_extent_bit(tree, start, end, bits, NULL, NULL, GFP_NOFS);
-}
-
-static inline int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
- u64 end, struct extent_state **cached_state)
-{
- return __clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0,
- cached_state, GFP_NOFS, NULL);
-}
-
-static inline int set_extent_dirty(struct extent_io_tree *tree, u64 start,
- u64 end, gfp_t mask)
-{
- return set_extent_bit(tree, start, end, EXTENT_DIRTY, NULL,
- NULL, mask);
-}
-
-static inline int clear_extent_dirty(struct extent_io_tree *tree, u64 start,
- u64 end, struct extent_state **cached)
-{
- return clear_extent_bit(tree, start, end,
- EXTENT_DIRTY | EXTENT_DELALLOC |
- EXTENT_DO_ACCOUNTING, 0, 0, cached);
-}
-
-int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
- unsigned bits, unsigned clear_bits,
- struct extent_state **cached_state);
-
-static inline int set_extent_delalloc(struct extent_io_tree *tree, u64 start,
- u64 end, unsigned int extra_bits,
- struct extent_state **cached_state)
-{
- return set_extent_bit(tree, start, end,
- EXTENT_DELALLOC | EXTENT_UPTODATE | extra_bits,
- NULL, cached_state, GFP_NOFS);
-}
-
-static inline int set_extent_defrag(struct extent_io_tree *tree, u64 start,
- u64 end, struct extent_state **cached_state)
-{
- return set_extent_bit(tree, start, end,
- EXTENT_DELALLOC | EXTENT_UPTODATE | EXTENT_DEFRAG,
- NULL, cached_state, GFP_NOFS);
-}
-
-static inline int set_extent_new(struct extent_io_tree *tree, u64 start,
- u64 end)
-{
- return set_extent_bit(tree, start, end, EXTENT_NEW, NULL, NULL,
- GFP_NOFS);
-}
-
-static inline int set_extent_uptodate(struct extent_io_tree *tree, u64 start,
- u64 end, struct extent_state **cached_state, gfp_t mask)
-{
- return set_extent_bit(tree, start, end, EXTENT_UPTODATE, NULL,
- cached_state, mask);
-}
-
-int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
- u64 *start_ret, u64 *end_ret, unsigned bits,
- struct extent_state **cached_state);
-void find_first_clear_extent_bit(struct extent_io_tree *tree, u64 start,
- u64 *start_ret, u64 *end_ret, unsigned bits);
-int extent_invalidatepage(struct extent_io_tree *tree,
- struct page *page, unsigned long offset);
int extent_write_full_page(struct page *page, struct writeback_control *wbc);
int extent_write_locked_range(struct inode *inode, u64 start, u64 end,
int mode);
@@ -442,11 +230,6 @@ static inline int num_extent_pages(const struct extent_buffer *eb)
(eb->start >> PAGE_SHIFT);
}
-static inline void extent_buffer_get(struct extent_buffer *eb)
-{
- atomic_inc(&eb->refs);
-}
-
static inline int extent_buffer_uptodate(struct extent_buffer *eb)
{
return test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
@@ -508,10 +291,6 @@ struct btrfs_inode;
int repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start,
u64 length, u64 logical, struct page *page,
unsigned int pg_offset, int mirror_num);
-int clean_io_failure(struct btrfs_fs_info *fs_info,
- struct extent_io_tree *failure_tree,
- struct extent_io_tree *io_tree, u64 start,
- struct page *page, u64 ino, unsigned int pg_offset);
void end_extent_writepage(struct page *page, int err, u64 start, u64 end);
int btrfs_repair_eb_io_failure(struct extent_buffer *eb, int mirror_num);
@@ -535,19 +314,12 @@ struct io_failure_record {
};
-void btrfs_free_io_failure_record(struct btrfs_inode *inode, u64 start,
- u64 end);
-int btrfs_get_io_failure_record(struct inode *inode, u64 start, u64 end,
- struct io_failure_record **failrec_ret);
bool btrfs_check_repairable(struct inode *inode, unsigned failed_bio_pages,
struct io_failure_record *failrec, int fail_mirror);
struct bio *btrfs_create_repair_bio(struct inode *inode, struct bio *failed_bio,
struct io_failure_record *failrec,
struct page *page, int pg_offset, int icsum,
bio_end_io_t *endio_func, void *data);
-int free_io_failure(struct extent_io_tree *failure_tree,
- struct extent_io_tree *io_tree,
- struct io_failure_record *rec);
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
bool find_lock_delalloc_range(struct inode *inode,
struct page *locked_page, u64 *start,
@@ -555,5 +327,4 @@ bool find_lock_delalloc_range(struct inode *inode,
#endif
struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
u64 start);
-
#endif
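The extent-state bits, struct extent_io_tree, struct extent_state, and the lock/set/clear helpers removed from this header are not deleted from the tree; the series moves them into a header of their own so that extent_io.h is left with only the buffer and page I/O interfaces. Assuming the relocated header is fs/btrfs/extent-io-tree.h, the split looks roughly like:

	/* extent_io.h keeps only a forward declaration ... */
	struct extent_io_tree;

	/* ... while users of the tree API include the new header */
	#include "extent-io-tree.h"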
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index 9d30acca55e1..6f417ff68980 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -214,9 +214,13 @@ static int mergable_maps(struct extent_map *prev, struct extent_map *next)
ASSERT(next->block_start != EXTENT_MAP_DELALLOC &&
prev->block_start != EXTENT_MAP_DELALLOC);
+ if (prev->map_lookup || next->map_lookup)
+ ASSERT(test_bit(EXTENT_FLAG_FS_MAPPING, &prev->flags) &&
+ test_bit(EXTENT_FLAG_FS_MAPPING, &next->flags));
+
if (extent_map_end(prev) == next->start &&
prev->flags == next->flags &&
- prev->bdev == next->bdev &&
+ prev->map_lookup == next->map_lookup &&
((next->block_start == EXTENT_MAP_HOLE &&
prev->block_start == EXTENT_MAP_HOLE) ||
(next->block_start == EXTENT_MAP_INLINE &&
diff --git a/fs/btrfs/extent_map.h b/fs/btrfs/extent_map.h
index 473f039fcd7c..8e217337dff9 100644
--- a/fs/btrfs/extent_map.h
+++ b/fs/btrfs/extent_map.h
@@ -42,15 +42,8 @@ struct extent_map {
u64 block_len;
u64 generation;
unsigned long flags;
- union {
- struct block_device *bdev;
-
- /*
- * used for chunk mappings
- * flags & EXTENT_FLAG_FS_MAPPING must be set
- */
- struct map_lookup *map_lookup;
- };
+ /* Used for chunk mappings, flag EXTENT_FLAG_FS_MAPPING must be set */
+ struct map_lookup *map_lookup;
refcount_t refs;
unsigned int compress_type;
struct list_head list;
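With ->bdev gone from struct extent_map, map_lookup no longer needs a union and is NULL for every mapping except chunk mappings (EXTENT_FLAG_FS_MAPPING). That is why mergable_maps() above may compare the pointers directly; a sketch of the invariant it relies on (helper name illustrative):

	/*
	 * map_lookup is non-NULL only for chunk mappings, so for regular
	 * file extents both pointers are NULL and the equality test
	 * reduces to the old behaviour.
	 */
	static bool em_same_chunk(const struct extent_map *prev,
				  const struct extent_map *next)
	{
		return prev->map_lookup == next->map_lookup;
	}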
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
index 1a599f50837b..3270a40b0777 100644
--- a/fs/btrfs/file-item.c
+++ b/fs/btrfs/file-item.c
@@ -945,7 +945,6 @@ void btrfs_extent_item_to_extent_map(struct btrfs_inode *inode,
u8 type = btrfs_file_extent_type(leaf, fi);
int compress_type = btrfs_file_extent_compression(leaf, fi);
- em->bdev = fs_info->fs_devices->latest_bdev;
btrfs_item_key_to_cpu(leaf, &key, slot);
extent_start = key.offset;
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 435a502a3226..0cb43b682789 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -296,7 +296,7 @@ static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
key.objectid = defrag->ino;
key.type = BTRFS_INODE_ITEM_KEY;
key.offset = 0;
- inode = btrfs_iget(fs_info->sb, &key, inode_root, NULL);
+ inode = btrfs_iget(fs_info->sb, &key, inode_root);
if (IS_ERR(inode)) {
ret = PTR_ERR(inode);
goto cleanup;
@@ -667,7 +667,6 @@ void btrfs_drop_extent_cache(struct btrfs_inode *inode, u64 start, u64 end,
}
split->generation = gen;
- split->bdev = em->bdev;
split->flags = flags;
split->compress_type = em->compress_type;
replace_extent_mapping(em_tree, em, split, modified);
@@ -680,7 +679,6 @@ void btrfs_drop_extent_cache(struct btrfs_inode *inode, u64 start, u64 end,
split->start = start + len;
split->len = em->start + em->len - (start + len);
- split->bdev = em->bdev;
split->flags = flags;
split->compress_type = em->compress_type;
split->generation = gen;
@@ -1636,6 +1634,7 @@ static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb,
break;
}
+ only_release_metadata = false;
sector_offset = pos & (fs_info->sectorsize - 1);
reserve_bytes = round_up(write_bytes + sector_offset,
fs_info->sectorsize);
@@ -1791,7 +1790,6 @@ again:
set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
lockend, EXTENT_NORESERVE, NULL,
NULL, GFP_NOFS);
- only_release_metadata = false;
}
btrfs_drop_pages(pages, num_pages);
@@ -1903,9 +1901,10 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
(iocb->ki_flags & IOCB_NOWAIT))
return -EOPNOTSUPP;
- if (!inode_trylock(inode)) {
- if (iocb->ki_flags & IOCB_NOWAIT)
+ if (iocb->ki_flags & IOCB_NOWAIT) {
+ if (!inode_trylock(inode))
return -EAGAIN;
+ } else {
inode_lock(inode);
}
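The rewritten locking branch is the generic RWF_NOWAIT pattern: only a non-blocking caller pays for a trylock, and a blocking caller takes the lock directly instead of first bouncing the lock cacheline with a speculative trylock. In isolation:

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!inode_trylock(inode))
			return -EAGAIN;	/* caller retries where it may block */
	} else {
		inode_lock(inode);
	}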
@@ -2359,7 +2358,6 @@ out:
hole_em->block_start = EXTENT_MAP_HOLE;
hole_em->block_len = 0;
hole_em->orig_block_len = 0;
- hole_em->bdev = fs_info->fs_devices->latest_bdev;
hole_em->compress_type = BTRFS_COMPRESS_NONE;
hole_em->generation = trans->transid;
@@ -3350,29 +3348,30 @@ out:
return ret;
}
-static int find_desired_extent(struct inode *inode, loff_t *offset, int whence)
+static loff_t find_desired_extent(struct inode *inode, loff_t offset,
+ int whence)
{
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct extent_map *em = NULL;
struct extent_state *cached_state = NULL;
+ loff_t i_size = inode->i_size;
u64 lockstart;
u64 lockend;
u64 start;
u64 len;
int ret = 0;
- if (inode->i_size == 0)
+ if (i_size == 0 || offset >= i_size)
return -ENXIO;
/*
- * *offset can be negative, in this case we start finding DATA/HOLE from
+ * offset can be negative, in which case we start finding DATA/HOLE from
* the very start of the file.
*/
- start = max_t(loff_t, 0, *offset);
+ start = max_t(loff_t, 0, offset);
lockstart = round_down(start, fs_info->sectorsize);
- lockend = round_up(i_size_read(inode),
- fs_info->sectorsize);
+ lockend = round_up(i_size, fs_info->sectorsize);
if (lockend <= lockstart)
lockend = lockstart + fs_info->sectorsize;
lockend--;
@@ -3381,7 +3380,7 @@ static int find_desired_extent(struct inode *inode, loff_t *offset, int whence)
lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
&cached_state);
- while (start < inode->i_size) {
+ while (start < i_size) {
em = btrfs_get_extent_fiemap(BTRFS_I(inode), start, len);
if (IS_ERR(em)) {
ret = PTR_ERR(em);
@@ -3404,46 +3403,39 @@ static int find_desired_extent(struct inode *inode, loff_t *offset, int whence)
cond_resched();
}
free_extent_map(em);
- if (!ret) {
- if (whence == SEEK_DATA && start >= inode->i_size)
- ret = -ENXIO;
- else
- *offset = min_t(loff_t, start, inode->i_size);
- }
unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
&cached_state);
- return ret;
+ if (ret) {
+ offset = ret;
+ } else {
+ if (whence == SEEK_DATA && start >= i_size)
+ offset = -ENXIO;
+ else
+ offset = min_t(loff_t, start, i_size);
+ }
+
+ return offset;
}
static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int whence)
{
struct inode *inode = file->f_mapping->host;
- int ret;
- inode_lock(inode);
switch (whence) {
- case SEEK_END:
- case SEEK_CUR:
- offset = generic_file_llseek(file, offset, whence);
- goto out;
+ default:
+ return generic_file_llseek(file, offset, whence);
case SEEK_DATA:
case SEEK_HOLE:
- if (offset >= i_size_read(inode)) {
- inode_unlock(inode);
- return -ENXIO;
- }
-
- ret = find_desired_extent(inode, &offset, whence);
- if (ret) {
- inode_unlock(inode);
- return ret;
- }
+ inode_lock_shared(inode);
+ offset = find_desired_extent(inode, offset, whence);
+ inode_unlock_shared(inode);
+ break;
}
- offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
-out:
- inode_unlock(inode);
- return offset;
+ if (offset < 0)
+ return offset;
+
+ return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
}
static int btrfs_file_open(struct inode *inode, struct file *filp)
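Taken together, the file.c hunks change find_desired_extent() from an int-returning out-parameter helper into one that returns a loff_t carrying either the found offset or a negative errno, which lets btrfs_file_llseek() shrink to a pass-through. Assembled from the hunks above (a sketch, not a verbatim excerpt), the new shape is:

	static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int whence)
	{
		struct inode *inode = file->f_mapping->host;

		switch (whence) {
		default:
			return generic_file_llseek(file, offset, whence);
		case SEEK_DATA:
		case SEEK_HOLE:
			/* shared lock: concurrent seeks may run in parallel */
			inode_lock_shared(inode);
			offset = find_desired_extent(inode, offset, whence);
			inode_unlock_shared(inode);
			break;
		}

		if (offset < 0)
			return offset;		/* -ENXIO, -EIO, ... */
		return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
	}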
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index d54dcd0ab230..3283da419200 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -78,7 +78,7 @@ static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
* sure NOFS is set to keep us from deadlocking.
*/
nofs_flag = memalloc_nofs_save();
- inode = btrfs_iget_path(fs_info->sb, &location, root, NULL, path);
+ inode = btrfs_iget_path(fs_info->sb, &location, root, path);
btrfs_release_path(path);
memalloc_nofs_restore(nofs_flag);
if (IS_ERR(inode))
@@ -91,8 +91,7 @@ static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
return inode;
}
-struct inode *lookup_free_space_inode(
- struct btrfs_block_group_cache *block_group,
+struct inode *lookup_free_space_inode(struct btrfs_block_group *block_group,
struct btrfs_path *path)
{
struct btrfs_fs_info *fs_info = block_group->fs_info;
@@ -107,7 +106,7 @@ struct inode *lookup_free_space_inode(
return inode;
inode = __lookup_free_space_inode(fs_info->tree_root, path,
- block_group->key.objectid);
+ block_group->start);
if (IS_ERR(inode))
return inode;
@@ -190,7 +189,7 @@ static int __create_free_space_inode(struct btrfs_root *root,
}
int create_free_space_inode(struct btrfs_trans_handle *trans,
- struct btrfs_block_group_cache *block_group,
+ struct btrfs_block_group *block_group,
struct btrfs_path *path)
{
int ret;
@@ -201,7 +200,7 @@ int create_free_space_inode(struct btrfs_trans_handle *trans,
return ret;
return __create_free_space_inode(trans->fs_info->tree_root, trans, path,
- ino, block_group->key.objectid);
+ ino, block_group->start);
}
int btrfs_check_trunc_cache_free_space(struct btrfs_fs_info *fs_info,
@@ -224,7 +223,7 @@ int btrfs_check_trunc_cache_free_space(struct btrfs_fs_info *fs_info,
}
int btrfs_truncate_free_space_cache(struct btrfs_trans_handle *trans,
- struct btrfs_block_group_cache *block_group,
+ struct btrfs_block_group *block_group,
struct inode *inode)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
@@ -385,6 +384,12 @@ static int io_ctl_prepare_pages(struct btrfs_io_ctl *io_ctl, struct inode *inode
if (uptodate && !PageUptodate(page)) {
btrfs_readpage(NULL, page);
lock_page(page);
+ if (page->mapping != inode->i_mapping) {
+ btrfs_err(BTRFS_I(inode)->root->fs_info,
+ "free space cache page truncated");
+ io_ctl_drop_pages(io_ctl);
+ return -EIO;
+ }
if (!PageUptodate(page)) {
btrfs_err(BTRFS_I(inode)->root->fs_info,
"error reading free space cache");
@@ -814,7 +819,7 @@ free_cache:
goto out;
}
-int load_free_space_cache(struct btrfs_block_group_cache *block_group)
+int load_free_space_cache(struct btrfs_block_group *block_group)
{
struct btrfs_fs_info *fs_info = block_group->fs_info;
struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
@@ -822,7 +827,7 @@ int load_free_space_cache(struct btrfs_block_group_cache *block_group)
struct btrfs_path *path;
int ret = 0;
bool matched;
- u64 used = btrfs_block_group_used(&block_group->item);
+ u64 used = block_group->used;
/*
* If this block group has been marked to be cleared for one reason or
@@ -876,13 +881,13 @@ int load_free_space_cache(struct btrfs_block_group_cache *block_group)
spin_unlock(&block_group->lock);
ret = __load_free_space_cache(fs_info->tree_root, inode, ctl,
- path, block_group->key.objectid);
+ path, block_group->start);
btrfs_free_path(path);
if (ret <= 0)
goto out;
spin_lock(&ctl->tree_lock);
- matched = (ctl->free_space == (block_group->key.offset - used -
+ matched = (ctl->free_space == (block_group->length - used -
block_group->bytes_super));
spin_unlock(&ctl->tree_lock);
@@ -890,7 +895,7 @@ int load_free_space_cache(struct btrfs_block_group_cache *block_group)
__btrfs_remove_free_space_cache(ctl);
btrfs_warn(fs_info,
"block group %llu has wrong amount of free space",
- block_group->key.objectid);
+ block_group->start);
ret = -1;
}
out:
@@ -903,7 +908,7 @@ out:
btrfs_warn(fs_info,
"failed to load free space cache for block group %llu, rebuilding it now",
- block_group->key.objectid);
+ block_group->start);
}
iput(inode);
@@ -913,7 +918,7 @@ out:
static noinline_for_stack
int write_cache_extent_entries(struct btrfs_io_ctl *io_ctl,
struct btrfs_free_space_ctl *ctl,
- struct btrfs_block_group_cache *block_group,
+ struct btrfs_block_group *block_group,
int *entries, int *bitmaps,
struct list_head *bitmap_list)
{
@@ -1041,7 +1046,7 @@ fail:
}
static noinline_for_stack int write_pinned_extent_entries(
- struct btrfs_block_group_cache *block_group,
+ struct btrfs_block_group *block_group,
struct btrfs_io_ctl *io_ctl,
int *entries)
{
@@ -1061,9 +1066,9 @@ static noinline_for_stack int write_pinned_extent_entries(
*/
unpin = block_group->fs_info->pinned_extents;
- start = block_group->key.objectid;
+ start = block_group->start;
- while (start < block_group->key.objectid + block_group->key.offset) {
+ while (start < block_group->start + block_group->length) {
ret = find_first_extent_bit(unpin, start,
&extent_start, &extent_end,
EXTENT_DIRTY, NULL);
@@ -1071,13 +1076,12 @@ static noinline_for_stack int write_pinned_extent_entries(
return 0;
/* This pinned extent is out of our range */
- if (extent_start >= block_group->key.objectid +
- block_group->key.offset)
+ if (extent_start >= block_group->start + block_group->length)
return 0;
extent_start = max(extent_start, start);
- extent_end = min(block_group->key.objectid +
- block_group->key.offset, extent_end + 1);
+ extent_end = min(block_group->start + block_group->length,
+ extent_end + 1);
len = extent_end - extent_start;
*entries += 1;
@@ -1141,7 +1145,7 @@ cleanup_write_cache_enospc(struct inode *inode,
static int __btrfs_wait_cache_io(struct btrfs_root *root,
struct btrfs_trans_handle *trans,
- struct btrfs_block_group_cache *block_group,
+ struct btrfs_block_group *block_group,
struct btrfs_io_ctl *io_ctl,
struct btrfs_path *path, u64 offset)
{
@@ -1168,7 +1172,7 @@ out:
#ifdef DEBUG
btrfs_err(root->fs_info,
"failed to write free space cache for block group %llu",
- block_group->key.objectid);
+ block_group->start);
#endif
}
}
@@ -1210,12 +1214,12 @@ static int btrfs_wait_cache_io_root(struct btrfs_root *root,
}
int btrfs_wait_cache_io(struct btrfs_trans_handle *trans,
- struct btrfs_block_group_cache *block_group,
+ struct btrfs_block_group *block_group,
struct btrfs_path *path)
{
return __btrfs_wait_cache_io(block_group->fs_info->tree_root, trans,
block_group, &block_group->io_ctl,
- path, block_group->key.objectid);
+ path, block_group->start);
}
/**
@@ -1231,7 +1235,7 @@ int btrfs_wait_cache_io(struct btrfs_trans_handle *trans,
*/
static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
struct btrfs_free_space_ctl *ctl,
- struct btrfs_block_group_cache *block_group,
+ struct btrfs_block_group *block_group,
struct btrfs_io_ctl *io_ctl,
struct btrfs_trans_handle *trans)
{
@@ -1369,7 +1373,7 @@ out_unlock:
}
int btrfs_write_out_cache(struct btrfs_trans_handle *trans,
- struct btrfs_block_group_cache *block_group,
+ struct btrfs_block_group *block_group,
struct btrfs_path *path)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
@@ -1394,7 +1398,7 @@ int btrfs_write_out_cache(struct btrfs_trans_handle *trans,
#ifdef DEBUG
btrfs_err(fs_info,
"failed to write free space cache for block group %llu",
- block_group->key.objectid);
+ block_group->start);
#endif
spin_lock(&block_group->lock);
block_group->disk_cache_state = BTRFS_DC_ERROR;
@@ -1647,11 +1651,11 @@ static int link_free_space(struct btrfs_free_space_ctl *ctl,
static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
{
- struct btrfs_block_group_cache *block_group = ctl->private;
+ struct btrfs_block_group *block_group = ctl->private;
u64 max_bytes;
u64 bitmap_bytes;
u64 extent_bytes;
- u64 size = block_group->key.offset;
+ u64 size = block_group->length;
u64 bytes_per_bg = BITS_PER_BITMAP * ctl->unit;
u64 max_bitmaps = div64_u64(size + bytes_per_bg - 1, bytes_per_bg);
@@ -1991,7 +1995,7 @@ static u64 add_bytes_to_bitmap(struct btrfs_free_space_ctl *ctl,
static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
struct btrfs_free_space *info)
{
- struct btrfs_block_group_cache *block_group = ctl->private;
+ struct btrfs_block_group *block_group = ctl->private;
struct btrfs_fs_info *fs_info = block_group->fs_info;
bool forced = false;
@@ -2028,7 +2032,7 @@ static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
* so allow those block groups to still be allowed to have a bitmap
* entry.
*/
- if (((BITS_PER_BITMAP * ctl->unit) >> 1) > block_group->key.offset)
+ if (((BITS_PER_BITMAP * ctl->unit) >> 1) > block_group->length)
return false;
return true;
@@ -2043,7 +2047,7 @@ static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl,
struct btrfs_free_space *info)
{
struct btrfs_free_space *bitmap_info;
- struct btrfs_block_group_cache *block_group = NULL;
+ struct btrfs_block_group *block_group = NULL;
int added = 0;
u64 bytes, offset, bytes_added;
int ret;
@@ -2380,7 +2384,7 @@ out:
return ret;
}
-int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
+int btrfs_add_free_space(struct btrfs_block_group *block_group,
u64 bytenr, u64 size)
{
return __btrfs_add_free_space(block_group->fs_info,
@@ -2388,7 +2392,7 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
bytenr, size);
}
-int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
+int btrfs_remove_free_space(struct btrfs_block_group *block_group,
u64 offset, u64 bytes)
{
struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
@@ -2478,7 +2482,7 @@ out:
return ret;
}
-void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
+void btrfs_dump_free_space(struct btrfs_block_group *block_group,
u64 bytes)
{
struct btrfs_fs_info *fs_info = block_group->fs_info;
@@ -2503,14 +2507,14 @@ void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
"%d blocks of free space at or bigger than bytes is", count);
}
-void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group)
+void btrfs_init_free_space_ctl(struct btrfs_block_group *block_group)
{
struct btrfs_fs_info *fs_info = block_group->fs_info;
struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
spin_lock_init(&ctl->tree_lock);
ctl->unit = fs_info->sectorsize;
- ctl->start = block_group->key.objectid;
+ ctl->start = block_group->start;
ctl->private = block_group;
ctl->op = &free_space_op;
INIT_LIST_HEAD(&ctl->trimming_ranges);
@@ -2532,7 +2536,7 @@ void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group)
*/
static int
__btrfs_return_cluster_to_free_space(
- struct btrfs_block_group_cache *block_group,
+ struct btrfs_block_group *block_group,
struct btrfs_free_cluster *cluster)
{
struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
@@ -2598,7 +2602,7 @@ void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl)
spin_unlock(&ctl->tree_lock);
}
-void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
+void btrfs_remove_free_space_cache(struct btrfs_block_group *block_group)
{
struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
struct btrfs_free_cluster *cluster;
@@ -2620,7 +2624,7 @@ void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
}
-u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
+u64 btrfs_find_space_for_alloc(struct btrfs_block_group *block_group,
u64 offset, u64 bytes, u64 empty_size,
u64 *max_extent_size)
{
@@ -2674,7 +2678,7 @@ out:
* cluster and remove the cluster from it.
*/
int btrfs_return_cluster_to_free_space(
- struct btrfs_block_group_cache *block_group,
+ struct btrfs_block_group *block_group,
struct btrfs_free_cluster *cluster)
{
struct btrfs_free_space_ctl *ctl;
@@ -2708,7 +2712,7 @@ int btrfs_return_cluster_to_free_space(
return ret;
}
-static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
+static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group *block_group,
struct btrfs_free_cluster *cluster,
struct btrfs_free_space *entry,
u64 bytes, u64 min_start,
@@ -2741,7 +2745,7 @@ static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
* if it couldn't find anything suitably large, or a logical disk offset
* if things worked out
*/
-u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
+u64 btrfs_alloc_from_cluster(struct btrfs_block_group *block_group,
struct btrfs_free_cluster *cluster, u64 bytes,
u64 min_start, u64 *max_extent_size)
{
@@ -2827,7 +2831,7 @@ out:
return ret;
}
-static int btrfs_bitmap_cluster(struct btrfs_block_group_cache *block_group,
+static int btrfs_bitmap_cluster(struct btrfs_block_group *block_group,
struct btrfs_free_space *entry,
struct btrfs_free_cluster *cluster,
u64 offset, u64 bytes,
@@ -2909,7 +2913,7 @@ again:
* extent of cont1_bytes, and other clusters of at least min_bytes.
*/
static noinline int
-setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
+setup_cluster_no_bitmap(struct btrfs_block_group *block_group,
struct btrfs_free_cluster *cluster,
struct list_head *bitmaps, u64 offset, u64 bytes,
u64 cont1_bytes, u64 min_bytes)
@@ -3000,7 +3004,7 @@ setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
* that we have already failed to find extents that will work.
*/
static noinline int
-setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
+setup_cluster_bitmap(struct btrfs_block_group *block_group,
struct btrfs_free_cluster *cluster,
struct list_head *bitmaps, u64 offset, u64 bytes,
u64 cont1_bytes, u64 min_bytes)
@@ -3050,7 +3054,7 @@ setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
* returns zero and sets up cluster if things worked out, otherwise
* it returns -enospc
*/
-int btrfs_find_space_cluster(struct btrfs_block_group_cache *block_group,
+int btrfs_find_space_cluster(struct btrfs_block_group *block_group,
struct btrfs_free_cluster *cluster,
u64 offset, u64 bytes, u64 empty_size)
{
@@ -3141,7 +3145,7 @@ void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster)
cluster->block_group = NULL;
}
-static int do_trimming(struct btrfs_block_group_cache *block_group,
+static int do_trimming(struct btrfs_block_group *block_group,
u64 *total_trimmed, u64 start, u64 bytes,
u64 reserved_start, u64 reserved_bytes,
struct btrfs_trim_range *trim_entry)
@@ -3186,7 +3190,7 @@ static int do_trimming(struct btrfs_block_group_cache *block_group,
return ret;
}
-static int trim_no_bitmap(struct btrfs_block_group_cache *block_group,
+static int trim_no_bitmap(struct btrfs_block_group *block_group,
u64 *total_trimmed, u64 start, u64 end, u64 minlen)
{
struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
@@ -3271,7 +3275,7 @@ out:
return ret;
}
-static int trim_bitmaps(struct btrfs_block_group_cache *block_group,
+static int trim_bitmaps(struct btrfs_block_group *block_group,
u64 *total_trimmed, u64 start, u64 end, u64 minlen)
{
struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
@@ -3352,12 +3356,12 @@ next:
return ret;
}
-void btrfs_get_block_group_trimming(struct btrfs_block_group_cache *cache)
+void btrfs_get_block_group_trimming(struct btrfs_block_group *cache)
{
atomic_inc(&cache->trimming);
}
-void btrfs_put_block_group_trimming(struct btrfs_block_group_cache *block_group)
+void btrfs_put_block_group_trimming(struct btrfs_block_group *block_group)
{
struct btrfs_fs_info *fs_info = block_group->fs_info;
struct extent_map_tree *em_tree;
@@ -3373,7 +3377,7 @@ void btrfs_put_block_group_trimming(struct btrfs_block_group_cache *block_group)
mutex_lock(&fs_info->chunk_mutex);
em_tree = &fs_info->mapping_tree;
write_lock(&em_tree->lock);
- em = lookup_extent_mapping(em_tree, block_group->key.objectid,
+ em = lookup_extent_mapping(em_tree, block_group->start,
1);
BUG_ON(!em); /* logic error, can't happen */
remove_extent_mapping(em_tree, em);
@@ -3392,7 +3396,7 @@ void btrfs_put_block_group_trimming(struct btrfs_block_group_cache *block_group)
}
}
-int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
+int btrfs_trim_block_group(struct btrfs_block_group *block_group,
u64 *trimmed, u64 start, u64 end, u64 minlen)
{
int ret;
@@ -3590,7 +3594,7 @@ int btrfs_write_out_ino_cache(struct btrfs_root *root,
* how the free space cache loading stuff works, so you can get really weird
* configurations.
*/
-int test_add_free_space_entry(struct btrfs_block_group_cache *cache,
+int test_add_free_space_entry(struct btrfs_block_group *cache,
u64 offset, u64 bytes, bool bitmap)
{
struct btrfs_free_space_ctl *ctl = cache->free_space_ctl;
@@ -3658,7 +3662,7 @@ again:
* just used to check the absence of space, so if there is free space in the
* range at all we will return 1.
*/
-int test_check_exists(struct btrfs_block_group_cache *cache,
+int test_check_exists(struct btrfs_block_group *cache,
u64 offset, u64 bytes)
{
struct btrfs_free_space_ctl *ctl = cache->free_space_ctl;
diff --git a/fs/btrfs/free-space-cache.h b/fs/btrfs/free-space-cache.h
index 39c32c8fc24f..ba9a23241101 100644
--- a/fs/btrfs/free-space-cache.h
+++ b/fs/btrfs/free-space-cache.h
@@ -50,24 +50,23 @@ struct btrfs_io_ctl {
unsigned check_crcs:1;
};
-struct inode *lookup_free_space_inode(
- struct btrfs_block_group_cache *block_group,
+struct inode *lookup_free_space_inode(struct btrfs_block_group *block_group,
struct btrfs_path *path);
int create_free_space_inode(struct btrfs_trans_handle *trans,
- struct btrfs_block_group_cache *block_group,
+ struct btrfs_block_group *block_group,
struct btrfs_path *path);
int btrfs_check_trunc_cache_free_space(struct btrfs_fs_info *fs_info,
struct btrfs_block_rsv *rsv);
int btrfs_truncate_free_space_cache(struct btrfs_trans_handle *trans,
- struct btrfs_block_group_cache *block_group,
+ struct btrfs_block_group *block_group,
struct inode *inode);
-int load_free_space_cache(struct btrfs_block_group_cache *block_group);
+int load_free_space_cache(struct btrfs_block_group *block_group);
int btrfs_wait_cache_io(struct btrfs_trans_handle *trans,
- struct btrfs_block_group_cache *block_group,
+ struct btrfs_block_group *block_group,
struct btrfs_path *path);
int btrfs_write_out_cache(struct btrfs_trans_handle *trans,
- struct btrfs_block_group_cache *block_group,
+ struct btrfs_block_group *block_group,
struct btrfs_path *path);
struct inode *lookup_free_ino_inode(struct btrfs_root *root,
struct btrfs_path *path);
@@ -81,42 +80,40 @@ int btrfs_write_out_ino_cache(struct btrfs_root *root,
struct btrfs_path *path,
struct inode *inode);
-void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group);
+void btrfs_init_free_space_ctl(struct btrfs_block_group *block_group);
int __btrfs_add_free_space(struct btrfs_fs_info *fs_info,
struct btrfs_free_space_ctl *ctl,
u64 bytenr, u64 size);
-int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
+int btrfs_add_free_space(struct btrfs_block_group *block_group,
u64 bytenr, u64 size);
-int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
+int btrfs_remove_free_space(struct btrfs_block_group *block_group,
u64 bytenr, u64 size);
void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl);
-void btrfs_remove_free_space_cache(struct btrfs_block_group_cache
- *block_group);
-u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
+void btrfs_remove_free_space_cache(struct btrfs_block_group *block_group);
+u64 btrfs_find_space_for_alloc(struct btrfs_block_group *block_group,
u64 offset, u64 bytes, u64 empty_size,
u64 *max_extent_size);
u64 btrfs_find_ino_for_alloc(struct btrfs_root *fs_root);
-void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
+void btrfs_dump_free_space(struct btrfs_block_group *block_group,
u64 bytes);
-int btrfs_find_space_cluster(struct btrfs_block_group_cache *block_group,
+int btrfs_find_space_cluster(struct btrfs_block_group *block_group,
struct btrfs_free_cluster *cluster,
u64 offset, u64 bytes, u64 empty_size);
void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster);
-u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
+u64 btrfs_alloc_from_cluster(struct btrfs_block_group *block_group,
struct btrfs_free_cluster *cluster, u64 bytes,
u64 min_start, u64 *max_extent_size);
int btrfs_return_cluster_to_free_space(
- struct btrfs_block_group_cache *block_group,
+ struct btrfs_block_group *block_group,
struct btrfs_free_cluster *cluster);
-int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
+int btrfs_trim_block_group(struct btrfs_block_group *block_group,
u64 *trimmed, u64 start, u64 end, u64 minlen);
/* Support functions for running our sanity tests */
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
-int test_add_free_space_entry(struct btrfs_block_group_cache *cache,
+int test_add_free_space_entry(struct btrfs_block_group *cache,
u64 offset, u64 bytes, bool bitmap);
-int test_check_exists(struct btrfs_block_group_cache *cache,
- u64 offset, u64 bytes);
+int test_check_exists(struct btrfs_block_group *cache, u64 offset, u64 bytes);
#endif
#endif
diff --git a/fs/btrfs/free-space-tree.c b/fs/btrfs/free-space-tree.c
index 48a03f5240f5..258cb3fae17a 100644
--- a/fs/btrfs/free-space-tree.c
+++ b/fs/btrfs/free-space-tree.c
@@ -13,10 +13,10 @@
#include "block-group.h"
static int __add_block_group_free_space(struct btrfs_trans_handle *trans,
- struct btrfs_block_group_cache *block_group,
+ struct btrfs_block_group *block_group,
struct btrfs_path *path);
-void set_free_space_tree_thresholds(struct btrfs_block_group_cache *cache)
+void set_free_space_tree_thresholds(struct btrfs_block_group *cache)
{
u32 bitmap_range;
size_t bitmap_size;
@@ -27,8 +27,7 @@ void set_free_space_tree_thresholds(struct btrfs_block_group_cache *cache)
* exceeds that required for using bitmaps.
*/
bitmap_range = cache->fs_info->sectorsize * BTRFS_FREE_SPACE_BITMAP_BITS;
- num_bitmaps = div_u64(cache->key.offset + bitmap_range - 1,
- bitmap_range);
+ num_bitmaps = div_u64(cache->length + bitmap_range - 1, bitmap_range);
bitmap_size = sizeof(struct btrfs_item) + BTRFS_FREE_SPACE_BITMAP_SIZE;
total_bitmap_size = num_bitmaps * bitmap_size;
cache->bitmap_high_thresh = div_u64(total_bitmap_size,
@@ -45,7 +44,7 @@ void set_free_space_tree_thresholds(struct btrfs_block_group_cache *cache)
}
static int add_new_free_space_info(struct btrfs_trans_handle *trans,
- struct btrfs_block_group_cache *block_group,
+ struct btrfs_block_group *block_group,
struct btrfs_path *path)
{
struct btrfs_root *root = trans->fs_info->free_space_root;
@@ -54,9 +53,9 @@ static int add_new_free_space_info(struct btrfs_trans_handle *trans,
struct extent_buffer *leaf;
int ret;
- key.objectid = block_group->key.objectid;
+ key.objectid = block_group->start;
key.type = BTRFS_FREE_SPACE_INFO_KEY;
- key.offset = block_group->key.offset;
+ key.offset = block_group->length;
ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(*info));
if (ret)
@@ -78,7 +77,7 @@ out:
EXPORT_FOR_TESTS
struct btrfs_free_space_info *search_free_space_info(
struct btrfs_trans_handle *trans,
- struct btrfs_block_group_cache *block_group,
+ struct btrfs_block_group *block_group,
struct btrfs_path *path, int cow)
{
struct btrfs_fs_info *fs_info = block_group->fs_info;
@@ -86,16 +85,16 @@ struct btrfs_free_space_info *search_free_space_info(
struct btrfs_key key;
int ret;
- key.objectid = block_group->key.objectid;
+ key.objectid = block_group->start;
key.type = BTRFS_FREE_SPACE_INFO_KEY;
- key.offset = block_group->key.offset;
+ key.offset = block_group->length;
ret = btrfs_search_slot(trans, root, &key, path, 0, cow);
if (ret < 0)
return ERR_PTR(ret);
if (ret != 0) {
btrfs_warn(fs_info, "missing free space info for %llu",
- block_group->key.objectid);
+ block_group->start);
ASSERT(0);
return ERR_PTR(-ENOENT);
}
@@ -180,7 +179,7 @@ static void le_bitmap_set(unsigned long *map, unsigned int start, int len)
EXPORT_FOR_TESTS
int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
- struct btrfs_block_group_cache *block_group,
+ struct btrfs_block_group *block_group,
struct btrfs_path *path)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
@@ -197,7 +196,7 @@ int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
int done = 0, nr;
int ret;
- bitmap_size = free_space_bitmap_size(block_group->key.offset,
+ bitmap_size = free_space_bitmap_size(block_group->length,
fs_info->sectorsize);
bitmap = alloc_bitmap(bitmap_size);
if (!bitmap) {
@@ -205,8 +204,8 @@ int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
goto out;
}
- start = block_group->key.objectid;
- end = block_group->key.objectid + block_group->key.offset;
+ start = block_group->start;
+ end = block_group->start + block_group->length;
key.objectid = end - 1;
key.type = (u8)-1;
@@ -224,8 +223,8 @@ int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0] - 1);
if (found_key.type == BTRFS_FREE_SPACE_INFO_KEY) {
- ASSERT(found_key.objectid == block_group->key.objectid);
- ASSERT(found_key.offset == block_group->key.offset);
+ ASSERT(found_key.objectid == block_group->start);
+ ASSERT(found_key.offset == block_group->length);
done = 1;
break;
} else if (found_key.type == BTRFS_FREE_SPACE_EXTENT_KEY) {
@@ -271,7 +270,7 @@ int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
if (extent_count != expected_extent_count) {
btrfs_err(fs_info,
"incorrect extent count for %llu; counted %u, expected %u",
- block_group->key.objectid, extent_count,
+ block_group->start, extent_count,
expected_extent_count);
ASSERT(0);
ret = -EIO;
@@ -320,7 +319,7 @@ out:
EXPORT_FOR_TESTS
int convert_free_space_to_extents(struct btrfs_trans_handle *trans,
- struct btrfs_block_group_cache *block_group,
+ struct btrfs_block_group *block_group,
struct btrfs_path *path)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
@@ -336,7 +335,7 @@ int convert_free_space_to_extents(struct btrfs_trans_handle *trans,
int done = 0, nr;
int ret;
- bitmap_size = free_space_bitmap_size(block_group->key.offset,
+ bitmap_size = free_space_bitmap_size(block_group->length,
fs_info->sectorsize);
bitmap = alloc_bitmap(bitmap_size);
if (!bitmap) {
@@ -344,8 +343,8 @@ int convert_free_space_to_extents(struct btrfs_trans_handle *trans,
goto out;
}
- start = block_group->key.objectid;
- end = block_group->key.objectid + block_group->key.offset;
+ start = block_group->start;
+ end = block_group->start + block_group->length;
key.objectid = end - 1;
key.type = (u8)-1;
@@ -363,8 +362,8 @@ int convert_free_space_to_extents(struct btrfs_trans_handle *trans,
btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0] - 1);
if (found_key.type == BTRFS_FREE_SPACE_INFO_KEY) {
- ASSERT(found_key.objectid == block_group->key.objectid);
- ASSERT(found_key.offset == block_group->key.offset);
+ ASSERT(found_key.objectid == block_group->start);
+ ASSERT(found_key.offset == block_group->length);
done = 1;
break;
} else if (found_key.type == BTRFS_FREE_SPACE_BITMAP_KEY) {
@@ -413,7 +412,7 @@ int convert_free_space_to_extents(struct btrfs_trans_handle *trans,
btrfs_mark_buffer_dirty(leaf);
btrfs_release_path(path);
- nrbits = div_u64(block_group->key.offset, block_group->fs_info->sectorsize);
+ nrbits = div_u64(block_group->length, block_group->fs_info->sectorsize);
start_bit = find_next_bit_le(bitmap, nrbits, 0);
while (start_bit < nrbits) {
@@ -437,7 +436,7 @@ int convert_free_space_to_extents(struct btrfs_trans_handle *trans,
if (extent_count != expected_extent_count) {
btrfs_err(fs_info,
"incorrect extent count for %llu; counted %u, expected %u",
- block_group->key.objectid, extent_count,
+ block_group->start, extent_count,
expected_extent_count);
ASSERT(0);
ret = -EIO;
@@ -453,7 +452,7 @@ out:
}
static int update_free_space_extent_count(struct btrfs_trans_handle *trans,
- struct btrfs_block_group_cache *block_group,
+ struct btrfs_block_group *block_group,
struct btrfs_path *path,
int new_extents)
{
@@ -491,7 +490,7 @@ out:
}
EXPORT_FOR_TESTS
-int free_space_test_bit(struct btrfs_block_group_cache *block_group,
+int free_space_test_bit(struct btrfs_block_group *block_group,
struct btrfs_path *path, u64 offset)
{
struct extent_buffer *leaf;
@@ -513,7 +512,7 @@ int free_space_test_bit(struct btrfs_block_group_cache *block_group,
return !!extent_buffer_test_bit(leaf, ptr, i);
}
-static void free_space_set_bits(struct btrfs_block_group_cache *block_group,
+static void free_space_set_bits(struct btrfs_block_group *block_group,
struct btrfs_path *path, u64 *start, u64 *size,
int bit)
{
@@ -581,7 +580,7 @@ static int free_space_next_bitmap(struct btrfs_trans_handle *trans,
* the bitmap.
*/
static int modify_free_space_bitmap(struct btrfs_trans_handle *trans,
- struct btrfs_block_group_cache *block_group,
+ struct btrfs_block_group *block_group,
struct btrfs_path *path,
u64 start, u64 size, int remove)
{
@@ -597,7 +596,7 @@ static int modify_free_space_bitmap(struct btrfs_trans_handle *trans,
* Read the bit for the block immediately before the extent of space if
* that block is within the block group.
*/
- if (start > block_group->key.objectid) {
+ if (start > block_group->start) {
u64 prev_block = start - block_group->fs_info->sectorsize;
key.objectid = prev_block;
@@ -649,7 +648,7 @@ static int modify_free_space_bitmap(struct btrfs_trans_handle *trans,
* Read the bit for the block immediately after the extent of space if
* that block is within the block group.
*/
- if (end < block_group->key.objectid + block_group->key.offset) {
+ if (end < block_group->start + block_group->length) {
/* The next block may be in the next bitmap. */
btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
if (end >= key.objectid + key.offset) {
@@ -694,7 +693,7 @@ out:
}
static int remove_free_space_extent(struct btrfs_trans_handle *trans,
- struct btrfs_block_group_cache *block_group,
+ struct btrfs_block_group *block_group,
struct btrfs_path *path,
u64 start, u64 size)
{
@@ -781,7 +780,7 @@ out:
EXPORT_FOR_TESTS
int __remove_from_free_space_tree(struct btrfs_trans_handle *trans,
- struct btrfs_block_group_cache *block_group,
+ struct btrfs_block_group *block_group,
struct btrfs_path *path, u64 start, u64 size)
{
struct btrfs_free_space_info *info;
@@ -812,7 +811,7 @@ int __remove_from_free_space_tree(struct btrfs_trans_handle *trans,
int remove_from_free_space_tree(struct btrfs_trans_handle *trans,
u64 start, u64 size)
{
- struct btrfs_block_group_cache *block_group;
+ struct btrfs_block_group *block_group;
struct btrfs_path *path;
int ret;
@@ -846,7 +845,7 @@ out:
}
static int add_free_space_extent(struct btrfs_trans_handle *trans,
- struct btrfs_block_group_cache *block_group,
+ struct btrfs_block_group *block_group,
struct btrfs_path *path,
u64 start, u64 size)
{
@@ -880,7 +879,7 @@ static int add_free_space_extent(struct btrfs_trans_handle *trans,
new_key.offset = size;
/* Search for a neighbor on the left. */
- if (start == block_group->key.objectid)
+ if (start == block_group->start)
goto right;
key.objectid = start - 1;
key.type = (u8)-1;
@@ -900,8 +899,8 @@ static int add_free_space_extent(struct btrfs_trans_handle *trans,
found_start = key.objectid;
found_end = key.objectid + key.offset;
- ASSERT(found_start >= block_group->key.objectid &&
- found_end > block_group->key.objectid);
+ ASSERT(found_start >= block_group->start &&
+ found_end > block_group->start);
ASSERT(found_start < start && found_end <= start);
/*
@@ -920,7 +919,7 @@ static int add_free_space_extent(struct btrfs_trans_handle *trans,
right:
/* Search for a neighbor on the right. */
- if (end == block_group->key.objectid + block_group->key.offset)
+ if (end == block_group->start + block_group->length)
goto insert;
key.objectid = end;
key.type = (u8)-1;
@@ -940,8 +939,8 @@ right:
found_start = key.objectid;
found_end = key.objectid + key.offset;
- ASSERT(found_start >= block_group->key.objectid &&
- found_end > block_group->key.objectid);
+ ASSERT(found_start >= block_group->start &&
+ found_end > block_group->start);
ASSERT((found_start < start && found_end <= start) ||
(found_start >= end && found_end > end));
@@ -974,7 +973,7 @@ out:
EXPORT_FOR_TESTS
int __add_to_free_space_tree(struct btrfs_trans_handle *trans,
- struct btrfs_block_group_cache *block_group,
+ struct btrfs_block_group *block_group,
struct btrfs_path *path, u64 start, u64 size)
{
struct btrfs_free_space_info *info;
@@ -1005,7 +1004,7 @@ int __add_to_free_space_tree(struct btrfs_trans_handle *trans,
int add_to_free_space_tree(struct btrfs_trans_handle *trans,
u64 start, u64 size)
{
- struct btrfs_block_group_cache *block_group;
+ struct btrfs_block_group *block_group;
struct btrfs_path *path;
int ret;
@@ -1043,7 +1042,7 @@ out:
* through the normal add/remove hooks.
*/
static int populate_free_space_tree(struct btrfs_trans_handle *trans,
- struct btrfs_block_group_cache *block_group)
+ struct btrfs_block_group *block_group)
{
struct btrfs_root *extent_root = trans->fs_info->extent_root;
struct btrfs_path *path, *path2;
@@ -1075,7 +1074,7 @@ static int populate_free_space_tree(struct btrfs_trans_handle *trans,
* BLOCK_GROUP_ITEM, so an extent may precede the block group that it's
* contained in.
*/
- key.objectid = block_group->key.objectid;
+ key.objectid = block_group->start;
key.type = BTRFS_EXTENT_ITEM_KEY;
key.offset = 0;
@@ -1084,8 +1083,8 @@ static int populate_free_space_tree(struct btrfs_trans_handle *trans,
goto out_locked;
ASSERT(ret == 0);
- start = block_group->key.objectid;
- end = block_group->key.objectid + block_group->key.offset;
+ start = block_group->start;
+ end = block_group->start + block_group->length;
while (1) {
btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
@@ -1109,7 +1108,7 @@ static int populate_free_space_tree(struct btrfs_trans_handle *trans,
else
start += key.offset;
} else if (key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
- if (key.objectid != block_group->key.objectid)
+ if (key.objectid != block_group->start)
break;
}
@@ -1140,7 +1139,7 @@ int btrfs_create_free_space_tree(struct btrfs_fs_info *fs_info)
struct btrfs_trans_handle *trans;
struct btrfs_root *tree_root = fs_info->tree_root;
struct btrfs_root *free_space_root;
- struct btrfs_block_group_cache *block_group;
+ struct btrfs_block_group *block_group;
struct rb_node *node;
int ret;
@@ -1159,7 +1158,7 @@ int btrfs_create_free_space_tree(struct btrfs_fs_info *fs_info)
node = rb_first(&fs_info->block_group_cache_tree);
while (node) {
- block_group = rb_entry(node, struct btrfs_block_group_cache,
+ block_group = rb_entry(node, struct btrfs_block_group,
cache_node);
ret = populate_free_space_tree(trans, block_group);
if (ret)
@@ -1265,7 +1264,7 @@ abort:
}
static int __add_block_group_free_space(struct btrfs_trans_handle *trans,
- struct btrfs_block_group_cache *block_group,
+ struct btrfs_block_group *block_group,
struct btrfs_path *path)
{
int ret;
@@ -1277,12 +1276,12 @@ static int __add_block_group_free_space(struct btrfs_trans_handle *trans,
return ret;
return __add_to_free_space_tree(trans, block_group, path,
- block_group->key.objectid,
- block_group->key.offset);
+ block_group->start,
+ block_group->length);
}
int add_block_group_free_space(struct btrfs_trans_handle *trans,
- struct btrfs_block_group_cache *block_group)
+ struct btrfs_block_group *block_group)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_path *path = NULL;
@@ -1312,7 +1311,7 @@ out:
}
int remove_block_group_free_space(struct btrfs_trans_handle *trans,
- struct btrfs_block_group_cache *block_group)
+ struct btrfs_block_group *block_group)
{
struct btrfs_root *root = trans->fs_info->free_space_root;
struct btrfs_path *path;
@@ -1336,8 +1335,8 @@ int remove_block_group_free_space(struct btrfs_trans_handle *trans,
goto out;
}
- start = block_group->key.objectid;
- end = block_group->key.objectid + block_group->key.offset;
+ start = block_group->start;
+ end = block_group->start + block_group->length;
key.objectid = end - 1;
key.type = (u8)-1;
@@ -1355,8 +1354,8 @@ int remove_block_group_free_space(struct btrfs_trans_handle *trans,
btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0] - 1);
if (found_key.type == BTRFS_FREE_SPACE_INFO_KEY) {
- ASSERT(found_key.objectid == block_group->key.objectid);
- ASSERT(found_key.offset == block_group->key.offset);
+ ASSERT(found_key.objectid == block_group->start);
+ ASSERT(found_key.offset == block_group->length);
done = 1;
nr++;
path->slots[0]--;
@@ -1391,7 +1390,7 @@ static int load_free_space_bitmaps(struct btrfs_caching_control *caching_ctl,
struct btrfs_path *path,
u32 expected_extent_count)
{
- struct btrfs_block_group_cache *block_group;
+ struct btrfs_block_group *block_group;
struct btrfs_fs_info *fs_info;
struct btrfs_root *root;
struct btrfs_key key;
@@ -1407,7 +1406,7 @@ static int load_free_space_bitmaps(struct btrfs_caching_control *caching_ctl,
fs_info = block_group->fs_info;
root = fs_info->free_space_root;
- end = block_group->key.objectid + block_group->key.offset;
+ end = block_group->start + block_group->length;
while (1) {
ret = btrfs_next_item(root, path);
@@ -1454,7 +1453,7 @@ static int load_free_space_bitmaps(struct btrfs_caching_control *caching_ctl,
if (extent_count != expected_extent_count) {
btrfs_err(fs_info,
"incorrect extent count for %llu; counted %u, expected %u",
- block_group->key.objectid, extent_count,
+ block_group->start, extent_count,
expected_extent_count);
ASSERT(0);
ret = -EIO;
@@ -1472,7 +1471,7 @@ static int load_free_space_extents(struct btrfs_caching_control *caching_ctl,
struct btrfs_path *path,
u32 expected_extent_count)
{
- struct btrfs_block_group_cache *block_group;
+ struct btrfs_block_group *block_group;
struct btrfs_fs_info *fs_info;
struct btrfs_root *root;
struct btrfs_key key;
@@ -1485,7 +1484,7 @@ static int load_free_space_extents(struct btrfs_caching_control *caching_ctl,
fs_info = block_group->fs_info;
root = fs_info->free_space_root;
- end = block_group->key.objectid + block_group->key.offset;
+ end = block_group->start + block_group->length;
while (1) {
ret = btrfs_next_item(root, path);
@@ -1516,7 +1515,7 @@ static int load_free_space_extents(struct btrfs_caching_control *caching_ctl,
if (extent_count != expected_extent_count) {
btrfs_err(fs_info,
"incorrect extent count for %llu; counted %u, expected %u",
- block_group->key.objectid, extent_count,
+ block_group->start, extent_count,
expected_extent_count);
ASSERT(0);
ret = -EIO;
@@ -1532,7 +1531,7 @@ out:
int load_free_space_tree(struct btrfs_caching_control *caching_ctl)
{
- struct btrfs_block_group_cache *block_group;
+ struct btrfs_block_group *block_group;
struct btrfs_free_space_info *info;
struct btrfs_path *path;
u32 extent_count, flags;
diff --git a/fs/btrfs/free-space-tree.h b/fs/btrfs/free-space-tree.h
index 360d50e1cdea..dc2463e4cfe3 100644
--- a/fs/btrfs/free-space-tree.h
+++ b/fs/btrfs/free-space-tree.h
@@ -16,14 +16,14 @@ struct btrfs_caching_control;
#define BTRFS_FREE_SPACE_BITMAP_SIZE 256
#define BTRFS_FREE_SPACE_BITMAP_BITS (BTRFS_FREE_SPACE_BITMAP_SIZE * BITS_PER_BYTE)
-void set_free_space_tree_thresholds(struct btrfs_block_group_cache *block_group);
+void set_free_space_tree_thresholds(struct btrfs_block_group *block_group);
int btrfs_create_free_space_tree(struct btrfs_fs_info *fs_info);
int btrfs_clear_free_space_tree(struct btrfs_fs_info *fs_info);
int load_free_space_tree(struct btrfs_caching_control *caching_ctl);
int add_block_group_free_space(struct btrfs_trans_handle *trans,
- struct btrfs_block_group_cache *block_group);
+ struct btrfs_block_group *block_group);
int remove_block_group_free_space(struct btrfs_trans_handle *trans,
- struct btrfs_block_group_cache *block_group);
+ struct btrfs_block_group *block_group);
int add_to_free_space_tree(struct btrfs_trans_handle *trans,
u64 start, u64 size);
int remove_from_free_space_tree(struct btrfs_trans_handle *trans,
@@ -32,21 +32,21 @@ int remove_from_free_space_tree(struct btrfs_trans_handle *trans,
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
struct btrfs_free_space_info *
search_free_space_info(struct btrfs_trans_handle *trans,
- struct btrfs_block_group_cache *block_group,
+ struct btrfs_block_group *block_group,
struct btrfs_path *path, int cow);
int __add_to_free_space_tree(struct btrfs_trans_handle *trans,
- struct btrfs_block_group_cache *block_group,
+ struct btrfs_block_group *block_group,
struct btrfs_path *path, u64 start, u64 size);
int __remove_from_free_space_tree(struct btrfs_trans_handle *trans,
- struct btrfs_block_group_cache *block_group,
+ struct btrfs_block_group *block_group,
struct btrfs_path *path, u64 start, u64 size);
int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
- struct btrfs_block_group_cache *block_group,
+ struct btrfs_block_group *block_group,
struct btrfs_path *path);
int convert_free_space_to_extents(struct btrfs_trans_handle *trans,
- struct btrfs_block_group_cache *block_group,
+ struct btrfs_block_group *block_group,
struct btrfs_path *path);
-int free_space_test_bit(struct btrfs_block_group_cache *block_group,
+int free_space_test_bit(struct btrfs_block_group *block_group,
struct btrfs_path *path, u64 offset);
#endif
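
The hunks above (and throughout this series) are mechanical fallout of one rename: the btrfs_key embedded in the old btrfs_block_group_cache (key.objectid/key.offset) becomes explicit start/length fields in the new btrfs_block_group. A minimal userspace sketch of the resulting range arithmetic, using illustrative names only:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the renamed structure: placement is carried by explicit
 * start/length fields instead of being packed into a btrfs_key. */
struct block_group_sketch {
	uint64_t start;   /* was key.objectid */
	uint64_t length;  /* was key.offset */
};

int main(void)
{
	struct block_group_sketch bg = { .start = 1048576, .length = 262144 };
	/* Exclusive end, matching "end = block_group->start + length" above. */
	uint64_t end = bg.start + bg.length;

	printf("range [%llu, %llu)\n",
	       (unsigned long long)bg.start, (unsigned long long)end);
	return 0;
}

Self-describing fields also remove the temptation to misread key.offset as a file offset rather than a size.
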
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index c6dc4dd16cf7..56032c518b26 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -368,6 +368,7 @@ struct async_chunk {
u64 end;
unsigned int write_flags;
struct list_head extents;
+ struct cgroup_subsys_state *blkcg_css;
struct btrfs_work work;
atomic_t *pending;
};
@@ -712,10 +713,12 @@ cleanup_and_bail_uncompressed:
* to our extent and set things up for the async work queue to run
* cow_file_range to do the normal delalloc dance.
*/
- if (page_offset(async_chunk->locked_page) >= start &&
- page_offset(async_chunk->locked_page) <= end)
+ if (async_chunk->locked_page &&
+ (page_offset(async_chunk->locked_page) >= start &&
+ page_offset(async_chunk->locked_page) <= end)) {
__set_page_dirty_nobuffers(async_chunk->locked_page);
/* unlocked later on in the async handlers */
+ }
if (redirty)
extent_range_redirty_for_io(inode, start, end);
@@ -795,7 +798,7 @@ retry:
async_extent->start +
async_extent->ram_size - 1,
WB_SYNC_ALL);
- else if (ret)
+ else if (ret && async_chunk->locked_page)
unlock_page(async_chunk->locked_page);
kfree(async_extent);
cond_resched();
@@ -878,7 +881,8 @@ retry:
ins.objectid,
ins.offset, async_extent->pages,
async_extent->nr_pages,
- async_chunk->write_flags)) {
+ async_chunk->write_flags,
+ async_chunk->blkcg_css)) {
struct page *p = async_extent->pages[0];
const u64 start = async_extent->start;
const u64 end = start + async_extent->ram_size - 1;
@@ -1196,6 +1200,8 @@ static noinline void async_cow_free(struct btrfs_work *work)
async_chunk = container_of(work, struct async_chunk, work);
if (async_chunk->inode)
btrfs_add_delayed_iput(async_chunk->inode);
+ if (async_chunk->blkcg_css)
+ css_put(async_chunk->blkcg_css);
/*
* Since the pointer to 'pending' is at the beginning of the array of
* async_chunk's, freeing it ensures the whole array has been freed.
@@ -1204,12 +1210,14 @@ static noinline void async_cow_free(struct btrfs_work *work)
kvfree(async_chunk->pending);
}
-static int cow_file_range_async(struct inode *inode, struct page *locked_page,
+static int cow_file_range_async(struct inode *inode,
+ struct writeback_control *wbc,
+ struct page *locked_page,
u64 start, u64 end, int *page_started,
- unsigned long *nr_written,
- unsigned int write_flags)
+ unsigned long *nr_written)
{
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+ struct cgroup_subsys_state *blkcg_css = wbc_blkcg_css(wbc);
struct async_cow *ctx;
struct async_chunk *async_chunk;
unsigned long nr_pages;
@@ -1218,6 +1226,7 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page,
int i;
bool should_compress;
unsigned nofs_flag;
+ const unsigned int write_flags = wbc_to_write_flags(wbc);
unlock_extent(&BTRFS_I(inode)->io_tree, start, end);
@@ -1264,14 +1273,45 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page,
async_chunk[i].inode = inode;
async_chunk[i].start = start;
async_chunk[i].end = cur_end;
- async_chunk[i].locked_page = locked_page;
async_chunk[i].write_flags = write_flags;
INIT_LIST_HEAD(&async_chunk[i].extents);
- btrfs_init_work(&async_chunk[i].work,
- btrfs_delalloc_helper,
- async_cow_start, async_cow_submit,
- async_cow_free);
+ /*
+ * The locked_page comes all the way from writepage and it's
+ * the original page we were actually given. As we spread
+ * this large delalloc region across multiple async_chunk
+ * structs, only the first struct needs a pointer to locked_page.
+ *
+ * This way we don't need racy decisions about who is supposed
+ * to unlock it.
+ */
+ if (locked_page) {
+ /*
+ * Depending on the compressibility, the pages might or
+ * might not go through async. We want all of them to
+ * be accounted against wbc once. Let's do it here
+ * before the paths diverge. wbc accounting is used
+ * only for foreign writeback detection and doesn't
+ * need full accuracy. Just account the whole thing
+ * against the first page.
+ */
+ wbc_account_cgroup_owner(wbc, locked_page,
+ cur_end - start);
+ async_chunk[i].locked_page = locked_page;
+ locked_page = NULL;
+ } else {
+ async_chunk[i].locked_page = NULL;
+ }
+
+ if (blkcg_css != blkcg_root_css) {
+ css_get(blkcg_css);
+ async_chunk[i].blkcg_css = blkcg_css;
+ } else {
+ async_chunk[i].blkcg_css = NULL;
+ }
+
+ btrfs_init_work(&async_chunk[i].work, async_cow_start,
+ async_cow_submit, async_cow_free);
nr_pages = DIV_ROUND_UP(cur_end - start, PAGE_SIZE);
atomic_add(nr_pages, &fs_info->async_delalloc_pages);
@@ -1697,7 +1737,6 @@ int btrfs_run_delalloc_range(struct inode *inode, struct page *locked_page,
{
int ret;
int force_cow = need_force_cow(inode, start, end);
- unsigned int write_flags = wbc_to_write_flags(wbc);
if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW && !force_cow) {
ret = run_delalloc_nocow(inode, locked_page, start, end,
@@ -1712,9 +1751,8 @@ int btrfs_run_delalloc_range(struct inode *inode, struct page *locked_page,
} else {
set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
&BTRFS_I(inode)->runtime_flags);
- ret = cow_file_range_async(inode, locked_page, start, end,
- page_started, nr_written,
- write_flags);
+ ret = cow_file_range_async(inode, wbc, locked_page, start, end,
+ page_started, nr_written);
}
if (ret)
btrfs_cleanup_ordered_extents(inode, locked_page, start,
@@ -2110,7 +2148,7 @@ static blk_status_t btrfs_submit_bio_hook(struct inode *inode, struct bio *bio,
}
mapit:
- ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
+ ret = btrfs_map_bio(fs_info, bio, mirror_num);
out:
if (ret) {
@@ -2214,12 +2252,16 @@ again:
mapping_set_error(page->mapping, ret);
end_extent_writepage(page, ret, page_start, page_end);
ClearPageChecked(page);
- goto out;
+ goto out_reserved;
}
ClearPageChecked(page);
set_page_dirty(page);
+out_reserved:
btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
+ if (ret)
+ btrfs_delalloc_release_space(inode, data_reserved, page_start,
+ PAGE_SIZE, true);
out:
unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end,
&cached_state);
@@ -2260,8 +2302,7 @@ int btrfs_writepage_cow_fixup(struct page *page, u64 start, u64 end)
SetPageChecked(page);
get_page(page);
- btrfs_init_work(&fixup->work, btrfs_fixup_helper,
- btrfs_writepage_fixup_worker, NULL, NULL);
+ btrfs_init_work(&fixup->work, btrfs_writepage_fixup_worker, NULL, NULL);
fixup->page = page;
btrfs_queue_work(fs_info->fixup_workers, &fixup->work);
return -EBUSY;
@@ -2675,7 +2716,7 @@ static noinline int relink_extent_backref(struct btrfs_path *path,
key.type = BTRFS_INODE_ITEM_KEY;
key.offset = 0;
- inode = btrfs_iget(fs_info->sb, &key, root, NULL);
+ inode = btrfs_iget(fs_info->sb, &key, root);
if (IS_ERR(inode)) {
srcu_read_unlock(&fs_info->subvol_srcu, index);
return 0;
@@ -2999,7 +3040,7 @@ out_kfree:
static void btrfs_release_delalloc_bytes(struct btrfs_fs_info *fs_info,
u64 start, u64 len)
{
- struct btrfs_block_group_cache *cache;
+ struct btrfs_block_group *cache;
cache = btrfs_lookup_block_group(fs_info, start);
ASSERT(cache);
@@ -3027,7 +3068,7 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
int compress_type = 0;
int ret = 0;
u64 logical_len = ordered_extent->len;
- bool nolock;
+ bool freespace_inode;
bool truncated = false;
bool range_locked = false;
bool clear_new_delalloc_bytes = false;
@@ -3038,7 +3079,7 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
!test_bit(BTRFS_ORDERED_DIRECT, &ordered_extent->flags))
clear_new_delalloc_bytes = true;
- nolock = btrfs_is_free_space_inode(BTRFS_I(inode));
+ freespace_inode = btrfs_is_free_space_inode(BTRFS_I(inode));
if (test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags)) {
ret = -EIO;
@@ -3069,8 +3110,8 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
btrfs_qgroup_free_data(inode, NULL, ordered_extent->file_offset,
ordered_extent->len);
btrfs_ordered_update_i_size(inode, 0, ordered_extent);
- if (nolock)
- trans = btrfs_join_transaction_nolock(root);
+ if (freespace_inode)
+ trans = btrfs_join_transaction_spacecache(root);
else
trans = btrfs_join_transaction(root);
if (IS_ERR(trans)) {
@@ -3104,8 +3145,8 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
EXTENT_DEFRAG, 0, 0, &cached_state);
}
- if (nolock)
- trans = btrfs_join_transaction_nolock(root);
+ if (freespace_inode)
+ trans = btrfs_join_transaction_spacecache(root);
else
trans = btrfs_join_transaction(root);
if (IS_ERR(trans)) {
@@ -3254,7 +3295,6 @@ void btrfs_writepage_endio_finish_ordered(struct page *page, u64 start,
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_ordered_extent *ordered_extent = NULL;
struct btrfs_workqueue *wq;
- btrfs_work_func_t func;
trace_btrfs_writepage_end_io_hook(page, start, end, uptodate);
@@ -3263,16 +3303,12 @@ void btrfs_writepage_endio_finish_ordered(struct page *page, u64 start,
end - start + 1, uptodate))
return;
- if (btrfs_is_free_space_inode(BTRFS_I(inode))) {
+ if (btrfs_is_free_space_inode(BTRFS_I(inode)))
wq = fs_info->endio_freespace_worker;
- func = btrfs_freespace_write_helper;
- } else {
+ else
wq = fs_info->endio_write_workers;
- func = btrfs_endio_write_helper;
- }
- btrfs_init_work(&ordered_extent->work, func, finish_ordered_fn, NULL,
- NULL);
+ btrfs_init_work(&ordered_extent->work, finish_ordered_fn, NULL, NULL);
btrfs_queue_work(wq, &ordered_extent->work);
}
@@ -3531,7 +3567,7 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
found_key.objectid = found_key.offset;
found_key.type = BTRFS_INODE_ITEM_KEY;
found_key.offset = 0;
- inode = btrfs_iget(fs_info->sb, &found_key, root, NULL);
+ inode = btrfs_iget(fs_info->sb, &found_key, root);
ret = PTR_ERR_OR_ZERO(inode);
if (ret && ret != -ENOENT)
goto out;
@@ -5153,7 +5189,6 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
hole_em->block_len = 0;
hole_em->orig_block_len = 0;
hole_em->ram_bytes = hole_size;
- hole_em->bdev = fs_info->fs_devices->latest_bdev;
hole_em->compress_type = BTRFS_COMPRESS_NONE;
hole_em->generation = fs_info->generation;
@@ -5750,12 +5785,14 @@ static struct inode *btrfs_iget_locked(struct super_block *s,
return inode;
}
-/* Get an inode object given its location and corresponding root.
- * Returns in *is_new if the inode was read from disk
+/*
+ * Get an inode object given its location and corresponding root.
+ * The path can be preallocated to prevent recursing back to iget through
+ * the allocator. NULL is also valid but may require an additional
+ * allocation later.
*/
struct inode *btrfs_iget_path(struct super_block *s, struct btrfs_key *location,
- struct btrfs_root *root, int *new,
- struct btrfs_path *path)
+ struct btrfs_root *root, struct btrfs_path *path)
{
struct inode *inode;
@@ -5770,8 +5807,6 @@ struct inode *btrfs_iget_path(struct super_block *s, struct btrfs_key *location,
if (!ret) {
inode_tree_add(inode);
unlock_new_inode(inode);
- if (new)
- *new = 1;
} else {
iget_failed(inode);
/*
@@ -5789,9 +5824,9 @@ struct inode *btrfs_iget_path(struct super_block *s, struct btrfs_key *location,
}
struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
- struct btrfs_root *root, int *new)
+ struct btrfs_root *root)
{
- return btrfs_iget_path(s, location, root, new, NULL);
+ return btrfs_iget_path(s, location, root, NULL);
}
static struct inode *new_simple_dir(struct super_block *s,
@@ -5857,7 +5892,7 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
return ERR_PTR(ret);
if (location.type == BTRFS_INODE_ITEM_KEY) {
- inode = btrfs_iget(dir->i_sb, &location, root, NULL);
+ inode = btrfs_iget(dir->i_sb, &location, root);
if (IS_ERR(inode))
return inode;
@@ -5882,7 +5917,7 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
else
inode = new_simple_dir(dir->i_sb, &location, sub_root);
} else {
- inode = btrfs_iget(dir->i_sb, &location, sub_root, NULL);
+ inode = btrfs_iget(dir->i_sb, &location, sub_root);
}
srcu_read_unlock(&fs_info->subvol_srcu, index);
@@ -6931,8 +6966,6 @@ struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
read_lock(&em_tree->lock);
em = lookup_extent_mapping(em_tree, start, len);
- if (em)
- em->bdev = fs_info->fs_devices->latest_bdev;
read_unlock(&em_tree->lock);
if (em) {
@@ -6948,7 +6981,6 @@ struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
err = -ENOMEM;
goto out;
}
- em->bdev = fs_info->fs_devices->latest_bdev;
em->start = EXTENT_MAP_HOLE;
em->orig_start = EXTENT_MAP_HOLE;
em->len = (u64)-1;
@@ -7207,7 +7239,6 @@ struct extent_map *btrfs_get_extent_fiemap(struct btrfs_inode *inode,
err = -ENOMEM;
goto out;
}
- em->bdev = NULL;
ASSERT(hole_em);
/*
@@ -7567,7 +7598,6 @@ static struct extent_map *create_io_em(struct inode *inode, u64 start, u64 len,
{
struct extent_map_tree *em_tree;
struct extent_map *em;
- struct btrfs_root *root = BTRFS_I(inode)->root;
int ret;
ASSERT(type == BTRFS_ORDERED_PREALLOC ||
@@ -7585,7 +7615,6 @@ static struct extent_map *create_io_em(struct inode *inode, u64 start, u64 len,
em->len = len;
em->block_len = block_len;
em->block_start = block_start;
- em->bdev = root->fs_info->fs_devices->latest_bdev;
em->orig_block_len = orig_block_len;
em->ram_bytes = ram_bytes;
em->generation = -1;
@@ -7624,6 +7653,8 @@ static int btrfs_get_blocks_direct_read(struct extent_map *em,
struct inode *inode,
u64 start, u64 len)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+
if (em->block_start == EXTENT_MAP_HOLE ||
test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
return -ENOENT;
@@ -7633,7 +7664,7 @@ static int btrfs_get_blocks_direct_read(struct extent_map *em,
bh_result->b_blocknr = (em->block_start + (start - em->start)) >>
inode->i_blkbits;
bh_result->b_size = len;
- bh_result->b_bdev = em->bdev;
+ bh_result->b_bdev = fs_info->fs_devices->latest_bdev;
set_buffer_mapped(bh_result);
return 0;
@@ -7716,7 +7747,7 @@ skip_cow:
bh_result->b_blocknr = (em->block_start + (start - em->start)) >>
inode->i_blkbits;
bh_result->b_size = len;
- bh_result->b_bdev = em->bdev;
+ bh_result->b_bdev = fs_info->fs_devices->latest_bdev;
set_buffer_mapped(bh_result);
if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
@@ -7858,7 +7889,7 @@ static inline blk_status_t submit_dio_repair_bio(struct inode *inode,
if (ret)
return ret;
- ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
+ ret = btrfs_map_bio(fs_info, bio, mirror_num);
return ret;
}
@@ -8211,18 +8242,14 @@ static void __endio_write_update_ordered(struct inode *inode,
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_ordered_extent *ordered = NULL;
struct btrfs_workqueue *wq;
- btrfs_work_func_t func;
u64 ordered_offset = offset;
u64 ordered_bytes = bytes;
u64 last_offset;
- if (btrfs_is_free_space_inode(BTRFS_I(inode))) {
+ if (btrfs_is_free_space_inode(BTRFS_I(inode)))
wq = fs_info->endio_freespace_worker;
- func = btrfs_freespace_write_helper;
- } else {
+ else
wq = fs_info->endio_write_workers;
- func = btrfs_endio_write_helper;
- }
while (ordered_offset < offset + bytes) {
last_offset = ordered_offset;
@@ -8230,9 +8257,8 @@ static void __endio_write_update_ordered(struct inode *inode,
&ordered_offset,
ordered_bytes,
uptodate)) {
- btrfs_init_work(&ordered->work, func,
- finish_ordered_fn,
- NULL, NULL);
+ btrfs_init_work(&ordered->work, finish_ordered_fn, NULL,
+ NULL);
btrfs_queue_work(wq, &ordered->work);
}
/*
@@ -8389,7 +8415,7 @@ static inline blk_status_t btrfs_submit_dio_bio(struct bio *bio,
goto err;
}
map:
- ret = btrfs_map_bio(fs_info, bio, 0, 0);
+ ret = btrfs_map_bio(fs_info, bio, 0);
err:
return ret;
}
@@ -9321,7 +9347,6 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
ei->io_failure_tree.track_uptodate = true;
atomic_set(&ei->sync_writers, 0);
mutex_init(&ei->log_mutex);
- mutex_init(&ei->delalloc_mutex);
btrfs_ordered_inode_tree_init(&ei->ordered_tree);
INIT_LIST_HEAD(&ei->delalloc_inodes);
INIT_LIST_HEAD(&ei->delayed_iput);
@@ -9550,6 +9575,9 @@ static int btrfs_rename_exchange(struct inode *old_dir,
goto out_notrans;
}
+ if (dest != root)
+ btrfs_record_root_in_trans(trans, dest);
+
/*
* We need to find a free sequence number both in the source and
* in the destination directory for the exchange.
@@ -9744,6 +9772,18 @@ out_fail:
commit_transaction = true;
}
if (commit_transaction) {
+ /*
+ * We may have set commit_transaction when logging the new name
+ * in the destination root, in which case we left the source
+ * root context in the list of log contextes. So make sure we
+ * remove it to avoid invalid memory accesses, since the context
+ * was allocated in our stack frame.
+ */
+ if (sync_log_root) {
+ mutex_lock(&root->log_mutex);
+ list_del_init(&ctx_root.list);
+ mutex_unlock(&root->log_mutex);
+ }
ret = btrfs_commit_transaction(trans);
} else {
int ret2;
@@ -9757,6 +9797,9 @@ out_notrans:
if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
up_read(&fs_info->subvol_sem);
+ ASSERT(list_empty(&ctx_root.list));
+ ASSERT(list_empty(&ctx_dest.list));
+
return ret;
}
@@ -10101,8 +10144,7 @@ static struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode
init_completion(&work->completion);
INIT_LIST_HEAD(&work->list);
work->inode = inode;
- btrfs_init_work(&work->work, btrfs_flush_delalloc_helper,
- btrfs_run_delalloc_work, NULL, NULL);
+ btrfs_init_work(&work->work, btrfs_run_delalloc_work, NULL, NULL);
return work;
}
@@ -10435,7 +10477,6 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
em->block_len = ins.offset;
em->orig_block_len = ins.offset;
em->ram_bytes = ins.offset;
- em->bdev = fs_info->fs_devices->latest_bdev;
set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
em->generation = trans->transid;
@@ -10791,7 +10832,7 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
start = 0;
while (start < isize) {
u64 logical_block_start, physical_block_start;
- struct btrfs_block_group_cache *bg;
+ struct btrfs_block_group *bg;
u64 len = isize - start;
em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, start, len, 0);
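
The async_chunk changes above take one blkcg_css reference per chunk when the issuing cgroup is not the root (css_get() in cow_file_range_async(), css_put() in async_cow_free()), with NULL doubling as "no reference held". A toy userspace model of that pairing, assuming nothing beyond what the hunks show; all names here are illustrative:

#include <assert.h>
#include <stddef.h>

struct toy_css { int refs; };

static struct toy_css root_css = { .refs = 1 };

static void toy_get(struct toy_css *css) { css->refs++; }
static void toy_put(struct toy_css *css) { assert(css->refs > 0); css->refs--; }

struct toy_chunk { struct toy_css *blkcg_css; };

static void chunk_init(struct toy_chunk *c, struct toy_css *css)
{
	if (css != &root_css) {		/* the root css is never pinned */
		toy_get(css);
		c->blkcg_css = css;
	} else {
		c->blkcg_css = NULL;	/* NULL means "no reference held" */
	}
}

static void chunk_free(struct toy_chunk *c)
{
	if (c->blkcg_css)		/* mirrors async_cow_free() */
		toy_put(c->blkcg_css);
}

int main(void)
{
	struct toy_css css = { .refs = 1 };
	struct toy_chunk c;

	chunk_init(&c, &css);
	assert(css.refs == 2);
	chunk_free(&c);
	assert(css.refs == 1);
	return 0;
}
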
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 23272d9154f3..a1ee0b775e65 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -479,10 +479,9 @@ static int btrfs_ioctl_getversion(struct file *file, int __user *arg)
return put_user(inode->i_generation, arg);
}
-static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg)
+static noinline int btrfs_ioctl_fitrim(struct btrfs_fs_info *fs_info,
+ void __user *arg)
{
- struct inode *inode = file_inode(file);
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_device *device;
struct request_queue *q;
struct fstrim_range range;
@@ -541,7 +540,7 @@ static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg)
return 0;
}
-int btrfs_is_empty_uuid(u8 *uuid)
+int __pure btrfs_is_empty_uuid(u8 *uuid)
{
int i;
@@ -1409,7 +1408,7 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
return -EINVAL;
if (do_compress) {
- if (range->compress_type > BTRFS_COMPRESS_TYPES)
+ if (range->compress_type >= BTRFS_NR_COMPRESS_TYPES)
return -EINVAL;
if (range->compress_type)
compress_type = range->compress_type;
@@ -2462,7 +2461,7 @@ static int btrfs_search_path_in_tree_user(struct inode *inode,
goto out;
}
- temp_inode = btrfs_iget(sb, &key2, root, NULL);
+ temp_inode = btrfs_iget(sb, &key2, root);
if (IS_ERR(temp_inode)) {
ret = PTR_ERR(temp_inode);
goto out;
@@ -4032,16 +4031,15 @@ out:
static void get_block_group_info(struct list_head *groups_list,
struct btrfs_ioctl_space_info *space)
{
- struct btrfs_block_group_cache *block_group;
+ struct btrfs_block_group *block_group;
space->total_bytes = 0;
space->used_bytes = 0;
space->flags = 0;
list_for_each_entry(block_group, groups_list, list) {
space->flags = block_group->flags;
- space->total_bytes += block_group->key.offset;
- space->used_bytes +=
- btrfs_block_group_used(&block_group->item);
+ space->total_bytes += block_group->length;
+ space->used_bytes += block_group->used;
}
}
@@ -4952,10 +4950,9 @@ drop_write:
return ret;
}
-static long btrfs_ioctl_quota_rescan_status(struct file *file, void __user *arg)
+static long btrfs_ioctl_quota_rescan_status(struct btrfs_fs_info *fs_info,
+ void __user *arg)
{
- struct inode *inode = file_inode(file);
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_ioctl_quota_rescan_args *qsa;
int ret = 0;
@@ -4978,11 +4975,9 @@ static long btrfs_ioctl_quota_rescan_status(struct file *file, void __user *arg)
return ret;
}
-static long btrfs_ioctl_quota_rescan_wait(struct file *file, void __user *arg)
+static long btrfs_ioctl_quota_rescan_wait(struct btrfs_fs_info *fs_info,
+ void __user *arg)
{
- struct inode *inode = file_inode(file);
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
-
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
@@ -5154,10 +5149,9 @@ out:
return ret;
}
-static int btrfs_ioctl_get_fslabel(struct file *file, void __user *arg)
+static int btrfs_ioctl_get_fslabel(struct btrfs_fs_info *fs_info,
+ void __user *arg)
{
- struct inode *inode = file_inode(file);
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
size_t len;
int ret;
char label[BTRFS_LABEL_SIZE];
@@ -5241,10 +5235,9 @@ int btrfs_ioctl_get_supported_features(void __user *arg)
return 0;
}
-static int btrfs_ioctl_get_features(struct file *file, void __user *arg)
+static int btrfs_ioctl_get_features(struct btrfs_fs_info *fs_info,
+ void __user *arg)
{
- struct inode *inode = file_inode(file);
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_super_block *super_block = fs_info->super_copy;
struct btrfs_ioctl_feature_flags features;
@@ -5445,11 +5438,11 @@ long btrfs_ioctl(struct file *file, unsigned int
case FS_IOC_GETVERSION:
return btrfs_ioctl_getversion(file, argp);
case FS_IOC_GETFSLABEL:
- return btrfs_ioctl_get_fslabel(file, argp);
+ return btrfs_ioctl_get_fslabel(fs_info, argp);
case FS_IOC_SETFSLABEL:
return btrfs_ioctl_set_fslabel(file, argp);
case FITRIM:
- return btrfs_ioctl_fitrim(file, argp);
+ return btrfs_ioctl_fitrim(fs_info, argp);
case BTRFS_IOC_SNAP_CREATE:
return btrfs_ioctl_snap_create(file, argp, 0);
case BTRFS_IOC_SNAP_CREATE_V2:
@@ -5554,15 +5547,15 @@ long btrfs_ioctl(struct file *file, unsigned int
case BTRFS_IOC_QUOTA_RESCAN:
return btrfs_ioctl_quota_rescan(file, argp);
case BTRFS_IOC_QUOTA_RESCAN_STATUS:
- return btrfs_ioctl_quota_rescan_status(file, argp);
+ return btrfs_ioctl_quota_rescan_status(fs_info, argp);
case BTRFS_IOC_QUOTA_RESCAN_WAIT:
- return btrfs_ioctl_quota_rescan_wait(file, argp);
+ return btrfs_ioctl_quota_rescan_wait(fs_info, argp);
case BTRFS_IOC_DEV_REPLACE:
return btrfs_ioctl_dev_replace(fs_info, argp);
case BTRFS_IOC_GET_SUPPORTED_FEATURES:
return btrfs_ioctl_get_supported_features(argp);
case BTRFS_IOC_GET_FEATURES:
- return btrfs_ioctl_get_features(file, argp);
+ return btrfs_ioctl_get_features(fs_info, argp);
case BTRFS_IOC_SET_FEATURES:
return btrfs_ioctl_set_features(file, argp);
case FS_IOC_FSGETXATTR:
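
The defrag hunk above tightens the compression type check to ">= BTRFS_NR_COMPRESS_TYPES": a value that indexes an N-entry table is valid only when strictly less than N, so rejecting with ">" would let the value N itself slip through and read one slot past the end. A standalone sketch of the idiom with a hypothetical table:

#include <stdbool.h>
#include <stdio.h>

enum { NR_TYPES = 4 };	/* hypothetical N-entry table */
static const char *type_names[NR_TYPES] = { "none", "zlib", "lzo", "zstd" };

static bool type_valid(unsigned int idx)
{
	return idx < NR_TYPES;	/* i.e. reject idx >= NR_TYPES, not idx > */
}

int main(void)
{
	for (unsigned int i = 0; i <= NR_TYPES; i++)
		printf("%u -> %s\n", i,
		       type_valid(i) ? type_names[i] : "rejected (out of range)");
	return 0;
}
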
diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c
index 7f9a578a1a20..571c4826c428 100644
--- a/fs/btrfs/locking.c
+++ b/fs/btrfs/locking.c
@@ -13,65 +13,164 @@
#include "extent_io.h"
#include "locking.h"
+/*
+ * Extent buffer locking
+ * =====================
+ *
+ * The locks use a custom scheme that allows doing more operations than are
+ * available from current locking primitives. The building blocks are still
+ * rwlock and wait queues.
+ *
+ * Required semantics:
+ *
+ * - reader/writer exclusion
+ * - writer/writer exclusion
+ * - reader/reader sharing
+ * - spinning lock semantics
+ * - blocking lock semantics
+ * - try-lock semantics for readers and writers
+ * - one level nesting, allowing read lock to be taken by the same thread that
+ * already has write lock
+ *
+ * The extent buffer locks (also called tree locks) manage access to eb data
+ * related to the storage in the b-tree (keys, items, but not the individual
+ * members of eb).
+ * We want concurrency of many readers and safe updates. The underlying locking
+ * is done by read-write spinlock and the blocking part is implemented using
+ * counters and wait queues.
+ *
+ * spinning semantics - the low-level rwlock is held so all other threads that
+ * want to take it are spinning on it.
+ *
+ * blocking semantics - the low-level rwlock is not held but the counter
+ * denotes how many times the blocking lock was held;
+ * sleeping is possible
+ *
+ * Write lock always allows only one thread to access the data.
+ *
+ *
+ * Debugging
+ * ---------
+ *
+ * There are additional state counters that are asserted in various contexts,
+ * removed from non-debug builds to reduce extent_buffer size and for
+ * performance reasons.
+ *
+ *
+ * Lock nesting
+ * ------------
+ *
+ * A write operation on a tree might indirectly start a lookup on the same
+ * tree. This can happen when btrfs_cow_block locks the tree and needs to
+ * look up free extents.
+ *
+ * btrfs_cow_block
+ * ..
+ * alloc_tree_block_no_bg_flush
+ * btrfs_alloc_tree_block
+ * btrfs_reserve_extent
+ * ..
+ * load_free_space_cache
+ * ..
+ * btrfs_lookup_file_extent
+ * btrfs_search_slot
+ *
+ *
+ * Locking pattern - spinning
+ * --------------------------
+ *
+ * The simple locking scenario; the +--+ denotes the spinning section.
+ *
+ * +- btrfs_tree_lock
+ * | - extent_buffer::rwlock is held
+ * | - no heavy operations should happen, eg. IO, memory allocations, large
+ * | structure traversals
+ * +- btrfs_tree_unlock
+ *
+ *
+ * Locking pattern - blocking
+ * --------------------------
+ *
+ * The blocking write uses the following scheme. The +--+ denotes the spinning
+ * section.
+ *
+ * +- btrfs_tree_lock
+ * |
+ * +- btrfs_set_lock_blocking_write
+ *
+ * - allowed: IO, memory allocations, etc.
+ *
+ * -- btrfs_tree_unlock - note, no explicit unblocking necessary
+ *
+ *
+ * Blocking read is similar.
+ *
+ * +- btrfs_tree_read_lock
+ * |
+ * +- btrfs_set_lock_blocking_read
+ *
+ * - heavy operations allowed
+ *
+ * +- btrfs_tree_read_unlock_blocking
+ * |
+ * +- btrfs_tree_read_unlock
+ *
+ */
+
#ifdef CONFIG_BTRFS_DEBUG
-static void btrfs_assert_spinning_writers_get(struct extent_buffer *eb)
+static inline void btrfs_assert_spinning_writers_get(struct extent_buffer *eb)
{
WARN_ON(eb->spinning_writers);
eb->spinning_writers++;
}
-static void btrfs_assert_spinning_writers_put(struct extent_buffer *eb)
+static inline void btrfs_assert_spinning_writers_put(struct extent_buffer *eb)
{
WARN_ON(eb->spinning_writers != 1);
eb->spinning_writers--;
}
-static void btrfs_assert_no_spinning_writers(struct extent_buffer *eb)
+static inline void btrfs_assert_no_spinning_writers(struct extent_buffer *eb)
{
WARN_ON(eb->spinning_writers);
}
-static void btrfs_assert_spinning_readers_get(struct extent_buffer *eb)
+static inline void btrfs_assert_spinning_readers_get(struct extent_buffer *eb)
{
atomic_inc(&eb->spinning_readers);
}
-static void btrfs_assert_spinning_readers_put(struct extent_buffer *eb)
+static inline void btrfs_assert_spinning_readers_put(struct extent_buffer *eb)
{
WARN_ON(atomic_read(&eb->spinning_readers) == 0);
atomic_dec(&eb->spinning_readers);
}
-static void btrfs_assert_tree_read_locks_get(struct extent_buffer *eb)
+static inline void btrfs_assert_tree_read_locks_get(struct extent_buffer *eb)
{
atomic_inc(&eb->read_locks);
}
-static void btrfs_assert_tree_read_locks_put(struct extent_buffer *eb)
+static inline void btrfs_assert_tree_read_locks_put(struct extent_buffer *eb)
{
atomic_dec(&eb->read_locks);
}
-static void btrfs_assert_tree_read_locked(struct extent_buffer *eb)
+static inline void btrfs_assert_tree_read_locked(struct extent_buffer *eb)
{
BUG_ON(!atomic_read(&eb->read_locks));
}
-static void btrfs_assert_tree_write_locks_get(struct extent_buffer *eb)
+static inline void btrfs_assert_tree_write_locks_get(struct extent_buffer *eb)
{
eb->write_locks++;
}
-static void btrfs_assert_tree_write_locks_put(struct extent_buffer *eb)
+static inline void btrfs_assert_tree_write_locks_put(struct extent_buffer *eb)
{
eb->write_locks--;
}
-void btrfs_assert_tree_locked(struct extent_buffer *eb)
-{
- BUG_ON(!eb->write_locks);
-}
-
#else
static void btrfs_assert_spinning_writers_get(struct extent_buffer *eb) { }
static void btrfs_assert_spinning_writers_put(struct extent_buffer *eb) { }
@@ -81,11 +180,19 @@ static void btrfs_assert_spinning_readers_get(struct extent_buffer *eb) { }
static void btrfs_assert_tree_read_locked(struct extent_buffer *eb) { }
static void btrfs_assert_tree_read_locks_get(struct extent_buffer *eb) { }
static void btrfs_assert_tree_read_locks_put(struct extent_buffer *eb) { }
-void btrfs_assert_tree_locked(struct extent_buffer *eb) { }
static void btrfs_assert_tree_write_locks_get(struct extent_buffer *eb) { }
static void btrfs_assert_tree_write_locks_put(struct extent_buffer *eb) { }
#endif
+/*
+ * Mark already held read lock as blocking. Can be nested in write lock by the
+ * same thread.
+ *
+ * Use when there are potentially long operations ahead so other threads waiting
+ * on the lock will not actively spin but sleep instead.
+ *
+ * The rwlock is released and the blocking reader counter is increased.
+ */
void btrfs_set_lock_blocking_read(struct extent_buffer *eb)
{
trace_btrfs_set_lock_blocking_read(eb);
@@ -102,6 +209,14 @@ void btrfs_set_lock_blocking_read(struct extent_buffer *eb)
read_unlock(&eb->lock);
}
+/*
+ * Mark already held write lock as blocking.
+ *
+ * Use when there are potentially long operations ahead so other threads
+ * waiting on the lock will not actively spin but sleep instead.
+ *
+ * The rwlock is released and the blocking writers counter is set.
+ */
void btrfs_set_lock_blocking_write(struct extent_buffer *eb)
{
trace_btrfs_set_lock_blocking_write(eb);
@@ -115,14 +230,19 @@ void btrfs_set_lock_blocking_write(struct extent_buffer *eb)
if (eb->blocking_writers == 0) {
btrfs_assert_spinning_writers_put(eb);
btrfs_assert_tree_locked(eb);
- eb->blocking_writers++;
+ WRITE_ONCE(eb->blocking_writers, 1);
write_unlock(&eb->lock);
}
}
/*
- * take a spinning read lock. This will wait for any blocking
- * writers
+ * Lock the extent buffer for read. Wait for any writers (spinning or blocking).
+ * Can be nested in write lock by the same thread.
+ *
+ * Use when the locked section performs only lightweight actions and busy waiting
+ * would be cheaper than making other threads do the wait/wake loop.
+ *
+ * The rwlock is held upon exit.
*/
void btrfs_tree_read_lock(struct extent_buffer *eb)
{
@@ -134,23 +254,24 @@ again:
read_lock(&eb->lock);
BUG_ON(eb->blocking_writers == 0 &&
current->pid == eb->lock_owner);
- if (eb->blocking_writers && current->pid == eb->lock_owner) {
- /*
- * This extent is already write-locked by our thread. We allow
- * an additional read lock to be added because it's for the same
- * thread. btrfs_find_all_roots() depends on this as it may be
- * called on a partly (write-)locked tree.
- */
- BUG_ON(eb->lock_nested);
- eb->lock_nested = true;
- read_unlock(&eb->lock);
- trace_btrfs_tree_read_lock(eb, start_ns);
- return;
- }
if (eb->blocking_writers) {
+ if (current->pid == eb->lock_owner) {
+ /*
+ * This extent is already write-locked by our thread.
+ * We allow an additional read lock to be added because
+ * it's for the same thread. btrfs_find_all_roots()
+ * depends on this as it may be called on a partly
+ * (write-)locked tree.
+ */
+ BUG_ON(eb->lock_nested);
+ eb->lock_nested = true;
+ read_unlock(&eb->lock);
+ trace_btrfs_tree_read_lock(eb, start_ns);
+ return;
+ }
read_unlock(&eb->lock);
wait_event(eb->write_lock_wq,
- eb->blocking_writers == 0);
+ READ_ONCE(eb->blocking_writers) == 0);
goto again;
}
btrfs_assert_tree_read_locks_get(eb);
@@ -159,17 +280,19 @@ again:
}
/*
- * take a spinning read lock.
- * returns 1 if we get the read lock and 0 if we don't
- * this won't wait for blocking writers
+ * Lock extent buffer for read, optimistically expecting that there are no
+ * contending blocking writers. If there are, don't wait.
+ *
+ * Return 1 if the rwlock has been taken, 0 otherwise
*/
int btrfs_tree_read_lock_atomic(struct extent_buffer *eb)
{
- if (eb->blocking_writers)
+ if (READ_ONCE(eb->blocking_writers))
return 0;
read_lock(&eb->lock);
- if (eb->blocking_writers) {
+ /* Refetch value after lock */
+ if (READ_ONCE(eb->blocking_writers)) {
read_unlock(&eb->lock);
return 0;
}
@@ -180,18 +303,20 @@ int btrfs_tree_read_lock_atomic(struct extent_buffer *eb)
}
/*
- * returns 1 if we get the read lock and 0 if we don't
- * this won't wait for blocking writers
+ * Try-lock for read. Don't block or wait for contending writers.
+ *
+ * Return 1 if the rwlock has been taken, 0 otherwise
*/
int btrfs_try_tree_read_lock(struct extent_buffer *eb)
{
- if (eb->blocking_writers)
+ if (READ_ONCE(eb->blocking_writers))
return 0;
if (!read_trylock(&eb->lock))
return 0;
- if (eb->blocking_writers) {
+ /* Refetch value after lock */
+ if (READ_ONCE(eb->blocking_writers)) {
read_unlock(&eb->lock);
return 0;
}
@@ -202,16 +327,19 @@ int btrfs_try_tree_read_lock(struct extent_buffer *eb)
}
/*
- * returns 1 if we get the read lock and 0 if we don't
- * this won't wait for blocking writers or readers
+ * Try-lock for write. May block until the lock is uncontended, but does not
+ * wait until it is free.
+ *
+ * Return 1 if the rwlock has been taken, 0 otherwise
*/
int btrfs_try_tree_write_lock(struct extent_buffer *eb)
{
- if (eb->blocking_writers || atomic_read(&eb->blocking_readers))
+ if (READ_ONCE(eb->blocking_writers) || atomic_read(&eb->blocking_readers))
return 0;
write_lock(&eb->lock);
- if (eb->blocking_writers || atomic_read(&eb->blocking_readers)) {
+ /* Refetch value after lock */
+ if (READ_ONCE(eb->blocking_writers) || atomic_read(&eb->blocking_readers)) {
write_unlock(&eb->lock);
return 0;
}
@@ -223,7 +351,10 @@ int btrfs_try_tree_write_lock(struct extent_buffer *eb)
}
/*
- * drop a spinning read lock
+ * Release read lock. Must be used only if the lock is in spinning mode. If
+ * the read lock is nested, must pair with read lock before the write unlock.
+ *
+ * The rwlock is not held upon exit.
*/
void btrfs_tree_read_unlock(struct extent_buffer *eb)
{
@@ -245,7 +376,11 @@ void btrfs_tree_read_unlock(struct extent_buffer *eb)
}
/*
- * drop a blocking read lock
+ * Release read lock, previously set to blocking by a pairing call to
+ * btrfs_set_lock_blocking_read(). Can be nested in write lock by the same
+ * thread.
+ *
+ * State of rwlock is unchanged, last reader wakes waiting threads.
*/
void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
{
@@ -269,8 +404,10 @@ void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
}
/*
- * take a spinning write lock. This will wait for both
- * blocking readers or writers
+ * Lock for write. Wait for all blocking and spinning readers and writers. This
+ * starts context where reader lock could be nested by the same thread.
+ *
+ * The rwlock is held for write upon exit.
*/
void btrfs_tree_lock(struct extent_buffer *eb)
{
@@ -282,9 +419,11 @@ void btrfs_tree_lock(struct extent_buffer *eb)
WARN_ON(eb->lock_owner == current->pid);
again:
wait_event(eb->read_lock_wq, atomic_read(&eb->blocking_readers) == 0);
- wait_event(eb->write_lock_wq, eb->blocking_writers == 0);
+ wait_event(eb->write_lock_wq, READ_ONCE(eb->blocking_writers) == 0);
write_lock(&eb->lock);
- if (atomic_read(&eb->blocking_readers) || eb->blocking_writers) {
+ /* Refetch value after lock */
+ if (atomic_read(&eb->blocking_readers) ||
+ READ_ONCE(eb->blocking_writers)) {
write_unlock(&eb->lock);
goto again;
}
@@ -295,10 +434,19 @@ again:
}
/*
- * drop a spinning or a blocking write lock.
+ * Release the write lock, either blocking or spinning (ie. there's no need
+ * for an explicit blocking unlock, like btrfs_tree_read_unlock_blocking).
+ * This also ends the context for nesting; the read lock must have been
+ * released already.
+ *
+ * Tasks blocked and waiting are woken, rwlock is not held upon exit.
*/
void btrfs_tree_unlock(struct extent_buffer *eb)
{
+ /*
+ * This is read both locked and unlocked but always by the same thread
+ * that already owns the lock, so we don't need to use READ_ONCE.
+ */
int blockers = eb->blocking_writers;
BUG_ON(blockers > 1);
@@ -310,7 +458,8 @@ void btrfs_tree_unlock(struct extent_buffer *eb)
if (blockers) {
btrfs_assert_no_spinning_writers(eb);
- eb->blocking_writers--;
+ /* Unlocked write */
+ WRITE_ONCE(eb->blocking_writers, 0);
/*
* We need to order modifying blocking_writers above with
* actually waking up the sleepers to ensure they see the
@@ -322,3 +471,55 @@ void btrfs_tree_unlock(struct extent_buffer *eb)
write_unlock(&eb->lock);
}
}
+
+/*
+ * Set all locked nodes in the path to blocking locks. This should be done
+ * before scheduling
+ */
+void btrfs_set_path_blocking(struct btrfs_path *p)
+{
+ int i;
+
+ for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
+ if (!p->nodes[i] || !p->locks[i])
+ continue;
+ /*
+ * If we currently have a spinning reader or writer lock this
+ * will bump the count of blocking holders and drop the
+ * spinlock.
+ */
+ if (p->locks[i] == BTRFS_READ_LOCK) {
+ btrfs_set_lock_blocking_read(p->nodes[i]);
+ p->locks[i] = BTRFS_READ_LOCK_BLOCKING;
+ } else if (p->locks[i] == BTRFS_WRITE_LOCK) {
+ btrfs_set_lock_blocking_write(p->nodes[i]);
+ p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING;
+ }
+ }
+}
+
+/*
+ * This releases any locks held in the path starting at level and going all the
+ * way up to the root.
+ *
+ * btrfs_search_slot will keep the lock held on higher nodes in a few corner
+ * cases, such as COW of the block at slot zero in the node. This ignores
+ * those rules, and it should only be called when there are no more updates to
+ * be done higher up in the tree.
+ */
+void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
+{
+ int i;
+
+ if (path->keep_locks)
+ return;
+
+ for (i = level; i < BTRFS_MAX_LEVEL; i++) {
+ if (!path->nodes[i])
+ continue;
+ if (!path->locks[i])
+ continue;
+ btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
+ path->locks[i] = 0;
+ }
+}
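
As a rough userspace model of the spinning-to-blocking handoff documented above -- take the rwlock, trade it for a blocking_writers count so waiters sleep rather than spin, and refetch the counter after every lock acquisition -- here is a sketch built on pthreads. It only approximates the kernel's barriers and wait queues, and every name is illustrative:

#include <pthread.h>
#include <stdio.h>

struct model_eb {
	pthread_rwlock_t lock;		/* the low-level rwlock */
	pthread_mutex_t wait_lock;	/* protects blocking_writers */
	pthread_cond_t write_wq;	/* models eb->write_lock_wq */
	int blocking_writers;
};

static struct model_eb eb = {
	PTHREAD_RWLOCK_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
	PTHREAD_COND_INITIALIZER, 0
};

static void model_tree_lock(void)
{
	for (;;) {
		int blocked;

		/* Sleep, don't spin, while a blocking writer is active. */
		pthread_mutex_lock(&eb.wait_lock);
		while (eb.blocking_writers)
			pthread_cond_wait(&eb.write_wq, &eb.wait_lock);
		pthread_mutex_unlock(&eb.wait_lock);

		pthread_rwlock_wrlock(&eb.lock);
		/* Refetch after taking the lock, as the hunks above do. */
		pthread_mutex_lock(&eb.wait_lock);
		blocked = eb.blocking_writers;
		pthread_mutex_unlock(&eb.wait_lock);
		if (!blocked)
			return;		/* locked, spinning mode */
		pthread_rwlock_unlock(&eb.lock);	/* raced, retry */
	}
}

static void model_set_lock_blocking_write(void)
{
	pthread_mutex_lock(&eb.wait_lock);
	eb.blocking_writers = 1;
	pthread_mutex_unlock(&eb.wait_lock);
	pthread_rwlock_unlock(&eb.lock);	/* heavy work may sleep now */
}

static void model_tree_unlock(void)
{
	pthread_mutex_lock(&eb.wait_lock);
	eb.blocking_writers = 0;
	pthread_cond_broadcast(&eb.write_wq);	/* wake sleeping waiters */
	pthread_mutex_unlock(&eb.wait_lock);
}

int main(void)
{
	model_tree_lock();			/* spinning write lock */
	model_set_lock_blocking_write();	/* trade it for a counter */
	/* ... IO, memory allocations ... */
	model_tree_unlock();
	printf("done\n");
	return 0;
}

The real code gets away with READ_ONCE/WRITE_ONCE plus explicit ordering before waking sleepers (see the comment in btrfs_tree_unlock above, about ordering the blocking_writers store against the wakeup); the mutex here is the blunt userspace substitute for that.
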
diff --git a/fs/btrfs/locking.h b/fs/btrfs/locking.h
index b775a4207ed9..21a285883e89 100644
--- a/fs/btrfs/locking.h
+++ b/fs/btrfs/locking.h
@@ -6,6 +6,8 @@
#ifndef BTRFS_LOCKING_H
#define BTRFS_LOCKING_H
+#include "extent_io.h"
+
#define BTRFS_WRITE_LOCK 1
#define BTRFS_READ_LOCK 2
#define BTRFS_WRITE_LOCK_BLOCKING 3
@@ -19,11 +21,20 @@ void btrfs_tree_read_unlock(struct extent_buffer *eb);
void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb);
void btrfs_set_lock_blocking_read(struct extent_buffer *eb);
void btrfs_set_lock_blocking_write(struct extent_buffer *eb);
-void btrfs_assert_tree_locked(struct extent_buffer *eb);
int btrfs_try_tree_read_lock(struct extent_buffer *eb);
int btrfs_try_tree_write_lock(struct extent_buffer *eb);
int btrfs_tree_read_lock_atomic(struct extent_buffer *eb);
+#ifdef CONFIG_BTRFS_DEBUG
+static inline void btrfs_assert_tree_locked(struct extent_buffer *eb) {
+ BUG_ON(!eb->write_locks);
+}
+#else
+static inline void btrfs_assert_tree_locked(struct extent_buffer *eb) { }
+#endif
+
+void btrfs_set_path_blocking(struct btrfs_path *p);
+void btrfs_unlock_up_safe(struct btrfs_path *path, int level);
static inline void btrfs_tree_unlock_rw(struct extent_buffer *eb, int rw)
{
diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c
index acad4174f68d..aa9cd11f4b78 100644
--- a/fs/btrfs/lzo.c
+++ b/fs/btrfs/lzo.c
@@ -63,27 +63,7 @@ struct workspace {
static struct workspace_manager wsm;
-static void lzo_init_workspace_manager(void)
-{
- btrfs_init_workspace_manager(&wsm, &btrfs_lzo_compress);
-}
-
-static void lzo_cleanup_workspace_manager(void)
-{
- btrfs_cleanup_workspace_manager(&wsm);
-}
-
-static struct list_head *lzo_get_workspace(unsigned int level)
-{
- return btrfs_get_workspace(&wsm, level);
-}
-
-static void lzo_put_workspace(struct list_head *ws)
-{
- btrfs_put_workspace(&wsm, ws);
-}
-
-static void lzo_free_workspace(struct list_head *ws)
+void lzo_free_workspace(struct list_head *ws)
{
struct workspace *workspace = list_entry(ws, struct workspace, list);
@@ -93,7 +73,7 @@ static void lzo_free_workspace(struct list_head *ws)
kfree(workspace);
}
-static struct list_head *lzo_alloc_workspace(unsigned int level)
+struct list_head *lzo_alloc_workspace(unsigned int level)
{
struct workspace *workspace;
@@ -131,13 +111,9 @@ static inline size_t read_compress_length(const char *buf)
return le32_to_cpu(dlen);
}
-static int lzo_compress_pages(struct list_head *ws,
- struct address_space *mapping,
- u64 start,
- struct page **pages,
- unsigned long *out_pages,
- unsigned long *total_in,
- unsigned long *total_out)
+int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
+ u64 start, struct page **pages, unsigned long *out_pages,
+ unsigned long *total_in, unsigned long *total_out)
{
struct workspace *workspace = list_entry(ws, struct workspace, list);
int ret = 0;
@@ -303,7 +279,7 @@ out:
return ret;
}
-static int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
+int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
{
struct workspace *workspace = list_entry(ws, struct workspace, list);
int ret = 0, ret2;
@@ -444,10 +420,9 @@ done:
return ret;
}
-static int lzo_decompress(struct list_head *ws, unsigned char *data_in,
- struct page *dest_page,
- unsigned long start_byte,
- size_t srclen, size_t destlen)
+int lzo_decompress(struct list_head *ws, unsigned char *data_in,
+ struct page *dest_page, unsigned long start_byte, size_t srclen,
+ size_t destlen)
{
struct workspace *workspace = list_entry(ws, struct workspace, list);
size_t in_len;
@@ -508,15 +483,7 @@ out:
}
const struct btrfs_compress_op btrfs_lzo_compress = {
- .init_workspace_manager = lzo_init_workspace_manager,
- .cleanup_workspace_manager = lzo_cleanup_workspace_manager,
- .get_workspace = lzo_get_workspace,
- .put_workspace = lzo_put_workspace,
- .alloc_workspace = lzo_alloc_workspace,
- .free_workspace = lzo_free_workspace,
- .compress_pages = lzo_compress_pages,
- .decompress_bio = lzo_decompress_bio,
- .decompress = lzo_decompress,
+ .workspace_manager = &wsm,
.max_level = 1,
.default_level = 1,
};
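
The lzo hunks complete a conversion visible across the compression code: the per-algorithm workspace callbacks collapse into a single workspace_manager pointer, with the remaining entry points exported for direct calls. A toy sketch of that dispatch change, with illustrative names:

#include <stdio.h>

struct workspace_manager { const char *name; };

struct compress_op {
	struct workspace_manager *workspace_manager;	/* was ~9 callbacks */
	int max_level;
	int default_level;
};

static struct workspace_manager lzo_wsm = { .name = "lzo" };

static const struct compress_op lzo_op = {
	.workspace_manager = &lzo_wsm,
	.max_level = 1,
	.default_level = 1,
};

/* Generic code reaches the manager directly instead of indirecting
 * through op->get_workspace() and friends. */
static void get_workspace(const struct compress_op *op)
{
	printf("using %s workspace manager\n", op->workspace_manager->name);
}

int main(void)
{
	get_workspace(&lzo_op);
	return 0;
}

One fewer indirection per call, and the shared manager logic lives in one place instead of being re-wrapped per algorithm.
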
diff --git a/fs/btrfs/misc.h b/fs/btrfs/misc.h
index 7d564924dfeb..72bab64ecf60 100644
--- a/fs/btrfs/misc.h
+++ b/fs/btrfs/misc.h
@@ -47,4 +47,15 @@ static inline u64 div_factor_fine(u64 num, int factor)
return div_u64(num, 100);
}
+/* Copy of is_power_of_two that is 64-bit safe */
+static inline bool is_power_of_two_u64(u64 n)
+{
+ return n != 0 && (n & (n - 1)) == 0;
+}
+
+static inline bool has_single_bit_set(u64 n)
+{
+ return is_power_of_two_u64(n);
+}
+
#endif
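
The new helper relies on the classic bit trick: n & (n - 1) clears the lowest set bit, so the result is zero exactly when at most one bit was set, and the n != 0 test then leaves "exactly one bit set". A standalone copy for experimentation:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static bool has_single_bit_set(uint64_t n)
{
	/* n & (n - 1) clears the lowest set bit; a zero result means at
	 * most one bit was set, and n != 0 rules out "no bits". */
	return n != 0 && (n & (n - 1)) == 0;
}

int main(void)
{
	assert(has_single_bit_set(1));
	assert(has_single_bit_set(1ULL << 63));	/* 64-bit safe, unlike plain int */
	assert(!has_single_bit_set(0));
	assert(!has_single_bit_set(6));		/* 0b110: two bits set */
	return 0;
}
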
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index 24b6c72b9a59..fb09bc2f8e4d 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -547,7 +547,6 @@ u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
spin_unlock(&root->ordered_extent_lock);
btrfs_init_work(&ordered->flush_work,
- btrfs_flush_delalloc_helper,
btrfs_run_ordered_extent_work, NULL, NULL);
list_add_tail(&ordered->work_list, &works);
btrfs_queue_work(fs_info->flush_workers, &ordered->flush_work);
@@ -573,12 +572,11 @@ u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
return count;
}
-u64 btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
+void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
const u64 range_start, const u64 range_len)
{
struct btrfs_root *root;
struct list_head splice;
- u64 total_done = 0;
u64 done;
INIT_LIST_HEAD(&splice);
@@ -598,7 +596,6 @@ u64 btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
done = btrfs_wait_ordered_extents(root, nr,
range_start, range_len);
btrfs_put_fs_root(root);
- total_done += done;
spin_lock(&fs_info->ordered_root_lock);
if (nr != U64_MAX) {
@@ -608,8 +605,6 @@ u64 btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
list_splice_tail(&splice, &fs_info->ordered_roots);
spin_unlock(&fs_info->ordered_root_lock);
mutex_unlock(&fs_info->ordered_operations_mutex);
-
- return total_done;
}
/*
diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h
index 5204171ea962..4eb0319a86d7 100644
--- a/fs/btrfs/ordered-data.h
+++ b/fs/btrfs/ordered-data.h
@@ -186,7 +186,7 @@ int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
u8 *sum, int len);
u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
const u64 range_start, const u64 range_len);
-u64 btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
+void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
const u64 range_start, const u64 range_len);
void btrfs_lock_and_flush_ordered_range(struct extent_io_tree *tree,
struct btrfs_inode *inode, u64 start,
diff --git a/fs/btrfs/print-tree.c b/fs/btrfs/print-tree.c
index 9cb50577d982..873b6b694107 100644
--- a/fs/btrfs/print-tree.c
+++ b/fs/btrfs/print-tree.c
@@ -266,9 +266,9 @@ void btrfs_print_leaf(struct extent_buffer *l)
struct btrfs_block_group_item);
pr_info(
"\t\tblock group used %llu chunk_objectid %llu flags %llu\n",
- btrfs_disk_block_group_used(l, bi),
- btrfs_disk_block_group_chunk_objectid(l, bi),
- btrfs_disk_block_group_flags(l, bi));
+ btrfs_block_group_used(l, bi),
+ btrfs_block_group_chunk_objectid(l, bi),
+ btrfs_block_group_flags(l, bi));
break;
case BTRFS_CHUNK_ITEM_KEY:
print_chunk(l, btrfs_item_ptr(l, i,
diff --git a/fs/btrfs/props.c b/fs/btrfs/props.c
index 1e664e0b59b8..deb59e7cfcac 100644
--- a/fs/btrfs/props.c
+++ b/fs/btrfs/props.c
@@ -416,11 +416,11 @@ int btrfs_subvol_inherit_props(struct btrfs_trans_handle *trans,
key.type = BTRFS_INODE_ITEM_KEY;
key.offset = 0;
- parent_inode = btrfs_iget(sb, &key, parent_root, NULL);
+ parent_inode = btrfs_iget(sb, &key, parent_root);
if (IS_ERR(parent_inode))
return PTR_ERR(parent_inode);
- child_inode = btrfs_iget(sb, &key, root, NULL);
+ child_inode = btrfs_iget(sb, &key, root);
if (IS_ERR(child_inode)) {
iput(parent_inode);
return PTR_ERR(child_inode);
@@ -437,8 +437,6 @@ void __init btrfs_props_init(void)
{
int i;
- hash_init(prop_handlers_ht);
-
for (i = 0; i < ARRAY_SIZE(prop_handlers); i++) {
struct prop_handler *p = &prop_handlers[i];
u64 h = btrfs_name_hash(p->xattr_name, strlen(p->xattr_name));
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 3ad151655eb8..93aeb2e539a4 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -1811,7 +1811,7 @@ static int qgroup_trace_extent_swap(struct btrfs_trans_handle* trans,
btrfs_item_key_to_cpu(dst_path->nodes[dst_level], &key, 0);
/* For src_path */
- extent_buffer_get(src_eb);
+ atomic_inc(&src_eb->refs);
src_path->nodes[root_level] = src_eb;
src_path->slots[root_level] = dst_path->slots[root_level];
src_path->locks[root_level] = 0;
@@ -2067,7 +2067,7 @@ static int qgroup_trace_subtree_swap(struct btrfs_trans_handle *trans,
goto out;
}
/* For dst_path */
- extent_buffer_get(dst_eb);
+ atomic_inc(&dst_eb->refs);
dst_path->nodes[level] = dst_eb;
dst_path->slots[level] = 0;
dst_path->locks[level] = 0;
@@ -2126,7 +2126,7 @@ int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans,
* walk back up the tree (adjusting slot pointers as we go)
* and restart the search process.
*/
- extent_buffer_get(root_eb); /* For path */
+ atomic_inc(&root_eb->refs); /* For path */
path->nodes[root_level] = root_eb;
path->slots[root_level] = 0;
path->locks[root_level] = 0; /* so release_path doesn't try to unlock */
@@ -3277,10 +3277,7 @@ qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
spin_unlock(&fs_info->qgroup_lock);
mutex_unlock(&fs_info->qgroup_rescan_lock);
- memset(&fs_info->qgroup_rescan_work, 0,
- sizeof(fs_info->qgroup_rescan_work));
btrfs_init_work(&fs_info->qgroup_rescan_work,
- btrfs_qgroup_rescan_helper,
btrfs_qgroup_rescan_worker, NULL, NULL);
return 0;
}
@@ -3826,7 +3823,7 @@ out:
*/
int btrfs_qgroup_add_swapped_blocks(struct btrfs_trans_handle *trans,
struct btrfs_root *subvol_root,
- struct btrfs_block_group_cache *bg,
+ struct btrfs_block_group *bg,
struct extent_buffer *subvol_parent, int subvol_slot,
struct extent_buffer *reloc_parent, int reloc_slot,
u64 last_snapshot)
diff --git a/fs/btrfs/qgroup.h b/fs/btrfs/qgroup.h
index 46ba7bd2961c..236f12224d52 100644
--- a/fs/btrfs/qgroup.h
+++ b/fs/btrfs/qgroup.h
@@ -408,7 +408,7 @@ void btrfs_qgroup_init_swapped_blocks(
void btrfs_qgroup_clean_swapped_blocks(struct btrfs_root *root);
int btrfs_qgroup_add_swapped_blocks(struct btrfs_trans_handle *trans,
struct btrfs_root *subvol_root,
- struct btrfs_block_group_cache *bg,
+ struct btrfs_block_group *bg,
struct extent_buffer *subvol_parent, int subvol_slot,
struct extent_buffer *reloc_parent, int reloc_slot,
u64 last_snapshot);
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
index 57a2ac721985..a8e53c8e7b01 100644
--- a/fs/btrfs/raid56.c
+++ b/fs/btrfs/raid56.c
@@ -190,7 +190,7 @@ static void scrub_parity_work(struct btrfs_work *work);
static void start_async_work(struct btrfs_raid_bio *rbio, btrfs_func_t work_func)
{
- btrfs_init_work(&rbio->work, btrfs_rmw_helper, work_func, NULL, NULL);
+ btrfs_init_work(&rbio->work, work_func, NULL, NULL);
btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
}
@@ -671,8 +671,7 @@ static struct page *rbio_qstripe_page(struct btrfs_raid_bio *rbio, int index)
*/
static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio)
{
- int bucket = rbio_bucket(rbio);
- struct btrfs_stripe_hash *h = rbio->fs_info->stripe_hash_table->table + bucket;
+ struct btrfs_stripe_hash *h;
struct btrfs_raid_bio *cur;
struct btrfs_raid_bio *pending;
unsigned long flags;
@@ -680,64 +679,63 @@ static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio)
struct btrfs_raid_bio *cache_drop = NULL;
int ret = 0;
+ h = rbio->fs_info->stripe_hash_table->table + rbio_bucket(rbio);
+
spin_lock_irqsave(&h->lock, flags);
list_for_each_entry(cur, &h->hash_list, hash_list) {
- if (cur->bbio->raid_map[0] == rbio->bbio->raid_map[0]) {
- spin_lock(&cur->bio_list_lock);
-
- /* can we steal this cached rbio's pages? */
- if (bio_list_empty(&cur->bio_list) &&
- list_empty(&cur->plug_list) &&
- test_bit(RBIO_CACHE_BIT, &cur->flags) &&
- !test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) {
- list_del_init(&cur->hash_list);
- refcount_dec(&cur->refs);
-
- steal_rbio(cur, rbio);
- cache_drop = cur;
- spin_unlock(&cur->bio_list_lock);
+ if (cur->bbio->raid_map[0] != rbio->bbio->raid_map[0])
+ continue;
- goto lockit;
- }
+ spin_lock(&cur->bio_list_lock);
- /* can we merge into the lock owner? */
- if (rbio_can_merge(cur, rbio)) {
- merge_rbio(cur, rbio);
- spin_unlock(&cur->bio_list_lock);
- freeit = rbio;
- ret = 1;
- goto out;
- }
+ /* Can we steal this cached rbio's pages? */
+ if (bio_list_empty(&cur->bio_list) &&
+ list_empty(&cur->plug_list) &&
+ test_bit(RBIO_CACHE_BIT, &cur->flags) &&
+ !test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) {
+ list_del_init(&cur->hash_list);
+ refcount_dec(&cur->refs);
+ steal_rbio(cur, rbio);
+ cache_drop = cur;
+ spin_unlock(&cur->bio_list_lock);
- /*
- * we couldn't merge with the running
- * rbio, see if we can merge with the
- * pending ones. We don't have to
- * check for rmw_locked because there
- * is no way they are inside finish_rmw
- * right now
- */
- list_for_each_entry(pending, &cur->plug_list,
- plug_list) {
- if (rbio_can_merge(pending, rbio)) {
- merge_rbio(pending, rbio);
- spin_unlock(&cur->bio_list_lock);
- freeit = rbio;
- ret = 1;
- goto out;
- }
- }
+ goto lockit;
+ }
- /* no merging, put us on the tail of the plug list,
- * our rbio will be started with the currently
- * running rbio unlocks
- */
- list_add_tail(&rbio->plug_list, &cur->plug_list);
+ /* Can we merge into the lock owner? */
+ if (rbio_can_merge(cur, rbio)) {
+ merge_rbio(cur, rbio);
spin_unlock(&cur->bio_list_lock);
+ freeit = rbio;
ret = 1;
goto out;
}
+
+ /*
+ * We couldn't merge with the running rbio, see if we can merge
+ * with the pending ones. We don't have to check for rmw_locked
+ * because there is no way they are inside finish_rmw right now
+ */
+ list_for_each_entry(pending, &cur->plug_list, plug_list) {
+ if (rbio_can_merge(pending, rbio)) {
+ merge_rbio(pending, rbio);
+ spin_unlock(&cur->bio_list_lock);
+ freeit = rbio;
+ ret = 1;
+ goto out;
+ }
+ }
+
+ /*
+ * No merging, put us on the tail of the plug list, our rbio
+ * will be started with the currently running rbio unlocks
+ */
+ list_add_tail(&rbio->plug_list, &cur->plug_list);
+ spin_unlock(&cur->bio_list_lock);
+ ret = 1;
+ goto out;
}
lockit:
refcount_inc(&rbio->refs);
@@ -1743,8 +1741,7 @@ static void btrfs_raid_unplug(struct blk_plug_cb *cb, bool from_schedule)
plug = container_of(cb, struct btrfs_plug_cb, cb);
if (from_schedule) {
- btrfs_init_work(&plug->work, btrfs_rmw_helper,
- unplug_work, NULL, NULL);
+ btrfs_init_work(&plug->work, unplug_work, NULL, NULL);
btrfs_queue_work(plug->info->rmw_workers,
&plug->work);
return;
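
The lock_stripe_add() rewrite above is a pure de-indentation: bailing out of non-matching list entries with an early continue lets the interesting work sit one level shallower, which is what makes the steal/merge/plug cases readable. The shape of the transform, on made-up data:

#include <stdio.h>

struct entry { unsigned long key; const char *name; };

static const struct entry table[] = {
	{ 1, "a" }, { 2, "b" }, { 2, "c" }, { 3, "d" },
};

static void visit_matches(unsigned long key)
{
	for (unsigned int i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
		if (table[i].key != key)
			continue;	/* was: one big "if (match) { ... }" */

		/* The real work now sits at a single indent level. */
		printf("match: %s\n", table[i].name);
	}
}

int main(void)
{
	visit_matches(2);
	return 0;
}
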
diff --git a/fs/btrfs/reada.c b/fs/btrfs/reada.c
index ee6f60547a8d..243a2e44526e 100644
--- a/fs/btrfs/reada.c
+++ b/fs/btrfs/reada.c
@@ -227,7 +227,7 @@ static struct reada_zone *reada_find_zone(struct btrfs_device *dev, u64 logical,
struct btrfs_fs_info *fs_info = dev->fs_info;
int ret;
struct reada_zone *zone;
- struct btrfs_block_group_cache *cache = NULL;
+ struct btrfs_block_group *cache = NULL;
u64 start;
u64 end;
int i;
@@ -248,8 +248,8 @@ static struct reada_zone *reada_find_zone(struct btrfs_device *dev, u64 logical,
if (!cache)
return NULL;
- start = cache->key.objectid;
- end = start + cache->key.offset - 1;
+ start = cache->start;
+ end = start + cache->length - 1;
btrfs_put_block_group(cache);
zone = kzalloc(sizeof(*zone), GFP_KERNEL);
@@ -752,21 +752,19 @@ static int reada_start_machine_dev(struct btrfs_device *dev)
static void reada_start_machine_worker(struct btrfs_work *work)
{
struct reada_machine_work *rmw;
- struct btrfs_fs_info *fs_info;
int old_ioprio;
rmw = container_of(work, struct reada_machine_work, work);
- fs_info = rmw->fs_info;
-
- kfree(rmw);
old_ioprio = IOPRIO_PRIO_VALUE(task_nice_ioclass(current),
task_nice_ioprio(current));
set_task_ioprio(current, BTRFS_IOPRIO_READA);
- __reada_start_machine(fs_info);
+ __reada_start_machine(rmw->fs_info);
set_task_ioprio(current, old_ioprio);
- atomic_dec(&fs_info->reada_works_cnt);
+ atomic_dec(&rmw->fs_info->reada_works_cnt);
+
+ kfree(rmw);
}
static void __reada_start_machine(struct btrfs_fs_info *fs_info)
@@ -821,8 +819,7 @@ static void reada_start_machine(struct btrfs_fs_info *fs_info)
/* FIXME we cannot handle this properly right now */
BUG();
}
- btrfs_init_work(&rmw->work, btrfs_readahead_helper,
- reada_start_machine_worker, NULL, NULL);
+ btrfs_init_work(&rmw->work, reada_start_machine_worker, NULL, NULL);
rmw->fs_info = fs_info;
btrfs_queue_work(fs_info->readahead_workers, &rmw->work);
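
The reada worker hunk above stops caching rmw->fs_info in a local ahead of an early kfree(rmw) and instead keeps the work item alive until its last dereference, which makes the lifetime obvious at a glance: free the allocation last, after the final field access. A minimal model of that ordering rule, with illustrative types:

#include <stdio.h>
#include <stdlib.h>

struct fs_info { int reada_works_cnt; };
struct work_item { struct fs_info *fs_info; };

static void worker(struct work_item *rmw)
{
	/* ... the actual readahead work, using rmw->fs_info ... */
	rmw->fs_info->reada_works_cnt--;	/* last use of the item */
	free(rmw);				/* free strictly after it */
}

int main(void)
{
	struct fs_info fs = { .reada_works_cnt = 1 };
	struct work_item *rmw = malloc(sizeof(*rmw));

	if (!rmw)
		return 1;
	rmw->fs_info = &fs;
	worker(rmw);
	printf("works_cnt = %d\n", fs.reada_works_cnt);
	return 0;
}
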
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 5cd42b66818c..d897a8e5e430 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -147,7 +147,7 @@ struct file_extent_cluster {
struct reloc_control {
/* block group to relocate */
- struct btrfs_block_group_cache *block_group;
+ struct btrfs_block_group *block_group;
/* extent tree */
struct btrfs_root *extent_root;
/* inode for moving data */
@@ -1560,11 +1560,10 @@ again:
return NULL;
}
-static int in_block_group(u64 bytenr,
- struct btrfs_block_group_cache *block_group)
+static int in_block_group(u64 bytenr, struct btrfs_block_group *block_group)
{
- if (bytenr >= block_group->key.objectid &&
- bytenr < block_group->key.objectid + block_group->key.offset)
+ if (bytenr >= block_group->start &&
+ bytenr < block_group->start + block_group->length)
return 1;
return 0;
}
@@ -2246,7 +2245,7 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
level = btrfs_root_level(root_item);
- extent_buffer_get(reloc_root->node);
+ atomic_inc(&reloc_root->node->refs);
path->nodes[level] = reloc_root->node;
path->slots[level] = 0;
} else {
@@ -3195,7 +3194,6 @@ static noinline_for_stack
int setup_extent_mapping(struct inode *inode, u64 start, u64 end,
u64 block_start)
{
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
struct extent_map *em;
int ret = 0;
@@ -3208,7 +3206,6 @@ int setup_extent_mapping(struct inode *inode, u64 start, u64 end,
em->len = end + 1 - start;
em->block_len = em->len;
em->block_start = block_start;
- em->bdev = fs_info->fs_devices->latest_bdev;
set_bit(EXTENT_FLAG_PINNED, &em->flags);
lock_extent(&BTRFS_I(inode)->io_tree, start, end);
@@ -3544,7 +3541,7 @@ static int block_use_full_backref(struct reloc_control *rc,
}
static int delete_block_group_cache(struct btrfs_fs_info *fs_info,
- struct btrfs_block_group_cache *block_group,
+ struct btrfs_block_group *block_group,
struct inode *inode,
u64 ino)
{
@@ -3560,7 +3557,7 @@ static int delete_block_group_cache(struct btrfs_fs_info *fs_info,
key.type = BTRFS_INODE_ITEM_KEY;
key.offset = 0;
- inode = btrfs_iget(fs_info->sb, &key, root, NULL);
+ inode = btrfs_iget(fs_info->sb, &key, root);
if (IS_ERR(inode))
return -ENOENT;
@@ -3863,7 +3860,7 @@ int find_next_extent(struct reloc_control *rc, struct btrfs_path *path,
u64 start, end, last;
int ret;
- last = rc->block_group->key.objectid + rc->block_group->key.offset;
+ last = rc->block_group->start + rc->block_group->length;
while (1) {
cond_resched();
if (rc->search_start >= last) {
@@ -3980,7 +3977,7 @@ int prepare_to_relocate(struct reloc_control *rc)
return -ENOMEM;
memset(&rc->cluster, 0, sizeof(rc->cluster));
- rc->search_start = rc->block_group->key.objectid;
+ rc->search_start = rc->block_group->start;
rc->extents_found = 0;
rc->nodes_relocated = 0;
rc->merging_rsv_size = 0;
@@ -4219,7 +4216,7 @@ out:
*/
static noinline_for_stack
struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info,
- struct btrfs_block_group_cache *group)
+ struct btrfs_block_group *group)
{
struct inode *inode = NULL;
struct btrfs_trans_handle *trans;
@@ -4246,9 +4243,9 @@ struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info,
key.objectid = objectid;
key.type = BTRFS_INODE_ITEM_KEY;
key.offset = 0;
- inode = btrfs_iget(fs_info->sb, &key, root, NULL);
+ inode = btrfs_iget(fs_info->sb, &key, root);
BUG_ON(IS_ERR(inode));
- BTRFS_I(inode)->index_cnt = group->key.objectid;
+ BTRFS_I(inode)->index_cnt = group->start;
err = btrfs_orphan_add(trans, BTRFS_I(inode));
out:
@@ -4283,7 +4280,7 @@ static struct reloc_control *alloc_reloc_control(struct btrfs_fs_info *fs_info)
* Print the block group being relocated
*/
static void describe_relocation(struct btrfs_fs_info *fs_info,
- struct btrfs_block_group_cache *block_group)
+ struct btrfs_block_group *block_group)
{
char buf[128] = {'\0'};
@@ -4291,7 +4288,7 @@ static void describe_relocation(struct btrfs_fs_info *fs_info,
btrfs_info(fs_info,
"relocating block group %llu flags %s",
- block_group->key.objectid, buf);
+ block_group->start, buf);
}
/*
@@ -4299,7 +4296,7 @@ static void describe_relocation(struct btrfs_fs_info *fs_info,
*/
int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start)
{
- struct btrfs_block_group_cache *bg;
+ struct btrfs_block_group *bg;
struct btrfs_root *extent_root = fs_info->extent_root;
struct reloc_control *rc;
struct inode *inode;
@@ -4326,7 +4323,7 @@ int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start)
rc->extent_root = extent_root;
rc->block_group = bg;
- ret = btrfs_inc_block_group_ro(rc->block_group);
+ ret = btrfs_inc_block_group_ro(rc->block_group, true);
if (ret) {
err = ret;
goto out;
@@ -4364,8 +4361,8 @@ int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start)
btrfs_wait_block_group_reservations(rc->block_group);
btrfs_wait_nocow_writers(rc->block_group);
btrfs_wait_ordered_roots(fs_info, U64_MAX,
- rc->block_group->key.objectid,
- rc->block_group->key.offset);
+ rc->block_group->start,
+ rc->block_group->length);
while (1) {
mutex_lock(&fs_info->cleaner_mutex);
@@ -4405,7 +4402,7 @@ int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start)
WARN_ON(rc->block_group->pinned > 0);
WARN_ON(rc->block_group->reserved > 0);
- WARN_ON(btrfs_block_group_used(&rc->block_group->item) > 0);
+ WARN_ON(rc->block_group->used > 0);
out:
if (err && rw)
btrfs_dec_block_group_ro(rc->block_group);
@@ -4688,7 +4685,7 @@ int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
node->new_bytenr != buf->start);
drop_node_buffer(node);
- extent_buffer_get(cow);
+ atomic_inc(&cow->refs);
node->eb = cow;
node->new_bytenr = cow->start;
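
The btrfs_block_group_cache to btrfs_block_group conversion running through this file replaces the embedded btrfs_key with explicit fields: cache->key.objectid becomes cache->start, cache->key.offset becomes cache->length, and btrfs_block_group_used(&cache->item) becomes cache->used. A tiny model of the mapping, with struct layouts simplified for illustration:

#include <stdint.h>

/* Before: the block group range lived inside a btrfs_key. */
struct btrfs_key_model {
        uint64_t objectid;      /* block group start */
        uint8_t type;           /* BTRFS_BLOCK_GROUP_ITEM_KEY */
        uint64_t offset;        /* block group length */
};

/* After: explicit, self-describing fields. */
struct btrfs_block_group_model {
        uint64_t start;
        uint64_t length;
        uint64_t used;
};

/* in_block_group() from the hunk above, in terms of the new fields. */
static int in_block_group(uint64_t bytenr,
                          const struct btrfs_block_group_model *bg)
{
        return bytenr >= bg->start && bytenr < bg->start + bg->length;
}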
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index f7d4e03f4c5d..21de630b0730 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -389,8 +389,7 @@ static struct full_stripe_lock *search_full_stripe_lock(
*
* Caller must ensure @cache is a RAID56 block group.
*/
-static u64 get_full_stripe_logical(struct btrfs_block_group_cache *cache,
- u64 bytenr)
+static u64 get_full_stripe_logical(struct btrfs_block_group *cache, u64 bytenr)
{
u64 ret;
@@ -404,8 +403,8 @@ static u64 get_full_stripe_logical(struct btrfs_block_group_cache *cache,
* round_down() can only handle power of 2, while RAID56 full
* stripe length can be 64KiB * n, so we need to manually round down.
*/
- ret = div64_u64(bytenr - cache->key.objectid, cache->full_stripe_len) *
- cache->full_stripe_len + cache->key.objectid;
+ ret = div64_u64(bytenr - cache->start, cache->full_stripe_len) *
+ cache->full_stripe_len + cache->start;
return ret;
}
@@ -423,7 +422,7 @@ static u64 get_full_stripe_logical(struct btrfs_block_group_cache *cache,
static int lock_full_stripe(struct btrfs_fs_info *fs_info, u64 bytenr,
bool *locked_ret)
{
- struct btrfs_block_group_cache *bg_cache;
+ struct btrfs_block_group *bg_cache;
struct btrfs_full_stripe_locks_tree *locks_root;
struct full_stripe_lock *existing;
u64 fstripe_start;
@@ -470,7 +469,7 @@ out:
static int unlock_full_stripe(struct btrfs_fs_info *fs_info, u64 bytenr,
bool locked)
{
- struct btrfs_block_group_cache *bg_cache;
+ struct btrfs_block_group *bg_cache;
struct btrfs_full_stripe_locks_tree *locks_root;
struct full_stripe_lock *fstripe_lock;
u64 fstripe_start;
@@ -598,8 +597,8 @@ static noinline_for_stack struct scrub_ctx *scrub_setup_ctx(
sbio->index = i;
sbio->sctx = sctx;
sbio->page_count = 0;
- btrfs_init_work(&sbio->work, btrfs_scrub_helper,
- scrub_bio_end_io_worker, NULL, NULL);
+ btrfs_init_work(&sbio->work, scrub_bio_end_io_worker, NULL,
+ NULL);
if (i != SCRUB_BIOS_PER_SCTX - 1)
sctx->bios[i]->next_free = i + 1;
@@ -1720,8 +1719,7 @@ static void scrub_wr_bio_end_io(struct bio *bio)
sbio->status = bio->bi_status;
sbio->bio = bio;
- btrfs_init_work(&sbio->work, btrfs_scrubwrc_helper,
- scrub_wr_bio_end_io_worker, NULL, NULL);
+ btrfs_init_work(&sbio->work, scrub_wr_bio_end_io_worker, NULL, NULL);
btrfs_queue_work(fs_info->scrub_wr_completion_workers, &sbio->work);
}
@@ -2149,14 +2147,13 @@ static void scrub_missing_raid56_worker(struct btrfs_work *work)
scrub_write_block_to_dev_replace(sblock);
}
- scrub_block_put(sblock);
-
if (sctx->is_dev_replace && sctx->flush_all_writes) {
mutex_lock(&sctx->wr_lock);
scrub_wr_submit(sctx);
mutex_unlock(&sctx->wr_lock);
}
+ scrub_block_put(sblock);
scrub_pending_bio_dec(sctx);
}
@@ -2204,8 +2201,7 @@ static void scrub_missing_raid56_pages(struct scrub_block *sblock)
raid56_add_scrub_pages(rbio, spage->page, spage->logical);
}
- btrfs_init_work(&sblock->work, btrfs_scrub_helper,
- scrub_missing_raid56_worker, NULL, NULL);
+ btrfs_init_work(&sblock->work, scrub_missing_raid56_worker, NULL, NULL);
scrub_block_get(sblock);
scrub_pending_bio_inc(sctx);
raid56_submit_missing_rbio(rbio);
@@ -2743,8 +2739,8 @@ static void scrub_parity_bio_endio(struct bio *bio)
bio_put(bio);
- btrfs_init_work(&sparity->work, btrfs_scrubparity_helper,
- scrub_parity_bio_endio_worker, NULL, NULL);
+ btrfs_init_work(&sparity->work, scrub_parity_bio_endio_worker, NULL,
+ NULL);
btrfs_queue_work(fs_info->scrub_parity_workers, &sparity->work);
}
@@ -3420,7 +3416,7 @@ static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
struct btrfs_device *scrub_dev,
u64 chunk_offset, u64 length,
u64 dev_offset,
- struct btrfs_block_group_cache *cache)
+ struct btrfs_block_group *cache)
{
struct btrfs_fs_info *fs_info = sctx->fs_info;
struct extent_map_tree *map_tree = &fs_info->mapping_tree;
@@ -3484,7 +3480,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
struct extent_buffer *l;
struct btrfs_key key;
struct btrfs_key found_key;
- struct btrfs_block_group_cache *cache;
+ struct btrfs_block_group *cache;
struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
path = btrfs_alloc_path();
@@ -3563,46 +3559,26 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
* -> btrfs_scrub_pause()
*/
scrub_pause_on(fs_info);
- ret = btrfs_inc_block_group_ro(cache);
- if (!ret && sctx->is_dev_replace) {
- /*
- * If we are doing a device replace wait for any tasks
- * that started delalloc right before we set the block
- * group to RO mode, as they might have just allocated
- * an extent from it or decided they could do a nocow
- * write. And if any such tasks did that, wait for their
- * ordered extents to complete and then commit the
- * current transaction, so that we can later see the new
- * extent items in the extent tree - the ordered extents
- * create delayed data references (for cow writes) when
- * they complete, which will be run and insert the
- * corresponding extent items into the extent tree when
- * we commit the transaction they used when running
- * inode.c:btrfs_finish_ordered_io(). We later use
- * the commit root of the extent tree to find extents
- * to copy from the srcdev into the tgtdev, and we don't
- * want to miss any new extents.
- */
- btrfs_wait_block_group_reservations(cache);
- btrfs_wait_nocow_writers(cache);
- ret = btrfs_wait_ordered_roots(fs_info, U64_MAX,
- cache->key.objectid,
- cache->key.offset);
- if (ret > 0) {
- struct btrfs_trans_handle *trans;
-
- trans = btrfs_join_transaction(root);
- if (IS_ERR(trans))
- ret = PTR_ERR(trans);
- else
- ret = btrfs_commit_transaction(trans);
- if (ret) {
- scrub_pause_off(fs_info);
- btrfs_put_block_group(cache);
- break;
- }
- }
- }
+
+ /*
+ * Don't do chunk preallocation for scrub.
+ *
+ * This is especially important for SYSTEM bgs, or we can hit
+ * -EFBIG from btrfs_finish_chunk_alloc() like:
+ * 1. The only SYSTEM bg is marked RO.
+ *    Since the SYSTEM bg is small, that's pretty common.
+ * 2. A new SYSTEM bg is allocated, because the regular path
+ *    would allocate a new chunk.
+ * 3. The new SYSTEM bg is empty and will get cleaned up, but
+ *    it is marked RO again before cleanup really happens.
+ * 4. The empty SYSTEM bg gets scrubbed and we go back to 2.
+ *
+ * This can easily boost the number of SYSTEM chunks if the
+ * cleaner thread can't be triggered fast enough, using up all
+ * the space in btrfs_super_block::sys_chunk_array.
+ */
+ ret = btrfs_inc_block_group_ro(cache, false);
scrub_pause_off(fs_info);
if (ret == 0) {
@@ -3623,7 +3599,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
break;
}
- down_write(&fs_info->dev_replace.rwsem);
+ down_write(&dev_replace->rwsem);
dev_replace->cursor_right = found_key.offset + length;
dev_replace->cursor_left = found_key.offset;
dev_replace->item_needs_writeback = 1;
@@ -3664,10 +3640,10 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
scrub_pause_off(fs_info);
- down_write(&fs_info->dev_replace.rwsem);
+ down_write(&dev_replace->rwsem);
dev_replace->cursor_left = dev_replace->cursor_right;
dev_replace->item_needs_writeback = 1;
- up_write(&fs_info->dev_replace.rwsem);
+ up_write(&dev_replace->rwsem);
if (ro_set)
btrfs_dec_block_group_ro(cache);
@@ -3681,7 +3657,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
*/
spin_lock(&cache->lock);
if (!cache->removed && !cache->ro && cache->reserved == 0 &&
- btrfs_block_group_used(&cache->item) == 0) {
+ cache->used == 0) {
spin_unlock(&cache->lock);
btrfs_mark_bg_unused(cache);
} else {
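
The new comment and the extra boolean to btrfs_inc_block_group_ro() work together: relocation still requests chunk preallocation, while scrub passes false and thereby breaks the RO/allocate/cleanup cycle described above. A toy model of the flag's effect, with hypothetical names; the real function also handles space_info accounting and transaction joins:

#include <stdbool.h>
#include <stdint.h>

struct bg_model {
        bool ro;
        uint64_t free_bytes;
};

/* Sketch of the contract the scrub hunk relies on (simplified). */
static int inc_block_group_ro_sketch(struct bg_model *cache,
                                     bool do_chunk_alloc)
{
        if (do_chunk_alloc && cache->free_bytes == 0) {
                /* Relocation path: allocate a new chunk so data can move. */
        }
        /*
         * Scrub passes do_chunk_alloc=false: marking the only SYSTEM
         * block group read-only must not trigger a new SYSTEM chunk,
         * or the cycle in the comment above starts over.
         */
        cache->ro = true;
        return 0;
}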
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 123ac54af071..ae2db5eb1549 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -25,6 +25,14 @@
#include "compression.h"
/*
+ * Maximum number of references an extent can have in order for us to attempt to
+ * issue clone operations instead of write operations. This currently exists to
+ * avoid hitting limitations of the backreference walking code (taking a lot of
+ * time and using too much memory for extents with a large number of references).
+ */
+#define SEND_MAX_EXTENT_REFS 64
+
+/*
* A fs_path is a helper to dynamically build path names with unknown size.
* It reallocates the internal buffer on demand.
* It allows fast adding of path elements on the right side (normal path) and
@@ -1248,12 +1256,20 @@ static int __iterate_backrefs(u64 ino, u64 offset, u64 root, void *ctx_)
*/
if (found->root == bctx->sctx->send_root) {
/*
- * TODO for the moment we don't accept clones from the inode
- * that is currently send. We may change this when
- * BTRFS_IOC_CLONE_RANGE supports cloning from and to the same
- * file.
+ * If the source inode was not yet processed we can't issue a
+ * clone operation, as the source extent does not exist yet at
+ * the destination of the stream.
*/
- if (ino >= bctx->cur_objectid)
+ if (ino > bctx->cur_objectid)
+ return 0;
+ /*
+ * We clone from the inode currently being sent as long as the
+ * source extent is already processed, otherwise we could try
+ * to clone from an extent that does not exist yet at the
+ * destination of the stream.
+ */
+ if (ino == bctx->cur_objectid &&
+ offset >= bctx->sctx->cur_inode_next_write_offset)
return 0;
}
@@ -1302,6 +1318,7 @@ static int find_extent_clone(struct send_ctx *sctx,
struct clone_root *cur_clone_root;
struct btrfs_key found_key;
struct btrfs_path *tmp_path;
+ struct btrfs_extent_item *ei;
int compressed;
u32 i;
@@ -1349,7 +1366,6 @@ static int find_extent_clone(struct send_ctx *sctx,
ret = extent_from_logical(fs_info, disk_byte, tmp_path,
&found_key, &flags);
up_read(&fs_info->commit_root_sem);
- btrfs_release_path(tmp_path);
if (ret < 0)
goto out;
@@ -1358,6 +1374,21 @@ static int find_extent_clone(struct send_ctx *sctx,
goto out;
}
+ ei = btrfs_item_ptr(tmp_path->nodes[0], tmp_path->slots[0],
+ struct btrfs_extent_item);
+ /*
+ * Backreference walking (iterate_extent_inodes() below) is currently
+ * too expensive when an extent has a large number of references, both
+ * in time spent and used memory. So for now just fall back to write
+ * operations instead of clone operations when an extent has more than
+ * a certain number of references.
+ */
+ if (btrfs_extent_refs(tmp_path->nodes[0], ei) > SEND_MAX_EXTENT_REFS) {
+ ret = -ENOENT;
+ goto out;
+ }
+ btrfs_release_path(tmp_path);
+
/*
* Setup the clone roots.
*/
@@ -4779,7 +4810,7 @@ static ssize_t fill_read_buf(struct send_ctx *sctx, u64 offset, u32 len)
key.type = BTRFS_INODE_ITEM_KEY;
key.offset = 0;
- inode = btrfs_iget(fs_info->sb, &key, root, NULL);
+ inode = btrfs_iget(fs_info->sb, &key, root);
if (IS_ERR(inode))
return PTR_ERR(inode);
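
Both send changes above follow the same rule: only attempt a clone when it is known to be cheap and correct. The reference-count cutoff can be modeled in isolation; extent_refs stands in for btrfs_extent_refs() read from the extent item:

#include <errno.h>
#include <stdint.h>

#define SEND_MAX_EXTENT_REFS 64

/*
 * Sketch of the cutoff in find_extent_clone(): extents with too many
 * references make backref walking (iterate_extent_inodes()) too slow,
 * so send falls back to plain write commands for them.
 */
static int should_attempt_clone(uint64_t extent_refs)
{
        if (extent_refs > SEND_MAX_EXTENT_REFS)
                return -ENOENT; /* caller treats this as "no clone source" */
        return 0;
}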
diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c
index e8a4b0ebe97f..f09aa6ee9113 100644
--- a/fs/btrfs/space-info.c
+++ b/fs/btrfs/space-info.c
@@ -10,7 +10,7 @@
#include "transaction.h"
#include "block-group.h"
-u64 btrfs_space_info_used(struct btrfs_space_info *s_info,
+u64 __pure btrfs_space_info_used(struct btrfs_space_info *s_info,
bool may_use_included)
{
ASSERT(s_info);
@@ -58,7 +58,6 @@ static int create_space_info(struct btrfs_fs_info *info, u64 flags)
spin_lock_init(&space_info->lock);
space_info->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
- init_waitqueue_head(&space_info->wait);
INIT_LIST_HEAD(&space_info->ro_bgs);
INIT_LIST_HEAD(&space_info->tickets);
INIT_LIST_HEAD(&space_info->priority_tickets);
@@ -285,7 +284,7 @@ void btrfs_dump_space_info(struct btrfs_fs_info *fs_info,
struct btrfs_space_info *info, u64 bytes,
int dump_block_groups)
{
- struct btrfs_block_group_cache *cache;
+ struct btrfs_block_group *cache;
int index = 0;
spin_lock(&info->lock);
@@ -301,8 +300,7 @@ again:
spin_lock(&cache->lock);
btrfs_info(fs_info,
"block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %s",
- cache->key.objectid, cache->key.offset,
- btrfs_block_group_used(&cache->item), cache->pinned,
+ cache->start, cache->length, cache->used, cache->pinned,
cache->reserved, cache->ro ? "[readonly]" : "");
btrfs_dump_free_space(cache, bytes);
spin_unlock(&cache->lock);
diff --git a/fs/btrfs/space-info.h b/fs/btrfs/space-info.h
index 8867e84aa33d..1a349e3f9cc1 100644
--- a/fs/btrfs/space-info.h
+++ b/fs/btrfs/space-info.h
@@ -63,7 +63,6 @@ struct btrfs_space_info {
struct rw_semaphore groups_sem;
/* for block groups in our same type */
struct list_head block_groups[BTRFS_NR_RAID_TYPES];
- wait_queue_head_t wait;
struct kobject kobj;
struct kobject *block_group_kobjs[BTRFS_NR_RAID_TYPES];
@@ -116,7 +115,7 @@ void btrfs_update_space_info(struct btrfs_fs_info *info, u64 flags,
struct btrfs_space_info **space_info);
struct btrfs_space_info *btrfs_find_space_info(struct btrfs_fs_info *info,
u64 flags);
-u64 btrfs_space_info_used(struct btrfs_space_info *s_info,
+u64 __pure btrfs_space_info_used(struct btrfs_space_info *s_info,
bool may_use_included);
void btrfs_clear_space_info_full(struct btrfs_fs_info *info);
void btrfs_dump_space_info(struct btrfs_fs_info *fs_info,
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 1b151af25772..a98c3c71fc54 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -66,7 +66,7 @@ static struct file_system_type btrfs_root_fs_type;
static int btrfs_remount(struct super_block *sb, int *flags, char *data);
-const char *btrfs_decode_error(int errno)
+const char * __attribute_const__ btrfs_decode_error(int errno)
{
char *errstr = "unknown";
@@ -187,7 +187,7 @@ static struct ratelimit_state printk_limits[] = {
RATELIMIT_STATE_INIT(printk_limits[7], DEFAULT_RATELIMIT_INTERVAL, 100),
};
-void btrfs_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...)
+void __cold btrfs_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...)
{
char lvl[PRINTK_MAX_SINGLE_HEADER_LEN + 1] = "\0";
struct va_format vaf;
@@ -1219,7 +1219,7 @@ static int btrfs_fill_super(struct super_block *sb,
key.objectid = BTRFS_FIRST_FREE_OBJECTID;
key.type = BTRFS_INODE_ITEM_KEY;
key.offset = 0;
- inode = btrfs_iget(sb, &key, fs_info->fs_root, NULL);
+ inode = btrfs_iget(sb, &key, fs_info->fs_root);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
goto fail_close;
@@ -1669,7 +1669,6 @@ static void btrfs_resize_thread_pool(struct btrfs_fs_info *fs_info,
btrfs_workqueue_set_max(fs_info->workers, new_pool_size);
btrfs_workqueue_set_max(fs_info->delalloc_workers, new_pool_size);
- btrfs_workqueue_set_max(fs_info->submit_workers, new_pool_size);
btrfs_workqueue_set_max(fs_info->caching_workers, new_pool_size);
btrfs_workqueue_set_max(fs_info->endio_workers, new_pool_size);
btrfs_workqueue_set_max(fs_info->endio_meta_workers, new_pool_size);
@@ -1936,6 +1935,10 @@ static inline int btrfs_calc_avail_data_space(struct btrfs_fs_info *fs_info,
num_stripes = nr_devices;
else if (type & BTRFS_BLOCK_GROUP_RAID1)
num_stripes = 2;
+ else if (type & BTRFS_BLOCK_GROUP_RAID1C3)
+ num_stripes = 3;
+ else if (type & BTRFS_BLOCK_GROUP_RAID1C4)
+ num_stripes = 4;
else if (type & BTRFS_BLOCK_GROUP_RAID10)
num_stripes = 4;
@@ -2022,7 +2025,6 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
struct btrfs_fs_info *fs_info = btrfs_sb(dentry->d_sb);
struct btrfs_super_block *disk_super = fs_info->super_copy;
- struct list_head *head = &fs_info->space_info;
struct btrfs_space_info *found;
u64 total_used = 0;
u64 total_free_data = 0;
@@ -2036,7 +2038,7 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
int mixed = 0;
rcu_read_lock();
- list_for_each_entry_rcu(found, head, list) {
+ list_for_each_entry_rcu(found, &fs_info->space_info, list) {
if (found->flags & BTRFS_BLOCK_GROUP_DATA) {
int i;
@@ -2360,10 +2362,14 @@ static int __init init_btrfs_fs(void)
if (err)
goto free_cachep;
- err = extent_map_init();
+ err = extent_state_cache_init();
if (err)
goto free_extent_io;
+ err = extent_map_init();
+ if (err)
+ goto free_extent_state_cache;
+
err = ordered_data_init();
if (err)
goto free_extent_map;
@@ -2422,6 +2428,8 @@ free_ordered_data:
ordered_data_exit();
free_extent_map:
extent_map_exit();
+free_extent_state_cache:
+ extent_state_cache_exit();
free_extent_io:
extent_io_exit();
free_cachep:
@@ -2442,6 +2450,7 @@ static void __exit exit_btrfs_fs(void)
btrfs_prelim_ref_exit();
ordered_data_exit();
extent_map_exit();
+ extent_state_cache_exit();
extent_io_exit();
btrfs_interface_exit();
btrfs_end_io_wq_exit();
@@ -2456,3 +2465,6 @@ module_exit(exit_btrfs_fs)
MODULE_LICENSE("GPL");
MODULE_SOFTDEP("pre: crc32c");
+MODULE_SOFTDEP("pre: xxhash64");
+MODULE_SOFTDEP("pre: sha256");
+MODULE_SOFTDEP("pre: blake2b-256");
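
The statfs hunk extends the per-profile stripe-count table with the new RAID1C3 and RAID1C4 profiles (three and four copies of every block). A standalone sketch of the mapping; the bit values here are illustrative placeholders, not the real BTRFS_BLOCK_GROUP_* constants:

#include <stdint.h>

#define BG_RAID0        (1ULL << 0)     /* placeholder bit values */
#define BG_RAID1        (1ULL << 1)
#define BG_RAID1C3      (1ULL << 2)
#define BG_RAID1C4      (1ULL << 3)
#define BG_RAID10       (1ULL << 4)

/* Stripe count used by the free-space estimate above. */
static int num_stripes_for(uint64_t type, int nr_devices)
{
        if (type & BG_RAID0)
                return nr_devices;
        if (type & BG_RAID1)
                return 2;
        if (type & BG_RAID1C3)
                return 3;
        if (type & BG_RAID1C4)
                return 4;
        if (type & BG_RAID10)
                return 4;
        return 1;
}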
diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
index f6d3c80f2e28..5ebbe8a5ee76 100644
--- a/fs/btrfs/sysfs.c
+++ b/fs/btrfs/sysfs.c
@@ -9,6 +9,7 @@
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/bug.h>
+#include <crypto/hash.h>
#include "ctree.h"
#include "disk-io.h"
@@ -258,6 +259,7 @@ BTRFS_FEAT_ATTR_INCOMPAT(skinny_metadata, SKINNY_METADATA);
BTRFS_FEAT_ATTR_INCOMPAT(no_holes, NO_HOLES);
BTRFS_FEAT_ATTR_INCOMPAT(metadata_uuid, METADATA_UUID);
BTRFS_FEAT_ATTR_COMPAT_RO(free_space_tree, FREE_SPACE_TREE);
+BTRFS_FEAT_ATTR_INCOMPAT(raid1c34, RAID1C34);
static struct attribute *btrfs_supported_feature_attrs[] = {
BTRFS_FEAT_ATTR_PTR(mixed_backref),
@@ -272,6 +274,7 @@ static struct attribute *btrfs_supported_feature_attrs[] = {
BTRFS_FEAT_ATTR_PTR(no_holes),
BTRFS_FEAT_ATTR_PTR(metadata_uuid),
BTRFS_FEAT_ATTR_PTR(free_space_tree),
+ BTRFS_FEAT_ATTR_PTR(raid1c34),
NULL
};
@@ -295,8 +298,30 @@ static ssize_t rmdir_subvol_show(struct kobject *kobj,
}
BTRFS_ATTR(static_feature, rmdir_subvol, rmdir_subvol_show);
+static ssize_t supported_checksums_show(struct kobject *kobj,
+ struct kobj_attribute *a, char *buf)
+{
+ ssize_t ret = 0;
+ int i;
+
+ for (i = 0; i < btrfs_get_num_csums(); i++) {
+ /*
+ * This "trick" only works as long as 'enum btrfs_csum_type' has
+ * no holes in it.
+ */
+ ret += snprintf(buf + ret, PAGE_SIZE - ret, "%s%s",
+ (i == 0 ? "" : " "), btrfs_super_csum_name(i));
+ }
+
+ ret += snprintf(buf + ret, PAGE_SIZE - ret, "\n");
+ return ret;
+}
+BTRFS_ATTR(static_feature, supported_checksums, supported_checksums_show);
+
static struct attribute *btrfs_supported_static_feature_attrs[] = {
BTRFS_ATTR_PTR(static_feature, rmdir_subvol),
+ BTRFS_ATTR_PTR(static_feature, supported_checksums),
NULL
};
@@ -372,16 +397,16 @@ static ssize_t raid_bytes_show(struct kobject *kobj,
{
struct btrfs_space_info *sinfo = to_space_info(kobj->parent);
- struct btrfs_block_group_cache *block_group;
+ struct btrfs_block_group *block_group;
int index = btrfs_bg_flags_to_raid_index(to_raid_kobj(kobj)->flags);
u64 val = 0;
down_read(&sinfo->groups_sem);
list_for_each_entry(block_group, &sinfo->block_groups[index], list) {
if (&attr->attr == BTRFS_ATTR_PTR(raid, total_bytes))
- val += block_group->key.offset;
+ val += block_group->length;
else
- val += btrfs_block_group_used(&block_group->item);
+ val += block_group->used;
}
up_read(&sinfo->groups_sem);
return snprintf(buf, PAGE_SIZE, "%llu\n", val);
@@ -604,6 +629,19 @@ static ssize_t btrfs_metadata_uuid_show(struct kobject *kobj,
BTRFS_ATTR(, metadata_uuid, btrfs_metadata_uuid_show);
+static ssize_t btrfs_checksum_show(struct kobject *kobj,
+ struct kobj_attribute *a, char *buf)
+{
+ struct btrfs_fs_info *fs_info = to_fs_info(kobj);
+ u16 csum_type = btrfs_super_csum_type(fs_info->super_copy);
+
+ return snprintf(buf, PAGE_SIZE, "%s (%s)\n",
+ btrfs_super_csum_name(csum_type),
+ crypto_shash_driver_name(fs_info->csum_shash));
+}
+
+BTRFS_ATTR(, checksum, btrfs_checksum_show);
+
static const struct attribute *btrfs_attrs[] = {
BTRFS_ATTR_PTR(, label),
BTRFS_ATTR_PTR(, nodesize),
@@ -611,6 +649,7 @@ static const struct attribute *btrfs_attrs[] = {
BTRFS_ATTR_PTR(, clone_alignment),
BTRFS_ATTR_PTR(, quota_override),
BTRFS_ATTR_PTR(, metadata_uuid),
+ BTRFS_ATTR_PTR(, checksum),
NULL,
};
@@ -822,7 +861,7 @@ static void init_feature_attrs(void)
* Create a sysfs entry for a given block group type at path
* /sys/fs/btrfs/UUID/allocation/data/TYPE
*/
-void btrfs_sysfs_add_block_group_type(struct btrfs_block_group_cache *cache)
+void btrfs_sysfs_add_block_group_type(struct btrfs_block_group *cache)
{
struct btrfs_fs_info *fs_info = cache->fs_info;
struct btrfs_space_info *space_info = cache->space_info;
diff --git a/fs/btrfs/sysfs.h b/fs/btrfs/sysfs.h
index 610e9c36a94c..e10c3adfc30f 100644
--- a/fs/btrfs/sysfs.h
+++ b/fs/btrfs/sysfs.h
@@ -32,7 +32,7 @@ int __init btrfs_init_sysfs(void);
void __cold btrfs_exit_sysfs(void);
int btrfs_sysfs_add_mounted(struct btrfs_fs_info *fs_info);
void btrfs_sysfs_remove_mounted(struct btrfs_fs_info *fs_info);
-void btrfs_sysfs_add_block_group_type(struct btrfs_block_group_cache *cache);
+void btrfs_sysfs_add_block_group_type(struct btrfs_block_group *cache);
int btrfs_sysfs_add_space_info_type(struct btrfs_fs_info *fs_info,
struct btrfs_space_info *space_info);
void btrfs_sysfs_remove_space_info(struct btrfs_space_info *space_info);
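
The two new read-only attributes land under the global features directory and the per-filesystem directory respectively. A small user-space reader, assuming the conventional sysfs paths implied by the attribute groups above (pass the filesystem UUID as the first argument):

#include <stdio.h>

int main(int argc, char **argv)
{
        char path[256], line[256];
        FILE *f;

        /* Space-separated list, e.g. "crc32c xxhash64 sha256 blake2b". */
        f = fopen("/sys/fs/btrfs/features/supported_checksums", "r");
        if (f) {
                if (fgets(line, sizeof(line), f))
                        printf("supported: %s", line);
                fclose(f);
        }

        if (argc > 1) {
                snprintf(path, sizeof(path), "/sys/fs/btrfs/%s/checksum",
                         argv[1]);
                /* Format per the show() callback: "name (driver)". */
                f = fopen(path, "r");
                if (f) {
                        if (fgets(line, sizeof(line), f))
                                printf("active: %s", line);
                        fclose(f);
                }
        }
        return 0;
}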
diff --git a/fs/btrfs/tests/btrfs-tests.c b/fs/btrfs/tests/btrfs-tests.c
index 99fe9bf3fdac..a7aca4141788 100644
--- a/fs/btrfs/tests/btrfs-tests.c
+++ b/fs/btrfs/tests/btrfs-tests.c
@@ -202,11 +202,11 @@ void btrfs_free_dummy_root(struct btrfs_root *root)
kfree(root);
}
-struct btrfs_block_group_cache *
+struct btrfs_block_group *
btrfs_alloc_dummy_block_group(struct btrfs_fs_info *fs_info,
unsigned long length)
{
- struct btrfs_block_group_cache *cache;
+ struct btrfs_block_group *cache;
cache = kzalloc(sizeof(*cache), GFP_KERNEL);
if (!cache)
@@ -218,9 +218,8 @@ btrfs_alloc_dummy_block_group(struct btrfs_fs_info *fs_info,
return NULL;
}
- cache->key.objectid = 0;
- cache->key.offset = length;
- cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
+ cache->start = 0;
+ cache->length = length;
cache->full_stripe_len = fs_info->sectorsize;
cache->fs_info = fs_info;
@@ -233,7 +232,7 @@ btrfs_alloc_dummy_block_group(struct btrfs_fs_info *fs_info,
return cache;
}
-void btrfs_free_dummy_block_group(struct btrfs_block_group_cache *cache)
+void btrfs_free_dummy_block_group(struct btrfs_block_group *cache)
{
if (!cache)
return;
diff --git a/fs/btrfs/tests/btrfs-tests.h b/fs/btrfs/tests/btrfs-tests.h
index ee277bbd939b..9e52527357d8 100644
--- a/fs/btrfs/tests/btrfs-tests.h
+++ b/fs/btrfs/tests/btrfs-tests.h
@@ -41,9 +41,9 @@ struct inode *btrfs_new_test_inode(void);
struct btrfs_fs_info *btrfs_alloc_dummy_fs_info(u32 nodesize, u32 sectorsize);
void btrfs_free_dummy_fs_info(struct btrfs_fs_info *fs_info);
void btrfs_free_dummy_root(struct btrfs_root *root);
-struct btrfs_block_group_cache *
+struct btrfs_block_group *
btrfs_alloc_dummy_block_group(struct btrfs_fs_info *fs_info, unsigned long length);
-void btrfs_free_dummy_block_group(struct btrfs_block_group_cache *cache);
+void btrfs_free_dummy_block_group(struct btrfs_block_group *cache);
void btrfs_init_dummy_trans(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info);
#else
diff --git a/fs/btrfs/tests/free-space-tests.c b/fs/btrfs/tests/free-space-tests.c
index 43ec7060fcd2..aebdf23f0cdd 100644
--- a/fs/btrfs/tests/free-space-tests.c
+++ b/fs/btrfs/tests/free-space-tests.c
@@ -17,7 +17,7 @@
* entry and remove space from either end and the middle, and make sure we can
* remove space that covers adjacent extent entries.
*/
-static int test_extents(struct btrfs_block_group_cache *cache)
+static int test_extents(struct btrfs_block_group *cache)
{
int ret = 0;
@@ -87,8 +87,7 @@ static int test_extents(struct btrfs_block_group_cache *cache)
return 0;
}
-static int test_bitmaps(struct btrfs_block_group_cache *cache,
- u32 sectorsize)
+static int test_bitmaps(struct btrfs_block_group *cache, u32 sectorsize)
{
u64 next_bitmap_offset;
int ret;
@@ -156,7 +155,7 @@ static int test_bitmaps(struct btrfs_block_group_cache *cache,
}
/* This is the high grade jackassery */
-static int test_bitmaps_and_extents(struct btrfs_block_group_cache *cache,
+static int test_bitmaps_and_extents(struct btrfs_block_group *cache,
u32 sectorsize)
{
u64 bitmap_offset = (u64)(BITS_PER_BITMAP * sectorsize);
@@ -331,7 +330,7 @@ static bool test_use_bitmap(struct btrfs_free_space_ctl *ctl,
/* Used by test_steal_space_from_bitmap_to_extent(). */
static int
-check_num_extents_and_bitmaps(const struct btrfs_block_group_cache *cache,
+check_num_extents_and_bitmaps(const struct btrfs_block_group *cache,
const int num_extents,
const int num_bitmaps)
{
@@ -351,7 +350,7 @@ check_num_extents_and_bitmaps(const struct btrfs_block_group_cache *cache,
}
/* Used by test_steal_space_from_bitmap_to_extent(). */
-static int check_cache_empty(struct btrfs_block_group_cache *cache)
+static int check_cache_empty(struct btrfs_block_group *cache)
{
u64 offset;
u64 max_extent_size;
@@ -393,7 +392,7 @@ static int check_cache_empty(struct btrfs_block_group_cache *cache)
* requests.
*/
static int
-test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache,
+test_steal_space_from_bitmap_to_extent(struct btrfs_block_group *cache,
u32 sectorsize)
{
int ret;
@@ -829,7 +828,7 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache,
int btrfs_test_free_space_cache(u32 sectorsize, u32 nodesize)
{
struct btrfs_fs_info *fs_info;
- struct btrfs_block_group_cache *cache;
+ struct btrfs_block_group *cache;
struct btrfs_root *root = NULL;
int ret = -ENOMEM;
diff --git a/fs/btrfs/tests/free-space-tree-tests.c b/fs/btrfs/tests/free-space-tree-tests.c
index bc92df977630..1a846bf6e197 100644
--- a/fs/btrfs/tests/free-space-tree-tests.c
+++ b/fs/btrfs/tests/free-space-tree-tests.c
@@ -18,7 +18,7 @@ struct free_space_extent {
static int __check_free_space_extents(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info,
- struct btrfs_block_group_cache *cache,
+ struct btrfs_block_group *cache,
struct btrfs_path *path,
const struct free_space_extent * const extents,
unsigned int num_extents)
@@ -48,7 +48,7 @@ static int __check_free_space_extents(struct btrfs_trans_handle *trans,
if (flags & BTRFS_FREE_SPACE_USING_BITMAPS) {
if (path->slots[0] != 0)
goto invalid;
- end = cache->key.objectid + cache->key.offset;
+ end = cache->start + cache->length;
i = 0;
while (++path->slots[0] < btrfs_header_nritems(path->nodes[0])) {
btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
@@ -107,7 +107,7 @@ invalid:
static int check_free_space_extents(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info,
- struct btrfs_block_group_cache *cache,
+ struct btrfs_block_group *cache,
struct btrfs_path *path,
const struct free_space_extent * const extents,
unsigned int num_extents)
@@ -150,12 +150,12 @@ static int check_free_space_extents(struct btrfs_trans_handle *trans,
static int test_empty_block_group(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info,
- struct btrfs_block_group_cache *cache,
+ struct btrfs_block_group *cache,
struct btrfs_path *path,
u32 alignment)
{
const struct free_space_extent extents[] = {
- {cache->key.objectid, cache->key.offset},
+ {cache->start, cache->length},
};
return check_free_space_extents(trans, fs_info, cache, path,
@@ -164,7 +164,7 @@ static int test_empty_block_group(struct btrfs_trans_handle *trans,
static int test_remove_all(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info,
- struct btrfs_block_group_cache *cache,
+ struct btrfs_block_group *cache,
struct btrfs_path *path,
u32 alignment)
{
@@ -172,8 +172,8 @@ static int test_remove_all(struct btrfs_trans_handle *trans,
int ret;
ret = __remove_from_free_space_tree(trans, cache, path,
- cache->key.objectid,
- cache->key.offset);
+ cache->start,
+ cache->length);
if (ret) {
test_err("could not remove free space");
return ret;
@@ -185,18 +185,17 @@ static int test_remove_all(struct btrfs_trans_handle *trans,
static int test_remove_beginning(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info,
- struct btrfs_block_group_cache *cache,
+ struct btrfs_block_group *cache,
struct btrfs_path *path,
u32 alignment)
{
const struct free_space_extent extents[] = {
- {cache->key.objectid + alignment,
- cache->key.offset - alignment},
+ {cache->start + alignment, cache->length - alignment},
};
int ret;
ret = __remove_from_free_space_tree(trans, cache, path,
- cache->key.objectid, alignment);
+ cache->start, alignment);
if (ret) {
test_err("could not remove free space");
return ret;
@@ -209,19 +208,18 @@ static int test_remove_beginning(struct btrfs_trans_handle *trans,
static int test_remove_end(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info,
- struct btrfs_block_group_cache *cache,
+ struct btrfs_block_group *cache,
struct btrfs_path *path,
u32 alignment)
{
const struct free_space_extent extents[] = {
- {cache->key.objectid, cache->key.offset - alignment},
+ {cache->start, cache->length - alignment},
};
int ret;
ret = __remove_from_free_space_tree(trans, cache, path,
- cache->key.objectid +
- cache->key.offset - alignment,
- alignment);
+ cache->start + cache->length - alignment,
+ alignment);
if (ret) {
test_err("could not remove free space");
return ret;
@@ -233,19 +231,18 @@ static int test_remove_end(struct btrfs_trans_handle *trans,
static int test_remove_middle(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info,
- struct btrfs_block_group_cache *cache,
+ struct btrfs_block_group *cache,
struct btrfs_path *path,
u32 alignment)
{
const struct free_space_extent extents[] = {
- {cache->key.objectid, alignment},
- {cache->key.objectid + 2 * alignment,
- cache->key.offset - 2 * alignment},
+ {cache->start, alignment},
+ {cache->start + 2 * alignment, cache->length - 2 * alignment},
};
int ret;
ret = __remove_from_free_space_tree(trans, cache, path,
- cache->key.objectid + alignment,
+ cache->start + alignment,
alignment);
if (ret) {
test_err("could not remove free space");
@@ -258,24 +255,23 @@ static int test_remove_middle(struct btrfs_trans_handle *trans,
static int test_merge_left(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info,
- struct btrfs_block_group_cache *cache,
+ struct btrfs_block_group *cache,
struct btrfs_path *path,
u32 alignment)
{
const struct free_space_extent extents[] = {
- {cache->key.objectid, 2 * alignment},
+ {cache->start, 2 * alignment},
};
int ret;
ret = __remove_from_free_space_tree(trans, cache, path,
- cache->key.objectid,
- cache->key.offset);
+ cache->start, cache->length);
if (ret) {
test_err("could not remove free space");
return ret;
}
- ret = __add_to_free_space_tree(trans, cache, path, cache->key.objectid,
+ ret = __add_to_free_space_tree(trans, cache, path, cache->start,
alignment);
if (ret) {
test_err("could not add free space");
@@ -283,7 +279,7 @@ static int test_merge_left(struct btrfs_trans_handle *trans,
}
ret = __add_to_free_space_tree(trans, cache, path,
- cache->key.objectid + alignment,
+ cache->start + alignment,
alignment);
if (ret) {
test_err("could not add free space");
@@ -296,25 +292,24 @@ static int test_merge_left(struct btrfs_trans_handle *trans,
static int test_merge_right(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info,
- struct btrfs_block_group_cache *cache,
+ struct btrfs_block_group *cache,
struct btrfs_path *path,
u32 alignment)
{
const struct free_space_extent extents[] = {
- {cache->key.objectid + alignment, 2 * alignment},
+ {cache->start + alignment, 2 * alignment},
};
int ret;
ret = __remove_from_free_space_tree(trans, cache, path,
- cache->key.objectid,
- cache->key.offset);
+ cache->start, cache->length);
if (ret) {
test_err("could not remove free space");
return ret;
}
ret = __add_to_free_space_tree(trans, cache, path,
- cache->key.objectid + 2 * alignment,
+ cache->start + 2 * alignment,
alignment);
if (ret) {
test_err("could not add free space");
@@ -322,7 +317,7 @@ static int test_merge_right(struct btrfs_trans_handle *trans,
}
ret = __add_to_free_space_tree(trans, cache, path,
- cache->key.objectid + alignment,
+ cache->start + alignment,
alignment);
if (ret) {
test_err("could not add free space");
@@ -335,24 +330,23 @@ static int test_merge_right(struct btrfs_trans_handle *trans,
static int test_merge_both(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info,
- struct btrfs_block_group_cache *cache,
+ struct btrfs_block_group *cache,
struct btrfs_path *path,
u32 alignment)
{
const struct free_space_extent extents[] = {
- {cache->key.objectid, 3 * alignment},
+ {cache->start, 3 * alignment},
};
int ret;
ret = __remove_from_free_space_tree(trans, cache, path,
- cache->key.objectid,
- cache->key.offset);
+ cache->start, cache->length);
if (ret) {
test_err("could not remove free space");
return ret;
}
- ret = __add_to_free_space_tree(trans, cache, path, cache->key.objectid,
+ ret = __add_to_free_space_tree(trans, cache, path, cache->start,
alignment);
if (ret) {
test_err("could not add free space");
@@ -360,16 +354,14 @@ static int test_merge_both(struct btrfs_trans_handle *trans,
}
ret = __add_to_free_space_tree(trans, cache, path,
- cache->key.objectid + 2 * alignment,
- alignment);
+ cache->start + 2 * alignment, alignment);
if (ret) {
test_err("could not add free space");
return ret;
}
ret = __add_to_free_space_tree(trans, cache, path,
- cache->key.objectid + alignment,
- alignment);
+ cache->start + alignment, alignment);
if (ret) {
test_err("could not add free space");
return ret;
@@ -381,26 +373,25 @@ static int test_merge_both(struct btrfs_trans_handle *trans,
static int test_merge_none(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info,
- struct btrfs_block_group_cache *cache,
+ struct btrfs_block_group *cache,
struct btrfs_path *path,
u32 alignment)
{
const struct free_space_extent extents[] = {
- {cache->key.objectid, alignment},
- {cache->key.objectid + 2 * alignment, alignment},
- {cache->key.objectid + 4 * alignment, alignment},
+ {cache->start, alignment},
+ {cache->start + 2 * alignment, alignment},
+ {cache->start + 4 * alignment, alignment},
};
int ret;
ret = __remove_from_free_space_tree(trans, cache, path,
- cache->key.objectid,
- cache->key.offset);
+ cache->start, cache->length);
if (ret) {
test_err("could not remove free space");
return ret;
}
- ret = __add_to_free_space_tree(trans, cache, path, cache->key.objectid,
+ ret = __add_to_free_space_tree(trans, cache, path, cache->start,
alignment);
if (ret) {
test_err("could not add free space");
@@ -408,16 +399,14 @@ static int test_merge_none(struct btrfs_trans_handle *trans,
}
ret = __add_to_free_space_tree(trans, cache, path,
- cache->key.objectid + 4 * alignment,
- alignment);
+ cache->start + 4 * alignment, alignment);
if (ret) {
test_err("could not add free space");
return ret;
}
ret = __add_to_free_space_tree(trans, cache, path,
- cache->key.objectid + 2 * alignment,
- alignment);
+ cache->start + 2 * alignment, alignment);
if (ret) {
test_err("could not add free space");
return ret;
@@ -429,7 +418,7 @@ static int test_merge_none(struct btrfs_trans_handle *trans,
typedef int (*test_func_t)(struct btrfs_trans_handle *,
struct btrfs_fs_info *,
- struct btrfs_block_group_cache *,
+ struct btrfs_block_group *,
struct btrfs_path *,
u32 alignment);
@@ -438,7 +427,7 @@ static int run_test(test_func_t test_func, int bitmaps, u32 sectorsize,
{
struct btrfs_fs_info *fs_info;
struct btrfs_root *root = NULL;
- struct btrfs_block_group_cache *cache = NULL;
+ struct btrfs_block_group *cache = NULL;
struct btrfs_trans_handle trans;
struct btrfs_path *path = NULL;
int ret;
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 8624bdee8c5b..cfc08ef9b876 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -24,9 +24,79 @@
#define BTRFS_ROOT_TRANS_TAG 0
+/*
+ * Transaction states and transitions
+ *
+ * No running transaction (fs tree blocks are not modified)
+ * |
+ * | To next stage:
+ * | Call a start_transaction() variant, except btrfs_join_transaction_nostart().
+ * V
+ * Transaction N [[TRANS_STATE_RUNNING]]
+ * |
+ * | New trans handles can be attached to transaction N by calling any
+ * | of the start_transaction() variants.
+ * |
+ * | To next stage:
+ * | Call btrfs_commit_transaction() on any trans handle attached to
+ * | transaction N
+ * V
+ * Transaction N [[TRANS_STATE_COMMIT_START]]
+ * |
+ * | Will wait for previous running transaction to completely finish if there
+ * | is one
+ * |
+ * | Then one of the following happens:
+ * | - Wait for all other trans handle holders to release.
+ * | The btrfs_commit_transaction() caller will do the commit work.
+ * | - Wait for current transaction to be committed by others.
+ * | Other btrfs_commit_transaction() caller will do the commit work.
+ * |
+ * | At this stage, only btrfs_join_transaction*() variants can attach
+ * | to this running transaction.
+ * | All other variants will wait for the current one to finish and
+ * | attach to transaction N+1.
+ * |
+ * | To next stage:
+ * | A caller is chosen to commit transaction N, and all other trans
+ * | handles have been released.
+ * V
+ * Transaction N [[TRANS_STATE_COMMIT_DOING]]
+ * |
+ * | The heavy lifting transaction work is started.
+ * | From running delayed refs (modifying extent tree) to creating pending
+ * | snapshots, running qgroups.
+ * | In short, modify supporting trees to reflect modifications of subvolume
+ * | trees.
+ * |
+ * | At this stage, all start_transaction() calls will wait for this
+ * | transaction to finish and attach to transaction N+1.
+ * |
+ * | To next stage:
+ * | Until all supporting trees are updated.
+ * V
+ * Transaction N [[TRANS_STATE_UNBLOCKED]]
+ * | Transaction N+1
+ * | All needed trees are modified, thus we only [[TRANS_STATE_RUNNING]]
+ * | need to write them back to disk and update |
+ * | super blocks. |
+ * | |
+ * | At this stage, a new transaction is allowed |
+ * | to start. |
+ * | All new start_transaction() calls will be |
+ * | attached to transid N+1. |
+ * | |
+ * | To next stage: |
+ * | Until all tree blocks and super blocks are |
+ * | written to block devices |
+ * V |
+ * Transaction N [[TRANS_STATE_COMPLETED]] V
+ * All tree blocks and super blocks are written. Transaction N+1
+ * This transaction is finished and all its [[TRANS_STATE_COMMIT_START]]
+ * data structures will be cleaned up. | Life goes on
+ */
static const unsigned int btrfs_blocked_trans_types[TRANS_STATE_MAX] = {
[TRANS_STATE_RUNNING] = 0U,
- [TRANS_STATE_BLOCKED] = __TRANS_START,
[TRANS_STATE_COMMIT_START] = (__TRANS_START | __TRANS_ATTACH),
[TRANS_STATE_COMMIT_DOING] = (__TRANS_START |
__TRANS_ATTACH |
@@ -63,10 +133,10 @@ void btrfs_put_transaction(struct btrfs_transaction *transaction)
* discard the physical locations of the block groups.
*/
while (!list_empty(&transaction->deleted_bgs)) {
- struct btrfs_block_group_cache *cache;
+ struct btrfs_block_group *cache;
cache = list_first_entry(&transaction->deleted_bgs,
- struct btrfs_block_group_cache,
+ struct btrfs_block_group,
bg_list);
list_del_init(&cache->bg_list);
btrfs_put_block_group_trimming(cache);
@@ -383,7 +453,7 @@ int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
static inline int is_transaction_blocked(struct btrfs_transaction *trans)
{
- return (trans->state >= TRANS_STATE_BLOCKED &&
+ return (trans->state >= TRANS_STATE_COMMIT_START &&
trans->state < TRANS_STATE_UNBLOCKED &&
!trans->aborted);
}
@@ -570,7 +640,7 @@ again:
INIT_LIST_HEAD(&h->new_bgs);
smp_mb();
- if (cur_trans->state >= TRANS_STATE_BLOCKED &&
+ if (cur_trans->state >= TRANS_STATE_COMMIT_START &&
may_wait_transaction(fs_info, type)) {
current->journal_info = h;
btrfs_commit_transaction(h);
@@ -659,7 +729,7 @@ struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root)
true);
}
-struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root)
+struct btrfs_trans_handle *btrfs_join_transaction_spacecache(struct btrfs_root *root)
{
return start_transaction(root, 0, TRANS_JOIN_NOLOCK,
BTRFS_RESERVE_NO_FLUSH, true);
@@ -798,7 +868,7 @@ int btrfs_should_end_transaction(struct btrfs_trans_handle *trans)
struct btrfs_transaction *cur_trans = trans->transaction;
smp_mb();
- if (cur_trans->state >= TRANS_STATE_BLOCKED ||
+ if (cur_trans->state >= TRANS_STATE_COMMIT_START ||
cur_trans->delayed_refs.flushing)
return 1;
@@ -831,7 +901,6 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
{
struct btrfs_fs_info *info = trans->fs_info;
struct btrfs_transaction *cur_trans = trans->transaction;
- int lock = (trans->type != TRANS_JOIN_NOLOCK);
int err = 0;
if (refcount_read(&trans->use_count) > 1) {
@@ -847,13 +916,6 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
btrfs_trans_release_chunk_metadata(trans);
- if (lock && READ_ONCE(cur_trans->state) == TRANS_STATE_BLOCKED) {
- if (throttle)
- return btrfs_commit_transaction(trans);
- else
- wake_up_process(info->transaction_kthread);
- }
-
if (trans->type & __TRANS_FREEZABLE)
sb_end_intwrite(info->sb);
@@ -990,7 +1052,7 @@ static int __btrfs_wait_marked_extents(struct btrfs_fs_info *fs_info,
return werr;
}
-int btrfs_wait_extents(struct btrfs_fs_info *fs_info,
+static int btrfs_wait_extents(struct btrfs_fs_info *fs_info,
struct extent_io_tree *dirty_pages)
{
bool errors = false;
@@ -1875,7 +1937,7 @@ static void cleanup_transaction(struct btrfs_trans_handle *trans, int err)
static void btrfs_cleanup_pending_block_groups(struct btrfs_trans_handle *trans)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
- struct btrfs_block_group_cache *block_group, *tmp;
+ struct btrfs_block_group *block_group, *tmp;
list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) {
btrfs_delayed_refs_rsv_release(fs_info, 1);
@@ -1949,6 +2011,8 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
struct btrfs_transaction *prev_trans = NULL;
int ret;
+ ASSERT(refcount_read(&trans->use_count) == 1);
+
/* Stop the commit early if ->aborted is set */
if (unlikely(READ_ONCE(cur_trans->aborted))) {
ret = cur_trans->aborted;
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index 2c5a6f6e5bb0..49f7196368f5 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -13,7 +13,6 @@
enum btrfs_trans_state {
TRANS_STATE_RUNNING,
- TRANS_STATE_BLOCKED,
TRANS_STATE_COMMIT_START,
TRANS_STATE_COMMIT_DOING,
TRANS_STATE_UNBLOCKED,
@@ -184,7 +183,7 @@ struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv(
unsigned int num_items,
int min_factor);
struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root);
-struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root);
+struct btrfs_trans_handle *btrfs_join_transaction_spacecache(struct btrfs_root *root);
struct btrfs_trans_handle *btrfs_join_transaction_nostart(struct btrfs_root *root);
struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root);
struct btrfs_trans_handle *btrfs_attach_transaction_barrier(
@@ -218,8 +217,6 @@ int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
int btrfs_write_marked_extents(struct btrfs_fs_info *fs_info,
struct extent_io_tree *dirty_pages, int mark);
-int btrfs_wait_extents(struct btrfs_fs_info *fs_info,
- struct extent_io_tree *dirty_pages);
int btrfs_wait_tree_log_extents(struct btrfs_root *root, int mark);
int btrfs_transaction_blocked(struct btrfs_fs_info *info);
int btrfs_transaction_in_commit(struct btrfs_fs_info *info);
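
With TRANS_STATE_BLOCKED gone, every former ">= TRANS_STATE_BLOCKED" comparison collapses onto TRANS_STATE_COMMIT_START, as the reworked checks above show. A compact model of the remaining state machine and the "blocked" window; the enum mirrors transaction.h after this change, and the helper is simplified:

enum trans_state_model {
        TRANS_STATE_RUNNING,
        TRANS_STATE_COMMIT_START,
        TRANS_STATE_COMMIT_DOING,
        TRANS_STATE_UNBLOCKED,
        TRANS_STATE_COMPLETED,
        TRANS_STATE_MAX,
};

/*
 * A transaction is "blocked" for new full handles from COMMIT_START
 * until UNBLOCKED, unless it has already aborted.
 */
static int is_transaction_blocked_model(enum trans_state_model state,
                                        int aborted)
{
        return state >= TRANS_STATE_COMMIT_START &&
               state < TRANS_STATE_UNBLOCKED && !aborted;
}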
diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
index 076d5b8014fb..493d4d9e0f79 100644
--- a/fs/btrfs/tree-checker.c
+++ b/fs/btrfs/tree-checker.c
@@ -23,6 +23,7 @@
#include "disk-io.h"
#include "compression.h"
#include "volumes.h"
+#include "misc.h"
/*
* Error message should follow the following format:
@@ -124,6 +125,74 @@ static u64 file_extent_end(struct extent_buffer *leaf,
return end;
}
+/*
+ * Customized report for dir_item, the only new important information is
+ * key->objectid, which represents inode number
+ */
+__printf(3, 4)
+__cold
+static void dir_item_err(const struct extent_buffer *eb, int slot,
+ const char *fmt, ...)
+{
+ const struct btrfs_fs_info *fs_info = eb->fs_info;
+ struct btrfs_key key;
+ struct va_format vaf;
+ va_list args;
+
+ btrfs_item_key_to_cpu(eb, &key, slot);
+ va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ btrfs_crit(fs_info,
+ "corrupt %s: root=%llu block=%llu slot=%d ino=%llu, %pV",
+ btrfs_header_level(eb) == 0 ? "leaf" : "node",
+ btrfs_header_owner(eb), btrfs_header_bytenr(eb), slot,
+ key.objectid, &vaf);
+ va_end(args);
+}
+
+/*
+ * This function checks prev_key->objectid to ensure the current key and
+ * prev_key share the same objectid (the inode number).
+ *
+ * This is to detect missing INODE_ITEM in subvolume trees.
+ *
+ * Return true if everything is OK or we don't need to check.
+ * Return false if anything is wrong.
+ */
+static bool check_prev_ino(struct extent_buffer *leaf,
+ struct btrfs_key *key, int slot,
+ struct btrfs_key *prev_key)
+{
+ /* No prev key, skip check */
+ if (slot == 0)
+ return true;
+
+ /* Only these key->types need to be checked */
+ ASSERT(key->type == BTRFS_XATTR_ITEM_KEY ||
+ key->type == BTRFS_INODE_REF_KEY ||
+ key->type == BTRFS_DIR_INDEX_KEY ||
+ key->type == BTRFS_DIR_ITEM_KEY ||
+ key->type == BTRFS_EXTENT_DATA_KEY);
+
+ /*
+ * Only subvolume trees along with their reloc trees need this check.
+ * Things like the log tree don't follow this ino requirement.
+ */
+ if (!is_fstree(btrfs_header_owner(leaf)))
+ return true;
+
+ if (key->objectid == prev_key->objectid)
+ return true;
+
+ /* Error found */
+ dir_item_err(leaf, slot,
+ "invalid previous key objectid, have %llu expect %llu",
+ prev_key->objectid, key->objectid);
+ return false;
+}
static int check_extent_data_item(struct extent_buffer *leaf,
struct btrfs_key *key, int slot,
struct btrfs_key *prev_key)
@@ -141,13 +210,33 @@ static int check_extent_data_item(struct extent_buffer *leaf,
return -EUCLEAN;
}
+ /*
+ * Previous key must have the same key->objectid (ino).
+ * It can be XATTR_ITEM, INODE_ITEM or just another EXTENT_DATA.
+ * But if objectids mismatch, it means we have a missing
+ * INODE_ITEM.
+ */
+ if (!check_prev_ino(leaf, key, slot, prev_key))
+ return -EUCLEAN;
+
fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
- if (btrfs_file_extent_type(leaf, fi) > BTRFS_FILE_EXTENT_TYPES) {
+ /*
+ * Make sure the item contains at least inline header, so the file
+ * extent type is not some garbage.
+ */
+ if (item_size < BTRFS_FILE_EXTENT_INLINE_DATA_START) {
+ file_extent_err(leaf, slot,
+ "invalid item size, have %u expect [%lu, %u)",
+ item_size, BTRFS_FILE_EXTENT_INLINE_DATA_START,
+ SZ_4K);
+ return -EUCLEAN;
+ }
+ if (btrfs_file_extent_type(leaf, fi) >= BTRFS_NR_FILE_EXTENT_TYPES) {
file_extent_err(leaf, slot,
"invalid type for file extent, have %u expect range [0, %u]",
btrfs_file_extent_type(leaf, fi),
- BTRFS_FILE_EXTENT_TYPES);
+ BTRFS_NR_FILE_EXTENT_TYPES - 1);
return -EUCLEAN;
}
@@ -155,11 +244,11 @@ static int check_extent_data_item(struct extent_buffer *leaf,
* Support for new compression/encryption must introduce incompat flag,
* and must be caught in open_ctree().
*/
- if (btrfs_file_extent_compression(leaf, fi) > BTRFS_COMPRESS_TYPES) {
+ if (btrfs_file_extent_compression(leaf, fi) >= BTRFS_NR_COMPRESS_TYPES) {
file_extent_err(leaf, slot,
"invalid compression for file extent, have %u expect range [0, %u]",
btrfs_file_extent_compression(leaf, fi),
- BTRFS_COMPRESS_TYPES);
+ BTRFS_NR_COMPRESS_TYPES - 1);
return -EUCLEAN;
}
if (btrfs_file_extent_encryption(leaf, fi)) {
@@ -270,42 +359,17 @@ static int check_csum_item(struct extent_buffer *leaf, struct btrfs_key *key,
return 0;
}
-/*
- * Customized reported for dir_item, only important new info is key->objectid,
- * which represents inode number
- */
-__printf(3, 4)
-__cold
-static void dir_item_err(const struct extent_buffer *eb, int slot,
- const char *fmt, ...)
-{
- const struct btrfs_fs_info *fs_info = eb->fs_info;
- struct btrfs_key key;
- struct va_format vaf;
- va_list args;
-
- btrfs_item_key_to_cpu(eb, &key, slot);
- va_start(args, fmt);
-
- vaf.fmt = fmt;
- vaf.va = &args;
-
- btrfs_crit(fs_info,
- "corrupt %s: root=%llu block=%llu slot=%d ino=%llu, %pV",
- btrfs_header_level(eb) == 0 ? "leaf" : "node",
- btrfs_header_owner(eb), btrfs_header_bytenr(eb), slot,
- key.objectid, &vaf);
- va_end(args);
-}
-
static int check_dir_item(struct extent_buffer *leaf,
- struct btrfs_key *key, int slot)
+ struct btrfs_key *key, struct btrfs_key *prev_key,
+ int slot)
{
struct btrfs_fs_info *fs_info = leaf->fs_info;
struct btrfs_dir_item *di;
u32 item_size = btrfs_item_size_nr(leaf, slot);
u32 cur = 0;
+ if (!check_prev_ino(leaf, key, slot, prev_key))
+ return -EUCLEAN;
di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
while (cur < item_size) {
u32 name_len;
@@ -459,23 +523,23 @@ static int check_block_group_item(struct extent_buffer *leaf,
read_extent_buffer(leaf, &bgi, btrfs_item_ptr_offset(leaf, slot),
sizeof(bgi));
- if (btrfs_block_group_chunk_objectid(&bgi) !=
+ if (btrfs_stack_block_group_chunk_objectid(&bgi) !=
BTRFS_FIRST_CHUNK_TREE_OBJECTID) {
block_group_err(leaf, slot,
"invalid block group chunk objectid, have %llu expect %llu",
- btrfs_block_group_chunk_objectid(&bgi),
+ btrfs_stack_block_group_chunk_objectid(&bgi),
BTRFS_FIRST_CHUNK_TREE_OBJECTID);
return -EUCLEAN;
}
- if (btrfs_block_group_used(&bgi) > key->offset) {
+ if (btrfs_stack_block_group_used(&bgi) > key->offset) {
block_group_err(leaf, slot,
"invalid block group used, have %llu expect [0, %llu)",
- btrfs_block_group_used(&bgi), key->offset);
+ btrfs_stack_block_group_used(&bgi), key->offset);
return -EUCLEAN;
}
- flags = btrfs_block_group_flags(&bgi);
+ flags = btrfs_stack_block_group_flags(&bgi);
if (hweight64(flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) > 1) {
block_group_err(leaf, slot,
"invalid profile flags, have 0x%llx (%lu bits set) expect no more than 1 bit set",
@@ -609,7 +673,7 @@ int btrfs_check_chunk_valid(struct extent_buffer *leaf,
return -EUCLEAN;
}
- if (!is_power_of_2(type & BTRFS_BLOCK_GROUP_PROFILE_MASK) &&
+ if (!has_single_bit_set(type & BTRFS_BLOCK_GROUP_PROFILE_MASK) &&
(type & BTRFS_BLOCK_GROUP_PROFILE_MASK) != 0) {
chunk_err(leaf, chunk, logical,
"invalid chunk profile flag: 0x%llx, expect 0 or 1 bit set",
@@ -785,11 +849,11 @@ static int check_inode_item(struct extent_buffer *leaf,
}
/*
- * S_IFMT is not bit mapped so we can't completely rely on is_power_of_2,
- * but is_power_of_2() can save us from checking FIFO/CHR/DIR/REG.
- * Only needs to check BLK, LNK and SOCKS
+ * S_IFMT is not bit mapped so we can't completely rely on
+ * is_power_of_2/has_single_bit_set, but it can save us from checking
+ * FIFO/CHR/DIR/REG. We only need to check BLK, LNK and SOCK.
*/
- if (!is_power_of_2(mode & S_IFMT)) {
+ if (!has_single_bit_set(mode & S_IFMT)) {
if (!S_ISLNK(mode) && !S_ISBLK(mode) && !S_ISSOCK(mode)) {
inode_item_err(fs_info, leaf, slot,
"invalid mode: has 0%o expect valid S_IF* bit(s)",
@@ -1010,8 +1074,8 @@ static int check_extent_item(struct extent_buffer *leaf,
btrfs_super_generation(fs_info->super_copy) + 1);
return -EUCLEAN;
}
- if (!is_power_of_2(flags & (BTRFS_EXTENT_FLAG_DATA |
- BTRFS_EXTENT_FLAG_TREE_BLOCK))) {
+ if (!has_single_bit_set(flags & (BTRFS_EXTENT_FLAG_DATA |
+ BTRFS_EXTENT_FLAG_TREE_BLOCK))) {
extent_err(leaf, slot,
"invalid extent flag, have 0x%llx expect 1 bit set in 0x%llx",
flags, BTRFS_EXTENT_FLAG_DATA |
@@ -1224,6 +1288,58 @@ static int check_extent_data_ref(struct extent_buffer *leaf,
return 0;
}
+#define inode_ref_err(fs_info, eb, slot, fmt, args...) \
+ inode_item_err(fs_info, eb, slot, fmt, ##args)
+static int check_inode_ref(struct extent_buffer *leaf,
+ struct btrfs_key *key, struct btrfs_key *prev_key,
+ int slot)
+{
+ struct btrfs_inode_ref *iref;
+ unsigned long ptr;
+ unsigned long end;
+
+ if (!check_prev_ino(leaf, key, slot, prev_key))
+ return -EUCLEAN;
+ /* namelen can't be 0, so item_size == sizeof(*iref) is also invalid */
+ if (btrfs_item_size_nr(leaf, slot) <= sizeof(*iref)) {
+ inode_ref_err(fs_info, leaf, slot,
+ "invalid item size, have %u expect (%zu, %u)",
+ btrfs_item_size_nr(leaf, slot),
+ sizeof(*iref), BTRFS_LEAF_DATA_SIZE(leaf->fs_info));
+ return -EUCLEAN;
+ }
+
+ ptr = btrfs_item_ptr_offset(leaf, slot);
+ end = ptr + btrfs_item_size_nr(leaf, slot);
+ while (ptr < end) {
+ u16 namelen;
+
+ if (ptr + sizeof(*iref) > end) {
+ inode_ref_err(fs_info, leaf, slot,
+ "inode ref overflow, ptr %lu end %lu inode_ref_size %zu",
+ ptr, end, sizeof(*iref));
+ return -EUCLEAN;
+ }
+
+ iref = (struct btrfs_inode_ref *)ptr;
+ namelen = btrfs_inode_ref_name_len(leaf, iref);
+ if (ptr + sizeof(*iref) + namelen > end) {
+ inode_ref_err(fs_info, leaf, slot,
+ "inode ref overflow, ptr %lu end %lu namelen %u",
+ ptr, end, namelen);
+ return -EUCLEAN;
+ }
+
+ /*
+ * NOTE: In theory we should record all found index numbers
+ * to detect duplicated indexes, but that would be too time
+ * consuming for inodes with many hard links.
+ */
+ ptr += sizeof(*iref) + namelen;
+ }
+ return 0;
+}
+
/*
* Common point to switch the item-specific validation.
*/
@@ -1244,7 +1360,10 @@ static int check_leaf_item(struct extent_buffer *leaf,
case BTRFS_DIR_ITEM_KEY:
case BTRFS_DIR_INDEX_KEY:
case BTRFS_XATTR_ITEM_KEY:
- ret = check_dir_item(leaf, key, slot);
+ ret = check_dir_item(leaf, key, prev_key, slot);
+ break;
+ case BTRFS_INODE_REF_KEY:
+ ret = check_inode_ref(leaf, key, prev_key, slot);
break;
case BTRFS_BLOCK_GROUP_ITEM_KEY:
ret = check_block_group_item(leaf, key, slot);
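check_prev_ino() is introduced earlier in this patch, outside the hunks shown here. It exploits the key ordering of subvolume trees: an inode's INODE_ITEM sorts before its INODE_REF and DIR items, so for those item types the previous slot must belong to the same inode, or the INODE_ITEM is missing. A simplified sketch of the contract (the real helper also reports the error and skips non-fs trees):

static inline bool check_prev_ino(struct extent_buffer *leaf,
				  struct btrfs_key *key, int slot,
				  struct btrfs_key *prev_key)
{
	if (slot == 0)		/* no previous item in this leaf */
		return true;
	/* leaf is only needed for error reporting in the real helper */
	return prev_key->objectid == key->objectid;
}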
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 8a6cc600bf18..6f757361db53 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -559,7 +559,7 @@ static noinline struct inode *read_one_inode(struct btrfs_root *root,
key.objectid = objectid;
key.type = BTRFS_INODE_ITEM_KEY;
key.offset = 0;
- inode = btrfs_iget(root->fs_info->sb, &key, root, NULL);
+ inode = btrfs_iget(root->fs_info->sb, &key, root);
if (IS_ERR(inode))
inode = NULL;
return inode;
@@ -945,54 +945,32 @@ static noinline int backref_in_log(struct btrfs_root *log,
const char *name, int namelen)
{
struct btrfs_path *path;
- struct btrfs_inode_ref *ref;
- unsigned long ptr;
- unsigned long ptr_end;
- unsigned long name_ptr;
- int found_name_len;
- int item_size;
int ret;
- int match = 0;
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
ret = btrfs_search_slot(NULL, log, key, path, 0, 0);
- if (ret != 0)
+ if (ret < 0) {
goto out;
-
- ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
-
- if (key->type == BTRFS_INODE_EXTREF_KEY) {
- if (btrfs_find_name_in_ext_backref(path->nodes[0],
- path->slots[0],
- ref_objectid,
- name, namelen))
- match = 1;
-
+ } else if (ret == 1) {
+ ret = 0;
goto out;
}
- item_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
- ptr_end = ptr + item_size;
- while (ptr < ptr_end) {
- ref = (struct btrfs_inode_ref *)ptr;
- found_name_len = btrfs_inode_ref_name_len(path->nodes[0], ref);
- if (found_name_len == namelen) {
- name_ptr = (unsigned long)(ref + 1);
- ret = memcmp_extent_buffer(path->nodes[0], name,
- name_ptr, namelen);
- if (ret == 0) {
- match = 1;
- goto out;
- }
- }
- ptr = (unsigned long)(ref + 1) + found_name_len;
- }
+ if (key->type == BTRFS_INODE_EXTREF_KEY)
+ ret = !!btrfs_find_name_in_ext_backref(path->nodes[0],
+ path->slots[0],
+ ref_objectid,
+ name, namelen);
+ else
+ ret = !!btrfs_find_name_in_backref(path->nodes[0],
+ path->slots[0],
+ name, namelen);
out:
btrfs_free_path(path);
- return match;
+ return ret;
}
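backref_in_log() now returns a tri-state instead of a bool: 1 when the name is present in the log tree, 0 when it is not, and a negative errno when the search itself fails. Every caller rewritten below follows the same pattern:

	ret = backref_in_log(log_root, &search_key, parent_objectid,
			     victim_name, victim_name_len);
	if (ret < 0)
		return ret;		/* propagate the lookup failure */
	else if (ret == 0)
		drop_stale_ref();	/* placeholder for the per-caller work */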
static inline int __add_inode_ref(struct btrfs_trans_handle *trans,
@@ -1050,10 +1028,13 @@ again:
(unsigned long)(victim_ref + 1),
victim_name_len);
- if (!backref_in_log(log_root, &search_key,
- parent_objectid,
- victim_name,
- victim_name_len)) {
+ ret = backref_in_log(log_root, &search_key,
+ parent_objectid, victim_name,
+ victim_name_len);
+ if (ret < 0) {
+ kfree(victim_name);
+ return ret;
+ } else if (!ret) {
inc_nlink(&inode->vfs_inode);
btrfs_release_path(path);
@@ -1115,10 +1096,12 @@ again:
search_key.offset = btrfs_extref_hash(parent_objectid,
victim_name,
victim_name_len);
- ret = 0;
- if (!backref_in_log(log_root, &search_key,
- parent_objectid, victim_name,
- victim_name_len)) {
+ ret = backref_in_log(log_root, &search_key,
+ parent_objectid, victim_name,
+ victim_name_len);
+ if (ret < 0) {
+ return ret;
+ } else if (!ret) {
ret = -ENOENT;
victim_parent = read_one_inode(root,
parent_objectid);
@@ -1885,30 +1868,6 @@ static noinline int insert_one_name(struct btrfs_trans_handle *trans,
}
/*
- * Return true if an inode reference exists in the log for the given name,
- * inode and parent inode.
- */
-static bool name_in_log_ref(struct btrfs_root *log_root,
- const char *name, const int name_len,
- const u64 dirid, const u64 ino)
-{
- struct btrfs_key search_key;
-
- search_key.objectid = ino;
- search_key.type = BTRFS_INODE_REF_KEY;
- search_key.offset = dirid;
- if (backref_in_log(log_root, &search_key, dirid, name, name_len))
- return true;
-
- search_key.type = BTRFS_INODE_EXTREF_KEY;
- search_key.offset = btrfs_extref_hash(dirid, name, name_len);
- if (backref_in_log(log_root, &search_key, dirid, name, name_len))
- return true;
-
- return false;
-}
-
-/*
* take a single entry in a log directory item and replay it into
* the subvolume.
*
@@ -2024,8 +1983,31 @@ out:
return ret;
insert:
- if (name_in_log_ref(root->log_root, name, name_len,
- key->objectid, log_key.objectid)) {
+ /*
+ * Check if the inode reference exists in the log for the given name,
+ * inode and parent inode
+ */
+ found_key.objectid = log_key.objectid;
+ found_key.type = BTRFS_INODE_REF_KEY;
+ found_key.offset = key->objectid;
+ ret = backref_in_log(root->log_root, &found_key, 0, name, name_len);
+ if (ret < 0) {
+ goto out;
+ } else if (ret) {
+ /* The dentry will be added later. */
+ ret = 0;
+ update_size = false;
+ goto out;
+ }
+
+ found_key.objectid = log_key.objectid;
+ found_key.type = BTRFS_INODE_EXTREF_KEY;
+ found_key.offset = key->objectid;
+ ret = backref_in_log(root->log_root, &found_key, key->objectid, name,
+ name_len);
+ if (ret < 0) {
+ goto out;
+ } else if (ret) {
/* The dentry will be added later. */
ret = 0;
update_size = false;
@@ -2869,7 +2851,7 @@ static int walk_log_tree(struct btrfs_trans_handle *trans,
level = btrfs_header_level(log->node);
orig_level = level;
path->nodes[level] = log->node;
- extent_buffer_get(log->node);
+ atomic_inc(&log->node->refs);
path->slots[level] = 0;
while (1) {
@@ -4983,7 +4965,7 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans,
key.objectid = ino;
key.type = BTRFS_INODE_ITEM_KEY;
key.offset = 0;
- inode = btrfs_iget(fs_info->sb, &key, root, NULL);
+ inode = btrfs_iget(fs_info->sb, &key, root);
/*
* If the other inode that had a conflicting dir entry was
* deleted in the current transaction, we need to log its parent
@@ -4993,8 +4975,7 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans,
ret = PTR_ERR(inode);
if (ret == -ENOENT) {
key.objectid = parent;
- inode = btrfs_iget(fs_info->sb, &key, root,
- NULL);
+ inode = btrfs_iget(fs_info->sb, &key, root);
if (IS_ERR(inode)) {
ret = PTR_ERR(inode);
} else {
@@ -5699,7 +5680,7 @@ process_leaf:
continue;
btrfs_release_path(path);
- di_inode = btrfs_iget(fs_info->sb, &di_key, root, NULL);
+ di_inode = btrfs_iget(fs_info->sb, &di_key, root);
if (IS_ERR(di_inode)) {
ret = PTR_ERR(di_inode);
goto next_dir_inode;
@@ -5825,8 +5806,7 @@ static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
cur_offset = item_size;
}
- dir_inode = btrfs_iget(fs_info->sb, &inode_key,
- root, NULL);
+ dir_inode = btrfs_iget(fs_info->sb, &inode_key, root);
/*
* If the parent inode was deleted, return an error to
* fallback to a transaction commit. This is to prevent
@@ -5900,7 +5880,7 @@ static int log_new_ancestors(struct btrfs_trans_handle *trans,
search_key.objectid = found_key.offset;
search_key.type = BTRFS_INODE_ITEM_KEY;
search_key.offset = 0;
- inode = btrfs_iget(fs_info->sb, &search_key, root, NULL);
+ inode = btrfs_iget(fs_info->sb, &search_key, root);
if (IS_ERR(inode))
return PTR_ERR(inode);
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index e04409f85063..d8e5560db285 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -58,6 +58,30 @@ const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
.bg_flag = BTRFS_BLOCK_GROUP_RAID1,
.mindev_error = BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET,
},
+ [BTRFS_RAID_RAID1C3] = {
+ .sub_stripes = 1,
+ .dev_stripes = 1,
+ .devs_max = 0,
+ .devs_min = 3,
+ .tolerated_failures = 2,
+ .devs_increment = 3,
+ .ncopies = 3,
+ .raid_name = "raid1c3",
+ .bg_flag = BTRFS_BLOCK_GROUP_RAID1C3,
+ .mindev_error = BTRFS_ERROR_DEV_RAID1C3_MIN_NOT_MET,
+ },
+ [BTRFS_RAID_RAID1C4] = {
+ .sub_stripes = 1,
+ .dev_stripes = 1,
+ .devs_max = 0,
+ .devs_min = 4,
+ .tolerated_failures = 3,
+ .devs_increment = 4,
+ .ncopies = 4,
+ .raid_name = "raid1c4",
+ .bg_flag = BTRFS_BLOCK_GROUP_RAID1C4,
+ .mindev_error = BTRFS_ERROR_DEV_RAID1C4_MIN_NOT_MET,
+ },
[BTRFS_RAID_DUP] = {
.sub_stripes = 1,
.dev_stripes = 2,
@@ -297,7 +321,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);
-struct list_head *btrfs_get_fs_uuids(void)
+struct list_head * __attribute_const__ btrfs_get_fs_uuids(void)
{
return &fs_uuids;
}
@@ -397,8 +421,6 @@ static struct btrfs_device *__alloc_device(void)
INIT_LIST_HEAD(&dev->dev_alloc_list);
INIT_LIST_HEAD(&dev->post_commit_list);
- spin_lock_init(&dev->io_lock);
-
atomic_set(&dev->reada_in_flight, 0);
atomic_set(&dev->dev_stats_ccnt, 0);
btrfs_device_data_ordered_init(dev);
@@ -501,212 +523,6 @@ error:
return ret;
}
-static void requeue_list(struct btrfs_pending_bios *pending_bios,
- struct bio *head, struct bio *tail)
-{
-
- struct bio *old_head;
-
- old_head = pending_bios->head;
- pending_bios->head = head;
- if (pending_bios->tail)
- tail->bi_next = old_head;
- else
- pending_bios->tail = tail;
-}
-
-/*
- * we try to collect pending bios for a device so we don't get a large
- * number of procs sending bios down to the same device. This greatly
- * improves the schedulers ability to collect and merge the bios.
- *
- * But, it also turns into a long list of bios to process and that is sure
- * to eventually make the worker thread block. The solution here is to
- * make some progress and then put this work struct back at the end of
- * the list if the block device is congested. This way, multiple devices
- * can make progress from a single worker thread.
- */
-static noinline void run_scheduled_bios(struct btrfs_device *device)
-{
- struct btrfs_fs_info *fs_info = device->fs_info;
- struct bio *pending;
- struct backing_dev_info *bdi;
- struct btrfs_pending_bios *pending_bios;
- struct bio *tail;
- struct bio *cur;
- int again = 0;
- unsigned long num_run;
- unsigned long batch_run = 0;
- unsigned long last_waited = 0;
- int force_reg = 0;
- int sync_pending = 0;
- struct blk_plug plug;
-
- /*
- * this function runs all the bios we've collected for
- * a particular device. We don't want to wander off to
- * another device without first sending all of these down.
- * So, setup a plug here and finish it off before we return
- */
- blk_start_plug(&plug);
-
- bdi = device->bdev->bd_bdi;
-
-loop:
- spin_lock(&device->io_lock);
-
-loop_lock:
- num_run = 0;
-
- /* take all the bios off the list at once and process them
- * later on (without the lock held). But, remember the
- * tail and other pointers so the bios can be properly reinserted
- * into the list if we hit congestion
- */
- if (!force_reg && device->pending_sync_bios.head) {
- pending_bios = &device->pending_sync_bios;
- force_reg = 1;
- } else {
- pending_bios = &device->pending_bios;
- force_reg = 0;
- }
-
- pending = pending_bios->head;
- tail = pending_bios->tail;
- WARN_ON(pending && !tail);
-
- /*
- * if pending was null this time around, no bios need processing
- * at all and we can stop. Otherwise it'll loop back up again
- * and do an additional check so no bios are missed.
- *
- * device->running_pending is used to synchronize with the
- * schedule_bio code.
- */
- if (device->pending_sync_bios.head == NULL &&
- device->pending_bios.head == NULL) {
- again = 0;
- device->running_pending = 0;
- } else {
- again = 1;
- device->running_pending = 1;
- }
-
- pending_bios->head = NULL;
- pending_bios->tail = NULL;
-
- spin_unlock(&device->io_lock);
-
- while (pending) {
-
- rmb();
- /* we want to work on both lists, but do more bios on the
- * sync list than the regular list
- */
- if ((num_run > 32 &&
- pending_bios != &device->pending_sync_bios &&
- device->pending_sync_bios.head) ||
- (num_run > 64 && pending_bios == &device->pending_sync_bios &&
- device->pending_bios.head)) {
- spin_lock(&device->io_lock);
- requeue_list(pending_bios, pending, tail);
- goto loop_lock;
- }
-
- cur = pending;
- pending = pending->bi_next;
- cur->bi_next = NULL;
-
- BUG_ON(atomic_read(&cur->__bi_cnt) == 0);
-
- /*
- * if we're doing the sync list, record that our
- * plug has some sync requests on it
- *
- * If we're doing the regular list and there are
- * sync requests sitting around, unplug before
- * we add more
- */
- if (pending_bios == &device->pending_sync_bios) {
- sync_pending = 1;
- } else if (sync_pending) {
- blk_finish_plug(&plug);
- blk_start_plug(&plug);
- sync_pending = 0;
- }
-
- btrfsic_submit_bio(cur);
- num_run++;
- batch_run++;
-
- cond_resched();
-
- /*
- * we made progress, there is more work to do and the bdi
- * is now congested. Back off and let other work structs
- * run instead
- */
- if (pending && bdi_write_congested(bdi) && batch_run > 8 &&
- fs_info->fs_devices->open_devices > 1) {
- struct io_context *ioc;
-
- ioc = current->io_context;
-
- /*
- * the main goal here is that we don't want to
- * block if we're going to be able to submit
- * more requests without blocking.
- *
- * This code does two great things, it pokes into
- * the elevator code from a filesystem _and_
- * it makes assumptions about how batching works.
- */
- if (ioc && ioc->nr_batch_requests > 0 &&
- time_before(jiffies, ioc->last_waited + HZ/50UL) &&
- (last_waited == 0 ||
- ioc->last_waited == last_waited)) {
- /*
- * we want to go through our batch of
- * requests and stop. So, we copy out
- * the ioc->last_waited time and test
- * against it before looping
- */
- last_waited = ioc->last_waited;
- cond_resched();
- continue;
- }
- spin_lock(&device->io_lock);
- requeue_list(pending_bios, pending, tail);
- device->running_pending = 1;
-
- spin_unlock(&device->io_lock);
- btrfs_queue_work(fs_info->submit_workers,
- &device->work);
- goto done;
- }
- }
-
- cond_resched();
- if (again)
- goto loop;
-
- spin_lock(&device->io_lock);
- if (device->pending_bios.head || device->pending_sync_bios.head)
- goto loop_lock;
- spin_unlock(&device->io_lock);
-
-done:
- blk_finish_plug(&plug);
-}
-
-static void pending_bios_fn(struct btrfs_work *work)
-{
- struct btrfs_device *device;
-
- device = container_of(work, struct btrfs_device, work);
- run_scheduled_bios(device);
-}
-
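Everything deleted above existed to batch and requeue bios per device so a single worker thread could feed the legacy elevator without congesting it. With the multiqueue block layer there is no congestion dance left to play, so submission becomes direct and the surviving path (see submit_stripe_bio() further down) reduces to:

	btrfs_bio_counter_inc_noblocked(fs_info);
	btrfsic_submit_bio(bio);	/* runs on the caller's plug */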
static bool device_path_matched(const char *path, struct btrfs_device *device)
{
int found;
@@ -818,7 +634,7 @@ static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
}
clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
- fs_devices->seeding = 1;
+ fs_devices->seeding = true;
} else {
if (bdev_read_only(bdev))
clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
@@ -828,7 +644,7 @@ static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
q = bdev_get_queue(bdev);
if (!blk_queue_nonrot(q))
- fs_devices->rotating = 1;
+ fs_devices->rotating = true;
device->bdev = bdev;
clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
@@ -1005,11 +821,15 @@ static noinline struct btrfs_device *device_list_add(const char *path,
*new_device_added = true;
if (disk_super->label[0])
- pr_info("BTRFS: device label %s devid %llu transid %llu %s\n",
- disk_super->label, devid, found_transid, path);
+ pr_info(
+ "BTRFS: device label %s devid %llu transid %llu %s scanned by %s (%d)\n",
+ disk_super->label, devid, found_transid, path,
+ current->comm, task_pid_nr(current));
else
- pr_info("BTRFS: device fsid %pU devid %llu transid %llu %s\n",
- disk_super->fsid, devid, found_transid, path);
+ pr_info(
+ "BTRFS: device fsid %pU devid %llu transid %llu %s scanned by %s (%d)\n",
+ disk_super->fsid, devid, found_transid, path,
+ current->comm, task_pid_nr(current));
} else if (!device->name || strcmp(device->name->str, path)) {
/*
@@ -1295,7 +1115,7 @@ static int close_fs_devices(struct btrfs_fs_devices *fs_devices)
WARN_ON(fs_devices->open_devices);
WARN_ON(fs_devices->rw_devices);
fs_devices->opened = 0;
- fs_devices->seeding = 0;
+ fs_devices->seeding = false;
return 0;
}
@@ -2048,7 +1868,7 @@ static struct btrfs_device * btrfs_find_next_active_device(
 * where this function is called, there should always be another device (or
 * this_dev) which is active.
*/
-void btrfs_assign_next_active_device(struct btrfs_device *device,
+void __cold btrfs_assign_next_active_device(struct btrfs_device *device,
struct btrfs_device *this_dev)
{
struct btrfs_fs_info *fs_info = device->fs_info;
@@ -2450,11 +2270,11 @@ static int btrfs_prepare_sprout(struct btrfs_fs_info *fs_info)
list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
mutex_unlock(&fs_info->chunk_mutex);
- fs_devices->seeding = 0;
+ fs_devices->seeding = false;
fs_devices->num_devices = 0;
fs_devices->open_devices = 0;
fs_devices->missing_devices = 0;
- fs_devices->rotating = 0;
+ fs_devices->rotating = false;
fs_devices->seed = seed_devices;
generate_random_uuid(fs_devices->fsid);
@@ -2649,7 +2469,7 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
atomic64_add(device->total_bytes, &fs_info->free_chunk_space);
if (!blk_queue_nonrot(q))
- fs_devices->rotating = 1;
+ fs_devices->rotating = true;
orig_super_total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
btrfs_set_super_total_bytes(fs_info->super_copy,
@@ -3177,7 +2997,7 @@ error:
static int btrfs_may_alloc_data_chunk(struct btrfs_fs_info *fs_info,
u64 chunk_offset)
{
- struct btrfs_block_group_cache *cache;
+ struct btrfs_block_group *cache;
u64 bytes_used;
u64 chunk_type;
@@ -3186,27 +3006,28 @@ static int btrfs_may_alloc_data_chunk(struct btrfs_fs_info *fs_info,
chunk_type = cache->flags;
btrfs_put_block_group(cache);
- if (chunk_type & BTRFS_BLOCK_GROUP_DATA) {
- spin_lock(&fs_info->data_sinfo->lock);
- bytes_used = fs_info->data_sinfo->bytes_used;
- spin_unlock(&fs_info->data_sinfo->lock);
+ if (!(chunk_type & BTRFS_BLOCK_GROUP_DATA))
+ return 0;
+
+ spin_lock(&fs_info->data_sinfo->lock);
+ bytes_used = fs_info->data_sinfo->bytes_used;
+ spin_unlock(&fs_info->data_sinfo->lock);
- if (!bytes_used) {
- struct btrfs_trans_handle *trans;
- int ret;
+ if (!bytes_used) {
+ struct btrfs_trans_handle *trans;
+ int ret;
- trans = btrfs_join_transaction(fs_info->tree_root);
- if (IS_ERR(trans))
- return PTR_ERR(trans);
+ trans = btrfs_join_transaction(fs_info->tree_root);
+ if (IS_ERR(trans))
+ return PTR_ERR(trans);
- ret = btrfs_force_chunk_alloc(trans,
- BTRFS_BLOCK_GROUP_DATA);
- btrfs_end_transaction(trans);
- if (ret < 0)
- return ret;
- return 1;
- }
+ ret = btrfs_force_chunk_alloc(trans, BTRFS_BLOCK_GROUP_DATA);
+ btrfs_end_transaction(trans);
+ if (ret < 0)
+ return ret;
+ return 1;
}
+
return 0;
}
@@ -3385,28 +3206,28 @@ static int chunk_profiles_filter(u64 chunk_type,
static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
struct btrfs_balance_args *bargs)
{
- struct btrfs_block_group_cache *cache;
+ struct btrfs_block_group *cache;
u64 chunk_used;
u64 user_thresh_min;
u64 user_thresh_max;
int ret = 1;
cache = btrfs_lookup_block_group(fs_info, chunk_offset);
- chunk_used = btrfs_block_group_used(&cache->item);
+ chunk_used = cache->used;
if (bargs->usage_min == 0)
user_thresh_min = 0;
else
- user_thresh_min = div_factor_fine(cache->key.offset,
- bargs->usage_min);
+ user_thresh_min = div_factor_fine(cache->length,
+ bargs->usage_min);
if (bargs->usage_max == 0)
user_thresh_max = 1;
else if (bargs->usage_max > 100)
- user_thresh_max = cache->key.offset;
+ user_thresh_max = cache->length;
else
- user_thresh_max = div_factor_fine(cache->key.offset,
- bargs->usage_max);
+ user_thresh_max = div_factor_fine(cache->length,
+ bargs->usage_max);
if (user_thresh_min <= chunk_used && chunk_used < user_thresh_max)
ret = 0;
@@ -3418,20 +3239,19 @@ static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_off
static int chunk_usage_filter(struct btrfs_fs_info *fs_info,
u64 chunk_offset, struct btrfs_balance_args *bargs)
{
- struct btrfs_block_group_cache *cache;
+ struct btrfs_block_group *cache;
u64 chunk_used, user_thresh;
int ret = 1;
cache = btrfs_lookup_block_group(fs_info, chunk_offset);
- chunk_used = btrfs_block_group_used(&cache->item);
+ chunk_used = cache->used;
if (bargs->usage_min == 0)
user_thresh = 1;
else if (bargs->usage > 100)
- user_thresh = cache->key.offset;
+ user_thresh = cache->length;
else
- user_thresh = div_factor_fine(cache->key.offset,
- bargs->usage);
+ user_thresh = div_factor_fine(cache->length, bargs->usage);
if (chunk_used < user_thresh)
ret = 0;
@@ -3844,12 +3664,7 @@ static int alloc_profile_is_valid(u64 flags, int extended)
if (flags == 0)
return !extended; /* "0" is valid for usual profiles */
- /* true if exactly one bit set */
- /*
- * Don't use is_power_of_2(unsigned long) because it won't work
- * for the single profile (1ULL << 48) on 32-bit CPUs.
- */
- return flags != 0 && (flags & (flags - 1)) == 0;
+ return has_single_bit_set(flags);
}
static inline int balance_need_close(struct btrfs_fs_info *fs_info)
@@ -4036,7 +3851,7 @@ int btrfs_balance(struct btrfs_fs_info *fs_info,
int ret;
u64 num_devices;
unsigned seq;
- bool reducing_integrity;
+ bool reducing_redundancy;
int i;
if (btrfs_fs_closing(fs_info) ||
@@ -4119,9 +3934,9 @@ int btrfs_balance(struct btrfs_fs_info *fs_info,
((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
(fs_info->avail_metadata_alloc_bits & allowed) &&
!(bctl->meta.target & allowed)))
- reducing_integrity = true;
+ reducing_redundancy = true;
else
- reducing_integrity = false;
+ reducing_redundancy = false;
/* if we're not converting, the target field is uninitialized */
meta_target = (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
@@ -4130,13 +3945,13 @@ int btrfs_balance(struct btrfs_fs_info *fs_info,
bctl->data.target : fs_info->avail_data_alloc_bits;
} while (read_seqretry(&fs_info->profiles_lock, seq));
- if (reducing_integrity) {
+ if (reducing_redundancy) {
if (bctl->flags & BTRFS_BALANCE_FORCE) {
btrfs_info(fs_info,
- "balance: force reducing metadata integrity");
+ "balance: force reducing metadata redundancy");
} else {
btrfs_err(fs_info,
- "balance: reduces metadata integrity, use --force if you want this");
+ "balance: reduces metadata redundancy, use --force if you want this");
ret = -EINVAL;
goto out;
}
@@ -4902,6 +4717,14 @@ static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type)
btrfs_set_fs_incompat(info, RAID56);
}
+static void check_raid1c34_incompat_flag(struct btrfs_fs_info *info, u64 type)
+{
+ if (!(type & (BTRFS_BLOCK_GROUP_RAID1C3 | BTRFS_BLOCK_GROUP_RAID1C4)))
+ return;
+
+ btrfs_set_fs_incompat(info, RAID1C34);
+}
+
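The two new profiles follow the RAID1 pattern: each block is kept in ncopies full copies, so up to ncopies - 1 devices may fail, and usable capacity is roughly raw capacity divided by ncopies. Summarized from the btrfs_raid_array entries above:

/*
 *	profile    ncopies   devs_min   tolerated_failures
 *	raid1c3       3         3               2
 *	raid1c4       4         4               3
 */

The RAID1C34 incompat bit set here keeps older kernels, which cannot parse such chunks, from mounting the filesystem.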
static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
u64 start, u64 type)
{
@@ -5048,8 +4871,11 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
btrfs_cmp_device_info, NULL);
- /* round down to number of usable stripes */
- ndevs = round_down(ndevs, devs_increment);
+ /*
+ * Round down to the number of usable stripes; devs_increment is not
+ * necessarily a power of two, so we can't use round_down()
+ */
+ ndevs -= ndevs % devs_increment;
if (ndevs < devs_min) {
ret = -ENOSPC;
@@ -5165,6 +4991,7 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
free_extent_map(em);
check_raid56_incompat_flag(info, type);
+ check_raid1c34_incompat_flag(info, type);
kfree(devices_info);
return 0;
@@ -5583,12 +5410,13 @@ void btrfs_put_bbio(struct btrfs_bio *bbio)
* replace.
*/
static int __btrfs_map_block_for_discard(struct btrfs_fs_info *fs_info,
- u64 logical, u64 length,
+ u64 logical, u64 *length_ret,
struct btrfs_bio **bbio_ret)
{
struct extent_map *em;
struct map_lookup *map;
struct btrfs_bio *bbio;
+ u64 length = *length_ret;
u64 offset;
u64 stripe_nr;
u64 stripe_nr_end;
@@ -5621,7 +5449,8 @@ static int __btrfs_map_block_for_discard(struct btrfs_fs_info *fs_info,
}
offset = logical - em->start;
- length = min_t(u64, em->len - offset, length);
+ length = min_t(u64, em->start + em->len - logical, length);
+ *length_ret = length;
stripe_len = map->stripe_len;
/*
@@ -6036,7 +5865,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
if (op == BTRFS_MAP_DISCARD)
return __btrfs_map_block_for_discard(fs_info, logical,
- *length, bbio_ret);
+ length, bbio_ret);
ret = btrfs_get_io_geometry(fs_info, op, logical, *length, &geom);
if (ret < 0)
@@ -6416,52 +6245,8 @@ static void btrfs_end_bio(struct bio *bio)
}
}
-/*
- * see run_scheduled_bios for a description of why bios are collected for
- * async submit.
- *
- * This will add one bio to the pending list for a device and make sure
- * the work struct is scheduled.
- */
-static noinline void btrfs_schedule_bio(struct btrfs_device *device,
- struct bio *bio)
-{
- struct btrfs_fs_info *fs_info = device->fs_info;
- int should_queue = 1;
- struct btrfs_pending_bios *pending_bios;
-
- /* don't bother with additional async steps for reads, right now */
- if (bio_op(bio) == REQ_OP_READ) {
- btrfsic_submit_bio(bio);
- return;
- }
-
- WARN_ON(bio->bi_next);
- bio->bi_next = NULL;
-
- spin_lock(&device->io_lock);
- if (op_is_sync(bio->bi_opf))
- pending_bios = &device->pending_sync_bios;
- else
- pending_bios = &device->pending_bios;
-
- if (pending_bios->tail)
- pending_bios->tail->bi_next = bio;
-
- pending_bios->tail = bio;
- if (!pending_bios->head)
- pending_bios->head = bio;
- if (device->running_pending)
- should_queue = 0;
-
- spin_unlock(&device->io_lock);
-
- if (should_queue)
- btrfs_queue_work(fs_info->submit_workers, &device->work);
-}
-
static void submit_stripe_bio(struct btrfs_bio *bbio, struct bio *bio,
- u64 physical, int dev_nr, int async)
+ u64 physical, int dev_nr)
{
struct btrfs_device *dev = bbio->stripes[dev_nr].dev;
struct btrfs_fs_info *fs_info = bbio->fs_info;
@@ -6479,10 +6264,7 @@ static void submit_stripe_bio(struct btrfs_bio *bbio, struct bio *bio,
btrfs_bio_counter_inc_noblocked(fs_info);
- if (async)
- btrfs_schedule_bio(dev, bio);
- else
- btrfsic_submit_bio(bio);
+ btrfsic_submit_bio(bio);
}
static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
@@ -6503,7 +6285,7 @@ static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
}
blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
- int mirror_num, int async_submit)
+ int mirror_num)
{
struct btrfs_device *dev;
struct bio *first_bio = bio;
@@ -6572,7 +6354,7 @@ blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
bio = first_bio;
submit_stripe_bio(bbio, bio, bbio->stripes[dev_nr].physical,
- dev_nr, async_submit);
+ dev_nr);
}
btrfs_bio_counter_dec(fs_info);
return BLK_STS_OK;
@@ -6676,9 +6458,6 @@ struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
else
generate_random_uuid(dev->uuid);
- btrfs_init_work(&dev->work, btrfs_submit_helper,
- pending_bios_fn, NULL, NULL);
-
return dev;
}
@@ -6875,7 +6654,7 @@ static struct btrfs_fs_devices *open_seed_devices(struct btrfs_fs_info *fs_info,
if (IS_ERR(fs_devices))
return fs_devices;
- fs_devices->seeding = 1;
+ fs_devices->seeding = true;
fs_devices->opened = 1;
return fs_devices;
}
@@ -7064,48 +6843,49 @@ int btrfs_read_sys_array(struct btrfs_fs_info *fs_info)
sb_array_offset += len;
cur_offset += len;
- if (key.type == BTRFS_CHUNK_ITEM_KEY) {
- chunk = (struct btrfs_chunk *)sb_array_offset;
- /*
- * At least one btrfs_chunk with one stripe must be
- * present, exact stripe count check comes afterwards
- */
- len = btrfs_chunk_item_size(1);
- if (cur_offset + len > array_size)
- goto out_short_read;
-
- num_stripes = btrfs_chunk_num_stripes(sb, chunk);
- if (!num_stripes) {
- btrfs_err(fs_info,
- "invalid number of stripes %u in sys_array at offset %u",
- num_stripes, cur_offset);
- ret = -EIO;
- break;
- }
+ if (key.type != BTRFS_CHUNK_ITEM_KEY) {
+ btrfs_err(fs_info,
+ "unexpected item type %u in sys_array at offset %u",
+ (u32)key.type, cur_offset);
+ ret = -EIO;
+ break;
+ }
- type = btrfs_chunk_type(sb, chunk);
- if ((type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) {
- btrfs_err(fs_info,
- "invalid chunk type %llu in sys_array at offset %u",
- type, cur_offset);
- ret = -EIO;
- break;
- }
+ chunk = (struct btrfs_chunk *)sb_array_offset;
+ /*
+ * At least one btrfs_chunk with one stripe must be present,
+ * exact stripe count check comes afterwards
+ */
+ len = btrfs_chunk_item_size(1);
+ if (cur_offset + len > array_size)
+ goto out_short_read;
- len = btrfs_chunk_item_size(num_stripes);
- if (cur_offset + len > array_size)
- goto out_short_read;
+ num_stripes = btrfs_chunk_num_stripes(sb, chunk);
+ if (!num_stripes) {
+ btrfs_err(fs_info,
+ "invalid number of stripes %u in sys_array at offset %u",
+ num_stripes, cur_offset);
+ ret = -EIO;
+ break;
+ }
- ret = read_one_chunk(&key, sb, chunk);
- if (ret)
- break;
- } else {
+ type = btrfs_chunk_type(sb, chunk);
+ if ((type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) {
btrfs_err(fs_info,
- "unexpected item type %u in sys_array at offset %u",
- (u32)key.type, cur_offset);
+ "invalid chunk type %llu in sys_array at offset %u",
+ type, cur_offset);
ret = -EIO;
break;
}
+
+ len = btrfs_chunk_item_size(num_stripes);
+ if (cur_offset + len > array_size)
+ goto out_short_read;
+
+ ret = read_one_chunk(&key, sb, chunk);
+ if (ret)
+ break;
+
array_ptr += len;
sb_array_offset += len;
cur_offset += len;
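The rewrite above is purely structural: the unexpected-key-type case is tested first and breaks out, so the common chunk-parsing path loses one level of nesting. For reference, the packed layout being walked inside the super block:

/*
 * sys_chunk_array, repeated until array_size is consumed:
 *
 *	struct btrfs_disk_key  key;	must be BTRFS_CHUNK_ITEM_KEY
 *	struct btrfs_chunk     chunk;	embeds the first btrfs_stripe
 *	struct btrfs_stripe    stripes[num_stripes - 1];
 */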
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index a7da1f3e3627..fc1b564b9cfe 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -18,10 +18,6 @@ extern struct mutex uuid_mutex;
#define BTRFS_STRIPE_LEN SZ_64K
struct buffer_head;
-struct btrfs_pending_bios {
- struct bio *head;
- struct bio *tail;
-};
struct btrfs_io_geometry {
/* remaining bytes before crossing a stripe */
@@ -68,13 +64,6 @@ struct btrfs_device {
u64 generation;
- spinlock_t io_lock ____cacheline_aligned;
- int running_pending;
- /* regular prio bios */
- struct btrfs_pending_bios pending_bios;
- /* sync bios */
- struct btrfs_pending_bios pending_sync_bios;
-
struct block_device *bdev;
/* the mode sent to blkdev_get */
@@ -254,14 +243,14 @@ struct btrfs_fs_devices {
struct list_head alloc_list;
struct btrfs_fs_devices *seed;
- int seeding;
+ bool seeding;
int opened;
/* set when we find or add a device that doesn't have the
* nonrot flag set
*/
- int rotating;
+ bool rotating;
struct btrfs_fs_info *fs_info;
/* sysfs kobjects */
@@ -330,7 +319,6 @@ struct btrfs_bio {
u64 map_type; /* get from map_lookup->type */
bio_end_io_t *end_io;
struct bio *orig_bio;
- unsigned long flags;
void *private;
atomic_t error;
int max_errors;
@@ -436,7 +424,7 @@ int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info);
int btrfs_alloc_chunk(struct btrfs_trans_handle *trans, u64 type);
void btrfs_mapping_tree_free(struct extent_map_tree *tree);
blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
- int mirror_num, int async_submit);
+ int mirror_num);
int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
fmode_t flags, void *holder);
struct btrfs_device *btrfs_scan_one_device(const char *path,
@@ -557,6 +545,10 @@ static inline enum btrfs_raid_types btrfs_bg_flags_to_raid_index(u64 flags)
return BTRFS_RAID_RAID10;
else if (flags & BTRFS_BLOCK_GROUP_RAID1)
return BTRFS_RAID_RAID1;
+ else if (flags & BTRFS_BLOCK_GROUP_RAID1C3)
+ return BTRFS_RAID_RAID1C3;
+ else if (flags & BTRFS_BLOCK_GROUP_RAID1C4)
+ return BTRFS_RAID_RAID1C4;
else if (flags & BTRFS_BLOCK_GROUP_DUP)
return BTRFS_RAID_DUP;
else if (flags & BTRFS_BLOCK_GROUP_RAID0)
@@ -571,7 +563,7 @@ static inline enum btrfs_raid_types btrfs_bg_flags_to_raid_index(u64 flags)
void btrfs_commit_device_sizes(struct btrfs_transaction *trans);
-struct list_head *btrfs_get_fs_uuids(void);
+struct list_head * __attribute_const__ btrfs_get_fs_uuids(void);
void btrfs_set_fs_info_ptr(struct btrfs_fs_info *fs_info);
void btrfs_reset_fs_info_ptr(struct btrfs_fs_info *fs_info);
bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info,
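The annotations added in this header are optimization hints, not behavioural changes: __attribute_const__ promises the function has no side effects and depends on nothing mutable (returning the address of a static list qualifies), so the compiler may fold repeated calls into one; __cold on btrfs_assign_next_active_device() above marks a rarely-executed device-removal/replace path so its code is moved out of the hot text section.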
diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c
index df1aace5df50..a6c90a003c12 100644
--- a/fs/btrfs/zlib.c
+++ b/fs/btrfs/zlib.c
@@ -29,19 +29,9 @@ struct workspace {
static struct workspace_manager wsm;
-static void zlib_init_workspace_manager(void)
+struct list_head *zlib_get_workspace(unsigned int level)
{
- btrfs_init_workspace_manager(&wsm, &btrfs_zlib_compress);
-}
-
-static void zlib_cleanup_workspace_manager(void)
-{
- btrfs_cleanup_workspace_manager(&wsm);
-}
-
-static struct list_head *zlib_get_workspace(unsigned int level)
-{
- struct list_head *ws = btrfs_get_workspace(&wsm, level);
+ struct list_head *ws = btrfs_get_workspace(BTRFS_COMPRESS_ZLIB, level);
struct workspace *workspace = list_entry(ws, struct workspace, list);
workspace->level = level;
@@ -49,12 +39,7 @@ static struct list_head *zlib_get_workspace(unsigned int level)
return ws;
}
-static void zlib_put_workspace(struct list_head *ws)
-{
- btrfs_put_workspace(&wsm, ws);
-}
-
-static void zlib_free_workspace(struct list_head *ws)
+void zlib_free_workspace(struct list_head *ws)
{
struct workspace *workspace = list_entry(ws, struct workspace, list);
@@ -63,7 +48,7 @@ static void zlib_free_workspace(struct list_head *ws)
kfree(workspace);
}
-static struct list_head *zlib_alloc_workspace(unsigned int level)
+struct list_head *zlib_alloc_workspace(unsigned int level)
{
struct workspace *workspace;
int workspacesize;
@@ -88,13 +73,9 @@ fail:
return ERR_PTR(-ENOMEM);
}
-static int zlib_compress_pages(struct list_head *ws,
- struct address_space *mapping,
- u64 start,
- struct page **pages,
- unsigned long *out_pages,
- unsigned long *total_in,
- unsigned long *total_out)
+int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
+ u64 start, struct page **pages, unsigned long *out_pages,
+ unsigned long *total_in, unsigned long *total_out)
{
struct workspace *workspace = list_entry(ws, struct workspace, list);
int ret;
@@ -228,7 +209,7 @@ out:
return ret;
}
-static int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
+int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
{
struct workspace *workspace = list_entry(ws, struct workspace, list);
int ret = 0, ret2;
@@ -319,10 +300,9 @@ done:
return ret;
}
-static int zlib_decompress(struct list_head *ws, unsigned char *data_in,
- struct page *dest_page,
- unsigned long start_byte,
- size_t srclen, size_t destlen)
+int zlib_decompress(struct list_head *ws, unsigned char *data_in,
+ struct page *dest_page, unsigned long start_byte, size_t srclen,
+ size_t destlen)
{
struct workspace *workspace = list_entry(ws, struct workspace, list);
int ret = 0;
@@ -419,15 +399,7 @@ next:
}
const struct btrfs_compress_op btrfs_zlib_compress = {
- .init_workspace_manager = zlib_init_workspace_manager,
- .cleanup_workspace_manager = zlib_cleanup_workspace_manager,
- .get_workspace = zlib_get_workspace,
- .put_workspace = zlib_put_workspace,
- .alloc_workspace = zlib_alloc_workspace,
- .free_workspace = zlib_free_workspace,
- .compress_pages = zlib_compress_pages,
- .decompress_bio = zlib_decompress_bio,
- .decompress = zlib_decompress,
+ .workspace_manager = &wsm,
.max_level = 9,
.default_level = BTRFS_ZLIB_DEFAULT_LEVEL,
};
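zlib's hooks become plain exported functions and btrfs_compress_op shrinks to data (the workspace manager pointer plus level limits). The generic code can then dispatch with a switch instead of indirect calls; a sketch under the names this series uses elsewhere:

static struct list_head *get_workspace(int type, unsigned int level)
{
	switch (type) {
	case BTRFS_COMPRESS_ZLIB:
		return zlib_get_workspace(level);
	case BTRFS_COMPRESS_ZSTD:
		return zstd_get_workspace(level);
	default:
		/* none/lzo can share the generic manager */
		return btrfs_get_workspace(type, level);
	}
}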
diff --git a/fs/btrfs/zstd.c b/fs/btrfs/zstd.c
index 764d47b107e5..9a4871636c6c 100644
--- a/fs/btrfs/zstd.c
+++ b/fs/btrfs/zstd.c
@@ -91,9 +91,8 @@ static inline struct workspace *list_to_workspace(struct list_head *list)
return container_of(list, struct workspace, list);
}
-static void zstd_free_workspace(struct list_head *ws);
-static struct list_head *zstd_alloc_workspace(unsigned int level);
-
+void zstd_free_workspace(struct list_head *ws);
+struct list_head *zstd_alloc_workspace(unsigned int level);
/*
* zstd_reclaim_timer_fn - reclaim timer
* @t: timer
@@ -168,7 +167,7 @@ static void zstd_calc_ws_mem_sizes(void)
}
}
-static void zstd_init_workspace_manager(void)
+void zstd_init_workspace_manager(void)
{
struct list_head *ws;
int i;
@@ -194,7 +193,7 @@ static void zstd_init_workspace_manager(void)
}
}
-static void zstd_cleanup_workspace_manager(void)
+void zstd_cleanup_workspace_manager(void)
{
struct workspace *workspace;
int i;
@@ -261,7 +260,7 @@ static struct list_head *zstd_find_workspace(unsigned int level)
* attempt to allocate a new workspace. If we fail to allocate one due to
* memory pressure, go to sleep waiting for the max level workspace to free up.
*/
-static struct list_head *zstd_get_workspace(unsigned int level)
+struct list_head *zstd_get_workspace(unsigned int level)
{
struct list_head *ws;
unsigned int nofs_flag;
@@ -302,7 +301,7 @@ again:
* isn't set, it is also set here. Only the max level workspace tries and wakes
* up waiting workspaces.
*/
-static void zstd_put_workspace(struct list_head *ws)
+void zstd_put_workspace(struct list_head *ws)
{
struct workspace *workspace = list_to_workspace(ws);
@@ -332,7 +331,7 @@ static void zstd_put_workspace(struct list_head *ws)
cond_wake_up(&wsm.wait);
}
-static void zstd_free_workspace(struct list_head *ws)
+void zstd_free_workspace(struct list_head *ws)
{
struct workspace *workspace = list_entry(ws, struct workspace, list);
@@ -341,7 +340,7 @@ static void zstd_free_workspace(struct list_head *ws)
kfree(workspace);
}
-static struct list_head *zstd_alloc_workspace(unsigned int level)
+struct list_head *zstd_alloc_workspace(unsigned int level)
{
struct workspace *workspace;
@@ -367,13 +366,9 @@ fail:
return ERR_PTR(-ENOMEM);
}
-static int zstd_compress_pages(struct list_head *ws,
- struct address_space *mapping,
- u64 start,
- struct page **pages,
- unsigned long *out_pages,
- unsigned long *total_in,
- unsigned long *total_out)
+int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
+ u64 start, struct page **pages, unsigned long *out_pages,
+ unsigned long *total_in, unsigned long *total_out)
{
struct workspace *workspace = list_entry(ws, struct workspace, list);
ZSTD_CStream *stream;
@@ -548,7 +543,7 @@ out:
return ret;
}
-static int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
+int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
{
struct workspace *workspace = list_entry(ws, struct workspace, list);
struct page **pages_in = cb->compressed_pages;
@@ -626,10 +621,9 @@ done:
return ret;
}
-static int zstd_decompress(struct list_head *ws, unsigned char *data_in,
- struct page *dest_page,
- unsigned long start_byte,
- size_t srclen, size_t destlen)
+int zstd_decompress(struct list_head *ws, unsigned char *data_in,
+ struct page *dest_page, unsigned long start_byte, size_t srclen,
+ size_t destlen)
{
struct workspace *workspace = list_entry(ws, struct workspace, list);
ZSTD_DStream *stream;
@@ -712,15 +706,8 @@ finish:
}
const struct btrfs_compress_op btrfs_zstd_compress = {
- .init_workspace_manager = zstd_init_workspace_manager,
- .cleanup_workspace_manager = zstd_cleanup_workspace_manager,
- .get_workspace = zstd_get_workspace,
- .put_workspace = zstd_put_workspace,
- .alloc_workspace = zstd_alloc_workspace,
- .free_workspace = zstd_free_workspace,
- .compress_pages = zstd_compress_pages,
- .decompress_bio = zstd_decompress_bio,
- .decompress = zstd_decompress,
+ /* ZSTD uses its own workspace manager */
+ .workspace_manager = NULL,
.max_level = ZSTD_BTRFS_MAX_LEVEL,
.default_level = ZSTD_BTRFS_DEFAULT_LEVEL,
};
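zstd is the exception: its workspaces are sized per compression level and reclaimed by the timer armed in zstd_init_workspace_manager(), a policy the generic single-list manager cannot express. Hence .workspace_manager = NULL above, with the zstd_* entry points staying exported rather than hidden behind the ops table.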
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index bd77adb64bfd..8de633964dc3 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -753,6 +753,9 @@ static void ceph_aio_complete(struct inode *inode,
if (!atomic_dec_and_test(&aio_req->pending_reqs))
return;
+ if (aio_req->iocb->ki_flags & IOCB_DIRECT)
+ inode_dio_end(inode);
+
ret = aio_req->error;
if (!ret)
ret = aio_req->total_len;
@@ -1091,6 +1094,7 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
CEPH_CAP_FILE_RD);
list_splice(&aio_req->osd_reqs, &osd_reqs);
+ inode_dio_begin(inode);
while (!list_empty(&osd_reqs)) {
req = list_first_entry(&osd_reqs,
struct ceph_osd_request,
@@ -1264,14 +1268,24 @@ again:
dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, inode);
+ if (iocb->ki_flags & IOCB_DIRECT)
+ ceph_start_io_direct(inode);
+ else
+ ceph_start_io_read(inode);
+
if (fi->fmode & CEPH_FILE_MODE_LAZY)
want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
else
want = CEPH_CAP_FILE_CACHE;
ret = ceph_get_caps(filp, CEPH_CAP_FILE_RD, want, -1,
&got, &pinned_page);
- if (ret < 0)
+ if (ret < 0) {
+ if (iocb->ki_flags & IOCB_DIRECT)
+ ceph_end_io_direct(inode);
+ else
+ ceph_end_io_read(inode);
return ret;
+ }
if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 ||
(iocb->ki_flags & IOCB_DIRECT) ||
@@ -1283,16 +1297,12 @@ again:
if (ci->i_inline_version == CEPH_INLINE_NONE) {
if (!retry_op && (iocb->ki_flags & IOCB_DIRECT)) {
- ceph_start_io_direct(inode);
ret = ceph_direct_read_write(iocb, to,
NULL, NULL);
- ceph_end_io_direct(inode);
if (ret >= 0 && ret < len)
retry_op = CHECK_EOF;
} else {
- ceph_start_io_read(inode);
ret = ceph_sync_read(iocb, to, &retry_op);
- ceph_end_io_read(inode);
}
} else {
retry_op = READ_INLINE;
@@ -1303,11 +1313,10 @@ again:
inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
ceph_cap_string(got));
ceph_add_rw_context(fi, &rw_ctx);
- ceph_start_io_read(inode);
ret = generic_file_read_iter(iocb, to);
- ceph_end_io_read(inode);
ceph_del_rw_context(fi, &rw_ctx);
}
+
dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
if (pinned_page) {
@@ -1315,6 +1324,12 @@ again:
pinned_page = NULL;
}
ceph_put_cap_refs(ci, got);
+
+ if (iocb->ki_flags & IOCB_DIRECT)
+ ceph_end_io_direct(inode);
+ else
+ ceph_end_io_read(inode);
+
if (retry_op > HAVE_RETRIED && ret >= 0) {
int statret;
struct page *page = NULL;
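The net effect in ceph_read_iter() is a fixed ordering: the ceph_start_io_* rwsem is taken before ceph_get_caps() and released only after the cap references are dropped, instead of being acquired deep inside the individual branches. O_DIRECT aio reads are additionally bracketed by inode_dio_begin()/inode_dio_end() so truncate can wait for them via inode_dio_wait(). Sketch of the resulting direct-read path:

	ceph_start_io_direct(inode);
	ret = ceph_get_caps(filp, CEPH_CAP_FILE_RD, want, -1,
			    &got, &pinned_page);
	/* ... submit I/O; aio holds inode_dio_begin() until completion ... */
	ceph_put_cap_refs(ci, got);
	ceph_end_io_direct(inode);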
diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c
index d12ea28836a5..2f04024c3588 100644
--- a/fs/cramfs/inode.c
+++ b/fs/cramfs/inode.c
@@ -958,8 +958,8 @@ static int cramfs_get_tree(struct fs_context *fc)
if (IS_ENABLED(CONFIG_CRAMFS_MTD)) {
ret = get_tree_mtd(fc, cramfs_mtd_fill_super);
- if (ret < 0)
- return ret;
+ if (!ret)
+ return 0;
}
if (IS_ENABLED(CONFIG_CRAMFS_BLOCKDEV))
ret = get_tree_bdev(fc, cramfs_blkdev_fill_super);
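The one-line change flips both halves of the fallback logic: previously a successful MTD mount fell through and tried the block device anyway, while an MTD failure aborted even though CONFIG_CRAMFS_BLOCKDEV might still succeed. Returning early only when get_tree_mtd() succeeded makes MTD the first choice and the block device a genuine fallback.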
diff --git a/fs/crypto/bio.c b/fs/crypto/bio.c
index 82da2510721f..1f4b8a277060 100644
--- a/fs/crypto/bio.c
+++ b/fs/crypto/bio.c
@@ -26,7 +26,7 @@
#include <linux/namei.h>
#include "fscrypt_private.h"
-static void __fscrypt_decrypt_bio(struct bio *bio, bool done)
+void fscrypt_decrypt_bio(struct bio *bio)
{
struct bio_vec *bv;
struct bvec_iter_all iter_all;
@@ -37,37 +37,10 @@ static void __fscrypt_decrypt_bio(struct bio *bio, bool done)
bv->bv_offset);
if (ret)
SetPageError(page);
- else if (done)
- SetPageUptodate(page);
- if (done)
- unlock_page(page);
}
}
-
-void fscrypt_decrypt_bio(struct bio *bio)
-{
- __fscrypt_decrypt_bio(bio, false);
-}
EXPORT_SYMBOL(fscrypt_decrypt_bio);
-static void completion_pages(struct work_struct *work)
-{
- struct fscrypt_ctx *ctx = container_of(work, struct fscrypt_ctx, work);
- struct bio *bio = ctx->bio;
-
- __fscrypt_decrypt_bio(bio, true);
- fscrypt_release_ctx(ctx);
- bio_put(bio);
-}
-
-void fscrypt_enqueue_decrypt_bio(struct fscrypt_ctx *ctx, struct bio *bio)
-{
- INIT_WORK(&ctx->work, completion_pages);
- ctx->bio = bio;
- fscrypt_enqueue_decrypt_work(&ctx->work);
-}
-EXPORT_SYMBOL(fscrypt_enqueue_decrypt_bio);
-
int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
sector_t pblk, unsigned int len)
{
diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c
index 32a7ad0098cc..3719efa546c6 100644
--- a/fs/crypto/crypto.c
+++ b/fs/crypto/crypto.c
@@ -27,29 +27,20 @@
#include <linux/ratelimit.h>
#include <linux/dcache.h>
#include <linux/namei.h>
-#include <crypto/aes.h>
#include <crypto/skcipher.h>
#include "fscrypt_private.h"
static unsigned int num_prealloc_crypto_pages = 32;
-static unsigned int num_prealloc_crypto_ctxs = 128;
module_param(num_prealloc_crypto_pages, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_pages,
"Number of crypto pages to preallocate");
-module_param(num_prealloc_crypto_ctxs, uint, 0444);
-MODULE_PARM_DESC(num_prealloc_crypto_ctxs,
- "Number of crypto contexts to preallocate");
static mempool_t *fscrypt_bounce_page_pool = NULL;
-static LIST_HEAD(fscrypt_free_ctxs);
-static DEFINE_SPINLOCK(fscrypt_ctx_lock);
-
static struct workqueue_struct *fscrypt_read_workqueue;
static DEFINE_MUTEX(fscrypt_init_mutex);
-static struct kmem_cache *fscrypt_ctx_cachep;
struct kmem_cache *fscrypt_info_cachep;
void fscrypt_enqueue_decrypt_work(struct work_struct *work)
@@ -58,62 +49,6 @@ void fscrypt_enqueue_decrypt_work(struct work_struct *work)
}
EXPORT_SYMBOL(fscrypt_enqueue_decrypt_work);
-/**
- * fscrypt_release_ctx() - Release a decryption context
- * @ctx: The decryption context to release.
- *
- * If the decryption context was allocated from the pre-allocated pool, return
- * it to that pool. Else, free it.
- */
-void fscrypt_release_ctx(struct fscrypt_ctx *ctx)
-{
- unsigned long flags;
-
- if (ctx->flags & FS_CTX_REQUIRES_FREE_ENCRYPT_FL) {
- kmem_cache_free(fscrypt_ctx_cachep, ctx);
- } else {
- spin_lock_irqsave(&fscrypt_ctx_lock, flags);
- list_add(&ctx->free_list, &fscrypt_free_ctxs);
- spin_unlock_irqrestore(&fscrypt_ctx_lock, flags);
- }
-}
-EXPORT_SYMBOL(fscrypt_release_ctx);
-
-/**
- * fscrypt_get_ctx() - Get a decryption context
- * @gfp_flags: The gfp flag for memory allocation
- *
- * Allocate and initialize a decryption context.
- *
- * Return: A new decryption context on success; an ERR_PTR() otherwise.
- */
-struct fscrypt_ctx *fscrypt_get_ctx(gfp_t gfp_flags)
-{
- struct fscrypt_ctx *ctx;
- unsigned long flags;
-
- /*
- * First try getting a ctx from the free list so that we don't have to
- * call into the slab allocator.
- */
- spin_lock_irqsave(&fscrypt_ctx_lock, flags);
- ctx = list_first_entry_or_null(&fscrypt_free_ctxs,
- struct fscrypt_ctx, free_list);
- if (ctx)
- list_del(&ctx->free_list);
- spin_unlock_irqrestore(&fscrypt_ctx_lock, flags);
- if (!ctx) {
- ctx = kmem_cache_zalloc(fscrypt_ctx_cachep, gfp_flags);
- if (!ctx)
- return ERR_PTR(-ENOMEM);
- ctx->flags |= FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
- } else {
- ctx->flags &= ~FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
- }
- return ctx;
-}
-EXPORT_SYMBOL(fscrypt_get_ctx);
-
struct page *fscrypt_alloc_bounce_page(gfp_t gfp_flags)
{
return mempool_alloc(fscrypt_bounce_page_pool, gfp_flags);
@@ -138,14 +73,17 @@ EXPORT_SYMBOL(fscrypt_free_bounce_page);
void fscrypt_generate_iv(union fscrypt_iv *iv, u64 lblk_num,
const struct fscrypt_info *ci)
{
+ u8 flags = fscrypt_policy_flags(&ci->ci_policy);
+
memset(iv, 0, ci->ci_mode->ivsize);
- iv->lblk_num = cpu_to_le64(lblk_num);
- if (fscrypt_is_direct_key_policy(&ci->ci_policy))
+ if (flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64) {
+ WARN_ON_ONCE((u32)lblk_num != lblk_num);
+ lblk_num |= (u64)ci->ci_inode->i_ino << 32;
+ } else if (flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY) {
memcpy(iv->nonce, ci->ci_nonce, FS_KEY_DERIVATION_NONCE_SIZE);
-
- if (ci->ci_essiv_tfm != NULL)
- crypto_cipher_encrypt_one(ci->ci_essiv_tfm, iv->raw, iv->raw);
+ }
+ iv->lblk_num = cpu_to_le64(lblk_num);
}
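A worked example of the new IV_INO_LBLK_64 branch, with hypothetical numbers:

/*
 * ino = 0x89, logical block 0x2a:
 *
 *	lblk_num = (0x89ULL << 32) | 0x2a = 0x000000890000002a
 *
 * stored little-endian in iv->lblk_num. The WARN_ON_ONCE() encodes the
 * policy's constraint that logical block numbers fit in 32 bits.
 */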
/* Encrypt or decrypt a single filesystem block of file contents */
@@ -396,17 +334,6 @@ const struct dentry_operations fscrypt_d_ops = {
.d_revalidate = fscrypt_d_revalidate,
};
-static void fscrypt_destroy(void)
-{
- struct fscrypt_ctx *pos, *n;
-
- list_for_each_entry_safe(pos, n, &fscrypt_free_ctxs, free_list)
- kmem_cache_free(fscrypt_ctx_cachep, pos);
- INIT_LIST_HEAD(&fscrypt_free_ctxs);
- mempool_destroy(fscrypt_bounce_page_pool);
- fscrypt_bounce_page_pool = NULL;
-}
-
/**
* fscrypt_initialize() - allocate major buffers for fs encryption.
* @cop_flags: fscrypt operations flags
@@ -414,11 +341,11 @@ static void fscrypt_destroy(void)
* We only call this when we start accessing encrypted files, since it
* results in memory getting allocated that wouldn't otherwise be used.
*
- * Return: Zero on success, non-zero otherwise.
+ * Return: 0 on success; -errno on failure
*/
int fscrypt_initialize(unsigned int cop_flags)
{
- int i, res = -ENOMEM;
+ int err = 0;
/* No need to allocate a bounce page pool if this FS won't use it. */
if (cop_flags & FS_CFLG_OWN_PAGES)
@@ -426,29 +353,18 @@ int fscrypt_initialize(unsigned int cop_flags)
mutex_lock(&fscrypt_init_mutex);
if (fscrypt_bounce_page_pool)
- goto already_initialized;
-
- for (i = 0; i < num_prealloc_crypto_ctxs; i++) {
- struct fscrypt_ctx *ctx;
-
- ctx = kmem_cache_zalloc(fscrypt_ctx_cachep, GFP_NOFS);
- if (!ctx)
- goto fail;
- list_add(&ctx->free_list, &fscrypt_free_ctxs);
- }
+ goto out_unlock;
+ err = -ENOMEM;
fscrypt_bounce_page_pool =
mempool_create_page_pool(num_prealloc_crypto_pages, 0);
if (!fscrypt_bounce_page_pool)
- goto fail;
+ goto out_unlock;
-already_initialized:
- mutex_unlock(&fscrypt_init_mutex);
- return 0;
-fail:
- fscrypt_destroy();
+ err = 0;
+out_unlock:
mutex_unlock(&fscrypt_init_mutex);
- return res;
+ return err;
}
void fscrypt_msg(const struct inode *inode, const char *level,
@@ -494,13 +410,9 @@ static int __init fscrypt_init(void)
if (!fscrypt_read_workqueue)
goto fail;
- fscrypt_ctx_cachep = KMEM_CACHE(fscrypt_ctx, SLAB_RECLAIM_ACCOUNT);
- if (!fscrypt_ctx_cachep)
- goto fail_free_queue;
-
fscrypt_info_cachep = KMEM_CACHE(fscrypt_info, SLAB_RECLAIM_ACCOUNT);
if (!fscrypt_info_cachep)
- goto fail_free_ctx;
+ goto fail_free_queue;
err = fscrypt_init_keyring();
if (err)
@@ -510,8 +422,6 @@ static int __init fscrypt_init(void)
fail_free_info:
kmem_cache_destroy(fscrypt_info_cachep);
-fail_free_ctx:
- kmem_cache_destroy(fscrypt_ctx_cachep);
fail_free_queue:
destroy_workqueue(fscrypt_read_workqueue);
fail:
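With fscrypt_decrypt_bio() no longer deferring work through a context (fs/crypto/bio.c above), struct fscrypt_ctx has no users left: the preallocated ctx list, its spinlock, the module parameter and the slab cache all go away, and fscrypt_initialize() shrinks to managing only the bounce page mempool.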
diff --git a/fs/crypto/fscrypt_private.h b/fs/crypto/fscrypt_private.h
index e84efc01512e..130b50e5a011 100644
--- a/fs/crypto/fscrypt_private.h
+++ b/fs/crypto/fscrypt_private.h
@@ -163,11 +163,8 @@ struct fscrypt_info {
/* The actual crypto transform used for encryption and decryption */
struct crypto_skcipher *ci_ctfm;
- /*
- * Cipher for ESSIV IV generation. Only set for CBC contents
- * encryption, otherwise is NULL.
- */
- struct crypto_cipher *ci_essiv_tfm;
+ /* True if the key should be freed when this fscrypt_info is freed */
+ bool ci_owns_key;
/*
* Encryption mode used for this inode. It corresponds to either the
@@ -209,8 +206,6 @@ typedef enum {
FS_ENCRYPT,
} fscrypt_direction_t;
-#define FS_CTX_REQUIRES_FREE_ENCRYPT_FL 0x00000001
-
static inline bool fscrypt_valid_enc_modes(u32 contents_mode,
u32 filenames_mode)
{
@@ -289,7 +284,8 @@ extern int fscrypt_init_hkdf(struct fscrypt_hkdf *hkdf, const u8 *master_key,
*/
#define HKDF_CONTEXT_KEY_IDENTIFIER 1
#define HKDF_CONTEXT_PER_FILE_KEY 2
-#define HKDF_CONTEXT_PER_MODE_KEY 3
+#define HKDF_CONTEXT_DIRECT_KEY 3
+#define HKDF_CONTEXT_IV_INO_LBLK_64_KEY 4
extern int fscrypt_hkdf_expand(struct fscrypt_hkdf *hkdf, u8 context,
const u8 *info, unsigned int infolen,
@@ -386,8 +382,14 @@ struct fscrypt_master_key {
struct list_head mk_decrypted_inodes;
spinlock_t mk_decrypted_inodes_lock;
- /* Per-mode tfms for DIRECT_KEY policies, allocated on-demand */
- struct crypto_skcipher *mk_mode_keys[__FSCRYPT_MODE_MAX + 1];
+ /* Crypto API transforms for DIRECT_KEY policies, allocated on-demand */
+ struct crypto_skcipher *mk_direct_tfms[__FSCRYPT_MODE_MAX + 1];
+
+ /*
+ * Crypto API transforms for filesystem-layer implementation of
+ * IV_INO_LBLK_64 policies, allocated on-demand.
+ */
+ struct crypto_skcipher *mk_iv_ino_lblk_64_tfms[__FSCRYPT_MODE_MAX + 1];
} __randomize_layout;
@@ -443,8 +445,7 @@ struct fscrypt_mode {
const char *cipher_str;
int keysize;
int ivsize;
- bool logged_impl_name;
- bool needs_essiv;
+ int logged_impl_name;
};
static inline bool
diff --git a/fs/crypto/keyring.c b/fs/crypto/keyring.c
index c34fa7c61b43..040df1f5e1c8 100644
--- a/fs/crypto/keyring.c
+++ b/fs/crypto/keyring.c
@@ -43,8 +43,10 @@ static void free_master_key(struct fscrypt_master_key *mk)
wipe_master_key_secret(&mk->mk_secret);
- for (i = 0; i < ARRAY_SIZE(mk->mk_mode_keys); i++)
- crypto_free_skcipher(mk->mk_mode_keys[i]);
+ for (i = 0; i <= __FSCRYPT_MODE_MAX; i++) {
+ crypto_free_skcipher(mk->mk_direct_tfms[i]);
+ crypto_free_skcipher(mk->mk_iv_ino_lblk_64_tfms[i]);
+ }
key_put(mk->mk_users);
kzfree(mk);
diff --git a/fs/crypto/keysetup.c b/fs/crypto/keysetup.c
index d71c2d6dd162..f577bb6613f9 100644
--- a/fs/crypto/keysetup.c
+++ b/fs/crypto/keysetup.c
@@ -8,15 +8,11 @@
* Heavily modified since then.
*/
-#include <crypto/aes.h>
-#include <crypto/sha.h>
#include <crypto/skcipher.h>
#include <linux/key.h>
#include "fscrypt_private.h"
-static struct crypto_shash *essiv_hash_tfm;
-
static struct fscrypt_mode available_modes[] = {
[FSCRYPT_MODE_AES_256_XTS] = {
.friendly_name = "AES-256-XTS",
@@ -31,11 +27,10 @@ static struct fscrypt_mode available_modes[] = {
.ivsize = 16,
},
[FSCRYPT_MODE_AES_128_CBC] = {
- .friendly_name = "AES-128-CBC",
- .cipher_str = "cbc(aes)",
+ .friendly_name = "AES-128-CBC-ESSIV",
+ .cipher_str = "essiv(cbc(aes),sha256)",
.keysize = 16,
.ivsize = 16,
- .needs_essiv = true,
},
[FSCRYPT_MODE_AES_128_CTS] = {
.friendly_name = "AES-128-CTS-CBC",
@@ -86,15 +81,13 @@ struct crypto_skcipher *fscrypt_allocate_skcipher(struct fscrypt_mode *mode,
mode->cipher_str, PTR_ERR(tfm));
return tfm;
}
- if (unlikely(!mode->logged_impl_name)) {
+ if (!xchg(&mode->logged_impl_name, 1)) {
/*
* fscrypt performance can vary greatly depending on which
* crypto algorithm implementation is used. Help people debug
* performance problems by logging the ->cra_driver_name the
- * first time a mode is used. Note that multiple threads can
- * race here, but it doesn't really matter.
+ * first time a mode is used.
*/
- mode->logged_impl_name = true;
pr_info("fscrypt: %s using implementation \"%s\"\n",
mode->friendly_name,
crypto_skcipher_alg(tfm)->base.cra_driver_name);
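Turning logged_impl_name into an int tested with xchg() makes the log-once logic race-free: exactly one caller observes the old value 0 and prints, where the previous plain bool load/store allowed concurrent threads to log the same mode more than once (and was a data race). The switch from bool to int is, presumably, because 1-byte xchg() is not available on every architecture.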
@@ -111,131 +104,64 @@ err_free_tfm:
return ERR_PTR(err);
}
-static int derive_essiv_salt(const u8 *key, int keysize, u8 *salt)
-{
- struct crypto_shash *tfm = READ_ONCE(essiv_hash_tfm);
-
- /* init hash transform on demand */
- if (unlikely(!tfm)) {
- struct crypto_shash *prev_tfm;
-
- tfm = crypto_alloc_shash("sha256", 0, 0);
- if (IS_ERR(tfm)) {
- if (PTR_ERR(tfm) == -ENOENT) {
- fscrypt_warn(NULL,
- "Missing crypto API support for SHA-256");
- return -ENOPKG;
- }
- fscrypt_err(NULL,
- "Error allocating SHA-256 transform: %ld",
- PTR_ERR(tfm));
- return PTR_ERR(tfm);
- }
- prev_tfm = cmpxchg(&essiv_hash_tfm, NULL, tfm);
- if (prev_tfm) {
- crypto_free_shash(tfm);
- tfm = prev_tfm;
- }
- }
-
- {
- SHASH_DESC_ON_STACK(desc, tfm);
- desc->tfm = tfm;
-
- return crypto_shash_digest(desc, key, keysize, salt);
- }
-}
-
-static int init_essiv_generator(struct fscrypt_info *ci, const u8 *raw_key,
- int keysize)
-{
- int err;
- struct crypto_cipher *essiv_tfm;
- u8 salt[SHA256_DIGEST_SIZE];
-
- if (WARN_ON(ci->ci_mode->ivsize != AES_BLOCK_SIZE))
- return -EINVAL;
-
- essiv_tfm = crypto_alloc_cipher("aes", 0, 0);
- if (IS_ERR(essiv_tfm))
- return PTR_ERR(essiv_tfm);
-
- ci->ci_essiv_tfm = essiv_tfm;
-
- err = derive_essiv_salt(raw_key, keysize, salt);
- if (err)
- goto out;
-
- /*
- * Using SHA256 to derive the salt/key will result in AES-256 being
- * used for IV generation. File contents encryption will still use the
- * configured keysize (AES-128) nevertheless.
- */
- err = crypto_cipher_setkey(essiv_tfm, salt, sizeof(salt));
- if (err)
- goto out;
-
-out:
- memzero_explicit(salt, sizeof(salt));
- return err;
-}
-
-/* Given the per-file key, set up the file's crypto transform object(s) */
+/* Given the per-file key, set up the file's crypto transform object */
int fscrypt_set_derived_key(struct fscrypt_info *ci, const u8 *derived_key)
{
- struct fscrypt_mode *mode = ci->ci_mode;
- struct crypto_skcipher *ctfm;
- int err;
-
- ctfm = fscrypt_allocate_skcipher(mode, derived_key, ci->ci_inode);
- if (IS_ERR(ctfm))
- return PTR_ERR(ctfm);
+ struct crypto_skcipher *tfm;
- ci->ci_ctfm = ctfm;
+ tfm = fscrypt_allocate_skcipher(ci->ci_mode, derived_key, ci->ci_inode);
+ if (IS_ERR(tfm))
+ return PTR_ERR(tfm);
- if (mode->needs_essiv) {
- err = init_essiv_generator(ci, derived_key, mode->keysize);
- if (err) {
- fscrypt_warn(ci->ci_inode,
- "Error initializing ESSIV generator: %d",
- err);
- return err;
- }
- }
+ ci->ci_ctfm = tfm;
+ ci->ci_owns_key = true;
return 0;
}
static int setup_per_mode_key(struct fscrypt_info *ci,
- struct fscrypt_master_key *mk)
+ struct fscrypt_master_key *mk,
+ struct crypto_skcipher **tfms,
+ u8 hkdf_context, bool include_fs_uuid)
{
+ const struct inode *inode = ci->ci_inode;
+ const struct super_block *sb = inode->i_sb;
struct fscrypt_mode *mode = ci->ci_mode;
u8 mode_num = mode - available_modes;
struct crypto_skcipher *tfm, *prev_tfm;
u8 mode_key[FSCRYPT_MAX_KEY_SIZE];
+ u8 hkdf_info[sizeof(mode_num) + sizeof(sb->s_uuid)];
+ unsigned int hkdf_infolen = 0;
int err;
- if (WARN_ON(mode_num >= ARRAY_SIZE(mk->mk_mode_keys)))
+ if (WARN_ON(mode_num > __FSCRYPT_MODE_MAX))
return -EINVAL;
/* pairs with cmpxchg() below */
- tfm = READ_ONCE(mk->mk_mode_keys[mode_num]);
+ tfm = READ_ONCE(tfms[mode_num]);
if (likely(tfm != NULL))
goto done;
BUILD_BUG_ON(sizeof(mode_num) != 1);
+ BUILD_BUG_ON(sizeof(sb->s_uuid) != 16);
+ BUILD_BUG_ON(sizeof(hkdf_info) != 17);
+ hkdf_info[hkdf_infolen++] = mode_num;
+ if (include_fs_uuid) {
+ memcpy(&hkdf_info[hkdf_infolen], &sb->s_uuid,
+ sizeof(sb->s_uuid));
+ hkdf_infolen += sizeof(sb->s_uuid);
+ }
err = fscrypt_hkdf_expand(&mk->mk_secret.hkdf,
- HKDF_CONTEXT_PER_MODE_KEY,
- &mode_num, sizeof(mode_num),
+ hkdf_context, hkdf_info, hkdf_infolen,
mode_key, mode->keysize);
if (err)
return err;
- tfm = fscrypt_allocate_skcipher(mode, mode_key, ci->ci_inode);
+ tfm = fscrypt_allocate_skcipher(mode, mode_key, inode);
memzero_explicit(mode_key, mode->keysize);
if (IS_ERR(tfm))
return PTR_ERR(tfm);
/* pairs with READ_ONCE() above */
- prev_tfm = cmpxchg(&mk->mk_mode_keys[mode_num], NULL, tfm);
+ prev_tfm = cmpxchg(&tfms[mode_num], NULL, tfm);
if (prev_tfm != NULL) {
crypto_free_skcipher(tfm);
tfm = prev_tfm;
@@ -266,7 +192,19 @@ static int fscrypt_setup_v2_file_key(struct fscrypt_info *ci,
ci->ci_mode->friendly_name);
return -EINVAL;
}
- return setup_per_mode_key(ci, mk);
+ return setup_per_mode_key(ci, mk, mk->mk_direct_tfms,
+ HKDF_CONTEXT_DIRECT_KEY, false);
+ } else if (ci->ci_policy.v2.flags &
+ FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64) {
+ /*
+ * IV_INO_LBLK_64: encryption keys are derived from (master_key,
+ * mode_num, filesystem_uuid), and inode number is included in
+ * the IVs. This format is optimized for use with inline
+ * encryption hardware compliant with the UFS or eMMC standards.
+ */
+ return setup_per_mode_key(ci, mk, mk->mk_iv_ino_lblk_64_tfms,
+ HKDF_CONTEXT_IV_INO_LBLK_64_KEY,
+ true);
}
err = fscrypt_hkdf_expand(&mk->mk_secret.hkdf,
@@ -388,13 +326,10 @@ static void put_crypt_info(struct fscrypt_info *ci)
if (!ci)
return;
- if (ci->ci_direct_key) {
+ if (ci->ci_direct_key)
fscrypt_put_direct_key(ci->ci_direct_key);
- } else if ((ci->ci_ctfm != NULL || ci->ci_essiv_tfm != NULL) &&
- !fscrypt_is_direct_key_policy(&ci->ci_policy)) {
+ else if (ci->ci_owns_key)
crypto_free_skcipher(ci->ci_ctfm);
- crypto_free_cipher(ci->ci_essiv_tfm);
- }
key = ci->ci_master_key;
if (key) {
@@ -415,6 +350,7 @@ static void put_crypt_info(struct fscrypt_info *ci)
key_invalidate(key);
key_put(key);
}
+ memzero_explicit(ci, sizeof(*ci));
kmem_cache_free(fscrypt_info_cachep, ci);
}
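
[Note: the per-mode key derivation above feeds a short application-specific "info" string into HKDF-Expand: a 1-byte mode number, optionally followed by the 16-byte filesystem UUID; the BUILD_BUG_ONs pin the layout at 17 bytes total. A minimal standalone sketch of that buffer construction, with hypothetical names, assuming only the layout visible in the hunk:]

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static size_t build_hkdf_info(uint8_t *info, uint8_t mode_num,
                                  const uint8_t uuid[16], int include_fs_uuid)
    {
        size_t len = 0;

        info[len++] = mode_num;             /* always present */
        if (include_fs_uuid) {
            memcpy(&info[len], uuid, 16);   /* only for IV_INO_LBLK_64 */
            len += 16;                      /* 17 bytes total */
        }
        return len;
    }

    int main(void)
    {
        uint8_t info[17];
        const uint8_t uuid[16] = { 0 };     /* placeholder UUID */

        printf("infolen=%zu\n", build_hkdf_info(info, 1, uuid, 1)); /* 17 */
        return 0;
    }
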
diff --git a/fs/crypto/keysetup_v1.c b/fs/crypto/keysetup_v1.c
index ad1a36c370c3..5298ef22aa85 100644
--- a/fs/crypto/keysetup_v1.c
+++ b/fs/crypto/keysetup_v1.c
@@ -270,10 +270,6 @@ static int setup_v1_file_key_direct(struct fscrypt_info *ci,
return -EINVAL;
}
- /* ESSIV implies 16-byte IVs which implies !DIRECT_KEY */
- if (WARN_ON(mode->needs_essiv))
- return -EINVAL;
-
dk = fscrypt_get_direct_key(ci, raw_master_key);
if (IS_ERR(dk))
return PTR_ERR(dk);
diff --git a/fs/crypto/policy.c b/fs/crypto/policy.c
index 4072ba644595..96f528071bed 100644
--- a/fs/crypto/policy.c
+++ b/fs/crypto/policy.c
@@ -29,6 +29,40 @@ bool fscrypt_policies_equal(const union fscrypt_policy *policy1,
return !memcmp(policy1, policy2, fscrypt_policy_size(policy1));
}
+static bool supported_iv_ino_lblk_64_policy(
+ const struct fscrypt_policy_v2 *policy,
+ const struct inode *inode)
+{
+ struct super_block *sb = inode->i_sb;
+ int ino_bits = 64, lblk_bits = 64;
+
+ if (policy->flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY) {
+ fscrypt_warn(inode,
+ "The DIRECT_KEY and IV_INO_LBLK_64 flags are mutually exclusive");
+ return false;
+ }
+ /*
+ * It's unsafe to include inode numbers in the IVs if the filesystem can
+ * potentially renumber inodes, e.g. via filesystem shrinking.
+ */
+ if (!sb->s_cop->has_stable_inodes ||
+ !sb->s_cop->has_stable_inodes(sb)) {
+ fscrypt_warn(inode,
+ "Can't use IV_INO_LBLK_64 policy on filesystem '%s' because it doesn't have stable inode numbers",
+ sb->s_id);
+ return false;
+ }
+ if (sb->s_cop->get_ino_and_lblk_bits)
+ sb->s_cop->get_ino_and_lblk_bits(sb, &ino_bits, &lblk_bits);
+ if (ino_bits > 32 || lblk_bits > 32) {
+ fscrypt_warn(inode,
+ "Can't use IV_INO_LBLK_64 policy on filesystem '%s' because it doesn't use 32-bit inode and block numbers",
+ sb->s_id);
+ return false;
+ }
+ return true;
+}
+
/**
* fscrypt_supported_policy - check whether an encryption policy is supported
*
@@ -55,7 +89,8 @@ bool fscrypt_supported_policy(const union fscrypt_policy *policy_u,
return false;
}
- if (policy->flags & ~FSCRYPT_POLICY_FLAGS_VALID) {
+ if (policy->flags & ~(FSCRYPT_POLICY_FLAGS_PAD_MASK |
+ FSCRYPT_POLICY_FLAG_DIRECT_KEY)) {
fscrypt_warn(inode,
"Unsupported encryption flags (0x%02x)",
policy->flags);
@@ -83,6 +118,10 @@ bool fscrypt_supported_policy(const union fscrypt_policy *policy_u,
return false;
}
+ if ((policy->flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64) &&
+ !supported_iv_ino_lblk_64_policy(policy, inode))
+ return false;
+
if (memchr_inv(policy->__reserved, 0,
sizeof(policy->__reserved))) {
fscrypt_warn(inode,
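
[Note: the 32-bit limits checked above follow from the IV layout the flag name implies: with IV_INO_LBLK_64, the 64-bit per-block IV is assumed to pack the logical block number in its low 32 bits and the inode number in its high 32 bits, so neither value may exceed 32 bits. A standalone sketch of that packing, hypothetical helper, illustration only:]

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t iv_ino_lblk_64(uint32_t ino, uint32_t lblk)
    {
        /* inode number high, logical block number low */
        return ((uint64_t)ino << 32) | lblk;
    }

    int main(void)
    {
        /* inode 12, block 7 -> 0x0000000c00000007 */
        printf("0x%016llx\n",
               (unsigned long long)iv_ino_lblk_64(12, 7));
        return 0;
    }
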
diff --git a/fs/dcache.c b/fs/dcache.c
index e88cf0554e65..f7931b682a0d 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -1319,7 +1319,7 @@ resume:
if (!list_empty(&dentry->d_subdirs)) {
spin_unlock(&this_parent->d_lock);
- spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_);
+ spin_release(&dentry->d_lock.dep_map, _RET_IP_);
this_parent = dentry;
spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
goto repeat;
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index 18426f4855f1..e23752d9a79f 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -128,13 +128,20 @@ static int ecryptfs_do_unlink(struct inode *dir, struct dentry *dentry,
struct inode *inode)
{
struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry);
- struct inode *lower_dir_inode = ecryptfs_inode_to_lower(dir);
struct dentry *lower_dir_dentry;
+ struct inode *lower_dir_inode;
int rc;
- dget(lower_dentry);
- lower_dir_dentry = lock_parent(lower_dentry);
- rc = vfs_unlink(lower_dir_inode, lower_dentry, NULL);
+ lower_dir_dentry = ecryptfs_dentry_to_lower(dentry->d_parent);
+ lower_dir_inode = d_inode(lower_dir_dentry);
+ inode_lock_nested(lower_dir_inode, I_MUTEX_PARENT);
+ dget(lower_dentry); // don't even try to make the lower negative
+ if (lower_dentry->d_parent != lower_dir_dentry)
+ rc = -EINVAL;
+ else if (d_unhashed(lower_dentry))
+ rc = -EINVAL;
+ else
+ rc = vfs_unlink(lower_dir_inode, lower_dentry, NULL);
if (rc) {
printk(KERN_ERR "Error in vfs_unlink; rc = [%d]\n", rc);
goto out_unlock;
@@ -142,10 +149,11 @@ static int ecryptfs_do_unlink(struct inode *dir, struct dentry *dentry,
fsstack_copy_attr_times(dir, lower_dir_inode);
set_nlink(inode, ecryptfs_inode_to_lower(inode)->i_nlink);
inode->i_ctime = dir->i_ctime;
- d_drop(dentry);
out_unlock:
- unlock_dir(lower_dir_dentry);
dput(lower_dentry);
+ inode_unlock(lower_dir_inode);
+ if (!rc)
+ d_drop(dentry);
return rc;
}
@@ -311,9 +319,9 @@ static int ecryptfs_i_size_read(struct dentry *dentry, struct inode *inode)
static struct dentry *ecryptfs_lookup_interpose(struct dentry *dentry,
struct dentry *lower_dentry)
{
- struct inode *inode, *lower_inode = d_inode(lower_dentry);
+ struct path *path = ecryptfs_dentry_to_lower_path(dentry->d_parent);
+ struct inode *inode, *lower_inode;
struct ecryptfs_dentry_info *dentry_info;
- struct vfsmount *lower_mnt;
int rc = 0;
dentry_info = kmem_cache_alloc(ecryptfs_dentry_info_cache, GFP_KERNEL);
@@ -322,16 +330,23 @@ static struct dentry *ecryptfs_lookup_interpose(struct dentry *dentry,
return ERR_PTR(-ENOMEM);
}
- lower_mnt = mntget(ecryptfs_dentry_to_lower_mnt(dentry->d_parent));
fsstack_copy_attr_atime(d_inode(dentry->d_parent),
- d_inode(lower_dentry->d_parent));
+ d_inode(path->dentry));
BUG_ON(!d_count(lower_dentry));
ecryptfs_set_dentry_private(dentry, dentry_info);
- dentry_info->lower_path.mnt = lower_mnt;
+ dentry_info->lower_path.mnt = mntget(path->mnt);
dentry_info->lower_path.dentry = lower_dentry;
- if (d_really_is_negative(lower_dentry)) {
+ /*
+ * negative dentry can go positive under us here - its parent is not
+ * locked. That's OK and that could happen just as we return from
+ * ecryptfs_lookup() anyway. Just need to be careful and fetch
+ * ->d_inode only once - it's not stable here.
+ */
+ lower_inode = READ_ONCE(lower_dentry->d_inode);
+
+ if (!lower_inode) {
/* We want to add because we couldn't find in lower */
d_add(dentry, NULL);
return NULL;
@@ -512,22 +527,30 @@ static int ecryptfs_rmdir(struct inode *dir, struct dentry *dentry)
{
struct dentry *lower_dentry;
struct dentry *lower_dir_dentry;
+ struct inode *lower_dir_inode;
int rc;
lower_dentry = ecryptfs_dentry_to_lower(dentry);
- dget(dentry);
- lower_dir_dentry = lock_parent(lower_dentry);
- dget(lower_dentry);
- rc = vfs_rmdir(d_inode(lower_dir_dentry), lower_dentry);
- dput(lower_dentry);
- if (!rc && d_really_is_positive(dentry))
+ lower_dir_dentry = ecryptfs_dentry_to_lower(dentry->d_parent);
+ lower_dir_inode = d_inode(lower_dir_dentry);
+
+ inode_lock_nested(lower_dir_inode, I_MUTEX_PARENT);
+ dget(lower_dentry); // don't even try to make the lower negative
+ if (lower_dentry->d_parent != lower_dir_dentry)
+ rc = -EINVAL;
+ else if (d_unhashed(lower_dentry))
+ rc = -EINVAL;
+ else
+ rc = vfs_rmdir(lower_dir_inode, lower_dentry);
+ if (!rc) {
clear_nlink(d_inode(dentry));
- fsstack_copy_attr_times(dir, d_inode(lower_dir_dentry));
- set_nlink(dir, d_inode(lower_dir_dentry)->i_nlink);
- unlock_dir(lower_dir_dentry);
+ fsstack_copy_attr_times(dir, lower_dir_inode);
+ set_nlink(dir, lower_dir_inode->i_nlink);
+ }
+ dput(lower_dentry);
+ inode_unlock(lower_dir_inode);
if (!rc)
d_drop(dentry);
- dput(dentry);
return rc;
}
@@ -565,20 +588,22 @@ ecryptfs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct dentry *lower_new_dentry;
struct dentry *lower_old_dir_dentry;
struct dentry *lower_new_dir_dentry;
- struct dentry *trap = NULL;
+ struct dentry *trap;
struct inode *target_inode;
if (flags)
return -EINVAL;
+ lower_old_dir_dentry = ecryptfs_dentry_to_lower(old_dentry->d_parent);
+ lower_new_dir_dentry = ecryptfs_dentry_to_lower(new_dentry->d_parent);
+
lower_old_dentry = ecryptfs_dentry_to_lower(old_dentry);
lower_new_dentry = ecryptfs_dentry_to_lower(new_dentry);
- dget(lower_old_dentry);
- dget(lower_new_dentry);
- lower_old_dir_dentry = dget_parent(lower_old_dentry);
- lower_new_dir_dentry = dget_parent(lower_new_dentry);
+
target_inode = d_inode(new_dentry);
+
trap = lock_rename(lower_old_dir_dentry, lower_new_dir_dentry);
+ dget(lower_new_dentry);
rc = -EINVAL;
if (lower_old_dentry->d_parent != lower_old_dir_dentry)
goto out_lock;
@@ -606,11 +631,8 @@ ecryptfs_rename(struct inode *old_dir, struct dentry *old_dentry,
if (new_dir != old_dir)
fsstack_copy_attr_all(old_dir, d_inode(lower_old_dir_dentry));
out_lock:
- unlock_rename(lower_old_dir_dentry, lower_new_dir_dentry);
- dput(lower_new_dir_dentry);
- dput(lower_old_dir_dentry);
dput(lower_new_dentry);
- dput(lower_old_dentry);
+ unlock_rename(lower_old_dir_dentry, lower_new_dir_dentry);
return rc;
}
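
[Note: the three eCryptfs fixes above share one pattern: take the lower parent's inode lock (or lock_rename()) first, then re-validate the lower dentry under that lock, since it may have been renamed away or unlinked while the lock was being waited on. A simplified standalone model of the re-validation, using toy stand-ins for the VFS types:]

    #include <stdbool.h>
    #include <stdio.h>
    #include <errno.h>

    struct toy_dentry {
        struct toy_dentry *d_parent;
        bool hashed;
    };

    /* Re-check the victim after taking the parent lock. */
    static int check_victim(const struct toy_dentry *victim,
                            const struct toy_dentry *locked_parent)
    {
        if (victim->d_parent != locked_parent)
            return -EINVAL;   /* renamed away while we waited for the lock */
        if (!victim->hashed)
            return -EINVAL;   /* already unlinked by someone else */
        return 0;             /* still safe to operate on */
    }

    int main(void)
    {
        struct toy_dentry parent = { NULL, true };
        struct toy_dentry child  = { &parent, true };

        printf("%d\n", check_victim(&child, &parent));  /* 0 */
        return 0;
    }
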
diff --git a/fs/exec.c b/fs/exec.c
index 555e93c7dec8..c27231234764 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1015,7 +1015,7 @@ static int exec_mmap(struct mm_struct *mm)
/* Notify parent that we're no longer interested in the old VM */
tsk = current;
old_mm = current->mm;
- mm_release(tsk, old_mm);
+ exec_mm_release(tsk, old_mm);
if (old_mm) {
sync_mm_rss(old_mm);
diff --git a/fs/exportfs/expfs.c b/fs/exportfs/expfs.c
index 09bc68708d28..2dd55b172d57 100644
--- a/fs/exportfs/expfs.c
+++ b/fs/exportfs/expfs.c
@@ -519,26 +519,33 @@ struct dentry *exportfs_decode_fh(struct vfsmount *mnt, struct fid *fid,
* inode is actually connected to the parent.
*/
err = exportfs_get_name(mnt, target_dir, nbuf, result);
- if (!err) {
- inode_lock(target_dir->d_inode);
- nresult = lookup_one_len(nbuf, target_dir,
- strlen(nbuf));
- inode_unlock(target_dir->d_inode);
- if (!IS_ERR(nresult)) {
- if (nresult->d_inode) {
- dput(result);
- result = nresult;
- } else
- dput(nresult);
- }
+ if (err) {
+ dput(target_dir);
+ goto err_result;
}
+ inode_lock(target_dir->d_inode);
+ nresult = lookup_one_len(nbuf, target_dir, strlen(nbuf));
+ if (!IS_ERR(nresult)) {
+ if (unlikely(nresult->d_inode != result->d_inode)) {
+ dput(nresult);
+ nresult = ERR_PTR(-ESTALE);
+ }
+ }
+ inode_unlock(target_dir->d_inode);
/*
* At this point we are done with the parent, but it's pinned
* by the child dentry anyway.
*/
dput(target_dir);
+ if (IS_ERR(nresult)) {
+ err = PTR_ERR(nresult);
+ goto err_result;
+ }
+ dput(result);
+ result = nresult;
+
/*
* And finally make sure the dentry is actually acceptable
* to NFSD.
diff --git a/fs/ext4/Kconfig b/fs/ext4/Kconfig
index cbb5ca830e57..ef42ab040905 100644
--- a/fs/ext4/Kconfig
+++ b/fs/ext4/Kconfig
@@ -106,3 +106,20 @@ config EXT4_DEBUG
If you select Y here, then you will be able to turn on debugging
with a command such as:
echo 1 > /sys/module/ext4/parameters/mballoc_debug
+
+config EXT4_KUNIT_TESTS
+ bool "KUnit tests for ext4"
+ select EXT4_FS
+ depends on KUNIT
+ help
+ This builds the ext4 KUnit tests.
+
+ KUnit tests run during boot and output the results to the debug log
+ in TAP format (http://testanything.org/). These tests are only useful
+ for kernel developers running the KUnit test harness; they are not
+ intended for inclusion in a production build.
+
+ For more information on KUnit and unit tests in general please refer
+ to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+ If unsure, say N.
diff --git a/fs/ext4/Makefile b/fs/ext4/Makefile
index b17ddc229ac5..840b91d040f1 100644
--- a/fs/ext4/Makefile
+++ b/fs/ext4/Makefile
@@ -13,4 +13,5 @@ ext4-y := balloc.o bitmap.o block_validity.o dir.o ext4_jbd2.o extents.o \
ext4-$(CONFIG_EXT4_FS_POSIX_ACL) += acl.o
ext4-$(CONFIG_EXT4_FS_SECURITY) += xattr_security.o
+ext4-$(CONFIG_EXT4_KUNIT_TESTS) += inode-test.o
ext4-$(CONFIG_FS_VERITY) += verity.o
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 03db3e71676c..b3a2cc7c0252 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -1678,6 +1678,7 @@ static inline bool ext4_verity_in_progress(struct inode *inode)
#define EXT4_FEATURE_COMPAT_RESIZE_INODE 0x0010
#define EXT4_FEATURE_COMPAT_DIR_INDEX 0x0020
#define EXT4_FEATURE_COMPAT_SPARSE_SUPER2 0x0200
+#define EXT4_FEATURE_COMPAT_STABLE_INODES 0x0800
#define EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER 0x0001
#define EXT4_FEATURE_RO_COMPAT_LARGE_FILE 0x0002
@@ -1779,6 +1780,7 @@ EXT4_FEATURE_COMPAT_FUNCS(xattr, EXT_ATTR)
EXT4_FEATURE_COMPAT_FUNCS(resize_inode, RESIZE_INODE)
EXT4_FEATURE_COMPAT_FUNCS(dir_index, DIR_INDEX)
EXT4_FEATURE_COMPAT_FUNCS(sparse_super2, SPARSE_SUPER2)
+EXT4_FEATURE_COMPAT_FUNCS(stable_inodes, STABLE_INODES)
EXT4_FEATURE_RO_COMPAT_FUNCS(sparse_super, SPARSE_SUPER)
EXT4_FEATURE_RO_COMPAT_FUNCS(large_file, LARGE_FILE)
diff --git a/fs/ext4/inode-test.c b/fs/ext4/inode-test.c
new file mode 100644
index 000000000000..92a9da1774aa
--- /dev/null
+++ b/fs/ext4/inode-test.c
@@ -0,0 +1,272 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KUnit test of the ext4 inode code, verifying that the seconds part of
+ * the [a/c/m] timestamps in ext4 inode structs is decoded correctly.
+ */
+
+#include <kunit/test.h>
+#include <linux/kernel.h>
+#include <linux/time64.h>
+
+#include "ext4.h"
+
+/*
+ * For constructing the nonnegative timestamp lower bound value.
+ * binary: 00000000 00000000 00000000 00000000
+ */
+#define LOWER_MSB_0 0L
+/*
+ * For constructing the nonnegative timestamp upper bound value.
+ * binary: 01111111 11111111 11111111 11111111
+ *
+ */
+#define UPPER_MSB_0 0x7fffffffL
+/*
+ * For constructing the negative timestamp lower bound value.
+ * binary: 10000000 00000000 00000000 00000000
+ */
+#define LOWER_MSB_1 (-0x80000000L)
+/*
+ * For constructing the negative timestamp upper bound value.
+ * binary: 11111111 11111111 11111111 11111111
+ */
+#define UPPER_MSB_1 (-1L)
+/*
+ * Upper bound for nanoseconds value supported by the encoding.
+ * binary: 00111111 11111111 11111111 11111111
+ */
+#define MAX_NANOSECONDS ((1L << 30) - 1)
+
+#define CASE_NAME_FORMAT "%s: msb:%x lower_bound:%x extra_bits: %x"
+
+#define LOWER_BOUND_NEG_NO_EXTRA_BITS_CASE\
+ "1901-12-13 Lower bound of 32bit < 0 timestamp, no extra bits"
+#define UPPER_BOUND_NEG_NO_EXTRA_BITS_CASE\
+ "1969-12-31 Upper bound of 32bit < 0 timestamp, no extra bits"
+#define LOWER_BOUND_NONNEG_NO_EXTRA_BITS_CASE\
+ "1970-01-01 Lower bound of 32bit >=0 timestamp, no extra bits"
+#define UPPER_BOUND_NONNEG_NO_EXTRA_BITS_CASE\
+ "2038-01-19 Upper bound of 32bit >=0 timestamp, no extra bits"
+#define LOWER_BOUND_NEG_LO_1_CASE\
+ "2038-01-19 Lower bound of 32bit <0 timestamp, lo extra sec bit on"
+#define UPPER_BOUND_NEG_LO_1_CASE\
+ "2106-02-07 Upper bound of 32bit <0 timestamp, lo extra sec bit on"
+#define LOWER_BOUND_NONNEG_LO_1_CASE\
+ "2106-02-07 Lower bound of 32bit >=0 timestamp, lo extra sec bit on"
+#define UPPER_BOUND_NONNEG_LO_1_CASE\
+ "2174-02-25 Upper bound of 32bit >=0 timestamp, lo extra sec bit on"
+#define LOWER_BOUND_NEG_HI_1_CASE\
+ "2174-02-25 Lower bound of 32bit <0 timestamp, hi extra sec bit on"
+#define UPPER_BOUND_NEG_HI_1_CASE\
+ "2242-03-16 Upper bound of 32bit <0 timestamp, hi extra sec bit on"
+#define LOWER_BOUND_NONNEG_HI_1_CASE\
+ "2242-03-16 Lower bound of 32bit >=0 timestamp, hi extra sec bit on"
+#define UPPER_BOUND_NONNEG_HI_1_CASE\
+ "2310-04-04 Upper bound of 32bit >=0 timestamp, hi extra sec bit on"
+#define UPPER_BOUND_NONNEG_HI_1_NS_1_CASE\
+ "2310-04-04 Upper bound of 32bit>=0 timestamp, hi extra sec bit 1. 1 ns"
+#define LOWER_BOUND_NONNEG_HI_1_NS_MAX_CASE\
+ "2378-04-22 Lower bound of 32bit>= timestamp. Extra sec bits 1. Max ns"
+#define LOWER_BOUND_NONNEG_EXTRA_BITS_1_CASE\
+ "2378-04-22 Lower bound of 32bit >=0 timestamp. All extra sec bits on"
+#define UPPER_BOUND_NONNEG_EXTRA_BITS_1_CASE\
+ "2446-05-10 Upper bound of 32bit >=0 timestamp. All extra sec bits on"
+
+struct timestamp_expectation {
+ const char *test_case_name;
+ struct timespec64 expected;
+ u32 extra_bits;
+ bool msb_set;
+ bool lower_bound;
+};
+
+static time64_t get_32bit_time(const struct timestamp_expectation * const test)
+{
+ if (test->msb_set) {
+ if (test->lower_bound)
+ return LOWER_MSB_1;
+
+ return UPPER_MSB_1;
+ }
+
+ if (test->lower_bound)
+ return LOWER_MSB_0;
+ return UPPER_MSB_0;
+}
+
+
+/*
+ * Test data is derived from the table in the Inode Timestamps section of
+ * Documentation/filesystems/ext4/inodes.rst.
+ */
+static void inode_test_xtimestamp_decoding(struct kunit *test)
+{
+ const struct timestamp_expectation test_data[] = {
+ {
+ .test_case_name = LOWER_BOUND_NEG_NO_EXTRA_BITS_CASE,
+ .msb_set = true,
+ .lower_bound = true,
+ .extra_bits = 0,
+ .expected = {.tv_sec = -0x80000000LL, .tv_nsec = 0L},
+ },
+
+ {
+ .test_case_name = UPPER_BOUND_NEG_NO_EXTRA_BITS_CASE,
+ .msb_set = true,
+ .lower_bound = false,
+ .extra_bits = 0,
+ .expected = {.tv_sec = -1LL, .tv_nsec = 0L},
+ },
+
+ {
+ .test_case_name = LOWER_BOUND_NONNEG_NO_EXTRA_BITS_CASE,
+ .msb_set = false,
+ .lower_bound = true,
+ .extra_bits = 0,
+ .expected = {0LL, 0L},
+ },
+
+ {
+ .test_case_name = UPPER_BOUND_NONNEG_NO_EXTRA_BITS_CASE,
+ .msb_set = false,
+ .lower_bound = false,
+ .extra_bits = 0,
+ .expected = {.tv_sec = 0x7fffffffLL, .tv_nsec = 0L},
+ },
+
+ {
+ .test_case_name = LOWER_BOUND_NEG_LO_1_CASE,
+ .msb_set = true,
+ .lower_bound = true,
+ .extra_bits = 1,
+ .expected = {.tv_sec = 0x80000000LL, .tv_nsec = 0L},
+ },
+
+ {
+ .test_case_name = UPPER_BOUND_NEG_LO_1_CASE,
+ .msb_set = true,
+ .lower_bound = false,
+ .extra_bits = 1,
+ .expected = {.tv_sec = 0xffffffffLL, .tv_nsec = 0L},
+ },
+
+ {
+ .test_case_name = LOWER_BOUND_NONNEG_LO_1_CASE,
+ .msb_set = false,
+ .lower_bound = true,
+ .extra_bits = 1,
+ .expected = {.tv_sec = 0x100000000LL, .tv_nsec = 0L},
+ },
+
+ {
+ .test_case_name = UPPER_BOUND_NONNEG_LO_1_CASE,
+ .msb_set = false,
+ .lower_bound = false,
+ .extra_bits = 1,
+ .expected = {.tv_sec = 0x17fffffffLL, .tv_nsec = 0L},
+ },
+
+ {
+ .test_case_name = LOWER_BOUND_NEG_HI_1_CASE,
+ .msb_set = true,
+ .lower_bound = true,
+ .extra_bits = 2,
+ .expected = {.tv_sec = 0x180000000LL, .tv_nsec = 0L},
+ },
+
+ {
+ .test_case_name = UPPER_BOUND_NEG_HI_1_CASE,
+ .msb_set = true,
+ .lower_bound = false,
+ .extra_bits = 2,
+ .expected = {.tv_sec = 0x1ffffffffLL, .tv_nsec = 0L},
+ },
+
+ {
+ .test_case_name = LOWER_BOUND_NONNEG_HI_1_CASE,
+ .msb_set = false,
+ .lower_bound = true,
+ .extra_bits = 2,
+ .expected = {.tv_sec = 0x200000000LL, .tv_nsec = 0L},
+ },
+
+ {
+ .test_case_name = UPPER_BOUND_NONNEG_HI_1_CASE,
+ .msb_set = false,
+ .lower_bound = false,
+ .extra_bits = 2,
+ .expected = {.tv_sec = 0x27fffffffLL, .tv_nsec = 0L},
+ },
+
+ {
+ .test_case_name = UPPER_BOUND_NONNEG_HI_1_NS_1_CASE,
+ .msb_set = false,
+ .lower_bound = false,
+ .extra_bits = 6,
+ .expected = {.tv_sec = 0x27fffffffLL, .tv_nsec = 1L},
+ },
+
+ {
+ .test_case_name = LOWER_BOUND_NONNEG_HI_1_NS_MAX_CASE,
+ .msb_set = false,
+ .lower_bound = true,
+ .extra_bits = 0xFFFFFFFF,
+ .expected = {.tv_sec = 0x300000000LL,
+ .tv_nsec = MAX_NANOSECONDS},
+ },
+
+ {
+ .test_case_name = LOWER_BOUND_NONNEG_EXTRA_BITS_1_CASE,
+ .msb_set = false,
+ .lower_bound = true,
+ .extra_bits = 3,
+ .expected = {.tv_sec = 0x300000000LL, .tv_nsec = 0L},
+ },
+
+ {
+ .test_case_name = UPPER_BOUND_NONNEG_EXTRA_BITS_1_CASE,
+ .msb_set = false,
+ .lower_bound = false,
+ .extra_bits = 3,
+ .expected = {.tv_sec = 0x37fffffffLL, .tv_nsec = 0L},
+ }
+ };
+
+ struct timespec64 timestamp;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(test_data); ++i) {
+ timestamp.tv_sec = get_32bit_time(&test_data[i]);
+ ext4_decode_extra_time(&timestamp,
+ cpu_to_le32(test_data[i].extra_bits));
+
+ KUNIT_EXPECT_EQ_MSG(test,
+ test_data[i].expected.tv_sec,
+ timestamp.tv_sec,
+ CASE_NAME_FORMAT,
+ test_data[i].test_case_name,
+ test_data[i].msb_set,
+ test_data[i].lower_bound,
+ test_data[i].extra_bits);
+ KUNIT_EXPECT_EQ_MSG(test,
+ test_data[i].expected.tv_nsec,
+ timestamp.tv_nsec,
+ CASE_NAME_FORMAT,
+ test_data[i].test_case_name,
+ test_data[i].msb_set,
+ test_data[i].lower_bound,
+ test_data[i].extra_bits);
+ }
+}
+
+static struct kunit_case ext4_inode_test_cases[] = {
+ KUNIT_CASE(inode_test_xtimestamp_decoding),
+ {}
+};
+
+static struct kunit_suite ext4_inode_test_suite = {
+ .name = "ext4_inode_test",
+ .test_cases = ext4_inode_test_cases,
+};
+
+kunit_test_suite(ext4_inode_test_suite);
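
[Note: the decoding this test exercises splits the 32-bit "extra" field in two: the low two bits extend the signed 32-bit seconds value by whole epochs, and the remaining 30 bits carry nanoseconds, hence MAX_NANOSECONDS above. A standalone sketch of that arithmetic, assuming the encoding described in Documentation/filesystems/ext4/inodes.rst rather than quoting the kernel helper:]

    #include <stdint.h>
    #include <stdio.h>

    #define TOY_EPOCH_BITS 2
    #define TOY_EPOCH_MASK ((1U << TOY_EPOCH_BITS) - 1)

    static void decode_extra_time(int64_t *sec, long *nsec,
                                  uint32_t raw32, uint32_t extra)
    {
        *sec = (int32_t)raw32;                    /* signed 32-bit seconds */
        *sec += (int64_t)(extra & TOY_EPOCH_MASK) << 32;
        *nsec = extra >> TOY_EPOCH_BITS;          /* upper 30 bits */
    }

    int main(void)
    {
        int64_t sec;
        long nsec;

        /* LOWER_BOUND_NEG_LO_1_CASE: msb set, lower bound, extra = 1 */
        decode_extra_time(&sec, &nsec, 0x80000000U, 1);
        printf("%lld.%09ld\n", (long long)sec, nsec); /* 2147483648.000000000 */
        return 0;
    }
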
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 516faa280ced..a7ca65177980 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -5717,12 +5717,15 @@ int ext4_getattr(const struct path *path, struct kstat *stat,
stat->attributes |= STATX_ATTR_IMMUTABLE;
if (flags & EXT4_NODUMP_FL)
stat->attributes |= STATX_ATTR_NODUMP;
+ if (flags & EXT4_VERITY_FL)
+ stat->attributes |= STATX_ATTR_VERITY;
stat->attributes_mask |= (STATX_ATTR_APPEND |
STATX_ATTR_COMPRESSED |
STATX_ATTR_ENCRYPTED |
STATX_ATTR_IMMUTABLE |
- STATX_ATTR_NODUMP);
+ STATX_ATTR_NODUMP |
+ STATX_ATTR_VERITY);
generic_fillattr(inode, stat);
return 0;
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index dd654e53ba3d..b3cbf8622eab 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1345,6 +1345,18 @@ static bool ext4_dummy_context(struct inode *inode)
return DUMMY_ENCRYPTION_ENABLED(EXT4_SB(inode->i_sb));
}
+static bool ext4_has_stable_inodes(struct super_block *sb)
+{
+ return ext4_has_feature_stable_inodes(sb);
+}
+
+static void ext4_get_ino_and_lblk_bits(struct super_block *sb,
+ int *ino_bits_ret, int *lblk_bits_ret)
+{
+ *ino_bits_ret = 8 * sizeof(EXT4_SB(sb)->s_es->s_inodes_count);
+ *lblk_bits_ret = 8 * sizeof(ext4_lblk_t);
+}
+
static const struct fscrypt_operations ext4_cryptops = {
.key_prefix = "ext4:",
.get_context = ext4_get_context,
@@ -1352,6 +1364,8 @@ static const struct fscrypt_operations ext4_cryptops = {
.dummy_context = ext4_dummy_context,
.empty_dir = ext4_empty_dir,
.max_namelen = EXT4_NAME_LEN,
+ .has_stable_inodes = ext4_has_stable_inodes,
+ .get_ino_and_lblk_bits = ext4_get_ino_and_lblk_bits,
};
#endif
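
[Note: the widths reported above come straight from the on-disk types: s_inodes_count is a 32-bit field and ext4_lblk_t is 32-bit, so both hooks report 32 bits, which satisfies the IV_INO_LBLK_64 check added in fs/crypto/policy.c. A trivial standalone check with a toy typedef standing in for ext4_lblk_t:]

    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t toy_lblk_t;   /* stand-in for ext4_lblk_t */

    int main(void)
    {
        printf("ino_bits=%zu lblk_bits=%zu\n",
               8 * sizeof(uint32_t),    /* s_inodes_count is 32-bit */
               8 * sizeof(toy_lblk_t)); /* both print 32 */
        return 0;
    }
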
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index 29bc0a542759..6a2e5b7d8fc7 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -726,11 +726,14 @@ int f2fs_getattr(const struct path *path, struct kstat *stat,
stat->attributes |= STATX_ATTR_IMMUTABLE;
if (flags & F2FS_NODUMP_FL)
stat->attributes |= STATX_ATTR_NODUMP;
+ if (IS_VERITY(inode))
+ stat->attributes |= STATX_ATTR_VERITY;
stat->attributes_mask |= (STATX_ATTR_APPEND |
STATX_ATTR_ENCRYPTED |
STATX_ATTR_IMMUTABLE |
- STATX_ATTR_NODUMP);
+ STATX_ATTR_NODUMP |
+ STATX_ATTR_VERITY);
generic_fillattr(inode, stat);
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 808709581481..2c997f94a3b2 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -1771,7 +1771,8 @@ static int __f2fs_issue_discard_zone(struct f2fs_sb_info *sbi,
return -EIO;
}
trace_f2fs_issue_reset_zone(bdev, blkstart);
- return blkdev_reset_zones(bdev, sector, nr_sects, GFP_NOFS);
+ return blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET,
+ sector, nr_sects, GFP_NOFS);
}
/* For conventional zones, use regular discard if supported */
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 1443cee15863..197ad6b314de 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -2308,13 +2308,27 @@ static bool f2fs_dummy_context(struct inode *inode)
return DUMMY_ENCRYPTION_ENABLED(F2FS_I_SB(inode));
}
+static bool f2fs_has_stable_inodes(struct super_block *sb)
+{
+ return true;
+}
+
+static void f2fs_get_ino_and_lblk_bits(struct super_block *sb,
+ int *ino_bits_ret, int *lblk_bits_ret)
+{
+ *ino_bits_ret = 8 * sizeof(nid_t);
+ *lblk_bits_ret = 8 * sizeof(block_t);
+}
+
static const struct fscrypt_operations f2fs_cryptops = {
- .key_prefix = "f2fs:",
- .get_context = f2fs_get_context,
- .set_context = f2fs_set_context,
- .dummy_context = f2fs_dummy_context,
- .empty_dir = f2fs_empty_dir,
- .max_namelen = F2FS_NAME_LEN,
+ .key_prefix = "f2fs:",
+ .get_context = f2fs_get_context,
+ .set_context = f2fs_set_context,
+ .dummy_context = f2fs_dummy_context,
+ .empty_dir = f2fs_empty_dir,
+ .max_namelen = F2FS_NAME_LEN,
+ .has_stable_inodes = f2fs_has_stable_inodes,
+ .get_ino_and_lblk_bits = f2fs_get_ino_and_lblk_bits,
};
#endif
@@ -2857,15 +2871,21 @@ static int init_percpu_info(struct f2fs_sb_info *sbi)
}
#ifdef CONFIG_BLK_DEV_ZONED
+static int f2fs_report_zone_cb(struct blk_zone *zone, unsigned int idx,
+ void *data)
+{
+ struct f2fs_dev_info *dev = data;
+
+ if (zone->type != BLK_ZONE_TYPE_CONVENTIONAL)
+ set_bit(idx, dev->blkz_seq);
+ return 0;
+}
+
static int init_blkz_info(struct f2fs_sb_info *sbi, int devi)
{
struct block_device *bdev = FDEV(devi).bdev;
sector_t nr_sectors = bdev->bd_part->nr_sects;
- sector_t sector = 0;
- struct blk_zone *zones;
- unsigned int i, nr_zones;
- unsigned int n = 0;
- int err = -EIO;
+ int ret;
if (!f2fs_sb_has_blkzoned(sbi))
return 0;
@@ -2890,38 +2910,13 @@ static int init_blkz_info(struct f2fs_sb_info *sbi, int devi)
if (!FDEV(devi).blkz_seq)
return -ENOMEM;
-#define F2FS_REPORT_NR_ZONES 4096
-
- zones = f2fs_kzalloc(sbi,
- array_size(F2FS_REPORT_NR_ZONES,
- sizeof(struct blk_zone)),
- GFP_KERNEL);
- if (!zones)
- return -ENOMEM;
-
/* Get block zones type */
- while (zones && sector < nr_sectors) {
-
- nr_zones = F2FS_REPORT_NR_ZONES;
- err = blkdev_report_zones(bdev, sector, zones, &nr_zones);
- if (err)
- break;
- if (!nr_zones) {
- err = -EIO;
- break;
- }
-
- for (i = 0; i < nr_zones; i++) {
- if (zones[i].type != BLK_ZONE_TYPE_CONVENTIONAL)
- set_bit(n, FDEV(devi).blkz_seq);
- sector += zones[i].len;
- n++;
- }
- }
-
- kvfree(zones);
+ ret = blkdev_report_zones(bdev, 0, BLK_ALL_ZONES, f2fs_report_zone_cb,
+ &FDEV(devi));
+ if (ret < 0)
+ return ret;
- return err;
+ return 0;
}
#endif
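
[Note: the f2fs conversion above moves from filling a caller-allocated zone array in chunks to the callback style of blkdev_report_zones(): the block layer walks the zones and invokes the callback once per zone, so the filesystem no longer needs the 4096-entry temporary buffer. A toy standalone model of that iteration pattern, with hypothetical types rather than the <linux/blkdev.h> API:]

    #include <stdio.h>

    enum toy_zone_type { TOY_ZONE_CONV, TOY_ZONE_SEQ };
    struct toy_zone { enum toy_zone_type type; };

    typedef int (*toy_report_cb)(struct toy_zone *zone, unsigned int idx,
                                 void *data);

    /* Walk every zone once, invoking the callback per zone. */
    static int toy_report_zones(struct toy_zone *zones, unsigned int nr,
                                toy_report_cb cb, void *data)
    {
        unsigned int i;
        int ret;

        for (i = 0; i < nr; i++) {
            ret = cb(&zones[i], i, data);
            if (ret)
                return ret;
        }
        return 0;
    }

    /* Mark sequential-write-required zones, as f2fs_report_zone_cb does. */
    static int mark_seq_cb(struct toy_zone *zone, unsigned int idx, void *data)
    {
        unsigned char *seq = data;

        if (zone->type != TOY_ZONE_CONV)
            seq[idx] = 1;
        return 0;
    }

    int main(void)
    {
        struct toy_zone zones[3] = {
            { TOY_ZONE_CONV }, { TOY_ZONE_SEQ }, { TOY_ZONE_SEQ }
        };
        unsigned char seq[3] = { 0 };

        toy_report_zones(zones, 3, mark_seq_cb, seq);
        printf("%u%u%u\n", seq[0], seq[1], seq[2]);  /* 011 */
        return 0;
    }
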
diff --git a/fs/fcntl.c b/fs/fcntl.c
index 3d40771e8e7c..41b6438bd2d9 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -261,7 +261,7 @@ static int f_getowner_uids(struct file *filp, unsigned long arg)
static bool rw_hint_valid(enum rw_hint hint)
{
switch (hint) {
- case RWF_WRITE_LIFE_NOT_SET:
+ case RWH_WRITE_LIFE_NOT_SET:
case RWH_WRITE_LIFE_NONE:
case RWH_WRITE_LIFE_SHORT:
case RWH_WRITE_LIFE_MEDIUM:
diff --git a/fs/io-wq.c b/fs/io-wq.c
new file mode 100644
index 000000000000..9174007ce107
--- /dev/null
+++ b/fs/io-wq.c
@@ -0,0 +1,1065 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Basic worker thread pool for io_uring
+ *
+ * Copyright (C) 2019 Jens Axboe
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/sched/signal.h>
+#include <linux/mm.h>
+#include <linux/mmu_context.h>
+#include <linux/sched/mm.h>
+#include <linux/percpu.h>
+#include <linux/slab.h>
+#include <linux/kthread.h>
+#include <linux/rculist_nulls.h>
+
+#include "io-wq.h"
+
+#define WORKER_IDLE_TIMEOUT (5 * HZ)
+
+enum {
+ IO_WORKER_F_UP = 1, /* up and active */
+ IO_WORKER_F_RUNNING = 2, /* account as running */
+ IO_WORKER_F_FREE = 4, /* worker on free list */
+ IO_WORKER_F_EXITING = 8, /* worker exiting */
+ IO_WORKER_F_FIXED = 16, /* static idle worker */
+ IO_WORKER_F_BOUND = 32, /* is doing bounded work */
+};
+
+enum {
+ IO_WQ_BIT_EXIT = 0, /* wq exiting */
+ IO_WQ_BIT_CANCEL = 1, /* cancel work on list */
+};
+
+enum {
+ IO_WQE_FLAG_STALLED = 1, /* stalled on hash */
+};
+
+/*
+ * One for each thread in a wqe pool
+ */
+struct io_worker {
+ refcount_t ref;
+ unsigned flags;
+ struct hlist_nulls_node nulls_node;
+ struct list_head all_list;
+ struct task_struct *task;
+ wait_queue_head_t wait;
+ struct io_wqe *wqe;
+
+ struct io_wq_work *cur_work;
+ spinlock_t lock;
+
+ struct rcu_head rcu;
+ struct mm_struct *mm;
+ struct files_struct *restore_files;
+};
+
+#if BITS_PER_LONG == 64
+#define IO_WQ_HASH_ORDER 6
+#else
+#define IO_WQ_HASH_ORDER 5
+#endif
+
+struct io_wqe_acct {
+ unsigned nr_workers;
+ unsigned max_workers;
+ atomic_t nr_running;
+};
+
+enum {
+ IO_WQ_ACCT_BOUND,
+ IO_WQ_ACCT_UNBOUND,
+};
+
+/*
+ * Per-node worker thread pool
+ */
+struct io_wqe {
+ struct {
+ spinlock_t lock;
+ struct list_head work_list;
+ unsigned long hash_map;
+ unsigned flags;
+ } ____cacheline_aligned_in_smp;
+
+ int node;
+ struct io_wqe_acct acct[2];
+
+ struct hlist_nulls_head free_list;
+ struct hlist_nulls_head busy_list;
+ struct list_head all_list;
+
+ struct io_wq *wq;
+};
+
+/*
+ * Per io_wq state
+ */
+struct io_wq {
+ struct io_wqe **wqes;
+ unsigned long state;
+ unsigned nr_wqes;
+
+ get_work_fn *get_work;
+ put_work_fn *put_work;
+
+ struct task_struct *manager;
+ struct user_struct *user;
+ struct mm_struct *mm;
+ refcount_t refs;
+ struct completion done;
+};
+
+static bool io_worker_get(struct io_worker *worker)
+{
+ return refcount_inc_not_zero(&worker->ref);
+}
+
+static void io_worker_release(struct io_worker *worker)
+{
+ if (refcount_dec_and_test(&worker->ref))
+ wake_up_process(worker->task);
+}
+
+/*
+ * Note: drops the wqe->lock if returning true! The caller must re-acquire
+ * the lock in that case. Some callers need to restart handling if this
+ * happens, so we can't just re-acquire the lock on behalf of the caller.
+ */
+static bool __io_worker_unuse(struct io_wqe *wqe, struct io_worker *worker)
+{
+ bool dropped_lock = false;
+
+ if (current->files != worker->restore_files) {
+ __acquire(&wqe->lock);
+ spin_unlock_irq(&wqe->lock);
+ dropped_lock = true;
+
+ task_lock(current);
+ current->files = worker->restore_files;
+ task_unlock(current);
+ }
+
+ /*
+ * If we have an active mm, we need to drop the wq lock before unusing
+ * it. If we do, return true and let the caller retry the idle loop.
+ */
+ if (worker->mm) {
+ if (!dropped_lock) {
+ __acquire(&wqe->lock);
+ spin_unlock_irq(&wqe->lock);
+ dropped_lock = true;
+ }
+ __set_current_state(TASK_RUNNING);
+ set_fs(KERNEL_DS);
+ unuse_mm(worker->mm);
+ mmput(worker->mm);
+ worker->mm = NULL;
+ }
+
+ return dropped_lock;
+}
+
+static inline struct io_wqe_acct *io_work_get_acct(struct io_wqe *wqe,
+ struct io_wq_work *work)
+{
+ if (work->flags & IO_WQ_WORK_UNBOUND)
+ return &wqe->acct[IO_WQ_ACCT_UNBOUND];
+
+ return &wqe->acct[IO_WQ_ACCT_BOUND];
+}
+
+static inline struct io_wqe_acct *io_wqe_get_acct(struct io_wqe *wqe,
+ struct io_worker *worker)
+{
+ if (worker->flags & IO_WORKER_F_BOUND)
+ return &wqe->acct[IO_WQ_ACCT_BOUND];
+
+ return &wqe->acct[IO_WQ_ACCT_UNBOUND];
+}
+
+static void io_worker_exit(struct io_worker *worker)
+{
+ struct io_wqe *wqe = worker->wqe;
+ struct io_wqe_acct *acct = io_wqe_get_acct(wqe, worker);
+ unsigned nr_workers;
+
+ /*
+ * If we're not at zero, someone else is holding a brief reference
+ * to the worker. Wait for that to go away.
+ */
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (!refcount_dec_and_test(&worker->ref))
+ schedule();
+ __set_current_state(TASK_RUNNING);
+
+ preempt_disable();
+ current->flags &= ~PF_IO_WORKER;
+ if (worker->flags & IO_WORKER_F_RUNNING)
+ atomic_dec(&acct->nr_running);
+ if (!(worker->flags & IO_WORKER_F_BOUND))
+ atomic_dec(&wqe->wq->user->processes);
+ worker->flags = 0;
+ preempt_enable();
+
+ spin_lock_irq(&wqe->lock);
+ hlist_nulls_del_rcu(&worker->nulls_node);
+ list_del_rcu(&worker->all_list);
+ if (__io_worker_unuse(wqe, worker)) {
+ __release(&wqe->lock);
+ spin_lock_irq(&wqe->lock);
+ }
+ acct->nr_workers--;
+ nr_workers = wqe->acct[IO_WQ_ACCT_BOUND].nr_workers +
+ wqe->acct[IO_WQ_ACCT_UNBOUND].nr_workers;
+ spin_unlock_irq(&wqe->lock);
+
+ /* all workers gone, wq exit can proceed */
+ if (!nr_workers && refcount_dec_and_test(&wqe->wq->refs))
+ complete(&wqe->wq->done);
+
+ kfree_rcu(worker, rcu);
+}
+
+static inline bool io_wqe_run_queue(struct io_wqe *wqe)
+ __must_hold(wqe->lock)
+{
+ if (!list_empty(&wqe->work_list) && !(wqe->flags & IO_WQE_FLAG_STALLED))
+ return true;
+ return false;
+}
+
+/*
+ * Check head of free list for an available worker. If one isn't available,
+ * caller must wake up the wq manager to create one.
+ */
+static bool io_wqe_activate_free_worker(struct io_wqe *wqe)
+ __must_hold(RCU)
+{
+ struct hlist_nulls_node *n;
+ struct io_worker *worker;
+
+ n = rcu_dereference(hlist_nulls_first_rcu(&wqe->free_list));
+ if (is_a_nulls(n))
+ return false;
+
+ worker = hlist_nulls_entry(n, struct io_worker, nulls_node);
+ if (io_worker_get(worker)) {
+ wake_up(&worker->wait);
+ io_worker_release(worker);
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * We need a worker. If we find a free one, we're good. If not, and we're
+ * below the max number of workers, wake up the manager to create one.
+ */
+static void io_wqe_wake_worker(struct io_wqe *wqe, struct io_wqe_acct *acct)
+{
+ bool ret;
+
+ /*
+ * Most likely an attempt to queue unbounded work on an io_wq that
+ * wasn't setup with any unbounded workers.
+ */
+ WARN_ON_ONCE(!acct->max_workers);
+
+ rcu_read_lock();
+ ret = io_wqe_activate_free_worker(wqe);
+ rcu_read_unlock();
+
+ if (!ret && acct->nr_workers < acct->max_workers)
+ wake_up_process(wqe->wq->manager);
+}
+
+static void io_wqe_inc_running(struct io_wqe *wqe, struct io_worker *worker)
+{
+ struct io_wqe_acct *acct = io_wqe_get_acct(wqe, worker);
+
+ atomic_inc(&acct->nr_running);
+}
+
+static void io_wqe_dec_running(struct io_wqe *wqe, struct io_worker *worker)
+ __must_hold(wqe->lock)
+{
+ struct io_wqe_acct *acct = io_wqe_get_acct(wqe, worker);
+
+ if (atomic_dec_and_test(&acct->nr_running) && io_wqe_run_queue(wqe))
+ io_wqe_wake_worker(wqe, acct);
+}
+
+static void io_worker_start(struct io_wqe *wqe, struct io_worker *worker)
+{
+ allow_kernel_signal(SIGINT);
+
+ current->flags |= PF_IO_WORKER;
+
+ worker->flags |= (IO_WORKER_F_UP | IO_WORKER_F_RUNNING);
+ worker->restore_files = current->files;
+ io_wqe_inc_running(wqe, worker);
+}
+
+/*
+ * Worker will start processing some work. Move it to the busy list, if
+ * it's currently on the freelist
+ */
+static void __io_worker_busy(struct io_wqe *wqe, struct io_worker *worker,
+ struct io_wq_work *work)
+ __must_hold(wqe->lock)
+{
+ bool worker_bound, work_bound;
+
+ if (worker->flags & IO_WORKER_F_FREE) {
+ worker->flags &= ~IO_WORKER_F_FREE;
+ hlist_nulls_del_init_rcu(&worker->nulls_node);
+ hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->busy_list);
+ }
+
+ /*
+ * If worker is moving from bound to unbound (or vice versa), then
+ * ensure we update the running accounting.
+ */
+ worker_bound = (worker->flags & IO_WORKER_F_BOUND) != 0;
+ work_bound = (work->flags & IO_WQ_WORK_UNBOUND) == 0;
+ if (worker_bound != work_bound) {
+ io_wqe_dec_running(wqe, worker);
+ if (work_bound) {
+ worker->flags |= IO_WORKER_F_BOUND;
+ wqe->acct[IO_WQ_ACCT_UNBOUND].nr_workers--;
+ wqe->acct[IO_WQ_ACCT_BOUND].nr_workers++;
+ atomic_dec(&wqe->wq->user->processes);
+ } else {
+ worker->flags &= ~IO_WORKER_F_BOUND;
+ wqe->acct[IO_WQ_ACCT_UNBOUND].nr_workers++;
+ wqe->acct[IO_WQ_ACCT_BOUND].nr_workers--;
+ atomic_inc(&wqe->wq->user->processes);
+ }
+ io_wqe_inc_running(wqe, worker);
+ }
+}
+
+/*
+ * No work, worker going to sleep. Move to freelist, and unuse mm if we
+ * have one attached. Dropping the mm may potentially sleep, so we drop
+ * the lock in that case and return success. Since the caller has to
+ * retry the loop in that case (we changed task state), we don't regrab
+ * the lock if we return success.
+ */
+static bool __io_worker_idle(struct io_wqe *wqe, struct io_worker *worker)
+ __must_hold(wqe->lock)
+{
+ if (!(worker->flags & IO_WORKER_F_FREE)) {
+ worker->flags |= IO_WORKER_F_FREE;
+ hlist_nulls_del_init_rcu(&worker->nulls_node);
+ hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
+ }
+
+ return __io_worker_unuse(wqe, worker);
+}
+
+static struct io_wq_work *io_get_next_work(struct io_wqe *wqe, unsigned *hash)
+ __must_hold(wqe->lock)
+{
+ struct io_wq_work *work;
+
+ list_for_each_entry(work, &wqe->work_list, list) {
+ /* not hashed, can run anytime */
+ if (!(work->flags & IO_WQ_WORK_HASHED)) {
+ list_del(&work->list);
+ return work;
+ }
+
+ /* hashed, can run if not already running */
+ *hash = work->flags >> IO_WQ_HASH_SHIFT;
+ if (!(wqe->hash_map & BIT_ULL(*hash))) {
+ wqe->hash_map |= BIT_ULL(*hash);
+ list_del(&work->list);
+ return work;
+ }
+ }
+
+ return NULL;
+}
+
+static void io_worker_handle_work(struct io_worker *worker)
+ __releases(wqe->lock)
+{
+ struct io_wq_work *work, *old_work = NULL, *put_work = NULL;
+ struct io_wqe *wqe = worker->wqe;
+ struct io_wq *wq = wqe->wq;
+
+ do {
+ unsigned hash = -1U;
+
+ /*
+ * If we got some work, mark us as busy. If we didn't, but
+ * the list isn't empty, it means we stalled on hashed work.
+ * Mark us stalled so we don't keep looking for work when we
+ * can't make progress, any work completion or insertion will
+ * clear the stalled flag.
+ */
+ work = io_get_next_work(wqe, &hash);
+ if (work)
+ __io_worker_busy(wqe, worker, work);
+ else if (!list_empty(&wqe->work_list))
+ wqe->flags |= IO_WQE_FLAG_STALLED;
+
+ spin_unlock_irq(&wqe->lock);
+ if (put_work && wq->put_work)
+ wq->put_work(old_work);
+ if (!work)
+ break;
+next:
+ /* flush any pending signals before assigning new work */
+ if (signal_pending(current))
+ flush_signals(current);
+
+ spin_lock_irq(&worker->lock);
+ worker->cur_work = work;
+ spin_unlock_irq(&worker->lock);
+
+ if ((work->flags & IO_WQ_WORK_NEEDS_FILES) &&
+ current->files != work->files) {
+ task_lock(current);
+ current->files = work->files;
+ task_unlock(current);
+ }
+ if ((work->flags & IO_WQ_WORK_NEEDS_USER) && !worker->mm &&
+ wq->mm && mmget_not_zero(wq->mm)) {
+ use_mm(wq->mm);
+ set_fs(USER_DS);
+ worker->mm = wq->mm;
+ }
+ if (test_bit(IO_WQ_BIT_CANCEL, &wq->state))
+ work->flags |= IO_WQ_WORK_CANCEL;
+ if (worker->mm)
+ work->flags |= IO_WQ_WORK_HAS_MM;
+
+ if (wq->get_work && !(work->flags & IO_WQ_WORK_INTERNAL)) {
+ put_work = work;
+ wq->get_work(work);
+ }
+
+ old_work = work;
+ work->func(&work);
+
+ spin_lock_irq(&worker->lock);
+ worker->cur_work = NULL;
+ spin_unlock_irq(&worker->lock);
+
+ spin_lock_irq(&wqe->lock);
+
+ if (hash != -1U) {
+ wqe->hash_map &= ~BIT_ULL(hash);
+ wqe->flags &= ~IO_WQE_FLAG_STALLED;
+ }
+ if (work && work != old_work) {
+ spin_unlock_irq(&wqe->lock);
+
+ if (put_work && wq->put_work) {
+ wq->put_work(put_work);
+ put_work = NULL;
+ }
+
+ /* dependent work not hashed */
+ hash = -1U;
+ goto next;
+ }
+ } while (1);
+}
+
+static int io_wqe_worker(void *data)
+{
+ struct io_worker *worker = data;
+ struct io_wqe *wqe = worker->wqe;
+ struct io_wq *wq = wqe->wq;
+ DEFINE_WAIT(wait);
+
+ io_worker_start(wqe, worker);
+
+ while (!test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
+ prepare_to_wait(&worker->wait, &wait, TASK_INTERRUPTIBLE);
+
+ spin_lock_irq(&wqe->lock);
+ if (io_wqe_run_queue(wqe)) {
+ __set_current_state(TASK_RUNNING);
+ io_worker_handle_work(worker);
+ continue;
+ }
+ /* drops the lock on success, retry */
+ if (__io_worker_idle(wqe, worker)) {
+ __release(&wqe->lock);
+ continue;
+ }
+ spin_unlock_irq(&wqe->lock);
+ if (signal_pending(current))
+ flush_signals(current);
+ if (schedule_timeout(WORKER_IDLE_TIMEOUT))
+ continue;
+ /* timed out, exit unless we're the fixed worker */
+ if (test_bit(IO_WQ_BIT_EXIT, &wq->state) ||
+ !(worker->flags & IO_WORKER_F_FIXED))
+ break;
+ }
+
+ finish_wait(&worker->wait, &wait);
+
+ if (test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
+ spin_lock_irq(&wqe->lock);
+ if (!list_empty(&wqe->work_list))
+ io_worker_handle_work(worker);
+ else
+ spin_unlock_irq(&wqe->lock);
+ }
+
+ io_worker_exit(worker);
+ return 0;
+}
+
+/*
+ * Called when a worker is scheduled in. Mark us as currently running.
+ */
+void io_wq_worker_running(struct task_struct *tsk)
+{
+ struct io_worker *worker = kthread_data(tsk);
+ struct io_wqe *wqe = worker->wqe;
+
+ if (!(worker->flags & IO_WORKER_F_UP))
+ return;
+ if (worker->flags & IO_WORKER_F_RUNNING)
+ return;
+ worker->flags |= IO_WORKER_F_RUNNING;
+ io_wqe_inc_running(wqe, worker);
+}
+
+/*
+ * Called when worker is going to sleep. If there are no workers currently
+ * running and we have work pending, wake up a free one or have the manager
+ * set one up.
+ */
+void io_wq_worker_sleeping(struct task_struct *tsk)
+{
+ struct io_worker *worker = kthread_data(tsk);
+ struct io_wqe *wqe = worker->wqe;
+
+ if (!(worker->flags & IO_WORKER_F_UP))
+ return;
+ if (!(worker->flags & IO_WORKER_F_RUNNING))
+ return;
+
+ worker->flags &= ~IO_WORKER_F_RUNNING;
+
+ spin_lock_irq(&wqe->lock);
+ io_wqe_dec_running(wqe, worker);
+ spin_unlock_irq(&wqe->lock);
+}
+
+static void create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index)
+{
+ struct io_wqe_acct *acct =&wqe->acct[index];
+ struct io_worker *worker;
+
+ worker = kcalloc_node(1, sizeof(*worker), GFP_KERNEL, wqe->node);
+ if (!worker)
+ return;
+
+ refcount_set(&worker->ref, 1);
+ worker->nulls_node.pprev = NULL;
+ init_waitqueue_head(&worker->wait);
+ worker->wqe = wqe;
+ spin_lock_init(&worker->lock);
+
+ worker->task = kthread_create_on_node(io_wqe_worker, worker, wqe->node,
+ "io_wqe_worker-%d/%d", index, wqe->node);
+ if (IS_ERR(worker->task)) {
+ kfree(worker);
+ return;
+ }
+
+ spin_lock_irq(&wqe->lock);
+ hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
+ list_add_tail_rcu(&worker->all_list, &wqe->all_list);
+ worker->flags |= IO_WORKER_F_FREE;
+ if (index == IO_WQ_ACCT_BOUND)
+ worker->flags |= IO_WORKER_F_BOUND;
+ if (!acct->nr_workers && (worker->flags & IO_WORKER_F_BOUND))
+ worker->flags |= IO_WORKER_F_FIXED;
+ acct->nr_workers++;
+ spin_unlock_irq(&wqe->lock);
+
+ if (index == IO_WQ_ACCT_UNBOUND)
+ atomic_inc(&wq->user->processes);
+
+ wake_up_process(worker->task);
+}
+
+static inline bool io_wqe_need_worker(struct io_wqe *wqe, int index)
+ __must_hold(wqe->lock)
+{
+ struct io_wqe_acct *acct = &wqe->acct[index];
+
+ /* always ensure we have one bounded worker */
+ if (index == IO_WQ_ACCT_BOUND && !acct->nr_workers)
+ return true;
+ /* if we have available workers or no work, no need */
+ if (!hlist_nulls_empty(&wqe->free_list) || !io_wqe_run_queue(wqe))
+ return false;
+ return acct->nr_workers < acct->max_workers;
+}
+
+/*
+ * Manager thread. Tasked with creating new workers, if we need them.
+ */
+static int io_wq_manager(void *data)
+{
+ struct io_wq *wq = data;
+
+ while (!kthread_should_stop()) {
+ int i;
+
+ for (i = 0; i < wq->nr_wqes; i++) {
+ struct io_wqe *wqe = wq->wqes[i];
+ bool fork_worker[2] = { false, false };
+
+ spin_lock_irq(&wqe->lock);
+ if (io_wqe_need_worker(wqe, IO_WQ_ACCT_BOUND))
+ fork_worker[IO_WQ_ACCT_BOUND] = true;
+ if (io_wqe_need_worker(wqe, IO_WQ_ACCT_UNBOUND))
+ fork_worker[IO_WQ_ACCT_UNBOUND] = true;
+ spin_unlock_irq(&wqe->lock);
+ if (fork_worker[IO_WQ_ACCT_BOUND])
+ create_io_worker(wq, wqe, IO_WQ_ACCT_BOUND);
+ if (fork_worker[IO_WQ_ACCT_UNBOUND])
+ create_io_worker(wq, wqe, IO_WQ_ACCT_UNBOUND);
+ }
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(HZ);
+ }
+
+ return 0;
+}
+
+static bool io_wq_can_queue(struct io_wqe *wqe, struct io_wqe_acct *acct,
+ struct io_wq_work *work)
+{
+ bool free_worker;
+
+ if (!(work->flags & IO_WQ_WORK_UNBOUND))
+ return true;
+ if (atomic_read(&acct->nr_running))
+ return true;
+
+ rcu_read_lock();
+ free_worker = !hlist_nulls_empty(&wqe->free_list);
+ rcu_read_unlock();
+ if (free_worker)
+ return true;
+
+ if (atomic_read(&wqe->wq->user->processes) >= acct->max_workers &&
+ !(capable(CAP_SYS_RESOURCE) || capable(CAP_SYS_ADMIN)))
+ return false;
+
+ return true;
+}
+
+static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work)
+{
+ struct io_wqe_acct *acct = io_work_get_acct(wqe, work);
+ unsigned long flags;
+
+ /*
+ * Do early check to see if we need a new unbound worker, and if we do,
+ * if we're allowed to do so. This isn't 100% accurate as there's a
+ * gap between this check and incrementing the value, but that's OK.
+ * It's close enough to not be an issue, and fork() has the same delay.
+ */
+ if (unlikely(!io_wq_can_queue(wqe, acct, work))) {
+ work->flags |= IO_WQ_WORK_CANCEL;
+ work->func(&work);
+ return;
+ }
+
+ spin_lock_irqsave(&wqe->lock, flags);
+ list_add_tail(&work->list, &wqe->work_list);
+ wqe->flags &= ~IO_WQE_FLAG_STALLED;
+ spin_unlock_irqrestore(&wqe->lock, flags);
+
+ if (!atomic_read(&acct->nr_running))
+ io_wqe_wake_worker(wqe, acct);
+}
+
+void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work)
+{
+ struct io_wqe *wqe = wq->wqes[numa_node_id()];
+
+ io_wqe_enqueue(wqe, work);
+}
+
+/*
+ * Enqueue work, hashed by some key. Work items that hash to the same value
+ * will not be done in parallel. Used to limit concurrent writes, generally
+ * hashed by inode.
+ */
+void io_wq_enqueue_hashed(struct io_wq *wq, struct io_wq_work *work, void *val)
+{
+ struct io_wqe *wqe = wq->wqes[numa_node_id()];
+ unsigned bit;
+
+ bit = hash_ptr(val, IO_WQ_HASH_ORDER);
+ work->flags |= (IO_WQ_WORK_HASHED | (bit << IO_WQ_HASH_SHIFT));
+ io_wqe_enqueue(wqe, work);
+}
+
+static bool io_wqe_worker_send_sig(struct io_worker *worker, void *data)
+{
+ send_sig(SIGINT, worker->task, 1);
+ return false;
+}
+
+/*
+ * Iterate the passed in list and call the specific function for each
+ * worker that isn't exiting
+ */
+static bool io_wq_for_each_worker(struct io_wqe *wqe,
+ bool (*func)(struct io_worker *, void *),
+ void *data)
+{
+ struct io_worker *worker;
+ bool ret = false;
+
+ list_for_each_entry_rcu(worker, &wqe->all_list, all_list) {
+ if (io_worker_get(worker)) {
+ ret = func(worker, data);
+ io_worker_release(worker);
+ if (ret)
+ break;
+ }
+ }
+
+ return ret;
+}
+
+void io_wq_cancel_all(struct io_wq *wq)
+{
+ int i;
+
+ set_bit(IO_WQ_BIT_CANCEL, &wq->state);
+
+ /*
+ * Browse both lists, as there's a gap between handing work off
+ * to a worker and the worker putting itself on the busy_list
+ */
+ rcu_read_lock();
+ for (i = 0; i < wq->nr_wqes; i++) {
+ struct io_wqe *wqe = wq->wqes[i];
+
+ io_wq_for_each_worker(wqe, io_wqe_worker_send_sig, NULL);
+ }
+ rcu_read_unlock();
+}
+
+struct io_cb_cancel_data {
+ struct io_wqe *wqe;
+ work_cancel_fn *cancel;
+ void *caller_data;
+};
+
+static bool io_work_cancel(struct io_worker *worker, void *cancel_data)
+{
+ struct io_cb_cancel_data *data = cancel_data;
+ unsigned long flags;
+ bool ret = false;
+
+ /*
+ * Hold the lock to avoid ->cur_work going out of scope, caller
+ * may dereference the passed in work.
+ */
+ spin_lock_irqsave(&worker->lock, flags);
+ if (worker->cur_work &&
+ data->cancel(worker->cur_work, data->caller_data)) {
+ send_sig(SIGINT, worker->task, 1);
+ ret = true;
+ }
+ spin_unlock_irqrestore(&worker->lock, flags);
+
+ return ret;
+}
+
+static enum io_wq_cancel io_wqe_cancel_cb_work(struct io_wqe *wqe,
+ work_cancel_fn *cancel,
+ void *cancel_data)
+{
+ struct io_cb_cancel_data data = {
+ .wqe = wqe,
+ .cancel = cancel,
+ .caller_data = cancel_data,
+ };
+ struct io_wq_work *work;
+ unsigned long flags;
+ bool found = false;
+
+ spin_lock_irqsave(&wqe->lock, flags);
+ list_for_each_entry(work, &wqe->work_list, list) {
+ if (cancel(work, cancel_data)) {
+ list_del(&work->list);
+ found = true;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&wqe->lock, flags);
+
+ if (found) {
+ work->flags |= IO_WQ_WORK_CANCEL;
+ work->func(&work);
+ return IO_WQ_CANCEL_OK;
+ }
+
+ rcu_read_lock();
+ found = io_wq_for_each_worker(wqe, io_work_cancel, &data);
+ rcu_read_unlock();
+ return found ? IO_WQ_CANCEL_RUNNING : IO_WQ_CANCEL_NOTFOUND;
+}
+
+enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
+ void *data)
+{
+ enum io_wq_cancel ret = IO_WQ_CANCEL_NOTFOUND;
+ int i;
+
+ for (i = 0; i < wq->nr_wqes; i++) {
+ struct io_wqe *wqe = wq->wqes[i];
+
+ ret = io_wqe_cancel_cb_work(wqe, cancel, data);
+ if (ret != IO_WQ_CANCEL_NOTFOUND)
+ break;
+ }
+
+ return ret;
+}
+
+static bool io_wq_worker_cancel(struct io_worker *worker, void *data)
+{
+ struct io_wq_work *work = data;
+ unsigned long flags;
+ bool ret = false;
+
+ if (worker->cur_work != work)
+ return false;
+
+ spin_lock_irqsave(&worker->lock, flags);
+ if (worker->cur_work == work) {
+ send_sig(SIGINT, worker->task, 1);
+ ret = true;
+ }
+ spin_unlock_irqrestore(&worker->lock, flags);
+
+ return ret;
+}
+
+static enum io_wq_cancel io_wqe_cancel_work(struct io_wqe *wqe,
+ struct io_wq_work *cwork)
+{
+ struct io_wq_work *work;
+ unsigned long flags;
+ bool found = false;
+
+ cwork->flags |= IO_WQ_WORK_CANCEL;
+
+ /*
+ * First check pending list, if we're lucky we can just remove it
+ * from there. CANCEL_OK means that the work is returned as-new,
+ * no completion will be posted for it.
+ */
+ spin_lock_irqsave(&wqe->lock, flags);
+ list_for_each_entry(work, &wqe->work_list, list) {
+ if (work == cwork) {
+ list_del(&work->list);
+ found = true;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&wqe->lock, flags);
+
+ if (found) {
+ work->flags |= IO_WQ_WORK_CANCEL;
+ work->func(&work);
+ return IO_WQ_CANCEL_OK;
+ }
+
+ /*
+ * Now check if a free (going busy) or busy worker has the work
+ * currently running. If we find it there, we'll return CANCEL_RUNNING
+ * as an indication that we attempted to signal cancellation. The
+ * completion will run normally in this case.
+ */
+ rcu_read_lock();
+ found = io_wq_for_each_worker(wqe, io_wq_worker_cancel, cwork);
+ rcu_read_unlock();
+ return found ? IO_WQ_CANCEL_RUNNING : IO_WQ_CANCEL_NOTFOUND;
+}
+
+enum io_wq_cancel io_wq_cancel_work(struct io_wq *wq, struct io_wq_work *cwork)
+{
+ enum io_wq_cancel ret = IO_WQ_CANCEL_NOTFOUND;
+ int i;
+
+ for (i = 0; i < wq->nr_wqes; i++) {
+ struct io_wqe *wqe = wq->wqes[i];
+
+ ret = io_wqe_cancel_work(wqe, cwork);
+ if (ret != IO_WQ_CANCEL_NOTFOUND)
+ break;
+ }
+
+ return ret;
+}
+
+struct io_wq_flush_data {
+ struct io_wq_work work;
+ struct completion done;
+};
+
+static void io_wq_flush_func(struct io_wq_work **workptr)
+{
+ struct io_wq_work *work = *workptr;
+ struct io_wq_flush_data *data;
+
+ data = container_of(work, struct io_wq_flush_data, work);
+ complete(&data->done);
+}
+
+/*
+ * Doesn't wait for previously queued work to finish. When this completes,
+ * it just means that previously queued work was started.
+ */
+void io_wq_flush(struct io_wq *wq)
+{
+ struct io_wq_flush_data data;
+ int i;
+
+ for (i = 0; i < wq->nr_wqes; i++) {
+ struct io_wqe *wqe = wq->wqes[i];
+
+ init_completion(&data.done);
+ INIT_IO_WORK(&data.work, io_wq_flush_func);
+ data.work.flags |= IO_WQ_WORK_INTERNAL;
+ io_wqe_enqueue(wqe, &data.work);
+ wait_for_completion(&data.done);
+ }
+}
+
+struct io_wq *io_wq_create(unsigned bounded, struct mm_struct *mm,
+ struct user_struct *user, get_work_fn *get_work,
+ put_work_fn *put_work)
+{
+ int ret = -ENOMEM, i, node;
+ struct io_wq *wq;
+
+ wq = kcalloc(1, sizeof(*wq), GFP_KERNEL);
+ if (!wq)
+ return ERR_PTR(-ENOMEM);
+
+ wq->nr_wqes = num_online_nodes();
+ wq->wqes = kcalloc(wq->nr_wqes, sizeof(struct io_wqe *), GFP_KERNEL);
+ if (!wq->wqes) {
+ kfree(wq);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ wq->get_work = get_work;
+ wq->put_work = put_work;
+
+ /* caller must already hold a reference to this */
+ wq->user = user;
+
+ i = 0;
+ refcount_set(&wq->refs, wq->nr_wqes);
+ for_each_online_node(node) {
+ struct io_wqe *wqe;
+
+ wqe = kcalloc_node(1, sizeof(struct io_wqe), GFP_KERNEL, node);
+ if (!wqe)
+ break;
+ wq->wqes[i] = wqe;
+ wqe->node = node;
+ wqe->acct[IO_WQ_ACCT_BOUND].max_workers = bounded;
+ atomic_set(&wqe->acct[IO_WQ_ACCT_BOUND].nr_running, 0);
+ if (user) {
+ wqe->acct[IO_WQ_ACCT_UNBOUND].max_workers =
+ task_rlimit(current, RLIMIT_NPROC);
+ }
+ atomic_set(&wqe->acct[IO_WQ_ACCT_UNBOUND].nr_running, 0);
+ wqe->node = node;
+ wqe->wq = wq;
+ spin_lock_init(&wqe->lock);
+ INIT_LIST_HEAD(&wqe->work_list);
+ INIT_HLIST_NULLS_HEAD(&wqe->free_list, 0);
+ INIT_HLIST_NULLS_HEAD(&wqe->busy_list, 1);
+ INIT_LIST_HEAD(&wqe->all_list);
+
+ i++;
+ }
+
+ init_completion(&wq->done);
+
+ if (i != wq->nr_wqes)
+ goto err;
+
+ /* caller must have already done mmgrab() on this mm */
+ wq->mm = mm;
+
+ wq->manager = kthread_create(io_wq_manager, wq, "io_wq_manager");
+ if (!IS_ERR(wq->manager)) {
+ wake_up_process(wq->manager);
+ return wq;
+ }
+
+ ret = PTR_ERR(wq->manager);
+ wq->manager = NULL;
+err:
+ complete(&wq->done);
+ io_wq_destroy(wq);
+ return ERR_PTR(ret);
+}
+
+static bool io_wq_worker_wake(struct io_worker *worker, void *data)
+{
+ wake_up_process(worker->task);
+ return false;
+}
+
+void io_wq_destroy(struct io_wq *wq)
+{
+ int i;
+
+ if (wq->manager) {
+ set_bit(IO_WQ_BIT_EXIT, &wq->state);
+ kthread_stop(wq->manager);
+ }
+
+ rcu_read_lock();
+ for (i = 0; i < wq->nr_wqes; i++) {
+ struct io_wqe *wqe = wq->wqes[i];
+
+ if (!wqe)
+ continue;
+ io_wq_for_each_worker(wqe, io_wq_worker_wake, NULL);
+ }
+ rcu_read_unlock();
+
+ wait_for_completion(&wq->done);
+
+ for (i = 0; i < wq->nr_wqes; i++)
+ kfree(wq->wqes[i]);
+ kfree(wq->wqes);
+ kfree(wq);
+}
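
[Note: one detail worth calling out in io-wq above is how hashed work is serialized: io_wq_enqueue_hashed() packs the bucket index into the upper bits of ->flags at IO_WQ_HASH_SHIFT, and io_get_next_work() recovers it with a right shift, skipping buckets already marked busy in wqe->hash_map. A standalone sketch of just the packing arithmetic, with toy constants mirroring the ones in io-wq.h below:]

    #include <stdio.h>

    #define TOY_WQ_WORK_HASHED  4U
    #define TOY_WQ_HASH_SHIFT   24   /* upper 8 bits hold the hash key */

    static unsigned pack_hashed(unsigned flags, unsigned bucket)
    {
        return flags | TOY_WQ_WORK_HASHED | (bucket << TOY_WQ_HASH_SHIFT);
    }

    int main(void)
    {
        unsigned flags = pack_hashed(0, 13);

        printf("bucket=%u\n", flags >> TOY_WQ_HASH_SHIFT);  /* 13 */
        return 0;
    }
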
diff --git a/fs/io-wq.h b/fs/io-wq.h
new file mode 100644
index 000000000000..4b29f922f80c
--- /dev/null
+++ b/fs/io-wq.h
@@ -0,0 +1,74 @@
+#ifndef INTERNAL_IO_WQ_H
+#define INTERNAL_IO_WQ_H
+
+struct io_wq;
+
+enum {
+ IO_WQ_WORK_CANCEL = 1,
+ IO_WQ_WORK_HAS_MM = 2,
+ IO_WQ_WORK_HASHED = 4,
+ IO_WQ_WORK_NEEDS_USER = 8,
+ IO_WQ_WORK_NEEDS_FILES = 16,
+ IO_WQ_WORK_UNBOUND = 32,
+ IO_WQ_WORK_INTERNAL = 64,
+
+ IO_WQ_HASH_SHIFT = 24, /* upper 8 bits are used for hash key */
+};
+
+enum io_wq_cancel {
+ IO_WQ_CANCEL_OK, /* cancelled before started */
+ IO_WQ_CANCEL_RUNNING, /* found, running, and cancellation attempted */
+ IO_WQ_CANCEL_NOTFOUND, /* work not found */
+};
+
+struct io_wq_work {
+ struct list_head list;
+ void (*func)(struct io_wq_work **);
+ unsigned flags;
+ struct files_struct *files;
+};
+
+#define INIT_IO_WORK(work, _func) \
+ do { \
+ (work)->func = _func; \
+ (work)->flags = 0; \
+ (work)->files = NULL; \
+ } while (0) \
+
+typedef void (get_work_fn)(struct io_wq_work *);
+typedef void (put_work_fn)(struct io_wq_work *);
+
+struct io_wq *io_wq_create(unsigned bounded, struct mm_struct *mm,
+ struct user_struct *user,
+ get_work_fn *get_work, put_work_fn *put_work);
+void io_wq_destroy(struct io_wq *wq);
+
+void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work);
+void io_wq_enqueue_hashed(struct io_wq *wq, struct io_wq_work *work, void *val);
+void io_wq_flush(struct io_wq *wq);
+
+void io_wq_cancel_all(struct io_wq *wq);
+enum io_wq_cancel io_wq_cancel_work(struct io_wq *wq, struct io_wq_work *cwork);
+
+typedef bool (work_cancel_fn)(struct io_wq_work *, void *);
+
+enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
+ void *data);
+
+#if defined(CONFIG_IO_WQ)
+extern void io_wq_worker_sleeping(struct task_struct *);
+extern void io_wq_worker_running(struct task_struct *);
+#else
+static inline void io_wq_worker_sleeping(struct task_struct *tsk)
+{
+}
+static inline void io_wq_worker_running(struct task_struct *tsk)
+{
+}
+#endif
+
+static inline bool io_wq_current_is_worker(void)
+{
+ return in_task() && (current->flags & PF_IO_WORKER);
+}
+#endif
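
Beyond pointer-identity cancel (io_wq_cancel_work), io_wq_cancel_cb() takes a work_cancel_fn predicate, so callers can match queued work by any criterion. A sketch, assuming a hypothetical container type with a 'cookie' field (io_uring's own io_cancel_cb, later in this patch, matches on user_data the same way):

	/* sketch: cancel queued work tagged with a given cookie (names hypothetical) */
	static bool match_cookie(struct io_wq_work *work, void *data)
	{
		struct my_req *req = container_of(work, struct my_req, work);

		return req->cookie == (unsigned long) data;
	}

	enum io_wq_cancel ret = io_wq_cancel_cb(wq, match_cookie, (void *) cookie);
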
diff --git a/fs/io_uring.c b/fs/io_uring.c
index f9a38998f2fc..4c030a92de79 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -56,7 +56,6 @@
#include <linux/mmu_context.h>
#include <linux/percpu.h>
#include <linux/slab.h>
-#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/blkdev.h>
#include <linux/bvec.h>
@@ -71,12 +70,24 @@
#include <linux/sizes.h>
#include <linux/hugetlb.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/io_uring.h>
+
#include <uapi/linux/io_uring.h>
#include "internal.h"
+#include "io-wq.h"
#define IORING_MAX_ENTRIES 32768
-#define IORING_MAX_FIXED_FILES 1024
+#define IORING_MAX_CQ_ENTRIES (2 * IORING_MAX_ENTRIES)
+
+/*
+ * Shift of 9 is 512 entries, or exactly one page on 64-bit archs
+ */
+#define IORING_FILE_TABLE_SHIFT 9
+#define IORING_MAX_FILES_TABLE (1U << IORING_FILE_TABLE_SHIFT)
+#define IORING_FILE_TABLE_MASK (IORING_MAX_FILES_TABLE - 1)
+#define IORING_MAX_FIXED_FILES (64 * IORING_MAX_FILES_TABLE)
struct io_uring {
u32 head ____cacheline_aligned_in_smp;
@@ -161,14 +172,8 @@ struct io_mapped_ubuf {
unsigned int nr_bvecs;
};
-struct async_list {
- spinlock_t lock;
- atomic_t cnt;
- struct list_head list;
-
- struct file *file;
- off_t io_start;
- size_t io_len;
+struct fixed_file_table {
+ struct file **files;
};
struct io_ring_ctx {
@@ -180,6 +185,7 @@ struct io_ring_ctx {
unsigned int flags;
bool compat;
bool account_mem;
+ bool cq_overflow_flushed;
/*
* Ring buffer of indices into array of io_uring_sqe, which is
@@ -198,38 +204,30 @@ struct io_ring_ctx {
unsigned sq_mask;
unsigned sq_thread_idle;
unsigned cached_sq_dropped;
+ atomic_t cached_cq_overflow;
struct io_uring_sqe *sq_sqes;
struct list_head defer_list;
struct list_head timeout_list;
+ struct list_head cq_overflow_list;
+
+ wait_queue_head_t inflight_wait;
} ____cacheline_aligned_in_smp;
+ struct io_rings *rings;
+
/* IO offload */
- struct workqueue_struct *sqo_wq[2];
+ struct io_wq *io_wq;
struct task_struct *sqo_thread; /* if using sq thread polling */
struct mm_struct *sqo_mm;
wait_queue_head_t sqo_wait;
- struct completion sqo_thread_started;
-
- struct {
- unsigned cached_cq_tail;
- atomic_t cached_cq_overflow;
- unsigned cq_entries;
- unsigned cq_mask;
- struct wait_queue_head cq_wait;
- struct fasync_struct *cq_fasync;
- struct eventfd_ctx *cq_ev_fd;
- atomic_t cq_timeouts;
- } ____cacheline_aligned_in_smp;
-
- struct io_rings *rings;
/*
* If used, fixed file set. Writers must ensure that ->refs is dead,
* readers must ensure that ->refs is alive as long as the file* is
* used. Only updated through io_uring_register(2).
*/
- struct file **user_files;
+ struct fixed_file_table *file_table;
unsigned nr_user_files;
/* if used, fixed mapped user buffers */
@@ -238,7 +236,25 @@ struct io_ring_ctx {
struct user_struct *user;
- struct completion ctx_done;
+ /* 0 is for ctx quiesce/reinit/free, 1 is for sqo_thread started */
+ struct completion *completions;
+
+ /* if all else fails... */
+ struct io_kiocb *fallback_req;
+
+#if defined(CONFIG_UNIX)
+ struct socket *ring_sock;
+#endif
+
+ struct {
+ unsigned cached_cq_tail;
+ unsigned cq_entries;
+ unsigned cq_mask;
+ atomic_t cq_timeouts;
+ struct wait_queue_head cq_wait;
+ struct fasync_struct *cq_fasync;
+ struct eventfd_ctx *cq_ev_fd;
+ } ____cacheline_aligned_in_smp;
struct {
struct mutex uring_lock;
@@ -255,22 +271,20 @@ struct io_ring_ctx {
* manipulate the list, hence no extra locking is needed there.
*/
struct list_head poll_list;
- struct list_head cancel_list;
- } ____cacheline_aligned_in_smp;
+ struct rb_root cancel_tree;
- struct async_list pending_async[2];
-
-#if defined(CONFIG_UNIX)
- struct socket *ring_sock;
-#endif
+ spinlock_t inflight_lock;
+ struct list_head inflight_list;
+ } ____cacheline_aligned_in_smp;
};
struct sqe_submit {
const struct io_uring_sqe *sqe;
- unsigned short index;
+ struct file *ring_file;
+ int ring_fd;
u32 sequence;
bool has_user;
- bool needs_lock;
+ bool in_async;
bool needs_fixed_file;
};
@@ -309,7 +323,10 @@ struct io_kiocb {
struct sqe_submit submit;
struct io_ring_ctx *ctx;
- struct list_head list;
+ union {
+ struct list_head list;
+ struct rb_node rb_node;
+ };
struct list_head link_list;
unsigned int flags;
refcount_t refs;
@@ -320,17 +337,22 @@ struct io_kiocb {
#define REQ_F_IO_DRAIN 16 /* drain existing IO first */
#define REQ_F_IO_DRAINED 32 /* drain done */
#define REQ_F_LINK 64 /* linked sqes */
-#define REQ_F_LINK_DONE 128 /* linked sqes done */
+#define REQ_F_LINK_TIMEOUT 128 /* has linked timeout */
#define REQ_F_FAIL_LINK 256 /* fail rest of links */
#define REQ_F_SHADOW_DRAIN 512 /* link-drain shadow req */
#define REQ_F_TIMEOUT 1024 /* timeout request */
#define REQ_F_ISREG 2048 /* regular file */
#define REQ_F_MUST_PUNT 4096 /* must be punted even for NONBLOCK */
+#define REQ_F_TIMEOUT_NOSEQ 8192 /* no timeout sequence */
+#define REQ_F_INFLIGHT 16384 /* on inflight list */
+#define REQ_F_COMP_LOCKED 32768 /* completion under lock */
u64 user_data;
u32 result;
u32 sequence;
- struct work_struct work;
+ struct list_head inflight_entry;
+
+ struct io_wq_work work;
};
#define IO_PLUG_THRESHOLD 2
@@ -356,10 +378,11 @@ struct io_submit_state {
unsigned int ios_left;
};
-static void io_sq_wq_submit_work(struct work_struct *work);
-static void io_cqring_fill_event(struct io_ring_ctx *ctx, u64 ki_user_data,
- long res);
+static void io_wq_submit_work(struct io_wq_work **workptr);
+static void io_cqring_fill_event(struct io_kiocb *req, long res);
static void __io_free_req(struct io_kiocb *req);
+static void io_put_req(struct io_kiocb *req);
+static void io_double_put_req(struct io_kiocb *req);
static struct kmem_cache *req_cachep;
@@ -382,57 +405,67 @@ static void io_ring_ctx_ref_free(struct percpu_ref *ref)
{
struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs);
- complete(&ctx->ctx_done);
+ complete(&ctx->completions[0]);
}
static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
{
struct io_ring_ctx *ctx;
- int i;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return NULL;
+ ctx->fallback_req = kmem_cache_alloc(req_cachep, GFP_KERNEL);
+ if (!ctx->fallback_req)
+ goto err;
+
+ ctx->completions = kmalloc(2 * sizeof(struct completion), GFP_KERNEL);
+ if (!ctx->completions)
+ goto err;
+
if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free,
- PERCPU_REF_ALLOW_REINIT, GFP_KERNEL)) {
- kfree(ctx);
- return NULL;
- }
+ PERCPU_REF_ALLOW_REINIT, GFP_KERNEL))
+ goto err;
ctx->flags = p->flags;
init_waitqueue_head(&ctx->cq_wait);
- init_completion(&ctx->ctx_done);
- init_completion(&ctx->sqo_thread_started);
+ INIT_LIST_HEAD(&ctx->cq_overflow_list);
+ init_completion(&ctx->completions[0]);
+ init_completion(&ctx->completions[1]);
mutex_init(&ctx->uring_lock);
init_waitqueue_head(&ctx->wait);
- for (i = 0; i < ARRAY_SIZE(ctx->pending_async); i++) {
- spin_lock_init(&ctx->pending_async[i].lock);
- INIT_LIST_HEAD(&ctx->pending_async[i].list);
- atomic_set(&ctx->pending_async[i].cnt, 0);
- }
spin_lock_init(&ctx->completion_lock);
INIT_LIST_HEAD(&ctx->poll_list);
- INIT_LIST_HEAD(&ctx->cancel_list);
+ ctx->cancel_tree = RB_ROOT;
INIT_LIST_HEAD(&ctx->defer_list);
INIT_LIST_HEAD(&ctx->timeout_list);
+ init_waitqueue_head(&ctx->inflight_wait);
+ spin_lock_init(&ctx->inflight_lock);
+ INIT_LIST_HEAD(&ctx->inflight_list);
return ctx;
+err:
+ if (ctx->fallback_req)
+ kmem_cache_free(req_cachep, ctx->fallback_req);
+ kfree(ctx->completions);
+ kfree(ctx);
+ return NULL;
}
-static inline bool __io_sequence_defer(struct io_ring_ctx *ctx,
- struct io_kiocb *req)
+static inline bool __req_need_defer(struct io_kiocb *req)
{
+ struct io_ring_ctx *ctx = req->ctx;
+
return req->sequence != ctx->cached_cq_tail + ctx->cached_sq_dropped
+ atomic_read(&ctx->cached_cq_overflow);
}
-static inline bool io_sequence_defer(struct io_ring_ctx *ctx,
- struct io_kiocb *req)
+static inline bool req_need_defer(struct io_kiocb *req)
{
- if ((req->flags & (REQ_F_IO_DRAIN|REQ_F_IO_DRAINED)) != REQ_F_IO_DRAIN)
- return false;
+ if ((req->flags & (REQ_F_IO_DRAIN|REQ_F_IO_DRAINED)) == REQ_F_IO_DRAIN)
+ return __req_need_defer(req);
- return __io_sequence_defer(ctx, req);
+ return false;
}
static struct io_kiocb *io_get_deferred_req(struct io_ring_ctx *ctx)
@@ -440,7 +473,7 @@ static struct io_kiocb *io_get_deferred_req(struct io_ring_ctx *ctx)
struct io_kiocb *req;
req = list_first_entry_or_null(&ctx->defer_list, struct io_kiocb, list);
- if (req && !io_sequence_defer(ctx, req)) {
+ if (req && !req_need_defer(req)) {
list_del_init(&req->list);
return req;
}
@@ -453,9 +486,13 @@ static struct io_kiocb *io_get_timeout_req(struct io_ring_ctx *ctx)
struct io_kiocb *req;
req = list_first_entry_or_null(&ctx->timeout_list, struct io_kiocb, list);
- if (req && !__io_sequence_defer(ctx, req)) {
- list_del_init(&req->list);
- return req;
+ if (req) {
+ if (req->flags & REQ_F_TIMEOUT_NOSEQ)
+ return NULL;
+ if (!__req_need_defer(req)) {
+ list_del_init(&req->list);
+ return req;
+ }
}
return NULL;
@@ -476,21 +513,59 @@ static void __io_commit_cqring(struct io_ring_ctx *ctx)
}
}
-static inline void io_queue_async_work(struct io_ring_ctx *ctx,
- struct io_kiocb *req)
+static inline bool io_sqe_needs_user(const struct io_uring_sqe *sqe)
+{
+ u8 opcode = READ_ONCE(sqe->opcode);
+
+ return !(opcode == IORING_OP_READ_FIXED ||
+ opcode == IORING_OP_WRITE_FIXED);
+}
+
+static inline bool io_prep_async_work(struct io_kiocb *req)
{
- int rw = 0;
+ bool do_hashed = false;
if (req->submit.sqe) {
switch (req->submit.sqe->opcode) {
case IORING_OP_WRITEV:
case IORING_OP_WRITE_FIXED:
- rw = !(req->rw.ki_flags & IOCB_DIRECT);
+ do_hashed = true;
+ /* fall-through */
+ case IORING_OP_READV:
+ case IORING_OP_READ_FIXED:
+ case IORING_OP_SENDMSG:
+ case IORING_OP_RECVMSG:
+ case IORING_OP_ACCEPT:
+ case IORING_OP_POLL_ADD:
+ /*
+ * We know REQ_F_ISREG is not set on some of these
+ * opcodes, but this enables us to keep the check in
+ * just one place.
+ */
+ if (!(req->flags & REQ_F_ISREG))
+ req->work.flags |= IO_WQ_WORK_UNBOUND;
break;
}
+ if (io_sqe_needs_user(req->submit.sqe))
+ req->work.flags |= IO_WQ_WORK_NEEDS_USER;
}
- queue_work(ctx->sqo_wq[rw], &req->work);
+ return do_hashed;
+}
+
+static inline void io_queue_async_work(struct io_kiocb *req)
+{
+ bool do_hashed = io_prep_async_work(req);
+ struct io_ring_ctx *ctx = req->ctx;
+
+ trace_io_uring_queue_async_work(ctx, do_hashed, req, &req->work,
+ req->flags);
+ if (!do_hashed) {
+ io_wq_enqueue(ctx->io_wq, &req->work);
+ } else {
+ io_wq_enqueue_hashed(ctx->io_wq, &req->work,
+ file_inode(req->file));
+ }
}
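
Hashed queueing serializes buffered writes against the same regular file: the inode pointer is the hash key, so two writes to one file never run on two workers concurrently, while writes to distinct files still fan out. The io-wq.c side of io_wq_enqueue_hashed() is not part of this excerpt; a plausible sketch of how it folds the key into the flag bits reserved by IO_WQ_HASH_SHIFT:

	/* plausible sketch only -- the real body lives in fs/io-wq.c */
	void io_wq_enqueue_hashed(struct io_wq *wq, struct io_wq_work *work, void *val)
	{
		unsigned bit = hash_ptr(val, 8);	/* 8 bits fit above IO_WQ_HASH_SHIFT */

		work->flags |= IO_WQ_WORK_HASHED | (bit << IO_WQ_HASH_SHIFT);
		/* ... then queue on the local node's io_wqe work_list ... */
	}
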
static void io_kill_timeout(struct io_kiocb *req)
@@ -500,9 +575,9 @@ static void io_kill_timeout(struct io_kiocb *req)
ret = hrtimer_try_to_cancel(&req->timeout.timer);
if (ret != -1) {
atomic_inc(&req->ctx->cq_timeouts);
- list_del(&req->list);
- io_cqring_fill_event(req->ctx, req->user_data, 0);
- __io_free_req(req);
+ list_del_init(&req->list);
+ io_cqring_fill_event(req, 0);
+ io_put_req(req);
}
}
@@ -532,7 +607,7 @@ static void io_commit_cqring(struct io_ring_ctx *ctx)
continue;
}
req->flags |= REQ_F_IO_DRAINED;
- io_queue_async_work(ctx, req);
+ io_queue_async_work(req);
}
}
@@ -554,10 +629,73 @@ static struct io_uring_cqe *io_get_cqring(struct io_ring_ctx *ctx)
return &rings->cqes[tail & ctx->cq_mask];
}
-static void io_cqring_fill_event(struct io_ring_ctx *ctx, u64 ki_user_data,
- long res)
+static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
+{
+ if (waitqueue_active(&ctx->wait))
+ wake_up(&ctx->wait);
+ if (waitqueue_active(&ctx->sqo_wait))
+ wake_up(&ctx->sqo_wait);
+ if (ctx->cq_ev_fd)
+ eventfd_signal(ctx->cq_ev_fd, 1);
+}
+
+static void io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
{
+ struct io_rings *rings = ctx->rings;
struct io_uring_cqe *cqe;
+ struct io_kiocb *req;
+ unsigned long flags;
+ LIST_HEAD(list);
+
+ if (!force) {
+ if (list_empty_careful(&ctx->cq_overflow_list))
+ return;
+ if ((ctx->cached_cq_tail - READ_ONCE(rings->cq.head) ==
+ rings->cq_ring_entries))
+ return;
+ }
+
+ spin_lock_irqsave(&ctx->completion_lock, flags);
+
+ /* if force is set, the ring is going away. always drop after that */
+ if (force)
+ ctx->cq_overflow_flushed = true;
+
+ while (!list_empty(&ctx->cq_overflow_list)) {
+ cqe = io_get_cqring(ctx);
+ if (!cqe && !force)
+ break;
+
+ req = list_first_entry(&ctx->cq_overflow_list, struct io_kiocb,
+ list);
+ list_move(&req->list, &list);
+ if (cqe) {
+ WRITE_ONCE(cqe->user_data, req->user_data);
+ WRITE_ONCE(cqe->res, req->result);
+ WRITE_ONCE(cqe->flags, 0);
+ } else {
+ WRITE_ONCE(ctx->rings->cq_overflow,
+ atomic_inc_return(&ctx->cached_cq_overflow));
+ }
+ }
+
+ io_commit_cqring(ctx);
+ spin_unlock_irqrestore(&ctx->completion_lock, flags);
+ io_cqring_ev_posted(ctx);
+
+ while (!list_empty(&list)) {
+ req = list_first_entry(&list, struct io_kiocb, list);
+ list_del(&req->list);
+ io_put_req(req);
+ }
+}
+
+static void io_cqring_fill_event(struct io_kiocb *req, long res)
+{
+ struct io_ring_ctx *ctx = req->ctx;
+ struct io_uring_cqe *cqe;
+
+ trace_io_uring_complete(ctx, req->user_data, res);
/*
* If we can't get a cq entry, userspace overflowed the
@@ -565,39 +703,50 @@ static void io_cqring_fill_event(struct io_ring_ctx *ctx, u64 ki_user_data,
* the ring.
*/
cqe = io_get_cqring(ctx);
- if (cqe) {
- WRITE_ONCE(cqe->user_data, ki_user_data);
+ if (likely(cqe)) {
+ WRITE_ONCE(cqe->user_data, req->user_data);
WRITE_ONCE(cqe->res, res);
WRITE_ONCE(cqe->flags, 0);
- } else {
+ } else if (ctx->cq_overflow_flushed) {
WRITE_ONCE(ctx->rings->cq_overflow,
atomic_inc_return(&ctx->cached_cq_overflow));
+ } else {
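+ /* no CQE slot, but the ring is still live: stash the event for a later flush */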
+ refcount_inc(&req->refs);
+ req->result = res;
+ list_add_tail(&req->list, &ctx->cq_overflow_list);
}
}
-static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
-{
- if (waitqueue_active(&ctx->wait))
- wake_up(&ctx->wait);
- if (waitqueue_active(&ctx->sqo_wait))
- wake_up(&ctx->sqo_wait);
- if (ctx->cq_ev_fd)
- eventfd_signal(ctx->cq_ev_fd, 1);
-}
-
-static void io_cqring_add_event(struct io_ring_ctx *ctx, u64 user_data,
- long res)
+static void io_cqring_add_event(struct io_kiocb *req, long res)
{
+ struct io_ring_ctx *ctx = req->ctx;
unsigned long flags;
spin_lock_irqsave(&ctx->completion_lock, flags);
- io_cqring_fill_event(ctx, user_data, res);
+ io_cqring_fill_event(req, res);
io_commit_cqring(ctx);
spin_unlock_irqrestore(&ctx->completion_lock, flags);
io_cqring_ev_posted(ctx);
}
+static inline bool io_is_fallback_req(struct io_kiocb *req)
+{
+ return req == (struct io_kiocb *)
+ ((unsigned long) req->ctx->fallback_req & ~1UL);
+}
+
+static struct io_kiocb *io_get_fallback_req(struct io_ring_ctx *ctx)
+{
+ struct io_kiocb *req;
+
+ req = ctx->fallback_req;
+ if (!test_and_set_bit_lock(0, (unsigned long *) ctx->fallback_req))
+ return req;
+
+ return NULL;
+}
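
The single pre-allocated fallback request is claimed and released through an atomic bit in the first word of the object itself, so no separate "in use" field is needed. A minimal model of the pairing (the release half appears in __io_free_req() below; these wrapper names are illustrative):

	static struct io_kiocb *claim_fallback(struct io_ring_ctx *ctx)
	{
		/* bit 0 of the request's first word acts as the "in use" lock */
		if (!test_and_set_bit_lock(0, (unsigned long *) ctx->fallback_req))
			return ctx->fallback_req;
		return NULL;	/* already claimed: the allocation truly fails */
	}

	static void release_fallback(struct io_ring_ctx *ctx)
	{
		clear_bit_unlock(0, (unsigned long *) ctx->fallback_req);
	}
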
+
static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx,
struct io_submit_state *state)
{
@@ -610,7 +759,7 @@ static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx,
if (!state) {
req = kmem_cache_alloc(req_cachep, gfp);
if (unlikely(!req))
- goto out;
+ goto fallback;
} else if (!state->free_reqs) {
size_t sz;
int ret;
@@ -625,7 +774,7 @@ static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx,
if (unlikely(ret <= 0)) {
state->reqs[0] = kmem_cache_alloc(req_cachep, gfp);
if (!state->reqs[0])
- goto out;
+ goto fallback;
ret = 1;
}
state->free_reqs = ret - 1;
@@ -637,14 +786,19 @@ static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx,
state->cur_req++;
}
+got_it:
req->file = NULL;
req->ctx = ctx;
req->flags = 0;
/* one is dropped after submission, the other at completion */
refcount_set(&req->refs, 2);
req->result = 0;
+ INIT_IO_WORK(&req->work, io_wq_submit_work);
return req;
-out:
+fallback:
+ req = io_get_fallback_req(ctx);
+ if (req)
+ goto got_it;
percpu_ref_put(&ctx->refs);
return NULL;
}
@@ -660,15 +814,48 @@ static void io_free_req_many(struct io_ring_ctx *ctx, void **reqs, int *nr)
static void __io_free_req(struct io_kiocb *req)
{
+ struct io_ring_ctx *ctx = req->ctx;
+
if (req->file && !(req->flags & REQ_F_FIXED_FILE))
fput(req->file);
- percpu_ref_put(&req->ctx->refs);
- kmem_cache_free(req_cachep, req);
+ if (req->flags & REQ_F_INFLIGHT) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctx->inflight_lock, flags);
+ list_del(&req->inflight_entry);
+ if (waitqueue_active(&ctx->inflight_wait))
+ wake_up(&ctx->inflight_wait);
+ spin_unlock_irqrestore(&ctx->inflight_lock, flags);
+ }
+ percpu_ref_put(&ctx->refs);
+ if (likely(!io_is_fallback_req(req)))
+ kmem_cache_free(req_cachep, req);
+ else
+ clear_bit_unlock(0, (unsigned long *) ctx->fallback_req);
+}
+
+static bool io_link_cancel_timeout(struct io_kiocb *req)
+{
+ struct io_ring_ctx *ctx = req->ctx;
+ int ret;
+
+ ret = hrtimer_try_to_cancel(&req->timeout.timer);
+ if (ret != -1) {
+ io_cqring_fill_event(req, -ECANCELED);
+ io_commit_cqring(ctx);
+ req->flags &= ~REQ_F_LINK;
+ io_put_req(req);
+ return true;
+ }
+
+ return false;
}
-static void io_req_link_next(struct io_kiocb *req)
+static void io_req_link_next(struct io_kiocb *req, struct io_kiocb **nxtptr)
{
+ struct io_ring_ctx *ctx = req->ctx;
struct io_kiocb *nxt;
+ bool wake_ev = false;
/*
* The list should never be empty when we are called here. But could
@@ -676,18 +863,35 @@ static void io_req_link_next(struct io_kiocb *req)
* safe side.
*/
nxt = list_first_entry_or_null(&req->link_list, struct io_kiocb, list);
- if (nxt) {
- list_del(&nxt->list);
+ while (nxt) {
+ list_del_init(&nxt->list);
if (!list_empty(&req->link_list)) {
INIT_LIST_HEAD(&nxt->link_list);
list_splice(&req->link_list, &nxt->link_list);
nxt->flags |= REQ_F_LINK;
}
- nxt->flags |= REQ_F_LINK_DONE;
- INIT_WORK(&nxt->work, io_sq_wq_submit_work);
- io_queue_async_work(req->ctx, nxt);
+ /*
+ * If we're in async work, we can continue processing the chain
+ * in this context instead of having to queue up new async work.
+ */
+ if (req->flags & REQ_F_LINK_TIMEOUT) {
+ wake_ev = io_link_cancel_timeout(nxt);
+
+ /* we dropped this link, get next */
+ nxt = list_first_entry_or_null(&req->link_list,
+ struct io_kiocb, list);
+ } else if (nxtptr && io_wq_current_is_worker()) {
+ *nxtptr = nxt;
+ break;
+ } else {
+ io_queue_async_work(nxt);
+ break;
+ }
}
+
+ if (wake_ev)
+ io_cqring_ev_posted(ctx);
}
/*
@@ -695,43 +899,118 @@ static void io_req_link_next(struct io_kiocb *req)
*/
static void io_fail_links(struct io_kiocb *req)
{
+ struct io_ring_ctx *ctx = req->ctx;
struct io_kiocb *link;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctx->completion_lock, flags);
while (!list_empty(&req->link_list)) {
link = list_first_entry(&req->link_list, struct io_kiocb, list);
- list_del(&link->list);
+ list_del_init(&link->list);
+
+ trace_io_uring_fail_link(req, link);
- io_cqring_add_event(req->ctx, link->user_data, -ECANCELED);
- __io_free_req(link);
+ if ((req->flags & REQ_F_LINK_TIMEOUT) &&
+ link->submit.sqe->opcode == IORING_OP_LINK_TIMEOUT) {
+ io_link_cancel_timeout(link);
+ } else {
+ io_cqring_fill_event(link, -ECANCELED);
+ io_double_put_req(link);
+ }
}
+
+ io_commit_cqring(ctx);
+ spin_unlock_irqrestore(&ctx->completion_lock, flags);
+ io_cqring_ev_posted(ctx);
}
-static void io_free_req(struct io_kiocb *req)
+static void io_free_req_find_next(struct io_kiocb *req, struct io_kiocb **nxt)
{
+ if (likely(!(req->flags & REQ_F_LINK))) {
+ __io_free_req(req);
+ return;
+ }
+
/*
* If LINK is set, we have dependent requests in this chain. If we
* didn't fail this request, queue the first one up, moving any other
* dependencies to the next request. In case of failure, fail the rest
* of the chain.
*/
- if (req->flags & REQ_F_LINK) {
- if (req->flags & REQ_F_FAIL_LINK)
- io_fail_links(req);
- else
- io_req_link_next(req);
+ if (req->flags & REQ_F_FAIL_LINK) {
+ io_fail_links(req);
+ } else if ((req->flags & (REQ_F_LINK_TIMEOUT | REQ_F_COMP_LOCKED)) ==
+ REQ_F_LINK_TIMEOUT) {
+ struct io_ring_ctx *ctx = req->ctx;
+ unsigned long flags;
+
+ /*
+ * If this is a timeout link, we could be racing with the
+ * timeout timer. Grab the completion lock for this case to
+ * protect against that.
+ */
+ spin_lock_irqsave(&ctx->completion_lock, flags);
+ io_req_link_next(req, nxt);
+ spin_unlock_irqrestore(&ctx->completion_lock, flags);
+ } else {
+ io_req_link_next(req, nxt);
}
__io_free_req(req);
}
+static void io_free_req(struct io_kiocb *req)
+{
+ io_free_req_find_next(req, NULL);
+}
+
+/*
+ * Drop reference to request, return next in chain (if there is one) if this
+ * was the last reference to this request.
+ */
+static void io_put_req_find_next(struct io_kiocb *req, struct io_kiocb **nxtptr)
+{
+ struct io_kiocb *nxt = NULL;
+
+ if (refcount_dec_and_test(&req->refs))
+ io_free_req_find_next(req, &nxt);
+
+ if (nxt) {
+ if (nxtptr)
+ *nxtptr = nxt;
+ else
+ io_queue_async_work(nxt);
+ }
+}
+
static void io_put_req(struct io_kiocb *req)
{
if (refcount_dec_and_test(&req->refs))
io_free_req(req);
}
-static unsigned io_cqring_events(struct io_rings *rings)
+static void io_double_put_req(struct io_kiocb *req)
{
+ /* drop both submit and complete references */
+ if (refcount_sub_and_test(2, &req->refs))
+ __io_free_req(req);
+}
+
+static unsigned io_cqring_events(struct io_ring_ctx *ctx, bool noflush)
+{
+ struct io_rings *rings = ctx->rings;
+
+ /*
+ * noflush == true is from the waitqueue handler, just ensure we wake
+ * up the task, and the next invocation will flush the entries. We
+ * cannot safely do it from here.
+ */
+ if (noflush && !list_empty(&ctx->cq_overflow_list))
+ return -1U;
+
+ io_cqring_overflow_flush(ctx, false);
+
/* See comment at the top of this file */
smp_rmb();
return READ_ONCE(rings->cq.tail) - READ_ONCE(rings->cq.head);
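
The tail - head subtraction relies on well-defined unsigned wraparound, so the count stays correct even after tail wraps past UINT_MAX. A worked example:

	u32 tail = 2, head = 0xfffffffeU;	/* tail has wrapped around, head has not */
	u32 pending = tail - head;		/* 2 - 0xfffffffe == 4 pending CQEs */
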
@@ -760,7 +1039,7 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
req = list_first_entry(done, struct io_kiocb, list);
list_del(&req->list);
- io_cqring_fill_event(ctx, req->user_data, req->result);
+ io_cqring_fill_event(req, req->result);
(*nr_events)++;
if (refcount_dec_and_test(&req->refs)) {
@@ -769,8 +1048,8 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
* completions for those, only batch free for fixed
* file and non-linked commands.
*/
- if ((req->flags & (REQ_F_FIXED_FILE|REQ_F_LINK)) ==
- REQ_F_FIXED_FILE) {
+ if (((req->flags & (REQ_F_FIXED_FILE|REQ_F_LINK)) ==
+ REQ_F_FIXED_FILE) && !io_is_fallback_req(req)) {
reqs[to_free++] = req;
if (to_free == ARRAY_SIZE(reqs))
io_free_req_many(ctx, reqs, &to_free);
@@ -887,7 +1166,7 @@ static int __io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
* If we do, we can potentially be spinning for commands that
* already triggered a CQE (eg in error).
*/
- if (io_cqring_events(ctx->rings))
+ if (io_cqring_events(ctx, false))
break;
/*
@@ -947,7 +1226,7 @@ static void kiocb_end_write(struct io_kiocb *req)
file_end_write(req->file);
}
-static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
+static void io_complete_rw_common(struct kiocb *kiocb, long res)
{
struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw);
@@ -956,10 +1235,28 @@ static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
if ((req->flags & REQ_F_LINK) && res != req->result)
req->flags |= REQ_F_FAIL_LINK;
- io_cqring_add_event(req->ctx, req->user_data, res);
+ io_cqring_add_event(req, res);
+}
+
+static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
+{
+ struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw);
+
+ io_complete_rw_common(kiocb, res);
io_put_req(req);
}
+static struct io_kiocb *__io_complete_rw(struct kiocb *kiocb, long res)
+{
+ struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw);
+ struct io_kiocb *nxt = NULL;
+
+ io_complete_rw_common(kiocb, res);
+ io_put_req_find_next(req, &nxt);
+
+ return nxt;
+}
+
static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
{
struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw);
@@ -1067,10 +1364,9 @@ static bool io_file_supports_async(struct file *file)
return false;
}
-static int io_prep_rw(struct io_kiocb *req, const struct sqe_submit *s,
- bool force_nonblock)
+static int io_prep_rw(struct io_kiocb *req, bool force_nonblock)
{
- const struct io_uring_sqe *sqe = s->sqe;
+ const struct io_uring_sqe *sqe = req->submit.sqe;
struct io_ring_ctx *ctx = req->ctx;
struct kiocb *kiocb = &req->rw;
unsigned ioprio;
@@ -1154,6 +1450,15 @@ static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
}
}
+static void kiocb_done(struct kiocb *kiocb, ssize_t ret, struct io_kiocb **nxt,
+ bool in_async)
+{
+ if (in_async && ret >= 0 && nxt && kiocb->ki_complete == io_complete_rw)
+ *nxt = __io_complete_rw(kiocb, ret);
+ else
+ io_rw_done(kiocb, ret);
+}
+
static int io_import_fixed(struct io_ring_ctx *ctx, int rw,
const struct io_uring_sqe *sqe,
struct iov_iter *iter)
@@ -1225,7 +1530,7 @@ static int io_import_fixed(struct io_ring_ctx *ctx, int rw,
}
}
- return 0;
+ return len;
}
static ssize_t io_import_iovec(struct io_ring_ctx *ctx, int rw,
@@ -1265,65 +1570,6 @@ static ssize_t io_import_iovec(struct io_ring_ctx *ctx, int rw,
return import_iovec(rw, buf, sqe_len, UIO_FASTIOV, iovec, iter);
}
-static inline bool io_should_merge(struct async_list *al, struct kiocb *kiocb)
-{
- if (al->file == kiocb->ki_filp) {
- off_t start, end;
-
- /*
- * Allow merging if we're anywhere in the range of the same
- * page. Generally this happens for sub-page reads or writes,
- * and it's beneficial to allow the first worker to bring the
- * page in and the piggybacked work can then work on the
- * cached page.
- */
- start = al->io_start & PAGE_MASK;
- end = (al->io_start + al->io_len + PAGE_SIZE - 1) & PAGE_MASK;
- if (kiocb->ki_pos >= start && kiocb->ki_pos <= end)
- return true;
- }
-
- al->file = NULL;
- return false;
-}
-
-/*
- * Make a note of the last file/offset/direction we punted to async
- * context. We'll use this information to see if we can piggyback a
- * sequential request onto the previous one, if it still hasn't been
- * completed by the async worker.
- */
-static void io_async_list_note(int rw, struct io_kiocb *req, size_t len)
-{
- struct async_list *async_list = &req->ctx->pending_async[rw];
- struct kiocb *kiocb = &req->rw;
- struct file *filp = kiocb->ki_filp;
-
- if (io_should_merge(async_list, kiocb)) {
- unsigned long max_bytes;
-
- /* Use 8x RA size as a decent limiter for both reads/writes */
- max_bytes = filp->f_ra.ra_pages << (PAGE_SHIFT + 3);
- if (!max_bytes)
- max_bytes = VM_READAHEAD_PAGES << (PAGE_SHIFT + 3);
-
- /* If max len are exceeded, reset the state */
- if (async_list->io_len + len <= max_bytes) {
- req->flags |= REQ_F_SEQ_PREV;
- async_list->io_len += len;
- } else {
- async_list->file = NULL;
- }
- }
-
- /* New file? Reset state. */
- if (async_list->file != filp) {
- async_list->io_start = kiocb->ki_pos;
- async_list->io_len = len;
- async_list->file = filp;
- }
-}
-
/*
* For files that don't have ->read_iter() and ->write_iter(), handle them
* by looping over ->read() or ->write() manually.
@@ -1369,7 +1615,7 @@ static ssize_t loop_rw_iter(int rw, struct file *file, struct kiocb *kiocb,
return ret;
}
-static int io_read(struct io_kiocb *req, const struct sqe_submit *s,
+static int io_read(struct io_kiocb *req, struct io_kiocb **nxt,
bool force_nonblock)
{
struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
@@ -1379,7 +1625,7 @@ static int io_read(struct io_kiocb *req, const struct sqe_submit *s,
size_t iov_count;
ssize_t read_size, ret;
- ret = io_prep_rw(req, s, force_nonblock);
+ ret = io_prep_rw(req, force_nonblock);
if (ret)
return ret;
file = kiocb->ki_filp;
@@ -1387,7 +1633,7 @@ static int io_read(struct io_kiocb *req, const struct sqe_submit *s,
if (unlikely(!(file->f_mode & FMODE_READ)))
return -EBADF;
- ret = io_import_iovec(req->ctx, READ, s, &iovec, &iter);
+ ret = io_import_iovec(req->ctx, READ, &req->submit, &iovec, &iter);
if (ret < 0)
return ret;
@@ -1418,23 +1664,16 @@ static int io_read(struct io_kiocb *req, const struct sqe_submit *s,
ret2 > 0 && ret2 < read_size)
ret2 = -EAGAIN;
/* Catch -EAGAIN return for forced non-blocking submission */
- if (!force_nonblock || ret2 != -EAGAIN) {
- io_rw_done(kiocb, ret2);
- } else {
- /*
- * If ->needs_lock is true, we're already in async
- * context.
- */
- if (!s->needs_lock)
- io_async_list_note(READ, req, iov_count);
+ if (!force_nonblock || ret2 != -EAGAIN)
+ kiocb_done(kiocb, ret2, nxt, req->submit.in_async);
+ else
ret = -EAGAIN;
- }
}
kfree(iovec);
return ret;
}
-static int io_write(struct io_kiocb *req, const struct sqe_submit *s,
+static int io_write(struct io_kiocb *req, struct io_kiocb **nxt,
bool force_nonblock)
{
struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
@@ -1444,7 +1683,7 @@ static int io_write(struct io_kiocb *req, const struct sqe_submit *s,
size_t iov_count;
ssize_t ret;
- ret = io_prep_rw(req, s, force_nonblock);
+ ret = io_prep_rw(req, force_nonblock);
if (ret)
return ret;
@@ -1452,7 +1691,7 @@ static int io_write(struct io_kiocb *req, const struct sqe_submit *s,
if (unlikely(!(file->f_mode & FMODE_WRITE)))
return -EBADF;
- ret = io_import_iovec(req->ctx, WRITE, s, &iovec, &iter);
+ ret = io_import_iovec(req->ctx, WRITE, &req->submit, &iovec, &iter);
if (ret < 0)
return ret;
@@ -1462,12 +1701,8 @@ static int io_write(struct io_kiocb *req, const struct sqe_submit *s,
iov_count = iov_iter_count(&iter);
ret = -EAGAIN;
- if (force_nonblock && !(kiocb->ki_flags & IOCB_DIRECT)) {
- /* If ->needs_lock is true, we're already in async context. */
- if (!s->needs_lock)
- io_async_list_note(WRITE, req, iov_count);
+ if (force_nonblock && !(kiocb->ki_flags & IOCB_DIRECT))
goto out_free;
- }
ret = rw_verify_area(WRITE, file, &kiocb->ki_pos, iov_count);
if (!ret) {
@@ -1492,17 +1727,10 @@ static int io_write(struct io_kiocb *req, const struct sqe_submit *s,
ret2 = call_write_iter(file, kiocb, &iter);
else
ret2 = loop_rw_iter(WRITE, file, kiocb, &iter);
- if (!force_nonblock || ret2 != -EAGAIN) {
- io_rw_done(kiocb, ret2);
- } else {
- /*
- * If ->needs_lock is true, we're already in async
- * context.
- */
- if (!s->needs_lock)
- io_async_list_note(WRITE, req, iov_count);
+ if (!force_nonblock || ret2 != -EAGAIN)
+ kiocb_done(kiocb, ret2, nxt, req->submit.in_async);
+ else
ret = -EAGAIN;
- }
}
out_free:
kfree(iovec);
@@ -1512,15 +1740,14 @@ out_free:
/*
* IORING_OP_NOP just posts a completion event, nothing else.
*/
-static int io_nop(struct io_kiocb *req, u64 user_data)
+static int io_nop(struct io_kiocb *req)
{
struct io_ring_ctx *ctx = req->ctx;
- long err = 0;
if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
return -EINVAL;
- io_cqring_add_event(ctx, user_data, err);
+ io_cqring_add_event(req, 0);
io_put_req(req);
return 0;
}
@@ -1541,7 +1768,7 @@ static int io_prep_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe)
}
static int io_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe,
- bool force_nonblock)
+ struct io_kiocb **nxt, bool force_nonblock)
{
loff_t sqe_off = READ_ONCE(sqe->off);
loff_t sqe_len = READ_ONCE(sqe->len);
@@ -1567,8 +1794,8 @@ static int io_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe,
if (ret < 0 && (req->flags & REQ_F_LINK))
req->flags |= REQ_F_FAIL_LINK;
- io_cqring_add_event(req->ctx, sqe->user_data, ret);
- io_put_req(req);
+ io_cqring_add_event(req, ret);
+ io_put_req_find_next(req, nxt);
return 0;
}
@@ -1590,6 +1817,7 @@ static int io_prep_sfr(struct io_kiocb *req, const struct io_uring_sqe *sqe)
static int io_sync_file_range(struct io_kiocb *req,
const struct io_uring_sqe *sqe,
+ struct io_kiocb **nxt,
bool force_nonblock)
{
loff_t sqe_off;
@@ -1613,14 +1841,14 @@ static int io_sync_file_range(struct io_kiocb *req,
if (ret < 0 && (req->flags & REQ_F_LINK))
req->flags |= REQ_F_FAIL_LINK;
- io_cqring_add_event(req->ctx, sqe->user_data, ret);
- io_put_req(req);
+ io_cqring_add_event(req, ret);
+ io_put_req_find_next(req, nxt);
return 0;
}
#if defined(CONFIG_NET)
static int io_send_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
- bool force_nonblock,
+ struct io_kiocb **nxt, bool force_nonblock,
long (*fn)(struct socket *, struct user_msghdr __user *,
unsigned int))
{
@@ -1649,32 +1877,80 @@ static int io_send_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
return ret;
}
- io_cqring_add_event(req->ctx, sqe->user_data, ret);
- io_put_req(req);
+ io_cqring_add_event(req, ret);
+ if (ret < 0 && (req->flags & REQ_F_LINK))
+ req->flags |= REQ_F_FAIL_LINK;
+ io_put_req_find_next(req, nxt);
return 0;
}
#endif
static int io_sendmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
- bool force_nonblock)
+ struct io_kiocb **nxt, bool force_nonblock)
{
#if defined(CONFIG_NET)
- return io_send_recvmsg(req, sqe, force_nonblock, __sys_sendmsg_sock);
+ return io_send_recvmsg(req, sqe, nxt, force_nonblock,
+ __sys_sendmsg_sock);
#else
return -EOPNOTSUPP;
#endif
}
static int io_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
- bool force_nonblock)
+ struct io_kiocb **nxt, bool force_nonblock)
{
#if defined(CONFIG_NET)
- return io_send_recvmsg(req, sqe, force_nonblock, __sys_recvmsg_sock);
+ return io_send_recvmsg(req, sqe, nxt, force_nonblock,
+ __sys_recvmsg_sock);
#else
return -EOPNOTSUPP;
#endif
}
+static int io_accept(struct io_kiocb *req, const struct io_uring_sqe *sqe,
+ struct io_kiocb **nxt, bool force_nonblock)
+{
+#if defined(CONFIG_NET)
+ struct sockaddr __user *addr;
+ int __user *addr_len;
+ unsigned file_flags;
+ int flags, ret;
+
+ if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
+ return -EINVAL;
+ if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index)
+ return -EINVAL;
+
+ addr = (struct sockaddr __user *) (unsigned long) READ_ONCE(sqe->addr);
+ addr_len = (int __user *) (unsigned long) READ_ONCE(sqe->addr2);
+ flags = READ_ONCE(sqe->accept_flags);
+ file_flags = force_nonblock ? O_NONBLOCK : 0;
+
+ ret = __sys_accept4_file(req->file, file_flags, addr, addr_len, flags);
+ if (ret == -EAGAIN && force_nonblock) {
+ req->work.flags |= IO_WQ_WORK_NEEDS_FILES;
+ return -EAGAIN;
+ }
+ if (ret == -ERESTARTSYS)
+ ret = -EINTR;
+ if (ret < 0 && (req->flags & REQ_F_LINK))
+ req->flags |= REQ_F_FAIL_LINK;
+ io_cqring_add_event(req, ret);
+ io_put_req_find_next(req, nxt);
+ return 0;
+#else
+ return -EOPNOTSUPP;
+#endif
+}
+
+static inline void io_poll_remove_req(struct io_kiocb *req)
+{
+ if (!RB_EMPTY_NODE(&req->rb_node)) {
+ rb_erase(&req->rb_node, &req->ctx->cancel_tree);
+ RB_CLEAR_NODE(&req->rb_node);
+ }
+}
+
static void io_poll_remove_one(struct io_kiocb *req)
{
struct io_poll_iocb *poll = &req->poll;
@@ -1683,25 +1959,47 @@ static void io_poll_remove_one(struct io_kiocb *req)
WRITE_ONCE(poll->canceled, true);
if (!list_empty(&poll->wait.entry)) {
list_del_init(&poll->wait.entry);
- io_queue_async_work(req->ctx, req);
+ io_queue_async_work(req);
}
spin_unlock(&poll->head->lock);
-
- list_del_init(&req->list);
+ io_poll_remove_req(req);
}
static void io_poll_remove_all(struct io_ring_ctx *ctx)
{
+ struct rb_node *node;
struct io_kiocb *req;
spin_lock_irq(&ctx->completion_lock);
- while (!list_empty(&ctx->cancel_list)) {
- req = list_first_entry(&ctx->cancel_list, struct io_kiocb,list);
+ while ((node = rb_first(&ctx->cancel_tree)) != NULL) {
+ req = rb_entry(node, struct io_kiocb, rb_node);
io_poll_remove_one(req);
}
spin_unlock_irq(&ctx->completion_lock);
}
+static int io_poll_cancel(struct io_ring_ctx *ctx, __u64 sqe_addr)
+{
+ struct rb_node *p, *parent = NULL;
+ struct io_kiocb *req;
+
+ p = ctx->cancel_tree.rb_node;
+ while (p) {
+ parent = p;
+ req = rb_entry(parent, struct io_kiocb, rb_node);
+ if (sqe_addr < req->user_data) {
+ p = p->rb_left;
+ } else if (sqe_addr > req->user_data) {
+ p = p->rb_right;
+ } else {
+ io_poll_remove_one(req);
+ return 0;
+ }
+ }
+
+ return -ENOENT;
+}
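
The descent is a standard binary search on the user_data key; io_poll_req_insert() further down builds the tree with the same ordering, sending ties to the right, so duplicate user_data values are each removed by a separate cancel call. A worked descent for a tree holding keys {3, 5, 8} with 5 at the root:

	/* io_poll_cancel(ctx, 8): at 5, 8 > 5 -> rb_right; at 8, match -> remove, 0
	 * io_poll_cancel(ctx, 7): at 5, 7 > 5 -> rb_right; at 8, 7 < 8 -> rb_left
	 *                         (empty) -> -ENOENT
	 */
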
+
/*
* Find a running poll command that matches one specified in sqe->addr,
* and remove it if found.
@@ -1709,8 +2007,7 @@ static void io_poll_remove_all(struct io_ring_ctx *ctx)
static int io_poll_remove(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
struct io_ring_ctx *ctx = req->ctx;
- struct io_kiocb *poll_req, *next;
- int ret = -ENOENT;
+ int ret;
if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
return -EINVAL;
@@ -1719,36 +2016,38 @@ static int io_poll_remove(struct io_kiocb *req, const struct io_uring_sqe *sqe)
return -EINVAL;
spin_lock_irq(&ctx->completion_lock);
- list_for_each_entry_safe(poll_req, next, &ctx->cancel_list, list) {
- if (READ_ONCE(sqe->addr) == poll_req->user_data) {
- io_poll_remove_one(poll_req);
- ret = 0;
- break;
- }
- }
+ ret = io_poll_cancel(ctx, READ_ONCE(sqe->addr));
spin_unlock_irq(&ctx->completion_lock);
- io_cqring_add_event(req->ctx, sqe->user_data, ret);
+ io_cqring_add_event(req, ret);
+ if (ret < 0 && (req->flags & REQ_F_LINK))
+ req->flags |= REQ_F_FAIL_LINK;
io_put_req(req);
return 0;
}
-static void io_poll_complete(struct io_ring_ctx *ctx, struct io_kiocb *req,
- __poll_t mask)
+static void io_poll_complete(struct io_kiocb *req, __poll_t mask)
{
+ struct io_ring_ctx *ctx = req->ctx;
+
req->poll.done = true;
- io_cqring_fill_event(ctx, req->user_data, mangle_poll(mask));
+ io_cqring_fill_event(req, mangle_poll(mask));
io_commit_cqring(ctx);
}
-static void io_poll_complete_work(struct work_struct *work)
+static void io_poll_complete_work(struct io_wq_work **workptr)
{
+ struct io_wq_work *work = *workptr;
struct io_kiocb *req = container_of(work, struct io_kiocb, work);
struct io_poll_iocb *poll = &req->poll;
struct poll_table_struct pt = { ._key = poll->events };
struct io_ring_ctx *ctx = req->ctx;
+ struct io_kiocb *nxt = NULL;
__poll_t mask = 0;
+ if (work->flags & IO_WQ_WORK_CANCEL)
+ WRITE_ONCE(poll->canceled, true);
+
if (!READ_ONCE(poll->canceled))
mask = vfs_poll(poll->file, &pt) & poll->events;
@@ -1765,12 +2064,15 @@ static void io_poll_complete_work(struct work_struct *work)
spin_unlock_irq(&ctx->completion_lock);
return;
}
- list_del_init(&req->list);
- io_poll_complete(ctx, req, mask);
+ io_poll_remove_req(req);
+ io_poll_complete(req, mask);
spin_unlock_irq(&ctx->completion_lock);
io_cqring_ev_posted(ctx);
- io_put_req(req);
+
+ io_put_req_find_next(req, &nxt);
+ if (nxt)
+ *workptr = &nxt->work;
}
static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
@@ -1789,15 +2091,22 @@ static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
list_del_init(&poll->wait.entry);
+ /*
+ * Run completion inline if we can. We're using trylock here because
+ * we are violating the completion_lock -> poll wq lock ordering.
+ * If we have a link timeout we're going to need the completion_lock
+ * for finalizing the request, mark us as having grabbed that already.
+ */
if (mask && spin_trylock_irqsave(&ctx->completion_lock, flags)) {
- list_del(&req->list);
- io_poll_complete(ctx, req, mask);
+ io_poll_remove_req(req);
+ io_poll_complete(req, mask);
+ req->flags |= REQ_F_COMP_LOCKED;
+ io_put_req(req);
spin_unlock_irqrestore(&ctx->completion_lock, flags);
io_cqring_ev_posted(ctx);
- io_put_req(req);
} else {
- io_queue_async_work(ctx, req);
+ io_queue_async_work(req);
}
return 1;
@@ -1824,7 +2133,27 @@ static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
add_wait_queue(head, &pt->req->poll.wait);
}
-static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+static void io_poll_req_insert(struct io_kiocb *req)
+{
+ struct io_ring_ctx *ctx = req->ctx;
+ struct rb_node **p = &ctx->cancel_tree.rb_node;
+ struct rb_node *parent = NULL;
+ struct io_kiocb *tmp;
+
+ while (*p) {
+ parent = *p;
+ tmp = rb_entry(parent, struct io_kiocb, rb_node);
+ if (req->user_data < tmp->user_data)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+ rb_link_node(&req->rb_node, parent, p);
+ rb_insert_color(&req->rb_node, &ctx->cancel_tree);
+}
+
+static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe,
+ struct io_kiocb **nxt)
{
struct io_poll_iocb *poll = &req->poll;
struct io_ring_ctx *ctx = req->ctx;
@@ -1841,9 +2170,10 @@ static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe)
return -EBADF;
req->submit.sqe = NULL;
- INIT_WORK(&req->work, io_poll_complete_work);
+ INIT_IO_WORK(&req->work, io_poll_complete_work);
events = READ_ONCE(sqe->poll_events);
poll->events = demangle_poll(events) | EPOLLERR | EPOLLHUP;
+ RB_CLEAR_NODE(&req->rb_node);
poll->head = NULL;
poll->done = false;
@@ -1876,18 +2206,18 @@ static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe)
else if (cancel)
WRITE_ONCE(poll->canceled, true);
else if (!poll->done) /* actually waiting for an event */
- list_add_tail(&req->list, &ctx->cancel_list);
+ io_poll_req_insert(req);
spin_unlock(&poll->head->lock);
}
if (mask) { /* no async, we'd stolen it */
ipt.error = 0;
- io_poll_complete(ctx, req, mask);
+ io_poll_complete(req, mask);
}
spin_unlock_irq(&ctx->completion_lock);
if (mask) {
io_cqring_ev_posted(ctx);
- io_put_req(req);
+ io_put_req_find_next(req, nxt);
}
return ipt.error;
}
@@ -1895,7 +2225,7 @@ static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe)
static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
{
struct io_ring_ctx *ctx;
- struct io_kiocb *req, *prev;
+ struct io_kiocb *req;
unsigned long flags;
req = container_of(timer, struct io_kiocb, timeout.timer);
@@ -1904,55 +2234,136 @@ static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
spin_lock_irqsave(&ctx->completion_lock, flags);
/*
- * Adjust the reqs sequence before the current one because it
- * will consume a slot in the cq_ring and the cq_tail pointer
- * will be increased, otherwise other timeout reqs may return in
- * advance without waiting for enough wait_nr.
+ * We could be racing with timeout deletion. If the list is empty,
+ * then timeout lookup already found it and will be handling it.
*/
- prev = req;
- list_for_each_entry_continue_reverse(prev, &ctx->timeout_list, list)
- prev->sequence++;
- list_del(&req->list);
+ if (!list_empty(&req->list)) {
+ struct io_kiocb *prev;
+
+ /*
+ * Adjust the reqs sequence before the current one because it
+ * will consume a slot in the cq_ring and the cq_tail
+ * pointer will be increased, otherwise other timeout reqs may
+ * return in advance without waiting for enough wait_nr.
+ */
+ prev = req;
+ list_for_each_entry_continue_reverse(prev, &ctx->timeout_list, list)
+ prev->sequence++;
+ list_del_init(&req->list);
+ }
- io_cqring_fill_event(ctx, req->user_data, -ETIME);
+ io_cqring_fill_event(req, -ETIME);
io_commit_cqring(ctx);
spin_unlock_irqrestore(&ctx->completion_lock, flags);
io_cqring_ev_posted(ctx);
-
+ if (req->flags & REQ_F_LINK)
+ req->flags |= REQ_F_FAIL_LINK;
io_put_req(req);
return HRTIMER_NORESTART;
}
+static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data)
+{
+ struct io_kiocb *req;
+ int ret = -ENOENT;
+
+ list_for_each_entry(req, &ctx->timeout_list, list) {
+ if (user_data == req->user_data) {
+ list_del_init(&req->list);
+ ret = 0;
+ break;
+ }
+ }
+
+ if (ret == -ENOENT)
+ return ret;
+
+ ret = hrtimer_try_to_cancel(&req->timeout.timer);
+ if (ret == -1)
+ return -EALREADY;
+
+ io_cqring_fill_event(req, -ECANCELED);
+ io_put_req(req);
+ return 0;
+}
+
+/*
+ * Remove or update an existing timeout command
+ */
+static int io_timeout_remove(struct io_kiocb *req,
+ const struct io_uring_sqe *sqe)
+{
+ struct io_ring_ctx *ctx = req->ctx;
+ unsigned flags;
+ int ret;
+
+ if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
+ return -EINVAL;
+ if (sqe->flags || sqe->ioprio || sqe->buf_index || sqe->len)
+ return -EINVAL;
+ flags = READ_ONCE(sqe->timeout_flags);
+ if (flags)
+ return -EINVAL;
+
+ spin_lock_irq(&ctx->completion_lock);
+ ret = io_timeout_cancel(ctx, READ_ONCE(sqe->addr));
+
+ io_cqring_fill_event(req, ret);
+ io_commit_cqring(ctx);
+ spin_unlock_irq(&ctx->completion_lock);
+ io_cqring_ev_posted(ctx);
+ if (ret < 0 && req->flags & REQ_F_LINK)
+ req->flags |= REQ_F_FAIL_LINK;
+ io_put_req(req);
+ return 0;
+}
+
static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
unsigned count;
struct io_ring_ctx *ctx = req->ctx;
struct list_head *entry;
+ enum hrtimer_mode mode;
struct timespec64 ts;
unsigned span = 0;
+ unsigned flags;
if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
return -EINVAL;
- if (sqe->flags || sqe->ioprio || sqe->buf_index || sqe->timeout_flags ||
- sqe->len != 1)
+ if (sqe->flags || sqe->ioprio || sqe->buf_index || sqe->len != 1)
+ return -EINVAL;
+ flags = READ_ONCE(sqe->timeout_flags);
+ if (flags & ~IORING_TIMEOUT_ABS)
return -EINVAL;
if (get_timespec64(&ts, u64_to_user_ptr(sqe->addr)))
return -EFAULT;
+ if (flags & IORING_TIMEOUT_ABS)
+ mode = HRTIMER_MODE_ABS;
+ else
+ mode = HRTIMER_MODE_REL;
+
+ hrtimer_init(&req->timeout.timer, CLOCK_MONOTONIC, mode);
+ req->flags |= REQ_F_TIMEOUT;
+
/*
 * sqe->off holds how many events need to occur for this
- * timeout event to be satisfied.
+ * timeout event to be satisfied. If it isn't set, then this is
+ * a pure timeout request, sequence isn't used.
*/
count = READ_ONCE(sqe->off);
- if (!count)
- count = 1;
+ if (!count) {
+ req->flags |= REQ_F_TIMEOUT_NOSEQ;
+ spin_lock_irq(&ctx->completion_lock);
+ entry = ctx->timeout_list.prev;
+ goto add;
+ }
req->sequence = ctx->cached_sq_head + count - 1;
/* reuse it to store the count */
req->submit.sequence = count;
- req->flags |= REQ_F_TIMEOUT;
/*
* Insertion sort, ensuring the first entry in the list is always
@@ -1964,6 +2375,9 @@ static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe)
unsigned nxt_sq_head;
long long tmp, tmp_nxt;
+ if (nxt->flags & REQ_F_TIMEOUT_NOSEQ)
+ continue;
+
/*
* Since cached_sq_head + count - 1 can overflow, use type long
* long to store it.
@@ -1990,22 +2404,94 @@ static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe)
nxt->sequence++;
}
req->sequence -= span;
+add:
list_add(&req->list, entry);
+ req->timeout.timer.function = io_timeout_fn;
+ hrtimer_start(&req->timeout.timer, timespec64_to_ktime(ts), mode);
spin_unlock_irq(&ctx->completion_lock);
+ return 0;
+}
- hrtimer_init(&req->timeout.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- req->timeout.timer.function = io_timeout_fn;
- hrtimer_start(&req->timeout.timer, timespec64_to_ktime(ts),
- HRTIMER_MODE_REL);
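
On the submission side, IORING_TIMEOUT_ABS makes the timespec at sqe->addr an absolute CLOCK_MONOTONIC deadline rather than a relative delay. A hypothetical raw-SQE sketch matching the validation above ('deadline_sec' is illustrative):

	struct __kernel_timespec ts = { .tv_sec = deadline_sec };

	sqe->opcode        = IORING_OP_TIMEOUT;
	sqe->addr          = (unsigned long) &ts;
	sqe->len           = 1;				/* exactly one timespec, enforced above */
	sqe->off           = 0;				/* no event count: a pure timeout */
	sqe->timeout_flags = IORING_TIMEOUT_ABS;	/* absolute CLOCK_MONOTONIC deadline */
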
+static bool io_cancel_cb(struct io_wq_work *work, void *data)
+{
+ struct io_kiocb *req = container_of(work, struct io_kiocb, work);
+
+ return req->user_data == (unsigned long) data;
+}
+
+static int io_async_cancel_one(struct io_ring_ctx *ctx, void *sqe_addr)
+{
+ enum io_wq_cancel cancel_ret;
+ int ret = 0;
+
+ cancel_ret = io_wq_cancel_cb(ctx->io_wq, io_cancel_cb, sqe_addr);
+ switch (cancel_ret) {
+ case IO_WQ_CANCEL_OK:
+ ret = 0;
+ break;
+ case IO_WQ_CANCEL_RUNNING:
+ ret = -EALREADY;
+ break;
+ case IO_WQ_CANCEL_NOTFOUND:
+ ret = -ENOENT;
+ break;
+ }
+
+ return ret;
+}
+
+static void io_async_find_and_cancel(struct io_ring_ctx *ctx,
+ struct io_kiocb *req, __u64 sqe_addr,
+ struct io_kiocb **nxt)
+{
+ unsigned long flags;
+ int ret;
+
+ ret = io_async_cancel_one(ctx, (void *) (unsigned long) sqe_addr);
+ if (ret != -ENOENT) {
+ spin_lock_irqsave(&ctx->completion_lock, flags);
+ goto done;
+ }
+
+ spin_lock_irqsave(&ctx->completion_lock, flags);
+ ret = io_timeout_cancel(ctx, sqe_addr);
+ if (ret != -ENOENT)
+ goto done;
+ ret = io_poll_cancel(ctx, sqe_addr);
+done:
+ io_cqring_fill_event(req, ret);
+ io_commit_cqring(ctx);
+ spin_unlock_irqrestore(&ctx->completion_lock, flags);
+ io_cqring_ev_posted(ctx);
+
+ if (ret < 0 && (req->flags & REQ_F_LINK))
+ req->flags |= REQ_F_FAIL_LINK;
+ io_put_req_find_next(req, nxt);
+}
+
+static int io_async_cancel(struct io_kiocb *req, const struct io_uring_sqe *sqe,
+ struct io_kiocb **nxt)
+{
+ struct io_ring_ctx *ctx = req->ctx;
+
+ if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
+ return -EINVAL;
+ if (sqe->flags || sqe->ioprio || sqe->off || sqe->len ||
+ sqe->cancel_flags)
+ return -EINVAL;
+
+ io_async_find_and_cancel(ctx, req, READ_ONCE(sqe->addr), NULL);
return 0;
}
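
From userspace, cancellation is keyed on the victim's user_data: io_async_find_and_cancel() tries the io-wq queues first, then the timeout list, then the poll tree. A hypothetical raw-SQE sketch ('target' is illustrative):

	sqe->opcode = IORING_OP_ASYNC_CANCEL;
	sqe->addr   = target;	/* user_data of the request to cancel */
	/* CQE res: 0 = cancelled before running, -EALREADY = was running,
	 * -ENOENT = no such request */
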
-static int io_req_defer(struct io_ring_ctx *ctx, struct io_kiocb *req,
- const struct io_uring_sqe *sqe)
+static int io_req_defer(struct io_kiocb *req)
{
+ const struct io_uring_sqe *sqe = req->submit.sqe;
struct io_uring_sqe *sqe_copy;
+ struct io_ring_ctx *ctx = req->ctx;
- if (!io_sequence_defer(ctx, req) && list_empty(&ctx->defer_list))
+ /* Still need defer if there is pending req in defer list. */
+ if (!req_need_defer(req) && list_empty(&ctx->defer_list))
return 0;
sqe_copy = kmalloc(sizeof(*sqe_copy), GFP_KERNEL);
@@ -2013,7 +2499,7 @@ static int io_req_defer(struct io_ring_ctx *ctx, struct io_kiocb *req,
return -EAGAIN;
spin_lock_irq(&ctx->completion_lock);
- if (!io_sequence_defer(ctx, req) && list_empty(&ctx->defer_list)) {
+ if (!req_need_defer(req) && list_empty(&ctx->defer_list)) {
spin_unlock_irq(&ctx->completion_lock);
kfree(sqe_copy);
return 0;
@@ -2022,64 +2508,70 @@ static int io_req_defer(struct io_ring_ctx *ctx, struct io_kiocb *req,
memcpy(sqe_copy, sqe, sizeof(*sqe_copy));
req->submit.sqe = sqe_copy;
- INIT_WORK(&req->work, io_sq_wq_submit_work);
+ trace_io_uring_defer(ctx, req, false);
list_add_tail(&req->list, &ctx->defer_list);
spin_unlock_irq(&ctx->completion_lock);
return -EIOCBQUEUED;
}
-static int __io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
- const struct sqe_submit *s, bool force_nonblock)
+static int __io_submit_sqe(struct io_kiocb *req, struct io_kiocb **nxt,
+ bool force_nonblock)
{
int ret, opcode;
-
- req->user_data = READ_ONCE(s->sqe->user_data);
-
- if (unlikely(s->index >= ctx->sq_entries))
- return -EINVAL;
+ struct sqe_submit *s = &req->submit;
+ struct io_ring_ctx *ctx = req->ctx;
opcode = READ_ONCE(s->sqe->opcode);
switch (opcode) {
case IORING_OP_NOP:
- ret = io_nop(req, req->user_data);
+ ret = io_nop(req);
break;
case IORING_OP_READV:
if (unlikely(s->sqe->buf_index))
return -EINVAL;
- ret = io_read(req, s, force_nonblock);
+ ret = io_read(req, nxt, force_nonblock);
break;
case IORING_OP_WRITEV:
if (unlikely(s->sqe->buf_index))
return -EINVAL;
- ret = io_write(req, s, force_nonblock);
+ ret = io_write(req, nxt, force_nonblock);
break;
case IORING_OP_READ_FIXED:
- ret = io_read(req, s, force_nonblock);
+ ret = io_read(req, nxt, force_nonblock);
break;
case IORING_OP_WRITE_FIXED:
- ret = io_write(req, s, force_nonblock);
+ ret = io_write(req, nxt, force_nonblock);
break;
case IORING_OP_FSYNC:
- ret = io_fsync(req, s->sqe, force_nonblock);
+ ret = io_fsync(req, s->sqe, nxt, force_nonblock);
break;
case IORING_OP_POLL_ADD:
- ret = io_poll_add(req, s->sqe);
+ ret = io_poll_add(req, s->sqe, nxt);
break;
case IORING_OP_POLL_REMOVE:
ret = io_poll_remove(req, s->sqe);
break;
case IORING_OP_SYNC_FILE_RANGE:
- ret = io_sync_file_range(req, s->sqe, force_nonblock);
+ ret = io_sync_file_range(req, s->sqe, nxt, force_nonblock);
break;
case IORING_OP_SENDMSG:
- ret = io_sendmsg(req, s->sqe, force_nonblock);
+ ret = io_sendmsg(req, s->sqe, nxt, force_nonblock);
break;
case IORING_OP_RECVMSG:
- ret = io_recvmsg(req, s->sqe, force_nonblock);
+ ret = io_recvmsg(req, s->sqe, nxt, force_nonblock);
break;
case IORING_OP_TIMEOUT:
ret = io_timeout(req, s->sqe);
break;
+ case IORING_OP_TIMEOUT_REMOVE:
+ ret = io_timeout_remove(req, s->sqe);
+ break;
+ case IORING_OP_ACCEPT:
+ ret = io_accept(req, s->sqe, nxt, force_nonblock);
+ break;
+ case IORING_OP_ASYNC_CANCEL:
+ ret = io_async_cancel(req, s->sqe, nxt);
+ break;
default:
ret = -EINVAL;
break;
@@ -2093,187 +2585,65 @@ static int __io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
return -EAGAIN;
/* workqueue context doesn't hold uring_lock, grab it now */
- if (s->needs_lock)
+ if (s->in_async)
mutex_lock(&ctx->uring_lock);
io_iopoll_req_issued(req);
- if (s->needs_lock)
+ if (s->in_async)
mutex_unlock(&ctx->uring_lock);
}
return 0;
}
-static struct async_list *io_async_list_from_sqe(struct io_ring_ctx *ctx,
- const struct io_uring_sqe *sqe)
-{
- switch (sqe->opcode) {
- case IORING_OP_READV:
- case IORING_OP_READ_FIXED:
- return &ctx->pending_async[READ];
- case IORING_OP_WRITEV:
- case IORING_OP_WRITE_FIXED:
- return &ctx->pending_async[WRITE];
- default:
- return NULL;
- }
-}
-
-static inline bool io_sqe_needs_user(const struct io_uring_sqe *sqe)
-{
- u8 opcode = READ_ONCE(sqe->opcode);
-
- return !(opcode == IORING_OP_READ_FIXED ||
- opcode == IORING_OP_WRITE_FIXED);
-}
-
-static void io_sq_wq_submit_work(struct work_struct *work)
+static void io_wq_submit_work(struct io_wq_work **workptr)
{
+ struct io_wq_work *work = *workptr;
struct io_kiocb *req = container_of(work, struct io_kiocb, work);
- struct io_ring_ctx *ctx = req->ctx;
- struct mm_struct *cur_mm = NULL;
- struct async_list *async_list;
- LIST_HEAD(req_list);
- mm_segment_t old_fs;
- int ret;
+ struct sqe_submit *s = &req->submit;
+ const struct io_uring_sqe *sqe = s->sqe;
+ struct io_kiocb *nxt = NULL;
+ int ret = 0;
- async_list = io_async_list_from_sqe(ctx, req->submit.sqe);
-restart:
- do {
- struct sqe_submit *s = &req->submit;
- const struct io_uring_sqe *sqe = s->sqe;
- unsigned int flags = req->flags;
+ /* Ensure we clear previously set non-block flag */
+ req->rw.ki_flags &= ~IOCB_NOWAIT;
- /* Ensure we clear previously set non-block flag */
- req->rw.ki_flags &= ~IOCB_NOWAIT;
+ if (work->flags & IO_WQ_WORK_CANCEL)
+ ret = -ECANCELED;
- ret = 0;
- if (io_sqe_needs_user(sqe) && !cur_mm) {
- if (!mmget_not_zero(ctx->sqo_mm)) {
- ret = -EFAULT;
- } else {
- cur_mm = ctx->sqo_mm;
- use_mm(cur_mm);
- old_fs = get_fs();
- set_fs(USER_DS);
- }
- }
+ if (!ret) {
+ s->has_user = (work->flags & IO_WQ_WORK_HAS_MM) != 0;
+ s->in_async = true;
+ do {
+ ret = __io_submit_sqe(req, &nxt, false);
+ /*
+ * We can get EAGAIN for polled IO even though we're
+ * forcing a sync submission from here, since we can't
+ * wait for request slots on the block side.
+ */
+ if (ret != -EAGAIN)
+ break;
+ cond_resched();
+ } while (1);
+ }
- if (!ret) {
- s->has_user = cur_mm != NULL;
- s->needs_lock = true;
- do {
- ret = __io_submit_sqe(ctx, req, s, false);
- /*
- * We can get EAGAIN for polled IO even though
- * we're forcing a sync submission from here,
- * since we can't wait for request slots on the
- * block side.
- */
- if (ret != -EAGAIN)
- break;
- cond_resched();
- } while (1);
- }
+ /* drop submission reference */
+ io_put_req(req);
- /* drop submission reference */
+ if (ret) {
+ if (req->flags & REQ_F_LINK)
+ req->flags |= REQ_F_FAIL_LINK;
+ io_cqring_add_event(req, ret);
io_put_req(req);
-
- if (ret) {
- io_cqring_add_event(ctx, sqe->user_data, ret);
- io_put_req(req);
- }
-
- /* async context always use a copy of the sqe */
- kfree(sqe);
-
- /* req from defer and link list needn't decrease async cnt */
- if (flags & (REQ_F_IO_DRAINED | REQ_F_LINK_DONE))
- goto out;
-
- if (!async_list)
- break;
- if (!list_empty(&req_list)) {
- req = list_first_entry(&req_list, struct io_kiocb,
- list);
- list_del(&req->list);
- continue;
- }
- if (list_empty(&async_list->list))
- break;
-
- req = NULL;
- spin_lock(&async_list->lock);
- if (list_empty(&async_list->list)) {
- spin_unlock(&async_list->lock);
- break;
- }
- list_splice_init(&async_list->list, &req_list);
- spin_unlock(&async_list->lock);
-
- req = list_first_entry(&req_list, struct io_kiocb, list);
- list_del(&req->list);
- } while (req);
-
- /*
- * Rare case of racing with a submitter. If we find the count has
- * dropped to zero AND we have pending work items, then restart
- * the processing. This is a tiny race window.
- */
- if (async_list) {
- ret = atomic_dec_return(&async_list->cnt);
- while (!ret && !list_empty(&async_list->list)) {
- spin_lock(&async_list->lock);
- atomic_inc(&async_list->cnt);
- list_splice_init(&async_list->list, &req_list);
- spin_unlock(&async_list->lock);
-
- if (!list_empty(&req_list)) {
- req = list_first_entry(&req_list,
- struct io_kiocb, list);
- list_del(&req->list);
- goto restart;
- }
- ret = atomic_dec_return(&async_list->cnt);
- }
}
-out:
- if (cur_mm) {
- set_fs(old_fs);
- unuse_mm(cur_mm);
- mmput(cur_mm);
- }
-}
-
-/*
- * See if we can piggy-back onto previously submitted work that is still
- * running. We currently only allow this if the new request is sequential
- * to the previous one we punted.
- */
-static bool io_add_to_prev_work(struct async_list *list, struct io_kiocb *req)
-{
- bool ret;
+ /* async context always use a copy of the sqe */
+ kfree(sqe);
- if (!list)
- return false;
- if (!(req->flags & REQ_F_SEQ_PREV))
- return false;
- if (!atomic_read(&list->cnt))
- return false;
-
- ret = true;
- spin_lock(&list->lock);
- list_add_tail(&req->list, &list->list);
- /*
- * Ensure we see a simultaneous modification from io_sq_wq_submit_work()
- */
- smp_mb();
- if (!atomic_read(&list->cnt)) {
- list_del_init(&req->list);
- ret = false;
+ /* if a dependent link is ready, pass it back */
+ if (!ret && nxt) {
+ io_prep_async_work(nxt);
+ *workptr = &nxt->work;
}
- spin_unlock(&list->lock);
- return ret;
}
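
The double-pointer convention above lets a handler pass a dependent work item straight back to the worker instead of re-queueing it. A minimal standalone sketch of the pattern, using hypothetical struct work/worker_loop names rather than the real io-wq types:

    #include <stdio.h>

    struct work {
        void (*func)(struct work **workptr);
    };

    static struct work second;

    static void second_step(struct work **workptr)
    {
        (void)workptr;
        printf("dependent work ran\n");
    }

    static void first_step(struct work **workptr)
    {
        printf("first work ran\n");
        second.func = second_step;
        *workptr = &second;    /* hand the dependent item back */
    }

    static void worker_loop(struct work *work)
    {
        while (work) {
            struct work *prev = work;

            work->func(&work);    /* handler may replace *workptr */
            if (work == prev)     /* nothing handed back: stop */
                break;
        }
    }

    int main(void)
    {
        struct work first = { .func = first_step };

        worker_loop(&first);
        return 0;
    }
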
static bool io_op_needs_file(const struct io_uring_sqe *sqe)
@@ -2283,15 +2653,29 @@ static bool io_op_needs_file(const struct io_uring_sqe *sqe)
switch (op) {
case IORING_OP_NOP:
case IORING_OP_POLL_REMOVE:
+ case IORING_OP_TIMEOUT:
+ case IORING_OP_TIMEOUT_REMOVE:
+ case IORING_OP_ASYNC_CANCEL:
+ case IORING_OP_LINK_TIMEOUT:
return false;
default:
return true;
}
}
-static int io_req_set_file(struct io_ring_ctx *ctx, const struct sqe_submit *s,
- struct io_submit_state *state, struct io_kiocb *req)
+static inline struct file *io_file_from_index(struct io_ring_ctx *ctx,
+ int index)
{
+ struct fixed_file_table *table;
+
+ table = &ctx->file_table[index >> IORING_FILE_TABLE_SHIFT];
+ return table->files[index & IORING_FILE_TABLE_MASK];
+}
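
io_file_from_index() above resolves a fixed-file index through a two-level table. Assuming a table shift of 9 (512 files per second-level table; the real IORING_FILE_TABLE_SHIFT is defined elsewhere in this patch), the decomposition works out as below. The constants here are local stand-ins:

    #include <stdio.h>

    #define TABLE_SHIFT 9    /* assumed: 512 files per table */
    #define TABLE_MASK  ((1U << TABLE_SHIFT) - 1)

    int main(void)
    {
        unsigned int index = 1500;

        printf("outer table %u, slot %u\n",
               index >> TABLE_SHIFT,    /* 1500 / 512 = 2   */
               index & TABLE_MASK);     /* 1500 % 512 = 476 */
        return 0;
    }
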
+
+static int io_req_set_file(struct io_submit_state *state, struct io_kiocb *req)
+{
+ struct sqe_submit *s = &req->submit;
+ struct io_ring_ctx *ctx = req->ctx;
unsigned flags;
int fd;
@@ -2311,14 +2695,18 @@ static int io_req_set_file(struct io_ring_ctx *ctx, const struct sqe_submit *s,
return 0;
if (flags & IOSQE_FIXED_FILE) {
- if (unlikely(!ctx->user_files ||
+ if (unlikely(!ctx->file_table ||
(unsigned) fd >= ctx->nr_user_files))
return -EBADF;
- req->file = ctx->user_files[fd];
+ fd = array_index_nospec(fd, ctx->nr_user_files);
+ req->file = io_file_from_index(ctx, fd);
+ if (!req->file)
+ return -EBADF;
req->flags |= REQ_F_FIXED_FILE;
} else {
if (s->needs_fixed_file)
return -EBADF;
+ trace_io_uring_file_get(ctx, fd);
req->file = io_file_get(state, fd);
if (unlikely(!req->file))
return -EBADF;
@@ -2327,12 +2715,146 @@ static int io_req_set_file(struct io_ring_ctx *ctx, const struct sqe_submit *s,
return 0;
}
-static int __io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
- struct sqe_submit *s)
+static int io_grab_files(struct io_kiocb *req)
+{
+ int ret = -EBADF;
+ struct io_ring_ctx *ctx = req->ctx;
+
+ rcu_read_lock();
+ spin_lock_irq(&ctx->inflight_lock);
+ /*
+ * We use the f_ops->flush() handler to ensure that we can flush
+ * out work accessing these files if the fd is closed. Check if
+ * the fd has changed since we started down this path, and disallow
+ * this operation if it has.
+ */
+ if (fcheck(req->submit.ring_fd) == req->submit.ring_file) {
+ list_add(&req->inflight_entry, &ctx->inflight_list);
+ req->flags |= REQ_F_INFLIGHT;
+ req->work.files = current->files;
+ ret = 0;
+ }
+ spin_unlock_irq(&ctx->inflight_lock);
+ rcu_read_unlock();
+
+ return ret;
+}
+
+static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
+{
+ struct io_kiocb *req = container_of(timer, struct io_kiocb,
+ timeout.timer);
+ struct io_ring_ctx *ctx = req->ctx;
+ struct io_kiocb *prev = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctx->completion_lock, flags);
+
+ /*
+ * We don't expect the list to be empty; that will only happen if we
+ * race with the completion of the linked work.
+ */
+ if (!list_empty(&req->list)) {
+ prev = list_entry(req->list.prev, struct io_kiocb, link_list);
+ if (refcount_inc_not_zero(&prev->refs))
+ list_del_init(&req->list);
+ else
+ prev = NULL;
+ }
+
+ spin_unlock_irqrestore(&ctx->completion_lock, flags);
+
+ if (prev) {
+ io_async_find_and_cancel(ctx, req, prev->user_data, NULL);
+ io_put_req(prev);
+ } else {
+ io_cqring_add_event(req, -ETIME);
+ io_put_req(req);
+ }
+ return HRTIMER_NORESTART;
+}
+
+static void io_queue_linked_timeout(struct io_kiocb *req, struct timespec64 *ts,
+ enum hrtimer_mode *mode)
+{
+ struct io_ring_ctx *ctx = req->ctx;
+
+ /*
+ * If the list is now empty, then our linked request finished before
+ * we got a chance to set up the timer.
+ */
+ spin_lock_irq(&ctx->completion_lock);
+ if (!list_empty(&req->list)) {
+ req->timeout.timer.function = io_link_timeout_fn;
+ hrtimer_start(&req->timeout.timer, timespec64_to_ktime(*ts),
+ *mode);
+ }
+ spin_unlock_irq(&ctx->completion_lock);
+
+ /* drop submission reference */
+ io_put_req(req);
+}
+
+static int io_validate_link_timeout(const struct io_uring_sqe *sqe,
+ struct timespec64 *ts)
+{
+ if (sqe->ioprio || sqe->buf_index || sqe->len != 1 || sqe->off)
+ return -EINVAL;
+ if (sqe->timeout_flags & ~IORING_TIMEOUT_ABS)
+ return -EINVAL;
+ if (get_timespec64(ts, u64_to_user_ptr(sqe->addr)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req,
+ struct timespec64 *ts,
+ enum hrtimer_mode *mode)
+{
+ struct io_kiocb *nxt;
+ int ret;
+
+ if (!(req->flags & REQ_F_LINK))
+ return NULL;
+
+ nxt = list_first_entry_or_null(&req->link_list, struct io_kiocb, list);
+ if (!nxt || nxt->submit.sqe->opcode != IORING_OP_LINK_TIMEOUT)
+ return NULL;
+
+ ret = io_validate_link_timeout(nxt->submit.sqe, ts);
+ if (ret) {
+ list_del_init(&nxt->list);
+ io_cqring_add_event(nxt, ret);
+ io_double_put_req(nxt);
+ return ERR_PTR(-ECANCELED);
+ }
+
+ if (nxt->submit.sqe->timeout_flags & IORING_TIMEOUT_ABS)
+ *mode = HRTIMER_MODE_ABS;
+ else
+ *mode = HRTIMER_MODE_REL;
+
+ req->flags |= REQ_F_LINK_TIMEOUT;
+ hrtimer_init(&nxt->timeout.timer, CLOCK_MONOTONIC, *mode);
+ return nxt;
+}
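
From userspace, a linked timeout is armed by queueing an IORING_OP_LINK_TIMEOUT SQE directly after the request it guards, with IOSQE_IO_LINK set on the first SQE; per io_validate_link_timeout() above, the timeout SQE must have len == 1 and addr pointing at a timespec. A hedged sketch, where next_sqe() is a hypothetical helper returning the next free SQE slot:

    #include <linux/io_uring.h>
    #include <linux/time_types.h>
    #include <string.h>
    #include <sys/uio.h>

    extern struct io_uring_sqe *next_sqe(void);    /* hypothetical */

    static struct __kernel_timespec one_sec = { .tv_sec = 1 };

    static void queue_read_with_timeout(int fd, struct iovec *iov)
    {
        struct io_uring_sqe *sqe = next_sqe();

        memset(sqe, 0, sizeof(*sqe));
        sqe->opcode = IORING_OP_READV;
        sqe->fd = fd;
        sqe->addr = (unsigned long) iov;
        sqe->len = 1;
        sqe->flags = IOSQE_IO_LINK;    /* chain to the next SQE */

        sqe = next_sqe();
        memset(sqe, 0, sizeof(*sqe));
        sqe->opcode = IORING_OP_LINK_TIMEOUT;
        sqe->addr = (unsigned long) &one_sec;    /* __kernel_timespec */
        sqe->len = 1;    /* required by the validation above */
        /* IORING_TIMEOUT_ABS in timeout_flags selects absolute time */
    }

If the timer fires before the read completes, the read is cancelled, as io_link_timeout_fn() above implements.
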
+
+static int __io_queue_sqe(struct io_kiocb *req)
{
+ enum hrtimer_mode mode;
+ struct io_kiocb *nxt;
+ struct timespec64 ts;
int ret;
- ret = __io_submit_sqe(ctx, req, s, true);
+ nxt = io_prep_linked_timeout(req, &ts, &mode);
+ if (IS_ERR(nxt)) {
+ ret = PTR_ERR(nxt);
+ nxt = NULL;
+ goto err;
+ }
+
+ ret = __io_submit_sqe(req, NULL, true);
/*
* We async punt it if the file wasn't marked NOWAIT, or if the file
@@ -2340,36 +2862,47 @@ static int __io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
*/
if (ret == -EAGAIN && (!(req->flags & REQ_F_NOWAIT) ||
(req->flags & REQ_F_MUST_PUNT))) {
+ struct sqe_submit *s = &req->submit;
struct io_uring_sqe *sqe_copy;
sqe_copy = kmemdup(s->sqe, sizeof(*sqe_copy), GFP_KERNEL);
if (sqe_copy) {
- struct async_list *list;
-
s->sqe = sqe_copy;
- memcpy(&req->submit, s, sizeof(*s));
- list = io_async_list_from_sqe(ctx, s->sqe);
- if (!io_add_to_prev_work(list, req)) {
- if (list)
- atomic_inc(&list->cnt);
- INIT_WORK(&req->work, io_sq_wq_submit_work);
- io_queue_async_work(ctx, req);
+ if (req->work.flags & IO_WQ_WORK_NEEDS_FILES) {
+ ret = io_grab_files(req);
+ if (ret) {
+ kfree(sqe_copy);
+ goto err;
+ }
}
/*
* Queued up for async execution; the worker will release the
* submission reference when the iocb is actually submitted.
*/
+ io_queue_async_work(req);
+
+ if (nxt)
+ io_queue_linked_timeout(nxt, &ts, &mode);
+
return 0;
}
}
+err:
/* drop submission reference */
io_put_req(req);
+ if (nxt) {
+ if (!ret)
+ io_queue_linked_timeout(nxt, &ts, &mode);
+ else
+ io_put_req(nxt);
+ }
+
/* and drop final reference, if we failed */
if (ret) {
- io_cqring_add_event(ctx, req->user_data, ret);
+ io_cqring_add_event(req, ret);
if (req->flags & REQ_F_LINK)
req->flags |= REQ_F_FAIL_LINK;
io_put_req(req);
@@ -2378,31 +2911,30 @@ static int __io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
return ret;
}
-static int io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
- struct sqe_submit *s)
+static int io_queue_sqe(struct io_kiocb *req)
{
int ret;
- ret = io_req_defer(ctx, req, s->sqe);
+ ret = io_req_defer(req);
if (ret) {
if (ret != -EIOCBQUEUED) {
- io_free_req(req);
- io_cqring_add_event(ctx, s->sqe->user_data, ret);
+ io_cqring_add_event(req, ret);
+ io_double_put_req(req);
}
return 0;
}
- return __io_queue_sqe(ctx, req, s);
+ return __io_queue_sqe(req);
}
-static int io_queue_link_head(struct io_ring_ctx *ctx, struct io_kiocb *req,
- struct sqe_submit *s, struct io_kiocb *shadow)
+static int io_queue_link_head(struct io_kiocb *req, struct io_kiocb *shadow)
{
int ret;
int need_submit = false;
+ struct io_ring_ctx *ctx = req->ctx;
if (!shadow)
- return io_queue_sqe(ctx, req, s);
+ return io_queue_sqe(req);
/*
* Mark the first IO in link list as DRAIN, let all the following
@@ -2410,12 +2942,12 @@ static int io_queue_link_head(struct io_ring_ctx *ctx, struct io_kiocb *req,
* list.
*/
req->flags |= REQ_F_IO_DRAIN;
- ret = io_req_defer(ctx, req, s->sqe);
+ ret = io_req_defer(req);
if (ret) {
if (ret != -EIOCBQUEUED) {
- io_free_req(req);
+ io_cqring_add_event(req, ret);
+ io_double_put_req(req);
__io_free_req(shadow);
- io_cqring_add_event(ctx, s->sqe->user_data, ret);
return 0;
}
} else {
@@ -2428,47 +2960,42 @@ static int io_queue_link_head(struct io_ring_ctx *ctx, struct io_kiocb *req,
/* Insert shadow req to defer_list, blocking next IOs */
spin_lock_irq(&ctx->completion_lock);
+ trace_io_uring_defer(ctx, shadow, true);
list_add_tail(&shadow->list, &ctx->defer_list);
spin_unlock_irq(&ctx->completion_lock);
if (need_submit)
- return __io_queue_sqe(ctx, req, s);
+ return __io_queue_sqe(req);
return 0;
}
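
The shadow-request machinery above exists so that IOSQE_IO_DRAIN keeps working inside a link chain; from userspace the flag is simply an ordering barrier. A sketch, reusing the hypothetical next_sqe() helper:

    #include <linux/io_uring.h>
    #include <string.h>

    extern struct io_uring_sqe *next_sqe(void);    /* hypothetical */

    /* A drained fsync acts as a barrier: it starts only after every
     * previously submitted SQE has completed. */
    static void queue_fsync_barrier(int fd)
    {
        struct io_uring_sqe *sqe = next_sqe();

        memset(sqe, 0, sizeof(*sqe));
        sqe->opcode = IORING_OP_FSYNC;
        sqe->fd = fd;
        sqe->flags = IOSQE_IO_DRAIN;
    }
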
#define SQE_VALID_FLAGS (IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK)
-static void io_submit_sqe(struct io_ring_ctx *ctx, struct sqe_submit *s,
- struct io_submit_state *state, struct io_kiocb **link)
+static void io_submit_sqe(struct io_kiocb *req, struct io_submit_state *state,
+ struct io_kiocb **link)
{
struct io_uring_sqe *sqe_copy;
- struct io_kiocb *req;
+ struct sqe_submit *s = &req->submit;
+ struct io_ring_ctx *ctx = req->ctx;
int ret;
+ req->user_data = s->sqe->user_data;
+
/* enforce forwards compatibility on users */
if (unlikely(s->sqe->flags & ~SQE_VALID_FLAGS)) {
ret = -EINVAL;
- goto err;
+ goto err_req;
}
- req = io_get_req(ctx, state);
- if (unlikely(!req)) {
- ret = -EAGAIN;
- goto err;
- }
-
- ret = io_req_set_file(ctx, s, state, req);
+ ret = io_req_set_file(state, req);
if (unlikely(ret)) {
err_req:
- io_free_req(req);
-err:
- io_cqring_add_event(ctx, s->sqe->user_data, ret);
+ io_cqring_add_event(req, ret);
+ io_double_put_req(req);
return;
}
- req->user_data = s->sqe->user_data;
-
/*
* If we already have a head request, queue this one for async
* submittal once the head completes. If we don't have a head but
@@ -2486,16 +3013,19 @@ err:
}
s->sqe = sqe_copy;
- memcpy(&req->submit, s, sizeof(*s));
+ trace_io_uring_link(ctx, req, prev);
list_add_tail(&req->list, &prev->link_list);
} else if (s->sqe->flags & IOSQE_IO_LINK) {
req->flags |= REQ_F_LINK;
- memcpy(&req->submit, s, sizeof(*s));
INIT_LIST_HEAD(&req->link_list);
*link = req;
+ } else if (READ_ONCE(s->sqe->opcode) == IORING_OP_LINK_TIMEOUT) {
+ /* Only valid as a linked SQE */
+ ret = -EINVAL;
+ goto err_req;
} else {
- io_queue_sqe(ctx, req, s);
+ io_queue_sqe(req);
}
}
@@ -2566,7 +3096,7 @@ static bool io_get_sqring(struct io_ring_ctx *ctx, struct sqe_submit *s)
head = READ_ONCE(sq_array[head & ctx->sq_mask]);
if (head < ctx->sq_entries) {
- s->index = head;
+ s->ring_file = NULL;
s->sqe = &ctx->sq_sqes[head];
s->sequence = ctx->cached_sq_head;
ctx->cached_sq_head++;
@@ -2581,13 +3111,19 @@ static bool io_get_sqring(struct io_ring_ctx *ctx, struct sqe_submit *s)
}
static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
- bool has_user, bool mm_fault)
+ struct file *ring_file, int ring_fd,
+ struct mm_struct **mm, bool async)
{
struct io_submit_state state, *statep = NULL;
struct io_kiocb *link = NULL;
struct io_kiocb *shadow_req = NULL;
- bool prev_was_link = false;
int i, submitted = 0;
+ bool mm_fault = false;
+
+ if (!list_empty(&ctx->cq_overflow_list)) {
+ io_cqring_overflow_flush(ctx, false);
+ return -EBUSY;
+ }
if (nr > IO_PLUG_THRESHOLD) {
io_submit_state_start(&state, ctx, nr);
@@ -2595,23 +3131,31 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
}
for (i = 0; i < nr; i++) {
- struct sqe_submit s;
+ struct io_kiocb *req;
+ unsigned int sqe_flags;
- if (!io_get_sqring(ctx, &s))
+ req = io_get_req(ctx, statep);
+ if (unlikely(!req)) {
+ if (!submitted)
+ submitted = -EAGAIN;
+ break;
+ }
+ if (!io_get_sqring(ctx, &req->submit)) {
+ __io_free_req(req);
break;
+ }
- /*
- * If previous wasn't linked and we have a linked command,
- * that's the end of the chain. Submit the previous link.
- */
- if (!prev_was_link && link) {
- io_queue_link_head(ctx, link, &link->submit, shadow_req);
- link = NULL;
- shadow_req = NULL;
+ if (io_sqe_needs_user(req->submit.sqe) && !*mm) {
+ mm_fault = mm_fault || !mmget_not_zero(ctx->sqo_mm);
+ if (!mm_fault) {
+ use_mm(ctx->sqo_mm);
+ *mm = ctx->sqo_mm;
+ }
}
- prev_was_link = (s.sqe->flags & IOSQE_IO_LINK) != 0;
- if (link && (s.sqe->flags & IOSQE_IO_DRAIN)) {
+ sqe_flags = req->submit.sqe->flags;
+
+ if (link && (sqe_flags & IOSQE_IO_DRAIN)) {
if (!shadow_req) {
shadow_req = io_get_req(ctx, NULL);
if (unlikely(!shadow_req))
@@ -2619,27 +3163,39 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
shadow_req->flags |= (REQ_F_IO_DRAIN | REQ_F_SHADOW_DRAIN);
refcount_dec(&shadow_req->refs);
}
- shadow_req->sequence = s.sequence;
+ shadow_req->sequence = req->submit.sequence;
}
out:
- if (unlikely(mm_fault)) {
- io_cqring_add_event(ctx, s.sqe->user_data,
- -EFAULT);
- } else {
- s.has_user = has_user;
- s.needs_lock = true;
- s.needs_fixed_file = true;
- io_submit_sqe(ctx, &s, statep, &link);
- submitted++;
+ req->submit.ring_file = ring_file;
+ req->submit.ring_fd = ring_fd;
+ req->submit.has_user = *mm != NULL;
+ req->submit.in_async = async;
+ req->submit.needs_fixed_file = async;
+ trace_io_uring_submit_sqe(ctx, req->submit.sqe->user_data,
+ true, async);
+ io_submit_sqe(req, statep, &link);
+ submitted++;
+
+ /*
+ * If the current SQE isn't flagged as a link and we have a pending
+ * chain, that's the end of the chain. Submit the previous link.
+ */
+ if (!(sqe_flags & IOSQE_IO_LINK) && link) {
+ io_queue_link_head(link, shadow_req);
+ link = NULL;
+ shadow_req = NULL;
}
}
if (link)
- io_queue_link_head(ctx, link, &link->submit, shadow_req);
+ io_queue_link_head(link, shadow_req);
if (statep)
io_submit_state_end(&state);
+ /* Commit SQ ring head once we've consumed and submitted all SQEs */
+ io_commit_sqring(ctx);
+
return submitted;
}
@@ -2651,15 +3207,15 @@ static int io_sq_thread(void *data)
DEFINE_WAIT(wait);
unsigned inflight;
unsigned long timeout;
+ int ret;
- complete(&ctx->sqo_thread_started);
+ complete(&ctx->completions[1]);
old_fs = get_fs();
set_fs(USER_DS);
- timeout = inflight = 0;
+ ret = timeout = inflight = 0;
while (!kthread_should_park()) {
- bool mm_fault = false;
unsigned int to_submit;
if (inflight) {
@@ -2694,13 +3250,21 @@ static int io_sq_thread(void *data)
}
to_submit = io_sqring_entries(ctx);
- if (!to_submit) {
+
+ /*
+ * If submit got -EBUSY, flag us as needing the application
+ * to enter the kernel to reap and flush events.
+ */
+ if (!to_submit || ret == -EBUSY) {
/*
* We're polling. If we're within the defined idle
* period, then let us spin without work before going
- * to sleep.
+ * to sleep. The exception is if we got -EBUSY doing
+ * more IO; in that case, wait for the application to
+ * reap events and wake us up.
*/
- if (inflight || !time_after(jiffies, timeout)) {
+ if (inflight ||
+ (!time_after(jiffies, timeout) && ret != -EBUSY)) {
cond_resched();
continue;
}
@@ -2726,7 +3290,7 @@ static int io_sq_thread(void *data)
smp_mb();
to_submit = io_sqring_entries(ctx);
- if (!to_submit) {
+ if (!to_submit || ret == -EBUSY) {
if (kthread_should_park()) {
finish_wait(&ctx->sqo_wait, &wait);
break;
@@ -2744,21 +3308,10 @@ static int io_sq_thread(void *data)
ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
}
- /* Unless all new commands are FIXED regions, grab mm */
- if (!cur_mm) {
- mm_fault = !mmget_not_zero(ctx->sqo_mm);
- if (!mm_fault) {
- use_mm(ctx->sqo_mm);
- cur_mm = ctx->sqo_mm;
- }
- }
-
to_submit = min(to_submit, ctx->sq_entries);
- inflight += io_submit_sqes(ctx, to_submit, cur_mm != NULL,
- mm_fault);
-
- /* Commit SQ ring head once we've consumed all SQEs */
- io_commit_sqring(ctx);
+ ret = io_submit_sqes(ctx, to_submit, NULL, -1, &cur_mm, true);
+ if (ret > 0)
+ inflight += ret;
}
set_fs(old_fs);
@@ -2772,65 +3325,6 @@ static int io_sq_thread(void *data)
return 0;
}
-static int io_ring_submit(struct io_ring_ctx *ctx, unsigned int to_submit)
-{
- struct io_submit_state state, *statep = NULL;
- struct io_kiocb *link = NULL;
- struct io_kiocb *shadow_req = NULL;
- bool prev_was_link = false;
- int i, submit = 0;
-
- if (to_submit > IO_PLUG_THRESHOLD) {
- io_submit_state_start(&state, ctx, to_submit);
- statep = &state;
- }
-
- for (i = 0; i < to_submit; i++) {
- struct sqe_submit s;
-
- if (!io_get_sqring(ctx, &s))
- break;
-
- /*
- * If previous wasn't linked and we have a linked command,
- * that's the end of the chain. Submit the previous link.
- */
- if (!prev_was_link && link) {
- io_queue_link_head(ctx, link, &link->submit, shadow_req);
- link = NULL;
- shadow_req = NULL;
- }
- prev_was_link = (s.sqe->flags & IOSQE_IO_LINK) != 0;
-
- if (link && (s.sqe->flags & IOSQE_IO_DRAIN)) {
- if (!shadow_req) {
- shadow_req = io_get_req(ctx, NULL);
- if (unlikely(!shadow_req))
- goto out;
- shadow_req->flags |= (REQ_F_IO_DRAIN | REQ_F_SHADOW_DRAIN);
- refcount_dec(&shadow_req->refs);
- }
- shadow_req->sequence = s.sequence;
- }
-
-out:
- s.has_user = true;
- s.needs_lock = false;
- s.needs_fixed_file = false;
- submit++;
- io_submit_sqe(ctx, &s, statep, &link);
- }
-
- if (link)
- io_queue_link_head(ctx, link, &link->submit, shadow_req);
- if (statep)
- io_submit_state_end(statep);
-
- io_commit_sqring(ctx);
-
- return submit;
-}
-
struct io_wait_queue {
struct wait_queue_entry wq;
struct io_ring_ctx *ctx;
@@ -2838,7 +3332,7 @@ struct io_wait_queue {
unsigned nr_timeouts;
};
-static inline bool io_should_wake(struct io_wait_queue *iowq)
+static inline bool io_should_wake(struct io_wait_queue *iowq, bool noflush)
{
struct io_ring_ctx *ctx = iowq->ctx;
@@ -2847,7 +3341,7 @@ static inline bool io_should_wake(struct io_wait_queue *iowq)
* started waiting. For timeouts, we always want to return to userspace,
* regardless of event count.
*/
- return io_cqring_events(ctx->rings) >= iowq->to_wait ||
+ return io_cqring_events(ctx, noflush) >= iowq->to_wait ||
atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
}
@@ -2857,7 +3351,8 @@ static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
struct io_wait_queue *iowq = container_of(curr, struct io_wait_queue,
wq);
- if (!io_should_wake(iowq))
+ /* use noflush == true, as we can't safely rely on locking context */
+ if (!io_should_wake(iowq, true))
return -1;
return autoremove_wake_function(curr, mode, wake_flags, key);
@@ -2880,9 +3375,9 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
.to_wait = min_events,
};
struct io_rings *rings = ctx->rings;
- int ret;
+ int ret = 0;
- if (io_cqring_events(rings) >= min_events)
+ if (io_cqring_events(ctx, false) >= min_events)
return 0;
if (sig) {
@@ -2898,24 +3393,22 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
return ret;
}
- ret = 0;
iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
+ trace_io_uring_cqring_wait(ctx, min_events);
do {
prepare_to_wait_exclusive(&ctx->wait, &iowq.wq,
TASK_INTERRUPTIBLE);
- if (io_should_wake(&iowq))
+ if (io_should_wake(&iowq, false))
break;
schedule();
if (signal_pending(current)) {
- ret = -ERESTARTSYS;
+ ret = -EINTR;
break;
}
} while (1);
finish_wait(&ctx->wait, &iowq.wq);
- restore_saved_sigmask_unless(ret == -ERESTARTSYS);
- if (ret == -ERESTARTSYS)
- ret = -EINTR;
+ restore_saved_sigmask_unless(ret == -EINTR);
return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
}
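
io_cqring_wait() above backs the IORING_ENTER_GETEVENTS path; a caller that only wants to block for completions can pass zero for to_submit. A minimal sketch using the raw syscall (the last two arguments are the optional signal mask and its size):

    #include <linux/io_uring.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* Submit nothing, sleep until at least one CQE is available. */
    static long wait_for_one_cqe(int ring_fd)
    {
        return syscall(__NR_io_uring_enter, ring_fd, 0, 1,
                       IORING_ENTER_GETEVENTS, NULL, 0);
    }
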
@@ -2933,19 +3426,29 @@ static void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
#else
int i;
- for (i = 0; i < ctx->nr_user_files; i++)
- fput(ctx->user_files[i]);
+ for (i = 0; i < ctx->nr_user_files; i++) {
+ struct file *file;
+
+ file = io_file_from_index(ctx, i);
+ if (file)
+ fput(file);
+ }
#endif
}
static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
{
- if (!ctx->user_files)
+ unsigned nr_tables, i;
+
+ if (!ctx->file_table)
return -ENXIO;
__io_sqe_files_unregister(ctx);
- kfree(ctx->user_files);
- ctx->user_files = NULL;
+ nr_tables = DIV_ROUND_UP(ctx->nr_user_files, IORING_MAX_FILES_TABLE);
+ for (i = 0; i < nr_tables; i++)
+ kfree(ctx->file_table[i].files);
+ kfree(ctx->file_table);
+ ctx->file_table = NULL;
ctx->nr_user_files = 0;
return 0;
}
@@ -2953,7 +3456,7 @@ static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
static void io_sq_thread_stop(struct io_ring_ctx *ctx)
{
if (ctx->sqo_thread) {
- wait_for_completion(&ctx->sqo_thread_started);
+ wait_for_completion(&ctx->completions[1]);
/*
* The park is a bit of a work-around; without it we get
* warning spews on shutdown with SQPOLL set and affinity
@@ -2967,15 +3470,11 @@ static void io_sq_thread_stop(struct io_ring_ctx *ctx)
static void io_finish_async(struct io_ring_ctx *ctx)
{
- int i;
-
io_sq_thread_stop(ctx);
- for (i = 0; i < ARRAY_SIZE(ctx->sqo_wq); i++) {
- if (ctx->sqo_wq[i]) {
- destroy_workqueue(ctx->sqo_wq[i]);
- ctx->sqo_wq[i] = NULL;
- }
+ if (ctx->io_wq) {
+ io_wq_destroy(ctx->io_wq);
+ ctx->io_wq = NULL;
}
}
@@ -2983,11 +3482,9 @@ static void io_finish_async(struct io_ring_ctx *ctx)
static void io_destruct_skb(struct sk_buff *skb)
{
struct io_ring_ctx *ctx = skb->sk->sk_user_data;
- int i;
- for (i = 0; i < ARRAY_SIZE(ctx->sqo_wq); i++)
- if (ctx->sqo_wq[i])
- flush_workqueue(ctx->sqo_wq[i]);
+ if (ctx->io_wq)
+ io_wq_flush(ctx->io_wq);
unix_destruct_scm(skb);
}
@@ -3002,7 +3499,7 @@ static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset)
struct sock *sk = ctx->ring_sock->sk;
struct scm_fp_list *fpl;
struct sk_buff *skb;
- int i;
+ int i, nr_files;
if (!capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN)) {
unsigned long inflight = ctx->user->unix_inflight + nr;
@@ -3022,21 +3519,33 @@ static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset)
}
skb->sk = sk;
- skb->destructor = io_destruct_skb;
+ nr_files = 0;
fpl->user = get_uid(ctx->user);
for (i = 0; i < nr; i++) {
- fpl->fp[i] = get_file(ctx->user_files[i + offset]);
- unix_inflight(fpl->user, fpl->fp[i]);
+ struct file *file = io_file_from_index(ctx, i + offset);
+
+ if (!file)
+ continue;
+ fpl->fp[nr_files] = get_file(file);
+ unix_inflight(fpl->user, fpl->fp[nr_files]);
+ nr_files++;
}
- fpl->max = fpl->count = nr;
- UNIXCB(skb).fp = fpl;
- refcount_add(skb->truesize, &sk->sk_wmem_alloc);
- skb_queue_head(&sk->sk_receive_queue, skb);
+ if (nr_files) {
+ fpl->max = SCM_MAX_FD;
+ fpl->count = nr_files;
+ UNIXCB(skb).fp = fpl;
+ skb->destructor = io_destruct_skb;
+ refcount_add(skb->truesize, &sk->sk_wmem_alloc);
+ skb_queue_head(&sk->sk_receive_queue, skb);
- for (i = 0; i < nr; i++)
- fput(fpl->fp[i]);
+ for (i = 0; i < nr_files; i++)
+ fput(fpl->fp[i]);
+ } else {
+ kfree_skb(skb);
+ kfree(fpl);
+ }
return 0;
}
@@ -3067,7 +3576,10 @@ static int io_sqe_files_scm(struct io_ring_ctx *ctx)
return 0;
while (total < ctx->nr_user_files) {
- fput(ctx->user_files[total]);
+ struct file *file = io_file_from_index(ctx, total);
+
+ if (file)
+ fput(file);
total++;
}
@@ -3080,33 +3592,79 @@ static int io_sqe_files_scm(struct io_ring_ctx *ctx)
}
#endif
+static int io_sqe_alloc_file_tables(struct io_ring_ctx *ctx, unsigned nr_tables,
+ unsigned nr_files)
+{
+ int i;
+
+ for (i = 0; i < nr_tables; i++) {
+ struct fixed_file_table *table = &ctx->file_table[i];
+ unsigned this_files;
+
+ this_files = min(nr_files, IORING_MAX_FILES_TABLE);
+ table->files = kcalloc(this_files, sizeof(struct file *),
+ GFP_KERNEL);
+ if (!table->files)
+ break;
+ nr_files -= this_files;
+ }
+
+ if (i == nr_tables)
+ return 0;
+
+ for (i = 0; i < nr_tables; i++) {
+ struct fixed_file_table *table = &ctx->file_table[i];
+ kfree(table->files);
+ }
+ return 1;
+}
+
static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
unsigned nr_args)
{
__s32 __user *fds = (__s32 __user *) arg;
+ unsigned nr_tables;
int fd, ret = 0;
unsigned i;
- if (ctx->user_files)
+ if (ctx->file_table)
return -EBUSY;
if (!nr_args)
return -EINVAL;
if (nr_args > IORING_MAX_FIXED_FILES)
return -EMFILE;
- ctx->user_files = kcalloc(nr_args, sizeof(struct file *), GFP_KERNEL);
- if (!ctx->user_files)
+ nr_tables = DIV_ROUND_UP(nr_args, IORING_MAX_FILES_TABLE);
+ ctx->file_table = kcalloc(nr_tables, sizeof(struct fixed_file_table),
+ GFP_KERNEL);
+ if (!ctx->file_table)
+ return -ENOMEM;
+
+ if (io_sqe_alloc_file_tables(ctx, nr_tables, nr_args)) {
+ kfree(ctx->file_table);
+ ctx->file_table = NULL;
return -ENOMEM;
+ }
+
+ for (i = 0; i < nr_args; i++, ctx->nr_user_files++) {
+ struct fixed_file_table *table;
+ unsigned index;
- for (i = 0; i < nr_args; i++) {
ret = -EFAULT;
if (copy_from_user(&fd, &fds[i], sizeof(fd)))
break;
+ /* allow sparse sets */
+ if (fd == -1) {
+ ret = 0;
+ continue;
+ }
- ctx->user_files[i] = fget(fd);
+ table = &ctx->file_table[i >> IORING_FILE_TABLE_SHIFT];
+ index = i & IORING_FILE_TABLE_MASK;
+ table->files[index] = fget(fd);
ret = -EBADF;
- if (!ctx->user_files[i])
+ if (!table->files[index])
break;
/*
* Don't allow io_uring instances to be registered. If UNIX
@@ -3115,20 +3673,26 @@ static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
* handle it just fine, but there's still no point in allowing
* a ring fd as it doesn't support regular read/write anyway.
*/
- if (ctx->user_files[i]->f_op == &io_uring_fops) {
- fput(ctx->user_files[i]);
+ if (table->files[index]->f_op == &io_uring_fops) {
+ fput(table->files[index]);
break;
}
- ctx->nr_user_files++;
ret = 0;
}
if (ret) {
- for (i = 0; i < ctx->nr_user_files; i++)
- fput(ctx->user_files[i]);
+ for (i = 0; i < ctx->nr_user_files; i++) {
+ struct file *file;
+
+ file = io_file_from_index(ctx, i);
+ if (file)
+ fput(file);
+ }
+ for (i = 0; i < nr_tables; i++)
+ kfree(ctx->file_table[i].files);
- kfree(ctx->user_files);
- ctx->user_files = NULL;
+ kfree(ctx->file_table);
+ ctx->file_table = NULL;
ctx->nr_user_files = 0;
return ret;
}
@@ -3140,9 +3704,201 @@ static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
return ret;
}
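
With sparse sets now allowed, userspace can reserve fixed-file slots up front and fill them in later. A hedged example registering a four-slot table where only slot 0 is populated:

    #include <linux/io_uring.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static int register_sparse_files(int ring_fd, int real_fd)
    {
        __s32 fds[4] = { real_fd, -1, -1, -1 };    /* -1 = empty slot */

        return syscall(__NR_io_uring_register, ring_fd,
                       IORING_REGISTER_FILES, fds, 4);
    }
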
+static void io_sqe_file_unregister(struct io_ring_ctx *ctx, int index)
+{
+#if defined(CONFIG_UNIX)
+ struct file *file = io_file_from_index(ctx, index);
+ struct sock *sock = ctx->ring_sock->sk;
+ struct sk_buff_head list, *head = &sock->sk_receive_queue;
+ struct sk_buff *skb;
+ int i;
+
+ __skb_queue_head_init(&list);
+
+ /*
+ * Find the skb that holds this file in its SCM_RIGHTS. When found,
+ * remove this entry and rearrange the file array.
+ */
+ skb = skb_dequeue(head);
+ while (skb) {
+ struct scm_fp_list *fp;
+
+ fp = UNIXCB(skb).fp;
+ for (i = 0; i < fp->count; i++) {
+ int left;
+
+ if (fp->fp[i] != file)
+ continue;
+
+ unix_notinflight(fp->user, fp->fp[i]);
+ left = fp->count - 1 - i;
+ if (left) {
+ memmove(&fp->fp[i], &fp->fp[i + 1],
+ left * sizeof(struct file *));
+ }
+ fp->count--;
+ if (!fp->count) {
+ kfree_skb(skb);
+ skb = NULL;
+ } else {
+ __skb_queue_tail(&list, skb);
+ }
+ fput(file);
+ file = NULL;
+ break;
+ }
+
+ if (!file)
+ break;
+
+ __skb_queue_tail(&list, skb);
+
+ skb = skb_dequeue(head);
+ }
+
+ if (skb_peek(&list)) {
+ spin_lock_irq(&head->lock);
+ while ((skb = __skb_dequeue(&list)) != NULL)
+ __skb_queue_tail(head, skb);
+ spin_unlock_irq(&head->lock);
+ }
+#else
+ fput(io_file_from_index(ctx, index));
+#endif
+}
+
+static int io_sqe_file_register(struct io_ring_ctx *ctx, struct file *file,
+ int index)
+{
+#if defined(CONFIG_UNIX)
+ struct sock *sock = ctx->ring_sock->sk;
+ struct sk_buff_head *head = &sock->sk_receive_queue;
+ struct sk_buff *skb;
+
+ /*
+ * See if we can merge this file into an existing skb SCM_RIGHTS
+ * file set. If there's no room, fall back to allocating a new skb
+ * and filling it in.
+ */
+ spin_lock_irq(&head->lock);
+ skb = skb_peek(head);
+ if (skb) {
+ struct scm_fp_list *fpl = UNIXCB(skb).fp;
+
+ if (fpl->count < SCM_MAX_FD) {
+ __skb_unlink(skb, head);
+ spin_unlock_irq(&head->lock);
+ fpl->fp[fpl->count] = get_file(file);
+ unix_inflight(fpl->user, fpl->fp[fpl->count]);
+ fpl->count++;
+ spin_lock_irq(&head->lock);
+ __skb_queue_head(head, skb);
+ } else {
+ skb = NULL;
+ }
+ }
+ spin_unlock_irq(&head->lock);
+
+ if (skb) {
+ fput(file);
+ return 0;
+ }
+
+ return __io_sqe_files_scm(ctx, 1, index);
+#else
+ return 0;
+#endif
+}
+
+static int io_sqe_files_update(struct io_ring_ctx *ctx, void __user *arg,
+ unsigned nr_args)
+{
+ struct io_uring_files_update up;
+ __s32 __user *fds;
+ int fd, i, err;
+ __u32 done;
+
+ if (!ctx->file_table)
+ return -ENXIO;
+ if (!nr_args)
+ return -EINVAL;
+ if (copy_from_user(&up, arg, sizeof(up)))
+ return -EFAULT;
+ if (check_add_overflow(up.offset, nr_args, &done))
+ return -EOVERFLOW;
+ if (done > ctx->nr_user_files)
+ return -EINVAL;
+
+ done = 0;
+ fds = (__s32 __user *) up.fds;
+ while (nr_args) {
+ struct fixed_file_table *table;
+ unsigned index;
+
+ err = 0;
+ if (copy_from_user(&fd, &fds[done], sizeof(fd))) {
+ err = -EFAULT;
+ break;
+ }
+ i = array_index_nospec(up.offset, ctx->nr_user_files);
+ table = &ctx->file_table[i >> IORING_FILE_TABLE_SHIFT];
+ index = i & IORING_FILE_TABLE_MASK;
+ if (table->files[index]) {
+ io_sqe_file_unregister(ctx, i);
+ table->files[index] = NULL;
+ }
+ if (fd != -1) {
+ struct file *file;
+
+ file = fget(fd);
+ if (!file) {
+ err = -EBADF;
+ break;
+ }
+ /*
+ * Don't allow io_uring instances to be registered. If
+ * UNIX isn't enabled, then this causes a reference
+ * cycle and this instance can never get freed. If UNIX
+ * is enabled we'll handle it just fine, but there's
+ * still no point in allowing a ring fd as it doesn't
+ * support regular read/write anyway.
+ */
+ if (file->f_op == &io_uring_fops) {
+ fput(file);
+ err = -EBADF;
+ break;
+ }
+ table->files[index] = file;
+ err = io_sqe_file_register(ctx, file, i);
+ if (err)
+ break;
+ }
+ nr_args--;
+ done++;
+ up.offset++;
+ }
+
+ return done ? done : err;
+}
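
The userspace counterpart of io_sqe_files_update() goes through io_uring_register() with IORING_REGISTER_FILES_UPDATE. A sketch, assuming the uapi struct io_uring_files_update carries the offset plus a 64-bit fds field (the exact field type varied during this cycle); passing -1 clears a slot instead:

    #include <linux/io_uring.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static int swap_fixed_slot(int ring_fd, unsigned int slot, int new_fd)
    {
        __s32 fd = new_fd;    /* or -1 to clear the slot */
        struct io_uring_files_update up;

        memset(&up, 0, sizeof(up));
        up.offset = slot;
        up.fds = (unsigned long) &fd;    /* assumed 64-bit fds field */

        return syscall(__NR_io_uring_register, ring_fd,
                       IORING_REGISTER_FILES_UPDATE, &up, 1);
    }
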
+
+static void io_put_work(struct io_wq_work *work)
+{
+ struct io_kiocb *req = container_of(work, struct io_kiocb, work);
+
+ io_put_req(req);
+}
+
+static void io_get_work(struct io_wq_work *work)
+{
+ struct io_kiocb *req = container_of(work, struct io_kiocb, work);
+
+ refcount_inc(&req->refs);
+}
+
static int io_sq_offload_start(struct io_ring_ctx *ctx,
struct io_uring_params *p)
{
+ unsigned concurrency;
int ret;
init_waitqueue_head(&ctx->sqo_wait);
@@ -3186,26 +3942,13 @@ static int io_sq_offload_start(struct io_ring_ctx *ctx,
goto err;
}
- /* Do QD, or 2 * CPUS, whatever is smallest */
- ctx->sqo_wq[0] = alloc_workqueue("io_ring-wq",
- WQ_UNBOUND | WQ_FREEZABLE,
- min(ctx->sq_entries - 1, 2 * num_online_cpus()));
- if (!ctx->sqo_wq[0]) {
- ret = -ENOMEM;
- goto err;
- }
-
- /*
- * This is for buffered writes, where we want to limit the parallelism
- * due to file locking in file systems. As "normal" buffered writes
- * should parallelize on writeout quite nicely, limit us to having 2
- * pending. This avoids massive contention on the inode when doing
- * buffered async writes.
- */
- ctx->sqo_wq[1] = alloc_workqueue("io_ring-write-wq",
- WQ_UNBOUND | WQ_FREEZABLE, 2);
- if (!ctx->sqo_wq[1]) {
- ret = -ENOMEM;
+ /* Do QD, or 4 * CPUS, whatever is smallest */
+ concurrency = min(ctx->sq_entries, 4 * num_online_cpus());
+ ctx->io_wq = io_wq_create(concurrency, ctx->sqo_mm, ctx->user,
+ io_get_work, io_put_work);
+ if (IS_ERR(ctx->io_wq)) {
+ ret = PTR_ERR(ctx->io_wq);
+ ctx->io_wq = NULL;
goto err;
}
@@ -3551,6 +4294,8 @@ static void io_ring_ctx_free(struct io_ring_ctx *ctx)
io_unaccount_mem(ctx->user,
ring_pages(ctx->sq_entries, ctx->cq_entries));
free_uid(ctx->user);
+ kfree(ctx->completions);
+ kmem_cache_free(req_cachep, ctx->fallback_req);
kfree(ctx);
}
@@ -3589,8 +4334,15 @@ static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
io_kill_timeouts(ctx);
io_poll_remove_all(ctx);
+
+ if (ctx->io_wq)
+ io_wq_cancel_all(ctx->io_wq);
+
io_iopoll_reap_events(ctx);
- wait_for_completion(&ctx->ctx_done);
+ /* if we failed setting up the ctx, we might not have any rings */
+ if (ctx->rings)
+ io_cqring_overflow_flush(ctx, true);
+ wait_for_completion(&ctx->completions[0]);
io_ring_ctx_free(ctx);
}
@@ -3603,6 +4355,53 @@ static int io_uring_release(struct inode *inode, struct file *file)
return 0;
}
+static void io_uring_cancel_files(struct io_ring_ctx *ctx,
+ struct files_struct *files)
+{
+ struct io_kiocb *req;
+ DEFINE_WAIT(wait);
+
+ while (!list_empty_careful(&ctx->inflight_list)) {
+ struct io_kiocb *cancel_req = NULL;
+
+ spin_lock_irq(&ctx->inflight_lock);
+ list_for_each_entry(req, &ctx->inflight_list, inflight_entry) {
+ if (req->work.files != files)
+ continue;
+ /* req is being completed, ignore */
+ if (!refcount_inc_not_zero(&req->refs))
+ continue;
+ cancel_req = req;
+ break;
+ }
+ if (cancel_req)
+ prepare_to_wait(&ctx->inflight_wait, &wait,
+ TASK_UNINTERRUPTIBLE);
+ spin_unlock_irq(&ctx->inflight_lock);
+
+ /* We need to keep going until we don't find a matching req */
+ if (!cancel_req)
+ break;
+
+ io_wq_cancel_work(ctx->io_wq, &cancel_req->work);
+ io_put_req(cancel_req);
+ schedule();
+ }
+ finish_wait(&ctx->inflight_wait, &wait);
+}
+
+static int io_uring_flush(struct file *file, void *data)
+{
+ struct io_ring_ctx *ctx = file->private_data;
+
+ io_uring_cancel_files(ctx, data);
+ if (fatal_signal_pending(current) || (current->flags & PF_EXITING)) {
+ io_cqring_overflow_flush(ctx, true);
+ io_wq_cancel_all(ctx->io_wq);
+ }
+ return 0;
+}
+
static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
{
loff_t offset = (loff_t) vma->vm_pgoff << PAGE_SHIFT;
@@ -3664,14 +4463,20 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
*/
ret = 0;
if (ctx->flags & IORING_SETUP_SQPOLL) {
+ if (!list_empty_careful(&ctx->cq_overflow_list))
+ io_cqring_overflow_flush(ctx, false);
if (flags & IORING_ENTER_SQ_WAKEUP)
wake_up(&ctx->sqo_wait);
submitted = to_submit;
} else if (to_submit) {
- to_submit = min(to_submit, ctx->sq_entries);
+ struct mm_struct *cur_mm;
+ to_submit = min(to_submit, ctx->sq_entries);
mutex_lock(&ctx->uring_lock);
- submitted = io_ring_submit(ctx, to_submit);
+ /* already have mm, so io_submit_sqes() won't try to grab it */
+ cur_mm = ctx->sqo_mm;
+ submitted = io_submit_sqes(ctx, to_submit, f.file, fd,
+ &cur_mm, false);
mutex_unlock(&ctx->uring_lock);
}
if (flags & IORING_ENTER_GETEVENTS) {
@@ -3694,6 +4499,7 @@ out_fput:
static const struct file_operations io_uring_fops = {
.release = io_uring_release,
+ .flush = io_uring_flush,
.mmap = io_uring_mmap,
.poll = io_uring_poll,
.fasync = io_uring_fasync,
@@ -3793,10 +4599,23 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p)
* Use twice as many entries for the CQ ring. It's possible for the
* application to drive a higher depth than the size of the SQ ring,
* since the sqes are only used at submission time. This allows for
- * some flexibility in overcommitting a bit.
+ * some flexibility in overcommitting a bit. If the application has
+ * set IORING_SETUP_CQSIZE, it will have passed in the desired number
+ * of CQ ring entries manually.
*/
p->sq_entries = roundup_pow_of_two(entries);
- p->cq_entries = 2 * p->sq_entries;
+ if (p->flags & IORING_SETUP_CQSIZE) {
+ /*
+ * If IORING_SETUP_CQSIZE is set, we do the same roundup
+ * to a power-of-two, if it isn't already. We don't impose the
+ * usual 2x sizing, but cq_entries must be at least sq_entries.
+ */
+ if (p->cq_entries < p->sq_entries || p->cq_entries > IORING_MAX_CQ_ENTRIES)
+ return -EINVAL;
+ p->cq_entries = roundup_pow_of_two(p->cq_entries);
+ } else {
+ p->cq_entries = 2 * p->sq_entries;
+ }
user = get_uid(current_user());
account_mem = !capable(CAP_IPC_LOCK);
@@ -3855,7 +4674,8 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p)
if (ret < 0)
goto err;
- p->features = IORING_FEAT_SINGLE_MMAP;
+ p->features = IORING_FEAT_SINGLE_MMAP | IORING_FEAT_NODROP;
+ trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags);
return ret;
err:
io_ring_ctx_wait_and_kill(ctx);
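
For the CQ sizing change above, userspace opts in with IORING_SETUP_CQSIZE and supplies cq_entries itself. A hedged sketch pairing a small SQ ring with a much deeper CQ ring:

    #include <linux/io_uring.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static int setup_ring_with_big_cq(struct io_uring_params *p)
    {
        memset(p, 0, sizeof(*p));
        p->flags = IORING_SETUP_CQSIZE;
        p->cq_entries = 4096;    /* rounded up; must be >= sq_entries */

        return syscall(__NR_io_uring_setup, 64, p);
    }

On success, p->features & IORING_FEAT_NODROP indicates that overflowed CQEs are queued rather than dropped, matching the overflow handling added earlier in this patch.
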
@@ -3881,7 +4701,7 @@ static long io_uring_setup(u32 entries, struct io_uring_params __user *params)
}
if (p.flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL |
- IORING_SETUP_SQ_AFF))
+ IORING_SETUP_SQ_AFF | IORING_SETUP_CQSIZE))
return -EINVAL;
ret = io_uring_create(entries, &p);
@@ -3925,7 +4745,7 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
* no new references will come in after we've killed the percpu ref.
*/
mutex_unlock(&ctx->uring_lock);
- wait_for_completion(&ctx->ctx_done);
+ wait_for_completion(&ctx->completions[0]);
mutex_lock(&ctx->uring_lock);
switch (opcode) {
@@ -3947,6 +4767,9 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
break;
ret = io_sqe_files_unregister(ctx);
break;
+ case IORING_REGISTER_FILES_UPDATE:
+ ret = io_sqe_files_update(ctx, arg, nr_args);
+ break;
case IORING_REGISTER_EVENTFD:
ret = -EINVAL;
if (nr_args != 1)
@@ -3965,7 +4788,7 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
}
/* bring the ctx back to life */
- reinit_completion(&ctx->ctx_done);
+ reinit_completion(&ctx->completions[0]);
percpu_ref_reinit(&ctx->refs);
return ret;
}
@@ -3990,6 +4813,8 @@ SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
mutex_lock(&ctx->uring_lock);
ret = __io_uring_register(ctx, opcode, arg, nr_args);
mutex_unlock(&ctx->uring_lock);
+ trace_io_uring_register(ctx, opcode, ctx->nr_user_files, ctx->nr_user_bufs,
+ ctx->cq_ev_fd != NULL, ret);
out_fput:
fdput(f);
return ret;
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index bee8498d7792..b25ebdcabfa3 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -713,7 +713,7 @@ int jbd2__journal_restart(handle_t *handle, int nblocks, gfp_t gfp_mask)
if (need_to_start)
jbd2_log_start_commit(journal, tid);
- rwsem_release(&journal->j_trans_commit_map, 1, _THIS_IP_);
+ rwsem_release(&journal->j_trans_commit_map, _THIS_IP_);
handle->h_buffer_credits = nblocks;
/*
* Restore the original nofs context because the journal restart
@@ -1848,7 +1848,7 @@ int jbd2_journal_stop(handle_t *handle)
wake_up(&journal->j_wait_transaction_locked);
}
- rwsem_release(&journal->j_trans_commit_map, 1, _THIS_IP_);
+ rwsem_release(&journal->j_trans_commit_map, _THIS_IP_);
if (wait_for_commit)
err = jbd2_log_wait_commit(journal, tid);
diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
index 6ebae6bbe6a5..9d96e6871e1a 100644
--- a/fs/kernfs/dir.c
+++ b/fs/kernfs/dir.c
@@ -438,7 +438,7 @@ void kernfs_put_active(struct kernfs_node *kn)
return;
if (kernfs_lockdep(kn))
- rwsem_release(&kn->dep_map, 1, _RET_IP_);
+ rwsem_release(&kn->dep_map, _RET_IP_);
v = atomic_dec_return(&kn->active);
if (likely(v != KN_DEACTIVATED_BIAS))
return;
@@ -476,7 +476,7 @@ static void kernfs_drain(struct kernfs_node *kn)
if (kernfs_lockdep(kn)) {
lock_acquired(&kn->dep_map, _RET_IP_);
- rwsem_release(&kn->dep_map, 1, _RET_IP_);
+ rwsem_release(&kn->dep_map, _RET_IP_);
}
kernfs_drain_open_files(kn);
@@ -508,10 +508,6 @@ void kernfs_put(struct kernfs_node *kn)
struct kernfs_node *parent;
struct kernfs_root *root;
- /*
- * kernfs_node is freed with ->count 0, kernfs_find_and_get_node_by_ino
- * depends on this to filter reused stale node
- */
if (!kn || !atomic_dec_and_test(&kn->count))
return;
root = kernfs_root(kn);
@@ -536,7 +532,7 @@ void kernfs_put(struct kernfs_node *kn)
kmem_cache_free(kernfs_iattrs_cache, kn->iattr);
}
spin_lock(&kernfs_idr_lock);
- idr_remove(&root->ino_idr, kn->id.ino);
+ idr_remove(&root->ino_idr, (u32)kernfs_ino(kn));
spin_unlock(&kernfs_idr_lock);
kmem_cache_free(kernfs_node_cache, kn);
@@ -621,8 +617,7 @@ static struct kernfs_node *__kernfs_new_node(struct kernfs_root *root,
unsigned flags)
{
struct kernfs_node *kn;
- u32 gen;
- int cursor;
+ u32 id_highbits;
int ret;
name = kstrdup_const(name, GFP_KERNEL);
@@ -635,23 +630,19 @@ static struct kernfs_node *__kernfs_new_node(struct kernfs_root *root,
idr_preload(GFP_KERNEL);
spin_lock(&kernfs_idr_lock);
- cursor = idr_get_cursor(&root->ino_idr);
ret = idr_alloc_cyclic(&root->ino_idr, kn, 1, 0, GFP_ATOMIC);
- if (ret >= 0 && ret < cursor)
- root->next_generation++;
- gen = root->next_generation;
+ if (ret >= 0 && ret < root->last_id_lowbits)
+ root->id_highbits++;
+ id_highbits = root->id_highbits;
+ root->last_id_lowbits = ret;
spin_unlock(&kernfs_idr_lock);
idr_preload_end();
if (ret < 0)
goto err_out2;
- kn->id.ino = ret;
- kn->id.generation = gen;
- /*
- * set ino first. This RELEASE is paired with atomic_inc_not_zero in
- * kernfs_find_and_get_node_by_ino
- */
- atomic_set_release(&kn->count, 1);
+ kn->id = (u64)id_highbits << 32 | ret;
+
+ atomic_set(&kn->count, 1);
atomic_set(&kn->active, KN_DEACTIVATED_BIAS);
RB_CLEAR_NODE(&kn->rb);
@@ -680,7 +671,7 @@ static struct kernfs_node *__kernfs_new_node(struct kernfs_root *root,
return kn;
err_out3:
- idr_remove(&root->ino_idr, kn->id.ino);
+ idr_remove(&root->ino_idr, (u32)kernfs_ino(kn));
err_out2:
kmem_cache_free(kernfs_node_cache, kn);
err_out1:
@@ -705,50 +696,52 @@ struct kernfs_node *kernfs_new_node(struct kernfs_node *parent,
}
/*
- * kernfs_find_and_get_node_by_ino - get kernfs_node from inode number
+ * kernfs_find_and_get_node_by_id - get kernfs_node from node id
* @root: the kernfs root
- * @ino: inode number
+ * @id: the target node id
+ *
+ * @id's lower 32 bits encode the inode number and its upper 32 bits the
+ * generation. If the generation portion is zero, all generations match.
*
* RETURNS:
* NULL on failure. Return a kernfs node with reference counter incremented
*/
-struct kernfs_node *kernfs_find_and_get_node_by_ino(struct kernfs_root *root,
- unsigned int ino)
+struct kernfs_node *kernfs_find_and_get_node_by_id(struct kernfs_root *root,
+ u64 id)
{
struct kernfs_node *kn;
+ ino_t ino = kernfs_id_ino(id);
+ u32 gen = kernfs_id_gen(id);
- rcu_read_lock();
- kn = idr_find(&root->ino_idr, ino);
+ spin_lock(&kernfs_idr_lock);
+
+ kn = idr_find(&root->ino_idr, (u32)ino);
if (!kn)
- goto out;
+ goto err_unlock;
- /*
- * Since kernfs_node is freed in RCU, it's possible an old node for ino
- * is freed, but reused before RCU grace period. But a freed node (see
- * kernfs_put) or an incompletely initialized node (see
- * __kernfs_new_node) should have 'count' 0. We can use this fact to
- * filter out such node.
- */
- if (!atomic_inc_not_zero(&kn->count)) {
- kn = NULL;
- goto out;
+ if (sizeof(ino_t) >= sizeof(u64)) {
+ /* we looked up with the low 32bits, compare the whole */
+ if (kernfs_ino(kn) != ino)
+ goto err_unlock;
+ } else {
+ /* 0 matches all generations */
+ if (unlikely(gen && kernfs_gen(kn) != gen))
+ goto err_unlock;
}
/*
- * The node could be a new node or a reused node. If it's a new node,
- * we are ok. If it's reused because of RCU (because of
- * SLAB_TYPESAFE_BY_RCU), the __kernfs_new_node always sets its 'ino'
- * before 'count'. So if 'count' is uptodate, 'ino' should be uptodate,
- * hence we can use 'ino' to filter stale node.
+ * ACTIVATED is protected with kernfs_mutex but it was clear when
+ * @kn was added to the idr and we just want to see it set. No need to
+ * grab kernfs_mutex.
*/
- if (kn->id.ino != ino)
- goto out;
- rcu_read_unlock();
+ if (unlikely(!(kn->flags & KERNFS_ACTIVATED) ||
+ !atomic_inc_not_zero(&kn->count)))
+ goto err_unlock;
+ spin_unlock(&kernfs_idr_lock);
return kn;
-out:
- rcu_read_unlock();
- kernfs_put(kn);
+err_unlock:
+ spin_unlock(&kernfs_idr_lock);
return NULL;
}
@@ -962,7 +955,17 @@ struct kernfs_root *kernfs_create_root(struct kernfs_syscall_ops *scops,
idr_init(&root->ino_idr);
INIT_LIST_HEAD(&root->supers);
- root->next_generation = 1;
+
+ /*
+ * On 64bit ino setups, id is ino. On 32bit, low 32bits are ino.
+ * The high bits are the generation. The starting value for both ino
+ * and generation is 1. Initialize the upper 32-bit allocation
+ * accordingly.
+ */
+ if (sizeof(ino_t) >= sizeof(u64))
+ root->id_highbits = 0;
+ else
+ root->id_highbits = 1;
kn = __kernfs_new_node(root, NULL, "", S_IFDIR | S_IRUGO | S_IXUGO,
GLOBAL_ROOT_UID, GLOBAL_ROOT_GID,
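
The packing above makes the node id self-describing: on 32-bit ino configurations the low 32 bits are the inode number and the high 32 bits the generation. A standalone illustration of the unpacking, mirroring what the kernfs_id_ino()/kernfs_id_gen() helpers do:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t id = ((uint64_t)1 << 32) | 42;    /* gen 1, ino 42 */

        printf("ino %u gen %u\n",
               (unsigned)(id & 0xffffffffu),
               (unsigned)(id >> 32));
        return 0;
    }
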
@@ -1678,7 +1681,7 @@ static int kernfs_fop_readdir(struct file *file, struct dir_context *ctx)
const char *name = pos->name;
unsigned int type = dt_type(pos);
int len = strlen(name);
- ino_t ino = pos->id.ino;
+ ino_t ino = kernfs_ino(pos);
ctx->pos = pos->hash;
file->private_data = pos;
diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
index e8c792b49616..34366db3620d 100644
--- a/fs/kernfs/file.c
+++ b/fs/kernfs/file.c
@@ -892,7 +892,7 @@ repeat:
* have the matching @file available. Look up the inodes
* and generate the events manually.
*/
- inode = ilookup(info->sb, kn->id.ino);
+ inode = ilookup(info->sb, kernfs_ino(kn));
if (!inode)
continue;
@@ -901,7 +901,7 @@ repeat:
if (parent) {
struct inode *p_inode;
- p_inode = ilookup(info->sb, parent->id.ino);
+ p_inode = ilookup(info->sb, kernfs_ino(parent));
if (p_inode) {
fsnotify(p_inode, FS_MODIFY | FS_EVENT_ON_CHILD,
inode, FSNOTIFY_EVENT_INODE, &name, 0);
diff --git a/fs/kernfs/inode.c b/fs/kernfs/inode.c
index f3eaa8869f42..eac277c63d42 100644
--- a/fs/kernfs/inode.c
+++ b/fs/kernfs/inode.c
@@ -201,7 +201,7 @@ static void kernfs_init_inode(struct kernfs_node *kn, struct inode *inode)
inode->i_private = kn;
inode->i_mapping->a_ops = &kernfs_aops;
inode->i_op = &kernfs_iops;
- inode->i_generation = kn->id.generation;
+ inode->i_generation = kernfs_gen(kn);
set_default_inode_attr(inode, kn->mode);
kernfs_refresh_inode(kn, inode);
@@ -247,7 +247,7 @@ struct inode *kernfs_get_inode(struct super_block *sb, struct kernfs_node *kn)
{
struct inode *inode;
- inode = iget_locked(sb, kn->id.ino);
+ inode = iget_locked(sb, kernfs_ino(kn));
if (inode && (inode->i_state & I_NEW))
kernfs_init_inode(kn, inode);
diff --git a/fs/kernfs/kernfs-internal.h b/fs/kernfs/kernfs-internal.h
index 02ce570a9a3c..2f3c51d55261 100644
--- a/fs/kernfs/kernfs-internal.h
+++ b/fs/kernfs/kernfs-internal.h
@@ -109,8 +109,6 @@ struct kernfs_node *kernfs_new_node(struct kernfs_node *parent,
const char *name, umode_t mode,
kuid_t uid, kgid_t gid,
unsigned flags);
-struct kernfs_node *kernfs_find_and_get_node_by_ino(struct kernfs_root *root,
- unsigned int ino);
/*
* file.c
diff --git a/fs/kernfs/mount.c b/fs/kernfs/mount.c
index 6c12fac2c287..4d31503abaee 100644
--- a/fs/kernfs/mount.c
+++ b/fs/kernfs/mount.c
@@ -53,63 +53,85 @@ const struct super_operations kernfs_sops = {
.show_path = kernfs_sop_show_path,
};
-/*
- * Similar to kernfs_fh_get_inode, this one gets kernfs node from inode
- * number and generation
- */
-struct kernfs_node *kernfs_get_node_by_id(struct kernfs_root *root,
- const union kernfs_node_id *id)
+static int kernfs_encode_fh(struct inode *inode, __u32 *fh, int *max_len,
+ struct inode *parent)
{
- struct kernfs_node *kn;
+ struct kernfs_node *kn = inode->i_private;
- kn = kernfs_find_and_get_node_by_ino(root, id->ino);
- if (!kn)
- return NULL;
- if (kn->id.generation != id->generation) {
- kernfs_put(kn);
- return NULL;
+ if (*max_len < 2) {
+ *max_len = 2;
+ return FILEID_INVALID;
}
- return kn;
+
+ *max_len = 2;
+ *(u64 *)fh = kn->id;
+ return FILEID_KERNFS;
}
-static struct inode *kernfs_fh_get_inode(struct super_block *sb,
- u64 ino, u32 generation)
+static struct dentry *__kernfs_fh_to_dentry(struct super_block *sb,
+ struct fid *fid, int fh_len,
+ int fh_type, bool get_parent)
{
struct kernfs_super_info *info = kernfs_info(sb);
- struct inode *inode;
struct kernfs_node *kn;
+ struct inode *inode;
+ u64 id;
- if (ino == 0)
- return ERR_PTR(-ESTALE);
+ if (fh_len < 2)
+ return NULL;
+
+ switch (fh_type) {
+ case FILEID_KERNFS:
+ id = *(u64 *)fid;
+ break;
+ case FILEID_INO32_GEN:
+ case FILEID_INO32_GEN_PARENT:
+ /*
+ * blk_log_action() exposes the "LOW32,HIGH32" pair without a
+ * type, and userland can call us with a generic fid
+ * constructed from them. Combine it back into an ID. See
+ * blk_log_action().
+ */
+ id = ((u64)fid->i32.gen << 32) | fid->i32.ino;
+ break;
+ default:
+ return NULL;
+ }
- kn = kernfs_find_and_get_node_by_ino(info->root, ino);
+ kn = kernfs_find_and_get_node_by_id(info->root, id);
if (!kn)
return ERR_PTR(-ESTALE);
+
+ if (get_parent) {
+ struct kernfs_node *parent;
+
+ parent = kernfs_get_parent(kn);
+ kernfs_put(kn);
+ kn = parent;
+ if (!kn)
+ return ERR_PTR(-ESTALE);
+ }
+
inode = kernfs_get_inode(sb, kn);
kernfs_put(kn);
if (!inode)
return ERR_PTR(-ESTALE);
- if (generation && inode->i_generation != generation) {
- /* we didn't find the right inode.. */
- iput(inode);
- return ERR_PTR(-ESTALE);
- }
- return inode;
+ return d_obtain_alias(inode);
}
-static struct dentry *kernfs_fh_to_dentry(struct super_block *sb, struct fid *fid,
- int fh_len, int fh_type)
+static struct dentry *kernfs_fh_to_dentry(struct super_block *sb,
+ struct fid *fid, int fh_len,
+ int fh_type)
{
- return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
- kernfs_fh_get_inode);
+ return __kernfs_fh_to_dentry(sb, fid, fh_len, fh_type, false);
}
-static struct dentry *kernfs_fh_to_parent(struct super_block *sb, struct fid *fid,
- int fh_len, int fh_type)
+static struct dentry *kernfs_fh_to_parent(struct super_block *sb,
+ struct fid *fid, int fh_len,
+ int fh_type)
{
- return generic_fh_to_parent(sb, fid, fh_len, fh_type,
- kernfs_fh_get_inode);
+ return __kernfs_fh_to_dentry(sb, fid, fh_len, fh_type, true);
}
static struct dentry *kernfs_get_parent_dentry(struct dentry *child)
@@ -120,6 +142,7 @@ static struct dentry *kernfs_get_parent_dentry(struct dentry *child)
}
static const struct export_operations kernfs_export_ops = {
+ .encode_fh = kernfs_encode_fh,
.fh_to_dentry = kernfs_fh_to_dentry,
.fh_to_parent = kernfs_fh_to_parent,
.get_parent = kernfs_get_parent_dentry,
@@ -363,18 +386,9 @@ void kernfs_kill_sb(struct super_block *sb)
void __init kernfs_init(void)
{
-
- /*
- * the slab is freed in RCU context, so kernfs_find_and_get_node_by_ino
- * can access the slab lock free. This could introduce stale nodes,
- * please see how kernfs_find_and_get_node_by_ino filters out stale
- * nodes.
- */
kernfs_node_cache = kmem_cache_create("kernfs_node_cache",
sizeof(struct kernfs_node),
- 0,
- SLAB_PANIC | SLAB_TYPESAFE_BY_RCU,
- NULL);
+ 0, SLAB_PANIC, NULL);
/* Creates slab cache for kernfs inode attributes */
kernfs_iattrs_cache = kmem_cache_create("kernfs_iattrs_cache",
diff --git a/fs/namespace.c b/fs/namespace.c
index fe0e9e1410fe..2adfe7b166a3 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -2478,8 +2478,10 @@ static void mnt_warn_timestamp_expiry(struct path *mountpoint, struct vfsmount *
time64_to_tm(sb->s_time_max, 0, &tm);
- pr_warn("Mounted %s file system at %s supports timestamps until %04ld (0x%llx)\n",
- sb->s_type->name, mntpath,
+ pr_warn("%s filesystem being %s at %s supports timestamps until %04ld (0x%llx)\n",
+ sb->s_type->name,
+ is_mounted(mnt) ? "remounted" : "mounted",
+ mntpath,
tm.tm_year+1900, (unsigned long long)sb->s_time_max);
free_page((unsigned long)buf);
@@ -2764,14 +2766,11 @@ static int do_new_mount_fc(struct fs_context *fc, struct path *mountpoint,
if (IS_ERR(mnt))
return PTR_ERR(mnt);
- error = do_add_mount(real_mount(mnt), mountpoint, mnt_flags);
- if (error < 0) {
- mntput(mnt);
- return error;
- }
-
mnt_warn_timestamp_expiry(mountpoint, mnt);
+ error = do_add_mount(real_mount(mnt), mountpoint, mnt_flags);
+ if (error < 0)
+ mntput(mnt);
return error;
}
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index 6e774c5ea13b..1c4c51f3df60 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -1687,7 +1687,7 @@ static void __ocfs2_cluster_unlock(struct ocfs2_super *osb,
spin_unlock_irqrestore(&lockres->l_lock, flags);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
if (lockres->l_lockdep_map.key != NULL)
- rwsem_release(&lockres->l_lockdep_map, 1, caller_ip);
+ rwsem_release(&lockres->l_lockdep_map, caller_ip);
#endif
}
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
index d8507972ee13..90c830e3758e 100644
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -1490,6 +1490,18 @@ static int ocfs2_xa_check_space(struct ocfs2_xa_loc *loc,
return loc->xl_ops->xlo_check_space(loc, xi);
}
+static void ocfs2_xa_add_entry(struct ocfs2_xa_loc *loc, u32 name_hash)
+{
+ loc->xl_ops->xlo_add_entry(loc, name_hash);
+ loc->xl_entry->xe_name_hash = cpu_to_le32(name_hash);
+ /*
+ * We can't leave the new entry's xe_name_offset at zero or
+ * add_namevalue() will go nuts. We set it to the size of our
+ * storage so that it can never be less than any other entry.
+ */
+ loc->xl_entry->xe_name_offset = cpu_to_le16(loc->xl_size);
+}
+
static void ocfs2_xa_add_namevalue(struct ocfs2_xa_loc *loc,
struct ocfs2_xattr_info *xi)
{
@@ -2121,31 +2133,29 @@ static int ocfs2_xa_prepare_entry(struct ocfs2_xa_loc *loc,
if (rc)
goto out;
- if (!loc->xl_entry) {
- rc = -EINVAL;
- goto out;
- }
-
- if (ocfs2_xa_can_reuse_entry(loc, xi)) {
- orig_value_size = loc->xl_entry->xe_value_size;
- rc = ocfs2_xa_reuse_entry(loc, xi, ctxt);
- if (rc)
- goto out;
- goto alloc_value;
- }
+ if (loc->xl_entry) {
+ if (ocfs2_xa_can_reuse_entry(loc, xi)) {
+ orig_value_size = loc->xl_entry->xe_value_size;
+ rc = ocfs2_xa_reuse_entry(loc, xi, ctxt);
+ if (rc)
+ goto out;
+ goto alloc_value;
+ }
- if (!ocfs2_xattr_is_local(loc->xl_entry)) {
- orig_clusters = ocfs2_xa_value_clusters(loc);
- rc = ocfs2_xa_value_truncate(loc, 0, ctxt);
- if (rc) {
- mlog_errno(rc);
- ocfs2_xa_cleanup_value_truncate(loc,
- "overwriting",
- orig_clusters);
- goto out;
+ if (!ocfs2_xattr_is_local(loc->xl_entry)) {
+ orig_clusters = ocfs2_xa_value_clusters(loc);
+ rc = ocfs2_xa_value_truncate(loc, 0, ctxt);
+ if (rc) {
+ mlog_errno(rc);
+ ocfs2_xa_cleanup_value_truncate(loc,
+ "overwriting",
+ orig_clusters);
+ goto out;
+ }
}
- }
- ocfs2_xa_wipe_namevalue(loc);
+ ocfs2_xa_wipe_namevalue(loc);
+ } else
+ ocfs2_xa_add_entry(loc, name_hash);
/*
* If we get here, we have a blank entry. Fill it. We grow our
diff --git a/fs/pipe.c b/fs/pipe.c
index 8a2ab2f974bd..a9149199e0e7 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -793,6 +793,8 @@ int create_pipe_files(struct file **res, int flags)
}
res[0]->private_data = inode->i_pipe;
res[1] = f;
+ stream_open(inode, res[0]);
+ stream_open(inode, res[1]);
return 0;
}
@@ -931,9 +933,9 @@ static int fifo_open(struct inode *inode, struct file *filp)
__pipe_lock(pipe);
/* We can only do regular read/write on fifos */
- filp->f_mode &= (FMODE_READ | FMODE_WRITE);
+ stream_open(inode, filp);
- switch (filp->f_mode) {
+ switch (filp->f_mode & (FMODE_READ | FMODE_WRITE)) {
case FMODE_READ:
/*
* O_RDONLY
diff --git a/fs/proc/stat.c b/fs/proc/stat.c
index 80c305f206bb..37bdbec5b402 100644
--- a/fs/proc/stat.c
+++ b/fs/proc/stat.c
@@ -120,20 +120,23 @@ static int show_stat(struct seq_file *p, void *v)
getboottime64(&boottime);
for_each_possible_cpu(i) {
- struct kernel_cpustat *kcs = &kcpustat_cpu(i);
-
- user += kcs->cpustat[CPUTIME_USER];
- nice += kcs->cpustat[CPUTIME_NICE];
- system += kcs->cpustat[CPUTIME_SYSTEM];
- idle += get_idle_time(kcs, i);
- iowait += get_iowait_time(kcs, i);
- irq += kcs->cpustat[CPUTIME_IRQ];
- softirq += kcs->cpustat[CPUTIME_SOFTIRQ];
- steal += kcs->cpustat[CPUTIME_STEAL];
- guest += kcs->cpustat[CPUTIME_GUEST];
- guest_nice += kcs->cpustat[CPUTIME_GUEST_NICE];
- sum += kstat_cpu_irqs_sum(i);
- sum += arch_irq_stat_cpu(i);
+ struct kernel_cpustat kcpustat;
+ u64 *cpustat = kcpustat.cpustat;
+
+ kcpustat_cpu_fetch(&kcpustat, i);
+
+ user += cpustat[CPUTIME_USER];
+ nice += cpustat[CPUTIME_NICE];
+ system += cpustat[CPUTIME_SYSTEM];
+ idle += get_idle_time(&kcpustat, i);
+ iowait += get_iowait_time(&kcpustat, i);
+ irq += cpustat[CPUTIME_IRQ];
+ softirq += cpustat[CPUTIME_SOFTIRQ];
+ steal += cpustat[CPUTIME_STEAL];
+ guest += cpustat[CPUTIME_GUEST];
+ guest_nice += cpustat[CPUTIME_GUEST_NICE];
+ sum += kstat_cpu_irqs_sum(i);
+ sum += arch_irq_stat_cpu(i);
for (j = 0; j < NR_SOFTIRQS; j++) {
unsigned int softirq_stat = kstat_softirqs_cpu(j, i);
@@ -157,19 +160,22 @@ static int show_stat(struct seq_file *p, void *v)
seq_putc(p, '\n');
for_each_online_cpu(i) {
- struct kernel_cpustat *kcs = &kcpustat_cpu(i);
+ struct kernel_cpustat kcpustat;
+ u64 *cpustat = kcpustat.cpustat;
+
+ kcpustat_cpu_fetch(&kcpustat, i);
/* Copy values here to work around gcc-2.95.3, gcc-2.96 */
- user = kcs->cpustat[CPUTIME_USER];
- nice = kcs->cpustat[CPUTIME_NICE];
- system = kcs->cpustat[CPUTIME_SYSTEM];
- idle = get_idle_time(kcs, i);
- iowait = get_iowait_time(kcs, i);
- irq = kcs->cpustat[CPUTIME_IRQ];
- softirq = kcs->cpustat[CPUTIME_SOFTIRQ];
- steal = kcs->cpustat[CPUTIME_STEAL];
- guest = kcs->cpustat[CPUTIME_GUEST];
- guest_nice = kcs->cpustat[CPUTIME_GUEST_NICE];
+ user = cpustat[CPUTIME_USER];
+ nice = cpustat[CPUTIME_NICE];
+ system = cpustat[CPUTIME_SYSTEM];
+ idle = get_idle_time(&kcpustat, i);
+ iowait = get_iowait_time(&kcpustat, i);
+ irq = cpustat[CPUTIME_IRQ];
+ softirq = cpustat[CPUTIME_SOFTIRQ];
+ steal = cpustat[CPUTIME_STEAL];
+ guest = cpustat[CPUTIME_GUEST];
+ guest_nice = cpustat[CPUTIME_GUEST_NICE];
seq_printf(p, "cpu%d", i);
seq_put_decimal_ull(p, " ", nsec_to_clock_t(user));
seq_put_decimal_ull(p, " ", nsec_to_clock_t(nice));
diff --git a/include/Kbuild b/include/Kbuild
index ffba79483cc5..25edc43483e0 100644
--- a/include/Kbuild
+++ b/include/Kbuild
@@ -65,8 +65,9 @@ header-test- += keys/asymmetric-subtype.h
header-test- += keys/asymmetric-type.h
header-test- += keys/big_key-type.h
header-test- += keys/request_key_auth-type.h
-header-test- += keys/trusted.h
header-test- += kvm/arm_arch_timer.h
+header-test-$(CONFIG_ARM) += kvm/arm_hypercalls.h
+header-test-$(CONFIG_ARM64) += kvm/arm_hypercalls.h
header-test- += kvm/arm_pmu.h
header-test-$(CONFIG_ARM) += kvm/arm_psci.h
header-test-$(CONFIG_ARM64) += kvm/arm_psci.h
@@ -1028,6 +1029,7 @@ header-test- += trace/events/fsi_master_gpio.h
header-test- += trace/events/huge_memory.h
header-test- += trace/events/ib_mad.h
header-test- += trace/events/ib_umad.h
+header-test- += trace/events/io_uring.h
header-test- += trace/events/iscsi.h
header-test- += trace/events/jbd2.h
header-test- += trace/events/kvm.h
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 175f7b40c585..0c23fd0548d1 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -78,9 +78,6 @@ acpi_evaluate_dsm_typed(acpi_handle handle, const guid_t *guid, u64 rev,
bool acpi_dev_found(const char *hid);
bool acpi_dev_present(const char *hid, const char *uid, s64 hrv);
-struct acpi_device *
-acpi_dev_get_first_match_dev(const char *hid, const char *uid, s64 hrv);
-
#ifdef CONFIG_ACPI
#include <linux/proc_fs.h>
@@ -683,6 +680,11 @@ static inline bool acpi_device_can_poweroff(struct acpi_device *adev)
adev->power.states[ACPI_STATE_D3_HOT].flags.explicit_set);
}
+bool acpi_dev_hid_uid_match(struct acpi_device *adev, const char *hid2, const char *uid2);
+
+struct acpi_device *
+acpi_dev_get_first_match_dev(const char *hid, const char *uid, s64 hrv);
+
static inline void acpi_dev_put(struct acpi_device *adev)
{
put_device(&adev->dev);
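
With this change, acpi_dev_get_first_match_dev() moves behind CONFIG_ACPI next to the new acpi_dev_hid_uid_match() helper. A lookup sketch; the HID string is hypothetical:

    /* Sketch: find the first ACPI device with a given HID, any UID/HRV. */
    struct acpi_device *adev;

    adev = acpi_dev_get_first_match_dev("ABCD0001", NULL, -1);
    if (!adev)
            return -ENODEV;
    /* ... use adev ... */
    acpi_dev_put(adev);     /* drop the reference taken by the lookup */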
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index e5e041413581..18790b9e16b5 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -12,7 +12,7 @@
/* Current ACPICA subsystem version in YYYYMMDD format */
-#define ACPI_CA_VERSION 0x20190816
+#define ACPI_CA_VERSION 0x20191018
#include <acpi/acconfig.h>
#include <acpi/actypes.h>
@@ -458,7 +458,11 @@ ACPI_EXTERNAL_RETURN_STATUS(acpi_status ACPI_INIT_FUNCTION
u8 physical))
ACPI_EXTERNAL_RETURN_STATUS(acpi_status
- acpi_load_table(struct acpi_table_header *table))
+ acpi_load_table(struct acpi_table_header *table,
+ u32 *table_idx))
+
+ACPI_EXTERNAL_RETURN_STATUS(acpi_status
+ acpi_unload_table(u32 table_index))
ACPI_EXTERNAL_RETURN_STATUS(acpi_status
acpi_unload_parent_table(acpi_handle object))
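
acpi_load_table() now hands back the index of the newly installed table, so a caller can later remove exactly that table with the new acpi_unload_table(). A sketch, assuming `table` points at a complete SSDT image:

    /* Sketch: load a table and unload it again by the returned index. */
    u32 idx;
    acpi_status status;

    status = acpi_load_table(table, &idx);
    if (ACPI_FAILURE(status))
            return -EINVAL;
    /* ... */
    status = acpi_unload_table(idx);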
diff --git a/include/acpi/button.h b/include/acpi/button.h
index 3a2b8535dec6..340da7784cc8 100644
--- a/include/acpi/button.h
+++ b/include/acpi/button.h
@@ -2,21 +2,9 @@
#ifndef ACPI_BUTTON_H
#define ACPI_BUTTON_H
-#include <linux/notifier.h>
-
#if IS_ENABLED(CONFIG_ACPI_BUTTON)
-extern int acpi_lid_notifier_register(struct notifier_block *nb);
-extern int acpi_lid_notifier_unregister(struct notifier_block *nb);
extern int acpi_lid_open(void);
#else
-static inline int acpi_lid_notifier_register(struct notifier_block *nb)
-{
- return 0;
-}
-static inline int acpi_lid_notifier_unregister(struct notifier_block *nb)
-{
- return 0;
-}
static inline int acpi_lid_open(void)
{
return 1;
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index dae64600ccbf..63cedc3c0c77 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -23,12 +23,11 @@
* _etext = .;
*
* _sdata = .;
- * RO_DATA_SECTION(PAGE_SIZE)
- * RW_DATA_SECTION(...)
+ * RO_DATA(PAGE_SIZE)
+ * RW_DATA(...)
* _edata = .;
*
* EXCEPTION_TABLE(...)
- * NOTES
*
* BSS_SECTION(0, 0, 0)
* _end = .;
@@ -54,6 +53,33 @@
#define LOAD_OFFSET 0
#endif
+/*
+ * Only some architectures want to have the .notes segment visible in
+ * a separate PT_NOTE ELF Program Header. When this happens, it needs
+ * to be visible in both the kernel text's PT_LOAD and the PT_NOTE
+ * Program Headers. In this case, though, the PT_LOAD needs to be made
+ * the default again so that all the following sections don't also end
+ * up in the PT_NOTE Program Header.
+ */
+#ifdef EMITS_PT_NOTE
+#define NOTES_HEADERS :text :note
+#define NOTES_HEADERS_RESTORE __restore_ph : { *(.__restore_ph) } :text
+#else
+#define NOTES_HEADERS
+#define NOTES_HEADERS_RESTORE
+#endif
+
+/*
+ * Some architectures have non-executable read-only exception tables.
+ * They can be added to the RO_DATA segment by specifying their desired
+ * alignment.
+ */
+#ifdef RO_EXCEPTION_TABLE_ALIGN
+#define RO_EXCEPTION_TABLE EXCEPTION_TABLE(RO_EXCEPTION_TABLE_ALIGN)
+#else
+#define RO_EXCEPTION_TABLE
+#endif
+
/* Align . to a 8 byte boundary equals to maximum function alignment. */
#define ALIGN_FUNCTION() . = ALIGN(8)
@@ -110,17 +136,17 @@
#endif
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
-#ifdef CC_USING_PATCHABLE_FUNCTION_ENTRY
-#define MCOUNT_REC() . = ALIGN(8); \
- __start_mcount_loc = .; \
- KEEP(*(__patchable_function_entries)) \
- __stop_mcount_loc = .;
-#else
+/*
+ * The ftrace call sites are logged to a section whose name depends on the
+ * compiler option used. A given kernel image will only use one, AKA
+ * FTRACE_CALLSITE_SECTION. We capture all of them here to avoid header
+ * dependencies for FTRACE_CALLSITE_SECTION's definition.
+ */
#define MCOUNT_REC() . = ALIGN(8); \
__start_mcount_loc = .; \
KEEP(*(__mcount_loc)) \
+ KEEP(*(__patchable_function_entries)) \
__stop_mcount_loc = .;
-#endif
#else
#define MCOUNT_REC()
#endif
@@ -348,7 +374,7 @@
/*
* Read only Data
*/
-#define RO_DATA_SECTION(align) \
+#define RO_DATA(align) \
. = ALIGN((align)); \
.rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
__start_rodata = .; \
@@ -496,15 +522,13 @@
__start___modver = .; \
KEEP(*(__modver)) \
__stop___modver = .; \
- . = ALIGN((align)); \
- __end_rodata = .; \
} \
- . = ALIGN((align));
-
-/* RODATA & RO_DATA provided for backward compatibility.
- * All archs are supposed to use RO_DATA() */
-#define RODATA RO_DATA_SECTION(4096)
-#define RO_DATA(align) RO_DATA_SECTION(align)
+ \
+ RO_EXCEPTION_TABLE \
+ NOTES \
+ \
+ . = ALIGN((align)); \
+ __end_rodata = .;
/*
* .text section. Map to function alignment to avoid address changes
@@ -790,7 +814,8 @@
__start_notes = .; \
KEEP(*(.note.*)) \
__stop_notes = .; \
- }
+ } NOTES_HEADERS \
+ NOTES_HEADERS_RESTORE
#define INIT_SETUP(initsetup_align) \
. = ALIGN(initsetup_align); \
@@ -962,7 +987,7 @@
* matches the requirement of PAGE_ALIGNED_DATA.
*
* use 0 as page_align if page_aligned data is not used */
-#define RW_DATA_SECTION(cacheline, pagealigned, inittask) \
+#define RW_DATA(cacheline, pagealigned, inittask) \
. = ALIGN(PAGE_SIZE); \
.data : AT(ADDR(.data) - LOAD_OFFSET) { \
INIT_TASK_DATA(inittask) \
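
After this rename, architecture linker scripts invoke RO_DATA()/RW_DATA() directly, and RO_DATA() now also emits the notes section plus, optionally, a read-only exception table. A sketch of the resulting shape of an arch vmlinux.lds.S; the alignment values are illustrative:

    /* Sketch: arch vmlinux.lds.S after the RO_DATA/RW_DATA rename. */
    #define RO_EXCEPTION_TABLE_ALIGN 16     /* opt in: ex_table inside rodata */
    #include <asm-generic/vmlinux.lds.h>

    SECTIONS
    {
            ...
            _sdata = .;
            RO_DATA(PAGE_SIZE)              /* replaces RO_DATA_SECTION(); emits NOTES */
            RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
            _edata = .;
            ...
    }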
diff --git a/include/clocksource/hyperv_timer.h b/include/clocksource/hyperv_timer.h
index 422f5e5237be..553e539469f0 100644
--- a/include/clocksource/hyperv_timer.h
+++ b/include/clocksource/hyperv_timer.h
@@ -21,10 +21,11 @@
#define HV_MIN_DELTA_TICKS 1
/* Routines called by the VMbus driver */
-extern int hv_stimer_alloc(int sint);
+extern int hv_stimer_alloc(void);
extern void hv_stimer_free(void);
-extern void hv_stimer_init(unsigned int cpu);
-extern void hv_stimer_cleanup(unsigned int cpu);
+extern int hv_stimer_cleanup(unsigned int cpu);
+extern void hv_stimer_legacy_init(unsigned int cpu, int sint);
+extern void hv_stimer_legacy_cleanup(unsigned int cpu);
extern void hv_stimer_global_cleanup(void);
extern void hv_stimer0_isr(void);
diff --git a/include/crypto/aead.h b/include/crypto/aead.h
index 3c245b1859e7..a3bdadf6221e 100644
--- a/include/crypto/aead.h
+++ b/include/crypto/aead.h
@@ -321,7 +321,7 @@ int crypto_aead_encrypt(struct aead_request *req);
/**
* crypto_aead_decrypt() - decrypt ciphertext
- * @req: reference to the ablkcipher_request handle that holds all information
+ * @req: reference to the aead_request handle that holds all information
* needed to perform the cipher operation
*
* Decrypt ciphertext data using the aead_request handle. That data structure
diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
index e5bd302f2c49..5cd846defdd6 100644
--- a/include/crypto/algapi.h
+++ b/include/crypto/algapi.h
@@ -85,56 +85,6 @@ struct scatter_walk {
unsigned int offset;
};
-struct blkcipher_walk {
- union {
- struct {
- struct page *page;
- unsigned long offset;
- } phys;
-
- struct {
- u8 *page;
- u8 *addr;
- } virt;
- } src, dst;
-
- struct scatter_walk in;
- unsigned int nbytes;
-
- struct scatter_walk out;
- unsigned int total;
-
- void *page;
- u8 *buffer;
- u8 *iv;
- unsigned int ivsize;
-
- int flags;
- unsigned int walk_blocksize;
- unsigned int cipher_blocksize;
- unsigned int alignmask;
-};
-
-struct ablkcipher_walk {
- struct {
- struct page *page;
- unsigned int offset;
- } src, dst;
-
- struct scatter_walk in;
- unsigned int nbytes;
- struct scatter_walk out;
- unsigned int total;
- struct list_head buffers;
- u8 *iv_buffer;
- u8 *iv;
- int flags;
- unsigned int blocksize;
-};
-
-extern const struct crypto_type crypto_ablkcipher_type;
-extern const struct crypto_type crypto_blkcipher_type;
-
void crypto_mod_put(struct crypto_alg *alg);
int crypto_register_template(struct crypto_template *tmpl);
@@ -233,26 +183,6 @@ static inline void crypto_xor_cpy(u8 *dst, const u8 *src1, const u8 *src2,
}
}
-int blkcipher_walk_done(struct blkcipher_desc *desc,
- struct blkcipher_walk *walk, int err);
-int blkcipher_walk_virt(struct blkcipher_desc *desc,
- struct blkcipher_walk *walk);
-int blkcipher_walk_phys(struct blkcipher_desc *desc,
- struct blkcipher_walk *walk);
-int blkcipher_walk_virt_block(struct blkcipher_desc *desc,
- struct blkcipher_walk *walk,
- unsigned int blocksize);
-int blkcipher_aead_walk_virt_block(struct blkcipher_desc *desc,
- struct blkcipher_walk *walk,
- struct crypto_aead *tfm,
- unsigned int blocksize);
-
-int ablkcipher_walk_done(struct ablkcipher_request *req,
- struct ablkcipher_walk *walk, int err);
-int ablkcipher_walk_phys(struct ablkcipher_request *req,
- struct ablkcipher_walk *walk);
-void __ablkcipher_walk_complete(struct ablkcipher_walk *walk);
-
static inline void *crypto_tfm_ctx_aligned(struct crypto_tfm *tfm)
{
return PTR_ALIGN(crypto_tfm_ctx(tfm),
@@ -270,41 +200,6 @@ static inline void *crypto_instance_ctx(struct crypto_instance *inst)
return inst->__ctx;
}
-static inline struct ablkcipher_alg *crypto_ablkcipher_alg(
- struct crypto_ablkcipher *tfm)
-{
- return &crypto_ablkcipher_tfm(tfm)->__crt_alg->cra_ablkcipher;
-}
-
-static inline void *crypto_ablkcipher_ctx(struct crypto_ablkcipher *tfm)
-{
- return crypto_tfm_ctx(&tfm->base);
-}
-
-static inline void *crypto_ablkcipher_ctx_aligned(struct crypto_ablkcipher *tfm)
-{
- return crypto_tfm_ctx_aligned(&tfm->base);
-}
-
-static inline struct crypto_blkcipher *crypto_spawn_blkcipher(
- struct crypto_spawn *spawn)
-{
- u32 type = CRYPTO_ALG_TYPE_BLKCIPHER;
- u32 mask = CRYPTO_ALG_TYPE_MASK;
-
- return __crypto_blkcipher_cast(crypto_spawn_tfm(spawn, type, mask));
-}
-
-static inline void *crypto_blkcipher_ctx(struct crypto_blkcipher *tfm)
-{
- return crypto_tfm_ctx(&tfm->base);
-}
-
-static inline void *crypto_blkcipher_ctx_aligned(struct crypto_blkcipher *tfm)
-{
- return crypto_tfm_ctx_aligned(&tfm->base);
-}
-
static inline struct crypto_cipher *crypto_spawn_cipher(
struct crypto_spawn *spawn)
{
@@ -319,33 +214,6 @@ static inline struct cipher_alg *crypto_cipher_alg(struct crypto_cipher *tfm)
return &crypto_cipher_tfm(tfm)->__crt_alg->cra_cipher;
}
-static inline void blkcipher_walk_init(struct blkcipher_walk *walk,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes)
-{
- walk->in.sg = src;
- walk->out.sg = dst;
- walk->total = nbytes;
-}
-
-static inline void ablkcipher_walk_init(struct ablkcipher_walk *walk,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes)
-{
- walk->in.sg = src;
- walk->out.sg = dst;
- walk->total = nbytes;
- INIT_LIST_HEAD(&walk->buffers);
-}
-
-static inline void ablkcipher_walk_complete(struct ablkcipher_walk *walk)
-{
- if (unlikely(!list_empty(&walk->buffers)))
- __ablkcipher_walk_complete(walk);
-}
-
static inline struct crypto_async_request *crypto_get_backlog(
struct crypto_queue *queue)
{
@@ -353,23 +221,6 @@ static inline struct crypto_async_request *crypto_get_backlog(
container_of(queue->backlog, struct crypto_async_request, list);
}
-static inline int ablkcipher_enqueue_request(struct crypto_queue *queue,
- struct ablkcipher_request *request)
-{
- return crypto_enqueue_request(queue, &request->base);
-}
-
-static inline struct ablkcipher_request *ablkcipher_dequeue_request(
- struct crypto_queue *queue)
-{
- return ablkcipher_request_cast(crypto_dequeue_request(queue));
-}
-
-static inline void *ablkcipher_request_ctx(struct ablkcipher_request *req)
-{
- return req->__ctx;
-}
-
static inline struct crypto_alg *crypto_get_attr_alg(struct rtattr **tb,
u32 type, u32 mask)
{
diff --git a/include/crypto/blake2s.h b/include/crypto/blake2s.h
new file mode 100644
index 000000000000..b471deac28ff
--- /dev/null
+++ b/include/crypto/blake2s.h
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#ifndef BLAKE2S_H
+#define BLAKE2S_H
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+
+#include <asm/bug.h>
+
+enum blake2s_lengths {
+ BLAKE2S_BLOCK_SIZE = 64,
+ BLAKE2S_HASH_SIZE = 32,
+ BLAKE2S_KEY_SIZE = 32,
+
+ BLAKE2S_128_HASH_SIZE = 16,
+ BLAKE2S_160_HASH_SIZE = 20,
+ BLAKE2S_224_HASH_SIZE = 28,
+ BLAKE2S_256_HASH_SIZE = 32,
+};
+
+struct blake2s_state {
+ u32 h[8];
+ u32 t[2];
+ u32 f[2];
+ u8 buf[BLAKE2S_BLOCK_SIZE];
+ unsigned int buflen;
+ unsigned int outlen;
+};
+
+enum blake2s_iv {
+ BLAKE2S_IV0 = 0x6A09E667UL,
+ BLAKE2S_IV1 = 0xBB67AE85UL,
+ BLAKE2S_IV2 = 0x3C6EF372UL,
+ BLAKE2S_IV3 = 0xA54FF53AUL,
+ BLAKE2S_IV4 = 0x510E527FUL,
+ BLAKE2S_IV5 = 0x9B05688CUL,
+ BLAKE2S_IV6 = 0x1F83D9ABUL,
+ BLAKE2S_IV7 = 0x5BE0CD19UL,
+};
+
+void blake2s_update(struct blake2s_state *state, const u8 *in, size_t inlen);
+void blake2s_final(struct blake2s_state *state, u8 *out);
+
+static inline void blake2s_init_param(struct blake2s_state *state,
+ const u32 param)
+{
+ *state = (struct blake2s_state){{
+ BLAKE2S_IV0 ^ param,
+ BLAKE2S_IV1,
+ BLAKE2S_IV2,
+ BLAKE2S_IV3,
+ BLAKE2S_IV4,
+ BLAKE2S_IV5,
+ BLAKE2S_IV6,
+ BLAKE2S_IV7,
+ }};
+}
+
+static inline void blake2s_init(struct blake2s_state *state,
+ const size_t outlen)
+{
+ blake2s_init_param(state, 0x01010000 | outlen);
+ state->outlen = outlen;
+}
+
+static inline void blake2s_init_key(struct blake2s_state *state,
+ const size_t outlen, const void *key,
+ const size_t keylen)
+{
+ WARN_ON(IS_ENABLED(DEBUG) && (!outlen || outlen > BLAKE2S_HASH_SIZE ||
+ !key || !keylen || keylen > BLAKE2S_KEY_SIZE));
+
+ blake2s_init_param(state, 0x01010000 | keylen << 8 | outlen);
+ memcpy(state->buf, key, keylen);
+ state->buflen = BLAKE2S_BLOCK_SIZE;
+ state->outlen = outlen;
+}
+
+static inline void blake2s(u8 *out, const u8 *in, const u8 *key,
+ const size_t outlen, const size_t inlen,
+ const size_t keylen)
+{
+ struct blake2s_state state;
+
+ WARN_ON(IS_ENABLED(DEBUG) && ((!in && inlen > 0) || !out || !outlen ||
+ outlen > BLAKE2S_HASH_SIZE || keylen > BLAKE2S_KEY_SIZE ||
+ (!key && keylen)));
+
+ if (keylen)
+ blake2s_init_key(&state, outlen, key, keylen);
+ else
+ blake2s_init(&state, outlen);
+
+ blake2s_update(&state, in, inlen);
+ blake2s_final(&state, out);
+}
+
+void blake2s256_hmac(u8 *out, const u8 *in, const u8 *key, const size_t inlen,
+ const size_t keylen);
+
+#endif /* BLAKE2S_H */
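
The new header offers both the incremental init/update/final interface and the one-shot blake2s() wrapper shown above. A keyed-hash sketch; buffer sourcing is illustrative:

    /* Sketch: one-shot keyed BLAKE2s-256 over a message. */
    u8 key[BLAKE2S_KEY_SIZE];       /* 32-byte key, filled elsewhere */
    u8 digest[BLAKE2S_HASH_SIZE];   /* 32-byte output */

    blake2s(digest, msg, key, sizeof(digest), msg_len, sizeof(key));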
diff --git a/include/crypto/chacha.h b/include/crypto/chacha.h
index d1e723c6a37d..2676f4fbd4c1 100644
--- a/include/crypto/chacha.h
+++ b/include/crypto/chacha.h
@@ -15,9 +15,8 @@
#ifndef _CRYPTO_CHACHA_H
#define _CRYPTO_CHACHA_H
-#include <crypto/skcipher.h>
+#include <asm/unaligned.h>
#include <linux/types.h>
-#include <linux/crypto.h>
/* 32-bit stream position, then 96-bit nonce (RFC7539 convention) */
#define CHACHA_IV_SIZE 16
@@ -26,29 +25,79 @@
#define CHACHA_BLOCK_SIZE 64
#define CHACHAPOLY_IV_SIZE 12
+#ifdef CONFIG_X86_64
+#define CHACHA_STATE_WORDS ((CHACHA_BLOCK_SIZE + 12) / sizeof(u32))
+#else
+#define CHACHA_STATE_WORDS (CHACHA_BLOCK_SIZE / sizeof(u32))
+#endif
+
/* 192-bit nonce, then 64-bit stream position */
#define XCHACHA_IV_SIZE 32
-struct chacha_ctx {
- u32 key[8];
- int nrounds;
-};
-
-void chacha_block(u32 *state, u8 *stream, int nrounds);
+void chacha_block_generic(u32 *state, u8 *stream, int nrounds);
static inline void chacha20_block(u32 *state, u8 *stream)
{
- chacha_block(state, stream, 20);
+ chacha_block_generic(state, stream, 20);
+}
+
+void hchacha_block_arch(const u32 *state, u32 *out, int nrounds);
+void hchacha_block_generic(const u32 *state, u32 *out, int nrounds);
+
+static inline void hchacha_block(const u32 *state, u32 *out, int nrounds)
+{
+ if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_CHACHA))
+ hchacha_block_arch(state, out, nrounds);
+ else
+ hchacha_block_generic(state, out, nrounds);
+}
+
+void chacha_init_arch(u32 *state, const u32 *key, const u8 *iv);
+static inline void chacha_init_generic(u32 *state, const u32 *key, const u8 *iv)
+{
+ state[0] = 0x61707865; /* "expa" */
+ state[1] = 0x3320646e; /* "nd 3" */
+ state[2] = 0x79622d32; /* "2-by" */
+ state[3] = 0x6b206574; /* "te k" */
+ state[4] = key[0];
+ state[5] = key[1];
+ state[6] = key[2];
+ state[7] = key[3];
+ state[8] = key[4];
+ state[9] = key[5];
+ state[10] = key[6];
+ state[11] = key[7];
+ state[12] = get_unaligned_le32(iv + 0);
+ state[13] = get_unaligned_le32(iv + 4);
+ state[14] = get_unaligned_le32(iv + 8);
+ state[15] = get_unaligned_le32(iv + 12);
+}
+
+static inline void chacha_init(u32 *state, const u32 *key, const u8 *iv)
+{
+ if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_CHACHA))
+ chacha_init_arch(state, key, iv);
+ else
+ chacha_init_generic(state, key, iv);
}
-void hchacha_block(const u32 *in, u32 *out, int nrounds);
-void crypto_chacha_init(u32 *state, const struct chacha_ctx *ctx, const u8 *iv);
+void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src,
+ unsigned int bytes, int nrounds);
+void chacha_crypt_generic(u32 *state, u8 *dst, const u8 *src,
+ unsigned int bytes, int nrounds);
-int crypto_chacha20_setkey(struct crypto_skcipher *tfm, const u8 *key,
- unsigned int keysize);
-int crypto_chacha12_setkey(struct crypto_skcipher *tfm, const u8 *key,
- unsigned int keysize);
+static inline void chacha_crypt(u32 *state, u8 *dst, const u8 *src,
+ unsigned int bytes, int nrounds)
+{
+ if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_CHACHA))
+ chacha_crypt_arch(state, dst, src, bytes, nrounds);
+ else
+ chacha_crypt_generic(state, dst, src, bytes, nrounds);
+}
-int crypto_chacha_crypt(struct skcipher_request *req);
-int crypto_xchacha_crypt(struct skcipher_request *req);
+static inline void chacha20_crypt(u32 *state, u8 *dst, const u8 *src,
+ unsigned int bytes)
+{
+ chacha_crypt(state, dst, src, bytes, 20);
+}
#endif /* _CRYPTO_CHACHA_H */
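
The reworked header routes callers through chacha_init()/chacha_crypt() wrappers that pick the arch-optimized code when CONFIG_CRYPTO_ARCH_HAVE_LIB_CHACHA is set and fall back to the generic code otherwise. A sketch of one ChaCha20 pass; CHACHA_KEY_SIZE and the key/iv sourcing are assumptions outside this hunk:

    /* Sketch: ChaCha20 encryption through the new library entry points. */
    u32 state[CHACHA_STATE_WORDS];
    u32 key[CHACHA_KEY_SIZE / sizeof(u32)]; /* 8 little-endian key words */
    u8 iv[CHACHA_IV_SIZE];                  /* 32-bit counter + 96-bit nonce */

    chacha_init(state, key, iv);            /* arch or generic, as available */
    chacha20_crypt(state, dst, src, len);   /* 20 rounds; same call decrypts */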
diff --git a/include/crypto/chacha20poly1305.h b/include/crypto/chacha20poly1305.h
new file mode 100644
index 000000000000..234ee28078ef
--- /dev/null
+++ b/include/crypto/chacha20poly1305.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#ifndef __CHACHA20POLY1305_H
+#define __CHACHA20POLY1305_H
+
+#include <linux/types.h>
+#include <linux/scatterlist.h>
+
+enum chacha20poly1305_lengths {
+ XCHACHA20POLY1305_NONCE_SIZE = 24,
+ CHACHA20POLY1305_KEY_SIZE = 32,
+ CHACHA20POLY1305_AUTHTAG_SIZE = 16
+};
+
+void chacha20poly1305_encrypt(u8 *dst, const u8 *src, const size_t src_len,
+ const u8 *ad, const size_t ad_len,
+ const u64 nonce,
+ const u8 key[CHACHA20POLY1305_KEY_SIZE]);
+
+bool __must_check
+chacha20poly1305_decrypt(u8 *dst, const u8 *src, const size_t src_len,
+ const u8 *ad, const size_t ad_len, const u64 nonce,
+ const u8 key[CHACHA20POLY1305_KEY_SIZE]);
+
+void xchacha20poly1305_encrypt(u8 *dst, const u8 *src, const size_t src_len,
+ const u8 *ad, const size_t ad_len,
+ const u8 nonce[XCHACHA20POLY1305_NONCE_SIZE],
+ const u8 key[CHACHA20POLY1305_KEY_SIZE]);
+
+bool __must_check xchacha20poly1305_decrypt(
+ u8 *dst, const u8 *src, const size_t src_len, const u8 *ad,
+ const size_t ad_len, const u8 nonce[XCHACHA20POLY1305_NONCE_SIZE],
+ const u8 key[CHACHA20POLY1305_KEY_SIZE]);
+
+bool chacha20poly1305_encrypt_sg_inplace(struct scatterlist *src, size_t src_len,
+ const u8 *ad, const size_t ad_len,
+ const u64 nonce,
+ const u8 key[CHACHA20POLY1305_KEY_SIZE]);
+
+bool chacha20poly1305_decrypt_sg_inplace(struct scatterlist *src, size_t src_len,
+ const u8 *ad, const size_t ad_len,
+ const u64 nonce,
+ const u8 key[CHACHA20POLY1305_KEY_SIZE]);
+
+#endif /* __CHACHA20POLY1305_H */
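
A usage sketch of the new AEAD library; note that the ciphertext buffer carries the 16-byte tag appended to the payload, so the decrypt length includes it. Buffer names are illustrative:

    /* Sketch: ChaCha20-Poly1305 round trip with the library API above. */
    u8 key[CHACHA20POLY1305_KEY_SIZE];
    u8 ct[MSG_LEN + CHACHA20POLY1305_AUTHTAG_SIZE];
    u64 nonce = 1;  /* must never repeat under the same key */

    chacha20poly1305_encrypt(ct, msg, MSG_LEN, ad, ad_len, nonce, key);
    if (!chacha20poly1305_decrypt(msg, ct, sizeof(ct), ad, ad_len, nonce, key))
            return -EBADMSG;        /* tag verification failed */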
diff --git a/include/crypto/curve25519.h b/include/crypto/curve25519.h
new file mode 100644
index 000000000000..4e6dc840b159
--- /dev/null
+++ b/include/crypto/curve25519.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#ifndef CURVE25519_H
+#define CURVE25519_H
+
+#include <crypto/algapi.h> // For crypto_memneq.
+#include <linux/types.h>
+#include <linux/random.h>
+
+enum curve25519_lengths {
+ CURVE25519_KEY_SIZE = 32
+};
+
+extern const u8 curve25519_null_point[];
+extern const u8 curve25519_base_point[];
+
+void curve25519_generic(u8 out[CURVE25519_KEY_SIZE],
+ const u8 scalar[CURVE25519_KEY_SIZE],
+ const u8 point[CURVE25519_KEY_SIZE]);
+
+void curve25519_arch(u8 out[CURVE25519_KEY_SIZE],
+ const u8 scalar[CURVE25519_KEY_SIZE],
+ const u8 point[CURVE25519_KEY_SIZE]);
+
+void curve25519_base_arch(u8 pub[CURVE25519_KEY_SIZE],
+ const u8 secret[CURVE25519_KEY_SIZE]);
+
+static inline
+bool __must_check curve25519(u8 mypublic[CURVE25519_KEY_SIZE],
+ const u8 secret[CURVE25519_KEY_SIZE],
+ const u8 basepoint[CURVE25519_KEY_SIZE])
+{
+ if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_CURVE25519))
+ curve25519_arch(mypublic, secret, basepoint);
+ else
+ curve25519_generic(mypublic, secret, basepoint);
+ return crypto_memneq(mypublic, curve25519_null_point,
+ CURVE25519_KEY_SIZE);
+}
+
+static inline bool
+__must_check curve25519_generate_public(u8 pub[CURVE25519_KEY_SIZE],
+ const u8 secret[CURVE25519_KEY_SIZE])
+{
+ if (unlikely(!crypto_memneq(secret, curve25519_null_point,
+ CURVE25519_KEY_SIZE)))
+ return false;
+
+ if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_CURVE25519))
+ curve25519_base_arch(pub, secret);
+ else
+ curve25519_generic(pub, secret, curve25519_base_point);
+ return crypto_memneq(pub, curve25519_null_point, CURVE25519_KEY_SIZE);
+}
+
+static inline void curve25519_clamp_secret(u8 secret[CURVE25519_KEY_SIZE])
+{
+ secret[0] &= 248;
+ secret[31] = (secret[31] & 127) | 64;
+}
+
+static inline void curve25519_generate_secret(u8 secret[CURVE25519_KEY_SIZE])
+{
+ get_random_bytes_wait(secret, CURVE25519_KEY_SIZE);
+ curve25519_clamp_secret(secret);
+}
+
+#endif /* CURVE25519_H */
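
A key-agreement sketch built from the helpers above; both __must_check helpers return false on degenerate (all-zero) results, which callers must treat as failure:

    /* Sketch: X25519 keypair generation and shared-secret derivation. */
    u8 secret[CURVE25519_KEY_SIZE], pub[CURVE25519_KEY_SIZE];
    u8 shared[CURVE25519_KEY_SIZE];

    curve25519_generate_secret(secret);             /* random, then clamped */
    if (!curve25519_generate_public(pub, secret))
            return -EKEYREJECTED;
    if (!curve25519(shared, secret, their_public))  /* their_public: peer key */
            return -EKEYREJECTED;                   /* all-zero shared point */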
diff --git a/include/crypto/engine.h b/include/crypto/engine.h
index 84c708bba00b..e29cd67f93c7 100644
--- a/include/crypto/engine.h
+++ b/include/crypto/engine.h
@@ -83,8 +83,6 @@ struct crypto_engine_ctx {
struct crypto_engine_op op;
};
-int crypto_transfer_ablkcipher_request_to_engine(struct crypto_engine *engine,
- struct ablkcipher_request *req);
int crypto_transfer_aead_request_to_engine(struct crypto_engine *engine,
struct aead_request *req);
int crypto_transfer_akcipher_request_to_engine(struct crypto_engine *engine,
@@ -93,8 +91,6 @@ int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine,
struct ahash_request *req);
int crypto_transfer_skcipher_request_to_engine(struct crypto_engine *engine,
struct skcipher_request *req);
-void crypto_finalize_ablkcipher_request(struct crypto_engine *engine,
- struct ablkcipher_request *req, int err);
void crypto_finalize_aead_request(struct crypto_engine *engine,
struct aead_request *req, int err);
void crypto_finalize_akcipher_request(struct crypto_engine *engine,
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index d52b95b75ae4..fe7f73bad1e2 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -227,7 +227,7 @@ struct crypto_shash {
* CRYPTO_ALG_TYPE_AHASH (listed as type "ahash" in /proc/crypto)
*
* The asynchronous cipher operation discussion provided for the
- * CRYPTO_ALG_TYPE_ABLKCIPHER API applies here as well.
+ * CRYPTO_ALG_TYPE_SKCIPHER API applies here as well.
*/
static inline struct crypto_ahash *__crypto_ahash_cast(struct crypto_tfm *tfm)
diff --git a/include/crypto/internal/blake2s.h b/include/crypto/internal/blake2s.h
new file mode 100644
index 000000000000..74ff77032e52
--- /dev/null
+++ b/include/crypto/internal/blake2s.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+
+#ifndef BLAKE2S_INTERNAL_H
+#define BLAKE2S_INTERNAL_H
+
+#include <crypto/blake2s.h>
+
+struct blake2s_tfm_ctx {
+ u8 key[BLAKE2S_KEY_SIZE];
+ unsigned int keylen;
+};
+
+void blake2s_compress_generic(struct blake2s_state *state, const u8 *block,
+ size_t nblocks, const u32 inc);
+
+void blake2s_compress_arch(struct blake2s_state *state, const u8 *block,
+ size_t nblocks, const u32 inc);
+
+static inline void blake2s_set_lastblock(struct blake2s_state *state)
+{
+ state->f[0] = -1;
+}
+
+#endif /* BLAKE2S_INTERNAL_H */
diff --git a/include/crypto/internal/chacha.h b/include/crypto/internal/chacha.h
new file mode 100644
index 000000000000..aa5d4a16aac5
--- /dev/null
+++ b/include/crypto/internal/chacha.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _CRYPTO_INTERNAL_CHACHA_H
+#define _CRYPTO_INTERNAL_CHACHA_H
+
+#include <crypto/chacha.h>
+#include <crypto/internal/skcipher.h>
+#include <linux/crypto.h>
+
+struct chacha_ctx {
+ u32 key[8];
+ int nrounds;
+};
+
+static inline int chacha_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keysize, int nrounds)
+{
+ struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
+ int i;
+
+ if (keysize != CHACHA_KEY_SIZE)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(ctx->key); i++)
+ ctx->key[i] = get_unaligned_le32(key + i * sizeof(u32));
+
+ ctx->nrounds = nrounds;
+ return 0;
+}
+
+static inline int chacha20_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keysize)
+{
+ return chacha_setkey(tfm, key, keysize, 20);
+}
+
+static inline int chacha12_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keysize)
+{
+ return chacha_setkey(tfm, key, keysize, 12);
+}
+
+#endif /* _CRYPTO_INTERNAL_CHACHA_H */
diff --git a/include/crypto/internal/des.h b/include/crypto/internal/des.h
index 81ea1a425e9c..f62a2bb1866b 100644
--- a/include/crypto/internal/des.h
+++ b/include/crypto/internal/des.h
@@ -117,18 +117,6 @@ static inline int verify_skcipher_des3_key(struct crypto_skcipher *tfm,
return crypto_des3_ede_verify_key(crypto_skcipher_tfm(tfm), key);
}
-static inline int verify_ablkcipher_des_key(struct crypto_ablkcipher *tfm,
- const u8 *key)
-{
- return crypto_des_verify_key(crypto_ablkcipher_tfm(tfm), key);
-}
-
-static inline int verify_ablkcipher_des3_key(struct crypto_ablkcipher *tfm,
- const u8 *key)
-{
- return crypto_des3_ede_verify_key(crypto_ablkcipher_tfm(tfm), key);
-}
-
static inline int verify_aead_des_key(struct crypto_aead *tfm, const u8 *key,
int keylen)
{
diff --git a/include/crypto/internal/poly1305.h b/include/crypto/internal/poly1305.h
new file mode 100644
index 000000000000..479b0cab2a1a
--- /dev/null
+++ b/include/crypto/internal/poly1305.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Common values for the Poly1305 algorithm
+ */
+
+#ifndef _CRYPTO_INTERNAL_POLY1305_H
+#define _CRYPTO_INTERNAL_POLY1305_H
+
+#include <asm/unaligned.h>
+#include <linux/types.h>
+#include <crypto/poly1305.h>
+
+/*
+ * Poly1305 core functions. These implement the ε-almost-∆-universal hash
+ * function underlying the Poly1305 MAC, i.e. they don't add an encrypted nonce
+ * ("s key") at the end. They also only support block-aligned inputs.
+ */
+void poly1305_core_setkey(struct poly1305_key *key, const u8 *raw_key);
+static inline void poly1305_core_init(struct poly1305_state *state)
+{
+ *state = (struct poly1305_state){};
+}
+
+void poly1305_core_blocks(struct poly1305_state *state,
+ const struct poly1305_key *key, const void *src,
+ unsigned int nblocks, u32 hibit);
+void poly1305_core_emit(const struct poly1305_state *state, void *dst);
+
+/*
+ * Poly1305 requires a unique key for each tag, which implies that we can't set
+ * it on the tfm that gets accessed by multiple users simultaneously. Instead we
+ * expect the key as the first 32 bytes in the update() call.
+ */
+static inline
+unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx,
+ const u8 *src, unsigned int srclen)
+{
+ if (!dctx->sset) {
+ if (!dctx->rset && srclen >= POLY1305_BLOCK_SIZE) {
+ poly1305_core_setkey(dctx->r, src);
+ src += POLY1305_BLOCK_SIZE;
+ srclen -= POLY1305_BLOCK_SIZE;
+ dctx->rset = 1;
+ }
+ if (srclen >= POLY1305_BLOCK_SIZE) {
+ dctx->s[0] = get_unaligned_le32(src + 0);
+ dctx->s[1] = get_unaligned_le32(src + 4);
+ dctx->s[2] = get_unaligned_le32(src + 8);
+ dctx->s[3] = get_unaligned_le32(src + 12);
+ src += POLY1305_BLOCK_SIZE;
+ srclen -= POLY1305_BLOCK_SIZE;
+ dctx->sset = true;
+ }
+ }
+ return srclen;
+}
+
+#endif
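
The comment above documents the shash convention of feeding the 32-byte one-time key in as the leading bytes of the first update() call. A sketch of an update hook consuming it; the driver name is hypothetical and the core-update step is elided:

    /* Sketch: a Poly1305 shash update() that strips the leading key bytes. */
    static int mydrv_poly1305_update(struct shash_desc *desc,
                                     const u8 *src, unsigned int srclen)
    {
            struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
            unsigned int rem;

            if (unlikely(!dctx->sset)) {
                    rem = crypto_poly1305_setdesckey(dctx, src, srclen);
                    src += srclen - rem;    /* skip the consumed key bytes */
                    srclen = rem;
            }
            /* ... feed the remaining srclen bytes to the core ... */
            return 0;
    }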
diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h
index 734b6f7081b8..921c409fe1b1 100644
--- a/include/crypto/internal/skcipher.h
+++ b/include/crypto/internal/skcipher.h
@@ -153,17 +153,6 @@ static inline void skcipher_walk_abort(struct skcipher_walk *walk)
skcipher_walk_done(walk, -ECANCELED);
}
-static inline void ablkcipher_request_complete(struct ablkcipher_request *req,
- int err)
-{
- req->base.complete(&req->base, err);
-}
-
-static inline u32 ablkcipher_request_flags(struct ablkcipher_request *req)
-{
- return req->base.flags;
-}
-
static inline void *crypto_skcipher_ctx(struct crypto_skcipher *tfm)
{
return crypto_tfm_ctx(&tfm->base);
@@ -182,73 +171,22 @@ static inline u32 skcipher_request_flags(struct skcipher_request *req)
static inline unsigned int crypto_skcipher_alg_min_keysize(
struct skcipher_alg *alg)
{
- if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) ==
- CRYPTO_ALG_TYPE_BLKCIPHER)
- return alg->base.cra_blkcipher.min_keysize;
-
- if (alg->base.cra_ablkcipher.encrypt)
- return alg->base.cra_ablkcipher.min_keysize;
-
return alg->min_keysize;
}
static inline unsigned int crypto_skcipher_alg_max_keysize(
struct skcipher_alg *alg)
{
- if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) ==
- CRYPTO_ALG_TYPE_BLKCIPHER)
- return alg->base.cra_blkcipher.max_keysize;
-
- if (alg->base.cra_ablkcipher.encrypt)
- return alg->base.cra_ablkcipher.max_keysize;
-
return alg->max_keysize;
}
-static inline unsigned int crypto_skcipher_alg_chunksize(
- struct skcipher_alg *alg)
-{
- if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) ==
- CRYPTO_ALG_TYPE_BLKCIPHER)
- return alg->base.cra_blocksize;
-
- if (alg->base.cra_ablkcipher.encrypt)
- return alg->base.cra_blocksize;
-
- return alg->chunksize;
-}
-
static inline unsigned int crypto_skcipher_alg_walksize(
struct skcipher_alg *alg)
{
- if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) ==
- CRYPTO_ALG_TYPE_BLKCIPHER)
- return alg->base.cra_blocksize;
-
- if (alg->base.cra_ablkcipher.encrypt)
- return alg->base.cra_blocksize;
-
return alg->walksize;
}
/**
- * crypto_skcipher_chunksize() - obtain chunk size
- * @tfm: cipher handle
- *
- * The block size is set to one for ciphers such as CTR. However,
- * you still need to provide incremental updates in multiples of
- * the underlying block size as the IV does not have sub-block
- * granularity. This is known in this API as the chunk size.
- *
- * Return: chunk size in bytes
- */
-static inline unsigned int crypto_skcipher_chunksize(
- struct crypto_skcipher *tfm)
-{
- return crypto_skcipher_alg_chunksize(crypto_skcipher_alg(tfm));
-}
-
-/**
* crypto_skcipher_walksize() - obtain walk size
* @tfm: cipher handle
*
diff --git a/include/crypto/poly1305.h b/include/crypto/poly1305.h
index 34317ed2071e..74c6e1cd73ee 100644
--- a/include/crypto/poly1305.h
+++ b/include/crypto/poly1305.h
@@ -22,43 +22,56 @@ struct poly1305_state {
};
struct poly1305_desc_ctx {
- /* key */
- struct poly1305_key r;
- /* finalize key */
- u32 s[4];
- /* accumulator */
- struct poly1305_state h;
/* partial buffer */
u8 buf[POLY1305_BLOCK_SIZE];
/* bytes used in partial buffer */
unsigned int buflen;
- /* r key has been set */
- bool rset;
- /* s key has been set */
+ /* how many keys have been set in r[] */
+ unsigned short rset;
+ /* whether s[] has been set */
bool sset;
+ /* finalize key */
+ u32 s[4];
+ /* accumulator */
+ struct poly1305_state h;
+ /* key */
+ struct poly1305_key r[CONFIG_CRYPTO_LIB_POLY1305_RSIZE];
};
-/*
- * Poly1305 core functions. These implement the ε-almost-∆-universal hash
- * function underlying the Poly1305 MAC, i.e. they don't add an encrypted nonce
- * ("s key") at the end. They also only support block-aligned inputs.
- */
-void poly1305_core_setkey(struct poly1305_key *key, const u8 *raw_key);
-static inline void poly1305_core_init(struct poly1305_state *state)
+void poly1305_init_arch(struct poly1305_desc_ctx *desc, const u8 *key);
+void poly1305_init_generic(struct poly1305_desc_ctx *desc, const u8 *key);
+
+static inline void poly1305_init(struct poly1305_desc_ctx *desc, const u8 *key)
+{
+ if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305))
+ poly1305_init_arch(desc, key);
+ else
+ poly1305_init_generic(desc, key);
+}
+
+void poly1305_update_arch(struct poly1305_desc_ctx *desc, const u8 *src,
+ unsigned int nbytes);
+void poly1305_update_generic(struct poly1305_desc_ctx *desc, const u8 *src,
+ unsigned int nbytes);
+
+static inline void poly1305_update(struct poly1305_desc_ctx *desc,
+ const u8 *src, unsigned int nbytes)
{
- memset(state->h, 0, sizeof(state->h));
+ if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305))
+ poly1305_update_arch(desc, src, nbytes);
+ else
+ poly1305_update_generic(desc, src, nbytes);
}
-void poly1305_core_blocks(struct poly1305_state *state,
- const struct poly1305_key *key,
- const void *src, unsigned int nblocks);
-void poly1305_core_emit(const struct poly1305_state *state, void *dst);
-/* Crypto API helper functions for the Poly1305 MAC */
-int crypto_poly1305_init(struct shash_desc *desc);
-unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx,
- const u8 *src, unsigned int srclen);
-int crypto_poly1305_update(struct shash_desc *desc,
- const u8 *src, unsigned int srclen);
-int crypto_poly1305_final(struct shash_desc *desc, u8 *dst);
+void poly1305_final_arch(struct poly1305_desc_ctx *desc, u8 *digest);
+void poly1305_final_generic(struct poly1305_desc_ctx *desc, u8 *digest);
+
+static inline void poly1305_final(struct poly1305_desc_ctx *desc, u8 *digest)
+{
+ if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305))
+ poly1305_final_arch(desc, digest);
+ else
+ poly1305_final_generic(desc, digest);
+}
#endif
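
The header now exposes a direct library interface alongside the crypto API glue; init/update/final dispatch to arch code when CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305 is set. A one-shot MAC sketch; POLY1305_DIGEST_SIZE (16 bytes) is defined earlier in this header, outside the hunk:

    /* Sketch: computing one Poly1305 tag with the new library calls. */
    struct poly1305_desc_ctx desc;
    u8 mac[POLY1305_DIGEST_SIZE];

    poly1305_init(&desc, key);      /* key: 32-byte one-time key */
    poly1305_update(&desc, msg, msg_len);
    poly1305_final(&desc, mac);     /* 16-byte tag out */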
diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h
index 37c164234d97..b4655d91661f 100644
--- a/include/crypto/skcipher.h
+++ b/include/crypto/skcipher.h
@@ -218,30 +218,13 @@ static inline void crypto_free_sync_skcipher(struct crypto_sync_skcipher *tfm)
* crypto_has_skcipher() - Search for the availability of an skcipher.
* @alg_name: is the cra_name / name or cra_driver_name / driver name of the
* skcipher
- * @type: specifies the type of the cipher
- * @mask: specifies the mask for the cipher
- *
- * Return: true when the skcipher is known to the kernel crypto API; false
- * otherwise
- */
-static inline int crypto_has_skcipher(const char *alg_name, u32 type,
- u32 mask)
-{
- return crypto_has_alg(alg_name, crypto_skcipher_type(type),
- crypto_skcipher_mask(mask));
-}
-
-/**
- * crypto_has_skcipher2() - Search for the availability of an skcipher.
- * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
- * skcipher
* @type: specifies the type of the skcipher
* @mask: specifies the mask for the skcipher
*
* Return: true when the skcipher is known to the kernel crypto API; false
* otherwise
*/
-int crypto_has_skcipher2(const char *alg_name, u32 type, u32 mask);
+int crypto_has_skcipher(const char *alg_name, u32 type, u32 mask);
static inline const char *crypto_skcipher_driver_name(
struct crypto_skcipher *tfm)
@@ -258,13 +241,6 @@ static inline struct skcipher_alg *crypto_skcipher_alg(
static inline unsigned int crypto_skcipher_alg_ivsize(struct skcipher_alg *alg)
{
- if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) ==
- CRYPTO_ALG_TYPE_BLKCIPHER)
- return alg->base.cra_blkcipher.ivsize;
-
- if (alg->base.cra_ablkcipher.encrypt)
- return alg->base.cra_ablkcipher.ivsize;
-
return alg->ivsize;
}
@@ -304,6 +280,29 @@ static inline unsigned int crypto_skcipher_blocksize(
return crypto_tfm_alg_blocksize(crypto_skcipher_tfm(tfm));
}
+static inline unsigned int crypto_skcipher_alg_chunksize(
+ struct skcipher_alg *alg)
+{
+ return alg->chunksize;
+}
+
+/**
+ * crypto_skcipher_chunksize() - obtain chunk size
+ * @tfm: cipher handle
+ *
+ * The block size is set to one for ciphers such as CTR. However,
+ * you still need to provide incremental updates in multiples of
+ * the underlying block size as the IV does not have sub-block
+ * granularity. This is known in this API as the chunk size.
+ *
+ * Return: chunk size in bytes
+ */
+static inline unsigned int crypto_skcipher_chunksize(
+ struct crypto_skcipher *tfm)
+{
+ return crypto_skcipher_alg_chunksize(crypto_skcipher_alg(tfm));
+}
+
static inline unsigned int crypto_sync_skcipher_blocksize(
struct crypto_sync_skcipher *tfm)
{
diff --git a/include/drm/bridge/dw_hdmi.h b/include/drm/bridge/dw_hdmi.h
index cf528c289857..9a0c8381a069 100644
--- a/include/drm/bridge/dw_hdmi.h
+++ b/include/drm/bridge/dw_hdmi.h
@@ -6,6 +6,8 @@
#ifndef __DW_HDMI__
#define __DW_HDMI__
+#include <sound/hdmi-codec.h>
+
struct drm_connector;
struct drm_display_mode;
struct drm_encoder;
@@ -154,6 +156,8 @@ void dw_hdmi_resume(struct dw_hdmi *hdmi);
void dw_hdmi_setup_rx_sense(struct dw_hdmi *hdmi, bool hpd, bool rx_sense);
+int dw_hdmi_set_plugged_cb(struct dw_hdmi *hdmi, hdmi_codec_plugged_cb fn,
+ struct device *codec_dev);
void dw_hdmi_set_sample_rate(struct dw_hdmi *hdmi, unsigned int rate);
void dw_hdmi_set_channel_count(struct dw_hdmi *hdmi, unsigned int cnt);
void dw_hdmi_set_channel_allocation(struct dw_hdmi *hdmi, unsigned int ca);
diff --git a/include/drm/drm_cache.h b/include/drm/drm_cache.h
index 987ff16b9420..e9ad4863d915 100644
--- a/include/drm/drm_cache.h
+++ b/include/drm/drm_cache.h
@@ -45,7 +45,7 @@ static inline bool drm_arch_can_wc_memory(void)
{
#if defined(CONFIG_PPC) && !defined(CONFIG_NOT_COHERENT_CACHE)
return false;
-#elif defined(CONFIG_MIPS) && defined(CONFIG_CPU_LOONGSON3)
+#elif defined(CONFIG_MIPS) && defined(CONFIG_CPU_LOONGSON64)
return false;
#elif defined(CONFIG_ARM) || defined(CONFIG_ARM64)
/*
diff --git a/include/dt-bindings/gpio/meson-a1-gpio.h b/include/dt-bindings/gpio/meson-a1-gpio.h
new file mode 100644
index 000000000000..40e57a5ff1db
--- /dev/null
+++ b/include/dt-bindings/gpio/meson-a1-gpio.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+/*
+ * Copyright (c) 2019 Amlogic, Inc. All rights reserved.
+ * Author: Qianggui Song <qianggui.song@amlogic.com>
+ */
+
+#ifndef _DT_BINDINGS_MESON_A1_GPIO_H
+#define _DT_BINDINGS_MESON_A1_GPIO_H
+
+#define GPIOP_0 0
+#define GPIOP_1 1
+#define GPIOP_2 2
+#define GPIOP_3 3
+#define GPIOP_4 4
+#define GPIOP_5 5
+#define GPIOP_6 6
+#define GPIOP_7 7
+#define GPIOP_8 8
+#define GPIOP_9 9
+#define GPIOP_10 10
+#define GPIOP_11 11
+#define GPIOP_12 12
+#define GPIOB_0 13
+#define GPIOB_1 14
+#define GPIOB_2 15
+#define GPIOB_3 16
+#define GPIOB_4 17
+#define GPIOB_5 18
+#define GPIOB_6 19
+#define GPIOX_0 20
+#define GPIOX_1 21
+#define GPIOX_2 22
+#define GPIOX_3 23
+#define GPIOX_4 24
+#define GPIOX_5 25
+#define GPIOX_6 26
+#define GPIOX_7 27
+#define GPIOX_8 28
+#define GPIOX_9 29
+#define GPIOX_10 30
+#define GPIOX_11 31
+#define GPIOX_12 32
+#define GPIOX_13 33
+#define GPIOX_14 34
+#define GPIOX_15 35
+#define GPIOX_16 36
+#define GPIOF_0 37
+#define GPIOF_1 38
+#define GPIOF_2 39
+#define GPIOF_3 40
+#define GPIOF_4 41
+#define GPIOF_5 42
+#define GPIOF_6 43
+#define GPIOF_7 44
+#define GPIOF_8 45
+#define GPIOF_9 46
+#define GPIOF_10 47
+#define GPIOF_11 48
+#define GPIOF_12 49
+#define GPIOA_0 50
+#define GPIOA_1 51
+#define GPIOA_2 52
+#define GPIOA_3 53
+#define GPIOA_4 54
+#define GPIOA_5 55
+#define GPIOA_6 56
+#define GPIOA_7 57
+#define GPIOA_8 58
+#define GPIOA_9 59
+#define GPIOA_10 60
+#define GPIOA_11 61
+
+#endif /* _DT_BINDINGS_MESON_A1_GPIO_H */
diff --git a/include/dt-bindings/interconnect/qcom,msm8974.h b/include/dt-bindings/interconnect/qcom,msm8974.h
new file mode 100644
index 000000000000..e65ae27ffff2
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,msm8974.h
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */
+/*
+ * Qualcomm msm8974 interconnect IDs
+ *
+ * Copyright (c) 2019 Brian Masney <masneyb@onstation.org>
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_MSM8974_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_MSM8974_H
+
+#define BIMC_MAS_AMPSS_M0 0
+#define BIMC_MAS_AMPSS_M1 1
+#define BIMC_MAS_MSS_PROC 2
+#define BIMC_TO_MNOC 3
+#define BIMC_TO_SNOC 4
+#define BIMC_SLV_EBI_CH0 5
+#define BIMC_SLV_AMPSS_L2 6
+
+#define CNOC_MAS_RPM_INST 0
+#define CNOC_MAS_RPM_DATA 1
+#define CNOC_MAS_RPM_SYS 2
+#define CNOC_MAS_DEHR 3
+#define CNOC_MAS_QDSS_DAP 4
+#define CNOC_MAS_SPDM 5
+#define CNOC_MAS_TIC 6
+#define CNOC_SLV_CLK_CTL 7
+#define CNOC_SLV_CNOC_MSS 8
+#define CNOC_SLV_SECURITY 9
+#define CNOC_SLV_TCSR 10
+#define CNOC_SLV_TLMM 11
+#define CNOC_SLV_CRYPTO_0_CFG 12
+#define CNOC_SLV_CRYPTO_1_CFG 13
+#define CNOC_SLV_IMEM_CFG 14
+#define CNOC_SLV_MESSAGE_RAM 15
+#define CNOC_SLV_BIMC_CFG 16
+#define CNOC_SLV_BOOT_ROM 17
+#define CNOC_SLV_PMIC_ARB 18
+#define CNOC_SLV_SPDM_WRAPPER 19
+#define CNOC_SLV_DEHR_CFG 20
+#define CNOC_SLV_MPM 21
+#define CNOC_SLV_QDSS_CFG 22
+#define CNOC_SLV_RBCPR_CFG 23
+#define CNOC_SLV_RBCPR_QDSS_APU_CFG 24
+#define CNOC_TO_SNOC 25
+#define CNOC_SLV_CNOC_ONOC_CFG 26
+#define CNOC_SLV_CNOC_MNOC_MMSS_CFG 27
+#define CNOC_SLV_CNOC_MNOC_CFG 28
+#define CNOC_SLV_PNOC_CFG 29
+#define CNOC_SLV_SNOC_MPU_CFG 30
+#define CNOC_SLV_SNOC_CFG 31
+#define CNOC_SLV_EBI1_DLL_CFG 32
+#define CNOC_SLV_PHY_APU_CFG 33
+#define CNOC_SLV_EBI1_PHY_CFG 34
+#define CNOC_SLV_RPM 35
+#define CNOC_SLV_SERVICE_CNOC 36
+
+#define MNOC_MAS_GRAPHICS_3D 0
+#define MNOC_MAS_JPEG 1
+#define MNOC_MAS_MDP_PORT0 2
+#define MNOC_MAS_VIDEO_P0 3
+#define MNOC_MAS_VIDEO_P1 4
+#define MNOC_MAS_VFE 5
+#define MNOC_TO_CNOC 6
+#define MNOC_TO_BIMC 7
+#define MNOC_SLV_CAMERA_CFG 8
+#define MNOC_SLV_DISPLAY_CFG 9
+#define MNOC_SLV_OCMEM_CFG 10
+#define MNOC_SLV_CPR_CFG 11
+#define MNOC_SLV_CPR_XPU_CFG 12
+#define MNOC_SLV_MISC_CFG 13
+#define MNOC_SLV_MISC_XPU_CFG 14
+#define MNOC_SLV_VENUS_CFG 15
+#define MNOC_SLV_GRAPHICS_3D_CFG 16
+#define MNOC_SLV_MMSS_CLK_CFG 17
+#define MNOC_SLV_MMSS_CLK_XPU_CFG 18
+#define MNOC_SLV_MNOC_MPU_CFG 19
+#define MNOC_SLV_ONOC_MPU_CFG 20
+#define MNOC_SLV_SERVICE_MNOC 21
+
+#define OCMEM_NOC_TO_OCMEM_VNOC 0
+#define OCMEM_MAS_JPEG_OCMEM 1
+#define OCMEM_MAS_MDP_OCMEM 2
+#define OCMEM_MAS_VIDEO_P0_OCMEM 3
+#define OCMEM_MAS_VIDEO_P1_OCMEM 4
+#define OCMEM_MAS_VFE_OCMEM 5
+#define OCMEM_MAS_CNOC_ONOC_CFG 6
+#define OCMEM_SLV_SERVICE_ONOC 7
+#define OCMEM_VNOC_TO_SNOC 8
+#define OCMEM_VNOC_TO_OCMEM_NOC 9
+#define OCMEM_VNOC_MAS_GFX3D 10
+#define OCMEM_SLV_OCMEM 11
+
+#define PNOC_MAS_PNOC_CFG 0
+#define PNOC_MAS_SDCC_1 1
+#define PNOC_MAS_SDCC_3 2
+#define PNOC_MAS_SDCC_4 3
+#define PNOC_MAS_SDCC_2 4
+#define PNOC_MAS_TSIF 5
+#define PNOC_MAS_BAM_DMA 6
+#define PNOC_MAS_BLSP_2 7
+#define PNOC_MAS_USB_HSIC 8
+#define PNOC_MAS_BLSP_1 9
+#define PNOC_MAS_USB_HS 10
+#define PNOC_TO_SNOC 11
+#define PNOC_SLV_SDCC_1 12
+#define PNOC_SLV_SDCC_3 13
+#define PNOC_SLV_SDCC_2 14
+#define PNOC_SLV_SDCC_4 15
+#define PNOC_SLV_TSIF 16
+#define PNOC_SLV_BAM_DMA 17
+#define PNOC_SLV_BLSP_2 18
+#define PNOC_SLV_USB_HSIC 19
+#define PNOC_SLV_BLSP_1 20
+#define PNOC_SLV_USB_HS 21
+#define PNOC_SLV_PDM 22
+#define PNOC_SLV_PERIPH_APU_CFG 23
+#define PNOC_SLV_PNOC_MPU_CFG 24
+#define PNOC_SLV_PRNG 25
+#define PNOC_SLV_SERVICE_PNOC 26
+
+#define SNOC_MAS_LPASS_AHB 0
+#define SNOC_MAS_QDSS_BAM 1
+#define SNOC_MAS_SNOC_CFG 2
+#define SNOC_TO_BIMC 3
+#define SNOC_TO_CNOC 4
+#define SNOC_TO_PNOC 5
+#define SNOC_TO_OCMEM_VNOC 6
+#define SNOC_MAS_CRYPTO_CORE0 7
+#define SNOC_MAS_CRYPTO_CORE1 8
+#define SNOC_MAS_LPASS_PROC 9
+#define SNOC_MAS_MSS 10
+#define SNOC_MAS_MSS_NAV 11
+#define SNOC_MAS_OCMEM_DMA 12
+#define SNOC_MAS_WCSS 13
+#define SNOC_MAS_QDSS_ETR 14
+#define SNOC_MAS_USB3 15
+#define SNOC_SLV_AMPSS 16
+#define SNOC_SLV_LPASS 17
+#define SNOC_SLV_USB3 18
+#define SNOC_SLV_WCSS 19
+#define SNOC_SLV_OCIMEM 20
+#define SNOC_SLV_SNOC_OCMEM 21
+#define SNOC_SLV_SERVICE_SNOC 22
+#define SNOC_SLV_QDSS_STM 23
+
+#endif
diff --git a/include/dt-bindings/net/qca-ar803x.h b/include/dt-bindings/net/qca-ar803x.h
new file mode 100644
index 000000000000..9c046c7242ed
--- /dev/null
+++ b/include/dt-bindings/net/qca-ar803x.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Device Tree constants for the Qualcomm Atheros AR803x PHYs
+ */
+
+#ifndef _DT_BINDINGS_QCA_AR803X_H
+#define _DT_BINDINGS_QCA_AR803X_H
+
+#define AR803X_STRENGTH_FULL 0
+#define AR803X_STRENGTH_HALF 1
+#define AR803X_STRENGTH_QUARTER 2
+
+#endif
diff --git a/include/dt-bindings/net/ti-dp83869.h b/include/dt-bindings/net/ti-dp83869.h
new file mode 100644
index 000000000000..218b1a64e975
--- /dev/null
+++ b/include/dt-bindings/net/ti-dp83869.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Device Tree constants for the Texas Instruments DP83869 PHY
+ *
+ * Author: Dan Murphy <dmurphy@ti.com>
+ *
+ * Copyright: (C) 2019 Texas Instruments, Inc.
+ */
+
+#ifndef _DT_BINDINGS_TI_DP83869_H
+#define _DT_BINDINGS_TI_DP83869_H
+
+/* PHY CTRL bits */
+#define DP83869_PHYCR_FIFO_DEPTH_3_B_NIB 0x00
+#define DP83869_PHYCR_FIFO_DEPTH_4_B_NIB 0x01
+#define DP83869_PHYCR_FIFO_DEPTH_6_B_NIB 0x02
+#define DP83869_PHYCR_FIFO_DEPTH_8_B_NIB 0x03
+
+/* IO_MUX_CFG - Clock output selection */
+#define DP83869_CLK_O_SEL_CHN_A_RCLK 0x0
+#define DP83869_CLK_O_SEL_CHN_B_RCLK 0x1
+#define DP83869_CLK_O_SEL_CHN_C_RCLK 0x2
+#define DP83869_CLK_O_SEL_CHN_D_RCLK 0x3
+#define DP83869_CLK_O_SEL_CHN_A_RCLK_DIV5 0x4
+#define DP83869_CLK_O_SEL_CHN_B_RCLK_DIV5 0x5
+#define DP83869_CLK_O_SEL_CHN_C_RCLK_DIV5 0x6
+#define DP83869_CLK_O_SEL_CHN_D_RCLK_DIV5 0x7
+#define DP83869_CLK_O_SEL_CHN_A_TCLK 0x8
+#define DP83869_CLK_O_SEL_CHN_B_TCLK 0x9
+#define DP83869_CLK_O_SEL_CHN_C_TCLK 0xa
+#define DP83869_CLK_O_SEL_CHN_D_TCLK 0xb
+#define DP83869_CLK_O_SEL_REF_CLK 0xc
+
+#define DP83869_RGMII_COPPER_ETHERNET 0x00
+#define DP83869_RGMII_1000_BASE 0x01
+#define DP83869_RGMII_100_BASE 0x02
+#define DP83869_RGMII_SGMII_BRIDGE 0x03
+#define DP83869_1000M_MEDIA_CONVERT 0x04
+#define DP83869_100M_MEDIA_CONVERT 0x05
+#define DP83869_SGMII_COPPER_ETHERNET 0x06
+
+#endif
diff --git a/include/dt-bindings/pinctrl/at91.h b/include/dt-bindings/pinctrl/at91.h
index 3831f91fb3ba..e8e117306b1b 100644
--- a/include/dt-bindings/pinctrl/at91.h
+++ b/include/dt-bindings/pinctrl/at91.h
@@ -27,8 +27,8 @@
#define AT91_PINCTRL_DRIVE_STRENGTH_MED (0x2 << 5)
#define AT91_PINCTRL_DRIVE_STRENGTH_HI (0x3 << 5)
-#define AT91_PINCTRL_SLEWRATE_DIS (0x0 << 9)
-#define AT91_PINCTRL_SLEWRATE_ENA (0x1 << 9)
+#define AT91_PINCTRL_SLEWRATE_ENA (0x0 << 9)
+#define AT91_PINCTRL_SLEWRATE_DIS (0x1 << 9)
#define AT91_PIOA 0
#define AT91_PIOB 1
diff --git a/include/dt-bindings/pmu/exynos_ppmu.h b/include/dt-bindings/pmu/exynos_ppmu.h
new file mode 100644
index 000000000000..8724abe130f3
--- /dev/null
+++ b/include/dt-bindings/pmu/exynos_ppmu.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Samsung Exynos PPMU event types for counting in regs
+ *
+ * Copyright (c) 2019, Samsung Electronics
+ * Author: Lukasz Luba <l.luba@partner.samsung.com>
+ */
+
+#ifndef __DT_BINDINGS_PMU_EXYNOS_PPMU_H
+#define __DT_BINDINGS_PMU_EXYNOS_PPMU_H
+
+#define PPMU_RO_BUSY_CYCLE_CNT 0x0
+#define PPMU_WO_BUSY_CYCLE_CNT 0x1
+#define PPMU_RW_BUSY_CYCLE_CNT 0x2
+#define PPMU_RO_REQUEST_CNT 0x3
+#define PPMU_WO_REQUEST_CNT 0x4
+#define PPMU_RO_DATA_CNT 0x5
+#define PPMU_WO_DATA_CNT 0x6
+#define PPMU_RO_LATENCY 0x12
+#define PPMU_WO_LATENCY 0x16
+#define PPMU_V2_RO_DATA_CNT 0x4
+#define PPMU_V2_WO_DATA_CNT 0x5
+#define PPMU_V2_EVT3_RW_DATA_CNT 0x22
+
+#endif
diff --git a/include/dt-bindings/regulator/dlg,da9063-regulator.h b/include/dt-bindings/regulator/dlg,da9063-regulator.h
new file mode 100644
index 000000000000..1de710dd0899
--- /dev/null
+++ b/include/dt-bindings/regulator/dlg,da9063-regulator.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _DT_BINDINGS_REGULATOR_DLG_DA9063_H
+#define _DT_BINDINGS_REGULATOR_DLG_DA9063_H
+
+/*
+ * These buck mode constants may be used to specify values in device tree
+ * properties (e.g. regulator-initial-mode).
+ * A description of the following modes is in the manufacturer's datasheet.
+ */
+
+#define DA9063_BUCK_MODE_SLEEP 1
+#define DA9063_BUCK_MODE_SYNC 2
+#define DA9063_BUCK_MODE_AUTO 3
+
+#endif
diff --git a/include/dt-bindings/sound/samsung-i2s.h b/include/dt-bindings/sound/samsung-i2s.h
index 77545f14c379..250de0d6c734 100644
--- a/include/dt-bindings/sound/samsung-i2s.h
+++ b/include/dt-bindings/sound/samsung-i2s.h
@@ -2,8 +2,14 @@
#ifndef _DT_BINDINGS_SAMSUNG_I2S_H
#define _DT_BINDINGS_SAMSUNG_I2S_H
-#define CLK_I2S_CDCLK 0
-#define CLK_I2S_RCLK_SRC 1
-#define CLK_I2S_RCLK_PSR 2
+#define CLK_I2S_CDCLK 0 /* the CDCLK (CODECLKO) gate clock */
+
+#define CLK_I2S_RCLK_SRC 1 /* the RCLKSRC mux clock (corresponding to
+ * RCLKSRC bit in IISMOD register)
+ */
+
+#define CLK_I2S_RCLK_PSR 2 /* the RCLK prescaler divider clock
+ * (corresponding to the IISPSR register)
+ */
#endif /* _DT_BINDINGS_SAMSUNG_I2S_H */
diff --git a/include/keys/trusted.h b/include/keys/trusted_tpm.h
index 0071298b9b28..a56d8e1298f2 100644
--- a/include/keys/trusted.h
+++ b/include/keys/trusted_tpm.h
@@ -1,14 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __TRUSTED_KEY_H
-#define __TRUSTED_KEY_H
+#ifndef __TRUSTED_TPM_H
+#define __TRUSTED_TPM_H
+
+#include <keys/trusted-type.h>
+#include <linux/tpm_command.h>
/* implementation specific TPM constants */
#define MAX_BUF_SIZE 1024
#define TPM_GETRANDOM_SIZE 14
-#define TPM_OSAP_SIZE 36
-#define TPM_OIAP_SIZE 10
-#define TPM_SEAL_SIZE 87
-#define TPM_UNSEAL_SIZE 104
#define TPM_SIZE_OFFSET 2
#define TPM_RETURN_OFFSET 6
#define TPM_DATA_OFFSET 10
@@ -17,13 +16,6 @@
#define LOAD32N(buffer, offset) (*(uint32_t *)&buffer[offset])
#define LOAD16(buffer, offset) (ntohs(*(uint16_t *)&buffer[offset]))
-struct tpm_buf {
- int len;
- unsigned char data[MAX_BUF_SIZE];
-};
-
-#define INIT_BUF(tb) (tb->len = 0)
-
struct osapsess {
uint32_t handle;
unsigned char secret[SHA1_DIGEST_SIZE];
@@ -48,6 +40,13 @@ int TSS_checkhmac1(unsigned char *buffer,
int trusted_tpm_send(unsigned char *cmd, size_t buflen);
int oiap(struct tpm_buf *tb, uint32_t *handle, unsigned char *nonce);
+int tpm2_seal_trusted(struct tpm_chip *chip,
+ struct trusted_key_payload *payload,
+ struct trusted_key_options *options);
+int tpm2_unseal_trusted(struct tpm_chip *chip,
+ struct trusted_key_payload *payload,
+ struct trusted_key_options *options);
+
#define TPM_DEBUG 0
#if TPM_DEBUG
@@ -109,28 +108,4 @@ static inline void dump_tpm_buf(unsigned char *buf)
{
}
#endif
-
-static inline void store8(struct tpm_buf *buf, const unsigned char value)
-{
- buf->data[buf->len++] = value;
-}
-
-static inline void store16(struct tpm_buf *buf, const uint16_t value)
-{
- *(uint16_t *) & buf->data[buf->len] = htons(value);
- buf->len += sizeof value;
-}
-
-static inline void store32(struct tpm_buf *buf, const uint32_t value)
-{
- *(uint32_t *) & buf->data[buf->len] = htonl(value);
- buf->len += sizeof value;
-}
-
-static inline void storebytes(struct tpm_buf *buf, const unsigned char *in,
- const int len)
-{
- memcpy(buf->data + buf->len, in, len);
- buf->len += len;
-}
#endif
diff --git a/include/kunit/assert.h b/include/kunit/assert.h
new file mode 100644
index 000000000000..db6a0fca09b4
--- /dev/null
+++ b/include/kunit/assert.h
@@ -0,0 +1,356 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Assertion and expectation serialization API.
+ *
+ * Copyright (C) 2019, Google LLC.
+ * Author: Brendan Higgins <brendanhiggins@google.com>
+ */
+
+#ifndef _KUNIT_ASSERT_H
+#define _KUNIT_ASSERT_H
+
+#include <kunit/string-stream.h>
+#include <linux/err.h>
+
+struct kunit;
+
+/**
+ * enum kunit_assert_type - Type of expectation/assertion.
+ * @KUNIT_ASSERTION: Used to denote that a kunit_assert represents an assertion.
+ * @KUNIT_EXPECTATION: Denotes that a kunit_assert represents an expectation.
+ *
+ * Used in conjunction with a &struct kunit_assert to denote whether it
+ * represents an expectation or an assertion.
+ */
+enum kunit_assert_type {
+ KUNIT_ASSERTION,
+ KUNIT_EXPECTATION,
+};
+
+/**
+ * struct kunit_assert - Data for printing a failed assertion or expectation.
+ * @test: the test case this expectation/assertion is associated with.
+ * @type: the type (either an expectation or an assertion) of this kunit_assert.
+ * @line: the source code line number that the expectation/assertion is at.
+ * @file: the file path of the source file that the expectation/assertion is in.
+ * @message: an optional message to provide additional context.
+ * @format: a function which formats the data in this kunit_assert to a string.
+ *
+ * Represents a failed expectation/assertion. Contains all the data necessary to
+ * format a string to a user reporting the failure.
+ */
+struct kunit_assert {
+ struct kunit *test;
+ enum kunit_assert_type type;
+ int line;
+ const char *file;
+ struct va_format message;
+ void (*format)(const struct kunit_assert *assert,
+ struct string_stream *stream);
+};
+
+/**
+ * KUNIT_INIT_VA_FMT_NULL - Default initializer for struct va_format.
+ *
+ * Used inside a struct initialization block to initialize struct va_format to
+ * default values where fmt and va are null.
+ */
+#define KUNIT_INIT_VA_FMT_NULL { .fmt = NULL, .va = NULL }
+
+/**
+ * KUNIT_INIT_ASSERT_STRUCT() - Initializer for a &struct kunit_assert.
+ * @kunit: The test case that this expectation/assertion is associated with.
+ * @assert_type: The type (assertion or expectation) of this kunit_assert.
+ * @fmt: The formatting function which builds a string out of this kunit_assert.
+ *
+ * The base initializer for a &struct kunit_assert.
+ */
+#define KUNIT_INIT_ASSERT_STRUCT(kunit, assert_type, fmt) { \
+ .test = kunit, \
+ .type = assert_type, \
+ .file = __FILE__, \
+ .line = __LINE__, \
+ .message = KUNIT_INIT_VA_FMT_NULL, \
+ .format = fmt \
+}
+
+void kunit_base_assert_format(const struct kunit_assert *assert,
+ struct string_stream *stream);
+
+void kunit_assert_print_msg(const struct kunit_assert *assert,
+ struct string_stream *stream);
+
+/**
+ * struct kunit_fail_assert - Represents a plain fail expectation/assertion.
+ * @assert: The parent of this type.
+ *
+ * Represents a simple KUNIT_FAIL/KUNIT_ASSERT_FAILURE that always fails.
+ */
+struct kunit_fail_assert {
+ struct kunit_assert assert;
+};
+
+void kunit_fail_assert_format(const struct kunit_assert *assert,
+ struct string_stream *stream);
+
+/**
+ * KUNIT_INIT_FAIL_ASSERT_STRUCT() - Initializer for &struct kunit_fail_assert.
+ * @test: The test case that this expectation/assertion is associated with.
+ * @type: The type (assertion or expectation) of this kunit_assert.
+ *
+ * Initializes a &struct kunit_fail_assert. Intended to be used in
+ * KUNIT_EXPECT_* and KUNIT_ASSERT_* macros.
+ */
+#define KUNIT_INIT_FAIL_ASSERT_STRUCT(test, type) { \
+ .assert = KUNIT_INIT_ASSERT_STRUCT(test, \
+ type, \
+ kunit_fail_assert_format) \
+}
+
+/**
+ * struct kunit_unary_assert - Represents a KUNIT_{EXPECT|ASSERT}_{TRUE|FALSE}
+ * @assert: The parent of this type.
+ * @condition: A string representation of a conditional expression.
+ * @expected_true: True if of type KUNIT_{EXPECT|ASSERT}_TRUE, false otherwise.
+ *
+ * Represents a simple expectation or assertion that checks whether something
+ * is true or false; in other words, it represents the
+ * KUNIT_{EXPECT|ASSERT}_{TRUE|FALSE} expectations and assertions.
+ */
+struct kunit_unary_assert {
+ struct kunit_assert assert;
+ const char *condition;
+ bool expected_true;
+};
+
+void kunit_unary_assert_format(const struct kunit_assert *assert,
+ struct string_stream *stream);
+
+/**
+ * KUNIT_INIT_UNARY_ASSERT_STRUCT() - Initializes &struct kunit_unary_assert.
+ * @test: The test case that this expectation/assertion is associated with.
+ * @type: The type (assertion or expectation) of this kunit_assert.
+ * @cond: A string representation of the expression asserted true or false.
+ * @expect_true: True if of type KUNIT_{EXPECT|ASSERT}_TRUE, false otherwise.
+ *
+ * Initializes a &struct kunit_unary_assert. Intended to be used in
+ * KUNIT_EXPECT_* and KUNIT_ASSERT_* macros.
+ */
+#define KUNIT_INIT_UNARY_ASSERT_STRUCT(test, type, cond, expect_true) { \
+ .assert = KUNIT_INIT_ASSERT_STRUCT(test, \
+ type, \
+ kunit_unary_assert_format), \
+ .condition = cond, \
+ .expected_true = expect_true \
+}
+
+/**
+ * struct kunit_ptr_not_err_assert - An expectation/assertion that a pointer is
+ * not NULL and not a -errno.
+ * @assert: The parent of this type.
+ * @text: A string representation of the expression passed to the expectation.
+ * @value: The actual evaluated pointer value of the expression.
+ *
+ * Represents an expectation/assertion that a pointer is not null and does not
+ * contain a -errno. (See IS_ERR_OR_NULL().)
+ */
+struct kunit_ptr_not_err_assert {
+ struct kunit_assert assert;
+ const char *text;
+ const void *value;
+};
+
+void kunit_ptr_not_err_assert_format(const struct kunit_assert *assert,
+ struct string_stream *stream);
+
+/**
+ * KUNIT_INIT_PTR_NOT_ERR_STRUCT() - Initializes a
+ * &struct kunit_ptr_not_err_assert.
+ * @test: The test case that this expectation/assertion is associated with.
+ * @type: The type (assertion or expectation) of this kunit_assert.
+ * @txt: A string representation of the expression passed to the expectation.
+ * @val: The actual evaluated pointer value of the expression.
+ *
+ * Initializes a &struct kunit_ptr_not_err_assert. Intended to be used in
+ * KUNIT_EXPECT_* and KUNIT_ASSERT_* macros.
+ */
+#define KUNIT_INIT_PTR_NOT_ERR_STRUCT(test, type, txt, val) { \
+ .assert = KUNIT_INIT_ASSERT_STRUCT(test, \
+ type, \
+ kunit_ptr_not_err_assert_format), \
+ .text = txt, \
+ .value = val \
+}
+
+/**
+ * struct kunit_binary_assert - An expectation/assertion that compares two
+ * non-pointer values (for example, KUNIT_EXPECT_EQ(test, 1 + 1, 2)).
+ * @assert: The parent of this type.
+ * @operation: A string representation of the comparison operator (e.g. "==").
+ * @left_text: A string representation of the expression in the left slot.
+ * @left_value: The actual evaluated value of the expression in the left slot.
+ * @right_text: A string representation of the expression in the right slot.
+ * @right_value: The actual evaluated value of the expression in the right slot.
+ *
+ * Represents an expectation/assertion that compares two non-pointer values. For
+ * example, to expect that 1 + 1 == 2, you can use the expectation
+ * KUNIT_EXPECT_EQ(test, 1 + 1, 2);
+ */
+struct kunit_binary_assert {
+ struct kunit_assert assert;
+ const char *operation;
+ const char *left_text;
+ long long left_value;
+ const char *right_text;
+ long long right_value;
+};
+
+void kunit_binary_assert_format(const struct kunit_assert *assert,
+ struct string_stream *stream);
+
+/**
+ * KUNIT_INIT_BINARY_ASSERT_STRUCT() - Initializes a
+ * &struct kunit_binary_assert.
+ * @test: The test case that this expectation/assertion is associated with.
+ * @type: The type (assertion or expectation) of this kunit_assert.
+ * @op_str: A string representation of the comparison operator (e.g. "==").
+ * @left_str: A string representation of the expression in the left slot.
+ * @left_val: The actual evaluated value of the expression in the left slot.
+ * @right_str: A string representation of the expression in the right slot.
+ * @right_val: The actual evaluated value of the expression in the right slot.
+ *
+ * Initializes a &struct kunit_binary_assert. Intended to be used in
+ * KUNIT_EXPECT_* and KUNIT_ASSERT_* macros.
+ */
+#define KUNIT_INIT_BINARY_ASSERT_STRUCT(test, \
+ type, \
+ op_str, \
+ left_str, \
+ left_val, \
+ right_str, \
+ right_val) { \
+ .assert = KUNIT_INIT_ASSERT_STRUCT(test, \
+ type, \
+ kunit_binary_assert_format), \
+ .operation = op_str, \
+ .left_text = left_str, \
+ .left_value = left_val, \
+ .right_text = right_str, \
+ .right_value = right_val \
+}
+
+/**
+ * struct kunit_binary_ptr_assert - An expectation/assertion that compares two
+ * pointer values (for example, KUNIT_EXPECT_PTR_EQ(test, foo, bar)).
+ * @assert: The parent of this type.
+ * @operation: A string representation of the comparison operator (e.g. "==").
+ * @left_text: A string representation of the expression in the left slot.
+ * @left_value: The actual evaluated value of the expression in the left slot.
+ * @right_text: A string representation of the expression in the right slot.
+ * @right_value: The actual evaluated value of the expression in the right slot.
+ *
+ * Represents an expectation/assertion that compares two pointer values. For
+ * example, to expect that foo and bar point to the same thing, you can use the
+ * expectation KUNIT_EXPECT_PTR_EQ(test, foo, bar);
+ */
+struct kunit_binary_ptr_assert {
+ struct kunit_assert assert;
+ const char *operation;
+ const char *left_text;
+ const void *left_value;
+ const char *right_text;
+ const void *right_value;
+};
+
+void kunit_binary_ptr_assert_format(const struct kunit_assert *assert,
+ struct string_stream *stream);
+
+/**
+ * KUNIT_INIT_BINARY_PTR_ASSERT_STRUCT() - Initializes a
+ * &struct kunit_binary_ptr_assert.
+ * @test: The test case that this expectation/assertion is associated with.
+ * @type: The type (assertion or expectation) of this kunit_assert.
+ * @op_str: A string representation of the comparison operator (e.g. "==").
+ * @left_str: A string representation of the expression in the left slot.
+ * @left_val: The actual evaluated value of the expression in the left slot.
+ * @right_str: A string representation of the expression in the right slot.
+ * @right_val: The actual evaluated value of the expression in the right slot.
+ *
+ * Initializes a &struct kunit_binary_ptr_assert. Intended to be used in
+ * KUNIT_EXPECT_* and KUNIT_ASSERT_* macros.
+ */
+#define KUNIT_INIT_BINARY_PTR_ASSERT_STRUCT(test, \
+ type, \
+ op_str, \
+ left_str, \
+ left_val, \
+ right_str, \
+ right_val) { \
+ .assert = KUNIT_INIT_ASSERT_STRUCT(test, \
+ type, \
+ kunit_binary_ptr_assert_format), \
+ .operation = op_str, \
+ .left_text = left_str, \
+ .left_value = left_val, \
+ .right_text = right_str, \
+ .right_value = right_val \
+}
+
+/**
+ * struct kunit_binary_str_assert - An expectation/assertion that compares two
+ * string values (for example, KUNIT_EXPECT_STREQ(test, foo, "bar")).
+ * @assert: The parent of this type.
+ * @operation: A string representation of the comparison operator (e.g. "==").
+ * @left_text: A string representation of the expression in the left slot.
+ * @left_value: The actual evaluated value of the expression in the left slot.
+ * @right_text: A string representation of the expression in the right slot.
+ * @right_value: The actual evaluated value of the expression in the right slot.
+ *
+ * Represents an expectation/assertion that compares two string values. For
+ * example, to expect that the string in foo is equal to "bar", you can use the
+ * expectation KUNIT_EXPECT_STREQ(test, foo, "bar");
+ */
+struct kunit_binary_str_assert {
+ struct kunit_assert assert;
+ const char *operation;
+ const char *left_text;
+ const char *left_value;
+ const char *right_text;
+ const char *right_value;
+};
+
+void kunit_binary_str_assert_format(const struct kunit_assert *assert,
+ struct string_stream *stream);
+
+/**
+ * KUNIT_INIT_BINARY_STR_ASSERT_STRUCT() - Initializes a
+ * &struct kunit_binary_str_assert.
+ * @test: The test case that this expectation/assertion is associated with.
+ * @type: The type (assertion or expectation) of this kunit_assert.
+ * @op_str: A string representation of the comparison operator (e.g. "==").
+ * @left_str: A string representation of the expression in the left slot.
+ * @left_val: The actual evaluated value of the expression in the left slot.
+ * @right_str: A string representation of the expression in the right slot.
+ * @right_val: The actual evaluated value of the expression in the right slot.
+ *
+ * Initializes a &struct kunit_binary_str_assert. Intended to be used in
+ * KUNIT_EXPECT_* and KUNIT_ASSERT_* macros.
+ */
+#define KUNIT_INIT_BINARY_STR_ASSERT_STRUCT(test, \
+ type, \
+ op_str, \
+ left_str, \
+ left_val, \
+ right_str, \
+ right_val) { \
+ .assert = KUNIT_INIT_ASSERT_STRUCT(test, \
+ type, \
+ kunit_binary_str_assert_format), \
+ .operation = op_str, \
+ .left_text = left_str, \
+ .left_value = left_val, \
+ .right_text = right_str, \
+ .right_value = right_val \
+}
+
+#endif /* _KUNIT_ASSERT_H */
diff --git a/include/kunit/string-stream.h b/include/kunit/string-stream.h
new file mode 100644
index 000000000000..fe98a00b75a9
--- /dev/null
+++ b/include/kunit/string-stream.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * C++ stream style string builder used in KUnit for building messages.
+ *
+ * Copyright (C) 2019, Google LLC.
+ * Author: Brendan Higgins <brendanhiggins@google.com>
+ */
+
+#ifndef _KUNIT_STRING_STREAM_H
+#define _KUNIT_STRING_STREAM_H
+
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <stdarg.h>
+
+struct string_stream_fragment {
+ struct kunit *test;
+ struct list_head node;
+ char *fragment;
+};
+
+struct string_stream {
+ size_t length;
+ struct list_head fragments;
+ /* length and fragments are protected by this lock */
+ spinlock_t lock;
+ struct kunit *test;
+ gfp_t gfp;
+};
+
+struct kunit;
+
+struct string_stream *alloc_string_stream(struct kunit *test, gfp_t gfp);
+
+int __printf(2, 3) string_stream_add(struct string_stream *stream,
+ const char *fmt, ...);
+
+int string_stream_vadd(struct string_stream *stream,
+ const char *fmt,
+ va_list args);
+
+char *string_stream_get_string(struct string_stream *stream);
+
+int string_stream_append(struct string_stream *stream,
+ struct string_stream *other);
+
+bool string_stream_is_empty(struct string_stream *stream);
+
+int string_stream_destroy(struct string_stream *stream);
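+
+/*
+ * A minimal usage sketch, assuming a valid struct kunit *test is in scope
+ * (the values added are illustrative):
+ *
+ *	struct string_stream *stream = alloc_string_stream(test, GFP_KERNEL);
+ *
+ *	string_stream_add(stream, "left == %d, ", 1);
+ *	string_stream_add(stream, "right == %d", 2);
+ *	pr_info("%s\n", string_stream_get_string(stream));
+ *	string_stream_destroy(stream);
+ */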
+
+#endif /* _KUNIT_STRING_STREAM_H */
diff --git a/include/kunit/test.h b/include/kunit/test.h
new file mode 100644
index 000000000000..dba48304b3bd
--- /dev/null
+++ b/include/kunit/test.h
@@ -0,0 +1,1490 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Base unit test (KUnit) API.
+ *
+ * Copyright (C) 2019, Google LLC.
+ * Author: Brendan Higgins <brendanhiggins@google.com>
+ */
+
+#ifndef _KUNIT_TEST_H
+#define _KUNIT_TEST_H
+
+#include <kunit/assert.h>
+#include <kunit/try-catch.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+struct kunit_resource;
+
+typedef int (*kunit_resource_init_t)(struct kunit_resource *, void *);
+typedef void (*kunit_resource_free_t)(struct kunit_resource *);
+
+/**
+ * struct kunit_resource - represents a *test managed resource*
+ * @allocation: for the user to store arbitrary data.
+ * @free: a user supplied function to free the resource. Populated by
+ * kunit_alloc_resource().
+ *
+ * Represents a *test managed resource*, a resource which will automatically be
+ * cleaned up at the end of a test case.
+ *
+ * Example:
+ *
+ * .. code-block:: c
+ *
+ * struct kunit_kmalloc_params {
+ * size_t size;
+ * gfp_t gfp;
+ * };
+ *
+ * static int kunit_kmalloc_init(struct kunit_resource *res, void *context)
+ * {
+ * struct kunit_kmalloc_params *params = context;
+ * res->allocation = kmalloc(params->size, params->gfp);
+ *
+ * if (!res->allocation)
+ * return -ENOMEM;
+ *
+ * return 0;
+ * }
+ *
+ * static void kunit_kmalloc_free(struct kunit_resource *res)
+ * {
+ * kfree(res->allocation);
+ * }
+ *
+ * void *kunit_kmalloc(struct kunit *test, size_t size, gfp_t gfp)
+ * {
+ * struct kunit_kmalloc_params params;
+ * struct kunit_resource *res;
+ *
+ * params.size = size;
+ * params.gfp = gfp;
+ *
+ * res = kunit_alloc_resource(test, kunit_kmalloc_init,
+ * kunit_kmalloc_free, &params);
+ * if (res)
+ * return res->allocation;
+ *
+ * return NULL;
+ * }
+ */
+struct kunit_resource {
+ void *allocation;
+ kunit_resource_free_t free;
+
+ /* private: internal use only. */
+ struct list_head node;
+};
+
+struct kunit;
+
+/**
+ * struct kunit_case - represents an individual test case.
+ *
+ * @run_case: the function representing the actual test case.
+ * @name: the name of the test case.
+ *
+ * A test case is a function with the signature
+ * ``void (*)(struct kunit *)``
+ * that makes expectations and assertions (see KUNIT_EXPECT_TRUE() and
+ * KUNIT_ASSERT_TRUE()) about code under test. Each test case is associated
+ * with a &struct kunit_suite and will be run after the suite's init
+ * function and followed by the suite's exit function.
+ *
+ * A test case should be static and should only be created with the
+ * KUNIT_CASE() macro; additionally, every array of test cases should be
+ * terminated with an empty test case.
+ *
+ * Example:
+ *
+ * .. code-block:: c
+ *
+ * void add_test_basic(struct kunit *test)
+ * {
+ * KUNIT_EXPECT_EQ(test, 1, add(1, 0));
+ * KUNIT_EXPECT_EQ(test, 2, add(1, 1));
+ * KUNIT_EXPECT_EQ(test, 0, add(-1, 1));
+ * KUNIT_EXPECT_EQ(test, INT_MAX, add(0, INT_MAX));
+ * KUNIT_EXPECT_EQ(test, -1, add(INT_MAX, INT_MIN));
+ * }
+ *
+ * static struct kunit_case example_test_cases[] = {
+ * KUNIT_CASE(add_test_basic),
+ * {}
+ * };
+ *
+ */
+struct kunit_case {
+ void (*run_case)(struct kunit *test);
+ const char *name;
+
+ /* private: internal use only. */
+ bool success;
+};
+
+/**
+ * KUNIT_CASE - A helper for creating a &struct kunit_case
+ *
+ * @test_name: a reference to a test case function.
+ *
+ * Takes a symbol for a function representing a test case and creates a
+ * &struct kunit_case object from it. See the documentation for
+ * &struct kunit_case for an example on how to use it.
+ */
+#define KUNIT_CASE(test_name) { .run_case = test_name, .name = #test_name }
+
+/**
+ * struct kunit_suite - describes a related collection of &struct kunit_case
+ *
+ * @name: the name of the test. Purely informational.
+ * @init: called before every test case.
+ * @exit: called after every test case.
+ * @test_cases: a null terminated array of test cases.
+ *
+ * A kunit_suite is a collection of related &struct kunit_case s, such that
+ * @init is called before every test case and @exit is called after every
+ * test case, similar to the notion of a *test fixture* or a *test class*
+ * in other unit testing frameworks like JUnit or Googletest.
+ *
+ * Every &struct kunit_case must be associated with a kunit_suite for KUnit
+ * to run it.
+ */
+struct kunit_suite {
+ const char name[256];
+ int (*init)(struct kunit *test);
+ void (*exit)(struct kunit *test);
+ struct kunit_case *test_cases;
+};
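+
+/*
+ * Example, building on the example_test_cases array shown in the
+ * &struct kunit_case documentation above (example_suite_init is a
+ * hypothetical fixture function):
+ *
+ *	static int example_suite_init(struct kunit *test)
+ *	{
+ *		kunit_info(test, "initializing\n");
+ *		return 0;
+ *	}
+ *
+ *	static struct kunit_suite example_test_suite = {
+ *		.name = "example",
+ *		.init = example_suite_init,
+ *		.test_cases = example_test_cases,
+ *	};
+ */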
+
+/**
+ * struct kunit - represents a running instance of a test.
+ *
+ * @priv: for user to store arbitrary data. Commonly used to pass data
+ * created in the init function (see &struct kunit_suite).
+ *
+ * Used to store information about the current context under which the test
+ * is running. Most of this data is private and should only be accessed
+ * indirectly via public functions; the one exception is @priv which can be
+ * used by the test writer to store arbitrary data.
+ */
+struct kunit {
+ void *priv;
+
+ /* private: internal use only. */
+ const char *name; /* Read only after initialization! */
+ struct kunit_try_catch try_catch;
+ /*
+ * success starts as true, and may only be set to false during a
+ * test case; thus, it is safe to update this across multiple
+ * threads using WRITE_ONCE; however, as a consequence, it may only
+ * be read after the test case finishes once all threads associated
+ * with the test case have terminated.
+ */
+ bool success; /* Read only after test_case finishes! */
+ spinlock_t lock; /* Guards all mutable test state. */
+ /*
+ * Because resources is a list that may be updated multiple times (with
+ * new resources) from any thread associated with a test case, we must
+ * protect it with some type of lock.
+ */
+ struct list_head resources; /* Protected by lock. */
+};
+
+void kunit_init_test(struct kunit *test, const char *name);
+
+int kunit_run_tests(struct kunit_suite *suite);
+
+/**
+ * kunit_test_suite() - used to register a &struct kunit_suite with KUnit.
+ *
+ * @suite: a statically allocated &struct kunit_suite.
+ *
+ * Registers @suite with the test framework. See &struct kunit_suite for
+ * more information.
+ *
+ * NOTE: Currently KUnit tests are all run as late_initcalls; this means
+ * that they cannot test anything that must run at a different init phase.
+ * One significant restriction resulting from this is that KUnit cannot
+ * reliably test anything that is initialized in the late_init phase;
+ * another is that KUnit cannot be used to test anything that needs to run
+ * in an earlier init phase.
+ *
+ * TODO(brendanhiggins@google.com): Don't run all KUnit tests as
+ * late_initcalls. I have some future work planned to dispatch all KUnit
+ * tests from the same place, and at the very least to do so after
+ * everything else is definitely initialized.
+ */
+#define kunit_test_suite(suite) \
+ static int kunit_suite_init##suite(void) \
+ { \
+ return kunit_run_tests(&suite); \
+ } \
+ late_initcall(kunit_suite_init##suite)
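+
+/*
+ * Example usage, continuing the example_test_suite sketch above:
+ *
+ *	kunit_test_suite(example_test_suite);
+ *
+ * The suite must be statically allocated because the generated initcall
+ * takes its address.
+ */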
+
+/*
+ * Like kunit_alloc_resource() below, but returns the struct kunit_resource
+ * object that contains the allocation. This is mostly for testing purposes.
+ */
+struct kunit_resource *kunit_alloc_and_get_resource(struct kunit *test,
+ kunit_resource_init_t init,
+ kunit_resource_free_t free,
+ gfp_t internal_gfp,
+ void *context);
+
+/**
+ * kunit_alloc_resource() - Allocates a *test managed resource*.
+ * @test: The test context object.
+ * @init: a user supplied function to initialize the resource.
+ * @free: a user supplied function to free the resource.
+ * @internal_gfp: gfp to use for internal allocations, if unsure, use GFP_KERNEL
+ * @context: for the user to pass in arbitrary data to the init function.
+ *
+ * Allocates a *test managed resource*, a resource which will automatically be
+ * cleaned up at the end of a test case. See &struct kunit_resource for an
+ * example.
+ *
+ * NOTE: KUnit needs to allocate memory for each kunit_resource object. You must
+ * specify an @internal_gfp that is compatible with the use context of your
+ * resource.
+ */
+static inline void *kunit_alloc_resource(struct kunit *test,
+ kunit_resource_init_t init,
+ kunit_resource_free_t free,
+ gfp_t internal_gfp,
+ void *context)
+{
+ struct kunit_resource *res;
+
+ res = kunit_alloc_and_get_resource(test, init, free, internal_gfp,
+ context);
+
+ if (res)
+ return res->allocation;
+
+ return NULL;
+}
+
+typedef bool (*kunit_resource_match_t)(struct kunit *test,
+ const void *res,
+ void *match_data);
+
+/**
+ * kunit_resource_instance_match() - Match a resource with the same instance.
+ * @test: Test case to which the resource belongs.
+ * @res: The data stored in kunit_resource->allocation.
+ * @match_data: The resource pointer to match against.
+ *
+ * An instance of kunit_resource_match_t that matches a resource whose
+ * allocation matches @match_data.
+ */
+static inline bool kunit_resource_instance_match(struct kunit *test,
+ const void *res,
+ void *match_data)
+{
+ return res == match_data;
+}
+
+/**
+ * kunit_resource_destroy() - Find a kunit_resource and destroy it.
+ * @test: Test case to which the resource belongs.
+ * @match: Match function. Returns whether a given resource matches @match_data.
+ * @free: Must match the free function of the kunit_resource to be freed.
+ * @match_data: Data passed into @match.
+ *
+ * Free the latest kunit_resource of @test for which @free matches the
+ * kunit_resource_free_t associated with the resource and for which @match
+ * returns true.
+ *
+ * RETURNS:
+ * 0 if kunit_resource is found and freed, -ENOENT if not found.
+ */
+int kunit_resource_destroy(struct kunit *test,
+ kunit_resource_match_t match,
+ kunit_resource_free_t free,
+ void *match_data);
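+
+/*
+ * An illustrative sketch of tearing down a single resource before the test
+ * case ends, reusing kunit_kmalloc() and kunit_kmalloc_free() from the
+ * &struct kunit_resource example above:
+ *
+ *	void *data = kunit_kmalloc(test, 16, GFP_KERNEL);
+ *
+ *	kunit_resource_destroy(test, kunit_resource_instance_match,
+ *			       kunit_kmalloc_free, data);
+ */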
+
+/**
+ * kunit_kmalloc() - Like kmalloc() except the allocation is *test managed*.
+ * @test: The test context object.
+ * @size: The size in bytes of the desired memory.
+ * @gfp: flags passed to underlying kmalloc().
+ *
+ * Just like `kmalloc(...)`, except the allocation is managed by the test case
+ * and is automatically cleaned up after the test case concludes. See &struct
+ * kunit_resource for more information.
+ */
+void *kunit_kmalloc(struct kunit *test, size_t size, gfp_t gfp);
+
+/**
+ * kunit_kfree() - Like kfree except for allocations managed by KUnit.
+ * @test: The test case to which the resource belongs.
+ * @ptr: The memory allocation to free.
+ */
+void kunit_kfree(struct kunit *test, const void *ptr);
+
+/**
+ * kunit_kzalloc() - Just like kunit_kmalloc(), but zeroes the allocation.
+ * @test: The test context object.
+ * @size: The size in bytes of the desired memory.
+ * @gfp: flags passed to underlying kmalloc().
+ *
+ * See kzalloc() and kunit_kmalloc() for more information.
+ */
+static inline void *kunit_kzalloc(struct kunit *test, size_t size, gfp_t gfp)
+{
+ return kunit_kmalloc(test, size, gfp | __GFP_ZERO);
+}
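+
+/*
+ * A minimal usage sketch (struct foo is hypothetical):
+ *
+ *	static void example_test(struct kunit *test)
+ *	{
+ *		struct foo *f = kunit_kzalloc(test, sizeof(*f), GFP_KERNEL);
+ *
+ *		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, f);
+ *	}
+ *
+ * There is no matching free call: the allocation is cleaned up automatically
+ * when the test case concludes.
+ */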
+
+void kunit_cleanup(struct kunit *test);
+
+#define kunit_printk(lvl, test, fmt, ...) \
+ printk(lvl "\t# %s: " fmt, (test)->name, ##__VA_ARGS__)
+
+/**
+ * kunit_info() - Prints an INFO level message associated with @test.
+ *
+ * @test: The test context object.
+ * @fmt: A printk() style format string.
+ *
+ * Prints an info level message associated with the test suite being run.
+ * Takes a variable number of format parameters just like printk().
+ */
+#define kunit_info(test, fmt, ...) \
+ kunit_printk(KERN_INFO, test, fmt, ##__VA_ARGS__)
+
+/**
+ * kunit_warn() - Prints a WARN level message associated with @test.
+ *
+ * @test: The test context object.
+ * @fmt: A printk() style format string.
+ *
+ * Prints a warning level message.
+ */
+#define kunit_warn(test, fmt, ...) \
+ kunit_printk(KERN_WARNING, test, fmt, ##__VA_ARGS__)
+
+/**
+ * kunit_err() - Prints an ERROR level message associated with @test.
+ *
+ * @test: The test context object.
+ * @fmt: A printk() style format string.
+ *
+ * Prints an error level message.
+ */
+#define kunit_err(test, fmt, ...) \
+ kunit_printk(KERN_ERR, test, fmt, ##__VA_ARGS__)
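+
+/*
+ * Each of these prefixes its output with the name of the running test; an
+ * illustrative call (num_cases is hypothetical):
+ *
+ *	kunit_info(test, "checking %d cases\n", num_cases);
+ */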
+
+/**
+ * KUNIT_SUCCEED() - A no-op expectation. Only exists for code clarity.
+ * @test: The test context object.
+ *
+ * The opposite of KUNIT_FAIL(), it is an expectation that cannot fail. In other
+ * words, it does nothing and only exists for code clarity. See
+ * KUNIT_EXPECT_TRUE() for more information.
+ */
+#define KUNIT_SUCCEED(test) do {} while (0)
+
+void kunit_do_assertion(struct kunit *test,
+ struct kunit_assert *assert,
+ bool pass,
+ const char *fmt, ...);
+
+#define KUNIT_ASSERTION(test, pass, assert_class, INITIALIZER, fmt, ...) do { \
+ struct assert_class __assertion = INITIALIZER; \
+ kunit_do_assertion(test, \
+ &__assertion.assert, \
+ pass, \
+ fmt, \
+ ##__VA_ARGS__); \
+} while (0)
+
+#define KUNIT_FAIL_ASSERTION(test, assert_type, fmt, ...) \
+ KUNIT_ASSERTION(test, \
+ false, \
+ kunit_fail_assert, \
+ KUNIT_INIT_FAIL_ASSERT_STRUCT(test, assert_type), \
+ fmt, \
+ ##__VA_ARGS__)
+
+/**
+ * KUNIT_FAIL() - Always causes a test to fail when evaluated.
+ * @test: The test context object.
+ * @fmt: an informational message to be printed when the assertion is made.
+ * @...: string format arguments.
+ *
+ * The opposite of KUNIT_SUCCEED(), it is an expectation that always fails. In
+ * other words, it always results in a failed expectation, and consequently
+ * always causes the test case to fail when evaluated. See KUNIT_EXPECT_TRUE()
+ * for more information.
+ */
+#define KUNIT_FAIL(test, fmt, ...) \
+ KUNIT_FAIL_ASSERTION(test, \
+ KUNIT_EXPECTATION, \
+ fmt, \
+ ##__VA_ARGS__)
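+
+/*
+ * An illustrative sketch (the states and result variable are hypothetical):
+ * KUNIT_FAIL() is handy for flagging a path that should be unreachable:
+ *
+ *	switch (result) {
+ *	case STATE_A:
+ *	case STATE_B:
+ *		break;
+ *	default:
+ *		KUNIT_FAIL(test, "unexpected state: %d", result);
+ *	}
+ */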
+
+#define KUNIT_UNARY_ASSERTION(test, \
+ assert_type, \
+ condition, \
+ expected_true, \
+ fmt, \
+ ...) \
+ KUNIT_ASSERTION(test, \
+ !!(condition) == !!expected_true, \
+ kunit_unary_assert, \
+ KUNIT_INIT_UNARY_ASSERT_STRUCT(test, \
+ assert_type, \
+ #condition, \
+ expected_true), \
+ fmt, \
+ ##__VA_ARGS__)
+
+#define KUNIT_TRUE_MSG_ASSERTION(test, assert_type, condition, fmt, ...) \
+ KUNIT_UNARY_ASSERTION(test, \
+ assert_type, \
+ condition, \
+ true, \
+ fmt, \
+ ##__VA_ARGS__)
+
+#define KUNIT_TRUE_ASSERTION(test, assert_type, condition) \
+ KUNIT_TRUE_MSG_ASSERTION(test, assert_type, condition, NULL)
+
+#define KUNIT_FALSE_MSG_ASSERTION(test, assert_type, condition, fmt, ...) \
+ KUNIT_UNARY_ASSERTION(test, \
+ assert_type, \
+ condition, \
+ false, \
+ fmt, \
+ ##__VA_ARGS__)
+
+#define KUNIT_FALSE_ASSERTION(test, assert_type, condition) \
+ KUNIT_FALSE_MSG_ASSERTION(test, assert_type, condition, NULL)
+
+/*
+ * A factory macro for defining the assertions and expectations for the basic
+ * comparisons defined for the built in types.
+ *
+ * Unfortunately, there is no common type that all types can be promoted to for
+ * which all the binary operators behave the same way as for the actual types
+ * (for example, there is no type that long long and unsigned long long can
+ * both be cast to where the comparison result is preserved for all values). So
+ * the best we can do is do the comparison in the original types and then coerce
+ * everything to long long for printing; this way, the comparison behaves
+ * correctly and the printed out value usually makes sense without
+ * interpretation, but can always be interpreted to figure out the actual
+ * value.
+ */
+#define KUNIT_BASE_BINARY_ASSERTION(test, \
+ assert_class, \
+ ASSERT_CLASS_INIT, \
+ assert_type, \
+ left, \
+ op, \
+ right, \
+ fmt, \
+ ...) \
+do { \
+ typeof(left) __left = (left); \
+ typeof(right) __right = (right); \
+ ((void)__typecheck(__left, __right)); \
+ \
+ KUNIT_ASSERTION(test, \
+ __left op __right, \
+ assert_class, \
+ ASSERT_CLASS_INIT(test, \
+ assert_type, \
+ #op, \
+ #left, \
+ __left, \
+ #right, \
+ __right), \
+ fmt, \
+ ##__VA_ARGS__); \
+} while (0)
+
+#define KUNIT_BASE_EQ_MSG_ASSERTION(test, \
+ assert_class, \
+ ASSERT_CLASS_INIT, \
+ assert_type, \
+ left, \
+ right, \
+ fmt, \
+ ...) \
+ KUNIT_BASE_BINARY_ASSERTION(test, \
+ assert_class, \
+ ASSERT_CLASS_INIT, \
+ assert_type, \
+ left, ==, right, \
+ fmt, \
+ ##__VA_ARGS__)
+
+#define KUNIT_BASE_NE_MSG_ASSERTION(test, \
+ assert_class, \
+ ASSERT_CLASS_INIT, \
+ assert_type, \
+ left, \
+ right, \
+ fmt, \
+ ...) \
+ KUNIT_BASE_BINARY_ASSERTION(test, \
+ assert_class, \
+ ASSERT_CLASS_INIT, \
+ assert_type, \
+ left, !=, right, \
+ fmt, \
+ ##__VA_ARGS__)
+
+#define KUNIT_BASE_LT_MSG_ASSERTION(test, \
+ assert_class, \
+ ASSERT_CLASS_INIT, \
+ assert_type, \
+ left, \
+ right, \
+ fmt, \
+ ...) \
+ KUNIT_BASE_BINARY_ASSERTION(test, \
+ assert_class, \
+ ASSERT_CLASS_INIT, \
+ assert_type, \
+ left, <, right, \
+ fmt, \
+ ##__VA_ARGS__)
+
+#define KUNIT_BASE_LE_MSG_ASSERTION(test, \
+ assert_class, \
+ ASSERT_CLASS_INIT, \
+ assert_type, \
+ left, \
+ right, \
+ fmt, \
+ ...) \
+ KUNIT_BASE_BINARY_ASSERTION(test, \
+ assert_class, \
+ ASSERT_CLASS_INIT, \
+ assert_type, \
+ left, <=, right, \
+ fmt, \
+ ##__VA_ARGS__)
+
+#define KUNIT_BASE_GT_MSG_ASSERTION(test, \
+ assert_class, \
+ ASSERT_CLASS_INIT, \
+ assert_type, \
+ left, \
+ right, \
+ fmt, \
+ ...) \
+ KUNIT_BASE_BINARY_ASSERTION(test, \
+ assert_class, \
+ ASSERT_CLASS_INIT, \
+ assert_type, \
+ left, >, right, \
+ fmt, \
+ ##__VA_ARGS__)
+
+#define KUNIT_BASE_GE_MSG_ASSERTION(test, \
+ assert_class, \
+ ASSERT_CLASS_INIT, \
+ assert_type, \
+ left, \
+ right, \
+ fmt, \
+ ...) \
+ KUNIT_BASE_BINARY_ASSERTION(test, \
+ assert_class, \
+ ASSERT_CLASS_INIT, \
+ assert_type, \
+ left, >=, right, \
+ fmt, \
+ ##__VA_ARGS__)
+
+#define KUNIT_BINARY_EQ_MSG_ASSERTION(test, assert_type, left, right, fmt, ...)\
+ KUNIT_BASE_EQ_MSG_ASSERTION(test, \
+ kunit_binary_assert, \
+ KUNIT_INIT_BINARY_ASSERT_STRUCT, \
+ assert_type, \
+ left, \
+ right, \
+ fmt, \
+ ##__VA_ARGS__)
+
+#define KUNIT_BINARY_EQ_ASSERTION(test, assert_type, left, right) \
+ KUNIT_BINARY_EQ_MSG_ASSERTION(test, \
+ assert_type, \
+ left, \
+ right, \
+ NULL)
+
+#define KUNIT_BINARY_PTR_EQ_MSG_ASSERTION(test, \
+ assert_type, \
+ left, \
+ right, \
+ fmt, \
+ ...) \
+ KUNIT_BASE_EQ_MSG_ASSERTION(test, \
+ kunit_binary_ptr_assert, \
+ KUNIT_INIT_BINARY_PTR_ASSERT_STRUCT, \
+ assert_type, \
+ left, \
+ right, \
+ fmt, \
+ ##__VA_ARGS__)
+
+#define KUNIT_BINARY_PTR_EQ_ASSERTION(test, assert_type, left, right) \
+ KUNIT_BINARY_PTR_EQ_MSG_ASSERTION(test, \
+ assert_type, \
+ left, \
+ right, \
+ NULL)
+
+#define KUNIT_BINARY_NE_MSG_ASSERTION(test, assert_type, left, right, fmt, ...)\
+ KUNIT_BASE_NE_MSG_ASSERTION(test, \
+ kunit_binary_assert, \
+ KUNIT_INIT_BINARY_ASSERT_STRUCT, \
+ assert_type, \
+ left, \
+ right, \
+ fmt, \
+ ##__VA_ARGS__)
+
+#define KUNIT_BINARY_NE_ASSERTION(test, assert_type, left, right) \
+ KUNIT_BINARY_NE_MSG_ASSERTION(test, \
+ assert_type, \
+ left, \
+ right, \
+ NULL)
+
+#define KUNIT_BINARY_PTR_NE_MSG_ASSERTION(test, \
+ assert_type, \
+ left, \
+ right, \
+ fmt, \
+ ...) \
+ KUNIT_BASE_NE_MSG_ASSERTION(test, \
+ kunit_binary_ptr_assert, \
+ KUNIT_INIT_BINARY_PTR_ASSERT_STRUCT, \
+ assert_type, \
+ left, \
+ right, \
+ fmt, \
+ ##__VA_ARGS__)
+
+#define KUNIT_BINARY_PTR_NE_ASSERTION(test, assert_type, left, right) \
+ KUNIT_BINARY_PTR_NE_MSG_ASSERTION(test, \
+ assert_type, \
+ left, \
+ right, \
+ NULL)
+
+#define KUNIT_BINARY_LT_MSG_ASSERTION(test, assert_type, left, right, fmt, ...)\
+ KUNIT_BASE_LT_MSG_ASSERTION(test, \
+ kunit_binary_assert, \
+ KUNIT_INIT_BINARY_ASSERT_STRUCT, \
+ assert_type, \
+ left, \
+ right, \
+ fmt, \
+ ##__VA_ARGS__)
+
+#define KUNIT_BINARY_LT_ASSERTION(test, assert_type, left, right) \
+ KUNIT_BINARY_LT_MSG_ASSERTION(test, \
+ assert_type, \
+ left, \
+ right, \
+ NULL)
+
+#define KUNIT_BINARY_PTR_LT_MSG_ASSERTION(test, \
+ assert_type, \
+ left, \
+ right, \
+ fmt, \
+ ...) \
+ KUNIT_BASE_LT_MSG_ASSERTION(test, \
+ kunit_binary_ptr_assert, \
+ KUNIT_INIT_BINARY_PTR_ASSERT_STRUCT, \
+ assert_type, \
+ left, \
+ right, \
+ fmt, \
+ ##__VA_ARGS__)
+
+#define KUNIT_BINARY_PTR_LT_ASSERTION(test, assert_type, left, right) \
+ KUNIT_BINARY_PTR_LT_MSG_ASSERTION(test, \
+ assert_type, \
+ left, \
+ right, \
+ NULL)
+
+#define KUNIT_BINARY_LE_MSG_ASSERTION(test, assert_type, left, right, fmt, ...)\
+ KUNIT_BASE_LE_MSG_ASSERTION(test, \
+ kunit_binary_assert, \
+ KUNIT_INIT_BINARY_ASSERT_STRUCT, \
+ assert_type, \
+ left, \
+ right, \
+ fmt, \
+ ##__VA_ARGS__)
+
+#define KUNIT_BINARY_LE_ASSERTION(test, assert_type, left, right) \
+ KUNIT_BINARY_LE_MSG_ASSERTION(test, \
+ assert_type, \
+ left, \
+ right, \
+ NULL)
+
+#define KUNIT_BINARY_PTR_LE_MSG_ASSERTION(test, \
+ assert_type, \
+ left, \
+ right, \
+ fmt, \
+ ...) \
+ KUNIT_BASE_LE_MSG_ASSERTION(test, \
+ kunit_binary_ptr_assert, \
+ KUNIT_INIT_BINARY_PTR_ASSERT_STRUCT, \
+ assert_type, \
+ left, \
+ right, \
+ fmt, \
+ ##__VA_ARGS__)
+
+#define KUNIT_BINARY_PTR_LE_ASSERTION(test, assert_type, left, right) \
+ KUNIT_BINARY_PTR_LE_MSG_ASSERTION(test, \
+ assert_type, \
+ left, \
+ right, \
+ NULL)
+
+#define KUNIT_BINARY_GT_MSG_ASSERTION(test, assert_type, left, right, fmt, ...)\
+ KUNIT_BASE_GT_MSG_ASSERTION(test, \
+ kunit_binary_assert, \
+ KUNIT_INIT_BINARY_ASSERT_STRUCT, \
+ assert_type, \
+ left, \
+ right, \
+ fmt, \
+ ##__VA_ARGS__)
+
+#define KUNIT_BINARY_GT_ASSERTION(test, assert_type, left, right) \
+ KUNIT_BINARY_GT_MSG_ASSERTION(test, \
+ assert_type, \
+ left, \
+ right, \
+ NULL)
+
+#define KUNIT_BINARY_PTR_GT_MSG_ASSERTION(test, \
+ assert_type, \
+ left, \
+ right, \
+ fmt, \
+ ...) \
+ KUNIT_BASE_GT_MSG_ASSERTION(test, \
+ kunit_binary_ptr_assert, \
+ KUNIT_INIT_BINARY_PTR_ASSERT_STRUCT, \
+ assert_type, \
+ left, \
+ right, \
+ fmt, \
+ ##__VA_ARGS__)
+
+#define KUNIT_BINARY_PTR_GT_ASSERTION(test, assert_type, left, right) \
+ KUNIT_BINARY_PTR_GT_MSG_ASSERTION(test, \
+ assert_type, \
+ left, \
+ right, \
+ NULL)
+
+#define KUNIT_BINARY_GE_MSG_ASSERTION(test, assert_type, left, right, fmt, ...)\
+ KUNIT_BASE_GE_MSG_ASSERTION(test, \
+ kunit_binary_assert, \
+ KUNIT_INIT_BINARY_ASSERT_STRUCT, \
+ assert_type, \
+ left, \
+ right, \
+ fmt, \
+ ##__VA_ARGS__)
+
+#define KUNIT_BINARY_GE_ASSERTION(test, assert_type, left, right) \
+ KUNIT_BINARY_GE_MSG_ASSERTION(test, \
+ assert_type, \
+ left, \
+ right, \
+ NULL)
+
+#define KUNIT_BINARY_PTR_GE_MSG_ASSERTION(test, \
+ assert_type, \
+ left, \
+ right, \
+ fmt, \
+ ...) \
+ KUNIT_BASE_GE_MSG_ASSERTION(test, \
+ kunit_binary_ptr_assert, \
+ KUNIT_INIT_BINARY_PTR_ASSERT_STRUCT, \
+ assert_type, \
+ left, \
+ right, \
+ fmt, \
+ ##__VA_ARGS__)
+
+#define KUNIT_BINARY_PTR_GE_ASSERTION(test, assert_type, left, right) \
+ KUNIT_BINARY_PTR_GE_MSG_ASSERTION(test, \
+ assert_type, \
+ left, \
+ right, \
+ NULL)
+
+#define KUNIT_BINARY_STR_ASSERTION(test, \
+ assert_type, \
+ left, \
+ op, \
+ right, \
+ fmt, \
+ ...) \
+do { \
+ typeof(left) __left = (left); \
+ typeof(right) __right = (right); \
+ \
+ KUNIT_ASSERTION(test, \
+ strcmp(__left, __right) op 0, \
+ kunit_binary_str_assert, \
+ KUNIT_INIT_BINARY_ASSERT_STRUCT(test, \
+ assert_type, \
+ #op, \
+ #left, \
+ __left, \
+ #right, \
+ __right), \
+ fmt, \
+ ##__VA_ARGS__); \
+} while (0)
+
+#define KUNIT_BINARY_STR_EQ_MSG_ASSERTION(test, \
+ assert_type, \
+ left, \
+ right, \
+ fmt, \
+ ...) \
+ KUNIT_BINARY_STR_ASSERTION(test, \
+ assert_type, \
+ left, ==, right, \
+ fmt, \
+ ##__VA_ARGS__)
+
+#define KUNIT_BINARY_STR_EQ_ASSERTION(test, assert_type, left, right) \
+ KUNIT_BINARY_STR_EQ_MSG_ASSERTION(test, \
+ assert_type, \
+ left, \
+ right, \
+ NULL)
+
+#define KUNIT_BINARY_STR_NE_MSG_ASSERTION(test, \
+ assert_type, \
+ left, \
+ right, \
+ fmt, \
+ ...) \
+ KUNIT_BINARY_STR_ASSERTION(test, \
+ assert_type, \
+ left, !=, right, \
+ fmt, \
+ ##__VA_ARGS__)
+
+#define KUNIT_BINARY_STR_NE_ASSERTION(test, assert_type, left, right) \
+ KUNIT_BINARY_STR_NE_MSG_ASSERTION(test, \
+ assert_type, \
+ left, \
+ right, \
+ NULL)
+
+#define KUNIT_PTR_NOT_ERR_OR_NULL_MSG_ASSERTION(test, \
+ assert_type, \
+ ptr, \
+ fmt, \
+ ...) \
+do { \
+ typeof(ptr) __ptr = (ptr); \
+ \
+ KUNIT_ASSERTION(test, \
+ !IS_ERR_OR_NULL(__ptr), \
+ kunit_ptr_not_err_assert, \
+ KUNIT_INIT_PTR_NOT_ERR_STRUCT(test, \
+ assert_type, \
+ #ptr, \
+ __ptr), \
+ fmt, \
+ ##__VA_ARGS__); \
+} while (0)
+
+#define KUNIT_PTR_NOT_ERR_OR_NULL_ASSERTION(test, assert_type, ptr) \
+ KUNIT_PTR_NOT_ERR_OR_NULL_MSG_ASSERTION(test, \
+ assert_type, \
+ ptr, \
+ NULL)
+
+/**
+ * KUNIT_EXPECT_TRUE() - Causes a test failure when the expression is not true.
+ * @test: The test context object.
+ * @condition: an arbitrary boolean expression. The test fails when this does
+ * not evaluate to true.
+ *
+ * This and expectations of the form `KUNIT_EXPECT_*` will cause the test case
+ * to fail when the specified condition is not met; however, it will not prevent
+ * the test case from continuing to run; this is otherwise known as an
+ * *expectation failure*.
+ */
+#define KUNIT_EXPECT_TRUE(test, condition) \
+ KUNIT_TRUE_ASSERTION(test, KUNIT_EXPECTATION, condition)
+
+#define KUNIT_EXPECT_TRUE_MSG(test, condition, fmt, ...) \
+ KUNIT_TRUE_MSG_ASSERTION(test, \
+ KUNIT_EXPECTATION, \
+ condition, \
+ fmt, \
+ ##__VA_ARGS__)
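+
+/*
+ * An illustrative sketch (obj, example_has_foo() and example_has_bar() are
+ * hypothetical): both expectations below are evaluated even if the first one
+ * fails, since an expectation failure does not abort the test case:
+ *
+ *	KUNIT_EXPECT_TRUE(test, example_has_foo(obj));
+ *	KUNIT_EXPECT_TRUE(test, example_has_bar(obj));
+ */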
+
+/**
+ * KUNIT_EXPECT_FALSE() - Causes a test failure when the expression is not false.
+ * @test: The test context object.
+ * @condition: an arbitrary boolean expression. The test fails when this does
+ * not evaluate to false.
+ *
+ * Sets an expectation that @condition evaluates to false. See
+ * KUNIT_EXPECT_TRUE() for more information.
+ */
+#define KUNIT_EXPECT_FALSE(test, condition) \
+ KUNIT_FALSE_ASSERTION(test, KUNIT_EXPECTATION, condition)
+
+#define KUNIT_EXPECT_FALSE_MSG(test, condition, fmt, ...) \
+ KUNIT_FALSE_MSG_ASSERTION(test, \
+ KUNIT_EXPECTATION, \
+ condition, \
+ fmt, \
+ ##__VA_ARGS__)
+
+/**
+ * KUNIT_EXPECT_EQ() - Sets an expectation that @left and @right are equal.
+ * @test: The test context object.
+ * @left: an arbitrary expression that evaluates to a primitive C type.
+ * @right: an arbitrary expression that evaluates to a primitive C type.
+ *
+ * Sets an expectation that the values that @left and @right evaluate to are
+ * equal. This is semantically equivalent to
+ * KUNIT_EXPECT_TRUE(@test, (@left) == (@right)). See KUNIT_EXPECT_TRUE() for
+ * more information.
+ */
+#define KUNIT_EXPECT_EQ(test, left, right) \
+ KUNIT_BINARY_EQ_ASSERTION(test, KUNIT_EXPECTATION, left, right)
+
+#define KUNIT_EXPECT_EQ_MSG(test, left, right, fmt, ...) \
+ KUNIT_BINARY_EQ_MSG_ASSERTION(test, \
+ KUNIT_EXPECTATION, \
+ left, \
+ right, \
+ fmt, \
+ ##__VA_ARGS__)
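+
+/*
+ * The _MSG variants take a printk()-style format string and arguments that
+ * are printed along with the automatically generated report when the check
+ * fails. An illustrative sketch (the cases table is hypothetical; add() is
+ * the function from the example above):
+ *
+ *	for (i = 0; i < ARRAY_SIZE(cases); i++)
+ *		KUNIT_EXPECT_EQ_MSG(test,
+ *				    cases[i].sum, add(cases[i].a, cases[i].b),
+ *				    "add() failed for case %d", i);
+ */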
+
+/**
+ * KUNIT_EXPECT_PTR_EQ() - Expects that pointers @left and @right are equal.
+ * @test: The test context object.
+ * @left: an arbitrary expression that evaluates to a pointer.
+ * @right: an arbitrary expression that evaluates to a pointer.
+ *
+ * Sets an expectation that the values that @left and @right evaluate to are
+ * equal. This is semantically equivalent to
+ * KUNIT_EXPECT_TRUE(@test, (@left) == (@right)). See KUNIT_EXPECT_TRUE() for
+ * more information.
+ */
+#define KUNIT_EXPECT_PTR_EQ(test, left, right) \
+ KUNIT_BINARY_PTR_EQ_ASSERTION(test, \
+ KUNIT_EXPECTATION, \
+ left, \
+ right)
+
+#define KUNIT_EXPECT_PTR_EQ_MSG(test, left, right, fmt, ...) \
+ KUNIT_BINARY_PTR_EQ_MSG_ASSERTION(test, \
+ KUNIT_EXPECTATION, \
+ left, \
+ right, \
+ fmt, \
+ ##__VA_ARGS__)
+
+/**
+ * KUNIT_EXPECT_NE() - An expectation that @left and @right are not equal.
+ * @test: The test context object.
+ * @left: an arbitrary expression that evaluates to a primitive C type.
+ * @right: an arbitrary expression that evaluates to a primitive C type.
+ *
+ * Sets an expectation that the values that @left and @right evaluate to are not
+ * equal. This is semantically equivalent to
+ * KUNIT_EXPECT_TRUE(@test, (@left) != (@right)). See KUNIT_EXPECT_TRUE() for
+ * more information.
+ */
+#define KUNIT_EXPECT_NE(test, left, right) \
+ KUNIT_BINARY_NE_ASSERTION(test, KUNIT_EXPECTATION, left, right)
+
+#define KUNIT_EXPECT_NE_MSG(test, left, right, fmt, ...) \
+ KUNIT_BINARY_NE_MSG_ASSERTION(test, \
+ KUNIT_EXPECTATION, \
+ left, \
+ right, \
+ fmt, \
+ ##__VA_ARGS__)
+
+/**
+ * KUNIT_EXPECT_PTR_NE() - Expects that pointers @left and @right are not equal.
+ * @test: The test context object.
+ * @left: an arbitrary expression that evaluates to a pointer.
+ * @right: an arbitrary expression that evaluates to a pointer.
+ *
+ * Sets an expectation that the values that @left and @right evaluate to are not
+ * equal. This is semantically equivalent to
+ * KUNIT_EXPECT_TRUE(@test, (@left) != (@right)). See KUNIT_EXPECT_TRUE() for
+ * more information.
+ */
+#define KUNIT_EXPECT_PTR_NE(test, left, right) \
+ KUNIT_BINARY_PTR_NE_ASSERTION(test, \
+ KUNIT_EXPECTATION, \
+ left, \
+ right)
+
+#define KUNIT_EXPECT_PTR_NE_MSG(test, left, right, fmt, ...) \
+ KUNIT_BINARY_PTR_NE_MSG_ASSERTION(test, \
+ KUNIT_EXPECTATION, \
+ left, \
+ right, \
+ fmt, \
+ ##__VA_ARGS__)
+
+/**
+ * KUNIT_EXPECT_LT() - An expectation that @left is less than @right.
+ * @test: The test context object.
+ * @left: an arbitrary expression that evaluates to a primitive C type.
+ * @right: an arbitrary expression that evaluates to a primitive C type.
+ *
+ * Sets an expectation that the value that @left evaluates to is less than the
+ * value that @right evaluates to. This is semantically equivalent to
+ * KUNIT_EXPECT_TRUE(@test, (@left) < (@right)). See KUNIT_EXPECT_TRUE() for
+ * more information.
+ */
+#define KUNIT_EXPECT_LT(test, left, right) \
+ KUNIT_BINARY_LT_ASSERTION(test, KUNIT_EXPECTATION, left, right)
+
+#define KUNIT_EXPECT_LT_MSG(test, left, right, fmt, ...) \
+ KUNIT_BINARY_LT_MSG_ASSERTION(test, \
+ KUNIT_EXPECTATION, \
+ left, \
+ right, \
+ fmt, \
+ ##__VA_ARGS__)
+
+/**
+ * KUNIT_EXPECT_LE() - Expects that @left is less than or equal to @right.
+ * @test: The test context object.
+ * @left: an arbitrary expression that evaluates to a primitive C type.
+ * @right: an arbitrary expression that evaluates to a primitive C type.
+ *
+ * Sets an expectation that the value that @left evaluates to is less than or
+ * equal to the value that @right evaluates to. Semantically this is equivalent
+ * to KUNIT_EXPECT_TRUE(@test, (@left) <= (@right)). See KUNIT_EXPECT_TRUE() for
+ * more information.
+ */
+#define KUNIT_EXPECT_LE(test, left, right) \
+ KUNIT_BINARY_LE_ASSERTION(test, KUNIT_EXPECTATION, left, right)
+
+#define KUNIT_EXPECT_LE_MSG(test, left, right, fmt, ...) \
+ KUNIT_BINARY_LE_MSG_ASSERTION(test, \
+ KUNIT_EXPECTATION, \
+ left, \
+ right, \
+ fmt, \
+ ##__VA_ARGS__)
+
+/**
+ * KUNIT_EXPECT_GT() - An expectation that @left is greater than @right.
+ * @test: The test context object.
+ * @left: an arbitrary expression that evaluates to a primitive C type.
+ * @right: an arbitrary expression that evaluates to a primitive C type.
+ *
+ * Sets an expectation that the value that @left evaluates to is greater than
+ * the value that @right evaluates to. This is semantically equivalent to
+ * KUNIT_EXPECT_TRUE(@test, (@left) > (@right)). See KUNIT_EXPECT_TRUE() for
+ * more information.
+ */
+#define KUNIT_EXPECT_GT(test, left, right) \
+ KUNIT_BINARY_GT_ASSERTION(test, KUNIT_EXPECTATION, left, right)
+
+#define KUNIT_EXPECT_GT_MSG(test, left, right, fmt, ...) \
+ KUNIT_BINARY_GT_MSG_ASSERTION(test, \
+ KUNIT_EXPECTATION, \
+ left, \
+ right, \
+ fmt, \
+ ##__VA_ARGS__)
+
+/**
+ * KUNIT_EXPECT_GE() - Expects that @left is greater than or equal to @right.
+ * @test: The test context object.
+ * @left: an arbitrary expression that evaluates to a primitive C type.
+ * @right: an arbitrary expression that evaluates to a primitive C type.
+ *
+ * Sets an expectation that the value that @left evaluates to is greater than
+ * or equal to the value that @right evaluates to. This is semantically
+ * equivalent to KUNIT_EXPECT_TRUE(@test, (@left) >= (@right)). See
+ * KUNIT_EXPECT_TRUE() for
+ * more information.
+ */
+#define KUNIT_EXPECT_GE(test, left, right) \
+ KUNIT_BINARY_GE_ASSERTION(test, KUNIT_EXPECTATION, left, right)
+
+#define KUNIT_EXPECT_GE_MSG(test, left, right, fmt, ...) \
+ KUNIT_BINARY_GE_MSG_ASSERTION(test, \
+ KUNIT_EXPECTATION, \
+ left, \
+ right, \
+ fmt, \
+ ##__VA_ARGS__)
+
+/**
+ * KUNIT_EXPECT_STREQ() - Expects that strings @left and @right are equal.
+ * @test: The test context object.
+ * @left: an arbitrary expression that evaluates to a null terminated string.
+ * @right: an arbitrary expression that evaluates to a null terminated string.
+ *
+ * Sets an expectation that the values that @left and @right evaluate to are
+ * equal. This is semantically equivalent to
+ * KUNIT_EXPECT_TRUE(@test, !strcmp((@left), (@right))). See KUNIT_EXPECT_TRUE()
+ * for more information.
+ */
+#define KUNIT_EXPECT_STREQ(test, left, right) \
+ KUNIT_BINARY_STR_EQ_ASSERTION(test, KUNIT_EXPECTATION, left, right)
+
+#define KUNIT_EXPECT_STREQ_MSG(test, left, right, fmt, ...) \
+ KUNIT_BINARY_STR_EQ_MSG_ASSERTION(test, \
+ KUNIT_EXPECTATION, \
+ left, \
+ right, \
+ fmt, \
+ ##__VA_ARGS__)
+
+/**
+ * KUNIT_EXPECT_STRNEQ() - Expects that strings @left and @right are not equal.
+ * @test: The test context object.
+ * @left: an arbitrary expression that evaluates to a null terminated string.
+ * @right: an arbitrary expression that evaluates to a null terminated string.
+ *
+ * Sets an expectation that the values that @left and @right evaluate to are
+ * not equal. This is semantically equivalent to
+ * KUNIT_EXPECT_TRUE(@test, strcmp((@left), (@right))). See KUNIT_EXPECT_TRUE()
+ * for more information.
+ */
+#define KUNIT_EXPECT_STRNEQ(test, left, right) \
+ KUNIT_BINARY_STR_NE_ASSERTION(test, KUNIT_EXPECTATION, left, right)
+
+#define KUNIT_EXPECT_STRNEQ_MSG(test, left, right, fmt, ...) \
+ KUNIT_BINARY_STR_NE_MSG_ASSERTION(test, \
+ KUNIT_EXPECTATION, \
+ left, \
+ right, \
+ fmt, \
+ ##__VA_ARGS__)
+
+/**
+ * KUNIT_EXPECT_NOT_ERR_OR_NULL() - Expects that @ptr is not null and not err.
+ * @test: The test context object.
+ * @ptr: an arbitrary pointer.
+ *
+ * Sets an expectation that the value that @ptr evaluates to is not null and not
+ * an errno stored in a pointer. This is semantically equivalent to
+ * KUNIT_EXPECT_TRUE(@test, !IS_ERR_OR_NULL(@ptr)). See KUNIT_EXPECT_TRUE() for
+ * more information.
+ */
+#define KUNIT_EXPECT_NOT_ERR_OR_NULL(test, ptr) \
+ KUNIT_PTR_NOT_ERR_OR_NULL_ASSERTION(test, KUNIT_EXPECTATION, ptr)
+
+#define KUNIT_EXPECT_NOT_ERR_OR_NULL_MSG(test, ptr, fmt, ...) \
+ KUNIT_PTR_NOT_ERR_OR_NULL_MSG_ASSERTION(test, \
+ KUNIT_EXPECTATION, \
+ ptr, \
+ fmt, \
+ ##__VA_ARGS__)
+
+#define KUNIT_ASSERT_FAILURE(test, fmt, ...) \
+ KUNIT_FAIL_ASSERTION(test, KUNIT_ASSERTION, fmt, ##__VA_ARGS__)
+
+/**
+ * KUNIT_ASSERT_TRUE() - Sets an assertion that @condition is true.
+ * @test: The test context object.
+ * @condition: an arbitrary boolean expression. The test fails and aborts when
+ * this does not evaluate to true.
+ *
+ * This and assertions of the form `KUNIT_ASSERT_*` will cause the test case to
+ * fail *and immediately abort* when the specified condition is not met. Unlike
+ * an expectation failure, it will prevent the test case from continuing to run;
+ * this is otherwise known as an *assertion failure*.
+ */
+#define KUNIT_ASSERT_TRUE(test, condition) \
+ KUNIT_TRUE_ASSERTION(test, KUNIT_ASSERTION, condition)
+
+#define KUNIT_ASSERT_TRUE_MSG(test, condition, fmt, ...) \
+ KUNIT_TRUE_MSG_ASSERTION(test, \
+ KUNIT_ASSERTION, \
+ condition, \
+ fmt, \
+ ##__VA_ARGS__)
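+
+/*
+ * An illustrative sketch (size is hypothetical): if the assertion below
+ * fails, the memset() is never reached, because the test case aborts at the
+ * failed assertion:
+ *
+ *	void *mem = kunit_kmalloc(test, size, GFP_KERNEL);
+ *
+ *	KUNIT_ASSERT_TRUE(test, mem != NULL);
+ *	memset(mem, 0, size);
+ */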
+
+/**
+ * KUNIT_ASSERT_FALSE() - Sets an assertion that @condition is false.
+ * @test: The test context object.
+ * @condition: an arbitrary boolean expression.
+ *
+ * Sets an assertion that the value that @condition evaluates to is false. This
+ * is the same as KUNIT_EXPECT_FALSE(), except it causes an assertion failure
+ * (see KUNIT_ASSERT_TRUE()) when the assertion is not met.
+ */
+#define KUNIT_ASSERT_FALSE(test, condition) \
+ KUNIT_FALSE_ASSERTION(test, KUNIT_ASSERTION, condition)
+
+#define KUNIT_ASSERT_FALSE_MSG(test, condition, fmt, ...) \
+ KUNIT_FALSE_MSG_ASSERTION(test, \
+ KUNIT_ASSERTION, \
+ condition, \
+ fmt, \
+ ##__VA_ARGS__)
+
+/**
+ * KUNIT_ASSERT_EQ() - Sets an assertion that @left and @right are equal.
+ * @test: The test context object.
+ * @left: an arbitrary expression that evaluates to a primitive C type.
+ * @right: an arbitrary expression that evaluates to a primitive C type.
+ *
+ * Sets an assertion that the values that @left and @right evaluate to are
+ * equal. This is the same as KUNIT_EXPECT_EQ(), except it causes an assertion
+ * failure (see KUNIT_ASSERT_TRUE()) when the assertion is not met.
+ */
+#define KUNIT_ASSERT_EQ(test, left, right) \
+ KUNIT_BINARY_EQ_ASSERTION(test, KUNIT_ASSERTION, left, right)
+
+#define KUNIT_ASSERT_EQ_MSG(test, left, right, fmt, ...) \
+ KUNIT_BINARY_EQ_MSG_ASSERTION(test, \
+ KUNIT_ASSERTION, \
+ left, \
+ right, \
+ fmt, \
+ ##__VA_ARGS__)
+
+/**
+ * KUNIT_ASSERT_PTR_EQ() - Asserts that pointers @left and @right are equal.
+ * @test: The test context object.
+ * @left: an arbitrary expression that evaluates to a pointer.
+ * @right: an arbitrary expression that evaluates to a pointer.
+ *
+ * Sets an assertion that the values that @left and @right evaluate to are
+ * equal. This is the same as KUNIT_EXPECT_EQ(), except it causes an assertion
+ * failure (see KUNIT_ASSERT_TRUE()) when the assertion is not met.
+ */
+#define KUNIT_ASSERT_PTR_EQ(test, left, right) \
+ KUNIT_BINARY_PTR_EQ_ASSERTION(test, KUNIT_ASSERTION, left, right)
+
+#define KUNIT_ASSERT_PTR_EQ_MSG(test, left, right, fmt, ...) \
+ KUNIT_BINARY_PTR_EQ_MSG_ASSERTION(test, \
+ KUNIT_ASSERTION, \
+ left, \
+ right, \
+ fmt, \
+ ##__VA_ARGS__)
+
+/**
+ * KUNIT_ASSERT_NE() - An assertion that @left and @right are not equal.
+ * @test: The test context object.
+ * @left: an arbitrary expression that evaluates to a primitive C type.
+ * @right: an arbitrary expression that evaluates to a primitive C type.
+ *
+ * Sets an assertion that the values that @left and @right evaluate to are not
+ * equal. This is the same as KUNIT_EXPECT_NE(), except it causes an assertion
+ * failure (see KUNIT_ASSERT_TRUE()) when the assertion is not met.
+ */
+#define KUNIT_ASSERT_NE(test, left, right) \
+ KUNIT_BINARY_NE_ASSERTION(test, KUNIT_ASSERTION, left, right)
+
+#define KUNIT_ASSERT_NE_MSG(test, left, right, fmt, ...) \
+ KUNIT_BINARY_NE_MSG_ASSERTION(test, \
+ KUNIT_ASSERTION, \
+ left, \
+ right, \
+ fmt, \
+ ##__VA_ARGS__)
+
+/**
+ * KUNIT_ASSERT_PTR_NE() - Asserts that pointers @left and @right are not equal.
+ * @test: The test context object.
+ * @left: an arbitrary expression that evaluates to a pointer.
+ * @right: an arbitrary expression that evaluates to a pointer.
+ *
+ * Sets an assertion that the values that @left and @right evaluate to are not
+ * equal. This is the same as KUNIT_EXPECT_NE(), except it causes an assertion
+ * failure (see KUNIT_ASSERT_TRUE()) when the assertion is not met.
+ */
+#define KUNIT_ASSERT_PTR_NE(test, left, right) \
+ KUNIT_BINARY_PTR_NE_ASSERTION(test, KUNIT_ASSERTION, left, right)
+
+#define KUNIT_ASSERT_PTR_NE_MSG(test, left, right, fmt, ...) \
+ KUNIT_BINARY_PTR_NE_MSG_ASSERTION(test, \
+ KUNIT_ASSERTION, \
+ left, \
+ right, \
+ fmt, \
+ ##__VA_ARGS__)
+
+/**
+ * KUNIT_ASSERT_LT() - An assertion that @left is less than @right.
+ * @test: The test context object.
+ * @left: an arbitrary expression that evaluates to a primitive C type.
+ * @right: an arbitrary expression that evaluates to a primitive C type.
+ *
+ * Sets an assertion that the value that @left evaluates to is less than the
+ * value that @right evaluates to. This is the same as KUNIT_EXPECT_LT(), except
+ * it causes an assertion failure (see KUNIT_ASSERT_TRUE()) when the assertion
+ * is not met.
+ */
+#define KUNIT_ASSERT_LT(test, left, right) \
+ KUNIT_BINARY_LT_ASSERTION(test, KUNIT_ASSERTION, left, right)
+
+#define KUNIT_ASSERT_LT_MSG(test, left, right, fmt, ...) \
+ KUNIT_BINARY_LT_MSG_ASSERTION(test, \
+ KUNIT_ASSERTION, \
+ left, \
+ right, \
+ fmt, \
+ ##__VA_ARGS__)
+
+/**
+ * KUNIT_ASSERT_LE() - An assertion that @left is less than or equal to @right.
+ * @test: The test context object.
+ * @left: an arbitrary expression that evaluates to a primitive C type.
+ * @right: an arbitrary expression that evaluates to a primitive C type.
+ *
+ * Sets an assertion that the value that @left evaluates to is less than or
+ * equal to the value that @right evaluates to. This is the same as
+ * KUNIT_EXPECT_LE(), except it causes an assertion failure (see
+ * KUNIT_ASSERT_TRUE()) when the assertion is not met.
+ */
+#define KUNIT_ASSERT_LE(test, left, right) \
+ KUNIT_BINARY_LE_ASSERTION(test, KUNIT_ASSERTION, left, right)
+
+#define KUNIT_ASSERT_LE_MSG(test, left, right, fmt, ...) \
+ KUNIT_BINARY_LE_MSG_ASSERTION(test, \
+ KUNIT_ASSERTION, \
+ left, \
+ right, \
+ fmt, \
+ ##__VA_ARGS__)
+
+/**
+ * KUNIT_ASSERT_GT() - An assertion that @left is greater than @right.
+ * @test: The test context object.
+ * @left: an arbitrary expression that evaluates to a primitive C type.
+ * @right: an arbitrary expression that evaluates to a primitive C type.
+ *
+ * Sets an assertion that the value that @left evaluates to is greater than the
+ * value that @right evaluates to. This is the same as KUNIT_EXPECT_GT(), except
+ * it causes an assertion failure (see KUNIT_ASSERT_TRUE()) when the assertion
+ * is not met.
+ */
+#define KUNIT_ASSERT_GT(test, left, right) \
+ KUNIT_BINARY_GT_ASSERTION(test, KUNIT_ASSERTION, left, right)
+
+#define KUNIT_ASSERT_GT_MSG(test, left, right, fmt, ...) \
+ KUNIT_BINARY_GT_MSG_ASSERTION(test, \
+ KUNIT_ASSERTION, \
+ left, \
+ right, \
+ fmt, \
+ ##__VA_ARGS__)
+
+/**
+ * KUNIT_ASSERT_GE() - Assertion that @left is greater than or equal to @right.
+ * @test: The test context object.
+ * @left: an arbitrary expression that evaluates to a primitive C type.
+ * @right: an arbitrary expression that evaluates to a primitive C type.
+ *
+ * Sets an assertion that the value that @left evaluates to is greater than or
+ * equal to the value that @right evaluates to. This is the same as
+ * KUNIT_EXPECT_GE(), except
+ * it causes an assertion failure (see KUNIT_ASSERT_TRUE()) when the assertion
+ * is not met.
+ */
+#define KUNIT_ASSERT_GE(test, left, right) \
+ KUNIT_BINARY_GE_ASSERTION(test, KUNIT_ASSERTION, left, right)
+
+#define KUNIT_ASSERT_GE_MSG(test, left, right, fmt, ...) \
+ KUNIT_BINARY_GE_MSG_ASSERTION(test, \
+ KUNIT_ASSERTION, \
+ left, \
+ right, \
+ fmt, \
+ ##__VA_ARGS__)
+
+/**
+ * KUNIT_ASSERT_STREQ() - An assertion that strings @left and @right are equal.
+ * @test: The test context object.
+ * @left: an arbitrary expression that evaluates to a null terminated string.
+ * @right: an arbitrary expression that evaluates to a null terminated string.
+ *
+ * Sets an assertion that the values that @left and @right evaluate to are
+ * equal. This is the same as KUNIT_EXPECT_STREQ(), except it causes an
+ * assertion failure (see KUNIT_ASSERT_TRUE()) when the assertion is not met.
+ */
+#define KUNIT_ASSERT_STREQ(test, left, right) \
+ KUNIT_BINARY_STR_EQ_ASSERTION(test, KUNIT_ASSERTION, left, right)
+
+#define KUNIT_ASSERT_STREQ_MSG(test, left, right, fmt, ...) \
+ KUNIT_BINARY_STR_EQ_MSG_ASSERTION(test, \
+ KUNIT_ASSERTION, \
+ left, \
+ right, \
+ fmt, \
+ ##__VA_ARGS__)
+
+/**
+ * KUNIT_ASSERT_STRNEQ() - An assertion that strings @left and @right are not equal.
+ * @test: The test context object.
+ * @left: an arbitrary expression that evaluates to a null terminated string.
+ * @right: an arbitrary expression that evaluates to a null terminated string.
+ *
+ * Sets an assertion that the values that @left and @right evaluate to are not
+ * equal. This is the same as KUNIT_EXPECT_STRNEQ(), except it causes an
+ * assertion failure (see KUNIT_ASSERT_TRUE()) when the assertion is not met.
+ */
+#define KUNIT_ASSERT_STRNEQ(test, left, right) \
+ KUNIT_BINARY_STR_NE_ASSERTION(test, KUNIT_ASSERTION, left, right)
+
+#define KUNIT_ASSERT_STRNEQ_MSG(test, left, right, fmt, ...) \
+ KUNIT_BINARY_STR_NE_MSG_ASSERTION(test, \
+ KUNIT_ASSERTION, \
+ left, \
+ right, \
+ fmt, \
+ ##__VA_ARGS__)
+
+/**
+ * KUNIT_ASSERT_NOT_ERR_OR_NULL() - Assertion that @ptr is not null and not err.
+ * @test: The test context object.
+ * @ptr: an arbitrary pointer.
+ *
+ * Sets an assertion that the value that @ptr evaluates to is not null and not
+ * an errno stored in a pointer. This is the same as
+ * KUNIT_EXPECT_NOT_ERR_OR_NULL(), except it causes an assertion failure (see
+ * KUNIT_ASSERT_TRUE()) when the assertion is not met.
+ */
+#define KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr) \
+ KUNIT_PTR_NOT_ERR_OR_NULL_ASSERTION(test, KUNIT_ASSERTION, ptr)
+
+#define KUNIT_ASSERT_NOT_ERR_OR_NULL_MSG(test, ptr, fmt, ...) \
+ KUNIT_PTR_NOT_ERR_OR_NULL_MSG_ASSERTION(test, \
+ KUNIT_ASSERTION, \
+ ptr, \
+ fmt, \
+ ##__VA_ARGS__)
+
+#endif /* _KUNIT_TEST_H */
diff --git a/include/kunit/try-catch.h b/include/kunit/try-catch.h
new file mode 100644
index 000000000000..404f336cbdc8
--- /dev/null
+++ b/include/kunit/try-catch.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * An API for running a function that may fail, and for recovering from the
+ * failure in a controlled manner.
+ *
+ * Copyright (C) 2019, Google LLC.
+ * Author: Brendan Higgins <brendanhiggins@google.com>
+ */
+
+#ifndef _KUNIT_TRY_CATCH_H
+#define _KUNIT_TRY_CATCH_H
+
+#include <linux/types.h>
+
+typedef void (*kunit_try_catch_func_t)(void *);
+
+struct completion;
+struct kunit;
+
+/**
+ * struct kunit_try_catch - provides a generic way to run code which might fail.
+ * @test: The test case that is currently being executed.
+ * @try_completion: Completion that the control thread waits on while the test runs.
+ * @try_result: Contains any errno obtained while running the test case.
+ * @try: The function (the test case) to attempt to run.
+ * @catch: The function called if @try bails out.
+ * @context: Used to pass user data to the try and catch functions.
+ *
+ * kunit_try_catch provides a generic, architecture independent way to execute
+ * an arbitrary function of type kunit_try_catch_func_t which may bail out by
+ * calling kunit_try_catch_throw(). If kunit_try_catch_throw() is called, @try
+ * is stopped at the site of invocation and @catch is called.
+ *
+ * struct kunit_try_catch provides a generic interface for the functionality
+ * needed to implement kunit->abort() which in turn is needed for implementing
+ * assertions. Assertions allow stating a precondition for a test, simplifying
+ * how test cases are written and presented.
+ *
+ * Assertions are like expectations, except they abort (call
+ * kunit_try_catch_throw()) when the specified condition is not met. This is
+ * useful when you look at a test case as a logical statement about some piece
+ * of code, where assertions are the premises for the test case, and the
+ * conclusion is a set of predicates (rather, expectations) that must all be
+ * true. If your premises are violated, it does not make sense to continue.
+ */
+struct kunit_try_catch {
+ /* private: internal use only. */
+ struct kunit *test;
+ struct completion *try_completion;
+ int try_result;
+ kunit_try_catch_func_t try;
+ kunit_try_catch_func_t catch;
+ void *context;
+};
+
+void kunit_try_catch_init(struct kunit_try_catch *try_catch,
+ struct kunit *test,
+ kunit_try_catch_func_t try,
+ kunit_try_catch_func_t catch);
+
+void kunit_try_catch_run(struct kunit_try_catch *try_catch, void *context);
+
+void __noreturn kunit_try_catch_throw(struct kunit_try_catch *try_catch);
+
+static inline int kunit_try_catch_get_result(struct kunit_try_catch *try_catch)
+{
+ return try_catch->try_result;
+}
+
+/*
+ * Exposed for testing only.
+ */
+void kunit_generic_try_catch_init(struct kunit_try_catch *try_catch);
+
+#endif /* _KUNIT_TRY_CATCH_H */
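
[Editor's note: illustrative sketch, not part of the patch. A plausible shape
for driving the try-catch machinery directly, assuming KUNIT_FAIL() and
KUNIT_EXPECT_EQ() from the accompanying test.h; the example_* names are
hypothetical.]

    #include <kunit/test.h>
    #include <kunit/try-catch.h>

    static void example_try(void *context)
    {
    	struct kunit *test = context;

    	/* May bail out via kunit_try_catch_throw() if the assertion fails. */
    	KUNIT_ASSERT_TRUE(test, true);
    }

    static void example_catch(void *context)
    {
    	struct kunit *test = context;

    	KUNIT_FAIL(test, "try function bailed out");
    }

    static void example_run(struct kunit *test)
    {
    	struct kunit_try_catch try_catch;

    	kunit_try_catch_init(&try_catch, test, example_try, example_catch);
    	kunit_try_catch_run(&try_catch, test);
    	KUNIT_EXPECT_EQ(test, kunit_try_catch_get_result(&try_catch), 0);
    }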
diff --git a/include/kvm/arm_hypercalls.h b/include/kvm/arm_hypercalls.h
new file mode 100644
index 000000000000..0e2509d27910
--- /dev/null
+++ b/include/kvm/arm_hypercalls.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2019 Arm Ltd. */
+
+#ifndef __KVM_ARM_HYPERCALLS_H
+#define __KVM_ARM_HYPERCALLS_H
+
+#include <asm/kvm_emulate.h>
+
+int kvm_hvc_call_handler(struct kvm_vcpu *vcpu);
+
+static inline u32 smccc_get_function(struct kvm_vcpu *vcpu)
+{
+ return vcpu_get_reg(vcpu, 0);
+}
+
+static inline unsigned long smccc_get_arg1(struct kvm_vcpu *vcpu)
+{
+ return vcpu_get_reg(vcpu, 1);
+}
+
+static inline unsigned long smccc_get_arg2(struct kvm_vcpu *vcpu)
+{
+ return vcpu_get_reg(vcpu, 2);
+}
+
+static inline unsigned long smccc_get_arg3(struct kvm_vcpu *vcpu)
+{
+ return vcpu_get_reg(vcpu, 3);
+}
+
+static inline void smccc_set_retval(struct kvm_vcpu *vcpu,
+ unsigned long a0,
+ unsigned long a1,
+ unsigned long a2,
+ unsigned long a3)
+{
+ vcpu_set_reg(vcpu, 0, a0);
+ vcpu_set_reg(vcpu, 1, a1);
+ vcpu_set_reg(vcpu, 2, a2);
+ vcpu_set_reg(vcpu, 3, a3);
+}
+
+#endif
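
[Editor's note: illustrative sketch, not part of the patch. The general shape
of a handler built on these accessors; EXAMPLE_FUNC_ID and the function name
are made up, and the SMCCC_RET_* values come from linux/arm-smccc.h.]

    #include <kvm/arm_hypercalls.h>
    #include <linux/arm-smccc.h>

    #define EXAMPLE_FUNC_ID	0x86000042	/* hypothetical function ID */

    static int example_hvc_handler(struct kvm_vcpu *vcpu)
    {
    	u32 func_id = smccc_get_function(vcpu);
    	unsigned long arg = smccc_get_arg1(vcpu);

    	switch (func_id) {
    	case EXAMPLE_FUNC_ID:
    		/* Echo the first argument back in x1. */
    		smccc_set_retval(vcpu, SMCCC_RET_SUCCESS, arg, 0, 0);
    		break;
    	default:
    		smccc_set_retval(vcpu, SMCCC_RET_NOT_SUPPORTED, 0, 0, 0);
    		break;
    	}
    	return 1;	/* resume the guest */
    }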
diff --git a/include/kvm/arm_psci.h b/include/kvm/arm_psci.h
index 632e78bdef4d..5b58bd2fe088 100644
--- a/include/kvm/arm_psci.h
+++ b/include/kvm/arm_psci.h
@@ -40,7 +40,7 @@ static inline int kvm_psci_version(struct kvm_vcpu *vcpu, struct kvm *kvm)
}
-int kvm_hvc_call_handler(struct kvm_vcpu *vcpu);
+int kvm_psci_call(struct kvm_vcpu *vcpu);
struct kvm_one_reg;
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index af4f09c02bf1..9d53f545a3d5 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -240,7 +240,7 @@ struct vgic_dist {
* Contains the attributes and gpa of the LPI configuration table.
* Since we report GICR_TYPER.CommonLPIAff as 0b00, we can share
* one address across all redistributors.
- * GICv3 spec: 6.1.2 "LPI Configuration tables"
+ * GICv3 spec: IHI 0069E 6.1.1 "LPI Configuration tables"
*/
u64 propbaser;
@@ -378,8 +378,6 @@ static inline int kvm_vgic_get_max_vcpus(void)
return kvm_vgic_global_state.max_gic_vcpus;
}
-int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi);
-
/**
* kvm_vgic_setup_default_irq_routing:
* Setup a default flat gsi routing table mapping all SPIs
@@ -396,7 +394,7 @@ int kvm_vgic_v4_set_forwarding(struct kvm *kvm, int irq,
int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int irq,
struct kvm_kernel_irq_routing_entry *irq_entry);
-void kvm_vgic_v4_enable_doorbell(struct kvm_vcpu *vcpu);
-void kvm_vgic_v4_disable_doorbell(struct kvm_vcpu *vcpu);
+int vgic_v4_load(struct kvm_vcpu *vcpu);
+int vgic_v4_put(struct kvm_vcpu *vcpu, bool need_db);
#endif /* __KVM_ARM_VGIC_H */
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 8b4e516bac00..0f37a7d5fa77 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -678,6 +678,14 @@ static inline bool acpi_dev_present(const char *hid, const char *uid, s64 hrv)
return false;
}
+struct acpi_device;
+
+static inline bool
+acpi_dev_hid_uid_match(struct acpi_device *adev, const char *hid2, const char *uid2)
+{
+ return false;
+}
+
static inline struct acpi_device *
acpi_dev_get_first_match_dev(const char *hid, const char *uid, s64 hrv)
{
diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h
index 080012a6f025..59494df0f55b 100644
--- a/include/linux/arm-smccc.h
+++ b/include/linux/arm-smccc.h
@@ -45,6 +45,7 @@
#define ARM_SMCCC_OWNER_SIP 2
#define ARM_SMCCC_OWNER_OEM 3
#define ARM_SMCCC_OWNER_STANDARD 4
+#define ARM_SMCCC_OWNER_STANDARD_HYP 5
#define ARM_SMCCC_OWNER_TRUSTED_APP 48
#define ARM_SMCCC_OWNER_TRUSTED_APP_END 49
#define ARM_SMCCC_OWNER_TRUSTED_OS 50
@@ -80,6 +81,22 @@
#include <linux/linkage.h>
#include <linux/types.h>
+
+enum arm_smccc_conduit {
+ SMCCC_CONDUIT_NONE,
+ SMCCC_CONDUIT_SMC,
+ SMCCC_CONDUIT_HVC,
+};
+
+/**
+ * arm_smccc_1_1_get_conduit()
+ *
+ * Returns the conduit to be used for SMCCCv1.1 or later.
+ *
+ * When SMCCCv1.1 is not present, returns SMCCC_CONDUIT_NONE.
+ */
+enum arm_smccc_conduit arm_smccc_1_1_get_conduit(void);
+
/**
* struct arm_smccc_res - Result from SMC/HVC call
* @a0-a3 result values from registers 0 to 3
@@ -302,5 +319,63 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1,
#define SMCCC_RET_NOT_SUPPORTED -1
#define SMCCC_RET_NOT_REQUIRED -2
+/*
+ * Like arm_smccc_1_1* but always returns SMCCC_RET_NOT_SUPPORTED.
+ * Used when the SMCCC conduit is not defined. The empty asm statement
+ * avoids compiler warnings about unused variables.
+ */
+#define __fail_smccc_1_1(...) \
+ do { \
+ __declare_args(__count_args(__VA_ARGS__), __VA_ARGS__); \
+ asm ("" __constraints(__count_args(__VA_ARGS__))); \
+ if (___res) \
+ ___res->a0 = SMCCC_RET_NOT_SUPPORTED; \
+ } while (0)
+
+/*
+ * arm_smccc_1_1_invoke() - make an SMCCC v1.1 compliant call
+ *
+ * This is a variadic macro taking one to eight source arguments, and
+ * an optional return structure.
+ *
+ * @a0-a7: arguments passed in registers 0 to 7
+ * @res: result values from registers 0 to 3
+ *
+ * This macro will make either an HVC call or an SMC call depending on the
+ * current SMCCC conduit. If no valid conduit is available then -1
+ * (SMCCC_RET_NOT_SUPPORTED) is returned in @res.a0 (if supplied).
+ *
+ * The return value also provides the conduit that was used.
+ */
+#define arm_smccc_1_1_invoke(...) ({ \
+ int method = arm_smccc_1_1_get_conduit(); \
+ switch (method) { \
+ case SMCCC_CONDUIT_HVC: \
+ arm_smccc_1_1_hvc(__VA_ARGS__); \
+ break; \
+ case SMCCC_CONDUIT_SMC: \
+ arm_smccc_1_1_smc(__VA_ARGS__); \
+ break; \
+ default: \
+ __fail_smccc_1_1(__VA_ARGS__); \
+ method = SMCCC_CONDUIT_NONE; \
+ break; \
+ } \
+ method; \
+ })
+
+/* Paravirtualised time calls (defined by ARM DEN0057A) */
+#define ARM_SMCCC_HV_PV_TIME_FEATURES \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_64, \
+ ARM_SMCCC_OWNER_STANDARD_HYP, \
+ 0x20)
+
+#define ARM_SMCCC_HV_PV_TIME_ST \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_64, \
+ ARM_SMCCC_OWNER_STANDARD_HYP, \
+ 0x21)
+
#endif /*__ASSEMBLY__*/
#endif /*__LINUX_ARM_SMCCC_H*/
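
[Editor's note: illustrative sketch, not part of the patch. Probing the
stolen-time call defined above through the conduit-agnostic wrapper; the
function name is hypothetical.]

    #include <linux/arm-smccc.h>

    static bool example_has_pv_stolen_time(void)
    {
    	struct arm_smccc_res res;

    	if (arm_smccc_1_1_get_conduit() == SMCCC_CONDUIT_NONE)
    		return false;

    	/* Ask the standard hypervisor service if ST is implemented. */
    	arm_smccc_1_1_invoke(ARM_SMCCC_HV_PV_TIME_FEATURES,
    			     ARM_SMCCC_HV_PV_TIME_ST, &res);
    	return res.a0 == SMCCC_RET_SUCCESS;
    }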
diff --git a/include/linux/arm_sdei.h b/include/linux/arm_sdei.h
index 3305ea7f9dc7..0a241c5c911d 100644
--- a/include/linux/arm_sdei.h
+++ b/include/linux/arm_sdei.h
@@ -5,12 +5,6 @@
#include <uapi/linux/arm_sdei.h>
-enum sdei_conduit_types {
- CONDUIT_INVALID = 0,
- CONDUIT_SMC,
- CONDUIT_HVC,
-};
-
#include <acpi/ghes.h>
#ifdef CONFIG_ARM_SDE_INTERFACE
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
index bed9e43f9426..19394c77ed99 100644
--- a/include/linux/blk-cgroup.h
+++ b/include/linux/blk-cgroup.h
@@ -15,7 +15,9 @@
*/
#include <linux/cgroup.h>
+#include <linux/percpu.h>
#include <linux/percpu_counter.h>
+#include <linux/u64_stats_sync.h>
#include <linux/seq_file.h>
#include <linux/radix-tree.h>
#include <linux/blkdev.h>
@@ -31,15 +33,12 @@
#ifdef CONFIG_BLK_CGROUP
-enum blkg_rwstat_type {
- BLKG_RWSTAT_READ,
- BLKG_RWSTAT_WRITE,
- BLKG_RWSTAT_SYNC,
- BLKG_RWSTAT_ASYNC,
- BLKG_RWSTAT_DISCARD,
+enum blkg_iostat_type {
+ BLKG_IOSTAT_READ,
+ BLKG_IOSTAT_WRITE,
+ BLKG_IOSTAT_DISCARD,
- BLKG_RWSTAT_NR,
- BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
+ BLKG_IOSTAT_NR,
};
struct blkcg_gq;
@@ -61,17 +60,15 @@ struct blkcg {
#endif
};
-/*
- * blkg_[rw]stat->aux_cnt is excluded for local stats but included for
- * recursive. Used to carry stats of dead children.
- */
-struct blkg_rwstat {
- struct percpu_counter cpu_cnt[BLKG_RWSTAT_NR];
- atomic64_t aux_cnt[BLKG_RWSTAT_NR];
+struct blkg_iostat {
+ u64 bytes[BLKG_IOSTAT_NR];
+ u64 ios[BLKG_IOSTAT_NR];
};
-struct blkg_rwstat_sample {
- u64 cnt[BLKG_RWSTAT_NR];
+struct blkg_iostat_set {
+ struct u64_stats_sync sync;
+ struct blkg_iostat cur;
+ struct blkg_iostat last;
};
/*
@@ -127,8 +124,8 @@ struct blkcg_gq {
/* is this blkg online? protected by both blkcg and q locks */
bool online;
- struct blkg_rwstat stat_bytes;
- struct blkg_rwstat stat_ios;
+ struct blkg_iostat_set __percpu *iostat_cpu;
+ struct blkg_iostat_set iostat;
struct blkg_policy_data *pd[BLKCG_MAX_POLS];
@@ -202,13 +199,6 @@ int blkcg_activate_policy(struct request_queue *q,
void blkcg_deactivate_policy(struct request_queue *q,
const struct blkcg_policy *pol);
-static inline u64 blkg_rwstat_read_counter(struct blkg_rwstat *rwstat,
- unsigned int idx)
-{
- return atomic64_read(&rwstat->aux_cnt[idx]) +
- percpu_counter_sum_positive(&rwstat->cpu_cnt[idx]);
-}
-
const char *blkg_dev_name(struct blkcg_gq *blkg);
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
u64 (*prfill)(struct seq_file *,
@@ -216,17 +206,6 @@ void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
const struct blkcg_policy *pol, int data,
bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
-u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
- const struct blkg_rwstat_sample *rwstat);
-u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
- int off);
-int blkg_print_stat_bytes(struct seq_file *sf, void *v);
-int blkg_print_stat_ios(struct seq_file *sf, void *v);
-int blkg_print_stat_bytes_recursive(struct seq_file *sf, void *v);
-int blkg_print_stat_ios_recursive(struct seq_file *sf, void *v);
-
-void blkg_rwstat_recursive_sum(struct blkcg_gq *blkg, struct blkcg_policy *pol,
- int off, struct blkg_rwstat_sample *sum);
struct blkg_conf_ctx {
struct gendisk *disk;
@@ -578,128 +557,6 @@ static inline void blkg_put(struct blkcg_gq *blkg)
if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css), \
(p_blkg)->q, false)))
-static inline int blkg_rwstat_init(struct blkg_rwstat *rwstat, gfp_t gfp)
-{
- int i, ret;
-
- for (i = 0; i < BLKG_RWSTAT_NR; i++) {
- ret = percpu_counter_init(&rwstat->cpu_cnt[i], 0, gfp);
- if (ret) {
- while (--i >= 0)
- percpu_counter_destroy(&rwstat->cpu_cnt[i]);
- return ret;
- }
- atomic64_set(&rwstat->aux_cnt[i], 0);
- }
- return 0;
-}
-
-static inline void blkg_rwstat_exit(struct blkg_rwstat *rwstat)
-{
- int i;
-
- for (i = 0; i < BLKG_RWSTAT_NR; i++)
- percpu_counter_destroy(&rwstat->cpu_cnt[i]);
-}
-
-/**
- * blkg_rwstat_add - add a value to a blkg_rwstat
- * @rwstat: target blkg_rwstat
- * @op: REQ_OP and flags
- * @val: value to add
- *
- * Add @val to @rwstat. The counters are chosen according to @rw. The
- * caller is responsible for synchronizing calls to this function.
- */
-static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
- unsigned int op, uint64_t val)
-{
- struct percpu_counter *cnt;
-
- if (op_is_discard(op))
- cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_DISCARD];
- else if (op_is_write(op))
- cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_WRITE];
- else
- cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_READ];
-
- percpu_counter_add_batch(cnt, val, BLKG_STAT_CPU_BATCH);
-
- if (op_is_sync(op))
- cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_SYNC];
- else
- cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_ASYNC];
-
- percpu_counter_add_batch(cnt, val, BLKG_STAT_CPU_BATCH);
-}
-
-/**
- * blkg_rwstat_read - read the current values of a blkg_rwstat
- * @rwstat: blkg_rwstat to read
- *
- * Read the current snapshot of @rwstat and return it in the aux counts.
- */
-static inline void blkg_rwstat_read(struct blkg_rwstat *rwstat,
- struct blkg_rwstat_sample *result)
-{
- int i;
-
- for (i = 0; i < BLKG_RWSTAT_NR; i++)
- result->cnt[i] =
- percpu_counter_sum_positive(&rwstat->cpu_cnt[i]);
-}
-
-/**
- * blkg_rwstat_total - read the total count of a blkg_rwstat
- * @rwstat: blkg_rwstat to read
- *
- * Return the total count of @rwstat regardless of the IO direction. This
- * function can be called without synchronization and takes care of u64
- * atomicity.
- */
-static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat)
-{
- struct blkg_rwstat_sample tmp = { };
-
- blkg_rwstat_read(rwstat, &tmp);
- return tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE];
-}
-
-/**
- * blkg_rwstat_reset - reset a blkg_rwstat
- * @rwstat: blkg_rwstat to reset
- */
-static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
-{
- int i;
-
- for (i = 0; i < BLKG_RWSTAT_NR; i++) {
- percpu_counter_set(&rwstat->cpu_cnt[i], 0);
- atomic64_set(&rwstat->aux_cnt[i], 0);
- }
-}
-
-/**
- * blkg_rwstat_add_aux - add a blkg_rwstat into another's aux count
- * @to: the destination blkg_rwstat
- * @from: the source
- *
- * Add @from's count including the aux one to @to's aux count.
- */
-static inline void blkg_rwstat_add_aux(struct blkg_rwstat *to,
- struct blkg_rwstat *from)
-{
- u64 sum[BLKG_RWSTAT_NR];
- int i;
-
- for (i = 0; i < BLKG_RWSTAT_NR; i++)
- sum[i] = percpu_counter_sum_positive(&from->cpu_cnt[i]);
-
- for (i = 0; i < BLKG_RWSTAT_NR; i++)
- atomic64_add(sum[i] + atomic64_read(&from->aux_cnt[i]),
- &to->aux_cnt[i]);
-}
-
#ifdef CONFIG_BLK_DEV_THROTTLING
extern bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
struct bio *bio);
@@ -745,15 +602,33 @@ static inline bool blkcg_bio_issue_check(struct request_queue *q,
throtl = blk_throtl_bio(q, blkg, bio);
if (!throtl) {
+ struct blkg_iostat_set *bis;
+ int rwd, cpu;
+
+ if (op_is_discard(bio->bi_opf))
+ rwd = BLKG_IOSTAT_DISCARD;
+ else if (op_is_write(bio->bi_opf))
+ rwd = BLKG_IOSTAT_WRITE;
+ else
+ rwd = BLKG_IOSTAT_READ;
+
+ cpu = get_cpu();
+ bis = per_cpu_ptr(blkg->iostat_cpu, cpu);
+ u64_stats_update_begin(&bis->sync);
+
/*
* If the bio is flagged with BIO_QUEUE_ENTERED it means this
* is a split bio and we would have already accounted for the
* size of the bio.
*/
if (!bio_flagged(bio, BIO_QUEUE_ENTERED))
- blkg_rwstat_add(&blkg->stat_bytes, bio->bi_opf,
- bio->bi_iter.bi_size);
- blkg_rwstat_add(&blkg->stat_ios, bio->bi_opf, 1);
+ bis->cur.bytes[rwd] += bio->bi_iter.bi_size;
+ bis->cur.ios[rwd]++;
+
+ u64_stats_update_end(&bis->sync);
+ if (cgroup_subsys_on_dfl(io_cgrp_subsys))
+ cgroup_rstat_updated(blkg->blkcg->css.cgroup, cpu);
+ put_cpu();
}
blkcg_bio_issue_init(bio);
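
[Editor's note: illustrative sketch, not part of the patch. How a reader might
fold the new per-cpu counters into a stable snapshot, following the usual
u64_stats pattern; the function name is hypothetical.]

    #include <linux/blk-cgroup.h>

    static u64 example_read_bytes(struct blkcg_gq *blkg,
    			      enum blkg_iostat_type type)
    {
    	u64 sum = 0;
    	int cpu;

    	for_each_possible_cpu(cpu) {
    		struct blkg_iostat_set *bis =
    			per_cpu_ptr(blkg->iostat_cpu, cpu);
    		unsigned int seq;
    		u64 v;

    		/* Retry if a writer was mid-update on this CPU. */
    		do {
    			seq = u64_stats_fetch_begin(&bis->sync);
    			v = bis->cur.bytes[type];
    		} while (u64_stats_fetch_retry(&bis->sync, seq));
    		sum += v;
    	}
    	return sum;
    }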
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 0bf056de5cc3..11cfd6470b1a 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -10,103 +10,239 @@ struct blk_mq_tags;
struct blk_flush_queue;
/**
- * struct blk_mq_hw_ctx - State for a hardware queue facing the hardware block device
+ * struct blk_mq_hw_ctx - State for a hardware queue facing the hardware
+ * block device
*/
struct blk_mq_hw_ctx {
struct {
+ /** @lock: Protects the dispatch list. */
spinlock_t lock;
+ /**
+ * @dispatch: Used for requests that are ready to be
+ * dispatched to the hardware but for some reason (e.g. lack of
+ * resources) could not be sent to the hardware. As soon as the
+ * driver can send new requests, requests on this list will
+ * be sent first for a fairer dispatch.
+ */
struct list_head dispatch;
- unsigned long state; /* BLK_MQ_S_* flags */
+ /**
+ * @state: BLK_MQ_S_* flags. Defines the state of the hw
+ * queue (active, scheduled to restart, stopped).
+ */
+ unsigned long state;
} ____cacheline_aligned_in_smp;
+ /**
+ * @run_work: Used for scheduling a hardware queue run at a later time.
+ */
struct delayed_work run_work;
+ /** @cpumask: Map of available CPUs where this hctx can run. */
cpumask_var_t cpumask;
+ /**
+ * @next_cpu: Used by blk_mq_hctx_next_cpu() for round-robin CPU
+ * selection from @cpumask.
+ */
int next_cpu;
+ /**
+ * @next_cpu_batch: Counter of how many work items are left in the
+ * batch before changing to the next CPU.
+ */
int next_cpu_batch;
- unsigned long flags; /* BLK_MQ_F_* flags */
+ /** @flags: BLK_MQ_F_* flags. Defines the behaviour of the queue. */
+ unsigned long flags;
+ /**
+ * @sched_data: Pointer owned by the IO scheduler attached to a request
+ * queue. It's up to the IO scheduler how to use this pointer.
+ */
void *sched_data;
+ /**
+ * @queue: Pointer to the request queue that owns this hardware context.
+ */
struct request_queue *queue;
+ /** @fq: Queue of requests that need to perform a flush operation. */
struct blk_flush_queue *fq;
+ /**
+ * @driver_data: Pointer to data owned by the block driver that created
+ * this hctx.
+ */
void *driver_data;
+ /**
+ * @ctx_map: Bitmap for each software queue. If a bit is set, there is a
+ * pending request in that software queue.
+ */
struct sbitmap ctx_map;
+ /**
+ * @dispatch_from: Software queue to be used when no scheduler was
+ * selected.
+ */
struct blk_mq_ctx *dispatch_from;
+ /**
+ * @dispatch_busy: Number used by blk_mq_update_dispatch_busy() to
+ * decide if the hw_queue is busy using an Exponentially Weighted
+ * Moving Average algorithm.
+ */
unsigned int dispatch_busy;
+ /** @type: HCTX_TYPE_* flags. Type of hardware queue. */
unsigned short type;
+ /** @nr_ctx: Number of software queues. */
unsigned short nr_ctx;
+ /** @ctxs: Array of software queues. */
struct blk_mq_ctx **ctxs;
+ /** @dispatch_wait_lock: Lock for dispatch_wait queue. */
spinlock_t dispatch_wait_lock;
+ /**
+ * @dispatch_wait: Waitqueue to put requests on when there is no tag
+ * available at the moment, to wait for another try in the future.
+ */
wait_queue_entry_t dispatch_wait;
+
+ /**
+ * @wait_index: Index of next available dispatch_wait queue to insert
+ * requests.
+ */
atomic_t wait_index;
+ /**
+ * @tags: Tags owned by the block driver. A tag in this set is only
+ * assigned when a request is dispatched from a hardware queue.
+ */
struct blk_mq_tags *tags;
+ /**
+ * @sched_tags: Tags owned by I/O scheduler. If there is an I/O
+ * scheduler associated with a request queue, a tag is assigned when
+ * that request is allocated. Otherwise, this member is not used.
+ */
struct blk_mq_tags *sched_tags;
+ /** @queued: Number of queued requests. */
unsigned long queued;
+ /** @run: Number of dispatched requests. */
unsigned long run;
#define BLK_MQ_MAX_DISPATCH_ORDER 7
+ /**
+ * @dispatched: Histogram of how many requests were dispatched in one
+ * go, bucketed by power-of-two order.
+ */
unsigned long dispatched[BLK_MQ_MAX_DISPATCH_ORDER];
+ /** @numa_node: NUMA node the storage adapter has been connected to. */
unsigned int numa_node;
+ /** @queue_num: Index of this hardware queue. */
unsigned int queue_num;
+ /**
+ * @nr_active: Number of active requests. Only used when a tag set is
+ * shared across request queues.
+ */
atomic_t nr_active;
+ /** @cpuhp_dead: List to store requests if a CPU dies. */
struct hlist_node cpuhp_dead;
+ /** @kobj: Kernel object for sysfs. */
struct kobject kobj;
+ /** @poll_considered: Count of how many times blk_poll() was called. */
unsigned long poll_considered;
+ /** @poll_invoked: Count how many requests blk_poll() polled. */
unsigned long poll_invoked;
+ /** @poll_success: Count how many polled requests were completed. */
unsigned long poll_success;
#ifdef CONFIG_BLK_DEBUG_FS
+ /**
+ * @debugfs_dir: debugfs directory for this hardware queue. Named
+ * hctx<queue_num>.
+ */
struct dentry *debugfs_dir;
+ /** @sched_debugfs_dir: debugfs directory for the scheduler. */
struct dentry *sched_debugfs_dir;
#endif
+ /** @hctx_list: List of all hardware queues. */
struct list_head hctx_list;
- /* Must be the last member - see also blk_mq_hw_ctx_size(). */
+ /**
+ * @srcu: Sleepable RCU. Use as lock when type of the hardware queue is
+ * blocking (BLK_MQ_F_BLOCKING). Must be the last member - see also
+ * blk_mq_hw_ctx_size().
+ */
struct srcu_struct srcu[0];
};
+/**
+ * struct blk_mq_queue_map - Map software queues to hardware queues
+ * @mq_map: CPU ID to hardware queue index map. This is an array
+ * with nr_cpu_ids elements. Each element has a value in the range
+ * [@queue_offset, @queue_offset + @nr_queues).
+ * @nr_queues: Number of hardware queues to map CPU IDs onto.
+ * @queue_offset: First hardware queue to map onto. Used by the PCIe NVMe
+ * driver to map each hardware queue type (enum hctx_type) onto a distinct
+ * set of hardware queues.
+ */
struct blk_mq_queue_map {
unsigned int *mq_map;
unsigned int nr_queues;
unsigned int queue_offset;
};
+/**
+ * enum hctx_type - Type of hardware queue
+ * @HCTX_TYPE_DEFAULT: All I/O not otherwise accounted for.
+ * @HCTX_TYPE_READ: Just for READ I/O.
+ * @HCTX_TYPE_POLL: Polled I/O of any kind.
+ * @HCTX_MAX_TYPES: Number of types of hctx.
+ */
enum hctx_type {
- HCTX_TYPE_DEFAULT, /* all I/O not otherwise accounted for */
- HCTX_TYPE_READ, /* just for READ I/O */
- HCTX_TYPE_POLL, /* polled I/O of any kind */
+ HCTX_TYPE_DEFAULT,
+ HCTX_TYPE_READ,
+ HCTX_TYPE_POLL,
HCTX_MAX_TYPES,
};
+/**
+ * struct blk_mq_tag_set - tag set that can be shared between request queues
+ * @map: One or more ctx -> hctx mappings. One map exists for each
+ * hardware queue type (enum hctx_type) that the driver wishes
+ * to support. There are no restrictions on maps being of the
+ * same size, and it's perfectly legal to share maps between
+ * types.
+ * @nr_maps: Number of elements in the @map array. A number in the range
+ * [1, HCTX_MAX_TYPES].
+ * @ops: Pointers to functions that implement block driver behavior.
+ * @nr_hw_queues: Number of hardware queues supported by the block driver that
+ * owns this data structure.
+ * @queue_depth: Number of tags per hardware queue, reserved tags included.
+ * @reserved_tags: Number of tags to set aside for BLK_MQ_REQ_RESERVED tag
+ * allocations.
+ * @cmd_size: Number of additional bytes to allocate per request. The block
+ * driver owns these additional bytes.
+ * @numa_node: NUMA node the storage adapter has been connected to.
+ * @timeout: Request processing timeout in jiffies.
+ * @flags: Zero or more BLK_MQ_F_* flags.
+ * @driver_data: Pointer to data owned by the block driver that created this
+ * tag set.
+ * @tags: Tag sets. One tag set per hardware queue. Has @nr_hw_queues
+ * elements.
+ * @tag_list_lock: Serializes tag_list accesses.
+ * @tag_list: List of the request queues that use this tag set. See also
+ * request_queue.tag_set_list.
+ */
struct blk_mq_tag_set {
- /*
- * map[] holds ctx -> hctx mappings, one map exists for each type
- * that the driver wishes to support. There are no restrictions
- * on maps being of the same size, and it's perfectly legal to
- * share maps between types.
- */
struct blk_mq_queue_map map[HCTX_MAX_TYPES];
- unsigned int nr_maps; /* nr entries in map[] */
+ unsigned int nr_maps;
const struct blk_mq_ops *ops;
- unsigned int nr_hw_queues; /* nr hw queues across maps */
- unsigned int queue_depth; /* max hw supported */
+ unsigned int nr_hw_queues;
+ unsigned int queue_depth;
unsigned int reserved_tags;
- unsigned int cmd_size; /* per-request extra data */
+ unsigned int cmd_size;
int numa_node;
unsigned int timeout;
- unsigned int flags; /* BLK_MQ_F_* */
+ unsigned int flags;
void *driver_data;
struct blk_mq_tags **tags;
@@ -115,6 +251,12 @@ struct blk_mq_tag_set {
struct list_head tag_list;
};
+/**
+ * struct blk_mq_queue_data - Data about a request inserted in a queue
+ *
+ * @rq: Request pointer.
+ * @last: If it is the last request in the queue.
+ */
struct blk_mq_queue_data {
struct request *rq;
bool last;
@@ -142,81 +284,101 @@ typedef bool (busy_fn)(struct request_queue *);
typedef void (complete_fn)(struct request *);
typedef void (cleanup_rq_fn)(struct request *);
-
+/**
+ * struct blk_mq_ops - Callback functions that implement block driver
+ * behaviour.
+ */
struct blk_mq_ops {
- /*
- * Queue request
+ /**
+ * @queue_rq: Queue a new request from block IO.
*/
queue_rq_fn *queue_rq;
- /*
- * If a driver uses bd->last to judge when to submit requests to
- * hardware, it must define this function. In case of errors that
- * make us stop issuing further requests, this hook serves the
+ /**
+ * @commit_rqs: If a driver uses bd->last to judge when to submit
+ * requests to hardware, it must define this function. In case of errors
+ * that make us stop issuing further requests, this hook serves the
* purpose of kicking the hardware (which the last request otherwise
* would have done).
*/
commit_rqs_fn *commit_rqs;
- /*
- * Reserve budget before queue request, once .queue_rq is
+ /**
+ * @get_budget: Reserve budget before queueing a request; once .queue_rq is
* run, it is driver's responsibility to release the
* reserved budget. Also we have to handle failure case
* of .get_budget for avoiding I/O deadlock.
*/
get_budget_fn *get_budget;
+ /**
+ * @put_budget: Release the reserved budget.
+ */
put_budget_fn *put_budget;
- /*
- * Called on request timeout
+ /**
+ * @timeout: Called on request timeout.
*/
timeout_fn *timeout;
- /*
- * Called to poll for completion of a specific tag.
+ /**
+ * @poll: Called to poll for completion of a specific tag.
*/
poll_fn *poll;
+ /**
+ * @complete: Mark the request as complete.
+ */
complete_fn *complete;
- /*
- * Called when the block layer side of a hardware queue has been
- * set up, allowing the driver to allocate/init matching structures.
- * Ditto for exit/teardown.
+ /**
+ * @init_hctx: Called when the block layer side of a hardware queue has
+ * been set up, allowing the driver to allocate/init matching
+ * structures.
*/
init_hctx_fn *init_hctx;
+ /**
+ * @exit_hctx: Ditto for exit/teardown.
+ */
exit_hctx_fn *exit_hctx;
- /*
- * Called for every command allocated by the block layer to allow
- * the driver to set up driver specific data.
+ /**
+ * @init_request: Called for every command allocated by the block layer
+ * to allow the driver to set up driver specific data.
*
* Tag greater than or equal to queue_depth is for setting up
* flush request.
- *
- * Ditto for exit/teardown.
*/
init_request_fn *init_request;
+ /**
+ * @exit_request: Ditto for exit/teardown.
+ */
exit_request_fn *exit_request;
- /* Called from inside blk_get_request() */
+
+ /**
+ * @initialize_rq_fn: Called from inside blk_get_request().
+ */
void (*initialize_rq_fn)(struct request *rq);
- /*
- * Called before freeing one request which isn't completed yet,
- * and usually for freeing the driver private data
+ /**
+ * @cleanup_rq: Called before freeing one request which isn't completed
+ * yet, and usually for freeing the driver private data.
*/
cleanup_rq_fn *cleanup_rq;
- /*
- * If set, returns whether or not this queue currently is busy
+ /**
+ * @busy: If set, returns whether or not this queue currently is busy.
*/
busy_fn *busy;
+ /**
+ * @map_queues: This allows drivers to specify their own queue mapping by
+ * overriding the setup-time function that builds the mq_map.
+ */
map_queues_fn *map_queues;
#ifdef CONFIG_BLK_DEBUG_FS
- /*
- * Used by the debugfs implementation to show driver-specific
+ /**
+ * @show_rq: Used by the debugfs implementation to show driver-specific
* information about a request.
*/
void (*show_rq)(struct seq_file *m, struct request *rq);
@@ -262,7 +424,6 @@ void blk_mq_free_tag_set(struct blk_mq_tag_set *set);
void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);
void blk_mq_free_request(struct request *rq);
-bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
bool blk_mq_queue_inflight(struct request_queue *q);
@@ -301,9 +462,25 @@ static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
return unique_tag & BLK_MQ_UNIQUE_TAG_MASK;
}
+/**
+ * blk_mq_rq_state() - read the current MQ_RQ_* state of a request
+ * @rq: target request.
+ */
+static inline enum mq_rq_state blk_mq_rq_state(struct request *rq)
+{
+ return READ_ONCE(rq->state);
+}
+
+static inline int blk_mq_request_started(struct request *rq)
+{
+ return blk_mq_rq_state(rq) != MQ_RQ_IDLE;
+}
+
+static inline int blk_mq_request_completed(struct request *rq)
+{
+ return blk_mq_rq_state(rq) == MQ_RQ_COMPLETE;
+}
-int blk_mq_request_started(struct request *rq);
-int blk_mq_request_completed(struct request *rq);
void blk_mq_start_request(struct request *rq);
void blk_mq_end_request(struct request *rq, blk_status_t error);
void __blk_mq_end_request(struct request *rq, blk_status_t error);
@@ -324,7 +501,7 @@ void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
void blk_mq_quiesce_queue(struct request_queue *q);
void blk_mq_unquiesce_queue(struct request_queue *q);
void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
-bool blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
+void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_run_hw_queues(struct request_queue *q, bool async);
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
busy_tag_iter_fn *fn, void *priv);
@@ -343,14 +520,29 @@ void blk_mq_quiesce_queue_nowait(struct request_queue *q);
unsigned int blk_mq_rq_cpu(struct request *rq);
-/*
+/**
+ * blk_mq_rq_from_pdu - cast a PDU to a request
+ * @pdu: the PDU (Protocol Data Unit) to be cast
+ *
+ * Return: request
+ *
* Driver command data is immediately after the request. So subtract request
- * size to get back to the original request, add request size to get the PDU.
+ * size to get back to the original request.
*/
static inline struct request *blk_mq_rq_from_pdu(void *pdu)
{
return pdu - sizeof(struct request);
}
+
+/**
+ * blk_mq_rq_to_pdu - cast a request to a PDU
+ * @rq: the request to be cast
+ *
+ * Return: pointer to the PDU
+ *
+ * Driver command data is immediately after the request. So add request to get
+ * the PDU.
+ */
static inline void *blk_mq_rq_to_pdu(struct request *rq)
{
return rq + 1;
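
[Editor's note: illustrative sketch, not part of the patch. The PDU layout the
two helpers above assume, as a driver might use it; struct example_cmd and the
function name are hypothetical.]

    #include <linux/blk-mq.h>

    struct example_cmd {		/* per-request driver data (the PDU) */
    	int status;
    };

    static blk_status_t example_queue_rq(struct blk_mq_hw_ctx *hctx,
    				     const struct blk_mq_queue_data *bd)
    {
    	/* Valid because the tag set's cmd_size was set to
    	 * sizeof(struct example_cmd), so the PDU sits right after
    	 * the request.
    	 */
    	struct example_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);

    	cmd->status = 0;
    	blk_mq_start_request(bd->rq);
    	/* ... hand the command to the hardware here ... */
    	return BLK_STS_OK;
    }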
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index d688b96d1d63..70254ae11769 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -153,10 +153,10 @@ struct bio {
unsigned short bi_write_hint;
blk_status_t bi_status;
u8 bi_partno;
+ atomic_t __bi_remaining;
struct bvec_iter bi_iter;
- atomic_t __bi_remaining;
bio_end_io_t *bi_end_io;
void *bi_private;
@@ -290,6 +290,12 @@ enum req_opf {
REQ_OP_ZONE_RESET_ALL = 8,
/* write the zero filled sector many times */
REQ_OP_WRITE_ZEROES = 9,
+ /* Open a zone */
+ REQ_OP_ZONE_OPEN = 10,
+ /* Close a zone */
+ REQ_OP_ZONE_CLOSE = 11,
+ /* Transition a zone to full */
+ REQ_OP_ZONE_FINISH = 12,
/* SCSI passthrough using struct scsi_request */
REQ_OP_SCSI_IN = 32,
@@ -371,6 +377,7 @@ enum stat_group {
STAT_READ,
STAT_WRITE,
STAT_DISCARD,
+ STAT_FLUSH,
NR_STAT_GROUPS
};
@@ -417,6 +424,25 @@ static inline bool op_is_discard(unsigned int op)
return (op & REQ_OP_MASK) == REQ_OP_DISCARD;
}
+/*
+ * Check if a bio or request operation is a zone management operation, with
+ * the exception of REQ_OP_ZONE_RESET_ALL, which is treated as a special case
+ * due to its different handling in the block layer and device response in
+ * case of command failure.
+ */
+static inline bool op_is_zone_mgmt(enum req_opf op)
+{
+ switch (op & REQ_OP_MASK) {
+ case REQ_OP_ZONE_RESET:
+ case REQ_OP_ZONE_OPEN:
+ case REQ_OP_ZONE_CLOSE:
+ case REQ_OP_ZONE_FINISH:
+ return true;
+ default:
+ return false;
+ }
+}
+
static inline int op_stat_group(unsigned int op)
{
if (op_is_discard(op))
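
[Editor's note: illustrative sketch, not part of the patch. Naming the new
zone-management opcodes through op_is_zone_mgmt(); the function name is
hypothetical.]

    #include <linux/blk_types.h>

    static const char *example_zone_op_name(enum req_opf op)
    {
    	if (!op_is_zone_mgmt(op))
    		return NULL;

    	switch (op & REQ_OP_MASK) {
    	case REQ_OP_ZONE_OPEN:
    		return "open";
    	case REQ_OP_ZONE_CLOSE:
    		return "close";
    	case REQ_OP_ZONE_FINISH:
    		return "finish";
    	default:
    		return "reset";		/* REQ_OP_ZONE_RESET */
    	}
    }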
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index f3ea78b0c91c..397bb9bc230b 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -349,25 +349,25 @@ struct queue_limits {
enum blk_zoned_model zoned;
};
+typedef int (*report_zones_cb)(struct blk_zone *zone, unsigned int idx,
+ void *data);
+
#ifdef CONFIG_BLK_DEV_ZONED
-/*
- * Maximum number of zones to report with a single report zones command.
- */
-#define BLK_ZONED_REPORT_MAX_ZONES 8192U
+#define BLK_ALL_ZONES ((unsigned int)-1)
+int blkdev_report_zones(struct block_device *bdev, sector_t sector,
+ unsigned int nr_zones, report_zones_cb cb, void *data);
extern unsigned int blkdev_nr_zones(struct block_device *bdev);
-extern int blkdev_report_zones(struct block_device *bdev,
- sector_t sector, struct blk_zone *zones,
- unsigned int *nr_zones);
-extern int blkdev_reset_zones(struct block_device *bdev, sector_t sectors,
- sector_t nr_sectors, gfp_t gfp_mask);
+extern int blkdev_zone_mgmt(struct block_device *bdev, enum req_opf op,
+ sector_t sectors, sector_t nr_sectors,
+ gfp_t gfp_mask);
extern int blk_revalidate_disk_zones(struct gendisk *disk);
extern int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg);
-extern int blkdev_reset_zones_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned int cmd, unsigned long arg);
+extern int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long arg);
#else /* CONFIG_BLK_DEV_ZONED */
@@ -388,9 +388,9 @@ static inline int blkdev_report_zones_ioctl(struct block_device *bdev,
return -ENOTTY;
}
-static inline int blkdev_reset_zones_ioctl(struct block_device *bdev,
- fmode_t mode, unsigned int cmd,
- unsigned long arg)
+static inline int blkdev_zone_mgmt_ioctl(struct block_device *bdev,
+ fmode_t mode, unsigned int cmd,
+ unsigned long arg)
{
return -ENOTTY;
}
@@ -411,7 +411,6 @@ struct request_queue {
/* sw queues */
struct blk_mq_ctx __percpu *queue_ctx;
- unsigned int nr_queues;
unsigned int queue_depth;
@@ -1709,7 +1708,7 @@ struct block_device_operations {
/* this callback is with swap_lock and sometimes page table lock held */
void (*swap_slot_free_notify) (struct block_device *, unsigned long);
int (*report_zones)(struct gendisk *, sector_t sector,
- struct blk_zone *zones, unsigned int *nr_zones);
+ unsigned int nr_zones, report_zones_cb cb, void *data);
struct module *owner;
const struct pr_ops *pr_ops;
};
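
[Editor's note: illustrative sketch, not part of the patch. Counting zones via
the new callback-based report interface; the example_* names are hypothetical.]

    #include <linux/blkdev.h>

    static int example_count_cb(struct blk_zone *zone, unsigned int idx,
    			    void *data)
    {
    	unsigned int *nr = data;

    	(*nr)++;
    	return 0;	/* a non-zero return would stop the iteration */
    }

    static int example_count_zones(struct block_device *bdev)
    {
    	unsigned int nr = 0;
    	int ret;

    	ret = blkdev_report_zones(bdev, 0, BLK_ALL_ZONES,
    				  example_count_cb, &nr);
    	return ret < 0 ? ret : nr;
    }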
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 3bf3835d0e86..35903f148be5 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -12,17 +12,23 @@
#include <linux/err.h>
#include <linux/rbtree_latch.h>
#include <linux/numa.h>
+#include <linux/mm_types.h>
#include <linux/wait.h>
#include <linux/u64_stats_sync.h>
+#include <linux/refcount.h>
+#include <linux/mutex.h>
struct bpf_verifier_env;
+struct bpf_verifier_log;
struct perf_event;
struct bpf_prog;
+struct bpf_prog_aux;
struct bpf_map;
struct sock;
struct seq_file;
struct btf;
struct btf_type;
+struct exception_table_entry;
extern struct idr btf_idr;
extern spinlock_t btf_idr_lock;
@@ -59,11 +65,18 @@ struct bpf_map_ops {
const struct btf_type *key_type,
const struct btf_type *value_type);
+ /* Prog poke tracking helpers. */
+ int (*map_poke_track)(struct bpf_map *map, struct bpf_prog_aux *aux);
+ void (*map_poke_untrack)(struct bpf_map *map, struct bpf_prog_aux *aux);
+ void (*map_poke_run)(struct bpf_map *map, u32 key, struct bpf_prog *old,
+ struct bpf_prog *new);
+
/* Direct value access helpers. */
int (*map_direct_value_addr)(const struct bpf_map *map,
u64 *imm, u32 off);
int (*map_direct_value_meta)(const struct bpf_map *map,
u64 imm, u32 *off);
+ int (*map_mmap)(struct bpf_map *map, struct vm_area_struct *vma);
};
struct bpf_map_memory {
@@ -92,17 +105,19 @@ struct bpf_map {
u32 btf_value_type_id;
struct btf *btf;
struct bpf_map_memory memory;
+ char name[BPF_OBJ_NAME_LEN];
bool unpriv_array;
- bool frozen; /* write-once */
- /* 48 bytes hole */
+ bool frozen; /* write-once; write-protected by freeze_mutex */
+ /* 22 bytes hole */
/* The 3rd and 4th cacheline with misc members to avoid false sharing
* particularly with refcounting.
*/
- atomic_t refcnt ____cacheline_aligned;
- atomic_t usercnt;
+ atomic64_t refcnt ____cacheline_aligned;
+ atomic64_t usercnt;
struct work_struct work;
- char name[BPF_OBJ_NAME_LEN];
+ struct mutex freeze_mutex;
+ u64 writecnt; /* writable mmap cnt; protected by freeze_mutex */
};
static inline bool map_value_has_spin_lock(const struct bpf_map *map)
@@ -211,6 +226,7 @@ enum bpf_arg_type {
ARG_PTR_TO_INT, /* pointer to int */
ARG_PTR_TO_LONG, /* pointer to long */
ARG_PTR_TO_SOCKET, /* pointer to bpf_sock (fullsock) */
+ ARG_PTR_TO_BTF_ID, /* pointer to in-kernel struct */
};
/* type of values returned from helper functions */
@@ -233,11 +249,17 @@ struct bpf_func_proto {
bool gpl_only;
bool pkt_access;
enum bpf_return_type ret_type;
- enum bpf_arg_type arg1_type;
- enum bpf_arg_type arg2_type;
- enum bpf_arg_type arg3_type;
- enum bpf_arg_type arg4_type;
- enum bpf_arg_type arg5_type;
+ union {
+ struct {
+ enum bpf_arg_type arg1_type;
+ enum bpf_arg_type arg2_type;
+ enum bpf_arg_type arg3_type;
+ enum bpf_arg_type arg4_type;
+ enum bpf_arg_type arg5_type;
+ };
+ enum bpf_arg_type arg_type[5];
+ };
+ int *btf_id; /* BTF ids of arguments */
};
/* bpf_context is intentionally undefined structure. Pointer to bpf_context is
@@ -281,6 +303,7 @@ enum bpf_reg_type {
PTR_TO_TCP_SOCK_OR_NULL, /* reg points to struct tcp_sock or NULL */
PTR_TO_TP_BUFFER, /* reg points to a writable raw tp's buffer */
PTR_TO_XDP_SOCK, /* reg points to struct xdp_sock */
+ PTR_TO_BTF_ID, /* reg points to kernel struct */
};
/* The information passed from prog-specific *_is_valid_access
@@ -288,7 +311,11 @@ enum bpf_reg_type {
*/
struct bpf_insn_access_aux {
enum bpf_reg_type reg_type;
- int ctx_field_size;
+ union {
+ int ctx_field_size;
+ u32 btf_id;
+ };
+ struct bpf_verifier_log *log; /* for verbose logs */
};
static inline void
@@ -359,14 +386,135 @@ enum bpf_cgroup_storage_type {
#define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX
+/* The longest tracepoint has 12 args.
+ * See include/trace/bpf_probe.h
+ */
+#define MAX_BPF_FUNC_ARGS 12
+
struct bpf_prog_stats {
u64 cnt;
u64 nsecs;
struct u64_stats_sync syncp;
+} __aligned(2 * sizeof(u64));
+
+struct btf_func_model {
+ u8 ret_size;
+ u8 nr_args;
+ u8 arg_size[MAX_BPF_FUNC_ARGS];
+};
+
+/* Restore arguments before returning from trampoline to let original function
+ * continue executing. This flag is used for fentry progs when there are no
+ * fexit progs.
+ */
+#define BPF_TRAMP_F_RESTORE_REGS BIT(0)
+/* Call original function after fentry progs, but before fexit progs.
+ * Makes sense for fentry/fexit, normal calls and indirect calls.
+ */
+#define BPF_TRAMP_F_CALL_ORIG BIT(1)
+/* Skip current frame and return to parent. Makes sense for fentry/fexit
+ * programs only. Should not be used with normal calls and indirect calls.
+ */
+#define BPF_TRAMP_F_SKIP_FRAME BIT(2)
+
+/* Different use cases for BPF trampoline:
+ * 1. replace nop at the function entry (kprobe equivalent)
+ * flags = BPF_TRAMP_F_RESTORE_REGS
+ * fentry = a set of programs to run before returning from trampoline
+ *
+ * 2. replace nop at the function entry (kprobe + kretprobe equivalent)
+ * flags = BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME
+ * orig_call = fentry_ip + MCOUNT_INSN_SIZE
+ * fentry = a set of programs to run before calling original function
+ * fexit = a set of programs to run after original function
+ *
+ * 3. replace direct call instruction anywhere in the function body
+ * or assign a function pointer for indirect call (like tcp_congestion_ops->cong_avoid)
+ * With flags = 0
+ * fentry = a set of programs to run before returning from trampoline
+ * With flags = BPF_TRAMP_F_CALL_ORIG
+ * orig_call = original callback addr or direct function addr
+ * fentry = a set of programs to run before calling original function
+ * fexit = a set of programs to run after original function
+ */
+int arch_prepare_bpf_trampoline(void *image, struct btf_func_model *m, u32 flags,
+ struct bpf_prog **fentry_progs, int fentry_cnt,
+ struct bpf_prog **fexit_progs, int fexit_cnt,
+ void *orig_call);
+/* these two functions are called from generated trampoline */
+u64 notrace __bpf_prog_enter(void);
+void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start);
+
+enum bpf_tramp_prog_type {
+ BPF_TRAMP_FENTRY,
+ BPF_TRAMP_FEXIT,
+ BPF_TRAMP_MAX
+};
+
+struct bpf_trampoline {
+ /* hlist for trampoline_table */
+ struct hlist_node hlist;
+ /* serializes access to fields of this trampoline */
+ struct mutex mutex;
+ refcount_t refcnt;
+ u64 key;
+ struct {
+ struct btf_func_model model;
+ void *addr;
+ } func;
+ /* list of BPF programs using this trampoline */
+ struct hlist_head progs_hlist[BPF_TRAMP_MAX];
+ /* Number of attached programs. A counter per kind. */
+ int progs_cnt[BPF_TRAMP_MAX];
+ /* Executable image of trampoline */
+ void *image;
+ u64 selector;
+};
+#ifdef CONFIG_BPF_JIT
+struct bpf_trampoline *bpf_trampoline_lookup(u64 key);
+int bpf_trampoline_link_prog(struct bpf_prog *prog);
+int bpf_trampoline_unlink_prog(struct bpf_prog *prog);
+void bpf_trampoline_put(struct bpf_trampoline *tr);
+#else
+static inline struct bpf_trampoline *bpf_trampoline_lookup(u64 key)
+{
+ return NULL;
+}
+static inline int bpf_trampoline_link_prog(struct bpf_prog *prog)
+{
+ return -ENOTSUPP;
+}
+static inline int bpf_trampoline_unlink_prog(struct bpf_prog *prog)
+{
+ return -ENOTSUPP;
+}
+static inline void bpf_trampoline_put(struct bpf_trampoline *tr) {}
+#endif
+
+struct bpf_func_info_aux {
+ bool unreliable;
+};
+
+enum bpf_jit_poke_reason {
+ BPF_POKE_REASON_TAIL_CALL,
+};
+
+/* Descriptor of pokes pointing /into/ the JITed image. */
+struct bpf_jit_poke_descriptor {
+ void *ip;
+ union {
+ struct {
+ struct bpf_map *map;
+ u32 key;
+ } tail_call;
+ };
+ bool ip_stable;
+ u8 adj_off;
+ u16 reason;
};
struct bpf_prog_aux {
- atomic_t refcnt;
+ atomic64_t refcnt;
u32 used_map_cnt;
u32 max_ctx_offset;
u32 max_pkt_offset;
@@ -375,10 +523,23 @@ struct bpf_prog_aux {
u32 id;
u32 func_cnt; /* used by non-func prog as the number of func progs */
u32 func_idx; /* 0 for non-func prog, the index in func array for func prog */
+ u32 attach_btf_id; /* in-kernel BTF type id to attach to */
+ struct bpf_prog *linked_prog;
bool verifier_zext; /* Zero extensions has been inserted by verifier. */
bool offload_requested;
+ bool attach_btf_trace; /* true if attaching to BTF-enabled raw tp */
+ bool func_proto_unreliable;
+ enum bpf_tramp_prog_type trampoline_prog_type;
+ struct bpf_trampoline *trampoline;
+ struct hlist_node tramp_hlist;
+ /* BTF_KIND_FUNC_PROTO for valid attach_btf_id */
+ const struct btf_type *attach_func_proto;
+ /* function name for valid attach_btf_id */
+ const char *attach_func_name;
struct bpf_prog **func;
void *jit_data; /* JIT specific data. arch dependent */
+ struct bpf_jit_poke_descriptor *poke_tab;
+ u32 size_poke_tab;
struct latch_tree_node ksym_tnode;
struct list_head ksym_lnode;
const struct bpf_prog_ops *ops;
@@ -394,6 +555,7 @@ struct bpf_prog_aux {
struct bpf_prog_offload *offload;
struct btf *btf;
struct bpf_func_info *func_info;
+ struct bpf_func_info_aux *func_info_aux;
/* bpf_line_info loaded from userspace. linfo->insn_off
* has the xlated insn offset.
* Both the main and sub prog share the same linfo.
@@ -416,6 +578,8 @@ struct bpf_prog_aux {
* main prog always has linfo_idx == 0
*/
u32 linfo_idx;
+ u32 num_exentries;
+ struct exception_table_entry *extable;
struct bpf_prog_stats __percpu *stats;
union {
struct work_struct work;
@@ -423,17 +587,26 @@ struct bpf_prog_aux {
};
};
+struct bpf_array_aux {
+ /* 'Ownership' of prog array is claimed by the first program that
+ * is going to use this map or by the first program whose FD is
+ * stored in the map to make sure that all callers and callees have
+ * the same prog type and JITed flag.
+ */
+ enum bpf_prog_type type;
+ bool jited;
+ /* Programs with direct jumps into programs part of this array. */
+ struct list_head poke_progs;
+ struct bpf_map *map;
+ struct mutex poke_mutex;
+ struct work_struct work;
+};
+
struct bpf_array {
struct bpf_map map;
u32 elem_size;
u32 index_mask;
- /* 'ownership' of prog_array is claimed by the first program that
- * is going to use this map or by the first program which FD is stored
- * in the map to make sure that all callers and callees have the same
- * prog_type and JITed flag
- */
- enum bpf_prog_type owner_prog_type;
- bool owner_jited;
+ struct bpf_array_aux *aux;
union {
char value[0] __aligned(8);
void *ptrs[0] __aligned(8);
@@ -482,6 +655,7 @@ struct bpf_event_entry {
bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);
int bpf_prog_calc_tag(struct bpf_prog *fp);
+const char *kernel_type_name(u32 btf_type_id);
const struct bpf_func_proto *bpf_get_trace_printk_proto(void);
@@ -620,7 +794,7 @@ DECLARE_PER_CPU(int, bpf_prog_active);
extern const struct file_operations bpf_map_fops;
extern const struct file_operations bpf_prog_fops;
-#define BPF_PROG_TYPE(_id, _name) \
+#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
extern const struct bpf_prog_ops _name ## _prog_ops; \
extern const struct bpf_verifier_ops _name ## _verifier_ops;
#define BPF_MAP_TYPE(_id, _ops) \
@@ -636,9 +810,9 @@ extern const struct bpf_verifier_ops xdp_analyzer_ops;
struct bpf_prog *bpf_prog_get(u32 ufd);
struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
bool attach_drv);
-struct bpf_prog * __must_check bpf_prog_add(struct bpf_prog *prog, int i);
+void bpf_prog_add(struct bpf_prog *prog, int i);
void bpf_prog_sub(struct bpf_prog *prog, int i);
-struct bpf_prog * __must_check bpf_prog_inc(struct bpf_prog *prog);
+void bpf_prog_inc(struct bpf_prog *prog);
struct bpf_prog * __must_check bpf_prog_inc_not_zero(struct bpf_prog *prog);
void bpf_prog_put(struct bpf_prog *prog);
int __bpf_prog_charge(struct user_struct *user, u32 pages);
@@ -649,9 +823,9 @@ void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock);
struct bpf_map *bpf_map_get_with_uref(u32 ufd);
struct bpf_map *__bpf_map_get(struct fd f);
-struct bpf_map * __must_check bpf_map_inc(struct bpf_map *map, bool uref);
-struct bpf_map * __must_check bpf_map_inc_not_zero(struct bpf_map *map,
- bool uref);
+void bpf_map_inc(struct bpf_map *map);
+void bpf_map_inc_with_uref(struct bpf_map *map);
+struct bpf_map * __must_check bpf_map_inc_not_zero(struct bpf_map *map);
void bpf_map_put_with_uref(struct bpf_map *map);
void bpf_map_put(struct bpf_map *map);
int bpf_map_charge_memlock(struct bpf_map *map, u32 pages);
@@ -661,6 +835,7 @@ void bpf_map_charge_finish(struct bpf_map_memory *mem);
void bpf_map_charge_move(struct bpf_map_memory *dst,
struct bpf_map_memory *src);
void *bpf_map_area_alloc(u64 size, int numa_node);
+void *bpf_map_area_mmapable_alloc(u64 size, int numa_node);
void bpf_map_area_free(void *base);
void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);
@@ -747,6 +922,24 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
const union bpf_attr *kattr,
union bpf_attr __user *uattr);
+bool btf_ctx_access(int off, int size, enum bpf_access_type type,
+ const struct bpf_prog *prog,
+ struct bpf_insn_access_aux *info);
+int btf_struct_access(struct bpf_verifier_log *log,
+ const struct btf_type *t, int off, int size,
+ enum bpf_access_type atype,
+ u32 *next_btf_id);
+int btf_resolve_helper_id(struct bpf_verifier_log *log,
+ const struct bpf_func_proto *fn, int);
+
+int btf_distill_func_proto(struct bpf_verifier_log *log,
+ struct btf *btf,
+ const struct btf_type *func_proto,
+ const char *func_name,
+ struct btf_func_model *m);
+
+int btf_check_func_arg_match(struct bpf_verifier_env *env, int subprog);
+
#else /* !CONFIG_BPF_SYSCALL */
static inline struct bpf_prog *bpf_prog_get(u32 ufd)
{
@@ -760,10 +953,8 @@ static inline struct bpf_prog *bpf_prog_get_type_dev(u32 ufd,
return ERR_PTR(-EOPNOTSUPP);
}
-static inline struct bpf_prog * __must_check bpf_prog_add(struct bpf_prog *prog,
- int i)
+static inline void bpf_prog_add(struct bpf_prog *prog, int i)
{
- return ERR_PTR(-EOPNOTSUPP);
}
static inline void bpf_prog_sub(struct bpf_prog *prog, int i)
@@ -774,9 +965,8 @@ static inline void bpf_prog_put(struct bpf_prog *prog)
{
}
-static inline struct bpf_prog * __must_check bpf_prog_inc(struct bpf_prog *prog)
+static inline void bpf_prog_inc(struct bpf_prog *prog)
{
- return ERR_PTR(-EOPNOTSUPP);
}
static inline struct bpf_prog *__must_check
@@ -877,6 +1067,10 @@ static inline int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
{
return -ENOTSUPP;
}
+
+static inline void bpf_map_put(struct bpf_map *map)
+{
+}
#endif /* CONFIG_BPF_SYSCALL */
static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
@@ -972,31 +1166,6 @@ static inline int sock_map_get_from_fd(const union bpf_attr *attr,
}
#endif
-#if defined(CONFIG_XDP_SOCKETS)
-struct xdp_sock;
-struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map, u32 key);
-int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp,
- struct xdp_sock *xs);
-void __xsk_map_flush(struct bpf_map *map);
-#else
-struct xdp_sock;
-static inline struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map,
- u32 key)
-{
- return NULL;
-}
-
-static inline int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp,
- struct xdp_sock *xs)
-{
- return -EOPNOTSUPP;
-}
-
-static inline void __xsk_map_flush(struct bpf_map *map)
-{
-}
-#endif
-
#if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL)
void bpf_sk_reuseport_detach(struct sock *sk);
int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key,
@@ -1095,6 +1264,15 @@ static inline u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
#endif
#ifdef CONFIG_INET
+struct sk_reuseport_kern {
+ struct sk_buff *skb;
+ struct sock *sk;
+ struct sock *selected_sk;
+ void *data_end;
+ u32 hash;
+ u32 reuseport_id;
+ bool bind_inany;
+};
bool bpf_tcp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
struct bpf_insn_access_aux *info);
@@ -1145,4 +1323,12 @@ static inline u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
}
#endif /* CONFIG_INET */
+enum bpf_text_poke_type {
+ BPF_MOD_CALL,
+ BPF_MOD_JUMP,
+};
+
+int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
+ void *addr1, void *addr2);
+
#endif /* _LINUX_BPF_H */
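
[Editor's note: illustrative sketch, not part of the patch. The new union in
struct bpf_func_proto lets the named argN_type fields and the arg_type[] array
alias each other, so argument types can be walked generically; the function
name is hypothetical.]

    #include <linux/bpf.h>

    static bool example_takes_btf_id(const struct bpf_func_proto *fn)
    {
    	int i;

    	for (i = 0; i < 5; i++)
    		if (fn->arg_type[i] == ARG_PTR_TO_BTF_ID)
    			return true;
    	return false;
    }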
diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h
index 36a9c2325176..93740b3614d7 100644
--- a/include/linux/bpf_types.h
+++ b/include/linux/bpf_types.h
@@ -2,41 +2,68 @@
/* internal file - do not include directly */
#ifdef CONFIG_NET
-BPF_PROG_TYPE(BPF_PROG_TYPE_SOCKET_FILTER, sk_filter)
-BPF_PROG_TYPE(BPF_PROG_TYPE_SCHED_CLS, tc_cls_act)
-BPF_PROG_TYPE(BPF_PROG_TYPE_SCHED_ACT, tc_cls_act)
-BPF_PROG_TYPE(BPF_PROG_TYPE_XDP, xdp)
+BPF_PROG_TYPE(BPF_PROG_TYPE_SOCKET_FILTER, sk_filter,
+ struct __sk_buff, struct sk_buff)
+BPF_PROG_TYPE(BPF_PROG_TYPE_SCHED_CLS, tc_cls_act,
+ struct __sk_buff, struct sk_buff)
+BPF_PROG_TYPE(BPF_PROG_TYPE_SCHED_ACT, tc_cls_act,
+ struct __sk_buff, struct sk_buff)
+BPF_PROG_TYPE(BPF_PROG_TYPE_XDP, xdp,
+ struct xdp_md, struct xdp_buff)
#ifdef CONFIG_CGROUP_BPF
-BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SKB, cg_skb)
-BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SOCK, cg_sock)
-BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SOCK_ADDR, cg_sock_addr)
+BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SKB, cg_skb,
+ struct __sk_buff, struct sk_buff)
+BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SOCK, cg_sock,
+ struct bpf_sock, struct sock)
+BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SOCK_ADDR, cg_sock_addr,
+ struct bpf_sock_addr, struct bpf_sock_addr_kern)
#endif
-BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_IN, lwt_in)
-BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_OUT, lwt_out)
-BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_XMIT, lwt_xmit)
-BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_SEG6LOCAL, lwt_seg6local)
-BPF_PROG_TYPE(BPF_PROG_TYPE_SOCK_OPS, sock_ops)
-BPF_PROG_TYPE(BPF_PROG_TYPE_SK_SKB, sk_skb)
-BPF_PROG_TYPE(BPF_PROG_TYPE_SK_MSG, sk_msg)
-BPF_PROG_TYPE(BPF_PROG_TYPE_FLOW_DISSECTOR, flow_dissector)
+BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_IN, lwt_in,
+ struct __sk_buff, struct sk_buff)
+BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_OUT, lwt_out,
+ struct __sk_buff, struct sk_buff)
+BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_XMIT, lwt_xmit,
+ struct __sk_buff, struct sk_buff)
+BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_SEG6LOCAL, lwt_seg6local,
+ struct __sk_buff, struct sk_buff)
+BPF_PROG_TYPE(BPF_PROG_TYPE_SOCK_OPS, sock_ops,
+ struct bpf_sock_ops, struct bpf_sock_ops_kern)
+BPF_PROG_TYPE(BPF_PROG_TYPE_SK_SKB, sk_skb,
+ struct __sk_buff, struct sk_buff)
+BPF_PROG_TYPE(BPF_PROG_TYPE_SK_MSG, sk_msg,
+ struct sk_msg_md, struct sk_msg)
+BPF_PROG_TYPE(BPF_PROG_TYPE_FLOW_DISSECTOR, flow_dissector,
+ struct __sk_buff, struct bpf_flow_dissector)
#endif
#ifdef CONFIG_BPF_EVENTS
-BPF_PROG_TYPE(BPF_PROG_TYPE_KPROBE, kprobe)
-BPF_PROG_TYPE(BPF_PROG_TYPE_TRACEPOINT, tracepoint)
-BPF_PROG_TYPE(BPF_PROG_TYPE_PERF_EVENT, perf_event)
-BPF_PROG_TYPE(BPF_PROG_TYPE_RAW_TRACEPOINT, raw_tracepoint)
-BPF_PROG_TYPE(BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE, raw_tracepoint_writable)
+BPF_PROG_TYPE(BPF_PROG_TYPE_KPROBE, kprobe,
+ bpf_user_pt_regs_t, struct pt_regs)
+BPF_PROG_TYPE(BPF_PROG_TYPE_TRACEPOINT, tracepoint,
+ __u64, u64)
+BPF_PROG_TYPE(BPF_PROG_TYPE_PERF_EVENT, perf_event,
+ struct bpf_perf_event_data, struct bpf_perf_event_data_kern)
+BPF_PROG_TYPE(BPF_PROG_TYPE_RAW_TRACEPOINT, raw_tracepoint,
+ struct bpf_raw_tracepoint_args, u64)
+BPF_PROG_TYPE(BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE, raw_tracepoint_writable,
+ struct bpf_raw_tracepoint_args, u64)
+BPF_PROG_TYPE(BPF_PROG_TYPE_TRACING, tracing,
+ void *, void *)
#endif
#ifdef CONFIG_CGROUP_BPF
-BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_DEVICE, cg_dev)
-BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SYSCTL, cg_sysctl)
-BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SOCKOPT, cg_sockopt)
+BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_DEVICE, cg_dev,
+ struct bpf_cgroup_dev_ctx, struct bpf_cgroup_dev_ctx)
+BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SYSCTL, cg_sysctl,
+ struct bpf_sysctl, struct bpf_sysctl_kern)
+BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SOCKOPT, cg_sockopt,
+ struct bpf_sockopt, struct bpf_sockopt_kern)
#endif
#ifdef CONFIG_BPF_LIRC_MODE2
-BPF_PROG_TYPE(BPF_PROG_TYPE_LIRC_MODE2, lirc_mode2)
+BPF_PROG_TYPE(BPF_PROG_TYPE_LIRC_MODE2, lirc_mode2,
+ __u32, u32)
#endif
#ifdef CONFIG_INET
-BPF_PROG_TYPE(BPF_PROG_TYPE_SK_REUSEPORT, sk_reuseport)
+BPF_PROG_TYPE(BPF_PROG_TYPE_SK_REUSEPORT, sk_reuseport,
+ struct sk_reuseport_md, struct sk_reuseport_kern)
#endif
BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY, array_map_ops)
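
[Editor's note: illustrative sketch, not part of the patch. One way a consumer
might expand the new four-argument BPF_PROG_TYPE() form to pair each program
type with its user-visible and in-kernel context structs; BPF_MAP_TYPE must
also be defined (here as a no-op) since the header expands both macros. The
struct name is hypothetical.]

    #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
    	prog_ctx_type _id##_prog; \
    	kern_ctx_type _id##_kern;
    #define BPF_MAP_TYPE(_id, _ops)
    struct example_ctx_pairs {
    #include <linux/bpf_types.h>
    };
    #undef BPF_PROG_TYPE
    #undef BPF_MAP_TYPE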
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 26a6d58ca78c..26e40de9ef55 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -52,6 +52,8 @@ struct bpf_reg_state {
*/
struct bpf_map *map_ptr;
+ u32 btf_id; /* for PTR_TO_BTF_ID */
+
/* Max size from any of the above. */
unsigned long raw;
};
@@ -291,7 +293,7 @@ struct bpf_verifier_state_list {
struct bpf_insn_aux_data {
union {
enum bpf_reg_type ptr_type; /* pointer type for load/store insns */
- unsigned long map_state; /* pointer/poison value for maps */
+ unsigned long map_ptr_state; /* pointer/poison value for maps */
s32 call_imm; /* saved imm field of call insn */
u32 alu_limit; /* limit for add/sub register with pointer */
struct {
@@ -299,6 +301,7 @@ struct bpf_insn_aux_data {
u32 map_off; /* offset from value base address */
};
};
+ u64 map_key_state; /* constant (32 bit) key tracking for maps */
int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
int sanitize_stack_off; /* stack slot to be cleared */
bool seen; /* this insn was processed by the verifier */
@@ -330,15 +333,18 @@ static inline bool bpf_verifier_log_full(const struct bpf_verifier_log *log)
#define BPF_LOG_STATS 4
#define BPF_LOG_LEVEL (BPF_LOG_LEVEL1 | BPF_LOG_LEVEL2)
#define BPF_LOG_MASK (BPF_LOG_LEVEL | BPF_LOG_STATS)
+#define BPF_LOG_KERNEL (BPF_LOG_MASK + 1) /* kernel internal flag */
static inline bool bpf_verifier_log_needed(const struct bpf_verifier_log *log)
{
- return log->level && log->ubuf && !bpf_verifier_log_full(log);
+ return (log->level && log->ubuf && !bpf_verifier_log_full(log)) ||
+ log->level == BPF_LOG_KERNEL;
}
#define BPF_MAX_SUBPROGS 256
struct bpf_subprog_info {
+ /* 'start' has to be the first field otherwise find_subprog() won't work */
u32 start; /* insn idx of function entry point */
u32 linfo_idx; /* The idx to the main_prog->aux->linfo */
u16 stack_depth; /* max. stack depth used by this function */
@@ -397,6 +403,8 @@ __printf(2, 0) void bpf_verifier_vlog(struct bpf_verifier_log *log,
const char *fmt, va_list args);
__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
const char *fmt, ...);
+__printf(2, 3) void bpf_log(struct bpf_verifier_log *log,
+ const char *fmt, ...);
static inline struct bpf_func_state *cur_func(struct bpf_verifier_env *env)
{
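
BPF_LOG_KERNEL deliberately sits outside BPF_LOG_MASK, so
bpf_verifier_log_needed() now reports the log as needed even when there is
no user buffer. A minimal sketch of a kernel-internal caller, assuming the
in-kernel BTF checking this series introduces (function name hypothetical):

    static void example_kernel_log(struct bpf_verifier_log *log)
    {
    	log->level = BPF_LOG_KERNEL;	/* no log->ubuf required */
    	if (bpf_verifier_log_needed(log))
    		bpf_log(log, "in-kernel BTF check failed\n");
    }
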
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h
index 6db2d9a6e503..b475e7f20d28 100644
--- a/include/linux/brcmphy.h
+++ b/include/linux/brcmphy.h
@@ -200,9 +200,15 @@
#define BCM5482_SHD_SSD 0x14 /* 10100: Secondary SerDes control */
#define BCM5482_SHD_SSD_LEDM 0x0008 /* SSD LED Mode enable */
#define BCM5482_SHD_SSD_EN 0x0001 /* SSD enable */
-#define BCM5482_SHD_MODE 0x1f /* 11111: Mode Control Register */
-#define BCM5482_SHD_MODE_1000BX 0x0001 /* Enable 1000BASE-X registers */
+/* 10011: SerDes 100-FX Control Register */
+#define BCM54616S_SHD_100FX_CTRL 0x13
+#define BCM54616S_100FX_MODE BIT(0) /* 100-FX SerDes Enable */
+
+/* 11111: Mode Control Register */
+#define BCM54XX_SHD_MODE 0x1f
+#define BCM54XX_SHD_INTF_SEL_MASK GENMASK(2, 1) /* INTERF_SEL[1:0] */
+#define BCM54XX_SHD_MODE_1000BX BIT(0) /* Enable 1000-X registers */
/*
* EXPANSION SHADOW ACCESS REGISTERS. (PHY REG 0x15, 0x16, and 0x17)
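
A minimal sketch of reading the new INTERF_SEL[1:0] field out of the mode
control shadow register. bcm_phy_read_shadow() is the existing helper from
drivers/net/phy/bcm-phy-lib.h; its use here is illustrative only:

    #include <linux/bitfield.h>

    static int example_intf_sel(struct phy_device *phydev)
    {
    	int val = bcm_phy_read_shadow(phydev, BCM54XX_SHD_MODE);

    	if (val < 0)
    		return val;
    	return FIELD_GET(BCM54XX_SHD_INTF_SEL_MASK, val);
    }
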
diff --git a/include/linux/btf.h b/include/linux/btf.h
index 64cdf2a23d42..79d4abc2556a 100644
--- a/include/linux/btf.h
+++ b/include/linux/btf.h
@@ -5,6 +5,7 @@
#define _LINUX_BTF_H 1
#include <linux/types.h>
+#include <uapi/linux/btf.h>
struct btf;
struct btf_member;
@@ -53,9 +54,41 @@ bool btf_member_is_reg_int(const struct btf *btf, const struct btf_type *s,
int btf_find_spin_lock(const struct btf *btf, const struct btf_type *t);
bool btf_type_is_void(const struct btf_type *t);
+static inline bool btf_type_is_ptr(const struct btf_type *t)
+{
+ return BTF_INFO_KIND(t->info) == BTF_KIND_PTR;
+}
+
+static inline bool btf_type_is_int(const struct btf_type *t)
+{
+ return BTF_INFO_KIND(t->info) == BTF_KIND_INT;
+}
+
+static inline bool btf_type_is_enum(const struct btf_type *t)
+{
+ return BTF_INFO_KIND(t->info) == BTF_KIND_ENUM;
+}
+
+static inline bool btf_type_is_typedef(const struct btf_type *t)
+{
+ return BTF_INFO_KIND(t->info) == BTF_KIND_TYPEDEF;
+}
+
+static inline bool btf_type_is_func(const struct btf_type *t)
+{
+ return BTF_INFO_KIND(t->info) == BTF_KIND_FUNC;
+}
+
+static inline bool btf_type_is_func_proto(const struct btf_type *t)
+{
+ return BTF_INFO_KIND(t->info) == BTF_KIND_FUNC_PROTO;
+}
+
#ifdef CONFIG_BPF_SYSCALL
const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id);
const char *btf_name_by_offset(const struct btf *btf, u32 offset);
+struct btf *btf_parse_vmlinux(void);
+struct btf *bpf_prog_get_target_btf(const struct bpf_prog *prog);
#else
static inline const struct btf_type *btf_type_by_id(const struct btf *btf,
u32 type_id)
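
The new predicates make BTF type walks read naturally. A minimal sketch
built only from the declarations above (the helper name is hypothetical):

    static const struct btf_type *example_deref_ptr(const struct btf *btf,
    						const struct btf_type *t)
    {
    	if (!btf_type_is_ptr(t))
    		return NULL;
    	/* for BTF_KIND_PTR, t->type is the ID of the pointee type */
    	return btf_type_by_id(btf, t->type);
    }
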
diff --git a/include/linux/can/core.h b/include/linux/can/core.h
index 8339071ab08b..e20a0cd09ba5 100644
--- a/include/linux/can/core.h
+++ b/include/linux/can/core.h
@@ -65,5 +65,6 @@ extern void can_rx_unregister(struct net *net, struct net_device *dev,
void *data);
extern int can_send(struct sk_buff *skb, int loop);
+void can_sock_destruct(struct sock *sk);
#endif /* !_CAN_CORE_H */
diff --git a/include/linux/can/platform/mcp251x.h b/include/linux/can/platform/mcp251x.h
deleted file mode 100644
index 9e5ac27fb6c1..000000000000
--- a/include/linux/can/platform/mcp251x.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _CAN_PLATFORM_MCP251X_H
-#define _CAN_PLATFORM_MCP251X_H
-
-/*
- *
- * CAN bus driver for Microchip 251x CAN Controller with SPI Interface
- *
- */
-
-#include <linux/spi/spi.h>
-
-/*
- * struct mcp251x_platform_data - MCP251X SPI CAN controller platform data
- * @oscillator_frequency: - oscillator frequency in Hz
- */
-
-struct mcp251x_platform_data {
- unsigned long oscillator_frequency;
-};
-
-#endif /* !_CAN_PLATFORM_MCP251X_H */
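
With the platform-data header gone, a probe path is expected to take the
oscillator rate from firmware instead. A hedged sketch of the usual pattern
(clock framework first, "clock-frequency" property as fallback); the exact
mcp251x driver conversion is not part of this hunk:

    static int example_get_osc_freq(struct device *dev, u32 *freq)
    {
    	struct clk *clk = devm_clk_get(dev, NULL);

    	if (!IS_ERR(clk)) {
    		*freq = clk_get_rate(clk);
    		return 0;
    	}
    	return device_property_read_u32(dev, "clock-frequency", freq);
    }
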
diff --git a/include/linux/can/rx-offload.h b/include/linux/can/rx-offload.h
index 01219f2902bf..1b78a0cfb615 100644
--- a/include/linux/can/rx-offload.h
+++ b/include/linux/can/rx-offload.h
@@ -15,9 +15,9 @@
struct can_rx_offload {
struct net_device *dev;
- unsigned int (*mailbox_read)(struct can_rx_offload *offload,
- struct can_frame *cf,
- u32 *timestamp, unsigned int mb);
+ struct sk_buff *(*mailbox_read)(struct can_rx_offload *offload,
+ unsigned int mb, u32 *timestamp,
+ bool drop);
struct sk_buff_head skb_queue;
u32 skb_queue_len_max;
@@ -44,7 +44,6 @@ unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload,
unsigned int idx, u32 timestamp);
int can_rx_offload_queue_tail(struct can_rx_offload *offload,
struct sk_buff *skb);
-void can_rx_offload_reset(struct can_rx_offload *offload);
void can_rx_offload_del(struct can_rx_offload *offload);
void can_rx_offload_enable(struct can_rx_offload *offload);
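
The mailbox_read() callback now allocates and returns the skb itself and can
be asked to drop the frame outright. A minimal sketch of the new shape
(driver specifics elided, names hypothetical):

    static struct sk_buff *example_mailbox_read(struct can_rx_offload *offload,
    					    unsigned int mb, u32 *timestamp,
    					    bool drop)
    {
    	struct can_frame *cf;
    	struct sk_buff *skb;

    	if (drop)
    		return NULL;	/* discard the mailbox contents */

    	skb = alloc_can_skb(offload->dev, &cf);
    	if (!skb)
    		return NULL;

    	/* fill *cf and *timestamp from hardware mailbox 'mb' here */
    	return skb;
    }
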
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 430e219e3aba..63097cb243cb 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -355,16 +355,6 @@ struct cgroup {
unsigned long flags; /* "unsigned long" so bitops work */
/*
- * idr allocated in-hierarchy ID.
- *
- * ID 0 is not used, the ID of the root cgroup is always 1, and a
- * new cgroup will be assigned with a smallest available ID.
- *
- * Allocating/Removing ID must be protected by cgroup_mutex.
- */
- int id;
-
- /*
* The depth this cgroup is at. The root is at depth zero and each
* step down the hierarchy increments the level. This along with
* ancestor_ids[] can determine whether a given cgroup is a
@@ -458,7 +448,7 @@ struct cgroup {
struct list_head rstat_css_list;
/* cgroup basic resource statistics */
- struct cgroup_base_stat pending_bstat; /* pending from children */
+ struct cgroup_base_stat last_bstat;
struct cgroup_base_stat bstat;
struct prev_cputime prev_cputime; /* for printing out cputime */
@@ -488,7 +478,7 @@ struct cgroup {
struct cgroup_freezer_state freezer;
/* ids of the ancestors at each level including self */
- int ancestor_ids[];
+ u64 ancestor_ids[];
};
/*
@@ -509,7 +499,7 @@ struct cgroup_root {
struct cgroup cgrp;
/* for cgrp->ancestor_ids[0] */
- int cgrp_ancestor_id_storage;
+ u64 cgrp_ancestor_id_storage;
/* Number of cgroups in the hierarchy, used only for /proc/cgroups */
atomic_t nr_cgrps;
@@ -520,9 +510,6 @@ struct cgroup_root {
/* Hierarchy-specific flags */
unsigned int flags;
- /* IDs for cgroups in this hierarchy */
- struct idr cgroup_idr;
-
/* The path to use for release notifications. */
char release_agent_path[PATH_MAX];
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 3ba3e6da13a6..d7ddebd0cdec 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -150,7 +150,6 @@ struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset,
struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset,
struct cgroup_subsys_state **dst_cssp);
-void cgroup_enable_task_cg_lists(void);
void css_task_iter_start(struct cgroup_subsys_state *css, unsigned int flags,
struct css_task_iter *it);
struct task_struct *css_task_iter_next(struct css_task_iter *it);
@@ -305,6 +304,11 @@ void css_task_iter_end(struct css_task_iter *it);
* Inline functions.
*/
+static inline u64 cgroup_id(struct cgroup *cgrp)
+{
+ return cgrp->kn->id;
+}
+
/**
* css_get - obtain a reference on the specified css
* @css: target css
@@ -566,7 +570,7 @@ static inline bool cgroup_is_descendant(struct cgroup *cgrp,
{
if (cgrp->root != ancestor->root || cgrp->level < ancestor->level)
return false;
- return cgrp->ancestor_ids[ancestor->level] == ancestor->id;
+ return cgrp->ancestor_ids[ancestor->level] == cgroup_id(ancestor);
}
/**
@@ -617,7 +621,7 @@ static inline bool cgroup_is_populated(struct cgroup *cgrp)
/* returns ino associated with a cgroup */
static inline ino_t cgroup_ino(struct cgroup *cgrp)
{
- return cgrp->kn->id.ino;
+ return kernfs_ino(cgrp->kn);
}
/* cft/css accessors for cftype->write() operation */
@@ -688,18 +692,13 @@ static inline void cgroup_kthread_ready(void)
current->no_cgroup_migration = 0;
}
-static inline union kernfs_node_id *cgroup_get_kernfs_id(struct cgroup *cgrp)
-{
- return &cgrp->kn->id;
-}
-
-void cgroup_path_from_kernfs_id(const union kernfs_node_id *id,
- char *buf, size_t buflen);
+void cgroup_path_from_kernfs_id(u64 id, char *buf, size_t buflen);
#else /* !CONFIG_CGROUPS */
struct cgroup_subsys_state;
struct cgroup;
+static inline u64 cgroup_id(struct cgroup *cgrp) { return 1; }
static inline void css_get(struct cgroup_subsys_state *css) {}
static inline void css_put(struct cgroup_subsys_state *css) {}
static inline int cgroup_attach_task_all(struct task_struct *from,
@@ -719,10 +718,6 @@ static inline int cgroup_init_early(void) { return 0; }
static inline int cgroup_init(void) { return 0; }
static inline void cgroup_init_kthreadd(void) {}
static inline void cgroup_kthread_ready(void) {}
-static inline union kernfs_node_id *cgroup_get_kernfs_id(struct cgroup *cgrp)
-{
- return NULL;
-}
static inline struct cgroup *cgroup_parent(struct cgroup *cgrp)
{
@@ -740,8 +735,8 @@ static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
return true;
}
-static inline void cgroup_path_from_kernfs_id(const union kernfs_node_id *id,
- char *buf, size_t buflen) {}
+static inline void cgroup_path_from_kernfs_id(u64 id, char *buf, size_t buflen)
+{}
#endif /* !CONFIG_CGROUPS */
#ifdef CONFIG_CGROUPS
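
cgroup IDs are now the 64-bit kernfs node IDs, so both the ID and the inode
number derive from cgrp->kn instead of being stored separately. A minimal
sketch (function name hypothetical):

    static void example_cgroup_identity(struct cgroup *cgrp)
    {
    	u64 id = cgroup_id(cgrp);	/* == cgrp->kn->id */
    	ino_t ino = cgroup_ino(cgrp);	/* == kernfs_ino(cgrp->kn) */

    	pr_debug("cgroup id=%llu ino=%lu\n", id, (unsigned long)ino);
    }
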
diff --git a/include/linux/compat.h b/include/linux/compat.h
index 16dafd9f4b86..c4c389c7e1b4 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -410,8 +410,6 @@ struct compat_kexec_segment;
struct compat_mq_attr;
struct compat_msgbuf;
-extern void compat_exit_robust_list(struct task_struct *curr);
-
#define BITS_PER_COMPAT_LONG (8*sizeof(compat_long_t))
#define BITS_TO_COMPAT_LONGS(bits) DIV_ROUND_UP(bits, BITS_PER_COMPAT_LONG)
diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h
index d05609ad329d..64ec82851aa3 100644
--- a/include/linux/context_tracking.h
+++ b/include/linux/context_tracking.h
@@ -22,26 +22,26 @@ extern void context_tracking_user_exit(void);
static inline void user_enter(void)
{
- if (context_tracking_is_enabled())
+ if (context_tracking_enabled())
context_tracking_enter(CONTEXT_USER);
}
static inline void user_exit(void)
{
- if (context_tracking_is_enabled())
+ if (context_tracking_enabled())
context_tracking_exit(CONTEXT_USER);
}
/* Called with interrupts disabled. */
static inline void user_enter_irqoff(void)
{
- if (context_tracking_is_enabled())
+ if (context_tracking_enabled())
__context_tracking_enter(CONTEXT_USER);
}
static inline void user_exit_irqoff(void)
{
- if (context_tracking_is_enabled())
+ if (context_tracking_enabled())
__context_tracking_exit(CONTEXT_USER);
}
@@ -49,7 +49,7 @@ static inline enum ctx_state exception_enter(void)
{
enum ctx_state prev_ctx;
- if (!context_tracking_is_enabled())
+ if (!context_tracking_enabled())
return 0;
prev_ctx = this_cpu_read(context_tracking.state);
@@ -61,7 +61,7 @@ static inline enum ctx_state exception_enter(void)
static inline void exception_exit(enum ctx_state prev_ctx)
{
- if (context_tracking_is_enabled()) {
+ if (context_tracking_enabled()) {
if (prev_ctx != CONTEXT_KERNEL)
context_tracking_enter(prev_ctx);
}
@@ -77,7 +77,7 @@ static inline void exception_exit(enum ctx_state prev_ctx)
*/
static inline enum ctx_state ct_state(void)
{
- return context_tracking_is_enabled() ?
+ return context_tracking_enabled() ?
this_cpu_read(context_tracking.state) : CONTEXT_DISABLED;
}
#else
@@ -90,7 +90,7 @@ static inline void exception_exit(enum ctx_state prev_ctx) { }
static inline enum ctx_state ct_state(void) { return CONTEXT_DISABLED; }
#endif /* !CONFIG_CONTEXT_TRACKING */
-#define CT_WARN_ON(cond) WARN_ON(context_tracking_is_enabled() && (cond))
+#define CT_WARN_ON(cond) WARN_ON(context_tracking_enabled() && (cond))
#ifdef CONFIG_CONTEXT_TRACKING_FORCE
extern void context_tracking_init(void);
@@ -103,12 +103,12 @@ static inline void context_tracking_init(void) { }
/* must be called with irqs disabled */
static inline void guest_enter_irqoff(void)
{
- if (vtime_accounting_cpu_enabled())
+ if (vtime_accounting_enabled_this_cpu())
vtime_guest_enter(current);
else
current->flags |= PF_VCPU;
- if (context_tracking_is_enabled())
+ if (context_tracking_enabled())
__context_tracking_enter(CONTEXT_GUEST);
/* KVM does not hold any references to rcu protected data when it
@@ -118,16 +118,16 @@ static inline void guest_enter_irqoff(void)
* one time slice). Lets treat guest mode as quiescent state, just like
* we do with user-mode execution.
*/
- if (!context_tracking_cpu_is_enabled())
+ if (!context_tracking_enabled_this_cpu())
rcu_virt_note_context_switch(smp_processor_id());
}
static inline void guest_exit_irqoff(void)
{
- if (context_tracking_is_enabled())
+ if (context_tracking_enabled())
__context_tracking_exit(CONTEXT_GUEST);
- if (vtime_accounting_cpu_enabled())
+ if (vtime_accounting_enabled_this_cpu())
vtime_guest_exit(current);
else
current->flags &= ~PF_VCPU;
@@ -141,7 +141,7 @@ static inline void guest_enter_irqoff(void)
* to assume that it's the stime pending cputime
* to flush.
*/
- vtime_account_system(current);
+ vtime_account_kernel(current);
current->flags |= PF_VCPU;
rcu_virt_note_context_switch(smp_processor_id());
}
@@ -149,7 +149,7 @@ static inline void guest_enter_irqoff(void)
static inline void guest_exit_irqoff(void)
{
/* Flush the guest cputime we spent on the guest */
- vtime_account_system(current);
+ vtime_account_kernel(current);
current->flags &= ~PF_VCPU;
}
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */
diff --git a/include/linux/context_tracking_state.h b/include/linux/context_tracking_state.h
index f128dc3be0df..e7fe6678b7ad 100644
--- a/include/linux/context_tracking_state.h
+++ b/include/linux/context_tracking_state.h
@@ -23,17 +23,22 @@ struct context_tracking {
};
#ifdef CONFIG_CONTEXT_TRACKING
-extern struct static_key_false context_tracking_enabled;
+extern struct static_key_false context_tracking_key;
DECLARE_PER_CPU(struct context_tracking, context_tracking);
-static inline bool context_tracking_is_enabled(void)
+static inline bool context_tracking_enabled(void)
{
- return static_branch_unlikely(&context_tracking_enabled);
+ return static_branch_unlikely(&context_tracking_key);
}
-static inline bool context_tracking_cpu_is_enabled(void)
+static inline bool context_tracking_enabled_cpu(int cpu)
{
- return __this_cpu_read(context_tracking.active);
+ return context_tracking_enabled() && per_cpu(context_tracking.active, cpu);
+}
+
+static inline bool context_tracking_enabled_this_cpu(void)
+{
+ return context_tracking_enabled() && __this_cpu_read(context_tracking.active);
}
static inline bool context_tracking_in_user(void)
@@ -42,9 +47,9 @@ static inline bool context_tracking_in_user(void)
}
#else
static inline bool context_tracking_in_user(void) { return false; }
-static inline bool context_tracking_active(void) { return false; }
-static inline bool context_tracking_is_enabled(void) { return false; }
-static inline bool context_tracking_cpu_is_enabled(void) { return false; }
+static inline bool context_tracking_enabled(void) { return false; }
+static inline bool context_tracking_enabled_cpu(int cpu) { return false; }
+static inline bool context_tracking_enabled_this_cpu(void) { return false; }
#endif /* CONFIG_CONTEXT_TRACKING */
#endif
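
After the rename, the _cpu and _this_cpu variants fold the global static key
into the per-CPU check, so callers no longer have to test both by hand. A
minimal sketch (function name hypothetical):

    static bool example_needs_user_hooks(int cpu)
    {
    	/* old: context_tracking_is_enabled() && per-CPU 'active' test */
    	return context_tracking_enabled_cpu(cpu);
    }
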
diff --git a/include/linux/coresight.h b/include/linux/coresight.h
index a2b68823717b..44e552de419c 100644
--- a/include/linux/coresight.h
+++ b/include/linux/coresight.h
@@ -285,6 +285,8 @@ extern void coresight_disclaim_device(void __iomem *base);
extern void coresight_disclaim_device_unlocked(void __iomem *base);
extern char *coresight_alloc_device_name(struct coresight_dev_list *devs,
struct device *dev);
+
+extern bool coresight_loses_context_with_cpu(struct device *dev);
#else
static inline struct coresight_device *
coresight_register(struct coresight_desc *desc) { return NULL; }
@@ -307,6 +309,10 @@ static inline int coresight_claim_device(void __iomem *base)
static inline void coresight_disclaim_device(void __iomem *base) {}
static inline void coresight_disclaim_device_unlocked(void __iomem *base) {}
+static inline bool coresight_loses_context_with_cpu(struct device *dev)
+{
+ return false;
+}
#endif
extern int coresight_get_cpu(struct device *dev);
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index d0633ebdaa9c..1ca2baf817ed 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -59,6 +59,11 @@ extern ssize_t cpu_show_l1tf(struct device *dev,
struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_mds(struct device *dev,
struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_tsx_async_abort(struct device *dev,
+ struct device_attribute *attr,
+ char *buf);
+extern ssize_t cpu_show_itlb_multihit(struct device *dev,
+ struct device_attribute *attr, char *buf);
extern __printf(4, 5)
struct device *cpu_device_create(struct device *parent, void *drvdata,
@@ -179,7 +184,12 @@ void arch_cpu_idle_dead(void);
int cpu_report_state(int cpu);
int cpu_check_up_prepare(int cpu);
void cpu_set_state_online(int cpu);
-void play_idle(unsigned long duration_us);
+void play_idle_precise(u64 duration_ns, u64 latency_ns);
+
+static inline void play_idle(unsigned long duration_us)
+{
+ play_idle_precise(duration_us * NSEC_PER_USEC, U64_MAX);
+}
#ifdef CONFIG_HOTPLUG_CPU
bool cpu_wait_death(unsigned int cpu, int seconds);
@@ -213,28 +223,7 @@ static inline int cpuhp_smt_enable(void) { return 0; }
static inline int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval) { return 0; }
#endif
-/*
- * These are used for a global "mitigations=" cmdline option for toggling
- * optional CPU mitigations.
- */
-enum cpu_mitigations {
- CPU_MITIGATIONS_OFF,
- CPU_MITIGATIONS_AUTO,
- CPU_MITIGATIONS_AUTO_NOSMT,
-};
-
-extern enum cpu_mitigations cpu_mitigations;
-
-/* mitigations=off */
-static inline bool cpu_mitigations_off(void)
-{
- return cpu_mitigations == CPU_MITIGATIONS_OFF;
-}
-
-/* mitigations=auto,nosmt */
-static inline bool cpu_mitigations_auto_nosmt(void)
-{
- return cpu_mitigations == CPU_MITIGATIONS_AUTO_NOSMT;
-}
+extern bool cpu_mitigations_off(void);
+extern bool cpu_mitigations_auto_nosmt(void);
#endif /* _LINUX_CPU_H_ */
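
Forced idle now takes nanoseconds plus an explicit exit-latency limit, with
the old microsecond call kept as the wrapper shown above. A minimal usage
sketch: injecting 1 ms of idle while tolerating at most 100 us of wakeup
latency:

    static void example_forced_idle(void)
    {
    	play_idle_precise(1 * NSEC_PER_MSEC, 100 * NSEC_PER_USEC);
    }
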
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 068793a619ca..e51ee772b9f5 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -129,6 +129,7 @@ enum cpuhp_state {
CPUHP_AP_ARC_TIMER_STARTING,
CPUHP_AP_RISCV_TIMER_STARTING,
CPUHP_AP_CSKY_TIMER_STARTING,
+ CPUHP_AP_HYPERV_TIMER_STARTING,
CPUHP_AP_KVM_STARTING,
CPUHP_AP_KVM_ARM_VGIC_INIT_STARTING,
CPUHP_AP_KVM_ARM_VGIC_STARTING,
@@ -136,6 +137,7 @@ enum cpuhp_state {
/* Must be the last timer callback */
CPUHP_AP_DUMMY_TIMER_STARTING,
CPUHP_AP_ARM_XEN_STARTING,
+ CPUHP_AP_ARM_KVMPV_STARTING,
CPUHP_AP_ARM_CORESIGHT_STARTING,
CPUHP_AP_ARM64_ISNDEP_STARTING,
CPUHP_AP_SMPCFD_DYING,
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index 4b6b5bea8f79..2dbe46b7c213 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -29,10 +29,13 @@ struct cpuidle_driver;
* CPUIDLE DEVICE INTERFACE *
****************************/
+#define CPUIDLE_STATE_DISABLED_BY_USER BIT(0)
+#define CPUIDLE_STATE_DISABLED_BY_DRIVER BIT(1)
+
struct cpuidle_state_usage {
unsigned long long disable;
unsigned long long usage;
- unsigned long long time; /* in US */
+ u64 time_ns;
unsigned long long above; /* Number of times it's been too deep */
unsigned long long below; /* Number of times it's been too shallow */
#ifdef CONFIG_SUSPEND
@@ -45,6 +48,8 @@ struct cpuidle_state {
char name[CPUIDLE_NAME_LEN];
char desc[CPUIDLE_DESC_LEN];
+ u64 exit_latency_ns;
+ u64 target_residency_ns;
unsigned int flags;
unsigned int exit_latency; /* in US */
int power_usage; /* in mW */
@@ -80,14 +85,14 @@ struct cpuidle_driver_kobj;
struct cpuidle_device {
unsigned int registered:1;
unsigned int enabled:1;
- unsigned int use_deepest_state:1;
unsigned int poll_time_limit:1;
unsigned int cpu;
ktime_t next_hrtimer;
int last_state_idx;
- int last_residency;
+ u64 last_residency_ns;
u64 poll_limit_ns;
+ u64 forced_idle_latency_limit_ns;
struct cpuidle_state_usage states_usage[CPUIDLE_STATE_MAX];
struct cpuidle_state_kobj *kobjs[CPUIDLE_STATE_MAX];
struct cpuidle_driver_kobj *kobj_driver;
@@ -144,6 +149,8 @@ extern int cpuidle_register_driver(struct cpuidle_driver *drv);
extern struct cpuidle_driver *cpuidle_get_driver(void);
extern struct cpuidle_driver *cpuidle_driver_ref(void);
extern void cpuidle_driver_unref(void);
+extern void cpuidle_driver_state_disabled(struct cpuidle_driver *drv, int idx,
+ bool disable);
extern void cpuidle_unregister_driver(struct cpuidle_driver *drv);
extern int cpuidle_register_device(struct cpuidle_device *dev);
extern void cpuidle_unregister_device(struct cpuidle_device *dev);
@@ -181,6 +188,8 @@ static inline int cpuidle_register_driver(struct cpuidle_driver *drv)
static inline struct cpuidle_driver *cpuidle_get_driver(void) {return NULL; }
static inline struct cpuidle_driver *cpuidle_driver_ref(void) {return NULL; }
static inline void cpuidle_driver_unref(void) {}
+static inline void cpuidle_driver_state_disabled(struct cpuidle_driver *drv,
+ int idx, bool disable) { }
static inline void cpuidle_unregister_driver(struct cpuidle_driver *drv) { }
static inline int cpuidle_register_device(struct cpuidle_device *dev)
{return -ENODEV; }
@@ -204,18 +213,20 @@ static inline struct cpuidle_device *cpuidle_get_device(void) {return NULL; }
#ifdef CONFIG_CPU_IDLE
extern int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
- struct cpuidle_device *dev);
+ struct cpuidle_device *dev,
+ u64 latency_limit_ns);
extern int cpuidle_enter_s2idle(struct cpuidle_driver *drv,
struct cpuidle_device *dev);
-extern void cpuidle_use_deepest_state(bool enable);
+extern void cpuidle_use_deepest_state(u64 latency_limit_ns);
#else
static inline int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
- struct cpuidle_device *dev)
+ struct cpuidle_device *dev,
+ u64 latency_limit_ns)
{return -ENODEV; }
static inline int cpuidle_enter_s2idle(struct cpuidle_driver *drv,
struct cpuidle_device *dev)
{return -ENODEV; }
-static inline void cpuidle_use_deepest_state(bool enable)
+static inline void cpuidle_use_deepest_state(u64 latency_limit_ns)
{
}
#endif
@@ -260,7 +271,7 @@ struct cpuidle_governor {
#ifdef CONFIG_CPU_IDLE
extern int cpuidle_register_governor(struct cpuidle_governor *gov);
-extern int cpuidle_governor_latency_req(unsigned int cpu);
+extern s64 cpuidle_governor_latency_req(unsigned int cpu);
#else
static inline int cpuidle_register_governor(struct cpuidle_governor *gov)
{return 0;}
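
State residencies and latencies are now kept in nanoseconds, and deepest-state
selection honours a latency limit. A hedged sketch of a governor-style
usability check built only from the fields above (helper name hypothetical):

    static bool example_state_usable(struct cpuidle_driver *drv,
    				 struct cpuidle_device *dev, int idx,
    				 u64 latency_limit_ns)
    {
    	struct cpuidle_state *s = &drv->states[idx];

    	return !dev->states_usage[idx].disable &&
    	       s->exit_latency_ns <= latency_limit_ns;
    }
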
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 19ea3a371d7b..23365a9d062e 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -41,8 +41,6 @@
#define CRYPTO_ALG_TYPE_CIPHER 0x00000001
#define CRYPTO_ALG_TYPE_COMPRESS 0x00000002
#define CRYPTO_ALG_TYPE_AEAD 0x00000003
-#define CRYPTO_ALG_TYPE_BLKCIPHER 0x00000004
-#define CRYPTO_ALG_TYPE_ABLKCIPHER 0x00000005
#define CRYPTO_ALG_TYPE_SKCIPHER 0x00000005
#define CRYPTO_ALG_TYPE_KPP 0x00000008
#define CRYPTO_ALG_TYPE_ACOMPRESS 0x0000000a
@@ -55,7 +53,6 @@
#define CRYPTO_ALG_TYPE_HASH_MASK 0x0000000e
#define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000e
-#define CRYPTO_ALG_TYPE_BLKCIPHER_MASK 0x0000000c
#define CRYPTO_ALG_TYPE_ACOMPRESS_MASK 0x0000000e
#define CRYPTO_ALG_LARVAL 0x00000010
@@ -139,9 +136,7 @@
#define CRYPTO_MINALIGN_ATTR __attribute__ ((__aligned__(CRYPTO_MINALIGN)))
struct scatterlist;
-struct crypto_ablkcipher;
struct crypto_async_request;
-struct crypto_blkcipher;
struct crypto_tfm;
struct crypto_type;
@@ -163,25 +158,6 @@ struct crypto_async_request {
u32 flags;
};
-struct ablkcipher_request {
- struct crypto_async_request base;
-
- unsigned int nbytes;
-
- void *info;
-
- struct scatterlist *src;
- struct scatterlist *dst;
-
- void *__ctx[] CRYPTO_MINALIGN_ATTR;
-};
-
-struct blkcipher_desc {
- struct crypto_blkcipher *tfm;
- void *info;
- u32 flags;
-};
-
/**
* DOC: Block Cipher Algorithm Definitions
*
@@ -190,83 +166,6 @@ struct blkcipher_desc {
*/
/**
- * struct ablkcipher_alg - asynchronous block cipher definition
- * @min_keysize: Minimum key size supported by the transformation. This is the
- * smallest key length supported by this transformation algorithm.
- * This must be set to one of the pre-defined values as this is
- * not hardware specific. Possible values for this field can be
- * found via git grep "_MIN_KEY_SIZE" include/crypto/
- * @max_keysize: Maximum key size supported by the transformation. This is the
- * largest key length supported by this transformation algorithm.
- * This must be set to one of the pre-defined values as this is
- * not hardware specific. Possible values for this field can be
- * found via git grep "_MAX_KEY_SIZE" include/crypto/
- * @setkey: Set key for the transformation. This function is used to either
- * program a supplied key into the hardware or store the key in the
- * transformation context for programming it later. Note that this
- * function does modify the transformation context. This function can
- * be called multiple times during the existence of the transformation
- * object, so one must make sure the key is properly reprogrammed into
- * the hardware. This function is also responsible for checking the key
- * length for validity. In case a software fallback was put in place in
- * the @cra_init call, this function might need to use the fallback if
- * the algorithm doesn't support all of the key sizes.
- * @encrypt: Encrypt a scatterlist of blocks. This function is used to encrypt
- * the supplied scatterlist containing the blocks of data. The crypto
- * API consumer is responsible for aligning the entries of the
- * scatterlist properly and making sure the chunks are correctly
- * sized. In case a software fallback was put in place in the
- * @cra_init call, this function might need to use the fallback if
- * the algorithm doesn't support all of the key sizes. In case the
- * key was stored in transformation context, the key might need to be
- * re-programmed into the hardware in this function. This function
- * shall not modify the transformation context, as this function may
- * be called in parallel with the same transformation object.
- * @decrypt: Decrypt a single block. This is a reverse counterpart to @encrypt
- * and the conditions are exactly the same.
- * @ivsize: IV size applicable for transformation. The consumer must provide an
- * IV of exactly that size to perform the encrypt or decrypt operation.
- *
- * All fields except @ivsize are mandatory and must be filled.
- */
-struct ablkcipher_alg {
- int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key,
- unsigned int keylen);
- int (*encrypt)(struct ablkcipher_request *req);
- int (*decrypt)(struct ablkcipher_request *req);
-
- unsigned int min_keysize;
- unsigned int max_keysize;
- unsigned int ivsize;
-};
-
-/**
- * struct blkcipher_alg - synchronous block cipher definition
- * @min_keysize: see struct ablkcipher_alg
- * @max_keysize: see struct ablkcipher_alg
- * @setkey: see struct ablkcipher_alg
- * @encrypt: see struct ablkcipher_alg
- * @decrypt: see struct ablkcipher_alg
- * @ivsize: see struct ablkcipher_alg
- *
- * All fields except @ivsize are mandatory and must be filled.
- */
-struct blkcipher_alg {
- int (*setkey)(struct crypto_tfm *tfm, const u8 *key,
- unsigned int keylen);
- int (*encrypt)(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes);
- int (*decrypt)(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes);
-
- unsigned int min_keysize;
- unsigned int max_keysize;
- unsigned int ivsize;
-};
-
-/**
* struct cipher_alg - single-block symmetric ciphers definition
* @cia_min_keysize: Minimum key size supported by the transformation. This is
* the smallest key length supported by this transformation
@@ -450,8 +349,6 @@ struct crypto_istat_rng {
};
#endif /* CONFIG_CRYPTO_STATS */
-#define cra_ablkcipher cra_u.ablkcipher
-#define cra_blkcipher cra_u.blkcipher
#define cra_cipher cra_u.cipher
#define cra_compress cra_u.compress
@@ -499,9 +396,8 @@ struct crypto_istat_rng {
* transformation algorithm.
* @cra_type: Type of the cryptographic transformation. This is a pointer to
* struct crypto_type, which implements callbacks common for all
- * transformation types. There are multiple options:
- * &crypto_blkcipher_type, &crypto_ablkcipher_type,
- * &crypto_ahash_type, &crypto_rng_type.
+ * transformation types. There are multiple options, such as
+ * &crypto_skcipher_type, &crypto_ahash_type, &crypto_rng_type.
* This field might be empty. In that case, there are no common
* callbacks. This is the case for: cipher, compress, shash.
* @cra_u: Callbacks implementing the transformation. This is a union of
@@ -520,10 +416,6 @@ struct crypto_istat_rng {
* @cra_exit: Deinitialize the cryptographic transformation object. This is a
* counterpart to @cra_init, used to remove various changes set in
* @cra_init.
- * @cra_u.ablkcipher: Union member which contains an asynchronous block cipher
- * definition. See @struct @ablkcipher_alg.
- * @cra_u.blkcipher: Union member which contains a synchronous block cipher
- * definition See @struct @blkcipher_alg.
* @cra_u.cipher: Union member which contains a single-block symmetric cipher
* definition. See @struct @cipher_alg.
* @cra_u.compress: Union member which contains a (de)compression algorithm.
@@ -565,8 +457,6 @@ struct crypto_alg {
const struct crypto_type *cra_type;
union {
- struct ablkcipher_alg ablkcipher;
- struct blkcipher_alg blkcipher;
struct cipher_alg cipher;
struct compress_alg compress;
} cra_u;
@@ -594,8 +484,6 @@ struct crypto_alg {
#ifdef CONFIG_CRYPTO_STATS
void crypto_stats_init(struct crypto_alg *alg);
void crypto_stats_get(struct crypto_alg *alg);
-void crypto_stats_ablkcipher_encrypt(unsigned int nbytes, int ret, struct crypto_alg *alg);
-void crypto_stats_ablkcipher_decrypt(unsigned int nbytes, int ret, struct crypto_alg *alg);
void crypto_stats_aead_encrypt(unsigned int cryptlen, struct crypto_alg *alg, int ret);
void crypto_stats_aead_decrypt(unsigned int cryptlen, struct crypto_alg *alg, int ret);
void crypto_stats_ahash_update(unsigned int nbytes, int ret, struct crypto_alg *alg);
@@ -618,10 +506,6 @@ static inline void crypto_stats_init(struct crypto_alg *alg)
{}
static inline void crypto_stats_get(struct crypto_alg *alg)
{}
-static inline void crypto_stats_ablkcipher_encrypt(unsigned int nbytes, int ret, struct crypto_alg *alg)
-{}
-static inline void crypto_stats_ablkcipher_decrypt(unsigned int nbytes, int ret, struct crypto_alg *alg)
-{}
static inline void crypto_stats_aead_encrypt(unsigned int cryptlen, struct crypto_alg *alg, int ret)
{}
static inline void crypto_stats_aead_decrypt(unsigned int cryptlen, struct crypto_alg *alg, int ret)
@@ -715,28 +599,6 @@ int crypto_has_alg(const char *name, u32 type, u32 mask);
* crypto_free_*(), as well as the various helpers below.
*/
-struct ablkcipher_tfm {
- int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key,
- unsigned int keylen);
- int (*encrypt)(struct ablkcipher_request *req);
- int (*decrypt)(struct ablkcipher_request *req);
-
- struct crypto_ablkcipher *base;
-
- unsigned int ivsize;
- unsigned int reqsize;
-};
-
-struct blkcipher_tfm {
- void *iv;
- int (*setkey)(struct crypto_tfm *tfm, const u8 *key,
- unsigned int keylen);
- int (*encrypt)(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes);
- int (*decrypt)(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes);
-};
-
struct cipher_tfm {
int (*cit_setkey)(struct crypto_tfm *tfm,
const u8 *key, unsigned int keylen);
@@ -753,8 +615,6 @@ struct compress_tfm {
u8 *dst, unsigned int *dlen);
};
-#define crt_ablkcipher crt_u.ablkcipher
-#define crt_blkcipher crt_u.blkcipher
#define crt_cipher crt_u.cipher
#define crt_compress crt_u.compress
@@ -763,8 +623,6 @@ struct crypto_tfm {
u32 crt_flags;
union {
- struct ablkcipher_tfm ablkcipher;
- struct blkcipher_tfm blkcipher;
struct cipher_tfm cipher;
struct compress_tfm compress;
} crt_u;
@@ -776,14 +634,6 @@ struct crypto_tfm {
void *__crt_ctx[] CRYPTO_MINALIGN_ATTR;
};
-struct crypto_ablkcipher {
- struct crypto_tfm base;
-};
-
-struct crypto_blkcipher {
- struct crypto_tfm base;
-};
-
struct crypto_cipher {
struct crypto_tfm base;
};
@@ -891,713 +741,6 @@ static inline unsigned int crypto_tfm_ctx_alignment(void)
return __alignof__(tfm->__crt_ctx);
}
-/*
- * API wrappers.
- */
-static inline struct crypto_ablkcipher *__crypto_ablkcipher_cast(
- struct crypto_tfm *tfm)
-{
- return (struct crypto_ablkcipher *)tfm;
-}
-
-static inline u32 crypto_skcipher_type(u32 type)
-{
- type &= ~CRYPTO_ALG_TYPE_MASK;
- type |= CRYPTO_ALG_TYPE_BLKCIPHER;
- return type;
-}
-
-static inline u32 crypto_skcipher_mask(u32 mask)
-{
- mask &= ~CRYPTO_ALG_TYPE_MASK;
- mask |= CRYPTO_ALG_TYPE_BLKCIPHER_MASK;
- return mask;
-}
-
-/**
- * DOC: Asynchronous Block Cipher API
- *
- * Asynchronous block cipher API is used with the ciphers of type
- * CRYPTO_ALG_TYPE_ABLKCIPHER (listed as type "ablkcipher" in /proc/crypto).
- *
- * Asynchronous cipher operations imply that the function invocation for a
- * cipher request returns immediately before the completion of the operation.
- * The cipher request is scheduled as a separate kernel thread and therefore
- * load-balanced on the different CPUs via the process scheduler. To allow
- * the kernel crypto API to inform the caller about the completion of a cipher
- * request, the caller must provide a callback function. That function is
- * invoked with the cipher handle when the request completes.
- *
- * To support the asynchronous operation, additional information than just the
- * cipher handle must be supplied to the kernel crypto API. That additional
- * information is given by filling in the ablkcipher_request data structure.
- *
- * For the asynchronous block cipher API, the state is maintained with the tfm
- * cipher handle. A single tfm can be used across multiple calls and in
- * parallel. For asynchronous block cipher calls, context data supplied and
- * only used by the caller can be referenced the request data structure in
- * addition to the IV used for the cipher request. The maintenance of such
- * state information would be important for a crypto driver implementer to
- * have, because when calling the callback function upon completion of the
- * cipher operation, that callback function may need some information about
- * which operation just finished if it invoked multiple in parallel. This
- * state information is unused by the kernel crypto API.
- */
-
-static inline struct crypto_tfm *crypto_ablkcipher_tfm(
- struct crypto_ablkcipher *tfm)
-{
- return &tfm->base;
-}
-
-/**
- * crypto_free_ablkcipher() - zeroize and free cipher handle
- * @tfm: cipher handle to be freed
- */
-static inline void crypto_free_ablkcipher(struct crypto_ablkcipher *tfm)
-{
- crypto_free_tfm(crypto_ablkcipher_tfm(tfm));
-}
-
-/**
- * crypto_has_ablkcipher() - Search for the availability of an ablkcipher.
- * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
- * ablkcipher
- * @type: specifies the type of the cipher
- * @mask: specifies the mask for the cipher
- *
- * Return: true when the ablkcipher is known to the kernel crypto API; false
- * otherwise
- */
-static inline int crypto_has_ablkcipher(const char *alg_name, u32 type,
- u32 mask)
-{
- return crypto_has_alg(alg_name, crypto_skcipher_type(type),
- crypto_skcipher_mask(mask));
-}
-
-static inline struct ablkcipher_tfm *crypto_ablkcipher_crt(
- struct crypto_ablkcipher *tfm)
-{
- return &crypto_ablkcipher_tfm(tfm)->crt_ablkcipher;
-}
-
-/**
- * crypto_ablkcipher_ivsize() - obtain IV size
- * @tfm: cipher handle
- *
- * The size of the IV for the ablkcipher referenced by the cipher handle is
- * returned. This IV size may be zero if the cipher does not need an IV.
- *
- * Return: IV size in bytes
- */
-static inline unsigned int crypto_ablkcipher_ivsize(
- struct crypto_ablkcipher *tfm)
-{
- return crypto_ablkcipher_crt(tfm)->ivsize;
-}
-
-/**
- * crypto_ablkcipher_blocksize() - obtain block size of cipher
- * @tfm: cipher handle
- *
- * The block size for the ablkcipher referenced with the cipher handle is
- * returned. The caller may use that information to allocate appropriate
- * memory for the data returned by the encryption or decryption operation
- *
- * Return: block size of cipher
- */
-static inline unsigned int crypto_ablkcipher_blocksize(
- struct crypto_ablkcipher *tfm)
-{
- return crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(tfm));
-}
-
-static inline unsigned int crypto_ablkcipher_alignmask(
- struct crypto_ablkcipher *tfm)
-{
- return crypto_tfm_alg_alignmask(crypto_ablkcipher_tfm(tfm));
-}
-
-static inline u32 crypto_ablkcipher_get_flags(struct crypto_ablkcipher *tfm)
-{
- return crypto_tfm_get_flags(crypto_ablkcipher_tfm(tfm));
-}
-
-static inline void crypto_ablkcipher_set_flags(struct crypto_ablkcipher *tfm,
- u32 flags)
-{
- crypto_tfm_set_flags(crypto_ablkcipher_tfm(tfm), flags);
-}
-
-static inline void crypto_ablkcipher_clear_flags(struct crypto_ablkcipher *tfm,
- u32 flags)
-{
- crypto_tfm_clear_flags(crypto_ablkcipher_tfm(tfm), flags);
-}
-
-/**
- * crypto_ablkcipher_setkey() - set key for cipher
- * @tfm: cipher handle
- * @key: buffer holding the key
- * @keylen: length of the key in bytes
- *
- * The caller provided key is set for the ablkcipher referenced by the cipher
- * handle.
- *
- * Note, the key length determines the cipher type. Many block ciphers implement
- * different cipher modes depending on the key size, such as AES-128 vs AES-192
- * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128
- * is performed.
- *
- * Return: 0 if the setting of the key was successful; < 0 if an error occurred
- */
-static inline int crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
- const u8 *key, unsigned int keylen)
-{
- struct ablkcipher_tfm *crt = crypto_ablkcipher_crt(tfm);
-
- return crt->setkey(crt->base, key, keylen);
-}
-
-/**
- * crypto_ablkcipher_reqtfm() - obtain cipher handle from request
- * @req: ablkcipher_request out of which the cipher handle is to be obtained
- *
- * Return the crypto_ablkcipher handle when furnishing an ablkcipher_request
- * data structure.
- *
- * Return: crypto_ablkcipher handle
- */
-static inline struct crypto_ablkcipher *crypto_ablkcipher_reqtfm(
- struct ablkcipher_request *req)
-{
- return __crypto_ablkcipher_cast(req->base.tfm);
-}
-
-/**
- * crypto_ablkcipher_encrypt() - encrypt plaintext
- * @req: reference to the ablkcipher_request handle that holds all information
- * needed to perform the cipher operation
- *
- * Encrypt plaintext data using the ablkcipher_request handle. That data
- * structure and how it is filled with data is discussed with the
- * ablkcipher_request_* functions.
- *
- * Return: 0 if the cipher operation was successful; < 0 if an error occurred
- */
-static inline int crypto_ablkcipher_encrypt(struct ablkcipher_request *req)
-{
- struct ablkcipher_tfm *crt =
- crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req));
- struct crypto_alg *alg = crt->base->base.__crt_alg;
- unsigned int nbytes = req->nbytes;
- int ret;
-
- crypto_stats_get(alg);
- ret = crt->encrypt(req);
- crypto_stats_ablkcipher_encrypt(nbytes, ret, alg);
- return ret;
-}
-
-/**
- * crypto_ablkcipher_decrypt() - decrypt ciphertext
- * @req: reference to the ablkcipher_request handle that holds all information
- * needed to perform the cipher operation
- *
- * Decrypt ciphertext data using the ablkcipher_request handle. That data
- * structure and how it is filled with data is discussed with the
- * ablkcipher_request_* functions.
- *
- * Return: 0 if the cipher operation was successful; < 0 if an error occurred
- */
-static inline int crypto_ablkcipher_decrypt(struct ablkcipher_request *req)
-{
- struct ablkcipher_tfm *crt =
- crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req));
- struct crypto_alg *alg = crt->base->base.__crt_alg;
- unsigned int nbytes = req->nbytes;
- int ret;
-
- crypto_stats_get(alg);
- ret = crt->decrypt(req);
- crypto_stats_ablkcipher_decrypt(nbytes, ret, alg);
- return ret;
-}
-
-/**
- * DOC: Asynchronous Cipher Request Handle
- *
- * The ablkcipher_request data structure contains all pointers to data
- * required for the asynchronous cipher operation. This includes the cipher
- * handle (which can be used by multiple ablkcipher_request instances), pointer
- * to plaintext and ciphertext, asynchronous callback function, etc. It acts
- * as a handle to the ablkcipher_request_* API calls in a similar way as
- * ablkcipher handle to the crypto_ablkcipher_* API calls.
- */
-
-/**
- * crypto_ablkcipher_reqsize() - obtain size of the request data structure
- * @tfm: cipher handle
- *
- * Return: number of bytes
- */
-static inline unsigned int crypto_ablkcipher_reqsize(
- struct crypto_ablkcipher *tfm)
-{
- return crypto_ablkcipher_crt(tfm)->reqsize;
-}
-
-/**
- * ablkcipher_request_set_tfm() - update cipher handle reference in request
- * @req: request handle to be modified
- * @tfm: cipher handle that shall be added to the request handle
- *
- * Allow the caller to replace the existing ablkcipher handle in the request
- * data structure with a different one.
- */
-static inline void ablkcipher_request_set_tfm(
- struct ablkcipher_request *req, struct crypto_ablkcipher *tfm)
-{
- req->base.tfm = crypto_ablkcipher_tfm(crypto_ablkcipher_crt(tfm)->base);
-}
-
-static inline struct ablkcipher_request *ablkcipher_request_cast(
- struct crypto_async_request *req)
-{
- return container_of(req, struct ablkcipher_request, base);
-}
-
-/**
- * ablkcipher_request_alloc() - allocate request data structure
- * @tfm: cipher handle to be registered with the request
- * @gfp: memory allocation flag that is handed to kmalloc by the API call.
- *
- * Allocate the request data structure that must be used with the ablkcipher
- * encrypt and decrypt API calls. During the allocation, the provided ablkcipher
- * handle is registered in the request data structure.
- *
- * Return: allocated request handle in case of success, or NULL if out of memory
- */
-static inline struct ablkcipher_request *ablkcipher_request_alloc(
- struct crypto_ablkcipher *tfm, gfp_t gfp)
-{
- struct ablkcipher_request *req;
-
- req = kmalloc(sizeof(struct ablkcipher_request) +
- crypto_ablkcipher_reqsize(tfm), gfp);
-
- if (likely(req))
- ablkcipher_request_set_tfm(req, tfm);
-
- return req;
-}
-
-/**
- * ablkcipher_request_free() - zeroize and free request data structure
- * @req: request data structure cipher handle to be freed
- */
-static inline void ablkcipher_request_free(struct ablkcipher_request *req)
-{
- kzfree(req);
-}
-
-/**
- * ablkcipher_request_set_callback() - set asynchronous callback function
- * @req: request handle
- * @flags: specify zero or an ORing of the flags
- * CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and
- * increase the wait queue beyond the initial maximum size;
- * CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep
- * @compl: callback function pointer to be registered with the request handle
- * @data: The data pointer refers to memory that is not used by the kernel
- * crypto API, but provided to the callback function for it to use. Here,
- * the caller can provide a reference to memory the callback function can
- * operate on. As the callback function is invoked asynchronously to the
- * related functionality, it may need to access data structures of the
- * related functionality which can be referenced using this pointer. The
- * callback function can access the memory via the "data" field in the
- * crypto_async_request data structure provided to the callback function.
- *
- * This function allows setting the callback function that is triggered once the
- * cipher operation completes.
- *
- * The callback function is registered with the ablkcipher_request handle and
- * must comply with the following template::
- *
- * void callback_function(struct crypto_async_request *req, int error)
- */
-static inline void ablkcipher_request_set_callback(
- struct ablkcipher_request *req,
- u32 flags, crypto_completion_t compl, void *data)
-{
- req->base.complete = compl;
- req->base.data = data;
- req->base.flags = flags;
-}
-
-/**
- * ablkcipher_request_set_crypt() - set data buffers
- * @req: request handle
- * @src: source scatter / gather list
- * @dst: destination scatter / gather list
- * @nbytes: number of bytes to process from @src
- * @iv: IV for the cipher operation which must comply with the IV size defined
- * by crypto_ablkcipher_ivsize
- *
- * This function allows setting of the source data and destination data
- * scatter / gather lists.
- *
- * For encryption, the source is treated as the plaintext and the
- * destination is the ciphertext. For a decryption operation, the use is
- * reversed - the source is the ciphertext and the destination is the plaintext.
- */
-static inline void ablkcipher_request_set_crypt(
- struct ablkcipher_request *req,
- struct scatterlist *src, struct scatterlist *dst,
- unsigned int nbytes, void *iv)
-{
- req->src = src;
- req->dst = dst;
- req->nbytes = nbytes;
- req->info = iv;
-}
-
-/**
- * DOC: Synchronous Block Cipher API
- *
- * The synchronous block cipher API is used with the ciphers of type
- * CRYPTO_ALG_TYPE_BLKCIPHER (listed as type "blkcipher" in /proc/crypto)
- *
- * Synchronous calls, have a context in the tfm. But since a single tfm can be
- * used in multiple calls and in parallel, this info should not be changeable
- * (unless a lock is used). This applies, for example, to the symmetric key.
- * However, the IV is changeable, so there is an iv field in blkcipher_tfm
- * structure for synchronous blkcipher api. So, its the only state info that can
- * be kept for synchronous calls without using a big lock across a tfm.
- *
- * The block cipher API allows the use of a complete cipher, i.e. a cipher
- * consisting of a template (a block chaining mode) and a single block cipher
- * primitive (e.g. AES).
- *
- * The plaintext data buffer and the ciphertext data buffer are pointed to
- * by using scatter/gather lists. The cipher operation is performed
- * on all segments of the provided scatter/gather lists.
- *
- * The kernel crypto API supports a cipher operation "in-place" which means that
- * the caller may provide the same scatter/gather list for the plaintext and
- * cipher text. After the completion of the cipher operation, the plaintext
- * data is replaced with the ciphertext data in case of an encryption and vice
- * versa for a decryption. The caller must ensure that the scatter/gather lists
- * for the output data point to sufficiently large buffers, i.e. multiples of
- * the block size of the cipher.
- */
-
-static inline struct crypto_blkcipher *__crypto_blkcipher_cast(
- struct crypto_tfm *tfm)
-{
- return (struct crypto_blkcipher *)tfm;
-}
-
-static inline struct crypto_blkcipher *crypto_blkcipher_cast(
- struct crypto_tfm *tfm)
-{
- BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_BLKCIPHER);
- return __crypto_blkcipher_cast(tfm);
-}
-
-/**
- * crypto_alloc_blkcipher() - allocate synchronous block cipher handle
- * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
- * blkcipher cipher
- * @type: specifies the type of the cipher
- * @mask: specifies the mask for the cipher
- *
- * Allocate a cipher handle for a block cipher. The returned struct
- * crypto_blkcipher is the cipher handle that is required for any subsequent
- * API invocation for that block cipher.
- *
- * Return: allocated cipher handle in case of success; IS_ERR() is true in case
- * of an error, PTR_ERR() returns the error code.
- */
-static inline struct crypto_blkcipher *crypto_alloc_blkcipher(
- const char *alg_name, u32 type, u32 mask)
-{
- type &= ~CRYPTO_ALG_TYPE_MASK;
- type |= CRYPTO_ALG_TYPE_BLKCIPHER;
- mask |= CRYPTO_ALG_TYPE_MASK;
-
- return __crypto_blkcipher_cast(crypto_alloc_base(alg_name, type, mask));
-}
-
-static inline struct crypto_tfm *crypto_blkcipher_tfm(
- struct crypto_blkcipher *tfm)
-{
- return &tfm->base;
-}
-
-/**
- * crypto_free_blkcipher() - zeroize and free the block cipher handle
- * @tfm: cipher handle to be freed
- */
-static inline void crypto_free_blkcipher(struct crypto_blkcipher *tfm)
-{
- crypto_free_tfm(crypto_blkcipher_tfm(tfm));
-}
-
-/**
- * crypto_has_blkcipher() - Search for the availability of a block cipher
- * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
- * block cipher
- * @type: specifies the type of the cipher
- * @mask: specifies the mask for the cipher
- *
- * Return: true when the block cipher is known to the kernel crypto API; false
- * otherwise
- */
-static inline int crypto_has_blkcipher(const char *alg_name, u32 type, u32 mask)
-{
- type &= ~CRYPTO_ALG_TYPE_MASK;
- type |= CRYPTO_ALG_TYPE_BLKCIPHER;
- mask |= CRYPTO_ALG_TYPE_MASK;
-
- return crypto_has_alg(alg_name, type, mask);
-}
-
-/**
- * crypto_blkcipher_name() - return the name / cra_name from the cipher handle
- * @tfm: cipher handle
- *
- * Return: The character string holding the name of the cipher
- */
-static inline const char *crypto_blkcipher_name(struct crypto_blkcipher *tfm)
-{
- return crypto_tfm_alg_name(crypto_blkcipher_tfm(tfm));
-}
-
-static inline struct blkcipher_tfm *crypto_blkcipher_crt(
- struct crypto_blkcipher *tfm)
-{
- return &crypto_blkcipher_tfm(tfm)->crt_blkcipher;
-}
-
-static inline struct blkcipher_alg *crypto_blkcipher_alg(
- struct crypto_blkcipher *tfm)
-{
- return &crypto_blkcipher_tfm(tfm)->__crt_alg->cra_blkcipher;
-}
-
-/**
- * crypto_blkcipher_ivsize() - obtain IV size
- * @tfm: cipher handle
- *
- * The size of the IV for the block cipher referenced by the cipher handle is
- * returned. This IV size may be zero if the cipher does not need an IV.
- *
- * Return: IV size in bytes
- */
-static inline unsigned int crypto_blkcipher_ivsize(struct crypto_blkcipher *tfm)
-{
- return crypto_blkcipher_alg(tfm)->ivsize;
-}
-
-/**
- * crypto_blkcipher_blocksize() - obtain block size of cipher
- * @tfm: cipher handle
- *
- * The block size for the block cipher referenced with the cipher handle is
- * returned. The caller may use that information to allocate appropriate
- * memory for the data returned by the encryption or decryption operation.
- *
- * Return: block size of cipher
- */
-static inline unsigned int crypto_blkcipher_blocksize(
- struct crypto_blkcipher *tfm)
-{
- return crypto_tfm_alg_blocksize(crypto_blkcipher_tfm(tfm));
-}
-
-static inline unsigned int crypto_blkcipher_alignmask(
- struct crypto_blkcipher *tfm)
-{
- return crypto_tfm_alg_alignmask(crypto_blkcipher_tfm(tfm));
-}
-
-static inline u32 crypto_blkcipher_get_flags(struct crypto_blkcipher *tfm)
-{
- return crypto_tfm_get_flags(crypto_blkcipher_tfm(tfm));
-}
-
-static inline void crypto_blkcipher_set_flags(struct crypto_blkcipher *tfm,
- u32 flags)
-{
- crypto_tfm_set_flags(crypto_blkcipher_tfm(tfm), flags);
-}
-
-static inline void crypto_blkcipher_clear_flags(struct crypto_blkcipher *tfm,
- u32 flags)
-{
- crypto_tfm_clear_flags(crypto_blkcipher_tfm(tfm), flags);
-}
-
-/**
- * crypto_blkcipher_setkey() - set key for cipher
- * @tfm: cipher handle
- * @key: buffer holding the key
- * @keylen: length of the key in bytes
- *
- * The caller provided key is set for the block cipher referenced by the cipher
- * handle.
- *
- * Note, the key length determines the cipher type. Many block ciphers implement
- * different cipher modes depending on the key size, such as AES-128 vs AES-192
- * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128
- * is performed.
- *
- * Return: 0 if the setting of the key was successful; < 0 if an error occurred
- */
-static inline int crypto_blkcipher_setkey(struct crypto_blkcipher *tfm,
- const u8 *key, unsigned int keylen)
-{
- return crypto_blkcipher_crt(tfm)->setkey(crypto_blkcipher_tfm(tfm),
- key, keylen);
-}
-
-/**
- * crypto_blkcipher_encrypt() - encrypt plaintext
- * @desc: reference to the block cipher handle with meta data
- * @dst: scatter/gather list that is filled by the cipher operation with the
- * ciphertext
- * @src: scatter/gather list that holds the plaintext
- * @nbytes: number of bytes of the plaintext to encrypt.
- *
- * Encrypt plaintext data using the IV set by the caller with a preceding
- * call of crypto_blkcipher_set_iv.
- *
- * The blkcipher_desc data structure must be filled by the caller and can
- * reside on the stack. The caller must fill desc as follows: desc.tfm is filled
- * with the block cipher handle; desc.flags is filled with either
- * CRYPTO_TFM_REQ_MAY_SLEEP or 0.
- *
- * Return: 0 if the cipher operation was successful; < 0 if an error occurred
- */
-static inline int crypto_blkcipher_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes)
-{
- desc->info = crypto_blkcipher_crt(desc->tfm)->iv;
- return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes);
-}
-
-/**
- * crypto_blkcipher_encrypt_iv() - encrypt plaintext with dedicated IV
- * @desc: reference to the block cipher handle with meta data
- * @dst: scatter/gather list that is filled by the cipher operation with the
- * ciphertext
- * @src: scatter/gather list that holds the plaintext
- * @nbytes: number of bytes of the plaintext to encrypt.
- *
- * Encrypt plaintext data with the use of an IV that is solely used for this
- * cipher operation. Any previously set IV is not used.
- *
- * The blkcipher_desc data structure must be filled by the caller and can
- * reside on the stack. The caller must fill desc as follows: desc.tfm is filled
- * with the block cipher handle; desc.info is filled with the IV to be used for
- * the current operation; desc.flags is filled with either
- * CRYPTO_TFM_REQ_MAY_SLEEP or 0.
- *
- * Return: 0 if the cipher operation was successful; < 0 if an error occurred
- */
-static inline int crypto_blkcipher_encrypt_iv(struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes)
-{
- return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes);
-}
-
-/**
- * crypto_blkcipher_decrypt() - decrypt ciphertext
- * @desc: reference to the block cipher handle with meta data
- * @dst: scatter/gather list that is filled by the cipher operation with the
- * plaintext
- * @src: scatter/gather list that holds the ciphertext
- * @nbytes: number of bytes of the ciphertext to decrypt.
- *
- * Decrypt ciphertext data using the IV set by the caller with a preceding
- * call of crypto_blkcipher_set_iv.
- *
- * The blkcipher_desc data structure must be filled by the caller as documented
- * for the crypto_blkcipher_encrypt call above.
- *
- * Return: 0 if the cipher operation was successful; < 0 if an error occurred
- *
- */
-static inline int crypto_blkcipher_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes)
-{
- desc->info = crypto_blkcipher_crt(desc->tfm)->iv;
- return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes);
-}
-
-/**
- * crypto_blkcipher_decrypt_iv() - decrypt ciphertext with dedicated IV
- * @desc: reference to the block cipher handle with meta data
- * @dst: scatter/gather list that is filled by the cipher operation with the
- * plaintext
- * @src: scatter/gather list that holds the ciphertext
- * @nbytes: number of bytes of the ciphertext to decrypt.
- *
- * Decrypt ciphertext data with the use of an IV that is solely used for this
- * cipher operation. Any previously set IV is not used.
- *
- * The blkcipher_desc data structure must be filled by the caller as documented
- * for the crypto_blkcipher_encrypt_iv call above.
- *
- * Return: 0 if the cipher operation was successful; < 0 if an error occurred
- */
-static inline int crypto_blkcipher_decrypt_iv(struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes)
-{
- return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes);
-}
-
-/**
- * crypto_blkcipher_set_iv() - set IV for cipher
- * @tfm: cipher handle
- * @src: buffer holding the IV
- * @len: length of the IV in bytes
- *
- * The caller provided IV is set for the block cipher referenced by the cipher
- * handle.
- */
-static inline void crypto_blkcipher_set_iv(struct crypto_blkcipher *tfm,
- const u8 *src, unsigned int len)
-{
- memcpy(crypto_blkcipher_crt(tfm)->iv, src, len);
-}
-
-/**
- * crypto_blkcipher_get_iv() - obtain IV from cipher
- * @tfm: cipher handle
- * @dst: buffer filled with the IV
- * @len: length of the buffer dst
- *
- * The caller can obtain the IV set for the block cipher referenced by the
- * cipher handle and store it into the user-provided buffer. If the buffer
- * has an insufficient space, the IV is truncated to fit the buffer.
- */
-static inline void crypto_blkcipher_get_iv(struct crypto_blkcipher *tfm,
- u8 *dst, unsigned int len)
-{
- memcpy(dst, crypto_blkcipher_crt(tfm)->iv, len);
-}
-
/**
* DOC: Single Block Cipher API
*
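
The legacy blkcipher helpers deleted above carried their IV inside the transform itself; the skcipher API that replaces them carries key, IV and scatterlists in a per-call request. A minimal sketch of the replacement pattern for a synchronous one-shot encryption (example_encrypt() and its parameters are illustrative, not part of this patch; error handling trimmed):

/* Sketch: needs <crypto/skcipher.h>; key/src/dst/iv/nbytes come from the caller */
static int example_encrypt(struct scatterlist *src, struct scatterlist *dst,
			   unsigned int nbytes, const u8 *key,
			   unsigned int keylen, u8 *iv)
{
	struct crypto_sync_skcipher *tfm;
	int err;

	tfm = crypto_alloc_sync_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_sync_skcipher_setkey(tfm, key, keylen);
	if (!err) {
		SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);

		skcipher_request_set_sync_tfm(req, tfm);
		skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
					      NULL, NULL);
		/* the IV travels with the request, not with the tfm */
		skcipher_request_set_crypt(req, src, dst, nbytes, iv);
		err = crypto_skcipher_encrypt(req);
		skcipher_request_zero(req);
	}

	crypto_free_sync_skcipher(tfm);
	return err;
}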
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 399ad8632356..475668c69dbc 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -17,6 +17,7 @@
struct dm_dev;
struct dm_target;
struct dm_table;
+struct dm_report_zones_args;
struct mapped_device;
struct bio_vec;
@@ -93,9 +94,9 @@ typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv,
typedef int (*dm_prepare_ioctl_fn) (struct dm_target *ti, struct block_device **bdev);
-typedef int (*dm_report_zones_fn) (struct dm_target *ti, sector_t sector,
- struct blk_zone *zones,
- unsigned int *nr_zones);
+typedef int (*dm_report_zones_fn) (struct dm_target *ti,
+ struct dm_report_zones_args *args,
+ unsigned int nr_zones);
/*
* These iteration functions are typically used to check (and combine)
@@ -422,10 +423,23 @@ struct gendisk *dm_disk(struct mapped_device *md);
int dm_suspended(struct dm_target *ti);
int dm_noflush_suspending(struct dm_target *ti);
void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors);
-void dm_remap_zone_report(struct dm_target *ti, sector_t start,
- struct blk_zone *zones, unsigned int *nr_zones);
union map_info *dm_get_rq_mapinfo(struct request *rq);
+#ifdef CONFIG_BLK_DEV_ZONED
+struct dm_report_zones_args {
+ struct dm_target *tgt;
+ sector_t next_sector;
+
+ void *orig_data;
+ report_zones_cb orig_cb;
+ unsigned int zone_idx;
+
+ /* must be filled by ->report_zones before calling dm_report_zones_cb */
+ sector_t start;
+};
+int dm_report_zones_cb(struct blk_zone *zone, unsigned int idx, void *data);
+#endif /* CONFIG_BLK_DEV_ZONED */
+
/*
* Device mapper functions to parse and create devices specified by the
* parameter "dm-mod.create="
@@ -594,9 +608,6 @@ void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size);
*/
#define dm_round_up(n, sz) (dm_div_up((n), (sz)) * (sz))
-#define dm_array_too_big(fixed, obj, num) \
- ((num) > (UINT_MAX - (fixed)) / (obj))
-
/*
* Sector offset taken relative to the start of the target instead of
* relative to the start of the device.
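
The reworked dm_report_zones_fn hands the target a struct dm_report_zones_args instead of a raw zone array: the target must fill args->start with its device-relative start sector and then forward zones through dm_report_zones_cb(). A sketch of a pass-through target's method, assuming a hypothetical example_tc private context and example_map_sector() helper, plus the callback-based blkdev_report_zones() from the same series:

/* Sketch only: example_tc and example_map_sector() are hypothetical */
static int example_report_zones(struct dm_target *ti,
				struct dm_report_zones_args *args,
				unsigned int nr_zones)
{
	struct example_tc *tc = ti->private;
	sector_t sector = example_map_sector(ti, args->next_sector);

	/* must be set before dm_report_zones_cb() remaps zone sectors */
	args->start = tc->start;

	return blkdev_report_zones(tc->dev->bdev, sector, nr_zones,
				   dm_report_zones_cb, args);
}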
diff --git a/include/linux/dim.h b/include/linux/dim.h
index 9fa4b3f88c39..b698266d0035 100644
--- a/include/linux/dim.h
+++ b/include/linux/dim.h
@@ -4,22 +4,26 @@
#ifndef DIM_H
#define DIM_H
+#include <linux/bits.h>
+#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
-/**
+/*
* Number of events between DIM iterations.
* Causes a moderation of the algorithm run.
*/
#define DIM_NEVENTS 64
-/**
+/*
 * Whether a difference between values justifies taking an action.
* We consider 10% difference as significant.
*/
#define IS_SIGNIFICANT_DIFF(val, ref) \
(((100UL * abs((val) - (ref))) / (ref)) > 10)
-/**
+/*
* Calculate the gap between two values.
* Take wrap-around and variable size into consideration.
*/
@@ -27,12 +31,13 @@
& (BIT_ULL(bits) - 1))
/**
- * Structure for CQ moderation values.
+ * struct dim_cq_moder - Structure for CQ moderation values.
* Used for communications between DIM and its consumer.
*
* @usec: CQ timer suggestion (by DIM)
* @pkts: CQ packet counter suggestion (by DIM)
- * @cq_period_mode: CQ priod count mode (from CQE/EQE)
+ * @comps: Completion counter
+ * @cq_period_mode: CQ period count mode (from CQE/EQE)
*/
struct dim_cq_moder {
u16 usec;
@@ -42,13 +47,14 @@ struct dim_cq_moder {
};
/**
- * Structure for DIM sample data.
+ * struct dim_sample - Structure for DIM sample data.
* Used for communications between DIM and its consumer.
*
* @time: Sample timestamp
* @pkt_ctr: Number of packets
* @byte_ctr: Number of bytes
* @event_ctr: Number of events
+ * @comp_ctr: Current completion counter
*/
struct dim_sample {
ktime_t time;
@@ -59,12 +65,14 @@ struct dim_sample {
};
/**
- * Structure for DIM stats.
+ * struct dim_stats - Structure for DIM stats.
* Used for holding current measured rates.
*
* @ppms: Packets per msec
* @bpms: Bytes per msec
* @epms: Events per msec
+ * @cpms: Completions per msec
+ * @cpe_ratio: Ratio of completions to events
*/
struct dim_stats {
int ppms; /* packets per msec */
@@ -75,12 +83,13 @@ struct dim_stats {
};
/**
- * Main structure for dynamic interrupt moderation (DIM).
+ * struct dim - Main structure for dynamic interrupt moderation (DIM).
* Used for holding all information about a specific DIM instance.
*
* @state: Algorithm state (see below)
* @prev_stats: Measured rates from previous iteration (for comparison)
* @start_sample: Sampled data at start of current iteration
+ * @measuring_sample: A &dim_sample that is used to update the current events
* @work: Work to perform on action required
* @priv: A pointer to the struct that points to dim
* @profile_ix: Current moderation profile
@@ -106,24 +115,21 @@ struct dim {
};
/**
- * enum dim_cq_period_mode
- *
- * These are the modes for CQ period count.
+ * enum dim_cq_period_mode - Modes for CQ period count
*
* @DIM_CQ_PERIOD_MODE_START_FROM_EQE: Start counting from EQE
* @DIM_CQ_PERIOD_MODE_START_FROM_CQE: Start counting from CQE (implies timer reset)
* @DIM_CQ_PERIOD_NUM_MODES: Number of modes
*/
-enum {
+enum dim_cq_period_mode {
DIM_CQ_PERIOD_MODE_START_FROM_EQE = 0x0,
DIM_CQ_PERIOD_MODE_START_FROM_CQE = 0x1,
DIM_CQ_PERIOD_NUM_MODES
};
/**
- * enum dim_state
+ * enum dim_state - DIM algorithm states
*
- * These are the DIM algorithm states.
* These will determine if the algorithm is in a valid state to start an iteration.
*
* @DIM_START_MEASURE: This is the first iteration (also after applying a new profile)
@@ -131,16 +137,15 @@ enum {
* need to perform an action
* @DIM_APPLY_NEW_PROFILE: DIM consumer is currently applying a profile - no need to measure
*/
-enum {
+enum dim_state {
DIM_START_MEASURE,
DIM_MEASURE_IN_PROGRESS,
DIM_APPLY_NEW_PROFILE,
};
/**
- * enum dim_tune_state
+ * enum dim_tune_state - DIM algorithm tune states
*
- * These are the DIM algorithm tune states.
* These will determine which action the algorithm should perform.
*
* @DIM_PARKING_ON_TOP: Algorithm found a local top point - exit on significant difference
@@ -148,7 +153,7 @@ enum {
* @DIM_GOING_RIGHT: Algorithm is currently trying higher moderation levels
* @DIM_GOING_LEFT: Algorithm is currently trying lower moderation levels
*/
-enum {
+enum dim_tune_state {
DIM_PARKING_ON_TOP,
DIM_PARKING_TIRED,
DIM_GOING_RIGHT,
@@ -156,25 +161,23 @@ enum {
};
/**
- * enum dim_stats_state
+ * enum dim_stats_state - DIM algorithm statistics states
*
- * These are the DIM algorithm statistics states.
* These will determine the verdict of current iteration.
*
* @DIM_STATS_WORSE: Current iteration shows worse performance than before
- * @DIM_STATS_WORSE: Current iteration shows same performance than before
- * @DIM_STATS_WORSE: Current iteration shows better performance than before
+ * @DIM_STATS_SAME: Current iteration shows the same performance as before
+ * @DIM_STATS_BETTER: Current iteration shows better performance than before
*/
-enum {
+enum dim_stats_state {
DIM_STATS_WORSE,
DIM_STATS_SAME,
DIM_STATS_BETTER,
};
/**
- * enum dim_step_result
+ * enum dim_step_result - DIM algorithm step results
*
- * These are the DIM algorithm step results.
* These describe the result of a step.
*
* @DIM_STEPPED: Performed a regular step
@@ -182,7 +185,7 @@ enum {
* tired parking
* @DIM_ON_EDGE: Stepped to the most left/right profile
*/
-enum {
+enum dim_step_result {
DIM_STEPPED,
DIM_TOO_TIRED,
DIM_ON_EDGE,
@@ -199,7 +202,7 @@ enum {
bool dim_on_top(struct dim *dim);
/**
- * dim_turn - change profile alterning direction
+ * dim_turn - change profile altering direction
* @dim: DIM context
*
* Go left if we were going right and vice-versa.
@@ -238,7 +241,7 @@ void dim_calc_stats(struct dim_sample *start, struct dim_sample *end,
struct dim_stats *curr_stats);
/**
- * dim_update_sample - set a sample's fields with give values
+ * dim_update_sample - set a sample's fields with given values
* @event_ctr: number of events to set
* @packets: number of packets to set
* @bytes: number of bytes to set
@@ -304,8 +307,8 @@ struct dim_cq_moder net_dim_get_def_tx_moderation(u8 cq_period_mode);
* @end_sample: Current data measurement
*
* Called by the consumer.
- * This is the main logic of the algorithm, where data is processed in order to decide on next
- * required action.
+ * This is the main logic of the algorithm, where data is processed in order
+ * to decide on next required action.
*/
void net_dim(struct dim *dim, struct dim_sample end_sample);
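
Driver usage is untouched by the kernel-doc and anonymous-enum cleanups above: feed one dim_sample per interrupt batch into net_dim() and consume the chosen profile from the work item. A sketch, where example_apply_moderation() stands in for the driver's actual CQ programming:

/* interrupt path: hand the current counters to DIM */
static void example_irq_dim(struct dim *dim, u16 events, u64 packets, u64 bytes)
{
	struct dim_sample sample;

	dim_update_sample(events, packets, bytes, &sample);
	net_dim(dim, sample);		/* may schedule dim->work */
}

/* work item: apply the profile the algorithm picked */
static void example_dim_work(struct work_struct *work)
{
	struct dim *dim = container_of(work, struct dim, work);
	struct dim_cq_moder m;

	m = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
	example_apply_moderation(m.usec, m.pkts);	/* hypothetical hook */
	dim->state = DIM_START_MEASURE;
}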
diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h
index adf993a3bd58..d03af3605460 100644
--- a/include/linux/dma-direct.h
+++ b/include/linux/dma-direct.h
@@ -5,6 +5,8 @@
#include <linux/dma-mapping.h>
#include <linux/mem_encrypt.h>
+extern unsigned int zone_dma_bits;
+
#ifdef CONFIG_ARCH_HAS_PHYS_TO_DMA
#include <asm/dma-direct.h>
#else
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 4a1c4fca475a..8023071d6903 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -15,11 +15,8 @@
/**
* List of possible attributes associated with a DMA mapping. The semantics
* of each attribute should be defined in Documentation/DMA-attributes.txt.
- *
- * DMA_ATTR_WRITE_BARRIER: DMA to a memory region with this attribute
- * forces all pending DMA writes to complete.
*/
-#define DMA_ATTR_WRITE_BARRIER (1UL << 0)
+
/*
* DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping
* may be weakly ordered, that is that reads and writes may pass each other.
diff --git a/include/linux/edac.h b/include/linux/edac.h
index c19483b90079..cc31b9742684 100644
--- a/include/linux/edac.h
+++ b/include/linux/edac.h
@@ -362,78 +362,6 @@ struct edac_mc_layer {
*/
#define EDAC_MAX_LAYERS 3
-/**
- * EDAC_DIMM_OFF - Macro responsible to get a pointer offset inside a pointer
- * array for the element given by [layer0,layer1,layer2]
- * position
- *
- * @layers: a struct edac_mc_layer array, describing how many elements
- * were allocated for each layer
- * @nlayers: Number of layers at the @layers array
- * @layer0: layer0 position
- * @layer1: layer1 position. Unused if n_layers < 2
- * @layer2: layer2 position. Unused if n_layers < 3
- *
- * For 1 layer, this macro returns "var[layer0] - var";
- *
- * For 2 layers, this macro is similar to allocate a bi-dimensional array
- * and to return "var[layer0][layer1] - var";
- *
- * For 3 layers, this macro is similar to allocate a tri-dimensional array
- * and to return "var[layer0][layer1][layer2] - var".
- *
- * A loop could be used here to make it more generic, but, as we only have
- * 3 layers, this is a little faster.
- *
- * By design, layers can never be 0 or more than 3. If that ever happens,
- * a NULL is returned, causing an OOPS during the memory allocation routine,
- * with would point to the developer that he's doing something wrong.
- */
-#define EDAC_DIMM_OFF(layers, nlayers, layer0, layer1, layer2) ({ \
- int __i; \
- if ((nlayers) == 1) \
- __i = layer0; \
- else if ((nlayers) == 2) \
- __i = (layer1) + ((layers[1]).size * (layer0)); \
- else if ((nlayers) == 3) \
- __i = (layer2) + ((layers[2]).size * ((layer1) + \
- ((layers[1]).size * (layer0)))); \
- else \
- __i = -EINVAL; \
- __i; \
-})
-
-/**
- * EDAC_DIMM_PTR - Macro responsible to get a pointer inside a pointer array
- * for the element given by [layer0,layer1,layer2] position
- *
- * @layers: a struct edac_mc_layer array, describing how many elements
- * were allocated for each layer
- * @var: name of the var where we want to get the pointer
- * (like mci->dimms)
- * @nlayers: Number of layers at the @layers array
- * @layer0: layer0 position
- * @layer1: layer1 position. Unused if n_layers < 2
- * @layer2: layer2 position. Unused if n_layers < 3
- *
- * For 1 layer, this macro returns "var[layer0]";
- *
- * For 2 layers, this macro is similar to allocate a bi-dimensional array
- * and to return "var[layer0][layer1]";
- *
- * For 3 layers, this macro is similar to allocate a tri-dimensional array
- * and to return "var[layer0][layer1][layer2]";
- */
-#define EDAC_DIMM_PTR(layers, var, nlayers, layer0, layer1, layer2) ({ \
- typeof(*var) __p; \
- int ___i = EDAC_DIMM_OFF(layers, nlayers, layer0, layer1, layer2); \
- if (___i < 0) \
- __p = NULL; \
- else \
- __p = (var)[___i]; \
- __p; \
-})
-
struct dimm_info {
struct device dev;
@@ -443,6 +371,7 @@ struct dimm_info {
unsigned int location[EDAC_MAX_LAYERS];
struct mem_ctl_info *mci; /* the parent */
+ unsigned int idx; /* index within the parent dimm array */
u32 grain; /* granularity of reported error in bytes */
enum dev_type dtype; /* memory device type */
@@ -528,15 +457,10 @@ struct errcount_attribute_data {
* (typically, a memory controller error)
*/
struct edac_raw_error_desc {
- /*
- * NOTE: everything before grain won't be cleaned by
- * edac_raw_error_desc_clean()
- */
char location[LOCATION_SIZE];
char label[(EDAC_MC_LABEL_LEN + 1 + sizeof(OTHER_LABEL)) * EDAC_MAX_LABELS];
long grain;
- /* the vars below and grain will be cleaned on every new error report */
u16 error_count;
int top_layer;
int mid_layer;
@@ -669,4 +593,70 @@ struct mem_ctl_info {
bool fake_inject_ue;
u16 fake_inject_count;
};
-#endif
+
+#define mci_for_each_dimm(mci, dimm) \
+ for ((dimm) = (mci)->dimms[0]; \
+ (dimm); \
+ (dimm) = (dimm)->idx + 1 < (mci)->tot_dimms \
+ ? (mci)->dimms[(dimm)->idx + 1] \
+ : NULL)
+
+/**
+ * edac_get_dimm_by_index - Get DIMM info at @index from a memory
+ * controller
+ *
+ * @mci: MC descriptor struct mem_ctl_info
+ * @index: index in the memory controller's DIMM array
+ *
+ * Returns a struct dimm_info * or NULL on failure.
+ */
+static inline struct dimm_info *
+edac_get_dimm_by_index(struct mem_ctl_info *mci, int index)
+{
+ if (index < 0 || index >= mci->tot_dimms)
+ return NULL;
+
+ if (WARN_ON_ONCE(mci->dimms[index]->idx != index))
+ return NULL;
+
+ return mci->dimms[index];
+}
+
+/**
+ * edac_get_dimm - Get DIMM info from a memory controller given by
+ * [layer0,layer1,layer2] position
+ *
+ * @mci: MC descriptor struct mem_ctl_info
+ * @layer0: layer0 position
+ * @layer1: layer1 position. Unused if n_layers < 2
+ * @layer2: layer2 position. Unused if n_layers < 3
+ *
+ * For 1 layer, this function returns "dimms[layer0]";
+ *
+ * For 2 layers, this function is similar to allocating a two-dimensional
+ * array and returning "dimms[layer0][layer1]";
+ *
+ * For 3 layers, this function is similar to allocating a three-dimensional
+ * array and returning "dimms[layer0][layer1][layer2]";
+ */
+static inline struct dimm_info *edac_get_dimm(struct mem_ctl_info *mci,
+ int layer0, int layer1, int layer2)
+{
+ int index;
+
+ if (layer0 < 0
+ || (mci->n_layers > 1 && layer1 < 0)
+ || (mci->n_layers > 2 && layer2 < 0))
+ return NULL;
+
+ index = layer0;
+
+ if (mci->n_layers > 1)
+ index = index * mci->layers[1].size + layer1;
+
+ if (mci->n_layers > 2)
+ index = index * mci->layers[2].size + layer2;
+
+ return edac_get_dimm_by_index(mci, index);
+}
+#endif /* _LINUX_EDAC_H_ */
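
With EDAC_DIMM_OFF/EDAC_DIMM_PTR gone, drivers either walk the flat DIMM array or resolve layer coordinates through the new helpers. A sketch, where mci, chan and slot are assumed from the surrounding driver:

struct dimm_info *dimm;

/* walk every DIMM known to this memory controller */
mci_for_each_dimm(mci, dimm) {
	if (!dimm->nr_pages)
		continue;
	pr_debug("dimm %u: %u pages\n", dimm->idx, dimm->nr_pages);
}

/* or resolve one DIMM from [layer0, layer1] coordinates */
dimm = edac_get_dimm(mci, chan, slot, 0);
if (!dimm)
	return -EINVAL;		/* out-of-range coordinates yield NULL */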
diff --git a/include/linux/efi.h b/include/linux/efi.h
index d87acf62958e..99dfea595c8c 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -112,6 +112,7 @@ typedef struct {
#define EFI_MEMORY_MORE_RELIABLE \
((u64)0x0000000000010000ULL) /* higher reliability */
#define EFI_MEMORY_RO ((u64)0x0000000000020000ULL) /* read-only */
+#define EFI_MEMORY_SP ((u64)0x0000000000040000ULL) /* soft reserved */
#define EFI_MEMORY_RUNTIME ((u64)0x8000000000000000ULL) /* range requires runtime mapping */
#define EFI_MEMORY_DESCRIPTOR_VERSION 1
@@ -1044,7 +1045,6 @@ extern void efi_enter_virtual_mode (void); /* switch EFI to virtual mode, if pos
extern efi_status_t efi_query_variable_store(u32 attributes,
unsigned long size,
bool nonblocking);
-extern void efi_find_mirror(void);
#else
static inline efi_status_t efi_query_variable_store(u32 attributes,
@@ -1202,6 +1202,7 @@ extern int __init efi_setup_pcdp_console(char *);
#define EFI_DBG 8 /* Print additional debug info at runtime */
#define EFI_NX_PE_DATA 9 /* Can runtime data regions be mapped non-executable? */
#define EFI_MEM_ATTR 10 /* Did firmware publish an EFI_MEMORY_ATTRIBUTES table? */
+#define EFI_MEM_NO_SOFT_RESERVE 11 /* Is the kernel configured to ignore soft reservations? */
#ifdef CONFIG_EFI
/*
@@ -1212,6 +1213,14 @@ static inline bool efi_enabled(int feature)
return test_bit(feature, &efi.flags) != 0;
}
extern void efi_reboot(enum reboot_mode reboot_mode, const char *__unused);
+
+bool __pure __efi_soft_reserve_enabled(void);
+
+static inline bool __pure efi_soft_reserve_enabled(void)
+{
+ return IS_ENABLED(CONFIG_EFI_SOFT_RESERVE)
+ && __efi_soft_reserve_enabled();
+}
#else
static inline bool efi_enabled(int feature)
{
@@ -1225,6 +1234,11 @@ efi_capsule_pending(int *reset_type)
{
return false;
}
+
+static inline bool efi_soft_reserve_enabled(void)
+{
+ return false;
+}
#endif
extern int efi_status_to_err(efi_status_t status);
@@ -1645,6 +1659,8 @@ static inline void
efi_enable_reset_attack_mitigation(efi_system_table_t *sys_table_arg) { }
#endif
+efi_status_t efi_random_get_seed(efi_system_table_t *sys_table_arg);
+
void efi_retrieve_tpm2_eventlog(efi_system_table_t *sys_table);
/*
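
EFI_MEMORY_SP plus the efi_soft_reserve_enabled() gate let memory-map code leave "specific purpose" memory out of the page allocator so a dax-style driver can claim it later. A sketch of the check inside a memory-map walk (md is the usual efi_memory_desc_t iterator):

for_each_efi_memory_desc(md) {
	if ((md->attribute & EFI_MEMORY_SP) && efi_soft_reserve_enabled()) {
		/* skip: will be published as IORES_DESC_SOFT_RESERVED */
		continue;
	}
	/* ... normal handling ... */
}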
diff --git a/include/linux/errname.h b/include/linux/errname.h
new file mode 100644
index 000000000000..e8576ad90cb7
--- /dev/null
+++ b/include/linux/errname.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_ERRNAME_H
+#define _LINUX_ERRNAME_H
+
+#include <linux/stddef.h>
+
+#ifdef CONFIG_SYMBOLIC_ERRNAME
+const char *errname(int err);
+#else
+static inline const char *errname(int err)
+{
+ return NULL;
+}
+#endif
+
+#endif /* _LINUX_ERRNAME_H */
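
errname() backs symbolic errno printing (the %pe printk extension added in the same series); it returns NULL without CONFIG_SYMBOLIC_ERRNAME, so direct callers need a numeric fallback. Usage sketch:

/* %pe resolves via errname() and falls back to the raw number */
pr_warn("probe failed: %pe\n", ERR_PTR(err));

/* direct use must cope with the NULL fallback itself */
pr_warn("probe failed: %s (%d)\n", errname(err) ?: "?", err);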
diff --git a/include/linux/exportfs.h b/include/linux/exportfs.h
index cf6571fc9c01..d896b8657085 100644
--- a/include/linux/exportfs.h
+++ b/include/linux/exportfs.h
@@ -105,6 +105,11 @@ enum fid_type {
FILEID_LUSTRE = 0x97,
/*
+ * 64 bit unique kernfs id
+ */
+ FILEID_KERNFS = 0xfe,
+
+ /*
* Filesystems must not use 0xff file ID.
*/
FILEID_INVALID = 0xff,
diff --git a/include/linux/extable.h b/include/linux/extable.h
index 81ecfaa83ad3..4ab9e78f313b 100644
--- a/include/linux/extable.h
+++ b/include/linux/extable.h
@@ -33,4 +33,14 @@ search_module_extables(unsigned long addr)
}
#endif /*CONFIG_MODULES*/
+#ifdef CONFIG_BPF_JIT
+const struct exception_table_entry *search_bpf_extables(unsigned long addr);
+#else
+static inline const struct exception_table_entry *
+search_bpf_extables(unsigned long addr)
+{
+ return NULL;
+}
+#endif
+
#endif /* _LINUX_EXTABLE_H */
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 0367a75f873b..1b1e8b8f88da 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -65,6 +65,9 @@ struct ctl_table_header;
/* unused opcode to mark special call to bpf_tail_call() helper */
#define BPF_TAIL_CALL 0xf0
+/* unused opcode to mark special load instruction. Same as BPF_ABS */
+#define BPF_PROBE_MEM 0x20
+
/* unused opcode to mark call to interpreter with arguments */
#define BPF_CALL_ARGS 0xe0
@@ -464,10 +467,11 @@ static inline bool insn_is_zext(const struct bpf_insn *insn)
#define BPF_CALL_x(x, name, ...) \
static __always_inline \
u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__)); \
+ typedef u64 (*btf_##name)(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__)); \
u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__)); \
u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__)) \
{ \
- return ____##name(__BPF_MAP(x,__BPF_CAST,__BPF_N,__VA_ARGS__));\
+ return ((btf_##name)____##name)(__BPF_MAP(x,__BPF_CAST,__BPF_N,__VA_ARGS__));\
} \
static __always_inline \
u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__))
@@ -511,10 +515,12 @@ struct sock_fprog_kern {
struct sock_filter *filter;
};
+/* Some arches need doubleword alignment for their instructions and/or data */
+#define BPF_IMAGE_ALIGNMENT 8
+
struct bpf_binary_header {
u32 pages;
- /* Some arches need word alignment for their instructions */
- u8 image[] __aligned(4);
+ u8 image[] __aligned(BPF_IMAGE_ALIGNMENT);
};
struct bpf_prog {
@@ -946,6 +952,9 @@ void *bpf_jit_alloc_exec(unsigned long size);
void bpf_jit_free_exec(void *addr);
void bpf_jit_free(struct bpf_prog *fp);
+int bpf_jit_add_poke_descriptor(struct bpf_prog *prog,
+ struct bpf_jit_poke_descriptor *poke);
+
int bpf_jit_get_func_addr(const struct bpf_prog *prog,
const struct bpf_insn *insn, bool extra_pass,
u64 *func_addr, bool *func_addr_fixed);
@@ -1044,11 +1053,23 @@ static inline bool ebpf_jit_enabled(void)
return false;
}
+static inline bool bpf_jit_blinding_enabled(struct bpf_prog *prog)
+{
+ return false;
+}
+
static inline bool bpf_prog_ebpf_jited(const struct bpf_prog *fp)
{
return false;
}
+static inline int
+bpf_jit_add_poke_descriptor(struct bpf_prog *prog,
+ struct bpf_jit_poke_descriptor *poke)
+{
+ return -ENOTSUPP;
+}
+
static inline void bpf_jit_free(struct bpf_prog *fp)
{
bpf_prog_unlock_free(fp);
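
The BPF_CALL_x tweak above only routes the wrapper through a btf_##name function-pointer typedef, so the helper's real prototype stays visible to type-based tooling; helper definitions look exactly as before. A sketch with a hypothetical helper:

/* hypothetical helper: BPF_CALL_2 generates the u64-ABI glue */
BPF_CALL_2(bpf_example_add, u32, a, u32, b)
{
	return a + b;
}

static const struct bpf_func_proto bpf_example_add_proto = {
	.func		= bpf_example_add,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
	.arg2_type	= ARG_ANYTHING,
};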
diff --git a/include/linux/firmware/broadcom/tee_bnxt_fw.h b/include/linux/firmware/broadcom/tee_bnxt_fw.h
new file mode 100644
index 000000000000..f24c82d6ef73
--- /dev/null
+++ b/include/linux/firmware/broadcom/tee_bnxt_fw.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright 2019 Broadcom.
+ */
+
+#ifndef _BROADCOM_TEE_BNXT_FW_H
+#define _BROADCOM_TEE_BNXT_FW_H
+
+#include <linux/types.h>
+
+int tee_bnxt_fw_load(void);
+int tee_bnxt_copy_coredump(void *buf, u32 offset, u32 size);
+
+#endif /* _BROADCOM_TEE_BNXT_FW_H */
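
The two entry points are the whole contract: load the firmware through the TEE, then page the crash dump out in bounded chunks. A sketch, with buf (u8 *) and total supplied by the caller and 4 KiB chunking chosen arbitrarily:

/* Sketch: fetch a coredump of 'total' bytes in 4 KiB chunks */
u32 off = 0;
int err = tee_bnxt_fw_load();

if (err)
	return err;

while (off < total) {
	u32 len = min_t(u32, total - off, SZ_4K);

	err = tee_bnxt_copy_coredump(buf + off, off, len);
	if (err)
		return err;
	off += len;
}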
diff --git a/include/linux/firmware/intel/stratix10-svc-client.h b/include/linux/firmware/intel/stratix10-svc-client.h
index b6c4302a39e0..59bc6e2af693 100644
--- a/include/linux/firmware/intel/stratix10-svc-client.h
+++ b/include/linux/firmware/intel/stratix10-svc-client.h
@@ -41,6 +41,12 @@
*
* SVC_STATUS_RSU_OK:
* Secure firmware accepts the request of remote status update (RSU).
+ *
+ * SVC_STATUS_RSU_ERROR:
+ * Error encountered during remote system update.
+ *
+ * SVC_STATUS_RSU_NO_SUPPORT:
+ * Secure firmware doesn't support RSU retry or notify feature.
*/
#define SVC_STATUS_RECONFIG_REQUEST_OK 0
#define SVC_STATUS_RECONFIG_BUFFER_SUBMITTED 1
@@ -50,6 +56,8 @@
#define SVC_STATUS_RECONFIG_ERROR 5
#define SVC_STATUS_RSU_OK 6
#define SVC_STATUS_RSU_ERROR 7
+#define SVC_STATUS_RSU_NO_SUPPORT 8
+
/**
* Flag bit for COMMAND_RECONFIG
*
diff --git a/include/linux/firmware/xlnx-zynqmp.h b/include/linux/firmware/xlnx-zynqmp.h
index 778abbbc7d94..df366f1a4cb4 100644
--- a/include/linux/firmware/xlnx-zynqmp.h
+++ b/include/linux/firmware/xlnx-zynqmp.h
@@ -91,7 +91,8 @@ enum pm_ret_status {
};
enum pm_ioctl_id {
- IOCTL_SET_PLL_FRAC_MODE = 8,
+ IOCTL_SET_SD_TAPDELAY = 7,
+ IOCTL_SET_PLL_FRAC_MODE,
IOCTL_GET_PLL_FRAC_MODE,
IOCTL_SET_PLL_FRAC_DATA,
IOCTL_GET_PLL_FRAC_DATA,
@@ -250,6 +251,16 @@ enum zynqmp_pm_request_ack {
ZYNQMP_PM_REQUEST_ACK_NON_BLOCKING,
};
+enum pm_node_id {
+ NODE_SD_0 = 39,
+ NODE_SD_1,
+};
+
+enum tap_delay_type {
+ PM_TAPDELAY_INPUT = 0,
+ PM_TAPDELAY_OUTPUT,
+};
+
/**
* struct zynqmp_pm_query_data - PM query data
* @qid: query ID
diff --git a/include/linux/fs.h b/include/linux/fs.h
index e0d909d35763..ae6c5c37f3ae 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -2632,8 +2632,6 @@ extern void bd_finish_claiming(struct block_device *bdev,
extern void bd_abort_claiming(struct block_device *bdev,
struct block_device *whole, void *holder);
extern void blkdev_put(struct block_device *bdev, fmode_t mode);
-extern int __blkdev_reread_part(struct block_device *bdev);
-extern int blkdev_reread_part(struct block_device *bdev);
#ifdef CONFIG_SYSFS
extern int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk);
@@ -2703,8 +2701,6 @@ extern void make_bad_inode(struct inode *);
extern bool is_bad_inode(struct inode *);
#ifdef CONFIG_BLOCK
-extern void check_disk_size_change(struct gendisk *disk,
- struct block_device *bdev, bool verbose);
extern int revalidate_disk(struct gendisk *);
extern int check_disk_change(struct block_device *);
extern int __invalidate_device(struct block_device *, bool);
diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h
index f622f7460ed8..1a7bffe78ed5 100644
--- a/include/linux/fscrypt.h
+++ b/include/linux/fscrypt.h
@@ -20,7 +20,6 @@
#define FS_CRYPTO_BLOCK_SIZE 16
-struct fscrypt_ctx;
struct fscrypt_info;
struct fscrypt_str {
@@ -62,18 +61,9 @@ struct fscrypt_operations {
bool (*dummy_context)(struct inode *);
bool (*empty_dir)(struct inode *);
unsigned int max_namelen;
-};
-
-/* Decryption work */
-struct fscrypt_ctx {
- union {
- struct {
- struct bio *bio;
- struct work_struct work;
- };
- struct list_head free_list; /* Free list */
- };
- u8 flags; /* Flags */
+ bool (*has_stable_inodes)(struct super_block *sb);
+ void (*get_ino_and_lblk_bits)(struct super_block *sb,
+ int *ino_bits_ret, int *lblk_bits_ret);
};
static inline bool fscrypt_has_encryption_key(const struct inode *inode)
@@ -102,8 +92,6 @@ static inline void fscrypt_handle_d_move(struct dentry *dentry)
/* crypto.c */
extern void fscrypt_enqueue_decrypt_work(struct work_struct *);
-extern struct fscrypt_ctx *fscrypt_get_ctx(gfp_t);
-extern void fscrypt_release_ctx(struct fscrypt_ctx *);
extern struct page *fscrypt_encrypt_pagecache_blocks(struct page *page,
unsigned int len,
@@ -244,8 +232,6 @@ static inline bool fscrypt_match_name(const struct fscrypt_name *fname,
/* bio.c */
extern void fscrypt_decrypt_bio(struct bio *);
-extern void fscrypt_enqueue_decrypt_bio(struct fscrypt_ctx *ctx,
- struct bio *bio);
extern int fscrypt_zeroout_range(const struct inode *, pgoff_t, sector_t,
unsigned int);
@@ -295,16 +281,6 @@ static inline void fscrypt_enqueue_decrypt_work(struct work_struct *work)
{
}
-static inline struct fscrypt_ctx *fscrypt_get_ctx(gfp_t gfp_flags)
-{
- return ERR_PTR(-EOPNOTSUPP);
-}
-
-static inline void fscrypt_release_ctx(struct fscrypt_ctx *ctx)
-{
- return;
-}
-
static inline struct page *fscrypt_encrypt_pagecache_blocks(struct page *page,
unsigned int len,
unsigned int offs,
@@ -484,11 +460,6 @@ static inline void fscrypt_decrypt_bio(struct bio *bio)
{
}
-static inline void fscrypt_enqueue_decrypt_bio(struct fscrypt_ctx *ctx,
- struct bio *bio)
-{
-}
-
static inline int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
sector_t pblk, unsigned int len)
{
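
The two new fscrypt_operations let the crypto core ask a filesystem whether its inode numbers are stable and how wide inode and logical-block numbers can get, presumably feeding the inode-based IV policies added around this time. A sketch for a hypothetical filesystem with 32-bit inodes and block numbers (other mandatory ops elided):

/* Sketch: hypothetical filesystem wiring */
static bool example_has_stable_inodes(struct super_block *sb)
{
	return true;	/* inode numbers never change on this fs */
}

static void example_get_ino_and_lblk_bits(struct super_block *sb,
					  int *ino_bits_ret, int *lblk_bits_ret)
{
	*ino_bits_ret = 32;
	*lblk_bits_ret = 32;
}

static const struct fscrypt_operations example_cryptops = {
	.has_stable_inodes	= example_has_stable_inodes,
	.get_ino_and_lblk_bits	= example_get_ino_and_lblk_bits,
};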
diff --git a/include/linux/fsl/mc.h b/include/linux/fsl/mc.h
index 975553a9f75d..54d9436600c7 100644
--- a/include/linux/fsl/mc.h
+++ b/include/linux/fsl/mc.h
@@ -403,6 +403,8 @@ int __must_check fsl_mc_allocate_irqs(struct fsl_mc_device *mc_dev);
void fsl_mc_free_irqs(struct fsl_mc_device *mc_dev);
+struct fsl_mc_device *fsl_mc_get_endpoint(struct fsl_mc_device *mc_dev);
+
extern struct bus_type fsl_mc_bus_type;
extern struct device_type fsl_mc_bus_dprc_type;
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 8a8cb3c401b2..9141f2263286 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -499,7 +499,7 @@ static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; }
/**
* ftrace_make_nop - convert code into nop
* @mod: module structure if called by module load initialization
- * @rec: the mcount call site record
+ * @rec: the call site record (e.g. mcount/fentry)
* @addr: the address that the call site should be calling
*
* This is a very sensitive operation and great care needs
@@ -520,9 +520,38 @@ static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; }
extern int ftrace_make_nop(struct module *mod,
struct dyn_ftrace *rec, unsigned long addr);
+
+/**
+ * ftrace_init_nop - initialize a nop call site
+ * @mod: module structure if called by module load initialization
+ * @rec: the call site record (e.g. mcount/fentry)
+ *
+ * This is a very sensitive operation and great care needs
+ * to be taken by the arch. The operation should carefully
+ * read the location, check to see if what is read is indeed
+ * what we expect it to be, and then on success of the compare,
+ * it should write to the location.
+ *
+ * The code segment at @rec->ip should contain the contents created by
+ * the compiler.
+ *
+ * Return must be:
+ * 0 on success
+ * -EFAULT on error reading the location
+ * -EINVAL on a failed compare of the contents
+ * -EPERM on error writing to the location
+ * Any other value will be considered a failure.
+ */
+#ifndef ftrace_init_nop
+static inline int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
+{
+ return ftrace_make_nop(mod, rec, MCOUNT_ADDR);
+}
+#endif
+
/**
* ftrace_make_call - convert a nop call site into a call to addr
- * @rec: the mcount call site record
+ * @rec: the call site record (e.g. mcount/fentry)
* @addr: the address that the call site should call
*
* This is a very sensitive operation and great care needs
@@ -545,7 +574,7 @@ extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
/**
* ftrace_modify_call - convert from one addr to another (no nop)
- * @rec: the mcount call site record
+ * @rec: the call site record (e.g. mcount/fentry)
* @old_addr: the address expected to be currently called to
* @addr: the address to change to
*
@@ -709,6 +738,11 @@ static inline unsigned long get_lock_parent_ip(void)
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
extern void ftrace_init(void);
+#ifdef CC_USING_PATCHABLE_FUNCTION_ENTRY
+#define FTRACE_CALLSITE_SECTION "__patchable_function_entries"
+#else
+#define FTRACE_CALLSITE_SECTION "__mcount_loc"
+#endif
#else
static inline void ftrace_init(void) { }
#endif
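
ftrace_init_nop() gives architectures a hook for boot-time call-site initialization that differs from live patching (e.g. compilers using patchable-function-entry emit NOP padding rather than an mcount call). An arch opts out of the generic fallback roughly like this, in its asm/ftrace.h (sketch):

/* Sketch: arch override; the #define suppresses the generic fallback above */
#define ftrace_init_nop ftrace_init_nop
int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec);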
diff --git a/include/linux/futex.h b/include/linux/futex.h
index ccaef0097785..5cc3fed27d4c 100644
--- a/include/linux/futex.h
+++ b/include/linux/futex.h
@@ -2,7 +2,9 @@
#ifndef _LINUX_FUTEX_H
#define _LINUX_FUTEX_H
+#include <linux/sched.h>
#include <linux/ktime.h>
+
#include <uapi/linux/futex.h>
struct inode;
@@ -48,15 +50,35 @@ union futex_key {
#define FUTEX_KEY_INIT (union futex_key) { .both = { .ptr = NULL } }
#ifdef CONFIG_FUTEX
-extern void exit_robust_list(struct task_struct *curr);
+enum {
+ FUTEX_STATE_OK,
+ FUTEX_STATE_EXITING,
+ FUTEX_STATE_DEAD,
+};
-long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
- u32 __user *uaddr2, u32 val2, u32 val3);
-#else
-static inline void exit_robust_list(struct task_struct *curr)
+static inline void futex_init_task(struct task_struct *tsk)
{
+ tsk->robust_list = NULL;
+#ifdef CONFIG_COMPAT
+ tsk->compat_robust_list = NULL;
+#endif
+ INIT_LIST_HEAD(&tsk->pi_state_list);
+ tsk->pi_state_cache = NULL;
+ tsk->futex_state = FUTEX_STATE_OK;
+ mutex_init(&tsk->futex_exit_mutex);
}
+void futex_exit_recursive(struct task_struct *tsk);
+void futex_exit_release(struct task_struct *tsk);
+void futex_exec_release(struct task_struct *tsk);
+
+long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
+ u32 __user *uaddr2, u32 val2, u32 val3);
+#else
+static inline void futex_init_task(struct task_struct *tsk) { }
+static inline void futex_exit_recursive(struct task_struct *tsk) { }
+static inline void futex_exit_release(struct task_struct *tsk) { }
+static inline void futex_exec_release(struct task_struct *tsk) { }
static inline long do_futex(u32 __user *uaddr, int op, u32 val,
ktime_t *timeout, u32 __user *uaddr2,
u32 val2, u32 val3)
@@ -65,12 +87,4 @@ static inline long do_futex(u32 __user *uaddr, int op, u32 val,
}
#endif
-#ifdef CONFIG_FUTEX_PI
-extern void exit_pi_state_list(struct task_struct *curr);
-#else
-static inline void exit_pi_state_list(struct task_struct *curr)
-{
-}
-#endif
-
#endif
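
The exit-state rework replaces the exported exit_robust_list()/exit_pi_state_list() pair with a small state machine: fork runs futex_init_task(), while the exit and exec paths funnel through the new helpers so waiters can detect FUTEX_STATE_EXITING instead of racing a dying owner. A sketch of where the helpers slot in (function names are illustrative only, not the real fork/exit code):

static void example_fork_path(struct task_struct *p)
{
	futex_init_task(p);		/* fresh robust-list/PI state */
}

static void example_exit_path(struct task_struct *tsk)
{
	futex_exit_release(tsk);	/* ends in FUTEX_STATE_DEAD */
}

static void example_exec_path(struct task_struct *tsk)
{
	futex_exec_release(tsk);	/* mm is replaced, task lives on */
}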
diff --git a/include/linux/fwnode.h b/include/linux/fwnode.h
index ababd6bc82f3..9d9dc444d787 100644
--- a/include/linux/fwnode.h
+++ b/include/linux/fwnode.h
@@ -49,13 +49,15 @@ struct fwnode_reference_args {
* struct fwnode_operations - Operations for fwnode interface
* @get: Get a reference to an fwnode.
* @put: Put a reference to an fwnode.
+ * @device_is_available: Return true if the device is available.
* @device_get_match_data: Return the device driver match data.
* @property_present: Return true if a property is present.
- * @property_read_integer_array: Read an array of integer properties. Return
- * zero on success, a negative error code
- * otherwise.
+ * @property_read_int_array: Read an array of integer properties. Return zero on
+ * success, a negative error code otherwise.
* @property_read_string_array: Read an array of string properties. Return zero
* on success, a negative error code otherwise.
+ * @get_name: Return the name of an fwnode.
+ * @get_name_prefix: Get a prefix for a node (for printing purposes).
* @get_parent: Return the parent of an fwnode.
* @get_next_child_node: Return the next child node in an iteration.
* @get_named_child_node: Return a child node with a given name.
@@ -82,6 +84,8 @@ struct fwnode_operations {
(*property_read_string_array)(const struct fwnode_handle *fwnode_handle,
const char *propname, const char **val,
size_t nval);
+ const char *(*get_name)(const struct fwnode_handle *fwnode);
+ const char *(*get_name_prefix)(const struct fwnode_handle *fwnode);
struct fwnode_handle *(*get_parent)(const struct fwnode_handle *fwnode);
struct fwnode_handle *
(*get_next_child_node)(const struct fwnode_handle *fwnode,
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 8b5330dd5ac0..8bb63027e4d6 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -621,9 +621,10 @@ extern void blk_invalidate_devt(dev_t devt);
extern dev_t blk_lookup_devt(const char *name, int partno);
extern char *disk_name (struct gendisk *hd, int partno, char *buf);
+int bdev_disk_changed(struct block_device *bdev, bool invalidate);
+int blk_add_partitions(struct gendisk *disk, struct block_device *bdev);
+int blk_drop_partitions(struct gendisk *disk, struct block_device *bdev);
extern int disk_expand_part_tbl(struct gendisk *disk, int target);
-extern int rescan_partitions(struct gendisk *disk, struct block_device *bdev);
-extern int invalidate_partitions(struct gendisk *disk, struct block_device *bdev);
extern struct hd_struct * __must_check add_partition(struct gendisk *disk,
int partno, sector_t start,
sector_t len, int flags,
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
index b70af921c614..5215fdba6b9a 100644
--- a/include/linux/gpio/consumer.h
+++ b/include/linux/gpio/consumer.h
@@ -176,11 +176,15 @@ struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
const char *propname, int index,
enum gpiod_flags dflags,
const char *label);
-struct gpio_desc *devm_fwnode_get_index_gpiod_from_child(struct device *dev,
- const char *con_id, int index,
- struct fwnode_handle *child,
- enum gpiod_flags flags,
- const char *label);
+struct gpio_desc *fwnode_gpiod_get_index(struct fwnode_handle *fwnode,
+ const char *con_id, int index,
+ enum gpiod_flags flags,
+ const char *label);
+struct gpio_desc *devm_fwnode_gpiod_get_index(struct device *dev,
+ struct fwnode_handle *child,
+ const char *con_id, int index,
+ enum gpiod_flags flags,
+ const char *label);
#else /* CONFIG_GPIOLIB */
@@ -532,17 +536,48 @@ struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
}
static inline
+struct gpio_desc *fwnode_gpiod_get_index(struct fwnode_handle *fwnode,
+ const char *con_id, int index,
+ enum gpiod_flags flags,
+ const char *label)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline
+struct gpio_desc *devm_fwnode_gpiod_get_index(struct device *dev,
+ struct fwnode_handle *fwnode,
+ const char *con_id, int index,
+ enum gpiod_flags flags,
+ const char *label)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+#endif /* CONFIG_GPIOLIB */
+
+static inline
+struct gpio_desc *devm_fwnode_gpiod_get(struct device *dev,
+ struct fwnode_handle *fwnode,
+ const char *con_id,
+ enum gpiod_flags flags,
+ const char *label)
+{
+ return devm_fwnode_gpiod_get_index(dev, fwnode, con_id, 0,
+ flags, label);
+}
+
+static inline
struct gpio_desc *devm_fwnode_get_index_gpiod_from_child(struct device *dev,
const char *con_id, int index,
struct fwnode_handle *child,
enum gpiod_flags flags,
const char *label)
{
- return ERR_PTR(-ENOSYS);
+ return devm_fwnode_gpiod_get_index(dev, child, con_id, index,
+ flags, label);
}
-#endif /* CONFIG_GPIOLIB */
-
static inline
struct gpio_desc *devm_fwnode_get_gpiod_from_child(struct device *dev,
const char *con_id,
@@ -550,8 +585,7 @@ struct gpio_desc *devm_fwnode_get_gpiod_from_child(struct device *dev,
enum gpiod_flags flags,
const char *label)
{
- return devm_fwnode_get_index_gpiod_from_child(dev, con_id, 0, child,
- flags, label);
+ return devm_fwnode_gpiod_get_index(dev, child, con_id, 0, flags, label);
}
#if IS_ENABLED(CONFIG_GPIOLIB) && IS_ENABLED(CONFIG_OF_GPIO)
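
devm_fwnode_gpiod_get_index() is the old devm_fwnode_get_index_gpiod_from_child() with the arguments reordered (device and fwnode first) plus a non-devm sibling; the static inline wrappers above keep existing callers compiling. Usage sketch for a driver walking its child nodes:

/* Sketch: request a "reset" GPIO from each child node of the device */
struct fwnode_handle *child;
struct gpio_desc *reset;

device_for_each_child_node(dev, child) {
	reset = devm_fwnode_gpiod_get(dev, child, "reset",
				      GPIOD_OUT_LOW, "reset");
	if (IS_ERR(reset)) {
		fwnode_handle_put(child);
		return PTR_ERR(reset);
	}
}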
diff --git a/include/linux/icmp.h b/include/linux/icmp.h
index 2d8aaf7d4b9e..81ca84ce3119 100644
--- a/include/linux/icmp.h
+++ b/include/linux/icmp.h
@@ -20,4 +20,19 @@ static inline struct icmphdr *icmp_hdr(const struct sk_buff *skb)
{
return (struct icmphdr *)skb_transport_header(skb);
}
+
+static inline bool icmp_is_err(int type)
+{
+ switch (type) {
+ case ICMP_DEST_UNREACH:
+ case ICMP_SOURCE_QUENCH:
+ case ICMP_REDIRECT:
+ case ICMP_TIME_EXCEEDED:
+ case ICMP_PARAMETERPROB:
+ return true;
+ }
+
+ return false;
+}
+
#endif /* _LINUX_ICMP_H */
diff --git a/include/linux/icmpv6.h b/include/linux/icmpv6.h
index a8f888976137..ef1cbb5f454f 100644
--- a/include/linux/icmpv6.h
+++ b/include/linux/icmpv6.h
@@ -46,4 +46,18 @@ extern void icmpv6_flow_init(struct sock *sk,
const struct in6_addr *saddr,
const struct in6_addr *daddr,
int oif);
+
+static inline bool icmpv6_is_err(int type)
+{
+ switch (type) {
+ case ICMPV6_DEST_UNREACH:
+ case ICMPV6_PKT_TOOBIG:
+ case ICMPV6_TIME_EXCEED:
+ case ICMPV6_PARAMPROB:
+ return true;
+ }
+
+ return false;
+}
+
#endif
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index ed11ef594378..6d8bf4bdf240 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -336,7 +336,8 @@ enum {
#define QI_DEV_IOTLB_SID(sid) ((u64)((sid) & 0xffff) << 32)
#define QI_DEV_IOTLB_QDEP(qdep) (((qdep) & 0x1f) << 16)
#define QI_DEV_IOTLB_ADDR(addr) ((u64)(addr) & VTD_PAGE_MASK)
-#define QI_DEV_IOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | ((u64)(pfsid & 0xfff) << 52))
+#define QI_DEV_IOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | \
+ ((u64)((pfsid >> 4) & 0xfff) << 52))
#define QI_DEV_IOTLB_SIZE 1
#define QI_DEV_IOTLB_MAX_INVS 32
@@ -360,7 +361,8 @@ enum {
#define QI_DEV_EIOTLB_PASID(p) (((u64)p) << 32)
#define QI_DEV_EIOTLB_SID(sid) ((u64)((sid) & 0xffff) << 16)
#define QI_DEV_EIOTLB_QDEP(qd) ((u64)((qd) & 0x1f) << 4)
-#define QI_DEV_EIOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | ((u64)(pfsid & 0xfff) << 52))
+#define QI_DEV_EIOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | \
+ ((u64)((pfsid >> 4) & 0xfff) << 52))
#define QI_DEV_EIOTLB_MAX_INVS 32
/* Page group response descriptor QW0 */
diff --git a/include/linux/ioport.h b/include/linux/ioport.h
index 7bddddfc76d6..a9b9170b5dd2 100644
--- a/include/linux/ioport.h
+++ b/include/linux/ioport.h
@@ -134,6 +134,7 @@ enum {
IORES_DESC_PERSISTENT_MEMORY_LEGACY = 5,
IORES_DESC_DEVICE_PRIVATE_MEMORY = 6,
IORES_DESC_RESERVED = 7,
+ IORES_DESC_SOFT_RESERVED = 8,
};
/*
diff --git a/include/linux/ipmi_smi.h b/include/linux/ipmi_smi.h
index 4dc66157d872..deec18b8944a 100644
--- a/include/linux/ipmi_smi.h
+++ b/include/linux/ipmi_smi.h
@@ -224,10 +224,14 @@ static inline int ipmi_demangle_device_id(uint8_t netfn, uint8_t cmd,
* is called, and the lower layer must get the interface from that
* call.
*/
-int ipmi_register_smi(const struct ipmi_smi_handlers *handlers,
- void *send_info,
- struct device *dev,
- unsigned char slave_addr);
+int ipmi_add_smi(struct module *owner,
+ const struct ipmi_smi_handlers *handlers,
+ void *send_info,
+ struct device *dev,
+ unsigned char slave_addr);
+
+#define ipmi_register_smi(handlers, send_info, dev, slave_addr) \
+ ipmi_add_smi(THIS_MODULE, handlers, send_info, dev, slave_addr)
/*
* Remove a low-level interface from the IPMI driver. This will
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index 5cc10cf7cb3e..a0bde9e12efa 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -487,6 +487,8 @@
#define ICC_CTLR_EL1_EOImode_MASK (1 << ICC_CTLR_EL1_EOImode_SHIFT)
#define ICC_CTLR_EL1_CBPR_SHIFT 0
#define ICC_CTLR_EL1_CBPR_MASK (1 << ICC_CTLR_EL1_CBPR_SHIFT)
+#define ICC_CTLR_EL1_PMHE_SHIFT 6
+#define ICC_CTLR_EL1_PMHE_MASK (1 << ICC_CTLR_EL1_PMHE_SHIFT)
#define ICC_CTLR_EL1_PRI_BITS_SHIFT 8
#define ICC_CTLR_EL1_PRI_BITS_MASK (0x7 << ICC_CTLR_EL1_PRI_BITS_SHIFT)
#define ICC_CTLR_EL1_ID_BITS_SHIFT 11
diff --git a/include/linux/irqchip/arm-gic-v4.h b/include/linux/irqchip/arm-gic-v4.h
index e6b155713b47..5dbcfc65f21e 100644
--- a/include/linux/irqchip/arm-gic-v4.h
+++ b/include/linux/irqchip/arm-gic-v4.h
@@ -32,9 +32,13 @@ struct its_vm {
struct its_vpe {
struct page *vpt_page;
struct its_vm *its_vm;
+ /* per-vPE VLPI tracking */
+ atomic_t vlpi_count;
/* Doorbell interrupt */
int irq;
irq_hw_number_t vpe_db_lpi;
+ /* VPE resident */
+ bool resident;
/* VPE proxy mapping */
int vpe_proxy_event;
/*
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index 603fbc4e2f70..564793c24d12 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -1170,7 +1170,7 @@ struct journal_s
#define jbd2_might_wait_for_commit(j) \
do { \
rwsem_acquire(&j->j_trans_commit_map, 0, 0, _THIS_IP_); \
- rwsem_release(&j->j_trans_commit_map, 1, _THIS_IP_); \
+ rwsem_release(&j->j_trans_commit_map, _THIS_IP_); \
} while (0)
/* journal feature predicate functions */
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index d83d403dac2e..09f759228e3f 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -328,13 +328,6 @@ extern int oops_may_print(void);
void do_exit(long error_code) __noreturn;
void complete_and_exit(struct completion *, long) __noreturn;
-#ifdef CONFIG_ARCH_HAS_REFCOUNT
-void refcount_error_report(struct pt_regs *regs, const char *err);
-#else
-static inline void refcount_error_report(struct pt_regs *regs, const char *err)
-{ }
-#endif
-
/* Internal, do not use. */
int __must_check _kstrtoul(const char *s, unsigned int base, unsigned long *res);
int __must_check _kstrtol(const char *s, unsigned int base, long *res);
diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h
index 7ee2bb43b251..89f0745c096d 100644
--- a/include/linux/kernel_stat.h
+++ b/include/linux/kernel_stat.h
@@ -78,6 +78,24 @@ static inline unsigned int kstat_cpu_irqs_sum(unsigned int cpu)
return kstat_cpu(cpu).irqs_sum;
}
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
+extern u64 kcpustat_field(struct kernel_cpustat *kcpustat,
+ enum cpu_usage_stat usage, int cpu);
+extern void kcpustat_cpu_fetch(struct kernel_cpustat *dst, int cpu);
+#else
+static inline u64 kcpustat_field(struct kernel_cpustat *kcpustat,
+ enum cpu_usage_stat usage, int cpu)
+{
+ return kcpustat->cpustat[usage];
+}
+
+static inline void kcpustat_cpu_fetch(struct kernel_cpustat *dst, int cpu)
+{
+ *dst = kcpustat_cpu(cpu);
+}
+
+#endif
+
extern void account_user_time(struct task_struct *, u64);
extern void account_guest_time(struct task_struct *, u64);
extern void account_system_time(struct task_struct *, int, u64);
diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h
index 936b61bd504e..dded2e5a9f42 100644
--- a/include/linux/kernfs.h
+++ b/include/linux/kernfs.h
@@ -104,21 +104,6 @@ struct kernfs_elem_attr {
struct kernfs_node *notify_next; /* for kernfs_notify() */
};
-/* represent a kernfs node */
-union kernfs_node_id {
- struct {
- /*
- * blktrace will export this struct as a simplified 'struct
- * fid' (which is a big data struction), so userspace can use
- * it to find kernfs node. The layout must match the first two
- * fields of 'struct fid' exactly.
- */
- u32 ino;
- u32 generation;
- };
- u64 id;
-};
-
/*
* kernfs_node - the building block of kernfs hierarchy. Each and every
* kernfs node is represented by single kernfs_node. Most fields are
@@ -155,7 +140,12 @@ struct kernfs_node {
void *priv;
- union kernfs_node_id id;
+ /*
+ * 64bit unique ID. On 64bit ino setups, id is the ino. On 32bit,
+	 * the low 32bits are the ino and the high 32bits the generation.
+ */
+ u64 id;
+
unsigned short flags;
umode_t mode;
struct kernfs_iattrs *iattr;
@@ -187,7 +177,8 @@ struct kernfs_root {
/* private fields, do not use outside kernfs proper */
struct idr ino_idr;
- u32 next_generation;
+ u32 last_id_lowbits;
+ u32 id_highbits;
struct kernfs_syscall_ops *syscall_ops;
/* list of kernfs_super_info of this root, protected by kernfs_mutex */
@@ -291,6 +282,34 @@ static inline enum kernfs_node_type kernfs_type(struct kernfs_node *kn)
return kn->flags & KERNFS_TYPE_MASK;
}
+static inline ino_t kernfs_id_ino(u64 id)
+{
+ /* id is ino if ino_t is 64bit; otherwise, low 32bits */
+ if (sizeof(ino_t) >= sizeof(u64))
+ return id;
+ else
+ return (u32)id;
+}
+
+static inline u32 kernfs_id_gen(u64 id)
+{
+ /* gen is fixed at 1 if ino_t is 64bit; otherwise, high 32bits */
+ if (sizeof(ino_t) >= sizeof(u64))
+ return 1;
+ else
+ return id >> 32;
+}
+
+static inline ino_t kernfs_ino(struct kernfs_node *kn)
+{
+ return kernfs_id_ino(kn->id);
+}
+
+static inline ino_t kernfs_gen(struct kernfs_node *kn)
+{
+ return kernfs_id_gen(kn->id);
+}
+
/**
* kernfs_enable_ns - enable namespace under a directory
* @kn: directory of interest, should be empty
@@ -382,8 +401,8 @@ void kernfs_kill_sb(struct super_block *sb);
void kernfs_init(void);
-struct kernfs_node *kernfs_get_node_by_id(struct kernfs_root *root,
- const union kernfs_node_id *id);
+struct kernfs_node *kernfs_find_and_get_node_by_id(struct kernfs_root *root,
+ u64 id);
#else /* CONFIG_KERNFS */
static inline enum kernfs_node_type kernfs_type(struct kernfs_node *kn)
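
Collapsing the old kernfs_node_id union into a bare u64 pairs with the FILEID_KERNFS handle type added in the exportfs hunk earlier: on 64-bit ino builds the id is the inode number with the generation pinned at 1; on 32-bit builds the word is split. Decoding sketch:

/* Sketch: split a kernfs u64 id the way the helpers above define it */
u64 id = kn->id;
ino_t ino = kernfs_id_ino(id);	/* whole id, or low 32 bits */
u32 gen = kernfs_id_gen(id);	/* 1, or high 32 bits */

pr_debug("kernfs node %lu gen %u\n", (unsigned long)ino, gen);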
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 719fc3e15ea4..7ed1e2f8641e 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -266,7 +266,8 @@ struct kvm_vcpu {
struct preempt_notifier preempt_notifier;
#endif
int cpu;
- int vcpu_id;
+ int vcpu_id; /* id given by userspace at creation */
+ int vcpu_idx; /* index in kvm->vcpus array */
int srcu_idx;
int mode;
u64 requests;
@@ -278,7 +279,6 @@ struct kvm_vcpu {
struct mutex mutex;
struct kvm_run *run;
- int guest_xcr0_loaded;
struct swait_queue_head wq;
struct pid __rcu *pid;
int sigset_active;
@@ -571,13 +571,7 @@ static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
static inline int kvm_vcpu_get_idx(struct kvm_vcpu *vcpu)
{
- struct kvm_vcpu *tmp;
- int idx;
-
- kvm_for_each_vcpu(idx, tmp, vcpu->kvm)
- if (tmp == vcpu)
- return idx;
- BUG();
+ return vcpu->vcpu_idx;
}
#define kvm_for_each_memslot(memslot, slots) \
@@ -622,6 +616,7 @@ void kvm_exit(void);
void kvm_get_kvm(struct kvm *kvm);
void kvm_put_kvm(struct kvm *kvm);
+void kvm_put_kvm_no_destroy(struct kvm *kvm);
static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id)
{
@@ -746,6 +741,28 @@ int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
unsigned long len);
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
gpa_t gpa, unsigned long len);
+
+#define __kvm_put_guest(kvm, gfn, offset, value, type) \
+({ \
+ unsigned long __addr = gfn_to_hva(kvm, gfn); \
+ type __user *__uaddr = (type __user *)(__addr + offset); \
+ int __ret = -EFAULT; \
+ \
+ if (!kvm_is_error_hva(__addr)) \
+ __ret = put_user(value, __uaddr); \
+ if (!__ret) \
+ mark_page_dirty(kvm, gfn); \
+ __ret; \
+})
+
+#define kvm_put_guest(kvm, gpa, value, type) \
+({ \
+ gpa_t __gpa = gpa; \
+ struct kvm *__kvm = kvm; \
+ __kvm_put_guest(__kvm, __gpa >> PAGE_SHIFT, \
+ offset_in_page(__gpa), (value), type); \
+})
+
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
@@ -791,6 +808,8 @@ void kvm_reload_remote_mmus(struct kvm *kvm);
bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
unsigned long *vcpu_bitmap, cpumask_var_t tmp);
bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req);
+bool kvm_make_cpus_request_mask(struct kvm *kvm, unsigned int req,
+ unsigned long *vcpu_bitmap);
long kvm_arch_dev_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg);
@@ -966,6 +985,7 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
bool kvm_is_reserved_pfn(kvm_pfn_t pfn);
+bool kvm_is_zone_device_pfn(kvm_pfn_t pfn);
struct kvm_irq_ack_notifier {
struct hlist_node link;
@@ -1240,7 +1260,7 @@ extern unsigned int halt_poll_ns_grow_start;
extern unsigned int halt_poll_ns_shrink;
struct kvm_device {
- struct kvm_device_ops *ops;
+ const struct kvm_device_ops *ops;
struct kvm *kvm;
void *private;
struct list_head vm_node;
@@ -1293,7 +1313,7 @@ struct kvm_device_ops {
void kvm_device_get(struct kvm_device *dev);
void kvm_device_put(struct kvm_device *dev);
struct kvm_device *kvm_device_from_filp(struct file *filp);
-int kvm_register_device_ops(struct kvm_device_ops *ops, u32 type);
+int kvm_register_device_ops(const struct kvm_device_ops *ops, u32 type);
void kvm_unregister_device_ops(u32 type);
extern struct kvm_device_ops kvm_mpic_ops;
@@ -1382,4 +1402,10 @@ static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
}
#endif /* CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE */
+typedef int (*kvm_vm_thread_fn_t)(struct kvm *kvm, uintptr_t data);
+
+int kvm_vm_create_worker_thread(struct kvm *kvm, kvm_vm_thread_fn_t thread_fn,
+ uintptr_t data, const char *name,
+ struct task_struct **thread_ptr);
+
#endif
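
kvm_put_guest() is a typed put_user() into guest memory with dirty tracking folded in; the two-level macro evaluates kvm and gpa once before splitting the address into frame and page offset. Usage sketch:

/* Sketch: write a u32 at guest physical address 'gpa', marking the page dirty */
u32 val = 1;
int ret = kvm_put_guest(kvm, gpa, val, u32);

if (ret)
	return ret;	/* -EFAULT if gpa isn't backed by a memslot */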
diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h
index bde5374ae021..1c88e69db3d9 100644
--- a/include/linux/kvm_types.h
+++ b/include/linux/kvm_types.h
@@ -35,6 +35,8 @@ typedef unsigned long gva_t;
typedef u64 gpa_t;
typedef u64 gfn_t;
+#define GPA_INVALID (~(gpa_t)0)
+
typedef unsigned long hva_t;
typedef u64 hpa_t;
typedef u64 hfn_t;
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 207e7ee764ce..d3bbfddf616a 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -484,6 +484,7 @@ enum hsm_task_states {
};
enum ata_completion_errors {
+ AC_ERR_OK = 0, /* no error */
AC_ERR_DEV = (1 << 0), /* device reported error */
AC_ERR_HSM = (1 << 1), /* host state machine violation */
AC_ERR_TIMEOUT = (1 << 2), /* timeout */
@@ -891,9 +892,9 @@ struct ata_port_operations {
/*
* Command execution
*/
- int (*qc_defer)(struct ata_queued_cmd *qc);
- int (*check_atapi_dma)(struct ata_queued_cmd *qc);
- void (*qc_prep)(struct ata_queued_cmd *qc);
+ int (*qc_defer)(struct ata_queued_cmd *qc);
+ int (*check_atapi_dma)(struct ata_queued_cmd *qc);
+ enum ata_completion_errors (*qc_prep)(struct ata_queued_cmd *qc);
unsigned int (*qc_issue)(struct ata_queued_cmd *qc);
bool (*qc_fill_rtf)(struct ata_queued_cmd *qc);
@@ -1161,7 +1162,7 @@ extern int ata_xfer_mode2shift(unsigned long xfer_mode);
extern const char *ata_mode_string(unsigned long xfer_mask);
extern unsigned long ata_id_xfermask(const u16 *id);
extern int ata_std_qc_defer(struct ata_queued_cmd *qc);
-extern void ata_noop_qc_prep(struct ata_queued_cmd *qc);
+extern enum ata_completion_errors ata_noop_qc_prep(struct ata_queued_cmd *qc);
extern void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
unsigned int n_elem);
extern unsigned int ata_dev_classify(const struct ata_taskfile *tf);
@@ -1893,9 +1894,9 @@ extern const struct ata_port_operations ata_bmdma_port_ops;
.sg_tablesize = LIBATA_MAX_PRD, \
.dma_boundary = ATA_DMA_BOUNDARY
-extern void ata_bmdma_qc_prep(struct ata_queued_cmd *qc);
+extern enum ata_completion_errors ata_bmdma_qc_prep(struct ata_queued_cmd *qc);
extern unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc);
-extern void ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc);
+extern enum ata_completion_errors ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc);
extern unsigned int ata_bmdma_port_intr(struct ata_port *ap,
struct ata_queued_cmd *qc);
extern irqreturn_t ata_bmdma_interrupt(int irq, void *dev_instance);
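
Changing ->qc_prep() from void to enum ata_completion_errors lets drivers fail command preparation cleanly instead of proceeding silently; AC_ERR_OK (the new zero entry above) keeps converted bodies terse. A conversion sketch for a hypothetical driver:

/* Sketch: hypothetical converted qc_prep returning a status */
static enum ata_completion_errors example_qc_prep(struct ata_queued_cmd *qc)
{
	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return AC_ERR_OK;	/* nothing to map */

	if (example_fill_sg(qc))	/* hypothetical helper */
		return AC_ERR_SYSTEM;

	return AC_ERR_OK;
}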
diff --git a/include/linux/linkage.h b/include/linux/linkage.h
index 7e020782ade2..9280209d1f62 100644
--- a/include/linux/linkage.h
+++ b/include/linux/linkage.h
@@ -75,32 +75,61 @@
#ifdef __ASSEMBLY__
+/* SYM_T_FUNC -- type used by assembler to mark functions */
+#ifndef SYM_T_FUNC
+#define SYM_T_FUNC STT_FUNC
+#endif
+
+/* SYM_T_OBJECT -- type used by assembler to mark data */
+#ifndef SYM_T_OBJECT
+#define SYM_T_OBJECT STT_OBJECT
+#endif
+
+/* SYM_T_NONE -- type used by assembler to mark entries of unknown type */
+#ifndef SYM_T_NONE
+#define SYM_T_NONE STT_NOTYPE
+#endif
+
+/* SYM_A_* -- align the symbol? */
+#define SYM_A_ALIGN ALIGN
+#define SYM_A_NONE /* nothing */
+
+/* SYM_L_* -- linkage of symbols */
+#define SYM_L_GLOBAL(name) .globl name
+#define SYM_L_WEAK(name) .weak name
+#define SYM_L_LOCAL(name) /* nothing */
+
#ifndef LINKER_SCRIPT
#define ALIGN __ALIGN
#define ALIGN_STR __ALIGN_STR
+/* === DEPRECATED annotations === */
+
+#ifndef CONFIG_X86
#ifndef GLOBAL
+/* deprecated, use SYM_DATA*, SYM_ENTRY, or similar */
#define GLOBAL(name) \
.globl name ASM_NL \
name:
#endif
#ifndef ENTRY
+/* deprecated, use SYM_FUNC_START */
#define ENTRY(name) \
- .globl name ASM_NL \
- ALIGN ASM_NL \
- name:
+ SYM_FUNC_START(name)
#endif
+#endif /* CONFIG_X86 */
#endif /* LINKER_SCRIPT */
+#ifndef CONFIG_X86
#ifndef WEAK
+/* deprecated, use SYM_FUNC_START_WEAK* */
#define WEAK(name) \
- .weak name ASM_NL \
- ALIGN ASM_NL \
- name:
+ SYM_FUNC_START_WEAK(name)
#endif
#ifndef END
+/* deprecated, use SYM_FUNC_END, SYM_DATA_END, or SYM_END */
#define END(name) \
.size name, .-name
#endif
@@ -110,11 +139,215 @@
* static analysis tools such as stack depth analyzer.
*/
#ifndef ENDPROC
+/* deprecated, use SYM_FUNC_END */
#define ENDPROC(name) \
- .type name, @function ASM_NL \
- END(name)
+ SYM_FUNC_END(name)
+#endif
+#endif /* CONFIG_X86 */
+
+/* === generic annotations === */
+
+/* SYM_ENTRY -- use only if you have to for non-paired symbols */
+#ifndef SYM_ENTRY
+#define SYM_ENTRY(name, linkage, align...) \
+ linkage(name) ASM_NL \
+ align ASM_NL \
+ name:
+#endif
+
+/* SYM_START -- use only if you have to */
+#ifndef SYM_START
+#define SYM_START(name, linkage, align...) \
+ SYM_ENTRY(name, linkage, align)
+#endif
+
+/* SYM_END -- use only if you have to */
+#ifndef SYM_END
+#define SYM_END(name, sym_type) \
+ .type name sym_type ASM_NL \
+ .size name, .-name
+#endif
+
+/* === code annotations === */
+
+/*
+ * FUNC -- C-like functions (proper stack frame etc.)
+ * CODE -- non-C code (e.g. irq handlers with different, special stack etc.)
+ *
+ * Objtool validates stack for FUNC, but not for CODE.
+ * Objtool generates debug info for both FUNC & CODE, but needs special
+ * annotations for each CODE's start (to describe the actual stack frame).
+ *
+ * ALIAS -- does not generate debug info -- the aliased function will
+ */
+
+/* SYM_INNER_LABEL_ALIGN -- only for labels in the middle of code */
+#ifndef SYM_INNER_LABEL_ALIGN
+#define SYM_INNER_LABEL_ALIGN(name, linkage) \
+ .type name SYM_T_NONE ASM_NL \
+ SYM_ENTRY(name, linkage, SYM_A_ALIGN)
+#endif
+
+/* SYM_INNER_LABEL -- only for labels in the middle of code */
+#ifndef SYM_INNER_LABEL
+#define SYM_INNER_LABEL(name, linkage) \
+ .type name SYM_T_NONE ASM_NL \
+ SYM_ENTRY(name, linkage, SYM_A_NONE)
+#endif
+
+/*
+ * SYM_FUNC_START_LOCAL_ALIAS -- use where there are two local names for one
+ * function
+ */
+#ifndef SYM_FUNC_START_LOCAL_ALIAS
+#define SYM_FUNC_START_LOCAL_ALIAS(name) \
+ SYM_START(name, SYM_L_LOCAL, SYM_A_ALIGN)
+#endif
+
+/*
+ * SYM_FUNC_START_ALIAS -- use where there are two global names for one
+ * function
+ */
+#ifndef SYM_FUNC_START_ALIAS
+#define SYM_FUNC_START_ALIAS(name) \
+ SYM_START(name, SYM_L_GLOBAL, SYM_A_ALIGN)
+#endif
+
+/* SYM_FUNC_START -- use for global functions */
+#ifndef SYM_FUNC_START
+/*
+ * The same as SYM_FUNC_START_ALIAS, but we will need to distinguish these two
+ * later.
+ */
+#define SYM_FUNC_START(name) \
+ SYM_START(name, SYM_L_GLOBAL, SYM_A_ALIGN)
+#endif
+
+/* SYM_FUNC_START_NOALIGN -- use for global functions, w/o alignment */
+#ifndef SYM_FUNC_START_NOALIGN
+#define SYM_FUNC_START_NOALIGN(name) \
+ SYM_START(name, SYM_L_GLOBAL, SYM_A_NONE)
+#endif
+
+/* SYM_FUNC_START_LOCAL -- use for local functions */
+#ifndef SYM_FUNC_START_LOCAL
+/* the same as SYM_FUNC_START_LOCAL_ALIAS, see comment near SYM_FUNC_START */
+#define SYM_FUNC_START_LOCAL(name) \
+ SYM_START(name, SYM_L_LOCAL, SYM_A_ALIGN)
#endif
+/* SYM_FUNC_START_LOCAL_NOALIGN -- use for local functions, w/o alignment */
+#ifndef SYM_FUNC_START_LOCAL_NOALIGN
+#define SYM_FUNC_START_LOCAL_NOALIGN(name) \
+ SYM_START(name, SYM_L_LOCAL, SYM_A_NONE)
#endif
+/* SYM_FUNC_START_WEAK -- use for weak functions */
+#ifndef SYM_FUNC_START_WEAK
+#define SYM_FUNC_START_WEAK(name) \
+ SYM_START(name, SYM_L_WEAK, SYM_A_ALIGN)
#endif
+
+/* SYM_FUNC_START_WEAK_NOALIGN -- use for weak functions, w/o alignment */
+#ifndef SYM_FUNC_START_WEAK_NOALIGN
+#define SYM_FUNC_START_WEAK_NOALIGN(name) \
+ SYM_START(name, SYM_L_WEAK, SYM_A_NONE)
+#endif
+
+/* SYM_FUNC_END_ALIAS -- the end of LOCAL_ALIASed or ALIASed function */
+#ifndef SYM_FUNC_END_ALIAS
+#define SYM_FUNC_END_ALIAS(name) \
+ SYM_END(name, SYM_T_FUNC)
+#endif
+
+/*
+ * SYM_FUNC_END -- the end of SYM_FUNC_START_LOCAL, SYM_FUNC_START,
+ * SYM_FUNC_START_WEAK, ...
+ */
+#ifndef SYM_FUNC_END
+/* the same as SYM_FUNC_END_ALIAS, see comment near SYM_FUNC_START */
+#define SYM_FUNC_END(name) \
+ SYM_END(name, SYM_T_FUNC)
+#endif
+
+/* SYM_CODE_START -- use for non-C (special) functions */
+#ifndef SYM_CODE_START
+#define SYM_CODE_START(name) \
+ SYM_START(name, SYM_L_GLOBAL, SYM_A_ALIGN)
+#endif
+
+/* SYM_CODE_START_NOALIGN -- use for non-C (special) functions, w/o alignment */
+#ifndef SYM_CODE_START_NOALIGN
+#define SYM_CODE_START_NOALIGN(name) \
+ SYM_START(name, SYM_L_GLOBAL, SYM_A_NONE)
+#endif
+
+/* SYM_CODE_START_LOCAL -- use for local non-C (special) functions */
+#ifndef SYM_CODE_START_LOCAL
+#define SYM_CODE_START_LOCAL(name) \
+ SYM_START(name, SYM_L_LOCAL, SYM_A_ALIGN)
+#endif
+
+/*
+ * SYM_CODE_START_LOCAL_NOALIGN -- use for local non-C (special) functions,
+ * w/o alignment
+ */
+#ifndef SYM_CODE_START_LOCAL_NOALIGN
+#define SYM_CODE_START_LOCAL_NOALIGN(name) \
+ SYM_START(name, SYM_L_LOCAL, SYM_A_NONE)
+#endif
+
+/* SYM_CODE_END -- the end of SYM_CODE_START_LOCAL, SYM_CODE_START, ... */
+#ifndef SYM_CODE_END
+#define SYM_CODE_END(name) \
+ SYM_END(name, SYM_T_NONE)
+#endif
+
+/* === data annotations === */
+
+/* SYM_DATA_START -- global data symbol */
+#ifndef SYM_DATA_START
+#define SYM_DATA_START(name) \
+ SYM_START(name, SYM_L_GLOBAL, SYM_A_NONE)
+#endif
+
+/* SYM_DATA_START_LOCAL -- local data symbol */
+#ifndef SYM_DATA_START_LOCAL
+#define SYM_DATA_START_LOCAL(name) \
+ SYM_START(name, SYM_L_LOCAL, SYM_A_NONE)
+#endif
+
+/* SYM_DATA_END -- the end of SYM_DATA_START symbol */
+#ifndef SYM_DATA_END
+#define SYM_DATA_END(name) \
+ SYM_END(name, SYM_T_OBJECT)
+#endif
+
+/* SYM_DATA_END_LABEL -- the labeled end of SYM_DATA_START symbol */
+#ifndef SYM_DATA_END_LABEL
+#define SYM_DATA_END_LABEL(name, linkage, label) \
+ linkage(label) ASM_NL \
+ .type label SYM_T_OBJECT ASM_NL \
+ label: \
+ SYM_END(name, SYM_T_OBJECT)
+#endif
+
+/* SYM_DATA -- start+end wrapper around simple global data */
+#ifndef SYM_DATA
+#define SYM_DATA(name, data...) \
+ SYM_DATA_START(name) ASM_NL \
+ data ASM_NL \
+ SYM_DATA_END(name)
+#endif
+
+/* SYM_DATA_LOCAL -- start+end wrapper around simple local data */
+#ifndef SYM_DATA_LOCAL
+#define SYM_DATA_LOCAL(name, data...) \
+ SYM_DATA_START_LOCAL(name) ASM_NL \
+ data ASM_NL \
+ SYM_DATA_END(name)
+#endif
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _LINUX_LINKAGE_H */
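
For illustration, a minimal sketch of an .S file converted from the deprecated ENTRY/ENDPROC pair to the new annotations defined above; the function names and the x86-style ret are hypothetical, not taken from this patch:

#include <linux/linkage.h>

SYM_FUNC_START(my_copy_routine)		/* global, aligned, typed STT_FUNC */
	/* ... C-like body with a proper stack frame, validated by objtool ... */
	ret
SYM_FUNC_END(my_copy_routine)		/* emits .type and .size */

SYM_FUNC_START_LOCAL_NOALIGN(my_helper)	/* local, no ALIGN before the label */
	ret
SYM_FUNC_END(my_helper)

Non-C entry points (interrupt handlers and the like) would use the SYM_CODE_START/SYM_CODE_END pair instead, so that objtool skips stack validation for them, per the comment block above.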
diff --git a/include/linux/linkmode.h b/include/linux/linkmode.h
index a99c58866860..fe740031339d 100644
--- a/include/linux/linkmode.h
+++ b/include/linux/linkmode.h
@@ -82,4 +82,10 @@ static inline int linkmode_equal(const unsigned long *src1,
return bitmap_equal(src1, src2, __ETHTOOL_LINK_MODE_MASK_NBITS);
}
+static inline int linkmode_subset(const unsigned long *src1,
+ const unsigned long *src2)
+{
+ return bitmap_subset(src1, src2, __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
#endif /* __LINKMODE_H */
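
As a usage sketch (the mac_supported bitmap and the phydev context are assumed here, not part of this patch), linkmode_subset() lets a MAC driver check whether the PHY's advertised modes already fall within what the MAC supports:

__ETHTOOL_DECLARE_LINK_MODE_MASK(mac_supported);	/* filled in by the driver */

if (!linkmode_subset(phydev->advertising, mac_supported))
	linkmode_and(phydev->advertising, phydev->advertising,
		     mac_supported);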
diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h
index 273400814020..e894e74905f3 100644
--- a/include/linux/livepatch.h
+++ b/include/linux/livepatch.h
@@ -131,9 +131,22 @@ struct klp_object {
};
/**
+ * struct klp_state - state of the system modified by the livepatch
+ * @id: system state identifier (non-zero)
+ * @version: version of the change
+ * @data: custom data
+ */
+struct klp_state {
+ unsigned long id;
+ unsigned int version;
+ void *data;
+};
+
+/**
* struct klp_patch - patch structure for live patching
* @mod: reference to the live patch module
* @objs: object entries for kernel objects to be patched
+ * @states: system states that can get modified
* @replace: replace all actively used patches
* @list: list node for global list of actively used patches
* @kobj: kobject for sysfs resources
@@ -147,6 +160,7 @@ struct klp_patch {
/* external */
struct module *mod;
struct klp_object *objs;
+ struct klp_state *states;
bool replace;
/* internal */
@@ -217,6 +231,9 @@ void *klp_shadow_get_or_alloc(void *obj, unsigned long id,
void klp_shadow_free(void *obj, unsigned long id, klp_shadow_dtor_t dtor);
void klp_shadow_free_all(unsigned long id, klp_shadow_dtor_t dtor);
+struct klp_state *klp_get_state(struct klp_patch *patch, unsigned long id);
+struct klp_state *klp_get_prev_state(unsigned long id);
+
#else /* !CONFIG_LIVEPATCH */
static inline int klp_module_coming(struct module *mod) { return 0; }
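
A hedged sketch of how a cumulative livepatch might use the new klp_state API; the state identifier, version, and helper names below are hypothetical:

#define LP_FIX_ID	1	/* hypothetical non-zero state identifier */

static struct klp_state states[] = {
	{ .id = LP_FIX_ID, .version = 2 },
	{}			/* a zero id terminates the array */
};

static struct klp_patch patch = {
	.mod = THIS_MODULE,
	.objs = objs,		/* assumed defined elsewhere */
	.states = states,
	.replace = true,
};

/* During the transition, a callback can ask whether an already applied
 * patch modified the same state and skip redundant work: */
static int pre_patch_cb(struct klp_object *obj)
{
	struct klp_state *prev = klp_get_prev_state(LP_FIX_ID);

	if (prev && prev->version >= 2)
		return 0;	/* change already in place */
	return apply_fix();	/* hypothetical setup helper */
}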
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index b8a835fd611b..c50d01ef1414 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -349,8 +349,7 @@ extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
int trylock, int read, int check,
struct lockdep_map *nest_lock, unsigned long ip);
-extern void lock_release(struct lockdep_map *lock, int nested,
- unsigned long ip);
+extern void lock_release(struct lockdep_map *lock, unsigned long ip);
/*
* Same "read" as for lock_acquire(), except -1 means any.
@@ -428,7 +427,7 @@ static inline void lockdep_set_selftest_task(struct task_struct *task)
}
# define lock_acquire(l, s, t, r, c, n, i) do { } while (0)
-# define lock_release(l, n, i) do { } while (0)
+# define lock_release(l, i) do { } while (0)
# define lock_downgrade(l, i) do { } while (0)
# define lock_set_class(l, n, k, s, i) do { } while (0)
# define lock_set_subclass(l, s, i) do { } while (0)
@@ -591,42 +590,42 @@ static inline void print_irqtrace_events(struct task_struct *curr)
#define spin_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
#define spin_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i)
-#define spin_release(l, n, i) lock_release(l, n, i)
+#define spin_release(l, i) lock_release(l, i)
#define rwlock_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
#define rwlock_acquire_read(l, s, t, i) lock_acquire_shared_recursive(l, s, t, NULL, i)
-#define rwlock_release(l, n, i) lock_release(l, n, i)
+#define rwlock_release(l, i) lock_release(l, i)
#define seqcount_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
#define seqcount_acquire_read(l, s, t, i) lock_acquire_shared_recursive(l, s, t, NULL, i)
-#define seqcount_release(l, n, i) lock_release(l, n, i)
+#define seqcount_release(l, i) lock_release(l, i)
#define mutex_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
#define mutex_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i)
-#define mutex_release(l, n, i) lock_release(l, n, i)
+#define mutex_release(l, i) lock_release(l, i)
#define rwsem_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
#define rwsem_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i)
#define rwsem_acquire_read(l, s, t, i) lock_acquire_shared(l, s, t, NULL, i)
-#define rwsem_release(l, n, i) lock_release(l, n, i)
+#define rwsem_release(l, i) lock_release(l, i)
#define lock_map_acquire(l) lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_read(l) lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_tryread(l) lock_acquire_shared_recursive(l, 0, 1, NULL, _THIS_IP_)
-#define lock_map_release(l) lock_release(l, 1, _THIS_IP_)
+#define lock_map_release(l) lock_release(l, _THIS_IP_)
#ifdef CONFIG_PROVE_LOCKING
# define might_lock(lock) \
do { \
typecheck(struct lockdep_map *, &(lock)->dep_map); \
lock_acquire(&(lock)->dep_map, 0, 0, 0, 1, NULL, _THIS_IP_); \
- lock_release(&(lock)->dep_map, 0, _THIS_IP_); \
+ lock_release(&(lock)->dep_map, _THIS_IP_); \
} while (0)
# define might_lock_read(lock) \
do { \
typecheck(struct lockdep_map *, &(lock)->dep_map); \
lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_); \
- lock_release(&(lock)->dep_map, 0, _THIS_IP_); \
+ lock_release(&(lock)->dep_map, _THIS_IP_); \
} while (0)
#define lockdep_assert_irqs_enabled() do { \
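
With the nested argument of lock_release() gone, every annotation site simply drops the middle parameter; a converted caller looks roughly like this (the lock object is hypothetical):

/* before: lock_release(&obj->dep_map, 1, _RET_IP_); */
lock_release(&obj->dep_map, _RET_IP_);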
diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h
index a3763247547c..20d8cf194fb7 100644
--- a/include/linux/lsm_hooks.h
+++ b/include/linux/lsm_hooks.h
@@ -1818,6 +1818,14 @@ union security_list_options {
void (*bpf_prog_free_security)(struct bpf_prog_aux *aux);
#endif /* CONFIG_BPF_SYSCALL */
int (*locked_down)(enum lockdown_reason what);
+#ifdef CONFIG_PERF_EVENTS
+ int (*perf_event_open)(struct perf_event_attr *attr, int type);
+ int (*perf_event_alloc)(struct perf_event *event);
+ void (*perf_event_free)(struct perf_event *event);
+ int (*perf_event_read)(struct perf_event *event);
+ int (*perf_event_write)(struct perf_event *event);
+
+#endif
};
struct security_hook_heads {
@@ -2060,6 +2068,13 @@ struct security_hook_heads {
struct hlist_head bpf_prog_free_security;
#endif /* CONFIG_BPF_SYSCALL */
struct hlist_head locked_down;
+#ifdef CONFIG_PERF_EVENTS
+ struct hlist_head perf_event_open;
+ struct hlist_head perf_event_alloc;
+ struct hlist_head perf_event_free;
+ struct hlist_head perf_event_read;
+ struct hlist_head perf_event_write;
+#endif
} __randomize_layout;
/*
diff --git a/include/linux/memory.h b/include/linux/memory.h
index 0ebb105eb261..4c75dae8dd29 100644
--- a/include/linux/memory.h
+++ b/include/linux/memory.h
@@ -119,6 +119,7 @@ extern struct memory_block *find_memory_block(struct mem_section *);
typedef int (*walk_memory_blocks_func_t)(struct memory_block *, void *);
extern int walk_memory_blocks(unsigned long start, unsigned long size,
void *arg, walk_memory_blocks_func_t func);
+extern int for_each_memory_block(void *arg, walk_memory_blocks_func_t func);
#define CONFIG_MEM_BLOCK_SIZE (PAGES_PER_SECTION<<PAGE_SHIFT)
#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */
diff --git a/include/linux/memregion.h b/include/linux/memregion.h
new file mode 100644
index 000000000000..e11595256cac
--- /dev/null
+++ b/include/linux/memregion.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _MEMREGION_H_
+#define _MEMREGION_H_
+#include <linux/types.h>
+#include <linux/errno.h>
+
+struct memregion_info {
+ int target_node;
+};
+
+#ifdef CONFIG_MEMREGION
+int memregion_alloc(gfp_t gfp);
+void memregion_free(int id);
+#else
+static inline int memregion_alloc(gfp_t gfp)
+{
+ return -ENOMEM;
+}
+static inline void memregion_free(int id)
+{
+}
+#endif
+#endif /* _MEMREGION_H_ */
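
Usage of the new interface is a plain allocate/free pairing; a minimal sketch (the caller name is hypothetical and error handling is abbreviated):

#include <linux/memregion.h>

static int my_region_setup(void)
{
	int id = memregion_alloc(GFP_KERNEL);

	if (id < 0)		/* always -ENOMEM with CONFIG_MEMREGION=n */
		return id;
	/* ... name and register a region device using id ... */
	memregion_free(id);	/* on teardown or failure */
	return 0;
}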
diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h
index 3247a3dc7934..b06b75776a32 100644
--- a/include/linux/miscdevice.h
+++ b/include/linux/miscdevice.h
@@ -57,6 +57,7 @@
#define UHID_MINOR 239
#define USERIO_MINOR 240
#define VHOST_VSOCK_MINOR 241
+#define RFKILL_MINOR 242
#define MISC_DYNAMIC_MINOR 255
struct device;
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 3e80f03a387f..27200dea0297 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -556,8 +556,6 @@ struct mlx5_priv {
struct dentry *cmdif_debugfs;
/* end: qp staff */
- struct xarray mkey_table;
-
/* start: alloc staff */
	/* protect buffer allocation according to numa node */
struct mutex alloc_mutex;
@@ -942,8 +940,6 @@ struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev,
gfp_t flags, int npages);
void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev,
struct mlx5_cmd_mailbox *head);
-void mlx5_init_mkey_table(struct mlx5_core_dev *dev);
-void mlx5_cleanup_mkey_table(struct mlx5_core_dev *dev);
int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev,
struct mlx5_core_mkey *mkey,
struct mlx5_async_ctx *async_ctx, u32 *in,
@@ -1121,6 +1117,11 @@ static inline bool mlx5_core_is_pf(const struct mlx5_core_dev *dev)
return dev->coredev_type == MLX5_COREDEV_PF;
}
+static inline bool mlx5_core_is_vf(const struct mlx5_core_dev *dev)
+{
+ return dev->coredev_type == MLX5_COREDEV_VF;
+}
+
static inline bool mlx5_core_is_ecpf(struct mlx5_core_dev *dev)
{
return dev->caps.embedded_cpu;
@@ -1186,4 +1187,15 @@ enum {
MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32,
};
+static inline bool mlx5_is_roce_enabled(struct mlx5_core_dev *dev)
+{
+ struct devlink *devlink = priv_to_devlink(dev);
+ union devlink_param_value val;
+
+ devlink_param_driverinit_value_get(devlink,
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE,
+ &val);
+ return val.vbool;
+}
+
#endif /* MLX5_DRIVER_H */
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index 724d276ea133..4e5b84e66822 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -80,7 +80,8 @@ enum mlx5_flow_namespace_type {
enum {
FDB_BYPASS_PATH,
- FDB_FAST_PATH,
+ FDB_TC_OFFLOAD,
+ FDB_FT_OFFLOAD,
FDB_SLOW_PATH,
};
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 0836fe232f97..5d54fccf87fc 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -1110,6 +1110,7 @@ enum {
};
enum {
+ MLX5_FLEX_PARSER_GENEVE_ENABLED = 1 << 3,
MLX5_FLEX_PARSER_VXLAN_GPE_ENABLED = 1 << 7,
MLX5_FLEX_PARSER_ICMP_V4_ENABLED = 1 << 8,
MLX5_FLEX_PARSER_ICMP_V6_ENABLED = 1 << 9,
@@ -1153,7 +1154,7 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 log_max_srq[0x5];
u8 reserved_at_b0[0x10];
- u8 reserved_at_c0[0x8];
+ u8 max_sgl_for_optimized_performance[0x8];
u8 log_max_cq_sz[0x8];
u8 reserved_at_d0[0xb];
u8 log_max_cq[0x5];
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index 9b6336ad3266..cf3780a6ccc4 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -226,7 +226,7 @@ struct mmc_queue_req;
* MMC Physical partitions
*/
struct mmc_part {
- unsigned int size; /* partition size (in bytes) */
+ u64 size; /* partition size (in bytes) */
unsigned int part_cfg; /* partition type */
char name[MAX_MMC_PART_NAME_LEN];
bool force_ro; /* to make boot parts RO by default */
@@ -291,6 +291,7 @@ struct mmc_card {
struct sd_switch_caps sw_caps; /* switch (CMD6) caps */
unsigned int sdio_funcs; /* number of SDIO functions */
+ atomic_t sdio_funcs_probed; /* number of probed SDIO funcs */
struct sdio_cccr cccr; /* common card info */
struct sdio_cis cis; /* common tuple info */
struct sdio_func *sdio_func[SDIO_MAX_FUNCS]; /* SDIO functions (devices) */
diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h
index d1a5d5df02f5..08b25c02b5a1 100644
--- a/include/linux/mmc/sdio_ids.h
+++ b/include/linux/mmc/sdio_ids.h
@@ -71,6 +71,8 @@
#define SDIO_VENDOR_ID_TI 0x0097
#define SDIO_DEVICE_ID_TI_WL1271 0x4076
+#define SDIO_VENDOR_ID_TI_WL1251 0x104c
+#define SDIO_DEVICE_ID_TI_WL1251 0x9066
#define SDIO_VENDOR_ID_STE 0x0020
#define SDIO_DEVICE_ID_STE_CW1200 0x2280
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index bda20282746b..b0a36d1580b6 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -359,33 +359,40 @@ struct per_cpu_nodestat {
#endif /* !__GENERATING_BOUNDS.H */
enum zone_type {
-#ifdef CONFIG_ZONE_DMA
/*
- * ZONE_DMA is used when there are devices that are not able
- * to do DMA to all of addressable memory (ZONE_NORMAL). Then we
- * carve out the portion of memory that is needed for these devices.
- * The range is arch specific.
+ * ZONE_DMA and ZONE_DMA32 are used when there are peripherals not able
+ * to DMA to all of the addressable memory (ZONE_NORMAL).
+ * On architectures where this area covers the whole 32 bit address
+ * space ZONE_DMA32 is used. ZONE_DMA is left for the ones with smaller
+ * DMA addressing constraints. This distinction is important as a 32-bit
+ * DMA mask is assumed when ZONE_DMA32 is defined. Some 64-bit
+ * platforms may need both zones as they support peripherals with
+ * different DMA addressing limitations.
+ *
+ * Some examples:
+ *
+ * - i386 and x86_64 have a fixed 16M ZONE_DMA and ZONE_DMA32 for the
+ * rest of the lower 4G.
+ *
+ * - arm only uses ZONE_DMA, the size, up to 4G, may vary depending on
+ * the specific device.
+ *
+ * - arm64 has a fixed 1G ZONE_DMA and ZONE_DMA32 for the rest of the
+ * lower 4G.
*
- * Some examples
+ * - powerpc only uses ZONE_DMA, the size, up to 2G, may vary
+ * depending on the specific device.
*
- * Architecture Limit
- * ---------------------------
- * parisc, ia64, sparc <4G
- * s390, powerpc <2G
- * arm Various
- * alpha Unlimited or 0-16MB.
+ * - s390 uses ZONE_DMA fixed to the lower 2G.
*
- * i386, x86_64 and multiple other arches
- * <16M.
+ * - ia64 and riscv only use ZONE_DMA32.
+ *
+ * - parisc uses neither.
*/
+#ifdef CONFIG_ZONE_DMA
ZONE_DMA,
#endif
#ifdef CONFIG_ZONE_DMA32
- /*
- * x86_64 needs two ZONE_DMAs because it supports devices that are
- * only able to do DMA to the lower 16M but also 32 bit devices that
- * can only do DMA areas below 4G.
- */
ZONE_DMA32,
#endif
/*
diff --git a/include/linux/mroute_base.h b/include/linux/mroute_base.h
index 34de06b426ef..8071148f29a6 100644
--- a/include/linux/mroute_base.h
+++ b/include/linux/mroute_base.h
@@ -47,16 +47,16 @@ struct vif_entry_notifier_info {
};
static inline int mr_call_vif_notifier(struct notifier_block *nb,
- struct net *net,
unsigned short family,
enum fib_event_type event_type,
struct vif_device *vif,
- unsigned short vif_index, u32 tb_id)
+ unsigned short vif_index, u32 tb_id,
+ struct netlink_ext_ack *extack)
{
struct vif_entry_notifier_info info = {
.info = {
.family = family,
- .net = net,
+ .extack = extack,
},
.dev = vif->dev,
.vif_index = vif_index,
@@ -64,7 +64,7 @@ static inline int mr_call_vif_notifier(struct notifier_block *nb,
.tb_id = tb_id,
};
- return call_fib_notifier(nb, net, event_type, &info.info);
+ return call_fib_notifier(nb, event_type, &info.info);
}
static inline int mr_call_vif_notifiers(struct net *net,
@@ -77,7 +77,6 @@ static inline int mr_call_vif_notifiers(struct net *net,
struct vif_entry_notifier_info info = {
.info = {
.family = family,
- .net = net,
},
.dev = vif->dev,
.vif_index = vif_index,
@@ -173,21 +172,21 @@ struct mfc_entry_notifier_info {
};
static inline int mr_call_mfc_notifier(struct notifier_block *nb,
- struct net *net,
unsigned short family,
enum fib_event_type event_type,
- struct mr_mfc *mfc, u32 tb_id)
+ struct mr_mfc *mfc, u32 tb_id,
+ struct netlink_ext_ack *extack)
{
struct mfc_entry_notifier_info info = {
.info = {
.family = family,
- .net = net,
+ .extack = extack,
},
.mfc = mfc,
.tb_id = tb_id
};
- return call_fib_notifier(nb, net, event_type, &info.info);
+ return call_fib_notifier(nb, event_type, &info.info);
}
static inline int mr_call_mfc_notifiers(struct net *net,
@@ -199,7 +198,6 @@ static inline int mr_call_mfc_notifiers(struct net *net,
struct mfc_entry_notifier_info info = {
.info = {
.family = family,
- .net = net,
},
.mfc = mfc,
.tb_id = tb_id
@@ -301,10 +299,11 @@ int mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb,
int mr_dump(struct net *net, struct notifier_block *nb, unsigned short family,
int (*rules_dump)(struct net *net,
- struct notifier_block *nb),
+ struct notifier_block *nb,
+ struct netlink_ext_ack *extack),
struct mr_table *(*mr_iter)(struct net *net,
struct mr_table *mrt),
- rwlock_t *mrt_lock);
+ rwlock_t *mrt_lock, struct netlink_ext_ack *extack);
#else
static inline void vif_device_init(struct vif_device *v,
struct net_device *dev,
@@ -355,10 +354,11 @@ mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb,
static inline int mr_dump(struct net *net, struct notifier_block *nb,
unsigned short family,
int (*rules_dump)(struct net *net,
- struct notifier_block *nb),
+ struct notifier_block *nb,
+ struct netlink_ext_ack *extack),
struct mr_table *(*mr_iter)(struct net *net,
struct mr_table *mrt),
- rwlock_t *mrt_lock)
+ rwlock_t *mrt_lock, struct netlink_ext_ack *extack)
{
return -EINVAL;
}
diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h
index fc0b4b19c900..5a4623fc586b 100644
--- a/include/linux/mtd/spi-nor.h
+++ b/include/linux/mtd/spi-nor.h
@@ -22,6 +22,7 @@
#define SNOR_MFR_INTEL CFI_MFR_INTEL
#define SNOR_MFR_ST CFI_MFR_ST /* ST Micro */
#define SNOR_MFR_MICRON CFI_MFR_MICRON /* Micron */
+#define SNOR_MFR_ISSI CFI_MFR_PMC
#define SNOR_MFR_MACRONIX CFI_MFR_MACRONIX
#define SNOR_MFR_SPANSION CFI_MFR_AMD
#define SNOR_MFR_SST CFI_MFR_SST
@@ -133,7 +134,7 @@
#define SR_E_ERR BIT(5)
#define SR_P_ERR BIT(6)
-#define SR_QUAD_EN_MX BIT(6) /* Macronix Quad I/O */
+#define SR1_QUAD_EN_BIT6 BIT(6)
/* Enhanced Volatile Configuration Register bits */
#define EVCR_QUAD_EN_MICRON BIT(7) /* Micron Quad I/O */
@@ -144,10 +145,8 @@
#define FSR_P_ERR BIT(4) /* Program operation status */
#define FSR_PT_ERR BIT(1) /* Protection error bit */
-/* Configuration Register bits. */
-#define CR_QUAD_EN_SPAN BIT(1) /* Spansion Quad I/O */
-
/* Status Register 2 bits. */
+#define SR2_QUAD_EN_BIT1 BIT(1)
#define SR2_QUAD_EN_BIT7 BIT(7)
/* Supported SPI protocols */
@@ -243,6 +242,9 @@ enum spi_nor_option_flags {
SNOR_F_4B_OPCODES = BIT(6),
SNOR_F_HAS_4BAIT = BIT(7),
SNOR_F_HAS_LOCK = BIT(8),
+ SNOR_F_HAS_16BIT_SR = BIT(9),
+ SNOR_F_NO_READ_CR = BIT(10),
+
};
/**
@@ -466,6 +468,34 @@ enum spi_nor_pp_command_index {
struct spi_nor;
/**
+ * struct spi_nor_controller_ops - SPI NOR controller driver specific
+ * operations.
+ * @prepare: [OPTIONAL] do some preparations for the
+ * read/write/erase/lock/unlock operations.
+ * @unprepare: [OPTIONAL] do some post work after the
+ * read/write/erase/lock/unlock operations.
+ * @read_reg: read out the register.
+ * @write_reg: write data to the register.
+ * @read: read data from the SPI NOR.
+ * @write: write data to the SPI NOR.
+ * @erase: erase a sector of the SPI NOR at the offset @offs; if
+ * not provided by the driver, spi-nor will send the erase
+ * opcode via write_reg().
+ */
+struct spi_nor_controller_ops {
+ int (*prepare)(struct spi_nor *nor, enum spi_nor_ops ops);
+ void (*unprepare)(struct spi_nor *nor, enum spi_nor_ops ops);
+ int (*read_reg)(struct spi_nor *nor, u8 opcode, u8 *buf, size_t len);
+ int (*write_reg)(struct spi_nor *nor, u8 opcode, const u8 *buf,
+ size_t len);
+
+ ssize_t (*read)(struct spi_nor *nor, loff_t from, size_t len, u8 *buf);
+ ssize_t (*write)(struct spi_nor *nor, loff_t to, size_t len,
+ const u8 *buf);
+ int (*erase)(struct spi_nor *nor, loff_t offs);
+};
+
+/**
* struct spi_nor_locking_ops - SPI NOR locking methods
* @lock: lock a region of the SPI NOR.
* @unlock: unlock a region of the SPI NOR.
@@ -549,19 +579,7 @@ struct flash_info;
* @read_proto: the SPI protocol for read operations
* @write_proto: the SPI protocol for write operations
 * @reg_proto:	the SPI protocol for read_reg/write_reg/erase operations
- * @prepare: [OPTIONAL] do some preparations for the
- * read/write/erase/lock/unlock operations
- * @unprepare: [OPTIONAL] do some post work after the
- * read/write/erase/lock/unlock operations
- * @read_reg: [DRIVER-SPECIFIC] read out the register
- * @write_reg: [DRIVER-SPECIFIC] write data to the register
- * @read: [DRIVER-SPECIFIC] read data from the SPI NOR
- * @write: [DRIVER-SPECIFIC] write data to the SPI NOR
- * @erase: [DRIVER-SPECIFIC] erase a sector of the SPI NOR
- * at the offset @offs; if not provided by the driver,
- * spi-nor will send the erase opcode via write_reg()
- * @clear_sr_bp: [FLASH-SPECIFIC] clears the Block Protection Bits from
- * the SPI NOR Status Register.
+ * @controller_ops: SPI NOR controller driver specific operations.
* @params: [FLASH-SPECIFIC] SPI-NOR flash parameters and settings.
* The structure includes legacy flash parameters and
* settings that can be overwritten by the spi_nor_fixups
@@ -588,18 +606,8 @@ struct spi_nor {
bool sst_write_second;
u32 flags;
- int (*prepare)(struct spi_nor *nor, enum spi_nor_ops ops);
- void (*unprepare)(struct spi_nor *nor, enum spi_nor_ops ops);
- int (*read_reg)(struct spi_nor *nor, u8 opcode, u8 *buf, int len);
- int (*write_reg)(struct spi_nor *nor, u8 opcode, u8 *buf, int len);
-
- ssize_t (*read)(struct spi_nor *nor, loff_t from,
- size_t len, u_char *read_buf);
- ssize_t (*write)(struct spi_nor *nor, loff_t to,
- size_t len, const u_char *write_buf);
- int (*erase)(struct spi_nor *nor, loff_t offs);
+ const struct spi_nor_controller_ops *controller_ops;
- int (*clear_sr_bp)(struct spi_nor *nor);
struct spi_nor_flash_parameter params;
void *priv;
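
With the controller hooks collected into one const ops table, a controller driver now wires them up roughly as below; the my_nor_* names are hypothetical and the remaining hooks are elided:

static int my_nor_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf,
			   size_t len)
{
	/* issue the register-read opcode on the controller */
	return 0;
}

static ssize_t my_nor_read(struct spi_nor *nor, loff_t from, size_t len,
			   u8 *buf)
{
	/* PIO/DMA read from the flash array */
	return len;
}

static const struct spi_nor_controller_ops my_nor_controller_ops = {
	.read_reg	= my_nor_read_reg,
	.read		= my_nor_read,
	/* .write_reg, .write, and the optional hooks elided */
};

/* in probe(): nor->controller_ops = &my_nor_controller_ops; */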
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index c20f190b4c18..cf0923579af4 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -848,6 +848,7 @@ enum tc_setup_type {
TC_SETUP_ROOT_QDISC,
TC_SETUP_QDISC_GRED,
TC_SETUP_QDISC_TAPRIO,
+ TC_SETUP_FT,
};
/* These structures hold the attributes of bpf state that are being passed
@@ -925,6 +926,15 @@ struct dev_ifalias {
struct devlink;
struct tlsdev_ops;
+struct netdev_name_node {
+ struct hlist_node hlist;
+ struct list_head list;
+ struct net_device *dev;
+ const char *name;
+};
+
+int netdev_name_node_alt_create(struct net_device *dev, const char *name);
+int netdev_name_node_alt_destroy(struct net_device *dev, const char *name);
/*
* This structure defines the management hooks for network devices.
@@ -1317,6 +1327,10 @@ struct net_device_ops {
struct nlattr *port[]);
int (*ndo_get_vf_port)(struct net_device *dev,
int vf, struct sk_buff *skb);
+ int (*ndo_get_vf_guid)(struct net_device *dev,
+ int vf,
+ struct ifla_vf_guid *node_guid,
+ struct ifla_vf_guid *port_guid);
int (*ndo_set_vf_guid)(struct net_device *dev,
int vf, u64 guid,
int guid_type);
@@ -1564,7 +1578,7 @@ enum netdev_priv_flags {
* (i.e. as seen by users in the "Space.c" file). It is the name
* of the interface.
*
- * @name_hlist: Device name hash chain, please keep it close to name[]
+ * @name_node: Name hashlist node
* @ifalias: SNMP alias
* @mem_end: Shared memory end
* @mem_start: Shared memory start
@@ -1780,7 +1794,7 @@ enum netdev_priv_flags {
struct net_device {
char name[IFNAMSIZ];
- struct hlist_node name_hlist;
+ struct netdev_name_node *name_node;
struct dev_ifalias __rcu *ifalias;
/*
* I/O specific fields
@@ -2387,11 +2401,23 @@ struct pcpu_sw_netstats {
} __aligned(4 * sizeof(u64));
struct pcpu_lstats {
- u64 packets;
- u64 bytes;
+ u64_stats_t packets;
+ u64_stats_t bytes;
struct u64_stats_sync syncp;
} __aligned(2 * sizeof(u64));
+void dev_lstats_read(struct net_device *dev, u64 *packets, u64 *bytes);
+
+static inline void dev_lstats_add(struct net_device *dev, unsigned int len)
+{
+ struct pcpu_lstats *lstats = this_cpu_ptr(dev->lstats);
+
+ u64_stats_update_begin(&lstats->syncp);
+ u64_stats_add(&lstats->bytes, len);
+ u64_stats_inc(&lstats->packets);
+ u64_stats_update_end(&lstats->syncp);
+}
+
#define __netdev_alloc_pcpu_stats(type, gfp) \
({ \
typeof(type) __percpu *pcpu_stats = alloc_percpu_gfp(type, gfp);\
@@ -2487,6 +2513,9 @@ const char *netdev_cmd_to_name(enum netdev_cmd cmd);
int register_netdevice_notifier(struct notifier_block *nb);
int unregister_netdevice_notifier(struct notifier_block *nb);
+int register_netdevice_notifier_net(struct net *net, struct notifier_block *nb);
+int unregister_netdevice_notifier_net(struct net *net,
+ struct notifier_block *nb);
struct netdev_notifier_info {
struct net_device *dev;
@@ -2557,6 +2586,9 @@ extern rwlock_t dev_base_lock; /* Device list lock */
list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
#define for_each_netdev_continue(net, d) \
list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
+#define for_each_netdev_continue_reverse(net, d) \
+ list_for_each_entry_continue_reverse(d, &(net)->dev_base_head, \
+ dev_list)
#define for_each_netdev_continue_rcu(net, d) \
list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_in_bond_rcu(bond, slave) \
@@ -4081,9 +4113,6 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
unsigned char name_assign_type,
void (*setup)(struct net_device *),
unsigned int txqs, unsigned int rxqs);
-int dev_get_valid_name(struct net *net, struct net_device *dev,
- const char *name);
-
#define alloc_netdev(sizeof_priv, name, name_assign_type, setup) \
alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, 1, 1)
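
dev_lstats_add() is meant for software devices whose dev->lstats points to percpu struct pcpu_lstats; a sketch of an xmit path using it (the driver shape is assumed):

static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned int len = skb->len;	/* read before freeing the skb */

	dev_lstats_add(dev, len);	/* u64_stats-safe, also on 32-bit */
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}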
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index 77ebb61faf48..eb312e7ca36e 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -199,6 +199,8 @@ extern struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state,
const struct nf_hook_entries *e, unsigned int i);
+void nf_hook_slow_list(struct list_head *head, struct nf_hook_state *state,
+ const struct nf_hook_entries *e);
/**
* nf_hook - call a netfilter hook
*
@@ -311,17 +313,36 @@ NF_HOOK_LIST(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
struct list_head *head, struct net_device *in, struct net_device *out,
int (*okfn)(struct net *, struct sock *, struct sk_buff *))
{
- struct sk_buff *skb, *next;
- struct list_head sublist;
-
- INIT_LIST_HEAD(&sublist);
- list_for_each_entry_safe(skb, next, head, list) {
- list_del(&skb->list);
- if (nf_hook(pf, hook, net, sk, skb, in, out, okfn) == 1)
- list_add_tail(&skb->list, &sublist);
+ struct nf_hook_entries *hook_head = NULL;
+
+#ifdef CONFIG_JUMP_LABEL
+ if (__builtin_constant_p(pf) &&
+ __builtin_constant_p(hook) &&
+ !static_key_false(&nf_hooks_needed[pf][hook]))
+ return;
+#endif
+
+ rcu_read_lock();
+ switch (pf) {
+ case NFPROTO_IPV4:
+ hook_head = rcu_dereference(net->nf.hooks_ipv4[hook]);
+ break;
+ case NFPROTO_IPV6:
+ hook_head = rcu_dereference(net->nf.hooks_ipv6[hook]);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ break;
}
- /* Put passed packets back on main list */
- list_splice(&sublist, head);
+
+ if (hook_head) {
+ struct nf_hook_state state;
+
+ nf_hook_state_init(&state, hook, pf, in, out, sk, net, okfn);
+
+ nf_hook_slow_list(head, &state, hook_head);
+ }
+ rcu_read_unlock();
}
/* Call setsockopt() */
diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
index 9bc255a8461b..4d8b1eaf7708 100644
--- a/include/linux/netfilter/ipset/ip_set.h
+++ b/include/linux/netfilter/ipset/ip_set.h
@@ -269,34 +269,15 @@ ip_set_ext_destroy(struct ip_set *set, void *data)
/* Check that the extension is enabled for the set and
 * call its destroy function for its extension part in data.
*/
- if (SET_WITH_COMMENT(set))
- ip_set_extensions[IPSET_EXT_ID_COMMENT].destroy(
- set, ext_comment(data, set));
-}
+ if (SET_WITH_COMMENT(set)) {
+ struct ip_set_comment *c = ext_comment(data, set);
-static inline int
-ip_set_put_flags(struct sk_buff *skb, struct ip_set *set)
-{
- u32 cadt_flags = 0;
-
- if (SET_WITH_TIMEOUT(set))
- if (unlikely(nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
- htonl(set->timeout))))
- return -EMSGSIZE;
- if (SET_WITH_COUNTER(set))
- cadt_flags |= IPSET_FLAG_WITH_COUNTERS;
- if (SET_WITH_COMMENT(set))
- cadt_flags |= IPSET_FLAG_WITH_COMMENT;
- if (SET_WITH_SKBINFO(set))
- cadt_flags |= IPSET_FLAG_WITH_SKBINFO;
- if (SET_WITH_FORCEADD(set))
- cadt_flags |= IPSET_FLAG_WITH_FORCEADD;
-
- if (!cadt_flags)
- return 0;
- return nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(cadt_flags));
+ ip_set_extensions[IPSET_EXT_ID_COMMENT].destroy(set, c);
+ }
}
+int ip_set_put_flags(struct sk_buff *skb, struct ip_set *set);
+
/* Netlink CB args */
enum {
IPSET_CB_NET = 0, /* net namespace */
@@ -506,144 +487,8 @@ ip_set_timeout_set(unsigned long *timeout, u32 value)
*timeout = t;
}
-static inline u32
-ip_set_timeout_get(const unsigned long *timeout)
-{
- u32 t;
-
- if (*timeout == IPSET_ELEM_PERMANENT)
- return 0;
-
- t = jiffies_to_msecs(*timeout - jiffies)/MSEC_PER_SEC;
- /* Zero value in userspace means no timeout */
- return t == 0 ? 1 : t;
-}
-
-static inline char*
-ip_set_comment_uget(struct nlattr *tb)
-{
- return nla_data(tb);
-}
-
-/* Called from uadd only, protected by the set spinlock.
- * The kadt functions don't use the comment extensions in any way.
- */
-static inline void
-ip_set_init_comment(struct ip_set *set, struct ip_set_comment *comment,
- const struct ip_set_ext *ext)
-{
- struct ip_set_comment_rcu *c = rcu_dereference_protected(comment->c, 1);
- size_t len = ext->comment ? strlen(ext->comment) : 0;
-
- if (unlikely(c)) {
- set->ext_size -= sizeof(*c) + strlen(c->str) + 1;
- kfree_rcu(c, rcu);
- rcu_assign_pointer(comment->c, NULL);
- }
- if (!len)
- return;
- if (unlikely(len > IPSET_MAX_COMMENT_SIZE))
- len = IPSET_MAX_COMMENT_SIZE;
- c = kmalloc(sizeof(*c) + len + 1, GFP_ATOMIC);
- if (unlikely(!c))
- return;
- strlcpy(c->str, ext->comment, len + 1);
- set->ext_size += sizeof(*c) + strlen(c->str) + 1;
- rcu_assign_pointer(comment->c, c);
-}
-
-/* Used only when dumping a set, protected by rcu_read_lock() */
-static inline int
-ip_set_put_comment(struct sk_buff *skb, const struct ip_set_comment *comment)
-{
- struct ip_set_comment_rcu *c = rcu_dereference(comment->c);
-
- if (!c)
- return 0;
- return nla_put_string(skb, IPSET_ATTR_COMMENT, c->str);
-}
-
-/* Called from uadd/udel, flush or the garbage collectors protected
- * by the set spinlock.
- * Called when the set is destroyed and when there can't be any user
- * of the set data anymore.
- */
-static inline void
-ip_set_comment_free(struct ip_set *set, struct ip_set_comment *comment)
-{
- struct ip_set_comment_rcu *c;
-
- c = rcu_dereference_protected(comment->c, 1);
- if (unlikely(!c))
- return;
- set->ext_size -= sizeof(*c) + strlen(c->str) + 1;
- kfree_rcu(c, rcu);
- rcu_assign_pointer(comment->c, NULL);
-}
-
-static inline void
-ip_set_add_bytes(u64 bytes, struct ip_set_counter *counter)
-{
- atomic64_add((long long)bytes, &(counter)->bytes);
-}
-
-static inline void
-ip_set_add_packets(u64 packets, struct ip_set_counter *counter)
-{
- atomic64_add((long long)packets, &(counter)->packets);
-}
-
-static inline u64
-ip_set_get_bytes(const struct ip_set_counter *counter)
-{
- return (u64)atomic64_read(&(counter)->bytes);
-}
-
-static inline u64
-ip_set_get_packets(const struct ip_set_counter *counter)
-{
- return (u64)atomic64_read(&(counter)->packets);
-}
-
-static inline bool
-ip_set_match_counter(u64 counter, u64 match, u8 op)
-{
- switch (op) {
- case IPSET_COUNTER_NONE:
- return true;
- case IPSET_COUNTER_EQ:
- return counter == match;
- case IPSET_COUNTER_NE:
- return counter != match;
- case IPSET_COUNTER_LT:
- return counter < match;
- case IPSET_COUNTER_GT:
- return counter > match;
- }
- return false;
-}
-
-static inline void
-ip_set_update_counter(struct ip_set_counter *counter,
- const struct ip_set_ext *ext, u32 flags)
-{
- if (ext->packets != ULLONG_MAX &&
- !(flags & IPSET_FLAG_SKIP_COUNTER_UPDATE)) {
- ip_set_add_bytes(ext->bytes, counter);
- ip_set_add_packets(ext->packets, counter);
- }
-}
-
-static inline bool
-ip_set_put_counter(struct sk_buff *skb, const struct ip_set_counter *counter)
-{
- return nla_put_net64(skb, IPSET_ATTR_BYTES,
- cpu_to_be64(ip_set_get_bytes(counter)),
- IPSET_ATTR_PAD) ||
- nla_put_net64(skb, IPSET_ATTR_PACKETS,
- cpu_to_be64(ip_set_get_packets(counter)),
- IPSET_ATTR_PAD);
-}
+void ip_set_init_comment(struct ip_set *set, struct ip_set_comment *comment,
+ const struct ip_set_ext *ext);
static inline void
ip_set_init_counter(struct ip_set_counter *counter,
@@ -656,31 +501,6 @@ ip_set_init_counter(struct ip_set_counter *counter,
}
static inline void
-ip_set_get_skbinfo(struct ip_set_skbinfo *skbinfo,
- const struct ip_set_ext *ext,
- struct ip_set_ext *mext, u32 flags)
-{
- mext->skbinfo = *skbinfo;
-}
-
-static inline bool
-ip_set_put_skbinfo(struct sk_buff *skb, const struct ip_set_skbinfo *skbinfo)
-{
- /* Send nonzero parameters only */
- return ((skbinfo->skbmark || skbinfo->skbmarkmask) &&
- nla_put_net64(skb, IPSET_ATTR_SKBMARK,
- cpu_to_be64((u64)skbinfo->skbmark << 32 |
- skbinfo->skbmarkmask),
- IPSET_ATTR_PAD)) ||
- (skbinfo->skbprio &&
- nla_put_net32(skb, IPSET_ATTR_SKBPRIO,
- cpu_to_be32(skbinfo->skbprio))) ||
- (skbinfo->skbqueue &&
- nla_put_net16(skb, IPSET_ATTR_SKBQUEUE,
- cpu_to_be16(skbinfo->skbqueue)));
-}
-
-static inline void
ip_set_init_skbinfo(struct ip_set_skbinfo *skbinfo,
const struct ip_set_ext *ext)
{
diff --git a/include/linux/netfilter/ipset/ip_set_bitmap.h b/include/linux/netfilter/ipset/ip_set_bitmap.h
index 2dddbc6dcac7..fcc4d214a788 100644
--- a/include/linux/netfilter/ipset/ip_set_bitmap.h
+++ b/include/linux/netfilter/ipset/ip_set_bitmap.h
@@ -12,18 +12,4 @@ enum {
IPSET_ADD_START_STORED_TIMEOUT,
};
-/* Common functions */
-
-static inline u32
-range_to_mask(u32 from, u32 to, u8 *bits)
-{
- u32 mask = 0xFFFFFFFE;
-
- *bits = 32;
- while (--(*bits) > 0 && mask && (to & mask) != from)
- mask <<= 1;
-
- return mask;
-}
-
#endif /* __IP_SET_BITMAP_H */
diff --git a/include/linux/netfilter/ipset/ip_set_getport.h b/include/linux/netfilter/ipset/ip_set_getport.h
index d74cd112b88a..1ecaabd9a048 100644
--- a/include/linux/netfilter/ipset/ip_set_getport.h
+++ b/include/linux/netfilter/ipset/ip_set_getport.h
@@ -20,9 +20,6 @@ static inline bool ip_set_get_ip6_port(const struct sk_buff *skb, bool src,
}
#endif
-extern bool ip_set_get_ip_port(const struct sk_buff *skb, u8 pf, bool src,
- __be16 *port);
-
static inline bool ip_set_proto_with_ports(u8 proto)
{
switch (proto) {
diff --git a/include/linux/nvme-fc.h b/include/linux/nvme-fc.h
index 067c9fea64fe..e8c30b39bb27 100644
--- a/include/linux/nvme-fc.h
+++ b/include/linux/nvme-fc.h
@@ -4,33 +4,60 @@
*/
/*
- * This file contains definitions relative to FC-NVME r1.14 (16-020vB).
- * The fcnvme_lsdesc_cr_assoc_cmd struct reflects expected r1.16 content.
+ * This file contains definitions relative to FC-NVME-2 r1.06
+ * (T11-2019-00210-v001).
*/
#ifndef _NVME_FC_H
#define _NVME_FC_H 1
+#include <uapi/scsi/fc/fc_fs.h>
-#define NVME_CMD_SCSI_ID 0xFD
+#define NVME_CMD_FORMAT_ID 0xFD
#define NVME_CMD_FC_ID FC_TYPE_NVME
/* FC-NVME Cmd IU Flags */
-#define FCNVME_CMD_FLAGS_DIRMASK 0x03
-#define FCNVME_CMD_FLAGS_WRITE 0x01
-#define FCNVME_CMD_FLAGS_READ 0x02
+enum {
+ FCNVME_CMD_FLAGS_DIRMASK = 0x03,
+ FCNVME_CMD_FLAGS_WRITE = (1 << 0),
+ FCNVME_CMD_FLAGS_READ = (1 << 1),
+
+ FCNVME_CMD_FLAGS_PICWP = (1 << 2),
+};
+
+enum {
+ FCNVME_CMD_CAT_MASK = 0x0F,
+ FCNVME_CMD_CAT_ADMINQ = 0x01,
+ FCNVME_CMD_CAT_CSSMASK = 0x07,
+ FCNVME_CMD_CAT_CSSFLAG = 0x08,
+};
+
+static inline __u8 fccmnd_set_cat_admin(__u8 rsv_cat)
+{
+ return (rsv_cat & ~FCNVME_CMD_CAT_MASK) | FCNVME_CMD_CAT_ADMINQ;
+}
+
+static inline __u8 fccmnd_set_cat_css(__u8 rsv_cat, __u8 css)
+{
+ return (rsv_cat & ~FCNVME_CMD_CAT_MASK) | FCNVME_CMD_CAT_CSSFLAG |
+ (css & FCNVME_CMD_CAT_CSSMASK);
+}
struct nvme_fc_cmd_iu {
- __u8 scsi_id;
+ __u8 format_id;
__u8 fc_id;
__be16 iu_len;
- __u8 rsvd4[3];
+ __u8 rsvd4[2];
+ __u8 rsv_cat;
__u8 flags;
__be64 connection_id;
__be32 csn;
__be32 data_len;
struct nvme_command sqe;
- __be32 rsvd88[2];
+ __u8 dps;
+ __u8 lbads;
+ __be16 ms;
+ __be32 rsvd92;
};
#define NVME_FC_SIZEOF_ZEROS_RSP 12
@@ -38,11 +65,12 @@ struct nvme_fc_cmd_iu {
enum {
FCNVME_SC_SUCCESS = 0,
FCNVME_SC_INVALID_FIELD = 1,
- FCNVME_SC_INVALID_CONNID = 2,
+ /* reserved 2 */
+ FCNVME_SC_ILL_CONN_PARAMS = 3,
};
struct nvme_fc_ersp_iu {
- __u8 status_code;
+ __u8 ersp_result;
__u8 rsvd1;
__be16 iu_len;
__be32 rsn;
@@ -53,14 +81,44 @@ struct nvme_fc_ersp_iu {
};
-/* FC-NVME Link Services */
+#define FCNVME_NVME_SR_OPCODE 0x01
+
+struct nvme_fc_nvme_sr_iu {
+ __u8 fc_id;
+ __u8 opcode;
+ __u8 rsvd2;
+ __u8 retry_rctl;
+ __be32 rsvd4;
+};
+
+
+enum {
+ FCNVME_SRSTAT_ACC = 0x0,
+ FCNVME_SRSTAT_INV_FCID = 0x1,
+ /* reserved 0x2 */
+ FCNVME_SRSTAT_LOGICAL_ERR = 0x3,
+ FCNVME_SRSTAT_INV_QUALIF = 0x4,
+ FCNVME_SRSTAT_UNABL2PERFORM = 0x9,
+};
+
+struct nvme_fc_nvme_sr_rsp_iu {
+ __u8 fc_id;
+ __u8 opcode;
+ __u8 rsvd2;
+ __u8 status;
+ __be32 rsvd4;
+};
+
+
+/* FC-NVME Link Services - LS cmd values (w0 bits 31:24) */
enum {
FCNVME_LS_RSVD = 0,
FCNVME_LS_RJT = 1,
FCNVME_LS_ACC = 2,
- FCNVME_LS_CREATE_ASSOCIATION = 3,
- FCNVME_LS_CREATE_CONNECTION = 4,
- FCNVME_LS_DISCONNECT = 5,
+ FCNVME_LS_CREATE_ASSOCIATION = 3, /* Create Association */
+ FCNVME_LS_CREATE_CONNECTION = 4, /* Create I/O Connection */
+ FCNVME_LS_DISCONNECT_ASSOC = 5, /* Disconnect Association */
+ FCNVME_LS_DISCONNECT_CONN = 6, /* Disconnect Connection */
};
/* FC-NVME Link Service Descriptors */
@@ -117,14 +175,17 @@ enum fcnvme_ls_rjt_reason {
FCNVME_RJT_RC_UNSUP = 0x0b,
/* command not supported */
- FCNVME_RJT_RC_INPROG = 0x0e,
- /* command already in progress */
-
FCNVME_RJT_RC_INV_ASSOC = 0x40,
- /* Invalid Association ID*/
+ /* Invalid Association ID */
FCNVME_RJT_RC_INV_CONN = 0x41,
- /* Invalid Connection ID*/
+ /* Invalid Connection ID */
+
+ FCNVME_RJT_RC_INV_PARAM = 0x42,
+ /* Invalid Parameters */
+
+ FCNVME_RJT_RC_INSUF_RES = 0x43,
+ /* Insufficient Resources */
FCNVME_RJT_RC_VENDOR = 0xff,
/* vendor specific error */
@@ -138,14 +199,32 @@ enum fcnvme_ls_rjt_explan {
FCNVME_RJT_EXP_OXID_RXID = 0x17,
/* invalid OX_ID-RX_ID combination */
- FCNVME_RJT_EXP_INSUF_RES = 0x29,
- /* insufficient resources */
-
FCNVME_RJT_EXP_UNAB_DATA = 0x2a,
/* unable to supply requested data */
FCNVME_RJT_EXP_INV_LEN = 0x2d,
/* Invalid payload length */
+
+ FCNVME_RJT_EXP_INV_ERSP_RAT = 0x40,
+ /* Invalid NVMe_ERSP Ratio */
+
+ FCNVME_RJT_EXP_INV_CTLR_ID = 0x41,
+ /* Invalid Controller ID */
+
+ FCNVME_RJT_EXP_INV_QUEUE_ID = 0x42,
+ /* Invalid Queue ID */
+
+ FCNVME_RJT_EXP_INV_SQSIZE = 0x43,
+ /* Invalid Submission Queue Size */
+
+ FCNVME_RJT_EXP_INV_HOSTID = 0x44,
+ /* Invalid HOST ID */
+
+ FCNVME_RJT_EXP_INV_HOSTNQN = 0x45,
+ /* Invalid HOSTNQN */
+
+ FCNVME_RJT_EXP_INV_SUBNQN = 0x46,
+ /* Invalid SUBNQN */
};
/* FCNVME_LSDESC_RJT */
@@ -209,21 +288,11 @@ struct fcnvme_lsdesc_cr_conn_cmd {
__be32 rsvd52;
};
-/* Disconnect Scope Values */
-enum {
- FCNVME_DISCONN_ASSOCIATION = 0,
- FCNVME_DISCONN_CONNECTION = 1,
-};
-
/* FCNVME_LSDESC_DISCONN_CMD */
struct fcnvme_lsdesc_disconn_cmd {
__be32 desc_tag; /* FCNVME_LSDESC_xxx */
__be32 desc_len;
- u8 rsvd8[3];
- /* note: scope is really a 1 bit field */
- u8 scope; /* FCNVME_DISCONN_xxx */
- __be32 rsvd12;
- __be64 id;
+ __be32 rsvd8[4];
};
/* FCNVME_LSDESC_CONN_ID */
@@ -242,9 +311,14 @@ struct fcnvme_lsdesc_assoc_id {
/* r_ctl values */
enum {
- FCNVME_RS_RCTL_DATA = 1,
- FCNVME_RS_RCTL_XFER_RDY = 5,
- FCNVME_RS_RCTL_RSP = 8,
+ FCNVME_RS_RCTL_CMND = 0x6,
+ FCNVME_RS_RCTL_DATA = 0x1,
+ FCNVME_RS_RCTL_CONF = 0x3,
+ FCNVME_RS_RCTL_SR = 0x9,
+ FCNVME_RS_RCTL_XFER_RDY = 0x5,
+ FCNVME_RS_RCTL_RSP = 0x7,
+ FCNVME_RS_RCTL_ERSP = 0x8,
+ FCNVME_RS_RCTL_SR_RSP = 0xA,
};
@@ -264,7 +338,10 @@ struct fcnvme_ls_acc_hdr {
struct fcnvme_ls_rqst_w0 w0;
__be32 desc_list_len;
struct fcnvme_lsdesc_rqst rqst;
- /* Followed by cmd-specific ACC descriptors, see next definitions */
+ /*
+ * Followed by cmd-specific ACCEPT descriptors, see xxx_acc
+ * definitions below
+ */
};
/* FCNVME_LS_CREATE_ASSOCIATION */
@@ -302,25 +379,39 @@ struct fcnvme_ls_cr_conn_acc {
struct fcnvme_lsdesc_conn_id connectid;
};
-/* FCNVME_LS_DISCONNECT */
-struct fcnvme_ls_disconnect_rqst {
+/* FCNVME_LS_DISCONNECT_ASSOC */
+struct fcnvme_ls_disconnect_assoc_rqst {
struct fcnvme_ls_rqst_w0 w0;
__be32 desc_list_len;
struct fcnvme_lsdesc_assoc_id associd;
struct fcnvme_lsdesc_disconn_cmd discon_cmd;
};
-struct fcnvme_ls_disconnect_acc {
+struct fcnvme_ls_disconnect_assoc_acc {
+ struct fcnvme_ls_acc_hdr hdr;
+};
+
+
+/* FCNVME_LS_DISCONNECT_CONN */
+struct fcnvme_ls_disconnect_conn_rqst {
+ struct fcnvme_ls_rqst_w0 w0;
+ __be32 desc_list_len;
+ struct fcnvme_lsdesc_assoc_id associd;
+ struct fcnvme_lsdesc_disconn_cmd connectid;
+};
+
+struct fcnvme_ls_disconnect_conn_acc {
struct fcnvme_ls_acc_hdr hdr;
};
/*
- * Yet to be defined in FC-NVME:
+ * Default R_A_TOV is pulled in from fc_fs.h but needs conversion
+ * from ms to seconds for our use.
*/
-#define NVME_FC_CONNECT_TIMEOUT_SEC 2 /* 2 seconds */
-#define NVME_FC_LS_TIMEOUT_SEC 2 /* 2 seconds */
-#define NVME_FC_TGTOP_TIMEOUT_SEC 2 /* 2 seconds */
+#define FC_TWO_TIMES_R_A_TOV (2 * (FC_DEF_R_A_TOV / 1000))
+#define NVME_FC_LS_TIMEOUT_SEC FC_TWO_TIMES_R_A_TOV
+#define NVME_FC_TGTOP_TIMEOUT_SEC FC_TWO_TIMES_R_A_TOV
/*
* TRADDR string must be of form "nn-<16hexdigits>:pn-<16hexdigits>"
@@ -328,6 +419,7 @@ struct fcnvme_ls_disconnect_acc {
 * in front of the <16hexdigits>. The form without "0x" is considered the
 * "min" string and the form with it the "max" string. The hexdigits may be upper
* or lower case.
+ * Note: the FC-NVME-2 standard requires a "0x" prefix.
*/
#define NVME_FC_TRADDR_NNLEN 3 /* "?n-" */
#define NVME_FC_TRADDR_OXNNLEN 5 /* "?n-0x" */
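
The new rsv_cat byte carries the queue category of a Cmd IU; a hedged sketch of tagging one for the admin queue (other initialization abbreviated, css value assumed):

struct nvme_fc_cmd_iu iu = {
	.format_id	= NVME_CMD_FORMAT_ID,
	.fc_id		= NVME_CMD_FC_ID,
};

iu.rsv_cat = fccmnd_set_cat_admin(iu.rsv_cat);
/* or, for an I/O queue bound to command set css:
 * iu.rsv_cat = fccmnd_set_cat_css(iu.rsv_cat, css); */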
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index f61d6906e59d..3d5189f46cb1 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -107,8 +107,22 @@ enum {
NVME_REG_AQA = 0x0024, /* Admin Queue Attributes */
NVME_REG_ASQ = 0x0028, /* Admin SQ Base Address */
NVME_REG_ACQ = 0x0030, /* Admin CQ Base Address */
- NVME_REG_CMBLOC = 0x0038, /* Controller Memory Buffer Location */
+ NVME_REG_CMBLOC = 0x0038, /* Controller Memory Buffer Location */
NVME_REG_CMBSZ = 0x003c, /* Controller Memory Buffer Size */
+ NVME_REG_BPINFO = 0x0040, /* Boot Partition Information */
+ NVME_REG_BPRSEL = 0x0044, /* Boot Partition Read Select */
+ NVME_REG_BPMBL = 0x0048, /* Boot Partition Memory Buffer
+ * Location
+ */
+ NVME_REG_PMRCAP = 0x0e00, /* Persistent Memory Capabilities */
+ NVME_REG_PMRCTL = 0x0e04, /* Persistent Memory Region Control */
+ NVME_REG_PMRSTS = 0x0e08, /* Persistent Memory Region Status */
+ NVME_REG_PMREBS = 0x0e0c, /* Persistent Memory Region Elasticity
+ * Buffer Size
+ */
+ NVME_REG_PMRSWTP = 0x0e10, /* Persistent Memory Region Sustained
+ * Write Throughput
+ */
NVME_REG_DBS = 0x1000, /* SQ 0 Tail Doorbell */
};
@@ -295,6 +309,14 @@ enum {
NVME_CTRL_OACS_DIRECTIVES = 1 << 5,
NVME_CTRL_OACS_DBBUF_SUPP = 1 << 8,
NVME_CTRL_LPA_CMD_EFFECTS_LOG = 1 << 1,
+ NVME_CTRL_CTRATT_128_ID = 1 << 0,
+ NVME_CTRL_CTRATT_NON_OP_PSP = 1 << 1,
+ NVME_CTRL_CTRATT_NVM_SETS = 1 << 2,
+ NVME_CTRL_CTRATT_READ_RECV_LVLS = 1 << 3,
+ NVME_CTRL_CTRATT_ENDURANCE_GROUPS = 1 << 4,
+ NVME_CTRL_CTRATT_PREDICTABLE_LAT = 1 << 5,
+ NVME_CTRL_CTRATT_NAMESPACE_GRANULARITY = 1 << 7,
+ NVME_CTRL_CTRATT_UUID_LIST = 1 << 9,
};
struct nvme_lbaf {
@@ -352,6 +374,9 @@ enum {
NVME_ID_CNS_NS_PRESENT = 0x11,
NVME_ID_CNS_CTRL_NS_LIST = 0x12,
NVME_ID_CNS_CTRL_LIST = 0x13,
+ NVME_ID_CNS_SCNDRY_CTRL_LIST = 0x15,
+ NVME_ID_CNS_NS_GRANULARITY = 0x16,
+ NVME_ID_CNS_UUID_LIST = 0x17,
};
enum {
@@ -409,7 +434,8 @@ struct nvme_smart_log {
__u8 avail_spare;
__u8 spare_thresh;
__u8 percent_used;
- __u8 rsvd6[26];
+ __u8 endu_grp_crit_warn_sumry;
+ __u8 rsvd7[25];
__u8 data_units_read[16];
__u8 data_units_written[16];
__u8 host_reads[16];
@@ -423,7 +449,11 @@ struct nvme_smart_log {
__le32 warning_temp_time;
__le32 critical_comp_time;
__le16 temp_sensor[8];
- __u8 rsvd216[296];
+ __le32 thm_temp1_trans_count;
+ __le32 thm_temp2_trans_count;
+ __le32 thm_temp1_total_time;
+ __le32 thm_temp2_total_time;
+ __u8 rsvd232[280];
};
struct nvme_fw_slot_info_log {
@@ -440,6 +470,7 @@ enum {
NVME_CMD_EFFECTS_NIC = 1 << 3,
NVME_CMD_EFFECTS_CCC = 1 << 4,
NVME_CMD_EFFECTS_CSE_MASK = 3 << 16,
+ NVME_CMD_EFFECTS_UUID_SEL = 1 << 19,
};
struct nvme_effects_log {
@@ -563,6 +594,7 @@ enum nvme_opcode {
nvme_cmd_compare = 0x05,
nvme_cmd_write_zeroes = 0x08,
nvme_cmd_dsm = 0x09,
+ nvme_cmd_verify = 0x0c,
nvme_cmd_resv_register = 0x0d,
nvme_cmd_resv_report = 0x0e,
nvme_cmd_resv_acquire = 0x11,
@@ -772,6 +804,12 @@ struct nvme_write_zeroes_cmd {
/* Features */
+enum {
+ NVME_TEMP_THRESH_MASK = 0xffff,
+ NVME_TEMP_THRESH_SELECT_SHIFT = 16,
+ NVME_TEMP_THRESH_TYPE_UNDER = 0x100000,
+};
+
struct nvme_feat_auto_pst {
__le64 entries[32];
};
@@ -806,10 +844,14 @@ enum nvme_admin_opcode {
nvme_admin_ns_mgmt = 0x0d,
nvme_admin_activate_fw = 0x10,
nvme_admin_download_fw = 0x11,
+ nvme_admin_dev_self_test = 0x14,
nvme_admin_ns_attach = 0x15,
nvme_admin_keep_alive = 0x18,
nvme_admin_directive_send = 0x19,
nvme_admin_directive_recv = 0x1a,
+ nvme_admin_virtual_mgmt = 0x1c,
+ nvme_admin_nvme_mi_send = 0x1d,
+ nvme_admin_nvme_mi_recv = 0x1e,
nvme_admin_dbbuf = 0x7C,
nvme_admin_format_nvm = 0x80,
nvme_admin_security_send = 0x81,
@@ -873,6 +915,7 @@ enum {
NVME_FEAT_PLM_CONFIG = 0x13,
NVME_FEAT_PLM_WINDOW = 0x14,
NVME_FEAT_HOST_BEHAVIOR = 0x16,
+ NVME_FEAT_SANITIZE = 0x17,
NVME_FEAT_SW_PROGRESS = 0x80,
NVME_FEAT_HOST_ID = 0x81,
NVME_FEAT_RESV_MASK = 0x82,
@@ -883,6 +926,10 @@ enum {
NVME_LOG_FW_SLOT = 0x03,
NVME_LOG_CHANGED_NS = 0x04,
NVME_LOG_CMD_EFFECTS = 0x05,
+ NVME_LOG_DEVICE_SELF_TEST = 0x06,
+ NVME_LOG_TELEMETRY_HOST = 0x07,
+ NVME_LOG_TELEMETRY_CTRL = 0x08,
+ NVME_LOG_ENDURANCE_GROUP = 0x09,
NVME_LOG_ANA = 0x0c,
NVME_LOG_DISC = 0x70,
NVME_LOG_RESERVATION = 0x80,
@@ -1290,7 +1337,11 @@ enum {
NVME_SC_SGL_INVALID_OFFSET = 0x16,
NVME_SC_SGL_INVALID_SUBTYPE = 0x17,
+ NVME_SC_SANITIZE_FAILED = 0x1C,
+ NVME_SC_SANITIZE_IN_PROGRESS = 0x1D,
+
NVME_SC_NS_WRITE_PROTECTED = 0x20,
+ NVME_SC_CMD_INTERRUPTED = 0x21,
NVME_SC_LBA_RANGE = 0x80,
NVME_SC_CAP_EXCEEDED = 0x81,
@@ -1328,6 +1379,8 @@ enum {
NVME_SC_NS_NOT_ATTACHED = 0x11a,
NVME_SC_THIN_PROV_NOT_SUPP = 0x11b,
NVME_SC_CTRL_LIST_INVALID = 0x11c,
+ NVME_SC_BP_WRITE_PROHIBITED = 0x11e,
+ NVME_SC_PMR_SAN_PROHIBITED = 0x123,
/*
* I/O Command Set Specific - NVM commands:
@@ -1368,6 +1421,7 @@ enum {
NVME_SC_ANA_INACCESSIBLE = 0x302,
NVME_SC_ANA_TRANSITION = 0x303,
NVME_SC_HOST_PATH_ERROR = 0x370,
+ NVME_SC_HOST_ABORTED_CMD = 0x371,
NVME_SC_CRD = 0x1800,
NVME_SC_DNR = 0x4000,
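
The NVME_TEMP_THRESH_* values describe the layout of the Temperature Threshold feature's cdw11; composing it might look like this sketch, where temp, sensor, and under are assumed caller inputs:

u32 cdw11 = (temp & NVME_TEMP_THRESH_MASK) |
	    (sensor << NVME_TEMP_THRESH_SELECT_SHIFT) |
	    (under ? NVME_TEMP_THRESH_TYPE_UNDER : 0);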
diff --git a/include/linux/nvmem-consumer.h b/include/linux/nvmem-consumer.h
index 8f8be5b00060..d3776be48c53 100644
--- a/include/linux/nvmem-consumer.h
+++ b/include/linux/nvmem-consumer.h
@@ -89,6 +89,9 @@ void nvmem_del_cell_lookups(struct nvmem_cell_lookup *entries,
int nvmem_register_notifier(struct notifier_block *nb);
int nvmem_unregister_notifier(struct notifier_block *nb);
+struct nvmem_device *nvmem_device_find(void *data,
+ int (*match)(struct device *dev, const void *data));
+
#else
static inline struct nvmem_cell *nvmem_cell_get(struct device *dev,
@@ -118,7 +121,7 @@ static inline void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
}
static inline int nvmem_cell_write(struct nvmem_cell *cell,
- const char *buf, size_t len)
+ void *buf, size_t len)
{
return -EOPNOTSUPP;
}
@@ -204,6 +207,12 @@ static inline int nvmem_unregister_notifier(struct notifier_block *nb)
return -EOPNOTSUPP;
}
+static inline struct nvmem_device *nvmem_device_find(void *data,
+ int (*match)(struct device *dev, const void *data))
+{
+ return NULL;
+}
+
#endif /* CONFIG_NVMEM */
#if IS_ENABLED(CONFIG_NVMEM) && IS_ENABLED(CONFIG_OF)
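
nvmem_device_find() walks the registered nvmem devices with a caller-supplied predicate; a minimal sketch matching by device name (the name and match rule are hypothetical):

static int match_name(struct device *dev, const void *data)
{
	return sysfs_streq(dev_name(dev), data);
}

struct nvmem_device *nvmem =
	nvmem_device_find((void *)"mac-eeprom0", match_name);
if (IS_ERR_OR_NULL(nvmem))	/* the CONFIG_NVMEM=n stub returns NULL */
	return -ENODEV;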
diff --git a/include/linux/of_net.h b/include/linux/of_net.h
index 6aeaea1775e6..71bbfcf3adcd 100644
--- a/include/linux/of_net.h
+++ b/include/linux/of_net.h
@@ -6,15 +6,18 @@
#ifndef __LINUX_OF_NET_H
#define __LINUX_OF_NET_H
+#include <linux/phy.h>
+
#ifdef CONFIG_OF_NET
#include <linux/of.h>
struct net_device;
-extern int of_get_phy_mode(struct device_node *np);
+extern int of_get_phy_mode(struct device_node *np, phy_interface_t *interface);
extern const void *of_get_mac_address(struct device_node *np);
extern struct net_device *of_find_net_device_by_node(struct device_node *np);
#else
-static inline int of_get_phy_mode(struct device_node *np)
+static inline int of_get_phy_mode(struct device_node *np,
+ phy_interface_t *interface)
{
return -ENODEV;
}
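
of_get_phy_mode() now returns 0/-errno and hands the mode back through an out-parameter, so converted callers look like this sketch (the fallback policy is driver specific and assumed here):

phy_interface_t phy_mode;
int err;

err = of_get_phy_mode(np, &phy_mode);
if (err)
	phy_mode = PHY_INTERFACE_MODE_NA;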
diff --git a/include/linux/parport.h b/include/linux/parport.h
index 397607a0c0eb..13932ce8b37b 100644
--- a/include/linux/parport.h
+++ b/include/linux/parport.h
@@ -460,6 +460,7 @@ extern size_t parport_ieee1284_epp_read_addr (struct parport *,
void *, size_t, int);
/* IEEE1284.3 functions */
+#define daisy_dev_name "Device ID probe"
extern int parport_daisy_init (struct parport *port);
extern void parport_daisy_fini (struct parport *port);
extern struct pardevice *parport_open (int devnum, const char *name);
diff --git a/include/linux/pci.h b/include/linux/pci.h
index f9088c89a534..eb9f371aa77c 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1686,6 +1686,7 @@ static inline struct pci_dev *pci_get_class(unsigned int class,
static inline void pci_set_master(struct pci_dev *dev) { }
static inline int pci_enable_device(struct pci_dev *dev) { return -EIO; }
static inline void pci_disable_device(struct pci_dev *dev) { }
+static inline int pcim_enable_device(struct pci_dev *pdev) { return -EIO; }
static inline int pci_assign_resource(struct pci_dev *dev, int i)
{ return -EBUSY; }
static inline int __pci_register_driver(struct pci_driver *drv,
@@ -2310,9 +2311,11 @@ struct irq_domain *pci_host_bridge_acpi_msi_domain(struct pci_bus *bus);
void
pci_msi_register_fwnode_provider(struct fwnode_handle *(*fn)(struct device *));
+bool pci_pr3_present(struct pci_dev *pdev);
#else
static inline struct irq_domain *
pci_host_bridge_acpi_msi_domain(struct pci_bus *bus) { return NULL; }
+static inline bool pci_pr3_present(struct pci_dev *pdev) { return false; }
#endif
#ifdef CONFIG_EEH
diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h
index 3998cdf9cd14..ad2ca2a89d5b 100644
--- a/include/linux/percpu-rwsem.h
+++ b/include/linux/percpu-rwsem.h
@@ -93,7 +93,7 @@ static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
__percpu_up_read(sem); /* Unconditional memory barrier */
preempt_enable();
- rwsem_release(&sem->rw_sem.dep_map, 1, _RET_IP_);
+ rwsem_release(&sem->rw_sem.dep_map, _RET_IP_);
}
extern void percpu_down_write(struct percpu_rw_semaphore *);
@@ -118,7 +118,7 @@ extern void percpu_free_rwsem(struct percpu_rw_semaphore *);
static inline void percpu_rwsem_release(struct percpu_rw_semaphore *sem,
bool read, unsigned long ip)
{
- lock_release(&sem->rw_sem.dep_map, 1, ip);
+ lock_release(&sem->rw_sem.dep_map, ip);
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
if (!read)
atomic_long_set(&sem->rw_sem.owner, RWSEM_OWNER_UNKNOWN);
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 68ccc5b1913b..6d4c22aee384 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -56,6 +56,7 @@ struct perf_guest_info_callbacks {
#include <linux/perf_regs.h>
#include <linux/cgroup.h>
#include <linux/refcount.h>
+#include <linux/security.h>
#include <asm/local.h>
struct perf_callchain_entry {
@@ -248,6 +249,8 @@ struct perf_event;
#define PERF_PMU_CAP_NO_EXCLUDE 0x80
#define PERF_PMU_CAP_AUX_OUTPUT 0x100
+struct perf_output_handle;
+
/**
* struct pmu - generic performance monitoring unit
*/
@@ -409,6 +412,15 @@ struct pmu {
*/
size_t task_ctx_size;
+ /*
+ * PMU specific parts of task perf event context (i.e. ctx->task_ctx_data)
+ * can be synchronized using this function. See Intel LBR callstack support
+ * implementation and Perf core context switch handling callbacks for usage
+ * examples.
+ */
+ void (*swap_task_ctx) (struct perf_event_context *prev,
+ struct perf_event_context *next);
+ /* optional */
/*
* Set up pmu-private data structures for an AUX area
@@ -423,6 +435,19 @@ struct pmu {
void (*free_aux) (void *aux); /* optional */
/*
+ * Take a snapshot of the AUX buffer without touching the event
+ * state, so that preempting ->start()/->stop() callbacks does
+ * not interfere with their logic. Called in PMI context.
+ *
+ * Returns the size of AUX data copied to the output handle.
+ *
+ * Optional.
+ */
+ long (*snapshot_aux) (struct perf_event *event,
+ struct perf_output_handle *handle,
+ unsigned long size);
+
+ /*
* Validate address range filters: make sure the HW supports the
* requested configuration and number of filters; return 0 if the
* supplied filters are valid, -errno otherwise.
@@ -721,6 +746,9 @@ struct perf_event {
struct perf_cgroup *cgrp; /* cgroup event is attach to */
#endif
+#ifdef CONFIG_SECURITY
+ void *security;
+#endif
struct list_head sb_list;
#endif /* CONFIG_PERF_EVENTS */
};
@@ -960,6 +988,7 @@ struct perf_sample_data {
u32 reserved;
} cpu_entry;
struct perf_callchain_entry *callchain;
+ u64 aux_size;
/*
* regs_user may point to task_pt_regs or to regs_user_copy, depending
@@ -1241,19 +1270,41 @@ extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
int perf_event_max_stack_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos);
-static inline bool perf_paranoid_tracepoint_raw(void)
+/* Access to perf_event_open(2) syscall. */
+#define PERF_SECURITY_OPEN 0
+
+/* Finer grained perf_event_open(2) access control. */
+#define PERF_SECURITY_CPU 1
+#define PERF_SECURITY_KERNEL 2
+#define PERF_SECURITY_TRACEPOINT 3
+
+static inline int perf_is_paranoid(void)
{
return sysctl_perf_event_paranoid > -1;
}
-static inline bool perf_paranoid_cpu(void)
+static inline int perf_allow_kernel(struct perf_event_attr *attr)
+{
+ if (sysctl_perf_event_paranoid > 1 && !capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ return security_perf_event_open(attr, PERF_SECURITY_KERNEL);
+}
+
+static inline int perf_allow_cpu(struct perf_event_attr *attr)
{
- return sysctl_perf_event_paranoid > 0;
+ if (sysctl_perf_event_paranoid > 0 && !capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ return security_perf_event_open(attr, PERF_SECURITY_CPU);
}
-static inline bool perf_paranoid_kernel(void)
+static inline int perf_allow_tracepoint(struct perf_event_attr *attr)
{
- return sysctl_perf_event_paranoid > 1;
+ if (sysctl_perf_event_paranoid > -1 && !capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ return security_perf_event_open(attr, PERF_SECURITY_TRACEPOINT);
}
extern void perf_event_init(void);
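For illustration, a hypothetical call site for the reworked checks. Unlike the old perf_paranoid_*() predicates, the new helpers also consult the security_perf_event_open() hook, so callers simply propagate the returned error (the function and flow below are invented for the sketch):

static int example_open_check(struct perf_event_attr *attr)
{
	int err;

	/* Kernel profiling requested: sysctl policy and LSM must both agree. */
	if (!attr->exclude_kernel) {
		err = perf_allow_kernel(attr);
		if (err)
			return err;
	}

	return 0;
}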
@@ -1327,6 +1378,9 @@ extern unsigned int perf_output_copy(struct perf_output_handle *handle,
const void *buf, unsigned int len);
extern unsigned int perf_output_skip(struct perf_output_handle *handle,
unsigned int len);
+extern long perf_output_copy_aux(struct perf_output_handle *aux_handle,
+ struct perf_output_handle *handle,
+ unsigned long from, unsigned long to);
extern int perf_swevent_get_recursion_context(void);
extern void perf_swevent_put_recursion_context(int rctx);
extern u64 perf_swevent_set_period(struct perf_event *event);
@@ -1336,6 +1390,8 @@ extern void perf_event_disable_local(struct perf_event *event);
extern void perf_event_disable_inatomic(struct perf_event *event);
extern void perf_event_task_tick(void);
extern int perf_event_account_interrupt(struct perf_event *event);
+extern int perf_event_period(struct perf_event *event, u64 value);
+extern u64 perf_event_pause(struct perf_event *event, bool reset);
#else /* !CONFIG_PERF_EVENTS: */
static inline void *
perf_aux_output_begin(struct perf_output_handle *handle,
@@ -1415,6 +1471,14 @@ static inline void perf_event_disable(struct perf_event *event) { }
static inline int __perf_event_disable(void *info) { return -1; }
static inline void perf_event_task_tick(void) { }
static inline int perf_event_release_kernel(struct perf_event *event) { return 0; }
+static inline int perf_event_period(struct perf_event *event, u64 value)
+{
+ return -EINVAL;
+}
+static inline u64 perf_event_pause(struct perf_event *event, bool reset)
+{
+ return 0;
+}
#endif
#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 9a0e981df502..5032d453ac66 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -203,6 +203,8 @@ static inline const char *phy_modes(phy_interface_t interface)
struct device;
struct phylink;
+struct sfp_bus;
+struct sfp_upstream_ops;
struct sk_buff;
/*
@@ -342,6 +344,8 @@ struct phy_c45_device_ids {
* dev_flags: Device-specific flags used by the PHY driver.
* irq: IRQ number of the PHY's interrupt (-1 if none)
* phy_timer: The timer for handling the state machine
+ * sfp_bus_attached: flag indicating whether the SFP bus has been attached
+ * sfp_bus: SFP bus attached to this PHY's fiber port
* attached_dev: The attached enet driver's device instance ptr
* adjust_link: Callback for the enet controller to respond to
* changes in the link state.
@@ -432,6 +436,9 @@ struct phy_device {
struct mutex lock;
+ /* This may be modified under the rtnl lock */
+ bool sfp_bus_attached;
+ struct sfp_bus *sfp_bus;
struct phylink *phylink;
struct net_device *attached_dev;
@@ -1020,6 +1027,10 @@ int phy_suspend(struct phy_device *phydev);
int phy_resume(struct phy_device *phydev);
int __phy_resume(struct phy_device *phydev);
int phy_loopback(struct phy_device *phydev, bool enable);
+void phy_sfp_attach(void *upstream, struct sfp_bus *bus);
+void phy_sfp_detach(void *upstream, struct sfp_bus *bus);
+int phy_sfp_probe(struct phy_device *phydev,
+ const struct sfp_upstream_ops *ops);
struct phy_device *phy_attach(struct net_device *dev, const char *bus_id,
phy_interface_t interface);
struct phy_device *phy_find_first(struct mii_bus *bus);
@@ -1065,6 +1076,16 @@ static inline const char *phydev_name(const struct phy_device *phydev)
return dev_name(&phydev->mdio.dev);
}
+static inline void phy_lock_mdio_bus(struct phy_device *phydev)
+{
+ mutex_lock(&phydev->mdio.bus->mdio_lock);
+}
+
+static inline void phy_unlock_mdio_bus(struct phy_device *phydev)
+{
+ mutex_unlock(&phydev->mdio.bus->mdio_lock);
+}
+
void phy_attached_print(struct phy_device *phydev, const char *fmt, ...)
__printf(2, 3);
void phy_attached_info(struct phy_device *phydev);
@@ -1106,6 +1127,10 @@ int genphy_read_mmd_unsupported(struct phy_device *phdev, int devad,
int genphy_write_mmd_unsupported(struct phy_device *phdev, int devnum,
u16 regnum, u16 val);
+/* Clause 37 */
+int genphy_c37_config_aneg(struct phy_device *phydev);
+int genphy_c37_read_status(struct phy_device *phydev);
+
/* Clause 45 PHY */
int genphy_c45_restart_aneg(struct phy_device *phydev);
int genphy_c45_check_and_restart_aneg(struct phy_device *phydev, bool restart);
@@ -1145,7 +1170,6 @@ void phy_queue_state_machine(struct phy_device *phydev, unsigned long jiffies);
void phy_mac_interrupt(struct phy_device *phydev);
void phy_start_machine(struct phy_device *phydev);
void phy_stop_machine(struct phy_device *phydev);
-int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd);
void phy_ethtool_ksettings_get(struct phy_device *phydev,
struct ethtool_link_ksettings *cmd);
int phy_ethtool_ksettings_set(struct phy_device *phydev,
diff --git a/include/linux/phy/phy.h b/include/linux/phy/phy.h
index 15032f145063..56d3a100006a 100644
--- a/include/linux/phy/phy.h
+++ b/include/linux/phy/phy.h
@@ -38,7 +38,8 @@ enum phy_mode {
PHY_MODE_PCIE,
PHY_MODE_ETHERNET,
PHY_MODE_MIPI_DPHY,
- PHY_MODE_SATA
+ PHY_MODE_SATA,
+ PHY_MODE_LVDS,
};
/**
diff --git a/include/linux/phy/tegra/xusb.h b/include/linux/phy/tegra/xusb.h
index ee59562c8354..1235865e7e2c 100644
--- a/include/linux/phy/tegra/xusb.h
+++ b/include/linux/phy/tegra/xusb.h
@@ -18,5 +18,7 @@ int tegra_xusb_padctl_hsic_set_idle(struct tegra_xusb_padctl *padctl,
unsigned int port, bool idle);
int tegra_xusb_padctl_usb3_set_lfps_detect(struct tegra_xusb_padctl *padctl,
unsigned int port, bool enable);
-
+int tegra_xusb_padctl_set_vbus_override(struct tegra_xusb_padctl *padctl,
+ bool val);
+int tegra_phy_xusb_utmi_port_reset(struct phy *phy);
#endif /* PHY_TEGRA_XUSB_H */
diff --git a/include/linux/phylink.h b/include/linux/phylink.h
index 300ecdb6790a..fed5488e3c75 100644
--- a/include/linux/phylink.h
+++ b/include/linux/phylink.h
@@ -72,7 +72,7 @@ struct phylink_config {
/**
* struct phylink_mac_ops - MAC operations structure.
* @validate: Validate and update the link configuration.
- * @mac_link_state: Read the current link state from the hardware.
+ * @mac_pcs_get_state: Read the current link state from the hardware.
* @mac_config: configure the MAC for the selected mode and state.
* @mac_an_restart: restart 802.3z BaseX autonegotiation.
* @mac_link_down: take the link down.
@@ -84,8 +84,8 @@ struct phylink_mac_ops {
void (*validate)(struct phylink_config *config,
unsigned long *supported,
struct phylink_link_state *state);
- int (*mac_link_state)(struct phylink_config *config,
- struct phylink_link_state *state);
+ void (*mac_pcs_get_state)(struct phylink_config *config,
+ struct phylink_link_state *state);
void (*mac_config)(struct phylink_config *config, unsigned int mode,
const struct phylink_link_state *state);
void (*mac_an_restart)(struct phylink_config *config);
@@ -127,18 +127,19 @@ void validate(struct phylink_config *config, unsigned long *supported,
struct phylink_link_state *state);
/**
- * mac_link_state() - Read the current link state from the hardware
+ * mac_pcs_get_state() - Read the current inband link state from the hardware
* @config: a pointer to a &struct phylink_config.
* @state: a pointer to a &struct phylink_link_state.
*
- * Read the current link state from the MAC, reporting the current
- * speed in @state->speed, duplex mode in @state->duplex, pause mode
- * in @state->pause using the %MLO_PAUSE_RX and %MLO_PAUSE_TX bits,
- * negotiation completion state in @state->an_complete, and link
- * up state in @state->link.
+ * Read the current inband link state from the MAC PCS, reporting the
+ * current speed in @state->speed, duplex mode in @state->duplex, pause
+ * mode in @state->pause using the %MLO_PAUSE_RX and %MLO_PAUSE_TX bits,
+ * negotiation completion state in @state->an_complete, and link up state
+ * in @state->link. If possible, @state->lp_advertising should also be
+ * populated.
*/
-int mac_link_state(struct phylink_config *config,
- struct phylink_link_state *state);
+void mac_pcs_get_state(struct phylink_config *config,
+ struct phylink_link_state *state);
/**
* mac_config() - configure the MAC for the selected mode and state
@@ -166,7 +167,7 @@ int mac_link_state(struct phylink_config *config,
* 1000base-X or Cisco SGMII mode depending on the @state->interface
* mode). In both cases, link state management (whether the link
* is up or not) is performed by the MAC, and reported via the
- * mac_link_state() callback. Changes in link state must be made
+ * mac_pcs_get_state() callback. Changes in link state must be made
* by calling phylink_mac_change().
*
* If in 802.3z mode, the link speed is fixed, dependent on the
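A skeletal mac_pcs_get_state() implementation under the renamed op; the driver, register names, and bit layout are all invented for the sketch:

static void foo_mac_pcs_get_state(struct phylink_config *config,
				  struct phylink_link_state *state)
{
	struct foo_priv *priv = foo_config_to_priv(config);
	u32 sts = foo_readl(priv, FOO_PCS_STATUS);

	state->link = !!(sts & FOO_PCS_LINK);
	state->an_complete = !!(sts & FOO_PCS_AN_DONE);
	state->duplex = (sts & FOO_PCS_FDX) ? DUPLEX_FULL : DUPLEX_HALF;
	state->speed = (sts & FOO_PCS_1000) ? SPEED_1000 : SPEED_100;
	if (sts & FOO_PCS_PAUSE)
		state->pause |= MLO_PAUSE_RX | MLO_PAUSE_TX;
}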
diff --git a/include/linux/pid.h b/include/linux/pid.h
index 9645b1194c98..998ae7d24450 100644
--- a/include/linux/pid.h
+++ b/include/linux/pid.h
@@ -85,6 +85,10 @@ static inline struct pid *get_pid(struct pid *pid)
extern void put_pid(struct pid *pid);
extern struct task_struct *pid_task(struct pid *pid, enum pid_type);
+static inline bool pid_has_task(struct pid *pid, enum pid_type type)
+{
+ return !hlist_empty(&pid->tasks[type]);
+}
extern struct task_struct *get_pid_task(struct pid *pid, enum pid_type);
extern struct pid *get_task_pid(struct task_struct *task, enum pid_type type);
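A minimal sketch of the new predicate in use (the caller is invented); under rcu_read_lock() it answers whether the struct pid is still attached to a task of the given type:

static bool example_pid_alive(struct pid *pid)
{
	bool alive;

	rcu_read_lock();
	alive = pid_has_task(pid, PIDTYPE_TGID);
	rcu_read_unlock();

	return alive;
}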
@@ -120,7 +124,8 @@ extern struct pid *find_vpid(int nr);
extern struct pid *find_get_pid(int nr);
extern struct pid *find_ge_pid(int nr, struct pid_namespace *);
-extern struct pid *alloc_pid(struct pid_namespace *ns);
+extern struct pid *alloc_pid(struct pid_namespace *ns, pid_t *set_tid,
+ size_t set_tid_size);
extern void free_pid(struct pid *pid);
extern void disable_pid_allocation(struct pid_namespace *ns);
diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h
index 49538b172483..2ed6af88794b 100644
--- a/include/linux/pid_namespace.h
+++ b/include/linux/pid_namespace.h
@@ -12,6 +12,8 @@
#include <linux/ns_common.h>
#include <linux/idr.h>
+/* MAX_PID_NS_LEVEL is needed for limiting the size of 'struct pid' */
+#define MAX_PID_NS_LEVEL 32
struct fs_pin;
diff --git a/include/linux/platform_data/cros_ec_commands.h b/include/linux/platform_data/cros_ec_commands.h
index 98415686cbfa..69210881ebac 100644
--- a/include/linux/platform_data/cros_ec_commands.h
+++ b/include/linux/platform_data/cros_ec_commands.h
@@ -556,6 +556,9 @@ enum host_event_code {
/* Keyboard recovery combo with hardware reinitialization */
EC_HOST_EVENT_KEYBOARD_RECOVERY_HW_REINIT = 30,
+ /* WoV */
+ EC_HOST_EVENT_WOV = 31,
+
/*
* The high bit of the event mask is not used as a host event code. If
* it reads back as set, then the entire event mask should be
@@ -1277,8 +1280,6 @@ enum ec_feature_code {
* MOTIONSENSE_CMD_TABLET_MODE_LID_ANGLE.
*/
EC_FEATURE_REFINED_TABLET_MODE_HYSTERESIS = 37,
- /* EC supports audio codec. */
- EC_FEATURE_AUDIO_CODEC = 38,
/* The MCU is a System Companion Processor (SCP). */
EC_FEATURE_SCP = 39,
/* The MCU is an Integrated Sensor Hub */
@@ -4468,92 +4469,246 @@ enum mkbp_cec_event {
/*****************************************************************************/
-/* Commands for I2S recording on audio codec. */
+/* Commands for audio codec. */
+#define EC_CMD_EC_CODEC 0x00BC
-#define EC_CMD_CODEC_I2S 0x00BC
-#define EC_WOV_I2S_SAMPLE_RATE 48000
+enum ec_codec_subcmd {
+ EC_CODEC_GET_CAPABILITIES = 0x0,
+ EC_CODEC_GET_SHM_ADDR = 0x1,
+ EC_CODEC_SET_SHM_ADDR = 0x2,
+ EC_CODEC_SUBCMD_COUNT,
+};
-enum ec_codec_i2s_subcmd {
- EC_CODEC_SET_SAMPLE_DEPTH = 0x0,
- EC_CODEC_SET_GAIN = 0x1,
- EC_CODEC_GET_GAIN = 0x2,
- EC_CODEC_I2S_ENABLE = 0x3,
- EC_CODEC_I2S_SET_CONFIG = 0x4,
- EC_CODEC_I2S_SET_TDM_CONFIG = 0x5,
- EC_CODEC_I2S_SET_BCLK = 0x6,
- EC_CODEC_I2S_SUBCMD_COUNT = 0x7,
+enum ec_codec_cap {
+ EC_CODEC_CAP_WOV_AUDIO_SHM = 0,
+ EC_CODEC_CAP_WOV_LANG_SHM = 1,
+ EC_CODEC_CAP_LAST = 32,
};
-enum ec_sample_depth_value {
- EC_CODEC_SAMPLE_DEPTH_16 = 0,
- EC_CODEC_SAMPLE_DEPTH_24 = 1,
+enum ec_codec_shm_id {
+ EC_CODEC_SHM_ID_WOV_AUDIO = 0x0,
+ EC_CODEC_SHM_ID_WOV_LANG = 0x1,
+ EC_CODEC_SHM_ID_LAST,
};
-enum ec_i2s_config {
- EC_DAI_FMT_I2S = 0,
- EC_DAI_FMT_RIGHT_J = 1,
- EC_DAI_FMT_LEFT_J = 2,
- EC_DAI_FMT_PCM_A = 3,
- EC_DAI_FMT_PCM_B = 4,
- EC_DAI_FMT_PCM_TDM = 5,
+enum ec_codec_shm_type {
+ EC_CODEC_SHM_TYPE_EC_RAM = 0x0,
+ EC_CODEC_SHM_TYPE_SYSTEM_RAM = 0x1,
};
-/*
- * For subcommand EC_CODEC_GET_GAIN.
- */
-struct __ec_align1 ec_codec_i2s_gain {
+struct __ec_align1 ec_param_ec_codec_get_shm_addr {
+ uint8_t shm_id;
+ uint8_t reserved[3];
+};
+
+struct __ec_align4 ec_param_ec_codec_set_shm_addr {
+ uint64_t phys_addr;
+ uint32_t len;
+ uint8_t shm_id;
+ uint8_t reserved[3];
+};
+
+struct __ec_align4 ec_param_ec_codec {
+ uint8_t cmd; /* enum ec_codec_subcmd */
+ uint8_t reserved[3];
+
+ union {
+ struct ec_param_ec_codec_get_shm_addr
+ get_shm_addr_param;
+ struct ec_param_ec_codec_set_shm_addr
+ set_shm_addr_param;
+ };
+};
+
+struct __ec_align4 ec_response_ec_codec_get_capabilities {
+ uint32_t capabilities;
+};
+
+struct __ec_align4 ec_response_ec_codec_get_shm_addr {
+ uint64_t phys_addr;
+ uint32_t len;
+ uint8_t type;
+ uint8_t reserved[3];
+};
+
+/*****************************************************************************/
+
+/* Commands for DMIC on audio codec. */
+#define EC_CMD_EC_CODEC_DMIC 0x00BD
+
+enum ec_codec_dmic_subcmd {
+ EC_CODEC_DMIC_GET_MAX_GAIN = 0x0,
+ EC_CODEC_DMIC_SET_GAIN_IDX = 0x1,
+ EC_CODEC_DMIC_GET_GAIN_IDX = 0x2,
+ EC_CODEC_DMIC_SUBCMD_COUNT,
+};
+
+enum ec_codec_dmic_channel {
+ EC_CODEC_DMIC_CHANNEL_0 = 0x0,
+ EC_CODEC_DMIC_CHANNEL_1 = 0x1,
+ EC_CODEC_DMIC_CHANNEL_2 = 0x2,
+ EC_CODEC_DMIC_CHANNEL_3 = 0x3,
+ EC_CODEC_DMIC_CHANNEL_4 = 0x4,
+ EC_CODEC_DMIC_CHANNEL_5 = 0x5,
+ EC_CODEC_DMIC_CHANNEL_6 = 0x6,
+ EC_CODEC_DMIC_CHANNEL_7 = 0x7,
+ EC_CODEC_DMIC_CHANNEL_COUNT,
+};
+
+struct __ec_align1 ec_param_ec_codec_dmic_set_gain_idx {
+ uint8_t channel; /* enum ec_codec_dmic_channel */
+ uint8_t gain;
+ uint8_t reserved[2];
+};
+
+struct __ec_align1 ec_param_ec_codec_dmic_get_gain_idx {
+ uint8_t channel; /* enum ec_codec_dmic_channel */
+ uint8_t reserved[3];
+};
+
+struct __ec_align4 ec_param_ec_codec_dmic {
+ uint8_t cmd; /* enum ec_codec_dmic_subcmd */
+ uint8_t reserved[3];
+
+ union {
+ struct ec_param_ec_codec_dmic_set_gain_idx
+ set_gain_idx_param;
+ struct ec_param_ec_codec_dmic_get_gain_idx
+ get_gain_idx_param;
+ };
+};
+
+struct __ec_align1 ec_response_ec_codec_dmic_get_max_gain {
+ uint8_t max_gain;
+};
+
+struct __ec_align1 ec_response_ec_codec_dmic_get_gain_idx {
+ uint8_t gain;
+};
+
+/*****************************************************************************/
+
+/* Commands for I2S RX on audio codec. */
+
+#define EC_CMD_EC_CODEC_I2S_RX 0x00BE
+
+enum ec_codec_i2s_rx_subcmd {
+ EC_CODEC_I2S_RX_ENABLE = 0x0,
+ EC_CODEC_I2S_RX_DISABLE = 0x1,
+ EC_CODEC_I2S_RX_SET_SAMPLE_DEPTH = 0x2,
+ EC_CODEC_I2S_RX_SET_DAIFMT = 0x3,
+ EC_CODEC_I2S_RX_SET_BCLK = 0x4,
+ EC_CODEC_I2S_RX_SUBCMD_COUNT,
+};
+
+enum ec_codec_i2s_rx_sample_depth {
+ EC_CODEC_I2S_RX_SAMPLE_DEPTH_16 = 0x0,
+ EC_CODEC_I2S_RX_SAMPLE_DEPTH_24 = 0x1,
+ EC_CODEC_I2S_RX_SAMPLE_DEPTH_COUNT,
+};
+
+enum ec_codec_i2s_rx_daifmt {
+ EC_CODEC_I2S_RX_DAIFMT_I2S = 0x0,
+ EC_CODEC_I2S_RX_DAIFMT_RIGHT_J = 0x1,
+ EC_CODEC_I2S_RX_DAIFMT_LEFT_J = 0x2,
+ EC_CODEC_I2S_RX_DAIFMT_COUNT,
+};
+
+struct __ec_align1 ec_param_ec_codec_i2s_rx_set_sample_depth {
+ uint8_t depth;
+ uint8_t reserved[3];
+};
+
+struct __ec_align1 ec_param_ec_codec_i2s_rx_set_gain {
uint8_t left;
uint8_t right;
+ uint8_t reserved[2];
};
-struct __ec_todo_unpacked ec_param_codec_i2s_tdm {
- int16_t ch0_delay; /* 0 to 496 */
- int16_t ch1_delay; /* -1 to 496 */
- uint8_t adjacent_to_ch0;
- uint8_t adjacent_to_ch1;
+struct __ec_align1 ec_param_ec_codec_i2s_rx_set_daifmt {
+ uint8_t daifmt;
+ uint8_t reserved[3];
};
-struct __ec_todo_packed ec_param_codec_i2s {
- /* enum ec_codec_i2s_subcmd */
- uint8_t cmd;
+struct __ec_align4 ec_param_ec_codec_i2s_rx_set_bclk {
+ uint32_t bclk;
+};
+
+struct __ec_align4 ec_param_ec_codec_i2s_rx {
+ uint8_t cmd; /* enum ec_codec_i2s_rx_subcmd */
+ uint8_t reserved[3];
+
union {
- /*
- * EC_CODEC_SET_SAMPLE_DEPTH
- * Value should be one of ec_sample_depth_value.
- */
- uint8_t depth;
+ struct ec_param_ec_codec_i2s_rx_set_sample_depth
+ set_sample_depth_param;
+ struct ec_param_ec_codec_i2s_rx_set_daifmt
+ set_daifmt_param;
+ struct ec_param_ec_codec_i2s_rx_set_bclk
+ set_bclk_param;
+ };
+};
- /*
- * EC_CODEC_SET_GAIN
- * Value should be 0~43 for both channels.
- */
- struct ec_codec_i2s_gain gain;
+/*****************************************************************************/
+/* Commands for WoV on audio codec. */
+
+#define EC_CMD_EC_CODEC_WOV 0x00BF
+
+enum ec_codec_wov_subcmd {
+ EC_CODEC_WOV_SET_LANG = 0x0,
+ EC_CODEC_WOV_SET_LANG_SHM = 0x1,
+ EC_CODEC_WOV_GET_LANG = 0x2,
+ EC_CODEC_WOV_ENABLE = 0x3,
+ EC_CODEC_WOV_DISABLE = 0x4,
+ EC_CODEC_WOV_READ_AUDIO = 0x5,
+ EC_CODEC_WOV_READ_AUDIO_SHM = 0x6,
+ EC_CODEC_WOV_SUBCMD_COUNT,
+};
- /*
- * EC_CODEC_I2S_ENABLE
- * 1 to enable, 0 to disable.
- */
- uint8_t i2s_enable;
+/*
+ * @hash is the SHA256 of the whole language model.
+ * @total_len indicates the length of the whole language model.
+ * @offset is the cursor from the beginning of the model.
+ * @buf is the packet buffer.
+ * @len denotes how many bytes are in @buf.
+ */
+struct __ec_align4 ec_param_ec_codec_wov_set_lang {
+ uint8_t hash[32];
+ uint32_t total_len;
+ uint32_t offset;
+ uint8_t buf[128];
+ uint32_t len;
+};
- /*
- * EC_CODEC_I2S_SET_CONFIG
- * Value should be one of ec_i2s_config.
- */
- uint8_t i2s_config;
+struct __ec_align4 ec_param_ec_codec_wov_set_lang_shm {
+ uint8_t hash[32];
+ uint32_t total_len;
+};
- /*
- * EC_CODEC_I2S_SET_TDM_CONFIG
- * Value should be one of ec_i2s_config.
- */
- struct ec_param_codec_i2s_tdm tdm_param;
+struct __ec_align4 ec_param_ec_codec_wov {
+ uint8_t cmd; /* enum ec_codec_wov_subcmd */
+ uint8_t reserved[3];
- /*
- * EC_CODEC_I2S_SET_BCLK
- */
- uint32_t bclk;
+ union {
+ struct ec_param_ec_codec_wov_set_lang
+ set_lang_param;
+ struct ec_param_ec_codec_wov_set_lang_shm
+ set_lang_shm_param;
};
};
+struct __ec_align4 ec_response_ec_codec_wov_get_lang {
+ uint8_t hash[32];
+};
+
+struct __ec_align4 ec_response_ec_codec_wov_read_audio {
+ uint8_t buf[128];
+ uint32_t len;
+};
+
+struct __ec_align4 ec_response_ec_codec_wov_read_audio_shm {
+ uint32_t offset;
+ uint32_t len;
+};
/*****************************************************************************/
/* System commands */
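To make the new command layout concrete, a hypothetical host-side request built against the structs above (the transport call is omitted); the union member chosen must match .cmd:

struct ec_param_ec_codec_dmic req = {
	.cmd = EC_CODEC_DMIC_SET_GAIN_IDX,
	.set_gain_idx_param = {
		.channel = EC_CODEC_DMIC_CHANNEL_0,
		.gain = 4,	/* index into the gain table, value invented */
	},
};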
diff --git a/include/linux/platform_data/hsmmc-omap.h b/include/linux/platform_data/hsmmc-omap.h
index e79d238ff18f..7124a5f4bf06 100644
--- a/include/linux/platform_data/hsmmc-omap.h
+++ b/include/linux/platform_data/hsmmc-omap.h
@@ -67,9 +67,6 @@ struct omap_hsmmc_platform_data {
/* string specifying a particular variant of hardware */
char *version;
- /* if we have special card, init it using this callback */
- void (*init_card)(struct mmc_card *card);
-
const char *name;
u32 ocr_mask;
};
diff --git a/include/linux/platform_data/intel-spi.h b/include/linux/platform_data/intel-spi.h
index ebb4f332588b..7f53a5c6f35e 100644
--- a/include/linux/platform_data/intel-spi.h
+++ b/include/linux/platform_data/intel-spi.h
@@ -13,6 +13,7 @@ enum intel_spi_type {
INTEL_SPI_BYT = 1,
INTEL_SPI_LPT,
INTEL_SPI_BXT,
+ INTEL_SPI_CNL,
};
/**
diff --git a/include/linux/platform_data/spi-mt65xx.h b/include/linux/platform_data/spi-mt65xx.h
index f0e6d6483e62..65fd5ffd257c 100644
--- a/include/linux/platform_data/spi-mt65xx.h
+++ b/include/linux/platform_data/spi-mt65xx.h
@@ -11,7 +11,6 @@
/* Board specific platform_data */
struct mtk_chip_config {
- u32 cs_pol;
u32 sample_sel;
};
#endif
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 4c441be03079..e057d1fa2469 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -637,6 +637,7 @@ extern void dev_pm_put_subsys_data(struct device *dev);
* struct dev_pm_domain - power management domain representation.
*
* @ops: Power management operations associated with this domain.
+ * @start: Called when a user needs to start the device via the domain.
* @detach: Called when removing a device from the domain.
* @activate: Called before executing probe routines for bus types and drivers.
* @sync: Called after successful driver probe.
@@ -648,6 +649,7 @@ extern void dev_pm_put_subsys_data(struct device *dev);
*/
struct dev_pm_domain {
struct dev_pm_ops ops;
+ int (*start)(struct device *dev);
void (*detach)(struct device *dev, bool power_off);
int (*activate)(struct device *dev);
void (*sync)(struct device *dev);
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index baf02ff91a31..5a31c711b896 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -366,6 +366,7 @@ struct device *dev_pm_domain_attach_by_id(struct device *dev,
struct device *dev_pm_domain_attach_by_name(struct device *dev,
const char *name);
void dev_pm_domain_detach(struct device *dev, bool power_off);
+int dev_pm_domain_start(struct device *dev);
void dev_pm_domain_set(struct device *dev, struct dev_pm_domain *pd);
#else
static inline int dev_pm_domain_attach(struct device *dev, bool power_on)
@@ -383,6 +384,10 @@ static inline struct device *dev_pm_domain_attach_by_name(struct device *dev,
return NULL;
}
static inline void dev_pm_domain_detach(struct device *dev, bool power_off) {}
+static inline int dev_pm_domain_start(struct device *dev)
+{
+ return 0;
+}
static inline void dev_pm_domain_set(struct device *dev,
struct dev_pm_domain *pd) {}
#endif
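A sketch of how a bus might use the new helper at probe time (the probe function is invented); the stub above makes the call a harmless no-op when no domain is attached:

static int example_probe(struct device *dev)
{
	int ret;

	/* Power up the device through its PM domain before first access. */
	ret = dev_pm_domain_start(dev);
	if (ret)
		return ret;

	return 0;
}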
diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h
index b8197ab014f2..747861816f4f 100644
--- a/include/linux/pm_opp.h
+++ b/include/linux/pm_opp.h
@@ -22,6 +22,7 @@ struct opp_table;
enum dev_pm_opp_event {
OPP_EVENT_ADD, OPP_EVENT_REMOVE, OPP_EVENT_ENABLE, OPP_EVENT_DISABLE,
+ OPP_EVENT_ADJUST_VOLTAGE,
};
/**
@@ -113,6 +114,10 @@ int dev_pm_opp_add(struct device *dev, unsigned long freq,
void dev_pm_opp_remove(struct device *dev, unsigned long freq);
void dev_pm_opp_remove_all_dynamic(struct device *dev);
+int dev_pm_opp_adjust_voltage(struct device *dev, unsigned long freq,
+ unsigned long u_volt, unsigned long u_volt_min,
+ unsigned long u_volt_max);
+
int dev_pm_opp_enable(struct device *dev, unsigned long freq);
int dev_pm_opp_disable(struct device *dev, unsigned long freq);
@@ -242,6 +247,14 @@ static inline void dev_pm_opp_remove_all_dynamic(struct device *dev)
{
}
+static inline int
+dev_pm_opp_adjust_voltage(struct device *dev, unsigned long freq,
+ unsigned long u_volt, unsigned long u_volt_min,
+ unsigned long u_volt_max)
+{
+ return 0;
+}
+
static inline int dev_pm_opp_enable(struct device *dev, unsigned long freq)
{
return 0;
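An illustrative call for the new voltage-adjust API (frequency and voltages invented): retune the 1 GHz OPP to 0.975 V within a [0.95 V, 1.0 V] window.

static int example_recalibrate(struct device *dev)
{
	return dev_pm_opp_adjust_voltage(dev, 1000000000,
					 975000, 950000, 1000000);
}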
diff --git a/include/linux/power/smartreflex.h b/include/linux/power/smartreflex.h
index d0b37e937037..971c9264179e 100644
--- a/include/linux/power/smartreflex.h
+++ b/include/linux/power/smartreflex.h
@@ -293,6 +293,9 @@ struct omap_sr_data {
struct voltagedomain *voltdm;
};
+
+extern struct omap_sr_data omap_sr_pdata[OMAP_SR_NR];
+
#ifdef CONFIG_POWER_AVS_OMAP
/* Smartreflex module enable/disable interface */
diff --git a/include/linux/property.h b/include/linux/property.h
index 9b3d4ca3a73a..48335288c2a9 100644
--- a/include/linux/property.h
+++ b/include/linux/property.h
@@ -22,7 +22,6 @@ enum dev_prop_type {
DEV_PROP_U32,
DEV_PROP_U64,
DEV_PROP_STRING,
- DEV_PROP_MAX,
};
enum dev_dma_attr {
@@ -80,9 +79,14 @@ struct fwnode_handle *fwnode_find_reference(const struct fwnode_handle *fwnode,
const char *name,
unsigned int index);
+const char *fwnode_get_name(const struct fwnode_handle *fwnode);
+const char *fwnode_get_name_prefix(const struct fwnode_handle *fwnode);
struct fwnode_handle *fwnode_get_parent(const struct fwnode_handle *fwnode);
struct fwnode_handle *fwnode_get_next_parent(
struct fwnode_handle *fwnode);
+unsigned int fwnode_count_parents(const struct fwnode_handle *fwn);
+struct fwnode_handle *fwnode_get_nth_parent(struct fwnode_handle *fwn,
+ unsigned int depth);
struct fwnode_handle *fwnode_get_next_child_node(
const struct fwnode_handle *fwnode, struct fwnode_handle *child);
struct fwnode_handle *fwnode_get_next_available_child_node(
@@ -234,13 +238,7 @@ struct property_entry {
bool is_array;
enum dev_prop_type type;
union {
- union {
- const u8 *u8_data;
- const u16 *u16_data;
- const u32 *u32_data;
- const u64 *u64_data;
- const char * const *str;
- } pointer;
+ const void *pointer;
union {
u8 u8_data;
u16 u16_data;
@@ -252,62 +250,63 @@ struct property_entry {
};
/*
- * Note: the below four initializers for the anonymous union are carefully
+ * Note: the below initializers for the anonymous union are carefully
* crafted to avoid gcc-4.4.4's problems with initialization of anon unions
* and structs.
*/
-#define PROPERTY_ENTRY_INTEGER_ARRAY(_name_, _type_, _Type_, _val_) \
+#define __PROPERTY_ENTRY_ELEMENT_SIZE(_elem_) \
+ sizeof(((struct property_entry *)NULL)->value._elem_)
+
+#define __PROPERTY_ENTRY_ARRAY_LEN(_name_, _elem_, _Type_, _val_, _len_)\
(struct property_entry) { \
.name = _name_, \
- .length = ARRAY_SIZE(_val_) * sizeof(_type_), \
+ .length = (_len_) * __PROPERTY_ENTRY_ELEMENT_SIZE(_elem_), \
.is_array = true, \
.type = DEV_PROP_##_Type_, \
- { .pointer = { ._type_##_data = _val_ } }, \
+ { .pointer = _val_ }, \
}
-#define PROPERTY_ENTRY_U8_ARRAY(_name_, _val_) \
- PROPERTY_ENTRY_INTEGER_ARRAY(_name_, u8, U8, _val_)
-#define PROPERTY_ENTRY_U16_ARRAY(_name_, _val_) \
- PROPERTY_ENTRY_INTEGER_ARRAY(_name_, u16, U16, _val_)
-#define PROPERTY_ENTRY_U32_ARRAY(_name_, _val_) \
- PROPERTY_ENTRY_INTEGER_ARRAY(_name_, u32, U32, _val_)
-#define PROPERTY_ENTRY_U64_ARRAY(_name_, _val_) \
- PROPERTY_ENTRY_INTEGER_ARRAY(_name_, u64, U64, _val_)
-
-#define PROPERTY_ENTRY_STRING_ARRAY(_name_, _val_) \
-(struct property_entry) { \
- .name = _name_, \
- .length = ARRAY_SIZE(_val_) * sizeof(const char *), \
- .is_array = true, \
- .type = DEV_PROP_STRING, \
- { .pointer = { .str = _val_ } }, \
-}
-
-#define PROPERTY_ENTRY_INTEGER(_name_, _type_, _Type_, _val_) \
-(struct property_entry) { \
- .name = _name_, \
- .length = sizeof(_type_), \
- .type = DEV_PROP_##_Type_, \
- { .value = { ._type_##_data = _val_ } }, \
+#define PROPERTY_ENTRY_U8_ARRAY_LEN(_name_, _val_, _len_) \
+ __PROPERTY_ENTRY_ARRAY_LEN(_name_, u8_data, U8, _val_, _len_)
+#define PROPERTY_ENTRY_U16_ARRAY_LEN(_name_, _val_, _len_) \
+ __PROPERTY_ENTRY_ARRAY_LEN(_name_, u16_data, U16, _val_, _len_)
+#define PROPERTY_ENTRY_U32_ARRAY_LEN(_name_, _val_, _len_) \
+ __PROPERTY_ENTRY_ARRAY_LEN(_name_, u32_data, U32, _val_, _len_)
+#define PROPERTY_ENTRY_U64_ARRAY_LEN(_name_, _val_, _len_) \
+ __PROPERTY_ENTRY_ARRAY_LEN(_name_, u64_data, U64, _val_, _len_)
+#define PROPERTY_ENTRY_STRING_ARRAY_LEN(_name_, _val_, _len_) \
+ __PROPERTY_ENTRY_ARRAY_LEN(_name_, str, STRING, _val_, _len_)
+
+#define PROPERTY_ENTRY_U8_ARRAY(_name_, _val_) \
+ PROPERTY_ENTRY_U8_ARRAY_LEN(_name_, _val_, ARRAY_SIZE(_val_))
+#define PROPERTY_ENTRY_U16_ARRAY(_name_, _val_) \
+ PROPERTY_ENTRY_U16_ARRAY_LEN(_name_, _val_, ARRAY_SIZE(_val_))
+#define PROPERTY_ENTRY_U32_ARRAY(_name_, _val_) \
+ PROPERTY_ENTRY_U32_ARRAY_LEN(_name_, _val_, ARRAY_SIZE(_val_))
+#define PROPERTY_ENTRY_U64_ARRAY(_name_, _val_) \
+ PROPERTY_ENTRY_U64_ARRAY_LEN(_name_, _val_, ARRAY_SIZE(_val_))
+#define PROPERTY_ENTRY_STRING_ARRAY(_name_, _val_) \
+ PROPERTY_ENTRY_STRING_ARRAY_LEN(_name_, _val_, ARRAY_SIZE(_val_))
+
+#define __PROPERTY_ENTRY_ELEMENT(_name_, _elem_, _Type_, _val_) \
+(struct property_entry) { \
+ .name = _name_, \
+ .length = __PROPERTY_ENTRY_ELEMENT_SIZE(_elem_), \
+ .type = DEV_PROP_##_Type_, \
+ { .value = { ._elem_ = _val_ } }, \
}
-#define PROPERTY_ENTRY_U8(_name_, _val_) \
- PROPERTY_ENTRY_INTEGER(_name_, u8, U8, _val_)
-#define PROPERTY_ENTRY_U16(_name_, _val_) \
- PROPERTY_ENTRY_INTEGER(_name_, u16, U16, _val_)
-#define PROPERTY_ENTRY_U32(_name_, _val_) \
- PROPERTY_ENTRY_INTEGER(_name_, u32, U32, _val_)
-#define PROPERTY_ENTRY_U64(_name_, _val_) \
- PROPERTY_ENTRY_INTEGER(_name_, u64, U64, _val_)
-
-#define PROPERTY_ENTRY_STRING(_name_, _val_) \
-(struct property_entry) { \
- .name = _name_, \
- .length = sizeof(const char *), \
- .type = DEV_PROP_STRING, \
- { .value = { .str = _val_ } }, \
-}
+#define PROPERTY_ENTRY_U8(_name_, _val_) \
+ __PROPERTY_ENTRY_ELEMENT(_name_, u8_data, U8, _val_)
+#define PROPERTY_ENTRY_U16(_name_, _val_) \
+ __PROPERTY_ENTRY_ELEMENT(_name_, u16_data, U16, _val_)
+#define PROPERTY_ENTRY_U32(_name_, _val_) \
+ __PROPERTY_ENTRY_ELEMENT(_name_, u32_data, U32, _val_)
+#define PROPERTY_ENTRY_U64(_name_, _val_) \
+ __PROPERTY_ENTRY_ELEMENT(_name_, u64_data, U64, _val_)
+#define PROPERTY_ENTRY_STRING(_name_, _val_) \
+ __PROPERTY_ENTRY_ELEMENT(_name_, str, STRING, _val_)
#define PROPERTY_ENTRY_BOOL(_name_) \
(struct property_entry) { \
@@ -418,7 +417,8 @@ struct software_node {
};
bool is_software_node(const struct fwnode_handle *fwnode);
-const struct software_node *to_software_node(struct fwnode_handle *fwnode);
+const struct software_node *
+to_software_node(const struct fwnode_handle *fwnode);
struct fwnode_handle *software_node_fwnode(const struct software_node *node);
const struct software_node *
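A board-file style sketch using the reworked initializers (property names invented); the new _LEN variants let a caller expose fewer elements than the backing array holds:

static const u32 demo_rates[] = { 100000, 400000, 1000000 };

static const struct property_entry demo_props[] = {
	PROPERTY_ENTRY_U32_ARRAY("clock-rates", demo_rates),
	PROPERTY_ENTRY_U32_ARRAY_LEN("fast-rates", demo_rates, 2),
	PROPERTY_ENTRY_STRING("label", "demo"),
	{ }
};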
diff --git a/include/linux/psci.h b/include/linux/psci.h
index e2bacc6fd2f2..ebe0a881d13d 100644
--- a/include/linux/psci.h
+++ b/include/linux/psci.h
@@ -7,6 +7,7 @@
#ifndef __LINUX_PSCI_H
#define __LINUX_PSCI_H
+#include <linux/arm-smccc.h>
#include <linux/init.h>
#include <linux/types.h>
@@ -18,12 +19,6 @@ bool psci_tos_resident_on(int cpu);
int psci_cpu_suspend_enter(u32 state);
bool psci_power_state_is_valid(u32 state);
-enum psci_conduit {
- PSCI_CONDUIT_NONE,
- PSCI_CONDUIT_SMC,
- PSCI_CONDUIT_HVC,
-};
-
enum smccc_version {
SMCCC_VERSION_1_0,
SMCCC_VERSION_1_1,
@@ -38,7 +33,7 @@ struct psci_operations {
int (*affinity_info)(unsigned long target_affinity,
unsigned long lowest_affinity_level);
int (*migrate_info_type)(void);
- enum psci_conduit conduit;
+ enum arm_smccc_conduit conduit;
enum smccc_version smccc_version;
};
diff --git a/include/linux/pxa2xx_ssp.h b/include/linux/pxa2xx_ssp.h
index a5d1837e4f35..6facf27865f9 100644
--- a/include/linux/pxa2xx_ssp.h
+++ b/include/linux/pxa2xx_ssp.h
@@ -206,7 +206,7 @@ enum pxa_ssp_type {
};
struct ssp_device {
- struct platform_device *pdev;
+ struct device *dev;
struct list_head node;
struct clk *clk;
diff --git a/include/linux/rculist_bl.h b/include/linux/rculist_bl.h
index 66e73ec1aa99..0b952d06eb0b 100644
--- a/include/linux/rculist_bl.h
+++ b/include/linux/rculist_bl.h
@@ -25,34 +25,6 @@ static inline struct hlist_bl_node *hlist_bl_first_rcu(struct hlist_bl_head *h)
}
/**
- * hlist_bl_del_init_rcu - deletes entry from hash list with re-initialization
- * @n: the element to delete from the hash list.
- *
- * Note: hlist_bl_unhashed() on the node returns true after this. It is
- * useful for RCU based read lockfree traversal if the writer side
- * must know if the list entry is still hashed or already unhashed.
- *
- * In particular, it means that we can not poison the forward pointers
- * that may still be used for walking the hash list and we can only
- * zero the pprev pointer so list_unhashed() will return true after
- * this.
- *
- * The caller must take whatever precautions are necessary (such as
- * holding appropriate locks) to avoid racing with another
- * list-mutation primitive, such as hlist_bl_add_head_rcu() or
- * hlist_bl_del_rcu(), running on this same list. However, it is
- * perfectly legal to run concurrently with the _rcu list-traversal
- * primitives, such as hlist_bl_for_each_entry_rcu().
- */
-static inline void hlist_bl_del_init_rcu(struct hlist_bl_node *n)
-{
- if (!hlist_bl_unhashed(n)) {
- __hlist_bl_del(n);
- n->pprev = NULL;
- }
-}
-
-/**
* hlist_bl_del_rcu - deletes entry from hash list without re-initialization
* @n: the element to delete from the hash list.
*
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 75a2eded7aa2..0b7506330c87 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -210,7 +210,7 @@ static inline void rcu_lock_acquire(struct lockdep_map *map)
static inline void rcu_lock_release(struct lockdep_map *map)
{
- lock_release(map, 1, _THIS_IP_);
+ lock_release(map, _THIS_IP_);
}
extern struct lockdep_map rcu_lock_map;
@@ -383,6 +383,24 @@ do { \
} while (0)
/**
+ * rcu_replace_pointer() - replace an RCU pointer, returning its old value
+ * @rcu_ptr: RCU pointer, whose old value is returned
+ * @ptr: regular pointer
+ * @c: the lockdep conditions under which the dereference will take place
+ *
+ * Perform a replacement, where @rcu_ptr is an RCU-annotated
+ * pointer and @c is the lockdep argument that is passed to the
+ * rcu_dereference_protected() call used to read that pointer. The old
+ * value of @rcu_ptr is returned, and @rcu_ptr is set to @ptr.
+ */
+#define rcu_replace_pointer(rcu_ptr, ptr, c) \
+({ \
+ typeof(ptr) __tmp = rcu_dereference_protected((rcu_ptr), (c)); \
+ rcu_assign_pointer((rcu_ptr), (ptr)); \
+ __tmp; \
+})
+
+/**
* rcu_swap_protected() - swap an RCU and a regular pointer
* @rcu_ptr: RCU pointer
* @ptr: regular pointer
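A minimal usage sketch for rcu_replace_pointer(), assuming an invented global_cfg RCU pointer guarded by cfg_lock and a struct cfg carrying an rcu head:

static void example_update_cfg(struct cfg *new_cfg)
{
	struct cfg *old;

	spin_lock(&cfg_lock);
	old = rcu_replace_pointer(global_cfg, new_cfg,
				  lockdep_is_held(&cfg_lock));
	spin_unlock(&cfg_lock);

	kfree_rcu(old, rcu);	/* reclaim after a grace period */
}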
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index 9bf1dfe7781f..37b6f0c2b79d 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -84,6 +84,7 @@ static inline void rcu_scheduler_starting(void) { }
#endif /* #else #ifndef CONFIG_SRCU */
static inline void rcu_end_inkernel_boot(void) { }
static inline bool rcu_is_watching(void) { return true; }
+static inline void rcu_momentary_dyntick_idle(void) { }
/* Avoid RCU read-side critical sections leaking across. */
static inline void rcu_all_qs(void) { barrier(); }
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 18b1ed9864b0..c5147de885ec 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -37,6 +37,7 @@ void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func);
void rcu_barrier(void);
bool rcu_eqs_special_set(int cpu);
+void rcu_momentary_dyntick_idle(void);
unsigned long get_state_synchronize_rcu(void);
void cond_synchronize_rcu(unsigned long oldstate);
diff --git a/include/linux/refcount.h b/include/linux/refcount.h
index e28cce21bad6..0ac50cf62d06 100644
--- a/include/linux/refcount.h
+++ b/include/linux/refcount.h
@@ -1,9 +1,88 @@
/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Variant of atomic_t specialized for reference counts.
+ *
+ * The interface matches the atomic_t interface (to aid in porting) but only
+ * provides the few functions one should use for reference counting.
+ *
+ * Saturation semantics
+ * ====================
+ *
+ * refcount_t differs from atomic_t in that the counter saturates at
+ * REFCOUNT_SATURATED and will not move once there. This avoids wrapping the
+ * counter and causing 'spurious' use-after-free issues. In order to avoid the
+ * cost associated with introducing cmpxchg() loops into all of the saturating
+ * operations, we temporarily allow the counter to take on an unchecked value
+ * and then explicitly set it to REFCOUNT_SATURATED on detecting that underflow
+ * or overflow has occurred. Although this is racy when multiple threads
+ * access the refcount concurrently, by placing REFCOUNT_SATURATED roughly
+ * equidistant from 0 and INT_MAX we minimise the scope for error:
+ *
+ *                            INT_MAX     REFCOUNT_SATURATED   UINT_MAX
+ *   0                     (0x7fff_ffff)    (0xc000_0000)    (0xffff_ffff)
+ *   +--------------------------------+----------------+----------------+
+ *                                    <---------- bad value! ---------->
+ *
+ * (in a signed view of the world, the "bad value" range corresponds to
+ * a negative counter value).
+ *
+ * As an example, consider a refcount_inc() operation that causes the counter
+ * to overflow:
+ *
+ * int old = atomic_fetch_add_relaxed(1, r);
+ * // old is INT_MAX, refcount now INT_MIN (0x8000_0000)
+ * if (old < 0)
+ * atomic_set(r, REFCOUNT_SATURATED);
+ *
+ * If another thread also performs a refcount_inc() operation between the two
+ * atomic operations, then the count will continue to edge closer to 0. If it
+ * reaches a value of 1 before /any/ of the threads reset it to the saturated
+ * value, then a concurrent refcount_dec_and_test() may erroneously free the
+ * underlying object. Given the precise timing details involved with the
+ * round-robin scheduling of each thread manipulating the refcount and the need
+ * to hit the race multiple times in succession, there doesn't appear to be a
+ * practical avenue of attack even if using refcount_add() operations with
+ * larger increments.
+ *
+ * Memory ordering
+ * ===============
+ *
+ * Memory ordering rules are slightly relaxed wrt regular atomic_t functions
+ * and provide only what is strictly required for refcounts.
+ *
+ * The increments are fully relaxed; these will not provide ordering. The
+ * rationale is that whatever is used to obtain the object we're increasing the
+ * reference count on will provide the ordering. For locked data structures,
+ * it's the lock acquire; for RCU/lockless data structures it's the dependent
+ * load.
+ *
+ * Do note that inc_not_zero() provides a control dependency which will order
+ * future stores against the inc; this ensures we'll never modify the object
+ * if we did not in fact acquire a reference.
+ *
+ * The decrements will provide release order, such that all the prior loads and
+ * stores will be issued before; they also provide a control dependency, which
+ * will order us against the subsequent free().
+ *
+ * The control dependency is against the load of the cmpxchg (ll/sc) that
+ * succeeded. This means the stores aren't fully ordered, but this is fine
+ * because the 1->0 transition indicates no concurrency.
+ *
+ * Note that the allocator is responsible for ordering things between free()
+ * and alloc().
+ *
+ * The decrements dec_and_test() and sub_and_test() also provide acquire
+ * ordering on success.
+ *
+ */
+
#ifndef _LINUX_REFCOUNT_H
#define _LINUX_REFCOUNT_H
#include <linux/atomic.h>
+#include <linux/bug.h>
#include <linux/compiler.h>
+#include <linux/limits.h>
#include <linux/spinlock_types.h>
struct mutex;
@@ -12,7 +91,7 @@ struct mutex;
* struct refcount_t - variant of atomic_t specialized for reference counts
* @refs: atomic_t counter field
*
- * The counter saturates at UINT_MAX and will not move once
+ * The counter saturates at REFCOUNT_SATURATED and will not move once
* there. This avoids wrapping the counter and causing 'spurious'
* use-after-free bugs.
*/
@@ -21,13 +100,25 @@ typedef struct refcount_struct {
} refcount_t;
#define REFCOUNT_INIT(n) { .refs = ATOMIC_INIT(n), }
+#define REFCOUNT_MAX INT_MAX
+#define REFCOUNT_SATURATED (INT_MIN / 2)
+
+enum refcount_saturation_type {
+ REFCOUNT_ADD_NOT_ZERO_OVF,
+ REFCOUNT_ADD_OVF,
+ REFCOUNT_ADD_UAF,
+ REFCOUNT_SUB_UAF,
+ REFCOUNT_DEC_LEAK,
+};
+
+void refcount_warn_saturate(refcount_t *r, enum refcount_saturation_type t);
/**
* refcount_set - set a refcount's value
* @r: the refcount
* @n: value to which the refcount will be set
*/
-static inline void refcount_set(refcount_t *r, unsigned int n)
+static inline void refcount_set(refcount_t *r, int n)
{
atomic_set(&r->refs, n);
}
@@ -43,70 +134,168 @@ static inline unsigned int refcount_read(const refcount_t *r)
return atomic_read(&r->refs);
}
-extern __must_check bool refcount_add_not_zero_checked(unsigned int i, refcount_t *r);
-extern void refcount_add_checked(unsigned int i, refcount_t *r);
-
-extern __must_check bool refcount_inc_not_zero_checked(refcount_t *r);
-extern void refcount_inc_checked(refcount_t *r);
-
-extern __must_check bool refcount_sub_and_test_checked(unsigned int i, refcount_t *r);
-
-extern __must_check bool refcount_dec_and_test_checked(refcount_t *r);
-extern void refcount_dec_checked(refcount_t *r);
-
-#ifdef CONFIG_REFCOUNT_FULL
-
-#define refcount_add_not_zero refcount_add_not_zero_checked
-#define refcount_add refcount_add_checked
-
-#define refcount_inc_not_zero refcount_inc_not_zero_checked
-#define refcount_inc refcount_inc_checked
+/**
+ * refcount_add_not_zero - add a value to a refcount unless it is 0
+ * @i: the value to add to the refcount
+ * @r: the refcount
+ *
+ * Will saturate at REFCOUNT_SATURATED and WARN.
+ *
+ * Provides no memory ordering; it is assumed the caller has guaranteed the
+ * object memory to be stable (RCU, etc.). It does provide a control dependency
+ * and thereby orders future stores. See the comment on top.
+ *
+ * Use of this function is not recommended for the normal reference counting
+ * use case in which references are taken and released one at a time. In these
+ * cases, refcount_inc(), or one of its variants, should instead be used to
+ * increment a reference count.
+ *
+ * Return: false if the passed refcount is 0, true otherwise
+ */
+static inline __must_check bool refcount_add_not_zero(int i, refcount_t *r)
+{
+ int old = refcount_read(r);
-#define refcount_sub_and_test refcount_sub_and_test_checked
+ do {
+ if (!old)
+ break;
+ } while (!atomic_try_cmpxchg_relaxed(&r->refs, &old, old + i));
-#define refcount_dec_and_test refcount_dec_and_test_checked
-#define refcount_dec refcount_dec_checked
+ if (unlikely(old < 0 || old + i < 0))
+ refcount_warn_saturate(r, REFCOUNT_ADD_NOT_ZERO_OVF);
-#else
-# ifdef CONFIG_ARCH_HAS_REFCOUNT
-# include <asm/refcount.h>
-# else
-static inline __must_check bool refcount_add_not_zero(unsigned int i, refcount_t *r)
-{
- return atomic_add_unless(&r->refs, i, 0);
+ return old;
}
-static inline void refcount_add(unsigned int i, refcount_t *r)
+/**
+ * refcount_add - add a value to a refcount
+ * @i: the value to add to the refcount
+ * @r: the refcount
+ *
+ * Similar to atomic_add(), but will saturate at REFCOUNT_SATURATED and WARN.
+ *
+ * Provides no memory ordering; it is assumed the caller has guaranteed the
+ * object memory to be stable (RCU, etc.). It does provide a control dependency
+ * and thereby orders future stores. See the comment on top.
+ *
+ * Use of this function is not recommended for the normal reference counting
+ * use case in which references are taken and released one at a time. In these
+ * cases, refcount_inc(), or one of its variants, should instead be used to
+ * increment a reference count.
+ */
+static inline void refcount_add(int i, refcount_t *r)
{
- atomic_add(i, &r->refs);
+ int old = atomic_fetch_add_relaxed(i, &r->refs);
+
+ if (unlikely(!old))
+ refcount_warn_saturate(r, REFCOUNT_ADD_UAF);
+ else if (unlikely(old < 0 || old + i < 0))
+ refcount_warn_saturate(r, REFCOUNT_ADD_OVF);
}
+/**
+ * refcount_inc_not_zero - increment a refcount unless it is 0
+ * @r: the refcount to increment
+ *
+ * Similar to atomic_inc_not_zero(), but will saturate at REFCOUNT_SATURATED
+ * and WARN.
+ *
+ * Provides no memory ordering; it is assumed the caller has guaranteed the
+ * object memory to be stable (RCU, etc.). It does provide a control dependency
+ * and thereby orders future stores. See the comment on top.
+ *
+ * Return: true if the increment was successful, false otherwise
+ */
static inline __must_check bool refcount_inc_not_zero(refcount_t *r)
{
- return atomic_add_unless(&r->refs, 1, 0);
+ return refcount_add_not_zero(1, r);
}
+/**
+ * refcount_inc - increment a refcount
+ * @r: the refcount to increment
+ *
+ * Similar to atomic_inc(), but will saturate at REFCOUNT_SATURATED and WARN.
+ *
+ * Provides no memory ordering; it is assumed the caller already has a
+ * reference on the object.
+ *
+ * Will WARN if the refcount is 0, as this represents a possible use-after-free
+ * condition.
+ */
static inline void refcount_inc(refcount_t *r)
{
- atomic_inc(&r->refs);
+ refcount_add(1, r);
}
-static inline __must_check bool refcount_sub_and_test(unsigned int i, refcount_t *r)
+/**
+ * refcount_sub_and_test - subtract from a refcount and test if it is 0
+ * @i: amount to subtract from the refcount
+ * @r: the refcount
+ *
+ * Similar to atomic_dec_and_test(), but it will WARN, return false and
+ * ultimately leak on underflow and will fail to decrement when saturated
+ * at REFCOUNT_SATURATED.
+ *
+ * Provides release memory ordering, such that prior loads and stores are done
+ * before, and provides an acquire ordering on success such that free()
+ * must come after.
+ *
+ * Use of this function is not recommended for the normal reference counting
+ * use case in which references are taken and released one at a time. In these
+ * cases, refcount_dec(), or one of its variants, should instead be used to
+ * decrement a reference count.
+ *
+ * Return: true if the resulting refcount is 0, false otherwise
+ */
+static inline __must_check bool refcount_sub_and_test(int i, refcount_t *r)
{
- return atomic_sub_and_test(i, &r->refs);
+ int old = atomic_fetch_sub_release(i, &r->refs);
+
+ if (old == i) {
+ smp_acquire__after_ctrl_dep();
+ return true;
+ }
+
+ if (unlikely(old < 0 || old - i < 0))
+ refcount_warn_saturate(r, REFCOUNT_SUB_UAF);
+
+ return false;
}
+/**
+ * refcount_dec_and_test - decrement a refcount and test if it is 0
+ * @r: the refcount
+ *
+ * Similar to atomic_dec_and_test(), it will WARN on underflow and fail to
+ * decrement when saturated at REFCOUNT_SATURATED.
+ *
+ * Provides release memory ordering, such that prior loads and stores are done
+ * before, and provides an acquire ordering on success such that free()
+ * must come after.
+ *
+ * Return: true if the resulting refcount is 0, false otherwise
+ */
static inline __must_check bool refcount_dec_and_test(refcount_t *r)
{
- return atomic_dec_and_test(&r->refs);
+ return refcount_sub_and_test(1, r);
}
+/**
+ * refcount_dec - decrement a refcount
+ * @r: the refcount
+ *
+ * Similar to atomic_dec(), it will WARN on underflow and fail to decrement
+ * when saturated at REFCOUNT_SATURATED.
+ *
+ * Provides release memory ordering, such that prior loads and stores are done
+ * before.
+ */
static inline void refcount_dec(refcount_t *r)
{
- atomic_dec(&r->refs);
+ if (unlikely(atomic_fetch_sub_release(1, &r->refs) <= 1))
+ refcount_warn_saturate(r, REFCOUNT_DEC_LEAK);
}
-# endif /* !CONFIG_ARCH_HAS_REFCOUNT */
-#endif /* CONFIG_REFCOUNT_FULL */
extern __must_check bool refcount_dec_if_one(refcount_t *r);
extern __must_check bool refcount_dec_not_one(refcount_t *r);
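For reference, the usual get/put pattern these primitives target (struct foo is invented); the saturation and WARN paths above fire only on misuse such as over- or underflow:

struct foo {
	refcount_t ref;
};

static struct foo *foo_get(struct foo *f)
{
	refcount_inc(&f->ref);		/* caller must already hold a ref */
	return f;
}

static void foo_put(struct foo *f)
{
	if (refcount_dec_and_test(&f->ref))
		kfree(f);		/* 1 -> 0: last reference dropped */
}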
diff --git a/include/linux/regulator/ab8500.h b/include/linux/regulator/ab8500.h
index 7cf8f797e13a..3ab1ddf151a2 100644
--- a/include/linux/regulator/ab8500.h
+++ b/include/linux/regulator/ab8500.h
@@ -37,14 +37,11 @@ enum ab8505_regulator_id {
AB8505_LDO_AUX6,
AB8505_LDO_INTCORE,
AB8505_LDO_ADC,
- AB8505_LDO_USB,
AB8505_LDO_AUDIO,
AB8505_LDO_ANAMIC1,
AB8505_LDO_ANAMIC2,
AB8505_LDO_AUX8,
AB8505_LDO_ANA,
- AB8505_SYSCLKREQ_2,
- AB8505_SYSCLKREQ_4,
AB8505_NUM_REGULATORS,
};
diff --git a/include/linux/regulator/fixed.h b/include/linux/regulator/fixed.h
index d44ce5f18a56..55319943fcc5 100644
--- a/include/linux/regulator/fixed.h
+++ b/include/linux/regulator/fixed.h
@@ -36,6 +36,7 @@ struct fixed_voltage_config {
const char *input_supply;
int microvolts;
unsigned startup_delay;
+ unsigned int off_on_delay;
unsigned enabled_at_boot:1;
struct regulator_init_data *init_data;
};
diff --git a/include/linux/rtsx_pci.h b/include/linux/rtsx_pci.h
index f87da30a58b1..65b8142a7fed 100644
--- a/include/linux/rtsx_pci.h
+++ b/include/linux/rtsx_pci.h
@@ -1262,6 +1262,7 @@ struct rtsx_pcr {
#define PID_5250 0x5250
#define PID_525A 0x525A
#define PID_5260 0x5260
+#define PID_5261 0x5261
#define CHK_PCI_PID(pcr, pid) ((pcr)->pci->device == (pid))
#define PCI_VID(pcr) ((pcr)->pci->vendor)
diff --git a/include/linux/rwlock_api_smp.h b/include/linux/rwlock_api_smp.h
index 86ebb4bf9c6e..abfb53ab11be 100644
--- a/include/linux/rwlock_api_smp.h
+++ b/include/linux/rwlock_api_smp.h
@@ -215,14 +215,14 @@ static inline void __raw_write_lock(rwlock_t *lock)
static inline void __raw_write_unlock(rwlock_t *lock)
{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ rwlock_release(&lock->dep_map, _RET_IP_);
do_raw_write_unlock(lock);
preempt_enable();
}
static inline void __raw_read_unlock(rwlock_t *lock)
{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ rwlock_release(&lock->dep_map, _RET_IP_);
do_raw_read_unlock(lock);
preempt_enable();
}
@@ -230,7 +230,7 @@ static inline void __raw_read_unlock(rwlock_t *lock)
static inline void
__raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ rwlock_release(&lock->dep_map, _RET_IP_);
do_raw_read_unlock(lock);
local_irq_restore(flags);
preempt_enable();
@@ -238,7 +238,7 @@ __raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
static inline void __raw_read_unlock_irq(rwlock_t *lock)
{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ rwlock_release(&lock->dep_map, _RET_IP_);
do_raw_read_unlock(lock);
local_irq_enable();
preempt_enable();
@@ -246,7 +246,7 @@ static inline void __raw_read_unlock_irq(rwlock_t *lock)
static inline void __raw_read_unlock_bh(rwlock_t *lock)
{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ rwlock_release(&lock->dep_map, _RET_IP_);
do_raw_read_unlock(lock);
__local_bh_enable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
}
@@ -254,7 +254,7 @@ static inline void __raw_read_unlock_bh(rwlock_t *lock)
static inline void __raw_write_unlock_irqrestore(rwlock_t *lock,
unsigned long flags)
{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ rwlock_release(&lock->dep_map, _RET_IP_);
do_raw_write_unlock(lock);
local_irq_restore(flags);
preempt_enable();
@@ -262,7 +262,7 @@ static inline void __raw_write_unlock_irqrestore(rwlock_t *lock,
static inline void __raw_write_unlock_irq(rwlock_t *lock)
{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ rwlock_release(&lock->dep_map, _RET_IP_);
do_raw_write_unlock(lock);
local_irq_enable();
preempt_enable();
@@ -270,7 +270,7 @@ static inline void __raw_write_unlock_irq(rwlock_t *lock)
static inline void __raw_write_unlock_bh(rwlock_t *lock)
{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ rwlock_release(&lock->dep_map, _RET_IP_);
do_raw_write_unlock(lock);
__local_bh_enable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
}
diff --git a/include/linux/sbitmap.h b/include/linux/sbitmap.h
index a986ac12a848..e40d019c3d9d 100644
--- a/include/linux/sbitmap.h
+++ b/include/linux/sbitmap.h
@@ -216,15 +216,6 @@ int sbitmap_get_shallow(struct sbitmap *sb, unsigned int alloc_hint,
*/
bool sbitmap_any_bit_set(const struct sbitmap *sb);
-/**
- * sbitmap_any_bit_clear() - Check for an unset bit in a &struct
- * sbitmap.
- * @sb: Bitmap to check.
- *
- * Return: true if any bit in the bitmap is clear, false otherwise.
- */
-bool sbitmap_any_bit_clear(const struct sbitmap *sb);
-
#define SB_NR_TO_INDEX(sb, bitnr) ((bitnr) >> (sb)->shift)
#define SB_NR_TO_BIT(sb, bitnr) ((bitnr) & ((1U << (sb)->shift) - 1U))
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 67a1d86981a9..07e68d9f5dc4 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -250,16 +250,21 @@ struct prev_cputime {
enum vtime_state {
/* Task is sleeping or running in a CPU with VTIME inactive: */
VTIME_INACTIVE = 0,
- /* Task runs in userspace in a CPU with VTIME active: */
- VTIME_USER,
+ /* Task is idle */
+ VTIME_IDLE,
/* Task runs in kernelspace in a CPU with VTIME active: */
VTIME_SYS,
+ /* Task runs in userspace in a CPU with VTIME active: */
+ VTIME_USER,
+ /* Task runs as a guest in a CPU with VTIME active: */
+ VTIME_GUEST,
};
struct vtime {
seqcount_t seqcount;
unsigned long long starttime;
enum vtime_state state;
+ unsigned int cpu;
u64 utime;
u64 stime;
u64 gtime;
@@ -1054,6 +1059,8 @@ struct task_struct {
#endif
struct list_head pi_state_list;
struct futex_pi_state *pi_state_cache;
+ struct mutex futex_exit_mutex;
+ unsigned int futex_state;
#endif
#ifdef CONFIG_PERF_EVENTS
struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
@@ -1442,7 +1449,6 @@ extern struct pid *cad_pid;
*/
#define PF_IDLE 0x00000002 /* I am an IDLE thread */
#define PF_EXITING 0x00000004 /* Getting shut down */
-#define PF_EXITPIDONE 0x00000008 /* PI exit done on shut down */
#define PF_VCPU 0x00000010 /* I'm a virtual CPU */
#define PF_WQ_WORKER 0x00000020 /* I'm a workqueue worker */
#define PF_FORKNOEXEC 0x00000040 /* Forked but didn't exec */
@@ -1468,6 +1474,7 @@ extern struct pid *cad_pid;
#define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_mask */
#define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */
#define PF_MEMALLOC_NOCMA 0x10000000 /* All allocation request will have _GFP_MOVABLE cleared */
+#define PF_IO_WORKER 0x20000000 /* Task is an IO worker */
#define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */
#define PF_SUSPEND_TASK 0x80000000 /* This thread called freeze_processes() and should not be frozen */
diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
index e6770012db18..c49257a3b510 100644
--- a/include/linux/sched/mm.h
+++ b/include/linux/sched/mm.h
@@ -117,8 +117,10 @@ extern struct mm_struct *get_task_mm(struct task_struct *task);
* succeeds.
*/
extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
-/* Remove the current tasks stale references to the old mm_struct */
-extern void mm_release(struct task_struct *, struct mm_struct *);
+/* Remove the current task's stale references to the old mm_struct on exit() */
+extern void exit_mm_release(struct task_struct *, struct mm_struct *);
+/* Remove the current task's stale references to the old mm_struct on exec() */
+extern void exec_mm_release(struct task_struct *, struct mm_struct *);
#ifdef CONFIG_MEMCG
extern void mm_update_next_owner(struct mm_struct *mm);
diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h
index 4b1c3b664f51..f1879884238e 100644
--- a/include/linux/sched/task.h
+++ b/include/linux/sched/task.h
@@ -26,6 +26,9 @@ struct kernel_clone_args {
unsigned long stack;
unsigned long stack_size;
unsigned long tls;
+ pid_t *set_tid;
+ /* Number of elements in *set_tid */
+ size_t set_tid_size;
};
/*
diff --git a/include/linux/security.h b/include/linux/security.h
index 9df7547afc0c..06ff66834501 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -1895,5 +1895,42 @@ static inline void security_bpf_prog_free(struct bpf_prog_aux *aux)
#endif /* CONFIG_SECURITY */
#endif /* CONFIG_BPF_SYSCALL */
-#endif /* ! __LINUX_SECURITY_H */
+#ifdef CONFIG_PERF_EVENTS
+struct perf_event_attr;
+struct perf_event;
+
+#ifdef CONFIG_SECURITY
+extern int security_perf_event_open(struct perf_event_attr *attr, int type);
+extern int security_perf_event_alloc(struct perf_event *event);
+extern void security_perf_event_free(struct perf_event *event);
+extern int security_perf_event_read(struct perf_event *event);
+extern int security_perf_event_write(struct perf_event *event);
+#else
+static inline int security_perf_event_open(struct perf_event_attr *attr,
+ int type)
+{
+ return 0;
+}
+
+static inline int security_perf_event_alloc(struct perf_event *event)
+{
+ return 0;
+}
+
+static inline void security_perf_event_free(struct perf_event *event)
+{
+}
+
+static inline int security_perf_event_read(struct perf_event *event)
+{
+ return 0;
+}
+static inline int security_perf_event_write(struct perf_event *event)
+{
+ return 0;
+}
+#endif /* CONFIG_SECURITY */
+#endif /* CONFIG_PERF_EVENTS */
+
+#endif /* ! __LINUX_SECURITY_H */
diff --git a/include/linux/sed-opal.h b/include/linux/sed-opal.h
index 53c28d750a45..1ac0d712a9c3 100644
--- a/include/linux/sed-opal.h
+++ b/include/linux/sed-opal.h
@@ -42,6 +42,7 @@ static inline bool is_sed_ioctl(unsigned int cmd)
case IOC_OPAL_PSID_REVERT_TPR:
case IOC_OPAL_MBR_DONE:
case IOC_OPAL_WRITE_SHADOW_MBR:
+ case IOC_OPAL_GENERIC_TABLE_RW:
return true;
}
return false;
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index bcf4cf26b8c8..0491d963d47e 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -79,7 +79,7 @@ static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
local_irq_save(flags);
seqcount_acquire_read(&l->dep_map, 0, 0, _RET_IP_);
- seqcount_release(&l->dep_map, 1, _RET_IP_);
+ seqcount_release(&l->dep_map, _RET_IP_);
local_irq_restore(flags);
}
@@ -384,7 +384,7 @@ static inline void write_seqcount_begin(seqcount_t *s)
static inline void write_seqcount_end(seqcount_t *s)
{
- seqcount_release(&s->dep_map, 1, _RET_IP_);
+ seqcount_release(&s->dep_map, _RET_IP_);
raw_write_seqcount_end(s);
}
diff --git a/include/linux/sfp.h b/include/linux/sfp.h
index 1c35428e98bc..487fd9412d10 100644
--- a/include/linux/sfp.h
+++ b/include/linux/sfp.h
@@ -428,6 +428,10 @@ enum {
SFP_TEC_CUR = 0x6c,
SFP_STATUS = 0x6e,
+ SFP_STATUS_TX_DISABLE = BIT(7),
+ SFP_STATUS_TX_DISABLE_FORCE = BIT(6),
+ SFP_STATUS_TX_FAULT = BIT(2),
+ SFP_STATUS_RX_LOS = BIT(1),
SFP_ALARM0 = 0x70,
SFP_ALARM0_TEMP_HIGH = BIT(7),
SFP_ALARM0_TEMP_LOW = BIT(6),
@@ -508,10 +512,11 @@ int sfp_get_module_eeprom(struct sfp_bus *bus, struct ethtool_eeprom *ee,
u8 *data);
void sfp_upstream_start(struct sfp_bus *bus);
void sfp_upstream_stop(struct sfp_bus *bus);
-struct sfp_bus *sfp_register_upstream(struct fwnode_handle *fwnode,
- void *upstream,
- const struct sfp_upstream_ops *ops);
-void sfp_unregister_upstream(struct sfp_bus *bus);
+void sfp_bus_put(struct sfp_bus *bus);
+struct sfp_bus *sfp_bus_find_fwnode(struct fwnode_handle *fwnode);
+int sfp_bus_add_upstream(struct sfp_bus *bus, void *upstream,
+ const struct sfp_upstream_ops *ops);
+void sfp_bus_del_upstream(struct sfp_bus *bus);
#else
static inline int sfp_parse_port(struct sfp_bus *bus,
const struct sfp_eeprom_id *id,
@@ -553,14 +558,22 @@ static inline void sfp_upstream_stop(struct sfp_bus *bus)
{
}
-static inline struct sfp_bus *sfp_register_upstream(
- struct fwnode_handle *fwnode, void *upstream,
- const struct sfp_upstream_ops *ops)
+static inline void sfp_bus_put(struct sfp_bus *bus)
{
- return (struct sfp_bus *)-1;
}
-static inline void sfp_unregister_upstream(struct sfp_bus *bus)
+static inline struct sfp_bus *sfp_bus_find_fwnode(struct fwnode_handle *fwnode)
+{
+ return NULL;
+}
+
+static inline int sfp_bus_add_upstream(struct sfp_bus *bus, void *upstream,
+ const struct sfp_upstream_ops *ops)
+{
+ return 0;
+}
+
+static inline void sfp_bus_del_upstream(struct sfp_bus *bus)
{
}
#endif
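The sfp_register_upstream()/sfp_unregister_upstream() pair is replaced above by a find/add/del/put quartet. A minimal sketch of the new upstream-driver flow, assuming (as the lookup/add/put pattern suggests) that sfp_bus_add_upstream() takes its own bus reference, so the reference returned by sfp_bus_find_fwnode() can be dropped unconditionally; foo_priv and foo_sfp_ops are hypothetical:

/* Hedged sketch: bind a hypothetical MAC driver to its SFP cage. */
static int foo_sfp_attach(struct foo_priv *priv, struct fwnode_handle *fwnode)
{
	struct sfp_bus *bus;
	int ret;

	bus = sfp_bus_find_fwnode(fwnode);
	if (IS_ERR(bus))
		return PTR_ERR(bus);

	priv->sfp_bus = bus;
	ret = sfp_bus_add_upstream(bus, priv, &foo_sfp_ops);
	sfp_bus_put(bus);	/* add_upstream is assumed to hold its own ref */

	return ret;
}

static void foo_sfp_detach(struct foo_priv *priv)
{
	sfp_bus_del_upstream(priv->sfp_bus);
}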
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 64a395c7f689..eceb3607864b 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1795,7 +1795,7 @@ static inline struct sk_buff *skb_peek_next(struct sk_buff *skb,
*/
static inline struct sk_buff *skb_peek_tail(const struct sk_buff_head *list_)
{
- struct sk_buff *skb = list_->prev;
+ struct sk_buff *skb = READ_ONCE(list_->prev);
if (skb == (struct sk_buff *)list_)
skb = NULL;
@@ -1861,7 +1861,9 @@ static inline void __skb_insert(struct sk_buff *newsk,
struct sk_buff *prev, struct sk_buff *next,
struct sk_buff_head *list)
{
- /* see skb_queue_empty_lockless() for the opposite READ_ONCE() */
+ /* See skb_queue_empty_lockless() and skb_peek_tail()
+ * for the opposite READ_ONCE()
+ */
WRITE_ONCE(newsk->next, next);
WRITE_ONCE(newsk->prev, prev);
WRITE_ONCE(next->prev, newsk);
@@ -2277,12 +2279,12 @@ static inline void *pskb_pull(struct sk_buff *skb, unsigned int len)
return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
}
-static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
+static inline bool pskb_may_pull(struct sk_buff *skb, unsigned int len)
{
if (likely(len <= skb_headlen(skb)))
- return 1;
+ return true;
if (unlikely(len > skb->len))
- return 0;
+ return false;
return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
}
@@ -4169,12 +4171,18 @@ static inline void skb_ext_reset(struct sk_buff *skb)
skb->active_extensions = 0;
}
}
+
+static inline bool skb_has_extensions(struct sk_buff *skb)
+{
+ return unlikely(skb->active_extensions);
+}
#else
static inline void skb_ext_put(struct sk_buff *skb) {}
static inline void skb_ext_reset(struct sk_buff *skb) {}
static inline void skb_ext_del(struct sk_buff *skb, int unused) {}
static inline void __skb_ext_copy(struct sk_buff *d, const struct sk_buff *s) {}
static inline void skb_ext_copy(struct sk_buff *dst, const struct sk_buff *s) {}
+static inline bool skb_has_extensions(struct sk_buff *skb) { return false; }
#endif /* CONFIG_SKB_EXTENSIONS */
static inline void nf_reset_ct(struct sk_buff *skb)
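The READ_ONCE() in skb_peek_tail() pairs with the WRITE_ONCE() stores in __skb_insert() above, so a reader may peek without holding the queue lock. A hedged sketch of a consumer-side check under that assumption (the caller must tolerate a stale answer):

/* Safe without list->lock: pairs with WRITE_ONCE() in __skb_insert(). */
static bool foo_has_backlog(const struct sk_buff_head *q)
{
	/* May race with producers; a momentarily stale result is fine here. */
	return skb_peek_tail(q) != NULL;
}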
diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h
index ce7055259877..6cb077b646a5 100644
--- a/include/linux/skmsg.h
+++ b/include/linux/skmsg.h
@@ -28,13 +28,14 @@ struct sk_msg_sg {
u32 end;
u32 size;
u32 copybreak;
- bool copy[MAX_MSG_FRAGS];
+ unsigned long copy;
/* The extra element is used for chaining the front and sections when
* the list becomes partitioned (e.g. end < start). The crypto APIs
* require the chaining.
*/
struct scatterlist data[MAX_MSG_FRAGS + 1];
};
+static_assert(BITS_PER_LONG >= MAX_MSG_FRAGS);
/* UAPI in filter.c depends on struct sk_msg_sg being first element. */
struct sk_msg {
@@ -230,7 +231,7 @@ static inline void sk_msg_compute_data_pointers(struct sk_msg *msg)
{
struct scatterlist *sge = sk_msg_elem(msg, msg->sg.start);
- if (msg->sg.copy[msg->sg.start]) {
+ if (test_bit(msg->sg.start, &msg->sg.copy)) {
msg->data = NULL;
msg->data_end = NULL;
} else {
@@ -249,7 +250,7 @@ static inline void sk_msg_page_add(struct sk_msg *msg, struct page *page,
sg_set_page(sge, page, len, offset);
sg_unmark_end(sge);
- msg->sg.copy[msg->sg.end] = true;
+ __set_bit(msg->sg.end, &msg->sg.copy);
msg->sg.size += len;
sk_msg_iter_next(msg, end);
}
@@ -257,7 +258,10 @@ static inline void sk_msg_page_add(struct sk_msg *msg, struct page *page,
static inline void sk_msg_sg_copy(struct sk_msg *msg, u32 i, bool copy_state)
{
do {
- msg->sg.copy[i] = copy_state;
+ if (copy_state)
+ __set_bit(i, &msg->sg.copy);
+ else
+ __clear_bit(i, &msg->sg.copy);
sk_msg_iter_var_next(i);
if (i == msg->sg.end)
break;
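The conversion above replaces a bool array with one bitmask word, which the static_assert guarantees can hold MAX_MSG_FRAGS bits. A freestanding sketch of the same idiom with hypothetical names (not part of the patch); the __set_bit()/__clear_bit() variants are non-atomic, so the caller serializes updates:

struct frag_state {
	unsigned long copy;	/* bit i set => slot i needs a copy */
};

static inline void frag_set_copy(struct frag_state *s, u32 i, bool on)
{
	if (on)
		__set_bit(i, &s->copy);
	else
		__clear_bit(i, &s->copy);
}

static inline bool frag_needs_copy(const struct frag_state *s, u32 i)
{
	return test_bit(i, &s->copy);
}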
diff --git a/include/linux/socket.h b/include/linux/socket.h
index 4049d9755cf1..09c32a21555b 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -392,6 +392,9 @@ extern int __sys_recvfrom(int fd, void __user *ubuf, size_t size,
extern int __sys_sendto(int fd, void __user *buff, size_t len,
unsigned int flags, struct sockaddr __user *addr,
int addr_len);
+extern int __sys_accept4_file(struct file *file, unsigned file_flags,
+ struct sockaddr __user *upeer_sockaddr,
+ int __user *upeer_addrlen, int flags);
extern int __sys_accept4(int fd, struct sockaddr __user *upeer_sockaddr,
int __user *upeer_addrlen, int flags);
extern int __sys_socket(int family, int type, int protocol);
diff --git a/include/linux/soundwire/sdw.h b/include/linux/soundwire/sdw.h
index ea787201c3ac..28745b9ba279 100644
--- a/include/linux/soundwire/sdw.h
+++ b/include/linux/soundwire/sdw.h
@@ -40,9 +40,6 @@ struct sdw_slave;
#define SDW_VALID_PORT_RANGE(n) ((n) <= 14 && (n) >= 1)
-#define SDW_DAI_ID_RANGE_START 100
-#define SDW_DAI_ID_RANGE_END 200
-
enum {
SDW_PORT_DIRN_SINK = 0,
SDW_PORT_DIRN_SOURCE,
@@ -406,6 +403,8 @@ int sdw_slave_read_prop(struct sdw_slave *slave);
* SDW Slave Structures and APIs
*/
+#define SDW_IGNORED_UNIQUE_ID 0xFF
+
/**
* struct sdw_slave_id - Slave ID
* @mfg_id: MIPI Manufacturer ID
@@ -421,7 +420,7 @@ struct sdw_slave_id {
__u16 mfg_id;
__u16 part_id;
__u8 class_id;
- __u8 unique_id:4;
+ __u8 unique_id;
__u8 sdw_version:4;
};
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index af4f265d0f67..98fe8663033a 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -13,6 +13,7 @@
#include <linux/completion.h>
#include <linux/scatterlist.h>
#include <linux/gpio/consumer.h>
+#include <linux/ptp_clock_kernel.h>
struct dma_chan;
struct property_entry;
@@ -90,6 +91,22 @@ void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
SPI_STATISTICS_ADD_TO_FIELD(stats, field, 1)
/**
+ * struct spi_delay - SPI delay information
+ * @value: Value for the delay
+ * @unit: Unit for the delay
+ */
+struct spi_delay {
+#define SPI_DELAY_UNIT_USECS 0
+#define SPI_DELAY_UNIT_NSECS 1
+#define SPI_DELAY_UNIT_SCK 2
+ u16 value;
+ u8 unit;
+};
+
+extern int spi_delay_to_ns(struct spi_delay *_delay, struct spi_transfer *xfer);
+extern int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer);
+
+/**
* struct spi_device - Controller side proxy for an SPI slave device
* @dev: Driver model representation of the device.
* @controller: SPI controller used with the device.
@@ -123,7 +140,7 @@ void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
* the spi_master.
* @cs_gpiod: gpio descriptor of the chipselect line (optional, NULL when
* not using a GPIO line)
- * @word_delay_usecs: microsecond delay to be inserted between consecutive
+ * @word_delay: delay to be inserted between consecutive
* words of a transfer
*
* @statistics: statistics for the spi_device
@@ -173,7 +190,7 @@ struct spi_device {
const char *driver_override;
int cs_gpio; /* LEGACY: chip select gpio */
struct gpio_desc *cs_gpiod; /* chip select gpio desc */
- uint8_t word_delay_usecs; /* inter-word delay */
+ struct spi_delay word_delay; /* inter-word delay */
/* the statistics */
struct spi_statistics statistics;
@@ -390,6 +407,11 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
* controller has native support for memory like operations.
* @unprepare_message: undo any work done by prepare_message().
* @slave_abort: abort the ongoing transfer request on an SPI slave controller
+ * @cs_setup: delay to be introduced by the controller after CS is asserted
+ * @cs_hold: delay to be introduced by the controller before CS is deasserted
+ * @cs_inactive: delay to be introduced by the controller after CS is
+ * deasserted. If @cs_change_delay is used from @spi_transfer, then the
+ * two delays will be added up.
* @cs_gpios: LEGACY: array of GPIO descs to use as chip select lines; one per
* CS number. Any individual value may be -ENOENT for CS lines that
* are not GPIOs (driven by the SPI controller itself). Use the cs_gpiods
@@ -409,6 +431,12 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
* @fw_translate_cs: If the boot firmware uses a different numbering scheme
* than what Linux expects, this optional hook can be used to translate
* between the two.
+ * @ptp_sts_supported: If the driver sets this to true, it must provide a
+ * time snapshot in @spi_transfer->ptp_sts as close as possible to the
+ * moment in time when @spi_transfer->ptp_sts_word_pre and
+ * @spi_transfer->ptp_sts_word_post were transmitted.
+ * If the driver does not set this, the SPI core takes the snapshot as
+ * close to the driver hand-over as possible.
*
* Each SPI controller can communicate with one or more @spi_device
* children. These make a small bus, sharing MOSI, MISO and SCK signals
@@ -502,8 +530,8 @@ struct spi_controller {
* to configure specific CS timing through spi_set_cs_timing() after
* spi_setup().
*/
- void (*set_cs_timing)(struct spi_device *spi, u8 setup_clk_cycles,
- u8 hold_clk_cycles, u8 inactive_clk_cycles);
+ int (*set_cs_timing)(struct spi_device *spi, struct spi_delay *setup,
+ struct spi_delay *hold, struct spi_delay *inactive);
/* bidirectional bulk transfers
*
@@ -587,6 +615,11 @@ struct spi_controller {
/* Optimized handlers for SPI memory-like operations. */
const struct spi_controller_mem_ops *mem_ops;
+ /* CS delays */
+ struct spi_delay cs_setup;
+ struct spi_delay cs_hold;
+ struct spi_delay cs_inactive;
+
/* gpio chip select */
int *cs_gpios;
struct gpio_desc **cs_gpiods;
@@ -604,6 +637,15 @@ struct spi_controller {
void *dummy_tx;
int (*fw_translate_cs)(struct spi_controller *ctlr, unsigned cs);
+
+ /*
+ * Driver sets this field to indicate it is able to snapshot SPI
+ * transfers (needed e.g. for reading the time of POSIX clocks)
+ */
+ bool ptp_sts_supported;
+
+ /* Interrupt enable state during PTP system timestamping */
+ unsigned long irq_flags;
};
static inline void *spi_controller_get_devdata(struct spi_controller *ctlr)
@@ -644,6 +686,14 @@ extern struct spi_message *spi_get_next_queued_message(struct spi_controller *ct
extern void spi_finalize_current_message(struct spi_controller *ctlr);
extern void spi_finalize_current_transfer(struct spi_controller *ctlr);
+/* Helper calls for driver to timestamp transfer */
+void spi_take_timestamp_pre(struct spi_controller *ctlr,
+ struct spi_transfer *xfer,
+ const void *tx, bool irqs_off);
+void spi_take_timestamp_post(struct spi_controller *ctlr,
+ struct spi_transfer *xfer,
+ const void *tx, bool irqs_off);
+
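A rough sketch of how a PIO controller driver might bracket its transmit loop with these helpers, assuming it has set ptp_sts_supported and that the helpers internally compare the current tx position against ptp_sts_word_pre/post; foo_write_byte() is hypothetical:

static int foo_transfer_one(struct spi_controller *ctlr,
			    struct spi_device *spi,
			    struct spi_transfer *xfer)
{
	const u8 *tx = xfer->tx_buf;
	unsigned int i;

	for (i = 0; i < xfer->len; i++) {
		/* Snapshot just before/after the requested words go out. */
		spi_take_timestamp_pre(ctlr, xfer, &tx[i], false);
		foo_write_byte(ctlr, tx[i]);
		spi_take_timestamp_post(ctlr, xfer, &tx[i], false);
	}
	return 0;
}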
/* the spi driver core manages memory for the spi_controller classdev */
extern struct spi_controller *__spi_alloc_controller(struct device *host,
unsigned int size, bool slave);
@@ -739,13 +789,13 @@ extern void spi_res_release(struct spi_controller *ctlr,
* @cs_change: affects chipselect after this transfer completes
* @cs_change_delay: delay between cs deassert and assert when
* @cs_change is set and @spi_transfer is not the last in @spi_message
- * @cs_change_delay_unit: unit of cs_change_delay
+ * @delay: delay to be introduced after this transfer before
+ * (optionally) changing the chipselect status, then starting
+ * the next transfer or completing this @spi_message.
* @delay_usecs: microseconds to delay after this transfer before
* (optionally) changing the chipselect status, then starting
* the next transfer or completing this @spi_message.
- * @word_delay_usecs: microseconds to inter word delay after each word size
- * (set by bits_per_word) transmission.
- * @word_delay: clock cycles to inter word delay after each word size
+ * @word_delay: inter word delay to be introduced after each word size
* (set by bits_per_word) transmission.
* @effective_speed_hz: the effective SCK-speed that was used to
* transfer this transfer. Set to 0 if the spi bus driver does
@@ -753,6 +803,35 @@ extern void spi_res_release(struct spi_controller *ctlr,
* @transfer_list: transfers are sequenced through @spi_message.transfers
* @tx_sg: Scatterlist for transmit, currently not for client use
* @rx_sg: Scatterlist for receive, currently not for client use
+ * @ptp_sts_word_pre: The word (subject to bits_per_word semantics) offset
+ * within @tx_buf for which the SPI device is requesting that the time
+ * snapshot for this transfer begins. Upon completing the SPI transfer,
+ * this value may have changed compared to what was requested, depending
+ * on the available snapshotting resolution (DMA transfer,
+ * @ptp_sts_supported is false, etc).
+ * @ptp_sts_word_post: See @ptp_sts_word_pre. The two can be equal (meaning
+ * that a single byte should be snapshotted).
+ * If the core takes care of the timestamp (if @ptp_sts_supported is false
+ * for this controller), it will set @ptp_sts_word_pre to 0, and
+ * @ptp_sts_word_post to the length of the transfer. This is done
+ * purposefully (instead of setting to spi_transfer->len - 1) to denote
+ * that a transfer-level snapshot taken from within the driver may still
+ * be of higher quality.
+ * @ptp_sts: Pointer to a memory location held by the SPI slave device where a
+ * PTP system timestamp structure may lie. If drivers use PIO or their
+ * hardware has some sort of assist for retrieving exact transfer timing,
+ * they can (and should) assert @ptp_sts_supported and populate this
+ * structure using the ptp_read_system_*ts helper functions.
+ * The timestamp must represent the time at which the SPI slave device has
+ * processed the word, i.e. the "pre" timestamp should be taken before
+ * transmitting the "pre" word, and the "post" timestamp after receiving
+ * transmit confirmation from the controller for the "post" word.
+ * @timestamped_pre: Set by the SPI controller driver to denote it has acted
+ * upon the @ptp_sts request. Not set when the SPI core has taken care of
+ * the task. SPI device drivers are free to print a warning if this comes
+ * back unset and they need the better resolution.
+ * @timestamped_post: See above. The reason why both exist is that these
+ * booleans are also used to keep state in the core SPI logic.
*
* SPI transfers always write the same number of bytes as they read.
* Protocol drivers should always provide @rx_buf and/or @tx_buf.
@@ -830,18 +909,22 @@ struct spi_transfer {
#define SPI_NBITS_DUAL 0x02 /* 2bits transfer */
#define SPI_NBITS_QUAD 0x04 /* 4bits transfer */
u8 bits_per_word;
- u8 word_delay_usecs;
u16 delay_usecs;
- u16 cs_change_delay;
- u8 cs_change_delay_unit;
-#define SPI_DELAY_UNIT_USECS 0
-#define SPI_DELAY_UNIT_NSECS 1
-#define SPI_DELAY_UNIT_SCK 2
+ struct spi_delay delay;
+ struct spi_delay cs_change_delay;
+ struct spi_delay word_delay;
u32 speed_hz;
- u16 word_delay;
u32 effective_speed_hz;
+ unsigned int ptp_sts_word_pre;
+ unsigned int ptp_sts_word_post;
+
+ struct ptp_system_timestamp *ptp_sts;
+
+ bool timestamped_pre;
+ bool timestamped_post;
+
struct list_head transfer_list;
};
@@ -935,6 +1018,20 @@ spi_transfer_del(struct spi_transfer *t)
list_del(&t->transfer_list);
}
+static inline int
+spi_transfer_delay_exec(struct spi_transfer *t)
+{
+ struct spi_delay d;
+
+ if (t->delay_usecs) {
+ d.value = t->delay_usecs;
+ d.unit = SPI_DELAY_UNIT_USECS;
+ return spi_delay_exec(&d, NULL);
+ }
+
+ return spi_delay_exec(&t->delay, t);
+}
+
/**
* spi_message_init_with_transfers - Initialize spi_message and append transfers
* @m: spi_message to be initialized
@@ -982,7 +1079,10 @@ static inline void spi_message_free(struct spi_message *m)
kfree(m);
}
-extern void spi_set_cs_timing(struct spi_device *spi, u8 setup, u8 hold, u8 inactive_dly);
+extern int spi_set_cs_timing(struct spi_device *spi,
+ struct spi_delay *setup,
+ struct spi_delay *hold,
+ struct spi_delay *inactive);
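With the int return and spi_delay arguments, a caller can now request CS timing in any supported unit and detect controllers that cannot honor it. A hedged sketch (the exact values are illustrative, and the call is expected to fail with a negative errno when the controller lacks a set_cs_timing() hook):

static int foo_setup_cs(struct spi_device *spi)
{
	struct spi_delay setup    = { .value = 5, .unit = SPI_DELAY_UNIT_SCK };
	struct spi_delay hold     = { .value = 2, .unit = SPI_DELAY_UNIT_SCK };
	struct spi_delay inactive = { .value = 1, .unit = SPI_DELAY_UNIT_USECS };

	return spi_set_cs_timing(spi, &setup, &hold, &inactive);
}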
extern int spi_setup(struct spi_device *spi);
extern int spi_async(struct spi_device *spi, struct spi_message *message);
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
index b762eaba4cdf..19a9be9d97ee 100644
--- a/include/linux/spinlock_api_smp.h
+++ b/include/linux/spinlock_api_smp.h
@@ -147,7 +147,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
- spin_release(&lock->dep_map, 1, _RET_IP_);
+ spin_release(&lock->dep_map, _RET_IP_);
do_raw_spin_unlock(lock);
preempt_enable();
}
@@ -155,7 +155,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
static inline void __raw_spin_unlock_irqrestore(raw_spinlock_t *lock,
unsigned long flags)
{
- spin_release(&lock->dep_map, 1, _RET_IP_);
+ spin_release(&lock->dep_map, _RET_IP_);
do_raw_spin_unlock(lock);
local_irq_restore(flags);
preempt_enable();
@@ -163,7 +163,7 @@ static inline void __raw_spin_unlock_irqrestore(raw_spinlock_t *lock,
static inline void __raw_spin_unlock_irq(raw_spinlock_t *lock)
{
- spin_release(&lock->dep_map, 1, _RET_IP_);
+ spin_release(&lock->dep_map, _RET_IP_);
do_raw_spin_unlock(lock);
local_irq_enable();
preempt_enable();
@@ -171,7 +171,7 @@ static inline void __raw_spin_unlock_irq(raw_spinlock_t *lock)
static inline void __raw_spin_unlock_bh(raw_spinlock_t *lock)
{
- spin_release(&lock->dep_map, 1, _RET_IP_);
+ spin_release(&lock->dep_map, _RET_IP_);
do_raw_spin_unlock(lock);
__local_bh_enable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
}
diff --git a/include/linux/stat.h b/include/linux/stat.h
index 765573dc17d6..528c4baad091 100644
--- a/include/linux/stat.h
+++ b/include/linux/stat.h
@@ -33,7 +33,8 @@ struct kstat {
STATX_ATTR_IMMUTABLE | \
STATX_ATTR_APPEND | \
STATX_ATTR_NODUMP | \
- STATX_ATTR_ENCRYPTED \
+ STATX_ATTR_ENCRYPTED | \
+ STATX_ATTR_VERITY \
)/* Attrs corresponding to FS_*_FL flags */
u64 ino;
dev_t dev;
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index dc60d03c4b60..d4bcd9387136 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -13,6 +13,7 @@
#define __STMMAC_PLATFORM_DATA
#include <linux/platform_device.h>
+#include <linux/phy.h>
#define MTL_MAX_RX_QUEUES 8
#define MTL_MAX_TX_QUEUES 8
@@ -92,6 +93,7 @@ struct stmmac_dma_cfg {
int fixed_burst;
int mixed_burst;
bool aal;
+ bool eame;
};
#define AXI_BLEN 7
@@ -131,7 +133,7 @@ struct plat_stmmacenet_data {
int bus_id;
int phy_addr;
int interface;
- int phy_interface;
+ phy_interface_t phy_interface;
struct stmmac_mdio_bus_data *mdio_bus_data;
struct device_node *phy_node;
struct device_node *phylink_node;
diff --git a/include/linux/sxgbe_platform.h b/include/linux/sxgbe_platform.h
index 267369110584..85ec745767bd 100644
--- a/include/linux/sxgbe_platform.h
+++ b/include/linux/sxgbe_platform.h
@@ -10,6 +10,8 @@
#ifndef __SXGBE_PLATFORM_H__
#define __SXGBE_PLATFORM_H__
+#include <linux/phy.h>
+
/* MDC Clock Selection define*/
#define SXGBE_CSR_100_150M 0x0 /* MDC = clk_scr_i/62 */
#define SXGBE_CSR_150_250M 0x1 /* MDC = clk_scr_i/102 */
@@ -38,7 +40,7 @@ struct sxgbe_plat_data {
char *phy_bus_name;
int bus_id;
int phy_addr;
- int interface;
+ phy_interface_t interface;
struct sxgbe_mdio_bus_data *mdio_bus_data;
struct sxgbe_dma_cfg *dma_cfg;
int clk_csr;
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 668e25a76d69..ca6f01531e64 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -223,7 +223,7 @@ struct tcp_sock {
fastopen_connect:1, /* FASTOPEN_CONNECT sockopt */
fastopen_no_cookie:1, /* Allow send/recv SYN+data without a cookie */
is_sack_reneg:1, /* in recovery from loss with SACK reneg? */
- unused:2;
+ fastopen_client_fail:2; /* reason why fastopen failed */
u8 nonagle : 4,/* Disable Nagle algorithm? */
thin_lto : 1,/* Use linear timeouts for thin streams */
recvmsg_inq : 1,/* Indicate # of bytes in queue upon recvmsg */
diff --git a/include/linux/tick.h b/include/linux/tick.h
index f92a10b5e112..7896f792d3b0 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -108,7 +108,8 @@ enum tick_dep_bits {
TICK_DEP_BIT_POSIX_TIMER = 0,
TICK_DEP_BIT_PERF_EVENTS = 1,
TICK_DEP_BIT_SCHED = 2,
- TICK_DEP_BIT_CLOCK_UNSTABLE = 3
+ TICK_DEP_BIT_CLOCK_UNSTABLE = 3,
+ TICK_DEP_BIT_RCU = 4
};
#define TICK_DEP_MASK_NONE 0
@@ -116,6 +117,7 @@ enum tick_dep_bits {
#define TICK_DEP_MASK_PERF_EVENTS (1 << TICK_DEP_BIT_PERF_EVENTS)
#define TICK_DEP_MASK_SCHED (1 << TICK_DEP_BIT_SCHED)
#define TICK_DEP_MASK_CLOCK_UNSTABLE (1 << TICK_DEP_BIT_CLOCK_UNSTABLE)
+#define TICK_DEP_MASK_RCU (1 << TICK_DEP_BIT_RCU)
#ifdef CONFIG_NO_HZ_COMMON
extern bool tick_nohz_enabled;
@@ -174,7 +176,7 @@ extern cpumask_var_t tick_nohz_full_mask;
static inline bool tick_nohz_full_enabled(void)
{
- if (!context_tracking_is_enabled())
+ if (!context_tracking_enabled())
return false;
return tick_nohz_full_running;
@@ -268,6 +270,9 @@ static inline bool tick_nohz_full_enabled(void) { return false; }
static inline bool tick_nohz_full_cpu(int cpu) { return false; }
static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask) { }
+static inline void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit) { }
+static inline void tick_nohz_dep_clear_cpu(int cpu, enum tick_dep_bits bit) { }
+
static inline void tick_dep_set(enum tick_dep_bits bit) { }
static inline void tick_dep_clear(enum tick_dep_bits bit) { }
static inline void tick_dep_set_cpu(int cpu, enum tick_dep_bits bit) { }
diff --git a/include/linux/tpm.h b/include/linux/tpm.h
index 53c0ea9ec9df..0d6e949ba315 100644
--- a/include/linux/tpm.h
+++ b/include/linux/tpm.h
@@ -21,6 +21,7 @@
#include <linux/acpi.h>
#include <linux/cdev.h>
#include <linux/fs.h>
+#include <linux/highmem.h>
#include <crypto/hash_info.h>
#define TPM_DIGEST_SIZE 20 /* Max TPM v1.2 PCR size */
@@ -67,6 +68,8 @@ struct tpm_class_ops {
u8 (*status) (struct tpm_chip *chip);
void (*update_timeouts)(struct tpm_chip *chip,
unsigned long *timeout_cap);
+ void (*update_durations)(struct tpm_chip *chip,
+ unsigned long *duration_cap);
int (*go_idle)(struct tpm_chip *chip);
int (*cmd_ready)(struct tpm_chip *chip);
int (*request_locality)(struct tpm_chip *chip, int loc);
@@ -161,6 +164,235 @@ struct tpm_chip {
int locality;
};
+#define TPM_HEADER_SIZE 10
+
+enum tpm2_const {
+ TPM2_PLATFORM_PCR = 24,
+ TPM2_PCR_SELECT_MIN = ((TPM2_PLATFORM_PCR + 7) / 8),
+};
+
+enum tpm2_timeouts {
+ TPM2_TIMEOUT_A = 750,
+ TPM2_TIMEOUT_B = 2000,
+ TPM2_TIMEOUT_C = 200,
+ TPM2_TIMEOUT_D = 30,
+ TPM2_DURATION_SHORT = 20,
+ TPM2_DURATION_MEDIUM = 750,
+ TPM2_DURATION_LONG = 2000,
+ TPM2_DURATION_LONG_LONG = 300000,
+ TPM2_DURATION_DEFAULT = 120000,
+};
+
+enum tpm2_structures {
+ TPM2_ST_NO_SESSIONS = 0x8001,
+ TPM2_ST_SESSIONS = 0x8002,
+};
+
+/* Indicates which layer of the software stack the error comes from */
+#define TSS2_RC_LAYER_SHIFT 16
+#define TSS2_RESMGR_TPM_RC_LAYER (11 << TSS2_RC_LAYER_SHIFT)
+
+enum tpm2_return_codes {
+ TPM2_RC_SUCCESS = 0x0000,
+ TPM2_RC_HASH = 0x0083, /* RC_FMT1 */
+ TPM2_RC_HANDLE = 0x008B,
+ TPM2_RC_INITIALIZE = 0x0100, /* RC_VER1 */
+ TPM2_RC_FAILURE = 0x0101,
+ TPM2_RC_DISABLED = 0x0120,
+ TPM2_RC_COMMAND_CODE = 0x0143,
+ TPM2_RC_TESTING = 0x090A, /* RC_WARN */
+ TPM2_RC_REFERENCE_H0 = 0x0910,
+ TPM2_RC_RETRY = 0x0922,
+};
+
+enum tpm2_command_codes {
+ TPM2_CC_FIRST = 0x011F,
+ TPM2_CC_HIERARCHY_CONTROL = 0x0121,
+ TPM2_CC_HIERARCHY_CHANGE_AUTH = 0x0129,
+ TPM2_CC_CREATE_PRIMARY = 0x0131,
+ TPM2_CC_SEQUENCE_COMPLETE = 0x013E,
+ TPM2_CC_SELF_TEST = 0x0143,
+ TPM2_CC_STARTUP = 0x0144,
+ TPM2_CC_SHUTDOWN = 0x0145,
+ TPM2_CC_NV_READ = 0x014E,
+ TPM2_CC_CREATE = 0x0153,
+ TPM2_CC_LOAD = 0x0157,
+ TPM2_CC_SEQUENCE_UPDATE = 0x015C,
+ TPM2_CC_UNSEAL = 0x015E,
+ TPM2_CC_CONTEXT_LOAD = 0x0161,
+ TPM2_CC_CONTEXT_SAVE = 0x0162,
+ TPM2_CC_FLUSH_CONTEXT = 0x0165,
+ TPM2_CC_VERIFY_SIGNATURE = 0x0177,
+ TPM2_CC_GET_CAPABILITY = 0x017A,
+ TPM2_CC_GET_RANDOM = 0x017B,
+ TPM2_CC_PCR_READ = 0x017E,
+ TPM2_CC_PCR_EXTEND = 0x0182,
+ TPM2_CC_EVENT_SEQUENCE_COMPLETE = 0x0185,
+ TPM2_CC_HASH_SEQUENCE_START = 0x0186,
+ TPM2_CC_CREATE_LOADED = 0x0191,
+ TPM2_CC_LAST = 0x0193, /* Spec 1.36 */
+};
+
+enum tpm2_permanent_handles {
+ TPM2_RS_PW = 0x40000009,
+};
+
+enum tpm2_capabilities {
+ TPM2_CAP_HANDLES = 1,
+ TPM2_CAP_COMMANDS = 2,
+ TPM2_CAP_PCRS = 5,
+ TPM2_CAP_TPM_PROPERTIES = 6,
+};
+
+enum tpm2_properties {
+ TPM_PT_TOTAL_COMMANDS = 0x0129,
+};
+
+enum tpm2_startup_types {
+ TPM2_SU_CLEAR = 0x0000,
+ TPM2_SU_STATE = 0x0001,
+};
+
+enum tpm2_cc_attrs {
+ TPM2_CC_ATTR_CHANDLES = 25,
+ TPM2_CC_ATTR_RHANDLE = 28,
+};
+
+#define TPM_VID_INTEL 0x8086
+#define TPM_VID_WINBOND 0x1050
+#define TPM_VID_STM 0x104A
+
+enum tpm_chip_flags {
+ TPM_CHIP_FLAG_TPM2 = BIT(1),
+ TPM_CHIP_FLAG_IRQ = BIT(2),
+ TPM_CHIP_FLAG_VIRTUAL = BIT(3),
+ TPM_CHIP_FLAG_HAVE_TIMEOUTS = BIT(4),
+ TPM_CHIP_FLAG_ALWAYS_POWERED = BIT(5),
+ TPM_CHIP_FLAG_FIRMWARE_POWER_MANAGED = BIT(6),
+};
+
+#define to_tpm_chip(d) container_of(d, struct tpm_chip, dev)
+
+struct tpm_header {
+ __be16 tag;
+ __be32 length;
+ union {
+ __be32 ordinal;
+ __be32 return_code;
+ };
+} __packed;
+
+/* A string buffer type for constructing TPM commands. This is based on the
+ * ideas of string buffer code in security/keys/trusted.h but is heap based
+ * in order to keep the stack usage minimal.
+ */
+
+enum tpm_buf_flags {
+ TPM_BUF_OVERFLOW = BIT(0),
+};
+
+struct tpm_buf {
+ unsigned int flags;
+ u8 *data;
+};
+
+enum tpm2_object_attributes {
+ TPM2_OA_USER_WITH_AUTH = BIT(6),
+};
+
+enum tpm2_session_attributes {
+ TPM2_SA_CONTINUE_SESSION = BIT(0),
+};
+
+struct tpm2_hash {
+ unsigned int crypto_id;
+ unsigned int tpm_id;
+};
+
+static inline void tpm_buf_reset(struct tpm_buf *buf, u16 tag, u32 ordinal)
+{
+ struct tpm_header *head = (struct tpm_header *)buf->data;
+
+ head->tag = cpu_to_be16(tag);
+ head->length = cpu_to_be32(sizeof(*head));
+ head->ordinal = cpu_to_be32(ordinal);
+}
+
+static inline int tpm_buf_init(struct tpm_buf *buf, u16 tag, u32 ordinal)
+{
+ buf->data = (u8 *)__get_free_page(GFP_KERNEL);
+ if (!buf->data)
+ return -ENOMEM;
+
+ buf->flags = 0;
+ tpm_buf_reset(buf, tag, ordinal);
+ return 0;
+}
+
+static inline void tpm_buf_destroy(struct tpm_buf *buf)
+{
+ free_page((unsigned long)buf->data);
+}
+
+static inline u32 tpm_buf_length(struct tpm_buf *buf)
+{
+ struct tpm_header *head = (struct tpm_header *)buf->data;
+
+ return be32_to_cpu(head->length);
+}
+
+static inline u16 tpm_buf_tag(struct tpm_buf *buf)
+{
+ struct tpm_header *head = (struct tpm_header *)buf->data;
+
+ return be16_to_cpu(head->tag);
+}
+
+static inline void tpm_buf_append(struct tpm_buf *buf,
+ const unsigned char *new_data,
+ unsigned int new_len)
+{
+ struct tpm_header *head = (struct tpm_header *)buf->data;
+ u32 len = tpm_buf_length(buf);
+
+ /* Return silently if overflow has already happened. */
+ if (buf->flags & TPM_BUF_OVERFLOW)
+ return;
+
+ if ((len + new_len) > PAGE_SIZE) {
+ WARN(1, "tpm_buf: overflow\n");
+ buf->flags |= TPM_BUF_OVERFLOW;
+ return;
+ }
+
+ memcpy(&buf->data[len], new_data, new_len);
+ head->length = cpu_to_be32(len + new_len);
+}
+
+static inline void tpm_buf_append_u8(struct tpm_buf *buf, const u8 value)
+{
+ tpm_buf_append(buf, &value, 1);
+}
+
+static inline void tpm_buf_append_u16(struct tpm_buf *buf, const u16 value)
+{
+ __be16 value2 = cpu_to_be16(value);
+
+ tpm_buf_append(buf, (u8 *) &value2, 2);
+}
+
+static inline void tpm_buf_append_u32(struct tpm_buf *buf, const u32 value)
+{
+ __be32 value2 = cpu_to_be32(value);
+
+ tpm_buf_append(buf, (u8 *) &value2, 4);
+}
+
+static inline u32 tpm2_rc_value(u32 rc)
+{
+ return (rc & BIT(7)) ? rc & 0xff : rc;
+}
+
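A minimal sketch of building a TPM2 command with the tpm_buf helpers relocated into this header; the ordinal and tag come from the enums above, the selection payload is abbreviated, and tpm_send() is the exported submission API declared below:

static int foo_read_pcr(struct tpm_chip *chip)
{
	struct tpm_buf buf;
	int rc;

	rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_PCR_READ);
	if (rc)
		return rc;

	tpm_buf_append_u32(&buf, 1);	/* pcrSelectionIn count */
	/* ... remaining TPMS_PCR_SELECTION fields elided ... */

	rc = tpm_send(chip, buf.data, tpm_buf_length(&buf));
	tpm_buf_destroy(&buf);
	return rc;
}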
#if defined(CONFIG_TCG_TPM) || defined(CONFIG_TCG_TPM_MODULE)
extern int tpm_is_tpm2(struct tpm_chip *chip);
@@ -170,12 +402,6 @@ extern int tpm_pcr_extend(struct tpm_chip *chip, u32 pcr_idx,
struct tpm_digest *digests);
extern int tpm_send(struct tpm_chip *chip, void *cmd, size_t buflen);
extern int tpm_get_random(struct tpm_chip *chip, u8 *data, size_t max);
-extern int tpm_seal_trusted(struct tpm_chip *chip,
- struct trusted_key_payload *payload,
- struct trusted_key_options *options);
-extern int tpm_unseal_trusted(struct tpm_chip *chip,
- struct trusted_key_payload *payload,
- struct trusted_key_options *options);
extern struct tpm_chip *tpm_default_chip(void);
#else
static inline int tpm_is_tpm2(struct tpm_chip *chip)
@@ -204,18 +430,6 @@ static inline int tpm_get_random(struct tpm_chip *chip, u8 *data, size_t max)
return -ENODEV;
}
-static inline int tpm_seal_trusted(struct tpm_chip *chip,
- struct trusted_key_payload *payload,
- struct trusted_key_options *options)
-{
- return -ENODEV;
-}
-static inline int tpm_unseal_trusted(struct tpm_chip *chip,
- struct trusted_key_payload *payload,
- struct trusted_key_options *options)
-{
- return -ENODEV;
-}
static inline struct tpm_chip *tpm_default_chip(void)
{
return NULL;
diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h
index a27604f99ed0..9de5c10293f5 100644
--- a/include/linux/u64_stats_sync.h
+++ b/include/linux/u64_stats_sync.h
@@ -40,8 +40,8 @@
* spin_lock_bh(...) or other synchronization to get exclusive access
* ...
* u64_stats_update_begin(&stats->syncp);
- * stats->bytes64 += len; // non atomic operation
- * stats->packets64++; // non atomic operation
+ * u64_stats_add(&stats->bytes64, len); // non atomic operation
+ * u64_stats_inc(&stats->packets64); // non atomic operation
* u64_stats_update_end(&stats->syncp);
*
* While a consumer (reader) should use following template to get consistent
@@ -52,8 +52,8 @@
*
* do {
* start = u64_stats_fetch_begin(&stats->syncp);
- * tbytes = stats->bytes64; // non atomic operation
- * tpackets = stats->packets64; // non atomic operation
+ * tbytes = u64_stats_read(&stats->bytes64); // non atomic operation
+ * tpackets = u64_stats_read(&stats->packets64); // non atomic operation
* } while (u64_stats_fetch_retry(&stats->syncp, start));
*
*
@@ -68,6 +68,49 @@ struct u64_stats_sync {
#endif
};
+#if BITS_PER_LONG == 64
+#include <asm/local64.h>
+
+typedef struct {
+ local64_t v;
+} u64_stats_t;
+
+static inline u64 u64_stats_read(const u64_stats_t *p)
+{
+ return local64_read(&p->v);
+}
+
+static inline void u64_stats_add(u64_stats_t *p, unsigned long val)
+{
+ local64_add(val, &p->v);
+}
+
+static inline void u64_stats_inc(u64_stats_t *p)
+{
+ local64_inc(&p->v);
+}
+
+#else
+
+typedef struct {
+ u64 v;
+} u64_stats_t;
+
+static inline u64 u64_stats_read(const u64_stats_t *p)
+{
+ return p->v;
+}
+
+static inline void u64_stats_add(u64_stats_t *p, unsigned long val)
+{
+ p->v += val;
+}
+
+static inline void u64_stats_inc(u64_stats_t *p)
+{
+ p->v++;
+}
+#endif
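A compact writer/reader pair using the new opaque type, following the templates in the comment at the top of this file; foo_stats and the functions are hypothetical:

struct foo_stats {
	u64_stats_t		bytes64;
	u64_stats_t		packets64;
	struct u64_stats_sync	syncp;
};

static void foo_account(struct foo_stats *stats, unsigned int len)
{
	/* Writer side: caller provides exclusive access (e.g. a lock). */
	u64_stats_update_begin(&stats->syncp);
	u64_stats_add(&stats->bytes64, len);
	u64_stats_inc(&stats->packets64);
	u64_stats_update_end(&stats->syncp);
}

static void foo_read(struct foo_stats *stats, u64 *bytes, u64 *pkts)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&stats->syncp);
		*bytes = u64_stats_read(&stats->bytes64);
		*pkts  = u64_stats_read(&stats->packets64);
	} while (u64_stats_fetch_retry(&stats->syncp, start));
}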
static inline void u64_stats_init(struct u64_stats_sync *syncp)
{
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index d4ee6e942562..67f016010aad 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -311,6 +311,7 @@ copy_struct_from_user(void *dst, size_t ksize, const void __user *src,
* happens, handle that and return -EFAULT.
*/
extern long probe_kernel_read(void *dst, const void *src, size_t size);
+extern long probe_kernel_read_strict(void *dst, const void *src, size_t size);
extern long __probe_kernel_read(void *dst, const void *src, size_t size);
/*
@@ -337,7 +338,22 @@ extern long __probe_user_read(void *dst, const void __user *src, size_t size);
extern long notrace probe_kernel_write(void *dst, const void *src, size_t size);
extern long notrace __probe_kernel_write(void *dst, const void *src, size_t size);
+/*
+ * probe_user_write(): safely attempt to write to a location in user space
+ * @dst: address to write to
+ * @src: pointer to the data that shall be written
+ * @size: size of the data chunk
+ *
+ * Safely write to address @dst from the buffer at @src. If a kernel fault
+ * happens, handle that and return -EFAULT.
+ */
+extern long notrace probe_user_write(void __user *dst, const void *src, size_t size);
+extern long notrace __probe_user_write(void __user *dst, const void *src, size_t size);
+
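probe_user_write() mirrors probe_kernel_write() for user addresses and is meant for contexts (e.g. tracing or BPF) that must not take a fault. A hedged sketch with a hypothetical caller:

/* Patch a word in user memory without risking a fault-induced oops. */
static long foo_poke_user(void __user *uaddr, u32 val)
{
	long ret = probe_user_write(uaddr, &val, sizeof(val));

	if (ret)	/* -EFAULT: page not present or not writable */
		pr_debug("foo: could not write %zu bytes\n", sizeof(val));
	return ret;
}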
extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count);
+extern long strncpy_from_unsafe_strict(char *dst, const void *unsafe_addr,
+ long count);
+extern long __strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count);
extern long strncpy_from_unsafe_user(char *dst, const void __user *unsafe_addr,
long count);
extern long strnlen_unsafe_user(const void __user *unsafe_addr, long count);
diff --git a/include/linux/usb/role.h b/include/linux/usb/role.h
index 2d77f97df72d..efac3af83d6b 100644
--- a/include/linux/usb/role.h
+++ b/include/linux/usb/role.h
@@ -51,6 +51,9 @@ struct usb_role_switch *fwnode_usb_role_switch_get(struct fwnode_handle *node);
void usb_role_switch_put(struct usb_role_switch *sw);
struct usb_role_switch *
+usb_role_switch_find_by_fwnode(const struct fwnode_handle *fwnode);
+
+struct usb_role_switch *
usb_role_switch_register(struct device *parent,
const struct usb_role_switch_desc *desc);
void usb_role_switch_unregister(struct usb_role_switch *sw);
diff --git a/include/linux/usb/tcpm.h b/include/linux/usb/tcpm.h
index f516955a0cf4..e7979c01c351 100644
--- a/include/linux/usb/tcpm.h
+++ b/include/linux/usb/tcpm.h
@@ -46,45 +46,6 @@ enum tcpm_transmit_type {
TCPC_TX_BIST_MODE_2 = 7
};
-/**
- * struct tcpc_config - Port configuration
- * @src_pdo: PDO parameters sent to port partner as response to
- * PD_CTRL_GET_SOURCE_CAP message
- * @nr_src_pdo: Number of entries in @src_pdo
- * @snk_pdo: PDO parameters sent to partner as response to
- * PD_CTRL_GET_SINK_CAP message
- * @nr_snk_pdo: Number of entries in @snk_pdo
- * @operating_snk_mw:
- * Required operating sink power in mW
- * @type: Port type (TYPEC_PORT_DFP, TYPEC_PORT_UFP, or
- * TYPEC_PORT_DRP)
- * @default_role:
- * Default port role (TYPEC_SINK or TYPEC_SOURCE).
- * Set to TYPEC_NO_PREFERRED_ROLE if no default role.
- * @try_role_hw:True if try.{Src,Snk} is implemented in hardware
- * @alt_modes: List of supported alternate modes
- */
-struct tcpc_config {
- const u32 *src_pdo;
- unsigned int nr_src_pdo;
-
- const u32 *snk_pdo;
- unsigned int nr_snk_pdo;
-
- const u32 *snk_vdo;
- unsigned int nr_snk_vdo;
-
- unsigned int operating_snk_mw;
-
- enum typec_port_type type;
- enum typec_port_data data;
- enum typec_role default_role;
- bool try_role_hw; /* try.{src,snk} implemented in hardware */
- bool self_powered; /* port belongs to a self powered device */
-
- const struct typec_altmode_desc *alt_modes;
-};
-
/* Mux state attributes */
#define TCPC_MUX_USB_ENABLED BIT(0) /* USB enabled */
#define TCPC_MUX_DP_ENABLED BIT(1) /* DP enabled */
@@ -92,7 +53,6 @@ struct tcpc_config {
/**
* struct tcpc_dev - Port configuration and callback functions
- * @config: Pointer to port configuration
* @fwnode: Pointer to port fwnode
* @get_vbus: Called to read current VBUS state
* @get_current_limit:
@@ -121,7 +81,6 @@ struct tcpc_config {
* @mux: Pointer to multiplexer data
*/
struct tcpc_dev {
- const struct tcpc_config *config;
struct fwnode_handle *fwnode;
int (*init)(struct tcpc_dev *dev);
diff --git a/include/linux/usb/typec.h b/include/linux/usb/typec.h
index 7df4ecabc78a..0f52723a11bd 100644
--- a/include/linux/usb/typec.h
+++ b/include/linux/usb/typec.h
@@ -168,6 +168,23 @@ struct typec_partner_desc {
struct usb_pd_identity *identity;
};
+/**
+ * struct typec_operations - USB Type-C Port Operations
+ * @try_role: Set data role preference for DRP port
+ * @dr_set: Set Data Role
+ * @pr_set: Set Power Role
+ * @vconn_set: Source VCONN
+ * @port_type_set: Set port type
+ */
+struct typec_operations {
+ int (*try_role)(struct typec_port *port, int role);
+ int (*dr_set)(struct typec_port *port, enum typec_data_role role);
+ int (*pr_set)(struct typec_port *port, enum typec_role role);
+ int (*vconn_set)(struct typec_port *port, enum typec_role role);
+ int (*port_type_set)(struct typec_port *port,
+ enum typec_port_type type);
+};
+
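With the callbacks moved out of typec_capability into this ops table, a port driver recovers its private state with typec_get_drvdata() rather than container_of() on the capability. A sketch under those assumptions; foo_port and foo_hw_set_data_role() are hypothetical:

static int foo_dr_set(struct typec_port *port, enum typec_data_role role)
{
	struct foo_port *fp = typec_get_drvdata(port);	/* was container_of() */

	return foo_hw_set_data_role(fp, role);
}

static const struct typec_operations foo_typec_ops = {
	.dr_set = foo_dr_set,
};

static int foo_register(struct foo_port *fp, struct typec_capability *cap)
{
	cap->driver_data = fp;
	cap->ops = &foo_typec_ops;
	fp->port = typec_register_port(fp->dev, cap);
	return PTR_ERR_OR_ZERO(fp->port);
}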
/*
* struct typec_capability - USB Type-C Port Capabilities
* @type: Supported power role of the port
@@ -179,11 +196,8 @@ struct typec_partner_desc {
* @sw: Cable plug orientation switch
* @mux: Multiplexer switch for Alternate/Accessory Modes
* @fwnode: Optional fwnode of the port
- * @try_role: Set data role preference for DRP port
- * @dr_set: Set Data Role
- * @pr_set: Set Power Role
- * @vconn_set: Set VCONN Role
- * @port_type_set: Set port type
+ * @driver_data: Private pointer for driver specific info
+ * @ops: Port operations vector
*
* Static capabilities of a single USB Type-C port.
*/
@@ -195,21 +209,10 @@ struct typec_capability {
int prefer_role;
enum typec_accessory accessory[TYPEC_MAX_ACCESSORY];
- struct typec_switch *sw;
- struct typec_mux *mux;
struct fwnode_handle *fwnode;
+ void *driver_data;
- int (*try_role)(const struct typec_capability *,
- int role);
-
- int (*dr_set)(const struct typec_capability *,
- enum typec_data_role);
- int (*pr_set)(const struct typec_capability *,
- enum typec_role);
- int (*vconn_set)(const struct typec_capability *,
- enum typec_role);
- int (*port_type_set)(const struct typec_capability *,
- enum typec_port_type);
+ const struct typec_operations *ops;
};
/* Specific to try_role(). Indicates the user wants to clear the preference. */
@@ -241,6 +244,8 @@ int typec_set_orientation(struct typec_port *port,
enum typec_orientation typec_get_orientation(struct typec_port *port);
int typec_set_mode(struct typec_port *port, int mode);
+void *typec_get_drvdata(struct typec_port *port);
+
int typec_find_port_power_role(const char *name);
int typec_find_power_role(const char *name);
int typec_find_port_data_role(const char *name);
diff --git a/include/linux/virtio_vsock.h b/include/linux/virtio_vsock.h
index 07875ccc7bb5..71c81e0dc8f2 100644
--- a/include/linux/virtio_vsock.h
+++ b/include/linux/virtio_vsock.h
@@ -7,9 +7,6 @@
#include <net/sock.h>
#include <net/af_vsock.h>
-#define VIRTIO_VSOCK_DEFAULT_MIN_BUF_SIZE 128
-#define VIRTIO_VSOCK_DEFAULT_BUF_SIZE (1024 * 256)
-#define VIRTIO_VSOCK_DEFAULT_MAX_BUF_SIZE (1024 * 256)
#define VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE (1024 * 4)
#define VIRTIO_VSOCK_MAX_BUF_SIZE 0xFFFFFFFFUL
#define VIRTIO_VSOCK_MAX_PKT_BUF_SIZE (1024 * 64)
@@ -25,11 +22,6 @@ enum {
struct virtio_vsock_sock {
struct vsock_sock *vsk;
- /* Protected by lock_sock(sk_vsock(trans->vsk)) */
- u32 buf_size;
- u32 buf_size_min;
- u32 buf_size_max;
-
spinlock_t tx_lock;
spinlock_t rx_lock;
@@ -92,12 +84,6 @@ s64 virtio_transport_stream_has_space(struct vsock_sock *vsk);
int virtio_transport_do_socket_init(struct vsock_sock *vsk,
struct vsock_sock *psk);
-u64 virtio_transport_get_buffer_size(struct vsock_sock *vsk);
-u64 virtio_transport_get_min_buffer_size(struct vsock_sock *vsk);
-u64 virtio_transport_get_max_buffer_size(struct vsock_sock *vsk);
-void virtio_transport_set_buffer_size(struct vsock_sock *vsk, u64 val);
-void virtio_transport_set_min_buffer_size(struct vsock_sock *vsk, u64 val);
-void virtio_transport_set_max_buffer_size(struct vsock_sock *vs, u64 val);
int
virtio_transport_notify_poll_in(struct vsock_sock *vsk,
size_t target,
@@ -124,6 +110,7 @@ int virtio_transport_notify_send_pre_enqueue(struct vsock_sock *vsk,
struct vsock_transport_send_notify_data *data);
int virtio_transport_notify_send_post_enqueue(struct vsock_sock *vsk,
ssize_t written, struct vsock_transport_send_notify_data *data);
+void virtio_transport_notify_buffer_size(struct vsock_sock *vsk, u64 *val);
u64 virtio_transport_stream_rcvhiwat(struct vsock_sock *vsk);
bool virtio_transport_stream_is_active(struct vsock_sock *vsk);
@@ -150,7 +137,8 @@ virtio_transport_dgram_enqueue(struct vsock_sock *vsk,
void virtio_transport_destruct(struct vsock_sock *vsk);
-void virtio_transport_recv_pkt(struct virtio_vsock_pkt *pkt);
+void virtio_transport_recv_pkt(struct virtio_transport *t,
+ struct virtio_vsock_pkt *pkt);
void virtio_transport_free_pkt(struct virtio_vsock_pkt *pkt);
void virtio_transport_inc_tx_pkt(struct virtio_vsock_sock *vvs, struct virtio_vsock_pkt *pkt);
u32 virtio_transport_get_credit(struct virtio_vsock_sock *vvs, u32 wanted);
diff --git a/include/linux/vm_sockets.h b/include/linux/vm_sockets.h
deleted file mode 100644
index 33f1a2ecd905..000000000000
--- a/include/linux/vm_sockets.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * VMware vSockets Driver
- *
- * Copyright (C) 2007-2013 VMware, Inc. All rights reserved.
- */
-
-#ifndef _VM_SOCKETS_H
-#define _VM_SOCKETS_H
-
-#include <uapi/linux/vm_sockets.h>
-
-int vm_sockets_get_local_cid(void);
-
-#endif /* _VM_SOCKETS_H */
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 4e7809408073..b4c58a191eb1 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -93,6 +93,7 @@ extern void *vzalloc(unsigned long size);
extern void *vmalloc_user(unsigned long size);
extern void *vmalloc_node(unsigned long size, int node);
extern void *vzalloc_node(unsigned long size, int node);
+extern void *vmalloc_user_node_flags(unsigned long size, int node, gfp_t flags);
extern void *vmalloc_exec(unsigned long size);
extern void *vmalloc_32(unsigned long size);
extern void *vmalloc_32_user(unsigned long size);
diff --git a/include/linux/vmw_vmci_api.h b/include/linux/vmw_vmci_api.h
index acd9fafe4fc6..f28907345c80 100644
--- a/include/linux/vmw_vmci_api.h
+++ b/include/linux/vmw_vmci_api.h
@@ -19,6 +19,7 @@
struct msghdr;
typedef void (vmci_device_shutdown_fn) (void *device_registration,
void *user_data);
+typedef void (*vmci_vsock_cb) (bool is_host);
int vmci_datagram_create_handle(u32 resource_id, u32 flags,
vmci_datagram_recv_cb recv_cb,
@@ -37,6 +38,7 @@ int vmci_doorbell_destroy(struct vmci_handle handle);
int vmci_doorbell_notify(struct vmci_handle handle, u32 priv_flags);
u32 vmci_get_context_id(void);
bool vmci_is_context_owner(u32 context_id, kuid_t uid);
+int vmci_register_vsock_callback(vmci_vsock_cb callback);
int vmci_event_subscribe(u32 event,
vmci_event_cb callback, void *callback_data,
diff --git a/include/linux/vtime.h b/include/linux/vtime.h
index a26ed10a4eac..2cdeca062db3 100644
--- a/include/linux/vtime.h
+++ b/include/linux/vtime.h
@@ -11,11 +11,15 @@
struct task_struct;
/*
- * vtime_accounting_cpu_enabled() definitions/declarations
+ * vtime_accounting_enabled_this_cpu() definitions/declarations
*/
#if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE)
-static inline bool vtime_accounting_cpu_enabled(void) { return true; }
+
+static inline bool vtime_accounting_enabled_this_cpu(void) { return true; }
+extern void vtime_task_switch(struct task_struct *prev);
+
#elif defined(CONFIG_VIRT_CPU_ACCOUNTING_GEN)
+
/*
* Checks if vtime is enabled on some CPU. Cputime readers want to be careful
* in that case and compute the tickless cputime.
@@ -24,46 +28,43 @@ static inline bool vtime_accounting_cpu_enabled(void) { return true; }
*/
static inline bool vtime_accounting_enabled(void)
{
- return context_tracking_is_enabled();
+ return context_tracking_enabled();
}
-static inline bool vtime_accounting_cpu_enabled(void)
+static inline bool vtime_accounting_enabled_cpu(int cpu)
{
- if (vtime_accounting_enabled()) {
- if (context_tracking_cpu_is_enabled())
- return true;
- }
-
- return false;
+ return context_tracking_enabled_cpu(cpu);
}
-#else /* !CONFIG_VIRT_CPU_ACCOUNTING */
-static inline bool vtime_accounting_cpu_enabled(void) { return false; }
-#endif
+static inline bool vtime_accounting_enabled_this_cpu(void)
+{
+ return context_tracking_enabled_this_cpu();
+}
-/*
- * Common vtime APIs
- */
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+extern void vtime_task_switch_generic(struct task_struct *prev);
-#ifdef __ARCH_HAS_VTIME_TASK_SWITCH
-extern void vtime_task_switch(struct task_struct *prev);
-#else
-extern void vtime_common_task_switch(struct task_struct *prev);
static inline void vtime_task_switch(struct task_struct *prev)
{
- if (vtime_accounting_cpu_enabled())
- vtime_common_task_switch(prev);
+ if (vtime_accounting_enabled_this_cpu())
+ vtime_task_switch_generic(prev);
}
-#endif /* __ARCH_HAS_VTIME_TASK_SWITCH */
-
-extern void vtime_account_system(struct task_struct *tsk);
-extern void vtime_account_idle(struct task_struct *tsk);
#else /* !CONFIG_VIRT_CPU_ACCOUNTING */
+static inline bool vtime_accounting_enabled_cpu(int cpu) { return false; }
+static inline bool vtime_accounting_enabled_this_cpu(void) { return false; }
static inline void vtime_task_switch(struct task_struct *prev) { }
-static inline void vtime_account_system(struct task_struct *tsk) { }
+
+#endif
+
+/*
+ * Common vtime APIs
+ */
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+extern void vtime_account_kernel(struct task_struct *tsk);
+extern void vtime_account_idle(struct task_struct *tsk);
+#else /* !CONFIG_VIRT_CPU_ACCOUNTING */
+static inline void vtime_account_kernel(struct task_struct *tsk) { }
#endif /* !CONFIG_VIRT_CPU_ACCOUNTING */
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
@@ -86,7 +87,7 @@ extern void vtime_account_irq_enter(struct task_struct *tsk);
static inline void vtime_account_irq_exit(struct task_struct *tsk)
{
/* On hard|softirq exit we always account to hard|softirq cputime */
- vtime_account_system(tsk);
+ vtime_account_kernel(tsk);
}
extern void vtime_flush(struct task_struct *tsk);
#else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
diff --git a/include/linux/w1.h b/include/linux/w1.h
index 7da0c7588e04..cebf3464bc03 100644
--- a/include/linux/w1.h
+++ b/include/linux/w1.h
@@ -262,6 +262,7 @@ struct w1_family_ops {
* @family_entry: family linked list
* @fid: 8 bit family identifier
* @fops: operations for this family
+ * @of_match_table: open firmware match table
* @refcnt: reference counter
*/
struct w1_family {
diff --git a/include/linux/ww_mutex.h b/include/linux/ww_mutex.h
index 3af7c0e03be5..d7554252404c 100644
--- a/include/linux/ww_mutex.h
+++ b/include/linux/ww_mutex.h
@@ -182,7 +182,7 @@ static inline void ww_acquire_done(struct ww_acquire_ctx *ctx)
static inline void ww_acquire_fini(struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_MUTEXES
- mutex_release(&ctx->dep_map, 0, _THIS_IP_);
+ mutex_release(&ctx->dep_map, _THIS_IP_);
DEBUG_LOCKS_WARN_ON(ctx->acquired);
if (!IS_ENABLED(CONFIG_PROVE_LOCKING))
diff --git a/include/media/cec-pin.h b/include/media/cec-pin.h
index 604e79cb6cbf..88c8b016eb09 100644
--- a/include/media/cec-pin.h
+++ b/include/media/cec-pin.h
@@ -29,8 +29,11 @@
* an error if negative. If NULL or -ENOTTY is returned,
* then this is not supported.
*
- * These operations are used by the cec pin framework to manipulate
- * the CEC pin.
+ * @received: optional. High-level CEC message callback. Allows the driver
+ * to process CEC messages.
+ *
+ * These operations (except for the @received op) are used by the
+ * cec pin framework to manipulate the CEC pin.
*/
struct cec_pin_ops {
bool (*read)(struct cec_adapter *adap);
@@ -42,6 +45,9 @@ struct cec_pin_ops {
void (*status)(struct cec_adapter *adap, struct seq_file *file);
int (*read_hpd)(struct cec_adapter *adap);
int (*read_5v)(struct cec_adapter *adap);
+
+ /* High-level CEC message callback */
+ int (*received)(struct cec_adapter *adap, struct cec_msg *msg);
};
/**
diff --git a/include/media/cec.h b/include/media/cec.h
index 4d59387bc61b..0a4f69cc9dd4 100644
--- a/include/media/cec.h
+++ b/include/media/cec.h
@@ -18,9 +18,6 @@
#include <linux/cec-funcs.h>
#include <media/rc-core.h>
-/* CEC_ADAP_G_CONNECTOR_INFO is available */
-#define CEC_CAP_CONNECTOR_INFO (1 << 8)
-
#define CEC_CAP_DEFAULTS (CEC_CAP_LOG_ADDRS | CEC_CAP_TRANSMIT | \
CEC_CAP_PASSTHROUGH | CEC_CAP_RC)
@@ -147,34 +144,6 @@ struct cec_adap_ops {
*/
#define CEC_MAX_MSG_TX_QUEUE_SZ (18 * 1)
-/**
- * struct cec_drm_connector_info - tells which drm connector is
- * associated with the CEC adapter.
- * @card_no: drm card number
- * @connector_id: drm connector ID
- */
-struct cec_drm_connector_info {
- __u32 card_no;
- __u32 connector_id;
-};
-
-#define CEC_CONNECTOR_TYPE_NO_CONNECTOR 0
-#define CEC_CONNECTOR_TYPE_DRM 1
-
-/**
- * struct cec_connector_info - tells if and which connector is
- * associated with the CEC adapter.
- * @type: connector type (if any)
- * @drm: drm connector info
- */
-struct cec_connector_info {
- __u32 type;
- union {
- struct cec_drm_connector_info drm;
- __u32 raw[16];
- };
-};
-
struct cec_adapter {
struct module *owner;
char name[32];
diff --git a/include/media/dvb-usb-ids.h b/include/media/dvb-usb-ids.h
index 7ce4e8332421..1409230ad3a4 100644
--- a/include/media/dvb-usb-ids.h
+++ b/include/media/dvb-usb-ids.h
@@ -389,6 +389,7 @@
#define USB_PID_MYGICA_T230 0xc688
#define USB_PID_MYGICA_T230C 0xc689
#define USB_PID_MYGICA_T230C2 0xc68a
+#define USB_PID_MYGICA_T230C_LITE 0xc699
#define USB_PID_ELGATO_EYETV_DIVERSITY 0x0011
#define USB_PID_ELGATO_EYETV_DTT 0x0021
#define USB_PID_ELGATO_EYETV_DTT_2 0x003f
diff --git a/include/media/hevc-ctrls.h b/include/media/hevc-ctrls.h
new file mode 100644
index 000000000000..1009cf0891cc
--- /dev/null
+++ b/include/media/hevc-ctrls.h
@@ -0,0 +1,212 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * These are the HEVC state controls for use with stateless HEVC
+ * codec drivers.
+ *
+ * It turns out that these structs are not stable yet and will undergo
+ * more changes. So keep them private until they are stable and ready to
+ * become part of the official public API.
+ */
+
+#ifndef _HEVC_CTRLS_H_
+#define _HEVC_CTRLS_H_
+
+#include <linux/videodev2.h>
+
+/* The pixel format isn't stable at the moment and will likely be renamed. */
+#define V4L2_PIX_FMT_HEVC_SLICE v4l2_fourcc('S', '2', '6', '5') /* HEVC parsed slices */
+
+#define V4L2_CID_MPEG_VIDEO_HEVC_SPS (V4L2_CID_MPEG_BASE + 1008)
+#define V4L2_CID_MPEG_VIDEO_HEVC_PPS (V4L2_CID_MPEG_BASE + 1009)
+#define V4L2_CID_MPEG_VIDEO_HEVC_SLICE_PARAMS (V4L2_CID_MPEG_BASE + 1010)
+#define V4L2_CID_MPEG_VIDEO_HEVC_DECODE_MODE (V4L2_CID_MPEG_BASE + 1015)
+#define V4L2_CID_MPEG_VIDEO_HEVC_START_CODE (V4L2_CID_MPEG_BASE + 1016)
+
+/* enum v4l2_ctrl_type type values */
+#define V4L2_CTRL_TYPE_HEVC_SPS 0x0120
+#define V4L2_CTRL_TYPE_HEVC_PPS 0x0121
+#define V4L2_CTRL_TYPE_HEVC_SLICE_PARAMS 0x0122
+
+enum v4l2_mpeg_video_hevc_decode_mode {
+ V4L2_MPEG_VIDEO_HEVC_DECODE_MODE_SLICE_BASED,
+ V4L2_MPEG_VIDEO_HEVC_DECODE_MODE_FRAME_BASED,
+};
+
+enum v4l2_mpeg_video_hevc_start_code {
+ V4L2_MPEG_VIDEO_HEVC_START_CODE_NONE,
+ V4L2_MPEG_VIDEO_HEVC_START_CODE_ANNEX_B,
+};
+
+#define V4L2_HEVC_SLICE_TYPE_B 0
+#define V4L2_HEVC_SLICE_TYPE_P 1
+#define V4L2_HEVC_SLICE_TYPE_I 2
+
+#define V4L2_HEVC_SPS_FLAG_SEPARATE_COLOUR_PLANE (1ULL << 0)
+#define V4L2_HEVC_SPS_FLAG_SCALING_LIST_ENABLED (1ULL << 1)
+#define V4L2_HEVC_SPS_FLAG_AMP_ENABLED (1ULL << 2)
+#define V4L2_HEVC_SPS_FLAG_SAMPLE_ADAPTIVE_OFFSET (1ULL << 3)
+#define V4L2_HEVC_SPS_FLAG_PCM_ENABLED (1ULL << 4)
+#define V4L2_HEVC_SPS_FLAG_PCM_LOOP_FILTER_DISABLED (1ULL << 5)
+#define V4L2_HEVC_SPS_FLAG_LONG_TERM_REF_PICS_PRESENT (1ULL << 6)
+#define V4L2_HEVC_SPS_FLAG_SPS_TEMPORAL_MVP_ENABLED (1ULL << 7)
+#define V4L2_HEVC_SPS_FLAG_STRONG_INTRA_SMOOTHING_ENABLED (1ULL << 8)
+
+/* The controls are not stable at the moment and will likely be reworked. */
+struct v4l2_ctrl_hevc_sps {
+ /* ISO/IEC 23008-2, ITU-T Rec. H.265: Sequence parameter set */
+ __u16 pic_width_in_luma_samples;
+ __u16 pic_height_in_luma_samples;
+ __u8 bit_depth_luma_minus8;
+ __u8 bit_depth_chroma_minus8;
+ __u8 log2_max_pic_order_cnt_lsb_minus4;
+ __u8 sps_max_dec_pic_buffering_minus1;
+ __u8 sps_max_num_reorder_pics;
+ __u8 sps_max_latency_increase_plus1;
+ __u8 log2_min_luma_coding_block_size_minus3;
+ __u8 log2_diff_max_min_luma_coding_block_size;
+ __u8 log2_min_luma_transform_block_size_minus2;
+ __u8 log2_diff_max_min_luma_transform_block_size;
+ __u8 max_transform_hierarchy_depth_inter;
+ __u8 max_transform_hierarchy_depth_intra;
+ __u8 pcm_sample_bit_depth_luma_minus1;
+ __u8 pcm_sample_bit_depth_chroma_minus1;
+ __u8 log2_min_pcm_luma_coding_block_size_minus3;
+ __u8 log2_diff_max_min_pcm_luma_coding_block_size;
+ __u8 num_short_term_ref_pic_sets;
+ __u8 num_long_term_ref_pics_sps;
+ __u8 chroma_format_idc;
+
+ __u8 padding;
+
+ __u64 flags;
+};
+
+#define V4L2_HEVC_PPS_FLAG_DEPENDENT_SLICE_SEGMENT (1ULL << 0)
+#define V4L2_HEVC_PPS_FLAG_OUTPUT_FLAG_PRESENT (1ULL << 1)
+#define V4L2_HEVC_PPS_FLAG_SIGN_DATA_HIDING_ENABLED (1ULL << 2)
+#define V4L2_HEVC_PPS_FLAG_CABAC_INIT_PRESENT (1ULL << 3)
+#define V4L2_HEVC_PPS_FLAG_CONSTRAINED_INTRA_PRED (1ULL << 4)
+#define V4L2_HEVC_PPS_FLAG_TRANSFORM_SKIP_ENABLED (1ULL << 5)
+#define V4L2_HEVC_PPS_FLAG_CU_QP_DELTA_ENABLED (1ULL << 6)
+#define V4L2_HEVC_PPS_FLAG_PPS_SLICE_CHROMA_QP_OFFSETS_PRESENT (1ULL << 7)
+#define V4L2_HEVC_PPS_FLAG_WEIGHTED_PRED (1ULL << 8)
+#define V4L2_HEVC_PPS_FLAG_WEIGHTED_BIPRED (1ULL << 9)
+#define V4L2_HEVC_PPS_FLAG_TRANSQUANT_BYPASS_ENABLED (1ULL << 10)
+#define V4L2_HEVC_PPS_FLAG_TILES_ENABLED (1ULL << 11)
+#define V4L2_HEVC_PPS_FLAG_ENTROPY_CODING_SYNC_ENABLED (1ULL << 12)
+#define V4L2_HEVC_PPS_FLAG_LOOP_FILTER_ACROSS_TILES_ENABLED (1ULL << 13)
+#define V4L2_HEVC_PPS_FLAG_PPS_LOOP_FILTER_ACROSS_SLICES_ENABLED (1ULL << 14)
+#define V4L2_HEVC_PPS_FLAG_DEBLOCKING_FILTER_OVERRIDE_ENABLED (1ULL << 15)
+#define V4L2_HEVC_PPS_FLAG_PPS_DISABLE_DEBLOCKING_FILTER (1ULL << 16)
+#define V4L2_HEVC_PPS_FLAG_LISTS_MODIFICATION_PRESENT (1ULL << 17)
+#define V4L2_HEVC_PPS_FLAG_SLICE_SEGMENT_HEADER_EXTENSION_PRESENT (1ULL << 18)
+
+struct v4l2_ctrl_hevc_pps {
+ /* ISO/IEC 23008-2, ITU-T Rec. H.265: Picture parameter set */
+ __u8 num_extra_slice_header_bits;
+ __s8 init_qp_minus26;
+ __u8 diff_cu_qp_delta_depth;
+ __s8 pps_cb_qp_offset;
+ __s8 pps_cr_qp_offset;
+ __u8 num_tile_columns_minus1;
+ __u8 num_tile_rows_minus1;
+ __u8 column_width_minus1[20];
+ __u8 row_height_minus1[22];
+ __s8 pps_beta_offset_div2;
+ __s8 pps_tc_offset_div2;
+ __u8 log2_parallel_merge_level_minus2;
+
+ __u8 padding[4];
+ __u64 flags;
+};
+
+#define V4L2_HEVC_DPB_ENTRY_RPS_ST_CURR_BEFORE 0x01
+#define V4L2_HEVC_DPB_ENTRY_RPS_ST_CURR_AFTER 0x02
+#define V4L2_HEVC_DPB_ENTRY_RPS_LT_CURR 0x03
+
+#define V4L2_HEVC_DPB_ENTRIES_NUM_MAX 16
+
+struct v4l2_hevc_dpb_entry {
+ __u64 timestamp;
+ __u8 rps;
+ __u8 field_pic;
+ __u16 pic_order_cnt[2];
+ __u8 padding[2];
+};
+
+struct v4l2_hevc_pred_weight_table {
+ __s8 delta_luma_weight_l0[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
+ __s8 luma_offset_l0[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
+ __s8 delta_chroma_weight_l0[V4L2_HEVC_DPB_ENTRIES_NUM_MAX][2];
+ __s8 chroma_offset_l0[V4L2_HEVC_DPB_ENTRIES_NUM_MAX][2];
+
+ __s8 delta_luma_weight_l1[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
+ __s8 luma_offset_l1[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
+ __s8 delta_chroma_weight_l1[V4L2_HEVC_DPB_ENTRIES_NUM_MAX][2];
+ __s8 chroma_offset_l1[V4L2_HEVC_DPB_ENTRIES_NUM_MAX][2];
+
+ __u8 padding[6];
+
+ __u8 luma_log2_weight_denom;
+ __s8 delta_chroma_log2_weight_denom;
+};
+
+#define V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_SAO_LUMA (1ULL << 0)
+#define V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_SAO_CHROMA (1ULL << 1)
+#define V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_TEMPORAL_MVP_ENABLED (1ULL << 2)
+#define V4L2_HEVC_SLICE_PARAMS_FLAG_MVD_L1_ZERO (1ULL << 3)
+#define V4L2_HEVC_SLICE_PARAMS_FLAG_CABAC_INIT (1ULL << 4)
+#define V4L2_HEVC_SLICE_PARAMS_FLAG_COLLOCATED_FROM_L0 (1ULL << 5)
+#define V4L2_HEVC_SLICE_PARAMS_FLAG_USE_INTEGER_MV (1ULL << 6)
+#define V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_DEBLOCKING_FILTER_DISABLED (1ULL << 7)
+#define V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_LOOP_FILTER_ACROSS_SLICES_ENABLED (1ULL << 8)
+
+struct v4l2_ctrl_hevc_slice_params {
+ __u32 bit_size;
+ __u32 data_bit_offset;
+
+ /* ISO/IEC 23008-2, ITU-T Rec. H.265: NAL unit header */
+ __u8 nal_unit_type;
+ __u8 nuh_temporal_id_plus1;
+
+ /* ISO/IEC 23008-2, ITU-T Rec. H.265: General slice segment header */
+ __u8 slice_type;
+ __u8 colour_plane_id;
+ __u16 slice_pic_order_cnt;
+ __u8 num_ref_idx_l0_active_minus1;
+ __u8 num_ref_idx_l1_active_minus1;
+ __u8 collocated_ref_idx;
+ __u8 five_minus_max_num_merge_cand;
+ __s8 slice_qp_delta;
+ __s8 slice_cb_qp_offset;
+ __s8 slice_cr_qp_offset;
+ __s8 slice_act_y_qp_offset;
+ __s8 slice_act_cb_qp_offset;
+ __s8 slice_act_cr_qp_offset;
+ __s8 slice_beta_offset_div2;
+ __s8 slice_tc_offset_div2;
+
+ /* ISO/IEC 23008-2, ITU-T Rec. H.265: Picture timing SEI message */
+ __u8 pic_struct;
+
+ /* ISO/IEC 23008-2, ITU-T Rec. H.265: General slice segment header */
+ __u8 num_active_dpb_entries;
+ __u8 ref_idx_l0[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
+ __u8 ref_idx_l1[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
+
+ __u8 num_rps_poc_st_curr_before;
+ __u8 num_rps_poc_st_curr_after;
+ __u8 num_rps_poc_lt_curr;
+
+ __u8 padding;
+
+ /* ISO/IEC 23008-2, ITU-T Rec. H.265: General slice segment header */
+ struct v4l2_hevc_dpb_entry dpb[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
+
+ /* ISO/IEC 23008-2, ITU-T Rec. H.265: Weighted prediction parameter */
+ struct v4l2_hevc_pred_weight_table pred_weight_table;
+
+ __u64 flags;
+};
+
+#endif
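
A minimal userspace sketch of how these compound controls are meant to be
consumed: the parsed SPS/PPS/slice structures are attached to a media
request through the extended-control API. The V4L2_CID_MPEG_VIDEO_HEVC_*
control IDs are assumed from earlier in this series, and error handling
is omitted.

    /* Sketch: attach bitstream-derived HEVC parameters to a request.
     * The control IDs are assumptions from this patch series.
     */
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    static int set_hevc_params(int video_fd, int request_fd,
                               struct v4l2_ctrl_hevc_sps *sps,
                               struct v4l2_ctrl_hevc_pps *pps,
                               struct v4l2_ctrl_hevc_slice_params *slice)
    {
            struct v4l2_ext_control ctrls[3] = {
                    { .id = V4L2_CID_MPEG_VIDEO_HEVC_SPS,
                      .size = sizeof(*sps), .ptr = sps },
                    { .id = V4L2_CID_MPEG_VIDEO_HEVC_PPS,
                      .size = sizeof(*pps), .ptr = pps },
                    { .id = V4L2_CID_MPEG_VIDEO_HEVC_SLICE_PARAMS,
                      .size = sizeof(*slice), .ptr = slice },
            };
            struct v4l2_ext_controls arg = {
                    .which = V4L2_CTRL_WHICH_REQUEST_VAL,
                    .request_fd = request_fd,
                    .count = 3,
                    .controls = ctrls,
            };

            return ioctl(video_fd, VIDIOC_S_EXT_CTRLS, &arg);
    }

The flags members are built by OR-ing the V4L2_HEVC_*_FLAG_* bits defined
above, mirroring the corresponding bitstream syntax elements.
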
diff --git a/include/media/i2c/smiapp.h b/include/media/i2c/smiapp.h
index d6ccc859bfcd..80f8251d87a3 100644
--- a/include/media/i2c/smiapp.h
+++ b/include/media/i2c/smiapp.h
@@ -49,7 +49,6 @@ struct smiapp_hwconfig {
unsigned short i2c_addr_dfl; /* Default i2c addr */
unsigned short i2c_addr_alt; /* Alternate i2c addr */
- uint32_t nvm_size; /* bytes */
uint32_t ext_clk; /* sensor external clk */
unsigned int lanes; /* Number of CSI-2 lanes */
diff --git a/include/media/rc-map.h b/include/media/rc-map.h
index afd2ab31bdf2..f99575a0d29c 100644
--- a/include/media/rc-map.h
+++ b/include/media/rc-map.h
@@ -159,21 +159,22 @@ struct rc_map *rc_map_get(const char *name);
#define RC_MAP_ASUS_PS3_100 "rc-asus-ps3-100"
#define RC_MAP_ATI_TV_WONDER_HD_600 "rc-ati-tv-wonder-hd-600"
#define RC_MAP_ATI_X10 "rc-ati-x10"
+#define RC_MAP_AVERMEDIA "rc-avermedia"
#define RC_MAP_AVERMEDIA_A16D "rc-avermedia-a16d"
#define RC_MAP_AVERMEDIA_CARDBUS "rc-avermedia-cardbus"
#define RC_MAP_AVERMEDIA_DVBT "rc-avermedia-dvbt"
#define RC_MAP_AVERMEDIA_M135A "rc-avermedia-m135a"
#define RC_MAP_AVERMEDIA_M733A_RM_K6 "rc-avermedia-m733a-rm-k6"
#define RC_MAP_AVERMEDIA_RM_KS "rc-avermedia-rm-ks"
-#define RC_MAP_AVERMEDIA "rc-avermedia"
#define RC_MAP_AVERTV_303 "rc-avertv-303"
#define RC_MAP_AZUREWAVE_AD_TU700 "rc-azurewave-ad-tu700"
-#define RC_MAP_BEHOLD_COLUMBUS "rc-behold-columbus"
+#define RC_MAP_BEELINK_GS1 "rc-beelink-gs1"
#define RC_MAP_BEHOLD "rc-behold"
+#define RC_MAP_BEHOLD_COLUMBUS "rc-behold-columbus"
#define RC_MAP_BUDGET_CI_OLD "rc-budget-ci-old"
#define RC_MAP_CEC "rc-cec"
-#define RC_MAP_CINERGY_1400 "rc-cinergy-1400"
#define RC_MAP_CINERGY "rc-cinergy"
+#define RC_MAP_CINERGY_1400 "rc-cinergy-1400"
#define RC_MAP_D680_DMB "rc-d680-dmb"
#define RC_MAP_DELOCK_61959 "rc-delock-61959"
#define RC_MAP_DIB0700_NEC_TABLE "rc-dib0700-nec"
@@ -181,17 +182,17 @@ struct rc_map *rc_map_get(const char *name);
#define RC_MAP_DIGITALNOW_TINYTWIN "rc-digitalnow-tinytwin"
#define RC_MAP_DIGITTRADE "rc-digittrade"
#define RC_MAP_DM1105_NEC "rc-dm1105-nec"
-#define RC_MAP_DNTV_LIVE_DVBT_PRO "rc-dntv-live-dvbt-pro"
#define RC_MAP_DNTV_LIVE_DVB_T "rc-dntv-live-dvb-t"
+#define RC_MAP_DNTV_LIVE_DVBT_PRO "rc-dntv-live-dvbt-pro"
#define RC_MAP_DTT200U "rc-dtt200u"
#define RC_MAP_DVBSKY "rc-dvbsky"
#define RC_MAP_DVICO_MCE "rc-dvico-mce"
#define RC_MAP_DVICO_PORTABLE "rc-dvico-portable"
#define RC_MAP_EMPTY "rc-empty"
#define RC_MAP_EM_TERRATEC "rc-em-terratec"
+#define RC_MAP_ENCORE_ENLTV "rc-encore-enltv"
#define RC_MAP_ENCORE_ENLTV2 "rc-encore-enltv2"
#define RC_MAP_ENCORE_ENLTV_FM53 "rc-encore-enltv-fm53"
-#define RC_MAP_ENCORE_ENLTV "rc-encore-enltv"
#define RC_MAP_EVGA_INDTUBE "rc-evga-indtube"
#define RC_MAP_EZTV "rc-eztv"
#define RC_MAP_FLYDVB "rc-flydvb"
@@ -201,6 +202,7 @@ struct rc_map *rc_map_get(const char *name);
#define RC_MAP_GEEKBOX "rc-geekbox"
#define RC_MAP_GENIUS_TVGO_A11MCE "rc-genius-tvgo-a11mce"
#define RC_MAP_GOTVIEW7135 "rc-gotview7135"
+#define RC_MAP_HAUPPAUGE "rc-hauppauge"
#define RC_MAP_HAUPPAUGE_NEW "rc-hauppauge"
#define RC_MAP_HISI_POPLAR "rc-hisi-poplar"
#define RC_MAP_HISI_TV_DEMO "rc-hisi-tv-demo"
@@ -223,8 +225,8 @@ struct rc_map *rc_map_get(const char *name);
#define RC_MAP_MEDION_X10_OR2X "rc-medion-x10-or2x"
#define RC_MAP_MSI_DIGIVOX_II "rc-msi-digivox-ii"
#define RC_MAP_MSI_DIGIVOX_III "rc-msi-digivox-iii"
-#define RC_MAP_MSI_TVANYWHERE_PLUS "rc-msi-tvanywhere-plus"
#define RC_MAP_MSI_TVANYWHERE "rc-msi-tvanywhere"
+#define RC_MAP_MSI_TVANYWHERE_PLUS "rc-msi-tvanywhere-plus"
#define RC_MAP_NEBULA "rc-nebula"
#define RC_MAP_NEC_TERRATEC_CINERGY_XS "rc-nec-terratec-cinergy-xs"
#define RC_MAP_NORWOOD "rc-norwood"
@@ -234,21 +236,21 @@ struct rc_map *rc_map_get(const char *name);
#define RC_MAP_PINNACLE_COLOR "rc-pinnacle-color"
#define RC_MAP_PINNACLE_GREY "rc-pinnacle-grey"
#define RC_MAP_PINNACLE_PCTV_HD "rc-pinnacle-pctv-hd"
-#define RC_MAP_PIXELVIEW_NEW "rc-pixelview-new"
#define RC_MAP_PIXELVIEW "rc-pixelview"
-#define RC_MAP_PIXELVIEW_002T "rc-pixelview-002t"
+#define RC_MAP_PIXELVIEW_002T "rc-pixelview-002t"
#define RC_MAP_PIXELVIEW_MK12 "rc-pixelview-mk12"
+#define RC_MAP_PIXELVIEW_NEW "rc-pixelview-new"
#define RC_MAP_POWERCOLOR_REAL_ANGEL "rc-powercolor-real-angel"
#define RC_MAP_PROTEUS_2309 "rc-proteus-2309"
#define RC_MAP_PURPLETV "rc-purpletv"
#define RC_MAP_PV951 "rc-pv951"
-#define RC_MAP_HAUPPAUGE "rc-hauppauge"
#define RC_MAP_RC5_TV "rc-rc5-tv"
#define RC_MAP_RC6_MCE "rc-rc6-mce"
#define RC_MAP_REAL_AUDIO_220_32_KEYS "rc-real-audio-220-32-keys"
#define RC_MAP_REDDO "rc-reddo"
#define RC_MAP_SNAPSTREAM_FIREFLY "rc-snapstream-firefly"
#define RC_MAP_STREAMZAP "rc-streamzap"
+#define RC_MAP_SU3000 "rc-su3000"
#define RC_MAP_TANGO "rc-tango"
#define RC_MAP_TANIX_TX3MINI "rc-tanix-tx3mini"
#define RC_MAP_TANIX_TX5MAX "rc-tanix-tx5max"
@@ -268,6 +270,7 @@ struct rc_map *rc_map_get(const char *name);
#define RC_MAP_TT_1500 "rc-tt-1500"
#define RC_MAP_TWINHAN_DTV_CAB_CI "rc-twinhan-dtv-cab-ci"
#define RC_MAP_TWINHAN_VP1027_DVBS "rc-twinhan1027"
+#define RC_MAP_VEGA_S9X "rc-vega-s9x"
#define RC_MAP_VIDEOMATE_K100 "rc-videomate-k100"
#define RC_MAP_VIDEOMATE_S350 "rc-videomate-s350"
#define RC_MAP_VIDEOMATE_TV_PVR "rc-videomate-tv-pvr"
@@ -275,9 +278,8 @@ struct rc_map *rc_map_get(const char *name);
#define RC_MAP_WETEK_PLAY2 "rc-wetek-play2"
#define RC_MAP_WINFAST "rc-winfast"
#define RC_MAP_WINFAST_USBII_DELUXE "rc-winfast-usbii-deluxe"
-#define RC_MAP_SU3000 "rc-su3000"
-#define RC_MAP_XBOX_DVD "rc-xbox-dvd"
#define RC_MAP_X96MAX "rc-x96max"
+#define RC_MAP_XBOX_DVD "rc-xbox-dvd"
#define RC_MAP_ZX_IRDEC "rc-zx-irdec"
/*
diff --git a/include/media/v4l2-common.h b/include/media/v4l2-common.h
index c070d8ae11e5..d8c29e089000 100644
--- a/include/media/v4l2-common.h
+++ b/include/media/v4l2-common.h
@@ -457,8 +457,24 @@ int v4l2_s_parm_cap(struct video_device *vdev,
/* Pixel format and FourCC helpers */
/**
+ * enum v4l2_pixel_encoding - specifies the pixel encoding value
+ *
+ * @V4L2_PIXEL_ENC_UNKNOWN: Pixel encoding is unknown/un-initialized
+ * @V4L2_PIXEL_ENC_YUV: Pixel encoding is YUV
+ * @V4L2_PIXEL_ENC_RGB: Pixel encoding is RGB
+ * @V4L2_PIXEL_ENC_BAYER: Pixel encoding is Bayer
+ */
+enum v4l2_pixel_encoding {
+ V4L2_PIXEL_ENC_UNKNOWN = 0,
+ V4L2_PIXEL_ENC_YUV = 1,
+ V4L2_PIXEL_ENC_RGB = 2,
+ V4L2_PIXEL_ENC_BAYER = 3,
+};
+
+/**
* struct v4l2_format_info - information about a V4L2 format
* @format: 4CC format identifier (V4L2_PIX_FMT_*)
+ * @pixel_enc: Pixel encoding (see enum v4l2_pixel_encoding above)
* @mem_planes: Number of memory planes, which includes the alpha plane (1 to 4).
* @comp_planes: Number of component planes, which includes the alpha plane (1 to 4).
* @bpp: Array of per-plane bytes per pixel
@@ -469,6 +485,7 @@ int v4l2_s_parm_cap(struct video_device *vdev,
*/
struct v4l2_format_info {
u32 format;
+ u8 pixel_enc;
u8 mem_planes;
u8 comp_planes;
u8 bpp[4];
@@ -478,8 +495,22 @@ struct v4l2_format_info {
u8 block_h[4];
};
-const struct v4l2_format_info *v4l2_format_info(u32 format);
+static inline bool v4l2_is_format_rgb(const struct v4l2_format_info *f)
+{
+ return f && f->pixel_enc == V4L2_PIXEL_ENC_RGB;
+}
+static inline bool v4l2_is_format_yuv(const struct v4l2_format_info *f)
+{
+ return f && f->pixel_enc == V4L2_PIXEL_ENC_YUV;
+}
+
+static inline bool v4l2_is_format_bayer(const struct v4l2_format_info *f)
+{
+ return f && f->pixel_enc == V4L2_PIXEL_ENC_BAYER;
+}
+
+const struct v4l2_format_info *v4l2_format_info(u32 format);
void v4l2_apply_frmsize_constraints(u32 *width, u32 *height,
const struct v4l2_frmsize_stepwise *frmsize);
int v4l2_fill_pixfmt(struct v4l2_pix_format *pixfmt, u32 pixelformat,
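
A rough illustration of what the new encoding helpers enable: drivers can
branch on the reported class of a format instead of keeping private
format lists. The colorspace defaults below are arbitrary (sketch only).

    /* Sketch: choose a default colorspace from the pixel encoding.
     * v4l2_format_info() may return NULL; the helpers handle that.
     */
    const struct v4l2_format_info *info = v4l2_format_info(pixelformat);

    if (v4l2_is_format_yuv(info))
            pix->colorspace = V4L2_COLORSPACE_REC709;
    else if (v4l2_is_format_rgb(info) || v4l2_is_format_bayer(info))
            pix->colorspace = V4L2_COLORSPACE_SRGB;
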
diff --git a/include/media/v4l2-ctrls.h b/include/media/v4l2-ctrls.h
index 570ff4b0205a..7db9e719a583 100644
--- a/include/media/v4l2-ctrls.h
+++ b/include/media/v4l2-ctrls.h
@@ -21,6 +21,7 @@
#include <media/fwht-ctrls.h>
#include <media/h264-ctrls.h>
#include <media/vp8-ctrls.h>
+#include <media/hevc-ctrls.h>
/* forward references */
struct file;
@@ -50,7 +51,12 @@ struct poll_table_struct;
* @p_h264_slice_params: Pointer to a struct v4l2_ctrl_h264_slice_params.
* @p_h264_decode_params: Pointer to a struct v4l2_ctrl_h264_decode_params.
* @p_vp8_frame_header: Pointer to a VP8 frame header structure.
+ * @p_hevc_sps: Pointer to an HEVC sequence parameter set structure.
+ * @p_hevc_pps: Pointer to an HEVC picture parameter set structure.
+ * @p_hevc_slice_params: Pointer to an HEVC slice parameters structure.
+ * @p_area: Pointer to an area.
* @p: Pointer to a compound value.
+ * @p_const: Pointer to a constant compound value.
*/
union v4l2_ctrl_ptr {
s32 *p_s32;
@@ -68,10 +74,27 @@ union v4l2_ctrl_ptr {
struct v4l2_ctrl_h264_slice_params *p_h264_slice_params;
struct v4l2_ctrl_h264_decode_params *p_h264_decode_params;
struct v4l2_ctrl_vp8_frame_header *p_vp8_frame_header;
+ struct v4l2_ctrl_hevc_sps *p_hevc_sps;
+ struct v4l2_ctrl_hevc_pps *p_hevc_pps;
+ struct v4l2_ctrl_hevc_slice_params *p_hevc_slice_params;
+ struct v4l2_area *p_area;
void *p;
+ const void *p_const;
};
/**
+ * v4l2_ctrl_ptr_create() - Helper function to return a v4l2_ctrl_ptr from a
+ * void pointer
+ * @ptr: The void pointer
+ */
+static inline union v4l2_ctrl_ptr v4l2_ctrl_ptr_create(void *ptr)
+{
+ union v4l2_ctrl_ptr p = { .p = ptr };
+
+ return p;
+}
+
+/**
* struct v4l2_ctrl_ops - The control operations that the driver has to provide.
*
* @g_volatile_ctrl: Get a new value for this control. Generally only relevant
@@ -200,6 +223,9 @@ typedef void (*v4l2_ctrl_notify_fnc)(struct v4l2_ctrl *ctrl, void *priv);
* not freed when the control is deleted. Should this be needed
* then a new internal bitfield can be added to tell the framework
* to free this pointer.
+ * @p_def: The control's default value represented via a union which
+ * provides a standard way of accessing control types
+ * through a pointer (for compound controls only).
* @p_cur: The control's current value represented via a union which
* provides a standard way of accessing control types
* through a pointer.
@@ -254,6 +280,7 @@ struct v4l2_ctrl {
s32 val;
} cur;
+ union v4l2_ctrl_ptr p_def;
union v4l2_ctrl_ptr p_new;
union v4l2_ctrl_ptr p_cur;
};
@@ -357,6 +384,7 @@ struct v4l2_ctrl_handler {
* @max: The control's maximum value.
* @step: The control's step value for non-menu controls.
* @def: The control's default value.
+ * @p_def: The control's default value for compound controls.
* @dims: The size of each dimension.
* @elem_size: The size in bytes of the control.
* @flags: The control's flags.
@@ -385,6 +413,7 @@ struct v4l2_ctrl_config {
s64 max;
u64 step;
s64 def;
+ union v4l2_ctrl_ptr p_def;
u32 dims[V4L2_CTRL_MAX_DIMS];
u32 elem_size;
u32 flags;
@@ -647,6 +676,24 @@ struct v4l2_ctrl *v4l2_ctrl_new_std_menu_items(struct v4l2_ctrl_handler *hdl,
const char * const *qmenu);
/**
+ * v4l2_ctrl_new_std_compound() - Allocate and initialize a new standard V4L2
+ * compound control.
+ *
+ * @hdl: The control handler.
+ * @ops: The control ops.
+ * @id: The control ID.
+ * @p_def: The control's default value.
+ *
+ * Same as v4l2_ctrl_new_std(), but with support for compound controls, thanks
+ * to the @p_def field.
+ *
+ */
+struct v4l2_ctrl *v4l2_ctrl_new_std_compound(struct v4l2_ctrl_handler *hdl,
+ const struct v4l2_ctrl_ops *ops,
+ u32 id,
+ const union v4l2_ctrl_ptr p_def);
+
+/**
* v4l2_ctrl_new_int_menu() - Create a new standard V4L2 integer menu control.
*
* @hdl: The control handler.
@@ -1065,6 +1112,46 @@ static inline int v4l2_ctrl_s_ctrl_string(struct v4l2_ctrl *ctrl, const char *s)
return rval;
}
+/**
+ * __v4l2_ctrl_s_ctrl_area() - Unlocked variant of v4l2_ctrl_s_ctrl_area().
+ *
+ * @ctrl: The control.
+ * @area: The new area.
+ *
+ * This sets the control's new area safely by going through the control
+ * framework. This function assumes the control's handler is already locked,
+ * allowing it to be used from within the &v4l2_ctrl_ops functions.
+ *
+ * This function is for area type controls only.
+ */
+int __v4l2_ctrl_s_ctrl_area(struct v4l2_ctrl *ctrl,
+ const struct v4l2_area *area);
+
+/**
+ * v4l2_ctrl_s_ctrl_area() - Helper function to set a control's area value
+ * from within a driver.
+ *
+ * @ctrl: The control.
+ * @area: The new area.
+ *
+ * This sets the control's new area safely by going through the control
+ * framework. This function will lock the control's handler, so it cannot be
+ * used from within the &v4l2_ctrl_ops functions.
+ *
+ * This function is for area type controls only.
+ */
+static inline int v4l2_ctrl_s_ctrl_area(struct v4l2_ctrl *ctrl,
+ const struct v4l2_area *area)
+{
+ int rval;
+
+ v4l2_ctrl_lock(ctrl);
+ rval = __v4l2_ctrl_s_ctrl_area(ctrl, area);
+ v4l2_ctrl_unlock(ctrl);
+
+ return rval;
+}
+
/* Internal helper functions that deal with control events. */
extern const struct v4l2_subscribed_event_ops v4l2_ctrl_sub_ev_ops;
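
A minimal sketch of how a driver might use the two additions above
together; the control ID and default value are hypothetical, and the
const cast matches v4l2_ctrl_ptr_create() taking a void pointer.

    /* Sketch: register a compound control with a default value, then
     * update the area from code that does not hold the handler lock
     * (hypothetical driver).
     */
    static const struct v4l2_area unit_size = {
            .width  = 3840,
            .height = 2160,
    };

    priv->area_ctrl = v4l2_ctrl_new_std_compound(&priv->hdl, NULL,
                            V4L2_CID_UNIT_CELL_SIZE,  /* assumed CID */
                            v4l2_ctrl_ptr_create((void *)&unit_size));

    /* ... later, e.g. after a mode change: */
    v4l2_ctrl_s_ctrl_area(priv->area_ctrl, &new_area);
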
diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h
index e0b8f2602670..5f36e0d2ede6 100644
--- a/include/media/v4l2-device.h
+++ b/include/media/v4l2-device.h
@@ -72,7 +72,7 @@ static inline void v4l2_device_get(struct v4l2_device *v4l2_dev)
}
/**
- * v4l2_device_put - putss a V4L2 device reference
+ * v4l2_device_put - puts a V4L2 device reference
*
* @v4l2_dev: pointer to struct &v4l2_device
*
diff --git a/include/media/v4l2-mem2mem.h b/include/media/v4l2-mem2mem.h
index 0b9c3a287061..1d85e24791e4 100644
--- a/include/media/v4l2-mem2mem.h
+++ b/include/media/v4l2-mem2mem.h
@@ -21,7 +21,8 @@
* callback.
* The job does NOT have to end before this callback returns
* (and it will be the usual case). When the job finishes,
- * v4l2_m2m_job_finish() has to be called.
+ * v4l2_m2m_job_finish() or v4l2_m2m_buf_done_and_job_finish()
+ * has to be called.
* @job_ready: optional. Should return 0 if the driver does not have a job
* fully prepared to run yet (i.e. it will not be able to finish a
* transaction without sleeping). If not provided, it will be
@@ -33,7 +34,8 @@
* stop the device safely; e.g. in the next interrupt handler),
* even if the transaction would not have been finished by then.
* After the driver performs the necessary steps, it has to call
- * v4l2_m2m_job_finish() (as if the transaction ended normally).
+ * v4l2_m2m_job_finish() or v4l2_m2m_buf_done_and_job_finish() as
+ * if the transaction ended normally.
* This function does not have to (and will usually not) wait
* until the device enters a state when it can be stopped.
*/
@@ -73,6 +75,11 @@ struct v4l2_m2m_queue_ctx {
* struct v4l2_m2m_ctx - Memory to memory context structure
*
* @q_lock: struct &mutex lock
+ * @new_frame: valid in the device_run callback: if true, then this
+ * starts a new frame; if false, then this is a new slice
+ * for an existing frame. This is always true unless
+ * V4L2_BUF_CAP_SUPPORTS_M2M_HOLD_CAPTURE_BUF is set, which
+ * indicates slicing support.
* @m2m_dev: opaque pointer to the internal data to handle M2M context
* @cap_q_ctx: Capture (output to memory) queue context
* @out_q_ctx: Output (input from memory) queue context
@@ -89,6 +96,8 @@ struct v4l2_m2m_ctx {
/* optional cap/out vb2 queues lock */
struct mutex *q_lock;
+ bool new_frame;
+
/* internal use only */
struct v4l2_m2m_dev *m2m_dev;
@@ -173,6 +182,33 @@ void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx);
void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
struct v4l2_m2m_ctx *m2m_ctx);
+/**
+ * v4l2_m2m_buf_done_and_job_finish() - return source/destination buffers with
+ * state and inform the framework that a job has been finished and have it
+ * clean up
+ *
+ * @m2m_dev: opaque pointer to the internal data to handle M2M context
+ * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
+ * @state: vb2 buffer state passed to v4l2_m2m_buf_done().
+ *
+ * Drivers that set V4L2_BUF_CAP_SUPPORTS_M2M_HOLD_CAPTURE_BUF must use this
+ * function instead of job_finish() to take held buffers into account. It is
+ * optional for other drivers.
+ *
+ * This function removes the source buffer from the ready list and returns
+ * it with the given state. The same is done for the destination buffer, unless
+ * it is marked 'held'. In that case the buffer is kept on the ready list.
+ *
+ * After that the job is finished (see job_finish()).
+ *
+ * This allows for multiple output buffers to be used to fill in a single
+ * capture buffer. This is typically used by stateless decoders where
+ * multiple buffers (e.g. H.264 slices) contribute to a single decoded frame.
+ */
+void v4l2_m2m_buf_done_and_job_finish(struct v4l2_m2m_dev *m2m_dev,
+ struct v4l2_m2m_ctx *m2m_ctx,
+ enum vb2_buffer_state state);
+
static inline void
v4l2_m2m_buf_done(struct vb2_v4l2_buffer *buf, enum vb2_buffer_state state)
{
@@ -672,6 +708,10 @@ int v4l2_m2m_ioctl_try_encoder_cmd(struct file *file, void *fh,
struct v4l2_encoder_cmd *ec);
int v4l2_m2m_ioctl_try_decoder_cmd(struct file *file, void *fh,
struct v4l2_decoder_cmd *dc);
+int v4l2_m2m_ioctl_stateless_try_decoder_cmd(struct file *file, void *fh,
+ struct v4l2_decoder_cmd *dc);
+int v4l2_m2m_ioctl_stateless_decoder_cmd(struct file *file, void *priv,
+ struct v4l2_decoder_cmd *dc);
int v4l2_m2m_fop_mmap(struct file *file, struct vm_area_struct *vma);
__poll_t v4l2_m2m_fop_poll(struct file *file, poll_table *wait);
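
A rough sketch of the intended driver flow for slice-based decoding; all
foo_* names are hypothetical. The device_run callback consults @new_frame,
and the completion path uses the new helper so that a held capture buffer
stays queued for the next slice.

    static void foo_device_run(void *priv)
    {
            struct foo_ctx *ctx = priv;

            if (ctx->fh.m2m_ctx->new_frame)
                    foo_setup_new_frame(ctx);   /* hypothetical */
            foo_decode_slice(ctx);              /* hypothetical */
    }

    static irqreturn_t foo_irq(int irq, void *data)
    {
            struct foo_dev *dev = data;

            /* Returns the src buffer with the given state; the dst
             * buffer is returned too unless it is marked held.
             */
            v4l2_m2m_buf_done_and_job_finish(dev->m2m_dev, dev->curr_ctx,
                                             VB2_BUF_STATE_DONE);
            return IRQ_HANDLED;
    }
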
diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
index 640aabe69450..a2b2208b02da 100644
--- a/include/media/videobuf2-core.h
+++ b/include/media/videobuf2-core.h
@@ -505,6 +505,8 @@ struct vb2_buf_ops {
* @buf_ops: callbacks to deliver buffer information.
* between user-space and kernel-space.
* @drv_priv: driver private data.
+ * @subsystem_flags: Flags specific to the subsystem (V4L2/DVB/etc.). Not used
+ * by the vb2 core.
* @buf_struct_size: size of the driver-specific buffer structure;
* "0" indicates the driver doesn't want to use a custom buffer
* structure type. for example, ``sizeof(struct vb2_v4l2_buffer)``
@@ -571,6 +573,7 @@ struct vb2_queue {
const struct vb2_buf_ops *buf_ops;
void *drv_priv;
+ u32 subsystem_flags;
unsigned int buf_struct_size;
u32 timestamp_flags;
gfp_t gfp_flags;
diff --git a/include/media/videobuf2-v4l2.h b/include/media/videobuf2-v4l2.h
index 8a10889dc2fd..59bf33a12648 100644
--- a/include/media/videobuf2-v4l2.h
+++ b/include/media/videobuf2-v4l2.h
@@ -33,6 +33,7 @@
* @timecode: frame timecode.
* @sequence: sequence count of this frame.
* @request_fd: the request_fd associated with this buffer
+ * @is_held: if true, then this capture buffer was held
* @planes: plane information (userptr/fd, length, bytesused, data_offset).
*
* Should contain enough information to be able to cover all the fields
@@ -46,9 +47,13 @@ struct vb2_v4l2_buffer {
struct v4l2_timecode timecode;
__u32 sequence;
__s32 request_fd;
+ bool is_held;
struct vb2_plane planes[VB2_MAX_PLANES];
};
+/* VB2 V4L2 flags as set in vb2_queue.subsystem_flags */
+#define VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF (1 << 0)
+
/*
* to_vb2_v4l2_buffer() - cast struct vb2_buffer * to struct vb2_v4l2_buffer *
*/
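
Drivers presumably opt in per vb2 queue, from their queue_init callback
(a one-line sketch); the V4L2 core can then report the matching
V4L2_BUF_CAP_SUPPORTS_M2M_HOLD_CAPTURE_BUF capability to userspace.

    /* Sketch: advertise hold-capture-buffer support on the OUTPUT queue. */
    src_vq->subsystem_flags |= VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF;
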
diff --git a/include/net/act_api.h b/include/net/act_api.h
index b18c699681ca..71347a90a9d1 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -23,7 +23,6 @@ struct tc_action_ops;
struct tc_action {
const struct tc_action_ops *ops;
__u32 type; /* for backward compat(TCA_OLD_COMPAT) */
- __u32 order;
struct tcf_idrinfo *idrinfo;
u32 tcfa_index;
@@ -41,6 +40,7 @@ struct tc_action {
struct gnet_stats_queue __percpu *cpu_qstats;
struct tc_cookie __rcu *act_cookie;
struct tcf_chain __rcu *goto_chain;
+ u32 tcfa_flags;
};
#define tcf_index common.tcfa_index
#define tcf_refcnt common.tcfa_refcnt
@@ -94,7 +94,7 @@ struct tc_action_ops {
int (*init)(struct net *net, struct nlattr *nla,
struct nlattr *est, struct tc_action **act, int ovr,
int bind, bool rtnl_held, struct tcf_proto *tp,
- struct netlink_ext_ack *extack);
+ u32 flags, struct netlink_ext_ack *extack);
int (*walk)(struct net *, struct sk_buff *,
struct netlink_callback *, int,
const struct tc_action_ops *,
@@ -154,7 +154,11 @@ int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb,
int tcf_idr_search(struct tc_action_net *tn, struct tc_action **a, u32 index);
int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
struct tc_action **a, const struct tc_action_ops *ops,
- int bind, bool cpustats);
+ int bind, bool cpustats, u32 flags);
+int tcf_idr_create_from_flags(struct tc_action_net *tn, u32 index,
+ struct nlattr *est, struct tc_action **a,
+ const struct tc_action_ops *ops, int bind,
+ u32 flags);
void tcf_idr_insert(struct tc_action_net *tn, struct tc_action *a);
void tcf_idr_cleanup(struct tc_action_net *tn, u32 index);
@@ -186,6 +190,43 @@ int tcf_action_dump(struct sk_buff *skb, struct tc_action *actions[], int bind,
int ref);
int tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int, int);
int tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int, int);
+
+static inline void tcf_action_update_bstats(struct tc_action *a,
+ struct sk_buff *skb)
+{
+ if (likely(a->cpu_bstats)) {
+ bstats_cpu_update(this_cpu_ptr(a->cpu_bstats), skb);
+ return;
+ }
+ spin_lock(&a->tcfa_lock);
+ bstats_update(&a->tcfa_bstats, skb);
+ spin_unlock(&a->tcfa_lock);
+}
+
+static inline void tcf_action_inc_drop_qstats(struct tc_action *a)
+{
+ if (likely(a->cpu_qstats)) {
+ qstats_drop_inc(this_cpu_ptr(a->cpu_qstats));
+ return;
+ }
+ spin_lock(&a->tcfa_lock);
+ qstats_drop_inc(&a->tcfa_qstats);
+ spin_unlock(&a->tcfa_lock);
+}
+
+static inline void tcf_action_inc_overlimit_qstats(struct tc_action *a)
+{
+ if (likely(a->cpu_qstats)) {
+ qstats_overlimit_inc(this_cpu_ptr(a->cpu_qstats));
+ return;
+ }
+ spin_lock(&a->tcfa_lock);
+ qstats_overlimit_inc(&a->tcfa_qstats);
+ spin_unlock(&a->tcfa_lock);
+}
+
+void tcf_action_update_stats(struct tc_action *a, u64 bytes, u32 packets,
+ bool drop, bool hw);
int tcf_action_copy_stats(struct sk_buff *, struct tc_action *, int);
int tcf_action_check_ctrlact(int action, struct tcf_proto *tp,
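
A sketch of the call pattern these helpers expect from an action's act()
hook; the likely() hints above favour the lockless percpu path, with the
spinlock taken only when per-CPU stats were not allocated. All foo_*
names are hypothetical.

    static int foo_act(struct sk_buff *skb, const struct tc_action *a,
                       struct tcf_result *res)
    {
            struct tcf_foo *f = to_foo(a);          /* hypothetical cast */

            tcf_action_update_bstats(&f->common, skb);
            if (foo_should_drop(skb)) {             /* hypothetical policy */
                    tcf_action_inc_drop_qstats(&f->common);
                    return TC_ACT_SHOT;
            }
            return TC_ACT_PIPE;
    }
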
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index 3f62b347b04a..1bab88184d3c 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -202,11 +202,11 @@ u32 ipv6_addr_label(struct net *net, const struct in6_addr *addr,
/*
* multicast prototypes (mcast.c)
*/
-static inline int ipv6_mc_may_pull(struct sk_buff *skb,
- unsigned int len)
+static inline bool ipv6_mc_may_pull(struct sk_buff *skb,
+ unsigned int len)
{
if (skb_transport_offset(skb) + ipv6_transport_len(skb) < len)
- return 0;
+ return false;
return pskb_may_pull(skb, len);
}
diff --git a/include/net/af_vsock.h b/include/net/af_vsock.h
index 80ea0f93d3f7..4206dc6d813f 100644
--- a/include/net/af_vsock.h
+++ b/include/net/af_vsock.h
@@ -10,7 +10,7 @@
#include <linux/kernel.h>
#include <linux/workqueue.h>
-#include <linux/vm_sockets.h>
+#include <uapi/linux/vm_sockets.h>
#include "vsock_addr.h"
@@ -27,6 +27,7 @@ extern spinlock_t vsock_table_lock;
struct vsock_sock {
/* sk must be the first member. */
struct sock sk;
+ const struct vsock_transport *transport;
struct sockaddr_vm local_addr;
struct sockaddr_vm remote_addr;
/* Links for the global tables of bound and connected sockets. */
@@ -64,16 +65,18 @@ struct vsock_sock {
bool sent_request;
bool ignore_connecting_rst;
+ /* Protected by lock_sock(sk) */
+ u64 buffer_size;
+ u64 buffer_min_size;
+ u64 buffer_max_size;
+
/* Private to transport. */
void *trans;
};
s64 vsock_stream_has_data(struct vsock_sock *vsk);
s64 vsock_stream_has_space(struct vsock_sock *vsk);
-struct sock *__vsock_create(struct net *net,
- struct socket *sock,
- struct sock *parent,
- gfp_t priority, unsigned short type, int kern);
+struct sock *vsock_create_connected(struct sock *parent);
/**** TRANSPORT ****/
@@ -88,7 +91,17 @@ struct vsock_transport_send_notify_data {
u64 data2; /* Transport-defined. */
};
+/* Transport features flags */
+/* Transport provides host->guest communication */
+#define VSOCK_TRANSPORT_F_H2G 0x00000001
+/* Transport provides guest->host communication */
+#define VSOCK_TRANSPORT_F_G2H 0x00000002
+/* Transport provides DGRAM communication */
+#define VSOCK_TRANSPORT_F_DGRAM 0x00000004
+
struct vsock_transport {
+ struct module *module;
+
/* Initialize/tear-down socket. */
int (*init)(struct vsock_sock *, struct vsock_sock *);
void (*destruct)(struct vsock_sock *);
@@ -139,33 +152,23 @@ struct vsock_transport {
struct vsock_transport_send_notify_data *);
int (*notify_send_post_enqueue)(struct vsock_sock *, ssize_t,
struct vsock_transport_send_notify_data *);
+ /* sk_lock held by the caller */
+ void (*notify_buffer_size)(struct vsock_sock *, u64 *);
/* Shutdown. */
int (*shutdown)(struct vsock_sock *, int);
- /* Buffer sizes. */
- void (*set_buffer_size)(struct vsock_sock *, u64);
- void (*set_min_buffer_size)(struct vsock_sock *, u64);
- void (*set_max_buffer_size)(struct vsock_sock *, u64);
- u64 (*get_buffer_size)(struct vsock_sock *);
- u64 (*get_min_buffer_size)(struct vsock_sock *);
- u64 (*get_max_buffer_size)(struct vsock_sock *);
-
/* Addressing. */
u32 (*get_local_cid)(void);
};
/**** CORE ****/
-int __vsock_core_init(const struct vsock_transport *t, struct module *owner);
-static inline int vsock_core_init(const struct vsock_transport *t)
-{
- return __vsock_core_init(t, THIS_MODULE);
-}
-void vsock_core_exit(void);
+int vsock_core_register(const struct vsock_transport *t, int features);
+void vsock_core_unregister(const struct vsock_transport *t);
/* The transport may downcast this to access transport-specific functions */
-const struct vsock_transport *vsock_core_get_transport(void);
+const struct vsock_transport *vsock_core_get_transport(struct vsock_sock *vsk);
/**** UTILS ****/
@@ -193,6 +196,8 @@ struct sock *vsock_find_connected_socket(struct sockaddr_vm *src,
struct sockaddr_vm *dst);
void vsock_remove_sock(struct vsock_sock *vsk);
void vsock_for_each_connected_socket(void (*fn)(struct sock *sk));
+int vsock_assign_transport(struct vsock_sock *vsk, struct vsock_sock *psk);
+bool vsock_find_cid(unsigned int cid);
/**** TAP ****/
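
Under the new multi-transport model, a transport registers itself once
with the feature set it provides instead of calling vsock_core_init();
a guest-side transport might look like this sketch (foo_* hypothetical).

    static struct vsock_transport foo_transport = {
            .module = THIS_MODULE,
            .notify_buffer_size = foo_notify_buffer_size,
            /* ... remaining callbacks ... */
    };

    static int __init foo_vsock_init(void)
    {
            return vsock_core_register(&foo_transport, VSOCK_TRANSPORT_F_G2H);
    }

    static void __exit foo_vsock_exit(void)
    {
            vsock_core_unregister(&foo_transport);
    }
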
diff --git a/include/net/arp.h b/include/net/arp.h
index c8f580a0e6b1..4950191f6b2b 100644
--- a/include/net/arp.h
+++ b/include/net/arp.h
@@ -57,8 +57,8 @@ static inline void __ipv4_confirm_neigh(struct net_device *dev, u32 key)
unsigned long now = jiffies;
/* avoid dirtying neighbour */
- if (n->confirmed != now)
- n->confirmed = now;
+ if (READ_ONCE(n->confirmed) != now)
+ WRITE_ONCE(n->confirmed, now);
}
rcu_read_unlock_bh();
}
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 4ab2c49423dc..059524b87c4c 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -565,6 +565,7 @@ struct vif_params {
* with the get_key() callback, must be in little endian,
* length given by @seq_len.
* @seq_len: length of @seq.
+ * @vlan_id: vlan_id for VLAN group key (if nonzero)
* @mode: key install mode (RX_TX, NO_TX or SET_TX)
*/
struct key_params {
@@ -572,6 +573,7 @@ struct key_params {
const u8 *seq;
int key_len;
int seq_len;
+ u16 vlan_id;
u32 cipher;
enum nl80211_key_mode mode;
};
@@ -1124,6 +1126,7 @@ struct sta_txpwr {
* (bitmask of BIT(%NL80211_STA_FLAG_...))
* @listen_interval: listen interval or -1 for no change
* @aid: AID or zero for no change
+ * @vlan_id: VLAN ID for station (if nonzero)
* @peer_aid: mesh peer AID or zero for no change
* @plink_action: plink action to take
* @plink_state: set the peer link state for a station
@@ -1159,6 +1162,7 @@ struct station_parameters {
u32 sta_modify_mask;
int listen_interval;
u16 aid;
+ u16 vlan_id;
u16 peer_aid;
u8 supported_rates_len;
u8 plink_action;
@@ -2602,6 +2606,13 @@ enum wiphy_params_flags {
#define IEEE80211_DEFAULT_AIRTIME_WEIGHT 256
+/* The per TXQ device queue limit in airtime */
+#define IEEE80211_DEFAULT_AQL_TXQ_LIMIT_L 5000
+#define IEEE80211_DEFAULT_AQL_TXQ_LIMIT_H 12000
+
+/* The per interface airtime threshold to switch to lower queue limit */
+#define IEEE80211_AQL_THRESHOLD 24000
+
/**
* struct cfg80211_pmksa - PMK Security Association
*
@@ -6593,7 +6604,7 @@ struct cfg80211_roam_info {
* time it is accessed in __cfg80211_roamed() due to delay in scheduling
* rdev->event_work. In case of any failures, the reference is released
* either in cfg80211_roamed() or in __cfg80211_romed(), Otherwise, it will be
- * released while diconneting from the current bss.
+ * released while disconnecting from the current bss.
*/
void cfg80211_roamed(struct net_device *dev, struct cfg80211_roam_info *info,
gfp_t gfp);
diff --git a/include/net/devlink.h b/include/net/devlink.h
index 23e4b65ec9df..47f87b2fcf63 100644
--- a/include/net/devlink.h
+++ b/include/net/devlink.h
@@ -38,7 +38,9 @@ struct devlink {
struct device *dev;
possible_net_t _net;
struct mutex lock;
- bool reload_failed;
+ u8 reload_failed:1,
+ reload_enabled:1,
+ registered:1;
char priv[0] __aligned(NETDEV_ALIGN);
};
@@ -400,6 +402,7 @@ enum devlink_param_generic_id {
DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN,
DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY,
DEVLINK_PARAM_GENERIC_ID_RESET_DEV_ON_DRV_PROBE,
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE,
/* add new param generic ids above here*/
__DEVLINK_PARAM_GENERIC_ID_MAX,
@@ -434,6 +437,9 @@ enum devlink_param_generic_id {
"reset_dev_on_drv_probe"
#define DEVLINK_PARAM_GENERIC_RESET_DEV_ON_DRV_PROBE_TYPE DEVLINK_PARAM_TYPE_U8
+#define DEVLINK_PARAM_GENERIC_ENABLE_ROCE_NAME "enable_roce"
+#define DEVLINK_PARAM_GENERIC_ENABLE_ROCE_TYPE DEVLINK_PARAM_TYPE_BOOL
+
#define DEVLINK_PARAM_GENERIC(_id, _cmodes, _get, _set, _validate) \
{ \
.id = DEVLINK_PARAM_GENERIC_ID_##_id, \
@@ -506,11 +512,13 @@ enum devlink_health_reporter_state {
struct devlink_health_reporter_ops {
char *name;
int (*recover)(struct devlink_health_reporter *reporter,
- void *priv_ctx);
+ void *priv_ctx, struct netlink_ext_ack *extack);
int (*dump)(struct devlink_health_reporter *reporter,
- struct devlink_fmsg *fmsg, void *priv_ctx);
+ struct devlink_fmsg *fmsg, void *priv_ctx,
+ struct netlink_ext_ack *extack);
int (*diagnose)(struct devlink_health_reporter *reporter,
- struct devlink_fmsg *fmsg);
+ struct devlink_fmsg *fmsg,
+ struct netlink_ext_ack *extack);
};
/**
@@ -566,6 +574,21 @@ enum devlink_trap_generic_id {
DEVLINK_TRAP_GENERIC_ID_BLACKHOLE_ROUTE,
DEVLINK_TRAP_GENERIC_ID_TTL_ERROR,
DEVLINK_TRAP_GENERIC_ID_TAIL_DROP,
+ DEVLINK_TRAP_GENERIC_ID_NON_IP_PACKET,
+ DEVLINK_TRAP_GENERIC_ID_UC_DIP_MC_DMAC,
+ DEVLINK_TRAP_GENERIC_ID_DIP_LB,
+ DEVLINK_TRAP_GENERIC_ID_SIP_MC,
+ DEVLINK_TRAP_GENERIC_ID_SIP_LB,
+ DEVLINK_TRAP_GENERIC_ID_CORRUPTED_IP_HDR,
+ DEVLINK_TRAP_GENERIC_ID_IPV4_SIP_BC,
+ DEVLINK_TRAP_GENERIC_ID_IPV6_MC_DIP_RESERVED_SCOPE,
+ DEVLINK_TRAP_GENERIC_ID_IPV6_MC_DIP_INTERFACE_LOCAL_SCOPE,
+ DEVLINK_TRAP_GENERIC_ID_MTU_ERROR,
+ DEVLINK_TRAP_GENERIC_ID_UNRESOLVED_NEIGH,
+ DEVLINK_TRAP_GENERIC_ID_RPF,
+ DEVLINK_TRAP_GENERIC_ID_REJECT_ROUTE,
+ DEVLINK_TRAP_GENERIC_ID_IPV4_LPM_UNICAST_MISS,
+ DEVLINK_TRAP_GENERIC_ID_IPV6_LPM_UNICAST_MISS,
/* Add new generic trap IDs above */
__DEVLINK_TRAP_GENERIC_ID_MAX,
@@ -604,6 +627,36 @@ enum devlink_trap_group_generic_id {
"ttl_value_is_too_small"
#define DEVLINK_TRAP_GENERIC_NAME_TAIL_DROP \
"tail_drop"
+#define DEVLINK_TRAP_GENERIC_NAME_NON_IP_PACKET \
+ "non_ip"
+#define DEVLINK_TRAP_GENERIC_NAME_UC_DIP_MC_DMAC \
+ "uc_dip_over_mc_dmac"
+#define DEVLINK_TRAP_GENERIC_NAME_DIP_LB \
+ "dip_is_loopback_address"
+#define DEVLINK_TRAP_GENERIC_NAME_SIP_MC \
+ "sip_is_mc"
+#define DEVLINK_TRAP_GENERIC_NAME_SIP_LB \
+ "sip_is_loopback_address"
+#define DEVLINK_TRAP_GENERIC_NAME_CORRUPTED_IP_HDR \
+ "ip_header_corrupted"
+#define DEVLINK_TRAP_GENERIC_NAME_IPV4_SIP_BC \
+ "ipv4_sip_is_limited_bc"
+#define DEVLINK_TRAP_GENERIC_NAME_IPV6_MC_DIP_RESERVED_SCOPE \
+ "ipv6_mc_dip_reserved_scope"
+#define DEVLINK_TRAP_GENERIC_NAME_IPV6_MC_DIP_INTERFACE_LOCAL_SCOPE \
+ "ipv6_mc_dip_interface_local_scope"
+#define DEVLINK_TRAP_GENERIC_NAME_MTU_ERROR \
+ "mtu_value_is_too_small"
+#define DEVLINK_TRAP_GENERIC_NAME_UNRESOLVED_NEIGH \
+ "unresolved_neigh"
+#define DEVLINK_TRAP_GENERIC_NAME_RPF \
+ "mc_reverse_path_forwarding"
+#define DEVLINK_TRAP_GENERIC_NAME_REJECT_ROUTE \
+ "reject_route"
+#define DEVLINK_TRAP_GENERIC_NAME_IPV4_LPM_UNICAST_MISS \
+ "ipv4_lpm_miss"
+#define DEVLINK_TRAP_GENERIC_NAME_IPV6_LPM_UNICAST_MISS \
+ "ipv6_lpm_miss"
#define DEVLINK_TRAP_GROUP_GENERIC_NAME_L2_DROPS \
"l2_drops"
@@ -643,7 +696,7 @@ enum devlink_trap_group_generic_id {
}
struct devlink_ops {
- int (*reload_down)(struct devlink *devlink,
+ int (*reload_down)(struct devlink *devlink, bool netns_change,
struct netlink_ext_ack *extack);
int (*reload_up)(struct devlink *devlink,
struct netlink_ext_ack *extack);
@@ -771,9 +824,13 @@ static inline struct devlink *netdev_to_devlink(struct net_device *dev)
struct ib_device;
+struct net *devlink_net(const struct devlink *devlink);
+void devlink_net_set(struct devlink *devlink, struct net *net);
struct devlink *devlink_alloc(const struct devlink_ops *ops, size_t priv_size);
int devlink_register(struct devlink *devlink, struct device *dev);
void devlink_unregister(struct devlink *devlink);
+void devlink_reload_enable(struct devlink *devlink);
+void devlink_reload_disable(struct devlink *devlink);
void devlink_free(struct devlink *devlink);
int devlink_port_register(struct devlink *devlink,
struct devlink_port *devlink_port,
@@ -914,8 +971,6 @@ int devlink_fmsg_u8_put(struct devlink_fmsg *fmsg, u8 value);
int devlink_fmsg_u32_put(struct devlink_fmsg *fmsg, u32 value);
int devlink_fmsg_u64_put(struct devlink_fmsg *fmsg, u64 value);
int devlink_fmsg_string_put(struct devlink_fmsg *fmsg, const char *value);
-int devlink_fmsg_binary_put(struct devlink_fmsg *fmsg, const void *value,
- u16 value_len);
int devlink_fmsg_bool_pair_put(struct devlink_fmsg *fmsg, const char *name,
bool value);
@@ -928,7 +983,7 @@ int devlink_fmsg_u64_pair_put(struct devlink_fmsg *fmsg, const char *name,
int devlink_fmsg_string_pair_put(struct devlink_fmsg *fmsg, const char *name,
const char *value);
int devlink_fmsg_binary_pair_put(struct devlink_fmsg *fmsg, const char *name,
- const void *value, u16 value_len);
+ const void *value, u32 value_len);
struct devlink_health_reporter *
devlink_health_reporter_create(struct devlink *devlink,
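
The new reload_enabled gating implies a probe/remove pairing along these
lines (sketch; the foo_* driver is hypothetical):

    static int foo_probe(struct pci_dev *pdev)
    {
            struct devlink *devlink;
            int err;

            devlink = devlink_alloc(&foo_devlink_ops, sizeof(struct foo_priv));
            if (!devlink)
                    return -ENOMEM;

            err = devlink_register(devlink, &pdev->dev);
            if (err)
                    goto err_free;

            /* Only from this point may userspace trigger a reload. */
            devlink_reload_enable(devlink);
            return 0;

    err_free:
            devlink_free(devlink);
            return err;
    }

    static void foo_remove(struct devlink *devlink)
    {
            devlink_reload_disable(devlink);
            devlink_unregister(devlink);
            devlink_free(devlink);
    }
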
diff --git a/include/net/dsa.h b/include/net/dsa.h
index 541fb514e31d..6767dc3f66c0 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -42,6 +42,7 @@ struct phylink_link_state;
#define DSA_TAG_PROTO_8021Q_VALUE 12
#define DSA_TAG_PROTO_SJA1105_VALUE 13
#define DSA_TAG_PROTO_KSZ8795_VALUE 14
+#define DSA_TAG_PROTO_OCELOT_VALUE 15
enum dsa_tag_protocol {
DSA_TAG_PROTO_NONE = DSA_TAG_PROTO_NONE_VALUE,
@@ -59,6 +60,7 @@ enum dsa_tag_protocol {
DSA_TAG_PROTO_8021Q = DSA_TAG_PROTO_8021Q_VALUE,
DSA_TAG_PROTO_SJA1105 = DSA_TAG_PROTO_SJA1105_VALUE,
DSA_TAG_PROTO_KSZ8795 = DSA_TAG_PROTO_KSZ8795_VALUE,
+ DSA_TAG_PROTO_OCELOT = DSA_TAG_PROTO_OCELOT_VALUE,
};
struct packet_type;
@@ -94,8 +96,6 @@ struct __dsa_skb_cb {
u8 priv[48 - sizeof(struct dsa_skb_cb)];
};
-#define __DSA_SKB_CB(skb) ((struct __dsa_skb_cb *)((skb)->cb))
-
#define DSA_SKB_CB(skb) ((struct dsa_skb_cb *)((skb)->cb))
#define DSA_SKB_CB_PRIV(skb) \
@@ -122,15 +122,11 @@ struct dsa_switch_tree {
*/
struct dsa_platform_data *pd;
- /*
- * The switch port to which the CPU is attached.
- */
- struct dsa_port *cpu_dp;
+ /* List of switch ports */
+ struct list_head ports;
- /*
- * Data for the individual switch chips.
- */
- struct dsa_switch *ds[DSA_MAX_SWITCHES];
+ /* List of DSA links composing the routing table */
+ struct list_head rtable;
};
/* TC matchall action types, only mirroring for now */
@@ -197,6 +193,8 @@ struct dsa_port {
struct work_struct xmit_work;
struct sk_buff_head xmit_queue;
+ struct list_head list;
+
/*
* Give the switch driver somewhere to hang its per-port private data
* structures (accessible from the tagger).
@@ -212,9 +210,24 @@ struct dsa_port {
* Original copy of the master netdev net_device_ops
*/
const struct net_device_ops *orig_ndo_ops;
+
+ bool setup;
+};
+
+/* TODO: ideally DSA ports would have a single dp->link_dp member,
+ * and no dst->rtable nor this struct dsa_link would be needed,
+ * but this would require some more complex tree walking,
+ * so keep it stupid at the moment and list them all.
+ */
+struct dsa_link {
+ struct dsa_port *dp;
+ struct dsa_port *link_dp;
+ struct list_head list;
};
struct dsa_switch {
+ bool setup;
+
struct device *dev;
/*
@@ -243,13 +256,6 @@ struct dsa_switch {
const struct dsa_switch_ops *ops;
/*
- * An array of which element [a] indicates which port on this
- * switch should be used to send packets to that are destined
- * for switch a. Can be NULL if there is only one switch chip.
- */
- s8 rtable[DSA_MAX_SWITCHES];
-
- /*
* Slave mii_bus and devices for the individual ports.
*/
u32 phys_mii_mask;
@@ -275,14 +281,19 @@ struct dsa_switch {
*/
bool vlan_filtering;
- /* Dynamically allocated ports, keep last */
size_t num_ports;
- struct dsa_port ports[];
};
-static inline const struct dsa_port *dsa_to_port(struct dsa_switch *ds, int p)
+static inline struct dsa_port *dsa_to_port(struct dsa_switch *ds, int p)
{
- return &ds->ports[p];
+ struct dsa_switch_tree *dst = ds->dst;
+ struct dsa_port *dp;
+
+ list_for_each_entry(dp, &dst->ports, list)
+ if (dp->ds == ds && dp->index == p)
+ return dp;
+
+ return NULL;
}
static inline bool dsa_is_unused_port(struct dsa_switch *ds, int p)
@@ -317,6 +328,19 @@ static inline u32 dsa_user_ports(struct dsa_switch *ds)
return mask;
}
+/* Return the local port used to reach an arbitrary switch device */
+static inline unsigned int dsa_routing_port(struct dsa_switch *ds, int device)
+{
+ struct dsa_switch_tree *dst = ds->dst;
+ struct dsa_link *dl;
+
+ list_for_each_entry(dl, &dst->rtable, list)
+ if (dl->dp->ds == ds && dl->link_dp->ds->index == device)
+ return dl->dp->index;
+
+ return ds->num_ports;
+}
+
/* Return the local port used to reach an arbitrary switch port */
static inline unsigned int dsa_towards_port(struct dsa_switch *ds, int device,
int port)
@@ -324,7 +348,7 @@ static inline unsigned int dsa_towards_port(struct dsa_switch *ds, int device,
if (device == ds->index)
return port;
else
- return ds->rtable[device];
+ return dsa_routing_port(ds, device);
}
/* Return the local port used to reach the dedicated CPU port */
@@ -543,6 +567,45 @@ struct dsa_switch_ops {
*/
netdev_tx_t (*port_deferred_xmit)(struct dsa_switch *ds, int port,
struct sk_buff *skb);
+ /* Devlink parameters */
+ int (*devlink_param_get)(struct dsa_switch *ds, u32 id,
+ struct devlink_param_gset_ctx *ctx);
+ int (*devlink_param_set)(struct dsa_switch *ds, u32 id,
+ struct devlink_param_gset_ctx *ctx);
+};
+
+#define DSA_DEVLINK_PARAM_DRIVER(_id, _name, _type, _cmodes) \
+ DEVLINK_PARAM_DRIVER(_id, _name, _type, _cmodes, \
+ dsa_devlink_param_get, dsa_devlink_param_set, NULL)
+
+int dsa_devlink_param_get(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx);
+int dsa_devlink_param_set(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx);
+int dsa_devlink_params_register(struct dsa_switch *ds,
+ const struct devlink_param *params,
+ size_t params_count);
+void dsa_devlink_params_unregister(struct dsa_switch *ds,
+ const struct devlink_param *params,
+ size_t params_count);
+int dsa_devlink_resource_register(struct dsa_switch *ds,
+ const char *resource_name,
+ u64 resource_size,
+ u64 resource_id,
+ u64 parent_resource_id,
+ const struct devlink_resource_size_params *size_params);
+
+void dsa_devlink_resources_unregister(struct dsa_switch *ds);
+
+void dsa_devlink_resource_occ_get_register(struct dsa_switch *ds,
+ u64 resource_id,
+ devlink_resource_occ_get_t *occ_get,
+ void *occ_get_priv);
+void dsa_devlink_resource_occ_get_unregister(struct dsa_switch *ds,
+ u64 resource_id);
+
+struct dsa_devlink_priv {
+ struct dsa_switch *ds;
};
struct dsa_switch_driver {
@@ -570,7 +633,6 @@ static inline bool dsa_can_decode(const struct sk_buff *skb,
return false;
}
-struct dsa_switch *dsa_switch_alloc(struct device *dev, size_t n);
void dsa_unregister_switch(struct dsa_switch *ds);
int dsa_register_switch(struct dsa_switch *ds);
#ifdef CONFIG_PM_SLEEP
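
A sketch of how a switch driver could expose one driver-specific
parameter through the new hooks; the enum, name and setup function are
hypothetical, and the driver is also expected to implement the
devlink_param_get/devlink_param_set ops declared above.

    enum foo_devlink_param_id {
            FOO_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
            FOO_DEVLINK_PARAM_ID_AGEING,
    };

    static const struct devlink_param foo_devlink_params[] = {
            DSA_DEVLINK_PARAM_DRIVER(FOO_DEVLINK_PARAM_ID_AGEING,
                                     "ageing_time", DEVLINK_PARAM_TYPE_U32,
                                     BIT(DEVLINK_PARAM_CMODE_RUNTIME)),
    };

    static int foo_setup(struct dsa_switch *ds)
    {
            return dsa_devlink_params_register(ds, foo_devlink_params,
                                               ARRAY_SIZE(foo_devlink_params));
    }
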
diff --git a/include/net/fib_notifier.h b/include/net/fib_notifier.h
index c49d7bfb5c30..6d59221ff05a 100644
--- a/include/net/fib_notifier.h
+++ b/include/net/fib_notifier.h
@@ -8,7 +8,6 @@
struct module;
struct fib_notifier_info {
- struct net *net;
int family;
struct netlink_ext_ack *extack;
};
@@ -30,19 +29,21 @@ struct fib_notifier_ops {
int family;
struct list_head list;
unsigned int (*fib_seq_read)(struct net *net);
- int (*fib_dump)(struct net *net, struct notifier_block *nb);
+ int (*fib_dump)(struct net *net, struct notifier_block *nb,
+ struct netlink_ext_ack *extack);
struct module *owner;
struct rcu_head rcu;
};
-int call_fib_notifier(struct notifier_block *nb, struct net *net,
+int call_fib_notifier(struct notifier_block *nb,
enum fib_event_type event_type,
struct fib_notifier_info *info);
int call_fib_notifiers(struct net *net, enum fib_event_type event_type,
struct fib_notifier_info *info);
-int register_fib_notifier(struct notifier_block *nb,
- void (*cb)(struct notifier_block *nb));
-int unregister_fib_notifier(struct notifier_block *nb);
+int register_fib_notifier(struct net *net, struct notifier_block *nb,
+ void (*cb)(struct notifier_block *nb),
+ struct netlink_ext_ack *extack);
+int unregister_fib_notifier(struct net *net, struct notifier_block *nb);
struct fib_notifier_ops *
fib_notifier_ops_register(const struct fib_notifier_ops *tmpl, struct net *net);
void fib_notifier_ops_unregister(struct fib_notifier_ops *ops);
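
Registration is now per network namespace and can report errors through
extack; a rough sketch of the updated calling convention:

    /* Sketch: per-netns FIB notifier registration (hypothetical driver). */
    err = register_fib_notifier(net, &priv->fib_nb, foo_fib_dump_done, extack);
    if (err)
            return err;

    /* ... and on teardown: */
    unregister_fib_notifier(net, &priv->fib_nb);
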
diff --git a/include/net/fib_rules.h b/include/net/fib_rules.h
index 20dcadd8eed9..54e227e6b06a 100644
--- a/include/net/fib_rules.h
+++ b/include/net/fib_rules.h
@@ -194,7 +194,8 @@ int fib_rules_lookup(struct fib_rules_ops *, struct flowi *, int flags,
int fib_default_rule_add(struct fib_rules_ops *, u32 pref, u32 table,
u32 flags);
bool fib_rule_matchall(const struct fib_rule *rule);
-int fib_rules_dump(struct net *net, struct notifier_block *nb, int family);
+int fib_rules_dump(struct net *net, struct notifier_block *nb, int family,
+ struct netlink_ext_ack *extack);
unsigned int fib_rules_seq_read(struct net *net, int family);
int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh,
diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h
index 5cd12276ae21..b8c20e9f343e 100644
--- a/include/net/flow_dissector.h
+++ b/include/net/flow_dissector.h
@@ -7,6 +7,8 @@
#include <linux/siphash.h>
#include <uapi/linux/if_ether.h>
+struct sk_buff;
+
/**
* struct flow_dissector_key_control:
* @thoff: Transport header offset
@@ -46,9 +48,14 @@ struct flow_dissector_key_tags {
};
struct flow_dissector_key_vlan {
- u16 vlan_id:12,
- vlan_dei:1,
- vlan_priority:3;
+ union {
+ struct {
+ u16 vlan_id:12,
+ vlan_dei:1,
+ vlan_priority:3;
+ };
+ __be16 vlan_tci;
+ };
__be16 vlan_tpid;
};
@@ -157,19 +164,16 @@ struct flow_dissector_key_ports {
/**
* flow_dissector_key_icmp:
- * @ports: type and code of ICMP header
- * icmp: ICMP type (high) and code (low)
* type: ICMP type
* code: ICMP code
+ * id: session identifier
*/
struct flow_dissector_key_icmp {
- union {
- __be16 icmp;
- struct {
- u8 type;
- u8 code;
- };
+ struct {
+ u8 type;
+ u8 code;
};
+ u16 id;
};
/**
@@ -204,9 +208,11 @@ struct flow_dissector_key_ip {
/**
* struct flow_dissector_key_meta:
* @ingress_ifindex: ingress ifindex
+ * @ingress_iftype: ingress interface type
*/
struct flow_dissector_key_meta {
int ingress_ifindex;
+ u16 ingress_iftype;
};
/**
@@ -283,6 +289,8 @@ struct flow_keys {
struct flow_dissector_key_vlan cvlan;
struct flow_dissector_key_keyid keyid;
struct flow_dissector_key_ports ports;
+ struct flow_dissector_key_icmp icmp;
+ /* 'addrs' must be the last member */
struct flow_dissector_key_addrs addrs;
};
@@ -316,6 +324,9 @@ static inline bool flow_keys_have_l4(const struct flow_keys *keys)
}
u32 flow_hash_from_keys(struct flow_keys *keys);
+void skb_flow_get_icmp_tci(const struct sk_buff *skb,
+ struct flow_dissector_key_icmp *key_icmp,
+ void *data, int thoff, int hlen);
static inline bool dissector_uses_key(const struct flow_dissector *flow_dissector,
enum flow_dissector_key_id key_id)
diff --git a/include/net/gen_stats.h b/include/net/gen_stats.h
index ca23860adbb9..1424e02cef90 100644
--- a/include/net/gen_stats.h
+++ b/include/net/gen_stats.h
@@ -7,6 +7,12 @@
#include <linux/rtnetlink.h>
#include <linux/pkt_sched.h>
+/* Note: this used to be in include/uapi/linux/gen_stats.h */
+struct gnet_stats_basic_packed {
+ __u64 bytes;
+ __u64 packets;
+};
+
struct gnet_stats_basic_cpu {
struct gnet_stats_basic_packed bstats;
struct u64_stats_sync syncp;
diff --git a/include/net/genetlink.h b/include/net/genetlink.h
index 9292f1c588b7..74950663bb00 100644
--- a/include/net/genetlink.h
+++ b/include/net/genetlink.h
@@ -75,8 +75,6 @@ struct genl_family {
struct module *module;
};
-struct nlattr **genl_family_attrbuf(const struct genl_family *family);
-
/**
* struct genl_info - receiving information
* @snd_seq: sending sequence number
@@ -128,6 +126,24 @@ enum genl_validate_flags {
};
/**
+ * struct genl_dumpit_info - info that is available during dumpit op call
+ * @family: generic netlink family - for internal genl code usage
+ * @ops: generic netlink ops - for internal genl code usage
+ * @attrs: netlink attributes
+ */
+struct genl_dumpit_info {
+ const struct genl_family *family;
+ const struct genl_ops *ops;
+ struct nlattr **attrs;
+};
+
+static inline const struct genl_dumpit_info *
+genl_dumpit_info(struct netlink_callback *cb)
+{
+ return cb->data;
+}
+
+/**
* struct genl_ops - generic netlink operations
* @cmd: command identifier
* @internal_flags: flags used by the family
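
With the parsed attributes now stashed in cb->data, a dumpit handler can
fetch them directly rather than re-parsing via genl_family_attrbuf();
the attribute name below is hypothetical.

    static int foo_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
    {
            const struct genl_dumpit_info *info = genl_dumpit_info(cb);
            struct nlattr **attrs = info->attrs;
            u32 ifindex = 0;

            if (attrs[FOO_ATTR_IFINDEX])            /* hypothetical attr */
                    ifindex = nla_get_u32(attrs[FOO_ATTR_IFINDEX]);

            /* ... dump, optionally filtered by ifindex ... */
            return skb->len;
    }
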
diff --git a/include/net/ip.h b/include/net/ip.h
index a2c61c36dc4a..cebf3e10def1 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -339,10 +339,10 @@ static inline u64 snmp_fold_field64(void __percpu *mib, int offt, size_t syncp_o
void inet_get_local_port_range(struct net *net, int *low, int *high);
#ifdef CONFIG_SYSCTL
-static inline int inet_is_local_reserved_port(struct net *net, int port)
+static inline bool inet_is_local_reserved_port(struct net *net, int port)
{
if (!net->ipv4.sysctl_local_reserved_ports)
- return 0;
+ return false;
return test_bit(port, net->ipv4.sysctl_local_reserved_ports);
}
@@ -357,9 +357,9 @@ static inline int inet_prot_sock(struct net *net)
}
#else
-static inline int inet_is_local_reserved_port(struct net *net, int port)
+static inline bool inet_is_local_reserved_port(struct net *net, int port)
{
- return 0;
+ return false;
}
static inline int inet_prot_sock(struct net *net)
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index 4b5656c71abc..f1535f172935 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -90,7 +90,32 @@ struct fib6_gc_args {
#ifndef CONFIG_IPV6_SUBTREES
#define FIB6_SUBTREE(fn) NULL
+
+static inline bool fib6_routes_require_src(const struct net *net)
+{
+ return false;
+}
+
+static inline void fib6_routes_require_src_inc(struct net *net) {}
+static inline void fib6_routes_require_src_dec(struct net *net) {}
+
#else
+
+static inline bool fib6_routes_require_src(const struct net *net)
+{
+ return net->ipv6.fib6_routes_require_src > 0;
+}
+
+static inline void fib6_routes_require_src_inc(struct net *net)
+{
+ net->ipv6.fib6_routes_require_src++;
+}
+
+static inline void fib6_routes_require_src_dec(struct net *net)
+{
+ net->ipv6.fib6_routes_require_src--;
+}
+
#define FIB6_SUBTREE(fn) (rcu_dereference_protected((fn)->subtree, 1))
#endif
@@ -212,6 +237,11 @@ static inline struct inet6_dev *ip6_dst_idev(struct dst_entry *dst)
return ((struct rt6_info *)dst)->rt6i_idev;
}
+static inline bool fib6_requires_src(const struct fib6_info *rt)
+{
+ return rt->fib6_src.plen > 0;
+}
+
static inline void fib6_clean_expires(struct fib6_info *f6i)
{
f6i->fib6_flags &= ~RTF_EXPIRES;
@@ -478,7 +508,7 @@ struct ipv6_route_iter {
extern const struct seq_operations ipv6_route_seq_ops;
-int call_fib6_notifier(struct notifier_block *nb, struct net *net,
+int call_fib6_notifier(struct notifier_block *nb,
enum fib_event_type event_type,
struct fib_notifier_info *info);
int call_fib6_notifiers(struct net *net, enum fib_event_type event_type,
@@ -488,7 +518,8 @@ int __net_init fib6_notifier_init(struct net *net);
void __net_exit fib6_notifier_exit(struct net *net);
unsigned int fib6_tables_seq_read(struct net *net);
-int fib6_tables_dump(struct net *net, struct notifier_block *nb);
+int fib6_tables_dump(struct net *net, struct notifier_block *nb,
+ struct netlink_ext_ack *extack);
void fib6_update_sernum(struct net *net, struct fib6_info *rt);
void fib6_update_sernum_upto_root(struct net *net, struct fib6_info *rt);
@@ -501,10 +532,16 @@ static inline bool fib6_metric_locked(struct fib6_info *f6i, int metric)
}
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
+static inline bool fib6_has_custom_rules(const struct net *net)
+{
+ return net->ipv6.fib6_has_custom_rules;
+}
+
int fib6_rules_init(void);
void fib6_rules_cleanup(void);
bool fib6_rule_default(const struct fib_rule *rule);
-int fib6_rules_dump(struct net *net, struct notifier_block *nb);
+int fib6_rules_dump(struct net *net, struct notifier_block *nb,
+ struct netlink_ext_ack *extack);
unsigned int fib6_rules_seq_read(struct net *net);
static inline bool fib6_rules_early_flow_dissect(struct net *net,
@@ -525,6 +562,10 @@ static inline bool fib6_rules_early_flow_dissect(struct net *net,
return true;
}
#else
+static inline bool fib6_has_custom_rules(const struct net *net)
+{
+ return false;
+}
static inline int fib6_rules_init(void)
{
return 0;
@@ -537,7 +578,8 @@ static inline bool fib6_rule_default(const struct fib_rule *rule)
{
return true;
}
-static inline int fib6_rules_dump(struct net *net, struct notifier_block *nb)
+static inline int fib6_rules_dump(struct net *net, struct notifier_block *nb,
+ struct netlink_ext_ack *extack)
{
return 0;
}
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index ab1ca9e238d2..b9cba41c6d4f 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -219,7 +219,7 @@ struct fib_nh_notifier_info {
struct fib_nh *fib_nh;
};
-int call_fib4_notifier(struct notifier_block *nb, struct net *net,
+int call_fib4_notifier(struct notifier_block *nb,
enum fib_event_type event_type,
struct fib_notifier_info *info);
int call_fib4_notifiers(struct net *net, enum fib_event_type event_type,
@@ -229,7 +229,8 @@ int __net_init fib4_notifier_init(struct net *net);
void __net_exit fib4_notifier_exit(struct net *net);
void fib_info_notify_update(struct net *net, struct nl_info *info);
-void fib_notify(struct net *net, struct notifier_block *nb);
+int fib_notify(struct net *net, struct notifier_block *nb,
+ struct netlink_ext_ack *extack);
struct fib_table {
struct hlist_node tb_hlist;
@@ -310,12 +311,18 @@ static inline int fib_lookup(struct net *net, const struct flowi4 *flp,
return err;
}
+static inline bool fib4_has_custom_rules(const struct net *net)
+{
+ return false;
+}
+
static inline bool fib4_rule_default(const struct fib_rule *rule)
{
return true;
}
-static inline int fib4_rules_dump(struct net *net, struct notifier_block *nb)
+static inline int fib4_rules_dump(struct net *net, struct notifier_block *nb,
+ struct netlink_ext_ack *extack)
{
return 0;
}
@@ -376,8 +383,14 @@ out:
return err;
}
+static inline bool fib4_has_custom_rules(const struct net *net)
+{
+ return net->ipv4.fib_has_custom_rules;
+}
+
bool fib4_rule_default(const struct fib_rule *rule);
-int fib4_rules_dump(struct net *net, struct notifier_block *nb);
+int fib4_rules_dump(struct net *net, struct notifier_block *nb,
+ struct netlink_ext_ack *extack);
unsigned int fib4_rules_seq_read(struct net *net);
static inline bool fib4_rules_early_flow_dissect(struct net *net,
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index 078887c8c586..83be2d93b407 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -1325,7 +1325,7 @@ void ip_vs_protocol_net_cleanup(struct netns_ipvs *ipvs);
void ip_vs_control_net_cleanup(struct netns_ipvs *ipvs);
void ip_vs_estimator_net_cleanup(struct netns_ipvs *ipvs);
void ip_vs_sync_net_cleanup(struct netns_ipvs *ipvs);
-void ip_vs_service_net_cleanup(struct netns_ipvs *ipvs);
+void ip_vs_service_nets_cleanup(struct list_head *net_list);
/* IPVS application functions
* (from ip_vs_app.c)
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 009605c56f20..d04b7abe2a4c 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -696,6 +696,11 @@ static inline bool ipv6_addr_v4mapped(const struct in6_addr *a)
cpu_to_be32(0x0000ffff))) == 0UL;
}
+static inline bool ipv6_addr_v4mapped_loopback(const struct in6_addr *a)
+{
+ return ipv6_addr_v4mapped(a) && ipv4_is_loopback(a->s6_addr32[3]);
+}
+
static inline u32 ipv6_portaddr_hash(const struct net *net,
const struct in6_addr *addr6,
unsigned int port)
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 523c6a09e1c8..aa145808e57a 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -312,7 +312,7 @@ struct ieee80211_vif_chanctx_switch {
* @BSS_CHANGED_KEEP_ALIVE: keep alive options (idle period or protected
* keep alive) changed.
* @BSS_CHANGED_MCAST_RATE: Multicast Rate setting changed for this interface
- * @BSS_CHANGED_FTM_RESPONDER: fime timing reasurement request responder
+ * @BSS_CHANGED_FTM_RESPONDER: fine timing measurement request responder
* functionality changed for this BSS (AP mode).
* @BSS_CHANGED_TWT: TWT status changed
* @BSS_CHANGED_HE_OBSS_PD: OBSS Packet Detection status changed.
@@ -967,6 +967,7 @@ ieee80211_rate_get_vht_nss(const struct ieee80211_tx_rate *rate)
* @band: the band to transmit on (use for checking for races)
* @hw_queue: HW queue to put the frame on, skb_get_queue_mapping() gives the AC
* @ack_frame_id: internal frame ID for TX status, used internally
+ * @tx_time_est: TX time estimate in units of 4us, used internally
* @control: union part for control data
* @control.rates: TX rates array to try
* @control.rts_cts_rate_idx: rate for RTS or CTS
@@ -1007,7 +1008,8 @@ struct ieee80211_tx_info {
u8 hw_queue;
- u16 ack_frame_id;
+ u16 ack_frame_id:6;
+ u16 tx_time_est:10;
union {
struct {
@@ -1058,8 +1060,24 @@ struct ieee80211_tx_info {
};
};
+static inline u16
+ieee80211_info_set_tx_time_est(struct ieee80211_tx_info *info, u16 tx_time_est)
+{
+ /* We only have 10 bits in tx_time_est, so store airtime
+ * in increments of 4us and clamp the maximum to 2**12-1
+ */
+ info->tx_time_est = min_t(u16, tx_time_est, 4095) >> 2;
+ return info->tx_time_est << 2;
+}
+
+static inline u16
+ieee80211_info_get_tx_time_est(struct ieee80211_tx_info *info)
+{
+ return info->tx_time_est << 2;
+}
+
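+/*
+ * Rough usage sketch: a driver stashes an estimate at TX time and reads
+ * it back in the status path; the setter returns the value as actually
+ * stored (quantized to units of 4us).
+ *
+ *	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ *	u16 stored = ieee80211_info_set_tx_time_est(info, airtime_us);
+ *	...
+ *	airtime_us = ieee80211_info_get_tx_time_est(info);
+ */
+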
/**
- * struct ieee80211_tx_status - extended tx staus info for rate control
+ * struct ieee80211_tx_status - extended tx status info for rate control
*
* @sta: Station that the packet was transmitted for
* @info: Basic tx status information
@@ -1702,7 +1720,7 @@ struct wireless_dev *ieee80211_vif_to_wdev(struct ieee80211_vif *vif);
* %IEEE80211_KEY_FLAG_SW_MGMT_TX flag to encrypt such frames in SW.
* @IEEE80211_KEY_FLAG_GENERATE_IV_MGMT: This flag should be set by the
* driver for a CCMP/GCMP key to indicate that is requires IV generation
- * only for managment frames (MFP).
+ * only for management frames (MFP).
* @IEEE80211_KEY_FLAG_RESERVE_TAILROOM: This flag should be set by the
* driver for a key to indicate that sufficient tailroom must always
* be reserved for ICV or MIC, even when HW encryption is enabled.
@@ -1998,7 +2016,7 @@ struct ieee80211_sta {
*
* * If the skb is transmitted as part of a BA agreement, the
* A-MSDU maximal size is min(max_amsdu_len, 4065) bytes.
- * * If the skb is not part of a BA aggreement, the A-MSDU maximal
+ * * If the skb is not part of a BA agreement, the A-MSDU maximal
* size is min(max_amsdu_len, 7935) bytes.
*
* Both additional HT limits must be enforced by the low level
@@ -2626,7 +2644,7 @@ ieee80211_get_alt_retry_rate(const struct ieee80211_hw *hw,
* @hw: the hardware
* @skb: the skb
*
- * Free a transmit skb. Use this funtion when some failure
+ * Free a transmit skb. Use this function when some failure
* to transmit happened and thus status cannot be reported.
*/
void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb);
@@ -3095,7 +3113,9 @@ enum ieee80211_filter_flags {
*
* @IEEE80211_AMPDU_RX_START: start RX aggregation
* @IEEE80211_AMPDU_RX_STOP: stop RX aggregation
- * @IEEE80211_AMPDU_TX_START: start TX aggregation
+ * @IEEE80211_AMPDU_TX_START: start TX aggregation, the driver must either
+ * call ieee80211_start_tx_ba_cb_irqsafe() or return the special
+ * status %IEEE80211_AMPDU_TX_START_IMMEDIATE.
* @IEEE80211_AMPDU_TX_OPERATIONAL: TX aggregation has become operational
* @IEEE80211_AMPDU_TX_STOP_CONT: stop TX aggregation but continue transmitting
* queued packets, now unaggregated. After all packets are transmitted the
@@ -3119,6 +3139,8 @@ enum ieee80211_ampdu_mlme_action {
IEEE80211_AMPDU_TX_OPERATIONAL,
};
+#define IEEE80211_AMPDU_TX_START_IMMEDIATE 1
+
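With the new return value, a driver whose hardware needs no handshake can short-circuit session setup instead of deferring to ieee80211_start_tx_ba_cb_irqsafe(). A hedged sketch of the relevant ampdu_action branch (the driver function itself is hypothetical, not from the patch):

	static int drv_ampdu_action(struct ieee80211_hw *hw,
				    struct ieee80211_vif *vif,
				    struct ieee80211_ampdu_params *params)
	{
		switch (params->action) {
		case IEEE80211_AMPDU_TX_START:
			/* No firmware round trip needed in this sketch, so
			 * tell mac80211 the session may start at once rather
			 * than calling ieee80211_start_tx_ba_cb_irqsafe()
			 * later.
			 */
			return IEEE80211_AMPDU_TX_START_IMMEDIATE;
		default:
			return -EOPNOTSUPP;
		}
	}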
/**
* struct ieee80211_ampdu_params - AMPDU action parameters
*
@@ -3183,13 +3205,13 @@ enum ieee80211_rate_control_changed {
*
* With the support for multi channel contexts and multi channel operations,
* remain on channel operations might be limited/deferred/aborted by other
- * flows/operations which have higher priority (and vise versa).
+ * flows/operations which have higher priority (and vice versa).
* Specifying the ROC type can be used by devices to prioritize the ROC
* operations compared to other operations/flows.
*
* @IEEE80211_ROC_TYPE_NORMAL: There are no special requirements for this ROC.
* @IEEE80211_ROC_TYPE_MGMT_TX: The remain on channel request is required
- * for sending managment frames offchannel.
+ * for sending management frames offchannel.
*/
enum ieee80211_roc_type {
IEEE80211_ROC_TYPE_NORMAL = 0,
@@ -3896,7 +3918,10 @@ struct ieee80211_ops {
*
* Even ``189`` would be wrong since 1 could be lost again.
*
- * Returns a negative error code on failure.
+ * Returns a negative error code on failure. The driver may return
+ * %IEEE80211_AMPDU_TX_START_IMMEDIATE for %IEEE80211_AMPDU_TX_START
+ * if the session can start immediately.
+ *
* The callback can sleep.
*/
int (*ampdu_action)(struct ieee80211_hw *hw,
@@ -5557,6 +5582,18 @@ void ieee80211_sta_register_airtime(struct ieee80211_sta *pubsta, u8 tid,
u32 tx_airtime, u32 rx_airtime);
/**
+ * ieee80211_txq_airtime_check - check if a txq can send frame to device
+ *
+ * @hw: pointer obtained from ieee80211_alloc_hw()
+ * @txq: pointer obtained from station or virtual interface
+ *
+ * Return true if the AQL's airtime limit has not been reached and the txq can
+ * continue to send more packets to the device. Otherwise return false.
+ */
+bool
+ieee80211_txq_airtime_check(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
+
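A hedged sketch of the intended driver loop: keep dequeueing only while the AQL budget allows; drv_tx_to_hw() stands in for the driver's real submit path (hypothetical name):

	while (ieee80211_txq_airtime_check(hw, txq)) {
		struct sk_buff *skb = ieee80211_tx_dequeue(hw, txq);

		if (!skb)
			break;
		drv_tx_to_hw(skb);	/* hypothetical hardware submit hook */
	}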
+/**
* ieee80211_iter_keys - iterate keys programmed into the device
* @hw: pointer obtained from ieee80211_alloc_hw()
* @vif: virtual interface to iterate, may be %NULL for all
@@ -5609,7 +5646,7 @@ void ieee80211_iter_keys_rcu(struct ieee80211_hw *hw,
/**
* ieee80211_iter_chan_contexts_atomic - iterate channel contexts
- * @hw: pointre obtained from ieee80211_alloc_hw().
+ * @hw: pointer obtained from ieee80211_alloc_hw().
* @iter: iterator function
* @iter_data: data passed to iterator function
*
@@ -6357,7 +6394,7 @@ ieee80211_return_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq,
* again.
*
* The API ieee80211_txq_may_transmit() also ensures that TXQ list will be
- * aligned aginst driver's own round-robin scheduler list. i.e it rotates
+ * aligned against the driver's own round-robin scheduler list. i.e. it rotates
* the TXQ list till it makes the requested node becomes the first entry
* in TXQ list. Thus both the TXQ list and driver's list are in sync. If this
* function returns %true, the driver is expected to schedule packets
@@ -6415,4 +6452,33 @@ void ieee80211_nan_func_match(struct ieee80211_vif *vif,
struct cfg80211_nan_match_params *match,
gfp_t gfp);
+/**
+ * ieee80211_calc_rx_airtime - calculate estimated transmission airtime for RX.
+ *
+ * This function calculates the estimated airtime usage of a frame based on the
+ * rate information in the RX status struct and the frame length.
+ *
+ * @hw: pointer as obtained from ieee80211_alloc_hw()
+ * @status: &struct ieee80211_rx_status containing the transmission rate
+ * information.
+ * @len: frame length in bytes
+ */
+u32 ieee80211_calc_rx_airtime(struct ieee80211_hw *hw,
+ struct ieee80211_rx_status *status,
+ int len);
+
+/**
+ * ieee80211_calc_tx_airtime - calculate estimated transmission airtime for TX.
+ *
+ * This function calculates the estimated airtime usage of a frame based on the
+ * rate information in the TX info struct and the frame length.
+ *
+ * @hw: pointer as obtained from ieee80211_alloc_hw()
+ * @info: &struct ieee80211_tx_info of the frame.
+ * @len: frame length in bytes
+ */
+u32 ieee80211_calc_tx_airtime(struct ieee80211_hw *hw,
+ struct ieee80211_tx_info *info,
+ int len);
+
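Together with ieee80211_sta_register_airtime() above, these let a driver whose firmware does not report airtime keep the scheduler's accounting populated. A hedged sketch for the RX side (sta, tid, status and skb are assumed to come from the driver's completion path):

	u32 est = ieee80211_calc_rx_airtime(hw, status, skb->len);

	/* Account the estimate as RX airtime; no TX airtime to report here. */
	ieee80211_sta_register_airtime(sta, tid, 0, est);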
#endif /* MAC80211_H */
diff --git a/include/net/ndisc.h b/include/net/ndisc.h
index b2f715ca0567..b5ebeb3b0de0 100644
--- a/include/net/ndisc.h
+++ b/include/net/ndisc.h
@@ -414,8 +414,8 @@ static inline void __ipv6_confirm_neigh(struct net_device *dev,
unsigned long now = jiffies;
/* avoid dirtying neighbour */
- if (n->confirmed != now)
- n->confirmed = now;
+ if (READ_ONCE(n->confirmed) != now)
+ WRITE_ONCE(n->confirmed, now);
}
rcu_read_unlock_bh();
}
@@ -431,8 +431,8 @@ static inline void __ipv6_confirm_neigh_stub(struct net_device *dev,
unsigned long now = jiffies;
/* avoid dirtying neighbour */
- if (n->confirmed != now)
- n->confirmed = now;
+ if (READ_ONCE(n->confirmed) != now)
+ WRITE_ONCE(n->confirmed, now);
}
rcu_read_unlock_bh();
}
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index b8452cc0e059..6ad9ad47a9c5 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -468,7 +468,7 @@ static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb
do {
seq = read_seqbegin(&hh->hh_lock);
- hh_len = hh->hh_len;
+ hh_len = READ_ONCE(hh->hh_len);
if (likely(hh_len <= HH_DATA_MOD)) {
hh_alen = HH_DATA_MOD;
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index c7e15a213ef2..b8ceaf0cd997 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -36,6 +36,7 @@
#include <linux/ns_common.h>
#include <linux/idr.h>
#include <linux/skbuff.h>
+#include <linux/notifier.h>
struct user_namespace;
struct proc_dir_entry;
@@ -104,6 +105,8 @@ struct net {
struct hlist_head *dev_name_head;
struct hlist_head *dev_index_head;
+ struct raw_notifier_head netdev_chain;
+
/* Note that @hash_mix can be read millions times per second,
* it is critical that it is on a read_mostly cache line.
*/
@@ -326,7 +329,8 @@ static inline struct net *read_pnet(const possible_net_t *pnet)
/* Protected by net_rwsem */
#define for_each_net(VAR) \
list_for_each_entry(VAR, &net_namespace_list, list)
-
+#define for_each_net_continue_reverse(VAR) \
+ list_for_each_entry_continue_reverse(VAR, &net_namespace_list, list)
#define for_each_net_rcu(VAR) \
list_for_each_entry_rcu(VAR, &net_namespace_list, list)
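The new macro is the unwind companion to for_each_net(): after a forward walk fails partway, it resumes at the failing namespace and visits every earlier one in reverse. A hedged sketch of the pattern (setup()/teardown() are hypothetical helpers; the caller is assumed to serialize against namespace changes as the comment above requires):

	struct net *net;
	int err = 0;

	for_each_net(net) {
		err = setup(net);
		if (err)
			break;
	}
	if (err)
		for_each_net_continue_reverse(net)	/* skips the failed one */
			teardown(net);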
diff --git a/include/net/netfilter/nf_conntrack_extend.h b/include/net/netfilter/nf_conntrack_extend.h
index 112a6f40dfaf..5ae5295aa46d 100644
--- a/include/net/netfilter/nf_conntrack_extend.h
+++ b/include/net/netfilter/nf_conntrack_extend.h
@@ -43,7 +43,6 @@ enum nf_ct_ext_id {
/* Extensions: optional stuff which isn't permanently in struct. */
struct nf_ct_ext {
- struct rcu_head rcu;
u8 offset[NF_CT_EXT_NUM];
u8 len;
char data[0];
@@ -72,15 +71,6 @@ static inline void *__nf_ct_ext_find(const struct nf_conn *ct, u8 id)
/* Destroy all relationships */
void nf_ct_ext_destroy(struct nf_conn *ct);
-/* Free operation. If you want to free a object referred from private area,
- * please implement __nf_ct_ext_free() and call it.
- */
-static inline void nf_ct_ext_free(struct nf_conn *ct)
-{
- if (ct->ext)
- kfree_rcu(ct->ext, rcu);
-}
-
/* Add this type, returns pointer to data or NULL. */
void *nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp);
diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h
index b37a7d608134..f0897b3c97fb 100644
--- a/include/net/netfilter/nf_flow_table.h
+++ b/include/net/netfilter/nf_flow_table.h
@@ -8,24 +8,43 @@
#include <linux/rcupdate.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_conntrack_tuple_common.h>
+#include <net/flow_offload.h>
#include <net/dst.h>
struct nf_flowtable;
+struct nf_flow_rule;
+struct flow_offload;
+enum flow_offload_tuple_dir;
struct nf_flowtable_type {
struct list_head list;
int family;
int (*init)(struct nf_flowtable *ft);
+ int (*setup)(struct nf_flowtable *ft,
+ struct net_device *dev,
+ enum flow_block_command cmd);
+ int (*action)(struct net *net,
+ const struct flow_offload *flow,
+ enum flow_offload_tuple_dir dir,
+ struct nf_flow_rule *flow_rule);
void (*free)(struct nf_flowtable *ft);
nf_hookfn *hook;
struct module *owner;
};
+enum nf_flowtable_flags {
+ NF_FLOWTABLE_HW_OFFLOAD = 0x1,
+};
+
struct nf_flowtable {
struct list_head list;
struct rhashtable rhashtable;
+ int priority;
const struct nf_flowtable_type *type;
struct delayed_work gc_work;
+ unsigned int flags;
+ struct flow_block flow_block;
+ possible_net_t net;
};
enum flow_offload_tuple_dir {
@@ -68,14 +87,22 @@ struct flow_offload_tuple_rhash {
#define FLOW_OFFLOAD_DNAT 0x2
#define FLOW_OFFLOAD_DYING 0x4
#define FLOW_OFFLOAD_TEARDOWN 0x8
+#define FLOW_OFFLOAD_HW 0x10
+#define FLOW_OFFLOAD_HW_DYING 0x20
+#define FLOW_OFFLOAD_HW_DEAD 0x40
+
+enum flow_offload_type {
+ NF_FLOW_OFFLOAD_UNSPEC = 0,
+ NF_FLOW_OFFLOAD_ROUTE,
+};
struct flow_offload {
struct flow_offload_tuple_rhash tuplehash[FLOW_OFFLOAD_DIR_MAX];
- u32 flags;
- union {
- /* Your private driver data here. */
- u32 timeout;
- };
+ struct nf_conn *ct;
+ u16 flags;
+ u16 type;
+ u32 timeout;
+ struct rcu_head rcu_head;
};
#define NF_FLOW_TIMEOUT (30 * HZ)
@@ -86,10 +113,12 @@ struct nf_flow_route {
} tuple[FLOW_OFFLOAD_DIR_MAX];
};
-struct flow_offload *flow_offload_alloc(struct nf_conn *ct,
- struct nf_flow_route *route);
+struct flow_offload *flow_offload_alloc(struct nf_conn *ct);
void flow_offload_free(struct flow_offload *flow);
+int flow_offload_route_init(struct flow_offload *flow,
+ const struct nf_flow_route *route);
+
int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow);
struct flow_offload_tuple_rhash *flow_offload_lookup(struct nf_flowtable *flow_table,
struct flow_offload_tuple *tuple);
@@ -123,4 +152,25 @@ unsigned int nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
#define MODULE_ALIAS_NF_FLOWTABLE(family) \
MODULE_ALIAS("nf-flowtable-" __stringify(family))
+void nf_flow_offload_add(struct nf_flowtable *flowtable,
+ struct flow_offload *flow);
+void nf_flow_offload_del(struct nf_flowtable *flowtable,
+ struct flow_offload *flow);
+void nf_flow_offload_stats(struct nf_flowtable *flowtable,
+ struct flow_offload *flow);
+
+void nf_flow_table_offload_flush(struct nf_flowtable *flowtable);
+int nf_flow_table_offload_setup(struct nf_flowtable *flowtable,
+ struct net_device *dev,
+ enum flow_block_command cmd);
+int nf_flow_rule_route_ipv4(struct net *net, const struct flow_offload *flow,
+ enum flow_offload_tuple_dir dir,
+ struct nf_flow_rule *flow_rule);
+int nf_flow_rule_route_ipv6(struct net *net, const struct flow_offload *flow,
+ enum flow_offload_tuple_dir dir,
+ struct nf_flow_rule *flow_rule);
+
+int nf_flow_table_offload_init(void);
+void nf_flow_table_offload_exit(void);
+
#endif /* _NF_FLOW_TABLE_H */
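The per-family flowtable type now carries a setup() hook to bind a flow_block on a device and an action() hook to translate a flow entry into a flow_rule. A hedged sketch of how an IPv4 type might wire in the generic helpers declared above (the init, free and hook callbacks are assumed to be the pre-existing ones, not introduced by this patch):

	static struct nf_flowtable_type flowtable_ipv4 = {
		.family	= NFPROTO_IPV4,
		.init	= nf_flow_table_init,		/* assumed existing */
		.setup	= nf_flow_table_offload_setup,
		.action	= nf_flow_rule_route_ipv4,
		.free	= nf_flow_table_free,		/* assumed existing */
		.hook	= nf_flow_offload_ip_hook,	/* assumed existing */
		.owner	= THIS_MODULE,
	};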
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 2d0275f13bbf..fe7c50acc681 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -114,7 +114,7 @@ static inline void nft_reg_store8(u32 *dreg, u8 val)
*(u8 *)dreg = val;
}
-static inline u8 nft_reg_load8(u32 *sreg)
+static inline u8 nft_reg_load8(const u32 *sreg)
{
return *(u8 *)sreg;
}
@@ -125,7 +125,7 @@ static inline void nft_reg_store16(u32 *dreg, u16 val)
*(u16 *)dreg = val;
}
-static inline u16 nft_reg_load16(u32 *sreg)
+static inline u16 nft_reg_load16(const u32 *sreg)
{
return *(u16 *)sreg;
}
@@ -135,7 +135,7 @@ static inline void nft_reg_store64(u32 *dreg, u64 val)
put_unaligned(val, (u64 *)dreg);
}
-static inline u64 nft_reg_load64(u32 *sreg)
+static inline u64 nft_reg_load64(const u32 *sreg)
{
return get_unaligned((u64 *)sreg);
}
@@ -964,25 +964,31 @@ struct nft_stats {
struct u64_stats_sync syncp;
};
+struct nft_hook {
+ struct list_head list;
+ struct nf_hook_ops ops;
+ struct rcu_head rcu;
+};
+
/**
* struct nft_base_chain - nf_tables base chain
*
* @ops: netfilter hook ops
+ * @hook_list: list of netfilter hooks (for NFPROTO_NETDEV family)
* @type: chain type
* @policy: default policy
* @stats: per-cpu chain stats
* @chain: the chain
- * @dev_name: device name that this base chain is attached to (if any)
* @flow_block: flow block (for hardware offload)
*/
struct nft_base_chain {
struct nf_hook_ops ops;
+ struct list_head hook_list;
const struct nft_chain_type *type;
u8 policy;
u8 flags;
struct nft_stats __percpu *stats;
struct nft_chain chain;
- char dev_name[IFNAMSIZ];
struct flow_block flow_block;
};
@@ -1147,7 +1153,7 @@ struct nft_object_ops {
int nft_register_obj(struct nft_object_type *obj_type);
void nft_unregister_obj(struct nft_object_type *obj_type);
-#define NFT_FLOWTABLE_DEVICE_MAX 8
+#define NFT_NETDEVICE_MAX 256
/**
* struct nft_flowtable - nf_tables flow table
@@ -1156,7 +1162,6 @@ void nft_unregister_obj(struct nft_object_type *obj_type);
* @table: the table the flow table is contained in
* @name: name of this flow table
* @hooknum: hook number
- * @priority: hook priority
* @ops_len: number of hooks in array
* @genmask: generation mask
* @use: number of references to this flow table
@@ -1170,13 +1175,12 @@ struct nft_flowtable {
struct nft_table *table;
char *name;
int hooknum;
- int priority;
int ops_len;
u32 genmask:2,
use:30;
u64 handle;
/* runtime data below here */
- struct nf_hook_ops *ops ____cacheline_aligned;
+ struct list_head hook_list ____cacheline_aligned;
struct nf_flowtable data;
};
diff --git a/include/net/netfilter/nf_tables_offload.h b/include/net/netfilter/nf_tables_offload.h
index 03cf5856d76f..ea7d1d78b92d 100644
--- a/include/net/netfilter/nf_tables_offload.h
+++ b/include/net/netfilter/nf_tables_offload.h
@@ -45,6 +45,7 @@ struct nft_flow_key {
struct flow_dissector_key_ip ip;
struct flow_dissector_key_vlan vlan;
struct flow_dissector_key_eth_addrs eth_addrs;
+ struct flow_dissector_key_meta meta;
} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */
struct nft_flow_match {
diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
index 022a0fd1a5a4..5ec054473d81 100644
--- a/include/net/netns/ipv6.h
+++ b/include/net/netns/ipv6.h
@@ -83,6 +83,9 @@ struct netns_ipv6 {
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
unsigned int fib6_rules_require_fldissect;
bool fib6_has_custom_rules;
+#ifdef CONFIG_IPV6_SUBTREES
+ unsigned int fib6_routes_require_src;
+#endif
struct rt6_info *ip6_prohibit_entry;
struct rt6_info *ip6_blk_hole_entry;
struct fib6_table *fib6_local_tbl;
diff --git a/include/net/netns/mib.h b/include/net/netns/mib.h
index 830bdf345b17..b5fdb108d602 100644
--- a/include/net/netns/mib.h
+++ b/include/net/netns/mib.h
@@ -24,6 +24,9 @@ struct netns_mib {
#ifdef CONFIG_XFRM_STATISTICS
DEFINE_SNMP_STAT(struct linux_xfrm_mib, xfrm_statistics);
#endif
+#if IS_ENABLED(CONFIG_TLS)
+ DEFINE_SNMP_STAT(struct linux_tls_mib, tls_statistics);
+#endif
};
#endif
diff --git a/include/net/netns/sctp.h b/include/net/netns/sctp.h
index bdc0f27b8514..d8d02e4188d1 100644
--- a/include/net/netns/sctp.h
+++ b/include/net/netns/sctp.h
@@ -89,6 +89,12 @@ struct netns_sctp {
*/
int pf_retrans;
+ /* Primary.Switchover.Max.Retrans sysctl value
+ * taken from:
+ * https://tools.ietf.org/html/rfc7829
+ */
+ int ps_retrans;
+
/*
* Disable Potentially-Failed feature, the feature is enabled by default
* pf_enable - 0 : disable pf
@@ -97,6 +103,14 @@ struct netns_sctp {
int pf_enable;
/*
+ * Disable Potentially-Failed state exposure, ignored by default
+ * pf_expose - 0 : compatible with old applications (by default)
+ * - 1 : disable pf state exposure
+ * - 2 : enable pf state exposure
+ */
+ int pf_expose;
+
+ /*
* Policy for preforming sctp/socket accounting
* 0 - do socket level accounting, all assocs share sk_sndbuf
* 1 - do sctp accounting, each asoc may use sk_sndbuf bytes
diff --git a/include/net/netprio_cgroup.h b/include/net/netprio_cgroup.h
index cfc9441ef074..dec7522b6ce1 100644
--- a/include/net/netprio_cgroup.h
+++ b/include/net/netprio_cgroup.h
@@ -26,7 +26,7 @@ static inline u32 task_netprioidx(struct task_struct *p)
rcu_read_lock();
css = task_css(p, net_prio_cgrp_id);
- idx = css->cgroup->id;
+ idx = css->id;
rcu_read_unlock();
return idx;
}
diff --git a/include/net/page_pool.h b/include/net/page_pool.h
index 2cbcdbdec254..cfbed00ba7ee 100644
--- a/include/net/page_pool.h
+++ b/include/net/page_pool.h
@@ -34,8 +34,18 @@
#include <linux/ptr_ring.h>
#include <linux/dma-direction.h>
-#define PP_FLAG_DMA_MAP 1 /* Should page_pool do the DMA map/unmap */
-#define PP_FLAG_ALL PP_FLAG_DMA_MAP
+#define PP_FLAG_DMA_MAP BIT(0) /* Should page_pool do the DMA
+ * map/unmap
+ */
+#define PP_FLAG_DMA_SYNC_DEV BIT(1) /* If set all pages that the driver gets
+ * from page_pool will be
+ * DMA-synced-for-device according to
+ * the length provided by the device
+ * driver.
+ * Please note DMA-sync-for-CPU is still
+					 * the device driver's responsibility
+ */
+#define PP_FLAG_ALL (PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV)
/*
* Fast allocation side cache array/stack
@@ -65,12 +75,19 @@ struct page_pool_params {
int nid; /* Numa node id to allocate from pages from */
struct device *dev; /* device, for DMA pre-mapping purposes */
enum dma_data_direction dma_dir; /* DMA mapping direction */
+ unsigned int max_len; /* max DMA sync memory size */
+ unsigned int offset; /* DMA addr offset */
};
struct page_pool {
struct page_pool_params p;
- u32 pages_state_hold_cnt;
+ struct delayed_work release_dw;
+ void (*disconnect)(void *);
+ unsigned long defer_start;
+ unsigned long defer_warn;
+
+ u32 pages_state_hold_cnt;
/*
* Data structure for allocation side
@@ -107,6 +124,8 @@ struct page_pool {
* refcnt serves purpose is to simplify drivers error handling.
*/
refcount_t user_cnt;
+
+ u64 destroy_cnt;
};
struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp);
@@ -129,29 +148,23 @@ inline enum dma_data_direction page_pool_get_dma_dir(struct page_pool *pool)
struct page_pool *page_pool_create(const struct page_pool_params *params);
-void __page_pool_free(struct page_pool *pool);
-static inline void page_pool_free(struct page_pool *pool)
-{
- /* When page_pool isn't compiled-in, net/core/xdp.c doesn't
- * allow registering MEM_TYPE_PAGE_POOL, but shield linker.
- */
#ifdef CONFIG_PAGE_POOL
- __page_pool_free(pool);
-#endif
-}
-
-/* Drivers use this instead of page_pool_free */
+void page_pool_destroy(struct page_pool *pool);
+void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *));
+#else
static inline void page_pool_destroy(struct page_pool *pool)
{
- if (!pool)
- return;
+}
- page_pool_free(pool);
+static inline void page_pool_use_xdp_mem(struct page_pool *pool,
+ void (*disconnect)(void *))
+{
}
+#endif
/* Never call this directly, use helpers below */
-void __page_pool_put_page(struct page_pool *pool,
- struct page *page, bool allow_direct);
+void __page_pool_put_page(struct page_pool *pool, struct page *page,
+ unsigned int dma_sync_size, bool allow_direct);
static inline void page_pool_put_page(struct page_pool *pool,
struct page *page, bool allow_direct)
@@ -160,32 +173,14 @@ static inline void page_pool_put_page(struct page_pool *pool,
* allow registering MEM_TYPE_PAGE_POOL, but shield linker.
*/
#ifdef CONFIG_PAGE_POOL
- __page_pool_put_page(pool, page, allow_direct);
+ __page_pool_put_page(pool, page, -1, allow_direct);
#endif
}
/* Very limited use-cases allow recycle direct */
static inline void page_pool_recycle_direct(struct page_pool *pool,
struct page *page)
{
- __page_pool_put_page(pool, page, true);
-}
-
-/* API user MUST have disconnected alloc-side (not allowed to call
- * page_pool_alloc_pages()) before calling this. The free-side can
- * still run concurrently, to handle in-flight packet-pages.
- *
- * A request to shutdown can fail (with false) if there are still
- * in-flight packet-pages.
- */
-bool __page_pool_request_shutdown(struct page_pool *pool);
-static inline bool page_pool_request_shutdown(struct page_pool *pool)
-{
- bool safe_to_remove = false;
-
-#ifdef CONFIG_PAGE_POOL
- safe_to_remove = __page_pool_request_shutdown(pool);
-#endif
- return safe_to_remove;
+ __page_pool_put_page(pool, page, -1, true);
}
/* Disconnects a page (from a page_pool). API users can have a need
@@ -216,14 +211,16 @@ static inline bool is_page_pool_compiled_in(void)
#endif
}
-static inline void page_pool_get(struct page_pool *pool)
-{
- refcount_inc(&pool->user_cnt);
-}
-
static inline bool page_pool_put(struct page_pool *pool)
{
return refcount_dec_and_test(&pool->user_cnt);
}
+/* Caller must provide appropriate safe context, e.g. NAPI. */
+void page_pool_update_nid(struct page_pool *pool, int new_nid);
+static inline void page_pool_nid_changed(struct page_pool *pool, int new_nid)
+{
+ if (unlikely(pool->p.nid != new_nid))
+ page_pool_update_nid(pool, new_nid);
+}
#endif /* _NET_PAGE_POOL_H */
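With PP_FLAG_DMA_SYNC_DEV set, recycled pages are DMA-synced for the device by the pool itself, bounded by max_len and starting at offset. A hedged sketch of driver-side pool creation (field values are illustrative and pdev is a hypothetical device):

	struct page_pool_params pp_params = {
		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.pool_size	= 256,
		.nid		= NUMA_NO_NODE,
		.dev		= &pdev->dev,
		.dma_dir	= DMA_FROM_DEVICE,
		.max_len	= PAGE_SIZE,	/* largest RX area to sync */
		.offset		= 0,		/* headroom before packet data */
	};
	struct page_pool *pool = page_pool_create(&pp_params);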
diff --git a/include/net/route.h b/include/net/route.h
index 6c516840380d..a9c60fc68e36 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -185,6 +185,10 @@ int ip_route_input_rcu(struct sk_buff *skb, __be32 dst, __be32 src,
u8 tos, struct net_device *devin,
struct fib_result *res);
+int ip_route_use_hint(struct sk_buff *skb, __be32 dst, __be32 src,
+ u8 tos, struct net_device *devin,
+ const struct sk_buff *hint);
+
static inline int ip_route_input(struct sk_buff *skb, __be32 dst, __be32 src,
u8 tos, struct net_device *devin)
{
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index d80acda231ae..144f264ea394 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -149,8 +149,8 @@ static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
static inline bool qdisc_is_empty(const struct Qdisc *qdisc)
{
if (qdisc_is_percpu_stats(qdisc))
- return qdisc->empty;
- return !qdisc->q.qlen;
+ return READ_ONCE(qdisc->empty);
+ return !READ_ONCE(qdisc->q.qlen);
}
static inline bool qdisc_run_begin(struct Qdisc *qdisc)
@@ -158,7 +158,7 @@ static inline bool qdisc_run_begin(struct Qdisc *qdisc)
if (qdisc->flags & TCQ_F_NOLOCK) {
if (!spin_trylock(&qdisc->seqlock))
return false;
- qdisc->empty = false;
+ WRITE_ONCE(qdisc->empty, false);
} else if (qdisc_is_running(qdisc)) {
return false;
}
@@ -1290,17 +1290,9 @@ void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp,
void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc,
struct mini_Qdisc __rcu **p_miniq);
-static inline void skb_tc_reinsert(struct sk_buff *skb, struct tcf_result *res)
+static inline int skb_tc_reinsert(struct sk_buff *skb, struct tcf_result *res)
{
- struct gnet_stats_queue *stats = res->qstats;
- int ret;
-
- if (res->ingress)
- ret = netif_receive_skb(skb);
- else
- ret = dev_queue_xmit(skb);
- if (ret && stats)
- qstats_overlimit_inc(res->qstats);
+ return res->ingress ? netif_receive_skb(skb) : dev_queue_xmit(skb);
}
#endif
diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h
index 823afc42a3aa..15b4d9aec7ff 100644
--- a/include/net/sctp/constants.h
+++ b/include/net/sctp/constants.h
@@ -286,6 +286,18 @@ enum { SCTP_MAX_GABS = 16 };
* functions simpler to write.
*/
+/* These are the values for pf exposure; UNSET keeps compatibility with old
+ * applications by default.
+ */
+enum {
+ SCTP_PF_EXPOSE_UNSET,
+ SCTP_PF_EXPOSE_DISABLE,
+ SCTP_PF_EXPOSE_ENABLE,
+};
+#define SCTP_PF_EXPOSE_MAX SCTP_PF_EXPOSE_ENABLE
+
+#define SCTP_PS_RETRANS_MAX 0xffff
+
/* These return values describe the success or failure of a number of
* routines which form the lower interface to SCTP_outqueue.
*/
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 503fbc3cd819..314a2fa21d6b 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -184,7 +184,8 @@ struct sctp_sock {
__u32 flowlabel;
__u8 dscp;
- int pf_retrans;
+ __u16 pf_retrans;
+ __u16 ps_retrans;
/* The initial Path MTU to use for new associations. */
__u32 pathmtu;
@@ -215,6 +216,7 @@ struct sctp_sock {
__u32 adaptation_ind;
__u32 pd_point;
__u16 nodelay:1,
+ pf_expose:2,
reuse:1,
disable_fragments:1,
v4mapped:1,
@@ -896,7 +898,9 @@ struct sctp_transport {
* and will be initialized from the assocs value. This can be changed
* using the SCTP_PEER_ADDR_THLDS socket option
*/
- int pf_retrans;
+ __u16 pf_retrans;
+ /* Used for primary path switchover. */
+ __u16 ps_retrans;
/* PMTU : The current known path MTU. */
__u32 pathmtu;
@@ -1239,6 +1243,9 @@ struct sctp_ep_common {
/* What socket does this endpoint belong to? */
struct sock *sk;
+	/* Cached netns; it does not change once set */
+ struct net *net;
+
/* This is where we receive inbound chunks. */
struct sctp_inq inqueue;
@@ -1772,7 +1779,9 @@ struct sctp_association {
* and will be initialized from the assocs value. This can be
* changed using the SCTP_PEER_ADDR_THLDS socket option
*/
- int pf_retrans;
+ __u16 pf_retrans;
+ /* Used for primary path switchover. */
+ __u16 ps_retrans;
/* Maximum number of times the endpoint will retransmit INIT */
__u16 max_init_attempts;
@@ -2053,6 +2062,7 @@ struct sctp_association {
__u8 need_ecne:1, /* Need to send an ECNE Chunk? */
temp:1, /* Is it a temporary association? */
+ pf_expose:2, /* Expose pf state? */
force_delay:1;
__u8 strreset_enable;
diff --git a/include/net/sctp/ulpevent.h b/include/net/sctp/ulpevent.h
index e1a92c4610f3..0b032b92da0b 100644
--- a/include/net/sctp/ulpevent.h
+++ b/include/net/sctp/ulpevent.h
@@ -80,13 +80,8 @@ struct sctp_ulpevent *sctp_ulpevent_make_assoc_change(
struct sctp_chunk *chunk,
gfp_t gfp);
-struct sctp_ulpevent *sctp_ulpevent_make_peer_addr_change(
- const struct sctp_association *asoc,
- const struct sockaddr_storage *aaddr,
- int flags,
- int state,
- int error,
- gfp_t gfp);
+void sctp_ulpevent_nofity_peer_addr_change(struct sctp_transport *transport,
+ int state, int error);
struct sctp_ulpevent *sctp_ulpevent_make_remote_error(
const struct sctp_association *asoc,
@@ -100,6 +95,13 @@ struct sctp_ulpevent *sctp_ulpevent_make_send_failed(
__u32 error,
gfp_t gfp);
+struct sctp_ulpevent *sctp_ulpevent_make_send_failed_event(
+ const struct sctp_association *asoc,
+ struct sctp_chunk *chunk,
+ __u16 flags,
+ __u32 error,
+ gfp_t gfp);
+
struct sctp_ulpevent *sctp_ulpevent_make_shutdown_event(
const struct sctp_association *asoc,
__u16 flags,
diff --git a/include/net/smc.h b/include/net/smc.h
index bd9c0fb3b577..646feb4bc75f 100644
--- a/include/net/smc.h
+++ b/include/net/smc.h
@@ -37,6 +37,8 @@ struct smcd_dmb {
#define ISM_EVENT_GID 1
#define ISM_EVENT_SWR 2
+#define ISM_ERROR 0xFFFF
+
struct smcd_event {
u32 type;
u32 code;
@@ -75,6 +77,11 @@ struct smcd_dev {
struct workqueue_struct *event_wq;
u8 pnetid[SMC_MAX_PNETID_LEN];
bool pnetid_by_user;
+ struct list_head lgr_list;
+ spinlock_t lgr_lock;
+ atomic_t lgr_cnt;
+ wait_queue_head_t lgrs_deleted;
+ u8 going_away : 1;
};
struct smcd_dev *smcd_alloc_dev(struct device *parent, const char *name,
diff --git a/include/net/snmp.h b/include/net/snmp.h
index cb8ced4380a6..468a67836e2f 100644
--- a/include/net/snmp.h
+++ b/include/net/snmp.h
@@ -111,6 +111,12 @@ struct linux_xfrm_mib {
unsigned long mibs[LINUX_MIB_XFRMMAX];
};
+/* Linux TLS */
+#define LINUX_MIB_TLSMAX __LINUX_MIB_TLSMAX
+struct linux_tls_mib {
+ unsigned long mibs[LINUX_MIB_TLSMAX];
+};
+
#define DEFINE_SNMP_STAT(type, name) \
__typeof__(type) __percpu *name
#define DEFINE_SNMP_STAT_ATOMIC(type, name) \
diff --git a/include/net/sock.h b/include/net/sock.h
index 718e62fbe869..87d54ef57f00 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -66,7 +66,6 @@
#include <net/checksum.h>
#include <net/tcp_states.h>
#include <linux/net_tstamp.h>
-#include <net/smc.h>
#include <net/l3mdev.h>
/*
@@ -860,17 +859,17 @@ static inline gfp_t sk_gfp_mask(const struct sock *sk, gfp_t gfp_mask)
static inline void sk_acceptq_removed(struct sock *sk)
{
- sk->sk_ack_backlog--;
+ WRITE_ONCE(sk->sk_ack_backlog, sk->sk_ack_backlog - 1);
}
static inline void sk_acceptq_added(struct sock *sk)
{
- sk->sk_ack_backlog++;
+ WRITE_ONCE(sk->sk_ack_backlog, sk->sk_ack_backlog + 1);
}
static inline bool sk_acceptq_is_full(const struct sock *sk)
{
- return sk->sk_ack_backlog > sk->sk_max_ack_backlog;
+ return READ_ONCE(sk->sk_ack_backlog) > READ_ONCE(sk->sk_max_ack_backlog);
}
/*
@@ -900,11 +899,11 @@ static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
skb_dst_force(skb);
if (!sk->sk_backlog.tail)
- sk->sk_backlog.head = skb;
+ WRITE_ONCE(sk->sk_backlog.head, skb);
else
sk->sk_backlog.tail->next = skb;
- sk->sk_backlog.tail = skb;
+ WRITE_ONCE(sk->sk_backlog.tail, skb);
skb->next = NULL;
}
@@ -1489,7 +1488,7 @@ static inline void sock_release_ownership(struct sock *sk)
sk->sk_lock.owned = 0;
/* The sk_lock has mutex_unlock() semantics: */
- mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
+ mutex_release(&sk->sk_lock.dep_map, _RET_IP_);
}
}
@@ -1940,8 +1939,8 @@ struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie);
static inline void sk_dst_confirm(struct sock *sk)
{
- if (!sk->sk_dst_pending_confirm)
- sk->sk_dst_pending_confirm = 1;
+ if (!READ_ONCE(sk->sk_dst_pending_confirm))
+ WRITE_ONCE(sk->sk_dst_pending_confirm, 1);
}
static inline void sock_confirm_neigh(struct sk_buff *skb, struct neighbour *n)
@@ -1951,10 +1950,10 @@ static inline void sock_confirm_neigh(struct sk_buff *skb, struct neighbour *n)
unsigned long now = jiffies;
/* avoid dirtying neighbour */
- if (n->confirmed != now)
- n->confirmed = now;
- if (sk && sk->sk_dst_pending_confirm)
- sk->sk_dst_pending_confirm = 0;
+ if (READ_ONCE(n->confirmed) != now)
+ WRITE_ONCE(n->confirmed, now);
+ if (sk && READ_ONCE(sk->sk_dst_pending_confirm))
+ WRITE_ONCE(sk->sk_dst_pending_confirm, 0);
}
}
@@ -2528,7 +2527,7 @@ static inline bool sk_listener(const struct sock *sk)
return (1 << sk->sk_state) & (TCPF_LISTEN | TCPF_NEW_SYN_RECV);
}
-void sock_enable_timestamp(struct sock *sk, int flag);
+void sock_enable_timestamp(struct sock *sk, enum sock_flags flag);
int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len, int level,
int type);
diff --git a/include/net/tcp.h b/include/net/tcp.h
index ab4eb5eb5d07..36f195fb576a 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -537,7 +537,7 @@ static inline u32 tcp_cookie_time(void)
u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
u16 *mssp);
__u32 cookie_v4_init_sequence(const struct sk_buff *skb, __u16 *mss);
-u64 cookie_init_timestamp(struct request_sock *req);
+u64 cookie_init_timestamp(struct request_sock *req, u64 now);
bool cookie_timestamp_decode(const struct net *net,
struct tcp_options_received *opt);
bool cookie_ecn_ok(const struct tcp_options_received *opt,
@@ -757,10 +757,16 @@ static inline u32 tcp_time_stamp(const struct tcp_sock *tp)
return div_u64(tp->tcp_mstamp, USEC_PER_SEC / TCP_TS_HZ);
}
+/* Convert a nsec timestamp into TCP TSval timestamp (ms based currently) */
+static inline u32 tcp_ns_to_ts(u64 ns)
+{
+ return div_u64(ns, NSEC_PER_SEC / TCP_TS_HZ);
+}
+
/* Could use tcp_clock_us() / 1000, but this version uses a single divide */
static inline u32 tcp_time_stamp_raw(void)
{
- return div_u64(tcp_clock_ns(), NSEC_PER_SEC / TCP_TS_HZ);
+ return tcp_ns_to_ts(tcp_clock_ns());
}
void tcp_mstamp_refresh(struct tcp_sock *tp);
@@ -772,7 +778,7 @@ static inline u32 tcp_stamp_us_delta(u64 t1, u64 t0)
static inline u32 tcp_skb_timestamp(const struct sk_buff *skb)
{
- return div_u64(skb->skb_mstamp_ns, NSEC_PER_SEC / TCP_TS_HZ);
+ return tcp_ns_to_ts(skb->skb_mstamp_ns);
}
/* provide the departure time in us unit */
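Since TCP_TS_HZ is 1000, the factored-out helper is a single nanosecond-to-millisecond divide that tcp_time_stamp_raw() and tcp_skb_timestamp() now share. Illustrative values:

	/* 2 seconds in ns -> 2000 TSval ticks (ms), one div_u64 per call */
	u32 tsval = tcp_ns_to_ts(2 * NSEC_PER_SEC);	/* == 2000 */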
diff --git a/include/net/tls.h b/include/net/tls.h
index 794e297483ea..6ed91e82edd0 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -44,6 +44,7 @@
#include <linux/netdevice.h>
#include <linux/rcupdate.h>
+#include <net/net_namespace.h>
#include <net/tcp.h>
#include <net/strparser.h>
#include <crypto/aead.h>
@@ -61,7 +62,6 @@
#define TLS_RECORD_TYPE_DATA 0x17
#define TLS_AAD_SPACE_SIZE 13
-#define TLS_DEVICE_NAME_MAX 32
#define MAX_IV_SIZE 16
#define TLS_MAX_REC_SEQ_SIZE 8
@@ -75,36 +75,14 @@
*/
#define TLS_AES_CCM_IV_B0_BYTE 2
-/*
- * This structure defines the routines for Inline TLS driver.
- * The following routines are optional and filled with a
- * null pointer if not defined.
- *
- * @name: Its the name of registered Inline tls device
- * @dev_list: Inline tls device list
- * int (*feature)(struct tls_device *device);
- * Called to return Inline TLS driver capability
- *
- * int (*hash)(struct tls_device *device, struct sock *sk);
- * This function sets Inline driver for listen and program
- * device specific functioanlity as required
- *
- * void (*unhash)(struct tls_device *device, struct sock *sk);
- * This function cleans listen state set by Inline TLS driver
- *
- * void (*release)(struct kref *kref);
- * Release the registered device and allocated resources
- * @kref: Number of reference to tls_device
- */
-struct tls_device {
- char name[TLS_DEVICE_NAME_MAX];
- struct list_head dev_list;
- int (*feature)(struct tls_device *device);
- int (*hash)(struct tls_device *device, struct sock *sk);
- void (*unhash)(struct tls_device *device, struct sock *sk);
- void (*release)(struct kref *kref);
- struct kref kref;
-};
+#define __TLS_INC_STATS(net, field) \
+ __SNMP_INC_STATS((net)->mib.tls_statistics, field)
+#define TLS_INC_STATS(net, field) \
+ SNMP_INC_STATS((net)->mib.tls_statistics, field)
+#define __TLS_DEC_STATS(net, field) \
+ __SNMP_DEC_STATS((net)->mib.tls_statistics, field)
+#define TLS_DEC_STATS(net, field) \
+ SNMP_DEC_STATS((net)->mib.tls_statistics, field)
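These mirror the other per-netns SNMP macro families over the new TLS MIB. A hedged usage sketch; LINUX_MIB_TLSDECRYPTERROR is assumed to be one of the counters defined in the accompanying uapi snmp header:

	/* Bump a TLS counter for the socket's namespace. */
	TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTERROR);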
enum {
TLS_BASE,
@@ -159,7 +137,7 @@ struct tls_sw_context_tx {
struct list_head tx_list;
atomic_t encrypt_pending;
int async_notify;
- int async_capable;
+ u8 async_capable:1;
#define BIT_TX_SCHEDULED 0
#define BIT_TX_CLOSING 1
@@ -175,8 +153,8 @@ struct tls_sw_context_rx {
struct sk_buff *recv_pkt;
u8 control;
- int async_capable;
- bool decrypted;
+ u8 async_capable:1;
+ u8 decrypted:1;
atomic_t decrypt_pending;
bool async_notify;
};
@@ -345,7 +323,10 @@ struct tls_offload_context_rx {
#define TLS_OFFLOAD_CONTEXT_SIZE_RX \
(sizeof(struct tls_offload_context_rx) + TLS_DRIVER_STATE_SIZE_RX)
+struct tls_context *tls_ctx_create(struct sock *sk);
void tls_ctx_free(struct sock *sk, struct tls_context *ctx);
+void update_sk_prot(struct sock *sk, struct tls_context *ctx);
+
int wait_on_pending_writer(struct sock *sk, long *timeo);
int tls_sk_query(struct sock *sk, int optname, char __user *optval,
int __user *optlen);
@@ -356,6 +337,8 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx);
void tls_sw_strparser_arm(struct sock *sk, struct tls_context *ctx);
void tls_sw_strparser_done(struct tls_context *tls_ctx);
int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
+int tls_sw_sendpage_locked(struct sock *sk, struct page *page,
+ int offset, size_t size, int flags);
int tls_sw_sendpage(struct sock *sk, struct page *page,
int offset, size_t size, int flags);
void tls_sw_cancel_work_tx(struct tls_context *tls_ctx);
@@ -628,13 +611,6 @@ tls_offload_rx_resync_set_type(struct sock *sk, enum tls_offload_sync_type type)
tls_offload_ctx_rx(tls_ctx)->resync_type = type;
}
-static inline void tls_offload_tx_resync_request(struct sock *sk)
-{
- struct tls_context *tls_ctx = tls_get_ctx(sk);
-
- WARN_ON(test_and_set_bit(TLS_TX_SYNC_SCHED, &tls_ctx->flags));
-}
-
/* Driver's seq tracking has to be disabled until resync succeeded */
static inline bool tls_offload_tx_resync_pending(struct sock *sk)
{
@@ -646,10 +622,11 @@ static inline bool tls_offload_tx_resync_pending(struct sock *sk)
return ret;
}
+int __net_init tls_proc_init(struct net *net);
+void __net_exit tls_proc_fini(struct net *net);
+
int tls_proccess_cmsg(struct sock *sk, struct msghdr *msg,
unsigned char *record_type);
-void tls_register_device(struct tls_device *device);
-void tls_unregister_device(struct tls_device *device);
int decrypt_skb(struct sock *sk, struct sk_buff *skb,
struct scatterlist *sgout);
struct sk_buff *tls_encrypt_skb(struct sk_buff *skb);
@@ -670,7 +647,9 @@ void tls_device_free_resources_tx(struct sock *sk);
int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx);
void tls_device_offload_cleanup_rx(struct sock *sk);
void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq);
-int tls_device_decrypted(struct sock *sk, struct sk_buff *skb);
+void tls_offload_tx_resync_request(struct sock *sk, u32 got_seq, u32 exp_seq);
+int tls_device_decrypted(struct sock *sk, struct tls_context *tls_ctx,
+ struct sk_buff *skb, struct strp_msg *rxm);
#else
static inline void tls_device_init(void) {}
static inline void tls_device_cleanup(void) {}
@@ -693,7 +672,9 @@ static inline void tls_device_offload_cleanup_rx(struct sock *sk) {}
static inline void
tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq) {}
-static inline int tls_device_decrypted(struct sock *sk, struct sk_buff *skb)
+static inline int
+tls_device_decrypted(struct sock *sk, struct tls_context *tls_ctx,
+ struct sk_buff *skb, struct strp_msg *rxm)
{
return 0;
}
diff --git a/include/net/tls_toe.h b/include/net/tls_toe.h
new file mode 100644
index 000000000000..b3aa7593ce2c
--- /dev/null
+++ b/include/net/tls_toe.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/kref.h>
+#include <linux/list.h>
+
+struct sock;
+
+#define TLS_TOE_DEVICE_NAME_MAX 32
+
+/*
+ * This structure defines the routines for the Inline TLS driver.
+ * The following routines are optional and filled with a
+ * null pointer if not defined.
+ *
+ * @name: The name of the registered Inline TLS device
+ * @dev_list: Inline tls device list
+ * int (*feature)(struct tls_toe_device *device);
+ * Called to return Inline TLS driver capability
+ *
+ * int (*hash)(struct tls_toe_device *device, struct sock *sk);
+ * This function sets up the Inline driver for listen and programs
+ * device-specific functionality as required
+ *
+ * void (*unhash)(struct tls_toe_device *device, struct sock *sk);
+ * This function cleans listen state set by Inline TLS driver
+ *
+ * void (*release)(struct kref *kref);
+ * Release the registered device and allocated resources
+ * @kref: Reference count for the tls_toe_device
+ */
+struct tls_toe_device {
+ char name[TLS_TOE_DEVICE_NAME_MAX];
+ struct list_head dev_list;
+ int (*feature)(struct tls_toe_device *device);
+ int (*hash)(struct tls_toe_device *device, struct sock *sk);
+ void (*unhash)(struct tls_toe_device *device, struct sock *sk);
+ void (*release)(struct kref *kref);
+ struct kref kref;
+};
+
+int tls_toe_bypass(struct sock *sk);
+int tls_toe_hash(struct sock *sk);
+void tls_toe_unhash(struct sock *sk);
+
+void tls_toe_register_device(struct tls_toe_device *device);
+void tls_toe_unregister_device(struct tls_toe_device *device);
diff --git a/include/net/vsock_addr.h b/include/net/vsock_addr.h
index 57d2db5c4bdf..cf8cc140d68d 100644
--- a/include/net/vsock_addr.h
+++ b/include/net/vsock_addr.h
@@ -8,7 +8,7 @@
#ifndef _VSOCK_ADDR_H_
#define _VSOCK_ADDR_H_
-#include <linux/vm_sockets.h>
+#include <uapi/linux/vm_sockets.h>
void vsock_addr_init(struct sockaddr_vm *addr, u32 cid, u32 port);
int vsock_addr_validate(const struct sockaddr_vm *addr);
diff --git a/include/net/xdp_priv.h b/include/net/xdp_priv.h
index 6a8cba6ea79a..a9d5b7603b89 100644
--- a/include/net/xdp_priv.h
+++ b/include/net/xdp_priv.h
@@ -12,12 +12,8 @@ struct xdp_mem_allocator {
struct page_pool *page_pool;
struct zero_copy_allocator *zc_alloc;
};
- int disconnect_cnt;
- unsigned long defer_start;
struct rhash_head node;
struct rcu_head rcu;
- struct delayed_work defer_wq;
- unsigned long defer_warn;
};
#endif /* __LINUX_NET_XDP_PRIV_H__ */
diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h
index c9398ce7960f..e3780e4b74e1 100644
--- a/include/net/xdp_sock.h
+++ b/include/net/xdp_sock.h
@@ -69,7 +69,14 @@ struct xdp_umem {
/* Nodes are linked in the struct xdp_sock map_list field, and used to
* track which maps a certain socket reside in.
*/
-struct xsk_map;
+
+struct xsk_map {
+ struct bpf_map map;
+ struct list_head __percpu *flush_list;
+ spinlock_t lock; /* Synchronize map updates */
+ struct xdp_sock *xsk_map[];
+};
+
struct xsk_map_node {
struct list_head node;
struct xsk_map *map;
@@ -109,8 +116,6 @@ struct xdp_sock {
struct xdp_buff;
#ifdef CONFIG_XDP_SOCKETS
int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
-int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
-void xsk_flush(struct xdp_sock *xs);
bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs);
/* Used from netdev driver */
bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt);
@@ -134,6 +139,22 @@ void xsk_map_try_sock_delete(struct xsk_map *map, struct xdp_sock *xs,
struct xdp_sock **map_entry);
int xsk_map_inc(struct xsk_map *map);
void xsk_map_put(struct xsk_map *map);
+int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp,
+ struct xdp_sock *xs);
+void __xsk_map_flush(struct bpf_map *map);
+
+static inline struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map,
+ u32 key)
+{
+ struct xsk_map *m = container_of(map, struct xsk_map, map);
+ struct xdp_sock *xs;
+
+ if (key >= map->max_entries)
+ return NULL;
+
+ xs = READ_ONCE(m->xsk_map[key]);
+ return xs;
+}
static inline u64 xsk_umem_extract_addr(u64 addr)
{
@@ -224,15 +245,6 @@ static inline int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
return -ENOTSUPP;
}
-static inline int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
-{
- return -ENOTSUPP;
-}
-
-static inline void xsk_flush(struct xdp_sock *xs)
-{
-}
-
static inline bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs)
{
return false;
@@ -357,6 +369,21 @@ static inline u64 xsk_umem_adjust_offset(struct xdp_umem *umem, u64 handle,
return 0;
}
+static inline int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp,
+ struct xdp_sock *xs)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void __xsk_map_flush(struct bpf_map *map)
+{
+}
+
+static inline struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map,
+ u32 key)
+{
+ return NULL;
+}
#endif /* CONFIG_XDP_SOCKETS */
#endif /* _LINUX_XDP_SOCK_H */
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index aa08a7a5f6ac..dda3c025452e 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -1613,13 +1613,6 @@ static inline int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optv
{
return -ENOPROTOOPT;
}
-
-static inline int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb)
-{
- /* should not happen */
- kfree_skb(skb);
- return 0;
-}
#endif
struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos, int oif,
diff --git a/include/rdma/ib_cm.h b/include/rdma/ib_cm.h
index 49f4f75499b3..b01a8a8d4de9 100644
--- a/include/rdma/ib_cm.h
+++ b/include/rdma/ib_cm.h
@@ -1,38 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
* Copyright (c) 2004, 2005 Intel Corporation. All rights reserved.
* Copyright (c) 2004 Topspin Corporation. All rights reserved.
* Copyright (c) 2004 Voltaire Corporation. All rights reserved.
* Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019, Mellanox Technologies inc. All rights reserved.
*/
-#if !defined(IB_CM_H)
+#ifndef IB_CM_H
#define IB_CM_H
#include <rdma/ib_mad.h>
diff --git a/include/rdma/ib_mad.h b/include/rdma/ib_mad.h
index eea946fcc819..4e62650e2127 100644
--- a/include/rdma/ib_mad.h
+++ b/include/rdma/ib_mad.h
@@ -815,46 +815,6 @@ int ib_modify_mad(struct ib_mad_agent *mad_agent,
struct ib_mad_send_buf *send_buf, u32 timeout_ms);
/**
- * ib_redirect_mad_qp - Registers a QP for MAD services.
- * @qp: Reference to a QP that requires MAD services.
- * @rmpp_version: If set, indicates that the client will send
- * and receive MADs that contain the RMPP header for the given version.
- * If set to 0, indicates that RMPP is not used by this client.
- * @send_handler: The completion callback routine invoked after a send
- * request has completed.
- * @recv_handler: The completion callback routine invoked for a received
- * MAD.
- * @context: User specified context associated with the registration.
- *
- * Use of this call allows clients to use MAD services, such as RMPP,
- * on user-owned QPs. After calling this routine, users may send
- * MADs on the specified QP by calling ib_mad_post_send.
- */
-struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
- u8 rmpp_version,
- ib_mad_send_handler send_handler,
- ib_mad_recv_handler recv_handler,
- void *context);
-
-/**
- * ib_process_mad_wc - Processes a work completion associated with a
- * MAD sent or received on a redirected QP.
- * @mad_agent: Specifies the registered MAD service using the redirected QP.
- * @wc: References a work completion associated with a sent or received
- * MAD segment.
- *
- * This routine is used to complete or continue processing on a MAD request.
- * If the work completion is associated with a send operation, calling
- * this routine is required to continue an RMPP transfer or to wait for a
- * corresponding response, if it is a request. If the work completion is
- * associated with a receive operation, calling this routine is required to
- * process an inbound or outbound RMPP transfer, or to match a response MAD
- * with its corresponding request.
- */
-int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
- struct ib_wc *wc);
-
-/**
* ib_create_send_mad - Allocate and initialize a data buffer and work request
* for sending a MAD.
* @mad_agent: Specifies the registered MAD service to associate with the MAD.
diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h
index a91b2af64ec4..753f54e17e0a 100644
--- a/include/rdma/ib_umem.h
+++ b/include/rdma/ib_umem.h
@@ -70,7 +70,7 @@ static inline size_t ib_umem_num_pages(struct ib_umem *umem)
#ifdef CONFIG_INFINIBAND_USER_MEM
struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
- size_t size, int access, int dmasync);
+ size_t size, int access);
void ib_umem_release(struct ib_umem *umem);
int ib_umem_page_count(struct ib_umem *umem);
int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
@@ -85,7 +85,7 @@ unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
static inline struct ib_umem *ib_umem_get(struct ib_udata *udata,
unsigned long addr, size_t size,
- int access, int dmasync)
+ int access)
{
return ERR_PTR(-EINVAL);
}
diff --git a/include/rdma/ib_umem_odp.h b/include/rdma/ib_umem_odp.h
index 253df1a1fa54..09b0e4494986 100644
--- a/include/rdma/ib_umem_odp.h
+++ b/include/rdma/ib_umem_odp.h
@@ -78,9 +78,7 @@ struct ib_umem_odp {
bool is_implicit_odp;
struct completion notifier_completion;
- int dying;
unsigned int page_shift;
- struct work_struct work;
};
static inline struct ib_umem_odp *to_ib_umem_odp(struct ib_umem *umem)
@@ -156,22 +154,6 @@ int rbt_ib_umem_for_each_in_range(struct rb_root_cached *root,
umem_call_back cb,
bool blockable, void *cookie);
-/*
- * Find first region intersecting with address range.
- * Return NULL if not found
- */
-static inline struct ib_umem_odp *
-rbt_ib_umem_lookup(struct rb_root_cached *root, u64 addr, u64 length)
-{
- struct interval_tree_node *node;
-
- node = interval_tree_iter_first(root, addr, addr + length - 1);
- if (!node)
- return NULL;
- return container_of(node, struct ib_umem_odp, interval_tree);
-
-}
-
static inline int ib_umem_mmu_notifier_retry(struct ib_umem_odp *umem_odp,
unsigned long mmu_seq)
{
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index e7e733add99f..03352ec4302c 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -445,6 +445,8 @@ struct ib_device_attr {
struct ib_tm_caps tm_caps;
struct ib_cq_caps cq_caps;
u64 max_dm_size;
+	/* Max SGL entries for optimized performance per READ */
+ u32 max_sgl_rd;
};
enum ib_mtu {
@@ -1471,6 +1473,7 @@ struct ib_ucontext {
* Implementation details of the RDMA core, don't use in drivers:
*/
struct rdma_restrack_entry res;
+ struct xarray mmap_xa;
};
struct ib_uobject {
@@ -2120,7 +2123,7 @@ struct ib_flow_action {
atomic_t usecnt;
};
-struct ib_mad_hdr;
+struct ib_mad;
struct ib_grh;
enum ib_process_mad_flags {
@@ -2218,6 +2221,11 @@ struct rdma_netdev_alloc_params {
struct net_device *netdev, void *param);
};
+struct ib_odp_counters {
+ atomic64_t faults;
+ atomic64_t invalidations;
+};
+
struct ib_counters {
struct ib_device *device;
struct ib_uobject *uobject;
@@ -2251,6 +2259,21 @@ struct iw_cm_conn_param;
#define DECLARE_RDMA_OBJ_SIZE(ib_struct) size_t size_##ib_struct
+struct rdma_user_mmap_entry {
+ struct kref ref;
+ struct ib_ucontext *ucontext;
+ unsigned long start_pgoff;
+ size_t npages;
+ bool driver_removed;
+};
+
+/* Return the offset (in bytes) the user should pass to libc's mmap() */
+static inline u64
+rdma_user_mmap_get_offset(const struct rdma_user_mmap_entry *entry)
+{
+ return (u64)entry->start_pgoff << PAGE_SHIFT;
+}
+
/**
* struct ib_device_ops - InfiniBand device operations
* This structure defines all the InfiniBand device operations, providers will
@@ -2278,9 +2301,8 @@ struct ib_device_ops {
int (*process_mad)(struct ib_device *device, int process_mad_flags,
u8 port_num, const struct ib_wc *in_wc,
const struct ib_grh *in_grh,
- const struct ib_mad_hdr *in_mad, size_t in_mad_size,
- struct ib_mad_hdr *out_mad, size_t *out_mad_size,
- u16 *out_mad_pkey_index);
+ const struct ib_mad *in_mad, struct ib_mad *out_mad,
+ size_t *out_mad_size, u16 *out_mad_pkey_index);
int (*query_device)(struct ib_device *device,
struct ib_device_attr *device_attr,
struct ib_udata *udata);
@@ -2363,6 +2385,13 @@ struct ib_device_ops {
struct ib_udata *udata);
void (*dealloc_ucontext)(struct ib_ucontext *context);
int (*mmap)(struct ib_ucontext *context, struct vm_area_struct *vma);
+ /**
+ * This will be called once refcount of an entry in mmap_xa reaches
+ * zero. The type of the memory that was mapped may differ between
+ * entries and is opaque to the rdma_user_mmap interface.
+	 * Therefore it needs to be implemented by the driver in mmap_free.
+ */
+ void (*mmap_free)(struct rdma_user_mmap_entry *entry);
void (*disassociate_ucontext)(struct ib_ucontext *ibcontext);
int (*alloc_pd)(struct ib_pd *pd, struct ib_udata *udata);
void (*dealloc_pd)(struct ib_pd *pd, struct ib_udata *udata);
@@ -2448,6 +2477,9 @@ struct ib_device_ops {
struct ifla_vf_info *ivf);
int (*get_vf_stats)(struct ib_device *device, int vf, u8 port,
struct ifla_vf_stats *stats);
+ int (*get_vf_guid)(struct ib_device *device, int vf, u8 port,
+ struct ifla_vf_guid *node_guid,
+ struct ifla_vf_guid *port_guid);
int (*set_vf_guid)(struct ib_device *device, int vf, u8 port, u64 guid,
int type);
struct ib_wq *(*create_wq)(struct ib_pd *pd,
@@ -2563,6 +2595,13 @@ struct ib_device_ops {
*/
int (*counter_update_stats)(struct rdma_counter *counter);
+ /**
+ * Allows rdma drivers to add their own restrack attributes
+	 * dumped via the 'rdma stat' iproute2 command.
+ */
+ int (*fill_stat_entry)(struct sk_buff *msg,
+ struct rdma_restrack_entry *entry);
+
DECLARE_RDMA_OBJ_SIZE(ib_ah);
DECLARE_RDMA_OBJ_SIZE(ib_cq);
DECLARE_RDMA_OBJ_SIZE(ib_pd);
@@ -2789,18 +2828,21 @@ void ib_set_client_data(struct ib_device *device, struct ib_client *client,
void ib_set_device_ops(struct ib_device *device,
const struct ib_device_ops *ops);
-#if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
int rdma_user_mmap_io(struct ib_ucontext *ucontext, struct vm_area_struct *vma,
- unsigned long pfn, unsigned long size, pgprot_t prot);
-#else
-static inline int rdma_user_mmap_io(struct ib_ucontext *ucontext,
- struct vm_area_struct *vma,
- unsigned long pfn, unsigned long size,
- pgprot_t prot)
-{
- return -EINVAL;
-}
-#endif
+ unsigned long pfn, unsigned long size, pgprot_t prot,
+ struct rdma_user_mmap_entry *entry);
+int rdma_user_mmap_entry_insert(struct ib_ucontext *ucontext,
+ struct rdma_user_mmap_entry *entry,
+ size_t length);
+struct rdma_user_mmap_entry *
+rdma_user_mmap_entry_get_pgoff(struct ib_ucontext *ucontext,
+ unsigned long pgoff);
+struct rdma_user_mmap_entry *
+rdma_user_mmap_entry_get(struct ib_ucontext *ucontext,
+ struct vm_area_struct *vma);
+void rdma_user_mmap_entry_put(struct rdma_user_mmap_entry *entry);
+
+void rdma_user_mmap_entry_remove(struct rdma_user_mmap_entry *entry);
static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
{
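Editor's note: a sketch of a .mmap handler tying the new helpers together — lookup by the VMA's pgoff, map via rdma_user_mmap_io() (which now takes the entry so the core can release it when the VMA goes away), then drop the lookup reference. Driver type and pfn derivation are assumptions modeled on typical users.

static int my_mmap(struct ib_ucontext *uctx, struct vm_area_struct *vma)
{
	struct rdma_user_mmap_entry *rdma_entry;
	struct my_mmap_entry *entry;
	int ret;

	rdma_entry = rdma_user_mmap_entry_get(uctx, vma);
	if (!rdma_entry)
		return -EINVAL;
	entry = container_of(rdma_entry, struct my_mmap_entry, rdma_entry);

	/* Assumes a kernel-virtual backing buffer */
	ret = rdma_user_mmap_io(uctx, vma,
				virt_to_phys(entry->vaddr) >> PAGE_SHIFT,
				PAGE_SIZE, vma->vm_page_prot, rdma_entry);

	/* Release the reference taken by rdma_user_mmap_entry_get() */
	rdma_user_mmap_entry_put(rdma_entry);
	return ret;
}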
@@ -3303,6 +3345,9 @@ int ib_get_vf_config(struct ib_device *device, int vf, u8 port,
struct ifla_vf_info *info);
int ib_get_vf_stats(struct ib_device *device, int vf, u8 port,
struct ifla_vf_stats *stats);
+int ib_get_vf_guid(struct ib_device *device, int vf, u8 port,
+ struct ifla_vf_guid *node_guid,
+ struct ifla_vf_guid *port_guid);
int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
int type);
@@ -4043,9 +4088,7 @@ static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
*/
static inline unsigned int ib_dma_max_seg_size(struct ib_device *dev)
{
- struct device_dma_parameters *p = dev->dma_device->dma_parms;
-
- return p ? p->max_segment_size : UINT_MAX;
+ return dma_get_max_seg_size(dev->dma_device);
}
/**
diff --git a/include/rdma/restrack.h b/include/rdma/restrack.h
index 83df1ec6664e..7682d1bcf789 100644
--- a/include/rdma/restrack.h
+++ b/include/rdma/restrack.h
@@ -156,6 +156,11 @@ int rdma_nl_put_driver_u32_hex(struct sk_buff *msg, const char *name,
int rdma_nl_put_driver_u64(struct sk_buff *msg, const char *name, u64 value);
int rdma_nl_put_driver_u64_hex(struct sk_buff *msg, const char *name,
u64 value);
+int rdma_nl_put_driver_string(struct sk_buff *msg, const char *name,
+ const char *str);
+int rdma_nl_stat_hwcounter_entry(struct sk_buff *msg, const char *name,
+ u64 value);
+
struct rdma_restrack_entry *rdma_restrack_get_byid(struct ib_device *dev,
enum rdma_restrack_type type,
u32 id);
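Editor's note: a hedged sketch of a driver's fill_stat_entry hook (added to ib_device_ops above) using the new string helper; the attribute name and value are invented for illustration.

static int my_fill_stat_entry(struct sk_buff *msg,
			      struct rdma_restrack_entry *entry)
{
	/* Export one named string attribute to 'rdma stat' */
	if (rdma_nl_put_driver_string(msg, "fw_state", "operational"))
		return -EMSGSIZE;
	return 0;
}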
diff --git a/include/soc/fsl/qman.h b/include/soc/fsl/qman.h
index aa31c05a103a..cfe00e08e85b 100644
--- a/include/soc/fsl/qman.h
+++ b/include/soc/fsl/qman.h
@@ -32,6 +32,7 @@
#define __FSL_QMAN_H
#include <linux/bitops.h>
+#include <linux/device.h>
/* Hardware constants */
#define QM_CHANNEL_SWPORTAL0 0
@@ -915,6 +916,16 @@ u16 qman_affine_channel(int cpu);
struct qman_portal *qman_get_affine_portal(int cpu);
/**
+ * qman_start_using_portal - register a device link for the portal user
+ * @p: the portal that will be in use
+ * @dev: the device that will use the portal
+ *
+ * Ensures that the devices that use the portal are unbound when the
+ * portal itself is unbound
+ */
+int qman_start_using_portal(struct qman_portal *p, struct device *dev);
+
+/**
* qman_p_poll_dqrr - process DQRR (fast-path) entries
* @limit: the maximum number of DQRR entries to process
*
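Editor's note: a hedged usage sketch for qman_start_using_portal(); the consumer device and function name are hypothetical.

static int my_bind_portal(struct device *dev)
{
	struct qman_portal *p;
	int cpu, ret;

	cpu = get_cpu();
	p = qman_get_affine_portal(cpu);
	/* Register the device link: if the portal is unbound, so are we */
	ret = qman_start_using_portal(p, dev);
	put_cpu();

	return ret;
}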
diff --git a/include/soc/mscc/ocelot.h b/include/soc/mscc/ocelot.h
new file mode 100644
index 000000000000..e1108a5f4f17
--- /dev/null
+++ b/include/soc/mscc/ocelot.h
@@ -0,0 +1,550 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/* Copyright (c) 2017 Microsemi Corporation
+ */
+
+#ifndef _SOC_MSCC_OCELOT_H
+#define _SOC_MSCC_OCELOT_H
+
+#include <linux/ptp_clock_kernel.h>
+#include <linux/net_tstamp.h>
+#include <linux/if_vlan.h>
+#include <linux/regmap.h>
+#include <net/dsa.h>
+
+#define IFH_INJ_BYPASS BIT(31)
+#define IFH_INJ_POP_CNT_DISABLE (3 << 28)
+
+#define IFH_TAG_TYPE_C 0
+#define IFH_TAG_TYPE_S 1
+
+#define IFH_REW_OP_NOOP 0x0
+#define IFH_REW_OP_DSCP 0x1
+#define IFH_REW_OP_ONE_STEP_PTP 0x2
+#define IFH_REW_OP_TWO_STEP_PTP 0x3
+#define IFH_REW_OP_ORIGIN_PTP 0x5
+
+#define OCELOT_TAG_LEN 16
+#define OCELOT_SHORT_PREFIX_LEN 4
+#define OCELOT_LONG_PREFIX_LEN 16
+
+#define OCELOT_SPEED_2500 0
+#define OCELOT_SPEED_1000 1
+#define OCELOT_SPEED_100 2
+#define OCELOT_SPEED_10 3
+
+#define TARGET_OFFSET 24
+#define REG_MASK GENMASK(TARGET_OFFSET - 1, 0)
+#define REG(reg, offset) [reg & REG_MASK] = offset
+
+#define REG_RESERVED_ADDR 0xffffffff
+#define REG_RESERVED(reg) REG(reg, REG_RESERVED_ADDR)
+
+enum ocelot_target {
+ ANA = 1,
+ QS,
+ QSYS,
+ REW,
+ SYS,
+ S2,
+ HSIO,
+ PTP,
+ GCB,
+ TARGET_MAX,
+};
+
+enum ocelot_reg {
+ ANA_ADVLEARN = ANA << TARGET_OFFSET,
+ ANA_VLANMASK,
+ ANA_PORT_B_DOMAIN,
+ ANA_ANAGEFIL,
+ ANA_ANEVENTS,
+ ANA_STORMLIMIT_BURST,
+ ANA_STORMLIMIT_CFG,
+ ANA_ISOLATED_PORTS,
+ ANA_COMMUNITY_PORTS,
+ ANA_AUTOAGE,
+ ANA_MACTOPTIONS,
+ ANA_LEARNDISC,
+ ANA_AGENCTRL,
+ ANA_MIRRORPORTS,
+ ANA_EMIRRORPORTS,
+ ANA_FLOODING,
+ ANA_FLOODING_IPMC,
+ ANA_SFLOW_CFG,
+ ANA_PORT_MODE,
+ ANA_CUT_THRU_CFG,
+ ANA_PGID_PGID,
+ ANA_TABLES_ANMOVED,
+ ANA_TABLES_MACHDATA,
+ ANA_TABLES_MACLDATA,
+ ANA_TABLES_STREAMDATA,
+ ANA_TABLES_MACACCESS,
+ ANA_TABLES_MACTINDX,
+ ANA_TABLES_VLANACCESS,
+ ANA_TABLES_VLANTIDX,
+ ANA_TABLES_ISDXACCESS,
+ ANA_TABLES_ISDXTIDX,
+ ANA_TABLES_ENTRYLIM,
+ ANA_TABLES_PTP_ID_HIGH,
+ ANA_TABLES_PTP_ID_LOW,
+ ANA_TABLES_STREAMACCESS,
+ ANA_TABLES_STREAMTIDX,
+ ANA_TABLES_SEQ_HISTORY,
+ ANA_TABLES_SEQ_MASK,
+ ANA_TABLES_SFID_MASK,
+ ANA_TABLES_SFIDACCESS,
+ ANA_TABLES_SFIDTIDX,
+ ANA_MSTI_STATE,
+ ANA_OAM_UPM_LM_CNT,
+ ANA_SG_ACCESS_CTRL,
+ ANA_SG_CONFIG_REG_1,
+ ANA_SG_CONFIG_REG_2,
+ ANA_SG_CONFIG_REG_3,
+ ANA_SG_CONFIG_REG_4,
+ ANA_SG_CONFIG_REG_5,
+ ANA_SG_GCL_GS_CONFIG,
+ ANA_SG_GCL_TI_CONFIG,
+ ANA_SG_STATUS_REG_1,
+ ANA_SG_STATUS_REG_2,
+ ANA_SG_STATUS_REG_3,
+ ANA_PORT_VLAN_CFG,
+ ANA_PORT_DROP_CFG,
+ ANA_PORT_QOS_CFG,
+ ANA_PORT_VCAP_CFG,
+ ANA_PORT_VCAP_S1_KEY_CFG,
+ ANA_PORT_VCAP_S2_CFG,
+ ANA_PORT_PCP_DEI_MAP,
+ ANA_PORT_CPU_FWD_CFG,
+ ANA_PORT_CPU_FWD_BPDU_CFG,
+ ANA_PORT_CPU_FWD_GARP_CFG,
+ ANA_PORT_CPU_FWD_CCM_CFG,
+ ANA_PORT_PORT_CFG,
+ ANA_PORT_POL_CFG,
+ ANA_PORT_PTP_CFG,
+ ANA_PORT_PTP_DLY1_CFG,
+ ANA_PORT_PTP_DLY2_CFG,
+ ANA_PORT_SFID_CFG,
+ ANA_PFC_PFC_CFG,
+ ANA_PFC_PFC_TIMER,
+ ANA_IPT_OAM_MEP_CFG,
+ ANA_IPT_IPT,
+ ANA_PPT_PPT,
+ ANA_FID_MAP_FID_MAP,
+ ANA_AGGR_CFG,
+ ANA_CPUQ_CFG,
+ ANA_CPUQ_CFG2,
+ ANA_CPUQ_8021_CFG,
+ ANA_DSCP_CFG,
+ ANA_DSCP_REWR_CFG,
+ ANA_VCAP_RNG_TYPE_CFG,
+ ANA_VCAP_RNG_VAL_CFG,
+ ANA_VRAP_CFG,
+ ANA_VRAP_HDR_DATA,
+ ANA_VRAP_HDR_MASK,
+ ANA_DISCARD_CFG,
+ ANA_FID_CFG,
+ ANA_POL_PIR_CFG,
+ ANA_POL_CIR_CFG,
+ ANA_POL_MODE_CFG,
+ ANA_POL_PIR_STATE,
+ ANA_POL_CIR_STATE,
+ ANA_POL_STATE,
+ ANA_POL_FLOWC,
+ ANA_POL_HYST,
+ ANA_POL_MISC_CFG,
+ QS_XTR_GRP_CFG = QS << TARGET_OFFSET,
+ QS_XTR_RD,
+ QS_XTR_FRM_PRUNING,
+ QS_XTR_FLUSH,
+ QS_XTR_DATA_PRESENT,
+ QS_XTR_CFG,
+ QS_INJ_GRP_CFG,
+ QS_INJ_WR,
+ QS_INJ_CTRL,
+ QS_INJ_STATUS,
+ QS_INJ_ERR,
+ QS_INH_DBG,
+ QSYS_PORT_MODE = QSYS << TARGET_OFFSET,
+ QSYS_SWITCH_PORT_MODE,
+ QSYS_STAT_CNT_CFG,
+ QSYS_EEE_CFG,
+ QSYS_EEE_THRES,
+ QSYS_IGR_NO_SHARING,
+ QSYS_EGR_NO_SHARING,
+ QSYS_SW_STATUS,
+ QSYS_EXT_CPU_CFG,
+ QSYS_PAD_CFG,
+ QSYS_CPU_GROUP_MAP,
+ QSYS_QMAP,
+ QSYS_ISDX_SGRP,
+ QSYS_TIMED_FRAME_ENTRY,
+ QSYS_TFRM_MISC,
+ QSYS_TFRM_PORT_DLY,
+ QSYS_TFRM_TIMER_CFG_1,
+ QSYS_TFRM_TIMER_CFG_2,
+ QSYS_TFRM_TIMER_CFG_3,
+ QSYS_TFRM_TIMER_CFG_4,
+ QSYS_TFRM_TIMER_CFG_5,
+ QSYS_TFRM_TIMER_CFG_6,
+ QSYS_TFRM_TIMER_CFG_7,
+ QSYS_TFRM_TIMER_CFG_8,
+ QSYS_RED_PROFILE,
+ QSYS_RES_QOS_MODE,
+ QSYS_RES_CFG,
+ QSYS_RES_STAT,
+ QSYS_EGR_DROP_MODE,
+ QSYS_EQ_CTRL,
+ QSYS_EVENTS_CORE,
+ QSYS_QMAXSDU_CFG_0,
+ QSYS_QMAXSDU_CFG_1,
+ QSYS_QMAXSDU_CFG_2,
+ QSYS_QMAXSDU_CFG_3,
+ QSYS_QMAXSDU_CFG_4,
+ QSYS_QMAXSDU_CFG_5,
+ QSYS_QMAXSDU_CFG_6,
+ QSYS_QMAXSDU_CFG_7,
+ QSYS_PREEMPTION_CFG,
+ QSYS_CIR_CFG,
+ QSYS_EIR_CFG,
+ QSYS_SE_CFG,
+ QSYS_SE_DWRR_CFG,
+ QSYS_SE_CONNECT,
+ QSYS_SE_DLB_SENSE,
+ QSYS_CIR_STATE,
+ QSYS_EIR_STATE,
+ QSYS_SE_STATE,
+ QSYS_HSCH_MISC_CFG,
+ QSYS_TAG_CONFIG,
+ QSYS_TAS_PARAM_CFG_CTRL,
+ QSYS_PORT_MAX_SDU,
+ QSYS_PARAM_CFG_REG_1,
+ QSYS_PARAM_CFG_REG_2,
+ QSYS_PARAM_CFG_REG_3,
+ QSYS_PARAM_CFG_REG_4,
+ QSYS_PARAM_CFG_REG_5,
+ QSYS_GCL_CFG_REG_1,
+ QSYS_GCL_CFG_REG_2,
+ QSYS_PARAM_STATUS_REG_1,
+ QSYS_PARAM_STATUS_REG_2,
+ QSYS_PARAM_STATUS_REG_3,
+ QSYS_PARAM_STATUS_REG_4,
+ QSYS_PARAM_STATUS_REG_5,
+ QSYS_PARAM_STATUS_REG_6,
+ QSYS_PARAM_STATUS_REG_7,
+ QSYS_PARAM_STATUS_REG_8,
+ QSYS_PARAM_STATUS_REG_9,
+ QSYS_GCL_STATUS_REG_1,
+ QSYS_GCL_STATUS_REG_2,
+ REW_PORT_VLAN_CFG = REW << TARGET_OFFSET,
+ REW_TAG_CFG,
+ REW_PORT_CFG,
+ REW_DSCP_CFG,
+ REW_PCP_DEI_QOS_MAP_CFG,
+ REW_PTP_CFG,
+ REW_PTP_DLY1_CFG,
+ REW_RED_TAG_CFG,
+ REW_DSCP_REMAP_DP1_CFG,
+ REW_DSCP_REMAP_CFG,
+ REW_STAT_CFG,
+ REW_REW_STICKY,
+ REW_PPT,
+ SYS_COUNT_RX_OCTETS = SYS << TARGET_OFFSET,
+ SYS_COUNT_RX_UNICAST,
+ SYS_COUNT_RX_MULTICAST,
+ SYS_COUNT_RX_BROADCAST,
+ SYS_COUNT_RX_SHORTS,
+ SYS_COUNT_RX_FRAGMENTS,
+ SYS_COUNT_RX_JABBERS,
+ SYS_COUNT_RX_CRC_ALIGN_ERRS,
+ SYS_COUNT_RX_SYM_ERRS,
+ SYS_COUNT_RX_64,
+ SYS_COUNT_RX_65_127,
+ SYS_COUNT_RX_128_255,
+ SYS_COUNT_RX_256_1023,
+ SYS_COUNT_RX_1024_1526,
+ SYS_COUNT_RX_1527_MAX,
+ SYS_COUNT_RX_PAUSE,
+ SYS_COUNT_RX_CONTROL,
+ SYS_COUNT_RX_LONGS,
+ SYS_COUNT_RX_CLASSIFIED_DROPS,
+ SYS_COUNT_TX_OCTETS,
+ SYS_COUNT_TX_UNICAST,
+ SYS_COUNT_TX_MULTICAST,
+ SYS_COUNT_TX_BROADCAST,
+ SYS_COUNT_TX_COLLISION,
+ SYS_COUNT_TX_DROPS,
+ SYS_COUNT_TX_PAUSE,
+ SYS_COUNT_TX_64,
+ SYS_COUNT_TX_65_127,
+ SYS_COUNT_TX_128_511,
+ SYS_COUNT_TX_512_1023,
+ SYS_COUNT_TX_1024_1526,
+ SYS_COUNT_TX_1527_MAX,
+ SYS_COUNT_TX_AGING,
+ SYS_RESET_CFG,
+ SYS_SR_ETYPE_CFG,
+ SYS_VLAN_ETYPE_CFG,
+ SYS_PORT_MODE,
+ SYS_FRONT_PORT_MODE,
+ SYS_FRM_AGING,
+ SYS_STAT_CFG,
+ SYS_SW_STATUS,
+ SYS_MISC_CFG,
+ SYS_REW_MAC_HIGH_CFG,
+ SYS_REW_MAC_LOW_CFG,
+ SYS_TIMESTAMP_OFFSET,
+ SYS_CMID,
+ SYS_PAUSE_CFG,
+ SYS_PAUSE_TOT_CFG,
+ SYS_ATOP,
+ SYS_ATOP_TOT_CFG,
+ SYS_MAC_FC_CFG,
+ SYS_MMGT,
+ SYS_MMGT_FAST,
+ SYS_EVENTS_DIF,
+ SYS_EVENTS_CORE,
+ SYS_CNT,
+ SYS_PTP_STATUS,
+ SYS_PTP_TXSTAMP,
+ SYS_PTP_NXT,
+ SYS_PTP_CFG,
+ SYS_RAM_INIT,
+ SYS_CM_ADDR,
+ SYS_CM_DATA_WR,
+ SYS_CM_DATA_RD,
+ SYS_CM_OP,
+ SYS_CM_DATA,
+ S2_CORE_UPDATE_CTRL = S2 << TARGET_OFFSET,
+ S2_CORE_MV_CFG,
+ S2_CACHE_ENTRY_DAT,
+ S2_CACHE_MASK_DAT,
+ S2_CACHE_ACTION_DAT,
+ S2_CACHE_CNT_DAT,
+ S2_CACHE_TG_DAT,
+ PTP_PIN_CFG = PTP << TARGET_OFFSET,
+ PTP_PIN_TOD_SEC_MSB,
+ PTP_PIN_TOD_SEC_LSB,
+ PTP_PIN_TOD_NSEC,
+ PTP_CFG_MISC,
+ PTP_CLK_CFG_ADJ_CFG,
+ PTP_CLK_CFG_ADJ_FREQ,
+ GCB_SOFT_RST = GCB << TARGET_OFFSET,
+};
+
+enum ocelot_regfield {
+ ANA_ADVLEARN_VLAN_CHK,
+ ANA_ADVLEARN_LEARN_MIRROR,
+ ANA_ANEVENTS_FLOOD_DISCARD,
+ ANA_ANEVENTS_MSTI_DROP,
+ ANA_ANEVENTS_ACLKILL,
+ ANA_ANEVENTS_ACLUSED,
+ ANA_ANEVENTS_AUTOAGE,
+ ANA_ANEVENTS_VS2TTL1,
+ ANA_ANEVENTS_STORM_DROP,
+ ANA_ANEVENTS_LEARN_DROP,
+ ANA_ANEVENTS_AGED_ENTRY,
+ ANA_ANEVENTS_CPU_LEARN_FAILED,
+ ANA_ANEVENTS_AUTO_LEARN_FAILED,
+ ANA_ANEVENTS_LEARN_REMOVE,
+ ANA_ANEVENTS_AUTO_LEARNED,
+ ANA_ANEVENTS_AUTO_MOVED,
+ ANA_ANEVENTS_DROPPED,
+ ANA_ANEVENTS_CLASSIFIED_DROP,
+ ANA_ANEVENTS_CLASSIFIED_COPY,
+ ANA_ANEVENTS_VLAN_DISCARD,
+ ANA_ANEVENTS_FWD_DISCARD,
+ ANA_ANEVENTS_MULTICAST_FLOOD,
+ ANA_ANEVENTS_UNICAST_FLOOD,
+ ANA_ANEVENTS_DEST_KNOWN,
+ ANA_ANEVENTS_BUCKET3_MATCH,
+ ANA_ANEVENTS_BUCKET2_MATCH,
+ ANA_ANEVENTS_BUCKET1_MATCH,
+ ANA_ANEVENTS_BUCKET0_MATCH,
+ ANA_ANEVENTS_CPU_OPERATION,
+ ANA_ANEVENTS_DMAC_LOOKUP,
+ ANA_ANEVENTS_SMAC_LOOKUP,
+ ANA_ANEVENTS_SEQ_GEN_ERR_0,
+ ANA_ANEVENTS_SEQ_GEN_ERR_1,
+ ANA_TABLES_MACACCESS_B_DOM,
+ ANA_TABLES_MACTINDX_BUCKET,
+ ANA_TABLES_MACTINDX_M_INDEX,
+ QSYS_TIMED_FRAME_ENTRY_TFRM_VLD,
+ QSYS_TIMED_FRAME_ENTRY_TFRM_FP,
+ QSYS_TIMED_FRAME_ENTRY_TFRM_PORTNO,
+ QSYS_TIMED_FRAME_ENTRY_TFRM_TM_SEL,
+ QSYS_TIMED_FRAME_ENTRY_TFRM_TM_T,
+ SYS_RESET_CFG_CORE_ENA,
+ SYS_RESET_CFG_MEM_ENA,
+ SYS_RESET_CFG_MEM_INIT,
+ GCB_SOFT_RST_SWC_RST,
+ REGFIELD_MAX
+};
+
+enum ocelot_clk_pins {
+ ALT_PPS_PIN = 1,
+ EXT_CLK_PIN,
+ ALT_LDST_PIN,
+ TOD_ACC_PIN
+};
+
+struct ocelot_stat_layout {
+ u32 offset;
+ char name[ETH_GSTRING_LEN];
+};
+
+enum ocelot_tag_prefix {
+ OCELOT_TAG_PREFIX_DISABLED = 0,
+ OCELOT_TAG_PREFIX_NONE,
+ OCELOT_TAG_PREFIX_SHORT,
+ OCELOT_TAG_PREFIX_LONG,
+};
+
+struct ocelot;
+
+struct ocelot_ops {
+ void (*pcs_init)(struct ocelot *ocelot, int port);
+ int (*reset)(struct ocelot *ocelot);
+};
+
+struct ocelot_skb {
+ struct list_head head;
+ struct sk_buff *skb;
+ u8 id;
+};
+
+struct ocelot_port {
+ struct ocelot *ocelot;
+
+ void __iomem *regs;
+
+ /* Ingress default VLAN (pvid) */
+ u16 pvid;
+
+ /* Egress default VLAN (vid) */
+ u16 vid;
+
+ u8 ptp_cmd;
+ struct list_head skbs;
+ u8 ts_id;
+};
+
+struct ocelot {
+ struct device *dev;
+
+ const struct ocelot_ops *ops;
+ struct regmap *targets[TARGET_MAX];
+ struct regmap_field *regfields[REGFIELD_MAX];
+ const u32 *const *map;
+ const struct ocelot_stat_layout *stats_layout;
+ unsigned int num_stats;
+
+ int shared_queue_sz;
+
+ struct net_device *hw_bridge_dev;
+ u16 bridge_mask;
+ u16 bridge_fwd_mask;
+
+ struct ocelot_port **ports;
+
+ u8 base_mac[ETH_ALEN];
+
+ /* Keep track of the vlan port masks */
+ u32 vlan_mask[VLAN_N_VID];
+
+ u8 num_phys_ports;
+ u8 num_cpu_ports;
+ u8 cpu;
+
+ u32 *lags;
+
+ struct list_head multicast;
+
+ /* Workqueue to check statistics for overflow with its lock */
+ struct mutex stats_lock;
+ u64 *stats;
+ struct delayed_work stats_work;
+ struct workqueue_struct *stats_queue;
+
+ u8 ptp:1;
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info ptp_info;
+ struct hwtstamp_config hwtstamp_config;
+ /* Protects the PTP interface state */
+ struct mutex ptp_lock;
+ /* Protects the PTP clock */
+ spinlock_t ptp_clock_lock;
+
+ void (*port_pcs_init)(struct ocelot_port *port);
+};
+
+#define ocelot_read_ix(ocelot, reg, gi, ri) __ocelot_read_ix(ocelot, reg, reg##_GSZ * (gi) + reg##_RSZ * (ri))
+#define ocelot_read_gix(ocelot, reg, gi) __ocelot_read_ix(ocelot, reg, reg##_GSZ * (gi))
+#define ocelot_read_rix(ocelot, reg, ri) __ocelot_read_ix(ocelot, reg, reg##_RSZ * (ri))
+#define ocelot_read(ocelot, reg) __ocelot_read_ix(ocelot, reg, 0)
+
+#define ocelot_write_ix(ocelot, val, reg, gi, ri) __ocelot_write_ix(ocelot, val, reg, reg##_GSZ * (gi) + reg##_RSZ * (ri))
+#define ocelot_write_gix(ocelot, val, reg, gi) __ocelot_write_ix(ocelot, val, reg, reg##_GSZ * (gi))
+#define ocelot_write_rix(ocelot, val, reg, ri) __ocelot_write_ix(ocelot, val, reg, reg##_RSZ * (ri))
+#define ocelot_write(ocelot, val, reg) __ocelot_write_ix(ocelot, val, reg, 0)
+
+#define ocelot_rmw_ix(ocelot, val, m, reg, gi, ri) __ocelot_rmw_ix(ocelot, val, m, reg, reg##_GSZ * (gi) + reg##_RSZ * (ri))
+#define ocelot_rmw_gix(ocelot, val, m, reg, gi) __ocelot_rmw_ix(ocelot, val, m, reg, reg##_GSZ * (gi))
+#define ocelot_rmw_rix(ocelot, val, m, reg, ri) __ocelot_rmw_ix(ocelot, val, m, reg, reg##_RSZ * (ri))
+#define ocelot_rmw(ocelot, val, m, reg) __ocelot_rmw_ix(ocelot, val, m, reg, 0)
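Editor's note: an illustration of the indexed-access convention above, assuming the platform register header defines ANA_PGID_PGID_RSZ as the replication stride for that register.

/* For a per-port replicated register,
 *   ocelot_read_rix(ocelot, ANA_PGID_PGID, port)
 * expands to
 *   __ocelot_read_ix(ocelot, ANA_PGID_PGID, ANA_PGID_PGID_RSZ * (port))
 */
static u32 my_read_port_pgid(struct ocelot *ocelot, int port)
{
	return ocelot_read_rix(ocelot, ANA_PGID_PGID, port);
}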
+
+/* I/O */
+u32 ocelot_port_readl(struct ocelot_port *port, u32 reg);
+void ocelot_port_writel(struct ocelot_port *port, u32 val, u32 reg);
+u32 __ocelot_read_ix(struct ocelot *ocelot, u32 reg, u32 offset);
+void __ocelot_write_ix(struct ocelot *ocelot, u32 val, u32 reg, u32 offset);
+void __ocelot_rmw_ix(struct ocelot *ocelot, u32 val, u32 mask, u32 reg,
+ u32 offset);
+
+/* Hardware initialization */
+int ocelot_regfields_init(struct ocelot *ocelot,
+ const struct reg_field *const regfields);
+struct regmap *ocelot_regmap_init(struct ocelot *ocelot, struct resource *res);
+void ocelot_set_cpu_port(struct ocelot *ocelot, int cpu,
+ enum ocelot_tag_prefix injection,
+ enum ocelot_tag_prefix extraction);
+int ocelot_init(struct ocelot *ocelot);
+void ocelot_deinit(struct ocelot *ocelot);
+void ocelot_init_port(struct ocelot *ocelot, int port);
+
+/* DSA callbacks */
+void ocelot_port_enable(struct ocelot *ocelot, int port,
+ struct phy_device *phy);
+void ocelot_port_disable(struct ocelot *ocelot, int port);
+void ocelot_get_strings(struct ocelot *ocelot, int port, u32 sset, u8 *data);
+void ocelot_get_ethtool_stats(struct ocelot *ocelot, int port, u64 *data);
+int ocelot_get_sset_count(struct ocelot *ocelot, int port, int sset);
+int ocelot_get_ts_info(struct ocelot *ocelot, int port,
+ struct ethtool_ts_info *info);
+void ocelot_set_ageing_time(struct ocelot *ocelot, unsigned int msecs);
+void ocelot_adjust_link(struct ocelot *ocelot, int port,
+ struct phy_device *phydev);
+void ocelot_port_vlan_filtering(struct ocelot *ocelot, int port,
+ bool vlan_aware);
+void ocelot_bridge_stp_state_set(struct ocelot *ocelot, int port, u8 state);
+int ocelot_port_bridge_join(struct ocelot *ocelot, int port,
+ struct net_device *bridge);
+int ocelot_port_bridge_leave(struct ocelot *ocelot, int port,
+ struct net_device *bridge);
+int ocelot_fdb_dump(struct ocelot *ocelot, int port,
+ dsa_fdb_dump_cb_t *cb, void *data);
+int ocelot_fdb_add(struct ocelot *ocelot, int port,
+ const unsigned char *addr, u16 vid, bool vlan_aware);
+int ocelot_fdb_del(struct ocelot *ocelot, int port,
+ const unsigned char *addr, u16 vid);
+int ocelot_vlan_add(struct ocelot *ocelot, int port, u16 vid, bool pvid,
+ bool untagged);
+int ocelot_vlan_del(struct ocelot *ocelot, int port, u16 vid);
+int ocelot_hwstamp_get(struct ocelot *ocelot, int port, struct ifreq *ifr);
+int ocelot_hwstamp_set(struct ocelot *ocelot, int port, struct ifreq *ifr);
+int ocelot_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts);
+int ocelot_port_add_txtstamp_skb(struct ocelot_port *ocelot_port,
+ struct sk_buff *skb);
+void ocelot_get_txtstamp(struct ocelot *ocelot);
+
+#endif
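Editor's note: the "DSA callbacks" exported above are designed to be thin wrappers. A hypothetical DSA front-end (names illustrative) might delegate like this:

static void my_dsa_get_strings(struct dsa_switch *ds, int port,
			       u32 stringset, u8 *data)
{
	struct ocelot *ocelot = ds->priv;

	ocelot_get_strings(ocelot, port, stringset, data);
}

static int my_dsa_get_sset_count(struct dsa_switch *ds, int port, int sset)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_get_sset_count(ocelot, port, sset);
}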
diff --git a/drivers/net/ethernet/mscc/ocelot_sys.h b/include/soc/mscc/ocelot_sys.h
index 16f91e172bcb..16f91e172bcb 100644
--- a/drivers/net/ethernet/mscc/ocelot_sys.h
+++ b/include/soc/mscc/ocelot_sys.h
diff --git a/include/sound/core.h b/include/sound/core.h
index ee238f100f73..af3dce956c17 100644
--- a/include/sound/core.h
+++ b/include/sound/core.h
@@ -117,6 +117,7 @@ struct snd_card {
struct device card_dev; /* cardX object for sysfs */
const struct attribute_group *dev_groups[4]; /* assigned sysfs attr */
bool registered; /* card_dev is registered? */
+ int sync_irq; /* assigned irq, used for PCM sync */
wait_queue_head_t remove_sleep;
#ifdef CONFIG_PM
diff --git a/include/sound/dmaengine_pcm.h b/include/sound/dmaengine_pcm.h
index c679f6116580..b65220685920 100644
--- a/include/sound/dmaengine_pcm.h
+++ b/include/sound/dmaengine_pcm.h
@@ -83,6 +83,11 @@ void snd_dmaengine_pcm_set_config_from_dai_data(
const struct snd_dmaengine_dai_dma_data *dma_data,
struct dma_slave_config *config);
+int snd_dmaengine_pcm_refine_runtime_hwparams(
+ struct snd_pcm_substream *substream,
+ struct snd_dmaengine_dai_dma_data *dma_data,
+ struct snd_pcm_hardware *hw,
+ struct dma_chan *chan);
/*
* Try to request the DMA channel using compat_request_channel or
diff --git a/include/sound/hda_codec.h b/include/sound/hda_codec.h
index 9a0393cf024c..ac18f428eda6 100644
--- a/include/sound/hda_codec.h
+++ b/include/sound/hda_codec.h
@@ -254,6 +254,7 @@ struct hda_codec {
unsigned int force_pin_prefix:1; /* Add location prefix */
unsigned int link_down_at_suspend:1; /* link down at runtime suspend */
unsigned int relaxed_resume:1; /* don't resume forcibly for jack */
+ unsigned int mst_no_extra_pcms:1; /* no backup PCMs for DP-MST */
#ifdef CONFIG_PM
unsigned long power_on_acct;
diff --git a/include/sound/intel-dsp-config.h b/include/sound/intel-dsp-config.h
new file mode 100644
index 000000000000..c36622bee3f8
--- /dev/null
+++ b/include/sound/intel-dsp-config.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * intel-dsp-config.h - Intel DSP config
+ *
+ * Copyright (c) 2019 Jaroslav Kysela <perex@perex.cz>
+ */
+
+#ifndef __INTEL_DSP_CONFIG_H__
+#define __INTEL_DSP_CONFIG_H__
+
+struct pci_dev;
+
+enum {
+ SND_INTEL_DSP_DRIVER_ANY = 0,
+ SND_INTEL_DSP_DRIVER_LEGACY,
+ SND_INTEL_DSP_DRIVER_SST,
+ SND_INTEL_DSP_DRIVER_SOF,
+ SND_INTEL_DSP_DRIVER_LAST = SND_INTEL_DSP_DRIVER_SOF
+};
+
+#if IS_ENABLED(CONFIG_SND_INTEL_DSP_CONFIG)
+
+int snd_intel_dsp_driver_probe(struct pci_dev *pci);
+
+#else
+
+static inline int snd_intel_dsp_driver_probe(struct pci_dev *pci)
+{
+ return SND_INTEL_DSP_DRIVER_ANY;
+}
+
+#endif
+
+#endif
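Editor's note: a hedged sketch of how a PCI audio driver could consult this arbiter during probe; the driver function is hypothetical.

static int my_hda_probe(struct pci_dev *pci, const struct pci_device_id *id)
{
	int driver = snd_intel_dsp_driver_probe(pci);

	if (driver != SND_INTEL_DSP_DRIVER_ANY &&
	    driver != SND_INTEL_DSP_DRIVER_LEGACY)
		return -ENODEV;	/* SST or SOF should own this device */

	/* ... continue with the normal legacy probe ... */
	return 0;
}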
diff --git a/include/sound/memalloc.h b/include/sound/memalloc.h
index 240622d89c0b..3b47832b1c1f 100644
--- a/include/sound/memalloc.h
+++ b/include/sound/memalloc.h
@@ -21,7 +21,6 @@ struct snd_dma_device {
struct device *dev; /* generic device */
};
-#define snd_dma_pci_data(pci) (&(pci)->dev)
#define snd_dma_continuous_data(x) ((struct device *)(__force unsigned long)(x))
@@ -44,6 +43,7 @@ struct snd_dma_device {
#else
#define SNDRV_DMA_TYPE_DEV_IRAM SNDRV_DMA_TYPE_DEV
#endif
+#define SNDRV_DMA_TYPE_VMALLOC 7 /* vmalloc'ed buffer */
/*
* info for buffer allocation
diff --git a/include/sound/pcm.h b/include/sound/pcm.h
index bbe6eb1ff5d2..8a89fa6fdd5e 100644
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -59,6 +59,7 @@ struct snd_pcm_ops {
int (*hw_free)(struct snd_pcm_substream *substream);
int (*prepare)(struct snd_pcm_substream *substream);
int (*trigger)(struct snd_pcm_substream *substream, int cmd);
+ int (*sync_stop)(struct snd_pcm_substream *substream);
snd_pcm_uframes_t (*pointer)(struct snd_pcm_substream *substream);
int (*get_time_info)(struct snd_pcm_substream *substream,
struct timespec *system_ts, struct timespec *audio_ts,
@@ -395,6 +396,7 @@ struct snd_pcm_runtime {
wait_queue_head_t sleep; /* poll sleep */
wait_queue_head_t tsleep; /* transfer sleep */
struct fasync_struct *fasync;
+ bool stop_operating; /* sync_stop will be called */
/* -- private section -- */
void *private_data;
@@ -414,6 +416,7 @@ struct snd_pcm_runtime {
size_t dma_bytes; /* size of DMA area */
struct snd_dma_buffer *dma_buffer_p; /* allocated buffer */
+ unsigned int buffer_changed:1; /* buffer allocation changed; set only in managed mode */
/* -- audio timestamp config -- */
struct snd_pcm_audio_tstamp_config audio_tstamp_config;
@@ -475,6 +478,7 @@ struct snd_pcm_substream {
#endif /* CONFIG_SND_VERBOSE_PROCFS */
/* misc flags */
unsigned int hw_opened: 1;
+ unsigned int managed_buffer_alloc:1;
};
#define SUBSTREAM_BUSY(substream) ((substream)->ref_count > 0)
@@ -1186,6 +1190,12 @@ void snd_pcm_lib_preallocate_pages_for_all(struct snd_pcm *pcm,
int snd_pcm_lib_malloc_pages(struct snd_pcm_substream *substream, size_t size);
int snd_pcm_lib_free_pages(struct snd_pcm_substream *substream);
+void snd_pcm_set_managed_buffer(struct snd_pcm_substream *substream, int type,
+ struct device *data, size_t size, size_t max);
+void snd_pcm_set_managed_buffer_all(struct snd_pcm *pcm, int type,
+ struct device *data,
+ size_t size, size_t max);
+
int _snd_pcm_lib_alloc_vmalloc_buffer(struct snd_pcm_substream *substream,
size_t size, gfp_t gfp_flags);
int snd_pcm_lib_free_vmalloc_buffer(struct snd_pcm_substream *substream);
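Editor's note: a sketch of the new managed-buffer helpers above. With managed allocation the driver no longer calls snd_pcm_lib_malloc_pages()/snd_pcm_lib_free_pages() itself; the sizes here are illustrative.

static int my_pcm_new(struct device *dev, struct snd_pcm *pcm)
{
	/* Preallocate 64 KiB, allow growth to 4 MiB; the PCM core then
	 * handles buffer allocation in hw_params and release in hw_free.
	 */
	snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_DEV, dev,
				       64 * 1024, 4 * 1024 * 1024);
	return 0;
}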
@@ -1236,14 +1246,6 @@ static inline int snd_pcm_lib_alloc_vmalloc_32_buffer
*/
#define snd_pcm_substream_sgbuf(substream) \
snd_pcm_get_dma_buf(substream)->private_data
-
-struct page *snd_pcm_sgbuf_ops_page(struct snd_pcm_substream *substream,
- unsigned long offset);
-#else /* !SND_DMA_SGBUF */
-/*
- * fake using a continuous buffer
- */
-#define snd_pcm_sgbuf_ops_page NULL
#endif /* SND_DMA_SGBUF */
/**
@@ -1336,8 +1338,6 @@ static inline void snd_pcm_limit_isa_dma_size(int dma, size_t *max)
(IEC958_AES1_CON_PCM_CODER<<8)|\
(IEC958_AES3_CON_FS_48000<<24))
-#define PCM_RUNTIME_CHECK(sub) snd_BUG_ON(!(sub) || !(sub)->runtime)
-
const char *snd_pcm_format_name(snd_pcm_format_t format);
/**
diff --git a/include/sound/pxa2xx-lib.h b/include/sound/pxa2xx-lib.h
index 6758fc12fa84..0feaf16e6ac0 100644
--- a/include/sound/pxa2xx-lib.h
+++ b/include/sound/pxa2xx-lib.h
@@ -10,6 +10,7 @@ struct snd_pcm_substream;
struct snd_pcm_hw_params;
struct snd_soc_pcm_runtime;
struct snd_pcm;
+struct snd_soc_component;
extern int pxa2xx_pcm_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params);
@@ -23,8 +24,29 @@ extern int pxa2xx_pcm_mmap(struct snd_pcm_substream *substream,
struct vm_area_struct *vma);
extern int pxa2xx_pcm_preallocate_dma_buffer(struct snd_pcm *pcm, int stream);
extern void pxa2xx_pcm_free_dma_buffers(struct snd_pcm *pcm);
-extern int pxa2xx_soc_pcm_new(struct snd_soc_pcm_runtime *rtd);
-extern const struct snd_pcm_ops pxa2xx_pcm_ops;
+extern void pxa2xx_soc_pcm_free(struct snd_soc_component *component,
+ struct snd_pcm *pcm);
+extern int pxa2xx_soc_pcm_new(struct snd_soc_component *component,
+ struct snd_soc_pcm_runtime *rtd);
+extern int pxa2xx_soc_pcm_open(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream);
+extern int pxa2xx_soc_pcm_close(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream);
+extern int pxa2xx_soc_pcm_hw_params(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params);
+extern int pxa2xx_soc_pcm_hw_free(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream);
+extern int pxa2xx_soc_pcm_prepare(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream);
+extern int pxa2xx_soc_pcm_trigger(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream, int cmd);
+extern snd_pcm_uframes_t
+pxa2xx_soc_pcm_pointer(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream);
+extern int pxa2xx_soc_pcm_mmap(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ struct vm_area_struct *vma);
/* AC97 */
diff --git a/include/sound/rt5682.h b/include/sound/rt5682.h
index bf2ee75aabb1..bc2c31734df1 100644
--- a/include/sound/rt5682.h
+++ b/include/sound/rt5682.h
@@ -31,6 +31,7 @@ struct rt5682_platform_data {
enum rt5682_dmic1_data_pin dmic1_data_pin;
enum rt5682_dmic1_clk_pin dmic1_clk_pin;
enum rt5682_jd_src jd_src;
+ unsigned int btndet_delay;
};
#endif
diff --git a/include/sound/simple_card_utils.h b/include/sound/simple_card_utils.h
index 31f76b6abf71..bbdd1542d6f1 100644
--- a/include/sound/simple_card_utils.h
+++ b/include/sound/simple_card_utils.h
@@ -8,6 +8,7 @@
#ifndef __SIMPLE_CARD_UTILS_H
#define __SIMPLE_CARD_UTILS_H
+#include <linux/clk.h>
#include <sound/soc.h>
#define asoc_simple_init_hp(card, sjack, prefix) \
diff --git a/include/sound/soc-acpi-intel-match.h b/include/sound/soc-acpi-intel-match.h
index 6c9929abd90b..20c0bee3b959 100644
--- a/include/sound/soc-acpi-intel-match.h
+++ b/include/sound/soc-acpi-intel-match.h
@@ -24,9 +24,12 @@ extern struct snd_soc_acpi_mach snd_soc_acpi_intel_kbl_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_bxt_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_glk_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_cnl_machines[];
+extern struct snd_soc_acpi_mach snd_soc_acpi_intel_cfl_machines[];
+extern struct snd_soc_acpi_mach snd_soc_acpi_intel_cml_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_icl_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_tgl_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_ehl_machines[];
+extern struct snd_soc_acpi_mach snd_soc_acpi_intel_jsl_machines[];
/*
* generic table used for HDA codec-based platforms, possibly with
diff --git a/include/sound/soc-acpi.h b/include/sound/soc-acpi.h
index 35b38e41e5b2..c4c997bd0379 100644
--- a/include/sound/soc-acpi.h
+++ b/include/sound/soc-acpi.h
@@ -60,12 +60,14 @@ static inline struct snd_soc_acpi_mach *snd_soc_acpi_codec_list(void *arg)
* @acpi_ipc_irq_index: used for BYT-CR detection
* @platform: string used for HDaudio codec support
* @codec_mask: used for HDAudio support
* @common_hdmi_codec_drv: use common HDAudio HDMI codec driver
*/
struct snd_soc_acpi_mach_params {
u32 acpi_ipc_irq_index;
const char *platform;
u32 codec_mask;
u32 dmic_num;
+ bool common_hdmi_codec_drv;
};
/**
@@ -75,6 +77,7 @@ struct snd_soc_acpi_mach_params {
* all firmware/topology related fields.
*
* @id: ACPI ID (usually the codec's) used to find a matching machine driver.
+ * @link_mask: describes required board layout, e.g. for SoundWire.
* @drv_name: machine driver name
* @fw_filename: firmware file name. Used when SOF is not enabled.
* @board: board name
@@ -90,6 +93,7 @@ struct snd_soc_acpi_mach_params {
/* Descriptor for SST ASoC machine driver */
struct snd_soc_acpi_mach {
const u8 id[ACPI_ID_LEN];
+ const u32 link_mask;
const char *drv_name;
const char *fw_filename;
const char *board;
diff --git a/include/sound/soc-component.h b/include/sound/soc-component.h
index 5d80b2eef525..506f72a6b2c2 100644
--- a/include/sound/soc-component.h
+++ b/include/sound/soc-component.h
@@ -3,10 +3,6 @@
* soc-component.h
*
* Copyright (c) 2019 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __SOC_COMPONENT_H
#define __SOC_COMPONENT_H
@@ -51,8 +47,10 @@ struct snd_soc_component_driver {
unsigned int reg, unsigned int val);
/* pcm creation and destruction */
- int (*pcm_new)(struct snd_soc_pcm_runtime *rtd);
- void (*pcm_free)(struct snd_pcm *pcm);
+ int (*pcm_construct)(struct snd_soc_component *component,
+ struct snd_soc_pcm_runtime *rtd);
+ void (*pcm_destruct)(struct snd_soc_component *component,
+ struct snd_pcm *pcm);
/* component wide operations */
int (*set_sysclk)(struct snd_soc_component *component,
@@ -74,7 +72,42 @@ struct snd_soc_component_driver {
int (*set_bias_level)(struct snd_soc_component *component,
enum snd_soc_bias_level level);
- const struct snd_pcm_ops *ops;
+ int (*open)(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream);
+ int (*close)(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream);
+ int (*ioctl)(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ unsigned int cmd, void *arg);
+ int (*hw_params)(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params);
+ int (*hw_free)(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream);
+ int (*prepare)(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream);
+ int (*trigger)(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream, int cmd);
+ int (*sync_stop)(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream);
+ snd_pcm_uframes_t (*pointer)(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream);
+ int (*get_time_info)(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream, struct timespec *system_ts,
+ struct timespec *audio_ts,
+ struct snd_pcm_audio_tstamp_config *audio_tstamp_config,
+ struct snd_pcm_audio_tstamp_report *audio_tstamp_report);
+ int (*copy_user)(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream, int channel,
+ unsigned long pos, void __user *buf,
+ unsigned long bytes);
+ struct page *(*page)(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ unsigned long offset);
+ int (*mmap)(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ struct vm_area_struct *vma);
+
const struct snd_compr_ops *compr_ops;
/* probe ordering - for components with runtime dependencies */
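Editor's note: with the snd_pcm_ops pointer removed, PCM callbacks become plain members of the component driver. A hedged sketch of a converted driver (handler names hypothetical):

static const struct snd_soc_component_driver my_component = {
	.name		= "my-platform",
	.pcm_construct	= my_pcm_construct,
	.pcm_destruct	= my_pcm_destruct,
	.open		= my_open,
	.close		= my_close,
	.hw_params	= my_hw_params,
	.hw_free	= my_hw_free,
	.trigger	= my_trigger,
	.pointer	= my_pointer,
};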
@@ -374,6 +407,7 @@ int snd_soc_component_of_xlate_dai_name(struct snd_soc_component *component,
int snd_soc_pcm_component_pointer(struct snd_pcm_substream *substream);
int snd_soc_pcm_component_ioctl(struct snd_pcm_substream *substream,
unsigned int cmd, void *arg);
+int snd_soc_pcm_component_sync_stop(struct snd_pcm_substream *substream);
int snd_soc_pcm_component_copy_user(struct snd_pcm_substream *substream,
int channel, unsigned long pos,
void __user *buf, unsigned long bytes);
@@ -381,7 +415,7 @@ struct page *snd_soc_pcm_component_page(struct snd_pcm_substream *substream,
unsigned long offset);
int snd_soc_pcm_component_mmap(struct snd_pcm_substream *substream,
struct vm_area_struct *vma);
-int snd_soc_pcm_component_new(struct snd_pcm *pcm);
-void snd_soc_pcm_component_free(struct snd_pcm *pcm);
+int snd_soc_pcm_component_new(struct snd_soc_pcm_runtime *rtd);
+void snd_soc_pcm_component_free(struct snd_soc_pcm_runtime *rtd);
#endif /* __SOC_COMPONENT_H */
diff --git a/include/sound/soc-dpcm.h b/include/sound/soc-dpcm.h
index e55aeb00ce2d..b654ebfc8766 100644
--- a/include/sound/soc-dpcm.h
+++ b/include/sound/soc-dpcm.h
@@ -103,15 +103,15 @@ struct snd_soc_dpcm_runtime {
int trigger_pending; /* trigger cmd + 1 if pending, 0 if not */
};
-#define for_each_dpcm_fe(be, stream, dpcm) \
- list_for_each_entry(dpcm, &(be)->dpcm[stream].fe_clients, list_fe)
-
-#define for_each_dpcm_be(fe, stream, dpcm) \
- list_for_each_entry(dpcm, &(fe)->dpcm[stream].be_clients, list_be)
-#define for_each_dpcm_be_safe(fe, stream, dpcm, _dpcm) \
- list_for_each_entry_safe(dpcm, _dpcm, &(fe)->dpcm[stream].be_clients, list_be)
-#define for_each_dpcm_be_rollback(fe, stream, dpcm) \
- list_for_each_entry_continue_reverse(dpcm, &(fe)->dpcm[stream].be_clients, list_be)
+#define for_each_dpcm_fe(be, stream, _dpcm) \
+ list_for_each_entry(_dpcm, &(be)->dpcm[stream].fe_clients, list_fe)
+
+#define for_each_dpcm_be(fe, stream, _dpcm) \
+ list_for_each_entry(_dpcm, &(fe)->dpcm[stream].be_clients, list_be)
+#define for_each_dpcm_be_safe(fe, stream, _dpcm, __dpcm) \
+ list_for_each_entry_safe(_dpcm, __dpcm, &(fe)->dpcm[stream].be_clients, list_be)
+#define for_each_dpcm_be_rollback(fe, stream, _dpcm) \
+ list_for_each_entry_continue_reverse(_dpcm, &(fe)->dpcm[stream].be_clients, list_be)
/* can this BE stop and free */
int snd_soc_dpcm_can_be_free_stop(struct snd_soc_pcm_runtime *fe,
diff --git a/include/sound/soc.h b/include/sound/soc.h
index f264c6509f00..c28a1ed5e8df 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -299,6 +299,12 @@
.put = snd_soc_bytes_put, .private_value = \
((unsigned long)&(struct soc_bytes) \
{.base = xbase, .num_regs = xregs }) }
+#define SND_SOC_BYTES_E(xname, xbase, xregs, xhandler_get, xhandler_put) \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
+ .info = snd_soc_bytes_info, .get = xhandler_get, \
+ .put = xhandler_put, .private_value = \
+ ((unsigned long)&(struct soc_bytes) \
+ {.base = xbase, .num_regs = xregs }) }
#define SND_SOC_BYTES_MASK(xname, xbase, xregs, xmask) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
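Editor's note: the new _E variant takes caller-supplied get/put handlers instead of the generic bytes accessors; a usage sketch with hypothetical names.

static const struct snd_kcontrol_new my_controls[] = {
	SND_SOC_BYTES_E("DSP Coefficients", MY_COEF_BASE, 4,
			my_coef_get, my_coef_put),
};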
@@ -739,10 +745,12 @@ struct snd_soc_rtdcom_list {
struct snd_soc_component*
snd_soc_rtdcom_lookup(struct snd_soc_pcm_runtime *rtd,
const char *driver_name);
-#define for_each_rtdcom(rtd, rtdcom) \
- list_for_each_entry(rtdcom, &(rtd)->component_list, list)
-#define for_each_rtdcom_safe(rtd, rtdcom1, rtdcom2) \
- list_for_each_entry_safe(rtdcom1, rtdcom2, &(rtd)->component_list, list)
+#define for_each_rtd_components(rtd, rtdcom, _component) \
+ for (rtdcom = list_first_entry(&(rtd)->component_list, \
+ typeof(*rtdcom), list); \
+ (&rtdcom->list != &(rtd)->component_list) && \
+ (_component = rtdcom->component); \
+ rtdcom = list_next_entry(rtdcom, list))
struct snd_soc_dai_link_component {
const char *name;
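Editor's note: the reworked iterator assigns the component directly on each pass, so callers no longer dereference rtdcom themselves; a small sketch.

static void my_list_components(struct snd_soc_pcm_runtime *rtd)
{
	struct snd_soc_rtdcom_list *rtdcom;
	struct snd_soc_component *component;

	for_each_rtd_components(rtd, rtdcom, component)
		pr_debug("bound component: %s\n", component->name);
}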
@@ -845,7 +853,9 @@ struct snd_soc_dai_link {
unsigned int ignore:1;
struct list_head list; /* DAI link list of the soc card */
+#ifdef CONFIG_SND_SOC_TOPOLOGY
struct snd_soc_dobj dobj; /* For topology */
+#endif
};
#define for_each_link_codecs(link, i, codec) \
for ((i) = 0; \
@@ -978,6 +988,7 @@ struct snd_soc_card {
const char *name;
const char *long_name;
const char *driver_name;
+ const char *components;
char dmi_longname[80];
char topology_shortname[32];
@@ -1148,7 +1159,6 @@ struct snd_soc_pcm_runtime {
struct list_head component_list; /* list of connected components */
/* bit field */
- unsigned int dev_registered:1;
unsigned int pop_wait:1;
unsigned int fe_compr:1; /* for Dynamic PCM */
};
@@ -1168,7 +1178,9 @@ struct soc_mixer_control {
unsigned int sign_bit;
unsigned int invert:1;
unsigned int autodisable:1;
+#ifdef CONFIG_SND_SOC_TOPOLOGY
struct snd_soc_dobj dobj;
+#endif
};
struct soc_bytes {
@@ -1179,8 +1191,9 @@ struct soc_bytes {
struct soc_bytes_ext {
int max;
+#ifdef CONFIG_SND_SOC_TOPOLOGY
struct snd_soc_dobj dobj;
-
+#endif
/* used for TLV byte control */
int (*get)(struct snd_kcontrol *kcontrol, unsigned int __user *bytes,
unsigned int size);
@@ -1204,7 +1217,9 @@ struct soc_enum {
const char * const *texts;
const unsigned int *values;
unsigned int autodisable:1;
+#ifdef CONFIG_SND_SOC_TOPOLOGY
struct snd_soc_dobj dobj;
+#endif
};
/* device driver data */
@@ -1325,8 +1340,10 @@ struct snd_soc_dai_link *snd_soc_find_dai_link(struct snd_soc_card *card,
int id, const char *name,
const char *stream_name);
-int snd_soc_register_dai(struct snd_soc_component *component,
- struct snd_soc_dai_driver *dai_drv);
+struct snd_soc_dai *snd_soc_register_dai(struct snd_soc_component *component,
+ struct snd_soc_dai_driver *dai_drv,
+ bool legacy_dai_naming);
+void snd_soc_unregister_dai(struct snd_soc_dai *dai);
struct snd_soc_dai *snd_soc_find_dai(
const struct snd_soc_dai_link_component *dlc);
@@ -1391,6 +1408,11 @@ static inline void snd_soc_dapm_mutex_unlock(struct snd_soc_dapm_context *dapm)
mutex_unlock(&dapm->card->dapm_mutex);
}
+/* bypass */
+int snd_soc_pcm_lib_ioctl(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ unsigned int cmd, void *arg);
+
#include <sound/soc-component.h>
#endif
diff --git a/include/sound/sof.h b/include/sound/sof.h
index 4640566b54fe..479101736ee0 100644
--- a/include/sound/sof.h
+++ b/include/sound/sof.h
@@ -61,6 +61,9 @@ struct sof_dev_desc {
/* list of machines using this configuration */
struct snd_soc_acpi_mach *machines;
+ /* alternate list of machines using this configuration */
+ struct snd_soc_acpi_mach *alt_machines;
+
/* Platform resource indexes in BAR / ACPI resources. */
/* Must set to -1 if not used - add new items to end */
int resindex_lpe_base;
diff --git a/include/sound/sof/dai-imx.h b/include/sound/sof/dai-imx.h
new file mode 100644
index 000000000000..e02fb0b0fae1
--- /dev/null
+++ b/include/sound/sof/dai-imx.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/*
+ * Copyright 2019 NXP
+ *
+ * Author: Daniel Baluta <daniel.baluta@nxp.com>
+ */
+
+#ifndef __INCLUDE_SOUND_SOF_DAI_IMX_H__
+#define __INCLUDE_SOUND_SOF_DAI_IMX_H__
+
+#include <sound/sof/header.h>
+
+/* ESAI Configuration Request - SOF_IPC_DAI_ESAI_CONFIG */
+struct sof_ipc_dai_esai_params {
+ struct sof_ipc_hdr hdr;
+
+ /* MCLK */
+ uint16_t reserved1;
+ uint16_t mclk_id;
+ uint32_t mclk_direction;
+
+ uint32_t mclk_rate; /* MCLK frequency in Hz */
+ uint32_t fsync_rate; /* FSYNC frequency in Hz */
+ uint32_t bclk_rate; /* BCLK frequency in Hz */
+
+ /* TDM */
+ uint32_t tdm_slots;
+ uint32_t rx_slots;
+ uint32_t tx_slots;
+ uint16_t tdm_slot_width;
+ uint16_t reserved2; /* alignment */
+} __packed;
+
+#endif
diff --git a/include/sound/sof/dai.h b/include/sound/sof/dai.h
index 0f1235022146..c229565767e5 100644
--- a/include/sound/sof/dai.h
+++ b/include/sound/sof/dai.h
@@ -11,6 +11,7 @@
#include <sound/sof/header.h>
#include <sound/sof/dai-intel.h>
+#include <sound/sof/dai-imx.h>
/*
* DAI Configuration.
@@ -73,6 +74,7 @@ struct sof_ipc_dai_config {
struct sof_ipc_dai_dmic_params dmic;
struct sof_ipc_dai_hda_params hda;
struct sof_ipc_dai_alh_params alh;
+ struct sof_ipc_dai_esai_params esai;
};
} __packed;
diff --git a/include/sound/sof/header.h b/include/sound/sof/header.h
index 10f00c08dbb7..bf3edd9c08b4 100644
--- a/include/sound/sof/header.h
+++ b/include/sound/sof/header.h
@@ -9,6 +9,7 @@
#ifndef __INCLUDE_SOUND_SOF_HEADER_H__
#define __INCLUDE_SOUND_SOF_HEADER_H__
+#include <linux/types.h>
#include <uapi/sound/sof/abi.h>
/** \addtogroup sof_uapi uAPI
@@ -74,6 +75,7 @@
#define SOF_IPC_PM_CLK_GET SOF_CMD_TYPE(0x005)
#define SOF_IPC_PM_CLK_REQ SOF_CMD_TYPE(0x006)
#define SOF_IPC_PM_CORE_ENABLE SOF_CMD_TYPE(0x007)
+#define SOF_IPC_PM_GATE SOF_CMD_TYPE(0x008)
/* component runtime config - multiple different types */
#define SOF_IPC_COMP_SET_VALUE SOF_CMD_TYPE(0x001)
diff --git a/include/sound/sof/pm.h b/include/sound/sof/pm.h
index 003879401d63..3cf2e0f39d94 100644
--- a/include/sound/sof/pm.h
+++ b/include/sound/sof/pm.h
@@ -45,4 +45,12 @@ struct sof_ipc_pm_core_config {
uint32_t enable_mask;
} __packed;
+struct sof_ipc_pm_gate {
+ struct sof_ipc_cmd_hdr hdr;
+ uint32_t flags; /* platform specific */
+
+ /* reserved for future use */
+ uint32_t reserved[5];
+} __packed;
+
#endif
diff --git a/include/sound/sof/stream.h b/include/sound/sof/stream.h
index 0b71b381b952..7facefb541b3 100644
--- a/include/sound/sof/stream.h
+++ b/include/sound/sof/stream.h
@@ -83,10 +83,10 @@ struct sof_ipc_stream_params {
uint16_t sample_valid_bytes;
uint16_t sample_container_bytes;
- /* for notifying host period has completed - 0 means no period IRQ */
uint32_t host_period_bytes;
+ uint16_t no_stream_position; /**< 1 means don't send stream position */
- uint32_t reserved[2];
+ uint16_t reserved[3];
uint16_t chmap[SOF_IPC_MAX_CHANNELS]; /**< channel map - SOF_CHMAP_ */
} __packed;
diff --git a/include/sound/timer.h b/include/sound/timer.h
index 199c36295a0d..a53e37bcd746 100644
--- a/include/sound/timer.h
+++ b/include/sound/timer.h
@@ -118,8 +118,10 @@ int snd_timer_global_new(char *id, int device, struct snd_timer **rtimer);
int snd_timer_global_free(struct snd_timer *timer);
int snd_timer_global_register(struct snd_timer *timer);
-int snd_timer_open(struct snd_timer_instance **ti, char *owner, struct snd_timer_id *tid, unsigned int slave_id);
-int snd_timer_close(struct snd_timer_instance *timeri);
+struct snd_timer_instance *snd_timer_instance_new(const char *owner);
+void snd_timer_instance_free(struct snd_timer_instance *timeri);
+int snd_timer_open(struct snd_timer_instance *timeri, struct snd_timer_id *tid, unsigned int slave_id);
+void snd_timer_close(struct snd_timer_instance *timeri);
unsigned long snd_timer_resolution(struct snd_timer_instance *timeri);
int snd_timer_start(struct snd_timer_instance *timeri, unsigned int ticks);
int snd_timer_stop(struct snd_timer_instance *timeri);
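Editor's note: instance allocation is now separate from open/close. A hedged sketch of the new call sequence; caller names are hypothetical.

static int my_timer_setup(struct snd_timer_id *tid, unsigned int slave_id,
			  struct snd_timer_instance **out)
{
	struct snd_timer_instance *ti;
	int err;

	ti = snd_timer_instance_new("my-driver");
	if (!ti)
		return -ENOMEM;

	err = snd_timer_open(ti, tid, slave_id);
	if (err < 0) {
		snd_timer_instance_free(ti);
		return err;
	}
	*out = ti;
	return 0;
	/* teardown is now: snd_timer_close(ti); snd_timer_instance_free(ti); */
}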
diff --git a/include/sound/wm8904.h b/include/sound/wm8904.h
index 14074405f501..88ac1870510e 100644
--- a/include/sound/wm8904.h
+++ b/include/sound/wm8904.h
@@ -120,7 +120,7 @@
* DRC configurations are specified with a label and a set of register
* values to write (the enable bits will be ignored). At runtime an
* enumerated control will be presented for each DRC block allowing
- * the user to choose the configration to use.
+ * the user to choose the configuration to use.
*
* Configurations may be generated by hand or by using the DRC control
* panel provided by the WISCE - see http://www.wolfsonmicro.com/wisce/
diff --git a/include/trace/bpf_probe.h b/include/trace/bpf_probe.h
index d6e556c0a085..b04c29270973 100644
--- a/include/trace/bpf_probe.h
+++ b/include/trace/bpf_probe.h
@@ -74,11 +74,12 @@ static inline void bpf_test_probe_##call(void) \
{ \
check_trace_callback_type_##call(__bpf_trace_##template); \
} \
+typedef void (*btf_trace_##call)(void *__data, proto); \
static struct bpf_raw_event_map __used \
__attribute__((section("__bpf_raw_tp_map"))) \
__bpf_trace_tp_map_##call = { \
.tp = &__tracepoint_##call, \
- .bpf_func = (void *)__bpf_trace_##template, \
+ .bpf_func = (void *)(btf_trace_##call)__bpf_trace_##template, \
.num_args = COUNT_ARGS(args), \
.writable_size = size, \
};
diff --git a/include/trace/events/bridge.h b/include/trace/events/bridge.h
index 8ea966448b58..6b200059c2c5 100644
--- a/include/trace/events/bridge.h
+++ b/include/trace/events/bridge.h
@@ -95,16 +95,16 @@ TRACE_EVENT(fdb_delete,
TRACE_EVENT(br_fdb_update,
TP_PROTO(struct net_bridge *br, struct net_bridge_port *source,
- const unsigned char *addr, u16 vid, bool added_by_user),
+ const unsigned char *addr, u16 vid, unsigned long flags),
- TP_ARGS(br, source, addr, vid, added_by_user),
+ TP_ARGS(br, source, addr, vid, flags),
TP_STRUCT__entry(
__string(br_dev, br->dev->name)
__string(dev, source->dev->name)
__array(unsigned char, addr, ETH_ALEN)
__field(u16, vid)
- __field(bool, added_by_user)
+ __field(unsigned long, flags)
),
TP_fast_assign(
@@ -112,14 +112,14 @@ TRACE_EVENT(br_fdb_update,
__assign_str(dev, source->dev->name);
memcpy(__entry->addr, addr, ETH_ALEN);
__entry->vid = vid;
- __entry->added_by_user = added_by_user;
+ __entry->flags = flags;
),
- TP_printk("br_dev %s source %s addr %02x:%02x:%02x:%02x:%02x:%02x vid %u added_by_user %d",
+ TP_printk("br_dev %s source %s addr %02x:%02x:%02x:%02x:%02x:%02x vid %u flags 0x%lx",
__get_str(br_dev), __get_str(dev), __entry->addr[0],
__entry->addr[1], __entry->addr[2], __entry->addr[3],
__entry->addr[4], __entry->addr[5], __entry->vid,
- __entry->added_by_user)
+ __entry->flags)
);
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index 75ae1899452b..620bf1b38fba 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -19,7 +19,7 @@ struct btrfs_delayed_ref_node;
struct btrfs_delayed_tree_ref;
struct btrfs_delayed_data_ref;
struct btrfs_delayed_ref_head;
-struct btrfs_block_group_cache;
+struct btrfs_block_group;
struct btrfs_free_cluster;
struct map_lookup;
struct extent_buffer;
@@ -170,7 +170,7 @@ DECLARE_EVENT_CLASS(btrfs__inode,
TP_STRUCT__entry_btrfs(
__field( u64, ino )
- __field( blkcnt_t, blocks )
+ __field( u64, blocks )
__field( u64, disk_i_size )
__field( u64, generation )
__field( u64, last_trans )
@@ -194,7 +194,7 @@ DECLARE_EVENT_CLASS(btrfs__inode,
show_root_type(__entry->root_objectid),
__entry->generation,
__entry->ino,
- (unsigned long long)__entry->blocks,
+ __entry->blocks,
__entry->disk_i_size,
__entry->last_trans,
__entry->logged_trans)
@@ -292,7 +292,7 @@ TRACE_EVENT_CONDITION(btrfs_get_extent,
TRACE_EVENT(btrfs_handle_em_exist,
- TP_PROTO(struct btrfs_fs_info *fs_info,
+ TP_PROTO(const struct btrfs_fs_info *fs_info,
const struct extent_map *existing, const struct extent_map *map,
u64 start, u64 len),
@@ -330,8 +330,8 @@ TRACE_EVENT(btrfs_handle_em_exist,
/* file extent item */
DECLARE_EVENT_CLASS(btrfs__file_extent_item_regular,
- TP_PROTO(struct btrfs_inode *bi, struct extent_buffer *l,
- struct btrfs_file_extent_item *fi, u64 start),
+ TP_PROTO(const struct btrfs_inode *bi, const struct extent_buffer *l,
+ const struct btrfs_file_extent_item *fi, u64 start),
TP_ARGS(bi, l, fi, start),
@@ -385,8 +385,8 @@ DECLARE_EVENT_CLASS(btrfs__file_extent_item_regular,
DECLARE_EVENT_CLASS(
btrfs__file_extent_item_inline,
- TP_PROTO(struct btrfs_inode *bi, struct extent_buffer *l,
- struct btrfs_file_extent_item *fi, int slot, u64 start),
+ TP_PROTO(const struct btrfs_inode *bi, const struct extent_buffer *l,
+ const struct btrfs_file_extent_item *fi, int slot, u64 start),
TP_ARGS(bi, l, fi, slot, start),
@@ -426,8 +426,8 @@ DECLARE_EVENT_CLASS(
DEFINE_EVENT(
btrfs__file_extent_item_regular, btrfs_get_extent_show_fi_regular,
- TP_PROTO(struct btrfs_inode *bi, struct extent_buffer *l,
- struct btrfs_file_extent_item *fi, u64 start),
+ TP_PROTO(const struct btrfs_inode *bi, const struct extent_buffer *l,
+ const struct btrfs_file_extent_item *fi, u64 start),
TP_ARGS(bi, l, fi, start)
);
@@ -435,8 +435,8 @@ DEFINE_EVENT(
DEFINE_EVENT(
btrfs__file_extent_item_regular, btrfs_truncate_show_fi_regular,
- TP_PROTO(struct btrfs_inode *bi, struct extent_buffer *l,
- struct btrfs_file_extent_item *fi, u64 start),
+ TP_PROTO(const struct btrfs_inode *bi, const struct extent_buffer *l,
+ const struct btrfs_file_extent_item *fi, u64 start),
TP_ARGS(bi, l, fi, start)
);
@@ -444,8 +444,8 @@ DEFINE_EVENT(
DEFINE_EVENT(
btrfs__file_extent_item_inline, btrfs_get_extent_show_fi_inline,
- TP_PROTO(struct btrfs_inode *bi, struct extent_buffer *l,
- struct btrfs_file_extent_item *fi, int slot, u64 start),
+ TP_PROTO(const struct btrfs_inode *bi, const struct extent_buffer *l,
+ const struct btrfs_file_extent_item *fi, int slot, u64 start),
TP_ARGS(bi, l, fi, slot, start)
);
@@ -453,8 +453,8 @@ DEFINE_EVENT(
DEFINE_EVENT(
btrfs__file_extent_item_inline, btrfs_truncate_show_fi_inline,
- TP_PROTO(struct btrfs_inode *bi, struct extent_buffer *l,
- struct btrfs_file_extent_item *fi, int slot, u64 start),
+ TP_PROTO(const struct btrfs_inode *bi, const struct extent_buffer *l,
+ const struct btrfs_file_extent_item *fi, int slot, u64 start),
TP_ARGS(bi, l, fi, slot, start)
);
@@ -574,7 +574,7 @@ DECLARE_EVENT_CLASS(btrfs__writepage,
__field( char, for_kupdate )
__field( char, for_reclaim )
__field( char, range_cyclic )
- __field( pgoff_t, writeback_index )
+ __field( unsigned long, writeback_index )
__field( u64, root_objectid )
),
@@ -603,7 +603,7 @@ DECLARE_EVENT_CLASS(btrfs__writepage,
__entry->range_start, __entry->range_end,
__entry->for_kupdate,
__entry->for_reclaim, __entry->range_cyclic,
- (unsigned long)__entry->writeback_index)
+ __entry->writeback_index)
);
DEFINE_EVENT(btrfs__writepage, __extent_writepage,
@@ -622,7 +622,7 @@ TRACE_EVENT(btrfs_writepage_end_io_hook,
TP_STRUCT__entry_btrfs(
__field( u64, ino )
- __field( pgoff_t, index )
+ __field( unsigned long, index )
__field( u64, start )
__field( u64, end )
__field( int, uptodate )
@@ -642,7 +642,7 @@ TRACE_EVENT(btrfs_writepage_end_io_hook,
TP_printk_btrfs("root=%llu(%s) ino=%llu page_index=%lu start=%llu "
"end=%llu uptodate=%d",
show_root_type(__entry->root_objectid),
- __entry->ino, (unsigned long)__entry->index,
+ __entry->ino, __entry->index,
__entry->start,
__entry->end, __entry->uptodate)
);
@@ -699,7 +699,7 @@ TRACE_EVENT(btrfs_sync_fs,
TRACE_EVENT(btrfs_add_block_group,
TP_PROTO(const struct btrfs_fs_info *fs_info,
- const struct btrfs_block_group_cache *block_group, int create),
+ const struct btrfs_block_group *block_group, int create),
TP_ARGS(fs_info, block_group, create),
@@ -713,11 +713,10 @@ TRACE_EVENT(btrfs_add_block_group,
),
TP_fast_assign_btrfs(fs_info,
- __entry->offset = block_group->key.objectid;
- __entry->size = block_group->key.offset;
+ __entry->offset = block_group->start;
+ __entry->size = block_group->length;
__entry->flags = block_group->flags;
- __entry->bytes_used =
- btrfs_block_group_used(&block_group->item);
+ __entry->bytes_used = block_group->used;
__entry->bytes_super = block_group->bytes_super;
__entry->create = create;
),
@@ -1018,7 +1017,7 @@ TRACE_EVENT(btrfs_cow_block,
TRACE_EVENT(btrfs_space_reservation,
- TP_PROTO(const struct btrfs_fs_info *fs_info, char *type, u64 val,
+ TP_PROTO(const struct btrfs_fs_info *fs_info, const char *type, u64 val,
u64 bytes, int reserve),
TP_ARGS(fs_info, type, val, bytes, reserve),
@@ -1051,7 +1050,7 @@ TRACE_EVENT(btrfs_space_reservation,
TRACE_EVENT(btrfs_trigger_flush,
TP_PROTO(const struct btrfs_fs_info *fs_info, u64 flags, u64 bytes,
- int flush, char *reason),
+ int flush, const char *reason),
TP_ARGS(fs_info, flags, bytes, flush, reason),
@@ -1185,7 +1184,7 @@ TRACE_EVENT(find_free_extent,
DECLARE_EVENT_CLASS(btrfs__reserve_extent,
- TP_PROTO(const struct btrfs_block_group_cache *block_group, u64 start,
+ TP_PROTO(const struct btrfs_block_group *block_group, u64 start,
u64 len),
TP_ARGS(block_group, start, len),
@@ -1198,7 +1197,7 @@ DECLARE_EVENT_CLASS(btrfs__reserve_extent,
),
TP_fast_assign_btrfs(block_group->fs_info,
- __entry->bg_objectid = block_group->key.objectid;
+ __entry->bg_objectid = block_group->start;
__entry->flags = block_group->flags;
__entry->start = start;
__entry->len = len;
@@ -1215,7 +1214,7 @@ DECLARE_EVENT_CLASS(btrfs__reserve_extent,
DEFINE_EVENT(btrfs__reserve_extent, btrfs_reserve_extent,
- TP_PROTO(const struct btrfs_block_group_cache *block_group, u64 start,
+ TP_PROTO(const struct btrfs_block_group *block_group, u64 start,
u64 len),
TP_ARGS(block_group, start, len)
@@ -1223,7 +1222,7 @@ DEFINE_EVENT(btrfs__reserve_extent, btrfs_reserve_extent,
DEFINE_EVENT(btrfs__reserve_extent, btrfs_reserve_extent_cluster,
- TP_PROTO(const struct btrfs_block_group_cache *block_group, u64 start,
+ TP_PROTO(const struct btrfs_block_group *block_group, u64 start,
u64 len),
TP_ARGS(block_group, start, len)
@@ -1231,7 +1230,7 @@ DEFINE_EVENT(btrfs__reserve_extent, btrfs_reserve_extent_cluster,
TRACE_EVENT(btrfs_find_cluster,
- TP_PROTO(const struct btrfs_block_group_cache *block_group, u64 start,
+ TP_PROTO(const struct btrfs_block_group *block_group, u64 start,
u64 bytes, u64 empty_size, u64 min_bytes),
TP_ARGS(block_group, start, bytes, empty_size, min_bytes),
@@ -1246,7 +1245,7 @@ TRACE_EVENT(btrfs_find_cluster,
),
TP_fast_assign_btrfs(block_group->fs_info,
- __entry->bg_objectid = block_group->key.objectid;
+ __entry->bg_objectid = block_group->start;
__entry->flags = block_group->flags;
__entry->start = start;
__entry->bytes = bytes;
@@ -1264,7 +1263,7 @@ TRACE_EVENT(btrfs_find_cluster,
TRACE_EVENT(btrfs_failed_cluster_setup,
- TP_PROTO(const struct btrfs_block_group_cache *block_group),
+ TP_PROTO(const struct btrfs_block_group *block_group),
TP_ARGS(block_group),
@@ -1273,7 +1272,7 @@ TRACE_EVENT(btrfs_failed_cluster_setup,
),
TP_fast_assign_btrfs(block_group->fs_info,
- __entry->bg_objectid = block_group->key.objectid;
+ __entry->bg_objectid = block_group->start;
),
TP_printk_btrfs("block_group=%llu", __entry->bg_objectid)
@@ -1281,7 +1280,7 @@ TRACE_EVENT(btrfs_failed_cluster_setup,
TRACE_EVENT(btrfs_setup_cluster,
- TP_PROTO(const struct btrfs_block_group_cache *block_group,
+ TP_PROTO(const struct btrfs_block_group *block_group,
const struct btrfs_free_cluster *cluster,
u64 size, int bitmap),
@@ -1297,7 +1296,7 @@ TRACE_EVENT(btrfs_setup_cluster,
),
TP_fast_assign_btrfs(block_group->fs_info,
- __entry->bg_objectid = block_group->key.objectid;
+ __entry->bg_objectid = block_group->start;
__entry->flags = block_group->flags;
__entry->start = cluster->window_start;
__entry->max_size = cluster->max_size;
@@ -1325,17 +1324,17 @@ TRACE_EVENT(alloc_extent_state,
TP_STRUCT__entry(
__field(const struct extent_state *, state)
__field(gfp_t, mask)
- __field(unsigned long, ip)
+ __field(const void*, ip)
),
TP_fast_assign(
__entry->state = state,
__entry->mask = mask,
- __entry->ip = IP
+ __entry->ip = (const void *)IP
),
TP_printk("state=%p mask=%s caller=%pS", __entry->state,
- show_gfp_flags(__entry->mask), (const void *)__entry->ip)
+ show_gfp_flags(__entry->mask), __entry->ip)
);
TRACE_EVENT(free_extent_state,
@@ -1346,16 +1345,15 @@ TRACE_EVENT(free_extent_state,
TP_STRUCT__entry(
__field(const struct extent_state *, state)
- __field(unsigned long, ip)
+ __field(const void*, ip)
),
TP_fast_assign(
__entry->state = state,
- __entry->ip = IP
+ __entry->ip = (const void *)IP
),
- TP_printk("state=%p caller=%pS", __entry->state,
- (const void *)__entry->ip)
+ TP_printk("state=%p caller=%pS", __entry->state, __entry->ip)
);
DECLARE_EVENT_CLASS(btrfs__work,
@@ -1389,9 +1387,9 @@ DECLARE_EVENT_CLASS(btrfs__work,
);
/*
- * For situiations when the work is freed, we pass fs_info and a tag that that
- * matches address of the work structure so it can be paired with the
- * scheduling event.
+ * For situations when the work is freed, we pass fs_info and a tag that matches
+ * the address of the work structure so it can be paired with the scheduling
+ * event. DO NOT add anything here that dereferences wtag.
*/
DECLARE_EVENT_CLASS(btrfs__work__done,
@@ -1567,8 +1565,7 @@ DECLARE_EVENT_CLASS(btrfs_qgroup_extent,
),
TP_printk_btrfs("bytenr=%llu num_bytes=%llu",
- (unsigned long long)__entry->bytenr,
- (unsigned long long)__entry->num_bytes)
+ __entry->bytenr, __entry->num_bytes)
);
DEFINE_EVENT(btrfs_qgroup_extent, btrfs_qgroup_account_extents,
@@ -1644,7 +1641,7 @@ TRACE_EVENT(btrfs_qgroup_account_extent,
TRACE_EVENT(qgroup_update_counters,
TP_PROTO(const struct btrfs_fs_info *fs_info,
- struct btrfs_qgroup *qgroup,
+ const struct btrfs_qgroup *qgroup,
u64 cur_old_count, u64 cur_new_count),
TP_ARGS(fs_info, qgroup, cur_old_count, cur_new_count),
@@ -1825,7 +1822,7 @@ DEFINE_EVENT(btrfs__prelim_ref, btrfs_prelim_ref_insert,
);
TRACE_EVENT(btrfs_inode_mod_outstanding_extents,
- TP_PROTO(struct btrfs_root *root, u64 ino, int mod),
+ TP_PROTO(const struct btrfs_root *root, u64 ino, int mod),
TP_ARGS(root, ino, mod),
@@ -1847,7 +1844,7 @@ TRACE_EVENT(btrfs_inode_mod_outstanding_extents,
);
DECLARE_EVENT_CLASS(btrfs__block_group,
- TP_PROTO(const struct btrfs_block_group_cache *bg_cache),
+ TP_PROTO(const struct btrfs_block_group *bg_cache),
TP_ARGS(bg_cache),
@@ -1859,9 +1856,9 @@ DECLARE_EVENT_CLASS(btrfs__block_group,
),
TP_fast_assign_btrfs(bg_cache->fs_info,
- __entry->bytenr = bg_cache->key.objectid,
- __entry->len = bg_cache->key.offset,
- __entry->used = btrfs_block_group_used(&bg_cache->item);
+ __entry->bytenr = bg_cache->start,
+ __entry->len = bg_cache->length,
+ __entry->used = bg_cache->used;
__entry->flags = bg_cache->flags;
),
@@ -1871,19 +1868,19 @@ DECLARE_EVENT_CLASS(btrfs__block_group,
);
DEFINE_EVENT(btrfs__block_group, btrfs_remove_block_group,
- TP_PROTO(const struct btrfs_block_group_cache *bg_cache),
+ TP_PROTO(const struct btrfs_block_group *bg_cache),
TP_ARGS(bg_cache)
);
DEFINE_EVENT(btrfs__block_group, btrfs_add_unused_block_group,
- TP_PROTO(const struct btrfs_block_group_cache *bg_cache),
+ TP_PROTO(const struct btrfs_block_group *bg_cache),
TP_ARGS(bg_cache)
);
DEFINE_EVENT(btrfs__block_group, btrfs_skip_unused_block_group,
- TP_PROTO(const struct btrfs_block_group_cache *bg_cache),
+ TP_PROTO(const struct btrfs_block_group *bg_cache),
TP_ARGS(bg_cache)
);
@@ -1906,7 +1903,7 @@ TRACE_EVENT(btrfs_set_extent_bit,
TP_fast_assign_btrfs(tree->fs_info,
__entry->owner = tree->owner;
if (tree->private_data) {
- struct inode *inode = tree->private_data;
+ const struct inode *inode = tree->private_data;
__entry->ino = btrfs_ino(BTRFS_I(inode));
__entry->rootid =
@@ -1945,7 +1942,7 @@ TRACE_EVENT(btrfs_clear_extent_bit,
TP_fast_assign_btrfs(tree->fs_info,
__entry->owner = tree->owner;
if (tree->private_data) {
- struct inode *inode = tree->private_data;
+ const struct inode *inode = tree->private_data;
__entry->ino = btrfs_ino(BTRFS_I(inode));
__entry->rootid =
@@ -1985,7 +1982,7 @@ TRACE_EVENT(btrfs_convert_extent_bit,
TP_fast_assign_btrfs(tree->fs_info,
__entry->owner = tree->owner;
if (tree->private_data) {
- struct inode *inode = tree->private_data;
+ const struct inode *inode = tree->private_data;
__entry->ino = btrfs_ino(BTRFS_I(inode));
__entry->rootid =
@@ -2094,8 +2091,8 @@ DEFINE_BTRFS_LOCK_EVENT(btrfs_tree_read_lock_atomic);
DECLARE_EVENT_CLASS(btrfs__space_info_update,
- TP_PROTO(struct btrfs_fs_info *fs_info,
- struct btrfs_space_info *sinfo, u64 old, s64 diff),
+ TP_PROTO(const struct btrfs_fs_info *fs_info,
+ const struct btrfs_space_info *sinfo, u64 old, s64 diff),
TP_ARGS(fs_info, sinfo, old, diff),
@@ -2117,16 +2114,16 @@ DECLARE_EVENT_CLASS(btrfs__space_info_update,
DEFINE_EVENT(btrfs__space_info_update, update_bytes_may_use,
- TP_PROTO(struct btrfs_fs_info *fs_info,
- struct btrfs_space_info *sinfo, u64 old, s64 diff),
+ TP_PROTO(const struct btrfs_fs_info *fs_info,
+ const struct btrfs_space_info *sinfo, u64 old, s64 diff),
TP_ARGS(fs_info, sinfo, old, diff)
);
DEFINE_EVENT(btrfs__space_info_update, update_bytes_pinned,
- TP_PROTO(struct btrfs_fs_info *fs_info,
- struct btrfs_space_info *sinfo, u64 old, s64 diff),
+ TP_PROTO(const struct btrfs_fs_info *fs_info,
+ const struct btrfs_space_info *sinfo, u64 old, s64 diff),
TP_ARGS(fs_info, sinfo, old, diff)
);
diff --git a/include/trace/events/cgroup.h b/include/trace/events/cgroup.h
index a566cc521476..7f42a3de59e6 100644
--- a/include/trace/events/cgroup.h
+++ b/include/trace/events/cgroup.h
@@ -66,7 +66,7 @@ DECLARE_EVENT_CLASS(cgroup,
TP_fast_assign(
__entry->root = cgrp->root->hierarchy_id;
- __entry->id = cgrp->id;
+ __entry->id = cgroup_id(cgrp);
__entry->level = cgrp->level;
__assign_str(path, path);
),
@@ -135,7 +135,7 @@ DECLARE_EVENT_CLASS(cgroup_migrate,
TP_fast_assign(
__entry->dst_root = dst_cgrp->root->hierarchy_id;
- __entry->dst_id = dst_cgrp->id;
+ __entry->dst_id = cgroup_id(dst_cgrp);
__entry->dst_level = dst_cgrp->level;
__assign_str(dst_path, path);
__entry->pid = task->pid;
@@ -179,7 +179,7 @@ DECLARE_EVENT_CLASS(cgroup_event,
TP_fast_assign(
__entry->root = cgrp->root->hierarchy_id;
- __entry->id = cgrp->id;
+ __entry->id = cgroup_id(cgrp);
__entry->level = cgrp->level;
__assign_str(path, path);
__entry->val = val;
diff --git a/include/trace/events/fsi.h b/include/trace/events/fsi.h
index 92e5e89e52ed..9832cb8e0eb0 100644
--- a/include/trace/events/fsi.h
+++ b/include/trace/events/fsi.h
@@ -26,7 +26,7 @@ TRACE_EVENT(fsi_master_read,
__entry->addr = addr;
__entry->size = size;
),
- TP_printk("fsi%d:%02d:%02d %08x[%zd]",
+ TP_printk("fsi%d:%02d:%02d %08x[%zu]",
__entry->master_idx,
__entry->link,
__entry->id,
@@ -56,7 +56,7 @@ TRACE_EVENT(fsi_master_write,
__entry->data = 0;
memcpy(&__entry->data, data, size);
),
- TP_printk("fsi%d:%02d:%02d %08x[%zd] <= {%*ph}",
+ TP_printk("fsi%d:%02d:%02d %08x[%zu] <= {%*ph}",
__entry->master_idx,
__entry->link,
__entry->id,
@@ -93,7 +93,7 @@ TRACE_EVENT(fsi_master_rw_result,
if (__entry->write || !__entry->ret)
memcpy(&__entry->data, data, size);
),
- TP_printk("fsi%d:%02d:%02d %08x[%zd] %s {%*ph} ret %d",
+ TP_printk("fsi%d:%02d:%02d %08x[%zu] %s {%*ph} ret %d",
__entry->master_idx,
__entry->link,
__entry->id,
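
The %zd to %zu conversions above match the specifier to the type: the size fields here are size_t, which is unsigned, while %zd is for the signed ssize_t. A trivial userspace illustration:

    #include <stdio.h>
    #include <sys/types.h>

    int main(void)
    {
            size_t size = 4;        /* unsigned: print with %zu */
            ssize_t ret = -1;       /* signed:   print with %zd */

            printf("read %zu bytes, ret %zd\n", size, ret);
            return 0;
    }
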
diff --git a/include/trace/events/fsi_master_aspeed.h b/include/trace/events/fsi_master_aspeed.h
new file mode 100644
index 000000000000..a355ceacc33f
--- /dev/null
+++ b/include/trace/events/fsi_master_aspeed.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM fsi_master_aspeed
+
+#if !defined(_TRACE_FSI_MASTER_ASPEED_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_FSI_MASTER_ASPEED_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(fsi_master_aspeed_opb_read,
+ TP_PROTO(uint32_t addr, size_t size, uint32_t result, uint32_t status, uint32_t irq_status),
+ TP_ARGS(addr, size, result, status, irq_status),
+ TP_STRUCT__entry(
+ __field(uint32_t, addr)
+ __field(size_t, size)
+ __field(uint32_t, result)
+ __field(uint32_t, status)
+ __field(uint32_t, irq_status)
+ ),
+ TP_fast_assign(
+ __entry->addr = addr;
+ __entry->size = size;
+ __entry->result = result;
+ __entry->status = status;
+ __entry->irq_status = irq_status;
+ ),
+ TP_printk("addr %08x size %zu: result %08x sts: %08x irq_sts: %08x",
+ __entry->addr, __entry->size, __entry->result,
+ __entry->status, __entry->irq_status
+ )
+);
+
+TRACE_EVENT(fsi_master_aspeed_opb_write,
+ TP_PROTO(uint32_t addr, uint32_t val, size_t size, uint32_t status, uint32_t irq_status),
+ TP_ARGS(addr, val, size, status, irq_status),
+ TP_STRUCT__entry(
+ __field(uint32_t, addr)
+ __field(uint32_t, val)
+ __field(size_t, size)
+ __field(uint32_t, status)
+ __field(uint32_t, irq_status)
+ ),
+ TP_fast_assign(
+ __entry->addr = addr;
+ __entry->val = val;
+ __entry->size = size;
+ __entry->status = status;
+ __entry->irq_status = irq_status;
+ ),
+ TP_printk("addr %08x val %08x size %zu status: %08x irq_sts: %08x",
+ __entry->addr, __entry->val, __entry->size,
+ __entry->status, __entry->irq_status
+ )
+ );
+
+TRACE_EVENT(fsi_master_aspeed_opb_error,
+ TP_PROTO(uint32_t mresp0, uint32_t mstap0, uint32_t mesrb0),
+ TP_ARGS(mresp0, mstap0, mesrb0),
+ TP_STRUCT__entry(
+ __field(uint32_t, mresp0)
+ __field(uint32_t, mstap0)
+ __field(uint32_t, mesrb0)
+ ),
+ TP_fast_assign(
+ __entry->mresp0 = mresp0;
+ __entry->mstap0 = mstap0;
+ __entry->mesrb0 = mesrb0;
+ ),
+ TP_printk("mresp0 %08x mstap0 %08x mesrb0 %08x",
+ __entry->mresp0, __entry->mstap0, __entry->mesrb0
+ )
+ );
+
+#endif
+
+#include <trace/define_trace.h>
diff --git a/include/trace/events/io_uring.h b/include/trace/events/io_uring.h
new file mode 100644
index 000000000000..72a4d0174b02
--- /dev/null
+++ b/include/trace/events/io_uring.h
@@ -0,0 +1,358 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM io_uring
+
+#if !defined(_TRACE_IO_URING_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_IO_URING_H
+
+#include <linux/tracepoint.h>
+
+struct io_wq_work;
+
+/**
+ * io_uring_create - called after a new io_uring context was prepared
+ *
+ * @fd: corresponding file descriptor
+ * @ctx: pointer to a ring context structure
+ * @sq_entries: actual SQ size
+ * @cq_entries: actual CQ size
+ * @flags: SQ ring flags, provided to io_uring_setup(2)
+ *
+ * Allows tracing of io_uring creation and provides a pointer to the context,
+ * which can be used later to find correlated events.
+ */
+TRACE_EVENT(io_uring_create,
+
+ TP_PROTO(int fd, void *ctx, u32 sq_entries, u32 cq_entries, u32 flags),
+
+ TP_ARGS(fd, ctx, sq_entries, cq_entries, flags),
+
+ TP_STRUCT__entry (
+ __field( int, fd )
+ __field( void *, ctx )
+ __field( u32, sq_entries )
+ __field( u32, cq_entries )
+ __field( u32, flags )
+ ),
+
+ TP_fast_assign(
+ __entry->fd = fd;
+ __entry->ctx = ctx;
+ __entry->sq_entries = sq_entries;
+ __entry->cq_entries = cq_entries;
+ __entry->flags = flags;
+ ),
+
+ TP_printk("ring %p, fd %d sq size %d, cq size %d, flags %d",
+ __entry->ctx, __entry->fd, __entry->sq_entries,
+ __entry->cq_entries, __entry->flags)
+);
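
For context, this event fires once io_uring_setup(2) has succeeded. A hedged userspace sketch of the call that triggers it (raw syscall, no liburing; queue depth chosen arbitrarily):

    #include <linux/io_uring.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int main(void)
    {
            struct io_uring_params p;

            memset(&p, 0, sizeof(p));
            /* fd, ring context, actual SQ/CQ sizes and flags all appear
             * in the resulting io_uring_create trace event.
             */
            return syscall(__NR_io_uring_setup, 8, &p) < 0;
    }
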
+
+/**
+ * io_uring_register - called after a buffer/file/eventfd was successfully
+ * registered for a ring
+ *
+ * @ctx: pointer to a ring context structure
+ * @opcode: describes which operation to perform
+ * @nr_files:		number of registered files
+ * @nr_bufs:		number of registered buffers
+ * @eventfd:		whether an eventfd was registered or not
+ * @ret: return code
+ *
+ * Allows tracing of fixed files/buffers/eventfds, which can be registered to
+ * avoid the overhead of taking references to them for every operation. This
+ * event, together with io_uring_file_get, can provide a full picture of how
+ * much overhead one can reduce by using fixed resources.
+ */
+TRACE_EVENT(io_uring_register,
+
+ TP_PROTO(void *ctx, unsigned opcode, unsigned nr_files,
+ unsigned nr_bufs, bool eventfd, long ret),
+
+ TP_ARGS(ctx, opcode, nr_files, nr_bufs, eventfd, ret),
+
+ TP_STRUCT__entry (
+ __field( void *, ctx )
+ __field( unsigned, opcode )
+ __field( unsigned, nr_files )
+ __field( unsigned, nr_bufs )
+ __field( bool, eventfd )
+ __field( long, ret )
+ ),
+
+ TP_fast_assign(
+ __entry->ctx = ctx;
+ __entry->opcode = opcode;
+ __entry->nr_files = nr_files;
+ __entry->nr_bufs = nr_bufs;
+ __entry->eventfd = eventfd;
+ __entry->ret = ret;
+ ),
+
+ TP_printk("ring %p, opcode %d, nr_user_files %d, nr_user_bufs %d, "
+ "eventfd %d, ret %ld",
+ __entry->ctx, __entry->opcode, __entry->nr_files,
+ __entry->nr_bufs, __entry->eventfd, __entry->ret)
+);
+
+/**
+ * io_uring_file_get - called before getting references to an SQE file
+ *
+ * @ctx: pointer to a ring context structure
+ * @fd: SQE file descriptor
+ *
+ * Allows tracing how often an SQE file reference is obtained, which can
+ * help figure out whether it makes sense to use fixed files, or check that
+ * fixed files are used correctly.
+ */
+TRACE_EVENT(io_uring_file_get,
+
+ TP_PROTO(void *ctx, int fd),
+
+ TP_ARGS(ctx, fd),
+
+ TP_STRUCT__entry (
+ __field( void *, ctx )
+ __field( int, fd )
+ ),
+
+ TP_fast_assign(
+ __entry->ctx = ctx;
+ __entry->fd = fd;
+ ),
+
+ TP_printk("ring %p, fd %d", __entry->ctx, __entry->fd)
+);
+
+/**
+ * io_uring_queue_async_work - called before submitting a new async work
+ *
+ * @ctx: pointer to a ring context structure
+ * @rw:		type of workqueue, hashed (non-zero) or normal
+ * @req:	pointer to a submitted request
+ * @work:	pointer to a submitted io_wq_work
+ * @flags:	request flags
+ *
+ * Allows tracing of asynchronous work submission.
+ */
+TRACE_EVENT(io_uring_queue_async_work,
+
+	TP_PROTO(void *ctx, int rw, void *req, struct io_wq_work *work,
+ unsigned int flags),
+
+ TP_ARGS(ctx, rw, req, work, flags),
+
+ TP_STRUCT__entry (
+ __field( void *, ctx )
+ __field( int, rw )
+ __field( void *, req )
+ __field( struct io_wq_work *, work )
+ __field( unsigned int, flags )
+ ),
+
+ TP_fast_assign(
+ __entry->ctx = ctx;
+ __entry->rw = rw;
+ __entry->req = req;
+ __entry->work = work;
+ __entry->flags = flags;
+ ),
+
+ TP_printk("ring %p, request %p, flags %d, %s queue, work %p",
+ __entry->ctx, __entry->req, __entry->flags,
+ __entry->rw ? "hashed" : "normal", __entry->work)
+);
+
+/**
+ * io_uring_defer - called before the io_uring work is added into defer_list
+ *
+ * @ctx:	pointer to a ring context structure
+ * @req:	pointer to a deferred request
+ * @shadow:	whether the request is a shadow request
+ *
+ * Allows tracking of deferred requests, to get insight into which requests
+ * are not started immediately.
+ */
+TRACE_EVENT(io_uring_defer,
+
+ TP_PROTO(void *ctx, void *req, bool shadow),
+
+ TP_ARGS(ctx, req, shadow),
+
+ TP_STRUCT__entry (
+ __field( void *, ctx )
+ __field( void *, req )
+ __field( bool, shadow )
+ ),
+
+ TP_fast_assign(
+ __entry->ctx = ctx;
+ __entry->req = req;
+ __entry->shadow = shadow;
+ ),
+
+ TP_printk("ring %p, request %p%s", __entry->ctx, __entry->req,
+ __entry->shadow ? ", shadow": "")
+);
+
+/**
+ * io_uring_link - called before the io_uring request is added into the
+ *		   link_list of another request
+ *
+ * @ctx: pointer to a ring context structure
+ * @req: pointer to a linked request
+ * @target_req: pointer to a previous request, that would contain @req
+ *
+ * Allows tracking of linked requests, to understand dependencies between
+ * requests and how they influence the execution flow.
+ */
+TRACE_EVENT(io_uring_link,
+
+ TP_PROTO(void *ctx, void *req, void *target_req),
+
+ TP_ARGS(ctx, req, target_req),
+
+ TP_STRUCT__entry (
+ __field( void *, ctx )
+ __field( void *, req )
+ __field( void *, target_req )
+ ),
+
+ TP_fast_assign(
+ __entry->ctx = ctx;
+ __entry->req = req;
+ __entry->target_req = target_req;
+ ),
+
+ TP_printk("ring %p, request %p linked after %p",
+ __entry->ctx, __entry->req, __entry->target_req)
+);
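
To illustrate what produces this event: a request is linked by setting IOSQE_IO_LINK on the preceding SQE. A sketch (NOP opcodes and function name chosen arbitrarily):

    #include <linux/io_uring.h>
    #include <string.h>

    static void prepare_linked_pair(struct io_uring_sqe *a,
                                    struct io_uring_sqe *b)
    {
            memset(a, 0, sizeof(*a));
            memset(b, 0, sizeof(*b));
            a->opcode = IORING_OP_NOP;
            a->flags = IOSQE_IO_LINK;   /* b will not start until a completes */
            b->opcode = IORING_OP_NOP;
    }
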
+
+/**
+ * io_uring_cqring_wait - called before starting to wait for an available CQE
+ *
+ * @ctx: pointer to a ring context structure
+ * @min_events: minimal number of events to wait for
+ *
+ * Allows tracking of waits for a CQE, so that we can e.g. troubleshoot
+ * situations where an application waits for an event that never comes.
+ */
+TRACE_EVENT(io_uring_cqring_wait,
+
+ TP_PROTO(void *ctx, int min_events),
+
+ TP_ARGS(ctx, min_events),
+
+ TP_STRUCT__entry (
+ __field( void *, ctx )
+ __field( int, min_events )
+ ),
+
+ TP_fast_assign(
+ __entry->ctx = ctx;
+ __entry->min_events = min_events;
+ ),
+
+ TP_printk("ring %p, min_events %d", __entry->ctx, __entry->min_events)
+);
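
The wait that this event marks is entered from io_uring_enter(2) with a non-zero min_complete; roughly (helper name invented):

    #include <linux/io_uring.h>
    #include <stddef.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* Sketch: block until at least one CQE is available; min_complete
     * here corresponds to min_events in the trace event.
     */
    static long wait_one_cqe(int ring_fd)
    {
            return syscall(__NR_io_uring_enter, ring_fd, 0 /* to_submit */,
                           1 /* min_complete */, IORING_ENTER_GETEVENTS,
                           NULL, 0);
    }
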
+
+/**
+ * io_uring_fail_link - called before failing a linked request
+ *
+ * @req: request, which links were cancelled
+ * @link: cancelled link
+ *
+ * Allows tracking of linked request cancellation, to see not only that some
+ * work was cancelled, but also which request caused it.
+ */
+TRACE_EVENT(io_uring_fail_link,
+
+ TP_PROTO(void *req, void *link),
+
+ TP_ARGS(req, link),
+
+ TP_STRUCT__entry (
+ __field( void *, req )
+ __field( void *, link )
+ ),
+
+ TP_fast_assign(
+ __entry->req = req;
+ __entry->link = link;
+ ),
+
+ TP_printk("request %p, link %p", __entry->req, __entry->link)
+);
+
+/**
+ * io_uring_complete - called when completing an SQE
+ *
+ * @ctx: pointer to a ring context structure
+ * @user_data: user data associated with the request
+ * @res: result of the request
+ *
+ */
+TRACE_EVENT(io_uring_complete,
+
+ TP_PROTO(void *ctx, u64 user_data, long res),
+
+ TP_ARGS(ctx, user_data, res),
+
+ TP_STRUCT__entry (
+ __field( void *, ctx )
+ __field( u64, user_data )
+ __field( long, res )
+ ),
+
+ TP_fast_assign(
+ __entry->ctx = ctx;
+ __entry->user_data = user_data;
+ __entry->res = res;
+ ),
+
+ TP_printk("ring %p, user_data 0x%llx, result %ld",
+ __entry->ctx, (unsigned long long)__entry->user_data,
+ __entry->res)
+);
+
+
+/**
+ * io_uring_submit_sqe - called before submitting one SQE
+ *
+ * @ctx: pointer to a ring context structure
+ * @user_data: user data associated with the request
+ * @force_nonblock:	whether the context is blocking or not
+ * @sq_thread: true if sq_thread has submitted this SQE
+ *
+ * Allows tracking of SQE submission, to understand its source: the SQ thread
+ * or an io_uring_enter call.
+ */
+TRACE_EVENT(io_uring_submit_sqe,
+
+ TP_PROTO(void *ctx, u64 user_data, bool force_nonblock, bool sq_thread),
+
+ TP_ARGS(ctx, user_data, force_nonblock, sq_thread),
+
+ TP_STRUCT__entry (
+ __field( void *, ctx )
+ __field( u64, user_data )
+ __field( bool, force_nonblock )
+ __field( bool, sq_thread )
+ ),
+
+ TP_fast_assign(
+ __entry->ctx = ctx;
+ __entry->user_data = user_data;
+ __entry->force_nonblock = force_nonblock;
+ __entry->sq_thread = sq_thread;
+ ),
+
+ TP_printk("ring %p, user data 0x%llx, non block %d, sq_thread %d",
+ __entry->ctx, (unsigned long long) __entry->user_data,
+ __entry->force_nonblock, __entry->sq_thread)
+);
+
+#endif /* _TRACE_IO_URING_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/page_pool.h b/include/trace/events/page_pool.h
index 47b5ee880aa9..ad0aa7f31675 100644
--- a/include/trace/events/page_pool.h
+++ b/include/trace/events/page_pool.h
@@ -8,9 +8,10 @@
#include <linux/types.h>
#include <linux/tracepoint.h>
+#include <trace/events/mmflags.h>
#include <net/page_pool.h>
-TRACE_EVENT(page_pool_inflight,
+TRACE_EVENT(page_pool_release,
TP_PROTO(const struct page_pool *pool,
s32 inflight, u32 hold, u32 release),
@@ -22,6 +23,7 @@ TRACE_EVENT(page_pool_inflight,
__field(s32, inflight)
__field(u32, hold)
__field(u32, release)
+ __field(u64, cnt)
),
TP_fast_assign(
@@ -29,10 +31,12 @@ TRACE_EVENT(page_pool_inflight,
__entry->inflight = inflight;
__entry->hold = hold;
__entry->release = release;
+ __entry->cnt = pool->destroy_cnt;
),
- TP_printk("page_pool=%p inflight=%d hold=%u release=%u",
- __entry->pool, __entry->inflight, __entry->hold, __entry->release)
+ TP_printk("page_pool=%p inflight=%d hold=%u release=%u cnt=%llu",
+ __entry->pool, __entry->inflight, __entry->hold,
+ __entry->release, __entry->cnt)
);
TRACE_EVENT(page_pool_state_release,
@@ -46,16 +50,18 @@ TRACE_EVENT(page_pool_state_release,
__field(const struct page_pool *, pool)
__field(const struct page *, page)
__field(u32, release)
+ __field(unsigned long, pfn)
),
TP_fast_assign(
__entry->pool = pool;
__entry->page = page;
__entry->release = release;
+ __entry->pfn = page_to_pfn(page);
),
- TP_printk("page_pool=%p page=%p release=%u",
- __entry->pool, __entry->page, __entry->release)
+ TP_printk("page_pool=%p page=%p pfn=%lu release=%u",
+ __entry->pool, __entry->page, __entry->pfn, __entry->release)
);
TRACE_EVENT(page_pool_state_hold,
@@ -69,16 +75,40 @@ TRACE_EVENT(page_pool_state_hold,
__field(const struct page_pool *, pool)
__field(const struct page *, page)
__field(u32, hold)
+ __field(unsigned long, pfn)
),
TP_fast_assign(
__entry->pool = pool;
__entry->page = page;
__entry->hold = hold;
+ __entry->pfn = page_to_pfn(page);
),
- TP_printk("page_pool=%p page=%p hold=%u",
- __entry->pool, __entry->page, __entry->hold)
+ TP_printk("page_pool=%p page=%p pfn=%lu hold=%u",
+ __entry->pool, __entry->page, __entry->pfn, __entry->hold)
+);
+
+TRACE_EVENT(page_pool_update_nid,
+
+ TP_PROTO(const struct page_pool *pool, int new_nid),
+
+ TP_ARGS(pool, new_nid),
+
+ TP_STRUCT__entry(
+ __field(const struct page_pool *, pool)
+ __field(int, pool_nid)
+ __field(int, new_nid)
+ ),
+
+ TP_fast_assign(
+ __entry->pool = pool;
+ __entry->pool_nid = pool->p.nid;
+ __entry->new_nid = new_nid;
+ ),
+
+ TP_printk("page_pool=%p pool_nid=%d new_nid=%d",
+ __entry->pool, __entry->pool_nid, __entry->new_nid)
);
#endif /* _TRACE_PAGE_POOL_H */
diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
index 694bd040cf51..66122602bd08 100644
--- a/include/trace/events/rcu.h
+++ b/include/trace/events/rcu.h
@@ -93,16 +93,16 @@ TRACE_EVENT_RCU(rcu_grace_period,
* the data from the rcu_node structure, other than rcuname, which comes
* from the rcu_state structure, and event, which is one of the following:
*
- * "Startleaf": Request a grace period based on leaf-node data.
+ * "Cleanup": Clean up rcu_node structure after previous GP.
+ * "CleanupMore": Clean up, and another GP is needed.
+ * "EndWait": Complete wait.
+ * "NoGPkthread": The RCU grace-period kthread has not yet started.
* "Prestarted": Someone beat us to the request
* "Startedleaf": Leaf node marked for future GP.
* "Startedleafroot": All nodes from leaf to root marked for future GP.
* "Startedroot": Requested a nocb grace period based on root-node data.
- * "NoGPkthread": The RCU grace-period kthread has not yet started.
+ * "Startleaf": Request a grace period based on leaf-node data.
* "StartWait": Start waiting for the requested grace period.
- * "EndWait": Complete wait.
- * "Cleanup": Clean up rcu_node structure after previous GP.
- * "CleanupMore": Clean up, and another GP is needed.
*/
TRACE_EVENT_RCU(rcu_future_grace_period,
@@ -258,20 +258,27 @@ TRACE_EVENT_RCU(rcu_exp_funnel_lock,
* the number of the offloaded CPU are extracted. The third and final
* argument is a string as follows:
*
- * "WakeEmpty": Wake rcuo kthread, first CB to empty list.
- * "WakeEmptyIsDeferred": Wake rcuo kthread later, first CB to empty list.
- * "WakeOvf": Wake rcuo kthread, CB list is huge.
- * "WakeOvfIsDeferred": Wake rcuo kthread later, CB list is huge.
- * "WakeNot": Don't wake rcuo kthread.
- * "WakeNotPoll": Don't wake rcuo kthread because it is polling.
- * "DeferredWake": Carried out the "IsDeferred" wakeup.
- * "Poll": Start of new polling cycle for rcu_nocb_poll.
- * "Sleep": Sleep waiting for GP for !rcu_nocb_poll.
- * "CBSleep": Sleep waiting for CBs for !rcu_nocb_poll.
- * "WokeEmpty": rcuo kthread woke to find empty list.
- * "WokeNonEmpty": rcuo kthread woke to find non-empty list.
- * "WaitQueue": Enqueue partially done, timed wait for it to complete.
- * "WokeQueue": Partial enqueue now complete.
+ * "AlreadyAwake": The to-be-awakened rcuo kthread is already awake.
+ * "Bypass": rcuo GP kthread sees non-empty ->nocb_bypass.
+ * "CBSleep": rcuo CB kthread sleeping waiting for CBs.
+ * "Check": rcuo GP kthread checking specified CPU for work.
+ * "DeferredWake": Timer expired or polled check, time to wake.
+ * "DoWake": The to-be-awakened rcuo kthread needs to be awakened.
+ * "EndSleep": Done waiting for GP for !rcu_nocb_poll.
+ * "FirstBQ": New CB to empty ->nocb_bypass (->cblist maybe non-empty).
+ * "FirstBQnoWake": FirstBQ plus rcuo kthread need not be awakened.
+ * "FirstBQwake": FirstBQ plus rcuo kthread must be awakened.
+ * "FirstQ": New CB to empty ->cblist (->nocb_bypass maybe non-empty).
+ * "NeedWaitGP": rcuo GP kthread must wait on a grace period.
+ * "Poll": Start of new polling cycle for rcu_nocb_poll.
+ * "Sleep": Sleep waiting for GP for !rcu_nocb_poll.
+ * "Timer": Deferred-wake timer expired.
+ * "WakeEmptyIsDeferred": Wake rcuo kthread later, first CB to empty list.
+ * "WakeEmpty": Wake rcuo kthread, first CB to empty list.
+ * "WakeNot": Don't wake rcuo kthread.
+ * "WakeNotPoll": Don't wake rcuo kthread because it is polling.
+ * "WakeOvfIsDeferred": Wake rcuo kthread later, CB list is huge.
+ * "WokeEmpty": rcuo CB kthread woke to find empty list.
*/
TRACE_EVENT_RCU(rcu_nocb_wake,
@@ -713,8 +720,6 @@ TRACE_EVENT_RCU(rcu_torture_read,
* "Begin": rcu_barrier() started.
* "EarlyExit": rcu_barrier() piggybacked, thus early exit.
* "Inc1": rcu_barrier() piggyback check counter incremented.
- * "OfflineNoCB": rcu_barrier() found callback on never-online CPU
- * "OnlineNoCB": rcu_barrier() found online no-CBs CPU.
* "OnlineQ": rcu_barrier() found online CPU with callbacks.
* "OnlineNQ": rcu_barrier() found online CPU, no callbacks.
* "IRQ": An rcu_barrier_callback() callback posted on remote CPU.
diff --git a/include/trace/events/tcp.h b/include/trace/events/tcp.h
index 2bc9960a31aa..cf97f6339acb 100644
--- a/include/trace/events/tcp.h
+++ b/include/trace/events/tcp.h
@@ -86,7 +86,7 @@ DECLARE_EVENT_CLASS(tcp_event_sk_skb,
sk->sk_v6_rcv_saddr, sk->sk_v6_daddr);
),
- TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c state=%s\n",
+ TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c state=%s",
__entry->sport, __entry->dport, __entry->saddr, __entry->daddr,
__entry->saddr_v6, __entry->daddr_v6,
show_tcp_state_name(__entry->state))
diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h
index b7a904825e7d..295517f109d7 100644
--- a/include/trace/events/timer.h
+++ b/include/trace/events/timer.h
@@ -367,7 +367,8 @@ TRACE_EVENT(itimer_expire,
tick_dep_name(POSIX_TIMER) \
tick_dep_name(PERF_EVENTS) \
tick_dep_name(SCHED) \
- tick_dep_name_end(CLOCK_UNSTABLE)
+ tick_dep_name(CLOCK_UNSTABLE) \
+ tick_dep_name_end(RCU)
#undef tick_dep_name
#undef tick_dep_mask_name
diff --git a/include/trace/events/wbt.h b/include/trace/events/wbt.h
index b048694070e2..37342a13c9cb 100644
--- a/include/trace/events/wbt.h
+++ b/include/trace/events/wbt.h
@@ -33,7 +33,8 @@ TRACE_EVENT(wbt_stat,
),
TP_fast_assign(
- strncpy(__entry->name, dev_name(bdi->dev), 32);
+ strlcpy(__entry->name, dev_name(bdi->dev),
+ ARRAY_SIZE(__entry->name));
__entry->rmean = stat[0].mean;
__entry->rmin = stat[0].min;
__entry->rmax = stat[0].max;
@@ -67,7 +68,8 @@ TRACE_EVENT(wbt_lat,
),
TP_fast_assign(
- strncpy(__entry->name, dev_name(bdi->dev), 32);
+ strlcpy(__entry->name, dev_name(bdi->dev),
+ ARRAY_SIZE(__entry->name));
__entry->lat = div_u64(lat, 1000);
),
@@ -103,7 +105,8 @@ TRACE_EVENT(wbt_step,
),
TP_fast_assign(
- strncpy(__entry->name, dev_name(bdi->dev), 32);
+ strlcpy(__entry->name, dev_name(bdi->dev),
+ ARRAY_SIZE(__entry->name));
__entry->msg = msg;
__entry->step = step;
__entry->window = div_u64(window, 1000);
@@ -138,7 +141,8 @@ TRACE_EVENT(wbt_timer,
),
TP_fast_assign(
- strncpy(__entry->name, dev_name(bdi->dev), 32);
+ strlcpy(__entry->name, dev_name(bdi->dev),
+ ARRAY_SIZE(__entry->name));
__entry->status = status;
__entry->step = step;
__entry->inflight = inflight;
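
The strncpy to strlcpy conversions above fix two problems at once: strncpy() does not guarantee NUL termination, and the hard-coded 32 could drift out of sync with the array size. A sketch of the resulting idiom (struct and function names invented):

    #include <linux/kernel.h>
    #include <linux/string.h>

    struct wbt_sample {
            char name[32];
    };

    static void wbt_set_name(struct wbt_sample *s, const char *src)
    {
            /* Always NUL-terminates, and the bound tracks the array. */
            strlcpy(s->name, src, ARRAY_SIZE(s->name));
    }
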
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
index c2ce6480b4b1..ef50be4e5e6c 100644
--- a/include/trace/events/writeback.h
+++ b/include/trace/events/writeback.h
@@ -61,7 +61,7 @@ DECLARE_EVENT_CLASS(writeback_page_template,
TP_STRUCT__entry (
__array(char, name, 32)
- __field(unsigned long, ino)
+ __field(ino_t, ino)
__field(pgoff_t, index)
),
@@ -75,7 +75,7 @@ DECLARE_EVENT_CLASS(writeback_page_template,
TP_printk("bdi %s: ino=%lu index=%lu",
__entry->name,
- __entry->ino,
+ (unsigned long)__entry->ino,
__entry->index
)
);
@@ -102,7 +102,7 @@ DECLARE_EVENT_CLASS(writeback_dirty_inode_template,
TP_STRUCT__entry (
__array(char, name, 32)
- __field(unsigned long, ino)
+ __field(ino_t, ino)
__field(unsigned long, state)
__field(unsigned long, flags)
),
@@ -120,7 +120,7 @@ DECLARE_EVENT_CLASS(writeback_dirty_inode_template,
TP_printk("bdi %s: ino=%lu state=%s flags=%s",
__entry->name,
- __entry->ino,
+ (unsigned long)__entry->ino,
show_inode_state(__entry->state),
show_inode_state(__entry->flags)
)
@@ -150,28 +150,28 @@ DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode,
#ifdef CREATE_TRACE_POINTS
#ifdef CONFIG_CGROUP_WRITEBACK
-static inline unsigned int __trace_wb_assign_cgroup(struct bdi_writeback *wb)
+static inline ino_t __trace_wb_assign_cgroup(struct bdi_writeback *wb)
{
- return wb->memcg_css->cgroup->kn->id.ino;
+ return cgroup_ino(wb->memcg_css->cgroup);
}
-static inline unsigned int __trace_wbc_assign_cgroup(struct writeback_control *wbc)
+static inline ino_t __trace_wbc_assign_cgroup(struct writeback_control *wbc)
{
if (wbc->wb)
return __trace_wb_assign_cgroup(wbc->wb);
else
- return -1U;
+ return 1;
}
#else /* CONFIG_CGROUP_WRITEBACK */
-static inline unsigned int __trace_wb_assign_cgroup(struct bdi_writeback *wb)
+static inline ino_t __trace_wb_assign_cgroup(struct bdi_writeback *wb)
{
- return -1U;
+ return 1;
}
-static inline unsigned int __trace_wbc_assign_cgroup(struct writeback_control *wbc)
+static inline ino_t __trace_wbc_assign_cgroup(struct writeback_control *wbc)
{
- return -1U;
+ return 1;
}
#endif /* CONFIG_CGROUP_WRITEBACK */
@@ -187,8 +187,8 @@ TRACE_EVENT(inode_foreign_history,
TP_STRUCT__entry(
__array(char, name, 32)
- __field(unsigned long, ino)
- __field(unsigned int, cgroup_ino)
+ __field(ino_t, ino)
+ __field(ino_t, cgroup_ino)
__field(unsigned int, history)
),
@@ -199,10 +199,10 @@ TRACE_EVENT(inode_foreign_history,
__entry->history = history;
),
- TP_printk("bdi %s: ino=%lu cgroup_ino=%u history=0x%x",
+ TP_printk("bdi %s: ino=%lu cgroup_ino=%lu history=0x%x",
__entry->name,
- __entry->ino,
- __entry->cgroup_ino,
+ (unsigned long)__entry->ino,
+ (unsigned long)__entry->cgroup_ino,
__entry->history
)
);
@@ -216,9 +216,9 @@ TRACE_EVENT(inode_switch_wbs,
TP_STRUCT__entry(
__array(char, name, 32)
- __field(unsigned long, ino)
- __field(unsigned int, old_cgroup_ino)
- __field(unsigned int, new_cgroup_ino)
+ __field(ino_t, ino)
+ __field(ino_t, old_cgroup_ino)
+ __field(ino_t, new_cgroup_ino)
),
TP_fast_assign(
@@ -228,11 +228,11 @@ TRACE_EVENT(inode_switch_wbs,
__entry->new_cgroup_ino = __trace_wb_assign_cgroup(new_wb);
),
- TP_printk("bdi %s: ino=%lu old_cgroup_ino=%u new_cgroup_ino=%u",
+ TP_printk("bdi %s: ino=%lu old_cgroup_ino=%lu new_cgroup_ino=%lu",
__entry->name,
- __entry->ino,
- __entry->old_cgroup_ino,
- __entry->new_cgroup_ino
+ (unsigned long)__entry->ino,
+ (unsigned long)__entry->old_cgroup_ino,
+ (unsigned long)__entry->new_cgroup_ino
)
);
@@ -245,10 +245,10 @@ TRACE_EVENT(track_foreign_dirty,
TP_STRUCT__entry(
__array(char, name, 32)
__field(u64, bdi_id)
- __field(unsigned long, ino)
+ __field(ino_t, ino)
__field(unsigned int, memcg_id)
- __field(unsigned int, cgroup_ino)
- __field(unsigned int, page_cgroup_ino)
+ __field(ino_t, cgroup_ino)
+ __field(ino_t, page_cgroup_ino)
),
TP_fast_assign(
@@ -260,16 +260,16 @@ TRACE_EVENT(track_foreign_dirty,
__entry->ino = inode ? inode->i_ino : 0;
__entry->memcg_id = wb->memcg_css->id;
__entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
- __entry->page_cgroup_ino = page->mem_cgroup->css.cgroup->kn->id.ino;
+ __entry->page_cgroup_ino = cgroup_ino(page->mem_cgroup->css.cgroup);
),
- TP_printk("bdi %s[%llu]: ino=%lu memcg_id=%u cgroup_ino=%u page_cgroup_ino=%u",
+ TP_printk("bdi %s[%llu]: ino=%lu memcg_id=%u cgroup_ino=%lu page_cgroup_ino=%lu",
__entry->name,
__entry->bdi_id,
- __entry->ino,
+ (unsigned long)__entry->ino,
__entry->memcg_id,
- __entry->cgroup_ino,
- __entry->page_cgroup_ino
+ (unsigned long)__entry->cgroup_ino,
+ (unsigned long)__entry->page_cgroup_ino
)
);
@@ -282,7 +282,7 @@ TRACE_EVENT(flush_foreign,
TP_STRUCT__entry(
__array(char, name, 32)
- __field(unsigned int, cgroup_ino)
+ __field(ino_t, cgroup_ino)
__field(unsigned int, frn_bdi_id)
__field(unsigned int, frn_memcg_id)
),
@@ -294,9 +294,9 @@ TRACE_EVENT(flush_foreign,
__entry->frn_memcg_id = frn_memcg_id;
),
- TP_printk("bdi %s: cgroup_ino=%u frn_bdi_id=%u frn_memcg_id=%u",
+ TP_printk("bdi %s: cgroup_ino=%lu frn_bdi_id=%u frn_memcg_id=%u",
__entry->name,
- __entry->cgroup_ino,
+ (unsigned long)__entry->cgroup_ino,
__entry->frn_bdi_id,
__entry->frn_memcg_id
)
@@ -311,9 +311,9 @@ DECLARE_EVENT_CLASS(writeback_write_inode_template,
TP_STRUCT__entry (
__array(char, name, 32)
- __field(unsigned long, ino)
+ __field(ino_t, ino)
__field(int, sync_mode)
- __field(unsigned int, cgroup_ino)
+ __field(ino_t, cgroup_ino)
),
TP_fast_assign(
@@ -324,11 +324,11 @@ DECLARE_EVENT_CLASS(writeback_write_inode_template,
__entry->cgroup_ino = __trace_wbc_assign_cgroup(wbc);
),
- TP_printk("bdi %s: ino=%lu sync_mode=%d cgroup_ino=%u",
+ TP_printk("bdi %s: ino=%lu sync_mode=%d cgroup_ino=%lu",
__entry->name,
- __entry->ino,
+ (unsigned long)__entry->ino,
__entry->sync_mode,
- __entry->cgroup_ino
+ (unsigned long)__entry->cgroup_ino
)
);
@@ -358,7 +358,7 @@ DECLARE_EVENT_CLASS(writeback_work_class,
__field(int, range_cyclic)
__field(int, for_background)
__field(int, reason)
- __field(unsigned int, cgroup_ino)
+ __field(ino_t, cgroup_ino)
),
TP_fast_assign(
strscpy_pad(__entry->name,
@@ -374,7 +374,7 @@ DECLARE_EVENT_CLASS(writeback_work_class,
__entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
),
TP_printk("bdi %s: sb_dev %d:%d nr_pages=%ld sync_mode=%d "
- "kupdate=%d range_cyclic=%d background=%d reason=%s cgroup_ino=%u",
+ "kupdate=%d range_cyclic=%d background=%d reason=%s cgroup_ino=%lu",
__entry->name,
MAJOR(__entry->sb_dev), MINOR(__entry->sb_dev),
__entry->nr_pages,
@@ -383,7 +383,7 @@ DECLARE_EVENT_CLASS(writeback_work_class,
__entry->range_cyclic,
__entry->for_background,
__print_symbolic(__entry->reason, WB_WORK_REASON),
- __entry->cgroup_ino
+ (unsigned long)__entry->cgroup_ino
)
);
#define DEFINE_WRITEBACK_WORK_EVENT(name) \
@@ -413,15 +413,15 @@ DECLARE_EVENT_CLASS(writeback_class,
TP_ARGS(wb),
TP_STRUCT__entry(
__array(char, name, 32)
- __field(unsigned int, cgroup_ino)
+ __field(ino_t, cgroup_ino)
),
TP_fast_assign(
strscpy_pad(__entry->name, dev_name(wb->bdi->dev), 32);
__entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
),
- TP_printk("bdi %s: cgroup_ino=%u",
+ TP_printk("bdi %s: cgroup_ino=%lu",
__entry->name,
- __entry->cgroup_ino
+ (unsigned long)__entry->cgroup_ino
)
);
#define DEFINE_WRITEBACK_EVENT(name) \
@@ -459,7 +459,7 @@ DECLARE_EVENT_CLASS(wbc_class,
__field(int, range_cyclic)
__field(long, range_start)
__field(long, range_end)
- __field(unsigned int, cgroup_ino)
+ __field(ino_t, cgroup_ino)
),
TP_fast_assign(
@@ -478,7 +478,7 @@ DECLARE_EVENT_CLASS(wbc_class,
TP_printk("bdi %s: towrt=%ld skip=%ld mode=%d kupd=%d "
"bgrd=%d reclm=%d cyclic=%d "
- "start=0x%lx end=0x%lx cgroup_ino=%u",
+ "start=0x%lx end=0x%lx cgroup_ino=%lu",
__entry->name,
__entry->nr_to_write,
__entry->pages_skipped,
@@ -489,7 +489,7 @@ DECLARE_EVENT_CLASS(wbc_class,
__entry->range_cyclic,
__entry->range_start,
__entry->range_end,
- __entry->cgroup_ino
+ (unsigned long)__entry->cgroup_ino
)
)
@@ -510,7 +510,7 @@ TRACE_EVENT(writeback_queue_io,
__field(long, age)
__field(int, moved)
__field(int, reason)
- __field(unsigned int, cgroup_ino)
+ __field(ino_t, cgroup_ino)
),
TP_fast_assign(
unsigned long *older_than_this = work->older_than_this;
@@ -522,13 +522,13 @@ TRACE_EVENT(writeback_queue_io,
__entry->reason = work->reason;
__entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
),
- TP_printk("bdi %s: older=%lu age=%ld enqueue=%d reason=%s cgroup_ino=%u",
+ TP_printk("bdi %s: older=%lu age=%ld enqueue=%d reason=%s cgroup_ino=%lu",
__entry->name,
__entry->older, /* older_than_this in jiffies */
__entry->age, /* older_than_this in relative milliseconds */
__entry->moved,
__print_symbolic(__entry->reason, WB_WORK_REASON),
- __entry->cgroup_ino
+ (unsigned long)__entry->cgroup_ino
)
);
@@ -596,7 +596,7 @@ TRACE_EVENT(bdi_dirty_ratelimit,
__field(unsigned long, dirty_ratelimit)
__field(unsigned long, task_ratelimit)
__field(unsigned long, balanced_dirty_ratelimit)
- __field(unsigned int, cgroup_ino)
+ __field(ino_t, cgroup_ino)
),
TP_fast_assign(
@@ -614,7 +614,7 @@ TRACE_EVENT(bdi_dirty_ratelimit,
TP_printk("bdi %s: "
"write_bw=%lu awrite_bw=%lu dirty_rate=%lu "
"dirty_ratelimit=%lu task_ratelimit=%lu "
- "balanced_dirty_ratelimit=%lu cgroup_ino=%u",
+ "balanced_dirty_ratelimit=%lu cgroup_ino=%lu",
__entry->bdi,
__entry->write_bw, /* write bandwidth */
__entry->avg_write_bw, /* avg write bandwidth */
@@ -622,7 +622,7 @@ TRACE_EVENT(bdi_dirty_ratelimit,
__entry->dirty_ratelimit, /* base ratelimit */
__entry->task_ratelimit, /* ratelimit with position control */
__entry->balanced_dirty_ratelimit, /* the balanced ratelimit */
- __entry->cgroup_ino
+ (unsigned long)__entry->cgroup_ino
)
);
@@ -660,7 +660,7 @@ TRACE_EVENT(balance_dirty_pages,
__field( long, pause)
__field(unsigned long, period)
__field( long, think)
- __field(unsigned int, cgroup_ino)
+ __field(ino_t, cgroup_ino)
),
TP_fast_assign(
@@ -692,7 +692,7 @@ TRACE_EVENT(balance_dirty_pages,
"bdi_setpoint=%lu bdi_dirty=%lu "
"dirty_ratelimit=%lu task_ratelimit=%lu "
"dirtied=%u dirtied_pause=%u "
- "paused=%lu pause=%ld period=%lu think=%ld cgroup_ino=%u",
+ "paused=%lu pause=%ld period=%lu think=%ld cgroup_ino=%lu",
__entry->bdi,
__entry->limit,
__entry->setpoint,
@@ -707,7 +707,7 @@ TRACE_EVENT(balance_dirty_pages,
__entry->pause, /* ms */
__entry->period, /* ms */
__entry->think, /* ms */
- __entry->cgroup_ino
+ (unsigned long)__entry->cgroup_ino
)
);
@@ -718,10 +718,10 @@ TRACE_EVENT(writeback_sb_inodes_requeue,
TP_STRUCT__entry(
__array(char, name, 32)
- __field(unsigned long, ino)
+ __field(ino_t, ino)
__field(unsigned long, state)
__field(unsigned long, dirtied_when)
- __field(unsigned int, cgroup_ino)
+ __field(ino_t, cgroup_ino)
),
TP_fast_assign(
@@ -733,13 +733,13 @@ TRACE_EVENT(writeback_sb_inodes_requeue,
__entry->cgroup_ino = __trace_wb_assign_cgroup(inode_to_wb(inode));
),
- TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu cgroup_ino=%u",
+ TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu cgroup_ino=%lu",
__entry->name,
- __entry->ino,
+ (unsigned long)__entry->ino,
show_inode_state(__entry->state),
__entry->dirtied_when,
(jiffies - __entry->dirtied_when) / HZ,
- __entry->cgroup_ino
+ (unsigned long)__entry->cgroup_ino
)
);
@@ -789,13 +789,13 @@ DECLARE_EVENT_CLASS(writeback_single_inode_template,
TP_STRUCT__entry(
__array(char, name, 32)
- __field(unsigned long, ino)
+ __field(ino_t, ino)
__field(unsigned long, state)
__field(unsigned long, dirtied_when)
__field(unsigned long, writeback_index)
__field(long, nr_to_write)
__field(unsigned long, wrote)
- __field(unsigned int, cgroup_ino)
+ __field(ino_t, cgroup_ino)
),
TP_fast_assign(
@@ -811,16 +811,16 @@ DECLARE_EVENT_CLASS(writeback_single_inode_template,
),
TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu "
- "index=%lu to_write=%ld wrote=%lu cgroup_ino=%u",
+ "index=%lu to_write=%ld wrote=%lu cgroup_ino=%lu",
__entry->name,
- __entry->ino,
+ (unsigned long)__entry->ino,
show_inode_state(__entry->state),
__entry->dirtied_when,
(jiffies - __entry->dirtied_when) / HZ,
__entry->writeback_index,
__entry->nr_to_write,
__entry->wrote,
- __entry->cgroup_ino
+ (unsigned long)__entry->cgroup_ino
)
);
@@ -845,7 +845,7 @@ DECLARE_EVENT_CLASS(writeback_inode_template,
TP_STRUCT__entry(
__field( dev_t, dev )
- __field(unsigned long, ino )
+ __field( ino_t, ino )
__field(unsigned long, state )
__field( __u16, mode )
__field(unsigned long, dirtied_when )
@@ -861,7 +861,7 @@ DECLARE_EVENT_CLASS(writeback_inode_template,
TP_printk("dev %d,%d ino %lu dirtied %lu state %s mode 0%o",
MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->ino, __entry->dirtied_when,
+ (unsigned long)__entry->ino, __entry->dirtied_when,
show_inode_state(__entry->state), __entry->mode)
);
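
The recurring pattern in this file — storing ino_t in the entry and casting to unsigned long at print time — exists because the width of ino_t varies by architecture, so the cast keeps a single %lu format string valid everywhere. In isolation (helper name invented):

    #include <linux/printk.h>
    #include <linux/types.h>

    static void print_ino(ino_t ino)
    {
            /* ino_t may be 32- or 64-bit; the cast matches %lu on both. */
            pr_info("ino=%lu\n", (unsigned long)ino);
    }
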
diff --git a/include/trace/events/xdp.h b/include/trace/events/xdp.h
index 8c8420230a10..a7378bcd9928 100644
--- a/include/trace/events/xdp.h
+++ b/include/trace/events/xdp.h
@@ -22,7 +22,7 @@
#define __XDP_ACT_SYM_FN(x) \
{ XDP_##x, #x },
#define __XDP_ACT_SYM_TAB \
- __XDP_ACT_MAP(__XDP_ACT_SYM_FN) { -1, 0 }
+ __XDP_ACT_MAP(__XDP_ACT_SYM_FN) { -1, NULL }
__XDP_ACT_MAP(__XDP_ACT_TP_FN)
TRACE_EVENT(xdp_exception,
@@ -317,19 +317,15 @@ __MEM_TYPE_MAP(__MEM_TYPE_TP_FN)
TRACE_EVENT(mem_disconnect,
- TP_PROTO(const struct xdp_mem_allocator *xa,
- bool safe_to_remove, bool force),
+ TP_PROTO(const struct xdp_mem_allocator *xa),
- TP_ARGS(xa, safe_to_remove, force),
+ TP_ARGS(xa),
TP_STRUCT__entry(
__field(const struct xdp_mem_allocator *, xa)
__field(u32, mem_id)
__field(u32, mem_type)
__field(const void *, allocator)
- __field(bool, safe_to_remove)
- __field(bool, force)
- __field(int, disconnect_cnt)
),
TP_fast_assign(
@@ -337,19 +333,12 @@ TRACE_EVENT(mem_disconnect,
__entry->mem_id = xa->mem.id;
__entry->mem_type = xa->mem.type;
__entry->allocator = xa->allocator;
- __entry->safe_to_remove = safe_to_remove;
- __entry->force = force;
- __entry->disconnect_cnt = xa->disconnect_cnt;
),
- TP_printk("mem_id=%d mem_type=%s allocator=%p"
- " safe_to_remove=%s force=%s disconnect_cnt=%d",
+ TP_printk("mem_id=%d mem_type=%s allocator=%p",
__entry->mem_id,
__print_symbolic(__entry->mem_type, __MEM_TYPE_SYM_TAB),
- __entry->allocator,
- __entry->safe_to_remove ? "true" : "false",
- __entry->force ? "true" : "false",
- __entry->disconnect_cnt
+ __entry->allocator
)
);
diff --git a/include/uapi/linux/blkzoned.h b/include/uapi/linux/blkzoned.h
index 498eec813494..0cdef67135f0 100644
--- a/include/uapi/linux/blkzoned.h
+++ b/include/uapi/linux/blkzoned.h
@@ -120,9 +120,11 @@ struct blk_zone_report {
};
/**
- * struct blk_zone_range - BLKRESETZONE ioctl request
- * @sector: starting sector of the first zone to issue reset write pointer
- * @nr_sectors: Total number of sectors of 1 or more zones to reset
+ * struct blk_zone_range - BLKRESETZONE/BLKOPENZONE/
+ * BLKCLOSEZONE/BLKFINISHZONE ioctl
+ * requests
+ * @sector: Starting sector of the first zone to operate on.
+ * @nr_sectors: Total number of sectors of all zones to operate on.
*/
struct blk_zone_range {
__u64 sector;
@@ -139,10 +141,19 @@ struct blk_zone_range {
* sector range. The sector range must be zone aligned.
* @BLKGETZONESZ: Get the device zone size in number of 512 B sectors.
* @BLKGETNRZONES: Get the total number of zones of the device.
+ * @BLKOPENZONE: Open the zones in the specified sector range.
+ * The 512 B sector range must be zone aligned.
+ * @BLKCLOSEZONE: Close the zones in the specified sector range.
+ * The 512 B sector range must be zone aligned.
+ * @BLKFINISHZONE: Mark the zones as full in the specified sector range.
+ * The 512 B sector range must be zone aligned.
*/
#define BLKREPORTZONE _IOWR(0x12, 130, struct blk_zone_report)
#define BLKRESETZONE _IOW(0x12, 131, struct blk_zone_range)
#define BLKGETZONESZ _IOR(0x12, 132, __u32)
#define BLKGETNRZONES _IOR(0x12, 133, __u32)
+#define BLKOPENZONE _IOW(0x12, 134, struct blk_zone_range)
+#define BLKCLOSEZONE _IOW(0x12, 135, struct blk_zone_range)
+#define BLKFINISHZONE _IOW(0x12, 136, struct blk_zone_range)
#endif /* _UAPI_BLKZONED_H */
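
As a usage sketch for the new zone-management ioctls: all three take the same zone-aligned struct blk_zone_range in 512 B sectors, just like BLKRESETZONE. The device path and sector count below are placeholders:

    #include <fcntl.h>
    #include <linux/blkzoned.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    int main(void)
    {
            struct blk_zone_range range = {
                    .sector = 0,            /* must be zone aligned */
                    .nr_sectors = 524288,   /* e.g. one 256 MiB zone */
            };
            int fd = open("/dev/sdz", O_RDWR);  /* placeholder device */

            if (fd < 0 || ioctl(fd, BLKOPENZONE, &range))
                    return 1;
            return close(fd);
    }
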
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 77c6be96d676..dbbcf0b02970 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -173,6 +173,7 @@ enum bpf_prog_type {
BPF_PROG_TYPE_CGROUP_SYSCTL,
BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE,
BPF_PROG_TYPE_CGROUP_SOCKOPT,
+ BPF_PROG_TYPE_TRACING,
};
enum bpf_attach_type {
@@ -199,6 +200,9 @@ enum bpf_attach_type {
BPF_CGROUP_UDP6_RECVMSG,
BPF_CGROUP_GETSOCKOPT,
BPF_CGROUP_SETSOCKOPT,
+ BPF_TRACE_RAW_TP,
+ BPF_TRACE_FENTRY,
+ BPF_TRACE_FEXIT,
__MAX_BPF_ATTACH_TYPE
};
@@ -344,6 +348,9 @@ enum bpf_attach_type {
/* Clone map from listener for newly accepted socket */
#define BPF_F_CLONE (1U << 9)
+/* Enable memory-mapping BPF map */
+#define BPF_F_MMAPABLE (1U << 10)
+
/* flags for BPF_PROG_QUERY */
#define BPF_F_QUERY_EFFECTIVE (1U << 0)
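
A rough userspace sketch of what BPF_F_MMAPABLE enables (raw bpf(2) interface, error handling mostly elided): the value area of an array map created with this flag can be mapped into the process, avoiding a syscall per lookup.

    #include <linux/bpf.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int main(void)
    {
            union bpf_attr attr;
            int fd;

            memset(&attr, 0, sizeof(attr));
            attr.map_type = BPF_MAP_TYPE_ARRAY;
            attr.key_size = 4;
            attr.value_size = 4096;
            attr.max_entries = 1;
            attr.map_flags = BPF_F_MMAPABLE;

            fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
            if (fd < 0)
                    return 1;
            /* Reads/writes now go straight to the map value memory. */
            return mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
                        fd, 0) == MAP_FAILED;
    }
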
@@ -420,6 +427,8 @@ union bpf_attr {
__u32 line_info_rec_size; /* userspace bpf_line_info size */
__aligned_u64 line_info; /* line info */
__u32 line_info_cnt; /* number of bpf_line_info records */
+ __u32 attach_btf_id; /* in-kernel BTF type id to attach to */
+ __u32 attach_prog_fd; /* 0 to attach to vmlinux */
};
struct { /* anonymous struct used by BPF_OBJ_* commands */
@@ -560,10 +569,13 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_probe_read(void *dst, u32 size, const void *src)
+ * int bpf_probe_read(void *dst, u32 size, const void *unsafe_ptr)
* Description
* For tracing programs, safely attempt to read *size* bytes from
- * address *src* and store the data in *dst*.
+ * kernel space address *unsafe_ptr* and store the data in *dst*.
+ *
+ * Generally, use bpf_probe_read_user() or bpf_probe_read_kernel()
+ * instead.
* Return
* 0 on success, or a negative error in case of failure.
*
@@ -794,7 +806,7 @@ union bpf_attr {
* A 64-bit integer containing the current GID and UID, and
* created as such: *current_gid* **<< 32 \|** *current_uid*.
*
- * int bpf_get_current_comm(char *buf, u32 size_of_buf)
+ * int bpf_get_current_comm(void *buf, u32 size_of_buf)
* Description
* Copy the **comm** attribute of the current task into *buf* of
* *size_of_buf*. The **comm** attribute contains the name of
@@ -1023,7 +1035,7 @@ union bpf_attr {
* The realm of the route for the packet associated to *skb*, or 0
* if none was found.
*
- * int bpf_perf_event_output(struct pt_regs *ctx, struct bpf_map *map, u64 flags, void *data, u64 size)
+ * int bpf_perf_event_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size)
* Description
* Write raw *data* blob into a special BPF perf event held by
* *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf
@@ -1068,7 +1080,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_skb_load_bytes(const struct sk_buff *skb, u32 offset, void *to, u32 len)
+ * int bpf_skb_load_bytes(const void *skb, u32 offset, void *to, u32 len)
* Description
* This helper was provided as an easy way to load data from a
* packet. It can be used to load *len* bytes from *offset* from
@@ -1085,7 +1097,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_get_stackid(struct pt_regs *ctx, struct bpf_map *map, u64 flags)
+ * int bpf_get_stackid(void *ctx, struct bpf_map *map, u64 flags)
* Description
* Walk a user or a kernel stack and return its id. To achieve
* this, the helper needs *ctx*, which is a pointer to the context
@@ -1154,7 +1166,7 @@ union bpf_attr {
* The checksum result, or a negative error code in case of
* failure.
*
- * int bpf_skb_get_tunnel_opt(struct sk_buff *skb, u8 *opt, u32 size)
+ * int bpf_skb_get_tunnel_opt(struct sk_buff *skb, void *opt, u32 size)
* Description
* Retrieve tunnel options metadata for the packet associated to
* *skb*, and store the raw tunnel option data to the buffer *opt*
@@ -1172,7 +1184,7 @@ union bpf_attr {
* Return
* The size of the option data retrieved.
*
- * int bpf_skb_set_tunnel_opt(struct sk_buff *skb, u8 *opt, u32 size)
+ * int bpf_skb_set_tunnel_opt(struct sk_buff *skb, void *opt, u32 size)
* Description
* Set tunnel options metadata for the packet associated to *skb*
* to the option data contained in the raw buffer *opt* of *size*.
@@ -1425,45 +1437,14 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_probe_read_str(void *dst, int size, const void *unsafe_ptr)
+ * int bpf_probe_read_str(void *dst, u32 size, const void *unsafe_ptr)
* Description
- * Copy a NUL terminated string from an unsafe address
- * *unsafe_ptr* to *dst*. The *size* should include the
- * terminating NUL byte. In case the string length is smaller than
- * *size*, the target is not padded with further NUL bytes. If the
- * string length is larger than *size*, just *size*-1 bytes are
- * copied and the last byte is set to NUL.
- *
- * On success, the length of the copied string is returned. This
- * makes this helper useful in tracing programs for reading
- * strings, and more importantly to get its length at runtime. See
- * the following snippet:
- *
- * ::
- *
- * SEC("kprobe/sys_open")
- * void bpf_sys_open(struct pt_regs *ctx)
- * {
- * char buf[PATHLEN]; // PATHLEN is defined to 256
- * int res = bpf_probe_read_str(buf, sizeof(buf),
- * ctx->di);
- *
- * // Consume buf, for example push it to
- * // userspace via bpf_perf_event_output(); we
- * // can use res (the string length) as event
- * // size, after checking its boundaries.
- * }
+ * Copy a NUL terminated string from an unsafe kernel address
+ * *unsafe_ptr* to *dst*. See bpf_probe_read_kernel_str() for
+ * more details.
*
- * In comparison, using **bpf_probe_read()** helper here instead
- * to read the string would require to estimate the length at
- * compile time, and would often result in copying more memory
- * than necessary.
- *
- * Another useful use case is when parsing individual process
- * arguments or individual environment variables navigating
- * *current*\ **->mm->arg_start** and *current*\
- * **->mm->env_start**: using this helper and the return value,
- * one can quickly iterate at the right offset of the memory area.
+ * Generally, use bpf_probe_read_user_str() or bpf_probe_read_kernel_str()
+ * instead.
* Return
* On success, the strictly positive length of the string,
* including the trailing NUL character. On error, a negative
@@ -1511,7 +1492,7 @@ union bpf_attr {
* Return
* 0
*
- * int bpf_setsockopt(struct bpf_sock_ops *bpf_socket, int level, int optname, char *optval, int optlen)
+ * int bpf_setsockopt(struct bpf_sock_ops *bpf_socket, int level, int optname, void *optval, int optlen)
* Description
* Emulate a call to **setsockopt()** on the socket associated to
* *bpf_socket*, which must be a full socket. The *level* at
@@ -1595,7 +1576,7 @@ union bpf_attr {
* Return
* **XDP_REDIRECT** on success, or **XDP_ABORTED** on error.
*
- * int bpf_sk_redirect_map(struct bpf_map *map, u32 key, u64 flags)
+ * int bpf_sk_redirect_map(struct sk_buff *skb, struct bpf_map *map, u32 key, u64 flags)
* Description
* Redirect the packet to the socket referenced by *map* (of type
* **BPF_MAP_TYPE_SOCKMAP**) at index *key*. Both ingress and
@@ -1715,7 +1696,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_getsockopt(struct bpf_sock_ops *bpf_socket, int level, int optname, char *optval, int optlen)
+ * int bpf_getsockopt(struct bpf_sock_ops *bpf_socket, int level, int optname, void *optval, int optlen)
* Description
* Emulate a call to **getsockopt()** on the socket associated to
* *bpf_socket*, which must be a full socket. The *level* at
@@ -1947,7 +1928,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_get_stack(struct pt_regs *regs, void *buf, u32 size, u64 flags)
+ * int bpf_get_stack(void *ctx, void *buf, u32 size, u64 flags)
* Description
* Return a user or a kernel stack in bpf program provided buffer.
* To achieve this, the helper needs *ctx*, which is a pointer
@@ -1980,7 +1961,7 @@ union bpf_attr {
* A non-negative value equal to or less than *size* on success,
* or a negative error in case of failure.
*
- * int bpf_skb_load_bytes_relative(const struct sk_buff *skb, u32 offset, void *to, u32 len, u32 start_header)
+ * int bpf_skb_load_bytes_relative(const void *skb, u32 offset, void *to, u32 len, u32 start_header)
* Description
* This helper is similar to **bpf_skb_load_bytes**\ () in that
* it provides an easy way to load *len* bytes from *offset*
@@ -2033,7 +2014,7 @@ union bpf_attr {
* * > 0 one of **BPF_FIB_LKUP_RET_** codes explaining why the
* packet is not forwarded or needs assist from full stack
*
- * int bpf_sock_hash_update(struct bpf_sock_ops_kern *skops, struct bpf_map *map, void *key, u64 flags)
+ * int bpf_sock_hash_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags)
* Description
* Add an entry to, or update a sockhash *map* referencing sockets.
* The *skops* is used as a new value for the entry associated to
@@ -2392,7 +2373,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_msg_push_data(struct sk_buff *skb, u32 start, u32 len, u64 flags)
+ * int bpf_msg_push_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags)
* Description
* For socket policies, insert *len* bytes into *msg* at offset
* *start*.
@@ -2408,9 +2389,9 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_msg_pop_data(struct sk_msg_buff *msg, u32 start, u32 pop, u64 flags)
+ * int bpf_msg_pop_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags)
* Description
- * Will remove *pop* bytes from a *msg* starting at byte *start*.
+ * Will remove *len* bytes from a *msg* starting at byte *start*.
* This may result in **ENOMEM** errors under certain situations if
* an allocation and copy are required due to a full ring buffer.
* However, the helper will try to avoid doing the allocation
@@ -2505,7 +2486,7 @@ union bpf_attr {
* A **struct bpf_tcp_sock** pointer on success, or **NULL** in
* case of failure.
*
- * int bpf_skb_ecn_set_ce(struct sk_buf *skb)
+ * int bpf_skb_ecn_set_ce(struct sk_buff *skb)
* Description
* Set ECN (Explicit Congestion Notification) field of IP header
* to **CE** (Congestion Encountered) if current value is **ECT**
@@ -2750,6 +2731,96 @@ union bpf_attr {
* **-EOPNOTSUPP** kernel configuration does not enable SYN cookies
*
* **-EPROTONOSUPPORT** IP packet version is not 4 or 6
+ *
+ * int bpf_skb_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size)
+ * Description
+ * Write raw *data* blob into a special BPF perf event held by
+ * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf
+ * event must have the following attributes: **PERF_SAMPLE_RAW**
+ * as **sample_type**, **PERF_TYPE_SOFTWARE** as **type**, and
+ * **PERF_COUNT_SW_BPF_OUTPUT** as **config**.
+ *
+ * The *flags* are used to indicate the index in *map* for which
+ * the value must be put, masked with **BPF_F_INDEX_MASK**.
+ * Alternatively, *flags* can be set to **BPF_F_CURRENT_CPU**
+ * to indicate that the index of the current CPU core should be
+ * used.
+ *
+ * The value to write, of *size*, is passed through eBPF stack and
+ * pointed by *data*.
+ *
+ * *ctx* is a pointer to in-kernel struct sk_buff.
+ *
+ * This helper is similar to **bpf_perf_event_output**\ () but
+ * restricted to raw_tracepoint bpf programs.
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
+ * int bpf_probe_read_user(void *dst, u32 size, const void *unsafe_ptr)
+ * Description
+ * Safely attempt to read *size* bytes from user space address
+ * *unsafe_ptr* and store the data in *dst*.
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
+ * int bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr)
+ * Description
+ * Safely attempt to read *size* bytes from kernel space address
+ * *unsafe_ptr* and store the data in *dst*.
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
+ * int bpf_probe_read_user_str(void *dst, u32 size, const void *unsafe_ptr)
+ * Description
+ * Copy a NUL terminated string from an unsafe user address
+ * *unsafe_ptr* to *dst*. The *size* should include the
+ * terminating NUL byte. In case the string length is smaller than
+ * *size*, the target is not padded with further NUL bytes. If the
+ * string length is larger than *size*, just *size*-1 bytes are
+ * copied and the last byte is set to NUL.
+ *
+ * On success, the length of the copied string is returned. This
+ * makes this helper useful in tracing programs for reading
+ * strings, and more importantly to get its length at runtime. See
+ * the following snippet:
+ *
+ * ::
+ *
+ * SEC("kprobe/sys_open")
+ * void bpf_sys_open(struct pt_regs *ctx)
+ * {
+ * char buf[PATHLEN]; // PATHLEN is defined to 256
+ * int res = bpf_probe_read_user_str(buf, sizeof(buf),
+ * ctx->di);
+ *
+ * // Consume buf, for example push it to
+ * // userspace via bpf_perf_event_output(); we
+ * // can use res (the string length) as event
+ * // size, after checking its boundaries.
+ * }
+ *
+ * In comparison, using the **bpf_probe_read_user()** helper here
+ * to read the string would require estimating the length at
+ * compile time, and would often result in copying more memory
+ * than necessary.
+ *
+ * Another useful use case is when parsing individual process
+ * arguments or individual environment variables by navigating
+ * *current*\ **->mm->arg_start** and *current*\
+ * **->mm->env_start**: using this helper and the return value,
+ * one can quickly iterate at the right offset of the memory area.
+ * Return
+ * On success, the strictly positive length of the string,
+ * including the trailing NUL character. On error, a negative
+ * value.
+ *
+ * int bpf_probe_read_kernel_str(void *dst, u32 size, const void *unsafe_ptr)
+ * Description
+ * Copy a NUL terminated string from an unsafe kernel address *unsafe_ptr*
+ * to *dst*. Same semantics as with bpf_probe_read_user_str() apply.
+ * Return
+ * On success, the strictly positive length of the string, including
+ * the trailing NUL character. On error, a negative value.
*/
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
@@ -2862,7 +2933,12 @@ union bpf_attr {
FN(sk_storage_get), \
FN(sk_storage_delete), \
FN(send_signal), \
- FN(tcp_gen_syncookie),
+ FN(tcp_gen_syncookie), \
+ FN(skb_output), \
+ FN(probe_read_user), \
+ FN(probe_read_kernel), \
+ FN(probe_read_user_str), \
+ FN(probe_read_kernel_str),
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
* function eBPF program intends to call
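
Putting the new address-space-aware helpers together, a hedged BPF-side sketch (libbpf-style headers assumed; the section name and probe point are illustrative):

    #include <linux/bpf.h>
    #include <linux/ptrace.h>
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_tracing.h>

    SEC("kprobe/do_sys_open")
    int trace_open(struct pt_regs *ctx)
    {
            char fname[64];

            /* The filename pointer comes from user space, so use the
             * user-address variant rather than plain bpf_probe_read().
             */
            bpf_probe_read_user_str(fname, sizeof(fname),
                                    (const void *)PT_REGS_PARM2(ctx));
            return 0;
    }

    char LICENSE[] SEC("license") = "GPL";
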
diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h
index 3ee0678c0a83..7a8bc8b920f5 100644
--- a/include/uapi/linux/btrfs.h
+++ b/include/uapi/linux/btrfs.h
@@ -270,6 +270,7 @@ struct btrfs_ioctl_fs_info_args {
#define BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA (1ULL << 8)
#define BTRFS_FEATURE_INCOMPAT_NO_HOLES (1ULL << 9)
#define BTRFS_FEATURE_INCOMPAT_METADATA_UUID (1ULL << 10)
+#define BTRFS_FEATURE_INCOMPAT_RAID1C34 (1ULL << 11)
struct btrfs_ioctl_feature_flags {
__u64 compat_flags;
@@ -831,7 +832,9 @@ enum btrfs_err_code {
BTRFS_ERROR_DEV_TGT_REPLACE,
BTRFS_ERROR_DEV_MISSING_NOT_FOUND,
BTRFS_ERROR_DEV_ONLY_WRITABLE,
- BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS
+ BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS,
+ BTRFS_ERROR_DEV_RAID1C3_MIN_NOT_MET,
+ BTRFS_ERROR_DEV_RAID1C4_MIN_NOT_MET,
};
#define BTRFS_IOC_SNAP_CREATE _IOW(BTRFS_IOCTL_MAGIC, 1, \
diff --git a/include/uapi/linux/btrfs_tree.h b/include/uapi/linux/btrfs_tree.h
index b65c7ee75bc7..8e322e2c7e78 100644
--- a/include/uapi/linux/btrfs_tree.h
+++ b/include/uapi/linux/btrfs_tree.h
@@ -302,6 +302,9 @@
/* csum types */
enum btrfs_csum_type {
BTRFS_CSUM_TYPE_CRC32 = 0,
+ BTRFS_CSUM_TYPE_XXHASH = 1,
+ BTRFS_CSUM_TYPE_SHA256 = 2,
+ BTRFS_CSUM_TYPE_BLAKE2 = 3,
};
/*
@@ -737,10 +740,12 @@ struct btrfs_balance_item {
__le64 unused[4];
} __attribute__ ((__packed__));
-#define BTRFS_FILE_EXTENT_INLINE 0
-#define BTRFS_FILE_EXTENT_REG 1
-#define BTRFS_FILE_EXTENT_PREALLOC 2
-#define BTRFS_FILE_EXTENT_TYPES 2
+enum {
+ BTRFS_FILE_EXTENT_INLINE = 0,
+ BTRFS_FILE_EXTENT_REG = 1,
+ BTRFS_FILE_EXTENT_PREALLOC = 2,
+ BTRFS_NR_FILE_EXTENT_TYPES = 3,
+};
struct btrfs_file_extent_item {
/*
@@ -836,6 +841,8 @@ struct btrfs_dev_replace_item {
#define BTRFS_BLOCK_GROUP_RAID10 (1ULL << 6)
#define BTRFS_BLOCK_GROUP_RAID5 (1ULL << 7)
#define BTRFS_BLOCK_GROUP_RAID6 (1ULL << 8)
+#define BTRFS_BLOCK_GROUP_RAID1C3 (1ULL << 9)
+#define BTRFS_BLOCK_GROUP_RAID1C4 (1ULL << 10)
#define BTRFS_BLOCK_GROUP_RESERVED (BTRFS_AVAIL_ALLOC_BIT_SINGLE | \
BTRFS_SPACE_INFO_GLOBAL_RSV)
@@ -847,6 +854,8 @@ enum btrfs_raid_types {
BTRFS_RAID_SINGLE,
BTRFS_RAID_RAID5,
BTRFS_RAID_RAID6,
+ BTRFS_RAID_RAID1C3,
+ BTRFS_RAID_RAID1C4,
BTRFS_NR_RAID_TYPES
};
@@ -856,6 +865,8 @@ enum btrfs_raid_types {
#define BTRFS_BLOCK_GROUP_PROFILE_MASK (BTRFS_BLOCK_GROUP_RAID0 | \
BTRFS_BLOCK_GROUP_RAID1 | \
+ BTRFS_BLOCK_GROUP_RAID1C3 | \
+ BTRFS_BLOCK_GROUP_RAID1C4 | \
BTRFS_BLOCK_GROUP_RAID5 | \
BTRFS_BLOCK_GROUP_RAID6 | \
BTRFS_BLOCK_GROUP_DUP | \
@@ -863,7 +874,9 @@ enum btrfs_raid_types {
#define BTRFS_BLOCK_GROUP_RAID56_MASK (BTRFS_BLOCK_GROUP_RAID5 | \
BTRFS_BLOCK_GROUP_RAID6)
-#define BTRFS_BLOCK_GROUP_RAID1_MASK (BTRFS_BLOCK_GROUP_RAID1)
+#define BTRFS_BLOCK_GROUP_RAID1_MASK (BTRFS_BLOCK_GROUP_RAID1 | \
+ BTRFS_BLOCK_GROUP_RAID1C3 | \
+ BTRFS_BLOCK_GROUP_RAID1C4)
/*
* We need a bit for restriper to be able to tell when chunks of type
diff --git a/include/uapi/linux/cec-funcs.h b/include/uapi/linux/cec-funcs.h
index 8997d5068c08..37590027b604 100644
--- a/include/uapi/linux/cec-funcs.h
+++ b/include/uapi/linux/cec-funcs.h
@@ -923,7 +923,8 @@ static inline void cec_msg_give_deck_status(struct cec_msg *msg,
msg->len = 3;
msg->msg[1] = CEC_MSG_GIVE_DECK_STATUS;
msg->msg[2] = status_req;
- msg->reply = reply ? CEC_MSG_DECK_STATUS : 0;
+ msg->reply = (reply && status_req != CEC_OP_STATUS_REQ_OFF) ?
+ CEC_MSG_DECK_STATUS : 0;
}
static inline void cec_ops_give_deck_status(const struct cec_msg *msg,
@@ -1027,7 +1028,8 @@ static inline void cec_msg_give_tuner_device_status(struct cec_msg *msg,
msg->len = 3;
msg->msg[1] = CEC_MSG_GIVE_TUNER_DEVICE_STATUS;
msg->msg[2] = status_req;
- msg->reply = reply ? CEC_MSG_TUNER_DEVICE_STATUS : 0;
+ msg->reply = (reply && status_req != CEC_OP_STATUS_REQ_OFF) ?
+ CEC_MSG_TUNER_DEVICE_STATUS : 0;
}
static inline void cec_ops_give_tuner_device_status(const struct cec_msg *msg,
@@ -1302,17 +1304,17 @@ static inline void cec_msg_user_control_pressed(struct cec_msg *msg,
if (!ui_cmd->has_opt_arg)
return;
switch (ui_cmd->ui_cmd) {
- case 0x56:
- case 0x57:
- case 0x60:
- case 0x68:
- case 0x69:
- case 0x6a:
+ case CEC_OP_UI_CMD_SELECT_BROADCAST_TYPE:
+ case CEC_OP_UI_CMD_SELECT_SOUND_PRESENTATION:
+ case CEC_OP_UI_CMD_PLAY_FUNCTION:
+ case CEC_OP_UI_CMD_SELECT_MEDIA_FUNCTION:
+ case CEC_OP_UI_CMD_SELECT_AV_INPUT_FUNCTION:
+ case CEC_OP_UI_CMD_SELECT_AUDIO_INPUT_FUNCTION:
/* The optional operand is one byte for all these ui commands */
msg->len++;
msg->msg[3] = ui_cmd->play_mode;
break;
- case 0x67:
+ case CEC_OP_UI_CMD_TUNE_FUNCTION:
msg->len += 4;
msg->msg[3] = (ui_cmd->channel_identifier.channel_number_fmt << 2) |
(ui_cmd->channel_identifier.major >> 8);
@@ -1331,17 +1333,17 @@ static inline void cec_ops_user_control_pressed(const struct cec_msg *msg,
if (msg->len == 3)
return;
switch (ui_cmd->ui_cmd) {
- case 0x56:
- case 0x57:
- case 0x60:
- case 0x68:
- case 0x69:
- case 0x6a:
+ case CEC_OP_UI_CMD_SELECT_BROADCAST_TYPE:
+ case CEC_OP_UI_CMD_SELECT_SOUND_PRESENTATION:
+ case CEC_OP_UI_CMD_PLAY_FUNCTION:
+ case CEC_OP_UI_CMD_SELECT_MEDIA_FUNCTION:
+ case CEC_OP_UI_CMD_SELECT_AV_INPUT_FUNCTION:
+ case CEC_OP_UI_CMD_SELECT_AUDIO_INPUT_FUNCTION:
/* The optional operand is one byte for all these ui commands */
ui_cmd->play_mode = msg->msg[3];
ui_cmd->has_opt_arg = 1;
break;
- case 0x67:
+ case CEC_OP_UI_CMD_TUNE_FUNCTION:
if (msg->len < 7)
break;
ui_cmd->has_opt_arg = 1;
diff --git a/include/uapi/linux/cec.h b/include/uapi/linux/cec.h
index 5704fa0292b5..7d1a06c52469 100644
--- a/include/uapi/linux/cec.h
+++ b/include/uapi/linux/cec.h
@@ -317,6 +317,8 @@ static inline int cec_is_unconfigured(__u16 log_addr_mask)
#define CEC_CAP_NEEDS_HPD (1 << 6)
/* Hardware can monitor CEC pin transitions */
#define CEC_CAP_MONITOR_PIN (1 << 7)
+/* CEC_ADAP_G_CONNECTOR_INFO is available */
+#define CEC_CAP_CONNECTOR_INFO (1 << 8)
/**
* struct cec_caps - CEC capabilities structure.
@@ -375,6 +377,34 @@ struct cec_log_addrs {
/* CDC-Only device: supports only CDC messages */
#define CEC_LOG_ADDRS_FL_CDC_ONLY (1 << 2)
+/**
+ * struct cec_drm_connector_info - tells which drm connector is
+ * associated with the CEC adapter.
+ * @card_no: drm card number
+ * @connector_id: drm connector ID
+ */
+struct cec_drm_connector_info {
+ __u32 card_no;
+ __u32 connector_id;
+};
+
+#define CEC_CONNECTOR_TYPE_NO_CONNECTOR 0
+#define CEC_CONNECTOR_TYPE_DRM 1
+
+/**
+ * struct cec_connector_info - tells if and which connector is
+ * associated with the CEC adapter.
+ * @type: connector type (if any)
+ * @drm: drm connector info
+ */
+struct cec_connector_info {
+ __u32 type;
+ union {
+ struct cec_drm_connector_info drm;
+ __u32 raw[16];
+ };
+};
+
/* Events */
/* Event that occurs when the adapter state changes */
@@ -398,10 +428,17 @@ struct cec_log_addrs {
* struct cec_event_state_change - used when the CEC adapter changes state.
* @phys_addr: the current physical address
* @log_addr_mask: the current logical address mask
+ * @have_conn_info: if non-zero, then HDMI connector information is available.
+ * This field is only valid if CEC_CAP_CONNECTOR_INFO is set. If that
+ * capability is set and @have_conn_info is zero, then that indicates
+ * that the HDMI connector device is not instantiated, either because
+ * the HDMI driver is still configuring the device or because the HDMI
+ * device was unbound.
*/
struct cec_event_state_change {
__u16 phys_addr;
__u16 log_addr_mask;
+ __u16 have_conn_info;
};
/**
@@ -476,6 +513,9 @@ struct cec_event {
#define CEC_G_MODE _IOR('a', 8, __u32)
#define CEC_S_MODE _IOW('a', 9, __u32)
+/* Get the connector info */
+#define CEC_ADAP_G_CONNECTOR_INFO _IOR('a', 10, struct cec_connector_info)
+
/*
* The remainder of this header defines all CEC messages and operands.
* The format matters since it the cec-ctl utility parses it to generate
@@ -768,8 +808,8 @@ struct cec_event {
#define CEC_MSG_SELECT_DIGITAL_SERVICE 0x93
#define CEC_MSG_TUNER_DEVICE_STATUS 0x07
/* Recording Flag Operand (rec_flag) */
-#define CEC_OP_REC_FLAG_USED 0
-#define CEC_OP_REC_FLAG_NOT_USED 1
+#define CEC_OP_REC_FLAG_NOT_USED 0
+#define CEC_OP_REC_FLAG_USED 1
/* Tuner Display Info Operand (tuner_display_info) */
#define CEC_OP_TUNER_DISPLAY_INFO_DIGITAL 0
#define CEC_OP_TUNER_DISPLAY_INFO_NONE 1
@@ -820,6 +860,95 @@ struct cec_event {
#define CEC_OP_MENU_STATE_DEACTIVATED 0x01
#define CEC_MSG_USER_CONTROL_PRESSED 0x44
+/* UI Command Operand (ui_cmd) */
+#define CEC_OP_UI_CMD_SELECT 0x00
+#define CEC_OP_UI_CMD_UP 0x01
+#define CEC_OP_UI_CMD_DOWN 0x02
+#define CEC_OP_UI_CMD_LEFT 0x03
+#define CEC_OP_UI_CMD_RIGHT 0x04
+#define CEC_OP_UI_CMD_RIGHT_UP 0x05
+#define CEC_OP_UI_CMD_RIGHT_DOWN 0x06
+#define CEC_OP_UI_CMD_LEFT_UP 0x07
+#define CEC_OP_UI_CMD_LEFT_DOWN 0x08
+#define CEC_OP_UI_CMD_DEVICE_ROOT_MENU 0x09
+#define CEC_OP_UI_CMD_DEVICE_SETUP_MENU 0x0a
+#define CEC_OP_UI_CMD_CONTENTS_MENU 0x0b
+#define CEC_OP_UI_CMD_FAVORITE_MENU 0x0c
+#define CEC_OP_UI_CMD_BACK 0x0d
+#define CEC_OP_UI_CMD_MEDIA_TOP_MENU 0x10
+#define CEC_OP_UI_CMD_MEDIA_CONTEXT_SENSITIVE_MENU 0x11
+#define CEC_OP_UI_CMD_NUMBER_ENTRY_MODE 0x1d
+#define CEC_OP_UI_CMD_NUMBER_11 0x1e
+#define CEC_OP_UI_CMD_NUMBER_12 0x1f
+#define CEC_OP_UI_CMD_NUMBER_0_OR_NUMBER_10 0x20
+#define CEC_OP_UI_CMD_NUMBER_1 0x21
+#define CEC_OP_UI_CMD_NUMBER_2 0x22
+#define CEC_OP_UI_CMD_NUMBER_3 0x23
+#define CEC_OP_UI_CMD_NUMBER_4 0x24
+#define CEC_OP_UI_CMD_NUMBER_5 0x25
+#define CEC_OP_UI_CMD_NUMBER_6 0x26
+#define CEC_OP_UI_CMD_NUMBER_7 0x27
+#define CEC_OP_UI_CMD_NUMBER_8 0x28
+#define CEC_OP_UI_CMD_NUMBER_9 0x29
+#define CEC_OP_UI_CMD_DOT 0x2a
+#define CEC_OP_UI_CMD_ENTER 0x2b
+#define CEC_OP_UI_CMD_CLEAR 0x2c
+#define CEC_OP_UI_CMD_NEXT_FAVORITE 0x2f
+#define CEC_OP_UI_CMD_CHANNEL_UP 0x30
+#define CEC_OP_UI_CMD_CHANNEL_DOWN 0x31
+#define CEC_OP_UI_CMD_PREVIOUS_CHANNEL 0x32
+#define CEC_OP_UI_CMD_SOUND_SELECT 0x33
+#define CEC_OP_UI_CMD_INPUT_SELECT 0x34
+#define CEC_OP_UI_CMD_DISPLAY_INFORMATION 0x35
+#define CEC_OP_UI_CMD_HELP 0x36
+#define CEC_OP_UI_CMD_PAGE_UP 0x37
+#define CEC_OP_UI_CMD_PAGE_DOWN 0x38
+#define CEC_OP_UI_CMD_POWER 0x40
+#define CEC_OP_UI_CMD_VOLUME_UP 0x41
+#define CEC_OP_UI_CMD_VOLUME_DOWN 0x42
+#define CEC_OP_UI_CMD_MUTE 0x43
+#define CEC_OP_UI_CMD_PLAY 0x44
+#define CEC_OP_UI_CMD_STOP 0x45
+#define CEC_OP_UI_CMD_PAUSE 0x46
+#define CEC_OP_UI_CMD_RECORD 0x47
+#define CEC_OP_UI_CMD_REWIND 0x48
+#define CEC_OP_UI_CMD_FAST_FORWARD 0x49
+#define CEC_OP_UI_CMD_EJECT 0x4a
+#define CEC_OP_UI_CMD_SKIP_FORWARD 0x4b
+#define CEC_OP_UI_CMD_SKIP_BACKWARD 0x4c
+#define CEC_OP_UI_CMD_STOP_RECORD 0x4d
+#define CEC_OP_UI_CMD_PAUSE_RECORD 0x4e
+#define CEC_OP_UI_CMD_ANGLE 0x50
+#define CEC_OP_UI_CMD_SUB_PICTURE 0x51
+#define CEC_OP_UI_CMD_VIDEO_ON_DEMAND 0x52
+#define CEC_OP_UI_CMD_ELECTRONIC_PROGRAM_GUIDE 0x53
+#define CEC_OP_UI_CMD_TIMER_PROGRAMMING 0x54
+#define CEC_OP_UI_CMD_INITIAL_CONFIGURATION 0x55
+#define CEC_OP_UI_CMD_SELECT_BROADCAST_TYPE 0x56
+#define CEC_OP_UI_CMD_SELECT_SOUND_PRESENTATION 0x57
+#define CEC_OP_UI_CMD_AUDIO_DESCRIPTION 0x58
+#define CEC_OP_UI_CMD_INTERNET 0x59
+#define CEC_OP_UI_CMD_3D_MODE 0x5a
+#define CEC_OP_UI_CMD_PLAY_FUNCTION 0x60
+#define CEC_OP_UI_CMD_PAUSE_PLAY_FUNCTION 0x61
+#define CEC_OP_UI_CMD_RECORD_FUNCTION 0x62
+#define CEC_OP_UI_CMD_PAUSE_RECORD_FUNCTION 0x63
+#define CEC_OP_UI_CMD_STOP_FUNCTION 0x64
+#define CEC_OP_UI_CMD_MUTE_FUNCTION 0x65
+#define CEC_OP_UI_CMD_RESTORE_VOLUME_FUNCTION 0x66
+#define CEC_OP_UI_CMD_TUNE_FUNCTION 0x67
+#define CEC_OP_UI_CMD_SELECT_MEDIA_FUNCTION 0x68
+#define CEC_OP_UI_CMD_SELECT_AV_INPUT_FUNCTION 0x69
+#define CEC_OP_UI_CMD_SELECT_AUDIO_INPUT_FUNCTION 0x6a
+#define CEC_OP_UI_CMD_POWER_TOGGLE_FUNCTION 0x6b
+#define CEC_OP_UI_CMD_POWER_OFF_FUNCTION 0x6c
+#define CEC_OP_UI_CMD_POWER_ON_FUNCTION 0x6d
+#define CEC_OP_UI_CMD_F1_BLUE 0x71
+#define CEC_OP_UI_CMD_F2_RED 0x72
+#define CEC_OP_UI_CMD_F3_GREEN 0x73
+#define CEC_OP_UI_CMD_F4_YELLOW 0x74
+#define CEC_OP_UI_CMD_F5 0x75
+#define CEC_OP_UI_CMD_DATA 0x76
/* UI Broadcast Type Operand (ui_bcast_type) */
#define CEC_OP_UI_BCAST_TYPE_TOGGLE_ALL 0x00
#define CEC_OP_UI_BCAST_TYPE_TOGGLE_DIG_ANA 0x01
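
As a usage illustration (not part of this patch), a sketch of querying the
new connector info; it assumes an open /dev/cecX descriptor and checks the
new capability bit first:

    #include <linux/cec.h>
    #include <stdio.h>
    #include <sys/ioctl.h>

    /* Print which DRM connector backs this CEC adapter, if known. */
    static void print_connector(int cec_fd)
    {
        struct cec_caps caps;
        struct cec_connector_info info;

        if (ioctl(cec_fd, CEC_ADAP_G_CAPS, &caps) ||
            !(caps.capabilities & CEC_CAP_CONNECTOR_INFO))
            return;
        if (!ioctl(cec_fd, CEC_ADAP_G_CONNECTOR_INFO, &info) &&
            info.type == CEC_CONNECTOR_TYPE_DRM)
            printf("card%u connector %u\n",
                   info.drm.card_no, info.drm.connector_id);
    }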
diff --git a/include/uapi/linux/dcbnl.h b/include/uapi/linux/dcbnl.h
index 69df19aa8e72..a791a94013a6 100644
--- a/include/uapi/linux/dcbnl.h
+++ b/include/uapi/linux/dcbnl.h
@@ -286,7 +286,7 @@ struct dcbmsg {
* @DCB_CMD_GNUMTCS: get the number of traffic classes currently supported
* @DCB_CMD_SNUMTCS: set the number of traffic classes
* @DCB_CMD_GBCN: set backward congestion notification configuration
- * @DCB_CMD_SBCN: get backward congestion notification configration.
+ * @DCB_CMD_SBCN: get backward congestion notification configuration.
* @DCB_CMD_GAPP: get application protocol configuration
* @DCB_CMD_SAPP: set application protocol configuration
* @DCB_CMD_IEEE_SET: set IEEE 802.1Qaz configuration
diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h
index 580b7a2e40e1..ae37fd4d194a 100644
--- a/include/uapi/linux/devlink.h
+++ b/include/uapi/linux/devlink.h
@@ -421,6 +421,11 @@ enum devlink_attr {
DEVLINK_ATTR_RELOAD_FAILED, /* u8 0 or 1 */
+ DEVLINK_ATTR_HEALTH_REPORTER_DUMP_TS_NS, /* u64 */
+
+ DEVLINK_ATTR_NETNS_FD, /* u32 */
+ DEVLINK_ATTR_NETNS_PID, /* u32 */
+ DEVLINK_ATTR_NETNS_ID, /* u32 */
/* add new attributes above here, update the policy in devlink.c */
__DEVLINK_ATTR_MAX,
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index 8938b76c4ee3..d4591792f0b4 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -1507,6 +1507,11 @@ enum ethtool_link_mode_bit_indices {
ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT = 66,
ETHTOOL_LINK_MODE_100baseT1_Full_BIT = 67,
ETHTOOL_LINK_MODE_1000baseT1_Full_BIT = 68,
+ ETHTOOL_LINK_MODE_400000baseKR8_Full_BIT = 69,
+ ETHTOOL_LINK_MODE_400000baseSR8_Full_BIT = 70,
+ ETHTOOL_LINK_MODE_400000baseLR8_ER8_FR8_Full_BIT = 71,
+ ETHTOOL_LINK_MODE_400000baseDR8_Full_BIT = 72,
+ ETHTOOL_LINK_MODE_400000baseCR8_Full_BIT = 73,
/* must be last entry */
__ETHTOOL_LINK_MODE_MASK_NBITS
@@ -1618,6 +1623,7 @@ enum ethtool_link_mode_bit_indices {
#define SPEED_56000 56000
#define SPEED_100000 100000
#define SPEED_200000 200000
+#define SPEED_400000 400000
#define SPEED_UNKNOWN -1
diff --git a/include/uapi/linux/fcntl.h b/include/uapi/linux/fcntl.h
index 1d338357df8a..1f97b33c840e 100644
--- a/include/uapi/linux/fcntl.h
+++ b/include/uapi/linux/fcntl.h
@@ -58,7 +58,7 @@
* Valid hint values for F_{GET,SET}_RW_HINT. 0 is "not set", or can be
* used to clear any hints previously set.
*/
-#define RWF_WRITE_LIFE_NOT_SET 0
+#define RWH_WRITE_LIFE_NOT_SET 0
#define RWH_WRITE_LIFE_NONE 1
#define RWH_WRITE_LIFE_SHORT 2
#define RWH_WRITE_LIFE_MEDIUM 3
@@ -66,6 +66,13 @@
#define RWH_WRITE_LIFE_EXTREME 5
/*
+ * The originally introduced spelling is retained from the first
+ * versions of the patch set that introduced the feature; see
+ * commit v4.13-rc1~212^2~51.
+ */
+#define RWF_WRITE_LIFE_NOT_SET RWH_WRITE_LIFE_NOT_SET
+
+/*
* Types of directory notifications that may be requested.
*/
#define DN_ACCESS 0x00000001 /* File accessed */
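
For illustration, a minimal userspace sketch of tagging a file with a
write-life hint; the fallback defines mirror this header
(F_SET_RW_HINT is F_LINUX_SPECIFIC_BASE + 12 = 1036) for libcs whose
copies predate the feature:

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    #ifndef F_SET_RW_HINT
    #define F_SET_RW_HINT 1036           /* F_LINUX_SPECIFIC_BASE + 12 */
    #endif
    #ifndef RWH_WRITE_LIFE_SHORT
    #define RWH_WRITE_LIFE_SHORT 2       /* per the header above */
    #endif

    int main(void)
    {
        uint64_t hint = RWH_WRITE_LIFE_SHORT;
        int fd = open("scratch.dat", O_CREAT | O_WRONLY, 0644);

        /* The hint is passed by pointer as a 64-bit value. */
        if (fd < 0 || fcntl(fd, F_SET_RW_HINT, &hint) < 0)
            perror("rw hint");
        if (fd >= 0)
            close(fd);
        return 0;
    }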
diff --git a/include/uapi/linux/fscrypt.h b/include/uapi/linux/fscrypt.h
index 39ccfe9311c3..1beb174ad950 100644
--- a/include/uapi/linux/fscrypt.h
+++ b/include/uapi/linux/fscrypt.h
@@ -17,7 +17,8 @@
#define FSCRYPT_POLICY_FLAGS_PAD_32 0x03
#define FSCRYPT_POLICY_FLAGS_PAD_MASK 0x03
#define FSCRYPT_POLICY_FLAG_DIRECT_KEY 0x04
-#define FSCRYPT_POLICY_FLAGS_VALID 0x07
+#define FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64 0x08
+#define FSCRYPT_POLICY_FLAGS_VALID 0x0F
/* Encryption algorithms */
#define FSCRYPT_MODE_AES_256_XTS 1
diff --git a/include/uapi/linux/gen_stats.h b/include/uapi/linux/gen_stats.h
index 065408e16a80..852f234f1fd6 100644
--- a/include/uapi/linux/gen_stats.h
+++ b/include/uapi/linux/gen_stats.h
@@ -13,6 +13,7 @@ enum {
TCA_STATS_RATE_EST64,
TCA_STATS_PAD,
TCA_STATS_BASIC_HW,
+ TCA_STATS_PKT64,
__TCA_STATS_MAX,
};
#define TCA_STATS_MAX (__TCA_STATS_MAX - 1)
@@ -26,10 +27,6 @@ struct gnet_stats_basic {
__u64 bytes;
__u32 packets;
};
-struct gnet_stats_basic_packed {
- __u64 bytes;
- __u32 packets;
-} __attribute__ ((packed));
/**
* struct gnet_stats_rate_est - rate estimator
diff --git a/include/uapi/linux/if.h b/include/uapi/linux/if.h
index 7fea0fd7d6f5..4bf33344aab1 100644
--- a/include/uapi/linux/if.h
+++ b/include/uapi/linux/if.h
@@ -33,6 +33,7 @@
#define IFNAMSIZ 16
#endif /* __UAPI_DEF_IF_IFNAMSIZ */
#define IFALIASZ 256
+#define ALTIFNAMSIZ 128
#include <linux/hdlc/ioctl.h>
/* For glibc compatibility. An empty enum does not compile. */
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index 4a8c02cafa9a..8aec8769d944 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -167,6 +167,8 @@ enum {
IFLA_NEW_IFINDEX,
IFLA_MIN_MTU,
IFLA_MAX_MTU,
+ IFLA_PROP_LIST,
+ IFLA_ALT_IFNAME, /* Alternative ifname */
__IFLA_MAX
};
diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index ea57526a5b89..2a1569211d87 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -19,7 +19,10 @@ struct io_uring_sqe {
__u8 flags; /* IOSQE_ flags */
__u16 ioprio; /* ioprio for the request */
__s32 fd; /* file descriptor to do IO on */
- __u64 off; /* offset into file */
+ union {
+ __u64 off; /* offset into file */
+ __u64 addr2;
+ };
__u64 addr; /* pointer to buffer or iovecs */
__u32 len; /* buffer size or number of iovecs */
union {
@@ -29,6 +32,8 @@ struct io_uring_sqe {
__u32 sync_range_flags;
__u32 msg_flags;
__u32 timeout_flags;
+ __u32 accept_flags;
+ __u32 cancel_flags;
};
__u64 user_data; /* data to be passed back at completion time */
union {
@@ -50,6 +55,7 @@ struct io_uring_sqe {
#define IORING_SETUP_IOPOLL (1U << 0) /* io_context is polled */
#define IORING_SETUP_SQPOLL (1U << 1) /* SQ poll thread */
#define IORING_SETUP_SQ_AFF (1U << 2) /* sq_thread_cpu is valid */
+#define IORING_SETUP_CQSIZE (1U << 3) /* app defines CQ size */
#define IORING_OP_NOP 0
#define IORING_OP_READV 1
@@ -63,6 +69,10 @@ struct io_uring_sqe {
#define IORING_OP_SENDMSG 9
#define IORING_OP_RECVMSG 10
#define IORING_OP_TIMEOUT 11
+#define IORING_OP_TIMEOUT_REMOVE 12
+#define IORING_OP_ACCEPT 13
+#define IORING_OP_ASYNC_CANCEL 14
+#define IORING_OP_LINK_TIMEOUT 15
/*
* sqe->fsync_flags
@@ -70,6 +80,11 @@ struct io_uring_sqe {
#define IORING_FSYNC_DATASYNC (1U << 0)
/*
+ * sqe->timeout_flags
+ */
+#define IORING_TIMEOUT_ABS (1U << 0)
+
+/*
* IO completion data structure (Completion Queue Entry)
*/
struct io_uring_cqe {
@@ -140,6 +155,7 @@ struct io_uring_params {
* io_uring_params->features flags
*/
#define IORING_FEAT_SINGLE_MMAP (1U << 0)
+#define IORING_FEAT_NODROP (1U << 1)
/*
* io_uring_register(2) opcodes and arguments
@@ -150,5 +166,11 @@ struct io_uring_params {
#define IORING_UNREGISTER_FILES 3
#define IORING_REGISTER_EVENTFD 4
#define IORING_UNREGISTER_EVENTFD 5
+#define IORING_REGISTER_FILES_UPDATE 6
+
+struct io_uring_files_update {
+ __u32 offset;
+ __s32 *fds;
+};
#endif
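
A sketch of the new file-table update path (not part of the patch): raw
io_uring_register(2) via syscall(2), assuming the ring has already
registered a file table and that __NR_io_uring_register is defined by the
installed headers:

    #include <linux/io_uring.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static int ring_register(int ring_fd, unsigned op, void *arg,
                             unsigned nr_args)
    {
        return (int)syscall(__NR_io_uring_register, ring_fd, op, arg, nr_args);
    }

    /* Replace the file registered at 'slot' with new_fd (-1 unregisters). */
    int replace_registered_file(int ring_fd, unsigned slot, int new_fd)
    {
        __s32 fd = new_fd;
        struct io_uring_files_update up = {
            .offset = slot,
            .fds = &fd,
        };

        return ring_register(ring_fd, IORING_REGISTER_FILES_UPDATE, &up, 1);
    }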
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 52641d8ca9e8..e6f17c8e2dba 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -235,6 +235,7 @@ struct kvm_hyperv_exit {
#define KVM_EXIT_S390_STSI 25
#define KVM_EXIT_IOAPIC_EOI 26
#define KVM_EXIT_HYPERV 27
+#define KVM_EXIT_ARM_NISV 28
/* For KVM_EXIT_INTERNAL_ERROR */
/* Emulate instruction failed. */
@@ -394,6 +395,11 @@ struct kvm_run {
} eoi;
/* KVM_EXIT_HYPERV */
struct kvm_hyperv_exit hyperv;
+ /* KVM_EXIT_ARM_NISV */
+ struct {
+ __u64 esr_iss;
+ __u64 fault_ipa;
+ } arm_nisv;
/* Fix the size of the union. */
char padding[256];
};
@@ -1000,6 +1006,9 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_PMU_EVENT_FILTER 173
#define KVM_CAP_ARM_IRQ_LINE_LAYOUT_2 174
#define KVM_CAP_HYPERV_DIRECT_TLBFLUSH 175
+#define KVM_CAP_PPC_GUEST_DEBUG_SSTEP 176
+#define KVM_CAP_ARM_NISV_TO_USER 177
+#define KVM_CAP_ARM_INJECT_EXT_DABT 178
#ifdef KVM_CAP_IRQ_ROUTING
@@ -1227,6 +1236,8 @@ enum kvm_device_type {
#define KVM_DEV_TYPE_ARM_VGIC_ITS KVM_DEV_TYPE_ARM_VGIC_ITS
KVM_DEV_TYPE_XIVE,
#define KVM_DEV_TYPE_XIVE KVM_DEV_TYPE_XIVE
+ KVM_DEV_TYPE_ARM_PV_TIME,
+#define KVM_DEV_TYPE_ARM_PV_TIME KVM_DEV_TYPE_ARM_PV_TIME
KVM_DEV_TYPE_MAX,
};
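
For context (not part of this patch), a sketch of how a VMM run loop might
consume the new exit; it assumes KVM_CAP_ARM_NISV_TO_USER was enabled via
KVM_ENABLE_CAP when the VM was created:

    #include <linux/kvm.h>
    #include <stdio.h>

    /* Inspect the mmap'ed kvm_run area after KVM_RUN returns. */
    static void handle_vcpu_exit(struct kvm_run *run)
    {
        switch (run->exit_reason) {
        case KVM_EXIT_ARM_NISV:
            /*
             * The data abort has no valid instruction syndrome, so
             * userspace must emulate the access itself or inject an
             * external abort into the guest.
             */
            fprintf(stderr, "NISV: ISS=0x%llx IPA=0x%llx\n",
                    (unsigned long long)run->arm_nisv.esr_iss,
                    (unsigned long long)run->arm_nisv.fault_ipa);
            break;
        default:
            break;
        }
    }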
diff --git a/include/uapi/linux/lwtunnel.h b/include/uapi/linux/lwtunnel.h
index de696ca12f2c..f6035f737193 100644
--- a/include/uapi/linux/lwtunnel.h
+++ b/include/uapi/linux/lwtunnel.h
@@ -27,6 +27,7 @@ enum lwtunnel_ip_t {
LWTUNNEL_IP_TOS,
LWTUNNEL_IP_FLAGS,
LWTUNNEL_IP_PAD,
+ LWTUNNEL_IP_OPTS,
__LWTUNNEL_IP_MAX,
};
@@ -41,12 +42,52 @@ enum lwtunnel_ip6_t {
LWTUNNEL_IP6_TC,
LWTUNNEL_IP6_FLAGS,
LWTUNNEL_IP6_PAD,
+ LWTUNNEL_IP6_OPTS,
__LWTUNNEL_IP6_MAX,
};
#define LWTUNNEL_IP6_MAX (__LWTUNNEL_IP6_MAX - 1)
enum {
+ LWTUNNEL_IP_OPTS_UNSPEC,
+ LWTUNNEL_IP_OPTS_GENEVE,
+ LWTUNNEL_IP_OPTS_VXLAN,
+ LWTUNNEL_IP_OPTS_ERSPAN,
+ __LWTUNNEL_IP_OPTS_MAX,
+};
+
+#define LWTUNNEL_IP_OPTS_MAX (__LWTUNNEL_IP_OPTS_MAX - 1)
+
+enum {
+ LWTUNNEL_IP_OPT_GENEVE_UNSPEC,
+ LWTUNNEL_IP_OPT_GENEVE_CLASS,
+ LWTUNNEL_IP_OPT_GENEVE_TYPE,
+ LWTUNNEL_IP_OPT_GENEVE_DATA,
+ __LWTUNNEL_IP_OPT_GENEVE_MAX,
+};
+
+#define LWTUNNEL_IP_OPT_GENEVE_MAX (__LWTUNNEL_IP_OPT_GENEVE_MAX - 1)
+
+enum {
+ LWTUNNEL_IP_OPT_VXLAN_UNSPEC,
+ LWTUNNEL_IP_OPT_VXLAN_GBP,
+ __LWTUNNEL_IP_OPT_VXLAN_MAX,
+};
+
+#define LWTUNNEL_IP_OPT_VXLAN_MAX (__LWTUNNEL_IP_OPT_VXLAN_MAX - 1)
+
+enum {
+ LWTUNNEL_IP_OPT_ERSPAN_UNSPEC,
+ LWTUNNEL_IP_OPT_ERSPAN_VER,
+ LWTUNNEL_IP_OPT_ERSPAN_INDEX,
+ LWTUNNEL_IP_OPT_ERSPAN_DIR,
+ LWTUNNEL_IP_OPT_ERSPAN_HWID,
+ __LWTUNNEL_IP_OPT_ERSPAN_MAX,
+};
+
+#define LWTUNNEL_IP_OPT_ERSPAN_MAX (__LWTUNNEL_IP_OPT_ERSPAN_MAX - 1)
+
+enum {
LWT_BPF_PROG_UNSPEC,
LWT_BPF_PROG_FD,
LWT_BPF_PROG_NAME,
diff --git a/include/uapi/linux/netfilter/ipset/ip_set.h b/include/uapi/linux/netfilter/ipset/ip_set.h
index eea166c52c36..11a72a938eb1 100644
--- a/include/uapi/linux/netfilter/ipset/ip_set.h
+++ b/include/uapi/linux/netfilter/ipset/ip_set.h
@@ -205,6 +205,8 @@ enum ipset_cadt_flags {
IPSET_FLAG_WITH_FORCEADD = (1 << IPSET_FLAG_BIT_WITH_FORCEADD),
IPSET_FLAG_BIT_WITH_SKBINFO = 6,
IPSET_FLAG_WITH_SKBINFO = (1 << IPSET_FLAG_BIT_WITH_SKBINFO),
+ IPSET_FLAG_BIT_IFACE_WILDCARD = 7,
+ IPSET_FLAG_IFACE_WILDCARD = (1 << IPSET_FLAG_BIT_IFACE_WILDCARD),
IPSET_FLAG_CADT_MAX = 15,
};
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
index ed8881ad18ed..bb9b049310df 100644
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -144,12 +144,14 @@ enum nft_list_attributes {
* @NFTA_HOOK_HOOKNUM: netfilter hook number (NLA_U32)
* @NFTA_HOOK_PRIORITY: netfilter hook priority (NLA_U32)
* @NFTA_HOOK_DEV: netdevice name (NLA_STRING)
+ * @NFTA_HOOK_DEVS: list of netdevices (NLA_NESTED)
*/
enum nft_hook_attributes {
NFTA_HOOK_UNSPEC,
NFTA_HOOK_HOOKNUM,
NFTA_HOOK_PRIORITY,
NFTA_HOOK_DEV,
+ NFTA_HOOK_DEVS,
__NFTA_HOOK_MAX
};
#define NFTA_HOOK_MAX (__NFTA_HOOK_MAX - 1)
@@ -1516,6 +1518,7 @@ enum nft_object_attributes {
 * @NFTA_FLOWTABLE_HOOK: netfilter hook configuration (NLA_U32)
* @NFTA_FLOWTABLE_USE: number of references to this flow table (NLA_U32)
* @NFTA_FLOWTABLE_HANDLE: object handle (NLA_U64)
+ * @NFTA_FLOWTABLE_FLAGS: flags (NLA_U32)
*/
enum nft_flowtable_attributes {
NFTA_FLOWTABLE_UNSPEC,
@@ -1525,6 +1528,7 @@ enum nft_flowtable_attributes {
NFTA_FLOWTABLE_USE,
NFTA_FLOWTABLE_HANDLE,
NFTA_FLOWTABLE_PAD,
+ NFTA_FLOWTABLE_FLAGS,
__NFTA_FLOWTABLE_MAX
};
#define NFTA_FLOWTABLE_MAX (__NFTA_FLOWTABLE_MAX - 1)
diff --git a/include/uapi/linux/netfilter_arp/arp_tables.h b/include/uapi/linux/netfilter_arp/arp_tables.h
index a2a0927d9bd6..bbf5af2b67a8 100644
--- a/include/uapi/linux/netfilter_arp/arp_tables.h
+++ b/include/uapi/linux/netfilter_arp/arp_tables.h
@@ -199,7 +199,7 @@ struct arpt_get_entries {
/* Helper functions */
static __inline__ struct xt_entry_target *arpt_get_target(struct arpt_entry *e)
{
- return (void *)e + e->target_offset;
+ return (struct xt_entry_target *)((char *)e + e->target_offset);
}
/*
diff --git a/include/uapi/linux/netfilter_bridge/ebtables.h b/include/uapi/linux/netfilter_bridge/ebtables.h
index 8076c940ffeb..a494cf43a755 100644
--- a/include/uapi/linux/netfilter_bridge/ebtables.h
+++ b/include/uapi/linux/netfilter_bridge/ebtables.h
@@ -194,7 +194,7 @@ struct ebt_entry {
static __inline__ struct ebt_entry_target *
ebt_get_target(struct ebt_entry *e)
{
- return (void *)e + e->target_offset;
+ return (struct ebt_entry_target *)((char *)e + e->target_offset);
}
/* {g,s}etsockopt numbers */
diff --git a/include/uapi/linux/netfilter_ipv4/ip_tables.h b/include/uapi/linux/netfilter_ipv4/ip_tables.h
index 6aaeb14bfce1..50c7fee625ae 100644
--- a/include/uapi/linux/netfilter_ipv4/ip_tables.h
+++ b/include/uapi/linux/netfilter_ipv4/ip_tables.h
@@ -222,7 +222,7 @@ struct ipt_get_entries {
static __inline__ struct xt_entry_target *
ipt_get_target(struct ipt_entry *e)
{
- return (void *)e + e->target_offset;
+ return (struct xt_entry_target *)((char *)e + e->target_offset);
}
/*
diff --git a/include/uapi/linux/netfilter_ipv6/ip6_tables.h b/include/uapi/linux/netfilter_ipv6/ip6_tables.h
index 031d0a43bed2..d9e364f96a5c 100644
--- a/include/uapi/linux/netfilter_ipv6/ip6_tables.h
+++ b/include/uapi/linux/netfilter_ipv6/ip6_tables.h
@@ -262,7 +262,7 @@ struct ip6t_get_entries {
static __inline__ struct xt_entry_target *
ip6t_get_target(struct ip6t_entry *e)
{
- return (void *)e + e->target_offset;
+ return (struct xt_entry_target *)((char *)e + e->target_offset);
}
/*
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index beee59c831a7..341e0e8cae46 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -249,6 +249,22 @@
*/
/**
+ * DOC: VLAN offload support for setting group keys and binding STAs to VLANs
+ *
+ * By setting the @NL80211_EXT_FEATURE_VLAN_OFFLOAD flag, drivers can indicate
+ * that they support offloading VLAN functionality in a manner where the driver
+ * exposes a single netdev that uses VLAN tagged frames; separate VLAN-specific
+ * netdevs can then be added using RTM_NEWLINK/IFLA_VLAN_ID, similarly to the
+ * Ethernet case. Frames received from stations that are not assigned to any
+ * VLAN are delivered on the main netdev, and frames to such stations can be
+ * sent through that main netdev.
+ *
+ * %NL80211_CMD_NEW_KEY (for group keys), %NL80211_CMD_NEW_STATION, and
+ * %NL80211_CMD_SET_STATION may optionally specify the VLAN ID using
+ * %NL80211_ATTR_VLAN_ID.
+ */
+
+/**
* enum nl80211_commands - supported nl80211 commands
*
* @NL80211_CMD_UNSPEC: unspecified command to catch errors
@@ -571,6 +587,14 @@
* set of BSSID,frequency parameters is used (i.e., either the enforcing
* %NL80211_ATTR_MAC,%NL80211_ATTR_WIPHY_FREQ or the less strict
* %NL80211_ATTR_MAC_HINT and %NL80211_ATTR_WIPHY_FREQ_HINT).
+ * The driver shall not modify the IEs specified through %NL80211_ATTR_IE if
+ * %NL80211_ATTR_MAC is included. However, if %NL80211_ATTR_MAC_HINT is
+ * included, the IEs specified through %NL80211_ATTR_IE were chosen by user
+ * space based on the best possible BSS selected. Thus, if the driver ends
+ * up selecting a different BSS, it can modify these IEs accordingly (e.g.
+ * userspace asks the driver to perform PMKSA caching with BSS1 and the
+ * driver ends up selecting BSS2 with a different PMKSA cache entry; the
+ * RSNIE has to be updated with the appropriate PMKID).
* %NL80211_ATTR_PREV_BSSID can be used to request a reassociation within
* the ESS in case the device is already associated and an association with
* a different BSS is desired.
@@ -2373,6 +2397,9 @@ enum nl80211_commands {
* the allowed channel bandwidth configurations. (u8 attribute)
* Defined by IEEE P802.11ay/D4.0 section 9.4.2.251, Table 13.
*
+ * @NL80211_ATTR_VLAN_ID: VLAN ID (1..4094) for the station and VLAN group key
+ * (u16).
+ *
* @NUM_NL80211_ATTR: total number of nl80211_attrs available
* @NL80211_ATTR_MAX: highest attribute number currently defined
* @__NL80211_ATTR_AFTER_LAST: internal use
@@ -2835,6 +2862,8 @@ enum nl80211_attrs {
NL80211_ATTR_WIPHY_EDMG_CHANNELS,
NL80211_ATTR_WIPHY_EDMG_BW_CONFIG,
+ NL80211_ATTR_VLAN_ID,
+
/* add attributes here, update the policy in nl80211.c */
__NL80211_ATTR_AFTER_LAST,
@@ -5484,6 +5513,10 @@ enum nl80211_feature_flags {
* @NL80211_EXT_FEATURE_SAE_OFFLOAD: Device wants to do SAE authentication in
* station mode (SAE password is passed as part of the connect command).
*
+ * @NL80211_EXT_FEATURE_VLAN_OFFLOAD: The driver supports a single netdev
+ * with VLAN tagged frames and separate VLAN-specific netdevs added using
+ * vconfig, similarly to the Ethernet case.
+ *
* @NUM_NL80211_EXT_FEATURES: number of extended features.
* @MAX_NL80211_EXT_FEATURES: highest extended feature index.
*/
@@ -5529,6 +5562,7 @@ enum nl80211_ext_feature_index {
NL80211_EXT_FEATURE_EXT_KEY_ID,
NL80211_EXT_FEATURE_STA_TX_PWR,
NL80211_EXT_FEATURE_SAE_OFFLOAD,
+ NL80211_EXT_FEATURE_VLAN_OFFLOAD,
/* add new features before the definition below */
NUM_NL80211_EXT_FEATURES,
diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
index 1887a451c388..a87b44cd5590 100644
--- a/include/uapi/linux/openvswitch.h
+++ b/include/uapi/linux/openvswitch.h
@@ -173,6 +173,7 @@ enum ovs_packet_cmd {
* @OVS_PACKET_ATTR_LEN: Packet size before truncation.
 * %OVS_PACKET_ATTR_USERSPACE action specifies the maximum received
 * fragment size.
+ * @OVS_PACKET_ATTR_HASH: Packet hash info (e.g. hash, sw_hash and l4_hash in skb).
*
* These attributes follow the &struct ovs_header within the Generic Netlink
* payload for %OVS_PACKET_* commands.
@@ -190,7 +191,8 @@ enum ovs_packet_attr {
OVS_PACKET_ATTR_PROBE, /* Packet operation is a feature probe,
error logging should be suppressed. */
OVS_PACKET_ATTR_MRU, /* Maximum received IP fragment size. */
- OVS_PACKET_ATTR_LEN, /* Packet size before truncation. */
+ OVS_PACKET_ATTR_LEN, /* Packet size before truncation. */
+ OVS_PACKET_ATTR_HASH, /* Packet hash. */
__OVS_PACKET_ATTR_MAX
};
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index bb7b271397a6..377d794d3105 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -141,8 +141,9 @@ enum perf_event_sample_format {
PERF_SAMPLE_TRANSACTION = 1U << 17,
PERF_SAMPLE_REGS_INTR = 1U << 18,
PERF_SAMPLE_PHYS_ADDR = 1U << 19,
+ PERF_SAMPLE_AUX = 1U << 20,
- PERF_SAMPLE_MAX = 1U << 20, /* non-ABI */
+ PERF_SAMPLE_MAX = 1U << 21, /* non-ABI */
__PERF_SAMPLE_CALLCHAIN_EARLY = 1ULL << 63, /* non-ABI; internal use */
};
@@ -300,6 +301,7 @@ enum perf_event_read_format {
/* add: sample_stack_user */
#define PERF_ATTR_SIZE_VER4 104 /* add: sample_regs_intr */
#define PERF_ATTR_SIZE_VER5 112 /* add: aux_watermark */
+#define PERF_ATTR_SIZE_VER6 120 /* add: aux_sample_size */
/*
* Hardware event_id to monitor via a performance monitoring event:
@@ -424,7 +426,9 @@ struct perf_event_attr {
*/
__u32 aux_watermark;
__u16 sample_max_stack;
- __u16 __reserved_2; /* align to __u64 */
+ __u16 __reserved_2;
+ __u32 aux_sample_size;
+ __u32 __reserved_3;
};
/*
@@ -864,6 +868,8 @@ enum perf_event_type {
* { u64 abi; # enum perf_sample_regs_abi
* u64 regs[weight(mask)]; } && PERF_SAMPLE_REGS_INTR
* { u64 phys_addr;} && PERF_SAMPLE_PHYS_ADDR
+ * { u64 size;
+ * char data[size]; } && PERF_SAMPLE_AUX
* };
*/
PERF_RECORD_SAMPLE = 9,
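
A hedged sketch of requesting AUX data with each sample using the new
fields; in practice the sampling event must be grouped with an AUX-area
event (e.g. an intel_pt event passed as group_fd), whose setup is elided
here:

    #include <linux/perf_event.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static int open_aux_sampling_event(int cpu, int aux_group_fd)
    {
        struct perf_event_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);        /* >= PERF_ATTR_SIZE_VER6 */
        attr.type = PERF_TYPE_SOFTWARE;
        attr.config = PERF_COUNT_SW_CPU_CLOCK;
        attr.sample_period = 100000;
        attr.sample_type = PERF_SAMPLE_TID | PERF_SAMPLE_AUX;
        attr.aux_sample_size = 4096;     /* bytes of AUX data per sample */

        return (int)syscall(__NR_perf_event_open, &attr,
                            -1 /* pid */, cpu, aux_group_fd, 0);
    }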
diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h
index a6aa466fac9e..449a63971451 100644
--- a/include/uapi/linux/pkt_cls.h
+++ b/include/uapi/linux/pkt_cls.h
@@ -16,9 +16,14 @@ enum {
TCA_ACT_STATS,
TCA_ACT_PAD,
TCA_ACT_COOKIE,
+ TCA_ACT_FLAGS,
__TCA_ACT_MAX
};
+#define TCA_ACT_FLAGS_NO_PERCPU_STATS 1 /* Don't use percpu allocator for
+ * actions stats.
+ */
+
#define TCA_ACT_MAX __TCA_ACT_MAX
#define TCA_OLD_COMPAT (TCA_ACT_MAX+1)
#define TCA_ACT_MAX_PRIO 32
@@ -566,6 +571,14 @@ enum {
* TCA_FLOWER_KEY_ENC_OPT_GENEVE_
* attributes
*/
+ TCA_FLOWER_KEY_ENC_OPTS_VXLAN, /* Nested
+ * TCA_FLOWER_KEY_ENC_OPT_VXLAN_
+ * attributes
+ */
+ TCA_FLOWER_KEY_ENC_OPTS_ERSPAN, /* Nested
+ * TCA_FLOWER_KEY_ENC_OPT_ERSPAN_
+ * attributes
+ */
__TCA_FLOWER_KEY_ENC_OPTS_MAX,
};
@@ -584,6 +597,27 @@ enum {
(__TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX - 1)
enum {
+ TCA_FLOWER_KEY_ENC_OPT_VXLAN_UNSPEC,
+ TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP, /* u32 */
+ __TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX,
+};
+
+#define TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX \
+ (__TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX - 1)
+
+enum {
+ TCA_FLOWER_KEY_ENC_OPT_ERSPAN_UNSPEC,
+ TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER, /* u8 */
+ TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX, /* be32 */
+ TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR, /* u8 */
+ TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID, /* u8 */
+ __TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX,
+};
+
+#define TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX \
+ (__TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX - 1)
+
+enum {
TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT = (1 << 0),
TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST = (1 << 1),
};
diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
index 5011259b8f67..9f1a72876212 100644
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -950,19 +950,25 @@ enum {
TCA_PIE_BETA,
TCA_PIE_ECN,
TCA_PIE_BYTEMODE,
+ TCA_PIE_DQ_RATE_ESTIMATOR,
__TCA_PIE_MAX
};
#define TCA_PIE_MAX (__TCA_PIE_MAX - 1)
struct tc_pie_xstats {
- __u64 prob; /* current probability */
- __u32 delay; /* current delay in ms */
- __u32 avg_dq_rate; /* current average dq_rate in bits/pie_time */
- __u32 packets_in; /* total number of packets enqueued */
- __u32 dropped; /* packets dropped due to pie_action */
- __u32 overlimit; /* dropped due to lack of space in queue */
- __u32 maxq; /* maximum queue size */
- __u32 ecn_mark; /* packets marked with ecn*/
+ __u64 prob; /* current probability */
+ __u32 delay; /* current delay in ms */
+ __u32 avg_dq_rate; /* current average dq_rate in
+ * bits/pie_time
+ */
+ __u32 dq_rate_estimating; /* is avg_dq_rate being calculated? */
+ __u32 packets_in; /* total number of packets enqueued */
+ __u32 dropped; /* packets dropped due to pie_action */
+ __u32 overlimit; /* dropped due to lack of space
+ * in queue
+ */
+ __u32 maxq; /* maximum queue size */
+ __u32 ecn_mark; /* packets marked with ecn*/
};
/* CBS */
diff --git a/include/uapi/linux/psp-sev.h b/include/uapi/linux/psp-sev.h
index 592a0c1b77c9..0549a5c622bf 100644
--- a/include/uapi/linux/psp-sev.h
+++ b/include/uapi/linux/psp-sev.h
@@ -58,6 +58,9 @@ typedef enum {
SEV_RET_HWSEV_RET_PLATFORM,
SEV_RET_HWSEV_RET_UNSAFE,
SEV_RET_UNSUPPORTED,
+ SEV_RET_INVALID_PARAM,
+ SEV_RET_RESOURCE_LIMIT,
+ SEV_RET_SECURE_DATA_INVALID,
SEV_RET_MAX,
} sev_ret_code;
diff --git a/include/uapi/linux/ptp_clock.h b/include/uapi/linux/ptp_clock.h
index 59e89a1bc3bb..9dc9d0079e98 100644
--- a/include/uapi/linux/ptp_clock.h
+++ b/include/uapi/linux/ptp_clock.h
@@ -31,13 +31,16 @@
#define PTP_ENABLE_FEATURE (1<<0)
#define PTP_RISING_EDGE (1<<1)
#define PTP_FALLING_EDGE (1<<2)
+#define PTP_STRICT_FLAGS (1<<3)
+#define PTP_EXTTS_EDGES (PTP_RISING_EDGE | PTP_FALLING_EDGE)
/*
* flag fields valid for the new PTP_EXTTS_REQUEST2 ioctl.
*/
#define PTP_EXTTS_VALID_FLAGS (PTP_ENABLE_FEATURE | \
PTP_RISING_EDGE | \
- PTP_FALLING_EDGE)
+ PTP_FALLING_EDGE | \
+ PTP_STRICT_FLAGS)
/*
* flag fields valid for the original PTP_EXTTS_REQUEST ioctl.
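
An illustrative sketch (assuming an open /dev/ptpN descriptor) of asking
for strictly validated rising-edge timestamps through the REQUEST2 ioctl:

    #include <linux/ptp_clock.h>
    #include <string.h>
    #include <sys/ioctl.h>

    static int enable_extts(int ptp_fd, unsigned int index)
    {
        struct ptp_extts_request req;

        memset(&req, 0, sizeof(req));
        req.index = index;
        /* PTP_STRICT_FLAGS makes the driver reject unsupported combos. */
        req.flags = PTP_ENABLE_FEATURE | PTP_RISING_EDGE | PTP_STRICT_FLAGS;

        return ioctl(ptp_fd, PTP_EXTTS_REQUEST2, &req);
    }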
diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h
index ce2a623abb75..1418a8362bb7 100644
--- a/include/uapi/linux/rtnetlink.h
+++ b/include/uapi/linux/rtnetlink.h
@@ -164,6 +164,13 @@ enum {
RTM_GETNEXTHOP,
#define RTM_GETNEXTHOP RTM_GETNEXTHOP
+ RTM_NEWLINKPROP = 108,
+#define RTM_NEWLINKPROP RTM_NEWLINKPROP
+ RTM_DELLINKPROP,
+#define RTM_DELLINKPROP RTM_DELLINKPROP
+ RTM_GETLINKPROP,
+#define RTM_GETLINKPROP RTM_GETLINKPROP
+
__RTM_MAX,
#define RTM_MAX (((__RTM_MAX + 3) & ~3) - 1)
};
diff --git a/include/uapi/linux/sched.h b/include/uapi/linux/sched.h
index 25b4fa00bad1..4a0217832464 100644
--- a/include/uapi/linux/sched.h
+++ b/include/uapi/linux/sched.h
@@ -33,31 +33,48 @@
#define CLONE_NEWNET 0x40000000 /* New network namespace */
#define CLONE_IO 0x80000000 /* Clone io context */
+/* Flags for the clone3() syscall. */
+#define CLONE_CLEAR_SIGHAND 0x100000000ULL /* Clear any signal handler and reset to SIG_DFL. */
+
#ifndef __ASSEMBLY__
/**
* struct clone_args - arguments for the clone3 syscall
- * @flags: Flags for the new process as listed above.
- * All flags are valid except for CSIGNAL and
- * CLONE_DETACHED.
- * @pidfd: If CLONE_PIDFD is set, a pidfd will be
- * returned in this argument.
- * @child_tid: If CLONE_CHILD_SETTID is set, the TID of the
- * child process will be returned in the child's
- * memory.
- * @parent_tid: If CLONE_PARENT_SETTID is set, the TID of
- * the child process will be returned in the
- * parent's memory.
- * @exit_signal: The exit_signal the parent process will be
- * sent when the child exits.
- * @stack: Specify the location of the stack for the
- * child process.
- * Note, @stack is expected to point to the
- * lowest address. The stack direction will be
- * determined by the kernel and set up
- * appropriately based on @stack_size.
- * @stack_size: The size of the stack for the child process.
- * @tls: If CLONE_SETTLS is set, the tls descriptor
- * is set to tls.
+ * @flags: Flags for the new process as listed above.
+ * All flags are valid except for CSIGNAL and
+ * CLONE_DETACHED.
+ * @pidfd: If CLONE_PIDFD is set, a pidfd will be
+ * returned in this argument.
+ * @child_tid: If CLONE_CHILD_SETTID is set, the TID of the
+ * child process will be returned in the child's
+ * memory.
+ * @parent_tid: If CLONE_PARENT_SETTID is set, the TID of
+ * the child process will be returned in the
+ * parent's memory.
+ * @exit_signal: The exit_signal the parent process will be
+ * sent when the child exits.
+ * @stack: Specify the location of the stack for the
+ * child process.
+ * Note, @stack is expected to point to the
+ * lowest address. The stack direction will be
+ * determined by the kernel and set up
+ * appropriately based on @stack_size.
+ * @stack_size: The size of the stack for the child process.
+ * @tls: If CLONE_SETTLS is set, the tls descriptor
+ * is set to tls.
+ * @set_tid: Pointer to an array of type *pid_t. The size
+ * of the array is defined using @set_tid_size.
+ * This array is used to select PIDs/TIDs for
+ * newly created processes. The first element in
+ * this array defines the PID in the most nested
+ * PID namespace. Each additional element in the
+ * array defines the PID in the parent PID
+ * namespace of the original PID namespace. If the
+ * array has fewer entries than the number of
+ * currently nested PID namespaces, only the PIDs
+ * in the corresponding namespaces are set.
+ * @set_tid_size: This defines the size of the array referenced
+ * in @set_tid. This cannot be larger than the
+ * kernel's limit of nested PID namespaces.
*
* The structure is versioned by size and thus extensible.
* New struct members must go at the end of the struct and
@@ -72,10 +89,13 @@ struct clone_args {
__aligned_u64 stack;
__aligned_u64 stack_size;
__aligned_u64 tls;
+ __aligned_u64 set_tid;
+ __aligned_u64 set_tid_size;
};
#endif
#define CLONE_ARGS_SIZE_VER0 64 /* sizeof first published struct */
+#define CLONE_ARGS_SIZE_VER1 80 /* sizeof second published struct */
/*
* Scheduling policies
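
To make the new set_tid interface concrete, a sketch (not part of the
patch) of requesting PID 1 for a child in a fresh PID namespace; it assumes
__NR_clone3 is defined by the installed headers and that the caller holds
the required privilege over the namespaces involved:

    #define _GNU_SOURCE
    #include <linux/sched.h>       /* struct clone_args, CLONE_* */
    #include <signal.h>
    #include <stdint.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static pid_t clone3_pid1(void)
    {
        pid_t want = 1;            /* PID in the most nested namespace */
        struct clone_args args;

        memset(&args, 0, sizeof(args));
        args.flags = CLONE_NEWPID;
        args.exit_signal = SIGCHLD;
        args.set_tid = (__u64)(uintptr_t)&want;
        args.set_tid_size = 1;

        return (pid_t)syscall(__NR_clone3, &args, sizeof(args));
    }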
diff --git a/include/uapi/linux/sctp.h b/include/uapi/linux/sctp.h
index 6d5b164af55c..28ad40d9acba 100644
--- a/include/uapi/linux/sctp.h
+++ b/include/uapi/linux/sctp.h
@@ -105,6 +105,7 @@ typedef __s32 sctp_assoc_t;
#define SCTP_DEFAULT_SNDINFO 34
#define SCTP_AUTH_DEACTIVATE_KEY 35
#define SCTP_REUSE_PORT 36
+#define SCTP_PEER_ADDR_THLDS_V2 37
/* Internal Socket Options. Some of the sctp library functions are
* implemented using these socket options.
@@ -137,6 +138,8 @@ typedef __s32 sctp_assoc_t;
#define SCTP_ASCONF_SUPPORTED 128
#define SCTP_AUTH_SUPPORTED 129
#define SCTP_ECN_SUPPORTED 130
+#define SCTP_EXPOSE_POTENTIALLY_FAILED_STATE 131
+#define SCTP_EXPOSE_PF_STATE SCTP_EXPOSE_POTENTIALLY_FAILED_STATE
/* PR-SCTP policies */
#define SCTP_PR_SCTP_NONE 0x0000
@@ -410,6 +413,8 @@ enum sctp_spc_state {
SCTP_ADDR_ADDED,
SCTP_ADDR_MADE_PRIM,
SCTP_ADDR_CONFIRMED,
+ SCTP_ADDR_POTENTIALLY_FAILED,
+#define SCTP_ADDR_PF SCTP_ADDR_POTENTIALLY_FAILED
};
@@ -449,6 +454,16 @@ struct sctp_send_failed {
__u8 ssf_data[0];
};
+struct sctp_send_failed_event {
+ __u16 ssf_type;
+ __u16 ssf_flags;
+ __u32 ssf_length;
+ __u32 ssf_error;
+ struct sctp_sndinfo ssfe_info;
+ sctp_assoc_t ssf_assoc_id;
+ __u8 ssf_data[0];
+};
+
/*
* ssf_flags: 16 bits (unsigned integer)
*
@@ -605,6 +620,7 @@ struct sctp_event_subscribe {
__u8 sctp_stream_reset_event;
__u8 sctp_assoc_reset_event;
__u8 sctp_stream_change_event;
+ __u8 sctp_send_failure_event_event;
};
/*
@@ -632,6 +648,7 @@ union sctp_notification {
struct sctp_stream_reset_event sn_strreset_event;
struct sctp_assoc_reset_event sn_assocreset_event;
struct sctp_stream_change_event sn_strchange_event;
+ struct sctp_send_failed_event sn_send_failed_event;
};
/* Section 5.3.1
@@ -667,7 +684,9 @@ enum sctp_sn_type {
#define SCTP_ASSOC_RESET_EVENT SCTP_ASSOC_RESET_EVENT
SCTP_STREAM_CHANGE_EVENT,
#define SCTP_STREAM_CHANGE_EVENT SCTP_STREAM_CHANGE_EVENT
- SCTP_SN_TYPE_MAX = SCTP_STREAM_CHANGE_EVENT,
+ SCTP_SEND_FAILED_EVENT,
+#define SCTP_SEND_FAILED_EVENT SCTP_SEND_FAILED_EVENT
+ SCTP_SN_TYPE_MAX = SCTP_SEND_FAILED_EVENT,
#define SCTP_SN_TYPE_MAX SCTP_SN_TYPE_MAX
};
@@ -919,6 +938,7 @@ struct sctp_paddrinfo {
enum sctp_spinfo_state {
SCTP_INACTIVE,
SCTP_PF,
+#define SCTP_POTENTIALLY_FAILED SCTP_PF
SCTP_ACTIVE,
SCTP_UNCONFIRMED,
SCTP_UNKNOWN = 0xffff /* Value used for transport state unknown */
@@ -1068,6 +1088,15 @@ struct sctp_paddrthlds {
__u16 spt_pathpfthld;
};
+/* Use a new structure with spt_pathcpthld added, for backward compatibility */
+struct sctp_paddrthlds_v2 {
+ sctp_assoc_t spt_assoc_id;
+ struct sockaddr_storage spt_address;
+ __u16 spt_pathmaxrxt;
+ __u16 spt_pathpfthld;
+ __u16 spt_pathcpthld;
+};
+
/*
* Socket Option for Getting the Association/Stream-Specific PR-SCTP Status
*/
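
A sketch of the intended use of the _V2 structure and the matching socket
option (assumes the UAPI header is used directly and sd is a connected SCTP
socket):

    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <linux/sctp.h>
    #include <string.h>

    static int set_peer_thresholds(int sd, sctp_assoc_t assoc)
    {
        struct sctp_paddrthlds_v2 th;

        memset(&th, 0, sizeof(th));    /* zeroed spt_address: all peers */
        th.spt_assoc_id = assoc;
        th.spt_pathmaxrxt = 5;         /* retransmissions before failure */
        th.spt_pathpfthld = 2;         /* active -> potentially failed */
        th.spt_pathcpthld = 3;         /* threshold to switch primary */

        return setsockopt(sd, IPPROTO_SCTP, SCTP_PEER_ADDR_THLDS_V2,
                          &th, sizeof(th));
    }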
diff --git a/include/uapi/linux/sed-opal.h b/include/uapi/linux/sed-opal.h
index c6d035fa1b6c..6f5af1a84213 100644
--- a/include/uapi/linux/sed-opal.h
+++ b/include/uapi/linux/sed-opal.h
@@ -113,6 +113,25 @@ struct opal_shadow_mbr {
__u64 size;
};
+/* Opal table operations */
+enum opal_table_ops {
+ OPAL_READ_TABLE,
+ OPAL_WRITE_TABLE,
+};
+
+#define OPAL_UID_LENGTH 8
+struct opal_read_write_table {
+ struct opal_key key;
+ const __u64 data;
+ const __u8 table_uid[OPAL_UID_LENGTH];
+ __u64 offset;
+ __u64 size;
+#define OPAL_TABLE_READ (1 << OPAL_READ_TABLE)
+#define OPAL_TABLE_WRITE (1 << OPAL_WRITE_TABLE)
+ __u64 flags;
+ __u64 priv;
+};
+
#define IOC_OPAL_SAVE _IOW('p', 220, struct opal_lock_unlock)
#define IOC_OPAL_LOCK_UNLOCK _IOW('p', 221, struct opal_lock_unlock)
#define IOC_OPAL_TAKE_OWNERSHIP _IOW('p', 222, struct opal_key)
@@ -128,5 +147,6 @@ struct opal_shadow_mbr {
#define IOC_OPAL_PSID_REVERT_TPR _IOW('p', 232, struct opal_key)
#define IOC_OPAL_MBR_DONE _IOW('p', 233, struct opal_mbr_done)
#define IOC_OPAL_WRITE_SHADOW_MBR _IOW('p', 234, struct opal_shadow_mbr)
+#define IOC_OPAL_GENERIC_TABLE_RW _IOW('p', 235, struct opal_read_write_table)
#endif /* _UAPI_SED_OPAL_H */
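
A hedged sketch of driving the new generic table ioctl; the table UID is a
caller-supplied placeholder and rw.key (authority credentials) is left
zeroed here, which a real caller must fill in:

    #include <linux/sed-opal.h>
    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>

    static int opal_table_read(int fd, const __u8 uid[OPAL_UID_LENGTH],
                               void *buf, __u64 len)
    {
        /* data and table_uid are const members: initialize, then memcpy. */
        struct opal_read_write_table rw = {
            .data = (__u64)(uintptr_t)buf,
            .offset = 0,
            .size = len,
            .flags = OPAL_TABLE_READ,
        };

        memcpy((void *)rw.table_uid, uid, OPAL_UID_LENGTH);
        return ioctl(fd, IOC_OPAL_GENERIC_TABLE_RW, &rw);
    }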
diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h
index 549a31c29f7d..7eee233e78d2 100644
--- a/include/uapi/linux/snmp.h
+++ b/include/uapi/linux/snmp.h
@@ -323,4 +323,21 @@ enum
__LINUX_MIB_XFRMMAX
};
+/* linux TLS mib definitions */
+enum
+{
+ LINUX_MIB_TLSNUM = 0,
+ LINUX_MIB_TLSCURRTXSW, /* TlsCurrTxSw */
+ LINUX_MIB_TLSCURRRXSW, /* TlsCurrRxSw */
+ LINUX_MIB_TLSCURRTXDEVICE, /* TlsCurrTxDevice */
+ LINUX_MIB_TLSCURRRXDEVICE, /* TlsCurrRxDevice */
+ LINUX_MIB_TLSTXSW, /* TlsTxSw */
+ LINUX_MIB_TLSRXSW, /* TlsRxSw */
+ LINUX_MIB_TLSTXDEVICE, /* TlsTxDevice */
+ LINUX_MIB_TLSRXDEVICE, /* TlsRxDevice */
+ LINUX_MIB_TLSDECRYPTERROR, /* TlsDecryptError */
+ LINUX_MIB_TLSRXDEVICERESYNC, /* TlsRxDeviceResync */
+ __LINUX_MIB_TLSMAX
+};
+
#endif /* _LINUX_SNMP_H */
diff --git a/include/uapi/linux/stat.h b/include/uapi/linux/stat.h
index 7b35e98d3c58..ad80a5c885d5 100644
--- a/include/uapi/linux/stat.h
+++ b/include/uapi/linux/stat.h
@@ -167,8 +167,8 @@ struct statx {
#define STATX_ATTR_APPEND 0x00000020 /* [I] File is append-only */
#define STATX_ATTR_NODUMP 0x00000040 /* [I] File is not to be dumped */
#define STATX_ATTR_ENCRYPTED 0x00000800 /* [I] File requires key to decrypt in fs */
-
#define STATX_ATTR_AUTOMOUNT 0x00001000 /* Dir: Automount trigger */
+#define STATX_ATTR_VERITY 0x00100000 /* [I] Verity protected file */
#endif /* _UAPI_LINUX_STAT_H */
diff --git a/include/uapi/linux/tc_act/tc_tunnel_key.h b/include/uapi/linux/tc_act/tc_tunnel_key.h
index 41c8b462c177..3f10dc4e7a4b 100644
--- a/include/uapi/linux/tc_act/tc_tunnel_key.h
+++ b/include/uapi/linux/tc_act/tc_tunnel_key.h
@@ -50,6 +50,14 @@ enum {
* TCA_TUNNEL_KEY_ENC_OPTS_
* attributes
*/
+ TCA_TUNNEL_KEY_ENC_OPTS_VXLAN, /* Nested
+ * TCA_TUNNEL_KEY_ENC_OPTS_
+ * attributes
+ */
+ TCA_TUNNEL_KEY_ENC_OPTS_ERSPAN, /* Nested
+ * TCA_TUNNEL_KEY_ENC_OPTS_
+ * attributes
+ */
__TCA_TUNNEL_KEY_ENC_OPTS_MAX,
};
@@ -67,4 +75,25 @@ enum {
#define TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX \
(__TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX - 1)
+enum {
+ TCA_TUNNEL_KEY_ENC_OPT_VXLAN_UNSPEC,
+ TCA_TUNNEL_KEY_ENC_OPT_VXLAN_GBP, /* u32 */
+ __TCA_TUNNEL_KEY_ENC_OPT_VXLAN_MAX,
+};
+
+#define TCA_TUNNEL_KEY_ENC_OPT_VXLAN_MAX \
+ (__TCA_TUNNEL_KEY_ENC_OPT_VXLAN_MAX - 1)
+
+enum {
+ TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_UNSPEC,
+ TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_VER, /* u8 */
+ TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_INDEX, /* be32 */
+ TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_DIR, /* u8 */
+ TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_HWID, /* u8 */
+ __TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_MAX,
+};
+
+#define TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_MAX \
+ (__TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_MAX - 1)
+
#endif
diff --git a/include/uapi/linux/tcp.h b/include/uapi/linux/tcp.h
index 81e697978e8b..74af1f759cee 100644
--- a/include/uapi/linux/tcp.h
+++ b/include/uapi/linux/tcp.h
@@ -155,6 +155,14 @@ enum {
TCP_QUEUES_NR,
};
+/* why fastopen failed from client perspective */
+enum tcp_fastopen_client_fail {
+ TFO_STATUS_UNSPEC, /* catch-all */
+ TFO_COOKIE_UNAVAILABLE, /* if not in TFO_CLIENT_NO_COOKIE mode */
+ TFO_DATA_NOT_ACKED, /* SYN-ACK did not ack SYN data */
+ TFO_SYN_RETRANSMITTED, /* SYN-ACK did not ack SYN data after timeout */
+};
+
/* for TCP_INFO socket option */
#define TCPI_OPT_TIMESTAMPS 1
#define TCPI_OPT_SACK 2
@@ -211,7 +219,7 @@ struct tcp_info {
__u8 tcpi_backoff;
__u8 tcpi_options;
__u8 tcpi_snd_wscale : 4, tcpi_rcv_wscale : 4;
- __u8 tcpi_delivery_rate_app_limited:1;
+ __u8 tcpi_delivery_rate_app_limited:1, tcpi_fastopen_client_fail:2;
__u32 tcpi_rto;
__u32 tcpi_ato;
diff --git a/include/uapi/linux/tipc.h b/include/uapi/linux/tipc.h
index 7df026ea6aff..add01db1daef 100644
--- a/include/uapi/linux/tipc.h
+++ b/include/uapi/linux/tipc.h
@@ -191,6 +191,7 @@ struct sockaddr_tipc {
#define TIPC_GROUP_JOIN 135 /* Takes struct tipc_group_req* */
#define TIPC_GROUP_LEAVE 136 /* No argument */
#define TIPC_SOCK_RECVQ_USED 137 /* Default: none (read only) */
+#define TIPC_NODELAY 138 /* Default: false */
/*
* Flag values
@@ -232,6 +233,27 @@ struct tipc_sioc_nodeid_req {
char node_id[TIPC_NODEID_LEN];
};
+/*
+ * TIPC Crypto, AEAD
+ */
+#define TIPC_AEAD_ALG_NAME (32)
+
+struct tipc_aead_key {
+ char alg_name[TIPC_AEAD_ALG_NAME];
+ unsigned int keylen; /* in bytes */
+ char key[];
+};
+
+#define TIPC_AEAD_KEYLEN_MIN (16 + 4)
+#define TIPC_AEAD_KEYLEN_MAX (32 + 4)
+#define TIPC_AEAD_KEY_SIZE_MAX (sizeof(struct tipc_aead_key) + \
+ TIPC_AEAD_KEYLEN_MAX)
+
+static inline int tipc_aead_key_size(struct tipc_aead_key *key)
+{
+ return sizeof(*key) + key->keylen;
+}
+
/* The macros and functions below are deprecated:
*/
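
The inline helper above sizes the flexible-array blob; for illustration, a
sketch of building one for TIPC_NL_KEY_SET (the "gcm(aes)" algorithm name
is an assumption, not mandated by this header):

    #include <linux/tipc.h>
    #include <stdlib.h>
    #include <string.h>

    static struct tipc_aead_key *make_tipc_key(const void *key,
                                               unsigned int keylen)
    {
        struct tipc_aead_key *k;

        if (keylen < TIPC_AEAD_KEYLEN_MIN || keylen > TIPC_AEAD_KEYLEN_MAX)
            return NULL;
        k = calloc(1, sizeof(*k) + keylen);
        if (!k)
            return NULL;
        strncpy(k->alg_name, "gcm(aes)", TIPC_AEAD_ALG_NAME - 1);
        k->keylen = keylen;
        memcpy(k->key, key, keylen);
        return k;   /* netlink payload length: tipc_aead_key_size(k) */
    }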
diff --git a/include/uapi/linux/tipc_config.h b/include/uapi/linux/tipc_config.h
index 4955e1a9f1bc..4dfc05651c98 100644
--- a/include/uapi/linux/tipc_config.h
+++ b/include/uapi/linux/tipc_config.h
@@ -309,7 +309,7 @@ static inline int TLV_SET(void *tlv, __u16 type, void *data, __u16 len)
tlv_ptr->tlv_len = htons(tlv_len);
if (len && data) {
memcpy(TLV_DATA(tlv_ptr), data, len);
- memset(TLV_DATA(tlv_ptr) + len, 0, TLV_SPACE(len) - tlv_len);
+ memset((char *)TLV_DATA(tlv_ptr) + len, 0, TLV_SPACE(len) - tlv_len);
}
return TLV_SPACE(len);
}
@@ -409,7 +409,7 @@ static inline int TCM_SET(void *msg, __u16 cmd, __u16 flags,
tcm_hdr->tcm_flags = htons(flags);
if (data_len && data) {
memcpy(TCM_DATA(msg), data, data_len);
- memset(TCM_DATA(msg) + data_len, 0, TCM_SPACE(data_len) - msg_len);
+ memset((char *)TCM_DATA(msg) + data_len, 0, TCM_SPACE(data_len) - msg_len);
}
return TCM_SPACE(data_len);
}
diff --git a/include/uapi/linux/tipc_netlink.h b/include/uapi/linux/tipc_netlink.h
index efb958fd167d..6c2194ab745b 100644
--- a/include/uapi/linux/tipc_netlink.h
+++ b/include/uapi/linux/tipc_netlink.h
@@ -63,6 +63,8 @@ enum {
TIPC_NL_PEER_REMOVE,
TIPC_NL_BEARER_ADD,
TIPC_NL_UDP_GET_REMOTEIP,
+ TIPC_NL_KEY_SET,
+ TIPC_NL_KEY_FLUSH,
__TIPC_NL_CMD_MAX,
TIPC_NL_CMD_MAX = __TIPC_NL_CMD_MAX - 1
@@ -160,6 +162,8 @@ enum {
TIPC_NLA_NODE_UNSPEC,
TIPC_NLA_NODE_ADDR, /* u32 */
TIPC_NLA_NODE_UP, /* flag */
+ TIPC_NLA_NODE_ID, /* data */
+ TIPC_NLA_NODE_KEY, /* data */
__TIPC_NLA_NODE_MAX,
TIPC_NLA_NODE_MAX = __TIPC_NLA_NODE_MAX - 1
diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h
index a2669b79b294..5a7bedee2b0e 100644
--- a/include/uapi/linux/v4l2-controls.h
+++ b/include/uapi/linux/v4l2-controls.h
@@ -1034,6 +1034,7 @@ enum v4l2_jpeg_chroma_subsampling {
#define V4L2_CID_TEST_PATTERN_GREENR (V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 5)
#define V4L2_CID_TEST_PATTERN_BLUE (V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 6)
#define V4L2_CID_TEST_PATTERN_GREENB (V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 7)
+#define V4L2_CID_UNIT_CELL_SIZE (V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 8)
/* Image processing controls */
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index 530638dffd93..04481c717fee 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -422,6 +422,11 @@ struct v4l2_fract {
__u32 denominator;
};
+struct v4l2_area {
+ __u32 width;
+ __u32 height;
+};
+
/**
* struct v4l2_capability - Describes V4L2 device caps returned by VIDIOC_QUERYCAP
*
@@ -755,6 +760,7 @@ struct v4l2_pix_format {
#define V4L2_META_FMT_VSP1_HGT v4l2_fourcc('V', 'S', 'P', 'T') /* R-Car VSP1 2-D Histogram */
#define V4L2_META_FMT_UVC v4l2_fourcc('U', 'V', 'C', 'H') /* UVC Payload Header metadata */
#define V4L2_META_FMT_D4XX v4l2_fourcc('D', '4', 'X', 'X') /* D4XX Payload Header metadata */
+#define V4L2_META_FMT_VIVID v4l2_fourcc('V', 'I', 'V', 'D') /* Vivid Metadata */
/* priv field value to indicates that subsequent fields are valid. */
#define V4L2_PIX_FMT_PRIV_MAGIC 0xfeedcafe
@@ -915,11 +921,12 @@ struct v4l2_requestbuffers {
};
/* capabilities for struct v4l2_requestbuffers and v4l2_create_buffers */
-#define V4L2_BUF_CAP_SUPPORTS_MMAP (1 << 0)
-#define V4L2_BUF_CAP_SUPPORTS_USERPTR (1 << 1)
-#define V4L2_BUF_CAP_SUPPORTS_DMABUF (1 << 2)
-#define V4L2_BUF_CAP_SUPPORTS_REQUESTS (1 << 3)
-#define V4L2_BUF_CAP_SUPPORTS_ORPHANED_BUFS (1 << 4)
+#define V4L2_BUF_CAP_SUPPORTS_MMAP (1 << 0)
+#define V4L2_BUF_CAP_SUPPORTS_USERPTR (1 << 1)
+#define V4L2_BUF_CAP_SUPPORTS_DMABUF (1 << 2)
+#define V4L2_BUF_CAP_SUPPORTS_REQUESTS (1 << 3)
+#define V4L2_BUF_CAP_SUPPORTS_ORPHANED_BUFS (1 << 4)
+#define V4L2_BUF_CAP_SUPPORTS_M2M_HOLD_CAPTURE_BUF (1 << 5)
/**
* struct v4l2_plane - plane info for multi-planar buffers
@@ -1041,6 +1048,8 @@ static inline __u64 v4l2_timeval_to_ns(const struct timeval *tv)
#define V4L2_BUF_FLAG_IN_REQUEST 0x00000080
/* timecode field is valid */
#define V4L2_BUF_FLAG_TIMECODE 0x00000100
+/* Don't return the capture buffer until OUTPUT timestamp changes */
+#define V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF 0x00000200
/* Buffer is prepared for queuing */
#define V4L2_BUF_FLAG_PREPARED 0x00000400
/* Cache handling flags */
@@ -1675,6 +1684,7 @@ struct v4l2_ext_control {
__u8 __user *p_u8;
__u16 __user *p_u16;
__u32 __user *p_u32;
+ struct v4l2_area __user *p_area;
void __user *ptr;
};
} __attribute__ ((packed));
@@ -1720,6 +1730,7 @@ enum v4l2_ctrl_type {
V4L2_CTRL_TYPE_U8 = 0x0100,
V4L2_CTRL_TYPE_U16 = 0x0101,
V4L2_CTRL_TYPE_U32 = 0x0102,
+ V4L2_CTRL_TYPE_AREA = 0x0106,
};
/* Used in the VIDIOC_QUERYCTRL ioctl for querying controls */
@@ -1975,6 +1986,7 @@ struct v4l2_encoder_cmd {
#define V4L2_DEC_CMD_STOP (1)
#define V4L2_DEC_CMD_PAUSE (2)
#define V4L2_DEC_CMD_RESUME (3)
+#define V4L2_DEC_CMD_FLUSH (4)
/* Flags for V4L2_DEC_CMD_START */
#define V4L2_DEC_CMD_START_MUTE_AUDIO (1 << 0)
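
To show the new AREA control type in action, a sketch of reading a sensor's
unit cell size (assumes a video node or subdevice exposing
V4L2_CID_UNIT_CELL_SIZE):

    #include <linux/videodev2.h>
    #include <string.h>
    #include <sys/ioctl.h>

    static int get_unit_cell(int fd, struct v4l2_area *area)
    {
        struct v4l2_ext_control ctrl;
        struct v4l2_ext_controls ctrls;

        memset(&ctrl, 0, sizeof(ctrl));
        ctrl.id = V4L2_CID_UNIT_CELL_SIZE;
        ctrl.size = sizeof(*area);     /* pointer-type control payload */
        ctrl.p_area = area;

        memset(&ctrls, 0, sizeof(ctrls));
        ctrls.which = V4L2_CTRL_WHICH_CUR_VAL;
        ctrls.count = 1;
        ctrls.controls = &ctrl;

        return ioctl(fd, VIDIOC_G_EXT_CTRLS, &ctrls);
    }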
diff --git a/include/uapi/linux/virtio_ring.h b/include/uapi/linux/virtio_ring.h
index 4c4e24c291a5..559f42e73315 100644
--- a/include/uapi/linux/virtio_ring.h
+++ b/include/uapi/linux/virtio_ring.h
@@ -169,7 +169,7 @@ static inline void vring_init(struct vring *vr, unsigned int num, void *p,
{
vr->num = num;
vr->desc = p;
- vr->avail = p + num*sizeof(struct vring_desc);
+ vr->avail = (struct vring_avail *)((char *)p + num * sizeof(struct vring_desc));
vr->used = (void *)(((uintptr_t)&vr->avail->ring[num] + sizeof(__virtio16)
+ align-1) & ~(align - 1));
}
diff --git a/include/uapi/misc/fastrpc.h b/include/uapi/misc/fastrpc.h
index fb792e882cef..07de2b7aac85 100644
--- a/include/uapi/misc/fastrpc.h
+++ b/include/uapi/misc/fastrpc.h
@@ -10,6 +10,8 @@
#define FASTRPC_IOCTL_INVOKE _IOWR('R', 3, struct fastrpc_invoke)
#define FASTRPC_IOCTL_INIT_ATTACH _IO('R', 4)
#define FASTRPC_IOCTL_INIT_CREATE _IOWR('R', 5, struct fastrpc_init_create)
+#define FASTRPC_IOCTL_MMAP _IOWR('R', 6, struct fastrpc_req_mmap)
+#define FASTRPC_IOCTL_MUNMAP _IOWR('R', 7, struct fastrpc_req_munmap)
struct fastrpc_invoke_args {
__u64 ptr;
@@ -38,4 +40,17 @@ struct fastrpc_alloc_dma_buf {
__u64 size; /* size */
};
+struct fastrpc_req_mmap {
+ __s32 fd;
+ __u32 flags; /* flags for dsp to map with */
+ __u64 vaddrin; /* optional virtual address */
+ __u64 size; /* size */
+ __u64 vaddrout; /* dsp virtual address */
+};
+
+struct fastrpc_req_munmap {
+ __u64 vaddrout; /* address to unmap */
+ __u64 size; /* size */
+};
+
#endif /* __QCOM_FASTRPC_H__ */
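
An illustrative sketch of the new map/unmap pair; the installed header path
and the zero flags value are assumptions (mapping flags are DSP specific):

    #include <misc/fastrpc.h>
    #include <sys/ioctl.h>

    static int dsp_map(int dev_fd, int buf_fd, __u64 len, __u64 *dsp_addr)
    {
        struct fastrpc_req_mmap req = {
            .fd = buf_fd,
            .flags = 0,       /* DSP mapping flags */
            .vaddrin = 0,     /* let the DSP pick the address */
            .size = len,
        };

        if (ioctl(dev_fd, FASTRPC_IOCTL_MMAP, &req))
            return -1;
        *dsp_addr = req.vaddrout;
        return 0;
    }

    static int dsp_unmap(int dev_fd, __u64 dsp_addr, __u64 len)
    {
        struct fastrpc_req_munmap req = { .vaddrout = dsp_addr, .size = len };

        return ioctl(dev_fd, FASTRPC_IOCTL_MUNMAP, &req);
    }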
diff --git a/include/uapi/misc/habanalabs.h b/include/uapi/misc/habanalabs.h
index 39c4ea51a719..4faa2c9767e5 100644
--- a/include/uapi/misc/habanalabs.h
+++ b/include/uapi/misc/habanalabs.h
@@ -88,13 +88,19 @@ enum hl_device_status {
* internal engine.
* HL_INFO_DEVICE_STATUS - Retrieve the device's status. This opcode doesn't
* require an open context.
- * HL_INFO_DEVICE_UTILIZATION - Retrieve the total utilization of the device
- * over the last period specified by the user.
- * The period can be between 100ms to 1s, in
- * resolution of 100ms. The return value is a
- * percentage of the utilization rate.
+ * HL_INFO_DEVICE_UTILIZATION - Retrieve the total utilization of the device
+ * over the last period specified by the user.
+ * The period can be between 100ms to 1s, in
+ * resolution of 100ms. The return value is a
+ * percentage of the utilization rate.
* HL_INFO_HW_EVENTS_AGGREGATE - Receive an array describing how many times each
* event occurred since the driver was loaded.
+ * HL_INFO_CLK_RATE - Retrieve the current and maximum clock rate
+ * of the device in MHz. The maximum clock rate is
+ * configurable via a sysfs parameter.
+ * HL_INFO_RESET_COUNT - Retrieve the counts of the soft and hard reset
+ * operations performed on the device since the last
+ * time the driver was loaded.
*/
#define HL_INFO_HW_IP_INFO 0
#define HL_INFO_HW_EVENTS 1
@@ -103,8 +109,11 @@ enum hl_device_status {
#define HL_INFO_DEVICE_STATUS 4
#define HL_INFO_DEVICE_UTILIZATION 6
#define HL_INFO_HW_EVENTS_AGGREGATE 7
+#define HL_INFO_CLK_RATE 8
+#define HL_INFO_RESET_COUNT 9
#define HL_INFO_VERSION_MAX_LEN 128
+#define HL_INFO_CARD_NAME_MAX_LEN 16
struct hl_info_hw_ip_info {
__u64 sram_base_address;
@@ -123,6 +132,7 @@ struct hl_info_hw_ip_info {
__u8 dram_enabled;
__u8 pad[2];
__u8 armcp_version[HL_INFO_VERSION_MAX_LEN];
+ __u8 card_name[HL_INFO_CARD_NAME_MAX_LEN];
};
struct hl_info_dram_usage {
@@ -149,6 +159,16 @@ struct hl_info_device_utilization {
__u32 pad;
};
+struct hl_info_clk_rate {
+ __u32 cur_clk_rate_mhz;
+ __u32 max_clk_rate_mhz;
+};
+
+struct hl_info_reset_count {
+ __u32 hard_reset_cnt;
+ __u32 soft_reset_cnt;
+};
+
struct hl_info_args {
/* Location of relevant struct in userspace */
__u64 return_pointer;
@@ -181,13 +201,15 @@ struct hl_info_args {
/* Opcode to destroy previously created command buffer */
#define HL_CB_OP_DESTROY 1
+#define HL_MAX_CB_SIZE 0x200000 /* 2MB */
+
struct hl_cb_in {
/* Handle of CB or 0 if we want to create one */
__u64 cb_handle;
/* HL_CB_OP_* */
__u32 op;
- /* Size of CB. Maximum size is 2MB. The minimum size that will be
- * allocated, regardless of this parameter's value, is PAGE_SIZE
+ /* Size of CB. Maximum size is HL_MAX_CB_SIZE. The minimum size that
+ * will be allocated, regardless of this parameter's value, is PAGE_SIZE
*/
__u32 cb_size;
/* Context ID - Currently not in use */
@@ -233,6 +255,8 @@ struct hl_cs_chunk {
#define HL_CS_STATUS_SUCCESS 0
+#define HL_MAX_JOBS_PER_CS 512
+
struct hl_cs_in {
/* this holds address of array of hl_cs_chunk for restore phase */
__u64 chunks_restore;
@@ -242,9 +266,13 @@ struct hl_cs_in {
* Currently not in use
*/
__u64 chunks_store;
- /* Number of chunks in restore phase array */
+ /* Number of chunks in restore phase array. Maximum number is
+ * HL_MAX_JOBS_PER_CS
+ */
__u32 num_chunks_restore;
- /* Number of chunks in execution array */
+ /* Number of chunks in execution array. Maximum number is
+ * HL_MAX_JOBS_PER_CS
+ */
__u32 num_chunks_execute;
/* Number of chunks in restore phase array - Currently not in use */
__u32 num_chunks_store;
@@ -589,7 +617,7 @@ struct hl_debug_args {
*
* The user can call this IOCTL with a handle it received from the CS IOCTL
* to wait until the handle's CS has finished executing. The user will wait
- * inside the kernel until the CS has finished or until the user-requeusted
+ * inside the kernel until the CS has finished or until the user-requested
* timeout has expired.
*
* The return value of the IOCTL is a standard Linux error code. The possible
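
A sketch of fetching the new clock-rate info through the driver's existing
INFO entry point (HL_IOCTL_INFO, defined elsewhere in this header; the
installed header path is an assumption):

    #include <misc/habanalabs.h>
    #include <stdint.h>
    #include <sys/ioctl.h>

    static int query_clk_rate(int fd, struct hl_info_clk_rate *clk)
    {
        struct hl_info_args args = {
            .return_pointer = (__u64)(uintptr_t)clk,
            .return_size = sizeof(*clk),
            .op = HL_INFO_CLK_RATE,
        };

        return ioctl(fd, HL_IOCTL_INFO, &args);
    }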
diff --git a/include/uapi/rdma/cxgb3-abi.h b/include/uapi/rdma/cxgb3-abi.h
deleted file mode 100644
index 85aed672f43e..000000000000
--- a/include/uapi/rdma/cxgb3-abi.h
+++ /dev/null
@@ -1,82 +0,0 @@
-/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB) */
-/*
- * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef CXGB3_ABI_USER_H
-#define CXGB3_ABI_USER_H
-
-#include <linux/types.h>
-
-#define IWCH_UVERBS_ABI_VERSION 1
-
-/*
- * Make sure that all structs defined in this file remain laid out so
- * that they pack the same way on 32-bit and 64-bit architectures (to
- * avoid incompatibility between 32-bit userspace and 64-bit kernels).
- * In particular do not use pointer types -- pass pointers in __aligned_u64
- * instead.
- */
-struct iwch_create_cq_req {
- __aligned_u64 user_rptr_addr;
-};
-
-struct iwch_create_cq_resp_v0 {
- __aligned_u64 key;
- __u32 cqid;
- __u32 size_log2;
-};
-
-struct iwch_create_cq_resp {
- __aligned_u64 key;
- __u32 cqid;
- __u32 size_log2;
- __u32 memsize;
- __u32 reserved;
-};
-
-struct iwch_create_qp_resp {
- __aligned_u64 key;
- __aligned_u64 db_key;
- __u32 qpid;
- __u32 size_log2;
- __u32 sq_size_log2;
- __u32 rq_size_log2;
-};
-
-struct iwch_reg_user_mr_resp {
- __u32 pbl_addr;
-};
-
-struct iwch_alloc_pd_resp {
- __u32 pdid;
-};
-
-#endif /* CXGB3_ABI_USER_H */
diff --git a/include/uapi/rdma/efa-abi.h b/include/uapi/rdma/efa-abi.h
index 9599a2a62be8..53b6e2036a9b 100644
--- a/include/uapi/rdma/efa-abi.h
+++ b/include/uapi/rdma/efa-abi.h
@@ -90,12 +90,18 @@ struct efa_ibv_create_ah_resp {
__u8 reserved_30[2];
};
+enum {
+ EFA_QUERY_DEVICE_CAPS_RDMA_READ = 1 << 0,
+};
+
struct efa_ibv_ex_query_device_resp {
__u32 comp_mask;
__u32 max_sq_wr;
__u32 max_rq_wr;
__u16 max_sq_sge;
__u16 max_rq_sge;
+ __u32 max_rdma_size;
+ __u32 device_caps;
};
#endif /* EFA_ABI_USER_H */
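A short sketch of how a user-space provider might consume the new capability mask once the extended query-device response has been filled in by the driver; only the bit test is the point here:

static int efa_supports_rdma_read(const struct efa_ibv_ex_query_device_resp *resp)
{
	return !!(resp->device_caps & EFA_QUERY_DEVICE_CAPS_RDMA_READ);
}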
diff --git a/include/uapi/rdma/ib_user_ioctl_verbs.h b/include/uapi/rdma/ib_user_ioctl_verbs.h
index 72c7fc75f960..9019b2d906ea 100644
--- a/include/uapi/rdma/ib_user_ioctl_verbs.h
+++ b/include/uapi/rdma/ib_user_ioctl_verbs.h
@@ -173,4 +173,26 @@ struct ib_uverbs_query_port_resp_ex {
__u8 reserved[6];
};
+enum rdma_driver_id {
+ RDMA_DRIVER_UNKNOWN,
+ RDMA_DRIVER_MLX5,
+ RDMA_DRIVER_MLX4,
+ RDMA_DRIVER_CXGB3,
+ RDMA_DRIVER_CXGB4,
+ RDMA_DRIVER_MTHCA,
+ RDMA_DRIVER_BNXT_RE,
+ RDMA_DRIVER_OCRDMA,
+ RDMA_DRIVER_NES,
+ RDMA_DRIVER_I40IW,
+ RDMA_DRIVER_VMW_PVRDMA,
+ RDMA_DRIVER_QEDR,
+ RDMA_DRIVER_HNS,
+ RDMA_DRIVER_USNIC,
+ RDMA_DRIVER_RXE,
+ RDMA_DRIVER_HFI1,
+ RDMA_DRIVER_QIB,
+ RDMA_DRIVER_EFA,
+ RDMA_DRIVER_SIW,
+};
+
#endif
diff --git a/include/uapi/rdma/mlx5_user_ioctl_cmds.h b/include/uapi/rdma/mlx5_user_ioctl_cmds.h
index d0da070cf0ab..20d88307f75f 100644
--- a/include/uapi/rdma/mlx5_user_ioctl_cmds.h
+++ b/include/uapi/rdma/mlx5_user_ioctl_cmds.h
@@ -198,6 +198,7 @@ enum mlx5_ib_create_flow_attrs {
MLX5_IB_ATTR_CREATE_FLOW_ARR_FLOW_ACTIONS,
MLX5_IB_ATTR_CREATE_FLOW_TAG,
MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX,
+ MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX_OFFSET,
};
enum mlx5_ib_destoy_flow_attrs {
diff --git a/include/uapi/rdma/nes-abi.h b/include/uapi/rdma/nes-abi.h
deleted file mode 100644
index f80495baa969..000000000000
--- a/include/uapi/rdma/nes-abi.h
+++ /dev/null
@@ -1,115 +0,0 @@
-/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB) */
-/*
- * Copyright (c) 2006 - 2011 Intel Corporation. All rights reserved.
- * Copyright (c) 2005 Topspin Communications. All rights reserved.
- * Copyright (c) 2005 Cisco Systems. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#ifndef NES_ABI_USER_H
-#define NES_ABI_USER_H
-
-#include <linux/types.h>
-
-#define NES_ABI_USERSPACE_VER 2
-#define NES_ABI_KERNEL_VER 2
-
-/*
- * Make sure that all structs defined in this file remain laid out so
- * that they pack the same way on 32-bit and 64-bit architectures (to
- * avoid incompatibility between 32-bit userspace and 64-bit kernels).
- * In particular do not use pointer types -- pass pointers in __u64
- * instead.
- */
-
-struct nes_alloc_ucontext_req {
- __u32 reserved32;
- __u8 userspace_ver;
- __u8 reserved8[3];
-};
-
-struct nes_alloc_ucontext_resp {
- __u32 max_pds; /* maximum pds allowed for this user process */
- __u32 max_qps; /* maximum qps allowed for this user process */
- __u32 wq_size; /* size of the WQs (sq+rq) allocated to the mmaped area */
- __u8 virtwq; /* flag to indicate if virtual WQ are to be used or not */
- __u8 kernel_ver;
- __u8 reserved[2];
-};
-
-struct nes_alloc_pd_resp {
- __u32 pd_id;
- __u32 mmap_db_index;
-};
-
-struct nes_create_cq_req {
- __aligned_u64 user_cq_buffer;
- __u32 mcrqf;
- __u8 reserved[4];
-};
-
-struct nes_create_qp_req {
- __aligned_u64 user_wqe_buffers;
- __aligned_u64 user_qp_buffer;
-};
-
-enum iwnes_memreg_type {
- IWNES_MEMREG_TYPE_MEM = 0x0000,
- IWNES_MEMREG_TYPE_QP = 0x0001,
- IWNES_MEMREG_TYPE_CQ = 0x0002,
- IWNES_MEMREG_TYPE_MW = 0x0003,
- IWNES_MEMREG_TYPE_FMR = 0x0004,
- IWNES_MEMREG_TYPE_FMEM = 0x0005,
-};
-
-struct nes_mem_reg_req {
- __u32 reg_type; /* indicates if id is memory, QP or CQ */
- __u32 reserved;
-};
-
-struct nes_create_cq_resp {
- __u32 cq_id;
- __u32 cq_size;
- __u32 mmap_db_index;
- __u32 reserved;
-};
-
-struct nes_create_qp_resp {
- __u32 qp_id;
- __u32 actual_sq_size;
- __u32 actual_rq_size;
- __u32 mmap_sq_db_index;
- __u32 mmap_rq_db_index;
- __u32 nes_drv_opt;
-};
-
-#endif /* NES_ABI_USER_H */
diff --git a/include/uapi/rdma/qedr-abi.h b/include/uapi/rdma/qedr-abi.h
index 7a10b3a325fa..c022ee26089b 100644
--- a/include/uapi/rdma/qedr-abi.h
+++ b/include/uapi/rdma/qedr-abi.h
@@ -38,6 +38,15 @@
#define QEDR_ABI_VERSION (8)
/* user kernel communication data structures. */
+enum qedr_alloc_ucontext_flags {
+ QEDR_ALLOC_UCTX_RESERVED = 1 << 0,
+ QEDR_ALLOC_UCTX_DB_REC = 1 << 1
+};
+
+struct qedr_alloc_ucontext_req {
+ __u32 context_flags;
+ __u32 reserved;
+};
struct qedr_alloc_ucontext_resp {
__aligned_u64 db_pa;
@@ -74,6 +83,7 @@ struct qedr_create_cq_uresp {
__u32 db_offset;
__u16 icid;
__u16 reserved;
+ __aligned_u64 db_rec_addr;
};
struct qedr_create_qp_ureq {
@@ -109,6 +119,13 @@ struct qedr_create_qp_uresp {
__u32 rq_db2_offset;
__u32 reserved;
+
+ /* address of SQ doorbell recovery user entry */
+ __aligned_u64 sq_db_rec_addr;
+
+ /* address of RQ doorbell recovery user entry */
+ __aligned_u64 rq_db_rec_addr;
+
};
struct qedr_create_srq_ureq {
@@ -128,4 +145,12 @@ struct qedr_create_srq_uresp {
__u32 reserved1;
};
+/* Doorbell recovery entry allocated and populated by userspace doorbelling
+ * entities and mapped to the kernel. The kernel uses it to register doorbell
+ * information with the doorbell drop recovery mechanism.
+ */
+struct qedr_user_db_rec {
+ __aligned_u64 db_data; /* doorbell data */
+};
+
#endif /* __QEDR_USER_H__ */
diff --git a/include/uapi/rdma/rdma_user_ioctl_cmds.h b/include/uapi/rdma/rdma_user_ioctl_cmds.h
index b8bb285f6b2a..7b1ec806f8f9 100644
--- a/include/uapi/rdma/rdma_user_ioctl_cmds.h
+++ b/include/uapi/rdma/rdma_user_ioctl_cmds.h
@@ -84,26 +84,4 @@ struct ib_uverbs_ioctl_hdr {
struct ib_uverbs_attr attrs[0];
};
-enum rdma_driver_id {
- RDMA_DRIVER_UNKNOWN,
- RDMA_DRIVER_MLX5,
- RDMA_DRIVER_MLX4,
- RDMA_DRIVER_CXGB3,
- RDMA_DRIVER_CXGB4,
- RDMA_DRIVER_MTHCA,
- RDMA_DRIVER_BNXT_RE,
- RDMA_DRIVER_OCRDMA,
- RDMA_DRIVER_NES,
- RDMA_DRIVER_I40IW,
- RDMA_DRIVER_VMW_PVRDMA,
- RDMA_DRIVER_QEDR,
- RDMA_DRIVER_HNS,
- RDMA_DRIVER_USNIC,
- RDMA_DRIVER_RXE,
- RDMA_DRIVER_HFI1,
- RDMA_DRIVER_QIB,
- RDMA_DRIVER_EFA,
- RDMA_DRIVER_SIW,
-};
-
#endif
diff --git a/include/uapi/rdma/vmw_pvrdma-abi.h b/include/uapi/rdma/vmw_pvrdma-abi.h
index 6e73f0274e41..f8b638c73371 100644
--- a/include/uapi/rdma/vmw_pvrdma-abi.h
+++ b/include/uapi/rdma/vmw_pvrdma-abi.h
@@ -179,6 +179,11 @@ struct pvrdma_create_qp {
__aligned_u64 qp_addr;
};
+struct pvrdma_create_qp_resp {
+ __u32 qpn;
+ __u32 qp_handle;
+};
+
/* PVRDMA masked atomic compare and swap */
struct pvrdma_ex_cmp_swap {
__aligned_u64 swap_val;
diff --git a/include/uapi/sound/compress_params.h b/include/uapi/sound/compress_params.h
index 3d4d6de66a17..9c96fb0e4d90 100644
--- a/include/uapi/sound/compress_params.h
+++ b/include/uapi/sound/compress_params.h
@@ -317,12 +317,22 @@ struct snd_enc_generic {
__s32 reserved[15]; /* Can be used for SND_AUDIOCODEC_BESPOKE */
} __attribute__((packed, aligned(4)));
+struct snd_dec_flac {
+ __u16 sample_size;
+ __u16 min_blk_size;
+ __u16 max_blk_size;
+ __u16 min_frame_size;
+ __u16 max_frame_size;
+ __u16 reserved;
+} __attribute__((packed, aligned(4)));
+
union snd_codec_options {
struct snd_enc_wma wma;
struct snd_enc_vorbis vorbis;
struct snd_enc_real real;
struct snd_enc_flac flac;
struct snd_enc_generic generic;
+ struct snd_dec_flac flac_d;
} __attribute__((packed, aligned(4)));
/** struct snd_codec_desc - description of codec capabilities
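A hedged sketch of filling the new decoder options before handing params to the compress-offload SET_PARAMS path. The concrete values are illustrative for a typical 16-bit FLAC stream; struct snd_codec and SND_AUDIOCODEC_FLAC are assumed from elsewhere in this header:

struct snd_codec codec = { 0 };

codec.id = SND_AUDIOCODEC_FLAC;
codec.options.flac_d.sample_size = 16;
codec.options.flac_d.min_blk_size = 16;
codec.options.flac_d.max_blk_size = 4608;
codec.options.flac_d.min_frame_size = 0;	/* not known up front */
codec.options.flac_d.max_frame_size = 8192;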
diff --git a/include/uapi/sound/sof/abi.h b/include/uapi/sound/sof/abi.h
index a0fe0d4c4b66..ebfdc20ca081 100644
--- a/include/uapi/sound/sof/abi.h
+++ b/include/uapi/sound/sof/abi.h
@@ -26,7 +26,7 @@
/* SOF ABI version major, minor and patch numbers */
#define SOF_ABI_MAJOR 3
-#define SOF_ABI_MINOR 10
+#define SOF_ABI_MINOR 11
#define SOF_ABI_PATCH 0
/* SOF ABI version number. Format within 32bit word is MMmmmppp */
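As a worked example of the MMmmmppp packing (assuming the usual SOF_ABI_VER() shifts of 24/12/0 defined alongside these constants), the bumped version 3.11.0 encodes to (3 << 24) | (11 << 12) | 0 == 0x0300b000.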
diff --git a/include/uapi/sound/sof/tokens.h b/include/uapi/sound/sof/tokens.h
index 8f996857fb24..76883e6fb750 100644
--- a/include/uapi/sound/sof/tokens.h
+++ b/include/uapi/sound/sof/tokens.h
@@ -111,7 +111,14 @@
/* TODO: Add SAI tokens */
/* ESAI */
-#define SOF_TKN_IMX_ESAI_FIRST_TOKEN 1100
-/* TODO: Add ESAI tokens */
+#define SOF_TKN_IMX_ESAI_MCLK_ID 1100
+
+/* Stream */
+#define SOF_TKN_STREAM_PLAYBACK_COMPATIBLE_D0I3 1200
+#define SOF_TKN_STREAM_CAPTURE_COMPATIBLE_D0I3 1201
+
+/* Led control for mute switches */
+#define SOF_TKN_MUTE_LED_USE 1300
+#define SOF_TKN_MUTE_LED_DIRECTION 1301
#endif
diff --git a/include/xen/interface/xen-mca.h b/include/xen/interface/xen-mca.h
index 73a4ea714d93..7483a78d2425 100644
--- a/include/xen/interface/xen-mca.h
+++ b/include/xen/interface/xen-mca.h
@@ -183,7 +183,6 @@ struct mc_info {
DEFINE_GUEST_HANDLE_STRUCT(mc_info);
#define __MC_MSR_ARRAYSIZE 8
-#define __MC_MSR_MCGCAP 0
#define __MC_NMSRS 1
#define MC_NCAPS 7
struct mcinfo_logical_cpu {
@@ -332,7 +331,11 @@ struct xen_mc {
};
DEFINE_GUEST_HANDLE_STRUCT(xen_mc);
-/* Fields are zero when not available */
+/*
+ * Fields are zero when not available. Also, this struct is shared with
+ * userspace mcelog and thus must keep existing fields at current offsets.
+ * Only add new fields to the end of the structure.
+ */
struct xen_mce {
__u64 status;
__u64 misc;
@@ -353,6 +356,9 @@ struct xen_mce {
__u32 socketid; /* CPU socket ID */
__u32 apicid; /* CPU initial apic ID */
__u64 mcgcap; /* MCGCAP MSR: machine check capabilities of CPU */
+ __u64 synd; /* MCA_SYND MSR: only valid on SMCA systems */
+ __u64 ipid; /* MCA_IPID MSR: only valid on SMCA systems */
+ __u64 ppin; /* Protected Processor Inventory Number */
};
/*
diff --git a/init/Kconfig b/init/Kconfig
index b4daad2bac23..67a602ee17f1 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -785,6 +785,10 @@ config ARCH_SUPPORTS_NUMA_BALANCING
config ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
bool
+config CC_HAS_INT128
+ def_bool y
+ depends on !$(cc-option,-D__SIZEOF_INT128__=0)
+
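The cc-option test above deserves a gloss: compilers that support __int128 predefine __SIZEOF_INT128__, and redefining a builtin macro on the command line provokes a warning that cc-option (which is assumed to compile with -Werror) treats as failure, so the leading ! yields y exactly on capable compilers. Code can then guard wide arithmetic on the symbol, for instance:

#ifdef CONFIG_CC_HAS_INT128
static inline u64 mul_u64_u64_hi(u64 a, u64 b)
{
	return (u64)(((unsigned __int128)a * b) >> 64);	/* high half of 64x64 */
}
#endif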
#
# For architectures that know their GCC __int128 support is sound
#
@@ -1548,6 +1552,7 @@ config AIO
config IO_URING
bool "Enable IO uring support" if EXPERT
select ANON_INODES
+ select IO_WQ
default y
help
This option enables support for the io_uring interface, enabling
diff --git a/init/do_mounts.c b/init/do_mounts.c
index 9634ecf3743d..af9cda887a23 100644
--- a/init/do_mounts.c
+++ b/init/do_mounts.c
@@ -212,6 +212,7 @@ static int match_dev_by_label(struct device *dev, const void *data)
* a colon.
* 9) PARTLABEL=<name> with name being the GPT partition label.
* MSDOS partitions do not support labels!
+ * 10) /dev/cifs represents Root_CIFS (0xfe)
*
* If name doesn't fall into the categories above, we return (0,0).
* block_class is used to check if something is a disk name. If the disk
@@ -268,6 +269,9 @@ dev_t name_to_dev_t(const char *name)
res = Root_NFS;
if (strcmp(name, "nfs") == 0)
goto done;
+ res = Root_CIFS;
+ if (strcmp(name, "cifs") == 0)
+ goto done;
res = Root_RAM0;
if (strcmp(name, "ram") == 0)
goto done;
@@ -501,6 +505,42 @@ static int __init mount_nfs_root(void)
}
#endif
+#ifdef CONFIG_CIFS_ROOT
+
+extern int cifs_root_data(char **dev, char **opts);
+
+#define CIFSROOT_TIMEOUT_MIN 5
+#define CIFSROOT_TIMEOUT_MAX 30
+#define CIFSROOT_RETRY_MAX 5
+
+static int __init mount_cifs_root(void)
+{
+ char *root_dev, *root_data;
+ unsigned int timeout;
+ int try, err;
+
+ err = cifs_root_data(&root_dev, &root_data);
+ if (err != 0)
+ return 0;
+
+ timeout = CIFSROOT_TIMEOUT_MIN;
+ for (try = 1; ; try++) {
+ err = do_mount_root(root_dev, "cifs", root_mountflags,
+ root_data);
+ if (err == 0)
+ return 1;
+ if (try > CIFSROOT_RETRY_MAX)
+ break;
+
+ ssleep(timeout);
+ timeout <<= 1;
+ if (timeout > CIFSROOT_TIMEOUT_MAX)
+ timeout = CIFSROOT_TIMEOUT_MAX;
+ }
+ return 0;
+}
+#endif
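Backoff arithmetic for the loop above: with CIFSROOT_TIMEOUT_MIN doubling each round and clamped at CIFSROOT_TIMEOUT_MAX, the sleeps between the six mount attempts are 5, 10, 20, 30 and 30 seconds, i.e. roughly 95 seconds of waiting before the CIFS root mount is finally given up.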
+
#if defined(CONFIG_BLK_DEV_RAM) || defined(CONFIG_BLK_DEV_FD)
void __init change_floppy(char *fmt, ...)
{
@@ -542,6 +582,15 @@ void __init mount_root(void)
ROOT_DEV = Root_FD0;
}
#endif
+#ifdef CONFIG_CIFS_ROOT
+ if (ROOT_DEV == Root_CIFS) {
+ if (mount_cifs_root())
+ return;
+
+ printk(KERN_ERR "VFS: Unable to mount root fs via SMB, trying floppy.\n");
+ ROOT_DEV = Root_FD0;
+ }
+#endif
#ifdef CONFIG_BLK_DEV_FD
if (MAJOR(ROOT_DEV) == FLOPPY_MAJOR) {
/* rd_doload is 2 for a dual initrd/ramload setup */
diff --git a/init/initramfs.c b/init/initramfs.c
index c47dad0884f7..8ec1be4d7d51 100644
--- a/init/initramfs.c
+++ b/init/initramfs.c
@@ -10,6 +10,7 @@
#include <linux/syscalls.h>
#include <linux/utime.h>
#include <linux/file.h>
+#include <linux/memblock.h>
static ssize_t __init xwrite(int fd, const char *p, size_t count)
{
@@ -529,6 +530,13 @@ extern unsigned long __initramfs_size;
void __weak free_initrd_mem(unsigned long start, unsigned long end)
{
+#ifdef CONFIG_ARCH_KEEP_MEMBLOCK
+ unsigned long aligned_start = ALIGN_DOWN(start, PAGE_SIZE);
+ unsigned long aligned_end = ALIGN(end, PAGE_SIZE);
+
+ memblock_free(__pa(aligned_start), aligned_end - aligned_start);
+#endif
+
free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
"initrd");
}
diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt
index deff97217496..bf82259cff96 100644
--- a/kernel/Kconfig.preempt
+++ b/kernel/Kconfig.preempt
@@ -65,7 +65,7 @@ config PREEMPT_RT
preemptible priority-inheritance aware variants, enforcing
interrupt threading and introducing mechanisms to break up long
non-preemptible sections. This makes the kernel, except for very
- low level and critical code pathes (entry code, scheduler, low
+ low level and critical code paths (entry code, scheduler, low
level interrupt handling) fully preemptible and brings most
execution contexts under scheduler control.
diff --git a/kernel/Makefile b/kernel/Makefile
index daad787fb795..f0902a7bd1b3 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -115,6 +115,8 @@ obj-$(CONFIG_TORTURE_TEST) += torture.o
obj-$(CONFIG_HAS_IOMEM) += iomem.o
obj-$(CONFIG_RSEQ) += rseq.o
+obj-$(CONFIG_SYSCTL_KUNIT_TEST) += sysctl-test.o
+
obj-$(CONFIG_GCC_PLUGIN_STACKLEAK) += stackleak.o
KASAN_SANITIZE_stackleak.o := n
KCOV_INSTRUMENT_stackleak.o := n
diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c
index 1f31c2f1e6fc..4508d5e0cf69 100644
--- a/kernel/audit_watch.c
+++ b/kernel/audit_watch.c
@@ -351,12 +351,12 @@ static int audit_get_nd(struct audit_watch *watch, struct path *parent)
struct dentry *d = kern_path_locked(watch->path, parent);
if (IS_ERR(d))
return PTR_ERR(d);
- inode_unlock(d_backing_inode(parent->dentry));
if (d_is_positive(d)) {
/* update watch filter fields */
watch->dev = d->d_sb->s_dev;
watch->ino = d_backing_inode(d)->i_ino;
}
+ inode_unlock(d_backing_inode(parent->dentry));
dput(d);
return 0;
}
diff --git a/kernel/bpf/Makefile b/kernel/bpf/Makefile
index e1d9adb212f9..3f671bf617e8 100644
--- a/kernel/bpf/Makefile
+++ b/kernel/bpf/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_BPF_SYSCALL) += syscall.o verifier.o inode.o helpers.o tnum.o
obj-$(CONFIG_BPF_SYSCALL) += hashtab.o arraymap.o percpu_freelist.o bpf_lru_list.o lpm_trie.o map_in_map.o
obj-$(CONFIG_BPF_SYSCALL) += local_storage.o queue_stack_maps.o
obj-$(CONFIG_BPF_SYSCALL) += disasm.o
+obj-$(CONFIG_BPF_JIT) += trampoline.o
obj-$(CONFIG_BPF_SYSCALL) += btf.o
ifeq ($(CONFIG_NET),y)
obj-$(CONFIG_BPF_SYSCALL) += devmap.o
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index 1c65ce0098a9..f0d19bbb9211 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -14,7 +14,7 @@
#include "map_in_map.h"
#define ARRAY_CREATE_FLAG_MASK \
- (BPF_F_NUMA_NODE | BPF_F_ACCESS_MASK)
+ (BPF_F_NUMA_NODE | BPF_F_MMAPABLE | BPF_F_ACCESS_MASK)
static void bpf_array_free_percpu(struct bpf_array *array)
{
@@ -59,6 +59,10 @@ int array_map_alloc_check(union bpf_attr *attr)
(percpu && numa_node != NUMA_NO_NODE))
return -EINVAL;
+ if (attr->map_type != BPF_MAP_TYPE_ARRAY &&
+ attr->map_flags & BPF_F_MMAPABLE)
+ return -EINVAL;
+
if (attr->value_size > KMALLOC_MAX_SIZE)
/* if value_size is bigger, the user space won't be able to
* access the elements.
@@ -102,10 +106,19 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
}
array_size = sizeof(*array);
- if (percpu)
+ if (percpu) {
array_size += (u64) max_entries * sizeof(void *);
- else
- array_size += (u64) max_entries * elem_size;
+ } else {
+ /* rely on vmalloc() to return page-aligned memory and
+ * ensure array->value is exactly page-aligned
+ */
+ if (attr->map_flags & BPF_F_MMAPABLE) {
+ array_size = PAGE_ALIGN(array_size);
+ array_size += PAGE_ALIGN((u64) max_entries * elem_size);
+ } else {
+ array_size += (u64) max_entries * elem_size;
+ }
+ }
/* make sure there is no u32 overflow later in round_up() */
cost = array_size;
@@ -117,7 +130,20 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
return ERR_PTR(ret);
/* allocate all map elements and zero-initialize them */
- array = bpf_map_area_alloc(array_size, numa_node);
+ if (attr->map_flags & BPF_F_MMAPABLE) {
+ void *data;
+
+ /* kmalloc'ed memory can't be mmap'ed, use explicit vmalloc */
+ data = bpf_map_area_mmapable_alloc(array_size, numa_node);
+ if (!data) {
+ bpf_map_charge_finish(&mem);
+ return ERR_PTR(-ENOMEM);
+ }
+ array = data + PAGE_ALIGN(sizeof(struct bpf_array))
+ - offsetof(struct bpf_array, value);
+ } else {
+ array = bpf_map_area_alloc(array_size, numa_node);
+ }
if (!array) {
bpf_map_charge_finish(&mem);
return ERR_PTR(-ENOMEM);
@@ -350,6 +376,11 @@ static int array_map_delete_elem(struct bpf_map *map, void *key)
return -EINVAL;
}
+static void *array_map_vmalloc_addr(struct bpf_array *array)
+{
+ return (void *)round_down((unsigned long)array, PAGE_SIZE);
+}
+
/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void array_map_free(struct bpf_map *map)
{
@@ -365,7 +396,10 @@ static void array_map_free(struct bpf_map *map)
if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
bpf_array_free_percpu(array);
- bpf_map_area_free(array);
+ if (array->map.map_flags & BPF_F_MMAPABLE)
+ bpf_map_area_free(array_map_vmalloc_addr(array));
+ else
+ bpf_map_area_free(array);
}
static void array_map_seq_show_elem(struct bpf_map *map, void *key,
@@ -444,6 +478,17 @@ static int array_map_check_btf(const struct bpf_map *map,
return 0;
}
+static int array_map_mmap(struct bpf_map *map, struct vm_area_struct *vma)
+{
+ struct bpf_array *array = container_of(map, struct bpf_array, map);
+ pgoff_t pgoff = PAGE_ALIGN(sizeof(*array)) >> PAGE_SHIFT;
+
+ if (!(map->map_flags & BPF_F_MMAPABLE))
+ return -EINVAL;
+
+ return remap_vmalloc_range(vma, array_map_vmalloc_addr(array), pgoff);
+}
+
const struct bpf_map_ops array_map_ops = {
.map_alloc_check = array_map_alloc_check,
.map_alloc = array_map_alloc,
@@ -455,6 +500,7 @@ const struct bpf_map_ops array_map_ops = {
.map_gen_lookup = array_map_gen_lookup,
.map_direct_value_addr = array_map_direct_value_addr,
.map_direct_value_meta = array_map_direct_value_meta,
+ .map_mmap = array_map_mmap,
.map_seq_show_elem = array_map_seq_show_elem,
.map_check_btf = array_map_check_btf,
};
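A minimal user-space sketch of the new BPF_F_MMAPABLE flag: create a plain array map with the flag set and mmap() its value area directly. The raw bpf(2) syscall is used for self-containment and error handling is trimmed:

#include <stdint.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/bpf.h>

int main(void)
{
	union bpf_attr attr;
	uint64_t *vals;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.map_type = BPF_MAP_TYPE_ARRAY;
	attr.key_size = 4;
	attr.value_size = 8;
	attr.max_entries = 512;
	attr.map_flags = BPF_F_MMAPABLE;

	fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
	if (fd < 0)
		return 1;

	/* offset 0 maps the page-aligned value area set up in array_map_alloc() */
	vals = mmap(NULL, 512 * 8, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (vals == MAP_FAILED)
		return 1;

	vals[0] = 42;	/* immediately visible to BPF programs using the map */
	return 0;
}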
@@ -540,10 +586,17 @@ int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
if (IS_ERR(new_ptr))
return PTR_ERR(new_ptr);
- old_ptr = xchg(array->ptrs + index, new_ptr);
+ if (map->ops->map_poke_run) {
+ mutex_lock(&array->aux->poke_mutex);
+ old_ptr = xchg(array->ptrs + index, new_ptr);
+ map->ops->map_poke_run(map, index, old_ptr, new_ptr);
+ mutex_unlock(&array->aux->poke_mutex);
+ } else {
+ old_ptr = xchg(array->ptrs + index, new_ptr);
+ }
+
if (old_ptr)
map->ops->map_fd_put_ptr(old_ptr);
-
return 0;
}
@@ -556,7 +609,15 @@ static int fd_array_map_delete_elem(struct bpf_map *map, void *key)
if (index >= array->map.max_entries)
return -E2BIG;
- old_ptr = xchg(array->ptrs + index, NULL);
+ if (map->ops->map_poke_run) {
+ mutex_lock(&array->aux->poke_mutex);
+ old_ptr = xchg(array->ptrs + index, NULL);
+ map->ops->map_poke_run(map, index, old_ptr, NULL);
+ mutex_unlock(&array->aux->poke_mutex);
+ } else {
+ old_ptr = xchg(array->ptrs + index, NULL);
+ }
+
if (old_ptr) {
map->ops->map_fd_put_ptr(old_ptr);
return 0;
@@ -625,17 +686,195 @@ static void prog_array_map_seq_show_elem(struct bpf_map *map, void *key,
rcu_read_unlock();
}
+struct prog_poke_elem {
+ struct list_head list;
+ struct bpf_prog_aux *aux;
+};
+
+static int prog_array_map_poke_track(struct bpf_map *map,
+ struct bpf_prog_aux *prog_aux)
+{
+ struct prog_poke_elem *elem;
+ struct bpf_array_aux *aux;
+ int ret = 0;
+
+ aux = container_of(map, struct bpf_array, map)->aux;
+ mutex_lock(&aux->poke_mutex);
+ list_for_each_entry(elem, &aux->poke_progs, list) {
+ if (elem->aux == prog_aux)
+ goto out;
+ }
+
+ elem = kmalloc(sizeof(*elem), GFP_KERNEL);
+ if (!elem) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ INIT_LIST_HEAD(&elem->list);
+ /* We must track the program's aux info at this point in time
+ * since the program pointer itself may not be stable yet, see
+ * also comment in prog_array_map_poke_run().
+ */
+ elem->aux = prog_aux;
+
+ list_add_tail(&elem->list, &aux->poke_progs);
+out:
+ mutex_unlock(&aux->poke_mutex);
+ return ret;
+}
+
+static void prog_array_map_poke_untrack(struct bpf_map *map,
+ struct bpf_prog_aux *prog_aux)
+{
+ struct prog_poke_elem *elem, *tmp;
+ struct bpf_array_aux *aux;
+
+ aux = container_of(map, struct bpf_array, map)->aux;
+ mutex_lock(&aux->poke_mutex);
+ list_for_each_entry_safe(elem, tmp, &aux->poke_progs, list) {
+ if (elem->aux == prog_aux) {
+ list_del_init(&elem->list);
+ kfree(elem);
+ break;
+ }
+ }
+ mutex_unlock(&aux->poke_mutex);
+}
+
+static void prog_array_map_poke_run(struct bpf_map *map, u32 key,
+ struct bpf_prog *old,
+ struct bpf_prog *new)
+{
+ struct prog_poke_elem *elem;
+ struct bpf_array_aux *aux;
+
+ aux = container_of(map, struct bpf_array, map)->aux;
+ WARN_ON_ONCE(!mutex_is_locked(&aux->poke_mutex));
+
+ list_for_each_entry(elem, &aux->poke_progs, list) {
+ struct bpf_jit_poke_descriptor *poke;
+ int i, ret;
+
+ for (i = 0; i < elem->aux->size_poke_tab; i++) {
+ poke = &elem->aux->poke_tab[i];
+
+ /* A few things to be aware of:
+ *
+ * 1) We can only ever access aux in this context, but
+ * not aux->prog since it might not be stable yet and
+ * there could be danger of use after free otherwise.
+ * 2) Initially when we start tracking aux, the program
+ * is not JITed yet and also does not have a kallsyms
+ * entry. We skip these as poke->ip_stable is not
+ * active yet. The JIT will do the final fixup before
+ * setting it stable. The various poke->ip_stable are
+ * successively activated, so tail call updates can
+ * arrive from here while JIT is still finishing its
+ * final fixup for non-activated poke entries.
+ * 3) On program teardown, the program's kallsym entry gets
+ * removed out of RCU callback, but we can only untrack
+ * from sleepable context, therefore bpf_arch_text_poke()
+ * might not see that this is in BPF text section and
+ * bails out with -EINVAL. As these are unreachable since
+ * RCU grace period already passed, we simply skip them.
+ * 4) Also, programs reaching a refcount of zero while patching
+ * is in progress are okay since we're protected under
+ * poke_mutex and untrack the programs before the JIT
+ * buffer is freed. When we're still in the middle of
+ * patching and suddenly kallsyms entry of the program
+ * gets evicted, we just skip the rest which is fine due
+ * to point 3).
+ * 5) Any other error happening below from bpf_arch_text_poke()
+ * is an unexpected bug.
+ */
+ if (!READ_ONCE(poke->ip_stable))
+ continue;
+ if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
+ continue;
+ if (poke->tail_call.map != map ||
+ poke->tail_call.key != key)
+ continue;
+
+ ret = bpf_arch_text_poke(poke->ip, BPF_MOD_JUMP,
+ old ? (u8 *)old->bpf_func +
+ poke->adj_off : NULL,
+ new ? (u8 *)new->bpf_func +
+ poke->adj_off : NULL);
+ BUG_ON(ret < 0 && ret != -EINVAL);
+ }
+ }
+}
+
+static void prog_array_map_clear_deferred(struct work_struct *work)
+{
+ struct bpf_map *map = container_of(work, struct bpf_array_aux,
+ work)->map;
+ bpf_fd_array_map_clear(map);
+ bpf_map_put(map);
+}
+
+static void prog_array_map_clear(struct bpf_map *map)
+{
+ struct bpf_array_aux *aux = container_of(map, struct bpf_array,
+ map)->aux;
+ bpf_map_inc(map);
+ schedule_work(&aux->work);
+}
+
+static struct bpf_map *prog_array_map_alloc(union bpf_attr *attr)
+{
+ struct bpf_array_aux *aux;
+ struct bpf_map *map;
+
+ aux = kzalloc(sizeof(*aux), GFP_KERNEL);
+ if (!aux)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_WORK(&aux->work, prog_array_map_clear_deferred);
+ INIT_LIST_HEAD(&aux->poke_progs);
+ mutex_init(&aux->poke_mutex);
+
+ map = array_map_alloc(attr);
+ if (IS_ERR(map)) {
+ kfree(aux);
+ return map;
+ }
+
+ container_of(map, struct bpf_array, map)->aux = aux;
+ aux->map = map;
+
+ return map;
+}
+
+static void prog_array_map_free(struct bpf_map *map)
+{
+ struct prog_poke_elem *elem, *tmp;
+ struct bpf_array_aux *aux;
+
+ aux = container_of(map, struct bpf_array, map)->aux;
+ list_for_each_entry_safe(elem, tmp, &aux->poke_progs, list) {
+ list_del_init(&elem->list);
+ kfree(elem);
+ }
+ kfree(aux);
+ fd_array_map_free(map);
+}
+
const struct bpf_map_ops prog_array_map_ops = {
.map_alloc_check = fd_array_map_alloc_check,
- .map_alloc = array_map_alloc,
- .map_free = fd_array_map_free,
+ .map_alloc = prog_array_map_alloc,
+ .map_free = prog_array_map_free,
+ .map_poke_track = prog_array_map_poke_track,
+ .map_poke_untrack = prog_array_map_poke_untrack,
+ .map_poke_run = prog_array_map_poke_run,
.map_get_next_key = array_map_get_next_key,
.map_lookup_elem = fd_array_map_lookup_elem,
.map_delete_elem = fd_array_map_delete_elem,
.map_fd_get_ptr = prog_fd_array_get_ptr,
.map_fd_put_ptr = prog_fd_array_put_ptr,
.map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem,
- .map_release_uref = bpf_fd_array_map_clear,
+ .map_release_uref = prog_array_map_clear,
.map_seq_show_elem = prog_array_map_seq_show_elem,
};
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
index 29c7c06c6bd6..40efde5eedcb 100644
--- a/kernel/bpf/btf.c
+++ b/kernel/bpf/btf.c
@@ -2,6 +2,8 @@
/* Copyright (c) 2018 Facebook */
#include <uapi/linux/btf.h>
+#include <uapi/linux/bpf.h>
+#include <uapi/linux/bpf_perf_event.h>
#include <uapi/linux/types.h>
#include <linux/seq_file.h>
#include <linux/compiler.h>
@@ -16,6 +18,9 @@
#include <linux/sort.h>
#include <linux/bpf_verifier.h>
#include <linux/btf.h>
+#include <linux/skmsg.h>
+#include <linux/perf_event.h>
+#include <net/sock.h>
/* BTF (BPF Type Format) is the meta data format which describes
* the data types of BPF program/map. Hence, it basically focuses
@@ -336,16 +341,6 @@ static bool btf_type_is_fwd(const struct btf_type *t)
return BTF_INFO_KIND(t->info) == BTF_KIND_FWD;
}
-static bool btf_type_is_func(const struct btf_type *t)
-{
- return BTF_INFO_KIND(t->info) == BTF_KIND_FUNC;
-}
-
-static bool btf_type_is_func_proto(const struct btf_type *t)
-{
- return BTF_INFO_KIND(t->info) == BTF_KIND_FUNC_PROTO;
-}
-
static bool btf_type_nosize(const struct btf_type *t)
{
return btf_type_is_void(t) || btf_type_is_fwd(t) ||
@@ -377,16 +372,6 @@ static bool btf_type_is_array(const struct btf_type *t)
return BTF_INFO_KIND(t->info) == BTF_KIND_ARRAY;
}
-static bool btf_type_is_ptr(const struct btf_type *t)
-{
- return BTF_INFO_KIND(t->info) == BTF_KIND_PTR;
-}
-
-static bool btf_type_is_int(const struct btf_type *t)
-{
- return BTF_INFO_KIND(t->info) == BTF_KIND_INT;
-}
-
static bool btf_type_is_var(const struct btf_type *t)
{
return BTF_INFO_KIND(t->info) == BTF_KIND_VAR;
@@ -698,6 +683,13 @@ __printf(4, 5) static void __btf_verifier_log_type(struct btf_verifier_env *env,
if (!bpf_verifier_log_needed(log))
return;
+ /* btf verifier prints all types it is processing via
+ * btf_verifier_log_type(..., fmt = NULL).
+ * Skip those prints for in-kernel BTF verification.
+ */
+ if (log->level == BPF_LOG_KERNEL && !fmt)
+ return;
+
__btf_verifier_log(log, "[%u] %s %s%s",
env->log_type_id,
btf_kind_str[kind],
@@ -735,6 +727,8 @@ static void btf_verifier_log_member(struct btf_verifier_env *env,
if (!bpf_verifier_log_needed(log))
return;
+ if (log->level == BPF_LOG_KERNEL && !fmt)
+ return;
/* The CHECK_META phase already did a btf dump.
*
* If member is logged again, it must hit an error in
@@ -777,6 +771,8 @@ static void btf_verifier_log_vsi(struct btf_verifier_env *env,
if (!bpf_verifier_log_needed(log))
return;
+ if (log->level == BPF_LOG_KERNEL && !fmt)
+ return;
if (env->phase != CHECK_META)
btf_verifier_log_type(env, datasec_type, NULL);
@@ -802,6 +798,8 @@ static void btf_verifier_log_hdr(struct btf_verifier_env *env,
if (!bpf_verifier_log_needed(log))
return;
+ if (log->level == BPF_LOG_KERNEL)
+ return;
hdr = &btf->hdr;
__btf_verifier_log(log, "magic: 0x%x\n", hdr->magic);
__btf_verifier_log(log, "version: %u\n", hdr->version);
@@ -1043,6 +1041,82 @@ static const struct resolve_vertex *env_stack_peak(struct btf_verifier_env *env)
return env->top_stack ? &env->stack[env->top_stack - 1] : NULL;
}
+/* Resolve the size of a passed-in "type"
+ *
+ * type: is an array (e.g. u32 array[x][y])
+ * return type: type "u32[x][y]", i.e. BTF_KIND_ARRAY,
+ * *type_size: (x * y * sizeof(u32)). Hence, *type_size always
+ * corresponds to the return type.
+ * *elem_type: u32
+ * *total_nelems: (x * y). Hence, individual elem size is
+ * (*type_size / *total_nelems)
+ *
+ * type: is not an array (e.g. const struct X)
+ * return type: type "struct X"
+ * *type_size: sizeof(struct X)
+ * *elem_type: same as return type ("struct X")
+ * *total_nelems: 1
+ */
+static const struct btf_type *
+btf_resolve_size(const struct btf *btf, const struct btf_type *type,
+ u32 *type_size, const struct btf_type **elem_type,
+ u32 *total_nelems)
+{
+ const struct btf_type *array_type = NULL;
+ const struct btf_array *array;
+ u32 i, size, nelems = 1;
+
+ for (i = 0; i < MAX_RESOLVE_DEPTH; i++) {
+ switch (BTF_INFO_KIND(type->info)) {
+ /* type->size can be used */
+ case BTF_KIND_INT:
+ case BTF_KIND_STRUCT:
+ case BTF_KIND_UNION:
+ case BTF_KIND_ENUM:
+ size = type->size;
+ goto resolved;
+
+ case BTF_KIND_PTR:
+ size = sizeof(void *);
+ goto resolved;
+
+ /* Modifiers */
+ case BTF_KIND_TYPEDEF:
+ case BTF_KIND_VOLATILE:
+ case BTF_KIND_CONST:
+ case BTF_KIND_RESTRICT:
+ type = btf_type_by_id(btf, type->type);
+ break;
+
+ case BTF_KIND_ARRAY:
+ if (!array_type)
+ array_type = type;
+ array = btf_type_array(type);
+ if (nelems && array->nelems > U32_MAX / nelems)
+ return ERR_PTR(-EINVAL);
+ nelems *= array->nelems;
+ type = btf_type_by_id(btf, array->type);
+ break;
+
+ /* type without size */
+ default:
+ return ERR_PTR(-EINVAL);
+ }
+ }
+
+ return ERR_PTR(-EINVAL);
+
+resolved:
+ if (nelems && size > U32_MAX / nelems)
+ return ERR_PTR(-EINVAL);
+
+ *type_size = nelems * size;
+ *total_nelems = nelems;
+ *elem_type = type;
+
+ return array_type ? : type;
+}
+
/* The input param "type_id" must point to a needs_resolve type */
static const struct btf_type *btf_type_id_resolve(const struct btf *btf,
u32 *type_id)
@@ -2405,7 +2479,8 @@ static s32 btf_enum_check_meta(struct btf_verifier_env *env,
return -EINVAL;
}
-
+ if (env->log.level == BPF_LOG_KERNEL)
+ continue;
btf_verifier_log(env, "\t%s val=%d\n",
__btf_name_by_offset(btf, enums[i].name_off),
enums[i].val);
@@ -3367,6 +3442,685 @@ errout:
return ERR_PTR(err);
}
+extern char __weak _binary__btf_vmlinux_bin_start[];
+extern char __weak _binary__btf_vmlinux_bin_end[];
+extern struct btf *btf_vmlinux;
+
+#define BPF_MAP_TYPE(_id, _ops)
+static union {
+ struct bpf_ctx_convert {
+#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
+ prog_ctx_type _id##_prog; \
+ kern_ctx_type _id##_kern;
+#include <linux/bpf_types.h>
+#undef BPF_PROG_TYPE
+ } *__t;
+ /* 't' is written once under lock. Read many times. */
+ const struct btf_type *t;
+} bpf_ctx_convert;
+enum {
+#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
+ __ctx_convert##_id,
+#include <linux/bpf_types.h>
+#undef BPF_PROG_TYPE
+};
+static u8 bpf_ctx_convert_map[] = {
+#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
+ [_id] = __ctx_convert##_id,
+#include <linux/bpf_types.h>
+#undef BPF_PROG_TYPE
+};
+#undef BPF_MAP_TYPE
+
+static const struct btf_member *
+btf_get_prog_ctx_type(struct bpf_verifier_log *log, struct btf *btf,
+ const struct btf_type *t, enum bpf_prog_type prog_type)
+{
+ const struct btf_type *conv_struct;
+ const struct btf_type *ctx_struct;
+ const struct btf_member *ctx_type;
+ const char *tname, *ctx_tname;
+
+ conv_struct = bpf_ctx_convert.t;
+ if (!conv_struct) {
+ bpf_log(log, "btf_vmlinux is malformed\n");
+ return NULL;
+ }
+ t = btf_type_by_id(btf, t->type);
+ while (btf_type_is_modifier(t))
+ t = btf_type_by_id(btf, t->type);
+ if (!btf_type_is_struct(t)) {
+ /* Only pointer to struct is supported for now.
+ * That means that BPF_PROG_TYPE_TRACEPOINT with BTF
+ * is not supported yet.
+ * BPF_PROG_TYPE_RAW_TRACEPOINT is fine.
+ */
+ bpf_log(log, "BPF program ctx type is not a struct\n");
+ return NULL;
+ }
+ tname = btf_name_by_offset(btf, t->name_off);
+ if (!tname) {
+ bpf_log(log, "BPF program ctx struct doesn't have a name\n");
+ return NULL;
+ }
+ /* prog_type is valid bpf program type. No need for bounds check. */
+ ctx_type = btf_type_member(conv_struct) + bpf_ctx_convert_map[prog_type] * 2;
+ /* ctx_struct is a pointer to prog_ctx_type in vmlinux.
+ * Like 'struct __sk_buff'
+ */
+ ctx_struct = btf_type_by_id(btf_vmlinux, ctx_type->type);
+ if (!ctx_struct)
+ /* should not happen */
+ return NULL;
+ ctx_tname = btf_name_by_offset(btf_vmlinux, ctx_struct->name_off);
+ if (!ctx_tname) {
+ /* should not happen */
+ bpf_log(log, "Please fix kernel include/linux/bpf_types.h\n");
+ return NULL;
+ }
+ /* only compare that prog's ctx type name is the same as
+ * kernel expects. No need to compare field by field.
+ * It's ok for bpf prog to do:
+ * struct __sk_buff {};
+ * int socket_filter_bpf_prog(struct __sk_buff *skb)
+ * { // no fields of skb are ever used }
+ */
+ if (strcmp(ctx_tname, tname))
+ return NULL;
+ return ctx_type;
+}
+
+static int btf_translate_to_vmlinux(struct bpf_verifier_log *log,
+ struct btf *btf,
+ const struct btf_type *t,
+ enum bpf_prog_type prog_type)
+{
+ const struct btf_member *prog_ctx_type, *kern_ctx_type;
+
+ prog_ctx_type = btf_get_prog_ctx_type(log, btf, t, prog_type);
+ if (!prog_ctx_type)
+ return -ENOENT;
+ kern_ctx_type = prog_ctx_type + 1;
+ return kern_ctx_type->type;
+}
+
+struct btf *btf_parse_vmlinux(void)
+{
+ struct btf_verifier_env *env = NULL;
+ struct bpf_verifier_log *log;
+ struct btf *btf = NULL;
+ int err, i;
+
+ env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN);
+ if (!env)
+ return ERR_PTR(-ENOMEM);
+
+ log = &env->log;
+ log->level = BPF_LOG_KERNEL;
+
+ btf = kzalloc(sizeof(*btf), GFP_KERNEL | __GFP_NOWARN);
+ if (!btf) {
+ err = -ENOMEM;
+ goto errout;
+ }
+ env->btf = btf;
+
+ btf->data = _binary__btf_vmlinux_bin_start;
+ btf->data_size = _binary__btf_vmlinux_bin_end -
+ _binary__btf_vmlinux_bin_start;
+
+ err = btf_parse_hdr(env);
+ if (err)
+ goto errout;
+
+ btf->nohdr_data = btf->data + btf->hdr.hdr_len;
+
+ err = btf_parse_str_sec(env);
+ if (err)
+ goto errout;
+
+ err = btf_check_all_metas(env);
+ if (err)
+ goto errout;
+
+ /* find struct bpf_ctx_convert for type checking later */
+ for (i = 1; i <= btf->nr_types; i++) {
+ const struct btf_type *t;
+ const char *tname;
+
+ t = btf_type_by_id(btf, i);
+ if (!__btf_type_is_struct(t))
+ continue;
+ tname = __btf_name_by_offset(btf, t->name_off);
+ if (!strcmp(tname, "bpf_ctx_convert")) {
+ /* btf_parse_vmlinux() runs under bpf_verifier_lock */
+ bpf_ctx_convert.t = t;
+ break;
+ }
+ }
+ if (i > btf->nr_types) {
+ err = -ENOENT;
+ goto errout;
+ }
+
+ btf_verifier_env_free(env);
+ refcount_set(&btf->refcnt, 1);
+ return btf;
+
+errout:
+ btf_verifier_env_free(env);
+ if (btf) {
+ kvfree(btf->types);
+ kfree(btf);
+ }
+ return ERR_PTR(err);
+}
+
+struct btf *bpf_prog_get_target_btf(const struct bpf_prog *prog)
+{
+ struct bpf_prog *tgt_prog = prog->aux->linked_prog;
+
+ if (tgt_prog) {
+ return tgt_prog->aux->btf;
+ } else {
+ return btf_vmlinux;
+ }
+}
+
+bool btf_ctx_access(int off, int size, enum bpf_access_type type,
+ const struct bpf_prog *prog,
+ struct bpf_insn_access_aux *info)
+{
+ const struct btf_type *t = prog->aux->attach_func_proto;
+ struct bpf_prog *tgt_prog = prog->aux->linked_prog;
+ struct btf *btf = bpf_prog_get_target_btf(prog);
+ const char *tname = prog->aux->attach_func_name;
+ struct bpf_verifier_log *log = info->log;
+ const struct btf_param *args;
+ u32 nr_args, arg;
+ int ret;
+
+ if (off % 8) {
+ bpf_log(log, "func '%s' offset %d is not multiple of 8\n",
+ tname, off);
+ return false;
+ }
+ arg = off / 8;
+ args = (const struct btf_param *)(t + 1);
+ /* if (t == NULL) Fall back to default BPF prog with 5 u64 arguments */
+ nr_args = t ? btf_type_vlen(t) : 5;
+ if (prog->aux->attach_btf_trace) {
+ /* skip first 'void *__data' argument in btf_trace_##name typedef */
+ args++;
+ nr_args--;
+ }
+
+ if (prog->expected_attach_type == BPF_TRACE_FEXIT &&
+ arg == nr_args) {
+ if (!t)
+ /* Default prog with 5 args. 6th arg is retval. */
+ return true;
+ /* function return type */
+ t = btf_type_by_id(btf, t->type);
+ } else if (arg >= nr_args) {
+ bpf_log(log, "func '%s' doesn't have %d-th argument\n",
+ tname, arg + 1);
+ return false;
+ } else {
+ if (!t)
+ /* Default prog with 5 args */
+ return true;
+ t = btf_type_by_id(btf, args[arg].type);
+ }
+ /* skip modifiers */
+ while (btf_type_is_modifier(t))
+ t = btf_type_by_id(btf, t->type);
+ if (btf_type_is_int(t))
+ /* accessing a scalar */
+ return true;
+ if (!btf_type_is_ptr(t)) {
+ bpf_log(log,
+ "func '%s' arg%d '%s' has type %s. Only pointer access is allowed\n",
+ tname, arg,
+ __btf_name_by_offset(btf, t->name_off),
+ btf_kind_str[BTF_INFO_KIND(t->info)]);
+ return false;
+ }
+ if (t->type == 0)
+ /* This is a pointer to void.
+ * It is the same as scalar from the verifier safety pov.
+ * No further pointer walking is allowed.
+ */
+ return true;
+
+ /* this is a pointer to another type */
+ info->reg_type = PTR_TO_BTF_ID;
+ info->btf_id = t->type;
+
+ if (tgt_prog) {
+ ret = btf_translate_to_vmlinux(log, btf, t, tgt_prog->type);
+ if (ret > 0) {
+ info->btf_id = ret;
+ return true;
+ } else {
+ return false;
+ }
+ }
+ t = btf_type_by_id(btf, t->type);
+ /* skip modifiers */
+ while (btf_type_is_modifier(t))
+ t = btf_type_by_id(btf, t->type);
+ if (!btf_type_is_struct(t)) {
+ bpf_log(log,
+ "func '%s' arg%d type %s is not a struct\n",
+ tname, arg, btf_kind_str[BTF_INFO_KIND(t->info)]);
+ return false;
+ }
+ bpf_log(log, "func '%s' arg%d has btf_id %d type %s '%s'\n",
+ tname, arg, info->btf_id, btf_kind_str[BTF_INFO_KIND(t->info)],
+ __btf_name_by_offset(btf, t->name_off));
+ return true;
+}
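To make the off/8 convention above concrete: the raw tracing ctx is modelled as an array of 8-byte slots, so a program load of 8 bytes at ctx + 16 selects arg == 2 and is validated against args[2].type (in BPF C, roughly: third = ((__u64 *)ctx)[2];). For an fexit program, a load one slot past the last argument reaches the return value, per the BPF_TRACE_FEXIT branch above.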
+
+int btf_struct_access(struct bpf_verifier_log *log,
+ const struct btf_type *t, int off, int size,
+ enum bpf_access_type atype,
+ u32 *next_btf_id)
+{
+ u32 i, moff, mtrue_end, msize = 0, total_nelems = 0;
+ const struct btf_type *mtype, *elem_type = NULL;
+ const struct btf_member *member;
+ const char *tname, *mname;
+
+again:
+ tname = __btf_name_by_offset(btf_vmlinux, t->name_off);
+ if (!btf_type_is_struct(t)) {
+ bpf_log(log, "Type '%s' is not a struct", tname);
+ return -EINVAL;
+ }
+
+ for_each_member(i, t, member) {
+ if (btf_member_bitfield_size(t, member))
+ /* bitfields are not supported yet */
+ continue;
+
+ /* offset of the field in bytes */
+ moff = btf_member_bit_offset(t, member) / 8;
+ if (off + size <= moff)
+ /* won't find anything, field is already too far */
+ break;
+ /* In case of "off" is pointing to holes of a struct */
+ if (off < moff)
+ continue;
+
+ /* type of the field */
+ mtype = btf_type_by_id(btf_vmlinux, member->type);
+ mname = __btf_name_by_offset(btf_vmlinux, member->name_off);
+
+ mtype = btf_resolve_size(btf_vmlinux, mtype, &msize,
+ &elem_type, &total_nelems);
+ if (IS_ERR(mtype)) {
+ bpf_log(log, "field %s doesn't have size\n", mname);
+ return -EFAULT;
+ }
+
+ mtrue_end = moff + msize;
+ if (off >= mtrue_end)
+ /* no overlap with member, keep iterating */
+ continue;
+
+ if (btf_type_is_array(mtype)) {
+ u32 elem_idx;
+
+ /* btf_resolve_size() above helps to
+ * linearize a multi-dimensional array.
+ *
+ * The logic here is treating an array
+ * in a struct as the following way:
+ *
+ * struct outer {
+ * struct inner array[2][2];
+ * };
+ *
+ * looks like:
+ *
+ * struct outer {
+ * struct inner array_elem0;
+ * struct inner array_elem1;
+ * struct inner array_elem2;
+ * struct inner array_elem3;
+ * };
+ *
+ * When accessing outer->array[1][0], it moves
+ * moff to "array_elem2", set mtype to
+ * "struct inner", and msize also becomes
+ * sizeof(struct inner). Then most of the
+ * remaining logic will fall through without
+ * caring whether the current member is an array or
+ * not.
+ *
+ * Unlike mtype/msize/moff, mtrue_end does not
+ * change. The naming difference ("_true") tells
+ * that it is not always corresponding to
+ * the current mtype/msize/moff.
+ * It is the true end of the current
+ * member (i.e. array in this case). That
+ * will allow an int array to be accessed like
+ * a scratch space,
+ * i.e. allow access beyond the size of
+ * the array's element as long as it is
+ * within the mtrue_end boundary.
+ */
+
+ /* skip empty array */
+ if (moff == mtrue_end)
+ continue;
+
+ msize /= total_nelems;
+ elem_idx = (off - moff) / msize;
+ moff += elem_idx * msize;
+ mtype = elem_type;
+ }
+
+ /* the 'off' we're looking for is either equal to start
+ * of this field or inside of this struct
+ */
+ if (btf_type_is_struct(mtype)) {
+ /* our field must be inside that union or struct */
+ t = mtype;
+
+ /* adjust offset we're looking for */
+ off -= moff;
+ goto again;
+ }
+
+ if (btf_type_is_ptr(mtype)) {
+ const struct btf_type *stype;
+
+ if (msize != size || off != moff) {
+ bpf_log(log,
+ "cannot access ptr member %s with moff %u in struct %s with off %u size %u\n",
+ mname, moff, tname, off, size);
+ return -EACCES;
+ }
+
+ stype = btf_type_by_id(btf_vmlinux, mtype->type);
+ /* skip modifiers */
+ while (btf_type_is_modifier(stype))
+ stype = btf_type_by_id(btf_vmlinux, stype->type);
+ if (btf_type_is_struct(stype)) {
+ *next_btf_id = mtype->type;
+ return PTR_TO_BTF_ID;
+ }
+ }
+
+ /* Allow more flexible access within an int as long as
+ * it is within mtrue_end.
+ * Since mtrue_end could be the end of an array,
+ * that also allows using an array of int as a scratch
+ * space. e.g. skb->cb[].
+ */
+ if (off + size > mtrue_end) {
+ bpf_log(log,
+ "access beyond the end of member %s (mend:%u) in struct %s with off %u size %u\n",
+ mname, mtrue_end, tname, off, size);
+ return -EACCES;
+ }
+
+ return SCALAR_VALUE;
+ }
+ bpf_log(log, "struct %s doesn't have field at offset %d\n", tname, off);
+ return -EINVAL;
+}
+
+static int __btf_resolve_helper_id(struct bpf_verifier_log *log, void *fn,
+ int arg)
+{
+ char fnname[KSYM_SYMBOL_LEN + 4] = "btf_";
+ const struct btf_param *args;
+ const struct btf_type *t;
+ const char *tname, *sym;
+ u32 btf_id, i;
+
+ if (IS_ERR(btf_vmlinux)) {
+ bpf_log(log, "btf_vmlinux is malformed\n");
+ return -EINVAL;
+ }
+
+ sym = kallsyms_lookup((long)fn, NULL, NULL, NULL, fnname + 4);
+ if (!sym) {
+ bpf_log(log, "kernel doesn't have kallsyms\n");
+ return -EFAULT;
+ }
+
+ for (i = 1; i <= btf_vmlinux->nr_types; i++) {
+ t = btf_type_by_id(btf_vmlinux, i);
+ if (BTF_INFO_KIND(t->info) != BTF_KIND_TYPEDEF)
+ continue;
+ tname = __btf_name_by_offset(btf_vmlinux, t->name_off);
+ if (!strcmp(tname, fnname))
+ break;
+ }
+ if (i > btf_vmlinux->nr_types) {
+ bpf_log(log, "helper %s type is not found\n", fnname);
+ return -ENOENT;
+ }
+
+ t = btf_type_by_id(btf_vmlinux, t->type);
+ if (!btf_type_is_ptr(t))
+ return -EFAULT;
+ t = btf_type_by_id(btf_vmlinux, t->type);
+ if (!btf_type_is_func_proto(t))
+ return -EFAULT;
+
+ args = (const struct btf_param *)(t + 1);
+ if (arg >= btf_type_vlen(t)) {
+ bpf_log(log, "bpf helper %s doesn't have %d-th argument\n",
+ fnname, arg);
+ return -EINVAL;
+ }
+
+ t = btf_type_by_id(btf_vmlinux, args[arg].type);
+ if (!btf_type_is_ptr(t) || !t->type) {
+ /* anything but the pointer to struct is a helper config bug */
+ bpf_log(log, "ARG_PTR_TO_BTF is misconfigured\n");
+ return -EFAULT;
+ }
+ btf_id = t->type;
+ t = btf_type_by_id(btf_vmlinux, t->type);
+ /* skip modifiers */
+ while (btf_type_is_modifier(t)) {
+ btf_id = t->type;
+ t = btf_type_by_id(btf_vmlinux, t->type);
+ }
+ if (!btf_type_is_struct(t)) {
+ bpf_log(log, "ARG_PTR_TO_BTF is not a struct\n");
+ return -EFAULT;
+ }
+ bpf_log(log, "helper %s arg%d has btf_id %d struct %s\n", fnname + 4,
+ arg, btf_id, __btf_name_by_offset(btf_vmlinux, t->name_off));
+ return btf_id;
+}
+
+int btf_resolve_helper_id(struct bpf_verifier_log *log,
+ const struct bpf_func_proto *fn, int arg)
+{
+ int *btf_id = &fn->btf_id[arg];
+ int ret;
+
+ if (fn->arg_type[arg] != ARG_PTR_TO_BTF_ID)
+ return -EINVAL;
+
+ ret = READ_ONCE(*btf_id);
+ if (ret)
+ return ret;
+ /* ok to race the search. The result is the same */
+ ret = __btf_resolve_helper_id(log, fn->func, arg);
+ if (!ret) {
+ /* Function argument cannot be type 'void' */
+ bpf_log(log, "BTF resolution bug\n");
+ return -EFAULT;
+ }
+ WRITE_ONCE(*btf_id, ret);
+ return ret;
+}
+
+static int __get_type_size(struct btf *btf, u32 btf_id,
+ const struct btf_type **bad_type)
+{
+ const struct btf_type *t;
+
+ if (!btf_id)
+ /* void */
+ return 0;
+ t = btf_type_by_id(btf, btf_id);
+ while (t && btf_type_is_modifier(t))
+ t = btf_type_by_id(btf, t->type);
+ if (!t)
+ return -EINVAL;
+ if (btf_type_is_ptr(t))
+ /* kernel size of pointer. Not BPF's size of pointer */
+ return sizeof(void *);
+ if (btf_type_is_int(t) || btf_type_is_enum(t))
+ return t->size;
+ *bad_type = t;
+ return -EINVAL;
+}
+
+int btf_distill_func_proto(struct bpf_verifier_log *log,
+ struct btf *btf,
+ const struct btf_type *func,
+ const char *tname,
+ struct btf_func_model *m)
+{
+ const struct btf_param *args;
+ const struct btf_type *t;
+ u32 i, nargs;
+ int ret;
+
+ if (!func) {
+ /* BTF function prototype doesn't match the verifier types.
+ * Fall back to 5 u64 args.
+ */
+ for (i = 0; i < 5; i++)
+ m->arg_size[i] = 8;
+ m->ret_size = 8;
+ m->nr_args = 5;
+ return 0;
+ }
+ args = (const struct btf_param *)(func + 1);
+ nargs = btf_type_vlen(func);
+ if (nargs >= MAX_BPF_FUNC_ARGS) {
+ bpf_log(log,
+ "The function %s has %d arguments. Too many.\n",
+ tname, nargs);
+ return -EINVAL;
+ }
+ ret = __get_type_size(btf, func->type, &t);
+ if (ret < 0) {
+ bpf_log(log,
+ "The function %s return type %s is unsupported.\n",
+ tname, btf_kind_str[BTF_INFO_KIND(t->info)]);
+ return -EINVAL;
+ }
+ m->ret_size = ret;
+
+ for (i = 0; i < nargs; i++) {
+ ret = __get_type_size(btf, args[i].type, &t);
+ if (ret < 0) {
+ bpf_log(log,
+ "The function %s arg%d type %s is unsupported.\n",
+ tname, i, btf_kind_str[BTF_INFO_KIND(t->info)]);
+ return -EINVAL;
+ }
+ m->arg_size[i] = ret;
+ }
+ m->nr_args = nargs;
+ return 0;
+}
+
+int btf_check_func_arg_match(struct bpf_verifier_env *env, int subprog)
+{
+ struct bpf_verifier_state *st = env->cur_state;
+ struct bpf_func_state *func = st->frame[st->curframe];
+ struct bpf_reg_state *reg = func->regs;
+ struct bpf_verifier_log *log = &env->log;
+ struct bpf_prog *prog = env->prog;
+ struct btf *btf = prog->aux->btf;
+ const struct btf_param *args;
+ const struct btf_type *t;
+ u32 i, nargs, btf_id;
+ const char *tname;
+
+ if (!prog->aux->func_info)
+ return 0;
+
+ btf_id = prog->aux->func_info[subprog].type_id;
+ if (!btf_id)
+ return 0;
+
+ if (prog->aux->func_info_aux[subprog].unreliable)
+ return 0;
+
+ t = btf_type_by_id(btf, btf_id);
+ if (!t || !btf_type_is_func(t)) {
+ bpf_log(log, "BTF of subprog %d doesn't point to KIND_FUNC\n",
+ subprog);
+ return -EINVAL;
+ }
+ tname = btf_name_by_offset(btf, t->name_off);
+
+ t = btf_type_by_id(btf, t->type);
+ if (!t || !btf_type_is_func_proto(t)) {
+ bpf_log(log, "Invalid type of func %s\n", tname);
+ return -EINVAL;
+ }
+ args = (const struct btf_param *)(t + 1);
+ nargs = btf_type_vlen(t);
+ if (nargs > 5) {
+ bpf_log(log, "Function %s has %d > 5 args\n", tname, nargs);
+ goto out;
+ }
+ /* check that BTF function arguments match actual types that the
+ * verifier sees.
+ */
+ for (i = 0; i < nargs; i++) {
+ t = btf_type_by_id(btf, args[i].type);
+ while (btf_type_is_modifier(t))
+ t = btf_type_by_id(btf, t->type);
+ if (btf_type_is_int(t) || btf_type_is_enum(t)) {
+ if (reg[i + 1].type == SCALAR_VALUE)
+ continue;
+ bpf_log(log, "R%d is not a scalar\n", i + 1);
+ goto out;
+ }
+ if (btf_type_is_ptr(t)) {
+ if (reg[i + 1].type == SCALAR_VALUE) {
+ bpf_log(log, "R%d is not a pointer\n", i + 1);
+ goto out;
+ }
+ /* If program is passing PTR_TO_CTX into subprogram
+ * check that BTF type matches.
+ */
+ if (reg[i + 1].type == PTR_TO_CTX &&
+ !btf_get_prog_ctx_type(log, btf, t, prog->type))
+ goto out;
+ /* All other pointers are ok */
+ continue;
+ }
+ bpf_log(log, "Unrecognized argument type %s\n",
+ btf_kind_str[BTF_INFO_KIND(t->info)]);
+ goto out;
+ }
+ return 0;
+out:
+ /* LLVM optimizations can remove arguments from static functions. */
+ bpf_log(log,
+ "Type info disagrees with actual arguments due to compiler optimizations\n");
+ prog->aux->func_info_aux[subprog].unreliable = true;
+ return 0;
+}
+
void btf_type_seq_show(const struct btf *btf, u32 type_id, void *obj,
struct seq_file *m)
{
diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
index a3eaf08e7dd3..9f90d3c92bda 100644
--- a/kernel/bpf/cgroup.c
+++ b/kernel/bpf/cgroup.c
@@ -180,8 +180,8 @@ static void activate_effective_progs(struct cgroup *cgrp,
enum bpf_attach_type type,
struct bpf_prog_array *old_array)
{
- rcu_swap_protected(cgrp->bpf.effective[type], old_array,
- lockdep_is_held(&cgroup_mutex));
+ old_array = rcu_replace_pointer(cgrp->bpf.effective[type], old_array,
+ lockdep_is_held(&cgroup_mutex));
/* free prog array after grace period, since __cgroup_bpf_run_*()
* might be still walking the array
*/
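A gloss on the conversion above: rcu_swap_protected() updated its pointer argument in place, whereas rcu_replace_pointer() returns the old value, hence the explicit assignment to old_array. The general shape of the pattern (names illustrative) is:

	old = rcu_replace_pointer(gp, new, lockdep_is_held(&mylock));
	synchronize_rcu();	/* ensure no reader still sees 'old' */
	kfree(old);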
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index ef0e1e3e66f4..49e32acad7d8 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -30,7 +30,8 @@
#include <linux/kallsyms.h>
#include <linux/rcupdate.h>
#include <linux/perf_event.h>
-
+#include <linux/extable.h>
+#include <linux/log2.h>
#include <asm/unaligned.h>
/* Registers */
@@ -255,6 +256,7 @@ void __bpf_prog_free(struct bpf_prog *fp)
{
if (fp->aux) {
free_percpu(fp->aux->stats);
+ kfree(fp->aux->poke_tab);
kfree(fp->aux);
}
vfree(fp);
@@ -668,9 +670,6 @@ static struct bpf_prog *bpf_prog_kallsyms_find(unsigned long addr)
{
struct latch_tree_node *n;
- if (!bpf_jit_kallsyms_enabled())
- return NULL;
-
n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops);
return n ?
container_of(n, struct bpf_prog_aux, ksym_tnode)->prog :
@@ -712,6 +711,24 @@ bool is_bpf_text_address(unsigned long addr)
return ret;
}
+const struct exception_table_entry *search_bpf_extables(unsigned long addr)
+{
+ const struct exception_table_entry *e = NULL;
+ struct bpf_prog *prog;
+
+ rcu_read_lock();
+ prog = bpf_prog_kallsyms_find(addr);
+ if (!prog)
+ goto out;
+ if (!prog->aux->num_exentries)
+ goto out;
+
+ e = search_extable(prog->aux->extable, prog->aux->num_exentries, addr);
+out:
+ rcu_read_unlock();
+ return e;
+}
+
int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
char *sym)
{
@@ -740,6 +757,39 @@ int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
return ret;
}
+int bpf_jit_add_poke_descriptor(struct bpf_prog *prog,
+ struct bpf_jit_poke_descriptor *poke)
+{
+ struct bpf_jit_poke_descriptor *tab = prog->aux->poke_tab;
+ static const u32 poke_tab_max = 1024;
+ u32 slot = prog->aux->size_poke_tab;
+ u32 size = slot + 1;
+
+ if (size > poke_tab_max)
+ return -ENOSPC;
+ if (poke->ip || poke->ip_stable || poke->adj_off)
+ return -EINVAL;
+
+ switch (poke->reason) {
+ case BPF_POKE_REASON_TAIL_CALL:
+ if (!poke->tail_call.map)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ tab = krealloc(tab, size * sizeof(*poke), GFP_KERNEL);
+ if (!tab)
+ return -ENOMEM;
+
+ memcpy(&tab[slot], poke, sizeof(*poke));
+ prog->aux->size_poke_tab = size;
+ prog->aux->poke_tab = tab;
+
+ return slot;
+}
+
static atomic_long_t bpf_jit_current;
/* Can be overridden by an arch's JIT compiler if it has a custom,
@@ -800,6 +850,9 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
struct bpf_binary_header *hdr;
u32 size, hole, start, pages;
+ WARN_ON_ONCE(!is_power_of_2(alignment) ||
+ alignment > BPF_IMAGE_ALIGNMENT);
+
/* Most of BPF filters are really small, but if some of them
* fill a page, allow at least 128 extra bytes to insert a
* random section of illegal instructions.
@@ -1291,6 +1344,12 @@ bool bpf_opcode_in_insntable(u8 code)
}
#ifndef CONFIG_BPF_JIT_ALWAYS_ON
+u64 __weak bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr)
+{
+ memset(dst, 0, size);
+ return -EFAULT;
+}
+
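The weak fallback above documents the probe-read contract: on any failure the destination must be zeroed before -EFAULT is returned, so the interpreter's LDX_PROBE_MEM_* labels can use the destination register unconditionally. A hedged userspace sketch of that contract, with an explicit valid range standing in for a fault-safe kernel access:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static int probe_read(void *dst, uint32_t size, const void *src,
		      const void *valid_base, size_t valid_len)
{
	const char *s = src, *base = valid_base;

	if (s < base || s + size > base + valid_len) {
		memset(dst, 0, size);	/* zero-fill on "fault" */
		return -EFAULT;
	}
	memcpy(dst, s, size);
	return 0;
}

int main(void)
{
	uint32_t buf = 0xdeadbeef, dst = 0xffffffff;
	int err = probe_read(&dst, sizeof(dst), (char *)&buf + 64,
			     &buf, sizeof(buf));

	printf("err=%d dst=%u (zeroed on fault)\n", err, dst);
	return 0;
}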
/**
* __bpf_prog_run - run eBPF program on a given context
* @regs: is the array of MAX_BPF_EXT_REG eBPF pseudo-registers
@@ -1310,6 +1369,10 @@ static u64 __no_fgcse ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u6
/* Non-UAPI available opcodes. */
[BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS,
[BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
+ [BPF_LDX | BPF_PROBE_MEM | BPF_B] = &&LDX_PROBE_MEM_B,
+ [BPF_LDX | BPF_PROBE_MEM | BPF_H] = &&LDX_PROBE_MEM_H,
+ [BPF_LDX | BPF_PROBE_MEM | BPF_W] = &&LDX_PROBE_MEM_W,
+ [BPF_LDX | BPF_PROBE_MEM | BPF_DW] = &&LDX_PROBE_MEM_DW,
};
#undef BPF_INSN_3_LBL
#undef BPF_INSN_2_LBL
@@ -1542,6 +1605,16 @@ out:
LDST(W, u32)
LDST(DW, u64)
#undef LDST
+#define LDX_PROBE(SIZEOP, SIZE) \
+ LDX_PROBE_MEM_##SIZEOP: \
+ bpf_probe_read_kernel(&DST, SIZE, (const void *)(long) (SRC + insn->off)); \
+ CONT;
+ LDX_PROBE(B, 1)
+ LDX_PROBE(H, 2)
+ LDX_PROBE(W, 4)
+ LDX_PROBE(DW, 8)
+#undef LDX_PROBE
+
STX_XADD_W: /* lock xadd *(u32 *)(dst_reg + off16) += src_reg */
atomic_add((u32) SRC, (atomic_t *)(unsigned long)
(DST + insn->off));
@@ -1652,18 +1725,17 @@ bool bpf_prog_array_compatible(struct bpf_array *array,
if (fp->kprobe_override)
return false;
- if (!array->owner_prog_type) {
+ if (!array->aux->type) {
/* There's no owner yet where we could check for
* compatibility.
*/
- array->owner_prog_type = fp->type;
- array->owner_jited = fp->jited;
-
+ array->aux->type = fp->type;
+ array->aux->jited = fp->jited;
return true;
}
- return array->owner_prog_type == fp->type &&
- array->owner_jited == fp->jited;
+ return array->aux->type == fp->type &&
+ array->aux->jited == fp->jited;
}
static int bpf_check_tail_call(const struct bpf_prog *fp)
@@ -1964,18 +2036,47 @@ int bpf_prog_array_copy_info(struct bpf_prog_array *array,
: 0;
}
+static void bpf_free_cgroup_storage(struct bpf_prog_aux *aux)
+{
+ enum bpf_cgroup_storage_type stype;
+
+ for_each_cgroup_storage_type(stype) {
+ if (!aux->cgroup_storage[stype])
+ continue;
+ bpf_cgroup_storage_release(aux->prog,
+ aux->cgroup_storage[stype]);
+ }
+}
+
+static void bpf_free_used_maps(struct bpf_prog_aux *aux)
+{
+ struct bpf_map *map;
+ int i;
+
+ bpf_free_cgroup_storage(aux);
+ for (i = 0; i < aux->used_map_cnt; i++) {
+ map = aux->used_maps[i];
+ if (map->ops->map_poke_untrack)
+ map->ops->map_poke_untrack(map, aux);
+ bpf_map_put(map);
+ }
+ kfree(aux->used_maps);
+}
+
static void bpf_prog_free_deferred(struct work_struct *work)
{
struct bpf_prog_aux *aux;
int i;
aux = container_of(work, struct bpf_prog_aux, work);
+ bpf_free_used_maps(aux);
if (bpf_prog_is_dev_bound(aux))
bpf_prog_offload_destroy(aux->prog);
#ifdef CONFIG_PERF_EVENTS
if (aux->prog->has_callchain_buf)
put_callchain_buffers();
#endif
+ bpf_trampoline_put(aux->trampoline);
for (i = 0; i < aux->func_cnt; i++)
bpf_jit_free(aux->func[i]);
if (aux->func_cnt) {
@@ -1991,6 +2092,8 @@ void bpf_prog_free(struct bpf_prog *fp)
{
struct bpf_prog_aux *aux = fp->aux;
+ if (aux->linked_prog)
+ bpf_prog_put(aux->linked_prog);
INIT_WORK(&aux->work, bpf_prog_free_deferred);
schedule_work(&aux->work);
}
@@ -2105,6 +2208,12 @@ int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
return -EFAULT;
}
+int __weak bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
+ void *addr1, void *addr2)
+{
+ return -ENOTSUPP;
+}
+
DEFINE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
EXPORT_SYMBOL(bpf_stats_enabled_key);
diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
index 3867864cdc2f..3d3d61b5985b 100644
--- a/kernel/bpf/devmap.c
+++ b/kernel/bpf/devmap.c
@@ -74,7 +74,7 @@ struct bpf_dtab_netdev {
struct bpf_dtab {
struct bpf_map map;
- struct bpf_dtab_netdev **netdev_map;
+ struct bpf_dtab_netdev **netdev_map; /* DEVMAP type only */
struct list_head __percpu *flush_list;
struct list_head list;
@@ -101,6 +101,12 @@ static struct hlist_head *dev_map_create_hash(unsigned int entries)
return hash;
}
+static inline struct hlist_head *dev_map_index_hash(struct bpf_dtab *dtab,
+ int idx)
+{
+ return &dtab->dev_index_head[idx & (dtab->n_buckets - 1)];
+}
+
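dev_map_index_hash() relies on n_buckets being a power of two (it comes from roundup_pow_of_two() below), which lets "idx & (n_buckets - 1)" replace a modulo. A small standalone illustration:

#include <stdio.h>

/* Round up to the next power of two so that masking with (n - 1)
 * equals "idx % n" without a division. Minimal, no overflow check. */
static unsigned int round_up_pow2(unsigned int x)
{
	unsigned int n = 1;

	while (n < x)
		n <<= 1;
	return n;
}

int main(void)
{
	unsigned int n_buckets = round_up_pow2(12);	/* -> 16 */
	unsigned int idx;

	for (idx = 13; idx < 20; idx++)
		printf("idx %2u -> bucket %u\n", idx, idx & (n_buckets - 1));
	return 0;
}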
static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
{
int err, cpu;
@@ -120,8 +126,7 @@ static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
bpf_map_init_from_attr(&dtab->map, attr);
/* make sure page count doesn't overflow */
- cost = (u64) dtab->map.max_entries * sizeof(struct bpf_dtab_netdev *);
- cost += sizeof(struct list_head) * num_possible_cpus();
+ cost = (u64) sizeof(struct list_head) * num_possible_cpus();
if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
dtab->n_buckets = roundup_pow_of_two(dtab->map.max_entries);
@@ -129,6 +134,8 @@ static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
if (!dtab->n_buckets) /* Overflow check */
return -EINVAL;
cost += (u64) sizeof(struct hlist_head) * dtab->n_buckets;
+ } else {
+ cost += (u64) dtab->map.max_entries * sizeof(struct bpf_dtab_netdev *);
}
/* if map size is larger than memlock limit, reject it */
@@ -143,24 +150,22 @@ static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
for_each_possible_cpu(cpu)
INIT_LIST_HEAD(per_cpu_ptr(dtab->flush_list, cpu));
- dtab->netdev_map = bpf_map_area_alloc(dtab->map.max_entries *
- sizeof(struct bpf_dtab_netdev *),
- dtab->map.numa_node);
- if (!dtab->netdev_map)
- goto free_percpu;
-
if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
dtab->dev_index_head = dev_map_create_hash(dtab->n_buckets);
if (!dtab->dev_index_head)
- goto free_map_area;
+ goto free_percpu;
spin_lock_init(&dtab->index_lock);
+ } else {
+ dtab->netdev_map = bpf_map_area_alloc(dtab->map.max_entries *
+ sizeof(struct bpf_dtab_netdev *),
+ dtab->map.numa_node);
+ if (!dtab->netdev_map)
+ goto free_percpu;
}
return 0;
-free_map_area:
- bpf_map_area_free(dtab->netdev_map);
free_percpu:
free_percpu(dtab->flush_list);
free_charge:
@@ -228,21 +233,40 @@ static void dev_map_free(struct bpf_map *map)
cond_resched();
}
- for (i = 0; i < dtab->map.max_entries; i++) {
- struct bpf_dtab_netdev *dev;
+ if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
+ for (i = 0; i < dtab->n_buckets; i++) {
+ struct bpf_dtab_netdev *dev;
+ struct hlist_head *head;
+ struct hlist_node *next;
- dev = dtab->netdev_map[i];
- if (!dev)
- continue;
+ head = dev_map_index_hash(dtab, i);
- free_percpu(dev->bulkq);
- dev_put(dev->dev);
- kfree(dev);
+ hlist_for_each_entry_safe(dev, next, head, index_hlist) {
+ hlist_del_rcu(&dev->index_hlist);
+ free_percpu(dev->bulkq);
+ dev_put(dev->dev);
+ kfree(dev);
+ }
+ }
+
+ kfree(dtab->dev_index_head);
+ } else {
+ for (i = 0; i < dtab->map.max_entries; i++) {
+ struct bpf_dtab_netdev *dev;
+
+ dev = dtab->netdev_map[i];
+ if (!dev)
+ continue;
+
+ free_percpu(dev->bulkq);
+ dev_put(dev->dev);
+ kfree(dev);
+ }
+
+ bpf_map_area_free(dtab->netdev_map);
}
free_percpu(dtab->flush_list);
- bpf_map_area_free(dtab->netdev_map);
- kfree(dtab->dev_index_head);
kfree(dtab);
}
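The DEVMAP_HASH teardown walks each bucket with hlist_for_each_entry_safe(), which caches the next node so the current one can be unlinked and freed mid-walk. The same pattern on a plain singly-linked list:

#include <stdio.h>
#include <stdlib.h>

struct node { int v; struct node *next; };

int main(void)
{
	struct node *head = NULL, *n, *next;
	int i;

	for (i = 0; i < 3; i++) {
		n = malloc(sizeof(*n));
		n->v = i;
		n->next = head;
		head = n;
	}

	/* remember the successor before freeing the current node,
	 * exactly what the _safe iterator variant does */
	for (n = head; n; n = next) {
		next = n->next;
		printf("freeing %d\n", n->v);
		free(n);
	}
	return 0;
}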
@@ -263,12 +287,6 @@ static int dev_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
return 0;
}
-static inline struct hlist_head *dev_map_index_hash(struct bpf_dtab *dtab,
- int idx)
-{
- return &dtab->dev_index_head[idx & (dtab->n_buckets - 1)];
-}
-
struct bpf_dtab_netdev *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key)
{
struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index 5e28718928ca..cada974c9f4e 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -317,7 +317,7 @@ BPF_CALL_0(bpf_get_current_cgroup_id)
{
struct cgroup *cgrp = task_dfl_cgroup(current);
- return cgrp->kn->id.id;
+ return cgroup_id(cgrp);
}
const struct bpf_func_proto bpf_get_current_cgroup_id_proto = {
diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c
index a70f7209cda3..ecf42bec38c0 100644
--- a/kernel/bpf/inode.c
+++ b/kernel/bpf/inode.c
@@ -31,10 +31,10 @@ static void *bpf_any_get(void *raw, enum bpf_type type)
{
switch (type) {
case BPF_TYPE_PROG:
- raw = bpf_prog_inc(raw);
+ bpf_prog_inc(raw);
break;
case BPF_TYPE_MAP:
- raw = bpf_map_inc(raw, true);
+ bpf_map_inc_with_uref(raw);
break;
default:
WARN_ON_ONCE(1);
@@ -534,7 +534,8 @@ static struct bpf_prog *__get_prog_inode(struct inode *inode, enum bpf_prog_type
if (!bpf_prog_get_ok(prog, &type, false))
return ERR_PTR(-EINVAL);
- return bpf_prog_inc(prog);
+ bpf_prog_inc(prog);
+ return prog;
}
struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type)
diff --git a/kernel/bpf/local_storage.c b/kernel/bpf/local_storage.c
index addd6fdceec8..2ba750725cb2 100644
--- a/kernel/bpf/local_storage.c
+++ b/kernel/bpf/local_storage.c
@@ -569,7 +569,7 @@ void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage,
return;
storage->key.attach_type = type;
- storage->key.cgroup_inode_id = cgroup->kn->id.id;
+ storage->key.cgroup_inode_id = cgroup_id(cgroup);
map = storage->map;
diff --git a/kernel/bpf/map_in_map.c b/kernel/bpf/map_in_map.c
index fab4fb134547..5e9366b33f0f 100644
--- a/kernel/bpf/map_in_map.c
+++ b/kernel/bpf/map_in_map.c
@@ -17,9 +17,8 @@ struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd)
if (IS_ERR(inner_map))
return inner_map;
- /* prog_array->owner_prog_type and owner_jited
- * is a runtime binding. Doing static check alone
- * in the verifier is not enough.
+ /* prog_array->aux->{type,jited} is a runtime binding.
+ * Doing static check alone in the verifier is not enough.
*/
if (inner_map->map_type == BPF_MAP_TYPE_PROG_ARRAY ||
inner_map->map_type == BPF_MAP_TYPE_CGROUP_STORAGE ||
@@ -98,7 +97,7 @@ void *bpf_map_fd_get_ptr(struct bpf_map *map,
return inner_map;
if (bpf_map_meta_equal(map->inner_map_meta, inner_map))
- inner_map = bpf_map_inc(inner_map, false);
+ bpf_map_inc(inner_map);
else
inner_map = ERR_PTR(-EINVAL);
diff --git a/kernel/bpf/offload.c b/kernel/bpf/offload.c
index ba635209ae9a..5b9da0954a27 100644
--- a/kernel/bpf/offload.c
+++ b/kernel/bpf/offload.c
@@ -678,8 +678,10 @@ bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops, void *priv)
down_write(&bpf_devs_lock);
if (!offdevs_inited) {
err = rhashtable_init(&offdevs, &offdevs_params);
- if (err)
+ if (err) {
+ up_write(&bpf_devs_lock);
return ERR_PTR(err);
+ }
offdevs_inited = true;
}
up_write(&bpf_devs_lock);
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index 052580c33d26..caca752ee5e6 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -287,7 +287,7 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
bool irq_work_busy = false;
struct stack_map_irq_work *work = NULL;
- if (in_nmi()) {
+ if (irqs_disabled()) {
work = this_cpu_ptr(&up_read_work);
if (work->irq_work.flags & IRQ_WORK_BUSY)
/* cannot queue more up_read, fallback */
@@ -295,8 +295,9 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
}
/*
- * We cannot do up_read() in nmi context. To do build_id lookup
- * in nmi context, we need to run up_read() in irq_work. We use
+ * We cannot do up_read() when IRQs are disabled, because of the
+ * risk of deadlocking on rq_lock. To do the build_id lookup with
+ * IRQs disabled, we need to run up_read() in irq_work. We use
* a percpu variable to do the irq_work. If the irq_work is
* already used by another lookup, we fall back to report ips.
*
@@ -338,7 +339,7 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
* up_read_non_owner(). The rwsem_release() is called
* here to release the lock from lockdep's perspective.
*/
- rwsem_release(&current->mm->mmap_sem.dep_map, 1, _RET_IP_);
+ rwsem_release(&current->mm->mmap_sem.dep_map, _RET_IP_);
}
}
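The stackmap change swaps the "can we unlock here?" test from in_nmi() to irqs_disabled() but keeps the same shape: do the work inline when the context allows it, otherwise queue it for later. A userspace analog of that defer-or-do pattern; the flag stands in for irqs_disabled(), the pending flag for irq_work, and all names are illustrative.

#include <stdbool.h>
#include <stdio.h>

static bool work_pending;

static void do_unlock(void)             { puts("up_read() inline"); }
static void queue_deferred_unlock(void) { work_pending = true; }	/* like irq_work_queue() */

static void unlock_or_defer(bool ctx_restricted)
{
	if (ctx_restricted)
		queue_deferred_unlock();
	else
		do_unlock();
}

int main(void)
{
	unlock_or_defer(false);		/* safe context: unlock now */
	unlock_or_defer(true);		/* restricted: defer */
	if (work_pending)
		puts("deferred up_read() runs from irq_work later");
	return 0;
}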
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index ace1cfaa24b6..e3461ec59570 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -23,13 +23,15 @@
#include <linux/timekeeping.h>
#include <linux/ctype.h>
#include <linux/nospec.h>
+#include <uapi/linux/btf.h>
-#define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY || \
- (map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \
- (map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \
- (map)->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
+#define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \
+ (map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \
+ (map)->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
+#define IS_FD_PROG_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY)
#define IS_FD_HASH(map) ((map)->map_type == BPF_MAP_TYPE_HASH_OF_MAPS)
-#define IS_FD_MAP(map) (IS_FD_ARRAY(map) || IS_FD_HASH(map))
+#define IS_FD_MAP(map) (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map) || \
+ IS_FD_HASH(map))
#define BPF_OBJ_FLAG_MASK (BPF_F_RDONLY | BPF_F_WRONLY)
@@ -42,7 +44,7 @@ static DEFINE_SPINLOCK(map_idr_lock);
int sysctl_unprivileged_bpf_disabled __read_mostly;
static const struct bpf_map_ops * const bpf_map_types[] = {
-#define BPF_PROG_TYPE(_id, _ops)
+#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type)
#define BPF_MAP_TYPE(_id, _ops) \
[_id] = &_ops,
#include <linux/bpf_types.h>
@@ -126,7 +128,7 @@ static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
return map;
}
-void *bpf_map_area_alloc(u64 size, int numa_node)
+static void *__bpf_map_area_alloc(u64 size, int numa_node, bool mmapable)
{
/* We really just want to fail instead of triggering OOM killer
* under memory pressure, therefore we set __GFP_NORETRY to kmalloc,
@@ -144,18 +146,33 @@ void *bpf_map_area_alloc(u64 size, int numa_node)
if (size >= SIZE_MAX)
return NULL;
- if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
+ /* kmalloc()'ed memory can't be mmap()'ed */
+ if (!mmapable && size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
area = kmalloc_node(size, GFP_USER | __GFP_NORETRY | flags,
numa_node);
if (area != NULL)
return area;
}
-
+ if (mmapable) {
+ BUG_ON(!PAGE_ALIGNED(size));
+ return vmalloc_user_node_flags(size, numa_node, GFP_KERNEL |
+ __GFP_RETRY_MAYFAIL | flags);
+ }
return __vmalloc_node_flags_caller(size, numa_node,
GFP_KERNEL | __GFP_RETRY_MAYFAIL |
flags, __builtin_return_address(0));
}
+void *bpf_map_area_alloc(u64 size, int numa_node)
+{
+ return __bpf_map_area_alloc(size, numa_node, false);
+}
+
+void *bpf_map_area_mmapable_alloc(u64 size, int numa_node)
+{
+ return __bpf_map_area_alloc(size, numa_node, true);
+}
+
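__bpf_map_area_alloc() now takes a third decision input: mmapable buffers must come from the page-aligned vmalloc_user path even when small, since kmalloc()'ed memory cannot be handed to mmap(). A userspace sketch of the same size/usage split, with aligned_alloc() standing in for vmalloc_user and an assert for the PAGE_ALIGNED check; FAKE_PAGE_SIZE and SMALL_LIMIT are illustrative constants.

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

#define FAKE_PAGE_SIZE	4096UL
#define SMALL_LIMIT	(FAKE_PAGE_SIZE * 8)	/* stands in for the COSTLY_ORDER cutoff */

static void *area_alloc(size_t size, int mmapable)
{
	if (!mmapable && size <= SMALL_LIMIT)
		return malloc(size);			/* kmalloc path */
	if (mmapable) {
		assert(size % FAKE_PAGE_SIZE == 0);	/* BUG_ON(!PAGE_ALIGNED(size)) */
		return aligned_alloc(FAKE_PAGE_SIZE, size);
	}
	return malloc(size);				/* vmalloc path stand-in */
}

int main(void)
{
	void *a = area_alloc(512, 0);
	void *b = area_alloc(2 * FAKE_PAGE_SIZE, 1);

	printf("heap=%p page-aligned=%p\n", a, b);
	free(a);
	free(b);
	return 0;
}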
void bpf_map_area_free(void *area)
{
kvfree(area);
@@ -313,7 +330,7 @@ static void bpf_map_free_deferred(struct work_struct *work)
static void bpf_map_put_uref(struct bpf_map *map)
{
- if (atomic_dec_and_test(&map->usercnt)) {
+ if (atomic64_dec_and_test(&map->usercnt)) {
if (map->ops->map_release_uref)
map->ops->map_release_uref(map);
}
@@ -324,7 +341,7 @@ static void bpf_map_put_uref(struct bpf_map *map)
*/
static void __bpf_map_put(struct bpf_map *map, bool do_idr_lock)
{
- if (atomic_dec_and_test(&map->refcnt)) {
+ if (atomic64_dec_and_test(&map->refcnt)) {
/* bpf_map_free_id() must be called first */
bpf_map_free_id(map, do_idr_lock);
btf_put(map->btf);
@@ -373,13 +390,12 @@ static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
{
const struct bpf_map *map = filp->private_data;
const struct bpf_array *array;
- u32 owner_prog_type = 0;
- u32 owner_jited = 0;
+ u32 type = 0, jited = 0;
if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY) {
array = container_of(map, struct bpf_array, map);
- owner_prog_type = array->owner_prog_type;
- owner_jited = array->owner_jited;
+ type = array->aux->type;
+ jited = array->aux->jited;
}
seq_printf(m,
@@ -399,12 +415,9 @@ static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
map->memory.pages * 1ULL << PAGE_SHIFT,
map->id,
READ_ONCE(map->frozen));
-
- if (owner_prog_type) {
- seq_printf(m, "owner_prog_type:\t%u\n",
- owner_prog_type);
- seq_printf(m, "owner_jited:\t%u\n",
- owner_jited);
+ if (type) {
+ seq_printf(m, "owner_prog_type:\t%u\n", type);
+ seq_printf(m, "owner_jited:\t%u\n", jited);
}
}
#endif
@@ -427,6 +440,74 @@ static ssize_t bpf_dummy_write(struct file *filp, const char __user *buf,
return -EINVAL;
}
+/* called for any extra memory-mapped regions (except initial) */
+static void bpf_map_mmap_open(struct vm_area_struct *vma)
+{
+ struct bpf_map *map = vma->vm_file->private_data;
+
+ bpf_map_inc_with_uref(map);
+
+ if (vma->vm_flags & VM_WRITE) {
+ mutex_lock(&map->freeze_mutex);
+ map->writecnt++;
+ mutex_unlock(&map->freeze_mutex);
+ }
+}
+
+/* called for all unmapped memory regions (including initial) */
+static void bpf_map_mmap_close(struct vm_area_struct *vma)
+{
+ struct bpf_map *map = vma->vm_file->private_data;
+
+ if (vma->vm_flags & VM_WRITE) {
+ mutex_lock(&map->freeze_mutex);
+ map->writecnt--;
+ mutex_unlock(&map->freeze_mutex);
+ }
+
+ bpf_map_put_with_uref(map);
+}
+
+static const struct vm_operations_struct bpf_map_default_vmops = {
+ .open = bpf_map_mmap_open,
+ .close = bpf_map_mmap_close,
+};
+
+static int bpf_map_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct bpf_map *map = filp->private_data;
+ int err;
+
+ if (!map->ops->map_mmap || map_value_has_spin_lock(map))
+ return -ENOTSUPP;
+
+ if (!(vma->vm_flags & VM_SHARED))
+ return -EINVAL;
+
+ mutex_lock(&map->freeze_mutex);
+
+ if ((vma->vm_flags & VM_WRITE) && map->frozen) {
+ err = -EPERM;
+ goto out;
+ }
+
+ /* set default open/close callbacks */
+ vma->vm_ops = &bpf_map_default_vmops;
+ vma->vm_private_data = map;
+
+ err = map->ops->map_mmap(map, vma);
+ if (err)
+ goto out;
+
+ bpf_map_inc_with_uref(map);
+
+ if (vma->vm_flags & VM_WRITE)
+ map->writecnt++;
+out:
+ mutex_unlock(&map->freeze_mutex);
+ return err;
+}
+
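bpf_map_mmap() and map_freeze() form an interlock: writable mappings are counted under freeze_mutex, freezing fails while any exist, and new writable mappings fail once the map is frozen. A self-contained sketch of that interlock, using one pthread mutex and illustrative names:

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct map_state {
	pthread_mutex_t lock;
	int writecnt;
	bool frozen;
};

static int map_mmap_writable(struct map_state *m)
{
	int err = 0;

	pthread_mutex_lock(&m->lock);
	if (m->frozen)
		err = -EPERM;	/* no new writable mapping once frozen */
	else
		m->writecnt++;
	pthread_mutex_unlock(&m->lock);
	return err;
}

static int map_freeze(struct map_state *m)
{
	int err = 0;

	pthread_mutex_lock(&m->lock);
	if (m->writecnt)
		err = -EBUSY;	/* writable mappings still exist */
	else
		m->frozen = true;
	pthread_mutex_unlock(&m->lock);
	return err;
}

int main(void)
{
	struct map_state m = { PTHREAD_MUTEX_INITIALIZER, 0, false };

	map_mmap_writable(&m);
	printf("freeze with writable mapping: %d\n", map_freeze(&m));	/* -EBUSY */
	m.writecnt = 0;		/* as bpf_map_mmap_close() would */
	printf("freeze afterwards: %d\n", map_freeze(&m));		/* 0 */
	printf("mmap after freeze: %d\n", map_mmap_writable(&m));	/* -EPERM */
	return 0;
}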
const struct file_operations bpf_map_fops = {
#ifdef CONFIG_PROC_FS
.show_fdinfo = bpf_map_show_fdinfo,
@@ -434,6 +515,7 @@ const struct file_operations bpf_map_fops = {
.release = bpf_map_release,
.read = bpf_dummy_read,
.write = bpf_dummy_write,
+ .mmap = bpf_map_mmap,
};
int bpf_map_new_fd(struct bpf_map *map, int flags)
@@ -577,8 +659,9 @@ static int map_create(union bpf_attr *attr)
if (err)
goto free_map;
- atomic_set(&map->refcnt, 1);
- atomic_set(&map->usercnt, 1);
+ atomic64_set(&map->refcnt, 1);
+ atomic64_set(&map->usercnt, 1);
+ mutex_init(&map->freeze_mutex);
if (attr->btf_key_type_id || attr->btf_value_type_id) {
struct btf *btf;
@@ -655,21 +738,19 @@ struct bpf_map *__bpf_map_get(struct fd f)
return f.file->private_data;
}
-/* prog's and map's refcnt limit */
-#define BPF_MAX_REFCNT 32768
-
-struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref)
+void bpf_map_inc(struct bpf_map *map)
{
- if (atomic_inc_return(&map->refcnt) > BPF_MAX_REFCNT) {
- atomic_dec(&map->refcnt);
- return ERR_PTR(-EBUSY);
- }
- if (uref)
- atomic_inc(&map->usercnt);
- return map;
+ atomic64_inc(&map->refcnt);
}
EXPORT_SYMBOL_GPL(bpf_map_inc);
+void bpf_map_inc_with_uref(struct bpf_map *map)
+{
+ atomic64_inc(&map->refcnt);
+ atomic64_inc(&map->usercnt);
+}
+EXPORT_SYMBOL_GPL(bpf_map_inc_with_uref);
+
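Moving refcnt/usercnt to atomic64 is what lets the BPF_MAX_REFCNT overflow guard go away: a 64-bit counter bumped even once per nanosecond would take roughly 584 years to wrap, so bpf_map_inc() can become a plain void increment. A minimal C11 sketch of the resulting get/put pair:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t refcnt = 1;

static void obj_get(void) { atomic_fetch_add(&refcnt, 1); }
/* returns 1 when this put dropped the last reference */
static int obj_put(void)  { return atomic_fetch_sub(&refcnt, 1) == 1; }

int main(void)
{
	obj_get();
	printf("frees object: %d\n", obj_put());	/* 0 */
	printf("frees object: %d\n", obj_put());	/* 1 */
	return 0;
}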
struct bpf_map *bpf_map_get_with_uref(u32 ufd)
{
struct fd f = fdget(ufd);
@@ -679,38 +760,30 @@ struct bpf_map *bpf_map_get_with_uref(u32 ufd)
if (IS_ERR(map))
return map;
- map = bpf_map_inc(map, true);
+ bpf_map_inc_with_uref(map);
fdput(f);
return map;
}
/* map_idr_lock should have been held */
-static struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map,
- bool uref)
+static struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map, bool uref)
{
int refold;
- refold = atomic_fetch_add_unless(&map->refcnt, 1, 0);
-
- if (refold >= BPF_MAX_REFCNT) {
- __bpf_map_put(map, false);
- return ERR_PTR(-EBUSY);
- }
-
+ refold = atomic64_fetch_add_unless(&map->refcnt, 1, 0);
if (!refold)
return ERR_PTR(-ENOENT);
-
if (uref)
- atomic_inc(&map->usercnt);
+ atomic64_inc(&map->usercnt);
return map;
}
-struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map, bool uref)
+struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map)
{
spin_lock_bh(&map_idr_lock);
- map = __bpf_map_inc_not_zero(map, uref);
+ map = __bpf_map_inc_not_zero(map, false);
spin_unlock_bh(&map_idr_lock);
return map;
@@ -805,7 +878,7 @@ static int map_lookup_elem(union bpf_attr *attr)
err = bpf_percpu_cgroup_storage_copy(map, key, value);
} else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
err = bpf_stackmap_copy(map, key, value);
- } else if (IS_FD_ARRAY(map)) {
+ } else if (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map)) {
err = bpf_fd_array_map_lookup_elem(map, key, value);
} else if (IS_FD_HASH(map)) {
err = bpf_fd_htab_map_lookup_elem(map, key, value);
@@ -932,6 +1005,10 @@ static int map_update_elem(union bpf_attr *attr)
map->map_type == BPF_MAP_TYPE_SOCKMAP) {
err = map->ops->map_update_elem(map, key, value, attr->flags);
goto out;
+ } else if (IS_FD_PROG_ARRAY(map)) {
+ err = bpf_fd_array_map_update_elem(map, f.file, key, value,
+ attr->flags);
+ goto out;
}
/* must increment bpf_prog_active to avoid kprobe+bpf triggering from
@@ -1014,6 +1091,9 @@ static int map_delete_elem(union bpf_attr *attr)
if (bpf_map_is_dev_bound(map)) {
err = bpf_map_offload_delete_elem(map, key);
goto out;
+ } else if (IS_FD_PROG_ARRAY(map)) {
+ err = map->ops->map_delete_elem(map, key);
+ goto out;
}
preempt_disable();
@@ -1175,6 +1255,13 @@ static int map_freeze(const union bpf_attr *attr)
map = __bpf_map_get(f);
if (IS_ERR(map))
return PTR_ERR(map);
+
+ mutex_lock(&map->freeze_mutex);
+
+ if (map->writecnt) {
+ err = -EBUSY;
+ goto err_put;
+ }
if (READ_ONCE(map->frozen)) {
err = -EBUSY;
goto err_put;
@@ -1186,12 +1273,13 @@ static int map_freeze(const union bpf_attr *attr)
WRITE_ONCE(map->frozen, true);
err_put:
+ mutex_unlock(&map->freeze_mutex);
fdput(f);
return err;
}
static const struct bpf_prog_ops * const bpf_prog_types[] = {
-#define BPF_PROG_TYPE(_id, _name) \
+#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
[_id] = & _name ## _prog_ops,
#define BPF_MAP_TYPE(_id, _ops)
#include <linux/bpf_types.h>
@@ -1218,25 +1306,6 @@ static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
return 0;
}
-/* drop refcnt on maps used by eBPF program and free auxilary data */
-static void free_used_maps(struct bpf_prog_aux *aux)
-{
- enum bpf_cgroup_storage_type stype;
- int i;
-
- for_each_cgroup_storage_type(stype) {
- if (!aux->cgroup_storage[stype])
- continue;
- bpf_cgroup_storage_release(aux->prog,
- aux->cgroup_storage[stype]);
- }
-
- for (i = 0; i < aux->used_map_cnt; i++)
- bpf_map_put(aux->used_maps[i]);
-
- kfree(aux->used_maps);
-}
-
int __bpf_prog_charge(struct user_struct *user, u32 pages)
{
unsigned long memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
@@ -1330,7 +1399,7 @@ static void __bpf_prog_put_rcu(struct rcu_head *rcu)
struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);
kvfree(aux->func_info);
- free_used_maps(aux);
+ kfree(aux->func_info_aux);
bpf_prog_uncharge_memlock(aux->prog);
security_bpf_prog_free(aux);
bpf_prog_free(aux->prog);
@@ -1350,7 +1419,7 @@ static void __bpf_prog_put_noref(struct bpf_prog *prog, bool deferred)
static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock)
{
- if (atomic_dec_and_test(&prog->aux->refcnt)) {
+ if (atomic64_dec_and_test(&prog->aux->refcnt)) {
perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_UNLOAD, 0);
/* bpf_prog_free_id() must be called first */
bpf_prog_free_id(prog, do_idr_lock);
@@ -1456,13 +1525,9 @@ static struct bpf_prog *____bpf_prog_get(struct fd f)
return f.file->private_data;
}
-struct bpf_prog *bpf_prog_add(struct bpf_prog *prog, int i)
+void bpf_prog_add(struct bpf_prog *prog, int i)
{
- if (atomic_add_return(i, &prog->aux->refcnt) > BPF_MAX_REFCNT) {
- atomic_sub(i, &prog->aux->refcnt);
- return ERR_PTR(-EBUSY);
- }
- return prog;
+ atomic64_add(i, &prog->aux->refcnt);
}
EXPORT_SYMBOL_GPL(bpf_prog_add);
@@ -1473,13 +1538,13 @@ void bpf_prog_sub(struct bpf_prog *prog, int i)
* path holds a reference to the program, thus atomic_sub() can
* be safely used in such cases!
*/
- WARN_ON(atomic_sub_return(i, &prog->aux->refcnt) == 0);
+ WARN_ON(atomic64_sub_return(i, &prog->aux->refcnt) == 0);
}
EXPORT_SYMBOL_GPL(bpf_prog_sub);
-struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog)
+void bpf_prog_inc(struct bpf_prog *prog)
{
- return bpf_prog_add(prog, 1);
+ atomic64_inc(&prog->aux->refcnt);
}
EXPORT_SYMBOL_GPL(bpf_prog_inc);
@@ -1488,12 +1553,7 @@ struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog)
{
int refold;
- refold = atomic_fetch_add_unless(&prog->aux->refcnt, 1, 0);
-
- if (refold >= BPF_MAX_REFCNT) {
- __bpf_prog_put(prog, false);
- return ERR_PTR(-EBUSY);
- }
+ refold = atomic64_fetch_add_unless(&prog->aux->refcnt, 1, 0);
if (!refold)
return ERR_PTR(-ENOENT);
@@ -1531,7 +1591,7 @@ static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *attach_type,
goto out;
}
- prog = bpf_prog_inc(prog);
+ bpf_prog_inc(prog);
out:
fdput(f);
return prog;
@@ -1576,10 +1636,22 @@ static void bpf_prog_load_fixup_attach_type(union bpf_attr *attr)
}
static int
-bpf_prog_load_check_attach_type(enum bpf_prog_type prog_type,
- enum bpf_attach_type expected_attach_type)
+bpf_prog_load_check_attach(enum bpf_prog_type prog_type,
+ enum bpf_attach_type expected_attach_type,
+ u32 btf_id, u32 prog_fd)
{
switch (prog_type) {
+ case BPF_PROG_TYPE_TRACING:
+ if (btf_id > BTF_MAX_TYPE)
+ return -EINVAL;
+ break;
+ default:
+ if (btf_id || prog_fd)
+ return -EINVAL;
+ break;
+ }
+
+ switch (prog_type) {
case BPF_PROG_TYPE_CGROUP_SOCK:
switch (expected_attach_type) {
case BPF_CGROUP_INET_SOCK_CREATE:
@@ -1625,7 +1697,7 @@ bpf_prog_load_check_attach_type(enum bpf_prog_type prog_type,
}
/* last field in 'union bpf_attr' used by this command */
-#define BPF_PROG_LOAD_LAST_FIELD line_info_cnt
+#define BPF_PROG_LOAD_LAST_FIELD attach_prog_fd
static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr)
{
@@ -1667,7 +1739,9 @@ static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr)
return -EPERM;
bpf_prog_load_fixup_attach_type(attr);
- if (bpf_prog_load_check_attach_type(type, attr->expected_attach_type))
+ if (bpf_prog_load_check_attach(type, attr->expected_attach_type,
+ attr->attach_btf_id,
+ attr->attach_prog_fd))
return -EINVAL;
/* plain bpf_prog allocation */
@@ -1676,6 +1750,17 @@ static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr)
return -ENOMEM;
prog->expected_attach_type = attr->expected_attach_type;
+ prog->aux->attach_btf_id = attr->attach_btf_id;
+ if (attr->attach_prog_fd) {
+ struct bpf_prog *tgt_prog;
+
+ tgt_prog = bpf_prog_get(attr->attach_prog_fd);
+ if (IS_ERR(tgt_prog)) {
+ err = PTR_ERR(tgt_prog);
+ goto free_prog_nouncharge;
+ }
+ prog->aux->linked_prog = tgt_prog;
+ }
prog->aux->offload_requested = !!attr->prog_ifindex;
@@ -1697,7 +1782,7 @@ static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr)
prog->orig_prog = NULL;
prog->jited = 0;
- atomic_set(&prog->aux->refcnt, 1);
+ atomic64_set(&prog->aux->refcnt, 1);
prog->gpl_compatible = is_gpl ? 1 : 0;
if (bpf_prog_is_dev_bound(prog->aux)) {
@@ -1787,6 +1872,49 @@ static int bpf_obj_get(const union bpf_attr *attr)
attr->file_flags);
}
+static int bpf_tracing_prog_release(struct inode *inode, struct file *filp)
+{
+ struct bpf_prog *prog = filp->private_data;
+
+ WARN_ON_ONCE(bpf_trampoline_unlink_prog(prog));
+ bpf_prog_put(prog);
+ return 0;
+}
+
+static const struct file_operations bpf_tracing_prog_fops = {
+ .release = bpf_tracing_prog_release,
+ .read = bpf_dummy_read,
+ .write = bpf_dummy_write,
+};
+
+static int bpf_tracing_prog_attach(struct bpf_prog *prog)
+{
+ int tr_fd, err;
+
+ if (prog->expected_attach_type != BPF_TRACE_FENTRY &&
+ prog->expected_attach_type != BPF_TRACE_FEXIT) {
+ err = -EINVAL;
+ goto out_put_prog;
+ }
+
+ err = bpf_trampoline_link_prog(prog);
+ if (err)
+ goto out_put_prog;
+
+ tr_fd = anon_inode_getfd("bpf-tracing-prog", &bpf_tracing_prog_fops,
+ prog, O_CLOEXEC);
+ if (tr_fd < 0) {
+ WARN_ON_ONCE(bpf_trampoline_unlink_prog(prog));
+ err = tr_fd;
+ goto out_put_prog;
+ }
+ return tr_fd;
+
+out_put_prog:
+ bpf_prog_put(prog);
+ return err;
+}
+
struct bpf_raw_tracepoint {
struct bpf_raw_event_map *btp;
struct bpf_prog *prog;
@@ -1818,17 +1946,52 @@ static int bpf_raw_tracepoint_open(const union bpf_attr *attr)
struct bpf_raw_tracepoint *raw_tp;
struct bpf_raw_event_map *btp;
struct bpf_prog *prog;
- char tp_name[128];
+ const char *tp_name;
+ char buf[128];
int tp_fd, err;
- if (strncpy_from_user(tp_name, u64_to_user_ptr(attr->raw_tracepoint.name),
- sizeof(tp_name) - 1) < 0)
- return -EFAULT;
- tp_name[sizeof(tp_name) - 1] = 0;
+ if (CHECK_ATTR(BPF_RAW_TRACEPOINT_OPEN))
+ return -EINVAL;
+
+ prog = bpf_prog_get(attr->raw_tracepoint.prog_fd);
+ if (IS_ERR(prog))
+ return PTR_ERR(prog);
+
+ if (prog->type != BPF_PROG_TYPE_RAW_TRACEPOINT &&
+ prog->type != BPF_PROG_TYPE_TRACING &&
+ prog->type != BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE) {
+ err = -EINVAL;
+ goto out_put_prog;
+ }
+
+ if (prog->type == BPF_PROG_TYPE_TRACING) {
+ if (attr->raw_tracepoint.name) {
+ /* The attach point for this category of programs
+ * should be specified via btf_id during program load.
+ */
+ err = -EINVAL;
+ goto out_put_prog;
+ }
+ if (prog->expected_attach_type == BPF_TRACE_RAW_TP)
+ tp_name = prog->aux->attach_func_name;
+ else
+ return bpf_tracing_prog_attach(prog);
+ } else {
+ if (strncpy_from_user(buf,
+ u64_to_user_ptr(attr->raw_tracepoint.name),
+ sizeof(buf) - 1) < 0) {
+ err = -EFAULT;
+ goto out_put_prog;
+ }
+ buf[sizeof(buf) - 1] = 0;
+ tp_name = buf;
+ }
btp = bpf_get_raw_tracepoint(tp_name);
- if (!btp)
- return -ENOENT;
+ if (!btp) {
+ err = -ENOENT;
+ goto out_put_prog;
+ }
raw_tp = kzalloc(sizeof(*raw_tp), GFP_USER);
if (!raw_tp) {
@@ -1836,38 +1999,27 @@ static int bpf_raw_tracepoint_open(const union bpf_attr *attr)
goto out_put_btp;
}
raw_tp->btp = btp;
-
- prog = bpf_prog_get(attr->raw_tracepoint.prog_fd);
- if (IS_ERR(prog)) {
- err = PTR_ERR(prog);
- goto out_free_tp;
- }
- if (prog->type != BPF_PROG_TYPE_RAW_TRACEPOINT &&
- prog->type != BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE) {
- err = -EINVAL;
- goto out_put_prog;
- }
+ raw_tp->prog = prog;
err = bpf_probe_register(raw_tp->btp, prog);
if (err)
- goto out_put_prog;
+ goto out_free_tp;
- raw_tp->prog = prog;
tp_fd = anon_inode_getfd("bpf-raw-tracepoint", &bpf_raw_tp_fops, raw_tp,
O_CLOEXEC);
if (tp_fd < 0) {
bpf_probe_unregister(raw_tp->btp, prog);
err = tp_fd;
- goto out_put_prog;
+ goto out_free_tp;
}
return tp_fd;
-out_put_prog:
- bpf_prog_put(prog);
out_free_tp:
kfree(raw_tp);
out_put_btp:
bpf_put_raw_tracepoint(btp);
+out_put_prog:
+ bpf_prog_put(prog);
return err;
}
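The reshuffled error paths follow the usual kernel rule: unwind labels release resources in the reverse order of acquisition, so taking the prog reference first moves out_put_prog to the bottom. A toy illustration of that label ordering; malloc/free stand in for the get/put pairs, and a real open would keep the resources alive and return an fd on success.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int open_thing(int fail_at)
{
	void *prog = NULL, *raw_tp = NULL;
	int err;

	prog = malloc(16);			/* like bpf_prog_get() */
	if (!prog)
		return -ENOMEM;
	if (fail_at == 1) { err = -EINVAL; goto out_put_prog; }

	raw_tp = malloc(16);			/* like kzalloc() for raw_tp */
	if (!raw_tp) { err = -ENOMEM; goto out_put_prog; }
	if (fail_at == 2) { err = -EBUSY; goto out_free_tp; }

	/* success: a real open would return an fd and keep both alive */
	free(raw_tp);
	free(prog);
	return 0;

out_free_tp:
	free(raw_tp);
out_put_prog:
	free(prog);				/* like bpf_prog_put() */
	return err;
}

int main(void)
{
	printf("%d %d %d\n", open_thing(0), open_thing(1), open_thing(2));
	return 0;
}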
diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
new file mode 100644
index 000000000000..7e89f1f49d77
--- /dev/null
+++ b/kernel/bpf/trampoline.c
@@ -0,0 +1,253 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2019 Facebook */
+#include <linux/hash.h>
+#include <linux/bpf.h>
+#include <linux/filter.h>
+
+/* btf_vmlinux has ~22k attachable functions. 1k htab is enough. */
+#define TRAMPOLINE_HASH_BITS 10
+#define TRAMPOLINE_TABLE_SIZE (1 << TRAMPOLINE_HASH_BITS)
+
+static struct hlist_head trampoline_table[TRAMPOLINE_TABLE_SIZE];
+
+/* serializes access to trampoline_table */
+static DEFINE_MUTEX(trampoline_mutex);
+
+struct bpf_trampoline *bpf_trampoline_lookup(u64 key)
+{
+ struct bpf_trampoline *tr;
+ struct hlist_head *head;
+ void *image;
+ int i;
+
+ mutex_lock(&trampoline_mutex);
+ head = &trampoline_table[hash_64(key, TRAMPOLINE_HASH_BITS)];
+ hlist_for_each_entry(tr, head, hlist) {
+ if (tr->key == key) {
+ refcount_inc(&tr->refcnt);
+ goto out;
+ }
+ }
+ tr = kzalloc(sizeof(*tr), GFP_KERNEL);
+ if (!tr)
+ goto out;
+
+ /* is_root was checked earlier. No need for bpf_jit_charge_modmem() */
+ image = bpf_jit_alloc_exec(PAGE_SIZE);
+ if (!image) {
+ kfree(tr);
+ tr = NULL;
+ goto out;
+ }
+
+ tr->key = key;
+ INIT_HLIST_NODE(&tr->hlist);
+ hlist_add_head(&tr->hlist, head);
+ refcount_set(&tr->refcnt, 1);
+ mutex_init(&tr->mutex);
+ for (i = 0; i < BPF_TRAMP_MAX; i++)
+ INIT_HLIST_HEAD(&tr->progs_hlist[i]);
+
+ set_vm_flush_reset_perms(image);
+ /* Keep the image writable. The alternative is to keep flipping it
+ * between ro/rw every time a new program is attached or detached.
+ */
+ set_memory_x((long)image, 1);
+ tr->image = image;
+out:
+ mutex_unlock(&trampoline_mutex);
+ return tr;
+}
+
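bpf_trampoline_lookup() is a classic lookup-or-create under one mutex: hit an existing key and take a reference, or allocate, initialize, and insert a fresh entry with refcount 1. A single-threaded userspace sketch of the pattern, with locking elided and one list standing in for the hash table:

#include <stdio.h>
#include <stdlib.h>

struct tramp {
	unsigned long long key;
	int refcnt;
	struct tramp *next;
};

static struct tramp *bucket;

static struct tramp *tramp_lookup(unsigned long long key)
{
	struct tramp *t;

	for (t = bucket; t; t = t->next)
		if (t->key == key) {
			t->refcnt++;	/* like refcount_inc() */
			return t;
		}
	t = calloc(1, sizeof(*t));
	if (!t)
		return NULL;
	t->key = key;
	t->refcnt = 1;
	t->next = bucket;
	bucket = t;
	return t;
}

int main(void)
{
	struct tramp *a = tramp_lookup(42);
	struct tramp *b = tramp_lookup(42);

	printf("same entry: %d, refcnt=%d\n", a == b, b->refcnt);
	return 0;
}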
+/* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit sequence
+ * is ~50 bytes on x86. Pick a number that fits into PAGE_SIZE / 2.
+ */
+#define BPF_MAX_TRAMP_PROGS 40
+
+static int bpf_trampoline_update(struct bpf_trampoline *tr)
+{
+ void *old_image = tr->image + ((tr->selector + 1) & 1) * PAGE_SIZE/2;
+ void *new_image = tr->image + (tr->selector & 1) * PAGE_SIZE/2;
+ struct bpf_prog *progs_to_run[BPF_MAX_TRAMP_PROGS];
+ int fentry_cnt = tr->progs_cnt[BPF_TRAMP_FENTRY];
+ int fexit_cnt = tr->progs_cnt[BPF_TRAMP_FEXIT];
+ struct bpf_prog **progs, **fentry, **fexit;
+ u32 flags = BPF_TRAMP_F_RESTORE_REGS;
+ struct bpf_prog_aux *aux;
+ int err;
+
+ if (fentry_cnt + fexit_cnt == 0) {
+ err = bpf_arch_text_poke(tr->func.addr, BPF_MOD_CALL,
+ old_image, NULL);
+ tr->selector = 0;
+ goto out;
+ }
+
+ /* populate fentry progs */
+ fentry = progs = progs_to_run;
+ hlist_for_each_entry(aux, &tr->progs_hlist[BPF_TRAMP_FENTRY], tramp_hlist)
+ *progs++ = aux->prog;
+
+ /* populate fexit progs */
+ fexit = progs;
+ hlist_for_each_entry(aux, &tr->progs_hlist[BPF_TRAMP_FEXIT], tramp_hlist)
+ *progs++ = aux->prog;
+
+ if (fexit_cnt)
+ flags = BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME;
+
+ err = arch_prepare_bpf_trampoline(new_image, &tr->func.model, flags,
+ fentry, fentry_cnt,
+ fexit, fexit_cnt,
+ tr->func.addr);
+ if (err)
+ goto out;
+
+ if (tr->selector)
+ /* progs already running at this address */
+ err = bpf_arch_text_poke(tr->func.addr, BPF_MOD_CALL,
+ old_image, new_image);
+ else
+ /* first time registering */
+ err = bpf_arch_text_poke(tr->func.addr, BPF_MOD_CALL, NULL,
+ new_image);
+ if (err)
+ goto out;
+ tr->selector++;
+out:
+ return err;
+}
+
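The trampoline page is treated as two halves: (selector & 1) picks the half being rebuilt while ((selector + 1) & 1) is the half currently live, and bumping the selector after a successful text poke swaps the roles. The arithmetic, runnable on its own:

#include <stdio.h>

#define IMG_SIZE 4096

int main(void)
{
	unsigned int selector;

	for (selector = 0; selector < 4; selector++) {
		unsigned int new_off = (selector & 1) * IMG_SIZE / 2;
		unsigned int old_off = ((selector + 1) & 1) * IMG_SIZE / 2;

		printf("selector=%u build at +%u, live at +%u\n",
		       selector, new_off, old_off);
	}
	return 0;
}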
+static enum bpf_tramp_prog_type bpf_attach_type_to_tramp(enum bpf_attach_type t)
+{
+ switch (t) {
+ case BPF_TRACE_FENTRY:
+ return BPF_TRAMP_FENTRY;
+ default:
+ return BPF_TRAMP_FEXIT;
+ }
+}
+
+int bpf_trampoline_link_prog(struct bpf_prog *prog)
+{
+ enum bpf_tramp_prog_type kind;
+ struct bpf_trampoline *tr;
+ int err = 0;
+
+ tr = prog->aux->trampoline;
+ kind = bpf_attach_type_to_tramp(prog->expected_attach_type);
+ mutex_lock(&tr->mutex);
+ if (tr->progs_cnt[BPF_TRAMP_FENTRY] + tr->progs_cnt[BPF_TRAMP_FEXIT]
+ >= BPF_MAX_TRAMP_PROGS) {
+ err = -E2BIG;
+ goto out;
+ }
+ if (!hlist_unhashed(&prog->aux->tramp_hlist)) {
+ /* prog already linked */
+ err = -EBUSY;
+ goto out;
+ }
+ hlist_add_head(&prog->aux->tramp_hlist, &tr->progs_hlist[kind]);
+ tr->progs_cnt[kind]++;
+ err = bpf_trampoline_update(prog->aux->trampoline);
+ if (err) {
+ hlist_del(&prog->aux->tramp_hlist);
+ tr->progs_cnt[kind]--;
+ }
+out:
+ mutex_unlock(&tr->mutex);
+ return err;
+}
+
+/* bpf_trampoline_unlink_prog() should never fail. */
+int bpf_trampoline_unlink_prog(struct bpf_prog *prog)
+{
+ enum bpf_tramp_prog_type kind;
+ struct bpf_trampoline *tr;
+ int err;
+
+ tr = prog->aux->trampoline;
+ kind = bpf_attach_type_to_tramp(prog->expected_attach_type);
+ mutex_lock(&tr->mutex);
+ hlist_del(&prog->aux->tramp_hlist);
+ tr->progs_cnt[kind]--;
+ err = bpf_trampoline_update(prog->aux->trampoline);
+ mutex_unlock(&tr->mutex);
+ return err;
+}
+
+void bpf_trampoline_put(struct bpf_trampoline *tr)
+{
+ if (!tr)
+ return;
+ mutex_lock(&trampoline_mutex);
+ if (!refcount_dec_and_test(&tr->refcnt))
+ goto out;
+ WARN_ON_ONCE(mutex_is_locked(&tr->mutex));
+ if (WARN_ON_ONCE(!hlist_empty(&tr->progs_hlist[BPF_TRAMP_FENTRY])))
+ goto out;
+ if (WARN_ON_ONCE(!hlist_empty(&tr->progs_hlist[BPF_TRAMP_FEXIT])))
+ goto out;
+ bpf_jit_free_exec(tr->image);
+ hlist_del(&tr->hlist);
+ kfree(tr);
+out:
+ mutex_unlock(&trampoline_mutex);
+}
+
+/* The logic is similar to BPF_PROG_RUN, but with the explicit RCU and
+ * preemption handling that the trampoline needs. The sequence is split into
+ * call __bpf_prog_enter
+ * call prog->bpf_func
+ * call __bpf_prog_exit
+ */
+u64 notrace __bpf_prog_enter(void)
+{
+ u64 start = 0;
+
+ rcu_read_lock();
+ preempt_disable();
+ if (static_branch_unlikely(&bpf_stats_enabled_key))
+ start = sched_clock();
+ return start;
+}
+
+void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start)
+{
+ struct bpf_prog_stats *stats;
+
+ if (static_branch_unlikely(&bpf_stats_enabled_key) &&
+ /* static_key could be enabled in __bpf_prog_enter
+ * and disabled in __bpf_prog_exit.
+ * And vice versa.
+ * Hence check that 'start' is not zero.
+ */
+ start) {
+ stats = this_cpu_ptr(prog->aux->stats);
+ u64_stats_update_begin(&stats->syncp);
+ stats->cnt++;
+ stats->nsecs += sched_clock() - start;
+ u64_stats_update_end(&stats->syncp);
+ }
+ preempt_enable();
+ rcu_read_unlock();
+}
+
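The enter/exit pair threads a start timestamp through the trampoline, and a zero start doubles as "stats were off at entry", which covers the static key flipping between the two calls. A userspace sketch of the pairing, with clock_gettime() standing in for sched_clock() and the globals being illustrative:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static bool stats_enabled = true;
static uint64_t total_ns, total_cnt;

static uint64_t prog_enter(void)
{
	struct timespec ts;

	if (!stats_enabled)
		return 0;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

static void prog_exit(uint64_t start)
{
	struct timespec ts;

	/* a zero start means stats were off at entry: never account
	 * a bogus interval even if the switch flipped in between */
	if (!stats_enabled || !start)
		return;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	total_ns += (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec - start;
	total_cnt++;
}

int main(void)
{
	uint64_t start = prog_enter();
	/* ... run the program ... */
	prog_exit(start);
	printf("cnt=%llu nsecs=%llu\n",
	       (unsigned long long)total_cnt, (unsigned long long)total_ns);
	return 0;
}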
+int __weak
+arch_prepare_bpf_trampoline(void *image, struct btf_func_model *m, u32 flags,
+ struct bpf_prog **fentry_progs, int fentry_cnt,
+ struct bpf_prog **fexit_progs, int fexit_cnt,
+ void *orig_call)
+{
+ return -ENOTSUPP;
+}
+
+static int __init init_trampolines(void)
+{
+ int i;
+
+ for (i = 0; i < TRAMPOLINE_TABLE_SIZE; i++)
+ INIT_HLIST_HEAD(&trampoline_table[i]);
+ return 0;
+}
+late_initcall(init_trampolines);
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index ffc3e53f5300..a0482e1c4a77 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -23,7 +23,7 @@
#include "disasm.h"
static const struct bpf_verifier_ops * const bpf_verifier_ops[] = {
-#define BPF_PROG_TYPE(_id, _name) \
+#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
[_id] = & _name ## _verifier_ops,
#define BPF_MAP_TYPE(_id, _ops)
#include <linux/bpf_types.h>
@@ -171,6 +171,9 @@ struct bpf_verifier_stack_elem {
#define BPF_COMPLEXITY_LIMIT_JMP_SEQ 8192
#define BPF_COMPLEXITY_LIMIT_STATES 64
+#define BPF_MAP_KEY_POISON (1ULL << 63)
+#define BPF_MAP_KEY_SEEN (1ULL << 62)
+
#define BPF_MAP_PTR_UNPRIV 1UL
#define BPF_MAP_PTR_POISON ((void *)((0xeB9FUL << 1) + \
POISON_POINTER_DELTA))
@@ -178,12 +181,12 @@ struct bpf_verifier_stack_elem {
static bool bpf_map_ptr_poisoned(const struct bpf_insn_aux_data *aux)
{
- return BPF_MAP_PTR(aux->map_state) == BPF_MAP_PTR_POISON;
+ return BPF_MAP_PTR(aux->map_ptr_state) == BPF_MAP_PTR_POISON;
}
static bool bpf_map_ptr_unpriv(const struct bpf_insn_aux_data *aux)
{
- return aux->map_state & BPF_MAP_PTR_UNPRIV;
+ return aux->map_ptr_state & BPF_MAP_PTR_UNPRIV;
}
static void bpf_map_ptr_store(struct bpf_insn_aux_data *aux,
@@ -191,8 +194,31 @@ static void bpf_map_ptr_store(struct bpf_insn_aux_data *aux,
{
BUILD_BUG_ON((unsigned long)BPF_MAP_PTR_POISON & BPF_MAP_PTR_UNPRIV);
unpriv |= bpf_map_ptr_unpriv(aux);
- aux->map_state = (unsigned long)map |
- (unpriv ? BPF_MAP_PTR_UNPRIV : 0UL);
+ aux->map_ptr_state = (unsigned long)map |
+ (unpriv ? BPF_MAP_PTR_UNPRIV : 0UL);
+}
+
+static bool bpf_map_key_poisoned(const struct bpf_insn_aux_data *aux)
+{
+ return aux->map_key_state & BPF_MAP_KEY_POISON;
+}
+
+static bool bpf_map_key_unseen(const struct bpf_insn_aux_data *aux)
+{
+ return !(aux->map_key_state & BPF_MAP_KEY_SEEN);
+}
+
+static u64 bpf_map_key_immediate(const struct bpf_insn_aux_data *aux)
+{
+ return aux->map_key_state & ~(BPF_MAP_KEY_SEEN | BPF_MAP_KEY_POISON);
+}
+
+static void bpf_map_key_store(struct bpf_insn_aux_data *aux, u64 state)
+{
+ bool poisoned = bpf_map_key_poisoned(aux);
+
+ aux->map_key_state = state | BPF_MAP_KEY_SEEN |
+ (poisoned ? BPF_MAP_KEY_POISON : 0ULL);
}
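The map_key_state helpers pack a constant tail-call key into the low 62 bits of a u64, with SEEN and POISON flags on top; a second sighting of a different constant poisons the slot, which later disables direct patching for that instruction. A compact sketch of the encoding and the record step, simplified from record_func_key() further down:

#include <stdint.h>
#include <stdio.h>

#define KEY_POISON (1ULL << 63)
#define KEY_SEEN   (1ULL << 62)

static uint64_t state;

static void key_record(uint64_t val)
{
	if (!(state & KEY_SEEN))
		state = val | KEY_SEEN;			/* first sighting */
	else if (!(state & KEY_POISON) &&
		 (state & ~(KEY_SEEN | KEY_POISON)) != val)
		state |= KEY_POISON;			/* conflicting constant */
}

int main(void)
{
	key_record(3);
	key_record(3);
	printf("poisoned after the same key twice: %d\n", !!(state & KEY_POISON));
	key_record(7);
	printf("poisoned after a different key: %d\n", !!(state & KEY_POISON));
	return 0;
}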
struct bpf_call_arg_meta {
@@ -205,8 +231,11 @@ struct bpf_call_arg_meta {
u64 msize_umax_value;
int ref_obj_id;
int func_id;
+ u32 btf_id;
};
+struct btf *btf_vmlinux;
+
static DEFINE_MUTEX(bpf_verifier_lock);
static const struct bpf_line_info *
@@ -243,6 +272,10 @@ void bpf_verifier_vlog(struct bpf_verifier_log *log, const char *fmt,
n = min(log->len_total - log->len_used - 1, n);
log->kbuf[n] = '\0';
+ if (log->level == BPF_LOG_KERNEL) {
+ pr_err("BPF:%s\n", log->kbuf);
+ return;
+ }
if (!copy_to_user(log->ubuf + log->len_used, log->kbuf, n + 1))
log->len_used += n;
else
@@ -280,6 +313,19 @@ __printf(2, 3) static void verbose(void *private_data, const char *fmt, ...)
va_end(args);
}
+__printf(2, 3) void bpf_log(struct bpf_verifier_log *log,
+ const char *fmt, ...)
+{
+ va_list args;
+
+ if (!bpf_verifier_log_needed(log))
+ return;
+
+ va_start(args, fmt);
+ bpf_verifier_vlog(log, fmt, args);
+ va_end(args);
+}
+
static const char *ltrim(const char *s)
{
while (isspace(*s))
@@ -400,6 +446,7 @@ static const char * const reg_type_str[] = {
[PTR_TO_TCP_SOCK_OR_NULL] = "tcp_sock_or_null",
[PTR_TO_TP_BUFFER] = "tp_buffer",
[PTR_TO_XDP_SOCK] = "xdp_sock",
+ [PTR_TO_BTF_ID] = "ptr_",
};
static char slot_type_char[] = {
@@ -430,6 +477,12 @@ static struct bpf_func_state *func(struct bpf_verifier_env *env,
return cur->frame[reg->frameno];
}
+const char *kernel_type_name(u32 id)
+{
+ return btf_name_by_offset(btf_vmlinux,
+ btf_type_by_id(btf_vmlinux, id)->name_off);
+}
+
static void print_verifier_state(struct bpf_verifier_env *env,
const struct bpf_func_state *state)
{
@@ -454,6 +507,8 @@ static void print_verifier_state(struct bpf_verifier_env *env,
/* reg->off should be 0 for SCALAR_VALUE */
verbose(env, "%lld", reg->var_off.value + reg->off);
} else {
+ if (t == PTR_TO_BTF_ID)
+ verbose(env, "%s", kernel_type_name(reg->btf_id));
verbose(env, "(id=%d", reg->id);
if (reg_type_may_be_refcounted_or_null(t))
verbose(env, ",ref_obj_id=%d", reg->ref_obj_id);
@@ -978,6 +1033,17 @@ static void __reg_bound_offset(struct bpf_reg_state *reg)
reg->umax_value));
}
+static void __reg_bound_offset32(struct bpf_reg_state *reg)
+{
+ u64 mask = 0xffffFFFF;
+ struct tnum range = tnum_range(reg->umin_value & mask,
+ reg->umax_value & mask);
+ struct tnum lo32 = tnum_cast(reg->var_off, 4);
+ struct tnum hi32 = tnum_lshift(tnum_rshift(reg->var_off, 32), 32);
+
+ reg->var_off = tnum_or(hi32, tnum_intersect(lo32, range));
+}
+
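__reg_bound_offset32() only tightens the low 32 bits: it casts var_off down to a word, intersects that with the range learned from a 32-bit jump, and ORs the untouched upper word back in. Below is a minimal userspace tnum model replaying that computation; the formulas follow kernel/bpf/tnum.c, only the ops needed here are included, and the main() values are illustrative.

#include <stdint.h>
#include <stdio.h>

/* tnum ("tracked number"): bits set in 'mask' are unknown,
 * the remaining bits equal 'value'. */
struct tnum { uint64_t value, mask; };

static struct tnum T(uint64_t v, uint64_t m) { struct tnum t = { v, m }; return t; }

static struct tnum tnum_range(uint64_t min, uint64_t max)
{
	uint64_t chi = min ^ max, delta;
	int bits = chi ? 64 - __builtin_clzll(chi) : 0;

	if (bits > 63)			/* 1ULL << 64 would be undefined */
		return T(0, ~0ULL);
	delta = (1ULL << bits) - 1;
	return T(min & ~delta, delta);
}

static struct tnum tnum_lshift(struct tnum a, int s) { return T(a.value << s, a.mask << s); }
static struct tnum tnum_rshift(struct tnum a, int s) { return T(a.value >> s, a.mask >> s); }

static struct tnum tnum_cast(struct tnum a, int size)
{
	uint64_t m = (1ULL << (size * 8)) - 1;
	return T(a.value & m, a.mask & m);
}

static struct tnum tnum_intersect(struct tnum a, struct tnum b)
{
	uint64_t v = a.value | b.value, mu = a.mask & b.mask;
	return T(v & ~mu, mu);
}

static struct tnum tnum_or(struct tnum a, struct tnum b)
{
	uint64_t v = a.value | b.value, mu = a.mask | b.mask;
	return T(v, mu & ~v);
}

int main(void)
{
	/* low word fully unknown, high word known to be zero */
	struct tnum var_off = T(0, 0xffffffffULL);
	uint64_t umin = 0, umax = 5;	/* learned from a 32-bit jump */

	struct tnum range = tnum_range(umin & 0xffffffffULL, umax & 0xffffffffULL);
	struct tnum lo32 = tnum_cast(var_off, 4);
	struct tnum hi32 = tnum_lshift(tnum_rshift(var_off, 32), 32);
	struct tnum res = tnum_or(hi32, tnum_intersect(lo32, range));

	printf("var_off: value=%#llx mask=%#llx\n",	/* mask shrinks to 0x7 */
	       (unsigned long long)res.value, (unsigned long long)res.mask);
	return 0;
}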
/* Reset the min/max bounds of a register */
static void __mark_reg_unbounded(struct bpf_reg_state *reg)
{
@@ -2331,10 +2397,12 @@ static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off,
/* check access to 'struct bpf_context' fields. Supports fixed offsets only */
static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size,
- enum bpf_access_type t, enum bpf_reg_type *reg_type)
+ enum bpf_access_type t, enum bpf_reg_type *reg_type,
+ u32 *btf_id)
{
struct bpf_insn_access_aux info = {
.reg_type = *reg_type,
+ .log = &env->log,
};
if (env->ops->is_valid_access &&
@@ -2348,7 +2416,10 @@ static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off,
*/
*reg_type = info.reg_type;
- env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size;
+ if (*reg_type == PTR_TO_BTF_ID)
+ *btf_id = info.btf_id;
+ else
+ env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size;
/* remember the offset of last byte accessed in ctx */
if (env->prog->aux->max_ctx_offset < off + size)
env->prog->aux->max_ctx_offset = off + size;
@@ -2739,6 +2810,88 @@ static void coerce_reg_to_size(struct bpf_reg_state *reg, int size)
reg->smax_value = reg->umax_value;
}
+static bool bpf_map_is_rdonly(const struct bpf_map *map)
+{
+ return (map->map_flags & BPF_F_RDONLY_PROG) && map->frozen;
+}
+
+static int bpf_map_direct_read(struct bpf_map *map, int off, int size, u64 *val)
+{
+ void *ptr;
+ u64 addr;
+ int err;
+
+ err = map->ops->map_direct_value_addr(map, &addr, off);
+ if (err)
+ return err;
+ ptr = (void *)(long)addr + off;
+
+ switch (size) {
+ case sizeof(u8):
+ *val = (u64)*(u8 *)ptr;
+ break;
+ case sizeof(u16):
+ *val = (u64)*(u16 *)ptr;
+ break;
+ case sizeof(u32):
+ *val = (u64)*(u32 *)ptr;
+ break;
+ case sizeof(u64):
+ *val = *(u64 *)ptr;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
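bpf_map_direct_read() widens a 1/2/4/8-byte load from a frozen read-only map into a u64 so the verifier can treat the loaded value as a known scalar. A standalone sketch of the size switch; memcpy sidesteps the alignment assumptions the kernel casts tolerate, and the buffer contents are illustrative.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static int direct_read(const void *base, int off, int size, uint64_t *val)
{
	uint8_t b; uint16_t h; uint32_t w;

	switch (size) {
	case sizeof(uint8_t):  memcpy(&b, (const char *)base + off, 1); *val = b; break;
	case sizeof(uint16_t): memcpy(&h, (const char *)base + off, 2); *val = h; break;
	case sizeof(uint32_t): memcpy(&w, (const char *)base + off, 4); *val = w; break;
	case sizeof(uint64_t): memcpy(val, (const char *)base + off, 8); break;
	default: return -EINVAL;	/* only power-of-two scalar sizes */
	}
	return 0;
}

int main(void)
{
	uint8_t buf[8] = { 0x88, 0x77, 0x66, 0x55, 0x44, 0x33, 0x22, 0x11 };
	uint64_t v = 0;

	direct_read(buf, 0, 4, &v);
	printf("4-byte read zero-extended: %#llx\n", (unsigned long long)v);
	return 0;
}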
+static int check_ptr_to_btf_access(struct bpf_verifier_env *env,
+ struct bpf_reg_state *regs,
+ int regno, int off, int size,
+ enum bpf_access_type atype,
+ int value_regno)
+{
+ struct bpf_reg_state *reg = regs + regno;
+ const struct btf_type *t = btf_type_by_id(btf_vmlinux, reg->btf_id);
+ const char *tname = btf_name_by_offset(btf_vmlinux, t->name_off);
+ u32 btf_id;
+ int ret;
+
+ if (atype != BPF_READ) {
+ verbose(env, "only read is supported\n");
+ return -EACCES;
+ }
+
+ if (off < 0) {
+ verbose(env,
+ "R%d is ptr_%s invalid negative access: off=%d\n",
+ regno, tname, off);
+ return -EACCES;
+ }
+ if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
+ char tn_buf[48];
+
+ tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
+ verbose(env,
+ "R%d is ptr_%s invalid variable offset: off=%d, var_off=%s\n",
+ regno, tname, off, tn_buf);
+ return -EACCES;
+ }
+
+ ret = btf_struct_access(&env->log, t, off, size, atype, &btf_id);
+ if (ret < 0)
+ return ret;
+
+ if (ret == SCALAR_VALUE) {
+ mark_reg_unknown(env, regs, value_regno);
+ return 0;
+ }
+ mark_reg_known_zero(env, regs, value_regno);
+ regs[value_regno].type = PTR_TO_BTF_ID;
+ regs[value_regno].btf_id = btf_id;
+ return 0;
+}
+
/* check whether memory at (regno + off) is accessible for t = (read | write)
* if t==write, value_regno is a register which value is stored into memory
* if t==read, value_regno is a register which will receive the value from memory
@@ -2776,11 +2929,30 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
if (err)
return err;
err = check_map_access(env, regno, off, size, false);
- if (!err && t == BPF_READ && value_regno >= 0)
- mark_reg_unknown(env, regs, value_regno);
+ if (!err && t == BPF_READ && value_regno >= 0) {
+ struct bpf_map *map = reg->map_ptr;
+
+ /* if map is read-only, track its contents as scalars */
+ if (tnum_is_const(reg->var_off) &&
+ bpf_map_is_rdonly(map) &&
+ map->ops->map_direct_value_addr) {
+ int map_off = off + reg->var_off.value;
+ u64 val = 0;
+
+ err = bpf_map_direct_read(map, map_off, size,
+ &val);
+ if (err)
+ return err;
+ regs[value_regno].type = SCALAR_VALUE;
+ __mark_reg_known(&regs[value_regno], val);
+ } else {
+ mark_reg_unknown(env, regs, value_regno);
+ }
+ }
} else if (reg->type == PTR_TO_CTX) {
enum bpf_reg_type reg_type = SCALAR_VALUE;
+ u32 btf_id = 0;
if (t == BPF_WRITE && value_regno >= 0 &&
is_pointer_value(env, value_regno)) {
@@ -2792,7 +2964,9 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
if (err < 0)
return err;
- err = check_ctx_access(env, insn_idx, off, size, t, &reg_type);
+ err = check_ctx_access(env, insn_idx, off, size, t, &reg_type, &btf_id);
+ if (err)
+ verbose_linfo(env, insn_idx, "; ");
if (!err && t == BPF_READ && value_regno >= 0) {
/* ctx access returns either a scalar, or a
* PTR_TO_PACKET[_META,_END]. In the latter
@@ -2811,6 +2985,8 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
* a sub-register.
*/
regs[value_regno].subreg_def = DEF_NOT_SUBREG;
+ if (reg_type == PTR_TO_BTF_ID)
+ regs[value_regno].btf_id = btf_id;
}
regs[value_regno].type = reg_type;
}
@@ -2870,6 +3046,9 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
err = check_tp_buffer_access(env, reg, regno, off, size);
if (!err && t == BPF_READ && value_regno >= 0)
mark_reg_unknown(env, regs, value_regno);
+ } else if (reg->type == PTR_TO_BTF_ID) {
+ err = check_ptr_to_btf_access(env, regs, regno, off, size, t,
+ value_regno);
} else {
verbose(env, "R%d invalid mem access '%s'\n", regno,
reg_type_str[reg->type]);
@@ -3298,6 +3477,22 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
expected_type = PTR_TO_SOCKET;
if (type != expected_type)
goto err_type;
+ } else if (arg_type == ARG_PTR_TO_BTF_ID) {
+ expected_type = PTR_TO_BTF_ID;
+ if (type != expected_type)
+ goto err_type;
+ if (reg->btf_id != meta->btf_id) {
+ verbose(env, "Helper has type %s got %s in R%d\n",
+ kernel_type_name(meta->btf_id),
+ kernel_type_name(reg->btf_id), regno);
+
+ return -EACCES;
+ }
+ if (!tnum_is_const(reg->var_off) || reg->var_off.value || reg->off) {
+ verbose(env, "R%d is a pointer to in-kernel struct with non-zero offset\n",
+ regno);
+ return -EACCES;
+ }
} else if (arg_type == ARG_PTR_TO_SPIN_LOCK) {
if (meta->func_id == BPF_FUNC_spin_lock) {
if (process_spin_lock(env, regno, true))
@@ -3445,6 +3640,7 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
if (func_id != BPF_FUNC_perf_event_read &&
func_id != BPF_FUNC_perf_event_output &&
+ func_id != BPF_FUNC_skb_output &&
func_id != BPF_FUNC_perf_event_read_value)
goto error;
break;
@@ -3532,6 +3728,7 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
case BPF_FUNC_perf_event_read:
case BPF_FUNC_perf_event_output:
case BPF_FUNC_perf_event_read_value:
+ case BPF_FUNC_skb_output:
if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY)
goto error;
break;
@@ -3810,6 +4007,9 @@ static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
/* only increment it after check_reg_arg() finished */
state->curframe++;
+ if (btf_check_func_arg_match(env, subprog))
+ return -EINVAL;
+
/* and go analyze first insn of the callee */
*insn_idx = target_insn;
@@ -3916,15 +4116,49 @@ record_func_map(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
return -EACCES;
}
- if (!BPF_MAP_PTR(aux->map_state))
+ if (!BPF_MAP_PTR(aux->map_ptr_state))
bpf_map_ptr_store(aux, meta->map_ptr,
meta->map_ptr->unpriv_array);
- else if (BPF_MAP_PTR(aux->map_state) != meta->map_ptr)
+ else if (BPF_MAP_PTR(aux->map_ptr_state) != meta->map_ptr)
bpf_map_ptr_store(aux, BPF_MAP_PTR_POISON,
meta->map_ptr->unpriv_array);
return 0;
}
+static int
+record_func_key(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
+ int func_id, int insn_idx)
+{
+ struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx];
+ struct bpf_reg_state *regs = cur_regs(env), *reg;
+ struct bpf_map *map = meta->map_ptr;
+ struct tnum range;
+ u64 val;
+
+ if (func_id != BPF_FUNC_tail_call)
+ return 0;
+ if (!map || map->map_type != BPF_MAP_TYPE_PROG_ARRAY) {
+ verbose(env, "kernel subsystem misconfigured verifier\n");
+ return -EINVAL;
+ }
+
+ range = tnum_range(0, map->max_entries - 1);
+ reg = &regs[BPF_REG_3];
+
+ if (!register_is_const(reg) || !tnum_in(range, reg->var_off)) {
+ bpf_map_key_store(aux, BPF_MAP_KEY_POISON);
+ return 0;
+ }
+
+ val = reg->var_off.value;
+ if (bpf_map_key_unseen(aux))
+ bpf_map_key_store(aux, val);
+ else if (!bpf_map_key_poisoned(aux) &&
+ bpf_map_key_immediate(aux) != val)
+ bpf_map_key_store(aux, BPF_MAP_KEY_POISON);
+ return 0;
+}
+
static int check_reference_leak(struct bpf_verifier_env *env)
{
struct bpf_func_state *state = cur_func(env);
@@ -3986,23 +4220,20 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
meta.func_id = func_id;
/* check args */
- err = check_func_arg(env, BPF_REG_1, fn->arg1_type, &meta);
- if (err)
- return err;
- err = check_func_arg(env, BPF_REG_2, fn->arg2_type, &meta);
- if (err)
- return err;
- err = check_func_arg(env, BPF_REG_3, fn->arg3_type, &meta);
- if (err)
- return err;
- err = check_func_arg(env, BPF_REG_4, fn->arg4_type, &meta);
- if (err)
- return err;
- err = check_func_arg(env, BPF_REG_5, fn->arg5_type, &meta);
+ for (i = 0; i < 5; i++) {
+ err = btf_resolve_helper_id(&env->log, fn, i);
+ if (err > 0)
+ meta.btf_id = err;
+ err = check_func_arg(env, BPF_REG_1 + i, fn->arg_type[i], &meta);
+ if (err)
+ return err;
+ }
+
+ err = record_func_map(env, &meta, func_id, insn_idx);
if (err)
return err;
- err = record_func_map(env, &meta, func_id, insn_idx);
+ err = record_func_key(env, &meta, func_id, insn_idx);
if (err)
return err;
@@ -5433,6 +5664,10 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg,
/* We might have learned some bits from the bounds. */
__reg_bound_offset(false_reg);
__reg_bound_offset(true_reg);
+ if (is_jmp32) {
+ __reg_bound_offset32(false_reg);
+ __reg_bound_offset32(true_reg);
+ }
/* Intersecting with the old var_off might have improved our bounds
* slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
* then new var_off is (0; 0x7f...fc) which improves our umax.
@@ -5542,6 +5777,10 @@ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
/* We might have learned some bits from the bounds. */
__reg_bound_offset(false_reg);
__reg_bound_offset(true_reg);
+ if (is_jmp32) {
+ __reg_bound_offset32(false_reg);
+ __reg_bound_offset32(true_reg);
+ }
/* Intersecting with the old var_off might have improved our bounds
* slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
* then new var_off is (0; 0x7f...fc) which improves our umax.
@@ -6124,6 +6363,11 @@ static int check_return_code(struct bpf_verifier_env *env)
case BPF_PROG_TYPE_CGROUP_SYSCTL:
case BPF_PROG_TYPE_CGROUP_SOCKOPT:
break;
+ case BPF_PROG_TYPE_RAW_TRACEPOINT:
+ if (!env->prog->aux->attach_btf_id)
+ return 0;
+ range = tnum_const(0);
+ break;
default:
return 0;
}
@@ -6406,6 +6650,7 @@ static int check_btf_func(struct bpf_verifier_env *env,
u32 i, nfuncs, urec_size, min_size;
u32 krec_size = sizeof(struct bpf_func_info);
struct bpf_func_info *krecord;
+ struct bpf_func_info_aux *info_aux = NULL;
const struct btf_type *type;
struct bpf_prog *prog;
const struct btf *btf;
@@ -6439,6 +6684,9 @@ static int check_btf_func(struct bpf_verifier_env *env,
krecord = kvcalloc(nfuncs, krec_size, GFP_KERNEL | __GFP_NOWARN);
if (!krecord)
return -ENOMEM;
+ info_aux = kcalloc(nfuncs, sizeof(*info_aux), GFP_KERNEL | __GFP_NOWARN);
+ if (!info_aux)
+ goto err_free;
for (i = 0; i < nfuncs; i++) {
ret = bpf_check_uarg_tail_zero(urecord, krec_size, urec_size);
@@ -6490,29 +6738,31 @@ static int check_btf_func(struct bpf_verifier_env *env,
ret = -EINVAL;
goto err_free;
}
-
prev_offset = krecord[i].insn_off;
urecord += urec_size;
}
prog->aux->func_info = krecord;
prog->aux->func_info_cnt = nfuncs;
+ prog->aux->func_info_aux = info_aux;
return 0;
err_free:
kvfree(krecord);
+ kfree(info_aux);
return ret;
}
static void adjust_btf_func(struct bpf_verifier_env *env)
{
+ struct bpf_prog_aux *aux = env->prog->aux;
int i;
- if (!env->prog->aux->func_info)
+ if (!aux->func_info)
return;
for (i = 0; i < env->subprog_cnt; i++)
- env->prog->aux->func_info[i].insn_off = env->subprog_info[i].start;
+ aux->func_info[i].insn_off = env->subprog_info[i].start;
}
#define MIN_BPF_LINEINFO_SIZE (offsetof(struct bpf_line_info, line_col) + \
@@ -7440,6 +7690,7 @@ static bool reg_type_mismatch_ok(enum bpf_reg_type type)
case PTR_TO_TCP_SOCK:
case PTR_TO_TCP_SOCK_OR_NULL:
case PTR_TO_XDP_SOCK:
+ case PTR_TO_BTF_ID:
return false;
default:
return true;
@@ -7492,6 +7743,9 @@ static int do_check(struct bpf_verifier_env *env)
0 /* frameno */,
0 /* subprogno, zero == main subprog */);
+ if (btf_check_func_arg_match(env, 0))
+ return -EINVAL;
+
for (;;) {
struct bpf_insn *insn;
u8 class;
@@ -8008,11 +8262,7 @@ static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env)
* will be used by the valid program until it's unloaded
* and all maps are released in free_used_maps()
*/
- map = bpf_map_inc(map, false);
- if (IS_ERR(map)) {
- fdput(f);
- return PTR_ERR(map);
- }
+ bpf_map_inc(map);
aux->map_index = env->used_map_cnt;
env->used_maps[env->used_map_cnt++] = map;
@@ -8581,6 +8831,14 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
case PTR_TO_XDP_SOCK:
convert_ctx_access = bpf_xdp_sock_convert_ctx_access;
break;
+ case PTR_TO_BTF_ID:
+ if (type == BPF_WRITE) {
+ verbose(env, "Writes through BTF pointers are not allowed\n");
+ return -EINVAL;
+ }
+ insn->code = BPF_LDX | BPF_PROBE_MEM | BPF_SIZE((insn)->code);
+ env->prog->aux->num_exentries++;
+ continue;
default:
continue;
}
@@ -8871,6 +9129,7 @@ static int fixup_call_args(struct bpf_verifier_env *env)
static int fixup_bpf_calls(struct bpf_verifier_env *env)
{
struct bpf_prog *prog = env->prog;
+ bool expect_blinding = bpf_jit_blinding_enabled(prog);
struct bpf_insn *insn = prog->insnsi;
const struct bpf_func_proto *fn;
const int insn_cnt = prog->len;
@@ -8879,7 +9138,7 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
struct bpf_insn insn_buf[16];
struct bpf_prog *new_prog;
struct bpf_map *map_ptr;
- int i, cnt, delta = 0;
+ int i, ret, cnt, delta = 0;
for (i = 0; i < insn_cnt; i++, insn++) {
if (insn->code == (BPF_ALU64 | BPF_MOD | BPF_X) ||
@@ -9023,6 +9282,26 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
insn->code = BPF_JMP | BPF_TAIL_CALL;
aux = &env->insn_aux_data[i + delta];
+ if (prog->jit_requested && !expect_blinding &&
+ !bpf_map_key_poisoned(aux) &&
+ !bpf_map_ptr_poisoned(aux) &&
+ !bpf_map_ptr_unpriv(aux)) {
+ struct bpf_jit_poke_descriptor desc = {
+ .reason = BPF_POKE_REASON_TAIL_CALL,
+ .tail_call.map = BPF_MAP_PTR(aux->map_ptr_state),
+ .tail_call.key = bpf_map_key_immediate(aux),
+ };
+
+ ret = bpf_jit_add_poke_descriptor(prog, &desc);
+ if (ret < 0) {
+ verbose(env, "adding tail call poke descriptor failed\n");
+ return ret;
+ }
+
+ insn->imm = ret + 1;
+ continue;
+ }
+
if (!bpf_map_ptr_unpriv(aux))
continue;
@@ -9037,7 +9316,7 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
return -EINVAL;
}
- map_ptr = BPF_MAP_PTR(aux->map_state);
+ map_ptr = BPF_MAP_PTR(aux->map_ptr_state);
insn_buf[0] = BPF_JMP_IMM(BPF_JGE, BPF_REG_3,
map_ptr->max_entries, 2);
insn_buf[1] = BPF_ALU32_IMM(BPF_AND, BPF_REG_3,
@@ -9071,7 +9350,7 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
if (bpf_map_ptr_poisoned(aux))
goto patch_call_imm;
- map_ptr = BPF_MAP_PTR(aux->map_state);
+ map_ptr = BPF_MAP_PTR(aux->map_ptr_state);
ops = map_ptr->ops;
if (insn->imm == BPF_FUNC_map_lookup_elem &&
ops->map_gen_lookup) {
@@ -9151,6 +9430,23 @@ patch_call_imm:
insn->imm = fn->func - __bpf_call_base;
}
+ /* Since poke tab is now finalized, publish aux to tracker. */
+ for (i = 0; i < prog->aux->size_poke_tab; i++) {
+ map_ptr = prog->aux->poke_tab[i].tail_call.map;
+ if (!map_ptr->ops->map_poke_track ||
+ !map_ptr->ops->map_poke_untrack ||
+ !map_ptr->ops->map_poke_run) {
+ verbose(env, "bpf verifier is misconfigured\n");
+ return -EINVAL;
+ }
+
+ ret = map_ptr->ops->map_poke_track(map_ptr, prog->aux);
+ if (ret < 0) {
+ verbose(env, "tracking tail call prog failed\n");
+ return ret;
+ }
+ }
+
return 0;
}
@@ -9208,6 +9504,161 @@ static void print_verification_stats(struct bpf_verifier_env *env)
env->peak_states, env->longest_mark_read_walk);
}
+static int check_attach_btf_id(struct bpf_verifier_env *env)
+{
+ struct bpf_prog *prog = env->prog;
+ struct bpf_prog *tgt_prog = prog->aux->linked_prog;
+ u32 btf_id = prog->aux->attach_btf_id;
+ const char prefix[] = "btf_trace_";
+ int ret = 0, subprog = -1, i;
+ struct bpf_trampoline *tr;
+ const struct btf_type *t;
+ bool conservative = true;
+ const char *tname;
+ struct btf *btf;
+ long addr;
+ u64 key;
+
+ if (prog->type != BPF_PROG_TYPE_TRACING)
+ return 0;
+
+ if (!btf_id) {
+ verbose(env, "Tracing programs must provide btf_id\n");
+ return -EINVAL;
+ }
+ btf = bpf_prog_get_target_btf(prog);
+ if (!btf) {
+ verbose(env,
+ "FENTRY/FEXIT program can only be attached to another program annotated with BTF\n");
+ return -EINVAL;
+ }
+ t = btf_type_by_id(btf, btf_id);
+ if (!t) {
+ verbose(env, "attach_btf_id %u is invalid\n", btf_id);
+ return -EINVAL;
+ }
+ tname = btf_name_by_offset(btf, t->name_off);
+ if (!tname) {
+ verbose(env, "attach_btf_id %u doesn't have a name\n", btf_id);
+ return -EINVAL;
+ }
+ if (tgt_prog) {
+ struct bpf_prog_aux *aux = tgt_prog->aux;
+
+ for (i = 0; i < aux->func_info_cnt; i++)
+ if (aux->func_info[i].type_id == btf_id) {
+ subprog = i;
+ break;
+ }
+ if (subprog == -1) {
+ verbose(env, "Subprog %s doesn't exist\n", tname);
+ return -EINVAL;
+ }
+ conservative = aux->func_info_aux[subprog].unreliable;
+ key = ((u64)aux->id) << 32 | btf_id;
+ } else {
+ key = btf_id;
+ }
+
+ switch (prog->expected_attach_type) {
+ case BPF_TRACE_RAW_TP:
+ if (tgt_prog) {
+ verbose(env,
+ "Only FENTRY/FEXIT progs are attachable to another BPF prog\n");
+ return -EINVAL;
+ }
+ if (!btf_type_is_typedef(t)) {
+ verbose(env, "attach_btf_id %u is not a typedef\n",
+ btf_id);
+ return -EINVAL;
+ }
+ if (strncmp(prefix, tname, sizeof(prefix) - 1)) {
+ verbose(env, "attach_btf_id %u points to wrong type name %s\n",
+ btf_id, tname);
+ return -EINVAL;
+ }
+ tname += sizeof(prefix) - 1;
+ t = btf_type_by_id(btf, t->type);
+ if (!btf_type_is_ptr(t))
+ /* should never happen in valid vmlinux build */
+ return -EINVAL;
+ t = btf_type_by_id(btf, t->type);
+ if (!btf_type_is_func_proto(t))
+ /* should never happen in valid vmlinux build */
+ return -EINVAL;
+
+ /* remember two read only pointers that are valid for
+ * the life time of the kernel
+ */
+ prog->aux->attach_func_name = tname;
+ prog->aux->attach_func_proto = t;
+ prog->aux->attach_btf_trace = true;
+ return 0;
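The strncmp()/tname advance above uses the classic sizeof()-1 prefix idiom: sizeof("btf_trace_") counts the trailing NUL, so subtracting one gives the prefix length both for the compare and for skipping past it. A minimal userspace sketch of the same idiom (the tname value is illustrative, not kernel code):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            static const char prefix[] = "btf_trace_";
            const char *tname = "btf_trace_sched_switch";

            /* sizeof(prefix) includes the NUL terminator, hence the -1 */
            if (strncmp(prefix, tname, sizeof(prefix) - 1) == 0)
                    printf("tracepoint: %s\n", tname + sizeof(prefix) - 1);
            return 0;
    }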
+ case BPF_TRACE_FENTRY:
+ case BPF_TRACE_FEXIT:
+ if (!btf_type_is_func(t)) {
+ verbose(env, "attach_btf_id %u is not a function\n",
+ btf_id);
+ return -EINVAL;
+ }
+ t = btf_type_by_id(btf, t->type);
+ if (!btf_type_is_func_proto(t))
+ return -EINVAL;
+ tr = bpf_trampoline_lookup(key);
+ if (!tr)
+ return -ENOMEM;
+ prog->aux->attach_func_name = tname;
+ /* t is either vmlinux type or another program's type */
+ prog->aux->attach_func_proto = t;
+ mutex_lock(&tr->mutex);
+ if (tr->func.addr) {
+ prog->aux->trampoline = tr;
+ goto out;
+ }
+ if (tgt_prog && conservative) {
+ prog->aux->attach_func_proto = NULL;
+ t = NULL;
+ }
+ ret = btf_distill_func_proto(&env->log, btf, t,
+ tname, &tr->func.model);
+ if (ret < 0)
+ goto out;
+ if (tgt_prog) {
+ if (!tgt_prog->jited) {
+ /* for now */
+ verbose(env, "Can trace only JITed BPF progs\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ if (tgt_prog->type == BPF_PROG_TYPE_TRACING) {
+ /* prevent cycles */
+ verbose(env, "Cannot recursively attach\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ addr = (long) tgt_prog->aux->func[subprog]->bpf_func;
+ } else {
+ addr = kallsyms_lookup_name(tname);
+ if (!addr) {
+ verbose(env,
+ "The address of function %s cannot be found\n",
+ tname);
+ ret = -ENOENT;
+ goto out;
+ }
+ }
+ tr->func.addr = (void *)addr;
+ prog->aux->trampoline = tr;
+out:
+ mutex_unlock(&tr->mutex);
+ if (ret)
+ bpf_trampoline_put(tr);
+ return ret;
+ default:
+ return -EINVAL;
+ }
+}
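The trampoline lookup key above packs two 32-bit identifiers into one u64: the target program's id in the high half (zero when attaching to a vmlinux function) and the BTF id in the low half, so exactly one trampoline is shared per distinct attach target. A standalone sketch of the packing:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t trampoline_key(uint32_t tgt_prog_id, uint32_t btf_id)
    {
            /* prog id 0 means "vmlinux target", matching key = btf_id above */
            return ((uint64_t)tgt_prog_id << 32) | btf_id;
    }

    int main(void)
    {
            printf("%#llx\n", (unsigned long long)trampoline_key(7, 42));
            return 0;
    }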
+
int bpf_check(struct bpf_prog **prog, union bpf_attr *attr,
union bpf_attr __user *uattr)
{
@@ -9241,6 +9692,13 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr,
env->ops = bpf_verifier_ops[env->prog->type];
is_priv = capable(CAP_SYS_ADMIN);
+ if (!btf_vmlinux && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) {
+ mutex_lock(&bpf_verifier_lock);
+ if (!btf_vmlinux)
+ btf_vmlinux = btf_parse_vmlinux();
+ mutex_unlock(&bpf_verifier_lock);
+ }
+
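btf_vmlinux is parsed lazily on first use: the unlocked check keeps every later bpf_check() call cheap, and the re-check under bpf_verifier_lock guarantees a single parse. A minimal pthread sketch of the same double-checked pattern (struct btf, parse_btf() and get_btf() are stand-ins, not kernel API; a strict C11 version would also use acquire/release atomics for the unlocked read):

    #include <pthread.h>
    #include <stdlib.h>

    struct btf { int dummy; };                   /* stand-in type */

    static struct btf *btf_cache;
    static pthread_mutex_t btf_lock = PTHREAD_MUTEX_INITIALIZER;

    static struct btf *parse_btf(void)           /* stands in for btf_parse_vmlinux() */
    {
            return calloc(1, sizeof(struct btf));
    }

    static struct btf *get_btf(void)
    {
            if (!btf_cache) {                    /* cheap unlocked fast path */
                    pthread_mutex_lock(&btf_lock);
                    if (!btf_cache)              /* re-check under the lock */
                            btf_cache = parse_btf();
                    pthread_mutex_unlock(&btf_lock);
            }
            return btf_cache;
    }

    int main(void) { return get_btf() ? 0 : 1; }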
/* grab the mutex to protect few globals used by verifier */
if (!is_priv)
mutex_lock(&bpf_verifier_lock);
@@ -9260,6 +9718,17 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr,
goto err_unlock;
}
+ if (IS_ERR(btf_vmlinux)) {
+ /* Either gcc, pahole, or the kernel is broken. */
+ verbose(env, "in-kernel BTF is malformed\n");
+ ret = PTR_ERR(btf_vmlinux);
+ goto skip_full_check;
+ }
+
+ ret = check_attach_btf_id(env);
+ if (ret)
+ goto skip_full_check;
+
env->strict_alignment = !!(attr->prog_flags & BPF_F_STRICT_ALIGNMENT);
if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
env->strict_alignment = true;
diff --git a/kernel/bpf/xskmap.c b/kernel/bpf/xskmap.c
index 82a1ffe15dfa..90c4fce1c981 100644
--- a/kernel/bpf/xskmap.c
+++ b/kernel/bpf/xskmap.c
@@ -9,19 +9,10 @@
#include <linux/slab.h>
#include <linux/sched.h>
-struct xsk_map {
- struct bpf_map map;
- struct xdp_sock **xsk_map;
- struct list_head __percpu *flush_list;
- spinlock_t lock; /* Synchronize map updates */
-};
-
int xsk_map_inc(struct xsk_map *map)
{
- struct bpf_map *m = &map->map;
-
- m = bpf_map_inc(m, false);
- return PTR_ERR_OR_ZERO(m);
+ bpf_map_inc(&map->map);
+ return 0;
}
void xsk_map_put(struct xsk_map *map)
@@ -80,9 +71,10 @@ static void xsk_map_sock_delete(struct xdp_sock *xs,
static struct bpf_map *xsk_map_alloc(union bpf_attr *attr)
{
+ struct bpf_map_memory mem;
+ int cpu, err, numa_node;
struct xsk_map *m;
- int cpu, err;
- u64 cost;
+ u64 cost, size;
if (!capable(CAP_NET_ADMIN))
return ERR_PTR(-EPERM);
@@ -92,44 +84,35 @@ static struct bpf_map *xsk_map_alloc(union bpf_attr *attr)
attr->map_flags & ~(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY))
return ERR_PTR(-EINVAL);
- m = kzalloc(sizeof(*m), GFP_USER);
- if (!m)
+ numa_node = bpf_map_attr_numa_node(attr);
+ size = struct_size(m, xsk_map, attr->max_entries);
+ cost = size + array_size(sizeof(*m->flush_list), num_possible_cpus());
+
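struct_size() sizes a header plus its flexible-array tail in one overflow-checked expression, and array_size() does the same for a plain array. A simplified sketch of what the computation expands to (the real kernel helpers additionally saturate to SIZE_MAX on overflow, which this sketch omits; the struct is a stand-in):

    #include <stddef.h>
    #include <stdio.h>

    struct map_like {
            long hdr;                /* stands in for the embedded struct bpf_map */
            void *entries[];         /* flexible array member, like xsk_map[] */
    };

    static size_t my_struct_size(size_t n)
    {
            return offsetof(struct map_like, entries) + n * sizeof(void *);
    }

    int main(void)
    {
            printf("%zu bytes for 4 entries\n", my_struct_size(4));
            return 0;
    }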
+ err = bpf_map_charge_init(&mem, cost);
+ if (err < 0)
+ return ERR_PTR(err);
+
+ m = bpf_map_area_alloc(size, numa_node);
+ if (!m) {
+ bpf_map_charge_finish(&mem);
return ERR_PTR(-ENOMEM);
+ }
bpf_map_init_from_attr(&m->map, attr);
+ bpf_map_charge_move(&m->map.memory, &mem);
spin_lock_init(&m->lock);
- cost = (u64)m->map.max_entries * sizeof(struct xdp_sock *);
- cost += sizeof(struct list_head) * num_possible_cpus();
-
- /* Notice returns -EPERM on if map size is larger than memlock limit */
- err = bpf_map_charge_init(&m->map.memory, cost);
- if (err)
- goto free_m;
-
- err = -ENOMEM;
-
m->flush_list = alloc_percpu(struct list_head);
- if (!m->flush_list)
- goto free_charge;
+ if (!m->flush_list) {
+ bpf_map_charge_finish(&m->map.memory);
+ bpf_map_area_free(m);
+ return ERR_PTR(-ENOMEM);
+ }
for_each_possible_cpu(cpu)
INIT_LIST_HEAD(per_cpu_ptr(m->flush_list, cpu));
- m->xsk_map = bpf_map_area_alloc(m->map.max_entries *
- sizeof(struct xdp_sock *),
- m->map.numa_node);
- if (!m->xsk_map)
- goto free_percpu;
return &m->map;
-
-free_percpu:
- free_percpu(m->flush_list);
-free_charge:
- bpf_map_charge_finish(&m->map.memory);
-free_m:
- kfree(m);
- return ERR_PTR(err);
}
static void xsk_map_free(struct bpf_map *map)
@@ -139,8 +122,7 @@ static void xsk_map_free(struct bpf_map *map)
bpf_clear_redirect_map(map);
synchronize_net();
free_percpu(m->flush_list);
- bpf_map_area_free(m->xsk_map);
- kfree(m);
+ bpf_map_area_free(m);
}
static int xsk_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
@@ -160,45 +142,20 @@ static int xsk_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
return 0;
}
-struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map, u32 key)
-{
- struct xsk_map *m = container_of(map, struct xsk_map, map);
- struct xdp_sock *xs;
-
- if (key >= map->max_entries)
- return NULL;
-
- xs = READ_ONCE(m->xsk_map[key]);
- return xs;
-}
-
-int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp,
- struct xdp_sock *xs)
-{
- struct xsk_map *m = container_of(map, struct xsk_map, map);
- struct list_head *flush_list = this_cpu_ptr(m->flush_list);
- int err;
-
- err = xsk_rcv(xs, xdp);
- if (err)
- return err;
-
- if (!xs->flush_node.prev)
- list_add(&xs->flush_node, flush_list);
-
- return 0;
-}
-
-void __xsk_map_flush(struct bpf_map *map)
+static u32 xsk_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
- struct xsk_map *m = container_of(map, struct xsk_map, map);
- struct list_head *flush_list = this_cpu_ptr(m->flush_list);
- struct xdp_sock *xs, *tmp;
-
- list_for_each_entry_safe(xs, tmp, flush_list, flush_node) {
- xsk_flush(xs);
- __list_del_clearprev(&xs->flush_node);
- }
+ const int ret = BPF_REG_0, mp = BPF_REG_1, index = BPF_REG_2;
+ struct bpf_insn *insn = insn_buf;
+
+ *insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
+ *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
+ *insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(sizeof(struct xsk_sock *)));
+ *insn++ = BPF_ALU64_IMM(BPF_ADD, mp, offsetof(struct xsk_map, xsk_map));
+ *insn++ = BPF_ALU64_REG(BPF_ADD, ret, mp);
+ *insn++ = BPF_LDX_MEM(BPF_SIZEOF(struct xsk_sock *), ret, ret, 0);
+ *insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
+ *insn++ = BPF_MOV64_IMM(ret, 0);
+ return insn - insn_buf;
}
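xsk_map_gen_lookup() emits the element lookup inline as BPF instructions, letting the JIT replace a helper call with a bounds check and two loads. Roughly, the emitted sequence computes the following, sketched in plain C with stand-in types:

    #include <stdint.h>
    #include <stdlib.h>

    struct fake_map {
            uint32_t max_entries;
            void *xsk_map[];                     /* the flexible array above */
    };

    static void *gen_lookup_equiv(struct fake_map *m, uint32_t key)
    {
            if (key >= m->max_entries)           /* BPF_JGE jumps over the load */
                    return NULL;                 /* BPF_MOV64_IMM(ret, 0) */
            /* LSH by ilog2(sizeof(ptr)), ADD offsetof(.., xsk_map), LDX */
            return m->xsk_map[key];
    }

    int main(void)
    {
            struct fake_map *m = calloc(1, sizeof(*m) + 2 * sizeof(void *));

            m->max_entries = 2;
            return gen_lookup_equiv(m, 5) == NULL ? 0 : 1;
    }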
static void *xsk_map_lookup_elem(struct bpf_map *map, void *key)
@@ -312,6 +269,7 @@ const struct bpf_map_ops xsk_map_ops = {
.map_free = xsk_map_free,
.map_get_next_key = xsk_map_get_next_key,
.map_lookup_elem = xsk_map_lookup_elem,
+ .map_gen_lookup = xsk_map_gen_lookup,
.map_lookup_elem_sys_only = xsk_map_lookup_elem_sys_only,
.map_update_elem = xsk_map_update_elem,
.map_delete_elem = xsk_map_delete_elem,
diff --git a/kernel/cgroup/cgroup-internal.h b/kernel/cgroup/cgroup-internal.h
index 809e34a3c017..90d1710fef6c 100644
--- a/kernel/cgroup/cgroup-internal.h
+++ b/kernel/cgroup/cgroup-internal.h
@@ -231,9 +231,10 @@ int cgroup_migrate(struct task_struct *leader, bool threadgroup,
int cgroup_attach_task(struct cgroup *dst_cgrp, struct task_struct *leader,
bool threadgroup);
-struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup)
+struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup,
+ bool *locked)
__acquires(&cgroup_threadgroup_rwsem);
-void cgroup_procs_write_finish(struct task_struct *task)
+void cgroup_procs_write_finish(struct task_struct *task, bool locked)
__releases(&cgroup_threadgroup_rwsem);
void cgroup_lock_and_drain_offline(struct cgroup *cgrp);
diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c
index 7f83f4121d8d..09f3a413f6f8 100644
--- a/kernel/cgroup/cgroup-v1.c
+++ b/kernel/cgroup/cgroup-v1.c
@@ -495,12 +495,13 @@ static ssize_t __cgroup1_procs_write(struct kernfs_open_file *of,
struct task_struct *task;
const struct cred *cred, *tcred;
ssize_t ret;
+ bool locked;
cgrp = cgroup_kn_lock_live(of->kn, false);
if (!cgrp)
return -ENODEV;
- task = cgroup_procs_write_start(buf, threadgroup);
+ task = cgroup_procs_write_start(buf, threadgroup, &locked);
ret = PTR_ERR_OR_ZERO(task);
if (ret)
goto out_unlock;
@@ -522,7 +523,7 @@ static ssize_t __cgroup1_procs_write(struct kernfs_open_file *of,
ret = cgroup_attach_task(cgrp, task, threadgroup);
out_finish:
- cgroup_procs_write_finish(task);
+ cgroup_procs_write_finish(task, locked);
out_unlock:
cgroup_kn_unlock(of->kn);
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index 080561bb8a4b..735af8f15f95 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -899,8 +899,7 @@ static void css_set_move_task(struct task_struct *task,
/*
* We are synchronized through cgroup_threadgroup_rwsem
* against PF_EXITING setting such that we can't race
- * against cgroup_exit() changing the css_set to
- * init_css_set and dropping the old one.
+ * against cgroup_exit()/cgroup_free() dropping the css_set.
*/
WARN_ON_ONCE(task->flags & PF_EXITING);
@@ -1309,10 +1308,7 @@ static void cgroup_exit_root_id(struct cgroup_root *root)
void cgroup_free_root(struct cgroup_root *root)
{
- if (root) {
- idr_destroy(&root->cgroup_idr);
- kfree(root);
- }
+ kfree(root);
}
static void cgroup_destroy_root(struct cgroup_root *root)
@@ -1374,6 +1370,8 @@ current_cgns_cgroup_from_root(struct cgroup_root *root)
cset = current->nsproxy->cgroup_ns->root_cset;
if (cset == &init_css_set) {
res = &root->cgrp;
+ } else if (root == &cgrp_dfl_root) {
+ res = cset->dfl_cgrp;
} else {
struct cgrp_cset_link *link;
@@ -1430,9 +1428,8 @@ struct cgroup *task_cgroup_from_root(struct task_struct *task,
struct cgroup_root *root)
{
/*
- * No need to lock the task - since we hold cgroup_mutex the
- * task can't change groups, so the only thing that can happen
- * is that it exits and its css is set back to init_css_set.
+ * No need to lock the task - since we hold css_set_lock the
+ * task can't change groups.
*/
return cset_cgroup_from_root(task_css_set(task), root);
}
@@ -1883,65 +1880,6 @@ static int cgroup_reconfigure(struct fs_context *fc)
return 0;
}
-/*
- * To reduce the fork() overhead for systems that are not actually using
- * their cgroups capability, we don't maintain the lists running through
- * each css_set to its tasks until we see the list actually used - in other
- * words after the first mount.
- */
-static bool use_task_css_set_links __read_mostly;
-
-void cgroup_enable_task_cg_lists(void)
-{
- struct task_struct *p, *g;
-
- /*
- * We need tasklist_lock because RCU is not safe against
- * while_each_thread(). Besides, a forking task that has passed
- * cgroup_post_fork() without seeing use_task_css_set_links = 1
- * is not guaranteed to have its child immediately visible in the
- * tasklist if we walk through it with RCU.
- */
- read_lock(&tasklist_lock);
- spin_lock_irq(&css_set_lock);
-
- if (use_task_css_set_links)
- goto out_unlock;
-
- use_task_css_set_links = true;
-
- do_each_thread(g, p) {
- WARN_ON_ONCE(!list_empty(&p->cg_list) ||
- task_css_set(p) != &init_css_set);
-
- /*
- * We should check if the process is exiting, otherwise
- * it will race with cgroup_exit() in that the list
- * entry won't be deleted though the process has exited.
- * Do it while holding siglock so that we don't end up
- * racing against cgroup_exit().
- *
- * Interrupts were already disabled while acquiring
- * the css_set_lock, so we do not need to disable it
- * again when acquiring the sighand->siglock here.
- */
- spin_lock(&p->sighand->siglock);
- if (!(p->flags & PF_EXITING)) {
- struct css_set *cset = task_css_set(p);
-
- if (!css_set_populated(cset))
- css_set_update_populated(cset, true);
- list_add_tail(&p->cg_list, &cset->tasks);
- get_css_set(cset);
- cset->nr_tasks++;
- }
- spin_unlock(&p->sighand->siglock);
- } while_each_thread(g, p);
-out_unlock:
- spin_unlock_irq(&css_set_lock);
- read_unlock(&tasklist_lock);
-}
-
static void init_cgroup_housekeeping(struct cgroup *cgrp)
{
struct cgroup_subsys *ss;
@@ -1976,7 +1914,6 @@ void init_cgroup_root(struct cgroup_fs_context *ctx)
atomic_set(&root->nr_cgrps, 1);
cgrp->root = root;
init_cgroup_housekeeping(cgrp);
- idr_init(&root->cgroup_idr);
root->flags = ctx->flags;
if (ctx->release_agent)
@@ -1997,12 +1934,6 @@ int cgroup_setup_root(struct cgroup_root *root, u16 ss_mask)
lockdep_assert_held(&cgroup_mutex);
- ret = cgroup_idr_alloc(&root->cgroup_idr, root_cgrp, 1, 2, GFP_KERNEL);
- if (ret < 0)
- goto out;
- root_cgrp->id = ret;
- root_cgrp->ancestor_ids[0] = ret;
-
ret = percpu_ref_init(&root_cgrp->self.refcnt, css_release,
0, GFP_KERNEL);
if (ret)
@@ -2035,6 +1966,8 @@ int cgroup_setup_root(struct cgroup_root *root, u16 ss_mask)
goto exit_root_id;
}
root_cgrp->kn = root->kf_root->kn;
+ WARN_ON_ONCE(cgroup_ino(root_cgrp) != 1);
+ root_cgrp->ancestor_ids[0] = cgroup_id(root_cgrp);
ret = css_populate_dir(&root_cgrp->self);
if (ret)
@@ -2119,11 +2052,12 @@ int cgroup_do_get_tree(struct fs_context *fc)
nsdentry = kernfs_node_dentry(cgrp->kn, sb);
dput(fc->root);
- fc->root = nsdentry;
if (IS_ERR(nsdentry)) {
- ret = PTR_ERR(nsdentry);
deactivate_locked_super(sb);
+ ret = PTR_ERR(nsdentry);
+ nsdentry = NULL;
}
+ fc->root = nsdentry;
}
if (!ctx->kfc.new_sb_created)
@@ -2187,13 +2121,6 @@ static int cgroup_init_fs_context(struct fs_context *fc)
if (!ctx)
return -ENOMEM;
- /*
- * The first time anyone tries to mount a cgroup, enable the list
- * linking each css_set to its tasks and fix up all existing tasks.
- */
- if (!use_task_css_set_links)
- cgroup_enable_task_cg_lists();
-
ctx->ns = current->nsproxy->cgroup_ns;
get_cgroup_ns(ctx->ns);
fc->fs_private = &ctx->kfc;
@@ -2371,9 +2298,8 @@ static void cgroup_migrate_add_task(struct task_struct *task,
if (task->flags & PF_EXITING)
return;
- /* leave @task alone if post_fork() hasn't linked it yet */
- if (list_empty(&task->cg_list))
- return;
+ /* cgroup_threadgroup_rwsem protects against racing with forks */
+ WARN_ON_ONCE(list_empty(&task->cg_list));
cset = task_css_set(task);
if (!cset->mg_src_cgrp)
@@ -2824,7 +2750,8 @@ int cgroup_attach_task(struct cgroup *dst_cgrp, struct task_struct *leader,
return ret;
}
-struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup)
+struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup,
+ bool *locked)
__acquires(&cgroup_threadgroup_rwsem)
{
struct task_struct *tsk;
@@ -2833,7 +2760,21 @@ struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup)
if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0)
return ERR_PTR(-EINVAL);
- percpu_down_write(&cgroup_threadgroup_rwsem);
+ /*
+ * If we migrate a single thread, we don't care about threadgroup
+ * stability. If the thread is `current`, it won't exit(2) out from
+ * under us or change its PID through exec(2). cgroup_mutex already
+ * excludes cgroup_update_dfl_csses and the other
+ * cgroup_{proc,thread}s_write callers.
+ * Therefore, we can skip the global lock.
+ */
+ lockdep_assert_held(&cgroup_mutex);
+ if (pid || threadgroup) {
+ percpu_down_write(&cgroup_threadgroup_rwsem);
+ *locked = true;
+ } else {
+ *locked = false;
+ }
rcu_read_lock();
if (pid) {
@@ -2864,13 +2805,16 @@ struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup)
goto out_unlock_rcu;
out_unlock_threadgroup:
- percpu_up_write(&cgroup_threadgroup_rwsem);
+ if (*locked) {
+ percpu_up_write(&cgroup_threadgroup_rwsem);
+ *locked = false;
+ }
out_unlock_rcu:
rcu_read_unlock();
return tsk;
}
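The *locked out-parameter threads the "did we take the global rwsem?" decision from the start path to the finish path, so the unlock side never has to re-derive it. A standalone sketch of the pattern, with a pthread rwlock standing in for the percpu rwsem (names are illustrative):

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_rwlock_t tg_rwsem = PTHREAD_RWLOCK_INITIALIZER;

    static void write_start(bool need_global, bool *locked)
    {
            if (need_global) {                   /* pid != 0 or threadgroup move */
                    pthread_rwlock_wrlock(&tg_rwsem);
                    *locked = true;
            } else {
                    *locked = false;             /* single "current" thread: skip it */
            }
    }

    static void write_finish(bool locked)
    {
            if (locked)
                    pthread_rwlock_unlock(&tg_rwsem);
    }

    int main(void)
    {
            bool locked;

            write_start(true, &locked);
            write_finish(locked);
            return 0;
    }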
-void cgroup_procs_write_finish(struct task_struct *task)
+void cgroup_procs_write_finish(struct task_struct *task, bool locked)
__releases(&cgroup_threadgroup_rwsem)
{
struct cgroup_subsys *ss;
@@ -2879,7 +2823,8 @@ void cgroup_procs_write_finish(struct task_struct *task)
/* release reference from cgroup_procs_write_start() */
put_task_struct(task);
- percpu_up_write(&cgroup_threadgroup_rwsem);
+ if (locked)
+ percpu_up_write(&cgroup_threadgroup_rwsem);
for_each_subsys(ss, ssid)
if (ss->post_attach)
ss->post_attach();
@@ -3600,22 +3545,22 @@ static int cpu_stat_show(struct seq_file *seq, void *v)
#ifdef CONFIG_PSI
static int cgroup_io_pressure_show(struct seq_file *seq, void *v)
{
- struct cgroup *cgroup = seq_css(seq)->cgroup;
- struct psi_group *psi = cgroup->id == 1 ? &psi_system : &cgroup->psi;
+ struct cgroup *cgrp = seq_css(seq)->cgroup;
+ struct psi_group *psi = cgroup_id(cgrp) == 1 ? &psi_system : &cgrp->psi;
return psi_show(seq, psi, PSI_IO);
}
static int cgroup_memory_pressure_show(struct seq_file *seq, void *v)
{
- struct cgroup *cgroup = seq_css(seq)->cgroup;
- struct psi_group *psi = cgroup->id == 1 ? &psi_system : &cgroup->psi;
+ struct cgroup *cgrp = seq_css(seq)->cgroup;
+ struct psi_group *psi = cgroup_id(cgrp) == 1 ? &psi_system : &cgrp->psi;
return psi_show(seq, psi, PSI_MEM);
}
static int cgroup_cpu_pressure_show(struct seq_file *seq, void *v)
{
- struct cgroup *cgroup = seq_css(seq)->cgroup;
- struct psi_group *psi = cgroup->id == 1 ? &psi_system : &cgroup->psi;
+ struct cgroup *cgrp = seq_css(seq)->cgroup;
+ struct psi_group *psi = cgroup_id(cgrp) == 1 ? &psi_system : &cgrp->psi;
return psi_show(seq, psi, PSI_CPU);
}
@@ -4567,9 +4512,6 @@ repeat:
void css_task_iter_start(struct cgroup_subsys_state *css, unsigned int flags,
struct css_task_iter *it)
{
- /* no one should try to iterate before mounting cgroups */
- WARN_ON_ONCE(!use_task_css_set_links);
-
memset(it, 0, sizeof(*it));
spin_lock_irq(&css_set_lock);
@@ -4754,12 +4696,13 @@ static ssize_t cgroup_procs_write(struct kernfs_open_file *of,
struct cgroup *src_cgrp, *dst_cgrp;
struct task_struct *task;
ssize_t ret;
+ bool locked;
dst_cgrp = cgroup_kn_lock_live(of->kn, false);
if (!dst_cgrp)
return -ENODEV;
- task = cgroup_procs_write_start(buf, true);
+ task = cgroup_procs_write_start(buf, true, &locked);
ret = PTR_ERR_OR_ZERO(task);
if (ret)
goto out_unlock;
@@ -4777,7 +4720,7 @@ static ssize_t cgroup_procs_write(struct kernfs_open_file *of,
ret = cgroup_attach_task(dst_cgrp, task, true);
out_finish:
- cgroup_procs_write_finish(task);
+ cgroup_procs_write_finish(task, locked);
out_unlock:
cgroup_kn_unlock(of->kn);
@@ -4795,6 +4738,7 @@ static ssize_t cgroup_threads_write(struct kernfs_open_file *of,
struct cgroup *src_cgrp, *dst_cgrp;
struct task_struct *task;
ssize_t ret;
+ bool locked;
buf = strstrip(buf);
@@ -4802,7 +4746,7 @@ static ssize_t cgroup_threads_write(struct kernfs_open_file *of,
if (!dst_cgrp)
return -ENODEV;
- task = cgroup_procs_write_start(buf, false);
+ task = cgroup_procs_write_start(buf, false, &locked);
ret = PTR_ERR_OR_ZERO(task);
if (ret)
goto out_unlock;
@@ -4826,7 +4770,7 @@ static ssize_t cgroup_threads_write(struct kernfs_open_file *of,
ret = cgroup_attach_task(dst_cgrp, task, false);
out_finish:
- cgroup_procs_write_finish(task);
+ cgroup_procs_write_finish(task, locked);
out_unlock:
cgroup_kn_unlock(of->kn);
@@ -5036,9 +4980,6 @@ static void css_release_work_fn(struct work_struct *work)
tcgrp->nr_dying_descendants--;
spin_unlock_irq(&css_set_lock);
- cgroup_idr_remove(&cgrp->root->cgroup_idr, cgrp->id);
- cgrp->id = -1;
-
/*
* There are two control paths which try to determine
* cgroup from dentry without going through kernfs -
@@ -5203,10 +5144,12 @@ err_free_css:
* it isn't associated with its kernfs_node and doesn't have the control
* mask applied.
*/
-static struct cgroup *cgroup_create(struct cgroup *parent)
+static struct cgroup *cgroup_create(struct cgroup *parent, const char *name,
+ umode_t mode)
{
struct cgroup_root *root = parent->root;
struct cgroup *cgrp, *tcgrp;
+ struct kernfs_node *kn;
int level = parent->level + 1;
int ret;
@@ -5226,15 +5169,13 @@ static struct cgroup *cgroup_create(struct cgroup *parent)
goto out_cancel_ref;
}
- /*
- * Temporarily set the pointer to NULL, so idr_find() won't return
- * a half-baked cgroup.
- */
- cgrp->id = cgroup_idr_alloc(&root->cgroup_idr, NULL, 2, 0, GFP_KERNEL);
- if (cgrp->id < 0) {
- ret = -ENOMEM;
+ /* create the directory */
+ kn = kernfs_create_dir(parent->kn, name, mode, cgrp);
+ if (IS_ERR(kn)) {
+ ret = PTR_ERR(kn);
goto out_stat_exit;
}
+ cgrp->kn = kn;
init_cgroup_housekeeping(cgrp);
@@ -5244,7 +5185,7 @@ static struct cgroup *cgroup_create(struct cgroup *parent)
ret = psi_cgroup_alloc(cgrp);
if (ret)
- goto out_idr_free;
+ goto out_kernfs_remove;
ret = cgroup_bpf_inherit(cgrp);
if (ret)
@@ -5268,7 +5209,7 @@ static struct cgroup *cgroup_create(struct cgroup *parent)
spin_lock_irq(&css_set_lock);
for (tcgrp = cgrp; tcgrp; tcgrp = cgroup_parent(tcgrp)) {
- cgrp->ancestor_ids[tcgrp->level] = tcgrp->id;
+ cgrp->ancestor_ids[tcgrp->level] = cgroup_id(tcgrp);
if (tcgrp != cgrp) {
tcgrp->nr_descendants++;
@@ -5298,12 +5239,6 @@ static struct cgroup *cgroup_create(struct cgroup *parent)
cgroup_get_live(parent);
/*
- * @cgrp is now fully operational. If something fails after this
- * point, it'll be released via the normal destruction path.
- */
- cgroup_idr_replace(&root->cgroup_idr, cgrp, cgrp->id);
-
- /*
* On the default hierarchy, a child doesn't automatically inherit
* subtree_control from the parent. Each is configured manually.
*/
@@ -5316,8 +5251,8 @@ static struct cgroup *cgroup_create(struct cgroup *parent)
out_psi_free:
psi_cgroup_free(cgrp);
-out_idr_free:
- cgroup_idr_remove(&root->cgroup_idr, cgrp->id);
+out_kernfs_remove:
+ kernfs_remove(cgrp->kn);
out_stat_exit:
if (cgroup_on_dfl(parent))
cgroup_rstat_exit(cgrp);
@@ -5354,7 +5289,6 @@ fail:
int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name, umode_t mode)
{
struct cgroup *parent, *cgrp;
- struct kernfs_node *kn;
int ret;
/* do not accept '\n' to prevent making /proc/<pid>/cgroup unparsable */
@@ -5370,27 +5304,19 @@ int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name, umode_t mode)
goto out_unlock;
}
- cgrp = cgroup_create(parent);
+ cgrp = cgroup_create(parent, name, mode);
if (IS_ERR(cgrp)) {
ret = PTR_ERR(cgrp);
goto out_unlock;
}
- /* create the directory */
- kn = kernfs_create_dir(parent->kn, name, mode, cgrp);
- if (IS_ERR(kn)) {
- ret = PTR_ERR(kn);
- goto out_destroy;
- }
- cgrp->kn = kn;
-
/*
* This extra ref will be put in cgroup_free_fn() and guarantees
* that @cgrp->kn is always accessible.
*/
- kernfs_get(kn);
+ kernfs_get(cgrp->kn);
- ret = cgroup_kn_set_ugid(kn);
+ ret = cgroup_kn_set_ugid(cgrp->kn);
if (ret)
goto out_destroy;
@@ -5405,7 +5331,7 @@ int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name, umode_t mode)
TRACE_CGROUP_PATH(mkdir, cgrp);
/* let's create and online css's */
- kernfs_activate(kn);
+ kernfs_activate(cgrp->kn);
ret = 0;
goto out_unlock;
@@ -5835,12 +5761,11 @@ static int __init cgroup_wq_init(void)
}
core_initcall(cgroup_wq_init);
-void cgroup_path_from_kernfs_id(const union kernfs_node_id *id,
- char *buf, size_t buflen)
+void cgroup_path_from_kernfs_id(u64 id, char *buf, size_t buflen)
{
struct kernfs_node *kn;
- kn = kernfs_get_node_by_id(cgrp_dfl_root.kf_root, id);
+ kn = kernfs_find_and_get_node_by_id(cgrp_dfl_root.kf_root, id);
if (!kn)
return;
kernfs_path(kn, buf, buflen);
@@ -6001,62 +5926,38 @@ void cgroup_cancel_fork(struct task_struct *child)
void cgroup_post_fork(struct task_struct *child)
{
struct cgroup_subsys *ss;
+ struct css_set *cset;
int i;
+ spin_lock_irq(&css_set_lock);
+
+ WARN_ON_ONCE(!list_empty(&child->cg_list));
+ cset = task_css_set(current); /* current is @child's parent */
+ get_css_set(cset);
+ cset->nr_tasks++;
+ css_set_move_task(child, NULL, cset, false);
+
/*
- * This may race against cgroup_enable_task_cg_lists(). As that
- * function sets use_task_css_set_links before grabbing
- * tasklist_lock and we just went through tasklist_lock to add
- * @child, it's guaranteed that either we see the set
- * use_task_css_set_links or cgroup_enable_task_cg_lists() sees
- * @child during its iteration.
- *
- * If we won the race, @child is associated with %current's
- * css_set. Grabbing css_set_lock guarantees both that the
- * association is stable, and, on completion of the parent's
- * migration, @child is visible in the source of migration or
- * already in the destination cgroup. This guarantee is necessary
- * when implementing operations which need to migrate all tasks of
- * a cgroup to another.
- *
- * Note that if we lose to cgroup_enable_task_cg_lists(), @child
- * will remain in init_css_set. This is safe because all tasks are
- * in the init_css_set before cg_links is enabled and there's no
- * operation which transfers all tasks out of init_css_set.
+ * If the cgroup has to be frozen, the new task has to be frozen too.
+ * Let's set the JOBCTL_TRAP_FREEZE jobctl bit to get the task into the
+ * frozen state.
*/
- if (use_task_css_set_links) {
- struct css_set *cset;
-
- spin_lock_irq(&css_set_lock);
- cset = task_css_set(current);
- if (list_empty(&child->cg_list)) {
- get_css_set(cset);
- cset->nr_tasks++;
- css_set_move_task(child, NULL, cset, false);
- }
+ if (unlikely(cgroup_task_freeze(child))) {
+ spin_lock(&child->sighand->siglock);
+ WARN_ON_ONCE(child->frozen);
+ child->jobctl |= JOBCTL_TRAP_FREEZE;
+ spin_unlock(&child->sighand->siglock);
/*
- * If the cgroup has to be frozen, the new task has too.
- * Let's set the JOBCTL_TRAP_FREEZE jobctl bit to get
- * the task into the frozen state.
+ * Calling cgroup_update_frozen() isn't required here,
+ * because it will be called anyway a bit later from
+ * do_freezer_trap(). So we avoid cgroup's transient switch
+ * from the frozen state and back.
*/
- if (unlikely(cgroup_task_freeze(child))) {
- spin_lock(&child->sighand->siglock);
- WARN_ON_ONCE(child->frozen);
- child->jobctl |= JOBCTL_TRAP_FREEZE;
- spin_unlock(&child->sighand->siglock);
-
- /*
- * Calling cgroup_update_frozen() isn't required here,
- * because it will be called anyway a bit later
- * from do_freezer_trap(). So we avoid cgroup's
- * transient switch from the frozen state and back.
- */
- }
-
- spin_unlock_irq(&css_set_lock);
}
+ spin_unlock_irq(&css_set_lock);
+
/*
* Call ss->fork(). This must happen after @child is linked on
* css_set; otherwise, @child might change state between ->fork()
@@ -6071,20 +5972,8 @@ void cgroup_post_fork(struct task_struct *child)
* cgroup_exit - detach cgroup from exiting task
* @tsk: pointer to task_struct of exiting process
*
- * Description: Detach cgroup from @tsk and release it.
- *
- * Note that cgroups marked notify_on_release force every task in
- * them to take the global cgroup_mutex mutex when exiting.
- * This could impact scaling on very large systems. Be reluctant to
- * use notify_on_release cgroups where very high task exit scaling
- * is required on large systems.
+ * Description: Detach cgroup from @tsk.
*
- * We set the exiting tasks cgroup to the root cgroup (top_cgroup). We
- * call cgroup_exit() while the task is still competent to handle
- * notify_on_release(), then leave the task attached to the root cgroup in
- * each hierarchy for the remainder of its exit. No need to bother with
- * init_css_set refcnting. init_css_set never goes away and we can't race
- * with migration path - PF_EXITING is visible to migration path.
*/
void cgroup_exit(struct task_struct *tsk)
{
@@ -6092,26 +5981,19 @@ void cgroup_exit(struct task_struct *tsk)
struct css_set *cset;
int i;
- /*
- * Unlink from @tsk from its css_set. As migration path can't race
- * with us, we can check css_set and cg_list without synchronization.
- */
- cset = task_css_set(tsk);
+ spin_lock_irq(&css_set_lock);
- if (!list_empty(&tsk->cg_list)) {
- spin_lock_irq(&css_set_lock);
- css_set_move_task(tsk, cset, NULL, false);
- list_add_tail(&tsk->cg_list, &cset->dying_tasks);
- cset->nr_tasks--;
+ WARN_ON_ONCE(list_empty(&tsk->cg_list));
+ cset = task_css_set(tsk);
+ css_set_move_task(tsk, cset, NULL, false);
+ list_add_tail(&tsk->cg_list, &cset->dying_tasks);
+ cset->nr_tasks--;
- WARN_ON_ONCE(cgroup_task_frozen(tsk));
- if (unlikely(cgroup_task_freeze(tsk)))
- cgroup_update_frozen(task_dfl_cgroup(tsk));
+ WARN_ON_ONCE(cgroup_task_frozen(tsk));
+ if (unlikely(cgroup_task_freeze(tsk)))
+ cgroup_update_frozen(task_dfl_cgroup(tsk));
- spin_unlock_irq(&css_set_lock);
- } else {
- get_css_set(cset);
- }
+ spin_unlock_irq(&css_set_lock);
/* see cgroup_post_fork() for details */
do_each_subsys_mask(ss, i, have_exit_callback) {
@@ -6128,12 +6010,10 @@ void cgroup_release(struct task_struct *task)
ss->release(task);
} while_each_subsys_mask();
- if (use_task_css_set_links) {
- spin_lock_irq(&css_set_lock);
- css_set_skip_task_iters(task_css_set(task), task);
- list_del_init(&task->cg_list);
- spin_unlock_irq(&css_set_lock);
- }
+ spin_lock_irq(&css_set_lock);
+ css_set_skip_task_iters(task_css_set(task), task);
+ list_del_init(&task->cg_list);
+ spin_unlock_irq(&css_set_lock);
}
void cgroup_free(struct task_struct *task)
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index c87ee6412b36..58f5073acff7 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -929,8 +929,6 @@ static void rebuild_root_domains(void)
lockdep_assert_cpus_held();
lockdep_assert_held(&sched_domains_mutex);
- cgroup_enable_task_cg_lists();
-
rcu_read_lock();
/*
diff --git a/kernel/cgroup/freezer.c b/kernel/cgroup/freezer.c
index 8cf010680678..3984dd6b8ddb 100644
--- a/kernel/cgroup/freezer.c
+++ b/kernel/cgroup/freezer.c
@@ -231,6 +231,15 @@ void cgroup_freezer_migrate_task(struct task_struct *task,
return;
/*
+ * There is nothing to change if neither the src nor the dst cgroup is
+ * freezing and the task is not frozen.
+ */
+ if (!test_bit(CGRP_FREEZE, &src->flags) &&
+ !test_bit(CGRP_FREEZE, &dst->flags) &&
+ !task->frozen)
+ return;
+
+ /*
* Adjust counters of freezing and frozen tasks.
* Note, that if the task is frozen, but the destination cgroup is not
* frozen, we bump both counters to keep them balanced.
diff --git a/kernel/cgroup/pids.c b/kernel/cgroup/pids.c
index 8e513a573fe9..138059eb730d 100644
--- a/kernel/cgroup/pids.c
+++ b/kernel/cgroup/pids.c
@@ -45,7 +45,7 @@ struct pids_cgroup {
* %PIDS_MAX = (%PID_MAX_LIMIT + 1).
*/
atomic64_t counter;
- int64_t limit;
+ atomic64_t limit;
/* Handle for "pids.events" */
struct cgroup_file events_file;
@@ -73,8 +73,8 @@ pids_css_alloc(struct cgroup_subsys_state *parent)
if (!pids)
return ERR_PTR(-ENOMEM);
- pids->limit = PIDS_MAX;
atomic64_set(&pids->counter, 0);
+ atomic64_set(&pids->limit, PIDS_MAX);
atomic64_set(&pids->events_limit, 0);
return &pids->css;
}
@@ -146,13 +146,14 @@ static int pids_try_charge(struct pids_cgroup *pids, int num)
for (p = pids; parent_pids(p); p = parent_pids(p)) {
int64_t new = atomic64_add_return(num, &p->counter);
+ int64_t limit = atomic64_read(&p->limit);
/*
* Since new is capped to the maximum number of pid_t, if
* p->limit is %PIDS_MAX then we know that this test will never
* fail.
*/
- if (new > p->limit)
+ if (new > limit)
goto revert;
}
@@ -277,7 +278,7 @@ set_limit:
* Limit updates don't need to be mutex'd, since it isn't
* critical that any racing fork()s follow the new limit.
*/
- pids->limit = limit;
+ atomic64_set(&pids->limit, limit);
return nbytes;
}
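Converting pids->limit to an atomic64_t removes the torn-read hazard on 32-bit hosts while keeping limit updates lock-free: a racing fork simply observes either the old or the new value, which the existing comment already declares acceptable. A C11 sketch of the same contract (the names are stand-ins):

    #include <stdatomic.h>
    #include <stdint.h>

    static _Atomic int64_t limit = INT64_MAX;    /* PIDS_MAX stand-in */

    static void set_limit(int64_t new_limit)
    {
            /* no lock: racing charge paths see old or new, never torn */
            atomic_store_explicit(&limit, new_limit, memory_order_relaxed);
    }

    static int64_t read_limit(void)
    {
            return atomic_load_explicit(&limit, memory_order_relaxed);
    }

    int main(void)
    {
            set_limit(1024);
            return read_limit() == 1024 ? 0 : 1;
    }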
@@ -285,7 +286,7 @@ static int pids_max_show(struct seq_file *sf, void *v)
{
struct cgroup_subsys_state *css = seq_css(sf);
struct pids_cgroup *pids = css_pids(css);
- int64_t limit = pids->limit;
+ int64_t limit = atomic64_read(&pids->limit);
if (limit >= PIDS_MAX)
seq_printf(sf, "%s\n", PIDS_MAX_STR);
diff --git a/kernel/cgroup/rstat.c b/kernel/cgroup/rstat.c
index ca19b4c8acf5..b48b22d4deb6 100644
--- a/kernel/cgroup/rstat.c
+++ b/kernel/cgroup/rstat.c
@@ -304,44 +304,48 @@ void __init cgroup_rstat_boot(void)
* Functions for cgroup basic resource statistics implemented on top of
* rstat.
*/
-static void cgroup_base_stat_accumulate(struct cgroup_base_stat *dst_bstat,
- struct cgroup_base_stat *src_bstat)
+static void cgroup_base_stat_add(struct cgroup_base_stat *dst_bstat,
+ struct cgroup_base_stat *src_bstat)
{
dst_bstat->cputime.utime += src_bstat->cputime.utime;
dst_bstat->cputime.stime += src_bstat->cputime.stime;
dst_bstat->cputime.sum_exec_runtime += src_bstat->cputime.sum_exec_runtime;
}
+static void cgroup_base_stat_sub(struct cgroup_base_stat *dst_bstat,
+ struct cgroup_base_stat *src_bstat)
+{
+ dst_bstat->cputime.utime -= src_bstat->cputime.utime;
+ dst_bstat->cputime.stime -= src_bstat->cputime.stime;
+ dst_bstat->cputime.sum_exec_runtime -= src_bstat->cputime.sum_exec_runtime;
+}
+
static void cgroup_base_stat_flush(struct cgroup *cgrp, int cpu)
{
struct cgroup *parent = cgroup_parent(cgrp);
struct cgroup_rstat_cpu *rstatc = cgroup_rstat_cpu(cgrp, cpu);
- struct task_cputime *last_cputime = &rstatc->last_bstat.cputime;
- struct task_cputime cputime;
- struct cgroup_base_stat delta;
+ struct cgroup_base_stat cur, delta;
unsigned seq;
/* fetch the current per-cpu values */
do {
seq = __u64_stats_fetch_begin(&rstatc->bsync);
- cputime = rstatc->bstat.cputime;
+ cur.cputime = rstatc->bstat.cputime;
} while (__u64_stats_fetch_retry(&rstatc->bsync, seq));
- /* calculate the delta to propgate */
- delta.cputime.utime = cputime.utime - last_cputime->utime;
- delta.cputime.stime = cputime.stime - last_cputime->stime;
- delta.cputime.sum_exec_runtime = cputime.sum_exec_runtime -
- last_cputime->sum_exec_runtime;
- *last_cputime = cputime;
-
- /* transfer the pending stat into delta */
- cgroup_base_stat_accumulate(&delta, &cgrp->pending_bstat);
- memset(&cgrp->pending_bstat, 0, sizeof(cgrp->pending_bstat));
-
- /* propagate delta into the global stat and the parent's pending */
- cgroup_base_stat_accumulate(&cgrp->bstat, &delta);
- if (parent)
- cgroup_base_stat_accumulate(&parent->pending_bstat, &delta);
+ /* propagate percpu delta to global */
+ delta = cur;
+ cgroup_base_stat_sub(&delta, &rstatc->last_bstat);
+ cgroup_base_stat_add(&cgrp->bstat, &delta);
+ cgroup_base_stat_add(&rstatc->last_bstat, &delta);
+
+ /* propagate global delta to parent */
+ if (parent) {
+ delta = cgrp->bstat;
+ cgroup_base_stat_sub(&delta, &cgrp->last_bstat);
+ cgroup_base_stat_add(&parent->bstat, &delta);
+ cgroup_base_stat_add(&cgrp->last_bstat, &delta);
+ }
}
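The rewritten flush keeps a last_bstat snapshot at each level and only moves the delta upward: per-cpu counters drain into the cgroup, and the cgroup's accumulated total in turn drains into its parent, with no separate pending buffer. A small standalone sketch of the bookkeeping invariant (global only ever grows by exactly what cur gained since the previous flush):

    #include <stdint.h>

    struct bstat { uint64_t utime, stime; };

    static void bstat_add(struct bstat *d, const struct bstat *s)
    {
            d->utime += s->utime;
            d->stime += s->stime;
    }

    static void bstat_sub(struct bstat *d, const struct bstat *s)
    {
            d->utime -= s->utime;
            d->stime -= s->stime;
    }

    static void flush_one(struct bstat *global, struct bstat *last,
                          const struct bstat *cur)
    {
            struct bstat delta = *cur;

            bstat_sub(&delta, last);             /* what arrived since last flush */
            bstat_add(global, &delta);           /* publish it upward */
            bstat_add(last, &delta);             /* new baseline == *cur */
    }

    int main(void)
    {
            struct bstat global = {0}, last = {0}, cur = { 5, 3 };

            flush_one(&global, &last, &cur);
            return (global.utime == 5 && last.stime == 3) ? 0 : 1;
    }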
static struct cgroup_rstat_cpu *
diff --git a/kernel/context_tracking.c b/kernel/context_tracking.c
index be01a4d627c9..0296b4bda8f1 100644
--- a/kernel/context_tracking.c
+++ b/kernel/context_tracking.c
@@ -25,8 +25,8 @@
#define CREATE_TRACE_POINTS
#include <trace/events/context_tracking.h>
-DEFINE_STATIC_KEY_FALSE(context_tracking_enabled);
-EXPORT_SYMBOL_GPL(context_tracking_enabled);
+DEFINE_STATIC_KEY_FALSE(context_tracking_key);
+EXPORT_SYMBOL_GPL(context_tracking_key);
DEFINE_PER_CPU(struct context_tracking, context_tracking);
EXPORT_SYMBOL_GPL(context_tracking);
@@ -192,7 +192,7 @@ void __init context_tracking_cpu_set(int cpu)
if (!per_cpu(context_tracking.active, cpu)) {
per_cpu(context_tracking.active, cpu) = true;
- static_branch_inc(&context_tracking_enabled);
+ static_branch_inc(&context_tracking_key);
}
if (initialized)
diff --git a/kernel/cpu.c b/kernel/cpu.c
index fc28e17940e0..a59cc980adad 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -336,7 +336,7 @@ static void lockdep_acquire_cpus_lock(void)
static void lockdep_release_cpus_lock(void)
{
- rwsem_release(&cpu_hotplug_lock.rw_sem.dep_map, 1, _THIS_IP_);
+ rwsem_release(&cpu_hotplug_lock.rw_sem.dep_map, _THIS_IP_);
}
/*
@@ -2373,7 +2373,18 @@ void __init boot_cpu_hotplug_init(void)
this_cpu_write(cpuhp_state.state, CPUHP_ONLINE);
}
-enum cpu_mitigations cpu_mitigations __ro_after_init = CPU_MITIGATIONS_AUTO;
+/*
+ * These are used for a global "mitigations=" cmdline option for toggling
+ * optional CPU mitigations.
+ */
+enum cpu_mitigations {
+ CPU_MITIGATIONS_OFF,
+ CPU_MITIGATIONS_AUTO,
+ CPU_MITIGATIONS_AUTO_NOSMT,
+};
+
+static enum cpu_mitigations cpu_mitigations __ro_after_init =
+ CPU_MITIGATIONS_AUTO;
static int __init mitigations_parse_cmdline(char *arg)
{
@@ -2390,3 +2401,17 @@ static int __init mitigations_parse_cmdline(char *arg)
return 0;
}
early_param("mitigations", mitigations_parse_cmdline);
+
+/* mitigations=off */
+bool cpu_mitigations_off(void)
+{
+ return cpu_mitigations == CPU_MITIGATIONS_OFF;
+}
+EXPORT_SYMBOL_GPL(cpu_mitigations_off);
+
+/* mitigations=auto,nosmt */
+bool cpu_mitigations_auto_nosmt(void)
+{
+ return cpu_mitigations == CPU_MITIGATIONS_AUTO_NOSMT;
+}
+EXPORT_SYMBOL_GPL(cpu_mitigations_auto_nosmt);
diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c
index 099002d84f46..a26170469543 100644
--- a/kernel/dma/debug.c
+++ b/kernel/dma/debug.c
@@ -161,7 +161,7 @@ static inline void dump_entry_trace(struct dma_debug_entry *entry)
{
#ifdef CONFIG_STACKTRACE
if (entry) {
- pr_warning("Mapped at:\n");
+ pr_warn("Mapped at:\n");
stack_trace_print(entry->stack_entries, entry->stack_len, 0);
}
#endif
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 8402b29c280f..0b67c04e531b 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -16,12 +16,11 @@
#include <linux/swiotlb.h>
/*
- * Most architectures use ZONE_DMA for the first 16 Megabytes, but
- * some use it for entirely different regions:
+ * Most architectures use ZONE_DMA for the first 16 Megabytes, but some use
+ * it for entirely different regions. In that case the arch code needs to
+ * override the variable below for dma-direct to work properly.
*/
-#ifndef ARCH_ZONE_DMA_BITS
-#define ARCH_ZONE_DMA_BITS 24
-#endif
+unsigned int zone_dma_bits __ro_after_init = 24;
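Turning the compile-time ARCH_ZONE_DMA_BITS constant into the zone_dma_bits variable lets arch setup code override the boundary at boot. The mask arithmetic it feeds is the usual DMA_BIT_MASK() shape, sketched standalone:

    #include <stdint.h>
    #include <stdio.h>

    /* same shape as the kernel's DMA_BIT_MASK() */
    #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

    int main(void)
    {
            /* the default 24 bits covers the classic 16 MiB ISA limit */
            printf("24 bits -> %#llx\n", (unsigned long long)DMA_BIT_MASK(24));
            /* an arch overriding zone_dma_bits to 30 covers 1 GiB */
            printf("30 bits -> %#llx\n", (unsigned long long)DMA_BIT_MASK(30));
            return 0;
    }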
static void report_addr(struct device *dev, dma_addr_t dma_addr, size_t size)
{
@@ -69,7 +68,7 @@ static gfp_t __dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
* Note that GFP_DMA32 and GFP_DMA are no ops without the corresponding
* zones.
*/
- if (*phys_mask <= DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))
+ if (*phys_mask <= DMA_BIT_MASK(zone_dma_bits))
return GFP_DMA;
if (*phys_mask <= DMA_BIT_MASK(32))
return GFP_DMA32;
@@ -395,7 +394,7 @@ int dma_direct_supported(struct device *dev, u64 mask)
u64 min_mask;
if (IS_ENABLED(CONFIG_ZONE_DMA))
- min_mask = DMA_BIT_MASK(ARCH_ZONE_DMA_BITS);
+ min_mask = DMA_BIT_MASK(zone_dma_bits);
else
min_mask = DMA_BIT_MASK(32);
diff --git a/kernel/events/core.c b/kernel/events/core.c
index aec8dba2bea4..4ff86d57f9e5 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1031,7 +1031,7 @@ perf_cgroup_set_timestamp(struct task_struct *task,
{
}
-void
+static inline void
perf_cgroup_switch(struct task_struct *task, struct task_struct *next)
{
}
@@ -1941,6 +1941,11 @@ static void perf_put_aux_event(struct perf_event *event)
}
}
+static bool perf_need_aux_event(struct perf_event *event)
+{
+ return !!event->attr.aux_output || !!event->attr.aux_sample_size;
+}
+
static int perf_get_aux_event(struct perf_event *event,
struct perf_event *group_leader)
{
@@ -1953,7 +1958,17 @@ static int perf_get_aux_event(struct perf_event *event,
if (!group_leader)
return 0;
- if (!perf_aux_output_match(event, group_leader))
+ /*
+ * aux_output and aux_sample_size are mutually exclusive.
+ */
+ if (event->attr.aux_output && event->attr.aux_sample_size)
+ return 0;
+
+ if (event->attr.aux_output &&
+ !perf_aux_output_match(event, group_leader))
+ return 0;
+
+ if (event->attr.aux_sample_size && !group_leader->pmu->snapshot_aux)
return 0;
if (!atomic_long_inc_not_zero(&group_leader->refcount))
@@ -2666,6 +2681,25 @@ perf_install_in_context(struct perf_event_context *ctx,
*/
smp_store_release(&event->ctx, ctx);
+ /*
+ * perf_event_attr::disabled events will not run and can be initialized
+ * without IPI. Except when this is the first event for the context, in
+ * that case we need the magic of the IPI to set ctx->is_active.
+ *
+ * The IOC_ENABLE that is sure to follow the creation of a disabled
+ * event will issue the IPI and reprogram the hardware.
+ */
+ if (__perf_effective_state(event) == PERF_EVENT_STATE_OFF && ctx->nr_events) {
+ raw_spin_lock_irq(&ctx->lock);
+ if (ctx->task == TASK_TOMBSTONE) {
+ raw_spin_unlock_irq(&ctx->lock);
+ return;
+ }
+ add_event_to_ctx(event, ctx);
+ raw_spin_unlock_irq(&ctx->lock);
+ return;
+ }
+
if (!task) {
cpu_function_call(cpu, __perf_install_in_context, event);
return;
@@ -3204,10 +3238,21 @@ static void perf_event_context_sched_out(struct task_struct *task, int ctxn,
raw_spin_lock(&ctx->lock);
raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
if (context_equiv(ctx, next_ctx)) {
+ struct pmu *pmu = ctx->pmu;
+
WRITE_ONCE(ctx->task, next);
WRITE_ONCE(next_ctx->task, task);
- swap(ctx->task_ctx_data, next_ctx->task_ctx_data);
+ /*
+ * PMU-specific parts of the task perf context can require
+ * additional synchronization. As an example of such
+ * synchronization, see the implementation details of Intel
+ * LBR call stack data profiling.
+ */
+ if (pmu->swap_task_ctx)
+ pmu->swap_task_ctx(ctx, next_ctx);
+ else
+ swap(ctx->task_ctx_data, next_ctx->task_ctx_data);
/*
* RCU_INIT_POINTER here is safe because we've not
@@ -4229,8 +4274,9 @@ find_get_context(struct pmu *pmu, struct task_struct *task,
if (!task) {
/* Must be root to operate on a CPU event: */
- if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
- return ERR_PTR(-EACCES);
+ err = perf_allow_cpu(&event->attr);
+ if (err)
+ return ERR_PTR(err);
cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
ctx = &cpuctx->ctx;
@@ -4539,6 +4585,8 @@ static void _free_event(struct perf_event *event)
unaccount_event(event);
+ security_perf_event_free(event);
+
if (event->rb) {
/*
* Can happen when we close an event with re-directed output.
@@ -4992,6 +5040,10 @@ perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
struct perf_event_context *ctx;
int ret;
+ ret = security_perf_event_read(event);
+ if (ret)
+ return ret;
+
ctx = perf_event_ctx_lock(event);
ret = __perf_read(event, buf, count);
perf_event_ctx_unlock(event, ctx);
@@ -5029,6 +5081,24 @@ static void _perf_event_reset(struct perf_event *event)
perf_event_update_userpage(event);
}
+/* Assume it's not an event with inherit set. */
+u64 perf_event_pause(struct perf_event *event, bool reset)
+{
+ struct perf_event_context *ctx;
+ u64 count;
+
+ ctx = perf_event_ctx_lock(event);
+ WARN_ON_ONCE(event->attr.inherit);
+ _perf_event_disable(event);
+ count = local64_read(&event->count);
+ if (reset)
+ local64_set(&event->count, 0);
+ perf_event_ctx_unlock(event, ctx);
+
+ return count;
+}
+EXPORT_SYMBOL_GPL(perf_event_pause);
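perf_event_pause() gives modules a single call that quiesces a counter and reads (optionally zeroing) its value under the event context lock, instead of open-coding a disable/read/reset sequence. A hedged sketch of a caller (drain_counter() is illustrative, not an existing kernel function):

    /* hypothetical in-kernel consumer that owns @event */
    static u64 drain_counter(struct perf_event *event)
    {
            /* disable, read, and zero the count as one locked operation */
            return perf_event_pause(event, true);
    }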
+
/*
* Holding the top-level event's child_mutex means that any
* descendant process that has inherited this event will block
@@ -5106,16 +5176,11 @@ static int perf_event_check_period(struct perf_event *event, u64 value)
return event->pmu->check_period(event, value);
}
-static int perf_event_period(struct perf_event *event, u64 __user *arg)
+static int _perf_event_period(struct perf_event *event, u64 value)
{
- u64 value;
-
if (!is_sampling_event(event))
return -EINVAL;
- if (copy_from_user(&value, arg, sizeof(value)))
- return -EFAULT;
-
if (!value)
return -EINVAL;
@@ -5133,6 +5198,19 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg)
return 0;
}
+int perf_event_period(struct perf_event *event, u64 value)
+{
+ struct perf_event_context *ctx;
+ int ret;
+
+ ctx = perf_event_ctx_lock(event);
+ ret = _perf_event_period(event, value);
+ perf_event_ctx_unlock(event, ctx);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(perf_event_period);
+
static const struct file_operations perf_fops;
static inline int perf_fget_light(int fd, struct fd *p)
@@ -5176,8 +5254,14 @@ static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned lon
return _perf_event_refresh(event, arg);
case PERF_EVENT_IOC_PERIOD:
- return perf_event_period(event, (u64 __user *)arg);
+ {
+ u64 value;
+
+ if (copy_from_user(&value, (u64 __user *)arg, sizeof(value)))
+ return -EFAULT;
+ return _perf_event_period(event, value);
+ }
case PERF_EVENT_IOC_ID:
{
u64 id = primary_event_id(event);
@@ -5256,6 +5340,11 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
struct perf_event_context *ctx;
long ret;
+ /* Treat ioctl like writes as it is likely a mutating operation. */
+ ret = security_perf_event_write(event);
+ if (ret)
+ return ret;
+
ctx = perf_event_ctx_lock(event);
ret = _perf_ioctl(event, cmd, arg);
perf_event_ctx_unlock(event, ctx);
@@ -5607,10 +5696,8 @@ static void perf_mmap_close(struct vm_area_struct *vma)
perf_pmu_output_stop(event);
/* now it's safe to free the pages */
- if (!rb->aux_mmap_locked)
- atomic_long_sub(rb->aux_nr_pages, &mmap_user->locked_vm);
- else
- atomic64_sub(rb->aux_mmap_locked, &vma->vm_mm->pinned_vm);
+ atomic_long_sub(rb->aux_nr_pages - rb->aux_mmap_locked, &mmap_user->locked_vm);
+ atomic64_sub(rb->aux_mmap_locked, &vma->vm_mm->pinned_vm);
/* this has to be the last one */
rb_free_aux(rb);
@@ -5721,6 +5808,10 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
if (!(vma->vm_flags & VM_SHARED))
return -EINVAL;
+ ret = security_perf_event_read(event);
+ if (ret)
+ return ret;
+
vma_size = vma->vm_end - vma->vm_start;
if (vma->vm_pgoff == 0) {
@@ -5827,13 +5918,7 @@ accounting:
user_locked = atomic_long_read(&user->locked_vm) + user_extra;
- if (user_locked <= user_lock_limit) {
- /* charge all to locked_vm */
- } else if (atomic_long_read(&user->locked_vm) >= user_lock_limit) {
- /* charge all to pinned_vm */
- extra = user_extra;
- user_extra = 0;
- } else {
+ if (user_locked > user_lock_limit) {
/*
* charge locked_vm until it hits user_lock_limit;
* charge the rest from pinned_vm
@@ -5846,7 +5931,7 @@ accounting:
lock_limit >>= PAGE_SHIFT;
locked = atomic64_read(&vma->vm_mm->pinned_vm) + extra;
- if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() &&
+ if ((locked > lock_limit) && perf_is_paranoid() &&
!capable(CAP_IPC_LOCK)) {
ret = -EPERM;
goto unlock;
@@ -6176,6 +6261,122 @@ perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
}
}
+static unsigned long perf_prepare_sample_aux(struct perf_event *event,
+ struct perf_sample_data *data,
+ size_t size)
+{
+ struct perf_event *sampler = event->aux_event;
+ struct ring_buffer *rb;
+
+ data->aux_size = 0;
+
+ if (!sampler)
+ goto out;
+
+ if (WARN_ON_ONCE(READ_ONCE(sampler->state) != PERF_EVENT_STATE_ACTIVE))
+ goto out;
+
+ if (WARN_ON_ONCE(READ_ONCE(sampler->oncpu) != smp_processor_id()))
+ goto out;
+
+ rb = ring_buffer_get(sampler->parent ? sampler->parent : sampler);
+ if (!rb)
+ goto out;
+
+ /*
+ * If this is an NMI hit inside sampling code, don't take
+ * the sample. See also perf_aux_sample_output().
+ */
+ if (READ_ONCE(rb->aux_in_sampling)) {
+ data->aux_size = 0;
+ } else {
+ size = min_t(size_t, size, perf_aux_size(rb));
+ data->aux_size = ALIGN(size, sizeof(u64));
+ }
+ ring_buffer_put(rb);
+
+out:
+ return data->aux_size;
+}
+
+long perf_pmu_snapshot_aux(struct ring_buffer *rb,
+ struct perf_event *event,
+ struct perf_output_handle *handle,
+ unsigned long size)
+{
+ unsigned long flags;
+ long ret;
+
+ /*
+ * Normal ->start()/->stop() callbacks run in IRQ mode in scheduler
+ * paths. If we start calling them in NMI context, they may race with
+ * the IRQ ones, that is, for example, re-starting an event that's just
+ * been stopped, which is why we're using a separate callback that
+ * doesn't change the event state.
+ *
+ * IRQs need to be disabled to prevent IPIs from racing with us.
+ */
+ local_irq_save(flags);
+ /*
+ * Guard against NMI hits inside the critical section;
+ * see also perf_prepare_sample_aux().
+ */
+ WRITE_ONCE(rb->aux_in_sampling, 1);
+ barrier();
+
+ ret = event->pmu->snapshot_aux(event, handle, size);
+
+ barrier();
+ WRITE_ONCE(rb->aux_in_sampling, 0);
+ local_irq_restore(flags);
+
+ return ret;
+}
+
+static void perf_aux_sample_output(struct perf_event *event,
+ struct perf_output_handle *handle,
+ struct perf_sample_data *data)
+{
+ struct perf_event *sampler = event->aux_event;
+ unsigned long pad;
+ struct ring_buffer *rb;
+ long size;
+
+ if (WARN_ON_ONCE(!sampler || !data->aux_size))
+ return;
+
+ rb = ring_buffer_get(sampler->parent ? sampler->parent : sampler);
+ if (!rb)
+ return;
+
+ size = perf_pmu_snapshot_aux(rb, sampler, handle, data->aux_size);
+
+ /*
+ * An error here means that perf_output_copy() failed (returned a
+ * non-zero surplus that it didn't copy), which in its current
+ * enlightened implementation is not possible. If that changes, we'd
+ * like to know.
+ */
+ if (WARN_ON_ONCE(size < 0))
+ goto out_put;
+
+ /*
+ * The pad comes from ALIGN()ing data->aux_size up to u64 in
+ * perf_prepare_sample_aux(), so should not be more than that.
+ */
+ pad = data->aux_size - size;
+ if (WARN_ON_ONCE(pad >= sizeof(u64)))
+ pad = 8;
+
+ if (pad) {
+ u64 zero = 0;
+ perf_output_copy(handle, &zero, pad);
+ }
+
+out_put:
+ ring_buffer_put(rb);
+}
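perf_prepare_sample_aux() reserves ALIGN(size, 8) bytes up front, while the PMU's ->snapshot_aux() may copy a few bytes less; the difference (at most 7 in the expected case) is zero-filled so the record stays u64-aligned. The arithmetic, standalone (the byte counts are assumed values):

    #include <stdint.h>
    #include <stdio.h>

    #define ALIGN8(x) (((x) + 7ULL) & ~7ULL)

    int main(void)
    {
            uint64_t copied = 4091;              /* bytes snapshot_aux() wrote */
            uint64_t reserved = ALIGN8(copied);  /* data->aux_size from prepare */
            uint64_t pad = reserved - copied;    /* 0..7 zero bytes appended */

            printf("reserved %llu, pad %llu\n",
                   (unsigned long long)reserved, (unsigned long long)pad);
            return 0;
    }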
+
static void __perf_event_header__init_id(struct perf_event_header *header,
struct perf_sample_data *data,
struct perf_event *event)
@@ -6495,6 +6696,13 @@ void perf_output_sample(struct perf_output_handle *handle,
if (sample_type & PERF_SAMPLE_PHYS_ADDR)
perf_output_put(handle, data->phys_addr);
+ if (sample_type & PERF_SAMPLE_AUX) {
+ perf_output_put(handle, data->aux_size);
+
+ if (data->aux_size)
+ perf_aux_sample_output(event, handle, data);
+ }
+
if (!event->attr.watermark) {
int wakeup_events = event->attr.wakeup_events;
@@ -6683,6 +6891,35 @@ void perf_prepare_sample(struct perf_event_header *header,
if (sample_type & PERF_SAMPLE_PHYS_ADDR)
data->phys_addr = perf_virt_to_phys(data->addr);
+
+ if (sample_type & PERF_SAMPLE_AUX) {
+ u64 size;
+
+ header->size += sizeof(u64); /* size */
+
+ /*
+ * Given the 16bit nature of header::size, an AUX sample can
+ * easily overflow it, what with all the preceding sample bits.
+ * Make sure this doesn't happen by using up to U16_MAX bytes
+ * per sample in total (rounded down to 8 byte boundary).
+ */
+ size = min_t(size_t, U16_MAX - header->size,
+ event->attr.aux_sample_size);
+ size = rounddown(size, 8);
+ size = perf_prepare_sample_aux(event, data, size);
+
+ WARN_ON_ONCE(size + header->size > U16_MAX);
+ header->size += size;
+ }
+ /*
+ * If you're adding more sample types here, you likely need to do
+ * something about the overflowing header::size, like repurposing the
+ * lowest 3 bits of size, which should always be zero at the moment.
+ * This raises a more important question: do we really need 512k-sized
+ * samples, and why? Good argumentation is in order for whatever you
+ * do here next.
+ */
+ WARN_ON_ONCE(header->size & 7);
}
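Because perf_event_header::size is a u16, the AUX payload is clamped to whatever headroom the preceding sample fields leave under 65535, then rounded down to a u64 boundary. Worked standalone (header and want are assumed example values):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t header = 320;               /* assumed size of prior fields */
            uint64_t want = 70000;               /* attr.aux_sample_size request */
            uint64_t size = UINT16_MAX - header; /* headroom under the u16 cap */

            if (want < size)
                    size = want;
            size -= size % 8;                    /* rounddown(size, 8) */
            printf("aux bytes granted: %llu\n", (unsigned long long)size);
            return 0;
    }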
static __always_inline int
@@ -10034,7 +10271,7 @@ static struct lock_class_key cpuctx_lock;
int perf_pmu_register(struct pmu *pmu, const char *name, int type)
{
- int cpu, ret;
+ int cpu, ret, max = PERF_TYPE_MAX;
mutex_lock(&pmus_lock);
ret = -ENOMEM;
@@ -10047,12 +10284,17 @@ int perf_pmu_register(struct pmu *pmu, const char *name, int type)
goto skip_type;
pmu->name = name;
- if (type < 0) {
- type = idr_alloc(&pmu_idr, pmu, PERF_TYPE_MAX, 0, GFP_KERNEL);
- if (type < 0) {
- ret = type;
+ if (type != PERF_TYPE_SOFTWARE) {
+ if (type >= 0)
+ max = type;
+
+ ret = idr_alloc(&pmu_idr, pmu, max, 0, GFP_KERNEL);
+ if (ret < 0)
goto free_pdc;
- }
+
+ WARN_ON(type >= 0 && ret != type);
+
+ type = ret;
}
pmu->type = type;
@@ -10129,7 +10371,16 @@ got_cpu_context:
if (!pmu->event_idx)
pmu->event_idx = perf_event_idx_default;
- list_add_rcu(&pmu->entry, &pmus);
+ /*
+ * Ensure the TYPE_SOFTWARE PMUs are at the head of the list,
+ * since these cannot be in the IDR. This way the linear search
+ * stays fast whenever a valid software event is provided.
+ */
+ if (type == PERF_TYPE_SOFTWARE || !name)
+ list_add_rcu(&pmu->entry, &pmus);
+ else
+ list_add_tail_rcu(&pmu->entry, &pmus);
+
atomic_set(&pmu->exclusive_cnt, 0);
ret = 0;
unlock:
@@ -10142,7 +10393,7 @@ free_dev:
put_device(pmu->dev);
free_idr:
- if (pmu->type >= PERF_TYPE_MAX)
+ if (pmu->type != PERF_TYPE_SOFTWARE)
idr_remove(&pmu_idr, pmu->type);
free_pdc:
@@ -10164,7 +10415,7 @@ void perf_pmu_unregister(struct pmu *pmu)
synchronize_rcu();
free_percpu(pmu->pmu_disable_count);
- if (pmu->type >= PERF_TYPE_MAX)
+ if (pmu->type != PERF_TYPE_SOFTWARE)
idr_remove(&pmu_idr, pmu->type);
if (pmu_bus_running) {
if (pmu->nr_addr_filters)
@@ -10234,9 +10485,8 @@ static int perf_try_init_event(struct pmu *pmu, struct perf_event *event)
static struct pmu *perf_init_event(struct perf_event *event)
{
+ int idx, type, ret;
struct pmu *pmu;
- int idx;
- int ret;
idx = srcu_read_lock(&pmus_srcu);
@@ -10248,13 +10498,28 @@ static struct pmu *perf_init_event(struct perf_event *event)
goto unlock;
}
+ /*
+ * PERF_TYPE_HARDWARE and PERF_TYPE_HW_CACHE
+ * are often aliases for PERF_TYPE_RAW.
+ */
+ type = event->attr.type;
+ if (type == PERF_TYPE_HARDWARE || type == PERF_TYPE_HW_CACHE)
+ type = PERF_TYPE_RAW;
+
+again:
rcu_read_lock();
- pmu = idr_find(&pmu_idr, event->attr.type);
+ pmu = idr_find(&pmu_idr, type);
rcu_read_unlock();
if (pmu) {
ret = perf_try_init_event(pmu, event);
+ if (ret == -ENOENT && event->attr.type != type) {
+ type = event->attr.type;
+ goto again;
+ }
+
if (ret)
pmu = ERR_PTR(ret);
+
goto unlock;
}
@@ -10477,12 +10742,9 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
context = parent_event->overflow_handler_context;
#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_EVENT_TRACING)
if (overflow_handler == bpf_overflow_handler) {
- struct bpf_prog *prog = bpf_prog_inc(parent_event->prog);
+ struct bpf_prog *prog = parent_event->prog;
- if (IS_ERR(prog)) {
- err = PTR_ERR(prog);
- goto err_ns;
- }
+ bpf_prog_inc(prog);
event->prog = prog;
event->orig_overflow_handler =
parent_event->orig_overflow_handler;
@@ -10535,6 +10797,15 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
goto err_ns;
}
+ /*
+	 * Disallow uncore-cgroup events; they don't make sense as the cgroup will
+ * be different on other CPUs in the uncore mask.
+ */
+ if (pmu->task_ctx_nr == perf_invalid_context && cgroup_fd != -1) {
+ err = -EINVAL;
+ goto err_pmu;
+ }
+
if (event->attr.aux_output &&
!(pmu->capabilities & PERF_PMU_CAP_AUX_OUTPUT)) {
err = -EOPNOTSUPP;
@@ -10580,11 +10851,20 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
}
}
+ err = security_perf_event_alloc(event);
+ if (err)
+ goto err_callchain_buffer;
+
/* symmetric to unaccount_event() in _free_event() */
account_event(event);
return event;
+err_callchain_buffer:
+ if (!event->parent) {
+ if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
+ put_callchain_buffers();
+ }
err_addr_filters:
kfree(event->addr_filter_ranges);
@@ -10635,7 +10915,7 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr,
attr->size = size;
- if (attr->__reserved_1 || attr->__reserved_2)
+ if (attr->__reserved_1 || attr->__reserved_2 || attr->__reserved_3)
return -EINVAL;
if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
@@ -10673,9 +10953,11 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr,
attr->branch_sample_type = mask;
}
/* privileged levels capture (kernel, hv): check permissions */
- if ((mask & PERF_SAMPLE_BRANCH_PERM_PLM)
- && perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
- return -EACCES;
+ if (mask & PERF_SAMPLE_BRANCH_PERM_PLM) {
+ ret = perf_allow_kernel(attr);
+ if (ret)
+ return ret;
+ }
}
if (attr->sample_type & PERF_SAMPLE_REGS_USER) {
@@ -10888,13 +11170,19 @@ SYSCALL_DEFINE5(perf_event_open,
if (flags & ~PERF_FLAG_ALL)
return -EINVAL;
+ /* Do we allow access to perf_event_open(2) ? */
+ err = security_perf_event_open(&attr, PERF_SECURITY_OPEN);
+ if (err)
+ return err;
+
err = perf_copy_attr(attr_uptr, &attr);
if (err)
return err;
if (!attr.exclude_kernel) {
- if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
- return -EACCES;
+ err = perf_allow_kernel(&attr);
+ if (err)
+ return err;
}
if (attr.namespaces) {
@@ -10911,9 +11199,11 @@ SYSCALL_DEFINE5(perf_event_open,
}
/* Only privileged users can get physical addresses */
- if ((attr.sample_type & PERF_SAMPLE_PHYS_ADDR) &&
- perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
- return -EACCES;
+ if ((attr.sample_type & PERF_SAMPLE_PHYS_ADDR)) {
+ err = perf_allow_kernel(&attr);
+ if (err)
+ return err;
+ }
err = security_locked_down(LOCKDOWN_PERF);
if (err && (attr.sample_type & PERF_SAMPLE_REGS_INTR))
@@ -11175,7 +11465,7 @@ SYSCALL_DEFINE5(perf_event_open,
}
}
- if (event->attr.aux_output && !perf_get_aux_event(event, group_leader))
+ if (perf_need_aux_event(event) && !perf_get_aux_event(event, group_leader))
goto err_locked;
/*
@@ -11323,8 +11613,11 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
int err;
/*
- * Get the target context (task or percpu):
+	 * Grouping is not supported for kernel events; neither is 'AUX'.
+	 * Make sure the caller's intentions are adjusted.
*/
+ if (attr->aux_output)
+ return ERR_PTR(-EINVAL);
event = perf_event_alloc(attr, cpu, task, NULL, NULL,
overflow_handler, context, -1);
@@ -11336,6 +11629,9 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
/* Mark owner so we could distinguish it from user events. */
event->owner = TASK_TOMBSTONE;
+ /*
+ * Get the target context (task or percpu):
+ */
ctx = find_get_context(event->pmu, task, event);
if (IS_ERR(ctx)) {
err = PTR_ERR(ctx);
@@ -11787,7 +12083,7 @@ inherit_event(struct perf_event *parent_event,
GFP_KERNEL);
if (!child_ctx->task_ctx_data) {
free_event(child_event);
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
}
@@ -11890,7 +12186,7 @@ static int inherit_group(struct perf_event *parent_event,
if (IS_ERR(child_ctr))
return PTR_ERR(child_ctr);
- if (sub->aux_event == parent_event &&
+ if (sub->aux_event == parent_event && child_ctr &&
!perf_get_aux_event(child_ctr, leader))
return -EINVAL;
}
diff --git a/kernel/events/internal.h b/kernel/events/internal.h
index 3aef4191798c..747d67f130cb 100644
--- a/kernel/events/internal.h
+++ b/kernel/events/internal.h
@@ -50,6 +50,7 @@ struct ring_buffer {
unsigned long aux_mmap_locked;
void (*free_aux)(void *);
refcount_t aux_refcount;
+ int aux_in_sampling;
void **aux_pages;
void *aux_priv;
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index ffb59a4ef4ff..7ffd5c763f93 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -562,6 +562,42 @@ void *perf_get_aux(struct perf_output_handle *handle)
}
EXPORT_SYMBOL_GPL(perf_get_aux);
+/*
+ * Copy out AUX data from an AUX handle.
+ */
+long perf_output_copy_aux(struct perf_output_handle *aux_handle,
+ struct perf_output_handle *handle,
+ unsigned long from, unsigned long to)
+{
+ unsigned long tocopy, remainder, len = 0;
+ struct ring_buffer *rb = aux_handle->rb;
+ void *addr;
+
+ from &= (rb->aux_nr_pages << PAGE_SHIFT) - 1;
+ to &= (rb->aux_nr_pages << PAGE_SHIFT) - 1;
+
+ do {
+ tocopy = PAGE_SIZE - offset_in_page(from);
+ if (to > from)
+ tocopy = min(tocopy, to - from);
+ if (!tocopy)
+ break;
+
+ addr = rb->aux_pages[from >> PAGE_SHIFT];
+ addr += offset_in_page(from);
+
+ remainder = perf_output_copy(handle, addr, tocopy);
+ if (remainder)
+ return -EFAULT;
+
+ len += tocopy;
+ from += tocopy;
+ from &= (rb->aux_nr_pages << PAGE_SHIFT) - 1;
+ } while (to != from);
+
+ return len;
+}
+
#define PERF_AUX_GFP (GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY)
static struct page *rb_alloc_aux_page(int node, int order)
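perf_output_copy_aux() relies on the AUX area size being a power of two, so wrap-around reduces to masking. A standalone sketch of the same walk, minus the page-granular chunking (assumptions: ring_size is a power of two and, as above, equal from/to offsets mean "copy the whole ring"):

#include <stddef.h>
#include <string.h>

static size_t ring_copy(const char *ring, size_t ring_size,
			size_t from, size_t to, char *out)
{
	size_t mask = ring_size - 1, len = 0;

	from &= mask;
	to &= mask;
	do {
		size_t chunk = ring_size - from;	/* bytes to physical end */

		if (to > from)
			chunk = to - from;		/* no wrap needed */
		memcpy(out + len, ring + from, chunk);
		len += chunk;
		from = (from + chunk) & mask;
	} while (from != to);

	return len;
}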
@@ -754,6 +790,14 @@ static void *perf_mmap_alloc_page(int cpu)
return page_address(page);
}
+static void perf_mmap_free_page(void *addr)
+{
+ struct page *page = virt_to_page(addr);
+
+ page->mapping = NULL;
+ __free_page(page);
+}
+
struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
{
struct ring_buffer *rb;
@@ -788,9 +832,9 @@ struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
fail_data_pages:
for (i--; i >= 0; i--)
- free_page((unsigned long)rb->data_pages[i]);
+ perf_mmap_free_page(rb->data_pages[i]);
- free_page((unsigned long)rb->user_page);
+ perf_mmap_free_page(rb->user_page);
fail_user_page:
kfree(rb);
@@ -799,21 +843,13 @@ fail:
return NULL;
}
-static void perf_mmap_free_page(unsigned long addr)
-{
- struct page *page = virt_to_page((void *)addr);
-
- page->mapping = NULL;
- __free_page(page);
-}
-
void rb_free(struct ring_buffer *rb)
{
int i;
- perf_mmap_free_page((unsigned long)rb->user_page);
+ perf_mmap_free_page(rb->user_page);
for (i = 0; i < rb->nr_pages; i++)
- perf_mmap_free_page((unsigned long)rb->data_pages[i]);
+ perf_mmap_free_page(rb->data_pages[i]);
kfree(rb);
}
diff --git a/kernel/exit.c b/kernel/exit.c
index a46a50d67002..0bac4b60d5f3 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -437,7 +437,7 @@ static void exit_mm(void)
struct mm_struct *mm = current->mm;
struct core_state *core_state;
- mm_release(current, mm);
+ exit_mm_release(current, mm);
if (!mm)
return;
sync_mm_rss(mm);
@@ -746,32 +746,12 @@ void __noreturn do_exit(long code)
*/
if (unlikely(tsk->flags & PF_EXITING)) {
pr_alert("Fixing recursive fault but reboot is needed!\n");
- /*
- * We can do this unlocked here. The futex code uses
- * this flag just to verify whether the pi state
- * cleanup has been done or not. In the worst case it
- * loops once more. We pretend that the cleanup was
- * done as there is no way to return. Either the
- * OWNER_DIED bit is set by now or we push the blocked
- * task into the wait for ever nirwana as well.
- */
- tsk->flags |= PF_EXITPIDONE;
+ futex_exit_recursive(tsk);
set_current_state(TASK_UNINTERRUPTIBLE);
schedule();
}
exit_signals(tsk); /* sets PF_EXITING */
- /*
- * Ensure that all new tsk->pi_lock acquisitions must observe
- * PF_EXITING. Serializes against futex.c:attach_to_pi_owner().
- */
- smp_mb();
- /*
- * Ensure that we must observe the pi_state in exit_mm() ->
- * mm_release() -> exit_pi_state_list().
- */
- raw_spin_lock_irq(&tsk->pi_lock);
- raw_spin_unlock_irq(&tsk->pi_lock);
if (unlikely(in_atomic())) {
pr_info("note: %s[%d] exited with preempt_count %d\n",
@@ -846,12 +826,6 @@ void __noreturn do_exit(long code)
* Make sure we are holding no locks:
*/
debug_check_no_locks_held();
- /*
- * We can do this unlocked here. The futex code uses this flag
- * just to verify whether the pi state cleanup has been done
- * or not. In the worst case it loops once more.
- */
- tsk->flags |= PF_EXITPIDONE;
if (tsk->io_context)
exit_io_context(tsk);
@@ -1457,7 +1431,7 @@ repeat:
*/
wo->notask_error = -ECHILD;
if ((wo->wo_type < PIDTYPE_MAX) &&
- (!wo->wo_pid || hlist_empty(&wo->wo_pid->tasks[wo->wo_type])))
+ (!wo->wo_pid || !pid_has_task(wo->wo_pid, wo->wo_type)))
goto notask;
set_current_state(TASK_INTERRUPTIBLE);
diff --git a/kernel/extable.c b/kernel/extable.c
index f6c9406eec7d..f6920a11e28a 100644
--- a/kernel/extable.c
+++ b/kernel/extable.c
@@ -56,6 +56,8 @@ const struct exception_table_entry *search_exception_tables(unsigned long addr)
e = search_kernel_exception_table(addr);
if (!e)
e = search_module_extables(addr);
+ if (!e)
+ e = search_bpf_extables(addr);
return e;
}
diff --git a/kernel/fork.c b/kernel/fork.c
index 55af6931c6ec..00b64f41c2b4 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1283,24 +1283,8 @@ static int wait_for_vfork_done(struct task_struct *child,
* restoring the old one. . .
* Eric Biederman 10 January 1998
*/
-void mm_release(struct task_struct *tsk, struct mm_struct *mm)
+static void mm_release(struct task_struct *tsk, struct mm_struct *mm)
{
- /* Get rid of any futexes when releasing the mm */
-#ifdef CONFIG_FUTEX
- if (unlikely(tsk->robust_list)) {
- exit_robust_list(tsk);
- tsk->robust_list = NULL;
- }
-#ifdef CONFIG_COMPAT
- if (unlikely(tsk->compat_robust_list)) {
- compat_exit_robust_list(tsk);
- tsk->compat_robust_list = NULL;
- }
-#endif
- if (unlikely(!list_empty(&tsk->pi_state_list)))
- exit_pi_state_list(tsk);
-#endif
-
uprobe_free_utask(tsk);
/* Get rid of any cached register state */
@@ -1333,6 +1317,18 @@ void mm_release(struct task_struct *tsk, struct mm_struct *mm)
complete_vfork_done(tsk);
}
+void exit_mm_release(struct task_struct *tsk, struct mm_struct *mm)
+{
+ futex_exit_release(tsk);
+ mm_release(tsk, mm);
+}
+
+void exec_mm_release(struct task_struct *tsk, struct mm_struct *mm)
+{
+ futex_exec_release(tsk);
+ mm_release(tsk, mm);
+}
+
/**
* dup_mm() - duplicates an existing mm structure
* @tsk: the task_struct with which the new mm will be associated.
@@ -1517,6 +1513,11 @@ static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
spin_lock_irq(&current->sighand->siglock);
memcpy(sig->action, current->sighand->action, sizeof(sig->action));
spin_unlock_irq(&current->sighand->siglock);
+
+	/* Reset all signal handlers not set to SIG_IGN to SIG_DFL. */
+ if (clone_flags & CLONE_CLEAR_SIGHAND)
+ flush_signal_handlers(tsk, 0);
+
return 0;
}
@@ -1695,12 +1696,68 @@ static int pidfd_release(struct inode *inode, struct file *file)
}
#ifdef CONFIG_PROC_FS
+/**
+ * pidfd_show_fdinfo - print information about a pidfd
+ * @m: proc fdinfo file
+ * @f: file referencing a pidfd
+ *
+ * Pid:
+ * This function will print the pid that a given pidfd refers to in the
+ * pid namespace of the procfs instance.
+ * If the pid namespace of the process is not a descendant of the pid
+ * namespace of the procfs instance 0 will be shown as its pid. This is
+ * similar to calling getppid() on a process whose parent is outside of
+ * its pid namespace.
+ *
+ * NSpid:
+ * If pid namespaces are supported then this function will also print
+ * the pid a given pidfd refers to for all descendant pid namespaces
+ * starting from the current pid namespace of the instance, i.e. the
+ * Pid field and the first entry in the NSpid field will be identical.
+ * If the pid namespace of the process is not a descendant of the pid
+ * namespace of the procfs instance 0 will be shown as its first NSpid
+ * entry and no others will be shown.
+ * Note that this differs from the Pid and NSpid fields in
+ * /proc/<pid>/status where Pid and NSpid are always shown relative to
+ * the pid namespace of the procfs instance. The difference becomes
+ * obvious when sending around a pidfd between pid namespaces from a
+ * different branch of the tree, i.e. where no ancestral relation is
+ * present between the pid namespaces:
+ * - create two new pid namespaces ns1 and ns2 in the initial pid
+ * namespace (also take care to create new mount namespaces in the
+ * new pid namespace and mount procfs)
+ * - create a process with a pidfd in ns1
+ * - send pidfd from ns1 to ns2
+ * - read /proc/self/fdinfo/<pidfd> and observe that both Pid and NSpid
+ * have exactly one entry, which is 0
+ */
static void pidfd_show_fdinfo(struct seq_file *m, struct file *f)
{
- struct pid_namespace *ns = proc_pid_ns(file_inode(m->file));
struct pid *pid = f->private_data;
+ struct pid_namespace *ns;
+ pid_t nr = -1;
+
+ if (likely(pid_has_task(pid, PIDTYPE_PID))) {
+ ns = proc_pid_ns(file_inode(m->file));
+ nr = pid_nr_ns(pid, ns);
+ }
- seq_put_decimal_ull(m, "Pid:\t", pid_nr_ns(pid, ns));
+ seq_put_decimal_ll(m, "Pid:\t", nr);
+
+#ifdef CONFIG_PID_NS
+ seq_put_decimal_ll(m, "\nNSpid:\t", nr);
+ if (nr > 0) {
+ int i;
+
+ /* If nr is non-zero it means that 'pid' is valid and that
+ * ns, i.e. the pid namespace associated with the procfs
+ * instance, is in the pid namespace hierarchy of pid.
+ * Start at one below the already printed level.
+ */
+ for (i = ns->level + 1; i <= pid->level; i++)
+ seq_put_decimal_ll(m, "\t", pid->numbers[i].nr);
+ }
+#endif
seq_putc(m, '\n');
}
#endif
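A user-space illustration of the result (assumes pidfd_open(2), Linux 5.3+, and a libc exposing SYS_pidfd_open): dump the fdinfo of a pidfd for the current process. On a kernel with this change the output carries a Pid: line and, under CONFIG_PID_NS, an NSpid: line with one entry per nesting level.

#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	int pidfd = syscall(SYS_pidfd_open, getpid(), 0);
	char path[64], line[256];
	FILE *f;

	if (pidfd < 0)
		return 1;
	snprintf(path, sizeof(path), "/proc/self/fdinfo/%d", pidfd);
	f = fopen(path, "r");
	while (f && fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* e.g. "Pid:\t1234\nNSpid:\t1234\n" */
	return 0;
}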
@@ -1708,11 +1765,11 @@ static void pidfd_show_fdinfo(struct seq_file *m, struct file *f)
/*
* Poll support for process exit notification.
*/
-static unsigned int pidfd_poll(struct file *file, struct poll_table_struct *pts)
+static __poll_t pidfd_poll(struct file *file, struct poll_table_struct *pts)
{
struct task_struct *task;
struct pid *pid = file->private_data;
- int poll_flags = 0;
+ __poll_t poll_flags = 0;
poll_wait(file, &pid->wait_pidfd, pts);
@@ -1724,7 +1781,7 @@ static unsigned int pidfd_poll(struct file *file, struct poll_table_struct *pts)
* group, then poll(2) should block, similar to the wait(2) family.
*/
if (!task || (task->exit_state && thread_group_empty(task)))
- poll_flags = POLLIN | POLLRDNORM;
+ poll_flags = EPOLLIN | EPOLLRDNORM;
rcu_read_unlock();
return poll_flags;
@@ -2026,7 +2083,8 @@ static __latent_entropy struct task_struct *copy_process(
stackleak_task_init(p);
if (pid != &init_struct_pid) {
- pid = alloc_pid(p->nsproxy->pid_ns_for_children);
+ pid = alloc_pid(p->nsproxy->pid_ns_for_children, args->set_tid,
+ args->set_tid_size);
if (IS_ERR(pid)) {
retval = PTR_ERR(pid);
goto bad_fork_cleanup_thread;
@@ -2062,14 +2120,8 @@ static __latent_entropy struct task_struct *copy_process(
#ifdef CONFIG_BLOCK
p->plug = NULL;
#endif
-#ifdef CONFIG_FUTEX
- p->robust_list = NULL;
-#ifdef CONFIG_COMPAT
- p->compat_robust_list = NULL;
-#endif
- INIT_LIST_HEAD(&p->pi_state_list);
- p->pi_state_cache = NULL;
-#endif
+ futex_init_task(p);
+
/*
* sigaltstack should be cleared when sharing the same VM
*/
@@ -2529,6 +2581,7 @@ noinline static int copy_clone_args_from_user(struct kernel_clone_args *kargs,
{
int err;
struct clone_args args;
+ pid_t *kset_tid = kargs->set_tid;
if (unlikely(usize > PAGE_SIZE))
return -E2BIG;
@@ -2539,6 +2592,15 @@ noinline static int copy_clone_args_from_user(struct kernel_clone_args *kargs,
if (err)
return err;
+ if (unlikely(args.set_tid_size > MAX_PID_NS_LEVEL))
+ return -EINVAL;
+
+ if (unlikely(!args.set_tid && args.set_tid_size > 0))
+ return -EINVAL;
+
+ if (unlikely(args.set_tid && args.set_tid_size == 0))
+ return -EINVAL;
+
/*
* Verify that higher 32bits of exit_signal are unset and that
* it is a valid signal
@@ -2556,8 +2618,16 @@ noinline static int copy_clone_args_from_user(struct kernel_clone_args *kargs,
.stack = args.stack,
.stack_size = args.stack_size,
.tls = args.tls,
+ .set_tid_size = args.set_tid_size,
};
+ if (args.set_tid &&
+ copy_from_user(kset_tid, u64_to_user_ptr(args.set_tid),
+ (kargs->set_tid_size * sizeof(pid_t))))
+ return -EFAULT;
+
+ kargs->set_tid = kset_tid;
+
return 0;
}
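A hypothetical caller-side sketch (not from the patch): request a specific PID for the child in the current PID namespace. It assumes v5.5+ uapi headers, CAP_SYS_ADMIN in the target namespace, and the raw syscall, since glibc provides no clone3 wrapper.

#define _GNU_SOURCE
#include <linux/sched.h>	/* struct clone_args */
#include <signal.h>
#include <stdint.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static pid_t clone3_with_tid(pid_t tid)
{
	pid_t set_tid[1] = { tid };
	struct clone_args args;

	memset(&args, 0, sizeof(args));
	args.exit_signal = SIGCHLD;
	args.set_tid = (uint64_t)(uintptr_t)set_tid;
	args.set_tid_size = 1;	/* one entry: the most nested level only */

	return syscall(__NR_clone3, &args, sizeof(args));
}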
@@ -2591,11 +2661,8 @@ static inline bool clone3_stack_valid(struct kernel_clone_args *kargs)
static bool clone3_args_valid(struct kernel_clone_args *kargs)
{
- /*
- * All lower bits of the flag word are taken.
- * Verify that no other unknown flags are passed along.
- */
- if (kargs->flags & ~CLONE_LEGACY_FLAGS)
+ /* Verify that no unknown flags are passed along. */
+ if (kargs->flags & ~(CLONE_LEGACY_FLAGS | CLONE_CLEAR_SIGHAND))
return false;
/*
@@ -2605,6 +2672,10 @@ static bool clone3_args_valid(struct kernel_clone_args *kargs)
if (kargs->flags & (CLONE_DETACHED | CSIGNAL))
return false;
+ if ((kargs->flags & (CLONE_SIGHAND | CLONE_CLEAR_SIGHAND)) ==
+ (CLONE_SIGHAND | CLONE_CLEAR_SIGHAND))
+ return false;
+
if ((kargs->flags & (CLONE_THREAD | CLONE_PARENT)) &&
kargs->exit_signal)
return false;
@@ -2631,6 +2702,9 @@ SYSCALL_DEFINE2(clone3, struct clone_args __user *, uargs, size_t, size)
int err;
struct kernel_clone_args kargs;
+ pid_t set_tid[MAX_PID_NS_LEVEL];
+
+ kargs.set_tid = set_tid;
err = copy_clone_args_from_user(&kargs, uargs, size);
if (err)
diff --git a/kernel/futex.c b/kernel/futex.c
index bd18f60e4c6c..03c518e9747e 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -325,6 +325,12 @@ static inline bool should_fail_futex(bool fshared)
}
#endif /* CONFIG_FAIL_FUTEX */
+#ifdef CONFIG_COMPAT
+static void compat_exit_robust_list(struct task_struct *curr);
+#else
+static inline void compat_exit_robust_list(struct task_struct *curr) { }
+#endif
+
static inline void futex_get_mm(union futex_key *key)
{
mmgrab(key->private.mm);
@@ -890,7 +896,7 @@ static void put_pi_state(struct futex_pi_state *pi_state)
* Kernel cleans up PI-state, but userspace is likely hosed.
* (Robust-futex cleanup is separate and might save the day for userspace.)
*/
-void exit_pi_state_list(struct task_struct *curr)
+static void exit_pi_state_list(struct task_struct *curr)
{
struct list_head *next, *head = &curr->pi_state_list;
struct futex_pi_state *pi_state;
@@ -960,7 +966,8 @@ void exit_pi_state_list(struct task_struct *curr)
}
raw_spin_unlock_irq(&curr->pi_lock);
}
-
+#else
+static inline void exit_pi_state_list(struct task_struct *curr) { }
#endif
/*
@@ -1169,16 +1176,47 @@ out_error:
return ret;
}
+/**
+ * wait_for_owner_exiting - Block until the owner has exited
+ * @ret: owner's current futex lock status
+ * @exiting: Pointer to the exiting task
+ *
+ * Caller must hold a refcount on @exiting.
+ */
+static void wait_for_owner_exiting(int ret, struct task_struct *exiting)
+{
+ if (ret != -EBUSY) {
+ WARN_ON_ONCE(exiting);
+ return;
+ }
+
+ if (WARN_ON_ONCE(ret == -EBUSY && !exiting))
+ return;
+
+ mutex_lock(&exiting->futex_exit_mutex);
+ /*
+ * No point in doing state checking here. If the waiter got here
+ * while the task was in exec()->exec_futex_release() then it can
+ * have any FUTEX_STATE_* value when the waiter has acquired the
+ * mutex. OK, if running, EXITING or DEAD if it reached exit()
+ * already. Highly unlikely and not a problem. Just one more round
+ * through the futex maze.
+ */
+ mutex_unlock(&exiting->futex_exit_mutex);
+
+ put_task_struct(exiting);
+}
+
static int handle_exit_race(u32 __user *uaddr, u32 uval,
struct task_struct *tsk)
{
u32 uval2;
/*
- * If PF_EXITPIDONE is not yet set, then try again.
+ * If the futex exit state is not yet FUTEX_STATE_DEAD, tell the
+ * caller that the alleged owner is busy.
*/
- if (tsk && !(tsk->flags & PF_EXITPIDONE))
- return -EAGAIN;
+ if (tsk && tsk->futex_state != FUTEX_STATE_DEAD)
+ return -EBUSY;
/*
* Reread the user space value to handle the following situation:
@@ -1196,8 +1234,9 @@ static int handle_exit_race(u32 __user *uaddr, u32 uval,
* *uaddr = 0xC0000000; tsk = get_task(PID);
* } if (!tsk->flags & PF_EXITING) {
* ... attach();
- * tsk->flags |= PF_EXITPIDONE; } else {
- * if (!(tsk->flags & PF_EXITPIDONE))
+ * tsk->futex_state = } else {
+ * FUTEX_STATE_DEAD; if (tsk->futex_state !=
+ * FUTEX_STATE_DEAD)
* return -EAGAIN;
* return -ESRCH; <--- FAIL
* }
@@ -1228,7 +1267,8 @@ static int handle_exit_race(u32 __user *uaddr, u32 uval,
* it after doing proper sanity checks.
*/
static int attach_to_pi_owner(u32 __user *uaddr, u32 uval, union futex_key *key,
- struct futex_pi_state **ps)
+ struct futex_pi_state **ps,
+ struct task_struct **exiting)
{
pid_t pid = uval & FUTEX_TID_MASK;
struct futex_pi_state *pi_state;
@@ -1253,22 +1293,33 @@ static int attach_to_pi_owner(u32 __user *uaddr, u32 uval, union futex_key *key,
}
/*
- * We need to look at the task state flags to figure out,
- * whether the task is exiting. To protect against the do_exit
- * change of the task flags, we do this protected by
- * p->pi_lock:
+ * We need to look at the task state to figure out, whether the
+ * task is exiting. To protect against the change of the task state
+ * in futex_exit_release(), we do this protected by p->pi_lock:
*/
raw_spin_lock_irq(&p->pi_lock);
- if (unlikely(p->flags & PF_EXITING)) {
+ if (unlikely(p->futex_state != FUTEX_STATE_OK)) {
/*
- * The task is on the way out. When PF_EXITPIDONE is
- * set, we know that the task has finished the
- * cleanup:
+ * The task is on the way out. When the futex state is
+ * FUTEX_STATE_DEAD, we know that the task has finished
+ * the cleanup:
*/
int ret = handle_exit_race(uaddr, uval, p);
raw_spin_unlock_irq(&p->pi_lock);
- put_task_struct(p);
+ /*
+ * If the owner task is between FUTEX_STATE_EXITING and
+ * FUTEX_STATE_DEAD then store the task pointer and keep
+ * the reference on the task struct. The calling code will
+ * drop all locks, wait for the task to reach
+ * FUTEX_STATE_DEAD and then drop the refcount. This is
+ * required to prevent a live lock when the current task
+ * preempted the exiting task between the two states.
+ */
+ if (ret == -EBUSY)
+ *exiting = p;
+ else
+ put_task_struct(p);
return ret;
}
@@ -1307,7 +1358,8 @@ static int attach_to_pi_owner(u32 __user *uaddr, u32 uval, union futex_key *key,
static int lookup_pi_state(u32 __user *uaddr, u32 uval,
struct futex_hash_bucket *hb,
- union futex_key *key, struct futex_pi_state **ps)
+ union futex_key *key, struct futex_pi_state **ps,
+ struct task_struct **exiting)
{
struct futex_q *top_waiter = futex_top_waiter(hb, key);
@@ -1322,7 +1374,7 @@ static int lookup_pi_state(u32 __user *uaddr, u32 uval,
* We are the first waiter - try to look up the owner based on
* @uval and attach to it.
*/
- return attach_to_pi_owner(uaddr, uval, key, ps);
+ return attach_to_pi_owner(uaddr, uval, key, ps, exiting);
}
static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval)
@@ -1350,6 +1402,8 @@ static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval)
* lookup
* @task: the task to perform the atomic lock work for. This will
* be "current" except in the case of requeue pi.
+ * @exiting: Pointer to store the task pointer of the owner task
+ * which is in the middle of exiting
* @set_waiters: force setting the FUTEX_WAITERS bit (1) or not (0)
*
* Return:
@@ -1358,11 +1412,17 @@ static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval)
* - <0 - error
*
* The hb->lock and futex_key refs shall be held by the caller.
+ *
+ * @exiting is only set when the return value is -EBUSY. If so, this holds
+ * a refcount on the exiting task on return and the caller needs to drop it
+ * after waiting for the exit to complete.
*/
static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
union futex_key *key,
struct futex_pi_state **ps,
- struct task_struct *task, int set_waiters)
+ struct task_struct *task,
+ struct task_struct **exiting,
+ int set_waiters)
{
u32 uval, newval, vpid = task_pid_vnr(task);
struct futex_q *top_waiter;
@@ -1432,7 +1492,7 @@ static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
* attach to the owner. If that fails, no harm done, we only
* set the FUTEX_WAITERS bit in the user space variable.
*/
- return attach_to_pi_owner(uaddr, newval, key, ps);
+ return attach_to_pi_owner(uaddr, newval, key, ps, exiting);
}
/**
@@ -1480,7 +1540,7 @@ static void mark_wake_futex(struct wake_q_head *wake_q, struct futex_q *q)
/*
* Queue the task for later wakeup for after we've released
- * the hb->lock. wake_q_add() grabs reference to p.
+ * the hb->lock.
*/
wake_q_add_safe(wake_q, p);
}
@@ -1850,6 +1910,8 @@ void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
* @key1: the from futex key
* @key2: the to futex key
* @ps: address to store the pi_state pointer
+ * @exiting: Pointer to store the task pointer of the owner task
+ * which is in the middle of exiting
* @set_waiters: force setting the FUTEX_WAITERS bit (1) or not (0)
*
* Try and get the lock on behalf of the top waiter if we can do it atomically.
@@ -1857,16 +1919,20 @@ void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
* then direct futex_lock_pi_atomic() to force setting the FUTEX_WAITERS bit.
* hb1 and hb2 must be held by the caller.
*
+ * @exiting is only set when the return value is -EBUSY. If so, this holds
+ * a refcount on the exiting task on return and the caller needs to drop it
+ * after waiting for the exit to complete.
+ *
* Return:
* - 0 - failed to acquire the lock atomically;
* - >0 - acquired the lock, return value is vpid of the top_waiter
* - <0 - error
*/
-static int futex_proxy_trylock_atomic(u32 __user *pifutex,
- struct futex_hash_bucket *hb1,
- struct futex_hash_bucket *hb2,
- union futex_key *key1, union futex_key *key2,
- struct futex_pi_state **ps, int set_waiters)
+static int
+futex_proxy_trylock_atomic(u32 __user *pifutex, struct futex_hash_bucket *hb1,
+ struct futex_hash_bucket *hb2, union futex_key *key1,
+ union futex_key *key2, struct futex_pi_state **ps,
+ struct task_struct **exiting, int set_waiters)
{
struct futex_q *top_waiter = NULL;
u32 curval;
@@ -1903,7 +1969,7 @@ static int futex_proxy_trylock_atomic(u32 __user *pifutex,
*/
vpid = task_pid_vnr(top_waiter->task);
ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task,
- set_waiters);
+ exiting, set_waiters);
if (ret == 1) {
requeue_pi_wake_futex(top_waiter, key2, hb2);
return vpid;
@@ -2032,6 +2098,8 @@ retry_private:
}
if (requeue_pi && (task_count - nr_wake < nr_requeue)) {
+ struct task_struct *exiting = NULL;
+
/*
* Attempt to acquire uaddr2 and wake the top waiter. If we
* intend to requeue waiters, force setting the FUTEX_WAITERS
@@ -2039,7 +2107,8 @@ retry_private:
* faults rather in the requeue loop below.
*/
ret = futex_proxy_trylock_atomic(uaddr2, hb1, hb2, &key1,
- &key2, &pi_state, nr_requeue);
+ &key2, &pi_state,
+ &exiting, nr_requeue);
/*
* At this point the top_waiter has either taken uaddr2 or is
@@ -2066,7 +2135,8 @@ retry_private:
* If that call succeeds then we have pi_state and an
* initial refcount on it.
*/
- ret = lookup_pi_state(uaddr2, ret, hb2, &key2, &pi_state);
+ ret = lookup_pi_state(uaddr2, ret, hb2, &key2,
+ &pi_state, &exiting);
}
switch (ret) {
@@ -2084,17 +2154,24 @@ retry_private:
if (!ret)
goto retry;
goto out;
+ case -EBUSY:
case -EAGAIN:
/*
* Two reasons for this:
- * - Owner is exiting and we just wait for the
+ * - EBUSY: Owner is exiting and we just wait for the
* exit to complete.
- * - The user space value changed.
+ * - EAGAIN: The user space value changed.
*/
double_unlock_hb(hb1, hb2);
hb_waiters_dec(hb2);
put_futex_key(&key2);
put_futex_key(&key1);
+ /*
+ * Handle the case where the owner is in the middle of
+ * exiting. Wait for the exit to complete otherwise
+ * this task might loop forever, aka. live lock.
+ */
+ wait_for_owner_exiting(ret, exiting);
cond_resched();
goto retry;
default:
@@ -2801,6 +2878,7 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
{
struct hrtimer_sleeper timeout, *to;
struct futex_pi_state *pi_state = NULL;
+ struct task_struct *exiting = NULL;
struct rt_mutex_waiter rt_waiter;
struct futex_hash_bucket *hb;
struct futex_q q = futex_q_init;
@@ -2822,7 +2900,8 @@ retry:
retry_private:
hb = queue_lock(&q);
- ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current, 0);
+ ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current,
+ &exiting, 0);
if (unlikely(ret)) {
/*
* Atomic work succeeded and we got the lock,
@@ -2835,15 +2914,22 @@ retry_private:
goto out_unlock_put_key;
case -EFAULT:
goto uaddr_faulted;
+ case -EBUSY:
case -EAGAIN:
/*
* Two reasons for this:
- * - Task is exiting and we just wait for the
+ * - EBUSY: Task is exiting and we just wait for the
* exit to complete.
- * - The user space value changed.
+ * - EAGAIN: The user space value changed.
*/
queue_unlock(hb);
put_futex_key(&q.key);
+ /*
+ * Handle the case where the owner is in the middle of
+ * exiting. Wait for the exit to complete otherwise
+ * this task might loop forever, aka. live lock.
+ */
+ wait_for_owner_exiting(ret, exiting);
cond_resched();
goto retry;
default:
@@ -3452,11 +3538,16 @@ err_unlock:
return ret;
}
+/* Constants for the pending_op argument of handle_futex_death */
+#define HANDLE_DEATH_PENDING true
+#define HANDLE_DEATH_LIST false
+
/*
* Process a futex-list entry, check whether it's owned by the
* dying task, and do notification if so:
*/
-static int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi)
+static int handle_futex_death(u32 __user *uaddr, struct task_struct *curr,
+ bool pi, bool pending_op)
{
u32 uval, uninitialized_var(nval), mval;
int err;
@@ -3469,6 +3560,42 @@ retry:
if (get_user(uval, uaddr))
return -1;
+ /*
+ * Special case for regular (non PI) futexes. The unlock path in
+ * user space has two race scenarios:
+ *
+ * 1. The unlock path releases the user space futex value and
+ * before it can execute the futex() syscall to wake up
+ * waiters it is killed.
+ *
+ * 2. A woken up waiter is killed before it can acquire the
+ * futex in user space.
+ *
+ * In both cases the TID validation below prevents a wakeup of
+ * potential waiters which can cause these waiters to block
+ * forever.
+ *
+ * In both cases the following conditions are met:
+ *
+ * 1) task->robust_list->list_op_pending != NULL
+ * @pending_op == true
+ * 2) User space futex value == 0
+ * 3) Regular futex: @pi == false
+ *
+ * If these conditions are met, it is safe to attempt waking up a
+ * potential waiter without touching the user space futex value and
+ * trying to set the OWNER_DIED bit. The user space futex value is
+ * uncontended and the rest of the user space mutex state is
+ * consistent, so a woken waiter will just take over the
+ * uncontended futex. Setting the OWNER_DIED bit would create
+ * inconsistent state and malfunction of the user space owner died
+ * handling.
+ */
+ if (pending_op && !pi && !uval) {
+ futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);
+ return 0;
+ }
+
if ((uval & FUTEX_TID_MASK) != task_pid_vnr(curr))
return 0;
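The user-space sequence behind both races reads roughly as below; robust_mutex_unlock(), dequeue() and the lock layout are hypothetical stand-ins for what glibc actually implements.

#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

/* A kill after step 3 but before steps 4/5 complete leaves
 * list_op_pending set and *futex_word == 0 -- exactly the
 * "pending_op && !pi && !uval" case the kernel now wakes for. */
static void robust_mutex_unlock(struct robust_list_head *head,
				struct robust_list *lock_entry,
				unsigned int *futex_word)
{
	head->list_op_pending = lock_entry;	/* 1: announce intent */
	dequeue(lock_entry);			/* 2: unlink (hypothetical helper) */
	__atomic_store_n(futex_word, 0, __ATOMIC_SEQ_CST);	/* 3: release */
	syscall(SYS_futex, futex_word, FUTEX_WAKE, 1, NULL, NULL, 0); /* 4: wake */
	head->list_op_pending = NULL;		/* 5: done */
}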
@@ -3547,7 +3674,7 @@ static inline int fetch_robust_entry(struct robust_list __user **entry,
*
* We silently return on any sign of list-walking problem.
*/
-void exit_robust_list(struct task_struct *curr)
+static void exit_robust_list(struct task_struct *curr)
{
struct robust_list_head __user *head = curr->robust_list;
struct robust_list __user *entry, *next_entry, *pending;
@@ -3588,10 +3715,11 @@ void exit_robust_list(struct task_struct *curr)
* A pending lock might already be on the list, so
* don't process it twice:
*/
- if (entry != pending)
+ if (entry != pending) {
if (handle_futex_death((void __user *)entry + futex_offset,
- curr, pi))
+ curr, pi, HANDLE_DEATH_LIST))
return;
+ }
if (rc)
return;
entry = next_entry;
@@ -3605,9 +3733,118 @@ void exit_robust_list(struct task_struct *curr)
cond_resched();
}
- if (pending)
+ if (pending) {
handle_futex_death((void __user *)pending + futex_offset,
- curr, pip);
+ curr, pip, HANDLE_DEATH_PENDING);
+ }
+}
+
+static void futex_cleanup(struct task_struct *tsk)
+{
+ if (unlikely(tsk->robust_list)) {
+ exit_robust_list(tsk);
+ tsk->robust_list = NULL;
+ }
+
+#ifdef CONFIG_COMPAT
+ if (unlikely(tsk->compat_robust_list)) {
+ compat_exit_robust_list(tsk);
+ tsk->compat_robust_list = NULL;
+ }
+#endif
+
+ if (unlikely(!list_empty(&tsk->pi_state_list)))
+ exit_pi_state_list(tsk);
+}
+
+/**
+ * futex_exit_recursive - Set the task's futex state to FUTEX_STATE_DEAD
+ * @tsk: task to set the state on
+ *
+ * Set the futex exit state of the task lockless. The futex waiter code
+ * observes that state when a task is exiting and loops until the task has
+ * actually finished the futex cleanup. The worst case for this is that the
+ * waiter runs through the wait loop until the state becomes visible.
+ *
+ * This is called from the recursive fault handling path in do_exit().
+ *
+ * This is best effort. Either the futex exit code has run already or
+ * not. If the OWNER_DIED bit has been set on the futex then the waiter can
+ * take it over. If not, the problem is pushed back to user space. If the
+ * futex exit code did not run yet, then an already queued waiter might
+ * block forever, but there is nothing which can be done about that.
+ */
+void futex_exit_recursive(struct task_struct *tsk)
+{
+ /* If the state is FUTEX_STATE_EXITING then futex_exit_mutex is held */
+ if (tsk->futex_state == FUTEX_STATE_EXITING)
+ mutex_unlock(&tsk->futex_exit_mutex);
+ tsk->futex_state = FUTEX_STATE_DEAD;
+}
+
+static void futex_cleanup_begin(struct task_struct *tsk)
+{
+ /*
+ * Prevent various race issues against a concurrent incoming waiter
+ * including live locks by forcing the waiter to block on
+ * tsk->futex_exit_mutex when it observes FUTEX_STATE_EXITING in
+ * attach_to_pi_owner().
+ */
+ mutex_lock(&tsk->futex_exit_mutex);
+
+ /*
+ * Switch the state to FUTEX_STATE_EXITING under tsk->pi_lock.
+ *
+ * This ensures that all subsequent checks of tsk->futex_state in
+ * attach_to_pi_owner() must observe FUTEX_STATE_EXITING with
+ * tsk->pi_lock held.
+ *
+ * It guarantees also that a pi_state which was queued right before
+ * the state change under tsk->pi_lock by a concurrent waiter must
+ * be observed in exit_pi_state_list().
+ */
+ raw_spin_lock_irq(&tsk->pi_lock);
+ tsk->futex_state = FUTEX_STATE_EXITING;
+ raw_spin_unlock_irq(&tsk->pi_lock);
+}
+
+static void futex_cleanup_end(struct task_struct *tsk, int state)
+{
+ /*
+ * Lockless store. The only side effect is that an observer might
+ * take another loop until it becomes visible.
+ */
+ tsk->futex_state = state;
+ /*
+ * Drop the exit protection. This unblocks waiters which observed
+ * FUTEX_STATE_EXITING to reevaluate the state.
+ */
+ mutex_unlock(&tsk->futex_exit_mutex);
+}
+
+void futex_exec_release(struct task_struct *tsk)
+{
+ /*
+ * The state handling is done for consistency, but in the case of
+	 * exec() there is no way to prevent further damage as the PID stays
+	 * the same. But for the unlikely and arguably buggy case that a
+	 * futex is held on exec(), this provides at least as much state
+	 * consistency protection as is possible.
+ */
+ futex_cleanup_begin(tsk);
+ futex_cleanup(tsk);
+ /*
+	 * Reset the state to FUTEX_STATE_OK. The task is alive and about to
+	 * exec a new binary.
+ */
+ futex_cleanup_end(tsk, FUTEX_STATE_OK);
+}
+
+void futex_exit_release(struct task_struct *tsk)
+{
+ futex_cleanup_begin(tsk);
+ futex_cleanup(tsk);
+ futex_cleanup_end(tsk, FUTEX_STATE_DEAD);
}
long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
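Condensing the waiter-side protocol the series establishes (a sketch assembled from the hunks above, not literal kernel code; key re-lookup elided):

retry:
	hb = queue_lock(&q);
	ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state,
				   current, &exiting, 0);
	if (ret == -EBUSY || ret == -EAGAIN) {
		queue_unlock(hb);
		put_futex_key(&q.key);
		/* Blocks on the owner's futex_exit_mutex only for -EBUSY,
		 * turning a potential live lock into a bounded wait. */
		wait_for_owner_exiting(ret, exiting);
		cond_resched();
		goto retry;
	}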
@@ -3737,7 +3974,7 @@ static void __user *futex_uaddr(struct robust_list __user *entry,
*
* We silently return on any sign of list-walking problem.
*/
-void compat_exit_robust_list(struct task_struct *curr)
+static void compat_exit_robust_list(struct task_struct *curr)
{
struct compat_robust_list_head __user *head = curr->compat_robust_list;
struct robust_list __user *entry, *next_entry, *pending;
@@ -3784,7 +4021,8 @@ void compat_exit_robust_list(struct task_struct *curr)
if (entry != pending) {
void __user *uaddr = futex_uaddr(entry, futex_offset);
- if (handle_futex_death(uaddr, curr, pi))
+ if (handle_futex_death(uaddr, curr, pi,
+ HANDLE_DEATH_LIST))
return;
}
if (rc)
@@ -3803,7 +4041,7 @@ void compat_exit_robust_list(struct task_struct *curr)
if (pending) {
void __user *uaddr = futex_uaddr(pending, futex_offset);
- handle_futex_death(uaddr, curr, pip);
+ handle_futex_death(uaddr, curr, pip, HANDLE_DEATH_PENDING);
}
}
diff --git a/kernel/livepatch/Makefile b/kernel/livepatch/Makefile
index cf9b5bcdb952..cf03d4bdfc66 100644
--- a/kernel/livepatch/Makefile
+++ b/kernel/livepatch/Makefile
@@ -1,4 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_LIVEPATCH) += livepatch.o
-livepatch-objs := core.o patch.o shadow.o transition.o
+livepatch-objs := core.o patch.o shadow.o state.o transition.o
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index ab4a4606d19b..c3512e7e0801 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -22,6 +22,7 @@
#include <asm/cacheflush.h>
#include "core.h"
#include "patch.h"
+#include "state.h"
#include "transition.h"
/*
@@ -632,7 +633,7 @@ static void klp_free_objects_dynamic(struct klp_patch *patch)
* The operation must be completed by calling klp_free_patch_finish()
* outside klp_mutex.
*/
-void klp_free_patch_start(struct klp_patch *patch)
+static void klp_free_patch_start(struct klp_patch *patch)
{
if (!list_empty(&patch->list))
list_del(&patch->list);
@@ -677,6 +678,23 @@ static void klp_free_patch_work_fn(struct work_struct *work)
klp_free_patch_finish(patch);
}
+void klp_free_patch_async(struct klp_patch *patch)
+{
+ klp_free_patch_start(patch);
+ schedule_work(&patch->free_work);
+}
+
+void klp_free_replaced_patches_async(struct klp_patch *new_patch)
+{
+ struct klp_patch *old_patch, *tmp_patch;
+
+ klp_for_each_patch_safe(old_patch, tmp_patch) {
+ if (old_patch == new_patch)
+ return;
+ klp_free_patch_async(old_patch);
+ }
+}
+
static int klp_init_func(struct klp_object *obj, struct klp_func *func)
{
if (!func->old_name)
@@ -992,6 +1010,13 @@ int klp_enable_patch(struct klp_patch *patch)
mutex_lock(&klp_mutex);
+ if (!klp_is_patch_compatible(patch)) {
+ pr_err("Livepatch patch (%s) is not compatible with the already installed livepatches.\n",
+ patch->mod->name);
+ mutex_unlock(&klp_mutex);
+ return -EINVAL;
+ }
+
ret = klp_init_patch_early(patch);
if (ret) {
mutex_unlock(&klp_mutex);
@@ -1022,12 +1047,13 @@ err:
EXPORT_SYMBOL_GPL(klp_enable_patch);
/*
- * This function removes replaced patches.
+ * This function unpatches objects from the replaced livepatches.
*
* We could be pretty aggressive here. It is called in the situation where
- * these structures are no longer accessible. All functions are redirected
- * by the klp_transition_patch. They use either a new code or they are in
- * the original code because of the special nop function patches.
+ * these structures are no longer accessed from the ftrace handler.
+ * All functions are redirected by the klp_transition_patch. They
+ * use either a new code or they are in the original code because
+ * of the special nop function patches.
*
* The only exception is when the transition was forced. In this case,
* klp_ftrace_handler() might still see the replaced patch on the stack.
@@ -1035,18 +1061,16 @@ EXPORT_SYMBOL_GPL(klp_enable_patch);
* thanks to RCU. We only have to keep the patches on the system. Also
* this is handled transparently by patch->module_put.
*/
-void klp_discard_replaced_patches(struct klp_patch *new_patch)
+void klp_unpatch_replaced_patches(struct klp_patch *new_patch)
{
- struct klp_patch *old_patch, *tmp_patch;
+ struct klp_patch *old_patch;
- klp_for_each_patch_safe(old_patch, tmp_patch) {
+ klp_for_each_patch(old_patch) {
if (old_patch == new_patch)
return;
old_patch->enabled = false;
klp_unpatch_objects(old_patch);
- klp_free_patch_start(old_patch);
- schedule_work(&old_patch->free_work);
}
}
diff --git a/kernel/livepatch/core.h b/kernel/livepatch/core.h
index ec43a40b853f..38209c7361b6 100644
--- a/kernel/livepatch/core.h
+++ b/kernel/livepatch/core.h
@@ -13,8 +13,9 @@ extern struct list_head klp_patches;
#define klp_for_each_patch(patch) \
list_for_each_entry(patch, &klp_patches, list)
-void klp_free_patch_start(struct klp_patch *patch);
-void klp_discard_replaced_patches(struct klp_patch *new_patch);
+void klp_free_patch_async(struct klp_patch *patch);
+void klp_free_replaced_patches_async(struct klp_patch *new_patch);
+void klp_unpatch_replaced_patches(struct klp_patch *new_patch);
void klp_discard_nops(struct klp_patch *new_patch);
static inline bool klp_is_object_loaded(struct klp_object *obj)
diff --git a/kernel/livepatch/state.c b/kernel/livepatch/state.c
new file mode 100644
index 000000000000..7ee19476de9d
--- /dev/null
+++ b/kernel/livepatch/state.c
@@ -0,0 +1,119 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * state.c - State of the system modified by livepatches
+ *
+ * Copyright (C) 2019 SUSE
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/livepatch.h>
+#include "core.h"
+#include "state.h"
+#include "transition.h"
+
+#define klp_for_each_state(patch, state) \
+ for (state = patch->states; state && state->id; state++)
+
+/**
+ * klp_get_state() - get information about system state modified by
+ * the given patch
+ * @patch: livepatch that modifies the given system state
+ * @id: custom identifier of the modified system state
+ *
+ * Checks whether the given patch modifies the given system state.
+ *
+ * The function can be called either from pre/post (un)patch
+ * callbacks or from the kernel code added by the livepatch.
+ *
+ * Return: pointer to struct klp_state when found, otherwise NULL.
+ */
+struct klp_state *klp_get_state(struct klp_patch *patch, unsigned long id)
+{
+ struct klp_state *state;
+
+ klp_for_each_state(patch, state) {
+ if (state->id == id)
+ return state;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(klp_get_state);
+
+/**
+ * klp_get_prev_state() - get information about system state modified by
+ * the already installed livepatches
+ * @id: custom identifier of the modified system state
+ *
+ * Checks whether already installed livepatches modify the given
+ * system state.
+ *
+ * The same system state can be modified by multiple non-cumulative
+ * livepatches. It is expected that the latest livepatch has
+ * the most up-to-date information.
+ *
+ * The function can be called only during transition when a new
+ * livepatch is being enabled or when such a transition is reverted.
+ * It is typically called only from pre/post (un)patch
+ * callbacks.
+ *
+ * Return: pointer to the latest struct klp_state from already
+ * installed livepatches, NULL when not found.
+ */
+struct klp_state *klp_get_prev_state(unsigned long id)
+{
+ struct klp_patch *patch;
+ struct klp_state *state, *last_state = NULL;
+
+ if (WARN_ON_ONCE(!klp_transition_patch))
+ return NULL;
+
+ klp_for_each_patch(patch) {
+ if (patch == klp_transition_patch)
+ goto out;
+
+ state = klp_get_state(patch, id);
+ if (state)
+ last_state = state;
+ }
+
+out:
+ return last_state;
+}
+EXPORT_SYMBOL_GPL(klp_get_prev_state);
+
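A hypothetical consumer, with every identifier invented for illustration: a cumulative livepatch that declares version 2 of a system state and, in its post-patch callback, inherits the bookkeeping left behind by the previous patch's version 1.

#include <linux/livepatch.h>

#define DEMO_STATE_ID	1		/* invented state identifier */

static struct klp_patch demo_patch;	/* .states = demo_states, filled in elsewhere */

static struct klp_state demo_states[] = {
	{ .id = DEMO_STATE_ID, .version = 2 },
	{}
};

static void demo_post_patch(struct klp_object *obj)
{
	struct klp_state *state = klp_get_state(&demo_patch, DEMO_STATE_ID);
	struct klp_state *prev = klp_get_prev_state(DEMO_STATE_ID);

	if (state && prev)
		state->data = prev->data;	/* take over v1's data */
}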
+/* Check if the patch is able to deal with the existing system state. */
+static bool klp_is_state_compatible(struct klp_patch *patch,
+ struct klp_state *old_state)
+{
+ struct klp_state *state;
+
+ state = klp_get_state(patch, old_state->id);
+
+ /* A cumulative livepatch must handle all already modified states. */
+ if (!state)
+ return !patch->replace;
+
+ return state->version >= old_state->version;
+}
+
+/*
+ * Check that the new livepatch will not break the existing system states.
+ * Cumulative patches must handle all already modified states.
+ * Non-cumulative patches can touch already modified states.
+ */
+bool klp_is_patch_compatible(struct klp_patch *patch)
+{
+ struct klp_patch *old_patch;
+ struct klp_state *old_state;
+
+ klp_for_each_patch(old_patch) {
+ klp_for_each_state(old_patch, old_state) {
+ if (!klp_is_state_compatible(patch, old_state))
+ return false;
+ }
+ }
+
+ return true;
+}
diff --git a/kernel/livepatch/state.h b/kernel/livepatch/state.h
new file mode 100644
index 000000000000..49d9c16e8762
--- /dev/null
+++ b/kernel/livepatch/state.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LIVEPATCH_STATE_H
+#define _LIVEPATCH_STATE_H
+
+#include <linux/livepatch.h>
+
+bool klp_is_patch_compatible(struct klp_patch *patch);
+
+#endif /* _LIVEPATCH_STATE_H */
diff --git a/kernel/livepatch/transition.c b/kernel/livepatch/transition.c
index cdf318d86dd6..f6310f848f34 100644
--- a/kernel/livepatch/transition.c
+++ b/kernel/livepatch/transition.c
@@ -78,7 +78,7 @@ static void klp_complete_transition(void)
klp_target_state == KLP_PATCHED ? "patching" : "unpatching");
if (klp_transition_patch->replace && klp_target_state == KLP_PATCHED) {
- klp_discard_replaced_patches(klp_transition_patch);
+ klp_unpatch_replaced_patches(klp_transition_patch);
klp_discard_nops(klp_transition_patch);
}
@@ -446,14 +446,14 @@ void klp_try_complete_transition(void)
klp_complete_transition();
/*
- * It would make more sense to free the patch in
+ * It would make more sense to free the unused patches in
* klp_complete_transition() but it is called also
* from klp_cancel_transition().
*/
- if (!patch->enabled) {
- klp_free_patch_start(patch);
- schedule_work(&patch->free_work);
- }
+ if (!patch->enabled)
+ klp_free_patch_async(patch);
+ else if (patch->replace)
+ klp_free_replaced_patches_async(patch);
}
/*
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 233459c03b5a..32282e7112d3 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -4208,11 +4208,9 @@ static int __lock_downgrade(struct lockdep_map *lock, unsigned long ip)
}
/*
- * Remove the lock to the list of currently held locks - this gets
+ * Remove the lock from the list of currently held locks - this gets
* called on mutex_unlock()/spin_unlock*() (or on a failed
* mutex_lock_interruptible()).
- *
- * @nested is an hysterical artifact, needs a tree wide cleanup.
*/
static int
__lock_release(struct lockdep_map *lock, unsigned long ip)
@@ -4491,8 +4489,7 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
}
EXPORT_SYMBOL_GPL(lock_acquire);
-void lock_release(struct lockdep_map *lock, int nested,
- unsigned long ip)
+void lock_release(struct lockdep_map *lock, unsigned long ip)
{
unsigned long flags;
diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c
index c513031cd7e3..99475a66c94f 100644
--- a/kernel/locking/locktorture.c
+++ b/kernel/locking/locktorture.c
@@ -16,7 +16,6 @@
#include <linux/kthread.h>
#include <linux/sched/rt.h>
#include <linux/spinlock.h>
-#include <linux/rwlock.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/smp.h>
@@ -889,16 +888,16 @@ static int __init lock_torture_init(void)
cxt.nrealwriters_stress = 2 * num_online_cpus();
#ifdef CONFIG_DEBUG_MUTEXES
- if (strncmp(torture_type, "mutex", 5) == 0)
+ if (str_has_prefix(torture_type, "mutex"))
cxt.debug_lock = true;
#endif
#ifdef CONFIG_DEBUG_RT_MUTEXES
- if (strncmp(torture_type, "rtmutex", 7) == 0)
+ if (str_has_prefix(torture_type, "rtmutex"))
cxt.debug_lock = true;
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
- if ((strncmp(torture_type, "spin", 4) == 0) ||
- (strncmp(torture_type, "rw_lock", 7) == 0))
+ if ((str_has_prefix(torture_type, "spin")) ||
+ (str_has_prefix(torture_type, "rw_lock")))
cxt.debug_lock = true;
#endif
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index 468a9b8422e3..54cc5f9286e9 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -733,6 +733,9 @@ static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigne
*/
void __sched mutex_unlock(struct mutex *lock)
{
+#ifdef CONFIG_DEBUG_MUTEXES
+ WARN_ON(in_interrupt());
+#endif
#ifndef CONFIG_DEBUG_LOCK_ALLOC
if (__mutex_unlock_fast(lock))
return;
@@ -1091,7 +1094,7 @@ err:
err_early_kill:
spin_unlock(&lock->wait_lock);
debug_mutex_free_waiter(&waiter);
- mutex_release(&lock->dep_map, 1, ip);
+ mutex_release(&lock->dep_map, ip);
preempt_enable();
return ret;
}
@@ -1225,7 +1228,7 @@ static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigne
DEFINE_WAKE_Q(wake_q);
unsigned long owner;
- mutex_release(&lock->dep_map, 1, ip);
+ mutex_release(&lock->dep_map, ip);
/*
* Release the lock before (potentially) taking the spinlock such that
@@ -1413,6 +1416,7 @@ int __sched mutex_trylock(struct mutex *lock)
#ifdef CONFIG_DEBUG_MUTEXES
DEBUG_LOCKS_WARN_ON(lock->magic != lock);
+ WARN_ON(in_interrupt());
#endif
locked = __mutex_trylock(lock);
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 2874bf556162..851bbb10819d 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1517,7 +1517,7 @@ int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock)
mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
ret = rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, rt_mutex_slowlock);
if (ret)
- mutex_release(&lock->dep_map, 1, _RET_IP_);
+ mutex_release(&lock->dep_map, _RET_IP_);
return ret;
}
@@ -1561,7 +1561,7 @@ rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout)
RT_MUTEX_MIN_CHAINWALK,
rt_mutex_slowlock);
if (ret)
- mutex_release(&lock->dep_map, 1, _RET_IP_);
+ mutex_release(&lock->dep_map, _RET_IP_);
return ret;
}
@@ -1600,7 +1600,7 @@ EXPORT_SYMBOL_GPL(rt_mutex_trylock);
*/
void __sched rt_mutex_unlock(struct rt_mutex *lock)
{
- mutex_release(&lock->dep_map, 1, _RET_IP_);
+ mutex_release(&lock->dep_map, _RET_IP_);
rt_mutex_fastunlock(lock, rt_mutex_slowunlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_unlock);
diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
index eef04551eae7..44e68761f432 100644
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -1504,7 +1504,7 @@ int __sched down_read_killable(struct rw_semaphore *sem)
rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);
if (LOCK_CONTENDED_RETURN(sem, __down_read_trylock, __down_read_killable)) {
- rwsem_release(&sem->dep_map, 1, _RET_IP_);
+ rwsem_release(&sem->dep_map, _RET_IP_);
return -EINTR;
}
@@ -1546,7 +1546,7 @@ int __sched down_write_killable(struct rw_semaphore *sem)
if (LOCK_CONTENDED_RETURN(sem, __down_write_trylock,
__down_write_killable)) {
- rwsem_release(&sem->dep_map, 1, _RET_IP_);
+ rwsem_release(&sem->dep_map, _RET_IP_);
return -EINTR;
}
@@ -1573,7 +1573,7 @@ EXPORT_SYMBOL(down_write_trylock);
*/
void up_read(struct rw_semaphore *sem)
{
- rwsem_release(&sem->dep_map, 1, _RET_IP_);
+ rwsem_release(&sem->dep_map, _RET_IP_);
__up_read(sem);
}
EXPORT_SYMBOL(up_read);
@@ -1583,7 +1583,7 @@ EXPORT_SYMBOL(up_read);
*/
void up_write(struct rw_semaphore *sem)
{
- rwsem_release(&sem->dep_map, 1, _RET_IP_);
+ rwsem_release(&sem->dep_map, _RET_IP_);
__up_write(sem);
}
EXPORT_SYMBOL(up_write);
@@ -1639,7 +1639,7 @@ int __sched down_write_killable_nested(struct rw_semaphore *sem, int subclass)
if (LOCK_CONTENDED_RETURN(sem, __down_write_trylock,
__down_write_killable)) {
- rwsem_release(&sem->dep_map, 1, _RET_IP_);
+ rwsem_release(&sem->dep_map, _RET_IP_);
return -EINTR;
}
diff --git a/kernel/module.c b/kernel/module.c
index ff2d7359a418..acf7962936c4 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -3222,7 +3222,7 @@ static int find_module_sections(struct module *mod, struct load_info *info)
#endif
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
/* sechdrs[0].sh_size is always zero */
- mod->ftrace_callsites = section_objs(info, "__mcount_loc",
+ mod->ftrace_callsites = section_objs(info, FTRACE_CALLSITE_SECTION,
sizeof(*mod->ftrace_callsites),
&mod->num_ftrace_callsites);
#endif
diff --git a/kernel/panic.c b/kernel/panic.c
index f470a038b05b..b69ee9e76cb2 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -671,17 +671,6 @@ EXPORT_SYMBOL(__stack_chk_fail);
#endif
-#ifdef CONFIG_ARCH_HAS_REFCOUNT
-void refcount_error_report(struct pt_regs *regs, const char *err)
-{
- WARN_RATELIMIT(1, "refcount_t %s at %pB in %s[%d], uid/euid: %u/%u\n",
- err, (void *)instruction_pointer(regs),
- current->comm, task_pid_nr(current),
- from_kuid_munged(&init_user_ns, current_uid()),
- from_kuid_munged(&init_user_ns, current_euid()));
-}
-#endif
-
core_param(panic, panic_timeout, int, 0644);
core_param(panic_print, panic_print, ulong, 0644);
core_param(pause_on_oops, pause_on_oops, int, 0644);
diff --git a/kernel/pid.c b/kernel/pid.c
index 0a9f2e437217..2278e249141d 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -157,7 +157,8 @@ void free_pid(struct pid *pid)
call_rcu(&pid->rcu, delayed_put_pid);
}
-struct pid *alloc_pid(struct pid_namespace *ns)
+struct pid *alloc_pid(struct pid_namespace *ns, pid_t *set_tid,
+ size_t set_tid_size)
{
struct pid *pid;
enum pid_type type;
@@ -166,6 +167,17 @@ struct pid *alloc_pid(struct pid_namespace *ns)
struct upid *upid;
int retval = -ENOMEM;
+ /*
+ * set_tid_size contains the size of the set_tid array. Starting at
+ * the most nested currently active PID namespace it tells alloc_pid()
+ * which PID to set for a process in that most nested PID namespace
+ * up to set_tid_size PID namespaces. It does not have to set the PID
+ * for a process in all nested PID namespaces but set_tid_size must
+ * never be greater than the current ns->level + 1.
+ */
+ if (set_tid_size > ns->level + 1)
+ return ERR_PTR(-EINVAL);
+
pid = kmem_cache_alloc(ns->pid_cachep, GFP_KERNEL);
if (!pid)
return ERR_PTR(retval);
@@ -174,24 +186,54 @@ struct pid *alloc_pid(struct pid_namespace *ns)
pid->level = ns->level;
for (i = ns->level; i >= 0; i--) {
- int pid_min = 1;
+ int tid = 0;
+
+ if (set_tid_size) {
+ tid = set_tid[ns->level - i];
+
+ retval = -EINVAL;
+ if (tid < 1 || tid >= pid_max)
+ goto out_free;
+ /*
+ * Also fail if a PID != 1 is requested and
+ * no PID 1 exists.
+ */
+ if (tid != 1 && !tmp->child_reaper)
+ goto out_free;
+ retval = -EPERM;
+ if (!ns_capable(tmp->user_ns, CAP_SYS_ADMIN))
+ goto out_free;
+ set_tid_size--;
+ }
idr_preload(GFP_KERNEL);
spin_lock_irq(&pidmap_lock);
- /*
- * init really needs pid 1, but after reaching the maximum
- * wrap back to RESERVED_PIDS
- */
- if (idr_get_cursor(&tmp->idr) > RESERVED_PIDS)
- pid_min = RESERVED_PIDS;
-
- /*
- * Store a null pointer so find_pid_ns does not find
- * a partially initialized PID (see below).
- */
- nr = idr_alloc_cyclic(&tmp->idr, NULL, pid_min,
- pid_max, GFP_ATOMIC);
+ if (tid) {
+ nr = idr_alloc(&tmp->idr, NULL, tid,
+ tid + 1, GFP_ATOMIC);
+ /*
+ * If ENOSPC is returned it means that the PID is
+			 * already in use. Return EEXIST in that case.
+ */
+ if (nr == -ENOSPC)
+ nr = -EEXIST;
+ } else {
+ int pid_min = 1;
+ /*
+ * init really needs pid 1, but after reaching the
+ * maximum wrap back to RESERVED_PIDS
+ */
+ if (idr_get_cursor(&tmp->idr) > RESERVED_PIDS)
+ pid_min = RESERVED_PIDS;
+
+ /*
+ * Store a null pointer so find_pid_ns does not find
+ * a partially initialized PID (see below).
+ */
+ nr = idr_alloc_cyclic(&tmp->idr, NULL, pid_min,
+ pid_max, GFP_ATOMIC);
+ }
spin_unlock_irq(&pidmap_lock);
idr_preload_end();
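A worked example of the indexing (values invented): for a child created from a namespace at ns->level == 2 with set_tid = { 31, 42, 7 } and set_tid_size == 3, the loop above requests PID 31 at level 2 (the most nested namespace, set_tid[0]), PID 42 at level 1, and PID 7 at level 0 (the init namespace, set_tid[ns->level]).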
@@ -299,7 +341,7 @@ static void __change_pid(struct task_struct *task, enum pid_type type,
*pid_ptr = new;
for (tmp = PIDTYPE_MAX; --tmp >= 0; )
- if (!hlist_empty(&pid->tasks[tmp]))
+ if (pid_has_task(pid, tmp))
return;
free_pid(pid);
@@ -497,7 +539,7 @@ static int pidfd_create(struct pid *pid)
*/
SYSCALL_DEFINE2(pidfd_open, pid_t, pid, unsigned int, flags)
{
- int fd, ret;
+ int fd;
struct pid *p;
if (flags)
@@ -510,13 +552,11 @@ SYSCALL_DEFINE2(pidfd_open, pid_t, pid, unsigned int, flags)
if (!p)
return -ESRCH;
- ret = 0;
- rcu_read_lock();
- if (!pid_task(p, PIDTYPE_TGID))
- ret = -EINVAL;
- rcu_read_unlock();
+ if (pid_has_task(p, PIDTYPE_TGID))
+ fd = pidfd_create(p);
+ else
+ fd = -EINVAL;
- fd = ret ?: pidfd_create(p);
put_pid(p);
return fd;
}
diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
index a6a79f85c81a..d40017e79ebe 100644
--- a/kernel/pid_namespace.c
+++ b/kernel/pid_namespace.c
@@ -26,8 +26,6 @@
static DEFINE_MUTEX(pid_caches_mutex);
static struct kmem_cache *pid_ns_cachep;
-/* MAX_PID_NS_LEVEL is needed for limiting size of 'struct pid' */
-#define MAX_PID_NS_LEVEL 32
/* Write once array, filled from the beginning. */
static struct kmem_cache *pid_cache[MAX_PID_NS_LEVEL];
diff --git a/kernel/power/qos.c b/kernel/power/qos.c
index 04e83fdfbe80..a45cba7df0ae 100644
--- a/kernel/power/qos.c
+++ b/kernel/power/qos.c
@@ -814,6 +814,8 @@ EXPORT_SYMBOL_GPL(freq_qos_update_request);
*/
int freq_qos_remove_request(struct freq_qos_request *req)
{
+ int ret;
+
if (!req)
return -EINVAL;
@@ -821,7 +823,11 @@ int freq_qos_remove_request(struct freq_qos_request *req)
"%s() called for unknown object\n", __func__))
return -EINVAL;
- return freq_qos_apply(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
+ ret = freq_qos_apply(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
+ req->qos = NULL;
+ req->type = 0;
+
+ return ret;
}
EXPORT_SYMBOL_GPL(freq_qos_remove_request);
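The point of clearing req->qos and req->type above is to make a removed
request look inactive again. A hedged sketch of the double-remove hazard this
closes, assuming freq_qos_request_active() keys off req->qos as the WARN in
the hunk suggests:

	/* Before the fix, the second call operated on a stale req->qos. */
	freq_qos_remove_request(req);	/* removes it and now NULLs req->qos */
	freq_qos_remove_request(req);	/* trips the "unknown object" WARN and
					 * returns -EINVAL instead of touching
					 * the constraint list again */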
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 83105874f255..26b9168321e7 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -734,8 +734,15 @@ zone_found:
* We have found the zone. Now walk the radix tree to find the leaf node
* for our PFN.
*/
+
+ /*
+	 * If the zone we wish to scan is the current zone and the
+	 * pfn falls into the current node, then we do not need to
+	 * walk the tree.
+ */
node = bm->cur.node;
- if (((pfn - zone->start_pfn) & ~BM_BLOCK_MASK) == bm->cur.node_pfn)
+ if (zone == bm->cur.zone &&
+ ((pfn - zone->start_pfn) & ~BM_BLOCK_MASK) == bm->cur.node_pfn)
goto node_found;
node = zone->rtree;
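The one-node cache in the hibernation bitmap was keyed on the masked PFN
offset alone, and two different zones can produce the same offset. The fix
makes the cache-hit test two-part; reduced to its essence (names follow the
hunk above; walk_radix_tree() is a hypothetical stand-in for the slow path):

	/* Reuse the cached leaf only if both the zone and the block match. */
	bool cache_hit = zone == bm->cur.zone &&
			 ((pfn - zone->start_pfn) & ~BM_BLOCK_MASK) == bm->cur.node_pfn;
	if (!cache_hit)
		node = walk_radix_tree(zone, pfn);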
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index ca65327a6de8..c8be5a0f5259 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -248,7 +248,7 @@ static void __up_console_sem(unsigned long ip)
{
unsigned long flags;
- mutex_release(&console_lock_dep_map, 1, ip);
+ mutex_release(&console_lock_dep_map, ip);
printk_safe_enter_irqsave(flags);
up(&console_sem);
@@ -1679,20 +1679,20 @@ static int console_lock_spinning_disable_and_check(void)
raw_spin_unlock(&console_owner_lock);
if (!waiter) {
- spin_release(&console_owner_dep_map, 1, _THIS_IP_);
+ spin_release(&console_owner_dep_map, _THIS_IP_);
return 0;
}
/* The waiter is now free to continue */
WRITE_ONCE(console_waiter, false);
- spin_release(&console_owner_dep_map, 1, _THIS_IP_);
+ spin_release(&console_owner_dep_map, _THIS_IP_);
/*
* Hand off console_lock to waiter. The waiter will perform
* the up(). After this, the waiter is the console_lock owner.
*/
- mutex_release(&console_lock_dep_map, 1, _THIS_IP_);
+ mutex_release(&console_lock_dep_map, _THIS_IP_);
return 1;
}
@@ -1746,7 +1746,7 @@ static int console_trylock_spinning(void)
/* Owner will clear console_waiter on hand off */
while (READ_ONCE(console_waiter))
cpu_relax();
- spin_release(&console_owner_dep_map, 1, _THIS_IP_);
+ spin_release(&console_owner_dep_map, _THIS_IP_);
printk_safe_exit_irqrestore(flags);
/*
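All of the printk changes above follow one mechanical, tree-wide lockdep API
change: the unused "nested" argument was dropped from the release hooks. The
rewrite pattern, shown once:

	mutex_release(&map, 1, _THIS_IP_);	/* old: middle argument was ignored */
	mutex_release(&map, _THIS_IP_);		/* new */
	spin_release(&map, _THIS_IP_);		/* same shape for spinlocks */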
diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h
index 8fd4f82c9b3d..ab504fbc76ca 100644
--- a/kernel/rcu/rcu.h
+++ b/kernel/rcu/rcu.h
@@ -299,6 +299,8 @@ static inline void rcu_init_levelspread(int *levelspread, const int *levelcnt)
{
int i;
+ for (i = 0; i < RCU_NUM_LVLS; i++)
+ levelspread[i] = INT_MIN;
if (rcu_fanout_exact) {
levelspread[rcu_num_lvls - 1] = rcu_fanout_leaf;
for (i = rcu_num_lvls - 2; i >= 0; i--)
@@ -455,7 +457,6 @@ enum rcutorture_type {
#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU)
void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
unsigned long *gp_seq);
-void rcutorture_record_progress(unsigned long vernum);
void do_trace_rcu_torture_read(const char *rcutorturename,
struct rcu_head *rhp,
unsigned long secs,
@@ -468,7 +469,6 @@ static inline void rcutorture_get_gp_data(enum rcutorture_type test_type,
*flags = 0;
*gp_seq = 0;
}
-static inline void rcutorture_record_progress(unsigned long vernum) { }
#ifdef CONFIG_RCU_TRACE
void do_trace_rcu_torture_read(const char *rcutorturename,
struct rcu_head *rhp,
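Pre-filling levelspread[] with INT_MIN in the rcu.h hunk above is a
poison-then-fill pattern: any level the fanout computation never writes keeps
a value no valid spread can have, so a consumer can assert on it instead of
silently using uninitialized data. A sketch of the idea (the WARN is a
hypothetical consumer, not part of this hunk):

	int levelspread[RCU_NUM_LVLS];
	int i;

	for (i = 0; i < RCU_NUM_LVLS; i++)
		levelspread[i] = INT_MIN;		/* poison every slot */
	/* ... fill only levels 0 .. rcu_num_lvls - 1 ... */
	WARN_ON_ONCE(levelspread[level] == INT_MIN);	/* fails loudly, not silently */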
diff --git a/kernel/rcu/rcu_segcblist.c b/kernel/rcu/rcu_segcblist.c
index 495c58ce1640..cbc87b804db9 100644
--- a/kernel/rcu/rcu_segcblist.c
+++ b/kernel/rcu/rcu_segcblist.c
@@ -88,7 +88,7 @@ struct rcu_head *rcu_cblist_dequeue(struct rcu_cblist *rclp)
}
/* Set the length of an rcu_segcblist structure. */
-void rcu_segcblist_set_len(struct rcu_segcblist *rsclp, long v)
+static void rcu_segcblist_set_len(struct rcu_segcblist *rsclp, long v)
{
#ifdef CONFIG_RCU_NOCB_CPU
atomic_long_set(&rsclp->len, v);
@@ -104,7 +104,7 @@ void rcu_segcblist_set_len(struct rcu_segcblist *rsclp, long v)
* This increase is fully ordered with respect to the callers accesses
* both before and after.
*/
-void rcu_segcblist_add_len(struct rcu_segcblist *rsclp, long v)
+static void rcu_segcblist_add_len(struct rcu_segcblist *rsclp, long v)
{
#ifdef CONFIG_RCU_NOCB_CPU
smp_mb__before_atomic(); /* Up to the caller! */
@@ -134,7 +134,7 @@ void rcu_segcblist_inc_len(struct rcu_segcblist *rsclp)
* with the actual number of callbacks on the structure. This exchange is
* fully ordered with respect to the callers accesses both before and after.
*/
-long rcu_segcblist_xchg_len(struct rcu_segcblist *rsclp, long v)
+static long rcu_segcblist_xchg_len(struct rcu_segcblist *rsclp, long v)
{
#ifdef CONFIG_RCU_NOCB_CPU
return atomic_long_xchg(&rsclp->len, v);
diff --git a/kernel/rcu/rcuperf.c b/kernel/rcu/rcuperf.c
index 5a879d073c1c..5f884d560384 100644
--- a/kernel/rcu/rcuperf.c
+++ b/kernel/rcu/rcuperf.c
@@ -109,15 +109,6 @@ static unsigned long b_rcu_perf_writer_started;
static unsigned long b_rcu_perf_writer_finished;
static DEFINE_PER_CPU(atomic_t, n_async_inflight);
-static int rcu_perf_writer_state;
-#define RTWS_INIT 0
-#define RTWS_ASYNC 1
-#define RTWS_BARRIER 2
-#define RTWS_EXP_SYNC 3
-#define RTWS_SYNC 4
-#define RTWS_IDLE 5
-#define RTWS_STOPPING 6
-
#define MAX_MEAS 10000
#define MIN_MEAS 100
@@ -404,25 +395,20 @@ retry:
if (!rhp)
rhp = kmalloc(sizeof(*rhp), GFP_KERNEL);
if (rhp && atomic_read(this_cpu_ptr(&n_async_inflight)) < gp_async_max) {
- rcu_perf_writer_state = RTWS_ASYNC;
atomic_inc(this_cpu_ptr(&n_async_inflight));
cur_ops->async(rhp, rcu_perf_async_cb);
rhp = NULL;
} else if (!kthread_should_stop()) {
- rcu_perf_writer_state = RTWS_BARRIER;
cur_ops->gp_barrier();
goto retry;
} else {
kfree(rhp); /* Because we are stopping. */
}
} else if (gp_exp) {
- rcu_perf_writer_state = RTWS_EXP_SYNC;
cur_ops->exp_sync();
} else {
- rcu_perf_writer_state = RTWS_SYNC;
cur_ops->sync();
}
- rcu_perf_writer_state = RTWS_IDLE;
t = ktime_get_mono_fast_ns();
*wdp = t - *wdp;
i_max = i;
@@ -463,10 +449,8 @@ retry:
rcu_perf_wait_shutdown();
} while (!torture_must_stop());
if (gp_async) {
- rcu_perf_writer_state = RTWS_BARRIER;
cur_ops->gp_barrier();
}
- rcu_perf_writer_state = RTWS_STOPPING;
writer_n_durations[me] = i_max;
torture_kthread_stopping("rcu_perf_writer");
return 0;
diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
index 3c9feca1eab1..dee043feb71f 100644
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
@@ -44,6 +44,7 @@
#include <linux/sched/debug.h>
#include <linux/sched/sysctl.h>
#include <linux/oom.h>
+#include <linux/tick.h>
#include "rcu.h"
@@ -1363,15 +1364,15 @@ rcu_torture_reader(void *arg)
set_user_nice(current, MAX_NICE);
if (irqreader && cur_ops->irq_capable)
timer_setup_on_stack(&t, rcu_torture_timer, 0);
-
+ tick_dep_set_task(current, TICK_DEP_BIT_RCU);
do {
if (irqreader && cur_ops->irq_capable) {
if (!timer_pending(&t))
mod_timer(&t, jiffies + 1);
}
- if (!rcu_torture_one_read(&rand))
+ if (!rcu_torture_one_read(&rand) && !torture_must_stop())
schedule_timeout_interruptible(HZ);
- if (time_after(jiffies, lastsleep)) {
+ if (time_after(jiffies, lastsleep) && !torture_must_stop()) {
schedule_timeout_interruptible(1);
lastsleep = jiffies + 10;
}
@@ -1383,6 +1384,7 @@ rcu_torture_reader(void *arg)
del_timer_sync(&t);
destroy_timer_on_stack(&t);
}
+ tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
torture_kthread_stopping("rcu_torture_reader");
return 0;
}
@@ -1442,15 +1444,18 @@ rcu_torture_stats_print(void)
n_rcu_torture_barrier_error);
pr_alert("%s%s ", torture_type, TORTURE_FLAG);
- if (atomic_read(&n_rcu_torture_mberror) != 0 ||
- n_rcu_torture_barrier_error != 0 ||
- n_rcu_torture_boost_ktrerror != 0 ||
- n_rcu_torture_boost_rterror != 0 ||
- n_rcu_torture_boost_failure != 0 ||
+ if (atomic_read(&n_rcu_torture_mberror) ||
+ n_rcu_torture_barrier_error || n_rcu_torture_boost_ktrerror ||
+ n_rcu_torture_boost_rterror || n_rcu_torture_boost_failure ||
i > 1) {
pr_cont("%s", "!!! ");
atomic_inc(&n_rcu_torture_error);
- WARN_ON_ONCE(1);
+ WARN_ON_ONCE(atomic_read(&n_rcu_torture_mberror));
+ WARN_ON_ONCE(n_rcu_torture_barrier_error); // rcu_barrier()
+ WARN_ON_ONCE(n_rcu_torture_boost_ktrerror); // no boost kthread
+ WARN_ON_ONCE(n_rcu_torture_boost_rterror); // can't set RT prio
+ WARN_ON_ONCE(n_rcu_torture_boost_failure); // RCU boost failed
+ WARN_ON_ONCE(i > 1); // Too-short grace period
}
pr_cont("Reader Pipe: ");
for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
@@ -1729,10 +1734,10 @@ static void rcu_torture_fwd_prog_cond_resched(unsigned long iter)
// Real call_rcu() floods hit userspace, so emulate that.
if (need_resched() || (iter & 0xfff))
schedule();
- } else {
- // No userspace emulation: CB invocation throttles call_rcu()
- cond_resched();
+ return;
}
+ // No userspace emulation: CB invocation throttles call_rcu()
+ cond_resched();
}
/*
@@ -1759,6 +1764,11 @@ static unsigned long rcu_torture_fwd_prog_cbfree(void)
kfree(rfcp);
freed++;
rcu_torture_fwd_prog_cond_resched(freed);
+ if (tick_nohz_full_enabled()) {
+ local_irq_save(flags);
+ rcu_momentary_dyntick_idle();
+ local_irq_restore(flags);
+ }
}
return freed;
}
@@ -1803,7 +1813,7 @@ static void rcu_torture_fwd_prog_nr(int *tested, int *tested_tries)
udelay(10);
cur_ops->readunlock(idx);
if (!fwd_progress_need_resched || need_resched())
- rcu_torture_fwd_prog_cond_resched(1);
+ cond_resched();
}
(*tested_tries)++;
if (!time_before(jiffies, stopat) &&
@@ -1833,6 +1843,7 @@ static void rcu_torture_fwd_prog_nr(int *tested, int *tested_tries)
static void rcu_torture_fwd_prog_cr(void)
{
unsigned long cver;
+ unsigned long flags;
unsigned long gps;
int i;
long n_launders;
@@ -1865,6 +1876,7 @@ static void rcu_torture_fwd_prog_cr(void)
cver = READ_ONCE(rcu_torture_current_version);
gps = cur_ops->get_gp_seq();
rcu_launder_gp_seq_start = gps;
+ tick_dep_set_task(current, TICK_DEP_BIT_RCU);
while (time_before(jiffies, stopat) &&
!shutdown_time_arrived() &&
!READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
@@ -1891,6 +1903,11 @@ static void rcu_torture_fwd_prog_cr(void)
}
cur_ops->call(&rfcp->rh, rcu_torture_fwd_cb_cr);
rcu_torture_fwd_prog_cond_resched(n_launders + n_max_cbs);
+ if (tick_nohz_full_enabled()) {
+ local_irq_save(flags);
+ rcu_momentary_dyntick_idle();
+ local_irq_restore(flags);
+ }
}
stoppedat = jiffies;
n_launders_cb_snap = READ_ONCE(n_launders_cb);
@@ -1911,6 +1928,7 @@ static void rcu_torture_fwd_prog_cr(void)
rcu_torture_fwd_cb_hist();
}
schedule_timeout_uninterruptible(HZ); /* Let CBs drain. */
+ tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
WRITE_ONCE(rcu_fwd_cb_nodelay, false);
}
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 81105141b6a8..1694a6b57ad8 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -364,7 +364,7 @@ bool rcu_eqs_special_set(int cpu)
*
* The caller must have disabled interrupts and must not be idle.
*/
-static void __maybe_unused rcu_momentary_dyntick_idle(void)
+void rcu_momentary_dyntick_idle(void)
{
int special;
@@ -375,6 +375,7 @@ static void __maybe_unused rcu_momentary_dyntick_idle(void)
WARN_ON_ONCE(!(special & RCU_DYNTICK_CTRL_CTR));
rcu_preempt_deferred_qs(current);
}
+EXPORT_SYMBOL_GPL(rcu_momentary_dyntick_idle);
/**
* rcu_is_cpu_rrupt_from_idle - see if interrupted from idle
@@ -496,7 +497,7 @@ module_param_cb(jiffies_till_next_fqs, &next_fqs_jiffies_ops, &jiffies_till_next
module_param(rcu_kick_kthreads, bool, 0644);
static void force_qs_rnp(int (*f)(struct rcu_data *rdp));
-static int rcu_pending(void);
+static int rcu_pending(int user);
/*
* Return the number of RCU GPs completed thus far for debug & stats.
@@ -824,6 +825,11 @@ static __always_inline void rcu_nmi_enter_common(bool irq)
rcu_cleanup_after_idle();
incby = 1;
+ } else if (tick_nohz_full_cpu(rdp->cpu) &&
+ rdp->dynticks_nmi_nesting == DYNTICK_IRQ_NONIDLE &&
+ READ_ONCE(rdp->rcu_urgent_qs) && !rdp->rcu_forced_tick) {
+ rdp->rcu_forced_tick = true;
+ tick_dep_set_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
}
trace_rcu_dyntick(incby == 1 ? TPS("Endirq") : TPS("++="),
rdp->dynticks_nmi_nesting,
@@ -885,6 +891,21 @@ void rcu_irq_enter_irqson(void)
local_irq_restore(flags);
}
+/*
+ * If any sort of urgency was applied to the current CPU (for example,
+ * the scheduler-clock interrupt was enabled on a nohz_full CPU) in order
+ * to get to a quiescent state, disable it.
+ */
+static void rcu_disable_urgency_upon_qs(struct rcu_data *rdp)
+{
+ WRITE_ONCE(rdp->rcu_urgent_qs, false);
+ WRITE_ONCE(rdp->rcu_need_heavy_qs, false);
+ if (tick_nohz_full_cpu(rdp->cpu) && rdp->rcu_forced_tick) {
+ tick_dep_clear_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
+ rdp->rcu_forced_tick = false;
+ }
+}
+
/**
* rcu_is_watching - see if RCU thinks that the current CPU is not idle
*
@@ -1073,6 +1094,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
if (tick_nohz_full_cpu(rdp->cpu) &&
time_after(jiffies,
READ_ONCE(rdp->last_fqs_resched) + jtsq * 3)) {
+ WRITE_ONCE(*ruqp, true);
resched_cpu(rdp->cpu);
WRITE_ONCE(rdp->last_fqs_resched, jiffies);
}
@@ -1968,7 +1990,6 @@ rcu_report_qs_rdp(int cpu, struct rcu_data *rdp)
return;
}
mask = rdp->grpmask;
- rdp->core_needs_qs = false;
if ((rnp->qsmask & mask) == 0) {
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
} else {
@@ -1979,6 +2000,7 @@ rcu_report_qs_rdp(int cpu, struct rcu_data *rdp)
if (!offloaded)
needwake = rcu_accelerate_cbs(rnp, rdp);
+ rcu_disable_urgency_upon_qs(rdp);
rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
/* ^^^ Released rnp->lock */
if (needwake)
@@ -2101,6 +2123,9 @@ int rcutree_dead_cpu(unsigned int cpu)
rcu_boost_kthread_setaffinity(rnp, -1);
/* Do any needed no-CB deferred wakeups from this CPU. */
do_nocb_deferred_wakeup(per_cpu_ptr(&rcu_data, cpu));
+
+ // Stop-machine done, so allow nohz_full to disable tick.
+ tick_dep_clear(TICK_DEP_BIT_RCU);
return 0;
}
@@ -2151,6 +2176,7 @@ static void rcu_do_batch(struct rcu_data *rdp)
rcu_nocb_unlock_irqrestore(rdp, flags);
/* Invoke callbacks. */
+ tick_dep_set_task(current, TICK_DEP_BIT_RCU);
rhp = rcu_cblist_dequeue(&rcl);
for (; rhp; rhp = rcu_cblist_dequeue(&rcl)) {
debug_rcu_head_unqueue(rhp);
@@ -2217,6 +2243,7 @@ static void rcu_do_batch(struct rcu_data *rdp)
/* Re-invoke RCU core processing if there are callbacks remaining. */
if (!offloaded && rcu_segcblist_ready_cbs(&rdp->cblist))
invoke_rcu_core();
+ tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
}
/*
@@ -2241,7 +2268,7 @@ void rcu_sched_clock_irq(int user)
__this_cpu_write(rcu_data.rcu_urgent_qs, false);
}
rcu_flavor_sched_clock_irq(user);
- if (rcu_pending())
+ if (rcu_pending(user))
invoke_rcu_core();
trace_rcu_utilization(TPS("End scheduler-tick"));
@@ -2259,6 +2286,7 @@ static void force_qs_rnp(int (*f)(struct rcu_data *rdp))
int cpu;
unsigned long flags;
unsigned long mask;
+ struct rcu_data *rdp;
struct rcu_node *rnp;
rcu_for_each_leaf_node(rnp) {
@@ -2283,8 +2311,11 @@ static void force_qs_rnp(int (*f)(struct rcu_data *rdp))
for_each_leaf_node_possible_cpu(rnp, cpu) {
unsigned long bit = leaf_node_cpu_bit(rnp, cpu);
if ((rnp->qsmask & bit) != 0) {
- if (f(per_cpu_ptr(&rcu_data, cpu)))
+ rdp = per_cpu_ptr(&rcu_data, cpu);
+ if (f(rdp)) {
mask |= bit;
+ rcu_disable_urgency_upon_qs(rdp);
+ }
}
}
if (mask != 0) {
@@ -2312,7 +2343,7 @@ void rcu_force_quiescent_state(void)
rnp = __this_cpu_read(rcu_data.mynode);
for (; rnp != NULL; rnp = rnp->parent) {
ret = (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) ||
- !raw_spin_trylock(&rnp->fqslock);
+ !raw_spin_trylock(&rnp->fqslock);
if (rnp_old != NULL)
raw_spin_unlock(&rnp_old->fqslock);
if (ret)
@@ -2786,8 +2817,9 @@ EXPORT_SYMBOL_GPL(cond_synchronize_rcu);
* CPU-local state are performed first. However, we must check for CPU
* stalls first, else we might not get a chance.
*/
-static int rcu_pending(void)
+static int rcu_pending(int user)
{
+ bool gp_in_progress;
struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
struct rcu_node *rnp = rdp->mynode;
@@ -2798,12 +2830,13 @@ static int rcu_pending(void)
if (rcu_nocb_need_deferred_wakeup(rdp))
return 1;
- /* Is this CPU a NO_HZ_FULL CPU that should ignore RCU? */
- if (rcu_nohz_full_cpu())
+ /* Is this a nohz_full CPU in userspace or idle? (Ignore RCU if so.) */
+ if ((user || rcu_is_cpu_rrupt_from_idle()) && rcu_nohz_full_cpu())
return 0;
/* Is the RCU core waiting for a quiescent state from this CPU? */
- if (rdp->core_needs_qs && !rdp->cpu_no_qs.b.norm)
+ gp_in_progress = rcu_gp_in_progress();
+ if (rdp->core_needs_qs && !rdp->cpu_no_qs.b.norm && gp_in_progress)
return 1;
/* Does this CPU have callbacks ready to invoke? */
@@ -2811,8 +2844,7 @@ static int rcu_pending(void)
return 1;
/* Has RCU gone idle with this CPU needing another grace period? */
- if (!rcu_gp_in_progress() &&
- rcu_segcblist_is_enabled(&rdp->cblist) &&
+ if (!gp_in_progress && rcu_segcblist_is_enabled(&rdp->cblist) &&
(!IS_ENABLED(CONFIG_RCU_NOCB_CPU) ||
!rcu_segcblist_is_offloaded(&rdp->cblist)) &&
!rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
@@ -2845,7 +2877,7 @@ static void rcu_barrier_callback(struct rcu_head *rhp)
{
if (atomic_dec_and_test(&rcu_state.barrier_cpu_count)) {
rcu_barrier_trace(TPS("LastCB"), -1,
- rcu_state.barrier_sequence);
+ rcu_state.barrier_sequence);
complete(&rcu_state.barrier_completion);
} else {
rcu_barrier_trace(TPS("CB"), -1, rcu_state.barrier_sequence);
@@ -2869,7 +2901,7 @@ static void rcu_barrier_func(void *unused)
} else {
debug_rcu_head_unqueue(&rdp->barrier_head);
rcu_barrier_trace(TPS("IRQNQ"), -1,
- rcu_state.barrier_sequence);
+ rcu_state.barrier_sequence);
}
rcu_nocb_unlock(rdp);
}
@@ -2896,7 +2928,7 @@ void rcu_barrier(void)
/* Did someone else do our work for us? */
if (rcu_seq_done(&rcu_state.barrier_sequence, s)) {
rcu_barrier_trace(TPS("EarlyExit"), -1,
- rcu_state.barrier_sequence);
+ rcu_state.barrier_sequence);
smp_mb(); /* caller's subsequent code after above check. */
mutex_unlock(&rcu_state.barrier_mutex);
return;
@@ -2928,11 +2960,11 @@ void rcu_barrier(void)
continue;
if (rcu_segcblist_n_cbs(&rdp->cblist)) {
rcu_barrier_trace(TPS("OnlineQ"), cpu,
- rcu_state.barrier_sequence);
+ rcu_state.barrier_sequence);
smp_call_function_single(cpu, rcu_barrier_func, NULL, 1);
} else {
rcu_barrier_trace(TPS("OnlineNQ"), cpu,
- rcu_state.barrier_sequence);
+ rcu_state.barrier_sequence);
}
}
put_online_cpus();
@@ -3083,6 +3115,9 @@ int rcutree_online_cpu(unsigned int cpu)
return 0; /* Too early in boot for scheduler work. */
sync_sched_exp_online_cleanup(cpu);
rcutree_affinity_setting(cpu, -1);
+
+ // Stop-machine done, so allow nohz_full to disable tick.
+ tick_dep_clear(TICK_DEP_BIT_RCU);
return 0;
}
@@ -3103,6 +3138,9 @@ int rcutree_offline_cpu(unsigned int cpu)
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
rcutree_affinity_setting(cpu, cpu);
+
+ // nohz_full CPUs need the tick for stop-machine to work quickly
+ tick_dep_set(TICK_DEP_BIT_RCU);
return 0;
}
@@ -3148,6 +3186,7 @@ void rcu_cpu_starting(unsigned int cpu)
rdp->rcu_onl_gp_seq = READ_ONCE(rcu_state.gp_seq);
rdp->rcu_onl_gp_flags = READ_ONCE(rcu_state.gp_flags);
if (rnp->qsmask & mask) { /* RCU waiting on incoming CPU? */
+ rcu_disable_urgency_upon_qs(rdp);
/* Report QS -after- changing ->qsmaskinitnext! */
rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
} else {
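The tree.c changes add a forced-tick lifecycle for nohz_full CPUs: when a
grace period becomes urgent, the interrupt-entry path turns the tick on, and
the first reported quiescent state turns it back off. The skeleton, condensed
from the hunks above:

	/* On irq entry, if RCU urgently needs this CPU: */
	if (READ_ONCE(rdp->rcu_urgent_qs) && !rdp->rcu_forced_tick) {
		rdp->rcu_forced_tick = true;
		tick_dep_set_cpu(rdp->cpu, TICK_DEP_BIT_RCU);	/* tick on */
	}

	/* Later, once this CPU passes through a quiescent state: */
	rcu_disable_urgency_upon_qs(rdp);			/* tick off again */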
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index c612f306fe89..055c31781d3a 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -181,6 +181,7 @@ struct rcu_data {
atomic_t dynticks; /* Even value for idle, else odd. */
bool rcu_need_heavy_qs; /* GP old, so heavy quiescent state! */
bool rcu_urgent_qs; /* GP old need light quiescent state. */
+ bool rcu_forced_tick; /* Forced tick to provide QS. */
#ifdef CONFIG_RCU_FAST_NO_HZ
bool all_lazy; /* All CPU's CBs lazy at idle start? */
unsigned long last_accelerate; /* Last jiffy CBs were accelerated. */
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 2defc7fe74c3..fa08d55f7040 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -1946,7 +1946,7 @@ static void nocb_gp_wait(struct rcu_data *my_rdp)
int __maybe_unused cpu = my_rdp->cpu;
unsigned long cur_gp_seq;
unsigned long flags;
- bool gotcbs;
+ bool gotcbs = false;
unsigned long j = jiffies;
bool needwait_gp = false; // This prevents actual uninitialized use.
bool needwake;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 0f2eb3629070..90e4b00ace89 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -16,6 +16,7 @@
#include <asm/tlb.h>
#include "../workqueue_internal.h"
+#include "../../fs/io-wq.h"
#include "../smpboot.h"
#include "pelt.h"
@@ -810,7 +811,7 @@ static inline unsigned int uclamp_bucket_base_value(unsigned int clamp_value)
return UCLAMP_BUCKET_DELTA * uclamp_bucket_id(clamp_value);
}
-static inline enum uclamp_id uclamp_none(enum uclamp_id clamp_id)
+static inline unsigned int uclamp_none(enum uclamp_id clamp_id)
{
if (clamp_id == UCLAMP_MIN)
return 0;
@@ -853,7 +854,7 @@ static inline void uclamp_idle_reset(struct rq *rq, enum uclamp_id clamp_id,
}
static inline
-enum uclamp_id uclamp_rq_max_value(struct rq *rq, enum uclamp_id clamp_id,
+unsigned int uclamp_rq_max_value(struct rq *rq, enum uclamp_id clamp_id,
unsigned int clamp_value)
{
struct uclamp_bucket *bucket = rq->uclamp[clamp_id].bucket;
@@ -918,7 +919,7 @@ uclamp_eff_get(struct task_struct *p, enum uclamp_id clamp_id)
return uc_req;
}
-enum uclamp_id uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id)
+unsigned int uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id)
{
struct uclamp_se uc_eff;
@@ -1065,7 +1066,7 @@ uclamp_update_active(struct task_struct *p, enum uclamp_id clamp_id)
* affecting a valid clamp bucket, the next time it's enqueued,
* it will already see the updated clamp bucket value.
*/
- if (!p->uclamp[clamp_id].active) {
+ if (p->uclamp[clamp_id].active) {
uclamp_rq_dec_id(rq, p, clamp_id);
uclamp_rq_inc_id(rq, p, clamp_id);
}
@@ -3105,7 +3106,7 @@ prepare_lock_switch(struct rq *rq, struct task_struct *next, struct rq_flags *rf
* do an early lockdep release here:
*/
rq_unpin_lock(rq, rf);
- spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
+ spin_release(&rq->lock.dep_map, _THIS_IP_);
#ifdef CONFIG_DEBUG_SPINLOCK
/* this is a valid case when another task releases the spinlock */
rq->lock.owner = next;
@@ -3917,13 +3918,15 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
prev->sched_class == &fair_sched_class) &&
rq->nr_running == rq->cfs.h_nr_running)) {
- p = fair_sched_class.pick_next_task(rq, prev, rf);
+ p = pick_next_task_fair(rq, prev, rf);
if (unlikely(p == RETRY_TASK))
goto restart;
/* Assumes fair_sched_class->next == idle_sched_class */
- if (unlikely(!p))
- p = idle_sched_class.pick_next_task(rq, prev, rf);
+ if (!p) {
+ put_prev_task(rq, prev);
+ p = pick_next_task_idle(rq);
+ }
return p;
}
@@ -3947,7 +3950,7 @@ restart:
put_prev_task(rq, prev);
for_each_class(class) {
- p = class->pick_next_task(rq, NULL, NULL);
+ p = class->pick_next_task(rq);
if (p)
return p;
}
@@ -4112,9 +4115,12 @@ static inline void sched_submit_work(struct task_struct *tsk)
* we disable preemption to avoid it calling schedule() again
* in the possible wakeup of a kworker.
*/
- if (tsk->flags & PF_WQ_WORKER) {
+ if (tsk->flags & (PF_WQ_WORKER | PF_IO_WORKER)) {
preempt_disable();
- wq_worker_sleeping(tsk);
+ if (tsk->flags & PF_WQ_WORKER)
+ wq_worker_sleeping(tsk);
+ else
+ io_wq_worker_sleeping(tsk);
preempt_enable_no_resched();
}
@@ -4131,8 +4137,12 @@ static inline void sched_submit_work(struct task_struct *tsk)
static void sched_update_worker(struct task_struct *tsk)
{
- if (tsk->flags & PF_WQ_WORKER)
- wq_worker_running(tsk);
+ if (tsk->flags & (PF_WQ_WORKER | PF_IO_WORKER)) {
+ if (tsk->flags & PF_WQ_WORKER)
+ wq_worker_running(tsk);
+ else
+ io_wq_worker_running(tsk);
+ }
}
asmlinkage __visible void __sched schedule(void)
@@ -6019,10 +6029,11 @@ void init_idle(struct task_struct *idle, int cpu)
struct rq *rq = cpu_rq(cpu);
unsigned long flags;
+ __sched_fork(0, idle);
+
raw_spin_lock_irqsave(&idle->pi_lock, flags);
raw_spin_lock(&rq->lock);
- __sched_fork(0, idle);
idle->state = TASK_RUNNING;
idle->se.exec_start = sched_clock();
idle->flags |= PF_IDLE;
@@ -6208,7 +6219,7 @@ static struct task_struct *__pick_migrate_task(struct rq *rq)
struct task_struct *next;
for_each_class(class) {
- next = class->pick_next_task(rq, NULL, NULL);
+ next = class->pick_next_task(rq);
if (next) {
next->sched_class->put_prev_task(rq, next);
return next;
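The scheduler hooks above extend the existing kworker notifications to
io_uring's io-wq workers: one flag test at schedule() time decides which pool
hears that its worker is going to sleep or has woken up. The shape of the
dispatch, isolated:

	if (tsk->flags & (PF_WQ_WORKER | PF_IO_WORKER)) {
		preempt_disable();			/* avoid re-entering schedule() */
		if (tsk->flags & PF_WQ_WORKER)
			wq_worker_sleeping(tsk);	/* classic workqueue pool */
		else
			io_wq_worker_sleeping(tsk);	/* io-wq pool */
		preempt_enable_no_resched();
	}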
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index 46ed4e1383e2..d43318a489f2 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -405,27 +405,25 @@ static inline void irqtime_account_process_tick(struct task_struct *p, int user_
/*
* Use precise platform statistics if available:
*/
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
+
# ifndef __ARCH_HAS_VTIME_TASK_SWITCH
-void vtime_common_task_switch(struct task_struct *prev)
+void vtime_task_switch(struct task_struct *prev)
{
if (is_idle_task(prev))
vtime_account_idle(prev);
else
- vtime_account_system(prev);
+ vtime_account_kernel(prev);
vtime_flush(prev);
arch_vtime_task_switch(prev);
}
# endif
-#endif /* CONFIG_VIRT_CPU_ACCOUNTING */
-
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
/*
* Archs that account the whole time spent in the idle task
* (outside irq) as idle time can rely on this and just implement
- * vtime_account_system() and vtime_account_idle(). Archs that
+ * vtime_account_kernel() and vtime_account_idle(). Archs that
* have other meaning of the idle time (s390 only includes the
* time spent by the CPU when it's in low power mode) must override
* vtime_account().
@@ -436,7 +434,7 @@ void vtime_account_irq_enter(struct task_struct *tsk)
if (!in_interrupt() && is_idle_task(tsk))
vtime_account_idle(tsk);
else
- vtime_account_system(tsk);
+ vtime_account_kernel(tsk);
}
EXPORT_SYMBOL_GPL(vtime_account_irq_enter);
#endif /* __ARCH_HAS_VTIME_ACCOUNT */
@@ -477,7 +475,7 @@ void account_process_tick(struct task_struct *p, int user_tick)
u64 cputime, steal;
struct rq *rq = this_rq();
- if (vtime_accounting_cpu_enabled())
+ if (vtime_accounting_enabled_this_cpu())
return;
if (sched_clock_irqtime) {
@@ -711,8 +709,8 @@ static u64 get_vtime_delta(struct vtime *vtime)
return delta - other;
}
-static void __vtime_account_system(struct task_struct *tsk,
- struct vtime *vtime)
+static void vtime_account_system(struct task_struct *tsk,
+ struct vtime *vtime)
{
vtime->stime += get_vtime_delta(vtime);
if (vtime->stime >= TICK_NSEC) {
@@ -731,7 +729,17 @@ static void vtime_account_guest(struct task_struct *tsk,
}
}
-void vtime_account_system(struct task_struct *tsk)
+static void __vtime_account_kernel(struct task_struct *tsk,
+ struct vtime *vtime)
+{
+ /* We might have scheduled out from guest path */
+ if (vtime->state == VTIME_GUEST)
+ vtime_account_guest(tsk, vtime);
+ else
+ vtime_account_system(tsk, vtime);
+}
+
+void vtime_account_kernel(struct task_struct *tsk)
{
struct vtime *vtime = &tsk->vtime;
@@ -739,11 +747,7 @@ void vtime_account_system(struct task_struct *tsk)
return;
write_seqcount_begin(&vtime->seqcount);
- /* We might have scheduled out from guest path */
- if (tsk->flags & PF_VCPU)
- vtime_account_guest(tsk, vtime);
- else
- __vtime_account_system(tsk, vtime);
+ __vtime_account_kernel(tsk, vtime);
write_seqcount_end(&vtime->seqcount);
}
@@ -752,7 +756,7 @@ void vtime_user_enter(struct task_struct *tsk)
struct vtime *vtime = &tsk->vtime;
write_seqcount_begin(&vtime->seqcount);
- __vtime_account_system(tsk, vtime);
+ vtime_account_system(tsk, vtime);
vtime->state = VTIME_USER;
write_seqcount_end(&vtime->seqcount);
}
@@ -782,8 +786,9 @@ void vtime_guest_enter(struct task_struct *tsk)
* that can thus safely catch up with a tickless delta.
*/
write_seqcount_begin(&vtime->seqcount);
- __vtime_account_system(tsk, vtime);
+ vtime_account_system(tsk, vtime);
tsk->flags |= PF_VCPU;
+ vtime->state = VTIME_GUEST;
write_seqcount_end(&vtime->seqcount);
}
EXPORT_SYMBOL_GPL(vtime_guest_enter);
@@ -795,6 +800,7 @@ void vtime_guest_exit(struct task_struct *tsk)
write_seqcount_begin(&vtime->seqcount);
vtime_account_guest(tsk, vtime);
tsk->flags &= ~PF_VCPU;
+ vtime->state = VTIME_SYS;
write_seqcount_end(&vtime->seqcount);
}
EXPORT_SYMBOL_GPL(vtime_guest_exit);
@@ -804,19 +810,30 @@ void vtime_account_idle(struct task_struct *tsk)
account_idle_time(get_vtime_delta(&tsk->vtime));
}
-void arch_vtime_task_switch(struct task_struct *prev)
+void vtime_task_switch_generic(struct task_struct *prev)
{
struct vtime *vtime = &prev->vtime;
write_seqcount_begin(&vtime->seqcount);
+ if (vtime->state == VTIME_IDLE)
+ vtime_account_idle(prev);
+ else
+ __vtime_account_kernel(prev, vtime);
vtime->state = VTIME_INACTIVE;
+ vtime->cpu = -1;
write_seqcount_end(&vtime->seqcount);
vtime = &current->vtime;
write_seqcount_begin(&vtime->seqcount);
- vtime->state = VTIME_SYS;
+ if (is_idle_task(current))
+ vtime->state = VTIME_IDLE;
+ else if (current->flags & PF_VCPU)
+ vtime->state = VTIME_GUEST;
+ else
+ vtime->state = VTIME_SYS;
vtime->starttime = sched_clock();
+ vtime->cpu = smp_processor_id();
write_seqcount_end(&vtime->seqcount);
}
@@ -827,8 +844,9 @@ void vtime_init_idle(struct task_struct *t, int cpu)
local_irq_save(flags);
write_seqcount_begin(&vtime->seqcount);
- vtime->state = VTIME_SYS;
+ vtime->state = VTIME_IDLE;
vtime->starttime = sched_clock();
+ vtime->cpu = cpu;
write_seqcount_end(&vtime->seqcount);
local_irq_restore(flags);
}
@@ -846,7 +864,7 @@ u64 task_gtime(struct task_struct *t)
seq = read_seqcount_begin(&vtime->seqcount);
gtime = t->gtime;
- if (vtime->state == VTIME_SYS && t->flags & PF_VCPU)
+ if (vtime->state == VTIME_GUEST)
gtime += vtime->gtime + vtime_delta(vtime);
} while (read_seqcount_retry(&vtime->seqcount, seq));
@@ -877,20 +895,230 @@ void task_cputime(struct task_struct *t, u64 *utime, u64 *stime)
*utime = t->utime;
*stime = t->stime;
- /* Task is sleeping, nothing to add */
- if (vtime->state == VTIME_INACTIVE || is_idle_task(t))
+ /* Task is sleeping or idle, nothing to add */
+ if (vtime->state < VTIME_SYS)
continue;
delta = vtime_delta(vtime);
/*
- * Task runs either in user or kernel space, add pending nohz time to
- * the right place.
+ * Task runs either in user (including guest) or kernel space,
+ * add pending nohz time to the right place.
*/
- if (vtime->state == VTIME_USER || t->flags & PF_VCPU)
- *utime += vtime->utime + delta;
- else if (vtime->state == VTIME_SYS)
+ if (vtime->state == VTIME_SYS)
*stime += vtime->stime + delta;
+ else
+ *utime += vtime->utime + delta;
+ } while (read_seqcount_retry(&vtime->seqcount, seq));
+}
+
+static int vtime_state_check(struct vtime *vtime, int cpu)
+{
+ /*
+ * We raced against a context switch, fetch the
+ * kcpustat task again.
+ */
+ if (vtime->cpu != cpu && vtime->cpu != -1)
+ return -EAGAIN;
+
+ /*
+ * Two possible things here:
+	 * 1) We are seeing the scheduling-out task (prev) or any past one.
+	 * 2) We are seeing the scheduling-in task (next) but it hasn't
+	 *    passed through vtime_task_switch() yet, so the pending
+	 *    cputime of the prev task may not be flushed yet.
+ *
+ * Case 1) is ok but 2) is not. So wait for a safe VTIME state.
+ */
+ if (vtime->state == VTIME_INACTIVE)
+ return -EAGAIN;
+
+ return 0;
+}
+
+static u64 kcpustat_user_vtime(struct vtime *vtime)
+{
+ if (vtime->state == VTIME_USER)
+ return vtime->utime + vtime_delta(vtime);
+ else if (vtime->state == VTIME_GUEST)
+ return vtime->gtime + vtime_delta(vtime);
+ return 0;
+}
+
+static int kcpustat_field_vtime(u64 *cpustat,
+ struct task_struct *tsk,
+ enum cpu_usage_stat usage,
+ int cpu, u64 *val)
+{
+ struct vtime *vtime = &tsk->vtime;
+ unsigned int seq;
+ int err;
+
+ do {
+ seq = read_seqcount_begin(&vtime->seqcount);
+
+ err = vtime_state_check(vtime, cpu);
+ if (err < 0)
+ return err;
+
+ *val = cpustat[usage];
+
+ /*
+		 * Nice vs. unnice cputime accounting may be inaccurate if
+		 * the nice value has changed since the last vtime update.
+		 * But a proper fix would involve interrupting the target on
+		 * nice updates, which is a no-go on nohz_full (although the
+		 * scheduler may still interrupt the target if rescheduling
+		 * is needed...).
+ */
+ switch (usage) {
+ case CPUTIME_SYSTEM:
+ if (vtime->state == VTIME_SYS)
+ *val += vtime->stime + vtime_delta(vtime);
+ break;
+ case CPUTIME_USER:
+ if (task_nice(tsk) <= 0)
+ *val += kcpustat_user_vtime(vtime);
+ break;
+ case CPUTIME_NICE:
+ if (task_nice(tsk) > 0)
+ *val += kcpustat_user_vtime(vtime);
+ break;
+ case CPUTIME_GUEST:
+ if (vtime->state == VTIME_GUEST && task_nice(tsk) <= 0)
+ *val += vtime->gtime + vtime_delta(vtime);
+ break;
+ case CPUTIME_GUEST_NICE:
+ if (vtime->state == VTIME_GUEST && task_nice(tsk) > 0)
+ *val += vtime->gtime + vtime_delta(vtime);
+ break;
+ default:
+ break;
+ }
+ } while (read_seqcount_retry(&vtime->seqcount, seq));
+
+ return 0;
+}
+
+u64 kcpustat_field(struct kernel_cpustat *kcpustat,
+ enum cpu_usage_stat usage, int cpu)
+{
+ u64 *cpustat = kcpustat->cpustat;
+ struct rq *rq;
+ u64 val;
+ int err;
+
+ if (!vtime_accounting_enabled_cpu(cpu))
+ return cpustat[usage];
+
+ rq = cpu_rq(cpu);
+
+ for (;;) {
+ struct task_struct *curr;
+
+ rcu_read_lock();
+ curr = rcu_dereference(rq->curr);
+ if (WARN_ON_ONCE(!curr)) {
+ rcu_read_unlock();
+ return cpustat[usage];
+ }
+
+ err = kcpustat_field_vtime(cpustat, curr, usage, cpu, &val);
+ rcu_read_unlock();
+
+ if (!err)
+ return val;
+
+ cpu_relax();
+ }
+}
+EXPORT_SYMBOL_GPL(kcpustat_field);
+
+static int kcpustat_cpu_fetch_vtime(struct kernel_cpustat *dst,
+ const struct kernel_cpustat *src,
+ struct task_struct *tsk, int cpu)
+{
+ struct vtime *vtime = &tsk->vtime;
+ unsigned int seq;
+ int err;
+
+ do {
+ u64 *cpustat;
+ u64 delta;
+
+ seq = read_seqcount_begin(&vtime->seqcount);
+
+ err = vtime_state_check(vtime, cpu);
+ if (err < 0)
+ return err;
+
+ *dst = *src;
+ cpustat = dst->cpustat;
+
+ /* Task is sleeping, dead or idle, nothing to add */
+ if (vtime->state < VTIME_SYS)
+ continue;
+
+ delta = vtime_delta(vtime);
+
+ /*
+ * Task runs either in user (including guest) or kernel space,
+ * add pending nohz time to the right place.
+ */
+ if (vtime->state == VTIME_SYS) {
+ cpustat[CPUTIME_SYSTEM] += vtime->stime + delta;
+ } else if (vtime->state == VTIME_USER) {
+ if (task_nice(tsk) > 0)
+ cpustat[CPUTIME_NICE] += vtime->utime + delta;
+ else
+ cpustat[CPUTIME_USER] += vtime->utime + delta;
+ } else {
+ WARN_ON_ONCE(vtime->state != VTIME_GUEST);
+ if (task_nice(tsk) > 0) {
+ cpustat[CPUTIME_GUEST_NICE] += vtime->gtime + delta;
+ cpustat[CPUTIME_NICE] += vtime->gtime + delta;
+ } else {
+ cpustat[CPUTIME_GUEST] += vtime->gtime + delta;
+ cpustat[CPUTIME_USER] += vtime->gtime + delta;
+ }
+ }
} while (read_seqcount_retry(&vtime->seqcount, seq));
+
+ return err;
+}
+
+void kcpustat_cpu_fetch(struct kernel_cpustat *dst, int cpu)
+{
+ const struct kernel_cpustat *src = &kcpustat_cpu(cpu);
+ struct rq *rq;
+ int err;
+
+ if (!vtime_accounting_enabled_cpu(cpu)) {
+ *dst = *src;
+ return;
+ }
+
+ rq = cpu_rq(cpu);
+
+ for (;;) {
+ struct task_struct *curr;
+
+ rcu_read_lock();
+ curr = rcu_dereference(rq->curr);
+ if (WARN_ON_ONCE(!curr)) {
+ rcu_read_unlock();
+ *dst = *src;
+ return;
+ }
+
+ err = kcpustat_cpu_fetch_vtime(dst, src, curr, cpu);
+ rcu_read_unlock();
+
+ if (!err)
+ return;
+
+ cpu_relax();
+ }
}
+EXPORT_SYMBOL_GPL(kcpustat_cpu_fetch);
+
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */
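kcpustat_field() and kcpustat_cpu_fetch() give readers a nohz_full-safe view
of per-CPU cputime: they fold the not-yet-flushed vtime of the CPU's current
task into the raw counters, retrying on the seqcount if they race a context
switch. A hedged usage sketch for a /proc-style consumer:

	u64 user_ns;
	struct kernel_cpustat snap;

	/* One field, e.g. user time of this cpu, in nanoseconds: */
	user_ns = kcpustat_field(&kcpustat_cpu(cpu), CPUTIME_USER, cpu);

	/* Or a consistent snapshot of the whole struct: */
	kcpustat_cpu_fetch(&snap, cpu);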
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index a8a08030a8f7..43323f875cb9 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -1743,13 +1743,16 @@ static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
}
#endif
-static void set_next_task_dl(struct rq *rq, struct task_struct *p)
+static void set_next_task_dl(struct rq *rq, struct task_struct *p, bool first)
{
p->se.exec_start = rq_clock_task(rq);
/* You can't push away the running task */
dequeue_pushable_dl_task(rq, p);
+ if (!first)
+ return;
+
if (hrtick_enabled(rq))
start_hrtick_dl(rq, p);
@@ -1770,22 +1773,19 @@ static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq,
return rb_entry(left, struct sched_dl_entity, rb_node);
}
-static struct task_struct *
-pick_next_task_dl(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
+static struct task_struct *pick_next_task_dl(struct rq *rq)
{
struct sched_dl_entity *dl_se;
struct dl_rq *dl_rq = &rq->dl;
struct task_struct *p;
- WARN_ON_ONCE(prev || rf);
-
if (!sched_dl_runnable(rq))
return NULL;
dl_se = pick_next_dl_entity(rq, dl_rq);
BUG_ON(!dl_se);
p = dl_task_of(dl_se);
- set_next_task_dl(rq, p);
+ set_next_task_dl(rq, p, true);
return p;
}
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 22a2fed29054..08a233e97a01 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -229,8 +229,7 @@ static u64 __calc_delta(u64 delta_exec, unsigned long weight, struct load_weight
}
}
- /* hint to use a 32x32->64 mul */
- fact = (u64)(u32)fact * lw->inv_weight;
+ fact = mul_u32_u32(fact, lw->inv_weight);
while (fact >> 32) {
fact >>= 1;
@@ -1474,7 +1473,12 @@ bool should_numa_migrate_memory(struct task_struct *p, struct page * page,
group_faults_cpu(ng, src_nid) * group_faults(p, dst_nid) * 4;
}
-static unsigned long cpu_runnable_load(struct rq *rq);
+static inline unsigned long cfs_rq_runnable_load_avg(struct cfs_rq *cfs_rq);
+
+static unsigned long cpu_runnable_load(struct rq *rq)
+{
+ return cfs_rq_runnable_load_avg(&rq->cfs);
+}
/* Cached statistics for all CPUs within a node */
struct numa_stats {
@@ -3504,9 +3508,6 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
cfs_rq->load_last_update_time_copy = sa->last_update_time;
#endif
- if (decayed)
- cfs_rq_util_change(cfs_rq, 0);
-
return decayed;
}
@@ -3616,8 +3617,12 @@ static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
attach_entity_load_avg(cfs_rq, se, SCHED_CPUFREQ_MIGRATION);
update_tg_load_avg(cfs_rq, 0);
- } else if (decayed && (flags & UPDATE_TG))
- update_tg_load_avg(cfs_rq, 0);
+ } else if (decayed) {
+ cfs_rq_util_change(cfs_rq, 0);
+
+ if (flags & UPDATE_TG)
+ update_tg_load_avg(cfs_rq, 0);
+ }
}
#ifndef CONFIG_64BIT
@@ -3764,10 +3769,21 @@ util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep)
return;
/*
+ * Reset EWMA on utilization increases, the moving average is used only
+ * to smooth utilization decreases.
+ */
+ ue.enqueued = (task_util(p) | UTIL_AVG_UNCHANGED);
+ if (sched_feat(UTIL_EST_FASTUP)) {
+ if (ue.ewma < ue.enqueued) {
+ ue.ewma = ue.enqueued;
+ goto done;
+ }
+ }
+
+ /*
* Skip update of task's estimated utilization when its EWMA is
* already ~1% close to its last activation value.
*/
- ue.enqueued = (task_util(p) | UTIL_AVG_UNCHANGED);
last_ewma_diff = ue.enqueued - ue.ewma;
if (within_margin(last_ewma_diff, (SCHED_CAPACITY_SCALE / 100)))
return;
@@ -3800,6 +3816,7 @@ util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep)
ue.ewma <<= UTIL_EST_WEIGHT_SHIFT;
ue.ewma += last_ewma_diff;
ue.ewma >>= UTIL_EST_WEIGHT_SHIFT;
+done:
WRITE_ONCE(p->se.avg.util_est, ue);
}
@@ -5370,26 +5387,45 @@ static int sched_idle_cpu(int cpu)
rq->nr_running);
}
-static unsigned long cpu_runnable_load(struct rq *rq)
+static unsigned long cpu_load(struct rq *rq)
{
- return cfs_rq_runnable_load_avg(&rq->cfs);
+ return cfs_rq_load_avg(&rq->cfs);
}
-static unsigned long capacity_of(int cpu)
+/*
+ * cpu_load_without - compute CPU load without any contributions from *p
+ * @cpu: the CPU whose load is requested
+ * @p: the task whose load should be discounted
+ *
+ * The load of a CPU is defined by the load of tasks currently enqueued on that
+ * CPU as well as tasks which are currently sleeping after an execution on that
+ * CPU.
+ *
+ * This method returns the load of the specified CPU by discounting the load of
+ * the specified task, whenever the task is currently contributing to the CPU
+ * load.
+ */
+static unsigned long cpu_load_without(struct rq *rq, struct task_struct *p)
{
- return cpu_rq(cpu)->cpu_capacity;
-}
+ struct cfs_rq *cfs_rq;
+ unsigned int load;
-static unsigned long cpu_avg_load_per_task(int cpu)
-{
- struct rq *rq = cpu_rq(cpu);
- unsigned long nr_running = READ_ONCE(rq->cfs.h_nr_running);
- unsigned long load_avg = cpu_runnable_load(rq);
+ /* Task has no contribution or is new */
+ if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))
+ return cpu_load(rq);
- if (nr_running)
- return load_avg / nr_running;
+ cfs_rq = &rq->cfs;
+ load = READ_ONCE(cfs_rq->avg.load_avg);
- return 0;
+	/* Discount task's load from CPU's load */
+ lsub_positive(&load, task_h_load(p));
+
+ return load;
+}
+
+static unsigned long capacity_of(int cpu)
+{
+ return cpu_rq(cpu)->cpu_capacity;
}
static void record_wakee(struct task_struct *p)
@@ -5482,7 +5518,7 @@ wake_affine_weight(struct sched_domain *sd, struct task_struct *p,
s64 this_eff_load, prev_eff_load;
unsigned long task_load;
- this_eff_load = cpu_runnable_load(cpu_rq(this_cpu));
+ this_eff_load = cpu_load(cpu_rq(this_cpu));
if (sync) {
unsigned long current_load = task_h_load(current);
@@ -5500,7 +5536,7 @@ wake_affine_weight(struct sched_domain *sd, struct task_struct *p,
this_eff_load *= 100;
this_eff_load *= capacity_of(prev_cpu);
- prev_eff_load = cpu_runnable_load(cpu_rq(prev_cpu));
+ prev_eff_load = cpu_load(cpu_rq(prev_cpu));
prev_eff_load -= task_load;
if (sched_feat(WA_BIAS))
prev_eff_load *= 100 + (sd->imbalance_pct - 100) / 2;
@@ -5538,149 +5574,9 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p,
return target;
}
-static unsigned long cpu_util_without(int cpu, struct task_struct *p);
-
-static unsigned long capacity_spare_without(int cpu, struct task_struct *p)
-{
- return max_t(long, capacity_of(cpu) - cpu_util_without(cpu, p), 0);
-}
-
-/*
- * find_idlest_group finds and returns the least busy CPU group within the
- * domain.
- *
- * Assumes p is allowed on at least one CPU in sd.
- */
static struct sched_group *
find_idlest_group(struct sched_domain *sd, struct task_struct *p,
- int this_cpu, int sd_flag)
-{
- struct sched_group *idlest = NULL, *group = sd->groups;
- struct sched_group *most_spare_sg = NULL;
- unsigned long min_runnable_load = ULONG_MAX;
- unsigned long this_runnable_load = ULONG_MAX;
- unsigned long min_avg_load = ULONG_MAX, this_avg_load = ULONG_MAX;
- unsigned long most_spare = 0, this_spare = 0;
- int imbalance_scale = 100 + (sd->imbalance_pct-100)/2;
- unsigned long imbalance = scale_load_down(NICE_0_LOAD) *
- (sd->imbalance_pct-100) / 100;
-
- do {
- unsigned long load, avg_load, runnable_load;
- unsigned long spare_cap, max_spare_cap;
- int local_group;
- int i;
-
- /* Skip over this group if it has no CPUs allowed */
- if (!cpumask_intersects(sched_group_span(group),
- p->cpus_ptr))
- continue;
-
- local_group = cpumask_test_cpu(this_cpu,
- sched_group_span(group));
-
- /*
- * Tally up the load of all CPUs in the group and find
- * the group containing the CPU with most spare capacity.
- */
- avg_load = 0;
- runnable_load = 0;
- max_spare_cap = 0;
-
- for_each_cpu(i, sched_group_span(group)) {
- load = cpu_runnable_load(cpu_rq(i));
- runnable_load += load;
-
- avg_load += cfs_rq_load_avg(&cpu_rq(i)->cfs);
-
- spare_cap = capacity_spare_without(i, p);
-
- if (spare_cap > max_spare_cap)
- max_spare_cap = spare_cap;
- }
-
- /* Adjust by relative CPU capacity of the group */
- avg_load = (avg_load * SCHED_CAPACITY_SCALE) /
- group->sgc->capacity;
- runnable_load = (runnable_load * SCHED_CAPACITY_SCALE) /
- group->sgc->capacity;
-
- if (local_group) {
- this_runnable_load = runnable_load;
- this_avg_load = avg_load;
- this_spare = max_spare_cap;
- } else {
- if (min_runnable_load > (runnable_load + imbalance)) {
- /*
- * The runnable load is significantly smaller
- * so we can pick this new CPU:
- */
- min_runnable_load = runnable_load;
- min_avg_load = avg_load;
- idlest = group;
- } else if ((runnable_load < (min_runnable_load + imbalance)) &&
- (100*min_avg_load > imbalance_scale*avg_load)) {
- /*
- * The runnable loads are close so take the
- * blocked load into account through avg_load:
- */
- min_avg_load = avg_load;
- idlest = group;
- }
-
- if (most_spare < max_spare_cap) {
- most_spare = max_spare_cap;
- most_spare_sg = group;
- }
- }
- } while (group = group->next, group != sd->groups);
-
- /*
- * The cross-over point between using spare capacity or least load
- * is too conservative for high utilization tasks on partially
- * utilized systems if we require spare_capacity > task_util(p),
- * so we allow for some task stuffing by using
- * spare_capacity > task_util(p)/2.
- *
- * Spare capacity can't be used for fork because the utilization has
- * not been set yet, we must first select a rq to compute the initial
- * utilization.
- */
- if (sd_flag & SD_BALANCE_FORK)
- goto skip_spare;
-
- if (this_spare > task_util(p) / 2 &&
- imbalance_scale*this_spare > 100*most_spare)
- return NULL;
-
- if (most_spare > task_util(p) / 2)
- return most_spare_sg;
-
-skip_spare:
- if (!idlest)
- return NULL;
-
- /*
- * When comparing groups across NUMA domains, it's possible for the
- * local domain to be very lightly loaded relative to the remote
- * domains but "imbalance" skews the comparison making remote CPUs
- * look much more favourable. When considering cross-domain, add
- * imbalance to the runnable load on the remote node and consider
- * staying local.
- */
- if ((sd->flags & SD_NUMA) &&
- min_runnable_load + imbalance >= this_runnable_load)
- return NULL;
-
- if (min_runnable_load > (this_runnable_load + imbalance))
- return NULL;
-
- if ((this_runnable_load < (min_runnable_load + imbalance)) &&
- (100*this_avg_load < imbalance_scale*min_avg_load))
- return NULL;
-
- return idlest;
-}
+ int this_cpu, int sd_flag);
/*
* find_idlest_group_cpu - find the idlest CPU among the CPUs in the group.
@@ -5729,7 +5625,7 @@ find_idlest_group_cpu(struct sched_group *group, struct task_struct *p, int this
continue;
}
- load = cpu_runnable_load(cpu_rq(i));
+ load = cpu_load(cpu_rq(i));
if (load < min_load) {
min_load = load;
least_loaded_cpu = i;
@@ -5753,7 +5649,7 @@ static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p
return prev_cpu;
/*
- * We need task's util for capacity_spare_without, sync it up to
+ * We need task's util for cpu_util_without, sync it up to
* prev_cpu's last_update_time.
*/
if (!(sd_flag & SD_BALANCE_FORK))
@@ -6746,7 +6642,7 @@ preempt:
set_last_buddy(se);
}
-static struct task_struct *
+struct task_struct *
pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
{
struct cfs_rq *cfs_rq = &rq->cfs;
@@ -6890,6 +6786,11 @@ idle:
return NULL;
}
+static struct task_struct *__pick_next_task_fair(struct rq *rq)
+{
+ return pick_next_task_fair(rq, NULL, NULL);
+}
+
/*
* Account for a descheduled task:
*/
@@ -7079,11 +6980,49 @@ static unsigned long __read_mostly max_load_balance_interval = HZ/10;
enum fbq_type { regular, remote, all };
+/*
+ * 'group_type' describes the group of CPUs at the moment of load balancing.
+ *
+ * The enum is ordered by pulling priority, with the group with lowest priority
+ * first so the group_type can simply be compared when selecting the busiest
+ * group. See update_sd_pick_busiest().
+ */
enum group_type {
- group_other = 0,
+ /* The group has spare capacity that can be used to run more tasks. */
+ group_has_spare = 0,
+ /*
+ * The group is fully used and the tasks don't compete for more CPU
+ * cycles. Nevertheless, some tasks might wait before running.
+ */
+ group_fully_busy,
+ /*
+	 * SD_ASYM_CPUCAPACITY only: One task doesn't fit the CPU's capacity
+ * and must be migrated to a more powerful CPU.
+ */
group_misfit_task,
+ /*
+ * SD_ASYM_PACKING only: One local CPU with higher capacity is available,
+ * and the task should be migrated to it instead of running on the
+ * current CPU.
+ */
+ group_asym_packing,
+ /*
+ * The tasks' affinity constraints previously prevented the scheduler
+ * from balancing the load across the system.
+ */
group_imbalanced,
- group_overloaded,
+ /*
+ * The CPU is overloaded and can't provide expected CPU cycles to all
+ * tasks.
+ */
+ group_overloaded
+};
+
+enum migration_type {
+ migrate_load = 0,
+ migrate_util,
+ migrate_task,
+ migrate_misfit
};
#define LBF_ALL_PINNED 0x01
@@ -7116,7 +7055,7 @@ struct lb_env {
unsigned int loop_max;
enum fbq_type fbq_type;
- enum group_type src_grp_type;
+ enum migration_type migration_type;
struct list_head tasks;
};
@@ -7339,7 +7278,7 @@ static struct task_struct *detach_one_task(struct lb_env *env)
static const unsigned int sched_nr_migrate_break = 32;
/*
- * detach_tasks() -- tries to detach up to imbalance runnable load from
+ * detach_tasks() -- tries to detach up to imbalance load/util/tasks from
* busiest_rq, as part of a balancing operation within domain "sd".
*
* Returns number of detached tasks if successful and 0 otherwise.
@@ -7347,8 +7286,8 @@ static const unsigned int sched_nr_migrate_break = 32;
static int detach_tasks(struct lb_env *env)
{
struct list_head *tasks = &env->src_rq->cfs_tasks;
+ unsigned long util, load;
struct task_struct *p;
- unsigned long load;
int detached = 0;
lockdep_assert_held(&env->src_rq->lock);
@@ -7381,19 +7320,46 @@ static int detach_tasks(struct lb_env *env)
if (!can_migrate_task(p, env))
goto next;
- load = task_h_load(p);
+ switch (env->migration_type) {
+ case migrate_load:
+ load = task_h_load(p);
- if (sched_feat(LB_MIN) && load < 16 && !env->sd->nr_balance_failed)
- goto next;
+ if (sched_feat(LB_MIN) &&
+ load < 16 && !env->sd->nr_balance_failed)
+ goto next;
- if ((load / 2) > env->imbalance)
- goto next;
+ if (load/2 > env->imbalance)
+ goto next;
+
+ env->imbalance -= load;
+ break;
+
+ case migrate_util:
+ util = task_util_est(p);
+
+ if (util > env->imbalance)
+ goto next;
+
+ env->imbalance -= util;
+ break;
+
+ case migrate_task:
+ env->imbalance--;
+ break;
+
+ case migrate_misfit:
+ /* This is not a misfit task */
+ if (task_fits_capacity(p, capacity_of(env->src_cpu)))
+ goto next;
+
+ env->imbalance = 0;
+ break;
+ }
detach_task(p, env);
list_add(&p->se.group_node, &env->tasks);
detached++;
- env->imbalance -= load;
#ifdef CONFIG_PREEMPTION
/*
@@ -7407,7 +7373,7 @@ static int detach_tasks(struct lb_env *env)
/*
* We only want to steal up to the prescribed amount of
- * runnable load.
+ * load/util/tasks.
*/
if (env->imbalance <= 0)
break;
@@ -7517,6 +7483,28 @@ static inline bool others_have_blocked(struct rq *rq) { return false; }
static inline void update_blocked_load_status(struct rq *rq, bool has_blocked) {}
#endif
+static bool __update_blocked_others(struct rq *rq, bool *done)
+{
+ const struct sched_class *curr_class;
+ u64 now = rq_clock_pelt(rq);
+ bool decayed;
+
+ /*
+ * update_load_avg() can call cpufreq_update_util(). Make sure that RT,
+ * DL and IRQ signals have been updated before updating CFS.
+ */
+ curr_class = rq->curr->sched_class;
+
+ decayed = update_rt_rq_load_avg(now, rq, curr_class == &rt_sched_class) |
+ update_dl_rq_load_avg(now, rq, curr_class == &dl_sched_class) |
+ update_irq_load_avg(rq, 0);
+
+ if (others_have_blocked(rq))
+ *done = false;
+
+ return decayed;
+}
+
#ifdef CONFIG_FAIR_GROUP_SCHED
static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
@@ -7536,16 +7524,11 @@ static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
return true;
}
-static void update_blocked_averages(int cpu)
+static bool __update_blocked_fair(struct rq *rq, bool *done)
{
- struct rq *rq = cpu_rq(cpu);
struct cfs_rq *cfs_rq, *pos;
- const struct sched_class *curr_class;
- struct rq_flags rf;
- bool done = true;
-
- rq_lock_irqsave(rq, &rf);
- update_rq_clock(rq);
+ bool decayed = false;
+ int cpu = cpu_of(rq);
/*
* Iterates the task_group tree in a bottom up fashion, see
@@ -7554,9 +7537,13 @@ static void update_blocked_averages(int cpu)
for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) {
struct sched_entity *se;
- if (update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq))
+ if (update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq)) {
update_tg_load_avg(cfs_rq, 0);
+ if (cfs_rq == &rq->cfs)
+ decayed = true;
+ }
+
/* Propagate pending load changes to the parent, if any: */
se = cfs_rq->tg->se[cpu];
if (se && !skip_blocked_update(se))
@@ -7571,19 +7558,10 @@ static void update_blocked_averages(int cpu)
/* Don't need periodic decay once load/util_avg are null */
if (cfs_rq_has_blocked(cfs_rq))
- done = false;
+ *done = false;
}
- curr_class = rq->curr->sched_class;
- update_rt_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &rt_sched_class);
- update_dl_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &dl_sched_class);
- update_irq_load_avg(rq, 0);
- /* Don't need periodic decay once load/util_avg are null */
- if (others_have_blocked(rq))
- done = false;
-
- update_blocked_load_status(rq, !done);
- rq_unlock_irqrestore(rq, &rf);
+ return decayed;
}
/*
@@ -7633,23 +7611,16 @@ static unsigned long task_h_load(struct task_struct *p)
cfs_rq_load_avg(cfs_rq) + 1);
}
#else
-static inline void update_blocked_averages(int cpu)
+static bool __update_blocked_fair(struct rq *rq, bool *done)
{
- struct rq *rq = cpu_rq(cpu);
struct cfs_rq *cfs_rq = &rq->cfs;
- const struct sched_class *curr_class;
- struct rq_flags rf;
+ bool decayed;
- rq_lock_irqsave(rq, &rf);
- update_rq_clock(rq);
- update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq);
+ decayed = update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq);
+ if (cfs_rq_has_blocked(cfs_rq))
+ *done = false;
- curr_class = rq->curr->sched_class;
- update_rt_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &rt_sched_class);
- update_dl_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &dl_sched_class);
- update_irq_load_avg(rq, 0);
- update_blocked_load_status(rq, cfs_rq_has_blocked(cfs_rq) || others_have_blocked(rq));
- rq_unlock_irqrestore(rq, &rf);
+ return decayed;
}
static unsigned long task_h_load(struct task_struct *p)
@@ -7658,6 +7629,24 @@ static unsigned long task_h_load(struct task_struct *p)
}
#endif
+static void update_blocked_averages(int cpu)
+{
+ bool decayed = false, done = true;
+ struct rq *rq = cpu_rq(cpu);
+ struct rq_flags rf;
+
+ rq_lock_irqsave(rq, &rf);
+ update_rq_clock(rq);
+
+ decayed |= __update_blocked_others(rq, &done);
+ decayed |= __update_blocked_fair(rq, &done);
+
+ update_blocked_load_status(rq, !done);
+ if (decayed)
+ cpufreq_update_util(rq, 0);
+ rq_unlock_irqrestore(rq, &rf);
+}
+
/********** Helpers for find_busiest_group ************************/
/*
@@ -7666,14 +7655,14 @@ static unsigned long task_h_load(struct task_struct *p)
struct sg_lb_stats {
unsigned long avg_load; /*Avg load across the CPUs of the group */
unsigned long group_load; /* Total load over the CPUs of the group */
- unsigned long load_per_task;
unsigned long group_capacity;
unsigned long group_util; /* Total utilization of the group */
- unsigned int sum_nr_running; /* Nr tasks running in the group */
+ unsigned int sum_nr_running; /* Nr of tasks running in the group */
+ unsigned int sum_h_nr_running; /* Nr of CFS tasks running in the group */
unsigned int idle_cpus;
unsigned int group_weight;
enum group_type group_type;
- int group_no_capacity;
+ unsigned int group_asym_packing; /* Tasks should be moved to preferred CPU */
unsigned long group_misfit_task_load; /* A CPU has a task too big for its capacity */
#ifdef CONFIG_NUMA_BALANCING
unsigned int nr_numa_running;
@@ -7688,10 +7677,10 @@ struct sg_lb_stats {
struct sd_lb_stats {
struct sched_group *busiest; /* Busiest group in this sd */
struct sched_group *local; /* Local group in this sd */
- unsigned long total_running;
unsigned long total_load; /* Total load of all groups in sd */
unsigned long total_capacity; /* Total capacity of all groups in sd */
unsigned long avg_load; /* Average load across all groups in sd */
+ unsigned int prefer_sibling; /* tasks should go to sibling first */
struct sg_lb_stats busiest_stat;/* Statistics of the busiest group */
struct sg_lb_stats local_stat; /* Statistics of the local group */
@@ -7702,19 +7691,18 @@ static inline void init_sd_lb_stats(struct sd_lb_stats *sds)
/*
* Skimp on the clearing to avoid duplicate work. We can avoid clearing
* local_stat because update_sg_lb_stats() does a full clear/assignment.
- * We must however clear busiest_stat::avg_load because
- * update_sd_pick_busiest() reads this before assignment.
+ * We must however set busiest_stat::group_type and
+ * busiest_stat::idle_cpus to the values of the least busy
+ * possible group, because update_sd_pick_busiest() reads
+ * these before assignment.
*/
*sds = (struct sd_lb_stats){
.busiest = NULL,
.local = NULL,
- .total_running = 0UL,
.total_load = 0UL,
.total_capacity = 0UL,
.busiest_stat = {
- .avg_load = 0UL,
- .sum_nr_running = 0,
- .group_type = group_other,
+ .idle_cpus = UINT_MAX,
+ .group_type = group_has_spare,
},
};
}
@@ -7902,13 +7890,13 @@ static inline int sg_imbalanced(struct sched_group *group)
* any benefit for the load balance.
*/
static inline bool
-group_has_capacity(struct lb_env *env, struct sg_lb_stats *sgs)
+group_has_capacity(unsigned int imbalance_pct, struct sg_lb_stats *sgs)
{
if (sgs->sum_nr_running < sgs->group_weight)
return true;
if ((sgs->group_capacity * 100) >
- (sgs->group_util * env->sd->imbalance_pct))
+ (sgs->group_util * imbalance_pct))
return true;
return false;
@@ -7923,13 +7911,13 @@ group_has_capacity(struct lb_env *env, struct sg_lb_stats *sgs)
* false.
*/
static inline bool
-group_is_overloaded(struct lb_env *env, struct sg_lb_stats *sgs)
+group_is_overloaded(unsigned int imbalance_pct, struct sg_lb_stats *sgs)
{
if (sgs->sum_nr_running <= sgs->group_weight)
return false;
if ((sgs->group_capacity * 100) <
- (sgs->group_util * env->sd->imbalance_pct))
+ (sgs->group_util * imbalance_pct))
return true;
return false;
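Both helpers above come down to the same scaled comparison: capacity * 100 against util * imbalance_pct. A standalone sketch of just that arithmetic, with assumed numbers (capacity 1024, imbalance_pct 117); note the kernel versions also compare sum_nr_running against group_weight first:

#include <stdbool.h>
#include <stdio.h>

/* Capacity side of group_is_overloaded(): the pct adds ~17% headroom here. */
static bool capacity_exceeded(unsigned long capacity, unsigned long util,
			      unsigned int imbalance_pct)
{
	return capacity * 100 < util * imbalance_pct;
}

int main(void)
{
	printf("%d\n", capacity_exceeded(1024, 900, 117)); /* 102400 < 105300 -> 1 */
	printf("%d\n", capacity_exceeded(1024, 800, 117)); /* 102400 >= 93600 -> 0 */
	return 0;
}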
@@ -7956,19 +7944,26 @@ group_smaller_max_cpu_capacity(struct sched_group *sg, struct sched_group *ref)
}
static inline enum
-group_type group_classify(struct sched_group *group,
+group_type group_classify(unsigned int imbalance_pct,
+ struct sched_group *group,
struct sg_lb_stats *sgs)
{
- if (sgs->group_no_capacity)
+ if (group_is_overloaded(imbalance_pct, sgs))
return group_overloaded;
if (sg_imbalanced(group))
return group_imbalanced;
+ if (sgs->group_asym_packing)
+ return group_asym_packing;
+
if (sgs->group_misfit_task_load)
return group_misfit_task;
- return group_other;
+ if (!group_has_capacity(imbalance_pct, sgs))
+ return group_fully_busy;
+
+ return group_has_spare;
}
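group_classify() checks the most constrained states first, and the group_type comparisons in update_sd_pick_busiest() below depend on the enum being ordered. A sketch of the ordering those comparisons imply, from least to most busy; the authoritative enum is defined earlier in fair.c:

/* Assumed ordering, least to most busy, matching the comparisons below. */
enum group_type {
	group_has_spare = 0,	/* idle CPUs available */
	group_fully_busy,	/* no spare capacity, but not overloaded */
	group_misfit_task,	/* a task too big for its CPU's capacity */
	group_asym_packing,	/* work should move to the preferred CPU */
	group_imbalanced,	/* affinity constraints broke a prior balance */
	group_overloaded,	/* more runnable tasks than capacity */
};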
static bool update_nohz_stats(struct rq *rq, bool force)
@@ -8005,21 +8000,25 @@ static inline void update_sg_lb_stats(struct lb_env *env,
struct sg_lb_stats *sgs,
int *sg_status)
{
- int i, nr_running;
+ int i, nr_running, local_group;
memset(sgs, 0, sizeof(*sgs));
+ local_group = cpumask_test_cpu(env->dst_cpu, sched_group_span(group));
+
for_each_cpu_and(i, sched_group_span(group), env->cpus) {
struct rq *rq = cpu_rq(i);
if ((env->flags & LBF_NOHZ_STATS) && update_nohz_stats(rq, false))
env->flags |= LBF_NOHZ_AGAIN;
- sgs->group_load += cpu_runnable_load(rq);
+ sgs->group_load += cpu_load(rq);
sgs->group_util += cpu_util(i);
- sgs->sum_nr_running += rq->cfs.h_nr_running;
+ sgs->sum_h_nr_running += rq->cfs.h_nr_running;
nr_running = rq->nr_running;
+ sgs->sum_nr_running += nr_running;
+
if (nr_running > 1)
*sg_status |= SG_OVERLOAD;
@@ -8033,9 +8032,16 @@ static inline void update_sg_lb_stats(struct lb_env *env,
/*
* No need to call idle_cpu() if nr_running is not 0
*/
- if (!nr_running && idle_cpu(i))
+ if (!nr_running && idle_cpu(i)) {
sgs->idle_cpus++;
+ /* Idle cpu can't have misfit task */
+ continue;
+ }
+ if (local_group)
+ continue;
+
+ /* Check for a misfit task on the cpu */
if (env->sd->flags & SD_ASYM_CPUCAPACITY &&
sgs->group_misfit_task_load < rq->misfit_task_load) {
sgs->group_misfit_task_load = rq->misfit_task_load;
@@ -8043,17 +8049,24 @@ static inline void update_sg_lb_stats(struct lb_env *env,
}
}
- /* Adjust by relative CPU capacity of the group */
- sgs->group_capacity = group->sgc->capacity;
- sgs->avg_load = (sgs->group_load*SCHED_CAPACITY_SCALE) / sgs->group_capacity;
+ /* Check if dst CPU is idle and preferred to this group */
+ if (env->sd->flags & SD_ASYM_PACKING &&
+ env->idle != CPU_NOT_IDLE &&
+ sgs->sum_h_nr_running &&
+ sched_asym_prefer(env->dst_cpu, group->asym_prefer_cpu)) {
+ sgs->group_asym_packing = 1;
+ }
- if (sgs->sum_nr_running)
- sgs->load_per_task = sgs->group_load / sgs->sum_nr_running;
+ sgs->group_capacity = group->sgc->capacity;
sgs->group_weight = group->group_weight;
- sgs->group_no_capacity = group_is_overloaded(env, sgs);
- sgs->group_type = group_classify(group, sgs);
+ sgs->group_type = group_classify(env->sd->imbalance_pct, group, sgs);
+
+ /* Computing avg_load makes sense only when group is overloaded */
+ if (sgs->group_type == group_overloaded)
+ sgs->avg_load = (sgs->group_load * SCHED_CAPACITY_SCALE) /
+ sgs->group_capacity;
}
/**
@@ -8076,6 +8089,10 @@ static bool update_sd_pick_busiest(struct lb_env *env,
{
struct sg_lb_stats *busiest = &sds->busiest_stat;
+ /* Make sure that there is at least one task to pull */
+ if (!sgs->sum_h_nr_running)
+ return false;
+
/*
* Don't try to pull misfit tasks we can't help.
* We can use max_capacity here as reduction in capacity on some
@@ -8084,7 +8101,7 @@ static bool update_sd_pick_busiest(struct lb_env *env,
*/
if (sgs->group_type == group_misfit_task &&
(!group_smaller_max_cpu_capacity(sg, sds->local) ||
- !group_has_capacity(env, &sds->local_stat)))
+ sds->local_stat.group_type != group_has_spare))
return false;
if (sgs->group_type > busiest->group_type)
@@ -8093,62 +8110,88 @@ static bool update_sd_pick_busiest(struct lb_env *env,
if (sgs->group_type < busiest->group_type)
return false;
- if (sgs->avg_load <= busiest->avg_load)
- return false;
-
- if (!(env->sd->flags & SD_ASYM_CPUCAPACITY))
- goto asym_packing;
-
/*
- * Candidate sg has no more than one task per CPU and
- * has higher per-CPU capacity. Migrating tasks to less
- * capable CPUs may harm throughput. Maximize throughput,
- * power/energy consequences are not considered.
+ * The candidate and the current busiest group are the same type of
+ * group. Let's check which one is the busiest according to the type.
*/
- if (sgs->sum_nr_running <= sgs->group_weight &&
- group_smaller_min_cpu_capacity(sds->local, sg))
- return false;
- /*
- * If we have more than one misfit sg go with the biggest misfit.
- */
- if (sgs->group_type == group_misfit_task &&
- sgs->group_misfit_task_load < busiest->group_misfit_task_load)
+ switch (sgs->group_type) {
+ case group_overloaded:
+ /* Select the overloaded group with highest avg_load. */
+ if (sgs->avg_load <= busiest->avg_load)
+ return false;
+ break;
+
+ case group_imbalanced:
+ /*
+ * Select the 1st imbalanced group as we don't have any way to
+ * choose one over another.
+ */
return false;
-asym_packing:
- /* This is the busiest node in its class. */
- if (!(env->sd->flags & SD_ASYM_PACKING))
- return true;
+ case group_asym_packing:
+ /* Prefer to move work away from the lowest-priority CPU */
+ if (sched_asym_prefer(sg->asym_prefer_cpu, sds->busiest->asym_prefer_cpu))
+ return false;
+ break;
- /* No ASYM_PACKING if target CPU is already busy */
- if (env->idle == CPU_NOT_IDLE)
- return true;
- /*
- * ASYM_PACKING needs to move all the work to the highest
- * prority CPUs in the group, therefore mark all groups
- * of lower priority than ourself as busy.
- */
- if (sgs->sum_nr_running &&
- sched_asym_prefer(env->dst_cpu, sg->asym_prefer_cpu)) {
- if (!sds->busiest)
- return true;
+ case group_misfit_task:
+ /*
+ * If we have more than one misfit sg go with the biggest
+ * misfit.
+ */
+ if (sgs->group_misfit_task_load < busiest->group_misfit_task_load)
+ return false;
+ break;
- /* Prefer to move from lowest priority CPU's work */
- if (sched_asym_prefer(sds->busiest->asym_prefer_cpu,
- sg->asym_prefer_cpu))
- return true;
+ case group_fully_busy:
+ /*
+ * Select the fully busy group with highest avg_load. In
+ * theory, there is no need to pull task from such kind of
+ * group because tasks have all compute capacity that they need
+ * but we can still improve the overall throughput by reducing
+ * contention when accessing shared HW resources.
+ *
+ * XXX for now avg_load is not computed and always 0 so we
+ * select the 1st one.
+ */
+ if (sgs->avg_load <= busiest->avg_load)
+ return false;
+ break;
+
+ case group_has_spare:
+ /*
+ * Select the non-overloaded group with the lowest number
+ * of idle CPUs. We could also compare spare capacity,
+ * which is more stable, but a group can end up with less
+ * spare capacity yet more idle CPUs, which means fewer
+ * opportunities to pull tasks.
+ */
+ if (sgs->idle_cpus >= busiest->idle_cpus)
+ return false;
+ break;
}
- return false;
+ /*
+ * Candidate sg has no more than one task per CPU and has higher
+ * per-CPU capacity. Migrating tasks to less capable CPUs may harm
+ * throughput. Maximize throughput, power/energy consequences are not
+ * considered.
+ */
+ if ((env->sd->flags & SD_ASYM_CPUCAPACITY) &&
+ (sgs->group_type <= group_fully_busy) &&
+ (group_smaller_min_cpu_capacity(sds->local, sg)))
+ return false;
+
+ return true;
}
#ifdef CONFIG_NUMA_BALANCING
static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs)
{
- if (sgs->sum_nr_running > sgs->nr_numa_running)
+ if (sgs->sum_h_nr_running > sgs->nr_numa_running)
return regular;
- if (sgs->sum_nr_running > sgs->nr_preferred_running)
+ if (sgs->sum_h_nr_running > sgs->nr_preferred_running)
return remote;
return all;
}
@@ -8173,18 +8216,310 @@ static inline enum fbq_type fbq_classify_rq(struct rq *rq)
}
#endif /* CONFIG_NUMA_BALANCING */
+
+struct sg_lb_stats;
+
+/*
+ * task_running_on_cpu - return 1 if @p is running on @cpu.
+ */
+
+static unsigned int task_running_on_cpu(int cpu, struct task_struct *p)
+{
+ /* Task has no contribution or is new */
+ if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))
+ return 0;
+
+ if (task_on_rq_queued(p))
+ return 1;
+
+ return 0;
+}
+
+/**
+ * idle_cpu_without - would a given CPU be idle without p?
+ * @cpu: the processor on which idleness is tested.
+ * @p: task which should be ignored.
+ *
+ * Return: 1 if the CPU would be idle. 0 otherwise.
+ */
+static int idle_cpu_without(int cpu, struct task_struct *p)
+{
+ struct rq *rq = cpu_rq(cpu);
+
+ if (rq->curr != rq->idle && rq->curr != p)
+ return 0;
+
+ /*
+ * rq->nr_running can't be used but an updated version without the
+ * impact of p on cpu must be used instead. The updated nr_running
+ * must be computed and tested before calling idle_cpu_without().
+ */
+
+#ifdef CONFIG_SMP
+ if (!llist_empty(&rq->wake_list))
+ return 0;
+#endif
+
+ return 1;
+}
+
+/*
+ * update_sg_wakeup_stats - Update sched_group's statistics for wakeup.
+ * @sd: The sched_domain level to look for idlest group.
+ * @group: sched_group whose statistics are to be updated.
+ * @sgs: variable to hold the statistics for this group.
+ * @p: The task for which we look for the idlest group/CPU.
+ */
+static inline void update_sg_wakeup_stats(struct sched_domain *sd,
+ struct sched_group *group,
+ struct sg_lb_stats *sgs,
+ struct task_struct *p)
+{
+ int i, nr_running;
+
+ memset(sgs, 0, sizeof(*sgs));
+
+ for_each_cpu(i, sched_group_span(group)) {
+ struct rq *rq = cpu_rq(i);
+ unsigned int local;
+
+ sgs->group_load += cpu_load_without(rq, p);
+ sgs->group_util += cpu_util_without(i, p);
+ local = task_running_on_cpu(i, p);
+ sgs->sum_h_nr_running += rq->cfs.h_nr_running - local;
+
+ nr_running = rq->nr_running - local;
+ sgs->sum_nr_running += nr_running;
+
+ /*
+ * No need to call idle_cpu_without() if nr_running is not 0
+ */
+ if (!nr_running && idle_cpu_without(i, p))
+ sgs->idle_cpus++;
+ }
+
+ /* Check if task fits in the group */
+ if (sd->flags & SD_ASYM_CPUCAPACITY &&
+ !task_fits_capacity(p, group->sgc->max_capacity)) {
+ sgs->group_misfit_task_load = 1;
+ }
+
+ sgs->group_capacity = group->sgc->capacity;
+
+ sgs->group_type = group_classify(sd->imbalance_pct, group, sgs);
+
+ /*
+ * Computing avg_load makes sense only when group is fully busy or
+ * overloaded
+ */
+ if (sgs->group_type < group_fully_busy)
+ sgs->avg_load = (sgs->group_load * SCHED_CAPACITY_SCALE) /
+ sgs->group_capacity;
+}
+
+static bool update_pick_idlest(struct sched_group *idlest,
+ struct sg_lb_stats *idlest_sgs,
+ struct sched_group *group,
+ struct sg_lb_stats *sgs)
+{
+ if (sgs->group_type < idlest_sgs->group_type)
+ return true;
+
+ if (sgs->group_type > idlest_sgs->group_type)
+ return false;
+
+ /*
+ * The candidate and the current idlest group are the same type of
+ * group. Let's check which one is the idlest according to the type.
+ */
+
+ switch (sgs->group_type) {
+ case group_overloaded:
+ case group_fully_busy:
+ /* Select the group with lowest avg_load. */
+ if (idlest_sgs->avg_load <= sgs->avg_load)
+ return false;
+ break;
+
+ case group_imbalanced:
+ case group_asym_packing:
+ /* Those types are not used in the slow wakeup path */
+ return false;
+
+ case group_misfit_task:
+ /* Select group with the highest max capacity */
+ if (idlest->sgc->max_capacity >= group->sgc->max_capacity)
+ return false;
+ break;
+
+ case group_has_spare:
+ /* Select group with most idle CPUs */
+ if (idlest_sgs->idle_cpus >= sgs->idle_cpus)
+ return false;
+ break;
+ }
+
+ return true;
+}
+
+/*
+ * find_idlest_group() finds and returns the least busy CPU group within the
+ * domain.
+ *
+ * Assumes p is allowed on at least one CPU in sd.
+ */
+static struct sched_group *
+find_idlest_group(struct sched_domain *sd, struct task_struct *p,
+ int this_cpu, int sd_flag)
+{
+ struct sched_group *idlest = NULL, *local = NULL, *group = sd->groups;
+ struct sg_lb_stats local_sgs, tmp_sgs;
+ struct sg_lb_stats *sgs;
+ unsigned long imbalance;
+ struct sg_lb_stats idlest_sgs = {
+ .avg_load = UINT_MAX,
+ .group_type = group_overloaded,
+ };
+
+ imbalance = scale_load_down(NICE_0_LOAD) *
+ (sd->imbalance_pct-100) / 100;
+
+ do {
+ int local_group;
+
+ /* Skip over this group if it has no CPUs allowed */
+ if (!cpumask_intersects(sched_group_span(group),
+ p->cpus_ptr))
+ continue;
+
+ local_group = cpumask_test_cpu(this_cpu,
+ sched_group_span(group));
+
+ if (local_group) {
+ sgs = &local_sgs;
+ local = group;
+ } else {
+ sgs = &tmp_sgs;
+ }
+
+ update_sg_wakeup_stats(sd, group, sgs, p);
+
+ if (!local_group && update_pick_idlest(idlest, &idlest_sgs, group, sgs)) {
+ idlest = group;
+ idlest_sgs = *sgs;
+ }
+
+ } while (group = group->next, group != sd->groups);
+
+ /* There is no idlest group to push tasks to */
+ if (!idlest)
+ return NULL;
+
+ /*
+ * If the local group is idler than the selected idlest group
+ * don't try and push the task.
+ */
+ if (local_sgs.group_type < idlest_sgs.group_type)
+ return NULL;
+
+ /*
+ * If the local group is busier than the selected idlest group
+ * try and push the task.
+ */
+ if (local_sgs.group_type > idlest_sgs.group_type)
+ return idlest;
+
+ switch (local_sgs.group_type) {
+ case group_overloaded:
+ case group_fully_busy:
+ /*
+ * When comparing groups across NUMA domains, it's possible for
+ * the local domain to be very lightly loaded relative to the
+ * remote domains but "imbalance" skews the comparison making
+ * remote CPUs look much more favourable. When considering
+ * cross-domain, add imbalance to the load on the remote node
+ * and consider staying local.
+ */
+
+ if ((sd->flags & SD_NUMA) &&
+ ((idlest_sgs.avg_load + imbalance) >= local_sgs.avg_load))
+ return NULL;
+
+ /*
+ * If the local group is less loaded than the selected
+ * idlest group don't try and push any tasks.
+ */
+ if (idlest_sgs.avg_load >= (local_sgs.avg_load + imbalance))
+ return NULL;
+
+ if (100 * local_sgs.avg_load <= sd->imbalance_pct * idlest_sgs.avg_load)
+ return NULL;
+ break;
+
+ case group_imbalanced:
+ case group_asym_packing:
+ /* Those types are not used in the slow wakeup path */
+ return NULL;
+
+ case group_misfit_task:
+ /* Select group with the highest max capacity */
+ if (local->sgc->max_capacity >= idlest->sgc->max_capacity)
+ return NULL;
+ break;
+
+ case group_has_spare:
+ if (sd->flags & SD_NUMA) {
+#ifdef CONFIG_NUMA_BALANCING
+ int idlest_cpu;
+ /*
+ * If there is spare capacity at NUMA, try to select
+ * the preferred node
+ */
+ if (cpu_to_node(this_cpu) == p->numa_preferred_nid)
+ return NULL;
+
+ idlest_cpu = cpumask_first(sched_group_span(idlest));
+ if (cpu_to_node(idlest_cpu) == p->numa_preferred_nid)
+ return idlest;
+#endif
+ /*
+ * Otherwise, keep the task on this node to stay close
+ * to its wakeup source and improve locality. If there
+ * is a real need for migration, the periodic load balance will
+ * take care of it.
+ */
+ if (local_sgs.idle_cpus)
+ return NULL;
+ }
+
+ /*
+ * Select the group with the highest number of idle CPUs.
+ * We could also compare utilization, which is more stable,
+ * but a group can end up with less spare capacity yet more
+ * idle CPUs, which means more opportunities to run tasks.
+ */
+ if (local_sgs.idle_cpus >= idlest_sgs.idle_cpus)
+ return NULL;
+ break;
+ }
+
+ return idlest;
+}
+
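The NUMA branch above computes a fixed slack, imbalance = scale_load_down(NICE_0_LOAD) * (imbalance_pct - 100) / 100, that the remote group must overcome before a task is pushed off-node. A quick numeric sketch, assuming NICE_0_LOAD scales down to 1024 and an imbalance_pct of 125:

#include <stdio.h>

int main(void)
{
	unsigned long nice_0_load = 1024, imbalance_pct = 125;	/* assumed */
	unsigned long imbalance = nice_0_load * (imbalance_pct - 100) / 100;

	printf("slack = %lu\n", imbalance);	/* 1024 * 25 / 100 = 256 */

	/* Remote must beat local by more than the slack to win. */
	unsigned long local_load = 1500, idlest_load = 1300;
	printf("stay local: %s\n",
	       idlest_load + imbalance >= local_load ? "yes" : "no"); /* yes */
	return 0;
}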
/**
* update_sd_lb_stats - Update sched_domain's statistics for load balancing.
* @env: The load balancing environment.
* @sds: variable to hold the statistics for this sched_domain.
*/
+
static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sds)
{
struct sched_domain *child = env->sd->child;
struct sched_group *sg = env->sd->groups;
struct sg_lb_stats *local = &sds->local_stat;
struct sg_lb_stats tmp_sgs;
- bool prefer_sibling = child && child->flags & SD_PREFER_SIBLING;
int sg_status = 0;
#ifdef CONFIG_NO_HZ_COMMON
@@ -8211,22 +8546,6 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd
if (local_group)
goto next_group;
- /*
- * In case the child domain prefers tasks go to siblings
- * first, lower the sg capacity so that we'll try
- * and move all the excess tasks away. We lower the capacity
- * of a group only if the local group has the capacity to fit
- * these excess tasks. The extra check prevents the case where
- * you always pull from the heaviest group when it is already
- * under-utilized (possible with a large weight task outweighs
- * the tasks on the system).
- */
- if (prefer_sibling && sds->local &&
- group_has_capacity(env, local) &&
- (sgs->sum_nr_running > local->sum_nr_running + 1)) {
- sgs->group_no_capacity = 1;
- sgs->group_type = group_classify(sg, sgs);
- }
if (update_sd_pick_busiest(env, sds, sg, sgs)) {
sds->busiest = sg;
@@ -8235,13 +8554,15 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd
next_group:
/* Now, start updating sd_lb_stats */
- sds->total_running += sgs->sum_nr_running;
sds->total_load += sgs->group_load;
sds->total_capacity += sgs->group_capacity;
sg = sg->next;
} while (sg != env->sd->groups);
+ /* Note whether the child domain prefers tasks to go to siblings first */
+ sds->prefer_sibling = child && child->flags & SD_PREFER_SIBLING;
+
#ifdef CONFIG_NO_HZ_COMMON
if ((env->flags & LBF_NOHZ_AGAIN) &&
cpumask_subset(nohz.idle_cpus_mask, sched_domain_span(env->sd))) {
@@ -8272,203 +8593,160 @@ next_group:
}
/**
- * check_asym_packing - Check to see if the group is packed into the
- * sched domain.
- *
- * This is primarily intended to used at the sibling level. Some
- * cores like POWER7 prefer to use lower numbered SMT threads. In the
- * case of POWER7, it can move to lower SMT modes only when higher
- * threads are idle. When in lower SMT modes, the threads will
- * perform better since they share less core resources. Hence when we
- * have idle threads, we want them to be the higher ones.
- *
- * This packing function is run on idle threads. It checks to see if
- * the busiest CPU in this domain (core in the P7 case) has a higher
- * CPU number than the packing function is being run on. Here we are
- * assuming lower CPU number will be equivalent to lower a SMT thread
- * number.
- *
- * Return: 1 when packing is required and a task should be moved to
- * this CPU. The amount of the imbalance is returned in env->imbalance.
- *
- * @env: The load balancing environment.
- * @sds: Statistics of the sched_domain which is to be packed
- */
-static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds)
-{
- int busiest_cpu;
-
- if (!(env->sd->flags & SD_ASYM_PACKING))
- return 0;
-
- if (env->idle == CPU_NOT_IDLE)
- return 0;
-
- if (!sds->busiest)
- return 0;
-
- busiest_cpu = sds->busiest->asym_prefer_cpu;
- if (sched_asym_prefer(busiest_cpu, env->dst_cpu))
- return 0;
-
- env->imbalance = sds->busiest_stat.group_load;
-
- return 1;
-}
-
-/**
- * fix_small_imbalance - Calculate the minor imbalance that exists
- * amongst the groups of a sched_domain, during
- * load balancing.
- * @env: The load balancing environment.
- * @sds: Statistics of the sched_domain whose imbalance is to be calculated.
+ * calculate_imbalance - Calculate the amount of imbalance present within the
+ * groups of a given sched_domain during load balance.
+ * @env: load balance environment
+ * @sds: statistics of the sched_domain whose imbalance is to be calculated.
*/
-static inline
-void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
+static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
{
- unsigned long tmp, capa_now = 0, capa_move = 0;
- unsigned int imbn = 2;
- unsigned long scaled_busy_load_per_task;
struct sg_lb_stats *local, *busiest;
local = &sds->local_stat;
busiest = &sds->busiest_stat;
- if (!local->sum_nr_running)
- local->load_per_task = cpu_avg_load_per_task(env->dst_cpu);
- else if (busiest->load_per_task > local->load_per_task)
- imbn = 1;
+ if (busiest->group_type == group_misfit_task) {
+ /* Set imbalance to allow misfit tasks to be balanced. */
+ env->migration_type = migrate_misfit;
+ env->imbalance = 1;
+ return;
+ }
- scaled_busy_load_per_task =
- (busiest->load_per_task * SCHED_CAPACITY_SCALE) /
- busiest->group_capacity;
+ if (busiest->group_type == group_asym_packing) {
+ /*
+ * In case of asym packing, we will try to migrate all load to
+ * the preferred CPU.
+ */
+ env->migration_type = migrate_task;
+ env->imbalance = busiest->sum_h_nr_running;
+ return;
+ }
- if (busiest->avg_load + scaled_busy_load_per_task >=
- local->avg_load + (scaled_busy_load_per_task * imbn)) {
- env->imbalance = busiest->load_per_task;
+ if (busiest->group_type == group_imbalanced) {
+ /*
+ * In the group_imb case we cannot rely on group-wide averages
+ * to ensure CPU-load equilibrium; try to move any task to fix
+ * the imbalance. The next load balance will take care of
+ * balancing back the system.
+ */
+ env->migration_type = migrate_task;
+ env->imbalance = 1;
return;
}
/*
- * OK, we don't have enough imbalance to justify moving tasks,
- * however we may be able to increase total CPU capacity used by
- * moving them.
+ * Try to use spare capacity of local group without overloading it or
+ * emptying busiest.
+ * XXX Spreading tasks across NUMA nodes is not always the best policy
+ * and special care should be taken for SD_NUMA domain level before
+ * spreading the tasks. For now, load_balance() fully relies on
+ * NUMA_BALANCING and fbq_classify_group/rq to override the decision.
*/
+ if (local->group_type == group_has_spare) {
+ if (busiest->group_type > group_fully_busy) {
+ /*
+ * If busiest is overloaded, try to fill spare
+ * capacity. This might end up creating spare capacity
+ * in busiest, or leave busiest still overloaded, but
+ * there is no simple way to directly compute the
+ * amount of load to migrate in order to balance the
+ * system.
+ */
+ env->migration_type = migrate_util;
+ env->imbalance = max(local->group_capacity, local->group_util) -
+ local->group_util;
- capa_now += busiest->group_capacity *
- min(busiest->load_per_task, busiest->avg_load);
- capa_now += local->group_capacity *
- min(local->load_per_task, local->avg_load);
- capa_now /= SCHED_CAPACITY_SCALE;
-
- /* Amount of load we'd subtract */
- if (busiest->avg_load > scaled_busy_load_per_task) {
- capa_move += busiest->group_capacity *
- min(busiest->load_per_task,
- busiest->avg_load - scaled_busy_load_per_task);
- }
-
- /* Amount of load we'd add */
- if (busiest->avg_load * busiest->group_capacity <
- busiest->load_per_task * SCHED_CAPACITY_SCALE) {
- tmp = (busiest->avg_load * busiest->group_capacity) /
- local->group_capacity;
- } else {
- tmp = (busiest->load_per_task * SCHED_CAPACITY_SCALE) /
- local->group_capacity;
- }
- capa_move += local->group_capacity *
- min(local->load_per_task, local->avg_load + tmp);
- capa_move /= SCHED_CAPACITY_SCALE;
-
- /* Move if we gain throughput */
- if (capa_move > capa_now)
- env->imbalance = busiest->load_per_task;
-}
+ /*
+ * In some cases, the group's utilization is max or even
+ * higher than capacity because of migrations but the
+ * local CPU is (newly) idle. There is at least one
+ * waiting task in this overloaded busiest group. Let's
+ * try to pull it.
+ */
+ if (env->idle != CPU_NOT_IDLE && env->imbalance == 0) {
+ env->migration_type = migrate_task;
+ env->imbalance = 1;
+ }
-/**
- * calculate_imbalance - Calculate the amount of imbalance present within the
- * groups of a given sched_domain during load balance.
- * @env: load balance environment
- * @sds: statistics of the sched_domain whose imbalance is to be calculated.
- */
-static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
-{
- unsigned long max_pull, load_above_capacity = ~0UL;
- struct sg_lb_stats *local, *busiest;
+ return;
+ }
- local = &sds->local_stat;
- busiest = &sds->busiest_stat;
+ if (busiest->group_weight == 1 || sds->prefer_sibling) {
+ unsigned int nr_diff = busiest->sum_nr_running;
+ /*
+ * When the prefer-sibling flag is set, spread running
+ * tasks evenly across groups.
+ */
+ env->migration_type = migrate_task;
+ lsub_positive(&nr_diff, local->sum_nr_running);
+ env->imbalance = nr_diff >> 1;
+ return;
+ }
- if (busiest->group_type == group_imbalanced) {
/*
- * In the group_imb case we cannot rely on group-wide averages
- * to ensure CPU-load equilibrium, look at wider averages. XXX
+ * If there is no overload, we just want to even the number of
+ * idle cpus.
*/
- busiest->load_per_task =
- min(busiest->load_per_task, sds->avg_load);
+ env->migration_type = migrate_task;
+ env->imbalance = max_t(long, 0, (local->idle_cpus -
+ busiest->idle_cpus) >> 1);
+ return;
}
/*
- * Avg load of busiest sg can be less and avg load of local sg can
- * be greater than avg load across all sgs of sd because avg load
- * factors in sg capacity and sgs with smaller group_type are
- * skipped when updating the busiest sg:
+ * Local is fully busy but has to take more load to relieve the
+ * busiest group
*/
- if (busiest->group_type != group_misfit_task &&
- (busiest->avg_load <= sds->avg_load ||
- local->avg_load >= sds->avg_load)) {
- env->imbalance = 0;
- return fix_small_imbalance(env, sds);
- }
+ if (local->group_type < group_overloaded) {
+ /*
+ * Local will become overloaded so the avg_load metrics are
+ * finally needed.
+ */
- /*
- * If there aren't any idle CPUs, avoid creating some.
- */
- if (busiest->group_type == group_overloaded &&
- local->group_type == group_overloaded) {
- load_above_capacity = busiest->sum_nr_running * SCHED_CAPACITY_SCALE;
- if (load_above_capacity > busiest->group_capacity) {
- load_above_capacity -= busiest->group_capacity;
- load_above_capacity *= scale_load_down(NICE_0_LOAD);
- load_above_capacity /= busiest->group_capacity;
- } else
- load_above_capacity = ~0UL;
+ local->avg_load = (local->group_load * SCHED_CAPACITY_SCALE) /
+ local->group_capacity;
+
+ sds->avg_load = (sds->total_load * SCHED_CAPACITY_SCALE) /
+ sds->total_capacity;
}
/*
- * We're trying to get all the CPUs to the average_load, so we don't
- * want to push ourselves above the average load, nor do we wish to
- * reduce the max loaded CPU below the average load. At the same time,
- * we also don't want to reduce the group load below the group
- * capacity. Thus we look for the minimum possible imbalance.
+ * Both groups are or will become overloaded, and we're trying to get all
+ * the CPUs to the average_load, so we don't want to push ourselves
+ * above the average load, nor do we wish to reduce the max loaded CPU
+ * below the average load. At the same time, we also don't want to
+ * reduce the group load below the group capacity. Thus we look for
+ * the minimum possible imbalance.
*/
- max_pull = min(busiest->avg_load - sds->avg_load, load_above_capacity);
-
- /* How much load to actually move to equalise the imbalance */
+ env->migration_type = migrate_load;
env->imbalance = min(
- max_pull * busiest->group_capacity,
+ (busiest->avg_load - sds->avg_load) * busiest->group_capacity,
(sds->avg_load - local->avg_load) * local->group_capacity
) / SCHED_CAPACITY_SCALE;
-
- /* Boost imbalance to allow misfit task to be balanced. */
- if (busiest->group_type == group_misfit_task) {
- env->imbalance = max_t(long, env->imbalance,
- busiest->group_misfit_task_load);
- }
-
- /*
- * if *imbalance is less than the average load per runnable task
- * there is no guarantee that any tasks will be moved so we'll have
- * a think about bumping its value to force at least one task to be
- * moved
- */
- if (env->imbalance < busiest->load_per_task)
- return fix_small_imbalance(env, sds);
}
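For the migrate_load case, the final min() takes the smaller of what busiest must shed to reach the domain average and what local can absorb, each weighted by group capacity. A worked sketch with assumed stats (both capacities 1024):

#include <stdio.h>

#define SCHED_CAPACITY_SCALE 1024UL

int main(void)
{
	/* Assumed avg_load values: busiest 1200, local 600, domain mean 900. */
	unsigned long busiest_avg = 1200, local_avg = 600, sds_avg = 900;
	unsigned long busiest_cap = 1024, local_cap = 1024;
	unsigned long shed = (busiest_avg - sds_avg) * busiest_cap;
	unsigned long take = (sds_avg - local_avg) * local_cap;
	unsigned long imbalance = (shed < take ? shed : take) / SCHED_CAPACITY_SCALE;

	printf("imbalance = %lu\n", imbalance);	/* min(307200, 307200)/1024 = 300 */
	return 0;
}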
/******* find_busiest_group() helpers end here *********************/
+/*
+ * Decision matrix according to the local and busiest group type:
+ *
+ * busiest \ local has_spare fully_busy misfit asym imbalanced overloaded
+ * has_spare nr_idle balanced N/A N/A balanced balanced
+ * fully_busy nr_idle nr_idle N/A N/A balanced balanced
+ * misfit_task force N/A N/A N/A force force
+ * asym_packing force force N/A N/A force force
+ * imbalanced force force N/A N/A force force
+ * overloaded force force N/A N/A force avg_load
+ *
+ * N/A : Not Applicable because already filtered while updating
+ * statistics.
+ * balanced : The system is balanced for these 2 groups.
+ * force : Calculate the imbalance as load migration is probably needed.
+ * avg_load : Only if imbalance is significant enough.
+ * nr_idle : dst_cpu is not busy and the number of idle CPUs is quite
+ * different in groups.
+ */
+
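Reading the matrix: the row is the busiest group's type, the column the local group's. Purely as an illustration, the same information encoded as a lookup table, using the least-to-most-busy group_type ordering:

/* Illustrative only: rows are busiest group_type, columns local group_type. */
enum lb_decision { BAL, NR_IDLE, FORCE, AVG_LOAD, NA };

static const enum lb_decision decision[6][6] = {
	/* local:        spare    fully    misfit asym imbal  over */
	/* has_spare  */ {NR_IDLE, BAL,     NA,    NA,  BAL,   BAL},
	/* fully_busy */ {NR_IDLE, NR_IDLE, NA,    NA,  BAL,   BAL},
	/* misfit     */ {FORCE,   NA,      NA,    NA,  FORCE, FORCE},
	/* asym       */ {FORCE,   FORCE,   NA,    NA,  FORCE, FORCE},
	/* imbalanced */ {FORCE,   FORCE,   NA,    NA,  FORCE, FORCE},
	/* overloaded */ {FORCE,   FORCE,   NA,    NA,  FORCE, AVG_LOAD},
};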
/**
* find_busiest_group - Returns the busiest group within the sched_domain
* if there is an imbalance.
@@ -8488,7 +8766,7 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
init_sd_lb_stats(&sds);
/*
- * Compute the various statistics relavent for load balancing at
+ * Compute the various statistics relevant for load balancing at
* this level.
*/
update_sd_lb_stats(env, &sds);
@@ -8503,17 +8781,17 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
local = &sds.local_stat;
busiest = &sds.busiest_stat;
- /* ASYM feature bypasses nice load balance check */
- if (check_asym_packing(env, &sds))
- return sds.busiest;
-
/* There is no busy sibling group to pull tasks from */
- if (!sds.busiest || busiest->sum_nr_running == 0)
+ if (!sds.busiest)
goto out_balanced;
- /* XXX broken for overlapping NUMA groups */
- sds.avg_load = (SCHED_CAPACITY_SCALE * sds.total_load)
- / sds.total_capacity;
+ /* Misfit tasks should be dealt with regardless of the avg load */
+ if (busiest->group_type == group_misfit_task)
+ goto force_balance;
+
+ /* ASYM feature bypasses nice load balance check */
+ if (busiest->group_type == group_asym_packing)
+ goto force_balance;
/*
* If the busiest group is imbalanced the below checks don't
@@ -8524,55 +8802,80 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
goto force_balance;
/*
- * When dst_cpu is idle, prevent SMP nice and/or asymmetric group
- * capacities from resulting in underutilization due to avg_load.
- */
- if (env->idle != CPU_NOT_IDLE && group_has_capacity(env, local) &&
- busiest->group_no_capacity)
- goto force_balance;
-
- /* Misfit tasks should be dealt with regardless of the avg load */
- if (busiest->group_type == group_misfit_task)
- goto force_balance;
-
- /*
* If the local group is busier than the selected busiest group
* don't try and pull any tasks.
*/
- if (local->avg_load >= busiest->avg_load)
+ if (local->group_type > busiest->group_type)
goto out_balanced;
/*
- * Don't pull any tasks if this group is already above the domain
- * average load.
+ * When groups are overloaded, use the avg_load to ensure fairness
+ * between tasks.
*/
- if (local->avg_load >= sds.avg_load)
- goto out_balanced;
+ if (local->group_type == group_overloaded) {
+ /*
+ * If the local group is more loaded than the selected
+ * busiest group don't try to pull any tasks.
+ */
+ if (local->avg_load >= busiest->avg_load)
+ goto out_balanced;
+
+ /* XXX broken for overlapping NUMA groups */
+ sds.avg_load = (sds.total_load * SCHED_CAPACITY_SCALE) /
+ sds.total_capacity;
- if (env->idle == CPU_IDLE) {
/*
- * This CPU is idle. If the busiest group is not overloaded
- * and there is no imbalance between this and busiest group
- * wrt idle CPUs, it is balanced. The imbalance becomes
- * significant if the diff is greater than 1 otherwise we
- * might end up to just move the imbalance on another group
+ * Don't pull any tasks if this group is already above the
+ * domain average load.
*/
- if ((busiest->group_type != group_overloaded) &&
- (local->idle_cpus <= (busiest->idle_cpus + 1)))
+ if (local->avg_load >= sds.avg_load)
goto out_balanced;
- } else {
+
/*
- * In the CPU_NEWLY_IDLE, CPU_NOT_IDLE cases, use
- * imbalance_pct to be conservative.
+ * If the busiest group is more loaded, use imbalance_pct to be
+ * conservative.
*/
if (100 * busiest->avg_load <=
env->sd->imbalance_pct * local->avg_load)
goto out_balanced;
}
+ /* Try to move all excess tasks to child's sibling domain */
+ if (sds.prefer_sibling && local->group_type == group_has_spare &&
+ busiest->sum_nr_running > local->sum_nr_running + 1)
+ goto force_balance;
+
+ if (busiest->group_type != group_overloaded) {
+ if (env->idle == CPU_NOT_IDLE)
+ /*
+ * If the busiest group is not overloaded (and, as a
+ * result, neither is the local one) but this CPU is
+ * already busy, let another idle CPU try to pull tasks.
+ */
+ goto out_balanced;
+
+ if (busiest->group_weight > 1 &&
+ local->idle_cpus <= (busiest->idle_cpus + 1))
+ /*
+ * If the busiest group is not overloaded
+ * and there is no imbalance between this and busiest
+ * group wrt idle CPUs, it is balanced. The imbalance
+ * becomes significant if the diff is greater than 1
+ * otherwise we might end up just moving the imbalance
+ * to another group. Of course this applies only if
+ * there is more than 1 CPU per group.
+ */
+ goto out_balanced;
+
+ if (busiest->sum_h_nr_running == 1)
+ /*
+ * busiest doesn't have any tasks waiting to run
+ */
+ goto out_balanced;
+ }
+
force_balance:
/* Looks like there is an imbalance. Compute it */
- env->src_grp_type = busiest->group_type;
calculate_imbalance(env, &sds);
return env->imbalance ? sds.busiest : NULL;
@@ -8588,11 +8891,13 @@ static struct rq *find_busiest_queue(struct lb_env *env,
struct sched_group *group)
{
struct rq *busiest = NULL, *rq;
- unsigned long busiest_load = 0, busiest_capacity = 1;
+ unsigned long busiest_util = 0, busiest_load = 0, busiest_capacity = 1;
+ unsigned int busiest_nr = 0;
int i;
for_each_cpu_and(i, sched_group_span(group), env->cpus) {
- unsigned long capacity, load;
+ unsigned long capacity, load, util;
+ unsigned int nr_running;
enum fbq_type rt;
rq = cpu_rq(i);
@@ -8620,20 +8925,8 @@ static struct rq *find_busiest_queue(struct lb_env *env,
if (rt > env->fbq_type)
continue;
- /*
- * For ASYM_CPUCAPACITY domains with misfit tasks we simply
- * seek the "biggest" misfit task.
- */
- if (env->src_grp_type == group_misfit_task) {
- if (rq->misfit_task_load > busiest_load) {
- busiest_load = rq->misfit_task_load;
- busiest = rq;
- }
-
- continue;
- }
-
capacity = capacity_of(i);
+ nr_running = rq->cfs.h_nr_running;
/*
* For ASYM_CPUCAPACITY domains, don't pick a CPU that could
@@ -8643,35 +8936,69 @@ static struct rq *find_busiest_queue(struct lb_env *env,
*/
if (env->sd->flags & SD_ASYM_CPUCAPACITY &&
capacity_of(env->dst_cpu) < capacity &&
- rq->nr_running == 1)
+ nr_running == 1)
continue;
- load = cpu_runnable_load(rq);
+ switch (env->migration_type) {
+ case migrate_load:
+ /*
+ * When comparing with load imbalance, use cpu_load()
+ * which is not scaled with the CPU capacity.
+ */
+ load = cpu_load(rq);
- /*
- * When comparing with imbalance, use cpu_runnable_load()
- * which is not scaled with the CPU capacity.
- */
+ if (nr_running == 1 && load > env->imbalance &&
+ !check_cpu_capacity(rq, env->sd))
+ break;
- if (rq->nr_running == 1 && load > env->imbalance &&
- !check_cpu_capacity(rq, env->sd))
- continue;
+ /*
+ * For the load comparisons with the other CPUs,
+ * consider the cpu_load() scaled with the CPU
+ * capacity, so that the load can be moved away
+ * from the CPU that is potentially running at a
+ * lower capacity.
+ *
+ * Thus we're looking for max(load_i / capacity_i),
+ * crosswise multiplication to rid ourselves of the
+ * division works out to:
+ * load_i * capacity_j > load_j * capacity_i;
+ * where j is our previous maximum.
+ */
+ if (load * busiest_capacity > busiest_load * capacity) {
+ busiest_load = load;
+ busiest_capacity = capacity;
+ busiest = rq;
+ }
+ break;
+
+ case migrate_util:
+ util = cpu_util(cpu_of(rq));
+
+ if (busiest_util < util) {
+ busiest_util = util;
+ busiest = rq;
+ }
+ break;
+
+ case migrate_task:
+ if (busiest_nr < nr_running) {
+ busiest_nr = nr_running;
+ busiest = rq;
+ }
+ break;
+
+ case migrate_misfit:
+ /*
+ * For ASYM_CPUCAPACITY domains with misfit tasks we
+ * simply seek the "biggest" misfit task.
+ */
+ if (rq->misfit_task_load > busiest_load) {
+ busiest_load = rq->misfit_task_load;
+ busiest = rq;
+ }
+
+ break;
- /*
- * For the load comparisons with the other CPU's, consider
- * the cpu_runnable_load() scaled with the CPU capacity, so
- * that the load can be moved away from the CPU that is
- * potentially running at a lower capacity.
- *
- * Thus we're looking for max(load_i / capacity_i), crosswise
- * multiplication to rid ourselves of the division works out
- * to: load_i * capacity_j > load_j * capacity_i; where j is
- * our previous maximum.
- */
- if (load * busiest_capacity > busiest_load * capacity) {
- busiest_load = load;
- busiest_capacity = capacity;
- busiest = rq;
}
}
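The migrate_load case above compares load/capacity ratios crosswise to avoid a division in a hot path (and the truncation that integer division would bring). A standalone sketch:

#include <stdio.h>

/* True when load_i/cap_i > load_j/cap_j, evaluated without dividing. */
static int relatively_busier(unsigned long load_i, unsigned long cap_i,
			     unsigned long load_j, unsigned long cap_j)
{
	return load_i * cap_j > load_j * cap_i;
}

int main(void)
{
	/* 600/512 (~1.17) vs 800/1024 (~0.78): the small CPU is busier. */
	printf("%d\n", relatively_busier(600, 512, 800, 1024)); /* 1 */
	return 0;
}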
@@ -8717,7 +9044,7 @@ voluntary_active_balance(struct lb_env *env)
return 1;
}
- if (env->src_grp_type == group_misfit_task)
+ if (env->migration_type == migrate_misfit)
return 1;
return 0;
@@ -9746,6 +10073,11 @@ static inline void nohz_newidle_balance(struct rq *this_rq) { }
/*
* idle_balance is called by schedule() if this_cpu is about to become
* idle. Attempts to pull tasks from other CPUs.
+ *
+ * Returns:
+ * < 0 - we released the lock and there are !fair tasks present
+ * 0 - failed, no new tasks
+ * > 0 - success, new (fair) tasks present
*/
int newidle_balance(struct rq *this_rq, struct rq_flags *rf)
{
@@ -10140,7 +10472,7 @@ static void switched_to_fair(struct rq *rq, struct task_struct *p)
* This routine is mostly called to set cfs_rq->curr field when a task
* migrates between groups/classes.
*/
-static void set_next_task_fair(struct rq *rq, struct task_struct *p)
+static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first)
{
struct sched_entity *se = &p->se;
@@ -10422,7 +10754,7 @@ const struct sched_class fair_sched_class = {
.check_preempt_curr = check_preempt_wakeup,
- .pick_next_task = pick_next_task_fair,
+ .pick_next_task = __pick_next_task_fair,
.put_prev_task = put_prev_task_fair,
.set_next_task = set_next_task_fair,
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
index 2410db5e9a35..7481cd96f391 100644
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -89,3 +89,4 @@ SCHED_FEAT(WA_BIAS, true)
* UtilEstimation. Use estimated CPU utilization.
*/
SCHED_FEAT(UTIL_EST, true)
+SCHED_FEAT(UTIL_EST_FASTUP, true)
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index f65ef1e2f204..ffa959e91227 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -104,7 +104,7 @@ static int call_cpuidle(struct cpuidle_driver *drv, struct cpuidle_device *dev,
* update no idle residency and return.
*/
if (current_clr_polling_and_test()) {
- dev->last_residency = 0;
+ dev->last_residency_ns = 0;
local_irq_enable();
return -EBUSY;
}
@@ -165,7 +165,9 @@ static void cpuidle_idle_call(void)
* until a proper wakeup interrupt happens.
*/
- if (idle_should_enter_s2idle() || dev->use_deepest_state) {
+ if (idle_should_enter_s2idle() || dev->forced_idle_latency_limit_ns) {
+ u64 max_latency_ns;
+
if (idle_should_enter_s2idle()) {
rcu_idle_enter();
@@ -176,12 +178,16 @@ static void cpuidle_idle_call(void)
}
rcu_idle_exit();
+
+ max_latency_ns = U64_MAX;
+ } else {
+ max_latency_ns = dev->forced_idle_latency_limit_ns;
}
tick_nohz_idle_stop_tick();
rcu_idle_enter();
- next_state = cpuidle_find_deepest_state(drv, dev);
+ next_state = cpuidle_find_deepest_state(drv, dev, max_latency_ns);
call_cpuidle(drv, dev, next_state);
} else {
bool stop_tick = true;
@@ -311,7 +317,7 @@ static enum hrtimer_restart idle_inject_timer_fn(struct hrtimer *timer)
return HRTIMER_NORESTART;
}
-void play_idle(unsigned long duration_us)
+void play_idle_precise(u64 duration_ns, u64 latency_ns)
{
struct idle_timer it;
@@ -323,29 +329,29 @@ void play_idle(unsigned long duration_us)
WARN_ON_ONCE(current->nr_cpus_allowed != 1);
WARN_ON_ONCE(!(current->flags & PF_KTHREAD));
WARN_ON_ONCE(!(current->flags & PF_NO_SETAFFINITY));
- WARN_ON_ONCE(!duration_us);
+ WARN_ON_ONCE(!duration_ns);
rcu_sleep_check();
preempt_disable();
current->flags |= PF_IDLE;
- cpuidle_use_deepest_state(true);
+ cpuidle_use_deepest_state(latency_ns);
it.done = 0;
hrtimer_init_on_stack(&it.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
it.timer.function = idle_inject_timer_fn;
- hrtimer_start(&it.timer, ns_to_ktime(duration_us * NSEC_PER_USEC),
+ hrtimer_start(&it.timer, ns_to_ktime(duration_ns),
HRTIMER_MODE_REL_PINNED);
while (!READ_ONCE(it.done))
do_idle();
- cpuidle_use_deepest_state(false);
+ cpuidle_use_deepest_state(0);
current->flags &= ~PF_IDLE;
preempt_fold_need_resched();
preempt_enable();
}
-EXPORT_SYMBOL_GPL(play_idle);
+EXPORT_SYMBOL_GPL(play_idle_precise);
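Existing play_idle() callers keep a microsecond interface via an inline wrapper (kept in include/linux/cpu.h by this series); sketched here only to illustrate the unit change, with U64_MAX meaning no latency constraint:

/* Sketch of the compatibility wrapper; illustrates the unit change only. */
static inline void play_idle(unsigned long duration_us)
{
	play_idle_precise(duration_us * NSEC_PER_USEC, U64_MAX);
}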
void cpu_startup_entry(enum cpuhp_state state)
{
@@ -385,21 +391,17 @@ static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
{
}
-static void set_next_task_idle(struct rq *rq, struct task_struct *next)
+static void set_next_task_idle(struct rq *rq, struct task_struct *next, bool first)
{
update_idle_core(rq);
schedstat_inc(rq->sched_goidle);
}
-static struct task_struct *
-pick_next_task_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
+struct task_struct *pick_next_task_idle(struct rq *rq)
{
struct task_struct *next = rq->idle;
- if (prev)
- put_prev_task(rq, prev);
-
- set_next_task_idle(rq, next);
+ set_next_task_idle(rq, next, true);
return next;
}
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 9b8adc01be3d..e591d40fd645 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1515,13 +1515,16 @@ static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flag
#endif
}
-static inline void set_next_task_rt(struct rq *rq, struct task_struct *p)
+static inline void set_next_task_rt(struct rq *rq, struct task_struct *p, bool first)
{
p->se.exec_start = rq_clock_task(rq);
/* The running task is never eligible for pushing */
dequeue_pushable_task(rq, p);
+ if (!first)
+ return;
+
/*
* If prev task was rt, put_prev_task() has already updated the
* utilization. We only care of the case where we start to schedule a
@@ -1564,18 +1567,15 @@ static struct task_struct *_pick_next_task_rt(struct rq *rq)
return rt_task_of(rt_se);
}
-static struct task_struct *
-pick_next_task_rt(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
+static struct task_struct *pick_next_task_rt(struct rq *rq)
{
struct task_struct *p;
- WARN_ON_ONCE(prev || rf);
-
if (!sched_rt_runnable(rq))
return NULL;
p = _pick_next_task_rt(rq);
- set_next_task_rt(rq, p);
+ set_next_task_rt(rq, p, true);
return p;
}
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index c8870c5bd7df..280a3c735935 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1713,22 +1713,10 @@ struct sched_class {
void (*check_preempt_curr)(struct rq *rq, struct task_struct *p, int flags);
- /*
- * Both @prev and @rf are optional and may be NULL, in which case the
- * caller must already have invoked put_prev_task(rq, prev, rf).
- *
- * Otherwise it is the responsibility of the pick_next_task() to call
- * put_prev_task() on the @prev task or something equivalent, IFF it
- * returns a next task.
- *
- * In that case (@rf != NULL) it may return RETRY_TASK when it finds a
- * higher prio class has runnable tasks.
- */
- struct task_struct * (*pick_next_task)(struct rq *rq,
- struct task_struct *prev,
- struct rq_flags *rf);
+ struct task_struct *(*pick_next_task)(struct rq *rq);
+
void (*put_prev_task)(struct rq *rq, struct task_struct *p);
- void (*set_next_task)(struct rq *rq, struct task_struct *p);
+ void (*set_next_task)(struct rq *rq, struct task_struct *p, bool first);
#ifdef CONFIG_SMP
int (*balance)(struct rq *rq, struct task_struct *prev, struct rq_flags *rf);
@@ -1780,7 +1768,7 @@ static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
static inline void set_next_task(struct rq *rq, struct task_struct *next)
{
WARN_ON_ONCE(rq->curr != next);
- next->sched_class->set_next_task(rq, next);
+ next->sched_class->set_next_task(rq, next, false);
}
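Every per-class conversion in this patch follows the same shape under the new contract: pick_next_task() takes only the rq, returns NULL when the class has nothing runnable, and passes first == true to set_next_task() so the full set-up runs. A condensed sketch with hypothetical names:

/* Hypothetical class showing the new, prev-less pick_next_task() shape. */
static struct task_struct *pick_next_task_foo(struct rq *rq)
{
	struct task_struct *p;

	if (!foo_runnable(rq))		/* hypothetical runnable check */
		return NULL;

	p = first_foo_task(rq);		/* hypothetical queue head */
	set_next_task_foo(rq, p, true);	/* first: do the full set-up */
	return p;
}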
#ifdef CONFIG_SMP
@@ -1821,6 +1809,9 @@ static inline bool sched_fair_runnable(struct rq *rq)
return rq->cfs.nr_running > 0;
}
+extern struct task_struct *pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf);
+extern struct task_struct *pick_next_task_idle(struct rq *rq);
+
#ifdef CONFIG_SMP
extern void update_group_capacity(struct sched_domain *sd, int cpu);
@@ -2309,7 +2300,7 @@ static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {}
#endif /* CONFIG_CPU_FREQ */
#ifdef CONFIG_UCLAMP_TASK
-enum uclamp_id uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id);
+unsigned int uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id);
static __always_inline
unsigned int uclamp_util_with(struct rq *rq, unsigned int util,
diff --git a/kernel/sched/stop_task.c b/kernel/sched/stop_task.c
index c0640739e05e..4c9e9975684f 100644
--- a/kernel/sched/stop_task.c
+++ b/kernel/sched/stop_task.c
@@ -29,20 +29,17 @@ check_preempt_curr_stop(struct rq *rq, struct task_struct *p, int flags)
/* we're never preempted */
}
-static void set_next_task_stop(struct rq *rq, struct task_struct *stop)
+static void set_next_task_stop(struct rq *rq, struct task_struct *stop, bool first)
{
stop->se.exec_start = rq_clock_task(rq);
}
-static struct task_struct *
-pick_next_task_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
+static struct task_struct *pick_next_task_stop(struct rq *rq)
{
- WARN_ON_ONCE(prev || rf);
-
if (!sched_stop_runnable(rq))
return NULL;
- set_next_task_stop(rq, rq->stop);
+ set_next_task_stop(rq, rq->stop, true);
return rq->stop;
}
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 49b835f1305f..6ec1e595b1d4 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -1201,16 +1201,13 @@ static void set_domain_attribute(struct sched_domain *sd,
if (!attr || attr->relax_domain_level < 0) {
if (default_relax_domain_level < 0)
return;
- else
- request = default_relax_domain_level;
+ request = default_relax_domain_level;
} else
request = attr->relax_domain_level;
- if (request < sd->level) {
+
+ if (sd->level > request) {
/* Turn off idle balance on this domain: */
sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
- } else {
- /* Turn on idle balance on this domain: */
- sd->flags |= (SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
}
}
diff --git a/kernel/signal.c b/kernel/signal.c
index c4da1ef56fdf..bcd46f547db3 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -2205,8 +2205,8 @@ static void ptrace_stop(int exit_code, int why, int clear_code, kernel_siginfo_t
*/
preempt_disable();
read_unlock(&tasklist_lock);
- preempt_enable_no_resched();
cgroup_enter_frozen();
+ preempt_enable_no_resched();
freezable_schedule();
cgroup_leave_frozen(true);
} else {
diff --git a/kernel/stacktrace.c b/kernel/stacktrace.c
index c9ea7eb2cb1a..2af66e449aa6 100644
--- a/kernel/stacktrace.c
+++ b/kernel/stacktrace.c
@@ -142,7 +142,7 @@ unsigned int stack_trace_save_tsk(struct task_struct *tsk, unsigned long *store,
.store = store,
.size = size,
/* skip this function if they are tracing us */
- .skip = skipnr + !!(current == tsk),
+ .skip = skipnr + (current == tsk),
};
if (!try_get_task_stack(tsk))
@@ -300,7 +300,7 @@ unsigned int stack_trace_save_tsk(struct task_struct *task,
.entries = store,
.max_entries = size,
/* skip this function if they are tracing us */
- .skip = skipnr + !!(current == task),
+ .skip = skipnr + (current == task),
};
save_stack_trace_tsk(task, &trace);
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 998d50ee2d9b..1fe34a9fabc2 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -235,6 +235,7 @@ static int multi_cpu_stop(void *data)
*/
touch_nmi_watchdog();
}
+ rcu_momentary_dyntick_idle();
} while (curstate != MULTI_STOP_EXIT);
local_irq_restore(flags);
diff --git a/kernel/sysctl-test.c b/kernel/sysctl-test.c
new file mode 100644
index 000000000000..2a63241a8453
--- /dev/null
+++ b/kernel/sysctl-test.c
@@ -0,0 +1,392 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KUnit test of proc sysctl.
+ */
+
+#include <kunit/test.h>
+#include <linux/sysctl.h>
+
+#define KUNIT_PROC_READ 0
+#define KUNIT_PROC_WRITE 1
+
+static int i_zero;
+static int i_one_hundred = 100;
+
+/*
+ * Test that proc_dointvec will not try to use a NULL .data field even when the
+ * length is non-zero.
+ */
+static void sysctl_test_api_dointvec_null_tbl_data(struct kunit *test)
+{
+ struct ctl_table null_data_table = {
+ .procname = "foo",
+ /*
+ * Here we are testing that proc_dointvec behaves correctly when
+ * we give it a NULL .data field. Normally this would point to a
+ * piece of memory where the value would be stored.
+ */
+ .data = NULL,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ .extra1 = &i_zero,
+ .extra2 = &i_one_hundred,
+ };
+ /*
+ * proc_dointvec expects a buffer in user space, so we allocate one. We
+ * also need to cast it to __user so sparse doesn't get mad.
+ */
+ void __user *buffer = (void __user *)kunit_kzalloc(test, sizeof(int),
+ GFP_USER);
+ size_t len;
+ loff_t pos;
+
+ /*
+ * We don't care what the starting length is since proc_dointvec should
+ * not try to read because .data is NULL.
+ */
+ len = 1234;
+ KUNIT_EXPECT_EQ(test, 0, proc_dointvec(&null_data_table,
+ KUNIT_PROC_READ, buffer, &len,
+ &pos));
+ KUNIT_EXPECT_EQ(test, (size_t)0, len);
+
+ /*
+ * See above.
+ */
+ len = 1234;
+ KUNIT_EXPECT_EQ(test, 0, proc_dointvec(&null_data_table,
+ KUNIT_PROC_WRITE, buffer, &len,
+ &pos));
+ KUNIT_EXPECT_EQ(test, (size_t)0, len);
+}
+
+/*
+ * Similar to the previous test, we create a struct ctl_table that has a .data
+ * field that proc_dointvec cannot do anything with; however, this time it is
+ * because we tell proc_dointvec that the size is 0.
+ */
+static void sysctl_test_api_dointvec_table_maxlen_unset(struct kunit *test)
+{
+ int data = 0;
+ struct ctl_table data_maxlen_unset_table = {
+ .procname = "foo",
+ .data = &data,
+ /*
+ * So .data is no longer NULL, but we tell proc_dointvec its
+ * length is 0, so it still shouldn't try to use it.
+ */
+ .maxlen = 0,
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ .extra1 = &i_zero,
+ .extra2 = &i_one_hundred,
+ };
+ void __user *buffer = (void __user *)kunit_kzalloc(test, sizeof(int),
+ GFP_USER);
+ size_t len;
+ loff_t pos;
+
+ /*
+ * As before, we don't care what the buffer length is: proc_dointvec
+ * cannot do anything because the table's .maxlen is zero.
+ */
+ len = 1234;
+ KUNIT_EXPECT_EQ(test, 0, proc_dointvec(&data_maxlen_unset_table,
+ KUNIT_PROC_READ, buffer, &len,
+ &pos));
+ KUNIT_EXPECT_EQ(test, (size_t)0, len);
+
+ /*
+ * See previous comment.
+ */
+ len = 1234;
+ KUNIT_EXPECT_EQ(test, 0, proc_dointvec(&data_maxlen_unset_table,
+ KUNIT_PROC_WRITE, buffer, &len,
+ &pos));
+ KUNIT_EXPECT_EQ(test, (size_t)0, len);
+}
+
+/*
+ * Here we provide a valid struct ctl_table, but we try to read and write from
+ * it using a buffer of zero length, so it should still fail in a similar way as
+ * before.
+ */
+static void sysctl_test_api_dointvec_table_len_is_zero(struct kunit *test)
+{
+ int data = 0;
+ /* Good table. */
+ struct ctl_table table = {
+ .procname = "foo",
+ .data = &data,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ .extra1 = &i_zero,
+ .extra2 = &i_one_hundred,
+ };
+ void __user *buffer = (void __user *)kunit_kzalloc(test, sizeof(int),
+ GFP_USER);
+ /*
+ * However, now our read/write buffer has zero length.
+ */
+ size_t len = 0;
+ loff_t pos;
+
+ KUNIT_EXPECT_EQ(test, 0, proc_dointvec(&table, KUNIT_PROC_READ, buffer,
+ &len, &pos));
+ KUNIT_EXPECT_EQ(test, (size_t)0, len);
+
+ KUNIT_EXPECT_EQ(test, 0, proc_dointvec(&table, KUNIT_PROC_WRITE, buffer,
+ &len, &pos));
+ KUNIT_EXPECT_EQ(test, (size_t)0, len);
+}
+
+/*
+ * Test that proc_dointvec refuses to read when the file position is non-zero.
+ */
+static void sysctl_test_api_dointvec_table_read_but_position_set(
+ struct kunit *test)
+{
+ int data = 0;
+ /* Good table. */
+ struct ctl_table table = {
+ .procname = "foo",
+ .data = &data,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ .extra1 = &i_zero,
+ .extra2 = &i_one_hundred,
+ };
+ void __user *buffer = (void __user *)kunit_kzalloc(test, sizeof(int),
+ GFP_USER);
+ /*
+ * We don't care about our buffer length because we start off with a
+ * non-zero file position.
+ */
+ size_t len = 1234;
+ /*
+ * proc_dointvec should refuse to read into the buffer since the file
+ * pos is non-zero.
+ */
+ loff_t pos = 1;
+
+ KUNIT_EXPECT_EQ(test, 0, proc_dointvec(&table, KUNIT_PROC_READ, buffer,
+ &len, &pos));
+ KUNIT_EXPECT_EQ(test, (size_t)0, len);
+}
+
+/*
+ * Test that we can read a two-digit number into a sufficiently sized buffer.
+ * Nothing fancy.
+ */
+static void sysctl_test_dointvec_read_happy_single_positive(struct kunit *test)
+{
+ int data = 0;
+ /* Good table. */
+ struct ctl_table table = {
+ .procname = "foo",
+ .data = &data,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ .extra1 = &i_zero,
+ .extra2 = &i_one_hundred,
+ };
+ size_t len = 4;
+ loff_t pos = 0;
+ char *buffer = kunit_kzalloc(test, len, GFP_USER);
+ char __user *user_buffer = (char __user *)buffer;
+ /* Store 13 in the data field. */
+ *((int *)table.data) = 13;
+
+ KUNIT_EXPECT_EQ(test, 0, proc_dointvec(&table, KUNIT_PROC_READ,
+ user_buffer, &len, &pos));
+ KUNIT_ASSERT_EQ(test, (size_t)3, len);
+ buffer[len] = '\0';
+ /* And we read 13 back out. */
+ KUNIT_EXPECT_STREQ(test, "13\n", buffer);
+}
+
+/*
+ * Same as previous test, just now with negative numbers.
+ */
+static void sysctl_test_dointvec_read_happy_single_negative(struct kunit *test)
+{
+ int data = 0;
+ /* Good table. */
+ struct ctl_table table = {
+ .procname = "foo",
+ .data = &data,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ .extra1 = &i_zero,
+ .extra2 = &i_one_hundred,
+ };
+ size_t len = 5;
+ loff_t pos = 0;
+ char *buffer = kunit_kzalloc(test, len, GFP_USER);
+ char __user *user_buffer = (char __user *)buffer;
+ *((int *)table.data) = -16;
+
+ KUNIT_EXPECT_EQ(test, 0, proc_dointvec(&table, KUNIT_PROC_READ,
+ user_buffer, &len, &pos));
+ KUNIT_ASSERT_EQ(test, (size_t)4, len);
+ buffer[len] = '\0';
+ KUNIT_EXPECT_STREQ(test, "-16\n", (char *)buffer);
+}
+
+/*
+ * Test that a simple positive write works.
+ */
+static void sysctl_test_dointvec_write_happy_single_positive(struct kunit *test)
+{
+ int data = 0;
+ /* Good table. */
+ struct ctl_table table = {
+ .procname = "foo",
+ .data = &data,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ .extra1 = &i_zero,
+ .extra2 = &i_one_hundred,
+ };
+ char input[] = "9";
+ size_t len = sizeof(input) - 1;
+ loff_t pos = 0;
+ char *buffer = kunit_kzalloc(test, len, GFP_USER);
+ char __user *user_buffer = (char __user *)buffer;
+
+ memcpy(buffer, input, len);
+
+ KUNIT_EXPECT_EQ(test, 0, proc_dointvec(&table, KUNIT_PROC_WRITE,
+ user_buffer, &len, &pos));
+ KUNIT_EXPECT_EQ(test, sizeof(input) - 1, len);
+ KUNIT_EXPECT_EQ(test, sizeof(input) - 1, (size_t)pos);
+ KUNIT_EXPECT_EQ(test, 9, *((int *)table.data));
+}
+
+/*
+ * Same as previous test, but now with negative numbers.
+ */
+static void sysctl_test_dointvec_write_happy_single_negative(struct kunit *test)
+{
+ int data = 0;
+ struct ctl_table table = {
+ .procname = "foo",
+ .data = &data,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ .extra1 = &i_zero,
+ .extra2 = &i_one_hundred,
+ };
+ char input[] = "-9";
+ size_t len = sizeof(input) - 1;
+ loff_t pos = 0;
+ char *buffer = kunit_kzalloc(test, len, GFP_USER);
+ char __user *user_buffer = (char __user *)buffer;
+
+ memcpy(buffer, input, len);
+
+ KUNIT_EXPECT_EQ(test, 0, proc_dointvec(&table, KUNIT_PROC_WRITE,
+ user_buffer, &len, &pos));
+ KUNIT_EXPECT_EQ(test, sizeof(input) - 1, len);
+ KUNIT_EXPECT_EQ(test, sizeof(input) - 1, (size_t)pos);
+ KUNIT_EXPECT_EQ(test, -9, *((int *)table.data));
+}
+
+/*
+ * Test that writing a value smaller than the minimum possible value is not
+ * allowed.
+ */
+static void sysctl_test_api_dointvec_write_single_less_int_min(
+ struct kunit *test)
+{
+ int data = 0;
+ struct ctl_table table = {
+ .procname = "foo",
+ .data = &data,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ .extra1 = &i_zero,
+ .extra2 = &i_one_hundred,
+ };
+ size_t max_len = 32, len = max_len;
+ loff_t pos = 0;
+ char *buffer = kunit_kzalloc(test, max_len, GFP_USER);
+ char __user *user_buffer = (char __user *)buffer;
+ unsigned long abs_of_less_than_min = (unsigned long)INT_MAX
+ - (INT_MAX + INT_MIN) + 1;
+
+ /*
+ * We use this rigmarole to create a string that contains a value one
+ * less than the minimum accepted value.
+ */
+ KUNIT_ASSERT_LT(test,
+ (size_t)snprintf(buffer, max_len, "-%lu",
+ abs_of_less_than_min),
+ max_len);
+
+ KUNIT_EXPECT_EQ(test, -EINVAL, proc_dointvec(&table, KUNIT_PROC_WRITE,
+ user_buffer, &len, &pos));
+ KUNIT_EXPECT_EQ(test, max_len, len);
+ KUNIT_EXPECT_EQ(test, 0, *((int *)table.data));
+}
+
+/*
+ * Test that writing a value greater than the maximum possible value is not
+ * allowed.
+ */
+static void sysctl_test_api_dointvec_write_single_greater_int_max(
+ struct kunit *test)
+{
+ int data = 0;
+ struct ctl_table table = {
+ .procname = "foo",
+ .data = &data,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ .extra1 = &i_zero,
+ .extra2 = &i_one_hundred,
+ };
+ size_t max_len = 32, len = max_len;
+ loff_t pos = 0;
+ char *buffer = kunit_kzalloc(test, max_len, GFP_USER);
+ char __user *user_buffer = (char __user *)buffer;
+ unsigned long greater_than_max = (unsigned long)INT_MAX + 1;
+
+ KUNIT_ASSERT_GT(test, greater_than_max, (unsigned long)INT_MAX);
+ KUNIT_ASSERT_LT(test, (size_t)snprintf(buffer, max_len, "%lu",
+ greater_than_max),
+ max_len);
+ KUNIT_EXPECT_EQ(test, -EINVAL, proc_dointvec(&table, KUNIT_PROC_WRITE,
+ user_buffer, &len, &pos));
+ KUNIT_ASSERT_EQ(test, max_len, len);
+ KUNIT_EXPECT_EQ(test, 0, *((int *)table.data));
+}
+
+static struct kunit_case sysctl_test_cases[] = {
+ KUNIT_CASE(sysctl_test_api_dointvec_null_tbl_data),
+ KUNIT_CASE(sysctl_test_api_dointvec_table_maxlen_unset),
+ KUNIT_CASE(sysctl_test_api_dointvec_table_len_is_zero),
+ KUNIT_CASE(sysctl_test_api_dointvec_table_read_but_position_set),
+ KUNIT_CASE(sysctl_test_dointvec_read_happy_single_positive),
+ KUNIT_CASE(sysctl_test_dointvec_read_happy_single_negative),
+ KUNIT_CASE(sysctl_test_dointvec_write_happy_single_positive),
+ KUNIT_CASE(sysctl_test_dointvec_write_happy_single_negative),
+ KUNIT_CASE(sysctl_test_api_dointvec_write_single_less_int_min),
+ KUNIT_CASE(sysctl_test_api_dointvec_write_single_greater_int_max),
+ {}
+};
+
+static struct kunit_suite sysctl_test_suite = {
+ .name = "sysctl_test",
+ .test_cases = sysctl_test_cases,
+};
+
+kunit_test_suite(sysctl_test_suite);
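
[Editor's note] The tail of the file above is the standard KUnit registration
boilerplate from this series. As a minimal sketch using only the macros visible
in the file, a new suite follows the same three-part pattern:

	#include <kunit/test.h>

	static void example_add_test(struct kunit *test)
	{
		/* EXPECT continues on failure; ASSERT aborts the test case. */
		KUNIT_EXPECT_EQ(test, 2 + 2, 4);
	}

	static struct kunit_case example_test_cases[] = {
		KUNIT_CASE(example_add_test),
		{}	/* NULL-terminated, as in sysctl_test_cases[] above */
	};

	static struct kunit_suite example_test_suite = {
		.name = "example",
		.test_cases = example_test_cases,
	};
	kunit_test_suite(example_test_suite);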
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index 65eb796610dc..069ca78fb0bf 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -771,7 +771,7 @@ int __do_adjtimex(struct __kernel_timex *txc, const struct timespec64 *ts,
/* fill PPS status fields */
pps_fill_timex(txc);
- txc->time.tv_sec = (time_t)ts->tv_sec;
+ txc->time.tv_sec = ts->tv_sec;
txc->time.tv_usec = ts->tv_nsec;
if (!(time_status & STA_NANO))
txc->time.tv_usec = ts->tv_nsec / NSEC_PER_USEC;
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 955851748dc3..8b192e67aabc 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -172,6 +172,7 @@ static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs)
#ifdef CONFIG_NO_HZ_FULL
cpumask_var_t tick_nohz_full_mask;
bool tick_nohz_full_running;
+EXPORT_SYMBOL_GPL(tick_nohz_full_running);
static atomic_t tick_dep_mask;
static bool check_tick_dependency(atomic_t *dep)
@@ -198,6 +199,11 @@ static bool check_tick_dependency(atomic_t *dep)
return true;
}
+ if (val & TICK_DEP_MASK_RCU) {
+ trace_tick_stop(0, TICK_DEP_MASK_RCU);
+ return true;
+ }
+
return false;
}
@@ -324,6 +330,7 @@ void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit)
preempt_enable();
}
}
+EXPORT_SYMBOL_GPL(tick_nohz_dep_set_cpu);
void tick_nohz_dep_clear_cpu(int cpu, enum tick_dep_bits bit)
{
@@ -331,6 +338,7 @@ void tick_nohz_dep_clear_cpu(int cpu, enum tick_dep_bits bit)
atomic_andnot(BIT(bit), &ts->tick_dep_mask);
}
+EXPORT_SYMBOL_GPL(tick_nohz_dep_clear_cpu);
/*
* Set a per-task tick dependency. Posix CPU timers need this in order to elapse
@@ -344,11 +352,13 @@ void tick_nohz_dep_set_task(struct task_struct *tsk, enum tick_dep_bits bit)
*/
tick_nohz_dep_set_all(&tsk->tick_dep_mask, bit);
}
+EXPORT_SYMBOL_GPL(tick_nohz_dep_set_task);
void tick_nohz_dep_clear_task(struct task_struct *tsk, enum tick_dep_bits bit)
{
atomic_andnot(BIT(bit), &tsk->tick_dep_mask);
}
+EXPORT_SYMBOL_GPL(tick_nohz_dep_clear_task);
/*
* Set a per-taskgroup tick dependency. Posix CPU timers need this in order to elapse
@@ -397,6 +407,7 @@ void __init tick_nohz_full_setup(cpumask_var_t cpumask)
cpumask_copy(tick_nohz_full_mask, cpumask);
tick_nohz_full_running = true;
}
+EXPORT_SYMBOL_GPL(tick_nohz_full_setup);
static int tick_nohz_cpu_down(unsigned int cpu)
{
@@ -1119,7 +1130,7 @@ static void tick_nohz_account_idle_ticks(struct tick_sched *ts)
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
unsigned long ticks;
- if (vtime_accounting_cpu_enabled())
+ if (vtime_accounting_enabled_this_cpu())
return;
/*
* We stopped the tick in idle. Update process times would miss the
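
[Editor's note] The new TICK_DEP_MASK_RCU dependency and the EXPORT_SYMBOL_GPL()
additions above let modular users (rcutorture appears to be the target) pin the
tick on a nohz_full CPU. A hedged sketch, assuming the tick_dep_set_cpu() /
tick_dep_clear_cpu() wrappers in <linux/tick.h> forward to the newly exported
helpers:

	#include <linux/tick.h>

	static void run_with_tick(int cpu)
	{
		/* Keep the tick running on @cpu; traced as TICK_DEP_MASK_RCU. */
		tick_dep_set_cpu(cpu, TICK_DEP_BIT_RCU);

		/* ... tick-sensitive work ... */

		tick_dep_clear_cpu(cpu, TICK_DEP_BIT_RCU);
	}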
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 2d6e93ab0478..475e29498bca 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -64,8 +64,7 @@ static void blk_unregister_tracepoints(void);
* Send out a notify message.
*/
static void trace_note(struct blk_trace *bt, pid_t pid, int action,
- const void *data, size_t len,
- union kernfs_node_id *cgid)
+ const void *data, size_t len, u64 cgid)
{
struct blk_io_trace *t;
struct ring_buffer_event *event = NULL;
@@ -73,7 +72,7 @@ static void trace_note(struct blk_trace *bt, pid_t pid, int action,
int pc = 0;
int cpu = smp_processor_id();
bool blk_tracer = blk_tracer_enabled;
- ssize_t cgid_len = cgid ? sizeof(*cgid) : 0;
+ ssize_t cgid_len = cgid ? sizeof(cgid) : 0;
if (blk_tracer) {
buffer = blk_tr->trace_buffer.buffer;
@@ -100,8 +99,8 @@ record_it:
t->pid = pid;
t->cpu = cpu;
t->pdu_len = len + cgid_len;
- if (cgid)
- memcpy((void *)t + sizeof(*t), cgid, cgid_len);
+ if (cgid_len)
+ memcpy((void *)t + sizeof(*t), &cgid, cgid_len);
memcpy((void *) t + sizeof(*t) + cgid_len, data, len);
if (blk_tracer)
@@ -122,7 +121,7 @@ static void trace_note_tsk(struct task_struct *tsk)
spin_lock_irqsave(&running_trace_lock, flags);
list_for_each_entry(bt, &running_trace_list, running_list) {
trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm,
- sizeof(tsk->comm), NULL);
+ sizeof(tsk->comm), 0);
}
spin_unlock_irqrestore(&running_trace_lock, flags);
}
@@ -139,7 +138,7 @@ static void trace_note_time(struct blk_trace *bt)
words[1] = now.tv_nsec;
local_irq_save(flags);
- trace_note(bt, 0, BLK_TN_TIMESTAMP, words, sizeof(words), NULL);
+ trace_note(bt, 0, BLK_TN_TIMESTAMP, words, sizeof(words), 0);
local_irq_restore(flags);
}
@@ -172,9 +171,9 @@ void __trace_note_message(struct blk_trace *bt, struct blkcg *blkcg,
blkcg = NULL;
#ifdef CONFIG_BLK_CGROUP
trace_note(bt, 0, BLK_TN_MESSAGE, buf, n,
- blkcg ? cgroup_get_kernfs_id(blkcg->css.cgroup) : NULL);
+ blkcg ? cgroup_id(blkcg->css.cgroup) : 1);
#else
- trace_note(bt, 0, BLK_TN_MESSAGE, buf, n, NULL);
+ trace_note(bt, 0, BLK_TN_MESSAGE, buf, n, 0);
#endif
local_irq_restore(flags);
}
@@ -212,7 +211,7 @@ static const u32 ddir_act[2] = { BLK_TC_ACT(BLK_TC_READ),
*/
static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
int op, int op_flags, u32 what, int error, int pdu_len,
- void *pdu_data, union kernfs_node_id *cgid)
+ void *pdu_data, u64 cgid)
{
struct task_struct *tsk = current;
struct ring_buffer_event *event = NULL;
@@ -223,7 +222,7 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
pid_t pid;
int cpu, pc = 0;
bool blk_tracer = blk_tracer_enabled;
- ssize_t cgid_len = cgid ? sizeof(*cgid) : 0;
+ ssize_t cgid_len = cgid ? sizeof(cgid) : 0;
if (unlikely(bt->trace_state != Blktrace_running && !blk_tracer))
return;
@@ -294,7 +293,7 @@ record_it:
t->pdu_len = pdu_len + cgid_len;
if (cgid_len)
- memcpy((void *)t + sizeof(*t), cgid, cgid_len);
+ memcpy((void *)t + sizeof(*t), &cgid, cgid_len);
if (pdu_len)
memcpy((void *)t + sizeof(*t) + cgid_len, pdu_data, pdu_len);
@@ -751,31 +750,29 @@ void blk_trace_shutdown(struct request_queue *q)
}
#ifdef CONFIG_BLK_CGROUP
-static union kernfs_node_id *
-blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
+static u64 blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
{
struct blk_trace *bt = q->blk_trace;
if (!bt || !(blk_tracer_flags.val & TRACE_BLK_OPT_CGROUP))
- return NULL;
+ return 0;
if (!bio->bi_blkg)
- return NULL;
- return cgroup_get_kernfs_id(bio_blkcg(bio)->css.cgroup);
+ return 0;
+ return cgroup_id(bio_blkcg(bio)->css.cgroup);
}
#else
-static union kernfs_node_id *
-blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
+u64 blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
{
- return NULL;
+ return 0;
}
#endif
-static union kernfs_node_id *
+static u64
blk_trace_request_get_cgid(struct request_queue *q, struct request *rq)
{
if (!rq->bio)
- return NULL;
+ return 0;
/* Use the first bio */
return blk_trace_bio_get_cgid(q, rq->bio);
}
@@ -797,8 +794,7 @@ blk_trace_request_get_cgid(struct request_queue *q, struct request *rq)
*
**/
static void blk_add_trace_rq(struct request *rq, int error,
- unsigned int nr_bytes, u32 what,
- union kernfs_node_id *cgid)
+ unsigned int nr_bytes, u32 what, u64 cgid)
{
struct blk_trace *bt = rq->q->blk_trace;
@@ -913,7 +909,7 @@ static void blk_add_trace_getrq(void *ignore,
if (bt)
__blk_add_trace(bt, 0, 0, rw, 0, BLK_TA_GETRQ, 0, 0,
- NULL, NULL);
+ NULL, 0);
}
}
@@ -929,7 +925,7 @@ static void blk_add_trace_sleeprq(void *ignore,
if (bt)
__blk_add_trace(bt, 0, 0, rw, 0, BLK_TA_SLEEPRQ,
- 0, 0, NULL, NULL);
+ 0, 0, NULL, 0);
}
}
@@ -938,7 +934,7 @@ static void blk_add_trace_plug(void *ignore, struct request_queue *q)
struct blk_trace *bt = q->blk_trace;
if (bt)
- __blk_add_trace(bt, 0, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL, NULL);
+ __blk_add_trace(bt, 0, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL, 0);
}
static void blk_add_trace_unplug(void *ignore, struct request_queue *q,
@@ -955,7 +951,7 @@ static void blk_add_trace_unplug(void *ignore, struct request_queue *q,
else
what = BLK_TA_UNPLUG_TIMER;
- __blk_add_trace(bt, 0, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu, NULL);
+ __blk_add_trace(bt, 0, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu, 0);
}
}
@@ -1172,19 +1168,17 @@ const struct blk_io_trace *te_blk_io_trace(const struct trace_entry *ent)
static inline const void *pdu_start(const struct trace_entry *ent, bool has_cg)
{
- return (void *)(te_blk_io_trace(ent) + 1) +
- (has_cg ? sizeof(union kernfs_node_id) : 0);
+ return (void *)(te_blk_io_trace(ent) + 1) + (has_cg ? sizeof(u64) : 0);
}
-static inline const void *cgid_start(const struct trace_entry *ent)
+static inline u64 t_cgid(const struct trace_entry *ent)
{
- return (void *)(te_blk_io_trace(ent) + 1);
+ return *(u64 *)(te_blk_io_trace(ent) + 1);
}
static inline int pdu_real_len(const struct trace_entry *ent, bool has_cg)
{
- return te_blk_io_trace(ent)->pdu_len -
- (has_cg ? sizeof(union kernfs_node_id) : 0);
+ return te_blk_io_trace(ent)->pdu_len - (has_cg ? sizeof(u64) : 0);
}
static inline u32 t_action(const struct trace_entry *ent)
@@ -1257,7 +1251,7 @@ static void blk_log_action(struct trace_iterator *iter, const char *act,
fill_rwbs(rwbs, t);
if (has_cg) {
- const union kernfs_node_id *id = cgid_start(iter->ent);
+ u64 id = t_cgid(iter->ent);
if (blk_tracer_flags.val & TRACE_BLK_OPT_CGNAME) {
char blkcg_name_buf[NAME_MAX + 1] = "<...>";
@@ -1267,11 +1261,25 @@ static void blk_log_action(struct trace_iterator *iter, const char *act,
trace_seq_printf(&iter->seq, "%3d,%-3d %s %2s %3s ",
MAJOR(t->device), MINOR(t->device),
blkcg_name_buf, act, rwbs);
- } else
+ } else {
+ /*
+ * The cgid portion used to be "INO,GEN". Userland
+ * builds a FILEID_INO32_GEN fid out of them and
+ * opens the cgroup using open_by_handle_at(2).
+ * While 32bit ino setups are still the same, 64bit
+ * ones now use the 64bit ino as the whole ID and
+ * no longer use generation.
+ *
+ * Regardless of the content, always output
+ * "LOW32,HIGH32" so that FILEID_INO32_GEN fid can
+ * be mapped back to @id on both 64 and 32bit ino
+ * setups. See __kernfs_fh_to_dentry().
+ */
trace_seq_printf(&iter->seq,
- "%3d,%-3d %x,%-x %2s %3s ",
+ "%3d,%-3d %llx,%-llx %2s %3s ",
MAJOR(t->device), MINOR(t->device),
- id->ino, id->generation, act, rwbs);
+ id & U32_MAX, id >> 32, act, rwbs);
+ }
} else
trace_seq_printf(&iter->seq, "%3d,%-3d %2s %3s ",
MAJOR(t->device), MINOR(t->device), act, rwbs);
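
[Editor's note] Per the comment in blk_log_action() above, userland folds the
printed "LOW32,HIGH32" pair back into a FILEID_INO32_GEN handle for
open_by_handle_at(2). A userspace-side sketch (the struct layout here is an
assumption mirroring the kernel's fid and is not part of this patch):

	#include <stdint.h>

	struct ino32_gen_fid {			/* assumed FILEID_INO32_GEN layout */
		uint32_t ino;
		uint32_t gen;
	};

	static void cgid_to_fid(uint32_t low, uint32_t high,
				struct ino32_gen_fid *fid)
	{
		uint64_t id = ((uint64_t)high << 32) | low;

		fid->ino = id & 0xffffffff;	/* 32-bit ino setups: the whole id */
		fid->gen = id >> 32;		/* 64-bit ino setups: upper half */
	}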
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 44bd08f2443b..ffc91d4935ac 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -138,24 +138,140 @@ static const struct bpf_func_proto bpf_override_return_proto = {
};
#endif
-BPF_CALL_3(bpf_probe_read, void *, dst, u32, size, const void *, unsafe_ptr)
+BPF_CALL_3(bpf_probe_read_user, void *, dst, u32, size,
+ const void __user *, unsafe_ptr)
{
- int ret;
+ int ret = probe_user_read(dst, unsafe_ptr, size);
- ret = security_locked_down(LOCKDOWN_BPF_READ);
- if (ret < 0)
- goto out;
+ if (unlikely(ret < 0))
+ memset(dst, 0, size);
+
+ return ret;
+}
+
+static const struct bpf_func_proto bpf_probe_read_user_proto = {
+ .func = bpf_probe_read_user,
+ .gpl_only = true,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_UNINIT_MEM,
+ .arg2_type = ARG_CONST_SIZE_OR_ZERO,
+ .arg3_type = ARG_ANYTHING,
+};
+
+BPF_CALL_3(bpf_probe_read_user_str, void *, dst, u32, size,
+ const void __user *, unsafe_ptr)
+{
+ int ret = strncpy_from_unsafe_user(dst, unsafe_ptr, size);
+
+ if (unlikely(ret < 0))
+ memset(dst, 0, size);
+
+ return ret;
+}
+
+static const struct bpf_func_proto bpf_probe_read_user_str_proto = {
+ .func = bpf_probe_read_user_str,
+ .gpl_only = true,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_UNINIT_MEM,
+ .arg2_type = ARG_CONST_SIZE_OR_ZERO,
+ .arg3_type = ARG_ANYTHING,
+};
- ret = probe_kernel_read(dst, unsafe_ptr, size);
+static __always_inline int
+bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr,
+ const bool compat)
+{
+ int ret = security_locked_down(LOCKDOWN_BPF_READ);
+
+ if (unlikely(ret < 0))
+ goto out;
+ ret = compat ? probe_kernel_read(dst, unsafe_ptr, size) :
+ probe_kernel_read_strict(dst, unsafe_ptr, size);
if (unlikely(ret < 0))
out:
memset(dst, 0, size);
+ return ret;
+}
+
+BPF_CALL_3(bpf_probe_read_kernel, void *, dst, u32, size,
+ const void *, unsafe_ptr)
+{
+ return bpf_probe_read_kernel_common(dst, size, unsafe_ptr, false);
+}
+
+static const struct bpf_func_proto bpf_probe_read_kernel_proto = {
+ .func = bpf_probe_read_kernel,
+ .gpl_only = true,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_UNINIT_MEM,
+ .arg2_type = ARG_CONST_SIZE_OR_ZERO,
+ .arg3_type = ARG_ANYTHING,
+};
+BPF_CALL_3(bpf_probe_read_compat, void *, dst, u32, size,
+ const void *, unsafe_ptr)
+{
+ return bpf_probe_read_kernel_common(dst, size, unsafe_ptr, true);
+}
+
+static const struct bpf_func_proto bpf_probe_read_compat_proto = {
+ .func = bpf_probe_read_compat,
+ .gpl_only = true,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_UNINIT_MEM,
+ .arg2_type = ARG_CONST_SIZE_OR_ZERO,
+ .arg3_type = ARG_ANYTHING,
+};
+
+static __always_inline int
+bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr,
+ const bool compat)
+{
+ int ret = security_locked_down(LOCKDOWN_BPF_READ);
+
+ if (unlikely(ret < 0))
+ goto out;
+ /*
+ * The strncpy_from_unsafe_*() call will likely not fill the entire
+ * buffer, but that's okay in this circumstance as we're probing
+ * arbitrary memory anyway similar to bpf_probe_read_*() and might
+ * as well probe the stack. Thus, memory is explicitly cleared
+ * only in error case, so that improper users ignoring return
+ * code altogether don't copy garbage; otherwise length of string
+ * is returned that can be used for bpf_perf_event_output() et al.
+ */
+ ret = compat ? strncpy_from_unsafe(dst, unsafe_ptr, size) :
+ strncpy_from_unsafe_strict(dst, unsafe_ptr, size);
+ if (unlikely(ret < 0))
+out:
+ memset(dst, 0, size);
return ret;
}
-static const struct bpf_func_proto bpf_probe_read_proto = {
- .func = bpf_probe_read,
+BPF_CALL_3(bpf_probe_read_kernel_str, void *, dst, u32, size,
+ const void *, unsafe_ptr)
+{
+ return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr, false);
+}
+
+static const struct bpf_func_proto bpf_probe_read_kernel_str_proto = {
+ .func = bpf_probe_read_kernel_str,
+ .gpl_only = true,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_UNINIT_MEM,
+ .arg2_type = ARG_CONST_SIZE_OR_ZERO,
+ .arg3_type = ARG_ANYTHING,
+};
+
+BPF_CALL_3(bpf_probe_read_compat_str, void *, dst, u32, size,
+ const void *, unsafe_ptr)
+{
+ return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr, true);
+}
+
+static const struct bpf_func_proto bpf_probe_read_compat_str_proto = {
+ .func = bpf_probe_read_compat_str,
.gpl_only = true,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_UNINIT_MEM,
@@ -163,7 +279,7 @@ static const struct bpf_func_proto bpf_probe_read_proto = {
.arg3_type = ARG_ANYTHING,
};
-BPF_CALL_3(bpf_probe_write_user, void *, unsafe_ptr, const void *, src,
+BPF_CALL_3(bpf_probe_write_user, void __user *, unsafe_ptr, const void *, src,
u32, size)
{
/*
@@ -186,10 +302,8 @@ BPF_CALL_3(bpf_probe_write_user, void *, unsafe_ptr, const void *, src,
return -EPERM;
if (unlikely(!nmi_uaccess_okay()))
return -EPERM;
- if (!access_ok(unsafe_ptr, size))
- return -EPERM;
- return probe_kernel_write(unsafe_ptr, src, size);
+ return probe_user_write(unsafe_ptr, src, size);
}
static const struct bpf_func_proto bpf_probe_write_user_proto = {
@@ -585,41 +699,6 @@ static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
.arg2_type = ARG_ANYTHING,
};
-BPF_CALL_3(bpf_probe_read_str, void *, dst, u32, size,
- const void *, unsafe_ptr)
-{
- int ret;
-
- ret = security_locked_down(LOCKDOWN_BPF_READ);
- if (ret < 0)
- goto out;
-
- /*
- * The strncpy_from_unsafe() call will likely not fill the entire
- * buffer, but that's okay in this circumstance as we're probing
- * arbitrary memory anyway similar to bpf_probe_read() and might
- * as well probe the stack. Thus, memory is explicitly cleared
- * only in error case, so that improper users ignoring return
- * code altogether don't copy garbage; otherwise length of string
- * is returned that can be used for bpf_perf_event_output() et al.
- */
- ret = strncpy_from_unsafe(dst, unsafe_ptr, size);
- if (unlikely(ret < 0))
-out:
- memset(dst, 0, size);
-
- return ret;
-}
-
-static const struct bpf_func_proto bpf_probe_read_str_proto = {
- .func = bpf_probe_read_str,
- .gpl_only = true,
- .ret_type = RET_INTEGER,
- .arg1_type = ARG_PTR_TO_UNINIT_MEM,
- .arg2_type = ARG_CONST_SIZE_OR_ZERO,
- .arg3_type = ARG_ANYTHING,
-};
-
struct send_signal_irq_work {
struct irq_work irq_work;
struct task_struct *task;
@@ -699,8 +778,6 @@ tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_map_pop_elem_proto;
case BPF_FUNC_map_peek_elem:
return &bpf_map_peek_elem_proto;
- case BPF_FUNC_probe_read:
- return &bpf_probe_read_proto;
case BPF_FUNC_ktime_get_ns:
return &bpf_ktime_get_ns_proto;
case BPF_FUNC_tail_call:
@@ -727,8 +804,18 @@ tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_current_task_under_cgroup_proto;
case BPF_FUNC_get_prandom_u32:
return &bpf_get_prandom_u32_proto;
+ case BPF_FUNC_probe_read_user:
+ return &bpf_probe_read_user_proto;
+ case BPF_FUNC_probe_read_kernel:
+ return &bpf_probe_read_kernel_proto;
+ case BPF_FUNC_probe_read:
+ return &bpf_probe_read_compat_proto;
+ case BPF_FUNC_probe_read_user_str:
+ return &bpf_probe_read_user_str_proto;
+ case BPF_FUNC_probe_read_kernel_str:
+ return &bpf_probe_read_kernel_str_proto;
case BPF_FUNC_probe_read_str:
- return &bpf_probe_read_str_proto;
+ return &bpf_probe_read_compat_str_proto;
#ifdef CONFIG_CGROUPS
case BPF_FUNC_get_current_cgroup_id:
return &bpf_get_current_cgroup_id_proto;
@@ -995,6 +1082,8 @@ static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = {
.arg5_type = ARG_CONST_SIZE_OR_ZERO,
};
+extern const struct bpf_func_proto bpf_skb_output_proto;
+
BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args,
struct bpf_map *, map, u64, flags)
{
@@ -1062,13 +1151,25 @@ raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
}
}
+static const struct bpf_func_proto *
+tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
+{
+ switch (func_id) {
+#ifdef CONFIG_NET
+ case BPF_FUNC_skb_output:
+ return &bpf_skb_output_proto;
+#endif
+ default:
+ return raw_tp_prog_func_proto(func_id, prog);
+ }
+}
+
static bool raw_tp_prog_is_valid_access(int off, int size,
enum bpf_access_type type,
const struct bpf_prog *prog,
struct bpf_insn_access_aux *info)
{
- /* largest tracepoint in the kernel has 12 args */
- if (off < 0 || off >= sizeof(__u64) * 12)
+ if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
return false;
if (type != BPF_READ)
return false;
@@ -1077,6 +1178,20 @@ static bool raw_tp_prog_is_valid_access(int off, int size,
return true;
}
+static bool tracing_prog_is_valid_access(int off, int size,
+ enum bpf_access_type type,
+ const struct bpf_prog *prog,
+ struct bpf_insn_access_aux *info)
+{
+ if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
+ return false;
+ if (type != BPF_READ)
+ return false;
+ if (off % size != 0)
+ return false;
+ return btf_ctx_access(off, size, type, prog, info);
+}
+
const struct bpf_verifier_ops raw_tracepoint_verifier_ops = {
.get_func_proto = raw_tp_prog_func_proto,
.is_valid_access = raw_tp_prog_is_valid_access,
@@ -1085,6 +1200,14 @@ const struct bpf_verifier_ops raw_tracepoint_verifier_ops = {
const struct bpf_prog_ops raw_tracepoint_prog_ops = {
};
+const struct bpf_verifier_ops tracing_verifier_ops = {
+ .get_func_proto = tracing_prog_func_proto,
+ .is_valid_access = tracing_prog_is_valid_access,
+};
+
+const struct bpf_prog_ops tracing_prog_ops = {
+};
+
static bool raw_tp_writable_prog_is_valid_access(int off, int size,
enum bpf_access_type type,
const struct bpf_prog *prog,
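
[Editor's note] On the program side, the legacy bpf_probe_read() now resolves to
the compat variants above, and new code is expected to pick the user/kernel
helper explicitly. A hedged libbpf-style fragment (the section name,
PT_REGS_PARM2() and the helper wrappers from bpf_helpers.h are assumptions, not
shown in this patch):

	SEC("kprobe/do_sys_open")
	int trace_open(struct pt_regs *ctx)
	{
		char fname[64];
		const char *name = (const char *)PT_REGS_PARM2(ctx);

		/* User pointer: use the _user variant, not bpf_probe_read(). */
		bpf_probe_read_user_str(fname, sizeof(fname), name);
		return 0;
	}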
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index f296d89be757..5259d4dea675 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -2494,14 +2494,14 @@ struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter)
}
static int
-ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
+ftrace_nop_initialize(struct module *mod, struct dyn_ftrace *rec)
{
int ret;
if (unlikely(ftrace_disabled))
return 0;
- ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
+ ret = ftrace_init_nop(mod, rec);
if (ret) {
ftrace_bug_type = FTRACE_BUG_INIT;
ftrace_bug(ret, rec);
@@ -2943,7 +2943,7 @@ static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs)
* to the NOP instructions.
*/
if (!__is_defined(CC_USING_NOP_MCOUNT) &&
- !ftrace_code_disable(mod, p))
+ !ftrace_nop_initialize(mod, p))
break;
update_cnt++;
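
[Editor's note] ftrace_init_nop() is a new arch hook; this hunk only switches
the caller. The companion header change (not shown here) presumably provides a
default that preserves the old behaviour, roughly:

	/* include/linux/ftrace.h (assumed default, not part of this hunk) */
	#ifndef ftrace_init_nop
	static inline int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
	{
		return ftrace_make_nop(mod, rec, MCOUNT_ADDR);
	}
	#endif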
diff --git a/kernel/trace/trace_benchmark.c b/kernel/trace/trace_benchmark.c
index 80e0b2aca703..2e9a4746ea85 100644
--- a/kernel/trace/trace_benchmark.c
+++ b/kernel/trace/trace_benchmark.c
@@ -178,14 +178,14 @@ static int benchmark_event_kthread(void *arg)
int trace_benchmark_reg(void)
{
if (!ok_to_run) {
- pr_warning("trace benchmark cannot be started via kernel command line\n");
+ pr_warn("trace benchmark cannot be started via kernel command line\n");
return -EBUSY;
}
bm_event_thread = kthread_run(benchmark_event_kthread,
NULL, "event_benchmark");
if (IS_ERR(bm_event_thread)) {
- pr_warning("trace benchmark failed to create kernel thread\n");
+ pr_warn("trace benchmark failed to create kernel thread\n");
return PTR_ERR(bm_event_thread);
}
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
index a9dfa04ffa44..643e0b19920d 100644
--- a/kernel/trace/trace_event_perf.c
+++ b/kernel/trace/trace_event_perf.c
@@ -8,6 +8,7 @@
#include <linux/module.h>
#include <linux/kprobes.h>
+#include <linux/security.h>
#include "trace.h"
#include "trace_probe.h"
@@ -26,8 +27,10 @@ static int total_ref_count;
static int perf_trace_event_perm(struct trace_event_call *tp_event,
struct perf_event *p_event)
{
+ int ret;
+
if (tp_event->perf_perm) {
- int ret = tp_event->perf_perm(tp_event, p_event);
+ ret = tp_event->perf_perm(tp_event, p_event);
if (ret)
return ret;
}
@@ -46,8 +49,9 @@ static int perf_trace_event_perm(struct trace_event_call *tp_event,
/* The ftrace function trace is allowed only for root. */
if (ftrace_event_is_function(tp_event)) {
- if (perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
- return -EPERM;
+ ret = perf_allow_tracepoint(&p_event->attr);
+ if (ret)
+ return ret;
if (!is_sampling_event(p_event))
return 0;
@@ -82,8 +86,9 @@ static int perf_trace_event_perm(struct trace_event_call *tp_event,
* ...otherwise raw tracepoint data can be a severe data leak,
* only allow root to have these.
*/
- if (perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
- return -EPERM;
+ ret = perf_allow_tracepoint(&p_event->attr);
+ if (ret)
+ return ret;
return 0;
}
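
[Editor's note] perf_allow_tracepoint() is assumed to bundle the old
paranoid/capability test with the new LSM hook, so callers can propagate an
LSM-specific error instead of a bare -EPERM. A hedged sketch of the helper (its
definition lives in perf_event.h, not in this hunk):

	static inline int perf_allow_tracepoint(struct perf_event_attr *attr)
	{
		if (sysctl_perf_event_paranoid > -1 && !capable(CAP_SYS_ADMIN))
			return -EPERM;

		return security_perf_event_open(attr, PERF_SECURITY_TRACEPOINT);
	}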
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index bc2e09a8ea61..bc88fd939f4e 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -248,7 +248,7 @@ struct workqueue_struct {
struct list_head flusher_overflow; /* WQ: flush overflow list */
struct list_head maydays; /* MD: pwqs requesting rescue */
- struct worker *rescuer; /* I: rescue worker */
+ struct worker *rescuer; /* MD: rescue worker */
int nr_drainers; /* WQ: drain in progress */
int saved_max_active; /* WQ: saved pwq max_active */
@@ -355,6 +355,7 @@ EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq);
static int worker_thread(void *__worker);
static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
+static void show_pwq(struct pool_workqueue *pwq);
#define CREATE_TRACE_POINTS
#include <trace/events/workqueue.h>
@@ -364,11 +365,6 @@ static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
!lockdep_is_held(&wq_pool_mutex), \
"RCU or wq_pool_mutex should be held")
-#define assert_rcu_or_wq_mutex(wq) \
- RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \
- !lockdep_is_held(&wq->mutex), \
- "RCU or wq->mutex should be held")
-
#define assert_rcu_or_wq_mutex_or_pool_mutex(wq) \
RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \
!lockdep_is_held(&wq->mutex) && \
@@ -425,9 +421,8 @@ static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
* ignored.
*/
#define for_each_pwq(pwq, wq) \
- list_for_each_entry_rcu((pwq), &(wq)->pwqs, pwqs_node) \
- if (({ assert_rcu_or_wq_mutex(wq); false; })) { } \
- else
+ list_for_each_entry_rcu((pwq), &(wq)->pwqs, pwqs_node, \
+ lockdep_is_held(&(wq->mutex)))
#ifdef CONFIG_DEBUG_OBJECTS_WORK
@@ -2532,8 +2527,14 @@ repeat:
*/
if (need_to_create_worker(pool)) {
spin_lock(&wq_mayday_lock);
- get_pwq(pwq);
- list_move_tail(&pwq->mayday_node, &wq->maydays);
+ /*
+ * Queue iff we aren't racing destruction
+ * and somebody else hasn't queued it already.
+ */
+ if (wq->rescuer && list_empty(&pwq->mayday_node)) {
+ get_pwq(pwq);
+ list_add_tail(&pwq->mayday_node, &wq->maydays);
+ }
spin_unlock(&wq_mayday_lock);
}
}
@@ -4314,6 +4315,22 @@ err_destroy:
}
EXPORT_SYMBOL_GPL(alloc_workqueue);
+static bool pwq_busy(struct pool_workqueue *pwq)
+{
+ int i;
+
+ for (i = 0; i < WORK_NR_COLORS; i++)
+ if (pwq->nr_in_flight[i])
+ return true;
+
+ if ((pwq != pwq->wq->dfl_pwq) && (pwq->refcnt > 1))
+ return true;
+ if (pwq->nr_active || !list_empty(&pwq->delayed_works))
+ return true;
+
+ return false;
+}
+
/**
* destroy_workqueue - safely terminate a workqueue
* @wq: target workqueue
@@ -4325,31 +4342,51 @@ void destroy_workqueue(struct workqueue_struct *wq)
struct pool_workqueue *pwq;
int node;
+ /*
+ * Remove it from sysfs first so that sanity check failure doesn't
+ * lead to sysfs name conflicts.
+ */
+ workqueue_sysfs_unregister(wq);
+
/* drain it before proceeding with destruction */
drain_workqueue(wq);
- /* sanity checks */
- mutex_lock(&wq->mutex);
- for_each_pwq(pwq, wq) {
- int i;
+ /* kill rescuer, if sanity checks fail, leave it w/o rescuer */
+ if (wq->rescuer) {
+ struct worker *rescuer = wq->rescuer;
- for (i = 0; i < WORK_NR_COLORS; i++) {
- if (WARN_ON(pwq->nr_in_flight[i])) {
- mutex_unlock(&wq->mutex);
- show_workqueue_state();
- return;
- }
- }
+ /* this prevents new queueing */
+ spin_lock_irq(&wq_mayday_lock);
+ wq->rescuer = NULL;
+ spin_unlock_irq(&wq_mayday_lock);
+
+ /* rescuer will empty maydays list before exiting */
+ kthread_stop(rescuer->task);
+ kfree(rescuer);
+ }
- if (WARN_ON((pwq != wq->dfl_pwq) && (pwq->refcnt > 1)) ||
- WARN_ON(pwq->nr_active) ||
- WARN_ON(!list_empty(&pwq->delayed_works))) {
+ /*
+ * Sanity checks - grab all the locks so that we wait for all
+ * in-flight operations which may do put_pwq().
+ */
+ mutex_lock(&wq_pool_mutex);
+ mutex_lock(&wq->mutex);
+ for_each_pwq(pwq, wq) {
+ spin_lock_irq(&pwq->pool->lock);
+ if (WARN_ON(pwq_busy(pwq))) {
+ pr_warn("%s: %s has the following busy pwq\n",
+ __func__, wq->name);
+ show_pwq(pwq);
+ spin_unlock_irq(&pwq->pool->lock);
mutex_unlock(&wq->mutex);
+ mutex_unlock(&wq_pool_mutex);
show_workqueue_state();
return;
}
+ spin_unlock_irq(&pwq->pool->lock);
}
mutex_unlock(&wq->mutex);
+ mutex_unlock(&wq_pool_mutex);
/*
* wq list is used to freeze wq, remove from list after
@@ -4359,11 +4396,6 @@ void destroy_workqueue(struct workqueue_struct *wq)
list_del_rcu(&wq->list);
mutex_unlock(&wq_pool_mutex);
- workqueue_sysfs_unregister(wq);
-
- if (wq->rescuer)
- kthread_stop(wq->rescuer->task);
-
if (!(wq->flags & WQ_UNBOUND)) {
wq_unregister_lockdep(wq);
/*
@@ -4638,7 +4670,8 @@ static void show_pwq(struct pool_workqueue *pwq)
pr_info(" pwq %d:", pool->id);
pr_cont_pool_info(pool);
- pr_cont(" active=%d/%d%s\n", pwq->nr_active, pwq->max_active,
+ pr_cont(" active=%d/%d refcnt=%d%s\n",
+ pwq->nr_active, pwq->max_active, pwq->refcnt,
!list_empty(&pwq->mayday_node) ? " MAYDAY" : "");
hash_for_each(pool->busy_hash, bkt, worker, hentry) {
@@ -4657,7 +4690,7 @@ static void show_pwq(struct pool_workqueue *pwq)
pr_cont("%s %d%s:%ps", comma ? "," : "",
task_pid_nr(worker->task),
- worker == pwq->wq->rescuer ? "(RESCUER)" : "",
+ worker->rescue_wq ? "(RESCUER)" : "",
worker->current_func);
list_for_each_entry(work, &worker->scheduled, entry)
pr_cont_work(false, work);
diff --git a/lib/Kconfig b/lib/Kconfig
index 3321d04dfa5a..681b7e50490e 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -605,6 +605,9 @@ config ARCH_NO_SG_CHAIN
config ARCH_HAS_PMEM_API
bool
+config MEMREGION
+ bool
+
# use memcpy to implement user copies for nommu architectures
config UACCESS_MEMCPY
bool
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 93d97f9b0157..ecde997db751 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -164,6 +164,15 @@ config DYNAMIC_DEBUG
See Documentation/admin-guide/dynamic-debug-howto.rst for additional
information.
+config SYMBOLIC_ERRNAME
+ bool "Support symbolic error names in printf"
+ default y if PRINTK
+ help
+ If you say Y here, the kernel's printf implementation will
+ be able to print symbolic error names such as ENOSPC instead
+ of the number 28. It makes the kernel image slightly larger
+ (about 3KB), but can make the kernel logs easier to read.
+
endmenu # "printk and dmesg options"
menu "Compile-time checks and compiler options"
@@ -1664,6 +1673,8 @@ config PROVIDE_OHCI1394_DMA_INIT
See Documentation/debugging-via-ohci1394.txt for more information.
+source "lib/kunit/Kconfig"
+
menuconfig RUNTIME_TESTING_MENU
bool "Runtime Testing"
def_bool y
@@ -1948,6 +1959,35 @@ config TEST_SYSCTL
If unsure, say N.
+config SYSCTL_KUNIT_TEST
+ bool "KUnit test for sysctl"
+ depends on KUNIT
+ help
+ This builds the proc sysctl unit test, which runs on boot.
+ It tests the API contract and implementation correctness of sysctl.
+ For more information on KUnit and unit tests in general please refer
+ to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+ If unsure, say N.
+
+config LIST_KUNIT_TEST
+ bool "KUnit Test for Kernel Linked-list structures"
+ depends on KUNIT
+ help
+ This builds the linked list KUnit test suite.
+ It tests the API and basic functionality of the list_head type
+ and associated macros.
+
+ KUnit tests run during boot and output the results to the debug log
+ in TAP format (http://testanything.org/). Only useful for kernel devs
+ running the KUnit test harness, and not intended for inclusion into a
+ production build.
+
+ For more information on KUnit and unit tests in general please refer
+ to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+ If unsure, say N.
+
config TEST_UDELAY
tristate "udelay test driver"
help
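
[Editor's note] To exercise these suites with the kunit_tool introduced by this
series, one would presumably enable them in a kunitconfig fragment and invoke
the wrapper (details per Documentation/dev-tools/kunit/):

	CONFIG_KUNIT=y
	CONFIG_SYSCTL_KUNIT_TEST=y
	CONFIG_LIST_KUNIT_TEST=y

	$ ./tools/testing/kunit/kunit.py run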
diff --git a/lib/Makefile b/lib/Makefile
index c5892807e06f..c2f0e2a4e4e8 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -26,8 +26,7 @@ endif
lib-y := ctype.o string.o vsprintf.o cmdline.o \
rbtree.o radix-tree.o timerqueue.o xarray.o \
- idr.o extable.o \
- sha1.o chacha.o irq_regs.o argv_split.o \
+ idr.o extable.o sha1.o irq_regs.o argv_split.o \
flex_proportions.o ratelimit.o show_mem.o \
is_single_threaded.o plist.o decompress.o kobject_uevent.o \
earlycpio.o seq_buf.o siphash.o dec_and_lock.o \
@@ -92,6 +91,8 @@ obj-$(CONFIG_TEST_MEMINIT) += test_meminit.o
obj-$(CONFIG_TEST_LIVEPATCH) += livepatch/
+obj-$(CONFIG_KUNIT) += kunit/
+
ifeq ($(CONFIG_DEBUG_KOBJECT),y)
CFLAGS_kobject.o += -DDEBUG
CFLAGS_kobject_uevent.o += -DDEBUG
@@ -181,6 +182,7 @@ lib-$(CONFIG_GENERIC_BUG) += bug.o
obj-$(CONFIG_HAVE_ARCH_TRACEHOOK) += syscall.o
obj-$(CONFIG_DYNAMIC_DEBUG) += dynamic_debug.o
+obj-$(CONFIG_SYMBOLIC_ERRNAME) += errname.o
obj-$(CONFIG_NLATTR) += nlattr.o
@@ -212,6 +214,7 @@ obj-$(CONFIG_GENERIC_NET_UTILS) += net_utils.o
obj-$(CONFIG_SG_SPLIT) += sg_split.o
obj-$(CONFIG_SG_POOL) += sg_pool.o
+obj-$(CONFIG_MEMREGION) += memregion.o
obj-$(CONFIG_STMP_DEVICE) += stmp_device.o
obj-$(CONFIG_IRQ_POLL) += irq_poll.o
@@ -290,3 +293,6 @@ obj-$(CONFIG_GENERIC_LIB_MULDI3) += muldi3.o
obj-$(CONFIG_GENERIC_LIB_CMPDI2) += cmpdi2.o
obj-$(CONFIG_GENERIC_LIB_UCMPDI2) += ucmpdi2.o
obj-$(CONFIG_OBJAGG) += objagg.o
+
+# KUnit tests
+obj-$(CONFIG_LIST_KUNIT_TEST) += list-test.o
diff --git a/lib/cpu_rmap.c b/lib/cpu_rmap.c
index 075f3788bbe4..f08d9c56f712 100644
--- a/lib/cpu_rmap.c
+++ b/lib/cpu_rmap.c
@@ -255,7 +255,7 @@ irq_cpu_rmap_notify(struct irq_affinity_notify *notify, const cpumask_t *mask)
rc = cpu_rmap_update(glue->rmap, glue->index, mask);
if (rc)
- pr_warning("irq_cpu_rmap_notify: update failed: %d\n", rc);
+ pr_warn("irq_cpu_rmap_notify: update failed: %d\n", rc);
}
/**
diff --git a/lib/crypto/Kconfig b/lib/crypto/Kconfig
new file mode 100644
index 000000000000..0b2c4fce26d9
--- /dev/null
+++ b/lib/crypto/Kconfig
@@ -0,0 +1,130 @@
+# SPDX-License-Identifier: GPL-2.0
+
+comment "Crypto library routines"
+
+config CRYPTO_LIB_AES
+ tristate
+
+config CRYPTO_LIB_ARC4
+ tristate
+
+config CRYPTO_ARCH_HAVE_LIB_BLAKE2S
+ tristate
+ help
+ Declares whether the architecture provides an arch-specific
+ accelerated implementation of the Blake2s library interface,
+ either builtin or as a module.
+
+config CRYPTO_LIB_BLAKE2S_GENERIC
+ tristate
+ help
+ This symbol can be depended upon by arch implementations of the
+ Blake2s library interface that require the generic code as a
+ fallback, e.g., for SIMD implementations. If no arch specific
+ implementation is enabled, this implementation serves the users
+ of CRYPTO_LIB_BLAKE2S.
+
+config CRYPTO_LIB_BLAKE2S
+ tristate "BLAKE2s hash function library"
+ depends on CRYPTO_ARCH_HAVE_LIB_BLAKE2S || !CRYPTO_ARCH_HAVE_LIB_BLAKE2S
+ select CRYPTO_LIB_BLAKE2S_GENERIC if CRYPTO_ARCH_HAVE_LIB_BLAKE2S=n
+ help
+ Enable the Blake2s library interface. This interface may be fulfilled
+ by either the generic implementation or an arch-specific one, if one
+ is available and enabled.
+
+config CRYPTO_ARCH_HAVE_LIB_CHACHA
+ tristate
+ help
+ Declares whether the architecture provides an arch-specific
+ accelerated implementation of the ChaCha library interface,
+ either builtin or as a module.
+
+config CRYPTO_LIB_CHACHA_GENERIC
+ tristate
+ select CRYPTO_ALGAPI
+ help
+ This symbol can be depended upon by arch implementations of the
+ ChaCha library interface that require the generic code as a
+ fallback, e.g., for SIMD implementations. If no arch specific
+ implementation is enabled, this implementation serves the users
+ of CRYPTO_LIB_CHACHA.
+
+config CRYPTO_LIB_CHACHA
+ tristate "ChaCha library interface"
+ depends on CRYPTO_ARCH_HAVE_LIB_CHACHA || !CRYPTO_ARCH_HAVE_LIB_CHACHA
+ select CRYPTO_LIB_CHACHA_GENERIC if CRYPTO_ARCH_HAVE_LIB_CHACHA=n
+ help
+ Enable the ChaCha library interface. This interface may be fulfilled
+ by either the generic implementation or an arch-specific one, if one
+ is available and enabled.
+
+config CRYPTO_ARCH_HAVE_LIB_CURVE25519
+ tristate
+ help
+ Declares whether the architecture provides an arch-specific
+ accelerated implementation of the Curve25519 library interface,
+ either builtin or as a module.
+
+config CRYPTO_LIB_CURVE25519_GENERIC
+ tristate
+ help
+ This symbol can be depended upon by arch implementations of the
+ Curve25519 library interface that require the generic code as a
+ fallback, e.g., for SIMD implementations. If no arch specific
+ implementation is enabled, this implementation serves the users
+ of CRYPTO_LIB_CURVE25519.
+
+config CRYPTO_LIB_CURVE25519
+ tristate "Curve25519 scalar multiplication library"
+ depends on CRYPTO_ARCH_HAVE_LIB_CURVE25519 || !CRYPTO_ARCH_HAVE_LIB_CURVE25519
+ select CRYPTO_LIB_CURVE25519_GENERIC if CRYPTO_ARCH_HAVE_LIB_CURVE25519=n
+ help
+ Enable the Curve25519 library interface. This interface may be
+ fulfilled by either the generic implementation or an arch-specific
+ one, if one is available and enabled.
+
+config CRYPTO_LIB_DES
+ tristate
+
+config CRYPTO_LIB_POLY1305_RSIZE
+ int
+ default 2 if MIPS
+ default 4 if X86_64
+ default 9 if ARM || ARM64
+ default 1
+
+config CRYPTO_ARCH_HAVE_LIB_POLY1305
+ tristate
+ help
+ Declares whether the architecture provides an arch-specific
+ accelerated implementation of the Poly1305 library interface,
+ either builtin or as a module.
+
+config CRYPTO_LIB_POLY1305_GENERIC
+ tristate
+ help
+ This symbol can be depended upon by arch implementations of the
+ Poly1305 library interface that require the generic code as a
+ fallback, e.g., for SIMD implementations. If no arch specific
+ implementation is enabled, this implementation serves the users
+ of CRYPTO_LIB_POLY1305.
+
+config CRYPTO_LIB_POLY1305
+ tristate "Poly1305 library interface"
+ depends on CRYPTO_ARCH_HAVE_LIB_POLY1305 || !CRYPTO_ARCH_HAVE_LIB_POLY1305
+ select CRYPTO_LIB_POLY1305_GENERIC if CRYPTO_ARCH_HAVE_LIB_POLY1305=n
+ help
+ Enable the Poly1305 library interface. This interface may be fulfilled
+ by either the generic implementation or an arch-specific one, if one
+ is available and enabled.
+
+config CRYPTO_LIB_CHACHA20POLY1305
+ tristate "ChaCha20-Poly1305 AEAD support (8-byte nonce library version)"
+ depends on CRYPTO_ARCH_HAVE_LIB_CHACHA || !CRYPTO_ARCH_HAVE_LIB_CHACHA
+ depends on CRYPTO_ARCH_HAVE_LIB_POLY1305 || !CRYPTO_ARCH_HAVE_LIB_POLY1305
+ select CRYPTO_LIB_CHACHA
+ select CRYPTO_LIB_POLY1305
+
+config CRYPTO_LIB_SHA256
+ tristate
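
[Editor's note] The ARCH_HAVE/GENERIC symbol pairs above give architectures a
uniform hook: an arch backend advertises itself and pulls in the generic code
as its SIMD fallback. A hedged Kconfig sketch of the expected wiring (the
CRYPTO_CHACHA20_X86_64 symbol name is illustrative):

	config CRYPTO_CHACHA20_X86_64
		tristate "ChaCha stream cipher algorithms (x86_64)"
		select CRYPTO_LIB_CHACHA_GENERIC	# fallback for !SIMD paths
		select CRYPTO_ARCH_HAVE_LIB_CHACHA	# advertise the arch backend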
diff --git a/lib/crypto/Makefile b/lib/crypto/Makefile
index cbe0b6a6450d..34a701ab8b92 100644
--- a/lib/crypto/Makefile
+++ b/lib/crypto/Makefile
@@ -1,13 +1,39 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_CRYPTO_LIB_AES) += libaes.o
-libaes-y := aes.o
+# chacha is used by the /dev/random driver which is always builtin
+obj-y += chacha.o
+obj-$(CONFIG_CRYPTO_LIB_CHACHA_GENERIC) += libchacha.o
-obj-$(CONFIG_CRYPTO_LIB_ARC4) += libarc4.o
-libarc4-y := arc4.o
+obj-$(CONFIG_CRYPTO_LIB_AES) += libaes.o
+libaes-y := aes.o
-obj-$(CONFIG_CRYPTO_LIB_DES) += libdes.o
-libdes-y := des.o
+obj-$(CONFIG_CRYPTO_LIB_ARC4) += libarc4.o
+libarc4-y := arc4.o
-obj-$(CONFIG_CRYPTO_LIB_SHA256) += libsha256.o
-libsha256-y := sha256.o
+obj-$(CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC) += libblake2s-generic.o
+libblake2s-generic-y += blake2s-generic.o
+
+obj-$(CONFIG_CRYPTO_LIB_BLAKE2S) += libblake2s.o
+libblake2s-y += blake2s.o
+
+obj-$(CONFIG_CRYPTO_LIB_CHACHA20POLY1305) += libchacha20poly1305.o
+libchacha20poly1305-y += chacha20poly1305.o
+
+obj-$(CONFIG_CRYPTO_LIB_CURVE25519_GENERIC) += libcurve25519.o
+libcurve25519-y := curve25519-fiat32.o
+libcurve25519-$(CONFIG_ARCH_SUPPORTS_INT128) := curve25519-hacl64.o
+libcurve25519-y += curve25519.o
+
+obj-$(CONFIG_CRYPTO_LIB_DES) += libdes.o
+libdes-y := des.o
+
+obj-$(CONFIG_CRYPTO_LIB_POLY1305_GENERIC) += libpoly1305.o
+libpoly1305-y := poly1305.o
+
+obj-$(CONFIG_CRYPTO_LIB_SHA256) += libsha256.o
+libsha256-y := sha256.o
+
+ifneq ($(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS),y)
+libblake2s-y += blake2s-selftest.o
+libchacha20poly1305-y += chacha20poly1305-selftest.o
+endif
diff --git a/lib/crypto/blake2s-generic.c b/lib/crypto/blake2s-generic.c
new file mode 100644
index 000000000000..04ff8df24513
--- /dev/null
+++ b/lib/crypto/blake2s-generic.c
@@ -0,0 +1,111 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ *
+ * This is an implementation of the BLAKE2s hash and PRF functions.
+ *
+ * Information: https://blake2.net/
+ *
+ */
+
+#include <crypto/internal/blake2s.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/bug.h>
+#include <asm/unaligned.h>
+
+static const u8 blake2s_sigma[10][16] = {
+ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 },
+ { 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 },
+ { 11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4 },
+ { 7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8 },
+ { 9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13 },
+ { 2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9 },
+ { 12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11 },
+ { 13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10 },
+ { 6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5 },
+ { 10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13, 0 },
+};
+
+static inline void blake2s_increment_counter(struct blake2s_state *state,
+ const u32 inc)
+{
+ state->t[0] += inc;
+ state->t[1] += (state->t[0] < inc);
+}
+
+void blake2s_compress_generic(struct blake2s_state *state, const u8 *block,
+ size_t nblocks, const u32 inc)
+{
+ u32 m[16];
+ u32 v[16];
+ int i;
+
+ WARN_ON(IS_ENABLED(DEBUG) &&
+ (nblocks > 1 && inc != BLAKE2S_BLOCK_SIZE));
+
+ while (nblocks > 0) {
+ blake2s_increment_counter(state, inc);
+ memcpy(m, block, BLAKE2S_BLOCK_SIZE);
+ le32_to_cpu_array(m, ARRAY_SIZE(m));
+ memcpy(v, state->h, 32);
+ v[ 8] = BLAKE2S_IV0;
+ v[ 9] = BLAKE2S_IV1;
+ v[10] = BLAKE2S_IV2;
+ v[11] = BLAKE2S_IV3;
+ v[12] = BLAKE2S_IV4 ^ state->t[0];
+ v[13] = BLAKE2S_IV5 ^ state->t[1];
+ v[14] = BLAKE2S_IV6 ^ state->f[0];
+ v[15] = BLAKE2S_IV7 ^ state->f[1];
+
+#define G(r, i, a, b, c, d) do { \
+ a += b + m[blake2s_sigma[r][2 * i + 0]]; \
+ d = ror32(d ^ a, 16); \
+ c += d; \
+ b = ror32(b ^ c, 12); \
+ a += b + m[blake2s_sigma[r][2 * i + 1]]; \
+ d = ror32(d ^ a, 8); \
+ c += d; \
+ b = ror32(b ^ c, 7); \
+} while (0)
+
+#define ROUND(r) do { \
+ G(r, 0, v[0], v[ 4], v[ 8], v[12]); \
+ G(r, 1, v[1], v[ 5], v[ 9], v[13]); \
+ G(r, 2, v[2], v[ 6], v[10], v[14]); \
+ G(r, 3, v[3], v[ 7], v[11], v[15]); \
+ G(r, 4, v[0], v[ 5], v[10], v[15]); \
+ G(r, 5, v[1], v[ 6], v[11], v[12]); \
+ G(r, 6, v[2], v[ 7], v[ 8], v[13]); \
+ G(r, 7, v[3], v[ 4], v[ 9], v[14]); \
+} while (0)
+ ROUND(0);
+ ROUND(1);
+ ROUND(2);
+ ROUND(3);
+ ROUND(4);
+ ROUND(5);
+ ROUND(6);
+ ROUND(7);
+ ROUND(8);
+ ROUND(9);
+
+#undef G
+#undef ROUND
+
+ for (i = 0; i < 8; ++i)
+ state->h[i] ^= v[i] ^ v[i + 8];
+
+ block += BLAKE2S_BLOCK_SIZE;
+ --nblocks;
+ }
+}
+EXPORT_SYMBOL(blake2s_compress_generic);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("BLAKE2s hash function");
+MODULE_AUTHOR("Jason A. Donenfeld <Jason@zx2c4.com>");
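
[Editor's note] blake2s_compress_generic() backs the one-shot library interface
declared in <crypto/blake2s.h>. A minimal usage sketch (the blake2s() signature
is assumed from that header, which is not part of this hunk):

	#include <crypto/blake2s.h>

	static void blake2s_digest_demo(const u8 *data, size_t len)
	{
		u8 digest[BLAKE2S_HASH_SIZE];

		/* Unkeyed hash: key == NULL, keylen == 0. */
		blake2s(digest, data, NULL, sizeof(digest), len, 0);
	}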
diff --git a/lib/crypto/blake2s-selftest.c b/lib/crypto/blake2s-selftest.c
new file mode 100644
index 000000000000..79ef404a990d
--- /dev/null
+++ b/lib/crypto/blake2s-selftest.c
@@ -0,0 +1,622 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#include <crypto/blake2s.h>
+#include <linux/string.h>
+
+/*
+ * blake2s_testvecs[] generated with the program below (using libb2-dev and
+ * libssl-dev [OpenSSL])
+ *
+ * #include <blake2.h>
+ * #include <stdint.h>
+ * #include <stdio.h>
+ *
+ * #include <openssl/evp.h>
+ * #include <openssl/hmac.h>
+ *
+ * #define BLAKE2S_TESTVEC_COUNT 256
+ *
+ * static void print_vec(const uint8_t vec[], int len)
+ * {
+ * int i;
+ *
+ * printf(" { ");
+ * for (i = 0; i < len; i++) {
+ * if (i && (i % 12) == 0)
+ * printf("\n ");
+ * printf("0x%02x, ", vec[i]);
+ * }
+ * printf("},\n");
+ * }
+ *
+ * int main(void)
+ * {
+ * uint8_t key[BLAKE2S_KEYBYTES];
+ * uint8_t buf[BLAKE2S_TESTVEC_COUNT];
+ * uint8_t hash[BLAKE2S_OUTBYTES];
+ * int i, j;
+ *
+ * key[0] = key[1] = 1;
+ * for (i = 2; i < BLAKE2S_KEYBYTES; ++i)
+ * key[i] = key[i - 2] + key[i - 1];
+ *
+ * for (i = 0; i < BLAKE2S_TESTVEC_COUNT; ++i)
+ * buf[i] = (uint8_t)i;
+ *
+ * printf("static const u8 blake2s_testvecs[][BLAKE2S_HASH_SIZE] __initconst = {\n");
+ *
+ * for (i = 0; i < BLAKE2S_TESTVEC_COUNT; ++i) {
+ * int outlen = 1 + i % BLAKE2S_OUTBYTES;
+ * int keylen = (13 * i) % (BLAKE2S_KEYBYTES + 1);
+ *
+ * blake2s(hash, buf, key + BLAKE2S_KEYBYTES - keylen, outlen, i,
+ * keylen);
+ * print_vec(hash, outlen);
+ * }
+ * printf("};\n\n");
+ *
+ * printf("static const u8 blake2s_hmac_testvecs[][BLAKE2S_HASH_SIZE] __initconst = {\n");
+ *
+ * HMAC(EVP_blake2s256(), key, sizeof(key), buf, sizeof(buf), hash, NULL);
+ * print_vec(hash, BLAKE2S_OUTBYTES);
+ *
+ * HMAC(EVP_blake2s256(), buf, sizeof(buf), key, sizeof(key), hash, NULL);
+ * print_vec(hash, BLAKE2S_OUTBYTES);
+ *
+ * printf("};\n");
+ *
+ * return 0;
+ *}
+ */
+static const u8 blake2s_testvecs[][BLAKE2S_HASH_SIZE] __initconst = {
+ { 0xa1, },
+ { 0x7c, 0x89, },
+ { 0x74, 0x0e, 0xd4, },
+ { 0x47, 0x0c, 0x21, 0x15, },
+ { 0x18, 0xd6, 0x9c, 0xa6, 0xc4, },
+ { 0x13, 0x5d, 0x16, 0x63, 0x2e, 0xf9, },
+ { 0x2c, 0xb5, 0x04, 0xb7, 0x99, 0xe2, 0x73, },
+ { 0x9a, 0x0f, 0xd2, 0x39, 0xd6, 0x68, 0x1b, 0x92, },
+ { 0xc8, 0xde, 0x7a, 0xea, 0x2f, 0xf4, 0xd2, 0xe3, 0x2b, },
+ { 0x5b, 0xf9, 0x43, 0x52, 0x0c, 0x12, 0xba, 0xb5, 0x93, 0x9f, },
+ { 0xc6, 0x2c, 0x4e, 0x80, 0xfc, 0x32, 0x5b, 0x33, 0xb8, 0xb8, 0x0a, },
+ { 0xa7, 0x5c, 0xfd, 0x3a, 0xcc, 0xbf, 0x90, 0xca, 0xb7, 0x97, 0xde, 0xd8, },
+ { 0x66, 0xca, 0x3c, 0xc4, 0x19, 0xef, 0x92, 0x66, 0x3f, 0x21, 0x8f, 0xda,
+ 0xb7, },
+ { 0xba, 0xe5, 0xbb, 0x30, 0x25, 0x94, 0x6d, 0xc3, 0x89, 0x09, 0xc4, 0x25,
+ 0x52, 0x3e, },
+ { 0xa2, 0xef, 0x0e, 0x52, 0x0b, 0x5f, 0xa2, 0x01, 0x6d, 0x0a, 0x25, 0xbc,
+ 0x57, 0xe2, 0x27, },
+ { 0x4f, 0xe0, 0xf9, 0x52, 0x12, 0xda, 0x84, 0xb7, 0xab, 0xae, 0xb0, 0xa6,
+ 0x47, 0x2a, 0xc7, 0xf5, },
+ { 0x56, 0xe7, 0xa8, 0x1c, 0x4c, 0xca, 0xed, 0x90, 0x31, 0xec, 0x87, 0x43,
+ 0xe7, 0x72, 0x08, 0xec, 0xbe, },
+ { 0x7e, 0xdf, 0x80, 0x1c, 0x93, 0x33, 0xfd, 0x53, 0x44, 0xba, 0xfd, 0x96,
+ 0xe1, 0xbb, 0xb5, 0x65, 0xa5, 0x00, },
+ { 0xec, 0x6b, 0xed, 0xf7, 0x7b, 0x62, 0x1d, 0x7d, 0xf4, 0x82, 0xf3, 0x1e,
+ 0x18, 0xff, 0x2b, 0xc4, 0x06, 0x20, 0x2a, },
+ { 0x74, 0x98, 0xd7, 0x68, 0x63, 0xed, 0x87, 0xe4, 0x5d, 0x8d, 0x9e, 0x1d,
+ 0xfd, 0x2a, 0xbb, 0x86, 0xac, 0xe9, 0x2a, 0x89, },
+ { 0x89, 0xc3, 0x88, 0xce, 0x2b, 0x33, 0x1e, 0x10, 0xd1, 0x37, 0x20, 0x86,
+ 0x28, 0x43, 0x70, 0xd9, 0xfb, 0x96, 0xd9, 0xb5, 0xd3, },
+ { 0xcb, 0x56, 0x74, 0x41, 0x8d, 0x80, 0x01, 0x9a, 0x6b, 0x38, 0xe1, 0x41,
+ 0xad, 0x9c, 0x62, 0x74, 0xce, 0x35, 0xd5, 0x6c, 0x89, 0x6e, },
+ { 0x79, 0xaf, 0x94, 0x59, 0x99, 0x26, 0xe1, 0xc9, 0x34, 0xfe, 0x7c, 0x22,
+ 0xf7, 0x43, 0xd7, 0x65, 0xd4, 0x48, 0x18, 0xac, 0x3d, 0xfd, 0x93, },
+ { 0x85, 0x0d, 0xff, 0xb8, 0x3e, 0x87, 0x41, 0xb0, 0x95, 0xd3, 0x3d, 0x00,
+ 0x47, 0x55, 0x9e, 0xd2, 0x69, 0xea, 0xbf, 0xe9, 0x7a, 0x2d, 0x61, 0x45, },
+ { 0x03, 0xe0, 0x85, 0xec, 0x54, 0xb5, 0x16, 0x53, 0xa8, 0xc4, 0x71, 0xe9,
+ 0x6a, 0xe7, 0xcb, 0xc4, 0x15, 0x02, 0xfc, 0x34, 0xa4, 0xa4, 0x28, 0x13,
+ 0xd1, },
+ { 0xe3, 0x34, 0x4b, 0xe1, 0xd0, 0x4b, 0x55, 0x61, 0x8f, 0xc0, 0x24, 0x05,
+ 0xe6, 0xe0, 0x3d, 0x70, 0x24, 0x4d, 0xda, 0xb8, 0x91, 0x05, 0x29, 0x07,
+ 0x01, 0x3e, },
+ { 0x61, 0xff, 0x01, 0x72, 0xb1, 0x4d, 0xf6, 0xfe, 0xd1, 0xd1, 0x08, 0x74,
+ 0xe6, 0x91, 0x44, 0xeb, 0x61, 0xda, 0x40, 0xaf, 0xfc, 0x8c, 0x91, 0x6b,
+ 0xec, 0x13, 0xed, },
+ { 0xd4, 0x40, 0xd2, 0xa0, 0x7f, 0xc1, 0x58, 0x0c, 0x85, 0xa0, 0x86, 0xc7,
+ 0x86, 0xb9, 0x61, 0xc9, 0xea, 0x19, 0x86, 0x1f, 0xab, 0x07, 0xce, 0x37,
+ 0x72, 0x67, 0x09, 0xfc, },
+ { 0x9e, 0xf8, 0x18, 0x67, 0x93, 0x10, 0x9b, 0x39, 0x75, 0xe8, 0x8b, 0x38,
+ 0x82, 0x7d, 0xb8, 0xb7, 0xa5, 0xaf, 0xe6, 0x6a, 0x22, 0x5e, 0x1f, 0x9c,
+ 0x95, 0x29, 0x19, 0xf2, 0x4b, },
+ { 0xc8, 0x62, 0x25, 0xf5, 0x98, 0xc9, 0xea, 0xe5, 0x29, 0x3a, 0xd3, 0x22,
+ 0xeb, 0xeb, 0x07, 0x7c, 0x15, 0x07, 0xee, 0x15, 0x61, 0xbb, 0x05, 0x30,
+ 0x99, 0x7f, 0x11, 0xf6, 0x0a, 0x1d, },
+ { 0x68, 0x70, 0xf7, 0x90, 0xa1, 0x8b, 0x1f, 0x0f, 0xbb, 0xce, 0xd2, 0x0e,
+ 0x33, 0x1f, 0x7f, 0xa9, 0x78, 0xa8, 0xa6, 0x81, 0x66, 0xab, 0x8d, 0xcd,
+ 0x58, 0x55, 0x3a, 0x0b, 0x7a, 0xdb, 0xb5, },
+ { 0xdd, 0x35, 0xd2, 0xb4, 0xf6, 0xc7, 0xea, 0xab, 0x64, 0x24, 0x4e, 0xfe,
+ 0xe5, 0x3d, 0x4e, 0x95, 0x8b, 0x6d, 0x6c, 0xbc, 0xb0, 0xf8, 0x88, 0x61,
+ 0x09, 0xb7, 0x78, 0xa3, 0x31, 0xfe, 0xd9, 0x2f, },
+ { 0x0a, },
+ { 0x6e, 0xd4, },
+ { 0x64, 0xe9, 0xd1, },
+ { 0x30, 0xdd, 0x71, 0xef, },
+ { 0x11, 0xb5, 0x0c, 0x87, 0xc9, },
+ { 0x06, 0x1c, 0x6d, 0x04, 0x82, 0xd0, },
+ { 0x5c, 0x42, 0x0b, 0xee, 0xc5, 0x9c, 0xb2, },
+ { 0xe8, 0x29, 0xd6, 0xb4, 0x5d, 0xf7, 0x2b, 0x93, },
+ { 0x18, 0xca, 0x27, 0x72, 0x43, 0x39, 0x16, 0xbc, 0x6a, },
+ { 0x39, 0x8f, 0xfd, 0x64, 0xf5, 0x57, 0x23, 0xb0, 0x45, 0xf8, },
+ { 0xbb, 0x3a, 0x78, 0x6b, 0x02, 0x1d, 0x0b, 0x16, 0xe3, 0xb2, 0x9a, },
+ { 0xb8, 0xb4, 0x0b, 0xe5, 0xd4, 0x1d, 0x0d, 0x85, 0x49, 0x91, 0x35, 0xfa, },
+ { 0x6d, 0x48, 0x2a, 0x0c, 0x42, 0x08, 0xbd, 0xa9, 0x78, 0x6f, 0x18, 0xaf,
+ 0xe2, },
+ { 0x10, 0x45, 0xd4, 0x58, 0x88, 0xec, 0x4e, 0x1e, 0xf6, 0x14, 0x92, 0x64,
+ 0x7e, 0xb0, },
+ { 0x8b, 0x0b, 0x95, 0xee, 0x92, 0xc6, 0x3b, 0x91, 0xf1, 0x1e, 0xeb, 0x51,
+ 0x98, 0x0a, 0x8d, },
+ { 0xa3, 0x50, 0x4d, 0xa5, 0x1d, 0x03, 0x68, 0xe9, 0x57, 0x78, 0xd6, 0x04,
+ 0xf1, 0xc3, 0x94, 0xd8, },
+ { 0xb8, 0x66, 0x6e, 0xdd, 0x46, 0x15, 0xae, 0x3d, 0x83, 0x7e, 0xcf, 0xe7,
+ 0x2c, 0xe8, 0x8f, 0xc7, 0x34, },
+ { 0x2e, 0xc0, 0x1f, 0x29, 0xea, 0xf6, 0xb9, 0xe2, 0xc2, 0x93, 0xeb, 0x41,
+ 0x0d, 0xf0, 0x0a, 0x13, 0x0e, 0xa2, },
+ { 0x71, 0xb8, 0x33, 0xa9, 0x1b, 0xac, 0xf1, 0xb5, 0x42, 0x8f, 0x5e, 0x81,
+ 0x34, 0x43, 0xb7, 0xa4, 0x18, 0x5c, 0x47, },
+ { 0xda, 0x45, 0xb8, 0x2e, 0x82, 0x1e, 0xc0, 0x59, 0x77, 0x9d, 0xfa, 0xb4,
+ 0x1c, 0x5e, 0xa0, 0x2b, 0x33, 0x96, 0x5a, 0x58, },
+ { 0xe3, 0x09, 0x05, 0xa9, 0xeb, 0x48, 0x13, 0xad, 0x71, 0x88, 0x81, 0x9a,
+ 0x3e, 0x2c, 0xe1, 0x23, 0x99, 0x13, 0x35, 0x9f, 0xb5, },
+ { 0xb7, 0x86, 0x2d, 0x16, 0xe1, 0x04, 0x00, 0x47, 0x47, 0x61, 0x31, 0xfb,
+ 0x14, 0xac, 0xd8, 0xe9, 0xe3, 0x49, 0xbd, 0xf7, 0x9c, 0x3f, },
+ { 0x7f, 0xd9, 0x95, 0xa8, 0xa7, 0xa0, 0xcc, 0xba, 0xef, 0xb1, 0x0a, 0xa9,
+ 0x21, 0x62, 0x08, 0x0f, 0x1b, 0xff, 0x7b, 0x9d, 0xae, 0xb2, 0x95, },
+ { 0x85, 0x99, 0xea, 0x33, 0xe0, 0x56, 0xff, 0x13, 0xc6, 0x61, 0x8c, 0xf9,
+ 0x57, 0x05, 0x03, 0x11, 0xf9, 0xfb, 0x3a, 0xf7, 0xce, 0xbb, 0x52, 0x30, },
+ { 0xb2, 0x72, 0x9c, 0xf8, 0x77, 0x4e, 0x8f, 0x6b, 0x01, 0x6c, 0xff, 0x4e,
+ 0x4f, 0x02, 0xd2, 0xbc, 0xeb, 0x51, 0x28, 0x99, 0x50, 0xab, 0xc4, 0x42,
+ 0xe3, },
+ { 0x8b, 0x0a, 0xb5, 0x90, 0x8f, 0xf5, 0x7b, 0xdd, 0xba, 0x47, 0x37, 0xc9,
+ 0x2a, 0xd5, 0x4b, 0x25, 0x08, 0x8b, 0x02, 0x17, 0xa7, 0x9e, 0x6b, 0x6e,
+ 0xe3, 0x90, },
+ { 0x90, 0xdd, 0xf7, 0x75, 0xa7, 0xa3, 0x99, 0x5e, 0x5b, 0x7d, 0x75, 0xc3,
+ 0x39, 0x6b, 0xa0, 0xe2, 0x44, 0x53, 0xb1, 0x9e, 0xc8, 0xf1, 0x77, 0x10,
+ 0x58, 0x06, 0x9a, },
+ { 0x99, 0x52, 0xf0, 0x49, 0xa8, 0x8c, 0xec, 0xa6, 0x97, 0x32, 0x13, 0xb5,
+ 0xf7, 0xa3, 0x8e, 0xfb, 0x4b, 0x59, 0x31, 0x3d, 0x01, 0x59, 0x98, 0x5d,
+ 0x53, 0x03, 0x1a, 0x39, },
+ { 0x9f, 0xe0, 0xc2, 0xe5, 0x5d, 0x93, 0xd6, 0x9b, 0x47, 0x8f, 0x9b, 0xe0,
+ 0x26, 0x35, 0x84, 0x20, 0x1d, 0xc5, 0x53, 0x10, 0x0f, 0x22, 0xb9, 0xb5,
+ 0xd4, 0x36, 0xb1, 0xac, 0x73, },
+ { 0x30, 0x32, 0x20, 0x3b, 0x10, 0x28, 0xec, 0x1f, 0x4f, 0x9b, 0x47, 0x59,
+ 0xeb, 0x7b, 0xee, 0x45, 0xfb, 0x0c, 0x49, 0xd8, 0x3d, 0x69, 0xbd, 0x90,
+ 0x2c, 0xf0, 0x9e, 0x8d, 0xbf, 0xd5, },
+ { 0x2a, 0x37, 0x73, 0x7f, 0xf9, 0x96, 0x19, 0xaa, 0x25, 0xd8, 0x13, 0x28,
+ 0x01, 0x29, 0x89, 0xdf, 0x6e, 0x0c, 0x9b, 0x43, 0x44, 0x51, 0xe9, 0x75,
+ 0x26, 0x0c, 0xb7, 0x87, 0x66, 0x0b, 0x5f, },
+ { 0x23, 0xdf, 0x96, 0x68, 0x91, 0x86, 0xd0, 0x93, 0x55, 0x33, 0x24, 0xf6,
+ 0xba, 0x08, 0x75, 0x5b, 0x59, 0x11, 0x69, 0xb8, 0xb9, 0xe5, 0x2c, 0x77,
+ 0x02, 0xf6, 0x47, 0xee, 0x81, 0xdd, 0xb9, 0x06, },
+ { 0x9d, },
+ { 0x9d, 0x7d, },
+ { 0xfd, 0xc3, 0xda, },
+ { 0xe8, 0x82, 0xcd, 0x21, },
+ { 0xc3, 0x1d, 0x42, 0x4c, 0x74, },
+ { 0xe9, 0xda, 0xf1, 0xa2, 0xe5, 0x7c, },
+ { 0x52, 0xb8, 0x6f, 0x81, 0x5c, 0x3a, 0x4c, },
+ { 0x5b, 0x39, 0x26, 0xfc, 0x92, 0x5e, 0xe0, 0x49, },
+ { 0x59, 0xe4, 0x7c, 0x93, 0x1c, 0xf9, 0x28, 0x93, 0xde, },
+ { 0xde, 0xdf, 0xb2, 0x43, 0x61, 0x0b, 0x86, 0x16, 0x4c, 0x2e, },
+ { 0x14, 0x8f, 0x75, 0x51, 0xaf, 0xb9, 0xee, 0x51, 0x5a, 0xae, 0x23, },
+ { 0x43, 0x5f, 0x50, 0xd5, 0x70, 0xb0, 0x5b, 0x87, 0xf5, 0xd9, 0xb3, 0x6d, },
+ { 0x66, 0x0a, 0x64, 0x93, 0x79, 0x71, 0x94, 0x40, 0xb7, 0x68, 0x2d, 0xd3,
+ 0x63, },
+ { 0x15, 0x00, 0xc4, 0x0c, 0x7d, 0x1b, 0x10, 0xa9, 0x73, 0x1b, 0x90, 0x6f,
+ 0xe6, 0xa9, },
+ { 0x34, 0x75, 0xf3, 0x86, 0x8f, 0x56, 0xcf, 0x2a, 0x0a, 0xf2, 0x62, 0x0a,
+ 0xf6, 0x0e, 0x20, },
+ { 0xb1, 0xde, 0xc9, 0xf5, 0xdb, 0xf3, 0x2f, 0x4c, 0xd6, 0x41, 0x7d, 0x39,
+ 0x18, 0x3e, 0xc7, 0xc3, },
+ { 0xc5, 0x89, 0xb2, 0xf8, 0xb8, 0xc0, 0xa3, 0xb9, 0x3b, 0x10, 0x6d, 0x7c,
+ 0x92, 0xfc, 0x7f, 0x34, 0x41, },
+ { 0xc4, 0xd8, 0xef, 0xba, 0xef, 0xd2, 0xaa, 0xc5, 0x6c, 0x8e, 0x3e, 0xbb,
+ 0x12, 0xfc, 0x0f, 0x72, 0xbf, 0x0f, },
+ { 0xdd, 0x91, 0xd1, 0x15, 0x9e, 0x7d, 0xf8, 0xc1, 0xb9, 0x14, 0x63, 0x96,
+ 0xb5, 0xcb, 0x83, 0x1d, 0x35, 0x1c, 0xec, },
+ { 0xa9, 0xf8, 0x52, 0xc9, 0x67, 0x76, 0x2b, 0xad, 0xfb, 0xd8, 0x3a, 0xa6,
+ 0x74, 0x02, 0xae, 0xb8, 0x25, 0x2c, 0x63, 0x49, },
+ { 0x77, 0x1f, 0x66, 0x70, 0xfd, 0x50, 0x29, 0xaa, 0xeb, 0xdc, 0xee, 0xba,
+ 0x75, 0x98, 0xdc, 0x93, 0x12, 0x3f, 0xdc, 0x7c, 0x38, },
+ { 0xe2, 0xe1, 0x89, 0x5c, 0x37, 0x38, 0x6a, 0xa3, 0x40, 0xac, 0x3f, 0xb0,
+ 0xca, 0xfc, 0xa7, 0xf3, 0xea, 0xf9, 0x0f, 0x5d, 0x8e, 0x39, },
+ { 0x0f, 0x67, 0xc8, 0x38, 0x01, 0xb1, 0xb7, 0xb8, 0xa2, 0xe7, 0x0a, 0x6d,
+ 0xd2, 0x63, 0x69, 0x9e, 0xcc, 0xf0, 0xf2, 0xbe, 0x9b, 0x98, 0xdd, },
+ { 0x13, 0xe1, 0x36, 0x30, 0xfe, 0xc6, 0x01, 0x8a, 0xa1, 0x63, 0x96, 0x59,
+ 0xc2, 0xa9, 0x68, 0x3f, 0x58, 0xd4, 0x19, 0x0c, 0x40, 0xf3, 0xde, 0x02, },
+ { 0xa3, 0x9e, 0xce, 0xda, 0x42, 0xee, 0x8c, 0x6c, 0x5a, 0x7d, 0xdc, 0x89,
+ 0x02, 0x77, 0xdd, 0xe7, 0x95, 0xbb, 0xff, 0x0d, 0xa4, 0xb5, 0x38, 0x1e,
+ 0xaf, },
+ { 0x9a, 0xf6, 0xb5, 0x9a, 0x4f, 0xa9, 0x4f, 0x2c, 0x35, 0x3c, 0x24, 0xdc,
+ 0x97, 0x6f, 0xd9, 0xa1, 0x7d, 0x1a, 0x85, 0x0b, 0xf5, 0xda, 0x2e, 0xe7,
+ 0xb1, 0x1d, },
+ { 0x84, 0x1e, 0x8e, 0x3d, 0x45, 0xa5, 0xf2, 0x27, 0xf3, 0x31, 0xfe, 0xb9,
+ 0xfb, 0xc5, 0x45, 0x99, 0x99, 0xdd, 0x93, 0x43, 0x02, 0xee, 0x58, 0xaf,
+ 0xee, 0x6a, 0xbe, },
+ { 0x07, 0x2f, 0xc0, 0xa2, 0x04, 0xc4, 0xab, 0x7c, 0x26, 0xbb, 0xa8, 0xd8,
+ 0xe3, 0x1c, 0x75, 0x15, 0x64, 0x5d, 0x02, 0x6a, 0xf0, 0x86, 0xe9, 0xcd,
+ 0x5c, 0xef, 0xa3, 0x25, },
+ { 0x2f, 0x3b, 0x1f, 0xb5, 0x91, 0x8f, 0x86, 0xe0, 0xdc, 0x31, 0x48, 0xb6,
+ 0xa1, 0x8c, 0xfd, 0x75, 0xbb, 0x7d, 0x3d, 0xc1, 0xf0, 0x10, 0x9a, 0xd8,
+ 0x4b, 0x0e, 0xe3, 0x94, 0x9f, },
+ { 0x29, 0xbb, 0x8f, 0x6c, 0xd1, 0xf2, 0xb6, 0xaf, 0xe5, 0xe3, 0x2d, 0xdc,
+ 0x6f, 0xa4, 0x53, 0x88, 0xd8, 0xcf, 0x4d, 0x45, 0x42, 0x62, 0xdb, 0xdf,
+ 0xf8, 0x45, 0xc2, 0x13, 0xec, 0x35, },
+ { 0x06, 0x3c, 0xe3, 0x2c, 0x15, 0xc6, 0x43, 0x03, 0x81, 0xfb, 0x08, 0x76,
+ 0x33, 0xcb, 0x02, 0xc1, 0xba, 0x33, 0xe5, 0xe0, 0xd1, 0x92, 0xa8, 0x46,
+ 0x28, 0x3f, 0x3e, 0x9d, 0x2c, 0x44, 0x54, },
+ { 0xea, 0xbb, 0x96, 0xf8, 0xd1, 0x8b, 0x04, 0x11, 0x40, 0x78, 0x42, 0x02,
+ 0x19, 0xd1, 0xbc, 0x65, 0x92, 0xd3, 0xc3, 0xd6, 0xd9, 0x19, 0xe7, 0xc3,
+ 0x40, 0x97, 0xbd, 0xd4, 0xed, 0xfa, 0x5e, 0x28, },
+ { 0x02, },
+ { 0x52, 0xa8, },
+ { 0x38, 0x25, 0x0d, },
+ { 0xe3, 0x04, 0xd4, 0x92, },
+ { 0x97, 0xdb, 0xf7, 0x81, 0xca, },
+ { 0x8a, 0x56, 0x9d, 0x62, 0x56, 0xcc, },
+ { 0xa1, 0x8e, 0x3c, 0x72, 0x8f, 0x63, 0x03, },
+ { 0xf7, 0xf3, 0x39, 0x09, 0x0a, 0xa1, 0xbb, 0x23, },
+ { 0x6b, 0x03, 0xc0, 0xe9, 0xd9, 0x83, 0x05, 0x22, 0x01, },
+ { 0x1b, 0x4b, 0xf5, 0xd6, 0x4f, 0x05, 0x75, 0x91, 0x4c, 0x7f, },
+ { 0x4c, 0x8c, 0x25, 0x20, 0x21, 0xcb, 0xc2, 0x4b, 0x3a, 0x5b, 0x8d, },
+ { 0x56, 0xe2, 0x77, 0xa0, 0xb6, 0x9f, 0x81, 0xec, 0x83, 0x75, 0xc4, 0xf9, },
+ { 0x71, 0x70, 0x0f, 0xad, 0x4d, 0x35, 0x81, 0x9d, 0x88, 0x69, 0xf9, 0xaa,
+ 0xd3, },
+ { 0x50, 0x6e, 0x86, 0x6e, 0x43, 0xc0, 0xc2, 0x44, 0xc2, 0xe2, 0xa0, 0x1c,
+ 0xb7, 0x9a, },
+ { 0xe4, 0x7e, 0x72, 0xc6, 0x12, 0x8e, 0x7c, 0xfc, 0xbd, 0xe2, 0x08, 0x31,
+ 0x3d, 0x47, 0x3d, },
+ { 0x08, 0x97, 0x5b, 0x80, 0xae, 0xc4, 0x1d, 0x50, 0x77, 0xdf, 0x1f, 0xd0,
+ 0x24, 0xf0, 0x17, 0xc0, },
+ { 0x01, 0xb6, 0x29, 0xf4, 0xaf, 0x78, 0x5f, 0xb6, 0x91, 0xdd, 0x76, 0x76,
+ 0xd2, 0xfd, 0x0c, 0x47, 0x40, },
+ { 0xa1, 0xd8, 0x09, 0x97, 0x7a, 0xa6, 0xc8, 0x94, 0xf6, 0x91, 0x7b, 0xae,
+ 0x2b, 0x9f, 0x0d, 0x83, 0x48, 0xf7, },
+ { 0x12, 0xd5, 0x53, 0x7d, 0x9a, 0xb0, 0xbe, 0xd9, 0xed, 0xe9, 0x9e, 0xee,
+ 0x61, 0x5b, 0x42, 0xf2, 0xc0, 0x73, 0xc0, },
+ { 0xd5, 0x77, 0xd6, 0x5c, 0x6e, 0xa5, 0x69, 0x2b, 0x3b, 0x8c, 0xd6, 0x7d,
+ 0x1d, 0xbe, 0x2c, 0xa1, 0x02, 0x21, 0xcd, 0x29, },
+ { 0xa4, 0x98, 0x80, 0xca, 0x22, 0xcf, 0x6a, 0xab, 0x5e, 0x40, 0x0d, 0x61,
+ 0x08, 0x21, 0xef, 0xc0, 0x6c, 0x52, 0xb4, 0xb0, 0x53, },
+ { 0xbf, 0xaf, 0x8f, 0x3b, 0x7a, 0x97, 0x33, 0xe5, 0xca, 0x07, 0x37, 0xfd,
+ 0x15, 0xdf, 0xce, 0x26, 0x2a, 0xb1, 0xa7, 0x0b, 0xb3, 0xac, },
+ { 0x16, 0x22, 0xe1, 0xbc, 0x99, 0x4e, 0x01, 0xf0, 0xfa, 0xff, 0x8f, 0xa5,
+ 0x0c, 0x61, 0xb0, 0xad, 0xcc, 0xb1, 0xe1, 0x21, 0x46, 0xfa, 0x2e, },
+ { 0x11, 0x5b, 0x0b, 0x2b, 0xe6, 0x14, 0xc1, 0xd5, 0x4d, 0x71, 0x5e, 0x17,
+ 0xea, 0x23, 0xdd, 0x6c, 0xbd, 0x1d, 0xbe, 0x12, 0x1b, 0xee, 0x4c, 0x1a, },
+ { 0x40, 0x88, 0x22, 0xf3, 0x20, 0x6c, 0xed, 0xe1, 0x36, 0x34, 0x62, 0x2c,
+ 0x98, 0x83, 0x52, 0xe2, 0x25, 0xee, 0xe9, 0xf5, 0xe1, 0x17, 0xf0, 0x5c,
+ 0xae, },
+ { 0xc3, 0x76, 0x37, 0xde, 0x95, 0x8c, 0xca, 0x2b, 0x0c, 0x23, 0xe7, 0xb5,
+ 0x38, 0x70, 0x61, 0xcc, 0xff, 0xd3, 0x95, 0x7b, 0xf3, 0xff, 0x1f, 0x9d,
+ 0x59, 0x00, },
+ { 0x0c, 0x19, 0x52, 0x05, 0x22, 0x53, 0xcb, 0x48, 0xd7, 0x10, 0x0e, 0x7e,
+ 0x14, 0x69, 0xb5, 0xa2, 0x92, 0x43, 0xa3, 0x9e, 0x4b, 0x8f, 0x51, 0x2c,
+ 0x5a, 0x2c, 0x3b, },
+ { 0xe1, 0x9d, 0x70, 0x70, 0x28, 0xec, 0x86, 0x40, 0x55, 0x33, 0x56, 0xda,
+ 0x88, 0xca, 0xee, 0xc8, 0x6a, 0x20, 0xb1, 0xe5, 0x3d, 0x57, 0xf8, 0x3c,
+ 0x10, 0x07, 0x2a, 0xc4, },
+ { 0x0b, 0xae, 0xf1, 0xc4, 0x79, 0xee, 0x1b, 0x3d, 0x27, 0x35, 0x8d, 0x14,
+ 0xd6, 0xae, 0x4e, 0x3c, 0xe9, 0x53, 0x50, 0xb5, 0xcc, 0x0c, 0xf7, 0xdf,
+ 0xee, 0xa1, 0x74, 0xd6, 0x71, },
+ { 0xe6, 0xa4, 0xf4, 0x99, 0x98, 0xb9, 0x80, 0xea, 0x96, 0x7f, 0x4f, 0x33,
+ 0xcf, 0x74, 0x25, 0x6f, 0x17, 0x6c, 0xbf, 0xf5, 0x5c, 0x38, 0xd0, 0xff,
+ 0x96, 0xcb, 0x13, 0xf9, 0xdf, 0xfd, },
+ { 0xbe, 0x92, 0xeb, 0xba, 0x44, 0x2c, 0x24, 0x74, 0xd4, 0x03, 0x27, 0x3c,
+ 0x5d, 0x5b, 0x03, 0x30, 0x87, 0x63, 0x69, 0xe0, 0xb8, 0x94, 0xf4, 0x44,
+ 0x7e, 0xad, 0xcd, 0x20, 0x12, 0x16, 0x79, },
+ { 0x30, 0xf1, 0xc4, 0x8e, 0x05, 0x90, 0x2a, 0x97, 0x63, 0x94, 0x46, 0xff,
+ 0xce, 0xd8, 0x67, 0xa7, 0xac, 0x33, 0x8c, 0x95, 0xb7, 0xcd, 0xa3, 0x23,
+ 0x98, 0x9d, 0x76, 0x6c, 0x9d, 0xa8, 0xd6, 0x8a, },
+ { 0xbe, },
+ { 0x17, 0x6c, },
+ { 0x1a, 0x42, 0x4f, },
+ { 0xba, 0xaf, 0xb7, 0x65, },
+ { 0xc2, 0x63, 0x43, 0x6a, 0xea, },
+ { 0xe4, 0x4d, 0xad, 0xf2, 0x0b, 0x02, },
+ { 0x04, 0xc7, 0xc4, 0x7f, 0xa9, 0x2b, 0xce, },
+ { 0x66, 0xf6, 0x67, 0xcb, 0x03, 0x53, 0xc8, 0xf1, },
+ { 0x56, 0xa3, 0x60, 0x78, 0xc9, 0x5f, 0x70, 0x1b, 0x5e, },
+ { 0x99, 0xff, 0x81, 0x7c, 0x13, 0x3c, 0x29, 0x79, 0x4b, 0x65, },
+ { 0x51, 0x10, 0x50, 0x93, 0x01, 0x93, 0xb7, 0x01, 0xc9, 0x18, 0xb7, },
+ { 0x8e, 0x3c, 0x42, 0x1e, 0x5e, 0x7d, 0xc1, 0x50, 0x70, 0x1f, 0x00, 0x98, },
+ { 0x5f, 0xd9, 0x9b, 0xc8, 0xd7, 0xb2, 0x72, 0x62, 0x1a, 0x1e, 0xba, 0x92,
+ 0xe9, },
+ { 0x70, 0x2b, 0xba, 0xfe, 0xad, 0x5d, 0x96, 0x3f, 0x27, 0xc2, 0x41, 0x6d,
+ 0xc4, 0xb3, },
+ { 0xae, 0xe0, 0xd5, 0xd4, 0xc7, 0xae, 0x15, 0x5e, 0xdc, 0xdd, 0x33, 0x60,
+ 0xd7, 0xd3, 0x5e, },
+ { 0x79, 0x8e, 0xbc, 0x9e, 0x20, 0xb9, 0x19, 0x4b, 0x63, 0x80, 0xf3, 0x16,
+ 0xaf, 0x39, 0xbd, 0x92, },
+ { 0xc2, 0x0e, 0x85, 0xa0, 0x0b, 0x9a, 0xb0, 0xec, 0xde, 0x38, 0xd3, 0x10,
+ 0xd9, 0xa7, 0x66, 0x27, 0xcf, },
+ { 0x0e, 0x3b, 0x75, 0x80, 0x67, 0x14, 0x0c, 0x02, 0x90, 0xd6, 0xb3, 0x02,
+ 0x81, 0xf6, 0xa6, 0x87, 0xce, 0x58, },
+ { 0x79, 0xb5, 0xe9, 0x5d, 0x52, 0x4d, 0xf7, 0x59, 0xf4, 0x2e, 0x27, 0xdd,
+ 0xb3, 0xed, 0x57, 0x5b, 0x82, 0xea, 0x6f, },
+ { 0xa2, 0x97, 0xf5, 0x80, 0x02, 0x3d, 0xde, 0xa3, 0xf9, 0xf6, 0xab, 0xe3,
+ 0x57, 0x63, 0x7b, 0x9b, 0x10, 0x42, 0x6f, 0xf2, },
+ { 0x12, 0x7a, 0xfc, 0xb7, 0x67, 0x06, 0x0c, 0x78, 0x1a, 0xfe, 0x88, 0x4f,
+ 0xc6, 0xac, 0x52, 0x96, 0x64, 0x28, 0x97, 0x84, 0x06, },
+ { 0xc5, 0x04, 0x44, 0x6b, 0xb2, 0xa5, 0xa4, 0x66, 0xe1, 0x76, 0xa2, 0x51,
+ 0xf9, 0x59, 0x69, 0x97, 0x56, 0x0b, 0xbf, 0x50, 0xb3, 0x34, },
+ { 0x21, 0x32, 0x6b, 0x42, 0xb5, 0xed, 0x71, 0x8d, 0xf7, 0x5a, 0x35, 0xe3,
+ 0x90, 0xe2, 0xee, 0xaa, 0x89, 0xf6, 0xc9, 0x9c, 0x4d, 0x73, 0xf4, },
+ { 0x4c, 0xa6, 0x09, 0xf4, 0x48, 0xe7, 0x46, 0xbc, 0x49, 0xfc, 0xe5, 0xda,
+ 0xd1, 0x87, 0x13, 0x17, 0x4c, 0x59, 0x71, 0x26, 0x5b, 0x2c, 0x42, 0xb7, },
+ { 0x13, 0x63, 0xf3, 0x40, 0x02, 0xe5, 0xa3, 0x3a, 0x5e, 0x8e, 0xf8, 0xb6,
+ 0x8a, 0x49, 0x60, 0x76, 0x34, 0x72, 0x94, 0x73, 0xf6, 0xd9, 0x21, 0x6a,
+ 0x26, },
+ { 0xdf, 0x75, 0x16, 0x10, 0x1b, 0x5e, 0x81, 0xc3, 0xc8, 0xde, 0x34, 0x24,
+ 0xb0, 0x98, 0xeb, 0x1b, 0x8f, 0xa1, 0x9b, 0x05, 0xee, 0xa5, 0xe9, 0x35,
+ 0xf4, 0x1d, },
+ { 0xcd, 0x21, 0x93, 0x6e, 0x5b, 0xa0, 0x26, 0x2b, 0x21, 0x0e, 0xa0, 0xb9,
+ 0x1c, 0xb5, 0xbb, 0xb8, 0xf8, 0x1e, 0xff, 0x5c, 0xa8, 0xf9, 0x39, 0x46,
+ 0x4e, 0x29, 0x26, },
+ { 0x73, 0x7f, 0x0e, 0x3b, 0x0b, 0x5c, 0xf9, 0x60, 0xaa, 0x88, 0xa1, 0x09,
+ 0xb1, 0x5d, 0x38, 0x7b, 0x86, 0x8f, 0x13, 0x7a, 0x8d, 0x72, 0x7a, 0x98,
+ 0x1a, 0x5b, 0xff, 0xc9, },
+ { 0xd3, 0x3c, 0x61, 0x71, 0x44, 0x7e, 0x31, 0x74, 0x98, 0x9d, 0x9a, 0xd2,
+ 0x27, 0xf3, 0x46, 0x43, 0x42, 0x51, 0xd0, 0x5f, 0xe9, 0x1c, 0x5c, 0x69,
+ 0xbf, 0xf6, 0xbe, 0x3c, 0x40, },
+ { 0x31, 0x99, 0x31, 0x9f, 0xaa, 0x43, 0x2e, 0x77, 0x3e, 0x74, 0x26, 0x31,
+ 0x5e, 0x61, 0xf1, 0x87, 0xe2, 0xeb, 0x9b, 0xcd, 0xd0, 0x3a, 0xee, 0x20,
+ 0x7e, 0x10, 0x0a, 0x0b, 0x7e, 0xfa, },
+ { 0xa4, 0x27, 0x80, 0x67, 0x81, 0x2a, 0xa7, 0x62, 0xf7, 0x6e, 0xda, 0xd4,
+ 0x5c, 0x39, 0x74, 0xad, 0x7e, 0xbe, 0xad, 0xa5, 0x84, 0x7f, 0xa9, 0x30,
+ 0x5d, 0xdb, 0xe2, 0x05, 0x43, 0xf7, 0x1b, },
+ { 0x0b, 0x37, 0xd8, 0x02, 0xe1, 0x83, 0xd6, 0x80, 0xf2, 0x35, 0xc2, 0xb0,
+ 0x37, 0xef, 0xef, 0x5e, 0x43, 0x93, 0xf0, 0x49, 0x45, 0x0a, 0xef, 0xb5,
+ 0x76, 0x70, 0x12, 0x44, 0xc4, 0xdb, 0xf5, 0x7a, },
+ { 0x1f, },
+ { 0x82, 0x60, },
+ { 0xcc, 0xe3, 0x08, },
+ { 0x56, 0x17, 0xe4, 0x59, },
+ { 0xe2, 0xd7, 0x9e, 0xc4, 0x4c, },
+ { 0xb2, 0xad, 0xd3, 0x78, 0x58, 0x5a, },
+ { 0xce, 0x43, 0xb4, 0x02, 0x96, 0xab, 0x3c, },
+ { 0xe6, 0x05, 0x1a, 0x73, 0x22, 0x32, 0xbb, 0x77, },
+ { 0x23, 0xe7, 0xda, 0xfe, 0x2c, 0xef, 0x8c, 0x22, 0xec, },
+ { 0xe9, 0x8e, 0x55, 0x38, 0xd1, 0xd7, 0x35, 0x23, 0x98, 0xc7, },
+ { 0xb5, 0x81, 0x1a, 0xe5, 0xb5, 0xa5, 0xd9, 0x4d, 0xca, 0x41, 0xe7, },
+ { 0x41, 0x16, 0x16, 0x95, 0x8d, 0x9e, 0x0c, 0xea, 0x8c, 0x71, 0x9a, 0xc1, },
+ { 0x7c, 0x33, 0xc0, 0xa4, 0x00, 0x62, 0xea, 0x60, 0x67, 0xe4, 0x20, 0xbc,
+ 0x5b, },
+ { 0xdb, 0xb1, 0xdc, 0xfd, 0x08, 0xc0, 0xde, 0x82, 0xd1, 0xde, 0x38, 0xc0,
+ 0x90, 0x48, },
+ { 0x37, 0x18, 0x2e, 0x0d, 0x61, 0xaa, 0x61, 0xd7, 0x86, 0x20, 0x16, 0x60,
+ 0x04, 0xd9, 0xd5, },
+ { 0xb0, 0xcf, 0x2c, 0x4c, 0x5e, 0x5b, 0x4f, 0x2a, 0x23, 0x25, 0x58, 0x47,
+ 0xe5, 0x31, 0x06, 0x70, },
+ { 0x91, 0xa0, 0xa3, 0x86, 0x4e, 0xe0, 0x72, 0x38, 0x06, 0x67, 0x59, 0x5c,
+ 0x70, 0x25, 0xdb, 0x33, 0x27, },
+ { 0x44, 0x58, 0x66, 0xb8, 0x58, 0xc7, 0x13, 0xed, 0x4c, 0xc0, 0xf4, 0x9a,
+ 0x1e, 0x67, 0x75, 0x33, 0xb6, 0xb8, },
+ { 0x7f, 0x98, 0x4a, 0x8e, 0x50, 0xa2, 0x5c, 0xcd, 0x59, 0xde, 0x72, 0xb3,
+ 0x9d, 0xc3, 0x09, 0x8a, 0xab, 0x56, 0xf1, },
+ { 0x80, 0x96, 0x49, 0x1a, 0x59, 0xa2, 0xc5, 0xd5, 0xa7, 0x20, 0x8a, 0xb7,
+ 0x27, 0x62, 0x84, 0x43, 0xc6, 0xe1, 0x1b, 0x5d, },
+ { 0x6b, 0xb7, 0x2b, 0x26, 0x62, 0x14, 0x70, 0x19, 0x3d, 0x4d, 0xac, 0xac,
+ 0x63, 0x58, 0x5e, 0x94, 0xb5, 0xb7, 0xe8, 0xe8, 0xa2, },
+ { 0x20, 0xa8, 0xc0, 0xfd, 0x63, 0x3d, 0x6e, 0x98, 0xcf, 0x0c, 0x49, 0x98,
+ 0xe4, 0x5a, 0xfe, 0x8c, 0xaa, 0x70, 0x82, 0x1c, 0x7b, 0x74, },
+ { 0xc8, 0xe8, 0xdd, 0xdf, 0x69, 0x30, 0x01, 0xc2, 0x0f, 0x7e, 0x2f, 0x11,
+ 0xcc, 0x3e, 0x17, 0xa5, 0x69, 0x40, 0x3f, 0x0e, 0x79, 0x7f, 0xcf, },
+ { 0xdb, 0x61, 0xc0, 0xe2, 0x2e, 0x49, 0x07, 0x31, 0x1d, 0x91, 0x42, 0x8a,
+ 0xfc, 0x5e, 0xd3, 0xf8, 0x56, 0x1f, 0x2b, 0x73, 0xfd, 0x9f, 0xb2, 0x8e, },
+ { 0x0c, 0x89, 0x55, 0x0c, 0x1f, 0x59, 0x2c, 0x9d, 0x1b, 0x29, 0x1d, 0x41,
+ 0x1d, 0xe6, 0x47, 0x8f, 0x8c, 0x2b, 0xea, 0x8f, 0xf0, 0xff, 0x21, 0x70,
+ 0x88, },
+ { 0x12, 0x18, 0x95, 0xa6, 0x59, 0xb1, 0x31, 0x24, 0x45, 0x67, 0x55, 0xa4,
+ 0x1a, 0x2d, 0x48, 0x67, 0x1b, 0x43, 0x88, 0x2d, 0x8e, 0xa0, 0x70, 0xb3,
+ 0xc6, 0xbb, },
+ { 0xe7, 0xb1, 0x1d, 0xb2, 0x76, 0x4d, 0x68, 0x68, 0x68, 0x23, 0x02, 0x55,
+ 0x3a, 0xe2, 0xe5, 0xd5, 0x4b, 0x43, 0xf9, 0x34, 0x77, 0x5c, 0xa1, 0xf5,
+ 0x55, 0xfd, 0x4f, },
+ { 0x8c, 0x87, 0x5a, 0x08, 0x3a, 0x73, 0xad, 0x61, 0xe1, 0xe7, 0x99, 0x7e,
+ 0xf0, 0x5d, 0xe9, 0x5d, 0x16, 0x43, 0x80, 0x2f, 0xd0, 0x66, 0x34, 0xe2,
+ 0x42, 0x64, 0x3b, 0x1a, },
+ { 0x39, 0xc1, 0x99, 0xcf, 0x22, 0xbf, 0x16, 0x8f, 0x9f, 0x80, 0x7f, 0x95,
+ 0x0a, 0x05, 0x67, 0x27, 0xe7, 0x15, 0xdf, 0x9d, 0xb2, 0xfe, 0x1c, 0xb5,
+ 0x1d, 0x60, 0x8f, 0x8a, 0x1d, },
+ { 0x9b, 0x6e, 0x08, 0x09, 0x06, 0x73, 0xab, 0x68, 0x02, 0x62, 0x1a, 0xe4,
+ 0xd4, 0xdf, 0xc7, 0x02, 0x4c, 0x6a, 0x5f, 0xfd, 0x23, 0xac, 0xae, 0x6d,
+ 0x43, 0xa4, 0x7a, 0x50, 0x60, 0x3c, },
+ { 0x1d, 0xb4, 0xc6, 0xe1, 0xb1, 0x4b, 0xe3, 0xf2, 0xe2, 0x1a, 0x73, 0x1b,
+ 0xa0, 0x92, 0xa7, 0xf5, 0xff, 0x8f, 0x8b, 0x5d, 0xdf, 0xa8, 0x04, 0xb3,
+ 0xb0, 0xf7, 0xcc, 0x12, 0xfa, 0x35, 0x46, },
+ { 0x49, 0x45, 0x97, 0x11, 0x0f, 0x1c, 0x60, 0x8e, 0xe8, 0x47, 0x30, 0xcf,
+ 0x60, 0xa8, 0x71, 0xc5, 0x1b, 0xe9, 0x39, 0x4d, 0x49, 0xb6, 0x12, 0x1f,
+ 0x24, 0xab, 0x37, 0xff, 0x83, 0xc2, 0xe1, 0x3a, },
+ { 0x60, },
+ { 0x24, 0x26, },
+ { 0x47, 0xeb, 0xc9, },
+ { 0x4a, 0xd0, 0xbc, 0xf0, },
+ { 0x8e, 0x2b, 0xc9, 0x85, 0x3c, },
+ { 0xa2, 0x07, 0x15, 0xb8, 0x12, 0x74, },
+ { 0x0f, 0xdb, 0x5b, 0x33, 0x69, 0xfe, 0x4b, },
+ { 0xa2, 0x86, 0x54, 0xf4, 0xfd, 0xb2, 0xd4, 0xe6, },
+ { 0xbb, 0x84, 0x78, 0x49, 0x27, 0x8e, 0x61, 0xda, 0x60, },
+ { 0x04, 0xc3, 0xcd, 0xaa, 0x8f, 0xa7, 0x03, 0xc9, 0xf9, 0xb6, },
+ { 0xf8, 0x27, 0x1d, 0x61, 0xdc, 0x21, 0x42, 0xdd, 0xad, 0x92, 0x40, },
+ { 0x12, 0x87, 0xdf, 0xc2, 0x41, 0x45, 0x5a, 0x36, 0x48, 0x5b, 0x51, 0x2b, },
+ { 0xbb, 0x37, 0x5d, 0x1f, 0xf1, 0x68, 0x7a, 0xc4, 0xa5, 0xd2, 0xa4, 0x91,
+ 0x8d, },
+ { 0x5b, 0x27, 0xd1, 0x04, 0x54, 0x52, 0x9f, 0xa3, 0x47, 0x86, 0x33, 0x33,
+ 0xbf, 0xa0, },
+ { 0xcf, 0x04, 0xea, 0xf8, 0x03, 0x2a, 0x43, 0xff, 0xa6, 0x68, 0x21, 0x4c,
+ 0xd5, 0x4b, 0xed, },
+ { 0xaf, 0xb8, 0xbc, 0x63, 0x0f, 0x18, 0x4d, 0xe2, 0x7a, 0xdd, 0x46, 0x44,
+ 0xc8, 0x24, 0x0a, 0xb7, },
+ { 0x3e, 0xdc, 0x36, 0xe4, 0x89, 0xb1, 0xfa, 0xc6, 0x40, 0x93, 0x2e, 0x75,
+ 0xb2, 0x15, 0xd1, 0xb1, 0x10, },
+ { 0x6c, 0xd8, 0x20, 0x3b, 0x82, 0x79, 0xf9, 0xc8, 0xbc, 0x9d, 0xe0, 0x35,
+ 0xbe, 0x1b, 0x49, 0x1a, 0xbc, 0x3a, },
+ { 0x78, 0x65, 0x2c, 0xbe, 0x35, 0x67, 0xdc, 0x78, 0xd4, 0x41, 0xf6, 0xc9,
+ 0xde, 0xde, 0x1f, 0x18, 0x13, 0x31, 0x11, },
+ { 0x8a, 0x7f, 0xb1, 0x33, 0x8f, 0x0c, 0x3c, 0x0a, 0x06, 0x61, 0xf0, 0x47,
+ 0x29, 0x1b, 0x29, 0xbc, 0x1c, 0x47, 0xef, 0x7a, },
+ { 0x65, 0x91, 0xf1, 0xe6, 0xb3, 0x96, 0xd3, 0x8c, 0xc2, 0x4a, 0x59, 0x35,
+ 0x72, 0x8e, 0x0b, 0x9a, 0x87, 0xca, 0x34, 0x7b, 0x63, },
+ { 0x5f, 0x08, 0x87, 0x80, 0x56, 0x25, 0x89, 0x77, 0x61, 0x8c, 0x64, 0xa1,
+ 0x59, 0x6d, 0x59, 0x62, 0xe8, 0x4a, 0xc8, 0x58, 0x99, 0xd1, },
+ { 0x23, 0x87, 0x1d, 0xed, 0x6f, 0xf2, 0x91, 0x90, 0xe2, 0xfe, 0x43, 0x21,
+ 0xaf, 0x97, 0xc6, 0xbc, 0xd7, 0x15, 0xc7, 0x2d, 0x08, 0x77, 0x91, },
+ { 0x90, 0x47, 0x9a, 0x9e, 0x3a, 0xdf, 0xf3, 0xc9, 0x4c, 0x1e, 0xa7, 0xd4,
+ 0x6a, 0x32, 0x90, 0xfe, 0xb7, 0xb6, 0x7b, 0xfa, 0x96, 0x61, 0xfb, 0xa4, },
+ { 0xb1, 0x67, 0x60, 0x45, 0xb0, 0x96, 0xc5, 0x15, 0x9f, 0x4d, 0x26, 0xd7,
+ 0x9d, 0xf1, 0xf5, 0x6d, 0x21, 0x00, 0x94, 0x31, 0x64, 0x94, 0xd3, 0xa7,
+ 0xd3, },
+ { 0x02, 0x3e, 0xaf, 0xf3, 0x79, 0x73, 0xa5, 0xf5, 0xcc, 0x7a, 0x7f, 0xfb,
+ 0x79, 0x2b, 0x85, 0x8c, 0x88, 0x72, 0x06, 0xbe, 0xfe, 0xaf, 0xc1, 0x16,
+ 0xa6, 0xd6, },
+ { 0x2a, 0xb0, 0x1a, 0xe5, 0xaa, 0x6e, 0xb3, 0xae, 0x53, 0x85, 0x33, 0x80,
+ 0x75, 0xae, 0x30, 0xe6, 0xb8, 0x72, 0x42, 0xf6, 0x25, 0x4f, 0x38, 0x88,
+ 0x55, 0xd1, 0xa9, },
+ { 0x90, 0xd8, 0x0c, 0xc0, 0x93, 0x4b, 0x4f, 0x9e, 0x65, 0x6c, 0xa1, 0x54,
+ 0xa6, 0xf6, 0x6e, 0xca, 0xd2, 0xbb, 0x7e, 0x6a, 0x1c, 0xd3, 0xce, 0x46,
+ 0xef, 0xb0, 0x00, 0x8d, },
+ { 0xed, 0x9c, 0x49, 0xcd, 0xc2, 0xde, 0x38, 0x0e, 0xe9, 0x98, 0x6c, 0xc8,
+ 0x90, 0x9e, 0x3c, 0xd4, 0xd3, 0xeb, 0x88, 0x32, 0xc7, 0x28, 0xe3, 0x94,
+ 0x1c, 0x9f, 0x8b, 0xf3, 0xcb, },
+ { 0xac, 0xe7, 0x92, 0x16, 0xb4, 0x14, 0xa0, 0xe4, 0x04, 0x79, 0xa2, 0xf4,
+ 0x31, 0xe6, 0x0c, 0x26, 0xdc, 0xbf, 0x2f, 0x69, 0x1b, 0x55, 0x94, 0x67,
+ 0xda, 0x0c, 0xd7, 0x32, 0x1f, 0xef, },
+ { 0x68, 0x63, 0x85, 0x57, 0x95, 0x9e, 0x42, 0x27, 0x41, 0x43, 0x42, 0x02,
+ 0xa5, 0x78, 0xa7, 0xc6, 0x43, 0xc1, 0x6a, 0xba, 0x70, 0x80, 0xcd, 0x04,
+ 0xb6, 0x78, 0x76, 0x29, 0xf3, 0xe8, 0xa0, },
+ { 0xe6, 0xac, 0x8d, 0x9d, 0xf0, 0xc0, 0xf7, 0xf7, 0xe3, 0x3e, 0x4e, 0x28,
+ 0x0f, 0x59, 0xb2, 0x67, 0x9e, 0x84, 0x34, 0x42, 0x96, 0x30, 0x2b, 0xca,
+ 0x49, 0xb6, 0xc5, 0x9a, 0x84, 0x59, 0xa7, 0x81, },
+ { 0x7e, },
+ { 0x1e, 0x21, },
+ { 0x26, 0xd3, 0xdd, },
+ { 0x2c, 0xd4, 0xb3, 0x3d, },
+ { 0x86, 0x7b, 0x76, 0x3c, 0xf0, },
+ { 0x12, 0xc3, 0x70, 0x1d, 0x55, 0x18, },
+ { 0x96, 0xc2, 0xbd, 0x61, 0x55, 0xf4, 0x24, },
+ { 0x20, 0x51, 0xf7, 0x86, 0x58, 0x8f, 0x07, 0x2a, },
+ { 0x93, 0x15, 0xa8, 0x1d, 0xda, 0x97, 0xee, 0x0e, 0x6c, },
+ { 0x39, 0x93, 0xdf, 0xd5, 0x0e, 0xca, 0xdc, 0x7a, 0x92, 0xce, },
+ { 0x60, 0xd5, 0xfd, 0xf5, 0x1b, 0x26, 0x82, 0x26, 0x73, 0x02, 0xbc, },
+ { 0x98, 0xf2, 0x34, 0xe1, 0xf5, 0xfb, 0x00, 0xac, 0x10, 0x4a, 0x38, 0x9f, },
+ { 0xda, 0x3a, 0x92, 0x8a, 0xd0, 0xcd, 0x12, 0xcd, 0x15, 0xbb, 0xab, 0x77,
+ 0x66, },
+ { 0xa2, 0x92, 0x1a, 0xe5, 0xca, 0x0c, 0x30, 0x75, 0xeb, 0xaf, 0x00, 0x31,
+ 0x55, 0x66, },
+ { 0x06, 0xea, 0xfd, 0x3e, 0x86, 0x38, 0x62, 0x4e, 0xa9, 0x12, 0xa4, 0x12,
+ 0x43, 0xbf, 0xa1, },
+ { 0xe4, 0x71, 0x7b, 0x94, 0xdb, 0xa0, 0xd2, 0xff, 0x9b, 0xeb, 0xad, 0x8e,
+ 0x95, 0x8a, 0xc5, 0xed, },
+ { 0x25, 0x5a, 0x77, 0x71, 0x41, 0x0e, 0x7a, 0xe9, 0xed, 0x0c, 0x10, 0xef,
+ 0xf6, 0x2b, 0x3a, 0xba, 0x60, },
+ { 0xee, 0xe2, 0xa3, 0x67, 0x64, 0x1d, 0xc6, 0x04, 0xc4, 0xe1, 0x68, 0xd2,
+ 0x6e, 0xd2, 0x91, 0x75, 0x53, 0x07, },
+ { 0xe0, 0xf6, 0x4d, 0x8f, 0x68, 0xfc, 0x06, 0x7e, 0x18, 0x79, 0x7f, 0x2b,
+ 0x6d, 0xef, 0x46, 0x7f, 0xab, 0xb2, 0xad, },
+ { 0x3d, 0x35, 0x88, 0x9f, 0x2e, 0xcf, 0x96, 0x45, 0x07, 0x60, 0x71, 0x94,
+ 0x00, 0x8d, 0xbf, 0xf4, 0xef, 0x46, 0x2e, 0x3c, },
+ { 0x43, 0xcf, 0x98, 0xf7, 0x2d, 0xf4, 0x17, 0xe7, 0x8c, 0x05, 0x2d, 0x9b,
+ 0x24, 0xfb, 0x4d, 0xea, 0x4a, 0xec, 0x01, 0x25, 0x29, },
+ { 0x8e, 0x73, 0x9a, 0x78, 0x11, 0xfe, 0x48, 0xa0, 0x3b, 0x1a, 0x26, 0xdf,
+ 0x25, 0xe9, 0x59, 0x1c, 0x70, 0x07, 0x9f, 0xdc, 0xa0, 0xa6, },
+ { 0xe8, 0x47, 0x71, 0xc7, 0x3e, 0xdf, 0xb5, 0x13, 0xb9, 0x85, 0x13, 0xa8,
+ 0x54, 0x47, 0x6e, 0x59, 0x96, 0x09, 0x13, 0x5f, 0x82, 0x16, 0x0b, },
+ { 0xfb, 0xc0, 0x8c, 0x03, 0x21, 0xb3, 0xc4, 0xb5, 0x43, 0x32, 0x6c, 0xea,
+ 0x7f, 0xa8, 0x43, 0x91, 0xe8, 0x4e, 0x3f, 0xbf, 0x45, 0x58, 0x6a, 0xa3, },
+ { 0x55, 0xf8, 0xf3, 0x00, 0x76, 0x09, 0xef, 0x69, 0x5d, 0xd2, 0x8a, 0xf2,
+ 0x65, 0xc3, 0xcb, 0x9b, 0x43, 0xfd, 0xb1, 0x7e, 0x7f, 0xa1, 0x94, 0xb0,
+ 0xd7, },
+ { 0xaa, 0x13, 0xc1, 0x51, 0x40, 0x6d, 0x8d, 0x4c, 0x0a, 0x95, 0x64, 0x7b,
+ 0xd1, 0x96, 0xb6, 0x56, 0xb4, 0x5b, 0xcf, 0xd6, 0xd9, 0x15, 0x97, 0xdd,
+ 0xb6, 0xef, },
+ { 0xaf, 0xb7, 0x36, 0xb0, 0x04, 0xdb, 0xd7, 0x9c, 0x9a, 0x44, 0xc4, 0xf6,
+ 0x1f, 0x12, 0x21, 0x2d, 0x59, 0x30, 0x54, 0xab, 0x27, 0x61, 0xa3, 0x57,
+ 0xef, 0xf8, 0x53, },
+ { 0x97, 0x34, 0x45, 0x3e, 0xce, 0x7c, 0x35, 0xa2, 0xda, 0x9f, 0x4b, 0x46,
+ 0x6c, 0x11, 0x67, 0xff, 0x2f, 0x76, 0x58, 0x15, 0x71, 0xfa, 0x44, 0x89,
+ 0x89, 0xfd, 0xf7, 0x99, },
+ { 0x1f, 0xb1, 0x62, 0xeb, 0x83, 0xc5, 0x9c, 0x89, 0xf9, 0x2c, 0xd2, 0x03,
+ 0x61, 0xbc, 0xbb, 0xa5, 0x74, 0x0e, 0x9b, 0x7e, 0x82, 0x3e, 0x70, 0x0a,
+ 0xa9, 0x8f, 0x2b, 0x59, 0xfb, },
+ { 0xf8, 0xca, 0x5e, 0x3a, 0x4f, 0x9e, 0x10, 0x69, 0x10, 0xd5, 0x4c, 0xeb,
+ 0x1a, 0x0f, 0x3c, 0x6a, 0x98, 0xf5, 0xb0, 0x97, 0x5b, 0x37, 0x2f, 0x0d,
+ 0xbd, 0x42, 0x4b, 0x69, 0xa1, 0x82, },
+ { 0x12, 0x8c, 0x6d, 0x52, 0x08, 0xef, 0x74, 0xb2, 0xe6, 0xaa, 0xd3, 0xb0,
+ 0x26, 0xb0, 0xd9, 0x94, 0xb6, 0x11, 0x45, 0x0e, 0x36, 0x71, 0x14, 0x2d,
+ 0x41, 0x8c, 0x21, 0x53, 0x31, 0xe9, 0x68, },
+ { 0xee, 0xea, 0x0d, 0x89, 0x47, 0x7e, 0x72, 0xd1, 0xd8, 0xce, 0x58, 0x4c,
+ 0x94, 0x1f, 0x0d, 0x51, 0x08, 0xa3, 0xb6, 0x3d, 0xe7, 0x82, 0x46, 0x92,
+ 0xd6, 0x98, 0x6b, 0x07, 0x10, 0x65, 0x52, 0x65, },
+};
+
+static const u8 blake2s_hmac_testvecs[][BLAKE2S_HASH_SIZE] __initconst = {
+ { 0xce, 0xe1, 0x57, 0x69, 0x82, 0xdc, 0xbf, 0x43, 0xad, 0x56, 0x4c, 0x70,
+ 0xed, 0x68, 0x16, 0x96, 0xcf, 0xa4, 0x73, 0xe8, 0xe8, 0xfc, 0x32, 0x79,
+ 0x08, 0x0a, 0x75, 0x82, 0xda, 0x3f, 0x05, 0x11, },
+ { 0x77, 0x2f, 0x0c, 0x71, 0x41, 0xf4, 0x4b, 0x2b, 0xb3, 0xc6, 0xb6, 0xf9,
+ 0x60, 0xde, 0xe4, 0x52, 0x38, 0x66, 0xe8, 0xbf, 0x9b, 0x96, 0xc4, 0x9f,
+ 0x60, 0xd9, 0x24, 0x37, 0x99, 0xd6, 0xec, 0x31, },
+};
+
+bool __init blake2s_selftest(void)
+{
+ u8 key[BLAKE2S_KEY_SIZE];
+ u8 buf[ARRAY_SIZE(blake2s_testvecs)];
+ u8 hash[BLAKE2S_HASH_SIZE];
+ struct blake2s_state state;
+ bool success = true;
+ int i, l;
+
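+ /* Fill the key with a deterministic Fibonacci-style byte sequence. */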
+ key[0] = key[1] = 1;
+ for (i = 2; i < sizeof(key); ++i)
+ key[i] = key[i - 2] + key[i - 1];
+
+ for (i = 0; i < sizeof(buf); ++i)
+ buf[i] = (u8)i;
+
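+ /* For vector i, hash an i-byte prefix of buf, with the output length
+  * cycling through 1..BLAKE2S_HASH_SIZE and the key length cycling
+  * through 0..BLAKE2S_KEY_SIZE; l splits the input so the incremental
+  * interface also sees uneven update sizes. */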
+ for (i = l = 0; i < ARRAY_SIZE(blake2s_testvecs); l = (l + 37) % ++i) {
+ int outlen = 1 + i % BLAKE2S_HASH_SIZE;
+ int keylen = (13 * i) % (BLAKE2S_KEY_SIZE + 1);
+
+ blake2s(hash, buf, key + BLAKE2S_KEY_SIZE - keylen, outlen, i,
+ keylen);
+ if (memcmp(hash, blake2s_testvecs[i], outlen)) {
+ pr_err("blake2s self-test %d: FAIL\n", i + 1);
+ success = false;
+ }
+
+ if (!keylen)
+ blake2s_init(&state, outlen);
+ else
+ blake2s_init_key(&state, outlen,
+ key + BLAKE2S_KEY_SIZE - keylen,
+ keylen);
+
+ blake2s_update(&state, buf, l);
+ blake2s_update(&state, buf + l, i - l);
+ blake2s_final(&state, hash);
+ if (memcmp(hash, blake2s_testvecs[i], outlen)) {
+ pr_err("blake2s init/update/final self-test %d: FAIL\n",
+ i + 1);
+ success = false;
+ }
+ }
+
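+ /* With the one-shot and incremental interfaces verified, check the
+  * HMAC wrapper against two precomputed vectors, swapping the roles of
+  * buf and key between them. */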
+ if (success) {
+ blake2s256_hmac(hash, buf, key, sizeof(buf), sizeof(key));
+ success &= !memcmp(hash, blake2s_hmac_testvecs[0], BLAKE2S_HASH_SIZE);
+
+ blake2s256_hmac(hash, key, buf, sizeof(key), sizeof(buf));
+ success &= !memcmp(hash, blake2s_hmac_testvecs[1], BLAKE2S_HASH_SIZE);
+
+ if (!success)
+ pr_err("blake2s256_hmac self-test: FAIL\n");
+ }
+
+ return success;
+}
diff --git a/lib/crypto/blake2s.c b/lib/crypto/blake2s.c
new file mode 100644
index 000000000000..41025a30c524
--- /dev/null
+++ b/lib/crypto/blake2s.c
@@ -0,0 +1,126 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ *
+ * This is an implementation of the BLAKE2s hash and PRF functions.
+ *
+ * Information: https://blake2.net/
+ */
+
+#include <crypto/internal/blake2s.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/bug.h>
+#include <asm/unaligned.h>
+
+bool blake2s_selftest(void);
+
+void blake2s_update(struct blake2s_state *state, const u8 *in, size_t inlen)
+{
+ const size_t fill = BLAKE2S_BLOCK_SIZE - state->buflen;
+
+ if (unlikely(!inlen))
+ return;
+ if (inlen > fill) {
+ memcpy(state->buf + state->buflen, in, fill);
+ if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_BLAKE2S))
+ blake2s_compress_arch(state, state->buf, 1,
+ BLAKE2S_BLOCK_SIZE);
+ else
+ blake2s_compress_generic(state, state->buf, 1,
+ BLAKE2S_BLOCK_SIZE);
+ state->buflen = 0;
+ in += fill;
+ inlen -= fill;
+ }
+ if (inlen > BLAKE2S_BLOCK_SIZE) {
+ const size_t nblocks = DIV_ROUND_UP(inlen, BLAKE2S_BLOCK_SIZE);
+ /* Hash one less (full) block than strictly possible */
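+ /* At least one byte always stays buffered, so that blake2s_final()
+  * is guaranteed to have a block left to flag as the last one. */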
+ if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_BLAKE2S))
+ blake2s_compress_arch(state, in, nblocks - 1,
+ BLAKE2S_BLOCK_SIZE);
+ else
+ blake2s_compress_generic(state, in, nblocks - 1,
+ BLAKE2S_BLOCK_SIZE);
+ in += BLAKE2S_BLOCK_SIZE * (nblocks - 1);
+ inlen -= BLAKE2S_BLOCK_SIZE * (nblocks - 1);
+ }
+ memcpy(state->buf + state->buflen, in, inlen);
+ state->buflen += inlen;
+}
+EXPORT_SYMBOL(blake2s_update);
+
+void blake2s_final(struct blake2s_state *state, u8 *out)
+{
+ WARN_ON(IS_ENABLED(DEBUG) && !out);
+ blake2s_set_lastblock(state);
+ memset(state->buf + state->buflen, 0,
+ BLAKE2S_BLOCK_SIZE - state->buflen); /* Padding */
+ if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_BLAKE2S))
+ blake2s_compress_arch(state, state->buf, 1, state->buflen);
+ else
+ blake2s_compress_generic(state, state->buf, 1, state->buflen);
+ cpu_to_le32_array(state->h, ARRAY_SIZE(state->h));
+ memcpy(out, state->h, state->outlen);
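+ /* Wipe the state so no key or message material lingers in memory. */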
+ memzero_explicit(state, sizeof(*state));
+}
+EXPORT_SYMBOL(blake2s_final);
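+
+ /*
+  * Illustrative use of the incremental interface above (a sketch only;
+  * data and data_len are placeholder names, and the selftest exercises
+  * the same calls):
+  *
+  *	struct blake2s_state state;
+  *	u8 out[BLAKE2S_HASH_SIZE];
+  *
+  *	blake2s_init(&state, sizeof(out));
+  *	blake2s_update(&state, data, data_len);
+  *	blake2s_final(&state, out);
+  */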
+
+void blake2s256_hmac(u8 *out, const u8 *in, const u8 *key, const size_t inlen,
+ const size_t keylen)
+{
+ struct blake2s_state state;
+ u8 x_key[BLAKE2S_BLOCK_SIZE] __aligned(__alignof__(u32)) = { 0 };
+ u8 i_hash[BLAKE2S_HASH_SIZE] __aligned(__alignof__(u32));
+ int i;
+
+ if (keylen > BLAKE2S_BLOCK_SIZE) {
+ blake2s_init(&state, BLAKE2S_HASH_SIZE);
+ blake2s_update(&state, key, keylen);
+ blake2s_final(&state, x_key);
+ } else
+ memcpy(x_key, key, keylen);
+
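+ /* x_key now holds the key, pre-hashed if longer than one block and
+  * zero-padded to BLAKE2S_BLOCK_SIZE; XORing with 0x36 forms the HMAC
+  * inner padding (ipad). */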
+ for (i = 0; i < BLAKE2S_BLOCK_SIZE; ++i)
+ x_key[i] ^= 0x36;
+
+ blake2s_init(&state, BLAKE2S_HASH_SIZE);
+ blake2s_update(&state, x_key, BLAKE2S_BLOCK_SIZE);
+ blake2s_update(&state, in, inlen);
+ blake2s_final(&state, i_hash);
+
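+ /* Convert the ipad key into the opad key: 0x36 ^ (0x5c ^ 0x36) == 0x5c. */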
+ for (i = 0; i < BLAKE2S_BLOCK_SIZE; ++i)
+ x_key[i] ^= 0x5c ^ 0x36;
+
+ blake2s_init(&state, BLAKE2S_HASH_SIZE);
+ blake2s_update(&state, x_key, BLAKE2S_BLOCK_SIZE);
+ blake2s_update(&state, i_hash, BLAKE2S_HASH_SIZE);
+ blake2s_final(&state, i_hash);
+
+ memcpy(out, i_hash, BLAKE2S_HASH_SIZE);
+ memzero_explicit(x_key, BLAKE2S_BLOCK_SIZE);
+ memzero_explicit(i_hash, BLAKE2S_HASH_SIZE);
+}
+EXPORT_SYMBOL(blake2s256_hmac);
+
+static int __init mod_init(void)
+{
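+ /* Run the self-test on load unless the crypto self-tests are disabled;
+  * refuse to initialize if it fails. */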
+ if (!IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS) &&
+ WARN_ON(!blake2s_selftest()))
+ return -ENODEV;
+ return 0;
+}
+
+static void __exit mod_exit(void)
+{
+}
+
+module_init(mod_init);
+module_exit(mod_exit);
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("BLAKE2s hash function");
+MODULE_AUTHOR("Jason A. Donenfeld <Jason@zx2c4.com>");
diff --git a/lib/chacha.c b/lib/crypto/chacha.c
index c7c9826564d3..65ead6b0c7e0 100644
--- a/lib/chacha.c
+++ b/lib/crypto/chacha.c
@@ -5,9 +5,11 @@
* Copyright (C) 2015 Martin Willi
*/
+#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/bitops.h>
+#include <linux/string.h>
#include <linux/cryptohash.h>
#include <asm/unaligned.h>
#include <crypto/chacha.h>
@@ -72,7 +74,7 @@ static void chacha_permute(u32 *x, int nrounds)
* The caller has already converted the endianness of the input. This function
* also handles incrementing the block counter in the input matrix.
*/
-void chacha_block(u32 *state, u8 *stream, int nrounds)
+void chacha_block_generic(u32 *state, u8 *stream, int nrounds)
{
u32 x[16];
int i;
@@ -86,11 +88,11 @@ void chacha_block(u32 *state, u8 *stream, int nrounds)
state[12]++;
}
-EXPORT_SYMBOL(chacha_block);
+EXPORT_SYMBOL(chacha_block_generic);
/**
- * hchacha_block - abbreviated ChaCha core, for XChaCha
- * @in: input state matrix (16 32-bit words)
+ * hchacha_block_generic - abbreviated ChaCha core, for XChaCha
+ * @state: input state matrix (16 32-bit words)
- * @out: output (8 32-bit words)
+ * @stream: output (8 32-bit words)
* @nrounds: number of rounds (20 or 12; 20 is recommended)
*
@@ -99,15 +101,15 @@ EXPORT_SYMBOL(chacha_block);
* skips the final addition of the initial state, and outputs only certain words
* of the state. It should not be used for streaming directly.
*/
-void hchacha_block(const u32 *in, u32 *out, int nrounds)
+void hchacha_block_generic(const u32 *state, u32 *stream, int nrounds)
{
u32 x[16];
- memcpy(x, in, 64);
+ memcpy(x, state, 64);
chacha_permute(x, nrounds);
- memcpy(&out[0], &x[0], 16);
- memcpy(&out[4], &x[12], 16);
+ memcpy(&stream[0], &x[0], 16);
+ memcpy(&stream[4], &x[12], 16);
}
-EXPORT_SYMBOL(hchacha_block);
+EXPORT_SYMBOL(hchacha_block_generic);
diff --git a/lib/crypto/chacha20poly1305-selftest.c b/lib/crypto/chacha20poly1305-selftest.c
new file mode 100644
index 000000000000..465de46dbdef
--- /dev/null
+++ b/lib/crypto/chacha20poly1305-selftest.c
@@ -0,0 +1,7393 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#include <crypto/chacha20poly1305.h>
+#include <crypto/poly1305.h>
+
+#include <asm/unaligned.h>
+#include <linux/bug.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+struct chacha20poly1305_testvec {
+ const u8 *input, *output, *assoc, *nonce, *key;
+ size_t ilen, alen, nlen;
+ bool failure;
+};
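+
+ /* A vector with .failure set marks a negative test whose decryption is
+  * expected to be rejected. */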
+
+/* The first of these are the ChaCha20-Poly1305 AEAD test vectors from RFC7539
+ * 2.8.2. The ones after those were generated with reference implementations.
+ * The final, marked ones are taken from Wycheproof, but we only run those
+ * through the encrypt side, because we are mostly stressing the primitives
+ * rather than the actual chapoly construction.
+ */
+
+static const u8 enc_input001[] __initconst = {
+ 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, 0x74,
+ 0x2d, 0x44, 0x72, 0x61, 0x66, 0x74, 0x73, 0x20,
+ 0x61, 0x72, 0x65, 0x20, 0x64, 0x72, 0x61, 0x66,
+ 0x74, 0x20, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65,
+ 0x6e, 0x74, 0x73, 0x20, 0x76, 0x61, 0x6c, 0x69,
+ 0x64, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x61, 0x20,
+ 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x20,
+ 0x6f, 0x66, 0x20, 0x73, 0x69, 0x78, 0x20, 0x6d,
+ 0x6f, 0x6e, 0x74, 0x68, 0x73, 0x20, 0x61, 0x6e,
+ 0x64, 0x20, 0x6d, 0x61, 0x79, 0x20, 0x62, 0x65,
+ 0x20, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64,
+ 0x2c, 0x20, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63,
+ 0x65, 0x64, 0x2c, 0x20, 0x6f, 0x72, 0x20, 0x6f,
+ 0x62, 0x73, 0x6f, 0x6c, 0x65, 0x74, 0x65, 0x64,
+ 0x20, 0x62, 0x79, 0x20, 0x6f, 0x74, 0x68, 0x65,
+ 0x72, 0x20, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65,
+ 0x6e, 0x74, 0x73, 0x20, 0x61, 0x74, 0x20, 0x61,
+ 0x6e, 0x79, 0x20, 0x74, 0x69, 0x6d, 0x65, 0x2e,
+ 0x20, 0x49, 0x74, 0x20, 0x69, 0x73, 0x20, 0x69,
+ 0x6e, 0x61, 0x70, 0x70, 0x72, 0x6f, 0x70, 0x72,
+ 0x69, 0x61, 0x74, 0x65, 0x20, 0x74, 0x6f, 0x20,
+ 0x75, 0x73, 0x65, 0x20, 0x49, 0x6e, 0x74, 0x65,
+ 0x72, 0x6e, 0x65, 0x74, 0x2d, 0x44, 0x72, 0x61,
+ 0x66, 0x74, 0x73, 0x20, 0x61, 0x73, 0x20, 0x72,
+ 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65,
+ 0x20, 0x6d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61,
+ 0x6c, 0x20, 0x6f, 0x72, 0x20, 0x74, 0x6f, 0x20,
+ 0x63, 0x69, 0x74, 0x65, 0x20, 0x74, 0x68, 0x65,
+ 0x6d, 0x20, 0x6f, 0x74, 0x68, 0x65, 0x72, 0x20,
+ 0x74, 0x68, 0x61, 0x6e, 0x20, 0x61, 0x73, 0x20,
+ 0x2f, 0xe2, 0x80, 0x9c, 0x77, 0x6f, 0x72, 0x6b,
+ 0x20, 0x69, 0x6e, 0x20, 0x70, 0x72, 0x6f, 0x67,
+ 0x72, 0x65, 0x73, 0x73, 0x2e, 0x2f, 0xe2, 0x80,
+ 0x9d
+};
+static const u8 enc_output001[] __initconst = {
+ 0x64, 0xa0, 0x86, 0x15, 0x75, 0x86, 0x1a, 0xf4,
+ 0x60, 0xf0, 0x62, 0xc7, 0x9b, 0xe6, 0x43, 0xbd,
+ 0x5e, 0x80, 0x5c, 0xfd, 0x34, 0x5c, 0xf3, 0x89,
+ 0xf1, 0x08, 0x67, 0x0a, 0xc7, 0x6c, 0x8c, 0xb2,
+ 0x4c, 0x6c, 0xfc, 0x18, 0x75, 0x5d, 0x43, 0xee,
+ 0xa0, 0x9e, 0xe9, 0x4e, 0x38, 0x2d, 0x26, 0xb0,
+ 0xbd, 0xb7, 0xb7, 0x3c, 0x32, 0x1b, 0x01, 0x00,
+ 0xd4, 0xf0, 0x3b, 0x7f, 0x35, 0x58, 0x94, 0xcf,
+ 0x33, 0x2f, 0x83, 0x0e, 0x71, 0x0b, 0x97, 0xce,
+ 0x98, 0xc8, 0xa8, 0x4a, 0xbd, 0x0b, 0x94, 0x81,
+ 0x14, 0xad, 0x17, 0x6e, 0x00, 0x8d, 0x33, 0xbd,
+ 0x60, 0xf9, 0x82, 0xb1, 0xff, 0x37, 0xc8, 0x55,
+ 0x97, 0x97, 0xa0, 0x6e, 0xf4, 0xf0, 0xef, 0x61,
+ 0xc1, 0x86, 0x32, 0x4e, 0x2b, 0x35, 0x06, 0x38,
+ 0x36, 0x06, 0x90, 0x7b, 0x6a, 0x7c, 0x02, 0xb0,
+ 0xf9, 0xf6, 0x15, 0x7b, 0x53, 0xc8, 0x67, 0xe4,
+ 0xb9, 0x16, 0x6c, 0x76, 0x7b, 0x80, 0x4d, 0x46,
+ 0xa5, 0x9b, 0x52, 0x16, 0xcd, 0xe7, 0xa4, 0xe9,
+ 0x90, 0x40, 0xc5, 0xa4, 0x04, 0x33, 0x22, 0x5e,
+ 0xe2, 0x82, 0xa1, 0xb0, 0xa0, 0x6c, 0x52, 0x3e,
+ 0xaf, 0x45, 0x34, 0xd7, 0xf8, 0x3f, 0xa1, 0x15,
+ 0x5b, 0x00, 0x47, 0x71, 0x8c, 0xbc, 0x54, 0x6a,
+ 0x0d, 0x07, 0x2b, 0x04, 0xb3, 0x56, 0x4e, 0xea,
+ 0x1b, 0x42, 0x22, 0x73, 0xf5, 0x48, 0x27, 0x1a,
+ 0x0b, 0xb2, 0x31, 0x60, 0x53, 0xfa, 0x76, 0x99,
+ 0x19, 0x55, 0xeb, 0xd6, 0x31, 0x59, 0x43, 0x4e,
+ 0xce, 0xbb, 0x4e, 0x46, 0x6d, 0xae, 0x5a, 0x10,
+ 0x73, 0xa6, 0x72, 0x76, 0x27, 0x09, 0x7a, 0x10,
+ 0x49, 0xe6, 0x17, 0xd9, 0x1d, 0x36, 0x10, 0x94,
+ 0xfa, 0x68, 0xf0, 0xff, 0x77, 0x98, 0x71, 0x30,
+ 0x30, 0x5b, 0xea, 0xba, 0x2e, 0xda, 0x04, 0xdf,
+ 0x99, 0x7b, 0x71, 0x4d, 0x6c, 0x6f, 0x2c, 0x29,
+ 0xa6, 0xad, 0x5c, 0xb4, 0x02, 0x2b, 0x02, 0x70,
+ 0x9b, 0xee, 0xad, 0x9d, 0x67, 0x89, 0x0c, 0xbb,
+ 0x22, 0x39, 0x23, 0x36, 0xfe, 0xa1, 0x85, 0x1f,
+ 0x38
+};
+static const u8 enc_assoc001[] __initconst = {
+ 0xf3, 0x33, 0x88, 0x86, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x4e, 0x91
+};
+static const u8 enc_nonce001[] __initconst = {
+ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08
+};
+static const u8 enc_key001[] __initconst = {
+ 0x1c, 0x92, 0x40, 0xa5, 0xeb, 0x55, 0xd3, 0x8a,
+ 0xf3, 0x33, 0x88, 0x86, 0x04, 0xf6, 0xb5, 0xf0,
+ 0x47, 0x39, 0x17, 0xc1, 0x40, 0x2b, 0x80, 0x09,
+ 0x9d, 0xca, 0x5c, 0xbc, 0x20, 0x70, 0x75, 0xc0
+};
+
+static const u8 enc_input002[] __initconst = { };
+static const u8 enc_output002[] __initconst = {
+ 0xea, 0xe0, 0x1e, 0x9e, 0x2c, 0x91, 0xaa, 0xe1,
+ 0xdb, 0x5d, 0x99, 0x3f, 0x8a, 0xf7, 0x69, 0x92
+};
+static const u8 enc_assoc002[] __initconst = { };
+static const u8 enc_nonce002[] __initconst = {
+ 0xca, 0xbf, 0x33, 0x71, 0x32, 0x45, 0x77, 0x8e
+};
+static const u8 enc_key002[] __initconst = {
+ 0x4c, 0xf5, 0x96, 0x83, 0x38, 0xe6, 0xae, 0x7f,
+ 0x2d, 0x29, 0x25, 0x76, 0xd5, 0x75, 0x27, 0x86,
+ 0x91, 0x9a, 0x27, 0x7a, 0xfb, 0x46, 0xc5, 0xef,
+ 0x94, 0x81, 0x79, 0x57, 0x14, 0x59, 0x40, 0x68
+};
+
+static const u8 enc_input003[] __initconst = { };
+static const u8 enc_output003[] __initconst = {
+ 0xdd, 0x6b, 0x3b, 0x82, 0xce, 0x5a, 0xbd, 0xd6,
+ 0xa9, 0x35, 0x83, 0xd8, 0x8c, 0x3d, 0x85, 0x77
+};
+static const u8 enc_assoc003[] __initconst = {
+ 0x33, 0x10, 0x41, 0x12, 0x1f, 0xf3, 0xd2, 0x6b
+};
+static const u8 enc_nonce003[] __initconst = {
+ 0x3d, 0x86, 0xb5, 0x6b, 0xc8, 0xa3, 0x1f, 0x1d
+};
+static const u8 enc_key003[] __initconst = {
+ 0x2d, 0xb0, 0x5d, 0x40, 0xc8, 0xed, 0x44, 0x88,
+ 0x34, 0xd1, 0x13, 0xaf, 0x57, 0xa1, 0xeb, 0x3a,
+ 0x2a, 0x80, 0x51, 0x36, 0xec, 0x5b, 0xbc, 0x08,
+ 0x93, 0x84, 0x21, 0xb5, 0x13, 0x88, 0x3c, 0x0d
+};
+
+static const u8 enc_input004[] __initconst = {
+ 0xa4
+};
+static const u8 enc_output004[] __initconst = {
+ 0xb7, 0x1b, 0xb0, 0x73, 0x59, 0xb0, 0x84, 0xb2,
+ 0x6d, 0x8e, 0xab, 0x94, 0x31, 0xa1, 0xae, 0xac,
+ 0x89
+};
+static const u8 enc_assoc004[] __initconst = {
+ 0x6a, 0xe2, 0xad, 0x3f, 0x88, 0x39, 0x5a, 0x40
+};
+static const u8 enc_nonce004[] __initconst = {
+ 0xd2, 0x32, 0x1f, 0x29, 0x28, 0xc6, 0xc4, 0xc4
+};
+static const u8 enc_key004[] __initconst = {
+ 0x4b, 0x28, 0x4b, 0xa3, 0x7b, 0xbe, 0xe9, 0xf8,
+ 0x31, 0x80, 0x82, 0xd7, 0xd8, 0xe8, 0xb5, 0xa1,
+ 0xe2, 0x18, 0x18, 0x8a, 0x9c, 0xfa, 0xa3, 0x3d,
+ 0x25, 0x71, 0x3e, 0x40, 0xbc, 0x54, 0x7a, 0x3e
+};
+
+static const u8 enc_input005[] __initconst = {
+ 0x2d
+};
+static const u8 enc_output005[] __initconst = {
+ 0xbf, 0xe1, 0x5b, 0x0b, 0xdb, 0x6b, 0xf5, 0x5e,
+ 0x6c, 0x5d, 0x84, 0x44, 0x39, 0x81, 0xc1, 0x9c,
+ 0xac
+};
+static const u8 enc_assoc005[] __initconst = { };
+static const u8 enc_nonce005[] __initconst = {
+ 0x20, 0x1c, 0xaa, 0x5f, 0x9c, 0xbf, 0x92, 0x30
+};
+static const u8 enc_key005[] __initconst = {
+ 0x66, 0xca, 0x9c, 0x23, 0x2a, 0x4b, 0x4b, 0x31,
+ 0x0e, 0x92, 0x89, 0x8b, 0xf4, 0x93, 0xc7, 0x87,
+ 0x98, 0xa3, 0xd8, 0x39, 0xf8, 0xf4, 0xa7, 0x01,
+ 0xc0, 0x2e, 0x0a, 0xa6, 0x7e, 0x5a, 0x78, 0x87
+};
+
+static const u8 enc_input006[] __initconst = {
+ 0x33, 0x2f, 0x94, 0xc1, 0xa4, 0xef, 0xcc, 0x2a,
+ 0x5b, 0xa6, 0xe5, 0x8f, 0x1d, 0x40, 0xf0, 0x92,
+ 0x3c, 0xd9, 0x24, 0x11, 0xa9, 0x71, 0xf9, 0x37,
+ 0x14, 0x99, 0xfa, 0xbe, 0xe6, 0x80, 0xde, 0x50,
+ 0xc9, 0x96, 0xd4, 0xb0, 0xec, 0x9e, 0x17, 0xec,
+ 0xd2, 0x5e, 0x72, 0x99, 0xfc, 0x0a, 0xe1, 0xcb,
+ 0x48, 0xd2, 0x85, 0xdd, 0x2f, 0x90, 0xe0, 0x66,
+ 0x3b, 0xe6, 0x20, 0x74, 0xbe, 0x23, 0x8f, 0xcb,
+ 0xb4, 0xe4, 0xda, 0x48, 0x40, 0xa6, 0xd1, 0x1b,
+ 0xc7, 0x42, 0xce, 0x2f, 0x0c, 0xa6, 0x85, 0x6e,
+ 0x87, 0x37, 0x03, 0xb1, 0x7c, 0x25, 0x96, 0xa3,
+ 0x05, 0xd8, 0xb0, 0xf4, 0xed, 0xea, 0xc2, 0xf0,
+ 0x31, 0x98, 0x6c, 0xd1, 0x14, 0x25, 0xc0, 0xcb,
+ 0x01, 0x74, 0xd0, 0x82, 0xf4, 0x36, 0xf5, 0x41,
+ 0xd5, 0xdc, 0xca, 0xc5, 0xbb, 0x98, 0xfe, 0xfc,
+ 0x69, 0x21, 0x70, 0xd8, 0xa4, 0x4b, 0xc8, 0xde,
+ 0x8f
+};
+static const u8 enc_output006[] __initconst = {
+ 0x8b, 0x06, 0xd3, 0x31, 0xb0, 0x93, 0x45, 0xb1,
+ 0x75, 0x6e, 0x26, 0xf9, 0x67, 0xbc, 0x90, 0x15,
+ 0x81, 0x2c, 0xb5, 0xf0, 0xc6, 0x2b, 0xc7, 0x8c,
+ 0x56, 0xd1, 0xbf, 0x69, 0x6c, 0x07, 0xa0, 0xda,
+ 0x65, 0x27, 0xc9, 0x90, 0x3d, 0xef, 0x4b, 0x11,
+ 0x0f, 0x19, 0x07, 0xfd, 0x29, 0x92, 0xd9, 0xc8,
+ 0xf7, 0x99, 0x2e, 0x4a, 0xd0, 0xb8, 0x2c, 0xdc,
+ 0x93, 0xf5, 0x9e, 0x33, 0x78, 0xd1, 0x37, 0xc3,
+ 0x66, 0xd7, 0x5e, 0xbc, 0x44, 0xbf, 0x53, 0xa5,
+ 0xbc, 0xc4, 0xcb, 0x7b, 0x3a, 0x8e, 0x7f, 0x02,
+ 0xbd, 0xbb, 0xe7, 0xca, 0xa6, 0x6c, 0x6b, 0x93,
+ 0x21, 0x93, 0x10, 0x61, 0xe7, 0x69, 0xd0, 0x78,
+ 0xf3, 0x07, 0x5a, 0x1a, 0x8f, 0x73, 0xaa, 0xb1,
+ 0x4e, 0xd3, 0xda, 0x4f, 0xf3, 0x32, 0xe1, 0x66,
+ 0x3e, 0x6c, 0xc6, 0x13, 0xba, 0x06, 0x5b, 0xfc,
+ 0x6a, 0xe5, 0x6f, 0x60, 0xfb, 0x07, 0x40, 0xb0,
+ 0x8c, 0x9d, 0x84, 0x43, 0x6b, 0xc1, 0xf7, 0x8d,
+ 0x8d, 0x31, 0xf7, 0x7a, 0x39, 0x4d, 0x8f, 0x9a,
+ 0xeb
+};
+static const u8 enc_assoc006[] __initconst = {
+ 0x70, 0xd3, 0x33, 0xf3, 0x8b, 0x18, 0x0b
+};
+static const u8 enc_nonce006[] __initconst = {
+ 0xdf, 0x51, 0x84, 0x82, 0x42, 0x0c, 0x75, 0x9c
+};
+static const u8 enc_key006[] __initconst = {
+ 0x68, 0x7b, 0x8d, 0x8e, 0xe3, 0xc4, 0xdd, 0xae,
+ 0xdf, 0x72, 0x7f, 0x53, 0x72, 0x25, 0x1e, 0x78,
+ 0x91, 0xcb, 0x69, 0x76, 0x1f, 0x49, 0x93, 0xf9,
+ 0x6f, 0x21, 0xcc, 0x39, 0x9c, 0xad, 0xb1, 0x01
+};
+
+static const u8 enc_input007[] __initconst = {
+ 0x9b, 0x18, 0xdb, 0xdd, 0x9a, 0x0f, 0x3e, 0xa5,
+ 0x15, 0x17, 0xde, 0xdf, 0x08, 0x9d, 0x65, 0x0a,
+ 0x67, 0x30, 0x12, 0xe2, 0x34, 0x77, 0x4b, 0xc1,
+ 0xd9, 0xc6, 0x1f, 0xab, 0xc6, 0x18, 0x50, 0x17,
+ 0xa7, 0x9d, 0x3c, 0xa6, 0xc5, 0x35, 0x8c, 0x1c,
+ 0xc0, 0xa1, 0x7c, 0x9f, 0x03, 0x89, 0xca, 0xe1,
+ 0xe6, 0xe9, 0xd4, 0xd3, 0x88, 0xdb, 0xb4, 0x51,
+ 0x9d, 0xec, 0xb4, 0xfc, 0x52, 0xee, 0x6d, 0xf1,
+ 0x75, 0x42, 0xc6, 0xfd, 0xbd, 0x7a, 0x8e, 0x86,
+ 0xfc, 0x44, 0xb3, 0x4f, 0xf3, 0xea, 0x67, 0x5a,
+ 0x41, 0x13, 0xba, 0xb0, 0xdc, 0xe1, 0xd3, 0x2a,
+ 0x7c, 0x22, 0xb3, 0xca, 0xac, 0x6a, 0x37, 0x98,
+ 0x3e, 0x1d, 0x40, 0x97, 0xf7, 0x9b, 0x1d, 0x36,
+ 0x6b, 0xb3, 0x28, 0xbd, 0x60, 0x82, 0x47, 0x34,
+ 0xaa, 0x2f, 0x7d, 0xe9, 0xa8, 0x70, 0x81, 0x57,
+ 0xd4, 0xb9, 0x77, 0x0a, 0x9d, 0x29, 0xa7, 0x84,
+ 0x52, 0x4f, 0xc2, 0x4a, 0x40, 0x3b, 0x3c, 0xd4,
+ 0xc9, 0x2a, 0xdb, 0x4a, 0x53, 0xc4, 0xbe, 0x80,
+ 0xe9, 0x51, 0x7f, 0x8f, 0xc7, 0xa2, 0xce, 0x82,
+ 0x5c, 0x91, 0x1e, 0x74, 0xd9, 0xd0, 0xbd, 0xd5,
+ 0xf3, 0xfd, 0xda, 0x4d, 0x25, 0xb4, 0xbb, 0x2d,
+ 0xac, 0x2f, 0x3d, 0x71, 0x85, 0x7b, 0xcf, 0x3c,
+ 0x7b, 0x3e, 0x0e, 0x22, 0x78, 0x0c, 0x29, 0xbf,
+ 0xe4, 0xf4, 0x57, 0xb3, 0xcb, 0x49, 0xa0, 0xfc,
+ 0x1e, 0x05, 0x4e, 0x16, 0xbc, 0xd5, 0xa8, 0xa3,
+ 0xee, 0x05, 0x35, 0xc6, 0x7c, 0xab, 0x60, 0x14,
+ 0x55, 0x1a, 0x8e, 0xc5, 0x88, 0x5d, 0xd5, 0x81,
+ 0xc2, 0x81, 0xa5, 0xc4, 0x60, 0xdb, 0xaf, 0x77,
+ 0x91, 0xe1, 0xce, 0xa2, 0x7e, 0x7f, 0x42, 0xe3,
+ 0xb0, 0x13, 0x1c, 0x1f, 0x25, 0x60, 0x21, 0xe2,
+ 0x40, 0x5f, 0x99, 0xb7, 0x73, 0xec, 0x9b, 0x2b,
+ 0xf0, 0x65, 0x11, 0xc8, 0xd0, 0x0a, 0x9f, 0xd3
+};
+static const u8 enc_output007[] __initconst = {
+ 0x85, 0x04, 0xc2, 0xed, 0x8d, 0xfd, 0x97, 0x5c,
+ 0xd2, 0xb7, 0xe2, 0xc1, 0x6b, 0xa3, 0xba, 0xf8,
+ 0xc9, 0x50, 0xc3, 0xc6, 0xa5, 0xe3, 0xa4, 0x7c,
+ 0xc3, 0x23, 0x49, 0x5e, 0xa9, 0xb9, 0x32, 0xeb,
+ 0x8a, 0x7c, 0xca, 0xe5, 0xec, 0xfb, 0x7c, 0xc0,
+ 0xcb, 0x7d, 0xdc, 0x2c, 0x9d, 0x92, 0x55, 0x21,
+ 0x0a, 0xc8, 0x43, 0x63, 0x59, 0x0a, 0x31, 0x70,
+ 0x82, 0x67, 0x41, 0x03, 0xf8, 0xdf, 0xf2, 0xac,
+ 0xa7, 0x02, 0xd4, 0xd5, 0x8a, 0x2d, 0xc8, 0x99,
+ 0x19, 0x66, 0xd0, 0xf6, 0x88, 0x2c, 0x77, 0xd9,
+ 0xd4, 0x0d, 0x6c, 0xbd, 0x98, 0xde, 0xe7, 0x7f,
+ 0xad, 0x7e, 0x8a, 0xfb, 0xe9, 0x4b, 0xe5, 0xf7,
+ 0xe5, 0x50, 0xa0, 0x90, 0x3f, 0xd6, 0x22, 0x53,
+ 0xe3, 0xfe, 0x1b, 0xcc, 0x79, 0x3b, 0xec, 0x12,
+ 0x47, 0x52, 0xa7, 0xd6, 0x04, 0xe3, 0x52, 0xe6,
+ 0x93, 0x90, 0x91, 0x32, 0x73, 0x79, 0xb8, 0xd0,
+ 0x31, 0xde, 0x1f, 0x9f, 0x2f, 0x05, 0x38, 0x54,
+ 0x2f, 0x35, 0x04, 0x39, 0xe0, 0xa7, 0xba, 0xc6,
+ 0x52, 0xf6, 0x37, 0x65, 0x4c, 0x07, 0xa9, 0x7e,
+ 0xb3, 0x21, 0x6f, 0x74, 0x8c, 0xc9, 0xde, 0xdb,
+ 0x65, 0x1b, 0x9b, 0xaa, 0x60, 0xb1, 0x03, 0x30,
+ 0x6b, 0xb2, 0x03, 0xc4, 0x1c, 0x04, 0xf8, 0x0f,
+ 0x64, 0xaf, 0x46, 0xe4, 0x65, 0x99, 0x49, 0xe2,
+ 0xea, 0xce, 0x78, 0x00, 0xd8, 0x8b, 0xd5, 0x2e,
+ 0xcf, 0xfc, 0x40, 0x49, 0xe8, 0x58, 0xdc, 0x34,
+ 0x9c, 0x8c, 0x61, 0xbf, 0x0a, 0x8e, 0xec, 0x39,
+ 0xa9, 0x30, 0x05, 0x5a, 0xd2, 0x56, 0x01, 0xc7,
+ 0xda, 0x8f, 0x4e, 0xbb, 0x43, 0xa3, 0x3a, 0xf9,
+ 0x15, 0x2a, 0xd0, 0xa0, 0x7a, 0x87, 0x34, 0x82,
+ 0xfe, 0x8a, 0xd1, 0x2d, 0x5e, 0xc7, 0xbf, 0x04,
+ 0x53, 0x5f, 0x3b, 0x36, 0xd4, 0x25, 0x5c, 0x34,
+ 0x7a, 0x8d, 0xd5, 0x05, 0xce, 0x72, 0xca, 0xef,
+ 0x7a, 0x4b, 0xbc, 0xb0, 0x10, 0x5c, 0x96, 0x42,
+ 0x3a, 0x00, 0x98, 0xcd, 0x15, 0xe8, 0xb7, 0x53
+};
+static const u8 enc_assoc007[] __initconst = { };
+static const u8 enc_nonce007[] __initconst = {
+ 0xde, 0x7b, 0xef, 0xc3, 0x65, 0x1b, 0x68, 0xb0
+};
+static const u8 enc_key007[] __initconst = {
+ 0x8d, 0xb8, 0x91, 0x48, 0xf0, 0xe7, 0x0a, 0xbd,
+ 0xf9, 0x3f, 0xcd, 0xd9, 0xa0, 0x1e, 0x42, 0x4c,
+ 0xe7, 0xde, 0x25, 0x3d, 0xa3, 0xd7, 0x05, 0x80,
+ 0x8d, 0xf2, 0x82, 0xac, 0x44, 0x16, 0x51, 0x01
+};
+
+static const u8 enc_input008[] __initconst = {
+ 0xc3, 0x09, 0x94, 0x62, 0xe6, 0x46, 0x2e, 0x10,
+ 0xbe, 0x00, 0xe4, 0xfc, 0xf3, 0x40, 0xa3, 0xe2,
+ 0x0f, 0xc2, 0x8b, 0x28, 0xdc, 0xba, 0xb4, 0x3c,
+ 0xe4, 0x21, 0x58, 0x61, 0xcd, 0x8b, 0xcd, 0xfb,
+ 0xac, 0x94, 0xa1, 0x45, 0xf5, 0x1c, 0xe1, 0x12,
+ 0xe0, 0x3b, 0x67, 0x21, 0x54, 0x5e, 0x8c, 0xaa,
+ 0xcf, 0xdb, 0xb4, 0x51, 0xd4, 0x13, 0xda, 0xe6,
+ 0x83, 0x89, 0xb6, 0x92, 0xe9, 0x21, 0x76, 0xa4,
+ 0x93, 0x7d, 0x0e, 0xfd, 0x96, 0x36, 0x03, 0x91,
+ 0x43, 0x5c, 0x92, 0x49, 0x62, 0x61, 0x7b, 0xeb,
+ 0x43, 0x89, 0xb8, 0x12, 0x20, 0x43, 0xd4, 0x47,
+ 0x06, 0x84, 0xee, 0x47, 0xe9, 0x8a, 0x73, 0x15,
+ 0x0f, 0x72, 0xcf, 0xed, 0xce, 0x96, 0xb2, 0x7f,
+ 0x21, 0x45, 0x76, 0xeb, 0x26, 0x28, 0x83, 0x6a,
+ 0xad, 0xaa, 0xa6, 0x81, 0xd8, 0x55, 0xb1, 0xa3,
+ 0x85, 0xb3, 0x0c, 0xdf, 0xf1, 0x69, 0x2d, 0x97,
+ 0x05, 0x2a, 0xbc, 0x7c, 0x7b, 0x25, 0xf8, 0x80,
+ 0x9d, 0x39, 0x25, 0xf3, 0x62, 0xf0, 0x66, 0x5e,
+ 0xf4, 0xa0, 0xcf, 0xd8, 0xfd, 0x4f, 0xb1, 0x1f,
+ 0x60, 0x3a, 0x08, 0x47, 0xaf, 0xe1, 0xf6, 0x10,
+ 0x77, 0x09, 0xa7, 0x27, 0x8f, 0x9a, 0x97, 0x5a,
+ 0x26, 0xfa, 0xfe, 0x41, 0x32, 0x83, 0x10, 0xe0,
+ 0x1d, 0xbf, 0x64, 0x0d, 0xf4, 0x1c, 0x32, 0x35,
+ 0xe5, 0x1b, 0x36, 0xef, 0xd4, 0x4a, 0x93, 0x4d,
+ 0x00, 0x7c, 0xec, 0x02, 0x07, 0x8b, 0x5d, 0x7d,
+ 0x1b, 0x0e, 0xd1, 0xa6, 0xa5, 0x5d, 0x7d, 0x57,
+ 0x88, 0xa8, 0xcc, 0x81, 0xb4, 0x86, 0x4e, 0xb4,
+ 0x40, 0xe9, 0x1d, 0xc3, 0xb1, 0x24, 0x3e, 0x7f,
+ 0xcc, 0x8a, 0x24, 0x9b, 0xdf, 0x6d, 0xf0, 0x39,
+ 0x69, 0x3e, 0x4c, 0xc0, 0x96, 0xe4, 0x13, 0xda,
+ 0x90, 0xda, 0xf4, 0x95, 0x66, 0x8b, 0x17, 0x17,
+ 0xfe, 0x39, 0x43, 0x25, 0xaa, 0xda, 0xa0, 0x43,
+ 0x3c, 0xb1, 0x41, 0x02, 0xa3, 0xf0, 0xa7, 0x19,
+ 0x59, 0xbc, 0x1d, 0x7d, 0x6c, 0x6d, 0x91, 0x09,
+ 0x5c, 0xb7, 0x5b, 0x01, 0xd1, 0x6f, 0x17, 0x21,
+ 0x97, 0xbf, 0x89, 0x71, 0xa5, 0xb0, 0x6e, 0x07,
+ 0x45, 0xfd, 0x9d, 0xea, 0x07, 0xf6, 0x7a, 0x9f,
+ 0x10, 0x18, 0x22, 0x30, 0x73, 0xac, 0xd4, 0x6b,
+ 0x72, 0x44, 0xed, 0xd9, 0x19, 0x9b, 0x2d, 0x4a,
+ 0x41, 0xdd, 0xd1, 0x85, 0x5e, 0x37, 0x19, 0xed,
+ 0xd2, 0x15, 0x8f, 0x5e, 0x91, 0xdb, 0x33, 0xf2,
+ 0xe4, 0xdb, 0xff, 0x98, 0xfb, 0xa3, 0xb5, 0xca,
+ 0x21, 0x69, 0x08, 0xe7, 0x8a, 0xdf, 0x90, 0xff,
+ 0x3e, 0xe9, 0x20, 0x86, 0x3c, 0xe9, 0xfc, 0x0b,
+ 0xfe, 0x5c, 0x61, 0xaa, 0x13, 0x92, 0x7f, 0x7b,
+ 0xec, 0xe0, 0x6d, 0xa8, 0x23, 0x22, 0xf6, 0x6b,
+ 0x77, 0xc4, 0xfe, 0x40, 0x07, 0x3b, 0xb6, 0xf6,
+ 0x8e, 0x5f, 0xd4, 0xb9, 0xb7, 0x0f, 0x21, 0x04,
+ 0xef, 0x83, 0x63, 0x91, 0x69, 0x40, 0xa3, 0x48,
+ 0x5c, 0xd2, 0x60, 0xf9, 0x4f, 0x6c, 0x47, 0x8b,
+ 0x3b, 0xb1, 0x9f, 0x8e, 0xee, 0x16, 0x8a, 0x13,
+ 0xfc, 0x46, 0x17, 0xc3, 0xc3, 0x32, 0x56, 0xf8,
+ 0x3c, 0x85, 0x3a, 0xb6, 0x3e, 0xaa, 0x89, 0x4f,
+ 0xb3, 0xdf, 0x38, 0xfd, 0xf1, 0xe4, 0x3a, 0xc0,
+ 0xe6, 0x58, 0xb5, 0x8f, 0xc5, 0x29, 0xa2, 0x92,
+ 0x4a, 0xb6, 0xa0, 0x34, 0x7f, 0xab, 0xb5, 0x8a,
+ 0x90, 0xa1, 0xdb, 0x4d, 0xca, 0xb6, 0x2c, 0x41,
+ 0x3c, 0xf7, 0x2b, 0x21, 0xc3, 0xfd, 0xf4, 0x17,
+ 0x5c, 0xb5, 0x33, 0x17, 0x68, 0x2b, 0x08, 0x30,
+ 0xf3, 0xf7, 0x30, 0x3c, 0x96, 0xe6, 0x6a, 0x20,
+ 0x97, 0xe7, 0x4d, 0x10, 0x5f, 0x47, 0x5f, 0x49,
+ 0x96, 0x09, 0xf0, 0x27, 0x91, 0xc8, 0xf8, 0x5a,
+ 0x2e, 0x79, 0xb5, 0xe2, 0xb8, 0xe8, 0xb9, 0x7b,
+ 0xd5, 0x10, 0xcb, 0xff, 0x5d, 0x14, 0x73, 0xf3
+};
+static const u8 enc_output008[] __initconst = {
+ 0x14, 0xf6, 0x41, 0x37, 0xa6, 0xd4, 0x27, 0xcd,
+ 0xdb, 0x06, 0x3e, 0x9a, 0x4e, 0xab, 0xd5, 0xb1,
+ 0x1e, 0x6b, 0xd2, 0xbc, 0x11, 0xf4, 0x28, 0x93,
+ 0x63, 0x54, 0xef, 0xbb, 0x5e, 0x1d, 0x3a, 0x1d,
+ 0x37, 0x3c, 0x0a, 0x6c, 0x1e, 0xc2, 0xd1, 0x2c,
+ 0xb5, 0xa3, 0xb5, 0x7b, 0xb8, 0x8f, 0x25, 0xa6,
+ 0x1b, 0x61, 0x1c, 0xec, 0x28, 0x58, 0x26, 0xa4,
+ 0xa8, 0x33, 0x28, 0x25, 0x5c, 0x45, 0x05, 0xe5,
+ 0x6c, 0x99, 0xe5, 0x45, 0xc4, 0xa2, 0x03, 0x84,
+ 0x03, 0x73, 0x1e, 0x8c, 0x49, 0xac, 0x20, 0xdd,
+ 0x8d, 0xb3, 0xc4, 0xf5, 0xe7, 0x4f, 0xf1, 0xed,
+ 0xa1, 0x98, 0xde, 0xa4, 0x96, 0xdd, 0x2f, 0xab,
+ 0xab, 0x97, 0xcf, 0x3e, 0xd2, 0x9e, 0xb8, 0x13,
+ 0x07, 0x28, 0x29, 0x19, 0xaf, 0xfd, 0xf2, 0x49,
+ 0x43, 0xea, 0x49, 0x26, 0x91, 0xc1, 0x07, 0xd6,
+ 0xbb, 0x81, 0x75, 0x35, 0x0d, 0x24, 0x7f, 0xc8,
+ 0xda, 0xd4, 0xb7, 0xeb, 0xe8, 0x5c, 0x09, 0xa2,
+ 0x2f, 0xdc, 0x28, 0x7d, 0x3a, 0x03, 0xfa, 0x94,
+ 0xb5, 0x1d, 0x17, 0x99, 0x36, 0xc3, 0x1c, 0x18,
+ 0x34, 0xe3, 0x9f, 0xf5, 0x55, 0x7c, 0xb0, 0x60,
+ 0x9d, 0xff, 0xac, 0xd4, 0x61, 0xf2, 0xad, 0xf8,
+ 0xce, 0xc7, 0xbe, 0x5c, 0xd2, 0x95, 0xa8, 0x4b,
+ 0x77, 0x13, 0x19, 0x59, 0x26, 0xc9, 0xb7, 0x8f,
+ 0x6a, 0xcb, 0x2d, 0x37, 0x91, 0xea, 0x92, 0x9c,
+ 0x94, 0x5b, 0xda, 0x0b, 0xce, 0xfe, 0x30, 0x20,
+ 0xf8, 0x51, 0xad, 0xf2, 0xbe, 0xe7, 0xc7, 0xff,
+ 0xb3, 0x33, 0x91, 0x6a, 0xc9, 0x1a, 0x41, 0xc9,
+ 0x0f, 0xf3, 0x10, 0x0e, 0xfd, 0x53, 0xff, 0x6c,
+ 0x16, 0x52, 0xd9, 0xf3, 0xf7, 0x98, 0x2e, 0xc9,
+ 0x07, 0x31, 0x2c, 0x0c, 0x72, 0xd7, 0xc5, 0xc6,
+ 0x08, 0x2a, 0x7b, 0xda, 0xbd, 0x7e, 0x02, 0xea,
+ 0x1a, 0xbb, 0xf2, 0x04, 0x27, 0x61, 0x28, 0x8e,
+ 0xf5, 0x04, 0x03, 0x1f, 0x4c, 0x07, 0x55, 0x82,
+ 0xec, 0x1e, 0xd7, 0x8b, 0x2f, 0x65, 0x56, 0xd1,
+ 0xd9, 0x1e, 0x3c, 0xe9, 0x1f, 0x5e, 0x98, 0x70,
+ 0x38, 0x4a, 0x8c, 0x49, 0xc5, 0x43, 0xa0, 0xa1,
+ 0x8b, 0x74, 0x9d, 0x4c, 0x62, 0x0d, 0x10, 0x0c,
+ 0xf4, 0x6c, 0x8f, 0xe0, 0xaa, 0x9a, 0x8d, 0xb7,
+ 0xe0, 0xbe, 0x4c, 0x87, 0xf1, 0x98, 0x2f, 0xcc,
+ 0xed, 0xc0, 0x52, 0x29, 0xdc, 0x83, 0xf8, 0xfc,
+ 0x2c, 0x0e, 0xa8, 0x51, 0x4d, 0x80, 0x0d, 0xa3,
+ 0xfe, 0xd8, 0x37, 0xe7, 0x41, 0x24, 0xfc, 0xfb,
+ 0x75, 0xe3, 0x71, 0x7b, 0x57, 0x45, 0xf5, 0x97,
+ 0x73, 0x65, 0x63, 0x14, 0x74, 0xb8, 0x82, 0x9f,
+ 0xf8, 0x60, 0x2f, 0x8a, 0xf2, 0x4e, 0xf1, 0x39,
+ 0xda, 0x33, 0x91, 0xf8, 0x36, 0xe0, 0x8d, 0x3f,
+ 0x1f, 0x3b, 0x56, 0xdc, 0xa0, 0x8f, 0x3c, 0x9d,
+ 0x71, 0x52, 0xa7, 0xb8, 0xc0, 0xa5, 0xc6, 0xa2,
+ 0x73, 0xda, 0xf4, 0x4b, 0x74, 0x5b, 0x00, 0x3d,
+ 0x99, 0xd7, 0x96, 0xba, 0xe6, 0xe1, 0xa6, 0x96,
+ 0x38, 0xad, 0xb3, 0xc0, 0xd2, 0xba, 0x91, 0x6b,
+ 0xf9, 0x19, 0xdd, 0x3b, 0xbe, 0xbe, 0x9c, 0x20,
+ 0x50, 0xba, 0xa1, 0xd0, 0xce, 0x11, 0xbd, 0x95,
+ 0xd8, 0xd1, 0xdd, 0x33, 0x85, 0x74, 0xdc, 0xdb,
+ 0x66, 0x76, 0x44, 0xdc, 0x03, 0x74, 0x48, 0x35,
+ 0x98, 0xb1, 0x18, 0x47, 0x94, 0x7d, 0xff, 0x62,
+ 0xe4, 0x58, 0x78, 0xab, 0xed, 0x95, 0x36, 0xd9,
+ 0x84, 0x91, 0x82, 0x64, 0x41, 0xbb, 0x58, 0xe6,
+ 0x1c, 0x20, 0x6d, 0x15, 0x6b, 0x13, 0x96, 0xe8,
+ 0x35, 0x7f, 0xdc, 0x40, 0x2c, 0xe9, 0xbc, 0x8a,
+ 0x4f, 0x92, 0xec, 0x06, 0x2d, 0x50, 0xdf, 0x93,
+ 0x5d, 0x65, 0x5a, 0xa8, 0xfc, 0x20, 0x50, 0x14,
+ 0xa9, 0x8a, 0x7e, 0x1d, 0x08, 0x1f, 0xe2, 0x99,
+ 0xd0, 0xbe, 0xfb, 0x3a, 0x21, 0x9d, 0xad, 0x86,
+ 0x54, 0xfd, 0x0d, 0x98, 0x1c, 0x5a, 0x6f, 0x1f,
+ 0x9a, 0x40, 0xcd, 0xa2, 0xff, 0x6a, 0xf1, 0x54
+};
+static const u8 enc_assoc008[] __initconst = { };
+static const u8 enc_nonce008[] __initconst = {
+ 0x0e, 0x0d, 0x57, 0xbb, 0x7b, 0x40, 0x54, 0x02
+};
+static const u8 enc_key008[] __initconst = {
+ 0xf2, 0xaa, 0x4f, 0x99, 0xfd, 0x3e, 0xa8, 0x53,
+ 0xc1, 0x44, 0xe9, 0x81, 0x18, 0xdc, 0xf5, 0xf0,
+ 0x3e, 0x44, 0x15, 0x59, 0xe0, 0xc5, 0x44, 0x86,
+ 0xc3, 0x91, 0xa8, 0x75, 0xc0, 0x12, 0x46, 0xba
+};
+
+static const u8 enc_input009[] __initconst = {
+ 0xe6, 0xc3, 0xdb, 0x63, 0x55, 0x15, 0xe3, 0x5b,
+ 0xb7, 0x4b, 0x27, 0x8b, 0x5a, 0xdd, 0xc2, 0xe8,
+ 0x3a, 0x6b, 0xd7, 0x81, 0x96, 0x35, 0x97, 0xca,
+ 0xd7, 0x68, 0xe8, 0xef, 0xce, 0xab, 0xda, 0x09,
+ 0x6e, 0xd6, 0x8e, 0xcb, 0x55, 0xb5, 0xe1, 0xe5,
+ 0x57, 0xfd, 0xc4, 0xe3, 0xe0, 0x18, 0x4f, 0x85,
+ 0xf5, 0x3f, 0x7e, 0x4b, 0x88, 0xc9, 0x52, 0x44,
+ 0x0f, 0xea, 0xaf, 0x1f, 0x71, 0x48, 0x9f, 0x97,
+ 0x6d, 0xb9, 0x6f, 0x00, 0xa6, 0xde, 0x2b, 0x77,
+ 0x8b, 0x15, 0xad, 0x10, 0xa0, 0x2b, 0x7b, 0x41,
+ 0x90, 0x03, 0x2d, 0x69, 0xae, 0xcc, 0x77, 0x7c,
+ 0xa5, 0x9d, 0x29, 0x22, 0xc2, 0xea, 0xb4, 0x00,
+ 0x1a, 0xd2, 0x7a, 0x98, 0x8a, 0xf9, 0xf7, 0x82,
+ 0xb0, 0xab, 0xd8, 0xa6, 0x94, 0x8d, 0x58, 0x2f,
+ 0x01, 0x9e, 0x00, 0x20, 0xfc, 0x49, 0xdc, 0x0e,
+ 0x03, 0xe8, 0x45, 0x10, 0xd6, 0xa8, 0xda, 0x55,
+ 0x10, 0x9a, 0xdf, 0x67, 0x22, 0x8b, 0x43, 0xab,
+ 0x00, 0xbb, 0x02, 0xc8, 0xdd, 0x7b, 0x97, 0x17,
+ 0xd7, 0x1d, 0x9e, 0x02, 0x5e, 0x48, 0xde, 0x8e,
+ 0xcf, 0x99, 0x07, 0x95, 0x92, 0x3c, 0x5f, 0x9f,
+ 0xc5, 0x8a, 0xc0, 0x23, 0xaa, 0xd5, 0x8c, 0x82,
+ 0x6e, 0x16, 0x92, 0xb1, 0x12, 0x17, 0x07, 0xc3,
+ 0xfb, 0x36, 0xf5, 0x6c, 0x35, 0xd6, 0x06, 0x1f,
+ 0x9f, 0xa7, 0x94, 0xa2, 0x38, 0x63, 0x9c, 0xb0,
+ 0x71, 0xb3, 0xa5, 0xd2, 0xd8, 0xba, 0x9f, 0x08,
+ 0x01, 0xb3, 0xff, 0x04, 0x97, 0x73, 0x45, 0x1b,
+ 0xd5, 0xa9, 0x9c, 0x80, 0xaf, 0x04, 0x9a, 0x85,
+ 0xdb, 0x32, 0x5b, 0x5d, 0x1a, 0xc1, 0x36, 0x28,
+ 0x10, 0x79, 0xf1, 0x3c, 0xbf, 0x1a, 0x41, 0x5c,
+ 0x4e, 0xdf, 0xb2, 0x7c, 0x79, 0x3b, 0x7a, 0x62,
+ 0x3d, 0x4b, 0xc9, 0x9b, 0x2a, 0x2e, 0x7c, 0xa2,
+ 0xb1, 0x11, 0x98, 0xa7, 0x34, 0x1a, 0x00, 0xf3,
+ 0xd1, 0xbc, 0x18, 0x22, 0xba, 0x02, 0x56, 0x62,
+ 0x31, 0x10, 0x11, 0x6d, 0xe0, 0x54, 0x9d, 0x40,
+ 0x1f, 0x26, 0x80, 0x41, 0xca, 0x3f, 0x68, 0x0f,
+ 0x32, 0x1d, 0x0a, 0x8e, 0x79, 0xd8, 0xa4, 0x1b,
+ 0x29, 0x1c, 0x90, 0x8e, 0xc5, 0xe3, 0xb4, 0x91,
+ 0x37, 0x9a, 0x97, 0x86, 0x99, 0xd5, 0x09, 0xc5,
+ 0xbb, 0xa3, 0x3f, 0x21, 0x29, 0x82, 0x14, 0x5c,
+ 0xab, 0x25, 0xfb, 0xf2, 0x4f, 0x58, 0x26, 0xd4,
+ 0x83, 0xaa, 0x66, 0x89, 0x67, 0x7e, 0xc0, 0x49,
+ 0xe1, 0x11, 0x10, 0x7f, 0x7a, 0xda, 0x29, 0x04,
+ 0xff, 0xf0, 0xcb, 0x09, 0x7c, 0x9d, 0xfa, 0x03,
+ 0x6f, 0x81, 0x09, 0x31, 0x60, 0xfb, 0x08, 0xfa,
+ 0x74, 0xd3, 0x64, 0x44, 0x7c, 0x55, 0x85, 0xec,
+ 0x9c, 0x6e, 0x25, 0xb7, 0x6c, 0xc5, 0x37, 0xb6,
+ 0x83, 0x87, 0x72, 0x95, 0x8b, 0x9d, 0xe1, 0x69,
+ 0x5c, 0x31, 0x95, 0x42, 0xa6, 0x2c, 0xd1, 0x36,
+ 0x47, 0x1f, 0xec, 0x54, 0xab, 0xa2, 0x1c, 0xd8,
+ 0x00, 0xcc, 0xbc, 0x0d, 0x65, 0xe2, 0x67, 0xbf,
+ 0xbc, 0xea, 0xee, 0x9e, 0xe4, 0x36, 0x95, 0xbe,
+ 0x73, 0xd9, 0xa6, 0xd9, 0x0f, 0xa0, 0xcc, 0x82,
+ 0x76, 0x26, 0xad, 0x5b, 0x58, 0x6c, 0x4e, 0xab,
+ 0x29, 0x64, 0xd3, 0xd9, 0xa9, 0x08, 0x8c, 0x1d,
+ 0xa1, 0x4f, 0x80, 0xd8, 0x3f, 0x94, 0xfb, 0xd3,
+ 0x7b, 0xfc, 0xd1, 0x2b, 0xc3, 0x21, 0xeb, 0xe5,
+ 0x1c, 0x84, 0x23, 0x7f, 0x4b, 0xfa, 0xdb, 0x34,
+ 0x18, 0xa2, 0xc2, 0xe5, 0x13, 0xfe, 0x6c, 0x49,
+ 0x81, 0xd2, 0x73, 0xe7, 0xe2, 0xd7, 0xe4, 0x4f,
+ 0x4b, 0x08, 0x6e, 0xb1, 0x12, 0x22, 0x10, 0x9d,
+ 0xac, 0x51, 0x1e, 0x17, 0xd9, 0x8a, 0x0b, 0x42,
+ 0x88, 0x16, 0x81, 0x37, 0x7c, 0x6a, 0xf7, 0xef,
+ 0x2d, 0xe3, 0xd9, 0xf8, 0x5f, 0xe0, 0x53, 0x27,
+ 0x74, 0xb9, 0xe2, 0xd6, 0x1c, 0x80, 0x2c, 0x52,
+ 0x65
+};
+static const u8 enc_output009[] __initconst = {
+ 0xfd, 0x81, 0x8d, 0xd0, 0x3d, 0xb4, 0xd5, 0xdf,
+ 0xd3, 0x42, 0x47, 0x5a, 0x6d, 0x19, 0x27, 0x66,
+ 0x4b, 0x2e, 0x0c, 0x27, 0x9c, 0x96, 0x4c, 0x72,
+ 0x02, 0xa3, 0x65, 0xc3, 0xb3, 0x6f, 0x2e, 0xbd,
+ 0x63, 0x8a, 0x4a, 0x5d, 0x29, 0xa2, 0xd0, 0x28,
+ 0x48, 0xc5, 0x3d, 0x98, 0xa3, 0xbc, 0xe0, 0xbe,
+ 0x3b, 0x3f, 0xe6, 0x8a, 0xa4, 0x7f, 0x53, 0x06,
+ 0xfa, 0x7f, 0x27, 0x76, 0x72, 0x31, 0xa1, 0xf5,
+ 0xd6, 0x0c, 0x52, 0x47, 0xba, 0xcd, 0x4f, 0xd7,
+ 0xeb, 0x05, 0x48, 0x0d, 0x7c, 0x35, 0x4a, 0x09,
+ 0xc9, 0x76, 0x71, 0x02, 0xa3, 0xfb, 0xb7, 0x1a,
+ 0x65, 0xb7, 0xed, 0x98, 0xc6, 0x30, 0x8a, 0x00,
+ 0xae, 0xa1, 0x31, 0xe5, 0xb5, 0x9e, 0x6d, 0x62,
+ 0xda, 0xda, 0x07, 0x0f, 0x38, 0x38, 0xd3, 0xcb,
+ 0xc1, 0xb0, 0xad, 0xec, 0x72, 0xec, 0xb1, 0xa2,
+ 0x7b, 0x59, 0xf3, 0x3d, 0x2b, 0xef, 0xcd, 0x28,
+ 0x5b, 0x83, 0xcc, 0x18, 0x91, 0x88, 0xb0, 0x2e,
+ 0xf9, 0x29, 0x31, 0x18, 0xf9, 0x4e, 0xe9, 0x0a,
+ 0x91, 0x92, 0x9f, 0xae, 0x2d, 0xad, 0xf4, 0xe6,
+ 0x1a, 0xe2, 0xa4, 0xee, 0x47, 0x15, 0xbf, 0x83,
+ 0x6e, 0xd7, 0x72, 0x12, 0x3b, 0x2d, 0x24, 0xe9,
+ 0xb2, 0x55, 0xcb, 0x3c, 0x10, 0xf0, 0x24, 0x8a,
+ 0x4a, 0x02, 0xea, 0x90, 0x25, 0xf0, 0xb4, 0x79,
+ 0x3a, 0xef, 0x6e, 0xf5, 0x52, 0xdf, 0xb0, 0x0a,
+ 0xcd, 0x24, 0x1c, 0xd3, 0x2e, 0x22, 0x74, 0xea,
+ 0x21, 0x6f, 0xe9, 0xbd, 0xc8, 0x3e, 0x36, 0x5b,
+ 0x19, 0xf1, 0xca, 0x99, 0x0a, 0xb4, 0xa7, 0x52,
+ 0x1a, 0x4e, 0xf2, 0xad, 0x8d, 0x56, 0x85, 0xbb,
+ 0x64, 0x89, 0xba, 0x26, 0xf9, 0xc7, 0xe1, 0x89,
+ 0x19, 0x22, 0x77, 0xc3, 0xa8, 0xfc, 0xff, 0xad,
+ 0xfe, 0xb9, 0x48, 0xae, 0x12, 0x30, 0x9f, 0x19,
+ 0xfb, 0x1b, 0xef, 0x14, 0x87, 0x8a, 0x78, 0x71,
+ 0xf3, 0xf4, 0xb7, 0x00, 0x9c, 0x1d, 0xb5, 0x3d,
+ 0x49, 0x00, 0x0c, 0x06, 0xd4, 0x50, 0xf9, 0x54,
+ 0x45, 0xb2, 0x5b, 0x43, 0xdb, 0x6d, 0xcf, 0x1a,
+ 0xe9, 0x7a, 0x7a, 0xcf, 0xfc, 0x8a, 0x4e, 0x4d,
+ 0x0b, 0x07, 0x63, 0x28, 0xd8, 0xe7, 0x08, 0x95,
+ 0xdf, 0xa6, 0x72, 0x93, 0x2e, 0xbb, 0xa0, 0x42,
+ 0x89, 0x16, 0xf1, 0xd9, 0x0c, 0xf9, 0xa1, 0x16,
+ 0xfd, 0xd9, 0x03, 0xb4, 0x3b, 0x8a, 0xf5, 0xf6,
+ 0xe7, 0x6b, 0x2e, 0x8e, 0x4c, 0x3d, 0xe2, 0xaf,
+ 0x08, 0x45, 0x03, 0xff, 0x09, 0xb6, 0xeb, 0x2d,
+ 0xc6, 0x1b, 0x88, 0x94, 0xac, 0x3e, 0xf1, 0x9f,
+ 0x0e, 0x0e, 0x2b, 0xd5, 0x00, 0x4d, 0x3f, 0x3b,
+ 0x53, 0xae, 0xaf, 0x1c, 0x33, 0x5f, 0x55, 0x6e,
+ 0x8d, 0xaf, 0x05, 0x7a, 0x10, 0x34, 0xc9, 0xf4,
+ 0x66, 0xcb, 0x62, 0x12, 0xa6, 0xee, 0xe8, 0x1c,
+ 0x5d, 0x12, 0x86, 0xdb, 0x6f, 0x1c, 0x33, 0xc4,
+ 0x1c, 0xda, 0x82, 0x2d, 0x3b, 0x59, 0xfe, 0xb1,
+ 0xa4, 0x59, 0x41, 0x86, 0xd0, 0xef, 0xae, 0xfb,
+ 0xda, 0x6d, 0x11, 0xb8, 0xca, 0xe9, 0x6e, 0xff,
+ 0xf7, 0xa9, 0xd9, 0x70, 0x30, 0xfc, 0x53, 0xe2,
+ 0xd7, 0xa2, 0x4e, 0xc7, 0x91, 0xd9, 0x07, 0x06,
+ 0xaa, 0xdd, 0xb0, 0x59, 0x28, 0x1d, 0x00, 0x66,
+ 0xc5, 0x54, 0xc2, 0xfc, 0x06, 0xda, 0x05, 0x90,
+ 0x52, 0x1d, 0x37, 0x66, 0xee, 0xf0, 0xb2, 0x55,
+ 0x8a, 0x5d, 0xd2, 0x38, 0x86, 0x94, 0x9b, 0xfc,
+ 0x10, 0x4c, 0xa1, 0xb9, 0x64, 0x3e, 0x44, 0xb8,
+ 0x5f, 0xb0, 0x0c, 0xec, 0xe0, 0xc9, 0xe5, 0x62,
+ 0x75, 0x3f, 0x09, 0xd5, 0xf5, 0xd9, 0x26, 0xba,
+ 0x9e, 0xd2, 0xf4, 0xb9, 0x48, 0x0a, 0xbc, 0xa2,
+ 0xd6, 0x7c, 0x36, 0x11, 0x7d, 0x26, 0x81, 0x89,
+ 0xcf, 0xa4, 0xad, 0x73, 0x0e, 0xee, 0xcc, 0x06,
+ 0xa9, 0xdb, 0xb1, 0xfd, 0xfb, 0x09, 0x7f, 0x90,
+ 0x42, 0x37, 0x2f, 0xe1, 0x9c, 0x0f, 0x6f, 0xcf,
+ 0x43, 0xb5, 0xd9, 0x90, 0xe1, 0x85, 0xf5, 0xa8,
+ 0xae
+};
+static const u8 enc_assoc009[] __initconst = {
+ 0x5a, 0x27, 0xff, 0xeb, 0xdf, 0x84, 0xb2, 0x9e,
+ 0xef
+};
+static const u8 enc_nonce009[] __initconst = {
+ 0xef, 0x2d, 0x63, 0xee, 0x6b, 0x80, 0x8b, 0x78
+};
+static const u8 enc_key009[] __initconst = {
+ 0xea, 0xbc, 0x56, 0x99, 0xe3, 0x50, 0xff, 0xc5,
+ 0xcc, 0x1a, 0xd7, 0xc1, 0x57, 0x72, 0xea, 0x86,
+ 0x5b, 0x89, 0x88, 0x61, 0x3d, 0x2f, 0x9b, 0xb2,
+ 0xe7, 0x9c, 0xec, 0x74, 0x6e, 0x3e, 0xf4, 0x3b
+};
+
+static const u8 enc_input010[] __initconst = {
+ 0x42, 0x93, 0xe4, 0xeb, 0x97, 0xb0, 0x57, 0xbf,
+ 0x1a, 0x8b, 0x1f, 0xe4, 0x5f, 0x36, 0x20, 0x3c,
+ 0xef, 0x0a, 0xa9, 0x48, 0x5f, 0x5f, 0x37, 0x22,
+ 0x3a, 0xde, 0xe3, 0xae, 0xbe, 0xad, 0x07, 0xcc,
+ 0xb1, 0xf6, 0xf5, 0xf9, 0x56, 0xdd, 0xe7, 0x16,
+ 0x1e, 0x7f, 0xdf, 0x7a, 0x9e, 0x75, 0xb7, 0xc7,
+ 0xbe, 0xbe, 0x8a, 0x36, 0x04, 0xc0, 0x10, 0xf4,
+ 0x95, 0x20, 0x03, 0xec, 0xdc, 0x05, 0xa1, 0x7d,
+ 0xc4, 0xa9, 0x2c, 0x82, 0xd0, 0xbc, 0x8b, 0xc5,
+ 0xc7, 0x45, 0x50, 0xf6, 0xa2, 0x1a, 0xb5, 0x46,
+ 0x3b, 0x73, 0x02, 0xa6, 0x83, 0x4b, 0x73, 0x82,
+ 0x58, 0x5e, 0x3b, 0x65, 0x2f, 0x0e, 0xfd, 0x2b,
+ 0x59, 0x16, 0xce, 0xa1, 0x60, 0x9c, 0xe8, 0x3a,
+ 0x99, 0xed, 0x8d, 0x5a, 0xcf, 0xf6, 0x83, 0xaf,
+ 0xba, 0xd7, 0x73, 0x73, 0x40, 0x97, 0x3d, 0xca,
+ 0xef, 0x07, 0x57, 0xe6, 0xd9, 0x70, 0x0e, 0x95,
+ 0xae, 0xa6, 0x8d, 0x04, 0xcc, 0xee, 0xf7, 0x09,
+ 0x31, 0x77, 0x12, 0xa3, 0x23, 0x97, 0x62, 0xb3,
+ 0x7b, 0x32, 0xfb, 0x80, 0x14, 0x48, 0x81, 0xc3,
+ 0xe5, 0xea, 0x91, 0x39, 0x52, 0x81, 0xa2, 0x4f,
+ 0xe4, 0xb3, 0x09, 0xff, 0xde, 0x5e, 0xe9, 0x58,
+ 0x84, 0x6e, 0xf9, 0x3d, 0xdf, 0x25, 0xea, 0xad,
+ 0xae, 0xe6, 0x9a, 0xd1, 0x89, 0x55, 0xd3, 0xde,
+ 0x6c, 0x52, 0xdb, 0x70, 0xfe, 0x37, 0xce, 0x44,
+ 0x0a, 0xa8, 0x25, 0x5f, 0x92, 0xc1, 0x33, 0x4a,
+ 0x4f, 0x9b, 0x62, 0x35, 0xff, 0xce, 0xc0, 0xa9,
+ 0x60, 0xce, 0x52, 0x00, 0x97, 0x51, 0x35, 0x26,
+ 0x2e, 0xb9, 0x36, 0xa9, 0x87, 0x6e, 0x1e, 0xcc,
+ 0x91, 0x78, 0x53, 0x98, 0x86, 0x5b, 0x9c, 0x74,
+ 0x7d, 0x88, 0x33, 0xe1, 0xdf, 0x37, 0x69, 0x2b,
+ 0xbb, 0xf1, 0x4d, 0xf4, 0xd1, 0xf1, 0x39, 0x93,
+ 0x17, 0x51, 0x19, 0xe3, 0x19, 0x1e, 0x76, 0x37,
+ 0x25, 0xfb, 0x09, 0x27, 0x6a, 0xab, 0x67, 0x6f,
+ 0x14, 0x12, 0x64, 0xe7, 0xc4, 0x07, 0xdf, 0x4d,
+ 0x17, 0xbb, 0x6d, 0xe0, 0xe9, 0xb9, 0xab, 0xca,
+ 0x10, 0x68, 0xaf, 0x7e, 0xb7, 0x33, 0x54, 0x73,
+ 0x07, 0x6e, 0xf7, 0x81, 0x97, 0x9c, 0x05, 0x6f,
+ 0x84, 0x5f, 0xd2, 0x42, 0xfb, 0x38, 0xcf, 0xd1,
+ 0x2f, 0x14, 0x30, 0x88, 0x98, 0x4d, 0x5a, 0xa9,
+ 0x76, 0xd5, 0x4f, 0x3e, 0x70, 0x6c, 0x85, 0x76,
+ 0xd7, 0x01, 0xa0, 0x1a, 0xc8, 0x4e, 0xaa, 0xac,
+ 0x78, 0xfe, 0x46, 0xde, 0x6a, 0x05, 0x46, 0xa7,
+ 0x43, 0x0c, 0xb9, 0xde, 0xb9, 0x68, 0xfb, 0xce,
+ 0x42, 0x99, 0x07, 0x4d, 0x0b, 0x3b, 0x5a, 0x30,
+ 0x35, 0xa8, 0xf9, 0x3a, 0x73, 0xef, 0x0f, 0xdb,
+ 0x1e, 0x16, 0x42, 0xc4, 0xba, 0xae, 0x58, 0xaa,
+ 0xf8, 0xe5, 0x75, 0x2f, 0x1b, 0x15, 0x5c, 0xfd,
+ 0x0a, 0x97, 0xd0, 0xe4, 0x37, 0x83, 0x61, 0x5f,
+ 0x43, 0xa6, 0xc7, 0x3f, 0x38, 0x59, 0xe6, 0xeb,
+ 0xa3, 0x90, 0xc3, 0xaa, 0xaa, 0x5a, 0xd3, 0x34,
+ 0xd4, 0x17, 0xc8, 0x65, 0x3e, 0x57, 0xbc, 0x5e,
+ 0xdd, 0x9e, 0xb7, 0xf0, 0x2e, 0x5b, 0xb2, 0x1f,
+ 0x8a, 0x08, 0x0d, 0x45, 0x91, 0x0b, 0x29, 0x53,
+ 0x4f, 0x4c, 0x5a, 0x73, 0x56, 0xfe, 0xaf, 0x41,
+ 0x01, 0x39, 0x0a, 0x24, 0x3c, 0x7e, 0xbe, 0x4e,
+ 0x53, 0xf3, 0xeb, 0x06, 0x66, 0x51, 0x28, 0x1d,
+ 0xbd, 0x41, 0x0a, 0x01, 0xab, 0x16, 0x47, 0x27,
+ 0x47, 0x47, 0xf7, 0xcb, 0x46, 0x0a, 0x70, 0x9e,
+ 0x01, 0x9c, 0x09, 0xe1, 0x2a, 0x00, 0x1a, 0xd8,
+ 0xd4, 0x79, 0x9d, 0x80, 0x15, 0x8e, 0x53, 0x2a,
+ 0x65, 0x83, 0x78, 0x3e, 0x03, 0x00, 0x07, 0x12,
+ 0x1f, 0x33, 0x3e, 0x7b, 0x13, 0x37, 0xf1, 0xc3,
+ 0xef, 0xb7, 0xc1, 0x20, 0x3c, 0x3e, 0x67, 0x66,
+ 0x5d, 0x88, 0xa7, 0x7d, 0x33, 0x50, 0x77, 0xb0,
+ 0x28, 0x8e, 0xe7, 0x2c, 0x2e, 0x7a, 0xf4, 0x3c,
+ 0x8d, 0x74, 0x83, 0xaf, 0x8e, 0x87, 0x0f, 0xe4,
+ 0x50, 0xff, 0x84, 0x5c, 0x47, 0x0c, 0x6a, 0x49,
+ 0xbf, 0x42, 0x86, 0x77, 0x15, 0x48, 0xa5, 0x90,
+ 0x5d, 0x93, 0xd6, 0x2a, 0x11, 0xd5, 0xd5, 0x11,
+ 0xaa, 0xce, 0xe7, 0x6f, 0xa5, 0xb0, 0x09, 0x2c,
+ 0x8d, 0xd3, 0x92, 0xf0, 0x5a, 0x2a, 0xda, 0x5b,
+ 0x1e, 0xd5, 0x9a, 0xc4, 0xc4, 0xf3, 0x49, 0x74,
+ 0x41, 0xca, 0xe8, 0xc1, 0xf8, 0x44, 0xd6, 0x3c,
+ 0xae, 0x6c, 0x1d, 0x9a, 0x30, 0x04, 0x4d, 0x27,
+ 0x0e, 0xb1, 0x5f, 0x59, 0xa2, 0x24, 0xe8, 0xe1,
+ 0x98, 0xc5, 0x6a, 0x4c, 0xfe, 0x41, 0xd2, 0x27,
+ 0x42, 0x52, 0xe1, 0xe9, 0x7d, 0x62, 0xe4, 0x88,
+ 0x0f, 0xad, 0xb2, 0x70, 0xcb, 0x9d, 0x4c, 0x27,
+ 0x2e, 0x76, 0x1e, 0x1a, 0x63, 0x65, 0xf5, 0x3b,
+ 0xf8, 0x57, 0x69, 0xeb, 0x5b, 0x38, 0x26, 0x39,
+ 0x33, 0x25, 0x45, 0x3e, 0x91, 0xb8, 0xd8, 0xc7,
+ 0xd5, 0x42, 0xc0, 0x22, 0x31, 0x74, 0xf4, 0xbc,
+ 0x0c, 0x23, 0xf1, 0xca, 0xc1, 0x8d, 0xd7, 0xbe,
+ 0xc9, 0x62, 0xe4, 0x08, 0x1a, 0xcf, 0x36, 0xd5,
+ 0xfe, 0x55, 0x21, 0x59, 0x91, 0x87, 0x87, 0xdf,
+ 0x06, 0xdb, 0xdf, 0x96, 0x45, 0x58, 0xda, 0x05,
+ 0xcd, 0x50, 0x4d, 0xd2, 0x7d, 0x05, 0x18, 0x73,
+ 0x6a, 0x8d, 0x11, 0x85, 0xa6, 0x88, 0xe8, 0xda,
+ 0xe6, 0x30, 0x33, 0xa4, 0x89, 0x31, 0x75, 0xbe,
+ 0x69, 0x43, 0x84, 0x43, 0x50, 0x87, 0xdd, 0x71,
+ 0x36, 0x83, 0xc3, 0x78, 0x74, 0x24, 0x0a, 0xed,
+ 0x7b, 0xdb, 0xa4, 0x24, 0x0b, 0xb9, 0x7e, 0x5d,
+ 0xff, 0xde, 0xb1, 0xef, 0x61, 0x5a, 0x45, 0x33,
+ 0xf6, 0x17, 0x07, 0x08, 0x98, 0x83, 0x92, 0x0f,
+ 0x23, 0x6d, 0xe6, 0xaa, 0x17, 0x54, 0xad, 0x6a,
+ 0xc8, 0xdb, 0x26, 0xbe, 0xb8, 0xb6, 0x08, 0xfa,
+ 0x68, 0xf1, 0xd7, 0x79, 0x6f, 0x18, 0xb4, 0x9e,
+ 0x2d, 0x3f, 0x1b, 0x64, 0xaf, 0x8d, 0x06, 0x0e,
+ 0x49, 0x28, 0xe0, 0x5d, 0x45, 0x68, 0x13, 0x87,
+ 0xfa, 0xde, 0x40, 0x7b, 0xd2, 0xc3, 0x94, 0xd5,
+ 0xe1, 0xd9, 0xc2, 0xaf, 0x55, 0x89, 0xeb, 0xb4,
+ 0x12, 0x59, 0xa8, 0xd4, 0xc5, 0x29, 0x66, 0x38,
+ 0xe6, 0xac, 0x22, 0x22, 0xd9, 0x64, 0x9b, 0x34,
+ 0x0a, 0x32, 0x9f, 0xc2, 0xbf, 0x17, 0x6c, 0x3f,
+ 0x71, 0x7a, 0x38, 0x6b, 0x98, 0xfb, 0x49, 0x36,
+ 0x89, 0xc9, 0xe2, 0xd6, 0xc7, 0x5d, 0xd0, 0x69,
+ 0x5f, 0x23, 0x35, 0xc9, 0x30, 0xe2, 0xfd, 0x44,
+ 0x58, 0x39, 0xd7, 0x97, 0xfb, 0x5c, 0x00, 0xd5,
+ 0x4f, 0x7a, 0x1a, 0x95, 0x8b, 0x62, 0x4b, 0xce,
+ 0xe5, 0x91, 0x21, 0x7b, 0x30, 0x00, 0xd6, 0xdd,
+ 0x6d, 0x02, 0x86, 0x49, 0x0f, 0x3c, 0x1a, 0x27,
+ 0x3c, 0xd3, 0x0e, 0x71, 0xf2, 0xff, 0xf5, 0x2f,
+ 0x87, 0xac, 0x67, 0x59, 0x81, 0xa3, 0xf7, 0xf8,
+ 0xd6, 0x11, 0x0c, 0x84, 0xa9, 0x03, 0xee, 0x2a,
+ 0xc4, 0xf3, 0x22, 0xab, 0x7c, 0xe2, 0x25, 0xf5,
+ 0x67, 0xa3, 0xe4, 0x11, 0xe0, 0x59, 0xb3, 0xca,
+ 0x87, 0xa0, 0xae, 0xc9, 0xa6, 0x62, 0x1b, 0x6e,
+ 0x4d, 0x02, 0x6b, 0x07, 0x9d, 0xfd, 0xd0, 0x92,
+ 0x06, 0xe1, 0xb2, 0x9a, 0x4a, 0x1f, 0x1f, 0x13,
+ 0x49, 0x99, 0x97, 0x08, 0xde, 0x7f, 0x98, 0xaf,
+ 0x51, 0x98, 0xee, 0x2c, 0xcb, 0xf0, 0x0b, 0xc6,
+ 0xb6, 0xb7, 0x2d, 0x9a, 0xb1, 0xac, 0xa6, 0xe3,
+ 0x15, 0x77, 0x9d, 0x6b, 0x1a, 0xe4, 0xfc, 0x8b,
+ 0xf2, 0x17, 0x59, 0x08, 0x04, 0x58, 0x81, 0x9d,
+ 0x1b, 0x1b, 0x69, 0x55, 0xc2, 0xb4, 0x3c, 0x1f,
+ 0x50, 0xf1, 0x7f, 0x77, 0x90, 0x4c, 0x66, 0x40,
+ 0x5a, 0xc0, 0x33, 0x1f, 0xcb, 0x05, 0x6d, 0x5c,
+ 0x06, 0x87, 0x52, 0xa2, 0x8f, 0x26, 0xd5, 0x4f
+};
+static const u8 enc_output010[] __initconst = {
+ 0xe5, 0x26, 0xa4, 0x3d, 0xbd, 0x33, 0xd0, 0x4b,
+ 0x6f, 0x05, 0xa7, 0x6e, 0x12, 0x7a, 0xd2, 0x74,
+ 0xa6, 0xdd, 0xbd, 0x95, 0xeb, 0xf9, 0xa4, 0xf1,
+ 0x59, 0x93, 0x91, 0x70, 0xd9, 0xfe, 0x9a, 0xcd,
+ 0x53, 0x1f, 0x3a, 0xab, 0xa6, 0x7c, 0x9f, 0xa6,
+ 0x9e, 0xbd, 0x99, 0xd9, 0xb5, 0x97, 0x44, 0xd5,
+ 0x14, 0x48, 0x4d, 0x9d, 0xc0, 0xd0, 0x05, 0x96,
+ 0xeb, 0x4c, 0x78, 0x55, 0x09, 0x08, 0x01, 0x02,
+ 0x30, 0x90, 0x7b, 0x96, 0x7a, 0x7b, 0x5f, 0x30,
+ 0x41, 0x24, 0xce, 0x68, 0x61, 0x49, 0x86, 0x57,
+ 0x82, 0xdd, 0x53, 0x1c, 0x51, 0x28, 0x2b, 0x53,
+ 0x6e, 0x2d, 0xc2, 0x20, 0x4c, 0xdd, 0x8f, 0x65,
+ 0x10, 0x20, 0x50, 0xdd, 0x9d, 0x50, 0xe5, 0x71,
+ 0x40, 0x53, 0x69, 0xfc, 0x77, 0x48, 0x11, 0xb9,
+ 0xde, 0xa4, 0x8d, 0x58, 0xe4, 0xa6, 0x1a, 0x18,
+ 0x47, 0x81, 0x7e, 0xfc, 0xdd, 0xf6, 0xef, 0xce,
+ 0x2f, 0x43, 0x68, 0xd6, 0x06, 0xe2, 0x74, 0x6a,
+ 0xad, 0x90, 0xf5, 0x37, 0xf3, 0x3d, 0x82, 0x69,
+ 0x40, 0xe9, 0x6b, 0xa7, 0x3d, 0xa8, 0x1e, 0xd2,
+ 0x02, 0x7c, 0xb7, 0x9b, 0xe4, 0xda, 0x8f, 0x95,
+ 0x06, 0xc5, 0xdf, 0x73, 0xa3, 0x20, 0x9a, 0x49,
+ 0xde, 0x9c, 0xbc, 0xee, 0x14, 0x3f, 0x81, 0x5e,
+ 0xf8, 0x3b, 0x59, 0x3c, 0xe1, 0x68, 0x12, 0x5a,
+ 0x3a, 0x76, 0x3a, 0x3f, 0xf7, 0x87, 0x33, 0x0a,
+ 0x01, 0xb8, 0xd4, 0xed, 0xb6, 0xbe, 0x94, 0x5e,
+ 0x70, 0x40, 0x56, 0x67, 0x1f, 0x50, 0x44, 0x19,
+ 0xce, 0x82, 0x70, 0x10, 0x87, 0x13, 0x20, 0x0b,
+ 0x4c, 0x5a, 0xb6, 0xf6, 0xa7, 0xae, 0x81, 0x75,
+ 0x01, 0x81, 0xe6, 0x4b, 0x57, 0x7c, 0xdd, 0x6d,
+ 0xf8, 0x1c, 0x29, 0x32, 0xf7, 0xda, 0x3c, 0x2d,
+ 0xf8, 0x9b, 0x25, 0x6e, 0x00, 0xb4, 0xf7, 0x2f,
+ 0xf7, 0x04, 0xf7, 0xa1, 0x56, 0xac, 0x4f, 0x1a,
+ 0x64, 0xb8, 0x47, 0x55, 0x18, 0x7b, 0x07, 0x4d,
+ 0xbd, 0x47, 0x24, 0x80, 0x5d, 0xa2, 0x70, 0xc5,
+ 0xdd, 0x8e, 0x82, 0xd4, 0xeb, 0xec, 0xb2, 0x0c,
+ 0x39, 0xd2, 0x97, 0xc1, 0xcb, 0xeb, 0xf4, 0x77,
+ 0x59, 0xb4, 0x87, 0xef, 0xcb, 0x43, 0x2d, 0x46,
+ 0x54, 0xd1, 0xa7, 0xd7, 0x15, 0x99, 0x0a, 0x43,
+ 0xa1, 0xe0, 0x99, 0x33, 0x71, 0xc1, 0xed, 0xfe,
+ 0x72, 0x46, 0x33, 0x8e, 0x91, 0x08, 0x9f, 0xc8,
+ 0x2e, 0xca, 0xfa, 0xdc, 0x59, 0xd5, 0xc3, 0x76,
+ 0x84, 0x9f, 0xa3, 0x37, 0x68, 0xc3, 0xf0, 0x47,
+ 0x2c, 0x68, 0xdb, 0x5e, 0xc3, 0x49, 0x4c, 0xe8,
+ 0x92, 0x85, 0xe2, 0x23, 0xd3, 0x3f, 0xad, 0x32,
+ 0xe5, 0x2b, 0x82, 0xd7, 0x8f, 0x99, 0x0a, 0x59,
+ 0x5c, 0x45, 0xd9, 0xb4, 0x51, 0x52, 0xc2, 0xae,
+ 0xbf, 0x80, 0xcf, 0xc9, 0xc9, 0x51, 0x24, 0x2a,
+ 0x3b, 0x3a, 0x4d, 0xae, 0xeb, 0xbd, 0x22, 0xc3,
+ 0x0e, 0x0f, 0x59, 0x25, 0x92, 0x17, 0xe9, 0x74,
+ 0xc7, 0x8b, 0x70, 0x70, 0x36, 0x55, 0x95, 0x75,
+ 0x4b, 0xad, 0x61, 0x2b, 0x09, 0xbc, 0x82, 0xf2,
+ 0x6e, 0x94, 0x43, 0xae, 0xc3, 0xd5, 0xcd, 0x8e,
+ 0xfe, 0x5b, 0x9a, 0x88, 0x43, 0x01, 0x75, 0xb2,
+ 0x23, 0x09, 0xf7, 0x89, 0x83, 0xe7, 0xfa, 0xf9,
+ 0xb4, 0x9b, 0xf8, 0xef, 0xbd, 0x1c, 0x92, 0xc1,
+ 0xda, 0x7e, 0xfe, 0x05, 0xba, 0x5a, 0xcd, 0x07,
+ 0x6a, 0x78, 0x9e, 0x5d, 0xfb, 0x11, 0x2f, 0x79,
+ 0x38, 0xb6, 0xc2, 0x5b, 0x6b, 0x51, 0xb4, 0x71,
+ 0xdd, 0xf7, 0x2a, 0xe4, 0xf4, 0x72, 0x76, 0xad,
+ 0xc2, 0xdd, 0x64, 0x5d, 0x79, 0xb6, 0xf5, 0x7a,
+ 0x77, 0x20, 0x05, 0x3d, 0x30, 0x06, 0xd4, 0x4c,
+ 0x0a, 0x2c, 0x98, 0x5a, 0xb9, 0xd4, 0x98, 0xa9,
+ 0x3f, 0xc6, 0x12, 0xea, 0x3b, 0x4b, 0xc5, 0x79,
+ 0x64, 0x63, 0x6b, 0x09, 0x54, 0x3b, 0x14, 0x27,
+ 0xba, 0x99, 0x80, 0xc8, 0x72, 0xa8, 0x12, 0x90,
+ 0x29, 0xba, 0x40, 0x54, 0x97, 0x2b, 0x7b, 0xfe,
+ 0xeb, 0xcd, 0x01, 0x05, 0x44, 0x72, 0xdb, 0x99,
+ 0xe4, 0x61, 0xc9, 0x69, 0xd6, 0xb9, 0x28, 0xd1,
+ 0x05, 0x3e, 0xf9, 0x0b, 0x49, 0x0a, 0x49, 0xe9,
+ 0x8d, 0x0e, 0xa7, 0x4a, 0x0f, 0xaf, 0x32, 0xd0,
+ 0xe0, 0xb2, 0x3a, 0x55, 0x58, 0xfe, 0x5c, 0x28,
+ 0x70, 0x51, 0x23, 0xb0, 0x7b, 0x6a, 0x5f, 0x1e,
+ 0xb8, 0x17, 0xd7, 0x94, 0x15, 0x8f, 0xee, 0x20,
+ 0xc7, 0x42, 0x25, 0x3e, 0x9a, 0x14, 0xd7, 0x60,
+ 0x72, 0x39, 0x47, 0x48, 0xa9, 0xfe, 0xdd, 0x47,
+ 0x0a, 0xb1, 0xe6, 0x60, 0x28, 0x8c, 0x11, 0x68,
+ 0xe1, 0xff, 0xd7, 0xce, 0xc8, 0xbe, 0xb3, 0xfe,
+ 0x27, 0x30, 0x09, 0x70, 0xd7, 0xfa, 0x02, 0x33,
+ 0x3a, 0x61, 0x2e, 0xc7, 0xff, 0xa4, 0x2a, 0xa8,
+ 0x6e, 0xb4, 0x79, 0x35, 0x6d, 0x4c, 0x1e, 0x38,
+ 0xf8, 0xee, 0xd4, 0x84, 0x4e, 0x6e, 0x28, 0xa7,
+ 0xce, 0xc8, 0xc1, 0xcf, 0x80, 0x05, 0xf3, 0x04,
+ 0xef, 0xc8, 0x18, 0x28, 0x2e, 0x8d, 0x5e, 0x0c,
+ 0xdf, 0xb8, 0x5f, 0x96, 0xe8, 0xc6, 0x9c, 0x2f,
+ 0xe5, 0xa6, 0x44, 0xd7, 0xe7, 0x99, 0x44, 0x0c,
+ 0xec, 0xd7, 0x05, 0x60, 0x97, 0xbb, 0x74, 0x77,
+ 0x58, 0xd5, 0xbb, 0x48, 0xde, 0x5a, 0xb2, 0x54,
+ 0x7f, 0x0e, 0x46, 0x70, 0x6a, 0x6f, 0x78, 0xa5,
+ 0x08, 0x89, 0x05, 0x4e, 0x7e, 0xa0, 0x69, 0xb4,
+ 0x40, 0x60, 0x55, 0x77, 0x75, 0x9b, 0x19, 0xf2,
+ 0xd5, 0x13, 0x80, 0x77, 0xf9, 0x4b, 0x3f, 0x1e,
+ 0xee, 0xe6, 0x76, 0x84, 0x7b, 0x8c, 0xe5, 0x27,
+ 0xa8, 0x0a, 0x91, 0x01, 0x68, 0x71, 0x8a, 0x3f,
+ 0x06, 0xab, 0xf6, 0xa9, 0xa5, 0xe6, 0x72, 0x92,
+ 0xe4, 0x67, 0xe2, 0xa2, 0x46, 0x35, 0x84, 0x55,
+ 0x7d, 0xca, 0xa8, 0x85, 0xd0, 0xf1, 0x3f, 0xbe,
+ 0xd7, 0x34, 0x64, 0xfc, 0xae, 0xe3, 0xe4, 0x04,
+ 0x9f, 0x66, 0x02, 0xb9, 0x88, 0x10, 0xd9, 0xc4,
+ 0x4c, 0x31, 0x43, 0x7a, 0x93, 0xe2, 0x9b, 0x56,
+ 0x43, 0x84, 0xdc, 0xdc, 0xde, 0x1d, 0xa4, 0x02,
+ 0x0e, 0xc2, 0xef, 0xc3, 0xf8, 0x78, 0xd1, 0xb2,
+ 0x6b, 0x63, 0x18, 0xc9, 0xa9, 0xe5, 0x72, 0xd8,
+ 0xf3, 0xb9, 0xd1, 0x8a, 0xc7, 0x1a, 0x02, 0x27,
+ 0x20, 0x77, 0x10, 0xe5, 0xc8, 0xd4, 0x4a, 0x47,
+ 0xe5, 0xdf, 0x5f, 0x01, 0xaa, 0xb0, 0xd4, 0x10,
+ 0xbb, 0x69, 0xe3, 0x36, 0xc8, 0xe1, 0x3d, 0x43,
+ 0xfb, 0x86, 0xcd, 0xcc, 0xbf, 0xf4, 0x88, 0xe0,
+ 0x20, 0xca, 0xb7, 0x1b, 0xf1, 0x2f, 0x5c, 0xee,
+ 0xd4, 0xd3, 0xa3, 0xcc, 0xa4, 0x1e, 0x1c, 0x47,
+ 0xfb, 0xbf, 0xfc, 0xa2, 0x41, 0x55, 0x9d, 0xf6,
+ 0x5a, 0x5e, 0x65, 0x32, 0x34, 0x7b, 0x52, 0x8d,
+ 0xd5, 0xd0, 0x20, 0x60, 0x03, 0xab, 0x3f, 0x8c,
+ 0xd4, 0x21, 0xea, 0x2a, 0xd9, 0xc4, 0xd0, 0xd3,
+ 0x65, 0xd8, 0x7a, 0x13, 0x28, 0x62, 0x32, 0x4b,
+ 0x2c, 0x87, 0x93, 0xa8, 0xb4, 0x52, 0x45, 0x09,
+ 0x44, 0xec, 0xec, 0xc3, 0x17, 0xdb, 0x9a, 0x4d,
+ 0x5c, 0xa9, 0x11, 0xd4, 0x7d, 0xaf, 0x9e, 0xf1,
+ 0x2d, 0xb2, 0x66, 0xc5, 0x1d, 0xed, 0xb7, 0xcd,
+ 0x0b, 0x25, 0x5e, 0x30, 0x47, 0x3f, 0x40, 0xf4,
+ 0xa1, 0xa0, 0x00, 0x94, 0x10, 0xc5, 0x6a, 0x63,
+ 0x1a, 0xd5, 0x88, 0x92, 0x8e, 0x82, 0x39, 0x87,
+ 0x3c, 0x78, 0x65, 0x58, 0x42, 0x75, 0x5b, 0xdd,
+ 0x77, 0x3e, 0x09, 0x4e, 0x76, 0x5b, 0xe6, 0x0e,
+ 0x4d, 0x38, 0xb2, 0xc0, 0xb8, 0x95, 0x01, 0x7a,
+ 0x10, 0xe0, 0xfb, 0x07, 0xf2, 0xab, 0x2d, 0x8c,
+ 0x32, 0xed, 0x2b, 0xc0, 0x46, 0xc2, 0xf5, 0x38,
+ 0x83, 0xf0, 0x17, 0xec, 0xc1, 0x20, 0x6a, 0x9a,
+ 0x0b, 0x00, 0xa0, 0x98, 0x22, 0x50, 0x23, 0xd5,
+ 0x80, 0x6b, 0xf6, 0x1f, 0xc3, 0xcc, 0x97, 0xc9,
+ 0x24, 0x9f, 0xf3, 0xaf, 0x43, 0x14, 0xd5, 0xa0
+};
+static const u8 enc_assoc010[] __initconst = {
+ 0xd2, 0xa1, 0x70, 0xdb, 0x7a, 0xf8, 0xfa, 0x27,
+ 0xba, 0x73, 0x0f, 0xbf, 0x3d, 0x1e, 0x82, 0xb2
+};
+static const u8 enc_nonce010[] __initconst = {
+ 0xdb, 0x92, 0x0f, 0x7f, 0x17, 0x54, 0x0c, 0x30
+};
+static const u8 enc_key010[] __initconst = {
+ 0x47, 0x11, 0xeb, 0x86, 0x2b, 0x2c, 0xab, 0x44,
+ 0x34, 0xda, 0x7f, 0x57, 0x03, 0x39, 0x0c, 0xaf,
+ 0x2c, 0x14, 0xfd, 0x65, 0x23, 0xe9, 0x8e, 0x74,
+ 0xd5, 0x08, 0x68, 0x08, 0xe7, 0xb4, 0x72, 0xd7
+};
+
+static const u8 enc_input011[] __initconst = {
+ 0x7a, 0x57, 0xf2, 0xc7, 0x06, 0x3f, 0x50, 0x7b,
+ 0x36, 0x1a, 0x66, 0x5c, 0xb9, 0x0e, 0x5e, 0x3b,
+ 0x45, 0x60, 0xbe, 0x9a, 0x31, 0x9f, 0xff, 0x5d,
+ 0x66, 0x34, 0xb4, 0xdc, 0xfb, 0x9d, 0x8e, 0xee,
+ 0x6a, 0x33, 0xa4, 0x07, 0x3c, 0xf9, 0x4c, 0x30,
+ 0xa1, 0x24, 0x52, 0xf9, 0x50, 0x46, 0x88, 0x20,
+ 0x02, 0x32, 0x3a, 0x0e, 0x99, 0x63, 0xaf, 0x1f,
+ 0x15, 0x28, 0x2a, 0x05, 0xff, 0x57, 0x59, 0x5e,
+ 0x18, 0xa1, 0x1f, 0xd0, 0x92, 0x5c, 0x88, 0x66,
+ 0x1b, 0x00, 0x64, 0xa5, 0x93, 0x8d, 0x06, 0x46,
+ 0xb0, 0x64, 0x8b, 0x8b, 0xef, 0x99, 0x05, 0x35,
+ 0x85, 0xb3, 0xf3, 0x33, 0xbb, 0xec, 0x66, 0xb6,
+ 0x3d, 0x57, 0x42, 0xe3, 0xb4, 0xc6, 0xaa, 0xb0,
+ 0x41, 0x2a, 0xb9, 0x59, 0xa9, 0xf6, 0x3e, 0x15,
+ 0x26, 0x12, 0x03, 0x21, 0x4c, 0x74, 0x43, 0x13,
+ 0x2a, 0x03, 0x27, 0x09, 0xb4, 0xfb, 0xe7, 0xb7,
+ 0x40, 0xff, 0x5e, 0xce, 0x48, 0x9a, 0x60, 0xe3,
+ 0x8b, 0x80, 0x8c, 0x38, 0x2d, 0xcb, 0x93, 0x37,
+ 0x74, 0x05, 0x52, 0x6f, 0x73, 0x3e, 0xc3, 0xbc,
+ 0xca, 0x72, 0x0a, 0xeb, 0xf1, 0x3b, 0xa0, 0x95,
+ 0xdc, 0x8a, 0xc4, 0xa9, 0xdc, 0xca, 0x44, 0xd8,
+ 0x08, 0x63, 0x6a, 0x36, 0xd3, 0x3c, 0xb8, 0xac,
+ 0x46, 0x7d, 0xfd, 0xaa, 0xeb, 0x3e, 0x0f, 0x45,
+ 0x8f, 0x49, 0xda, 0x2b, 0xf2, 0x12, 0xbd, 0xaf,
+ 0x67, 0x8a, 0x63, 0x48, 0x4b, 0x55, 0x5f, 0x6d,
+ 0x8c, 0xb9, 0x76, 0x34, 0x84, 0xae, 0xc2, 0xfc,
+ 0x52, 0x64, 0x82, 0xf7, 0xb0, 0x06, 0xf0, 0x45,
+ 0x73, 0x12, 0x50, 0x30, 0x72, 0xea, 0x78, 0x9a,
+ 0xa8, 0xaf, 0xb5, 0xe3, 0xbb, 0x77, 0x52, 0xec,
+ 0x59, 0x84, 0xbf, 0x6b, 0x8f, 0xce, 0x86, 0x5e,
+ 0x1f, 0x23, 0xe9, 0xfb, 0x08, 0x86, 0xf7, 0x10,
+ 0xb9, 0xf2, 0x44, 0x96, 0x44, 0x63, 0xa9, 0xa8,
+ 0x78, 0x00, 0x23, 0xd6, 0xc7, 0xe7, 0x6e, 0x66,
+ 0x4f, 0xcc, 0xee, 0x15, 0xb3, 0xbd, 0x1d, 0xa0,
+ 0xe5, 0x9c, 0x1b, 0x24, 0x2c, 0x4d, 0x3c, 0x62,
+ 0x35, 0x9c, 0x88, 0x59, 0x09, 0xdd, 0x82, 0x1b,
+ 0xcf, 0x0a, 0x83, 0x6b, 0x3f, 0xae, 0x03, 0xc4,
+ 0xb4, 0xdd, 0x7e, 0x5b, 0x28, 0x76, 0x25, 0x96,
+ 0xd9, 0xc9, 0x9d, 0x5f, 0x86, 0xfa, 0xf6, 0xd7,
+ 0xd2, 0xe6, 0x76, 0x1d, 0x0f, 0xa1, 0xdc, 0x74,
+ 0x05, 0x1b, 0x1d, 0xe0, 0xcd, 0x16, 0xb0, 0xa8,
+ 0x8a, 0x34, 0x7b, 0x15, 0x11, 0x77, 0xe5, 0x7b,
+ 0x7e, 0x20, 0xf7, 0xda, 0x38, 0xda, 0xce, 0x70,
+ 0xe9, 0xf5, 0x6c, 0xd9, 0xbe, 0x0c, 0x4c, 0x95,
+ 0x4c, 0xc2, 0x9b, 0x34, 0x55, 0x55, 0xe1, 0xf3,
+ 0x46, 0x8e, 0x48, 0x74, 0x14, 0x4f, 0x9d, 0xc9,
+ 0xf5, 0xe8, 0x1a, 0xf0, 0x11, 0x4a, 0xc1, 0x8d,
+ 0xe0, 0x93, 0xa0, 0xbe, 0x09, 0x1c, 0x2b, 0x4e,
+ 0x0f, 0xb2, 0x87, 0x8b, 0x84, 0xfe, 0x92, 0x32,
+ 0x14, 0xd7, 0x93, 0xdf, 0xe7, 0x44, 0xbc, 0xc5,
+ 0xae, 0x53, 0x69, 0xd8, 0xb3, 0x79, 0x37, 0x80,
+ 0xe3, 0x17, 0x5c, 0xec, 0x53, 0x00, 0x9a, 0xe3,
+ 0x8e, 0xdc, 0x38, 0xb8, 0x66, 0xf0, 0xd3, 0xad,
+ 0x1d, 0x02, 0x96, 0x86, 0x3e, 0x9d, 0x3b, 0x5d,
+ 0xa5, 0x7f, 0x21, 0x10, 0xf1, 0x1f, 0x13, 0x20,
+ 0xf9, 0x57, 0x87, 0x20, 0xf5, 0x5f, 0xf1, 0x17,
+ 0x48, 0x0a, 0x51, 0x5a, 0xcd, 0x19, 0x03, 0xa6,
+ 0x5a, 0xd1, 0x12, 0x97, 0xe9, 0x48, 0xe2, 0x1d,
+ 0x83, 0x75, 0x50, 0xd9, 0x75, 0x7d, 0x6a, 0x82,
+ 0xa1, 0xf9, 0x4e, 0x54, 0x87, 0x89, 0xc9, 0x0c,
+ 0xb7, 0x5b, 0x6a, 0x91, 0xc1, 0x9c, 0xb2, 0xa9,
+ 0xdc, 0x9a, 0xa4, 0x49, 0x0a, 0x6d, 0x0d, 0xbb,
+ 0xde, 0x86, 0x44, 0xdd, 0x5d, 0x89, 0x2b, 0x96,
+ 0x0f, 0x23, 0x95, 0xad, 0xcc, 0xa2, 0xb3, 0xb9,
+ 0x7e, 0x74, 0x38, 0xba, 0x9f, 0x73, 0xae, 0x5f,
+ 0xf8, 0x68, 0xa2, 0xe0, 0xa9, 0xce, 0xbd, 0x40,
+ 0xd4, 0x4c, 0x6b, 0xd2, 0x56, 0x62, 0xb0, 0xcc,
+ 0x63, 0x7e, 0x5b, 0xd3, 0xae, 0xd1, 0x75, 0xce,
+ 0xbb, 0xb4, 0x5b, 0xa8, 0xf8, 0xb4, 0xac, 0x71,
+ 0x75, 0xaa, 0xc9, 0x9f, 0xbb, 0x6c, 0xad, 0x0f,
+ 0x55, 0x5d, 0xe8, 0x85, 0x7d, 0xf9, 0x21, 0x35,
+ 0xea, 0x92, 0x85, 0x2b, 0x00, 0xec, 0x84, 0x90,
+ 0x0a, 0x63, 0x96, 0xe4, 0x6b, 0xa9, 0x77, 0xb8,
+ 0x91, 0xf8, 0x46, 0x15, 0x72, 0x63, 0x70, 0x01,
+ 0x40, 0xa3, 0xa5, 0x76, 0x62, 0x2b, 0xbf, 0xf1,
+ 0xe5, 0x8d, 0x9f, 0xa3, 0xfa, 0x9b, 0x03, 0xbe,
+ 0xfe, 0x65, 0x6f, 0xa2, 0x29, 0x0d, 0x54, 0xb4,
+ 0x71, 0xce, 0xa9, 0xd6, 0x3d, 0x88, 0xf9, 0xaf,
+ 0x6b, 0xa8, 0x9e, 0xf4, 0x16, 0x96, 0x36, 0xb9,
+ 0x00, 0xdc, 0x10, 0xab, 0xb5, 0x08, 0x31, 0x1f,
+ 0x00, 0xb1, 0x3c, 0xd9, 0x38, 0x3e, 0xc6, 0x04,
+ 0xa7, 0x4e, 0xe8, 0xae, 0xed, 0x98, 0xc2, 0xf7,
+ 0xb9, 0x00, 0x5f, 0x8c, 0x60, 0xd1, 0xe5, 0x15,
+ 0xf7, 0xae, 0x1e, 0x84, 0x88, 0xd1, 0xf6, 0xbc,
+ 0x3a, 0x89, 0x35, 0x22, 0x83, 0x7c, 0xca, 0xf0,
+ 0x33, 0x82, 0x4c, 0x79, 0x3c, 0xfd, 0xb1, 0xae,
+ 0x52, 0x62, 0x55, 0xd2, 0x41, 0x60, 0xc6, 0xbb,
+ 0xfa, 0x0e, 0x59, 0xd6, 0xa8, 0xfe, 0x5d, 0xed,
+ 0x47, 0x3d, 0xe0, 0xea, 0x1f, 0x6e, 0x43, 0x51,
+ 0xec, 0x10, 0x52, 0x56, 0x77, 0x42, 0x6b, 0x52,
+ 0x87, 0xd8, 0xec, 0xe0, 0xaa, 0x76, 0xa5, 0x84,
+ 0x2a, 0x22, 0x24, 0xfd, 0x92, 0x40, 0x88, 0xd5,
+ 0x85, 0x1c, 0x1f, 0x6b, 0x47, 0xa0, 0xc4, 0xe4,
+ 0xef, 0xf4, 0xea, 0xd7, 0x59, 0xac, 0x2a, 0x9e,
+ 0x8c, 0xfa, 0x1f, 0x42, 0x08, 0xfe, 0x4f, 0x74,
+ 0xa0, 0x26, 0xf5, 0xb3, 0x84, 0xf6, 0x58, 0x5f,
+ 0x26, 0x66, 0x3e, 0xd7, 0xe4, 0x22, 0x91, 0x13,
+ 0xc8, 0xac, 0x25, 0x96, 0x23, 0xd8, 0x09, 0xea,
+ 0x45, 0x75, 0x23, 0xb8, 0x5f, 0xc2, 0x90, 0x8b,
+ 0x09, 0xc4, 0xfc, 0x47, 0x6c, 0x6d, 0x0a, 0xef,
+ 0x69, 0xa4, 0x38, 0x19, 0xcf, 0x7d, 0xf9, 0x09,
+ 0x73, 0x9b, 0x60, 0x5a, 0xf7, 0x37, 0xb5, 0xfe,
+ 0x9f, 0xe3, 0x2b, 0x4c, 0x0d, 0x6e, 0x19, 0xf1,
+ 0xd6, 0xc0, 0x70, 0xf3, 0x9d, 0x22, 0x3c, 0xf9,
+ 0x49, 0xce, 0x30, 0x8e, 0x44, 0xb5, 0x76, 0x15,
+ 0x8f, 0x52, 0xfd, 0xa5, 0x04, 0xb8, 0x55, 0x6a,
+ 0x36, 0x59, 0x7c, 0xc4, 0x48, 0xb8, 0xd7, 0xab,
+ 0x05, 0x66, 0xe9, 0x5e, 0x21, 0x6f, 0x6b, 0x36,
+ 0x29, 0xbb, 0xe9, 0xe3, 0xa2, 0x9a, 0xa8, 0xcd,
+ 0x55, 0x25, 0x11, 0xba, 0x5a, 0x58, 0xa0, 0xde,
+ 0xae, 0x19, 0x2a, 0x48, 0x5a, 0xff, 0x36, 0xcd,
+ 0x6d, 0x16, 0x7a, 0x73, 0x38, 0x46, 0xe5, 0x47,
+ 0x59, 0xc8, 0xa2, 0xf6, 0xe2, 0x6c, 0x83, 0xc5,
+ 0x36, 0x2c, 0x83, 0x7d, 0xb4, 0x01, 0x05, 0x69,
+ 0xe7, 0xaf, 0x5c, 0xc4, 0x64, 0x82, 0x12, 0x21,
+ 0xef, 0xf7, 0xd1, 0x7d, 0xb8, 0x8d, 0x8c, 0x98,
+ 0x7c, 0x5f, 0x7d, 0x92, 0x88, 0xb9, 0x94, 0x07,
+ 0x9c, 0xd8, 0xe9, 0x9c, 0x17, 0x38, 0xe3, 0x57,
+ 0x6c, 0xe0, 0xdc, 0xa5, 0x92, 0x42, 0xb3, 0xbd,
+ 0x50, 0xa2, 0x7e, 0xb5, 0xb1, 0x52, 0x72, 0x03,
+ 0x97, 0xd8, 0xaa, 0x9a, 0x1e, 0x75, 0x41, 0x11,
+ 0xa3, 0x4f, 0xcc, 0xd4, 0xe3, 0x73, 0xad, 0x96,
+ 0xdc, 0x47, 0x41, 0x9f, 0xb0, 0xbe, 0x79, 0x91,
+ 0xf5, 0xb6, 0x18, 0xfe, 0xc2, 0x83, 0x18, 0x7d,
+ 0x73, 0xd9, 0x4f, 0x83, 0x84, 0x03, 0xb3, 0xf0,
+ 0x77, 0x66, 0x3d, 0x83, 0x63, 0x2e, 0x2c, 0xf9,
+ 0xdd, 0xa6, 0x1f, 0x89, 0x82, 0xb8, 0x23, 0x42,
+ 0xeb, 0xe2, 0xca, 0x70, 0x82, 0x61, 0x41, 0x0a,
+ 0x6d, 0x5f, 0x75, 0xc5, 0xe2, 0xc4, 0x91, 0x18,
+ 0x44, 0x22, 0xfa, 0x34, 0x10, 0xf5, 0x20, 0xdc,
+ 0xb7, 0xdd, 0x2a, 0x20, 0x77, 0xf5, 0xf9, 0xce,
+ 0xdb, 0xa0, 0x0a, 0x52, 0x2a, 0x4e, 0xdd, 0xcc,
+ 0x97, 0xdf, 0x05, 0xe4, 0x5e, 0xb7, 0xaa, 0xf0,
+ 0xe2, 0x80, 0xff, 0xba, 0x1a, 0x0f, 0xac, 0xdf,
+ 0x02, 0x32, 0xe6, 0xf7, 0xc7, 0x17, 0x13, 0xb7,
+ 0xfc, 0x98, 0x48, 0x8c, 0x0d, 0x82, 0xc9, 0x80,
+ 0x7a, 0xe2, 0x0a, 0xc5, 0xb4, 0xde, 0x7c, 0x3c,
+ 0x79, 0x81, 0x0e, 0x28, 0x65, 0x79, 0x67, 0x82,
+ 0x69, 0x44, 0x66, 0x09, 0xf7, 0x16, 0x1a, 0xf9,
+ 0x7d, 0x80, 0xa1, 0x79, 0x14, 0xa9, 0xc8, 0x20,
+ 0xfb, 0xa2, 0x46, 0xbe, 0x08, 0x35, 0x17, 0x58,
+ 0xc1, 0x1a, 0xda, 0x2a, 0x6b, 0x2e, 0x1e, 0xe6,
+ 0x27, 0x55, 0x7b, 0x19, 0xe2, 0xfb, 0x64, 0xfc,
+ 0x5e, 0x15, 0x54, 0x3c, 0xe7, 0xc2, 0x11, 0x50,
+ 0x30, 0xb8, 0x72, 0x03, 0x0b, 0x1a, 0x9f, 0x86,
+ 0x27, 0x11, 0x5c, 0x06, 0x2b, 0xbd, 0x75, 0x1a,
+ 0x0a, 0xda, 0x01, 0xfa, 0x5c, 0x4a, 0xc1, 0x80,
+ 0x3a, 0x6e, 0x30, 0xc8, 0x2c, 0xeb, 0x56, 0xec,
+ 0x89, 0xfa, 0x35, 0x7b, 0xb2, 0xf0, 0x97, 0x08,
+ 0x86, 0x53, 0xbe, 0xbd, 0x40, 0x41, 0x38, 0x1c,
+ 0xb4, 0x8b, 0x79, 0x2e, 0x18, 0x96, 0x94, 0xde,
+ 0xe8, 0xca, 0xe5, 0x9f, 0x92, 0x9f, 0x15, 0x5d,
+ 0x56, 0x60, 0x5c, 0x09, 0xf9, 0x16, 0xf4, 0x17,
+ 0x0f, 0xf6, 0x4c, 0xda, 0xe6, 0x67, 0x89, 0x9f,
+ 0xca, 0x6c, 0xe7, 0x9b, 0x04, 0x62, 0x0e, 0x26,
+ 0xa6, 0x52, 0xbd, 0x29, 0xff, 0xc7, 0xa4, 0x96,
+ 0xe6, 0x6a, 0x02, 0xa5, 0x2e, 0x7b, 0xfe, 0x97,
+ 0x68, 0x3e, 0x2e, 0x5f, 0x3b, 0x0f, 0x36, 0xd6,
+ 0x98, 0x19, 0x59, 0x48, 0xd2, 0xc6, 0xe1, 0x55,
+ 0x1a, 0x6e, 0xd6, 0xed, 0x2c, 0xba, 0xc3, 0x9e,
+ 0x64, 0xc9, 0x95, 0x86, 0x35, 0x5e, 0x3e, 0x88,
+ 0x69, 0x99, 0x4b, 0xee, 0xbe, 0x9a, 0x99, 0xb5,
+ 0x6e, 0x58, 0xae, 0xdd, 0x22, 0xdb, 0xdd, 0x6b,
+ 0xfc, 0xaf, 0x90, 0xa3, 0x3d, 0xa4, 0xc1, 0x15,
+ 0x92, 0x18, 0x8d, 0xd2, 0x4b, 0x7b, 0x06, 0xd1,
+ 0x37, 0xb5, 0xe2, 0x7c, 0x2c, 0xf0, 0x25, 0xe4,
+ 0x94, 0x2a, 0xbd, 0xe3, 0x82, 0x70, 0x78, 0xa3,
+ 0x82, 0x10, 0x5a, 0x90, 0xd7, 0xa4, 0xfa, 0xaf,
+ 0x1a, 0x88, 0x59, 0xdc, 0x74, 0x12, 0xb4, 0x8e,
+ 0xd7, 0x19, 0x46, 0xf4, 0x84, 0x69, 0x9f, 0xbb,
+ 0x70, 0xa8, 0x4c, 0x52, 0x81, 0xa9, 0xff, 0x76,
+ 0x1c, 0xae, 0xd8, 0x11, 0x3d, 0x7f, 0x7d, 0xc5,
+ 0x12, 0x59, 0x28, 0x18, 0xc2, 0xa2, 0xb7, 0x1c,
+ 0x88, 0xf8, 0xd6, 0x1b, 0xa6, 0x7d, 0x9e, 0xde,
+ 0x29, 0xf8, 0xed, 0xff, 0xeb, 0x92, 0x24, 0x4f,
+ 0x05, 0xaa, 0xd9, 0x49, 0xba, 0x87, 0x59, 0x51,
+ 0xc9, 0x20, 0x5c, 0x9b, 0x74, 0xcf, 0x03, 0xd9,
+ 0x2d, 0x34, 0xc7, 0x5b, 0xa5, 0x40, 0xb2, 0x99,
+ 0xf5, 0xcb, 0xb4, 0xf6, 0xb7, 0x72, 0x4a, 0xd6,
+ 0xbd, 0xb0, 0xf3, 0x93, 0xe0, 0x1b, 0xa8, 0x04,
+ 0x1e, 0x35, 0xd4, 0x80, 0x20, 0xf4, 0x9c, 0x31,
+ 0x6b, 0x45, 0xb9, 0x15, 0xb0, 0x5e, 0xdd, 0x0a,
+ 0x33, 0x9c, 0x83, 0xcd, 0x58, 0x89, 0x50, 0x56,
+ 0xbb, 0x81, 0x00, 0x91, 0x32, 0xf3, 0x1b, 0x3e,
+ 0xcf, 0x45, 0xe1, 0xf9, 0xe1, 0x2c, 0x26, 0x78,
+ 0x93, 0x9a, 0x60, 0x46, 0xc9, 0xb5, 0x5e, 0x6a,
+ 0x28, 0x92, 0x87, 0x3f, 0x63, 0x7b, 0xdb, 0xf7,
+ 0xd0, 0x13, 0x9d, 0x32, 0x40, 0x5e, 0xcf, 0xfb,
+ 0x79, 0x68, 0x47, 0x4c, 0xfd, 0x01, 0x17, 0xe6,
+ 0x97, 0x93, 0x78, 0xbb, 0xa6, 0x27, 0xa3, 0xe8,
+ 0x1a, 0xe8, 0x94, 0x55, 0x7d, 0x08, 0xe5, 0xdc,
+ 0x66, 0xa3, 0x69, 0xc8, 0xca, 0xc5, 0xa1, 0x84,
+ 0x55, 0xde, 0x08, 0x91, 0x16, 0x3a, 0x0c, 0x86,
+ 0xab, 0x27, 0x2b, 0x64, 0x34, 0x02, 0x6c, 0x76,
+ 0x8b, 0xc6, 0xaf, 0xcc, 0xe1, 0xd6, 0x8c, 0x2a,
+ 0x18, 0x3d, 0xa6, 0x1b, 0x37, 0x75, 0x45, 0x73,
+ 0xc2, 0x75, 0xd7, 0x53, 0x78, 0x3a, 0xd6, 0xe8,
+ 0x29, 0xd2, 0x4a, 0xa8, 0x1e, 0x82, 0xf6, 0xb6,
+ 0x81, 0xde, 0x21, 0xed, 0x2b, 0x56, 0xbb, 0xf2,
+ 0xd0, 0x57, 0xc1, 0x7c, 0xd2, 0x6a, 0xd2, 0x56,
+ 0xf5, 0x13, 0x5f, 0x1c, 0x6a, 0x0b, 0x74, 0xfb,
+ 0xe9, 0xfe, 0x9e, 0xea, 0x95, 0xb2, 0x46, 0xab,
+ 0x0a, 0xfc, 0xfd, 0xf3, 0xbb, 0x04, 0x2b, 0x76,
+ 0x1b, 0xa4, 0x74, 0xb0, 0xc1, 0x78, 0xc3, 0x69,
+ 0xe2, 0xb0, 0x01, 0xe1, 0xde, 0x32, 0x4c, 0x8d,
+ 0x1a, 0xb3, 0x38, 0x08, 0xd5, 0xfc, 0x1f, 0xdc,
+ 0x0e, 0x2c, 0x9c, 0xb1, 0xa1, 0x63, 0x17, 0x22,
+ 0xf5, 0x6c, 0x93, 0x70, 0x74, 0x00, 0xf8, 0x39,
+ 0x01, 0x94, 0xd1, 0x32, 0x23, 0x56, 0x5d, 0xa6,
+ 0x02, 0x76, 0x76, 0x93, 0xce, 0x2f, 0x19, 0xe9,
+ 0x17, 0x52, 0xae, 0x6e, 0x2c, 0x6d, 0x61, 0x7f,
+ 0x3b, 0xaa, 0xe0, 0x52, 0x85, 0xc5, 0x65, 0xc1,
+ 0xbb, 0x8e, 0x5b, 0x21, 0xd5, 0xc9, 0x78, 0x83,
+ 0x07, 0x97, 0x4c, 0x62, 0x61, 0x41, 0xd4, 0xfc,
+ 0xc9, 0x39, 0xe3, 0x9b, 0xd0, 0xcc, 0x75, 0xc4,
+ 0x97, 0xe6, 0xdd, 0x2a, 0x5f, 0xa6, 0xe8, 0x59,
+ 0x6c, 0x98, 0xb9, 0x02, 0xe2, 0xa2, 0xd6, 0x68,
+ 0xee, 0x3b, 0x1d, 0xe3, 0x4d, 0x5b, 0x30, 0xef,
+ 0x03, 0xf2, 0xeb, 0x18, 0x57, 0x36, 0xe8, 0xa1,
+ 0xf4, 0x47, 0xfb, 0xcb, 0x8f, 0xcb, 0xc8, 0xf3,
+ 0x4f, 0x74, 0x9d, 0x9d, 0xb1, 0x8d, 0x14, 0x44,
+ 0xd9, 0x19, 0xb4, 0x54, 0x4f, 0x75, 0x19, 0x09,
+ 0xa0, 0x75, 0xbc, 0x3b, 0x82, 0xc6, 0x3f, 0xb8,
+ 0x83, 0x19, 0x6e, 0xd6, 0x37, 0xfe, 0x6e, 0x8a,
+ 0x4e, 0xe0, 0x4a, 0xab, 0x7b, 0xc8, 0xb4, 0x1d,
+ 0xf4, 0xed, 0x27, 0x03, 0x65, 0xa2, 0xa1, 0xae,
+ 0x11, 0xe7, 0x98, 0x78, 0x48, 0x91, 0xd2, 0xd2,
+ 0xd4, 0x23, 0x78, 0x50, 0xb1, 0x5b, 0x85, 0x10,
+ 0x8d, 0xca, 0x5f, 0x0f, 0x71, 0xae, 0x72, 0x9a,
+ 0xf6, 0x25, 0x19, 0x60, 0x06, 0xf7, 0x10, 0x34,
+ 0x18, 0x0d, 0xc9, 0x9f, 0x7b, 0x0c, 0x9b, 0x8f,
+ 0x91, 0x1b, 0x9f, 0xcd, 0x10, 0xee, 0x75, 0xf9,
+ 0x97, 0x66, 0xfc, 0x4d, 0x33, 0x6e, 0x28, 0x2b,
+ 0x92, 0x85, 0x4f, 0xab, 0x43, 0x8d, 0x8f, 0x7d,
+ 0x86, 0xa7, 0xc7, 0xd8, 0xd3, 0x0b, 0x8b, 0x57,
+ 0xb6, 0x1d, 0x95, 0x0d, 0xe9, 0xbc, 0xd9, 0x03,
+ 0xd9, 0x10, 0x19, 0xc3, 0x46, 0x63, 0x55, 0x87,
+ 0x61, 0x79, 0x6c, 0x95, 0x0e, 0x9c, 0xdd, 0xca,
+ 0xc3, 0xf3, 0x64, 0xf0, 0x7d, 0x76, 0xb7, 0x53,
+ 0x67, 0x2b, 0x1e, 0x44, 0x56, 0x81, 0xea, 0x8f,
+ 0x5c, 0x42, 0x16, 0xb8, 0x28, 0xeb, 0x1b, 0x61,
+ 0x10, 0x1e, 0xbf, 0xec, 0xa8
+};
+static const u8 enc_output011[] __initconst = {
+ 0x6a, 0xfc, 0x4b, 0x25, 0xdf, 0xc0, 0xe4, 0xe8,
+ 0x17, 0x4d, 0x4c, 0xc9, 0x7e, 0xde, 0x3a, 0xcc,
+ 0x3c, 0xba, 0x6a, 0x77, 0x47, 0xdb, 0xe3, 0x74,
+ 0x7a, 0x4d, 0x5f, 0x8d, 0x37, 0x55, 0x80, 0x73,
+ 0x90, 0x66, 0x5d, 0x3a, 0x7d, 0x5d, 0x86, 0x5e,
+ 0x8d, 0xfd, 0x83, 0xff, 0x4e, 0x74, 0x6f, 0xf9,
+ 0xe6, 0x70, 0x17, 0x70, 0x3e, 0x96, 0xa7, 0x7e,
+ 0xcb, 0xab, 0x8f, 0x58, 0x24, 0x9b, 0x01, 0xfd,
+ 0xcb, 0xe6, 0x4d, 0x9b, 0xf0, 0x88, 0x94, 0x57,
+ 0x66, 0xef, 0x72, 0x4c, 0x42, 0x6e, 0x16, 0x19,
+ 0x15, 0xea, 0x70, 0x5b, 0xac, 0x13, 0xdb, 0x9f,
+ 0x18, 0xe2, 0x3c, 0x26, 0x97, 0xbc, 0xdc, 0x45,
+ 0x8c, 0x6c, 0x24, 0x69, 0x9c, 0xf7, 0x65, 0x1e,
+ 0x18, 0x59, 0x31, 0x7c, 0xe4, 0x73, 0xbc, 0x39,
+ 0x62, 0xc6, 0x5c, 0x9f, 0xbf, 0xfa, 0x90, 0x03,
+ 0xc9, 0x72, 0x26, 0xb6, 0x1b, 0xc2, 0xb7, 0x3f,
+ 0xf2, 0x13, 0x77, 0xf2, 0x8d, 0xb9, 0x47, 0xd0,
+ 0x53, 0xdd, 0xc8, 0x91, 0x83, 0x8b, 0xb1, 0xce,
+ 0xa3, 0xfe, 0xcd, 0xd9, 0xdd, 0x92, 0x7b, 0xdb,
+ 0xb8, 0xfb, 0xc9, 0x2d, 0x01, 0x59, 0x39, 0x52,
+ 0xad, 0x1b, 0xec, 0xcf, 0xd7, 0x70, 0x13, 0x21,
+ 0xf5, 0x47, 0xaa, 0x18, 0x21, 0x5c, 0xc9, 0x9a,
+ 0xd2, 0x6b, 0x05, 0x9c, 0x01, 0xa1, 0xda, 0x35,
+ 0x5d, 0xb3, 0x70, 0xe6, 0xa9, 0x80, 0x8b, 0x91,
+ 0xb7, 0xb3, 0x5f, 0x24, 0x9a, 0xb7, 0xd1, 0x6b,
+ 0xa1, 0x1c, 0x50, 0xba, 0x49, 0xe0, 0xee, 0x2e,
+ 0x75, 0xac, 0x69, 0xc0, 0xeb, 0x03, 0xdd, 0x19,
+ 0xe5, 0xf6, 0x06, 0xdd, 0xc3, 0xd7, 0x2b, 0x07,
+ 0x07, 0x30, 0xa7, 0x19, 0x0c, 0xbf, 0xe6, 0x18,
+ 0xcc, 0xb1, 0x01, 0x11, 0x85, 0x77, 0x1d, 0x96,
+ 0xa7, 0xa3, 0x00, 0x84, 0x02, 0xa2, 0x83, 0x68,
+ 0xda, 0x17, 0x27, 0xc8, 0x7f, 0x23, 0xb7, 0xf4,
+ 0x13, 0x85, 0xcf, 0xdd, 0x7a, 0x7d, 0x24, 0x57,
+ 0xfe, 0x05, 0x93, 0xf5, 0x74, 0xce, 0xed, 0x0c,
+ 0x20, 0x98, 0x8d, 0x92, 0x30, 0xa1, 0x29, 0x23,
+ 0x1a, 0xa0, 0x4f, 0x69, 0x56, 0x4c, 0xe1, 0xc8,
+ 0xce, 0xf6, 0x9a, 0x0c, 0xa4, 0xfa, 0x04, 0xf6,
+ 0x62, 0x95, 0xf2, 0xfa, 0xc7, 0x40, 0x68, 0x40,
+ 0x8f, 0x41, 0xda, 0xb4, 0x26, 0x6f, 0x70, 0xab,
+ 0x40, 0x61, 0xa4, 0x0e, 0x75, 0xfb, 0x86, 0xeb,
+ 0x9d, 0x9a, 0x1f, 0xec, 0x76, 0x99, 0xe7, 0xea,
+ 0xaa, 0x1e, 0x2d, 0xb5, 0xd4, 0xa6, 0x1a, 0xb8,
+ 0x61, 0x0a, 0x1d, 0x16, 0x5b, 0x98, 0xc2, 0x31,
+ 0x40, 0xe7, 0x23, 0x1d, 0x66, 0x99, 0xc8, 0xc0,
+ 0xd7, 0xce, 0xf3, 0x57, 0x40, 0x04, 0x3f, 0xfc,
+ 0xea, 0xb3, 0xfc, 0xd2, 0xd3, 0x99, 0xa4, 0x94,
+ 0x69, 0xa0, 0xef, 0xd1, 0x85, 0xb3, 0xa6, 0xb1,
+ 0x28, 0xbf, 0x94, 0x67, 0x22, 0xc3, 0x36, 0x46,
+ 0xf8, 0xd2, 0x0f, 0x5f, 0xf4, 0x59, 0x80, 0xe6,
+ 0x2d, 0x43, 0x08, 0x7d, 0x19, 0x09, 0x97, 0xa7,
+ 0x4c, 0x3d, 0x8d, 0xba, 0x65, 0x62, 0xa3, 0x71,
+ 0x33, 0x29, 0x62, 0xdb, 0xc1, 0x33, 0x34, 0x1a,
+ 0x63, 0x33, 0x16, 0xb6, 0x64, 0x7e, 0xab, 0x33,
+ 0xf0, 0xe6, 0x26, 0x68, 0xba, 0x1d, 0x2e, 0x38,
+ 0x08, 0xe6, 0x02, 0xd3, 0x25, 0x2c, 0x47, 0x23,
+ 0x58, 0x34, 0x0f, 0x9d, 0x63, 0x4f, 0x63, 0xbb,
+ 0x7f, 0x3b, 0x34, 0x38, 0xa7, 0xb5, 0x8d, 0x65,
+ 0xd9, 0x9f, 0x79, 0x55, 0x3e, 0x4d, 0xe7, 0x73,
+ 0xd8, 0xf6, 0x98, 0x97, 0x84, 0x60, 0x9c, 0xc8,
+ 0xa9, 0x3c, 0xf6, 0xdc, 0x12, 0x5c, 0xe1, 0xbb,
+ 0x0b, 0x8b, 0x98, 0x9c, 0x9d, 0x26, 0x7c, 0x4a,
+ 0xe6, 0x46, 0x36, 0x58, 0x21, 0x4a, 0xee, 0xca,
+ 0xd7, 0x3b, 0xc2, 0x6c, 0x49, 0x2f, 0xe5, 0xd5,
+ 0x03, 0x59, 0x84, 0x53, 0xcb, 0xfe, 0x92, 0x71,
+ 0x2e, 0x7c, 0x21, 0xcc, 0x99, 0x85, 0x7f, 0xb8,
+ 0x74, 0x90, 0x13, 0x42, 0x3f, 0xe0, 0x6b, 0x1d,
+ 0xf2, 0x4d, 0x54, 0xd4, 0xfc, 0x3a, 0x05, 0xe6,
+ 0x74, 0xaf, 0xa6, 0xa0, 0x2a, 0x20, 0x23, 0x5d,
+ 0x34, 0x5c, 0xd9, 0x3e, 0x4e, 0xfa, 0x93, 0xe7,
+ 0xaa, 0xe9, 0x6f, 0x08, 0x43, 0x67, 0x41, 0xc5,
+ 0xad, 0xfb, 0x31, 0x95, 0x82, 0x73, 0x32, 0xd8,
+ 0xa6, 0xa3, 0xed, 0x0e, 0x2d, 0xf6, 0x5f, 0xfd,
+ 0x80, 0xa6, 0x7a, 0xe0, 0xdf, 0x78, 0x15, 0x29,
+ 0x74, 0x33, 0xd0, 0x9e, 0x83, 0x86, 0x72, 0x22,
+ 0x57, 0x29, 0xb9, 0x9e, 0x5d, 0xd3, 0x1a, 0xb5,
+ 0x96, 0x72, 0x41, 0x3d, 0xf1, 0x64, 0x43, 0x67,
+ 0xee, 0xaa, 0x5c, 0xd3, 0x9a, 0x96, 0x13, 0x11,
+ 0x5d, 0xf3, 0x0c, 0x87, 0x82, 0x1e, 0x41, 0x9e,
+ 0xd0, 0x27, 0xd7, 0x54, 0x3b, 0x67, 0x73, 0x09,
+ 0x91, 0xe9, 0xd5, 0x36, 0xa7, 0xb5, 0x55, 0xe4,
+ 0xf3, 0x21, 0x51, 0x49, 0x22, 0x07, 0x55, 0x4f,
+ 0x44, 0x4b, 0xd2, 0x15, 0x93, 0x17, 0x2a, 0xfa,
+ 0x4d, 0x4a, 0x57, 0xdb, 0x4c, 0xa6, 0xeb, 0xec,
+ 0x53, 0x25, 0x6c, 0x21, 0xed, 0x00, 0x4c, 0x3b,
+ 0xca, 0x14, 0x57, 0xa9, 0xd6, 0x6a, 0xcd, 0x8d,
+ 0x5e, 0x74, 0xac, 0x72, 0xc1, 0x97, 0xe5, 0x1b,
+ 0x45, 0x4e, 0xda, 0xfc, 0xcc, 0x40, 0xe8, 0x48,
+ 0x88, 0x0b, 0xa3, 0xe3, 0x8d, 0x83, 0x42, 0xc3,
+ 0x23, 0xfd, 0x68, 0xb5, 0x8e, 0xf1, 0x9d, 0x63,
+ 0x77, 0xe9, 0xa3, 0x8e, 0x8c, 0x26, 0x6b, 0xbd,
+ 0x72, 0x73, 0x35, 0x0c, 0x03, 0xf8, 0x43, 0x78,
+ 0x52, 0x71, 0x15, 0x1f, 0x71, 0x5d, 0x6e, 0xed,
+ 0xb9, 0xcc, 0x86, 0x30, 0xdb, 0x2b, 0xd3, 0x82,
+ 0x88, 0x23, 0x71, 0x90, 0x53, 0x5c, 0xa9, 0x2f,
+ 0x76, 0x01, 0xb7, 0x9a, 0xfe, 0x43, 0x55, 0xa3,
+ 0x04, 0x9b, 0x0e, 0xe4, 0x59, 0xdf, 0xc9, 0xe9,
+ 0xb1, 0xea, 0x29, 0x28, 0x3c, 0x5c, 0xae, 0x72,
+ 0x84, 0xb6, 0xc6, 0xeb, 0x0c, 0x27, 0x07, 0x74,
+ 0x90, 0x0d, 0x31, 0xb0, 0x00, 0x77, 0xe9, 0x40,
+ 0x70, 0x6f, 0x68, 0xa7, 0xfd, 0x06, 0xec, 0x4b,
+ 0xc0, 0xb7, 0xac, 0xbc, 0x33, 0xb7, 0x6d, 0x0a,
+ 0xbd, 0x12, 0x1b, 0x59, 0xcb, 0xdd, 0x32, 0xf5,
+ 0x1d, 0x94, 0x57, 0x76, 0x9e, 0x0c, 0x18, 0x98,
+ 0x71, 0xd7, 0x2a, 0xdb, 0x0b, 0x7b, 0xa7, 0x71,
+ 0xb7, 0x67, 0x81, 0x23, 0x96, 0xae, 0xb9, 0x7e,
+ 0x32, 0x43, 0x92, 0x8a, 0x19, 0xa0, 0xc4, 0xd4,
+ 0x3b, 0x57, 0xf9, 0x4a, 0x2c, 0xfb, 0x51, 0x46,
+ 0xbb, 0xcb, 0x5d, 0xb3, 0xef, 0x13, 0x93, 0x6e,
+ 0x68, 0x42, 0x54, 0x57, 0xd3, 0x6a, 0x3a, 0x8f,
+ 0x9d, 0x66, 0xbf, 0xbd, 0x36, 0x23, 0xf5, 0x93,
+ 0x83, 0x7b, 0x9c, 0xc0, 0xdd, 0xc5, 0x49, 0xc0,
+ 0x64, 0xed, 0x07, 0x12, 0xb3, 0xe6, 0xe4, 0xe5,
+ 0x38, 0x95, 0x23, 0xb1, 0xa0, 0x3b, 0x1a, 0x61,
+ 0xda, 0x17, 0xac, 0xc3, 0x58, 0xdd, 0x74, 0x64,
+ 0x22, 0x11, 0xe8, 0x32, 0x1d, 0x16, 0x93, 0x85,
+ 0x99, 0xa5, 0x9c, 0x34, 0x55, 0xb1, 0xe9, 0x20,
+ 0x72, 0xc9, 0x28, 0x7b, 0x79, 0x00, 0xa1, 0xa6,
+ 0xa3, 0x27, 0x40, 0x18, 0x8a, 0x54, 0xe0, 0xcc,
+ 0xe8, 0x4e, 0x8e, 0x43, 0x96, 0xe7, 0x3f, 0xc8,
+ 0xe9, 0xb2, 0xf9, 0xc9, 0xda, 0x04, 0x71, 0x50,
+ 0x47, 0xe4, 0xaa, 0xce, 0xa2, 0x30, 0xc8, 0xe4,
+ 0xac, 0xc7, 0x0d, 0x06, 0x2e, 0xe6, 0xe8, 0x80,
+ 0x36, 0x29, 0x9e, 0x01, 0xb8, 0xc3, 0xf0, 0xa0,
+ 0x5d, 0x7a, 0xca, 0x4d, 0xa0, 0x57, 0xbd, 0x2a,
+ 0x45, 0xa7, 0x7f, 0x9c, 0x93, 0x07, 0x8f, 0x35,
+ 0x67, 0x92, 0xe3, 0xe9, 0x7f, 0xa8, 0x61, 0x43,
+ 0x9e, 0x25, 0x4f, 0x33, 0x76, 0x13, 0x6e, 0x12,
+ 0xb9, 0xdd, 0xa4, 0x7c, 0x08, 0x9f, 0x7c, 0xe7,
+ 0x0a, 0x8d, 0x84, 0x06, 0xa4, 0x33, 0x17, 0x34,
+ 0x5e, 0x10, 0x7c, 0xc0, 0xa8, 0x3d, 0x1f, 0x42,
+ 0x20, 0x51, 0x65, 0x5d, 0x09, 0xc3, 0xaa, 0xc0,
+ 0xc8, 0x0d, 0xf0, 0x79, 0xbc, 0x20, 0x1b, 0x95,
+ 0xe7, 0x06, 0x7d, 0x47, 0x20, 0x03, 0x1a, 0x74,
+ 0xdd, 0xe2, 0xd4, 0xae, 0x38, 0x71, 0x9b, 0xf5,
+ 0x80, 0xec, 0x08, 0x4e, 0x56, 0xba, 0x76, 0x12,
+ 0x1a, 0xdf, 0x48, 0xf3, 0xae, 0xb3, 0xe6, 0xe6,
+ 0xbe, 0xc0, 0x91, 0x2e, 0x01, 0xb3, 0x01, 0x86,
+ 0xa2, 0xb9, 0x52, 0xd1, 0x21, 0xae, 0xd4, 0x97,
+ 0x1d, 0xef, 0x41, 0x12, 0x95, 0x3d, 0x48, 0x45,
+ 0x1c, 0x56, 0x32, 0x8f, 0xb8, 0x43, 0xbb, 0x19,
+ 0xf3, 0xca, 0xe9, 0xeb, 0x6d, 0x84, 0xbe, 0x86,
+ 0x06, 0xe2, 0x36, 0xb2, 0x62, 0x9d, 0xd3, 0x4c,
+ 0x48, 0x18, 0x54, 0x13, 0x4e, 0xcf, 0xfd, 0xba,
+ 0x84, 0xb9, 0x30, 0x53, 0xcf, 0xfb, 0xb9, 0x29,
+ 0x8f, 0xdc, 0x9f, 0xef, 0x60, 0x0b, 0x64, 0xf6,
+ 0x8b, 0xee, 0xa6, 0x91, 0xc2, 0x41, 0x6c, 0xf6,
+ 0xfa, 0x79, 0x67, 0x4b, 0xc1, 0x3f, 0xaf, 0x09,
+ 0x81, 0xd4, 0x5d, 0xcb, 0x09, 0xdf, 0x36, 0x31,
+ 0xc0, 0x14, 0x3c, 0x7c, 0x0e, 0x65, 0x95, 0x99,
+ 0x6d, 0xa3, 0xf4, 0xd7, 0x38, 0xee, 0x1a, 0x2b,
+ 0x37, 0xe2, 0xa4, 0x3b, 0x4b, 0xd0, 0x65, 0xca,
+ 0xf8, 0xc3, 0xe8, 0x15, 0x20, 0xef, 0xf2, 0x00,
+ 0xfd, 0x01, 0x09, 0xc5, 0xc8, 0x17, 0x04, 0x93,
+ 0xd0, 0x93, 0x03, 0x55, 0xc5, 0xfe, 0x32, 0xa3,
+ 0x3e, 0x28, 0x2d, 0x3b, 0x93, 0x8a, 0xcc, 0x07,
+ 0x72, 0x80, 0x8b, 0x74, 0x16, 0x24, 0xbb, 0xda,
+ 0x94, 0x39, 0x30, 0x8f, 0xb1, 0xcd, 0x4a, 0x90,
+ 0x92, 0x7c, 0x14, 0x8f, 0x95, 0x4e, 0xac, 0x9b,
+ 0xd8, 0x8f, 0x1a, 0x87, 0xa4, 0x32, 0x27, 0x8a,
+ 0xba, 0xf7, 0x41, 0xcf, 0x84, 0x37, 0x19, 0xe6,
+ 0x06, 0xf5, 0x0e, 0xcf, 0x36, 0xf5, 0x9e, 0x6c,
+ 0xde, 0xbc, 0xff, 0x64, 0x7e, 0x4e, 0x59, 0x57,
+ 0x48, 0xfe, 0x14, 0xf7, 0x9c, 0x93, 0x5d, 0x15,
+ 0xad, 0xcc, 0x11, 0xb1, 0x17, 0x18, 0xb2, 0x7e,
+ 0xcc, 0xab, 0xe9, 0xce, 0x7d, 0x77, 0x5b, 0x51,
+ 0x1b, 0x1e, 0x20, 0xa8, 0x32, 0x06, 0x0e, 0x75,
+ 0x93, 0xac, 0xdb, 0x35, 0x37, 0x1f, 0xe9, 0x19,
+ 0x1d, 0xb4, 0x71, 0x97, 0xd6, 0x4e, 0x2c, 0x08,
+ 0xa5, 0x13, 0xf9, 0x0e, 0x7e, 0x78, 0x6e, 0x14,
+ 0xe0, 0xa9, 0xb9, 0x96, 0x4c, 0x80, 0x82, 0xba,
+ 0x17, 0xb3, 0x9d, 0x69, 0xb0, 0x84, 0x46, 0xff,
+ 0xf9, 0x52, 0x79, 0x94, 0x58, 0x3a, 0x62, 0x90,
+ 0x15, 0x35, 0x71, 0x10, 0x37, 0xed, 0xa1, 0x8e,
+ 0x53, 0x6e, 0xf4, 0x26, 0x57, 0x93, 0x15, 0x93,
+ 0xf6, 0x81, 0x2c, 0x5a, 0x10, 0xda, 0x92, 0xad,
+ 0x2f, 0xdb, 0x28, 0x31, 0x2d, 0x55, 0x04, 0xd2,
+ 0x06, 0x28, 0x8c, 0x1e, 0xdc, 0xea, 0x54, 0xac,
+ 0xff, 0xb7, 0x6c, 0x30, 0x15, 0xd4, 0xb4, 0x0d,
+ 0x00, 0x93, 0x57, 0xdd, 0xd2, 0x07, 0x07, 0x06,
+ 0xd9, 0x43, 0x9b, 0xcd, 0x3a, 0xf4, 0x7d, 0x4c,
+ 0x36, 0x5d, 0x23, 0xa2, 0xcc, 0x57, 0x40, 0x91,
+ 0xe9, 0x2c, 0x2f, 0x2c, 0xd5, 0x30, 0x9b, 0x17,
+ 0xb0, 0xc9, 0xf7, 0xa7, 0x2f, 0xd1, 0x93, 0x20,
+ 0x6b, 0xc6, 0xc1, 0xe4, 0x6f, 0xcb, 0xd1, 0xe7,
+ 0x09, 0x0f, 0x9e, 0xdc, 0xaa, 0x9f, 0x2f, 0xdf,
+ 0x56, 0x9f, 0xd4, 0x33, 0x04, 0xaf, 0xd3, 0x6c,
+ 0x58, 0x61, 0xf0, 0x30, 0xec, 0xf2, 0x7f, 0xf2,
+ 0x9c, 0xdf, 0x39, 0xbb, 0x6f, 0xa2, 0x8c, 0x7e,
+ 0xc4, 0x22, 0x51, 0x71, 0xc0, 0x4d, 0x14, 0x1a,
+ 0xc4, 0xcd, 0x04, 0xd9, 0x87, 0x08, 0x50, 0x05,
+ 0xcc, 0xaf, 0xf6, 0xf0, 0x8f, 0x92, 0x54, 0x58,
+ 0xc2, 0xc7, 0x09, 0x7a, 0x59, 0x02, 0x05, 0xe8,
+ 0xb0, 0x86, 0xd9, 0xbf, 0x7b, 0x35, 0x51, 0x4d,
+ 0xaf, 0x08, 0x97, 0x2c, 0x65, 0xda, 0x2a, 0x71,
+ 0x3a, 0xa8, 0x51, 0xcc, 0xf2, 0x73, 0x27, 0xc3,
+ 0xfd, 0x62, 0xcf, 0xe3, 0xb2, 0xca, 0xcb, 0xbe,
+ 0x1a, 0x0a, 0xa1, 0x34, 0x7b, 0x77, 0xc4, 0x62,
+ 0x68, 0x78, 0x5f, 0x94, 0x07, 0x04, 0x65, 0x16,
+ 0x4b, 0x61, 0xcb, 0xff, 0x75, 0x26, 0x50, 0x66,
+ 0x1f, 0x6e, 0x93, 0xf8, 0xc5, 0x51, 0xeb, 0xa4,
+ 0x4a, 0x48, 0x68, 0x6b, 0xe2, 0x5e, 0x44, 0xb2,
+ 0x50, 0x2c, 0x6c, 0xae, 0x79, 0x4e, 0x66, 0x35,
+ 0x81, 0x50, 0xac, 0xbc, 0x3f, 0xb1, 0x0c, 0xf3,
+ 0x05, 0x3c, 0x4a, 0xa3, 0x6c, 0x2a, 0x79, 0xb4,
+ 0xb7, 0xab, 0xca, 0xc7, 0x9b, 0x8e, 0xcd, 0x5f,
+ 0x11, 0x03, 0xcb, 0x30, 0xa3, 0xab, 0xda, 0xfe,
+ 0x64, 0xb9, 0xbb, 0xd8, 0x5e, 0x3a, 0x1a, 0x56,
+ 0xe5, 0x05, 0x48, 0x90, 0x1e, 0x61, 0x69, 0x1b,
+ 0x22, 0xe6, 0x1a, 0x3c, 0x75, 0xad, 0x1f, 0x37,
+ 0x28, 0xdc, 0xe4, 0x6d, 0xbd, 0x42, 0xdc, 0xd3,
+ 0xc8, 0xb6, 0x1c, 0x48, 0xfe, 0x94, 0x77, 0x7f,
+ 0xbd, 0x62, 0xac, 0xa3, 0x47, 0x27, 0xcf, 0x5f,
+ 0xd9, 0xdb, 0xaf, 0xec, 0xf7, 0x5e, 0xc1, 0xb0,
+ 0x9d, 0x01, 0x26, 0x99, 0x7e, 0x8f, 0x03, 0x70,
+ 0xb5, 0x42, 0xbe, 0x67, 0x28, 0x1b, 0x7c, 0xbd,
+ 0x61, 0x21, 0x97, 0xcc, 0x5c, 0xe1, 0x97, 0x8f,
+ 0x8d, 0xde, 0x2b, 0xaa, 0xa7, 0x71, 0x1d, 0x1e,
+ 0x02, 0x73, 0x70, 0x58, 0x32, 0x5b, 0x1d, 0x67,
+ 0x3d, 0xe0, 0x74, 0x4f, 0x03, 0xf2, 0x70, 0x51,
+ 0x79, 0xf1, 0x61, 0x70, 0x15, 0x74, 0x9d, 0x23,
+ 0x89, 0xde, 0xac, 0xfd, 0xde, 0xd0, 0x1f, 0xc3,
+ 0x87, 0x44, 0x35, 0x4b, 0xe5, 0xb0, 0x60, 0xc5,
+ 0x22, 0xe4, 0x9e, 0xca, 0xeb, 0xd5, 0x3a, 0x09,
+ 0x45, 0xa4, 0xdb, 0xfa, 0x3f, 0xeb, 0x1b, 0xc7,
+ 0xc8, 0x14, 0x99, 0x51, 0x92, 0x10, 0xed, 0xed,
+ 0x28, 0xe0, 0xa1, 0xf8, 0x26, 0xcf, 0xcd, 0xcb,
+ 0x63, 0xa1, 0x3b, 0xe3, 0xdf, 0x7e, 0xfe, 0xa6,
+ 0xf0, 0x81, 0x9a, 0xbf, 0x55, 0xde, 0x54, 0xd5,
+ 0x56, 0x60, 0x98, 0x10, 0x68, 0xf4, 0x38, 0x96,
+ 0x8e, 0x6f, 0x1d, 0x44, 0x7f, 0xd6, 0x2f, 0xfe,
+ 0x55, 0xfb, 0x0c, 0x7e, 0x67, 0xe2, 0x61, 0x44,
+ 0xed, 0xf2, 0x35, 0x30, 0x5d, 0xe9, 0xc7, 0xd6,
+ 0x6d, 0xe0, 0xa0, 0xed, 0xf3, 0xfc, 0xd8, 0x3e,
+ 0x0a, 0x7b, 0xcd, 0xaf, 0x65, 0x68, 0x18, 0xc0,
+ 0xec, 0x04, 0x1c, 0x74, 0x6d, 0xe2, 0x6e, 0x79,
+ 0xd4, 0x11, 0x2b, 0x62, 0xd5, 0x27, 0xad, 0x4f,
+ 0x01, 0x59, 0x73, 0xcc, 0x6a, 0x53, 0xfb, 0x2d,
+ 0xd5, 0x4e, 0x99, 0x21, 0x65, 0x4d, 0xf5, 0x82,
+ 0xf7, 0xd8, 0x42, 0xce, 0x6f, 0x3d, 0x36, 0x47,
+ 0xf1, 0x05, 0x16, 0xe8, 0x1b, 0x6a, 0x8f, 0x93,
+ 0xf2, 0x8f, 0x37, 0x40, 0x12, 0x28, 0xa3, 0xe6,
+ 0xb9, 0x17, 0x4a, 0x1f, 0xb1, 0xd1, 0x66, 0x69,
+ 0x86, 0xc4, 0xfc, 0x97, 0xae, 0x3f, 0x8f, 0x1e,
+ 0x2b, 0xdf, 0xcd, 0xf9, 0x3c
+};
+static const u8 enc_assoc011[] __initconst = {
+ 0xd6, 0x31, 0xda, 0x5d, 0x42, 0x5e, 0xd7
+};
+static const u8 enc_nonce011[] __initconst = {
+ 0xfd, 0x87, 0xd4, 0xd8, 0x62, 0xfd, 0xec, 0xaa
+};
+static const u8 enc_key011[] __initconst = {
+ 0x35, 0x4e, 0xb5, 0x70, 0x50, 0x42, 0x8a, 0x85,
+ 0xf2, 0xfb, 0xed, 0x7b, 0xd0, 0x9e, 0x97, 0xca,
+ 0xfa, 0x98, 0x66, 0x63, 0xee, 0x37, 0xcc, 0x52,
+ 0xfe, 0xd1, 0xdf, 0x95, 0x15, 0x34, 0x29, 0x38
+};
+
+static const u8 enc_input012[] __initconst = {
+ 0x74, 0xa6, 0x3e, 0xe4, 0xb1, 0xcb, 0xaf, 0xb0,
+ 0x40, 0xe5, 0x0f, 0x9e, 0xf1, 0xf2, 0x89, 0xb5,
+ 0x42, 0x34, 0x8a, 0xa1, 0x03, 0xb7, 0xe9, 0x57,
+ 0x46, 0xbe, 0x20, 0xe4, 0x6e, 0xb0, 0xeb, 0xff,
+ 0xea, 0x07, 0x7e, 0xef, 0xe2, 0x55, 0x9f, 0xe5,
+ 0x78, 0x3a, 0xb7, 0x83, 0xc2, 0x18, 0x40, 0x7b,
+ 0xeb, 0xcd, 0x81, 0xfb, 0x90, 0x12, 0x9e, 0x46,
+ 0xa9, 0xd6, 0x4a, 0xba, 0xb0, 0x62, 0xdb, 0x6b,
+ 0x99, 0xc4, 0xdb, 0x54, 0x4b, 0xb8, 0xa5, 0x71,
+ 0xcb, 0xcd, 0x63, 0x32, 0x55, 0xfb, 0x31, 0xf0,
+ 0x38, 0xf5, 0xbe, 0x78, 0xe4, 0x45, 0xce, 0x1b,
+ 0x6a, 0x5b, 0x0e, 0xf4, 0x16, 0xe4, 0xb1, 0x3d,
+ 0xf6, 0x63, 0x7b, 0xa7, 0x0c, 0xde, 0x6f, 0x8f,
+ 0x74, 0xdf, 0xe0, 0x1e, 0x9d, 0xce, 0x8f, 0x24,
+ 0xef, 0x23, 0x35, 0x33, 0x7b, 0x83, 0x34, 0x23,
+ 0x58, 0x74, 0x14, 0x77, 0x1f, 0xc2, 0x4f, 0x4e,
+ 0xc6, 0x89, 0xf9, 0x52, 0x09, 0x37, 0x64, 0x14,
+ 0xc4, 0x01, 0x6b, 0x9d, 0x77, 0xe8, 0x90, 0x5d,
+ 0xa8, 0x4a, 0x2a, 0xef, 0x5c, 0x7f, 0xeb, 0xbb,
+ 0xb2, 0xc6, 0x93, 0x99, 0x66, 0xdc, 0x7f, 0xd4,
+ 0x9e, 0x2a, 0xca, 0x8d, 0xdb, 0xe7, 0x20, 0xcf,
+ 0xe4, 0x73, 0xae, 0x49, 0x7d, 0x64, 0x0f, 0x0e,
+ 0x28, 0x46, 0xa9, 0xa8, 0x32, 0xe4, 0x0e, 0xf6,
+ 0x51, 0x53, 0xb8, 0x3c, 0xb1, 0xff, 0xa3, 0x33,
+ 0x41, 0x75, 0xff, 0xf1, 0x6f, 0xf1, 0xfb, 0xbb,
+ 0x83, 0x7f, 0x06, 0x9b, 0xe7, 0x1b, 0x0a, 0xe0,
+ 0x5c, 0x33, 0x60, 0x5b, 0xdb, 0x5b, 0xed, 0xfe,
+ 0xa5, 0x16, 0x19, 0x72, 0xa3, 0x64, 0x23, 0x00,
+ 0x02, 0xc7, 0xf3, 0x6a, 0x81, 0x3e, 0x44, 0x1d,
+ 0x79, 0x15, 0x5f, 0x9a, 0xde, 0xe2, 0xfd, 0x1b,
+ 0x73, 0xc1, 0xbc, 0x23, 0xba, 0x31, 0xd2, 0x50,
+ 0xd5, 0xad, 0x7f, 0x74, 0xa7, 0xc9, 0xf8, 0x3e,
+ 0x2b, 0x26, 0x10, 0xf6, 0x03, 0x36, 0x74, 0xe4,
+ 0x0e, 0x6a, 0x72, 0xb7, 0x73, 0x0a, 0x42, 0x28,
+ 0xc2, 0xad, 0x5e, 0x03, 0xbe, 0xb8, 0x0b, 0xa8,
+ 0x5b, 0xd4, 0xb8, 0xba, 0x52, 0x89, 0xb1, 0x9b,
+ 0xc1, 0xc3, 0x65, 0x87, 0xed, 0xa5, 0xf4, 0x86,
+ 0xfd, 0x41, 0x80, 0x91, 0x27, 0x59, 0x53, 0x67,
+ 0x15, 0x78, 0x54, 0x8b, 0x2d, 0x3d, 0xc7, 0xff,
+ 0x02, 0x92, 0x07, 0x5f, 0x7a, 0x4b, 0x60, 0x59,
+ 0x3c, 0x6f, 0x5c, 0xd8, 0xec, 0x95, 0xd2, 0xfe,
+ 0xa0, 0x3b, 0xd8, 0x3f, 0xd1, 0x69, 0xa6, 0xd6,
+ 0x41, 0xb2, 0xf4, 0x4d, 0x12, 0xf4, 0x58, 0x3e,
+ 0x66, 0x64, 0x80, 0x31, 0x9b, 0xa8, 0x4c, 0x8b,
+ 0x07, 0xb2, 0xec, 0x66, 0x94, 0x66, 0x47, 0x50,
+ 0x50, 0x5f, 0x18, 0x0b, 0x0e, 0xd6, 0xc0, 0x39,
+ 0x21, 0x13, 0x9e, 0x33, 0xbc, 0x79, 0x36, 0x02,
+ 0x96, 0x70, 0xf0, 0x48, 0x67, 0x2f, 0x26, 0xe9,
+ 0x6d, 0x10, 0xbb, 0xd6, 0x3f, 0xd1, 0x64, 0x7a,
+ 0x2e, 0xbe, 0x0c, 0x61, 0xf0, 0x75, 0x42, 0x38,
+ 0x23, 0xb1, 0x9e, 0x9f, 0x7c, 0x67, 0x66, 0xd9,
+ 0x58, 0x9a, 0xf1, 0xbb, 0x41, 0x2a, 0x8d, 0x65,
+ 0x84, 0x94, 0xfc, 0xdc, 0x6a, 0x50, 0x64, 0xdb,
+ 0x56, 0x33, 0x76, 0x00, 0x10, 0xed, 0xbe, 0xd2,
+ 0x12, 0xf6, 0xf6, 0x1b, 0xa2, 0x16, 0xde, 0xae,
+ 0x31, 0x95, 0xdd, 0xb1, 0x08, 0x7e, 0x4e, 0xee,
+ 0xe7, 0xf9, 0xa5, 0xfb, 0x5b, 0x61, 0x43, 0x00,
+ 0x40, 0xf6, 0x7e, 0x02, 0x04, 0x32, 0x4e, 0x0c,
+ 0xe2, 0x66, 0x0d, 0xd7, 0x07, 0x98, 0x0e, 0xf8,
+ 0x72, 0x34, 0x6d, 0x95, 0x86, 0xd7, 0xcb, 0x31,
+ 0x54, 0x47, 0xd0, 0x38, 0x29, 0x9c, 0x5a, 0x68,
+ 0xd4, 0x87, 0x76, 0xc9, 0xe7, 0x7e, 0xe3, 0xf4,
+ 0x81, 0x6d, 0x18, 0xcb, 0xc9, 0x05, 0xaf, 0xa0,
+ 0xfb, 0x66, 0xf7, 0xf1, 0x1c, 0xc6, 0x14, 0x11,
+ 0x4f, 0x2b, 0x79, 0x42, 0x8b, 0xbc, 0xac, 0xe7,
+ 0x6c, 0xfe, 0x0f, 0x58, 0xe7, 0x7c, 0x78, 0x39,
+ 0x30, 0xb0, 0x66, 0x2c, 0x9b, 0x6d, 0x3a, 0xe1,
+ 0xcf, 0xc9, 0xa4, 0x0e, 0x6d, 0x6d, 0x8a, 0xa1,
+ 0x3a, 0xe7, 0x28, 0xd4, 0x78, 0x4c, 0xa6, 0xa2,
+ 0x2a, 0xa6, 0x03, 0x30, 0xd7, 0xa8, 0x25, 0x66,
+ 0x87, 0x2f, 0x69, 0x5c, 0x4e, 0xdd, 0xa5, 0x49,
+ 0x5d, 0x37, 0x4a, 0x59, 0xc4, 0xaf, 0x1f, 0xa2,
+ 0xe4, 0xf8, 0xa6, 0x12, 0x97, 0xd5, 0x79, 0xf5,
+ 0xe2, 0x4a, 0x2b, 0x5f, 0x61, 0xe4, 0x9e, 0xe3,
+ 0xee, 0xb8, 0xa7, 0x5b, 0x2f, 0xf4, 0x9e, 0x6c,
+ 0xfb, 0xd1, 0xc6, 0x56, 0x77, 0xba, 0x75, 0xaa,
+ 0x3d, 0x1a, 0xa8, 0x0b, 0xb3, 0x68, 0x24, 0x00,
+ 0x10, 0x7f, 0xfd, 0xd7, 0xa1, 0x8d, 0x83, 0x54,
+ 0x4f, 0x1f, 0xd8, 0x2a, 0xbe, 0x8a, 0x0c, 0x87,
+ 0xab, 0xa2, 0xde, 0xc3, 0x39, 0xbf, 0x09, 0x03,
+ 0xa5, 0xf3, 0x05, 0x28, 0xe1, 0xe1, 0xee, 0x39,
+ 0x70, 0x9c, 0xd8, 0x81, 0x12, 0x1e, 0x02, 0x40,
+ 0xd2, 0x6e, 0xf0, 0xeb, 0x1b, 0x3d, 0x22, 0xc6,
+ 0xe5, 0xe3, 0xb4, 0x5a, 0x98, 0xbb, 0xf0, 0x22,
+ 0x28, 0x8d, 0xe5, 0xd3, 0x16, 0x48, 0x24, 0xa5,
+ 0xe6, 0x66, 0x0c, 0xf9, 0x08, 0xf9, 0x7e, 0x1e,
+ 0xe1, 0x28, 0x26, 0x22, 0xc7, 0xc7, 0x0a, 0x32,
+ 0x47, 0xfa, 0xa3, 0xbe, 0x3c, 0xc4, 0xc5, 0x53,
+ 0x0a, 0xd5, 0x94, 0x4a, 0xd7, 0x93, 0xd8, 0x42,
+ 0x99, 0xb9, 0x0a, 0xdb, 0x56, 0xf7, 0xb9, 0x1c,
+ 0x53, 0x4f, 0xfa, 0xd3, 0x74, 0xad, 0xd9, 0x68,
+ 0xf1, 0x1b, 0xdf, 0x61, 0xc6, 0x5e, 0xa8, 0x48,
+ 0xfc, 0xd4, 0x4a, 0x4c, 0x3c, 0x32, 0xf7, 0x1c,
+ 0x96, 0x21, 0x9b, 0xf9, 0xa3, 0xcc, 0x5a, 0xce,
+ 0xd5, 0xd7, 0x08, 0x24, 0xf6, 0x1c, 0xfd, 0xdd,
+ 0x38, 0xc2, 0x32, 0xe9, 0xb8, 0xe7, 0xb6, 0xfa,
+ 0x9d, 0x45, 0x13, 0x2c, 0x83, 0xfd, 0x4a, 0x69,
+ 0x82, 0xcd, 0xdc, 0xb3, 0x76, 0x0c, 0x9e, 0xd8,
+ 0xf4, 0x1b, 0x45, 0x15, 0xb4, 0x97, 0xe7, 0x58,
+ 0x34, 0xe2, 0x03, 0x29, 0x5a, 0xbf, 0xb6, 0xe0,
+ 0x5d, 0x13, 0xd9, 0x2b, 0xb4, 0x80, 0xb2, 0x45,
+ 0x81, 0x6a, 0x2e, 0x6c, 0x89, 0x7d, 0xee, 0xbb,
+ 0x52, 0xdd, 0x1f, 0x18, 0xe7, 0x13, 0x6b, 0x33,
+ 0x0e, 0xea, 0x36, 0x92, 0x77, 0x7b, 0x6d, 0x9c,
+ 0x5a, 0x5f, 0x45, 0x7b, 0x7b, 0x35, 0x62, 0x23,
+ 0xd1, 0xbf, 0x0f, 0xd0, 0x08, 0x1b, 0x2b, 0x80,
+ 0x6b, 0x7e, 0xf1, 0x21, 0x47, 0xb0, 0x57, 0xd1,
+ 0x98, 0x72, 0x90, 0x34, 0x1c, 0x20, 0x04, 0xff,
+ 0x3d, 0x5c, 0xee, 0x0e, 0x57, 0x5f, 0x6f, 0x24,
+ 0x4e, 0x3c, 0xea, 0xfc, 0xa5, 0xa9, 0x83, 0xc9,
+ 0x61, 0xb4, 0x51, 0x24, 0xf8, 0x27, 0x5e, 0x46,
+ 0x8c, 0xb1, 0x53, 0x02, 0x96, 0x35, 0xba, 0xb8,
+ 0x4c, 0x71, 0xd3, 0x15, 0x59, 0x35, 0x22, 0x20,
+ 0xad, 0x03, 0x9f, 0x66, 0x44, 0x3b, 0x9c, 0x35,
+ 0x37, 0x1f, 0x9b, 0xbb, 0xf3, 0xdb, 0x35, 0x63,
+ 0x30, 0x64, 0xaa, 0xa2, 0x06, 0xa8, 0x5d, 0xbb,
+ 0xe1, 0x9f, 0x70, 0xec, 0x82, 0x11, 0x06, 0x36,
+ 0xec, 0x8b, 0x69, 0x66, 0x24, 0x44, 0xc9, 0x4a,
+ 0x57, 0xbb, 0x9b, 0x78, 0x13, 0xce, 0x9c, 0x0c,
+ 0xba, 0x92, 0x93, 0x63, 0xb8, 0xe2, 0x95, 0x0f,
+ 0x0f, 0x16, 0x39, 0x52, 0xfd, 0x3a, 0x6d, 0x02,
+ 0x4b, 0xdf, 0x13, 0xd3, 0x2a, 0x22, 0xb4, 0x03,
+ 0x7c, 0x54, 0x49, 0x96, 0x68, 0x54, 0x10, 0xfa,
+ 0xef, 0xaa, 0x6c, 0xe8, 0x22, 0xdc, 0x71, 0x16,
+ 0x13, 0x1a, 0xf6, 0x28, 0xe5, 0x6d, 0x77, 0x3d,
+ 0xcd, 0x30, 0x63, 0xb1, 0x70, 0x52, 0xa1, 0xc5,
+ 0x94, 0x5f, 0xcf, 0xe8, 0xb8, 0x26, 0x98, 0xf7,
+ 0x06, 0xa0, 0x0a, 0x70, 0xfa, 0x03, 0x80, 0xac,
+ 0xc1, 0xec, 0xd6, 0x4c, 0x54, 0xd7, 0xfe, 0x47,
+ 0xb6, 0x88, 0x4a, 0xf7, 0x71, 0x24, 0xee, 0xf3,
+ 0xd2, 0xc2, 0x4a, 0x7f, 0xfe, 0x61, 0xc7, 0x35,
+ 0xc9, 0x37, 0x67, 0xcb, 0x24, 0x35, 0xda, 0x7e,
+ 0xca, 0x5f, 0xf3, 0x8d, 0xd4, 0x13, 0x8e, 0xd6,
+ 0xcb, 0x4d, 0x53, 0x8f, 0x53, 0x1f, 0xc0, 0x74,
+ 0xf7, 0x53, 0xb9, 0x5e, 0x23, 0x37, 0xba, 0x6e,
+ 0xe3, 0x9d, 0x07, 0x55, 0x25, 0x7b, 0xe6, 0x2a,
+ 0x64, 0xd1, 0x32, 0xdd, 0x54, 0x1b, 0x4b, 0xc0,
+ 0xe1, 0xd7, 0x69, 0x58, 0xf8, 0x93, 0x29, 0xc4,
+ 0xdd, 0x23, 0x2f, 0xa5, 0xfc, 0x9d, 0x7e, 0xf8,
+ 0xd4, 0x90, 0xcd, 0x82, 0x55, 0xdc, 0x16, 0x16,
+ 0x9f, 0x07, 0x52, 0x9b, 0x9d, 0x25, 0xed, 0x32,
+ 0xc5, 0x7b, 0xdf, 0xf6, 0x83, 0x46, 0x3d, 0x65,
+ 0xb7, 0xef, 0x87, 0x7a, 0x12, 0x69, 0x8f, 0x06,
+ 0x7c, 0x51, 0x15, 0x4a, 0x08, 0xe8, 0xac, 0x9a,
+ 0x0c, 0x24, 0xa7, 0x27, 0xd8, 0x46, 0x2f, 0xe7,
+ 0x01, 0x0e, 0x1c, 0xc6, 0x91, 0xb0, 0x6e, 0x85,
+ 0x65, 0xf0, 0x29, 0x0d, 0x2e, 0x6b, 0x3b, 0xfb,
+ 0x4b, 0xdf, 0xe4, 0x80, 0x93, 0x03, 0x66, 0x46,
+ 0x3e, 0x8a, 0x6e, 0xf3, 0x5e, 0x4d, 0x62, 0x0e,
+ 0x49, 0x05, 0xaf, 0xd4, 0xf8, 0x21, 0x20, 0x61,
+ 0x1d, 0x39, 0x17, 0xf4, 0x61, 0x47, 0x95, 0xfb,
+ 0x15, 0x2e, 0xb3, 0x4f, 0xd0, 0x5d, 0xf5, 0x7d,
+ 0x40, 0xda, 0x90, 0x3c, 0x6b, 0xcb, 0x17, 0x00,
+ 0x13, 0x3b, 0x64, 0x34, 0x1b, 0xf0, 0xf2, 0xe5,
+ 0x3b, 0xb2, 0xc7, 0xd3, 0x5f, 0x3a, 0x44, 0xa6,
+ 0x9b, 0xb7, 0x78, 0x0e, 0x42, 0x5d, 0x4c, 0xc1,
+ 0xe9, 0xd2, 0xcb, 0xb7, 0x78, 0xd1, 0xfe, 0x9a,
+ 0xb5, 0x07, 0xe9, 0xe0, 0xbe, 0xe2, 0x8a, 0xa7,
+ 0x01, 0x83, 0x00, 0x8c, 0x5c, 0x08, 0xe6, 0x63,
+ 0x12, 0x92, 0xb7, 0xb7, 0xa6, 0x19, 0x7d, 0x38,
+ 0x13, 0x38, 0x92, 0x87, 0x24, 0xf9, 0x48, 0xb3,
+ 0x5e, 0x87, 0x6a, 0x40, 0x39, 0x5c, 0x3f, 0xed,
+ 0x8f, 0xee, 0xdb, 0x15, 0x82, 0x06, 0xda, 0x49,
+ 0x21, 0x2b, 0xb5, 0xbf, 0x32, 0x7c, 0x9f, 0x42,
+ 0x28, 0x63, 0xcf, 0xaf, 0x1e, 0xf8, 0xc6, 0xa0,
+ 0xd1, 0x02, 0x43, 0x57, 0x62, 0xec, 0x9b, 0x0f,
+ 0x01, 0x9e, 0x71, 0xd8, 0x87, 0x9d, 0x01, 0xc1,
+ 0x58, 0x77, 0xd9, 0xaf, 0xb1, 0x10, 0x7e, 0xdd,
+ 0xa6, 0x50, 0x96, 0xe5, 0xf0, 0x72, 0x00, 0x6d,
+ 0x4b, 0xf8, 0x2a, 0x8f, 0x19, 0xf3, 0x22, 0x88,
+ 0x11, 0x4a, 0x8b, 0x7c, 0xfd, 0xb7, 0xed, 0xe1,
+ 0xf6, 0x40, 0x39, 0xe0, 0xe9, 0xf6, 0x3d, 0x25,
+ 0xe6, 0x74, 0x3c, 0x58, 0x57, 0x7f, 0xe1, 0x22,
+ 0x96, 0x47, 0x31, 0x91, 0xba, 0x70, 0x85, 0x28,
+ 0x6b, 0x9f, 0x6e, 0x25, 0xac, 0x23, 0x66, 0x2f,
+ 0x29, 0x88, 0x28, 0xce, 0x8c, 0x5c, 0x88, 0x53,
+ 0xd1, 0x3b, 0xcc, 0x6a, 0x51, 0xb2, 0xe1, 0x28,
+ 0x3f, 0x91, 0xb4, 0x0d, 0x00, 0x3a, 0xe3, 0xf8,
+ 0xc3, 0x8f, 0xd7, 0x96, 0x62, 0x0e, 0x2e, 0xfc,
+ 0xc8, 0x6c, 0x77, 0xa6, 0x1d, 0x22, 0xc1, 0xb8,
+ 0xe6, 0x61, 0xd7, 0x67, 0x36, 0x13, 0x7b, 0xbb,
+ 0x9b, 0x59, 0x09, 0xa6, 0xdf, 0xf7, 0x6b, 0xa3,
+ 0x40, 0x1a, 0xf5, 0x4f, 0xb4, 0xda, 0xd3, 0xf3,
+ 0x81, 0x93, 0xc6, 0x18, 0xd9, 0x26, 0xee, 0xac,
+ 0xf0, 0xaa, 0xdf, 0xc5, 0x9c, 0xca, 0xc2, 0xa2,
+ 0xcc, 0x7b, 0x5c, 0x24, 0xb0, 0xbc, 0xd0, 0x6a,
+ 0x4d, 0x89, 0x09, 0xb8, 0x07, 0xfe, 0x87, 0xad,
+ 0x0a, 0xea, 0xb8, 0x42, 0xf9, 0x5e, 0xb3, 0x3e,
+ 0x36, 0x4c, 0xaf, 0x75, 0x9e, 0x1c, 0xeb, 0xbd,
+ 0xbc, 0xbb, 0x80, 0x40, 0xa7, 0x3a, 0x30, 0xbf,
+ 0xa8, 0x44, 0xf4, 0xeb, 0x38, 0xad, 0x29, 0xba,
+ 0x23, 0xed, 0x41, 0x0c, 0xea, 0xd2, 0xbb, 0x41,
+ 0x18, 0xd6, 0xb9, 0xba, 0x65, 0x2b, 0xa3, 0x91,
+ 0x6d, 0x1f, 0xa9, 0xf4, 0xd1, 0x25, 0x8d, 0x4d,
+ 0x38, 0xff, 0x64, 0xa0, 0xec, 0xde, 0xa6, 0xb6,
+ 0x79, 0xab, 0x8e, 0x33, 0x6c, 0x47, 0xde, 0xaf,
+ 0x94, 0xa4, 0xa5, 0x86, 0x77, 0x55, 0x09, 0x92,
+ 0x81, 0x31, 0x76, 0xc7, 0x34, 0x22, 0x89, 0x8e,
+ 0x3d, 0x26, 0x26, 0xd7, 0xfc, 0x1e, 0x16, 0x72,
+ 0x13, 0x33, 0x63, 0xd5, 0x22, 0xbe, 0xb8, 0x04,
+ 0x34, 0x84, 0x41, 0xbb, 0x80, 0xd0, 0x9f, 0x46,
+ 0x48, 0x07, 0xa7, 0xfc, 0x2b, 0x3a, 0x75, 0x55,
+ 0x8c, 0xc7, 0x6a, 0xbd, 0x7e, 0x46, 0x08, 0x84,
+ 0x0f, 0xd5, 0x74, 0xc0, 0x82, 0x8e, 0xaa, 0x61,
+ 0x05, 0x01, 0xb2, 0x47, 0x6e, 0x20, 0x6a, 0x2d,
+ 0x58, 0x70, 0x48, 0x32, 0xa7, 0x37, 0xd2, 0xb8,
+ 0x82, 0x1a, 0x51, 0xb9, 0x61, 0xdd, 0xfd, 0x9d,
+ 0x6b, 0x0e, 0x18, 0x97, 0xf8, 0x45, 0x5f, 0x87,
+ 0x10, 0xcf, 0x34, 0x72, 0x45, 0x26, 0x49, 0x70,
+ 0xe7, 0xa3, 0x78, 0xe0, 0x52, 0x89, 0x84, 0x94,
+ 0x83, 0x82, 0xc2, 0x69, 0x8f, 0xe3, 0xe1, 0x3f,
+ 0x60, 0x74, 0x88, 0xc4, 0xf7, 0x75, 0x2c, 0xfb,
+ 0xbd, 0xb6, 0xc4, 0x7e, 0x10, 0x0a, 0x6c, 0x90,
+ 0x04, 0x9e, 0xc3, 0x3f, 0x59, 0x7c, 0xce, 0x31,
+ 0x18, 0x60, 0x57, 0x73, 0x46, 0x94, 0x7d, 0x06,
+ 0xa0, 0x6d, 0x44, 0xec, 0xa2, 0x0a, 0x9e, 0x05,
+ 0x15, 0xef, 0xca, 0x5c, 0xbf, 0x00, 0xeb, 0xf7,
+ 0x3d, 0x32, 0xd4, 0xa5, 0xef, 0x49, 0x89, 0x5e,
+ 0x46, 0xb0, 0xa6, 0x63, 0x5b, 0x8a, 0x73, 0xae,
+ 0x6f, 0xd5, 0x9d, 0xf8, 0x4f, 0x40, 0xb5, 0xb2,
+ 0x6e, 0xd3, 0xb6, 0x01, 0xa9, 0x26, 0xa2, 0x21,
+ 0xcf, 0x33, 0x7a, 0x3a, 0xa4, 0x23, 0x13, 0xb0,
+ 0x69, 0x6a, 0xee, 0xce, 0xd8, 0x9d, 0x01, 0x1d,
+ 0x50, 0xc1, 0x30, 0x6c, 0xb1, 0xcd, 0xa0, 0xf0,
+ 0xf0, 0xa2, 0x64, 0x6f, 0xbb, 0xbf, 0x5e, 0xe6,
+ 0xab, 0x87, 0xb4, 0x0f, 0x4f, 0x15, 0xaf, 0xb5,
+ 0x25, 0xa1, 0xb2, 0xd0, 0x80, 0x2c, 0xfb, 0xf9,
+ 0xfe, 0xd2, 0x33, 0xbb, 0x76, 0xfe, 0x7c, 0xa8,
+ 0x66, 0xf7, 0xe7, 0x85, 0x9f, 0x1f, 0x85, 0x57,
+ 0x88, 0xe1, 0xe9, 0x63, 0xe4, 0xd8, 0x1c, 0xa1,
+ 0xfb, 0xda, 0x44, 0x05, 0x2e, 0x1d, 0x3a, 0x1c,
+ 0xff, 0xc8, 0x3b, 0xc0, 0xfe, 0xda, 0x22, 0x0b,
+ 0x43, 0xd6, 0x88, 0x39, 0x4c, 0x4a, 0xa6, 0x69,
+ 0x18, 0x93, 0x42, 0x4e, 0xb5, 0xcc, 0x66, 0x0d,
+ 0x09, 0xf8, 0x1e, 0x7c, 0xd3, 0x3c, 0x99, 0x0d,
+ 0x50, 0x1d, 0x62, 0xe9, 0x57, 0x06, 0xbf, 0x19,
+ 0x88, 0xdd, 0xad, 0x7b, 0x4f, 0xf9, 0xc7, 0x82,
+ 0x6d, 0x8d, 0xc8, 0xc4, 0xc5, 0x78, 0x17, 0x20,
+ 0x15, 0xc5, 0x52, 0x41, 0xcf, 0x5b, 0xd6, 0x7f,
+ 0x94, 0x02, 0x41, 0xe0, 0x40, 0x22, 0x03, 0x5e,
+ 0xd1, 0x53, 0xd4, 0x86, 0xd3, 0x2c, 0x9f, 0x0f,
+ 0x96, 0xe3, 0x6b, 0x9a, 0x76, 0x32, 0x06, 0x47,
+ 0x4b, 0x11, 0xb3, 0xdd, 0x03, 0x65, 0xbd, 0x9b,
+ 0x01, 0xda, 0x9c, 0xb9, 0x7e, 0x3f, 0x6a, 0xc4,
+ 0x7b, 0xea, 0xd4, 0x3c, 0xb9, 0xfb, 0x5c, 0x6b,
+ 0x64, 0x33, 0x52, 0xba, 0x64, 0x78, 0x8f, 0xa4,
+ 0xaf, 0x7a, 0x61, 0x8d, 0xbc, 0xc5, 0x73, 0xe9,
+ 0x6b, 0x58, 0x97, 0x4b, 0xbf, 0x63, 0x22, 0xd3,
+ 0x37, 0x02, 0x54, 0xc5, 0xb9, 0x16, 0x4a, 0xf0,
+ 0x19, 0xd8, 0x94, 0x57, 0xb8, 0x8a, 0xb3, 0x16,
+ 0x3b, 0xd0, 0x84, 0x8e, 0x67, 0xa6, 0xa3, 0x7d,
+ 0x78, 0xec, 0x00
+};
+static const u8 enc_output012[] __initconst = {
+ 0x52, 0x34, 0xb3, 0x65, 0x3b, 0xb7, 0xe5, 0xd3,
+ 0xab, 0x49, 0x17, 0x60, 0xd2, 0x52, 0x56, 0xdf,
+ 0xdf, 0x34, 0x56, 0x82, 0xe2, 0xbe, 0xe5, 0xe1,
+ 0x28, 0xd1, 0x4e, 0x5f, 0x4f, 0x01, 0x7d, 0x3f,
+ 0x99, 0x6b, 0x30, 0x6e, 0x1a, 0x7c, 0x4c, 0x8e,
+ 0x62, 0x81, 0xae, 0x86, 0x3f, 0x6b, 0xd0, 0xb5,
+ 0xa9, 0xcf, 0x50, 0xf1, 0x02, 0x12, 0xa0, 0x0b,
+ 0x24, 0xe9, 0xe6, 0x72, 0x89, 0x2c, 0x52, 0x1b,
+ 0x34, 0x38, 0xf8, 0x75, 0x5f, 0xa0, 0x74, 0xe2,
+ 0x99, 0xdd, 0xa6, 0x4b, 0x14, 0x50, 0x4e, 0xf1,
+ 0xbe, 0xd6, 0x9e, 0xdb, 0xb2, 0x24, 0x27, 0x74,
+ 0x12, 0x4a, 0x78, 0x78, 0x17, 0xa5, 0x58, 0x8e,
+ 0x2f, 0xf9, 0xf4, 0x8d, 0xee, 0x03, 0x88, 0xae,
+ 0xb8, 0x29, 0xa1, 0x2f, 0x4b, 0xee, 0x92, 0xbd,
+ 0x87, 0xb3, 0xce, 0x34, 0x21, 0x57, 0x46, 0x04,
+ 0x49, 0x0c, 0x80, 0xf2, 0x01, 0x13, 0xa1, 0x55,
+ 0xb3, 0xff, 0x44, 0x30, 0x3c, 0x1c, 0xd0, 0xef,
+ 0xbc, 0x18, 0x74, 0x26, 0xad, 0x41, 0x5b, 0x5b,
+ 0x3e, 0x9a, 0x7a, 0x46, 0x4f, 0x16, 0xd6, 0x74,
+ 0x5a, 0xb7, 0x3a, 0x28, 0x31, 0xd8, 0xae, 0x26,
+ 0xac, 0x50, 0x53, 0x86, 0xf2, 0x56, 0xd7, 0x3f,
+ 0x29, 0xbc, 0x45, 0x68, 0x8e, 0xcb, 0x98, 0x64,
+ 0xdd, 0xc9, 0xba, 0xb8, 0x4b, 0x7b, 0x82, 0xdd,
+ 0x14, 0xa7, 0xcb, 0x71, 0x72, 0x00, 0x5c, 0xad,
+ 0x7b, 0x6a, 0x89, 0xa4, 0x3d, 0xbf, 0xb5, 0x4b,
+ 0x3e, 0x7c, 0x5a, 0xcf, 0xb8, 0xa1, 0xc5, 0x6e,
+ 0xc8, 0xb6, 0x31, 0x57, 0x7b, 0xdf, 0xa5, 0x7e,
+ 0xb1, 0xd6, 0x42, 0x2a, 0x31, 0x36, 0xd1, 0xd0,
+ 0x3f, 0x7a, 0xe5, 0x94, 0xd6, 0x36, 0xa0, 0x6f,
+ 0xb7, 0x40, 0x7d, 0x37, 0xc6, 0x55, 0x7c, 0x50,
+ 0x40, 0x6d, 0x29, 0x89, 0xe3, 0x5a, 0xae, 0x97,
+ 0xe7, 0x44, 0x49, 0x6e, 0xbd, 0x81, 0x3d, 0x03,
+ 0x93, 0x06, 0x12, 0x06, 0xe2, 0x41, 0x12, 0x4a,
+ 0xf1, 0x6a, 0xa4, 0x58, 0xa2, 0xfb, 0xd2, 0x15,
+ 0xba, 0xc9, 0x79, 0xc9, 0xce, 0x5e, 0x13, 0xbb,
+ 0xf1, 0x09, 0x04, 0xcc, 0xfd, 0xe8, 0x51, 0x34,
+ 0x6a, 0xe8, 0x61, 0x88, 0xda, 0xed, 0x01, 0x47,
+ 0x84, 0xf5, 0x73, 0x25, 0xf9, 0x1c, 0x42, 0x86,
+ 0x07, 0xf3, 0x5b, 0x1a, 0x01, 0xb3, 0xeb, 0x24,
+ 0x32, 0x8d, 0xf6, 0xed, 0x7c, 0x4b, 0xeb, 0x3c,
+ 0x36, 0x42, 0x28, 0xdf, 0xdf, 0xb6, 0xbe, 0xd9,
+ 0x8c, 0x52, 0xd3, 0x2b, 0x08, 0x90, 0x8c, 0xe7,
+ 0x98, 0x31, 0xe2, 0x32, 0x8e, 0xfc, 0x11, 0x48,
+ 0x00, 0xa8, 0x6a, 0x42, 0x4a, 0x02, 0xc6, 0x4b,
+ 0x09, 0xf1, 0xe3, 0x49, 0xf3, 0x45, 0x1f, 0x0e,
+ 0xbc, 0x56, 0xe2, 0xe4, 0xdf, 0xfb, 0xeb, 0x61,
+ 0xfa, 0x24, 0xc1, 0x63, 0x75, 0xbb, 0x47, 0x75,
+ 0xaf, 0xe1, 0x53, 0x16, 0x96, 0x21, 0x85, 0x26,
+ 0x11, 0xb3, 0x76, 0xe3, 0x23, 0xa1, 0x6b, 0x74,
+ 0x37, 0xd0, 0xde, 0x06, 0x90, 0x71, 0x5d, 0x43,
+ 0x88, 0x9b, 0x00, 0x54, 0xa6, 0x75, 0x2f, 0xa1,
+ 0xc2, 0x0b, 0x73, 0x20, 0x1d, 0xb6, 0x21, 0x79,
+ 0x57, 0x3f, 0xfa, 0x09, 0xbe, 0x8a, 0x33, 0xc3,
+ 0x52, 0xf0, 0x1d, 0x82, 0x31, 0xd1, 0x55, 0xb5,
+ 0x6c, 0x99, 0x25, 0xcf, 0x5c, 0x32, 0xce, 0xe9,
+ 0x0d, 0xfa, 0x69, 0x2c, 0xd5, 0x0d, 0xc5, 0x6d,
+ 0x86, 0xd0, 0x0c, 0x3b, 0x06, 0x50, 0x79, 0xe8,
+ 0xc3, 0xae, 0x04, 0xe6, 0xcd, 0x51, 0xe4, 0x26,
+ 0x9b, 0x4f, 0x7e, 0xa6, 0x0f, 0xab, 0xd8, 0xe5,
+ 0xde, 0xa9, 0x00, 0x95, 0xbe, 0xa3, 0x9d, 0x5d,
+ 0xb2, 0x09, 0x70, 0x18, 0x1c, 0xf0, 0xac, 0x29,
+ 0x23, 0x02, 0x29, 0x28, 0xd2, 0x74, 0x35, 0x57,
+ 0x62, 0x0f, 0x24, 0xea, 0x5e, 0x33, 0xc2, 0x92,
+ 0xf3, 0x78, 0x4d, 0x30, 0x1e, 0xa1, 0x99, 0xa9,
+ 0x82, 0xb0, 0x42, 0x31, 0x8d, 0xad, 0x8a, 0xbc,
+ 0xfc, 0xd4, 0x57, 0x47, 0x3e, 0xb4, 0x50, 0xdd,
+ 0x6e, 0x2c, 0x80, 0x4d, 0x22, 0xf1, 0xfb, 0x57,
+ 0xc4, 0xdd, 0x17, 0xe1, 0x8a, 0x36, 0x4a, 0xb3,
+ 0x37, 0xca, 0xc9, 0x4e, 0xab, 0xd5, 0x69, 0xc4,
+ 0xf4, 0xbc, 0x0b, 0x3b, 0x44, 0x4b, 0x29, 0x9c,
+ 0xee, 0xd4, 0x35, 0x22, 0x21, 0xb0, 0x1f, 0x27,
+ 0x64, 0xa8, 0x51, 0x1b, 0xf0, 0x9f, 0x19, 0x5c,
+ 0xfb, 0x5a, 0x64, 0x74, 0x70, 0x45, 0x09, 0xf5,
+ 0x64, 0xfe, 0x1a, 0x2d, 0xc9, 0x14, 0x04, 0x14,
+ 0xcf, 0xd5, 0x7d, 0x60, 0xaf, 0x94, 0x39, 0x94,
+ 0xe2, 0x7d, 0x79, 0x82, 0xd0, 0x65, 0x3b, 0x6b,
+ 0x9c, 0x19, 0x84, 0xb4, 0x6d, 0xb3, 0x0c, 0x99,
+ 0xc0, 0x56, 0xa8, 0xbd, 0x73, 0xce, 0x05, 0x84,
+ 0x3e, 0x30, 0xaa, 0xc4, 0x9b, 0x1b, 0x04, 0x2a,
+ 0x9f, 0xd7, 0x43, 0x2b, 0x23, 0xdf, 0xbf, 0xaa,
+ 0xd5, 0xc2, 0x43, 0x2d, 0x70, 0xab, 0xdc, 0x75,
+ 0xad, 0xac, 0xf7, 0xc0, 0xbe, 0x67, 0xb2, 0x74,
+ 0xed, 0x67, 0x10, 0x4a, 0x92, 0x60, 0xc1, 0x40,
+ 0x50, 0x19, 0x8a, 0x8a, 0x8c, 0x09, 0x0e, 0x72,
+ 0xe1, 0x73, 0x5e, 0xe8, 0x41, 0x85, 0x63, 0x9f,
+ 0x3f, 0xd7, 0x7d, 0xc4, 0xfb, 0x22, 0x5d, 0x92,
+ 0x6c, 0xb3, 0x1e, 0xe2, 0x50, 0x2f, 0x82, 0xa8,
+ 0x28, 0xc0, 0xb5, 0xd7, 0x5f, 0x68, 0x0d, 0x2c,
+ 0x2d, 0xaf, 0x7e, 0xfa, 0x2e, 0x08, 0x0f, 0x1f,
+ 0x70, 0x9f, 0xe9, 0x19, 0x72, 0x55, 0xf8, 0xfb,
+ 0x51, 0xd2, 0x33, 0x5d, 0xa0, 0xd3, 0x2b, 0x0a,
+ 0x6c, 0xbc, 0x4e, 0xcf, 0x36, 0x4d, 0xdc, 0x3b,
+ 0xe9, 0x3e, 0x81, 0x7c, 0x61, 0xdb, 0x20, 0x2d,
+ 0x3a, 0xc3, 0xb3, 0x0c, 0x1e, 0x00, 0xb9, 0x7c,
+ 0xf5, 0xca, 0x10, 0x5f, 0x3a, 0x71, 0xb3, 0xe4,
+ 0x20, 0xdb, 0x0c, 0x2a, 0x98, 0x63, 0x45, 0x00,
+ 0x58, 0xf6, 0x68, 0xe4, 0x0b, 0xda, 0x13, 0x3b,
+ 0x60, 0x5c, 0x76, 0xdb, 0xb9, 0x97, 0x71, 0xe4,
+ 0xd9, 0xb7, 0xdb, 0xbd, 0x68, 0xc7, 0x84, 0x84,
+ 0xaa, 0x7c, 0x68, 0x62, 0x5e, 0x16, 0xfc, 0xba,
+ 0x72, 0xaa, 0x9a, 0xa9, 0xeb, 0x7c, 0x75, 0x47,
+ 0x97, 0x7e, 0xad, 0xe2, 0xd9, 0x91, 0xe8, 0xe4,
+ 0xa5, 0x31, 0xd7, 0x01, 0x8e, 0xa2, 0x11, 0x88,
+ 0x95, 0xb9, 0xf2, 0x9b, 0xd3, 0x7f, 0x1b, 0x81,
+ 0x22, 0xf7, 0x98, 0x60, 0x0a, 0x64, 0xa6, 0xc1,
+ 0xf6, 0x49, 0xc7, 0xe3, 0x07, 0x4d, 0x94, 0x7a,
+ 0xcf, 0x6e, 0x68, 0x0c, 0x1b, 0x3f, 0x6e, 0x2e,
+ 0xee, 0x92, 0xfa, 0x52, 0xb3, 0x59, 0xf8, 0xf1,
+ 0x8f, 0x6a, 0x66, 0xa3, 0x82, 0x76, 0x4a, 0x07,
+ 0x1a, 0xc7, 0xdd, 0xf5, 0xda, 0x9c, 0x3c, 0x24,
+ 0xbf, 0xfd, 0x42, 0xa1, 0x10, 0x64, 0x6a, 0x0f,
+ 0x89, 0xee, 0x36, 0xa5, 0xce, 0x99, 0x48, 0x6a,
+ 0xf0, 0x9f, 0x9e, 0x69, 0xa4, 0x40, 0x20, 0xe9,
+ 0x16, 0x15, 0xf7, 0xdb, 0x75, 0x02, 0xcb, 0xe9,
+ 0x73, 0x8b, 0x3b, 0x49, 0x2f, 0xf0, 0xaf, 0x51,
+ 0x06, 0x5c, 0xdf, 0x27, 0x27, 0x49, 0x6a, 0xd1,
+ 0xcc, 0xc7, 0xb5, 0x63, 0xb5, 0xfc, 0xb8, 0x5c,
+ 0x87, 0x7f, 0x84, 0xb4, 0xcc, 0x14, 0xa9, 0x53,
+ 0xda, 0xa4, 0x56, 0xf8, 0xb6, 0x1b, 0xcc, 0x40,
+ 0x27, 0x52, 0x06, 0x5a, 0x13, 0x81, 0xd7, 0x3a,
+ 0xd4, 0x3b, 0xfb, 0x49, 0x65, 0x31, 0x33, 0xb2,
+ 0xfa, 0xcd, 0xad, 0x58, 0x4e, 0x2b, 0xae, 0xd2,
+ 0x20, 0xfb, 0x1a, 0x48, 0xb4, 0x3f, 0x9a, 0xd8,
+ 0x7a, 0x35, 0x4a, 0xc8, 0xee, 0x88, 0x5e, 0x07,
+ 0x66, 0x54, 0xb9, 0xec, 0x9f, 0xa3, 0xe3, 0xb9,
+ 0x37, 0xaa, 0x49, 0x76, 0x31, 0xda, 0x74, 0x2d,
+ 0x3c, 0xa4, 0x65, 0x10, 0x32, 0x38, 0xf0, 0xde,
+ 0xd3, 0x99, 0x17, 0xaa, 0x71, 0xaa, 0x8f, 0x0f,
+ 0x8c, 0xaf, 0xa2, 0xf8, 0x5d, 0x64, 0xba, 0x1d,
+ 0xa3, 0xef, 0x96, 0x73, 0xe8, 0xa1, 0x02, 0x8d,
+ 0x0c, 0x6d, 0xb8, 0x06, 0x90, 0xb8, 0x08, 0x56,
+ 0x2c, 0xa7, 0x06, 0xc9, 0xc2, 0x38, 0xdb, 0x7c,
+ 0x63, 0xb1, 0x57, 0x8e, 0xea, 0x7c, 0x79, 0xf3,
+ 0x49, 0x1d, 0xfe, 0x9f, 0xf3, 0x6e, 0xb1, 0x1d,
+ 0xba, 0x19, 0x80, 0x1a, 0x0a, 0xd3, 0xb0, 0x26,
+ 0x21, 0x40, 0xb1, 0x7c, 0xf9, 0x4d, 0x8d, 0x10,
+ 0xc1, 0x7e, 0xf4, 0xf6, 0x3c, 0xa8, 0xfd, 0x7c,
+ 0xa3, 0x92, 0xb2, 0x0f, 0xaa, 0xcc, 0xa6, 0x11,
+ 0xfe, 0x04, 0xe3, 0xd1, 0x7a, 0x32, 0x89, 0xdf,
+ 0x0d, 0xc4, 0x8f, 0x79, 0x6b, 0xca, 0x16, 0x7c,
+ 0x6e, 0xf9, 0xad, 0x0f, 0xf6, 0xfe, 0x27, 0xdb,
+ 0xc4, 0x13, 0x70, 0xf1, 0x62, 0x1a, 0x4f, 0x79,
+ 0x40, 0xc9, 0x9b, 0x8b, 0x21, 0xea, 0x84, 0xfa,
+ 0xf5, 0xf1, 0x89, 0xce, 0xb7, 0x55, 0x0a, 0x80,
+ 0x39, 0x2f, 0x55, 0x36, 0x16, 0x9c, 0x7b, 0x08,
+ 0xbd, 0x87, 0x0d, 0xa5, 0x32, 0xf1, 0x52, 0x7c,
+ 0xe8, 0x55, 0x60, 0x5b, 0xd7, 0x69, 0xe4, 0xfc,
+ 0xfa, 0x12, 0x85, 0x96, 0xea, 0x50, 0x28, 0xab,
+ 0x8a, 0xf7, 0xbb, 0x0e, 0x53, 0x74, 0xca, 0xa6,
+ 0x27, 0x09, 0xc2, 0xb5, 0xde, 0x18, 0x14, 0xd9,
+ 0xea, 0xe5, 0x29, 0x1c, 0x40, 0x56, 0xcf, 0xd7,
+ 0xae, 0x05, 0x3f, 0x65, 0xaf, 0x05, 0x73, 0xe2,
+ 0x35, 0x96, 0x27, 0x07, 0x14, 0xc0, 0xad, 0x33,
+ 0xf1, 0xdc, 0x44, 0x7a, 0x89, 0x17, 0x77, 0xd2,
+ 0x9c, 0x58, 0x60, 0xf0, 0x3f, 0x7b, 0x2d, 0x2e,
+ 0x57, 0x95, 0x54, 0x87, 0xed, 0xf2, 0xc7, 0x4c,
+ 0xf0, 0xae, 0x56, 0x29, 0x19, 0x7d, 0x66, 0x4b,
+ 0x9b, 0x83, 0x84, 0x42, 0x3b, 0x01, 0x25, 0x66,
+ 0x8e, 0x02, 0xde, 0xb9, 0x83, 0x54, 0x19, 0xf6,
+ 0x9f, 0x79, 0x0d, 0x67, 0xc5, 0x1d, 0x7a, 0x44,
+ 0x02, 0x98, 0xa7, 0x16, 0x1c, 0x29, 0x0d, 0x74,
+ 0xff, 0x85, 0x40, 0x06, 0xef, 0x2c, 0xa9, 0xc6,
+ 0xf5, 0x53, 0x07, 0x06, 0xae, 0xe4, 0xfa, 0x5f,
+ 0xd8, 0x39, 0x4d, 0xf1, 0x9b, 0x6b, 0xd9, 0x24,
+ 0x84, 0xfe, 0x03, 0x4c, 0xb2, 0x3f, 0xdf, 0xa1,
+ 0x05, 0x9e, 0x50, 0x14, 0x5a, 0xd9, 0x1a, 0xa2,
+ 0xa7, 0xfa, 0xfa, 0x17, 0xf7, 0x78, 0xd6, 0xb5,
+ 0x92, 0x61, 0x91, 0xac, 0x36, 0xfa, 0x56, 0x0d,
+ 0x38, 0x32, 0x18, 0x85, 0x08, 0x58, 0x37, 0xf0,
+ 0x4b, 0xdb, 0x59, 0xe7, 0xa4, 0x34, 0xc0, 0x1b,
+ 0x01, 0xaf, 0x2d, 0xde, 0xa1, 0xaa, 0x5d, 0xd3,
+ 0xec, 0xe1, 0xd4, 0xf7, 0xe6, 0x54, 0x68, 0xf0,
+ 0x51, 0x97, 0xa7, 0x89, 0xea, 0x24, 0xad, 0xd3,
+ 0x6e, 0x47, 0x93, 0x8b, 0x4b, 0xb4, 0xf7, 0x1c,
+ 0x42, 0x06, 0x67, 0xe8, 0x99, 0xf6, 0xf5, 0x7b,
+ 0x85, 0xb5, 0x65, 0xb5, 0xb5, 0xd2, 0x37, 0xf5,
+ 0xf3, 0x02, 0xa6, 0x4d, 0x11, 0xa7, 0xdc, 0x51,
+ 0x09, 0x7f, 0xa0, 0xd8, 0x88, 0x1c, 0x13, 0x71,
+ 0xae, 0x9c, 0xb7, 0x7b, 0x34, 0xd6, 0x4e, 0x68,
+ 0x26, 0x83, 0x51, 0xaf, 0x1d, 0xee, 0x8b, 0xbb,
+ 0x69, 0x43, 0x2b, 0x9e, 0x8a, 0xbc, 0x02, 0x0e,
+ 0xa0, 0x1b, 0xe0, 0xa8, 0x5f, 0x6f, 0xaf, 0x1b,
+ 0x8f, 0xe7, 0x64, 0x71, 0x74, 0x11, 0x7e, 0xa8,
+ 0xd8, 0xf9, 0x97, 0x06, 0xc3, 0xb6, 0xfb, 0xfb,
+ 0xb7, 0x3d, 0x35, 0x9d, 0x3b, 0x52, 0xed, 0x54,
+ 0xca, 0xf4, 0x81, 0x01, 0x2d, 0x1b, 0xc3, 0xa7,
+ 0x00, 0x3d, 0x1a, 0x39, 0x54, 0xe1, 0xf6, 0xff,
+ 0xed, 0x6f, 0x0b, 0x5a, 0x68, 0xda, 0x58, 0xdd,
+ 0xa9, 0xcf, 0x5c, 0x4a, 0xe5, 0x09, 0x4e, 0xde,
+ 0x9d, 0xbc, 0x3e, 0xee, 0x5a, 0x00, 0x3b, 0x2c,
+ 0x87, 0x10, 0x65, 0x60, 0xdd, 0xd7, 0x56, 0xd1,
+ 0x4c, 0x64, 0x45, 0xe4, 0x21, 0xec, 0x78, 0xf8,
+ 0x25, 0x7a, 0x3e, 0x16, 0x5d, 0x09, 0x53, 0x14,
+ 0xbe, 0x4f, 0xae, 0x87, 0xd8, 0xd1, 0xaa, 0x3c,
+ 0xf6, 0x3e, 0xa4, 0x70, 0x8c, 0x5e, 0x70, 0xa4,
+ 0xb3, 0x6b, 0x66, 0x73, 0xd3, 0xbf, 0x31, 0x06,
+ 0x19, 0x62, 0x93, 0x15, 0xf2, 0x86, 0xe4, 0x52,
+ 0x7e, 0x53, 0x4c, 0x12, 0x38, 0xcc, 0x34, 0x7d,
+ 0x57, 0xf6, 0x42, 0x93, 0x8a, 0xc4, 0xee, 0x5c,
+ 0x8a, 0xe1, 0x52, 0x8f, 0x56, 0x64, 0xf6, 0xa6,
+ 0xd1, 0x91, 0x57, 0x70, 0xcd, 0x11, 0x76, 0xf5,
+ 0x59, 0x60, 0x60, 0x3c, 0xc1, 0xc3, 0x0b, 0x7f,
+ 0x58, 0x1a, 0x50, 0x91, 0xf1, 0x68, 0x8f, 0x6e,
+ 0x74, 0x74, 0xa8, 0x51, 0x0b, 0xf7, 0x7a, 0x98,
+ 0x37, 0xf2, 0x0a, 0x0e, 0xa4, 0x97, 0x04, 0xb8,
+ 0x9b, 0xfd, 0xa0, 0xea, 0xf7, 0x0d, 0xe1, 0xdb,
+ 0x03, 0xf0, 0x31, 0x29, 0xf8, 0xdd, 0x6b, 0x8b,
+ 0x5d, 0xd8, 0x59, 0xa9, 0x29, 0xcf, 0x9a, 0x79,
+ 0x89, 0x19, 0x63, 0x46, 0x09, 0x79, 0x6a, 0x11,
+ 0xda, 0x63, 0x68, 0x48, 0x77, 0x23, 0xfb, 0x7d,
+ 0x3a, 0x43, 0xcb, 0x02, 0x3b, 0x7a, 0x6d, 0x10,
+ 0x2a, 0x9e, 0xac, 0xf1, 0xd4, 0x19, 0xf8, 0x23,
+ 0x64, 0x1d, 0x2c, 0x5f, 0xf2, 0xb0, 0x5c, 0x23,
+ 0x27, 0xf7, 0x27, 0x30, 0x16, 0x37, 0xb1, 0x90,
+ 0xab, 0x38, 0xfb, 0x55, 0xcd, 0x78, 0x58, 0xd4,
+ 0x7d, 0x43, 0xf6, 0x45, 0x5e, 0x55, 0x8d, 0xb1,
+ 0x02, 0x65, 0x58, 0xb4, 0x13, 0x4b, 0x36, 0xf7,
+ 0xcc, 0xfe, 0x3d, 0x0b, 0x82, 0xe2, 0x12, 0x11,
+ 0xbb, 0xe6, 0xb8, 0x3a, 0x48, 0x71, 0xc7, 0x50,
+ 0x06, 0x16, 0x3a, 0xe6, 0x7c, 0x05, 0xc7, 0xc8,
+ 0x4d, 0x2f, 0x08, 0x6a, 0x17, 0x9a, 0x95, 0x97,
+ 0x50, 0x68, 0xdc, 0x28, 0x18, 0xc4, 0x61, 0x38,
+ 0xb9, 0xe0, 0x3e, 0x78, 0xdb, 0x29, 0xe0, 0x9f,
+ 0x52, 0xdd, 0xf8, 0x4f, 0x91, 0xc1, 0xd0, 0x33,
+ 0xa1, 0x7a, 0x8e, 0x30, 0x13, 0x82, 0x07, 0x9f,
+ 0xd3, 0x31, 0x0f, 0x23, 0xbe, 0x32, 0x5a, 0x75,
+ 0xcf, 0x96, 0xb2, 0xec, 0xb5, 0x32, 0xac, 0x21,
+ 0xd1, 0x82, 0x33, 0xd3, 0x15, 0x74, 0xbd, 0x90,
+ 0xf1, 0x2c, 0xe6, 0x5f, 0x8d, 0xe3, 0x02, 0xe8,
+ 0xe9, 0xc4, 0xca, 0x96, 0xeb, 0x0e, 0xbc, 0x91,
+ 0xf4, 0xb9, 0xea, 0xd9, 0x1b, 0x75, 0xbd, 0xe1,
+ 0xac, 0x2a, 0x05, 0x37, 0x52, 0x9b, 0x1b, 0x3f,
+ 0x5a, 0xdc, 0x21, 0xc3, 0x98, 0xbb, 0xaf, 0xa3,
+ 0xf2, 0x00, 0xbf, 0x0d, 0x30, 0x89, 0x05, 0xcc,
+ 0xa5, 0x76, 0xf5, 0x06, 0xf0, 0xc6, 0x54, 0x8a,
+ 0x5d, 0xd4, 0x1e, 0xc1, 0xf2, 0xce, 0xb0, 0x62,
+ 0xc8, 0xfc, 0x59, 0x42, 0x9a, 0x90, 0x60, 0x55,
+ 0xfe, 0x88, 0xa5, 0x8b, 0xb8, 0x33, 0x0c, 0x23,
+ 0x24, 0x0d, 0x15, 0x70, 0x37, 0x1e, 0x3d, 0xf6,
+ 0xd2, 0xea, 0x92, 0x10, 0xb2, 0xc4, 0x51, 0xac,
+ 0xf2, 0xac, 0xf3, 0x6b, 0x6c, 0xaa, 0xcf, 0x12,
+ 0xc5, 0x6c, 0x90, 0x50, 0xb5, 0x0c, 0xfc, 0x1a,
+ 0x15, 0x52, 0xe9, 0x26, 0xc6, 0x52, 0xa4, 0xe7,
+ 0x81, 0x69, 0xe1, 0xe7, 0x9e, 0x30, 0x01, 0xec,
+ 0x84, 0x89, 0xb2, 0x0d, 0x66, 0xdd, 0xce, 0x28,
+ 0x5c, 0xec, 0x98, 0x46, 0x68, 0x21, 0x9f, 0x88,
+ 0x3f, 0x1f, 0x42, 0x77, 0xce, 0xd0, 0x61, 0xd4,
+ 0x20, 0xa7, 0xff, 0x53, 0xad, 0x37, 0xd0, 0x17,
+ 0x35, 0xc9, 0xfc, 0xba, 0x0a, 0x78, 0x3f, 0xf2,
+ 0xcc, 0x86, 0x89, 0xe8, 0x4b, 0x3c, 0x48, 0x33,
+ 0x09, 0x7f, 0xc6, 0xc0, 0xdd, 0xb8, 0xfd, 0x7a,
+ 0x66, 0x66, 0x65, 0xeb, 0x47, 0xa7, 0x04, 0x28,
+ 0xa3, 0x19, 0x8e, 0xa9, 0xb1, 0x13, 0x67, 0x62,
+ 0x70, 0xcf, 0xd6
+};
+static const u8 enc_assoc012[] __initconst = {
+ 0xb1, 0x69, 0x83, 0x87, 0x30, 0xaa, 0x5d, 0xb8,
+ 0x77, 0xe8, 0x21, 0xff, 0x06, 0x59, 0x35, 0xce,
+ 0x75, 0xfe, 0x38, 0xef, 0xb8, 0x91, 0x43, 0x8c,
+ 0xcf, 0x70, 0xdd, 0x0a, 0x68, 0xbf, 0xd4, 0xbc,
+ 0x16, 0x76, 0x99, 0x36, 0x1e, 0x58, 0x79, 0x5e,
+ 0xd4, 0x29, 0xf7, 0x33, 0x93, 0x48, 0xdb, 0x5f,
+ 0x01, 0xae, 0x9c, 0xb6, 0xe4, 0x88, 0x6d, 0x2b,
+ 0x76, 0x75, 0xe0, 0xf3, 0x74, 0xe2, 0xc9
+};
+static const u8 enc_nonce012[] __initconst = {
+ 0x05, 0xa3, 0x93, 0xed, 0x30, 0xc5, 0xa2, 0x06
+};
+static const u8 enc_key012[] __initconst = {
+ 0xb3, 0x35, 0x50, 0x03, 0x54, 0x2e, 0x40, 0x5e,
+ 0x8f, 0x59, 0x8e, 0xc5, 0x90, 0xd5, 0x27, 0x2d,
+ 0xba, 0x29, 0x2e, 0xcb, 0x1b, 0x70, 0x44, 0x1e,
+ 0x65, 0x91, 0x6e, 0x2a, 0x79, 0x22, 0xda, 0x64
+};
+
+/* wycheproof - misc */
+static const u8 enc_input053[] __initconst = {
+ 0x25, 0x6d, 0x40, 0x88, 0x80, 0x94, 0x17, 0x83,
+ 0x55, 0xd3, 0x04, 0x84, 0x64, 0x43, 0xfe, 0xe8,
+ 0xdf, 0x99, 0x47, 0x03, 0x03, 0xfb, 0x3b, 0x7b,
+ 0x80, 0xe0, 0x30, 0xbe, 0xeb, 0xd3, 0x29, 0xbe
+};
+static const u8 enc_output053[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xe6, 0xd3, 0xd7, 0x32, 0x4a, 0x1c, 0xbb, 0xa7,
+ 0x77, 0xbb, 0xb0, 0xec, 0xdd, 0xa3, 0x78, 0x07
+};
+static const u8 enc_assoc053[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+static const u8 enc_nonce053[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00
+};
+static const u8 enc_key053[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - misc */
+static const u8 enc_input054[] __initconst = {
+ 0x25, 0x6d, 0x40, 0x88, 0x80, 0x94, 0x17, 0x83,
+ 0x55, 0xd3, 0x04, 0x84, 0x64, 0x43, 0xfe, 0xe8,
+ 0xdf, 0x99, 0x47, 0x03, 0x03, 0xfb, 0x3b, 0x7b,
+ 0x80, 0xe0, 0x30, 0xbe, 0xeb, 0xd3, 0x29, 0xbe,
+ 0xe3, 0xbc, 0xdb, 0x5b, 0x1e, 0xde, 0xfc, 0xfe,
+ 0x8b, 0xcd, 0xa1, 0xb6, 0xa1, 0x5c, 0x8c, 0x2b,
+ 0x08, 0x69, 0xff, 0xd2, 0xec, 0x5e, 0x26, 0xe5,
+ 0x53, 0xb7, 0xb2, 0x27, 0xfe, 0x87, 0xfd, 0xbd
+};
+static const u8 enc_output054[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x06, 0x2d, 0xe6, 0x79, 0x5f, 0x27, 0x4f, 0xd2,
+ 0xa3, 0x05, 0xd7, 0x69, 0x80, 0xbc, 0x9c, 0xce
+};
+static const u8 enc_assoc054[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+static const u8 enc_nonce054[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00
+};
+static const u8 enc_key054[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - misc */
+static const u8 enc_input055[] __initconst = {
+ 0x25, 0x6d, 0x40, 0x88, 0x80, 0x94, 0x17, 0x83,
+ 0x55, 0xd3, 0x04, 0x84, 0x64, 0x43, 0xfe, 0xe8,
+ 0xdf, 0x99, 0x47, 0x03, 0x03, 0xfb, 0x3b, 0x7b,
+ 0x80, 0xe0, 0x30, 0xbe, 0xeb, 0xd3, 0x29, 0xbe,
+ 0xe3, 0xbc, 0xdb, 0x5b, 0x1e, 0xde, 0xfc, 0xfe,
+ 0x8b, 0xcd, 0xa1, 0xb6, 0xa1, 0x5c, 0x8c, 0x2b,
+ 0x08, 0x69, 0xff, 0xd2, 0xec, 0x5e, 0x26, 0xe5,
+ 0x53, 0xb7, 0xb2, 0x27, 0xfe, 0x87, 0xfd, 0xbd,
+ 0x7a, 0xda, 0x44, 0x42, 0x42, 0x69, 0xbf, 0xfa,
+ 0x55, 0x27, 0xf2, 0x70, 0xac, 0xf6, 0x85, 0x02,
+ 0xb7, 0x4c, 0x5a, 0xe2, 0xe6, 0x0c, 0x05, 0x80,
+ 0x98, 0x1a, 0x49, 0x38, 0x45, 0x93, 0x92, 0xc4,
+ 0x9b, 0xb2, 0xf2, 0x84, 0xb6, 0x46, 0xef, 0xc7,
+ 0xf3, 0xf0, 0xb1, 0x36, 0x1d, 0xc3, 0x48, 0xed,
+ 0x77, 0xd3, 0x0b, 0xc5, 0x76, 0x92, 0xed, 0x38,
+ 0xfb, 0xac, 0x01, 0x88, 0x38, 0x04, 0x88, 0xc7
+};
+static const u8 enc_output055[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xd8, 0xb4, 0x79, 0x02, 0xba, 0xae, 0xaf, 0xb3,
+ 0x42, 0x03, 0x05, 0x15, 0x29, 0xaf, 0x28, 0x2e
+};
+static const u8 enc_assoc055[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+static const u8 enc_nonce055[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00
+};
+static const u8 enc_key055[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - misc */
+static const u8 enc_input056[] __initconst = {
+ 0xda, 0x92, 0xbf, 0x77, 0x7f, 0x6b, 0xe8, 0x7c,
+ 0xaa, 0x2c, 0xfb, 0x7b, 0x9b, 0xbc, 0x01, 0x17,
+ 0x20, 0x66, 0xb8, 0xfc, 0xfc, 0x04, 0xc4, 0x84,
+ 0x7f, 0x1f, 0xcf, 0x41, 0x14, 0x2c, 0xd6, 0x41
+};
+static const u8 enc_output056[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xb3, 0x89, 0x1c, 0x84, 0x9c, 0xb5, 0x2c, 0x27,
+ 0x74, 0x7e, 0xdf, 0xcf, 0x31, 0x21, 0x3b, 0xb6
+};
+static const u8 enc_assoc056[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce056[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00
+};
+static const u8 enc_key056[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - misc */
+static const u8 enc_input057[] __initconst = {
+ 0xda, 0x92, 0xbf, 0x77, 0x7f, 0x6b, 0xe8, 0x7c,
+ 0xaa, 0x2c, 0xfb, 0x7b, 0x9b, 0xbc, 0x01, 0x17,
+ 0x20, 0x66, 0xb8, 0xfc, 0xfc, 0x04, 0xc4, 0x84,
+ 0x7f, 0x1f, 0xcf, 0x41, 0x14, 0x2c, 0xd6, 0x41,
+ 0x1c, 0x43, 0x24, 0xa4, 0xe1, 0x21, 0x03, 0x01,
+ 0x74, 0x32, 0x5e, 0x49, 0x5e, 0xa3, 0x73, 0xd4,
+ 0xf7, 0x96, 0x00, 0x2d, 0x13, 0xa1, 0xd9, 0x1a,
+ 0xac, 0x48, 0x4d, 0xd8, 0x01, 0x78, 0x02, 0x42
+};
+static const u8 enc_output057[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xf0, 0xc1, 0x2d, 0x26, 0xef, 0x03, 0x02, 0x9b,
+ 0x62, 0xc0, 0x08, 0xda, 0x27, 0xc5, 0xdc, 0x68
+};
+static const u8 enc_assoc057[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce057[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00
+};
+static const u8 enc_key057[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - misc */
+static const u8 enc_input058[] __initconst = {
+ 0xda, 0x92, 0xbf, 0x77, 0x7f, 0x6b, 0xe8, 0x7c,
+ 0xaa, 0x2c, 0xfb, 0x7b, 0x9b, 0xbc, 0x01, 0x17,
+ 0x20, 0x66, 0xb8, 0xfc, 0xfc, 0x04, 0xc4, 0x84,
+ 0x7f, 0x1f, 0xcf, 0x41, 0x14, 0x2c, 0xd6, 0x41,
+ 0x1c, 0x43, 0x24, 0xa4, 0xe1, 0x21, 0x03, 0x01,
+ 0x74, 0x32, 0x5e, 0x49, 0x5e, 0xa3, 0x73, 0xd4,
+ 0xf7, 0x96, 0x00, 0x2d, 0x13, 0xa1, 0xd9, 0x1a,
+ 0xac, 0x48, 0x4d, 0xd8, 0x01, 0x78, 0x02, 0x42,
+ 0x85, 0x25, 0xbb, 0xbd, 0xbd, 0x96, 0x40, 0x05,
+ 0xaa, 0xd8, 0x0d, 0x8f, 0x53, 0x09, 0x7a, 0xfd,
+ 0x48, 0xb3, 0xa5, 0x1d, 0x19, 0xf3, 0xfa, 0x7f,
+ 0x67, 0xe5, 0xb6, 0xc7, 0xba, 0x6c, 0x6d, 0x3b,
+ 0x64, 0x4d, 0x0d, 0x7b, 0x49, 0xb9, 0x10, 0x38,
+ 0x0c, 0x0f, 0x4e, 0xc9, 0xe2, 0x3c, 0xb7, 0x12,
+ 0x88, 0x2c, 0xf4, 0x3a, 0x89, 0x6d, 0x12, 0xc7,
+ 0x04, 0x53, 0xfe, 0x77, 0xc7, 0xfb, 0x77, 0x38
+};
+static const u8 enc_output058[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xee, 0x65, 0x78, 0x30, 0x01, 0xc2, 0x56, 0x91,
+ 0xfa, 0x28, 0xd0, 0xf5, 0xf1, 0xc1, 0xd7, 0x62
+};
+static const u8 enc_assoc058[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce058[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00
+};
+static const u8 enc_key058[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - misc */
+static const u8 enc_input059[] __initconst = {
+ 0x25, 0x6d, 0x40, 0x08, 0x80, 0x94, 0x17, 0x03,
+ 0x55, 0xd3, 0x04, 0x04, 0x64, 0x43, 0xfe, 0x68,
+ 0xdf, 0x99, 0x47, 0x83, 0x03, 0xfb, 0x3b, 0xfb,
+ 0x80, 0xe0, 0x30, 0x3e, 0xeb, 0xd3, 0x29, 0x3e
+};
+static const u8 enc_output059[] __initconst = {
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0x79, 0xba, 0x7a, 0x29, 0xf5, 0xa7, 0xbb, 0x75,
+ 0x79, 0x7a, 0xf8, 0x7a, 0x61, 0x01, 0x29, 0xa4
+};
+static const u8 enc_assoc059[] __initconst = {
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80
+};
+static const u8 enc_nonce059[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00
+};
+static const u8 enc_key059[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - misc */
+static const u8 enc_input060[] __initconst = {
+ 0x25, 0x6d, 0x40, 0x08, 0x80, 0x94, 0x17, 0x03,
+ 0x55, 0xd3, 0x04, 0x04, 0x64, 0x43, 0xfe, 0x68,
+ 0xdf, 0x99, 0x47, 0x83, 0x03, 0xfb, 0x3b, 0xfb,
+ 0x80, 0xe0, 0x30, 0x3e, 0xeb, 0xd3, 0x29, 0x3e,
+ 0xe3, 0xbc, 0xdb, 0xdb, 0x1e, 0xde, 0xfc, 0x7e,
+ 0x8b, 0xcd, 0xa1, 0x36, 0xa1, 0x5c, 0x8c, 0xab,
+ 0x08, 0x69, 0xff, 0x52, 0xec, 0x5e, 0x26, 0x65,
+ 0x53, 0xb7, 0xb2, 0xa7, 0xfe, 0x87, 0xfd, 0x3d
+};
+static const u8 enc_output060[] __initconst = {
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0x36, 0xb1, 0x74, 0x38, 0x19, 0xe1, 0xb9, 0xba,
+ 0x15, 0x51, 0xe8, 0xed, 0x92, 0x2a, 0x95, 0x9a
+};
+static const u8 enc_assoc060[] __initconst = {
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80
+};
+static const u8 enc_nonce060[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00
+};
+static const u8 enc_key060[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - misc */
+static const u8 enc_input061[] __initconst = {
+ 0x25, 0x6d, 0x40, 0x08, 0x80, 0x94, 0x17, 0x03,
+ 0x55, 0xd3, 0x04, 0x04, 0x64, 0x43, 0xfe, 0x68,
+ 0xdf, 0x99, 0x47, 0x83, 0x03, 0xfb, 0x3b, 0xfb,
+ 0x80, 0xe0, 0x30, 0x3e, 0xeb, 0xd3, 0x29, 0x3e,
+ 0xe3, 0xbc, 0xdb, 0xdb, 0x1e, 0xde, 0xfc, 0x7e,
+ 0x8b, 0xcd, 0xa1, 0x36, 0xa1, 0x5c, 0x8c, 0xab,
+ 0x08, 0x69, 0xff, 0x52, 0xec, 0x5e, 0x26, 0x65,
+ 0x53, 0xb7, 0xb2, 0xa7, 0xfe, 0x87, 0xfd, 0x3d,
+ 0x7a, 0xda, 0x44, 0xc2, 0x42, 0x69, 0xbf, 0x7a,
+ 0x55, 0x27, 0xf2, 0xf0, 0xac, 0xf6, 0x85, 0x82,
+ 0xb7, 0x4c, 0x5a, 0x62, 0xe6, 0x0c, 0x05, 0x00,
+ 0x98, 0x1a, 0x49, 0xb8, 0x45, 0x93, 0x92, 0x44,
+ 0x9b, 0xb2, 0xf2, 0x04, 0xb6, 0x46, 0xef, 0x47,
+ 0xf3, 0xf0, 0xb1, 0xb6, 0x1d, 0xc3, 0x48, 0x6d,
+ 0x77, 0xd3, 0x0b, 0x45, 0x76, 0x92, 0xed, 0xb8,
+ 0xfb, 0xac, 0x01, 0x08, 0x38, 0x04, 0x88, 0x47
+};
+static const u8 enc_output061[] __initconst = {
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0xfe, 0xac, 0x49, 0x55, 0x55, 0x4e, 0x80, 0x6f,
+ 0x3a, 0x19, 0x02, 0xe2, 0x44, 0x32, 0xc0, 0x8a
+};
+static const u8 enc_assoc061[] __initconst = {
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80
+};
+static const u8 enc_nonce061[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00
+};
+static const u8 enc_key061[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - misc */
+static const u8 enc_input062[] __initconst = {
+ 0xda, 0x92, 0xbf, 0xf7, 0x7f, 0x6b, 0xe8, 0xfc,
+ 0xaa, 0x2c, 0xfb, 0xfb, 0x9b, 0xbc, 0x01, 0x97,
+ 0x20, 0x66, 0xb8, 0x7c, 0xfc, 0x04, 0xc4, 0x04,
+ 0x7f, 0x1f, 0xcf, 0xc1, 0x14, 0x2c, 0xd6, 0xc1
+};
+static const u8 enc_output062[] __initconst = {
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0x20, 0xa3, 0x79, 0x8d, 0xf1, 0x29, 0x2c, 0x59,
+ 0x72, 0xbf, 0x97, 0x41, 0xae, 0xc3, 0x8a, 0x19
+};
+static const u8 enc_assoc062[] __initconst = {
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f
+};
+static const u8 enc_nonce062[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00
+};
+static const u8 enc_key062[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - misc */
+static const u8 enc_input063[] __initconst = {
+ 0xda, 0x92, 0xbf, 0xf7, 0x7f, 0x6b, 0xe8, 0xfc,
+ 0xaa, 0x2c, 0xfb, 0xfb, 0x9b, 0xbc, 0x01, 0x97,
+ 0x20, 0x66, 0xb8, 0x7c, 0xfc, 0x04, 0xc4, 0x04,
+ 0x7f, 0x1f, 0xcf, 0xc1, 0x14, 0x2c, 0xd6, 0xc1,
+ 0x1c, 0x43, 0x24, 0x24, 0xe1, 0x21, 0x03, 0x81,
+ 0x74, 0x32, 0x5e, 0xc9, 0x5e, 0xa3, 0x73, 0x54,
+ 0xf7, 0x96, 0x00, 0xad, 0x13, 0xa1, 0xd9, 0x9a,
+ 0xac, 0x48, 0x4d, 0x58, 0x01, 0x78, 0x02, 0xc2
+};
+static const u8 enc_output063[] __initconst = {
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0xc0, 0x3d, 0x9f, 0x67, 0x35, 0x4a, 0x97, 0xb2,
+ 0xf0, 0x74, 0xf7, 0x55, 0x15, 0x57, 0xe4, 0x9c
+};
+static const u8 enc_assoc063[] __initconst = {
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f
+};
+static const u8 enc_nonce063[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00
+};
+static const u8 enc_key063[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - misc */
+static const u8 enc_input064[] __initconst = {
+ 0xda, 0x92, 0xbf, 0xf7, 0x7f, 0x6b, 0xe8, 0xfc,
+ 0xaa, 0x2c, 0xfb, 0xfb, 0x9b, 0xbc, 0x01, 0x97,
+ 0x20, 0x66, 0xb8, 0x7c, 0xfc, 0x04, 0xc4, 0x04,
+ 0x7f, 0x1f, 0xcf, 0xc1, 0x14, 0x2c, 0xd6, 0xc1,
+ 0x1c, 0x43, 0x24, 0x24, 0xe1, 0x21, 0x03, 0x81,
+ 0x74, 0x32, 0x5e, 0xc9, 0x5e, 0xa3, 0x73, 0x54,
+ 0xf7, 0x96, 0x00, 0xad, 0x13, 0xa1, 0xd9, 0x9a,
+ 0xac, 0x48, 0x4d, 0x58, 0x01, 0x78, 0x02, 0xc2,
+ 0x85, 0x25, 0xbb, 0x3d, 0xbd, 0x96, 0x40, 0x85,
+ 0xaa, 0xd8, 0x0d, 0x0f, 0x53, 0x09, 0x7a, 0x7d,
+ 0x48, 0xb3, 0xa5, 0x9d, 0x19, 0xf3, 0xfa, 0xff,
+ 0x67, 0xe5, 0xb6, 0x47, 0xba, 0x6c, 0x6d, 0xbb,
+ 0x64, 0x4d, 0x0d, 0xfb, 0x49, 0xb9, 0x10, 0xb8,
+ 0x0c, 0x0f, 0x4e, 0x49, 0xe2, 0x3c, 0xb7, 0x92,
+ 0x88, 0x2c, 0xf4, 0xba, 0x89, 0x6d, 0x12, 0x47,
+ 0x04, 0x53, 0xfe, 0xf7, 0xc7, 0xfb, 0x77, 0xb8
+};
+static const u8 enc_output064[] __initconst = {
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0xc8, 0x6d, 0xa8, 0xdd, 0x65, 0x22, 0x86, 0xd5,
+ 0x02, 0x13, 0xd3, 0x28, 0xd6, 0x3e, 0x40, 0x06
+};
+static const u8 enc_assoc064[] __initconst = {
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f
+};
+static const u8 enc_nonce064[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00
+};
+static const u8 enc_key064[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - misc */
+static const u8 enc_input065[] __initconst = {
+ 0x5a, 0x92, 0xbf, 0x77, 0xff, 0x6b, 0xe8, 0x7c,
+ 0x2a, 0x2c, 0xfb, 0x7b, 0x1b, 0xbc, 0x01, 0x17,
+ 0xa0, 0x66, 0xb8, 0xfc, 0x7c, 0x04, 0xc4, 0x84,
+ 0xff, 0x1f, 0xcf, 0x41, 0x94, 0x2c, 0xd6, 0x41
+};
+static const u8 enc_output065[] __initconst = {
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
+ 0xbe, 0xde, 0x90, 0x83, 0xce, 0xb3, 0x6d, 0xdf,
+ 0xe5, 0xfa, 0x81, 0x1f, 0x95, 0x47, 0x1c, 0x67
+};
+static const u8 enc_assoc065[] __initconst = {
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce065[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00
+};
+static const u8 enc_key065[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - misc */
+static const u8 enc_input066[] __initconst = {
+ 0x5a, 0x92, 0xbf, 0x77, 0xff, 0x6b, 0xe8, 0x7c,
+ 0x2a, 0x2c, 0xfb, 0x7b, 0x1b, 0xbc, 0x01, 0x17,
+ 0xa0, 0x66, 0xb8, 0xfc, 0x7c, 0x04, 0xc4, 0x84,
+ 0xff, 0x1f, 0xcf, 0x41, 0x94, 0x2c, 0xd6, 0x41,
+ 0x9c, 0x43, 0x24, 0xa4, 0x61, 0x21, 0x03, 0x01,
+ 0xf4, 0x32, 0x5e, 0x49, 0xde, 0xa3, 0x73, 0xd4,
+ 0x77, 0x96, 0x00, 0x2d, 0x93, 0xa1, 0xd9, 0x1a,
+ 0x2c, 0x48, 0x4d, 0xd8, 0x81, 0x78, 0x02, 0x42
+};
+static const u8 enc_output066[] __initconst = {
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
+ 0x30, 0x08, 0x74, 0xbb, 0x06, 0x92, 0xb6, 0x89,
+ 0xde, 0xad, 0x9a, 0xe1, 0x5b, 0x06, 0x73, 0x90
+};
+static const u8 enc_assoc066[] __initconst = {
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce066[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00
+};
+static const u8 enc_key066[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - misc */
+static const u8 enc_input067[] __initconst = {
+ 0x5a, 0x92, 0xbf, 0x77, 0xff, 0x6b, 0xe8, 0x7c,
+ 0x2a, 0x2c, 0xfb, 0x7b, 0x1b, 0xbc, 0x01, 0x17,
+ 0xa0, 0x66, 0xb8, 0xfc, 0x7c, 0x04, 0xc4, 0x84,
+ 0xff, 0x1f, 0xcf, 0x41, 0x94, 0x2c, 0xd6, 0x41,
+ 0x9c, 0x43, 0x24, 0xa4, 0x61, 0x21, 0x03, 0x01,
+ 0xf4, 0x32, 0x5e, 0x49, 0xde, 0xa3, 0x73, 0xd4,
+ 0x77, 0x96, 0x00, 0x2d, 0x93, 0xa1, 0xd9, 0x1a,
+ 0x2c, 0x48, 0x4d, 0xd8, 0x81, 0x78, 0x02, 0x42,
+ 0x05, 0x25, 0xbb, 0xbd, 0x3d, 0x96, 0x40, 0x05,
+ 0x2a, 0xd8, 0x0d, 0x8f, 0xd3, 0x09, 0x7a, 0xfd,
+ 0xc8, 0xb3, 0xa5, 0x1d, 0x99, 0xf3, 0xfa, 0x7f,
+ 0xe7, 0xe5, 0xb6, 0xc7, 0x3a, 0x6c, 0x6d, 0x3b,
+ 0xe4, 0x4d, 0x0d, 0x7b, 0xc9, 0xb9, 0x10, 0x38,
+ 0x8c, 0x0f, 0x4e, 0xc9, 0x62, 0x3c, 0xb7, 0x12,
+ 0x08, 0x2c, 0xf4, 0x3a, 0x09, 0x6d, 0x12, 0xc7,
+ 0x84, 0x53, 0xfe, 0x77, 0x47, 0xfb, 0x77, 0x38
+};
+static const u8 enc_output067[] __initconst = {
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
+ 0x99, 0xca, 0xd8, 0x5f, 0x45, 0xca, 0x40, 0x94,
+ 0x2d, 0x0d, 0x4d, 0x5e, 0x95, 0x0a, 0xde, 0x22
+};
+static const u8 enc_assoc067[] __initconst = {
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce067[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00
+};
+static const u8 enc_key067[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - misc */
+static const u8 enc_input068[] __initconst = {
+ 0x25, 0x6d, 0x40, 0x88, 0x7f, 0x6b, 0xe8, 0x7c,
+ 0x55, 0xd3, 0x04, 0x84, 0x9b, 0xbc, 0x01, 0x17,
+ 0xdf, 0x99, 0x47, 0x03, 0xfc, 0x04, 0xc4, 0x84,
+ 0x80, 0xe0, 0x30, 0xbe, 0x14, 0x2c, 0xd6, 0x41
+};
+static const u8 enc_output068[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x8b, 0xbe, 0x14, 0x52, 0x72, 0xe7, 0xc2, 0xd9,
+ 0xa1, 0x89, 0x1a, 0x3a, 0xb0, 0x98, 0x3d, 0x9d
+};
+static const u8 enc_assoc068[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce068[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00
+};
+static const u8 enc_key068[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - misc */
+static const u8 enc_input069[] __initconst = {
+ 0x25, 0x6d, 0x40, 0x88, 0x7f, 0x6b, 0xe8, 0x7c,
+ 0x55, 0xd3, 0x04, 0x84, 0x9b, 0xbc, 0x01, 0x17,
+ 0xdf, 0x99, 0x47, 0x03, 0xfc, 0x04, 0xc4, 0x84,
+ 0x80, 0xe0, 0x30, 0xbe, 0x14, 0x2c, 0xd6, 0x41,
+ 0xe3, 0xbc, 0xdb, 0x5b, 0xe1, 0x21, 0x03, 0x01,
+ 0x8b, 0xcd, 0xa1, 0xb6, 0x5e, 0xa3, 0x73, 0xd4,
+ 0x08, 0x69, 0xff, 0xd2, 0x13, 0xa1, 0xd9, 0x1a,
+ 0x53, 0xb7, 0xb2, 0x27, 0x01, 0x78, 0x02, 0x42
+};
+static const u8 enc_output069[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x3b, 0x41, 0x86, 0x19, 0x13, 0xa8, 0xf6, 0xde,
+ 0x7f, 0x61, 0xe2, 0x25, 0x63, 0x1b, 0xc3, 0x82
+};
+static const u8 enc_assoc069[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce069[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00
+};
+static const u8 enc_key069[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - misc */
+static const u8 enc_input070[] __initconst = {
+ 0x25, 0x6d, 0x40, 0x88, 0x7f, 0x6b, 0xe8, 0x7c,
+ 0x55, 0xd3, 0x04, 0x84, 0x9b, 0xbc, 0x01, 0x17,
+ 0xdf, 0x99, 0x47, 0x03, 0xfc, 0x04, 0xc4, 0x84,
+ 0x80, 0xe0, 0x30, 0xbe, 0x14, 0x2c, 0xd6, 0x41,
+ 0xe3, 0xbc, 0xdb, 0x5b, 0xe1, 0x21, 0x03, 0x01,
+ 0x8b, 0xcd, 0xa1, 0xb6, 0x5e, 0xa3, 0x73, 0xd4,
+ 0x08, 0x69, 0xff, 0xd2, 0x13, 0xa1, 0xd9, 0x1a,
+ 0x53, 0xb7, 0xb2, 0x27, 0x01, 0x78, 0x02, 0x42,
+ 0x7a, 0xda, 0x44, 0x42, 0xbd, 0x96, 0x40, 0x05,
+ 0x55, 0x27, 0xf2, 0x70, 0x53, 0x09, 0x7a, 0xfd,
+ 0xb7, 0x4c, 0x5a, 0xe2, 0x19, 0xf3, 0xfa, 0x7f,
+ 0x98, 0x1a, 0x49, 0x38, 0xba, 0x6c, 0x6d, 0x3b,
+ 0x9b, 0xb2, 0xf2, 0x84, 0x49, 0xb9, 0x10, 0x38,
+ 0xf3, 0xf0, 0xb1, 0x36, 0xe2, 0x3c, 0xb7, 0x12,
+ 0x77, 0xd3, 0x0b, 0xc5, 0x89, 0x6d, 0x12, 0xc7,
+ 0xfb, 0xac, 0x01, 0x88, 0xc7, 0xfb, 0x77, 0x38
+};
+static const u8 enc_output070[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x84, 0x28, 0xbc, 0xf0, 0x23, 0xec, 0x6b, 0xf3,
+ 0x1f, 0xd9, 0xef, 0xb2, 0x03, 0xff, 0x08, 0x71
+};
+static const u8 enc_assoc070[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce070[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00
+};
+static const u8 enc_key070[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - misc */
+static const u8 enc_input071[] __initconst = {
+ 0xda, 0x92, 0xbf, 0x77, 0x80, 0x94, 0x17, 0x83,
+ 0xaa, 0x2c, 0xfb, 0x7b, 0x64, 0x43, 0xfe, 0xe8,
+ 0x20, 0x66, 0xb8, 0xfc, 0x03, 0xfb, 0x3b, 0x7b,
+ 0x7f, 0x1f, 0xcf, 0x41, 0xeb, 0xd3, 0x29, 0xbe
+};
+static const u8 enc_output071[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0x13, 0x9f, 0xdf, 0x64, 0x74, 0xea, 0x24, 0xf5,
+ 0x49, 0xb0, 0x75, 0x82, 0x5f, 0x2c, 0x76, 0x20
+};
+static const u8 enc_assoc071[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00
+};
+static const u8 enc_nonce071[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00
+};
+static const u8 enc_key071[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - misc */
+static const u8 enc_input072[] __initconst = {
+ 0xda, 0x92, 0xbf, 0x77, 0x80, 0x94, 0x17, 0x83,
+ 0xaa, 0x2c, 0xfb, 0x7b, 0x64, 0x43, 0xfe, 0xe8,
+ 0x20, 0x66, 0xb8, 0xfc, 0x03, 0xfb, 0x3b, 0x7b,
+ 0x7f, 0x1f, 0xcf, 0x41, 0xeb, 0xd3, 0x29, 0xbe,
+ 0x1c, 0x43, 0x24, 0xa4, 0x1e, 0xde, 0xfc, 0xfe,
+ 0x74, 0x32, 0x5e, 0x49, 0xa1, 0x5c, 0x8c, 0x2b,
+ 0xf7, 0x96, 0x00, 0x2d, 0xec, 0x5e, 0x26, 0xe5,
+ 0xac, 0x48, 0x4d, 0xd8, 0xfe, 0x87, 0xfd, 0xbd
+};
+static const u8 enc_output072[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xbb, 0xad, 0x8d, 0x86, 0x3b, 0x83, 0x5a, 0x8e,
+ 0x86, 0x64, 0xfd, 0x1d, 0x45, 0x66, 0xb6, 0xb4
+};
+static const u8 enc_assoc072[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00
+};
+static const u8 enc_nonce072[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00
+};
+static const u8 enc_key072[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - misc */
+static const u8 enc_input073[] __initconst = {
+ 0xda, 0x92, 0xbf, 0x77, 0x80, 0x94, 0x17, 0x83,
+ 0xaa, 0x2c, 0xfb, 0x7b, 0x64, 0x43, 0xfe, 0xe8,
+ 0x20, 0x66, 0xb8, 0xfc, 0x03, 0xfb, 0x3b, 0x7b,
+ 0x7f, 0x1f, 0xcf, 0x41, 0xeb, 0xd3, 0x29, 0xbe,
+ 0x1c, 0x43, 0x24, 0xa4, 0x1e, 0xde, 0xfc, 0xfe,
+ 0x74, 0x32, 0x5e, 0x49, 0xa1, 0x5c, 0x8c, 0x2b,
+ 0xf7, 0x96, 0x00, 0x2d, 0xec, 0x5e, 0x26, 0xe5,
+ 0xac, 0x48, 0x4d, 0xd8, 0xfe, 0x87, 0xfd, 0xbd,
+ 0x85, 0x25, 0xbb, 0xbd, 0x42, 0x69, 0xbf, 0xfa,
+ 0xaa, 0xd8, 0x0d, 0x8f, 0xac, 0xf6, 0x85, 0x02,
+ 0x48, 0xb3, 0xa5, 0x1d, 0xe6, 0x0c, 0x05, 0x80,
+ 0x67, 0xe5, 0xb6, 0xc7, 0x45, 0x93, 0x92, 0xc4,
+ 0x64, 0x4d, 0x0d, 0x7b, 0xb6, 0x46, 0xef, 0xc7,
+ 0x0c, 0x0f, 0x4e, 0xc9, 0x1d, 0xc3, 0x48, 0xed,
+ 0x88, 0x2c, 0xf4, 0x3a, 0x76, 0x92, 0xed, 0x38,
+ 0x04, 0x53, 0xfe, 0x77, 0x38, 0x04, 0x88, 0xc7
+};
+static const u8 enc_output073[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0x42, 0xf2, 0x35, 0x42, 0x97, 0x84, 0x9a, 0x51,
+ 0x1d, 0x53, 0xe5, 0x57, 0x17, 0x72, 0xf7, 0x1f
+};
+static const u8 enc_assoc073[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00
+};
+static const u8 enc_nonce073[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00
+};
+static const u8 enc_key073[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - checking for int overflows */
+static const u8 enc_input076[] __initconst = {
+ 0x1b, 0x99, 0x6f, 0x9a, 0x3c, 0xcc, 0x67, 0x85,
+ 0xde, 0x22, 0xff, 0x5b, 0x8a, 0xdd, 0x95, 0x02,
+ 0xce, 0x03, 0xa0, 0xfa, 0xf5, 0x99, 0x2a, 0x09,
+ 0x52, 0x2c, 0xdd, 0x12, 0x06, 0xd2, 0x20, 0xb8,
+ 0xf8, 0xbd, 0x07, 0xd1, 0xf1, 0xf5, 0xa1, 0xbd,
+ 0x9a, 0x71, 0xd1, 0x1c, 0x7f, 0x57, 0x9b, 0x85,
+ 0x58, 0x18, 0xc0, 0x8d, 0x4d, 0xe0, 0x36, 0x39,
+ 0x31, 0x83, 0xb7, 0xf5, 0x90, 0xb3, 0x35, 0xae,
+ 0xd8, 0xde, 0x5b, 0x57, 0xb1, 0x3c, 0x5f, 0xed,
+ 0xe2, 0x44, 0x1c, 0x3e, 0x18, 0x4a, 0xa9, 0xd4,
+ 0x6e, 0x61, 0x59, 0x85, 0x06, 0xb3, 0xe1, 0x1c,
+ 0x43, 0xc6, 0x2c, 0xbc, 0xac, 0xec, 0xed, 0x33,
+ 0x19, 0x08, 0x75, 0xb0, 0x12, 0x21, 0x8b, 0x19,
+ 0x30, 0xfb, 0x7c, 0x38, 0xec, 0x45, 0xac, 0x11,
+ 0xc3, 0x53, 0xd0, 0xcf, 0x93, 0x8d, 0xcc, 0xb9,
+ 0xef, 0xad, 0x8f, 0xed, 0xbe, 0x46, 0xda, 0xa5
+};
+static const u8 enc_output076[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x4b, 0x0b, 0xda, 0x8a, 0xd0, 0x43, 0x83, 0x0d,
+ 0x83, 0x19, 0xab, 0x82, 0xc5, 0x0c, 0x76, 0x63
+};
+static const u8 enc_assoc076[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce076[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0xb4, 0xf0
+};
+static const u8 enc_key076[] __initconst = {
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30
+};
+
+/* wycheproof - checking for int overflows */
+static const u8 enc_input077[] __initconst = {
+ 0x86, 0xcb, 0xac, 0xae, 0x4d, 0x3f, 0x74, 0xae,
+ 0x01, 0x21, 0x3e, 0x05, 0x51, 0xcc, 0x15, 0x16,
+ 0x0e, 0xa1, 0xbe, 0x84, 0x08, 0xe3, 0xd5, 0xd7,
+ 0x4f, 0x01, 0x46, 0x49, 0x95, 0xa6, 0x9e, 0x61,
+ 0x76, 0xcb, 0x9e, 0x02, 0xb2, 0x24, 0x7e, 0xd2,
+ 0x99, 0x89, 0x2f, 0x91, 0x82, 0xa4, 0x5c, 0xaf,
+ 0x4c, 0x69, 0x40, 0x56, 0x11, 0x76, 0x6e, 0xdf,
+ 0xaf, 0xdc, 0x28, 0x55, 0x19, 0xea, 0x30, 0x48,
+ 0x0c, 0x44, 0xf0, 0x5e, 0x78, 0x1e, 0xac, 0xf8,
+ 0xfc, 0xec, 0xc7, 0x09, 0x0a, 0xbb, 0x28, 0xfa,
+ 0x5f, 0xd5, 0x85, 0xac, 0x8c, 0xda, 0x7e, 0x87,
+ 0x72, 0xe5, 0x94, 0xe4, 0xce, 0x6c, 0x88, 0x32,
+ 0x81, 0x93, 0x2e, 0x0f, 0x89, 0xf8, 0x77, 0xa1,
+ 0xf0, 0x4d, 0x9c, 0x32, 0xb0, 0x6c, 0xf9, 0x0b,
+ 0x0e, 0x76, 0x2b, 0x43, 0x0c, 0x4d, 0x51, 0x7c,
+ 0x97, 0x10, 0x70, 0x68, 0xf4, 0x98, 0xef, 0x7f
+};
+static const u8 enc_output077[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x4b, 0xc9, 0x8f, 0x72, 0xc4, 0x94, 0xc2, 0xa4,
+ 0x3c, 0x2b, 0x15, 0xa1, 0x04, 0x3f, 0x1c, 0xfa
+};
+static const u8 enc_assoc077[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce077[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0xfb, 0x66
+};
+static const u8 enc_key077[] __initconst = {
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30
+};
+
+/* wycheproof - checking for int overflows */
+static const u8 enc_input078[] __initconst = {
+ 0xfa, 0xb1, 0xcd, 0xdf, 0x4f, 0xe1, 0x98, 0xef,
+ 0x63, 0xad, 0xd8, 0x81, 0xd6, 0xea, 0xd6, 0xc5,
+ 0x76, 0x37, 0xbb, 0xe9, 0x20, 0x18, 0xca, 0x7c,
+ 0x0b, 0x96, 0xfb, 0xa0, 0x87, 0x1e, 0x93, 0x2d,
+ 0xb1, 0xfb, 0xf9, 0x07, 0x61, 0xbe, 0x25, 0xdf,
+ 0x8d, 0xfa, 0xf9, 0x31, 0xce, 0x57, 0x57, 0xe6,
+ 0x17, 0xb3, 0xd7, 0xa9, 0xf0, 0xbf, 0x0f, 0xfe,
+ 0x5d, 0x59, 0x1a, 0x33, 0xc1, 0x43, 0xb8, 0xf5,
+ 0x3f, 0xd0, 0xb5, 0xa1, 0x96, 0x09, 0xfd, 0x62,
+ 0xe5, 0xc2, 0x51, 0xa4, 0x28, 0x1a, 0x20, 0x0c,
+ 0xfd, 0xc3, 0x4f, 0x28, 0x17, 0x10, 0x40, 0x6f,
+ 0x4e, 0x37, 0x62, 0x54, 0x46, 0xff, 0x6e, 0xf2,
+ 0x24, 0x91, 0x3d, 0xeb, 0x0d, 0x89, 0xaf, 0x33,
+ 0x71, 0x28, 0xe3, 0xd1, 0x55, 0xd1, 0x6d, 0x3e,
+ 0xc3, 0x24, 0x60, 0x41, 0x43, 0x21, 0x43, 0xe9,
+ 0xab, 0x3a, 0x6d, 0x2c, 0xcc, 0x2f, 0x4d, 0x62
+};
+static const u8 enc_output078[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xf7, 0xe9, 0xe1, 0x51, 0xb0, 0x25, 0x33, 0xc7,
+ 0x46, 0x58, 0xbf, 0xc7, 0x73, 0x7c, 0x68, 0x0d
+};
+static const u8 enc_assoc078[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce078[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0xbb, 0x90
+};
+static const u8 enc_key078[] __initconst = {
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30
+};
+
+/* wycheproof - checking for int overflows */
+static const u8 enc_input079[] __initconst = {
+ 0x22, 0x72, 0x02, 0xbe, 0x7f, 0x35, 0x15, 0xe9,
+ 0xd1, 0xc0, 0x2e, 0xea, 0x2f, 0x19, 0x50, 0xb6,
+ 0x48, 0x1b, 0x04, 0x8a, 0x4c, 0x91, 0x50, 0x6c,
+ 0xb4, 0x0d, 0x50, 0x4e, 0x6c, 0x94, 0x9f, 0x82,
+ 0xd1, 0x97, 0xc2, 0x5a, 0xd1, 0x7d, 0xc7, 0x21,
+ 0x65, 0x11, 0x25, 0x78, 0x2a, 0xc7, 0xa7, 0x12,
+ 0x47, 0xfe, 0xae, 0xf3, 0x2f, 0x1f, 0x25, 0x0c,
+ 0xe4, 0xbb, 0x8f, 0x79, 0xac, 0xaa, 0x17, 0x9d,
+ 0x45, 0xa7, 0xb0, 0x54, 0x5f, 0x09, 0x24, 0x32,
+ 0x5e, 0xfa, 0x87, 0xd5, 0xe4, 0x41, 0xd2, 0x84,
+ 0x78, 0xc6, 0x1f, 0x22, 0x23, 0xee, 0x67, 0xc3,
+ 0xb4, 0x1f, 0x43, 0x94, 0x53, 0x5e, 0x2a, 0x24,
+ 0x36, 0x9a, 0x2e, 0x16, 0x61, 0x3c, 0x45, 0x94,
+ 0x90, 0xc1, 0x4f, 0xb1, 0xd7, 0x55, 0xfe, 0x53,
+ 0xfb, 0xe1, 0xee, 0x45, 0xb1, 0xb2, 0x1f, 0x71,
+ 0x62, 0xe2, 0xfc, 0xaa, 0x74, 0x2a, 0xbe, 0xfd
+};
+static const u8 enc_output079[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x79, 0x5b, 0xcf, 0xf6, 0x47, 0xc5, 0x53, 0xc2,
+ 0xe4, 0xeb, 0x6e, 0x0e, 0xaf, 0xd9, 0xe0, 0x4e
+};
+static const u8 enc_assoc079[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce079[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x48, 0x4a
+};
+static const u8 enc_key079[] __initconst = {
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30
+};
+
+/* wycheproof - checking for int overflows */
+static const u8 enc_input080[] __initconst = {
+ 0xfa, 0xe5, 0x83, 0x45, 0xc1, 0x6c, 0xb0, 0xf5,
+ 0xcc, 0x53, 0x7f, 0x2b, 0x1b, 0x34, 0x69, 0xc9,
+ 0x69, 0x46, 0x3b, 0x3e, 0xa7, 0x1b, 0xcf, 0x6b,
+ 0x98, 0xd6, 0x69, 0xa8, 0xe6, 0x0e, 0x04, 0xfc,
+ 0x08, 0xd5, 0xfd, 0x06, 0x9c, 0x36, 0x26, 0x38,
+ 0xe3, 0x40, 0x0e, 0xf4, 0xcb, 0x24, 0x2e, 0x27,
+ 0xe2, 0x24, 0x5e, 0x68, 0xcb, 0x9e, 0xc5, 0x83,
+ 0xda, 0x53, 0x40, 0xb1, 0x2e, 0xdf, 0x42, 0x3b,
+ 0x73, 0x26, 0xad, 0x20, 0xfe, 0xeb, 0x57, 0xda,
+ 0xca, 0x2e, 0x04, 0x67, 0xa3, 0x28, 0x99, 0xb4,
+ 0x2d, 0xf8, 0xe5, 0x6d, 0x84, 0xe0, 0x06, 0xbc,
+ 0x8a, 0x7a, 0xcc, 0x73, 0x1e, 0x7c, 0x1f, 0x6b,
+ 0xec, 0xb5, 0x71, 0x9f, 0x70, 0x77, 0xf0, 0xd4,
+ 0xf4, 0xc6, 0x1a, 0xb1, 0x1e, 0xba, 0xc1, 0x00,
+ 0x18, 0x01, 0xce, 0x33, 0xc4, 0xe4, 0xa7, 0x7d,
+ 0x83, 0x1d, 0x3c, 0xe3, 0x4e, 0x84, 0x10, 0xe1
+};
+static const u8 enc_output080[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x19, 0x46, 0xd6, 0x53, 0x96, 0x0f, 0x94, 0x7a,
+ 0x74, 0xd3, 0xe8, 0x09, 0x3c, 0xf4, 0x85, 0x02
+};
+static const u8 enc_assoc080[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce080[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x93, 0x2f, 0x40
+};
+static const u8 enc_key080[] __initconst = {
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30
+};
+
+/* wycheproof - checking for int overflows */
+static const u8 enc_input081[] __initconst = {
+ 0xeb, 0xb2, 0x16, 0xdd, 0xd7, 0xca, 0x70, 0x92,
+ 0x15, 0xf5, 0x03, 0xdf, 0x9c, 0xe6, 0x3c, 0x5c,
+ 0xd2, 0x19, 0x4e, 0x7d, 0x90, 0x99, 0xe8, 0xa9,
+ 0x0b, 0x2a, 0xfa, 0xad, 0x5e, 0xba, 0x35, 0x06,
+ 0x99, 0x25, 0xa6, 0x03, 0xfd, 0xbc, 0x34, 0x1a,
+ 0xae, 0xd4, 0x15, 0x05, 0xb1, 0x09, 0x41, 0xfa,
+ 0x38, 0x56, 0xa7, 0xe2, 0x47, 0xb1, 0x04, 0x07,
+ 0x09, 0x74, 0x6c, 0xfc, 0x20, 0x96, 0xca, 0xa6,
+ 0x31, 0xb2, 0xff, 0xf4, 0x1c, 0x25, 0x05, 0x06,
+ 0xd8, 0x89, 0xc1, 0xc9, 0x06, 0x71, 0xad, 0xe8,
+ 0x53, 0xee, 0x63, 0x94, 0xc1, 0x91, 0x92, 0xa5,
+ 0xcf, 0x37, 0x10, 0xd1, 0x07, 0x30, 0x99, 0xe5,
+ 0xbc, 0x94, 0x65, 0x82, 0xfc, 0x0f, 0xab, 0x9f,
+ 0x54, 0x3c, 0x71, 0x6a, 0xe2, 0x48, 0x6a, 0x86,
+ 0x83, 0xfd, 0xca, 0x39, 0xd2, 0xe1, 0x4f, 0x23,
+ 0xd0, 0x0a, 0x58, 0x26, 0x64, 0xf4, 0xec, 0xb1
+};
+static const u8 enc_output081[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x36, 0xc3, 0x00, 0x29, 0x85, 0xdd, 0x21, 0xba,
+ 0xf8, 0x95, 0xd6, 0x33, 0x57, 0x3f, 0x12, 0xc0
+};
+static const u8 enc_assoc081[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce081[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xe2, 0x93, 0x35
+};
+static const u8 enc_key081[] __initconst = {
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30
+};
+
+/* wycheproof - checking for int overflows */
+static const u8 enc_input082[] __initconst = {
+ 0x40, 0x8a, 0xe6, 0xef, 0x1c, 0x7e, 0xf0, 0xfb,
+ 0x2c, 0x2d, 0x61, 0x08, 0x16, 0xfc, 0x78, 0x49,
+ 0xef, 0xa5, 0x8f, 0x78, 0x27, 0x3f, 0x5f, 0x16,
+ 0x6e, 0xa6, 0x5f, 0x81, 0xb5, 0x75, 0x74, 0x7d,
+ 0x03, 0x5b, 0x30, 0x40, 0xfe, 0xde, 0x1e, 0xb9,
+ 0x45, 0x97, 0x88, 0x66, 0x97, 0x88, 0x40, 0x8e,
+ 0x00, 0x41, 0x3b, 0x3e, 0x37, 0x6d, 0x15, 0x2d,
+ 0x20, 0x4a, 0xa2, 0xb7, 0xa8, 0x35, 0x58, 0xfc,
+ 0xd4, 0x8a, 0x0e, 0xf7, 0xa2, 0x6b, 0x1c, 0xd6,
+ 0xd3, 0x5d, 0x23, 0xb3, 0xf5, 0xdf, 0xe0, 0xca,
+ 0x77, 0xa4, 0xce, 0x32, 0xb9, 0x4a, 0xbf, 0x83,
+ 0xda, 0x2a, 0xef, 0xca, 0xf0, 0x68, 0x38, 0x08,
+ 0x79, 0xe8, 0x9f, 0xb0, 0xa3, 0x82, 0x95, 0x95,
+ 0xcf, 0x44, 0xc3, 0x85, 0x2a, 0xe2, 0xcc, 0x66,
+ 0x2b, 0x68, 0x9f, 0x93, 0x55, 0xd9, 0xc1, 0x83,
+ 0x80, 0x1f, 0x6a, 0xcc, 0x31, 0x3f, 0x89, 0x07
+};
+static const u8 enc_output082[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x65, 0x14, 0x51, 0x8e, 0x0a, 0x26, 0x41, 0x42,
+ 0xe0, 0xb7, 0x35, 0x1f, 0x96, 0x7f, 0xc2, 0xae
+};
+static const u8 enc_assoc082[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce082[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0xf7, 0xd5
+};
+static const u8 enc_key082[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - checking for int overflows */
+static const u8 enc_input083[] __initconst = {
+ 0x0a, 0x0a, 0x24, 0x49, 0x9b, 0xca, 0xde, 0x58,
+ 0xcf, 0x15, 0x76, 0xc3, 0x12, 0xac, 0xa9, 0x84,
+ 0x71, 0x8c, 0xb4, 0xcc, 0x7e, 0x01, 0x53, 0xf5,
+ 0xa9, 0x01, 0x58, 0x10, 0x85, 0x96, 0x44, 0xdf,
+ 0xc0, 0x21, 0x17, 0x4e, 0x0b, 0x06, 0x0a, 0x39,
+ 0x74, 0x48, 0xde, 0x8b, 0x48, 0x4a, 0x86, 0x03,
+ 0xbe, 0x68, 0x0a, 0x69, 0x34, 0xc0, 0x90, 0x6f,
+ 0x30, 0xdd, 0x17, 0xea, 0xe2, 0xd4, 0xc5, 0xfa,
+ 0xa7, 0x77, 0xf8, 0xca, 0x53, 0x37, 0x0e, 0x08,
+ 0x33, 0x1b, 0x88, 0xc3, 0x42, 0xba, 0xc9, 0x59,
+ 0x78, 0x7b, 0xbb, 0x33, 0x93, 0x0e, 0x3b, 0x56,
+ 0xbe, 0x86, 0xda, 0x7f, 0x2a, 0x6e, 0xb1, 0xf9,
+ 0x40, 0x89, 0xd1, 0xd1, 0x81, 0x07, 0x4d, 0x43,
+ 0x02, 0xf8, 0xe0, 0x55, 0x2d, 0x0d, 0xe1, 0xfa,
+ 0xb3, 0x06, 0xa2, 0x1b, 0x42, 0xd4, 0xc3, 0xba,
+ 0x6e, 0x6f, 0x0c, 0xbc, 0xc8, 0x1e, 0x87, 0x7a
+};
+static const u8 enc_output083[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x4c, 0x19, 0x4d, 0xa6, 0xa9, 0x9f, 0xd6, 0x5b,
+ 0x40, 0xe9, 0xca, 0xd7, 0x98, 0xf4, 0x4b, 0x19
+};
+static const u8 enc_assoc083[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce083[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x3d, 0xfc, 0xe4
+};
+static const u8 enc_key083[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - checking for int overflows */
+static const u8 enc_input084[] __initconst = {
+ 0x4a, 0x0a, 0xaf, 0xf8, 0x49, 0x47, 0x29, 0x18,
+ 0x86, 0x91, 0x70, 0x13, 0x40, 0xf3, 0xce, 0x2b,
+ 0x8a, 0x78, 0xee, 0xd3, 0xa0, 0xf0, 0x65, 0x99,
+ 0x4b, 0x72, 0x48, 0x4e, 0x79, 0x91, 0xd2, 0x5c,
+ 0x29, 0xaa, 0x07, 0x5e, 0xb1, 0xfc, 0x16, 0xde,
+ 0x93, 0xfe, 0x06, 0x90, 0x58, 0x11, 0x2a, 0xb2,
+ 0x84, 0xa3, 0xed, 0x18, 0x78, 0x03, 0x26, 0xd1,
+ 0x25, 0x8a, 0x47, 0x22, 0x2f, 0xa6, 0x33, 0xd8,
+ 0xb2, 0x9f, 0x3b, 0xd9, 0x15, 0x0b, 0x23, 0x9b,
+ 0x15, 0x46, 0xc2, 0xbb, 0x9b, 0x9f, 0x41, 0x0f,
+ 0xeb, 0xea, 0xd3, 0x96, 0x00, 0x0e, 0xe4, 0x77,
+ 0x70, 0x15, 0x32, 0xc3, 0xd0, 0xf5, 0xfb, 0xf8,
+ 0x95, 0xd2, 0x80, 0x19, 0x6d, 0x2f, 0x73, 0x7c,
+ 0x5e, 0x9f, 0xec, 0x50, 0xd9, 0x2b, 0xb0, 0xdf,
+ 0x5d, 0x7e, 0x51, 0x3b, 0xe5, 0xb8, 0xea, 0x97,
+ 0x13, 0x10, 0xd5, 0xbf, 0x16, 0xba, 0x7a, 0xee
+};
+static const u8 enc_output084[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xc8, 0xae, 0x77, 0x88, 0xcd, 0x28, 0x74, 0xab,
+ 0xc1, 0x38, 0x54, 0x1e, 0x11, 0xfd, 0x05, 0x87
+};
+static const u8 enc_assoc084[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce084[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0x84, 0x86, 0xa8
+};
+static const u8 enc_key084[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - checking for int overflows */
+static const u8 enc_input085[] __initconst = {
+ 0xff, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66,
+ 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c,
+ 0x78, 0x3d, 0x35, 0xf6, 0x13, 0xe6, 0xd9, 0x09,
+ 0x3d, 0x38, 0xe9, 0x75, 0xc3, 0x8f, 0xe3, 0xb8,
+ 0x9f, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca,
+ 0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64,
+ 0x9c, 0xf6, 0x37, 0x81, 0x71, 0xea, 0xe4, 0x39,
+ 0x6e, 0xa1, 0x5d, 0xc2, 0x40, 0xd1, 0xab, 0xf4,
+ 0x47, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2,
+ 0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73,
+ 0xd4, 0xd2, 0x06, 0x61, 0x6f, 0x92, 0x93, 0xf6,
+ 0x5b, 0x45, 0xdb, 0xbc, 0x74, 0xe7, 0xc2, 0xed,
+ 0xfb, 0xcb, 0xbf, 0x1c, 0xfb, 0x67, 0x9b, 0xb7,
+ 0x39, 0xa5, 0x86, 0x2d, 0xe2, 0xbc, 0xb9, 0x37,
+ 0xf7, 0x4d, 0x5b, 0xf8, 0x67, 0x1c, 0x5a, 0x8a,
+ 0x50, 0x92, 0xf6, 0x1d, 0x54, 0xc9, 0xaa, 0x5b
+};
+static const u8 enc_output085[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x93, 0x3a, 0x51, 0x63, 0xc7, 0xf6, 0x23, 0x68,
+ 0x32, 0x7b, 0x3f, 0xbc, 0x10, 0x36, 0xc9, 0x43
+};
+static const u8 enc_assoc085[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce085[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
+};
+static const u8 enc_key085[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - edge case intermediate sums in poly1305 */
+static const u8 enc_input093[] __initconst = {
+ 0x00, 0x52, 0x35, 0xd2, 0xa9, 0x19, 0xf2, 0x8d,
+ 0x3d, 0xb7, 0x66, 0x4a, 0x34, 0xae, 0x6b, 0x44,
+ 0x4d, 0x3d, 0x35, 0xf6, 0x13, 0xe6, 0xd9, 0x09,
+ 0x3d, 0x38, 0xe9, 0x75, 0xc3, 0x8f, 0xe3, 0xb8,
+ 0x5b, 0x8b, 0x94, 0x50, 0x9e, 0x2b, 0x74, 0xa3,
+ 0x6d, 0x34, 0x6e, 0x33, 0xd5, 0x72, 0x65, 0x9b,
+ 0xa9, 0xf6, 0x37, 0x81, 0x71, 0xea, 0xe4, 0x39,
+ 0x6e, 0xa1, 0x5d, 0xc2, 0x40, 0xd1, 0xab, 0xf4,
+ 0x83, 0xdc, 0xe9, 0xf3, 0x07, 0x3e, 0xfa, 0xdb,
+ 0x7d, 0x23, 0xb8, 0x7a, 0xce, 0x35, 0x16, 0x8c
+};
+static const u8 enc_output093[] __initconst = {
+ 0x00, 0x39, 0xe2, 0xfd, 0x2f, 0xd3, 0x12, 0x14,
+ 0x9e, 0x98, 0x98, 0x80, 0x88, 0x48, 0x13, 0xe7,
+ 0xca, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x3b, 0x0e, 0x86, 0x9a, 0xaa, 0x8e, 0xa4, 0x96,
+ 0x32, 0xff, 0xff, 0x37, 0xb9, 0xe8, 0xce, 0x00,
+ 0xca, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x3b, 0x0e, 0x86, 0x9a, 0xaa, 0x8e, 0xa4, 0x96,
+ 0x32, 0xff, 0xff, 0x37, 0xb9, 0xe8, 0xce, 0x00,
+ 0xa5, 0x19, 0xac, 0x1a, 0x35, 0xb4, 0xa5, 0x77,
+ 0x87, 0x51, 0x0a, 0xf7, 0x8d, 0x8d, 0x20, 0x0a
+};
+static const u8 enc_assoc093[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce093[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
+};
+static const u8 enc_key093[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - edge case intermediate sums in poly1305 */
+static const u8 enc_input094[] __initconst = {
+ 0xd3, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66,
+ 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c,
+ 0xe5, 0xda, 0x78, 0x76, 0x6f, 0xa1, 0x92, 0x90,
+ 0xc0, 0x31, 0xf7, 0x52, 0x08, 0x50, 0x67, 0x45,
+ 0xae, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca,
+ 0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64,
+ 0x49, 0x6d, 0xde, 0xb0, 0x55, 0x09, 0xc6, 0xef,
+ 0xff, 0xab, 0x75, 0xeb, 0x2d, 0xf4, 0xab, 0x09,
+ 0x76, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2,
+ 0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73,
+ 0x01, 0x49, 0xef, 0x50, 0x4b, 0x71, 0xb1, 0x20,
+ 0xca, 0x4f, 0xf3, 0x95, 0x19, 0xc2, 0xc2, 0x10
+};
+static const u8 enc_output094[] __initconst = {
+ 0xd3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x62, 0x18, 0xb2, 0x7f, 0x83, 0xb8, 0xb4, 0x66,
+ 0x02, 0xf6, 0xe1, 0xd8, 0x34, 0x20, 0x7b, 0x02,
+ 0xce, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x2a, 0x64, 0x16, 0xce, 0xdb, 0x1c, 0xdd, 0x29,
+ 0x6e, 0xf5, 0xd7, 0xd6, 0x92, 0xda, 0xff, 0x02,
+ 0xce, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x2a, 0x64, 0x16, 0xce, 0xdb, 0x1c, 0xdd, 0x29,
+ 0x6e, 0xf5, 0xd7, 0xd6, 0x92, 0xda, 0xff, 0x02,
+ 0x30, 0x2f, 0xe8, 0x2a, 0xb0, 0xa0, 0x9a, 0xf6,
+ 0x44, 0x00, 0xd0, 0x15, 0xae, 0x83, 0xd9, 0xcc
+};
+static const u8 enc_assoc094[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce094[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
+};
+static const u8 enc_key094[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - edge case intermediate sums in poly1305 */
+static const u8 enc_input095[] __initconst = {
+ 0xe9, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66,
+ 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c,
+ 0x6d, 0xf1, 0x39, 0x4e, 0xdc, 0x53, 0x9b, 0x5b,
+ 0x3a, 0x09, 0x57, 0xbe, 0x0f, 0xb8, 0x59, 0x46,
+ 0x80, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca,
+ 0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64,
+ 0xd1, 0x76, 0x9f, 0xe8, 0x06, 0xbb, 0xfe, 0xb6,
+ 0xf5, 0x90, 0x95, 0x0f, 0x2e, 0xac, 0x9e, 0x0a,
+ 0x58, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2,
+ 0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73,
+ 0x99, 0x52, 0xae, 0x08, 0x18, 0xc3, 0x89, 0x79,
+ 0xc0, 0x74, 0x13, 0x71, 0x1a, 0x9a, 0xf7, 0x13
+};
+static const u8 enc_output095[] __initconst = {
+ 0xe9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xea, 0x33, 0xf3, 0x47, 0x30, 0x4a, 0xbd, 0xad,
+ 0xf8, 0xce, 0x41, 0x34, 0x33, 0xc8, 0x45, 0x01,
+ 0xe0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xb2, 0x7f, 0x57, 0x96, 0x88, 0xae, 0xe5, 0x70,
+ 0x64, 0xce, 0x37, 0x32, 0x91, 0x82, 0xca, 0x01,
+ 0xe0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xb2, 0x7f, 0x57, 0x96, 0x88, 0xae, 0xe5, 0x70,
+ 0x64, 0xce, 0x37, 0x32, 0x91, 0x82, 0xca, 0x01,
+ 0x98, 0xa7, 0xe8, 0x36, 0xe0, 0xee, 0x4d, 0x02,
+ 0x35, 0x00, 0xd0, 0x55, 0x7e, 0xc2, 0xcb, 0xe0
+};
+static const u8 enc_assoc095[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce095[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
+};
+static const u8 enc_key095[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - edge case intermediate sums in poly1305 */
+static const u8 enc_input096[] __initconst = {
+ 0xff, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66,
+ 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c,
+ 0x64, 0xf9, 0x0f, 0x5b, 0x26, 0x92, 0xb8, 0x60,
+ 0xd4, 0x59, 0x6f, 0xf4, 0xb3, 0x40, 0x2c, 0x5c,
+ 0x00, 0xb9, 0xbb, 0x53, 0x70, 0x7a, 0xa6, 0x67,
+ 0xd3, 0x56, 0xfe, 0x50, 0xc7, 0x19, 0x96, 0x94,
+ 0x03, 0x35, 0x61, 0xe7, 0xca, 0xca, 0x6d, 0x94,
+ 0x1d, 0xc3, 0xcd, 0x69, 0x14, 0xad, 0x69, 0x04
+};
+static const u8 enc_output096[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xe3, 0x3b, 0xc5, 0x52, 0xca, 0x8b, 0x9e, 0x96,
+ 0x16, 0x9e, 0x79, 0x7e, 0x8f, 0x30, 0x30, 0x1b,
+ 0x60, 0x3c, 0xa9, 0x99, 0x44, 0xdf, 0x76, 0x52,
+ 0x8c, 0x9d, 0x6f, 0x54, 0xab, 0x83, 0x3d, 0x0f,
+ 0x60, 0x3c, 0xa9, 0x99, 0x44, 0xdf, 0x76, 0x52,
+ 0x8c, 0x9d, 0x6f, 0x54, 0xab, 0x83, 0x3d, 0x0f,
+ 0x6a, 0xb8, 0xdc, 0xe2, 0xc5, 0x9d, 0xa4, 0x73,
+ 0x71, 0x30, 0xb0, 0x25, 0x2f, 0x68, 0xa8, 0xd8
+};
+static const u8 enc_assoc096[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce096[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
+};
+static const u8 enc_key096[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - edge case intermediate sums in poly1305 */
+static const u8 enc_input097[] __initconst = {
+ 0x68, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66,
+ 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c,
+ 0xb0, 0x8f, 0x25, 0x67, 0x5b, 0x9b, 0xcb, 0xf6,
+ 0xe3, 0x84, 0x07, 0xde, 0x2e, 0xc7, 0x5a, 0x47,
+ 0x9f, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca,
+ 0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64,
+ 0x2d, 0x2a, 0xf7, 0xcd, 0x6b, 0x08, 0x05, 0x01,
+ 0xd3, 0x1b, 0xa5, 0x4f, 0xb2, 0xeb, 0x75, 0x96,
+ 0x47, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2,
+ 0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73,
+ 0x65, 0x0e, 0xc6, 0x2d, 0x75, 0x70, 0x72, 0xce,
+ 0xe6, 0xff, 0x23, 0x31, 0x86, 0xdd, 0x1c, 0x8f
+};
+static const u8 enc_output097[] __initconst = {
+ 0x68, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x37, 0x4d, 0xef, 0x6e, 0xb7, 0x82, 0xed, 0x00,
+ 0x21, 0x43, 0x11, 0x54, 0x12, 0xb7, 0x46, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x4e, 0x23, 0x3f, 0xb3, 0xe5, 0x1d, 0x1e, 0xc7,
+ 0x42, 0x45, 0x07, 0x72, 0x0d, 0xc5, 0x21, 0x9d,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x4e, 0x23, 0x3f, 0xb3, 0xe5, 0x1d, 0x1e, 0xc7,
+ 0x42, 0x45, 0x07, 0x72, 0x0d, 0xc5, 0x21, 0x9d,
+ 0x04, 0x4d, 0xea, 0x60, 0x88, 0x80, 0x41, 0x2b,
+ 0xfd, 0xff, 0xcf, 0x35, 0x57, 0x9e, 0x9b, 0x26
+};
+static const u8 enc_assoc097[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce097[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
+};
+static const u8 enc_key097[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - edge case intermediate sums in poly1305 */
+static const u8 enc_input098[] __initconst = {
+ 0x6d, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66,
+ 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c,
+ 0xa1, 0x61, 0xb5, 0xab, 0x04, 0x09, 0x00, 0x62,
+ 0x9e, 0xfe, 0xff, 0x78, 0xd7, 0xd8, 0x6b, 0x45,
+ 0x9f, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca,
+ 0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64,
+ 0xc6, 0xf8, 0x07, 0x8c, 0xc8, 0xef, 0x12, 0xa0,
+ 0xff, 0x65, 0x7d, 0x6d, 0x08, 0xdb, 0x10, 0xb8,
+ 0x47, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2,
+ 0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73,
+ 0x8e, 0xdc, 0x36, 0x6c, 0xd6, 0x97, 0x65, 0x6f,
+ 0xca, 0x81, 0xfb, 0x13, 0x3c, 0xed, 0x79, 0xa1
+};
+static const u8 enc_output098[] __initconst = {
+ 0x6d, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x26, 0xa3, 0x7f, 0xa2, 0xe8, 0x10, 0x26, 0x94,
+ 0x5c, 0x39, 0xe9, 0xf2, 0xeb, 0xa8, 0x77, 0x02,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xa5, 0xf1, 0xcf, 0xf2, 0x46, 0xfa, 0x09, 0x66,
+ 0x6e, 0x3b, 0xdf, 0x50, 0xb7, 0xf5, 0x44, 0xb3,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xa5, 0xf1, 0xcf, 0xf2, 0x46, 0xfa, 0x09, 0x66,
+ 0x6e, 0x3b, 0xdf, 0x50, 0xb7, 0xf5, 0x44, 0xb3,
+ 0x1e, 0x6b, 0xea, 0x63, 0x14, 0x54, 0x2e, 0x2e,
+ 0xf9, 0xff, 0xcf, 0x45, 0x0b, 0x2e, 0x98, 0x2b
+};
+static const u8 enc_assoc098[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce098[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
+};
+static const u8 enc_key098[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - edge case intermediate sums in poly1305 */
+static const u8 enc_input099[] __initconst = {
+ 0xff, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66,
+ 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c,
+ 0xfc, 0x01, 0xb8, 0x91, 0xe5, 0xf0, 0xf9, 0x12,
+ 0x8d, 0x7d, 0x1c, 0x57, 0x91, 0x92, 0xb6, 0x98,
+ 0x63, 0x41, 0x44, 0x15, 0xb6, 0x99, 0x68, 0x95,
+ 0x9a, 0x72, 0x91, 0xb7, 0xa5, 0xaf, 0x13, 0x48,
+ 0x60, 0xcd, 0x9e, 0xa1, 0x0c, 0x29, 0xa3, 0x66,
+ 0x54, 0xe7, 0xa2, 0x8e, 0x76, 0x1b, 0xec, 0xd8
+};
+static const u8 enc_output099[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x7b, 0xc3, 0x72, 0x98, 0x09, 0xe9, 0xdf, 0xe4,
+ 0x4f, 0xba, 0x0a, 0xdd, 0xad, 0xe2, 0xaa, 0xdf,
+ 0x03, 0xc4, 0x56, 0xdf, 0x82, 0x3c, 0xb8, 0xa0,
+ 0xc5, 0xb9, 0x00, 0xb3, 0xc9, 0x35, 0xb8, 0xd3,
+ 0x03, 0xc4, 0x56, 0xdf, 0x82, 0x3c, 0xb8, 0xa0,
+ 0xc5, 0xb9, 0x00, 0xb3, 0xc9, 0x35, 0xb8, 0xd3,
+ 0xed, 0x20, 0x17, 0xc8, 0xdb, 0xa4, 0x77, 0x56,
+ 0x29, 0x04, 0x9d, 0x78, 0x6e, 0x3b, 0xce, 0xb1
+};
+static const u8 enc_assoc099[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce099[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
+};
+static const u8 enc_key099[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - edge case intermediate sums in poly1305 */
+static const u8 enc_input100[] __initconst = {
+ 0xff, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66,
+ 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c,
+ 0x6b, 0x6d, 0xc9, 0xd2, 0x1a, 0x81, 0x9e, 0x70,
+ 0xb5, 0x77, 0xf4, 0x41, 0x37, 0xd3, 0xd6, 0xbd,
+ 0x13, 0x35, 0xf5, 0xeb, 0x44, 0x49, 0x40, 0x77,
+ 0xb2, 0x64, 0x49, 0xa5, 0x4b, 0x6c, 0x7c, 0x75,
+ 0x10, 0xb9, 0x2f, 0x5f, 0xfe, 0xf9, 0x8b, 0x84,
+ 0x7c, 0xf1, 0x7a, 0x9c, 0x98, 0xd8, 0x83, 0xe5
+};
+static const u8 enc_output100[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xec, 0xaf, 0x03, 0xdb, 0xf6, 0x98, 0xb8, 0x86,
+ 0x77, 0xb0, 0xe2, 0xcb, 0x0b, 0xa3, 0xca, 0xfa,
+ 0x73, 0xb0, 0xe7, 0x21, 0x70, 0xec, 0x90, 0x42,
+ 0xed, 0xaf, 0xd8, 0xa1, 0x27, 0xf6, 0xd7, 0xee,
+ 0x73, 0xb0, 0xe7, 0x21, 0x70, 0xec, 0x90, 0x42,
+ 0xed, 0xaf, 0xd8, 0xa1, 0x27, 0xf6, 0xd7, 0xee,
+ 0x07, 0x3f, 0x17, 0xcb, 0x67, 0x78, 0x64, 0x59,
+ 0x25, 0x04, 0x9d, 0x88, 0x22, 0xcb, 0xca, 0xb6
+};
+static const u8 enc_assoc100[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce100[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
+};
+static const u8 enc_key100[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - edge case intermediate sums in poly1305 */
+static const u8 enc_input101[] __initconst = {
+ 0xff, 0xcb, 0x2b, 0x11, 0x06, 0xf8, 0x23, 0x4c,
+ 0x5e, 0x99, 0xd4, 0xdb, 0x4c, 0x70, 0x48, 0xde,
+ 0x32, 0x3d, 0x35, 0xf6, 0x13, 0xe6, 0xd9, 0x09,
+ 0x3d, 0x38, 0xe9, 0x75, 0xc3, 0x8f, 0xe3, 0xb8,
+ 0x16, 0xe9, 0x88, 0x4a, 0x11, 0x4f, 0x0e, 0x92,
+ 0x66, 0xce, 0xa3, 0x88, 0x5f, 0xe3, 0x6b, 0x9f,
+ 0xd6, 0xf6, 0x37, 0x81, 0x71, 0xea, 0xe4, 0x39,
+ 0x6e, 0xa1, 0x5d, 0xc2, 0x40, 0xd1, 0xab, 0xf4,
+ 0xce, 0xbe, 0xf5, 0xe9, 0x88, 0x5a, 0x80, 0xea,
+ 0x76, 0xd9, 0x75, 0xc1, 0x44, 0xa4, 0x18, 0x88
+};
+static const u8 enc_output101[] __initconst = {
+ 0xff, 0xa0, 0xfc, 0x3e, 0x80, 0x32, 0xc3, 0xd5,
+ 0xfd, 0xb6, 0x2a, 0x11, 0xf0, 0x96, 0x30, 0x7d,
+ 0xb5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x76, 0x6c, 0x9a, 0x80, 0x25, 0xea, 0xde, 0xa7,
+ 0x39, 0x05, 0x32, 0x8c, 0x33, 0x79, 0xc0, 0x04,
+ 0xb5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x76, 0x6c, 0x9a, 0x80, 0x25, 0xea, 0xde, 0xa7,
+ 0x39, 0x05, 0x32, 0x8c, 0x33, 0x79, 0xc0, 0x04,
+ 0x8b, 0x9b, 0xb4, 0xb4, 0x86, 0x12, 0x89, 0x65,
+ 0x8c, 0x69, 0x6a, 0x83, 0x40, 0x15, 0x04, 0x05
+};
+static const u8 enc_assoc101[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce101[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
+};
+static const u8 enc_key101[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - edge case intermediate sums in poly1305 */
+static const u8 enc_input102[] __initconst = {
+ 0x6f, 0x9e, 0x70, 0xed, 0x3b, 0x8b, 0xac, 0xa0,
+ 0x26, 0xe4, 0x6a, 0x5a, 0x09, 0x43, 0x15, 0x8d,
+ 0x21, 0x3d, 0x35, 0xf6, 0x13, 0xe6, 0xd9, 0x09,
+ 0x3d, 0x38, 0xe9, 0x75, 0xc3, 0x8f, 0xe3, 0xb8,
+ 0x0c, 0x61, 0x2c, 0x5e, 0x8d, 0x89, 0xa8, 0x73,
+ 0xdb, 0xca, 0xad, 0x5b, 0x73, 0x46, 0x42, 0x9b,
+ 0xc5, 0xf6, 0x37, 0x81, 0x71, 0xea, 0xe4, 0x39,
+ 0x6e, 0xa1, 0x5d, 0xc2, 0x40, 0xd1, 0xab, 0xf4,
+ 0xd4, 0x36, 0x51, 0xfd, 0x14, 0x9c, 0x26, 0x0b,
+ 0xcb, 0xdd, 0x7b, 0x12, 0x68, 0x01, 0x31, 0x8c
+};
+static const u8 enc_output102[] __initconst = {
+ 0x6f, 0xf5, 0xa7, 0xc2, 0xbd, 0x41, 0x4c, 0x39,
+ 0x85, 0xcb, 0x94, 0x90, 0xb5, 0xa5, 0x6d, 0x2e,
+ 0xa6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x6c, 0xe4, 0x3e, 0x94, 0xb9, 0x2c, 0x78, 0x46,
+ 0x84, 0x01, 0x3c, 0x5f, 0x1f, 0xdc, 0xe9, 0x00,
+ 0xa6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x6c, 0xe4, 0x3e, 0x94, 0xb9, 0x2c, 0x78, 0x46,
+ 0x84, 0x01, 0x3c, 0x5f, 0x1f, 0xdc, 0xe9, 0x00,
+ 0x8b, 0x3b, 0xbd, 0x51, 0x64, 0x44, 0x59, 0x56,
+ 0x8d, 0x81, 0xca, 0x1f, 0xa7, 0x2c, 0xe4, 0x04
+};
+static const u8 enc_assoc102[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce102[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
+};
+static const u8 enc_key102[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - edge case intermediate sums in poly1305 */
+static const u8 enc_input103[] __initconst = {
+ 0x41, 0x2b, 0x08, 0x0a, 0x3e, 0x19, 0xc1, 0x0d,
+ 0x44, 0xa1, 0xaf, 0x1e, 0xab, 0xde, 0xb4, 0xce,
+ 0x35, 0x3d, 0x35, 0xf6, 0x13, 0xe6, 0xd9, 0x09,
+ 0x3d, 0x38, 0xe9, 0x75, 0xc3, 0x8f, 0xe3, 0xb8,
+ 0x6b, 0x83, 0x94, 0x33, 0x09, 0x21, 0x48, 0x6c,
+ 0xa1, 0x1d, 0x29, 0x1c, 0x3e, 0x97, 0xee, 0x9a,
+ 0xd1, 0xf6, 0x37, 0x81, 0x71, 0xea, 0xe4, 0x39,
+ 0x6e, 0xa1, 0x5d, 0xc2, 0x40, 0xd1, 0xab, 0xf4,
+ 0xb3, 0xd4, 0xe9, 0x90, 0x90, 0x34, 0xc6, 0x14,
+ 0xb1, 0x0a, 0xff, 0x55, 0x25, 0xd0, 0x9d, 0x8d
+};
+static const u8 enc_output103[] __initconst = {
+ 0x41, 0x40, 0xdf, 0x25, 0xb8, 0xd3, 0x21, 0x94,
+ 0xe7, 0x8e, 0x51, 0xd4, 0x17, 0x38, 0xcc, 0x6d,
+ 0xb2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x0b, 0x06, 0x86, 0xf9, 0x3d, 0x84, 0x98, 0x59,
+ 0xfe, 0xd6, 0xb8, 0x18, 0x52, 0x0d, 0x45, 0x01,
+ 0xb2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x0b, 0x06, 0x86, 0xf9, 0x3d, 0x84, 0x98, 0x59,
+ 0xfe, 0xd6, 0xb8, 0x18, 0x52, 0x0d, 0x45, 0x01,
+ 0x86, 0xfb, 0xab, 0x2b, 0x4a, 0x94, 0xf4, 0x7a,
+ 0xa5, 0x6f, 0x0a, 0xea, 0x65, 0xd1, 0x10, 0x08
+};
+static const u8 enc_assoc103[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce103[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
+};
+static const u8 enc_key103[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - edge case intermediate sums in poly1305 */
+static const u8 enc_input104[] __initconst = {
+ 0xb2, 0x47, 0xa7, 0x47, 0x23, 0x49, 0x1a, 0xac,
+ 0xac, 0xaa, 0xd7, 0x09, 0xc9, 0x1e, 0x93, 0x2b,
+ 0x31, 0x3d, 0x35, 0xf6, 0x13, 0xe6, 0xd9, 0x09,
+ 0x3d, 0x38, 0xe9, 0x75, 0xc3, 0x8f, 0xe3, 0xb8,
+ 0x9a, 0xde, 0x04, 0xe7, 0x5b, 0xb7, 0x01, 0xd9,
+ 0x66, 0x06, 0x01, 0xb3, 0x47, 0x65, 0xde, 0x98,
+ 0xd5, 0xf6, 0x37, 0x81, 0x71, 0xea, 0xe4, 0x39,
+ 0x6e, 0xa1, 0x5d, 0xc2, 0x40, 0xd1, 0xab, 0xf4,
+ 0x42, 0x89, 0x79, 0x44, 0xc2, 0xa2, 0x8f, 0xa1,
+ 0x76, 0x11, 0xd7, 0xfa, 0x5c, 0x22, 0xad, 0x8f
+};
+static const u8 enc_output104[] __initconst = {
+ 0xb2, 0x2c, 0x70, 0x68, 0xa5, 0x83, 0xfa, 0x35,
+ 0x0f, 0x85, 0x29, 0xc3, 0x75, 0xf8, 0xeb, 0x88,
+ 0xb6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xfa, 0x5b, 0x16, 0x2d, 0x6f, 0x12, 0xd1, 0xec,
+ 0x39, 0xcd, 0x90, 0xb7, 0x2b, 0xff, 0x75, 0x03,
+ 0xb6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xfa, 0x5b, 0x16, 0x2d, 0x6f, 0x12, 0xd1, 0xec,
+ 0x39, 0xcd, 0x90, 0xb7, 0x2b, 0xff, 0x75, 0x03,
+ 0xa0, 0x19, 0xac, 0x2e, 0xd6, 0x67, 0xe1, 0x7d,
+ 0xa1, 0x6f, 0x0a, 0xfa, 0x19, 0x61, 0x0d, 0x0d
+};
+static const u8 enc_assoc104[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce104[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
+};
+static const u8 enc_key104[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - edge case intermediate sums in poly1305 */
+static const u8 enc_input105[] __initconst = {
+ 0x74, 0x0f, 0x9e, 0x49, 0xf6, 0x10, 0xef, 0xa5,
+ 0x85, 0xb6, 0x59, 0xca, 0x6e, 0xd8, 0xb4, 0x99,
+ 0x2d, 0x3d, 0x35, 0xf6, 0x13, 0xe6, 0xd9, 0x09,
+ 0x3d, 0x38, 0xe9, 0x75, 0xc3, 0x8f, 0xe3, 0xb8,
+ 0x41, 0x2d, 0x96, 0xaf, 0xbe, 0x80, 0xec, 0x3e,
+ 0x79, 0xd4, 0x51, 0xb0, 0x0a, 0x2d, 0xb2, 0x9a,
+ 0xc9, 0xf6, 0x37, 0x81, 0x71, 0xea, 0xe4, 0x39,
+ 0x6e, 0xa1, 0x5d, 0xc2, 0x40, 0xd1, 0xab, 0xf4,
+ 0x99, 0x7a, 0xeb, 0x0c, 0x27, 0x95, 0x62, 0x46,
+ 0x69, 0xc3, 0x87, 0xf9, 0x11, 0x6a, 0xc1, 0x8d
+};
+static const u8 enc_output105[] __initconst = {
+ 0x74, 0x64, 0x49, 0x66, 0x70, 0xda, 0x0f, 0x3c,
+ 0x26, 0x99, 0xa7, 0x00, 0xd2, 0x3e, 0xcc, 0x3a,
+ 0xaa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x21, 0xa8, 0x84, 0x65, 0x8a, 0x25, 0x3c, 0x0b,
+ 0x26, 0x1f, 0xc0, 0xb4, 0x66, 0xb7, 0x19, 0x01,
+ 0xaa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x21, 0xa8, 0x84, 0x65, 0x8a, 0x25, 0x3c, 0x0b,
+ 0x26, 0x1f, 0xc0, 0xb4, 0x66, 0xb7, 0x19, 0x01,
+ 0x73, 0x6e, 0x18, 0x18, 0x16, 0x96, 0xa5, 0x88,
+ 0x9c, 0x31, 0x59, 0xfa, 0xab, 0xab, 0x20, 0xfd
+};
+static const u8 enc_assoc105[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce105[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
+};
+static const u8 enc_key105[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - edge case intermediate sums in poly1305 */
+static const u8 enc_input106[] __initconst = {
+ 0xad, 0xba, 0x5d, 0x10, 0x5b, 0xc8, 0xaa, 0x06,
+ 0x2c, 0x23, 0x36, 0xcb, 0x88, 0x9d, 0xdb, 0xd5,
+ 0x37, 0x3d, 0x35, 0xf6, 0x13, 0xe6, 0xd9, 0x09,
+ 0x3d, 0x38, 0xe9, 0x75, 0xc3, 0x8f, 0xe3, 0xb8,
+ 0x17, 0x7c, 0x5f, 0xfe, 0x28, 0x75, 0xf4, 0x68,
+ 0xf6, 0xc2, 0x96, 0x57, 0x48, 0xf3, 0x59, 0x9a,
+ 0xd3, 0xf6, 0x37, 0x81, 0x71, 0xea, 0xe4, 0x39,
+ 0x6e, 0xa1, 0x5d, 0xc2, 0x40, 0xd1, 0xab, 0xf4,
+ 0xcf, 0x2b, 0x22, 0x5d, 0xb1, 0x60, 0x7a, 0x10,
+ 0xe6, 0xd5, 0x40, 0x1e, 0x53, 0xb4, 0x2a, 0x8d
+};
+static const u8 enc_output106[] __initconst = {
+ 0xad, 0xd1, 0x8a, 0x3f, 0xdd, 0x02, 0x4a, 0x9f,
+ 0x8f, 0x0c, 0xc8, 0x01, 0x34, 0x7b, 0xa3, 0x76,
+ 0xb0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x77, 0xf9, 0x4d, 0x34, 0x1c, 0xd0, 0x24, 0x5d,
+ 0xa9, 0x09, 0x07, 0x53, 0x24, 0x69, 0xf2, 0x01,
+ 0xb0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x77, 0xf9, 0x4d, 0x34, 0x1c, 0xd0, 0x24, 0x5d,
+ 0xa9, 0x09, 0x07, 0x53, 0x24, 0x69, 0xf2, 0x01,
+ 0xba, 0xd5, 0x8f, 0x10, 0xa9, 0x1e, 0x6a, 0x88,
+ 0x9a, 0xba, 0x32, 0xfd, 0x17, 0xd8, 0x33, 0x1a
+};
+static const u8 enc_assoc106[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce106[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
+};
+static const u8 enc_key106[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - edge case intermediate sums in poly1305 */
+static const u8 enc_input107[] __initconst = {
+ 0xfe, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66,
+ 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c,
+ 0xc0, 0x01, 0xed, 0xc5, 0xda, 0x44, 0x2e, 0x71,
+ 0x9b, 0xce, 0x9a, 0xbe, 0x27, 0x3a, 0xf1, 0x44,
+ 0xb4, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca,
+ 0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64,
+ 0x48, 0x02, 0x5f, 0x41, 0xfa, 0x4e, 0x33, 0x6c,
+ 0x78, 0x69, 0x57, 0xa2, 0xa7, 0xc4, 0x93, 0x0a,
+ 0x6c, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2,
+ 0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73,
+ 0x00, 0x26, 0x6e, 0xa1, 0xe4, 0x36, 0x44, 0xa3,
+ 0x4d, 0x8d, 0xd1, 0xdc, 0x93, 0xf2, 0xfa, 0x13
+};
+static const u8 enc_output107[] __initconst = {
+ 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x47, 0xc3, 0x27, 0xcc, 0x36, 0x5d, 0x08, 0x87,
+ 0x59, 0x09, 0x8c, 0x34, 0x1b, 0x4a, 0xed, 0x03,
+ 0xd4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x2b, 0x0b, 0x97, 0x3f, 0x74, 0x5b, 0x28, 0xaa,
+ 0xe9, 0x37, 0xf5, 0x9f, 0x18, 0xea, 0xc7, 0x01,
+ 0xd4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x2b, 0x0b, 0x97, 0x3f, 0x74, 0x5b, 0x28, 0xaa,
+ 0xe9, 0x37, 0xf5, 0x9f, 0x18, 0xea, 0xc7, 0x01,
+ 0xd6, 0x8c, 0xe1, 0x74, 0x07, 0x9a, 0xdd, 0x02,
+ 0x8d, 0xd0, 0x5c, 0xf8, 0x14, 0x63, 0x04, 0x88
+};
+static const u8 enc_assoc107[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce107[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
+};
+static const u8 enc_key107[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - edge case intermediate sums in poly1305 */
+static const u8 enc_input108[] __initconst = {
+ 0xb5, 0x13, 0xb0, 0x6a, 0xb9, 0xac, 0x14, 0x43,
+ 0x5a, 0xcb, 0x8a, 0xa3, 0xa3, 0x7a, 0xfd, 0xb6,
+ 0x54, 0x3d, 0x35, 0xf6, 0x13, 0xe6, 0xd9, 0x09,
+ 0x3d, 0x38, 0xe9, 0x75, 0xc3, 0x8f, 0xe3, 0xb8,
+ 0x61, 0x95, 0x01, 0x93, 0xb1, 0xbf, 0x03, 0x11,
+ 0xff, 0x11, 0x79, 0x89, 0xae, 0xd9, 0xa9, 0x99,
+ 0xb0, 0xf6, 0x37, 0x81, 0x71, 0xea, 0xe4, 0x39,
+ 0x6e, 0xa1, 0x5d, 0xc2, 0x40, 0xd1, 0xab, 0xf4,
+ 0xb9, 0xc2, 0x7c, 0x30, 0x28, 0xaa, 0x8d, 0x69,
+ 0xef, 0x06, 0xaf, 0xc0, 0xb5, 0x9e, 0xda, 0x8e
+};
+static const u8 enc_output108[] __initconst = {
+ 0xb5, 0x78, 0x67, 0x45, 0x3f, 0x66, 0xf4, 0xda,
+ 0xf9, 0xe4, 0x74, 0x69, 0x1f, 0x9c, 0x85, 0x15,
+ 0xd3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x01, 0x10, 0x13, 0x59, 0x85, 0x1a, 0xd3, 0x24,
+ 0xa0, 0xda, 0xe8, 0x8d, 0xc2, 0x43, 0x02, 0x02,
+ 0xd3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x01, 0x10, 0x13, 0x59, 0x85, 0x1a, 0xd3, 0x24,
+ 0xa0, 0xda, 0xe8, 0x8d, 0xc2, 0x43, 0x02, 0x02,
+ 0xaa, 0x48, 0xa3, 0x88, 0x7d, 0x4b, 0x05, 0x96,
+ 0x99, 0xc2, 0xfd, 0xf9, 0xc6, 0x78, 0x7e, 0x0a
+};
+static const u8 enc_assoc108[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce108[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
+};
+static const u8 enc_key108[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - edge case intermediate sums in poly1305 */
+static const u8 enc_input109[] __initconst = {
+ 0xff, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66,
+ 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c,
+ 0xd4, 0xf1, 0x09, 0xe8, 0x14, 0xce, 0xa8, 0x5a,
+ 0x08, 0xc0, 0x11, 0xd8, 0x50, 0xdd, 0x1d, 0xcb,
+ 0xcf, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca,
+ 0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64,
+ 0x53, 0x40, 0xb8, 0x5a, 0x9a, 0xa0, 0x82, 0x96,
+ 0xb7, 0x7a, 0x5f, 0xc3, 0x96, 0x1f, 0x66, 0x0f,
+ 0x17, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2,
+ 0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73,
+ 0x1b, 0x64, 0x89, 0xba, 0x84, 0xd8, 0xf5, 0x59,
+ 0x82, 0x9e, 0xd9, 0xbd, 0xa2, 0x29, 0x0f, 0x16
+};
+static const u8 enc_output109[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x53, 0x33, 0xc3, 0xe1, 0xf8, 0xd7, 0x8e, 0xac,
+ 0xca, 0x07, 0x07, 0x52, 0x6c, 0xad, 0x01, 0x8c,
+ 0xaf, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x30, 0x49, 0x70, 0x24, 0x14, 0xb5, 0x99, 0x50,
+ 0x26, 0x24, 0xfd, 0xfe, 0x29, 0x31, 0x32, 0x04,
+ 0xaf, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x30, 0x49, 0x70, 0x24, 0x14, 0xb5, 0x99, 0x50,
+ 0x26, 0x24, 0xfd, 0xfe, 0x29, 0x31, 0x32, 0x04,
+ 0xb9, 0x36, 0xa8, 0x17, 0xf2, 0x21, 0x1a, 0xf1,
+ 0x29, 0xe2, 0xcf, 0x16, 0x0f, 0xd4, 0x2b, 0xcb
+};
+static const u8 enc_assoc109[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce109[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
+};
+static const u8 enc_key109[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - edge case intermediate sums in poly1305 */
+static const u8 enc_input110[] __initconst = {
+ 0xff, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66,
+ 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c,
+ 0xdf, 0x4c, 0x62, 0x03, 0x2d, 0x41, 0x19, 0xb5,
+ 0x88, 0x47, 0x7e, 0x99, 0x92, 0x5a, 0x56, 0xd9,
+ 0xd6, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca,
+ 0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64,
+ 0xfa, 0x84, 0xf0, 0x64, 0x55, 0x36, 0x42, 0x1b,
+ 0x2b, 0xb9, 0x24, 0x6e, 0xc2, 0x19, 0xed, 0x0b,
+ 0x0e, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2,
+ 0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73,
+ 0xb2, 0xa0, 0xc1, 0x84, 0x4b, 0x4e, 0x35, 0xd4,
+ 0x1e, 0x5d, 0xa2, 0x10, 0xf6, 0x2f, 0x84, 0x12
+};
+static const u8 enc_output110[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x58, 0x8e, 0xa8, 0x0a, 0xc1, 0x58, 0x3f, 0x43,
+ 0x4a, 0x80, 0x68, 0x13, 0xae, 0x2a, 0x4a, 0x9e,
+ 0xb6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x99, 0x8d, 0x38, 0x1a, 0xdb, 0x23, 0x59, 0xdd,
+ 0xba, 0xe7, 0x86, 0x53, 0x7d, 0x37, 0xb9, 0x00,
+ 0xb6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x99, 0x8d, 0x38, 0x1a, 0xdb, 0x23, 0x59, 0xdd,
+ 0xba, 0xe7, 0x86, 0x53, 0x7d, 0x37, 0xb9, 0x00,
+ 0x9f, 0x7a, 0xc4, 0x35, 0x1f, 0x6b, 0x91, 0xe6,
+ 0x30, 0x97, 0xa7, 0x13, 0x11, 0x5d, 0x05, 0xbe
+};
+static const u8 enc_assoc110[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce110[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
+};
+static const u8 enc_key110[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - edge case intermediate sums in poly1305 */
+static const u8 enc_input111[] __initconst = {
+ 0xff, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66,
+ 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c,
+ 0x13, 0xf8, 0x0a, 0x00, 0x6d, 0xc1, 0xbb, 0xda,
+ 0xd6, 0x39, 0xa9, 0x2f, 0xc7, 0xec, 0xa6, 0x55,
+ 0xf7, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca,
+ 0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64,
+ 0x63, 0x48, 0xb8, 0xfd, 0x29, 0xbf, 0x96, 0xd5,
+ 0x63, 0xa5, 0x17, 0xe2, 0x7d, 0x7b, 0xfc, 0x0f,
+ 0x2f, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2,
+ 0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73,
+ 0x2b, 0x6c, 0x89, 0x1d, 0x37, 0xc7, 0xe1, 0x1a,
+ 0x56, 0x41, 0x91, 0x9c, 0x49, 0x4d, 0x95, 0x16
+};
+static const u8 enc_output111[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x94, 0x3a, 0xc0, 0x09, 0x81, 0xd8, 0x9d, 0x2c,
+ 0x14, 0xfe, 0xbf, 0xa5, 0xfb, 0x9c, 0xba, 0x12,
+ 0x97, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x41, 0x70, 0x83, 0xa7, 0xaa, 0x8d, 0x13,
+ 0xf2, 0xfb, 0xb5, 0xdf, 0xc2, 0x55, 0xa8, 0x04,
+ 0x97, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x41, 0x70, 0x83, 0xa7, 0xaa, 0x8d, 0x13,
+ 0xf2, 0xfb, 0xb5, 0xdf, 0xc2, 0x55, 0xa8, 0x04,
+ 0x9a, 0x18, 0xa8, 0x28, 0x07, 0x02, 0x69, 0xf4,
+ 0x47, 0x00, 0xd0, 0x09, 0xe7, 0x17, 0x1c, 0xc9
+};
+static const u8 enc_assoc111[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce111[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
+};
+static const u8 enc_key111[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - edge case intermediate sums in poly1305 */
+static const u8 enc_input112[] __initconst = {
+ 0xff, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66,
+ 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c,
+ 0x82, 0xe5, 0x9b, 0x45, 0x82, 0x91, 0x50, 0x38,
+ 0xf9, 0x33, 0x81, 0x1e, 0x65, 0x2d, 0xc6, 0x6a,
+ 0xfc, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca,
+ 0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64,
+ 0xb6, 0x71, 0xc8, 0xca, 0xc2, 0x70, 0xc2, 0x65,
+ 0xa0, 0xac, 0x2f, 0x53, 0x57, 0x99, 0x88, 0x0a,
+ 0x24, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2,
+ 0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73,
+ 0xfe, 0x55, 0xf9, 0x2a, 0xdc, 0x08, 0xb5, 0xaa,
+ 0x95, 0x48, 0xa9, 0x2d, 0x63, 0xaf, 0xe1, 0x13
+};
+static const u8 enc_output112[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x05, 0x27, 0x51, 0x4c, 0x6e, 0x88, 0x76, 0xce,
+ 0x3b, 0xf4, 0x97, 0x94, 0x59, 0x5d, 0xda, 0x2d,
+ 0x9c, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xd5, 0x78, 0x00, 0xb4, 0x4c, 0x65, 0xd9, 0xa3,
+ 0x31, 0xf2, 0x8d, 0x6e, 0xe8, 0xb7, 0xdc, 0x01,
+ 0x9c, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xd5, 0x78, 0x00, 0xb4, 0x4c, 0x65, 0xd9, 0xa3,
+ 0x31, 0xf2, 0x8d, 0x6e, 0xe8, 0xb7, 0xdc, 0x01,
+ 0xb4, 0x36, 0xa8, 0x2b, 0x93, 0xd5, 0x55, 0xf7,
+ 0x43, 0x00, 0xd0, 0x19, 0x9b, 0xa7, 0x18, 0xce
+};
+static const u8 enc_assoc112[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce112[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
+};
+static const u8 enc_key112[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - edge case intermediate sums in poly1305 */
+static const u8 enc_input113[] __initconst = {
+ 0xff, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66,
+ 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c,
+ 0xf1, 0xd1, 0x28, 0x87, 0xb7, 0x21, 0x69, 0x86,
+ 0xa1, 0x2d, 0x79, 0x09, 0x8b, 0x6d, 0xe6, 0x0f,
+ 0xc0, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca,
+ 0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64,
+ 0xa7, 0xc7, 0x58, 0x99, 0xf3, 0xe6, 0x0a, 0xf1,
+ 0xfc, 0xb6, 0xc7, 0x30, 0x7d, 0x87, 0x59, 0x0f,
+ 0x18, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2,
+ 0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73,
+ 0xef, 0xe3, 0x69, 0x79, 0xed, 0x9e, 0x7d, 0x3e,
+ 0xc9, 0x52, 0x41, 0x4e, 0x49, 0xb1, 0x30, 0x16
+};
+static const u8 enc_output113[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x76, 0x13, 0xe2, 0x8e, 0x5b, 0x38, 0x4f, 0x70,
+ 0x63, 0xea, 0x6f, 0x83, 0xb7, 0x1d, 0xfa, 0x48,
+ 0xa0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xc4, 0xce, 0x90, 0xe7, 0x7d, 0xf3, 0x11, 0x37,
+ 0x6d, 0xe8, 0x65, 0x0d, 0xc2, 0xa9, 0x0d, 0x04,
+ 0xa0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xc4, 0xce, 0x90, 0xe7, 0x7d, 0xf3, 0x11, 0x37,
+ 0x6d, 0xe8, 0x65, 0x0d, 0xc2, 0xa9, 0x0d, 0x04,
+ 0xce, 0x54, 0xa8, 0x2e, 0x1f, 0xa9, 0x42, 0xfa,
+ 0x3f, 0x00, 0xd0, 0x29, 0x4f, 0x37, 0x15, 0xd3
+};
+static const u8 enc_assoc113[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce113[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
+};
+static const u8 enc_key113[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - edge case intermediate sums in poly1305 */
+static const u8 enc_input114[] __initconst = {
+ 0xcb, 0xf1, 0xda, 0x9e, 0x0b, 0xa9, 0x37, 0x73,
+ 0x74, 0xe6, 0x9e, 0x1c, 0x0e, 0x60, 0x0c, 0xfc,
+ 0x34, 0x3d, 0x35, 0xf6, 0x13, 0xe6, 0xd9, 0x09,
+ 0x3d, 0x38, 0xe9, 0x75, 0xc3, 0x8f, 0xe3, 0xb8,
+ 0xbe, 0x3f, 0xa6, 0x6b, 0x6c, 0xe7, 0x80, 0x8a,
+ 0xa3, 0xe4, 0x59, 0x49, 0xf9, 0x44, 0x64, 0x9f,
+ 0xd0, 0xf6, 0x37, 0x81, 0x71, 0xea, 0xe4, 0x39,
+ 0x6e, 0xa1, 0x5d, 0xc2, 0x40, 0xd1, 0xab, 0xf4,
+ 0x66, 0x68, 0xdb, 0xc8, 0xf5, 0xf2, 0x0e, 0xf2,
+ 0xb3, 0xf3, 0x8f, 0x00, 0xe2, 0x03, 0x17, 0x88
+};
+static const u8 enc_output114[] __initconst = {
+ 0xcb, 0x9a, 0x0d, 0xb1, 0x8d, 0x63, 0xd7, 0xea,
+ 0xd7, 0xc9, 0x60, 0xd6, 0xb2, 0x86, 0x74, 0x5f,
+ 0xb3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xde, 0xba, 0xb4, 0xa1, 0x58, 0x42, 0x50, 0xbf,
+ 0xfc, 0x2f, 0xc8, 0x4d, 0x95, 0xde, 0xcf, 0x04,
+ 0xb3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xde, 0xba, 0xb4, 0xa1, 0x58, 0x42, 0x50, 0xbf,
+ 0xfc, 0x2f, 0xc8, 0x4d, 0x95, 0xde, 0xcf, 0x04,
+ 0x23, 0x83, 0xab, 0x0b, 0x79, 0x92, 0x05, 0x69,
+ 0x9b, 0x51, 0x0a, 0xa7, 0x09, 0xbf, 0x31, 0xf1
+};
+static const u8 enc_assoc114[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce114[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
+};
+static const u8 enc_key114[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - edge case intermediate sums in poly1305 */
+static const u8 enc_input115[] __initconst = {
+ 0x8f, 0x27, 0x86, 0x94, 0xc4, 0xe9, 0xda, 0xeb,
+ 0xd5, 0x8d, 0x3e, 0x5b, 0x96, 0x6e, 0x8b, 0x68,
+ 0x42, 0x3d, 0x35, 0xf6, 0x13, 0xe6, 0xd9, 0x09,
+ 0x3d, 0x38, 0xe9, 0x75, 0xc3, 0x8f, 0xe3, 0xb8,
+ 0x06, 0x53, 0xe7, 0xa3, 0x31, 0x71, 0x88, 0x33,
+ 0xac, 0xc3, 0xb9, 0xad, 0xff, 0x1c, 0x31, 0x98,
+ 0xa6, 0xf6, 0x37, 0x81, 0x71, 0xea, 0xe4, 0x39,
+ 0x6e, 0xa1, 0x5d, 0xc2, 0x40, 0xd1, 0xab, 0xf4,
+ 0xde, 0x04, 0x9a, 0x00, 0xa8, 0x64, 0x06, 0x4b,
+ 0xbc, 0xd4, 0x6f, 0xe4, 0xe4, 0x5b, 0x42, 0x8f
+};
+static const u8 enc_output115[] __initconst = {
+ 0x8f, 0x4c, 0x51, 0xbb, 0x42, 0x23, 0x3a, 0x72,
+ 0x76, 0xa2, 0xc0, 0x91, 0x2a, 0x88, 0xf3, 0xcb,
+ 0xc5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x66, 0xd6, 0xf5, 0x69, 0x05, 0xd4, 0x58, 0x06,
+ 0xf3, 0x08, 0x28, 0xa9, 0x93, 0x86, 0x9a, 0x03,
+ 0xc5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x66, 0xd6, 0xf5, 0x69, 0x05, 0xd4, 0x58, 0x06,
+ 0xf3, 0x08, 0x28, 0xa9, 0x93, 0x86, 0x9a, 0x03,
+ 0x8b, 0xfb, 0xab, 0x17, 0xa9, 0xe0, 0xb8, 0x74,
+ 0x8b, 0x51, 0x0a, 0xe7, 0xd9, 0xfd, 0x23, 0x05
+};
+static const u8 enc_assoc115[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce115[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
+};
+static const u8 enc_key115[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - edge case intermediate sums in poly1305 */
+static const u8 enc_input116[] __initconst = {
+ 0xd5, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66,
+ 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c,
+ 0x9a, 0x22, 0xd7, 0x0a, 0x48, 0xe2, 0x4f, 0xdd,
+ 0xcd, 0xd4, 0x41, 0x9d, 0xe6, 0x4c, 0x8f, 0x44,
+ 0xfc, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca,
+ 0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64,
+ 0x77, 0xb5, 0xc9, 0x07, 0xd9, 0xc9, 0xe1, 0xea,
+ 0x51, 0x85, 0x1a, 0x20, 0x4a, 0xad, 0x9f, 0x0a,
+ 0x24, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2,
+ 0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73,
+ 0x3f, 0x91, 0xf8, 0xe7, 0xc7, 0xb1, 0x96, 0x25,
+ 0x64, 0x61, 0x9c, 0x5e, 0x7e, 0x9b, 0xf6, 0x13
+};
+static const u8 enc_output116[] __initconst = {
+ 0xd5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x1d, 0xe0, 0x1d, 0x03, 0xa4, 0xfb, 0x69, 0x2b,
+ 0x0f, 0x13, 0x57, 0x17, 0xda, 0x3c, 0x93, 0x03,
+ 0x9c, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x14, 0xbc, 0x01, 0x79, 0x57, 0xdc, 0xfa, 0x2c,
+ 0xc0, 0xdb, 0xb8, 0x1d, 0xf5, 0x83, 0xcb, 0x01,
+ 0x9c, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x14, 0xbc, 0x01, 0x79, 0x57, 0xdc, 0xfa, 0x2c,
+ 0xc0, 0xdb, 0xb8, 0x1d, 0xf5, 0x83, 0xcb, 0x01,
+ 0x49, 0xbc, 0x6e, 0x9f, 0xc5, 0x1c, 0x4d, 0x50,
+ 0x30, 0x36, 0x64, 0x4d, 0x84, 0x27, 0x73, 0xd2
+};
+static const u8 enc_assoc116[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce116[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
+};
+static const u8 enc_key116[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - edge case intermediate sums in poly1305 */
+static const u8 enc_input117[] __initconst = {
+ 0xdb, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66,
+ 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c,
+ 0x75, 0xd5, 0x64, 0x3a, 0xa5, 0xaf, 0x93, 0x4d,
+ 0x8c, 0xce, 0x39, 0x2c, 0xc3, 0xee, 0xdb, 0x47,
+ 0xc0, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca,
+ 0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64,
+ 0x60, 0x1b, 0x5a, 0xd2, 0x06, 0x7f, 0x28, 0x06,
+ 0x6a, 0x8f, 0x32, 0x81, 0x71, 0x5b, 0xa8, 0x08,
+ 0x18, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2,
+ 0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73,
+ 0x28, 0x3f, 0x6b, 0x32, 0x18, 0x07, 0x5f, 0xc9,
+ 0x5f, 0x6b, 0xb4, 0xff, 0x45, 0x6d, 0xc1, 0x11
+};
+static const u8 enc_output117[] __initconst = {
+ 0xdb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xf2, 0x17, 0xae, 0x33, 0x49, 0xb6, 0xb5, 0xbb,
+ 0x4e, 0x09, 0x2f, 0xa6, 0xff, 0x9e, 0xc7, 0x00,
+ 0xa0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x03, 0x12, 0x92, 0xac, 0x88, 0x6a, 0x33, 0xc0,
+ 0xfb, 0xd1, 0x90, 0xbc, 0xce, 0x75, 0xfc, 0x03,
+ 0xa0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x03, 0x12, 0x92, 0xac, 0x88, 0x6a, 0x33, 0xc0,
+ 0xfb, 0xd1, 0x90, 0xbc, 0xce, 0x75, 0xfc, 0x03,
+ 0x63, 0xda, 0x6e, 0xa2, 0x51, 0xf0, 0x39, 0x53,
+ 0x2c, 0x36, 0x64, 0x5d, 0x38, 0xb7, 0x6f, 0xd7
+};
+static const u8 enc_assoc117[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce117[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
+};
+static const u8 enc_key117[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - edge case intermediate sums in poly1305 */
+static const u8 enc_input118[] __initconst = {
+ 0x93, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66,
+ 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c,
+ 0x62, 0x48, 0x39, 0x60, 0x42, 0x16, 0xe4, 0x03,
+ 0xeb, 0xcc, 0x6a, 0xf5, 0x59, 0xec, 0x8b, 0x43,
+ 0x97, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca,
+ 0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64,
+ 0xd8, 0xc8, 0xc3, 0xfa, 0x1a, 0x9e, 0x47, 0x4a,
+ 0xbe, 0x52, 0xd0, 0x2c, 0x81, 0x87, 0xe9, 0x0f,
+ 0x4f, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2,
+ 0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73,
+ 0x90, 0xec, 0xf2, 0x1a, 0x04, 0xe6, 0x30, 0x85,
+ 0x8b, 0xb6, 0x56, 0x52, 0xb5, 0xb1, 0x80, 0x16
+};
+static const u8 enc_output118[] __initconst = {
+ 0x93, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xe5, 0x8a, 0xf3, 0x69, 0xae, 0x0f, 0xc2, 0xf5,
+ 0x29, 0x0b, 0x7c, 0x7f, 0x65, 0x9c, 0x97, 0x04,
+ 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xbb, 0xc1, 0x0b, 0x84, 0x94, 0x8b, 0x5c, 0x8c,
+ 0x2f, 0x0c, 0x72, 0x11, 0x3e, 0xa9, 0xbd, 0x04,
+ 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xbb, 0xc1, 0x0b, 0x84, 0x94, 0x8b, 0x5c, 0x8c,
+ 0x2f, 0x0c, 0x72, 0x11, 0x3e, 0xa9, 0xbd, 0x04,
+ 0x73, 0xeb, 0x27, 0x24, 0xb5, 0xc4, 0x05, 0xf0,
+ 0x4d, 0x00, 0xd0, 0xf1, 0x58, 0x40, 0xa1, 0xc1
+};
+static const u8 enc_assoc118[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce118[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
+};
+static const u8 enc_key118[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
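+/*
+ * The table below gathers the encryption vectors together with their
+ * input, associated-data and nonce lengths so a selftest can iterate
+ * over them. A minimal consumer sketch follows; the field names
+ * (ilen/alen) and the exact selftest plumbing are assumptions for
+ * illustration, not necessarily what the rest of this file uses:
+ *
+ *	u8 computed[MAX_TESTVEC_LEN];	// sized for the largest vector
+ *	size_t i;
+ *
+ *	for (i = 0; i < ARRAY_SIZE(chacha20poly1305_enc_vectors); ++i) {
+ *		const struct chacha20poly1305_testvec *v =
+ *			&chacha20poly1305_enc_vectors[i];
+ *
+ *		chacha20poly1305_encrypt(computed, v->input, v->ilen,
+ *					 v->assoc, v->alen,
+ *					 get_unaligned_le64(v->nonce),
+ *					 v->key);
+ *		// v->output holds ciphertext || 16-byte Poly1305 tag
+ *		WARN_ON(memcmp(computed, v->output,
+ *			       v->ilen + CHACHA20POLY1305_AUTHTAG_SIZE));
+ *	}
+ */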
+static const struct chacha20poly1305_testvec
+chacha20poly1305_enc_vectors[] __initconst = {
+ { enc_input001, enc_output001, enc_assoc001, enc_nonce001, enc_key001,
+ sizeof(enc_input001), sizeof(enc_assoc001), sizeof(enc_nonce001) },
+ { enc_input002, enc_output002, enc_assoc002, enc_nonce002, enc_key002,
+ sizeof(enc_input002), sizeof(enc_assoc002), sizeof(enc_nonce002) },
+ { enc_input003, enc_output003, enc_assoc003, enc_nonce003, enc_key003,
+ sizeof(enc_input003), sizeof(enc_assoc003), sizeof(enc_nonce003) },
+ { enc_input004, enc_output004, enc_assoc004, enc_nonce004, enc_key004,
+ sizeof(enc_input004), sizeof(enc_assoc004), sizeof(enc_nonce004) },
+ { enc_input005, enc_output005, enc_assoc005, enc_nonce005, enc_key005,
+ sizeof(enc_input005), sizeof(enc_assoc005), sizeof(enc_nonce005) },
+ { enc_input006, enc_output006, enc_assoc006, enc_nonce006, enc_key006,
+ sizeof(enc_input006), sizeof(enc_assoc006), sizeof(enc_nonce006) },
+ { enc_input007, enc_output007, enc_assoc007, enc_nonce007, enc_key007,
+ sizeof(enc_input007), sizeof(enc_assoc007), sizeof(enc_nonce007) },
+ { enc_input008, enc_output008, enc_assoc008, enc_nonce008, enc_key008,
+ sizeof(enc_input008), sizeof(enc_assoc008), sizeof(enc_nonce008) },
+ { enc_input009, enc_output009, enc_assoc009, enc_nonce009, enc_key009,
+ sizeof(enc_input009), sizeof(enc_assoc009), sizeof(enc_nonce009) },
+ { enc_input010, enc_output010, enc_assoc010, enc_nonce010, enc_key010,
+ sizeof(enc_input010), sizeof(enc_assoc010), sizeof(enc_nonce010) },
+ { enc_input011, enc_output011, enc_assoc011, enc_nonce011, enc_key011,
+ sizeof(enc_input011), sizeof(enc_assoc011), sizeof(enc_nonce011) },
+ { enc_input012, enc_output012, enc_assoc012, enc_nonce012, enc_key012,
+ sizeof(enc_input012), sizeof(enc_assoc012), sizeof(enc_nonce012) },
+ { enc_input053, enc_output053, enc_assoc053, enc_nonce053, enc_key053,
+ sizeof(enc_input053), sizeof(enc_assoc053), sizeof(enc_nonce053) },
+ { enc_input054, enc_output054, enc_assoc054, enc_nonce054, enc_key054,
+ sizeof(enc_input054), sizeof(enc_assoc054), sizeof(enc_nonce054) },
+ { enc_input055, enc_output055, enc_assoc055, enc_nonce055, enc_key055,
+ sizeof(enc_input055), sizeof(enc_assoc055), sizeof(enc_nonce055) },
+ { enc_input056, enc_output056, enc_assoc056, enc_nonce056, enc_key056,
+ sizeof(enc_input056), sizeof(enc_assoc056), sizeof(enc_nonce056) },
+ { enc_input057, enc_output057, enc_assoc057, enc_nonce057, enc_key057,
+ sizeof(enc_input057), sizeof(enc_assoc057), sizeof(enc_nonce057) },
+ { enc_input058, enc_output058, enc_assoc058, enc_nonce058, enc_key058,
+ sizeof(enc_input058), sizeof(enc_assoc058), sizeof(enc_nonce058) },
+ { enc_input059, enc_output059, enc_assoc059, enc_nonce059, enc_key059,
+ sizeof(enc_input059), sizeof(enc_assoc059), sizeof(enc_nonce059) },
+ { enc_input060, enc_output060, enc_assoc060, enc_nonce060, enc_key060,
+ sizeof(enc_input060), sizeof(enc_assoc060), sizeof(enc_nonce060) },
+ { enc_input061, enc_output061, enc_assoc061, enc_nonce061, enc_key061,
+ sizeof(enc_input061), sizeof(enc_assoc061), sizeof(enc_nonce061) },
+ { enc_input062, enc_output062, enc_assoc062, enc_nonce062, enc_key062,
+ sizeof(enc_input062), sizeof(enc_assoc062), sizeof(enc_nonce062) },
+ { enc_input063, enc_output063, enc_assoc063, enc_nonce063, enc_key063,
+ sizeof(enc_input063), sizeof(enc_assoc063), sizeof(enc_nonce063) },
+ { enc_input064, enc_output064, enc_assoc064, enc_nonce064, enc_key064,
+ sizeof(enc_input064), sizeof(enc_assoc064), sizeof(enc_nonce064) },
+ { enc_input065, enc_output065, enc_assoc065, enc_nonce065, enc_key065,
+ sizeof(enc_input065), sizeof(enc_assoc065), sizeof(enc_nonce065) },
+ { enc_input066, enc_output066, enc_assoc066, enc_nonce066, enc_key066,
+ sizeof(enc_input066), sizeof(enc_assoc066), sizeof(enc_nonce066) },
+ { enc_input067, enc_output067, enc_assoc067, enc_nonce067, enc_key067,
+ sizeof(enc_input067), sizeof(enc_assoc067), sizeof(enc_nonce067) },
+ { enc_input068, enc_output068, enc_assoc068, enc_nonce068, enc_key068,
+ sizeof(enc_input068), sizeof(enc_assoc068), sizeof(enc_nonce068) },
+ { enc_input069, enc_output069, enc_assoc069, enc_nonce069, enc_key069,
+ sizeof(enc_input069), sizeof(enc_assoc069), sizeof(enc_nonce069) },
+ { enc_input070, enc_output070, enc_assoc070, enc_nonce070, enc_key070,
+ sizeof(enc_input070), sizeof(enc_assoc070), sizeof(enc_nonce070) },
+ { enc_input071, enc_output071, enc_assoc071, enc_nonce071, enc_key071,
+ sizeof(enc_input071), sizeof(enc_assoc071), sizeof(enc_nonce071) },
+ { enc_input072, enc_output072, enc_assoc072, enc_nonce072, enc_key072,
+ sizeof(enc_input072), sizeof(enc_assoc072), sizeof(enc_nonce072) },
+ { enc_input073, enc_output073, enc_assoc073, enc_nonce073, enc_key073,
+ sizeof(enc_input073), sizeof(enc_assoc073), sizeof(enc_nonce073) },
+ { enc_input076, enc_output076, enc_assoc076, enc_nonce076, enc_key076,
+ sizeof(enc_input076), sizeof(enc_assoc076), sizeof(enc_nonce076) },
+ { enc_input077, enc_output077, enc_assoc077, enc_nonce077, enc_key077,
+ sizeof(enc_input077), sizeof(enc_assoc077), sizeof(enc_nonce077) },
+ { enc_input078, enc_output078, enc_assoc078, enc_nonce078, enc_key078,
+ sizeof(enc_input078), sizeof(enc_assoc078), sizeof(enc_nonce078) },
+ { enc_input079, enc_output079, enc_assoc079, enc_nonce079, enc_key079,
+ sizeof(enc_input079), sizeof(enc_assoc079), sizeof(enc_nonce079) },
+ { enc_input080, enc_output080, enc_assoc080, enc_nonce080, enc_key080,
+ sizeof(enc_input080), sizeof(enc_assoc080), sizeof(enc_nonce080) },
+ { enc_input081, enc_output081, enc_assoc081, enc_nonce081, enc_key081,
+ sizeof(enc_input081), sizeof(enc_assoc081), sizeof(enc_nonce081) },
+ { enc_input082, enc_output082, enc_assoc082, enc_nonce082, enc_key082,
+ sizeof(enc_input082), sizeof(enc_assoc082), sizeof(enc_nonce082) },
+ { enc_input083, enc_output083, enc_assoc083, enc_nonce083, enc_key083,
+ sizeof(enc_input083), sizeof(enc_assoc083), sizeof(enc_nonce083) },
+ { enc_input084, enc_output084, enc_assoc084, enc_nonce084, enc_key084,
+ sizeof(enc_input084), sizeof(enc_assoc084), sizeof(enc_nonce084) },
+ { enc_input085, enc_output085, enc_assoc085, enc_nonce085, enc_key085,
+ sizeof(enc_input085), sizeof(enc_assoc085), sizeof(enc_nonce085) },
+ { enc_input093, enc_output093, enc_assoc093, enc_nonce093, enc_key093,
+ sizeof(enc_input093), sizeof(enc_assoc093), sizeof(enc_nonce093) },
+ { enc_input094, enc_output094, enc_assoc094, enc_nonce094, enc_key094,
+ sizeof(enc_input094), sizeof(enc_assoc094), sizeof(enc_nonce094) },
+ { enc_input095, enc_output095, enc_assoc095, enc_nonce095, enc_key095,
+ sizeof(enc_input095), sizeof(enc_assoc095), sizeof(enc_nonce095) },
+ { enc_input096, enc_output096, enc_assoc096, enc_nonce096, enc_key096,
+ sizeof(enc_input096), sizeof(enc_assoc096), sizeof(enc_nonce096) },
+ { enc_input097, enc_output097, enc_assoc097, enc_nonce097, enc_key097,
+ sizeof(enc_input097), sizeof(enc_assoc097), sizeof(enc_nonce097) },
+ { enc_input098, enc_output098, enc_assoc098, enc_nonce098, enc_key098,
+ sizeof(enc_input098), sizeof(enc_assoc098), sizeof(enc_nonce098) },
+ { enc_input099, enc_output099, enc_assoc099, enc_nonce099, enc_key099,
+ sizeof(enc_input099), sizeof(enc_assoc099), sizeof(enc_nonce099) },
+ { enc_input100, enc_output100, enc_assoc100, enc_nonce100, enc_key100,
+ sizeof(enc_input100), sizeof(enc_assoc100), sizeof(enc_nonce100) },
+ { enc_input101, enc_output101, enc_assoc101, enc_nonce101, enc_key101,
+ sizeof(enc_input101), sizeof(enc_assoc101), sizeof(enc_nonce101) },
+ { enc_input102, enc_output102, enc_assoc102, enc_nonce102, enc_key102,
+ sizeof(enc_input102), sizeof(enc_assoc102), sizeof(enc_nonce102) },
+ { enc_input103, enc_output103, enc_assoc103, enc_nonce103, enc_key103,
+ sizeof(enc_input103), sizeof(enc_assoc103), sizeof(enc_nonce103) },
+ { enc_input104, enc_output104, enc_assoc104, enc_nonce104, enc_key104,
+ sizeof(enc_input104), sizeof(enc_assoc104), sizeof(enc_nonce104) },
+ { enc_input105, enc_output105, enc_assoc105, enc_nonce105, enc_key105,
+ sizeof(enc_input105), sizeof(enc_assoc105), sizeof(enc_nonce105) },
+ { enc_input106, enc_output106, enc_assoc106, enc_nonce106, enc_key106,
+ sizeof(enc_input106), sizeof(enc_assoc106), sizeof(enc_nonce106) },
+ { enc_input107, enc_output107, enc_assoc107, enc_nonce107, enc_key107,
+ sizeof(enc_input107), sizeof(enc_assoc107), sizeof(enc_nonce107) },
+ { enc_input108, enc_output108, enc_assoc108, enc_nonce108, enc_key108,
+ sizeof(enc_input108), sizeof(enc_assoc108), sizeof(enc_nonce108) },
+ { enc_input109, enc_output109, enc_assoc109, enc_nonce109, enc_key109,
+ sizeof(enc_input109), sizeof(enc_assoc109), sizeof(enc_nonce109) },
+ { enc_input110, enc_output110, enc_assoc110, enc_nonce110, enc_key110,
+ sizeof(enc_input110), sizeof(enc_assoc110), sizeof(enc_nonce110) },
+ { enc_input111, enc_output111, enc_assoc111, enc_nonce111, enc_key111,
+ sizeof(enc_input111), sizeof(enc_assoc111), sizeof(enc_nonce111) },
+ { enc_input112, enc_output112, enc_assoc112, enc_nonce112, enc_key112,
+ sizeof(enc_input112), sizeof(enc_assoc112), sizeof(enc_nonce112) },
+ { enc_input113, enc_output113, enc_assoc113, enc_nonce113, enc_key113,
+ sizeof(enc_input113), sizeof(enc_assoc113), sizeof(enc_nonce113) },
+ { enc_input114, enc_output114, enc_assoc114, enc_nonce114, enc_key114,
+ sizeof(enc_input114), sizeof(enc_assoc114), sizeof(enc_nonce114) },
+ { enc_input115, enc_output115, enc_assoc115, enc_nonce115, enc_key115,
+ sizeof(enc_input115), sizeof(enc_assoc115), sizeof(enc_nonce115) },
+ { enc_input116, enc_output116, enc_assoc116, enc_nonce116, enc_key116,
+ sizeof(enc_input116), sizeof(enc_assoc116), sizeof(enc_nonce116) },
+ { enc_input117, enc_output117, enc_assoc117, enc_nonce117, enc_key117,
+ sizeof(enc_input117), sizeof(enc_assoc117), sizeof(enc_nonce117) },
+ { enc_input118, enc_output118, enc_assoc118, enc_nonce118, enc_key118,
+ sizeof(enc_input118), sizeof(enc_assoc118), sizeof(enc_nonce118) }
+};
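+
+/*
+ * A minimal sketch (not the actual harness) of how the table above could be
+ * consumed, assuming the chacha20poly1305_encrypt() library helper (64-bit
+ * little-endian nonce, 16-byte tag appended to dst) and field names taken
+ * from the initializer order -- input, output, assoc, nonce, key, ilen,
+ * alen, nlen:
+ *
+ *	for (i = 0; i < ARRAY_SIZE(chacha20poly1305_enc_vectors); ++i) {
+ *		const struct chacha20poly1305_testvec *v =
+ *			&chacha20poly1305_enc_vectors[i];
+ *
+ *		chacha20poly1305_encrypt(out, v->input, v->ilen,
+ *					 v->assoc, v->alen,
+ *					 get_unaligned_le64(v->nonce),
+ *					 v->key);
+ *		ok &= !memcmp(out, v->output,
+ *			      v->ilen + CHACHA20POLY1305_AUTHTAG_SIZE);
+ *	}
+ */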
+
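+/*
+ * Decryption test vectors: dec_inputNNN is the ciphertext with the 16-byte
+ * Poly1305 tag appended, and dec_outputNNN is the expected plaintext.
+ */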
+static const u8 dec_input001[] __initconst = {
+ 0x64, 0xa0, 0x86, 0x15, 0x75, 0x86, 0x1a, 0xf4,
+ 0x60, 0xf0, 0x62, 0xc7, 0x9b, 0xe6, 0x43, 0xbd,
+ 0x5e, 0x80, 0x5c, 0xfd, 0x34, 0x5c, 0xf3, 0x89,
+ 0xf1, 0x08, 0x67, 0x0a, 0xc7, 0x6c, 0x8c, 0xb2,
+ 0x4c, 0x6c, 0xfc, 0x18, 0x75, 0x5d, 0x43, 0xee,
+ 0xa0, 0x9e, 0xe9, 0x4e, 0x38, 0x2d, 0x26, 0xb0,
+ 0xbd, 0xb7, 0xb7, 0x3c, 0x32, 0x1b, 0x01, 0x00,
+ 0xd4, 0xf0, 0x3b, 0x7f, 0x35, 0x58, 0x94, 0xcf,
+ 0x33, 0x2f, 0x83, 0x0e, 0x71, 0x0b, 0x97, 0xce,
+ 0x98, 0xc8, 0xa8, 0x4a, 0xbd, 0x0b, 0x94, 0x81,
+ 0x14, 0xad, 0x17, 0x6e, 0x00, 0x8d, 0x33, 0xbd,
+ 0x60, 0xf9, 0x82, 0xb1, 0xff, 0x37, 0xc8, 0x55,
+ 0x97, 0x97, 0xa0, 0x6e, 0xf4, 0xf0, 0xef, 0x61,
+ 0xc1, 0x86, 0x32, 0x4e, 0x2b, 0x35, 0x06, 0x38,
+ 0x36, 0x06, 0x90, 0x7b, 0x6a, 0x7c, 0x02, 0xb0,
+ 0xf9, 0xf6, 0x15, 0x7b, 0x53, 0xc8, 0x67, 0xe4,
+ 0xb9, 0x16, 0x6c, 0x76, 0x7b, 0x80, 0x4d, 0x46,
+ 0xa5, 0x9b, 0x52, 0x16, 0xcd, 0xe7, 0xa4, 0xe9,
+ 0x90, 0x40, 0xc5, 0xa4, 0x04, 0x33, 0x22, 0x5e,
+ 0xe2, 0x82, 0xa1, 0xb0, 0xa0, 0x6c, 0x52, 0x3e,
+ 0xaf, 0x45, 0x34, 0xd7, 0xf8, 0x3f, 0xa1, 0x15,
+ 0x5b, 0x00, 0x47, 0x71, 0x8c, 0xbc, 0x54, 0x6a,
+ 0x0d, 0x07, 0x2b, 0x04, 0xb3, 0x56, 0x4e, 0xea,
+ 0x1b, 0x42, 0x22, 0x73, 0xf5, 0x48, 0x27, 0x1a,
+ 0x0b, 0xb2, 0x31, 0x60, 0x53, 0xfa, 0x76, 0x99,
+ 0x19, 0x55, 0xeb, 0xd6, 0x31, 0x59, 0x43, 0x4e,
+ 0xce, 0xbb, 0x4e, 0x46, 0x6d, 0xae, 0x5a, 0x10,
+ 0x73, 0xa6, 0x72, 0x76, 0x27, 0x09, 0x7a, 0x10,
+ 0x49, 0xe6, 0x17, 0xd9, 0x1d, 0x36, 0x10, 0x94,
+ 0xfa, 0x68, 0xf0, 0xff, 0x77, 0x98, 0x71, 0x30,
+ 0x30, 0x5b, 0xea, 0xba, 0x2e, 0xda, 0x04, 0xdf,
+ 0x99, 0x7b, 0x71, 0x4d, 0x6c, 0x6f, 0x2c, 0x29,
+ 0xa6, 0xad, 0x5c, 0xb4, 0x02, 0x2b, 0x02, 0x70,
+ 0x9b, 0xee, 0xad, 0x9d, 0x67, 0x89, 0x0c, 0xbb,
+ 0x22, 0x39, 0x23, 0x36, 0xfe, 0xa1, 0x85, 0x1f,
+ 0x38
+};
+static const u8 dec_output001[] __initconst = {
+ 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, 0x74,
+ 0x2d, 0x44, 0x72, 0x61, 0x66, 0x74, 0x73, 0x20,
+ 0x61, 0x72, 0x65, 0x20, 0x64, 0x72, 0x61, 0x66,
+ 0x74, 0x20, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65,
+ 0x6e, 0x74, 0x73, 0x20, 0x76, 0x61, 0x6c, 0x69,
+ 0x64, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x61, 0x20,
+ 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x20,
+ 0x6f, 0x66, 0x20, 0x73, 0x69, 0x78, 0x20, 0x6d,
+ 0x6f, 0x6e, 0x74, 0x68, 0x73, 0x20, 0x61, 0x6e,
+ 0x64, 0x20, 0x6d, 0x61, 0x79, 0x20, 0x62, 0x65,
+ 0x20, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64,
+ 0x2c, 0x20, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63,
+ 0x65, 0x64, 0x2c, 0x20, 0x6f, 0x72, 0x20, 0x6f,
+ 0x62, 0x73, 0x6f, 0x6c, 0x65, 0x74, 0x65, 0x64,
+ 0x20, 0x62, 0x79, 0x20, 0x6f, 0x74, 0x68, 0x65,
+ 0x72, 0x20, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65,
+ 0x6e, 0x74, 0x73, 0x20, 0x61, 0x74, 0x20, 0x61,
+ 0x6e, 0x79, 0x20, 0x74, 0x69, 0x6d, 0x65, 0x2e,
+ 0x20, 0x49, 0x74, 0x20, 0x69, 0x73, 0x20, 0x69,
+ 0x6e, 0x61, 0x70, 0x70, 0x72, 0x6f, 0x70, 0x72,
+ 0x69, 0x61, 0x74, 0x65, 0x20, 0x74, 0x6f, 0x20,
+ 0x75, 0x73, 0x65, 0x20, 0x49, 0x6e, 0x74, 0x65,
+ 0x72, 0x6e, 0x65, 0x74, 0x2d, 0x44, 0x72, 0x61,
+ 0x66, 0x74, 0x73, 0x20, 0x61, 0x73, 0x20, 0x72,
+ 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65,
+ 0x20, 0x6d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61,
+ 0x6c, 0x20, 0x6f, 0x72, 0x20, 0x74, 0x6f, 0x20,
+ 0x63, 0x69, 0x74, 0x65, 0x20, 0x74, 0x68, 0x65,
+ 0x6d, 0x20, 0x6f, 0x74, 0x68, 0x65, 0x72, 0x20,
+ 0x74, 0x68, 0x61, 0x6e, 0x20, 0x61, 0x73, 0x20,
+ 0x2f, 0xe2, 0x80, 0x9c, 0x77, 0x6f, 0x72, 0x6b,
+ 0x20, 0x69, 0x6e, 0x20, 0x70, 0x72, 0x6f, 0x67,
+ 0x72, 0x65, 0x73, 0x73, 0x2e, 0x2f, 0xe2, 0x80,
+ 0x9d
+};
+static const u8 dec_assoc001[] __initconst = {
+ 0xf3, 0x33, 0x88, 0x86, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x4e, 0x91
+};
+static const u8 dec_nonce001[] __initconst = {
+ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08
+};
+static const u8 dec_key001[] __initconst = {
+ 0x1c, 0x92, 0x40, 0xa5, 0xeb, 0x55, 0xd3, 0x8a,
+ 0xf3, 0x33, 0x88, 0x86, 0x04, 0xf6, 0xb5, 0xf0,
+ 0x47, 0x39, 0x17, 0xc1, 0x40, 0x2b, 0x80, 0x09,
+ 0x9d, 0xca, 0x5c, 0xbc, 0x20, 0x70, 0x75, 0xc0
+};
+
+static const u8 dec_input002[] __initconst = {
+ 0xea, 0xe0, 0x1e, 0x9e, 0x2c, 0x91, 0xaa, 0xe1,
+ 0xdb, 0x5d, 0x99, 0x3f, 0x8a, 0xf7, 0x69, 0x92
+};
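+/* Zero-length plaintext and AAD: the 16-byte input above is the tag alone. */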
+static const u8 dec_output002[] __initconst = { };
+static const u8 dec_assoc002[] __initconst = { };
+static const u8 dec_nonce002[] __initconst = {
+ 0xca, 0xbf, 0x33, 0x71, 0x32, 0x45, 0x77, 0x8e
+};
+static const u8 dec_key002[] __initconst = {
+ 0x4c, 0xf5, 0x96, 0x83, 0x38, 0xe6, 0xae, 0x7f,
+ 0x2d, 0x29, 0x25, 0x76, 0xd5, 0x75, 0x27, 0x86,
+ 0x91, 0x9a, 0x27, 0x7a, 0xfb, 0x46, 0xc5, 0xef,
+ 0x94, 0x81, 0x79, 0x57, 0x14, 0x59, 0x40, 0x68
+};
+
+static const u8 dec_input003[] __initconst = {
+ 0xdd, 0x6b, 0x3b, 0x82, 0xce, 0x5a, 0xbd, 0xd6,
+ 0xa9, 0x35, 0x83, 0xd8, 0x8c, 0x3d, 0x85, 0x77
+};
+static const u8 dec_output003[] __initconst = { };
+static const u8 dec_assoc003[] __initconst = {
+ 0x33, 0x10, 0x41, 0x12, 0x1f, 0xf3, 0xd2, 0x6b
+};
+static const u8 dec_nonce003[] __initconst = {
+ 0x3d, 0x86, 0xb5, 0x6b, 0xc8, 0xa3, 0x1f, 0x1d
+};
+static const u8 dec_key003[] __initconst = {
+ 0x2d, 0xb0, 0x5d, 0x40, 0xc8, 0xed, 0x44, 0x88,
+ 0x34, 0xd1, 0x13, 0xaf, 0x57, 0xa1, 0xeb, 0x3a,
+ 0x2a, 0x80, 0x51, 0x36, 0xec, 0x5b, 0xbc, 0x08,
+ 0x93, 0x84, 0x21, 0xb5, 0x13, 0x88, 0x3c, 0x0d
+};
+
+static const u8 dec_input004[] __initconst = {
+ 0xb7, 0x1b, 0xb0, 0x73, 0x59, 0xb0, 0x84, 0xb2,
+ 0x6d, 0x8e, 0xab, 0x94, 0x31, 0xa1, 0xae, 0xac,
+ 0x89
+};
+static const u8 dec_output004[] __initconst = {
+ 0xa4
+};
+static const u8 dec_assoc004[] __initconst = {
+ 0x6a, 0xe2, 0xad, 0x3f, 0x88, 0x39, 0x5a, 0x40
+};
+static const u8 dec_nonce004[] __initconst = {
+ 0xd2, 0x32, 0x1f, 0x29, 0x28, 0xc6, 0xc4, 0xc4
+};
+static const u8 dec_key004[] __initconst = {
+ 0x4b, 0x28, 0x4b, 0xa3, 0x7b, 0xbe, 0xe9, 0xf8,
+ 0x31, 0x80, 0x82, 0xd7, 0xd8, 0xe8, 0xb5, 0xa1,
+ 0xe2, 0x18, 0x18, 0x8a, 0x9c, 0xfa, 0xa3, 0x3d,
+ 0x25, 0x71, 0x3e, 0x40, 0xbc, 0x54, 0x7a, 0x3e
+};
+
+static const u8 dec_input005[] __initconst = {
+ 0xbf, 0xe1, 0x5b, 0x0b, 0xdb, 0x6b, 0xf5, 0x5e,
+ 0x6c, 0x5d, 0x84, 0x44, 0x39, 0x81, 0xc1, 0x9c,
+ 0xac
+};
+static const u8 dec_output005[] __initconst = {
+ 0x2d
+};
+static const u8 dec_assoc005[] __initconst = { };
+static const u8 dec_nonce005[] __initconst = {
+ 0x20, 0x1c, 0xaa, 0x5f, 0x9c, 0xbf, 0x92, 0x30
+};
+static const u8 dec_key005[] __initconst = {
+ 0x66, 0xca, 0x9c, 0x23, 0x2a, 0x4b, 0x4b, 0x31,
+ 0x0e, 0x92, 0x89, 0x8b, 0xf4, 0x93, 0xc7, 0x87,
+ 0x98, 0xa3, 0xd8, 0x39, 0xf8, 0xf4, 0xa7, 0x01,
+ 0xc0, 0x2e, 0x0a, 0xa6, 0x7e, 0x5a, 0x78, 0x87
+};
+
+static const u8 dec_input006[] __initconst = {
+ 0x8b, 0x06, 0xd3, 0x31, 0xb0, 0x93, 0x45, 0xb1,
+ 0x75, 0x6e, 0x26, 0xf9, 0x67, 0xbc, 0x90, 0x15,
+ 0x81, 0x2c, 0xb5, 0xf0, 0xc6, 0x2b, 0xc7, 0x8c,
+ 0x56, 0xd1, 0xbf, 0x69, 0x6c, 0x07, 0xa0, 0xda,
+ 0x65, 0x27, 0xc9, 0x90, 0x3d, 0xef, 0x4b, 0x11,
+ 0x0f, 0x19, 0x07, 0xfd, 0x29, 0x92, 0xd9, 0xc8,
+ 0xf7, 0x99, 0x2e, 0x4a, 0xd0, 0xb8, 0x2c, 0xdc,
+ 0x93, 0xf5, 0x9e, 0x33, 0x78, 0xd1, 0x37, 0xc3,
+ 0x66, 0xd7, 0x5e, 0xbc, 0x44, 0xbf, 0x53, 0xa5,
+ 0xbc, 0xc4, 0xcb, 0x7b, 0x3a, 0x8e, 0x7f, 0x02,
+ 0xbd, 0xbb, 0xe7, 0xca, 0xa6, 0x6c, 0x6b, 0x93,
+ 0x21, 0x93, 0x10, 0x61, 0xe7, 0x69, 0xd0, 0x78,
+ 0xf3, 0x07, 0x5a, 0x1a, 0x8f, 0x73, 0xaa, 0xb1,
+ 0x4e, 0xd3, 0xda, 0x4f, 0xf3, 0x32, 0xe1, 0x66,
+ 0x3e, 0x6c, 0xc6, 0x13, 0xba, 0x06, 0x5b, 0xfc,
+ 0x6a, 0xe5, 0x6f, 0x60, 0xfb, 0x07, 0x40, 0xb0,
+ 0x8c, 0x9d, 0x84, 0x43, 0x6b, 0xc1, 0xf7, 0x8d,
+ 0x8d, 0x31, 0xf7, 0x7a, 0x39, 0x4d, 0x8f, 0x9a,
+ 0xeb
+};
+static const u8 dec_output006[] __initconst = {
+ 0x33, 0x2f, 0x94, 0xc1, 0xa4, 0xef, 0xcc, 0x2a,
+ 0x5b, 0xa6, 0xe5, 0x8f, 0x1d, 0x40, 0xf0, 0x92,
+ 0x3c, 0xd9, 0x24, 0x11, 0xa9, 0x71, 0xf9, 0x37,
+ 0x14, 0x99, 0xfa, 0xbe, 0xe6, 0x80, 0xde, 0x50,
+ 0xc9, 0x96, 0xd4, 0xb0, 0xec, 0x9e, 0x17, 0xec,
+ 0xd2, 0x5e, 0x72, 0x99, 0xfc, 0x0a, 0xe1, 0xcb,
+ 0x48, 0xd2, 0x85, 0xdd, 0x2f, 0x90, 0xe0, 0x66,
+ 0x3b, 0xe6, 0x20, 0x74, 0xbe, 0x23, 0x8f, 0xcb,
+ 0xb4, 0xe4, 0xda, 0x48, 0x40, 0xa6, 0xd1, 0x1b,
+ 0xc7, 0x42, 0xce, 0x2f, 0x0c, 0xa6, 0x85, 0x6e,
+ 0x87, 0x37, 0x03, 0xb1, 0x7c, 0x25, 0x96, 0xa3,
+ 0x05, 0xd8, 0xb0, 0xf4, 0xed, 0xea, 0xc2, 0xf0,
+ 0x31, 0x98, 0x6c, 0xd1, 0x14, 0x25, 0xc0, 0xcb,
+ 0x01, 0x74, 0xd0, 0x82, 0xf4, 0x36, 0xf5, 0x41,
+ 0xd5, 0xdc, 0xca, 0xc5, 0xbb, 0x98, 0xfe, 0xfc,
+ 0x69, 0x21, 0x70, 0xd8, 0xa4, 0x4b, 0xc8, 0xde,
+ 0x8f
+};
+static const u8 dec_assoc006[] __initconst = {
+ 0x70, 0xd3, 0x33, 0xf3, 0x8b, 0x18, 0x0b
+};
+static const u8 dec_nonce006[] __initconst = {
+ 0xdf, 0x51, 0x84, 0x82, 0x42, 0x0c, 0x75, 0x9c
+};
+static const u8 dec_key006[] __initconst = {
+ 0x68, 0x7b, 0x8d, 0x8e, 0xe3, 0xc4, 0xdd, 0xae,
+ 0xdf, 0x72, 0x7f, 0x53, 0x72, 0x25, 0x1e, 0x78,
+ 0x91, 0xcb, 0x69, 0x76, 0x1f, 0x49, 0x93, 0xf9,
+ 0x6f, 0x21, 0xcc, 0x39, 0x9c, 0xad, 0xb1, 0x01
+};
+
+static const u8 dec_input007[] __initconst = {
+ 0x85, 0x04, 0xc2, 0xed, 0x8d, 0xfd, 0x97, 0x5c,
+ 0xd2, 0xb7, 0xe2, 0xc1, 0x6b, 0xa3, 0xba, 0xf8,
+ 0xc9, 0x50, 0xc3, 0xc6, 0xa5, 0xe3, 0xa4, 0x7c,
+ 0xc3, 0x23, 0x49, 0x5e, 0xa9, 0xb9, 0x32, 0xeb,
+ 0x8a, 0x7c, 0xca, 0xe5, 0xec, 0xfb, 0x7c, 0xc0,
+ 0xcb, 0x7d, 0xdc, 0x2c, 0x9d, 0x92, 0x55, 0x21,
+ 0x0a, 0xc8, 0x43, 0x63, 0x59, 0x0a, 0x31, 0x70,
+ 0x82, 0x67, 0x41, 0x03, 0xf8, 0xdf, 0xf2, 0xac,
+ 0xa7, 0x02, 0xd4, 0xd5, 0x8a, 0x2d, 0xc8, 0x99,
+ 0x19, 0x66, 0xd0, 0xf6, 0x88, 0x2c, 0x77, 0xd9,
+ 0xd4, 0x0d, 0x6c, 0xbd, 0x98, 0xde, 0xe7, 0x7f,
+ 0xad, 0x7e, 0x8a, 0xfb, 0xe9, 0x4b, 0xe5, 0xf7,
+ 0xe5, 0x50, 0xa0, 0x90, 0x3f, 0xd6, 0x22, 0x53,
+ 0xe3, 0xfe, 0x1b, 0xcc, 0x79, 0x3b, 0xec, 0x12,
+ 0x47, 0x52, 0xa7, 0xd6, 0x04, 0xe3, 0x52, 0xe6,
+ 0x93, 0x90, 0x91, 0x32, 0x73, 0x79, 0xb8, 0xd0,
+ 0x31, 0xde, 0x1f, 0x9f, 0x2f, 0x05, 0x38, 0x54,
+ 0x2f, 0x35, 0x04, 0x39, 0xe0, 0xa7, 0xba, 0xc6,
+ 0x52, 0xf6, 0x37, 0x65, 0x4c, 0x07, 0xa9, 0x7e,
+ 0xb3, 0x21, 0x6f, 0x74, 0x8c, 0xc9, 0xde, 0xdb,
+ 0x65, 0x1b, 0x9b, 0xaa, 0x60, 0xb1, 0x03, 0x30,
+ 0x6b, 0xb2, 0x03, 0xc4, 0x1c, 0x04, 0xf8, 0x0f,
+ 0x64, 0xaf, 0x46, 0xe4, 0x65, 0x99, 0x49, 0xe2,
+ 0xea, 0xce, 0x78, 0x00, 0xd8, 0x8b, 0xd5, 0x2e,
+ 0xcf, 0xfc, 0x40, 0x49, 0xe8, 0x58, 0xdc, 0x34,
+ 0x9c, 0x8c, 0x61, 0xbf, 0x0a, 0x8e, 0xec, 0x39,
+ 0xa9, 0x30, 0x05, 0x5a, 0xd2, 0x56, 0x01, 0xc7,
+ 0xda, 0x8f, 0x4e, 0xbb, 0x43, 0xa3, 0x3a, 0xf9,
+ 0x15, 0x2a, 0xd0, 0xa0, 0x7a, 0x87, 0x34, 0x82,
+ 0xfe, 0x8a, 0xd1, 0x2d, 0x5e, 0xc7, 0xbf, 0x04,
+ 0x53, 0x5f, 0x3b, 0x36, 0xd4, 0x25, 0x5c, 0x34,
+ 0x7a, 0x8d, 0xd5, 0x05, 0xce, 0x72, 0xca, 0xef,
+ 0x7a, 0x4b, 0xbc, 0xb0, 0x10, 0x5c, 0x96, 0x42,
+ 0x3a, 0x00, 0x98, 0xcd, 0x15, 0xe8, 0xb7, 0x53
+};
+static const u8 dec_output007[] __initconst = {
+ 0x9b, 0x18, 0xdb, 0xdd, 0x9a, 0x0f, 0x3e, 0xa5,
+ 0x15, 0x17, 0xde, 0xdf, 0x08, 0x9d, 0x65, 0x0a,
+ 0x67, 0x30, 0x12, 0xe2, 0x34, 0x77, 0x4b, 0xc1,
+ 0xd9, 0xc6, 0x1f, 0xab, 0xc6, 0x18, 0x50, 0x17,
+ 0xa7, 0x9d, 0x3c, 0xa6, 0xc5, 0x35, 0x8c, 0x1c,
+ 0xc0, 0xa1, 0x7c, 0x9f, 0x03, 0x89, 0xca, 0xe1,
+ 0xe6, 0xe9, 0xd4, 0xd3, 0x88, 0xdb, 0xb4, 0x51,
+ 0x9d, 0xec, 0xb4, 0xfc, 0x52, 0xee, 0x6d, 0xf1,
+ 0x75, 0x42, 0xc6, 0xfd, 0xbd, 0x7a, 0x8e, 0x86,
+ 0xfc, 0x44, 0xb3, 0x4f, 0xf3, 0xea, 0x67, 0x5a,
+ 0x41, 0x13, 0xba, 0xb0, 0xdc, 0xe1, 0xd3, 0x2a,
+ 0x7c, 0x22, 0xb3, 0xca, 0xac, 0x6a, 0x37, 0x98,
+ 0x3e, 0x1d, 0x40, 0x97, 0xf7, 0x9b, 0x1d, 0x36,
+ 0x6b, 0xb3, 0x28, 0xbd, 0x60, 0x82, 0x47, 0x34,
+ 0xaa, 0x2f, 0x7d, 0xe9, 0xa8, 0x70, 0x81, 0x57,
+ 0xd4, 0xb9, 0x77, 0x0a, 0x9d, 0x29, 0xa7, 0x84,
+ 0x52, 0x4f, 0xc2, 0x4a, 0x40, 0x3b, 0x3c, 0xd4,
+ 0xc9, 0x2a, 0xdb, 0x4a, 0x53, 0xc4, 0xbe, 0x80,
+ 0xe9, 0x51, 0x7f, 0x8f, 0xc7, 0xa2, 0xce, 0x82,
+ 0x5c, 0x91, 0x1e, 0x74, 0xd9, 0xd0, 0xbd, 0xd5,
+ 0xf3, 0xfd, 0xda, 0x4d, 0x25, 0xb4, 0xbb, 0x2d,
+ 0xac, 0x2f, 0x3d, 0x71, 0x85, 0x7b, 0xcf, 0x3c,
+ 0x7b, 0x3e, 0x0e, 0x22, 0x78, 0x0c, 0x29, 0xbf,
+ 0xe4, 0xf4, 0x57, 0xb3, 0xcb, 0x49, 0xa0, 0xfc,
+ 0x1e, 0x05, 0x4e, 0x16, 0xbc, 0xd5, 0xa8, 0xa3,
+ 0xee, 0x05, 0x35, 0xc6, 0x7c, 0xab, 0x60, 0x14,
+ 0x55, 0x1a, 0x8e, 0xc5, 0x88, 0x5d, 0xd5, 0x81,
+ 0xc2, 0x81, 0xa5, 0xc4, 0x60, 0xdb, 0xaf, 0x77,
+ 0x91, 0xe1, 0xce, 0xa2, 0x7e, 0x7f, 0x42, 0xe3,
+ 0xb0, 0x13, 0x1c, 0x1f, 0x25, 0x60, 0x21, 0xe2,
+ 0x40, 0x5f, 0x99, 0xb7, 0x73, 0xec, 0x9b, 0x2b,
+ 0xf0, 0x65, 0x11, 0xc8, 0xd0, 0x0a, 0x9f, 0xd3
+};
+static const u8 dec_assoc007[] __initconst = { };
+static const u8 dec_nonce007[] __initconst = {
+ 0xde, 0x7b, 0xef, 0xc3, 0x65, 0x1b, 0x68, 0xb0
+};
+static const u8 dec_key007[] __initconst = {
+ 0x8d, 0xb8, 0x91, 0x48, 0xf0, 0xe7, 0x0a, 0xbd,
+ 0xf9, 0x3f, 0xcd, 0xd9, 0xa0, 0x1e, 0x42, 0x4c,
+ 0xe7, 0xde, 0x25, 0x3d, 0xa3, 0xd7, 0x05, 0x80,
+ 0x8d, 0xf2, 0x82, 0xac, 0x44, 0x16, 0x51, 0x01
+};
+
+static const u8 dec_input008[] __initconst = {
+ 0x14, 0xf6, 0x41, 0x37, 0xa6, 0xd4, 0x27, 0xcd,
+ 0xdb, 0x06, 0x3e, 0x9a, 0x4e, 0xab, 0xd5, 0xb1,
+ 0x1e, 0x6b, 0xd2, 0xbc, 0x11, 0xf4, 0x28, 0x93,
+ 0x63, 0x54, 0xef, 0xbb, 0x5e, 0x1d, 0x3a, 0x1d,
+ 0x37, 0x3c, 0x0a, 0x6c, 0x1e, 0xc2, 0xd1, 0x2c,
+ 0xb5, 0xa3, 0xb5, 0x7b, 0xb8, 0x8f, 0x25, 0xa6,
+ 0x1b, 0x61, 0x1c, 0xec, 0x28, 0x58, 0x26, 0xa4,
+ 0xa8, 0x33, 0x28, 0x25, 0x5c, 0x45, 0x05, 0xe5,
+ 0x6c, 0x99, 0xe5, 0x45, 0xc4, 0xa2, 0x03, 0x84,
+ 0x03, 0x73, 0x1e, 0x8c, 0x49, 0xac, 0x20, 0xdd,
+ 0x8d, 0xb3, 0xc4, 0xf5, 0xe7, 0x4f, 0xf1, 0xed,
+ 0xa1, 0x98, 0xde, 0xa4, 0x96, 0xdd, 0x2f, 0xab,
+ 0xab, 0x97, 0xcf, 0x3e, 0xd2, 0x9e, 0xb8, 0x13,
+ 0x07, 0x28, 0x29, 0x19, 0xaf, 0xfd, 0xf2, 0x49,
+ 0x43, 0xea, 0x49, 0x26, 0x91, 0xc1, 0x07, 0xd6,
+ 0xbb, 0x81, 0x75, 0x35, 0x0d, 0x24, 0x7f, 0xc8,
+ 0xda, 0xd4, 0xb7, 0xeb, 0xe8, 0x5c, 0x09, 0xa2,
+ 0x2f, 0xdc, 0x28, 0x7d, 0x3a, 0x03, 0xfa, 0x94,
+ 0xb5, 0x1d, 0x17, 0x99, 0x36, 0xc3, 0x1c, 0x18,
+ 0x34, 0xe3, 0x9f, 0xf5, 0x55, 0x7c, 0xb0, 0x60,
+ 0x9d, 0xff, 0xac, 0xd4, 0x61, 0xf2, 0xad, 0xf8,
+ 0xce, 0xc7, 0xbe, 0x5c, 0xd2, 0x95, 0xa8, 0x4b,
+ 0x77, 0x13, 0x19, 0x59, 0x26, 0xc9, 0xb7, 0x8f,
+ 0x6a, 0xcb, 0x2d, 0x37, 0x91, 0xea, 0x92, 0x9c,
+ 0x94, 0x5b, 0xda, 0x0b, 0xce, 0xfe, 0x30, 0x20,
+ 0xf8, 0x51, 0xad, 0xf2, 0xbe, 0xe7, 0xc7, 0xff,
+ 0xb3, 0x33, 0x91, 0x6a, 0xc9, 0x1a, 0x41, 0xc9,
+ 0x0f, 0xf3, 0x10, 0x0e, 0xfd, 0x53, 0xff, 0x6c,
+ 0x16, 0x52, 0xd9, 0xf3, 0xf7, 0x98, 0x2e, 0xc9,
+ 0x07, 0x31, 0x2c, 0x0c, 0x72, 0xd7, 0xc5, 0xc6,
+ 0x08, 0x2a, 0x7b, 0xda, 0xbd, 0x7e, 0x02, 0xea,
+ 0x1a, 0xbb, 0xf2, 0x04, 0x27, 0x61, 0x28, 0x8e,
+ 0xf5, 0x04, 0x03, 0x1f, 0x4c, 0x07, 0x55, 0x82,
+ 0xec, 0x1e, 0xd7, 0x8b, 0x2f, 0x65, 0x56, 0xd1,
+ 0xd9, 0x1e, 0x3c, 0xe9, 0x1f, 0x5e, 0x98, 0x70,
+ 0x38, 0x4a, 0x8c, 0x49, 0xc5, 0x43, 0xa0, 0xa1,
+ 0x8b, 0x74, 0x9d, 0x4c, 0x62, 0x0d, 0x10, 0x0c,
+ 0xf4, 0x6c, 0x8f, 0xe0, 0xaa, 0x9a, 0x8d, 0xb7,
+ 0xe0, 0xbe, 0x4c, 0x87, 0xf1, 0x98, 0x2f, 0xcc,
+ 0xed, 0xc0, 0x52, 0x29, 0xdc, 0x83, 0xf8, 0xfc,
+ 0x2c, 0x0e, 0xa8, 0x51, 0x4d, 0x80, 0x0d, 0xa3,
+ 0xfe, 0xd8, 0x37, 0xe7, 0x41, 0x24, 0xfc, 0xfb,
+ 0x75, 0xe3, 0x71, 0x7b, 0x57, 0x45, 0xf5, 0x97,
+ 0x73, 0x65, 0x63, 0x14, 0x74, 0xb8, 0x82, 0x9f,
+ 0xf8, 0x60, 0x2f, 0x8a, 0xf2, 0x4e, 0xf1, 0x39,
+ 0xda, 0x33, 0x91, 0xf8, 0x36, 0xe0, 0x8d, 0x3f,
+ 0x1f, 0x3b, 0x56, 0xdc, 0xa0, 0x8f, 0x3c, 0x9d,
+ 0x71, 0x52, 0xa7, 0xb8, 0xc0, 0xa5, 0xc6, 0xa2,
+ 0x73, 0xda, 0xf4, 0x4b, 0x74, 0x5b, 0x00, 0x3d,
+ 0x99, 0xd7, 0x96, 0xba, 0xe6, 0xe1, 0xa6, 0x96,
+ 0x38, 0xad, 0xb3, 0xc0, 0xd2, 0xba, 0x91, 0x6b,
+ 0xf9, 0x19, 0xdd, 0x3b, 0xbe, 0xbe, 0x9c, 0x20,
+ 0x50, 0xba, 0xa1, 0xd0, 0xce, 0x11, 0xbd, 0x95,
+ 0xd8, 0xd1, 0xdd, 0x33, 0x85, 0x74, 0xdc, 0xdb,
+ 0x66, 0x76, 0x44, 0xdc, 0x03, 0x74, 0x48, 0x35,
+ 0x98, 0xb1, 0x18, 0x47, 0x94, 0x7d, 0xff, 0x62,
+ 0xe4, 0x58, 0x78, 0xab, 0xed, 0x95, 0x36, 0xd9,
+ 0x84, 0x91, 0x82, 0x64, 0x41, 0xbb, 0x58, 0xe6,
+ 0x1c, 0x20, 0x6d, 0x15, 0x6b, 0x13, 0x96, 0xe8,
+ 0x35, 0x7f, 0xdc, 0x40, 0x2c, 0xe9, 0xbc, 0x8a,
+ 0x4f, 0x92, 0xec, 0x06, 0x2d, 0x50, 0xdf, 0x93,
+ 0x5d, 0x65, 0x5a, 0xa8, 0xfc, 0x20, 0x50, 0x14,
+ 0xa9, 0x8a, 0x7e, 0x1d, 0x08, 0x1f, 0xe2, 0x99,
+ 0xd0, 0xbe, 0xfb, 0x3a, 0x21, 0x9d, 0xad, 0x86,
+ 0x54, 0xfd, 0x0d, 0x98, 0x1c, 0x5a, 0x6f, 0x1f,
+ 0x9a, 0x40, 0xcd, 0xa2, 0xff, 0x6a, 0xf1, 0x54
+};
+static const u8 dec_output008[] __initconst = {
+ 0xc3, 0x09, 0x94, 0x62, 0xe6, 0x46, 0x2e, 0x10,
+ 0xbe, 0x00, 0xe4, 0xfc, 0xf3, 0x40, 0xa3, 0xe2,
+ 0x0f, 0xc2, 0x8b, 0x28, 0xdc, 0xba, 0xb4, 0x3c,
+ 0xe4, 0x21, 0x58, 0x61, 0xcd, 0x8b, 0xcd, 0xfb,
+ 0xac, 0x94, 0xa1, 0x45, 0xf5, 0x1c, 0xe1, 0x12,
+ 0xe0, 0x3b, 0x67, 0x21, 0x54, 0x5e, 0x8c, 0xaa,
+ 0xcf, 0xdb, 0xb4, 0x51, 0xd4, 0x13, 0xda, 0xe6,
+ 0x83, 0x89, 0xb6, 0x92, 0xe9, 0x21, 0x76, 0xa4,
+ 0x93, 0x7d, 0x0e, 0xfd, 0x96, 0x36, 0x03, 0x91,
+ 0x43, 0x5c, 0x92, 0x49, 0x62, 0x61, 0x7b, 0xeb,
+ 0x43, 0x89, 0xb8, 0x12, 0x20, 0x43, 0xd4, 0x47,
+ 0x06, 0x84, 0xee, 0x47, 0xe9, 0x8a, 0x73, 0x15,
+ 0x0f, 0x72, 0xcf, 0xed, 0xce, 0x96, 0xb2, 0x7f,
+ 0x21, 0x45, 0x76, 0xeb, 0x26, 0x28, 0x83, 0x6a,
+ 0xad, 0xaa, 0xa6, 0x81, 0xd8, 0x55, 0xb1, 0xa3,
+ 0x85, 0xb3, 0x0c, 0xdf, 0xf1, 0x69, 0x2d, 0x97,
+ 0x05, 0x2a, 0xbc, 0x7c, 0x7b, 0x25, 0xf8, 0x80,
+ 0x9d, 0x39, 0x25, 0xf3, 0x62, 0xf0, 0x66, 0x5e,
+ 0xf4, 0xa0, 0xcf, 0xd8, 0xfd, 0x4f, 0xb1, 0x1f,
+ 0x60, 0x3a, 0x08, 0x47, 0xaf, 0xe1, 0xf6, 0x10,
+ 0x77, 0x09, 0xa7, 0x27, 0x8f, 0x9a, 0x97, 0x5a,
+ 0x26, 0xfa, 0xfe, 0x41, 0x32, 0x83, 0x10, 0xe0,
+ 0x1d, 0xbf, 0x64, 0x0d, 0xf4, 0x1c, 0x32, 0x35,
+ 0xe5, 0x1b, 0x36, 0xef, 0xd4, 0x4a, 0x93, 0x4d,
+ 0x00, 0x7c, 0xec, 0x02, 0x07, 0x8b, 0x5d, 0x7d,
+ 0x1b, 0x0e, 0xd1, 0xa6, 0xa5, 0x5d, 0x7d, 0x57,
+ 0x88, 0xa8, 0xcc, 0x81, 0xb4, 0x86, 0x4e, 0xb4,
+ 0x40, 0xe9, 0x1d, 0xc3, 0xb1, 0x24, 0x3e, 0x7f,
+ 0xcc, 0x8a, 0x24, 0x9b, 0xdf, 0x6d, 0xf0, 0x39,
+ 0x69, 0x3e, 0x4c, 0xc0, 0x96, 0xe4, 0x13, 0xda,
+ 0x90, 0xda, 0xf4, 0x95, 0x66, 0x8b, 0x17, 0x17,
+ 0xfe, 0x39, 0x43, 0x25, 0xaa, 0xda, 0xa0, 0x43,
+ 0x3c, 0xb1, 0x41, 0x02, 0xa3, 0xf0, 0xa7, 0x19,
+ 0x59, 0xbc, 0x1d, 0x7d, 0x6c, 0x6d, 0x91, 0x09,
+ 0x5c, 0xb7, 0x5b, 0x01, 0xd1, 0x6f, 0x17, 0x21,
+ 0x97, 0xbf, 0x89, 0x71, 0xa5, 0xb0, 0x6e, 0x07,
+ 0x45, 0xfd, 0x9d, 0xea, 0x07, 0xf6, 0x7a, 0x9f,
+ 0x10, 0x18, 0x22, 0x30, 0x73, 0xac, 0xd4, 0x6b,
+ 0x72, 0x44, 0xed, 0xd9, 0x19, 0x9b, 0x2d, 0x4a,
+ 0x41, 0xdd, 0xd1, 0x85, 0x5e, 0x37, 0x19, 0xed,
+ 0xd2, 0x15, 0x8f, 0x5e, 0x91, 0xdb, 0x33, 0xf2,
+ 0xe4, 0xdb, 0xff, 0x98, 0xfb, 0xa3, 0xb5, 0xca,
+ 0x21, 0x69, 0x08, 0xe7, 0x8a, 0xdf, 0x90, 0xff,
+ 0x3e, 0xe9, 0x20, 0x86, 0x3c, 0xe9, 0xfc, 0x0b,
+ 0xfe, 0x5c, 0x61, 0xaa, 0x13, 0x92, 0x7f, 0x7b,
+ 0xec, 0xe0, 0x6d, 0xa8, 0x23, 0x22, 0xf6, 0x6b,
+ 0x77, 0xc4, 0xfe, 0x40, 0x07, 0x3b, 0xb6, 0xf6,
+ 0x8e, 0x5f, 0xd4, 0xb9, 0xb7, 0x0f, 0x21, 0x04,
+ 0xef, 0x83, 0x63, 0x91, 0x69, 0x40, 0xa3, 0x48,
+ 0x5c, 0xd2, 0x60, 0xf9, 0x4f, 0x6c, 0x47, 0x8b,
+ 0x3b, 0xb1, 0x9f, 0x8e, 0xee, 0x16, 0x8a, 0x13,
+ 0xfc, 0x46, 0x17, 0xc3, 0xc3, 0x32, 0x56, 0xf8,
+ 0x3c, 0x85, 0x3a, 0xb6, 0x3e, 0xaa, 0x89, 0x4f,
+ 0xb3, 0xdf, 0x38, 0xfd, 0xf1, 0xe4, 0x3a, 0xc0,
+ 0xe6, 0x58, 0xb5, 0x8f, 0xc5, 0x29, 0xa2, 0x92,
+ 0x4a, 0xb6, 0xa0, 0x34, 0x7f, 0xab, 0xb5, 0x8a,
+ 0x90, 0xa1, 0xdb, 0x4d, 0xca, 0xb6, 0x2c, 0x41,
+ 0x3c, 0xf7, 0x2b, 0x21, 0xc3, 0xfd, 0xf4, 0x17,
+ 0x5c, 0xb5, 0x33, 0x17, 0x68, 0x2b, 0x08, 0x30,
+ 0xf3, 0xf7, 0x30, 0x3c, 0x96, 0xe6, 0x6a, 0x20,
+ 0x97, 0xe7, 0x4d, 0x10, 0x5f, 0x47, 0x5f, 0x49,
+ 0x96, 0x09, 0xf0, 0x27, 0x91, 0xc8, 0xf8, 0x5a,
+ 0x2e, 0x79, 0xb5, 0xe2, 0xb8, 0xe8, 0xb9, 0x7b,
+ 0xd5, 0x10, 0xcb, 0xff, 0x5d, 0x14, 0x73, 0xf3
+};
+static const u8 dec_assoc008[] __initconst = { };
+static const u8 dec_nonce008[] __initconst = {
+ 0x0e, 0x0d, 0x57, 0xbb, 0x7b, 0x40, 0x54, 0x02
+};
+static const u8 dec_key008[] __initconst = {
+ 0xf2, 0xaa, 0x4f, 0x99, 0xfd, 0x3e, 0xa8, 0x53,
+ 0xc1, 0x44, 0xe9, 0x81, 0x18, 0xdc, 0xf5, 0xf0,
+ 0x3e, 0x44, 0x15, 0x59, 0xe0, 0xc5, 0x44, 0x86,
+ 0xc3, 0x91, 0xa8, 0x75, 0xc0, 0x12, 0x46, 0xba
+};
+
+static const u8 dec_input009[] __initconst = {
+ 0xfd, 0x81, 0x8d, 0xd0, 0x3d, 0xb4, 0xd5, 0xdf,
+ 0xd3, 0x42, 0x47, 0x5a, 0x6d, 0x19, 0x27, 0x66,
+ 0x4b, 0x2e, 0x0c, 0x27, 0x9c, 0x96, 0x4c, 0x72,
+ 0x02, 0xa3, 0x65, 0xc3, 0xb3, 0x6f, 0x2e, 0xbd,
+ 0x63, 0x8a, 0x4a, 0x5d, 0x29, 0xa2, 0xd0, 0x28,
+ 0x48, 0xc5, 0x3d, 0x98, 0xa3, 0xbc, 0xe0, 0xbe,
+ 0x3b, 0x3f, 0xe6, 0x8a, 0xa4, 0x7f, 0x53, 0x06,
+ 0xfa, 0x7f, 0x27, 0x76, 0x72, 0x31, 0xa1, 0xf5,
+ 0xd6, 0x0c, 0x52, 0x47, 0xba, 0xcd, 0x4f, 0xd7,
+ 0xeb, 0x05, 0x48, 0x0d, 0x7c, 0x35, 0x4a, 0x09,
+ 0xc9, 0x76, 0x71, 0x02, 0xa3, 0xfb, 0xb7, 0x1a,
+ 0x65, 0xb7, 0xed, 0x98, 0xc6, 0x30, 0x8a, 0x00,
+ 0xae, 0xa1, 0x31, 0xe5, 0xb5, 0x9e, 0x6d, 0x62,
+ 0xda, 0xda, 0x07, 0x0f, 0x38, 0x38, 0xd3, 0xcb,
+ 0xc1, 0xb0, 0xad, 0xec, 0x72, 0xec, 0xb1, 0xa2,
+ 0x7b, 0x59, 0xf3, 0x3d, 0x2b, 0xef, 0xcd, 0x28,
+ 0x5b, 0x83, 0xcc, 0x18, 0x91, 0x88, 0xb0, 0x2e,
+ 0xf9, 0x29, 0x31, 0x18, 0xf9, 0x4e, 0xe9, 0x0a,
+ 0x91, 0x92, 0x9f, 0xae, 0x2d, 0xad, 0xf4, 0xe6,
+ 0x1a, 0xe2, 0xa4, 0xee, 0x47, 0x15, 0xbf, 0x83,
+ 0x6e, 0xd7, 0x72, 0x12, 0x3b, 0x2d, 0x24, 0xe9,
+ 0xb2, 0x55, 0xcb, 0x3c, 0x10, 0xf0, 0x24, 0x8a,
+ 0x4a, 0x02, 0xea, 0x90, 0x25, 0xf0, 0xb4, 0x79,
+ 0x3a, 0xef, 0x6e, 0xf5, 0x52, 0xdf, 0xb0, 0x0a,
+ 0xcd, 0x24, 0x1c, 0xd3, 0x2e, 0x22, 0x74, 0xea,
+ 0x21, 0x6f, 0xe9, 0xbd, 0xc8, 0x3e, 0x36, 0x5b,
+ 0x19, 0xf1, 0xca, 0x99, 0x0a, 0xb4, 0xa7, 0x52,
+ 0x1a, 0x4e, 0xf2, 0xad, 0x8d, 0x56, 0x85, 0xbb,
+ 0x64, 0x89, 0xba, 0x26, 0xf9, 0xc7, 0xe1, 0x89,
+ 0x19, 0x22, 0x77, 0xc3, 0xa8, 0xfc, 0xff, 0xad,
+ 0xfe, 0xb9, 0x48, 0xae, 0x12, 0x30, 0x9f, 0x19,
+ 0xfb, 0x1b, 0xef, 0x14, 0x87, 0x8a, 0x78, 0x71,
+ 0xf3, 0xf4, 0xb7, 0x00, 0x9c, 0x1d, 0xb5, 0x3d,
+ 0x49, 0x00, 0x0c, 0x06, 0xd4, 0x50, 0xf9, 0x54,
+ 0x45, 0xb2, 0x5b, 0x43, 0xdb, 0x6d, 0xcf, 0x1a,
+ 0xe9, 0x7a, 0x7a, 0xcf, 0xfc, 0x8a, 0x4e, 0x4d,
+ 0x0b, 0x07, 0x63, 0x28, 0xd8, 0xe7, 0x08, 0x95,
+ 0xdf, 0xa6, 0x72, 0x93, 0x2e, 0xbb, 0xa0, 0x42,
+ 0x89, 0x16, 0xf1, 0xd9, 0x0c, 0xf9, 0xa1, 0x16,
+ 0xfd, 0xd9, 0x03, 0xb4, 0x3b, 0x8a, 0xf5, 0xf6,
+ 0xe7, 0x6b, 0x2e, 0x8e, 0x4c, 0x3d, 0xe2, 0xaf,
+ 0x08, 0x45, 0x03, 0xff, 0x09, 0xb6, 0xeb, 0x2d,
+ 0xc6, 0x1b, 0x88, 0x94, 0xac, 0x3e, 0xf1, 0x9f,
+ 0x0e, 0x0e, 0x2b, 0xd5, 0x00, 0x4d, 0x3f, 0x3b,
+ 0x53, 0xae, 0xaf, 0x1c, 0x33, 0x5f, 0x55, 0x6e,
+ 0x8d, 0xaf, 0x05, 0x7a, 0x10, 0x34, 0xc9, 0xf4,
+ 0x66, 0xcb, 0x62, 0x12, 0xa6, 0xee, 0xe8, 0x1c,
+ 0x5d, 0x12, 0x86, 0xdb, 0x6f, 0x1c, 0x33, 0xc4,
+ 0x1c, 0xda, 0x82, 0x2d, 0x3b, 0x59, 0xfe, 0xb1,
+ 0xa4, 0x59, 0x41, 0x86, 0xd0, 0xef, 0xae, 0xfb,
+ 0xda, 0x6d, 0x11, 0xb8, 0xca, 0xe9, 0x6e, 0xff,
+ 0xf7, 0xa9, 0xd9, 0x70, 0x30, 0xfc, 0x53, 0xe2,
+ 0xd7, 0xa2, 0x4e, 0xc7, 0x91, 0xd9, 0x07, 0x06,
+ 0xaa, 0xdd, 0xb0, 0x59, 0x28, 0x1d, 0x00, 0x66,
+ 0xc5, 0x54, 0xc2, 0xfc, 0x06, 0xda, 0x05, 0x90,
+ 0x52, 0x1d, 0x37, 0x66, 0xee, 0xf0, 0xb2, 0x55,
+ 0x8a, 0x5d, 0xd2, 0x38, 0x86, 0x94, 0x9b, 0xfc,
+ 0x10, 0x4c, 0xa1, 0xb9, 0x64, 0x3e, 0x44, 0xb8,
+ 0x5f, 0xb0, 0x0c, 0xec, 0xe0, 0xc9, 0xe5, 0x62,
+ 0x75, 0x3f, 0x09, 0xd5, 0xf5, 0xd9, 0x26, 0xba,
+ 0x9e, 0xd2, 0xf4, 0xb9, 0x48, 0x0a, 0xbc, 0xa2,
+ 0xd6, 0x7c, 0x36, 0x11, 0x7d, 0x26, 0x81, 0x89,
+ 0xcf, 0xa4, 0xad, 0x73, 0x0e, 0xee, 0xcc, 0x06,
+ 0xa9, 0xdb, 0xb1, 0xfd, 0xfb, 0x09, 0x7f, 0x90,
+ 0x42, 0x37, 0x2f, 0xe1, 0x9c, 0x0f, 0x6f, 0xcf,
+ 0x43, 0xb5, 0xd9, 0x90, 0xe1, 0x85, 0xf5, 0xa8,
+ 0xae
+};
+static const u8 dec_output009[] __initconst = {
+ 0xe6, 0xc3, 0xdb, 0x63, 0x55, 0x15, 0xe3, 0x5b,
+ 0xb7, 0x4b, 0x27, 0x8b, 0x5a, 0xdd, 0xc2, 0xe8,
+ 0x3a, 0x6b, 0xd7, 0x81, 0x96, 0x35, 0x97, 0xca,
+ 0xd7, 0x68, 0xe8, 0xef, 0xce, 0xab, 0xda, 0x09,
+ 0x6e, 0xd6, 0x8e, 0xcb, 0x55, 0xb5, 0xe1, 0xe5,
+ 0x57, 0xfd, 0xc4, 0xe3, 0xe0, 0x18, 0x4f, 0x85,
+ 0xf5, 0x3f, 0x7e, 0x4b, 0x88, 0xc9, 0x52, 0x44,
+ 0x0f, 0xea, 0xaf, 0x1f, 0x71, 0x48, 0x9f, 0x97,
+ 0x6d, 0xb9, 0x6f, 0x00, 0xa6, 0xde, 0x2b, 0x77,
+ 0x8b, 0x15, 0xad, 0x10, 0xa0, 0x2b, 0x7b, 0x41,
+ 0x90, 0x03, 0x2d, 0x69, 0xae, 0xcc, 0x77, 0x7c,
+ 0xa5, 0x9d, 0x29, 0x22, 0xc2, 0xea, 0xb4, 0x00,
+ 0x1a, 0xd2, 0x7a, 0x98, 0x8a, 0xf9, 0xf7, 0x82,
+ 0xb0, 0xab, 0xd8, 0xa6, 0x94, 0x8d, 0x58, 0x2f,
+ 0x01, 0x9e, 0x00, 0x20, 0xfc, 0x49, 0xdc, 0x0e,
+ 0x03, 0xe8, 0x45, 0x10, 0xd6, 0xa8, 0xda, 0x55,
+ 0x10, 0x9a, 0xdf, 0x67, 0x22, 0x8b, 0x43, 0xab,
+ 0x00, 0xbb, 0x02, 0xc8, 0xdd, 0x7b, 0x97, 0x17,
+ 0xd7, 0x1d, 0x9e, 0x02, 0x5e, 0x48, 0xde, 0x8e,
+ 0xcf, 0x99, 0x07, 0x95, 0x92, 0x3c, 0x5f, 0x9f,
+ 0xc5, 0x8a, 0xc0, 0x23, 0xaa, 0xd5, 0x8c, 0x82,
+ 0x6e, 0x16, 0x92, 0xb1, 0x12, 0x17, 0x07, 0xc3,
+ 0xfb, 0x36, 0xf5, 0x6c, 0x35, 0xd6, 0x06, 0x1f,
+ 0x9f, 0xa7, 0x94, 0xa2, 0x38, 0x63, 0x9c, 0xb0,
+ 0x71, 0xb3, 0xa5, 0xd2, 0xd8, 0xba, 0x9f, 0x08,
+ 0x01, 0xb3, 0xff, 0x04, 0x97, 0x73, 0x45, 0x1b,
+ 0xd5, 0xa9, 0x9c, 0x80, 0xaf, 0x04, 0x9a, 0x85,
+ 0xdb, 0x32, 0x5b, 0x5d, 0x1a, 0xc1, 0x36, 0x28,
+ 0x10, 0x79, 0xf1, 0x3c, 0xbf, 0x1a, 0x41, 0x5c,
+ 0x4e, 0xdf, 0xb2, 0x7c, 0x79, 0x3b, 0x7a, 0x62,
+ 0x3d, 0x4b, 0xc9, 0x9b, 0x2a, 0x2e, 0x7c, 0xa2,
+ 0xb1, 0x11, 0x98, 0xa7, 0x34, 0x1a, 0x00, 0xf3,
+ 0xd1, 0xbc, 0x18, 0x22, 0xba, 0x02, 0x56, 0x62,
+ 0x31, 0x10, 0x11, 0x6d, 0xe0, 0x54, 0x9d, 0x40,
+ 0x1f, 0x26, 0x80, 0x41, 0xca, 0x3f, 0x68, 0x0f,
+ 0x32, 0x1d, 0x0a, 0x8e, 0x79, 0xd8, 0xa4, 0x1b,
+ 0x29, 0x1c, 0x90, 0x8e, 0xc5, 0xe3, 0xb4, 0x91,
+ 0x37, 0x9a, 0x97, 0x86, 0x99, 0xd5, 0x09, 0xc5,
+ 0xbb, 0xa3, 0x3f, 0x21, 0x29, 0x82, 0x14, 0x5c,
+ 0xab, 0x25, 0xfb, 0xf2, 0x4f, 0x58, 0x26, 0xd4,
+ 0x83, 0xaa, 0x66, 0x89, 0x67, 0x7e, 0xc0, 0x49,
+ 0xe1, 0x11, 0x10, 0x7f, 0x7a, 0xda, 0x29, 0x04,
+ 0xff, 0xf0, 0xcb, 0x09, 0x7c, 0x9d, 0xfa, 0x03,
+ 0x6f, 0x81, 0x09, 0x31, 0x60, 0xfb, 0x08, 0xfa,
+ 0x74, 0xd3, 0x64, 0x44, 0x7c, 0x55, 0x85, 0xec,
+ 0x9c, 0x6e, 0x25, 0xb7, 0x6c, 0xc5, 0x37, 0xb6,
+ 0x83, 0x87, 0x72, 0x95, 0x8b, 0x9d, 0xe1, 0x69,
+ 0x5c, 0x31, 0x95, 0x42, 0xa6, 0x2c, 0xd1, 0x36,
+ 0x47, 0x1f, 0xec, 0x54, 0xab, 0xa2, 0x1c, 0xd8,
+ 0x00, 0xcc, 0xbc, 0x0d, 0x65, 0xe2, 0x67, 0xbf,
+ 0xbc, 0xea, 0xee, 0x9e, 0xe4, 0x36, 0x95, 0xbe,
+ 0x73, 0xd9, 0xa6, 0xd9, 0x0f, 0xa0, 0xcc, 0x82,
+ 0x76, 0x26, 0xad, 0x5b, 0x58, 0x6c, 0x4e, 0xab,
+ 0x29, 0x64, 0xd3, 0xd9, 0xa9, 0x08, 0x8c, 0x1d,
+ 0xa1, 0x4f, 0x80, 0xd8, 0x3f, 0x94, 0xfb, 0xd3,
+ 0x7b, 0xfc, 0xd1, 0x2b, 0xc3, 0x21, 0xeb, 0xe5,
+ 0x1c, 0x84, 0x23, 0x7f, 0x4b, 0xfa, 0xdb, 0x34,
+ 0x18, 0xa2, 0xc2, 0xe5, 0x13, 0xfe, 0x6c, 0x49,
+ 0x81, 0xd2, 0x73, 0xe7, 0xe2, 0xd7, 0xe4, 0x4f,
+ 0x4b, 0x08, 0x6e, 0xb1, 0x12, 0x22, 0x10, 0x9d,
+ 0xac, 0x51, 0x1e, 0x17, 0xd9, 0x8a, 0x0b, 0x42,
+ 0x88, 0x16, 0x81, 0x37, 0x7c, 0x6a, 0xf7, 0xef,
+ 0x2d, 0xe3, 0xd9, 0xf8, 0x5f, 0xe0, 0x53, 0x27,
+ 0x74, 0xb9, 0xe2, 0xd6, 0x1c, 0x80, 0x2c, 0x52,
+ 0x65
+};
+static const u8 dec_assoc009[] __initconst = {
+ 0x5a, 0x27, 0xff, 0xeb, 0xdf, 0x84, 0xb2, 0x9e,
+ 0xef
+};
+static const u8 dec_nonce009[] __initconst = {
+ 0xef, 0x2d, 0x63, 0xee, 0x6b, 0x80, 0x8b, 0x78
+};
+static const u8 dec_key009[] __initconst = {
+ 0xea, 0xbc, 0x56, 0x99, 0xe3, 0x50, 0xff, 0xc5,
+ 0xcc, 0x1a, 0xd7, 0xc1, 0x57, 0x72, 0xea, 0x86,
+ 0x5b, 0x89, 0x88, 0x61, 0x3d, 0x2f, 0x9b, 0xb2,
+ 0xe7, 0x9c, 0xec, 0x74, 0x6e, 0x3e, 0xf4, 0x3b
+};
+
+static const u8 dec_input010[] __initconst = {
+ 0xe5, 0x26, 0xa4, 0x3d, 0xbd, 0x33, 0xd0, 0x4b,
+ 0x6f, 0x05, 0xa7, 0x6e, 0x12, 0x7a, 0xd2, 0x74,
+ 0xa6, 0xdd, 0xbd, 0x95, 0xeb, 0xf9, 0xa4, 0xf1,
+ 0x59, 0x93, 0x91, 0x70, 0xd9, 0xfe, 0x9a, 0xcd,
+ 0x53, 0x1f, 0x3a, 0xab, 0xa6, 0x7c, 0x9f, 0xa6,
+ 0x9e, 0xbd, 0x99, 0xd9, 0xb5, 0x97, 0x44, 0xd5,
+ 0x14, 0x48, 0x4d, 0x9d, 0xc0, 0xd0, 0x05, 0x96,
+ 0xeb, 0x4c, 0x78, 0x55, 0x09, 0x08, 0x01, 0x02,
+ 0x30, 0x90, 0x7b, 0x96, 0x7a, 0x7b, 0x5f, 0x30,
+ 0x41, 0x24, 0xce, 0x68, 0x61, 0x49, 0x86, 0x57,
+ 0x82, 0xdd, 0x53, 0x1c, 0x51, 0x28, 0x2b, 0x53,
+ 0x6e, 0x2d, 0xc2, 0x20, 0x4c, 0xdd, 0x8f, 0x65,
+ 0x10, 0x20, 0x50, 0xdd, 0x9d, 0x50, 0xe5, 0x71,
+ 0x40, 0x53, 0x69, 0xfc, 0x77, 0x48, 0x11, 0xb9,
+ 0xde, 0xa4, 0x8d, 0x58, 0xe4, 0xa6, 0x1a, 0x18,
+ 0x47, 0x81, 0x7e, 0xfc, 0xdd, 0xf6, 0xef, 0xce,
+ 0x2f, 0x43, 0x68, 0xd6, 0x06, 0xe2, 0x74, 0x6a,
+ 0xad, 0x90, 0xf5, 0x37, 0xf3, 0x3d, 0x82, 0x69,
+ 0x40, 0xe9, 0x6b, 0xa7, 0x3d, 0xa8, 0x1e, 0xd2,
+ 0x02, 0x7c, 0xb7, 0x9b, 0xe4, 0xda, 0x8f, 0x95,
+ 0x06, 0xc5, 0xdf, 0x73, 0xa3, 0x20, 0x9a, 0x49,
+ 0xde, 0x9c, 0xbc, 0xee, 0x14, 0x3f, 0x81, 0x5e,
+ 0xf8, 0x3b, 0x59, 0x3c, 0xe1, 0x68, 0x12, 0x5a,
+ 0x3a, 0x76, 0x3a, 0x3f, 0xf7, 0x87, 0x33, 0x0a,
+ 0x01, 0xb8, 0xd4, 0xed, 0xb6, 0xbe, 0x94, 0x5e,
+ 0x70, 0x40, 0x56, 0x67, 0x1f, 0x50, 0x44, 0x19,
+ 0xce, 0x82, 0x70, 0x10, 0x87, 0x13, 0x20, 0x0b,
+ 0x4c, 0x5a, 0xb6, 0xf6, 0xa7, 0xae, 0x81, 0x75,
+ 0x01, 0x81, 0xe6, 0x4b, 0x57, 0x7c, 0xdd, 0x6d,
+ 0xf8, 0x1c, 0x29, 0x32, 0xf7, 0xda, 0x3c, 0x2d,
+ 0xf8, 0x9b, 0x25, 0x6e, 0x00, 0xb4, 0xf7, 0x2f,
+ 0xf7, 0x04, 0xf7, 0xa1, 0x56, 0xac, 0x4f, 0x1a,
+ 0x64, 0xb8, 0x47, 0x55, 0x18, 0x7b, 0x07, 0x4d,
+ 0xbd, 0x47, 0x24, 0x80, 0x5d, 0xa2, 0x70, 0xc5,
+ 0xdd, 0x8e, 0x82, 0xd4, 0xeb, 0xec, 0xb2, 0x0c,
+ 0x39, 0xd2, 0x97, 0xc1, 0xcb, 0xeb, 0xf4, 0x77,
+ 0x59, 0xb4, 0x87, 0xef, 0xcb, 0x43, 0x2d, 0x46,
+ 0x54, 0xd1, 0xa7, 0xd7, 0x15, 0x99, 0x0a, 0x43,
+ 0xa1, 0xe0, 0x99, 0x33, 0x71, 0xc1, 0xed, 0xfe,
+ 0x72, 0x46, 0x33, 0x8e, 0x91, 0x08, 0x9f, 0xc8,
+ 0x2e, 0xca, 0xfa, 0xdc, 0x59, 0xd5, 0xc3, 0x76,
+ 0x84, 0x9f, 0xa3, 0x37, 0x68, 0xc3, 0xf0, 0x47,
+ 0x2c, 0x68, 0xdb, 0x5e, 0xc3, 0x49, 0x4c, 0xe8,
+ 0x92, 0x85, 0xe2, 0x23, 0xd3, 0x3f, 0xad, 0x32,
+ 0xe5, 0x2b, 0x82, 0xd7, 0x8f, 0x99, 0x0a, 0x59,
+ 0x5c, 0x45, 0xd9, 0xb4, 0x51, 0x52, 0xc2, 0xae,
+ 0xbf, 0x80, 0xcf, 0xc9, 0xc9, 0x51, 0x24, 0x2a,
+ 0x3b, 0x3a, 0x4d, 0xae, 0xeb, 0xbd, 0x22, 0xc3,
+ 0x0e, 0x0f, 0x59, 0x25, 0x92, 0x17, 0xe9, 0x74,
+ 0xc7, 0x8b, 0x70, 0x70, 0x36, 0x55, 0x95, 0x75,
+ 0x4b, 0xad, 0x61, 0x2b, 0x09, 0xbc, 0x82, 0xf2,
+ 0x6e, 0x94, 0x43, 0xae, 0xc3, 0xd5, 0xcd, 0x8e,
+ 0xfe, 0x5b, 0x9a, 0x88, 0x43, 0x01, 0x75, 0xb2,
+ 0x23, 0x09, 0xf7, 0x89, 0x83, 0xe7, 0xfa, 0xf9,
+ 0xb4, 0x9b, 0xf8, 0xef, 0xbd, 0x1c, 0x92, 0xc1,
+ 0xda, 0x7e, 0xfe, 0x05, 0xba, 0x5a, 0xcd, 0x07,
+ 0x6a, 0x78, 0x9e, 0x5d, 0xfb, 0x11, 0x2f, 0x79,
+ 0x38, 0xb6, 0xc2, 0x5b, 0x6b, 0x51, 0xb4, 0x71,
+ 0xdd, 0xf7, 0x2a, 0xe4, 0xf4, 0x72, 0x76, 0xad,
+ 0xc2, 0xdd, 0x64, 0x5d, 0x79, 0xb6, 0xf5, 0x7a,
+ 0x77, 0x20, 0x05, 0x3d, 0x30, 0x06, 0xd4, 0x4c,
+ 0x0a, 0x2c, 0x98, 0x5a, 0xb9, 0xd4, 0x98, 0xa9,
+ 0x3f, 0xc6, 0x12, 0xea, 0x3b, 0x4b, 0xc5, 0x79,
+ 0x64, 0x63, 0x6b, 0x09, 0x54, 0x3b, 0x14, 0x27,
+ 0xba, 0x99, 0x80, 0xc8, 0x72, 0xa8, 0x12, 0x90,
+ 0x29, 0xba, 0x40, 0x54, 0x97, 0x2b, 0x7b, 0xfe,
+ 0xeb, 0xcd, 0x01, 0x05, 0x44, 0x72, 0xdb, 0x99,
+ 0xe4, 0x61, 0xc9, 0x69, 0xd6, 0xb9, 0x28, 0xd1,
+ 0x05, 0x3e, 0xf9, 0x0b, 0x49, 0x0a, 0x49, 0xe9,
+ 0x8d, 0x0e, 0xa7, 0x4a, 0x0f, 0xaf, 0x32, 0xd0,
+ 0xe0, 0xb2, 0x3a, 0x55, 0x58, 0xfe, 0x5c, 0x28,
+ 0x70, 0x51, 0x23, 0xb0, 0x7b, 0x6a, 0x5f, 0x1e,
+ 0xb8, 0x17, 0xd7, 0x94, 0x15, 0x8f, 0xee, 0x20,
+ 0xc7, 0x42, 0x25, 0x3e, 0x9a, 0x14, 0xd7, 0x60,
+ 0x72, 0x39, 0x47, 0x48, 0xa9, 0xfe, 0xdd, 0x47,
+ 0x0a, 0xb1, 0xe6, 0x60, 0x28, 0x8c, 0x11, 0x68,
+ 0xe1, 0xff, 0xd7, 0xce, 0xc8, 0xbe, 0xb3, 0xfe,
+ 0x27, 0x30, 0x09, 0x70, 0xd7, 0xfa, 0x02, 0x33,
+ 0x3a, 0x61, 0x2e, 0xc7, 0xff, 0xa4, 0x2a, 0xa8,
+ 0x6e, 0xb4, 0x79, 0x35, 0x6d, 0x4c, 0x1e, 0x38,
+ 0xf8, 0xee, 0xd4, 0x84, 0x4e, 0x6e, 0x28, 0xa7,
+ 0xce, 0xc8, 0xc1, 0xcf, 0x80, 0x05, 0xf3, 0x04,
+ 0xef, 0xc8, 0x18, 0x28, 0x2e, 0x8d, 0x5e, 0x0c,
+ 0xdf, 0xb8, 0x5f, 0x96, 0xe8, 0xc6, 0x9c, 0x2f,
+ 0xe5, 0xa6, 0x44, 0xd7, 0xe7, 0x99, 0x44, 0x0c,
+ 0xec, 0xd7, 0x05, 0x60, 0x97, 0xbb, 0x74, 0x77,
+ 0x58, 0xd5, 0xbb, 0x48, 0xde, 0x5a, 0xb2, 0x54,
+ 0x7f, 0x0e, 0x46, 0x70, 0x6a, 0x6f, 0x78, 0xa5,
+ 0x08, 0x89, 0x05, 0x4e, 0x7e, 0xa0, 0x69, 0xb4,
+ 0x40, 0x60, 0x55, 0x77, 0x75, 0x9b, 0x19, 0xf2,
+ 0xd5, 0x13, 0x80, 0x77, 0xf9, 0x4b, 0x3f, 0x1e,
+ 0xee, 0xe6, 0x76, 0x84, 0x7b, 0x8c, 0xe5, 0x27,
+ 0xa8, 0x0a, 0x91, 0x01, 0x68, 0x71, 0x8a, 0x3f,
+ 0x06, 0xab, 0xf6, 0xa9, 0xa5, 0xe6, 0x72, 0x92,
+ 0xe4, 0x67, 0xe2, 0xa2, 0x46, 0x35, 0x84, 0x55,
+ 0x7d, 0xca, 0xa8, 0x85, 0xd0, 0xf1, 0x3f, 0xbe,
+ 0xd7, 0x34, 0x64, 0xfc, 0xae, 0xe3, 0xe4, 0x04,
+ 0x9f, 0x66, 0x02, 0xb9, 0x88, 0x10, 0xd9, 0xc4,
+ 0x4c, 0x31, 0x43, 0x7a, 0x93, 0xe2, 0x9b, 0x56,
+ 0x43, 0x84, 0xdc, 0xdc, 0xde, 0x1d, 0xa4, 0x02,
+ 0x0e, 0xc2, 0xef, 0xc3, 0xf8, 0x78, 0xd1, 0xb2,
+ 0x6b, 0x63, 0x18, 0xc9, 0xa9, 0xe5, 0x72, 0xd8,
+ 0xf3, 0xb9, 0xd1, 0x8a, 0xc7, 0x1a, 0x02, 0x27,
+ 0x20, 0x77, 0x10, 0xe5, 0xc8, 0xd4, 0x4a, 0x47,
+ 0xe5, 0xdf, 0x5f, 0x01, 0xaa, 0xb0, 0xd4, 0x10,
+ 0xbb, 0x69, 0xe3, 0x36, 0xc8, 0xe1, 0x3d, 0x43,
+ 0xfb, 0x86, 0xcd, 0xcc, 0xbf, 0xf4, 0x88, 0xe0,
+ 0x20, 0xca, 0xb7, 0x1b, 0xf1, 0x2f, 0x5c, 0xee,
+ 0xd4, 0xd3, 0xa3, 0xcc, 0xa4, 0x1e, 0x1c, 0x47,
+ 0xfb, 0xbf, 0xfc, 0xa2, 0x41, 0x55, 0x9d, 0xf6,
+ 0x5a, 0x5e, 0x65, 0x32, 0x34, 0x7b, 0x52, 0x8d,
+ 0xd5, 0xd0, 0x20, 0x60, 0x03, 0xab, 0x3f, 0x8c,
+ 0xd4, 0x21, 0xea, 0x2a, 0xd9, 0xc4, 0xd0, 0xd3,
+ 0x65, 0xd8, 0x7a, 0x13, 0x28, 0x62, 0x32, 0x4b,
+ 0x2c, 0x87, 0x93, 0xa8, 0xb4, 0x52, 0x45, 0x09,
+ 0x44, 0xec, 0xec, 0xc3, 0x17, 0xdb, 0x9a, 0x4d,
+ 0x5c, 0xa9, 0x11, 0xd4, 0x7d, 0xaf, 0x9e, 0xf1,
+ 0x2d, 0xb2, 0x66, 0xc5, 0x1d, 0xed, 0xb7, 0xcd,
+ 0x0b, 0x25, 0x5e, 0x30, 0x47, 0x3f, 0x40, 0xf4,
+ 0xa1, 0xa0, 0x00, 0x94, 0x10, 0xc5, 0x6a, 0x63,
+ 0x1a, 0xd5, 0x88, 0x92, 0x8e, 0x82, 0x39, 0x87,
+ 0x3c, 0x78, 0x65, 0x58, 0x42, 0x75, 0x5b, 0xdd,
+ 0x77, 0x3e, 0x09, 0x4e, 0x76, 0x5b, 0xe6, 0x0e,
+ 0x4d, 0x38, 0xb2, 0xc0, 0xb8, 0x95, 0x01, 0x7a,
+ 0x10, 0xe0, 0xfb, 0x07, 0xf2, 0xab, 0x2d, 0x8c,
+ 0x32, 0xed, 0x2b, 0xc0, 0x46, 0xc2, 0xf5, 0x38,
+ 0x83, 0xf0, 0x17, 0xec, 0xc1, 0x20, 0x6a, 0x9a,
+ 0x0b, 0x00, 0xa0, 0x98, 0x22, 0x50, 0x23, 0xd5,
+ 0x80, 0x6b, 0xf6, 0x1f, 0xc3, 0xcc, 0x97, 0xc9,
+ 0x24, 0x9f, 0xf3, 0xaf, 0x43, 0x14, 0xd5, 0xa0
+};
+static const u8 dec_output010[] __initconst = {
+ 0x42, 0x93, 0xe4, 0xeb, 0x97, 0xb0, 0x57, 0xbf,
+ 0x1a, 0x8b, 0x1f, 0xe4, 0x5f, 0x36, 0x20, 0x3c,
+ 0xef, 0x0a, 0xa9, 0x48, 0x5f, 0x5f, 0x37, 0x22,
+ 0x3a, 0xde, 0xe3, 0xae, 0xbe, 0xad, 0x07, 0xcc,
+ 0xb1, 0xf6, 0xf5, 0xf9, 0x56, 0xdd, 0xe7, 0x16,
+ 0x1e, 0x7f, 0xdf, 0x7a, 0x9e, 0x75, 0xb7, 0xc7,
+ 0xbe, 0xbe, 0x8a, 0x36, 0x04, 0xc0, 0x10, 0xf4,
+ 0x95, 0x20, 0x03, 0xec, 0xdc, 0x05, 0xa1, 0x7d,
+ 0xc4, 0xa9, 0x2c, 0x82, 0xd0, 0xbc, 0x8b, 0xc5,
+ 0xc7, 0x45, 0x50, 0xf6, 0xa2, 0x1a, 0xb5, 0x46,
+ 0x3b, 0x73, 0x02, 0xa6, 0x83, 0x4b, 0x73, 0x82,
+ 0x58, 0x5e, 0x3b, 0x65, 0x2f, 0x0e, 0xfd, 0x2b,
+ 0x59, 0x16, 0xce, 0xa1, 0x60, 0x9c, 0xe8, 0x3a,
+ 0x99, 0xed, 0x8d, 0x5a, 0xcf, 0xf6, 0x83, 0xaf,
+ 0xba, 0xd7, 0x73, 0x73, 0x40, 0x97, 0x3d, 0xca,
+ 0xef, 0x07, 0x57, 0xe6, 0xd9, 0x70, 0x0e, 0x95,
+ 0xae, 0xa6, 0x8d, 0x04, 0xcc, 0xee, 0xf7, 0x09,
+ 0x31, 0x77, 0x12, 0xa3, 0x23, 0x97, 0x62, 0xb3,
+ 0x7b, 0x32, 0xfb, 0x80, 0x14, 0x48, 0x81, 0xc3,
+ 0xe5, 0xea, 0x91, 0x39, 0x52, 0x81, 0xa2, 0x4f,
+ 0xe4, 0xb3, 0x09, 0xff, 0xde, 0x5e, 0xe9, 0x58,
+ 0x84, 0x6e, 0xf9, 0x3d, 0xdf, 0x25, 0xea, 0xad,
+ 0xae, 0xe6, 0x9a, 0xd1, 0x89, 0x55, 0xd3, 0xde,
+ 0x6c, 0x52, 0xdb, 0x70, 0xfe, 0x37, 0xce, 0x44,
+ 0x0a, 0xa8, 0x25, 0x5f, 0x92, 0xc1, 0x33, 0x4a,
+ 0x4f, 0x9b, 0x62, 0x35, 0xff, 0xce, 0xc0, 0xa9,
+ 0x60, 0xce, 0x52, 0x00, 0x97, 0x51, 0x35, 0x26,
+ 0x2e, 0xb9, 0x36, 0xa9, 0x87, 0x6e, 0x1e, 0xcc,
+ 0x91, 0x78, 0x53, 0x98, 0x86, 0x5b, 0x9c, 0x74,
+ 0x7d, 0x88, 0x33, 0xe1, 0xdf, 0x37, 0x69, 0x2b,
+ 0xbb, 0xf1, 0x4d, 0xf4, 0xd1, 0xf1, 0x39, 0x93,
+ 0x17, 0x51, 0x19, 0xe3, 0x19, 0x1e, 0x76, 0x37,
+ 0x25, 0xfb, 0x09, 0x27, 0x6a, 0xab, 0x67, 0x6f,
+ 0x14, 0x12, 0x64, 0xe7, 0xc4, 0x07, 0xdf, 0x4d,
+ 0x17, 0xbb, 0x6d, 0xe0, 0xe9, 0xb9, 0xab, 0xca,
+ 0x10, 0x68, 0xaf, 0x7e, 0xb7, 0x33, 0x54, 0x73,
+ 0x07, 0x6e, 0xf7, 0x81, 0x97, 0x9c, 0x05, 0x6f,
+ 0x84, 0x5f, 0xd2, 0x42, 0xfb, 0x38, 0xcf, 0xd1,
+ 0x2f, 0x14, 0x30, 0x88, 0x98, 0x4d, 0x5a, 0xa9,
+ 0x76, 0xd5, 0x4f, 0x3e, 0x70, 0x6c, 0x85, 0x76,
+ 0xd7, 0x01, 0xa0, 0x1a, 0xc8, 0x4e, 0xaa, 0xac,
+ 0x78, 0xfe, 0x46, 0xde, 0x6a, 0x05, 0x46, 0xa7,
+ 0x43, 0x0c, 0xb9, 0xde, 0xb9, 0x68, 0xfb, 0xce,
+ 0x42, 0x99, 0x07, 0x4d, 0x0b, 0x3b, 0x5a, 0x30,
+ 0x35, 0xa8, 0xf9, 0x3a, 0x73, 0xef, 0x0f, 0xdb,
+ 0x1e, 0x16, 0x42, 0xc4, 0xba, 0xae, 0x58, 0xaa,
+ 0xf8, 0xe5, 0x75, 0x2f, 0x1b, 0x15, 0x5c, 0xfd,
+ 0x0a, 0x97, 0xd0, 0xe4, 0x37, 0x83, 0x61, 0x5f,
+ 0x43, 0xa6, 0xc7, 0x3f, 0x38, 0x59, 0xe6, 0xeb,
+ 0xa3, 0x90, 0xc3, 0xaa, 0xaa, 0x5a, 0xd3, 0x34,
+ 0xd4, 0x17, 0xc8, 0x65, 0x3e, 0x57, 0xbc, 0x5e,
+ 0xdd, 0x9e, 0xb7, 0xf0, 0x2e, 0x5b, 0xb2, 0x1f,
+ 0x8a, 0x08, 0x0d, 0x45, 0x91, 0x0b, 0x29, 0x53,
+ 0x4f, 0x4c, 0x5a, 0x73, 0x56, 0xfe, 0xaf, 0x41,
+ 0x01, 0x39, 0x0a, 0x24, 0x3c, 0x7e, 0xbe, 0x4e,
+ 0x53, 0xf3, 0xeb, 0x06, 0x66, 0x51, 0x28, 0x1d,
+ 0xbd, 0x41, 0x0a, 0x01, 0xab, 0x16, 0x47, 0x27,
+ 0x47, 0x47, 0xf7, 0xcb, 0x46, 0x0a, 0x70, 0x9e,
+ 0x01, 0x9c, 0x09, 0xe1, 0x2a, 0x00, 0x1a, 0xd8,
+ 0xd4, 0x79, 0x9d, 0x80, 0x15, 0x8e, 0x53, 0x2a,
+ 0x65, 0x83, 0x78, 0x3e, 0x03, 0x00, 0x07, 0x12,
+ 0x1f, 0x33, 0x3e, 0x7b, 0x13, 0x37, 0xf1, 0xc3,
+ 0xef, 0xb7, 0xc1, 0x20, 0x3c, 0x3e, 0x67, 0x66,
+ 0x5d, 0x88, 0xa7, 0x7d, 0x33, 0x50, 0x77, 0xb0,
+ 0x28, 0x8e, 0xe7, 0x2c, 0x2e, 0x7a, 0xf4, 0x3c,
+ 0x8d, 0x74, 0x83, 0xaf, 0x8e, 0x87, 0x0f, 0xe4,
+ 0x50, 0xff, 0x84, 0x5c, 0x47, 0x0c, 0x6a, 0x49,
+ 0xbf, 0x42, 0x86, 0x77, 0x15, 0x48, 0xa5, 0x90,
+ 0x5d, 0x93, 0xd6, 0x2a, 0x11, 0xd5, 0xd5, 0x11,
+ 0xaa, 0xce, 0xe7, 0x6f, 0xa5, 0xb0, 0x09, 0x2c,
+ 0x8d, 0xd3, 0x92, 0xf0, 0x5a, 0x2a, 0xda, 0x5b,
+ 0x1e, 0xd5, 0x9a, 0xc4, 0xc4, 0xf3, 0x49, 0x74,
+ 0x41, 0xca, 0xe8, 0xc1, 0xf8, 0x44, 0xd6, 0x3c,
+ 0xae, 0x6c, 0x1d, 0x9a, 0x30, 0x04, 0x4d, 0x27,
+ 0x0e, 0xb1, 0x5f, 0x59, 0xa2, 0x24, 0xe8, 0xe1,
+ 0x98, 0xc5, 0x6a, 0x4c, 0xfe, 0x41, 0xd2, 0x27,
+ 0x42, 0x52, 0xe1, 0xe9, 0x7d, 0x62, 0xe4, 0x88,
+ 0x0f, 0xad, 0xb2, 0x70, 0xcb, 0x9d, 0x4c, 0x27,
+ 0x2e, 0x76, 0x1e, 0x1a, 0x63, 0x65, 0xf5, 0x3b,
+ 0xf8, 0x57, 0x69, 0xeb, 0x5b, 0x38, 0x26, 0x39,
+ 0x33, 0x25, 0x45, 0x3e, 0x91, 0xb8, 0xd8, 0xc7,
+ 0xd5, 0x42, 0xc0, 0x22, 0x31, 0x74, 0xf4, 0xbc,
+ 0x0c, 0x23, 0xf1, 0xca, 0xc1, 0x8d, 0xd7, 0xbe,
+ 0xc9, 0x62, 0xe4, 0x08, 0x1a, 0xcf, 0x36, 0xd5,
+ 0xfe, 0x55, 0x21, 0x59, 0x91, 0x87, 0x87, 0xdf,
+ 0x06, 0xdb, 0xdf, 0x96, 0x45, 0x58, 0xda, 0x05,
+ 0xcd, 0x50, 0x4d, 0xd2, 0x7d, 0x05, 0x18, 0x73,
+ 0x6a, 0x8d, 0x11, 0x85, 0xa6, 0x88, 0xe8, 0xda,
+ 0xe6, 0x30, 0x33, 0xa4, 0x89, 0x31, 0x75, 0xbe,
+ 0x69, 0x43, 0x84, 0x43, 0x50, 0x87, 0xdd, 0x71,
+ 0x36, 0x83, 0xc3, 0x78, 0x74, 0x24, 0x0a, 0xed,
+ 0x7b, 0xdb, 0xa4, 0x24, 0x0b, 0xb9, 0x7e, 0x5d,
+ 0xff, 0xde, 0xb1, 0xef, 0x61, 0x5a, 0x45, 0x33,
+ 0xf6, 0x17, 0x07, 0x08, 0x98, 0x83, 0x92, 0x0f,
+ 0x23, 0x6d, 0xe6, 0xaa, 0x17, 0x54, 0xad, 0x6a,
+ 0xc8, 0xdb, 0x26, 0xbe, 0xb8, 0xb6, 0x08, 0xfa,
+ 0x68, 0xf1, 0xd7, 0x79, 0x6f, 0x18, 0xb4, 0x9e,
+ 0x2d, 0x3f, 0x1b, 0x64, 0xaf, 0x8d, 0x06, 0x0e,
+ 0x49, 0x28, 0xe0, 0x5d, 0x45, 0x68, 0x13, 0x87,
+ 0xfa, 0xde, 0x40, 0x7b, 0xd2, 0xc3, 0x94, 0xd5,
+ 0xe1, 0xd9, 0xc2, 0xaf, 0x55, 0x89, 0xeb, 0xb4,
+ 0x12, 0x59, 0xa8, 0xd4, 0xc5, 0x29, 0x66, 0x38,
+ 0xe6, 0xac, 0x22, 0x22, 0xd9, 0x64, 0x9b, 0x34,
+ 0x0a, 0x32, 0x9f, 0xc2, 0xbf, 0x17, 0x6c, 0x3f,
+ 0x71, 0x7a, 0x38, 0x6b, 0x98, 0xfb, 0x49, 0x36,
+ 0x89, 0xc9, 0xe2, 0xd6, 0xc7, 0x5d, 0xd0, 0x69,
+ 0x5f, 0x23, 0x35, 0xc9, 0x30, 0xe2, 0xfd, 0x44,
+ 0x58, 0x39, 0xd7, 0x97, 0xfb, 0x5c, 0x00, 0xd5,
+ 0x4f, 0x7a, 0x1a, 0x95, 0x8b, 0x62, 0x4b, 0xce,
+ 0xe5, 0x91, 0x21, 0x7b, 0x30, 0x00, 0xd6, 0xdd,
+ 0x6d, 0x02, 0x86, 0x49, 0x0f, 0x3c, 0x1a, 0x27,
+ 0x3c, 0xd3, 0x0e, 0x71, 0xf2, 0xff, 0xf5, 0x2f,
+ 0x87, 0xac, 0x67, 0x59, 0x81, 0xa3, 0xf7, 0xf8,
+ 0xd6, 0x11, 0x0c, 0x84, 0xa9, 0x03, 0xee, 0x2a,
+ 0xc4, 0xf3, 0x22, 0xab, 0x7c, 0xe2, 0x25, 0xf5,
+ 0x67, 0xa3, 0xe4, 0x11, 0xe0, 0x59, 0xb3, 0xca,
+ 0x87, 0xa0, 0xae, 0xc9, 0xa6, 0x62, 0x1b, 0x6e,
+ 0x4d, 0x02, 0x6b, 0x07, 0x9d, 0xfd, 0xd0, 0x92,
+ 0x06, 0xe1, 0xb2, 0x9a, 0x4a, 0x1f, 0x1f, 0x13,
+ 0x49, 0x99, 0x97, 0x08, 0xde, 0x7f, 0x98, 0xaf,
+ 0x51, 0x98, 0xee, 0x2c, 0xcb, 0xf0, 0x0b, 0xc6,
+ 0xb6, 0xb7, 0x2d, 0x9a, 0xb1, 0xac, 0xa6, 0xe3,
+ 0x15, 0x77, 0x9d, 0x6b, 0x1a, 0xe4, 0xfc, 0x8b,
+ 0xf2, 0x17, 0x59, 0x08, 0x04, 0x58, 0x81, 0x9d,
+ 0x1b, 0x1b, 0x69, 0x55, 0xc2, 0xb4, 0x3c, 0x1f,
+ 0x50, 0xf1, 0x7f, 0x77, 0x90, 0x4c, 0x66, 0x40,
+ 0x5a, 0xc0, 0x33, 0x1f, 0xcb, 0x05, 0x6d, 0x5c,
+ 0x06, 0x87, 0x52, 0xa2, 0x8f, 0x26, 0xd5, 0x4f
+};
+static const u8 dec_assoc010[] __initconst = {
+ 0xd2, 0xa1, 0x70, 0xdb, 0x7a, 0xf8, 0xfa, 0x27,
+ 0xba, 0x73, 0x0f, 0xbf, 0x3d, 0x1e, 0x82, 0xb2
+};
+static const u8 dec_nonce010[] __initconst = {
+ 0xdb, 0x92, 0x0f, 0x7f, 0x17, 0x54, 0x0c, 0x30
+};
+static const u8 dec_key010[] __initconst = {
+ 0x47, 0x11, 0xeb, 0x86, 0x2b, 0x2c, 0xab, 0x44,
+ 0x34, 0xda, 0x7f, 0x57, 0x03, 0x39, 0x0c, 0xaf,
+ 0x2c, 0x14, 0xfd, 0x65, 0x23, 0xe9, 0x8e, 0x74,
+ 0xd5, 0x08, 0x68, 0x08, 0xe7, 0xb4, 0x72, 0xd7
+};
+
+static const u8 dec_input011[] __initconst = {
+ 0x6a, 0xfc, 0x4b, 0x25, 0xdf, 0xc0, 0xe4, 0xe8,
+ 0x17, 0x4d, 0x4c, 0xc9, 0x7e, 0xde, 0x3a, 0xcc,
+ 0x3c, 0xba, 0x6a, 0x77, 0x47, 0xdb, 0xe3, 0x74,
+ 0x7a, 0x4d, 0x5f, 0x8d, 0x37, 0x55, 0x80, 0x73,
+ 0x90, 0x66, 0x5d, 0x3a, 0x7d, 0x5d, 0x86, 0x5e,
+ 0x8d, 0xfd, 0x83, 0xff, 0x4e, 0x74, 0x6f, 0xf9,
+ 0xe6, 0x70, 0x17, 0x70, 0x3e, 0x96, 0xa7, 0x7e,
+ 0xcb, 0xab, 0x8f, 0x58, 0x24, 0x9b, 0x01, 0xfd,
+ 0xcb, 0xe6, 0x4d, 0x9b, 0xf0, 0x88, 0x94, 0x57,
+ 0x66, 0xef, 0x72, 0x4c, 0x42, 0x6e, 0x16, 0x19,
+ 0x15, 0xea, 0x70, 0x5b, 0xac, 0x13, 0xdb, 0x9f,
+ 0x18, 0xe2, 0x3c, 0x26, 0x97, 0xbc, 0xdc, 0x45,
+ 0x8c, 0x6c, 0x24, 0x69, 0x9c, 0xf7, 0x65, 0x1e,
+ 0x18, 0x59, 0x31, 0x7c, 0xe4, 0x73, 0xbc, 0x39,
+ 0x62, 0xc6, 0x5c, 0x9f, 0xbf, 0xfa, 0x90, 0x03,
+ 0xc9, 0x72, 0x26, 0xb6, 0x1b, 0xc2, 0xb7, 0x3f,
+ 0xf2, 0x13, 0x77, 0xf2, 0x8d, 0xb9, 0x47, 0xd0,
+ 0x53, 0xdd, 0xc8, 0x91, 0x83, 0x8b, 0xb1, 0xce,
+ 0xa3, 0xfe, 0xcd, 0xd9, 0xdd, 0x92, 0x7b, 0xdb,
+ 0xb8, 0xfb, 0xc9, 0x2d, 0x01, 0x59, 0x39, 0x52,
+ 0xad, 0x1b, 0xec, 0xcf, 0xd7, 0x70, 0x13, 0x21,
+ 0xf5, 0x47, 0xaa, 0x18, 0x21, 0x5c, 0xc9, 0x9a,
+ 0xd2, 0x6b, 0x05, 0x9c, 0x01, 0xa1, 0xda, 0x35,
+ 0x5d, 0xb3, 0x70, 0xe6, 0xa9, 0x80, 0x8b, 0x91,
+ 0xb7, 0xb3, 0x5f, 0x24, 0x9a, 0xb7, 0xd1, 0x6b,
+ 0xa1, 0x1c, 0x50, 0xba, 0x49, 0xe0, 0xee, 0x2e,
+ 0x75, 0xac, 0x69, 0xc0, 0xeb, 0x03, 0xdd, 0x19,
+ 0xe5, 0xf6, 0x06, 0xdd, 0xc3, 0xd7, 0x2b, 0x07,
+ 0x07, 0x30, 0xa7, 0x19, 0x0c, 0xbf, 0xe6, 0x18,
+ 0xcc, 0xb1, 0x01, 0x11, 0x85, 0x77, 0x1d, 0x96,
+ 0xa7, 0xa3, 0x00, 0x84, 0x02, 0xa2, 0x83, 0x68,
+ 0xda, 0x17, 0x27, 0xc8, 0x7f, 0x23, 0xb7, 0xf4,
+ 0x13, 0x85, 0xcf, 0xdd, 0x7a, 0x7d, 0x24, 0x57,
+ 0xfe, 0x05, 0x93, 0xf5, 0x74, 0xce, 0xed, 0x0c,
+ 0x20, 0x98, 0x8d, 0x92, 0x30, 0xa1, 0x29, 0x23,
+ 0x1a, 0xa0, 0x4f, 0x69, 0x56, 0x4c, 0xe1, 0xc8,
+ 0xce, 0xf6, 0x9a, 0x0c, 0xa4, 0xfa, 0x04, 0xf6,
+ 0x62, 0x95, 0xf2, 0xfa, 0xc7, 0x40, 0x68, 0x40,
+ 0x8f, 0x41, 0xda, 0xb4, 0x26, 0x6f, 0x70, 0xab,
+ 0x40, 0x61, 0xa4, 0x0e, 0x75, 0xfb, 0x86, 0xeb,
+ 0x9d, 0x9a, 0x1f, 0xec, 0x76, 0x99, 0xe7, 0xea,
+ 0xaa, 0x1e, 0x2d, 0xb5, 0xd4, 0xa6, 0x1a, 0xb8,
+ 0x61, 0x0a, 0x1d, 0x16, 0x5b, 0x98, 0xc2, 0x31,
+ 0x40, 0xe7, 0x23, 0x1d, 0x66, 0x99, 0xc8, 0xc0,
+ 0xd7, 0xce, 0xf3, 0x57, 0x40, 0x04, 0x3f, 0xfc,
+ 0xea, 0xb3, 0xfc, 0xd2, 0xd3, 0x99, 0xa4, 0x94,
+ 0x69, 0xa0, 0xef, 0xd1, 0x85, 0xb3, 0xa6, 0xb1,
+ 0x28, 0xbf, 0x94, 0x67, 0x22, 0xc3, 0x36, 0x46,
+ 0xf8, 0xd2, 0x0f, 0x5f, 0xf4, 0x59, 0x80, 0xe6,
+ 0x2d, 0x43, 0x08, 0x7d, 0x19, 0x09, 0x97, 0xa7,
+ 0x4c, 0x3d, 0x8d, 0xba, 0x65, 0x62, 0xa3, 0x71,
+ 0x33, 0x29, 0x62, 0xdb, 0xc1, 0x33, 0x34, 0x1a,
+ 0x63, 0x33, 0x16, 0xb6, 0x64, 0x7e, 0xab, 0x33,
+ 0xf0, 0xe6, 0x26, 0x68, 0xba, 0x1d, 0x2e, 0x38,
+ 0x08, 0xe6, 0x02, 0xd3, 0x25, 0x2c, 0x47, 0x23,
+ 0x58, 0x34, 0x0f, 0x9d, 0x63, 0x4f, 0x63, 0xbb,
+ 0x7f, 0x3b, 0x34, 0x38, 0xa7, 0xb5, 0x8d, 0x65,
+ 0xd9, 0x9f, 0x79, 0x55, 0x3e, 0x4d, 0xe7, 0x73,
+ 0xd8, 0xf6, 0x98, 0x97, 0x84, 0x60, 0x9c, 0xc8,
+ 0xa9, 0x3c, 0xf6, 0xdc, 0x12, 0x5c, 0xe1, 0xbb,
+ 0x0b, 0x8b, 0x98, 0x9c, 0x9d, 0x26, 0x7c, 0x4a,
+ 0xe6, 0x46, 0x36, 0x58, 0x21, 0x4a, 0xee, 0xca,
+ 0xd7, 0x3b, 0xc2, 0x6c, 0x49, 0x2f, 0xe5, 0xd5,
+ 0x03, 0x59, 0x84, 0x53, 0xcb, 0xfe, 0x92, 0x71,
+ 0x2e, 0x7c, 0x21, 0xcc, 0x99, 0x85, 0x7f, 0xb8,
+ 0x74, 0x90, 0x13, 0x42, 0x3f, 0xe0, 0x6b, 0x1d,
+ 0xf2, 0x4d, 0x54, 0xd4, 0xfc, 0x3a, 0x05, 0xe6,
+ 0x74, 0xaf, 0xa6, 0xa0, 0x2a, 0x20, 0x23, 0x5d,
+ 0x34, 0x5c, 0xd9, 0x3e, 0x4e, 0xfa, 0x93, 0xe7,
+ 0xaa, 0xe9, 0x6f, 0x08, 0x43, 0x67, 0x41, 0xc5,
+ 0xad, 0xfb, 0x31, 0x95, 0x82, 0x73, 0x32, 0xd8,
+ 0xa6, 0xa3, 0xed, 0x0e, 0x2d, 0xf6, 0x5f, 0xfd,
+ 0x80, 0xa6, 0x7a, 0xe0, 0xdf, 0x78, 0x15, 0x29,
+ 0x74, 0x33, 0xd0, 0x9e, 0x83, 0x86, 0x72, 0x22,
+ 0x57, 0x29, 0xb9, 0x9e, 0x5d, 0xd3, 0x1a, 0xb5,
+ 0x96, 0x72, 0x41, 0x3d, 0xf1, 0x64, 0x43, 0x67,
+ 0xee, 0xaa, 0x5c, 0xd3, 0x9a, 0x96, 0x13, 0x11,
+ 0x5d, 0xf3, 0x0c, 0x87, 0x82, 0x1e, 0x41, 0x9e,
+ 0xd0, 0x27, 0xd7, 0x54, 0x3b, 0x67, 0x73, 0x09,
+ 0x91, 0xe9, 0xd5, 0x36, 0xa7, 0xb5, 0x55, 0xe4,
+ 0xf3, 0x21, 0x51, 0x49, 0x22, 0x07, 0x55, 0x4f,
+ 0x44, 0x4b, 0xd2, 0x15, 0x93, 0x17, 0x2a, 0xfa,
+ 0x4d, 0x4a, 0x57, 0xdb, 0x4c, 0xa6, 0xeb, 0xec,
+ 0x53, 0x25, 0x6c, 0x21, 0xed, 0x00, 0x4c, 0x3b,
+ 0xca, 0x14, 0x57, 0xa9, 0xd6, 0x6a, 0xcd, 0x8d,
+ 0x5e, 0x74, 0xac, 0x72, 0xc1, 0x97, 0xe5, 0x1b,
+ 0x45, 0x4e, 0xda, 0xfc, 0xcc, 0x40, 0xe8, 0x48,
+ 0x88, 0x0b, 0xa3, 0xe3, 0x8d, 0x83, 0x42, 0xc3,
+ 0x23, 0xfd, 0x68, 0xb5, 0x8e, 0xf1, 0x9d, 0x63,
+ 0x77, 0xe9, 0xa3, 0x8e, 0x8c, 0x26, 0x6b, 0xbd,
+ 0x72, 0x73, 0x35, 0x0c, 0x03, 0xf8, 0x43, 0x78,
+ 0x52, 0x71, 0x15, 0x1f, 0x71, 0x5d, 0x6e, 0xed,
+ 0xb9, 0xcc, 0x86, 0x30, 0xdb, 0x2b, 0xd3, 0x82,
+ 0x88, 0x23, 0x71, 0x90, 0x53, 0x5c, 0xa9, 0x2f,
+ 0x76, 0x01, 0xb7, 0x9a, 0xfe, 0x43, 0x55, 0xa3,
+ 0x04, 0x9b, 0x0e, 0xe4, 0x59, 0xdf, 0xc9, 0xe9,
+ 0xb1, 0xea, 0x29, 0x28, 0x3c, 0x5c, 0xae, 0x72,
+ 0x84, 0xb6, 0xc6, 0xeb, 0x0c, 0x27, 0x07, 0x74,
+ 0x90, 0x0d, 0x31, 0xb0, 0x00, 0x77, 0xe9, 0x40,
+ 0x70, 0x6f, 0x68, 0xa7, 0xfd, 0x06, 0xec, 0x4b,
+ 0xc0, 0xb7, 0xac, 0xbc, 0x33, 0xb7, 0x6d, 0x0a,
+ 0xbd, 0x12, 0x1b, 0x59, 0xcb, 0xdd, 0x32, 0xf5,
+ 0x1d, 0x94, 0x57, 0x76, 0x9e, 0x0c, 0x18, 0x98,
+ 0x71, 0xd7, 0x2a, 0xdb, 0x0b, 0x7b, 0xa7, 0x71,
+ 0xb7, 0x67, 0x81, 0x23, 0x96, 0xae, 0xb9, 0x7e,
+ 0x32, 0x43, 0x92, 0x8a, 0x19, 0xa0, 0xc4, 0xd4,
+ 0x3b, 0x57, 0xf9, 0x4a, 0x2c, 0xfb, 0x51, 0x46,
+ 0xbb, 0xcb, 0x5d, 0xb3, 0xef, 0x13, 0x93, 0x6e,
+ 0x68, 0x42, 0x54, 0x57, 0xd3, 0x6a, 0x3a, 0x8f,
+ 0x9d, 0x66, 0xbf, 0xbd, 0x36, 0x23, 0xf5, 0x93,
+ 0x83, 0x7b, 0x9c, 0xc0, 0xdd, 0xc5, 0x49, 0xc0,
+ 0x64, 0xed, 0x07, 0x12, 0xb3, 0xe6, 0xe4, 0xe5,
+ 0x38, 0x95, 0x23, 0xb1, 0xa0, 0x3b, 0x1a, 0x61,
+ 0xda, 0x17, 0xac, 0xc3, 0x58, 0xdd, 0x74, 0x64,
+ 0x22, 0x11, 0xe8, 0x32, 0x1d, 0x16, 0x93, 0x85,
+ 0x99, 0xa5, 0x9c, 0x34, 0x55, 0xb1, 0xe9, 0x20,
+ 0x72, 0xc9, 0x28, 0x7b, 0x79, 0x00, 0xa1, 0xa6,
+ 0xa3, 0x27, 0x40, 0x18, 0x8a, 0x54, 0xe0, 0xcc,
+ 0xe8, 0x4e, 0x8e, 0x43, 0x96, 0xe7, 0x3f, 0xc8,
+ 0xe9, 0xb2, 0xf9, 0xc9, 0xda, 0x04, 0x71, 0x50,
+ 0x47, 0xe4, 0xaa, 0xce, 0xa2, 0x30, 0xc8, 0xe4,
+ 0xac, 0xc7, 0x0d, 0x06, 0x2e, 0xe6, 0xe8, 0x80,
+ 0x36, 0x29, 0x9e, 0x01, 0xb8, 0xc3, 0xf0, 0xa0,
+ 0x5d, 0x7a, 0xca, 0x4d, 0xa0, 0x57, 0xbd, 0x2a,
+ 0x45, 0xa7, 0x7f, 0x9c, 0x93, 0x07, 0x8f, 0x35,
+ 0x67, 0x92, 0xe3, 0xe9, 0x7f, 0xa8, 0x61, 0x43,
+ 0x9e, 0x25, 0x4f, 0x33, 0x76, 0x13, 0x6e, 0x12,
+ 0xb9, 0xdd, 0xa4, 0x7c, 0x08, 0x9f, 0x7c, 0xe7,
+ 0x0a, 0x8d, 0x84, 0x06, 0xa4, 0x33, 0x17, 0x34,
+ 0x5e, 0x10, 0x7c, 0xc0, 0xa8, 0x3d, 0x1f, 0x42,
+ 0x20, 0x51, 0x65, 0x5d, 0x09, 0xc3, 0xaa, 0xc0,
+ 0xc8, 0x0d, 0xf0, 0x79, 0xbc, 0x20, 0x1b, 0x95,
+ 0xe7, 0x06, 0x7d, 0x47, 0x20, 0x03, 0x1a, 0x74,
+ 0xdd, 0xe2, 0xd4, 0xae, 0x38, 0x71, 0x9b, 0xf5,
+ 0x80, 0xec, 0x08, 0x4e, 0x56, 0xba, 0x76, 0x12,
+ 0x1a, 0xdf, 0x48, 0xf3, 0xae, 0xb3, 0xe6, 0xe6,
+ 0xbe, 0xc0, 0x91, 0x2e, 0x01, 0xb3, 0x01, 0x86,
+ 0xa2, 0xb9, 0x52, 0xd1, 0x21, 0xae, 0xd4, 0x97,
+ 0x1d, 0xef, 0x41, 0x12, 0x95, 0x3d, 0x48, 0x45,
+ 0x1c, 0x56, 0x32, 0x8f, 0xb8, 0x43, 0xbb, 0x19,
+ 0xf3, 0xca, 0xe9, 0xeb, 0x6d, 0x84, 0xbe, 0x86,
+ 0x06, 0xe2, 0x36, 0xb2, 0x62, 0x9d, 0xd3, 0x4c,
+ 0x48, 0x18, 0x54, 0x13, 0x4e, 0xcf, 0xfd, 0xba,
+ 0x84, 0xb9, 0x30, 0x53, 0xcf, 0xfb, 0xb9, 0x29,
+ 0x8f, 0xdc, 0x9f, 0xef, 0x60, 0x0b, 0x64, 0xf6,
+ 0x8b, 0xee, 0xa6, 0x91, 0xc2, 0x41, 0x6c, 0xf6,
+ 0xfa, 0x79, 0x67, 0x4b, 0xc1, 0x3f, 0xaf, 0x09,
+ 0x81, 0xd4, 0x5d, 0xcb, 0x09, 0xdf, 0x36, 0x31,
+ 0xc0, 0x14, 0x3c, 0x7c, 0x0e, 0x65, 0x95, 0x99,
+ 0x6d, 0xa3, 0xf4, 0xd7, 0x38, 0xee, 0x1a, 0x2b,
+ 0x37, 0xe2, 0xa4, 0x3b, 0x4b, 0xd0, 0x65, 0xca,
+ 0xf8, 0xc3, 0xe8, 0x15, 0x20, 0xef, 0xf2, 0x00,
+ 0xfd, 0x01, 0x09, 0xc5, 0xc8, 0x17, 0x04, 0x93,
+ 0xd0, 0x93, 0x03, 0x55, 0xc5, 0xfe, 0x32, 0xa3,
+ 0x3e, 0x28, 0x2d, 0x3b, 0x93, 0x8a, 0xcc, 0x07,
+ 0x72, 0x80, 0x8b, 0x74, 0x16, 0x24, 0xbb, 0xda,
+ 0x94, 0x39, 0x30, 0x8f, 0xb1, 0xcd, 0x4a, 0x90,
+ 0x92, 0x7c, 0x14, 0x8f, 0x95, 0x4e, 0xac, 0x9b,
+ 0xd8, 0x8f, 0x1a, 0x87, 0xa4, 0x32, 0x27, 0x8a,
+ 0xba, 0xf7, 0x41, 0xcf, 0x84, 0x37, 0x19, 0xe6,
+ 0x06, 0xf5, 0x0e, 0xcf, 0x36, 0xf5, 0x9e, 0x6c,
+ 0xde, 0xbc, 0xff, 0x64, 0x7e, 0x4e, 0x59, 0x57,
+ 0x48, 0xfe, 0x14, 0xf7, 0x9c, 0x93, 0x5d, 0x15,
+ 0xad, 0xcc, 0x11, 0xb1, 0x17, 0x18, 0xb2, 0x7e,
+ 0xcc, 0xab, 0xe9, 0xce, 0x7d, 0x77, 0x5b, 0x51,
+ 0x1b, 0x1e, 0x20, 0xa8, 0x32, 0x06, 0x0e, 0x75,
+ 0x93, 0xac, 0xdb, 0x35, 0x37, 0x1f, 0xe9, 0x19,
+ 0x1d, 0xb4, 0x71, 0x97, 0xd6, 0x4e, 0x2c, 0x08,
+ 0xa5, 0x13, 0xf9, 0x0e, 0x7e, 0x78, 0x6e, 0x14,
+ 0xe0, 0xa9, 0xb9, 0x96, 0x4c, 0x80, 0x82, 0xba,
+ 0x17, 0xb3, 0x9d, 0x69, 0xb0, 0x84, 0x46, 0xff,
+ 0xf9, 0x52, 0x79, 0x94, 0x58, 0x3a, 0x62, 0x90,
+ 0x15, 0x35, 0x71, 0x10, 0x37, 0xed, 0xa1, 0x8e,
+ 0x53, 0x6e, 0xf4, 0x26, 0x57, 0x93, 0x15, 0x93,
+ 0xf6, 0x81, 0x2c, 0x5a, 0x10, 0xda, 0x92, 0xad,
+ 0x2f, 0xdb, 0x28, 0x31, 0x2d, 0x55, 0x04, 0xd2,
+ 0x06, 0x28, 0x8c, 0x1e, 0xdc, 0xea, 0x54, 0xac,
+ 0xff, 0xb7, 0x6c, 0x30, 0x15, 0xd4, 0xb4, 0x0d,
+ 0x00, 0x93, 0x57, 0xdd, 0xd2, 0x07, 0x07, 0x06,
+ 0xd9, 0x43, 0x9b, 0xcd, 0x3a, 0xf4, 0x7d, 0x4c,
+ 0x36, 0x5d, 0x23, 0xa2, 0xcc, 0x57, 0x40, 0x91,
+ 0xe9, 0x2c, 0x2f, 0x2c, 0xd5, 0x30, 0x9b, 0x17,
+ 0xb0, 0xc9, 0xf7, 0xa7, 0x2f, 0xd1, 0x93, 0x20,
+ 0x6b, 0xc6, 0xc1, 0xe4, 0x6f, 0xcb, 0xd1, 0xe7,
+ 0x09, 0x0f, 0x9e, 0xdc, 0xaa, 0x9f, 0x2f, 0xdf,
+ 0x56, 0x9f, 0xd4, 0x33, 0x04, 0xaf, 0xd3, 0x6c,
+ 0x58, 0x61, 0xf0, 0x30, 0xec, 0xf2, 0x7f, 0xf2,
+ 0x9c, 0xdf, 0x39, 0xbb, 0x6f, 0xa2, 0x8c, 0x7e,
+ 0xc4, 0x22, 0x51, 0x71, 0xc0, 0x4d, 0x14, 0x1a,
+ 0xc4, 0xcd, 0x04, 0xd9, 0x87, 0x08, 0x50, 0x05,
+ 0xcc, 0xaf, 0xf6, 0xf0, 0x8f, 0x92, 0x54, 0x58,
+ 0xc2, 0xc7, 0x09, 0x7a, 0x59, 0x02, 0x05, 0xe8,
+ 0xb0, 0x86, 0xd9, 0xbf, 0x7b, 0x35, 0x51, 0x4d,
+ 0xaf, 0x08, 0x97, 0x2c, 0x65, 0xda, 0x2a, 0x71,
+ 0x3a, 0xa8, 0x51, 0xcc, 0xf2, 0x73, 0x27, 0xc3,
+ 0xfd, 0x62, 0xcf, 0xe3, 0xb2, 0xca, 0xcb, 0xbe,
+ 0x1a, 0x0a, 0xa1, 0x34, 0x7b, 0x77, 0xc4, 0x62,
+ 0x68, 0x78, 0x5f, 0x94, 0x07, 0x04, 0x65, 0x16,
+ 0x4b, 0x61, 0xcb, 0xff, 0x75, 0x26, 0x50, 0x66,
+ 0x1f, 0x6e, 0x93, 0xf8, 0xc5, 0x51, 0xeb, 0xa4,
+ 0x4a, 0x48, 0x68, 0x6b, 0xe2, 0x5e, 0x44, 0xb2,
+ 0x50, 0x2c, 0x6c, 0xae, 0x79, 0x4e, 0x66, 0x35,
+ 0x81, 0x50, 0xac, 0xbc, 0x3f, 0xb1, 0x0c, 0xf3,
+ 0x05, 0x3c, 0x4a, 0xa3, 0x6c, 0x2a, 0x79, 0xb4,
+ 0xb7, 0xab, 0xca, 0xc7, 0x9b, 0x8e, 0xcd, 0x5f,
+ 0x11, 0x03, 0xcb, 0x30, 0xa3, 0xab, 0xda, 0xfe,
+ 0x64, 0xb9, 0xbb, 0xd8, 0x5e, 0x3a, 0x1a, 0x56,
+ 0xe5, 0x05, 0x48, 0x90, 0x1e, 0x61, 0x69, 0x1b,
+ 0x22, 0xe6, 0x1a, 0x3c, 0x75, 0xad, 0x1f, 0x37,
+ 0x28, 0xdc, 0xe4, 0x6d, 0xbd, 0x42, 0xdc, 0xd3,
+ 0xc8, 0xb6, 0x1c, 0x48, 0xfe, 0x94, 0x77, 0x7f,
+ 0xbd, 0x62, 0xac, 0xa3, 0x47, 0x27, 0xcf, 0x5f,
+ 0xd9, 0xdb, 0xaf, 0xec, 0xf7, 0x5e, 0xc1, 0xb0,
+ 0x9d, 0x01, 0x26, 0x99, 0x7e, 0x8f, 0x03, 0x70,
+ 0xb5, 0x42, 0xbe, 0x67, 0x28, 0x1b, 0x7c, 0xbd,
+ 0x61, 0x21, 0x97, 0xcc, 0x5c, 0xe1, 0x97, 0x8f,
+ 0x8d, 0xde, 0x2b, 0xaa, 0xa7, 0x71, 0x1d, 0x1e,
+ 0x02, 0x73, 0x70, 0x58, 0x32, 0x5b, 0x1d, 0x67,
+ 0x3d, 0xe0, 0x74, 0x4f, 0x03, 0xf2, 0x70, 0x51,
+ 0x79, 0xf1, 0x61, 0x70, 0x15, 0x74, 0x9d, 0x23,
+ 0x89, 0xde, 0xac, 0xfd, 0xde, 0xd0, 0x1f, 0xc3,
+ 0x87, 0x44, 0x35, 0x4b, 0xe5, 0xb0, 0x60, 0xc5,
+ 0x22, 0xe4, 0x9e, 0xca, 0xeb, 0xd5, 0x3a, 0x09,
+ 0x45, 0xa4, 0xdb, 0xfa, 0x3f, 0xeb, 0x1b, 0xc7,
+ 0xc8, 0x14, 0x99, 0x51, 0x92, 0x10, 0xed, 0xed,
+ 0x28, 0xe0, 0xa1, 0xf8, 0x26, 0xcf, 0xcd, 0xcb,
+ 0x63, 0xa1, 0x3b, 0xe3, 0xdf, 0x7e, 0xfe, 0xa6,
+ 0xf0, 0x81, 0x9a, 0xbf, 0x55, 0xde, 0x54, 0xd5,
+ 0x56, 0x60, 0x98, 0x10, 0x68, 0xf4, 0x38, 0x96,
+ 0x8e, 0x6f, 0x1d, 0x44, 0x7f, 0xd6, 0x2f, 0xfe,
+ 0x55, 0xfb, 0x0c, 0x7e, 0x67, 0xe2, 0x61, 0x44,
+ 0xed, 0xf2, 0x35, 0x30, 0x5d, 0xe9, 0xc7, 0xd6,
+ 0x6d, 0xe0, 0xa0, 0xed, 0xf3, 0xfc, 0xd8, 0x3e,
+ 0x0a, 0x7b, 0xcd, 0xaf, 0x65, 0x68, 0x18, 0xc0,
+ 0xec, 0x04, 0x1c, 0x74, 0x6d, 0xe2, 0x6e, 0x79,
+ 0xd4, 0x11, 0x2b, 0x62, 0xd5, 0x27, 0xad, 0x4f,
+ 0x01, 0x59, 0x73, 0xcc, 0x6a, 0x53, 0xfb, 0x2d,
+ 0xd5, 0x4e, 0x99, 0x21, 0x65, 0x4d, 0xf5, 0x82,
+ 0xf7, 0xd8, 0x42, 0xce, 0x6f, 0x3d, 0x36, 0x47,
+ 0xf1, 0x05, 0x16, 0xe8, 0x1b, 0x6a, 0x8f, 0x93,
+ 0xf2, 0x8f, 0x37, 0x40, 0x12, 0x28, 0xa3, 0xe6,
+ 0xb9, 0x17, 0x4a, 0x1f, 0xb1, 0xd1, 0x66, 0x69,
+ 0x86, 0xc4, 0xfc, 0x97, 0xae, 0x3f, 0x8f, 0x1e,
+ 0x2b, 0xdf, 0xcd, 0xf9, 0x3c
+};
+static const u8 dec_output011[] __initconst = {
+ 0x7a, 0x57, 0xf2, 0xc7, 0x06, 0x3f, 0x50, 0x7b,
+ 0x36, 0x1a, 0x66, 0x5c, 0xb9, 0x0e, 0x5e, 0x3b,
+ 0x45, 0x60, 0xbe, 0x9a, 0x31, 0x9f, 0xff, 0x5d,
+ 0x66, 0x34, 0xb4, 0xdc, 0xfb, 0x9d, 0x8e, 0xee,
+ 0x6a, 0x33, 0xa4, 0x07, 0x3c, 0xf9, 0x4c, 0x30,
+ 0xa1, 0x24, 0x52, 0xf9, 0x50, 0x46, 0x88, 0x20,
+ 0x02, 0x32, 0x3a, 0x0e, 0x99, 0x63, 0xaf, 0x1f,
+ 0x15, 0x28, 0x2a, 0x05, 0xff, 0x57, 0x59, 0x5e,
+ 0x18, 0xa1, 0x1f, 0xd0, 0x92, 0x5c, 0x88, 0x66,
+ 0x1b, 0x00, 0x64, 0xa5, 0x93, 0x8d, 0x06, 0x46,
+ 0xb0, 0x64, 0x8b, 0x8b, 0xef, 0x99, 0x05, 0x35,
+ 0x85, 0xb3, 0xf3, 0x33, 0xbb, 0xec, 0x66, 0xb6,
+ 0x3d, 0x57, 0x42, 0xe3, 0xb4, 0xc6, 0xaa, 0xb0,
+ 0x41, 0x2a, 0xb9, 0x59, 0xa9, 0xf6, 0x3e, 0x15,
+ 0x26, 0x12, 0x03, 0x21, 0x4c, 0x74, 0x43, 0x13,
+ 0x2a, 0x03, 0x27, 0x09, 0xb4, 0xfb, 0xe7, 0xb7,
+ 0x40, 0xff, 0x5e, 0xce, 0x48, 0x9a, 0x60, 0xe3,
+ 0x8b, 0x80, 0x8c, 0x38, 0x2d, 0xcb, 0x93, 0x37,
+ 0x74, 0x05, 0x52, 0x6f, 0x73, 0x3e, 0xc3, 0xbc,
+ 0xca, 0x72, 0x0a, 0xeb, 0xf1, 0x3b, 0xa0, 0x95,
+ 0xdc, 0x8a, 0xc4, 0xa9, 0xdc, 0xca, 0x44, 0xd8,
+ 0x08, 0x63, 0x6a, 0x36, 0xd3, 0x3c, 0xb8, 0xac,
+ 0x46, 0x7d, 0xfd, 0xaa, 0xeb, 0x3e, 0x0f, 0x45,
+ 0x8f, 0x49, 0xda, 0x2b, 0xf2, 0x12, 0xbd, 0xaf,
+ 0x67, 0x8a, 0x63, 0x48, 0x4b, 0x55, 0x5f, 0x6d,
+ 0x8c, 0xb9, 0x76, 0x34, 0x84, 0xae, 0xc2, 0xfc,
+ 0x52, 0x64, 0x82, 0xf7, 0xb0, 0x06, 0xf0, 0x45,
+ 0x73, 0x12, 0x50, 0x30, 0x72, 0xea, 0x78, 0x9a,
+ 0xa8, 0xaf, 0xb5, 0xe3, 0xbb, 0x77, 0x52, 0xec,
+ 0x59, 0x84, 0xbf, 0x6b, 0x8f, 0xce, 0x86, 0x5e,
+ 0x1f, 0x23, 0xe9, 0xfb, 0x08, 0x86, 0xf7, 0x10,
+ 0xb9, 0xf2, 0x44, 0x96, 0x44, 0x63, 0xa9, 0xa8,
+ 0x78, 0x00, 0x23, 0xd6, 0xc7, 0xe7, 0x6e, 0x66,
+ 0x4f, 0xcc, 0xee, 0x15, 0xb3, 0xbd, 0x1d, 0xa0,
+ 0xe5, 0x9c, 0x1b, 0x24, 0x2c, 0x4d, 0x3c, 0x62,
+ 0x35, 0x9c, 0x88, 0x59, 0x09, 0xdd, 0x82, 0x1b,
+ 0xcf, 0x0a, 0x83, 0x6b, 0x3f, 0xae, 0x03, 0xc4,
+ 0xb4, 0xdd, 0x7e, 0x5b, 0x28, 0x76, 0x25, 0x96,
+ 0xd9, 0xc9, 0x9d, 0x5f, 0x86, 0xfa, 0xf6, 0xd7,
+ 0xd2, 0xe6, 0x76, 0x1d, 0x0f, 0xa1, 0xdc, 0x74,
+ 0x05, 0x1b, 0x1d, 0xe0, 0xcd, 0x16, 0xb0, 0xa8,
+ 0x8a, 0x34, 0x7b, 0x15, 0x11, 0x77, 0xe5, 0x7b,
+ 0x7e, 0x20, 0xf7, 0xda, 0x38, 0xda, 0xce, 0x70,
+ 0xe9, 0xf5, 0x6c, 0xd9, 0xbe, 0x0c, 0x4c, 0x95,
+ 0x4c, 0xc2, 0x9b, 0x34, 0x55, 0x55, 0xe1, 0xf3,
+ 0x46, 0x8e, 0x48, 0x74, 0x14, 0x4f, 0x9d, 0xc9,
+ 0xf5, 0xe8, 0x1a, 0xf0, 0x11, 0x4a, 0xc1, 0x8d,
+ 0xe0, 0x93, 0xa0, 0xbe, 0x09, 0x1c, 0x2b, 0x4e,
+ 0x0f, 0xb2, 0x87, 0x8b, 0x84, 0xfe, 0x92, 0x32,
+ 0x14, 0xd7, 0x93, 0xdf, 0xe7, 0x44, 0xbc, 0xc5,
+ 0xae, 0x53, 0x69, 0xd8, 0xb3, 0x79, 0x37, 0x80,
+ 0xe3, 0x17, 0x5c, 0xec, 0x53, 0x00, 0x9a, 0xe3,
+ 0x8e, 0xdc, 0x38, 0xb8, 0x66, 0xf0, 0xd3, 0xad,
+ 0x1d, 0x02, 0x96, 0x86, 0x3e, 0x9d, 0x3b, 0x5d,
+ 0xa5, 0x7f, 0x21, 0x10, 0xf1, 0x1f, 0x13, 0x20,
+ 0xf9, 0x57, 0x87, 0x20, 0xf5, 0x5f, 0xf1, 0x17,
+ 0x48, 0x0a, 0x51, 0x5a, 0xcd, 0x19, 0x03, 0xa6,
+ 0x5a, 0xd1, 0x12, 0x97, 0xe9, 0x48, 0xe2, 0x1d,
+ 0x83, 0x75, 0x50, 0xd9, 0x75, 0x7d, 0x6a, 0x82,
+ 0xa1, 0xf9, 0x4e, 0x54, 0x87, 0x89, 0xc9, 0x0c,
+ 0xb7, 0x5b, 0x6a, 0x91, 0xc1, 0x9c, 0xb2, 0xa9,
+ 0xdc, 0x9a, 0xa4, 0x49, 0x0a, 0x6d, 0x0d, 0xbb,
+ 0xde, 0x86, 0x44, 0xdd, 0x5d, 0x89, 0x2b, 0x96,
+ 0x0f, 0x23, 0x95, 0xad, 0xcc, 0xa2, 0xb3, 0xb9,
+ 0x7e, 0x74, 0x38, 0xba, 0x9f, 0x73, 0xae, 0x5f,
+ 0xf8, 0x68, 0xa2, 0xe0, 0xa9, 0xce, 0xbd, 0x40,
+ 0xd4, 0x4c, 0x6b, 0xd2, 0x56, 0x62, 0xb0, 0xcc,
+ 0x63, 0x7e, 0x5b, 0xd3, 0xae, 0xd1, 0x75, 0xce,
+ 0xbb, 0xb4, 0x5b, 0xa8, 0xf8, 0xb4, 0xac, 0x71,
+ 0x75, 0xaa, 0xc9, 0x9f, 0xbb, 0x6c, 0xad, 0x0f,
+ 0x55, 0x5d, 0xe8, 0x85, 0x7d, 0xf9, 0x21, 0x35,
+ 0xea, 0x92, 0x85, 0x2b, 0x00, 0xec, 0x84, 0x90,
+ 0x0a, 0x63, 0x96, 0xe4, 0x6b, 0xa9, 0x77, 0xb8,
+ 0x91, 0xf8, 0x46, 0x15, 0x72, 0x63, 0x70, 0x01,
+ 0x40, 0xa3, 0xa5, 0x76, 0x62, 0x2b, 0xbf, 0xf1,
+ 0xe5, 0x8d, 0x9f, 0xa3, 0xfa, 0x9b, 0x03, 0xbe,
+ 0xfe, 0x65, 0x6f, 0xa2, 0x29, 0x0d, 0x54, 0xb4,
+ 0x71, 0xce, 0xa9, 0xd6, 0x3d, 0x88, 0xf9, 0xaf,
+ 0x6b, 0xa8, 0x9e, 0xf4, 0x16, 0x96, 0x36, 0xb9,
+ 0x00, 0xdc, 0x10, 0xab, 0xb5, 0x08, 0x31, 0x1f,
+ 0x00, 0xb1, 0x3c, 0xd9, 0x38, 0x3e, 0xc6, 0x04,
+ 0xa7, 0x4e, 0xe8, 0xae, 0xed, 0x98, 0xc2, 0xf7,
+ 0xb9, 0x00, 0x5f, 0x8c, 0x60, 0xd1, 0xe5, 0x15,
+ 0xf7, 0xae, 0x1e, 0x84, 0x88, 0xd1, 0xf6, 0xbc,
+ 0x3a, 0x89, 0x35, 0x22, 0x83, 0x7c, 0xca, 0xf0,
+ 0x33, 0x82, 0x4c, 0x79, 0x3c, 0xfd, 0xb1, 0xae,
+ 0x52, 0x62, 0x55, 0xd2, 0x41, 0x60, 0xc6, 0xbb,
+ 0xfa, 0x0e, 0x59, 0xd6, 0xa8, 0xfe, 0x5d, 0xed,
+ 0x47, 0x3d, 0xe0, 0xea, 0x1f, 0x6e, 0x43, 0x51,
+ 0xec, 0x10, 0x52, 0x56, 0x77, 0x42, 0x6b, 0x52,
+ 0x87, 0xd8, 0xec, 0xe0, 0xaa, 0x76, 0xa5, 0x84,
+ 0x2a, 0x22, 0x24, 0xfd, 0x92, 0x40, 0x88, 0xd5,
+ 0x85, 0x1c, 0x1f, 0x6b, 0x47, 0xa0, 0xc4, 0xe4,
+ 0xef, 0xf4, 0xea, 0xd7, 0x59, 0xac, 0x2a, 0x9e,
+ 0x8c, 0xfa, 0x1f, 0x42, 0x08, 0xfe, 0x4f, 0x74,
+ 0xa0, 0x26, 0xf5, 0xb3, 0x84, 0xf6, 0x58, 0x5f,
+ 0x26, 0x66, 0x3e, 0xd7, 0xe4, 0x22, 0x91, 0x13,
+ 0xc8, 0xac, 0x25, 0x96, 0x23, 0xd8, 0x09, 0xea,
+ 0x45, 0x75, 0x23, 0xb8, 0x5f, 0xc2, 0x90, 0x8b,
+ 0x09, 0xc4, 0xfc, 0x47, 0x6c, 0x6d, 0x0a, 0xef,
+ 0x69, 0xa4, 0x38, 0x19, 0xcf, 0x7d, 0xf9, 0x09,
+ 0x73, 0x9b, 0x60, 0x5a, 0xf7, 0x37, 0xb5, 0xfe,
+ 0x9f, 0xe3, 0x2b, 0x4c, 0x0d, 0x6e, 0x19, 0xf1,
+ 0xd6, 0xc0, 0x70, 0xf3, 0x9d, 0x22, 0x3c, 0xf9,
+ 0x49, 0xce, 0x30, 0x8e, 0x44, 0xb5, 0x76, 0x15,
+ 0x8f, 0x52, 0xfd, 0xa5, 0x04, 0xb8, 0x55, 0x6a,
+ 0x36, 0x59, 0x7c, 0xc4, 0x48, 0xb8, 0xd7, 0xab,
+ 0x05, 0x66, 0xe9, 0x5e, 0x21, 0x6f, 0x6b, 0x36,
+ 0x29, 0xbb, 0xe9, 0xe3, 0xa2, 0x9a, 0xa8, 0xcd,
+ 0x55, 0x25, 0x11, 0xba, 0x5a, 0x58, 0xa0, 0xde,
+ 0xae, 0x19, 0x2a, 0x48, 0x5a, 0xff, 0x36, 0xcd,
+ 0x6d, 0x16, 0x7a, 0x73, 0x38, 0x46, 0xe5, 0x47,
+ 0x59, 0xc8, 0xa2, 0xf6, 0xe2, 0x6c, 0x83, 0xc5,
+ 0x36, 0x2c, 0x83, 0x7d, 0xb4, 0x01, 0x05, 0x69,
+ 0xe7, 0xaf, 0x5c, 0xc4, 0x64, 0x82, 0x12, 0x21,
+ 0xef, 0xf7, 0xd1, 0x7d, 0xb8, 0x8d, 0x8c, 0x98,
+ 0x7c, 0x5f, 0x7d, 0x92, 0x88, 0xb9, 0x94, 0x07,
+ 0x9c, 0xd8, 0xe9, 0x9c, 0x17, 0x38, 0xe3, 0x57,
+ 0x6c, 0xe0, 0xdc, 0xa5, 0x92, 0x42, 0xb3, 0xbd,
+ 0x50, 0xa2, 0x7e, 0xb5, 0xb1, 0x52, 0x72, 0x03,
+ 0x97, 0xd8, 0xaa, 0x9a, 0x1e, 0x75, 0x41, 0x11,
+ 0xa3, 0x4f, 0xcc, 0xd4, 0xe3, 0x73, 0xad, 0x96,
+ 0xdc, 0x47, 0x41, 0x9f, 0xb0, 0xbe, 0x79, 0x91,
+ 0xf5, 0xb6, 0x18, 0xfe, 0xc2, 0x83, 0x18, 0x7d,
+ 0x73, 0xd9, 0x4f, 0x83, 0x84, 0x03, 0xb3, 0xf0,
+ 0x77, 0x66, 0x3d, 0x83, 0x63, 0x2e, 0x2c, 0xf9,
+ 0xdd, 0xa6, 0x1f, 0x89, 0x82, 0xb8, 0x23, 0x42,
+ 0xeb, 0xe2, 0xca, 0x70, 0x82, 0x61, 0x41, 0x0a,
+ 0x6d, 0x5f, 0x75, 0xc5, 0xe2, 0xc4, 0x91, 0x18,
+ 0x44, 0x22, 0xfa, 0x34, 0x10, 0xf5, 0x20, 0xdc,
+ 0xb7, 0xdd, 0x2a, 0x20, 0x77, 0xf5, 0xf9, 0xce,
+ 0xdb, 0xa0, 0x0a, 0x52, 0x2a, 0x4e, 0xdd, 0xcc,
+ 0x97, 0xdf, 0x05, 0xe4, 0x5e, 0xb7, 0xaa, 0xf0,
+ 0xe2, 0x80, 0xff, 0xba, 0x1a, 0x0f, 0xac, 0xdf,
+ 0x02, 0x32, 0xe6, 0xf7, 0xc7, 0x17, 0x13, 0xb7,
+ 0xfc, 0x98, 0x48, 0x8c, 0x0d, 0x82, 0xc9, 0x80,
+ 0x7a, 0xe2, 0x0a, 0xc5, 0xb4, 0xde, 0x7c, 0x3c,
+ 0x79, 0x81, 0x0e, 0x28, 0x65, 0x79, 0x67, 0x82,
+ 0x69, 0x44, 0x66, 0x09, 0xf7, 0x16, 0x1a, 0xf9,
+ 0x7d, 0x80, 0xa1, 0x79, 0x14, 0xa9, 0xc8, 0x20,
+ 0xfb, 0xa2, 0x46, 0xbe, 0x08, 0x35, 0x17, 0x58,
+ 0xc1, 0x1a, 0xda, 0x2a, 0x6b, 0x2e, 0x1e, 0xe6,
+ 0x27, 0x55, 0x7b, 0x19, 0xe2, 0xfb, 0x64, 0xfc,
+ 0x5e, 0x15, 0x54, 0x3c, 0xe7, 0xc2, 0x11, 0x50,
+ 0x30, 0xb8, 0x72, 0x03, 0x0b, 0x1a, 0x9f, 0x86,
+ 0x27, 0x11, 0x5c, 0x06, 0x2b, 0xbd, 0x75, 0x1a,
+ 0x0a, 0xda, 0x01, 0xfa, 0x5c, 0x4a, 0xc1, 0x80,
+ 0x3a, 0x6e, 0x30, 0xc8, 0x2c, 0xeb, 0x56, 0xec,
+ 0x89, 0xfa, 0x35, 0x7b, 0xb2, 0xf0, 0x97, 0x08,
+ 0x86, 0x53, 0xbe, 0xbd, 0x40, 0x41, 0x38, 0x1c,
+ 0xb4, 0x8b, 0x79, 0x2e, 0x18, 0x96, 0x94, 0xde,
+ 0xe8, 0xca, 0xe5, 0x9f, 0x92, 0x9f, 0x15, 0x5d,
+ 0x56, 0x60, 0x5c, 0x09, 0xf9, 0x16, 0xf4, 0x17,
+ 0x0f, 0xf6, 0x4c, 0xda, 0xe6, 0x67, 0x89, 0x9f,
+ 0xca, 0x6c, 0xe7, 0x9b, 0x04, 0x62, 0x0e, 0x26,
+ 0xa6, 0x52, 0xbd, 0x29, 0xff, 0xc7, 0xa4, 0x96,
+ 0xe6, 0x6a, 0x02, 0xa5, 0x2e, 0x7b, 0xfe, 0x97,
+ 0x68, 0x3e, 0x2e, 0x5f, 0x3b, 0x0f, 0x36, 0xd6,
+ 0x98, 0x19, 0x59, 0x48, 0xd2, 0xc6, 0xe1, 0x55,
+ 0x1a, 0x6e, 0xd6, 0xed, 0x2c, 0xba, 0xc3, 0x9e,
+ 0x64, 0xc9, 0x95, 0x86, 0x35, 0x5e, 0x3e, 0x88,
+ 0x69, 0x99, 0x4b, 0xee, 0xbe, 0x9a, 0x99, 0xb5,
+ 0x6e, 0x58, 0xae, 0xdd, 0x22, 0xdb, 0xdd, 0x6b,
+ 0xfc, 0xaf, 0x90, 0xa3, 0x3d, 0xa4, 0xc1, 0x15,
+ 0x92, 0x18, 0x8d, 0xd2, 0x4b, 0x7b, 0x06, 0xd1,
+ 0x37, 0xb5, 0xe2, 0x7c, 0x2c, 0xf0, 0x25, 0xe4,
+ 0x94, 0x2a, 0xbd, 0xe3, 0x82, 0x70, 0x78, 0xa3,
+ 0x82, 0x10, 0x5a, 0x90, 0xd7, 0xa4, 0xfa, 0xaf,
+ 0x1a, 0x88, 0x59, 0xdc, 0x74, 0x12, 0xb4, 0x8e,
+ 0xd7, 0x19, 0x46, 0xf4, 0x84, 0x69, 0x9f, 0xbb,
+ 0x70, 0xa8, 0x4c, 0x52, 0x81, 0xa9, 0xff, 0x76,
+ 0x1c, 0xae, 0xd8, 0x11, 0x3d, 0x7f, 0x7d, 0xc5,
+ 0x12, 0x59, 0x28, 0x18, 0xc2, 0xa2, 0xb7, 0x1c,
+ 0x88, 0xf8, 0xd6, 0x1b, 0xa6, 0x7d, 0x9e, 0xde,
+ 0x29, 0xf8, 0xed, 0xff, 0xeb, 0x92, 0x24, 0x4f,
+ 0x05, 0xaa, 0xd9, 0x49, 0xba, 0x87, 0x59, 0x51,
+ 0xc9, 0x20, 0x5c, 0x9b, 0x74, 0xcf, 0x03, 0xd9,
+ 0x2d, 0x34, 0xc7, 0x5b, 0xa5, 0x40, 0xb2, 0x99,
+ 0xf5, 0xcb, 0xb4, 0xf6, 0xb7, 0x72, 0x4a, 0xd6,
+ 0xbd, 0xb0, 0xf3, 0x93, 0xe0, 0x1b, 0xa8, 0x04,
+ 0x1e, 0x35, 0xd4, 0x80, 0x20, 0xf4, 0x9c, 0x31,
+ 0x6b, 0x45, 0xb9, 0x15, 0xb0, 0x5e, 0xdd, 0x0a,
+ 0x33, 0x9c, 0x83, 0xcd, 0x58, 0x89, 0x50, 0x56,
+ 0xbb, 0x81, 0x00, 0x91, 0x32, 0xf3, 0x1b, 0x3e,
+ 0xcf, 0x45, 0xe1, 0xf9, 0xe1, 0x2c, 0x26, 0x78,
+ 0x93, 0x9a, 0x60, 0x46, 0xc9, 0xb5, 0x5e, 0x6a,
+ 0x28, 0x92, 0x87, 0x3f, 0x63, 0x7b, 0xdb, 0xf7,
+ 0xd0, 0x13, 0x9d, 0x32, 0x40, 0x5e, 0xcf, 0xfb,
+ 0x79, 0x68, 0x47, 0x4c, 0xfd, 0x01, 0x17, 0xe6,
+ 0x97, 0x93, 0x78, 0xbb, 0xa6, 0x27, 0xa3, 0xe8,
+ 0x1a, 0xe8, 0x94, 0x55, 0x7d, 0x08, 0xe5, 0xdc,
+ 0x66, 0xa3, 0x69, 0xc8, 0xca, 0xc5, 0xa1, 0x84,
+ 0x55, 0xde, 0x08, 0x91, 0x16, 0x3a, 0x0c, 0x86,
+ 0xab, 0x27, 0x2b, 0x64, 0x34, 0x02, 0x6c, 0x76,
+ 0x8b, 0xc6, 0xaf, 0xcc, 0xe1, 0xd6, 0x8c, 0x2a,
+ 0x18, 0x3d, 0xa6, 0x1b, 0x37, 0x75, 0x45, 0x73,
+ 0xc2, 0x75, 0xd7, 0x53, 0x78, 0x3a, 0xd6, 0xe8,
+ 0x29, 0xd2, 0x4a, 0xa8, 0x1e, 0x82, 0xf6, 0xb6,
+ 0x81, 0xde, 0x21, 0xed, 0x2b, 0x56, 0xbb, 0xf2,
+ 0xd0, 0x57, 0xc1, 0x7c, 0xd2, 0x6a, 0xd2, 0x56,
+ 0xf5, 0x13, 0x5f, 0x1c, 0x6a, 0x0b, 0x74, 0xfb,
+ 0xe9, 0xfe, 0x9e, 0xea, 0x95, 0xb2, 0x46, 0xab,
+ 0x0a, 0xfc, 0xfd, 0xf3, 0xbb, 0x04, 0x2b, 0x76,
+ 0x1b, 0xa4, 0x74, 0xb0, 0xc1, 0x78, 0xc3, 0x69,
+ 0xe2, 0xb0, 0x01, 0xe1, 0xde, 0x32, 0x4c, 0x8d,
+ 0x1a, 0xb3, 0x38, 0x08, 0xd5, 0xfc, 0x1f, 0xdc,
+ 0x0e, 0x2c, 0x9c, 0xb1, 0xa1, 0x63, 0x17, 0x22,
+ 0xf5, 0x6c, 0x93, 0x70, 0x74, 0x00, 0xf8, 0x39,
+ 0x01, 0x94, 0xd1, 0x32, 0x23, 0x56, 0x5d, 0xa6,
+ 0x02, 0x76, 0x76, 0x93, 0xce, 0x2f, 0x19, 0xe9,
+ 0x17, 0x52, 0xae, 0x6e, 0x2c, 0x6d, 0x61, 0x7f,
+ 0x3b, 0xaa, 0xe0, 0x52, 0x85, 0xc5, 0x65, 0xc1,
+ 0xbb, 0x8e, 0x5b, 0x21, 0xd5, 0xc9, 0x78, 0x83,
+ 0x07, 0x97, 0x4c, 0x62, 0x61, 0x41, 0xd4, 0xfc,
+ 0xc9, 0x39, 0xe3, 0x9b, 0xd0, 0xcc, 0x75, 0xc4,
+ 0x97, 0xe6, 0xdd, 0x2a, 0x5f, 0xa6, 0xe8, 0x59,
+ 0x6c, 0x98, 0xb9, 0x02, 0xe2, 0xa2, 0xd6, 0x68,
+ 0xee, 0x3b, 0x1d, 0xe3, 0x4d, 0x5b, 0x30, 0xef,
+ 0x03, 0xf2, 0xeb, 0x18, 0x57, 0x36, 0xe8, 0xa1,
+ 0xf4, 0x47, 0xfb, 0xcb, 0x8f, 0xcb, 0xc8, 0xf3,
+ 0x4f, 0x74, 0x9d, 0x9d, 0xb1, 0x8d, 0x14, 0x44,
+ 0xd9, 0x19, 0xb4, 0x54, 0x4f, 0x75, 0x19, 0x09,
+ 0xa0, 0x75, 0xbc, 0x3b, 0x82, 0xc6, 0x3f, 0xb8,
+ 0x83, 0x19, 0x6e, 0xd6, 0x37, 0xfe, 0x6e, 0x8a,
+ 0x4e, 0xe0, 0x4a, 0xab, 0x7b, 0xc8, 0xb4, 0x1d,
+ 0xf4, 0xed, 0x27, 0x03, 0x65, 0xa2, 0xa1, 0xae,
+ 0x11, 0xe7, 0x98, 0x78, 0x48, 0x91, 0xd2, 0xd2,
+ 0xd4, 0x23, 0x78, 0x50, 0xb1, 0x5b, 0x85, 0x10,
+ 0x8d, 0xca, 0x5f, 0x0f, 0x71, 0xae, 0x72, 0x9a,
+ 0xf6, 0x25, 0x19, 0x60, 0x06, 0xf7, 0x10, 0x34,
+ 0x18, 0x0d, 0xc9, 0x9f, 0x7b, 0x0c, 0x9b, 0x8f,
+ 0x91, 0x1b, 0x9f, 0xcd, 0x10, 0xee, 0x75, 0xf9,
+ 0x97, 0x66, 0xfc, 0x4d, 0x33, 0x6e, 0x28, 0x2b,
+ 0x92, 0x85, 0x4f, 0xab, 0x43, 0x8d, 0x8f, 0x7d,
+ 0x86, 0xa7, 0xc7, 0xd8, 0xd3, 0x0b, 0x8b, 0x57,
+ 0xb6, 0x1d, 0x95, 0x0d, 0xe9, 0xbc, 0xd9, 0x03,
+ 0xd9, 0x10, 0x19, 0xc3, 0x46, 0x63, 0x55, 0x87,
+ 0x61, 0x79, 0x6c, 0x95, 0x0e, 0x9c, 0xdd, 0xca,
+ 0xc3, 0xf3, 0x64, 0xf0, 0x7d, 0x76, 0xb7, 0x53,
+ 0x67, 0x2b, 0x1e, 0x44, 0x56, 0x81, 0xea, 0x8f,
+ 0x5c, 0x42, 0x16, 0xb8, 0x28, 0xeb, 0x1b, 0x61,
+ 0x10, 0x1e, 0xbf, 0xec, 0xa8
+};
+static const u8 dec_assoc011[] __initconst = {
+ 0xd6, 0x31, 0xda, 0x5d, 0x42, 0x5e, 0xd7
+};
+static const u8 dec_nonce011[] __initconst = {
+ 0xfd, 0x87, 0xd4, 0xd8, 0x62, 0xfd, 0xec, 0xaa
+};
+static const u8 dec_key011[] __initconst = {
+ 0x35, 0x4e, 0xb5, 0x70, 0x50, 0x42, 0x8a, 0x85,
+ 0xf2, 0xfb, 0xed, 0x7b, 0xd0, 0x9e, 0x97, 0xca,
+ 0xfa, 0x98, 0x66, 0x63, 0xee, 0x37, 0xcc, 0x52,
+ 0xfe, 0xd1, 0xdf, 0x95, 0x15, 0x34, 0x29, 0x38
+};
+
+static const u8 dec_input012[] __initconst = {
+ 0x52, 0x34, 0xb3, 0x65, 0x3b, 0xb7, 0xe5, 0xd3,
+ 0xab, 0x49, 0x17, 0x60, 0xd2, 0x52, 0x56, 0xdf,
+ 0xdf, 0x34, 0x56, 0x82, 0xe2, 0xbe, 0xe5, 0xe1,
+ 0x28, 0xd1, 0x4e, 0x5f, 0x4f, 0x01, 0x7d, 0x3f,
+ 0x99, 0x6b, 0x30, 0x6e, 0x1a, 0x7c, 0x4c, 0x8e,
+ 0x62, 0x81, 0xae, 0x86, 0x3f, 0x6b, 0xd0, 0xb5,
+ 0xa9, 0xcf, 0x50, 0xf1, 0x02, 0x12, 0xa0, 0x0b,
+ 0x24, 0xe9, 0xe6, 0x72, 0x89, 0x2c, 0x52, 0x1b,
+ 0x34, 0x38, 0xf8, 0x75, 0x5f, 0xa0, 0x74, 0xe2,
+ 0x99, 0xdd, 0xa6, 0x4b, 0x14, 0x50, 0x4e, 0xf1,
+ 0xbe, 0xd6, 0x9e, 0xdb, 0xb2, 0x24, 0x27, 0x74,
+ 0x12, 0x4a, 0x78, 0x78, 0x17, 0xa5, 0x58, 0x8e,
+ 0x2f, 0xf9, 0xf4, 0x8d, 0xee, 0x03, 0x88, 0xae,
+ 0xb8, 0x29, 0xa1, 0x2f, 0x4b, 0xee, 0x92, 0xbd,
+ 0x87, 0xb3, 0xce, 0x34, 0x21, 0x57, 0x46, 0x04,
+ 0x49, 0x0c, 0x80, 0xf2, 0x01, 0x13, 0xa1, 0x55,
+ 0xb3, 0xff, 0x44, 0x30, 0x3c, 0x1c, 0xd0, 0xef,
+ 0xbc, 0x18, 0x74, 0x26, 0xad, 0x41, 0x5b, 0x5b,
+ 0x3e, 0x9a, 0x7a, 0x46, 0x4f, 0x16, 0xd6, 0x74,
+ 0x5a, 0xb7, 0x3a, 0x28, 0x31, 0xd8, 0xae, 0x26,
+ 0xac, 0x50, 0x53, 0x86, 0xf2, 0x56, 0xd7, 0x3f,
+ 0x29, 0xbc, 0x45, 0x68, 0x8e, 0xcb, 0x98, 0x64,
+ 0xdd, 0xc9, 0xba, 0xb8, 0x4b, 0x7b, 0x82, 0xdd,
+ 0x14, 0xa7, 0xcb, 0x71, 0x72, 0x00, 0x5c, 0xad,
+ 0x7b, 0x6a, 0x89, 0xa4, 0x3d, 0xbf, 0xb5, 0x4b,
+ 0x3e, 0x7c, 0x5a, 0xcf, 0xb8, 0xa1, 0xc5, 0x6e,
+ 0xc8, 0xb6, 0x31, 0x57, 0x7b, 0xdf, 0xa5, 0x7e,
+ 0xb1, 0xd6, 0x42, 0x2a, 0x31, 0x36, 0xd1, 0xd0,
+ 0x3f, 0x7a, 0xe5, 0x94, 0xd6, 0x36, 0xa0, 0x6f,
+ 0xb7, 0x40, 0x7d, 0x37, 0xc6, 0x55, 0x7c, 0x50,
+ 0x40, 0x6d, 0x29, 0x89, 0xe3, 0x5a, 0xae, 0x97,
+ 0xe7, 0x44, 0x49, 0x6e, 0xbd, 0x81, 0x3d, 0x03,
+ 0x93, 0x06, 0x12, 0x06, 0xe2, 0x41, 0x12, 0x4a,
+ 0xf1, 0x6a, 0xa4, 0x58, 0xa2, 0xfb, 0xd2, 0x15,
+ 0xba, 0xc9, 0x79, 0xc9, 0xce, 0x5e, 0x13, 0xbb,
+ 0xf1, 0x09, 0x04, 0xcc, 0xfd, 0xe8, 0x51, 0x34,
+ 0x6a, 0xe8, 0x61, 0x88, 0xda, 0xed, 0x01, 0x47,
+ 0x84, 0xf5, 0x73, 0x25, 0xf9, 0x1c, 0x42, 0x86,
+ 0x07, 0xf3, 0x5b, 0x1a, 0x01, 0xb3, 0xeb, 0x24,
+ 0x32, 0x8d, 0xf6, 0xed, 0x7c, 0x4b, 0xeb, 0x3c,
+ 0x36, 0x42, 0x28, 0xdf, 0xdf, 0xb6, 0xbe, 0xd9,
+ 0x8c, 0x52, 0xd3, 0x2b, 0x08, 0x90, 0x8c, 0xe7,
+ 0x98, 0x31, 0xe2, 0x32, 0x8e, 0xfc, 0x11, 0x48,
+ 0x00, 0xa8, 0x6a, 0x42, 0x4a, 0x02, 0xc6, 0x4b,
+ 0x09, 0xf1, 0xe3, 0x49, 0xf3, 0x45, 0x1f, 0x0e,
+ 0xbc, 0x56, 0xe2, 0xe4, 0xdf, 0xfb, 0xeb, 0x61,
+ 0xfa, 0x24, 0xc1, 0x63, 0x75, 0xbb, 0x47, 0x75,
+ 0xaf, 0xe1, 0x53, 0x16, 0x96, 0x21, 0x85, 0x26,
+ 0x11, 0xb3, 0x76, 0xe3, 0x23, 0xa1, 0x6b, 0x74,
+ 0x37, 0xd0, 0xde, 0x06, 0x90, 0x71, 0x5d, 0x43,
+ 0x88, 0x9b, 0x00, 0x54, 0xa6, 0x75, 0x2f, 0xa1,
+ 0xc2, 0x0b, 0x73, 0x20, 0x1d, 0xb6, 0x21, 0x79,
+ 0x57, 0x3f, 0xfa, 0x09, 0xbe, 0x8a, 0x33, 0xc3,
+ 0x52, 0xf0, 0x1d, 0x82, 0x31, 0xd1, 0x55, 0xb5,
+ 0x6c, 0x99, 0x25, 0xcf, 0x5c, 0x32, 0xce, 0xe9,
+ 0x0d, 0xfa, 0x69, 0x2c, 0xd5, 0x0d, 0xc5, 0x6d,
+ 0x86, 0xd0, 0x0c, 0x3b, 0x06, 0x50, 0x79, 0xe8,
+ 0xc3, 0xae, 0x04, 0xe6, 0xcd, 0x51, 0xe4, 0x26,
+ 0x9b, 0x4f, 0x7e, 0xa6, 0x0f, 0xab, 0xd8, 0xe5,
+ 0xde, 0xa9, 0x00, 0x95, 0xbe, 0xa3, 0x9d, 0x5d,
+ 0xb2, 0x09, 0x70, 0x18, 0x1c, 0xf0, 0xac, 0x29,
+ 0x23, 0x02, 0x29, 0x28, 0xd2, 0x74, 0x35, 0x57,
+ 0x62, 0x0f, 0x24, 0xea, 0x5e, 0x33, 0xc2, 0x92,
+ 0xf3, 0x78, 0x4d, 0x30, 0x1e, 0xa1, 0x99, 0xa9,
+ 0x82, 0xb0, 0x42, 0x31, 0x8d, 0xad, 0x8a, 0xbc,
+ 0xfc, 0xd4, 0x57, 0x47, 0x3e, 0xb4, 0x50, 0xdd,
+ 0x6e, 0x2c, 0x80, 0x4d, 0x22, 0xf1, 0xfb, 0x57,
+ 0xc4, 0xdd, 0x17, 0xe1, 0x8a, 0x36, 0x4a, 0xb3,
+ 0x37, 0xca, 0xc9, 0x4e, 0xab, 0xd5, 0x69, 0xc4,
+ 0xf4, 0xbc, 0x0b, 0x3b, 0x44, 0x4b, 0x29, 0x9c,
+ 0xee, 0xd4, 0x35, 0x22, 0x21, 0xb0, 0x1f, 0x27,
+ 0x64, 0xa8, 0x51, 0x1b, 0xf0, 0x9f, 0x19, 0x5c,
+ 0xfb, 0x5a, 0x64, 0x74, 0x70, 0x45, 0x09, 0xf5,
+ 0x64, 0xfe, 0x1a, 0x2d, 0xc9, 0x14, 0x04, 0x14,
+ 0xcf, 0xd5, 0x7d, 0x60, 0xaf, 0x94, 0x39, 0x94,
+ 0xe2, 0x7d, 0x79, 0x82, 0xd0, 0x65, 0x3b, 0x6b,
+ 0x9c, 0x19, 0x84, 0xb4, 0x6d, 0xb3, 0x0c, 0x99,
+ 0xc0, 0x56, 0xa8, 0xbd, 0x73, 0xce, 0x05, 0x84,
+ 0x3e, 0x30, 0xaa, 0xc4, 0x9b, 0x1b, 0x04, 0x2a,
+ 0x9f, 0xd7, 0x43, 0x2b, 0x23, 0xdf, 0xbf, 0xaa,
+ 0xd5, 0xc2, 0x43, 0x2d, 0x70, 0xab, 0xdc, 0x75,
+ 0xad, 0xac, 0xf7, 0xc0, 0xbe, 0x67, 0xb2, 0x74,
+ 0xed, 0x67, 0x10, 0x4a, 0x92, 0x60, 0xc1, 0x40,
+ 0x50, 0x19, 0x8a, 0x8a, 0x8c, 0x09, 0x0e, 0x72,
+ 0xe1, 0x73, 0x5e, 0xe8, 0x41, 0x85, 0x63, 0x9f,
+ 0x3f, 0xd7, 0x7d, 0xc4, 0xfb, 0x22, 0x5d, 0x92,
+ 0x6c, 0xb3, 0x1e, 0xe2, 0x50, 0x2f, 0x82, 0xa8,
+ 0x28, 0xc0, 0xb5, 0xd7, 0x5f, 0x68, 0x0d, 0x2c,
+ 0x2d, 0xaf, 0x7e, 0xfa, 0x2e, 0x08, 0x0f, 0x1f,
+ 0x70, 0x9f, 0xe9, 0x19, 0x72, 0x55, 0xf8, 0xfb,
+ 0x51, 0xd2, 0x33, 0x5d, 0xa0, 0xd3, 0x2b, 0x0a,
+ 0x6c, 0xbc, 0x4e, 0xcf, 0x36, 0x4d, 0xdc, 0x3b,
+ 0xe9, 0x3e, 0x81, 0x7c, 0x61, 0xdb, 0x20, 0x2d,
+ 0x3a, 0xc3, 0xb3, 0x0c, 0x1e, 0x00, 0xb9, 0x7c,
+ 0xf5, 0xca, 0x10, 0x5f, 0x3a, 0x71, 0xb3, 0xe4,
+ 0x20, 0xdb, 0x0c, 0x2a, 0x98, 0x63, 0x45, 0x00,
+ 0x58, 0xf6, 0x68, 0xe4, 0x0b, 0xda, 0x13, 0x3b,
+ 0x60, 0x5c, 0x76, 0xdb, 0xb9, 0x97, 0x71, 0xe4,
+ 0xd9, 0xb7, 0xdb, 0xbd, 0x68, 0xc7, 0x84, 0x84,
+ 0xaa, 0x7c, 0x68, 0x62, 0x5e, 0x16, 0xfc, 0xba,
+ 0x72, 0xaa, 0x9a, 0xa9, 0xeb, 0x7c, 0x75, 0x47,
+ 0x97, 0x7e, 0xad, 0xe2, 0xd9, 0x91, 0xe8, 0xe4,
+ 0xa5, 0x31, 0xd7, 0x01, 0x8e, 0xa2, 0x11, 0x88,
+ 0x95, 0xb9, 0xf2, 0x9b, 0xd3, 0x7f, 0x1b, 0x81,
+ 0x22, 0xf7, 0x98, 0x60, 0x0a, 0x64, 0xa6, 0xc1,
+ 0xf6, 0x49, 0xc7, 0xe3, 0x07, 0x4d, 0x94, 0x7a,
+ 0xcf, 0x6e, 0x68, 0x0c, 0x1b, 0x3f, 0x6e, 0x2e,
+ 0xee, 0x92, 0xfa, 0x52, 0xb3, 0x59, 0xf8, 0xf1,
+ 0x8f, 0x6a, 0x66, 0xa3, 0x82, 0x76, 0x4a, 0x07,
+ 0x1a, 0xc7, 0xdd, 0xf5, 0xda, 0x9c, 0x3c, 0x24,
+ 0xbf, 0xfd, 0x42, 0xa1, 0x10, 0x64, 0x6a, 0x0f,
+ 0x89, 0xee, 0x36, 0xa5, 0xce, 0x99, 0x48, 0x6a,
+ 0xf0, 0x9f, 0x9e, 0x69, 0xa4, 0x40, 0x20, 0xe9,
+ 0x16, 0x15, 0xf7, 0xdb, 0x75, 0x02, 0xcb, 0xe9,
+ 0x73, 0x8b, 0x3b, 0x49, 0x2f, 0xf0, 0xaf, 0x51,
+ 0x06, 0x5c, 0xdf, 0x27, 0x27, 0x49, 0x6a, 0xd1,
+ 0xcc, 0xc7, 0xb5, 0x63, 0xb5, 0xfc, 0xb8, 0x5c,
+ 0x87, 0x7f, 0x84, 0xb4, 0xcc, 0x14, 0xa9, 0x53,
+ 0xda, 0xa4, 0x56, 0xf8, 0xb6, 0x1b, 0xcc, 0x40,
+ 0x27, 0x52, 0x06, 0x5a, 0x13, 0x81, 0xd7, 0x3a,
+ 0xd4, 0x3b, 0xfb, 0x49, 0x65, 0x31, 0x33, 0xb2,
+ 0xfa, 0xcd, 0xad, 0x58, 0x4e, 0x2b, 0xae, 0xd2,
+ 0x20, 0xfb, 0x1a, 0x48, 0xb4, 0x3f, 0x9a, 0xd8,
+ 0x7a, 0x35, 0x4a, 0xc8, 0xee, 0x88, 0x5e, 0x07,
+ 0x66, 0x54, 0xb9, 0xec, 0x9f, 0xa3, 0xe3, 0xb9,
+ 0x37, 0xaa, 0x49, 0x76, 0x31, 0xda, 0x74, 0x2d,
+ 0x3c, 0xa4, 0x65, 0x10, 0x32, 0x38, 0xf0, 0xde,
+ 0xd3, 0x99, 0x17, 0xaa, 0x71, 0xaa, 0x8f, 0x0f,
+ 0x8c, 0xaf, 0xa2, 0xf8, 0x5d, 0x64, 0xba, 0x1d,
+ 0xa3, 0xef, 0x96, 0x73, 0xe8, 0xa1, 0x02, 0x8d,
+ 0x0c, 0x6d, 0xb8, 0x06, 0x90, 0xb8, 0x08, 0x56,
+ 0x2c, 0xa7, 0x06, 0xc9, 0xc2, 0x38, 0xdb, 0x7c,
+ 0x63, 0xb1, 0x57, 0x8e, 0xea, 0x7c, 0x79, 0xf3,
+ 0x49, 0x1d, 0xfe, 0x9f, 0xf3, 0x6e, 0xb1, 0x1d,
+ 0xba, 0x19, 0x80, 0x1a, 0x0a, 0xd3, 0xb0, 0x26,
+ 0x21, 0x40, 0xb1, 0x7c, 0xf9, 0x4d, 0x8d, 0x10,
+ 0xc1, 0x7e, 0xf4, 0xf6, 0x3c, 0xa8, 0xfd, 0x7c,
+ 0xa3, 0x92, 0xb2, 0x0f, 0xaa, 0xcc, 0xa6, 0x11,
+ 0xfe, 0x04, 0xe3, 0xd1, 0x7a, 0x32, 0x89, 0xdf,
+ 0x0d, 0xc4, 0x8f, 0x79, 0x6b, 0xca, 0x16, 0x7c,
+ 0x6e, 0xf9, 0xad, 0x0f, 0xf6, 0xfe, 0x27, 0xdb,
+ 0xc4, 0x13, 0x70, 0xf1, 0x62, 0x1a, 0x4f, 0x79,
+ 0x40, 0xc9, 0x9b, 0x8b, 0x21, 0xea, 0x84, 0xfa,
+ 0xf5, 0xf1, 0x89, 0xce, 0xb7, 0x55, 0x0a, 0x80,
+ 0x39, 0x2f, 0x55, 0x36, 0x16, 0x9c, 0x7b, 0x08,
+ 0xbd, 0x87, 0x0d, 0xa5, 0x32, 0xf1, 0x52, 0x7c,
+ 0xe8, 0x55, 0x60, 0x5b, 0xd7, 0x69, 0xe4, 0xfc,
+ 0xfa, 0x12, 0x85, 0x96, 0xea, 0x50, 0x28, 0xab,
+ 0x8a, 0xf7, 0xbb, 0x0e, 0x53, 0x74, 0xca, 0xa6,
+ 0x27, 0x09, 0xc2, 0xb5, 0xde, 0x18, 0x14, 0xd9,
+ 0xea, 0xe5, 0x29, 0x1c, 0x40, 0x56, 0xcf, 0xd7,
+ 0xae, 0x05, 0x3f, 0x65, 0xaf, 0x05, 0x73, 0xe2,
+ 0x35, 0x96, 0x27, 0x07, 0x14, 0xc0, 0xad, 0x33,
+ 0xf1, 0xdc, 0x44, 0x7a, 0x89, 0x17, 0x77, 0xd2,
+ 0x9c, 0x58, 0x60, 0xf0, 0x3f, 0x7b, 0x2d, 0x2e,
+ 0x57, 0x95, 0x54, 0x87, 0xed, 0xf2, 0xc7, 0x4c,
+ 0xf0, 0xae, 0x56, 0x29, 0x19, 0x7d, 0x66, 0x4b,
+ 0x9b, 0x83, 0x84, 0x42, 0x3b, 0x01, 0x25, 0x66,
+ 0x8e, 0x02, 0xde, 0xb9, 0x83, 0x54, 0x19, 0xf6,
+ 0x9f, 0x79, 0x0d, 0x67, 0xc5, 0x1d, 0x7a, 0x44,
+ 0x02, 0x98, 0xa7, 0x16, 0x1c, 0x29, 0x0d, 0x74,
+ 0xff, 0x85, 0x40, 0x06, 0xef, 0x2c, 0xa9, 0xc6,
+ 0xf5, 0x53, 0x07, 0x06, 0xae, 0xe4, 0xfa, 0x5f,
+ 0xd8, 0x39, 0x4d, 0xf1, 0x9b, 0x6b, 0xd9, 0x24,
+ 0x84, 0xfe, 0x03, 0x4c, 0xb2, 0x3f, 0xdf, 0xa1,
+ 0x05, 0x9e, 0x50, 0x14, 0x5a, 0xd9, 0x1a, 0xa2,
+ 0xa7, 0xfa, 0xfa, 0x17, 0xf7, 0x78, 0xd6, 0xb5,
+ 0x92, 0x61, 0x91, 0xac, 0x36, 0xfa, 0x56, 0x0d,
+ 0x38, 0x32, 0x18, 0x85, 0x08, 0x58, 0x37, 0xf0,
+ 0x4b, 0xdb, 0x59, 0xe7, 0xa4, 0x34, 0xc0, 0x1b,
+ 0x01, 0xaf, 0x2d, 0xde, 0xa1, 0xaa, 0x5d, 0xd3,
+ 0xec, 0xe1, 0xd4, 0xf7, 0xe6, 0x54, 0x68, 0xf0,
+ 0x51, 0x97, 0xa7, 0x89, 0xea, 0x24, 0xad, 0xd3,
+ 0x6e, 0x47, 0x93, 0x8b, 0x4b, 0xb4, 0xf7, 0x1c,
+ 0x42, 0x06, 0x67, 0xe8, 0x99, 0xf6, 0xf5, 0x7b,
+ 0x85, 0xb5, 0x65, 0xb5, 0xb5, 0xd2, 0x37, 0xf5,
+ 0xf3, 0x02, 0xa6, 0x4d, 0x11, 0xa7, 0xdc, 0x51,
+ 0x09, 0x7f, 0xa0, 0xd8, 0x88, 0x1c, 0x13, 0x71,
+ 0xae, 0x9c, 0xb7, 0x7b, 0x34, 0xd6, 0x4e, 0x68,
+ 0x26, 0x83, 0x51, 0xaf, 0x1d, 0xee, 0x8b, 0xbb,
+ 0x69, 0x43, 0x2b, 0x9e, 0x8a, 0xbc, 0x02, 0x0e,
+ 0xa0, 0x1b, 0xe0, 0xa8, 0x5f, 0x6f, 0xaf, 0x1b,
+ 0x8f, 0xe7, 0x64, 0x71, 0x74, 0x11, 0x7e, 0xa8,
+ 0xd8, 0xf9, 0x97, 0x06, 0xc3, 0xb6, 0xfb, 0xfb,
+ 0xb7, 0x3d, 0x35, 0x9d, 0x3b, 0x52, 0xed, 0x54,
+ 0xca, 0xf4, 0x81, 0x01, 0x2d, 0x1b, 0xc3, 0xa7,
+ 0x00, 0x3d, 0x1a, 0x39, 0x54, 0xe1, 0xf6, 0xff,
+ 0xed, 0x6f, 0x0b, 0x5a, 0x68, 0xda, 0x58, 0xdd,
+ 0xa9, 0xcf, 0x5c, 0x4a, 0xe5, 0x09, 0x4e, 0xde,
+ 0x9d, 0xbc, 0x3e, 0xee, 0x5a, 0x00, 0x3b, 0x2c,
+ 0x87, 0x10, 0x65, 0x60, 0xdd, 0xd7, 0x56, 0xd1,
+ 0x4c, 0x64, 0x45, 0xe4, 0x21, 0xec, 0x78, 0xf8,
+ 0x25, 0x7a, 0x3e, 0x16, 0x5d, 0x09, 0x53, 0x14,
+ 0xbe, 0x4f, 0xae, 0x87, 0xd8, 0xd1, 0xaa, 0x3c,
+ 0xf6, 0x3e, 0xa4, 0x70, 0x8c, 0x5e, 0x70, 0xa4,
+ 0xb3, 0x6b, 0x66, 0x73, 0xd3, 0xbf, 0x31, 0x06,
+ 0x19, 0x62, 0x93, 0x15, 0xf2, 0x86, 0xe4, 0x52,
+ 0x7e, 0x53, 0x4c, 0x12, 0x38, 0xcc, 0x34, 0x7d,
+ 0x57, 0xf6, 0x42, 0x93, 0x8a, 0xc4, 0xee, 0x5c,
+ 0x8a, 0xe1, 0x52, 0x8f, 0x56, 0x64, 0xf6, 0xa6,
+ 0xd1, 0x91, 0x57, 0x70, 0xcd, 0x11, 0x76, 0xf5,
+ 0x59, 0x60, 0x60, 0x3c, 0xc1, 0xc3, 0x0b, 0x7f,
+ 0x58, 0x1a, 0x50, 0x91, 0xf1, 0x68, 0x8f, 0x6e,
+ 0x74, 0x74, 0xa8, 0x51, 0x0b, 0xf7, 0x7a, 0x98,
+ 0x37, 0xf2, 0x0a, 0x0e, 0xa4, 0x97, 0x04, 0xb8,
+ 0x9b, 0xfd, 0xa0, 0xea, 0xf7, 0x0d, 0xe1, 0xdb,
+ 0x03, 0xf0, 0x31, 0x29, 0xf8, 0xdd, 0x6b, 0x8b,
+ 0x5d, 0xd8, 0x59, 0xa9, 0x29, 0xcf, 0x9a, 0x79,
+ 0x89, 0x19, 0x63, 0x46, 0x09, 0x79, 0x6a, 0x11,
+ 0xda, 0x63, 0x68, 0x48, 0x77, 0x23, 0xfb, 0x7d,
+ 0x3a, 0x43, 0xcb, 0x02, 0x3b, 0x7a, 0x6d, 0x10,
+ 0x2a, 0x9e, 0xac, 0xf1, 0xd4, 0x19, 0xf8, 0x23,
+ 0x64, 0x1d, 0x2c, 0x5f, 0xf2, 0xb0, 0x5c, 0x23,
+ 0x27, 0xf7, 0x27, 0x30, 0x16, 0x37, 0xb1, 0x90,
+ 0xab, 0x38, 0xfb, 0x55, 0xcd, 0x78, 0x58, 0xd4,
+ 0x7d, 0x43, 0xf6, 0x45, 0x5e, 0x55, 0x8d, 0xb1,
+ 0x02, 0x65, 0x58, 0xb4, 0x13, 0x4b, 0x36, 0xf7,
+ 0xcc, 0xfe, 0x3d, 0x0b, 0x82, 0xe2, 0x12, 0x11,
+ 0xbb, 0xe6, 0xb8, 0x3a, 0x48, 0x71, 0xc7, 0x50,
+ 0x06, 0x16, 0x3a, 0xe6, 0x7c, 0x05, 0xc7, 0xc8,
+ 0x4d, 0x2f, 0x08, 0x6a, 0x17, 0x9a, 0x95, 0x97,
+ 0x50, 0x68, 0xdc, 0x28, 0x18, 0xc4, 0x61, 0x38,
+ 0xb9, 0xe0, 0x3e, 0x78, 0xdb, 0x29, 0xe0, 0x9f,
+ 0x52, 0xdd, 0xf8, 0x4f, 0x91, 0xc1, 0xd0, 0x33,
+ 0xa1, 0x7a, 0x8e, 0x30, 0x13, 0x82, 0x07, 0x9f,
+ 0xd3, 0x31, 0x0f, 0x23, 0xbe, 0x32, 0x5a, 0x75,
+ 0xcf, 0x96, 0xb2, 0xec, 0xb5, 0x32, 0xac, 0x21,
+ 0xd1, 0x82, 0x33, 0xd3, 0x15, 0x74, 0xbd, 0x90,
+ 0xf1, 0x2c, 0xe6, 0x5f, 0x8d, 0xe3, 0x02, 0xe8,
+ 0xe9, 0xc4, 0xca, 0x96, 0xeb, 0x0e, 0xbc, 0x91,
+ 0xf4, 0xb9, 0xea, 0xd9, 0x1b, 0x75, 0xbd, 0xe1,
+ 0xac, 0x2a, 0x05, 0x37, 0x52, 0x9b, 0x1b, 0x3f,
+ 0x5a, 0xdc, 0x21, 0xc3, 0x98, 0xbb, 0xaf, 0xa3,
+ 0xf2, 0x00, 0xbf, 0x0d, 0x30, 0x89, 0x05, 0xcc,
+ 0xa5, 0x76, 0xf5, 0x06, 0xf0, 0xc6, 0x54, 0x8a,
+ 0x5d, 0xd4, 0x1e, 0xc1, 0xf2, 0xce, 0xb0, 0x62,
+ 0xc8, 0xfc, 0x59, 0x42, 0x9a, 0x90, 0x60, 0x55,
+ 0xfe, 0x88, 0xa5, 0x8b, 0xb8, 0x33, 0x0c, 0x23,
+ 0x24, 0x0d, 0x15, 0x70, 0x37, 0x1e, 0x3d, 0xf6,
+ 0xd2, 0xea, 0x92, 0x10, 0xb2, 0xc4, 0x51, 0xac,
+ 0xf2, 0xac, 0xf3, 0x6b, 0x6c, 0xaa, 0xcf, 0x12,
+ 0xc5, 0x6c, 0x90, 0x50, 0xb5, 0x0c, 0xfc, 0x1a,
+ 0x15, 0x52, 0xe9, 0x26, 0xc6, 0x52, 0xa4, 0xe7,
+ 0x81, 0x69, 0xe1, 0xe7, 0x9e, 0x30, 0x01, 0xec,
+ 0x84, 0x89, 0xb2, 0x0d, 0x66, 0xdd, 0xce, 0x28,
+ 0x5c, 0xec, 0x98, 0x46, 0x68, 0x21, 0x9f, 0x88,
+ 0x3f, 0x1f, 0x42, 0x77, 0xce, 0xd0, 0x61, 0xd4,
+ 0x20, 0xa7, 0xff, 0x53, 0xad, 0x37, 0xd0, 0x17,
+ 0x35, 0xc9, 0xfc, 0xba, 0x0a, 0x78, 0x3f, 0xf2,
+ 0xcc, 0x86, 0x89, 0xe8, 0x4b, 0x3c, 0x48, 0x33,
+ 0x09, 0x7f, 0xc6, 0xc0, 0xdd, 0xb8, 0xfd, 0x7a,
+ 0x66, 0x66, 0x65, 0xeb, 0x47, 0xa7, 0x04, 0x28,
+ 0xa3, 0x19, 0x8e, 0xa9, 0xb1, 0x13, 0x67, 0x62,
+ 0x70, 0xcf, 0xd6
+};
+static const u8 dec_output012[] __initconst = {
+ 0x74, 0xa6, 0x3e, 0xe4, 0xb1, 0xcb, 0xaf, 0xb0,
+ 0x40, 0xe5, 0x0f, 0x9e, 0xf1, 0xf2, 0x89, 0xb5,
+ 0x42, 0x34, 0x8a, 0xa1, 0x03, 0xb7, 0xe9, 0x57,
+ 0x46, 0xbe, 0x20, 0xe4, 0x6e, 0xb0, 0xeb, 0xff,
+ 0xea, 0x07, 0x7e, 0xef, 0xe2, 0x55, 0x9f, 0xe5,
+ 0x78, 0x3a, 0xb7, 0x83, 0xc2, 0x18, 0x40, 0x7b,
+ 0xeb, 0xcd, 0x81, 0xfb, 0x90, 0x12, 0x9e, 0x46,
+ 0xa9, 0xd6, 0x4a, 0xba, 0xb0, 0x62, 0xdb, 0x6b,
+ 0x99, 0xc4, 0xdb, 0x54, 0x4b, 0xb8, 0xa5, 0x71,
+ 0xcb, 0xcd, 0x63, 0x32, 0x55, 0xfb, 0x31, 0xf0,
+ 0x38, 0xf5, 0xbe, 0x78, 0xe4, 0x45, 0xce, 0x1b,
+ 0x6a, 0x5b, 0x0e, 0xf4, 0x16, 0xe4, 0xb1, 0x3d,
+ 0xf6, 0x63, 0x7b, 0xa7, 0x0c, 0xde, 0x6f, 0x8f,
+ 0x74, 0xdf, 0xe0, 0x1e, 0x9d, 0xce, 0x8f, 0x24,
+ 0xef, 0x23, 0x35, 0x33, 0x7b, 0x83, 0x34, 0x23,
+ 0x58, 0x74, 0x14, 0x77, 0x1f, 0xc2, 0x4f, 0x4e,
+ 0xc6, 0x89, 0xf9, 0x52, 0x09, 0x37, 0x64, 0x14,
+ 0xc4, 0x01, 0x6b, 0x9d, 0x77, 0xe8, 0x90, 0x5d,
+ 0xa8, 0x4a, 0x2a, 0xef, 0x5c, 0x7f, 0xeb, 0xbb,
+ 0xb2, 0xc6, 0x93, 0x99, 0x66, 0xdc, 0x7f, 0xd4,
+ 0x9e, 0x2a, 0xca, 0x8d, 0xdb, 0xe7, 0x20, 0xcf,
+ 0xe4, 0x73, 0xae, 0x49, 0x7d, 0x64, 0x0f, 0x0e,
+ 0x28, 0x46, 0xa9, 0xa8, 0x32, 0xe4, 0x0e, 0xf6,
+ 0x51, 0x53, 0xb8, 0x3c, 0xb1, 0xff, 0xa3, 0x33,
+ 0x41, 0x75, 0xff, 0xf1, 0x6f, 0xf1, 0xfb, 0xbb,
+ 0x83, 0x7f, 0x06, 0x9b, 0xe7, 0x1b, 0x0a, 0xe0,
+ 0x5c, 0x33, 0x60, 0x5b, 0xdb, 0x5b, 0xed, 0xfe,
+ 0xa5, 0x16, 0x19, 0x72, 0xa3, 0x64, 0x23, 0x00,
+ 0x02, 0xc7, 0xf3, 0x6a, 0x81, 0x3e, 0x44, 0x1d,
+ 0x79, 0x15, 0x5f, 0x9a, 0xde, 0xe2, 0xfd, 0x1b,
+ 0x73, 0xc1, 0xbc, 0x23, 0xba, 0x31, 0xd2, 0x50,
+ 0xd5, 0xad, 0x7f, 0x74, 0xa7, 0xc9, 0xf8, 0x3e,
+ 0x2b, 0x26, 0x10, 0xf6, 0x03, 0x36, 0x74, 0xe4,
+ 0x0e, 0x6a, 0x72, 0xb7, 0x73, 0x0a, 0x42, 0x28,
+ 0xc2, 0xad, 0x5e, 0x03, 0xbe, 0xb8, 0x0b, 0xa8,
+ 0x5b, 0xd4, 0xb8, 0xba, 0x52, 0x89, 0xb1, 0x9b,
+ 0xc1, 0xc3, 0x65, 0x87, 0xed, 0xa5, 0xf4, 0x86,
+ 0xfd, 0x41, 0x80, 0x91, 0x27, 0x59, 0x53, 0x67,
+ 0x15, 0x78, 0x54, 0x8b, 0x2d, 0x3d, 0xc7, 0xff,
+ 0x02, 0x92, 0x07, 0x5f, 0x7a, 0x4b, 0x60, 0x59,
+ 0x3c, 0x6f, 0x5c, 0xd8, 0xec, 0x95, 0xd2, 0xfe,
+ 0xa0, 0x3b, 0xd8, 0x3f, 0xd1, 0x69, 0xa6, 0xd6,
+ 0x41, 0xb2, 0xf4, 0x4d, 0x12, 0xf4, 0x58, 0x3e,
+ 0x66, 0x64, 0x80, 0x31, 0x9b, 0xa8, 0x4c, 0x8b,
+ 0x07, 0xb2, 0xec, 0x66, 0x94, 0x66, 0x47, 0x50,
+ 0x50, 0x5f, 0x18, 0x0b, 0x0e, 0xd6, 0xc0, 0x39,
+ 0x21, 0x13, 0x9e, 0x33, 0xbc, 0x79, 0x36, 0x02,
+ 0x96, 0x70, 0xf0, 0x48, 0x67, 0x2f, 0x26, 0xe9,
+ 0x6d, 0x10, 0xbb, 0xd6, 0x3f, 0xd1, 0x64, 0x7a,
+ 0x2e, 0xbe, 0x0c, 0x61, 0xf0, 0x75, 0x42, 0x38,
+ 0x23, 0xb1, 0x9e, 0x9f, 0x7c, 0x67, 0x66, 0xd9,
+ 0x58, 0x9a, 0xf1, 0xbb, 0x41, 0x2a, 0x8d, 0x65,
+ 0x84, 0x94, 0xfc, 0xdc, 0x6a, 0x50, 0x64, 0xdb,
+ 0x56, 0x33, 0x76, 0x00, 0x10, 0xed, 0xbe, 0xd2,
+ 0x12, 0xf6, 0xf6, 0x1b, 0xa2, 0x16, 0xde, 0xae,
+ 0x31, 0x95, 0xdd, 0xb1, 0x08, 0x7e, 0x4e, 0xee,
+ 0xe7, 0xf9, 0xa5, 0xfb, 0x5b, 0x61, 0x43, 0x00,
+ 0x40, 0xf6, 0x7e, 0x02, 0x04, 0x32, 0x4e, 0x0c,
+ 0xe2, 0x66, 0x0d, 0xd7, 0x07, 0x98, 0x0e, 0xf8,
+ 0x72, 0x34, 0x6d, 0x95, 0x86, 0xd7, 0xcb, 0x31,
+ 0x54, 0x47, 0xd0, 0x38, 0x29, 0x9c, 0x5a, 0x68,
+ 0xd4, 0x87, 0x76, 0xc9, 0xe7, 0x7e, 0xe3, 0xf4,
+ 0x81, 0x6d, 0x18, 0xcb, 0xc9, 0x05, 0xaf, 0xa0,
+ 0xfb, 0x66, 0xf7, 0xf1, 0x1c, 0xc6, 0x14, 0x11,
+ 0x4f, 0x2b, 0x79, 0x42, 0x8b, 0xbc, 0xac, 0xe7,
+ 0x6c, 0xfe, 0x0f, 0x58, 0xe7, 0x7c, 0x78, 0x39,
+ 0x30, 0xb0, 0x66, 0x2c, 0x9b, 0x6d, 0x3a, 0xe1,
+ 0xcf, 0xc9, 0xa4, 0x0e, 0x6d, 0x6d, 0x8a, 0xa1,
+ 0x3a, 0xe7, 0x28, 0xd4, 0x78, 0x4c, 0xa6, 0xa2,
+ 0x2a, 0xa6, 0x03, 0x30, 0xd7, 0xa8, 0x25, 0x66,
+ 0x87, 0x2f, 0x69, 0x5c, 0x4e, 0xdd, 0xa5, 0x49,
+ 0x5d, 0x37, 0x4a, 0x59, 0xc4, 0xaf, 0x1f, 0xa2,
+ 0xe4, 0xf8, 0xa6, 0x12, 0x97, 0xd5, 0x79, 0xf5,
+ 0xe2, 0x4a, 0x2b, 0x5f, 0x61, 0xe4, 0x9e, 0xe3,
+ 0xee, 0xb8, 0xa7, 0x5b, 0x2f, 0xf4, 0x9e, 0x6c,
+ 0xfb, 0xd1, 0xc6, 0x56, 0x77, 0xba, 0x75, 0xaa,
+ 0x3d, 0x1a, 0xa8, 0x0b, 0xb3, 0x68, 0x24, 0x00,
+ 0x10, 0x7f, 0xfd, 0xd7, 0xa1, 0x8d, 0x83, 0x54,
+ 0x4f, 0x1f, 0xd8, 0x2a, 0xbe, 0x8a, 0x0c, 0x87,
+ 0xab, 0xa2, 0xde, 0xc3, 0x39, 0xbf, 0x09, 0x03,
+ 0xa5, 0xf3, 0x05, 0x28, 0xe1, 0xe1, 0xee, 0x39,
+ 0x70, 0x9c, 0xd8, 0x81, 0x12, 0x1e, 0x02, 0x40,
+ 0xd2, 0x6e, 0xf0, 0xeb, 0x1b, 0x3d, 0x22, 0xc6,
+ 0xe5, 0xe3, 0xb4, 0x5a, 0x98, 0xbb, 0xf0, 0x22,
+ 0x28, 0x8d, 0xe5, 0xd3, 0x16, 0x48, 0x24, 0xa5,
+ 0xe6, 0x66, 0x0c, 0xf9, 0x08, 0xf9, 0x7e, 0x1e,
+ 0xe1, 0x28, 0x26, 0x22, 0xc7, 0xc7, 0x0a, 0x32,
+ 0x47, 0xfa, 0xa3, 0xbe, 0x3c, 0xc4, 0xc5, 0x53,
+ 0x0a, 0xd5, 0x94, 0x4a, 0xd7, 0x93, 0xd8, 0x42,
+ 0x99, 0xb9, 0x0a, 0xdb, 0x56, 0xf7, 0xb9, 0x1c,
+ 0x53, 0x4f, 0xfa, 0xd3, 0x74, 0xad, 0xd9, 0x68,
+ 0xf1, 0x1b, 0xdf, 0x61, 0xc6, 0x5e, 0xa8, 0x48,
+ 0xfc, 0xd4, 0x4a, 0x4c, 0x3c, 0x32, 0xf7, 0x1c,
+ 0x96, 0x21, 0x9b, 0xf9, 0xa3, 0xcc, 0x5a, 0xce,
+ 0xd5, 0xd7, 0x08, 0x24, 0xf6, 0x1c, 0xfd, 0xdd,
+ 0x38, 0xc2, 0x32, 0xe9, 0xb8, 0xe7, 0xb6, 0xfa,
+ 0x9d, 0x45, 0x13, 0x2c, 0x83, 0xfd, 0x4a, 0x69,
+ 0x82, 0xcd, 0xdc, 0xb3, 0x76, 0x0c, 0x9e, 0xd8,
+ 0xf4, 0x1b, 0x45, 0x15, 0xb4, 0x97, 0xe7, 0x58,
+ 0x34, 0xe2, 0x03, 0x29, 0x5a, 0xbf, 0xb6, 0xe0,
+ 0x5d, 0x13, 0xd9, 0x2b, 0xb4, 0x80, 0xb2, 0x45,
+ 0x81, 0x6a, 0x2e, 0x6c, 0x89, 0x7d, 0xee, 0xbb,
+ 0x52, 0xdd, 0x1f, 0x18, 0xe7, 0x13, 0x6b, 0x33,
+ 0x0e, 0xea, 0x36, 0x92, 0x77, 0x7b, 0x6d, 0x9c,
+ 0x5a, 0x5f, 0x45, 0x7b, 0x7b, 0x35, 0x62, 0x23,
+ 0xd1, 0xbf, 0x0f, 0xd0, 0x08, 0x1b, 0x2b, 0x80,
+ 0x6b, 0x7e, 0xf1, 0x21, 0x47, 0xb0, 0x57, 0xd1,
+ 0x98, 0x72, 0x90, 0x34, 0x1c, 0x20, 0x04, 0xff,
+ 0x3d, 0x5c, 0xee, 0x0e, 0x57, 0x5f, 0x6f, 0x24,
+ 0x4e, 0x3c, 0xea, 0xfc, 0xa5, 0xa9, 0x83, 0xc9,
+ 0x61, 0xb4, 0x51, 0x24, 0xf8, 0x27, 0x5e, 0x46,
+ 0x8c, 0xb1, 0x53, 0x02, 0x96, 0x35, 0xba, 0xb8,
+ 0x4c, 0x71, 0xd3, 0x15, 0x59, 0x35, 0x22, 0x20,
+ 0xad, 0x03, 0x9f, 0x66, 0x44, 0x3b, 0x9c, 0x35,
+ 0x37, 0x1f, 0x9b, 0xbb, 0xf3, 0xdb, 0x35, 0x63,
+ 0x30, 0x64, 0xaa, 0xa2, 0x06, 0xa8, 0x5d, 0xbb,
+ 0xe1, 0x9f, 0x70, 0xec, 0x82, 0x11, 0x06, 0x36,
+ 0xec, 0x8b, 0x69, 0x66, 0x24, 0x44, 0xc9, 0x4a,
+ 0x57, 0xbb, 0x9b, 0x78, 0x13, 0xce, 0x9c, 0x0c,
+ 0xba, 0x92, 0x93, 0x63, 0xb8, 0xe2, 0x95, 0x0f,
+ 0x0f, 0x16, 0x39, 0x52, 0xfd, 0x3a, 0x6d, 0x02,
+ 0x4b, 0xdf, 0x13, 0xd3, 0x2a, 0x22, 0xb4, 0x03,
+ 0x7c, 0x54, 0x49, 0x96, 0x68, 0x54, 0x10, 0xfa,
+ 0xef, 0xaa, 0x6c, 0xe8, 0x22, 0xdc, 0x71, 0x16,
+ 0x13, 0x1a, 0xf6, 0x28, 0xe5, 0x6d, 0x77, 0x3d,
+ 0xcd, 0x30, 0x63, 0xb1, 0x70, 0x52, 0xa1, 0xc5,
+ 0x94, 0x5f, 0xcf, 0xe8, 0xb8, 0x26, 0x98, 0xf7,
+ 0x06, 0xa0, 0x0a, 0x70, 0xfa, 0x03, 0x80, 0xac,
+ 0xc1, 0xec, 0xd6, 0x4c, 0x54, 0xd7, 0xfe, 0x47,
+ 0xb6, 0x88, 0x4a, 0xf7, 0x71, 0x24, 0xee, 0xf3,
+ 0xd2, 0xc2, 0x4a, 0x7f, 0xfe, 0x61, 0xc7, 0x35,
+ 0xc9, 0x37, 0x67, 0xcb, 0x24, 0x35, 0xda, 0x7e,
+ 0xca, 0x5f, 0xf3, 0x8d, 0xd4, 0x13, 0x8e, 0xd6,
+ 0xcb, 0x4d, 0x53, 0x8f, 0x53, 0x1f, 0xc0, 0x74,
+ 0xf7, 0x53, 0xb9, 0x5e, 0x23, 0x37, 0xba, 0x6e,
+ 0xe3, 0x9d, 0x07, 0x55, 0x25, 0x7b, 0xe6, 0x2a,
+ 0x64, 0xd1, 0x32, 0xdd, 0x54, 0x1b, 0x4b, 0xc0,
+ 0xe1, 0xd7, 0x69, 0x58, 0xf8, 0x93, 0x29, 0xc4,
+ 0xdd, 0x23, 0x2f, 0xa5, 0xfc, 0x9d, 0x7e, 0xf8,
+ 0xd4, 0x90, 0xcd, 0x82, 0x55, 0xdc, 0x16, 0x16,
+ 0x9f, 0x07, 0x52, 0x9b, 0x9d, 0x25, 0xed, 0x32,
+ 0xc5, 0x7b, 0xdf, 0xf6, 0x83, 0x46, 0x3d, 0x65,
+ 0xb7, 0xef, 0x87, 0x7a, 0x12, 0x69, 0x8f, 0x06,
+ 0x7c, 0x51, 0x15, 0x4a, 0x08, 0xe8, 0xac, 0x9a,
+ 0x0c, 0x24, 0xa7, 0x27, 0xd8, 0x46, 0x2f, 0xe7,
+ 0x01, 0x0e, 0x1c, 0xc6, 0x91, 0xb0, 0x6e, 0x85,
+ 0x65, 0xf0, 0x29, 0x0d, 0x2e, 0x6b, 0x3b, 0xfb,
+ 0x4b, 0xdf, 0xe4, 0x80, 0x93, 0x03, 0x66, 0x46,
+ 0x3e, 0x8a, 0x6e, 0xf3, 0x5e, 0x4d, 0x62, 0x0e,
+ 0x49, 0x05, 0xaf, 0xd4, 0xf8, 0x21, 0x20, 0x61,
+ 0x1d, 0x39, 0x17, 0xf4, 0x61, 0x47, 0x95, 0xfb,
+ 0x15, 0x2e, 0xb3, 0x4f, 0xd0, 0x5d, 0xf5, 0x7d,
+ 0x40, 0xda, 0x90, 0x3c, 0x6b, 0xcb, 0x17, 0x00,
+ 0x13, 0x3b, 0x64, 0x34, 0x1b, 0xf0, 0xf2, 0xe5,
+ 0x3b, 0xb2, 0xc7, 0xd3, 0x5f, 0x3a, 0x44, 0xa6,
+ 0x9b, 0xb7, 0x78, 0x0e, 0x42, 0x5d, 0x4c, 0xc1,
+ 0xe9, 0xd2, 0xcb, 0xb7, 0x78, 0xd1, 0xfe, 0x9a,
+ 0xb5, 0x07, 0xe9, 0xe0, 0xbe, 0xe2, 0x8a, 0xa7,
+ 0x01, 0x83, 0x00, 0x8c, 0x5c, 0x08, 0xe6, 0x63,
+ 0x12, 0x92, 0xb7, 0xb7, 0xa6, 0x19, 0x7d, 0x38,
+ 0x13, 0x38, 0x92, 0x87, 0x24, 0xf9, 0x48, 0xb3,
+ 0x5e, 0x87, 0x6a, 0x40, 0x39, 0x5c, 0x3f, 0xed,
+ 0x8f, 0xee, 0xdb, 0x15, 0x82, 0x06, 0xda, 0x49,
+ 0x21, 0x2b, 0xb5, 0xbf, 0x32, 0x7c, 0x9f, 0x42,
+ 0x28, 0x63, 0xcf, 0xaf, 0x1e, 0xf8, 0xc6, 0xa0,
+ 0xd1, 0x02, 0x43, 0x57, 0x62, 0xec, 0x9b, 0x0f,
+ 0x01, 0x9e, 0x71, 0xd8, 0x87, 0x9d, 0x01, 0xc1,
+ 0x58, 0x77, 0xd9, 0xaf, 0xb1, 0x10, 0x7e, 0xdd,
+ 0xa6, 0x50, 0x96, 0xe5, 0xf0, 0x72, 0x00, 0x6d,
+ 0x4b, 0xf8, 0x2a, 0x8f, 0x19, 0xf3, 0x22, 0x88,
+ 0x11, 0x4a, 0x8b, 0x7c, 0xfd, 0xb7, 0xed, 0xe1,
+ 0xf6, 0x40, 0x39, 0xe0, 0xe9, 0xf6, 0x3d, 0x25,
+ 0xe6, 0x74, 0x3c, 0x58, 0x57, 0x7f, 0xe1, 0x22,
+ 0x96, 0x47, 0x31, 0x91, 0xba, 0x70, 0x85, 0x28,
+ 0x6b, 0x9f, 0x6e, 0x25, 0xac, 0x23, 0x66, 0x2f,
+ 0x29, 0x88, 0x28, 0xce, 0x8c, 0x5c, 0x88, 0x53,
+ 0xd1, 0x3b, 0xcc, 0x6a, 0x51, 0xb2, 0xe1, 0x28,
+ 0x3f, 0x91, 0xb4, 0x0d, 0x00, 0x3a, 0xe3, 0xf8,
+ 0xc3, 0x8f, 0xd7, 0x96, 0x62, 0x0e, 0x2e, 0xfc,
+ 0xc8, 0x6c, 0x77, 0xa6, 0x1d, 0x22, 0xc1, 0xb8,
+ 0xe6, 0x61, 0xd7, 0x67, 0x36, 0x13, 0x7b, 0xbb,
+ 0x9b, 0x59, 0x09, 0xa6, 0xdf, 0xf7, 0x6b, 0xa3,
+ 0x40, 0x1a, 0xf5, 0x4f, 0xb4, 0xda, 0xd3, 0xf3,
+ 0x81, 0x93, 0xc6, 0x18, 0xd9, 0x26, 0xee, 0xac,
+ 0xf0, 0xaa, 0xdf, 0xc5, 0x9c, 0xca, 0xc2, 0xa2,
+ 0xcc, 0x7b, 0x5c, 0x24, 0xb0, 0xbc, 0xd0, 0x6a,
+ 0x4d, 0x89, 0x09, 0xb8, 0x07, 0xfe, 0x87, 0xad,
+ 0x0a, 0xea, 0xb8, 0x42, 0xf9, 0x5e, 0xb3, 0x3e,
+ 0x36, 0x4c, 0xaf, 0x75, 0x9e, 0x1c, 0xeb, 0xbd,
+ 0xbc, 0xbb, 0x80, 0x40, 0xa7, 0x3a, 0x30, 0xbf,
+ 0xa8, 0x44, 0xf4, 0xeb, 0x38, 0xad, 0x29, 0xba,
+ 0x23, 0xed, 0x41, 0x0c, 0xea, 0xd2, 0xbb, 0x41,
+ 0x18, 0xd6, 0xb9, 0xba, 0x65, 0x2b, 0xa3, 0x91,
+ 0x6d, 0x1f, 0xa9, 0xf4, 0xd1, 0x25, 0x8d, 0x4d,
+ 0x38, 0xff, 0x64, 0xa0, 0xec, 0xde, 0xa6, 0xb6,
+ 0x79, 0xab, 0x8e, 0x33, 0x6c, 0x47, 0xde, 0xaf,
+ 0x94, 0xa4, 0xa5, 0x86, 0x77, 0x55, 0x09, 0x92,
+ 0x81, 0x31, 0x76, 0xc7, 0x34, 0x22, 0x89, 0x8e,
+ 0x3d, 0x26, 0x26, 0xd7, 0xfc, 0x1e, 0x16, 0x72,
+ 0x13, 0x33, 0x63, 0xd5, 0x22, 0xbe, 0xb8, 0x04,
+ 0x34, 0x84, 0x41, 0xbb, 0x80, 0xd0, 0x9f, 0x46,
+ 0x48, 0x07, 0xa7, 0xfc, 0x2b, 0x3a, 0x75, 0x55,
+ 0x8c, 0xc7, 0x6a, 0xbd, 0x7e, 0x46, 0x08, 0x84,
+ 0x0f, 0xd5, 0x74, 0xc0, 0x82, 0x8e, 0xaa, 0x61,
+ 0x05, 0x01, 0xb2, 0x47, 0x6e, 0x20, 0x6a, 0x2d,
+ 0x58, 0x70, 0x48, 0x32, 0xa7, 0x37, 0xd2, 0xb8,
+ 0x82, 0x1a, 0x51, 0xb9, 0x61, 0xdd, 0xfd, 0x9d,
+ 0x6b, 0x0e, 0x18, 0x97, 0xf8, 0x45, 0x5f, 0x87,
+ 0x10, 0xcf, 0x34, 0x72, 0x45, 0x26, 0x49, 0x70,
+ 0xe7, 0xa3, 0x78, 0xe0, 0x52, 0x89, 0x84, 0x94,
+ 0x83, 0x82, 0xc2, 0x69, 0x8f, 0xe3, 0xe1, 0x3f,
+ 0x60, 0x74, 0x88, 0xc4, 0xf7, 0x75, 0x2c, 0xfb,
+ 0xbd, 0xb6, 0xc4, 0x7e, 0x10, 0x0a, 0x6c, 0x90,
+ 0x04, 0x9e, 0xc3, 0x3f, 0x59, 0x7c, 0xce, 0x31,
+ 0x18, 0x60, 0x57, 0x73, 0x46, 0x94, 0x7d, 0x06,
+ 0xa0, 0x6d, 0x44, 0xec, 0xa2, 0x0a, 0x9e, 0x05,
+ 0x15, 0xef, 0xca, 0x5c, 0xbf, 0x00, 0xeb, 0xf7,
+ 0x3d, 0x32, 0xd4, 0xa5, 0xef, 0x49, 0x89, 0x5e,
+ 0x46, 0xb0, 0xa6, 0x63, 0x5b, 0x8a, 0x73, 0xae,
+ 0x6f, 0xd5, 0x9d, 0xf8, 0x4f, 0x40, 0xb5, 0xb2,
+ 0x6e, 0xd3, 0xb6, 0x01, 0xa9, 0x26, 0xa2, 0x21,
+ 0xcf, 0x33, 0x7a, 0x3a, 0xa4, 0x23, 0x13, 0xb0,
+ 0x69, 0x6a, 0xee, 0xce, 0xd8, 0x9d, 0x01, 0x1d,
+ 0x50, 0xc1, 0x30, 0x6c, 0xb1, 0xcd, 0xa0, 0xf0,
+ 0xf0, 0xa2, 0x64, 0x6f, 0xbb, 0xbf, 0x5e, 0xe6,
+ 0xab, 0x87, 0xb4, 0x0f, 0x4f, 0x15, 0xaf, 0xb5,
+ 0x25, 0xa1, 0xb2, 0xd0, 0x80, 0x2c, 0xfb, 0xf9,
+ 0xfe, 0xd2, 0x33, 0xbb, 0x76, 0xfe, 0x7c, 0xa8,
+ 0x66, 0xf7, 0xe7, 0x85, 0x9f, 0x1f, 0x85, 0x57,
+ 0x88, 0xe1, 0xe9, 0x63, 0xe4, 0xd8, 0x1c, 0xa1,
+ 0xfb, 0xda, 0x44, 0x05, 0x2e, 0x1d, 0x3a, 0x1c,
+ 0xff, 0xc8, 0x3b, 0xc0, 0xfe, 0xda, 0x22, 0x0b,
+ 0x43, 0xd6, 0x88, 0x39, 0x4c, 0x4a, 0xa6, 0x69,
+ 0x18, 0x93, 0x42, 0x4e, 0xb5, 0xcc, 0x66, 0x0d,
+ 0x09, 0xf8, 0x1e, 0x7c, 0xd3, 0x3c, 0x99, 0x0d,
+ 0x50, 0x1d, 0x62, 0xe9, 0x57, 0x06, 0xbf, 0x19,
+ 0x88, 0xdd, 0xad, 0x7b, 0x4f, 0xf9, 0xc7, 0x82,
+ 0x6d, 0x8d, 0xc8, 0xc4, 0xc5, 0x78, 0x17, 0x20,
+ 0x15, 0xc5, 0x52, 0x41, 0xcf, 0x5b, 0xd6, 0x7f,
+ 0x94, 0x02, 0x41, 0xe0, 0x40, 0x22, 0x03, 0x5e,
+ 0xd1, 0x53, 0xd4, 0x86, 0xd3, 0x2c, 0x9f, 0x0f,
+ 0x96, 0xe3, 0x6b, 0x9a, 0x76, 0x32, 0x06, 0x47,
+ 0x4b, 0x11, 0xb3, 0xdd, 0x03, 0x65, 0xbd, 0x9b,
+ 0x01, 0xda, 0x9c, 0xb9, 0x7e, 0x3f, 0x6a, 0xc4,
+ 0x7b, 0xea, 0xd4, 0x3c, 0xb9, 0xfb, 0x5c, 0x6b,
+ 0x64, 0x33, 0x52, 0xba, 0x64, 0x78, 0x8f, 0xa4,
+ 0xaf, 0x7a, 0x61, 0x8d, 0xbc, 0xc5, 0x73, 0xe9,
+ 0x6b, 0x58, 0x97, 0x4b, 0xbf, 0x63, 0x22, 0xd3,
+ 0x37, 0x02, 0x54, 0xc5, 0xb9, 0x16, 0x4a, 0xf0,
+ 0x19, 0xd8, 0x94, 0x57, 0xb8, 0x8a, 0xb3, 0x16,
+ 0x3b, 0xd0, 0x84, 0x8e, 0x67, 0xa6, 0xa3, 0x7d,
+ 0x78, 0xec, 0x00
+};
+static const u8 dec_assoc012[] __initconst = {
+ 0xb1, 0x69, 0x83, 0x87, 0x30, 0xaa, 0x5d, 0xb8,
+ 0x77, 0xe8, 0x21, 0xff, 0x06, 0x59, 0x35, 0xce,
+ 0x75, 0xfe, 0x38, 0xef, 0xb8, 0x91, 0x43, 0x8c,
+ 0xcf, 0x70, 0xdd, 0x0a, 0x68, 0xbf, 0xd4, 0xbc,
+ 0x16, 0x76, 0x99, 0x36, 0x1e, 0x58, 0x79, 0x5e,
+ 0xd4, 0x29, 0xf7, 0x33, 0x93, 0x48, 0xdb, 0x5f,
+ 0x01, 0xae, 0x9c, 0xb6, 0xe4, 0x88, 0x6d, 0x2b,
+ 0x76, 0x75, 0xe0, 0xf3, 0x74, 0xe2, 0xc9
+};
+static const u8 dec_nonce012[] __initconst = {
+ 0x05, 0xa3, 0x93, 0xed, 0x30, 0xc5, 0xa2, 0x06
+};
+static const u8 dec_key012[] __initconst = {
+ 0xb3, 0x35, 0x50, 0x03, 0x54, 0x2e, 0x40, 0x5e,
+ 0x8f, 0x59, 0x8e, 0xc5, 0x90, 0xd5, 0x27, 0x2d,
+ 0xba, 0x29, 0x2e, 0xcb, 0x1b, 0x70, 0x44, 0x1e,
+ 0x65, 0x91, 0x6e, 0x2a, 0x79, 0x22, 0xda, 0x64
+};
+
+static const u8 dec_input013[] __initconst = {
+ 0x52, 0x34, 0xb3, 0x65, 0x3b, 0xb7, 0xe5, 0xd3,
+ 0xab, 0x49, 0x17, 0x60, 0xd2, 0x52, 0x56, 0xdf,
+ 0xdf, 0x34, 0x56, 0x82, 0xe2, 0xbe, 0xe5, 0xe1,
+ 0x28, 0xd1, 0x4e, 0x5f, 0x4f, 0x01, 0x7d, 0x3f,
+ 0x99, 0x6b, 0x30, 0x6e, 0x1a, 0x7c, 0x4c, 0x8e,
+ 0x62, 0x81, 0xae, 0x86, 0x3f, 0x6b, 0xd0, 0xb5,
+ 0xa9, 0xcf, 0x50, 0xf1, 0x02, 0x12, 0xa0, 0x0b,
+ 0x24, 0xe9, 0xe6, 0x72, 0x89, 0x2c, 0x52, 0x1b,
+ 0x34, 0x38, 0xf8, 0x75, 0x5f, 0xa0, 0x74, 0xe2,
+ 0x99, 0xdd, 0xa6, 0x4b, 0x14, 0x50, 0x4e, 0xf1,
+ 0xbe, 0xd6, 0x9e, 0xdb, 0xb2, 0x24, 0x27, 0x74,
+ 0x12, 0x4a, 0x78, 0x78, 0x17, 0xa5, 0x58, 0x8e,
+ 0x2f, 0xf9, 0xf4, 0x8d, 0xee, 0x03, 0x88, 0xae,
+ 0xb8, 0x29, 0xa1, 0x2f, 0x4b, 0xee, 0x92, 0xbd,
+ 0x87, 0xb3, 0xce, 0x34, 0x21, 0x57, 0x46, 0x04,
+ 0x49, 0x0c, 0x80, 0xf2, 0x01, 0x13, 0xa1, 0x55,
+ 0xb3, 0xff, 0x44, 0x30, 0x3c, 0x1c, 0xd0, 0xef,
+ 0xbc, 0x18, 0x74, 0x26, 0xad, 0x41, 0x5b, 0x5b,
+ 0x3e, 0x9a, 0x7a, 0x46, 0x4f, 0x16, 0xd6, 0x74,
+ 0x5a, 0xb7, 0x3a, 0x28, 0x31, 0xd8, 0xae, 0x26,
+ 0xac, 0x50, 0x53, 0x86, 0xf2, 0x56, 0xd7, 0x3f,
+ 0x29, 0xbc, 0x45, 0x68, 0x8e, 0xcb, 0x98, 0x64,
+ 0xdd, 0xc9, 0xba, 0xb8, 0x4b, 0x7b, 0x82, 0xdd,
+ 0x14, 0xa7, 0xcb, 0x71, 0x72, 0x00, 0x5c, 0xad,
+ 0x7b, 0x6a, 0x89, 0xa4, 0x3d, 0xbf, 0xb5, 0x4b,
+ 0x3e, 0x7c, 0x5a, 0xcf, 0xb8, 0xa1, 0xc5, 0x6e,
+ 0xc8, 0xb6, 0x31, 0x57, 0x7b, 0xdf, 0xa5, 0x7e,
+ 0xb1, 0xd6, 0x42, 0x2a, 0x31, 0x36, 0xd1, 0xd0,
+ 0x3f, 0x7a, 0xe5, 0x94, 0xd6, 0x36, 0xa0, 0x6f,
+ 0xb7, 0x40, 0x7d, 0x37, 0xc6, 0x55, 0x7c, 0x50,
+ 0x40, 0x6d, 0x29, 0x89, 0xe3, 0x5a, 0xae, 0x97,
+ 0xe7, 0x44, 0x49, 0x6e, 0xbd, 0x81, 0x3d, 0x03,
+ 0x93, 0x06, 0x12, 0x06, 0xe2, 0x41, 0x12, 0x4a,
+ 0xf1, 0x6a, 0xa4, 0x58, 0xa2, 0xfb, 0xd2, 0x15,
+ 0xba, 0xc9, 0x79, 0xc9, 0xce, 0x5e, 0x13, 0xbb,
+ 0xf1, 0x09, 0x04, 0xcc, 0xfd, 0xe8, 0x51, 0x34,
+ 0x6a, 0xe8, 0x61, 0x88, 0xda, 0xed, 0x01, 0x47,
+ 0x84, 0xf5, 0x73, 0x25, 0xf9, 0x1c, 0x42, 0x86,
+ 0x07, 0xf3, 0x5b, 0x1a, 0x01, 0xb3, 0xeb, 0x24,
+ 0x32, 0x8d, 0xf6, 0xed, 0x7c, 0x4b, 0xeb, 0x3c,
+ 0x36, 0x42, 0x28, 0xdf, 0xdf, 0xb6, 0xbe, 0xd9,
+ 0x8c, 0x52, 0xd3, 0x2b, 0x08, 0x90, 0x8c, 0xe7,
+ 0x98, 0x31, 0xe2, 0x32, 0x8e, 0xfc, 0x11, 0x48,
+ 0x00, 0xa8, 0x6a, 0x42, 0x4a, 0x02, 0xc6, 0x4b,
+ 0x09, 0xf1, 0xe3, 0x49, 0xf3, 0x45, 0x1f, 0x0e,
+ 0xbc, 0x56, 0xe2, 0xe4, 0xdf, 0xfb, 0xeb, 0x61,
+ 0xfa, 0x24, 0xc1, 0x63, 0x75, 0xbb, 0x47, 0x75,
+ 0xaf, 0xe1, 0x53, 0x16, 0x96, 0x21, 0x85, 0x26,
+ 0x11, 0xb3, 0x76, 0xe3, 0x23, 0xa1, 0x6b, 0x74,
+ 0x37, 0xd0, 0xde, 0x06, 0x90, 0x71, 0x5d, 0x43,
+ 0x88, 0x9b, 0x00, 0x54, 0xa6, 0x75, 0x2f, 0xa1,
+ 0xc2, 0x0b, 0x73, 0x20, 0x1d, 0xb6, 0x21, 0x79,
+ 0x57, 0x3f, 0xfa, 0x09, 0xbe, 0x8a, 0x33, 0xc3,
+ 0x52, 0xf0, 0x1d, 0x82, 0x31, 0xd1, 0x55, 0xb5,
+ 0x6c, 0x99, 0x25, 0xcf, 0x5c, 0x32, 0xce, 0xe9,
+ 0x0d, 0xfa, 0x69, 0x2c, 0xd5, 0x0d, 0xc5, 0x6d,
+ 0x86, 0xd0, 0x0c, 0x3b, 0x06, 0x50, 0x79, 0xe8,
+ 0xc3, 0xae, 0x04, 0xe6, 0xcd, 0x51, 0xe4, 0x26,
+ 0x9b, 0x4f, 0x7e, 0xa6, 0x0f, 0xab, 0xd8, 0xe5,
+ 0xde, 0xa9, 0x00, 0x95, 0xbe, 0xa3, 0x9d, 0x5d,
+ 0xb2, 0x09, 0x70, 0x18, 0x1c, 0xf0, 0xac, 0x29,
+ 0x23, 0x02, 0x29, 0x28, 0xd2, 0x74, 0x35, 0x57,
+ 0x62, 0x0f, 0x24, 0xea, 0x5e, 0x33, 0xc2, 0x92,
+ 0xf3, 0x78, 0x4d, 0x30, 0x1e, 0xa1, 0x99, 0xa9,
+ 0x82, 0xb0, 0x42, 0x31, 0x8d, 0xad, 0x8a, 0xbc,
+ 0xfc, 0xd4, 0x57, 0x47, 0x3e, 0xb4, 0x50, 0xdd,
+ 0x6e, 0x2c, 0x80, 0x4d, 0x22, 0xf1, 0xfb, 0x57,
+ 0xc4, 0xdd, 0x17, 0xe1, 0x8a, 0x36, 0x4a, 0xb3,
+ 0x37, 0xca, 0xc9, 0x4e, 0xab, 0xd5, 0x69, 0xc4,
+ 0xf4, 0xbc, 0x0b, 0x3b, 0x44, 0x4b, 0x29, 0x9c,
+ 0xee, 0xd4, 0x35, 0x22, 0x21, 0xb0, 0x1f, 0x27,
+ 0x64, 0xa8, 0x51, 0x1b, 0xf0, 0x9f, 0x19, 0x5c,
+ 0xfb, 0x5a, 0x64, 0x74, 0x70, 0x45, 0x09, 0xf5,
+ 0x64, 0xfe, 0x1a, 0x2d, 0xc9, 0x14, 0x04, 0x14,
+ 0xcf, 0xd5, 0x7d, 0x60, 0xaf, 0x94, 0x39, 0x94,
+ 0xe2, 0x7d, 0x79, 0x82, 0xd0, 0x65, 0x3b, 0x6b,
+ 0x9c, 0x19, 0x84, 0xb4, 0x6d, 0xb3, 0x0c, 0x99,
+ 0xc0, 0x56, 0xa8, 0xbd, 0x73, 0xce, 0x05, 0x84,
+ 0x3e, 0x30, 0xaa, 0xc4, 0x9b, 0x1b, 0x04, 0x2a,
+ 0x9f, 0xd7, 0x43, 0x2b, 0x23, 0xdf, 0xbf, 0xaa,
+ 0xd5, 0xc2, 0x43, 0x2d, 0x70, 0xab, 0xdc, 0x75,
+ 0xad, 0xac, 0xf7, 0xc0, 0xbe, 0x67, 0xb2, 0x74,
+ 0xed, 0x67, 0x10, 0x4a, 0x92, 0x60, 0xc1, 0x40,
+ 0x50, 0x19, 0x8a, 0x8a, 0x8c, 0x09, 0x0e, 0x72,
+ 0xe1, 0x73, 0x5e, 0xe8, 0x41, 0x85, 0x63, 0x9f,
+ 0x3f, 0xd7, 0x7d, 0xc4, 0xfb, 0x22, 0x5d, 0x92,
+ 0x6c, 0xb3, 0x1e, 0xe2, 0x50, 0x2f, 0x82, 0xa8,
+ 0x28, 0xc0, 0xb5, 0xd7, 0x5f, 0x68, 0x0d, 0x2c,
+ 0x2d, 0xaf, 0x7e, 0xfa, 0x2e, 0x08, 0x0f, 0x1f,
+ 0x70, 0x9f, 0xe9, 0x19, 0x72, 0x55, 0xf8, 0xfb,
+ 0x51, 0xd2, 0x33, 0x5d, 0xa0, 0xd3, 0x2b, 0x0a,
+ 0x6c, 0xbc, 0x4e, 0xcf, 0x36, 0x4d, 0xdc, 0x3b,
+ 0xe9, 0x3e, 0x81, 0x7c, 0x61, 0xdb, 0x20, 0x2d,
+ 0x3a, 0xc3, 0xb3, 0x0c, 0x1e, 0x00, 0xb9, 0x7c,
+ 0xf5, 0xca, 0x10, 0x5f, 0x3a, 0x71, 0xb3, 0xe4,
+ 0x20, 0xdb, 0x0c, 0x2a, 0x98, 0x63, 0x45, 0x00,
+ 0x58, 0xf6, 0x68, 0xe4, 0x0b, 0xda, 0x13, 0x3b,
+ 0x60, 0x5c, 0x76, 0xdb, 0xb9, 0x97, 0x71, 0xe4,
+ 0xd9, 0xb7, 0xdb, 0xbd, 0x68, 0xc7, 0x84, 0x84,
+ 0xaa, 0x7c, 0x68, 0x62, 0x5e, 0x16, 0xfc, 0xba,
+ 0x72, 0xaa, 0x9a, 0xa9, 0xeb, 0x7c, 0x75, 0x47,
+ 0x97, 0x7e, 0xad, 0xe2, 0xd9, 0x91, 0xe8, 0xe4,
+ 0xa5, 0x31, 0xd7, 0x01, 0x8e, 0xa2, 0x11, 0x88,
+ 0x95, 0xb9, 0xf2, 0x9b, 0xd3, 0x7f, 0x1b, 0x81,
+ 0x22, 0xf7, 0x98, 0x60, 0x0a, 0x64, 0xa6, 0xc1,
+ 0xf6, 0x49, 0xc7, 0xe3, 0x07, 0x4d, 0x94, 0x7a,
+ 0xcf, 0x6e, 0x68, 0x0c, 0x1b, 0x3f, 0x6e, 0x2e,
+ 0xee, 0x92, 0xfa, 0x52, 0xb3, 0x59, 0xf8, 0xf1,
+ 0x8f, 0x6a, 0x66, 0xa3, 0x82, 0x76, 0x4a, 0x07,
+ 0x1a, 0xc7, 0xdd, 0xf5, 0xda, 0x9c, 0x3c, 0x24,
+ 0xbf, 0xfd, 0x42, 0xa1, 0x10, 0x64, 0x6a, 0x0f,
+ 0x89, 0xee, 0x36, 0xa5, 0xce, 0x99, 0x48, 0x6a,
+ 0xf0, 0x9f, 0x9e, 0x69, 0xa4, 0x40, 0x20, 0xe9,
+ 0x16, 0x15, 0xf7, 0xdb, 0x75, 0x02, 0xcb, 0xe9,
+ 0x73, 0x8b, 0x3b, 0x49, 0x2f, 0xf0, 0xaf, 0x51,
+ 0x06, 0x5c, 0xdf, 0x27, 0x27, 0x49, 0x6a, 0xd1,
+ 0xcc, 0xc7, 0xb5, 0x63, 0xb5, 0xfc, 0xb8, 0x5c,
+ 0x87, 0x7f, 0x84, 0xb4, 0xcc, 0x14, 0xa9, 0x53,
+ 0xda, 0xa4, 0x56, 0xf8, 0xb6, 0x1b, 0xcc, 0x40,
+ 0x27, 0x52, 0x06, 0x5a, 0x13, 0x81, 0xd7, 0x3a,
+ 0xd4, 0x3b, 0xfb, 0x49, 0x65, 0x31, 0x33, 0xb2,
+ 0xfa, 0xcd, 0xad, 0x58, 0x4e, 0x2b, 0xae, 0xd2,
+ 0x20, 0xfb, 0x1a, 0x48, 0xb4, 0x3f, 0x9a, 0xd8,
+ 0x7a, 0x35, 0x4a, 0xc8, 0xee, 0x88, 0x5e, 0x07,
+ 0x66, 0x54, 0xb9, 0xec, 0x9f, 0xa3, 0xe3, 0xb9,
+ 0x37, 0xaa, 0x49, 0x76, 0x31, 0xda, 0x74, 0x2d,
+ 0x3c, 0xa4, 0x65, 0x10, 0x32, 0x38, 0xf0, 0xde,
+ 0xd3, 0x99, 0x17, 0xaa, 0x71, 0xaa, 0x8f, 0x0f,
+ 0x8c, 0xaf, 0xa2, 0xf8, 0x5d, 0x64, 0xba, 0x1d,
+ 0xa3, 0xef, 0x96, 0x73, 0xe8, 0xa1, 0x02, 0x8d,
+ 0x0c, 0x6d, 0xb8, 0x06, 0x90, 0xb8, 0x08, 0x56,
+ 0x2c, 0xa7, 0x06, 0xc9, 0xc2, 0x38, 0xdb, 0x7c,
+ 0x63, 0xb1, 0x57, 0x8e, 0xea, 0x7c, 0x79, 0xf3,
+ 0x49, 0x1d, 0xfe, 0x9f, 0xf3, 0x6e, 0xb1, 0x1d,
+ 0xba, 0x19, 0x80, 0x1a, 0x0a, 0xd3, 0xb0, 0x26,
+ 0x21, 0x40, 0xb1, 0x7c, 0xf9, 0x4d, 0x8d, 0x10,
+ 0xc1, 0x7e, 0xf4, 0xf6, 0x3c, 0xa8, 0xfd, 0x7c,
+ 0xa3, 0x92, 0xb2, 0x0f, 0xaa, 0xcc, 0xa6, 0x11,
+ 0xfe, 0x04, 0xe3, 0xd1, 0x7a, 0x32, 0x89, 0xdf,
+ 0x0d, 0xc4, 0x8f, 0x79, 0x6b, 0xca, 0x16, 0x7c,
+ 0x6e, 0xf9, 0xad, 0x0f, 0xf6, 0xfe, 0x27, 0xdb,
+ 0xc4, 0x13, 0x70, 0xf1, 0x62, 0x1a, 0x4f, 0x79,
+ 0x40, 0xc9, 0x9b, 0x8b, 0x21, 0xea, 0x84, 0xfa,
+ 0xf5, 0xf1, 0x89, 0xce, 0xb7, 0x55, 0x0a, 0x80,
+ 0x39, 0x2f, 0x55, 0x36, 0x16, 0x9c, 0x7b, 0x08,
+ 0xbd, 0x87, 0x0d, 0xa5, 0x32, 0xf1, 0x52, 0x7c,
+ 0xe8, 0x55, 0x60, 0x5b, 0xd7, 0x69, 0xe4, 0xfc,
+ 0xfa, 0x12, 0x85, 0x96, 0xea, 0x50, 0x28, 0xab,
+ 0x8a, 0xf7, 0xbb, 0x0e, 0x53, 0x74, 0xca, 0xa6,
+ 0x27, 0x09, 0xc2, 0xb5, 0xde, 0x18, 0x14, 0xd9,
+ 0xea, 0xe5, 0x29, 0x1c, 0x40, 0x56, 0xcf, 0xd7,
+ 0xae, 0x05, 0x3f, 0x65, 0xaf, 0x05, 0x73, 0xe2,
+ 0x35, 0x96, 0x27, 0x07, 0x14, 0xc0, 0xad, 0x33,
+ 0xf1, 0xdc, 0x44, 0x7a, 0x89, 0x17, 0x77, 0xd2,
+ 0x9c, 0x58, 0x60, 0xf0, 0x3f, 0x7b, 0x2d, 0x2e,
+ 0x57, 0x95, 0x54, 0x87, 0xed, 0xf2, 0xc7, 0x4c,
+ 0xf0, 0xae, 0x56, 0x29, 0x19, 0x7d, 0x66, 0x4b,
+ 0x9b, 0x83, 0x84, 0x42, 0x3b, 0x01, 0x25, 0x66,
+ 0x8e, 0x02, 0xde, 0xb9, 0x83, 0x54, 0x19, 0xf6,
+ 0x9f, 0x79, 0x0d, 0x67, 0xc5, 0x1d, 0x7a, 0x44,
+ 0x02, 0x98, 0xa7, 0x16, 0x1c, 0x29, 0x0d, 0x74,
+ 0xff, 0x85, 0x40, 0x06, 0xef, 0x2c, 0xa9, 0xc6,
+ 0xf5, 0x53, 0x07, 0x06, 0xae, 0xe4, 0xfa, 0x5f,
+ 0xd8, 0x39, 0x4d, 0xf1, 0x9b, 0x6b, 0xd9, 0x24,
+ 0x84, 0xfe, 0x03, 0x4c, 0xb2, 0x3f, 0xdf, 0xa1,
+ 0x05, 0x9e, 0x50, 0x14, 0x5a, 0xd9, 0x1a, 0xa2,
+ 0xa7, 0xfa, 0xfa, 0x17, 0xf7, 0x78, 0xd6, 0xb5,
+ 0x92, 0x61, 0x91, 0xac, 0x36, 0xfa, 0x56, 0x0d,
+ 0x38, 0x32, 0x18, 0x85, 0x08, 0x58, 0x37, 0xf0,
+ 0x4b, 0xdb, 0x59, 0xe7, 0xa4, 0x34, 0xc0, 0x1b,
+ 0x01, 0xaf, 0x2d, 0xde, 0xa1, 0xaa, 0x5d, 0xd3,
+ 0xec, 0xe1, 0xd4, 0xf7, 0xe6, 0x54, 0x68, 0xf0,
+ 0x51, 0x97, 0xa7, 0x89, 0xea, 0x24, 0xad, 0xd3,
+ 0x6e, 0x47, 0x93, 0x8b, 0x4b, 0xb4, 0xf7, 0x1c,
+ 0x42, 0x06, 0x67, 0xe8, 0x99, 0xf6, 0xf5, 0x7b,
+ 0x85, 0xb5, 0x65, 0xb5, 0xb5, 0xd2, 0x37, 0xf5,
+ 0xf3, 0x02, 0xa6, 0x4d, 0x11, 0xa7, 0xdc, 0x51,
+ 0x09, 0x7f, 0xa0, 0xd8, 0x88, 0x1c, 0x13, 0x71,
+ 0xae, 0x9c, 0xb7, 0x7b, 0x34, 0xd6, 0x4e, 0x68,
+ 0x26, 0x83, 0x51, 0xaf, 0x1d, 0xee, 0x8b, 0xbb,
+ 0x69, 0x43, 0x2b, 0x9e, 0x8a, 0xbc, 0x02, 0x0e,
+ 0xa0, 0x1b, 0xe0, 0xa8, 0x5f, 0x6f, 0xaf, 0x1b,
+ 0x8f, 0xe7, 0x64, 0x71, 0x74, 0x11, 0x7e, 0xa8,
+ 0xd8, 0xf9, 0x97, 0x06, 0xc3, 0xb6, 0xfb, 0xfb,
+ 0xb7, 0x3d, 0x35, 0x9d, 0x3b, 0x52, 0xed, 0x54,
+ 0xca, 0xf4, 0x81, 0x01, 0x2d, 0x1b, 0xc3, 0xa7,
+ 0x00, 0x3d, 0x1a, 0x39, 0x54, 0xe1, 0xf6, 0xff,
+ 0xed, 0x6f, 0x0b, 0x5a, 0x68, 0xda, 0x58, 0xdd,
+ 0xa9, 0xcf, 0x5c, 0x4a, 0xe5, 0x09, 0x4e, 0xde,
+ 0x9d, 0xbc, 0x3e, 0xee, 0x5a, 0x00, 0x3b, 0x2c,
+ 0x87, 0x10, 0x65, 0x60, 0xdd, 0xd7, 0x56, 0xd1,
+ 0x4c, 0x64, 0x45, 0xe4, 0x21, 0xec, 0x78, 0xf8,
+ 0x25, 0x7a, 0x3e, 0x16, 0x5d, 0x09, 0x53, 0x14,
+ 0xbe, 0x4f, 0xae, 0x87, 0xd8, 0xd1, 0xaa, 0x3c,
+ 0xf6, 0x3e, 0xa4, 0x70, 0x8c, 0x5e, 0x70, 0xa4,
+ 0xb3, 0x6b, 0x66, 0x73, 0xd3, 0xbf, 0x31, 0x06,
+ 0x19, 0x62, 0x93, 0x15, 0xf2, 0x86, 0xe4, 0x52,
+ 0x7e, 0x53, 0x4c, 0x12, 0x38, 0xcc, 0x34, 0x7d,
+ 0x57, 0xf6, 0x42, 0x93, 0x8a, 0xc4, 0xee, 0x5c,
+ 0x8a, 0xe1, 0x52, 0x8f, 0x56, 0x64, 0xf6, 0xa6,
+ 0xd1, 0x91, 0x57, 0x70, 0xcd, 0x11, 0x76, 0xf5,
+ 0x59, 0x60, 0x60, 0x3c, 0xc1, 0xc3, 0x0b, 0x7f,
+ 0x58, 0x1a, 0x50, 0x91, 0xf1, 0x68, 0x8f, 0x6e,
+ 0x74, 0x74, 0xa8, 0x51, 0x0b, 0xf7, 0x7a, 0x98,
+ 0x37, 0xf2, 0x0a, 0x0e, 0xa4, 0x97, 0x04, 0xb8,
+ 0x9b, 0xfd, 0xa0, 0xea, 0xf7, 0x0d, 0xe1, 0xdb,
+ 0x03, 0xf0, 0x31, 0x29, 0xf8, 0xdd, 0x6b, 0x8b,
+ 0x5d, 0xd8, 0x59, 0xa9, 0x29, 0xcf, 0x9a, 0x79,
+ 0x89, 0x19, 0x63, 0x46, 0x09, 0x79, 0x6a, 0x11,
+ 0xda, 0x63, 0x68, 0x48, 0x77, 0x23, 0xfb, 0x7d,
+ 0x3a, 0x43, 0xcb, 0x02, 0x3b, 0x7a, 0x6d, 0x10,
+ 0x2a, 0x9e, 0xac, 0xf1, 0xd4, 0x19, 0xf8, 0x23,
+ 0x64, 0x1d, 0x2c, 0x5f, 0xf2, 0xb0, 0x5c, 0x23,
+ 0x27, 0xf7, 0x27, 0x30, 0x16, 0x37, 0xb1, 0x90,
+ 0xab, 0x38, 0xfb, 0x55, 0xcd, 0x78, 0x58, 0xd4,
+ 0x7d, 0x43, 0xf6, 0x45, 0x5e, 0x55, 0x8d, 0xb1,
+ 0x02, 0x65, 0x58, 0xb4, 0x13, 0x4b, 0x36, 0xf7,
+ 0xcc, 0xfe, 0x3d, 0x0b, 0x82, 0xe2, 0x12, 0x11,
+ 0xbb, 0xe6, 0xb8, 0x3a, 0x48, 0x71, 0xc7, 0x50,
+ 0x06, 0x16, 0x3a, 0xe6, 0x7c, 0x05, 0xc7, 0xc8,
+ 0x4d, 0x2f, 0x08, 0x6a, 0x17, 0x9a, 0x95, 0x97,
+ 0x50, 0x68, 0xdc, 0x28, 0x18, 0xc4, 0x61, 0x38,
+ 0xb9, 0xe0, 0x3e, 0x78, 0xdb, 0x29, 0xe0, 0x9f,
+ 0x52, 0xdd, 0xf8, 0x4f, 0x91, 0xc1, 0xd0, 0x33,
+ 0xa1, 0x7a, 0x8e, 0x30, 0x13, 0x82, 0x07, 0x9f,
+ 0xd3, 0x31, 0x0f, 0x23, 0xbe, 0x32, 0x5a, 0x75,
+ 0xcf, 0x96, 0xb2, 0xec, 0xb5, 0x32, 0xac, 0x21,
+ 0xd1, 0x82, 0x33, 0xd3, 0x15, 0x74, 0xbd, 0x90,
+ 0xf1, 0x2c, 0xe6, 0x5f, 0x8d, 0xe3, 0x02, 0xe8,
+ 0xe9, 0xc4, 0xca, 0x96, 0xeb, 0x0e, 0xbc, 0x91,
+ 0xf4, 0xb9, 0xea, 0xd9, 0x1b, 0x75, 0xbd, 0xe1,
+ 0xac, 0x2a, 0x05, 0x37, 0x52, 0x9b, 0x1b, 0x3f,
+ 0x5a, 0xdc, 0x21, 0xc3, 0x98, 0xbb, 0xaf, 0xa3,
+ 0xf2, 0x00, 0xbf, 0x0d, 0x30, 0x89, 0x05, 0xcc,
+ 0xa5, 0x76, 0xf5, 0x06, 0xf0, 0xc6, 0x54, 0x8a,
+ 0x5d, 0xd4, 0x1e, 0xc1, 0xf2, 0xce, 0xb0, 0x62,
+ 0xc8, 0xfc, 0x59, 0x42, 0x9a, 0x90, 0x60, 0x55,
+ 0xfe, 0x88, 0xa5, 0x8b, 0xb8, 0x33, 0x0c, 0x23,
+ 0x24, 0x0d, 0x15, 0x70, 0x37, 0x1e, 0x3d, 0xf6,
+ 0xd2, 0xea, 0x92, 0x10, 0xb2, 0xc4, 0x51, 0xac,
+ 0xf2, 0xac, 0xf3, 0x6b, 0x6c, 0xaa, 0xcf, 0x12,
+ 0xc5, 0x6c, 0x90, 0x50, 0xb5, 0x0c, 0xfc, 0x1a,
+ 0x15, 0x52, 0xe9, 0x26, 0xc6, 0x52, 0xa4, 0xe7,
+ 0x81, 0x69, 0xe1, 0xe7, 0x9e, 0x30, 0x01, 0xec,
+ 0x84, 0x89, 0xb2, 0x0d, 0x66, 0xdd, 0xce, 0x28,
+ 0x5c, 0xec, 0x98, 0x46, 0x68, 0x21, 0x9f, 0x88,
+ 0x3f, 0x1f, 0x42, 0x77, 0xce, 0xd0, 0x61, 0xd4,
+ 0x20, 0xa7, 0xff, 0x53, 0xad, 0x37, 0xd0, 0x17,
+ 0x35, 0xc9, 0xfc, 0xba, 0x0a, 0x78, 0x3f, 0xf2,
+ 0xcc, 0x86, 0x89, 0xe8, 0x4b, 0x3c, 0x48, 0x33,
+ 0x09, 0x7f, 0xc6, 0xc0, 0xdd, 0xb8, 0xfd, 0x7a,
+ 0x66, 0x66, 0x65, 0xeb, 0x47, 0xa7, 0x04, 0x28,
+ 0xa3, 0x19, 0x8e, 0xa9, 0xb1, 0x13, 0x67, 0x62,
+ 0x70, 0xcf, 0xd7
+};
+static const u8 dec_output013[] __initconst = {
+ 0x74, 0xa6, 0x3e, 0xe4, 0xb1, 0xcb, 0xaf, 0xb0,
+ 0x40, 0xe5, 0x0f, 0x9e, 0xf1, 0xf2, 0x89, 0xb5,
+ 0x42, 0x34, 0x8a, 0xa1, 0x03, 0xb7, 0xe9, 0x57,
+ 0x46, 0xbe, 0x20, 0xe4, 0x6e, 0xb0, 0xeb, 0xff,
+ 0xea, 0x07, 0x7e, 0xef, 0xe2, 0x55, 0x9f, 0xe5,
+ 0x78, 0x3a, 0xb7, 0x83, 0xc2, 0x18, 0x40, 0x7b,
+ 0xeb, 0xcd, 0x81, 0xfb, 0x90, 0x12, 0x9e, 0x46,
+ 0xa9, 0xd6, 0x4a, 0xba, 0xb0, 0x62, 0xdb, 0x6b,
+ 0x99, 0xc4, 0xdb, 0x54, 0x4b, 0xb8, 0xa5, 0x71,
+ 0xcb, 0xcd, 0x63, 0x32, 0x55, 0xfb, 0x31, 0xf0,
+ 0x38, 0xf5, 0xbe, 0x78, 0xe4, 0x45, 0xce, 0x1b,
+ 0x6a, 0x5b, 0x0e, 0xf4, 0x16, 0xe4, 0xb1, 0x3d,
+ 0xf6, 0x63, 0x7b, 0xa7, 0x0c, 0xde, 0x6f, 0x8f,
+ 0x74, 0xdf, 0xe0, 0x1e, 0x9d, 0xce, 0x8f, 0x24,
+ 0xef, 0x23, 0x35, 0x33, 0x7b, 0x83, 0x34, 0x23,
+ 0x58, 0x74, 0x14, 0x77, 0x1f, 0xc2, 0x4f, 0x4e,
+ 0xc6, 0x89, 0xf9, 0x52, 0x09, 0x37, 0x64, 0x14,
+ 0xc4, 0x01, 0x6b, 0x9d, 0x77, 0xe8, 0x90, 0x5d,
+ 0xa8, 0x4a, 0x2a, 0xef, 0x5c, 0x7f, 0xeb, 0xbb,
+ 0xb2, 0xc6, 0x93, 0x99, 0x66, 0xdc, 0x7f, 0xd4,
+ 0x9e, 0x2a, 0xca, 0x8d, 0xdb, 0xe7, 0x20, 0xcf,
+ 0xe4, 0x73, 0xae, 0x49, 0x7d, 0x64, 0x0f, 0x0e,
+ 0x28, 0x46, 0xa9, 0xa8, 0x32, 0xe4, 0x0e, 0xf6,
+ 0x51, 0x53, 0xb8, 0x3c, 0xb1, 0xff, 0xa3, 0x33,
+ 0x41, 0x75, 0xff, 0xf1, 0x6f, 0xf1, 0xfb, 0xbb,
+ 0x83, 0x7f, 0x06, 0x9b, 0xe7, 0x1b, 0x0a, 0xe0,
+ 0x5c, 0x33, 0x60, 0x5b, 0xdb, 0x5b, 0xed, 0xfe,
+ 0xa5, 0x16, 0x19, 0x72, 0xa3, 0x64, 0x23, 0x00,
+ 0x02, 0xc7, 0xf3, 0x6a, 0x81, 0x3e, 0x44, 0x1d,
+ 0x79, 0x15, 0x5f, 0x9a, 0xde, 0xe2, 0xfd, 0x1b,
+ 0x73, 0xc1, 0xbc, 0x23, 0xba, 0x31, 0xd2, 0x50,
+ 0xd5, 0xad, 0x7f, 0x74, 0xa7, 0xc9, 0xf8, 0x3e,
+ 0x2b, 0x26, 0x10, 0xf6, 0x03, 0x36, 0x74, 0xe4,
+ 0x0e, 0x6a, 0x72, 0xb7, 0x73, 0x0a, 0x42, 0x28,
+ 0xc2, 0xad, 0x5e, 0x03, 0xbe, 0xb8, 0x0b, 0xa8,
+ 0x5b, 0xd4, 0xb8, 0xba, 0x52, 0x89, 0xb1, 0x9b,
+ 0xc1, 0xc3, 0x65, 0x87, 0xed, 0xa5, 0xf4, 0x86,
+ 0xfd, 0x41, 0x80, 0x91, 0x27, 0x59, 0x53, 0x67,
+ 0x15, 0x78, 0x54, 0x8b, 0x2d, 0x3d, 0xc7, 0xff,
+ 0x02, 0x92, 0x07, 0x5f, 0x7a, 0x4b, 0x60, 0x59,
+ 0x3c, 0x6f, 0x5c, 0xd8, 0xec, 0x95, 0xd2, 0xfe,
+ 0xa0, 0x3b, 0xd8, 0x3f, 0xd1, 0x69, 0xa6, 0xd6,
+ 0x41, 0xb2, 0xf4, 0x4d, 0x12, 0xf4, 0x58, 0x3e,
+ 0x66, 0x64, 0x80, 0x31, 0x9b, 0xa8, 0x4c, 0x8b,
+ 0x07, 0xb2, 0xec, 0x66, 0x94, 0x66, 0x47, 0x50,
+ 0x50, 0x5f, 0x18, 0x0b, 0x0e, 0xd6, 0xc0, 0x39,
+ 0x21, 0x13, 0x9e, 0x33, 0xbc, 0x79, 0x36, 0x02,
+ 0x96, 0x70, 0xf0, 0x48, 0x67, 0x2f, 0x26, 0xe9,
+ 0x6d, 0x10, 0xbb, 0xd6, 0x3f, 0xd1, 0x64, 0x7a,
+ 0x2e, 0xbe, 0x0c, 0x61, 0xf0, 0x75, 0x42, 0x38,
+ 0x23, 0xb1, 0x9e, 0x9f, 0x7c, 0x67, 0x66, 0xd9,
+ 0x58, 0x9a, 0xf1, 0xbb, 0x41, 0x2a, 0x8d, 0x65,
+ 0x84, 0x94, 0xfc, 0xdc, 0x6a, 0x50, 0x64, 0xdb,
+ 0x56, 0x33, 0x76, 0x00, 0x10, 0xed, 0xbe, 0xd2,
+ 0x12, 0xf6, 0xf6, 0x1b, 0xa2, 0x16, 0xde, 0xae,
+ 0x31, 0x95, 0xdd, 0xb1, 0x08, 0x7e, 0x4e, 0xee,
+ 0xe7, 0xf9, 0xa5, 0xfb, 0x5b, 0x61, 0x43, 0x00,
+ 0x40, 0xf6, 0x7e, 0x02, 0x04, 0x32, 0x4e, 0x0c,
+ 0xe2, 0x66, 0x0d, 0xd7, 0x07, 0x98, 0x0e, 0xf8,
+ 0x72, 0x34, 0x6d, 0x95, 0x86, 0xd7, 0xcb, 0x31,
+ 0x54, 0x47, 0xd0, 0x38, 0x29, 0x9c, 0x5a, 0x68,
+ 0xd4, 0x87, 0x76, 0xc9, 0xe7, 0x7e, 0xe3, 0xf4,
+ 0x81, 0x6d, 0x18, 0xcb, 0xc9, 0x05, 0xaf, 0xa0,
+ 0xfb, 0x66, 0xf7, 0xf1, 0x1c, 0xc6, 0x14, 0x11,
+ 0x4f, 0x2b, 0x79, 0x42, 0x8b, 0xbc, 0xac, 0xe7,
+ 0x6c, 0xfe, 0x0f, 0x58, 0xe7, 0x7c, 0x78, 0x39,
+ 0x30, 0xb0, 0x66, 0x2c, 0x9b, 0x6d, 0x3a, 0xe1,
+ 0xcf, 0xc9, 0xa4, 0x0e, 0x6d, 0x6d, 0x8a, 0xa1,
+ 0x3a, 0xe7, 0x28, 0xd4, 0x78, 0x4c, 0xa6, 0xa2,
+ 0x2a, 0xa6, 0x03, 0x30, 0xd7, 0xa8, 0x25, 0x66,
+ 0x87, 0x2f, 0x69, 0x5c, 0x4e, 0xdd, 0xa5, 0x49,
+ 0x5d, 0x37, 0x4a, 0x59, 0xc4, 0xaf, 0x1f, 0xa2,
+ 0xe4, 0xf8, 0xa6, 0x12, 0x97, 0xd5, 0x79, 0xf5,
+ 0xe2, 0x4a, 0x2b, 0x5f, 0x61, 0xe4, 0x9e, 0xe3,
+ 0xee, 0xb8, 0xa7, 0x5b, 0x2f, 0xf4, 0x9e, 0x6c,
+ 0xfb, 0xd1, 0xc6, 0x56, 0x77, 0xba, 0x75, 0xaa,
+ 0x3d, 0x1a, 0xa8, 0x0b, 0xb3, 0x68, 0x24, 0x00,
+ 0x10, 0x7f, 0xfd, 0xd7, 0xa1, 0x8d, 0x83, 0x54,
+ 0x4f, 0x1f, 0xd8, 0x2a, 0xbe, 0x8a, 0x0c, 0x87,
+ 0xab, 0xa2, 0xde, 0xc3, 0x39, 0xbf, 0x09, 0x03,
+ 0xa5, 0xf3, 0x05, 0x28, 0xe1, 0xe1, 0xee, 0x39,
+ 0x70, 0x9c, 0xd8, 0x81, 0x12, 0x1e, 0x02, 0x40,
+ 0xd2, 0x6e, 0xf0, 0xeb, 0x1b, 0x3d, 0x22, 0xc6,
+ 0xe5, 0xe3, 0xb4, 0x5a, 0x98, 0xbb, 0xf0, 0x22,
+ 0x28, 0x8d, 0xe5, 0xd3, 0x16, 0x48, 0x24, 0xa5,
+ 0xe6, 0x66, 0x0c, 0xf9, 0x08, 0xf9, 0x7e, 0x1e,
+ 0xe1, 0x28, 0x26, 0x22, 0xc7, 0xc7, 0x0a, 0x32,
+ 0x47, 0xfa, 0xa3, 0xbe, 0x3c, 0xc4, 0xc5, 0x53,
+ 0x0a, 0xd5, 0x94, 0x4a, 0xd7, 0x93, 0xd8, 0x42,
+ 0x99, 0xb9, 0x0a, 0xdb, 0x56, 0xf7, 0xb9, 0x1c,
+ 0x53, 0x4f, 0xfa, 0xd3, 0x74, 0xad, 0xd9, 0x68,
+ 0xf1, 0x1b, 0xdf, 0x61, 0xc6, 0x5e, 0xa8, 0x48,
+ 0xfc, 0xd4, 0x4a, 0x4c, 0x3c, 0x32, 0xf7, 0x1c,
+ 0x96, 0x21, 0x9b, 0xf9, 0xa3, 0xcc, 0x5a, 0xce,
+ 0xd5, 0xd7, 0x08, 0x24, 0xf6, 0x1c, 0xfd, 0xdd,
+ 0x38, 0xc2, 0x32, 0xe9, 0xb8, 0xe7, 0xb6, 0xfa,
+ 0x9d, 0x45, 0x13, 0x2c, 0x83, 0xfd, 0x4a, 0x69,
+ 0x82, 0xcd, 0xdc, 0xb3, 0x76, 0x0c, 0x9e, 0xd8,
+ 0xf4, 0x1b, 0x45, 0x15, 0xb4, 0x97, 0xe7, 0x58,
+ 0x34, 0xe2, 0x03, 0x29, 0x5a, 0xbf, 0xb6, 0xe0,
+ 0x5d, 0x13, 0xd9, 0x2b, 0xb4, 0x80, 0xb2, 0x45,
+ 0x81, 0x6a, 0x2e, 0x6c, 0x89, 0x7d, 0xee, 0xbb,
+ 0x52, 0xdd, 0x1f, 0x18, 0xe7, 0x13, 0x6b, 0x33,
+ 0x0e, 0xea, 0x36, 0x92, 0x77, 0x7b, 0x6d, 0x9c,
+ 0x5a, 0x5f, 0x45, 0x7b, 0x7b, 0x35, 0x62, 0x23,
+ 0xd1, 0xbf, 0x0f, 0xd0, 0x08, 0x1b, 0x2b, 0x80,
+ 0x6b, 0x7e, 0xf1, 0x21, 0x47, 0xb0, 0x57, 0xd1,
+ 0x98, 0x72, 0x90, 0x34, 0x1c, 0x20, 0x04, 0xff,
+ 0x3d, 0x5c, 0xee, 0x0e, 0x57, 0x5f, 0x6f, 0x24,
+ 0x4e, 0x3c, 0xea, 0xfc, 0xa5, 0xa9, 0x83, 0xc9,
+ 0x61, 0xb4, 0x51, 0x24, 0xf8, 0x27, 0x5e, 0x46,
+ 0x8c, 0xb1, 0x53, 0x02, 0x96, 0x35, 0xba, 0xb8,
+ 0x4c, 0x71, 0xd3, 0x15, 0x59, 0x35, 0x22, 0x20,
+ 0xad, 0x03, 0x9f, 0x66, 0x44, 0x3b, 0x9c, 0x35,
+ 0x37, 0x1f, 0x9b, 0xbb, 0xf3, 0xdb, 0x35, 0x63,
+ 0x30, 0x64, 0xaa, 0xa2, 0x06, 0xa8, 0x5d, 0xbb,
+ 0xe1, 0x9f, 0x70, 0xec, 0x82, 0x11, 0x06, 0x36,
+ 0xec, 0x8b, 0x69, 0x66, 0x24, 0x44, 0xc9, 0x4a,
+ 0x57, 0xbb, 0x9b, 0x78, 0x13, 0xce, 0x9c, 0x0c,
+ 0xba, 0x92, 0x93, 0x63, 0xb8, 0xe2, 0x95, 0x0f,
+ 0x0f, 0x16, 0x39, 0x52, 0xfd, 0x3a, 0x6d, 0x02,
+ 0x4b, 0xdf, 0x13, 0xd3, 0x2a, 0x22, 0xb4, 0x03,
+ 0x7c, 0x54, 0x49, 0x96, 0x68, 0x54, 0x10, 0xfa,
+ 0xef, 0xaa, 0x6c, 0xe8, 0x22, 0xdc, 0x71, 0x16,
+ 0x13, 0x1a, 0xf6, 0x28, 0xe5, 0x6d, 0x77, 0x3d,
+ 0xcd, 0x30, 0x63, 0xb1, 0x70, 0x52, 0xa1, 0xc5,
+ 0x94, 0x5f, 0xcf, 0xe8, 0xb8, 0x26, 0x98, 0xf7,
+ 0x06, 0xa0, 0x0a, 0x70, 0xfa, 0x03, 0x80, 0xac,
+ 0xc1, 0xec, 0xd6, 0x4c, 0x54, 0xd7, 0xfe, 0x47,
+ 0xb6, 0x88, 0x4a, 0xf7, 0x71, 0x24, 0xee, 0xf3,
+ 0xd2, 0xc2, 0x4a, 0x7f, 0xfe, 0x61, 0xc7, 0x35,
+ 0xc9, 0x37, 0x67, 0xcb, 0x24, 0x35, 0xda, 0x7e,
+ 0xca, 0x5f, 0xf3, 0x8d, 0xd4, 0x13, 0x8e, 0xd6,
+ 0xcb, 0x4d, 0x53, 0x8f, 0x53, 0x1f, 0xc0, 0x74,
+ 0xf7, 0x53, 0xb9, 0x5e, 0x23, 0x37, 0xba, 0x6e,
+ 0xe3, 0x9d, 0x07, 0x55, 0x25, 0x7b, 0xe6, 0x2a,
+ 0x64, 0xd1, 0x32, 0xdd, 0x54, 0x1b, 0x4b, 0xc0,
+ 0xe1, 0xd7, 0x69, 0x58, 0xf8, 0x93, 0x29, 0xc4,
+ 0xdd, 0x23, 0x2f, 0xa5, 0xfc, 0x9d, 0x7e, 0xf8,
+ 0xd4, 0x90, 0xcd, 0x82, 0x55, 0xdc, 0x16, 0x16,
+ 0x9f, 0x07, 0x52, 0x9b, 0x9d, 0x25, 0xed, 0x32,
+ 0xc5, 0x7b, 0xdf, 0xf6, 0x83, 0x46, 0x3d, 0x65,
+ 0xb7, 0xef, 0x87, 0x7a, 0x12, 0x69, 0x8f, 0x06,
+ 0x7c, 0x51, 0x15, 0x4a, 0x08, 0xe8, 0xac, 0x9a,
+ 0x0c, 0x24, 0xa7, 0x27, 0xd8, 0x46, 0x2f, 0xe7,
+ 0x01, 0x0e, 0x1c, 0xc6, 0x91, 0xb0, 0x6e, 0x85,
+ 0x65, 0xf0, 0x29, 0x0d, 0x2e, 0x6b, 0x3b, 0xfb,
+ 0x4b, 0xdf, 0xe4, 0x80, 0x93, 0x03, 0x66, 0x46,
+ 0x3e, 0x8a, 0x6e, 0xf3, 0x5e, 0x4d, 0x62, 0x0e,
+ 0x49, 0x05, 0xaf, 0xd4, 0xf8, 0x21, 0x20, 0x61,
+ 0x1d, 0x39, 0x17, 0xf4, 0x61, 0x47, 0x95, 0xfb,
+ 0x15, 0x2e, 0xb3, 0x4f, 0xd0, 0x5d, 0xf5, 0x7d,
+ 0x40, 0xda, 0x90, 0x3c, 0x6b, 0xcb, 0x17, 0x00,
+ 0x13, 0x3b, 0x64, 0x34, 0x1b, 0xf0, 0xf2, 0xe5,
+ 0x3b, 0xb2, 0xc7, 0xd3, 0x5f, 0x3a, 0x44, 0xa6,
+ 0x9b, 0xb7, 0x78, 0x0e, 0x42, 0x5d, 0x4c, 0xc1,
+ 0xe9, 0xd2, 0xcb, 0xb7, 0x78, 0xd1, 0xfe, 0x9a,
+ 0xb5, 0x07, 0xe9, 0xe0, 0xbe, 0xe2, 0x8a, 0xa7,
+ 0x01, 0x83, 0x00, 0x8c, 0x5c, 0x08, 0xe6, 0x63,
+ 0x12, 0x92, 0xb7, 0xb7, 0xa6, 0x19, 0x7d, 0x38,
+ 0x13, 0x38, 0x92, 0x87, 0x24, 0xf9, 0x48, 0xb3,
+ 0x5e, 0x87, 0x6a, 0x40, 0x39, 0x5c, 0x3f, 0xed,
+ 0x8f, 0xee, 0xdb, 0x15, 0x82, 0x06, 0xda, 0x49,
+ 0x21, 0x2b, 0xb5, 0xbf, 0x32, 0x7c, 0x9f, 0x42,
+ 0x28, 0x63, 0xcf, 0xaf, 0x1e, 0xf8, 0xc6, 0xa0,
+ 0xd1, 0x02, 0x43, 0x57, 0x62, 0xec, 0x9b, 0x0f,
+ 0x01, 0x9e, 0x71, 0xd8, 0x87, 0x9d, 0x01, 0xc1,
+ 0x58, 0x77, 0xd9, 0xaf, 0xb1, 0x10, 0x7e, 0xdd,
+ 0xa6, 0x50, 0x96, 0xe5, 0xf0, 0x72, 0x00, 0x6d,
+ 0x4b, 0xf8, 0x2a, 0x8f, 0x19, 0xf3, 0x22, 0x88,
+ 0x11, 0x4a, 0x8b, 0x7c, 0xfd, 0xb7, 0xed, 0xe1,
+ 0xf6, 0x40, 0x39, 0xe0, 0xe9, 0xf6, 0x3d, 0x25,
+ 0xe6, 0x74, 0x3c, 0x58, 0x57, 0x7f, 0xe1, 0x22,
+ 0x96, 0x47, 0x31, 0x91, 0xba, 0x70, 0x85, 0x28,
+ 0x6b, 0x9f, 0x6e, 0x25, 0xac, 0x23, 0x66, 0x2f,
+ 0x29, 0x88, 0x28, 0xce, 0x8c, 0x5c, 0x88, 0x53,
+ 0xd1, 0x3b, 0xcc, 0x6a, 0x51, 0xb2, 0xe1, 0x28,
+ 0x3f, 0x91, 0xb4, 0x0d, 0x00, 0x3a, 0xe3, 0xf8,
+ 0xc3, 0x8f, 0xd7, 0x96, 0x62, 0x0e, 0x2e, 0xfc,
+ 0xc8, 0x6c, 0x77, 0xa6, 0x1d, 0x22, 0xc1, 0xb8,
+ 0xe6, 0x61, 0xd7, 0x67, 0x36, 0x13, 0x7b, 0xbb,
+ 0x9b, 0x59, 0x09, 0xa6, 0xdf, 0xf7, 0x6b, 0xa3,
+ 0x40, 0x1a, 0xf5, 0x4f, 0xb4, 0xda, 0xd3, 0xf3,
+ 0x81, 0x93, 0xc6, 0x18, 0xd9, 0x26, 0xee, 0xac,
+ 0xf0, 0xaa, 0xdf, 0xc5, 0x9c, 0xca, 0xc2, 0xa2,
+ 0xcc, 0x7b, 0x5c, 0x24, 0xb0, 0xbc, 0xd0, 0x6a,
+ 0x4d, 0x89, 0x09, 0xb8, 0x07, 0xfe, 0x87, 0xad,
+ 0x0a, 0xea, 0xb8, 0x42, 0xf9, 0x5e, 0xb3, 0x3e,
+ 0x36, 0x4c, 0xaf, 0x75, 0x9e, 0x1c, 0xeb, 0xbd,
+ 0xbc, 0xbb, 0x80, 0x40, 0xa7, 0x3a, 0x30, 0xbf,
+ 0xa8, 0x44, 0xf4, 0xeb, 0x38, 0xad, 0x29, 0xba,
+ 0x23, 0xed, 0x41, 0x0c, 0xea, 0xd2, 0xbb, 0x41,
+ 0x18, 0xd6, 0xb9, 0xba, 0x65, 0x2b, 0xa3, 0x91,
+ 0x6d, 0x1f, 0xa9, 0xf4, 0xd1, 0x25, 0x8d, 0x4d,
+ 0x38, 0xff, 0x64, 0xa0, 0xec, 0xde, 0xa6, 0xb6,
+ 0x79, 0xab, 0x8e, 0x33, 0x6c, 0x47, 0xde, 0xaf,
+ 0x94, 0xa4, 0xa5, 0x86, 0x77, 0x55, 0x09, 0x92,
+ 0x81, 0x31, 0x76, 0xc7, 0x34, 0x22, 0x89, 0x8e,
+ 0x3d, 0x26, 0x26, 0xd7, 0xfc, 0x1e, 0x16, 0x72,
+ 0x13, 0x33, 0x63, 0xd5, 0x22, 0xbe, 0xb8, 0x04,
+ 0x34, 0x84, 0x41, 0xbb, 0x80, 0xd0, 0x9f, 0x46,
+ 0x48, 0x07, 0xa7, 0xfc, 0x2b, 0x3a, 0x75, 0x55,
+ 0x8c, 0xc7, 0x6a, 0xbd, 0x7e, 0x46, 0x08, 0x84,
+ 0x0f, 0xd5, 0x74, 0xc0, 0x82, 0x8e, 0xaa, 0x61,
+ 0x05, 0x01, 0xb2, 0x47, 0x6e, 0x20, 0x6a, 0x2d,
+ 0x58, 0x70, 0x48, 0x32, 0xa7, 0x37, 0xd2, 0xb8,
+ 0x82, 0x1a, 0x51, 0xb9, 0x61, 0xdd, 0xfd, 0x9d,
+ 0x6b, 0x0e, 0x18, 0x97, 0xf8, 0x45, 0x5f, 0x87,
+ 0x10, 0xcf, 0x34, 0x72, 0x45, 0x26, 0x49, 0x70,
+ 0xe7, 0xa3, 0x78, 0xe0, 0x52, 0x89, 0x84, 0x94,
+ 0x83, 0x82, 0xc2, 0x69, 0x8f, 0xe3, 0xe1, 0x3f,
+ 0x60, 0x74, 0x88, 0xc4, 0xf7, 0x75, 0x2c, 0xfb,
+ 0xbd, 0xb6, 0xc4, 0x7e, 0x10, 0x0a, 0x6c, 0x90,
+ 0x04, 0x9e, 0xc3, 0x3f, 0x59, 0x7c, 0xce, 0x31,
+ 0x18, 0x60, 0x57, 0x73, 0x46, 0x94, 0x7d, 0x06,
+ 0xa0, 0x6d, 0x44, 0xec, 0xa2, 0x0a, 0x9e, 0x05,
+ 0x15, 0xef, 0xca, 0x5c, 0xbf, 0x00, 0xeb, 0xf7,
+ 0x3d, 0x32, 0xd4, 0xa5, 0xef, 0x49, 0x89, 0x5e,
+ 0x46, 0xb0, 0xa6, 0x63, 0x5b, 0x8a, 0x73, 0xae,
+ 0x6f, 0xd5, 0x9d, 0xf8, 0x4f, 0x40, 0xb5, 0xb2,
+ 0x6e, 0xd3, 0xb6, 0x01, 0xa9, 0x26, 0xa2, 0x21,
+ 0xcf, 0x33, 0x7a, 0x3a, 0xa4, 0x23, 0x13, 0xb0,
+ 0x69, 0x6a, 0xee, 0xce, 0xd8, 0x9d, 0x01, 0x1d,
+ 0x50, 0xc1, 0x30, 0x6c, 0xb1, 0xcd, 0xa0, 0xf0,
+ 0xf0, 0xa2, 0x64, 0x6f, 0xbb, 0xbf, 0x5e, 0xe6,
+ 0xab, 0x87, 0xb4, 0x0f, 0x4f, 0x15, 0xaf, 0xb5,
+ 0x25, 0xa1, 0xb2, 0xd0, 0x80, 0x2c, 0xfb, 0xf9,
+ 0xfe, 0xd2, 0x33, 0xbb, 0x76, 0xfe, 0x7c, 0xa8,
+ 0x66, 0xf7, 0xe7, 0x85, 0x9f, 0x1f, 0x85, 0x57,
+ 0x88, 0xe1, 0xe9, 0x63, 0xe4, 0xd8, 0x1c, 0xa1,
+ 0xfb, 0xda, 0x44, 0x05, 0x2e, 0x1d, 0x3a, 0x1c,
+ 0xff, 0xc8, 0x3b, 0xc0, 0xfe, 0xda, 0x22, 0x0b,
+ 0x43, 0xd6, 0x88, 0x39, 0x4c, 0x4a, 0xa6, 0x69,
+ 0x18, 0x93, 0x42, 0x4e, 0xb5, 0xcc, 0x66, 0x0d,
+ 0x09, 0xf8, 0x1e, 0x7c, 0xd3, 0x3c, 0x99, 0x0d,
+ 0x50, 0x1d, 0x62, 0xe9, 0x57, 0x06, 0xbf, 0x19,
+ 0x88, 0xdd, 0xad, 0x7b, 0x4f, 0xf9, 0xc7, 0x82,
+ 0x6d, 0x8d, 0xc8, 0xc4, 0xc5, 0x78, 0x17, 0x20,
+ 0x15, 0xc5, 0x52, 0x41, 0xcf, 0x5b, 0xd6, 0x7f,
+ 0x94, 0x02, 0x41, 0xe0, 0x40, 0x22, 0x03, 0x5e,
+ 0xd1, 0x53, 0xd4, 0x86, 0xd3, 0x2c, 0x9f, 0x0f,
+ 0x96, 0xe3, 0x6b, 0x9a, 0x76, 0x32, 0x06, 0x47,
+ 0x4b, 0x11, 0xb3, 0xdd, 0x03, 0x65, 0xbd, 0x9b,
+ 0x01, 0xda, 0x9c, 0xb9, 0x7e, 0x3f, 0x6a, 0xc4,
+ 0x7b, 0xea, 0xd4, 0x3c, 0xb9, 0xfb, 0x5c, 0x6b,
+ 0x64, 0x33, 0x52, 0xba, 0x64, 0x78, 0x8f, 0xa4,
+ 0xaf, 0x7a, 0x61, 0x8d, 0xbc, 0xc5, 0x73, 0xe9,
+ 0x6b, 0x58, 0x97, 0x4b, 0xbf, 0x63, 0x22, 0xd3,
+ 0x37, 0x02, 0x54, 0xc5, 0xb9, 0x16, 0x4a, 0xf0,
+ 0x19, 0xd8, 0x94, 0x57, 0xb8, 0x8a, 0xb3, 0x16,
+ 0x3b, 0xd0, 0x84, 0x8e, 0x67, 0xa6, 0xa3, 0x7d,
+ 0x78, 0xec, 0x00
+};
+static const u8 dec_assoc013[] __initconst = {
+ 0xb1, 0x69, 0x83, 0x87, 0x30, 0xaa, 0x5d, 0xb8,
+ 0x77, 0xe8, 0x21, 0xff, 0x06, 0x59, 0x35, 0xce,
+ 0x75, 0xfe, 0x38, 0xef, 0xb8, 0x91, 0x43, 0x8c,
+ 0xcf, 0x70, 0xdd, 0x0a, 0x68, 0xbf, 0xd4, 0xbc,
+ 0x16, 0x76, 0x99, 0x36, 0x1e, 0x58, 0x79, 0x5e,
+ 0xd4, 0x29, 0xf7, 0x33, 0x93, 0x48, 0xdb, 0x5f,
+ 0x01, 0xae, 0x9c, 0xb6, 0xe4, 0x88, 0x6d, 0x2b,
+ 0x76, 0x75, 0xe0, 0xf3, 0x74, 0xe2, 0xc9
+};
+static const u8 dec_nonce013[] __initconst = {
+ 0x05, 0xa3, 0x93, 0xed, 0x30, 0xc5, 0xa2, 0x06
+};
+static const u8 dec_key013[] __initconst = {
+ 0xb3, 0x35, 0x50, 0x03, 0x54, 0x2e, 0x40, 0x5e,
+ 0x8f, 0x59, 0x8e, 0xc5, 0x90, 0xd5, 0x27, 0x2d,
+ 0xba, 0x29, 0x2e, 0xcb, 0x1b, 0x70, 0x44, 0x1e,
+ 0x65, 0x91, 0x6e, 0x2a, 0x79, 0x22, 0xda, 0x64
+};
+
+static const struct chacha20poly1305_testvec
+chacha20poly1305_dec_vectors[] __initconst = {
+ { dec_input001, dec_output001, dec_assoc001, dec_nonce001, dec_key001,
+ sizeof(dec_input001), sizeof(dec_assoc001), sizeof(dec_nonce001) },
+ { dec_input002, dec_output002, dec_assoc002, dec_nonce002, dec_key002,
+ sizeof(dec_input002), sizeof(dec_assoc002), sizeof(dec_nonce002) },
+ { dec_input003, dec_output003, dec_assoc003, dec_nonce003, dec_key003,
+ sizeof(dec_input003), sizeof(dec_assoc003), sizeof(dec_nonce003) },
+ { dec_input004, dec_output004, dec_assoc004, dec_nonce004, dec_key004,
+ sizeof(dec_input004), sizeof(dec_assoc004), sizeof(dec_nonce004) },
+ { dec_input005, dec_output005, dec_assoc005, dec_nonce005, dec_key005,
+ sizeof(dec_input005), sizeof(dec_assoc005), sizeof(dec_nonce005) },
+ { dec_input006, dec_output006, dec_assoc006, dec_nonce006, dec_key006,
+ sizeof(dec_input006), sizeof(dec_assoc006), sizeof(dec_nonce006) },
+ { dec_input007, dec_output007, dec_assoc007, dec_nonce007, dec_key007,
+ sizeof(dec_input007), sizeof(dec_assoc007), sizeof(dec_nonce007) },
+ { dec_input008, dec_output008, dec_assoc008, dec_nonce008, dec_key008,
+ sizeof(dec_input008), sizeof(dec_assoc008), sizeof(dec_nonce008) },
+ { dec_input009, dec_output009, dec_assoc009, dec_nonce009, dec_key009,
+ sizeof(dec_input009), sizeof(dec_assoc009), sizeof(dec_nonce009) },
+ { dec_input010, dec_output010, dec_assoc010, dec_nonce010, dec_key010,
+ sizeof(dec_input010), sizeof(dec_assoc010), sizeof(dec_nonce010) },
+ { dec_input011, dec_output011, dec_assoc011, dec_nonce011, dec_key011,
+ sizeof(dec_input011), sizeof(dec_assoc011), sizeof(dec_nonce011) },
+ { dec_input012, dec_output012, dec_assoc012, dec_nonce012, dec_key012,
+ sizeof(dec_input012), sizeof(dec_assoc012), sizeof(dec_nonce012) },
+ { dec_input013, dec_output013, dec_assoc013, dec_nonce013, dec_key013,
+ sizeof(dec_input013), sizeof(dec_assoc013), sizeof(dec_nonce013),
+ true }
+};
+
+static const u8 xenc_input001[] __initconst = {
+ 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, 0x74,
+ 0x2d, 0x44, 0x72, 0x61, 0x66, 0x74, 0x73, 0x20,
+ 0x61, 0x72, 0x65, 0x20, 0x64, 0x72, 0x61, 0x66,
+ 0x74, 0x20, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65,
+ 0x6e, 0x74, 0x73, 0x20, 0x76, 0x61, 0x6c, 0x69,
+ 0x64, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x61, 0x20,
+ 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x20,
+ 0x6f, 0x66, 0x20, 0x73, 0x69, 0x78, 0x20, 0x6d,
+ 0x6f, 0x6e, 0x74, 0x68, 0x73, 0x20, 0x61, 0x6e,
+ 0x64, 0x20, 0x6d, 0x61, 0x79, 0x20, 0x62, 0x65,
+ 0x20, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64,
+ 0x2c, 0x20, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63,
+ 0x65, 0x64, 0x2c, 0x20, 0x6f, 0x72, 0x20, 0x6f,
+ 0x62, 0x73, 0x6f, 0x6c, 0x65, 0x74, 0x65, 0x64,
+ 0x20, 0x62, 0x79, 0x20, 0x6f, 0x74, 0x68, 0x65,
+ 0x72, 0x20, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65,
+ 0x6e, 0x74, 0x73, 0x20, 0x61, 0x74, 0x20, 0x61,
+ 0x6e, 0x79, 0x20, 0x74, 0x69, 0x6d, 0x65, 0x2e,
+ 0x20, 0x49, 0x74, 0x20, 0x69, 0x73, 0x20, 0x69,
+ 0x6e, 0x61, 0x70, 0x70, 0x72, 0x6f, 0x70, 0x72,
+ 0x69, 0x61, 0x74, 0x65, 0x20, 0x74, 0x6f, 0x20,
+ 0x75, 0x73, 0x65, 0x20, 0x49, 0x6e, 0x74, 0x65,
+ 0x72, 0x6e, 0x65, 0x74, 0x2d, 0x44, 0x72, 0x61,
+ 0x66, 0x74, 0x73, 0x20, 0x61, 0x73, 0x20, 0x72,
+ 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65,
+ 0x20, 0x6d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61,
+ 0x6c, 0x20, 0x6f, 0x72, 0x20, 0x74, 0x6f, 0x20,
+ 0x63, 0x69, 0x74, 0x65, 0x20, 0x74, 0x68, 0x65,
+ 0x6d, 0x20, 0x6f, 0x74, 0x68, 0x65, 0x72, 0x20,
+ 0x74, 0x68, 0x61, 0x6e, 0x20, 0x61, 0x73, 0x20,
+ 0x2f, 0xe2, 0x80, 0x9c, 0x77, 0x6f, 0x72, 0x6b,
+ 0x20, 0x69, 0x6e, 0x20, 0x70, 0x72, 0x6f, 0x67,
+ 0x72, 0x65, 0x73, 0x73, 0x2e, 0x2f, 0xe2, 0x80,
+ 0x9d
+};
+static const u8 xenc_output001[] __initconst = {
+ 0x1a, 0x6e, 0x3a, 0xd9, 0xfd, 0x41, 0x3f, 0x77,
+ 0x54, 0x72, 0x0a, 0x70, 0x9a, 0xa0, 0x29, 0x92,
+ 0x2e, 0xed, 0x93, 0xcf, 0x0f, 0x71, 0x88, 0x18,
+ 0x7a, 0x9d, 0x2d, 0x24, 0xe0, 0xf5, 0xea, 0x3d,
+ 0x55, 0x64, 0xd7, 0xad, 0x2a, 0x1a, 0x1f, 0x7e,
+ 0x86, 0x6d, 0xb0, 0xce, 0x80, 0x41, 0x72, 0x86,
+ 0x26, 0xee, 0x84, 0xd7, 0xef, 0x82, 0x9e, 0xe2,
+ 0x60, 0x9d, 0x5a, 0xfc, 0xf0, 0xe4, 0x19, 0x85,
+ 0xea, 0x09, 0xc6, 0xfb, 0xb3, 0xa9, 0x50, 0x09,
+ 0xec, 0x5e, 0x11, 0x90, 0xa1, 0xc5, 0x4e, 0x49,
+ 0xef, 0x50, 0xd8, 0x8f, 0xe0, 0x78, 0xd7, 0xfd,
+ 0xb9, 0x3b, 0xc9, 0xf2, 0x91, 0xc8, 0x25, 0xc8,
+ 0xa7, 0x63, 0x60, 0xce, 0x10, 0xcd, 0xc6, 0x7f,
+ 0xf8, 0x16, 0xf8, 0xe1, 0x0a, 0xd9, 0xde, 0x79,
+ 0x50, 0x33, 0xf2, 0x16, 0x0f, 0x17, 0xba, 0xb8,
+ 0x5d, 0xd8, 0xdf, 0x4e, 0x51, 0xa8, 0x39, 0xd0,
+ 0x85, 0xca, 0x46, 0x6a, 0x10, 0xa7, 0xa3, 0x88,
+ 0xef, 0x79, 0xb9, 0xf8, 0x24, 0xf3, 0xe0, 0x71,
+ 0x7b, 0x76, 0x28, 0x46, 0x3a, 0x3a, 0x1b, 0x91,
+ 0xb6, 0xd4, 0x3e, 0x23, 0xe5, 0x44, 0x15, 0xbf,
+ 0x60, 0x43, 0x9d, 0xa4, 0xbb, 0xd5, 0x5f, 0x89,
+ 0xeb, 0xef, 0x8e, 0xfd, 0xdd, 0xb4, 0x0d, 0x46,
+ 0xf0, 0x69, 0x23, 0x63, 0xae, 0x94, 0xf5, 0x5e,
+ 0xa5, 0xad, 0x13, 0x1c, 0x41, 0x76, 0xe6, 0x90,
+ 0xd6, 0x6d, 0xa2, 0x8f, 0x97, 0x4c, 0xa8, 0x0b,
+ 0xcf, 0x8d, 0x43, 0x2b, 0x9c, 0x9b, 0xc5, 0x58,
+ 0xa5, 0xb6, 0x95, 0x9a, 0xbf, 0x81, 0xc6, 0x54,
+ 0xc9, 0x66, 0x0c, 0xe5, 0x4f, 0x6a, 0x53, 0xa1,
+ 0xe5, 0x0c, 0xba, 0x31, 0xde, 0x34, 0x64, 0x73,
+ 0x8a, 0x3b, 0xbd, 0x92, 0x01, 0xdb, 0x71, 0x69,
+ 0xf3, 0x58, 0x99, 0xbc, 0xd1, 0xcb, 0x4a, 0x05,
+ 0xe2, 0x58, 0x9c, 0x25, 0x17, 0xcd, 0xdc, 0x83,
+ 0xb7, 0xff, 0xfb, 0x09, 0x61, 0xad, 0xbf, 0x13,
+ 0x5b, 0x5e, 0xed, 0x46, 0x82, 0x6f, 0x22, 0xd8,
+ 0x93, 0xa6, 0x85, 0x5b, 0x40, 0x39, 0x5c, 0xc5,
+ 0x9c
+};
+static const u8 xenc_assoc001[] __initconst = {
+ 0xf3, 0x33, 0x88, 0x86, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x4e, 0x91
+};
+static const u8 xenc_nonce001[] __initconst = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17
+};
+static const u8 xenc_key001[] __initconst = {
+ 0x1c, 0x92, 0x40, 0xa5, 0xeb, 0x55, 0xd3, 0x8a,
+ 0xf3, 0x33, 0x88, 0x86, 0x04, 0xf6, 0xb5, 0xf0,
+ 0x47, 0x39, 0x17, 0xc1, 0x40, 0x2b, 0x80, 0x09,
+ 0x9d, 0xca, 0x5c, 0xbc, 0x20, 0x70, 0x75, 0xc0
+};
+
+static const struct chacha20poly1305_testvec
+xchacha20poly1305_enc_vectors[] __initconst = {
+ { xenc_input001, xenc_output001, xenc_assoc001, xenc_nonce001, xenc_key001,
+ sizeof(xenc_input001), sizeof(xenc_assoc001), sizeof(xenc_nonce001) }
+};
+
+static const u8 xdec_input001[] __initconst = {
+ 0x1a, 0x6e, 0x3a, 0xd9, 0xfd, 0x41, 0x3f, 0x77,
+ 0x54, 0x72, 0x0a, 0x70, 0x9a, 0xa0, 0x29, 0x92,
+ 0x2e, 0xed, 0x93, 0xcf, 0x0f, 0x71, 0x88, 0x18,
+ 0x7a, 0x9d, 0x2d, 0x24, 0xe0, 0xf5, 0xea, 0x3d,
+ 0x55, 0x64, 0xd7, 0xad, 0x2a, 0x1a, 0x1f, 0x7e,
+ 0x86, 0x6d, 0xb0, 0xce, 0x80, 0x41, 0x72, 0x86,
+ 0x26, 0xee, 0x84, 0xd7, 0xef, 0x82, 0x9e, 0xe2,
+ 0x60, 0x9d, 0x5a, 0xfc, 0xf0, 0xe4, 0x19, 0x85,
+ 0xea, 0x09, 0xc6, 0xfb, 0xb3, 0xa9, 0x50, 0x09,
+ 0xec, 0x5e, 0x11, 0x90, 0xa1, 0xc5, 0x4e, 0x49,
+ 0xef, 0x50, 0xd8, 0x8f, 0xe0, 0x78, 0xd7, 0xfd,
+ 0xb9, 0x3b, 0xc9, 0xf2, 0x91, 0xc8, 0x25, 0xc8,
+ 0xa7, 0x63, 0x60, 0xce, 0x10, 0xcd, 0xc6, 0x7f,
+ 0xf8, 0x16, 0xf8, 0xe1, 0x0a, 0xd9, 0xde, 0x79,
+ 0x50, 0x33, 0xf2, 0x16, 0x0f, 0x17, 0xba, 0xb8,
+ 0x5d, 0xd8, 0xdf, 0x4e, 0x51, 0xa8, 0x39, 0xd0,
+ 0x85, 0xca, 0x46, 0x6a, 0x10, 0xa7, 0xa3, 0x88,
+ 0xef, 0x79, 0xb9, 0xf8, 0x24, 0xf3, 0xe0, 0x71,
+ 0x7b, 0x76, 0x28, 0x46, 0x3a, 0x3a, 0x1b, 0x91,
+ 0xb6, 0xd4, 0x3e, 0x23, 0xe5, 0x44, 0x15, 0xbf,
+ 0x60, 0x43, 0x9d, 0xa4, 0xbb, 0xd5, 0x5f, 0x89,
+ 0xeb, 0xef, 0x8e, 0xfd, 0xdd, 0xb4, 0x0d, 0x46,
+ 0xf0, 0x69, 0x23, 0x63, 0xae, 0x94, 0xf5, 0x5e,
+ 0xa5, 0xad, 0x13, 0x1c, 0x41, 0x76, 0xe6, 0x90,
+ 0xd6, 0x6d, 0xa2, 0x8f, 0x97, 0x4c, 0xa8, 0x0b,
+ 0xcf, 0x8d, 0x43, 0x2b, 0x9c, 0x9b, 0xc5, 0x58,
+ 0xa5, 0xb6, 0x95, 0x9a, 0xbf, 0x81, 0xc6, 0x54,
+ 0xc9, 0x66, 0x0c, 0xe5, 0x4f, 0x6a, 0x53, 0xa1,
+ 0xe5, 0x0c, 0xba, 0x31, 0xde, 0x34, 0x64, 0x73,
+ 0x8a, 0x3b, 0xbd, 0x92, 0x01, 0xdb, 0x71, 0x69,
+ 0xf3, 0x58, 0x99, 0xbc, 0xd1, 0xcb, 0x4a, 0x05,
+ 0xe2, 0x58, 0x9c, 0x25, 0x17, 0xcd, 0xdc, 0x83,
+ 0xb7, 0xff, 0xfb, 0x09, 0x61, 0xad, 0xbf, 0x13,
+ 0x5b, 0x5e, 0xed, 0x46, 0x82, 0x6f, 0x22, 0xd8,
+ 0x93, 0xa6, 0x85, 0x5b, 0x40, 0x39, 0x5c, 0xc5,
+ 0x9c
+};
+static const u8 xdec_output001[] __initconst = {
+ 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, 0x74,
+ 0x2d, 0x44, 0x72, 0x61, 0x66, 0x74, 0x73, 0x20,
+ 0x61, 0x72, 0x65, 0x20, 0x64, 0x72, 0x61, 0x66,
+ 0x74, 0x20, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65,
+ 0x6e, 0x74, 0x73, 0x20, 0x76, 0x61, 0x6c, 0x69,
+ 0x64, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x61, 0x20,
+ 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x20,
+ 0x6f, 0x66, 0x20, 0x73, 0x69, 0x78, 0x20, 0x6d,
+ 0x6f, 0x6e, 0x74, 0x68, 0x73, 0x20, 0x61, 0x6e,
+ 0x64, 0x20, 0x6d, 0x61, 0x79, 0x20, 0x62, 0x65,
+ 0x20, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64,
+ 0x2c, 0x20, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63,
+ 0x65, 0x64, 0x2c, 0x20, 0x6f, 0x72, 0x20, 0x6f,
+ 0x62, 0x73, 0x6f, 0x6c, 0x65, 0x74, 0x65, 0x64,
+ 0x20, 0x62, 0x79, 0x20, 0x6f, 0x74, 0x68, 0x65,
+ 0x72, 0x20, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65,
+ 0x6e, 0x74, 0x73, 0x20, 0x61, 0x74, 0x20, 0x61,
+ 0x6e, 0x79, 0x20, 0x74, 0x69, 0x6d, 0x65, 0x2e,
+ 0x20, 0x49, 0x74, 0x20, 0x69, 0x73, 0x20, 0x69,
+ 0x6e, 0x61, 0x70, 0x70, 0x72, 0x6f, 0x70, 0x72,
+ 0x69, 0x61, 0x74, 0x65, 0x20, 0x74, 0x6f, 0x20,
+ 0x75, 0x73, 0x65, 0x20, 0x49, 0x6e, 0x74, 0x65,
+ 0x72, 0x6e, 0x65, 0x74, 0x2d, 0x44, 0x72, 0x61,
+ 0x66, 0x74, 0x73, 0x20, 0x61, 0x73, 0x20, 0x72,
+ 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65,
+ 0x20, 0x6d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61,
+ 0x6c, 0x20, 0x6f, 0x72, 0x20, 0x74, 0x6f, 0x20,
+ 0x63, 0x69, 0x74, 0x65, 0x20, 0x74, 0x68, 0x65,
+ 0x6d, 0x20, 0x6f, 0x74, 0x68, 0x65, 0x72, 0x20,
+ 0x74, 0x68, 0x61, 0x6e, 0x20, 0x61, 0x73, 0x20,
+ 0x2f, 0xe2, 0x80, 0x9c, 0x77, 0x6f, 0x72, 0x6b,
+ 0x20, 0x69, 0x6e, 0x20, 0x70, 0x72, 0x6f, 0x67,
+ 0x72, 0x65, 0x73, 0x73, 0x2e, 0x2f, 0xe2, 0x80,
+ 0x9d
+};
+static const u8 xdec_assoc001[] __initconst = {
+ 0xf3, 0x33, 0x88, 0x86, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x4e, 0x91
+};
+static const u8 xdec_nonce001[] __initconst = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17
+};
+static const u8 xdec_key001[] __initconst = {
+ 0x1c, 0x92, 0x40, 0xa5, 0xeb, 0x55, 0xd3, 0x8a,
+ 0xf3, 0x33, 0x88, 0x86, 0x04, 0xf6, 0xb5, 0xf0,
+ 0x47, 0x39, 0x17, 0xc1, 0x40, 0x2b, 0x80, 0x09,
+ 0x9d, 0xca, 0x5c, 0xbc, 0x20, 0x70, 0x75, 0xc0
+};
+
+static const struct chacha20poly1305_testvec
+xchacha20poly1305_dec_vectors[] __initconst = {
+ { xdec_input001, xdec_output001, xdec_assoc001, xdec_nonce001, xdec_key001,
+ sizeof(xdec_input001), sizeof(xdec_assoc001), sizeof(xdec_nonce001) }
+};
+
+static void __init
+chacha20poly1305_selftest_encrypt(u8 *dst, const u8 *src, const size_t src_len,
+ const u8 *ad, const size_t ad_len,
+ const u8 *nonce, const size_t nonce_len,
+ const u8 key[CHACHA20POLY1305_KEY_SIZE])
+{
+ if (nonce_len == 8)
+ chacha20poly1305_encrypt(dst, src, src_len, ad, ad_len,
+ get_unaligned_le64(nonce), key);
+ else
+ BUG();
+}
+
+static bool __init
+decryption_success(bool func_ret, bool expect_failure, int memcmp_result)
+{
+ if (expect_failure)
+ return !func_ret;
+ return func_ret && !memcmp_result;
+}
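
decryption_success() inverts the expectation for vectors whose failure member
is set (the lone trailing "true" in chacha20poly1305_dec_vectors[] above), so
dec vector 13 passes only if chacha20poly1305_decrypt() rejects it.
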
+
+bool __init chacha20poly1305_selftest(void)
+{
+ enum { MAXIMUM_TEST_BUFFER_LEN = 1UL << 12 };
+ size_t i;
+ u8 *computed_output = NULL, *heap_src = NULL;
+ struct scatterlist sg_src;
+ bool success = true, ret;
+
+ heap_src = kmalloc(MAXIMUM_TEST_BUFFER_LEN, GFP_KERNEL);
+ computed_output = kmalloc(MAXIMUM_TEST_BUFFER_LEN, GFP_KERNEL);
+ if (!heap_src || !computed_output) {
+ pr_err("chacha20poly1305 self-test malloc: FAIL\n");
+ success = false;
+ goto out;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(chacha20poly1305_enc_vectors); ++i) {
+ memset(computed_output, 0, MAXIMUM_TEST_BUFFER_LEN);
+ chacha20poly1305_selftest_encrypt(computed_output,
+ chacha20poly1305_enc_vectors[i].input,
+ chacha20poly1305_enc_vectors[i].ilen,
+ chacha20poly1305_enc_vectors[i].assoc,
+ chacha20poly1305_enc_vectors[i].alen,
+ chacha20poly1305_enc_vectors[i].nonce,
+ chacha20poly1305_enc_vectors[i].nlen,
+ chacha20poly1305_enc_vectors[i].key);
+ if (memcmp(computed_output,
+ chacha20poly1305_enc_vectors[i].output,
+ chacha20poly1305_enc_vectors[i].ilen +
+ POLY1305_DIGEST_SIZE)) {
+ pr_err("chacha20poly1305 encryption self-test %zu: FAIL\n",
+ i + 1);
+ success = false;
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(chacha20poly1305_enc_vectors); ++i) {
+ if (chacha20poly1305_enc_vectors[i].nlen != 8)
+ continue;
+ memcpy(heap_src, chacha20poly1305_enc_vectors[i].input,
+ chacha20poly1305_enc_vectors[i].ilen);
+ sg_init_one(&sg_src, heap_src,
+ chacha20poly1305_enc_vectors[i].ilen + POLY1305_DIGEST_SIZE);
+ chacha20poly1305_encrypt_sg_inplace(&sg_src,
+ chacha20poly1305_enc_vectors[i].ilen,
+ chacha20poly1305_enc_vectors[i].assoc,
+ chacha20poly1305_enc_vectors[i].alen,
+ get_unaligned_le64(chacha20poly1305_enc_vectors[i].nonce),
+ chacha20poly1305_enc_vectors[i].key);
+ if (memcmp(heap_src,
+ chacha20poly1305_enc_vectors[i].output,
+ chacha20poly1305_enc_vectors[i].ilen +
+ POLY1305_DIGEST_SIZE)) {
+ pr_err("chacha20poly1305 sg encryption self-test %zu: FAIL\n",
+ i + 1);
+ success = false;
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(chacha20poly1305_dec_vectors); ++i) {
+ memset(computed_output, 0, MAXIMUM_TEST_BUFFER_LEN);
+ ret = chacha20poly1305_decrypt(computed_output,
+ chacha20poly1305_dec_vectors[i].input,
+ chacha20poly1305_dec_vectors[i].ilen,
+ chacha20poly1305_dec_vectors[i].assoc,
+ chacha20poly1305_dec_vectors[i].alen,
+ get_unaligned_le64(chacha20poly1305_dec_vectors[i].nonce),
+ chacha20poly1305_dec_vectors[i].key);
+ if (!decryption_success(ret,
+ chacha20poly1305_dec_vectors[i].failure,
+ memcmp(computed_output,
+ chacha20poly1305_dec_vectors[i].output,
+ chacha20poly1305_dec_vectors[i].ilen -
+ POLY1305_DIGEST_SIZE))) {
+ pr_err("chacha20poly1305 decryption self-test %zu: FAIL\n",
+ i + 1);
+ success = false;
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(chacha20poly1305_dec_vectors); ++i) {
+ memcpy(heap_src, chacha20poly1305_dec_vectors[i].input,
+ chacha20poly1305_dec_vectors[i].ilen);
+ sg_init_one(&sg_src, heap_src,
+ chacha20poly1305_dec_vectors[i].ilen);
+ ret = chacha20poly1305_decrypt_sg_inplace(&sg_src,
+ chacha20poly1305_dec_vectors[i].ilen,
+ chacha20poly1305_dec_vectors[i].assoc,
+ chacha20poly1305_dec_vectors[i].alen,
+ get_unaligned_le64(chacha20poly1305_dec_vectors[i].nonce),
+ chacha20poly1305_dec_vectors[i].key);
+ if (!decryption_success(ret,
+ chacha20poly1305_dec_vectors[i].failure,
+ memcmp(heap_src, chacha20poly1305_dec_vectors[i].output,
+ chacha20poly1305_dec_vectors[i].ilen -
+ POLY1305_DIGEST_SIZE))) {
+ pr_err("chacha20poly1305 sg decryption self-test %zu: FAIL\n",
+ i + 1);
+ success = false;
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(xchacha20poly1305_enc_vectors); ++i) {
+ memset(computed_output, 0, MAXIMUM_TEST_BUFFER_LEN);
+ xchacha20poly1305_encrypt(computed_output,
+ xchacha20poly1305_enc_vectors[i].input,
+ xchacha20poly1305_enc_vectors[i].ilen,
+ xchacha20poly1305_enc_vectors[i].assoc,
+ xchacha20poly1305_enc_vectors[i].alen,
+ xchacha20poly1305_enc_vectors[i].nonce,
+ xchacha20poly1305_enc_vectors[i].key);
+ if (memcmp(computed_output,
+ xchacha20poly1305_enc_vectors[i].output,
+ xchacha20poly1305_enc_vectors[i].ilen +
+ POLY1305_DIGEST_SIZE)) {
+ pr_err("xchacha20poly1305 encryption self-test %zu: FAIL\n",
+ i + 1);
+ success = false;
+ }
+ }
+ for (i = 0; i < ARRAY_SIZE(xchacha20poly1305_dec_vectors); ++i) {
+ memset(computed_output, 0, MAXIMUM_TEST_BUFFER_LEN);
+ ret = xchacha20poly1305_decrypt(computed_output,
+ xchacha20poly1305_dec_vectors[i].input,
+ xchacha20poly1305_dec_vectors[i].ilen,
+ xchacha20poly1305_dec_vectors[i].assoc,
+ xchacha20poly1305_dec_vectors[i].alen,
+ xchacha20poly1305_dec_vectors[i].nonce,
+ xchacha20poly1305_dec_vectors[i].key);
+ if (!decryption_success(ret,
+ xchacha20poly1305_dec_vectors[i].failure,
+ memcmp(computed_output,
+ xchacha20poly1305_dec_vectors[i].output,
+ xchacha20poly1305_dec_vectors[i].ilen -
+ POLY1305_DIGEST_SIZE))) {
+ pr_err("xchacha20poly1305 decryption self-test %zu: FAIL\n",
+ i + 1);
+ success = false;
+ }
+ }
+
+out:
+ kfree(heap_src);
+ kfree(computed_output);
+ return success;
+}
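
The selftest above runs every vector through both the flat-buffer and the
scatterlist entry points. As a minimal sketch of the flat 64-bit-nonce API it
exercises (illustrative only: the function name, buffers and error handling
are hypothetical; the two library calls and the size constants are the ones
this patch introduces, assuming the includes already present in this file):

	static int example_seal_open(const u8 key[CHACHA20POLY1305_KEY_SIZE])
	{
		u8 pt[64] = { 0 }, ct[64 + POLY1305_DIGEST_SIZE];
		u64 nonce = 1;	/* must never repeat under the same key */

		/* ciphertext is plaintext length plus the 16-byte tag */
		chacha20poly1305_encrypt(ct, pt, sizeof(pt), NULL, 0,
					 nonce, key);
		/* false means the trailing tag failed to authenticate */
		if (!chacha20poly1305_decrypt(pt, ct, sizeof(ct), NULL, 0,
					      nonce, key))
			return -EBADMSG;
		return 0;
	}
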
diff --git a/lib/crypto/chacha20poly1305.c b/lib/crypto/chacha20poly1305.c
new file mode 100644
index 000000000000..6d83cafebc69
--- /dev/null
+++ b/lib/crypto/chacha20poly1305.c
@@ -0,0 +1,369 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ *
+ * This is an implementation of the ChaCha20Poly1305 AEAD construction.
+ *
+ * Information: https://tools.ietf.org/html/rfc8439
+ */
+
+#include <crypto/algapi.h>
+#include <crypto/chacha20poly1305.h>
+#include <crypto/chacha.h>
+#include <crypto/poly1305.h>
+#include <crypto/scatterwalk.h>
+
+#include <asm/unaligned.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+
+#define CHACHA_KEY_WORDS (CHACHA_KEY_SIZE / sizeof(u32))
+
+bool __init chacha20poly1305_selftest(void);
+
+static void chacha_load_key(u32 *k, const u8 *in)
+{
+ k[0] = get_unaligned_le32(in);
+ k[1] = get_unaligned_le32(in + 4);
+ k[2] = get_unaligned_le32(in + 8);
+ k[3] = get_unaligned_le32(in + 12);
+ k[4] = get_unaligned_le32(in + 16);
+ k[5] = get_unaligned_le32(in + 20);
+ k[6] = get_unaligned_le32(in + 24);
+ k[7] = get_unaligned_le32(in + 28);
+}
+
+static void xchacha_init(u32 *chacha_state, const u8 *key, const u8 *nonce)
+{
+ u32 k[CHACHA_KEY_WORDS];
+ u8 iv[CHACHA_IV_SIZE];
+
+ memset(iv, 0, 8);
+ memcpy(iv + 8, nonce + 16, 8);
+
+ chacha_load_key(k, key);
+
+ /* Compute the subkey given the original key and first 128 nonce bits */
+ chacha_init(chacha_state, k, nonce);
+ hchacha_block(chacha_state, k, 20);
+
+ chacha_init(chacha_state, k, iv);
+
+ memzero_explicit(k, sizeof(k));
+ memzero_explicit(iv, sizeof(iv));
+}
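
For reference, the schedule this helper implements (matching the XChaCha
construction of draft-irtf-cfrg-xchacha) can be summarized as:

	/*
	 * subkey k' = HChaCha20(key, nonce[0..15])
	 * state     = ChaCha20 init with k' and
	 *             iv = 4-byte counter 0 || 4 zero bytes || nonce[16..23]
	 *
	 * i.e. the block counter starts at zero and the inner 96-bit nonce
	 * is four zero bytes followed by the last eight bytes of the
	 * 192-bit XChaCha nonce.
	 */
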
+
+static void
+__chacha20poly1305_encrypt(u8 *dst, const u8 *src, const size_t src_len,
+ const u8 *ad, const size_t ad_len, u32 *chacha_state)
+{
+ const u8 *pad0 = page_address(ZERO_PAGE(0));
+ struct poly1305_desc_ctx poly1305_state;
+ union {
+ u8 block0[POLY1305_KEY_SIZE];
+ __le64 lens[2];
+ } b;
+
+ chacha20_crypt(chacha_state, b.block0, pad0, sizeof(b.block0));
+ poly1305_init(&poly1305_state, b.block0);
+
+ poly1305_update(&poly1305_state, ad, ad_len);
+ if (ad_len & 0xf)
+ poly1305_update(&poly1305_state, pad0, 0x10 - (ad_len & 0xf));
+
+ chacha20_crypt(chacha_state, dst, src, src_len);
+
+ poly1305_update(&poly1305_state, dst, src_len);
+ if (src_len & 0xf)
+ poly1305_update(&poly1305_state, pad0, 0x10 - (src_len & 0xf));
+
+ b.lens[0] = cpu_to_le64(ad_len);
+ b.lens[1] = cpu_to_le64(src_len);
+ poly1305_update(&poly1305_state, (u8 *)b.lens, sizeof(b.lens));
+
+ poly1305_final(&poly1305_state, dst + src_len);
+
+ memzero_explicit(chacha_state, CHACHA_STATE_WORDS * sizeof(u32));
+ memzero_explicit(&b, sizeof(b));
+}
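
The Poly1305 input assembled above is the RFC 8439 section 2.8 layout:

	/*
	 * mac_data = AD || pad16(AD) || CT || pad16(CT)
	 *            || le64(ad_len) || le64(ct_len)
	 *
	 * keyed with the first 32 bytes of keystream at block counter 0;
	 * reading block0 advances the ChaCha state by a full block, so the
	 * payload is encrypted starting at counter 1, as the RFC requires.
	 */
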
+
+void chacha20poly1305_encrypt(u8 *dst, const u8 *src, const size_t src_len,
+ const u8 *ad, const size_t ad_len,
+ const u64 nonce,
+ const u8 key[CHACHA20POLY1305_KEY_SIZE])
+{
+ u32 chacha_state[CHACHA_STATE_WORDS];
+ u32 k[CHACHA_KEY_WORDS];
+ __le64 iv[2];
+
+ chacha_load_key(k, key);
+
+ iv[0] = 0;
+ iv[1] = cpu_to_le64(nonce);
+
+ chacha_init(chacha_state, k, (u8 *)iv);
+ __chacha20poly1305_encrypt(dst, src, src_len, ad, ad_len, chacha_state);
+
+ memzero_explicit(iv, sizeof(iv));
+ memzero_explicit(k, sizeof(k));
+}
+EXPORT_SYMBOL(chacha20poly1305_encrypt);
+
+void xchacha20poly1305_encrypt(u8 *dst, const u8 *src, const size_t src_len,
+ const u8 *ad, const size_t ad_len,
+ const u8 nonce[XCHACHA20POLY1305_NONCE_SIZE],
+ const u8 key[CHACHA20POLY1305_KEY_SIZE])
+{
+ u32 chacha_state[CHACHA_STATE_WORDS];
+
+ xchacha_init(chacha_state, key, nonce);
+ __chacha20poly1305_encrypt(dst, src, src_len, ad, ad_len, chacha_state);
+}
+EXPORT_SYMBOL(xchacha20poly1305_encrypt);
+
+static bool
+__chacha20poly1305_decrypt(u8 *dst, const u8 *src, const size_t src_len,
+ const u8 *ad, const size_t ad_len, u32 *chacha_state)
+{
+ const u8 *pad0 = page_address(ZERO_PAGE(0));
+ struct poly1305_desc_ctx poly1305_state;
+ size_t dst_len;
+ int ret;
+ union {
+ u8 block0[POLY1305_KEY_SIZE];
+ u8 mac[POLY1305_DIGEST_SIZE];
+ __le64 lens[2];
+ } b;
+
+ if (unlikely(src_len < POLY1305_DIGEST_SIZE))
+ return false;
+
+ chacha20_crypt(chacha_state, b.block0, pad0, sizeof(b.block0));
+ poly1305_init(&poly1305_state, b.block0);
+
+ poly1305_update(&poly1305_state, ad, ad_len);
+ if (ad_len & 0xf)
+ poly1305_update(&poly1305_state, pad0, 0x10 - (ad_len & 0xf));
+
+ dst_len = src_len - POLY1305_DIGEST_SIZE;
+ poly1305_update(&poly1305_state, src, dst_len);
+ if (dst_len & 0xf)
+ poly1305_update(&poly1305_state, pad0, 0x10 - (dst_len & 0xf));
+
+ b.lens[0] = cpu_to_le64(ad_len);
+ b.lens[1] = cpu_to_le64(dst_len);
+ poly1305_update(&poly1305_state, (u8 *)b.lens, sizeof(b.lens));
+
+ poly1305_final(&poly1305_state, b.mac);
+
+ ret = crypto_memneq(b.mac, src + dst_len, POLY1305_DIGEST_SIZE);
+ if (likely(!ret))
+ chacha20_crypt(chacha_state, dst, src, dst_len);
+
+ memzero_explicit(&b, sizeof(b));
+
+ return !ret;
+}
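
	/*
	 * Note the ordering above: the tag is recomputed over the
	 * still-encrypted bytes and checked with crypto_memneq(), whose
	 * running time does not depend on where the first mismatch occurs
	 * (memcmp() would leak that via timing), and keystream is only
	 * applied to produce plaintext once the tag verifies.
	 */
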
+
+bool chacha20poly1305_decrypt(u8 *dst, const u8 *src, const size_t src_len,
+ const u8 *ad, const size_t ad_len,
+ const u64 nonce,
+ const u8 key[CHACHA20POLY1305_KEY_SIZE])
+{
+ u32 chacha_state[CHACHA_STATE_WORDS];
+ u32 k[CHACHA_KEY_WORDS];
+ __le64 iv[2];
+ bool ret;
+
+ chacha_load_key(k, key);
+
+ iv[0] = 0;
+ iv[1] = cpu_to_le64(nonce);
+
+ chacha_init(chacha_state, k, (u8 *)iv);
+ ret = __chacha20poly1305_decrypt(dst, src, src_len, ad, ad_len,
+ chacha_state);
+
+ memzero_explicit(chacha_state, sizeof(chacha_state));
+ memzero_explicit(iv, sizeof(iv));
+ memzero_explicit(k, sizeof(k));
+ return ret;
+}
+EXPORT_SYMBOL(chacha20poly1305_decrypt);
+
+bool xchacha20poly1305_decrypt(u8 *dst, const u8 *src, const size_t src_len,
+ const u8 *ad, const size_t ad_len,
+ const u8 nonce[XCHACHA20POLY1305_NONCE_SIZE],
+ const u8 key[CHACHA20POLY1305_KEY_SIZE])
+{
+ u32 chacha_state[CHACHA_STATE_WORDS];
+
+ xchacha_init(chacha_state, key, nonce);
+ return __chacha20poly1305_decrypt(dst, src, src_len, ad, ad_len,
+ chacha_state);
+}
+EXPORT_SYMBOL(xchacha20poly1305_decrypt);
+
+static
+bool chacha20poly1305_crypt_sg_inplace(struct scatterlist *src,
+ const size_t src_len,
+ const u8 *ad, const size_t ad_len,
+ const u64 nonce,
+ const u8 key[CHACHA20POLY1305_KEY_SIZE],
+ int encrypt)
+{
+ const u8 *pad0 = page_address(ZERO_PAGE(0));
+ struct poly1305_desc_ctx poly1305_state;
+ u32 chacha_state[CHACHA_STATE_WORDS];
+ struct sg_mapping_iter miter;
+ size_t partial = 0;
+ unsigned int flags;
+ bool ret = true;
+ int sl;
+ union {
+ struct {
+ u32 k[CHACHA_KEY_WORDS];
+ __le64 iv[2];
+ };
+ u8 block0[POLY1305_KEY_SIZE];
+ u8 chacha_stream[CHACHA_BLOCK_SIZE];
+ struct {
+ u8 mac[2][POLY1305_DIGEST_SIZE];
+ };
+ __le64 lens[2];
+ } b __aligned(16);
+
+ chacha_load_key(b.k, key);
+
+ b.iv[0] = 0;
+ b.iv[1] = cpu_to_le64(nonce);
+
+ chacha_init(chacha_state, b.k, (u8 *)b.iv);
+ chacha20_crypt(chacha_state, b.block0, pad0, sizeof(b.block0));
+ poly1305_init(&poly1305_state, b.block0);
+
+ if (unlikely(ad_len)) {
+ poly1305_update(&poly1305_state, ad, ad_len);
+ if (ad_len & 0xf)
+ poly1305_update(&poly1305_state, pad0, 0x10 - (ad_len & 0xf));
+ }
+
+ flags = SG_MITER_TO_SG;
+ if (!preemptible())
+ flags |= SG_MITER_ATOMIC;
+
+ sg_miter_start(&miter, src, sg_nents(src), flags);
+
+ for (sl = src_len; sl > 0 && sg_miter_next(&miter); sl -= miter.length) {
+ u8 *addr = miter.addr;
+ size_t length = min_t(size_t, sl, miter.length);
+
+ if (!encrypt)
+ poly1305_update(&poly1305_state, addr, length);
+
+ if (unlikely(partial)) {
+ size_t l = min(length, CHACHA_BLOCK_SIZE - partial);
+
+ crypto_xor(addr, b.chacha_stream + partial, l);
+ partial = (partial + l) & (CHACHA_BLOCK_SIZE - 1);
+
+ addr += l;
+ length -= l;
+ }
+
+ if (likely(length >= CHACHA_BLOCK_SIZE || length == sl)) {
+ size_t l = length;
+
+ if (unlikely(length < sl))
+ l &= ~(CHACHA_BLOCK_SIZE - 1);
+ chacha20_crypt(chacha_state, addr, addr, l);
+ addr += l;
+ length -= l;
+ }
+
+ if (unlikely(length > 0)) {
+ chacha20_crypt(chacha_state, b.chacha_stream, pad0,
+ CHACHA_BLOCK_SIZE);
+ crypto_xor(addr, b.chacha_stream, length);
+ partial = length;
+ }
+
+ if (encrypt)
+ poly1305_update(&poly1305_state, miter.addr,
+ min_t(size_t, sl, miter.length));
+ }
+
+ if (src_len & 0xf)
+ poly1305_update(&poly1305_state, pad0, 0x10 - (src_len & 0xf));
+
+ b.lens[0] = cpu_to_le64(ad_len);
+ b.lens[1] = cpu_to_le64(src_len);
+ poly1305_update(&poly1305_state, (u8 *)b.lens, sizeof(b.lens));
+
+ if (likely(sl <= -POLY1305_DIGEST_SIZE)) {
+ if (encrypt) {
+ poly1305_final(&poly1305_state,
+ miter.addr + miter.length + sl);
+ ret = true;
+ } else {
+ poly1305_final(&poly1305_state, b.mac[0]);
+ ret = !crypto_memneq(b.mac[0],
+ miter.addr + miter.length + sl,
+ POLY1305_DIGEST_SIZE);
+ }
+ }
+
+ sg_miter_stop(&miter);
+
+ if (unlikely(sl > -POLY1305_DIGEST_SIZE)) {
+ poly1305_final(&poly1305_state, b.mac[1]);
+ scatterwalk_map_and_copy(b.mac[encrypt], src, src_len,
+ sizeof(b.mac[1]), encrypt);
+ ret = encrypt ||
+ !crypto_memneq(b.mac[0], b.mac[1], POLY1305_DIGEST_SIZE);
+ }
+
+ memzero_explicit(chacha_state, sizeof(chacha_state));
+ memzero_explicit(&b, sizeof(b));
+
+ return ret;
+}
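
Two details in the walker above deserve a note:

	/*
	 * b.chacha_stream holds keystream left over when a scatterlist
	 * segment ends midway through a 64-byte ChaCha block, so the XOR
	 * resumes from the correct offset in the next segment. And after
	 * the loop, miter.length + sl is the offset of the end of the data
	 * within the last mapped segment: sl <= -POLY1305_DIGEST_SIZE means
	 * the 16-byte tag region is still contiguously mapped and can be
	 * written (encrypt) or compared (decrypt) in place; otherwise the
	 * tag straddles segments and is handled through b.mac together
	 * with scatterwalk_map_and_copy().
	 */
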
+
+bool chacha20poly1305_encrypt_sg_inplace(struct scatterlist *src, size_t src_len,
+ const u8 *ad, const size_t ad_len,
+ const u64 nonce,
+ const u8 key[CHACHA20POLY1305_KEY_SIZE])
+{
+ return chacha20poly1305_crypt_sg_inplace(src, src_len, ad, ad_len,
+ nonce, key, 1);
+}
+EXPORT_SYMBOL(chacha20poly1305_encrypt_sg_inplace);
+
+bool chacha20poly1305_decrypt_sg_inplace(struct scatterlist *src, size_t src_len,
+ const u8 *ad, const size_t ad_len,
+ const u64 nonce,
+ const u8 key[CHACHA20POLY1305_KEY_SIZE])
+{
+ if (unlikely(src_len < POLY1305_DIGEST_SIZE))
+ return false;
+
+ return chacha20poly1305_crypt_sg_inplace(src,
+ src_len - POLY1305_DIGEST_SIZE,
+ ad, ad_len, nonce, key, 0);
+}
+EXPORT_SYMBOL(chacha20poly1305_decrypt_sg_inplace);
+
+static int __init mod_init(void)
+{
+ if (!IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS) &&
+ WARN_ON(!chacha20poly1305_selftest()))
+ return -ENODEV;
+ return 0;
+}
+
+module_init(mod_init);
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("ChaCha20Poly1305 AEAD construction");
+MODULE_AUTHOR("Jason A. Donenfeld <Jason@zx2c4.com>");
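
The scatterlist helpers mirror what the selftest does with them: encryption
needs POLY1305_DIGEST_SIZE spare bytes at the tail of the list to receive the
tag, and the decryption length covers data plus tag. A sketch (buf, len, ok
and the surrounding code are illustrative; the calls are this patch's):

	struct scatterlist sg;
	bool ok;

	sg_init_one(&sg, buf, len + POLY1305_DIGEST_SIZE);
	chacha20poly1305_encrypt_sg_inplace(&sg, len, NULL, 0, nonce, key);
	ok = chacha20poly1305_decrypt_sg_inplace(&sg,
				len + POLY1305_DIGEST_SIZE,
				NULL, 0, nonce, key);
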
diff --git a/lib/crypto/curve25519-fiat32.c b/lib/crypto/curve25519-fiat32.c
new file mode 100644
index 000000000000..2fde0ec33dbd
--- /dev/null
+++ b/lib/crypto/curve25519-fiat32.c
@@ -0,0 +1,864 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Copyright (C) 2015-2016 The fiat-crypto Authors.
+ * Copyright (C) 2018-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ *
+ * This is a machine-generated formally verified implementation of Curve25519
+ * ECDH from: <https://github.com/mit-plv/fiat-crypto>. Though originally
+ * machine generated, it has been tweaked to be suitable for use in the kernel.
+ * It is optimized for 32-bit machines and machines that cannot work efficiently
+ * with 128-bit integer types.
+ */
+
+#include <asm/unaligned.h>
+#include <crypto/curve25519.h>
+#include <linux/string.h>
+
+/* fe means field element. Here the field is \Z/(2^255-19). An element t,
+ * entries t[0]...t[9], represents the integer t[0]+2^26 t[1]+2^51 t[2]+2^77
+ * t[3]+2^102 t[4]+...+2^230 t[9].
+ * fe limbs are bounded by 1.125*2^26, 1.125*2^25, 1.125*2^26, 1.125*2^25, etc.
+ * Multiplication and carrying produce fe from fe_loose.
+ */
+typedef struct fe { u32 v[10]; } fe;
+
+/* fe_loose limbs are bounded by 3.375*2^26, 3.375*2^25, 3.375*2^26, 3.375*2^25, etc.
+ * Addition and subtraction produce fe_loose from (fe, fe).
+ */
+typedef struct fe_loose { u32 v[10]; } fe_loose;
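
Spelled out, both typedefs use the same radix-2^25.5 packing:

	/*
	 * Limb i carries weight 2^ceil(25.5 * i), alternating 26- and
	 * 25-bit limbs:
	 *
	 *   i      :  0   1   2   3    4    5    6    7    8    9
	 *   weight :  0  26  51  77  102  128  153  179  204  230
	 *
	 * so t = t[0] + 2^26*t[1] + 2^51*t[2] + ... + 2^230*t[9],
	 * reduced mod 2^255 - 19.
	 */
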
+
+static __always_inline void fe_frombytes_impl(u32 h[10], const u8 *s)
+{
+ /* Ignores top bit of s. */
+ u32 a0 = get_unaligned_le32(s);
+ u32 a1 = get_unaligned_le32(s+4);
+ u32 a2 = get_unaligned_le32(s+8);
+ u32 a3 = get_unaligned_le32(s+12);
+ u32 a4 = get_unaligned_le32(s+16);
+ u32 a5 = get_unaligned_le32(s+20);
+ u32 a6 = get_unaligned_le32(s+24);
+ u32 a7 = get_unaligned_le32(s+28);
+ h[0] = a0&((1<<26)-1); /* 26 used, 32-26 left. 26 */
+ h[1] = (a0>>26) | ((a1&((1<<19)-1))<< 6); /* (32-26) + 19 = 6+19 = 25 */
+ h[2] = (a1>>19) | ((a2&((1<<13)-1))<<13); /* (32-19) + 13 = 13+13 = 26 */
+ h[3] = (a2>>13) | ((a3&((1<< 6)-1))<<19); /* (32-13) + 6 = 19+ 6 = 25 */
+ h[4] = (a3>> 6); /* (32- 6) = 26 */
+ h[5] = a4&((1<<25)-1); /* 25 */
+ h[6] = (a4>>25) | ((a5&((1<<19)-1))<< 7); /* (32-25) + 19 = 7+19 = 26 */
+ h[7] = (a5>>19) | ((a6&((1<<12)-1))<<13); /* (32-19) + 12 = 13+12 = 25 */
+ h[8] = (a6>>12) | ((a7&((1<< 6)-1))<<20); /* (32-12) + 6 = 20+ 6 = 26 */
+ h[9] = (a7>> 6)&((1<<25)-1); /* 25 */
+}
+
+static __always_inline void fe_frombytes(fe *h, const u8 *s)
+{
+ fe_frombytes_impl(h->v, s);
+}
+
+static __always_inline u8 /*bool*/
+addcarryx_u25(u8 /*bool*/ c, u32 a, u32 b, u32 *low)
+{
+ /* This function extracts 25 bits of result and 1 bit of carry
+ * (26 total), so a 32-bit intermediate is sufficient.
+ */
+ u32 x = a + b + c;
+ *low = x & ((1 << 25) - 1);
+ return (x >> 25) & 1;
+}
+
+static __always_inline u8 /*bool*/
+addcarryx_u26(u8 /*bool*/ c, u32 a, u32 b, u32 *low)
+{
+ /* This function extracts 26 bits of result and 1 bit of carry
+ * (27 total), so a 32-bit intermediate is sufficient.
+ */
+ u32 x = a + b + c;
+ *low = x & ((1 << 26) - 1);
+ return (x >> 26) & 1;
+}
+
+static __always_inline u8 /*bool*/
+subborrow_u25(u8 /*bool*/ c, u32 a, u32 b, u32 *low)
+{
+ /* This function extracts 25 bits of result and 1 bit of borrow
+ * (26 total), so a 32-bit intermediate is sufficient.
+ */
+ u32 x = a - b - c;
+ *low = x & ((1 << 25) - 1);
+ return x >> 31;
+}
+
+static __always_inline u8 /*bool*/
+subborrow_u26(u8 /*bool*/ c, u32 a, u32 b, u32 *low)
+{
+ /* This function extracts 26 bits of result and 1 bit of borrow
+ * (27 total), so a 32-bit intermediate is sufficient.
+ */
+ u32 x = a - b - c;
+ *low = x & ((1 << 26) - 1);
+ return x >> 31;
+}
+
+static __always_inline u32 cmovznz32(u32 t, u32 z, u32 nz)
+{
+ t = -!!t; /* all set if nonzero, 0 if 0 */
+ return (t&nz) | ((~t)&z);
+}
+
+static __always_inline void fe_freeze(u32 out[10], const u32 in1[10])
+{
+ { const u32 x17 = in1[9];
+ { const u32 x18 = in1[8];
+ { const u32 x16 = in1[7];
+ { const u32 x14 = in1[6];
+ { const u32 x12 = in1[5];
+ { const u32 x10 = in1[4];
+ { const u32 x8 = in1[3];
+ { const u32 x6 = in1[2];
+ { const u32 x4 = in1[1];
+ { const u32 x2 = in1[0];
+ { u32 x20; u8/*bool*/ x21 = subborrow_u26(0x0, x2, 0x3ffffed, &x20);
+ { u32 x23; u8/*bool*/ x24 = subborrow_u25(x21, x4, 0x1ffffff, &x23);
+ { u32 x26; u8/*bool*/ x27 = subborrow_u26(x24, x6, 0x3ffffff, &x26);
+ { u32 x29; u8/*bool*/ x30 = subborrow_u25(x27, x8, 0x1ffffff, &x29);
+ { u32 x32; u8/*bool*/ x33 = subborrow_u26(x30, x10, 0x3ffffff, &x32);
+ { u32 x35; u8/*bool*/ x36 = subborrow_u25(x33, x12, 0x1ffffff, &x35);
+ { u32 x38; u8/*bool*/ x39 = subborrow_u26(x36, x14, 0x3ffffff, &x38);
+ { u32 x41; u8/*bool*/ x42 = subborrow_u25(x39, x16, 0x1ffffff, &x41);
+ { u32 x44; u8/*bool*/ x45 = subborrow_u26(x42, x18, 0x3ffffff, &x44);
+ { u32 x47; u8/*bool*/ x48 = subborrow_u25(x45, x17, 0x1ffffff, &x47);
+ { u32 x49 = cmovznz32(x48, 0x0, 0xffffffff);
+ { u32 x50 = (x49 & 0x3ffffed);
+ { u32 x52; u8/*bool*/ x53 = addcarryx_u26(0x0, x20, x50, &x52);
+ { u32 x54 = (x49 & 0x1ffffff);
+ { u32 x56; u8/*bool*/ x57 = addcarryx_u25(x53, x23, x54, &x56);
+ { u32 x58 = (x49 & 0x3ffffff);
+ { u32 x60; u8/*bool*/ x61 = addcarryx_u26(x57, x26, x58, &x60);
+ { u32 x62 = (x49 & 0x1ffffff);
+ { u32 x64; u8/*bool*/ x65 = addcarryx_u25(x61, x29, x62, &x64);
+ { u32 x66 = (x49 & 0x3ffffff);
+ { u32 x68; u8/*bool*/ x69 = addcarryx_u26(x65, x32, x66, &x68);
+ { u32 x70 = (x49 & 0x1ffffff);
+ { u32 x72; u8/*bool*/ x73 = addcarryx_u25(x69, x35, x70, &x72);
+ { u32 x74 = (x49 & 0x3ffffff);
+ { u32 x76; u8/*bool*/ x77 = addcarryx_u26(x73, x38, x74, &x76);
+ { u32 x78 = (x49 & 0x1ffffff);
+ { u32 x80; u8/*bool*/ x81 = addcarryx_u25(x77, x41, x78, &x80);
+ { u32 x82 = (x49 & 0x3ffffff);
+ { u32 x84; u8/*bool*/ x85 = addcarryx_u26(x81, x44, x82, &x84);
+ { u32 x86 = (x49 & 0x1ffffff);
+ { u32 x88; addcarryx_u25(x85, x47, x86, &x88);
+ out[0] = x52;
+ out[1] = x56;
+ out[2] = x60;
+ out[3] = x64;
+ out[4] = x68;
+ out[5] = x72;
+ out[6] = x76;
+ out[7] = x80;
+ out[8] = x84;
+ out[9] = x88;
+ }}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}
+}
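
	/*
	 * fe_freeze() produces the canonical representative: the constants
	 * subtracted above are the limbs of p = 2^255 - 19 (0x3ffffed =
	 * 2^26 - 19, then alternating 2^25 - 1 and 2^26 - 1). If the
	 * subtraction borrows out (x48), the input was already below p and
	 * the x49 all-ones mask adds p straight back, so the result is
	 * in1 mod p computed without a secret-dependent branch.
	 */
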
+
+static __always_inline void fe_tobytes(u8 s[32], const fe *f)
+{
+ u32 h[10];
+ fe_freeze(h, f->v);
+ s[0] = h[0] >> 0;
+ s[1] = h[0] >> 8;
+ s[2] = h[0] >> 16;
+ s[3] = (h[0] >> 24) | (h[1] << 2);
+ s[4] = h[1] >> 6;
+ s[5] = h[1] >> 14;
+ s[6] = (h[1] >> 22) | (h[2] << 3);
+ s[7] = h[2] >> 5;
+ s[8] = h[2] >> 13;
+ s[9] = (h[2] >> 21) | (h[3] << 5);
+ s[10] = h[3] >> 3;
+ s[11] = h[3] >> 11;
+ s[12] = (h[3] >> 19) | (h[4] << 6);
+ s[13] = h[4] >> 2;
+ s[14] = h[4] >> 10;
+ s[15] = h[4] >> 18;
+ s[16] = h[5] >> 0;
+ s[17] = h[5] >> 8;
+ s[18] = h[5] >> 16;
+ s[19] = (h[5] >> 24) | (h[6] << 1);
+ s[20] = h[6] >> 7;
+ s[21] = h[6] >> 15;
+ s[22] = (h[6] >> 23) | (h[7] << 3);
+ s[23] = h[7] >> 5;
+ s[24] = h[7] >> 13;
+ s[25] = (h[7] >> 21) | (h[8] << 4);
+ s[26] = h[8] >> 4;
+ s[27] = h[8] >> 12;
+ s[28] = (h[8] >> 20) | (h[9] << 6);
+ s[29] = h[9] >> 2;
+ s[30] = h[9] >> 10;
+ s[31] = h[9] >> 18;
+}
+
+/* h = f */
+static __always_inline void fe_copy(fe *h, const fe *f)
+{
+ memmove(h, f, sizeof(u32) * 10);
+}
+
+static __always_inline void fe_copy_lt(fe_loose *h, const fe *f)
+{
+ memmove(h, f, sizeof(u32) * 10);
+}
+
+/* h = 0 */
+static __always_inline void fe_0(fe *h)
+{
+ memset(h, 0, sizeof(u32) * 10);
+}
+
+/* h = 1 */
+static __always_inline void fe_1(fe *h)
+{
+ memset(h, 0, sizeof(u32) * 10);
+ h->v[0] = 1;
+}
+
+static noinline void fe_add_impl(u32 out[10], const u32 in1[10], const u32 in2[10])
+{
+ { const u32 x20 = in1[9];
+ { const u32 x21 = in1[8];
+ { const u32 x19 = in1[7];
+ { const u32 x17 = in1[6];
+ { const u32 x15 = in1[5];
+ { const u32 x13 = in1[4];
+ { const u32 x11 = in1[3];
+ { const u32 x9 = in1[2];
+ { const u32 x7 = in1[1];
+ { const u32 x5 = in1[0];
+ { const u32 x38 = in2[9];
+ { const u32 x39 = in2[8];
+ { const u32 x37 = in2[7];
+ { const u32 x35 = in2[6];
+ { const u32 x33 = in2[5];
+ { const u32 x31 = in2[4];
+ { const u32 x29 = in2[3];
+ { const u32 x27 = in2[2];
+ { const u32 x25 = in2[1];
+ { const u32 x23 = in2[0];
+ out[0] = (x5 + x23);
+ out[1] = (x7 + x25);
+ out[2] = (x9 + x27);
+ out[3] = (x11 + x29);
+ out[4] = (x13 + x31);
+ out[5] = (x15 + x33);
+ out[6] = (x17 + x35);
+ out[7] = (x19 + x37);
+ out[8] = (x21 + x39);
+ out[9] = (x20 + x38);
+ }}}}}}}}}}}}}}}}}}}}
+}
+
+/* h = f + g
+ * Can overlap h with f or g.
+ */
+static __always_inline void fe_add(fe_loose *h, const fe *f, const fe *g)
+{
+ fe_add_impl(h->v, f->v, g->v);
+}
+
+static noinline void fe_sub_impl(u32 out[10], const u32 in1[10], const u32 in2[10])
+{
+ { const u32 x20 = in1[9];
+ { const u32 x21 = in1[8];
+ { const u32 x19 = in1[7];
+ { const u32 x17 = in1[6];
+ { const u32 x15 = in1[5];
+ { const u32 x13 = in1[4];
+ { const u32 x11 = in1[3];
+ { const u32 x9 = in1[2];
+ { const u32 x7 = in1[1];
+ { const u32 x5 = in1[0];
+ { const u32 x38 = in2[9];
+ { const u32 x39 = in2[8];
+ { const u32 x37 = in2[7];
+ { const u32 x35 = in2[6];
+ { const u32 x33 = in2[5];
+ { const u32 x31 = in2[4];
+ { const u32 x29 = in2[3];
+ { const u32 x27 = in2[2];
+ { const u32 x25 = in2[1];
+ { const u32 x23 = in2[0];
+ out[0] = ((0x7ffffda + x5) - x23);
+ out[1] = ((0x3fffffe + x7) - x25);
+ out[2] = ((0x7fffffe + x9) - x27);
+ out[3] = ((0x3fffffe + x11) - x29);
+ out[4] = ((0x7fffffe + x13) - x31);
+ out[5] = ((0x3fffffe + x15) - x33);
+ out[6] = ((0x7fffffe + x17) - x35);
+ out[7] = ((0x3fffffe + x19) - x37);
+ out[8] = ((0x7fffffe + x21) - x39);
+ out[9] = ((0x3fffffe + x20) - x38);
+ }}}}}}}}}}}}}}}}}}}}
+}
+
+/* h = f - g
+ * Can overlap h with f or g.
+ */
+static __always_inline void fe_sub(fe_loose *h, const fe *f, const fe *g)
+{
+ fe_sub_impl(h->v, f->v, g->v);
+}
+
+static noinline void fe_mul_impl(u32 out[10], const u32 in1[10], const u32 in2[10])
+{
+ { const u32 x20 = in1[9];
+ { const u32 x21 = in1[8];
+ { const u32 x19 = in1[7];
+ { const u32 x17 = in1[6];
+ { const u32 x15 = in1[5];
+ { const u32 x13 = in1[4];
+ { const u32 x11 = in1[3];
+ { const u32 x9 = in1[2];
+ { const u32 x7 = in1[1];
+ { const u32 x5 = in1[0];
+ { const u32 x38 = in2[9];
+ { const u32 x39 = in2[8];
+ { const u32 x37 = in2[7];
+ { const u32 x35 = in2[6];
+ { const u32 x33 = in2[5];
+ { const u32 x31 = in2[4];
+ { const u32 x29 = in2[3];
+ { const u32 x27 = in2[2];
+ { const u32 x25 = in2[1];
+ { const u32 x23 = in2[0];
+ { u64 x40 = ((u64)x23 * x5);
+ { u64 x41 = (((u64)x23 * x7) + ((u64)x25 * x5));
+ { u64 x42 = ((((u64)(0x2 * x25) * x7) + ((u64)x23 * x9)) + ((u64)x27 * x5));
+ { u64 x43 = (((((u64)x25 * x9) + ((u64)x27 * x7)) + ((u64)x23 * x11)) + ((u64)x29 * x5));
+ { u64 x44 = (((((u64)x27 * x9) + (0x2 * (((u64)x25 * x11) + ((u64)x29 * x7)))) + ((u64)x23 * x13)) + ((u64)x31 * x5));
+ { u64 x45 = (((((((u64)x27 * x11) + ((u64)x29 * x9)) + ((u64)x25 * x13)) + ((u64)x31 * x7)) + ((u64)x23 * x15)) + ((u64)x33 * x5));
+ { u64 x46 = (((((0x2 * ((((u64)x29 * x11) + ((u64)x25 * x15)) + ((u64)x33 * x7))) + ((u64)x27 * x13)) + ((u64)x31 * x9)) + ((u64)x23 * x17)) + ((u64)x35 * x5));
+ { u64 x47 = (((((((((u64)x29 * x13) + ((u64)x31 * x11)) + ((u64)x27 * x15)) + ((u64)x33 * x9)) + ((u64)x25 * x17)) + ((u64)x35 * x7)) + ((u64)x23 * x19)) + ((u64)x37 * x5));
+ { u64 x48 = (((((((u64)x31 * x13) + (0x2 * (((((u64)x29 * x15) + ((u64)x33 * x11)) + ((u64)x25 * x19)) + ((u64)x37 * x7)))) + ((u64)x27 * x17)) + ((u64)x35 * x9)) + ((u64)x23 * x21)) + ((u64)x39 * x5));
+ { u64 x49 = (((((((((((u64)x31 * x15) + ((u64)x33 * x13)) + ((u64)x29 * x17)) + ((u64)x35 * x11)) + ((u64)x27 * x19)) + ((u64)x37 * x9)) + ((u64)x25 * x21)) + ((u64)x39 * x7)) + ((u64)x23 * x20)) + ((u64)x38 * x5));
+ { u64 x50 = (((((0x2 * ((((((u64)x33 * x15) + ((u64)x29 * x19)) + ((u64)x37 * x11)) + ((u64)x25 * x20)) + ((u64)x38 * x7))) + ((u64)x31 * x17)) + ((u64)x35 * x13)) + ((u64)x27 * x21)) + ((u64)x39 * x9));
+ { u64 x51 = (((((((((u64)x33 * x17) + ((u64)x35 * x15)) + ((u64)x31 * x19)) + ((u64)x37 * x13)) + ((u64)x29 * x21)) + ((u64)x39 * x11)) + ((u64)x27 * x20)) + ((u64)x38 * x9));
+ { u64 x52 = (((((u64)x35 * x17) + (0x2 * (((((u64)x33 * x19) + ((u64)x37 * x15)) + ((u64)x29 * x20)) + ((u64)x38 * x11)))) + ((u64)x31 * x21)) + ((u64)x39 * x13));
+ { u64 x53 = (((((((u64)x35 * x19) + ((u64)x37 * x17)) + ((u64)x33 * x21)) + ((u64)x39 * x15)) + ((u64)x31 * x20)) + ((u64)x38 * x13));
+ { u64 x54 = (((0x2 * ((((u64)x37 * x19) + ((u64)x33 * x20)) + ((u64)x38 * x15))) + ((u64)x35 * x21)) + ((u64)x39 * x17));
+ { u64 x55 = (((((u64)x37 * x21) + ((u64)x39 * x19)) + ((u64)x35 * x20)) + ((u64)x38 * x17));
+ { u64 x56 = (((u64)x39 * x21) + (0x2 * (((u64)x37 * x20) + ((u64)x38 * x19))));
+ { u64 x57 = (((u64)x39 * x20) + ((u64)x38 * x21));
+ { u64 x58 = ((u64)(0x2 * x38) * x20);
+ { u64 x59 = (x48 + (x58 << 0x4));
+ { u64 x60 = (x59 + (x58 << 0x1));
+ { u64 x61 = (x60 + x58);
+ { u64 x62 = (x47 + (x57 << 0x4));
+ { u64 x63 = (x62 + (x57 << 0x1));
+ { u64 x64 = (x63 + x57);
+ { u64 x65 = (x46 + (x56 << 0x4));
+ { u64 x66 = (x65 + (x56 << 0x1));
+ { u64 x67 = (x66 + x56);
+ { u64 x68 = (x45 + (x55 << 0x4));
+ { u64 x69 = (x68 + (x55 << 0x1));
+ { u64 x70 = (x69 + x55);
+ { u64 x71 = (x44 + (x54 << 0x4));
+ { u64 x72 = (x71 + (x54 << 0x1));
+ { u64 x73 = (x72 + x54);
+ { u64 x74 = (x43 + (x53 << 0x4));
+ { u64 x75 = (x74 + (x53 << 0x1));
+ { u64 x76 = (x75 + x53);
+ { u64 x77 = (x42 + (x52 << 0x4));
+ { u64 x78 = (x77 + (x52 << 0x1));
+ { u64 x79 = (x78 + x52);
+ { u64 x80 = (x41 + (x51 << 0x4));
+ { u64 x81 = (x80 + (x51 << 0x1));
+ { u64 x82 = (x81 + x51);
+ { u64 x83 = (x40 + (x50 << 0x4));
+ { u64 x84 = (x83 + (x50 << 0x1));
+ { u64 x85 = (x84 + x50);
+ { u64 x86 = (x85 >> 0x1a);
+ { u32 x87 = ((u32)x85 & 0x3ffffff);
+ { u64 x88 = (x86 + x82);
+ { u64 x89 = (x88 >> 0x19);
+ { u32 x90 = ((u32)x88 & 0x1ffffff);
+ { u64 x91 = (x89 + x79);
+ { u64 x92 = (x91 >> 0x1a);
+ { u32 x93 = ((u32)x91 & 0x3ffffff);
+ { u64 x94 = (x92 + x76);
+ { u64 x95 = (x94 >> 0x19);
+ { u32 x96 = ((u32)x94 & 0x1ffffff);
+ { u64 x97 = (x95 + x73);
+ { u64 x98 = (x97 >> 0x1a);
+ { u32 x99 = ((u32)x97 & 0x3ffffff);
+ { u64 x100 = (x98 + x70);
+ { u64 x101 = (x100 >> 0x19);
+ { u32 x102 = ((u32)x100 & 0x1ffffff);
+ { u64 x103 = (x101 + x67);
+ { u64 x104 = (x103 >> 0x1a);
+ { u32 x105 = ((u32)x103 & 0x3ffffff);
+ { u64 x106 = (x104 + x64);
+ { u64 x107 = (x106 >> 0x19);
+ { u32 x108 = ((u32)x106 & 0x1ffffff);
+ { u64 x109 = (x107 + x61);
+ { u64 x110 = (x109 >> 0x1a);
+ { u32 x111 = ((u32)x109 & 0x3ffffff);
+ { u64 x112 = (x110 + x49);
+ { u64 x113 = (x112 >> 0x19);
+ { u32 x114 = ((u32)x112 & 0x1ffffff);
+ { u64 x115 = (x87 + (0x13 * x113));
+ { u32 x116 = (u32) (x115 >> 0x1a);
+ { u32 x117 = ((u32)x115 & 0x3ffffff);
+ { u32 x118 = (x116 + x90);
+ { u32 x119 = (x118 >> 0x19);
+ { u32 x120 = (x118 & 0x1ffffff);
+ out[0] = x117;
+ out[1] = x120;
+ out[2] = (x119 + x93);
+ out[3] = x96;
+ out[4] = x99;
+ out[5] = x102;
+ out[6] = x105;
+ out[7] = x108;
+ out[8] = x111;
+ out[9] = x114;
+ }}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}
+}
+
+static __always_inline void fe_mul_ttt(fe *h, const fe *f, const fe *g)
+{
+ fe_mul_impl(h->v, f->v, g->v);
+}
+
+static __always_inline void fe_mul_tlt(fe *h, const fe_loose *f, const fe *g)
+{
+ fe_mul_impl(h->v, f->v, g->v);
+}
+
+static __always_inline void
+fe_mul_tll(fe *h, const fe_loose *f, const fe_loose *g)
+{
+ fe_mul_impl(h->v, f->v, g->v);
+}
+
+static noinline void fe_sqr_impl(u32 out[10], const u32 in1[10])
+{
+ { const u32 x17 = in1[9];
+ { const u32 x18 = in1[8];
+ { const u32 x16 = in1[7];
+ { const u32 x14 = in1[6];
+ { const u32 x12 = in1[5];
+ { const u32 x10 = in1[4];
+ { const u32 x8 = in1[3];
+ { const u32 x6 = in1[2];
+ { const u32 x4 = in1[1];
+ { const u32 x2 = in1[0];
+ { u64 x19 = ((u64)x2 * x2);
+ { u64 x20 = ((u64)(0x2 * x2) * x4);
+ { u64 x21 = (0x2 * (((u64)x4 * x4) + ((u64)x2 * x6)));
+ { u64 x22 = (0x2 * (((u64)x4 * x6) + ((u64)x2 * x8)));
+ { u64 x23 = ((((u64)x6 * x6) + ((u64)(0x4 * x4) * x8)) + ((u64)(0x2 * x2) * x10));
+ { u64 x24 = (0x2 * ((((u64)x6 * x8) + ((u64)x4 * x10)) + ((u64)x2 * x12)));
+ { u64 x25 = (0x2 * (((((u64)x8 * x8) + ((u64)x6 * x10)) + ((u64)x2 * x14)) + ((u64)(0x2 * x4) * x12)));
+ { u64 x26 = (0x2 * (((((u64)x8 * x10) + ((u64)x6 * x12)) + ((u64)x4 * x14)) + ((u64)x2 * x16)));
+ { u64 x27 = (((u64)x10 * x10) + (0x2 * ((((u64)x6 * x14) + ((u64)x2 * x18)) + (0x2 * (((u64)x4 * x16) + ((u64)x8 * x12))))));
+ { u64 x28 = (0x2 * ((((((u64)x10 * x12) + ((u64)x8 * x14)) + ((u64)x6 * x16)) + ((u64)x4 * x18)) + ((u64)x2 * x17)));
+ { u64 x29 = (0x2 * (((((u64)x12 * x12) + ((u64)x10 * x14)) + ((u64)x6 * x18)) + (0x2 * (((u64)x8 * x16) + ((u64)x4 * x17)))));
+ { u64 x30 = (0x2 * (((((u64)x12 * x14) + ((u64)x10 * x16)) + ((u64)x8 * x18)) + ((u64)x6 * x17)));
+ { u64 x31 = (((u64)x14 * x14) + (0x2 * (((u64)x10 * x18) + (0x2 * (((u64)x12 * x16) + ((u64)x8 * x17))))));
+ { u64 x32 = (0x2 * ((((u64)x14 * x16) + ((u64)x12 * x18)) + ((u64)x10 * x17)));
+ { u64 x33 = (0x2 * ((((u64)x16 * x16) + ((u64)x14 * x18)) + ((u64)(0x2 * x12) * x17)));
+ { u64 x34 = (0x2 * (((u64)x16 * x18) + ((u64)x14 * x17)));
+ { u64 x35 = (((u64)x18 * x18) + ((u64)(0x4 * x16) * x17));
+ { u64 x36 = ((u64)(0x2 * x18) * x17);
+ { u64 x37 = ((u64)(0x2 * x17) * x17);
+ { u64 x38 = (x27 + (x37 << 0x4));
+ { u64 x39 = (x38 + (x37 << 0x1));
+ { u64 x40 = (x39 + x37);
+ { u64 x41 = (x26 + (x36 << 0x4));
+ { u64 x42 = (x41 + (x36 << 0x1));
+ { u64 x43 = (x42 + x36);
+ { u64 x44 = (x25 + (x35 << 0x4));
+ { u64 x45 = (x44 + (x35 << 0x1));
+ { u64 x46 = (x45 + x35);
+ { u64 x47 = (x24 + (x34 << 0x4));
+ { u64 x48 = (x47 + (x34 << 0x1));
+ { u64 x49 = (x48 + x34);
+ { u64 x50 = (x23 + (x33 << 0x4));
+ { u64 x51 = (x50 + (x33 << 0x1));
+ { u64 x52 = (x51 + x33);
+ { u64 x53 = (x22 + (x32 << 0x4));
+ { u64 x54 = (x53 + (x32 << 0x1));
+ { u64 x55 = (x54 + x32);
+ { u64 x56 = (x21 + (x31 << 0x4));
+ { u64 x57 = (x56 + (x31 << 0x1));
+ { u64 x58 = (x57 + x31);
+ { u64 x59 = (x20 + (x30 << 0x4));
+ { u64 x60 = (x59 + (x30 << 0x1));
+ { u64 x61 = (x60 + x30);
+ { u64 x62 = (x19 + (x29 << 0x4));
+ { u64 x63 = (x62 + (x29 << 0x1));
+ { u64 x64 = (x63 + x29);
+ { u64 x65 = (x64 >> 0x1a);
+ { u32 x66 = ((u32)x64 & 0x3ffffff);
+ { u64 x67 = (x65 + x61);
+ { u64 x68 = (x67 >> 0x19);
+ { u32 x69 = ((u32)x67 & 0x1ffffff);
+ { u64 x70 = (x68 + x58);
+ { u64 x71 = (x70 >> 0x1a);
+ { u32 x72 = ((u32)x70 & 0x3ffffff);
+ { u64 x73 = (x71 + x55);
+ { u64 x74 = (x73 >> 0x19);
+ { u32 x75 = ((u32)x73 & 0x1ffffff);
+ { u64 x76 = (x74 + x52);
+ { u64 x77 = (x76 >> 0x1a);
+ { u32 x78 = ((u32)x76 & 0x3ffffff);
+ { u64 x79 = (x77 + x49);
+ { u64 x80 = (x79 >> 0x19);
+ { u32 x81 = ((u32)x79 & 0x1ffffff);
+ { u64 x82 = (x80 + x46);
+ { u64 x83 = (x82 >> 0x1a);
+ { u32 x84 = ((u32)x82 & 0x3ffffff);
+ { u64 x85 = (x83 + x43);
+ { u64 x86 = (x85 >> 0x19);
+ { u32 x87 = ((u32)x85 & 0x1ffffff);
+ { u64 x88 = (x86 + x40);
+ { u64 x89 = (x88 >> 0x1a);
+ { u32 x90 = ((u32)x88 & 0x3ffffff);
+ { u64 x91 = (x89 + x28);
+ { u64 x92 = (x91 >> 0x19);
+ { u32 x93 = ((u32)x91 & 0x1ffffff);
+ { u64 x94 = (x66 + (0x13 * x92));
+ { u32 x95 = (u32) (x94 >> 0x1a);
+ { u32 x96 = ((u32)x94 & 0x3ffffff);
+ { u32 x97 = (x95 + x69);
+ { u32 x98 = (x97 >> 0x19);
+ { u32 x99 = (x97 & 0x1ffffff);
+ out[0] = x96;
+ out[1] = x99;
+ out[2] = (x98 + x72);
+ out[3] = x75;
+ out[4] = x78;
+ out[5] = x81;
+ out[6] = x84;
+ out[7] = x87;
+ out[8] = x90;
+ out[9] = x93;
+ }}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}
+}
+
+static __always_inline void fe_sq_tl(fe *h, const fe_loose *f)
+{
+ fe_sqr_impl(h->v, f->v);
+}
+
+static __always_inline void fe_sq_tt(fe *h, const fe *f)
+{
+ fe_sqr_impl(h->v, f->v);
+}
+
+static __always_inline void fe_loose_invert(fe *out, const fe_loose *z)
+{
+ fe t0;
+ fe t1;
+ fe t2;
+ fe t3;
+ int i;
+
+ fe_sq_tl(&t0, z);
+ fe_sq_tt(&t1, &t0);
+ for (i = 1; i < 2; ++i)
+ fe_sq_tt(&t1, &t1);
+ fe_mul_tlt(&t1, z, &t1);
+ fe_mul_ttt(&t0, &t0, &t1);
+ fe_sq_tt(&t2, &t0);
+ fe_mul_ttt(&t1, &t1, &t2);
+ fe_sq_tt(&t2, &t1);
+ for (i = 1; i < 5; ++i)
+ fe_sq_tt(&t2, &t2);
+ fe_mul_ttt(&t1, &t2, &t1);
+ fe_sq_tt(&t2, &t1);
+ for (i = 1; i < 10; ++i)
+ fe_sq_tt(&t2, &t2);
+ fe_mul_ttt(&t2, &t2, &t1);
+ fe_sq_tt(&t3, &t2);
+ for (i = 1; i < 20; ++i)
+ fe_sq_tt(&t3, &t3);
+ fe_mul_ttt(&t2, &t3, &t2);
+ fe_sq_tt(&t2, &t2);
+ for (i = 1; i < 10; ++i)
+ fe_sq_tt(&t2, &t2);
+ fe_mul_ttt(&t1, &t2, &t1);
+ fe_sq_tt(&t2, &t1);
+ for (i = 1; i < 50; ++i)
+ fe_sq_tt(&t2, &t2);
+ fe_mul_ttt(&t2, &t2, &t1);
+ fe_sq_tt(&t3, &t2);
+ for (i = 1; i < 100; ++i)
+ fe_sq_tt(&t3, &t3);
+ fe_mul_ttt(&t2, &t3, &t2);
+ fe_sq_tt(&t2, &t2);
+ for (i = 1; i < 50; ++i)
+ fe_sq_tt(&t2, &t2);
+ fe_mul_ttt(&t1, &t2, &t1);
+ fe_sq_tt(&t1, &t1);
+ for (i = 1; i < 5; ++i)
+ fe_sq_tt(&t1, &t1);
+ fe_mul_ttt(out, &t1, &t0);
+}
+
+static __always_inline void fe_invert(fe *out, const fe *z)
+{
+ fe_loose l;
+ fe_copy_lt(&l, z);
+ fe_loose_invert(out, &l);
+}
+
+/* Replace (f,g) with (g,f) if b == 1;
+ * replace (f,g) with (f,g) if b == 0.
+ *
+ * Preconditions: b in {0,1}
+ */
+static noinline void fe_cswap(fe *f, fe *g, unsigned int b)
+{
+ unsigned i;
+ b = 0 - b;
+ for (i = 0; i < 10; i++) {
+ u32 x = f->v[i] ^ g->v[i];
+ x &= b;
+ f->v[i] ^= x;
+ g->v[i] ^= x;
+ }
+}
+
+/* NOTE: based on fiat-crypto fe_mul, edited for in2 = 121666, 0, ..., 0. */
+static __always_inline void fe_mul_121666_impl(u32 out[10], const u32 in1[10])
+{
+ { const u32 x20 = in1[9];
+ { const u32 x21 = in1[8];
+ { const u32 x19 = in1[7];
+ { const u32 x17 = in1[6];
+ { const u32 x15 = in1[5];
+ { const u32 x13 = in1[4];
+ { const u32 x11 = in1[3];
+ { const u32 x9 = in1[2];
+ { const u32 x7 = in1[1];
+ { const u32 x5 = in1[0];
+ { const u32 x38 = 0;
+ { const u32 x39 = 0;
+ { const u32 x37 = 0;
+ { const u32 x35 = 0;
+ { const u32 x33 = 0;
+ { const u32 x31 = 0;
+ { const u32 x29 = 0;
+ { const u32 x27 = 0;
+ { const u32 x25 = 0;
+ { const u32 x23 = 121666;
+ { u64 x40 = ((u64)x23 * x5);
+ { u64 x41 = (((u64)x23 * x7) + ((u64)x25 * x5));
+ { u64 x42 = ((((u64)(0x2 * x25) * x7) + ((u64)x23 * x9)) + ((u64)x27 * x5));
+ { u64 x43 = (((((u64)x25 * x9) + ((u64)x27 * x7)) + ((u64)x23 * x11)) + ((u64)x29 * x5));
+ { u64 x44 = (((((u64)x27 * x9) + (0x2 * (((u64)x25 * x11) + ((u64)x29 * x7)))) + ((u64)x23 * x13)) + ((u64)x31 * x5));
+ { u64 x45 = (((((((u64)x27 * x11) + ((u64)x29 * x9)) + ((u64)x25 * x13)) + ((u64)x31 * x7)) + ((u64)x23 * x15)) + ((u64)x33 * x5));
+ { u64 x46 = (((((0x2 * ((((u64)x29 * x11) + ((u64)x25 * x15)) + ((u64)x33 * x7))) + ((u64)x27 * x13)) + ((u64)x31 * x9)) + ((u64)x23 * x17)) + ((u64)x35 * x5));
+ { u64 x47 = (((((((((u64)x29 * x13) + ((u64)x31 * x11)) + ((u64)x27 * x15)) + ((u64)x33 * x9)) + ((u64)x25 * x17)) + ((u64)x35 * x7)) + ((u64)x23 * x19)) + ((u64)x37 * x5));
+ { u64 x48 = (((((((u64)x31 * x13) + (0x2 * (((((u64)x29 * x15) + ((u64)x33 * x11)) + ((u64)x25 * x19)) + ((u64)x37 * x7)))) + ((u64)x27 * x17)) + ((u64)x35 * x9)) + ((u64)x23 * x21)) + ((u64)x39 * x5));
+ { u64 x49 = (((((((((((u64)x31 * x15) + ((u64)x33 * x13)) + ((u64)x29 * x17)) + ((u64)x35 * x11)) + ((u64)x27 * x19)) + ((u64)x37 * x9)) + ((u64)x25 * x21)) + ((u64)x39 * x7)) + ((u64)x23 * x20)) + ((u64)x38 * x5));
+ { u64 x50 = (((((0x2 * ((((((u64)x33 * x15) + ((u64)x29 * x19)) + ((u64)x37 * x11)) + ((u64)x25 * x20)) + ((u64)x38 * x7))) + ((u64)x31 * x17)) + ((u64)x35 * x13)) + ((u64)x27 * x21)) + ((u64)x39 * x9));
+ { u64 x51 = (((((((((u64)x33 * x17) + ((u64)x35 * x15)) + ((u64)x31 * x19)) + ((u64)x37 * x13)) + ((u64)x29 * x21)) + ((u64)x39 * x11)) + ((u64)x27 * x20)) + ((u64)x38 * x9));
+ { u64 x52 = (((((u64)x35 * x17) + (0x2 * (((((u64)x33 * x19) + ((u64)x37 * x15)) + ((u64)x29 * x20)) + ((u64)x38 * x11)))) + ((u64)x31 * x21)) + ((u64)x39 * x13));
+ { u64 x53 = (((((((u64)x35 * x19) + ((u64)x37 * x17)) + ((u64)x33 * x21)) + ((u64)x39 * x15)) + ((u64)x31 * x20)) + ((u64)x38 * x13));
+ { u64 x54 = (((0x2 * ((((u64)x37 * x19) + ((u64)x33 * x20)) + ((u64)x38 * x15))) + ((u64)x35 * x21)) + ((u64)x39 * x17));
+ { u64 x55 = (((((u64)x37 * x21) + ((u64)x39 * x19)) + ((u64)x35 * x20)) + ((u64)x38 * x17));
+ { u64 x56 = (((u64)x39 * x21) + (0x2 * (((u64)x37 * x20) + ((u64)x38 * x19))));
+ { u64 x57 = (((u64)x39 * x20) + ((u64)x38 * x21));
+ { u64 x58 = ((u64)(0x2 * x38) * x20);
+ { u64 x59 = (x48 + (x58 << 0x4));
+ { u64 x60 = (x59 + (x58 << 0x1));
+ { u64 x61 = (x60 + x58);
+ { u64 x62 = (x47 + (x57 << 0x4));
+ { u64 x63 = (x62 + (x57 << 0x1));
+ { u64 x64 = (x63 + x57);
+ { u64 x65 = (x46 + (x56 << 0x4));
+ { u64 x66 = (x65 + (x56 << 0x1));
+ { u64 x67 = (x66 + x56);
+ { u64 x68 = (x45 + (x55 << 0x4));
+ { u64 x69 = (x68 + (x55 << 0x1));
+ { u64 x70 = (x69 + x55);
+ { u64 x71 = (x44 + (x54 << 0x4));
+ { u64 x72 = (x71 + (x54 << 0x1));
+ { u64 x73 = (x72 + x54);
+ { u64 x74 = (x43 + (x53 << 0x4));
+ { u64 x75 = (x74 + (x53 << 0x1));
+ { u64 x76 = (x75 + x53);
+ { u64 x77 = (x42 + (x52 << 0x4));
+ { u64 x78 = (x77 + (x52 << 0x1));
+ { u64 x79 = (x78 + x52);
+ { u64 x80 = (x41 + (x51 << 0x4));
+ { u64 x81 = (x80 + (x51 << 0x1));
+ { u64 x82 = (x81 + x51);
+ { u64 x83 = (x40 + (x50 << 0x4));
+ { u64 x84 = (x83 + (x50 << 0x1));
+ { u64 x85 = (x84 + x50);
+ { u64 x86 = (x85 >> 0x1a);
+ { u32 x87 = ((u32)x85 & 0x3ffffff);
+ { u64 x88 = (x86 + x82);
+ { u64 x89 = (x88 >> 0x19);
+ { u32 x90 = ((u32)x88 & 0x1ffffff);
+ { u64 x91 = (x89 + x79);
+ { u64 x92 = (x91 >> 0x1a);
+ { u32 x93 = ((u32)x91 & 0x3ffffff);
+ { u64 x94 = (x92 + x76);
+ { u64 x95 = (x94 >> 0x19);
+ { u32 x96 = ((u32)x94 & 0x1ffffff);
+ { u64 x97 = (x95 + x73);
+ { u64 x98 = (x97 >> 0x1a);
+ { u32 x99 = ((u32)x97 & 0x3ffffff);
+ { u64 x100 = (x98 + x70);
+ { u64 x101 = (x100 >> 0x19);
+ { u32 x102 = ((u32)x100 & 0x1ffffff);
+ { u64 x103 = (x101 + x67);
+ { u64 x104 = (x103 >> 0x1a);
+ { u32 x105 = ((u32)x103 & 0x3ffffff);
+ { u64 x106 = (x104 + x64);
+ { u64 x107 = (x106 >> 0x19);
+ { u32 x108 = ((u32)x106 & 0x1ffffff);
+ { u64 x109 = (x107 + x61);
+ { u64 x110 = (x109 >> 0x1a);
+ { u32 x111 = ((u32)x109 & 0x3ffffff);
+ { u64 x112 = (x110 + x49);
+ { u64 x113 = (x112 >> 0x19);
+ { u32 x114 = ((u32)x112 & 0x1ffffff);
+ { u64 x115 = (x87 + (0x13 * x113));
+ { u32 x116 = (u32) (x115 >> 0x1a);
+ { u32 x117 = ((u32)x115 & 0x3ffffff);
+ { u32 x118 = (x116 + x90);
+ { u32 x119 = (x118 >> 0x19);
+ { u32 x120 = (x118 & 0x1ffffff);
+ out[0] = x117;
+ out[1] = x120;
+ out[2] = (x119 + x93);
+ out[3] = x96;
+ out[4] = x99;
+ out[5] = x102;
+ out[6] = x105;
+ out[7] = x108;
+ out[8] = x111;
+ out[9] = x114;
+ }}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}
+}
+
+static __always_inline void fe_mul121666(fe *h, const fe_loose *f)
+{
+ fe_mul_121666_impl(h->v, f->v);
+}
+
+void curve25519_generic(u8 out[CURVE25519_KEY_SIZE],
+ const u8 scalar[CURVE25519_KEY_SIZE],
+ const u8 point[CURVE25519_KEY_SIZE])
+{
+ fe x1, x2, z2, x3, z3;
+ fe_loose x2l, z2l, x3l;
+ unsigned swap = 0;
+ int pos;
+ u8 e[32];
+
+ memcpy(e, scalar, 32);
+ curve25519_clamp_secret(e);
+
+ /* The following implementation was transcribed to Coq and proven to
+ * correspond to unary scalar multiplication in affine coordinates given
+ * that x1 != 0 is the x coordinate of some point on the curve. It was
+ * also checked in Coq that doing a ladderstep with x1 = x3 = 0 gives
+ * z2' = z3' = 0, and z2 = z3 = 0 gives z2' = z3' = 0. The statement was
+ * quantified over the underlying field, so it applies to Curve25519
+ * itself and the quadratic twist of Curve25519. It was not proven in
+ * Coq that prime-field arithmetic correctly simulates extension-field
+ * arithmetic on prime-field values. The decoding of the byte array
+ * representation of e was not considered.
+ *
+ * Specification of Montgomery curves in affine coordinates:
+ * <https://github.com/mit-plv/fiat-crypto/blob/2456d821825521f7e03e65882cc3521795b0320f/src/Spec/MontgomeryCurve.v#L27>
+ *
+ * Proof that these form a group that is isomorphic to a Weierstrass
+ * curve:
+ * <https://github.com/mit-plv/fiat-crypto/blob/2456d821825521f7e03e65882cc3521795b0320f/src/Curves/Montgomery/AffineProofs.v#L35>
+ *
+ * Coq transcription and correctness proof of the loop
+ * (where scalarbits=255):
+ * <https://github.com/mit-plv/fiat-crypto/blob/2456d821825521f7e03e65882cc3521795b0320f/src/Curves/Montgomery/XZ.v#L118>
+ * <https://github.com/mit-plv/fiat-crypto/blob/2456d821825521f7e03e65882cc3521795b0320f/src/Curves/Montgomery/XZProofs.v#L278>
+ * preconditions: 0 <= e < 2^255 (not necessarily e < order),
+ * fe_invert(0) = 0
+ */
+ fe_frombytes(&x1, point);
+ fe_1(&x2);
+ fe_0(&z2);
+ fe_copy(&x3, &x1);
+ fe_1(&z3);
+
+ for (pos = 254; pos >= 0; --pos) {
+ fe tmp0, tmp1;
+ fe_loose tmp0l, tmp1l;
+ /* loop invariant as of right before the test, for the case
+ * where x1 != 0:
+ * pos >= -1; if z2 = 0 then x2 is nonzero; if z3 = 0 then x3
+ * is nonzero
+ * let r := e >> (pos+1) in the following equalities of
+ * projective points:
+ * to_xz (r*P) === if swap then (x3, z3) else (x2, z2)
+ * to_xz ((r+1)*P) === if swap then (x2, z2) else (x3, z3)
+ * x1 is the nonzero x coordinate of the nonzero
+ * point (r*P-(r+1)*P)
+ */
+ unsigned b = 1 & (e[pos / 8] >> (pos & 7));
+ swap ^= b;
+ fe_cswap(&x2, &x3, swap);
+ fe_cswap(&z2, &z3, swap);
+ swap = b;
+ /* Coq transcription of ladderstep formula (called from
+ * transcribed loop):
+ * <https://github.com/mit-plv/fiat-crypto/blob/2456d821825521f7e03e65882cc3521795b0320f/src/Curves/Montgomery/XZ.v#L89>
+ * <https://github.com/mit-plv/fiat-crypto/blob/2456d821825521f7e03e65882cc3521795b0320f/src/Curves/Montgomery/XZProofs.v#L131>
+ * x1 != 0 <https://github.com/mit-plv/fiat-crypto/blob/2456d821825521f7e03e65882cc3521795b0320f/src/Curves/Montgomery/XZProofs.v#L217>
+ * x1 = 0 <https://github.com/mit-plv/fiat-crypto/blob/2456d821825521f7e03e65882cc3521795b0320f/src/Curves/Montgomery/XZProofs.v#L147>
+ */
+ fe_sub(&tmp0l, &x3, &z3);
+ fe_sub(&tmp1l, &x2, &z2);
+ fe_add(&x2l, &x2, &z2);
+ fe_add(&z2l, &x3, &z3);
+ fe_mul_tll(&z3, &tmp0l, &x2l);
+ fe_mul_tll(&z2, &z2l, &tmp1l);
+ fe_sq_tl(&tmp0, &tmp1l);
+ fe_sq_tl(&tmp1, &x2l);
+ fe_add(&x3l, &z3, &z2);
+ fe_sub(&z2l, &z3, &z2);
+ fe_mul_ttt(&x2, &tmp1, &tmp0);
+ fe_sub(&tmp1l, &tmp1, &tmp0);
+ fe_sq_tl(&z2, &z2l);
+ fe_mul121666(&z3, &tmp1l);
+ fe_sq_tl(&x3, &x3l);
+ fe_add(&tmp0l, &tmp0, &z3);
+ fe_mul_ttt(&z3, &x1, &z2);
+ fe_mul_tll(&z2, &tmp1l, &tmp0l);
+ }
+ /* here pos=-1, so r=e, so to_xz (e*P) === if swap then (x3, z3)
+ * else (x2, z2)
+ */
+ fe_cswap(&x2, &x3, swap);
+ fe_cswap(&z2, &z3, swap);
+
+ fe_invert(&z2, &z2);
+ fe_mul_ttt(&x2, &x2, &z2);
+ fe_tobytes(out, &x2);
+
+ memzero_explicit(&x1, sizeof(x1));
+ memzero_explicit(&x2, sizeof(x2));
+ memzero_explicit(&z2, sizeof(z2));
+ memzero_explicit(&x3, sizeof(x3));
+ memzero_explicit(&z3, sizeof(z3));
+ memzero_explicit(&x2l, sizeof(x2l));
+ memzero_explicit(&z2l, sizeof(z2l));
+ memzero_explicit(&x3l, sizeof(x3l));
+ memzero_explicit(&e, sizeof(e));
+}
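
fe_cswap() above is the ladder's constant-time conditional swap: b = 0 - b widens the 0/1 selector into an all-zeros or all-ones mask, and the masked XOR then either swaps every limb or leaves both operands untouched, with no secret-dependent branch. A minimal standalone sketch of the same idiom (the harness and names here are illustrative, not part of the patch):

    #include <assert.h>
    #include <stdint.h>

    static void cswap32(uint32_t *f, uint32_t *g, unsigned int b)
    {
            /* b in {0,1}: 0 - 1 wraps to 0xffffffff, 0 - 0 stays 0 */
            uint32_t mask = 0 - (uint32_t)b;
            uint32_t x = (*f ^ *g) & mask;  /* zero when b == 0 */

            *f ^= x;        /* b == 1: *f becomes the old *g */
            *g ^= x;        /* b == 1: *g becomes the old *f */
    }

    int main(void)
    {
            uint32_t a = 5, b = 9;

            cswap32(&a, &b, 0);
            assert(a == 5 && b == 9);       /* untouched */
            cswap32(&a, &b, 1);
            assert(a == 9 && b == 5);       /* swapped, branch-free */
            return 0;
    }

Because the mask is applied uniformly to all ten limbs, timing and memory traffic are independent of b, which the Montgomery ladder in curve25519_generic() relies on.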
diff --git a/lib/crypto/curve25519-hacl64.c b/lib/crypto/curve25519-hacl64.c
new file mode 100644
index 000000000000..771d82dc5f14
--- /dev/null
+++ b/lib/crypto/curve25519-hacl64.c
@@ -0,0 +1,788 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Copyright (C) 2016-2017 INRIA and Microsoft Corporation.
+ * Copyright (C) 2018-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ *
+ * This is a machine-generated formally verified implementation of Curve25519
+ * ECDH from: <https://github.com/mitls/hacl-star>. Though originally machine
+ * generated, it has been tweaked to be suitable for use in the kernel. It is
+ * optimized for 64-bit machines that can efficiently work with 128-bit
+ * integer types.
+ */
+
+#include <asm/unaligned.h>
+#include <crypto/curve25519.h>
+#include <linux/string.h>
+
+typedef __uint128_t u128;
+
+static __always_inline u64 u64_eq_mask(u64 a, u64 b)
+{
+ u64 x = a ^ b;
+ u64 minus_x = ~x + (u64)1U;
+ u64 x_or_minus_x = x | minus_x;
+ u64 xnx = x_or_minus_x >> (u32)63U;
+ u64 c = xnx - (u64)1U;
+ return c;
+}
+
+static __always_inline u64 u64_gte_mask(u64 a, u64 b)
+{
+ u64 x = a;
+ u64 y = b;
+ u64 x_xor_y = x ^ y;
+ u64 x_sub_y = x - y;
+ u64 x_sub_y_xor_y = x_sub_y ^ y;
+ u64 q = x_xor_y | x_sub_y_xor_y;
+ u64 x_xor_q = x ^ q;
+ u64 x_xor_q_ = x_xor_q >> (u32)63U;
+ u64 c = x_xor_q_ - (u64)1U;
+ return c;
+}
+
+static __always_inline void modulo_carry_top(u64 *b)
+{
+ u64 b4 = b[4];
+ u64 b0 = b[0];
+ u64 b4_ = b4 & 0x7ffffffffffffLLU;
+ u64 b0_ = b0 + 19 * (b4 >> 51);
+ b[4] = b4_;
+ b[0] = b0_;
+}
+
+static __always_inline void fproduct_copy_from_wide_(u64 *output, u128 *input)
+{
+ {
+ u128 xi = input[0];
+ output[0] = ((u64)(xi));
+ }
+ {
+ u128 xi = input[1];
+ output[1] = ((u64)(xi));
+ }
+ {
+ u128 xi = input[2];
+ output[2] = ((u64)(xi));
+ }
+ {
+ u128 xi = input[3];
+ output[3] = ((u64)(xi));
+ }
+ {
+ u128 xi = input[4];
+ output[4] = ((u64)(xi));
+ }
+}
+
+static __always_inline void
+fproduct_sum_scalar_multiplication_(u128 *output, u64 *input, u64 s)
+{
+ output[0] += (u128)input[0] * s;
+ output[1] += (u128)input[1] * s;
+ output[2] += (u128)input[2] * s;
+ output[3] += (u128)input[3] * s;
+ output[4] += (u128)input[4] * s;
+}
+
+static __always_inline void fproduct_carry_wide_(u128 *tmp)
+{
+ {
+ u32 ctr = 0;
+ u128 tctr = tmp[ctr];
+ u128 tctrp1 = tmp[ctr + 1];
+ u64 r0 = ((u64)(tctr)) & 0x7ffffffffffffLLU;
+ u128 c = ((tctr) >> (51));
+ tmp[ctr] = ((u128)(r0));
+ tmp[ctr + 1] = ((tctrp1) + (c));
+ }
+ {
+ u32 ctr = 1;
+ u128 tctr = tmp[ctr];
+ u128 tctrp1 = tmp[ctr + 1];
+ u64 r0 = ((u64)(tctr)) & 0x7ffffffffffffLLU;
+ u128 c = ((tctr) >> (51));
+ tmp[ctr] = ((u128)(r0));
+ tmp[ctr + 1] = ((tctrp1) + (c));
+ }
+
+ {
+ u32 ctr = 2;
+ u128 tctr = tmp[ctr];
+ u128 tctrp1 = tmp[ctr + 1];
+ u64 r0 = ((u64)(tctr)) & 0x7ffffffffffffLLU;
+ u128 c = ((tctr) >> (51));
+ tmp[ctr] = ((u128)(r0));
+ tmp[ctr + 1] = ((tctrp1) + (c));
+ }
+ {
+ u32 ctr = 3;
+ u128 tctr = tmp[ctr];
+ u128 tctrp1 = tmp[ctr + 1];
+ u64 r0 = ((u64)(tctr)) & 0x7ffffffffffffLLU;
+ u128 c = ((tctr) >> (51));
+ tmp[ctr] = ((u128)(r0));
+ tmp[ctr + 1] = ((tctrp1) + (c));
+ }
+}
+
+static __always_inline void fmul_shift_reduce(u64 *output)
+{
+ u64 tmp = output[4];
+ u64 b0;
+ {
+ u32 ctr = 5 - 0 - 1;
+ u64 z = output[ctr - 1];
+ output[ctr] = z;
+ }
+ {
+ u32 ctr = 5 - 1 - 1;
+ u64 z = output[ctr - 1];
+ output[ctr] = z;
+ }
+ {
+ u32 ctr = 5 - 2 - 1;
+ u64 z = output[ctr - 1];
+ output[ctr] = z;
+ }
+ {
+ u32 ctr = 5 - 3 - 1;
+ u64 z = output[ctr - 1];
+ output[ctr] = z;
+ }
+ output[0] = tmp;
+ b0 = output[0];
+ output[0] = 19 * b0;
+}
+
+static __always_inline void fmul_mul_shift_reduce_(u128 *output, u64 *input,
+ u64 *input21)
+{
+ u32 i;
+ u64 input2i;
+ {
+ u64 input2i = input21[0];
+ fproduct_sum_scalar_multiplication_(output, input, input2i);
+ fmul_shift_reduce(input);
+ }
+ {
+ u64 input2i = input21[1];
+ fproduct_sum_scalar_multiplication_(output, input, input2i);
+ fmul_shift_reduce(input);
+ }
+ {
+ u64 input2i = input21[2];
+ fproduct_sum_scalar_multiplication_(output, input, input2i);
+ fmul_shift_reduce(input);
+ }
+ {
+ u64 input2i = input21[3];
+ fproduct_sum_scalar_multiplication_(output, input, input2i);
+ fmul_shift_reduce(input);
+ }
+ i = 4;
+ input2i = input21[i];
+ fproduct_sum_scalar_multiplication_(output, input, input2i);
+}
+
+static __always_inline void fmul_fmul(u64 *output, u64 *input, u64 *input21)
+{
+ u64 tmp[5] = { input[0], input[1], input[2], input[3], input[4] };
+ {
+ u128 b4;
+ u128 b0;
+ u128 b4_;
+ u128 b0_;
+ u64 i0;
+ u64 i1;
+ u64 i0_;
+ u64 i1_;
+ u128 t[5] = { 0 };
+ fmul_mul_shift_reduce_(t, tmp, input21);
+ fproduct_carry_wide_(t);
+ b4 = t[4];
+ b0 = t[0];
+ b4_ = ((b4) & (((u128)(0x7ffffffffffffLLU))));
+ b0_ = ((b0) + (((u128)(19) * (((u64)(((b4) >> (51))))))));
+ t[4] = b4_;
+ t[0] = b0_;
+ fproduct_copy_from_wide_(output, t);
+ i0 = output[0];
+ i1 = output[1];
+ i0_ = i0 & 0x7ffffffffffffLLU;
+ i1_ = i1 + (i0 >> 51);
+ output[0] = i0_;
+ output[1] = i1_;
+ }
+}
+
+static __always_inline void fsquare_fsquare__(u128 *tmp, u64 *output)
+{
+ u64 r0 = output[0];
+ u64 r1 = output[1];
+ u64 r2 = output[2];
+ u64 r3 = output[3];
+ u64 r4 = output[4];
+ u64 d0 = r0 * 2;
+ u64 d1 = r1 * 2;
+ u64 d2 = r2 * 2 * 19;
+ u64 d419 = r4 * 19;
+ u64 d4 = d419 * 2;
+ u128 s0 = ((((((u128)(r0) * (r0))) + (((u128)(d4) * (r1))))) +
+ (((u128)(d2) * (r3))));
+ u128 s1 = ((((((u128)(d0) * (r1))) + (((u128)(d4) * (r2))))) +
+ (((u128)(r3 * 19) * (r3))));
+ u128 s2 = ((((((u128)(d0) * (r2))) + (((u128)(r1) * (r1))))) +
+ (((u128)(d4) * (r3))));
+ u128 s3 = ((((((u128)(d0) * (r3))) + (((u128)(d1) * (r2))))) +
+ (((u128)(r4) * (d419))));
+ u128 s4 = ((((((u128)(d0) * (r4))) + (((u128)(d1) * (r3))))) +
+ (((u128)(r2) * (r2))));
+ tmp[0] = s0;
+ tmp[1] = s1;
+ tmp[2] = s2;
+ tmp[3] = s3;
+ tmp[4] = s4;
+}
+
+static __always_inline void fsquare_fsquare_(u128 *tmp, u64 *output)
+{
+ u128 b4;
+ u128 b0;
+ u128 b4_;
+ u128 b0_;
+ u64 i0;
+ u64 i1;
+ u64 i0_;
+ u64 i1_;
+ fsquare_fsquare__(tmp, output);
+ fproduct_carry_wide_(tmp);
+ b4 = tmp[4];
+ b0 = tmp[0];
+ b4_ = ((b4) & (((u128)(0x7ffffffffffffLLU))));
+ b0_ = ((b0) + (((u128)(19) * (((u64)(((b4) >> (51))))))));
+ tmp[4] = b4_;
+ tmp[0] = b0_;
+ fproduct_copy_from_wide_(output, tmp);
+ i0 = output[0];
+ i1 = output[1];
+ i0_ = i0 & 0x7ffffffffffffLLU;
+ i1_ = i1 + (i0 >> 51);
+ output[0] = i0_;
+ output[1] = i1_;
+}
+
+static __always_inline void fsquare_fsquare_times_(u64 *output, u128 *tmp,
+ u32 count1)
+{
+ u32 i;
+ fsquare_fsquare_(tmp, output);
+ for (i = 1; i < count1; ++i)
+ fsquare_fsquare_(tmp, output);
+}
+
+static __always_inline void fsquare_fsquare_times(u64 *output, u64 *input,
+ u32 count1)
+{
+ u128 t[5];
+ memcpy(output, input, 5 * sizeof(*input));
+ fsquare_fsquare_times_(output, t, count1);
+}
+
+static __always_inline void fsquare_fsquare_times_inplace(u64 *output,
+ u32 count1)
+{
+ u128 t[5];
+ fsquare_fsquare_times_(output, t, count1);
+}
+
+static __always_inline void crecip_crecip(u64 *out, u64 *z)
+{
+ u64 buf[20] = { 0 };
+ u64 *a0 = buf;
+ u64 *t00 = buf + 5;
+ u64 *b0 = buf + 10;
+ u64 *t01;
+ u64 *b1;
+ u64 *c0;
+ u64 *a;
+ u64 *t0;
+ u64 *b;
+ u64 *c;
+ fsquare_fsquare_times(a0, z, 1);
+ fsquare_fsquare_times(t00, a0, 2);
+ fmul_fmul(b0, t00, z);
+ fmul_fmul(a0, b0, a0);
+ fsquare_fsquare_times(t00, a0, 1);
+ fmul_fmul(b0, t00, b0);
+ fsquare_fsquare_times(t00, b0, 5);
+ t01 = buf + 5;
+ b1 = buf + 10;
+ c0 = buf + 15;
+ fmul_fmul(b1, t01, b1);
+ fsquare_fsquare_times(t01, b1, 10);
+ fmul_fmul(c0, t01, b1);
+ fsquare_fsquare_times(t01, c0, 20);
+ fmul_fmul(t01, t01, c0);
+ fsquare_fsquare_times_inplace(t01, 10);
+ fmul_fmul(b1, t01, b1);
+ fsquare_fsquare_times(t01, b1, 50);
+ a = buf;
+ t0 = buf + 5;
+ b = buf + 10;
+ c = buf + 15;
+ fmul_fmul(c, t0, b);
+ fsquare_fsquare_times(t0, c, 100);
+ fmul_fmul(t0, t0, c);
+ fsquare_fsquare_times_inplace(t0, 50);
+ fmul_fmul(t0, t0, b);
+ fsquare_fsquare_times_inplace(t0, 5);
+ fmul_fmul(out, t0, a);
+}
+
+static __always_inline void fsum(u64 *a, u64 *b)
+{
+ a[0] += b[0];
+ a[1] += b[1];
+ a[2] += b[2];
+ a[3] += b[3];
+ a[4] += b[4];
+}
+
+static __always_inline void fdifference(u64 *a, u64 *b)
+{
+ u64 tmp[5] = { 0 };
+ u64 b0;
+ u64 b1;
+ u64 b2;
+ u64 b3;
+ u64 b4;
+ memcpy(tmp, b, 5 * sizeof(*b));
+ b0 = tmp[0];
+ b1 = tmp[1];
+ b2 = tmp[2];
+ b3 = tmp[3];
+ b4 = tmp[4];
+ tmp[0] = b0 + 0x3fffffffffff68LLU;
+ tmp[1] = b1 + 0x3ffffffffffff8LLU;
+ tmp[2] = b2 + 0x3ffffffffffff8LLU;
+ tmp[3] = b3 + 0x3ffffffffffff8LLU;
+ tmp[4] = b4 + 0x3ffffffffffff8LLU;
+ {
+ u64 xi = a[0];
+ u64 yi = tmp[0];
+ a[0] = yi - xi;
+ }
+ {
+ u64 xi = a[1];
+ u64 yi = tmp[1];
+ a[1] = yi - xi;
+ }
+ {
+ u64 xi = a[2];
+ u64 yi = tmp[2];
+ a[2] = yi - xi;
+ }
+ {
+ u64 xi = a[3];
+ u64 yi = tmp[3];
+ a[3] = yi - xi;
+ }
+ {
+ u64 xi = a[4];
+ u64 yi = tmp[4];
+ a[4] = yi - xi;
+ }
+}
+
+static __always_inline void fscalar(u64 *output, u64 *b, u64 s)
+{
+ u128 tmp[5];
+ u128 b4;
+ u128 b0;
+ u128 b4_;
+ u128 b0_;
+ {
+ u64 xi = b[0];
+ tmp[0] = ((u128)(xi) * (s));
+ }
+ {
+ u64 xi = b[1];
+ tmp[1] = ((u128)(xi) * (s));
+ }
+ {
+ u64 xi = b[2];
+ tmp[2] = ((u128)(xi) * (s));
+ }
+ {
+ u64 xi = b[3];
+ tmp[3] = ((u128)(xi) * (s));
+ }
+ {
+ u64 xi = b[4];
+ tmp[4] = ((u128)(xi) * (s));
+ }
+ fproduct_carry_wide_(tmp);
+ b4 = tmp[4];
+ b0 = tmp[0];
+ b4_ = ((b4) & (((u128)(0x7ffffffffffffLLU))));
+ b0_ = ((b0) + (((u128)(19) * (((u64)(((b4) >> (51))))))));
+ tmp[4] = b4_;
+ tmp[0] = b0_;
+ fproduct_copy_from_wide_(output, tmp);
+}
+
+static __always_inline void fmul(u64 *output, u64 *a, u64 *b)
+{
+ fmul_fmul(output, a, b);
+}
+
+static __always_inline void crecip(u64 *output, u64 *input)
+{
+ crecip_crecip(output, input);
+}
+
+static __always_inline void point_swap_conditional_step(u64 *a, u64 *b,
+ u64 swap1, u32 ctr)
+{
+ u32 i = ctr - 1;
+ u64 ai = a[i];
+ u64 bi = b[i];
+ u64 x = swap1 & (ai ^ bi);
+ u64 ai1 = ai ^ x;
+ u64 bi1 = bi ^ x;
+ a[i] = ai1;
+ b[i] = bi1;
+}
+
+static __always_inline void point_swap_conditional5(u64 *a, u64 *b, u64 swap1)
+{
+ point_swap_conditional_step(a, b, swap1, 5);
+ point_swap_conditional_step(a, b, swap1, 4);
+ point_swap_conditional_step(a, b, swap1, 3);
+ point_swap_conditional_step(a, b, swap1, 2);
+ point_swap_conditional_step(a, b, swap1, 1);
+}
+
+static __always_inline void point_swap_conditional(u64 *a, u64 *b, u64 iswap)
+{
+ u64 swap1 = 0 - iswap;
+ point_swap_conditional5(a, b, swap1);
+ point_swap_conditional5(a + 5, b + 5, swap1);
+}
+
+static __always_inline void point_copy(u64 *output, u64 *input)
+{
+ memcpy(output, input, 5 * sizeof(*input));
+ memcpy(output + 5, input + 5, 5 * sizeof(*input));
+}
+
+static __always_inline void addanddouble_fmonty(u64 *pp, u64 *ppq, u64 *p,
+ u64 *pq, u64 *qmqp)
+{
+ u64 *qx = qmqp;
+ u64 *x2 = pp;
+ u64 *z2 = pp + 5;
+ u64 *x3 = ppq;
+ u64 *z3 = ppq + 5;
+ u64 *x = p;
+ u64 *z = p + 5;
+ u64 *xprime = pq;
+ u64 *zprime = pq + 5;
+ u64 buf[40] = { 0 };
+ u64 *origx = buf;
+ u64 *origxprime0 = buf + 5;
+ u64 *xxprime0;
+ u64 *zzprime0;
+ u64 *origxprime;
+ xxprime0 = buf + 25;
+ zzprime0 = buf + 30;
+ memcpy(origx, x, 5 * sizeof(*x));
+ fsum(x, z);
+ fdifference(z, origx);
+ memcpy(origxprime0, xprime, 5 * sizeof(*xprime));
+ fsum(xprime, zprime);
+ fdifference(zprime, origxprime0);
+ fmul(xxprime0, xprime, z);
+ fmul(zzprime0, x, zprime);
+ origxprime = buf + 5;
+ {
+ u64 *xx0;
+ u64 *zz0;
+ u64 *xxprime;
+ u64 *zzprime;
+ u64 *zzzprime;
+ xx0 = buf + 15;
+ zz0 = buf + 20;
+ xxprime = buf + 25;
+ zzprime = buf + 30;
+ zzzprime = buf + 35;
+ memcpy(origxprime, xxprime, 5 * sizeof(*xxprime));
+ fsum(xxprime, zzprime);
+ fdifference(zzprime, origxprime);
+ fsquare_fsquare_times(x3, xxprime, 1);
+ fsquare_fsquare_times(zzzprime, zzprime, 1);
+ fmul(z3, zzzprime, qx);
+ fsquare_fsquare_times(xx0, x, 1);
+ fsquare_fsquare_times(zz0, z, 1);
+ {
+ u64 *zzz;
+ u64 *xx;
+ u64 *zz;
+ u64 scalar;
+ zzz = buf + 10;
+ xx = buf + 15;
+ zz = buf + 20;
+ fmul(x2, xx, zz);
+ fdifference(zz, xx);
+ scalar = 121665;
+ fscalar(zzz, zz, scalar);
+ fsum(zzz, xx);
+ fmul(z2, zzz, zz);
+ }
+ }
+}
+
+static __always_inline void
+ladder_smallloop_cmult_small_loop_step(u64 *nq, u64 *nqpq, u64 *nq2, u64 *nqpq2,
+ u64 *q, u8 byt)
+{
+ u64 bit0 = (u64)(byt >> 7);
+ u64 bit;
+ point_swap_conditional(nq, nqpq, bit0);
+ addanddouble_fmonty(nq2, nqpq2, nq, nqpq, q);
+ bit = (u64)(byt >> 7);
+ point_swap_conditional(nq2, nqpq2, bit);
+}
+
+static __always_inline void
+ladder_smallloop_cmult_small_loop_double_step(u64 *nq, u64 *nqpq, u64 *nq2,
+ u64 *nqpq2, u64 *q, u8 byt)
+{
+ u8 byt1;
+ ladder_smallloop_cmult_small_loop_step(nq, nqpq, nq2, nqpq2, q, byt);
+ byt1 = byt << 1;
+ ladder_smallloop_cmult_small_loop_step(nq2, nqpq2, nq, nqpq, q, byt1);
+}
+
+static __always_inline void
+ladder_smallloop_cmult_small_loop(u64 *nq, u64 *nqpq, u64 *nq2, u64 *nqpq2,
+ u64 *q, u8 byt, u32 i)
+{
+ while (i--) {
+ ladder_smallloop_cmult_small_loop_double_step(nq, nqpq, nq2,
+ nqpq2, q, byt);
+ byt <<= 2;
+ }
+}
+
+static __always_inline void ladder_bigloop_cmult_big_loop(u8 *n1, u64 *nq,
+ u64 *nqpq, u64 *nq2,
+ u64 *nqpq2, u64 *q,
+ u32 i)
+{
+ while (i--) {
+ u8 byte = n1[i];
+ ladder_smallloop_cmult_small_loop(nq, nqpq, nq2, nqpq2, q,
+ byte, 4);
+ }
+}
+
+static void ladder_cmult(u64 *result, u8 *n1, u64 *q)
+{
+ u64 point_buf[40] = { 0 };
+ u64 *nq = point_buf;
+ u64 *nqpq = point_buf + 10;
+ u64 *nq2 = point_buf + 20;
+ u64 *nqpq2 = point_buf + 30;
+ point_copy(nqpq, q);
+ nq[0] = 1;
+ ladder_bigloop_cmult_big_loop(n1, nq, nqpq, nq2, nqpq2, q, 32);
+ point_copy(result, nq);
+}
+
+static __always_inline void format_fexpand(u64 *output, const u8 *input)
+{
+ const u8 *x00 = input + 6;
+ const u8 *x01 = input + 12;
+ const u8 *x02 = input + 19;
+ const u8 *x0 = input + 24;
+ u64 i0, i1, i2, i3, i4, output0, output1, output2, output3, output4;
+ i0 = get_unaligned_le64(input);
+ i1 = get_unaligned_le64(x00);
+ i2 = get_unaligned_le64(x01);
+ i3 = get_unaligned_le64(x02);
+ i4 = get_unaligned_le64(x0);
+ output0 = i0 & 0x7ffffffffffffLLU;
+ output1 = i1 >> 3 & 0x7ffffffffffffLLU;
+ output2 = i2 >> 6 & 0x7ffffffffffffLLU;
+ output3 = i3 >> 1 & 0x7ffffffffffffLLU;
+ output4 = i4 >> 12 & 0x7ffffffffffffLLU;
+ output[0] = output0;
+ output[1] = output1;
+ output[2] = output2;
+ output[3] = output3;
+ output[4] = output4;
+}
+
+static __always_inline void format_fcontract_first_carry_pass(u64 *input)
+{
+ u64 t0 = input[0];
+ u64 t1 = input[1];
+ u64 t2 = input[2];
+ u64 t3 = input[3];
+ u64 t4 = input[4];
+ u64 t1_ = t1 + (t0 >> 51);
+ u64 t0_ = t0 & 0x7ffffffffffffLLU;
+ u64 t2_ = t2 + (t1_ >> 51);
+ u64 t1__ = t1_ & 0x7ffffffffffffLLU;
+ u64 t3_ = t3 + (t2_ >> 51);
+ u64 t2__ = t2_ & 0x7ffffffffffffLLU;
+ u64 t4_ = t4 + (t3_ >> 51);
+ u64 t3__ = t3_ & 0x7ffffffffffffLLU;
+ input[0] = t0_;
+ input[1] = t1__;
+ input[2] = t2__;
+ input[3] = t3__;
+ input[4] = t4_;
+}
+
+static __always_inline void format_fcontract_first_carry_full(u64 *input)
+{
+ format_fcontract_first_carry_pass(input);
+ modulo_carry_top(input);
+}
+
+static __always_inline void format_fcontract_second_carry_pass(u64 *input)
+{
+ u64 t0 = input[0];
+ u64 t1 = input[1];
+ u64 t2 = input[2];
+ u64 t3 = input[3];
+ u64 t4 = input[4];
+ u64 t1_ = t1 + (t0 >> 51);
+ u64 t0_ = t0 & 0x7ffffffffffffLLU;
+ u64 t2_ = t2 + (t1_ >> 51);
+ u64 t1__ = t1_ & 0x7ffffffffffffLLU;
+ u64 t3_ = t3 + (t2_ >> 51);
+ u64 t2__ = t2_ & 0x7ffffffffffffLLU;
+ u64 t4_ = t4 + (t3_ >> 51);
+ u64 t3__ = t3_ & 0x7ffffffffffffLLU;
+ input[0] = t0_;
+ input[1] = t1__;
+ input[2] = t2__;
+ input[3] = t3__;
+ input[4] = t4_;
+}
+
+static __always_inline void format_fcontract_second_carry_full(u64 *input)
+{
+ u64 i0;
+ u64 i1;
+ u64 i0_;
+ u64 i1_;
+ format_fcontract_second_carry_pass(input);
+ modulo_carry_top(input);
+ i0 = input[0];
+ i1 = input[1];
+ i0_ = i0 & 0x7ffffffffffffLLU;
+ i1_ = i1 + (i0 >> 51);
+ input[0] = i0_;
+ input[1] = i1_;
+}
+
+static __always_inline void format_fcontract_trim(u64 *input)
+{
+ u64 a0 = input[0];
+ u64 a1 = input[1];
+ u64 a2 = input[2];
+ u64 a3 = input[3];
+ u64 a4 = input[4];
+ u64 mask0 = u64_gte_mask(a0, 0x7ffffffffffedLLU);
+ u64 mask1 = u64_eq_mask(a1, 0x7ffffffffffffLLU);
+ u64 mask2 = u64_eq_mask(a2, 0x7ffffffffffffLLU);
+ u64 mask3 = u64_eq_mask(a3, 0x7ffffffffffffLLU);
+ u64 mask4 = u64_eq_mask(a4, 0x7ffffffffffffLLU);
+ u64 mask = (((mask0 & mask1) & mask2) & mask3) & mask4;
+ u64 a0_ = a0 - (0x7ffffffffffedLLU & mask);
+ u64 a1_ = a1 - (0x7ffffffffffffLLU & mask);
+ u64 a2_ = a2 - (0x7ffffffffffffLLU & mask);
+ u64 a3_ = a3 - (0x7ffffffffffffLLU & mask);
+ u64 a4_ = a4 - (0x7ffffffffffffLLU & mask);
+ input[0] = a0_;
+ input[1] = a1_;
+ input[2] = a2_;
+ input[3] = a3_;
+ input[4] = a4_;
+}
+
+static __always_inline void format_fcontract_store(u8 *output, u64 *input)
+{
+ u64 t0 = input[0];
+ u64 t1 = input[1];
+ u64 t2 = input[2];
+ u64 t3 = input[3];
+ u64 t4 = input[4];
+ u64 o0 = t1 << 51 | t0;
+ u64 o1 = t2 << 38 | t1 >> 13;
+ u64 o2 = t3 << 25 | t2 >> 26;
+ u64 o3 = t4 << 12 | t3 >> 39;
+ u8 *b0 = output;
+ u8 *b1 = output + 8;
+ u8 *b2 = output + 16;
+ u8 *b3 = output + 24;
+ put_unaligned_le64(o0, b0);
+ put_unaligned_le64(o1, b1);
+ put_unaligned_le64(o2, b2);
+ put_unaligned_le64(o3, b3);
+}
+
+static __always_inline void format_fcontract(u8 *output, u64 *input)
+{
+ format_fcontract_first_carry_full(input);
+ format_fcontract_second_carry_full(input);
+ format_fcontract_trim(input);
+ format_fcontract_store(output, input);
+}
+
+static __always_inline void format_scalar_of_point(u8 *scalar, u64 *point)
+{
+ u64 *x = point;
+ u64 *z = point + 5;
+ u64 buf[10] __aligned(32) = { 0 };
+ u64 *zmone = buf;
+ u64 *sc = buf + 5;
+ crecip(zmone, z);
+ fmul(sc, x, zmone);
+ format_fcontract(scalar, sc);
+}
+
+void curve25519_generic(u8 mypublic[CURVE25519_KEY_SIZE],
+ const u8 secret[CURVE25519_KEY_SIZE],
+ const u8 basepoint[CURVE25519_KEY_SIZE])
+{
+ u64 buf0[10] __aligned(32) = { 0 };
+ u64 *x0 = buf0;
+ u64 *z = buf0 + 5;
+ u64 *q;
+ format_fexpand(x0, basepoint);
+ z[0] = 1;
+ q = buf0;
+ {
+ u8 e[32] __aligned(32) = { 0 };
+ u8 *scalar;
+ memcpy(e, secret, 32);
+ curve25519_clamp_secret(e);
+ scalar = e;
+ {
+ u64 buf[15] = { 0 };
+ u64 *nq = buf;
+ u64 *x = nq;
+ x[0] = 1;
+ ladder_cmult(nq, scalar, q);
+ format_scalar_of_point(mypublic, nq);
+ memzero_explicit(buf, sizeof(buf));
+ }
+ memzero_explicit(e, sizeof(e));
+ }
+ memzero_explicit(buf0, sizeof(buf0));
+}
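
u64_eq_mask() and u64_gte_mask() at the top of this file are the HACL* branchless comparison primitives: each returns all-ones when the predicate holds and zero otherwise, so callers select values by masking rather than branching on secrets. A userspace sketch of the equality mask, with an assert harness added for illustration:

    #include <assert.h>
    #include <stdint.h>

    static uint64_t eq_mask(uint64_t a, uint64_t b)
    {
            uint64_t x = a ^ b;             /* zero iff a == b */
            uint64_t minus_x = ~x + 1;      /* two's-complement -x */
            /* x | -x has its top bit set for every nonzero x */
            uint64_t xnx = (x | minus_x) >> 63;

            return xnx - 1;                 /* 0 -> all-ones, 1 -> 0 */
    }

    int main(void)
    {
            assert(eq_mask(7, 7) == UINT64_MAX);
            assert(eq_mask(7, 8) == 0);
            return 0;
    }

u64_gte_mask() follows the same pattern but derives the result from the borrow of the subtraction; format_fcontract_trim() above uses both to conditionally subtract the prime without branching.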
diff --git a/lib/crypto/curve25519.c b/lib/crypto/curve25519.c
new file mode 100644
index 000000000000..0106bebe6900
--- /dev/null
+++ b/lib/crypto/curve25519.c
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ *
+ * This is an implementation of the Curve25519 ECDH algorithm, using either
+ * a 32-bit implementation or a 64-bit implementation with 128-bit integers,
+ * depending on what is supported by the target compiler.
+ *
+ * Information: https://cr.yp.to/ecdh.html
+ */
+
+#include <crypto/curve25519.h>
+#include <linux/module.h>
+#include <linux/init.h>
+
+const u8 curve25519_null_point[CURVE25519_KEY_SIZE] __aligned(32) = { 0 };
+const u8 curve25519_base_point[CURVE25519_KEY_SIZE] __aligned(32) = { 9 };
+
+EXPORT_SYMBOL(curve25519_null_point);
+EXPORT_SYMBOL(curve25519_base_point);
+EXPORT_SYMBOL(curve25519_generic);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Curve25519 scalar multiplication");
+MODULE_AUTHOR("Jason A. Donenfeld <Jason@zx2c4.com>");
diff --git a/lib/crypto/libchacha.c b/lib/crypto/libchacha.c
new file mode 100644
index 000000000000..dabc3accae05
--- /dev/null
+++ b/lib/crypto/libchacha.c
@@ -0,0 +1,35 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * The ChaCha stream cipher (RFC7539)
+ *
+ * Copyright (C) 2015 Martin Willi
+ */
+
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/module.h>
+
+#include <crypto/algapi.h> // for crypto_xor_cpy
+#include <crypto/chacha.h>
+
+void chacha_crypt_generic(u32 *state, u8 *dst, const u8 *src,
+ unsigned int bytes, int nrounds)
+{
+ /* aligned to potentially speed up crypto_xor() */
+ u8 stream[CHACHA_BLOCK_SIZE] __aligned(sizeof(long));
+
+ while (bytes >= CHACHA_BLOCK_SIZE) {
+ chacha_block_generic(state, stream, nrounds);
+ crypto_xor_cpy(dst, src, stream, CHACHA_BLOCK_SIZE);
+ bytes -= CHACHA_BLOCK_SIZE;
+ dst += CHACHA_BLOCK_SIZE;
+ src += CHACHA_BLOCK_SIZE;
+ }
+ if (bytes) {
+ chacha_block_generic(state, stream, nrounds);
+ crypto_xor_cpy(dst, src, stream, bytes);
+ }
+}
+EXPORT_SYMBOL(chacha_crypt_generic);
+
+MODULE_LICENSE("GPL");
diff --git a/lib/crypto/poly1305.c b/lib/crypto/poly1305.c
new file mode 100644
index 000000000000..32ec293c65ae
--- /dev/null
+++ b/lib/crypto/poly1305.c
@@ -0,0 +1,232 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Poly1305 authenticator algorithm, RFC7539
+ *
+ * Copyright (C) 2015 Martin Willi
+ *
+ * Based on public domain code by Andrew Moon and Daniel J. Bernstein.
+ */
+
+#include <crypto/internal/poly1305.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <asm/unaligned.h>
+
+static inline u64 mlt(u64 a, u64 b)
+{
+ return a * b;
+}
+
+static inline u32 sr(u64 v, u_char n)
+{
+ return v >> n;
+}
+
+static inline u32 and(u32 v, u32 mask)
+{
+ return v & mask;
+}
+
+void poly1305_core_setkey(struct poly1305_key *key, const u8 *raw_key)
+{
+ /* r &= 0xffffffc0ffffffc0ffffffc0fffffff */
+ key->r[0] = (get_unaligned_le32(raw_key + 0) >> 0) & 0x3ffffff;
+ key->r[1] = (get_unaligned_le32(raw_key + 3) >> 2) & 0x3ffff03;
+ key->r[2] = (get_unaligned_le32(raw_key + 6) >> 4) & 0x3ffc0ff;
+ key->r[3] = (get_unaligned_le32(raw_key + 9) >> 6) & 0x3f03fff;
+ key->r[4] = (get_unaligned_le32(raw_key + 12) >> 8) & 0x00fffff;
+}
+EXPORT_SYMBOL_GPL(poly1305_core_setkey);
+
+void poly1305_core_blocks(struct poly1305_state *state,
+ const struct poly1305_key *key, const void *src,
+ unsigned int nblocks, u32 hibit)
+{
+ u32 r0, r1, r2, r3, r4;
+ u32 s1, s2, s3, s4;
+ u32 h0, h1, h2, h3, h4;
+ u64 d0, d1, d2, d3, d4;
+
+ if (!nblocks)
+ return;
+
+ r0 = key->r[0];
+ r1 = key->r[1];
+ r2 = key->r[2];
+ r3 = key->r[3];
+ r4 = key->r[4];
+
+ s1 = r1 * 5;
+ s2 = r2 * 5;
+ s3 = r3 * 5;
+ s4 = r4 * 5;
+
+ h0 = state->h[0];
+ h1 = state->h[1];
+ h2 = state->h[2];
+ h3 = state->h[3];
+ h4 = state->h[4];
+
+ do {
+ /* h += m[i] */
+ h0 += (get_unaligned_le32(src + 0) >> 0) & 0x3ffffff;
+ h1 += (get_unaligned_le32(src + 3) >> 2) & 0x3ffffff;
+ h2 += (get_unaligned_le32(src + 6) >> 4) & 0x3ffffff;
+ h3 += (get_unaligned_le32(src + 9) >> 6) & 0x3ffffff;
+ h4 += (get_unaligned_le32(src + 12) >> 8) | (hibit << 24);
+
+ /* h *= r */
+ d0 = mlt(h0, r0) + mlt(h1, s4) + mlt(h2, s3) +
+ mlt(h3, s2) + mlt(h4, s1);
+ d1 = mlt(h0, r1) + mlt(h1, r0) + mlt(h2, s4) +
+ mlt(h3, s3) + mlt(h4, s2);
+ d2 = mlt(h0, r2) + mlt(h1, r1) + mlt(h2, r0) +
+ mlt(h3, s4) + mlt(h4, s3);
+ d3 = mlt(h0, r3) + mlt(h1, r2) + mlt(h2, r1) +
+ mlt(h3, r0) + mlt(h4, s4);
+ d4 = mlt(h0, r4) + mlt(h1, r3) + mlt(h2, r2) +
+ mlt(h3, r1) + mlt(h4, r0);
+
+ /* (partial) h %= p */
+ d1 += sr(d0, 26); h0 = and(d0, 0x3ffffff);
+ d2 += sr(d1, 26); h1 = and(d1, 0x3ffffff);
+ d3 += sr(d2, 26); h2 = and(d2, 0x3ffffff);
+ d4 += sr(d3, 26); h3 = and(d3, 0x3ffffff);
+ h0 += sr(d4, 26) * 5; h4 = and(d4, 0x3ffffff);
+ h1 += h0 >> 26; h0 = h0 & 0x3ffffff;
+
+ src += POLY1305_BLOCK_SIZE;
+ } while (--nblocks);
+
+ state->h[0] = h0;
+ state->h[1] = h1;
+ state->h[2] = h2;
+ state->h[3] = h3;
+ state->h[4] = h4;
+}
+EXPORT_SYMBOL_GPL(poly1305_core_blocks);
+
+void poly1305_core_emit(const struct poly1305_state *state, void *dst)
+{
+ u32 h0, h1, h2, h3, h4;
+ u32 g0, g1, g2, g3, g4;
+ u32 mask;
+
+ /* fully carry h */
+ h0 = state->h[0];
+ h1 = state->h[1];
+ h2 = state->h[2];
+ h3 = state->h[3];
+ h4 = state->h[4];
+
+ h2 += (h1 >> 26); h1 = h1 & 0x3ffffff;
+ h3 += (h2 >> 26); h2 = h2 & 0x3ffffff;
+ h4 += (h3 >> 26); h3 = h3 & 0x3ffffff;
+ h0 += (h4 >> 26) * 5; h4 = h4 & 0x3ffffff;
+ h1 += (h0 >> 26); h0 = h0 & 0x3ffffff;
+
+ /* compute h + -p */
+ g0 = h0 + 5;
+ g1 = h1 + (g0 >> 26); g0 &= 0x3ffffff;
+ g2 = h2 + (g1 >> 26); g1 &= 0x3ffffff;
+ g3 = h3 + (g2 >> 26); g2 &= 0x3ffffff;
+ g4 = h4 + (g3 >> 26) - (1 << 26); g3 &= 0x3ffffff;
+
+ /* select h if h < p, or h + -p if h >= p */
+ mask = (g4 >> ((sizeof(u32) * 8) - 1)) - 1;
+ g0 &= mask;
+ g1 &= mask;
+ g2 &= mask;
+ g3 &= mask;
+ g4 &= mask;
+ mask = ~mask;
+ h0 = (h0 & mask) | g0;
+ h1 = (h1 & mask) | g1;
+ h2 = (h2 & mask) | g2;
+ h3 = (h3 & mask) | g3;
+ h4 = (h4 & mask) | g4;
+
+ /* h = h % (2^128) */
+ put_unaligned_le32((h0 >> 0) | (h1 << 26), dst + 0);
+ put_unaligned_le32((h1 >> 6) | (h2 << 20), dst + 4);
+ put_unaligned_le32((h2 >> 12) | (h3 << 14), dst + 8);
+ put_unaligned_le32((h3 >> 18) | (h4 << 8), dst + 12);
+}
+EXPORT_SYMBOL_GPL(poly1305_core_emit);
+
+void poly1305_init_generic(struct poly1305_desc_ctx *desc, const u8 *key)
+{
+ poly1305_core_setkey(desc->r, key);
+ desc->s[0] = get_unaligned_le32(key + 16);
+ desc->s[1] = get_unaligned_le32(key + 20);
+ desc->s[2] = get_unaligned_le32(key + 24);
+ desc->s[3] = get_unaligned_le32(key + 28);
+ poly1305_core_init(&desc->h);
+ desc->buflen = 0;
+ desc->sset = true;
+ desc->rset = 1;
+}
+EXPORT_SYMBOL_GPL(poly1305_init_generic);
+
+void poly1305_update_generic(struct poly1305_desc_ctx *desc, const u8 *src,
+ unsigned int nbytes)
+{
+ unsigned int bytes;
+
+ if (unlikely(desc->buflen)) {
+ bytes = min(nbytes, POLY1305_BLOCK_SIZE - desc->buflen);
+ memcpy(desc->buf + desc->buflen, src, bytes);
+ src += bytes;
+ nbytes -= bytes;
+ desc->buflen += bytes;
+
+ if (desc->buflen == POLY1305_BLOCK_SIZE) {
+ poly1305_core_blocks(&desc->h, desc->r, desc->buf, 1, 1);
+ desc->buflen = 0;
+ }
+ }
+
+ if (likely(nbytes >= POLY1305_BLOCK_SIZE)) {
+ poly1305_core_blocks(&desc->h, desc->r, src,
+ nbytes / POLY1305_BLOCK_SIZE, 1);
+ src += nbytes - (nbytes % POLY1305_BLOCK_SIZE);
+ nbytes %= POLY1305_BLOCK_SIZE;
+ }
+
+ if (unlikely(nbytes)) {
+ desc->buflen = nbytes;
+ memcpy(desc->buf, src, nbytes);
+ }
+}
+EXPORT_SYMBOL_GPL(poly1305_update_generic);
+
+void poly1305_final_generic(struct poly1305_desc_ctx *desc, u8 *dst)
+{
+ __le32 digest[4];
+ u64 f = 0;
+
+ if (unlikely(desc->buflen)) {
+ desc->buf[desc->buflen++] = 1;
+ memset(desc->buf + desc->buflen, 0,
+ POLY1305_BLOCK_SIZE - desc->buflen);
+ poly1305_core_blocks(&desc->h, desc->r, desc->buf, 1, 0);
+ }
+
+ poly1305_core_emit(&desc->h, digest);
+
+ /* mac = (h + s) % (2^128) */
+ f = (f >> 32) + le32_to_cpu(digest[0]) + desc->s[0];
+ put_unaligned_le32(f, dst + 0);
+ f = (f >> 32) + le32_to_cpu(digest[1]) + desc->s[1];
+ put_unaligned_le32(f, dst + 4);
+ f = (f >> 32) + le32_to_cpu(digest[2]) + desc->s[2];
+ put_unaligned_le32(f, dst + 8);
+ f = (f >> 32) + le32_to_cpu(digest[3]) + desc->s[3];
+ put_unaligned_le32(f, dst + 12);
+
+ *desc = (struct poly1305_desc_ctx){};
+}
+EXPORT_SYMBOL_GPL(poly1305_final_generic);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Martin Willi <martin@strongswan.org>");
diff --git a/lib/errname.c b/lib/errname.c
new file mode 100644
index 000000000000..0c4d3e66170e
--- /dev/null
+++ b/lib/errname.c
@@ -0,0 +1,223 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/build_bug.h>
+#include <linux/errno.h>
+#include <linux/errname.h>
+#include <linux/kernel.h>
+
+/*
+ * Ensure these tables do not accidentally become gigantic if some
+ * huge errno makes it in. On most architectures, the first table will
+ * only have about 140 entries, but mips and parisc have more sparsely
+ * allocated errnos (with EHWPOISON = 257 on parisc, and EDQUOT = 1133
+ * on mips), so this wastes a bit of space on those - though we
+ * special case the EDQUOT case.
+ */
+#define E(err) [err + BUILD_BUG_ON_ZERO(err <= 0 || err > 300)] = "-" #err
+static const char *names_0[] = {
+ E(E2BIG),
+ E(EACCES),
+ E(EADDRINUSE),
+ E(EADDRNOTAVAIL),
+ E(EADV),
+ E(EAFNOSUPPORT),
+ E(EALREADY),
+ E(EBADE),
+ E(EBADF),
+ E(EBADFD),
+ E(EBADMSG),
+ E(EBADR),
+ E(EBADRQC),
+ E(EBADSLT),
+ E(EBFONT),
+ E(EBUSY),
+#ifdef ECANCELLED
+ E(ECANCELLED),
+#endif
+ E(ECHILD),
+ E(ECHRNG),
+ E(ECOMM),
+ E(ECONNABORTED),
+ E(ECONNRESET),
+ E(EDEADLOCK),
+ E(EDESTADDRREQ),
+ E(EDOM),
+ E(EDOTDOT),
+#ifndef CONFIG_MIPS
+ E(EDQUOT),
+#endif
+ E(EEXIST),
+ E(EFAULT),
+ E(EFBIG),
+ E(EHOSTDOWN),
+ E(EHOSTUNREACH),
+ E(EHWPOISON),
+ E(EIDRM),
+ E(EILSEQ),
+#ifdef EINIT
+ E(EINIT),
+#endif
+ E(EINPROGRESS),
+ E(EINTR),
+ E(EINVAL),
+ E(EIO),
+ E(EISCONN),
+ E(EISDIR),
+ E(EISNAM),
+ E(EKEYEXPIRED),
+ E(EKEYREJECTED),
+ E(EKEYREVOKED),
+ E(EL2HLT),
+ E(EL2NSYNC),
+ E(EL3HLT),
+ E(EL3RST),
+ E(ELIBACC),
+ E(ELIBBAD),
+ E(ELIBEXEC),
+ E(ELIBMAX),
+ E(ELIBSCN),
+ E(ELNRNG),
+ E(ELOOP),
+ E(EMEDIUMTYPE),
+ E(EMFILE),
+ E(EMLINK),
+ E(EMSGSIZE),
+ E(EMULTIHOP),
+ E(ENAMETOOLONG),
+ E(ENAVAIL),
+ E(ENETDOWN),
+ E(ENETRESET),
+ E(ENETUNREACH),
+ E(ENFILE),
+ E(ENOANO),
+ E(ENOBUFS),
+ E(ENOCSI),
+ E(ENODATA),
+ E(ENODEV),
+ E(ENOENT),
+ E(ENOEXEC),
+ E(ENOKEY),
+ E(ENOLCK),
+ E(ENOLINK),
+ E(ENOMEDIUM),
+ E(ENOMEM),
+ E(ENOMSG),
+ E(ENONET),
+ E(ENOPKG),
+ E(ENOPROTOOPT),
+ E(ENOSPC),
+ E(ENOSR),
+ E(ENOSTR),
+#ifdef ENOSYM
+ E(ENOSYM),
+#endif
+ E(ENOSYS),
+ E(ENOTBLK),
+ E(ENOTCONN),
+ E(ENOTDIR),
+ E(ENOTEMPTY),
+ E(ENOTNAM),
+ E(ENOTRECOVERABLE),
+ E(ENOTSOCK),
+ E(ENOTTY),
+ E(ENOTUNIQ),
+ E(ENXIO),
+ E(EOPNOTSUPP),
+ E(EOVERFLOW),
+ E(EOWNERDEAD),
+ E(EPERM),
+ E(EPFNOSUPPORT),
+ E(EPIPE),
+#ifdef EPROCLIM
+ E(EPROCLIM),
+#endif
+ E(EPROTO),
+ E(EPROTONOSUPPORT),
+ E(EPROTOTYPE),
+ E(ERANGE),
+ E(EREMCHG),
+#ifdef EREMDEV
+ E(EREMDEV),
+#endif
+ E(EREMOTE),
+ E(EREMOTEIO),
+#ifdef EREMOTERELEASE
+ E(EREMOTERELEASE),
+#endif
+ E(ERESTART),
+ E(ERFKILL),
+ E(EROFS),
+#ifdef ERREMOTE
+ E(ERREMOTE),
+#endif
+ E(ESHUTDOWN),
+ E(ESOCKTNOSUPPORT),
+ E(ESPIPE),
+ E(ESRCH),
+ E(ESRMNT),
+ E(ESTALE),
+ E(ESTRPIPE),
+ E(ETIME),
+ E(ETIMEDOUT),
+ E(ETOOMANYREFS),
+ E(ETXTBSY),
+ E(EUCLEAN),
+ E(EUNATCH),
+ E(EUSERS),
+ E(EXDEV),
+ E(EXFULL),
+
+ E(ECANCELED), /* ECANCELLED */
+ E(EAGAIN), /* EWOULDBLOCK */
+ E(ECONNREFUSED), /* EREFUSED */
+ E(EDEADLK), /* EDEADLOCK */
+};
+#undef E
+
+#define E(err) [err - 512 + BUILD_BUG_ON_ZERO(err < 512 || err > 550)] = "-" #err
+static const char *names_512[] = {
+ E(ERESTARTSYS),
+ E(ERESTARTNOINTR),
+ E(ERESTARTNOHAND),
+ E(ENOIOCTLCMD),
+ E(ERESTART_RESTARTBLOCK),
+ E(EPROBE_DEFER),
+ E(EOPENSTALE),
+ E(ENOPARAM),
+
+ E(EBADHANDLE),
+ E(ENOTSYNC),
+ E(EBADCOOKIE),
+ E(ENOTSUPP),
+ E(ETOOSMALL),
+ E(ESERVERFAULT),
+ E(EBADTYPE),
+ E(EJUKEBOX),
+ E(EIOCBQUEUED),
+ E(ERECALLCONFLICT),
+};
+#undef E
+
+static const char *__errname(unsigned err)
+{
+ if (err < ARRAY_SIZE(names_0))
+ return names_0[err];
+ if (err >= 512 && err - 512 < ARRAY_SIZE(names_512))
+ return names_512[err - 512];
+ /* The lone far outlier we special-case: EDQUOT is 1133 on mips. */
+ if (IS_ENABLED(CONFIG_MIPS) && err == EDQUOT) /* 1133 */
+ return "-EDQUOT";
+ return NULL;
+}
+
+/*
+ * errname(EIO) -> "EIO"
+ * errname(-EIO) -> "-EIO"
+ */
+const char *errname(int err)
+{
+ const char *name = __errname(abs(err));
+ if (!name)
+ return NULL;
+
+ return err > 0 ? name + 1 : name;
+}
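
errname() is the lookup intended to back the %pe vsprintf extension: a positive err yields the bare name (skipping the stored minus sign), a negative err keeps it, and unknown values return NULL so callers can fall back to the raw number. An illustrative caller (the wrapper function is made up):

    #include <linux/errname.h>
    #include <linux/printk.h>

    static void report_failure(int err)     /* expects a -Exxx value */
    {
            const char *name = errname(err);        /* e.g. -EIO -> "-EIO" */

            if (name)
                    pr_warn("operation failed: %s\n", name);
            else
                    pr_warn("operation failed: %d\n", err);
    }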
diff --git a/lib/kunit/Kconfig b/lib/kunit/Kconfig
new file mode 100644
index 000000000000..af37016bfdd4
--- /dev/null
+++ b/lib/kunit/Kconfig
@@ -0,0 +1,36 @@
+#
+# KUnit base configuration
+#
+
+menuconfig KUNIT
+ bool "KUnit - Enable support for unit tests"
+ help
+ Enables support for kernel unit tests (KUnit), a lightweight unit
+ testing and mocking framework for the Linux kernel. When using UML,
+ these tests can be run locally on a developer's workstation without a
+ VM or special hardware. KUnit can also be used on most other
+ architectures. For more information, please see
+ Documentation/dev-tools/kunit/.
+
+if KUNIT
+
+config KUNIT_TEST
+ bool "KUnit test for KUnit"
+ help
+ Enables the unit tests for the KUnit test framework itself. These
+ tests are written using KUnit and exercise KUnit's own
+ behavior. This option should only be enabled for testing
+ purposes by developers interested in testing that KUnit works as
+ expected.
+
+config KUNIT_EXAMPLE_TEST
+ bool "Example test for KUnit"
+ help
+ Enables an example unit test that illustrates some of the basic
+ features of KUnit. This test only exists to help new users understand
+ what KUnit is and how it is used. Please refer to the example test
+ itself, lib/kunit/example-test.c, for more information. This option
+ is intended for curious hackers who would like to understand how to
+ use KUnit for kernel development.
+
+endif # KUNIT
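
Since CONFIG_KUNIT_TEST and CONFIG_KUNIT_EXAMPLE_TEST sit under "if KUNIT", a kunitconfig fragment exercising everything in this file would look like the following (illustrative; the kunitconfig workflow is described in Documentation/dev-tools/kunit/start.rst):

    CONFIG_KUNIT=y
    CONFIG_KUNIT_TEST=y
    CONFIG_KUNIT_EXAMPLE_TEST=y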
diff --git a/lib/kunit/Makefile b/lib/kunit/Makefile
new file mode 100644
index 000000000000..769d9402b5d3
--- /dev/null
+++ b/lib/kunit/Makefile
@@ -0,0 +1,9 @@
+obj-$(CONFIG_KUNIT) += test.o \
+ string-stream.o \
+ assert.o \
+ try-catch.o
+
+obj-$(CONFIG_KUNIT_TEST) += test-test.o \
+ string-stream-test.o
+
+obj-$(CONFIG_KUNIT_EXAMPLE_TEST) += example-test.o
diff --git a/lib/kunit/assert.c b/lib/kunit/assert.c
new file mode 100644
index 000000000000..86013d4cf891
--- /dev/null
+++ b/lib/kunit/assert.c
@@ -0,0 +1,141 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Assertion and expectation serialization API.
+ *
+ * Copyright (C) 2019, Google LLC.
+ * Author: Brendan Higgins <brendanhiggins@google.com>
+ */
+#include <kunit/assert.h>
+
+void kunit_base_assert_format(const struct kunit_assert *assert,
+ struct string_stream *stream)
+{
+ const char *expect_or_assert = NULL;
+
+ switch (assert->type) {
+ case KUNIT_EXPECTATION:
+ expect_or_assert = "EXPECTATION";
+ break;
+ case KUNIT_ASSERTION:
+ expect_or_assert = "ASSERTION";
+ break;
+ }
+
+ string_stream_add(stream, "%s FAILED at %s:%d\n",
+ expect_or_assert, assert->file, assert->line);
+}
+
+void kunit_assert_print_msg(const struct kunit_assert *assert,
+ struct string_stream *stream)
+{
+ if (assert->message.fmt)
+ string_stream_add(stream, "\n%pV", &assert->message);
+}
+
+void kunit_fail_assert_format(const struct kunit_assert *assert,
+ struct string_stream *stream)
+{
+ kunit_base_assert_format(assert, stream);
+ string_stream_add(stream, "%pV", &assert->message);
+}
+
+void kunit_unary_assert_format(const struct kunit_assert *assert,
+ struct string_stream *stream)
+{
+ struct kunit_unary_assert *unary_assert = container_of(
+ assert, struct kunit_unary_assert, assert);
+
+ kunit_base_assert_format(assert, stream);
+ if (unary_assert->expected_true)
+ string_stream_add(stream,
+ "\tExpected %s to be true, but is false\n",
+ unary_assert->condition);
+ else
+ string_stream_add(stream,
+ "\tExpected %s to be false, but is true\n",
+ unary_assert->condition);
+ kunit_assert_print_msg(assert, stream);
+}
+
+void kunit_ptr_not_err_assert_format(const struct kunit_assert *assert,
+ struct string_stream *stream)
+{
+ struct kunit_ptr_not_err_assert *ptr_assert = container_of(
+ assert, struct kunit_ptr_not_err_assert, assert);
+
+ kunit_base_assert_format(assert, stream);
+ if (!ptr_assert->value) {
+ string_stream_add(stream,
+ "\tExpected %s is not null, but is\n",
+ ptr_assert->text);
+ } else if (IS_ERR(ptr_assert->value)) {
+ string_stream_add(stream,
+ "\tExpected %s is not error, but is: %ld\n",
+ ptr_assert->text,
+ PTR_ERR(ptr_assert->value));
+ }
+ kunit_assert_print_msg(assert, stream);
+}
+
+void kunit_binary_assert_format(const struct kunit_assert *assert,
+ struct string_stream *stream)
+{
+ struct kunit_binary_assert *binary_assert = container_of(
+ assert, struct kunit_binary_assert, assert);
+
+ kunit_base_assert_format(assert, stream);
+ string_stream_add(stream,
+ "\tExpected %s %s %s, but\n",
+ binary_assert->left_text,
+ binary_assert->operation,
+ binary_assert->right_text);
+ string_stream_add(stream, "\t\t%s == %lld\n",
+ binary_assert->left_text,
+ binary_assert->left_value);
+ string_stream_add(stream, "\t\t%s == %lld",
+ binary_assert->right_text,
+ binary_assert->right_value);
+ kunit_assert_print_msg(assert, stream);
+}
+
+void kunit_binary_ptr_assert_format(const struct kunit_assert *assert,
+ struct string_stream *stream)
+{
+ struct kunit_binary_ptr_assert *binary_assert = container_of(
+ assert, struct kunit_binary_ptr_assert, assert);
+
+ kunit_base_assert_format(assert, stream);
+ string_stream_add(stream,
+ "\tExpected %s %s %s, but\n",
+ binary_assert->left_text,
+ binary_assert->operation,
+ binary_assert->right_text);
+ string_stream_add(stream, "\t\t%s == %pK\n",
+ binary_assert->left_text,
+ binary_assert->left_value);
+ string_stream_add(stream, "\t\t%s == %pK",
+ binary_assert->right_text,
+ binary_assert->right_value);
+ kunit_assert_print_msg(assert, stream);
+}
+
+void kunit_binary_str_assert_format(const struct kunit_assert *assert,
+ struct string_stream *stream)
+{
+ struct kunit_binary_str_assert *binary_assert = container_of(
+ assert, struct kunit_binary_str_assert, assert);
+
+ kunit_base_assert_format(assert, stream);
+ string_stream_add(stream,
+ "\tExpected %s %s %s, but\n",
+ binary_assert->left_text,
+ binary_assert->operation,
+ binary_assert->right_text);
+ string_stream_add(stream, "\t\t%s == %s\n",
+ binary_assert->left_text,
+ binary_assert->left_value);
+ string_stream_add(stream, "\t\t%s == %s",
+ binary_assert->right_text,
+ binary_assert->right_value);
+ kunit_assert_print_msg(assert, stream);
+}
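
Reading the format strings above together: a failing KUNIT_EXPECT_EQ(test, foo(), 2) passes through kunit_base_assert_format() and kunit_binary_assert_format(), producing a message shaped like the following reconstruction (file, line, and values invented for illustration):

    EXPECTATION FAILED at lib/foo-test.c:42
    	Expected foo() == 2, but
    		foo() == 3
    		2 == 2

with any optional caller-supplied message appended by kunit_assert_print_msg().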
diff --git a/lib/kunit/example-test.c b/lib/kunit/example-test.c
new file mode 100644
index 000000000000..f64a829aa441
--- /dev/null
+++ b/lib/kunit/example-test.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Example KUnit test to show how to use KUnit.
+ *
+ * Copyright (C) 2019, Google LLC.
+ * Author: Brendan Higgins <brendanhiggins@google.com>
+ */
+
+#include <kunit/test.h>
+
+/*
+ * This is the most fundamental element of KUnit, the test case. A test case
+ * makes a set of EXPECTATIONs and ASSERTIONs about the behavior of some code;
+ * if any expectations or assertions are not met, the test fails; otherwise,
+ * the test passes.
+ *
+ * In KUnit, a test case is just a function with the signature
+ * `void (*)(struct kunit *)`. `struct kunit` is a context object that stores
+ * information about the current test.
+ */
+static void example_simple_test(struct kunit *test)
+{
+ /*
+ * This is an EXPECTATION; it is how KUnit tests things. When you want
+ * to test a piece of code, you set some expectations about what the
+ * code should do. KUnit then runs the test and verifies that the code's
+ * behavior matched what was expected.
+ */
+ KUNIT_EXPECT_EQ(test, 1 + 1, 2);
+}
+
+/*
+ * This is run once before each test case; see the comment on
+ * example_test_suite for more information.
+ */
+static int example_test_init(struct kunit *test)
+{
+ kunit_info(test, "initializing\n");
+
+ return 0;
+}
+
+/*
+ * Here we make a list of all the test cases we want to add to the test suite
+ * below.
+ */
+static struct kunit_case example_test_cases[] = {
+ /*
+ * This is a helper to create a test case object from a test case
+ * function; its exact implementation is not important for understanding how
+ * to use KUnit; just know that this is how you associate test cases with a
+ * test suite.
+ */
+ KUNIT_CASE(example_simple_test),
+ {}
+};
+
+/*
+ * This defines a suite or grouping of tests.
+ *
+ * Test cases are defined as belonging to the suite by adding them to
+ * `kunit_cases`.
+ *
+ * Often it is desirable to run some function which will set up things which
+ * will be used by every test; this is accomplished with an `init` function
+ * which runs before each test case is invoked. Similarly, an `exit` function
+ * may be specified which runs after every test case and can be used to for
+ * cleanup. For clarity, running tests in a test suite would behave as follows:
+ *
+ * suite.init(test);
+ * suite.test_case[0](test);
+ * suite.exit(test);
+ * suite.init(test);
+ * suite.test_case[1](test);
+ * suite.exit(test);
+ * ...;
+ */
+static struct kunit_suite example_test_suite = {
+ .name = "example",
+ .init = example_test_init,
+ .test_cases = example_test_cases,
+};
+
+/*
+ * This registers the above test suite, telling KUnit that this is a suite of
+ * tests that need to be run.
+ */
+kunit_test_suite(example_test_suite);
diff --git a/lib/kunit/string-stream-test.c b/lib/kunit/string-stream-test.c
new file mode 100644
index 000000000000..76cc05eb00ed
--- /dev/null
+++ b/lib/kunit/string-stream-test.c
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KUnit test for struct string_stream.
+ *
+ * Copyright (C) 2019, Google LLC.
+ * Author: Brendan Higgins <brendanhiggins@google.com>
+ */
+
+#include <kunit/string-stream.h>
+#include <kunit/test.h>
+#include <linux/slab.h>
+
+static void string_stream_test_empty_on_creation(struct kunit *test)
+{
+ struct string_stream *stream = alloc_string_stream(test, GFP_KERNEL);
+
+ KUNIT_EXPECT_TRUE(test, string_stream_is_empty(stream));
+}
+
+static void string_stream_test_not_empty_after_add(struct kunit *test)
+{
+ struct string_stream *stream = alloc_string_stream(test, GFP_KERNEL);
+
+ string_stream_add(stream, "Foo");
+
+ KUNIT_EXPECT_FALSE(test, string_stream_is_empty(stream));
+}
+
+static void string_stream_test_get_string(struct kunit *test)
+{
+ struct string_stream *stream = alloc_string_stream(test, GFP_KERNEL);
+ char *output;
+
+ string_stream_add(stream, "Foo");
+ string_stream_add(stream, " %s", "bar");
+
+ output = string_stream_get_string(stream);
+ KUNIT_ASSERT_STREQ(test, output, "Foo bar");
+}
+
+static struct kunit_case string_stream_test_cases[] = {
+ KUNIT_CASE(string_stream_test_empty_on_creation),
+ KUNIT_CASE(string_stream_test_not_empty_after_add),
+ KUNIT_CASE(string_stream_test_get_string),
+ {}
+};
+
+static struct kunit_suite string_stream_test_suite = {
+ .name = "string-stream-test",
+ .test_cases = string_stream_test_cases
+};
+kunit_test_suite(string_stream_test_suite);
diff --git a/lib/kunit/string-stream.c b/lib/kunit/string-stream.c
new file mode 100644
index 000000000000..e6d17aacca30
--- /dev/null
+++ b/lib/kunit/string-stream.c
@@ -0,0 +1,217 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * C++ stream style string builder used in KUnit for building messages.
+ *
+ * Copyright (C) 2019, Google LLC.
+ * Author: Brendan Higgins <brendanhiggins@google.com>
+ */
+
+#include <kunit/string-stream.h>
+#include <kunit/test.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+
+struct string_stream_fragment_alloc_context {
+ struct kunit *test;
+ int len;
+ gfp_t gfp;
+};
+
+static int string_stream_fragment_init(struct kunit_resource *res,
+ void *context)
+{
+ struct string_stream_fragment_alloc_context *ctx = context;
+ struct string_stream_fragment *frag;
+
+ frag = kunit_kzalloc(ctx->test, sizeof(*frag), ctx->gfp);
+ if (!frag)
+ return -ENOMEM;
+
+ frag->test = ctx->test;
+ frag->fragment = kunit_kmalloc(ctx->test, ctx->len, ctx->gfp);
+ if (!frag->fragment)
+ return -ENOMEM;
+
+ res->allocation = frag;
+
+ return 0;
+}
+
+static void string_stream_fragment_free(struct kunit_resource *res)
+{
+ struct string_stream_fragment *frag = res->allocation;
+
+ list_del(&frag->node);
+ kunit_kfree(frag->test, frag->fragment);
+ kunit_kfree(frag->test, frag);
+}
+
+static struct string_stream_fragment *alloc_string_stream_fragment(
+ struct kunit *test, int len, gfp_t gfp)
+{
+ struct string_stream_fragment_alloc_context context = {
+ .test = test,
+ .len = len,
+ .gfp = gfp
+ };
+
+ return kunit_alloc_resource(test,
+ string_stream_fragment_init,
+ string_stream_fragment_free,
+ gfp,
+ &context);
+}
+
+static int string_stream_fragment_destroy(struct string_stream_fragment *frag)
+{
+ return kunit_resource_destroy(frag->test,
+ kunit_resource_instance_match,
+ string_stream_fragment_free,
+ frag);
+}
+
+int string_stream_vadd(struct string_stream *stream,
+ const char *fmt,
+ va_list args)
+{
+ struct string_stream_fragment *frag_container;
+ int len;
+ va_list args_for_counting;
+
+ /* Make a copy because `vsnprintf` could change it */
+ va_copy(args_for_counting, args);
+
+ /* Need space for null byte. */
+ len = vsnprintf(NULL, 0, fmt, args_for_counting) + 1;
+
+ va_end(args_for_counting);
+
+ frag_container = alloc_string_stream_fragment(stream->test,
+ len,
+ stream->gfp);
+ if (!frag_container)
+ return -ENOMEM;
+
+ len = vsnprintf(frag_container->fragment, len, fmt, args);
+ spin_lock(&stream->lock);
+ stream->length += len;
+ list_add_tail(&frag_container->node, &stream->fragments);
+ spin_unlock(&stream->lock);
+
+ return 0;
+}
+
+int string_stream_add(struct string_stream *stream, const char *fmt, ...)
+{
+ va_list args;
+ int result;
+
+ va_start(args, fmt);
+ result = string_stream_vadd(stream, fmt, args);
+ va_end(args);
+
+ return result;
+}
+
+static void string_stream_clear(struct string_stream *stream)
+{
+ struct string_stream_fragment *frag_container, *frag_container_safe;
+
+ spin_lock(&stream->lock);
+ list_for_each_entry_safe(frag_container,
+ frag_container_safe,
+ &stream->fragments,
+ node) {
+ string_stream_fragment_destroy(frag_container);
+ }
+ stream->length = 0;
+ spin_unlock(&stream->lock);
+}
+
+char *string_stream_get_string(struct string_stream *stream)
+{
+ struct string_stream_fragment *frag_container;
+ size_t buf_len = stream->length + 1; /* +1 for null byte. */
+ char *buf;
+
+ buf = kunit_kzalloc(stream->test, buf_len, stream->gfp);
+ if (!buf)
+ return NULL;
+
+ spin_lock(&stream->lock);
+ list_for_each_entry(frag_container, &stream->fragments, node)
+ strlcat(buf, frag_container->fragment, buf_len);
+ spin_unlock(&stream->lock);
+
+ return buf;
+}
+
+int string_stream_append(struct string_stream *stream,
+ struct string_stream *other)
+{
+ const char *other_content;
+
+ other_content = string_stream_get_string(other);
+
+ if (!other_content)
+ return -ENOMEM;
+
+ return string_stream_add(stream, other_content);
+}
+
+bool string_stream_is_empty(struct string_stream *stream)
+{
+ return list_empty(&stream->fragments);
+}
+
+struct string_stream_alloc_context {
+ struct kunit *test;
+ gfp_t gfp;
+};
+
+static int string_stream_init(struct kunit_resource *res, void *context)
+{
+ struct string_stream *stream;
+ struct string_stream_alloc_context *ctx = context;
+
+ stream = kunit_kzalloc(ctx->test, sizeof(*stream), ctx->gfp);
+ if (!stream)
+ return -ENOMEM;
+
+ res->allocation = stream;
+ stream->gfp = ctx->gfp;
+ stream->test = ctx->test;
+ INIT_LIST_HEAD(&stream->fragments);
+ spin_lock_init(&stream->lock);
+
+ return 0;
+}
+
+static void string_stream_free(struct kunit_resource *res)
+{
+ struct string_stream *stream = res->allocation;
+
+ string_stream_clear(stream);
+}
+
+struct string_stream *alloc_string_stream(struct kunit *test, gfp_t gfp)
+{
+ struct string_stream_alloc_context context = {
+ .test = test,
+ .gfp = gfp
+ };
+
+ return kunit_alloc_resource(test,
+ string_stream_init,
+ string_stream_free,
+ gfp,
+ &context);
+}
+
+int string_stream_destroy(struct string_stream *stream)
+{
+ return kunit_resource_destroy(stream->test,
+ kunit_resource_instance_match,
+ string_stream_free,
+ stream);
+}
diff --git a/lib/kunit/test-test.c b/lib/kunit/test-test.c
new file mode 100644
index 000000000000..5ebe059d16e2
--- /dev/null
+++ b/lib/kunit/test-test.c
@@ -0,0 +1,331 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KUnit test for core test infrastructure.
+ *
+ * Copyright (C) 2019, Google LLC.
+ * Author: Brendan Higgins <brendanhiggins@google.com>
+ */
+#include <kunit/test.h>
+
+struct kunit_try_catch_test_context {
+ struct kunit_try_catch *try_catch;
+ bool function_called;
+};
+
+static void kunit_test_successful_try(void *data)
+{
+ struct kunit *test = data;
+ struct kunit_try_catch_test_context *ctx = test->priv;
+
+ ctx->function_called = true;
+}
+
+static void kunit_test_no_catch(void *data)
+{
+ struct kunit *test = data;
+
+ KUNIT_FAIL(test, "Catch should not be called\n");
+}
+
+static void kunit_test_try_catch_successful_try_no_catch(struct kunit *test)
+{
+ struct kunit_try_catch_test_context *ctx = test->priv;
+ struct kunit_try_catch *try_catch = ctx->try_catch;
+
+ kunit_try_catch_init(try_catch,
+ test,
+ kunit_test_successful_try,
+ kunit_test_no_catch);
+ kunit_try_catch_run(try_catch, test);
+
+ KUNIT_EXPECT_TRUE(test, ctx->function_called);
+}
+
+static void kunit_test_unsuccessful_try(void *data)
+{
+ struct kunit *test = data;
+ struct kunit_try_catch_test_context *ctx = test->priv;
+ struct kunit_try_catch *try_catch = ctx->try_catch;
+
+ kunit_try_catch_throw(try_catch);
+ KUNIT_FAIL(test, "This line should never be reached\n");
+}
+
+static void kunit_test_catch(void *data)
+{
+ struct kunit *test = data;
+ struct kunit_try_catch_test_context *ctx = test->priv;
+
+ ctx->function_called = true;
+}
+
+static void kunit_test_try_catch_unsuccessful_try_does_catch(struct kunit *test)
+{
+ struct kunit_try_catch_test_context *ctx = test->priv;
+ struct kunit_try_catch *try_catch = ctx->try_catch;
+
+ kunit_try_catch_init(try_catch,
+ test,
+ kunit_test_unsuccessful_try,
+ kunit_test_catch);
+ kunit_try_catch_run(try_catch, test);
+
+ KUNIT_EXPECT_TRUE(test, ctx->function_called);
+}
+
+static int kunit_try_catch_test_init(struct kunit *test)
+{
+ struct kunit_try_catch_test_context *ctx;
+
+ ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+ test->priv = ctx;
+
+ ctx->try_catch = kunit_kmalloc(test,
+ sizeof(*ctx->try_catch),
+ GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx->try_catch);
+
+ return 0;
+}
+
+static struct kunit_case kunit_try_catch_test_cases[] = {
+ KUNIT_CASE(kunit_test_try_catch_successful_try_no_catch),
+ KUNIT_CASE(kunit_test_try_catch_unsuccessful_try_does_catch),
+ {}
+};
+
+static struct kunit_suite kunit_try_catch_test_suite = {
+ .name = "kunit-try-catch-test",
+ .init = kunit_try_catch_test_init,
+ .test_cases = kunit_try_catch_test_cases,
+};
+kunit_test_suite(kunit_try_catch_test_suite);
+
+/*
+ * Context for testing test managed resources.
+ * is_resource_initialized is used to test arbitrary resources.
+ */
+struct kunit_test_resource_context {
+ struct kunit test;
+ bool is_resource_initialized;
+ int allocate_order[2];
+ int free_order[2];
+};
+
+static int fake_resource_init(struct kunit_resource *res, void *context)
+{
+ struct kunit_test_resource_context *ctx = context;
+
+ res->allocation = &ctx->is_resource_initialized;
+ ctx->is_resource_initialized = true;
+ return 0;
+}
+
+static void fake_resource_free(struct kunit_resource *res)
+{
+ bool *is_resource_initialized = res->allocation;
+
+ *is_resource_initialized = false;
+}
+
+static void kunit_resource_test_init_resources(struct kunit *test)
+{
+ struct kunit_test_resource_context *ctx = test->priv;
+
+ kunit_init_test(&ctx->test, "testing_test_init_test");
+
+ KUNIT_EXPECT_TRUE(test, list_empty(&ctx->test.resources));
+}
+
+static void kunit_resource_test_alloc_resource(struct kunit *test)
+{
+ struct kunit_test_resource_context *ctx = test->priv;
+ struct kunit_resource *res;
+ kunit_resource_free_t free = fake_resource_free;
+
+ res = kunit_alloc_and_get_resource(&ctx->test,
+ fake_resource_init,
+ fake_resource_free,
+ GFP_KERNEL,
+ ctx);
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, res);
+ KUNIT_EXPECT_PTR_EQ(test,
+ &ctx->is_resource_initialized,
+ (bool *) res->allocation);
+ KUNIT_EXPECT_TRUE(test, list_is_last(&res->node, &ctx->test.resources));
+ KUNIT_EXPECT_PTR_EQ(test, free, res->free);
+}
+
+static void kunit_resource_test_destroy_resource(struct kunit *test)
+{
+ struct kunit_test_resource_context *ctx = test->priv;
+ struct kunit_resource *res = kunit_alloc_and_get_resource(
+ &ctx->test,
+ fake_resource_init,
+ fake_resource_free,
+ GFP_KERNEL,
+ ctx);
+
+ KUNIT_ASSERT_FALSE(test,
+ kunit_resource_destroy(&ctx->test,
+ kunit_resource_instance_match,
+ res->free,
+ res->allocation));
+
+ KUNIT_EXPECT_FALSE(test, ctx->is_resource_initialized);
+ KUNIT_EXPECT_TRUE(test, list_empty(&ctx->test.resources));
+}
+
+static void kunit_resource_test_cleanup_resources(struct kunit *test)
+{
+ int i;
+ struct kunit_test_resource_context *ctx = test->priv;
+ struct kunit_resource *resources[5];
+
+ for (i = 0; i < ARRAY_SIZE(resources); i++) {
+ resources[i] = kunit_alloc_and_get_resource(&ctx->test,
+ fake_resource_init,
+ fake_resource_free,
+ GFP_KERNEL,
+ ctx);
+ }
+
+ kunit_cleanup(&ctx->test);
+
+ KUNIT_EXPECT_TRUE(test, list_empty(&ctx->test.resources));
+}
+
+static void kunit_resource_test_mark_order(int order_array[],
+ size_t order_size,
+ int key)
+{
+ int i;
+
+ for (i = 0; i < order_size && order_array[i]; i++)
+ ;
+
+ order_array[i] = key;
+}
+
+#define KUNIT_RESOURCE_TEST_MARK_ORDER(ctx, order_field, key) \
+ kunit_resource_test_mark_order(ctx->order_field, \
+ ARRAY_SIZE(ctx->order_field), \
+ key)
+
+static int fake_resource_2_init(struct kunit_resource *res, void *context)
+{
+ struct kunit_test_resource_context *ctx = context;
+
+ KUNIT_RESOURCE_TEST_MARK_ORDER(ctx, allocate_order, 2);
+
+ res->allocation = ctx;
+
+ return 0;
+}
+
+static void fake_resource_2_free(struct kunit_resource *res)
+{
+ struct kunit_test_resource_context *ctx = res->allocation;
+
+ KUNIT_RESOURCE_TEST_MARK_ORDER(ctx, free_order, 2);
+}
+
+static int fake_resource_1_init(struct kunit_resource *res, void *context)
+{
+ struct kunit_test_resource_context *ctx = context;
+
+ kunit_alloc_and_get_resource(&ctx->test,
+ fake_resource_2_init,
+ fake_resource_2_free,
+ GFP_KERNEL,
+ ctx);
+
+ KUNIT_RESOURCE_TEST_MARK_ORDER(ctx, allocate_order, 1);
+
+ res->allocation = ctx;
+
+ return 0;
+}
+
+static void fake_resource_1_free(struct kunit_resource *res)
+{
+ struct kunit_test_resource_context *ctx = res->allocation;
+
+ KUNIT_RESOURCE_TEST_MARK_ORDER(ctx, free_order, 1);
+}
+
+/*
+ * TODO(brendanhiggins@google.com): replace the arrays that keep track of
+ * allocation and freeing order with strict mocks, using the IN_SEQUENCE
+ * macro to assert that order, once the feature becomes available.
+ */
+static void kunit_resource_test_proper_free_ordering(struct kunit *test)
+{
+ struct kunit_test_resource_context *ctx = test->priv;
+
+ /* fake_resource_1 allocates a fake_resource_2 in its init. */
+ kunit_alloc_and_get_resource(&ctx->test,
+ fake_resource_1_init,
+ fake_resource_1_free,
+ GFP_KERNEL,
+ ctx);
+
+ /*
+ * Since fake_resource_2_init calls KUNIT_RESOURCE_TEST_MARK_ORDER
+ * before returning to fake_resource_1_init, it should be the first to
+ * put its key in the allocate_order array.
+ */
+ KUNIT_EXPECT_EQ(test, ctx->allocate_order[0], 2);
+ KUNIT_EXPECT_EQ(test, ctx->allocate_order[1], 1);
+
+ kunit_cleanup(&ctx->test);
+
+ /*
+ * Because fake_resource_2 finishes allocation before fake_resource_1,
+ * fake_resource_1 should be freed first since it could depend on
+ * fake_resource_2.
+ */
+ KUNIT_EXPECT_EQ(test, ctx->free_order[0], 1);
+ KUNIT_EXPECT_EQ(test, ctx->free_order[1], 2);
+}
+
+static int kunit_resource_test_init(struct kunit *test)
+{
+ struct kunit_test_resource_context *ctx =
+ kzalloc(sizeof(*ctx), GFP_KERNEL);
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+
+ test->priv = ctx;
+
+ kunit_init_test(&ctx->test, "test_test_context");
+
+ return 0;
+}
+
+static void kunit_resource_test_exit(struct kunit *test)
+{
+ struct kunit_test_resource_context *ctx = test->priv;
+
+ kunit_cleanup(&ctx->test);
+ kfree(ctx);
+}
+
+static struct kunit_case kunit_resource_test_cases[] = {
+ KUNIT_CASE(kunit_resource_test_init_resources),
+ KUNIT_CASE(kunit_resource_test_alloc_resource),
+ KUNIT_CASE(kunit_resource_test_destroy_resource),
+ KUNIT_CASE(kunit_resource_test_cleanup_resources),
+ KUNIT_CASE(kunit_resource_test_proper_free_ordering),
+ {}
+};
+
+static struct kunit_suite kunit_resource_test_suite = {
+ .name = "kunit-resource-test",
+ .init = kunit_resource_test_init,
+ .exit = kunit_resource_test_exit,
+ .test_cases = kunit_resource_test_cases,
+};
+kunit_test_suite(kunit_resource_test_suite);
diff --git a/lib/kunit/test.c b/lib/kunit/test.c
new file mode 100644
index 000000000000..c83c0fa59cbd
--- /dev/null
+++ b/lib/kunit/test.c
@@ -0,0 +1,478 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Base unit test (KUnit) API.
+ *
+ * Copyright (C) 2019, Google LLC.
+ * Author: Brendan Higgins <brendanhiggins@google.com>
+ */
+
+#include <kunit/test.h>
+#include <kunit/try-catch.h>
+#include <linux/kernel.h>
+#include <linux/sched/debug.h>
+
+static void kunit_set_failure(struct kunit *test)
+{
+ WRITE_ONCE(test->success, false);
+}
+
+static void kunit_print_tap_version(void)
+{
+ static bool kunit_has_printed_tap_version;
+
+ if (!kunit_has_printed_tap_version) {
+ pr_info("TAP version 14\n");
+ kunit_has_printed_tap_version = true;
+ }
+}
+
+static size_t kunit_test_cases_len(struct kunit_case *test_cases)
+{
+ struct kunit_case *test_case;
+ size_t len = 0;
+
+ for (test_case = test_cases; test_case->run_case; test_case++)
+ len++;
+
+ return len;
+}
+
+static void kunit_print_subtest_start(struct kunit_suite *suite)
+{
+ kunit_print_tap_version();
+ pr_info("\t# Subtest: %s\n", suite->name);
+ pr_info("\t1..%zd\n", kunit_test_cases_len(suite->test_cases));
+}
+
+static void kunit_print_ok_not_ok(bool should_indent,
+ bool is_ok,
+ size_t test_number,
+ const char *description)
+{
+ const char *indent, *ok_not_ok;
+
+ if (should_indent)
+ indent = "\t";
+ else
+ indent = "";
+
+ if (is_ok)
+ ok_not_ok = "ok";
+ else
+ ok_not_ok = "not ok";
+
+ pr_info("%s%s %zd - %s\n", indent, ok_not_ok, test_number, description);
+}
+
+static bool kunit_suite_has_succeeded(struct kunit_suite *suite)
+{
+ const struct kunit_case *test_case;
+
+ for (test_case = suite->test_cases; test_case->run_case; test_case++)
+ if (!test_case->success)
+ return false;
+
+ return true;
+}
+
+static void kunit_print_subtest_end(struct kunit_suite *suite)
+{
+ static size_t kunit_suite_counter = 1;
+
+ kunit_print_ok_not_ok(false,
+ kunit_suite_has_succeeded(suite),
+ kunit_suite_counter++,
+ suite->name);
+}
+
+static void kunit_print_test_case_ok_not_ok(struct kunit_case *test_case,
+ size_t test_number)
+{
+ kunit_print_ok_not_ok(true,
+ test_case->success,
+ test_number,
+ test_case->name);
+}
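+
+/*
+ * Taken together, the helpers above produce TAP-style output. For a suite
+ * with a single passing case, the log would look roughly like this (the
+ * suite and case names are illustrative):
+ *
+ *	TAP version 14
+ *		# Subtest: example-suite
+ *		1..1
+ *		ok 1 - example_case
+ *	ok 1 - example-suite
+ */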
+
+static void kunit_print_string_stream(struct kunit *test,
+ struct string_stream *stream)
+{
+ struct string_stream_fragment *fragment;
+ char *buf;
+
+ buf = string_stream_get_string(stream);
+ if (!buf) {
+ kunit_err(test,
+ "Could not allocate buffer, dumping stream:\n");
+ list_for_each_entry(fragment, &stream->fragments, node) {
+ kunit_err(test, "%s", fragment->fragment);
+ }
+ kunit_err(test, "\n");
+ } else {
+ kunit_err(test, "%s", buf);
+ kunit_kfree(test, buf);
+ }
+}
+
+static void kunit_fail(struct kunit *test, struct kunit_assert *assert)
+{
+ struct string_stream *stream;
+
+ kunit_set_failure(test);
+
+ stream = alloc_string_stream(test, GFP_KERNEL);
+ if (!stream) {
+ WARN(true,
+ "Could not allocate stream to print failed assertion in %s:%d\n",
+ assert->file,
+ assert->line);
+ return;
+ }
+
+ assert->format(assert, stream);
+
+ kunit_print_string_stream(test, stream);
+
+ WARN_ON(string_stream_destroy(stream));
+}
+
+static void __noreturn kunit_abort(struct kunit *test)
+{
+ kunit_try_catch_throw(&test->try_catch); /* Does not return. */
+
+ /*
+ * Throw could not abort from test.
+ *
+ * XXX: we should never reach this line, since kunit_try_catch_throw()
+ * is marked __noreturn.
+ */
+ WARN_ONCE(true, "Throw could not abort from test!\n");
+}
+
+void kunit_do_assertion(struct kunit *test,
+ struct kunit_assert *assert,
+ bool pass,
+ const char *fmt, ...)
+{
+ va_list args;
+
+ if (pass)
+ return;
+
+ va_start(args, fmt);
+
+ assert->message.fmt = fmt;
+ assert->message.va = &args;
+
+ kunit_fail(test, assert);
+
+ va_end(args);
+
+ if (assert->type == KUNIT_ASSERTION)
+ kunit_abort(test);
+}
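+
+/*
+ * In other words, a failed expectation (e.g. KUNIT_EXPECT_EQ()) records the
+ * failure and lets the test case keep running, while a failed assertion
+ * (e.g. KUNIT_ASSERT_NOT_ERR_OR_NULL()) additionally aborts the test case
+ * through the try-catch infrastructure.
+ */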
+
+void kunit_init_test(struct kunit *test, const char *name)
+{
+ spin_lock_init(&test->lock);
+ INIT_LIST_HEAD(&test->resources);
+ test->name = name;
+ test->success = true;
+}
+
+/*
+ * Initializes and runs a test case. Does not clean up or do post validations.
+ */
+static void kunit_run_case_internal(struct kunit *test,
+ struct kunit_suite *suite,
+ struct kunit_case *test_case)
+{
+ if (suite->init) {
+ int ret;
+
+ ret = suite->init(test);
+ if (ret) {
+ kunit_err(test, "failed to initialize: %d\n", ret);
+ kunit_set_failure(test);
+ return;
+ }
+ }
+
+ test_case->run_case(test);
+}
+
+static void kunit_case_internal_cleanup(struct kunit *test)
+{
+ kunit_cleanup(test);
+}
+
+/*
+ * Performs post validations and cleanup after a test case has been run.
+ * XXX: Should ONLY BE CALLED AFTER kunit_run_case_internal!
+ */
+static void kunit_run_case_cleanup(struct kunit *test,
+ struct kunit_suite *suite)
+{
+ if (suite->exit)
+ suite->exit(test);
+
+ kunit_case_internal_cleanup(test);
+}
+
+struct kunit_try_catch_context {
+ struct kunit *test;
+ struct kunit_suite *suite;
+ struct kunit_case *test_case;
+};
+
+static void kunit_try_run_case(void *data)
+{
+ struct kunit_try_catch_context *ctx = data;
+ struct kunit *test = ctx->test;
+ struct kunit_suite *suite = ctx->suite;
+ struct kunit_case *test_case = ctx->test_case;
+
+ /*
+ * kunit_run_case_internal may encounter a fatal error; if it does,
+ * kunit_abort() will be called, this thread will exit, and the parent
+ * thread will then resume control and handle any necessary cleanup.
+ */
+ kunit_run_case_internal(test, suite, test_case);
+ /* This line may never be reached. */
+ kunit_run_case_cleanup(test, suite);
+}
+
+static void kunit_catch_run_case(void *data)
+{
+ struct kunit_try_catch_context *ctx = data;
+ struct kunit *test = ctx->test;
+ struct kunit_suite *suite = ctx->suite;
+ int try_exit_code = kunit_try_catch_get_result(&test->try_catch);
+
+ if (try_exit_code) {
+ kunit_set_failure(test);
+ /*
+ * The test case could not finish; we have no idea what state it
+ * is in, so don't attempt any cleanup.
+ */
+ if (try_exit_code == -ETIMEDOUT) {
+ kunit_err(test, "test case timed out\n");
+ } else {
+ /*
+ * An unknown internal error occurred, preventing the
+ * test case from running; there is nothing to clean
+ * up.
+ */
+ kunit_err(test, "internal error occurred preventing test case from running: %d\n",
+ try_exit_code);
+ }
+ return;
+ }
+
+ /*
+ * The test case ran but aborted. Whether it failed is the test
+ * case's business; we just need to clean up.
+ */
+ kunit_run_case_cleanup(test, suite);
+}
+
+/*
+ * Performs all logic to run a test case. It also catches most errors that
+ * occur in a test case and reports them as failures.
+ */
+static void kunit_run_case_catch_errors(struct kunit_suite *suite,
+ struct kunit_case *test_case)
+{
+ struct kunit_try_catch_context context;
+ struct kunit_try_catch *try_catch;
+ struct kunit test;
+
+ kunit_init_test(&test, test_case->name);
+ try_catch = &test.try_catch;
+
+ kunit_try_catch_init(try_catch,
+ &test,
+ kunit_try_run_case,
+ kunit_catch_run_case);
+ context.test = &test;
+ context.suite = suite;
+ context.test_case = test_case;
+ kunit_try_catch_run(try_catch, &context);
+
+ test_case->success = test.success;
+}
+
+int kunit_run_tests(struct kunit_suite *suite)
+{
+ struct kunit_case *test_case;
+ size_t test_case_count = 1;
+
+ kunit_print_subtest_start(suite);
+
+ for (test_case = suite->test_cases; test_case->run_case; test_case++) {
+ kunit_run_case_catch_errors(suite, test_case);
+ kunit_print_test_case_ok_not_ok(test_case, test_case_count++);
+ }
+
+ kunit_print_subtest_end(suite);
+
+ return 0;
+}
+
+struct kunit_resource *kunit_alloc_and_get_resource(struct kunit *test,
+ kunit_resource_init_t init,
+ kunit_resource_free_t free,
+ gfp_t internal_gfp,
+ void *context)
+{
+ struct kunit_resource *res;
+ int ret;
+
+ res = kzalloc(sizeof(*res), internal_gfp);
+ if (!res)
+ return NULL;
+
+ ret = init(res, context);
+ if (ret) {
+ kfree(res);
+ return NULL;
+ }
+
+ res->free = free;
+ spin_lock(&test->lock);
+ list_add_tail(&res->node, &test->resources);
+ spin_unlock(&test->lock);
+
+ return res;
+}
+
+static void kunit_resource_free(struct kunit *test, struct kunit_resource *res)
+{
+ res->free(res);
+ kfree(res);
+}
+
+static struct kunit_resource *kunit_resource_find(struct kunit *test,
+ kunit_resource_match_t match,
+ kunit_resource_free_t free,
+ void *match_data)
+{
+ struct kunit_resource *resource;
+
+ lockdep_assert_held(&test->lock);
+
+ list_for_each_entry_reverse(resource, &test->resources, node) {
+ if (resource->free != free)
+ continue;
+ if (match(test, resource->allocation, match_data))
+ return resource;
+ }
+
+ return NULL;
+}
+
+static struct kunit_resource *kunit_resource_remove(
+ struct kunit *test,
+ kunit_resource_match_t match,
+ kunit_resource_free_t free,
+ void *match_data)
+{
+ struct kunit_resource *resource;
+
+ spin_lock(&test->lock);
+ resource = kunit_resource_find(test, match, free, match_data);
+ if (resource)
+ list_del(&resource->node);
+ spin_unlock(&test->lock);
+
+ return resource;
+}
+
+int kunit_resource_destroy(struct kunit *test,
+ kunit_resource_match_t match,
+ kunit_resource_free_t free,
+ void *match_data)
+{
+ struct kunit_resource *resource;
+
+ resource = kunit_resource_remove(test, match, free, match_data);
+
+ if (!resource)
+ return -ENOENT;
+
+ kunit_resource_free(test, resource);
+ return 0;
+}
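+
+/*
+ * The init/free/match triplet above forms the generic resource API;
+ * kunit_kmalloc() below is simply that API instantiated with a
+ * kmalloc()/kfree() pair, and string-stream.c uses the same pattern for its
+ * fragments.
+ */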
+
+struct kunit_kmalloc_params {
+ size_t size;
+ gfp_t gfp;
+};
+
+static int kunit_kmalloc_init(struct kunit_resource *res, void *context)
+{
+ struct kunit_kmalloc_params *params = context;
+
+ res->allocation = kmalloc(params->size, params->gfp);
+ if (!res->allocation)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void kunit_kmalloc_free(struct kunit_resource *res)
+{
+ kfree(res->allocation);
+}
+
+void *kunit_kmalloc(struct kunit *test, size_t size, gfp_t gfp)
+{
+ struct kunit_kmalloc_params params = {
+ .size = size,
+ .gfp = gfp
+ };
+
+ return kunit_alloc_resource(test,
+ kunit_kmalloc_init,
+ kunit_kmalloc_free,
+ gfp,
+ &params);
+}
+
+void kunit_kfree(struct kunit *test, const void *ptr)
+{
+ int rc;
+
+ rc = kunit_resource_destroy(test,
+ kunit_resource_instance_match,
+ kunit_kmalloc_free,
+ (void *)ptr);
+
+ WARN_ON(rc);
+}
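+
+/*
+ * Typical usage from a test case (the names below are illustrative); the
+ * allocation is registered as a resource, so it is released by
+ * kunit_cleanup() even if the test never calls kunit_kfree() itself:
+ *
+ *	static void example_test(struct kunit *test)
+ *	{
+ *		char *buf = kunit_kmalloc(test, 16, GFP_KERNEL);
+ *
+ *		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
+ *		strscpy(buf, "hello", 16);
+ *	}
+ */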
+
+void kunit_cleanup(struct kunit *test)
+{
+ struct kunit_resource *resource;
+
+ /*
+ * test->resources is a stack - each allocation must be freed in the
+ * reverse order from which it was added since one resource may depend
+ * on another for its entire lifetime.
+ * Also, we cannot use the normal list_for_each constructs, even the
+ * safe ones, because *arbitrary* nodes may be deleted when
+ * kunit_resource_free is called; the list_for_each_safe variants only
+ * protect against the current node being deleted, not the next.
+ */
+ while (true) {
+ spin_lock(&test->lock);
+ if (list_empty(&test->resources)) {
+ spin_unlock(&test->lock);
+ break;
+ }
+ resource = list_last_entry(&test->resources,
+ struct kunit_resource,
+ node);
+ list_del(&resource->node);
+ spin_unlock(&test->lock);
+
+ kunit_resource_free(test, resource);
+ }
+}
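+
+/*
+ * For example, if a test case allocates resource A and then resource B,
+ * kunit_cleanup() frees B before A; this is the LIFO behaviour exercised by
+ * kunit_resource_test_proper_free_ordering() in test-test.c.
+ */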
diff --git a/lib/kunit/try-catch.c b/lib/kunit/try-catch.c
new file mode 100644
index 000000000000..55686839eb61
--- /dev/null
+++ b/lib/kunit/try-catch.c
@@ -0,0 +1,118 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * An API for running a function that may fail, and for recovering from the
+ * failure in a controlled manner.
+ *
+ * Copyright (C) 2019, Google LLC.
+ * Author: Brendan Higgins <brendanhiggins@google.com>
+ */
+
+#include <kunit/test.h>
+#include <kunit/try-catch.h>
+#include <linux/completion.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/sched/sysctl.h>
+
+void __noreturn kunit_try_catch_throw(struct kunit_try_catch *try_catch)
+{
+ try_catch->try_result = -EFAULT;
+ complete_and_exit(try_catch->try_completion, -EFAULT);
+}
+
+static int kunit_generic_run_threadfn_adapter(void *data)
+{
+ struct kunit_try_catch *try_catch = data;
+
+ try_catch->try(try_catch->context);
+
+ complete_and_exit(try_catch->try_completion, 0);
+}
+
+static unsigned long kunit_test_timeout(void)
+{
+ unsigned long timeout_msecs;
+
+ /*
+ * TODO(brendanhiggins@google.com): We should probably have some type of
+ * variable timeout here. The only question is what that timeout value
+ * should be.
+ *
+ * The intention has always been, at some point, to be able to label
+ * tests with some type of size bucket (unit/small, integration/medium,
+ * large/system/end-to-end, etc), where each size bucket would get a
+ * default timeout value kind of like what Bazel does:
+ * https://docs.bazel.build/versions/master/be/common-definitions.html#test.size
+ * There is still some debate to be had on exactly how we do this. (For
+ * one, we probably want to have some sort of test runner level
+ * timeout.)
+ *
+ * For more background on this topic, see:
+ * https://mike-bland.com/2011/11/01/small-medium-large.html
+ */
+ if (sysctl_hung_task_timeout_secs) {
+ /*
+ * If the hung task detector is active (i.e.
+ * sysctl_hung_task_timeout_secs is non-zero), just set the
+ * timeout to a value below it.
+ *
+ * In regards to the above TODO, if we decide on variable
+ * timeouts, this logic will likely need to change.
+ */
+ timeout_msecs = (sysctl_hung_task_timeout_secs - 1) *
+ MSEC_PER_SEC;
+ } else {
+ timeout_msecs = 300 * MSEC_PER_SEC; /* 5 min */
+ }
+
+ return timeout_msecs;
+}
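+
+/*
+ * Worked example: with the common hung-task timeout of 120 seconds,
+ * kunit_test_timeout() returns (120 - 1) * MSEC_PER_SEC = 119000 ms; with
+ * the detector disabled, it falls back to the 5 minute default.
+ */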
+
+void kunit_try_catch_run(struct kunit_try_catch *try_catch, void *context)
+{
+ DECLARE_COMPLETION_ONSTACK(try_completion);
+ struct kunit *test = try_catch->test;
+ struct task_struct *task_struct;
+ int exit_code, time_remaining;
+
+ try_catch->context = context;
+ try_catch->try_completion = &try_completion;
+ try_catch->try_result = 0;
+ task_struct = kthread_run(kunit_generic_run_threadfn_adapter,
+ try_catch,
+ "kunit_try_catch_thread");
+ if (IS_ERR(task_struct)) {
+ try_catch->catch(try_catch->context);
+ return;
+ }
+
+ time_remaining = wait_for_completion_timeout(&try_completion,
+ kunit_test_timeout());
+ if (time_remaining == 0) {
+ kunit_err(test, "try timed out\n");
+ try_catch->try_result = -ETIMEDOUT;
+ }
+
+ exit_code = try_catch->try_result;
+
+ if (!exit_code)
+ return;
+
+ if (exit_code == -EFAULT)
+ try_catch->try_result = 0;
+ else if (exit_code == -EINTR)
+ kunit_err(test, "wake_up_process() was never called\n");
+ else
+ kunit_err(test, "Unknown error: %d\n", exit_code);
+
+ try_catch->catch(try_catch->context);
+}
+
+void kunit_try_catch_init(struct kunit_try_catch *try_catch,
+ struct kunit *test,
+ kunit_try_catch_func_t try,
+ kunit_try_catch_func_t catch)
+{
+ try_catch->test = test;
+ try_catch->try = try;
+ try_catch->catch = catch;
+}
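+
+/*
+ * Sketch of the intended calling pattern, mirroring
+ * kunit_run_case_catch_errors() in test.c (try_fn and catch_fn are
+ * placeholder names); try_fn runs in a separate kthread, and catch_fn runs
+ * in the caller's context whenever try_fn throws, times out, or fails to
+ * start:
+ *
+ *	kunit_try_catch_init(&test->try_catch, test, try_fn, catch_fn);
+ *	kunit_try_catch_run(&test->try_catch, context);
+ *	result = kunit_try_catch_get_result(&test->try_catch);
+ *
+ * A non-zero result indicates a timeout or an internal error; a throw from
+ * try_fn is reported to catch_fn with a result of zero.
+ */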
diff --git a/lib/list-test.c b/lib/list-test.c
new file mode 100644
index 000000000000..363c600491c3
--- /dev/null
+++ b/lib/list-test.c
@@ -0,0 +1,746 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KUnit test for the kernel's linked-list structures.
+ *
+ * Copyright (C) 2019, Google LLC.
+ * Author: David Gow <davidgow@google.com>
+ */
+#include <kunit/test.h>
+
+#include <linux/list.h>
+
+struct list_test_struct {
+ int data;
+ struct list_head list;
+};
+
+static void list_test_list_init(struct kunit *test)
+{
+ /* Test the different ways of initialising a list. */
+ struct list_head list1 = LIST_HEAD_INIT(list1);
+ struct list_head list2;
+ LIST_HEAD(list3);
+ struct list_head *list4;
+ struct list_head *list5;
+
+ INIT_LIST_HEAD(&list2);
+
+ list4 = kzalloc(sizeof(*list4), GFP_KERNEL | __GFP_NOFAIL);
+ INIT_LIST_HEAD(list4);
+
+ list5 = kmalloc(sizeof(*list5), GFP_KERNEL | __GFP_NOFAIL);
+ memset(list5, 0xFF, sizeof(*list5));
+ INIT_LIST_HEAD(list5);
+
+ /* list_empty_careful() checks both next and prev. */
+ KUNIT_EXPECT_TRUE(test, list_empty_careful(&list1));
+ KUNIT_EXPECT_TRUE(test, list_empty_careful(&list2));
+ KUNIT_EXPECT_TRUE(test, list_empty_careful(&list3));
+ KUNIT_EXPECT_TRUE(test, list_empty_careful(list4));
+ KUNIT_EXPECT_TRUE(test, list_empty_careful(list5));
+
+ kfree(list4);
+ kfree(list5);
+}
+
+static void list_test_list_add(struct kunit *test)
+{
+ struct list_head a, b;
+ LIST_HEAD(list);
+
+ list_add(&a, &list);
+ list_add(&b, &list);
+
+ /* should be [list] -> b -> a */
+ KUNIT_EXPECT_PTR_EQ(test, list.next, &b);
+ KUNIT_EXPECT_PTR_EQ(test, b.prev, &list);
+ KUNIT_EXPECT_PTR_EQ(test, b.next, &a);
+}
+
+static void list_test_list_add_tail(struct kunit *test)
+{
+ struct list_head a, b;
+ LIST_HEAD(list);
+
+ list_add_tail(&a, &list);
+ list_add_tail(&b, &list);
+
+ /* should be [list] -> a -> b */
+ KUNIT_EXPECT_PTR_EQ(test, list.next, &a);
+ KUNIT_EXPECT_PTR_EQ(test, a.prev, &list);
+ KUNIT_EXPECT_PTR_EQ(test, a.next, &b);
+}
+
+static void list_test_list_del(struct kunit *test)
+{
+ struct list_head a, b;
+ LIST_HEAD(list);
+
+ list_add_tail(&a, &list);
+ list_add_tail(&b, &list);
+
+ /* before: [list] -> a -> b */
+ list_del(&a);
+
+ /* now: [list] -> b */
+ KUNIT_EXPECT_PTR_EQ(test, list.next, &b);
+ KUNIT_EXPECT_PTR_EQ(test, b.prev, &list);
+}
+
+static void list_test_list_replace(struct kunit *test)
+{
+ struct list_head a_old, a_new, b;
+ LIST_HEAD(list);
+
+ list_add_tail(&a_old, &list);
+ list_add_tail(&b, &list);
+
+ /* before: [list] -> a_old -> b */
+ list_replace(&a_old, &a_new);
+
+ /* now: [list] -> a_new -> b */
+ KUNIT_EXPECT_PTR_EQ(test, list.next, &a_new);
+ KUNIT_EXPECT_PTR_EQ(test, b.prev, &a_new);
+}
+
+static void list_test_list_replace_init(struct kunit *test)
+{
+ struct list_head a_old, a_new, b;
+ LIST_HEAD(list);
+
+ list_add_tail(&a_old, &list);
+ list_add_tail(&b, &list);
+
+ /* before: [list] -> a_old -> b */
+ list_replace_init(&a_old, &a_new);
+
+ /* now: [list] -> a_new -> b */
+ KUNIT_EXPECT_PTR_EQ(test, list.next, &a_new);
+ KUNIT_EXPECT_PTR_EQ(test, b.prev, &a_new);
+
+ /* check a_old is empty (initialized) */
+ KUNIT_EXPECT_TRUE(test, list_empty_careful(&a_old));
+}
+
+static void list_test_list_swap(struct kunit *test)
+{
+ struct list_head a, b;
+ LIST_HEAD(list);
+
+ list_add_tail(&a, &list);
+ list_add_tail(&b, &list);
+
+ /* before: [list] -> a -> b */
+ list_swap(&a, &b);
+
+ /* after: [list] -> b -> a */
+ KUNIT_EXPECT_PTR_EQ(test, &b, list.next);
+ KUNIT_EXPECT_PTR_EQ(test, &a, list.prev);
+
+ KUNIT_EXPECT_PTR_EQ(test, &a, b.next);
+ KUNIT_EXPECT_PTR_EQ(test, &list, b.prev);
+
+ KUNIT_EXPECT_PTR_EQ(test, &list, a.next);
+ KUNIT_EXPECT_PTR_EQ(test, &b, a.prev);
+}
+
+static void list_test_list_del_init(struct kunit *test)
+{
+ struct list_head a, b;
+ LIST_HEAD(list);
+
+ list_add_tail(&a, &list);
+ list_add_tail(&b, &list);
+
+ /* before: [list] -> a -> b */
+ list_del_init(&a);
+ /* after: [list] -> b, a initialised */
+
+ KUNIT_EXPECT_PTR_EQ(test, list.next, &b);
+ KUNIT_EXPECT_PTR_EQ(test, b.prev, &list);
+ KUNIT_EXPECT_TRUE(test, list_empty_careful(&a));
+}
+
+static void list_test_list_move(struct kunit *test)
+{
+ struct list_head a, b;
+ LIST_HEAD(list1);
+ LIST_HEAD(list2);
+
+ list_add_tail(&a, &list1);
+ list_add_tail(&b, &list2);
+
+ /* before: [list1] -> a, [list2] -> b */
+ list_move(&a, &list2);
+ /* after: [list1] empty, [list2] -> a -> b */
+
+ KUNIT_EXPECT_TRUE(test, list_empty(&list1));
+
+ KUNIT_EXPECT_PTR_EQ(test, &a, list2.next);
+ KUNIT_EXPECT_PTR_EQ(test, &b, a.next);
+}
+
+static void list_test_list_move_tail(struct kunit *test)
+{
+ struct list_head a, b;
+ LIST_HEAD(list1);
+ LIST_HEAD(list2);
+
+ list_add_tail(&a, &list1);
+ list_add_tail(&b, &list2);
+
+ /* before: [list1] -> a, [list2] -> b */
+ list_move_tail(&a, &list2);
+ /* after: [list1] empty, [list2] -> b -> a */
+
+ KUNIT_EXPECT_TRUE(test, list_empty(&list1));
+
+ KUNIT_EXPECT_PTR_EQ(test, &b, list2.next);
+ KUNIT_EXPECT_PTR_EQ(test, &a, b.next);
+}
+
+static void list_test_list_bulk_move_tail(struct kunit *test)
+{
+ struct list_head a, b, c, d, x, y;
+ struct list_head *list1_values[] = { &x, &b, &c, &y };
+ struct list_head *list2_values[] = { &a, &d };
+ struct list_head *ptr;
+ LIST_HEAD(list1);
+ LIST_HEAD(list2);
+ int i = 0;
+
+ list_add_tail(&x, &list1);
+ list_add_tail(&y, &list1);
+
+ list_add_tail(&a, &list2);
+ list_add_tail(&b, &list2);
+ list_add_tail(&c, &list2);
+ list_add_tail(&d, &list2);
+
+ /* before: [list1] -> x -> y, [list2] -> a -> b -> c -> d */
+ list_bulk_move_tail(&y, &b, &c);
+ /* after: [list1] -> x -> b -> c -> y, [list2] -> a -> d */
+
+ list_for_each(ptr, &list1) {
+ KUNIT_EXPECT_PTR_EQ(test, ptr, list1_values[i]);
+ i++;
+ }
+ KUNIT_EXPECT_EQ(test, i, 4);
+ i = 0;
+ list_for_each(ptr, &list2) {
+ KUNIT_EXPECT_PTR_EQ(test, ptr, list2_values[i]);
+ i++;
+ }
+ KUNIT_EXPECT_EQ(test, i, 2);
+}
+
+static void list_test_list_is_first(struct kunit *test)
+{
+ struct list_head a, b;
+ LIST_HEAD(list);
+
+ list_add_tail(&a, &list);
+ list_add_tail(&b, &list);
+
+ KUNIT_EXPECT_TRUE(test, list_is_first(&a, &list));
+ KUNIT_EXPECT_FALSE(test, list_is_first(&b, &list));
+}
+
+static void list_test_list_is_last(struct kunit *test)
+{
+ struct list_head a, b;
+ LIST_HEAD(list);
+
+ list_add_tail(&a, &list);
+ list_add_tail(&b, &list);
+
+ KUNIT_EXPECT_FALSE(test, list_is_last(&a, &list));
+ KUNIT_EXPECT_TRUE(test, list_is_last(&b, &list));
+}
+
+static void list_test_list_empty(struct kunit *test)
+{
+ struct list_head a;
+ LIST_HEAD(list1);
+ LIST_HEAD(list2);
+
+ list_add_tail(&a, &list1);
+
+ KUNIT_EXPECT_FALSE(test, list_empty(&list1));
+ KUNIT_EXPECT_TRUE(test, list_empty(&list2));
+}
+
+static void list_test_list_empty_careful(struct kunit *test)
+{
+ /* This test doesn't check correctness under concurrent access */
+ struct list_head a;
+ LIST_HEAD(list1);
+ LIST_HEAD(list2);
+
+ list_add_tail(&a, &list1);
+
+ KUNIT_EXPECT_FALSE(test, list_empty_careful(&list1));
+ KUNIT_EXPECT_TRUE(test, list_empty_careful(&list2));
+}
+
+static void list_test_list_rotate_left(struct kunit *test)
+{
+ struct list_head a, b;
+ LIST_HEAD(list);
+
+ list_add_tail(&a, &list);
+ list_add_tail(&b, &list);
+
+ /* before: [list] -> a -> b */
+ list_rotate_left(&list);
+ /* after: [list] -> b -> a */
+
+ KUNIT_EXPECT_PTR_EQ(test, list.next, &b);
+ KUNIT_EXPECT_PTR_EQ(test, b.prev, &list);
+ KUNIT_EXPECT_PTR_EQ(test, b.next, &a);
+}
+
+static void list_test_list_rotate_to_front(struct kunit *test)
+{
+ struct list_head a, b, c, d;
+ struct list_head *list_values[] = { &c, &d, &a, &b };
+ struct list_head *ptr;
+ LIST_HEAD(list);
+ int i = 0;
+
+ list_add_tail(&a, &list);
+ list_add_tail(&b, &list);
+ list_add_tail(&c, &list);
+ list_add_tail(&d, &list);
+
+ /* before: [list] -> a -> b -> c -> d */
+ list_rotate_to_front(&c, &list);
+ /* after: [list] -> c -> d -> a -> b */
+
+ list_for_each(ptr, &list) {
+ KUNIT_EXPECT_PTR_EQ(test, ptr, list_values[i]);
+ i++;
+ }
+ KUNIT_EXPECT_EQ(test, i, 4);
+}
+
+static void list_test_list_is_singular(struct kunit *test)
+{
+ struct list_head a, b;
+ LIST_HEAD(list);
+
+ /* [list] empty */
+ KUNIT_EXPECT_FALSE(test, list_is_singular(&list));
+
+ list_add_tail(&a, &list);
+
+ /* [list] -> a */
+ KUNIT_EXPECT_TRUE(test, list_is_singular(&list));
+
+ list_add_tail(&b, &list);
+
+ /* [list] -> a -> b */
+ KUNIT_EXPECT_FALSE(test, list_is_singular(&list));
+}
+
+static void list_test_list_cut_position(struct kunit *test)
+{
+ struct list_head entries[3], *cur;
+ LIST_HEAD(list1);
+ LIST_HEAD(list2);
+ int i = 0;
+
+ list_add_tail(&entries[0], &list1);
+ list_add_tail(&entries[1], &list1);
+ list_add_tail(&entries[2], &list1);
+
+ /* before: [list1] -> entries[0] -> entries[1] -> entries[2] */
+ list_cut_position(&list2, &list1, &entries[1]);
+ /* after: [list2] -> entries[0] -> entries[1], [list1] -> entries[2] */
+
+ list_for_each(cur, &list2) {
+ KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
+ i++;
+ }
+
+ KUNIT_EXPECT_EQ(test, i, 2);
+
+ list_for_each(cur, &list1) {
+ KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
+ i++;
+ }
+}
+
+static void list_test_list_cut_before(struct kunit *test)
+{
+ struct list_head entries[3], *cur;
+ LIST_HEAD(list1);
+ LIST_HEAD(list2);
+ int i = 0;
+
+ list_add_tail(&entries[0], &list1);
+ list_add_tail(&entries[1], &list1);
+ list_add_tail(&entries[2], &list1);
+
+ /* before: [list1] -> entries[0] -> entries[1] -> entries[2] */
+ list_cut_before(&list2, &list1, &entries[1]);
+ /* after: [list2] -> entries[0], [list1] -> entries[1] -> entries[2] */
+
+ list_for_each(cur, &list2) {
+ KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
+ i++;
+ }
+
+ KUNIT_EXPECT_EQ(test, i, 1);
+
+ list_for_each(cur, &list1) {
+ KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
+ i++;
+ }
+}
+
+static void list_test_list_splice(struct kunit *test)
+{
+ struct list_head entries[5], *cur;
+ LIST_HEAD(list1);
+ LIST_HEAD(list2);
+ int i = 0;
+
+ list_add_tail(&entries[0], &list1);
+ list_add_tail(&entries[1], &list1);
+ list_add_tail(&entries[2], &list2);
+ list_add_tail(&entries[3], &list2);
+ list_add_tail(&entries[4], &list1);
+
+ /* before: [list1]->e[0]->e[1]->e[4], [list2]->e[2]->e[3] */
+ list_splice(&list2, &entries[1]);
+ /* after: [list1]->e[0]->e[1]->e[2]->e[3]->e[4], [list2] uninit */
+
+ list_for_each(cur, &list1) {
+ KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
+ i++;
+ }
+
+ KUNIT_EXPECT_EQ(test, i, 5);
+}
+
+static void list_test_list_splice_tail(struct kunit *test)
+{
+ struct list_head entries[5], *cur;
+ LIST_HEAD(list1);
+ LIST_HEAD(list2);
+ int i = 0;
+
+ list_add_tail(&entries[0], &list1);
+ list_add_tail(&entries[1], &list1);
+ list_add_tail(&entries[2], &list2);
+ list_add_tail(&entries[3], &list2);
+ list_add_tail(&entries[4], &list1);
+
+ /* before: [list1]->e[0]->e[1]->e[4], [list2]->e[2]->e[3] */
+ list_splice_tail(&list2, &entries[4]);
+ /* after: [list1]->e[0]->e[1]->e[2]->e[3]->e[4], [list2] uninit */
+
+ list_for_each(cur, &list1) {
+ KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
+ i++;
+ }
+
+ KUNIT_EXPECT_EQ(test, i, 5);
+}
+
+static void list_test_list_splice_init(struct kunit *test)
+{
+ struct list_head entries[5], *cur;
+ LIST_HEAD(list1);
+ LIST_HEAD(list2);
+ int i = 0;
+
+ list_add_tail(&entries[0], &list1);
+ list_add_tail(&entries[1], &list1);
+ list_add_tail(&entries[2], &list2);
+ list_add_tail(&entries[3], &list2);
+ list_add_tail(&entries[4], &list1);
+
+ /* before: [list1]->e[0]->e[1]->e[4], [list2]->e[2]->e[3] */
+ list_splice_init(&list2, &entries[1]);
+ /* after: [list1]->e[0]->e[1]->e[2]->e[3]->e[4], [list2] empty */
+
+ list_for_each(cur, &list1) {
+ KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
+ i++;
+ }
+
+ KUNIT_EXPECT_EQ(test, i, 5);
+
+ KUNIT_EXPECT_TRUE(test, list_empty_careful(&list2));
+}
+
+static void list_test_list_splice_tail_init(struct kunit *test)
+{
+ struct list_head entries[5], *cur;
+ LIST_HEAD(list1);
+ LIST_HEAD(list2);
+ int i = 0;
+
+ list_add_tail(&entries[0], &list1);
+ list_add_tail(&entries[1], &list1);
+ list_add_tail(&entries[2], &list2);
+ list_add_tail(&entries[3], &list2);
+ list_add_tail(&entries[4], &list1);
+
+ /* before: [list1]->e[0]->e[1]->e[4], [list2]->e[2]->e[3] */
+ list_splice_tail_init(&list2, &entries[4]);
+ /* after: [list1]->e[0]->e[1]->e[2]->e[3]->e[4], [list2] empty */
+
+ list_for_each(cur, &list1) {
+ KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
+ i++;
+ }
+
+ KUNIT_EXPECT_EQ(test, i, 5);
+
+ KUNIT_EXPECT_TRUE(test, list_empty_careful(&list2));
+}
+
+static void list_test_list_entry(struct kunit *test)
+{
+ struct list_test_struct test_struct;
+
+ KUNIT_EXPECT_PTR_EQ(test, &test_struct, list_entry(&(test_struct.list),
+ struct list_test_struct, list));
+}
+
+static void list_test_list_first_entry(struct kunit *test)
+{
+ struct list_test_struct test_struct1, test_struct2;
+ LIST_HEAD(list);
+
+ list_add_tail(&test_struct1.list, &list);
+ list_add_tail(&test_struct2.list, &list);
+
+ KUNIT_EXPECT_PTR_EQ(test, &test_struct1, list_first_entry(&list,
+ struct list_test_struct, list));
+}
+
+static void list_test_list_last_entry(struct kunit *test)
+{
+ struct list_test_struct test_struct1, test_struct2;
+ LIST_HEAD(list);
+
+ list_add_tail(&test_struct1.list, &list);
+ list_add_tail(&test_struct2.list, &list);
+
+ KUNIT_EXPECT_PTR_EQ(test, &test_struct2, list_last_entry(&list,
+ struct list_test_struct, list));
+}
+
+static void list_test_list_first_entry_or_null(struct kunit *test)
+{
+ struct list_test_struct test_struct1, test_struct2;
+ LIST_HEAD(list);
+
+ KUNIT_EXPECT_FALSE(test, list_first_entry_or_null(&list,
+ struct list_test_struct, list));
+
+ list_add_tail(&test_struct1.list, &list);
+ list_add_tail(&test_struct2.list, &list);
+
+ KUNIT_EXPECT_PTR_EQ(test, &test_struct1,
+ list_first_entry_or_null(&list,
+ struct list_test_struct, list));
+}
+
+static void list_test_list_next_entry(struct kunit *test)
+{
+ struct list_test_struct test_struct1, test_struct2;
+ LIST_HEAD(list);
+
+ list_add_tail(&test_struct1.list, &list);
+ list_add_tail(&test_struct2.list, &list);
+
+ KUNIT_EXPECT_PTR_EQ(test, &test_struct2, list_next_entry(&test_struct1,
+ list));
+}
+
+static void list_test_list_prev_entry(struct kunit *test)
+{
+ struct list_test_struct test_struct1, test_struct2;
+ LIST_HEAD(list);
+
+ list_add_tail(&test_struct1.list, &list);
+ list_add_tail(&test_struct2.list, &list);
+
+ KUNIT_EXPECT_PTR_EQ(test, &test_struct1, list_prev_entry(&test_struct2,
+ list));
+}
+
+static void list_test_list_for_each(struct kunit *test)
+{
+ struct list_head entries[3], *cur;
+ LIST_HEAD(list);
+ int i = 0;
+
+ list_add_tail(&entries[0], &list);
+ list_add_tail(&entries[1], &list);
+ list_add_tail(&entries[2], &list);
+
+ list_for_each(cur, &list) {
+ KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
+ i++;
+ }
+
+ KUNIT_EXPECT_EQ(test, i, 3);
+}
+
+static void list_test_list_for_each_prev(struct kunit *test)
+{
+ struct list_head entries[3], *cur;
+ LIST_HEAD(list);
+ int i = 2;
+
+ list_add_tail(&entries[0], &list);
+ list_add_tail(&entries[1], &list);
+ list_add_tail(&entries[2], &list);
+
+ list_for_each_prev(cur, &list) {
+ KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
+ i--;
+ }
+
+ KUNIT_EXPECT_EQ(test, i, -1);
+}
+
+static void list_test_list_for_each_safe(struct kunit *test)
+{
+ struct list_head entries[3], *cur, *n;
+ LIST_HEAD(list);
+ int i = 0;
+
+ list_add_tail(&entries[0], &list);
+ list_add_tail(&entries[1], &list);
+ list_add_tail(&entries[2], &list);
+
+ list_for_each_safe(cur, n, &list) {
+ KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
+ list_del(&entries[i]);
+ i++;
+ }
+
+ KUNIT_EXPECT_EQ(test, i, 3);
+ KUNIT_EXPECT_TRUE(test, list_empty(&list));
+}
+
+static void list_test_list_for_each_prev_safe(struct kunit *test)
+{
+ struct list_head entries[3], *cur, *n;
+ LIST_HEAD(list);
+ int i = 2;
+
+ list_add_tail(&entries[0], &list);
+ list_add_tail(&entries[1], &list);
+ list_add_tail(&entries[2], &list);
+
+ list_for_each_prev_safe(cur, n, &list) {
+ KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
+ list_del(&entries[i]);
+ i--;
+ }
+
+ KUNIT_EXPECT_EQ(test, i, -1);
+ KUNIT_EXPECT_TRUE(test, list_empty(&list));
+}
+
+static void list_test_list_for_each_entry(struct kunit *test)
+{
+ struct list_test_struct entries[5], *cur;
+ static LIST_HEAD(list);
+ int i = 0;
+
+ for (i = 0; i < 5; ++i) {
+ entries[i].data = i;
+ list_add_tail(&entries[i].list, &list);
+ }
+
+ i = 0;
+
+ list_for_each_entry(cur, &list, list) {
+ KUNIT_EXPECT_EQ(test, cur->data, i);
+ i++;
+ }
+
+ KUNIT_EXPECT_EQ(test, i, 5);
+}
+
+static void list_test_list_for_each_entry_reverse(struct kunit *test)
+{
+ struct list_test_struct entries[5], *cur;
+ static LIST_HEAD(list);
+ int i = 0;
+
+ for (i = 0; i < 5; ++i) {
+ entries[i].data = i;
+ list_add_tail(&entries[i].list, &list);
+ }
+
+ i = 4;
+
+ list_for_each_entry_reverse(cur, &list, list) {
+ KUNIT_EXPECT_EQ(test, cur->data, i);
+ i--;
+ }
+
+ KUNIT_EXPECT_EQ(test, i, -1);
+}
+
+static struct kunit_case list_test_cases[] = {
+ KUNIT_CASE(list_test_list_init),
+ KUNIT_CASE(list_test_list_add),
+ KUNIT_CASE(list_test_list_add_tail),
+ KUNIT_CASE(list_test_list_del),
+ KUNIT_CASE(list_test_list_replace),
+ KUNIT_CASE(list_test_list_replace_init),
+ KUNIT_CASE(list_test_list_swap),
+ KUNIT_CASE(list_test_list_del_init),
+ KUNIT_CASE(list_test_list_move),
+ KUNIT_CASE(list_test_list_move_tail),
+ KUNIT_CASE(list_test_list_bulk_move_tail),
+ KUNIT_CASE(list_test_list_is_first),
+ KUNIT_CASE(list_test_list_is_last),
+ KUNIT_CASE(list_test_list_empty),
+ KUNIT_CASE(list_test_list_empty_careful),
+ KUNIT_CASE(list_test_list_rotate_left),
+ KUNIT_CASE(list_test_list_rotate_to_front),
+ KUNIT_CASE(list_test_list_is_singular),
+ KUNIT_CASE(list_test_list_cut_position),
+ KUNIT_CASE(list_test_list_cut_before),
+ KUNIT_CASE(list_test_list_splice),
+ KUNIT_CASE(list_test_list_splice_tail),
+ KUNIT_CASE(list_test_list_splice_init),
+ KUNIT_CASE(list_test_list_splice_tail_init),
+ KUNIT_CASE(list_test_list_entry),
+ KUNIT_CASE(list_test_list_first_entry),
+ KUNIT_CASE(list_test_list_last_entry),
+ KUNIT_CASE(list_test_list_first_entry_or_null),
+ KUNIT_CASE(list_test_list_next_entry),
+ KUNIT_CASE(list_test_list_prev_entry),
+ KUNIT_CASE(list_test_list_for_each),
+ KUNIT_CASE(list_test_list_for_each_prev),
+ KUNIT_CASE(list_test_list_for_each_safe),
+ KUNIT_CASE(list_test_list_for_each_prev_safe),
+ KUNIT_CASE(list_test_list_for_each_entry),
+ KUNIT_CASE(list_test_list_for_each_entry_reverse),
+ {}
+};
+
+static struct kunit_suite list_test_module = {
+ .name = "list-kunit-test",
+ .test_cases = list_test_cases,
+};
+
+kunit_test_suite(list_test_module);
diff --git a/lib/livepatch/Makefile b/lib/livepatch/Makefile
index 26900ddaef82..295b94bff370 100644
--- a/lib/livepatch/Makefile
+++ b/lib/livepatch/Makefile
@@ -8,7 +8,10 @@ obj-$(CONFIG_TEST_LIVEPATCH) += test_klp_atomic_replace.o \
test_klp_callbacks_busy.o \
test_klp_callbacks_mod.o \
test_klp_livepatch.o \
- test_klp_shadow_vars.o
+ test_klp_shadow_vars.o \
+ test_klp_state.o \
+ test_klp_state2.o \
+ test_klp_state3.o
# Target modules to be livepatched require CC_FLAGS_FTRACE
CFLAGS_test_klp_callbacks_busy.o += $(CC_FLAGS_FTRACE)
diff --git a/lib/livepatch/test_klp_state.c b/lib/livepatch/test_klp_state.c
new file mode 100644
index 000000000000..57a4253acb01
--- /dev/null
+++ b/lib/livepatch/test_klp_state.c
@@ -0,0 +1,162 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2019 SUSE
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/printk.h>
+#include <linux/livepatch.h>
+
+#define CONSOLE_LOGLEVEL_STATE 1
+/* Version 1 does not support migration. */
+#define CONSOLE_LOGLEVEL_STATE_VERSION 1
+
+static const char *const module_state[] = {
+ [MODULE_STATE_LIVE] = "[MODULE_STATE_LIVE] Normal state",
+ [MODULE_STATE_COMING] = "[MODULE_STATE_COMING] Fully formed, running module_init",
+ [MODULE_STATE_GOING] = "[MODULE_STATE_GOING] Going away",
+ [MODULE_STATE_UNFORMED] = "[MODULE_STATE_UNFORMED] Still setting it up",
+};
+
+static void callback_info(const char *callback, struct klp_object *obj)
+{
+ if (obj->mod)
+ pr_info("%s: %s -> %s\n", callback, obj->mod->name,
+ module_state[obj->mod->state]);
+ else
+ pr_info("%s: vmlinux\n", callback);
+}
+
+static struct klp_patch patch;
+
+static int allocate_loglevel_state(void)
+{
+ struct klp_state *loglevel_state;
+
+ loglevel_state = klp_get_state(&patch, CONSOLE_LOGLEVEL_STATE);
+ if (!loglevel_state)
+ return -EINVAL;
+
+ loglevel_state->data = kzalloc(sizeof(console_loglevel), GFP_KERNEL);
+ if (!loglevel_state->data)
+ return -ENOMEM;
+
+ pr_info("%s: allocating space to store console_loglevel\n",
+ __func__);
+ return 0;
+}
+
+static void fix_console_loglevel(void)
+{
+ struct klp_state *loglevel_state;
+
+ loglevel_state = klp_get_state(&patch, CONSOLE_LOGLEVEL_STATE);
+ if (!loglevel_state)
+ return;
+
+ pr_info("%s: fixing console_loglevel\n", __func__);
+ *(int *)loglevel_state->data = console_loglevel;
+ console_loglevel = CONSOLE_LOGLEVEL_MOTORMOUTH;
+}
+
+static void restore_console_loglevel(void)
+{
+ struct klp_state *loglevel_state;
+
+ loglevel_state = klp_get_state(&patch, CONSOLE_LOGLEVEL_STATE);
+ if (!loglevel_state)
+ return;
+
+ pr_info("%s: restoring console_loglevel\n", __func__);
+ console_loglevel = *(int *)loglevel_state->data;
+}
+
+static void free_loglevel_state(void)
+{
+ struct klp_state *loglevel_state;
+
+ loglevel_state = klp_get_state(&patch, CONSOLE_LOGLEVEL_STATE);
+ if (!loglevel_state)
+ return;
+
+ pr_info("%s: freeing space for the stored console_loglevel\n",
+ __func__);
+ kfree(loglevel_state->data);
+}
+
+/* Executed on object patching (ie, patch enablement) */
+static int pre_patch_callback(struct klp_object *obj)
+{
+ callback_info(__func__, obj);
+ return allocate_loglevel_state();
+}
+
+/* Executed on object patching (ie, patch enablement) */
+static void post_patch_callback(struct klp_object *obj)
+{
+ callback_info(__func__, obj);
+ fix_console_loglevel();
+}
+
+/* Executed on object unpatching (ie, patch disablement) */
+static void pre_unpatch_callback(struct klp_object *obj)
+{
+ callback_info(__func__, obj);
+ restore_console_loglevel();
+}
+
+/* Executed on object unpatching (ie, patch disablement) */
+static void post_unpatch_callback(struct klp_object *obj)
+{
+ callback_info(__func__, obj);
+ free_loglevel_state();
+}
+
+static struct klp_func no_funcs[] = {
+ {}
+};
+
+static struct klp_object objs[] = {
+ {
+ .name = NULL, /* vmlinux */
+ .funcs = no_funcs,
+ .callbacks = {
+ .pre_patch = pre_patch_callback,
+ .post_patch = post_patch_callback,
+ .pre_unpatch = pre_unpatch_callback,
+ .post_unpatch = post_unpatch_callback,
+ },
+ }, { }
+};
+
+static struct klp_state states[] = {
+ {
+ .id = CONSOLE_LOGLEVEL_STATE,
+ .version = CONSOLE_LOGLEVEL_STATE_VERSION,
+ }, { }
+};
+
+static struct klp_patch patch = {
+ .mod = THIS_MODULE,
+ .objs = objs,
+ .states = states,
+ .replace = true,
+};
+
+static int test_klp_callbacks_demo_init(void)
+{
+ return klp_enable_patch(&patch);
+}
+
+static void test_klp_callbacks_demo_exit(void)
+{
+}
+
+module_init(test_klp_callbacks_demo_init);
+module_exit(test_klp_callbacks_demo_exit);
+MODULE_LICENSE("GPL");
+MODULE_INFO(livepatch, "Y");
+MODULE_AUTHOR("Petr Mladek <pmladek@suse.com>");
+MODULE_DESCRIPTION("Livepatch test: system state modification");
diff --git a/lib/livepatch/test_klp_state2.c b/lib/livepatch/test_klp_state2.c
new file mode 100644
index 000000000000..c978ea4d5e67
--- /dev/null
+++ b/lib/livepatch/test_klp_state2.c
@@ -0,0 +1,191 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2019 SUSE
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/printk.h>
+#include <linux/livepatch.h>
+
+#define CONSOLE_LOGLEVEL_STATE 1
+/* Version 2 supports migration. */
+#define CONSOLE_LOGLEVEL_STATE_VERSION 2
+
+static const char *const module_state[] = {
+ [MODULE_STATE_LIVE] = "[MODULE_STATE_LIVE] Normal state",
+ [MODULE_STATE_COMING] = "[MODULE_STATE_COMING] Fully formed, running module_init",
+ [MODULE_STATE_GOING] = "[MODULE_STATE_GOING] Going away",
+ [MODULE_STATE_UNFORMED] = "[MODULE_STATE_UNFORMED] Still setting it up",
+};
+
+static void callback_info(const char *callback, struct klp_object *obj)
+{
+ if (obj->mod)
+ pr_info("%s: %s -> %s\n", callback, obj->mod->name,
+ module_state[obj->mod->state]);
+ else
+ pr_info("%s: vmlinux\n", callback);
+}
+
+static struct klp_patch patch;
+
+static int allocate_loglevel_state(void)
+{
+ struct klp_state *loglevel_state, *prev_loglevel_state;
+
+ prev_loglevel_state = klp_get_prev_state(CONSOLE_LOGLEVEL_STATE);
+ if (prev_loglevel_state) {
+ pr_info("%s: space to store console_loglevel already allocated\n",
+ __func__);
+ return 0;
+ }
+
+ loglevel_state = klp_get_state(&patch, CONSOLE_LOGLEVEL_STATE);
+ if (!loglevel_state)
+ return -EINVAL;
+
+ loglevel_state->data = kzalloc(sizeof(console_loglevel), GFP_KERNEL);
+ if (!loglevel_state->data)
+ return -ENOMEM;
+
+ pr_info("%s: allocating space to store console_loglevel\n",
+ __func__);
+ return 0;
+}
+
+static void fix_console_loglevel(void)
+{
+ struct klp_state *loglevel_state, *prev_loglevel_state;
+
+ loglevel_state = klp_get_state(&patch, CONSOLE_LOGLEVEL_STATE);
+ if (!loglevel_state)
+ return;
+
+ prev_loglevel_state = klp_get_prev_state(CONSOLE_LOGLEVEL_STATE);
+ if (prev_loglevel_state) {
+ pr_info("%s: taking over the console_loglevel change\n",
+ __func__);
+ loglevel_state->data = prev_loglevel_state->data;
+ return;
+ }
+
+ pr_info("%s: fixing console_loglevel\n", __func__);
+ *(int *)loglevel_state->data = console_loglevel;
+ console_loglevel = CONSOLE_LOGLEVEL_MOTORMOUTH;
+}
+
+static void restore_console_loglevel(void)
+{
+ struct klp_state *loglevel_state, *prev_loglevel_state;
+
+ prev_loglevel_state = klp_get_prev_state(CONSOLE_LOGLEVEL_STATE);
+ if (prev_loglevel_state) {
+ pr_info("%s: passing the console_loglevel change back to the old livepatch\n",
+ __func__);
+ return;
+ }
+
+ loglevel_state = klp_get_state(&patch, CONSOLE_LOGLEVEL_STATE);
+ if (!loglevel_state)
+ return;
+
+ pr_info("%s: restoring console_loglevel\n", __func__);
+ console_loglevel = *(int *)loglevel_state->data;
+}
+
+static void free_loglevel_state(void)
+{
+ struct klp_state *loglevel_state, *prev_loglevel_state;
+
+ prev_loglevel_state = klp_get_prev_state(CONSOLE_LOGLEVEL_STATE);
+ if (prev_loglevel_state) {
+ pr_info("%s: keeping space to store console_loglevel\n",
+ __func__);
+ return;
+ }
+
+ loglevel_state = klp_get_state(&patch, CONSOLE_LOGLEVEL_STATE);
+ if (!loglevel_state)
+ return;
+
+ pr_info("%s: freeing space for the stored console_loglevel\n",
+ __func__);
+ kfree(loglevel_state->data);
+}
+
+/* Executed on object patching (ie, patch enablement) */
+static int pre_patch_callback(struct klp_object *obj)
+{
+ callback_info(__func__, obj);
+ return allocate_loglevel_state();
+}
+
+/* Executed on object patching (ie, patch enablement) */
+static void post_patch_callback(struct klp_object *obj)
+{
+ callback_info(__func__, obj);
+ fix_console_loglevel();
+}
+
+/* Executed on object unpatching (ie, patch disablement) */
+static void pre_unpatch_callback(struct klp_object *obj)
+{
+ callback_info(__func__, obj);
+ restore_console_loglevel();
+}
+
+/* Executed on object unpatching (ie, patch disablement) */
+static void post_unpatch_callback(struct klp_object *obj)
+{
+ callback_info(__func__, obj);
+ free_loglevel_state();
+}
+
+static struct klp_func no_funcs[] = {
+ {}
+};
+
+static struct klp_object objs[] = {
+ {
+ .name = NULL, /* vmlinux */
+ .funcs = no_funcs,
+ .callbacks = {
+ .pre_patch = pre_patch_callback,
+ .post_patch = post_patch_callback,
+ .pre_unpatch = pre_unpatch_callback,
+ .post_unpatch = post_unpatch_callback,
+ },
+ }, { }
+};
+
+static struct klp_state states[] = {
+ {
+ .id = CONSOLE_LOGLEVEL_STATE,
+ .version = CONSOLE_LOGLEVEL_STATE_VERSION,
+ }, { }
+};
+
+static struct klp_patch patch = {
+ .mod = THIS_MODULE,
+ .objs = objs,
+ .states = states,
+ .replace = true,
+};
+
+static int test_klp_callbacks_demo_init(void)
+{
+ return klp_enable_patch(&patch);
+}
+
+static void test_klp_callbacks_demo_exit(void)
+{
+}
+
+module_init(test_klp_callbacks_demo_init);
+module_exit(test_klp_callbacks_demo_exit);
+MODULE_LICENSE("GPL");
+MODULE_INFO(livepatch, "Y");
+MODULE_AUTHOR("Petr Mladek <pmladek@suse.com>");
+MODULE_DESCRIPTION("Livepatch test: system state modification");
diff --git a/lib/livepatch/test_klp_state3.c b/lib/livepatch/test_klp_state3.c
new file mode 100644
index 000000000000..9226579d10c5
--- /dev/null
+++ b/lib/livepatch/test_klp_state3.c
@@ -0,0 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2019 SUSE
+
+/* The console loglevel fix is the same in the next cumulative patch. */
+#include "test_klp_state2.c"
diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
index a1705545e6ac..14f44f59e733 100644
--- a/lib/locking-selftest.c
+++ b/lib/locking-selftest.c
@@ -1475,7 +1475,7 @@ static void ww_test_edeadlk_normal(void)
mutex_lock(&o2.base);
o2.ctx = &t2;
- mutex_release(&o2.base.dep_map, 1, _THIS_IP_);
+ mutex_release(&o2.base.dep_map, _THIS_IP_);
WWAI(&t);
t2 = t;
@@ -1500,7 +1500,7 @@ static void ww_test_edeadlk_normal_slow(void)
int ret;
mutex_lock(&o2.base);
- mutex_release(&o2.base.dep_map, 1, _THIS_IP_);
+ mutex_release(&o2.base.dep_map, _THIS_IP_);
o2.ctx = &t2;
WWAI(&t);
@@ -1527,7 +1527,7 @@ static void ww_test_edeadlk_no_unlock(void)
mutex_lock(&o2.base);
o2.ctx = &t2;
- mutex_release(&o2.base.dep_map, 1, _THIS_IP_);
+ mutex_release(&o2.base.dep_map, _THIS_IP_);
WWAI(&t);
t2 = t;
@@ -1551,7 +1551,7 @@ static void ww_test_edeadlk_no_unlock_slow(void)
int ret;
mutex_lock(&o2.base);
- mutex_release(&o2.base.dep_map, 1, _THIS_IP_);
+ mutex_release(&o2.base.dep_map, _THIS_IP_);
o2.ctx = &t2;
WWAI(&t);
@@ -1576,7 +1576,7 @@ static void ww_test_edeadlk_acquire_more(void)
int ret;
mutex_lock(&o2.base);
- mutex_release(&o2.base.dep_map, 1, _THIS_IP_);
+ mutex_release(&o2.base.dep_map, _THIS_IP_);
o2.ctx = &t2;
WWAI(&t);
@@ -1597,7 +1597,7 @@ static void ww_test_edeadlk_acquire_more_slow(void)
int ret;
mutex_lock(&o2.base);
- mutex_release(&o2.base.dep_map, 1, _THIS_IP_);
+ mutex_release(&o2.base.dep_map, _THIS_IP_);
o2.ctx = &t2;
WWAI(&t);
@@ -1618,11 +1618,11 @@ static void ww_test_edeadlk_acquire_more_edeadlk(void)
int ret;
mutex_lock(&o2.base);
- mutex_release(&o2.base.dep_map, 1, _THIS_IP_);
+ mutex_release(&o2.base.dep_map, _THIS_IP_);
o2.ctx = &t2;
mutex_lock(&o3.base);
- mutex_release(&o3.base.dep_map, 1, _THIS_IP_);
+ mutex_release(&o3.base.dep_map, _THIS_IP_);
o3.ctx = &t2;
WWAI(&t);
@@ -1644,11 +1644,11 @@ static void ww_test_edeadlk_acquire_more_edeadlk_slow(void)
int ret;
mutex_lock(&o2.base);
- mutex_release(&o2.base.dep_map, 1, _THIS_IP_);
+ mutex_release(&o2.base.dep_map, _THIS_IP_);
o2.ctx = &t2;
mutex_lock(&o3.base);
- mutex_release(&o3.base.dep_map, 1, _THIS_IP_);
+ mutex_release(&o3.base.dep_map, _THIS_IP_);
o3.ctx = &t2;
WWAI(&t);
@@ -1669,7 +1669,7 @@ static void ww_test_edeadlk_acquire_wrong(void)
int ret;
mutex_lock(&o2.base);
- mutex_release(&o2.base.dep_map, 1, _THIS_IP_);
+ mutex_release(&o2.base.dep_map, _THIS_IP_);
o2.ctx = &t2;
WWAI(&t);
@@ -1694,7 +1694,7 @@ static void ww_test_edeadlk_acquire_wrong_slow(void)
int ret;
mutex_lock(&o2.base);
- mutex_release(&o2.base.dep_map, 1, _THIS_IP_);
+ mutex_release(&o2.base.dep_map, _THIS_IP_);
o2.ctx = &t2;
WWAI(&t);
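
Editor's note: all of the locking-selftest hunks above are one mechanical conversion. lock_release() dropped its unused @nested argument treewide, so the mutex_release() wrapper loses the middle parameter. Paraphrasing include/linux/lockdep.h before and after this change:

	/* before */
	#define mutex_release(l, n, i)	lock_release(l, n, i)
	/* after */
	#define mutex_release(l, i)	lock_release(l, i)
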
diff --git a/lib/memregion.c b/lib/memregion.c
new file mode 100644
index 000000000000..77c85b5251da
--- /dev/null
+++ b/lib/memregion.c
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* identifiers for device / performance-differentiated memory regions */
+#include <linux/idr.h>
+#include <linux/types.h>
+
+static DEFINE_IDA(memregion_ids);
+
+int memregion_alloc(gfp_t gfp)
+{
+ return ida_alloc(&memregion_ids, gfp);
+}
+EXPORT_SYMBOL(memregion_alloc);
+
+void memregion_free(int id)
+{
+ ida_free(&memregion_ids, id);
+}
+EXPORT_SYMBOL(memregion_free);
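
Editor's note: lib/memregion.c is a thin IDA wrapper for handing out unique memory-region ids. A minimal hypothetical caller (example_create_region() is not in this patch) looks like:

	static int example_create_region(void)
	{
		int id = memregion_alloc(GFP_KERNEL);	/* IDA-backed, unique */

		if (id < 0)
			return id;	/* allocation failed */

		pr_info("allocated memregion id %d\n", id);
		memregion_free(id);	/* release on teardown */
		return 0;
	}
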
diff --git a/lib/refcount.c b/lib/refcount.c
index 6e904af0fb3e..ebac8b7d15a7 100644
--- a/lib/refcount.c
+++ b/lib/refcount.c
@@ -1,41 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Variant of atomic_t specialized for reference counts.
- *
- * The interface matches the atomic_t interface (to aid in porting) but only
- * provides the few functions one should use for reference counting.
- *
- * It differs in that the counter saturates at UINT_MAX and will not move once
- * there. This avoids wrapping the counter and causing 'spurious'
- * use-after-free issues.
- *
- * Memory ordering rules are slightly relaxed wrt regular atomic_t functions
- * and provide only what is strictly required for refcounts.
- *
- * The increments are fully relaxed; these will not provide ordering. The
- * rationale is that whatever is used to obtain the object we're increasing the
- * reference count on will provide the ordering. For locked data structures,
- * its the lock acquire, for RCU/lockless data structures its the dependent
- * load.
- *
- * Do note that inc_not_zero() provides a control dependency which will order
- * future stores against the inc, this ensures we'll never modify the object
- * if we did not in fact acquire a reference.
- *
- * The decrements will provide release order, such that all the prior loads and
- * stores will be issued before, it also provides a control dependency, which
- * will order us against the subsequent free().
- *
- * The control dependency is against the load of the cmpxchg (ll/sc) that
- * succeeded. This means the stores aren't fully ordered, but this is fine
- * because the 1->0 transition indicates no concurrency.
- *
- * Note that the allocator is responsible for ordering things between free()
- * and alloc().
- *
- * The decrements dec_and_test() and sub_and_test() also provide acquire
- * ordering on success.
- *
+ * Out-of-line refcount functions.
*/
#include <linux/mutex.h>
@@ -43,199 +8,33 @@
#include <linux/spinlock.h>
#include <linux/bug.h>
-/**
- * refcount_add_not_zero_checked - add a value to a refcount unless it is 0
- * @i: the value to add to the refcount
- * @r: the refcount
- *
- * Will saturate at UINT_MAX and WARN.
- *
- * Provides no memory ordering, it is assumed the caller has guaranteed the
- * object memory to be stable (RCU, etc.). It does provide a control dependency
- * and thereby orders future stores. See the comment on top.
- *
- * Use of this function is not recommended for the normal reference counting
- * use case in which references are taken and released one at a time. In these
- * cases, refcount_inc(), or one of its variants, should instead be used to
- * increment a reference count.
- *
- * Return: false if the passed refcount is 0, true otherwise
- */
-bool refcount_add_not_zero_checked(unsigned int i, refcount_t *r)
-{
- unsigned int new, val = atomic_read(&r->refs);
-
- do {
- if (!val)
- return false;
-
- if (unlikely(val == UINT_MAX))
- return true;
-
- new = val + i;
- if (new < val)
- new = UINT_MAX;
-
- } while (!atomic_try_cmpxchg_relaxed(&r->refs, &val, new));
-
- WARN_ONCE(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
-
- return true;
-}
-EXPORT_SYMBOL(refcount_add_not_zero_checked);
-
-/**
- * refcount_add_checked - add a value to a refcount
- * @i: the value to add to the refcount
- * @r: the refcount
- *
- * Similar to atomic_add(), but will saturate at UINT_MAX and WARN.
- *
- * Provides no memory ordering, it is assumed the caller has guaranteed the
- * object memory to be stable (RCU, etc.). It does provide a control dependency
- * and thereby orders future stores. See the comment on top.
- *
- * Use of this function is not recommended for the normal reference counting
- * use case in which references are taken and released one at a time. In these
- * cases, refcount_inc(), or one of its variants, should instead be used to
- * increment a reference count.
- */
-void refcount_add_checked(unsigned int i, refcount_t *r)
-{
- WARN_ONCE(!refcount_add_not_zero_checked(i, r), "refcount_t: addition on 0; use-after-free.\n");
-}
-EXPORT_SYMBOL(refcount_add_checked);
-
-/**
- * refcount_inc_not_zero_checked - increment a refcount unless it is 0
- * @r: the refcount to increment
- *
- * Similar to atomic_inc_not_zero(), but will saturate at UINT_MAX and WARN.
- *
- * Provides no memory ordering, it is assumed the caller has guaranteed the
- * object memory to be stable (RCU, etc.). It does provide a control dependency
- * and thereby orders future stores. See the comment on top.
- *
- * Return: true if the increment was successful, false otherwise
- */
-bool refcount_inc_not_zero_checked(refcount_t *r)
-{
- unsigned int new, val = atomic_read(&r->refs);
-
- do {
- new = val + 1;
-
- if (!val)
- return false;
-
- if (unlikely(!new))
- return true;
-
- } while (!atomic_try_cmpxchg_relaxed(&r->refs, &val, new));
+#define REFCOUNT_WARN(str) WARN_ONCE(1, "refcount_t: " str ".\n")
- WARN_ONCE(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
-
- return true;
-}
-EXPORT_SYMBOL(refcount_inc_not_zero_checked);
-
-/**
- * refcount_inc_checked - increment a refcount
- * @r: the refcount to increment
- *
- * Similar to atomic_inc(), but will saturate at UINT_MAX and WARN.
- *
- * Provides no memory ordering, it is assumed the caller already has a
- * reference on the object.
- *
- * Will WARN if the refcount is 0, as this represents a possible use-after-free
- * condition.
- */
-void refcount_inc_checked(refcount_t *r)
+void refcount_warn_saturate(refcount_t *r, enum refcount_saturation_type t)
{
- WARN_ONCE(!refcount_inc_not_zero_checked(r), "refcount_t: increment on 0; use-after-free.\n");
-}
-EXPORT_SYMBOL(refcount_inc_checked);
-
-/**
- * refcount_sub_and_test_checked - subtract from a refcount and test if it is 0
- * @i: amount to subtract from the refcount
- * @r: the refcount
- *
- * Similar to atomic_dec_and_test(), but it will WARN, return false and
- * ultimately leak on underflow and will fail to decrement when saturated
- * at UINT_MAX.
- *
- * Provides release memory ordering, such that prior loads and stores are done
- * before, and provides an acquire ordering on success such that free()
- * must come after.
- *
- * Use of this function is not recommended for the normal reference counting
- * use case in which references are taken and released one at a time. In these
- * cases, refcount_dec(), or one of its variants, should instead be used to
- * decrement a reference count.
- *
- * Return: true if the resulting refcount is 0, false otherwise
- */
-bool refcount_sub_and_test_checked(unsigned int i, refcount_t *r)
-{
- unsigned int new, val = atomic_read(&r->refs);
-
- do {
- if (unlikely(val == UINT_MAX))
- return false;
-
- new = val - i;
- if (new > val) {
- WARN_ONCE(new > val, "refcount_t: underflow; use-after-free.\n");
- return false;
- }
-
- } while (!atomic_try_cmpxchg_release(&r->refs, &val, new));
-
- if (!new) {
- smp_acquire__after_ctrl_dep();
- return true;
+ refcount_set(r, REFCOUNT_SATURATED);
+
+ switch (t) {
+ case REFCOUNT_ADD_NOT_ZERO_OVF:
+ REFCOUNT_WARN("saturated; leaking memory");
+ break;
+ case REFCOUNT_ADD_OVF:
+ REFCOUNT_WARN("saturated; leaking memory");
+ break;
+ case REFCOUNT_ADD_UAF:
+ REFCOUNT_WARN("addition on 0; use-after-free");
+ break;
+ case REFCOUNT_SUB_UAF:
+ REFCOUNT_WARN("underflow; use-after-free");
+ break;
+ case REFCOUNT_DEC_LEAK:
+ REFCOUNT_WARN("decrement hit 0; leaking memory");
+ break;
+ default:
+ REFCOUNT_WARN("unknown saturation event!?");
}
- return false;
-
-}
-EXPORT_SYMBOL(refcount_sub_and_test_checked);
-
-/**
- * refcount_dec_and_test_checked - decrement a refcount and test if it is 0
- * @r: the refcount
- *
- * Similar to atomic_dec_and_test(), it will WARN on underflow and fail to
- * decrement when saturated at UINT_MAX.
- *
- * Provides release memory ordering, such that prior loads and stores are done
- * before, and provides an acquire ordering on success such that free()
- * must come after.
- *
- * Return: true if the resulting refcount is 0, false otherwise
- */
-bool refcount_dec_and_test_checked(refcount_t *r)
-{
- return refcount_sub_and_test_checked(1, r);
-}
-EXPORT_SYMBOL(refcount_dec_and_test_checked);
-
-/**
- * refcount_dec_checked - decrement a refcount
- * @r: the refcount
- *
- * Similar to atomic_dec(), it will WARN on underflow and fail to decrement
- * when saturated at UINT_MAX.
- *
- * Provides release memory ordering, such that prior loads and stores are done
- * before.
- */
-void refcount_dec_checked(refcount_t *r)
-{
- WARN_ONCE(refcount_dec_and_test_checked(r), "refcount_t: decrement hit 0; leaking memory.\n");
}
-EXPORT_SYMBOL(refcount_dec_checked);
+EXPORT_SYMBOL(refcount_warn_saturate);
/**
* refcount_dec_if_one - decrement a refcount if it is 1
@@ -277,7 +76,7 @@ bool refcount_dec_not_one(refcount_t *r)
unsigned int new, val = atomic_read(&r->refs);
do {
- if (unlikely(val == UINT_MAX))
+ if (unlikely(val == REFCOUNT_SATURATED))
return true;
if (val == 1)
@@ -302,7 +101,7 @@ EXPORT_SYMBOL(refcount_dec_not_one);
* @lock: the mutex to be locked
*
* Similar to atomic_dec_and_mutex_lock(), it will WARN on underflow and fail
- * to decrement when saturated at UINT_MAX.
+ * to decrement when saturated at REFCOUNT_SATURATED.
*
* Provides release memory ordering, such that prior loads and stores are done
* before, and provides a control dependency such that free() must come after.
@@ -333,7 +132,7 @@ EXPORT_SYMBOL(refcount_dec_and_mutex_lock);
* @lock: the spinlock to be locked
*
* Similar to atomic_dec_and_lock(), it will WARN on underflow and fail to
- * decrement when saturated at UINT_MAX.
+ * decrement when saturated at REFCOUNT_SATURATED.
*
* Provides release memory ordering, such that prior loads and stores are done
* before, and provides a control dependency such that free() must come after.
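
Editor's note: with the _checked out-of-line variants removed, the refcount arithmetic moves into inline fast paths in <linux/refcount.h> that only call into refcount_warn_saturate() on the slow path. A simplified sketch of the new scheme (not the exact header code):

	static inline void refcount_add_sketch(int i, refcount_t *r)
	{
		int old = atomic_fetch_add_relaxed(i, &r->refs);

		if (unlikely(!old))
			refcount_warn_saturate(r, REFCOUNT_ADD_UAF);
		else if (unlikely(old < 0 || old + i < 0))
			refcount_warn_saturate(r, REFCOUNT_ADD_OVF);
	}
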
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
index 969e5400a615..33feec8989f1 100644
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -236,23 +236,6 @@ bool sbitmap_any_bit_set(const struct sbitmap *sb)
}
EXPORT_SYMBOL_GPL(sbitmap_any_bit_set);
-bool sbitmap_any_bit_clear(const struct sbitmap *sb)
-{
- unsigned int i;
-
- for (i = 0; i < sb->map_nr; i++) {
- const struct sbitmap_word *word = &sb->map[i];
- unsigned long mask = word->word & ~word->cleared;
- unsigned long ret;
-
- ret = find_first_zero_bit(&mask, word->depth);
- if (ret < word->depth)
- return true;
- }
- return false;
-}
-EXPORT_SYMBOL_GPL(sbitmap_any_bit_clear);
-
static unsigned int __sbitmap_weight(const struct sbitmap *sb, bool set)
{
unsigned int i, weight = 0;
diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c
index 60ba93fc42ce..bd9571653288 100644
--- a/lib/smp_processor_id.c
+++ b/lib/smp_processor_id.c
@@ -23,7 +23,7 @@ unsigned int check_preemption_disabled(const char *what1, const char *what2)
* Kernel threads bound to a single CPU can safely use
* smp_processor_id():
*/
- if (cpumask_equal(current->cpus_ptr, cpumask_of(this_cpu)))
+ if (current->nr_cpus_allowed == 1)
goto out;
/*
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 5ef3eccee27c..cecb230833be 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -6859,34 +6859,128 @@ err_page0:
return NULL;
}
-static __init int test_skb_segment(void)
+static __init struct sk_buff *build_test_skb_linear_no_head_frag(void)
{
+ unsigned int alloc_size = 2000;
+ unsigned int headroom = 102, doffset = 72, data_size = 1308;
+ struct sk_buff *skb[2];
+ int i;
+
+ /* skbs linked in a frag_list, both with linear data, with head_frag=0
+ * (data allocated by kmalloc), both have tcp data of 1308 bytes
+ * (total payload is 2616 bytes).
+ * Data offset is 72 bytes (40 ipv6 hdr, 32 tcp hdr). Some headroom.
+ */
+ for (i = 0; i < 2; i++) {
+ skb[i] = alloc_skb(alloc_size, GFP_KERNEL);
+ if (!skb[i]) {
+ if (i == 0)
+ goto err_skb0;
+ else
+ goto err_skb1;
+ }
+
+ skb[i]->protocol = htons(ETH_P_IPV6);
+ skb_reserve(skb[i], headroom);
+ skb_put(skb[i], doffset + data_size);
+ skb_reset_network_header(skb[i]);
+ if (i == 0)
+ skb_reset_mac_header(skb[i]);
+ else
+ skb_set_mac_header(skb[i], -ETH_HLEN);
+ __skb_pull(skb[i], doffset);
+ }
+
+ /* setup shinfo.
+ * mimic bpf_skb_proto_4_to_6, which resets gso_segs and assigns a
+ * reduced gso_size.
+ */
+ skb_shinfo(skb[0])->gso_size = 1288;
+ skb_shinfo(skb[0])->gso_type = SKB_GSO_TCPV6 | SKB_GSO_DODGY;
+ skb_shinfo(skb[0])->gso_segs = 0;
+ skb_shinfo(skb[0])->frag_list = skb[1];
+
+ /* adjust skb[0]'s len */
+ skb[0]->len += skb[1]->len;
+ skb[0]->data_len += skb[1]->len;
+ skb[0]->truesize += skb[1]->truesize;
+
+ return skb[0];
+
+err_skb1:
+ kfree_skb(skb[0]);
+err_skb0:
+ return NULL;
+}
+
+struct skb_segment_test {
+ const char *descr;
+ struct sk_buff *(*build_skb)(void);
netdev_features_t features;
+};
+
+static struct skb_segment_test skb_segment_tests[] __initconst = {
+ {
+ .descr = "gso_with_rx_frags",
+ .build_skb = build_test_skb,
+ .features = NETIF_F_SG | NETIF_F_GSO_PARTIAL | NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM
+ },
+ {
+ .descr = "gso_linear_no_head_frag",
+ .build_skb = build_test_skb_linear_no_head_frag,
+ .features = NETIF_F_SG | NETIF_F_FRAGLIST |
+ NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_GSO |
+ NETIF_F_LLTX_BIT | NETIF_F_GRO |
+ NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
+ NETIF_F_HW_VLAN_STAG_TX_BIT
+ }
+};
+
+static __init int test_skb_segment_single(const struct skb_segment_test *test)
+{
struct sk_buff *skb, *segs;
int ret = -1;
- features = NETIF_F_SG | NETIF_F_GSO_PARTIAL | NETIF_F_IP_CSUM |
- NETIF_F_IPV6_CSUM;
- features |= NETIF_F_RXCSUM;
- skb = build_test_skb();
+ skb = test->build_skb();
if (!skb) {
pr_info("%s: failed to build_test_skb", __func__);
goto done;
}
- segs = skb_segment(skb, features);
+ segs = skb_segment(skb, test->features);
if (!IS_ERR(segs)) {
kfree_skb_list(segs);
ret = 0;
- pr_info("%s: success in skb_segment!", __func__);
- } else {
- pr_info("%s: failed in skb_segment!", __func__);
}
kfree_skb(skb);
done:
return ret;
}
+static __init int test_skb_segment(void)
+{
+ int i, err_cnt = 0, pass_cnt = 0;
+
+ for (i = 0; i < ARRAY_SIZE(skb_segment_tests); i++) {
+ const struct skb_segment_test *test = &skb_segment_tests[i];
+
+ pr_info("#%d %s ", i, test->descr);
+
+ if (test_skb_segment_single(test)) {
+ pr_cont("FAIL\n");
+ err_cnt++;
+ } else {
+ pr_cont("PASS\n");
+ pass_cnt++;
+ }
+ }
+
+ pr_info("%s: Summary: %d PASSED, %d FAILED\n", __func__,
+ pass_cnt, err_cnt);
+ return err_cnt ? -EINVAL : 0;
+}
+
static __init int test_bpf(void)
{
int i, err_cnt = 0, pass_cnt = 0;
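
Editor's note: the refactor makes the skb_segment coverage table-driven; each entry names a builder and a feature mask, and test_skb_segment() just walks the array. Extending coverage becomes a one-entry change, e.g. this hypothetical entry (not in this patch):

	{
		.descr = "gso_with_frag_list",
		.build_skb = build_test_skb,	/* or a new builder */
		.features = NETIF_F_SG | NETIF_F_FRAGLIST,
	},
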
diff --git a/lib/test_printf.c b/lib/test_printf.c
index 5d94cbff2120..2d9f520d2f27 100644
--- a/lib/test_printf.c
+++ b/lib/test_printf.c
@@ -22,6 +22,8 @@
#include <linux/gfp.h>
#include <linux/mm.h>
+#include <linux/property.h>
+
#include "../tools/testing/selftests/kselftest_module.h"
#define BUF_SIZE 256
@@ -593,6 +595,55 @@ flags(void)
kfree(cmp_buffer);
}
+static void __init fwnode_pointer(void)
+{
+ const struct software_node softnodes[] = {
+ { .name = "first", },
+ { .name = "second", .parent = &softnodes[0], },
+ { .name = "third", .parent = &softnodes[1], },
+ { NULL /* Guardian */ }
+ };
+ const char * const full_name = "first/second/third";
+ const char * const full_name_second = "first/second";
+ const char * const second_name = "second";
+ const char * const third_name = "third";
+ int rval;
+
+ rval = software_node_register_nodes(softnodes);
+ if (rval) {
+ pr_warn("cannot register softnodes; rval %d\n", rval);
+ return;
+ }
+
+ test(full_name_second, "%pfw", software_node_fwnode(&softnodes[1]));
+ test(full_name, "%pfw", software_node_fwnode(&softnodes[2]));
+ test(full_name, "%pfwf", software_node_fwnode(&softnodes[2]));
+ test(second_name, "%pfwP", software_node_fwnode(&softnodes[1]));
+ test(third_name, "%pfwP", software_node_fwnode(&softnodes[2]));
+
+ software_node_unregister_nodes(softnodes);
+}
+
+static void __init
+errptr(void)
+{
+ test("-1234", "%pe", ERR_PTR(-1234));
+
+ /* Check that %pe with a non-ERR_PTR gets treated as ordinary %p. */
+ BUILD_BUG_ON(IS_ERR(PTR));
+ test_hashed("%pe", PTR);
+
+#ifdef CONFIG_SYMBOLIC_ERRNAME
+ test("(-ENOTSOCK)", "(%pe)", ERR_PTR(-ENOTSOCK));
+ test("(-EAGAIN)", "(%pe)", ERR_PTR(-EAGAIN));
+ BUILD_BUG_ON(EAGAIN != EWOULDBLOCK);
+ test("(-EAGAIN)", "(%pe)", ERR_PTR(-EWOULDBLOCK));
+ test("[-EIO ]", "[%-8pe]", ERR_PTR(-EIO));
+ test("[ -EIO]", "[%8pe]", ERR_PTR(-EIO));
+ test("-EPROBE_DEFER", "%pe", ERR_PTR(-EPROBE_DEFER));
+#endif
+}
+
static void __init
test_pointer(void)
{
@@ -615,6 +666,8 @@ test_pointer(void)
bitmap();
netdev_features();
flags();
+ errptr();
+ fwnode_pointer();
}
static void __init selftest(void)
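
Editor's note: the errptr() cases above exercise the %pe extension added to vsprintf further below: it prints the symbolic errno name of an ERR_PTR when CONFIG_SYMBOLIC_ERRNAME=y and falls back to the signed decimal value otherwise. A typical hypothetical call site:

	void *fw = ERR_PTR(-ENOENT);

	/* Prints "-ENOENT" with CONFIG_SYMBOLIC_ERRNAME=y, "-2" without. */
	pr_err("firmware load failed: %pe\n", fw);
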
diff --git a/lib/ubsan.c b/lib/ubsan.c
index e7d31735950d..fc552d524ef7 100644
--- a/lib/ubsan.c
+++ b/lib/ubsan.c
@@ -119,7 +119,7 @@ static void val_to_string(char *str, size_t size, struct type_descriptor *type,
{
if (type_is_int(type)) {
if (type_bit_width(type) == 128) {
-#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__)
+#if defined(CONFIG_ARCH_SUPPORTS_INT128)
u_max val = get_unsigned_val(type, value);
scnprintf(str, size, "0x%08x%08x%08x%08x",
@@ -374,9 +374,10 @@ void __ubsan_handle_shift_out_of_bounds(struct shift_out_of_bounds_data *data,
struct type_descriptor *lhs_type = data->lhs_type;
char rhs_str[VALUE_LENGTH];
char lhs_str[VALUE_LENGTH];
+ unsigned long ua_flags = user_access_save();
if (suppress_report(&data->location))
- return;
+ goto out;
ubsan_prologue(&data->location, &flags);
@@ -402,6 +403,8 @@ void __ubsan_handle_shift_out_of_bounds(struct shift_out_of_bounds_data *data,
lhs_type->type_name);
ubsan_epilogue(&flags);
+out:
+ user_access_restore(ua_flags);
}
EXPORT_SYMBOL(__ubsan_handle_shift_out_of_bounds);
diff --git a/lib/ubsan.h b/lib/ubsan.h
index b8fa83864467..7b56c09473a9 100644
--- a/lib/ubsan.h
+++ b/lib/ubsan.h
@@ -78,7 +78,7 @@ struct invalid_value_data {
struct type_descriptor *type;
};
-#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__)
+#if defined(CONFIG_ARCH_SUPPORTS_INT128)
typedef __int128 s_max;
typedef unsigned __int128 u_max;
#else
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index e78017a3e1bd..dee8fc467fcf 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -21,6 +21,7 @@
#include <linux/build_bug.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/errname.h>
#include <linux/module.h> /* for KSYM_SYMBOL_LEN */
#include <linux/types.h>
#include <linux/string.h>
@@ -38,6 +39,7 @@
#include <net/addrconf.h>
#include <linux/siphash.h>
#include <linux/compiler.h>
+#include <linux/property.h>
#ifdef CONFIG_BLOCK
#include <linux/blkdev.h>
#endif
@@ -613,6 +615,25 @@ static char *string_nocheck(char *buf, char *end, const char *s,
return widen_string(buf, len, end, spec);
}
+static char *err_ptr(char *buf, char *end, void *ptr,
+ struct printf_spec spec)
+{
+ int err = PTR_ERR(ptr);
+ const char *sym = errname(err);
+
+ if (sym)
+ return string_nocheck(buf, end, sym, spec);
+
+ /*
+ * Somebody passed ERR_PTR(-1234) or some other non-existing
+ * Efoo - or perhaps CONFIG_SYMBOLIC_ERRNAME=n. Fall back to
+ * printing it as its decimal representation.
+ */
+ spec.flags |= SIGN;
+ spec.base = 10;
+ return number(buf, end, err, spec);
+}
+
/* Be careful: error messages must fit into the given buffer. */
static char *error_string(char *buf, char *end, const char *s,
struct printf_spec spec)
@@ -918,7 +939,7 @@ char *symbol_string(char *buf, char *end, void *ptr,
#ifdef CONFIG_KALLSYMS
if (*fmt == 'B')
sprint_backtrace(sym, value);
- else if (*fmt != 'f' && *fmt != 's')
+ else if (*fmt != 's')
sprint_symbol(sym, value);
else
sprint_symbol_no_offset(sym, value);
@@ -1872,32 +1893,25 @@ char *flags_string(char *buf, char *end, void *flags_ptr,
return format_flags(buf, end, flags, names);
}
-static const char *device_node_name_for_depth(const struct device_node *np, int depth)
-{
- for ( ; np && depth; depth--)
- np = np->parent;
-
- return kbasename(np->full_name);
-}
-
static noinline_for_stack
-char *device_node_gen_full_name(const struct device_node *np, char *buf, char *end)
+char *fwnode_full_name_string(struct fwnode_handle *fwnode, char *buf,
+ char *end)
{
int depth;
- const struct device_node *parent = np->parent;
- /* special case for root node */
- if (!parent)
- return string_nocheck(buf, end, "/", default_str_spec);
+ /* Loop starting from the root node to the current node. */
+ for (depth = fwnode_count_parents(fwnode); depth >= 0; depth--) {
+ struct fwnode_handle *__fwnode =
+ fwnode_get_nth_parent(fwnode, depth);
- for (depth = 0; parent->parent; depth++)
- parent = parent->parent;
-
- for ( ; depth >= 0; depth--) {
- buf = string_nocheck(buf, end, "/", default_str_spec);
- buf = string(buf, end, device_node_name_for_depth(np, depth),
+ buf = string(buf, end, fwnode_get_name_prefix(__fwnode),
+ default_str_spec);
+ buf = string(buf, end, fwnode_get_name(__fwnode),
default_str_spec);
+
+ fwnode_handle_put(__fwnode);
}
+
return buf;
}
@@ -1921,6 +1935,9 @@ char *device_node_string(char *buf, char *end, struct device_node *dn,
struct printf_spec str_spec = spec;
str_spec.field_width = -1;
+ if (fmt[0] != 'F')
+ return error_string(buf, end, "(%pO?)", spec);
+
if (!IS_ENABLED(CONFIG_OF))
return error_string(buf, end, "(%pOF?)", spec);
@@ -1942,10 +1959,11 @@ char *device_node_string(char *buf, char *end, struct device_node *dn,
switch (*fmt) {
case 'f': /* full_name */
- buf = device_node_gen_full_name(dn, buf, end);
+ buf = fwnode_full_name_string(of_fwnode_handle(dn), buf,
+ end);
break;
case 'n': /* name */
- p = kbasename(of_node_full_name(dn));
+ p = fwnode_get_name(of_fwnode_handle(dn));
precision = str_spec.precision;
str_spec.precision = strchrnul(p, '@') - p;
buf = string(buf, end, p, str_spec);
@@ -1955,7 +1973,7 @@ char *device_node_string(char *buf, char *end, struct device_node *dn,
buf = number(buf, end, (unsigned int)dn->phandle, num_spec);
break;
case 'P': /* path-spec */
- p = kbasename(of_node_full_name(dn));
+ p = fwnode_get_name(of_fwnode_handle(dn));
if (!p[1])
p = "/";
buf = string(buf, end, p, str_spec);
@@ -1993,15 +2011,34 @@ char *device_node_string(char *buf, char *end, struct device_node *dn,
return widen_string(buf, buf - buf_start, end, spec);
}
-static char *kobject_string(char *buf, char *end, void *ptr,
- struct printf_spec spec, const char *fmt)
+static noinline_for_stack
+char *fwnode_string(char *buf, char *end, struct fwnode_handle *fwnode,
+ struct printf_spec spec, const char *fmt)
{
- switch (fmt[1]) {
- case 'F':
- return device_node_string(buf, end, ptr, spec, fmt + 1);
+ struct printf_spec str_spec = spec;
+ char *buf_start = buf;
+
+ str_spec.field_width = -1;
+
+ if (*fmt != 'w')
+ return error_string(buf, end, "(%pf?)", spec);
+
+ if (check_pointer(&buf, end, fwnode, spec))
+ return buf;
+
+ fmt++;
+
+ switch (*fmt) {
+ case 'P': /* name */
+ buf = string(buf, end, fwnode_get_name(fwnode), str_spec);
+ break;
+ case 'f': /* full_name */
+ default:
+ buf = fwnode_full_name_string(fwnode, buf, end);
+ break;
}
- return error_string(buf, end, "(%pO?)", spec);
+ return widen_string(buf, buf - buf_start, end, spec);
}
/*
@@ -2016,9 +2053,9 @@ static char *kobject_string(char *buf, char *end, void *ptr,
*
* - 'S' For symbolic direct pointers (or function descriptors) with offset
* - 's' For symbolic direct pointers (or function descriptors) without offset
- * - 'F' Same as 'S'
- * - 'f' Same as 's'
- * - '[FfSs]R' as above with __builtin_extract_return_addr() translation
+ * - '[Ss]R' as above with __builtin_extract_return_addr() translation
+ * - '[Ff]' %pf and %pF were obsoleted and later removed in favor of
+ * %ps and %pS. Be careful when re-using these specifiers.
* - 'B' For backtraced symbolic direct pointers with offset
* - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
* - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
@@ -2108,6 +2145,10 @@ static char *kobject_string(char *buf, char *end, void *ptr,
* F device node flags
* c major compatible string
* C full compatible string
+ * - 'fw[fP]' For a firmware node (struct fwnode_handle) pointer
+ * Without an option prints the full name of the node
+ * f full name
+ * P node name, including a possible unit address
* - 'x' For printing the address. Equivalent to "%lx".
*
* ** When making changes please also update:
@@ -2121,8 +2162,6 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
struct printf_spec spec)
{
switch (*fmt) {
- case 'F':
- case 'f':
case 'S':
case 's':
ptr = dereference_symbol_descriptor(ptr);
@@ -2184,9 +2223,16 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
case 'G':
return flags_string(buf, end, ptr, spec, fmt);
case 'O':
- return kobject_string(buf, end, ptr, spec, fmt);
+ return device_node_string(buf, end, ptr, spec, fmt + 1);
+ case 'f':
+ return fwnode_string(buf, end, ptr, spec, fmt + 1);
case 'x':
return pointer_string(buf, end, ptr, spec);
+ case 'e':
+ /* %pe with a non-ERR_PTR gets treated as plain %p */
+ if (!IS_ERR(ptr))
+ break;
+ return err_ptr(buf, end, ptr, spec);
}
/* default is to _not_ leak addresses, hash before printing */
@@ -2819,10 +2865,9 @@ int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args)
/* Dereference of functions is still OK */
case 'S':
case 's':
- case 'F':
- case 'f':
case 'x':
case 'K':
+ case 'e':
save_arg(void *);
break;
default:
@@ -2999,6 +3044,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
case 'f':
case 'x':
case 'K':
+ case 'e':
process = true;
break;
default:
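
Editor's note: the new %pfw specifier works on any struct fwnode_handle, not just OF device nodes. A sketch, assuming a device with an attached firmware node:

	struct fwnode_handle *fwnode = dev_fwnode(dev);

	pr_info("probing %pfw\n", fwnode);	/* full path, e.g. "first/second/third" */
	pr_info("node: %pfwP\n", fwnode);	/* leaf name only, e.g. "third" */
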
diff --git a/lib/xz/xz_dec_lzma2.c b/lib/xz/xz_dec_lzma2.c
index 08c3c8049998..156f26fdc4c9 100644
--- a/lib/xz/xz_dec_lzma2.c
+++ b/lib/xz/xz_dec_lzma2.c
@@ -1146,6 +1146,7 @@ XZ_EXTERN enum xz_ret xz_dec_lzma2_reset(struct xz_dec_lzma2 *s, uint8_t props)
if (DEC_IS_DYNALLOC(s->dict.mode)) {
if (s->dict.allocated < s->dict.size) {
+ s->dict.allocated = s->dict.size;
vfree(s->dict.buf);
s->dict.buf = vmalloc(s->dict.size);
if (s->dict.buf == NULL) {
diff --git a/mm/debug.c b/mm/debug.c
index 8345bb6e4769..0461df1207cb 100644
--- a/mm/debug.c
+++ b/mm/debug.c
@@ -67,28 +67,31 @@ void __dump_page(struct page *page, const char *reason)
*/
mapcount = PageSlab(page) ? 0 : page_mapcount(page);
- pr_warn("page:%px refcount:%d mapcount:%d mapping:%px index:%#lx",
- page, page_ref_count(page), mapcount,
- page->mapping, page_to_pgoff(page));
if (PageCompound(page))
- pr_cont(" compound_mapcount: %d", compound_mapcount(page));
- pr_cont("\n");
- if (PageAnon(page))
- pr_warn("anon ");
- else if (PageKsm(page))
- pr_warn("ksm ");
+ pr_warn("page:%px refcount:%d mapcount:%d mapping:%px "
+ "index:%#lx compound_mapcount: %d\n",
+ page, page_ref_count(page), mapcount,
+ page->mapping, page_to_pgoff(page),
+ compound_mapcount(page));
+ else
+ pr_warn("page:%px refcount:%d mapcount:%d mapping:%px index:%#lx\n",
+ page, page_ref_count(page), mapcount,
+ page->mapping, page_to_pgoff(page));
+ if (PageKsm(page))
+ pr_warn("ksm flags: %#lx(%pGp)\n", page->flags, &page->flags);
+ else if (PageAnon(page))
+ pr_warn("anon flags: %#lx(%pGp)\n", page->flags, &page->flags);
else if (mapping) {
- pr_warn("%ps ", mapping->a_ops);
if (mapping->host && mapping->host->i_dentry.first) {
struct dentry *dentry;
dentry = container_of(mapping->host->i_dentry.first, struct dentry, d_u.d_alias);
- pr_warn("name:\"%pd\" ", dentry);
- }
+ pr_warn("%ps name:\"%pd\"\n", mapping->a_ops, dentry);
+ } else
+ pr_warn("%ps\n", mapping->a_ops);
+ pr_warn("flags: %#lx(%pGp)\n", page->flags, &page->flags);
}
BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS + 1);
- pr_warn("flags: %#lx(%pGp)\n", page->flags, &page->flags);
-
hex_only:
print_hex_dump(KERN_WARNING, "raw: ", DUMP_PREFIX_NONE, 32,
sizeof(unsigned long), page,
diff --git a/mm/hugetlb_cgroup.c b/mm/hugetlb_cgroup.c
index f1930fa0b445..2ac38bdc18a1 100644
--- a/mm/hugetlb_cgroup.c
+++ b/mm/hugetlb_cgroup.c
@@ -196,7 +196,7 @@ int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
again:
rcu_read_lock();
h_cg = hugetlb_cgroup_from_task(current);
- if (!css_tryget_online(&h_cg->css)) {
+ if (!css_tryget(&h_cg->css)) {
rcu_read_unlock();
goto again;
}
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index f05d27b7183d..a8a57bebb5fa 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1602,17 +1602,6 @@ static void collapse_file(struct mm_struct *mm,
result = SCAN_FAIL;
goto xa_unlocked;
}
- } else if (!PageUptodate(page)) {
- xas_unlock_irq(&xas);
- wait_on_page_locked(page);
- if (!trylock_page(page)) {
- result = SCAN_PAGE_LOCK;
- goto xa_unlocked;
- }
- get_page(page);
- } else if (PageDirty(page)) {
- result = SCAN_FAIL;
- goto xa_locked;
} else if (trylock_page(page)) {
get_page(page);
xas_unlock_irq(&xas);
@@ -1627,7 +1616,12 @@ static void collapse_file(struct mm_struct *mm,
* without racing with truncate.
*/
VM_BUG_ON_PAGE(!PageLocked(page), page);
- VM_BUG_ON_PAGE(!PageUptodate(page), page);
+
+ /* make sure the page is up to date */
+ if (unlikely(!PageUptodate(page))) {
+ result = SCAN_FAIL;
+ goto out_unlock;
+ }
/*
* If file was truncated then extended, or hole-punched, before
@@ -1643,6 +1637,16 @@ static void collapse_file(struct mm_struct *mm,
goto out_unlock;
}
+ if (!is_shmem && PageDirty(page)) {
+ /*
+ * khugepaged only works on read-only fds, so this
+ * page is dirty because it hasn't been flushed
+ * since the first write.
+ */
+ result = SCAN_FAIL;
+ goto out_unlock;
+ }
+
if (isolate_lru_page(page)) {
result = SCAN_DEL_PAGE_LRU;
goto out_unlock;
diff --git a/mm/ksm.c b/mm/ksm.c
index dbee2eb4dd05..7905934cd3ad 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -885,13 +885,13 @@ static int remove_stable_node(struct stable_node *stable_node)
return 0;
}
- if (WARN_ON_ONCE(page_mapped(page))) {
- /*
- * This should not happen: but if it does, just refuse to let
- * merge_across_nodes be switched - there is no need to panic.
- */
- err = -EBUSY;
- } else {
+ /*
+ * The page could still be mapped if this races with __mmput() running
+ * between ksm_exit() and exit_mmap(). Just refuse to let
+ * merge_across_nodes/max_page_sharing be switched.
+ */
+ err = -EBUSY;
+ if (!page_mapped(page)) {
/*
* The stable node did not yet appear stale to get_ksm_page(),
* since that allows for an unmapped ksm page to be recognized
diff --git a/mm/maccess.c b/mm/maccess.c
index d065736f6b87..3ca8d97e5010 100644
--- a/mm/maccess.c
+++ b/mm/maccess.c
@@ -18,6 +18,18 @@ probe_read_common(void *dst, const void __user *src, size_t size)
return ret ? -EFAULT : 0;
}
+static __always_inline long
+probe_write_common(void __user *dst, const void *src, size_t size)
+{
+ long ret;
+
+ pagefault_disable();
+ ret = __copy_to_user_inatomic(dst, src, size);
+ pagefault_enable();
+
+ return ret ? -EFAULT : 0;
+}
+
/**
* probe_kernel_read(): safely attempt to read from a kernel-space location
* @dst: pointer to the buffer that shall take the data
@@ -31,11 +43,20 @@ probe_read_common(void *dst, const void __user *src, size_t size)
* do_page_fault() doesn't attempt to take mmap_sem. This makes
* probe_kernel_read() suitable for use within regions where the caller
* already holds mmap_sem, or other locks which nest inside mmap_sem.
+ *
+ * probe_kernel_read_strict() is the same as probe_kernel_read() except for
+ * the case where architectures have non-overlapping user and kernel address
+ * ranges: probe_kernel_read_strict() will additionally return -EFAULT for
+ * probing memory on a user address range where probe_user_read() is supposed
+ * to be used instead.
*/
long __weak probe_kernel_read(void *dst, const void *src, size_t size)
__attribute__((alias("__probe_kernel_read")));
+long __weak probe_kernel_read_strict(void *dst, const void *src, size_t size)
+ __attribute__((alias("__probe_kernel_read")));
+
long __probe_kernel_read(void *dst, const void *src, size_t size)
{
long ret;
@@ -85,6 +106,7 @@ EXPORT_SYMBOL_GPL(probe_user_read);
* Safely write to address @dst from the buffer at @src. If a kernel fault
* happens, handle that and return -EFAULT.
*/
+
long __weak probe_kernel_write(void *dst, const void *src, size_t size)
__attribute__((alias("__probe_kernel_write")));
@@ -94,15 +116,39 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
mm_segment_t old_fs = get_fs();
set_fs(KERNEL_DS);
- pagefault_disable();
- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
- pagefault_enable();
+ ret = probe_write_common((__force void __user *)dst, src, size);
set_fs(old_fs);
- return ret ? -EFAULT : 0;
+ return ret;
}
EXPORT_SYMBOL_GPL(probe_kernel_write);
+/**
+ * probe_user_write(): safely attempt to write to a user-space location
+ * @dst: address to write to
+ * @src: pointer to the data that shall be written
+ * @size: size of the data chunk
+ *
+ * Safely write to address @dst from the buffer at @src. If a kernel fault
+ * happens, handle that and return -EFAULT.
+ */
+
+long __weak probe_user_write(void __user *dst, const void *src, size_t size)
+ __attribute__((alias("__probe_user_write")));
+
+long __probe_user_write(void __user *dst, const void *src, size_t size)
+{
+ long ret = -EFAULT;
+ mm_segment_t old_fs = get_fs();
+
+ set_fs(USER_DS);
+ if (access_ok(dst, size))
+ ret = probe_write_common(dst, src, size);
+ set_fs(old_fs);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(probe_user_write);
/**
* strncpy_from_unsafe: - Copy a NUL terminated string from unsafe address.
@@ -120,8 +166,22 @@ EXPORT_SYMBOL_GPL(probe_kernel_write);
*
* If @count is smaller than the length of the string, copies @count-1 bytes,
* sets the last byte of @dst buffer to NUL and returns @count.
+ *
+ * strncpy_from_unsafe_strict() is the same as strncpy_from_unsafe() except
+ * for the case where architectures have non-overlapping user and kernel address
+ * ranges: strncpy_from_unsafe_strict() will additionally return -EFAULT for
+ * probing memory on a user address range where strncpy_from_unsafe_user() is
+ * supposed to be used instead.
*/
-long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count)
+
+long __weak strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count)
+ __attribute__((alias("__strncpy_from_unsafe")));
+
+long __weak strncpy_from_unsafe_strict(char *dst, const void *unsafe_addr,
+ long count)
+ __attribute__((alias("__strncpy_from_unsafe")));
+
+long __strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count)
{
mm_segment_t old_fs = get_fs();
const void *src = unsafe_addr;
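
Editor's note: probe_user_write() mirrors probe_kernel_write() for user addresses and is meant for callers (e.g. tracing helpers) that must not fault. A minimal hypothetical wrapper:

	static long poke_user_long(unsigned long __user *uaddr, unsigned long val)
	{
		/* Returns -EFAULT instead of faulting on a bad user address. */
		return probe_user_write(uaddr, &val, sizeof(val));
	}
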
diff --git a/mm/madvise.c b/mm/madvise.c
index 2be9f3fdb05e..94c343b4c968 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -363,8 +363,12 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
ClearPageReferenced(page);
test_and_clear_page_young(page);
if (pageout) {
- if (!isolate_lru_page(page))
- list_add(&page->lru, &page_list);
+ if (!isolate_lru_page(page)) {
+ if (PageUnevictable(page))
+ putback_lru_page(page);
+ else
+ list_add(&page->lru, &page_list);
+ }
} else
deactivate_page(page);
huge_unlock:
@@ -441,8 +445,12 @@ regular_page:
ClearPageReferenced(page);
test_and_clear_page_young(page);
if (pageout) {
- if (!isolate_lru_page(page))
- list_add(&page->lru, &page_list);
+ if (!isolate_lru_page(page)) {
+ if (PageUnevictable(page))
+ putback_lru_page(page);
+ else
+ list_add(&page->lru, &page_list);
+ }
} else
deactivate_page(page);
}
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 37592dd7ae32..01f3f8b665e9 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -960,7 +960,7 @@ struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
if (unlikely(!memcg))
memcg = root_mem_cgroup;
}
- } while (!css_tryget_online(&memcg->css));
+ } while (!css_tryget(&memcg->css));
rcu_read_unlock();
return memcg;
}
@@ -1800,7 +1800,7 @@ static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
struct mem_cgroup *iter;
spin_lock(&memcg_oom_lock);
- mutex_release(&memcg_oom_lock_dep_map, 1, _RET_IP_);
+ mutex_release(&memcg_oom_lock_dep_map, _RET_IP_);
for_each_mem_cgroup_tree(iter, memcg)
iter->oom_lock = false;
spin_unlock(&memcg_oom_lock);
diff --git a/mm/memory.c b/mm/memory.c
index b1ca51a079f2..b6a5d6a08438 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -118,6 +118,18 @@ int randomize_va_space __read_mostly =
2;
#endif
+#ifndef arch_faults_on_old_pte
+static inline bool arch_faults_on_old_pte(void)
+{
+ /*
+ * Architectures without a hardware "accessed" flag need to
+ * implement their own helper. By default, "true" means a page
+ * fault will be taken on an old pte.
+ */
+ return true;
+}
+#endif
+
static int __init disable_randmaps(char *s)
{
randomize_va_space = 0;
@@ -2145,32 +2157,82 @@ static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd,
return same;
}
-static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma)
+static inline bool cow_user_page(struct page *dst, struct page *src,
+ struct vm_fault *vmf)
{
+ bool ret;
+ void *kaddr;
+ void __user *uaddr;
+ bool force_mkyoung;
+ struct vm_area_struct *vma = vmf->vma;
+ struct mm_struct *mm = vma->vm_mm;
+ unsigned long addr = vmf->address;
+
debug_dma_assert_idle(src);
+ if (likely(src)) {
+ copy_user_highpage(dst, src, addr, vma);
+ return true;
+ }
+
/*
* If the source page was a PFN mapping, we don't have
* a "struct page" for it. We do a best-effort copy by
* just copying from the original user address. If that
* fails, we just zero-fill it. Live with it.
*/
- if (unlikely(!src)) {
- void *kaddr = kmap_atomic(dst);
- void __user *uaddr = (void __user *)(va & PAGE_MASK);
+ kaddr = kmap_atomic(dst);
+ uaddr = (void __user *)(addr & PAGE_MASK);
+
+ /*
+ * On architectures with software "accessed" bits, we would
+ * take a double page fault, so mark it accessed here.
+ */
+ force_mkyoung = arch_faults_on_old_pte() && !pte_young(vmf->orig_pte);
+ if (force_mkyoung) {
+ pte_t entry;
+
+ vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
+ if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) {
+ /*
+ * Another thread has already handled the fault
+ * and we don't need to do anything. If not,
+ * the fault will be triggered again on the
+ * same address.
+ */
+ ret = false;
+ goto pte_unlock;
+ }
+ entry = pte_mkyoung(vmf->orig_pte);
+ if (ptep_set_access_flags(vma, addr, vmf->pte, entry, 0))
+ update_mmu_cache(vma, addr, vmf->pte);
+ }
+
+ /*
+ * This really shouldn't fail, because the page is there
+ * in the page tables. But it might just be unreadable,
+ * in which case we just give up and fill the result with
+ * zeroes.
+ */
+ if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {
/*
- * This really shouldn't fail, because the page is there
- * in the page tables. But it might just be unreadable,
- * in which case we just give up and fill the result with
- * zeroes.
+ * Warn in case some obscure use case
+ * actually hits this.
*/
- if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE))
- clear_page(kaddr);
- kunmap_atomic(kaddr);
- flush_dcache_page(dst);
- } else
- copy_user_highpage(dst, src, va, vma);
+ WARN_ON_ONCE(1);
+ clear_page(kaddr);
+ }
+
+ ret = true;
+
+pte_unlock:
+ if (force_mkyoung)
+ pte_unmap_unlock(vmf->pte, vmf->ptl);
+ kunmap_atomic(kaddr);
+ flush_dcache_page(dst);
+
+ return ret;
}
static gfp_t __get_fault_gfp_mask(struct vm_area_struct *vma)
@@ -2327,7 +2389,19 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
vmf->address);
if (!new_page)
goto oom;
- cow_user_page(new_page, old_page, vmf->address, vma);
+
+ if (!cow_user_page(new_page, old_page, vmf)) {
+ /*
+ * COW failed, if the fault was solved by other,
+ * it's fine. If not, userspace would re-fault on
+ * the same address and we will handle the fault
+ * from the second attempt.
+ */
+ put_page(new_page);
+ if (old_page)
+ put_page(old_page);
+ return 0;
+ }
}
if (mem_cgroup_try_charge_delay(new_page, mm, GFP_KERNEL, &memcg, false))
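
Editor's note: architectures with a hardware-managed access flag are expected to override the weak default of arch_faults_on_old_pte() introduced above. A sketch of such an override (modelled on arm64, assuming a cpu_has_hw_af() capability helper):

	#define arch_faults_on_old_pte arch_faults_on_old_pte
	static inline bool arch_faults_on_old_pte(void)
	{
		/* With hardware AF/DBM, an old pte does not cause a fault. */
		return !cpu_has_hw_af();
	}
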
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 07e5c67f48a8..f307bd82d750 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -331,7 +331,7 @@ static unsigned long find_smallest_section_pfn(int nid, struct zone *zone,
unsigned long end_pfn)
{
for (; start_pfn < end_pfn; start_pfn += PAGES_PER_SUBSECTION) {
- if (unlikely(!pfn_valid(start_pfn)))
+ if (unlikely(!pfn_to_online_page(start_pfn)))
continue;
if (unlikely(pfn_to_nid(start_pfn) != nid))
@@ -356,7 +356,7 @@ static unsigned long find_biggest_section_pfn(int nid, struct zone *zone,
/* pfn is the end pfn of a memory section. */
pfn = end_pfn - 1;
for (; pfn >= start_pfn; pfn -= PAGES_PER_SUBSECTION) {
- if (unlikely(!pfn_valid(pfn)))
+ if (unlikely(!pfn_to_online_page(pfn)))
continue;
if (unlikely(pfn_to_nid(pfn) != nid))
@@ -415,7 +415,7 @@ static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,
*/
pfn = zone_start_pfn;
for (; pfn < zone_end_pfn; pfn += PAGES_PER_SUBSECTION) {
- if (unlikely(!pfn_valid(pfn)))
+ if (unlikely(!pfn_to_online_page(pfn)))
continue;
if (page_zone(pfn_to_page(pfn)) != zone)
@@ -471,6 +471,16 @@ static void __remove_zone(struct zone *zone, unsigned long start_pfn,
struct pglist_data *pgdat = zone->zone_pgdat;
unsigned long flags;
+#ifdef CONFIG_ZONE_DEVICE
+ /*
+ * Zone shrinking code cannot properly deal with ZONE_DEVICE. So
+ * we will not try to shrink the zones - which is okay as
+ * set_zone_contiguous() cannot deal with ZONE_DEVICE either way.
+ */
+ if (zone_idx(zone) == ZONE_DEVICE)
+ return;
+#endif
+
pgdat_resize_lock(zone->zone_pgdat, &flags);
shrink_zone_span(zone, start_pfn, start_pfn + nr_pages);
update_pgdat_span(pgdat);
@@ -1646,6 +1656,18 @@ static int check_cpu_on_node(pg_data_t *pgdat)
return 0;
}
+static int check_no_memblock_for_node_cb(struct memory_block *mem, void *arg)
+{
+ int nid = *(int *)arg;
+
+ /*
+ * If a memory block belongs to multiple nodes, the stored nid is not
+ * reliable. However, such blocks are always online (i.e., cannot get
+ * offlined) and, therefore, are still spanned by the node.
+ */
+ return mem->nid == nid ? -EEXIST : 0;
+}
+
/**
* try_offline_node
* @nid: the node ID
@@ -1658,25 +1680,24 @@ static int check_cpu_on_node(pg_data_t *pgdat)
void try_offline_node(int nid)
{
pg_data_t *pgdat = NODE_DATA(nid);
- unsigned long start_pfn = pgdat->node_start_pfn;
- unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages;
- unsigned long pfn;
-
- for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
- unsigned long section_nr = pfn_to_section_nr(pfn);
-
- if (!present_section_nr(section_nr))
- continue;
+ int rc;
- if (pfn_to_nid(pfn) != nid)
- continue;
+ /*
+ * If the node still spans pages (especially ZONE_DEVICE), don't
+ * offline it. A node spans memory after move_pfn_range_to_zone(),
+ * e.g., after the memory block was onlined.
+ */
+ if (pgdat->node_spanned_pages)
+ return;
- /*
- * some memory sections of this node are not removed, and we
- * can't offline node now.
- */
+ /*
+ * Especially offline memory blocks might not be spanned by the
+ * node. They will get spanned by the node once they get onlined.
+ * However, they link to the node in sysfs and can get onlined later.
+ */
+ rc = for_each_memory_block(&nid, check_no_memblock_for_node_cb);
+ if (rc)
return;
- }
if (check_cpu_on_node(pgdat))
return;
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 4ae967bcf954..e08c94170ae4 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -672,7 +672,9 @@ static const struct mm_walk_ops queue_pages_walk_ops = {
* 1 - there is unmovable page, but MPOL_MF_MOVE* & MPOL_MF_STRICT were
* specified.
* 0 - queue pages successfully or no misplaced page.
- * -EIO - there is misplaced page and only MPOL_MF_STRICT was specified.
+ * errno - e.g., misplaced pages with MPOL_MF_STRICT specified (-EIO), or
+ * the memory range specified by nodemask and maxnode pointing
+ * outside the caller's accessible address space (-EFAULT)
*/
static int
queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
@@ -1286,7 +1288,7 @@ static long do_mbind(unsigned long start, unsigned long len,
flags | MPOL_MF_INVERT, &pagelist);
if (ret < 0) {
- err = -EIO;
+ err = ret;
goto up_out;
}
@@ -1305,10 +1307,12 @@ static long do_mbind(unsigned long start, unsigned long len,
if ((ret > 0) || (nr_failed && (flags & MPOL_MF_STRICT)))
err = -EIO;
- } else
- putback_movable_pages(&pagelist);
-
+ } else {
up_out:
+ if (!list_empty(&pagelist))
+ putback_movable_pages(&pagelist);
+ }
+
up_write(&mm->mmap_sem);
mpol_out:
mpol_put(new);
diff --git a/mm/nommu.c b/mm/nommu.c
index 99b7ec318824..7de592058ab4 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -155,11 +155,11 @@ void *__vmalloc_node_flags(unsigned long size, int node, gfp_t flags)
return __vmalloc(size, flags, PAGE_KERNEL);
}
-void *vmalloc_user(unsigned long size)
+static void *__vmalloc_user_flags(unsigned long size, gfp_t flags)
{
void *ret;
- ret = __vmalloc(size, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
+ ret = __vmalloc(size, flags, PAGE_KERNEL);
if (ret) {
struct vm_area_struct *vma;
@@ -172,8 +172,19 @@ void *vmalloc_user(unsigned long size)
return ret;
}
+
+void *vmalloc_user(unsigned long size)
+{
+ return __vmalloc_user_flags(size, GFP_KERNEL | __GFP_ZERO);
+}
EXPORT_SYMBOL(vmalloc_user);
+void *vmalloc_user_node_flags(unsigned long size, int node, gfp_t flags)
+{
+ return __vmalloc_user_flags(size, flags | __GFP_ZERO);
+}
+EXPORT_SYMBOL(vmalloc_user_node_flags);
+
struct page *vmalloc_to_page(const void *addr)
{
return virt_to_page(addr);
diff --git a/mm/page_io.c b/mm/page_io.c
index 24ee600f9131..60a66a58b9bf 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -73,6 +73,7 @@ static void swap_slot_free_notify(struct page *page)
{
struct swap_info_struct *sis;
struct gendisk *disk;
+ swp_entry_t entry;
/*
* There is no guarantee that the page is in swap cache - the software
@@ -104,11 +105,10 @@ static void swap_slot_free_notify(struct page *page)
* we again wish to reclaim it.
*/
disk = sis->bdev->bd_disk;
- if (disk->fops->swap_slot_free_notify) {
- swp_entry_t entry;
+ entry.val = page_private(page);
+ if (disk->fops->swap_slot_free_notify && __swap_count(entry) == 1) {
unsigned long offset;
- entry.val = page_private(page);
offset = swp_offset(entry);
SetPageDirty(page);
diff --git a/mm/slub.c b/mm/slub.c
index b25c807a111f..e72e802fc569 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1433,12 +1433,15 @@ static inline bool slab_free_freelist_hook(struct kmem_cache *s,
void *old_tail = *tail ? *tail : *head;
int rsize;
- if (slab_want_init_on_free(s)) {
- void *p = NULL;
+ /* Head and tail of the reconstructed freelist */
+ *head = NULL;
+ *tail = NULL;
- do {
- object = next;
- next = get_freepointer(s, object);
+ do {
+ object = next;
+ next = get_freepointer(s, object);
+
+ if (slab_want_init_on_free(s)) {
/*
* Clear the object and the metadata, but don't touch
* the redzone.
@@ -1448,29 +1451,8 @@ static inline bool slab_free_freelist_hook(struct kmem_cache *s,
: 0;
memset((char *)object + s->inuse, 0,
s->size - s->inuse - rsize);
- set_freepointer(s, object, p);
- p = object;
- } while (object != old_tail);
- }
-
-/*
- * Compiler cannot detect this function can be removed if slab_free_hook()
- * evaluates to nothing. Thus, catch all relevant config debug options here.
- */
-#if defined(CONFIG_LOCKDEP) || \
- defined(CONFIG_DEBUG_KMEMLEAK) || \
- defined(CONFIG_DEBUG_OBJECTS_FREE) || \
- defined(CONFIG_KASAN)
- next = *head;
-
- /* Head and tail of the reconstructed freelist */
- *head = NULL;
- *tail = NULL;
-
- do {
- object = next;
- next = get_freepointer(s, object);
+ }
/* If object's reuse doesn't have to be delayed */
if (!slab_free_hook(s, object)) {
/* Move object to the new freelist */
@@ -1485,9 +1467,6 @@ static inline bool slab_free_freelist_hook(struct kmem_cache *s,
*tail = NULL;
return *head != NULL;
-#else
- return true;
-#endif
}
static void *setup_object(struct kmem_cache *s, struct page *page,
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index a3c70e275f4e..4a7d7459c4f9 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -2672,6 +2672,26 @@ void *vzalloc_node(unsigned long size, int node)
EXPORT_SYMBOL(vzalloc_node);
/**
+ * vmalloc_user_node_flags - allocate memory for userspace on a specific node
+ * @size: allocation size
+ * @node: numa node
+ * @flags: flags for the page level allocator
+ *
+ * The resulting memory area is zeroed so it can be mapped to userspace
+ * without leaking data.
+ *
+ * Return: pointer to the allocated memory or %NULL on error
+ */
+void *vmalloc_user_node_flags(unsigned long size, int node, gfp_t flags)
+{
+ return __vmalloc_node_range(size, SHMLBA, VMALLOC_START, VMALLOC_END,
+ flags | __GFP_ZERO, PAGE_KERNEL,
+ VM_USERMAP, node,
+ __builtin_return_address(0));
+}
+EXPORT_SYMBOL(vmalloc_user_node_flags);
+
+/**
* vmalloc_exec - allocate virtually contiguous, executable memory
* @size: allocation size
*
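
Editor's note: a hypothetical caller of the new API, needing a zeroed, user-mappable buffer with NUMA placement and memcg accounting:

	static void *alloc_user_buf(unsigned long size, int nid)
	{
		/* Zeroed and VM_USERMAP, so it can be mmap'ed to userspace. */
		return vmalloc_user_node_flags(size, nid,
					       GFP_KERNEL | __GFP_ACCOUNT);
	}
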
diff --git a/net/Kconfig b/net/Kconfig
index 3101bfcbdd7a..bd191f978a23 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -258,7 +258,7 @@ config XPS
default y
config HWBM
- bool
+ bool
config CGROUP_NET_PRIO
bool "Network priority cgroup"
@@ -309,12 +309,12 @@ config BPF_STREAM_PARSER
select STREAM_PARSER
select NET_SOCK_MSG
---help---
- Enabling this allows a stream parser to be used with
- BPF_MAP_TYPE_SOCKMAP.
+ Enabling this allows a stream parser to be used with
+ BPF_MAP_TYPE_SOCKMAP.
- BPF_MAP_TYPE_SOCKMAP provides a map type to use with network sockets.
- It can be used to enforce socket policy, implement socket redirects,
- etc.
+ BPF_MAP_TYPE_SOCKMAP provides a map type to use with network sockets.
+ It can be used to enforce socket policy, implement socket redirects,
+ etc.
config NET_FLOW_LIMIT
bool
@@ -349,12 +349,12 @@ config NET_DROP_MONITOR
tristate "Network packet drop alerting service"
depends on INET && TRACEPOINTS
---help---
- This feature provides an alerting service to userspace in the
- event that packets are discarded in the network stack. Alerts
- are broadcast via netlink socket to any listening user space
- process. If you don't need network drop alerts, or if you are ok
- just checking the various proc files and other utilities for
- drop statistics, say N here.
+ This feature provides an alerting service to userspace in the
+ event that packets are discarded in the network stack. Alerts
+ are broadcast via netlink socket to any listening user space
+ process. If you don't need network drop alerts, or if you are ok
+ just checking the various proc files and other utilities for
+ drop statistics, say N here.
endmenu
@@ -433,7 +433,7 @@ config NET_DEVLINK
imply NET_DROP_MONITOR
config PAGE_POOL
- bool
+ bool
config FAILOVER
tristate "Generic failover module"
diff --git a/net/atm/clip.c b/net/atm/clip.c
index a7972da7235d..294cb9efe3d3 100644
--- a/net/atm/clip.c
+++ b/net/atm/clip.c
@@ -89,7 +89,7 @@ static void unlink_clip_vcc(struct clip_vcc *clip_vcc)
struct clip_vcc **walk;
if (!entry) {
- pr_crit("!clip_vcc->entry (clip_vcc %p)\n", clip_vcc);
+ pr_err("!clip_vcc->entry (clip_vcc %p)\n", clip_vcc);
return;
}
netif_tx_lock_bh(entry->neigh->dev); /* block clip_start_xmit() */
@@ -109,10 +109,10 @@ static void unlink_clip_vcc(struct clip_vcc *clip_vcc)
error = neigh_update(entry->neigh, NULL, NUD_NONE,
NEIGH_UPDATE_F_ADMIN, 0);
if (error)
- pr_crit("neigh_update failed with %d\n", error);
+ pr_err("neigh_update failed with %d\n", error);
goto out;
}
- pr_crit("ATMARP: failed (entry %p, vcc 0x%p)\n", entry, clip_vcc);
+ pr_err("ATMARP: failed (entry %p, vcc 0x%p)\n", entry, clip_vcc);
out:
netif_tx_unlock_bh(entry->neigh->dev);
}
diff --git a/net/atm/signaling.c b/net/atm/signaling.c
index 6c11cdf4dd4c..fbd0c5e7b299 100644
--- a/net/atm/signaling.c
+++ b/net/atm/signaling.c
@@ -109,7 +109,7 @@ static int sigd_send(struct atm_vcc *vcc, struct sk_buff *skb)
dev_kfree_skb(skb);
goto as_indicate_complete;
}
- sk->sk_ack_backlog++;
+ sk_acceptq_added(sk);
skb_queue_tail(&sk->sk_receive_queue, skb);
pr_debug("waking sk_sleep(sk) 0x%p\n", sk_sleep(sk));
sk->sk_state_change(sk);
diff --git a/net/atm/svc.c b/net/atm/svc.c
index 908cbb8654f5..ba144d035e3d 100644
--- a/net/atm/svc.c
+++ b/net/atm/svc.c
@@ -381,7 +381,7 @@ static int svc_accept(struct socket *sock, struct socket *newsock, int flags,
msg->pvc.sap_addr.vpi,
msg->pvc.sap_addr.vci);
dev_kfree_skb(skb);
- sk->sk_ack_backlog--;
+ sk_acceptq_removed(sk);
if (error) {
sigd_enq2(NULL, as_reject, old_vcc, NULL, NULL,
&old_vcc->qos, error);
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index bb222b882b67..324306d6fde0 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -1384,7 +1384,7 @@ static int ax25_accept(struct socket *sock, struct socket *newsock, int flags,
/* Now attach up the new socket */
kfree_skb(skb);
- sk->sk_ack_backlog--;
+ sk_acceptq_removed(sk);
newsock->state = SS_CONNECTED;
out:
diff --git a/net/ax25/ax25_in.c b/net/ax25/ax25_in.c
index dcdbaeeb2358..cd6afe895db9 100644
--- a/net/ax25/ax25_in.c
+++ b/net/ax25/ax25_in.c
@@ -356,7 +356,7 @@ static int ax25_rcv(struct sk_buff *skb, struct net_device *dev,
make->sk_state = TCP_ESTABLISHED;
- sk->sk_ack_backlog++;
+ sk_acceptq_added(sk);
bh_unlock_sock(sk);
} else {
if (!mine)
diff --git a/net/batman-adv/bat_v.c b/net/batman-adv/bat_v.c
index 64054edc2e3c..4ff6cf1ecae7 100644
--- a/net/batman-adv/bat_v.c
+++ b/net/batman-adv/bat_v.c
@@ -1085,7 +1085,6 @@ void batadv_v_hardif_init(struct batadv_hard_iface *hard_iface)
hard_iface->bat_v.aggr_len = 0;
skb_queue_head_init(&hard_iface->bat_v.aggr_list);
- spin_lock_init(&hard_iface->bat_v.aggr_list_lock);
INIT_DELAYED_WORK(&hard_iface->bat_v.aggr_wq,
batadv_v_ogm_aggr_work);
}
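
Editor's note: the separate spin_lock_init() can go because skb_queue_head_init() already initializes the spinlock embedded in struct sk_buff_head; the bat_v_ogm.c hunks below switch to that lock and use the unlocked __skb_* queue helpers while holding it. For reference (include/linux/skbuff.h, abridged):

	struct sk_buff_head {
		struct sk_buff	*next;
		struct sk_buff	*prev;
		__u32		qlen;
		spinlock_t	lock;	/* now used as aggr_list.lock */
	};
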
diff --git a/net/batman-adv/bat_v_ogm.c b/net/batman-adv/bat_v_ogm.c
index 8033f24f506c..714ce56cfcc8 100644
--- a/net/batman-adv/bat_v_ogm.c
+++ b/net/batman-adv/bat_v_ogm.c
@@ -152,7 +152,7 @@ static unsigned int batadv_v_ogm_len(struct sk_buff *skb)
* @skb: the OGM to check
* @hard_iface: the interface to use to send the OGM
*
- * Caller needs to hold the hard_iface->bat_v.aggr_list_lock.
+ * Caller needs to hold the hard_iface->bat_v.aggr_list.lock.
*
* Return: True, if the given OGMv2 packet still fits, false otherwise.
*/
@@ -163,7 +163,7 @@ static bool batadv_v_ogm_queue_left(struct sk_buff *skb,
BATADV_MAX_AGGREGATION_BYTES);
unsigned int ogm_len = batadv_v_ogm_len(skb);
- lockdep_assert_held(&hard_iface->bat_v.aggr_list_lock);
+ lockdep_assert_held(&hard_iface->bat_v.aggr_list.lock);
return hard_iface->bat_v.aggr_len + ogm_len <= max;
}
@@ -174,17 +174,13 @@ static bool batadv_v_ogm_queue_left(struct sk_buff *skb,
*
* Empties the OGMv2 aggregation queue and frees all the skbs it contained.
*
- * Caller needs to hold the hard_iface->bat_v.aggr_list_lock.
+ * Caller needs to hold the hard_iface->bat_v.aggr_list.lock.
*/
static void batadv_v_ogm_aggr_list_free(struct batadv_hard_iface *hard_iface)
{
- struct sk_buff *skb;
-
- lockdep_assert_held(&hard_iface->bat_v.aggr_list_lock);
-
- while ((skb = skb_dequeue(&hard_iface->bat_v.aggr_list)))
- kfree_skb(skb);
+ lockdep_assert_held(&hard_iface->bat_v.aggr_list.lock);
+ __skb_queue_purge(&hard_iface->bat_v.aggr_list);
hard_iface->bat_v.aggr_len = 0;
}
@@ -197,7 +193,7 @@ static void batadv_v_ogm_aggr_list_free(struct batadv_hard_iface *hard_iface)
*
* The aggregation queue is empty after this call.
*
- * Caller needs to hold the hard_iface->bat_v.aggr_list_lock.
+ * Caller needs to hold the hard_iface->bat_v.aggr_list.lock.
*/
static void batadv_v_ogm_aggr_send(struct batadv_hard_iface *hard_iface)
{
@@ -206,7 +202,7 @@ static void batadv_v_ogm_aggr_send(struct batadv_hard_iface *hard_iface)
unsigned int ogm_len;
struct sk_buff *skb;
- lockdep_assert_held(&hard_iface->bat_v.aggr_list_lock);
+ lockdep_assert_held(&hard_iface->bat_v.aggr_list.lock);
if (!aggr_len)
return;
@@ -220,7 +216,7 @@ static void batadv_v_ogm_aggr_send(struct batadv_hard_iface *hard_iface)
skb_reserve(skb_aggr, ETH_HLEN + NET_IP_ALIGN);
skb_reset_network_header(skb_aggr);
- while ((skb = skb_dequeue(&hard_iface->bat_v.aggr_list))) {
+ while ((skb = __skb_dequeue(&hard_iface->bat_v.aggr_list))) {
hard_iface->bat_v.aggr_len -= batadv_v_ogm_len(skb);
ogm_len = batadv_v_ogm_len(skb);
@@ -247,13 +243,13 @@ static void batadv_v_ogm_queue_on_if(struct sk_buff *skb,
return;
}
- spin_lock_bh(&hard_iface->bat_v.aggr_list_lock);
+ spin_lock_bh(&hard_iface->bat_v.aggr_list.lock);
if (!batadv_v_ogm_queue_left(skb, hard_iface))
batadv_v_ogm_aggr_send(hard_iface);
hard_iface->bat_v.aggr_len += batadv_v_ogm_len(skb);
- skb_queue_tail(&hard_iface->bat_v.aggr_list, skb);
- spin_unlock_bh(&hard_iface->bat_v.aggr_list_lock);
+ __skb_queue_tail(&hard_iface->bat_v.aggr_list, skb);
+ spin_unlock_bh(&hard_iface->bat_v.aggr_list.lock);
}
/**
@@ -392,9 +388,9 @@ void batadv_v_ogm_aggr_work(struct work_struct *work)
batv = container_of(work, struct batadv_hard_iface_bat_v, aggr_wq.work);
hard_iface = container_of(batv, struct batadv_hard_iface, bat_v);
- spin_lock_bh(&hard_iface->bat_v.aggr_list_lock);
+ spin_lock_bh(&hard_iface->bat_v.aggr_list.lock);
batadv_v_ogm_aggr_send(hard_iface);
- spin_unlock_bh(&hard_iface->bat_v.aggr_list_lock);
+ spin_unlock_bh(&hard_iface->bat_v.aggr_list.lock);
batadv_v_ogm_start_queue_timer(hard_iface);
}
@@ -425,9 +421,9 @@ void batadv_v_ogm_iface_disable(struct batadv_hard_iface *hard_iface)
{
cancel_delayed_work_sync(&hard_iface->bat_v.aggr_wq);
- spin_lock_bh(&hard_iface->bat_v.aggr_list_lock);
+ spin_lock_bh(&hard_iface->bat_v.aggr_list.lock);
batadv_v_ogm_aggr_list_free(hard_iface);
- spin_unlock_bh(&hard_iface->bat_v.aggr_list_lock);
+ spin_unlock_bh(&hard_iface->bat_v.aggr_list.lock);
}
/**
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h
index 6967f2e4c3f4..c7b340ddd0e7 100644
--- a/net/batman-adv/main.h
+++ b/net/batman-adv/main.h
@@ -13,7 +13,7 @@
#define BATADV_DRIVER_DEVICE "batman-adv"
#ifndef BATADV_SOURCE_VERSION
-#define BATADV_SOURCE_VERSION "2019.4"
+#define BATADV_SOURCE_VERSION "2019.5"
#endif
/* B.A.T.M.A.N. parameters */
diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c
index 1d5bdf3a4b65..f9ec8e7507b6 100644
--- a/net/batman-adv/multicast.c
+++ b/net/batman-adv/multicast.c
@@ -1421,7 +1421,7 @@ batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb,
if (*orig)
return BATADV_FORW_SINGLE;
- /* fall through */
+ fallthrough;
case 0:
return BATADV_FORW_NONE;
default:
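This hunk, and the two soft-interface.c hunks below, replace the /* fall through */ comment with the fallthrough pseudo-keyword. Unlike a comment, the keyword survives preprocessing and can be verified by -Wimplicit-fallthrough; include/linux/compiler_attributes.h defines it roughly as:

#if __has_attribute(__fallthrough__)
# define fallthrough	__attribute__((__fallthrough__))
#else
# define fallthrough	do {} while (0)	/* fallthrough */
#endif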
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 5ee8e9a100f9..832e156c519e 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -22,7 +22,6 @@
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
-#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/percpu.h>
@@ -230,7 +229,7 @@ static netdev_tx_t batadv_interface_tx(struct sk_buff *skb,
break;
}
- /* fall through */
+ fallthrough;
case ETH_P_BATMAN:
goto dropped;
}
@@ -455,7 +454,7 @@ void batadv_interface_rx(struct net_device *soft_iface,
if (vhdr->h_vlan_encapsulated_proto != htons(ETH_P_BATMAN))
break;
- /* fall through */
+ fallthrough;
case ETH_P_BATMAN:
goto dropped;
}
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index 4d7f1baee7b7..47718a82eaf2 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -130,9 +130,6 @@ struct batadv_hard_iface_bat_v {
/** @aggr_len: size of the OGM aggregate (excluding ethernet header) */
unsigned int aggr_len;
- /** @aggr_list_lock: protects aggr_list */
- spinlock_t aggr_list_lock;
-
/**
* @throughput_override: throughput override to disable link
* auto-detection
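The aggr_list_lock removed above was redundant: struct sk_buff_head already carries its own spinlock alongside the list head and qlen. The bat_v_ogm.c hunks therefore take the queue's embedded lock explicitly and switch to the unlocked double-underscore queue primitives, a pattern that reduces to this minimal sketch (the two helpers are illustrative):

	spin_lock_bh(&q->lock);		/* q is a struct sk_buff_head */
	if (!queue_has_room(q, skb))	/* illustrative space check */
		flush_queue(q);		/* illustrative aggregate send */
	__skb_queue_tail(q, skb);	/* unlocked variant; lock is held */
	spin_unlock_bh(&q->lock);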
diff --git a/net/bluetooth/Kconfig b/net/bluetooth/Kconfig
index 3803135c88ff..165148c7c4ce 100644
--- a/net/bluetooth/Kconfig
+++ b/net/bluetooth/Kconfig
@@ -9,7 +9,7 @@ menuconfig BT
depends on RFKILL || !RFKILL
select CRC16
select CRYPTO
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_LIB_AES
imply CRYPTO_AES
select CRYPTO_CMAC
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 5f508c50649d..3fd124927d4d 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -173,7 +173,7 @@ void bt_accept_enqueue(struct sock *parent, struct sock *sk, bool bh)
else
release_sock(sk);
- parent->sk_ack_backlog++;
+ sk_acceptq_added(parent);
}
EXPORT_SYMBOL(bt_accept_enqueue);
@@ -185,7 +185,7 @@ void bt_accept_unlink(struct sock *sk)
BT_DBG("sk %p state %d", sk, sk->sk_state);
list_del_init(&bt_sk(sk)->accept_q);
- bt_sk(sk)->parent->sk_ack_backlog--;
+ sk_acceptq_removed(bt_sk(sk)->parent);
bt_sk(sk)->parent = NULL;
sock_put(sk);
}
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index ad5b0ac1f9ce..87691404d0c6 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -934,6 +934,14 @@ static void hci_req_directed_advertising(struct hci_request *req,
return;
memset(&cp, 0, sizeof(cp));
+
+ /* Some controllers might reject the command if the intervals are
+ * not within range for undirected advertising.
+ * BCM20702A0 is known to be affected by this.
+ */
+ cp.min_interval = cpu_to_le16(0x0020);
+ cp.max_interval = cpu_to_le16(0x0020);
+
cp.type = LE_ADV_DIRECT_IND;
cp.own_address_type = own_addr_type;
cp.direct_addr_type = conn->dst_type;
@@ -1168,8 +1176,10 @@ struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
if (!conn)
return ERR_PTR(-ENOMEM);
- if (hci_explicit_conn_params_set(hdev, dst, dst_type) < 0)
+ if (hci_explicit_conn_params_set(hdev, dst, dst_type) < 0) {
+ hci_conn_del(conn);
return ERR_PTR(-EBUSY);
+ }
conn->state = BT_CONNECT;
set_bit(HCI_CONN_SCANNING, &conn->flags);
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 04bc79359a17..9e19d5a3aac8 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -842,8 +842,8 @@ static int hci_init4_req(struct hci_request *req, unsigned long opt)
if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
struct hci_cp_le_write_def_data_len cp;
- cp.tx_len = hdev->le_max_tx_len;
- cp.tx_time = hdev->le_max_tx_time;
+ cp.tx_len = cpu_to_le16(hdev->le_max_tx_len);
+ cp.tx_time = cpu_to_le16(hdev->le_max_tx_time);
hci_req_add(req, HCI_OP_LE_WRITE_DEF_DATA_LEN, sizeof(cp), &cp);
}
@@ -1444,11 +1444,20 @@ static int hci_dev_do_open(struct hci_dev *hdev)
if (hci_dev_test_flag(hdev, HCI_SETUP) ||
test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks)) {
+ bool invalid_bdaddr;
+
hci_sock_dev_event(hdev, HCI_DEV_SETUP);
if (hdev->setup)
ret = hdev->setup(hdev);
+ /* The transport driver can set the quirk to mark the
+ * BD_ADDR invalid before creating the HCI device or in
+ * its setup callback.
+ */
+ invalid_bdaddr = test_bit(HCI_QUIRK_INVALID_BDADDR,
+ &hdev->quirks);
+
if (ret)
goto setup_failed;
@@ -1457,20 +1466,33 @@ static int hci_dev_do_open(struct hci_dev *hdev)
hci_dev_get_bd_addr_from_property(hdev);
if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
- hdev->set_bdaddr)
+ hdev->set_bdaddr) {
ret = hdev->set_bdaddr(hdev,
&hdev->public_addr);
+
+ /* If setting the BD_ADDR from the device
+ * property succeeds, then treat the address
+ * as valid even if the invalid BD_ADDR
+ * quirk indicates otherwise.
+ */
+ if (!ret)
+ invalid_bdaddr = false;
+ }
}
setup_failed:
/* The transport driver can set these quirks before
* creating the HCI device or in its setup callback.
*
+ * For the invalid BD_ADDR quirk it is possible that
+ * it becomes a valid address if the bootloader does
+ * provide it (see above).
+ *
* In case any of them is set, the controller has to
* start up as unconfigured.
*/
if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
- test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
+ invalid_bdaddr)
hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
/* For an unconfigured controller it is required to
@@ -4440,7 +4462,14 @@ static void hci_rx_work(struct work_struct *work)
hci_send_to_sock(hdev, skb);
}
- if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
+ /* If the device has been opened in HCI_USER_CHANNEL,
+ * userspace has exclusive access to the device.
+ * While the device is in HCI_INIT, we still need to pass
+ * the data packets to the driver in order
+ * to complete its setup().
+ */
+ if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
+ !test_bit(HCI_INIT, &hdev->flags)) {
kfree_skb(skb);
continue;
}
diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c
index 7f6a581b5b7e..2a1b64dbf76e 100644
--- a/net/bluetooth/hci_request.c
+++ b/net/bluetooth/hci_request.c
@@ -904,9 +904,9 @@ static u8 get_adv_instance_scan_rsp_len(struct hci_dev *hdev, u8 instance)
{
struct adv_info *adv_instance;
- /* Ignore instance 0 */
+ /* Instance 0x00 always sets the local name */
if (instance == 0x00)
- return 0;
+ return 1;
adv_instance = hci_find_adv_instance(hdev, instance);
if (!adv_instance)
@@ -923,9 +923,9 @@ static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
u8 instance = hdev->cur_adv_instance;
struct adv_info *adv_instance;
- /* Ignore instance 0 */
+ /* Instance 0x00 always sets the local name */
if (instance == 0x00)
- return 0;
+ return 1;
adv_instance = hci_find_adv_instance(hdev, instance);
if (!adv_instance)
@@ -1273,6 +1273,14 @@ static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
instance_flags = get_adv_instance_flags(hdev, instance);
+ /* If the instance already has the flags set, skip adding
+ * them again.
+ */
+ if (adv_instance && eir_get_data(adv_instance->adv_data,
+ adv_instance->adv_data_len, EIR_FLAGS,
+ NULL))
+ goto skip_flags;
+
/* The Add Advertising command allows userspace to set both the general
* and limited discoverable flags.
*/
@@ -1305,6 +1313,7 @@ static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
}
}
+skip_flags:
if (adv_instance) {
memcpy(ptr, adv_instance->adv_data,
adv_instance->adv_data_len);
@@ -1690,7 +1699,7 @@ int __hci_req_enable_ext_advertising(struct hci_request *req, u8 instance)
* scheduling it.
*/
if (adv_instance && adv_instance->duration) {
- u16 duration = adv_instance->duration * MSEC_PER_SEC;
+ u16 duration = adv_instance->timeout * MSEC_PER_SEC;
/* Time = N * 10 ms */
adv_set->duration = cpu_to_le16(duration / 10);
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index da7fdbdf9c41..a845786258a0 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -4936,10 +4936,8 @@ void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
chan, result, local_amp_id, remote_amp_id);
- if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
- l2cap_chan_unlock(chan);
+ if (chan->state == BT_DISCONN || chan->state == BT_CLOSED)
return;
- }
if (chan->state != BT_CONNECTED) {
l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 26e8cfad22b8..6b42be4b5861 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -502,15 +502,12 @@ bool smp_irk_matches(struct hci_dev *hdev, const u8 irk[16],
const bdaddr_t *bdaddr)
{
struct l2cap_chan *chan = hdev->smp_data;
- struct smp_dev *smp;
u8 hash[3];
int err;
if (!chan || !chan->data)
return false;
- smp = chan->data;
-
BT_DBG("RPA %pMR IRK %*phN", bdaddr, 16, irk);
err = smp_ah(irk, &bdaddr->b[3], hash);
@@ -523,14 +520,11 @@ bool smp_irk_matches(struct hci_dev *hdev, const u8 irk[16],
int smp_generate_rpa(struct hci_dev *hdev, const u8 irk[16], bdaddr_t *rpa)
{
struct l2cap_chan *chan = hdev->smp_data;
- struct smp_dev *smp;
int err;
if (!chan || !chan->data)
return -EOPNOTSUPP;
- smp = chan->data;
-
get_random_bytes(&rpa->b[3], 3);
rpa->b[5] &= 0x3f; /* Clear two most significant bits */
diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
index 1153bbcdff72..915c2d6f7fb9 100644
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -105,6 +105,40 @@ out:
return err;
}
+/* Integer types of various sizes and pointer combinations cover a variety
+ * of architecture-dependent calling conventions. Seven or more arguments
+ * can be supported in the future.
+ */
+int noinline bpf_fentry_test1(int a)
+{
+ return a + 1;
+}
+
+int noinline bpf_fentry_test2(int a, u64 b)
+{
+ return a + b;
+}
+
+int noinline bpf_fentry_test3(char a, int b, u64 c)
+{
+ return a + b + c;
+}
+
+int noinline bpf_fentry_test4(void *a, char b, int c, u64 d)
+{
+ return (long)a + b + c + d;
+}
+
+int noinline bpf_fentry_test5(u64 a, void *b, short c, int d, u64 e)
+{
+ return a + (long)b + c + d + e;
+}
+
+int noinline bpf_fentry_test6(u64 a, void *b, short c, int d, void *e, u64 f)
+{
+ return a + (long)b + c + d + (long)e + f;
+}
+
static void *bpf_test_init(const union bpf_attr *kattr, u32 size,
u32 headroom, u32 tailroom)
{
@@ -122,6 +156,15 @@ static void *bpf_test_init(const union bpf_attr *kattr, u32 size,
kfree(data);
return ERR_PTR(-EFAULT);
}
+ if (bpf_fentry_test1(1) != 2 ||
+ bpf_fentry_test2(2, 3) != 5 ||
+ bpf_fentry_test3(4, 5, 6) != 15 ||
+ bpf_fentry_test4((void *)7, 8, 9, 10) != 34 ||
+ bpf_fentry_test5(11, (void *)12, 13, 14, 15) != 65 ||
+ bpf_fentry_test6(16, (void *)17, 18, 19, (void *)20, 21) != 111) {
+ kfree(data);
+ return ERR_PTR(-EFAULT);
+ }
return data;
}
@@ -218,10 +261,18 @@ static int convert___skb_to_skb(struct sk_buff *skb, struct __sk_buff *__skb)
if (!range_is_zero(__skb, offsetof(struct __sk_buff, cb) +
FIELD_SIZEOF(struct __sk_buff, cb),
+ offsetof(struct __sk_buff, tstamp)))
+ return -EINVAL;
+
+ /* tstamp is allowed */
+
+ if (!range_is_zero(__skb, offsetof(struct __sk_buff, tstamp) +
+ FIELD_SIZEOF(struct __sk_buff, tstamp),
sizeof(struct __sk_buff)))
return -EINVAL;
skb->priority = __skb->priority;
+ skb->tstamp = __skb->tstamp;
memcpy(&cb->data, __skb->cb, QDISC_CB_PRIV_LEN);
return 0;
@@ -235,6 +286,7 @@ static void convert_skb_to___skb(struct sk_buff *skb, struct __sk_buff *__skb)
return;
__skb->priority = skb->priority;
+ __skb->tstamp = skb->tstamp;
memcpy(__skb->cb, &cb->data, QDISC_CB_PRIV_LEN);
}
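The bpf_fentry_test1..6 functions exist purely as stable attach points: every BPF_PROG_TEST_RUN invocation now also verifies that any attached fentry/fexit trampolines left the arguments intact. A BPF program targeting one of them would look roughly like this sketch, following the libbpf conventions the kernel selftests use (names here are illustrative):

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

__u64 test1_called = 0;

SEC("fentry/bpf_fentry_test1")
int BPF_PROG(trace_test1, int a)
{
	test1_called = (a == 1);	/* record that the expected arg arrived */
	return 0;
}

char _license[] SEC("license") = "GPL";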
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index e804a3016902..434effde02c3 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -263,6 +263,37 @@ static void br_getinfo(struct net_device *dev, struct ethtool_drvinfo *info)
strlcpy(info->bus_info, "N/A", sizeof(info->bus_info));
}
+static int br_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct net_bridge *br = netdev_priv(dev);
+ struct net_bridge_port *p;
+
+ cmd->base.duplex = DUPLEX_UNKNOWN;
+ cmd->base.port = PORT_OTHER;
+ cmd->base.speed = SPEED_UNKNOWN;
+
+ list_for_each_entry(p, &br->port_list, list) {
+ struct ethtool_link_ksettings ecmd;
+ struct net_device *pdev = p->dev;
+
+ if (!netif_running(pdev) || !netif_oper_up(pdev))
+ continue;
+
+ if (__ethtool_get_link_ksettings(pdev, &ecmd))
+ continue;
+
+ if (ecmd.base.speed == (__u32)SPEED_UNKNOWN)
+ continue;
+
+ if (cmd->base.speed == (__u32)SPEED_UNKNOWN ||
+ cmd->base.speed < ecmd.base.speed)
+ cmd->base.speed = ecmd.base.speed;
+ }
+
+ return 0;
+}
+
static netdev_features_t br_fix_features(struct net_device *dev,
netdev_features_t features)
{
@@ -365,8 +396,9 @@ static int br_del_slave(struct net_device *dev, struct net_device *slave_dev)
}
static const struct ethtool_ops br_ethtool_ops = {
- .get_drvinfo = br_getinfo,
- .get_link = ethtool_op_get_link,
+ .get_drvinfo = br_getinfo,
+ .get_link = ethtool_op_get_link,
+ .get_link_ksettings = br_get_link_ksettings,
};
static const struct net_device_ops br_netdev_ops = {
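br_get_link_ksettings() reports the fastest of the bridge's operationally-up ports. The (__u32) casts are load-bearing: uapi/linux/ethtool.h defines SPEED_UNKNOWN as -1 while base.speed is unsigned, so without the cast the sentinel would win every "<" comparison as a huge valid speed. In miniature:

	__u32 speed = SPEED_UNKNOWN;		/* stored as 0xffffffff */

	if (speed == (__u32)SPEED_UNKNOWN)	/* both sides 0xffffffff */
		;				/* treat as unknown */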
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index b1d3248c0252..4877a0db16c6 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -75,8 +75,9 @@ static inline unsigned long hold_time(const struct net_bridge *br)
static inline int has_expired(const struct net_bridge *br,
const struct net_bridge_fdb_entry *fdb)
{
- return !fdb->is_static && !fdb->added_by_external_learn &&
- time_before_eq(fdb->updated + hold_time(br), jiffies);
+ return !test_bit(BR_FDB_STATIC, &fdb->flags) &&
+ !test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags) &&
+ time_before_eq(fdb->updated + hold_time(br), jiffies);
}
static void fdb_rcu_free(struct rcu_head *head)
@@ -197,7 +198,7 @@ static void fdb_delete(struct net_bridge *br, struct net_bridge_fdb_entry *f,
{
trace_fdb_delete(br, f);
- if (f->is_static)
+ if (test_bit(BR_FDB_STATIC, &f->flags))
fdb_del_hw_addr(br, f->key.addr.addr);
hlist_del_init_rcu(&f->fdb_node);
@@ -224,7 +225,7 @@ static void fdb_delete_local(struct net_bridge *br,
if (op != p && ether_addr_equal(op->dev->dev_addr, addr) &&
(!vid || br_vlan_find(vg, vid))) {
f->dst = op;
- f->added_by_user = 0;
+ clear_bit(BR_FDB_ADDED_BY_USER, &f->flags);
return;
}
}
@@ -235,7 +236,7 @@ static void fdb_delete_local(struct net_bridge *br,
if (p && ether_addr_equal(br->dev->dev_addr, addr) &&
(!vid || (v && br_vlan_should_use(v)))) {
f->dst = NULL;
- f->added_by_user = 0;
+ clear_bit(BR_FDB_ADDED_BY_USER, &f->flags);
return;
}
@@ -250,7 +251,8 @@ void br_fdb_find_delete_local(struct net_bridge *br,
spin_lock_bh(&br->hash_lock);
f = br_fdb_find(br, addr, vid);
- if (f && f->is_local && !f->added_by_user && f->dst == p)
+ if (f && test_bit(BR_FDB_LOCAL, &f->flags) &&
+ !test_bit(BR_FDB_ADDED_BY_USER, &f->flags) && f->dst == p)
fdb_delete_local(br, p, f);
spin_unlock_bh(&br->hash_lock);
}
@@ -265,7 +267,8 @@ void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr)
spin_lock_bh(&br->hash_lock);
vg = nbp_vlan_group(p);
hlist_for_each_entry(f, &br->fdb_list, fdb_node) {
- if (f->dst == p && f->is_local && !f->added_by_user) {
+ if (f->dst == p && test_bit(BR_FDB_LOCAL, &f->flags) &&
+ !test_bit(BR_FDB_ADDED_BY_USER, &f->flags)) {
/* delete old one */
fdb_delete_local(br, p, f);
@@ -306,7 +309,8 @@ void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr)
/* If old entry was unassociated with any port, then delete it. */
f = br_fdb_find(br, br->dev->dev_addr, 0);
- if (f && f->is_local && !f->dst && !f->added_by_user)
+ if (f && test_bit(BR_FDB_LOCAL, &f->flags) &&
+ !f->dst && !test_bit(BR_FDB_ADDED_BY_USER, &f->flags))
fdb_delete_local(br, NULL, f);
fdb_insert(br, NULL, newaddr, 0);
@@ -321,7 +325,8 @@ void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr)
if (!br_vlan_should_use(v))
continue;
f = br_fdb_find(br, br->dev->dev_addr, v->vid);
- if (f && f->is_local && !f->dst && !f->added_by_user)
+ if (f && test_bit(BR_FDB_LOCAL, &f->flags) &&
+ !f->dst && !test_bit(BR_FDB_ADDED_BY_USER, &f->flags))
fdb_delete_local(br, NULL, f);
fdb_insert(br, NULL, newaddr, v->vid);
}
@@ -346,7 +351,8 @@ void br_fdb_cleanup(struct work_struct *work)
hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
unsigned long this_timer;
- if (f->is_static || f->added_by_external_learn)
+ if (test_bit(BR_FDB_STATIC, &f->flags) ||
+ test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &f->flags))
continue;
this_timer = f->updated + delay;
if (time_after(this_timer, now)) {
@@ -373,7 +379,7 @@ void br_fdb_flush(struct net_bridge *br)
spin_lock_bh(&br->hash_lock);
hlist_for_each_entry_safe(f, tmp, &br->fdb_list, fdb_node) {
- if (!f->is_static)
+ if (!test_bit(BR_FDB_STATIC, &f->flags))
fdb_delete(br, f, true);
}
spin_unlock_bh(&br->hash_lock);
@@ -397,10 +403,11 @@ void br_fdb_delete_by_port(struct net_bridge *br,
continue;
if (!do_all)
- if (f->is_static || (vid && f->key.vlan_id != vid))
+ if (test_bit(BR_FDB_STATIC, &f->flags) ||
+ (vid && f->key.vlan_id != vid))
continue;
- if (f->is_local)
+ if (test_bit(BR_FDB_LOCAL, &f->flags))
fdb_delete_local(br, p, f);
else
fdb_delete(br, f, true);
@@ -469,8 +476,8 @@ int br_fdb_fillbuf(struct net_bridge *br, void *buf,
fe->port_no = f->dst->port_no;
fe->port_hi = f->dst->port_no >> 8;
- fe->is_local = f->is_local;
- if (!f->is_static)
+ fe->is_local = test_bit(BR_FDB_LOCAL, &f->flags);
+ if (!test_bit(BR_FDB_STATIC, &f->flags))
fe->ageing_timer_value = jiffies_delta_to_clock_t(jiffies - f->updated);
++fe;
++num;
@@ -484,8 +491,7 @@ static struct net_bridge_fdb_entry *fdb_create(struct net_bridge *br,
struct net_bridge_port *source,
const unsigned char *addr,
__u16 vid,
- unsigned char is_local,
- unsigned char is_static)
+ unsigned long flags)
{
struct net_bridge_fdb_entry *fdb;
@@ -494,12 +500,7 @@ static struct net_bridge_fdb_entry *fdb_create(struct net_bridge *br,
memcpy(fdb->key.addr.addr, addr, ETH_ALEN);
fdb->dst = source;
fdb->key.vlan_id = vid;
- fdb->is_local = is_local;
- fdb->is_static = is_static;
- fdb->added_by_user = 0;
- fdb->added_by_external_learn = 0;
- fdb->offloaded = 0;
- fdb->is_sticky = 0;
+ fdb->flags = flags;
fdb->updated = fdb->used = jiffies;
if (rhashtable_lookup_insert_fast(&br->fdb_hash_tbl,
&fdb->rhnode,
@@ -526,14 +527,15 @@ static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
/* it is okay to have multiple ports with same
* address, just use the first one.
*/
- if (fdb->is_local)
+ if (test_bit(BR_FDB_LOCAL, &fdb->flags))
return 0;
br_warn(br, "adding interface %s with same address as a received packet (addr:%pM, vlan:%u)\n",
source ? source->dev->name : br->dev->name, addr, vid);
fdb_delete(br, fdb, true);
}
- fdb = fdb_create(br, source, addr, vid, 1, 1);
+ fdb = fdb_create(br, source, addr, vid,
+ BIT(BR_FDB_LOCAL) | BIT(BR_FDB_STATIC));
if (!fdb)
return -ENOMEM;
@@ -555,7 +557,7 @@ int br_fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
}
void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
- const unsigned char *addr, u16 vid, bool added_by_user)
+ const unsigned char *addr, u16 vid, unsigned long flags)
{
struct net_bridge_fdb_entry *fdb;
bool fdb_modified = false;
@@ -564,15 +566,10 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
if (hold_time(br) == 0)
return;
- /* ignore packets unless we are using this port */
- if (!(source->state == BR_STATE_LEARNING ||
- source->state == BR_STATE_FORWARDING))
- return;
-
fdb = fdb_find_rcu(&br->fdb_hash_tbl, addr, vid);
if (likely(fdb)) {
/* attempt to update an entry for a local interface */
- if (unlikely(fdb->is_local)) {
+ if (unlikely(test_bit(BR_FDB_LOCAL, &fdb->flags))) {
if (net_ratelimit())
br_warn(br, "received packet on %s with own address as source address (addr:%pM, vlan:%u)\n",
source->dev->name, addr, vid);
@@ -580,30 +577,30 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
unsigned long now = jiffies;
/* fastpath: update of existing entry */
- if (unlikely(source != fdb->dst && !fdb->is_sticky)) {
+ if (unlikely(source != fdb->dst &&
+ !test_bit(BR_FDB_STICKY, &fdb->flags))) {
fdb->dst = source;
fdb_modified = true;
/* Take over HW learned entry */
- if (unlikely(fdb->added_by_external_learn))
- fdb->added_by_external_learn = 0;
+ if (unlikely(test_bit(BR_FDB_ADDED_BY_EXT_LEARN,
+ &fdb->flags)))
+ clear_bit(BR_FDB_ADDED_BY_EXT_LEARN,
+ &fdb->flags);
}
if (now != fdb->updated)
fdb->updated = now;
- if (unlikely(added_by_user))
- fdb->added_by_user = 1;
+ if (unlikely(test_bit(BR_FDB_ADDED_BY_USER, &flags)))
+ set_bit(BR_FDB_ADDED_BY_USER, &fdb->flags);
if (unlikely(fdb_modified)) {
- trace_br_fdb_update(br, source, addr, vid, added_by_user);
+ trace_br_fdb_update(br, source, addr, vid, flags);
fdb_notify(br, fdb, RTM_NEWNEIGH, true);
}
}
} else {
spin_lock(&br->hash_lock);
- fdb = fdb_create(br, source, addr, vid, 0, 0);
+ fdb = fdb_create(br, source, addr, vid, flags);
if (fdb) {
- if (unlikely(added_by_user))
- fdb->added_by_user = 1;
- trace_br_fdb_update(br, source, addr, vid,
- added_by_user);
+ trace_br_fdb_update(br, source, addr, vid, flags);
fdb_notify(br, fdb, RTM_NEWNEIGH, true);
}
/* else we lose race and someone else inserts
@@ -616,9 +613,9 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
static int fdb_to_nud(const struct net_bridge *br,
const struct net_bridge_fdb_entry *fdb)
{
- if (fdb->is_local)
+ if (test_bit(BR_FDB_LOCAL, &fdb->flags))
return NUD_PERMANENT;
- else if (fdb->is_static)
+ else if (test_bit(BR_FDB_STATIC, &fdb->flags))
return NUD_NOARP;
else if (has_expired(br, fdb))
return NUD_STALE;
@@ -648,11 +645,11 @@ static int fdb_fill_info(struct sk_buff *skb, const struct net_bridge *br,
ndm->ndm_ifindex = fdb->dst ? fdb->dst->dev->ifindex : br->dev->ifindex;
ndm->ndm_state = fdb_to_nud(br, fdb);
- if (fdb->offloaded)
+ if (test_bit(BR_FDB_OFFLOADED, &fdb->flags))
ndm->ndm_flags |= NTF_OFFLOADED;
- if (fdb->added_by_external_learn)
+ if (test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags))
ndm->ndm_flags |= NTF_EXT_LEARNED;
- if (fdb->is_sticky)
+ if (test_bit(BR_FDB_STICKY, &fdb->flags))
ndm->ndm_flags |= NTF_STICKY;
if (nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->key.addr))
@@ -799,7 +796,7 @@ static int fdb_add_entry(struct net_bridge *br, struct net_bridge_port *source,
const u8 *addr, u16 state, u16 flags, u16 vid,
u8 ndm_flags)
{
- u8 is_sticky = !!(ndm_flags & NTF_STICKY);
+ bool is_sticky = !!(ndm_flags & NTF_STICKY);
struct net_bridge_fdb_entry *fdb;
bool modified = false;
@@ -823,7 +820,7 @@ static int fdb_add_entry(struct net_bridge *br, struct net_bridge_port *source,
if (!(flags & NLM_F_CREATE))
return -ENOENT;
- fdb = fdb_create(br, source, addr, vid, 0, 0);
+ fdb = fdb_create(br, source, addr, vid, 0);
if (!fdb)
return -ENOMEM;
@@ -840,34 +837,28 @@ static int fdb_add_entry(struct net_bridge *br, struct net_bridge_port *source,
if (fdb_to_nud(br, fdb) != state) {
if (state & NUD_PERMANENT) {
- fdb->is_local = 1;
- if (!fdb->is_static) {
- fdb->is_static = 1;
+ set_bit(BR_FDB_LOCAL, &fdb->flags);
+ if (!test_and_set_bit(BR_FDB_STATIC, &fdb->flags))
fdb_add_hw_addr(br, addr);
- }
} else if (state & NUD_NOARP) {
- fdb->is_local = 0;
- if (!fdb->is_static) {
- fdb->is_static = 1;
+ clear_bit(BR_FDB_LOCAL, &fdb->flags);
+ if (!test_and_set_bit(BR_FDB_STATIC, &fdb->flags))
fdb_add_hw_addr(br, addr);
- }
} else {
- fdb->is_local = 0;
- if (fdb->is_static) {
- fdb->is_static = 0;
+ clear_bit(BR_FDB_LOCAL, &fdb->flags);
+ if (test_and_clear_bit(BR_FDB_STATIC, &fdb->flags))
fdb_del_hw_addr(br, addr);
- }
}
modified = true;
}
- if (is_sticky != fdb->is_sticky) {
- fdb->is_sticky = is_sticky;
+ if (is_sticky != test_bit(BR_FDB_STICKY, &fdb->flags)) {
+ change_bit(BR_FDB_STICKY, &fdb->flags);
modified = true;
}
- fdb->added_by_user = 1;
+ set_bit(BR_FDB_ADDED_BY_USER, &fdb->flags);
fdb->used = jiffies;
if (modified) {
@@ -890,9 +881,12 @@ static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge *br,
br->dev->name);
return -EINVAL;
}
+ if (!nbp_state_should_learn(p))
+ return 0;
+
local_bh_disable();
rcu_read_lock();
- br_fdb_update(br, p, addr, vid, true);
+ br_fdb_update(br, p, addr, vid, BIT(BR_FDB_ADDED_BY_USER));
rcu_read_unlock();
local_bh_enable();
} else if (ndm->ndm_flags & NTF_EXT_LEARNED) {
@@ -1064,7 +1058,7 @@ int br_fdb_sync_static(struct net_bridge *br, struct net_bridge_port *p)
rcu_read_lock();
hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
/* We only care for static entries */
- if (!f->is_static)
+ if (!test_bit(BR_FDB_STATIC, &f->flags))
continue;
err = dev_uc_add(p->dev, f->key.addr.addr);
if (err)
@@ -1078,7 +1072,7 @@ done:
rollback:
hlist_for_each_entry_rcu(tmp, &br->fdb_list, fdb_node) {
/* We only care for static entries */
- if (!tmp->is_static)
+ if (!test_bit(BR_FDB_STATIC, &tmp->flags))
continue;
if (tmp == f)
break;
@@ -1097,7 +1091,7 @@ void br_fdb_unsync_static(struct net_bridge *br, struct net_bridge_port *p)
rcu_read_lock();
hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
/* We only care for static entries */
- if (!f->is_static)
+ if (!test_bit(BR_FDB_STATIC, &f->flags))
continue;
dev_uc_del(p->dev, f->key.addr.addr);
@@ -1119,14 +1113,15 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
fdb = br_fdb_find(br, addr, vid);
if (!fdb) {
- fdb = fdb_create(br, p, addr, vid, 0, 0);
+ unsigned long flags = BIT(BR_FDB_ADDED_BY_EXT_LEARN);
+
+ if (swdev_notify)
+ flags |= BIT(BR_FDB_ADDED_BY_USER);
+ fdb = fdb_create(br, p, addr, vid, flags);
if (!fdb) {
err = -ENOMEM;
goto err_unlock;
}
- if (swdev_notify)
- fdb->added_by_user = 1;
- fdb->added_by_external_learn = 1;
fdb_notify(br, fdb, RTM_NEWNEIGH, swdev_notify);
} else {
fdb->updated = jiffies;
@@ -1136,17 +1131,17 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
modified = true;
}
- if (fdb->added_by_external_learn) {
+ if (test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags)) {
/* Refresh entry */
fdb->used = jiffies;
- } else if (!fdb->added_by_user) {
+ } else if (!test_bit(BR_FDB_ADDED_BY_USER, &fdb->flags)) {
/* Take over SW learned entry */
- fdb->added_by_external_learn = 1;
+ set_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags);
modified = true;
}
if (swdev_notify)
- fdb->added_by_user = 1;
+ set_bit(BR_FDB_ADDED_BY_USER, &fdb->flags);
if (modified)
fdb_notify(br, fdb, RTM_NEWNEIGH, swdev_notify);
@@ -1168,7 +1163,7 @@ int br_fdb_external_learn_del(struct net_bridge *br, struct net_bridge_port *p,
spin_lock_bh(&br->hash_lock);
fdb = br_fdb_find(br, addr, vid);
- if (fdb && fdb->added_by_external_learn)
+ if (fdb && test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags))
fdb_delete(br, fdb, swdev_notify);
else
err = -ENOENT;
@@ -1186,8 +1181,8 @@ void br_fdb_offloaded_set(struct net_bridge *br, struct net_bridge_port *p,
spin_lock_bh(&br->hash_lock);
fdb = br_fdb_find(br, addr, vid);
- if (fdb)
- fdb->offloaded = offloaded;
+ if (fdb && offloaded != test_bit(BR_FDB_OFFLOADED, &fdb->flags))
+ change_bit(BR_FDB_OFFLOADED, &fdb->flags);
spin_unlock_bh(&br->hash_lock);
}
@@ -1206,7 +1201,7 @@ void br_fdb_clear_offload(const struct net_device *dev, u16 vid)
spin_lock_bh(&p->br->hash_lock);
hlist_for_each_entry(f, &p->br->fdb_list, fdb_node) {
if (f->dst == p && f->key.vlan_id == vid)
- f->offloaded = 0;
+ clear_bit(BR_FDB_OFFLOADED, &f->flags);
}
spin_unlock_bh(&p->br->hash_lock);
}
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 09b1dd8cd853..8944ceb47fe9 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -88,7 +88,7 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb
/* insert into forwarding database after filtering to avoid spoofing */
br = p->br;
if (p->flags & BR_LEARNING)
- br_fdb_update(br, p, eth_hdr(skb)->h_source, vid, false);
+ br_fdb_update(br, p, eth_hdr(skb)->h_source, vid, 0);
local_rcv = !!(br->dev->flags & IFF_PROMISC);
if (is_multicast_ether_addr(eth_hdr(skb)->h_dest)) {
@@ -151,7 +151,7 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb
if (dst) {
unsigned long now = jiffies;
- if (dst->is_local)
+ if (test_bit(BR_FDB_LOCAL, &dst->flags))
return br_pass_frame_up(skb);
if (now != dst->used)
@@ -182,9 +182,10 @@ static void __br_handle_local_finish(struct sk_buff *skb)
/* check if vlan is allowed, to avoid spoofing */
if ((p->flags & BR_LEARNING) &&
+ nbp_state_should_learn(p) &&
!br_opt_get(p->br, BROPT_NO_LL_LEARN) &&
br_should_learn(p, skb, &vid))
- br_fdb_update(p->br, p, eth_hdr(skb)->h_source, vid, false);
+ br_fdb_update(p->br, p, eth_hdr(skb)->h_source, vid, 0);
}
/* note: already called with rcu_read_lock */
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index ce2ab14ee605..36b0367ca1e0 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -172,6 +172,16 @@ struct net_bridge_vlan_group {
u16 pvid;
};
+/* bridge fdb flags */
+enum {
+ BR_FDB_LOCAL,
+ BR_FDB_STATIC,
+ BR_FDB_STICKY,
+ BR_FDB_ADDED_BY_USER,
+ BR_FDB_ADDED_BY_EXT_LEARN,
+ BR_FDB_OFFLOADED,
+};
+
struct net_bridge_fdb_key {
mac_addr addr;
u16 vlan_id;
@@ -183,12 +193,7 @@ struct net_bridge_fdb_entry {
struct net_bridge_fdb_key key;
struct hlist_node fdb_node;
- unsigned char is_local:1,
- is_static:1,
- is_sticky:1,
- added_by_user:1,
- added_by_external_learn:1,
- offloaded:1;
+ unsigned long flags;
/* write-heavy members should not affect lookups */
unsigned long updated ____cacheline_aligned_in_smp;
@@ -495,6 +500,11 @@ static inline bool br_vlan_should_use(const struct net_bridge_vlan *v)
return true;
}
+static inline bool nbp_state_should_learn(const struct net_bridge_port *p)
+{
+ return p->state == BR_STATE_LEARNING || p->state == BR_STATE_FORWARDING;
+}
+
static inline int br_opt_get(const struct net_bridge *br,
enum net_bridge_opts opt)
{
@@ -566,7 +576,7 @@ int br_fdb_fillbuf(struct net_bridge *br, void *buf, unsigned long count,
int br_fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
const unsigned char *addr, u16 vid);
void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
- const unsigned char *addr, u16 vid, bool added_by_user);
+ const unsigned char *addr, u16 vid, unsigned long flags);
int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev, const unsigned char *addr, u16 vid);
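Collapsing the fdb entry's one-bit bitfields into a single unsigned long flags word is what makes the lockless br_fdb_update() fast path above safe: the set_bit()/test_bit()/test_and_set_bit()/change_bit() family operates atomically on individual bits, whereas adjacent C bitfields cannot be modified concurrently without an external lock. The idiom, condensed:

	unsigned long flags = 0;

	set_bit(BR_FDB_LOCAL, &flags);			/* atomic set */
	if (test_bit(BR_FDB_LOCAL, &flags))
		;					/* atomic read */
	if (!test_and_set_bit(BR_FDB_STATIC, &flags))
		;	/* first transition only: sync the HW address */
	change_bit(BR_FDB_STICKY, &flags);		/* atomic toggle */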
diff --git a/net/bridge/br_switchdev.c b/net/bridge/br_switchdev.c
index 921310d3cbae..015209bf44aa 100644
--- a/net/bridge/br_switchdev.c
+++ b/net/bridge/br_switchdev.c
@@ -129,15 +129,19 @@ br_switchdev_fdb_notify(const struct net_bridge_fdb_entry *fdb, int type)
br_switchdev_fdb_call_notifiers(false, fdb->key.addr.addr,
fdb->key.vlan_id,
fdb->dst->dev,
- fdb->added_by_user,
- fdb->offloaded);
+ test_bit(BR_FDB_ADDED_BY_USER,
+ &fdb->flags),
+ test_bit(BR_FDB_OFFLOADED,
+ &fdb->flags));
break;
case RTM_NEWNEIGH:
br_switchdev_fdb_call_notifiers(true, fdb->key.addr.addr,
fdb->key.vlan_id,
fdb->dst->dev,
- fdb->added_by_user,
- fdb->offloaded);
+ test_bit(BR_FDB_ADDED_BY_USER,
+ &fdb->flags),
+ test_bit(BR_FDB_OFFLOADED,
+ &fdb->flags));
break;
}
}
diff --git a/net/caif/Kconfig b/net/caif/Kconfig
index eb83051c8330..b7532a79ca7a 100644
--- a/net/caif/Kconfig
+++ b/net/caif/Kconfig
@@ -13,11 +13,11 @@ menuconfig CAIF
with its modems. It is accessed from user space as sockets (PF_CAIF).
Say Y (or M) here if you build for a phone product (e.g. Android or
- MeeGo ) that uses CAIF as transport, if unsure say N.
+ MeeGo) that uses CAIF as transport. If unsure say N.
If you select to build it as module then CAIF_NETDEV also needs to be
- built as modules. You will also need to say yes to any CAIF physical
- devices that your platform requires.
+ built as a module. You will also need to say Y (or M) to any CAIF
+ physical devices that your platform requires.
See Documentation/networking/caif for a further explanation on how to
use and configure CAIF.
@@ -37,7 +37,7 @@ config CAIF_NETDEV
default CAIF
---help---
Say Y if you will be using a CAIF based GPRS network device.
- This can be either built-in or a loadable module,
+ This can be either built-in or a loadable module.
If you select to build it as a built-in then the main CAIF device must
also be a built-in.
If unsure say Y.
@@ -48,7 +48,7 @@ config CAIF_USB
default n
---help---
Say Y if you are using CAIF over USB CDC NCM.
- This can be either built-in or a loadable module,
+ This can be either built-in or a loadable module.
If you select to build it as a built-in then the main CAIF device must
also be a built-in.
If unsure say N.
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 5518a7d9eed9..128d37a4c2e0 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -86,11 +86,12 @@ static atomic_t skbcounter = ATOMIC_INIT(0);
/* af_can socket functions */
-static void can_sock_destruct(struct sock *sk)
+void can_sock_destruct(struct sock *sk)
{
skb_queue_purge(&sk->sk_receive_queue);
skb_queue_purge(&sk->sk_error_queue);
}
+EXPORT_SYMBOL(can_sock_destruct);
static const struct can_proto *can_get_proto(int protocol)
{
diff --git a/net/can/j1939/main.c b/net/can/j1939/main.c
index def2f813ffce..137054bff9ec 100644
--- a/net/can/j1939/main.c
+++ b/net/can/j1939/main.c
@@ -51,6 +51,7 @@ static void j1939_can_recv(struct sk_buff *iskb, void *data)
if (!skb)
return;
+ j1939_priv_get(priv);
can_skb_set_owner(skb, iskb->sk);
/* get a pointer to the header of the skb
@@ -104,6 +105,7 @@ static void j1939_can_recv(struct sk_buff *iskb, void *data)
j1939_simple_recv(priv, skb);
j1939_sk_recv(priv, skb);
done:
+ j1939_priv_put(priv);
kfree_skb(skb);
}
@@ -150,6 +152,10 @@ static void __j1939_priv_release(struct kref *kref)
netdev_dbg(priv->ndev, "%s: 0x%p\n", __func__, priv);
+ WARN_ON_ONCE(!list_empty(&priv->active_session_list));
+ WARN_ON_ONCE(!list_empty(&priv->ecus));
+ WARN_ON_ONCE(!list_empty(&priv->j1939_socks));
+
dev_put(ndev);
kfree(priv);
}
@@ -207,6 +213,9 @@ static inline struct j1939_priv *j1939_ndev_to_priv(struct net_device *ndev)
{
struct can_ml_priv *can_ml_priv = ndev->ml_priv;
+ if (!can_ml_priv)
+ return NULL;
+
return can_ml_priv->j1939_priv;
}
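The j1939_priv_get()/j1939_priv_put() pair wrapped around j1939_can_recv() pins the priv structure for the duration of the receive callback, so a concurrent unbind cannot free it mid-delivery. The same discipline, boiled down to a self-contained userspace sketch:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct priv {
	atomic_int ref;
};

static struct priv *priv_get(struct priv *p)
{
	atomic_fetch_add(&p->ref, 1);
	return p;
}

static void priv_put(struct priv *p)
{
	if (atomic_fetch_sub(&p->ref, 1) == 1) {	/* last reference */
		printf("freeing priv\n");
		free(p);
	}
}

static void recv_cb(struct priv *p)
{
	priv_get(p);	/* pin priv across the whole handler ... */
	/* ... deliver the frame to listening sockets ... */
	priv_put(p);	/* ... and release it once done */
}

int main(void)
{
	struct priv *p = malloc(sizeof(*p));

	atomic_init(&p->ref, 1);
	recv_cb(p);	/* safe even if a "close" races with us */
	priv_put(p);	/* drop the initial reference */
	return 0;
}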
diff --git a/net/can/j1939/socket.c b/net/can/j1939/socket.c
index 4d8ba701e15d..de09b0a65791 100644
--- a/net/can/j1939/socket.c
+++ b/net/can/j1939/socket.c
@@ -78,7 +78,6 @@ static void j1939_jsk_add(struct j1939_priv *priv, struct j1939_sock *jsk)
{
jsk->state |= J1939_SOCK_BOUND;
j1939_priv_get(priv);
- jsk->priv = priv;
spin_lock_bh(&priv->j1939_socks_lock);
list_add_tail(&jsk->list, &priv->j1939_socks);
@@ -91,7 +90,6 @@ static void j1939_jsk_del(struct j1939_priv *priv, struct j1939_sock *jsk)
list_del_init(&jsk->list);
spin_unlock_bh(&priv->j1939_socks_lock);
- jsk->priv = NULL;
j1939_priv_put(priv);
jsk->state &= ~J1939_SOCK_BOUND;
}
@@ -349,6 +347,34 @@ void j1939_sk_recv(struct j1939_priv *priv, struct sk_buff *skb)
spin_unlock_bh(&priv->j1939_socks_lock);
}
+static void j1939_sk_sock_destruct(struct sock *sk)
+{
+ struct j1939_sock *jsk = j1939_sk(sk);
+
+ /* This function will be called by the generic networking code when
+ * the socket is ultimately closed (sk->sk_destruct).
+ *
+ * The race between
+ * - processing a received CAN frame
+ * (can_receive -> j1939_can_recv)
+ * and accessing j1939_priv
+ * ... and ...
+ * - closing a socket
+ * (j1939_can_rx_unregister -> can_rx_unregister)
+ * and calling the final j1939_priv_put()
+ *
+ * is avoided by calling the final j1939_priv_put() from this
+ * RCU deferred cleanup call.
+ */
+ if (jsk->priv) {
+ j1939_priv_put(jsk->priv);
+ jsk->priv = NULL;
+ }
+
+ /* call generic CAN sock destruct */
+ can_sock_destruct(sk);
+}
+
static int j1939_sk_init(struct sock *sk)
{
struct j1939_sock *jsk = j1939_sk(sk);
@@ -371,6 +397,7 @@ static int j1939_sk_init(struct sock *sk)
atomic_set(&jsk->skb_pending, 0);
spin_lock_init(&jsk->sk_session_queue_lock);
INIT_LIST_HEAD(&jsk->sk_session_queue);
+ sk->sk_destruct = j1939_sk_sock_destruct;
return 0;
}
@@ -443,6 +470,12 @@ static int j1939_sk_bind(struct socket *sock, struct sockaddr *uaddr, int len)
}
jsk->ifindex = addr->can_ifindex;
+
+ /* the corresponding j1939_priv_put() is called via
+ * sk->sk_destruct, which points to j1939_sk_sock_destruct()
+ */
+ j1939_priv_get(priv);
+ jsk->priv = priv;
}
/* set default transmit pgn */
@@ -560,8 +593,8 @@ static int j1939_sk_release(struct socket *sock)
if (!sk)
return 0;
- jsk = j1939_sk(sk);
lock_sock(sk);
+ jsk = j1939_sk(sk);
if (jsk->state & J1939_SOCK_BOUND) {
struct j1939_priv *priv = jsk->priv;
@@ -1059,51 +1092,72 @@ static int j1939_sk_sendmsg(struct socket *sock, struct msghdr *msg,
{
struct sock *sk = sock->sk;
struct j1939_sock *jsk = j1939_sk(sk);
- struct j1939_priv *priv = jsk->priv;
+ struct j1939_priv *priv;
int ifindex;
int ret;
+ lock_sock(sock->sk);
/* various socket state tests */
- if (!(jsk->state & J1939_SOCK_BOUND))
- return -EBADFD;
+ if (!(jsk->state & J1939_SOCK_BOUND)) {
+ ret = -EBADFD;
+ goto sendmsg_done;
+ }
+ priv = jsk->priv;
ifindex = jsk->ifindex;
- if (!jsk->addr.src_name && jsk->addr.sa == J1939_NO_ADDR)
+ if (!jsk->addr.src_name && jsk->addr.sa == J1939_NO_ADDR) {
/* no source address assigned yet */
- return -EBADFD;
+ ret = -EBADFD;
+ goto sendmsg_done;
+ }
/* deal with provided destination address info */
if (msg->msg_name) {
struct sockaddr_can *addr = msg->msg_name;
- if (msg->msg_namelen < J1939_MIN_NAMELEN)
- return -EINVAL;
+ if (msg->msg_namelen < J1939_MIN_NAMELEN) {
+ ret = -EINVAL;
+ goto sendmsg_done;
+ }
- if (addr->can_family != AF_CAN)
- return -EINVAL;
+ if (addr->can_family != AF_CAN) {
+ ret = -EINVAL;
+ goto sendmsg_done;
+ }
- if (addr->can_ifindex && addr->can_ifindex != ifindex)
- return -EBADFD;
+ if (addr->can_ifindex && addr->can_ifindex != ifindex) {
+ ret = -EBADFD;
+ goto sendmsg_done;
+ }
if (j1939_pgn_is_valid(addr->can_addr.j1939.pgn) &&
- !j1939_pgn_is_clean_pdu(addr->can_addr.j1939.pgn))
- return -EINVAL;
+ !j1939_pgn_is_clean_pdu(addr->can_addr.j1939.pgn)) {
+ ret = -EINVAL;
+ goto sendmsg_done;
+ }
if (!addr->can_addr.j1939.name &&
addr->can_addr.j1939.addr == J1939_NO_ADDR &&
- !sock_flag(sk, SOCK_BROADCAST))
+ !sock_flag(sk, SOCK_BROADCAST)) {
/* broadcast, but SO_BROADCAST not set */
- return -EACCES;
+ ret = -EACCES;
+ goto sendmsg_done;
+ }
} else {
if (!jsk->addr.dst_name && jsk->addr.da == J1939_NO_ADDR &&
- !sock_flag(sk, SOCK_BROADCAST))
+ !sock_flag(sk, SOCK_BROADCAST)) {
/* broadcast, but SO_BROADCAST not set */
- return -EACCES;
+ ret = -EACCES;
+ goto sendmsg_done;
+ }
}
ret = j1939_sk_send_loop(priv, sk, msg, size);
+sendmsg_done:
+ release_sock(sock->sk);
+
return ret;
}
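The sendmsg() rework above trades early returns for a single exit label so that every path releases the socket lock it now takes. The shape, stripped to its skeleton (the two helpers are illustrative):

static int my_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
{
	int ret;

	lock_sock(sock->sk);
	if (!socket_is_bound(sock->sk)) {	/* illustrative state check */
		ret = -EBADFD;
		goto sendmsg_done;
	}
	ret = do_send(sock->sk, msg, size);	/* illustrative send loop */
sendmsg_done:
	release_sock(sock->sk);
	return ret;
}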
diff --git a/net/can/j1939/transport.c b/net/can/j1939/transport.c
index e5f1a56994c6..9f99af5b0b11 100644
--- a/net/can/j1939/transport.c
+++ b/net/can/j1939/transport.c
@@ -255,6 +255,7 @@ static void __j1939_session_drop(struct j1939_session *session)
return;
j1939_sock_pending_del(session->sk);
+ sock_put(session->sk);
}
static void j1939_session_destroy(struct j1939_session *session)
@@ -266,6 +267,9 @@ static void j1939_session_destroy(struct j1939_session *session)
netdev_dbg(session->priv->ndev, "%s: 0x%p\n", __func__, session);
+ WARN_ON_ONCE(!list_empty(&session->sk_session_queue_entry));
+ WARN_ON_ONCE(!list_empty(&session->active_session_list_entry));
+
skb_queue_purge(&session->skb_queue);
__j1939_session_drop(session);
j1939_priv_put(session->priv);
@@ -1042,12 +1046,13 @@ j1939_session_deactivate_activate_next(struct j1939_session *session)
j1939_sk_queue_activate_next(session);
}
-static void j1939_session_cancel(struct j1939_session *session,
+static void __j1939_session_cancel(struct j1939_session *session,
enum j1939_xtp_abort err)
{
struct j1939_priv *priv = session->priv;
WARN_ON_ONCE(!err);
+ lockdep_assert_held(&session->priv->active_session_list_lock);
session->err = j1939_xtp_abort_to_errno(priv, err);
/* do not send aborts on incoming broadcasts */
@@ -1062,6 +1067,20 @@ static void j1939_session_cancel(struct j1939_session *session,
j1939_sk_send_loop_abort(session->sk, session->err);
}
+static void j1939_session_cancel(struct j1939_session *session,
+ enum j1939_xtp_abort err)
+{
+ j1939_session_list_lock(session->priv);
+
+ if (session->state >= J1939_SESSION_ACTIVE &&
+ session->state < J1939_SESSION_WAITING_ABORT) {
+ j1939_tp_set_rxtimeout(session, J1939_XTP_ABORT_TIMEOUT_MS);
+ __j1939_session_cancel(session, err);
+ }
+
+ j1939_session_list_unlock(session->priv);
+}
+
static enum hrtimer_restart j1939_tp_txtimer(struct hrtimer *hrtimer)
{
struct j1939_session *session =
@@ -1108,8 +1127,6 @@ static enum hrtimer_restart j1939_tp_txtimer(struct hrtimer *hrtimer)
netdev_alert(priv->ndev, "%s: 0x%p: tx aborted with unknown reason: %i\n",
__func__, session, ret);
if (session->skcb.addr.type != J1939_SIMPLE) {
- j1939_tp_set_rxtimeout(session,
- J1939_XTP_ABORT_TIMEOUT_MS);
j1939_session_cancel(session, J1939_XTP_ABORT_OTHER);
} else {
session->err = ret;
@@ -1169,7 +1186,7 @@ static enum hrtimer_restart j1939_tp_rxtimer(struct hrtimer *hrtimer)
hrtimer_start(&session->rxtimer,
ms_to_ktime(J1939_XTP_ABORT_TIMEOUT_MS),
HRTIMER_MODE_REL_SOFT);
- j1939_session_cancel(session, J1939_XTP_ABORT_TIMEOUT);
+ __j1939_session_cancel(session, J1939_XTP_ABORT_TIMEOUT);
}
j1939_session_list_unlock(session->priv);
}
@@ -1375,7 +1392,6 @@ j1939_xtp_rx_cts_one(struct j1939_session *session, struct sk_buff *skb)
out_session_cancel:
j1939_session_timers_cancel(session);
- j1939_tp_set_rxtimeout(session, J1939_XTP_ABORT_TIMEOUT_MS);
j1939_session_cancel(session, err);
}
@@ -1572,7 +1588,6 @@ static int j1939_xtp_rx_rts_session_active(struct j1939_session *session,
/* RTS on active session */
j1939_session_timers_cancel(session);
- j1939_tp_set_rxtimeout(session, J1939_XTP_ABORT_TIMEOUT_MS);
j1939_session_cancel(session, J1939_XTP_ABORT_BUSY);
}
@@ -1583,7 +1598,6 @@ static int j1939_xtp_rx_rts_session_active(struct j1939_session *session,
session->last_cmd);
j1939_session_timers_cancel(session);
- j1939_tp_set_rxtimeout(session, J1939_XTP_ABORT_TIMEOUT_MS);
j1939_session_cancel(session, J1939_XTP_ABORT_BUSY);
return -EBUSY;
@@ -1785,7 +1799,6 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session,
out_session_cancel:
j1939_session_timers_cancel(session);
- j1939_tp_set_rxtimeout(session, J1939_XTP_ABORT_TIMEOUT_MS);
j1939_session_cancel(session, J1939_XTP_ABORT_FAULT);
j1939_session_put(session);
}
@@ -1866,6 +1879,7 @@ struct j1939_session *j1939_tp_send(struct j1939_priv *priv,
return ERR_PTR(-ENOMEM);
/* skb is recounted in j1939_session_new() */
+ sock_hold(skb->sk);
session->sk = skb->sk;
session->transmission = true;
session->pkt.total = (size + 6) / 7;
@@ -2028,7 +2042,11 @@ int j1939_cancel_active_session(struct j1939_priv *priv, struct sock *sk)
&priv->active_session_list,
active_session_list_entry) {
if (!sk || sk == session->sk) {
- j1939_session_timers_cancel(session);
+ if (hrtimer_try_to_cancel(&session->txtimer) == 1)
+ j1939_session_put(session);
+ if (hrtimer_try_to_cancel(&session->rxtimer) == 1)
+ j1939_session_put(session);
+
session->err = ESHUTDOWN;
j1939_session_deactivate_locked(session);
}
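Splitting j1939_session_cancel() into a lock-taking wrapper and a double-underscore core follows the usual kernel naming convention: __foo() asserts the lock is already held, foo() acquires it around the core. In outline (structure names simplified):

static void __session_cancel(struct session *s)	/* caller holds lock */
{
	lockdep_assert_held(&s->priv->active_session_list_lock);
	/* ... set the error and queue the abort ... */
}

static void session_cancel(struct session *s)
{
	spin_lock_bh(&s->priv->active_session_list_lock);
	__session_cancel(s);
	spin_unlock_bh(&s->priv->active_session_list_lock);
}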
diff --git a/net/core/bpf_sk_storage.c b/net/core/bpf_sk_storage.c
index da5639a5bd3b..458be6b3eda9 100644
--- a/net/core/bpf_sk_storage.c
+++ b/net/core/bpf_sk_storage.c
@@ -798,7 +798,7 @@ int bpf_sk_storage_clone(const struct sock *sk, struct sock *newsk)
* Try to grab map refcnt to make sure that it's still
* alive and prevent concurrent removal.
*/
- map = bpf_map_inc_not_zero(&smap->map, false);
+ map = bpf_map_inc_not_zero(&smap->map);
if (IS_ERR(map))
continue;
diff --git a/net/core/dev.c b/net/core/dev.c
index 99ac84ff398f..46580b290450 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -229,6 +229,122 @@ static inline void rps_unlock(struct softnet_data *sd)
#endif
}
+static struct netdev_name_node *netdev_name_node_alloc(struct net_device *dev,
+ const char *name)
+{
+ struct netdev_name_node *name_node;
+
+ name_node = kmalloc(sizeof(*name_node), GFP_KERNEL);
+ if (!name_node)
+ return NULL;
+ INIT_HLIST_NODE(&name_node->hlist);
+ name_node->dev = dev;
+ name_node->name = name;
+ return name_node;
+}
+
+static struct netdev_name_node *
+netdev_name_node_head_alloc(struct net_device *dev)
+{
+ struct netdev_name_node *name_node;
+
+ name_node = netdev_name_node_alloc(dev, dev->name);
+ if (!name_node)
+ return NULL;
+ INIT_LIST_HEAD(&name_node->list);
+ return name_node;
+}
+
+static void netdev_name_node_free(struct netdev_name_node *name_node)
+{
+ kfree(name_node);
+}
+
+static void netdev_name_node_add(struct net *net,
+ struct netdev_name_node *name_node)
+{
+ hlist_add_head_rcu(&name_node->hlist,
+ dev_name_hash(net, name_node->name));
+}
+
+static void netdev_name_node_del(struct netdev_name_node *name_node)
+{
+ hlist_del_rcu(&name_node->hlist);
+}
+
+static struct netdev_name_node *netdev_name_node_lookup(struct net *net,
+ const char *name)
+{
+ struct hlist_head *head = dev_name_hash(net, name);
+ struct netdev_name_node *name_node;
+
+ hlist_for_each_entry(name_node, head, hlist)
+ if (!strcmp(name_node->name, name))
+ return name_node;
+ return NULL;
+}
+
+static struct netdev_name_node *netdev_name_node_lookup_rcu(struct net *net,
+ const char *name)
+{
+ struct hlist_head *head = dev_name_hash(net, name);
+ struct netdev_name_node *name_node;
+
+ hlist_for_each_entry_rcu(name_node, head, hlist)
+ if (!strcmp(name_node->name, name))
+ return name_node;
+ return NULL;
+}
+
+int netdev_name_node_alt_create(struct net_device *dev, const char *name)
+{
+ struct netdev_name_node *name_node;
+ struct net *net = dev_net(dev);
+
+ name_node = netdev_name_node_lookup(net, name);
+ if (name_node)
+ return -EEXIST;
+ name_node = netdev_name_node_alloc(dev, name);
+ if (!name_node)
+ return -ENOMEM;
+ netdev_name_node_add(net, name_node);
+ /* The node that holds dev->name acts as the head of the per-device list. */
+ list_add_tail(&name_node->list, &dev->name_node->list);
+
+ return 0;
+}
+EXPORT_SYMBOL(netdev_name_node_alt_create);
+
+static void __netdev_name_node_alt_destroy(struct netdev_name_node *name_node)
+{
+ list_del(&name_node->list);
+ netdev_name_node_del(name_node);
+ kfree(name_node->name);
+ netdev_name_node_free(name_node);
+}
+
+int netdev_name_node_alt_destroy(struct net_device *dev, const char *name)
+{
+ struct netdev_name_node *name_node;
+ struct net *net = dev_net(dev);
+
+ name_node = netdev_name_node_lookup(net, name);
+ if (!name_node)
+ return -ENOENT;
+ __netdev_name_node_alt_destroy(name_node);
+
+ return 0;
+}
+EXPORT_SYMBOL(netdev_name_node_alt_destroy);
+
+static void netdev_name_node_alt_flush(struct net_device *dev)
+{
+ struct netdev_name_node *name_node, *tmp;
+
+ list_for_each_entry_safe(name_node, tmp, &dev->name_node->list, list)
+ __netdev_name_node_alt_destroy(name_node);
+}
+
/* Device list insertion */
static void list_netdevice(struct net_device *dev)
{
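The name-node machinery lets several names hash to one net_device: the node holding dev->name doubles as the head of a per-device list, and netdev_name_node_alt_create() chains additional names onto it, all resolvable through the same hash table. Conceptually (error handling elided; "lan0" is illustrative):

	netdev_name_node_alt_create(dev, kstrdup("lan0", GFP_KERNEL));

	d1 = __dev_get_by_name(net, dev->name);	/* via the head node */
	d2 = __dev_get_by_name(net, "lan0");	/* via the alt node */
	/* d1 == d2: both names resolve to the same net_device */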
@@ -238,7 +354,7 @@ static void list_netdevice(struct net_device *dev)
write_lock_bh(&dev_base_lock);
list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
- hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
+ netdev_name_node_add(net, dev->name_node);
hlist_add_head_rcu(&dev->index_hlist,
dev_index_hash(net, dev->ifindex));
write_unlock_bh(&dev_base_lock);
@@ -256,7 +372,7 @@ static void unlist_netdevice(struct net_device *dev)
/* Unlink dev from the device chain */
write_lock_bh(&dev_base_lock);
list_del_rcu(&dev->dev_list);
- hlist_del_rcu(&dev->name_hlist);
+ netdev_name_node_del(dev->name_node);
hlist_del_rcu(&dev->index_hlist);
write_unlock_bh(&dev_base_lock);
@@ -652,14 +768,10 @@ EXPORT_SYMBOL_GPL(dev_fill_metadata_dst);
struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
- struct net_device *dev;
- struct hlist_head *head = dev_name_hash(net, name);
+ struct netdev_name_node *node_name;
- hlist_for_each_entry(dev, head, name_hlist)
- if (!strncmp(dev->name, name, IFNAMSIZ))
- return dev;
-
- return NULL;
+ node_name = netdev_name_node_lookup(net, name);
+ return node_name ? node_name->dev : NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);
@@ -677,14 +789,10 @@ EXPORT_SYMBOL(__dev_get_by_name);
struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
- struct net_device *dev;
- struct hlist_head *head = dev_name_hash(net, name);
-
- hlist_for_each_entry_rcu(dev, head, name_hlist)
- if (!strncmp(dev->name, name, IFNAMSIZ))
- return dev;
+ struct netdev_name_node *node_name;
- return NULL;
+ node_name = netdev_name_node_lookup_rcu(net, name);
+ return node_name ? node_name->dev : NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);
@@ -1060,8 +1168,8 @@ int dev_alloc_name(struct net_device *dev, const char *name)
}
EXPORT_SYMBOL(dev_alloc_name);
-int dev_get_valid_name(struct net *net, struct net_device *dev,
- const char *name)
+static int dev_get_valid_name(struct net *net, struct net_device *dev,
+ const char *name)
{
BUG_ON(!net);
@@ -1077,7 +1185,6 @@ int dev_get_valid_name(struct net *net, struct net_device *dev,
return 0;
}
-EXPORT_SYMBOL(dev_get_valid_name);
/**
* dev_change_name - change name of a device
@@ -1151,13 +1258,13 @@ rollback:
netdev_adjacent_rename_links(dev, oldname);
write_lock_bh(&dev_base_lock);
- hlist_del_rcu(&dev->name_hlist);
+ netdev_name_node_del(dev->name_node);
write_unlock_bh(&dev_base_lock);
synchronize_rcu();
write_lock_bh(&dev_base_lock);
- hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
+ netdev_name_node_add(net, dev->name_node);
write_unlock_bh(&dev_base_lock);
ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
@@ -1207,8 +1314,8 @@ int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
}
mutex_lock(&ifalias_mutex);
- rcu_swap_protected(dev->ifalias, new_alias,
- mutex_is_locked(&ifalias_mutex));
+ new_alias = rcu_replace_pointer(dev->ifalias, new_alias,
+ mutex_is_locked(&ifalias_mutex));
mutex_unlock(&ifalias_mutex);
if (new_alias)
@@ -1536,6 +1643,62 @@ static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
return nb->notifier_call(nb, val, &info);
}
+static int call_netdevice_register_notifiers(struct notifier_block *nb,
+ struct net_device *dev)
+{
+ int err;
+
+ err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
+ err = notifier_to_errno(err);
+ if (err)
+ return err;
+
+ if (!(dev->flags & IFF_UP))
+ return 0;
+
+ call_netdevice_notifier(nb, NETDEV_UP, dev);
+ return 0;
+}
+
+static void call_netdevice_unregister_notifiers(struct notifier_block *nb,
+ struct net_device *dev)
+{
+ if (dev->flags & IFF_UP) {
+ call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
+ dev);
+ call_netdevice_notifier(nb, NETDEV_DOWN, dev);
+ }
+ call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
+}
+
+static int call_netdevice_register_net_notifiers(struct notifier_block *nb,
+ struct net *net)
+{
+ struct net_device *dev;
+ int err;
+
+ for_each_netdev(net, dev) {
+ err = call_netdevice_register_notifiers(nb, dev);
+ if (err)
+ goto rollback;
+ }
+ return 0;
+
+rollback:
+ for_each_netdev_continue_reverse(net, dev)
+ call_netdevice_unregister_notifiers(nb, dev);
+ return err;
+}
+
+static void call_netdevice_unregister_net_notifiers(struct notifier_block *nb,
+ struct net *net)
+{
+ struct net_device *dev;
+
+ for_each_netdev(net, dev)
+ call_netdevice_unregister_notifiers(nb, dev);
+}
+
static int dev_boot_phase = 1;
/**
@@ -1554,8 +1717,6 @@ static int dev_boot_phase = 1;
int register_netdevice_notifier(struct notifier_block *nb)
{
- struct net_device *dev;
- struct net_device *last;
struct net *net;
int err;
@@ -1568,17 +1729,9 @@ int register_netdevice_notifier(struct notifier_block *nb)
if (dev_boot_phase)
goto unlock;
for_each_net(net) {
- for_each_netdev(net, dev) {
- err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
- err = notifier_to_errno(err);
- if (err)
- goto rollback;
-
- if (!(dev->flags & IFF_UP))
- continue;
-
- call_netdevice_notifier(nb, NETDEV_UP, dev);
- }
+ err = call_netdevice_register_net_notifiers(nb, net);
+ if (err)
+ goto rollback;
}
unlock:
@@ -1587,22 +1740,9 @@ unlock:
return err;
rollback:
- last = dev;
- for_each_net(net) {
- for_each_netdev(net, dev) {
- if (dev == last)
- goto outroll;
+ for_each_net_continue_reverse(net)
+ call_netdevice_unregister_net_notifiers(nb, net);
- if (dev->flags & IFF_UP) {
- call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
- dev);
- call_netdevice_notifier(nb, NETDEV_DOWN, dev);
- }
- call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
- }
- }
-
-outroll:
raw_notifier_chain_unregister(&netdev_chain, nb);
goto unlock;
}
@@ -1653,6 +1793,80 @@ unlock:
EXPORT_SYMBOL(unregister_netdevice_notifier);
/**
+ * register_netdevice_notifier_net - register a per-netns network notifier block
+ * @net: network namespace
+ * @nb: notifier
+ *
+ * Register a notifier to be called when network device events occur.
+ * The notifier passed is linked into the kernel structures and must
+ * not be reused until it has been unregistered. A negative errno code
+ * is returned on a failure.
+ *
+ * When registered, all registration and up events are replayed
+ * to the new notifier to allow the caller a race-free
+ * view of the network device list.
+ */
+
+int register_netdevice_notifier_net(struct net *net, struct notifier_block *nb)
+{
+ int err;
+
+ rtnl_lock();
+ err = raw_notifier_chain_register(&net->netdev_chain, nb);
+ if (err)
+ goto unlock;
+ if (dev_boot_phase)
+ goto unlock;
+
+ err = call_netdevice_register_net_notifiers(nb, net);
+ if (err)
+ goto chain_unregister;
+
+unlock:
+ rtnl_unlock();
+ return err;
+
+chain_unregister:
+ raw_notifier_chain_unregister(&net->netdev_chain, nb);
+ goto unlock;
+}
+EXPORT_SYMBOL(register_netdevice_notifier_net);
+
+/**
+ * unregister_netdevice_notifier_net - unregister a per-netns
+ * network notifier block
+ * @net: network namespace
+ * @nb: notifier
+ *
+ * Unregister a notifier previously registered by
+ * register_netdevice_notifier_net(). The notifier is unlinked from the
+ * kernel structures and may then be reused. A negative errno code
+ * is returned on a failure.
+ *
+ * After unregistering, unregister and down device events are synthesized
+ * for all devices on the device list and sent to the removed notifier,
+ * removing the need for special-case cleanup code.
+ */
+
+int unregister_netdevice_notifier_net(struct net *net,
+ struct notifier_block *nb)
+{
+ int err;
+
+ rtnl_lock();
+ err = raw_notifier_chain_unregister(&net->netdev_chain, nb);
+ if (err)
+ goto unlock;
+
+ call_netdevice_unregister_net_notifiers(nb, net);
+
+unlock:
+ rtnl_unlock();
+ return err;
+}
+EXPORT_SYMBOL(unregister_netdevice_notifier_net);
+
+/**
* call_netdevice_notifiers_info - call all network notifier blocks
* @val: value passed unmodified to notifier function
* @info: notifier information data
@@ -1664,7 +1878,18 @@ EXPORT_SYMBOL(unregister_netdevice_notifier);
static int call_netdevice_notifiers_info(unsigned long val,
struct netdev_notifier_info *info)
{
+ struct net *net = dev_net(info->dev);
+ int ret;
+
ASSERT_RTNL();
+
+ /* Run per-netns notifier block chain first, then run the global one.
+ * Hopefully, one day, the global one will be removed once
+ * all notifier block registrants have been converted to be per-netns.
+ */
+ ret = raw_notifier_call_chain(&net->netdev_chain, val, info);
+ if (ret & NOTIFY_STOP_MASK)
+ return ret;
return raw_notifier_call_chain(&netdev_chain, val, info);
}
@@ -2690,7 +2915,7 @@ static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
void netif_schedule_queue(struct netdev_queue *txq)
{
rcu_read_lock();
- if (!(txq->state & QUEUE_STATE_ANY_XOFF)) {
+ if (!netif_xmit_stopped(txq)) {
struct Qdisc *q = rcu_dereference(txq->qdisc);
__netif_schedule(q);
@@ -2858,12 +3083,9 @@ int skb_checksum_help(struct sk_buff *skb)
offset += skb->csum_offset;
BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
- if (skb_cloned(skb) &&
- !skb_clone_writable(skb, offset + sizeof(__sum16))) {
- ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
- if (ret)
- goto out;
- }
+ ret = skb_ensure_writable(skb, offset + sizeof(__sum16));
+ if (ret)
+ goto out;
*(__sum16 *)(skb->data + offset) = csum_fold(csum) ?: CSUM_MANGLED_0;
out_set_summed:
@@ -2898,12 +3120,11 @@ int skb_crc32c_csum_help(struct sk_buff *skb)
ret = -EINVAL;
goto out;
}
- if (skb_cloned(skb) &&
- !skb_clone_writable(skb, offset + sizeof(__le32))) {
- ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
- if (ret)
- goto out;
- }
+
+ ret = skb_ensure_writable(skb, offset + sizeof(__le32));
+ if (ret)
+ goto out;
+
crc32c_csum = cpu_to_le32(~__skb_checksum(skb, start,
skb->len - start, ~(__u32)0,
crc32c_csum_stub));
@@ -3386,7 +3607,7 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
qdisc_calculate_pkt_len(skb, q);
if (q->flags & TCQ_F_NOLOCK) {
- if ((q->flags & TCQ_F_CAN_BYPASS) && q->empty &&
+ if ((q->flags & TCQ_F_CAN_BYPASS) && READ_ONCE(q->empty) &&
qdisc_run_begin(q)) {
if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED,
&q->state))) {
@@ -5365,7 +5586,7 @@ static struct list_head *gro_list_prepare(struct napi_struct *napi,
diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
diffs |= skb_vlan_tag_present(p) ^ skb_vlan_tag_present(skb);
if (skb_vlan_tag_present(p))
- diffs |= p->vlan_tci ^ skb->vlan_tci;
+ diffs |= skb_vlan_tag_get(p) ^ skb_vlan_tag_get(skb);
diffs |= skb_metadata_dst_cmp(p, skb);
diffs |= skb_metadata_differs(p, skb);
if (maclen == ETH_HLEN)
@@ -5390,8 +5611,7 @@ static void skb_gro_reset_offset(struct sk_buff *skb)
NAPI_GRO_CB(skb)->frag0 = NULL;
NAPI_GRO_CB(skb)->frag0_len = 0;
- if (skb_mac_header(skb) == skb_tail_pointer(skb) &&
- pinfo->nr_frags &&
+ if (!skb_headlen(skb) && pinfo->nr_frags &&
!PageHighMem(skb_frag_page(frag0))) {
NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
@@ -5582,6 +5802,26 @@ struct packet_offload *gro_find_complete_by_type(__be16 type)
}
EXPORT_SYMBOL(gro_find_complete_by_type);
+/* Pass the currently batched GRO_NORMAL SKBs up to the stack. */
+static void gro_normal_list(struct napi_struct *napi)
+{
+ if (!napi->rx_count)
+ return;
+ netif_receive_skb_list_internal(&napi->rx_list);
+ INIT_LIST_HEAD(&napi->rx_list);
+ napi->rx_count = 0;
+}
+
+/* Queue one GRO_NORMAL SKB up for list processing. If batch size exceeded,
+ * pass the whole batch up to the stack.
+ */
+static void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb)
+{
+ list_add_tail(&skb->list, &napi->rx_list);
+ if (++napi->rx_count >= gro_normal_batch)
+ gro_normal_list(napi);
+}
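For context: gro_normal_batch above is the existing net.core.gro_normal_batch sysctl (default 8), so e.g. `sysctl -w net.core.gro_normal_batch=16` raises the flush threshold for these helpers. (The sysctl name and default are from the surrounding tree; worth double-checking against the version this applies to.)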
+
static void napi_skb_free_stolen_head(struct sk_buff *skb)
{
skb_dst_drop(skb);
@@ -5589,12 +5829,13 @@ static void napi_skb_free_stolen_head(struct sk_buff *skb)
kmem_cache_free(skbuff_head_cache, skb);
}
-static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
+static gro_result_t napi_skb_finish(struct napi_struct *napi,
+ struct sk_buff *skb,
+ gro_result_t ret)
{
switch (ret) {
case GRO_NORMAL:
- if (netif_receive_skb_internal(skb))
- ret = GRO_DROP;
+ gro_normal_one(napi, skb);
break;
case GRO_DROP:
@@ -5626,7 +5867,7 @@ gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
skb_gro_reset_offset(skb);
- ret = napi_skb_finish(dev_gro_receive(napi, skb), skb);
+ ret = napi_skb_finish(napi, skb, dev_gro_receive(napi, skb));
trace_napi_gro_receive_exit(ret);
return ret;
@@ -5672,26 +5913,6 @@ struct sk_buff *napi_get_frags(struct napi_struct *napi)
}
EXPORT_SYMBOL(napi_get_frags);
-/* Pass the currently batched GRO_NORMAL SKBs up to the stack. */
-static void gro_normal_list(struct napi_struct *napi)
-{
- if (!napi->rx_count)
- return;
- netif_receive_skb_list_internal(&napi->rx_list);
- INIT_LIST_HEAD(&napi->rx_list);
- napi->rx_count = 0;
-}
-
-/* Queue one GRO_NORMAL SKB up for list processing. If batch size exceeded,
- * pass the whole batch up to the stack.
- */
-static void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb)
-{
- list_add_tail(&skb->list, &napi->rx_list);
- if (++napi->rx_count >= gro_normal_batch)
- gro_normal_list(napi);
-}
-
static gro_result_t napi_frags_finish(struct napi_struct *napi,
struct sk_buff *skb,
gro_result_t ret)
@@ -8532,6 +8753,9 @@ static void rollback_registered_many(struct list_head *head)
dev_uc_flush(dev);
dev_mc_flush(dev);
+ netdev_name_node_alt_flush(dev);
+ netdev_name_node_free(dev->name_node);
+
if (dev->netdev_ops->ndo_uninit)
dev->netdev_ops->ndo_uninit(dev);
@@ -9011,6 +9235,11 @@ int register_netdevice(struct net_device *dev)
if (ret < 0)
goto out;
+ ret = -ENOMEM;
+ dev->name_node = netdev_name_node_head_alloc(dev);
+ if (!dev->name_node)
+ goto out;
+
/* Init, if this function is available */
if (dev->netdev_ops->ndo_init) {
ret = dev->netdev_ops->ndo_init(dev);
@@ -9132,6 +9361,8 @@ out:
return ret;
err_uninit:
+ if (dev->name_node)
+ netdev_name_node_free(dev->name_node);
if (dev->netdev_ops->ndo_uninit)
dev->netdev_ops->ndo_uninit(dev);
if (dev->priv_destructor)
@@ -9946,6 +10177,8 @@ static int __net_init netdev_init(struct net *net)
if (net->dev_index_head == NULL)
goto err_idx;
+ RAW_INIT_NOTIFIER_HEAD(&net->netdev_chain);
+
return 0;
err_idx:
diff --git a/net/core/devlink.c b/net/core/devlink.c
index f80151eeaf51..4c63c9a4c09e 100644
--- a/net/core/devlink.c
+++ b/net/core/devlink.c
@@ -95,16 +95,25 @@ static LIST_HEAD(devlink_list);
*/
static DEFINE_MUTEX(devlink_mutex);
-static struct net *devlink_net(const struct devlink *devlink)
+struct net *devlink_net(const struct devlink *devlink)
{
return read_pnet(&devlink->_net);
}
+EXPORT_SYMBOL_GPL(devlink_net);
-static void devlink_net_set(struct devlink *devlink, struct net *net)
+static void __devlink_net_set(struct devlink *devlink, struct net *net)
{
write_pnet(&devlink->_net, net);
}
+void devlink_net_set(struct devlink *devlink, struct net *net)
+{
+ if (WARN_ON(devlink->registered))
+ return;
+ __devlink_net_set(devlink, net);
+}
+EXPORT_SYMBOL_GPL(devlink_net_set);
+
static struct devlink *devlink_get_from_attrs(struct net *net,
struct nlattr **attrs)
{
@@ -434,8 +443,16 @@ static void devlink_nl_post_doit(const struct genl_ops *ops,
{
struct devlink *devlink;
- devlink = devlink_get_from_info(info);
- if (~ops->internal_flags & DEVLINK_NL_FLAG_NO_LOCK)
+ /* When devlink changes netns, it will not be found
+ * by devlink_get_from_info(). So check if it is stored first.
+ */
+ if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_DEVLINK) {
+ devlink = info->user_ptr[0];
+ } else {
+ devlink = devlink_get_from_info(info);
+ WARN_ON(IS_ERR(devlink));
+ }
+ if (!IS_ERR(devlink) && ~ops->internal_flags & DEVLINK_NL_FLAG_NO_LOCK)
mutex_unlock(&devlink->lock);
mutex_unlock(&devlink_mutex);
}
@@ -1035,7 +1052,7 @@ static int devlink_nl_cmd_sb_pool_get_dumpit(struct sk_buff *msg,
struct devlink_sb *devlink_sb;
int start = cb->args[0];
int idx = 0;
- int err;
+ int err = 0;
mutex_lock(&devlink_mutex);
list_for_each_entry(devlink, &devlink_list, list) {
@@ -1058,6 +1075,9 @@ static int devlink_nl_cmd_sb_pool_get_dumpit(struct sk_buff *msg,
out:
mutex_unlock(&devlink_mutex);
+ if (err != -EMSGSIZE)
+ return err;
+
cb->args[0] = idx;
return msg->len;
}
@@ -1233,7 +1253,7 @@ static int devlink_nl_cmd_sb_port_pool_get_dumpit(struct sk_buff *msg,
struct devlink_sb *devlink_sb;
int start = cb->args[0];
int idx = 0;
- int err;
+ int err = 0;
mutex_lock(&devlink_mutex);
list_for_each_entry(devlink, &devlink_list, list) {
@@ -1256,6 +1276,9 @@ static int devlink_nl_cmd_sb_port_pool_get_dumpit(struct sk_buff *msg,
out:
mutex_unlock(&devlink_mutex);
+ if (err != -EMSGSIZE)
+ return err;
+
cb->args[0] = idx;
return msg->len;
}
@@ -1460,7 +1483,7 @@ devlink_nl_cmd_sb_tc_pool_bind_get_dumpit(struct sk_buff *msg,
struct devlink_sb *devlink_sb;
int start = cb->args[0];
int idx = 0;
- int err;
+ int err = 0;
mutex_lock(&devlink_mutex);
list_for_each_entry(devlink, &devlink_list, list) {
@@ -1485,6 +1508,9 @@ devlink_nl_cmd_sb_tc_pool_bind_get_dumpit(struct sk_buff *msg,
out:
mutex_unlock(&devlink_mutex);
+ if (err != -EMSGSIZE)
+ return err;
+
cb->args[0] = idx;
return msg->len;
}
@@ -2674,6 +2700,72 @@ devlink_resources_validate(struct devlink *devlink,
return err;
}
+static struct net *devlink_netns_get(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct nlattr *netns_pid_attr = info->attrs[DEVLINK_ATTR_NETNS_PID];
+ struct nlattr *netns_fd_attr = info->attrs[DEVLINK_ATTR_NETNS_FD];
+ struct nlattr *netns_id_attr = info->attrs[DEVLINK_ATTR_NETNS_ID];
+ struct net *net;
+
+ if (!!netns_pid_attr + !!netns_fd_attr + !!netns_id_attr > 1) {
+ NL_SET_ERR_MSG(info->extack, "multiple netns identifying attributes specified");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (netns_pid_attr) {
+ net = get_net_ns_by_pid(nla_get_u32(netns_pid_attr));
+ } else if (netns_fd_attr) {
+ net = get_net_ns_by_fd(nla_get_u32(netns_fd_attr));
+ } else if (netns_id_attr) {
+ net = get_net_ns_by_id(sock_net(skb->sk),
+ nla_get_u32(netns_id_attr));
+ if (!net)
+ net = ERR_PTR(-EINVAL);
+ } else {
+ WARN_ON(1);
+ net = ERR_PTR(-EINVAL);
+ }
+ if (IS_ERR(net)) {
+ NL_SET_ERR_MSG(info->extack, "Unknown network namespace");
+ return ERR_PTR(-EINVAL);
+ }
+ if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) {
+ put_net(net);
+ return ERR_PTR(-EPERM);
+ }
+ return net;
+}
+
+static void devlink_param_notify(struct devlink *devlink,
+ unsigned int port_index,
+ struct devlink_param_item *param_item,
+ enum devlink_command cmd);
+
+static void devlink_reload_netns_change(struct devlink *devlink,
+ struct net *dest_net)
+{
+ struct devlink_param_item *param_item;
+
+ /* Userspace needs to be notified about devlink objects
+ * being removed from the original network namespace and entering
+ * the new one. The rest of the devlink objects are re-created during
+ * the reload process, so those notifications are generated separately.
+ */
+
+ list_for_each_entry(param_item, &devlink->param_list, list)
+ devlink_param_notify(devlink, 0, param_item,
+ DEVLINK_CMD_PARAM_DEL);
+ devlink_notify(devlink, DEVLINK_CMD_DEL);
+
+ __devlink_net_set(devlink, dest_net);
+
+ devlink_notify(devlink, DEVLINK_CMD_NEW);
+ list_for_each_entry(param_item, &devlink->param_list, list)
+ devlink_param_notify(devlink, 0, param_item,
+ DEVLINK_CMD_PARAM_NEW);
+}
+
static bool devlink_reload_supported(struct devlink *devlink)
{
return devlink->ops->reload_down && devlink->ops->reload_up;
@@ -2694,12 +2786,33 @@ bool devlink_is_reload_failed(const struct devlink *devlink)
}
EXPORT_SYMBOL_GPL(devlink_is_reload_failed);
+static int devlink_reload(struct devlink *devlink, struct net *dest_net,
+ struct netlink_ext_ack *extack)
+{
+ int err;
+
+ if (!devlink->reload_enabled)
+ return -EOPNOTSUPP;
+
+ err = devlink->ops->reload_down(devlink, !!dest_net, extack);
+ if (err)
+ return err;
+
+ if (dest_net && !net_eq(dest_net, devlink_net(devlink)))
+ devlink_reload_netns_change(devlink, dest_net);
+
+ err = devlink->ops->reload_up(devlink, extack);
+ devlink_reload_failed_set(devlink, !!err);
+ return err;
+}
+
static int devlink_nl_cmd_reload(struct sk_buff *skb, struct genl_info *info)
{
struct devlink *devlink = info->user_ptr[0];
+ struct net *dest_net = NULL;
int err;
- if (!devlink_reload_supported(devlink))
+ if (!devlink_reload_supported(devlink) || !devlink->reload_enabled)
return -EOPNOTSUPP;
err = devlink_resources_validate(devlink, NULL, info);
@@ -2707,11 +2820,20 @@ static int devlink_nl_cmd_reload(struct sk_buff *skb, struct genl_info *info)
NL_SET_ERR_MSG_MOD(info->extack, "resources size validation failed");
return err;
}
- err = devlink->ops->reload_down(devlink, info->extack);
- if (err)
- return err;
- err = devlink->ops->reload_up(devlink, info->extack);
- devlink_reload_failed_set(devlink, !!err);
+
+ if (info->attrs[DEVLINK_ATTR_NETNS_PID] ||
+ info->attrs[DEVLINK_ATTR_NETNS_FD] ||
+ info->attrs[DEVLINK_ATTR_NETNS_ID]) {
+ dest_net = devlink_netns_get(skb, info);
+ if (IS_ERR(dest_net))
+ return PTR_ERR(dest_net);
+ }
+
+ err = devlink_reload(devlink, dest_net, info->extack);
+
+ if (dest_net)
+ put_net(dest_net);
+
return err;
}
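From userspace this is expected to be driven by iproute2's devlink tool, roughly (hypothetical invocation; exact syntax depends on the devlink utility version):

    devlink dev reload pci/0000:01:00.0 netns 4242

where 4242 is a PID identifying the target namespace; a netns file descriptor or ID may be passed instead, matching DEVLINK_ATTR_NETNS_{PID,FD,ID} above.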
@@ -2884,6 +3006,11 @@ static const struct devlink_param devlink_param_generic[] = {
.name = DEVLINK_PARAM_GENERIC_RESET_DEV_ON_DRV_PROBE_NAME,
.type = DEVLINK_PARAM_GENERIC_RESET_DEV_ON_DRV_PROBE_TYPE,
},
+ {
+ .id = DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE,
+ .name = DEVLINK_PARAM_GENERIC_ENABLE_ROCE_NAME,
+ .type = DEVLINK_PARAM_GENERIC_ENABLE_ROCE_TYPE,
+ },
};
static int devlink_param_generic_verify(const struct devlink_param *param)
@@ -3155,7 +3282,7 @@ static int devlink_nl_cmd_param_get_dumpit(struct sk_buff *msg,
struct devlink *devlink;
int start = cb->args[0];
int idx = 0;
- int err;
+ int err = 0;
mutex_lock(&devlink_mutex);
list_for_each_entry(devlink, &devlink_list, list) {
@@ -3183,6 +3310,9 @@ static int devlink_nl_cmd_param_get_dumpit(struct sk_buff *msg,
out:
mutex_unlock(&devlink_mutex);
+ if (err != -EMSGSIZE)
+ return err;
+
cb->args[0] = idx;
return msg->len;
}
@@ -3411,7 +3541,7 @@ static int devlink_nl_cmd_port_param_get_dumpit(struct sk_buff *msg,
struct devlink *devlink;
int start = cb->args[0];
int idx = 0;
- int err;
+ int err = 0;
mutex_lock(&devlink_mutex);
list_for_each_entry(devlink, &devlink_list, list) {
@@ -3444,6 +3574,9 @@ static int devlink_nl_cmd_port_param_get_dumpit(struct sk_buff *msg,
out:
mutex_unlock(&devlink_mutex);
+ if (err != -EMSGSIZE)
+ return err;
+
cb->args[0] = idx;
return msg->len;
}
@@ -3818,29 +3951,19 @@ static int devlink_nl_region_read_snapshot_fill(struct sk_buff *skb,
static int devlink_nl_cmd_region_read_dumpit(struct sk_buff *skb,
struct netlink_callback *cb)
{
+ const struct genl_dumpit_info *info = genl_dumpit_info(cb);
u64 ret_offset, start_offset, end_offset = 0;
+ struct nlattr **attrs = info->attrs;
struct devlink_region *region;
struct nlattr *chunks_attr;
const char *region_name;
struct devlink *devlink;
- struct nlattr **attrs;
bool dump = true;
void *hdr;
int err;
start_offset = *((u64 *)&cb->args[0]);
- attrs = kmalloc_array(DEVLINK_ATTR_MAX + 1, sizeof(*attrs), GFP_KERNEL);
- if (!attrs)
- return -ENOMEM;
-
- err = nlmsg_parse_deprecated(cb->nlh,
- GENL_HDRLEN + devlink_nl_family.hdrsize,
- attrs, DEVLINK_ATTR_MAX,
- devlink_nl_family.policy, cb->extack);
- if (err)
- goto out_free;
-
mutex_lock(&devlink_mutex);
devlink = devlink_get_from_attrs(sock_net(cb->skb->sk), attrs);
if (IS_ERR(devlink)) {
@@ -3917,7 +4040,6 @@ static int devlink_nl_cmd_region_read_dumpit(struct sk_buff *skb,
genlmsg_end(skb, hdr);
mutex_unlock(&devlink->lock);
mutex_unlock(&devlink_mutex);
- kfree(attrs);
return skb->len;
@@ -3927,8 +4049,6 @@ out_unlock:
mutex_unlock(&devlink->lock);
out_dev:
mutex_unlock(&devlink_mutex);
-out_free:
- kfree(attrs);
return err;
}
@@ -4066,7 +4186,7 @@ static int devlink_nl_cmd_info_get_dumpit(struct sk_buff *msg,
struct devlink *devlink;
int start = cb->args[0];
int idx = 0;
- int err;
+ int err = 0;
mutex_lock(&devlink_mutex);
list_for_each_entry(devlink, &devlink_list, list) {
@@ -4094,6 +4214,9 @@ static int devlink_nl_cmd_info_get_dumpit(struct sk_buff *msg,
}
mutex_unlock(&devlink_mutex);
+ if (err != -EMSGSIZE)
+ return err;
+
cb->args[0] = idx;
return msg->len;
}
@@ -4296,12 +4419,11 @@ int devlink_fmsg_string_put(struct devlink_fmsg *fmsg, const char *value)
}
EXPORT_SYMBOL_GPL(devlink_fmsg_string_put);
-int devlink_fmsg_binary_put(struct devlink_fmsg *fmsg, const void *value,
- u16 value_len)
+static int devlink_fmsg_binary_put(struct devlink_fmsg *fmsg, const void *value,
+ u16 value_len)
{
return devlink_fmsg_put_value(fmsg, value, value_len, NLA_BINARY);
}
-EXPORT_SYMBOL_GPL(devlink_fmsg_binary_put);
int devlink_fmsg_bool_pair_put(struct devlink_fmsg *fmsg, const char *name,
bool value)
@@ -4409,19 +4531,26 @@ int devlink_fmsg_string_pair_put(struct devlink_fmsg *fmsg, const char *name,
EXPORT_SYMBOL_GPL(devlink_fmsg_string_pair_put);
int devlink_fmsg_binary_pair_put(struct devlink_fmsg *fmsg, const char *name,
- const void *value, u16 value_len)
+ const void *value, u32 value_len)
{
+ u32 data_size;
+ u32 offset;
int err;
- err = devlink_fmsg_pair_nest_start(fmsg, name);
+ err = devlink_fmsg_arr_pair_nest_start(fmsg, name);
if (err)
return err;
- err = devlink_fmsg_binary_put(fmsg, value, value_len);
- if (err)
- return err;
+ for (offset = 0; offset < value_len; offset += data_size) {
+ data_size = value_len - offset;
+ if (data_size > DEVLINK_FMSG_MAX_SIZE)
+ data_size = DEVLINK_FMSG_MAX_SIZE;
+ err = devlink_fmsg_binary_put(fmsg, value + offset, data_size);
+ if (err)
+ return err;
+ }
- err = devlink_fmsg_pair_nest_end(fmsg);
+ err = devlink_fmsg_arr_pair_nest_end(fmsg);
if (err)
return err;
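Worked example (illustrative numbers only): if DEVLINK_FMSG_MAX_SIZE were 4096 bytes, a 10240-byte blob passed to devlink_fmsg_binary_pair_put() would now be emitted inside the array pair as three binary attributes of 4096, 4096 and 2048 bytes, keeping every netlink attribute within bounds instead of failing on one oversized attribute.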
@@ -4618,6 +4747,7 @@ struct devlink_health_reporter {
bool auto_recover;
u8 health_state;
u64 dump_ts;
+ u64 dump_real_ts;
u64 error_count;
u64 recovery_count;
u64 last_recovery_ts;
@@ -4732,14 +4862,17 @@ EXPORT_SYMBOL_GPL(devlink_health_reporter_state_update);
static int
devlink_health_reporter_recover(struct devlink_health_reporter *reporter,
- void *priv_ctx)
+ void *priv_ctx, struct netlink_ext_ack *extack)
{
int err;
+ if (reporter->health_state == DEVLINK_HEALTH_REPORTER_STATE_HEALTHY)
+ return 0;
+
if (!reporter->ops->recover)
return -EOPNOTSUPP;
- err = reporter->ops->recover(reporter, priv_ctx);
+ err = reporter->ops->recover(reporter, priv_ctx, extack);
if (err)
return err;
@@ -4760,7 +4893,8 @@ devlink_health_dump_clear(struct devlink_health_reporter *reporter)
}
static int devlink_health_do_dump(struct devlink_health_reporter *reporter,
- void *priv_ctx)
+ void *priv_ctx,
+ struct netlink_ext_ack *extack)
{
int err;
@@ -4781,7 +4915,7 @@ static int devlink_health_do_dump(struct devlink_health_reporter *reporter,
goto dump_err;
err = reporter->ops->dump(reporter, reporter->dump_fmsg,
- priv_ctx);
+ priv_ctx, extack);
if (err)
goto dump_err;
@@ -4790,6 +4924,7 @@ static int devlink_health_do_dump(struct devlink_health_reporter *reporter,
goto dump_err;
reporter->dump_ts = jiffies;
+ reporter->dump_real_ts = ktime_get_real_ns();
return 0;
@@ -4828,11 +4963,12 @@ int devlink_health_report(struct devlink_health_reporter *reporter,
mutex_lock(&reporter->dump_lock);
/* store current dump of current error, for later analysis */
- devlink_health_do_dump(reporter, priv_ctx);
+ devlink_health_do_dump(reporter, priv_ctx, NULL);
mutex_unlock(&reporter->dump_lock);
if (reporter->auto_recover)
- return devlink_health_reporter_recover(reporter, priv_ctx);
+ return devlink_health_reporter_recover(reporter,
+ priv_ctx, NULL);
return 0;
}
@@ -4867,21 +5003,10 @@ devlink_health_reporter_get_from_info(struct devlink *devlink,
static struct devlink_health_reporter *
devlink_health_reporter_get_from_cb(struct netlink_callback *cb)
{
+ const struct genl_dumpit_info *info = genl_dumpit_info(cb);
struct devlink_health_reporter *reporter;
+ struct nlattr **attrs = info->attrs;
struct devlink *devlink;
- struct nlattr **attrs;
- int err;
-
- attrs = kmalloc_array(DEVLINK_ATTR_MAX + 1, sizeof(*attrs), GFP_KERNEL);
- if (!attrs)
- return NULL;
-
- err = nlmsg_parse_deprecated(cb->nlh,
- GENL_HDRLEN + devlink_nl_family.hdrsize,
- attrs, DEVLINK_ATTR_MAX,
- devlink_nl_family.policy, cb->extack);
- if (err)
- goto free;
mutex_lock(&devlink_mutex);
devlink = devlink_get_from_attrs(sock_net(cb->skb->sk), attrs);
@@ -4890,12 +5015,9 @@ devlink_health_reporter_get_from_cb(struct netlink_callback *cb)
reporter = devlink_health_reporter_get_from_attrs(devlink, attrs);
mutex_unlock(&devlink_mutex);
- kfree(attrs);
return reporter;
unlock:
mutex_unlock(&devlink_mutex);
-free:
- kfree(attrs);
return NULL;
}
@@ -4952,6 +5074,10 @@ devlink_nl_health_reporter_fill(struct sk_buff *msg,
jiffies_to_msecs(reporter->dump_ts),
DEVLINK_ATTR_PAD))
goto reporter_nest_cancel;
+ if (reporter->dump_fmsg &&
+ nla_put_u64_64bit(msg, DEVLINK_ATTR_HEALTH_REPORTER_DUMP_TS_NS,
+ reporter->dump_real_ts, DEVLINK_ATTR_PAD))
+ goto reporter_nest_cancel;
nla_nest_end(msg, reporter_attr);
genlmsg_end(msg, hdr);
@@ -5084,7 +5210,7 @@ static int devlink_nl_cmd_health_reporter_recover_doit(struct sk_buff *skb,
if (!reporter)
return -EINVAL;
- err = devlink_health_reporter_recover(reporter, NULL);
+ err = devlink_health_reporter_recover(reporter, NULL, info->extack);
devlink_health_reporter_put(reporter);
return err;
@@ -5117,7 +5243,7 @@ static int devlink_nl_cmd_health_reporter_diagnose_doit(struct sk_buff *skb,
if (err)
goto out;
- err = reporter->ops->diagnose(reporter, fmsg);
+ err = reporter->ops->diagnose(reporter, fmsg, info->extack);
if (err)
goto out;
@@ -5152,7 +5278,7 @@ devlink_nl_cmd_health_reporter_dump_get_dumpit(struct sk_buff *skb,
}
mutex_lock(&reporter->dump_lock);
if (!start) {
- err = devlink_health_do_dump(reporter, NULL);
+ err = devlink_health_do_dump(reporter, NULL, cb->extack);
if (err)
goto unlock;
cb->args[1] = reporter->dump_ts;
@@ -5793,6 +5919,9 @@ static const struct nla_policy devlink_nl_policy[DEVLINK_ATTR_MAX + 1] = {
[DEVLINK_ATTR_TRAP_NAME] = { .type = NLA_NUL_STRING },
[DEVLINK_ATTR_TRAP_ACTION] = { .type = NLA_U8 },
[DEVLINK_ATTR_TRAP_GROUP_NAME] = { .type = NLA_NUL_STRING },
+ [DEVLINK_ATTR_NETNS_PID] = { .type = NLA_U32 },
+ [DEVLINK_ATTR_NETNS_FD] = { .type = NLA_U32 },
+ [DEVLINK_ATTR_NETNS_ID] = { .type = NLA_U32 },
};
static const struct genl_ops devlink_nl_ops[] = {
@@ -6023,7 +6152,8 @@ static const struct genl_ops devlink_nl_ops[] = {
},
{
.cmd = DEVLINK_CMD_REGION_READ,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .validate = GENL_DONT_VALIDATE_STRICT |
+ GENL_DONT_VALIDATE_DUMP_STRICT,
.dumpit = devlink_nl_cmd_region_read_dumpit,
.flags = GENL_ADMIN_PERM,
.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
@@ -6071,7 +6201,8 @@ static const struct genl_ops devlink_nl_ops[] = {
},
{
.cmd = DEVLINK_CMD_HEALTH_REPORTER_DUMP_GET,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .validate = GENL_DONT_VALIDATE_STRICT |
+ GENL_DONT_VALIDATE_DUMP_STRICT,
.dumpit = devlink_nl_cmd_health_reporter_dump_get_dumpit,
.flags = GENL_ADMIN_PERM,
.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
@@ -6155,7 +6286,7 @@ struct devlink *devlink_alloc(const struct devlink_ops *ops, size_t priv_size)
if (!devlink)
return NULL;
devlink->ops = ops;
- devlink_net_set(devlink, &init_net);
+ __devlink_net_set(devlink, &init_net);
INIT_LIST_HEAD(&devlink->port_list);
INIT_LIST_HEAD(&devlink->sb_list);
INIT_LIST_HEAD_RCU(&devlink->dpipe_table_list);
@@ -6181,6 +6312,7 @@ int devlink_register(struct devlink *devlink, struct device *dev)
{
mutex_lock(&devlink_mutex);
devlink->dev = dev;
+ devlink->registered = true;
list_add_tail(&devlink->list, &devlink_list);
devlink_notify(devlink, DEVLINK_CMD_NEW);
mutex_unlock(&devlink_mutex);
@@ -6196,6 +6328,8 @@ EXPORT_SYMBOL_GPL(devlink_register);
void devlink_unregister(struct devlink *devlink)
{
mutex_lock(&devlink_mutex);
+ WARN_ON(devlink_reload_supported(devlink) &&
+ devlink->reload_enabled);
devlink_notify(devlink, DEVLINK_CMD_DEL);
list_del(&devlink->list);
mutex_unlock(&devlink_mutex);
@@ -6203,6 +6337,41 @@ void devlink_unregister(struct devlink *devlink)
EXPORT_SYMBOL_GPL(devlink_unregister);
/**
+ * devlink_reload_enable - Enable reload of devlink instance
+ *
+ * @devlink: devlink
+ *
+ * Should be called at end of device initialization
+ * process when reload operation is supported.
+ */
+void devlink_reload_enable(struct devlink *devlink)
+{
+ mutex_lock(&devlink_mutex);
+ devlink->reload_enabled = true;
+ mutex_unlock(&devlink_mutex);
+}
+EXPORT_SYMBOL_GPL(devlink_reload_enable);
+
+/**
+ * devlink_reload_disable - Disable reload of devlink instance
+ *
+ * @devlink: devlink
+ *
+ * Should be called at the beginning of device cleanup
+ * process when reload operation is supported.
+ */
+void devlink_reload_disable(struct devlink *devlink)
+{
+ mutex_lock(&devlink_mutex);
+ /* The mutex is taken to ensure that no reload operation is in
+ * progress while reload is being marked forbidden.
+ */
+ devlink->reload_enabled = false;
+ mutex_unlock(&devlink_mutex);
+}
+EXPORT_SYMBOL_GPL(devlink_reload_disable);
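A sketch of the driver pattern these helpers expect (hypothetical code; struct my_dev and its fields are invented):

    static int my_driver_probe(struct my_dev *mydev)
    {
            int err = devlink_register(mydev->devlink, mydev->dev);

            if (err)
                    return err;
            /* ... remaining initialization ... */
            devlink_reload_enable(mydev->devlink);
            return 0;
    }

    static void my_driver_remove(struct my_dev *mydev)
    {
            devlink_reload_disable(mydev->devlink);
            /* ... teardown ... */
            devlink_unregister(mydev->devlink);
    }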
+
+/**
* devlink_free - Free devlink instance resources
*
* @devlink: devlink
@@ -7490,6 +7659,21 @@ static const struct devlink_trap devlink_trap_generic[] = {
DEVLINK_TRAP(BLACKHOLE_ROUTE, DROP),
DEVLINK_TRAP(TTL_ERROR, EXCEPTION),
DEVLINK_TRAP(TAIL_DROP, DROP),
+ DEVLINK_TRAP(NON_IP_PACKET, DROP),
+ DEVLINK_TRAP(UC_DIP_MC_DMAC, DROP),
+ DEVLINK_TRAP(DIP_LB, DROP),
+ DEVLINK_TRAP(SIP_MC, DROP),
+ DEVLINK_TRAP(SIP_LB, DROP),
+ DEVLINK_TRAP(CORRUPTED_IP_HDR, DROP),
+ DEVLINK_TRAP(IPV4_SIP_BC, DROP),
+ DEVLINK_TRAP(IPV6_MC_DIP_RESERVED_SCOPE, DROP),
+ DEVLINK_TRAP(IPV6_MC_DIP_INTERFACE_LOCAL_SCOPE, DROP),
+ DEVLINK_TRAP(MTU_ERROR, EXCEPTION),
+ DEVLINK_TRAP(UNRESOLVED_NEIGH, EXCEPTION),
+ DEVLINK_TRAP(RPF, EXCEPTION),
+ DEVLINK_TRAP(REJECT_ROUTE, EXCEPTION),
+ DEVLINK_TRAP(IPV4_LPM_UNICAST_MISS, EXCEPTION),
+ DEVLINK_TRAP(IPV6_LPM_UNICAST_MISS, EXCEPTION),
};
#define DEVLINK_TRAP_GROUP(_id) \
@@ -8060,9 +8244,43 @@ int devlink_compat_switch_id_get(struct net_device *dev,
return 0;
}
+static void __net_exit devlink_pernet_pre_exit(struct net *net)
+{
+ struct devlink *devlink;
+ int err;
+
+ /* When a network namespace is being destroyed, reload
+ * all devlink instances from that namespace into init_net.
+ */
+ mutex_lock(&devlink_mutex);
+ list_for_each_entry(devlink, &devlink_list, list) {
+ if (net_eq(devlink_net(devlink), net)) {
+ if (WARN_ON(!devlink_reload_supported(devlink)))
+ continue;
+ err = devlink_reload(devlink, &init_net, NULL);
+ if (err && err != -EOPNOTSUPP)
+ pr_warn("Failed to reload devlink instance into init_net\n");
+ }
+ }
+ mutex_unlock(&devlink_mutex);
+}
+
+static struct pernet_operations devlink_pernet_ops __net_initdata = {
+ .pre_exit = devlink_pernet_pre_exit,
+};
+
static int __init devlink_init(void)
{
- return genl_register_family(&devlink_nl_family);
+ int err;
+
+ err = genl_register_family(&devlink_nl_family);
+ if (err)
+ goto out;
+ err = register_pernet_subsys(&devlink_pernet_ops);
+
+out:
+ WARN_ON(err);
+ return err;
}
subsys_initcall(devlink_init);
diff --git a/net/core/fib_notifier.c b/net/core/fib_notifier.c
index 470a606d5e8d..fc96259807b6 100644
--- a/net/core/fib_notifier.c
+++ b/net/core/fib_notifier.c
@@ -12,17 +12,15 @@ static unsigned int fib_notifier_net_id;
struct fib_notifier_net {
struct list_head fib_notifier_ops;
+ struct atomic_notifier_head fib_chain;
};
-static ATOMIC_NOTIFIER_HEAD(fib_chain);
-
-int call_fib_notifier(struct notifier_block *nb, struct net *net,
+int call_fib_notifier(struct notifier_block *nb,
enum fib_event_type event_type,
struct fib_notifier_info *info)
{
int err;
- info->net = net;
err = nb->notifier_call(nb, event_type, info);
return notifier_to_errno(err);
}
@@ -31,106 +29,100 @@ EXPORT_SYMBOL(call_fib_notifier);
int call_fib_notifiers(struct net *net, enum fib_event_type event_type,
struct fib_notifier_info *info)
{
+ struct fib_notifier_net *fn_net = net_generic(net, fib_notifier_net_id);
int err;
- info->net = net;
- err = atomic_notifier_call_chain(&fib_chain, event_type, info);
+ err = atomic_notifier_call_chain(&fn_net->fib_chain, event_type, info);
return notifier_to_errno(err);
}
EXPORT_SYMBOL(call_fib_notifiers);
-static unsigned int fib_seq_sum(void)
+static unsigned int fib_seq_sum(struct net *net)
{
- struct fib_notifier_net *fn_net;
+ struct fib_notifier_net *fn_net = net_generic(net, fib_notifier_net_id);
struct fib_notifier_ops *ops;
unsigned int fib_seq = 0;
- struct net *net;
rtnl_lock();
- down_read(&net_rwsem);
- for_each_net(net) {
- fn_net = net_generic(net, fib_notifier_net_id);
- rcu_read_lock();
- list_for_each_entry_rcu(ops, &fn_net->fib_notifier_ops, list) {
- if (!try_module_get(ops->owner))
- continue;
- fib_seq += ops->fib_seq_read(net);
- module_put(ops->owner);
- }
- rcu_read_unlock();
+ rcu_read_lock();
+ list_for_each_entry_rcu(ops, &fn_net->fib_notifier_ops, list) {
+ if (!try_module_get(ops->owner))
+ continue;
+ fib_seq += ops->fib_seq_read(net);
+ module_put(ops->owner);
}
- up_read(&net_rwsem);
+ rcu_read_unlock();
rtnl_unlock();
return fib_seq;
}
-static int fib_net_dump(struct net *net, struct notifier_block *nb)
+static int fib_net_dump(struct net *net, struct notifier_block *nb,
+ struct netlink_ext_ack *extack)
{
struct fib_notifier_net *fn_net = net_generic(net, fib_notifier_net_id);
struct fib_notifier_ops *ops;
+ int err = 0;
+ rcu_read_lock();
list_for_each_entry_rcu(ops, &fn_net->fib_notifier_ops, list) {
- int err;
-
if (!try_module_get(ops->owner))
continue;
- err = ops->fib_dump(net, nb);
+ err = ops->fib_dump(net, nb, extack);
module_put(ops->owner);
if (err)
- return err;
+ goto unlock;
}
- return 0;
+unlock:
+ rcu_read_unlock();
+
+ return err;
}
-static bool fib_dump_is_consistent(struct notifier_block *nb,
+static bool fib_dump_is_consistent(struct net *net, struct notifier_block *nb,
void (*cb)(struct notifier_block *nb),
unsigned int fib_seq)
{
- atomic_notifier_chain_register(&fib_chain, nb);
- if (fib_seq == fib_seq_sum())
+ struct fib_notifier_net *fn_net = net_generic(net, fib_notifier_net_id);
+
+ atomic_notifier_chain_register(&fn_net->fib_chain, nb);
+ if (fib_seq == fib_seq_sum(net))
return true;
- atomic_notifier_chain_unregister(&fib_chain, nb);
+ atomic_notifier_chain_unregister(&fn_net->fib_chain, nb);
if (cb)
cb(nb);
return false;
}
#define FIB_DUMP_MAX_RETRIES 5
-int register_fib_notifier(struct notifier_block *nb,
- void (*cb)(struct notifier_block *nb))
+int register_fib_notifier(struct net *net, struct notifier_block *nb,
+ void (*cb)(struct notifier_block *nb),
+ struct netlink_ext_ack *extack)
{
int retries = 0;
int err;
do {
- unsigned int fib_seq = fib_seq_sum();
- struct net *net;
-
- rcu_read_lock();
- for_each_net_rcu(net) {
- err = fib_net_dump(net, nb);
- if (err)
- goto err_fib_net_dump;
- }
- rcu_read_unlock();
-
- if (fib_dump_is_consistent(nb, cb, fib_seq))
+ unsigned int fib_seq = fib_seq_sum(net);
+
+ err = fib_net_dump(net, nb, extack);
+ if (err)
+ return err;
+
+ if (fib_dump_is_consistent(net, nb, cb, fib_seq))
return 0;
} while (++retries < FIB_DUMP_MAX_RETRIES);
return -EBUSY;
-
-err_fib_net_dump:
- rcu_read_unlock();
- return err;
}
EXPORT_SYMBOL(register_fib_notifier);
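After this conversion a caller registers against a single namespace and passes extack through (hypothetical caller; my_fib_nb and my_fib_dump_flush_cb are invented names):

    err = register_fib_notifier(net, &my_fib_nb, my_fib_dump_flush_cb, extack);
    if (err)
            return err;
    /* ... */
    unregister_fib_notifier(net, &my_fib_nb);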
-int unregister_fib_notifier(struct notifier_block *nb)
+int unregister_fib_notifier(struct net *net, struct notifier_block *nb)
{
- return atomic_notifier_chain_unregister(&fib_chain, nb);
+ struct fib_notifier_net *fn_net = net_generic(net, fib_notifier_net_id);
+
+ return atomic_notifier_chain_unregister(&fn_net->fib_chain, nb);
}
EXPORT_SYMBOL(unregister_fib_notifier);
@@ -181,6 +173,7 @@ static int __net_init fib_notifier_net_init(struct net *net)
struct fib_notifier_net *fn_net = net_generic(net, fib_notifier_net_id);
INIT_LIST_HEAD(&fn_net->fib_notifier_ops);
+ ATOMIC_INIT_NOTIFIER_HEAD(&fn_net->fib_chain);
return 0;
}
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index dd220ce7ca7a..3e7e15278c46 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -321,16 +321,18 @@ out:
}
EXPORT_SYMBOL_GPL(fib_rules_lookup);
-static int call_fib_rule_notifier(struct notifier_block *nb, struct net *net,
+static int call_fib_rule_notifier(struct notifier_block *nb,
enum fib_event_type event_type,
- struct fib_rule *rule, int family)
+ struct fib_rule *rule, int family,
+ struct netlink_ext_ack *extack)
{
struct fib_rule_notifier_info info = {
.info.family = family,
+ .info.extack = extack,
.rule = rule,
};
- return call_fib_notifier(nb, net, event_type, &info.info);
+ return call_fib_notifier(nb, event_type, &info.info);
}
static int call_fib_rule_notifiers(struct net *net,
@@ -350,20 +352,25 @@ static int call_fib_rule_notifiers(struct net *net,
}
/* Called with rcu_read_lock() */
-int fib_rules_dump(struct net *net, struct notifier_block *nb, int family)
+int fib_rules_dump(struct net *net, struct notifier_block *nb, int family,
+ struct netlink_ext_ack *extack)
{
struct fib_rules_ops *ops;
struct fib_rule *rule;
+ int err = 0;
ops = lookup_rules_ops(net, family);
if (!ops)
return -EAFNOSUPPORT;
- list_for_each_entry_rcu(rule, &ops->rules_list, list)
- call_fib_rule_notifier(nb, net, FIB_EVENT_RULE_ADD, rule,
- family);
+ list_for_each_entry_rcu(rule, &ops->rules_list, list) {
+ err = call_fib_rule_notifier(nb, FIB_EVENT_RULE_ADD,
+ rule, family, extack);
+ if (err)
+ break;
+ }
rules_ops_put(ops);
- return 0;
+ return err;
}
EXPORT_SYMBOL_GPL(fib_rules_dump);
diff --git a/net/core/filter.c b/net/core/filter.c
index 3fed5755494b..b0ed048585ba 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -2245,7 +2245,7 @@ BPF_CALL_4(bpf_msg_pull_data, struct sk_msg *, msg, u32, start,
* account for the headroom.
*/
bytes_sg_total = start - offset + bytes;
- if (!msg->sg.copy[i] && bytes_sg_total <= len)
+ if (!test_bit(i, &msg->sg.copy) && bytes_sg_total <= len)
goto out;
/* At this point we need to linearize multiple scatterlist
@@ -2450,7 +2450,7 @@ BPF_CALL_4(bpf_msg_push_data, struct sk_msg *, msg, u32, start,
/* Place newly allocated data buffer */
sk_mem_charge(msg->sk, len);
msg->sg.size += len;
- msg->sg.copy[new] = false;
+ __clear_bit(new, &msg->sg.copy);
sg_set_page(&msg->sg.data[new], page, len + copy, 0);
if (rsge.length) {
get_page(sg_page(&rsge));
@@ -3798,7 +3798,7 @@ BPF_CALL_5(bpf_skb_event_output, struct sk_buff *, skb, struct bpf_map *, map,
if (unlikely(flags & ~(BPF_F_CTXLEN_MASK | BPF_F_INDEX_MASK)))
return -EINVAL;
- if (unlikely(skb_size > skb->len))
+ if (unlikely(!skb || skb_size > skb->len))
return -EFAULT;
return bpf_event_output(map, flags, meta, meta_size, skb, skb_size,
@@ -3816,6 +3816,19 @@ static const struct bpf_func_proto bpf_skb_event_output_proto = {
.arg5_type = ARG_CONST_SIZE_OR_ZERO,
};
+static int bpf_skb_output_btf_ids[5];
+const struct bpf_func_proto bpf_skb_output_proto = {
+ .func = bpf_skb_event_output,
+ .gpl_only = true,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_BTF_ID,
+ .arg2_type = ARG_CONST_MAP_PTR,
+ .arg3_type = ARG_ANYTHING,
+ .arg4_type = ARG_PTR_TO_MEM,
+ .arg5_type = ARG_CONST_SIZE_OR_ZERO,
+ .btf_id = bpf_skb_output_btf_ids,
+};
+
static unsigned short bpf_tunnel_key_af(u64 flags)
{
return flags & BPF_F_TUNINFO_IPV6 ? AF_INET6 : AF_INET;
@@ -4089,7 +4102,7 @@ BPF_CALL_1(bpf_skb_cgroup_id, const struct sk_buff *, skb)
return 0;
cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
- return cgrp->kn->id.id;
+ return cgroup_id(cgrp);
}
static const struct bpf_func_proto bpf_skb_cgroup_id_proto = {
@@ -4114,7 +4127,7 @@ BPF_CALL_2(bpf_skb_ancestor_cgroup_id, const struct sk_buff *, skb, int,
if (!ancestor)
return 0;
- return ancestor->kn->id.id;
+ return cgroup_id(ancestor);
}
static const struct bpf_func_proto bpf_skb_ancestor_cgroup_id_proto = {
@@ -8671,16 +8684,6 @@ out:
}
#ifdef CONFIG_INET
-struct sk_reuseport_kern {
- struct sk_buff *skb;
- struct sock *sk;
- struct sock *selected_sk;
- void *data_end;
- u32 hash;
- u32 reuseport_id;
- bool bind_inany;
-};
-
static void bpf_init_reuseport_kern(struct sk_reuseport_kern *reuse_kern,
struct sock_reuseport *reuse,
struct sock *sk, struct sk_buff *skb,
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 68eda10d0680..ca871657a4c4 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -114,19 +114,50 @@ int skb_flow_dissector_bpf_prog_attach(const union bpf_attr *attr,
{
struct bpf_prog *attached;
struct net *net;
+ int ret = 0;
net = current->nsproxy->net_ns;
mutex_lock(&flow_dissector_mutex);
+
+ if (net == &init_net) {
+ /* BPF flow dissector in the root namespace overrides
+ * any per-net-namespace one. When attaching to root,
+ * make sure we don't have any BPF program attached
+ * to the non-root namespaces.
+ */
+ struct net *ns;
+
+ for_each_net(ns) {
+ if (ns == &init_net)
+ continue;
+ if (rcu_access_pointer(ns->flow_dissector_prog)) {
+ ret = -EEXIST;
+ goto out;
+ }
+ }
+ } else {
+ /* Make sure root flow dissector is not attached
+ * when attaching to the non-root namespace.
+ */
+ if (rcu_access_pointer(init_net.flow_dissector_prog)) {
+ ret = -EEXIST;
+ goto out;
+ }
+ }
+
attached = rcu_dereference_protected(net->flow_dissector_prog,
lockdep_is_held(&flow_dissector_mutex));
- if (attached) {
- /* Only one BPF program can be attached at a time */
- mutex_unlock(&flow_dissector_mutex);
- return -EEXIST;
+ if (attached == prog) {
+ /* The same program cannot be attached twice */
+ ret = -EINVAL;
+ goto out;
}
rcu_assign_pointer(net->flow_dissector_prog, prog);
+ if (attached)
+ bpf_prog_put(attached);
+out:
mutex_unlock(&flow_dissector_mutex);
- return 0;
+ return ret;
}
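Illustrative outcomes under the new rules (a reading of the code above, not claimed by the patch itself): attaching to a non-root namespace while init_net has a program attached, or to init_net while any other namespace has one, fails with -EEXIST; attaching the same program twice to one namespace fails with -EINVAL; attaching a different program to the same namespace now succeeds and releases the previous program via bpf_prog_put().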
int skb_flow_dissector_bpf_prog_detach(const union bpf_attr *attr)
@@ -147,27 +178,6 @@ int skb_flow_dissector_bpf_prog_detach(const union bpf_attr *attr)
mutex_unlock(&flow_dissector_mutex);
return 0;
}
-/**
- * skb_flow_get_be16 - extract be16 entity
- * @skb: sk_buff to extract from
- * @poff: offset to extract at
- * @data: raw buffer pointer to the packet
- * @hlen: packet header length
- *
- * The function will try to retrieve a be32 entity at
- * offset poff
- */
-static __be16 skb_flow_get_be16(const struct sk_buff *skb, int poff,
- void *data, int hlen)
-{
- __be16 *u, _u;
-
- u = __skb_header_pointer(skb, poff, sizeof(_u), data, hlen, &_u);
- if (u)
- return *u;
-
- return 0;
-}
/**
* __skb_flow_get_ports - extract the upper layer ports and return them
@@ -203,6 +213,72 @@ __be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
}
EXPORT_SYMBOL(__skb_flow_get_ports);
+static bool icmp_has_id(u8 type)
+{
+ switch (type) {
+ case ICMP_ECHO:
+ case ICMP_ECHOREPLY:
+ case ICMP_TIMESTAMP:
+ case ICMP_TIMESTAMPREPLY:
+ case ICMPV6_ECHO_REQUEST:
+ case ICMPV6_ECHO_REPLY:
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * skb_flow_get_icmp_tci - extract ICMP(6) Type, Code and Identifier fields
+ * @skb: sk_buff to extract from
+ * @key_icmp: struct flow_dissector_key_icmp to fill
+ * @data: raw buffer pointer to the packet
+ * @thoff: offset to extract at
+ * @hlen: packet header length
+ */
+void skb_flow_get_icmp_tci(const struct sk_buff *skb,
+ struct flow_dissector_key_icmp *key_icmp,
+ void *data, int thoff, int hlen)
+{
+ struct icmphdr *ih, _ih;
+
+ ih = __skb_header_pointer(skb, thoff, sizeof(_ih), data, hlen, &_ih);
+ if (!ih)
+ return;
+
+ key_icmp->type = ih->type;
+ key_icmp->code = ih->code;
+
+ /* As we use 0 to signal that the Id field is not present,
+ * avoid confusion with packets without such a field
+ */
+ if (icmp_has_id(ih->type))
+ key_icmp->id = ih->un.echo.id ? : 1;
+ else
+ key_icmp->id = 0;
+}
+EXPORT_SYMBOL(skb_flow_get_icmp_tci);
+
+/* If FLOW_DISSECTOR_KEY_ICMP is set, dissect an ICMP packet
+ * using skb_flow_get_icmp_tci().
+ */
+static void __skb_flow_dissect_icmp(const struct sk_buff *skb,
+ struct flow_dissector *flow_dissector,
+ void *target_container,
+ void *data, int thoff, int hlen)
+{
+ struct flow_dissector_key_icmp *key_icmp;
+
+ if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ICMP))
+ return;
+
+ key_icmp = skb_flow_dissector_target(flow_dissector,
+ FLOW_DISSECTOR_KEY_ICMP,
+ target_container);
+
+ skb_flow_get_icmp_tci(skb, key_icmp, data, thoff, hlen);
+}
+
void skb_flow_dissect_meta(const struct sk_buff *skb,
struct flow_dissector *flow_dissector,
void *target_container)
@@ -853,7 +929,6 @@ bool __skb_flow_dissect(const struct net *net,
struct flow_dissector_key_basic *key_basic;
struct flow_dissector_key_addrs *key_addrs;
struct flow_dissector_key_ports *key_ports;
- struct flow_dissector_key_icmp *key_icmp;
struct flow_dissector_key_tags *key_tags;
struct flow_dissector_key_vlan *key_vlan;
struct bpf_prog *attached = NULL;
@@ -910,7 +985,10 @@ bool __skb_flow_dissect(const struct net *net,
WARN_ON_ONCE(!net);
if (net) {
rcu_read_lock();
- attached = rcu_dereference(net->flow_dissector_prog);
+ attached = rcu_dereference(init_net.flow_dissector_prog);
+
+ if (!attached)
+ attached = rcu_dereference(net->flow_dissector_prog);
if (attached) {
struct bpf_flow_keys flow_keys;
@@ -1295,6 +1373,12 @@ ip_proto_again:
data, nhoff, hlen);
break;
+ case IPPROTO_ICMP:
+ case IPPROTO_ICMPV6:
+ __skb_flow_dissect_icmp(skb, flow_dissector, target_container,
+ data, nhoff, hlen);
+ break;
+
default:
break;
}
@@ -1308,14 +1392,6 @@ ip_proto_again:
data, hlen);
}
- if (dissector_uses_key(flow_dissector,
- FLOW_DISSECTOR_KEY_ICMP)) {
- key_icmp = skb_flow_dissector_target(flow_dissector,
- FLOW_DISSECTOR_KEY_ICMP,
- target_container);
- key_icmp->icmp = skb_flow_get_be16(skb, nhoff, data, hlen);
- }
-
/* Process result of IP proto processing */
switch (fdret) {
case FLOW_DISSECT_RET_PROTO_AGAIN:
@@ -1365,8 +1441,8 @@ static const void *flow_keys_hash_start(const struct flow_keys *flow)
static inline size_t flow_keys_hash_length(const struct flow_keys *flow)
{
size_t diff = FLOW_KEYS_HASH_OFFSET + sizeof(flow->addrs);
- BUILD_BUG_ON(offsetof(typeof(*flow), addrs) !=
- sizeof(*flow) - sizeof(flow->addrs));
+
+ BUILD_BUG_ON((sizeof(*flow) - FLOW_KEYS_HASH_OFFSET) % sizeof(u32));
switch (flow->control.addr_type) {
case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
@@ -1412,6 +1488,9 @@ __be32 flow_get_u32_dst(const struct flow_keys *flow)
}
EXPORT_SYMBOL(flow_get_u32_dst);
+/* Sort the source and destination IPs (and the ports if the IPs are the same),
+ * so that the hash is consistent in both directions.
+ */
static inline void __flow_hash_consistentify(struct flow_keys *keys)
{
int addr_diff, i;
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index bfe7bdd4c340..80dbf2f4016e 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -48,7 +48,7 @@ struct net_rate_estimator {
u8 intvl_log; /* period : (250ms << intvl_log) */
seqcount_t seq;
- u32 last_packets;
+ u64 last_packets;
u64 last_bytes;
u64 avpps;
@@ -83,7 +83,7 @@ static void est_timer(struct timer_list *t)
brate = (b.bytes - est->last_bytes) << (10 - est->ewma_log - est->intvl_log);
brate -= (est->avbps >> est->ewma_log);
- rate = (u64)(b.packets - est->last_packets) << (10 - est->ewma_log - est->intvl_log);
+ rate = (b.packets - est->last_packets) << (10 - est->ewma_log - est->intvl_log);
rate -= (est->avpps >> est->ewma_log);
write_seqcount_begin(&est->seq);
diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c
index 36888f5e09eb..1d653fbfcf52 100644
--- a/net/core/gen_stats.c
+++ b/net/core/gen_stats.c
@@ -123,8 +123,7 @@ __gnet_stats_copy_basic_cpu(struct gnet_stats_basic_packed *bstats,
for_each_possible_cpu(i) {
struct gnet_stats_basic_cpu *bcpu = per_cpu_ptr(cpu, i);
unsigned int start;
- u64 bytes;
- u32 packets;
+ u64 bytes, packets;
do {
start = u64_stats_fetch_begin_irq(&bcpu->syncp);
@@ -176,12 +175,17 @@ ___gnet_stats_copy_basic(const seqcount_t *running,
if (d->tail) {
struct gnet_stats_basic sb;
+ int res;
memset(&sb, 0, sizeof(sb));
sb.bytes = bstats.bytes;
sb.packets = bstats.packets;
- return gnet_stats_copy(d, type, &sb, sizeof(sb),
- TCA_STATS_PAD);
+ res = gnet_stats_copy(d, type, &sb, sizeof(sb), TCA_STATS_PAD);
+ if (res < 0 || sb.packets == bstats.packets)
+ return res;
+ /* emit 64bit stats only if needed */
+ return gnet_stats_copy(d, TCA_STATS_PKT64, &bstats.packets,
+ sizeof(bstats.packets), TCA_STATS_PAD);
}
return 0;
}
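Worked example: after 5,000,000,000 packets the legacy 32-bit sb.packets field wraps to 705,032,704 (5000000000 mod 2^32), so the TCA_STATS_PKT64 attribute is emitted with the true 64-bit count; while the count still fits in 32 bits, sb.packets == bstats.packets and the extra attribute is skipped.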
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 5480edff0c86..652da6369037 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1197,7 +1197,7 @@ static void neigh_update_hhs(struct neighbour *neigh)
if (update) {
hh = &neigh->hh;
- if (hh->hh_len) {
+ if (READ_ONCE(hh->hh_len)) {
write_seqlock_bh(&hh->hh_lock);
update(hh, neigh->dev, neigh->ha);
write_sequnlock_bh(&hh->hh_lock);
@@ -1476,7 +1476,7 @@ int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb)
struct net_device *dev = neigh->dev;
unsigned int seq;
- if (dev->header_ops->cache && !neigh->hh.hh_len)
+ if (dev->header_ops->cache && !READ_ONCE(neigh->hh.hh_len))
neigh_hh_init(neigh);
do {
@@ -2052,8 +2052,8 @@ static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
goto nla_put_failure;
{
unsigned long now = jiffies;
- unsigned int flush_delta = now - tbl->last_flush;
- unsigned int rand_delta = now - tbl->last_rand;
+ long flush_delta = now - tbl->last_flush;
+ long rand_delta = now - tbl->last_rand;
struct neigh_hash_table *nht;
struct ndt_config ndc = {
.ndtc_key_len = tbl->key_len,
diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c
index 36347933ec3a..6bbd06f7dc7d 100644
--- a/net/core/net-procfs.c
+++ b/net/core/net-procfs.c
@@ -20,8 +20,8 @@ static inline struct net_device *dev_from_same_bucket(struct seq_file *seq, loff
struct hlist_head *h;
unsigned int count = 0, offset = get_offset(*pos);
- h = &net->dev_name_head[get_bucket(*pos)];
- hlist_for_each_entry_rcu(dev, h, name_hlist) {
+ h = &net->dev_index_head[get_bucket(*pos)];
+ hlist_for_each_entry_rcu(dev, h, index_hlist) {
if (++count == offset)
return dev;
}
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 865ba6ca16eb..ae3bcb1540ec 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -923,21 +923,23 @@ static int rx_queue_add_kobject(struct net_device *dev, int index)
error = kobject_init_and_add(kobj, &rx_queue_ktype, NULL,
"rx-%u", index);
if (error)
- return error;
+ goto err;
dev_hold(queue->dev);
if (dev->sysfs_rx_queue_group) {
error = sysfs_create_group(kobj, dev->sysfs_rx_queue_group);
- if (error) {
- kobject_put(kobj);
- return error;
- }
+ if (error)
+ goto err;
}
kobject_uevent(kobj, KOBJ_ADD);
return error;
+
+err:
+ kobject_put(kobj);
+ return error;
}
#endif /* CONFIG_SYSFS */
@@ -1461,21 +1463,22 @@ static int netdev_queue_add_kobject(struct net_device *dev, int index)
error = kobject_init_and_add(kobj, &netdev_queue_ktype, NULL,
"tx-%u", index);
if (error)
- return error;
+ goto err;
dev_hold(queue->dev);
#ifdef CONFIG_BQL
error = sysfs_create_group(kobj, &dql_group);
- if (error) {
- kobject_put(kobj);
- return error;
- }
+ if (error)
+ goto err;
#endif
kobject_uevent(kobj, KOBJ_ADD);
-
return 0;
+
+err:
+ kobject_put(kobj);
+ return error;
}
#endif /* CONFIG_SYSFS */
diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c
index 256b7954b720..8881dd943dd0 100644
--- a/net/core/netprio_cgroup.c
+++ b/net/core/netprio_cgroup.c
@@ -93,7 +93,7 @@ static int extend_netdev_table(struct net_device *dev, u32 target_idx)
static u32 netprio_prio(struct cgroup_subsys_state *css, struct net_device *dev)
{
struct netprio_map *map = rcu_dereference_rtnl(dev->priomap);
- int id = css->cgroup->id;
+ int id = css->id;
if (map && id < map->priomap_len)
return map->priomap[id];
@@ -113,7 +113,7 @@ static int netprio_set_prio(struct cgroup_subsys_state *css,
struct net_device *dev, u32 prio)
{
struct netprio_map *map;
- int id = css->cgroup->id;
+ int id = css->id;
int ret;
/* avoid extending priomap for zero writes */
@@ -177,7 +177,7 @@ static void cgrp_css_free(struct cgroup_subsys_state *css)
static u64 read_prioidx(struct cgroup_subsys_state *css, struct cftype *cft)
{
- return css->cgroup->id;
+ return css->id;
}
static int read_priomap(struct seq_file *sf, void *v)
@@ -237,7 +237,7 @@ static void net_prio_attach(struct cgroup_taskset *tset)
struct cgroup_subsys_state *css;
cgroup_taskset_for_each(p, css, tset) {
- void *v = (void *)(unsigned long)css->cgroup->id;
+ void *v = (void *)(unsigned long)css->id;
task_lock(p);
iterate_fd(p->files, 0, update_netprio, v);
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index 5bc65587f1c4..a6aefe989043 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -18,6 +18,9 @@
#include <trace/events/page_pool.h>
+#define DEFER_TIME (msecs_to_jiffies(1000))
+#define DEFER_WARN_INTERVAL (60 * HZ)
+
static int page_pool_init(struct page_pool *pool,
const struct page_pool_params *params)
{
@@ -44,6 +47,21 @@ static int page_pool_init(struct page_pool *pool,
(pool->p.dma_dir != DMA_BIDIRECTIONAL))
return -EINVAL;
+ if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV) {
+ /* In order to request DMA-sync-for-device the page
+ * needs to be mapped
+ */
+ if (!(pool->p.flags & PP_FLAG_DMA_MAP))
+ return -EINVAL;
+
+ if (!pool->p.max_len)
+ return -EINVAL;
+
+ /* pool->p.offset has to be set according to the address
+ * offset used by the DMA engine to start copying rx data
+ */
+ }
+
if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0)
return -ENOMEM;
@@ -112,6 +130,16 @@ static struct page *__page_pool_get_cached(struct page_pool *pool)
return page;
}
+static void page_pool_dma_sync_for_device(struct page_pool *pool,
+ struct page *page,
+ unsigned int dma_sync_size)
+{
+ dma_sync_size = min(dma_sync_size, pool->p.max_len);
+ dma_sync_single_range_for_device(pool->p.dev, page->dma_addr,
+ pool->p.offset, dma_sync_size,
+ pool->p.dma_dir);
+}
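A sketch of how a driver might opt in to device-side DMA sync (hypothetical values; priv->dev is an invented placeholder, the field names follow struct page_pool_params as used above):

    struct page_pool_params pp = {
            .flags     = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
            .order     = 0,
            .pool_size = 256,
            .nid       = NUMA_NO_NODE,
            .dev       = priv->dev,
            .dma_dir   = DMA_FROM_DEVICE,
            .offset    = XDP_PACKET_HEADROOM,
            .max_len   = PAGE_SIZE - XDP_PACKET_HEADROOM,
    };
    struct page_pool *pool = page_pool_create(&pp);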
+
/* slow path */
noinline
static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
@@ -156,6 +184,9 @@ static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
}
page->dma_addr = dma;
+ if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
+ page_pool_dma_sync_for_device(pool, page, pool->p.max_len);
+
skip_dma_map:
/* Track how many pages are held 'in-flight' */
pool->pages_state_hold_cnt++;
@@ -193,22 +224,14 @@ static s32 page_pool_inflight(struct page_pool *pool)
{
u32 release_cnt = atomic_read(&pool->pages_state_release_cnt);
u32 hold_cnt = READ_ONCE(pool->pages_state_hold_cnt);
- s32 distance;
+ s32 inflight;
- distance = _distance(hold_cnt, release_cnt);
+ inflight = _distance(hold_cnt, release_cnt);
- trace_page_pool_inflight(pool, distance, hold_cnt, release_cnt);
- return distance;
-}
-
-static bool __page_pool_safe_to_destroy(struct page_pool *pool)
-{
- s32 inflight = page_pool_inflight(pool);
-
- /* The distance should not be able to become negative */
+ trace_page_pool_release(pool, inflight, hold_cnt, release_cnt);
WARN(inflight < 0, "Negative(%d) inflight packet-pages", inflight);
- return (inflight == 0);
+ return inflight;
}
/* Cleanup page_pool state from page */
@@ -216,6 +239,7 @@ static void __page_pool_clean_page(struct page_pool *pool,
struct page *page)
{
dma_addr_t dma;
+ int count;
if (!(pool->p.flags & PP_FLAG_DMA_MAP))
goto skip_dma_unmap;
@@ -227,9 +251,11 @@ static void __page_pool_clean_page(struct page_pool *pool,
DMA_ATTR_SKIP_CPU_SYNC);
page->dma_addr = 0;
skip_dma_unmap:
- atomic_inc(&pool->pages_state_release_cnt);
- trace_page_pool_state_release(pool, page,
- atomic_read(&pool->pages_state_release_cnt));
+ /* This may be the last page returned, releasing the pool, so
+ * it is not safe to reference the pool afterwards.
+ */
+ count = atomic_inc_return(&pool->pages_state_release_cnt);
+ trace_page_pool_state_release(pool, page, count);
}
/* unmap the page and clean our state */
@@ -283,8 +309,19 @@ static bool __page_pool_recycle_direct(struct page *page,
return true;
}
-void __page_pool_put_page(struct page_pool *pool,
- struct page *page, bool allow_direct)
+/* page is NOT reusable when:
+ * 1) it was allocated when the system was under memory pressure
+ *    (page_is_pfmemalloc());
+ * 2) it belongs to a different NUMA node than pool->p.nid.
+ *
+ * To update pool->p.nid, users must call page_pool_update_nid().
+ */
+static bool pool_page_reusable(struct page_pool *pool, struct page *page)
+{
+ return !page_is_pfmemalloc(page) && page_to_nid(page) == pool->p.nid;
+}
+
+void __page_pool_put_page(struct page_pool *pool, struct page *page,
+ unsigned int dma_sync_size, bool allow_direct)
{
/* This allocator is optimized for the XDP mode that uses
* one-frame-per-page, but have fallbacks that act like the
@@ -292,9 +329,14 @@ void __page_pool_put_page(struct page_pool *pool,
*
* refcnt == 1 means page_pool owns page, and can recycle it.
*/
- if (likely(page_ref_count(page) == 1)) {
+ if (likely(page_ref_count(page) == 1 &&
+ pool_page_reusable(pool, page))) {
/* Read barrier done in page_ref_count / READ_ONCE */
+ if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
+ page_pool_dma_sync_for_device(pool, page,
+ dma_sync_size);
+
if (allow_direct && in_serving_softirq())
if (__page_pool_recycle_direct(page, pool))
return;
@@ -338,31 +380,10 @@ static void __page_pool_empty_ring(struct page_pool *pool)
}
}
-static void __warn_in_flight(struct page_pool *pool)
+static void page_pool_free(struct page_pool *pool)
{
- u32 release_cnt = atomic_read(&pool->pages_state_release_cnt);
- u32 hold_cnt = READ_ONCE(pool->pages_state_hold_cnt);
- s32 distance;
-
- distance = _distance(hold_cnt, release_cnt);
-
- /* Drivers should fix this, but only problematic when DMA is used */
- WARN(1, "Still in-flight pages:%d hold:%u released:%u",
- distance, hold_cnt, release_cnt);
-}
-
-void __page_pool_free(struct page_pool *pool)
-{
- /* Only last user actually free/release resources */
- if (!page_pool_put(pool))
- return;
-
- WARN(pool->alloc.count, "API usage violation");
- WARN(!ptr_ring_empty(&pool->ring), "ptr_ring is not empty");
-
- /* Can happen due to forced shutdown */
- if (!__page_pool_safe_to_destroy(pool))
- __warn_in_flight(pool);
+ if (pool->disconnect)
+ pool->disconnect(pool);
ptr_ring_cleanup(&pool->ring, NULL);
@@ -371,15 +392,14 @@ void __page_pool_free(struct page_pool *pool)
kfree(pool);
}
-EXPORT_SYMBOL(__page_pool_free);
-/* Request to shutdown: release pages cached by page_pool, and check
- * for in-flight pages
- */
-bool __page_pool_request_shutdown(struct page_pool *pool)
+static void page_pool_empty_alloc_cache_once(struct page_pool *pool)
{
struct page *page;
+ if (pool->destroy_cnt)
+ return;
+
/* Empty alloc cache, assume caller made sure this is
* no-longer in use, and page_pool_alloc_pages() cannot be
* call concurrently.
@@ -388,12 +408,83 @@ bool __page_pool_request_shutdown(struct page_pool *pool)
page = pool->alloc.cache[--pool->alloc.count];
__page_pool_return_page(pool, page);
}
+}
+
+static void page_pool_scrub(struct page_pool *pool)
+{
+ page_pool_empty_alloc_cache_once(pool);
+ pool->destroy_cnt++;
/* No more consumers should exist, but producers could still
* be in-flight.
*/
__page_pool_empty_ring(pool);
+}
+
+static int page_pool_release(struct page_pool *pool)
+{
+ int inflight;
+
+ page_pool_scrub(pool);
+ inflight = page_pool_inflight(pool);
+ if (!inflight)
+ page_pool_free(pool);
+
+ return inflight;
+}
+
+static void page_pool_release_retry(struct work_struct *wq)
+{
+ struct delayed_work *dwq = to_delayed_work(wq);
+ struct page_pool *pool = container_of(dwq, typeof(*pool), release_dw);
+ int inflight;
+
+ inflight = page_pool_release(pool);
+ if (!inflight)
+ return;
+
+ /* Periodic warning */
+ if (time_after_eq(jiffies, pool->defer_warn)) {
+ int sec = (s32)((u32)jiffies - (u32)pool->defer_start) / HZ;
+
+ pr_warn("%s() stalled pool shutdown %d inflight %d sec\n",
+ __func__, inflight, sec);
+ pool->defer_warn = jiffies + DEFER_WARN_INTERVAL;
+ }
+
+ /* Still not ready to be disconnected, retry later */
+ schedule_delayed_work(&pool->release_dw, DEFER_TIME);
+}
- return __page_pool_safe_to_destroy(pool);
+void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *))
+{
+ refcount_inc(&pool->user_cnt);
+ pool->disconnect = disconnect;
+}
+
+void page_pool_destroy(struct page_pool *pool)
+{
+ if (!pool)
+ return;
+
+ if (!page_pool_put(pool))
+ return;
+
+ if (!page_pool_release(pool))
+ return;
+
+ pool->defer_start = jiffies;
+ pool->defer_warn = jiffies + DEFER_WARN_INTERVAL;
+
+ INIT_DELAYED_WORK(&pool->release_dw, page_pool_release_retry);
+ schedule_delayed_work(&pool->release_dw, DEFER_TIME);
+}
+EXPORT_SYMBOL(page_pool_destroy);
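For drivers this collapses the old shutdown sequence into a single NULL-safe page_pool_destroy(pool) call; if pages are still in flight, the release is retried from delayed work every DEFER_TIME (1 s), warning at most once per DEFER_WARN_INTERVAL (60 s) until the last page comes back.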
+
+/* Caller must provide appropriate safe context, e.g. NAPI. */
+void page_pool_update_nid(struct page_pool *pool, int new_nid)
+{
+ trace_page_pool_update_nid(pool, new_nid);
+ pool->p.nid = new_nid;
}
-EXPORT_SYMBOL(__page_pool_request_shutdown);
+EXPORT_SYMBOL(page_pool_update_nid);
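The page_pool rework above folds the old request-shutdown/free pair into a single page_pool_destroy() entry point: the caches are scrubbed, in-flight pages are counted from the hold/release counters, and a non-zero count defers the final free to page_pool_release_retry(). A minimal driver-side teardown sketch (struct mydrv_rxq and its members are illustrative assumptions, not part of this patch):

/* Hypothetical driver teardown. page_pool_destroy() may be called with
 * packets still in flight: the last user's call defers the actual free to
 * page_pool_release_retry() until the hold/release counters balance.
 */
static void mydrv_rxq_destroy(struct mydrv_rxq *rxq)
{
	xdp_rxq_info_unreg(&rxq->xdp_rxq);	/* drops the mem model's ref */
	page_pool_destroy(rxq->page_pool);	/* drops the driver's ref */
	rxq->page_pool = NULL;
}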
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 48b1e429857c..294bfcf0ce0e 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -3404,7 +3404,6 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
HARD_TX_LOCK(odev, txq, smp_processor_id());
if (unlikely(netif_xmit_frozen_or_drv_stopped(txq))) {
- ret = NETDEV_TX_BUSY;
pkt_dev->last_ok = 0;
goto unlock;
}
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index c81cd80114d9..0b5ccb7c9bc8 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -980,6 +980,19 @@ static size_t rtnl_xdp_size(void)
return xdp_size;
}
+static size_t rtnl_prop_list_size(const struct net_device *dev)
+{
+ struct netdev_name_node *name_node;
+ size_t size;
+
+ if (list_empty(&dev->name_node->list))
+ return 0;
+ size = nla_total_size(0);
+ list_for_each_entry(name_node, &dev->name_node->list, list)
+ size += nla_total_size(ALTIFNAMSIZ);
+ return size;
+}
+
static noinline size_t if_nlmsg_size(const struct net_device *dev,
u32 ext_filter_mask)
{
@@ -1027,6 +1040,7 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev,
+ nla_total_size(4) /* IFLA_CARRIER_DOWN_COUNT */
+ nla_total_size(4) /* IFLA_MIN_MTU */
+ nla_total_size(4) /* IFLA_MAX_MTU */
+ + rtnl_prop_list_size(dev)
+ 0;
}
@@ -1204,6 +1218,8 @@ static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
struct ifla_vf_mac vf_mac;
struct ifla_vf_broadcast vf_broadcast;
struct ifla_vf_info ivi;
+ struct ifla_vf_guid node_guid;
+ struct ifla_vf_guid port_guid;
memset(&ivi, 0, sizeof(ivi));
@@ -1270,6 +1286,18 @@ static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
nla_put(skb, IFLA_VF_TRUST,
sizeof(vf_trust), &vf_trust))
goto nla_put_vf_failure;
+
+ memset(&node_guid, 0, sizeof(node_guid));
+ memset(&port_guid, 0, sizeof(port_guid));
+ if (dev->netdev_ops->ndo_get_vf_guid &&
+ !dev->netdev_ops->ndo_get_vf_guid(dev, vfs_num, &node_guid,
+ &port_guid)) {
+ if (nla_put(skb, IFLA_VF_IB_NODE_GUID, sizeof(node_guid),
+ &node_guid) ||
+ nla_put(skb, IFLA_VF_IB_PORT_GUID, sizeof(port_guid),
+ &port_guid))
+ goto nla_put_vf_failure;
+ }
vfvlanlist = nla_nest_start_noflag(skb, IFLA_VF_VLAN_LIST);
if (!vfvlanlist)
goto nla_put_vf_failure;
@@ -1584,6 +1612,42 @@ static int rtnl_fill_link_af(struct sk_buff *skb,
return 0;
}
+static int rtnl_fill_alt_ifnames(struct sk_buff *skb,
+ const struct net_device *dev)
+{
+ struct netdev_name_node *name_node;
+ int count = 0;
+
+ list_for_each_entry(name_node, &dev->name_node->list, list) {
+ if (nla_put_string(skb, IFLA_ALT_IFNAME, name_node->name))
+ return -EMSGSIZE;
+ count++;
+ }
+ return count;
+}
+
+static int rtnl_fill_prop_list(struct sk_buff *skb,
+ const struct net_device *dev)
+{
+ struct nlattr *prop_list;
+ int ret;
+
+ prop_list = nla_nest_start(skb, IFLA_PROP_LIST);
+ if (!prop_list)
+ return -EMSGSIZE;
+
+ ret = rtnl_fill_alt_ifnames(skb, dev);
+ if (ret <= 0)
+ goto nest_cancel;
+
+ nla_nest_end(skb, prop_list);
+ return 0;
+
+nest_cancel:
+ nla_nest_cancel(skb, prop_list);
+ return ret;
+}
+
static int rtnl_fill_ifinfo(struct sk_buff *skb,
struct net_device *dev, struct net *src_net,
int type, u32 pid, u32 seq, u32 change,
@@ -1697,6 +1761,9 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb,
goto nla_put_failure_rcu;
rcu_read_unlock();
+ if (rtnl_fill_prop_list(skb, dev))
+ goto nla_put_failure;
+
nlmsg_end(skb, nlh);
return 0;
@@ -1750,6 +1817,9 @@ static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
[IFLA_CARRIER_DOWN_COUNT] = { .type = NLA_U32 },
[IFLA_MIN_MTU] = { .type = NLA_U32 },
[IFLA_MAX_MTU] = { .type = NLA_U32 },
+ [IFLA_PROP_LIST] = { .type = NLA_NESTED },
+ [IFLA_ALT_IFNAME] = { .type = NLA_STRING,
+ .len = ALTIFNAMSIZ - 1 },
};
static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
@@ -2195,6 +2265,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
if (tb[IFLA_VF_MAC]) {
struct ifla_vf_mac *ivm = nla_data(tb[IFLA_VF_MAC]);
+ if (ivm->vf >= INT_MAX)
+ return -EINVAL;
err = -EOPNOTSUPP;
if (ops->ndo_set_vf_mac)
err = ops->ndo_set_vf_mac(dev, ivm->vf,
@@ -2206,6 +2278,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
if (tb[IFLA_VF_VLAN]) {
struct ifla_vf_vlan *ivv = nla_data(tb[IFLA_VF_VLAN]);
+ if (ivv->vf >= INT_MAX)
+ return -EINVAL;
err = -EOPNOTSUPP;
if (ops->ndo_set_vf_vlan)
err = ops->ndo_set_vf_vlan(dev, ivv->vf, ivv->vlan,
@@ -2238,6 +2312,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
if (len == 0)
return -EINVAL;
+ if (ivvl[0]->vf >= INT_MAX)
+ return -EINVAL;
err = ops->ndo_set_vf_vlan(dev, ivvl[0]->vf, ivvl[0]->vlan,
ivvl[0]->qos, ivvl[0]->vlan_proto);
if (err < 0)
@@ -2248,6 +2324,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
struct ifla_vf_tx_rate *ivt = nla_data(tb[IFLA_VF_TX_RATE]);
struct ifla_vf_info ivf;
+ if (ivt->vf >= INT_MAX)
+ return -EINVAL;
err = -EOPNOTSUPP;
if (ops->ndo_get_vf_config)
err = ops->ndo_get_vf_config(dev, ivt->vf, &ivf);
@@ -2266,6 +2344,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
if (tb[IFLA_VF_RATE]) {
struct ifla_vf_rate *ivt = nla_data(tb[IFLA_VF_RATE]);
+ if (ivt->vf >= INT_MAX)
+ return -EINVAL;
err = -EOPNOTSUPP;
if (ops->ndo_set_vf_rate)
err = ops->ndo_set_vf_rate(dev, ivt->vf,
@@ -2278,6 +2358,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
if (tb[IFLA_VF_SPOOFCHK]) {
struct ifla_vf_spoofchk *ivs = nla_data(tb[IFLA_VF_SPOOFCHK]);
+ if (ivs->vf >= INT_MAX)
+ return -EINVAL;
err = -EOPNOTSUPP;
if (ops->ndo_set_vf_spoofchk)
err = ops->ndo_set_vf_spoofchk(dev, ivs->vf,
@@ -2289,6 +2371,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
if (tb[IFLA_VF_LINK_STATE]) {
struct ifla_vf_link_state *ivl = nla_data(tb[IFLA_VF_LINK_STATE]);
+ if (ivl->vf >= INT_MAX)
+ return -EINVAL;
err = -EOPNOTSUPP;
if (ops->ndo_set_vf_link_state)
err = ops->ndo_set_vf_link_state(dev, ivl->vf,
@@ -2302,6 +2386,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
err = -EOPNOTSUPP;
ivrssq_en = nla_data(tb[IFLA_VF_RSS_QUERY_EN]);
+ if (ivrssq_en->vf >= INT_MAX)
+ return -EINVAL;
if (ops->ndo_set_vf_rss_query_en)
err = ops->ndo_set_vf_rss_query_en(dev, ivrssq_en->vf,
ivrssq_en->setting);
@@ -2312,6 +2398,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
if (tb[IFLA_VF_TRUST]) {
struct ifla_vf_trust *ivt = nla_data(tb[IFLA_VF_TRUST]);
+ if (ivt->vf >= INT_MAX)
+ return -EINVAL;
err = -EOPNOTSUPP;
if (ops->ndo_set_vf_trust)
err = ops->ndo_set_vf_trust(dev, ivt->vf, ivt->setting);
@@ -2322,15 +2410,18 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
if (tb[IFLA_VF_IB_NODE_GUID]) {
struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_NODE_GUID]);
+ if (ivt->vf >= INT_MAX)
+ return -EINVAL;
if (!ops->ndo_set_vf_guid)
return -EOPNOTSUPP;
-
return handle_vf_guid(dev, ivt, IFLA_VF_IB_NODE_GUID);
}
if (tb[IFLA_VF_IB_PORT_GUID]) {
struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_PORT_GUID]);
+ if (ivt->vf >= INT_MAX)
+ return -EINVAL;
if (!ops->ndo_set_vf_guid)
return -EOPNOTSUPP;
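The vf >= INT_MAX guards repeated through do_setvfinfo() exist because the ifla_vf_* uapi structures carry the VF index as a __u32, while the ndo_set_vf_*() callbacks take a plain int: an unchecked large index turns negative inside the driver, where it may be used as an array offset. A small user-space illustration of the conversion hazard (not kernel code):

/* Illustration only: how an unchecked __u32 VF index wraps when implicitly
 * converted to the int parameter of an ndo_set_vf_*() hook. Converting an
 * out-of-range value is implementation-defined and yields a negative
 * number on the usual two's-complement targets.
 */
#include <limits.h>
#include <stdio.h>

int main(void)
{
	unsigned int vf = 0x80000000u;	/* netlink-supplied index */

	if (vf >= INT_MAX)		/* the guard this patch adds */
		return 1;		/* the kernel answers -EINVAL instead */

	printf("as int: %d\n", (int)vf);
	return 0;
}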
@@ -2723,6 +2814,26 @@ errout:
return err;
}
+static struct net_device *rtnl_dev_get(struct net *net,
+ struct nlattr *ifname_attr,
+ struct nlattr *altifname_attr,
+ char *ifname)
+{
+ char buffer[ALTIFNAMSIZ];
+
+ if (!ifname) {
+ ifname = buffer;
+ if (ifname_attr)
+ nla_strlcpy(ifname, ifname_attr, IFNAMSIZ);
+ else if (altifname_attr)
+ nla_strlcpy(ifname, altifname_attr, ALTIFNAMSIZ);
+ else
+ return NULL;
+ }
+
+ return __dev_get_by_name(net, ifname);
+}
+
static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
@@ -2751,8 +2862,8 @@ static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
ifm = nlmsg_data(nlh);
if (ifm->ifi_index > 0)
dev = __dev_get_by_index(net, ifm->ifi_index);
- else if (tb[IFLA_IFNAME])
- dev = __dev_get_by_name(net, ifname);
+ else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
+ dev = rtnl_dev_get(net, NULL, tb[IFLA_ALT_IFNAME], ifname);
else
goto errout;
@@ -2825,7 +2936,6 @@ static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
struct net *tgt_net = net;
struct net_device *dev = NULL;
struct ifinfomsg *ifm;
- char ifname[IFNAMSIZ];
struct nlattr *tb[IFLA_MAX+1];
int err;
int netnsid = -1;
@@ -2839,9 +2949,6 @@ static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
if (err < 0)
return err;
- if (tb[IFLA_IFNAME])
- nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
-
if (tb[IFLA_TARGET_NETNSID]) {
netnsid = nla_get_s32(tb[IFLA_TARGET_NETNSID]);
tgt_net = rtnl_get_net_ns_capable(NETLINK_CB(skb).sk, netnsid);
@@ -2853,8 +2960,9 @@ static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
ifm = nlmsg_data(nlh);
if (ifm->ifi_index > 0)
dev = __dev_get_by_index(tgt_net, ifm->ifi_index);
- else if (tb[IFLA_IFNAME])
- dev = __dev_get_by_name(tgt_net, ifname);
+ else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
+ dev = rtnl_dev_get(net, tb[IFLA_IFNAME],
+ tb[IFLA_ALT_IFNAME], NULL);
else if (tb[IFLA_GROUP])
err = rtnl_group_dellink(tgt_net, nla_get_u32(tb[IFLA_GROUP]));
else
@@ -3025,12 +3133,10 @@ replay:
ifm = nlmsg_data(nlh);
if (ifm->ifi_index > 0)
dev = __dev_get_by_index(net, ifm->ifi_index);
- else {
- if (ifname[0])
- dev = __dev_get_by_name(net, ifname);
- else
- dev = NULL;
- }
+ else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
+ dev = rtnl_dev_get(net, NULL, tb[IFLA_ALT_IFNAME], ifname);
+ else
+ dev = NULL;
if (dev) {
master_dev = netdev_master_upper_dev_get(dev);
@@ -3292,6 +3398,7 @@ static int rtnl_valid_getlink_req(struct sk_buff *skb,
switch (i) {
case IFLA_IFNAME:
+ case IFLA_ALT_IFNAME:
case IFLA_EXT_MASK:
case IFLA_TARGET_NETNSID:
break;
@@ -3310,7 +3417,6 @@ static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr *nlh,
struct net *net = sock_net(skb->sk);
struct net *tgt_net = net;
struct ifinfomsg *ifm;
- char ifname[IFNAMSIZ];
struct nlattr *tb[IFLA_MAX+1];
struct net_device *dev = NULL;
struct sk_buff *nskb;
@@ -3333,9 +3439,6 @@ static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr *nlh,
return PTR_ERR(tgt_net);
}
- if (tb[IFLA_IFNAME])
- nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
-
if (tb[IFLA_EXT_MASK])
ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
@@ -3343,8 +3446,9 @@ static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr *nlh,
ifm = nlmsg_data(nlh);
if (ifm->ifi_index > 0)
dev = __dev_get_by_index(tgt_net, ifm->ifi_index);
- else if (tb[IFLA_IFNAME])
- dev = __dev_get_by_name(tgt_net, ifname);
+ else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
+ dev = rtnl_dev_get(tgt_net, tb[IFLA_IFNAME],
+ tb[IFLA_ALT_IFNAME], NULL);
else
goto out;
@@ -3374,6 +3478,100 @@ out:
return err;
}
+static int rtnl_alt_ifname(int cmd, struct net_device *dev, struct nlattr *attr,
+ bool *changed, struct netlink_ext_ack *extack)
+{
+ char *alt_ifname;
+ int err;
+
+ err = nla_validate(attr, attr->nla_len, IFLA_MAX, ifla_policy, extack);
+ if (err)
+ return err;
+
+ alt_ifname = nla_data(attr);
+ if (cmd == RTM_NEWLINKPROP) {
+ alt_ifname = kstrdup(alt_ifname, GFP_KERNEL);
+ if (!alt_ifname)
+ return -ENOMEM;
+ err = netdev_name_node_alt_create(dev, alt_ifname);
+ if (err) {
+ kfree(alt_ifname);
+ return err;
+ }
+ } else if (cmd == RTM_DELLINKPROP) {
+ err = netdev_name_node_alt_destroy(dev, alt_ifname);
+ if (err)
+ return err;
+ } else {
+ WARN_ON(1);
+ return 0;
+ }
+
+ *changed = true;
+ return 0;
+}
+
+static int rtnl_linkprop(int cmd, struct sk_buff *skb, struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack)
+{
+ struct net *net = sock_net(skb->sk);
+ struct nlattr *tb[IFLA_MAX + 1];
+ struct net_device *dev;
+ struct ifinfomsg *ifm;
+ bool changed = false;
+ struct nlattr *attr;
+ int err, rem;
+
+ err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy, extack);
+ if (err)
+ return err;
+
+ err = rtnl_ensure_unique_netns(tb, extack, true);
+ if (err)
+ return err;
+
+ ifm = nlmsg_data(nlh);
+ if (ifm->ifi_index > 0)
+ dev = __dev_get_by_index(net, ifm->ifi_index);
+ else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
+ dev = rtnl_dev_get(net, tb[IFLA_IFNAME],
+ tb[IFLA_ALT_IFNAME], NULL);
+ else
+ return -EINVAL;
+
+ if (!dev)
+ return -ENODEV;
+
+ if (!tb[IFLA_PROP_LIST])
+ return 0;
+
+ nla_for_each_nested(attr, tb[IFLA_PROP_LIST], rem) {
+ switch (nla_type(attr)) {
+ case IFLA_ALT_IFNAME:
+ err = rtnl_alt_ifname(cmd, dev, attr, &changed, extack);
+ if (err)
+ return err;
+ break;
+ }
+ }
+
+ if (changed)
+ netdev_state_change(dev);
+ return 0;
+}
+
+static int rtnl_newlinkprop(struct sk_buff *skb, struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack)
+{
+ return rtnl_linkprop(RTM_NEWLINKPROP, skb, nlh, extack);
+}
+
+static int rtnl_dellinkprop(struct sk_buff *skb, struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack)
+{
+ return rtnl_linkprop(RTM_DELLINKPROP, skb, nlh, extack);
+}
+
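Together with IFLA_PROP_LIST/IFLA_ALT_IFNAME above, these handlers let userspace attach alternative names to a device; with a matching iproute2 this is roughly `ip link property add dev eth0 altname mgmt0`. A minimal raw-netlink sketch of the same request, mirroring the kernel-side parsing in rtnl_linkprop() (error and ACK handling elided; the fallback constants are copied from the uapi this series adds, for builds against older headers):

#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/if_link.h>

#ifndef RTM_NEWLINKPROP
#define RTM_NEWLINKPROP	108
#define IFLA_PROP_LIST	52
#define IFLA_ALT_IFNAME	53
#endif

static int add_altname(int ifindex, const char *altname)
{
	struct {
		struct nlmsghdr nlh;
		struct ifinfomsg ifm;
		char buf[256];
	} req;
	struct nlattr *nest, *attr;
	int fd, ret, len = strlen(altname) + 1;

	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg));
	req.nlh.nlmsg_type = RTM_NEWLINKPROP;
	req.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
	req.ifm.ifi_family = AF_UNSPEC;
	req.ifm.ifi_index = ifindex;

	/* IFLA_PROP_LIST nest holding a single IFLA_ALT_IFNAME string */
	nest = (struct nlattr *)((char *)&req + NLMSG_ALIGN(req.nlh.nlmsg_len));
	nest->nla_type = NLA_F_NESTED | IFLA_PROP_LIST;
	attr = (struct nlattr *)((char *)nest + NLA_HDRLEN);
	attr->nla_type = IFLA_ALT_IFNAME;
	attr->nla_len = NLA_HDRLEN + len;
	memcpy((char *)attr + NLA_HDRLEN, altname, len);
	nest->nla_len = NLA_HDRLEN + NLA_ALIGN(attr->nla_len);
	req.nlh.nlmsg_len = NLMSG_ALIGN(req.nlh.nlmsg_len) + nest->nla_len;

	fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
	if (fd < 0)
		return -1;
	ret = send(fd, &req, req.nlh.nlmsg_len, 0) < 0 ? -1 : 0;
	close(fd);		/* ACK handling elided for brevity */
	return ret;
}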
static u16 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh)
{
struct net *net = sock_net(skb->sk);
@@ -5332,6 +5530,9 @@ void __init rtnetlink_init(void)
rtnl_register(PF_UNSPEC, RTM_GETROUTE, NULL, rtnl_dump_all, 0);
rtnl_register(PF_UNSPEC, RTM_GETNETCONF, NULL, rtnl_dump_all, 0);
+ rtnl_register(PF_UNSPEC, RTM_NEWLINKPROP, rtnl_newlinkprop, NULL, 0);
+ rtnl_register(PF_UNSPEC, RTM_DELLINKPROP, rtnl_dellinkprop, NULL, 0);
+
rtnl_register(PF_BRIDGE, RTM_NEWNEIGH, rtnl_fdb_add, NULL, 0);
rtnl_register(PF_BRIDGE, RTM_DELNEIGH, rtnl_fdb_del, NULL, 0);
rtnl_register(PF_BRIDGE, RTM_GETNEIGH, rtnl_fdb_get, rtnl_fdb_dump, 0);
diff --git a/net/core/skmsg.c b/net/core/skmsg.c
index ad31e4e53d0a..a469d2124f3f 100644
--- a/net/core/skmsg.c
+++ b/net/core/skmsg.c
@@ -793,15 +793,18 @@ static void sk_psock_strp_data_ready(struct sock *sk)
static void sk_psock_write_space(struct sock *sk)
{
struct sk_psock *psock;
- void (*write_space)(struct sock *sk);
+ void (*write_space)(struct sock *sk) = NULL;
rcu_read_lock();
psock = sk_psock(sk);
- if (likely(psock && sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)))
- schedule_work(&psock->work);
- write_space = psock->saved_write_space;
+ if (likely(psock)) {
+ if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
+ schedule_work(&psock->work);
+ write_space = psock->saved_write_space;
+ }
rcu_read_unlock();
- write_space(sk);
+ if (write_space)
+ write_space(sk);
}
int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock)
diff --git a/net/core/sock.c b/net/core/sock.c
index ac78a570e43a..043db3ce023e 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -333,7 +333,6 @@ EXPORT_SYMBOL(__sk_backlog_rcv);
static int sock_get_timeout(long timeo, void *optval, bool old_timeval)
{
struct __kernel_sock_timeval tv;
- int size;
if (timeo == MAX_SCHEDULE_TIMEOUT) {
tv.tv_sec = 0;
@@ -354,13 +353,11 @@ static int sock_get_timeout(long timeo, void *optval, bool old_timeval)
old_tv.tv_sec = tv.tv_sec;
old_tv.tv_usec = tv.tv_usec;
*(struct __kernel_old_timeval *)optval = old_tv;
- size = sizeof(old_tv);
- } else {
- *(struct __kernel_sock_timeval *)optval = tv;
- size = sizeof(tv);
+ return sizeof(old_tv);
}
- return size;
+ *(struct __kernel_sock_timeval *)optval = tv;
+ return sizeof(tv);
}
static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen, bool old_timeval)
@@ -521,7 +518,7 @@ int __sk_receive_skb(struct sock *sk, struct sk_buff *skb,
rc = sk_backlog_rcv(sk, skb);
- mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
+ mutex_release(&sk->sk_lock.dep_map, _RET_IP_);
} else if (sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf))) {
bh_unlock_sock(sk);
atomic_inc(&sk->sk_drops);
@@ -687,7 +684,8 @@ out:
return ret;
}
-static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
+static inline void sock_valbool_flag(struct sock *sk, enum sock_flags bit,
+ int valbool)
{
if (valbool)
sock_set_flag(sk, bit);
@@ -3015,7 +3013,7 @@ int sock_gettstamp(struct socket *sock, void __user *userstamp,
return -ENOENT;
if (ts.tv_sec == 0) {
ktime_t kt = ktime_get_real();
- sock_write_timestamp(sk, kt);;
+ sock_write_timestamp(sk, kt);
ts = ktime_to_timespec64(kt);
}
@@ -3042,7 +3040,7 @@ int sock_gettstamp(struct socket *sock, void __user *userstamp,
}
EXPORT_SYMBOL(sock_gettstamp);
-void sock_enable_timestamp(struct sock *sk, int flag)
+void sock_enable_timestamp(struct sock *sk, enum sock_flags flag)
{
if (!sock_flag(sk, flag)) {
unsigned long previous_flags = sk->sk_flags;
diff --git a/net/core/sock_reuseport.c b/net/core/sock_reuseport.c
index f3ceec93f392..f19f179538b9 100644
--- a/net/core/sock_reuseport.c
+++ b/net/core/sock_reuseport.c
@@ -356,8 +356,8 @@ int reuseport_detach_prog(struct sock *sk)
spin_lock_bh(&reuseport_lock);
reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
lockdep_is_held(&reuseport_lock));
- rcu_swap_protected(reuse->prog, old_prog,
- lockdep_is_held(&reuseport_lock));
+ old_prog = rcu_replace_pointer(reuse->prog, old_prog,
+ lockdep_is_held(&reuseport_lock));
spin_unlock_bh(&reuseport_lock);
if (!old_prog)
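rcu_swap_protected() is being replaced tree-wide by rcu_replace_pointer(), which returns the old pointer instead of swapping it through an in/out argument. A self-contained sketch of the idiom, assuming a hypothetical structure holding an RCU-managed program pointer:

#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct bpf_prog;

/* Hypothetical holder of an RCU-managed pointer; updates are serialized
 * by the spinlock.
 */
struct prog_slot {
	spinlock_t lock;
	struct bpf_prog __rcu *prog;
};

static struct bpf_prog *slot_replace(struct prog_slot *s, struct bpf_prog *new)
{
	struct bpf_prog *old;

	spin_lock(&s->lock);
	/* Returns the old value directly: shorthand for
	 * rcu_dereference_protected() followed by rcu_assign_pointer().
	 */
	old = rcu_replace_pointer(s->prog, new, lockdep_is_held(&s->lock));
	spin_unlock(&s->lock);

	return old;	/* caller frees it after a grace period */
}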
diff --git a/net/core/xdp.c b/net/core/xdp.c
index d7bf62ffbb5e..e334fad0a6b8 100644
--- a/net/core/xdp.c
+++ b/net/core/xdp.c
@@ -70,77 +70,63 @@ static void __xdp_mem_allocator_rcu_free(struct rcu_head *rcu)
xa = container_of(rcu, struct xdp_mem_allocator, rcu);
- /* Allocator have indicated safe to remove before this is called */
- if (xa->mem.type == MEM_TYPE_PAGE_POOL)
- page_pool_free(xa->page_pool);
-
/* Allow this ID to be reused */
ida_simple_remove(&mem_id_pool, xa->mem.id);
- /* Poison memory */
- xa->mem.id = 0xFFFF;
- xa->mem.type = 0xF0F0;
- xa->allocator = (void *)0xDEAD9001;
-
kfree(xa);
}
-static bool __mem_id_disconnect(int id, bool force)
+static void mem_xa_remove(struct xdp_mem_allocator *xa)
{
- struct xdp_mem_allocator *xa;
- bool safe_to_remove = true;
+ trace_mem_disconnect(xa);
mutex_lock(&mem_id_lock);
- xa = rhashtable_lookup_fast(mem_id_ht, &id, mem_id_rht_params);
- if (!xa) {
- mutex_unlock(&mem_id_lock);
- WARN(1, "Request remove non-existing id(%d), driver bug?", id);
- return true;
- }
- xa->disconnect_cnt++;
-
- /* Detects in-flight packet-pages for page_pool */
- if (xa->mem.type == MEM_TYPE_PAGE_POOL)
- safe_to_remove = page_pool_request_shutdown(xa->page_pool);
-
- trace_mem_disconnect(xa, safe_to_remove, force);
-
- if ((safe_to_remove || force) &&
- !rhashtable_remove_fast(mem_id_ht, &xa->node, mem_id_rht_params))
+ if (!rhashtable_remove_fast(mem_id_ht, &xa->node, mem_id_rht_params))
call_rcu(&xa->rcu, __xdp_mem_allocator_rcu_free);
mutex_unlock(&mem_id_lock);
- return (safe_to_remove|force);
}
-#define DEFER_TIME (msecs_to_jiffies(1000))
-#define DEFER_WARN_INTERVAL (30 * HZ)
-#define DEFER_MAX_RETRIES 120
+static void mem_allocator_disconnect(void *allocator)
+{
+ struct xdp_mem_allocator *xa;
+ struct rhashtable_iter iter;
+
+ rhashtable_walk_enter(mem_id_ht, &iter);
+ do {
+ rhashtable_walk_start(&iter);
+
+ while ((xa = rhashtable_walk_next(&iter)) && !IS_ERR(xa)) {
+ if (xa->allocator == allocator)
+ mem_xa_remove(xa);
+ }
+
+ rhashtable_walk_stop(&iter);
-static void mem_id_disconnect_defer_retry(struct work_struct *wq)
+ } while (xa == ERR_PTR(-EAGAIN));
+ rhashtable_walk_exit(&iter);
+}
+
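The do/while above is the standard rhashtable walk contract: rhashtable_walk_next() may return ERR_PTR(-EAGAIN) when the table is resized mid-walk, and the walk must then be restarted, which can revisit entries; the per-entry action therefore has to be idempotent (mem_xa_remove() is, since rhashtable_remove_fast() fails harmlessly the second time). Reduced skeleton for a hypothetical entry type:

#include <linux/err.h>
#include <linux/rhashtable.h>

struct entry;	/* hypothetical table entry */

static void walk_all(struct rhashtable *ht, void (*visit)(struct entry *e))
{
	struct rhashtable_iter iter;
	struct entry *e;

	rhashtable_walk_enter(ht, &iter);
	do {
		rhashtable_walk_start(&iter);

		while ((e = rhashtable_walk_next(&iter)) && !IS_ERR(e))
			visit(e);	/* may run twice for one entry */

		rhashtable_walk_stop(&iter);
	} while (e == ERR_PTR(-EAGAIN));	/* resized mid-walk: restart */
	rhashtable_walk_exit(&iter);
}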
+static void mem_id_disconnect(int id)
{
- struct delayed_work *dwq = to_delayed_work(wq);
- struct xdp_mem_allocator *xa = container_of(dwq, typeof(*xa), defer_wq);
- bool force = false;
+ struct xdp_mem_allocator *xa;
- if (xa->disconnect_cnt > DEFER_MAX_RETRIES)
- force = true;
+ mutex_lock(&mem_id_lock);
- if (__mem_id_disconnect(xa->mem.id, force))
+ xa = rhashtable_lookup_fast(mem_id_ht, &id, mem_id_rht_params);
+ if (!xa) {
+ mutex_unlock(&mem_id_lock);
+ WARN(1, "Request remove non-existing id(%d), driver bug?", id);
return;
+ }
- /* Periodic warning */
- if (time_after_eq(jiffies, xa->defer_warn)) {
- int sec = (s32)((u32)jiffies - (u32)xa->defer_start) / HZ;
+ trace_mem_disconnect(xa);
- pr_warn("%s() stalled mem.id=%u shutdown %d attempts %d sec\n",
- __func__, xa->mem.id, xa->disconnect_cnt, sec);
- xa->defer_warn = jiffies + DEFER_WARN_INTERVAL;
- }
+ if (!rhashtable_remove_fast(mem_id_ht, &xa->node, mem_id_rht_params))
+ call_rcu(&xa->rcu, __xdp_mem_allocator_rcu_free);
- /* Still not ready to be disconnected, retry later */
- schedule_delayed_work(&xa->defer_wq, DEFER_TIME);
+ mutex_unlock(&mem_id_lock);
}
void xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq)
@@ -153,38 +139,21 @@ void xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq)
return;
}
- if (xdp_rxq->mem.type != MEM_TYPE_PAGE_POOL &&
- xdp_rxq->mem.type != MEM_TYPE_ZERO_COPY) {
- return;
- }
-
if (id == 0)
return;
- if (__mem_id_disconnect(id, false))
- return;
-
- /* Could not disconnect, defer new disconnect attempt to later */
- mutex_lock(&mem_id_lock);
+ if (xdp_rxq->mem.type == MEM_TYPE_ZERO_COPY)
+ return mem_id_disconnect(id);
- xa = rhashtable_lookup_fast(mem_id_ht, &id, mem_id_rht_params);
- if (!xa) {
- mutex_unlock(&mem_id_lock);
- return;
+ if (xdp_rxq->mem.type == MEM_TYPE_PAGE_POOL) {
+ rcu_read_lock();
+ xa = rhashtable_lookup(mem_id_ht, &id, mem_id_rht_params);
+ page_pool_destroy(xa->page_pool);
+ rcu_read_unlock();
}
- xa->defer_start = jiffies;
- xa->defer_warn = jiffies + DEFER_WARN_INTERVAL;
-
- INIT_DELAYED_WORK(&xa->defer_wq, mem_id_disconnect_defer_retry);
- mutex_unlock(&mem_id_lock);
- schedule_delayed_work(&xa->defer_wq, DEFER_TIME);
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_unreg_mem_model);
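For the setup side that pairs with this teardown, a hypothetical driver RX-queue init might look as follows (struct and field names are assumptions; page_pool_params is shown with the fields available at this point). xdp_rxq_info_reg_mem_model() now takes its own reference on the pool via page_pool_use_xdp_mem(), so both the driver and the mem model call page_pool_destroy() and the refcount sorts out the last user:

#include <net/page_pool.h>
#include <net/xdp.h>

static int mydrv_rxq_init(struct mydrv_rxq *rxq, struct net_device *ndev)
{
	struct page_pool_params pp_params = {
		.order = 0,
		.pool_size = 256,		/* ring-sized, assumed */
		.nid = NUMA_NO_NODE,
		.dev = ndev->dev.parent,
	};
	int err;

	rxq->page_pool = page_pool_create(&pp_params);
	if (IS_ERR(rxq->page_pool))
		return PTR_ERR(rxq->page_pool);

	err = xdp_rxq_info_reg(&rxq->xdp_rxq, ndev, 0);
	if (err)
		goto err_free_pool;

	err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL,
					 rxq->page_pool);
	if (err)
		goto err_unreg;

	return 0;

err_unreg:
	xdp_rxq_info_unreg(&rxq->xdp_rxq);
err_free_pool:
	page_pool_destroy(rxq->page_pool);
	return err;
}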
-/* This unregister operation will also cleanup and destroy the
- * allocator. The page_pool_free() operation is first called when it's
- * safe to remove, possibly deferred to a workqueue.
- */
void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq)
{
/* Simplify driver cleanup code paths, allow unreg "unused" */
@@ -371,7 +340,7 @@ int xdp_rxq_info_reg_mem_model(struct xdp_rxq_info *xdp_rxq,
}
if (type == MEM_TYPE_PAGE_POOL)
- page_pool_get(xdp_alloc->page_pool);
+ page_pool_use_xdp_mem(allocator, mem_allocator_disconnect);
mutex_unlock(&mem_id_lock);
@@ -386,7 +355,7 @@ EXPORT_SYMBOL_GPL(xdp_rxq_info_reg_mem_model);
/* XDP RX runs under NAPI protection, and in different delivery error
* scenarios (e.g. queue full), it is possible to return the xdp_frame
- * while still leveraging this protection. The @napi_direct boolian
+ * while still leveraging this protection. The @napi_direct boolean
* is used for those call sites, thus allowing for faster recycling
* of xdp_frames/pages in those cases.
*/
@@ -402,15 +371,8 @@ static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
/* mem->id is valid, checked in xdp_rxq_info_reg_mem_model() */
xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
page = virt_to_head_page(data);
- if (likely(xa)) {
- napi_direct &= !xdp_return_frame_no_direct();
- page_pool_put_page(xa->page_pool, page, napi_direct);
- } else {
- /* Hopefully stack show who to blame for late return */
- WARN_ONCE(1, "page_pool gone mem.id=%d", mem->id);
- trace_mem_return_failed(mem, page);
- put_page(page);
- }
+ napi_direct &= !xdp_return_frame_no_direct();
+ page_pool_put_page(xa->page_pool, page, napi_direct);
rcu_read_unlock();
break;
case MEM_TYPE_PAGE_SHARED:
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 5bad08dc4316..a52e8ba1ced0 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -944,7 +944,7 @@ int inet_dccp_listen(struct socket *sock, int backlog)
if (!((1 << old_state) & (DCCPF_CLOSED | DCCPF_LISTEN)))
goto out;
- sk->sk_max_ack_backlog = backlog;
+ WRITE_ONCE(sk->sk_max_ack_backlog, backlog);
/* Really, if the socket is already in listen state
* we can only allow the backlog to be adjusted.
*/
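sk_max_ack_backlog is written under the socket lock but read locklessly, so stores are now annotated with WRITE_ONCE() to pair with READ_ONCE() on the reader side and document the intentional data race for KCSAN. A reader-side sketch, assuming the paired READ_ONCE() conversions from the same series:

#include <net/sock.h>

/* Lockless reader: both fields may change underneath us, so each access
 * is marked to keep the tearing-free semantics explicit.
 */
static bool acceptq_full(const struct sock *sk)
{
	return READ_ONCE(sk->sk_ack_backlog) >
	       READ_ONCE(sk->sk_max_ack_backlog);
}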
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index 3349ea81f901..e19a92a62e14 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -1091,7 +1091,7 @@ static int dn_accept(struct socket *sock, struct socket *newsock, int flags,
}
cb = DN_SKB_CB(skb);
- sk->sk_ack_backlog--;
+ sk_acceptq_removed(sk);
newsk = dn_alloc_sock(sock_net(sk), newsock, sk->sk_allocation, kern);
if (newsk == NULL) {
release_sock(sk);
diff --git a/net/decnet/dn_nsp_in.c b/net/decnet/dn_nsp_in.c
index e4161e0c86aa..c68503a18025 100644
--- a/net/decnet/dn_nsp_in.c
+++ b/net/decnet/dn_nsp_in.c
@@ -328,7 +328,7 @@ static void dn_nsp_conn_init(struct sock *sk, struct sk_buff *skb)
return;
}
- sk->sk_ack_backlog++;
+ sk_acceptq_added(sk);
skb_queue_tail(&sk->sk_receive_queue, skb);
sk->sk_state_change(sk);
}
diff --git a/net/dsa/Kconfig b/net/dsa/Kconfig
index 29e2bd5cc5af..1e6c3cac11e6 100644
--- a/net/dsa/Kconfig
+++ b/net/dsa/Kconfig
@@ -20,7 +20,7 @@ if NET_DSA
# tagging formats
config NET_DSA_TAG_8021Q
- tristate "Tag driver for switches using custom 802.1Q VLAN headers"
+ tristate
select VLAN_8021Q
help
Unlike the other tagging protocols, the 802.1Q config option simply
@@ -79,6 +79,13 @@ config NET_DSA_TAG_KSZ
Say Y if you want to enable support for tagging frames for the
Microchip 8795/9477/9893 families of switches.
+config NET_DSA_TAG_OCELOT
+ tristate "Tag driver for Ocelot family of switches"
+ select PACKING
+ help
+ Say Y or M if you want to enable support for tagging frames for the
+ Ocelot switches (VSC7511, VSC7512, VSC7513, VSC7514, VSC9959).
+
config NET_DSA_TAG_QCA
tristate "Tag driver for Qualcomm Atheros QCA8K switches"
help
diff --git a/net/dsa/Makefile b/net/dsa/Makefile
index 2c6d286f0511..9a482c38bdb1 100644
--- a/net/dsa/Makefile
+++ b/net/dsa/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_NET_DSA_TAG_GSWIP) += tag_gswip.o
obj-$(CONFIG_NET_DSA_TAG_KSZ) += tag_ksz.o
obj-$(CONFIG_NET_DSA_TAG_LAN9303) += tag_lan9303.o
obj-$(CONFIG_NET_DSA_TAG_MTK) += tag_mtk.o
+obj-$(CONFIG_NET_DSA_TAG_OCELOT) += tag_ocelot.o
obj-$(CONFIG_NET_DSA_TAG_QCA) += tag_qca.o
obj-$(CONFIG_NET_DSA_TAG_SJA1105) += tag_sja1105.o
obj-$(CONFIG_NET_DSA_TAG_TRAILER) += tag_trailer.o
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index 43120a3fb06f..17281fec710c 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -246,7 +246,9 @@ static int dsa_switch_rcv(struct sk_buff *skb, struct net_device *dev,
#ifdef CONFIG_PM_SLEEP
static bool dsa_is_port_initialized(struct dsa_switch *ds, int p)
{
- return dsa_is_user_port(ds, p) && ds->ports[p].slave;
+ const struct dsa_port *dp = dsa_to_port(ds, p);
+
+ return dp->type == DSA_PORT_TYPE_USER && dp->slave;
}
int dsa_switch_suspend(struct dsa_switch *ds)
@@ -258,7 +260,7 @@ int dsa_switch_suspend(struct dsa_switch *ds)
if (!dsa_is_port_initialized(ds, i))
continue;
- ret = dsa_slave_suspend(ds->ports[i].slave);
+ ret = dsa_slave_suspend(dsa_to_port(ds, i)->slave);
if (ret)
return ret;
}
@@ -285,7 +287,7 @@ int dsa_switch_resume(struct dsa_switch *ds)
if (!dsa_is_port_initialized(ds, i))
continue;
- ret = dsa_slave_resume(ds->ports[i].slave);
+ ret = dsa_slave_resume(dsa_to_port(ds, i)->slave);
if (ret)
return ret;
}
@@ -329,6 +331,91 @@ int call_dsa_notifiers(unsigned long val, struct net_device *dev,
}
EXPORT_SYMBOL_GPL(call_dsa_notifiers);
+int dsa_devlink_param_get(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct dsa_devlink_priv *dl_priv;
+ struct dsa_switch *ds;
+
+ dl_priv = devlink_priv(dl);
+ ds = dl_priv->ds;
+
+ if (!ds->ops->devlink_param_get)
+ return -EOPNOTSUPP;
+
+ return ds->ops->devlink_param_get(ds, id, ctx);
+}
+EXPORT_SYMBOL_GPL(dsa_devlink_param_get);
+
+int dsa_devlink_param_set(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct dsa_devlink_priv *dl_priv;
+ struct dsa_switch *ds;
+
+ dl_priv = devlink_priv(dl);
+ ds = dl_priv->ds;
+
+ if (!ds->ops->devlink_param_set)
+ return -EOPNOTSUPP;
+
+ return ds->ops->devlink_param_set(ds, id, ctx);
+}
+EXPORT_SYMBOL_GPL(dsa_devlink_param_set);
+
+int dsa_devlink_params_register(struct dsa_switch *ds,
+ const struct devlink_param *params,
+ size_t params_count)
+{
+ return devlink_params_register(ds->devlink, params, params_count);
+}
+EXPORT_SYMBOL_GPL(dsa_devlink_params_register);
+
+void dsa_devlink_params_unregister(struct dsa_switch *ds,
+ const struct devlink_param *params,
+ size_t params_count)
+{
+ devlink_params_unregister(ds->devlink, params, params_count);
+}
+EXPORT_SYMBOL_GPL(dsa_devlink_params_unregister);
+
+int dsa_devlink_resource_register(struct dsa_switch *ds,
+ const char *resource_name,
+ u64 resource_size,
+ u64 resource_id,
+ u64 parent_resource_id,
+ const struct devlink_resource_size_params *size_params)
+{
+ return devlink_resource_register(ds->devlink, resource_name,
+ resource_size, resource_id,
+ parent_resource_id,
+ size_params);
+}
+EXPORT_SYMBOL_GPL(dsa_devlink_resource_register);
+
+void dsa_devlink_resources_unregister(struct dsa_switch *ds)
+{
+ devlink_resources_unregister(ds->devlink, NULL);
+}
+EXPORT_SYMBOL_GPL(dsa_devlink_resources_unregister);
+
+void dsa_devlink_resource_occ_get_register(struct dsa_switch *ds,
+ u64 resource_id,
+ devlink_resource_occ_get_t *occ_get,
+ void *occ_get_priv)
+{
+ return devlink_resource_occ_get_register(ds->devlink, resource_id,
+ occ_get, occ_get_priv);
+}
+EXPORT_SYMBOL_GPL(dsa_devlink_resource_occ_get_register);
+
+void dsa_devlink_resource_occ_get_unregister(struct dsa_switch *ds,
+ u64 resource_id)
+{
+ devlink_resource_occ_get_unregister(ds->devlink, resource_id);
+}
+EXPORT_SYMBOL_GPL(dsa_devlink_resource_occ_get_unregister);
+
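These wrappers keep ds->devlink private to the DSA core while still letting switch drivers expose devlink params and resources. A sketch of a hypothetical driver using them (the param id, name, and callbacks are illustrative; the get/set wrappers dispatch to ds->ops->devlink_param_get/set, which the driver must implement):

#include <net/dsa.h>
#include <net/devlink.h>

/* Hypothetical driver-specific parameter id, picked above the generic range */
enum {
	MYDRV_PARAM_ID_FOO = DEVLINK_PARAM_GENERIC_ID_MAX + 1,
};

static const struct devlink_param mydrv_devlink_params[] = {
	DEVLINK_PARAM_DRIVER(MYDRV_PARAM_ID_FOO, "foo",
			     DEVLINK_PARAM_TYPE_U32,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     dsa_devlink_param_get, dsa_devlink_param_set,
			     NULL),
};

/* Called from the driver's dsa_switch_ops::setup */
static int mydrv_setup_params(struct dsa_switch *ds)
{
	return dsa_devlink_params_register(ds, mydrv_devlink_params,
					   ARRAY_SIZE(mydrv_devlink_params));
}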
static int __init dsa_init_module(void)
{
int rc;
diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
index 716d265ba8ca..9ef2caa13f27 100644
--- a/net/dsa/dsa2.c
+++ b/net/dsa/dsa2.c
@@ -45,6 +45,10 @@ static struct dsa_switch_tree *dsa_tree_alloc(int index)
dst->index = index;
+ INIT_LIST_HEAD(&dst->rtable);
+
+ INIT_LIST_HEAD(&dst->ports);
+
INIT_LIST_HEAD(&dst->list);
list_add_tail(&dst->list, &dsa_tree_list);
@@ -111,24 +115,38 @@ static bool dsa_port_is_user(struct dsa_port *dp)
static struct dsa_port *dsa_tree_find_port_by_node(struct dsa_switch_tree *dst,
struct device_node *dn)
{
- struct dsa_switch *ds;
struct dsa_port *dp;
- int device, port;
- for (device = 0; device < DSA_MAX_SWITCHES; device++) {
- ds = dst->ds[device];
- if (!ds)
- continue;
+ list_for_each_entry(dp, &dst->ports, list)
+ if (dp->dn == dn)
+ return dp;
- for (port = 0; port < ds->num_ports; port++) {
- dp = &ds->ports[port];
+ return NULL;
+}
- if (dp->dn == dn)
- return dp;
- }
- }
+struct dsa_link *dsa_link_touch(struct dsa_port *dp, struct dsa_port *link_dp)
+{
+ struct dsa_switch *ds = dp->ds;
+ struct dsa_switch_tree *dst;
+ struct dsa_link *dl;
- return NULL;
+ dst = ds->dst;
+
+ list_for_each_entry(dl, &dst->rtable, list)
+ if (dl->dp == dp && dl->link_dp == link_dp)
+ return dl;
+
+ dl = kzalloc(sizeof(*dl), GFP_KERNEL);
+ if (!dl)
+ return NULL;
+
+ dl->dp = dp;
+ dl->link_dp = link_dp;
+
+ INIT_LIST_HEAD(&dl->list);
+ list_add_tail(&dl->list, &dst->rtable);
+
+ return dl;
}
static bool dsa_port_setup_routing_table(struct dsa_port *dp)
@@ -138,6 +156,7 @@ static bool dsa_port_setup_routing_table(struct dsa_port *dp)
struct device_node *dn = dp->dn;
struct of_phandle_iterator it;
struct dsa_port *link_dp;
+ struct dsa_link *dl;
int err;
of_for_each_phandle(&it, err, dn, "link", NULL, 0) {
@@ -147,24 +166,22 @@ static bool dsa_port_setup_routing_table(struct dsa_port *dp)
return false;
}
- ds->rtable[link_dp->ds->index] = dp->index;
+ dl = dsa_link_touch(dp, link_dp);
+ if (!dl) {
+ of_node_put(it.node);
+ return false;
+ }
}
return true;
}
-static bool dsa_switch_setup_routing_table(struct dsa_switch *ds)
+static bool dsa_tree_setup_routing_table(struct dsa_switch_tree *dst)
{
bool complete = true;
struct dsa_port *dp;
- int i;
-
- for (i = 0; i < DSA_MAX_SWITCHES; i++)
- ds->rtable[i] = DSA_RTABLE_NONE;
-
- for (i = 0; i < ds->num_ports; i++) {
- dp = &ds->ports[i];
+ list_for_each_entry(dp, &dst->ports, list) {
if (dsa_port_is_dsa(dp)) {
complete = dsa_port_setup_routing_table(dp);
if (!complete)
@@ -175,81 +192,42 @@ static bool dsa_switch_setup_routing_table(struct dsa_switch *ds)
return complete;
}
-static bool dsa_tree_setup_routing_table(struct dsa_switch_tree *dst)
-{
- struct dsa_switch *ds;
- bool complete = true;
- int device;
-
- for (device = 0; device < DSA_MAX_SWITCHES; device++) {
- ds = dst->ds[device];
- if (!ds)
- continue;
-
- complete = dsa_switch_setup_routing_table(ds);
- if (!complete)
- break;
- }
-
- return complete;
-}
-
static struct dsa_port *dsa_tree_find_first_cpu(struct dsa_switch_tree *dst)
{
- struct dsa_switch *ds;
struct dsa_port *dp;
- int device, port;
- for (device = 0; device < DSA_MAX_SWITCHES; device++) {
- ds = dst->ds[device];
- if (!ds)
- continue;
-
- for (port = 0; port < ds->num_ports; port++) {
- dp = &ds->ports[port];
-
- if (dsa_port_is_cpu(dp))
- return dp;
- }
- }
+ list_for_each_entry(dp, &dst->ports, list)
+ if (dsa_port_is_cpu(dp))
+ return dp;
return NULL;
}
static int dsa_tree_setup_default_cpu(struct dsa_switch_tree *dst)
{
- struct dsa_switch *ds;
- struct dsa_port *dp;
- int device, port;
+ struct dsa_port *cpu_dp, *dp;
- /* DSA currently only supports a single CPU port */
- dst->cpu_dp = dsa_tree_find_first_cpu(dst);
- if (!dst->cpu_dp) {
- pr_warn("Tree has no master device\n");
+ cpu_dp = dsa_tree_find_first_cpu(dst);
+ if (!cpu_dp) {
+ pr_err("DSA: tree %d has no CPU port\n", dst->index);
return -EINVAL;
}
/* Assign the default CPU port to all ports of the fabric */
- for (device = 0; device < DSA_MAX_SWITCHES; device++) {
- ds = dst->ds[device];
- if (!ds)
- continue;
-
- for (port = 0; port < ds->num_ports; port++) {
- dp = &ds->ports[port];
-
- if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
- dp->cpu_dp = dst->cpu_dp;
- }
- }
+ list_for_each_entry(dp, &dst->ports, list)
+ if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
+ dp->cpu_dp = cpu_dp;
return 0;
}
static void dsa_tree_teardown_default_cpu(struct dsa_switch_tree *dst)
{
- /* DSA currently only supports a single CPU port */
- dst->cpu_dp = NULL;
+ struct dsa_port *dp;
+
+ list_for_each_entry(dp, &dst->ports, list)
+ if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
+ dp->cpu_dp = NULL;
}
static int dsa_port_setup(struct dsa_port *dp)
@@ -265,6 +243,9 @@ static int dsa_port_setup(struct dsa_port *dp)
bool dsa_port_enabled = false;
int err = 0;
+ if (dp->setup)
+ return 0;
+
switch (dp->type) {
case DSA_PORT_TYPE_UNUSED:
dsa_port_disable(dp);
@@ -333,14 +314,21 @@ static int dsa_port_setup(struct dsa_port *dp)
dsa_port_link_unregister_of(dp);
if (err && devlink_port_registered)
devlink_port_unregister(dlp);
+ if (err)
+ return err;
- return err;
+ dp->setup = true;
+
+ return 0;
}
static void dsa_port_teardown(struct dsa_port *dp)
{
struct devlink_port *dlp = &dp->devlink_port;
+ if (!dp->setup)
+ return;
+
switch (dp->type) {
case DSA_PORT_TYPE_UNUSED:
break;
@@ -363,11 +351,17 @@ static void dsa_port_teardown(struct dsa_port *dp)
}
break;
}
+
+ dp->setup = false;
}
static int dsa_switch_setup(struct dsa_switch *ds)
{
- int err = 0;
+ struct dsa_devlink_priv *dl_priv;
+ int err;
+
+ if (ds->setup)
+ return 0;
/* Initialize ds->phys_mii_mask before registering the slave MDIO bus
* driver and before ops->setup() has run, since the switch drivers and
@@ -379,9 +373,11 @@ static int dsa_switch_setup(struct dsa_switch *ds)
/* Add the switch to devlink before calling setup, so that setup can
* add dpipe tables
*/
- ds->devlink = devlink_alloc(&dsa_devlink_ops, 0);
+ ds->devlink = devlink_alloc(&dsa_devlink_ops, sizeof(*dl_priv));
if (!ds->devlink)
return -ENOMEM;
+ dl_priv = devlink_priv(ds->devlink);
+ dl_priv->ds = ds;
err = devlink_register(ds->devlink, ds->dev);
if (err)
@@ -395,6 +391,8 @@ static int dsa_switch_setup(struct dsa_switch *ds)
if (err < 0)
goto unregister_notifier;
+ devlink_params_publish(ds->devlink);
+
if (!ds->slave_mii_bus && ds->ops->phy_read) {
ds->slave_mii_bus = devm_mdiobus_alloc(ds->dev);
if (!ds->slave_mii_bus) {
@@ -409,6 +407,8 @@ static int dsa_switch_setup(struct dsa_switch *ds)
goto unregister_notifier;
}
+ ds->setup = true;
+
return 0;
unregister_notifier:
@@ -424,6 +424,9 @@ free_devlink:
static void dsa_switch_teardown(struct dsa_switch *ds)
{
+ if (!ds->setup)
+ return;
+
if (ds->slave_mii_bus && ds->ops->phy_read)
mdiobus_unregister(ds->slave_mii_bus);
@@ -438,95 +441,72 @@ static void dsa_switch_teardown(struct dsa_switch *ds)
ds->devlink = NULL;
}
+ ds->setup = false;
}
static int dsa_tree_setup_switches(struct dsa_switch_tree *dst)
{
- struct dsa_switch *ds;
struct dsa_port *dp;
- int device, port, i;
- int err = 0;
-
- for (device = 0; device < DSA_MAX_SWITCHES; device++) {
- ds = dst->ds[device];
- if (!ds)
- continue;
+ int err;
- err = dsa_switch_setup(ds);
+ list_for_each_entry(dp, &dst->ports, list) {
+ err = dsa_switch_setup(dp->ds);
if (err)
- goto switch_teardown;
-
- for (port = 0; port < ds->num_ports; port++) {
- dp = &ds->ports[port];
+ goto teardown;
+ }
- err = dsa_port_setup(dp);
- if (err)
- goto ports_teardown;
- }
+ list_for_each_entry(dp, &dst->ports, list) {
+ err = dsa_port_setup(dp);
+ if (err)
+ goto teardown;
}
return 0;
-ports_teardown:
- for (i = 0; i < port; i++)
- dsa_port_teardown(&ds->ports[i]);
+teardown:
+ list_for_each_entry(dp, &dst->ports, list)
+ dsa_port_teardown(dp);
- dsa_switch_teardown(ds);
-
-switch_teardown:
- for (i = 0; i < device; i++) {
- ds = dst->ds[i];
- if (!ds)
- continue;
-
- for (port = 0; port < ds->num_ports; port++) {
- dp = &ds->ports[port];
-
- dsa_port_teardown(dp);
- }
-
- dsa_switch_teardown(ds);
- }
+ list_for_each_entry(dp, &dst->ports, list)
+ dsa_switch_teardown(dp->ds);
return err;
}
static void dsa_tree_teardown_switches(struct dsa_switch_tree *dst)
{
- struct dsa_switch *ds;
struct dsa_port *dp;
- int device, port;
-
- for (device = 0; device < DSA_MAX_SWITCHES; device++) {
- ds = dst->ds[device];
- if (!ds)
- continue;
- for (port = 0; port < ds->num_ports; port++) {
- dp = &ds->ports[port];
+ list_for_each_entry(dp, &dst->ports, list)
+ dsa_port_teardown(dp);
- dsa_port_teardown(dp);
- }
-
- dsa_switch_teardown(ds);
- }
+ list_for_each_entry(dp, &dst->ports, list)
+ dsa_switch_teardown(dp->ds);
}
static int dsa_tree_setup_master(struct dsa_switch_tree *dst)
{
- struct dsa_port *cpu_dp = dst->cpu_dp;
- struct net_device *master = cpu_dp->master;
+ struct dsa_port *dp;
+ int err;
- /* DSA currently supports a single pair of CPU port and master device */
- return dsa_master_setup(master, cpu_dp);
+ list_for_each_entry(dp, &dst->ports, list) {
+ if (dsa_port_is_cpu(dp)) {
+ err = dsa_master_setup(dp->master, dp);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
}
static void dsa_tree_teardown_master(struct dsa_switch_tree *dst)
{
- struct dsa_port *cpu_dp = dst->cpu_dp;
- struct net_device *master = cpu_dp->master;
+ struct dsa_port *dp;
- return dsa_master_teardown(master);
+ list_for_each_entry(dp, &dst->ports, list)
+ if (dsa_port_is_cpu(dp))
+ dsa_master_teardown(dp->master);
}
static int dsa_tree_setup(struct dsa_switch_tree *dst)
@@ -572,6 +552,8 @@ teardown_default_cpu:
static void dsa_tree_teardown(struct dsa_switch_tree *dst)
{
+ struct dsa_link *dl, *next;
+
if (!dst->setup)
return;
@@ -581,39 +563,36 @@ static void dsa_tree_teardown(struct dsa_switch_tree *dst)
dsa_tree_teardown_default_cpu(dst);
+ list_for_each_entry_safe(dl, next, &dst->rtable, list) {
+ list_del(&dl->list);
+ kfree(dl);
+ }
+
pr_info("DSA: tree %d torn down\n", dst->index);
dst->setup = false;
}
-static void dsa_tree_remove_switch(struct dsa_switch_tree *dst,
- unsigned int index)
+static struct dsa_port *dsa_port_touch(struct dsa_switch *ds, int index)
{
- dsa_tree_teardown(dst);
+ struct dsa_switch_tree *dst = ds->dst;
+ struct dsa_port *dp;
- dst->ds[index] = NULL;
- dsa_tree_put(dst);
-}
+ list_for_each_entry(dp, &dst->ports, list)
+ if (dp->ds == ds && dp->index == index)
+ return dp;
-static int dsa_tree_add_switch(struct dsa_switch_tree *dst,
- struct dsa_switch *ds)
-{
- unsigned int index = ds->index;
- int err;
+ dp = kzalloc(sizeof(*dp), GFP_KERNEL);
+ if (!dp)
+ return NULL;
- if (dst->ds[index])
- return -EBUSY;
+ dp->ds = ds;
+ dp->index = index;
- dsa_tree_get(dst);
- dst->ds[index] = ds;
+ INIT_LIST_HEAD(&dp->list);
+ list_add_tail(&dp->list, &dst->ports);
- err = dsa_tree_setup(dst);
- if (err) {
- dst->ds[index] = NULL;
- dsa_tree_put(dst);
- }
-
- return err;
+ return dp;
}
static int dsa_port_parse_user(struct dsa_port *dp, const char *name)
@@ -708,7 +687,7 @@ static int dsa_switch_parse_ports_of(struct dsa_switch *ds,
goto out_put_node;
}
- dp = &ds->ports[reg];
+ dp = dsa_to_port(ds, reg);
err = dsa_port_parse_of(dp, port);
if (err)
@@ -732,8 +711,6 @@ static int dsa_switch_parse_member_of(struct dsa_switch *ds,
return sz;
ds->index = m[1];
- if (ds->index >= DSA_MAX_SWITCHES)
- return -EINVAL;
ds->dst = dsa_tree_touch(m[0]);
if (!ds->dst)
@@ -742,6 +719,20 @@ static int dsa_switch_parse_member_of(struct dsa_switch *ds,
return 0;
}
+static int dsa_switch_touch_ports(struct dsa_switch *ds)
+{
+ struct dsa_port *dp;
+ int port;
+
+ for (port = 0; port < ds->num_ports; port++) {
+ dp = dsa_port_touch(ds, port);
+ if (!dp)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
static int dsa_switch_parse_of(struct dsa_switch *ds, struct device_node *dn)
{
int err;
@@ -750,6 +741,10 @@ static int dsa_switch_parse_of(struct dsa_switch *ds, struct device_node *dn)
if (err)
return err;
+ err = dsa_switch_touch_ports(ds);
+ if (err)
+ return err;
+
return dsa_switch_parse_ports_of(ds, dn);
}
@@ -787,7 +782,7 @@ static int dsa_switch_parse_ports(struct dsa_switch *ds,
for (i = 0; i < DSA_MAX_PORTS; i++) {
name = cd->port_names[i];
dev = cd->netdev[i];
- dp = &ds->ports[i];
+ dp = dsa_to_port(ds, i);
if (!name)
continue;
@@ -807,6 +802,8 @@ static int dsa_switch_parse_ports(struct dsa_switch *ds,
static int dsa_switch_parse(struct dsa_switch *ds, struct dsa_chip_data *cd)
{
+ int err;
+
ds->cd = cd;
/* We don't support interconnected switches nor multiple trees via
@@ -817,22 +814,29 @@ static int dsa_switch_parse(struct dsa_switch *ds, struct dsa_chip_data *cd)
if (!ds->dst)
return -ENOMEM;
- return dsa_switch_parse_ports(ds, cd);
-}
-
-static int dsa_switch_add(struct dsa_switch *ds)
-{
- struct dsa_switch_tree *dst = ds->dst;
+ err = dsa_switch_touch_ports(ds);
+ if (err)
+ return err;
- return dsa_tree_add_switch(dst, ds);
+ return dsa_switch_parse_ports(ds, cd);
}
static int dsa_switch_probe(struct dsa_switch *ds)
{
- struct dsa_chip_data *pdata = ds->dev->platform_data;
- struct device_node *np = ds->dev->of_node;
+ struct dsa_switch_tree *dst;
+ struct dsa_chip_data *pdata;
+ struct device_node *np;
int err;
+ if (!ds->dev)
+ return -ENODEV;
+
+ pdata = ds->dev->platform_data;
+ np = ds->dev->of_node;
+
+ if (!ds->num_ports)
+ return -EINVAL;
+
if (np)
err = dsa_switch_parse_of(ds, np);
else if (pdata)
@@ -843,29 +847,14 @@ static int dsa_switch_probe(struct dsa_switch *ds)
if (err)
return err;
- return dsa_switch_add(ds);
-}
-
-struct dsa_switch *dsa_switch_alloc(struct device *dev, size_t n)
-{
- struct dsa_switch *ds;
- int i;
-
- ds = devm_kzalloc(dev, struct_size(ds, ports, n), GFP_KERNEL);
- if (!ds)
- return NULL;
-
- ds->dev = dev;
- ds->num_ports = n;
-
- for (i = 0; i < ds->num_ports; ++i) {
- ds->ports[i].index = i;
- ds->ports[i].ds = ds;
- }
+ dst = ds->dst;
+ dsa_tree_get(dst);
+ err = dsa_tree_setup(dst);
+ if (err)
+ dsa_tree_put(dst);
- return ds;
+ return err;
}
-EXPORT_SYMBOL_GPL(dsa_switch_alloc);
int dsa_register_switch(struct dsa_switch *ds)
{
@@ -883,9 +872,16 @@ EXPORT_SYMBOL_GPL(dsa_register_switch);
static void dsa_switch_remove(struct dsa_switch *ds)
{
struct dsa_switch_tree *dst = ds->dst;
- unsigned int index = ds->index;
+ struct dsa_port *dp, *next;
- dsa_tree_remove_switch(dst, index);
+ dsa_tree_teardown(dst);
+
+ list_for_each_entry_safe(dp, next, &dst->ports, list) {
+ list_del(&dp->list);
+ kfree(dp);
+ }
+
+ dsa_tree_put(dst);
}
void dsa_unregister_switch(struct dsa_switch *ds)
diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h
index 12f8c7ee4dd8..2dd86d9bcda9 100644
--- a/net/dsa/dsa_priv.h
+++ b/net/dsa/dsa_priv.h
@@ -104,25 +104,14 @@ static inline struct net_device *dsa_master_find_slave(struct net_device *dev,
{
struct dsa_port *cpu_dp = dev->dsa_ptr;
struct dsa_switch_tree *dst = cpu_dp->dst;
- struct dsa_switch *ds;
- struct dsa_port *slave_port;
+ struct dsa_port *dp;
- if (device < 0 || device >= DSA_MAX_SWITCHES)
- return NULL;
+ list_for_each_entry(dp, &dst->ports, list)
+ if (dp->ds->index == device && dp->index == port &&
+ dp->type == DSA_PORT_TYPE_USER)
+ return dp->slave;
- ds = dst->ds[device];
- if (!ds)
- return NULL;
-
- if (port < 0 || port >= ds->num_ports)
- return NULL;
-
- slave_port = &ds->ports[port];
-
- if (unlikely(slave_port->type != DSA_PORT_TYPE_USER))
- return NULL;
-
- return slave_port->slave;
+ return NULL;
}
/* port.c */
@@ -164,8 +153,8 @@ void dsa_port_link_unregister_of(struct dsa_port *dp);
void dsa_port_phylink_validate(struct phylink_config *config,
unsigned long *supported,
struct phylink_link_state *state);
-int dsa_port_phylink_mac_link_state(struct phylink_config *config,
- struct phylink_link_state *state);
+void dsa_port_phylink_mac_pcs_get_state(struct phylink_config *config,
+ struct phylink_link_state *state);
void dsa_port_phylink_mac_config(struct phylink_config *config,
unsigned int mode,
const struct phylink_link_state *state);
diff --git a/net/dsa/port.c b/net/dsa/port.c
index 9b54e5a76297..46ac9ba21987 100644
--- a/net/dsa/port.c
+++ b/net/dsa/port.c
@@ -429,19 +429,22 @@ void dsa_port_phylink_validate(struct phylink_config *config,
}
EXPORT_SYMBOL_GPL(dsa_port_phylink_validate);
-int dsa_port_phylink_mac_link_state(struct phylink_config *config,
- struct phylink_link_state *state)
+void dsa_port_phylink_mac_pcs_get_state(struct phylink_config *config,
+ struct phylink_link_state *state)
{
struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
struct dsa_switch *ds = dp->ds;
- /* Only called for SGMII and 802.3z */
- if (!ds->ops->phylink_mac_link_state)
- return -EOPNOTSUPP;
+ /* Only called for inband modes */
+ if (!ds->ops->phylink_mac_link_state) {
+ state->link = 0;
+ return;
+ }
- return ds->ops->phylink_mac_link_state(ds, dp->index, state);
+ if (ds->ops->phylink_mac_link_state(ds, dp->index, state) < 0)
+ state->link = 0;
}
-EXPORT_SYMBOL_GPL(dsa_port_phylink_mac_link_state);
+EXPORT_SYMBOL_GPL(dsa_port_phylink_mac_pcs_get_state);
void dsa_port_phylink_mac_config(struct phylink_config *config,
unsigned int mode,
@@ -510,7 +513,7 @@ EXPORT_SYMBOL_GPL(dsa_port_phylink_mac_link_up);
const struct phylink_mac_ops dsa_port_phylink_mac_ops = {
.validate = dsa_port_phylink_validate,
- .mac_link_state = dsa_port_phylink_mac_link_state,
+ .mac_pcs_get_state = dsa_port_phylink_mac_pcs_get_state,
.mac_config = dsa_port_phylink_mac_config,
.mac_an_restart = dsa_port_phylink_mac_an_restart,
.mac_link_down = dsa_port_phylink_mac_link_down,
@@ -561,7 +564,7 @@ static int dsa_port_fixed_link_register_of(struct dsa_port *dp)
struct dsa_switch *ds = dp->ds;
struct phy_device *phydev;
int port = dp->index;
- int mode;
+ phy_interface_t mode;
int err;
err = of_phy_register_fixed_link(dn);
@@ -574,8 +577,8 @@ static int dsa_port_fixed_link_register_of(struct dsa_port *dp)
phydev = of_phy_find_device(dn);
- mode = of_get_phy_mode(dn);
- if (mode < 0)
+ err = of_get_phy_mode(dn, &mode);
+ if (err)
mode = PHY_INTERFACE_MODE_NA;
phydev->interface = mode;
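of_get_phy_mode() now returns 0 or a negative errno and hands the mode back through a phy_interface_t pointer, rather than multiplexing both into a single int return. Every caller converted in this hunk and the ones below falls back to PHY_INTERFACE_MODE_NA on error, as in this sketch:

#include <linux/of_net.h>
#include <linux/phy.h>

static phy_interface_t mydrv_get_phy_mode(struct device_node *dn)
{
	phy_interface_t mode;

	/* Property absent or unparseable: treat the mode as unspecified */
	if (of_get_phy_mode(dn, &mode))
		mode = PHY_INTERFACE_MODE_NA;

	return mode;
}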
@@ -593,10 +596,11 @@ static int dsa_port_phylink_register(struct dsa_port *dp)
{
struct dsa_switch *ds = dp->ds;
struct device_node *port_dn = dp->dn;
- int mode, err;
+ phy_interface_t mode;
+ int err;
- mode = of_get_phy_mode(port_dn);
- if (mode < 0)
+ err = of_get_phy_mode(port_dn, &mode);
+ if (err)
mode = PHY_INTERFACE_MODE_NA;
dp->pl_config.dev = ds->dev;
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 028e65f4b5ba..78ffc87dc25e 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -789,6 +789,22 @@ static int dsa_slave_set_link_ksettings(struct net_device *dev,
return phylink_ethtool_ksettings_set(dp->pl, cmd);
}
+static void dsa_slave_get_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+
+ phylink_ethtool_get_pauseparam(dp->pl, pause);
+}
+
+static int dsa_slave_set_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+
+ return phylink_ethtool_set_pauseparam(dp->pl, pause);
+}
+
#ifdef CONFIG_NET_POLL_CONTROLLER
static int dsa_slave_netpoll_setup(struct net_device *dev,
struct netpoll_info *ni)
@@ -1192,6 +1208,8 @@ static const struct ethtool_ops dsa_slave_ethtool_ops = {
.get_eee = dsa_slave_get_eee,
.get_link_ksettings = dsa_slave_get_link_ksettings,
.set_link_ksettings = dsa_slave_set_link_ksettings,
+ .get_pauseparam = dsa_slave_get_pauseparam,
+ .set_pauseparam = dsa_slave_set_pauseparam,
.get_rxnfc = dsa_slave_get_rxnfc,
.set_rxnfc = dsa_slave_set_rxnfc,
.get_ts_info = dsa_slave_get_ts_info,
@@ -1295,11 +1313,12 @@ static int dsa_slave_phy_setup(struct net_device *slave_dev)
struct dsa_port *dp = dsa_slave_to_port(slave_dev);
struct device_node *port_dn = dp->dn;
struct dsa_switch *ds = dp->ds;
+ phy_interface_t mode;
u32 phy_flags = 0;
- int mode, ret;
+ int ret;
- mode = of_get_phy_mode(port_dn);
- if (mode < 0)
+ ret = of_get_phy_mode(port_dn, &mode);
+ if (ret)
mode = PHY_INTERFACE_MODE_NA;
dp->pl_config.dev = &slave_dev->dev;
diff --git a/net/dsa/switch.c b/net/dsa/switch.c
index 6a9607518823..df4abe897ed6 100644
--- a/net/dsa/switch.c
+++ b/net/dsa/switch.c
@@ -20,7 +20,7 @@ static unsigned int dsa_switch_fastest_ageing_time(struct dsa_switch *ds,
int i;
for (i = 0; i < ds->num_ports; ++i) {
- struct dsa_port *dp = &ds->ports[i];
+ struct dsa_port *dp = dsa_to_port(ds, i);
if (dp->ageing_time && dp->ageing_time < ageing_time)
ageing_time = dp->ageing_time;
@@ -98,7 +98,7 @@ static int dsa_switch_bridge_leave(struct dsa_switch *ds,
if (unset_vlan_filtering) {
struct switchdev_trans trans = {0};
- err = dsa_port_vlan_filtering(&ds->ports[info->port],
+ err = dsa_port_vlan_filtering(dsa_to_port(ds, info->port),
false, &trans);
if (err && err != EOPNOTSUPP)
return err;
diff --git a/net/dsa/tag_8021q.c b/net/dsa/tag_8021q.c
index 9c1cc2482b68..2fb6c26294b5 100644
--- a/net/dsa/tag_8021q.c
+++ b/net/dsa/tag_8021q.c
@@ -31,15 +31,14 @@
* Must be transmitted as zero and ignored on receive.
*
* SWITCH_ID - VID[8:6]:
- * Index of switch within DSA tree. Must be between 0 and
- * DSA_MAX_SWITCHES - 1.
+ * Index of switch within DSA tree. Must be between 0 and 7.
*
* RSV - VID[5:4]:
* To be used for further expansion of PORT or for other purposes.
* Must be transmitted as zero and ignored on receive.
*
* PORT - VID[3:0]:
- * Index of switch port. Must be between 0 and DSA_MAX_PORTS - 1.
+ * Index of switch port. Must be between 0 and 15.
*/
#define DSA_8021Q_DIR_SHIFT 10
@@ -103,10 +102,10 @@ static int dsa_8021q_restore_pvid(struct dsa_switch *ds, int port)
if (!dsa_is_user_port(ds, port))
return 0;
- slave = ds->ports[port].slave;
+ slave = dsa_to_port(ds, port)->slave;
err = br_vlan_get_pvid(slave, &pvid);
- if (err < 0)
+ if (!pvid || err < 0)
/* There is no pvid on the bridge for this port, which is
* perfectly valid. Nothing to restore, bye-bye!
*/
@@ -118,7 +117,7 @@ static int dsa_8021q_restore_pvid(struct dsa_switch *ds, int port)
return err;
}
- return dsa_port_vid_add(&ds->ports[port], pvid, vinfo.flags);
+ return dsa_port_vid_add(dsa_to_port(ds, port), pvid, vinfo.flags);
}
/* If @enabled is true, installs @vid with @flags into the switch port's HW
@@ -130,7 +129,7 @@ static int dsa_8021q_restore_pvid(struct dsa_switch *ds, int port)
static int dsa_8021q_vid_apply(struct dsa_switch *ds, int port, u16 vid,
u16 flags, bool enabled)
{
- struct dsa_port *dp = &ds->ports[port];
+ struct dsa_port *dp = dsa_to_port(ds, port);
struct bridge_vlan_info vinfo;
int err;
@@ -342,13 +341,4 @@ struct sk_buff *dsa_8021q_remove_header(struct sk_buff *skb)
}
EXPORT_SYMBOL_GPL(dsa_8021q_remove_header);
-static const struct dsa_device_ops dsa_8021q_netdev_ops = {
- .name = "8021q",
- .proto = DSA_TAG_PROTO_8021Q,
- .overhead = VLAN_HLEN,
-};
-
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_8021Q);
-
-module_dsa_tag_driver(dsa_8021q_netdev_ops);
diff --git a/net/dsa/tag_ocelot.c b/net/dsa/tag_ocelot.c
new file mode 100644
index 000000000000..8e3e7283d430
--- /dev/null
+++ b/net/dsa/tag_ocelot.c
@@ -0,0 +1,241 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright 2019 NXP Semiconductors
+ */
+#include <soc/mscc/ocelot.h>
+#include <linux/packing.h>
+#include "dsa_priv.h"
+
+/* The CPU injection header and the CPU extraction header can have 3 types of
+ * prefixes: long, short and no prefix. The format of the header itself is the
+ * same in all 3 cases.
+ *
+ * Extraction with long prefix:
+ *
+ * +-------------------+-------------------+------+------+------------+-------+
+ * | ff:ff:ff:ff:ff:ff | ff:ff:ff:ff:ff:ff | 8880 | 000a | extraction | frame |
+ * | | | | | header | |
+ * +-------------------+-------------------+------+------+------------+-------+
+ * 48 bits 48 bits 16 bits 16 bits 128 bits
+ *
+ * Extraction with short prefix:
+ *
+ * +------+------+------------+-------+
+ * | 8880 | 000a | extraction | frame |
+ * | | | header | |
+ * +------+------+------------+-------+
+ * 16 bits 16 bits 128 bits
+ *
+ * Extraction with no prefix:
+ *
+ * +------------+-------+
+ * | extraction | frame |
+ * | header | |
+ * +------------+-------+
+ * 128 bits
+ *
+ *
+ * Injection with long prefix:
+ *
+ * +-------------------+-------------------+------+------+------------+-------+
+ * | any dmac | any smac | 8880 | 000a | injection | frame |
+ * | | | | | header | |
+ * +-------------------+-------------------+------+------+------------+-------+
+ * 48 bits 48 bits 16 bits 16 bits 128 bits
+ *
+ * Injection with short prefix:
+ *
+ * +------+------+------------+-------+
+ * | 8880 | 000a | injection | frame |
+ * | | | header | |
+ * +------+------+------------+-------+
+ * 16 bits 16 bits 128 bits
+ *
+ * Injection with no prefix:
+ *
+ * +------------+-------+
+ * | injection | frame |
+ * | header | |
+ * +------------+-------+
+ * 128 bits
+ *
+ * The injection header looks like this (network byte order, bit 127
+ * is part of lowest address byte in memory, bit 0 is part of highest
+ * address byte):
+ *
+ * +------+------+------+------+------+------+------+------+
+ * 127:120 |BYPASS| MASQ | MASQ_PORT |REW_OP|REW_OP|
+ * +------+------+------+------+------+------+------+------+
+ * 119:112 | REW_OP |
+ * +------+------+------+------+------+------+------+------+
+ * 111:104 | REW_VAL |
+ * +------+------+------+------+------+------+------+------+
+ * 103: 96 | REW_VAL |
+ * +------+------+------+------+------+------+------+------+
+ * 95: 88 | REW_VAL |
+ * +------+------+------+------+------+------+------+------+
+ * 87: 80 | REW_VAL |
+ * +------+------+------+------+------+------+------+------+
+ * 79: 72 | RSV |
+ * +------+------+------+------+------+------+------+------+
+ * 71: 64 | RSV | DEST |
+ * +------+------+------+------+------+------+------+------+
+ * 63: 56 | DEST |
+ * +------+------+------+------+------+------+------+------+
+ * 55: 48 | RSV |
+ * +------+------+------+------+------+------+------+------+
+ * 47: 40 | RSV | SRC_PORT | RSV |TFRM_TIMER|
+ * +------+------+------+------+------+------+------+------+
+ * 39: 32 | TFRM_TIMER | RSV |
+ * +------+------+------+------+------+------+------+------+
+ * 31: 24 | RSV | DP | POP_CNT | CPUQ |
+ * +------+------+------+------+------+------+------+------+
+ * 23: 16 | CPUQ | QOS_CLASS |TAG_TYPE|
+ * +------+------+------+------+------+------+------+------+
+ * 15: 8 | PCP | DEI | VID |
+ * +------+------+------+------+------+------+------+------+
+ * 7: 0 | VID |
+ * +------+------+------+------+------+------+------+------+
+ *
+ * And the extraction header looks like this:
+ *
+ * +------+------+------+------+------+------+------+------+
+ * 127:120 | RSV | REW_OP |
+ * +------+------+------+------+------+------+------+------+
+ * 119:112 | REW_OP | REW_VAL |
+ * +------+------+------+------+------+------+------+------+
+ * 111:104 | REW_VAL |
+ * +------+------+------+------+------+------+------+------+
+ * 103: 96 | REW_VAL |
+ * +------+------+------+------+------+------+------+------+
+ * 95: 88 | REW_VAL |
+ * +------+------+------+------+------+------+------+------+
+ * 87: 80 | REW_VAL | LLEN |
+ * +------+------+------+------+------+------+------+------+
+ * 79: 72 | LLEN | WLEN |
+ * +------+------+------+------+------+------+------+------+
+ * 71: 64 | WLEN | RSV |
+ * +------+------+------+------+------+------+------+------+
+ * 63: 56 | RSV |
+ * +------+------+------+------+------+------+------+------+
+ * 55: 48 | RSV |
+ * +------+------+------+------+------+------+------+------+
+ * 47: 40 | RSV | SRC_PORT | ACL_ID |
+ * +------+------+------+------+------+------+------+------+
+ * 39: 32 | ACL_ID | RSV | SFLOW_ID |
+ * +------+------+------+------+------+------+------+------+
+ * 31: 24 |ACL_HIT| DP | LRN_FLAGS | CPUQ |
+ * +------+------+------+------+------+------+------+------+
+ * 23: 16 | CPUQ | QOS_CLASS |TAG_TYPE|
+ * +------+------+------+------+------+------+------+------+
+ * 15: 8 | PCP | DEI | VID |
+ * +------+------+------+------+------+------+------+------+
+ * 7: 0 | VID |
+ * +------+------+------+------+------+------+------+------+
+ */
+
+static struct sk_buff *ocelot_xmit(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct dsa_port *dp = dsa_slave_to_port(netdev);
+ u64 bypass, dest, src, qos_class, rew_op;
+ struct dsa_switch *ds = dp->ds;
+ int port = dp->index;
+ struct ocelot *ocelot = ds->priv;
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ u8 *injection;
+
+ if (unlikely(skb_cow_head(skb, OCELOT_TAG_LEN) < 0)) {
+ netdev_err(netdev, "Cannot make room for tag.\n");
+ return NULL;
+ }
+
+ injection = skb_push(skb, OCELOT_TAG_LEN);
+
+ memset(injection, 0, OCELOT_TAG_LEN);
+
+ src = dsa_upstream_port(ds, port);
+ dest = BIT(port);
+ bypass = true;
+ qos_class = skb->priority;
+
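+	/* BYPASS tells the switch to skip analyzer processing and use the
+	 * DEST field (a one-hot mask of front ports) as-is; bit positions
+	 * match the injection header layout documented above.
+	 */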
+ packing(injection, &bypass, 127, 127, OCELOT_TAG_LEN, PACK, 0);
+ packing(injection, &dest, 68, 56, OCELOT_TAG_LEN, PACK, 0);
+ packing(injection, &src, 46, 43, OCELOT_TAG_LEN, PACK, 0);
+ packing(injection, &qos_class, 19, 17, OCELOT_TAG_LEN, PACK, 0);
+
+ if (ocelot->ptp && (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
+ rew_op = ocelot_port->ptp_cmd;
+ if (ocelot_port->ptp_cmd == IFH_REW_OP_TWO_STEP_PTP) {
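+			/* For two-step timestamping, fold the 2-bit
+			 * timestamp identifier into REW_OP bits 4:3.
+			 */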
+ rew_op |= (ocelot_port->ts_id % 4) << 3;
+ ocelot_port->ts_id++;
+ }
+
+ packing(injection, &rew_op, 125, 117, OCELOT_TAG_LEN, PACK, 0);
+ }
+
+ return skb;
+}
+
+static struct sk_buff *ocelot_rcv(struct sk_buff *skb,
+ struct net_device *netdev,
+ struct packet_type *pt)
+{
+ u64 src_port, qos_class;
+ u8 *start = skb->data;
+ u8 *extraction;
+
+ /* Revert skb->data by the amount consumed by the DSA master,
+ * so it points to the beginning of the frame.
+ */
+ skb_push(skb, ETH_HLEN);
+	/* We don't care about the long prefix; it only exists so the frame
+	 * is accepted by the DSA master's RX filter. Discard it now by
+	 * moving it into the headroom.
+	 */
+ skb_pull(skb, OCELOT_LONG_PREFIX_LEN);
+ /* And skb->data now points to the extraction frame header.
+ * Keep a pointer to it.
+ */
+ extraction = skb->data;
+ /* Now the EFH is part of the headroom as well */
+ skb_pull(skb, OCELOT_TAG_LEN);
+ /* Reset the pointer to the real MAC header */
+ skb_reset_mac_header(skb);
+ skb_reset_mac_len(skb);
+ /* And move skb->data to the correct location again */
+ skb_pull(skb, ETH_HLEN);
+
+	/* Remove the extraction header from the inet checksum */
+ skb_postpull_rcsum(skb, start, OCELOT_LONG_PREFIX_LEN + OCELOT_TAG_LEN);
+
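+	/* Unpack just the fields of interest, SRC_PORT and QOS_CLASS; bit
+	 * positions follow the extraction header layout above.
+	 */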
+ packing(extraction, &src_port, 46, 43, OCELOT_TAG_LEN, UNPACK, 0);
+ packing(extraction, &qos_class, 19, 17, OCELOT_TAG_LEN, UNPACK, 0);
+
+ skb->dev = dsa_master_find_slave(netdev, 0, src_port);
+ if (!skb->dev)
+ /* The switch will reflect back some frames sent through
+ * sockets opened on the bare DSA master. These will come back
+ * with src_port equal to the index of the CPU port, for which
+ * there is no slave registered. So don't print any error
+ * message here (ignore and drop those frames).
+ */
+ return NULL;
+
+ skb->offload_fwd_mark = 1;
+ skb->priority = qos_class;
+
+ return skb;
+}
+
+static struct dsa_device_ops ocelot_netdev_ops = {
+ .name = "ocelot",
+ .proto = DSA_TAG_PROTO_OCELOT,
+ .xmit = ocelot_xmit,
+ .rcv = ocelot_rcv,
+ .overhead = OCELOT_TAG_LEN + OCELOT_LONG_PREFIX_LEN,
+};
+
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_OCELOT);
+
+module_dsa_tag_driver(ocelot_netdev_ops);
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index 17374afee28f..9040fe55e0f5 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -244,7 +244,12 @@ int eth_header_cache(const struct neighbour *neigh, struct hh_cache *hh, __be16
eth->h_proto = type;
memcpy(eth->h_source, dev->dev_addr, ETH_ALEN);
memcpy(eth->h_dest, neigh->ha, ETH_ALEN);
- hh->hh_len = ETH_HLEN;
+
+ /* Pairs with READ_ONCE() in neigh_resolve_output(),
+ * neigh_hh_output() and neigh_update_hhs().
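+	 * The release ordering ensures that a reader observing a nonzero
+	 * hh_len also sees the header bytes written just above.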
+ */
+ smp_store_release(&hh->hh_len, ETH_HLEN);
+
return 0;
}
EXPORT_SYMBOL(eth_header_cache);
diff --git a/net/ieee802154/nl802154.c b/net/ieee802154/nl802154.c
index ffcfcef76291..7c5a1aa5adb4 100644
--- a/net/ieee802154/nl802154.c
+++ b/net/ieee802154/nl802154.c
@@ -236,21 +236,14 @@ nl802154_prepare_wpan_dev_dump(struct sk_buff *skb,
struct cfg802154_registered_device **rdev,
struct wpan_dev **wpan_dev)
{
+ const struct genl_dumpit_info *info = genl_dumpit_info(cb);
int err;
rtnl_lock();
if (!cb->args[0]) {
- err = nlmsg_parse_deprecated(cb->nlh,
- GENL_HDRLEN + nl802154_fam.hdrsize,
- genl_family_attrbuf(&nl802154_fam),
- nl802154_fam.maxattr,
- nl802154_policy, NULL);
- if (err)
- goto out_unlock;
-
*wpan_dev = __cfg802154_wpan_dev_from_attrs(sock_net(skb->sk),
- genl_family_attrbuf(&nl802154_fam));
+ info->attrs);
if (IS_ERR(*wpan_dev)) {
err = PTR_ERR(*wpan_dev);
goto out_unlock;
@@ -557,17 +550,8 @@ static int nl802154_dump_wpan_phy_parse(struct sk_buff *skb,
struct netlink_callback *cb,
struct nl802154_dump_wpan_phy_state *state)
{
- struct nlattr **tb = genl_family_attrbuf(&nl802154_fam);
- int ret = nlmsg_parse_deprecated(cb->nlh,
- GENL_HDRLEN + nl802154_fam.hdrsize,
- tb, nl802154_fam.maxattr,
- nl802154_policy, NULL);
-
- /* TODO check if we can handle error here,
- * we have no backward compatibility
- */
- if (ret)
- return 0;
+ const struct genl_dumpit_info *info = genl_dumpit_info(cb);
+ struct nlattr **tb = info->attrs;
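+	/* The genetlink core has already parsed and validated the dump
+	 * attributes at this point, so tb can be used directly.
+	 */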
if (tb[NL802154_ATTR_WPAN_PHY])
state->filter_wpan_phy = nla_get_u32(tb[NL802154_ATTR_WPAN_PHY]);
@@ -2203,7 +2187,8 @@ static void nl802154_post_doit(const struct genl_ops *ops, struct sk_buff *skb,
static const struct genl_ops nl802154_ops[] = {
{
.cmd = NL802154_CMD_GET_WPAN_PHY,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .validate = GENL_DONT_VALIDATE_STRICT |
+ GENL_DONT_VALIDATE_DUMP_STRICT,
.doit = nl802154_get_wpan_phy,
.dumpit = nl802154_dump_wpan_phy,
.done = nl802154_dump_wpan_phy_done,
@@ -2343,7 +2328,8 @@ static const struct genl_ops nl802154_ops[] = {
},
{
.cmd = NL802154_CMD_GET_SEC_KEY,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .validate = GENL_DONT_VALIDATE_STRICT |
+ GENL_DONT_VALIDATE_DUMP_STRICT,
/* TODO .doit by matching key id? */
.dumpit = nl802154_dump_llsec_key,
.flags = GENL_ADMIN_PERM,
@@ -2369,7 +2355,8 @@ static const struct genl_ops nl802154_ops[] = {
/* TODO unique identifier must short+pan OR extended_addr */
{
.cmd = NL802154_CMD_GET_SEC_DEV,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .validate = GENL_DONT_VALIDATE_STRICT |
+ GENL_DONT_VALIDATE_DUMP_STRICT,
/* TODO .doit by matching extended_addr? */
.dumpit = nl802154_dump_llsec_dev,
.flags = GENL_ADMIN_PERM,
@@ -2395,7 +2382,8 @@ static const struct genl_ops nl802154_ops[] = {
/* TODO remove complete devkey, put it as nested? */
{
.cmd = NL802154_CMD_GET_SEC_DEVKEY,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .validate = GENL_DONT_VALIDATE_STRICT |
+ GENL_DONT_VALIDATE_DUMP_STRICT,
/* TODO doit by matching ??? */
.dumpit = nl802154_dump_llsec_devkey,
.flags = GENL_ADMIN_PERM,
@@ -2420,7 +2408,8 @@ static const struct genl_ops nl802154_ops[] = {
},
{
.cmd = NL802154_CMD_GET_SEC_LEVEL,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .validate = GENL_DONT_VALIDATE_STRICT |
+ GENL_DONT_VALIDATE_DUMP_STRICT,
/* TODO .doit by matching frame_type? */
.dumpit = nl802154_dump_llsec_seclevel,
.flags = GENL_ADMIN_PERM,
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 03381f3e12ba..fc816b187170 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -180,8 +180,8 @@ config NET_IPIP
config NET_IPGRE_DEMUX
tristate "IP: GRE demultiplexer"
help
- This is helper module to demultiplex GRE packets on GRE version field criteria.
- Required by ip_gre and pptp modules.
+	  This is a helper module to demultiplex GRE packets based on the
+	  GRE version field. It is required by the ip_gre and pptp modules.
config NET_IP_TUNNEL
tristate
@@ -459,200 +459,200 @@ config TCP_CONG_BIC
tristate "Binary Increase Congestion (BIC) control"
default m
---help---
- BIC-TCP is a sender-side only change that ensures a linear RTT
- fairness under large windows while offering both scalability and
- bounded TCP-friendliness. The protocol combines two schemes
- called additive increase and binary search increase. When the
- congestion window is large, additive increase with a large
- increment ensures linear RTT fairness as well as good
- scalability. Under small congestion windows, binary search
- increase provides TCP friendliness.
- See http://www.csc.ncsu.edu/faculty/rhee/export/bitcp/
+	  BIC-TCP is a sender-side only change that ensures linear RTT
+ fairness under large windows while offering both scalability and
+ bounded TCP-friendliness. The protocol combines two schemes
+ called additive increase and binary search increase. When the
+ congestion window is large, additive increase with a large
+ increment ensures linear RTT fairness as well as good
+ scalability. Under small congestion windows, binary search
+ increase provides TCP friendliness.
+ See http://www.csc.ncsu.edu/faculty/rhee/export/bitcp/
config TCP_CONG_CUBIC
tristate "CUBIC TCP"
default y
---help---
- This is version 2.0 of BIC-TCP which uses a cubic growth function
- among other techniques.
- See http://www.csc.ncsu.edu/faculty/rhee/export/bitcp/cubic-paper.pdf
+ This is version 2.0 of BIC-TCP which uses a cubic growth function
+ among other techniques.
+ See http://www.csc.ncsu.edu/faculty/rhee/export/bitcp/cubic-paper.pdf
config TCP_CONG_WESTWOOD
tristate "TCP Westwood+"
default m
---help---
- TCP Westwood+ is a sender-side only modification of the TCP Reno
- protocol stack that optimizes the performance of TCP congestion
- control. It is based on end-to-end bandwidth estimation to set
- congestion window and slow start threshold after a congestion
- episode. Using this estimation, TCP Westwood+ adaptively sets a
- slow start threshold and a congestion window which takes into
- account the bandwidth used at the time congestion is experienced.
- TCP Westwood+ significantly increases fairness wrt TCP Reno in
- wired networks and throughput over wireless links.
+ TCP Westwood+ is a sender-side only modification of the TCP Reno
+ protocol stack that optimizes the performance of TCP congestion
+ control. It is based on end-to-end bandwidth estimation to set
+ congestion window and slow start threshold after a congestion
+ episode. Using this estimation, TCP Westwood+ adaptively sets a
+ slow start threshold and a congestion window which takes into
+ account the bandwidth used at the time congestion is experienced.
+ TCP Westwood+ significantly increases fairness wrt TCP Reno in
+ wired networks and throughput over wireless links.
config TCP_CONG_HTCP
tristate "H-TCP"
default m
---help---
- H-TCP is a send-side only modifications of the TCP Reno
- protocol stack that optimizes the performance of TCP
- congestion control for high speed network links. It uses a
- modeswitch to change the alpha and beta parameters of TCP Reno
- based on network conditions and in a way so as to be fair with
- other Reno and H-TCP flows.
+	  H-TCP is a sender-side only modification of the TCP Reno
+ protocol stack that optimizes the performance of TCP
+ congestion control for high speed network links. It uses a
+ modeswitch to change the alpha and beta parameters of TCP Reno
+ based on network conditions and in a way so as to be fair with
+ other Reno and H-TCP flows.
config TCP_CONG_HSTCP
tristate "High Speed TCP"
default n
---help---
- Sally Floyd's High Speed TCP (RFC 3649) congestion control.
- A modification to TCP's congestion control mechanism for use
- with large congestion windows. A table indicates how much to
- increase the congestion window by when an ACK is received.
- For more detail see http://www.icir.org/floyd/hstcp.html
+ Sally Floyd's High Speed TCP (RFC 3649) congestion control.
+ A modification to TCP's congestion control mechanism for use
+ with large congestion windows. A table indicates how much to
+ increase the congestion window by when an ACK is received.
+ For more detail see http://www.icir.org/floyd/hstcp.html
config TCP_CONG_HYBLA
tristate "TCP-Hybla congestion control algorithm"
default n
---help---
- TCP-Hybla is a sender-side only change that eliminates penalization of
- long-RTT, large-bandwidth connections, like when satellite legs are
- involved, especially when sharing a common bottleneck with normal
- terrestrial connections.
+ TCP-Hybla is a sender-side only change that eliminates penalization of
+ long-RTT, large-bandwidth connections, like when satellite legs are
+ involved, especially when sharing a common bottleneck with normal
+ terrestrial connections.
config TCP_CONG_VEGAS
tristate "TCP Vegas"
default n
---help---
- TCP Vegas is a sender-side only change to TCP that anticipates
- the onset of congestion by estimating the bandwidth. TCP Vegas
- adjusts the sending rate by modifying the congestion
- window. TCP Vegas should provide less packet loss, but it is
- not as aggressive as TCP Reno.
+ TCP Vegas is a sender-side only change to TCP that anticipates
+ the onset of congestion by estimating the bandwidth. TCP Vegas
+ adjusts the sending rate by modifying the congestion
+ window. TCP Vegas should provide less packet loss, but it is
+ not as aggressive as TCP Reno.
config TCP_CONG_NV
- tristate "TCP NV"
- default n
- ---help---
- TCP NV is a follow up to TCP Vegas. It has been modified to deal with
- 10G networks, measurement noise introduced by LRO, GRO and interrupt
- coalescence. In addition, it will decrease its cwnd multiplicatively
- instead of linearly.
+ tristate "TCP NV"
+ default n
+ ---help---
+ TCP NV is a follow up to TCP Vegas. It has been modified to deal with
+ 10G networks, measurement noise introduced by LRO, GRO and interrupt
+ coalescence. In addition, it will decrease its cwnd multiplicatively
+ instead of linearly.
- Note that in general congestion avoidance (cwnd decreased when # packets
- queued grows) cannot coexist with congestion control (cwnd decreased only
- when there is packet loss) due to fairness issues. One scenario when they
- can coexist safely is when the CA flows have RTTs << CC flows RTTs.
+ Note that in general congestion avoidance (cwnd decreased when # packets
+ queued grows) cannot coexist with congestion control (cwnd decreased only
+ when there is packet loss) due to fairness issues. One scenario when they
+ can coexist safely is when the CA flows have RTTs << CC flows RTTs.
- For further details see http://www.brakmo.org/networking/tcp-nv/
+ For further details see http://www.brakmo.org/networking/tcp-nv/
config TCP_CONG_SCALABLE
tristate "Scalable TCP"
default n
---help---
- Scalable TCP is a sender-side only change to TCP which uses a
- MIMD congestion control algorithm which has some nice scaling
- properties, though is known to have fairness issues.
- See http://www.deneholme.net/tom/scalable/
+ Scalable TCP is a sender-side only change to TCP which uses a
+ MIMD congestion control algorithm which has some nice scaling
+	  properties, though it is known to have fairness issues.
+ See http://www.deneholme.net/tom/scalable/
config TCP_CONG_LP
tristate "TCP Low Priority"
default n
---help---
- TCP Low Priority (TCP-LP), a distributed algorithm whose goal is
- to utilize only the excess network bandwidth as compared to the
- ``fair share`` of bandwidth as targeted by TCP.
- See http://www-ece.rice.edu/networks/TCP-LP/
+ TCP Low Priority (TCP-LP), a distributed algorithm whose goal is
+ to utilize only the excess network bandwidth as compared to the
+ ``fair share`` of bandwidth as targeted by TCP.
+ See http://www-ece.rice.edu/networks/TCP-LP/
config TCP_CONG_VENO
tristate "TCP Veno"
default n
---help---
- TCP Veno is a sender-side only enhancement of TCP to obtain better
- throughput over wireless networks. TCP Veno makes use of state
- distinguishing to circumvent the difficult judgment of the packet loss
- type. TCP Veno cuts down less congestion window in response to random
- loss packets.
- See <http://ieeexplore.ieee.org/xpl/freeabs_all.jsp?arnumber=1177186>
+ TCP Veno is a sender-side only enhancement of TCP to obtain better
+ throughput over wireless networks. TCP Veno makes use of state
+ distinguishing to circumvent the difficult judgment of the packet loss
+	  type. TCP Veno reduces the congestion window less in response to
+	  random packet loss.
+ See <http://ieeexplore.ieee.org/xpl/freeabs_all.jsp?arnumber=1177186>
config TCP_CONG_YEAH
tristate "YeAH TCP"
select TCP_CONG_VEGAS
default n
---help---
- YeAH-TCP is a sender-side high-speed enabled TCP congestion control
- algorithm, which uses a mixed loss/delay approach to compute the
- congestion window. It's design goals target high efficiency,
- internal, RTT and Reno fairness, resilience to link loss while
- keeping network elements load as low as possible.
+ YeAH-TCP is a sender-side high-speed enabled TCP congestion control
+ algorithm, which uses a mixed loss/delay approach to compute the
+	  congestion window. Its design goals target high efficiency,
+ internal, RTT and Reno fairness, resilience to link loss while
+ keeping network elements load as low as possible.
- For further details look here:
- http://wil.cs.caltech.edu/pfldnet2007/paper/YeAH_TCP.pdf
+ For further details look here:
+ http://wil.cs.caltech.edu/pfldnet2007/paper/YeAH_TCP.pdf
config TCP_CONG_ILLINOIS
tristate "TCP Illinois"
default n
---help---
- TCP-Illinois is a sender-side modification of TCP Reno for
- high speed long delay links. It uses round-trip-time to
- adjust the alpha and beta parameters to achieve a higher average
- throughput and maintain fairness.
+ TCP-Illinois is a sender-side modification of TCP Reno for
+ high speed long delay links. It uses round-trip-time to
+ adjust the alpha and beta parameters to achieve a higher average
+ throughput and maintain fairness.
- For further details see:
- http://www.ews.uiuc.edu/~shaoliu/tcpillinois/index.html
+ For further details see:
+ http://www.ews.uiuc.edu/~shaoliu/tcpillinois/index.html
config TCP_CONG_DCTCP
tristate "DataCenter TCP (DCTCP)"
default n
---help---
- DCTCP leverages Explicit Congestion Notification (ECN) in the network to
- provide multi-bit feedback to the end hosts. It is designed to provide:
+ DCTCP leverages Explicit Congestion Notification (ECN) in the network to
+ provide multi-bit feedback to the end hosts. It is designed to provide:
- - High burst tolerance (incast due to partition/aggregate),
- - Low latency (short flows, queries),
- - High throughput (continuous data updates, large file transfers) with
- commodity, shallow-buffered switches.
+ - High burst tolerance (incast due to partition/aggregate),
+ - Low latency (short flows, queries),
+ - High throughput (continuous data updates, large file transfers) with
+ commodity, shallow-buffered switches.
- All switches in the data center network running DCTCP must support
- ECN marking and be configured for marking when reaching defined switch
- buffer thresholds. The default ECN marking threshold heuristic for
- DCTCP on switches is 20 packets (30KB) at 1Gbps, and 65 packets
- (~100KB) at 10Gbps, but might need further careful tweaking.
+ All switches in the data center network running DCTCP must support
+ ECN marking and be configured for marking when reaching defined switch
+ buffer thresholds. The default ECN marking threshold heuristic for
+ DCTCP on switches is 20 packets (30KB) at 1Gbps, and 65 packets
+ (~100KB) at 10Gbps, but might need further careful tweaking.
- For further details see:
- http://simula.stanford.edu/~alizade/Site/DCTCP_files/dctcp-final.pdf
+ For further details see:
+ http://simula.stanford.edu/~alizade/Site/DCTCP_files/dctcp-final.pdf
config TCP_CONG_CDG
tristate "CAIA Delay-Gradient (CDG)"
default n
---help---
- CAIA Delay-Gradient (CDG) is a TCP congestion control that modifies
- the TCP sender in order to:
+ CAIA Delay-Gradient (CDG) is a TCP congestion control that modifies
+ the TCP sender in order to:
o Use the delay gradient as a congestion signal.
o Back off with an average probability that is independent of the RTT.
o Coexist with flows that use loss-based congestion control.
o Tolerate packet loss unrelated to congestion.
- For further details see:
- D.A. Hayes and G. Armitage. "Revisiting TCP congestion control using
- delay gradients." In Networking 2011. Preprint: http://goo.gl/No3vdg
+ For further details see:
+ D.A. Hayes and G. Armitage. "Revisiting TCP congestion control using
+ delay gradients." In Networking 2011. Preprint: http://goo.gl/No3vdg
config TCP_CONG_BBR
tristate "BBR TCP"
default n
---help---
- BBR (Bottleneck Bandwidth and RTT) TCP congestion control aims to
- maximize network utilization and minimize queues. It builds an explicit
- model of the the bottleneck delivery rate and path round-trip
- propagation delay. It tolerates packet loss and delay unrelated to
- congestion. It can operate over LAN, WAN, cellular, wifi, or cable
- modem links. It can coexist with flows that use loss-based congestion
- control, and can operate with shallow buffers, deep buffers,
- bufferbloat, policers, or AQM schemes that do not provide a delay
- signal. It requires the fq ("Fair Queue") pacing packet scheduler.
+ BBR (Bottleneck Bandwidth and RTT) TCP congestion control aims to
+ maximize network utilization and minimize queues. It builds an explicit
+	  model of the bottleneck delivery rate and path round-trip
+ propagation delay. It tolerates packet loss and delay unrelated to
+ congestion. It can operate over LAN, WAN, cellular, wifi, or cable
+ modem links. It can coexist with flows that use loss-based congestion
+ control, and can operate with shallow buffers, deep buffers,
+ bufferbloat, policers, or AQM schemes that do not provide a delay
+ signal. It requires the fq ("Fair Queue") pacing packet scheduler.
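+
+	  As with the other algorithms above, the active congestion control
+	  can be selected at runtime, e.g. system-wide with
+	  "sysctl net.ipv4.tcp_congestion_control=bbr" or per socket via
+	  setsockopt(TCP_CONGESTION).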
choice
prompt "Default TCP congestion control"
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 70f92aaca411..53de8e00990e 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -208,7 +208,7 @@ int inet_listen(struct socket *sock, int backlog)
if (!((1 << old_state) & (TCPF_CLOSE | TCPF_LISTEN)))
goto out;
- sk->sk_max_ack_backlog = backlog;
+ WRITE_ONCE(sk->sk_max_ack_backlog, backlog);
/* Really, if the socket is already in listen state
* we can only allow the backlog to be adjusted.
*/
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 71c78d223dfd..577db1d50a24 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -70,11 +70,6 @@ fail:
fib_free_table(main_table);
return -ENOMEM;
}
-
-static bool fib4_has_custom_rules(struct net *net)
-{
- return false;
-}
#else
struct fib_table *fib_new_table(struct net *net, u32 id)
@@ -131,11 +126,6 @@ struct fib_table *fib_get_table(struct net *net, u32 id)
}
return NULL;
}
-
-static bool fib4_has_custom_rules(struct net *net)
-{
- return net->ipv4.fib_has_custom_rules;
-}
#endif /* CONFIG_IP_MULTIPLE_TABLES */
static void fib_replace_table(struct net *net, struct fib_table *old,
diff --git a/net/ipv4/fib_notifier.c b/net/ipv4/fib_notifier.c
index b804ccbdb241..0c28bd469a68 100644
--- a/net/ipv4/fib_notifier.c
+++ b/net/ipv4/fib_notifier.c
@@ -9,12 +9,12 @@
#include <net/netns/ipv4.h>
#include <net/ip_fib.h>
-int call_fib4_notifier(struct notifier_block *nb, struct net *net,
+int call_fib4_notifier(struct notifier_block *nb,
enum fib_event_type event_type,
struct fib_notifier_info *info)
{
info->family = AF_INET;
- return call_fib_notifier(nb, net, event_type, info);
+ return call_fib_notifier(nb, event_type, info);
}
int call_fib4_notifiers(struct net *net, enum fib_event_type event_type,
@@ -34,17 +34,16 @@ static unsigned int fib4_seq_read(struct net *net)
return net->ipv4.fib_seq + fib4_rules_seq_read(net);
}
-static int fib4_dump(struct net *net, struct notifier_block *nb)
+static int fib4_dump(struct net *net, struct notifier_block *nb,
+ struct netlink_ext_ack *extack)
{
int err;
- err = fib4_rules_dump(net, nb);
+ err = fib4_rules_dump(net, nb, extack);
if (err)
return err;
- fib_notify(net, nb);
-
- return 0;
+ return fib_notify(net, nb, extack);
}
static const struct fib_notifier_ops fib4_notifier_ops_template = {
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c
index b43a7ba5c6a4..f99e3bac5cab 100644
--- a/net/ipv4/fib_rules.c
+++ b/net/ipv4/fib_rules.c
@@ -65,9 +65,10 @@ bool fib4_rule_default(const struct fib_rule *rule)
}
EXPORT_SYMBOL_GPL(fib4_rule_default);
-int fib4_rules_dump(struct net *net, struct notifier_block *nb)
+int fib4_rules_dump(struct net *net, struct notifier_block *nb,
+ struct netlink_ext_ack *extack)
{
- return fib_rules_dump(net, nb, AF_INET);
+ return fib_rules_dump(net, nb, AF_INET, extack);
}
unsigned int fib4_rules_seq_read(struct net *net)
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 1ab2fb6bb37d..b9df9c09b84e 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -74,11 +74,13 @@
#include <trace/events/fib.h>
#include "fib_lookup.h"
-static int call_fib_entry_notifier(struct notifier_block *nb, struct net *net,
+static int call_fib_entry_notifier(struct notifier_block *nb,
enum fib_event_type event_type, u32 dst,
- int dst_len, struct fib_alias *fa)
+ int dst_len, struct fib_alias *fa,
+ struct netlink_ext_ack *extack)
{
struct fib_entry_notifier_info info = {
+ .info.extack = extack,
.dst = dst,
.dst_len = dst_len,
.fi = fa->fa_info,
@@ -86,7 +88,7 @@ static int call_fib_entry_notifier(struct notifier_block *nb, struct net *net,
.type = fa->fa_type,
.tb_id = fa->tb_id,
};
- return call_fib4_notifier(nb, net, event_type, &info.info);
+ return call_fib4_notifier(nb, event_type, &info.info);
}
static int call_fib_entry_notifiers(struct net *net,
@@ -2015,10 +2017,12 @@ void fib_info_notify_update(struct net *net, struct nl_info *info)
}
}
-static void fib_leaf_notify(struct net *net, struct key_vector *l,
- struct fib_table *tb, struct notifier_block *nb)
+static int fib_leaf_notify(struct key_vector *l, struct fib_table *tb,
+ struct notifier_block *nb,
+ struct netlink_ext_ack *extack)
{
struct fib_alias *fa;
+ int err;
hlist_for_each_entry_rcu(fa, &l->leaf, fa_list) {
struct fib_info *fi = fa->fa_info;
@@ -2032,39 +2036,53 @@ static void fib_leaf_notify(struct net *net, struct key_vector *l,
if (tb->tb_id != fa->tb_id)
continue;
- call_fib_entry_notifier(nb, net, FIB_EVENT_ENTRY_ADD, l->key,
- KEYLENGTH - fa->fa_slen, fa);
+ err = call_fib_entry_notifier(nb, FIB_EVENT_ENTRY_ADD, l->key,
+ KEYLENGTH - fa->fa_slen,
+ fa, extack);
+ if (err)
+ return err;
}
+ return 0;
}
-static void fib_table_notify(struct net *net, struct fib_table *tb,
- struct notifier_block *nb)
+static int fib_table_notify(struct fib_table *tb, struct notifier_block *nb,
+ struct netlink_ext_ack *extack)
{
struct trie *t = (struct trie *)tb->tb_data;
struct key_vector *l, *tp = t->kv;
t_key key = 0;
+ int err;
while ((l = leaf_walk_rcu(&tp, key)) != NULL) {
- fib_leaf_notify(net, l, tb, nb);
+ err = fib_leaf_notify(l, tb, nb, extack);
+ if (err)
+ return err;
key = l->key + 1;
/* stop in case of wrap around */
if (key < l->key)
break;
}
+ return 0;
}
-void fib_notify(struct net *net, struct notifier_block *nb)
+int fib_notify(struct net *net, struct notifier_block *nb,
+ struct netlink_ext_ack *extack)
{
unsigned int h;
+ int err;
for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
struct hlist_head *head = &net->ipv4.fib_table_hash[h];
struct fib_table *tb;
- hlist_for_each_entry_rcu(tb, head, tb_hlist)
- fib_table_notify(net, tb, nb);
+ hlist_for_each_entry_rcu(tb, head, tb_hlist) {
+ err = fib_table_notify(tb, nb, extack);
+ if (err)
+ return err;
+ }
}
+ return 0;
}
static void __trie_free_rcu(struct rcu_head *head)
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 4298aae74e0e..18068ed42f25 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -249,10 +249,11 @@ bool icmp_global_allow(void)
bool rc = false;
/* Check if token bucket is empty and cannot be refilled
- * without taking the spinlock.
+ * without taking the spinlock. The READ_ONCE() are paired
+ * with the following WRITE_ONCE() in this same function.
*/
- if (!icmp_global.credit) {
- delta = min_t(u32, now - icmp_global.stamp, HZ);
+ if (!READ_ONCE(icmp_global.credit)) {
+ delta = min_t(u32, now - READ_ONCE(icmp_global.stamp), HZ);
if (delta < HZ / 50)
return false;
}
@@ -262,14 +263,14 @@ bool icmp_global_allow(void)
if (delta >= HZ / 50) {
incr = sysctl_icmp_msgs_per_sec * delta / HZ ;
if (incr)
- icmp_global.stamp = now;
+ WRITE_ONCE(icmp_global.stamp, now);
}
credit = min_t(u32, icmp_global.credit + incr, sysctl_icmp_msgs_burst);
if (credit) {
credit--;
rc = true;
}
- icmp_global.credit = credit;
+ WRITE_ONCE(icmp_global.credit, credit);
spin_unlock(&icmp_global.lock);
return rc;
}
@@ -682,7 +683,8 @@ void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info,
dev = dev_get_by_index_rcu(net, inet_iif(skb_in));
if (dev)
- saddr = inet_select_addr(dev, 0, RT_SCOPE_LINK);
+ saddr = inet_select_addr(dev, iph->saddr,
+ RT_SCOPE_LINK);
else
saddr = 0;
rcu_read_unlock();
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 480d0b22db1a..3b9c7a2725a9 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -1563,7 +1563,7 @@ static int ip_mc_check_igmp_msg(struct sk_buff *skb)
}
}
-static inline __sum16 ip_mc_validate_checksum(struct sk_buff *skb)
+static __sum16 ip_mc_validate_checksum(struct sk_buff *skb)
{
return skb_checksum_simple_validate(skb);
}
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index eb30fc1770de..e4c6e8b40490 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -716,7 +716,7 @@ static void reqsk_timer_handler(struct timer_list *t)
* ones are about to clog our table.
*/
qlen = reqsk_queue_len(queue);
- if ((qlen << 1) > max(8U, sk_listener->sk_max_ack_backlog)) {
+ if ((qlen << 1) > max(8U, READ_ONCE(sk_listener->sk_max_ack_backlog))) {
int young = reqsk_queue_len_young(queue) << 1;
while (thresh > 2) {
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 7dc79b973e6e..af154977904c 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -226,17 +226,17 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
r->idiag_timer = 1;
r->idiag_retrans = icsk->icsk_retransmits;
r->idiag_expires =
- jiffies_to_msecs(icsk->icsk_timeout - jiffies);
+ jiffies_delta_to_msecs(icsk->icsk_timeout - jiffies);
} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
r->idiag_timer = 4;
r->idiag_retrans = icsk->icsk_probes_out;
r->idiag_expires =
- jiffies_to_msecs(icsk->icsk_timeout - jiffies);
+ jiffies_delta_to_msecs(icsk->icsk_timeout - jiffies);
} else if (timer_pending(&sk->sk_timer)) {
r->idiag_timer = 2;
r->idiag_retrans = icsk->icsk_probes_out;
r->idiag_expires =
- jiffies_to_msecs(sk->sk_timer.expires - jiffies);
+ jiffies_delta_to_msecs(sk->sk_timer.expires - jiffies);
} else {
r->idiag_timer = 0;
r->idiag_expires = 0;
@@ -342,16 +342,13 @@ static int inet_twsk_diag_fill(struct sock *sk,
r = nlmsg_data(nlh);
BUG_ON(tw->tw_state != TCP_TIME_WAIT);
- tmo = tw->tw_timer.expires - jiffies;
- if (tmo < 0)
- tmo = 0;
-
inet_diag_msg_common_fill(r, sk);
r->idiag_retrans = 0;
r->idiag_state = tw->tw_substate;
r->idiag_timer = 3;
- r->idiag_expires = jiffies_to_msecs(tmo);
+ tmo = tw->tw_timer.expires - jiffies;
+ r->idiag_expires = jiffies_delta_to_msecs(tmo);
r->idiag_rqueue = 0;
r->idiag_wqueue = 0;
r->idiag_uid = 0;
@@ -385,7 +382,7 @@ static int inet_req_diag_fill(struct sock *sk, struct sk_buff *skb,
offsetof(struct sock, sk_cookie));
tmo = inet_reqsk(sk)->rsk_timer.expires - jiffies;
- r->idiag_expires = (tmo >= 0) ? jiffies_to_msecs(tmo) : 0;
+ r->idiag_expires = jiffies_delta_to_msecs(tmo);
r->idiag_rqueue = 0;
r->idiag_wqueue = 0;
r->idiag_uid = 0;
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index be778599bfed..ff327a62c9ce 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -160,7 +160,12 @@ static void inet_peer_gc(struct inet_peer_base *base,
base->total / inet_peer_threshold * HZ;
for (i = 0; i < gc_cnt; i++) {
p = gc_stack[i];
- delta = (__u32)jiffies - p->dtime;
+
+ /* The READ_ONCE() pairs with the WRITE_ONCE()
+ * in inet_putpeer()
+ */
+ delta = (__u32)jiffies - READ_ONCE(p->dtime);
+
if (delta < ttl || !refcount_dec_if_one(&p->refcnt))
gc_stack[i] = NULL;
}
@@ -237,7 +242,10 @@ EXPORT_SYMBOL_GPL(inet_getpeer);
void inet_putpeer(struct inet_peer *p)
{
- p->dtime = (__u32)jiffies;
+ /* The WRITE_ONCE() pairs with itself (we run lockless)
+ * and the READ_ONCE() in inet_peer_gc()
+ */
+ WRITE_ONCE(p->dtime, (__u32)jiffies);
if (refcount_dec_and_test(&p->refcnt))
call_rcu(&p->rcu, inetpeer_free_rcu);
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 10636fb6093e..572b6307a2df 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -340,6 +340,8 @@ static int __ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
iph->saddr, iph->daddr, tpi->key);
if (tunnel) {
+ const struct iphdr *tnl_params;
+
if (__iptunnel_pull_header(skb, hdr_len, tpi->proto,
raw_proto, false) < 0)
goto drop;
@@ -348,7 +350,9 @@ static int __ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
skb_pop_mac_header(skb);
else
skb_reset_mac_header(skb);
- if (tunnel->collect_md) {
+
+ tnl_params = &tunnel->parms.iph;
+ if (tunnel->collect_md || tnl_params->daddr == 0) {
__be16 flags;
__be64 tun_id;
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index c59a78a267c3..aa438c6758a7 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -302,16 +302,31 @@ drop:
return true;
}
+static bool ip_can_use_hint(const struct sk_buff *skb, const struct iphdr *iph,
+ const struct sk_buff *hint)
+{
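+	/* The previous packet's dst may be reused only if this skb has no
+	 * dst yet and matches the hint on both daddr and tos.
+	 */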
+ return hint && !skb_dst(skb) && ip_hdr(hint)->daddr == iph->daddr &&
+ ip_hdr(hint)->tos == iph->tos;
+}
+
INDIRECT_CALLABLE_DECLARE(int udp_v4_early_demux(struct sk_buff *));
INDIRECT_CALLABLE_DECLARE(int tcp_v4_early_demux(struct sk_buff *));
static int ip_rcv_finish_core(struct net *net, struct sock *sk,
- struct sk_buff *skb, struct net_device *dev)
+ struct sk_buff *skb, struct net_device *dev,
+ const struct sk_buff *hint)
{
const struct iphdr *iph = ip_hdr(skb);
int (*edemux)(struct sk_buff *skb);
struct rtable *rt;
int err;
+ if (ip_can_use_hint(skb, iph, hint)) {
+ err = ip_route_use_hint(skb, iph->daddr, iph->saddr, iph->tos,
+ dev, hint);
+ if (unlikely(err))
+ goto drop_error;
+ }
+
if (net->ipv4.sysctl_ip_early_demux &&
!skb_dst(skb) &&
!skb->sk &&
@@ -408,7 +423,7 @@ static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
if (!skb)
return NET_RX_SUCCESS;
- ret = ip_rcv_finish_core(net, sk, skb, dev);
+ ret = ip_rcv_finish_core(net, sk, skb, dev, NULL);
if (ret != NET_RX_DROP)
ret = dst_input(skb);
return ret;
@@ -535,11 +550,20 @@ static void ip_sublist_rcv_finish(struct list_head *head)
}
}
+static struct sk_buff *ip_extract_route_hint(const struct net *net,
+ struct sk_buff *skb, int rt_type)
+{
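+	/* Custom FIB rules may match on more than daddr/tos, and broadcast
+	 * dsts are not safe to share, so supply no hint in either case.
+	 */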
+ if (fib4_has_custom_rules(net) || rt_type == RTN_BROADCAST)
+ return NULL;
+
+ return skb;
+}
+
static void ip_list_rcv_finish(struct net *net, struct sock *sk,
struct list_head *head)
{
+ struct sk_buff *skb, *next, *hint = NULL;
struct dst_entry *curr_dst = NULL;
- struct sk_buff *skb, *next;
struct list_head sublist;
INIT_LIST_HEAD(&sublist);
@@ -554,11 +578,14 @@ static void ip_list_rcv_finish(struct net *net, struct sock *sk,
skb = l3mdev_ip_rcv(skb);
if (!skb)
continue;
- if (ip_rcv_finish_core(net, sk, skb, dev) == NET_RX_DROP)
+ if (ip_rcv_finish_core(net, sk, skb, dev, hint) == NET_RX_DROP)
continue;
dst = skb_dst(skb);
if (curr_dst != dst) {
+ hint = ip_extract_route_hint(net, skb,
+ ((struct rtable *)dst)->rt_type);
+
/* dispatch old sublist */
if (!list_empty(&sublist))
ip_sublist_rcv_finish(&sublist);
@@ -611,5 +638,6 @@ void ip_list_rcv(struct list_head *head, struct packet_type *pt,
list_add_tail(&skb->list, &sublist);
}
/* dispatch final sublist */
- ip_sublist_rcv(&sublist, curr_dev, curr_net);
+ if (!list_empty(&sublist))
+ ip_sublist_rcv(&sublist, curr_dev, curr_net);
}
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 3d8baaaf7086..9d83cb320dcb 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -422,7 +422,7 @@ int ip_mc_output(struct net *net, struct sock *sk, struct sk_buff *skb)
int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
- struct net_device *dev = skb_dst(skb)->dev;
+ struct net_device *dev = skb_dst(skb)->dev, *indev = skb->dev;
IP_UPD_PO_STATS(net, IPSTATS_MIB_OUT, skb->len);
@@ -430,7 +430,7 @@ int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb)
skb->protocol = htons(ETH_P_IP);
return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
- net, sk, skb, NULL, dev,
+ net, sk, skb, indev, dev,
ip_finish_output,
!(IPCB(skb)->flags & IPSKB_REROUTED));
}
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
index 1452a97914a0..47f8b947eef1 100644
--- a/net/ipv4/ip_tunnel_core.c
+++ b/net/ipv4/ip_tunnel_core.c
@@ -34,6 +34,9 @@
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/dst_metadata.h>
+#include <net/geneve.h>
+#include <net/vxlan.h>
+#include <net/erspan.h>
const struct ip_tunnel_encap_ops __rcu *
iptun_encaps[MAX_IPTUN_ENCAP_OPS] __read_mostly;
@@ -126,15 +129,14 @@ struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md,
if (!md || md->type != METADATA_IP_TUNNEL ||
md->u.tun_info.mode & IP_TUNNEL_INFO_TX)
-
return NULL;
- res = metadata_dst_alloc(0, METADATA_IP_TUNNEL, flags);
+ src = &md->u.tun_info;
+ res = metadata_dst_alloc(src->options_len, METADATA_IP_TUNNEL, flags);
if (!res)
return NULL;
dst = &res->u.tun_info;
- src = &md->u.tun_info;
dst->key.tun_id = src->key.tun_id;
if (src->mode & IP_TUNNEL_INFO_IPV6)
memcpy(&dst->key.u.ipv6.dst, &src->key.u.ipv6.src,
@@ -143,6 +145,8 @@ struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md,
dst->key.u.ipv4.dst = src->key.u.ipv4.src;
dst->key.tun_flags = src->key.tun_flags;
dst->mode = src->mode | IP_TUNNEL_INFO_TX;
+ ip_tunnel_info_opts_set(dst, ip_tunnel_info_opts(src),
+ src->options_len, 0);
return res;
}
@@ -211,30 +215,243 @@ void ip_tunnel_get_stats64(struct net_device *dev,
EXPORT_SYMBOL_GPL(ip_tunnel_get_stats64);
static const struct nla_policy ip_tun_policy[LWTUNNEL_IP_MAX + 1] = {
+ [LWTUNNEL_IP_UNSPEC] = { .strict_start_type = LWTUNNEL_IP_OPTS },
[LWTUNNEL_IP_ID] = { .type = NLA_U64 },
[LWTUNNEL_IP_DST] = { .type = NLA_U32 },
[LWTUNNEL_IP_SRC] = { .type = NLA_U32 },
[LWTUNNEL_IP_TTL] = { .type = NLA_U8 },
[LWTUNNEL_IP_TOS] = { .type = NLA_U8 },
[LWTUNNEL_IP_FLAGS] = { .type = NLA_U16 },
+ [LWTUNNEL_IP_OPTS] = { .type = NLA_NESTED },
+};
+
+static const struct nla_policy ip_opts_policy[LWTUNNEL_IP_OPTS_MAX + 1] = {
+ [LWTUNNEL_IP_OPTS_GENEVE] = { .type = NLA_NESTED },
+ [LWTUNNEL_IP_OPTS_VXLAN] = { .type = NLA_NESTED },
+ [LWTUNNEL_IP_OPTS_ERSPAN] = { .type = NLA_NESTED },
+};
+
+static const struct nla_policy
+geneve_opt_policy[LWTUNNEL_IP_OPT_GENEVE_MAX + 1] = {
+ [LWTUNNEL_IP_OPT_GENEVE_CLASS] = { .type = NLA_U16 },
+ [LWTUNNEL_IP_OPT_GENEVE_TYPE] = { .type = NLA_U8 },
+ [LWTUNNEL_IP_OPT_GENEVE_DATA] = { .type = NLA_BINARY, .len = 128 },
+};
+
+static const struct nla_policy
+vxlan_opt_policy[LWTUNNEL_IP_OPT_VXLAN_MAX + 1] = {
+ [LWTUNNEL_IP_OPT_VXLAN_GBP] = { .type = NLA_U32 },
};
+static const struct nla_policy
+erspan_opt_policy[LWTUNNEL_IP_OPT_ERSPAN_MAX + 1] = {
+ [LWTUNNEL_IP_OPT_ERSPAN_VER] = { .type = NLA_U8 },
+ [LWTUNNEL_IP_OPT_ERSPAN_INDEX] = { .type = NLA_U32 },
+ [LWTUNNEL_IP_OPT_ERSPAN_DIR] = { .type = NLA_U8 },
+ [LWTUNNEL_IP_OPT_ERSPAN_HWID] = { .type = NLA_U8 },
+};
+
+static int ip_tun_parse_opts_geneve(struct nlattr *attr,
+ struct ip_tunnel_info *info, int opts_len,
+ struct netlink_ext_ack *extack)
+{
+ struct nlattr *tb[LWTUNNEL_IP_OPT_GENEVE_MAX + 1];
+ int data_len, err;
+
+ err = nla_parse_nested(tb, LWTUNNEL_IP_OPT_GENEVE_MAX, attr,
+ geneve_opt_policy, extack);
+ if (err)
+ return err;
+
+ if (!tb[LWTUNNEL_IP_OPT_GENEVE_CLASS] ||
+ !tb[LWTUNNEL_IP_OPT_GENEVE_TYPE] ||
+ !tb[LWTUNNEL_IP_OPT_GENEVE_DATA])
+ return -EINVAL;
+
+ attr = tb[LWTUNNEL_IP_OPT_GENEVE_DATA];
+ data_len = nla_len(attr);
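+	/* Geneve option data comes in 4-byte multiples (opt->length is in
+	 * 4-byte units), so anything else is malformed.
+	 */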
+ if (data_len % 4)
+ return -EINVAL;
+
+ if (info) {
+ struct geneve_opt *opt = ip_tunnel_info_opts(info) + opts_len;
+
+ memcpy(opt->opt_data, nla_data(attr), data_len);
+ opt->length = data_len / 4;
+ attr = tb[LWTUNNEL_IP_OPT_GENEVE_CLASS];
+ opt->opt_class = nla_get_be16(attr);
+ attr = tb[LWTUNNEL_IP_OPT_GENEVE_TYPE];
+ opt->type = nla_get_u8(attr);
+ info->key.tun_flags |= TUNNEL_GENEVE_OPT;
+ }
+
+ return sizeof(struct geneve_opt) + data_len;
+}
+
+static int ip_tun_parse_opts_vxlan(struct nlattr *attr,
+ struct ip_tunnel_info *info, int opts_len,
+ struct netlink_ext_ack *extack)
+{
+ struct nlattr *tb[LWTUNNEL_IP_OPT_VXLAN_MAX + 1];
+ int err;
+
+ err = nla_parse_nested(tb, LWTUNNEL_IP_OPT_VXLAN_MAX, attr,
+ vxlan_opt_policy, extack);
+ if (err)
+ return err;
+
+ if (!tb[LWTUNNEL_IP_OPT_VXLAN_GBP])
+ return -EINVAL;
+
+ if (info) {
+ struct vxlan_metadata *md =
+ ip_tunnel_info_opts(info) + opts_len;
+
+ attr = tb[LWTUNNEL_IP_OPT_VXLAN_GBP];
+ md->gbp = nla_get_u32(attr);
+ info->key.tun_flags |= TUNNEL_VXLAN_OPT;
+ }
+
+ return sizeof(struct vxlan_metadata);
+}
+
+static int ip_tun_parse_opts_erspan(struct nlattr *attr,
+ struct ip_tunnel_info *info, int opts_len,
+ struct netlink_ext_ack *extack)
+{
+ struct nlattr *tb[LWTUNNEL_IP_OPT_ERSPAN_MAX + 1];
+ int err;
+ u8 ver;
+
+ err = nla_parse_nested(tb, LWTUNNEL_IP_OPT_ERSPAN_MAX, attr,
+ erspan_opt_policy, extack);
+ if (err)
+ return err;
+
+ if (!tb[LWTUNNEL_IP_OPT_ERSPAN_VER])
+ return -EINVAL;
+
+ ver = nla_get_u8(tb[LWTUNNEL_IP_OPT_ERSPAN_VER]);
+ if (ver == 1) {
+ if (!tb[LWTUNNEL_IP_OPT_ERSPAN_INDEX])
+ return -EINVAL;
+ } else if (ver == 2) {
+ if (!tb[LWTUNNEL_IP_OPT_ERSPAN_DIR] ||
+ !tb[LWTUNNEL_IP_OPT_ERSPAN_HWID])
+ return -EINVAL;
+ } else {
+ return -EINVAL;
+ }
+
+ if (info) {
+ struct erspan_metadata *md =
+ ip_tunnel_info_opts(info) + opts_len;
+
+ md->version = ver;
+ if (ver == 1) {
+ attr = tb[LWTUNNEL_IP_OPT_ERSPAN_INDEX];
+ md->u.index = nla_get_be32(attr);
+ } else {
+ attr = tb[LWTUNNEL_IP_OPT_ERSPAN_DIR];
+ md->u.md2.dir = nla_get_u8(attr);
+ attr = tb[LWTUNNEL_IP_OPT_ERSPAN_HWID];
+ set_hwid(&md->u.md2, nla_get_u8(attr));
+ }
+
+ info->key.tun_flags |= TUNNEL_ERSPAN_OPT;
+ }
+
+ return sizeof(struct erspan_metadata);
+}
+
+static int ip_tun_parse_opts(struct nlattr *attr, struct ip_tunnel_info *info,
+ struct netlink_ext_ack *extack)
+{
+ int err, rem, opt_len, opts_len = 0, type = 0;
+ struct nlattr *nla;
+
+ if (!attr)
+ return 0;
+
+ err = nla_validate(nla_data(attr), nla_len(attr), LWTUNNEL_IP_OPTS_MAX,
+ ip_opts_policy, extack);
+ if (err)
+ return err;
+
+ nla_for_each_attr(nla, nla_data(attr), nla_len(attr), rem) {
+ switch (nla_type(nla)) {
+ case LWTUNNEL_IP_OPTS_GENEVE:
+ if (type && type != TUNNEL_GENEVE_OPT)
+ return -EINVAL;
+ opt_len = ip_tun_parse_opts_geneve(nla, info, opts_len,
+ extack);
+ if (opt_len < 0)
+ return opt_len;
+ opts_len += opt_len;
+ if (opts_len > IP_TUNNEL_OPTS_MAX)
+ return -EINVAL;
+ type = TUNNEL_GENEVE_OPT;
+ break;
+ case LWTUNNEL_IP_OPTS_VXLAN:
+ if (type)
+ return -EINVAL;
+ opt_len = ip_tun_parse_opts_vxlan(nla, info, opts_len,
+ extack);
+ if (opt_len < 0)
+ return opt_len;
+ opts_len += opt_len;
+ type = TUNNEL_VXLAN_OPT;
+ break;
+ case LWTUNNEL_IP_OPTS_ERSPAN:
+ if (type)
+ return -EINVAL;
+ opt_len = ip_tun_parse_opts_erspan(nla, info, opts_len,
+ extack);
+ if (opt_len < 0)
+ return opt_len;
+ opts_len += opt_len;
+ type = TUNNEL_ERSPAN_OPT;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return opts_len;
+}
+
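+/* ip_tun_parse_opts() is called twice: first with info == NULL just to
+ * size the options (ip_tun_get_optlen), then again to fill them in once
+ * the lwtunnel state has been allocated (ip_tun_set_opts).
+ */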
+static int ip_tun_get_optlen(struct nlattr *attr,
+ struct netlink_ext_ack *extack)
+{
+ return ip_tun_parse_opts(attr, NULL, extack);
+}
+
+static int ip_tun_set_opts(struct nlattr *attr, struct ip_tunnel_info *info,
+ struct netlink_ext_ack *extack)
+{
+ return ip_tun_parse_opts(attr, info, extack);
+}
+
static int ip_tun_build_state(struct nlattr *attr,
unsigned int family, const void *cfg,
struct lwtunnel_state **ts,
struct netlink_ext_ack *extack)
{
- struct ip_tunnel_info *tun_info;
- struct lwtunnel_state *new_state;
struct nlattr *tb[LWTUNNEL_IP_MAX + 1];
- int err;
+ struct lwtunnel_state *new_state;
+ struct ip_tunnel_info *tun_info;
+ int err, opt_len;
err = nla_parse_nested_deprecated(tb, LWTUNNEL_IP_MAX, attr,
ip_tun_policy, extack);
if (err < 0)
return err;
- new_state = lwtunnel_state_alloc(sizeof(*tun_info));
+ opt_len = ip_tun_get_optlen(tb[LWTUNNEL_IP_OPTS], extack);
+ if (opt_len < 0)
+ return opt_len;
+
+ new_state = lwtunnel_state_alloc(sizeof(*tun_info) + opt_len);
if (!new_state)
return -ENOMEM;
@@ -242,6 +459,12 @@ static int ip_tun_build_state(struct nlattr *attr,
tun_info = lwt_tun_info(new_state);
+ err = ip_tun_set_opts(tb[LWTUNNEL_IP_OPTS], tun_info, extack);
+ if (err < 0) {
+ lwtstate_free(new_state);
+ return err;
+ }
+
#ifdef CONFIG_DST_CACHE
err = dst_cache_init(&tun_info->dst_cache, GFP_KERNEL);
if (err) {
@@ -266,10 +489,12 @@ static int ip_tun_build_state(struct nlattr *attr,
tun_info->key.tos = nla_get_u8(tb[LWTUNNEL_IP_TOS]);
if (tb[LWTUNNEL_IP_FLAGS])
- tun_info->key.tun_flags = nla_get_be16(tb[LWTUNNEL_IP_FLAGS]);
+ tun_info->key.tun_flags |=
+ (nla_get_be16(tb[LWTUNNEL_IP_FLAGS]) &
+ ~TUNNEL_OPTIONS_PRESENT);
tun_info->mode = IP_TUNNEL_INFO_TX;
- tun_info->options_len = 0;
+ tun_info->options_len = opt_len;
*ts = new_state;
@@ -285,6 +510,114 @@ static void ip_tun_destroy_state(struct lwtunnel_state *lwtstate)
#endif
}
+static int ip_tun_fill_encap_opts_geneve(struct sk_buff *skb,
+ struct ip_tunnel_info *tun_info)
+{
+ struct geneve_opt *opt;
+ struct nlattr *nest;
+ int offset = 0;
+
+ nest = nla_nest_start_noflag(skb, LWTUNNEL_IP_OPTS_GENEVE);
+ if (!nest)
+ return -ENOMEM;
+
+ while (tun_info->options_len > offset) {
+ opt = ip_tunnel_info_opts(tun_info) + offset;
+ if (nla_put_be16(skb, LWTUNNEL_IP_OPT_GENEVE_CLASS,
+ opt->opt_class) ||
+ nla_put_u8(skb, LWTUNNEL_IP_OPT_GENEVE_TYPE, opt->type) ||
+ nla_put(skb, LWTUNNEL_IP_OPT_GENEVE_DATA, opt->length * 4,
+ opt->opt_data)) {
+ nla_nest_cancel(skb, nest);
+ return -ENOMEM;
+ }
+ offset += sizeof(*opt) + opt->length * 4;
+ }
+
+ nla_nest_end(skb, nest);
+ return 0;
+}
+
+static int ip_tun_fill_encap_opts_vxlan(struct sk_buff *skb,
+ struct ip_tunnel_info *tun_info)
+{
+ struct vxlan_metadata *md;
+ struct nlattr *nest;
+
+ nest = nla_nest_start_noflag(skb, LWTUNNEL_IP_OPTS_VXLAN);
+ if (!nest)
+ return -ENOMEM;
+
+ md = ip_tunnel_info_opts(tun_info);
+ if (nla_put_u32(skb, LWTUNNEL_IP_OPT_VXLAN_GBP, md->gbp)) {
+ nla_nest_cancel(skb, nest);
+ return -ENOMEM;
+ }
+
+ nla_nest_end(skb, nest);
+ return 0;
+}
+
+static int ip_tun_fill_encap_opts_erspan(struct sk_buff *skb,
+ struct ip_tunnel_info *tun_info)
+{
+ struct erspan_metadata *md;
+ struct nlattr *nest;
+
+ nest = nla_nest_start_noflag(skb, LWTUNNEL_IP_OPTS_ERSPAN);
+ if (!nest)
+ return -ENOMEM;
+
+ md = ip_tunnel_info_opts(tun_info);
+ if (nla_put_u8(skb, LWTUNNEL_IP_OPT_ERSPAN_VER, md->version))
+ goto err;
+
+ if (md->version == 1 &&
+ nla_put_be32(skb, LWTUNNEL_IP_OPT_ERSPAN_INDEX, md->u.index))
+ goto err;
+
+ if (md->version == 2 &&
+ (nla_put_u8(skb, LWTUNNEL_IP_OPT_ERSPAN_DIR, md->u.md2.dir) ||
+ nla_put_u8(skb, LWTUNNEL_IP_OPT_ERSPAN_HWID,
+ get_hwid(&md->u.md2))))
+ goto err;
+
+ nla_nest_end(skb, nest);
+ return 0;
+err:
+ nla_nest_cancel(skb, nest);
+ return -ENOMEM;
+}
+
+static int ip_tun_fill_encap_opts(struct sk_buff *skb, int type,
+ struct ip_tunnel_info *tun_info)
+{
+ struct nlattr *nest;
+ int err = 0;
+
+ if (!(tun_info->key.tun_flags & TUNNEL_OPTIONS_PRESENT))
+ return 0;
+
+ nest = nla_nest_start_noflag(skb, type);
+ if (!nest)
+ return -ENOMEM;
+
+ if (tun_info->key.tun_flags & TUNNEL_GENEVE_OPT)
+ err = ip_tun_fill_encap_opts_geneve(skb, tun_info);
+ else if (tun_info->key.tun_flags & TUNNEL_VXLAN_OPT)
+ err = ip_tun_fill_encap_opts_vxlan(skb, tun_info);
+ else if (tun_info->key.tun_flags & TUNNEL_ERSPAN_OPT)
+ err = ip_tun_fill_encap_opts_erspan(skb, tun_info);
+
+ if (err) {
+ nla_nest_cancel(skb, nest);
+ return err;
+ }
+
+ nla_nest_end(skb, nest);
+ return 0;
+}
+
static int ip_tun_fill_encap_info(struct sk_buff *skb,
struct lwtunnel_state *lwtstate)
{
@@ -296,12 +629,52 @@ static int ip_tun_fill_encap_info(struct sk_buff *skb,
nla_put_in_addr(skb, LWTUNNEL_IP_SRC, tun_info->key.u.ipv4.src) ||
nla_put_u8(skb, LWTUNNEL_IP_TOS, tun_info->key.tos) ||
nla_put_u8(skb, LWTUNNEL_IP_TTL, tun_info->key.ttl) ||
- nla_put_be16(skb, LWTUNNEL_IP_FLAGS, tun_info->key.tun_flags))
+ nla_put_be16(skb, LWTUNNEL_IP_FLAGS, tun_info->key.tun_flags) ||
+ ip_tun_fill_encap_opts(skb, LWTUNNEL_IP_OPTS, tun_info))
return -ENOMEM;
return 0;
}
+static int ip_tun_opts_nlsize(struct ip_tunnel_info *info)
+{
+ int opt_len;
+
+ if (!(info->key.tun_flags & TUNNEL_OPTIONS_PRESENT))
+ return 0;
+
+ opt_len = nla_total_size(0); /* LWTUNNEL_IP_OPTS */
+ if (info->key.tun_flags & TUNNEL_GENEVE_OPT) {
+ struct geneve_opt *opt;
+ int offset = 0;
+
+ opt_len += nla_total_size(0); /* LWTUNNEL_IP_OPTS_GENEVE */
+ while (info->options_len > offset) {
+ opt = ip_tunnel_info_opts(info) + offset;
+ opt_len += nla_total_size(2) /* OPT_GENEVE_CLASS */
+ + nla_total_size(1) /* OPT_GENEVE_TYPE */
+ + nla_total_size(opt->length * 4);
+ /* OPT_GENEVE_DATA */
+ offset += sizeof(*opt) + opt->length * 4;
+ }
+ } else if (info->key.tun_flags & TUNNEL_VXLAN_OPT) {
+ opt_len += nla_total_size(0) /* LWTUNNEL_IP_OPTS_VXLAN */
+ + nla_total_size(4); /* OPT_VXLAN_GBP */
+ } else if (info->key.tun_flags & TUNNEL_ERSPAN_OPT) {
+ struct erspan_metadata *md = ip_tunnel_info_opts(info);
+
+ opt_len += nla_total_size(0) /* LWTUNNEL_IP_OPTS_ERSPAN */
+ + nla_total_size(1) /* OPT_ERSPAN_VER */
+ + (md->version == 1 ? nla_total_size(4)
+ /* OPT_ERSPAN_INDEX (v1) */
+ : nla_total_size(1) +
+ nla_total_size(1));
+ /* OPT_ERSPAN_DIR + HWID (v2) */
+ }
+
+ return opt_len;
+}
+
static int ip_tun_encap_nlsize(struct lwtunnel_state *lwtstate)
{
return nla_total_size_64bit(8) /* LWTUNNEL_IP_ID */
@@ -309,13 +682,21 @@ static int ip_tun_encap_nlsize(struct lwtunnel_state *lwtstate)
+ nla_total_size(4) /* LWTUNNEL_IP_SRC */
+ nla_total_size(1) /* LWTUNNEL_IP_TOS */
+ nla_total_size(1) /* LWTUNNEL_IP_TTL */
- + nla_total_size(2); /* LWTUNNEL_IP_FLAGS */
+ + nla_total_size(2) /* LWTUNNEL_IP_FLAGS */
+ + ip_tun_opts_nlsize(lwt_tun_info(lwtstate));
+ /* LWTUNNEL_IP_OPTS */
}
static int ip_tun_cmp_encap(struct lwtunnel_state *a, struct lwtunnel_state *b)
{
- return memcmp(lwt_tun_info(a), lwt_tun_info(b),
- sizeof(struct ip_tunnel_info));
+ struct ip_tunnel_info *info_a = lwt_tun_info(a);
+ struct ip_tunnel_info *info_b = lwt_tun_info(b);
+
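+	/* With variable-length options now trailing the structure, a flat
+	 * memcmp() of the whole ip_tunnel_info is no longer sufficient;
+	 * compare key, mode and options separately.
+	 */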
+ return memcmp(info_a, info_b, sizeof(info_a->key)) ||
+ info_a->mode != info_b->mode ||
+ info_a->options_len != info_b->options_len ||
+ memcmp(ip_tunnel_info_opts(info_a),
+ ip_tunnel_info_opts(info_b), info_a->options_len);
}
static const struct lwtunnel_encap_ops ip_tun_lwt_ops = {
@@ -328,12 +709,14 @@ static const struct lwtunnel_encap_ops ip_tun_lwt_ops = {
};
static const struct nla_policy ip6_tun_policy[LWTUNNEL_IP6_MAX + 1] = {
+ [LWTUNNEL_IP6_UNSPEC] = { .strict_start_type = LWTUNNEL_IP6_OPTS },
[LWTUNNEL_IP6_ID] = { .type = NLA_U64 },
[LWTUNNEL_IP6_DST] = { .len = sizeof(struct in6_addr) },
[LWTUNNEL_IP6_SRC] = { .len = sizeof(struct in6_addr) },
[LWTUNNEL_IP6_HOPLIMIT] = { .type = NLA_U8 },
[LWTUNNEL_IP6_TC] = { .type = NLA_U8 },
[LWTUNNEL_IP6_FLAGS] = { .type = NLA_U16 },
+ [LWTUNNEL_IP6_OPTS] = { .type = NLA_NESTED },
};
static int ip6_tun_build_state(struct nlattr *attr,
@@ -341,17 +724,21 @@ static int ip6_tun_build_state(struct nlattr *attr,
struct lwtunnel_state **ts,
struct netlink_ext_ack *extack)
{
- struct ip_tunnel_info *tun_info;
- struct lwtunnel_state *new_state;
struct nlattr *tb[LWTUNNEL_IP6_MAX + 1];
- int err;
+ struct lwtunnel_state *new_state;
+ struct ip_tunnel_info *tun_info;
+ int err, opt_len;
err = nla_parse_nested_deprecated(tb, LWTUNNEL_IP6_MAX, attr,
ip6_tun_policy, extack);
if (err < 0)
return err;
- new_state = lwtunnel_state_alloc(sizeof(*tun_info));
+ opt_len = ip_tun_get_optlen(tb[LWTUNNEL_IP6_OPTS], extack);
+ if (opt_len < 0)
+ return opt_len;
+
+ new_state = lwtunnel_state_alloc(sizeof(*tun_info) + opt_len);
if (!new_state)
return -ENOMEM;
@@ -359,6 +746,12 @@ static int ip6_tun_build_state(struct nlattr *attr,
tun_info = lwt_tun_info(new_state);
+ err = ip_tun_set_opts(tb[LWTUNNEL_IP6_OPTS], tun_info, extack);
+ if (err < 0) {
+ lwtstate_free(new_state);
+ return err;
+ }
+
if (tb[LWTUNNEL_IP6_ID])
tun_info->key.tun_id = nla_get_be64(tb[LWTUNNEL_IP6_ID]);
@@ -375,10 +768,12 @@ static int ip6_tun_build_state(struct nlattr *attr,
tun_info->key.tos = nla_get_u8(tb[LWTUNNEL_IP6_TC]);
if (tb[LWTUNNEL_IP6_FLAGS])
- tun_info->key.tun_flags = nla_get_be16(tb[LWTUNNEL_IP6_FLAGS]);
+ tun_info->key.tun_flags |=
+ (nla_get_be16(tb[LWTUNNEL_IP6_FLAGS]) &
+ ~TUNNEL_OPTIONS_PRESENT);
tun_info->mode = IP_TUNNEL_INFO_TX | IP_TUNNEL_INFO_IPV6;
- tun_info->options_len = 0;
+ tun_info->options_len = opt_len;
*ts = new_state;
@@ -396,7 +791,8 @@ static int ip6_tun_fill_encap_info(struct sk_buff *skb,
nla_put_in6_addr(skb, LWTUNNEL_IP6_SRC, &tun_info->key.u.ipv6.src) ||
nla_put_u8(skb, LWTUNNEL_IP6_TC, tun_info->key.tos) ||
nla_put_u8(skb, LWTUNNEL_IP6_HOPLIMIT, tun_info->key.ttl) ||
- nla_put_be16(skb, LWTUNNEL_IP6_FLAGS, tun_info->key.tun_flags))
+ nla_put_be16(skb, LWTUNNEL_IP6_FLAGS, tun_info->key.tun_flags) ||
+ ip_tun_fill_encap_opts(skb, LWTUNNEL_IP6_OPTS, tun_info))
return -ENOMEM;
return 0;
@@ -409,7 +805,9 @@ static int ip6_tun_encap_nlsize(struct lwtunnel_state *lwtstate)
+ nla_total_size(16) /* LWTUNNEL_IP6_SRC */
+ nla_total_size(1) /* LWTUNNEL_IP6_HOPLIMIT */
+ nla_total_size(1) /* LWTUNNEL_IP6_TC */
- + nla_total_size(2); /* LWTUNNEL_IP6_FLAGS */
+ + nla_total_size(2) /* LWTUNNEL_IP6_FLAGS */
+ + ip_tun_opts_nlsize(lwt_tun_info(lwtstate));
+ /* LWTUNNEL_IP6_OPTS */
}
static const struct lwtunnel_encap_ops ip6_tun_lwt_ops = {
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 9bcca08efec9..f35308ff84c3 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -1412,6 +1412,9 @@ static int __init wait_for_devices(void)
struct net_device *dev;
int found = 0;
+ /* make sure deferred device probes are finished */
+ wait_for_device_probe();
+
rtnl_lock();
for_each_netdev(&init_net, dev) {
if (ic_is_init_dev(dev)) {
@@ -1483,10 +1486,10 @@ static int __init ip_auto_config(void)
* missing values.
*/
if (ic_myaddr == NONE ||
-#ifdef CONFIG_ROOT_NFS
+#if defined(CONFIG_ROOT_NFS) || defined(CONFIG_CIFS_ROOT)
(root_server_addr == NONE &&
ic_servaddr == NONE &&
- ROOT_DEV == Root_NFS) ||
+ (ROOT_DEV == Root_NFS || ROOT_DEV == Root_CIFS)) ||
#endif
ic_first_dev->next) {
#ifdef IPCONFIG_DYNAMIC
@@ -1513,6 +1516,12 @@ static int __init ip_auto_config(void)
goto try_try_again;
}
#endif
+#ifdef CONFIG_CIFS_ROOT
+ if (ROOT_DEV == Root_CIFS) {
+ pr_err("IP-Config: Retrying forever (CIFS root)...\n");
+ goto try_try_again;
+ }
+#endif
if (--retries) {
pr_err("IP-Config: Reopening network devices...\n");
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 716d5472c022..6e68def66822 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -278,9 +278,10 @@ static void __net_exit ipmr_rules_exit(struct net *net)
rtnl_unlock();
}
-static int ipmr_rules_dump(struct net *net, struct notifier_block *nb)
+static int ipmr_rules_dump(struct net *net, struct notifier_block *nb,
+ struct netlink_ext_ack *extack)
{
- return fib_rules_dump(net, nb, RTNL_FAMILY_IPMR);
+ return fib_rules_dump(net, nb, RTNL_FAMILY_IPMR, extack);
}
static unsigned int ipmr_rules_seq_read(struct net *net)
@@ -336,7 +337,8 @@ static void __net_exit ipmr_rules_exit(struct net *net)
rtnl_unlock();
}
-static int ipmr_rules_dump(struct net *net, struct notifier_block *nb)
+static int ipmr_rules_dump(struct net *net, struct notifier_block *nb,
+ struct netlink_ext_ack *extack)
{
return 0;
}
@@ -2289,7 +2291,8 @@ int ipmr_get_route(struct net *net, struct sk_buff *skb,
rcu_read_unlock();
return -ENODEV;
}
- skb2 = skb_clone(skb, GFP_ATOMIC);
+
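+	/* The copy must be writable and leave room for an IP header, which
+	 * the unresolved-entry path below pushes on; a clone sharing data
+	 * would not do.
+	 */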
+ skb2 = skb_realloc_headroom(skb, sizeof(struct iphdr));
if (!skb2) {
read_unlock(&mrt_lock);
rcu_read_unlock();
@@ -3040,10 +3043,11 @@ static unsigned int ipmr_seq_read(struct net *net)
return net->ipv4.ipmr_seq + ipmr_rules_seq_read(net);
}
-static int ipmr_dump(struct net *net, struct notifier_block *nb)
+static int ipmr_dump(struct net *net, struct notifier_block *nb,
+ struct netlink_ext_ack *extack)
{
return mr_dump(net, nb, RTNL_FAMILY_IPMR, ipmr_rules_dump,
- ipmr_mr_table_iter, &mrt_lock);
+ ipmr_mr_table_iter, &mrt_lock, extack);
}
static const struct fib_notifier_ops ipmr_notifier_ops_template = {
diff --git a/net/ipv4/ipmr_base.c b/net/ipv4/ipmr_base.c
index ea48bd15a575..aa8738a91210 100644
--- a/net/ipv4/ipmr_base.c
+++ b/net/ipv4/ipmr_base.c
@@ -386,15 +386,17 @@ EXPORT_SYMBOL(mr_rtm_dumproute);
int mr_dump(struct net *net, struct notifier_block *nb, unsigned short family,
int (*rules_dump)(struct net *net,
- struct notifier_block *nb),
+ struct notifier_block *nb,
+ struct netlink_ext_ack *extack),
struct mr_table *(*mr_iter)(struct net *net,
struct mr_table *mrt),
- rwlock_t *mrt_lock)
+ rwlock_t *mrt_lock,
+ struct netlink_ext_ack *extack)
{
struct mr_table *mrt;
int err;
- err = rules_dump(net, nb);
+ err = rules_dump(net, nb, extack);
if (err)
return err;
@@ -409,17 +411,25 @@ int mr_dump(struct net *net, struct notifier_block *nb, unsigned short family,
if (!v->dev)
continue;
- mr_call_vif_notifier(nb, net, family,
- FIB_EVENT_VIF_ADD,
- v, vifi, mrt->id);
+ err = mr_call_vif_notifier(nb, family,
+ FIB_EVENT_VIF_ADD,
+ v, vifi, mrt->id, extack);
+ if (err)
+ break;
}
read_unlock(mrt_lock);
+ if (err)
+ return err;
+
/* Notify on table MFC entries */
- list_for_each_entry_rcu(mfc, &mrt->mfc_cache_list, list)
- mr_call_mfc_notifier(nb, net, family,
- FIB_EVENT_ENTRY_ADD,
- mfc, mrt->id);
+ list_for_each_entry_rcu(mfc, &mrt->mfc_cache_list, list) {
+ err = mr_call_mfc_notifier(nb, family,
+ FIB_EVENT_ENTRY_ADD,
+ mfc, mrt->id, extack);
+ if (err)
+ return err;
+ }
}
return 0;
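
The mr_dump() rework above turns fire-and-forget notifier calls into an error-propagating walk: each VIF and MFC notification may now fail, the loop stops at the first error, and extack can carry the reason back to the caller. A minimal user-space sketch of that pattern (all names here are illustrative, not kernel API):

    #include <stdio.h>

    /* Stand-ins for mr_call_vif_notifier()/mr_call_mfc_notifier(),
     * which may now return a negative error instead of void.
     */
    static int notify_entry(int id)
    {
        if (id == 3)
            return -1;      /* simulate a notifier rejecting an entry */
        printf("notified entry %d\n", id);
        return 0;
    }

    static int dump_all(int nentries)
    {
        int err = 0;

        for (int i = 0; i < nentries; i++) {
            err = notify_entry(i);
            if (err)
                break;      /* stop at the first failure ... */
        }
        return err;         /* ... and propagate it to the caller */
    }

    int main(void)
    {
        return dump_all(5) ? 1 : 0;
    }
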
diff --git a/net/ipv4/netfilter/nf_flow_table_ipv4.c b/net/ipv4/netfilter/nf_flow_table_ipv4.c
index 012c4047c788..e32e41b99f0f 100644
--- a/net/ipv4/netfilter/nf_flow_table_ipv4.c
+++ b/net/ipv4/netfilter/nf_flow_table_ipv4.c
@@ -9,6 +9,8 @@
static struct nf_flowtable_type flowtable_ipv4 = {
.family = NFPROTO_IPV4,
.init = nf_flow_table_init,
+ .setup = nf_flow_table_offload_setup,
+ .action = nf_flow_rule_route_ipv4,
.free = nf_flow_table_free,
.hook = nf_flow_offload_ip_hook,
.owner = THIS_MODULE,
diff --git a/net/ipv4/netfilter/nf_socket_ipv4.c b/net/ipv4/netfilter/nf_socket_ipv4.c
index 36a28d46149c..c94445b44d8c 100644
--- a/net/ipv4/netfilter/nf_socket_ipv4.c
+++ b/net/ipv4/netfilter/nf_socket_ipv4.c
@@ -31,16 +31,8 @@ extract_icmp4_fields(const struct sk_buff *skb, u8 *protocol,
if (icmph == NULL)
return 1;
- switch (icmph->type) {
- case ICMP_DEST_UNREACH:
- case ICMP_SOURCE_QUENCH:
- case ICMP_REDIRECT:
- case ICMP_TIME_EXCEEDED:
- case ICMP_PARAMETERPROB:
- break;
- default:
+ if (!icmp_is_err(icmph->type))
return 1;
- }
inside_iph = skb_header_pointer(skb, outside_hdrlen +
sizeof(struct icmphdr),
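
This hunk, and the matching one in net/ipv4/route.c below, replaces an open-coded list of ICMP error types with the icmp_is_err() helper. A stand-alone sketch of such a predicate, using the standard RFC 792 type values (this shows the idea, not the kernel's implementation):

    #include <stdbool.h>
    #include <stdio.h>

    /* Standard ICMP type values (RFC 792) */
    #define ICMP_DEST_UNREACH   3
    #define ICMP_SOURCE_QUENCH  4
    #define ICMP_REDIRECT       5
    #define ICMP_TIME_EXCEEDED  11
    #define ICMP_PARAMETERPROB  12

    /* icmp_is_err()-style predicate: true for ICMP error messages */
    static bool icmp_is_err_sketch(int type)
    {
        switch (type) {
        case ICMP_DEST_UNREACH:
        case ICMP_SOURCE_QUENCH:
        case ICMP_REDIRECT:
        case ICMP_TIME_EXCEEDED:
        case ICMP_PARAMETERPROB:
            return true;
        }
        return false;
    }

    int main(void)
    {
        printf("%d %d\n", icmp_is_err_sketch(ICMP_REDIRECT),
               icmp_is_err_sketch(8) /* echo request */);
        return 0;
    }
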
diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c
index fc34fd1668d6..511eaa94e2d1 100644
--- a/net/ipv4/nexthop.c
+++ b/net/ipv4/nexthop.c
@@ -23,7 +23,6 @@ static void remove_nexthop(struct net *net, struct nexthop *nh,
#define NH_DEV_HASHSIZE (1U << NH_DEV_HASHBITS)
static const struct nla_policy rtm_nh_policy[NHA_MAX + 1] = {
- [NHA_UNSPEC] = { .strict_start_type = NHA_UNSPEC + 1 },
[NHA_ID] = { .type = NLA_U32 },
[NHA_GROUP] = { .type = NLA_BINARY },
[NHA_GROUP_TYPE] = { .type = NLA_U16 },
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 621f83434b24..f88c93c38f11 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1894,10 +1894,7 @@ static void ip_multipath_l3_keys(const struct sk_buff *skb,
if (!icmph)
goto out;
- if (icmph->type != ICMP_DEST_UNREACH &&
- icmph->type != ICMP_REDIRECT &&
- icmph->type != ICMP_TIME_EXCEEDED &&
- icmph->type != ICMP_PARAMETERPROB)
+ if (!icmp_is_err(icmph->type))
goto out;
inner_iph = skb_header_pointer(skb,
@@ -2022,10 +2019,52 @@ static int ip_mkroute_input(struct sk_buff *skb,
return __mkroute_input(skb, res, in_dev, daddr, saddr, tos);
}
+/* Implements the same saddr-related checks as ip_route_input_slow(),
+ * assuming daddr is valid and the destination is not a local broadcast one.
+ * Uses the provided hint instead of performing a route lookup.
+ */
+int ip_route_use_hint(struct sk_buff *skb, __be32 daddr, __be32 saddr,
+ u8 tos, struct net_device *dev,
+ const struct sk_buff *hint)
+{
+ struct in_device *in_dev = __in_dev_get_rcu(dev);
+ struct rtable *rt = (struct rtable *)hint;
+ struct net *net = dev_net(dev);
+ int err = -EINVAL;
+ u32 tag = 0;
+
+ if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr))
+ goto martian_source;
+
+ if (ipv4_is_zeronet(saddr))
+ goto martian_source;
+
+ if (ipv4_is_loopback(saddr) && !IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
+ goto martian_source;
+
+ if (rt->rt_type != RTN_LOCAL)
+ goto skip_validate_source;
+
+ tos &= IPTOS_RT_MASK;
+ err = fib_validate_source(skb, saddr, daddr, tos, 0, dev, in_dev, &tag);
+ if (err < 0)
+ goto martian_source;
+
+skip_validate_source:
+ skb_dst_copy(skb, hint);
+ return 0;
+
+martian_source:
+ ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
+ return err;
+}
+
/*
* NOTE. We drop all the packets that has local source
* addresses, because every properly looped back packet
* must have correct destination already attached by output routine.
+ * Changes to the enforced policies must also be applied to
+ * ip_route_use_hint().
*
* Such approach solves two big problems:
* 1. Not simplex devices are handled properly.
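
On the caller side, the idea is that IPv4 list receive can reuse the dst of the previous packet when consecutive packets share a destination, while ip_route_use_hint() still re-runs the source-address checks for every packet. A rough user-space sketch of that reuse-with-revalidation shape (types and names are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    struct route { uint32_t daddr; };

    /* Per-packet checks that must run even when the lookup is skipped,
     * standing in for the saddr validation in ip_route_use_hint().
     */
    static int validate_source(uint32_t saddr)
    {
        return saddr != 0;      /* e.g. reject a zero source */
    }

    static struct route *lookup(uint32_t daddr)
    {
        static struct route r;
        r.daddr = daddr;        /* pretend this is an expensive FIB lookup */
        return &r;
    }

    int main(void)
    {
        uint32_t pkts[][2] = { {1, 10}, {2, 10}, {3, 10}, {4, 20} };
        struct route *hint = NULL;

        for (unsigned i = 0; i < 4; i++) {
            uint32_t saddr = pkts[i][0], daddr = pkts[i][1];

            if (!validate_source(saddr))
                continue;               /* checks are never skipped */
            if (!hint || hint->daddr != daddr)
                hint = lookup(daddr);   /* lookup only when dst changes */
            printf("pkt %u routed via daddr %u\n", i, hint->daddr);
        }
        return 0;
    }
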
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 535b69326f66..345b2b0ff618 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -62,10 +62,10 @@ static u32 cookie_hash(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport,
* Since subsequent timestamps use the normal tcp_time_stamp value, we
* must make sure that the resulting initial timestamp is <= tcp_time_stamp.
*/
-u64 cookie_init_timestamp(struct request_sock *req)
+u64 cookie_init_timestamp(struct request_sock *req, u64 now)
{
struct inet_request_sock *ireq;
- u32 ts, ts_now = tcp_time_stamp_raw();
+ u32 ts, ts_now = tcp_ns_to_ts(now);
u32 options = 0;
ireq = inet_rsk(req);
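
cookie_init_timestamp() now derives the timestamp from the caller-supplied nanosecond clock instead of sampling the clock a second time, which keeps the cookie timestamp consistent with the skb_mstamp_ns assignment in tcp_make_synack(). Assuming TCP timestamps tick at 1 kHz (TCP_TS_HZ), the ns-to-ticks conversion is a single division; a sketch:

    #include <stdint.h>
    #include <stdio.h>

    #define NSEC_PER_SEC  1000000000ULL
    #define TCP_TS_HZ     1000  /* assumed: TCP timestamps tick at 1 kHz */

    /* Sketch of a tcp_ns_to_ts()-style conversion from nanoseconds to
     * timestamp ticks; the kernel uses div_u64() for the 64-bit divide.
     */
    static uint32_t ns_to_ts(uint64_t ns)
    {
        return (uint32_t)(ns / (NSEC_PER_SEC / TCP_TS_HZ));
    }

    int main(void)
    {
        uint64_t now = 12345678901234ULL;     /* some ns timestamp */

        printf("ts = %u\n", ns_to_ts(now));   /* ns / 1e6 => ms ticks */
        return 0;
    }
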
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 59ded25acd04..fcb2cd167f64 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -340,6 +340,10 @@ static int proc_tcp_fastopen_key(struct ctl_table *table, int write,
user_key[i * 4 + 1],
user_key[i * 4 + 2],
user_key[i * 4 + 3]);
+
+ if (WARN_ON_ONCE(off >= tbl.maxlen - 1))
+ break;
+
if (i + 1 < n_keys)
off += snprintf(tbl.data + off, tbl.maxlen - off, ",");
}
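
The WARN_ON_ONCE() above guards against snprintf()'s return-value semantics: it returns the length that would have been written, so once the buffer saturates, off keeps growing past tbl.maxlen and tbl.data + off would point out of bounds. The same guarded append loop in user-space C:

    #include <stdio.h>

    int main(void)
    {
        char buf[16];
        size_t off = 0, maxlen = sizeof(buf);
        const char *items[] = { "alpha", "bravo", "charlie", "delta" };

        for (unsigned i = 0; i < 4; i++) {
            /* snprintf() returns the would-be length, which can exceed
             * the remaining space once the buffer is full.
             */
            off += snprintf(buf + off, maxlen - off, "%s%s",
                            off ? "," : "", items[i]);
            if (off >= maxlen - 1)
                break;  /* stop before buf + off escapes the buffer */
        }
        printf("%s\n", buf);
        return 0;
    }
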
@@ -1037,7 +1041,7 @@ static struct ctl_table ipv4_net_table[] = {
.mode = 0644,
.proc_handler = proc_fib_multipath_hash_policy,
.extra1 = SYSCTL_ZERO,
- .extra2 = SYSCTL_ONE,
+ .extra2 = &two,
},
#endif
{
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index d8876f0e9672..9b48aec29aca 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1741,8 +1741,8 @@ static int tcp_zerocopy_receive(struct sock *sk,
struct tcp_zerocopy_receive *zc)
{
unsigned long address = (unsigned long)zc->address;
+ u32 length = 0, seq, offset, zap_len;
const skb_frag_t *frags = NULL;
- u32 length = 0, seq, offset;
struct vm_area_struct *vma;
struct sk_buff *skb = NULL;
struct tcp_sock *tp;
@@ -1769,12 +1769,12 @@ static int tcp_zerocopy_receive(struct sock *sk,
seq = tp->copied_seq;
inq = tcp_inq(sk);
zc->length = min_t(u32, zc->length, inq);
- zc->length &= ~(PAGE_SIZE - 1);
- if (zc->length) {
- zap_page_range(vma, address, zc->length);
+ zap_len = zc->length & ~(PAGE_SIZE - 1);
+ if (zap_len) {
+ zap_page_range(vma, address, zap_len);
zc->recv_skip_hint = 0;
} else {
- zc->recv_skip_hint = inq;
+ zc->recv_skip_hint = zc->length;
}
ret = 0;
while (length + PAGE_SIZE <= zc->length) {
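
The fix above splits the page-aligned zap length from the user-requested length: only whole pages can be remapped, so the sub-page tail must be reported back in recv_skip_hint rather than silently dropped. The alignment itself is a mask with the page size, assumed to be a power of two:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL    /* assumed power-of-two page size */

    int main(void)
    {
        unsigned long length = 10000;
        /* Round down to a whole number of pages; the remainder must be
         * copied normally instead of being zero-copy mapped.
         */
        unsigned long zap_len = length & ~(PAGE_SIZE - 1);

        printf("zap %lu bytes, leave %lu for copy\n",
               zap_len, length - zap_len);
        return 0;
    }
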
@@ -1958,8 +1958,7 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
struct sk_buff *skb, *last;
u32 urg_hole = 0;
struct scm_timestamping_internal tss;
- bool has_tss = false;
- bool has_cmsg;
+ int cmsg_flags;
if (unlikely(flags & MSG_ERRQUEUE))
return inet_recv_error(sk, msg, len, addr_len);
@@ -1974,7 +1973,7 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
if (sk->sk_state == TCP_LISTEN)
goto out;
- has_cmsg = tp->recvmsg_inq;
+ cmsg_flags = tp->recvmsg_inq ? 1 : 0;
timeo = sock_rcvtimeo(sk, nonblock);
/* Urgent data needs to be handled specially. */
@@ -2047,7 +2046,7 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
/* Well, if we have backlog, try to process it now yet. */
- if (copied >= target && !sk->sk_backlog.tail)
+ if (copied >= target && !READ_ONCE(sk->sk_backlog.tail))
break;
if (copied) {
@@ -2157,8 +2156,7 @@ skip_copy:
if (TCP_SKB_CB(skb)->has_rxtstamp) {
tcp_update_recv_tstamps(skb, &tss);
- has_tss = true;
- has_cmsg = true;
+ cmsg_flags |= 2;
}
if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
goto found_fin_ok;
@@ -2183,10 +2181,10 @@ found_fin_ok:
release_sock(sk);
- if (has_cmsg) {
- if (has_tss)
+ if (cmsg_flags) {
+ if (cmsg_flags & 2)
tcp_recv_timestamp(msg, sk, &tss);
- if (tp->recvmsg_inq) {
+ if (cmsg_flags & 1) {
inq = tcp_inq_hint(sk);
put_cmsg(msg, SOL_TCP, TCP_CM_INQ, sizeof(inq), &inq);
}
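
cmsg_flags folds the two booleans into one value latched before the copy loop: bit 0 records that the receiver asked for TCP_CM_INQ, bit 1 that rx timestamps were collected, and the post-release_sock() code now keys off this snapshot instead of re-reading tp->recvmsg_inq. The shape of the logic:

    #include <stdio.h>

    #define CMSG_INQ  1   /* bit 0: receiver asked for TCP_CM_INQ */
    #define CMSG_TS   2   /* bit 1: rx timestamps were recorded */

    int main(void)
    {
        int recvmsg_inq = 1;    /* stand-in for tp->recvmsg_inq */
        int cmsg_flags = recvmsg_inq ? CMSG_INQ : 0;  /* latched up front */

        cmsg_flags |= CMSG_TS;  /* set while walking the receive queue */

        if (cmsg_flags) {
            if (cmsg_flags & CMSG_TS)
                printf("emit timestamp cmsg\n");
            if (cmsg_flags & CMSG_INQ)
                printf("emit inq cmsg\n");
        }
        return 0;
    }
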
@@ -2666,6 +2664,7 @@ int tcp_disconnect(struct sock *sk, int flags)
/* Clean up fastopen related fields */
tcp_free_fastopen_req(tp);
inet->defer_connect = 0;
+ tp->fastopen_client_fail = 0;
WARN_ON(inet->inet_num && !icsk->icsk_bind_hash);
@@ -3224,8 +3223,8 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
* tcpi_unacked -> Number of children ready for accept()
* tcpi_sacked -> max backlog
*/
- info->tcpi_unacked = sk->sk_ack_backlog;
- info->tcpi_sacked = sk->sk_max_ack_backlog;
+ info->tcpi_unacked = READ_ONCE(sk->sk_ack_backlog);
+ info->tcpi_sacked = READ_ONCE(sk->sk_max_ack_backlog);
return;
}
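
sk_ack_backlog and sk_max_ack_backlog are modified by other CPUs without this reader holding the socket lock, so the loads are annotated with READ_ONCE() to obtain a single, untorn read and to stop the compiler from caching or re-reading the value. A simplified user-space model of the annotation (the kernel macro also handles non-scalar sizes):

    #include <stdio.h>

    /* Simplified model of READ_ONCE() for word-sized scalars: a volatile
     * access forces exactly one load and forbids compiler re-reads.
     */
    #define READ_ONCE_SKETCH(x) (*(const volatile __typeof__(x) *)&(x))

    static unsigned int ack_backlog;  /* written concurrently elsewhere */

    int main(void)
    {
        unsigned int snapshot = READ_ONCE_SKETCH(ack_backlog);

        printf("%u\n", snapshot);
        return 0;
    }
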
@@ -3305,6 +3304,7 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
info->tcpi_reord_seen = tp->reord_seen;
info->tcpi_rcv_ooopack = tp->rcv_ooopack;
info->tcpi_snd_wnd = tp->snd_wnd;
+ info->tcpi_fastopen_client_fail = tp->fastopen_client_fail;
unlock_sock_fast(sk, slow);
}
EXPORT_SYMBOL_GPL(tcp_get_info);
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index c445a81d144e..3737ec096650 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -256,6 +256,9 @@ void tcp_get_available_congestion_control(char *buf, size_t maxlen)
offs += snprintf(buf + offs, maxlen - offs,
"%s%s",
offs == 0 ? "" : " ", ca->name);
+
+ if (WARN_ON_ONCE(offs >= maxlen))
+ break;
}
rcu_read_unlock();
}
@@ -285,6 +288,9 @@ void tcp_get_allowed_congestion_control(char *buf, size_t maxlen)
offs += snprintf(buf + offs, maxlen - offs,
"%s%s",
offs == 0 ? "" : " ", ca->name);
+
+ if (WARN_ON_ONCE(offs >= maxlen))
+ break;
}
rcu_read_unlock();
}
diff --git a/net/ipv4/tcp_diag.c b/net/ipv4/tcp_diag.c
index 549506162dde..0d08f9e2d8d0 100644
--- a/net/ipv4/tcp_diag.c
+++ b/net/ipv4/tcp_diag.c
@@ -21,8 +21,8 @@ static void tcp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
struct tcp_info *info = _info;
if (inet_sk_state_load(sk) == TCP_LISTEN) {
- r->idiag_rqueue = sk->sk_ack_backlog;
- r->idiag_wqueue = sk->sk_max_ack_backlog;
+ r->idiag_rqueue = READ_ONCE(sk->sk_ack_backlog);
+ r->idiag_wqueue = READ_ONCE(sk->sk_max_ack_backlog);
} else if (sk->sk_type == SOCK_STREAM) {
const struct tcp_sock *tp = tcp_sk(sk);
diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
index a915ade0c818..19ad9586c720 100644
--- a/net/ipv4/tcp_fastopen.c
+++ b/net/ipv4/tcp_fastopen.c
@@ -422,7 +422,10 @@ bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss,
cookie->len = -1;
return true;
}
- return cookie->len > 0;
+ if (cookie->len > 0)
+ return true;
+ tcp_sk(sk)->fastopen_client_fail = TFO_COOKIE_UNAVAILABLE;
+ return false;
}
/* This function checks if we want to defer sending SYN until the first
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index a2e52ad7cdab..88b987ca9ebb 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5814,6 +5814,10 @@ static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack,
tcp_fastopen_cache_set(sk, mss, cookie, syn_drop, try_exp);
if (data) { /* Retransmit unacked data in SYN */
+ if (tp->total_retrans)
+ tp->fastopen_client_fail = TFO_SYN_RETRANSMITTED;
+ else
+ tp->fastopen_client_fail = TFO_DATA_NOT_ACKED;
skb_rbtree_walk_from(data) {
if (__tcp_retransmit_skb(sk, data, 1))
break;
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 67b2dc7a1727..92282f98dc82 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -121,11 +121,9 @@ int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
#if IS_ENABLED(CONFIG_IPV6)
if (tw->tw_family == AF_INET6) {
if (ipv6_addr_loopback(&tw->tw_v6_daddr) ||
- (ipv6_addr_v4mapped(&tw->tw_v6_daddr) &&
- (tw->tw_v6_daddr.s6_addr[12] == 127)) ||
+ ipv6_addr_v4mapped_loopback(&tw->tw_v6_daddr) ||
ipv6_addr_loopback(&tw->tw_v6_rcv_saddr) ||
- (ipv6_addr_v4mapped(&tw->tw_v6_rcv_saddr) &&
- (tw->tw_v6_rcv_saddr.s6_addr[12] == 127)))
+ ipv6_addr_v4mapped_loopback(&tw->tw_v6_rcv_saddr))
loopback = true;
} else
#endif
@@ -2453,7 +2451,7 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
state = inet_sk_state_load(sk);
if (state == TCP_LISTEN)
- rx_queue = sk->sk_ack_backlog;
+ rx_queue = READ_ONCE(sk->sk_ack_backlog);
else
/* Because we don't lock the socket,
* we might find a transient negative value.
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 0488607c5cd3..be6d22b8190f 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -3290,7 +3290,7 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
now = tcp_clock_ns();
#ifdef CONFIG_SYN_COOKIES
if (unlikely(req->cookie_ts))
- skb->skb_mstamp_ns = cookie_init_timestamp(req);
+ skb->skb_mstamp_ns = cookie_init_timestamp(req, now);
else
#endif
{
diff --git a/net/ipv4/tcp_ulp.c b/net/ipv4/tcp_ulp.c
index 4849edb62d52..12ab5db2b71c 100644
--- a/net/ipv4/tcp_ulp.c
+++ b/net/ipv4/tcp_ulp.c
@@ -92,6 +92,9 @@ void tcp_get_available_ulp(char *buf, size_t maxlen)
offs += snprintf(buf + offs, maxlen - offs,
"%s%s",
offs == 0 ? "" : " ", ulp_ops->name);
+
+ if (WARN_ON_ONCE(offs >= maxlen))
+ break;
}
rcu_read_unlock();
}
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 1d58ce829dca..4da5758cc718 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1297,6 +1297,27 @@ out:
#define UDP_SKB_IS_STATELESS 0x80000000
+/* all head states (dst, sk, nf conntrack) except skb extensions are
+ * cleared by udp_rcv().
+ *
+ * We need to preserve secpath, if present, to eventually process
+ * IP_CMSG_PASSSEC at recvmsg() time.
+ *
+ * Other extensions can be cleared.
+ */
+static bool udp_try_make_stateless(struct sk_buff *skb)
+{
+ if (!skb_has_extensions(skb))
+ return true;
+
+ if (!secpath_exists(skb)) {
+ skb_ext_reset(skb);
+ return true;
+ }
+
+ return false;
+}
+
static void udp_set_dev_scratch(struct sk_buff *skb)
{
struct udp_dev_scratch *scratch = udp_skb_scratch(skb);
@@ -1308,11 +1329,7 @@ static void udp_set_dev_scratch(struct sk_buff *skb)
scratch->csum_unnecessary = !!skb_csum_unnecessary(skb);
scratch->is_linear = !skb_is_nonlinear(skb);
#endif
- /* all head states execept sp (dst, sk, nf) are always cleared by
- * udp_rcv() and we need to preserve secpath, if present, to eventually
- * process IP_CMSG_PASSSEC at recvmsg() time
- */
- if (likely(!skb_sec_path(skb)))
+ if (udp_try_make_stateless(skb))
scratch->_tsize_state |= UDP_SKB_IS_STATELESS;
}
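
udp_try_make_stateless() refines the old secpath-only test: a packet with no extensions at all is trivially stateless, one whose extensions do not include a secpath can be made stateless by dropping them, and only a packet carrying a secpath must keep its state. The decision tree, as a sketch:

    #include <stdbool.h>
    #include <stdio.h>

    struct pkt {
        bool has_extensions;
        bool has_secpath;   /* must survive until recvmsg() */
    };

    /* Sketch of the udp_try_make_stateless() decision: returns true when
     * the packet's head state can be treated as fully scratch-cacheable.
     */
    static bool try_make_stateless(struct pkt *p)
    {
        if (!p->has_extensions)
            return true;
        if (!p->has_secpath) {
            p->has_extensions = false;  /* skb_ext_reset() */
            return true;
        }
        return false;                   /* keep state for IP_CMSG_PASSSEC */
    }

    int main(void)
    {
        struct pkt a = { false, false }, b = { true, false },
                   c = { true, true };

        printf("%d %d %d\n", try_make_stateless(&a),
               try_make_stateless(&b), try_make_stateless(&c));
        return 0;
    }
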
@@ -2534,9 +2551,11 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
case UDP_ENCAP:
switch (val) {
case 0:
+#ifdef CONFIG_XFRM
case UDP_ENCAP_ESPINUDP:
case UDP_ENCAP_ESPINUDP_NON_IKE:
up->encap_rcv = xfrm4_udp_encap_rcv;
+#endif
/* FALLTHROUGH */
case UDP_ENCAP_L2TPINUDP:
up->encap_type = val;
diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c
index ecff3fce9807..89ba7c87de5d 100644
--- a/net/ipv4/xfrm4_output.c
+++ b/net/ipv4/xfrm4_output.c
@@ -92,7 +92,7 @@ static int __xfrm4_output(struct net *net, struct sock *sk, struct sk_buff *skb)
int xfrm4_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
- net, sk, skb, NULL, skb_dst(skb)->dev,
+ net, sk, skb, skb->dev, skb_dst(skb)->dev,
__xfrm4_output,
!(IPCB(skb)->flags & IPSKB_REROUTED));
}
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 34ccef18b40e..98d82305d6de 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -5552,14 +5552,13 @@ static int inet6_fill_ifla6_attrs(struct sk_buff *skb, struct inet6_dev *idev,
nla = nla_reserve(skb, IFLA_INET6_TOKEN, sizeof(struct in6_addr));
if (!nla)
goto nla_put_failure;
-
- if (nla_put_u8(skb, IFLA_INET6_ADDR_GEN_MODE, idev->cnf.addr_gen_mode))
- goto nla_put_failure;
-
read_lock_bh(&idev->lock);
memcpy(nla_data(nla), idev->token.s6_addr, nla_len(nla));
read_unlock_bh(&idev->lock);
+ if (nla_put_u8(skb, IFLA_INET6_ADDR_GEN_MODE, idev->cnf.addr_gen_mode))
+ goto nla_put_failure;
+
return 0;
nla_put_failure:
diff --git a/net/ipv6/fib6_notifier.c b/net/ipv6/fib6_notifier.c
index 05f82baaa99e..f87ae33e1d01 100644
--- a/net/ipv6/fib6_notifier.c
+++ b/net/ipv6/fib6_notifier.c
@@ -7,12 +7,12 @@
#include <net/netns/ipv6.h>
#include <net/ip6_fib.h>
-int call_fib6_notifier(struct notifier_block *nb, struct net *net,
+int call_fib6_notifier(struct notifier_block *nb,
enum fib_event_type event_type,
struct fib_notifier_info *info)
{
info->family = AF_INET6;
- return call_fib_notifier(nb, net, event_type, info);
+ return call_fib_notifier(nb, event_type, info);
}
int call_fib6_notifiers(struct net *net, enum fib_event_type event_type,
@@ -27,15 +27,16 @@ static unsigned int fib6_seq_read(struct net *net)
return fib6_tables_seq_read(net) + fib6_rules_seq_read(net);
}
-static int fib6_dump(struct net *net, struct notifier_block *nb)
+static int fib6_dump(struct net *net, struct notifier_block *nb,
+ struct netlink_ext_ack *extack)
{
int err;
- err = fib6_rules_dump(net, nb);
+ err = fib6_rules_dump(net, nb, extack);
if (err)
return err;
- return fib6_tables_dump(net, nb);
+ return fib6_tables_dump(net, nb, extack);
}
static const struct fib_notifier_ops fib6_notifier_ops_template = {
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
index f9e8fe3ff0c5..fafe556d21e0 100644
--- a/net/ipv6/fib6_rules.c
+++ b/net/ipv6/fib6_rules.c
@@ -47,9 +47,10 @@ bool fib6_rule_default(const struct fib_rule *rule)
}
EXPORT_SYMBOL_GPL(fib6_rule_default);
-int fib6_rules_dump(struct net *net, struct notifier_block *nb)
+int fib6_rules_dump(struct net *net, struct notifier_block *nb,
+ struct netlink_ext_ack *extack)
{
- return fib_rules_dump(net, nb, AF_INET6);
+ return fib_rules_dump(net, nb, AF_INET6, extack);
}
unsigned int fib6_rules_seq_read(struct net *net)
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 62c997201970..ef408a5090a2 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -516,13 +516,29 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
mip6_addr_swap(skb);
+ sk = icmpv6_xmit_lock(net);
+ if (!sk)
+ goto out_bh_enable;
+
memset(&fl6, 0, sizeof(fl6));
fl6.flowi6_proto = IPPROTO_ICMPV6;
fl6.daddr = hdr->saddr;
if (force_saddr)
saddr = force_saddr;
- if (saddr)
+ if (saddr) {
fl6.saddr = *saddr;
+ } else {
+ /* select a more meaningful saddr from the input interface */
+ struct net_device *in_netdev;
+
+ in_netdev = dev_get_by_index(net, IP6CB(skb)->iif);
+ if (in_netdev) {
+ ipv6_dev_get_saddr(net, in_netdev, &fl6.daddr,
+ inet6_sk(sk)->srcprefs,
+ &fl6.saddr);
+ dev_put(in_netdev);
+ }
+ }
fl6.flowi6_mark = mark;
fl6.flowi6_oif = iif;
fl6.fl6_icmp_type = type;
@@ -531,10 +547,6 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
fl6.mp_hash = rt6_multipath_hash(net, &fl6, skb, NULL);
security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
- sk = icmpv6_xmit_lock(net);
- if (!sk)
- goto out_bh_enable;
-
sk->sk_mark = mark;
np = inet6_sk(sk);
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 6e2af411cd9c..7bae6a91b487 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -357,15 +357,17 @@ unsigned int fib6_tables_seq_read(struct net *net)
return fib_seq;
}
-static int call_fib6_entry_notifier(struct notifier_block *nb, struct net *net,
+static int call_fib6_entry_notifier(struct notifier_block *nb,
enum fib_event_type event_type,
- struct fib6_info *rt)
+ struct fib6_info *rt,
+ struct netlink_ext_ack *extack)
{
struct fib6_entry_notifier_info info = {
+ .info.extack = extack,
.rt = rt,
};
- return call_fib6_notifier(nb, net, event_type, &info.info);
+ return call_fib6_notifier(nb, event_type, &info.info);
}
int call_fib6_entry_notifiers(struct net *net,
@@ -401,40 +403,51 @@ int call_fib6_multipath_entry_notifiers(struct net *net,
struct fib6_dump_arg {
struct net *net;
struct notifier_block *nb;
+ struct netlink_ext_ack *extack;
};
-static void fib6_rt_dump(struct fib6_info *rt, struct fib6_dump_arg *arg)
+static int fib6_rt_dump(struct fib6_info *rt, struct fib6_dump_arg *arg)
{
if (rt == arg->net->ipv6.fib6_null_entry)
- return;
- call_fib6_entry_notifier(arg->nb, arg->net, FIB_EVENT_ENTRY_ADD, rt);
+ return 0;
+ return call_fib6_entry_notifier(arg->nb, FIB_EVENT_ENTRY_ADD,
+ rt, arg->extack);
}
static int fib6_node_dump(struct fib6_walker *w)
{
struct fib6_info *rt;
+ int err = 0;
- for_each_fib6_walker_rt(w)
- fib6_rt_dump(rt, w->args);
+ for_each_fib6_walker_rt(w) {
+ err = fib6_rt_dump(rt, w->args);
+ if (err)
+ break;
+ }
w->leaf = NULL;
- return 0;
+ return err;
}
-static void fib6_table_dump(struct net *net, struct fib6_table *tb,
- struct fib6_walker *w)
+static int fib6_table_dump(struct net *net, struct fib6_table *tb,
+ struct fib6_walker *w)
{
+ int err;
+
w->root = &tb->tb6_root;
spin_lock_bh(&tb->tb6_lock);
- fib6_walk(net, w);
+ err = fib6_walk(net, w);
spin_unlock_bh(&tb->tb6_lock);
+ return err;
}
/* Called with rcu_read_lock() */
-int fib6_tables_dump(struct net *net, struct notifier_block *nb)
+int fib6_tables_dump(struct net *net, struct notifier_block *nb,
+ struct netlink_ext_ack *extack)
{
struct fib6_dump_arg arg;
struct fib6_walker *w;
unsigned int h;
+ int err = 0;
w = kzalloc(sizeof(*w), GFP_ATOMIC);
if (!w)
@@ -443,19 +456,24 @@ int fib6_tables_dump(struct net *net, struct notifier_block *nb)
w->func = fib6_node_dump;
arg.net = net;
arg.nb = nb;
+ arg.extack = extack;
w->args = &arg;
for (h = 0; h < FIB6_TABLE_HASHSZ; h++) {
struct hlist_head *head = &net->ipv6.fib_table_hash[h];
struct fib6_table *tb;
- hlist_for_each_entry_rcu(tb, head, tb6_hlist)
- fib6_table_dump(net, tb, w);
+ hlist_for_each_entry_rcu(tb, head, tb6_hlist) {
+ err = fib6_table_dump(net, tb, w);
+ if (err < 0)
+ goto out;
+ }
}
+out:
kfree(w);
- return 0;
+ return err;
}
static int fib6_dump_node(struct fib6_walker *w)
@@ -1443,6 +1461,8 @@ out:
}
#endif
goto failure;
+ } else if (fib6_requires_src(rt)) {
+ fib6_routes_require_src_inc(info->nl_net);
}
return err;
@@ -1915,6 +1935,8 @@ int fib6_del(struct fib6_info *rt, struct nl_info *info)
struct fib6_info *cur = rcu_dereference_protected(*rtp,
lockdep_is_held(&table->tb6_lock));
if (rt == cur) {
+ if (fib6_requires_src(cur))
+ fib6_routes_require_src_dec(info->nl_net);
fib6_del_route(table, fn, rtp, info);
return 0;
}
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index 3d71c7d6102c..7b089d0ac8cd 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -86,11 +86,27 @@ static void ip6_sublist_rcv_finish(struct list_head *head)
}
}
+static bool ip6_can_use_hint(const struct sk_buff *skb,
+ const struct sk_buff *hint)
+{
+ return hint && !skb_dst(skb) &&
+ ipv6_addr_equal(&ipv6_hdr(hint)->daddr, &ipv6_hdr(skb)->daddr);
+}
+
+static struct sk_buff *ip6_extract_route_hint(const struct net *net,
+ struct sk_buff *skb)
+{
+ if (fib6_routes_require_src(net) || fib6_has_custom_rules(net))
+ return NULL;
+
+ return skb;
+}
+
static void ip6_list_rcv_finish(struct net *net, struct sock *sk,
struct list_head *head)
{
+ struct sk_buff *skb, *next, *hint = NULL;
struct dst_entry *curr_dst = NULL;
- struct sk_buff *skb, *next;
struct list_head sublist;
INIT_LIST_HEAD(&sublist);
@@ -104,9 +120,15 @@ static void ip6_list_rcv_finish(struct net *net, struct sock *sk,
skb = l3mdev_ip6_rcv(skb);
if (!skb)
continue;
- ip6_rcv_finish_core(net, sk, skb);
+
+ if (ip6_can_use_hint(skb, hint))
+ skb_dst_copy(skb, hint);
+ else
+ ip6_rcv_finish_core(net, sk, skb);
dst = skb_dst(skb);
if (curr_dst != dst) {
+ hint = ip6_extract_route_hint(net, skb);
+
/* dispatch old sublist */
if (!list_empty(&sublist))
ip6_sublist_rcv_finish(&sublist);
@@ -325,7 +347,8 @@ void ipv6_list_rcv(struct list_head *head, struct packet_type *pt,
list_add_tail(&skb->list, &sublist);
}
/* dispatch final sublist */
- ip6_sublist_rcv(&sublist, curr_dev, curr_net);
+ if (!list_empty(&sublist))
+ ip6_sublist_rcv(&sublist, curr_dev, curr_net);
}
INDIRECT_CALLABLE_DECLARE(int udpv6_rcv(struct sk_buff *));
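
The reworked loop threads a route hint through the batch: the first packet of each new dst becomes the hint, unless source-dependent routes or custom FIB rules force per-packet lookups, and followers with an equal daddr copy the hint's dst instead of repeating ip6_rcv_finish_core(). Packets are then grouped into per-dst sublists and dispatched together; the grouping, sketched:

    #include <stdio.h>

    /* Sketch of the sublist dispatch: consecutive packets that resolved
     * to the same route are flushed together, mirroring
     * ip6_list_rcv_finish().
     */
    int main(void)
    {
        int dsts[] = { 10, 10, 10, 20, 20, 10 };
        int curr = -1, start = 0;

        for (int i = 0; i < 6; i++) {
            if (dsts[i] != curr) {
                if (i > start)
                    printf("dispatch [%d..%d) dst %d\n", start, i, curr);
                curr = dsts[i];
                start = i;
            }
        }
        printf("dispatch [%d..6) dst %d\n", start, curr);
        return 0;
    }
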
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 71827b56c006..945508a7cb0f 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -160,7 +160,7 @@ static int ip6_finish_output(struct net *net, struct sock *sk, struct sk_buff *s
int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
- struct net_device *dev = skb_dst(skb)->dev;
+ struct net_device *dev = skb_dst(skb)->dev, *indev = skb->dev;
struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
skb->protocol = htons(ETH_P_IPV6);
@@ -173,7 +173,7 @@ int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
}
return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING,
- net, sk, skb, NULL, dev,
+ net, sk, skb, indev, dev,
ip6_finish_output,
!(IP6CB(skb)->flags & IP6SKB_REROUTED));
}
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 857a89ad4d6c..bfa49ff70531 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -265,9 +265,10 @@ static void __net_exit ip6mr_rules_exit(struct net *net)
rtnl_unlock();
}
-static int ip6mr_rules_dump(struct net *net, struct notifier_block *nb)
+static int ip6mr_rules_dump(struct net *net, struct notifier_block *nb,
+ struct netlink_ext_ack *extack)
{
- return fib_rules_dump(net, nb, RTNL_FAMILY_IP6MR);
+ return fib_rules_dump(net, nb, RTNL_FAMILY_IP6MR, extack);
}
static unsigned int ip6mr_rules_seq_read(struct net *net)
@@ -324,7 +325,8 @@ static void __net_exit ip6mr_rules_exit(struct net *net)
rtnl_unlock();
}
-static int ip6mr_rules_dump(struct net *net, struct notifier_block *nb)
+static int ip6mr_rules_dump(struct net *net, struct notifier_block *nb,
+ struct netlink_ext_ack *extack)
{
return 0;
}
@@ -1256,10 +1258,11 @@ static unsigned int ip6mr_seq_read(struct net *net)
return net->ipv6.ipmr_seq + ip6mr_rules_seq_read(net);
}
-static int ip6mr_dump(struct net *net, struct notifier_block *nb)
+static int ip6mr_dump(struct net *net, struct notifier_block *nb,
+ struct netlink_ext_ack *extack)
{
return mr_dump(net, nb, RTNL_FAMILY_IP6MR, ip6mr_rules_dump,
- ip6mr_mr_table_iter, &mrt_lock);
+ ip6mr_mr_table_iter, &mrt_lock, extack);
}
static struct notifier_block ip6_mr_notifier = {
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 264c292e7dcc..79fc012dd2ca 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -363,8 +363,8 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
break;
case IPV6_TRANSPARENT:
- if (valbool && !ns_capable(net->user_ns, CAP_NET_ADMIN) &&
- !ns_capable(net->user_ns, CAP_NET_RAW)) {
+ if (valbool && !ns_capable(net->user_ns, CAP_NET_RAW) &&
+ !ns_capable(net->user_ns, CAP_NET_ADMIN)) {
retv = -EPERM;
break;
}
diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig
index 69443e9a3aa5..0594131fa46d 100644
--- a/net/ipv6/netfilter/Kconfig
+++ b/net/ipv6/netfilter/Kconfig
@@ -128,9 +128,9 @@ config IP6_NF_MATCH_HL
depends on NETFILTER_ADVANCED
select NETFILTER_XT_MATCH_HL
---help---
- This is a backwards-compat option for the user's convenience
- (e.g. when running oldconfig). It selects
- CONFIG_NETFILTER_XT_MATCH_HL.
+ This is a backwards-compat option for the user's convenience
+ (e.g. when running oldconfig). It selects
+ CONFIG_NETFILTER_XT_MATCH_HL.
config IP6_NF_MATCH_IPV6HEADER
tristate '"ipv6header" IPv6 Extension Headers Match'
@@ -184,9 +184,9 @@ config IP6_NF_TARGET_HL
depends on NETFILTER_ADVANCED && IP6_NF_MANGLE
select NETFILTER_XT_TARGET_HL
---help---
- This is a backwards-compatible option for the user's convenience
- (e.g. when running oldconfig). It selects
- CONFIG_NETFILTER_XT_TARGET_HL.
+ This is a backwards-compatible option for the user's convenience
+ (e.g. when running oldconfig). It selects
+ CONFIG_NETFILTER_XT_TARGET_HL.
config IP6_NF_FILTER
tristate "Packet filtering"
@@ -245,14 +245,14 @@ config IP6_NF_RAW
# security table for MAC policy
config IP6_NF_SECURITY
- tristate "Security table"
- depends on SECURITY
- depends on NETFILTER_ADVANCED
- help
- This option adds a `security' table to iptables, for use
- with Mandatory Access Control (MAC) policy.
-
- If unsure, say N.
+ tristate "Security table"
+ depends on SECURITY
+ depends on NETFILTER_ADVANCED
+ help
+ This option adds a `security' table to iptables, for use
+ with Mandatory Access Control (MAC) policy.
+
+ If unsure, say N.
config IP6_NF_NAT
tristate "ip6tables NAT support"
diff --git a/net/ipv6/netfilter/nf_flow_table_ipv6.c b/net/ipv6/netfilter/nf_flow_table_ipv6.c
index f6d9a48c7a2a..a8566ee12e83 100644
--- a/net/ipv6/netfilter/nf_flow_table_ipv6.c
+++ b/net/ipv6/netfilter/nf_flow_table_ipv6.c
@@ -10,6 +10,8 @@
static struct nf_flowtable_type flowtable_ipv6 = {
.family = NFPROTO_IPV6,
.init = nf_flow_table_init,
+ .setup = nf_flow_table_offload_setup,
+ .action = nf_flow_rule_route_ipv6,
.free = nf_flow_table_free,
.hook = nf_flow_offload_ipv6_hook,
.owner = THIS_MODULE,
diff --git a/net/ipv6/netfilter/nf_tproxy_ipv6.c b/net/ipv6/netfilter/nf_tproxy_ipv6.c
index 34d51cd426b0..6bac68fb27a3 100644
--- a/net/ipv6/netfilter/nf_tproxy_ipv6.c
+++ b/net/ipv6/netfilter/nf_tproxy_ipv6.c
@@ -150,4 +150,4 @@ EXPORT_SYMBOL_GPL(nf_tproxy_get_sock_v6);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Balazs Scheidler, Krisztian Kovacs");
-MODULE_DESCRIPTION("Netfilter IPv4 transparent proxy support");
+MODULE_DESCRIPTION("Netfilter IPv6 transparent proxy support");
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index e60bf8e7dd1a..b59940416cb5 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -634,7 +634,7 @@ static void rt6_probe(struct fib6_nh *fib6_nh)
* Router Reachability Probe MUST be rate-limited
* to no more than one per minute.
*/
- if (fib6_nh->fib_nh_gw_family)
+ if (!fib6_nh->fib_nh_gw_family)
return;
nh_gw = &fib6_nh->fib_nh_gw6;
@@ -1479,11 +1479,11 @@ static u32 rt6_exception_hash(const struct in6_addr *dst,
u32 val;
net_get_random_once(&seed, sizeof(seed));
- val = jhash(dst, sizeof(*dst), seed);
+ val = jhash2((const u32 *)dst, sizeof(*dst)/sizeof(u32), seed);
#ifdef CONFIG_IPV6_SUBTREES
if (src)
- val = jhash(src, sizeof(*src), val);
+ val = jhash2((const u32 *)src, sizeof(*src)/sizeof(u32), val);
#endif
return hash_32(val, FIB6_EXCEPTION_BUCKET_SIZE_SHIFT);
}
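
jhash() walks its input byte-wise while jhash2() consumes u32 words; an IPv6 address is 16 bytes with 4-byte alignment, so hashing it as four words avoids the byte-wise tail handling. The call shape, with a generic word hash standing in (the mixing below is illustrative, not jhash2's real function):

    #include <stdint.h>
    #include <stdio.h>
    #include <stddef.h>

    struct in6_addr_s { uint32_t words[4]; };  /* 16 bytes = 4 u32 words */

    /* Stand-in word-at-a-time hash with a jhash2-like signature:
     * (word array, word count, seed).
     */
    static uint32_t hash_words(const uint32_t *k, size_t n, uint32_t seed)
    {
        uint32_t h = seed;

        for (size_t i = 0; i < n; i++)
            h = (h ^ k[i]) * 0x9e3779b1u;  /* simple multiplicative mix */
        return h;
    }

    int main(void)
    {
        struct in6_addr_s dst = { { 0x20010db8, 0, 0, 1 } };
        uint32_t val = hash_words(dst.words,
                                  sizeof(dst) / sizeof(uint32_t), 0x1234);

        printf("%08x\n", val);
        return 0;
    }
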
@@ -2295,10 +2295,7 @@ static void ip6_multipath_l3_keys(const struct sk_buff *skb,
if (!icmph)
goto out;
- if (icmph->icmp6_type != ICMPV6_DEST_UNREACH &&
- icmph->icmp6_type != ICMPV6_PKT_TOOBIG &&
- icmph->icmp6_type != ICMPV6_TIME_EXCEED &&
- icmph->icmp6_type != ICMPV6_PARAMPROB)
+ if (!icmpv6_is_err(icmph->icmp6_type))
goto out;
inner_iph = skb_header_pointer(skb,
@@ -6202,6 +6199,9 @@ static int __net_init ip6_route_net_init(struct net *net)
dst_init_metrics(&net->ipv6.ip6_blk_hole_entry->dst,
ip6_template_metrics, true);
INIT_LIST_HEAD(&net->ipv6.ip6_blk_hole_entry->rt6i_uncached);
+#ifdef CONFIG_IPV6_SUBTREES
+ net->ipv6.fib6_routes_require_src = 0;
+#endif
#endif
net->ipv6.sysctl.flush_delay = 0;
diff --git a/net/ipv6/seg6_local.c b/net/ipv6/seg6_local.c
index 9d4f75e0d33a..85a5447a3e8d 100644
--- a/net/ipv6/seg6_local.c
+++ b/net/ipv6/seg6_local.c
@@ -81,6 +81,11 @@ static struct ipv6_sr_hdr *get_srh(struct sk_buff *skb)
if (!pskb_may_pull(skb, srhoff + len))
return NULL;
+ /* note that pskb_may_pull() may reallocate the skb head and thus
+ * invalidate pointers into the header; reload them when needed.
+ */
+ srh = (struct ipv6_sr_hdr *)(skb->data + srhoff);
+
if (!seg6_validate_srh(srh, len))
return NULL;
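
The reload matters because pskb_may_pull() can reallocate the skb's linear head, leaving previously computed header pointers dangling. The hazard is the same one realloc() poses in user space:

    #include <stdlib.h>
    #include <string.h>
    #include <stdio.h>

    int main(void)
    {
        char *buf = malloc(8);
        char *field;

        if (!buf)
            return 1;
        strcpy(buf, "abcdefg");
        field = buf + 4;                /* pointer into the buffer */

        buf = realloc(buf, 1 << 20);    /* may move the allocation ... */
        if (!buf)
            return 1;
        /* ... so 'field' may now dangle; recompute it from the new base,
         * just as get_srh() recomputes srh after pskb_may_pull().
         */
        field = buf + 4;
        printf("%s\n", field);
        free(buf);
        return 0;
    }
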
@@ -144,8 +149,9 @@ static void advance_nextseg(struct ipv6_sr_hdr *srh, struct in6_addr *daddr)
*daddr = *addr;
}
-int seg6_lookup_nexthop(struct sk_buff *skb, struct in6_addr *nhaddr,
- u32 tbl_id)
+static int
+seg6_lookup_any_nexthop(struct sk_buff *skb, struct in6_addr *nhaddr,
+ u32 tbl_id, bool local_delivery)
{
struct net *net = dev_net(skb->dev);
struct ipv6hdr *hdr = ipv6_hdr(skb);
@@ -153,6 +159,7 @@ int seg6_lookup_nexthop(struct sk_buff *skb, struct in6_addr *nhaddr,
struct dst_entry *dst = NULL;
struct rt6_info *rt;
struct flowi6 fl6;
+ int dev_flags = 0;
fl6.flowi6_iif = skb->dev->ifindex;
fl6.daddr = nhaddr ? *nhaddr : hdr->daddr;
@@ -177,7 +184,13 @@ int seg6_lookup_nexthop(struct sk_buff *skb, struct in6_addr *nhaddr,
dst = &rt->dst;
}
- if (dst && dst->dev->flags & IFF_LOOPBACK && !dst->error) {
+ /* we want to discard traffic destined for local packet processing
+ * if @local_delivery is set to false.
+ */
+ if (!local_delivery)
+ dev_flags |= IFF_LOOPBACK;
+
+ if (dst && (dst->dev->flags & dev_flags) && !dst->error) {
dst_release(dst);
dst = NULL;
}
@@ -194,6 +207,12 @@ out:
return dst->error;
}
+int seg6_lookup_nexthop(struct sk_buff *skb,
+ struct in6_addr *nhaddr, u32 tbl_id)
+{
+ return seg6_lookup_any_nexthop(skb, nhaddr, tbl_id, false);
+}
+
/* regular endpoint function */
static int input_action_end(struct sk_buff *skb, struct seg6_local_lwt *slwt)
{
@@ -336,6 +355,8 @@ static int input_action_end_dx6(struct sk_buff *skb,
if (!ipv6_addr_any(&slwt->nh6))
nhaddr = &slwt->nh6;
+ skb_set_transport_header(skb, sizeof(struct ipv6hdr));
+
seg6_lookup_nexthop(skb, nhaddr, 0);
return dst_input(skb);
@@ -365,6 +386,8 @@ static int input_action_end_dx4(struct sk_buff *skb,
skb_dst_drop(skb);
+ skb_set_transport_header(skb, sizeof(struct iphdr));
+
err = ip_route_input(skb, nhaddr, iph->saddr, 0, skb->dev);
if (err)
goto drop;
@@ -385,7 +408,9 @@ static int input_action_end_dt6(struct sk_buff *skb,
if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
goto drop;
- seg6_lookup_nexthop(skb, NULL, slwt->table);
+ skb_set_transport_header(skb, sizeof(struct ipv6hdr));
+
+ seg6_lookup_any_nexthop(skb, NULL, slwt->table, true);
return dst_input(skb);
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 4804b6dc5e65..81f51335e326 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1891,7 +1891,7 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
state = inet_sk_state_load(sp);
if (state == TCP_LISTEN)
- rx_queue = sp->sk_ack_backlog;
+ rx_queue = READ_ONCE(sp->sk_ack_backlog);
else
/* Because we don't lock the socket,
* we might find a transient negative value.
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
index eecac1b7148e..fbe51d40bd7e 100644
--- a/net/ipv6/xfrm6_output.c
+++ b/net/ipv6/xfrm6_output.c
@@ -187,7 +187,7 @@ skip_frag:
int xfrm6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING,
- net, sk, skb, NULL, skb_dst(skb)->dev,
+ net, sk, skb, skb->dev, skb_dst(skb)->dev,
__xfrm6_output,
!(IP6CB(skb)->flags & IP6SKB_REROUTED));
}
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index c74f44dfaa22..2922d4150d88 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -705,7 +705,7 @@ static int llc_ui_accept(struct socket *sock, struct socket *newsock, int flags,
/* put original socket back into a clean listen state. */
sk->sk_state = TCP_LISTEN;
- sk->sk_ack_backlog--;
+ sk_acceptq_removed(sk);
dprintk("%s: ok success on %02X, client on %02X\n", __func__,
llc_sk(sk)->addr.sllc_sap, newllc->daddr.lsap);
frees:
@@ -780,7 +780,7 @@ static int llc_ui_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
}
/* Well, if we have backlog, try to process it now yet. */
- if (copied >= target && !sk->sk_backlog.tail)
+ if (copied >= target && !READ_ONCE(sk->sk_backlog.tail))
break;
if (copied) {
diff --git a/net/mac80211/Makefile b/net/mac80211/Makefile
index 4f03ebe732fa..6cbb1286d6c0 100644
--- a/net/mac80211/Makefile
+++ b/net/mac80211/Makefile
@@ -32,7 +32,8 @@ mac80211-y := \
chan.o \
trace.o mlme.o \
tdls.o \
- ocb.o
+ ocb.o \
+ airtime.o
mac80211-$(CONFIG_MAC80211_LEDS) += led.o
mac80211-$(CONFIG_MAC80211_DEBUGFS) += \
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index b11883d26875..33da6f738c99 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -485,7 +485,14 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
params.ssn = sta->tid_seq[tid] >> 4;
ret = drv_ampdu_action(local, sdata, &params);
- if (ret) {
+ if (ret == IEEE80211_AMPDU_TX_START_IMMEDIATE) {
+ /*
+ * We didn't send the request yet, so don't need to check
+ * here if we already got a response, just mark as driver
+ * ready immediately.
+ */
+ set_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state);
+ } else if (ret) {
ht_dbg(sdata,
"BA request denied - HW unavailable for %pM tid %d\n",
sta->sta.addr, tid);
diff --git a/net/mac80211/airtime.c b/net/mac80211/airtime.c
new file mode 100644
index 000000000000..63cb0028b02d
--- /dev/null
+++ b/net/mac80211/airtime.c
@@ -0,0 +1,597 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2019 Felix Fietkau <nbd@nbd.name>
+ */
+
+#include <net/mac80211.h>
+#include "ieee80211_i.h"
+#include "sta_info.h"
+
+#define AVG_PKT_SIZE 1024
+
+/* Number of bits for an average sized packet */
+#define MCS_NBITS (AVG_PKT_SIZE << 3)
+
+/* Number of kilo-symbols (symbols * 1024) for a packet with (bps) bits per
+ * symbol. We use k-symbols to avoid rounding in the _TIME macros below.
+ */
+#define MCS_N_KSYMS(bps) DIV_ROUND_UP(MCS_NBITS << 10, (bps))
+
+/* Transmission time (in 1024 * usec) for a packet containing (ksyms) * 1024
+ * symbols.
+ */
+#define MCS_SYMBOL_TIME(sgi, ksyms) \
+ (sgi ? \
+ ((ksyms) * 4 * 18) / 20 : /* 3.6 us per sym */ \
+ ((ksyms) * 4) /* 4.0 us per sym */ \
+ )
+
+/* Transmit duration for the raw data part of an average sized packet */
+#define MCS_DURATION(streams, sgi, bps) \
+ ((u32)MCS_SYMBOL_TIME(sgi, MCS_N_KSYMS((streams) * (bps))))
+
+#define MCS_DURATION_S(shift, streams, sgi, bps) \
+ ((u16)((MCS_DURATION(streams, sgi, bps) >> shift)))
+
+/* These should match the values in enum nl80211_he_gi */
+#define HE_GI_08 0
+#define HE_GI_16 1
+#define HE_GI_32 2
+
+/* Transmission time (1024 usec) for a packet containing (ksyms) * k-symbols */
+#define HE_SYMBOL_TIME(gi, ksyms) \
+ (gi == HE_GI_08 ? \
+ ((ksyms) * 16 * 17) / 20 : /* 13.6 us per sym */ \
+ (gi == HE_GI_16 ? \
+ ((ksyms) * 16 * 18) / 20 : /* 14.4 us per sym */ \
+ ((ksyms) * 16) /* 16.0 us per sym */ \
+ ))
+
+/* Transmit duration for the raw data part of an average sized packet */
+#define HE_DURATION(streams, gi, bps) \
+ ((u32)HE_SYMBOL_TIME(gi, MCS_N_KSYMS((streams) * (bps))))
+
+#define HE_DURATION_S(shift, streams, gi, bps) \
+ (HE_DURATION(streams, gi, bps) >> shift)
+
+#define BW_20 0
+#define BW_40 1
+#define BW_80 2
+#define BW_160 3
+
+/*
+ * Define group sort order: HT40 -> SGI -> #streams
+ */
+#define IEEE80211_MAX_STREAMS 4
+#define IEEE80211_HT_STREAM_GROUPS 4 /* BW(=2) * SGI(=2) */
+#define IEEE80211_VHT_STREAM_GROUPS 8 /* BW(=4) * SGI(=2) */
+
+#define IEEE80211_HE_MAX_STREAMS 8
+#define IEEE80211_HE_STREAM_GROUPS 12 /* BW(=4) * GI(=3) */
+
+#define IEEE80211_HT_GROUPS_NB (IEEE80211_MAX_STREAMS * \
+ IEEE80211_HT_STREAM_GROUPS)
+#define IEEE80211_VHT_GROUPS_NB (IEEE80211_MAX_STREAMS * \
+ IEEE80211_VHT_STREAM_GROUPS)
+#define IEEE80211_HE_GROUPS_NB (IEEE80211_HE_MAX_STREAMS * \
+ IEEE80211_HE_STREAM_GROUPS)
+#define IEEE80211_GROUPS_NB (IEEE80211_HT_GROUPS_NB + \
+ IEEE80211_VHT_GROUPS_NB + \
+ IEEE80211_HE_GROUPS_NB)
+
+#define IEEE80211_HT_GROUP_0 0
+#define IEEE80211_VHT_GROUP_0 (IEEE80211_HT_GROUP_0 + IEEE80211_HT_GROUPS_NB)
+#define IEEE80211_HE_GROUP_0 (IEEE80211_VHT_GROUP_0 + IEEE80211_VHT_GROUPS_NB)
+
+#define MCS_GROUP_RATES 12
+
+#define HT_GROUP_IDX(_streams, _sgi, _ht40) \
+ IEEE80211_HT_GROUP_0 + \
+ IEEE80211_MAX_STREAMS * 2 * _ht40 + \
+ IEEE80211_MAX_STREAMS * _sgi + \
+ _streams - 1
+
+#define _MAX(a, b) (((a)>(b))?(a):(b))
+
+#define GROUP_SHIFT(duration) \
+ _MAX(0, 16 - __builtin_clz(duration))
+
+/* MCS rate information for an MCS group */
+#define __MCS_GROUP(_streams, _sgi, _ht40, _s) \
+ [HT_GROUP_IDX(_streams, _sgi, _ht40)] = { \
+ .shift = _s, \
+ .duration = { \
+ MCS_DURATION_S(_s, _streams, _sgi, _ht40 ? 54 : 26), \
+ MCS_DURATION_S(_s, _streams, _sgi, _ht40 ? 108 : 52), \
+ MCS_DURATION_S(_s, _streams, _sgi, _ht40 ? 162 : 78), \
+ MCS_DURATION_S(_s, _streams, _sgi, _ht40 ? 216 : 104), \
+ MCS_DURATION_S(_s, _streams, _sgi, _ht40 ? 324 : 156), \
+ MCS_DURATION_S(_s, _streams, _sgi, _ht40 ? 432 : 208), \
+ MCS_DURATION_S(_s, _streams, _sgi, _ht40 ? 486 : 234), \
+ MCS_DURATION_S(_s, _streams, _sgi, _ht40 ? 540 : 260) \
+ } \
+}
+
+#define MCS_GROUP_SHIFT(_streams, _sgi, _ht40) \
+ GROUP_SHIFT(MCS_DURATION(_streams, _sgi, _ht40 ? 54 : 26))
+
+#define MCS_GROUP(_streams, _sgi, _ht40) \
+ __MCS_GROUP(_streams, _sgi, _ht40, \
+ MCS_GROUP_SHIFT(_streams, _sgi, _ht40))
+
+#define VHT_GROUP_IDX(_streams, _sgi, _bw) \
+ (IEEE80211_VHT_GROUP_0 + \
+ IEEE80211_MAX_STREAMS * 2 * (_bw) + \
+ IEEE80211_MAX_STREAMS * (_sgi) + \
+ (_streams) - 1)
+
+#define BW2VBPS(_bw, r4, r3, r2, r1) \
+ (_bw == BW_160 ? r4 : _bw == BW_80 ? r3 : _bw == BW_40 ? r2 : r1)
+
+#define __VHT_GROUP(_streams, _sgi, _bw, _s) \
+ [VHT_GROUP_IDX(_streams, _sgi, _bw)] = { \
+ .shift = _s, \
+ .duration = { \
+ MCS_DURATION_S(_s, _streams, _sgi, \
+ BW2VBPS(_bw, 234, 117, 54, 26)), \
+ MCS_DURATION_S(_s, _streams, _sgi, \
+ BW2VBPS(_bw, 468, 234, 108, 52)), \
+ MCS_DURATION_S(_s, _streams, _sgi, \
+ BW2VBPS(_bw, 702, 351, 162, 78)), \
+ MCS_DURATION_S(_s, _streams, _sgi, \
+ BW2VBPS(_bw, 936, 468, 216, 104)), \
+ MCS_DURATION_S(_s, _streams, _sgi, \
+ BW2VBPS(_bw, 1404, 702, 324, 156)), \
+ MCS_DURATION_S(_s, _streams, _sgi, \
+ BW2VBPS(_bw, 1872, 936, 432, 208)), \
+ MCS_DURATION_S(_s, _streams, _sgi, \
+ BW2VBPS(_bw, 2106, 1053, 486, 234)), \
+ MCS_DURATION_S(_s, _streams, _sgi, \
+ BW2VBPS(_bw, 2340, 1170, 540, 260)), \
+ MCS_DURATION_S(_s, _streams, _sgi, \
+ BW2VBPS(_bw, 2808, 1404, 648, 312)), \
+ MCS_DURATION_S(_s, _streams, _sgi, \
+ BW2VBPS(_bw, 3120, 1560, 720, 346)) \
+ } \
+}
+
+#define VHT_GROUP_SHIFT(_streams, _sgi, _bw) \
+ GROUP_SHIFT(MCS_DURATION(_streams, _sgi, \
+ BW2VBPS(_bw, 234, 117, 54, 26)))
+
+#define VHT_GROUP(_streams, _sgi, _bw) \
+ __VHT_GROUP(_streams, _sgi, _bw, \
+ VHT_GROUP_SHIFT(_streams, _sgi, _bw))
+
+
+#define HE_GROUP_IDX(_streams, _gi, _bw) \
+ (IEEE80211_HE_GROUP_0 + \
+ IEEE80211_HE_MAX_STREAMS * 3 * (_bw) + \
+ IEEE80211_HE_MAX_STREAMS * (_gi) + \
+ (_streams) - 1)
+
+#define __HE_GROUP(_streams, _gi, _bw, _s) \
+ [HE_GROUP_IDX(_streams, _gi, _bw)] = { \
+ .shift = _s, \
+ .duration = { \
+ HE_DURATION_S(_s, _streams, _gi, \
+ BW2VBPS(_bw, 979, 489, 230, 115)), \
+ HE_DURATION_S(_s, _streams, _gi, \
+ BW2VBPS(_bw, 1958, 979, 475, 230)), \
+ HE_DURATION_S(_s, _streams, _gi, \
+ BW2VBPS(_bw, 2937, 1468, 705, 345)), \
+ HE_DURATION_S(_s, _streams, _gi, \
+ BW2VBPS(_bw, 3916, 1958, 936, 475)), \
+ HE_DURATION_S(_s, _streams, _gi, \
+ BW2VBPS(_bw, 5875, 2937, 1411, 705)), \
+ HE_DURATION_S(_s, _streams, _gi, \
+ BW2VBPS(_bw, 7833, 3916, 1872, 936)), \
+ HE_DURATION_S(_s, _streams, _gi, \
+ BW2VBPS(_bw, 8827, 4406, 2102, 1051)), \
+ HE_DURATION_S(_s, _streams, _gi, \
+ BW2VBPS(_bw, 9806, 4896, 2347, 1166)), \
+ HE_DURATION_S(_s, _streams, _gi, \
+ BW2VBPS(_bw, 11764, 5875, 2808, 1411)), \
+ HE_DURATION_S(_s, _streams, _gi, \
+ BW2VBPS(_bw, 13060, 6523, 3124, 1555)), \
+ HE_DURATION_S(_s, _streams, _gi, \
+ BW2VBPS(_bw, 14702, 7344, 3513, 1756)), \
+ HE_DURATION_S(_s, _streams, _gi, \
+ BW2VBPS(_bw, 16329, 8164, 3902, 1944)) \
+ } \
+}
+
+#define HE_GROUP_SHIFT(_streams, _gi, _bw) \
+ GROUP_SHIFT(HE_DURATION(_streams, _gi, \
+ BW2VBPS(_bw, 979, 489, 230, 115)))
+
+#define HE_GROUP(_streams, _gi, _bw) \
+ __HE_GROUP(_streams, _gi, _bw, \
+ HE_GROUP_SHIFT(_streams, _gi, _bw))
+struct mcs_group {
+ u8 shift;
+ u16 duration[MCS_GROUP_RATES];
+};
+
+static const struct mcs_group airtime_mcs_groups[] = {
+ MCS_GROUP(1, 0, BW_20),
+ MCS_GROUP(2, 0, BW_20),
+ MCS_GROUP(3, 0, BW_20),
+ MCS_GROUP(4, 0, BW_20),
+
+ MCS_GROUP(1, 1, BW_20),
+ MCS_GROUP(2, 1, BW_20),
+ MCS_GROUP(3, 1, BW_20),
+ MCS_GROUP(4, 1, BW_20),
+
+ MCS_GROUP(1, 0, BW_40),
+ MCS_GROUP(2, 0, BW_40),
+ MCS_GROUP(3, 0, BW_40),
+ MCS_GROUP(4, 0, BW_40),
+
+ MCS_GROUP(1, 1, BW_40),
+ MCS_GROUP(2, 1, BW_40),
+ MCS_GROUP(3, 1, BW_40),
+ MCS_GROUP(4, 1, BW_40),
+
+ VHT_GROUP(1, 0, BW_20),
+ VHT_GROUP(2, 0, BW_20),
+ VHT_GROUP(3, 0, BW_20),
+ VHT_GROUP(4, 0, BW_20),
+
+ VHT_GROUP(1, 1, BW_20),
+ VHT_GROUP(2, 1, BW_20),
+ VHT_GROUP(3, 1, BW_20),
+ VHT_GROUP(4, 1, BW_20),
+
+ VHT_GROUP(1, 0, BW_40),
+ VHT_GROUP(2, 0, BW_40),
+ VHT_GROUP(3, 0, BW_40),
+ VHT_GROUP(4, 0, BW_40),
+
+ VHT_GROUP(1, 1, BW_40),
+ VHT_GROUP(2, 1, BW_40),
+ VHT_GROUP(3, 1, BW_40),
+ VHT_GROUP(4, 1, BW_40),
+
+ VHT_GROUP(1, 0, BW_80),
+ VHT_GROUP(2, 0, BW_80),
+ VHT_GROUP(3, 0, BW_80),
+ VHT_GROUP(4, 0, BW_80),
+
+ VHT_GROUP(1, 1, BW_80),
+ VHT_GROUP(2, 1, BW_80),
+ VHT_GROUP(3, 1, BW_80),
+ VHT_GROUP(4, 1, BW_80),
+
+ VHT_GROUP(1, 0, BW_160),
+ VHT_GROUP(2, 0, BW_160),
+ VHT_GROUP(3, 0, BW_160),
+ VHT_GROUP(4, 0, BW_160),
+
+ VHT_GROUP(1, 1, BW_160),
+ VHT_GROUP(2, 1, BW_160),
+ VHT_GROUP(3, 1, BW_160),
+ VHT_GROUP(4, 1, BW_160),
+
+ HE_GROUP(1, HE_GI_08, BW_20),
+ HE_GROUP(2, HE_GI_08, BW_20),
+ HE_GROUP(3, HE_GI_08, BW_20),
+ HE_GROUP(4, HE_GI_08, BW_20),
+ HE_GROUP(5, HE_GI_08, BW_20),
+ HE_GROUP(6, HE_GI_08, BW_20),
+ HE_GROUP(7, HE_GI_08, BW_20),
+ HE_GROUP(8, HE_GI_08, BW_20),
+
+ HE_GROUP(1, HE_GI_16, BW_20),
+ HE_GROUP(2, HE_GI_16, BW_20),
+ HE_GROUP(3, HE_GI_16, BW_20),
+ HE_GROUP(4, HE_GI_16, BW_20),
+ HE_GROUP(5, HE_GI_16, BW_20),
+ HE_GROUP(6, HE_GI_16, BW_20),
+ HE_GROUP(7, HE_GI_16, BW_20),
+ HE_GROUP(8, HE_GI_16, BW_20),
+
+ HE_GROUP(1, HE_GI_32, BW_20),
+ HE_GROUP(2, HE_GI_32, BW_20),
+ HE_GROUP(3, HE_GI_32, BW_20),
+ HE_GROUP(4, HE_GI_32, BW_20),
+ HE_GROUP(5, HE_GI_32, BW_20),
+ HE_GROUP(6, HE_GI_32, BW_20),
+ HE_GROUP(7, HE_GI_32, BW_20),
+ HE_GROUP(8, HE_GI_32, BW_20),
+
+ HE_GROUP(1, HE_GI_08, BW_40),
+ HE_GROUP(2, HE_GI_08, BW_40),
+ HE_GROUP(3, HE_GI_08, BW_40),
+ HE_GROUP(4, HE_GI_08, BW_40),
+ HE_GROUP(5, HE_GI_08, BW_40),
+ HE_GROUP(6, HE_GI_08, BW_40),
+ HE_GROUP(7, HE_GI_08, BW_40),
+ HE_GROUP(8, HE_GI_08, BW_40),
+
+ HE_GROUP(1, HE_GI_16, BW_40),
+ HE_GROUP(2, HE_GI_16, BW_40),
+ HE_GROUP(3, HE_GI_16, BW_40),
+ HE_GROUP(4, HE_GI_16, BW_40),
+ HE_GROUP(5, HE_GI_16, BW_40),
+ HE_GROUP(6, HE_GI_16, BW_40),
+ HE_GROUP(7, HE_GI_16, BW_40),
+ HE_GROUP(8, HE_GI_16, BW_40),
+
+ HE_GROUP(1, HE_GI_32, BW_40),
+ HE_GROUP(2, HE_GI_32, BW_40),
+ HE_GROUP(3, HE_GI_32, BW_40),
+ HE_GROUP(4, HE_GI_32, BW_40),
+ HE_GROUP(5, HE_GI_32, BW_40),
+ HE_GROUP(6, HE_GI_32, BW_40),
+ HE_GROUP(7, HE_GI_32, BW_40),
+ HE_GROUP(8, HE_GI_32, BW_40),
+
+ HE_GROUP(1, HE_GI_08, BW_80),
+ HE_GROUP(2, HE_GI_08, BW_80),
+ HE_GROUP(3, HE_GI_08, BW_80),
+ HE_GROUP(4, HE_GI_08, BW_80),
+ HE_GROUP(5, HE_GI_08, BW_80),
+ HE_GROUP(6, HE_GI_08, BW_80),
+ HE_GROUP(7, HE_GI_08, BW_80),
+ HE_GROUP(8, HE_GI_08, BW_80),
+
+ HE_GROUP(1, HE_GI_16, BW_80),
+ HE_GROUP(2, HE_GI_16, BW_80),
+ HE_GROUP(3, HE_GI_16, BW_80),
+ HE_GROUP(4, HE_GI_16, BW_80),
+ HE_GROUP(5, HE_GI_16, BW_80),
+ HE_GROUP(6, HE_GI_16, BW_80),
+ HE_GROUP(7, HE_GI_16, BW_80),
+ HE_GROUP(8, HE_GI_16, BW_80),
+
+ HE_GROUP(1, HE_GI_32, BW_80),
+ HE_GROUP(2, HE_GI_32, BW_80),
+ HE_GROUP(3, HE_GI_32, BW_80),
+ HE_GROUP(4, HE_GI_32, BW_80),
+ HE_GROUP(5, HE_GI_32, BW_80),
+ HE_GROUP(6, HE_GI_32, BW_80),
+ HE_GROUP(7, HE_GI_32, BW_80),
+ HE_GROUP(8, HE_GI_32, BW_80),
+
+ HE_GROUP(1, HE_GI_08, BW_160),
+ HE_GROUP(2, HE_GI_08, BW_160),
+ HE_GROUP(3, HE_GI_08, BW_160),
+ HE_GROUP(4, HE_GI_08, BW_160),
+ HE_GROUP(5, HE_GI_08, BW_160),
+ HE_GROUP(6, HE_GI_08, BW_160),
+ HE_GROUP(7, HE_GI_08, BW_160),
+ HE_GROUP(8, HE_GI_08, BW_160),
+
+ HE_GROUP(1, HE_GI_16, BW_160),
+ HE_GROUP(2, HE_GI_16, BW_160),
+ HE_GROUP(3, HE_GI_16, BW_160),
+ HE_GROUP(4, HE_GI_16, BW_160),
+ HE_GROUP(5, HE_GI_16, BW_160),
+ HE_GROUP(6, HE_GI_16, BW_160),
+ HE_GROUP(7, HE_GI_16, BW_160),
+ HE_GROUP(8, HE_GI_16, BW_160),
+
+ HE_GROUP(1, HE_GI_32, BW_160),
+ HE_GROUP(2, HE_GI_32, BW_160),
+ HE_GROUP(3, HE_GI_32, BW_160),
+ HE_GROUP(4, HE_GI_32, BW_160),
+ HE_GROUP(5, HE_GI_32, BW_160),
+ HE_GROUP(6, HE_GI_32, BW_160),
+ HE_GROUP(7, HE_GI_32, BW_160),
+ HE_GROUP(8, HE_GI_32, BW_160),
+};
+
+static u32
+ieee80211_calc_legacy_rate_duration(u16 bitrate, bool short_pre,
+ bool cck, int len)
+{
+ u32 duration;
+
+ if (cck) {
+ duration = 144 + 48; /* preamble + PLCP */
+ if (short_pre)
+ duration >>= 1;
+
+ duration += 10; /* SIFS */
+ } else {
+ duration = 20 + 16; /* preamble + SIFS */
+ }
+
+ len <<= 3;
+ duration += (len * 10) / bitrate;
+
+ return duration;
+}
+
+u32 ieee80211_calc_rx_airtime(struct ieee80211_hw *hw,
+ struct ieee80211_rx_status *status,
+ int len)
+{
+ struct ieee80211_supported_band *sband;
+ const struct ieee80211_rate *rate;
+ bool sgi = status->enc_flags & RX_ENC_FLAG_SHORT_GI;
+ bool sp = status->enc_flags & RX_ENC_FLAG_SHORTPRE;
+ int bw, streams;
+ int group, idx;
+ u32 duration;
+ bool cck;
+
+ switch (status->bw) {
+ case RATE_INFO_BW_20:
+ bw = BW_20;
+ break;
+ case RATE_INFO_BW_40:
+ bw = BW_40;
+ break;
+ case RATE_INFO_BW_80:
+ bw = BW_80;
+ break;
+ case RATE_INFO_BW_160:
+ bw = BW_160;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return 0;
+ }
+
+ switch (status->encoding) {
+ case RX_ENC_LEGACY:
+ if (WARN_ON_ONCE(status->band > NL80211_BAND_5GHZ))
+ return 0;
+
+ sband = hw->wiphy->bands[status->band];
+ if (!sband || status->rate_idx >= sband->n_bitrates)
+ return 0;
+
+ rate = &sband->bitrates[status->rate_idx];
+ cck = rate->flags & IEEE80211_RATE_MANDATORY_B;
+
+ return ieee80211_calc_legacy_rate_duration(rate->bitrate, sp,
+ cck, len);
+
+ case RX_ENC_VHT:
+ streams = status->nss;
+ idx = status->rate_idx;
+ group = VHT_GROUP_IDX(streams, sgi, bw);
+ break;
+ case RX_ENC_HT:
+ streams = ((status->rate_idx >> 3) & 3) + 1;
+ idx = status->rate_idx & 7;
+ group = HT_GROUP_IDX(streams, sgi, bw);
+ break;
+ case RX_ENC_HE:
+ streams = status->nss;
+ idx = status->rate_idx;
+ group = HE_GROUP_IDX(streams, status->he_gi, bw);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return 0;
+ }
+
+ if (WARN_ON_ONCE((status->encoding != RX_ENC_HE && streams > 4) ||
+ (status->encoding == RX_ENC_HE && streams > 8)))
+ return 0;
+
+ duration = airtime_mcs_groups[group].duration[idx];
+ duration <<= airtime_mcs_groups[group].shift;
+ duration *= len;
+ duration /= AVG_PKT_SIZE;
+ duration /= 1024;
+
+ duration += 36 + (streams << 2);
+
+ return duration;
+}
+EXPORT_SYMBOL_GPL(ieee80211_calc_rx_airtime);
+
+static u32 ieee80211_calc_tx_airtime_rate(struct ieee80211_hw *hw,
+ struct ieee80211_tx_rate *rate,
+ u8 band, int len)
+{
+ struct ieee80211_rx_status stat = {
+ .band = band,
+ };
+
+ if (rate->idx < 0 || !rate->count)
+ return 0;
+
+ if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
+ stat.bw = RATE_INFO_BW_80;
+ else if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+ stat.bw = RATE_INFO_BW_40;
+ else
+ stat.bw = RATE_INFO_BW_20;
+
+ stat.enc_flags = 0;
+ if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
+ stat.enc_flags |= RX_ENC_FLAG_SHORTPRE;
+ if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
+ stat.enc_flags |= RX_ENC_FLAG_SHORT_GI;
+
+ stat.rate_idx = rate->idx;
+ if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
+ stat.encoding = RX_ENC_VHT;
+ stat.rate_idx = ieee80211_rate_get_vht_mcs(rate);
+ stat.nss = ieee80211_rate_get_vht_nss(rate);
+ } else if (rate->flags & IEEE80211_TX_RC_MCS) {
+ stat.encoding = RX_ENC_HT;
+ } else {
+ stat.encoding = RX_ENC_LEGACY;
+ }
+
+ return ieee80211_calc_rx_airtime(hw, &stat, len);
+}
+
+u32 ieee80211_calc_tx_airtime(struct ieee80211_hw *hw,
+ struct ieee80211_tx_info *info,
+ int len)
+{
+ u32 duration = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(info->status.rates); i++) {
+ struct ieee80211_tx_rate *rate = &info->status.rates[i];
+ u32 cur_duration;
+
+ cur_duration = ieee80211_calc_tx_airtime_rate(hw, rate,
+ info->band, len);
+ if (!cur_duration)
+ break;
+
+ duration += cur_duration * rate->count;
+ }
+
+ return duration;
+}
+EXPORT_SYMBOL_GPL(ieee80211_calc_tx_airtime);
+
+u32 ieee80211_calc_expected_tx_airtime(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *pubsta,
+ int len)
+{
+ struct ieee80211_supported_band *sband;
+ struct ieee80211_chanctx_conf *conf;
+ int rateidx, shift = 0;
+ bool cck, short_pream;
+ u32 basic_rates;
+ u8 band = 0;
+ u16 rate;
+
+ len += 38; /* Ethernet header length */
+
+ conf = rcu_dereference(vif->chanctx_conf);
+ if (conf) {
+ band = conf->def.chan->band;
+ shift = ieee80211_chandef_get_shift(&conf->def);
+ }
+
+ if (pubsta) {
+ struct sta_info *sta = container_of(pubsta, struct sta_info,
+ sta);
+
+ return ieee80211_calc_tx_airtime_rate(hw,
+ &sta->tx_stats.last_rate,
+ band, len);
+ }
+
+ if (!conf)
+ return 0;
+
+ /* No station to get latest rate from, so calculate the worst-case
+ * duration using the lowest configured basic rate.
+ */
+ sband = hw->wiphy->bands[band];
+
+ basic_rates = vif->bss_conf.basic_rates;
+ short_pream = vif->bss_conf.use_short_preamble;
+
+ rateidx = basic_rates ? ffs(basic_rates) - 1 : 0;
+ rate = sband->bitrates[rateidx].bitrate << shift;
+ cck = sband->bitrates[rateidx].flags & IEEE80211_RATE_MANDATORY_B;
+
+ return ieee80211_calc_legacy_rate_duration(rate, short_pream, cck, len);
+}
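
For legacy (non-HT) frames the estimate reduces to preamble time plus payload time at the bitrate; with mac80211 bitrates expressed in 100 kbps units, len * 8 * 10 / bitrate yields microseconds. A user-space transcription of the ieee80211_calc_legacy_rate_duration() math above:

    #include <stdio.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* Legacy-rate duration math from airtime.c: bitrate is in 100 kbps
     * units (mac80211 convention), result is in microseconds.
     */
    static uint32_t legacy_duration(uint16_t bitrate, bool short_pre,
                                    bool cck, int len)
    {
        uint32_t duration;

        if (cck) {
            duration = 144 + 48;  /* long preamble + PLCP */
            if (short_pre)
                duration >>= 1;
            duration += 10;       /* SIFS */
        } else {
            duration = 20 + 16;   /* OFDM preamble + SIFS */
        }
        return duration + ((uint32_t)len * 8 * 10) / bitrate;
    }

    int main(void)
    {
        /* 1024-byte frame at 6 Mbps OFDM (bitrate = 60 in 100 kbps units) */
        printf("%u us\n", legacy_duration(60, false, false, 1024));
        return 0;
    }
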
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 70739e746c13..4fb7f1f12109 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -3428,7 +3428,7 @@ int ieee80211_attach_ack_skb(struct ieee80211_local *local, struct sk_buff *skb,
spin_lock_irqsave(&local->ack_status_lock, spin_flags);
id = idr_alloc(&local->ack_status_frames, ack_skb,
- 1, 0x10000, GFP_ATOMIC);
+ 1, 0x40, GFP_ATOMIC);
spin_unlock_irqrestore(&local->ack_status_lock, spin_flags);
if (id < 0) {
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index 568b3b276931..ad41d74530c6 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -59,6 +59,8 @@ static const struct file_operations name## _ops = { \
debugfs_create_file(#name, mode, phyd, local, &name## _ops);
+DEBUGFS_READONLY_FILE(hw_conf, "%x",
+ local->hw.conf.flags);
DEBUGFS_READONLY_FILE(user_power, "%d",
local->user_power_level);
DEBUGFS_READONLY_FILE(power, "%d",
@@ -148,6 +150,87 @@ static const struct file_operations aqm_ops = {
.llseek = default_llseek,
};
+static ssize_t aql_txq_limit_read(struct file *file,
+ char __user *user_buf,
+ size_t count,
+ loff_t *ppos)
+{
+ struct ieee80211_local *local = file->private_data;
+ char buf[400];
+ int len = 0;
+
+ len = scnprintf(buf, sizeof(buf),
+ "AC AQL limit low AQL limit high\n"
+ "VO %u %u\n"
+ "VI %u %u\n"
+ "BE %u %u\n"
+ "BK %u %u\n",
+ local->aql_txq_limit_low[IEEE80211_AC_VO],
+ local->aql_txq_limit_high[IEEE80211_AC_VO],
+ local->aql_txq_limit_low[IEEE80211_AC_VI],
+ local->aql_txq_limit_high[IEEE80211_AC_VI],
+ local->aql_txq_limit_low[IEEE80211_AC_BE],
+ local->aql_txq_limit_high[IEEE80211_AC_BE],
+ local->aql_txq_limit_low[IEEE80211_AC_BK],
+ local->aql_txq_limit_high[IEEE80211_AC_BK]);
+ return simple_read_from_buffer(user_buf, count, ppos,
+ buf, len);
+}
+
+static ssize_t aql_txq_limit_write(struct file *file,
+ const char __user *user_buf,
+ size_t count,
+ loff_t *ppos)
+{
+ struct ieee80211_local *local = file->private_data;
+ char buf[100];
+ size_t len;
+ u32 ac, q_limit_low, q_limit_high, q_limit_low_old, q_limit_high_old;
+ struct sta_info *sta;
+
+ if (count > sizeof(buf))
+ return -EINVAL;
+
+ if (copy_from_user(buf, user_buf, count))
+ return -EFAULT;
+
+ buf[sizeof(buf) - 1] = 0;
+ len = strlen(buf);
+ if (len > 0 && buf[len - 1] == '\n')
+ buf[len - 1] = 0;
+
+ if (sscanf(buf, "%u %u %u", &ac, &q_limit_low, &q_limit_high) != 3)
+ return -EINVAL;
+
+ if (ac >= IEEE80211_NUM_ACS)
+ return -EINVAL;
+
+ q_limit_low_old = local->aql_txq_limit_low[ac];
+ q_limit_high_old = local->aql_txq_limit_high[ac];
+
+ local->aql_txq_limit_low[ac] = q_limit_low;
+ local->aql_txq_limit_high[ac] = q_limit_high;
+
+ mutex_lock(&local->sta_mtx);
+ list_for_each_entry(sta, &local->sta_list, list) {
+ /* If a sta has customized queue limits, keep them */
+ if (sta->airtime[ac].aql_limit_low == q_limit_low_old &&
+ sta->airtime[ac].aql_limit_high == q_limit_high_old) {
+ sta->airtime[ac].aql_limit_low = q_limit_low;
+ sta->airtime[ac].aql_limit_high = q_limit_high;
+ }
+ }
+ mutex_unlock(&local->sta_mtx);
+ return count;
+}
+
+static const struct file_operations aql_txq_limit_ops = {
+ .write = aql_txq_limit_write,
+ .read = aql_txq_limit_read,
+ .open = simple_open,
+ .llseek = default_llseek,
+};
+
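
The read handler above prints one row of low/high limits per access category, and the write handler expects three integers: "<ac> <limit_low> <limit_high>". A userspace sketch of the same parse, with a hypothetical input raising the BE (ac == 2) limits:

#include <stdio.h>

int main(void)
{
	unsigned int ac, lo, hi;
	/* what userspace would write into the debugfs file */
	const char *input = "2 5000 12000";

	if (sscanf(input, "%u %u %u", &ac, &lo, &hi) == 3)
		printf("AC %u: low=%u high=%u\n", ac, lo, hi);
	return 0;
}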
static ssize_t force_tx_status_read(struct file *file,
char __user *user_buf,
size_t count,
@@ -433,6 +516,7 @@ void debugfs_hw_add(struct ieee80211_local *local)
DEBUGFS_ADD(hwflags);
DEBUGFS_ADD(user_power);
DEBUGFS_ADD(power);
+ DEBUGFS_ADD(hw_conf);
DEBUGFS_ADD_MODE(force_tx_status, 0600);
if (local->ops->wake_tx_queue)
@@ -441,6 +525,10 @@ void debugfs_hw_add(struct ieee80211_local *local)
debugfs_create_u16("airtime_flags", 0600,
phyd, &local->airtime_flags);
+ DEBUGFS_ADD(aql_txq_limit);
+ debugfs_create_u32("aql_threshold", 0600,
+ phyd, &local->aql_threshold);
+
statsd = debugfs_create_dir("statistics", phyd);
/* if the dir failed, don't put all the other things into the root! */
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index c8ad20c28c43..0185e6e5e5d1 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -197,10 +197,12 @@ static ssize_t sta_airtime_read(struct file *file, char __user *userbuf,
{
struct sta_info *sta = file->private_data;
struct ieee80211_local *local = sta->sdata->local;
- size_t bufsz = 200;
+ size_t bufsz = 400;
char *buf = kzalloc(bufsz, GFP_KERNEL), *p = buf;
u64 rx_airtime = 0, tx_airtime = 0;
s64 deficit[IEEE80211_NUM_ACS];
+ u32 q_depth[IEEE80211_NUM_ACS];
+ u32 q_limit_l[IEEE80211_NUM_ACS], q_limit_h[IEEE80211_NUM_ACS];
ssize_t rv;
int ac;
@@ -212,19 +214,22 @@ static ssize_t sta_airtime_read(struct file *file, char __user *userbuf,
rx_airtime += sta->airtime[ac].rx_airtime;
tx_airtime += sta->airtime[ac].tx_airtime;
deficit[ac] = sta->airtime[ac].deficit;
+ q_limit_l[ac] = sta->airtime[ac].aql_limit_low;
+ q_limit_h[ac] = sta->airtime[ac].aql_limit_high;
spin_unlock_bh(&local->active_txq_lock[ac]);
+ q_depth[ac] = atomic_read(&sta->airtime[ac].aql_tx_pending);
}
p += scnprintf(p, bufsz + buf - p,
"RX: %llu us\nTX: %llu us\nWeight: %u\n"
- "Deficit: VO: %lld us VI: %lld us BE: %lld us BK: %lld us\n",
- rx_airtime,
- tx_airtime,
- sta->airtime_weight,
- deficit[0],
- deficit[1],
- deficit[2],
- deficit[3]);
+ "Deficit: VO: %lld us VI: %lld us BE: %lld us BK: %lld us\n"
+ "Q depth: VO: %u us VI: %u us BE: %u us BK: %u us\n"
+ "Q limit[low/high]: VO: %u/%u VI: %u/%u BE: %u/%u BK: %u/%u\n",
+ rx_airtime, tx_airtime, sta->airtime_weight,
+ deficit[0], deficit[1], deficit[2], deficit[3],
+ q_depth[0], q_depth[1], q_depth[2], q_depth[3],
+ q_limit_l[0], q_limit_h[0], q_limit_l[1], q_limit_h[1],
+ q_limit_l[2], q_limit_h[2], q_limit_l[3], q_limit_h[3]);
rv = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
kfree(buf);
@@ -236,7 +241,25 @@ static ssize_t sta_airtime_write(struct file *file, const char __user *userbuf,
{
struct sta_info *sta = file->private_data;
struct ieee80211_local *local = sta->sdata->local;
- int ac;
+ u32 ac, q_limit_l, q_limit_h;
+ char _buf[100] = {}, *buf = _buf;
+
+ if (count > sizeof(_buf))
+ return -EINVAL;
+
+ if (copy_from_user(buf, userbuf, count))
+ return -EFAULT;
+
+ buf[sizeof(_buf) - 1] = '\0';
+ if (sscanf(buf, "queue limit %u %u %u", &ac, &q_limit_l, &q_limit_h)
+ != 3)
+ return -EINVAL;
+
+ if (ac >= IEEE80211_NUM_ACS)
+ return -EINVAL;
+
+ sta->airtime[ac].aql_limit_low = q_limit_l;
+ sta->airtime[ac].aql_limit_high = q_limit_h;
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
spin_lock_bh(&local->active_txq_lock[ac]);
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 0a6ff01c68a9..d40744903fa9 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -538,7 +538,6 @@ int ieee80211_ibss_finish_csa(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
struct cfg80211_bss *cbss;
- int err, changed = 0;
sdata_assert_lock(sdata);
@@ -560,13 +559,7 @@ int ieee80211_ibss_finish_csa(struct ieee80211_sub_if_data *sdata)
ifibss->chandef = sdata->csa_chandef;
/* generate the beacon */
- err = ieee80211_ibss_csa_beacon(sdata, NULL);
- if (err < 0)
- return err;
-
- changed |= err;
-
- return changed;
+ return ieee80211_ibss_csa_beacon(sdata, NULL);
}
void ieee80211_ibss_stop(struct ieee80211_sub_if_data *sdata)
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 05406e9c05b3..ad15b3be8bb3 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -1142,6 +1142,10 @@ struct ieee80211_local {
u16 schedule_round[IEEE80211_NUM_ACS];
u16 airtime_flags;
+ u32 aql_txq_limit_low[IEEE80211_NUM_ACS];
+ u32 aql_txq_limit_high[IEEE80211_NUM_ACS];
+ u32 aql_threshold;
+ atomic_t aql_total_pending_airtime;
const struct ieee80211_ops *ops;
@@ -2249,6 +2253,10 @@ const char *ieee80211_get_reason_code_string(u16 reason_code);
extern const struct ethtool_ops ieee80211_ethtool_ops;
+u32 ieee80211_calc_expected_tx_airtime(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *pubsta,
+ int len);
#ifdef CONFIG_MAC80211_NOINLINE
#define debug_noinline noinline
#else
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 2d05c4cfaf6d..6cca0853f183 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -667,8 +667,16 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
INIT_LIST_HEAD(&local->active_txqs[i]);
spin_lock_init(&local->active_txq_lock[i]);
+ local->aql_txq_limit_low[i] = IEEE80211_DEFAULT_AQL_TXQ_LIMIT_L;
+ local->aql_txq_limit_high[i] =
+ IEEE80211_DEFAULT_AQL_TXQ_LIMIT_H;
}
- local->airtime_flags = AIRTIME_USE_TX | AIRTIME_USE_RX;
+
+ local->airtime_flags = AIRTIME_USE_TX |
+ AIRTIME_USE_RX |
+ AIRTIME_USE_AQL;
+ local->aql_threshold = IEEE80211_AQL_THRESHOLD;
+ atomic_set(&local->aql_total_pending_airtime, 0);
INIT_LIST_HEAD(&local->chanctx_list);
mutex_init(&local->chanctx_mtx);
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 54dd8849d1cc..5fa13176036f 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -3186,15 +3186,14 @@ static int ieee80211_recalc_twt_req(struct ieee80211_sub_if_data *sdata,
static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
struct cfg80211_bss *cbss,
- struct ieee80211_mgmt *mgmt, size_t len)
+ struct ieee80211_mgmt *mgmt, size_t len,
+ struct ieee802_11_elems *elems)
{
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
struct ieee80211_local *local = sdata->local;
struct ieee80211_supported_band *sband;
struct sta_info *sta;
- u8 *pos;
u16 capab_info, aid;
- struct ieee802_11_elems elems;
struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf;
const struct cfg80211_bss_ies *bss_ies = NULL;
struct ieee80211_mgd_assoc_data *assoc_data = ifmgd->assoc_data;
@@ -3222,19 +3221,15 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
ifmgd->broken_ap = true;
}
- pos = mgmt->u.assoc_resp.variable;
- ieee802_11_parse_elems(pos, len - (pos - (u8 *)mgmt), false, &elems,
- mgmt->bssid, assoc_data->bss->bssid);
-
- if (!elems.supp_rates) {
+ if (!elems->supp_rates) {
sdata_info(sdata, "no SuppRates element in AssocResp\n");
return false;
}
ifmgd->aid = aid;
ifmgd->tdls_chan_switch_prohibited =
- elems.ext_capab && elems.ext_capab_len >= 5 &&
- (elems.ext_capab[4] & WLAN_EXT_CAPA5_TDLS_CH_SW_PROHIBITED);
+ elems->ext_capab && elems->ext_capab_len >= 5 &&
+ (elems->ext_capab[4] & WLAN_EXT_CAPA5_TDLS_CH_SW_PROHIBITED);
/*
* Some APs are erroneously not including some information in their
@@ -3243,11 +3238,11 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
* 2G/3G/4G wifi routers, reported models include the "Onda PN51T",
* "Vodafone PocketWiFi 2", "ZTE MF60" and a similar T-Mobile device.
*/
- if ((assoc_data->wmm && !elems.wmm_param) ||
+ if ((assoc_data->wmm && !elems->wmm_param) ||
(!(ifmgd->flags & IEEE80211_STA_DISABLE_HT) &&
- (!elems.ht_cap_elem || !elems.ht_operation)) ||
+ (!elems->ht_cap_elem || !elems->ht_operation)) ||
(!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT) &&
- (!elems.vht_cap_elem || !elems.vht_operation))) {
+ (!elems->vht_cap_elem || !elems->vht_operation))) {
const struct cfg80211_bss_ies *ies;
struct ieee802_11_elems bss_elems;
@@ -3265,8 +3260,8 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
mgmt->bssid,
assoc_data->bss->bssid);
if (assoc_data->wmm &&
- !elems.wmm_param && bss_elems.wmm_param) {
- elems.wmm_param = bss_elems.wmm_param;
+ !elems->wmm_param && bss_elems.wmm_param) {
+ elems->wmm_param = bss_elems.wmm_param;
sdata_info(sdata,
"AP bug: WMM param missing from AssocResp\n");
}
@@ -3275,27 +3270,27 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
* Also check if we requested HT/VHT, otherwise the AP doesn't
* have to include the IEs in the (re)association response.
*/
- if (!elems.ht_cap_elem && bss_elems.ht_cap_elem &&
+ if (!elems->ht_cap_elem && bss_elems.ht_cap_elem &&
!(ifmgd->flags & IEEE80211_STA_DISABLE_HT)) {
- elems.ht_cap_elem = bss_elems.ht_cap_elem;
+ elems->ht_cap_elem = bss_elems.ht_cap_elem;
sdata_info(sdata,
"AP bug: HT capability missing from AssocResp\n");
}
- if (!elems.ht_operation && bss_elems.ht_operation &&
+ if (!elems->ht_operation && bss_elems.ht_operation &&
!(ifmgd->flags & IEEE80211_STA_DISABLE_HT)) {
- elems.ht_operation = bss_elems.ht_operation;
+ elems->ht_operation = bss_elems.ht_operation;
sdata_info(sdata,
"AP bug: HT operation missing from AssocResp\n");
}
- if (!elems.vht_cap_elem && bss_elems.vht_cap_elem &&
+ if (!elems->vht_cap_elem && bss_elems.vht_cap_elem &&
!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT)) {
- elems.vht_cap_elem = bss_elems.vht_cap_elem;
+ elems->vht_cap_elem = bss_elems.vht_cap_elem;
sdata_info(sdata,
"AP bug: VHT capa missing from AssocResp\n");
}
- if (!elems.vht_operation && bss_elems.vht_operation &&
+ if (!elems->vht_operation && bss_elems.vht_operation &&
!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT)) {
- elems.vht_operation = bss_elems.vht_operation;
+ elems->vht_operation = bss_elems.vht_operation;
sdata_info(sdata,
"AP bug: VHT operation missing from AssocResp\n");
}
@@ -3306,7 +3301,7 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
* they should be present here. This is just a safety net.
*/
if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HT) &&
- (!elems.wmm_param || !elems.ht_cap_elem || !elems.ht_operation)) {
+ (!elems->wmm_param || !elems->ht_cap_elem || !elems->ht_operation)) {
sdata_info(sdata,
"HT AP is missing WMM params or HT capability/operation\n");
ret = false;
@@ -3314,7 +3309,7 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
}
if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT) &&
- (!elems.vht_cap_elem || !elems.vht_operation)) {
+ (!elems->vht_cap_elem || !elems->vht_operation)) {
sdata_info(sdata,
"VHT AP is missing VHT capability/operation\n");
ret = false;
@@ -3341,7 +3336,7 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
}
if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HE) &&
- (!elems.he_cap || !elems.he_operation)) {
+ (!elems->he_cap || !elems->he_operation)) {
mutex_unlock(&sdata->local->sta_mtx);
sdata_info(sdata,
"HE AP is missing HE capability/operation\n");
@@ -3350,23 +3345,23 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
}
/* Set up internal HT/VHT capabilities */
- if (elems.ht_cap_elem && !(ifmgd->flags & IEEE80211_STA_DISABLE_HT))
+ if (elems->ht_cap_elem && !(ifmgd->flags & IEEE80211_STA_DISABLE_HT))
ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband,
- elems.ht_cap_elem, sta);
+ elems->ht_cap_elem, sta);
- if (elems.vht_cap_elem && !(ifmgd->flags & IEEE80211_STA_DISABLE_VHT))
+ if (elems->vht_cap_elem && !(ifmgd->flags & IEEE80211_STA_DISABLE_VHT))
ieee80211_vht_cap_ie_to_sta_vht_cap(sdata, sband,
- elems.vht_cap_elem, sta);
+ elems->vht_cap_elem, sta);
- if (elems.he_operation && !(ifmgd->flags & IEEE80211_STA_DISABLE_HE) &&
- elems.he_cap) {
+ if (elems->he_operation && !(ifmgd->flags & IEEE80211_STA_DISABLE_HE) &&
+ elems->he_cap) {
ieee80211_he_cap_ie_to_sta_he_cap(sdata, sband,
- elems.he_cap,
- elems.he_cap_len,
+ elems->he_cap,
+ elems->he_cap_len,
sta);
bss_conf->he_support = sta->sta.he_cap.has_he;
- changed |= ieee80211_recalc_twt_req(sdata, sta, &elems);
+ changed |= ieee80211_recalc_twt_req(sdata, sta, elems);
} else {
bss_conf->he_support = false;
bss_conf->twt_requester = false;
@@ -3374,14 +3369,14 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
if (bss_conf->he_support) {
bss_conf->bss_color =
- le32_get_bits(elems.he_operation->he_oper_params,
+ le32_get_bits(elems->he_operation->he_oper_params,
IEEE80211_HE_OPERATION_BSS_COLOR_MASK);
bss_conf->htc_trig_based_pkt_ext =
- le32_get_bits(elems.he_operation->he_oper_params,
+ le32_get_bits(elems->he_operation->he_oper_params,
IEEE80211_HE_OPERATION_DFLT_PE_DURATION_MASK);
bss_conf->frame_time_rts_th =
- le32_get_bits(elems.he_operation->he_oper_params,
+ le32_get_bits(elems->he_operation->he_oper_params,
IEEE80211_HE_OPERATION_RTS_THRESHOLD_MASK);
bss_conf->multi_sta_back_32bit =
@@ -3392,12 +3387,12 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
sta->sta.he_cap.he_cap_elem.mac_cap_info[2] &
IEEE80211_HE_MAC_CAP2_ACK_EN;
- bss_conf->uora_exists = !!elems.uora_element;
- if (elems.uora_element)
- bss_conf->uora_ocw_range = elems.uora_element[0];
+ bss_conf->uora_exists = !!elems->uora_element;
+ if (elems->uora_element)
+ bss_conf->uora_ocw_range = elems->uora_element[0];
- ieee80211_he_op_ie_to_bss_conf(&sdata->vif, elems.he_operation);
- ieee80211_he_spr_ie_to_bss_conf(&sdata->vif, elems.he_spr);
+ ieee80211_he_op_ie_to_bss_conf(&sdata->vif, elems->he_operation);
+ ieee80211_he_spr_ie_to_bss_conf(&sdata->vif, elems->he_spr);
/* TODO: OPEN: what happens if BSS color disable is set? */
}
@@ -3421,11 +3416,11 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
* NSS calculation (that would be done in rate_control_rate_init())
* and use the # of streams from that element.
*/
- if (elems.opmode_notif &&
- !(*elems.opmode_notif & IEEE80211_OPMODE_NOTIF_RX_NSS_TYPE_BF)) {
+ if (elems->opmode_notif &&
+ !(*elems->opmode_notif & IEEE80211_OPMODE_NOTIF_RX_NSS_TYPE_BF)) {
u8 nss;
- nss = *elems.opmode_notif & IEEE80211_OPMODE_NOTIF_RX_NSS_MASK;
+ nss = *elems->opmode_notif & IEEE80211_OPMODE_NOTIF_RX_NSS_MASK;
nss >>= IEEE80211_OPMODE_NOTIF_RX_NSS_SHIFT;
nss += 1;
sta->sta.rx_nss = nss;
@@ -3440,7 +3435,7 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
sta->sta.mfp = false;
}
- sta->sta.wme = elems.wmm_param && local->hw.queues >= IEEE80211_NUM_ACS;
+ sta->sta.wme = elems->wmm_param && local->hw.queues >= IEEE80211_NUM_ACS;
err = sta_info_move_state(sta, IEEE80211_STA_ASSOC);
if (!err && !(ifmgd->flags & IEEE80211_STA_CONTROL_PORT))
@@ -3468,9 +3463,9 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
if (ifmgd->flags & IEEE80211_STA_DISABLE_WMM) {
ieee80211_set_wmm_default(sdata, false, false);
- } else if (!ieee80211_sta_wmm_params(local, sdata, elems.wmm_param,
- elems.wmm_param_len,
- elems.mu_edca_param_set)) {
+ } else if (!ieee80211_sta_wmm_params(local, sdata, elems->wmm_param,
+ elems->wmm_param_len,
+ elems->mu_edca_param_set)) {
/* still enable QoS since we might have HT/VHT */
ieee80211_set_wmm_default(sdata, false, true);
/* set the disable-WMM flag in this case to disable
@@ -3484,11 +3479,11 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
}
changed |= BSS_CHANGED_QOS;
- if (elems.max_idle_period_ie) {
+ if (elems->max_idle_period_ie) {
bss_conf->max_idle_period =
- le16_to_cpu(elems.max_idle_period_ie->max_idle_period);
+ le16_to_cpu(elems->max_idle_period_ie->max_idle_period);
bss_conf->protected_keep_alive =
- !!(elems.max_idle_period_ie->idle_options &
+ !!(elems->max_idle_period_ie->idle_options &
WLAN_IDLE_OPTIONS_PROTECTED_KEEP_ALIVE);
changed |= BSS_CHANGED_KEEP_ALIVE;
} else {
@@ -3598,7 +3593,7 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
event.u.mlme.reason = status_code;
drv_event_callback(sdata->local, sdata, &event);
} else {
- if (!ieee80211_assoc_success(sdata, bss, mgmt, len)) {
+ if (!ieee80211_assoc_success(sdata, bss, mgmt, len, &elems)) {
/* oops -- internal error -- send timeout for now */
ieee80211_destroy_assoc_data(sdata, false, false);
cfg80211_assoc_timeout(sdata->dev, bss);
diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c
index ee86c3333999..86bc469a28bc 100644
--- a/net/mac80211/rc80211_minstrel.c
+++ b/net/mac80211/rc80211_minstrel.c
@@ -70,7 +70,7 @@ rix_to_ndx(struct minstrel_sta_info *mi, int rix)
}
/* return current EMWA throughput */
-int minstrel_get_tp_avg(struct minstrel_rate *mr, int prob_ewma)
+int minstrel_get_tp_avg(struct minstrel_rate *mr, int prob_avg)
{
int usecs;
@@ -79,13 +79,13 @@ int minstrel_get_tp_avg(struct minstrel_rate *mr, int prob_ewma)
usecs = 1000000;
/* reset thr. below 10% success */
- if (mr->stats.prob_ewma < MINSTREL_FRAC(10, 100))
+ if (mr->stats.prob_avg < MINSTREL_FRAC(10, 100))
return 0;
- if (prob_ewma > MINSTREL_FRAC(90, 100))
+ if (prob_avg > MINSTREL_FRAC(90, 100))
return MINSTREL_TRUNC(100000 * (MINSTREL_FRAC(90, 100) / usecs));
else
- return MINSTREL_TRUNC(100000 * (prob_ewma / usecs));
+ return MINSTREL_TRUNC(100000 * (prob_avg / usecs));
}
/* find & sort topmost throughput rates */
@@ -98,8 +98,8 @@ minstrel_sort_best_tp_rates(struct minstrel_sta_info *mi, int i, u8 *tp_list)
for (j = MAX_THR_RATES; j > 0; --j) {
tmp_mrs = &mi->r[tp_list[j - 1]].stats;
- if (minstrel_get_tp_avg(&mi->r[i], cur_mrs->prob_ewma) <=
- minstrel_get_tp_avg(&mi->r[tp_list[j - 1]], tmp_mrs->prob_ewma))
+ if (minstrel_get_tp_avg(&mi->r[i], cur_mrs->prob_avg) <=
+ minstrel_get_tp_avg(&mi->r[tp_list[j - 1]], tmp_mrs->prob_avg))
break;
}
@@ -157,20 +157,24 @@ minstrel_update_rates(struct minstrel_priv *mp, struct minstrel_sta_info *mi)
* Recalculate statistics and counters of a given rate
*/
void
-minstrel_calc_rate_stats(struct minstrel_rate_stats *mrs)
+minstrel_calc_rate_stats(struct minstrel_priv *mp,
+ struct minstrel_rate_stats *mrs)
{
unsigned int cur_prob;
if (unlikely(mrs->attempts > 0)) {
mrs->sample_skipped = 0;
cur_prob = MINSTREL_FRAC(mrs->success, mrs->attempts);
- if (unlikely(!mrs->att_hist)) {
- mrs->prob_ewma = cur_prob;
+ if (mp->new_avg) {
+ minstrel_filter_avg_add(&mrs->prob_avg,
+ &mrs->prob_avg_1, cur_prob);
+ } else if (unlikely(!mrs->att_hist)) {
+ mrs->prob_avg = cur_prob;
} else {
/* update exponential weighted moving average */
- mrs->prob_ewma = minstrel_ewma(mrs->prob_ewma,
- cur_prob,
- EWMA_LEVEL);
+ mrs->prob_avg = minstrel_ewma(mrs->prob_avg,
+ cur_prob,
+ EWMA_LEVEL);
}
mrs->att_hist += mrs->attempts;
mrs->succ_hist += mrs->success;
@@ -200,12 +204,12 @@ minstrel_update_stats(struct minstrel_priv *mp, struct minstrel_sta_info *mi)
struct minstrel_rate_stats *tmp_mrs = &mi->r[tmp_prob_rate].stats;
/* Update statistics of success probability per rate */
- minstrel_calc_rate_stats(mrs);
+ minstrel_calc_rate_stats(mp, mrs);
/* Sample less often below the 10% chance of success.
* Sample less often above the 95% chance of success. */
- if (mrs->prob_ewma > MINSTREL_FRAC(95, 100) ||
- mrs->prob_ewma < MINSTREL_FRAC(10, 100)) {
+ if (mrs->prob_avg > MINSTREL_FRAC(95, 100) ||
+ mrs->prob_avg < MINSTREL_FRAC(10, 100)) {
mr->adjusted_retry_count = mrs->retry_count >> 1;
if (mr->adjusted_retry_count > 2)
mr->adjusted_retry_count = 2;
@@ -225,14 +229,14 @@ minstrel_update_stats(struct minstrel_priv *mp, struct minstrel_sta_info *mi)
* choose the maximum throughput rate as max_prob_rate
* (2) if all success probabilities < 95%, the rate with
* highest success probability is chosen as max_prob_rate */
- if (mrs->prob_ewma >= MINSTREL_FRAC(95, 100)) {
- tmp_cur_tp = minstrel_get_tp_avg(mr, mrs->prob_ewma);
+ if (mrs->prob_avg >= MINSTREL_FRAC(95, 100)) {
+ tmp_cur_tp = minstrel_get_tp_avg(mr, mrs->prob_avg);
tmp_prob_tp = minstrel_get_tp_avg(&mi->r[tmp_prob_rate],
- tmp_mrs->prob_ewma);
+ tmp_mrs->prob_avg);
if (tmp_cur_tp >= tmp_prob_tp)
tmp_prob_rate = i;
} else {
- if (mrs->prob_ewma >= tmp_mrs->prob_ewma)
+ if (mrs->prob_avg >= tmp_mrs->prob_avg)
tmp_prob_rate = i;
}
}
@@ -290,7 +294,7 @@ minstrel_tx_status(void *priv, struct ieee80211_supported_band *sband,
mi->sample_deferred--;
if (time_after(jiffies, mi->last_stats_update +
- (mp->update_interval * HZ) / 1000))
+ mp->update_interval / (mp->new_avg ? 2 : 1)))
minstrel_update_stats(mp, mi);
}
@@ -422,7 +426,7 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta,
* has a probability of >95%, we shouldn't be attempting
* to use it, as this only wastes precious airtime */
if (!mrr_capable &&
- (mi->r[ndx].stats.prob_ewma > MINSTREL_FRAC(95, 100)))
+ (mi->r[ndx].stats.prob_avg > MINSTREL_FRAC(95, 100)))
return;
mi->prev_sample = true;
@@ -573,7 +577,7 @@ static u32 minstrel_get_expected_throughput(void *priv_sta)
* computing cur_tp
*/
tmp_mrs = &mi->r[idx].stats;
- tmp_cur_tp = minstrel_get_tp_avg(&mi->r[idx], tmp_mrs->prob_ewma) * 10;
+ tmp_cur_tp = minstrel_get_tp_avg(&mi->r[idx], tmp_mrs->prob_avg) * 10;
tmp_cur_tp = tmp_cur_tp * 1200 * 8 / 1024;
return tmp_cur_tp;
diff --git a/net/mac80211/rc80211_minstrel.h b/net/mac80211/rc80211_minstrel.h
index 51d8b2c846e7..dbb43bcd3c45 100644
--- a/net/mac80211/rc80211_minstrel.h
+++ b/net/mac80211/rc80211_minstrel.h
@@ -19,6 +19,21 @@
#define MAX_THR_RATES 4
/*
+ * Coefficients for moving average with noise filter (period=16),
+ * scaled by MINSTREL_SCALE (12 bits)
+ *
+ * a1 = exp(-pi * sqrt(2) / period)
+ * coeff2 = 2 * a1 * cos(sqrt(2) * 2 * pi / period)
+ * coeff3 = -sqr(a1)
+ * coeff1 = 1 - coeff2 - coeff3
+ */
+#define MINSTREL_AVG_COEFF1 (MINSTREL_FRAC(1, 1) - \
+ MINSTREL_AVG_COEFF2 - \
+ MINSTREL_AVG_COEFF3)
+#define MINSTREL_AVG_COEFF2 0x00001499
+#define MINSTREL_AVG_COEFF3 -0x0000092e
+
+/*
* Perform EWMA (Exponentially Weighted Moving Average) calculation
*/
static inline int
@@ -32,6 +47,37 @@ minstrel_ewma(int old, int new, int weight)
return old + incr;
}
+static inline int minstrel_filter_avg_add(u16 *prev_1, u16 *prev_2, s32 in)
+{
+ s32 out_1 = *prev_1;
+ s32 out_2 = *prev_2;
+ s32 val;
+
+ if (!in)
+ in += 1;
+
+ if (!out_1) {
+ val = out_1 = in;
+ goto out;
+ }
+
+ val = MINSTREL_AVG_COEFF1 * in;
+ val += MINSTREL_AVG_COEFF2 * out_1;
+ val += MINSTREL_AVG_COEFF3 * out_2;
+ val >>= MINSTREL_SCALE;
+
+ if (val > 1 << MINSTREL_SCALE)
+ val = 1 << MINSTREL_SCALE;
+ if (val < 0)
+ val = 1;
+
+out:
+ *prev_2 = out_1;
+ *prev_1 = val;
+
+ return val;
+}
+
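
The two scaled constants can be sanity-checked against the formulas in the comment above. A standalone floating-point recomputation, assuming MINSTREL_SCALE == 12 as defined elsewhere in this header (compile with -lm):

#include <math.h>
#include <stdio.h>

int main(void)
{
	const double period = 16.0, scale = 1 << 12;	/* MINSTREL_SCALE */
	double a1 = exp(-M_PI * sqrt(2.0) / period);
	double coeff2 = 2.0 * a1 * cos(sqrt(2.0) * 2.0 * M_PI / period);
	double coeff3 = -a1 * a1;

	/* truncation reproduces the constants 0x1499 and -0x92e */
	printf("coeff2 = %#x\n", (unsigned int)(coeff2 * scale));
	printf("coeff3 = -%#x\n", (unsigned int)(-coeff3 * scale));
	return 0;
}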
struct minstrel_rate_stats {
/* current / last sampling period attempts/success counters */
u16 attempts, last_attempts;
@@ -40,8 +86,9 @@ struct minstrel_rate_stats {
/* total attempts/success counters */
u32 att_hist, succ_hist;
- /* prob_ewma - exponential weighted moving average of prob */
- u16 prob_ewma;
+ /* prob_avg - moving average of prob */
+ u16 prob_avg;
+ u16 prob_avg_1;
/* maximum retry counts */
u8 retry_count;
@@ -95,6 +142,7 @@ struct minstrel_sta_info {
struct minstrel_priv {
struct ieee80211_hw *hw;
bool has_mrr;
+ bool new_avg;
u32 sample_switch;
unsigned int cw_min;
unsigned int cw_max;
@@ -126,8 +174,9 @@ extern const struct rate_control_ops mac80211_minstrel;
void minstrel_add_sta_debugfs(void *priv, void *priv_sta, struct dentry *dir);
/* Recalculate success probabilities and counters for a given rate using EWMA */
-void minstrel_calc_rate_stats(struct minstrel_rate_stats *mrs);
-int minstrel_get_tp_avg(struct minstrel_rate *mr, int prob_ewma);
+void minstrel_calc_rate_stats(struct minstrel_priv *mp,
+ struct minstrel_rate_stats *mrs);
+int minstrel_get_tp_avg(struct minstrel_rate *mr, int prob_avg);
/* debugfs */
int minstrel_stats_open(struct inode *inode, struct file *file);
diff --git a/net/mac80211/rc80211_minstrel_debugfs.c b/net/mac80211/rc80211_minstrel_debugfs.c
index c8afd85b51a0..9b8e0daeb7bb 100644
--- a/net/mac80211/rc80211_minstrel_debugfs.c
+++ b/net/mac80211/rc80211_minstrel_debugfs.c
@@ -90,8 +90,8 @@ minstrel_stats_open(struct inode *inode, struct file *file)
p += sprintf(p, "%6u ", mr->perfect_tx_time);
tp_max = minstrel_get_tp_avg(mr, MINSTREL_FRAC(100,100));
- tp_avg = minstrel_get_tp_avg(mr, mrs->prob_ewma);
- eprob = MINSTREL_TRUNC(mrs->prob_ewma * 1000);
+ tp_avg = minstrel_get_tp_avg(mr, mrs->prob_avg);
+ eprob = MINSTREL_TRUNC(mrs->prob_avg * 1000);
p += sprintf(p, "%4u.%1u %4u.%1u %3u.%1u"
" %3u %3u %-3u "
@@ -147,8 +147,8 @@ minstrel_stats_csv_open(struct inode *inode, struct file *file)
p += sprintf(p, "%u,",mr->perfect_tx_time);
tp_max = minstrel_get_tp_avg(mr, MINSTREL_FRAC(100,100));
- tp_avg = minstrel_get_tp_avg(mr, mrs->prob_ewma);
- eprob = MINSTREL_TRUNC(mrs->prob_ewma * 1000);
+ tp_avg = minstrel_get_tp_avg(mr, mrs->prob_avg);
+ eprob = MINSTREL_TRUNC(mrs->prob_avg * 1000);
p += sprintf(p, "%u.%u,%u.%u,%u.%u,%u,%u,%u,"
"%llu,%llu,%d,%d\n",
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index 0ef2633349b5..694a31978a04 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -346,12 +346,12 @@ minstrel_ht_avg_ampdu_len(struct minstrel_ht_sta *mi)
*/
int
minstrel_ht_get_tp_avg(struct minstrel_ht_sta *mi, int group, int rate,
- int prob_ewma)
+ int prob_avg)
{
unsigned int nsecs = 0;
/* do not account throughput if success prob is below 10% */
- if (prob_ewma < MINSTREL_FRAC(10, 100))
+ if (prob_avg < MINSTREL_FRAC(10, 100))
return 0;
if (group != MINSTREL_CCK_GROUP)
@@ -365,11 +365,11 @@ minstrel_ht_get_tp_avg(struct minstrel_ht_sta *mi, int group, int rate,
* account for collision related packet error rate fluctuation
* (prob is scaled - see MINSTREL_FRAC above)
*/
- if (prob_ewma > MINSTREL_FRAC(90, 100))
+ if (prob_avg > MINSTREL_FRAC(90, 100))
return MINSTREL_TRUNC(100000 * ((MINSTREL_FRAC(90, 100) * 1000)
/ nsecs));
else
- return MINSTREL_TRUNC(100000 * ((prob_ewma * 1000) / nsecs));
+ return MINSTREL_TRUNC(100000 * ((prob_avg * 1000) / nsecs));
}
/*
@@ -389,13 +389,13 @@ minstrel_ht_sort_best_tp_rates(struct minstrel_ht_sta *mi, u16 index,
cur_group = index / MCS_GROUP_RATES;
cur_idx = index % MCS_GROUP_RATES;
- cur_prob = mi->groups[cur_group].rates[cur_idx].prob_ewma;
+ cur_prob = mi->groups[cur_group].rates[cur_idx].prob_avg;
cur_tp_avg = minstrel_ht_get_tp_avg(mi, cur_group, cur_idx, cur_prob);
do {
tmp_group = tp_list[j - 1] / MCS_GROUP_RATES;
tmp_idx = tp_list[j - 1] % MCS_GROUP_RATES;
- tmp_prob = mi->groups[tmp_group].rates[tmp_idx].prob_ewma;
+ tmp_prob = mi->groups[tmp_group].rates[tmp_idx].prob_avg;
tmp_tp_avg = minstrel_ht_get_tp_avg(mi, tmp_group, tmp_idx,
tmp_prob);
if (cur_tp_avg < tmp_tp_avg ||
@@ -432,7 +432,7 @@ minstrel_ht_set_best_prob_rate(struct minstrel_ht_sta *mi, u16 index)
tmp_group = mi->max_prob_rate / MCS_GROUP_RATES;
tmp_idx = mi->max_prob_rate % MCS_GROUP_RATES;
- tmp_prob = mi->groups[tmp_group].rates[tmp_idx].prob_ewma;
+ tmp_prob = mi->groups[tmp_group].rates[tmp_idx].prob_avg;
tmp_tp_avg = minstrel_ht_get_tp_avg(mi, tmp_group, tmp_idx, tmp_prob);
/* if max_tp_rate[0] is from MCS_GROUP max_prob_rate get selected from
@@ -444,11 +444,11 @@ minstrel_ht_set_best_prob_rate(struct minstrel_ht_sta *mi, u16 index)
max_gpr_group = mg->max_group_prob_rate / MCS_GROUP_RATES;
max_gpr_idx = mg->max_group_prob_rate % MCS_GROUP_RATES;
- max_gpr_prob = mi->groups[max_gpr_group].rates[max_gpr_idx].prob_ewma;
+ max_gpr_prob = mi->groups[max_gpr_group].rates[max_gpr_idx].prob_avg;
- if (mrs->prob_ewma > MINSTREL_FRAC(75, 100)) {
+ if (mrs->prob_avg > MINSTREL_FRAC(75, 100)) {
cur_tp_avg = minstrel_ht_get_tp_avg(mi, cur_group, cur_idx,
- mrs->prob_ewma);
+ mrs->prob_avg);
if (cur_tp_avg > tmp_tp_avg)
mi->max_prob_rate = index;
@@ -458,9 +458,9 @@ minstrel_ht_set_best_prob_rate(struct minstrel_ht_sta *mi, u16 index)
if (cur_tp_avg > max_gpr_tp_avg)
mg->max_group_prob_rate = index;
} else {
- if (mrs->prob_ewma > tmp_prob)
+ if (mrs->prob_avg > tmp_prob)
mi->max_prob_rate = index;
- if (mrs->prob_ewma > max_gpr_prob)
+ if (mrs->prob_avg > max_gpr_prob)
mg->max_group_prob_rate = index;
}
}
@@ -482,12 +482,12 @@ minstrel_ht_assign_best_tp_rates(struct minstrel_ht_sta *mi,
tmp_group = tmp_cck_tp_rate[0] / MCS_GROUP_RATES;
tmp_idx = tmp_cck_tp_rate[0] % MCS_GROUP_RATES;
- tmp_prob = mi->groups[tmp_group].rates[tmp_idx].prob_ewma;
+ tmp_prob = mi->groups[tmp_group].rates[tmp_idx].prob_avg;
tmp_cck_tp = minstrel_ht_get_tp_avg(mi, tmp_group, tmp_idx, tmp_prob);
tmp_group = tmp_mcs_tp_rate[0] / MCS_GROUP_RATES;
tmp_idx = tmp_mcs_tp_rate[0] % MCS_GROUP_RATES;
- tmp_prob = mi->groups[tmp_group].rates[tmp_idx].prob_ewma;
+ tmp_prob = mi->groups[tmp_group].rates[tmp_idx].prob_avg;
tmp_mcs_tp = minstrel_ht_get_tp_avg(mi, tmp_group, tmp_idx, tmp_prob);
if (tmp_cck_tp_rate && tmp_cck_tp > tmp_mcs_tp) {
@@ -518,7 +518,7 @@ minstrel_ht_prob_rate_reduce_streams(struct minstrel_ht_sta *mi)
continue;
tmp_idx = mg->max_group_prob_rate % MCS_GROUP_RATES;
- tmp_prob = mi->groups[group].rates[tmp_idx].prob_ewma;
+ tmp_prob = mi->groups[group].rates[tmp_idx].prob_avg;
if (tmp_tp < minstrel_ht_get_tp_avg(mi, group, tmp_idx, tmp_prob) &&
(minstrel_mcs_groups[group].streams < tmp_max_streams)) {
@@ -623,7 +623,7 @@ minstrel_ht_rate_sample_switch(struct minstrel_priv *mp,
* If that fails, look again for a rate that is at least as fast
*/
mrs = minstrel_get_ratestats(mi, mi->max_tp_rate[0]);
- faster_rate = mrs->prob_ewma > MINSTREL_FRAC(75, 100);
+ faster_rate = mrs->prob_avg > MINSTREL_FRAC(75, 100);
minstrel_ht_find_probe_rates(mi, rates, &n_rates, faster_rate);
if (!n_rates && faster_rate)
minstrel_ht_find_probe_rates(mi, rates, &n_rates, false);
@@ -737,8 +737,8 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
mrs = &mg->rates[i];
mrs->retry_updated = false;
- minstrel_calc_rate_stats(mrs);
- cur_prob = mrs->prob_ewma;
+ minstrel_calc_rate_stats(mp, mrs);
+ cur_prob = mrs->prob_avg;
if (minstrel_ht_get_tp_avg(mi, group, i, cur_prob) == 0)
continue;
@@ -773,6 +773,8 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
/* try to sample all available rates during each interval */
mi->sample_count *= 8;
+ if (mp->new_avg)
+ mi->sample_count /= 2;
if (sample)
minstrel_ht_rate_sample_switch(mp, mi);
@@ -889,6 +891,7 @@ minstrel_ht_tx_status(void *priv, struct ieee80211_supported_band *sband,
struct ieee80211_tx_rate *ar = info->status.rates;
struct minstrel_rate_stats *rate, *rate2, *rate_sample = NULL;
struct minstrel_priv *mp = priv;
+ u32 update_interval = mp->update_interval / 2;
bool last, update = false;
bool sample_status = false;
int i;
@@ -943,6 +946,10 @@ minstrel_ht_tx_status(void *priv, struct ieee80211_supported_band *sband,
switch (mi->sample_mode) {
case MINSTREL_SAMPLE_IDLE:
+ if (mp->new_avg &&
+ (mp->hw->max_rates > 1 ||
+ mi->total_packets_cur < SAMPLE_SWITCH_THR))
+ update_interval /= 2;
break;
case MINSTREL_SAMPLE_ACTIVE:
@@ -970,23 +977,20 @@ minstrel_ht_tx_status(void *priv, struct ieee80211_supported_band *sband,
*/
rate = minstrel_get_ratestats(mi, mi->max_tp_rate[0]);
if (rate->attempts > 30 &&
- MINSTREL_FRAC(rate->success, rate->attempts) <
- MINSTREL_FRAC(20, 100)) {
+ rate->success < rate->attempts / 4) {
minstrel_downgrade_rate(mi, &mi->max_tp_rate[0], true);
update = true;
}
rate2 = minstrel_get_ratestats(mi, mi->max_tp_rate[1]);
if (rate2->attempts > 30 &&
- MINSTREL_FRAC(rate2->success, rate2->attempts) <
- MINSTREL_FRAC(20, 100)) {
+ rate2->success < rate2->attempts / 4) {
minstrel_downgrade_rate(mi, &mi->max_tp_rate[1], false);
update = true;
}
}
- if (time_after(jiffies, mi->last_stats_update +
- (mp->update_interval / 2 * HZ) / 1000)) {
+ if (time_after(jiffies, mi->last_stats_update + update_interval)) {
update = true;
minstrel_ht_update_stats(mp, mi, true);
}
@@ -1008,7 +1012,7 @@ minstrel_calc_retransmit(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
unsigned int overhead = 0, overhead_rtscts = 0;
mrs = minstrel_get_ratestats(mi, index);
- if (mrs->prob_ewma < MINSTREL_FRAC(1, 10)) {
+ if (mrs->prob_avg < MINSTREL_FRAC(1, 10)) {
mrs->retry_count = 1;
mrs->retry_count_rtscts = 1;
return;
@@ -1065,7 +1069,7 @@ minstrel_ht_set_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
if (!mrs->retry_updated)
minstrel_calc_retransmit(mp, mi, index);
- if (mrs->prob_ewma < MINSTREL_FRAC(20, 100) || !mrs->retry_count) {
+ if (mrs->prob_avg < MINSTREL_FRAC(20, 100) || !mrs->retry_count) {
ratetbl->rate[offset].count = 2;
ratetbl->rate[offset].count_rts = 2;
ratetbl->rate[offset].count_cts = 2;
@@ -1099,11 +1103,11 @@ minstrel_ht_set_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
}
static inline int
-minstrel_ht_get_prob_ewma(struct minstrel_ht_sta *mi, int rate)
+minstrel_ht_get_prob_avg(struct minstrel_ht_sta *mi, int rate)
{
int group = rate / MCS_GROUP_RATES;
rate %= MCS_GROUP_RATES;
- return mi->groups[group].rates[rate].prob_ewma;
+ return mi->groups[group].rates[rate].prob_avg;
}
static int
@@ -1115,7 +1119,7 @@ minstrel_ht_get_max_amsdu_len(struct minstrel_ht_sta *mi)
unsigned int duration;
/* Disable A-MSDU if max_prob_rate is bad */
- if (mi->groups[group].rates[rate].prob_ewma < MINSTREL_FRAC(50, 100))
+ if (mi->groups[group].rates[rate].prob_avg < MINSTREL_FRAC(50, 100))
return 1;
duration = g->duration[rate];
@@ -1138,7 +1142,7 @@ minstrel_ht_get_max_amsdu_len(struct minstrel_ht_sta *mi)
* data packet size
*/
if (duration > MCS_DURATION(1, 0, 260) ||
- (minstrel_ht_get_prob_ewma(mi, mi->max_tp_rate[0]) <
+ (minstrel_ht_get_prob_avg(mi, mi->max_tp_rate[0]) <
MINSTREL_FRAC(75, 100)))
return 3200;
@@ -1243,7 +1247,7 @@ minstrel_get_sample_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
* rate, to avoid wasting airtime.
*/
sample_dur = minstrel_get_duration(sample_idx);
- if (mrs->prob_ewma > MINSTREL_FRAC(95, 100) ||
+ if (mrs->prob_avg > MINSTREL_FRAC(95, 100) ||
minstrel_get_duration(mi->max_prob_rate) * 3 < sample_dur)
return -1;
@@ -1666,7 +1670,8 @@ minstrel_ht_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
mp->has_mrr = true;
mp->hw = hw;
- mp->update_interval = 100;
+ mp->update_interval = HZ / 10;
+ mp->new_avg = true;
#ifdef CONFIG_MAC80211_DEBUGFS
mp->fixed_rate_idx = (u32) -1;
@@ -1674,6 +1679,8 @@ minstrel_ht_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
&mp->fixed_rate_idx);
debugfs_create_u32("sample_switch", S_IRUGO | S_IWUSR, debugfsdir,
&mp->sample_switch);
+ debugfs_create_bool("new_avg", S_IRUGO | S_IWUSR, debugfsdir,
+ &mp->new_avg);
#endif
minstrel_ht_init_cck_rates(mp);
@@ -1698,7 +1705,7 @@ static u32 minstrel_ht_get_expected_throughput(void *priv_sta)
i = mi->max_tp_rate[0] / MCS_GROUP_RATES;
j = mi->max_tp_rate[0] % MCS_GROUP_RATES;
- prob = mi->groups[i].rates[j].prob_ewma;
+ prob = mi->groups[i].rates[j].prob_avg;
/* convert tp_avg from packets per second to kbps */
tp_avg = minstrel_ht_get_tp_avg(mi, i, j, prob) * 10;
diff --git a/net/mac80211/rc80211_minstrel_ht.h b/net/mac80211/rc80211_minstrel_ht.h
index f938701e7ab7..53ea3c29debf 100644
--- a/net/mac80211/rc80211_minstrel_ht.h
+++ b/net/mac80211/rc80211_minstrel_ht.h
@@ -119,6 +119,6 @@ struct minstrel_ht_sta_priv {
void minstrel_ht_add_sta_debugfs(void *priv, void *priv_sta, struct dentry *dir);
int minstrel_ht_get_tp_avg(struct minstrel_ht_sta *mi, int group, int rate,
- int prob_ewma);
+ int prob_avg);
#endif
diff --git a/net/mac80211/rc80211_minstrel_ht_debugfs.c b/net/mac80211/rc80211_minstrel_ht_debugfs.c
index 5a6e9f3edc04..bebb71917742 100644
--- a/net/mac80211/rc80211_minstrel_ht_debugfs.c
+++ b/net/mac80211/rc80211_minstrel_ht_debugfs.c
@@ -98,8 +98,8 @@ minstrel_ht_stats_dump(struct minstrel_ht_sta *mi, int i, char *p)
p += sprintf(p, "%6u ", tx_time);
tp_max = minstrel_ht_get_tp_avg(mi, i, j, MINSTREL_FRAC(100, 100));
- tp_avg = minstrel_ht_get_tp_avg(mi, i, j, mrs->prob_ewma);
- eprob = MINSTREL_TRUNC(mrs->prob_ewma * 1000);
+ tp_avg = minstrel_ht_get_tp_avg(mi, i, j, mrs->prob_avg);
+ eprob = MINSTREL_TRUNC(mrs->prob_avg * 1000);
p += sprintf(p, "%4u.%1u %4u.%1u %3u.%1u"
" %3u %3u %-3u "
@@ -243,8 +243,8 @@ minstrel_ht_stats_csv_dump(struct minstrel_ht_sta *mi, int i, char *p)
p += sprintf(p, "%u,", tx_time);
tp_max = minstrel_ht_get_tp_avg(mi, i, j, MINSTREL_FRAC(100, 100));
- tp_avg = minstrel_ht_get_tp_avg(mi, i, j, mrs->prob_ewma);
- eprob = MINSTREL_TRUNC(mrs->prob_ewma * 1000);
+ tp_avg = minstrel_ht_get_tp_avg(mi, i, j, mrs->prob_avg);
+ eprob = MINSTREL_TRUNC(mrs->prob_avg * 1000);
p += sprintf(p, "%u.%u,%u.%u,%u.%u,%u,%u,"
"%u,%llu,%llu,",
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 8d3a2389b055..8eafd81e97b4 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -210,6 +210,20 @@ struct sta_info *sta_info_get_bss(struct ieee80211_sub_if_data *sdata,
return NULL;
}
+struct sta_info *sta_info_get_by_addrs(struct ieee80211_local *local,
+ const u8 *sta_addr, const u8 *vif_addr)
+{
+ struct rhlist_head *tmp;
+ struct sta_info *sta;
+
+ for_each_sta_info(local, sta_addr, sta, tmp) {
+ if (ether_addr_equal(vif_addr, sta->sdata->vif.addr))
+ return sta;
+ }
+
+ return NULL;
+}
+
struct sta_info *sta_info_get_by_idx(struct ieee80211_sub_if_data *sdata,
int idx)
{
@@ -396,6 +410,9 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
skb_queue_head_init(&sta->ps_tx_buf[i]);
skb_queue_head_init(&sta->tx_filtered[i]);
sta->airtime[i].deficit = sta->airtime_weight;
+ atomic_set(&sta->airtime[i].aql_tx_pending, 0);
+ sta->airtime[i].aql_limit_low = local->aql_txq_limit_low[i];
+ sta->airtime[i].aql_limit_high = local->aql_txq_limit_high[i];
}
for (i = 0; i < IEEE80211_NUM_TIDS; i++)
@@ -1893,6 +1910,41 @@ void ieee80211_sta_register_airtime(struct ieee80211_sta *pubsta, u8 tid,
}
EXPORT_SYMBOL(ieee80211_sta_register_airtime);
+void ieee80211_sta_update_pending_airtime(struct ieee80211_local *local,
+ struct sta_info *sta, u8 ac,
+ u16 tx_airtime, bool tx_completed)
+{
+ int tx_pending;
+
+ if (!tx_completed) {
+ if (sta)
+ atomic_add(tx_airtime,
+ &sta->airtime[ac].aql_tx_pending);
+
+ atomic_add(tx_airtime, &local->aql_total_pending_airtime);
+ return;
+ }
+
+ if (sta) {
+ tx_pending = atomic_sub_return(tx_airtime,
+ &sta->airtime[ac].aql_tx_pending);
+ if (WARN_ONCE(tx_pending < 0,
+ "STA %pM AC %d txq pending airtime underflow: %u, %u",
+ sta->addr, ac, tx_pending, tx_airtime))
+ atomic_cmpxchg(&sta->airtime[ac].aql_tx_pending,
+ tx_pending, 0);
+ }
+
+ tx_pending = atomic_sub_return(tx_airtime,
+ &local->aql_total_pending_airtime);
+ if (WARN_ONCE(tx_pending < 0,
+ "Device %s AC %d pending airtime underflow: %u, %u",
+ wiphy_name(local->hw.wiphy), ac, tx_pending,
+ tx_airtime))
+ atomic_cmpxchg(&local->aql_total_pending_airtime,
+ tx_pending, 0);
+}
+
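
On completion the pending counter is decremented and, if it underflows (a completion raced with a reset or was reported twice), clamped back to zero with a compare-and-swap so a transient accounting error cannot throttle the queue forever. The same pattern in portable C11 atomics, as a sketch rather than the kernel primitives:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int pending = 500;	/* hypothetical pending airtime */

static void complete_airtime(int airtime)
{
	int after = atomic_fetch_sub(&pending, airtime) - airtime;

	if (after < 0) {
		int expected = after;

		/* clamp to zero unless someone else fixed it first */
		atomic_compare_exchange_strong(&pending, &expected, 0);
	}
}

int main(void)
{
	complete_airtime(700);	/* over-report drives the counter negative */
	printf("pending = %d\n", atomic_load(&pending));	/* prints 0 */
	return 0;
}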
int sta_info_move_state(struct sta_info *sta,
enum ieee80211_sta_state new_state)
{
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index 369c2dddce52..ad5d8a4ae56d 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -127,13 +127,21 @@ enum ieee80211_agg_stop_reason {
/* Debugfs flags to enable/disable use of RX/TX airtime in scheduler */
#define AIRTIME_USE_TX BIT(0)
#define AIRTIME_USE_RX BIT(1)
+#define AIRTIME_USE_AQL BIT(2)
struct airtime_info {
u64 rx_airtime;
u64 tx_airtime;
s64 deficit;
+ atomic_t aql_tx_pending; /* Estimated airtime for frames pending */
+ u32 aql_limit_low;
+ u32 aql_limit_high;
};
+void ieee80211_sta_update_pending_airtime(struct ieee80211_local *local,
+ struct sta_info *sta, u8 ac,
+ u16 tx_airtime, bool tx_completed);
+
struct sta_info;
/**
@@ -725,6 +733,10 @@ struct sta_info *sta_info_get(struct ieee80211_sub_if_data *sdata,
struct sta_info *sta_info_get_bss(struct ieee80211_sub_if_data *sdata,
const u8 *addr);
+/* user must hold sta_mtx or be in RCU critical section */
+struct sta_info *sta_info_get_by_addrs(struct ieee80211_local *local,
+ const u8 *sta_addr, const u8 *vif_addr);
+
#define for_each_sta_info(local, _addr, _sta, _tmp) \
rhl_for_each_entry_rcu(_sta, _tmp, \
sta_info_hash_lookup(local, _addr), hash_node)
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index ab8ba5835ca0..b720feaf9a74 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -670,12 +670,26 @@ static void ieee80211_report_used_skb(struct ieee80211_local *local,
struct sk_buff *skb, bool dropped)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ u16 tx_time_est = ieee80211_info_get_tx_time_est(info);
struct ieee80211_hdr *hdr = (void *)skb->data;
bool acked = info->flags & IEEE80211_TX_STAT_ACK;
if (dropped)
acked = false;
+ if (tx_time_est) {
+ struct sta_info *sta;
+
+ rcu_read_lock();
+
+ sta = sta_info_get_by_addrs(local, hdr->addr1, hdr->addr2);
+ ieee80211_sta_update_pending_airtime(local, sta,
+ skb_get_queue_mapping(skb),
+ tx_time_est,
+ true);
+ rcu_read_unlock();
+ }
+
if (info->flags & IEEE80211_TX_INTFL_MLME_CONN_TX) {
struct ieee80211_sub_if_data *sdata;
@@ -877,6 +891,7 @@ static void __ieee80211_tx_status(struct ieee80211_hw *hw,
struct ieee80211_bar *bar;
int shift = 0;
int tid = IEEE80211_NUM_TIDS;
+ u16 tx_time_est;
rates_idx = ieee80211_tx_get_rates(hw, info, &retry_count);
@@ -986,6 +1001,17 @@ static void __ieee80211_tx_status(struct ieee80211_hw *hw,
ieee80211_sta_register_airtime(&sta->sta, tid,
info->status.tx_time, 0);
+ if ((tx_time_est = ieee80211_info_get_tx_time_est(info)) > 0) {
+ /* Do this here to avoid the expensive lookup of the sta
+ * in ieee80211_report_used_skb().
+ */
+ ieee80211_sta_update_pending_airtime(local, sta,
+ skb_get_queue_mapping(skb),
+ tx_time_est,
+ true);
+ ieee80211_info_set_tx_time_est(info, 0);
+ }
+
if (ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) {
if (info->flags & IEEE80211_TX_STAT_ACK) {
if (sta->status_stats.lost_packets)
@@ -1030,7 +1056,8 @@ static void __ieee80211_tx_status(struct ieee80211_hw *hw,
I802_DEBUG_INC(local->dot11FailedCount);
}
- if (ieee80211_is_nullfunc(fc) && ieee80211_has_pm(fc) &&
+ if ((ieee80211_is_nullfunc(fc) || ieee80211_is_qos_nullfunc(fc)) &&
+ ieee80211_has_pm(fc) &&
ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS) &&
!(info->flags & IEEE80211_TX_CTL_INJECTED) &&
local->ps_sdata && !(local->scanning)) {
@@ -1073,19 +1100,13 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
.skb = skb,
.info = IEEE80211_SKB_CB(skb),
};
- struct rhlist_head *tmp;
struct sta_info *sta;
rcu_read_lock();
- for_each_sta_info(local, hdr->addr1, sta, tmp) {
- /* skip wrong virtual interface */
- if (!ether_addr_equal(hdr->addr2, sta->sdata->vif.addr))
- continue;
-
+ sta = sta_info_get_by_addrs(local, hdr->addr1, hdr->addr2);
+ if (sta)
status.sta = &sta->sta;
- break;
- }
__ieee80211_tx_status(hw, &status);
rcu_read_unlock();
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 1fa422782905..b696b9136f4c 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1617,7 +1617,7 @@ static bool ieee80211_queue_skb(struct ieee80211_local *local,
static bool ieee80211_tx_frags(struct ieee80211_local *local,
struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
+ struct sta_info *sta,
struct sk_buff_head *skbs,
bool txpending)
{
@@ -1679,7 +1679,7 @@ static bool ieee80211_tx_frags(struct ieee80211_local *local,
spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
info->control.vif = vif;
- control.sta = sta;
+ control.sta = sta ? &sta->sta : NULL;
__skb_unlink(skb, skbs);
drv_tx(local, &control, skb);
@@ -1698,7 +1698,6 @@ static bool __ieee80211_tx(struct ieee80211_local *local,
struct ieee80211_tx_info *info;
struct ieee80211_sub_if_data *sdata;
struct ieee80211_vif *vif;
- struct ieee80211_sta *pubsta;
struct sk_buff *skb;
bool result = true;
__le16 fc;
@@ -1713,11 +1712,6 @@ static bool __ieee80211_tx(struct ieee80211_local *local,
if (sta && !sta->uploaded)
sta = NULL;
- if (sta)
- pubsta = &sta->sta;
- else
- pubsta = NULL;
-
switch (sdata->vif.type) {
case NL80211_IFTYPE_MONITOR:
if (sdata->u.mntr.flags & MONITOR_FLAG_ACTIVE) {
@@ -1744,8 +1738,7 @@ static bool __ieee80211_tx(struct ieee80211_local *local,
break;
}
- result = ieee80211_tx_frags(local, vif, pubsta, skbs,
- txpending);
+ result = ieee80211_tx_frags(local, vif, sta, skbs, txpending);
ieee80211_tpt_led_trig_tx(local, fc, led_len);
@@ -2277,6 +2270,9 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
* isn't always enough to find the interface to use; for proper
* VLAN/WDS support we will need a different mechanism (which
* likely isn't going to be monitor interfaces).
+ *
+ * This is necessary, for example, for old hostapd versions that
+ * don't use nl80211-based management TX/RX.
*/
sdata = IEEE80211_DEV_TO_SUB_IF(dev);
@@ -2424,6 +2420,33 @@ static int ieee80211_lookup_ra_sta(struct ieee80211_sub_if_data *sdata,
return 0;
}
+static int ieee80211_store_ack_skb(struct ieee80211_local *local,
+ struct sk_buff *skb,
+ u32 *info_flags)
+{
+ struct sk_buff *ack_skb = skb_clone_sk(skb);
+ u16 info_id = 0;
+
+ if (ack_skb) {
+ unsigned long flags;
+ int id;
+
+ spin_lock_irqsave(&local->ack_status_lock, flags);
+ id = idr_alloc(&local->ack_status_frames, ack_skb,
+ 1, 0x40, GFP_ATOMIC);
+ spin_unlock_irqrestore(&local->ack_status_lock, flags);
+
+ if (id >= 0) {
+ info_id = id;
+ *info_flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
+ } else {
+ kfree_skb(ack_skb);
+ }
+ }
+
+ return info_id;
+}
+
/**
* ieee80211_build_hdr - build 802.11 header in the given frame
* @sdata: virtual interface to build the header for
@@ -2717,26 +2740,8 @@ static struct sk_buff *ieee80211_build_hdr(struct ieee80211_sub_if_data *sdata,
}
if (unlikely(!multicast && skb->sk &&
- skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS)) {
- struct sk_buff *ack_skb = skb_clone_sk(skb);
-
- if (ack_skb) {
- unsigned long flags;
- int id;
-
- spin_lock_irqsave(&local->ack_status_lock, flags);
- id = idr_alloc(&local->ack_status_frames, ack_skb,
- 1, 0x10000, GFP_ATOMIC);
- spin_unlock_irqrestore(&local->ack_status_lock, flags);
-
- if (id >= 0) {
- info_id = id;
- info_flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
- } else {
- kfree_skb(ack_skb);
- }
- }
- }
+ skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS))
+ info_id = ieee80211_store_ack_skb(local, skb, &info_flags);
/*
* If the skb is shared we need to obtain our own copy.
@@ -3529,7 +3534,7 @@ static bool ieee80211_xmit_fast(struct ieee80211_sub_if_data *sdata,
struct ieee80211_sub_if_data, u.ap);
__skb_queue_tail(&tx.skbs, skb);
- ieee80211_tx_frags(local, &sdata->vif, &sta->sta, &tx.skbs, false);
+ ieee80211_tx_frags(local, &sdata->vif, sta, &tx.skbs, false);
return true;
}
@@ -3549,6 +3554,9 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
WARN_ON_ONCE(softirq_count() == 0);
+ if (!ieee80211_txq_airtime_check(hw, txq))
+ return NULL;
+
begin:
spin_lock_bh(&fq->lock);
@@ -3659,6 +3667,21 @@ begin:
}
IEEE80211_SKB_CB(skb)->control.vif = vif;
+
+ if (local->airtime_flags & AIRTIME_USE_AQL) {
+ u32 airtime;
+
+ airtime = ieee80211_calc_expected_tx_airtime(hw, vif, txq->sta,
+ skb->len);
+ if (airtime) {
+ airtime = ieee80211_info_set_tx_time_est(info, airtime);
+ ieee80211_sta_update_pending_airtime(local, tx.sta,
+ txq->ac,
+ airtime,
+ false);
+ }
+ }
+
return skb;
out:
@@ -3672,7 +3695,8 @@ struct ieee80211_txq *ieee80211_next_txq(struct ieee80211_hw *hw, u8 ac)
{
struct ieee80211_local *local = hw_to_local(hw);
struct ieee80211_txq *ret = NULL;
- struct txq_info *txqi = NULL;
+ struct txq_info *txqi = NULL, *head = NULL;
+ bool found_eligible_txq = false;
spin_lock_bh(&local->active_txq_lock[ac]);
@@ -3683,13 +3707,30 @@ struct ieee80211_txq *ieee80211_next_txq(struct ieee80211_hw *hw, u8 ac)
if (!txqi)
goto out;
+ if (txqi == head) {
+ if (!found_eligible_txq)
+ goto out;
+ else
+ found_eligible_txq = false;
+ }
+
+ if (!head)
+ head = txqi;
+
if (txqi->txq.sta) {
struct sta_info *sta = container_of(txqi->txq.sta,
- struct sta_info, sta);
+ struct sta_info, sta);
+ bool aql_check = ieee80211_txq_airtime_check(hw, &txqi->txq);
+ s64 deficit = sta->airtime[txqi->txq.ac].deficit;
+
+ if (aql_check)
+ found_eligible_txq = true;
- if (sta->airtime[txqi->txq.ac].deficit < 0) {
+ if (deficit < 0)
sta->airtime[txqi->txq.ac].deficit +=
sta->airtime_weight;
+
+ if (deficit < 0 || !aql_check) {
list_move_tail(&txqi->schedule_order,
&local->active_txqs[txqi->txq.ac]);
goto begin;
@@ -3743,6 +3784,33 @@ void __ieee80211_schedule_txq(struct ieee80211_hw *hw,
}
EXPORT_SYMBOL(__ieee80211_schedule_txq);
+bool ieee80211_txq_airtime_check(struct ieee80211_hw *hw,
+ struct ieee80211_txq *txq)
+{
+ struct sta_info *sta;
+ struct ieee80211_local *local = hw_to_local(hw);
+
+ if (!(local->airtime_flags & AIRTIME_USE_AQL))
+ return true;
+
+ if (!txq->sta)
+ return true;
+
+ sta = container_of(txq->sta, struct sta_info, sta);
+ if (atomic_read(&sta->airtime[txq->ac].aql_tx_pending) <
+ sta->airtime[txq->ac].aql_limit_low)
+ return true;
+
+ if (atomic_read(&local->aql_total_pending_airtime) <
+ local->aql_threshold &&
+ atomic_read(&sta->airtime[txq->ac].aql_tx_pending) <
+ sta->airtime[txq->ac].aql_limit_high)
+ return true;
+
+ return false;
+}
+EXPORT_SYMBOL(ieee80211_txq_airtime_check);
+
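
The check lets every station queue up to its per-AC low limit unconditionally, and up to its high limit only while the device-wide total stays under aql_threshold. The decision logic distilled into a standalone sketch with hypothetical values:

#include <stdbool.h>
#include <stdio.h>

struct aql_state {
	unsigned int sta_pending, sta_low, sta_high;
	unsigned int dev_pending, dev_threshold;
};

static bool airtime_check(const struct aql_state *a)
{
	if (a->sta_pending < a->sta_low)
		return true;	/* always allowed up to the low limit */

	/* between low and high: only while the device has headroom */
	return a->dev_pending < a->dev_threshold &&
	       a->sta_pending < a->sta_high;
}

int main(void)
{
	struct aql_state a = {
		.sta_pending = 6000, .sta_low = 5000, .sta_high = 12000,
		.dev_pending = 10000, .dev_threshold = 24000,
	};

	printf("%s\n", airtime_check(&a) ? "allow" : "throttle");	/* allow */
	return 0;
}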
bool ieee80211_txq_may_transmit(struct ieee80211_hw *hw,
struct ieee80211_txq *txq)
{
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index 4fc075b612fe..5e9b2eb24349 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -120,7 +120,8 @@ obj-$(CONFIG_NFT_FWD_NETDEV) += nft_fwd_netdev.o
# flow table infrastructure
obj-$(CONFIG_NF_FLOW_TABLE) += nf_flow_table.o
-nf_flow_table-objs := nf_flow_table_core.o nf_flow_table_ip.o
+nf_flow_table-objs := nf_flow_table_core.o nf_flow_table_ip.o \
+ nf_flow_table_offload.o
obj-$(CONFIG_NF_FLOW_TABLE_INET) += nf_flow_table_inet.o
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index 5d5bdf450091..78f046ec506f 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -536,6 +536,26 @@ int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state,
}
EXPORT_SYMBOL(nf_hook_slow);
+void nf_hook_slow_list(struct list_head *head, struct nf_hook_state *state,
+ const struct nf_hook_entries *e)
+{
+ struct sk_buff *skb, *next;
+ struct list_head sublist;
+ int ret;
+
+ INIT_LIST_HEAD(&sublist);
+
+ list_for_each_entry_safe(skb, next, head, list) {
+ skb_list_del_init(skb);
+ ret = nf_hook_slow(skb, state, e, 0);
+ if (ret == 1)
+ list_add_tail(&skb->list, &sublist);
+ }
+ /* Put passed packets back on main list */
+ list_splice(&sublist, head);
+}
+EXPORT_SYMBOL(nf_hook_slow_list);
+
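
nf_hook_slow_list() unlinks each skb, runs it through the hook chain, and re-links only the accepted ones; packets dropped or stolen by a hook never rejoin the list. The same keep-the-survivors pattern over a plain singly linked list, as an illustrative sketch (the kernel uses its own list_head primitives):

#include <stdbool.h>
#include <stdio.h>

struct pkt {
	int id;
	struct pkt *next;
};

/* stand-in for the hook verdict: accept even ids only */
static bool hook_accepts(const struct pkt *p)
{
	return p->id % 2 == 0;
}

/* keep only accepted packets, preserving their order */
static struct pkt *filter_batch(struct pkt *head)
{
	struct pkt *out = NULL, **tail = &out;

	while (head) {
		struct pkt *next = head->next;

		head->next = NULL;
		if (hook_accepts(head)) {
			*tail = head;
			tail = &head->next;
		}
		head = next;
	}
	return out;
}

int main(void)
{
	struct pkt c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };

	for (struct pkt *p = filter_batch(&a); p; p = p->next)
		printf("kept %d\n", p->id);	/* kept 2 */
	return 0;
}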
/* This needs to be compiled in any case to avoid dependencies between the
* nfnetlink_queue code and nf_conntrack.
*/
diff --git a/net/netfilter/ipset/ip_set_bitmap_gen.h b/net/netfilter/ipset/ip_set_bitmap_gen.h
index 063df74b4647..1abd6f0dc227 100644
--- a/net/netfilter/ipset/ip_set_bitmap_gen.h
+++ b/net/netfilter/ipset/ip_set_bitmap_gen.h
@@ -192,7 +192,7 @@ mtype_del(struct ip_set *set, void *value, const struct ip_set_ext *ext,
}
#ifndef IP_SET_BITMAP_STORED_TIMEOUT
-static inline bool
+static bool
mtype_is_filled(const struct mtype_elem *x)
{
return true;
diff --git a/net/netfilter/ipset/ip_set_bitmap_ip.c b/net/netfilter/ipset/ip_set_bitmap_ip.c
index 11ff9d4a7006..abe8f77d7d23 100644
--- a/net/netfilter/ipset/ip_set_bitmap_ip.c
+++ b/net/netfilter/ipset/ip_set_bitmap_ip.c
@@ -55,7 +55,7 @@ struct bitmap_ip_adt_elem {
u16 id;
};
-static inline u32
+static u32
ip_to_id(const struct bitmap_ip *m, u32 ip)
{
return ((ip & ip_set_hostmask(m->netmask)) - m->first_ip) / m->hosts;
@@ -63,33 +63,33 @@ ip_to_id(const struct bitmap_ip *m, u32 ip)
/* Common functions */
-static inline int
+static int
bitmap_ip_do_test(const struct bitmap_ip_adt_elem *e,
struct bitmap_ip *map, size_t dsize)
{
return !!test_bit(e->id, map->members);
}
-static inline int
+static int
bitmap_ip_gc_test(u16 id, const struct bitmap_ip *map, size_t dsize)
{
return !!test_bit(id, map->members);
}
-static inline int
+static int
bitmap_ip_do_add(const struct bitmap_ip_adt_elem *e, struct bitmap_ip *map,
u32 flags, size_t dsize)
{
return !!test_bit(e->id, map->members);
}
-static inline int
+static int
bitmap_ip_do_del(const struct bitmap_ip_adt_elem *e, struct bitmap_ip *map)
{
return !test_and_clear_bit(e->id, map->members);
}
-static inline int
+static int
bitmap_ip_do_list(struct sk_buff *skb, const struct bitmap_ip *map, u32 id,
size_t dsize)
{
@@ -97,7 +97,7 @@ bitmap_ip_do_list(struct sk_buff *skb, const struct bitmap_ip *map, u32 id,
htonl(map->first_ip + id * map->hosts));
}
-static inline int
+static int
bitmap_ip_do_head(struct sk_buff *skb, const struct bitmap_ip *map)
{
return nla_put_ipaddr4(skb, IPSET_ATTR_IP, htonl(map->first_ip)) ||
@@ -237,6 +237,18 @@ init_map_ip(struct ip_set *set, struct bitmap_ip *map,
return true;
}
+static u32
+range_to_mask(u32 from, u32 to, u8 *bits)
+{
+ u32 mask = 0xFFFFFFFE;
+
+ *bits = 32;
+ while (--(*bits) > 0 && mask && (to & mask) != from)
+ mask <<= 1;
+
+ return mask;
+}
+
static int
bitmap_ip_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
u32 flags)
diff --git a/net/netfilter/ipset/ip_set_bitmap_ipmac.c b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
index 1d4e63326e68..b618713297da 100644
--- a/net/netfilter/ipset/ip_set_bitmap_ipmac.c
+++ b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
@@ -65,7 +65,7 @@ struct bitmap_ipmac_elem {
unsigned char filled;
} __aligned(__alignof__(u64));
-static inline u32
+static u32
ip_to_id(const struct bitmap_ipmac *m, u32 ip)
{
return ip - m->first_ip;
@@ -79,7 +79,7 @@ ip_to_id(const struct bitmap_ipmac *m, u32 ip)
/* Common functions */
-static inline int
+static int
bitmap_ipmac_do_test(const struct bitmap_ipmac_adt_elem *e,
const struct bitmap_ipmac *map, size_t dsize)
{
@@ -94,7 +94,7 @@ bitmap_ipmac_do_test(const struct bitmap_ipmac_adt_elem *e,
return -EAGAIN;
}
-static inline int
+static int
bitmap_ipmac_gc_test(u16 id, const struct bitmap_ipmac *map, size_t dsize)
{
const struct bitmap_ipmac_elem *elem;
@@ -106,13 +106,13 @@ bitmap_ipmac_gc_test(u16 id, const struct bitmap_ipmac *map, size_t dsize)
return elem->filled == MAC_FILLED;
}
-static inline int
+static int
bitmap_ipmac_is_filled(const struct bitmap_ipmac_elem *elem)
{
return elem->filled == MAC_FILLED;
}
-static inline int
+static int
bitmap_ipmac_add_timeout(unsigned long *timeout,
const struct bitmap_ipmac_adt_elem *e,
const struct ip_set_ext *ext, struct ip_set *set,
@@ -139,7 +139,7 @@ bitmap_ipmac_add_timeout(unsigned long *timeout,
return 0;
}
-static inline int
+static int
bitmap_ipmac_do_add(const struct bitmap_ipmac_adt_elem *e,
struct bitmap_ipmac *map, u32 flags, size_t dsize)
{
@@ -177,14 +177,14 @@ bitmap_ipmac_do_add(const struct bitmap_ipmac_adt_elem *e,
return IPSET_ADD_STORE_PLAIN_TIMEOUT;
}
-static inline int
+static int
bitmap_ipmac_do_del(const struct bitmap_ipmac_adt_elem *e,
struct bitmap_ipmac *map)
{
return !test_and_clear_bit(e->id, map->members);
}
-static inline int
+static int
bitmap_ipmac_do_list(struct sk_buff *skb, const struct bitmap_ipmac *map,
u32 id, size_t dsize)
{
@@ -197,7 +197,7 @@ bitmap_ipmac_do_list(struct sk_buff *skb, const struct bitmap_ipmac *map,
nla_put(skb, IPSET_ATTR_ETHER, ETH_ALEN, elem->ether));
}
-static inline int
+static int
bitmap_ipmac_do_head(struct sk_buff *skb, const struct bitmap_ipmac *map)
{
return nla_put_ipaddr4(skb, IPSET_ATTR_IP, htonl(map->first_ip)) ||
diff --git a/net/netfilter/ipset/ip_set_bitmap_port.c b/net/netfilter/ipset/ip_set_bitmap_port.c
index 704a0dda1609..23d6095cb196 100644
--- a/net/netfilter/ipset/ip_set_bitmap_port.c
+++ b/net/netfilter/ipset/ip_set_bitmap_port.c
@@ -46,7 +46,7 @@ struct bitmap_port_adt_elem {
u16 id;
};
-static inline u16
+static u16
port_to_id(const struct bitmap_port *m, u16 port)
{
return port - m->first_port;
@@ -54,34 +54,34 @@ port_to_id(const struct bitmap_port *m, u16 port)
/* Common functions */
-static inline int
+static int
bitmap_port_do_test(const struct bitmap_port_adt_elem *e,
const struct bitmap_port *map, size_t dsize)
{
return !!test_bit(e->id, map->members);
}
-static inline int
+static int
bitmap_port_gc_test(u16 id, const struct bitmap_port *map, size_t dsize)
{
return !!test_bit(id, map->members);
}
-static inline int
+static int
bitmap_port_do_add(const struct bitmap_port_adt_elem *e,
struct bitmap_port *map, u32 flags, size_t dsize)
{
return !!test_bit(e->id, map->members);
}
-static inline int
+static int
bitmap_port_do_del(const struct bitmap_port_adt_elem *e,
struct bitmap_port *map)
{
return !test_and_clear_bit(e->id, map->members);
}
-static inline int
+static int
bitmap_port_do_list(struct sk_buff *skb, const struct bitmap_port *map, u32 id,
size_t dsize)
{
@@ -89,13 +89,40 @@ bitmap_port_do_list(struct sk_buff *skb, const struct bitmap_port *map, u32 id,
htons(map->first_port + id));
}
-static inline int
+static int
bitmap_port_do_head(struct sk_buff *skb, const struct bitmap_port *map)
{
return nla_put_net16(skb, IPSET_ATTR_PORT, htons(map->first_port)) ||
nla_put_net16(skb, IPSET_ATTR_PORT_TO, htons(map->last_port));
}
+static bool
+ip_set_get_ip_port(const struct sk_buff *skb, u8 pf, bool src, __be16 *port)
+{
+ bool ret;
+ u8 proto;
+
+ switch (pf) {
+ case NFPROTO_IPV4:
+ ret = ip_set_get_ip4_port(skb, src, port, &proto);
+ break;
+ case NFPROTO_IPV6:
+ ret = ip_set_get_ip6_port(skb, src, port, &proto);
+ break;
+ default:
+ return false;
+ }
+ if (!ret)
+ return ret;
+ switch (proto) {
+ case IPPROTO_TCP:
+ case IPPROTO_UDP:
+ return true;
+ default:
+ return false;
+ }
+}
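With the helper now private to this file, its single caller keeps the same shape. An abridged sketch of the lookup path in bitmap_port_kadt() (fields and flags as used in this file; error handling shortened):

	__be16 __port;
	u16 port = 0;

	/* extract the TCP/UDP port, bail out for other protocols */
	if (!ip_set_get_ip_port(skb, opt->family,
				opt->flags & IPSET_DIM_ONE_SRC, &__port))
		return -EINVAL;

	port = ntohs(__port);
	if (port < map->first_port || port > map->last_port)
		return -IPSET_ERR_BITMAP_RANGE;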
+
static int
bitmap_port_kadt(struct ip_set *set, const struct sk_buff *skb,
const struct xt_action_param *par,
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index d73d1828216a..169e0a04f814 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -35,7 +35,7 @@ struct ip_set_net {
static unsigned int ip_set_net_id __read_mostly;
-static inline struct ip_set_net *ip_set_pernet(struct net *net)
+static struct ip_set_net *ip_set_pernet(struct net *net)
{
return net_generic(net, ip_set_net_id);
}
@@ -67,13 +67,13 @@ MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_IPSET);
* serialized by ip_set_type_mutex.
*/
-static inline void
+static void
ip_set_type_lock(void)
{
mutex_lock(&ip_set_type_mutex);
}
-static inline void
+static void
ip_set_type_unlock(void)
{
mutex_unlock(&ip_set_type_mutex);
@@ -277,7 +277,7 @@ ip_set_free(void *members)
}
EXPORT_SYMBOL_GPL(ip_set_free);
-static inline bool
+static bool
flag_nested(const struct nlattr *nla)
{
return nla->nla_type & NLA_F_NESTED;
@@ -327,6 +327,83 @@ ip_set_get_ipaddr6(struct nlattr *nla, union nf_inet_addr *ipaddr)
}
EXPORT_SYMBOL_GPL(ip_set_get_ipaddr6);
+static u32
+ip_set_timeout_get(const unsigned long *timeout)
+{
+ u32 t;
+
+ if (*timeout == IPSET_ELEM_PERMANENT)
+ return 0;
+
+ t = jiffies_to_msecs(*timeout - jiffies) / MSEC_PER_SEC;
+ /* Zero value in userspace means no timeout */
+ return t == 0 ? 1 : t;
+}
+
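A quick sketch of the timeout encoding this helper implements (values illustrative):

	/* stored:  absolute expiry time in jiffies; IPSET_ELEM_PERMANENT
	 *          is reported as 0, i.e. "no timeout", to userspace
	 * dumped:  remaining lifetime in seconds; a remainder that rounds
	 *          to 0 is reported as 1 so it cannot be mistaken for a
	 *          permanent element
	 *
	 *   *timeout = jiffies + 30 * HZ  ->  ip_set_timeout_get() ~= 30
	 *   *timeout = jiffies + HZ / 10  ->  rounds to 0, reported as 1
	 */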
+static char *
+ip_set_comment_uget(struct nlattr *tb)
+{
+ return nla_data(tb);
+}
+
+/* Called from uadd only, protected by the set spinlock.
+ * The kadt functions don't use the comment extensions in any way.
+ */
+void
+ip_set_init_comment(struct ip_set *set, struct ip_set_comment *comment,
+ const struct ip_set_ext *ext)
+{
+ struct ip_set_comment_rcu *c = rcu_dereference_protected(comment->c, 1);
+ size_t len = ext->comment ? strlen(ext->comment) : 0;
+
+ if (unlikely(c)) {
+ set->ext_size -= sizeof(*c) + strlen(c->str) + 1;
+ kfree_rcu(c, rcu);
+ rcu_assign_pointer(comment->c, NULL);
+ }
+ if (!len)
+ return;
+ if (unlikely(len > IPSET_MAX_COMMENT_SIZE))
+ len = IPSET_MAX_COMMENT_SIZE;
+ c = kmalloc(sizeof(*c) + len + 1, GFP_ATOMIC);
+ if (unlikely(!c))
+ return;
+ strlcpy(c->str, ext->comment, len + 1);
+ set->ext_size += sizeof(*c) + strlen(c->str) + 1;
+ rcu_assign_pointer(comment->c, c);
+}
+EXPORT_SYMBOL_GPL(ip_set_init_comment);
+
+/* Used only when dumping a set, protected by rcu_read_lock() */
+static int
+ip_set_put_comment(struct sk_buff *skb, const struct ip_set_comment *comment)
+{
+ struct ip_set_comment_rcu *c = rcu_dereference(comment->c);
+
+ if (!c)
+ return 0;
+ return nla_put_string(skb, IPSET_ATTR_COMMENT, c->str);
+}
+
+/* Called from uadd/udel, flush or the garbage collectors protected
+ * by the set spinlock.
+ * Called when the set is destroyed and when there can't be any user
+ * of the set data anymore.
+ */
+static void
+ip_set_comment_free(struct ip_set *set, void *ptr)
+{
+ struct ip_set_comment *comment = ptr;
+ struct ip_set_comment_rcu *c;
+
+ c = rcu_dereference_protected(comment->c, 1);
+ if (unlikely(!c))
+ return;
+ set->ext_size -= sizeof(*c) + strlen(c->str) + 1;
+ kfree_rcu(c, rcu);
+ rcu_assign_pointer(comment->c, NULL);
+}
+
typedef void (*destroyer)(struct ip_set *, void *);
/* ipset data extension types, in size order */
@@ -353,12 +430,12 @@ const struct ip_set_ext_type ip_set_extensions[] = {
.flag = IPSET_FLAG_WITH_COMMENT,
.len = sizeof(struct ip_set_comment),
.align = __alignof__(struct ip_set_comment),
- .destroy = (destroyer) ip_set_comment_free,
+ .destroy = ip_set_comment_free,
},
};
EXPORT_SYMBOL_GPL(ip_set_extensions);
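Dropping the (destroyer) cast is more than cosmetic: calling a function through a pointer of a mismatched type is undefined behaviour and is rejected by indirect-call checkers such as Clang's CFI, which is presumably why ip_set_comment_free() now takes void * and matches the typedef exactly. In sketch form:

	/* old: the callee took struct ip_set_comment *, so the table
	 * needed a cast to the destroyer type:
	 *	.destroy = (destroyer)ip_set_comment_free;
	 * new: the prototype matches void (*)(struct ip_set *, void *)
	 * exactly, and the callee recovers the real type itself:
	 *	struct ip_set_comment *comment = ptr;
	 */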
-static inline bool
+static bool
add_extension(enum ip_set_ext_id id, u32 flags, struct nlattr *tb[])
{
return ip_set_extensions[id].flag ?
@@ -448,6 +525,46 @@ ip_set_get_extensions(struct ip_set *set, struct nlattr *tb[],
}
EXPORT_SYMBOL_GPL(ip_set_get_extensions);
+static u64
+ip_set_get_bytes(const struct ip_set_counter *counter)
+{
+ return (u64)atomic64_read(&(counter)->bytes);
+}
+
+static u64
+ip_set_get_packets(const struct ip_set_counter *counter)
+{
+ return (u64)atomic64_read(&(counter)->packets);
+}
+
+static bool
+ip_set_put_counter(struct sk_buff *skb, const struct ip_set_counter *counter)
+{
+ return nla_put_net64(skb, IPSET_ATTR_BYTES,
+ cpu_to_be64(ip_set_get_bytes(counter)),
+ IPSET_ATTR_PAD) ||
+ nla_put_net64(skb, IPSET_ATTR_PACKETS,
+ cpu_to_be64(ip_set_get_packets(counter)),
+ IPSET_ATTR_PAD);
+}
+
+static bool
+ip_set_put_skbinfo(struct sk_buff *skb, const struct ip_set_skbinfo *skbinfo)
+{
+ /* Send nonzero parameters only */
+ return ((skbinfo->skbmark || skbinfo->skbmarkmask) &&
+ nla_put_net64(skb, IPSET_ATTR_SKBMARK,
+ cpu_to_be64((u64)skbinfo->skbmark << 32 |
+ skbinfo->skbmarkmask),
+ IPSET_ATTR_PAD)) ||
+ (skbinfo->skbprio &&
+ nla_put_net32(skb, IPSET_ATTR_SKBPRIO,
+ cpu_to_be32(skbinfo->skbprio))) ||
+ (skbinfo->skbqueue &&
+ nla_put_net16(skb, IPSET_ATTR_SKBQUEUE,
+ cpu_to_be16(skbinfo->skbqueue)));
+}
+
int
ip_set_put_extensions(struct sk_buff *skb, const struct ip_set *set,
const void *e, bool active)
@@ -473,6 +590,55 @@ ip_set_put_extensions(struct sk_buff *skb, const struct ip_set *set,
}
EXPORT_SYMBOL_GPL(ip_set_put_extensions);
+static bool
+ip_set_match_counter(u64 counter, u64 match, u8 op)
+{
+ switch (op) {
+ case IPSET_COUNTER_NONE:
+ return true;
+ case IPSET_COUNTER_EQ:
+ return counter == match;
+ case IPSET_COUNTER_NE:
+ return counter != match;
+ case IPSET_COUNTER_LT:
+ return counter < match;
+ case IPSET_COUNTER_GT:
+ return counter > match;
+ }
+ return false;
+}
+
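These predicates come together in ip_set_match_extensions(); an abridged sketch of its counter branch (assuming ext_counter() resolves the element's counter extension, as in the exported function that follows):

	if (SET_WITH_COUNTER(set)) {
		struct ip_set_counter *counter = ext_counter(data, set);

		ip_set_update_counter(counter, ext, flags);
		if ((flags & IPSET_FLAG_MATCH_COUNTERS) &&
		    !(ip_set_match_counter(ip_set_get_packets(counter),
					   mext->packets, mext->packets_op) &&
		      ip_set_match_counter(ip_set_get_bytes(counter),
					   mext->bytes, mext->bytes_op)))
			return false;
	}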
+static void
+ip_set_add_bytes(u64 bytes, struct ip_set_counter *counter)
+{
+ atomic64_add((long long)bytes, &(counter)->bytes);
+}
+
+static void
+ip_set_add_packets(u64 packets, struct ip_set_counter *counter)
+{
+ atomic64_add((long long)packets, &(counter)->packets);
+}
+
+static void
+ip_set_update_counter(struct ip_set_counter *counter,
+ const struct ip_set_ext *ext, u32 flags)
+{
+ if (ext->packets != ULLONG_MAX &&
+ !(flags & IPSET_FLAG_SKIP_COUNTER_UPDATE)) {
+ ip_set_add_bytes(ext->bytes, counter);
+ ip_set_add_packets(ext->packets, counter);
+ }
+}
+
+static void
+ip_set_get_skbinfo(struct ip_set_skbinfo *skbinfo,
+ const struct ip_set_ext *ext,
+ struct ip_set_ext *mext, u32 flags)
+{
+ mext->skbinfo = *skbinfo;
+}
+
bool
ip_set_match_extensions(struct ip_set *set, const struct ip_set_ext *ext,
struct ip_set_ext *mext, u32 flags, void *data)
@@ -508,7 +674,7 @@ EXPORT_SYMBOL_GPL(ip_set_match_extensions);
* The set behind an index may change by swapping only, from userspace.
*/
-static inline void
+static void
__ip_set_get(struct ip_set *set)
{
write_lock_bh(&ip_set_ref_lock);
@@ -516,7 +682,7 @@ __ip_set_get(struct ip_set *set)
write_unlock_bh(&ip_set_ref_lock);
}
-static inline void
+static void
__ip_set_put(struct ip_set *set)
{
write_lock_bh(&ip_set_ref_lock);
@@ -528,7 +694,7 @@ __ip_set_put(struct ip_set *set)
/* set->ref can be swapped out by ip_set_swap, netlink events (like dump) need
* a separate reference counter
*/
-static inline void
+static void
__ip_set_put_netlink(struct ip_set *set)
{
write_lock_bh(&ip_set_ref_lock);
@@ -543,7 +709,7 @@ __ip_set_put_netlink(struct ip_set *set)
* so it can't be destroyed (or changed) under our foot.
*/
-static inline struct ip_set *
+static struct ip_set *
ip_set_rcu_get(struct net *net, ip_set_id_t index)
{
struct ip_set *set;
@@ -672,7 +838,7 @@ EXPORT_SYMBOL_GPL(ip_set_get_byname);
*
*/
-static inline void
+static void
__ip_set_put_byindex(struct ip_set_net *inst, ip_set_id_t index)
{
struct ip_set *set;
@@ -1255,6 +1421,30 @@ static int ip_set_swap(struct net *net, struct sock *ctnl, struct sk_buff *skb,
#define DUMP_TYPE(arg) (((u32)(arg)) & 0x0000FFFF)
#define DUMP_FLAGS(arg) (((u32)(arg)) >> 16)
+int
+ip_set_put_flags(struct sk_buff *skb, struct ip_set *set)
+{
+ u32 cadt_flags = 0;
+
+ if (SET_WITH_TIMEOUT(set))
+ if (unlikely(nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
+ htonl(set->timeout))))
+ return -EMSGSIZE;
+ if (SET_WITH_COUNTER(set))
+ cadt_flags |= IPSET_FLAG_WITH_COUNTERS;
+ if (SET_WITH_COMMENT(set))
+ cadt_flags |= IPSET_FLAG_WITH_COMMENT;
+ if (SET_WITH_SKBINFO(set))
+ cadt_flags |= IPSET_FLAG_WITH_SKBINFO;
+ if (SET_WITH_FORCEADD(set))
+ cadt_flags |= IPSET_FLAG_WITH_FORCEADD;
+
+ if (!cadt_flags)
+ return 0;
+ return nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(cadt_flags));
+}
+EXPORT_SYMBOL_GPL(ip_set_put_flags);
+
static int
ip_set_dump_done(struct netlink_callback *cb)
{
diff --git a/net/netfilter/ipset/ip_set_getport.c b/net/netfilter/ipset/ip_set_getport.c
index 2b8f959574b4..36615eb3eae1 100644
--- a/net/netfilter/ipset/ip_set_getport.c
+++ b/net/netfilter/ipset/ip_set_getport.c
@@ -148,31 +148,3 @@ ip_set_get_ip6_port(const struct sk_buff *skb, bool src,
}
EXPORT_SYMBOL_GPL(ip_set_get_ip6_port);
#endif
-
-bool
-ip_set_get_ip_port(const struct sk_buff *skb, u8 pf, bool src, __be16 *port)
-{
- bool ret;
- u8 proto;
-
- switch (pf) {
- case NFPROTO_IPV4:
- ret = ip_set_get_ip4_port(skb, src, port, &proto);
- break;
- case NFPROTO_IPV6:
- ret = ip_set_get_ip6_port(skb, src, port, &proto);
- break;
- default:
- return false;
- }
- if (!ret)
- return ret;
- switch (proto) {
- case IPPROTO_TCP:
- case IPPROTO_UDP:
- return true;
- default:
- return false;
- }
-}
-EXPORT_SYMBOL_GPL(ip_set_get_ip_port);
diff --git a/net/netfilter/ipset/ip_set_hash_gen.h b/net/netfilter/ipset/ip_set_hash_gen.h
index d098d87bc331..7480ce55b5c8 100644
--- a/net/netfilter/ipset/ip_set_hash_gen.h
+++ b/net/netfilter/ipset/ip_set_hash_gen.h
@@ -39,7 +39,7 @@
#ifdef IP_SET_HASH_WITH_MULTI
#define AHASH_MAX(h) ((h)->ahash_max)
-static inline u8
+static u8
tune_ahash_max(u8 curr, u32 multi)
{
u32 n;
@@ -909,7 +909,7 @@ out:
return ret;
}
-static inline int
+static int
mtype_data_match(struct mtype_elem *data, const struct ip_set_ext *ext,
struct ip_set_ext *mext, struct ip_set *set, u32 flags)
{
diff --git a/net/netfilter/ipset/ip_set_hash_ip.c b/net/netfilter/ipset/ip_set_hash_ip.c
index f4432d9fcad0..5d6d68eaf6a9 100644
--- a/net/netfilter/ipset/ip_set_hash_ip.c
+++ b/net/netfilter/ipset/ip_set_hash_ip.c
@@ -44,7 +44,7 @@ struct hash_ip4_elem {
/* Common functions */
-static inline bool
+static bool
hash_ip4_data_equal(const struct hash_ip4_elem *e1,
const struct hash_ip4_elem *e2,
u32 *multi)
@@ -63,7 +63,7 @@ nla_put_failure:
return true;
}
-static inline void
+static void
hash_ip4_data_next(struct hash_ip4_elem *next, const struct hash_ip4_elem *e)
{
next->ip = e->ip;
@@ -171,7 +171,7 @@ struct hash_ip6_elem {
/* Common functions */
-static inline bool
+static bool
hash_ip6_data_equal(const struct hash_ip6_elem *ip1,
const struct hash_ip6_elem *ip2,
u32 *multi)
@@ -179,7 +179,7 @@ hash_ip6_data_equal(const struct hash_ip6_elem *ip1,
return ipv6_addr_equal(&ip1->ip.in6, &ip2->ip.in6);
}
-static inline void
+static void
hash_ip6_netmask(union nf_inet_addr *ip, u8 prefix)
{
ip6_netmask(ip, prefix);
@@ -196,7 +196,7 @@ nla_put_failure:
return true;
}
-static inline void
+static void
hash_ip6_data_next(struct hash_ip6_elem *next, const struct hash_ip6_elem *e)
{
}
diff --git a/net/netfilter/ipset/ip_set_hash_ipmac.c b/net/netfilter/ipset/ip_set_hash_ipmac.c
index 4ce563eb927d..eceb7bc4a93a 100644
--- a/net/netfilter/ipset/ip_set_hash_ipmac.c
+++ b/net/netfilter/ipset/ip_set_hash_ipmac.c
@@ -47,7 +47,7 @@ struct hash_ipmac4_elem {
/* Common functions */
-static inline bool
+static bool
hash_ipmac4_data_equal(const struct hash_ipmac4_elem *e1,
const struct hash_ipmac4_elem *e2,
u32 *multi)
@@ -67,7 +67,7 @@ nla_put_failure:
return true;
}
-static inline void
+static void
hash_ipmac4_data_next(struct hash_ipmac4_elem *next,
const struct hash_ipmac4_elem *e)
{
@@ -154,7 +154,7 @@ struct hash_ipmac6_elem {
/* Common functions */
-static inline bool
+static bool
hash_ipmac6_data_equal(const struct hash_ipmac6_elem *e1,
const struct hash_ipmac6_elem *e2,
u32 *multi)
@@ -175,7 +175,7 @@ nla_put_failure:
return true;
}
-static inline void
+static void
hash_ipmac6_data_next(struct hash_ipmac6_elem *next,
const struct hash_ipmac6_elem *e)
{
diff --git a/net/netfilter/ipset/ip_set_hash_ipmark.c b/net/netfilter/ipset/ip_set_hash_ipmark.c
index 7a1734aad0c5..aba1df617d6e 100644
--- a/net/netfilter/ipset/ip_set_hash_ipmark.c
+++ b/net/netfilter/ipset/ip_set_hash_ipmark.c
@@ -42,7 +42,7 @@ struct hash_ipmark4_elem {
/* Common functions */
-static inline bool
+static bool
hash_ipmark4_data_equal(const struct hash_ipmark4_elem *ip1,
const struct hash_ipmark4_elem *ip2,
u32 *multi)
@@ -64,7 +64,7 @@ nla_put_failure:
return true;
}
-static inline void
+static void
hash_ipmark4_data_next(struct hash_ipmark4_elem *next,
const struct hash_ipmark4_elem *d)
{
@@ -165,7 +165,7 @@ struct hash_ipmark6_elem {
/* Common functions */
-static inline bool
+static bool
hash_ipmark6_data_equal(const struct hash_ipmark6_elem *ip1,
const struct hash_ipmark6_elem *ip2,
u32 *multi)
@@ -187,7 +187,7 @@ nla_put_failure:
return true;
}
-static inline void
+static void
hash_ipmark6_data_next(struct hash_ipmark6_elem *next,
const struct hash_ipmark6_elem *d)
{
diff --git a/net/netfilter/ipset/ip_set_hash_ipport.c b/net/netfilter/ipset/ip_set_hash_ipport.c
index 32e240658334..1ff228717e29 100644
--- a/net/netfilter/ipset/ip_set_hash_ipport.c
+++ b/net/netfilter/ipset/ip_set_hash_ipport.c
@@ -47,7 +47,7 @@ struct hash_ipport4_elem {
/* Common functions */
-static inline bool
+static bool
hash_ipport4_data_equal(const struct hash_ipport4_elem *ip1,
const struct hash_ipport4_elem *ip2,
u32 *multi)
@@ -71,7 +71,7 @@ nla_put_failure:
return true;
}
-static inline void
+static void
hash_ipport4_data_next(struct hash_ipport4_elem *next,
const struct hash_ipport4_elem *d)
{
@@ -202,7 +202,7 @@ struct hash_ipport6_elem {
/* Common functions */
-static inline bool
+static bool
hash_ipport6_data_equal(const struct hash_ipport6_elem *ip1,
const struct hash_ipport6_elem *ip2,
u32 *multi)
@@ -226,7 +226,7 @@ nla_put_failure:
return true;
}
-static inline void
+static void
hash_ipport6_data_next(struct hash_ipport6_elem *next,
const struct hash_ipport6_elem *d)
{
diff --git a/net/netfilter/ipset/ip_set_hash_ipportip.c b/net/netfilter/ipset/ip_set_hash_ipportip.c
index 15d419353179..fa88afd812fa 100644
--- a/net/netfilter/ipset/ip_set_hash_ipportip.c
+++ b/net/netfilter/ipset/ip_set_hash_ipportip.c
@@ -46,7 +46,7 @@ struct hash_ipportip4_elem {
u8 padding;
};
-static inline bool
+static bool
hash_ipportip4_data_equal(const struct hash_ipportip4_elem *ip1,
const struct hash_ipportip4_elem *ip2,
u32 *multi)
@@ -72,7 +72,7 @@ nla_put_failure:
return true;
}
-static inline void
+static void
hash_ipportip4_data_next(struct hash_ipportip4_elem *next,
const struct hash_ipportip4_elem *d)
{
@@ -210,7 +210,7 @@ struct hash_ipportip6_elem {
/* Common functions */
-static inline bool
+static bool
hash_ipportip6_data_equal(const struct hash_ipportip6_elem *ip1,
const struct hash_ipportip6_elem *ip2,
u32 *multi)
@@ -236,7 +236,7 @@ nla_put_failure:
return true;
}
-static inline void
+static void
hash_ipportip6_data_next(struct hash_ipportip6_elem *next,
const struct hash_ipportip6_elem *d)
{
diff --git a/net/netfilter/ipset/ip_set_hash_ipportnet.c b/net/netfilter/ipset/ip_set_hash_ipportnet.c
index 7a4d7afd4121..eef6ecfcb409 100644
--- a/net/netfilter/ipset/ip_set_hash_ipportnet.c
+++ b/net/netfilter/ipset/ip_set_hash_ipportnet.c
@@ -59,7 +59,7 @@ struct hash_ipportnet4_elem {
/* Common functions */
-static inline bool
+static bool
hash_ipportnet4_data_equal(const struct hash_ipportnet4_elem *ip1,
const struct hash_ipportnet4_elem *ip2,
u32 *multi)
@@ -71,25 +71,25 @@ hash_ipportnet4_data_equal(const struct hash_ipportnet4_elem *ip1,
ip1->proto == ip2->proto;
}
-static inline int
+static int
hash_ipportnet4_do_data_match(const struct hash_ipportnet4_elem *elem)
{
return elem->nomatch ? -ENOTEMPTY : 1;
}
-static inline void
+static void
hash_ipportnet4_data_set_flags(struct hash_ipportnet4_elem *elem, u32 flags)
{
elem->nomatch = !!((flags >> 16) & IPSET_FLAG_NOMATCH);
}
-static inline void
+static void
hash_ipportnet4_data_reset_flags(struct hash_ipportnet4_elem *elem, u8 *flags)
{
swap(*flags, elem->nomatch);
}
-static inline void
+static void
hash_ipportnet4_data_netmask(struct hash_ipportnet4_elem *elem, u8 cidr)
{
elem->ip2 &= ip_set_netmask(cidr);
@@ -116,7 +116,7 @@ nla_put_failure:
return true;
}
-static inline void
+static void
hash_ipportnet4_data_next(struct hash_ipportnet4_elem *next,
const struct hash_ipportnet4_elem *d)
{
@@ -308,7 +308,7 @@ struct hash_ipportnet6_elem {
/* Common functions */
-static inline bool
+static bool
hash_ipportnet6_data_equal(const struct hash_ipportnet6_elem *ip1,
const struct hash_ipportnet6_elem *ip2,
u32 *multi)
@@ -320,25 +320,25 @@ hash_ipportnet6_data_equal(const struct hash_ipportnet6_elem *ip1,
ip1->proto == ip2->proto;
}
-static inline int
+static int
hash_ipportnet6_do_data_match(const struct hash_ipportnet6_elem *elem)
{
return elem->nomatch ? -ENOTEMPTY : 1;
}
-static inline void
+static void
hash_ipportnet6_data_set_flags(struct hash_ipportnet6_elem *elem, u32 flags)
{
elem->nomatch = !!((flags >> 16) & IPSET_FLAG_NOMATCH);
}
-static inline void
+static void
hash_ipportnet6_data_reset_flags(struct hash_ipportnet6_elem *elem, u8 *flags)
{
swap(*flags, elem->nomatch);
}
-static inline void
+static void
hash_ipportnet6_data_netmask(struct hash_ipportnet6_elem *elem, u8 cidr)
{
ip6_netmask(&elem->ip2, cidr);
@@ -365,7 +365,7 @@ nla_put_failure:
return true;
}
-static inline void
+static void
hash_ipportnet6_data_next(struct hash_ipportnet6_elem *next,
const struct hash_ipportnet6_elem *d)
{
diff --git a/net/netfilter/ipset/ip_set_hash_mac.c b/net/netfilter/ipset/ip_set_hash_mac.c
index d94c585d33c5..0b61593165ef 100644
--- a/net/netfilter/ipset/ip_set_hash_mac.c
+++ b/net/netfilter/ipset/ip_set_hash_mac.c
@@ -37,7 +37,7 @@ struct hash_mac4_elem {
/* Common functions */
-static inline bool
+static bool
hash_mac4_data_equal(const struct hash_mac4_elem *e1,
const struct hash_mac4_elem *e2,
u32 *multi)
@@ -45,7 +45,7 @@ hash_mac4_data_equal(const struct hash_mac4_elem *e1,
return ether_addr_equal(e1->ether, e2->ether);
}
-static inline bool
+static bool
hash_mac4_data_list(struct sk_buff *skb, const struct hash_mac4_elem *e)
{
if (nla_put(skb, IPSET_ATTR_ETHER, ETH_ALEN, e->ether))
@@ -56,7 +56,7 @@ nla_put_failure:
return true;
}
-static inline void
+static void
hash_mac4_data_next(struct hash_mac4_elem *next,
const struct hash_mac4_elem *e)
{
diff --git a/net/netfilter/ipset/ip_set_hash_net.c b/net/netfilter/ipset/ip_set_hash_net.c
index 3d932de0ad29..136cf0781d3a 100644
--- a/net/netfilter/ipset/ip_set_hash_net.c
+++ b/net/netfilter/ipset/ip_set_hash_net.c
@@ -47,7 +47,7 @@ struct hash_net4_elem {
/* Common functions */
-static inline bool
+static bool
hash_net4_data_equal(const struct hash_net4_elem *ip1,
const struct hash_net4_elem *ip2,
u32 *multi)
@@ -56,25 +56,25 @@ hash_net4_data_equal(const struct hash_net4_elem *ip1,
ip1->cidr == ip2->cidr;
}
-static inline int
+static int
hash_net4_do_data_match(const struct hash_net4_elem *elem)
{
return elem->nomatch ? -ENOTEMPTY : 1;
}
-static inline void
+static void
hash_net4_data_set_flags(struct hash_net4_elem *elem, u32 flags)
{
elem->nomatch = (flags >> 16) & IPSET_FLAG_NOMATCH;
}
-static inline void
+static void
hash_net4_data_reset_flags(struct hash_net4_elem *elem, u8 *flags)
{
swap(*flags, elem->nomatch);
}
-static inline void
+static void
hash_net4_data_netmask(struct hash_net4_elem *elem, u8 cidr)
{
elem->ip &= ip_set_netmask(cidr);
@@ -97,7 +97,7 @@ nla_put_failure:
return true;
}
-static inline void
+static void
hash_net4_data_next(struct hash_net4_elem *next,
const struct hash_net4_elem *d)
{
@@ -212,7 +212,7 @@ struct hash_net6_elem {
/* Common functions */
-static inline bool
+static bool
hash_net6_data_equal(const struct hash_net6_elem *ip1,
const struct hash_net6_elem *ip2,
u32 *multi)
@@ -221,25 +221,25 @@ hash_net6_data_equal(const struct hash_net6_elem *ip1,
ip1->cidr == ip2->cidr;
}
-static inline int
+static int
hash_net6_do_data_match(const struct hash_net6_elem *elem)
{
return elem->nomatch ? -ENOTEMPTY : 1;
}
-static inline void
+static void
hash_net6_data_set_flags(struct hash_net6_elem *elem, u32 flags)
{
elem->nomatch = (flags >> 16) & IPSET_FLAG_NOMATCH;
}
-static inline void
+static void
hash_net6_data_reset_flags(struct hash_net6_elem *elem, u8 *flags)
{
swap(*flags, elem->nomatch);
}
-static inline void
+static void
hash_net6_data_netmask(struct hash_net6_elem *elem, u8 cidr)
{
ip6_netmask(&elem->ip, cidr);
@@ -262,7 +262,7 @@ nla_put_failure:
return true;
}
-static inline void
+static void
hash_net6_data_next(struct hash_net6_elem *next,
const struct hash_net6_elem *d)
{
diff --git a/net/netfilter/ipset/ip_set_hash_netiface.c b/net/netfilter/ipset/ip_set_hash_netiface.c
index 87b29f971226..be5e95a0d876 100644
--- a/net/netfilter/ipset/ip_set_hash_netiface.c
+++ b/net/netfilter/ipset/ip_set_hash_netiface.c
@@ -25,7 +25,8 @@
/* 3 Counters support added */
/* 4 Comments support added */
/* 5 Forceadd support added */
-#define IPSET_TYPE_REV_MAX 6 /* skbinfo support added */
+/* 6 skbinfo support added */
+#define IPSET_TYPE_REV_MAX 7 /* interface wildcard support added */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@netfilter.org>");
@@ -57,12 +58,13 @@ struct hash_netiface4_elem {
u8 cidr;
u8 nomatch;
u8 elem;
+ u8 wildcard;
char iface[IFNAMSIZ];
};
/* Common functions */
-static inline bool
+static bool
hash_netiface4_data_equal(const struct hash_netiface4_elem *ip1,
const struct hash_netiface4_elem *ip2,
u32 *multi)
@@ -71,28 +73,30 @@ hash_netiface4_data_equal(const struct hash_netiface4_elem *ip1,
ip1->cidr == ip2->cidr &&
(++*multi) &&
ip1->physdev == ip2->physdev &&
- strcmp(ip1->iface, ip2->iface) == 0;
+ (ip1->wildcard ?
+ strncmp(ip1->iface, ip2->iface, strlen(ip1->iface)) == 0 :
+ strcmp(ip1->iface, ip2->iface) == 0);
}
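The new wildcard flag turns the interface comparison into a prefix match; a worked example (hypothetical values):

	/*   stored element: iface = "eth", wildcard = 1
	 *   packet meta:    iface = "eth2"
	 *
	 *   strncmp("eth", "eth2", strlen("eth")) == 0  ->  match
	 *
	 * With wildcard unset, only an exact strcmp() hit matches,
	 * as before.
	 */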
-static inline int
+static int
hash_netiface4_do_data_match(const struct hash_netiface4_elem *elem)
{
return elem->nomatch ? -ENOTEMPTY : 1;
}
-static inline void
+static void
hash_netiface4_data_set_flags(struct hash_netiface4_elem *elem, u32 flags)
{
elem->nomatch = (flags >> 16) & IPSET_FLAG_NOMATCH;
}
-static inline void
+static void
hash_netiface4_data_reset_flags(struct hash_netiface4_elem *elem, u8 *flags)
{
swap(*flags, elem->nomatch);
}
-static inline void
+static void
hash_netiface4_data_netmask(struct hash_netiface4_elem *elem, u8 cidr)
{
elem->ip &= ip_set_netmask(cidr);
@@ -103,7 +107,8 @@ static bool
hash_netiface4_data_list(struct sk_buff *skb,
const struct hash_netiface4_elem *data)
{
- u32 flags = data->physdev ? IPSET_FLAG_PHYSDEV : 0;
+ u32 flags = (data->physdev ? IPSET_FLAG_PHYSDEV : 0) |
+ (data->wildcard ? IPSET_FLAG_IFACE_WILDCARD : 0);
if (data->nomatch)
flags |= IPSET_FLAG_NOMATCH;
@@ -119,7 +124,7 @@ nla_put_failure:
return true;
}
-static inline void
+static void
hash_netiface4_data_next(struct hash_netiface4_elem *next,
const struct hash_netiface4_elem *d)
{
@@ -229,6 +234,8 @@ hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[],
e.physdev = 1;
if (cadt_flags & IPSET_FLAG_NOMATCH)
flags |= (IPSET_FLAG_NOMATCH << 16);
+ if (cadt_flags & IPSET_FLAG_IFACE_WILDCARD)
+ e.wildcard = 1;
}
if (adt == IPSET_TEST || !tb[IPSET_ATTR_IP_TO]) {
e.ip = htonl(ip & ip_set_hostmask(e.cidr));
@@ -280,12 +287,13 @@ struct hash_netiface6_elem {
u8 cidr;
u8 nomatch;
u8 elem;
+ u8 wildcard;
char iface[IFNAMSIZ];
};
/* Common functions */
-static inline bool
+static bool
hash_netiface6_data_equal(const struct hash_netiface6_elem *ip1,
const struct hash_netiface6_elem *ip2,
u32 *multi)
@@ -294,28 +302,30 @@ hash_netiface6_data_equal(const struct hash_netiface6_elem *ip1,
ip1->cidr == ip2->cidr &&
(++*multi) &&
ip1->physdev == ip2->physdev &&
- strcmp(ip1->iface, ip2->iface) == 0;
+ (ip1->wildcard ?
+ strncmp(ip1->iface, ip2->iface, strlen(ip1->iface)) == 0 :
+ strcmp(ip1->iface, ip2->iface) == 0);
}
-static inline int
+static int
hash_netiface6_do_data_match(const struct hash_netiface6_elem *elem)
{
return elem->nomatch ? -ENOTEMPTY : 1;
}
-static inline void
+static void
hash_netiface6_data_set_flags(struct hash_netiface6_elem *elem, u32 flags)
{
elem->nomatch = (flags >> 16) & IPSET_FLAG_NOMATCH;
}
-static inline void
+static void
hash_netiface6_data_reset_flags(struct hash_netiface6_elem *elem, u8 *flags)
{
swap(*flags, elem->nomatch);
}
-static inline void
+static void
hash_netiface6_data_netmask(struct hash_netiface6_elem *elem, u8 cidr)
{
ip6_netmask(&elem->ip, cidr);
@@ -326,7 +336,8 @@ static bool
hash_netiface6_data_list(struct sk_buff *skb,
const struct hash_netiface6_elem *data)
{
- u32 flags = data->physdev ? IPSET_FLAG_PHYSDEV : 0;
+ u32 flags = (data->physdev ? IPSET_FLAG_PHYSDEV : 0) |
+ (data->wildcard ? IPSET_FLAG_IFACE_WILDCARD : 0);
if (data->nomatch)
flags |= IPSET_FLAG_NOMATCH;
@@ -342,7 +353,7 @@ nla_put_failure:
return true;
}
-static inline void
+static void
hash_netiface6_data_next(struct hash_netiface6_elem *next,
const struct hash_netiface6_elem *d)
{
@@ -440,6 +451,8 @@ hash_netiface6_uadt(struct ip_set *set, struct nlattr *tb[],
e.physdev = 1;
if (cadt_flags & IPSET_FLAG_NOMATCH)
flags |= (IPSET_FLAG_NOMATCH << 16);
+ if (cadt_flags & IPSET_FLAG_IFACE_WILDCARD)
+ e.wildcard = 1;
}
ret = adtfn(set, &e, &ext, &ext, flags);
diff --git a/net/netfilter/ipset/ip_set_hash_netnet.c b/net/netfilter/ipset/ip_set_hash_netnet.c
index 4398322fad59..da4ef910b12d 100644
--- a/net/netfilter/ipset/ip_set_hash_netnet.c
+++ b/net/netfilter/ipset/ip_set_hash_netnet.c
@@ -52,7 +52,7 @@ struct hash_netnet4_elem {
/* Common functions */
-static inline bool
+static bool
hash_netnet4_data_equal(const struct hash_netnet4_elem *ip1,
const struct hash_netnet4_elem *ip2,
u32 *multi)
@@ -61,32 +61,32 @@ hash_netnet4_data_equal(const struct hash_netnet4_elem *ip1,
ip1->ccmp == ip2->ccmp;
}
-static inline int
+static int
hash_netnet4_do_data_match(const struct hash_netnet4_elem *elem)
{
return elem->nomatch ? -ENOTEMPTY : 1;
}
-static inline void
+static void
hash_netnet4_data_set_flags(struct hash_netnet4_elem *elem, u32 flags)
{
elem->nomatch = (flags >> 16) & IPSET_FLAG_NOMATCH;
}
-static inline void
+static void
hash_netnet4_data_reset_flags(struct hash_netnet4_elem *elem, u8 *flags)
{
swap(*flags, elem->nomatch);
}
-static inline void
+static void
hash_netnet4_data_reset_elem(struct hash_netnet4_elem *elem,
struct hash_netnet4_elem *orig)
{
elem->ip[1] = orig->ip[1];
}
-static inline void
+static void
hash_netnet4_data_netmask(struct hash_netnet4_elem *elem, u8 cidr, bool inner)
{
if (inner) {
@@ -117,7 +117,7 @@ nla_put_failure:
return true;
}
-static inline void
+static void
hash_netnet4_data_next(struct hash_netnet4_elem *next,
const struct hash_netnet4_elem *d)
{
@@ -282,7 +282,7 @@ struct hash_netnet6_elem {
/* Common functions */
-static inline bool
+static bool
hash_netnet6_data_equal(const struct hash_netnet6_elem *ip1,
const struct hash_netnet6_elem *ip2,
u32 *multi)
@@ -292,32 +292,32 @@ hash_netnet6_data_equal(const struct hash_netnet6_elem *ip1,
ip1->ccmp == ip2->ccmp;
}
-static inline int
+static int
hash_netnet6_do_data_match(const struct hash_netnet6_elem *elem)
{
return elem->nomatch ? -ENOTEMPTY : 1;
}
-static inline void
+static void
hash_netnet6_data_set_flags(struct hash_netnet6_elem *elem, u32 flags)
{
elem->nomatch = (flags >> 16) & IPSET_FLAG_NOMATCH;
}
-static inline void
+static void
hash_netnet6_data_reset_flags(struct hash_netnet6_elem *elem, u8 *flags)
{
swap(*flags, elem->nomatch);
}
-static inline void
+static void
hash_netnet6_data_reset_elem(struct hash_netnet6_elem *elem,
struct hash_netnet6_elem *orig)
{
elem->ip[1] = orig->ip[1];
}
-static inline void
+static void
hash_netnet6_data_netmask(struct hash_netnet6_elem *elem, u8 cidr, bool inner)
{
if (inner) {
@@ -348,7 +348,7 @@ nla_put_failure:
return true;
}
-static inline void
+static void
hash_netnet6_data_next(struct hash_netnet6_elem *next,
const struct hash_netnet6_elem *d)
{
diff --git a/net/netfilter/ipset/ip_set_hash_netport.c b/net/netfilter/ipset/ip_set_hash_netport.c
index 799f2272cc65..34448df80fb9 100644
--- a/net/netfilter/ipset/ip_set_hash_netport.c
+++ b/net/netfilter/ipset/ip_set_hash_netport.c
@@ -57,7 +57,7 @@ struct hash_netport4_elem {
/* Common functions */
-static inline bool
+static bool
hash_netport4_data_equal(const struct hash_netport4_elem *ip1,
const struct hash_netport4_elem *ip2,
u32 *multi)
@@ -68,25 +68,25 @@ hash_netport4_data_equal(const struct hash_netport4_elem *ip1,
ip1->cidr == ip2->cidr;
}
-static inline int
+static int
hash_netport4_do_data_match(const struct hash_netport4_elem *elem)
{
return elem->nomatch ? -ENOTEMPTY : 1;
}
-static inline void
+static void
hash_netport4_data_set_flags(struct hash_netport4_elem *elem, u32 flags)
{
elem->nomatch = !!((flags >> 16) & IPSET_FLAG_NOMATCH);
}
-static inline void
+static void
hash_netport4_data_reset_flags(struct hash_netport4_elem *elem, u8 *flags)
{
swap(*flags, elem->nomatch);
}
-static inline void
+static void
hash_netport4_data_netmask(struct hash_netport4_elem *elem, u8 cidr)
{
elem->ip &= ip_set_netmask(cidr);
@@ -112,7 +112,7 @@ nla_put_failure:
return true;
}
-static inline void
+static void
hash_netport4_data_next(struct hash_netport4_elem *next,
const struct hash_netport4_elem *d)
{
@@ -270,7 +270,7 @@ struct hash_netport6_elem {
/* Common functions */
-static inline bool
+static bool
hash_netport6_data_equal(const struct hash_netport6_elem *ip1,
const struct hash_netport6_elem *ip2,
u32 *multi)
@@ -281,25 +281,25 @@ hash_netport6_data_equal(const struct hash_netport6_elem *ip1,
ip1->cidr == ip2->cidr;
}
-static inline int
+static int
hash_netport6_do_data_match(const struct hash_netport6_elem *elem)
{
return elem->nomatch ? -ENOTEMPTY : 1;
}
-static inline void
+static void
hash_netport6_data_set_flags(struct hash_netport6_elem *elem, u32 flags)
{
elem->nomatch = !!((flags >> 16) & IPSET_FLAG_NOMATCH);
}
-static inline void
+static void
hash_netport6_data_reset_flags(struct hash_netport6_elem *elem, u8 *flags)
{
swap(*flags, elem->nomatch);
}
-static inline void
+static void
hash_netport6_data_netmask(struct hash_netport6_elem *elem, u8 cidr)
{
ip6_netmask(&elem->ip, cidr);
@@ -325,7 +325,7 @@ nla_put_failure:
return true;
}
-static inline void
+static void
hash_netport6_data_next(struct hash_netport6_elem *next,
const struct hash_netport6_elem *d)
{
diff --git a/net/netfilter/ipset/ip_set_hash_netportnet.c b/net/netfilter/ipset/ip_set_hash_netportnet.c
index a82b70e8b9a6..934c1712cba8 100644
--- a/net/netfilter/ipset/ip_set_hash_netportnet.c
+++ b/net/netfilter/ipset/ip_set_hash_netportnet.c
@@ -56,7 +56,7 @@ struct hash_netportnet4_elem {
/* Common functions */
-static inline bool
+static bool
hash_netportnet4_data_equal(const struct hash_netportnet4_elem *ip1,
const struct hash_netportnet4_elem *ip2,
u32 *multi)
@@ -67,32 +67,32 @@ hash_netportnet4_data_equal(const struct hash_netportnet4_elem *ip1,
ip1->proto == ip2->proto;
}
-static inline int
+static int
hash_netportnet4_do_data_match(const struct hash_netportnet4_elem *elem)
{
return elem->nomatch ? -ENOTEMPTY : 1;
}
-static inline void
+static void
hash_netportnet4_data_set_flags(struct hash_netportnet4_elem *elem, u32 flags)
{
elem->nomatch = !!((flags >> 16) & IPSET_FLAG_NOMATCH);
}
-static inline void
+static void
hash_netportnet4_data_reset_flags(struct hash_netportnet4_elem *elem, u8 *flags)
{
swap(*flags, elem->nomatch);
}
-static inline void
+static void
hash_netportnet4_data_reset_elem(struct hash_netportnet4_elem *elem,
struct hash_netportnet4_elem *orig)
{
elem->ip[1] = orig->ip[1];
}
-static inline void
+static void
hash_netportnet4_data_netmask(struct hash_netportnet4_elem *elem,
u8 cidr, bool inner)
{
@@ -126,7 +126,7 @@ nla_put_failure:
return true;
}
-static inline void
+static void
hash_netportnet4_data_next(struct hash_netportnet4_elem *next,
const struct hash_netportnet4_elem *d)
{
@@ -331,7 +331,7 @@ struct hash_netportnet6_elem {
/* Common functions */
-static inline bool
+static bool
hash_netportnet6_data_equal(const struct hash_netportnet6_elem *ip1,
const struct hash_netportnet6_elem *ip2,
u32 *multi)
@@ -343,32 +343,32 @@ hash_netportnet6_data_equal(const struct hash_netportnet6_elem *ip1,
ip1->proto == ip2->proto;
}
-static inline int
+static int
hash_netportnet6_do_data_match(const struct hash_netportnet6_elem *elem)
{
return elem->nomatch ? -ENOTEMPTY : 1;
}
-static inline void
+static void
hash_netportnet6_data_set_flags(struct hash_netportnet6_elem *elem, u32 flags)
{
elem->nomatch = !!((flags >> 16) & IPSET_FLAG_NOMATCH);
}
-static inline void
+static void
hash_netportnet6_data_reset_flags(struct hash_netportnet6_elem *elem, u8 *flags)
{
swap(*flags, elem->nomatch);
}
-static inline void
+static void
hash_netportnet6_data_reset_elem(struct hash_netportnet6_elem *elem,
struct hash_netportnet6_elem *orig)
{
elem->ip[1] = orig->ip[1];
}
-static inline void
+static void
hash_netportnet6_data_netmask(struct hash_netportnet6_elem *elem,
u8 cidr, bool inner)
{
@@ -402,7 +402,7 @@ nla_put_failure:
return true;
}
-static inline void
+static void
hash_netportnet6_data_next(struct hash_netportnet6_elem *next,
const struct hash_netportnet6_elem *d)
{
diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c
index 67ac50104e6f..cd747c0962fd 100644
--- a/net/netfilter/ipset/ip_set_list_set.c
+++ b/net/netfilter/ipset/ip_set_list_set.c
@@ -149,7 +149,7 @@ __list_set_del_rcu(struct rcu_head * rcu)
kfree(e);
}
-static inline void
+static void
list_set_del(struct ip_set *set, struct set_elem *e)
{
struct list_set *map = set->data;
@@ -160,7 +160,7 @@ list_set_del(struct ip_set *set, struct set_elem *e)
call_rcu(&e->rcu, __list_set_del_rcu);
}
-static inline void
+static void
list_set_replace(struct ip_set *set, struct set_elem *e, struct set_elem *old)
{
struct list_set *map = set->data;
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 8b80ab794a92..512259f579d7 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -2402,18 +2402,22 @@ estimator_fail:
return -ENOMEM;
}
-static void __net_exit __ip_vs_cleanup(struct net *net)
+static void __net_exit __ip_vs_cleanup_batch(struct list_head *net_list)
{
- struct netns_ipvs *ipvs = net_ipvs(net);
-
- ip_vs_service_net_cleanup(ipvs); /* ip_vs_flush() with locks */
- ip_vs_conn_net_cleanup(ipvs);
- ip_vs_app_net_cleanup(ipvs);
- ip_vs_protocol_net_cleanup(ipvs);
- ip_vs_control_net_cleanup(ipvs);
- ip_vs_estimator_net_cleanup(ipvs);
- IP_VS_DBG(2, "ipvs netns %d released\n", ipvs->gen);
- net->ipvs = NULL;
+ struct netns_ipvs *ipvs;
+ struct net *net;
+
+ ip_vs_service_nets_cleanup(net_list); /* ip_vs_flush() with locks */
+ list_for_each_entry(net, net_list, exit_list) {
+ ipvs = net_ipvs(net);
+ ip_vs_conn_net_cleanup(ipvs);
+ ip_vs_app_net_cleanup(ipvs);
+ ip_vs_protocol_net_cleanup(ipvs);
+ ip_vs_control_net_cleanup(ipvs);
+ ip_vs_estimator_net_cleanup(ipvs);
+ IP_VS_DBG(2, "ipvs netns %d released\n", ipvs->gen);
+ net->ipvs = NULL;
+ }
}
static int __net_init __ip_vs_dev_init(struct net *net)
@@ -2429,27 +2433,32 @@ hook_fail:
return ret;
}
-static void __net_exit __ip_vs_dev_cleanup(struct net *net)
+static void __net_exit __ip_vs_dev_cleanup_batch(struct list_head *net_list)
{
- struct netns_ipvs *ipvs = net_ipvs(net);
+ struct netns_ipvs *ipvs;
+ struct net *net;
+
EnterFunction(2);
- nf_unregister_net_hooks(net, ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
- ipvs->enable = 0; /* Disable packet reception */
- smp_wmb();
- ip_vs_sync_net_cleanup(ipvs);
+ list_for_each_entry(net, net_list, exit_list) {
+ ipvs = net_ipvs(net);
+ nf_unregister_net_hooks(net, ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
+ ipvs->enable = 0; /* Disable packet reception */
+ smp_wmb();
+ ip_vs_sync_net_cleanup(ipvs);
+ }
LeaveFunction(2);
}
static struct pernet_operations ipvs_core_ops = {
.init = __ip_vs_init,
- .exit = __ip_vs_cleanup,
+ .exit_batch = __ip_vs_cleanup_batch,
.id = &ip_vs_net_id,
.size = sizeof(struct netns_ipvs),
};
static struct pernet_operations ipvs_core_dev_ops = {
.init = __ip_vs_dev_init,
- .exit = __ip_vs_dev_cleanup,
+ .exit_batch = __ip_vs_dev_cleanup_batch,
};
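Switching .exit to .exit_batch hands the whole list of dying namespaces to the callback at once, so per-batch costs (here, one __ip_vs_mutex acquisition in ip_vs_service_nets_cleanup() below) are paid once rather than per namespace. A minimal sketch of the pattern, for a hypothetical subsystem:

static void __net_exit foo_exit_batch(struct list_head *net_list)
{
	struct net *net;

	mutex_lock(&foo_mutex);			/* taken once per batch */
	list_for_each_entry(net, net_list, exit_list)
		foo_cleanup(net_generic(net, foo_net_id));
	mutex_unlock(&foo_mutex);
}

static struct pernet_operations foo_ops = {
	.exit_batch = foo_exit_batch,
};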
/*
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 3cccc88ef817..3be7398901e0 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -1607,14 +1607,20 @@ static int ip_vs_flush(struct netns_ipvs *ipvs, bool cleanup)
/*
* Delete service by {netns} in the service table.
- * Called by __ip_vs_cleanup()
+ * Called by __ip_vs_cleanup_batch()
*/
-void ip_vs_service_net_cleanup(struct netns_ipvs *ipvs)
+void ip_vs_service_nets_cleanup(struct list_head *net_list)
{
+ struct netns_ipvs *ipvs;
+ struct net *net;
+
EnterFunction(2);
/* Check for "full" addressed entries */
mutex_lock(&__ip_vs_mutex);
- ip_vs_flush(ipvs, true);
+ list_for_each_entry(net, net_list, exit_list) {
+ ipvs = net_ipvs(net);
+ ip_vs_flush(ipvs, true);
+ }
mutex_unlock(&__ip_vs_mutex);
LeaveFunction(2);
}
diff --git a/net/netfilter/ipvs/ip_vs_ovf.c b/net/netfilter/ipvs/ip_vs_ovf.c
index 78b074cd5464..c03066fdd5ca 100644
--- a/net/netfilter/ipvs/ip_vs_ovf.c
+++ b/net/netfilter/ipvs/ip_vs_ovf.c
@@ -5,7 +5,7 @@
* Authors: Raducu Deaconu <rhadoo_io@yahoo.com>
*
* Scheduler implements "overflow" loadbalancing according to number of active
- * connections , will keep all conections to the node with the highest weight
+ * connections, will keep all connections to the node with the highest weight
* and overflow to the next node if the number of connections exceeds the node's
* weight.
* Note that this scheduler might not be suitable for UDP because it only uses
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index 888d3068a492..b1e300f8881b 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -407,12 +407,9 @@ __ip_vs_get_out_rt(struct netns_ipvs *ipvs, int skb_af, struct sk_buff *skb,
goto err_put;
skb_dst_drop(skb);
- if (noref) {
- if (!local)
- skb_dst_set_noref(skb, &rt->dst);
- else
- skb_dst_set(skb, dst_clone(&rt->dst));
- } else
+ if (noref)
+ skb_dst_set_noref(skb, &rt->dst);
+ else
skb_dst_set(skb, &rt->dst);
return local;
@@ -574,12 +571,9 @@ __ip_vs_get_out_rt_v6(struct netns_ipvs *ipvs, int skb_af, struct sk_buff *skb,
goto err_put;
skb_dst_drop(skb);
- if (noref) {
- if (!local)
- skb_dst_set_noref(skb, &rt->dst);
- else
- skb_dst_set(skb, dst_clone(&rt->dst));
- } else
+ if (noref)
+ skb_dst_set_noref(skb, &rt->dst);
+ else
skb_dst_set(skb, &rt->dst);
return local;
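The simplification leans on the reference semantics of the two setters (summarized here, not part of this patch):

	/* skb_dst_set(skb, dst)       - skb takes ownership of a dst
	 *                               reference, released by
	 *                               skb_dst_drop()/kfree_skb()
	 * skb_dst_set_noref(skb, dst) - skb borrows the caller's
	 *                               reference, which must remain
	 *                               valid (e.g. under RCU) for the
	 *                               skb's lifetime in this path
	 */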
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 5cd610b547e0..0af1898af2b8 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -573,7 +573,6 @@ EXPORT_SYMBOL_GPL(nf_ct_tmpl_alloc);
void nf_ct_tmpl_free(struct nf_conn *tmpl)
{
nf_ct_ext_destroy(tmpl);
- nf_ct_ext_free(tmpl);
if (ARCH_KMALLOC_MINALIGN <= NFCT_INFOMASK)
kfree((char *)tmpl - tmpl->proto.tmpl_padto);
@@ -1417,7 +1416,6 @@ void nf_conntrack_free(struct nf_conn *ct)
WARN_ON(atomic_read(&ct->ct_general.use) != 0);
nf_ct_ext_destroy(ct);
- nf_ct_ext_free(ct);
kmem_cache_free(nf_conntrack_cachep, ct);
smp_mb__before_atomic();
atomic_dec(&net->ct.count);
diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
index 6fba74b5aaf7..7956c9f19899 100644
--- a/net/netfilter/nf_conntrack_ecache.c
+++ b/net/netfilter/nf_conntrack_ecache.c
@@ -30,6 +30,7 @@
static DEFINE_MUTEX(nf_ct_ecache_mutex);
#define ECACHE_RETRY_WAIT (HZ/10)
+#define ECACHE_STACK_ALLOC (256 / sizeof(void *))
enum retry_state {
STATE_CONGESTED,
@@ -39,11 +40,11 @@ enum retry_state {
static enum retry_state ecache_work_evict_list(struct ct_pcpu *pcpu)
{
- struct nf_conn *refs[16];
+ struct nf_conn *refs[ECACHE_STACK_ALLOC];
+ enum retry_state ret = STATE_DONE;
struct nf_conntrack_tuple_hash *h;
struct hlist_nulls_node *n;
unsigned int evicted = 0;
- enum retry_state ret = STATE_DONE;
spin_lock(&pcpu->lock);
@@ -54,10 +55,22 @@ static enum retry_state ecache_work_evict_list(struct ct_pcpu *pcpu)
if (!nf_ct_is_confirmed(ct))
continue;
+ /* This ecache access is safe because the ct is on the
+ * pcpu dying list and we hold the spinlock -- the entry
+ * cannot be free'd until after the lock is released.
+ *
+ * This is true even if ct has a refcount of 0: the
+ * cpu that is about to free the entry must remove it
+ * from the dying list and needs the lock to do so.
+ */
e = nf_ct_ecache_find(ct);
if (!e || e->state != NFCT_ECACHE_DESTROY_FAIL)
continue;
+ /* ct is in NFCT_ECACHE_DESTROY_FAIL state, this means
+ * the worker owns this entry: the ct will remain valid
+ * until the worker puts its ct reference.
+ */
if (nf_conntrack_event(IPCT_DESTROY, ct)) {
ret = STATE_CONGESTED;
break;
@@ -189,15 +202,15 @@ void nf_ct_deliver_cached_events(struct nf_conn *ct)
if (notify == NULL)
goto out_unlock;
+ if (!nf_ct_is_confirmed(ct) || nf_ct_is_dying(ct))
+ goto out_unlock;
+
e = nf_ct_ecache_find(ct);
if (e == NULL)
goto out_unlock;
events = xchg(&e->cache, 0);
- if (!nf_ct_is_confirmed(ct) || nf_ct_is_dying(ct))
- goto out_unlock;
-
/* We make a copy of the missed event cache without taking
* the lock, thus we may send missed events twice. However,
* this does not harm and it happens very rarely. */
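Sizing the batch in bytes rather than entries keeps the stack footprint constant across architectures; the arithmetic behind the definition above:

	/* ECACHE_STACK_ALLOC = 256 / sizeof(void *)
	 *
	 *   64-bit: 256 / 8 = 32 pointers (256 bytes of stack)
	 *   32-bit: 256 / 4 = 64 pointers (256 bytes of stack)
	 *
	 * versus the old fixed refs[16]: 128 or 64 bytes respectively.
	 */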
diff --git a/net/netfilter/nf_conntrack_extend.c b/net/netfilter/nf_conntrack_extend.c
index d4ed1e197921..c24e5b64b00c 100644
--- a/net/netfilter/nf_conntrack_extend.c
+++ b/net/netfilter/nf_conntrack_extend.c
@@ -34,21 +34,24 @@ void nf_ct_ext_destroy(struct nf_conn *ct)
t->destroy(ct);
rcu_read_unlock();
}
+
+ kfree(ct->ext);
}
EXPORT_SYMBOL(nf_ct_ext_destroy);
void *nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp)
{
unsigned int newlen, newoff, oldlen, alloc;
- struct nf_ct_ext *old, *new;
struct nf_ct_ext_type *t;
+ struct nf_ct_ext *new;
/* Conntrack must not be confirmed to avoid races on reallocation. */
WARN_ON(nf_ct_is_confirmed(ct));
- old = ct->ext;
- if (old) {
+ if (ct->ext) {
+ const struct nf_ct_ext *old = ct->ext;
+
if (__nf_ct_ext_exist(old, id))
return NULL;
oldlen = old->len;
@@ -68,22 +71,18 @@ void *nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp)
rcu_read_unlock();
alloc = max(newlen, NF_CT_EXT_PREALLOC);
- kmemleak_not_leak(old);
- new = __krealloc(old, alloc, gfp);
+ new = krealloc(ct->ext, alloc, gfp);
if (!new)
return NULL;
- if (!old) {
+ if (!ct->ext)
memset(new->offset, 0, sizeof(new->offset));
- ct->ext = new;
- } else if (new != old) {
- kfree_rcu(old, rcu);
- rcu_assign_pointer(ct->ext, new);
- }
new->offset[id] = newoff;
new->len = newlen;
memset((void *)new + newoff, 0, newlen - newoff);
+
+ ct->ext = new;
return (void *)new + newoff;
}
EXPORT_SYMBOL(nf_ct_ext_add);
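Because the conntrack is guaranteed unconfirmed here, plain krealloc() is safe: no other CPU can observe the old ct->ext, so no RCU grace period is needed. A sketch of the usual caller pattern (as in helpers like nf_ct_acct_ext_add(); abridged):

	struct nf_conn_acct *acct;

	acct = nf_ct_ext_add(ct, NF_CT_EXT_ACCT, GFP_ATOMIC);
	if (!acct)
		return NULL;	/* already present, or allocation failed */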
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index e2d13cd18875..d8d33ef52ce0 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -506,9 +506,45 @@ nla_put_failure:
return -1;
}
+/* all these functions access ct->ext. Caller must either hold a reference
+ * on ct or prevent its deletion by holding either the bucket spinlock or
+ * pcpu dying list lock.
+ */
+static int ctnetlink_dump_extinfo(struct sk_buff *skb,
+ struct nf_conn *ct, u32 type)
+{
+ if (ctnetlink_dump_acct(skb, ct, type) < 0 ||
+ ctnetlink_dump_timestamp(skb, ct) < 0 ||
+ ctnetlink_dump_helpinfo(skb, ct) < 0 ||
+ ctnetlink_dump_labels(skb, ct) < 0 ||
+ ctnetlink_dump_ct_seq_adj(skb, ct) < 0 ||
+ ctnetlink_dump_ct_synproxy(skb, ct) < 0)
+ return -1;
+
+ return 0;
+}
+
+static int ctnetlink_dump_info(struct sk_buff *skb, struct nf_conn *ct)
+{
+ if (ctnetlink_dump_status(skb, ct) < 0 ||
+ ctnetlink_dump_mark(skb, ct) < 0 ||
+ ctnetlink_dump_secctx(skb, ct) < 0 ||
+ ctnetlink_dump_id(skb, ct) < 0 ||
+ ctnetlink_dump_use(skb, ct) < 0 ||
+ ctnetlink_dump_master(skb, ct) < 0)
+ return -1;
+
+ if (!test_bit(IPS_OFFLOAD_BIT, &ct->status) &&
+ (ctnetlink_dump_timeout(skb, ct) < 0 ||
+ ctnetlink_dump_protoinfo(skb, ct) < 0))
+ return -1;
+
+ return 0;
+}
+
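Together with the dump-path changes below, the split yields this policy (sketch):

	/* conntrack table dump   -> ctnetlink_fill_info(..., extinfo = true)
	 * dying list dump        -> extinfo = true  (ct->ext is stable
	 *                           while pcpu->lock is held)
	 * unconfirmed list dump  -> extinfo = false (ct->ext may be
	 *                           krealloc'd underneath us)
	 */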
static int
ctnetlink_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
- struct nf_conn *ct)
+ struct nf_conn *ct, bool extinfo)
{
const struct nf_conntrack_zone *zone;
struct nlmsghdr *nlh;
@@ -552,23 +588,9 @@ ctnetlink_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
NF_CT_DEFAULT_ZONE_DIR) < 0)
goto nla_put_failure;
- if (ctnetlink_dump_status(skb, ct) < 0 ||
- ctnetlink_dump_acct(skb, ct, type) < 0 ||
- ctnetlink_dump_timestamp(skb, ct) < 0 ||
- ctnetlink_dump_helpinfo(skb, ct) < 0 ||
- ctnetlink_dump_mark(skb, ct) < 0 ||
- ctnetlink_dump_secctx(skb, ct) < 0 ||
- ctnetlink_dump_labels(skb, ct) < 0 ||
- ctnetlink_dump_id(skb, ct) < 0 ||
- ctnetlink_dump_use(skb, ct) < 0 ||
- ctnetlink_dump_master(skb, ct) < 0 ||
- ctnetlink_dump_ct_seq_adj(skb, ct) < 0 ||
- ctnetlink_dump_ct_synproxy(skb, ct) < 0)
+ if (ctnetlink_dump_info(skb, ct) < 0)
goto nla_put_failure;
-
- if (!test_bit(IPS_OFFLOAD_BIT, &ct->status) &&
- (ctnetlink_dump_timeout(skb, ct) < 0 ||
- ctnetlink_dump_protoinfo(skb, ct) < 0))
+ if (extinfo && ctnetlink_dump_extinfo(skb, ct, type) < 0)
goto nla_put_failure;
nlmsg_end(skb, nlh);
@@ -953,13 +975,11 @@ restart:
if (!ctnetlink_filter_match(ct, cb->data))
continue;
- rcu_read_lock();
res =
ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
- ct);
- rcu_read_unlock();
+ ct, true);
if (res < 0) {
nf_conntrack_get(&ct->ct_general);
cb->args[1] = (unsigned long)ct;
@@ -1364,10 +1384,8 @@ static int ctnetlink_get_conntrack(struct net *net, struct sock *ctnl,
return -ENOMEM;
}
- rcu_read_lock();
err = ctnetlink_fill_info(skb2, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
- NFNL_MSG_TYPE(nlh->nlmsg_type), ct);
- rcu_read_unlock();
+ NFNL_MSG_TYPE(nlh->nlmsg_type), ct, true);
nf_ct_put(ct);
if (err <= 0)
goto free;
@@ -1429,12 +1447,18 @@ restart:
continue;
cb->args[1] = 0;
}
- rcu_read_lock();
+
+ /* We can't dump extension info for the unconfirmed
+ * list because unconfirmed conntracks can have
+ * ct->ext reallocated (and thus freed).
+ *
+ * In the dying list case ct->ext can't be free'd
+ * until after we drop pcpu->lock.
+ */
res = ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
- ct);
- rcu_read_unlock();
+ ct, dying ? true : false);
if (res < 0) {
if (!atomic_inc_not_zero(&ct->ct_general.use))
continue;
diff --git a/net/netfilter/nf_conntrack_proto_icmp.c b/net/netfilter/nf_conntrack_proto_icmp.c
index 097deba7441a..c2e3dff773bc 100644
--- a/net/netfilter/nf_conntrack_proto_icmp.c
+++ b/net/netfilter/nf_conntrack_proto_icmp.c
@@ -235,11 +235,7 @@ int nf_conntrack_icmpv4_error(struct nf_conn *tmpl,
}
/* Need to track icmp error message? */
- if (icmph->type != ICMP_DEST_UNREACH &&
- icmph->type != ICMP_SOURCE_QUENCH &&
- icmph->type != ICMP_TIME_EXCEEDED &&
- icmph->type != ICMP_PARAMETERPROB &&
- icmph->type != ICMP_REDIRECT)
+ if (!icmp_is_err(icmph->type))
return NF_ACCEPT;
memset(&outer_daddr, 0, sizeof(outer_daddr));
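icmp_is_err() is the include/linux/icmp.h helper added around this series; it folds the five open-coded error types into one predicate, roughly:

static inline bool icmp_is_err(int type)
{
	switch (type) {
	case ICMP_DEST_UNREACH:
	case ICMP_SOURCE_QUENCH:
	case ICMP_TIME_EXCEEDED:
	case ICMP_PARAMETERPROB:
	case ICMP_REDIRECT:
		return true;
	}
	return false;
}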
diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
index 128245efe84a..9889d52eda82 100644
--- a/net/netfilter/nf_flow_table_core.c
+++ b/net/netfilter/nf_flow_table_core.c
@@ -14,24 +14,15 @@
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_tuple.h>
-struct flow_offload_entry {
- struct flow_offload flow;
- struct nf_conn *ct;
- struct rcu_head rcu_head;
-};
-
static DEFINE_MUTEX(flowtable_lock);
static LIST_HEAD(flowtables);
static void
-flow_offload_fill_dir(struct flow_offload *flow, struct nf_conn *ct,
- struct nf_flow_route *route,
+flow_offload_fill_dir(struct flow_offload *flow,
enum flow_offload_tuple_dir dir)
{
struct flow_offload_tuple *ft = &flow->tuplehash[dir].tuple;
- struct nf_conntrack_tuple *ctt = &ct->tuplehash[dir].tuple;
- struct dst_entry *other_dst = route->tuple[!dir].dst;
- struct dst_entry *dst = route->tuple[dir].dst;
+ struct nf_conntrack_tuple *ctt = &flow->ct->tuplehash[dir].tuple;
ft->dir = dir;
@@ -39,12 +30,10 @@ flow_offload_fill_dir(struct flow_offload *flow, struct nf_conn *ct,
case NFPROTO_IPV4:
ft->src_v4 = ctt->src.u3.in;
ft->dst_v4 = ctt->dst.u3.in;
- ft->mtu = ip_dst_mtu_maybe_forward(dst, true);
break;
case NFPROTO_IPV6:
ft->src_v6 = ctt->src.u3.in6;
ft->dst_v6 = ctt->dst.u3.in6;
- ft->mtu = ip6_dst_mtu_forward(dst);
break;
}
@@ -52,37 +41,24 @@ flow_offload_fill_dir(struct flow_offload *flow, struct nf_conn *ct,
ft->l4proto = ctt->dst.protonum;
ft->src_port = ctt->src.u.tcp.port;
ft->dst_port = ctt->dst.u.tcp.port;
-
- ft->iifidx = other_dst->dev->ifindex;
- ft->dst_cache = dst;
}
-struct flow_offload *
-flow_offload_alloc(struct nf_conn *ct, struct nf_flow_route *route)
+struct flow_offload *flow_offload_alloc(struct nf_conn *ct)
{
- struct flow_offload_entry *entry;
struct flow_offload *flow;
if (unlikely(nf_ct_is_dying(ct) ||
!atomic_inc_not_zero(&ct->ct_general.use)))
return NULL;
- entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
- if (!entry)
+ flow = kzalloc(sizeof(*flow), GFP_ATOMIC);
+ if (!flow)
goto err_ct_refcnt;
- flow = &entry->flow;
+ flow->ct = ct;
- if (!dst_hold_safe(route->tuple[FLOW_OFFLOAD_DIR_ORIGINAL].dst))
- goto err_dst_cache_original;
-
- if (!dst_hold_safe(route->tuple[FLOW_OFFLOAD_DIR_REPLY].dst))
- goto err_dst_cache_reply;
-
- entry->ct = ct;
-
- flow_offload_fill_dir(flow, ct, route, FLOW_OFFLOAD_DIR_ORIGINAL);
- flow_offload_fill_dir(flow, ct, route, FLOW_OFFLOAD_DIR_REPLY);
+ flow_offload_fill_dir(flow, FLOW_OFFLOAD_DIR_ORIGINAL);
+ flow_offload_fill_dir(flow, FLOW_OFFLOAD_DIR_REPLY);
if (ct->status & IPS_SRC_NAT)
flow->flags |= FLOW_OFFLOAD_SNAT;
@@ -91,10 +67,6 @@ flow_offload_alloc(struct nf_conn *ct, struct nf_flow_route *route)
return flow;
-err_dst_cache_reply:
- dst_release(route->tuple[FLOW_OFFLOAD_DIR_ORIGINAL].dst);
-err_dst_cache_original:
- kfree(entry);
err_ct_refcnt:
nf_ct_put(ct);
@@ -102,6 +74,56 @@ err_ct_refcnt:
}
EXPORT_SYMBOL_GPL(flow_offload_alloc);
+static int flow_offload_fill_route(struct flow_offload *flow,
+ const struct nf_flow_route *route,
+ enum flow_offload_tuple_dir dir)
+{
+ struct flow_offload_tuple *flow_tuple = &flow->tuplehash[dir].tuple;
+ struct dst_entry *other_dst = route->tuple[!dir].dst;
+ struct dst_entry *dst = route->tuple[dir].dst;
+
+ if (!dst_hold_safe(route->tuple[dir].dst))
+ return -1;
+
+ switch (flow_tuple->l3proto) {
+ case NFPROTO_IPV4:
+ flow_tuple->mtu = ip_dst_mtu_maybe_forward(dst, true);
+ break;
+ case NFPROTO_IPV6:
+ flow_tuple->mtu = ip6_dst_mtu_forward(dst);
+ break;
+ }
+
+ flow_tuple->iifidx = other_dst->dev->ifindex;
+ flow_tuple->dst_cache = dst;
+
+ return 0;
+}
+
+int flow_offload_route_init(struct flow_offload *flow,
+ const struct nf_flow_route *route)
+{
+ int err;
+
+ err = flow_offload_fill_route(flow, route, FLOW_OFFLOAD_DIR_ORIGINAL);
+ if (err < 0)
+ return err;
+
+ err = flow_offload_fill_route(flow, route, FLOW_OFFLOAD_DIR_REPLY);
+ if (err < 0)
+ goto err_route_reply;
+
+ flow->type = NF_FLOW_OFFLOAD_ROUTE;
+
+ return 0;
+
+err_route_reply:
+ dst_release(route->tuple[FLOW_OFFLOAD_DIR_ORIGINAL].dst);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(flow_offload_route_init);
+
static void flow_offload_fixup_tcp(struct ip_ct_tcp *tcp)
{
tcp->state = TCP_CONNTRACK_ESTABLISHED;
@@ -150,17 +172,25 @@ static void flow_offload_fixup_ct(struct nf_conn *ct)
flow_offload_fixup_ct_timeout(ct);
}
-void flow_offload_free(struct flow_offload *flow)
+static void flow_offload_route_release(struct flow_offload *flow)
{
- struct flow_offload_entry *e;
-
dst_release(flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_cache);
dst_release(flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_cache);
- e = container_of(flow, struct flow_offload_entry, flow);
+}
+
+void flow_offload_free(struct flow_offload *flow)
+{
+ switch (flow->type) {
+ case NF_FLOW_OFFLOAD_ROUTE:
+ flow_offload_route_release(flow);
+ break;
+ default:
+ break;
+ }
if (flow->flags & FLOW_OFFLOAD_DYING)
- nf_ct_delete(e->ct, 0, 0);
- nf_ct_put(e->ct);
- kfree_rcu(e, rcu_head);
+ nf_ct_delete(flow->ct, 0, 0);
+ nf_ct_put(flow->ct);
+ kfree_rcu(flow, rcu_head);
}
EXPORT_SYMBOL_GPL(flow_offload_free);
@@ -220,6 +250,9 @@ int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow)
return err;
}
+ if (flow_table->flags & NF_FLOWTABLE_HW_OFFLOAD)
+ nf_flow_offload_add(flow_table, flow);
+
return 0;
}
EXPORT_SYMBOL_GPL(flow_offload_add);
@@ -232,8 +265,6 @@ static inline bool nf_flow_has_expired(const struct flow_offload *flow)
static void flow_offload_del(struct nf_flowtable *flow_table,
struct flow_offload *flow)
{
- struct flow_offload_entry *e;
-
rhashtable_remove_fast(&flow_table->rhashtable,
&flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].node,
nf_flow_offload_rhash_params);
@@ -241,25 +272,21 @@ static void flow_offload_del(struct nf_flowtable *flow_table,
&flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].node,
nf_flow_offload_rhash_params);
- e = container_of(flow, struct flow_offload_entry, flow);
- clear_bit(IPS_OFFLOAD_BIT, &e->ct->status);
+ clear_bit(IPS_OFFLOAD_BIT, &flow->ct->status);
if (nf_flow_has_expired(flow))
- flow_offload_fixup_ct(e->ct);
+ flow_offload_fixup_ct(flow->ct);
else if (flow->flags & FLOW_OFFLOAD_TEARDOWN)
- flow_offload_fixup_ct_timeout(e->ct);
+ flow_offload_fixup_ct_timeout(flow->ct);
flow_offload_free(flow);
}
void flow_offload_teardown(struct flow_offload *flow)
{
- struct flow_offload_entry *e;
-
flow->flags |= FLOW_OFFLOAD_TEARDOWN;
- e = container_of(flow, struct flow_offload_entry, flow);
- flow_offload_fixup_ct_state(e->ct);
+ flow_offload_fixup_ct_state(flow->ct);
}
EXPORT_SYMBOL_GPL(flow_offload_teardown);
@@ -269,7 +296,6 @@ flow_offload_lookup(struct nf_flowtable *flow_table,
{
struct flow_offload_tuple_rhash *tuplehash;
struct flow_offload *flow;
- struct flow_offload_entry *e;
int dir;
tuplehash = rhashtable_lookup(&flow_table->rhashtable, tuple,
@@ -282,8 +308,7 @@ flow_offload_lookup(struct nf_flowtable *flow_table,
if (flow->flags & (FLOW_OFFLOAD_DYING | FLOW_OFFLOAD_TEARDOWN))
return NULL;
- e = container_of(flow, struct flow_offload_entry, flow);
- if (unlikely(nf_ct_is_dying(e->ct)))
+ if (unlikely(nf_ct_is_dying(flow->ct)))
return NULL;
return tuplehash;
@@ -327,12 +352,21 @@ nf_flow_table_iterate(struct nf_flowtable *flow_table,
static void nf_flow_offload_gc_step(struct flow_offload *flow, void *data)
{
struct nf_flowtable *flow_table = data;
- struct flow_offload_entry *e;
- e = container_of(flow, struct flow_offload_entry, flow);
- if (nf_flow_has_expired(flow) || nf_ct_is_dying(e->ct) ||
- (flow->flags & (FLOW_OFFLOAD_DYING | FLOW_OFFLOAD_TEARDOWN)))
- flow_offload_del(flow_table, flow);
+ if (flow->flags & FLOW_OFFLOAD_HW)
+ nf_flow_offload_stats(flow_table, flow);
+
+ if (nf_flow_has_expired(flow) || nf_ct_is_dying(flow->ct) ||
+ (flow->flags & (FLOW_OFFLOAD_DYING | FLOW_OFFLOAD_TEARDOWN))) {
+ if (flow->flags & FLOW_OFFLOAD_HW) {
+ if (!(flow->flags & FLOW_OFFLOAD_HW_DYING))
+ nf_flow_offload_del(flow_table, flow);
+ else if (flow->flags & FLOW_OFFLOAD_HW_DEAD)
+ flow_offload_del(flow_table, flow);
+ } else {
+ flow_offload_del(flow_table, flow);
+ }
+ }
}
static void nf_flow_offload_work_gc(struct work_struct *work)
@@ -465,6 +499,7 @@ int nf_flow_table_init(struct nf_flowtable *flowtable)
int err;
INIT_DEFERRABLE_WORK(&flowtable->gc_work, nf_flow_offload_work_gc);
+ flow_block_init(&flowtable->flow_block);
err = rhashtable_init(&flowtable->rhashtable,
&nf_flow_offload_rhash_params);
@@ -485,15 +520,13 @@ EXPORT_SYMBOL_GPL(nf_flow_table_init);
static void nf_flow_table_do_cleanup(struct flow_offload *flow, void *data)
{
struct net_device *dev = data;
- struct flow_offload_entry *e;
-
- e = container_of(flow, struct flow_offload_entry, flow);
if (!dev) {
flow_offload_teardown(flow);
return;
}
- if (net_eq(nf_ct_net(e->ct), dev_net(dev)) &&
+
+ if (net_eq(nf_ct_net(flow->ct), dev_net(dev)) &&
(flow->tuplehash[0].tuple.iifidx == dev->ifindex ||
flow->tuplehash[1].tuple.iifidx == dev->ifindex))
flow_offload_dead(flow);
@@ -502,6 +535,7 @@ static void nf_flow_table_do_cleanup(struct flow_offload *flow, void *data)
static void nf_flow_table_iterate_cleanup(struct nf_flowtable *flowtable,
struct net_device *dev)
{
+ nf_flow_table_offload_flush(flowtable);
nf_flow_table_iterate(flowtable, nf_flow_table_do_cleanup, dev);
flush_delayed_work(&flowtable->gc_work);
}
@@ -529,5 +563,18 @@ void nf_flow_table_free(struct nf_flowtable *flow_table)
}
EXPORT_SYMBOL_GPL(nf_flow_table_free);
+static int __init nf_flow_table_module_init(void)
+{
+ return nf_flow_table_offload_init();
+}
+
+static void __exit nf_flow_table_module_exit(void)
+{
+ nf_flow_table_offload_exit();
+}
+
+module_init(nf_flow_table_module_init);
+module_exit(nf_flow_table_module_exit);
+
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
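
The recurring e->ct to flow->ct conversions in this file follow from the series moving the conntrack pointer into struct flow_offload itself, so the container_of() walk through the wrapper struct disappears. A minimal before/after sketch (struct layouts are illustrative stand-ins):

    #include <stddef.h>
    #include <stdio.h>

    struct nf_conn { unsigned long status; };

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    /* Before: ct lived in a wrapper around the flow. */
    struct flow_offload_entry_old {
        struct { int flags; } flow;
        struct nf_conn *ct;
    };

    /* After: ct is a member of the flow itself, so no wrapper walk. */
    struct flow_offload_new {
        int flags;
        struct nf_conn *ct;
    };

    int main(void)
    {
        struct nf_conn ct = { .status = 1 };
        struct flow_offload_entry_old e = { .ct = &ct };
        struct flow_offload_entry_old *back =
            container_of(&e.flow, struct flow_offload_entry_old, flow);
        struct flow_offload_new f = { .ct = &ct };

        /* Both reach the same conntrack; the new form needs no cast. */
        printf("%lu %lu\n", back->ct->status, f.ct->status);
        return 0;
    }
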
diff --git a/net/netfilter/nf_flow_table_inet.c b/net/netfilter/nf_flow_table_inet.c
index 593357aedb36..88bedf1ff1ae 100644
--- a/net/netfilter/nf_flow_table_inet.c
+++ b/net/netfilter/nf_flow_table_inet.c
@@ -21,9 +21,34 @@ nf_flow_offload_inet_hook(void *priv, struct sk_buff *skb,
return NF_ACCEPT;
}
+static int nf_flow_rule_route_inet(struct net *net,
+ const struct flow_offload *flow,
+ enum flow_offload_tuple_dir dir,
+ struct nf_flow_rule *flow_rule)
+{
+ const struct flow_offload_tuple *flow_tuple = &flow->tuplehash[dir].tuple;
+ int err;
+
+ switch (flow_tuple->l3proto) {
+ case NFPROTO_IPV4:
+ err = nf_flow_rule_route_ipv4(net, flow, dir, flow_rule);
+ break;
+ case NFPROTO_IPV6:
+ err = nf_flow_rule_route_ipv6(net, flow, dir, flow_rule);
+ break;
+ default:
+ err = -1;
+ break;
+ }
+
+ return err;
+}
+
static struct nf_flowtable_type flowtable_inet = {
.family = NFPROTO_INET,
.init = nf_flow_table_init,
+ .setup = nf_flow_table_offload_setup,
+ .action = nf_flow_rule_route_inet,
.free = nf_flow_table_free,
.hook = nf_flow_offload_inet_hook,
.owner = THIS_MODULE,
diff --git a/net/netfilter/nf_flow_table_offload.c b/net/netfilter/nf_flow_table_offload.c
new file mode 100644
index 000000000000..c54c9a6cc981
--- /dev/null
+++ b/net/netfilter/nf_flow_table_offload.c
@@ -0,0 +1,851 @@
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netfilter.h>
+#include <linux/rhashtable.h>
+#include <linux/netdevice.h>
+#include <linux/tc_act/tc_csum.h>
+#include <net/flow_offload.h>
+#include <net/netfilter/nf_flow_table.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_core.h>
+#include <net/netfilter/nf_conntrack_tuple.h>
+
+static struct work_struct nf_flow_offload_work;
+static DEFINE_SPINLOCK(flow_offload_pending_list_lock);
+static LIST_HEAD(flow_offload_pending_list);
+
+struct flow_offload_work {
+ struct list_head list;
+ enum flow_cls_command cmd;
+ int priority;
+ struct nf_flowtable *flowtable;
+ struct flow_offload *flow;
+};
+
+struct nf_flow_key {
+ struct flow_dissector_key_control control;
+ struct flow_dissector_key_basic basic;
+ union {
+ struct flow_dissector_key_ipv4_addrs ipv4;
+ };
+ struct flow_dissector_key_tcp tcp;
+ struct flow_dissector_key_ports tp;
+} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */
+
+struct nf_flow_match {
+ struct flow_dissector dissector;
+ struct nf_flow_key key;
+ struct nf_flow_key mask;
+};
+
+struct nf_flow_rule {
+ struct nf_flow_match match;
+ struct flow_rule *rule;
+};
+
+#define NF_FLOW_DISSECTOR(__match, __type, __field) \
+ (__match)->dissector.offset[__type] = \
+ offsetof(struct nf_flow_key, __field)
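
The macro records where each dissector key lives inside struct nf_flow_key, so key and mask buffers can be addressed with the same offsets. A standalone sketch of the idea, with a simplified struct and enum standing in for the kernel types:

    #include <stddef.h>
    #include <stdio.h>

    enum { KEY_CONTROL, KEY_BASIC, KEY_IPV4, KEY_MAX };

    struct demo_key {
        unsigned int control;
        unsigned int basic;
        unsigned int ipv4[2];
    };

    struct demo_dissector { size_t offset[KEY_MAX]; };

    #define DEMO_DISSECTOR(d, type, field) \
        ((d)->offset[type] = offsetof(struct demo_key, field))

    int main(void)
    {
        struct demo_dissector d;

        DEMO_DISSECTOR(&d, KEY_CONTROL, control);
        DEMO_DISSECTOR(&d, KEY_BASIC, basic);
        DEMO_DISSECTOR(&d, KEY_IPV4, ipv4);

        /* Consumers index both key and mask buffers with these offsets. */
        printf("control@%zu basic@%zu ipv4@%zu\n",
               d.offset[KEY_CONTROL], d.offset[KEY_BASIC], d.offset[KEY_IPV4]);
        return 0;
    }
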
+
+static int nf_flow_rule_match(struct nf_flow_match *match,
+ const struct flow_offload_tuple *tuple)
+{
+ struct nf_flow_key *mask = &match->mask;
+ struct nf_flow_key *key = &match->key;
+
+ NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_CONTROL, control);
+ NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_BASIC, basic);
+ NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
+ NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_TCP, tcp);
+ NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_PORTS, tp);
+
+ switch (tuple->l3proto) {
+ case AF_INET:
+ key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
+ key->basic.n_proto = htons(ETH_P_IP);
+ key->ipv4.src = tuple->src_v4.s_addr;
+ mask->ipv4.src = 0xffffffff;
+ key->ipv4.dst = tuple->dst_v4.s_addr;
+ mask->ipv4.dst = 0xffffffff;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ mask->basic.n_proto = 0xffff;
+
+ switch (tuple->l4proto) {
+ case IPPROTO_TCP:
+ key->tcp.flags = 0;
+ mask->tcp.flags = TCP_FLAG_RST | TCP_FLAG_FIN;
+ match->dissector.used_keys |= BIT(FLOW_DISSECTOR_KEY_TCP);
+ break;
+ case IPPROTO_UDP:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ key->basic.ip_proto = tuple->l4proto;
+ mask->basic.ip_proto = 0xff;
+
+ key->tp.src = tuple->src_port;
+ mask->tp.src = 0xffff;
+ key->tp.dst = tuple->dst_port;
+ mask->tp.dst = 0xffff;
+
+ match->dissector.used_keys |= BIT(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_PORTS);
+ return 0;
+}
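
The key/mask pairs built above follow the usual flow-offload convention: a packet field matches when it equals the key on every bit set in the mask, and the all-ones masks here request exact matches on addresses, ports and protocol. A tiny sketch of that check:

    #include <stdint.h>
    #include <stdio.h>

    /* Matches iff the field equals the key on every masked bit. */
    static int masked_match(uint32_t field, uint32_t key, uint32_t mask)
    {
        return (field & mask) == (key & mask);
    }

    int main(void)
    {
        uint32_t key  = 0xc0a80001;   /* 192.168.0.1 */
        uint32_t mask = 0xffffffff;   /* exact match, as in the tuple above */

        printf("%d\n", masked_match(0xc0a80001, key, mask)); /* 1 */
        printf("%d\n", masked_match(0xc0a80002, key, mask)); /* 0 */
        return 0;
    }
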
+
+static void flow_offload_mangle(struct flow_action_entry *entry,
+ enum flow_action_mangle_base htype,
+ u32 offset, u8 *value, u8 *mask)
+{
+ entry->id = FLOW_ACTION_MANGLE;
+ entry->mangle.htype = htype;
+ entry->mangle.offset = offset;
+ memcpy(&entry->mangle.mask, mask, sizeof(u32));
+ memcpy(&entry->mangle.val, value, sizeof(u32));
+}
+
+static inline struct flow_action_entry *
+flow_action_entry_next(struct nf_flow_rule *flow_rule)
+{
+ int i = flow_rule->rule->action.num_entries++;
+
+ return &flow_rule->rule->action.entries[i];
+}
+
+static int flow_offload_eth_src(struct net *net,
+ const struct flow_offload *flow,
+ enum flow_offload_tuple_dir dir,
+ struct nf_flow_rule *flow_rule)
+{
+ const struct flow_offload_tuple *tuple = &flow->tuplehash[!dir].tuple;
+ struct flow_action_entry *entry0 = flow_action_entry_next(flow_rule);
+ struct flow_action_entry *entry1 = flow_action_entry_next(flow_rule);
+ struct net_device *dev;
+ u32 mask, val;
+ u16 val16;
+
+ dev = dev_get_by_index(net, tuple->iifidx);
+ if (!dev)
+ return -ENOENT;
+
+ mask = ~0xffff0000;
+ memcpy(&val16, dev->dev_addr, 2);
+ val = val16 << 16;
+ flow_offload_mangle(entry0, FLOW_ACT_MANGLE_HDR_TYPE_ETH, 4,
+ (u8 *)&val, (u8 *)&mask);
+
+ mask = ~0xffffffff;
+ memcpy(&val, dev->dev_addr + 2, 4);
+ flow_offload_mangle(entry1, FLOW_ACT_MANGLE_HDR_TYPE_ETH, 8,
+ (u8 *)&val, (u8 *)&mask);
+ dev_put(dev);
+
+ return 0;
+}
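
Mangle actions operate on 32-bit words, so the 6-byte source MAC is split across two entries: bytes 0-1 land in the upper half of the word at ethernet offset 4, bytes 2-5 fill the word at offset 8. A hedged sketch of how a consumer might apply such a val/mask pair, assuming the tc pedit convention (bits set in the mask are preserved) that the ~0xffff0000 and ~0xffffffff masks above suggest, and a little-endian host for brevity:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Apply one 32-bit mangle: keep bits where mask is set, write val
     * into the rest. */
    static void mangle32(uint8_t *hdr, uint32_t off, uint32_t val, uint32_t mask)
    {
        uint32_t word;

        memcpy(&word, hdr + off, 4);
        word = (word & mask) | (val & ~mask);
        memcpy(hdr + off, &word, 4);
    }

    int main(void)
    {
        uint8_t eth[14] = { 0 };                    /* dst(6) src(6) type(2) */
        uint8_t mac[6] = { 0x02, 0x00, 0x00, 0xaa, 0xbb, 0xcc };
        uint16_t hi;
        uint32_t lo, val;

        /* First two MAC bytes into the upper half of the word at offset 4. */
        memcpy(&hi, mac, 2);
        val = (uint32_t)hi << 16;
        mangle32(eth, 4, val, ~0xffff0000u);

        /* Remaining four bytes replace the whole word at offset 8. */
        memcpy(&lo, mac + 2, 4);
        mangle32(eth, 8, lo, ~0xffffffffu);

        for (int i = 6; i < 12; i++)                /* dump the source MAC */
            printf("%02x%s", eth[i], i == 11 ? "\n" : ":");
        return 0;
    }
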
+
+static int flow_offload_eth_dst(struct net *net,
+ const struct flow_offload *flow,
+ enum flow_offload_tuple_dir dir,
+ struct nf_flow_rule *flow_rule)
+{
+ const struct flow_offload_tuple *tuple = &flow->tuplehash[dir].tuple;
+ struct flow_action_entry *entry0 = flow_action_entry_next(flow_rule);
+ struct flow_action_entry *entry1 = flow_action_entry_next(flow_rule);
+ struct neighbour *n;
+ u32 mask, val;
+ u16 val16;
+
+ n = dst_neigh_lookup(tuple->dst_cache, &tuple->dst_v4);
+ if (!n)
+ return -ENOENT;
+
+ mask = ~0xffffffff;
+ memcpy(&val, n->ha, 4);
+ flow_offload_mangle(entry0, FLOW_ACT_MANGLE_HDR_TYPE_ETH, 0,
+ (u8 *)&val, (u8 *)&mask);
+
+ mask = ~0x0000ffff;
+ memcpy(&val16, n->ha + 4, 2);
+ val = val16;
+ flow_offload_mangle(entry1, FLOW_ACT_MANGLE_HDR_TYPE_ETH, 4,
+ (u8 *)&val, (u8 *)&mask);
+ neigh_release(n);
+
+ return 0;
+}
+
+static void flow_offload_ipv4_snat(struct net *net,
+ const struct flow_offload *flow,
+ enum flow_offload_tuple_dir dir,
+ struct nf_flow_rule *flow_rule)
+{
+ struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
+ u32 mask = ~htonl(0xffffffff);
+ __be32 addr;
+ u32 offset;
+
+ switch (dir) {
+ case FLOW_OFFLOAD_DIR_ORIGINAL:
+ addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v4.s_addr;
+ offset = offsetof(struct iphdr, saddr);
+ break;
+ case FLOW_OFFLOAD_DIR_REPLY:
+ addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v4.s_addr;
+ offset = offsetof(struct iphdr, daddr);
+ break;
+ default:
+ return;
+ }
+
+ flow_offload_mangle(entry, FLOW_ACT_MANGLE_HDR_TYPE_IP4, offset,
+ (u8 *)&addr, (u8 *)&mask);
+}
+
+static void flow_offload_ipv4_dnat(struct net *net,
+ const struct flow_offload *flow,
+ enum flow_offload_tuple_dir dir,
+ struct nf_flow_rule *flow_rule)
+{
+ struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
+ u32 mask = ~htonl(0xffffffff);
+ __be32 addr;
+ u32 offset;
+
+ switch (dir) {
+ case FLOW_OFFLOAD_DIR_ORIGINAL:
+ addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v4.s_addr;
+ offset = offsetof(struct iphdr, daddr);
+ break;
+ case FLOW_OFFLOAD_DIR_REPLY:
+ addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v4.s_addr;
+ offset = offsetof(struct iphdr, saddr);
+ break;
+ default:
+ return;
+ }
+
+ flow_offload_mangle(entry, FLOW_ACT_MANGLE_HDR_TYPE_IP4, offset,
+ (u8 *)&addr, (u8 *)&mask);
+}
+
+static void flow_offload_ipv6_mangle(struct nf_flow_rule *flow_rule,
+ unsigned int offset,
+ u8 *addr, u8 *mask)
+{
+ struct flow_action_entry *entry;
+ int i;
+
+ for (i = 0; i < sizeof(struct in6_addr) / sizeof(u32); i += sizeof(u32)) {
+ entry = flow_action_entry_next(flow_rule);
+ flow_offload_mangle(entry, FLOW_ACT_MANGLE_HDR_TYPE_IP6,
+ offset + i,
+ &addr[i], mask);
+ }
+}
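
Worth noting when reading the hunk above: the loop bound sizeof(struct in6_addr) / sizeof(u32) counts words (16 / 4 = 4) while the increment sizeof(u32) advances in bytes (4), so as written the body executes a single time and only the first four bytes of the address are mangled; presumably the intent was one mangle per 32-bit word. A sketch of the iteration arithmetic:

    #include <stdio.h>

    struct in6_addr { unsigned char s6_addr[16]; };

    int main(void)
    {
        unsigned int u32_size = sizeof(unsigned int); /* 4 on common ABIs */
        unsigned int i, iterations = 0;

        /* Bound counts words (16 / 4 = 4) while the step advances in
         * bytes (4), so only offset 0 is visited. */
        for (i = 0; i < sizeof(struct in6_addr) / u32_size; i += u32_size) {
            printf("mangle at byte offset %u\n", i);
            iterations++;
        }
        printf("iterations: %u (4 would cover the full address)\n",
               iterations);
        return 0;
    }
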
+
+static void flow_offload_ipv6_snat(struct net *net,
+ const struct flow_offload *flow,
+ enum flow_offload_tuple_dir dir,
+ struct nf_flow_rule *flow_rule)
+{
+ u32 mask = ~htonl(0xffffffff);
+ const u8 *addr;
+ u32 offset;
+
+ switch (dir) {
+ case FLOW_OFFLOAD_DIR_ORIGINAL:
+ addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v6.s6_addr;
+ offset = offsetof(struct ipv6hdr, saddr);
+ break;
+ case FLOW_OFFLOAD_DIR_REPLY:
+ addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v6.s6_addr;
+ offset = offsetof(struct ipv6hdr, daddr);
+ break;
+ default:
+ return;
+ }
+
+ flow_offload_ipv6_mangle(flow_rule, offset, (u8 *)addr, (u8 *)&mask);
+}
+
+static void flow_offload_ipv6_dnat(struct net *net,
+ const struct flow_offload *flow,
+ enum flow_offload_tuple_dir dir,
+ struct nf_flow_rule *flow_rule)
+{
+ u32 mask = ~htonl(0xffffffff);
+ const u8 *addr;
+ u32 offset;
+
+ switch (dir) {
+ case FLOW_OFFLOAD_DIR_ORIGINAL:
+ addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v6.s6_addr;
+ offset = offsetof(struct ipv6hdr, daddr);
+ break;
+ case FLOW_OFFLOAD_DIR_REPLY:
+ addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v6.s6_addr;
+ offset = offsetof(struct ipv6hdr, saddr);
+ break;
+ default:
+ return;
+ }
+
+ flow_offload_ipv6_mangle(flow_rule, offset, (u8 *)addr, (u8 *)&mask);
+}
+
+static int flow_offload_l4proto(const struct flow_offload *flow)
+{
+ u8 protonum = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.l4proto;
+ u8 type = 0;
+
+ switch (protonum) {
+ case IPPROTO_TCP:
+ type = FLOW_ACT_MANGLE_HDR_TYPE_TCP;
+ break;
+ case IPPROTO_UDP:
+ type = FLOW_ACT_MANGLE_HDR_TYPE_UDP;
+ break;
+ default:
+ break;
+ }
+
+ return type;
+}
+
+static void flow_offload_port_snat(struct net *net,
+ const struct flow_offload *flow,
+ enum flow_offload_tuple_dir dir,
+ struct nf_flow_rule *flow_rule)
+{
+ struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
+ u32 mask = ~htonl(0xffff0000);
+ __be16 port;
+ u32 offset;
+
+ switch (dir) {
+ case FLOW_OFFLOAD_DIR_ORIGINAL:
+ port = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_port;
+ offset = 0; /* offsetof(struct tcphdr, source); */
+ break;
+ case FLOW_OFFLOAD_DIR_REPLY:
+ port = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_port;
+ offset = 0; /* offsetof(struct tcphdr, dest); */
+ break;
+ default:
+ break;
+ }
+
+ flow_offload_mangle(entry, flow_offload_l4proto(flow), offset,
+ (u8 *)&port, (u8 *)&mask);
+}
+
+static void flow_offload_port_dnat(struct net *net,
+ const struct flow_offload *flow,
+ enum flow_offload_tuple_dir dir,
+ struct nf_flow_rule *flow_rule)
+{
+ struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
+ u32 mask = ~htonl(0xffff);
+ __be16 port;
+ u32 offset;
+
+ switch (dir) {
+ case FLOW_OFFLOAD_DIR_ORIGINAL:
+ port = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_port;
+ offset = 0; /* offsetof(struct tcphdr, source); */
+ break;
+ case FLOW_OFFLOAD_DIR_REPLY:
+ port = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_port;
+ offset = 0; /* offsetof(struct tcphdr, dest); */
+ break;
+ default:
+ break;
+ }
+
+ flow_offload_mangle(entry, flow_offload_l4proto(flow), offset,
+ (u8 *)&port, (u8 *)&mask);
+}
+
+static void flow_offload_ipv4_checksum(struct net *net,
+ const struct flow_offload *flow,
+ struct nf_flow_rule *flow_rule)
+{
+ u8 protonum = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.l4proto;
+ struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
+
+ entry->id = FLOW_ACTION_CSUM;
+ entry->csum_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR;
+
+ switch (protonum) {
+ case IPPROTO_TCP:
+ entry->csum_flags |= TCA_CSUM_UPDATE_FLAG_TCP;
+ break;
+ case IPPROTO_UDP:
+ entry->csum_flags |= TCA_CSUM_UPDATE_FLAG_UDP;
+ break;
+ }
+}
+
+static void flow_offload_redirect(const struct flow_offload *flow,
+ enum flow_offload_tuple_dir dir,
+ struct nf_flow_rule *flow_rule)
+{
+ struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
+ struct rtable *rt;
+
+ rt = (struct rtable *)flow->tuplehash[dir].tuple.dst_cache;
+ entry->id = FLOW_ACTION_REDIRECT;
+ entry->dev = rt->dst.dev;
+ dev_hold(rt->dst.dev);
+}
+
+int nf_flow_rule_route_ipv4(struct net *net, const struct flow_offload *flow,
+ enum flow_offload_tuple_dir dir,
+ struct nf_flow_rule *flow_rule)
+{
+ if (flow_offload_eth_src(net, flow, dir, flow_rule) < 0 ||
+ flow_offload_eth_dst(net, flow, dir, flow_rule) < 0)
+ return -1;
+
+ if (flow->flags & FLOW_OFFLOAD_SNAT) {
+ flow_offload_ipv4_snat(net, flow, dir, flow_rule);
+ flow_offload_port_snat(net, flow, dir, flow_rule);
+ }
+ if (flow->flags & FLOW_OFFLOAD_DNAT) {
+ flow_offload_ipv4_dnat(net, flow, dir, flow_rule);
+ flow_offload_port_dnat(net, flow, dir, flow_rule);
+ }
+ if (flow->flags & FLOW_OFFLOAD_SNAT ||
+ flow->flags & FLOW_OFFLOAD_DNAT)
+ flow_offload_ipv4_checksum(net, flow, flow_rule);
+
+ flow_offload_redirect(flow, dir, flow_rule);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nf_flow_rule_route_ipv4);
+
+int nf_flow_rule_route_ipv6(struct net *net, const struct flow_offload *flow,
+ enum flow_offload_tuple_dir dir,
+ struct nf_flow_rule *flow_rule)
+{
+ if (flow_offload_eth_src(net, flow, dir, flow_rule) < 0 ||
+ flow_offload_eth_dst(net, flow, dir, flow_rule) < 0)
+ return -1;
+
+ if (flow->flags & FLOW_OFFLOAD_SNAT) {
+ flow_offload_ipv6_snat(net, flow, dir, flow_rule);
+ flow_offload_port_snat(net, flow, dir, flow_rule);
+ }
+ if (flow->flags & FLOW_OFFLOAD_DNAT) {
+ flow_offload_ipv6_dnat(net, flow, dir, flow_rule);
+ flow_offload_port_dnat(net, flow, dir, flow_rule);
+ }
+
+ flow_offload_redirect(flow, dir, flow_rule);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nf_flow_rule_route_ipv6);
+
+#define NF_FLOW_RULE_ACTION_MAX 16
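
As a sanity check on this budget: the worst case built by nf_flow_rule_route_ipv4() with both NAT flags set is 2 entries for the source MAC, 2 for the destination MAC, 1 each for the SNAT and DNAT address and port mangles (4 total), 1 checksum entry and 1 redirect, i.e. 10 entries. On the IPv6 path, which has no checksum entry, each address rewrite would contribute one mangle per 32-bit word if the per-word loop covered all four words (see the note above), giving 2 + 2 + 4 + 1 + 4 + 1 + 1 = 15; both cases fit within the 16-entry allocation.
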
+
+static struct nf_flow_rule *
+nf_flow_offload_rule_alloc(struct net *net,
+ const struct flow_offload_work *offload,
+ enum flow_offload_tuple_dir dir)
+{
+ const struct nf_flowtable *flowtable = offload->flowtable;
+ const struct flow_offload *flow = offload->flow;
+ const struct flow_offload_tuple *tuple;
+ struct nf_flow_rule *flow_rule;
+ int err = -ENOMEM;
+
+ flow_rule = kzalloc(sizeof(*flow_rule), GFP_KERNEL);
+ if (!flow_rule)
+ goto err_flow;
+
+ flow_rule->rule = flow_rule_alloc(NF_FLOW_RULE_ACTION_MAX);
+ if (!flow_rule->rule)
+ goto err_flow_rule;
+
+ flow_rule->rule->match.dissector = &flow_rule->match.dissector;
+ flow_rule->rule->match.mask = &flow_rule->match.mask;
+ flow_rule->rule->match.key = &flow_rule->match.key;
+
+ tuple = &flow->tuplehash[dir].tuple;
+ err = nf_flow_rule_match(&flow_rule->match, tuple);
+ if (err < 0)
+ goto err_flow_match;
+
+ flow_rule->rule->action.num_entries = 0;
+ if (flowtable->type->action(net, flow, dir, flow_rule) < 0)
+ goto err_flow_match;
+
+ return flow_rule;
+
+err_flow_match:
+ kfree(flow_rule->rule);
+err_flow_rule:
+ kfree(flow_rule);
+err_flow:
+ return NULL;
+}
+
+static void __nf_flow_offload_destroy(struct nf_flow_rule *flow_rule)
+{
+ struct flow_action_entry *entry;
+ int i;
+
+ for (i = 0; i < flow_rule->rule->action.num_entries; i++) {
+ entry = &flow_rule->rule->action.entries[i];
+ if (entry->id != FLOW_ACTION_REDIRECT)
+ continue;
+
+ dev_put(entry->dev);
+ }
+ kfree(flow_rule->rule);
+ kfree(flow_rule);
+}
+
+static void nf_flow_offload_destroy(struct nf_flow_rule *flow_rule[])
+{
+ int i;
+
+ for (i = 0; i < FLOW_OFFLOAD_DIR_MAX; i++)
+ __nf_flow_offload_destroy(flow_rule[i]);
+}
+
+static int nf_flow_offload_alloc(const struct flow_offload_work *offload,
+ struct nf_flow_rule *flow_rule[])
+{
+ struct net *net = read_pnet(&offload->flowtable->net);
+
+ flow_rule[0] = nf_flow_offload_rule_alloc(net, offload,
+ FLOW_OFFLOAD_DIR_ORIGINAL);
+ if (!flow_rule[0])
+ return -ENOMEM;
+
+ flow_rule[1] = nf_flow_offload_rule_alloc(net, offload,
+ FLOW_OFFLOAD_DIR_REPLY);
+ if (!flow_rule[1]) {
+ __nf_flow_offload_destroy(flow_rule[0]);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void nf_flow_offload_init(struct flow_cls_offload *cls_flow,
+ __be16 proto, int priority,
+ enum flow_cls_command cmd,
+ const struct flow_offload_tuple *tuple,
+ struct netlink_ext_ack *extack)
+{
+ cls_flow->common.protocol = proto;
+ cls_flow->common.prio = priority;
+ cls_flow->common.extack = extack;
+ cls_flow->command = cmd;
+ cls_flow->cookie = (unsigned long)tuple;
+}
+
+static int flow_offload_tuple_add(struct flow_offload_work *offload,
+ struct nf_flow_rule *flow_rule,
+ enum flow_offload_tuple_dir dir)
+{
+ struct nf_flowtable *flowtable = offload->flowtable;
+ struct flow_cls_offload cls_flow = {};
+ struct flow_block_cb *block_cb;
+ struct netlink_ext_ack extack;
+ __be16 proto = ETH_P_ALL;
+ int err, i = 0;
+
+ nf_flow_offload_init(&cls_flow, proto, offload->priority,
+ FLOW_CLS_REPLACE,
+ &offload->flow->tuplehash[dir].tuple, &extack);
+ cls_flow.rule = flow_rule->rule;
+
+ list_for_each_entry(block_cb, &flowtable->flow_block.cb_list, list) {
+ err = block_cb->cb(TC_SETUP_FT, &cls_flow,
+ block_cb->cb_priv);
+ if (err < 0)
+ continue;
+
+ i++;
+ }
+
+ return i;
+}
+
+static void flow_offload_tuple_del(struct flow_offload_work *offload,
+ enum flow_offload_tuple_dir dir)
+{
+ struct nf_flowtable *flowtable = offload->flowtable;
+ struct flow_cls_offload cls_flow = {};
+ struct flow_block_cb *block_cb;
+ struct netlink_ext_ack extack;
+ __be16 proto = ETH_P_ALL;
+
+ nf_flow_offload_init(&cls_flow, proto, offload->priority,
+ FLOW_CLS_DESTROY,
+ &offload->flow->tuplehash[dir].tuple, &extack);
+
+ list_for_each_entry(block_cb, &flowtable->flow_block.cb_list, list)
+ block_cb->cb(TC_SETUP_FT, &cls_flow, block_cb->cb_priv);
+
+ offload->flow->flags |= FLOW_OFFLOAD_HW_DEAD;
+}
+
+static int flow_offload_rule_add(struct flow_offload_work *offload,
+ struct nf_flow_rule *flow_rule[])
+{
+ int ok_count = 0;
+
+ ok_count += flow_offload_tuple_add(offload, flow_rule[0],
+ FLOW_OFFLOAD_DIR_ORIGINAL);
+ ok_count += flow_offload_tuple_add(offload, flow_rule[1],
+ FLOW_OFFLOAD_DIR_REPLY);
+ if (ok_count == 0)
+ return -ENOENT;
+
+ return 0;
+}
+
+static int flow_offload_work_add(struct flow_offload_work *offload)
+{
+ struct nf_flow_rule *flow_rule[FLOW_OFFLOAD_DIR_MAX];
+ int err;
+
+ err = nf_flow_offload_alloc(offload, flow_rule);
+ if (err < 0)
+ return -ENOMEM;
+
+ err = flow_offload_rule_add(offload, flow_rule);
+
+ nf_flow_offload_destroy(flow_rule);
+
+ return err;
+}
+
+static void flow_offload_work_del(struct flow_offload_work *offload)
+{
+ flow_offload_tuple_del(offload, FLOW_OFFLOAD_DIR_ORIGINAL);
+ flow_offload_tuple_del(offload, FLOW_OFFLOAD_DIR_REPLY);
+}
+
+static void flow_offload_tuple_stats(struct flow_offload_work *offload,
+ enum flow_offload_tuple_dir dir,
+ struct flow_stats *stats)
+{
+ struct nf_flowtable *flowtable = offload->flowtable;
+ struct flow_cls_offload cls_flow = {};
+ struct flow_block_cb *block_cb;
+ struct netlink_ext_ack extack;
+ __be16 proto = ETH_P_ALL;
+
+ nf_flow_offload_init(&cls_flow, proto, offload->priority,
+ FLOW_CLS_STATS,
+ &offload->flow->tuplehash[dir].tuple, &extack);
+
+ list_for_each_entry(block_cb, &flowtable->flow_block.cb_list, list)
+ block_cb->cb(TC_SETUP_FT, &cls_flow, block_cb->cb_priv);
+ memcpy(stats, &cls_flow.stats, sizeof(*stats));
+}
+
+static void flow_offload_work_stats(struct flow_offload_work *offload)
+{
+ struct flow_stats stats[FLOW_OFFLOAD_DIR_MAX] = {};
+ u64 lastused;
+
+ flow_offload_tuple_stats(offload, FLOW_OFFLOAD_DIR_ORIGINAL, &stats[0]);
+ flow_offload_tuple_stats(offload, FLOW_OFFLOAD_DIR_REPLY, &stats[1]);
+
+ lastused = max_t(u64, stats[0].lastused, stats[1].lastused);
+ offload->flow->timeout = max_t(u64, offload->flow->timeout,
+ lastused + NF_FLOW_TIMEOUT);
+}
+
+static void flow_offload_work_handler(struct work_struct *work)
+{
+ struct flow_offload_work *offload, *next;
+ LIST_HEAD(offload_pending_list);
+ int ret;
+
+ spin_lock_bh(&flow_offload_pending_list_lock);
+ list_replace_init(&flow_offload_pending_list, &offload_pending_list);
+ spin_unlock_bh(&flow_offload_pending_list_lock);
+
+ list_for_each_entry_safe(offload, next, &offload_pending_list, list) {
+ switch (offload->cmd) {
+ case FLOW_CLS_REPLACE:
+ ret = flow_offload_work_add(offload);
+ if (ret < 0)
+ offload->flow->flags &= ~FLOW_OFFLOAD_HW;
+ break;
+ case FLOW_CLS_DESTROY:
+ flow_offload_work_del(offload);
+ break;
+ case FLOW_CLS_STATS:
+ flow_offload_work_stats(offload);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ }
+ list_del(&offload->list);
+ kfree(offload);
+ }
+}
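
The handler drains the pending list with the classic grab-and-release pattern: swap the shared list head for an empty one under the spinlock, then walk the private copy without holding the lock. A minimal single-threaded sketch of the list_replace_init() step, with the kernel list primitives stubbed by a plain singly-linked list:

    #include <stdio.h>
    #include <stdlib.h>

    struct work { int id; struct work *next; };

    static struct work *pending;  /* shared list, normally lock-protected */

    static void queue_work_item(int id)
    {
        struct work *w = malloc(sizeof(*w));

        w->id = id;
        w->next = pending;        /* the spinlock would be held here */
        pending = w;
    }

    static void drain(void)
    {
        /* Equivalent of list_replace_init(): steal the whole list in
         * O(1), leaving the shared head empty, then process unlocked. */
        struct work *local = pending;

        pending = NULL;
        while (local) {
            struct work *next = local->next;

            printf("processing work %d\n", local->id);
            free(local);
            local = next;
        }
    }

    int main(void)
    {
        queue_work_item(1);
        queue_work_item(2);
        drain();
        return 0;
    }
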
+
+static void flow_offload_queue_work(struct flow_offload_work *offload)
+{
+ spin_lock_bh(&flow_offload_pending_list_lock);
+ list_add_tail(&offload->list, &flow_offload_pending_list);
+ spin_unlock_bh(&flow_offload_pending_list_lock);
+
+ schedule_work(&nf_flow_offload_work);
+}
+
+void nf_flow_offload_add(struct nf_flowtable *flowtable,
+ struct flow_offload *flow)
+{
+ struct flow_offload_work *offload;
+
+ offload = kmalloc(sizeof(struct flow_offload_work), GFP_ATOMIC);
+ if (!offload)
+ return;
+
+ offload->cmd = FLOW_CLS_REPLACE;
+ offload->flow = flow;
+ offload->priority = flowtable->priority;
+ offload->flowtable = flowtable;
+ flow->flags |= FLOW_OFFLOAD_HW;
+
+ flow_offload_queue_work(offload);
+}
+
+void nf_flow_offload_del(struct nf_flowtable *flowtable,
+ struct flow_offload *flow)
+{
+ struct flow_offload_work *offload;
+
+ offload = kzalloc(sizeof(struct flow_offload_work), GFP_ATOMIC);
+ if (!offload)
+ return;
+
+ offload->cmd = FLOW_CLS_DESTROY;
+ offload->flow = flow;
+ offload->flow->flags |= FLOW_OFFLOAD_HW_DYING;
+ offload->flowtable = flowtable;
+
+ flow_offload_queue_work(offload);
+}
+
+void nf_flow_offload_stats(struct nf_flowtable *flowtable,
+ struct flow_offload *flow)
+{
+ struct flow_offload_work *offload;
+ s64 delta;
+
+ delta = flow->timeout - jiffies;
+ if ((delta >= (9 * NF_FLOW_TIMEOUT) / 10) ||
+ flow->flags & FLOW_OFFLOAD_HW_DYING)
+ return;
+
+ offload = kzalloc(sizeof(struct flow_offload_work), GFP_ATOMIC);
+ if (!offload)
+ return;
+
+ offload->cmd = FLOW_CLS_STATS;
+ offload->flow = flow;
+ offload->flowtable = flowtable;
+
+ flow_offload_queue_work(offload);
+}
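
The early return above rate-limits stats requests: a flow refreshed to expire NF_FLOW_TIMEOUT jiffies from now queues a new FLOW_CLS_STATS query only once less than nine tenths of that window remains, i.e. after at least a tenth of the timeout has elapsed (flows already marked HW_DYING are skipped as well). A sketch of the arithmetic, with an illustrative timeout value:

    #include <stdio.h>

    #define NF_FLOW_TIMEOUT 30  /* illustrative; the kernel scales by HZ */

    /* Mirrors the delta check in nf_flow_offload_stats(): skip the query
     * while at least 9/10 of the timeout window is still ahead of us. */
    static int needs_stats(long timeout, long now)
    {
        long delta = timeout - now;

        return delta < (9 * NF_FLOW_TIMEOUT) / 10;
    }

    int main(void)
    {
        long now = 100, timeout = now + NF_FLOW_TIMEOUT;

        printf("just refreshed: %d\n", needs_stats(timeout, now));      /* 0 */
        printf("4 ticks later:  %d\n", needs_stats(timeout, now + 4));  /* 1 */
        return 0;
    }
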
+
+void nf_flow_table_offload_flush(struct nf_flowtable *flowtable)
+{
+ if (flowtable->flags & NF_FLOWTABLE_HW_OFFLOAD)
+ flush_work(&nf_flow_offload_work);
+}
+
+static int nf_flow_table_block_setup(struct nf_flowtable *flowtable,
+ struct flow_block_offload *bo,
+ enum flow_block_command cmd)
+{
+ struct flow_block_cb *block_cb, *next;
+ int err = 0;
+
+ switch (cmd) {
+ case FLOW_BLOCK_BIND:
+ list_splice(&bo->cb_list, &flowtable->flow_block.cb_list);
+ break;
+ case FLOW_BLOCK_UNBIND:
+ list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
+ list_del(&block_cb->list);
+ flow_block_cb_free(block_cb);
+ }
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ err = -EOPNOTSUPP;
+ }
+
+ return err;
+}
+
+int nf_flow_table_offload_setup(struct nf_flowtable *flowtable,
+ struct net_device *dev,
+ enum flow_block_command cmd)
+{
+ struct netlink_ext_ack extack = {};
+ struct flow_block_offload bo = {};
+ int err;
+
+ if (!(flowtable->flags & NF_FLOWTABLE_HW_OFFLOAD))
+ return 0;
+
+ if (!dev->netdev_ops->ndo_setup_tc)
+ return -EOPNOTSUPP;
+
+ bo.net = dev_net(dev);
+ bo.block = &flowtable->flow_block;
+ bo.command = cmd;
+ bo.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
+ bo.extack = &extack;
+ INIT_LIST_HEAD(&bo.cb_list);
+
+ err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
+ if (err < 0)
+ return err;
+
+ return nf_flow_table_block_setup(flowtable, &bo, cmd);
+}
+EXPORT_SYMBOL_GPL(nf_flow_table_offload_setup);
+
+int nf_flow_table_offload_init(void)
+{
+ INIT_WORK(&nf_flow_offload_work, flow_offload_work_handler);
+
+ return 0;
+}
+
+void nf_flow_table_offload_exit(void)
+{
+ struct flow_offload_work *offload, *next;
+ LIST_HEAD(offload_pending_list);
+
+ cancel_work_sync(&nf_flow_offload_work);
+
+ list_for_each_entry_safe(offload, next, &offload_pending_list, list) {
+ list_del(&offload->list);
+ kfree(offload);
+ }
+}
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 712a428509ad..062b73a83af0 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -151,11 +151,64 @@ static void nft_set_trans_bind(const struct nft_ctx *ctx, struct nft_set *set)
}
}
+static int nft_netdev_register_hooks(struct net *net,
+ struct list_head *hook_list)
+{
+ struct nft_hook *hook;
+ int err, j;
+
+ j = 0;
+ list_for_each_entry(hook, hook_list, list) {
+ err = nf_register_net_hook(net, &hook->ops);
+ if (err < 0)
+ goto err_register;
+
+ j++;
+ }
+ return 0;
+
+err_register:
+ list_for_each_entry(hook, hook_list, list) {
+ if (j-- <= 0)
+ break;
+
+ nf_unregister_net_hook(net, &hook->ops);
+ }
+ return err;
+}
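
nft_netdev_register_hooks() uses a count-and-unwind error path: on the first failed registration it re-walks the list and tears down exactly the j hooks that did register. A self-contained sketch of the pattern, with registration faked to fail at a chosen element:

    #include <stdio.h>

    #define N 4

    static int register_hook(int i, int fail_at)
    {
        if (i == fail_at)
            return -1;
        printf("registered %d\n", i);
        return 0;
    }

    static void unregister_hook(int i) { printf("unregistered %d\n", i); }

    static int register_all(int fail_at)
    {
        int i, j = 0;

        for (i = 0; i < N; i++) {
            if (register_hook(i, fail_at) < 0)
                goto err;
            j++;
        }
        return 0;

    err:
        /* Re-walk from the start, undoing only the first j successes. */
        for (i = 0; i < N && j-- > 0; i++)
            unregister_hook(i);
        return -1;
    }

    int main(void)
    {
        return register_all(2) ? 1 : 0;  /* hooks 0 and 1 get rolled back */
    }
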
+
+static void nft_netdev_unregister_hooks(struct net *net,
+ struct list_head *hook_list)
+{
+ struct nft_hook *hook;
+
+ list_for_each_entry(hook, hook_list, list)
+ nf_unregister_net_hook(net, &hook->ops);
+}
+
+static int nft_register_basechain_hooks(struct net *net, int family,
+ struct nft_base_chain *basechain)
+{
+ if (family == NFPROTO_NETDEV)
+ return nft_netdev_register_hooks(net, &basechain->hook_list);
+
+ return nf_register_net_hook(net, &basechain->ops);
+}
+
+static void nft_unregister_basechain_hooks(struct net *net, int family,
+ struct nft_base_chain *basechain)
+{
+ if (family == NFPROTO_NETDEV)
+ nft_netdev_unregister_hooks(net, &basechain->hook_list);
+ else
+ nf_unregister_net_hook(net, &basechain->ops);
+}
+
static int nf_tables_register_hook(struct net *net,
const struct nft_table *table,
struct nft_chain *chain)
{
- const struct nft_base_chain *basechain;
+ struct nft_base_chain *basechain;
const struct nf_hook_ops *ops;
if (table->flags & NFT_TABLE_F_DORMANT ||
@@ -168,14 +221,14 @@ static int nf_tables_register_hook(struct net *net,
if (basechain->type->ops_register)
return basechain->type->ops_register(net, ops);
- return nf_register_net_hook(net, ops);
+ return nft_register_basechain_hooks(net, table->family, basechain);
}
static void nf_tables_unregister_hook(struct net *net,
const struct nft_table *table,
struct nft_chain *chain)
{
- const struct nft_base_chain *basechain;
+ struct nft_base_chain *basechain;
const struct nf_hook_ops *ops;
if (table->flags & NFT_TABLE_F_DORMANT ||
@@ -187,7 +240,7 @@ static void nf_tables_unregister_hook(struct net *net,
if (basechain->type->ops_unregister)
return basechain->type->ops_unregister(net, ops);
- nf_unregister_net_hook(net, ops);
+ nft_unregister_basechain_hooks(net, table->family, basechain);
}
static int nft_trans_table_add(struct nft_ctx *ctx, int msg_type)
@@ -308,6 +361,7 @@ static struct nft_trans *nft_trans_rule_add(struct nft_ctx *ctx, int msg_type,
static int nft_delrule(struct nft_ctx *ctx, struct nft_rule *rule)
{
+ struct nft_flow_rule *flow;
struct nft_trans *trans;
int err;
@@ -315,6 +369,16 @@ static int nft_delrule(struct nft_ctx *ctx, struct nft_rule *rule)
if (trans == NULL)
return -ENOMEM;
+ if (ctx->chain->flags & NFT_CHAIN_HW_OFFLOAD) {
+ flow = nft_flow_rule_create(ctx->net, rule);
+ if (IS_ERR(flow)) {
+ nft_trans_destroy(trans);
+ return PTR_ERR(flow);
+ }
+
+ nft_trans_flow_rule(trans) = flow;
+ }
+
err = nf_tables_delrule_deactivate(ctx, rule);
if (err < 0) {
nft_trans_destroy(trans);
@@ -742,7 +806,8 @@ static void nft_table_disable(struct net *net, struct nft_table *table, u32 cnt)
if (cnt && i++ == cnt)
break;
- nf_unregister_net_hook(net, &nft_base_chain(chain)->ops);
+ nft_unregister_basechain_hooks(net, table->family,
+ nft_base_chain(chain));
}
}
@@ -757,14 +822,16 @@ static int nf_tables_table_enable(struct net *net, struct nft_table *table)
if (!nft_is_base_chain(chain))
continue;
- err = nf_register_net_hook(net, &nft_base_chain(chain)->ops);
+ err = nft_register_basechain_hooks(net, table->family,
+ nft_base_chain(chain));
if (err < 0)
- goto err;
+ goto err_register_hooks;
i++;
}
return 0;
-err:
+
+err_register_hooks:
if (i)
nft_table_disable(net, table, i);
return err;
@@ -1225,6 +1292,46 @@ nla_put_failure:
return -ENOSPC;
}
+static int nft_dump_basechain_hook(struct sk_buff *skb, int family,
+ const struct nft_base_chain *basechain)
+{
+ const struct nf_hook_ops *ops = &basechain->ops;
+ struct nft_hook *hook, *first = NULL;
+ struct nlattr *nest, *nest_devs;
+ int n = 0;
+
+ nest = nla_nest_start_noflag(skb, NFTA_CHAIN_HOOK);
+ if (nest == NULL)
+ goto nla_put_failure;
+ if (nla_put_be32(skb, NFTA_HOOK_HOOKNUM, htonl(ops->hooknum)))
+ goto nla_put_failure;
+ if (nla_put_be32(skb, NFTA_HOOK_PRIORITY, htonl(ops->priority)))
+ goto nla_put_failure;
+
+ if (family == NFPROTO_NETDEV) {
+ nest_devs = nla_nest_start_noflag(skb, NFTA_HOOK_DEVS);
+ list_for_each_entry(hook, &basechain->hook_list, list) {
+ if (!first)
+ first = hook;
+
+ if (nla_put_string(skb, NFTA_DEVICE_NAME,
+ hook->ops.dev->name))
+ goto nla_put_failure;
+ n++;
+ }
+ nla_nest_end(skb, nest_devs);
+
+ if (n == 1 &&
+ nla_put_string(skb, NFTA_HOOK_DEV, first->ops.dev->name))
+ goto nla_put_failure;
+ }
+ nla_nest_end(skb, nest);
+
+ return 0;
+nla_put_failure:
+ return -1;
+}
+
static int nf_tables_fill_chain_info(struct sk_buff *skb, struct net *net,
u32 portid, u32 seq, int event, u32 flags,
int family, const struct nft_table *table,
@@ -1253,21 +1360,10 @@ static int nf_tables_fill_chain_info(struct sk_buff *skb, struct net *net,
if (nft_is_base_chain(chain)) {
const struct nft_base_chain *basechain = nft_base_chain(chain);
- const struct nf_hook_ops *ops = &basechain->ops;
struct nft_stats __percpu *stats;
- struct nlattr *nest;
- nest = nla_nest_start_noflag(skb, NFTA_CHAIN_HOOK);
- if (nest == NULL)
- goto nla_put_failure;
- if (nla_put_be32(skb, NFTA_HOOK_HOOKNUM, htonl(ops->hooknum)))
+ if (nft_dump_basechain_hook(skb, family, basechain))
goto nla_put_failure;
- if (nla_put_be32(skb, NFTA_HOOK_PRIORITY, htonl(ops->priority)))
- goto nla_put_failure;
- if (basechain->dev_name[0] &&
- nla_put_string(skb, NFTA_HOOK_DEV, basechain->dev_name))
- goto nla_put_failure;
- nla_nest_end(skb, nest);
if (nla_put_be32(skb, NFTA_CHAIN_POLICY,
htonl(basechain->policy)))
@@ -1461,8 +1557,9 @@ static void nft_chain_stats_replace(struct nft_trans *trans)
if (!nft_trans_chain_stats(trans))
return;
- rcu_swap_protected(chain->stats, nft_trans_chain_stats(trans),
- lockdep_commit_lock_is_held(trans->ctx.net));
+ nft_trans_chain_stats(trans) =
+ rcu_replace_pointer(chain->stats, nft_trans_chain_stats(trans),
+ lockdep_commit_lock_is_held(trans->ctx.net));
if (!nft_trans_chain_stats(trans))
static_branch_inc(&nft_counters_enabled);
@@ -1485,6 +1582,7 @@ static void nf_tables_chain_free_chain_rules(struct nft_chain *chain)
static void nf_tables_chain_destroy(struct nft_ctx *ctx)
{
struct nft_chain *chain = ctx->chain;
+ struct nft_hook *hook, *next;
if (WARN_ON(chain->use > 0))
return;
@@ -1495,6 +1593,13 @@ static void nf_tables_chain_destroy(struct nft_ctx *ctx)
if (nft_is_base_chain(chain)) {
struct nft_base_chain *basechain = nft_base_chain(chain);
+ if (ctx->family == NFPROTO_NETDEV) {
+ list_for_each_entry_safe(hook, next,
+ &basechain->hook_list, list) {
+ list_del_rcu(&hook->list);
+ kfree_rcu(hook, rcu);
+ }
+ }
module_put(basechain->type->owner);
if (rcu_access_pointer(basechain->stats)) {
static_branch_dec(&nft_counters_enabled);
@@ -1508,13 +1613,125 @@ static void nf_tables_chain_destroy(struct nft_ctx *ctx)
}
}
+static struct nft_hook *nft_netdev_hook_alloc(struct net *net,
+ const struct nlattr *attr)
+{
+ struct net_device *dev;
+ char ifname[IFNAMSIZ];
+ struct nft_hook *hook;
+ int err;
+
+ hook = kmalloc(sizeof(struct nft_hook), GFP_KERNEL);
+ if (!hook) {
+ err = -ENOMEM;
+ goto err_hook_alloc;
+ }
+
+ nla_strlcpy(ifname, attr, IFNAMSIZ);
+ dev = __dev_get_by_name(net, ifname);
+ if (!dev) {
+ err = -ENOENT;
+ goto err_hook_dev;
+ }
+ hook->ops.dev = dev;
+
+ return hook;
+
+err_hook_dev:
+ kfree(hook);
+err_hook_alloc:
+ return ERR_PTR(err);
+}
+
+static bool nft_hook_list_find(struct list_head *hook_list,
+ const struct nft_hook *this)
+{
+ struct nft_hook *hook;
+
+ list_for_each_entry(hook, hook_list, list) {
+ if (this->ops.dev == hook->ops.dev)
+ return true;
+ }
+
+ return false;
+}
+
+static int nf_tables_parse_netdev_hooks(struct net *net,
+ const struct nlattr *attr,
+ struct list_head *hook_list)
+{
+ struct nft_hook *hook, *next;
+ const struct nlattr *tmp;
+ int rem, n = 0, err;
+
+ nla_for_each_nested(tmp, attr, rem) {
+ if (nla_type(tmp) != NFTA_DEVICE_NAME) {
+ err = -EINVAL;
+ goto err_hook;
+ }
+
+ hook = nft_netdev_hook_alloc(net, tmp);
+ if (IS_ERR(hook)) {
+ err = PTR_ERR(hook);
+ goto err_hook;
+ }
+ if (nft_hook_list_find(hook_list, hook)) {
+ err = -EEXIST;
+ goto err_hook;
+ }
+ list_add_tail(&hook->list, hook_list);
+ n++;
+
+ if (n == NFT_NETDEVICE_MAX) {
+ err = -EFBIG;
+ goto err_hook;
+ }
+ }
+ if (!n)
+ return -EINVAL;
+
+ return 0;
+
+err_hook:
+ list_for_each_entry_safe(hook, next, hook_list, list) {
+ list_del(&hook->list);
+ kfree(hook);
+ }
+ return err;
+}
+
struct nft_chain_hook {
u32 num;
s32 priority;
const struct nft_chain_type *type;
- struct net_device *dev;
+ struct list_head list;
};
+static int nft_chain_parse_netdev(struct net *net,
+ struct nlattr *tb[],
+ struct list_head *hook_list)
+{
+ struct nft_hook *hook;
+ int err;
+
+ if (tb[NFTA_HOOK_DEV]) {
+ hook = nft_netdev_hook_alloc(net, tb[NFTA_HOOK_DEV]);
+ if (IS_ERR(hook))
+ return PTR_ERR(hook);
+
+ list_add_tail(&hook->list, hook_list);
+ } else if (tb[NFTA_HOOK_DEVS]) {
+ err = nf_tables_parse_netdev_hooks(net, tb[NFTA_HOOK_DEVS],
+ hook_list);
+ if (err < 0)
+ return err;
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int nft_chain_parse_hook(struct net *net,
const struct nlattr * const nla[],
struct nft_chain_hook *hook, u8 family,
@@ -1522,7 +1739,6 @@ static int nft_chain_parse_hook(struct net *net,
{
struct nlattr *ha[NFTA_HOOK_MAX + 1];
const struct nft_chain_type *type;
- struct net_device *dev;
int err;
lockdep_assert_held(&net->nft.commit_mutex);
@@ -1560,23 +1776,14 @@ static int nft_chain_parse_hook(struct net *net,
hook->type = type;
- hook->dev = NULL;
+ INIT_LIST_HEAD(&hook->list);
if (family == NFPROTO_NETDEV) {
- char ifname[IFNAMSIZ];
-
- if (!ha[NFTA_HOOK_DEV]) {
- module_put(type->owner);
- return -EOPNOTSUPP;
- }
-
- nla_strlcpy(ifname, ha[NFTA_HOOK_DEV], IFNAMSIZ);
- dev = __dev_get_by_name(net, ifname);
- if (!dev) {
+ err = nft_chain_parse_netdev(net, ha, &hook->list);
+ if (err < 0) {
module_put(type->owner);
- return -ENOENT;
+ return err;
}
- hook->dev = dev;
- } else if (ha[NFTA_HOOK_DEV]) {
+ } else if (ha[NFTA_HOOK_DEV] || ha[NFTA_HOOK_DEVS]) {
module_put(type->owner);
return -EOPNOTSUPP;
}
@@ -1586,6 +1793,12 @@ static int nft_chain_parse_hook(struct net *net,
static void nft_chain_release_hook(struct nft_chain_hook *hook)
{
+ struct nft_hook *h, *next;
+
+ list_for_each_entry_safe(h, next, &hook->list, list) {
+ list_del(&h->list);
+ kfree(h);
+ }
module_put(hook->type->owner);
}
@@ -1610,6 +1823,49 @@ static struct nft_rule **nf_tables_chain_alloc_rules(const struct nft_chain *cha
return kvmalloc(alloc, GFP_KERNEL);
}
+static void nft_basechain_hook_init(struct nf_hook_ops *ops, u8 family,
+ const struct nft_chain_hook *hook,
+ struct nft_chain *chain)
+{
+ ops->pf = family;
+ ops->hooknum = hook->num;
+ ops->priority = hook->priority;
+ ops->priv = chain;
+ ops->hook = hook->type->hooks[ops->hooknum];
+}
+
+static int nft_basechain_init(struct nft_base_chain *basechain, u8 family,
+ struct nft_chain_hook *hook, u32 flags)
+{
+ struct nft_chain *chain;
+ struct nft_hook *h;
+
+ basechain->type = hook->type;
+ INIT_LIST_HEAD(&basechain->hook_list);
+ chain = &basechain->chain;
+
+ if (family == NFPROTO_NETDEV) {
+ list_splice_init(&hook->list, &basechain->hook_list);
+ list_for_each_entry(h, &basechain->hook_list, list)
+ nft_basechain_hook_init(&h->ops, family, hook, chain);
+
+ basechain->ops.hooknum = hook->num;
+ basechain->ops.priority = hook->priority;
+ } else {
+ nft_basechain_hook_init(&basechain->ops, family, hook, chain);
+ }
+
+ chain->flags |= NFT_BASE_CHAIN | flags;
+ basechain->policy = NF_ACCEPT;
+ if (chain->flags & NFT_CHAIN_HW_OFFLOAD &&
+ nft_chain_offload_priority(basechain) < 0)
+ return -EOPNOTSUPP;
+
+ flow_block_init(&basechain->flow_block);
+
+ return 0;
+}
+
static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
u8 policy, u32 flags)
{
@@ -1628,7 +1884,6 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
if (nla[NFTA_CHAIN_HOOK]) {
struct nft_chain_hook hook;
- struct nf_hook_ops *ops;
err = nft_chain_parse_hook(net, nla, &hook, family, true);
if (err < 0)
@@ -1639,9 +1894,7 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
nft_chain_release_hook(&hook);
return -ENOMEM;
}
-
- if (hook.dev != NULL)
- strncpy(basechain->dev_name, hook.dev->name, IFNAMSIZ);
+ chain = &basechain->chain;
if (nla[NFTA_CHAIN_COUNTERS]) {
stats = nft_stats_alloc(nla[NFTA_CHAIN_COUNTERS]);
@@ -1654,24 +1907,12 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
static_branch_inc(&nft_counters_enabled);
}
- basechain->type = hook.type;
- chain = &basechain->chain;
-
- ops = &basechain->ops;
- ops->pf = family;
- ops->hooknum = hook.num;
- ops->priority = hook.priority;
- ops->priv = chain;
- ops->hook = hook.type->hooks[ops->hooknum];
- ops->dev = hook.dev;
-
- chain->flags |= NFT_BASE_CHAIN | flags;
- basechain->policy = NF_ACCEPT;
- if (chain->flags & NFT_CHAIN_HW_OFFLOAD &&
- nft_chain_offload_priority(basechain) < 0)
- return -EOPNOTSUPP;
-
- flow_block_init(&basechain->flow_block);
+ err = nft_basechain_init(basechain, family, &hook, flags);
+ if (err < 0) {
+ nft_chain_release_hook(&hook);
+ kfree(basechain);
+ return err;
+ }
} else {
chain = kzalloc(sizeof(*chain), GFP_KERNEL);
if (chain == NULL)
@@ -1731,6 +1972,25 @@ err1:
return err;
}
+static bool nft_hook_list_equal(struct list_head *hook_list1,
+ struct list_head *hook_list2)
+{
+ struct nft_hook *hook;
+ int n = 0, m = 0;
+
+ n = 0;
+ list_for_each_entry(hook, hook_list2, list) {
+ if (!nft_hook_list_find(hook_list1, hook))
+ return false;
+
+ n++;
+ }
+ list_for_each_entry(hook, hook_list1, list)
+ m++;
+
+ return n == m;
+}
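
nft_hook_list_equal() tests set equality as "every element of list2 occurs in list1" plus "both lists have the same length"; that pair of checks suffices here because the netlink parser above already rejects duplicate devices with -EEXIST. A sketch of the same algorithm over plain arrays:

    #include <stdbool.h>
    #include <stdio.h>

    static bool contains(const int *set, int len, int v)
    {
        for (int i = 0; i < len; i++)
            if (set[i] == v)
                return true;
        return false;
    }

    /* Membership plus matching cardinality equals set equality,
     * assuming neither array holds duplicates. */
    static bool sets_equal(const int *a, int na, const int *b, int nb)
    {
        for (int i = 0; i < nb; i++)
            if (!contains(a, na, b[i]))
                return false;
        return na == nb;
    }

    int main(void)
    {
        int a[] = { 1, 2, 3 }, b[] = { 3, 1, 2 }, c[] = { 1, 2 };

        printf("%d %d\n", sets_equal(a, 3, b, 3),   /* 1 */
                          sets_equal(a, 3, c, 2));  /* 0 */
        return 0;
    }
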
+
static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy,
u32 flags)
{
@@ -1762,12 +2022,19 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy,
return -EBUSY;
}
- ops = &basechain->ops;
- if (ops->hooknum != hook.num ||
- ops->priority != hook.priority ||
- ops->dev != hook.dev) {
- nft_chain_release_hook(&hook);
- return -EBUSY;
+ if (ctx->family == NFPROTO_NETDEV) {
+ if (!nft_hook_list_equal(&basechain->hook_list,
+ &hook.list)) {
+ nft_chain_release_hook(&hook);
+ return -EBUSY;
+ }
+ } else {
+ ops = &basechain->ops;
+ if (ops->hooknum != hook.num ||
+ ops->priority != hook.priority) {
+ nft_chain_release_hook(&hook);
+ return -EBUSY;
+ }
}
nft_chain_release_hook(&hook);
}
@@ -5580,6 +5847,7 @@ static const struct nla_policy nft_flowtable_policy[NFTA_FLOWTABLE_MAX + 1] = {
.len = NFT_NAME_MAXLEN - 1 },
[NFTA_FLOWTABLE_HOOK] = { .type = NLA_NESTED },
[NFTA_FLOWTABLE_HANDLE] = { .type = NLA_U64 },
+ [NFTA_FLOWTABLE_FLAGS] = { .type = NLA_U32 },
};
struct nft_flowtable *nft_flowtable_lookup(const struct nft_table *table,
@@ -5626,43 +5894,6 @@ nft_flowtable_lookup_byhandle(const struct nft_table *table,
return ERR_PTR(-ENOENT);
}
-static int nf_tables_parse_devices(const struct nft_ctx *ctx,
- const struct nlattr *attr,
- struct net_device *dev_array[], int *len)
-{
- const struct nlattr *tmp;
- struct net_device *dev;
- char ifname[IFNAMSIZ];
- int rem, n = 0, err;
-
- nla_for_each_nested(tmp, attr, rem) {
- if (nla_type(tmp) != NFTA_DEVICE_NAME) {
- err = -EINVAL;
- goto err1;
- }
-
- nla_strlcpy(ifname, tmp, IFNAMSIZ);
- dev = __dev_get_by_name(ctx->net, ifname);
- if (!dev) {
- err = -ENOENT;
- goto err1;
- }
-
- dev_array[n++] = dev;
- if (n == NFT_FLOWTABLE_DEVICE_MAX) {
- err = -EFBIG;
- goto err1;
- }
- }
- if (!len)
- return -EINVAL;
-
- err = 0;
-err1:
- *len = n;
- return err;
-}
-
static const struct nla_policy nft_flowtable_hook_policy[NFTA_FLOWTABLE_HOOK_MAX + 1] = {
[NFTA_FLOWTABLE_HOOK_NUM] = { .type = NLA_U32 },
[NFTA_FLOWTABLE_HOOK_PRIORITY] = { .type = NLA_U32 },
@@ -5673,11 +5904,10 @@ static int nf_tables_flowtable_parse_hook(const struct nft_ctx *ctx,
const struct nlattr *attr,
struct nft_flowtable *flowtable)
{
- struct net_device *dev_array[NFT_FLOWTABLE_DEVICE_MAX];
struct nlattr *tb[NFTA_FLOWTABLE_HOOK_MAX + 1];
- struct nf_hook_ops *ops;
+ struct nft_hook *hook;
int hooknum, priority;
- int err, n = 0, i;
+ int err;
err = nla_parse_nested_deprecated(tb, NFTA_FLOWTABLE_HOOK_MAX, attr,
nft_flowtable_hook_policy, NULL);
@@ -5695,27 +5925,21 @@ static int nf_tables_flowtable_parse_hook(const struct nft_ctx *ctx,
priority = ntohl(nla_get_be32(tb[NFTA_FLOWTABLE_HOOK_PRIORITY]));
- err = nf_tables_parse_devices(ctx, tb[NFTA_FLOWTABLE_HOOK_DEVS],
- dev_array, &n);
+ err = nf_tables_parse_netdev_hooks(ctx->net,
+ tb[NFTA_FLOWTABLE_HOOK_DEVS],
+ &flowtable->hook_list);
if (err < 0)
return err;
- ops = kcalloc(n, sizeof(struct nf_hook_ops), GFP_KERNEL);
- if (!ops)
- return -ENOMEM;
-
- flowtable->hooknum = hooknum;
- flowtable->priority = priority;
- flowtable->ops = ops;
- flowtable->ops_len = n;
+ flowtable->hooknum = hooknum;
+ flowtable->data.priority = priority;
- for (i = 0; i < n; i++) {
- flowtable->ops[i].pf = NFPROTO_NETDEV;
- flowtable->ops[i].hooknum = hooknum;
- flowtable->ops[i].priority = priority;
- flowtable->ops[i].priv = &flowtable->data;
- flowtable->ops[i].hook = flowtable->data.type->hook;
- flowtable->ops[i].dev = dev_array[i];
+ list_for_each_entry(hook, &flowtable->hook_list, list) {
+ hook->ops.pf = NFPROTO_NETDEV;
+ hook->ops.hooknum = hooknum;
+ hook->ops.priority = priority;
+ hook->ops.priv = &flowtable->data;
+ hook->ops.hook = flowtable->data.type->hook;
}
return err;
@@ -5752,17 +5976,73 @@ nft_flowtable_type_get(struct net *net, u8 family)
return ERR_PTR(-ENOENT);
}
+static void nft_unregister_flowtable_hook(struct net *net,
+ struct nft_flowtable *flowtable,
+ struct nft_hook *hook)
+{
+ nf_unregister_net_hook(net, &hook->ops);
+ flowtable->data.type->setup(&flowtable->data, hook->ops.dev,
+ FLOW_BLOCK_UNBIND);
+}
+
static void nft_unregister_flowtable_net_hooks(struct net *net,
struct nft_flowtable *flowtable)
{
- int i;
+ struct nft_hook *hook;
- for (i = 0; i < flowtable->ops_len; i++) {
- if (!flowtable->ops[i].dev)
- continue;
+ list_for_each_entry(hook, &flowtable->hook_list, list)
+ nft_unregister_flowtable_hook(net, flowtable, hook);
+}
+
+static int nft_register_flowtable_net_hooks(struct net *net,
+ struct nft_table *table,
+ struct nft_flowtable *flowtable)
+{
+ struct nft_hook *hook, *hook2, *next;
+ struct nft_flowtable *ft;
+ int err, i = 0;
+
+ list_for_each_entry(hook, &flowtable->hook_list, list) {
+ list_for_each_entry(ft, &table->flowtables, list) {
+ list_for_each_entry(hook2, &ft->hook_list, list) {
+ if (hook->ops.dev == hook2->ops.dev &&
+ hook->ops.pf == hook2->ops.pf) {
+ err = -EBUSY;
+ goto err_unregister_net_hooks;
+ }
+ }
+ }
+
+ err = flowtable->data.type->setup(&flowtable->data,
+ hook->ops.dev,
+ FLOW_BLOCK_BIND);
+ if (err < 0)
+ goto err_unregister_net_hooks;
+
+ err = nf_register_net_hook(net, &hook->ops);
+ if (err < 0) {
+ flowtable->data.type->setup(&flowtable->data,
+ hook->ops.dev,
+ FLOW_BLOCK_UNBIND);
+ goto err_unregister_net_hooks;
+ }
+
+ i++;
+ }
+
+ return 0;
- nf_unregister_net_hook(net, &flowtable->ops[i]);
+err_unregister_net_hooks:
+ list_for_each_entry_safe(hook, next, &flowtable->hook_list, list) {
+ if (i-- <= 0)
+ break;
+
+ nft_unregister_flowtable_hook(net, flowtable, hook);
+ list_del_rcu(&hook->list);
+ kfree_rcu(hook, rcu);
}
+
+ return err;
}
static int nf_tables_newflowtable(struct net *net, struct sock *nlsk,
@@ -5773,12 +6053,13 @@ static int nf_tables_newflowtable(struct net *net, struct sock *nlsk,
{
const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
const struct nf_flowtable_type *type;
- struct nft_flowtable *flowtable, *ft;
u8 genmask = nft_genmask_next(net);
int family = nfmsg->nfgen_family;
+ struct nft_flowtable *flowtable;
+ struct nft_hook *hook, *next;
struct nft_table *table;
struct nft_ctx ctx;
- int err, i, k;
+ int err;
if (!nla[NFTA_FLOWTABLE_TABLE] ||
!nla[NFTA_FLOWTABLE_NAME] ||
@@ -5817,6 +6098,7 @@ static int nf_tables_newflowtable(struct net *net, struct sock *nlsk,
flowtable->table = table;
flowtable->handle = nf_tables_alloc_handle(table);
+ INIT_LIST_HEAD(&flowtable->hook_list);
flowtable->name = nla_strdup(nla[NFTA_FLOWTABLE_NAME], GFP_KERNEL);
if (!flowtable->name) {
@@ -5830,6 +6112,14 @@ static int nf_tables_newflowtable(struct net *net, struct sock *nlsk,
goto err2;
}
+ if (nla[NFTA_FLOWTABLE_FLAGS]) {
+ flowtable->data.flags =
+ ntohl(nla_get_be32(nla[NFTA_FLOWTABLE_FLAGS]));
+ if (flowtable->data.flags & ~NF_FLOWTABLE_HW_OFFLOAD)
+ goto err3;
+ }
+
+ write_pnet(&flowtable->data.net, net);
flowtable->data.type = type;
err = type->init(&flowtable->data);
if (err < 0)
@@ -5840,43 +6130,24 @@ static int nf_tables_newflowtable(struct net *net, struct sock *nlsk,
if (err < 0)
goto err4;
- for (i = 0; i < flowtable->ops_len; i++) {
- if (!flowtable->ops[i].dev)
- continue;
-
- list_for_each_entry(ft, &table->flowtables, list) {
- for (k = 0; k < ft->ops_len; k++) {
- if (!ft->ops[k].dev)
- continue;
-
- if (flowtable->ops[i].dev == ft->ops[k].dev &&
- flowtable->ops[i].pf == ft->ops[k].pf) {
- err = -EBUSY;
- goto err5;
- }
- }
- }
-
- err = nf_register_net_hook(net, &flowtable->ops[i]);
- if (err < 0)
- goto err5;
- }
+ err = nft_register_flowtable_net_hooks(ctx.net, table, flowtable);
+ if (err < 0)
+ goto err4;
err = nft_trans_flowtable_add(&ctx, NFT_MSG_NEWFLOWTABLE, flowtable);
if (err < 0)
- goto err6;
+ goto err5;
list_add_tail_rcu(&flowtable->list, &table->flowtables);
table->use++;
return 0;
-err6:
- i = flowtable->ops_len;
err5:
- for (k = i - 1; k >= 0; k--)
- nf_unregister_net_hook(net, &flowtable->ops[k]);
-
- kfree(flowtable->ops);
+ list_for_each_entry_safe(hook, next, &flowtable->hook_list, list) {
+ nft_unregister_flowtable_hook(net, flowtable, hook);
+ list_del_rcu(&hook->list);
+ kfree_rcu(hook, rcu);
+ }
err4:
flowtable->data.type->free(&flowtable->data);
err3:
@@ -5943,8 +6214,8 @@ static int nf_tables_fill_flowtable_info(struct sk_buff *skb, struct net *net,
{
struct nlattr *nest, *nest_devs;
struct nfgenmsg *nfmsg;
+ struct nft_hook *hook;
struct nlmsghdr *nlh;
- int i;
event = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event);
nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct nfgenmsg), flags);
@@ -5960,25 +6231,23 @@ static int nf_tables_fill_flowtable_info(struct sk_buff *skb, struct net *net,
nla_put_string(skb, NFTA_FLOWTABLE_NAME, flowtable->name) ||
nla_put_be32(skb, NFTA_FLOWTABLE_USE, htonl(flowtable->use)) ||
nla_put_be64(skb, NFTA_FLOWTABLE_HANDLE, cpu_to_be64(flowtable->handle),
- NFTA_FLOWTABLE_PAD))
+ NFTA_FLOWTABLE_PAD) ||
+ nla_put_be32(skb, NFTA_FLOWTABLE_FLAGS, htonl(flowtable->data.flags)))
goto nla_put_failure;
nest = nla_nest_start_noflag(skb, NFTA_FLOWTABLE_HOOK);
if (!nest)
goto nla_put_failure;
if (nla_put_be32(skb, NFTA_FLOWTABLE_HOOK_NUM, htonl(flowtable->hooknum)) ||
- nla_put_be32(skb, NFTA_FLOWTABLE_HOOK_PRIORITY, htonl(flowtable->priority)))
+ nla_put_be32(skb, NFTA_FLOWTABLE_HOOK_PRIORITY, htonl(flowtable->data.priority)))
goto nla_put_failure;
nest_devs = nla_nest_start_noflag(skb, NFTA_FLOWTABLE_HOOK_DEVS);
if (!nest_devs)
goto nla_put_failure;
- for (i = 0; i < flowtable->ops_len; i++) {
- const struct net_device *dev = READ_ONCE(flowtable->ops[i].dev);
-
- if (dev &&
- nla_put_string(skb, NFTA_DEVICE_NAME, dev->name))
+ list_for_each_entry_rcu(hook, &flowtable->hook_list, list) {
+ if (nla_put_string(skb, NFTA_DEVICE_NAME, hook->ops.dev->name))
goto nla_put_failure;
}
nla_nest_end(skb, nest_devs);
@@ -6169,7 +6438,12 @@ err:
static void nf_tables_flowtable_destroy(struct nft_flowtable *flowtable)
{
- kfree(flowtable->ops);
+ struct nft_hook *hook, *next;
+
+ list_for_each_entry_safe(hook, next, &flowtable->hook_list, list) {
+ list_del_rcu(&hook->list);
+ kfree(hook);
+ }
kfree(flowtable->name);
flowtable->data.type->free(&flowtable->data);
module_put(flowtable->data.type->owner);
@@ -6209,14 +6483,15 @@ nla_put_failure:
static void nft_flowtable_event(unsigned long event, struct net_device *dev,
struct nft_flowtable *flowtable)
{
- int i;
+ struct nft_hook *hook;
- for (i = 0; i < flowtable->ops_len; i++) {
- if (flowtable->ops[i].dev != dev)
+ list_for_each_entry(hook, &flowtable->hook_list, list) {
+ if (hook->ops.dev != dev)
continue;
- nf_unregister_net_hook(dev_net(dev), &flowtable->ops[i]);
- flowtable->ops[i].dev = NULL;
+ nft_unregister_flowtable_hook(dev_net(dev), flowtable, hook);
+ list_del_rcu(&hook->list);
+ kfree_rcu(hook, rcu);
break;
}
}
diff --git a/net/netfilter/nf_tables_offload.c b/net/netfilter/nf_tables_offload.c
index e25dab8128db..68f17a6921d8 100644
--- a/net/netfilter/nf_tables_offload.c
+++ b/net/netfilter/nf_tables_offload.c
@@ -132,13 +132,13 @@ static void nft_flow_offload_common_init(struct flow_cls_common_offload *common,
common->extack = extack;
}
-static int nft_setup_cb_call(struct nft_base_chain *basechain,
- enum tc_setup_type type, void *type_data)
+static int nft_setup_cb_call(enum tc_setup_type type, void *type_data,
+ struct list_head *cb_list)
{
struct flow_block_cb *block_cb;
int err;
- list_for_each_entry(block_cb, &basechain->flow_block.cb_list, list) {
+ list_for_each_entry(block_cb, cb_list, list) {
err = block_cb->cb(type, type_data, block_cb->cb_priv);
if (err < 0)
return err;
@@ -155,32 +155,46 @@ int nft_chain_offload_priority(struct nft_base_chain *basechain)
return 0;
}
+static void nft_flow_cls_offload_setup(struct flow_cls_offload *cls_flow,
+ const struct nft_base_chain *basechain,
+ const struct nft_rule *rule,
+ const struct nft_flow_rule *flow,
+ struct netlink_ext_ack *extack,
+ enum flow_cls_command command)
+{
+ __be16 proto = ETH_P_ALL;
+
+ memset(cls_flow, 0, sizeof(*cls_flow));
+
+ if (flow)
+ proto = flow->proto;
+
+ nft_flow_offload_common_init(&cls_flow->common, proto,
+ basechain->ops.priority, extack);
+ cls_flow->command = command;
+ cls_flow->cookie = (unsigned long) rule;
+ if (flow)
+ cls_flow->rule = flow->rule;
+}
+
static int nft_flow_offload_rule(struct nft_chain *chain,
struct nft_rule *rule,
struct nft_flow_rule *flow,
enum flow_cls_command command)
{
- struct flow_cls_offload cls_flow = {};
+ struct netlink_ext_ack extack = {};
+ struct flow_cls_offload cls_flow;
struct nft_base_chain *basechain;
- struct netlink_ext_ack extack;
- __be16 proto = ETH_P_ALL;
if (!nft_is_base_chain(chain))
return -EOPNOTSUPP;
basechain = nft_base_chain(chain);
+ nft_flow_cls_offload_setup(&cls_flow, basechain, rule, flow, &extack,
+ command);
- if (flow)
- proto = flow->proto;
-
- nft_flow_offload_common_init(&cls_flow.common, proto,
- basechain->ops.priority, &extack);
- cls_flow.command = command;
- cls_flow.cookie = (unsigned long) rule;
- if (flow)
- cls_flow.rule = flow->rule;
-
- return nft_setup_cb_call(basechain, TC_SETUP_CLSFLOWER, &cls_flow);
+ return nft_setup_cb_call(TC_SETUP_CLSFLOWER, &cls_flow,
+ &basechain->flow_block.cb_list);
}
static int nft_flow_offload_bind(struct flow_block_offload *bo,
@@ -194,6 +208,18 @@ static int nft_flow_offload_unbind(struct flow_block_offload *bo,
struct nft_base_chain *basechain)
{
struct flow_block_cb *block_cb, *next;
+ struct flow_cls_offload cls_flow;
+ struct netlink_ext_ack extack;
+ struct nft_chain *chain;
+ struct nft_rule *rule;
+
+ chain = &basechain->chain;
+ list_for_each_entry(rule, &chain->rules, list) {
+ memset(&extack, 0, sizeof(extack));
+ nft_flow_cls_offload_setup(&cls_flow, basechain, rule, NULL,
+ &extack, FLOW_CLS_DESTROY);
+ nft_setup_cb_call(TC_SETUP_CLSFLOWER, &cls_flow, &bo->cb_list);
+ }
list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
list_del(&block_cb->list);
@@ -224,20 +250,30 @@ static int nft_block_setup(struct nft_base_chain *basechain,
return err;
}
+static void nft_flow_block_offload_init(struct flow_block_offload *bo,
+ struct net *net,
+ enum flow_block_command cmd,
+ struct nft_base_chain *basechain,
+ struct netlink_ext_ack *extack)
+{
+ memset(bo, 0, sizeof(*bo));
+ bo->net = net;
+ bo->block = &basechain->flow_block;
+ bo->command = cmd;
+ bo->binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
+ bo->extack = extack;
+ INIT_LIST_HEAD(&bo->cb_list);
+}
+
static int nft_block_offload_cmd(struct nft_base_chain *chain,
struct net_device *dev,
enum flow_block_command cmd)
{
struct netlink_ext_ack extack = {};
- struct flow_block_offload bo = {};
+ struct flow_block_offload bo;
int err;
- bo.net = dev_net(dev);
- bo.block = &chain->flow_block;
- bo.command = cmd;
- bo.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
- bo.extack = &extack;
- INIT_LIST_HEAD(&bo.cb_list);
+ nft_flow_block_offload_init(&bo, dev_net(dev), cmd, chain, &extack);
err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
if (err < 0)
@@ -253,17 +289,12 @@ static void nft_indr_block_ing_cmd(struct net_device *dev,
enum flow_block_command cmd)
{
struct netlink_ext_ack extack = {};
- struct flow_block_offload bo = {};
+ struct flow_block_offload bo;
if (!chain)
return;
- bo.net = dev_net(dev);
- bo.block = &chain->flow_block;
- bo.command = cmd;
- bo.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
- bo.extack = &extack;
- INIT_LIST_HEAD(&bo.cb_list);
+ nft_flow_block_offload_init(&bo, dev_net(dev), cmd, chain, &extack);
cb(dev, cb_priv, TC_SETUP_BLOCK, &bo);
@@ -274,15 +305,10 @@ static int nft_indr_block_offload_cmd(struct nft_base_chain *chain,
struct net_device *dev,
enum flow_block_command cmd)
{
- struct flow_block_offload bo = {};
struct netlink_ext_ack extack = {};
+ struct flow_block_offload bo;
- bo.net = dev_net(dev);
- bo.block = &chain->flow_block;
- bo.command = cmd;
- bo.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
- bo.extack = &extack;
- INIT_LIST_HEAD(&bo.cb_list);
+ nft_flow_block_offload_init(&bo, dev_net(dev), cmd, chain, &extack);
flow_indr_block_call(dev, &bo, cmd);
@@ -294,32 +320,122 @@ static int nft_indr_block_offload_cmd(struct nft_base_chain *chain,
#define FLOW_SETUP_BLOCK TC_SETUP_BLOCK
-static int nft_flow_offload_chain(struct nft_chain *chain,
- u8 *ppolicy,
+static int nft_chain_offload_cmd(struct nft_base_chain *basechain,
+ struct net_device *dev,
+ enum flow_block_command cmd)
+{
+ int err;
+
+ if (dev->netdev_ops->ndo_setup_tc)
+ err = nft_block_offload_cmd(basechain, dev, cmd);
+ else
+ err = nft_indr_block_offload_cmd(basechain, dev, cmd);
+
+ return err;
+}
+
+static int nft_flow_block_chain(struct nft_base_chain *basechain,
+ const struct net_device *this_dev,
+ enum flow_block_command cmd)
+{
+ struct net_device *dev;
+ struct nft_hook *hook;
+ int err, i = 0;
+
+ list_for_each_entry(hook, &basechain->hook_list, list) {
+ dev = hook->ops.dev;
+ if (this_dev && this_dev != dev)
+ continue;
+
+ err = nft_chain_offload_cmd(basechain, dev, cmd);
+ if (err < 0 && cmd == FLOW_BLOCK_BIND) {
+ if (!this_dev)
+ goto err_flow_block;
+
+ return err;
+ }
+ i++;
+ }
+
+ return 0;
+
+err_flow_block:
+ list_for_each_entry(hook, &basechain->hook_list, list) {
+ if (i-- <= 0)
+ break;
+
+ dev = hook->ops.dev;
+ nft_chain_offload_cmd(basechain, dev, FLOW_BLOCK_UNBIND);
+ }
+ return err;
+}
+
+static int nft_flow_offload_chain(struct nft_chain *chain, u8 *ppolicy,
enum flow_block_command cmd)
{
struct nft_base_chain *basechain;
- struct net_device *dev;
u8 policy;
if (!nft_is_base_chain(chain))
return -EOPNOTSUPP;
basechain = nft_base_chain(chain);
- dev = basechain->ops.dev;
- if (!dev)
- return -EOPNOTSUPP;
-
policy = ppolicy ? *ppolicy : basechain->policy;
/* Only default policy to accept is supported for now. */
if (cmd == FLOW_BLOCK_BIND && policy == NF_DROP)
return -EOPNOTSUPP;
- if (dev->netdev_ops->ndo_setup_tc)
- return nft_block_offload_cmd(basechain, dev, cmd);
- else
- return nft_indr_block_offload_cmd(basechain, dev, cmd);
+ return nft_flow_block_chain(basechain, NULL, cmd);
+}
+
+static void nft_flow_rule_offload_abort(struct net *net,
+ struct nft_trans *trans)
+{
+ int err = 0;
+
+ list_for_each_entry_continue_reverse(trans, &net->nft.commit_list, list) {
+ if (trans->ctx.family != NFPROTO_NETDEV)
+ continue;
+
+ switch (trans->msg_type) {
+ case NFT_MSG_NEWCHAIN:
+ if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD) ||
+ nft_trans_chain_update(trans))
+ continue;
+
+ err = nft_flow_offload_chain(trans->ctx.chain, NULL,
+ FLOW_BLOCK_UNBIND);
+ break;
+ case NFT_MSG_DELCHAIN:
+ if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
+ continue;
+
+ err = nft_flow_offload_chain(trans->ctx.chain, NULL,
+ FLOW_BLOCK_BIND);
+ break;
+ case NFT_MSG_NEWRULE:
+ if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
+ continue;
+
+ err = nft_flow_offload_rule(trans->ctx.chain,
+ nft_trans_rule(trans),
+ NULL, FLOW_CLS_DESTROY);
+ break;
+ case NFT_MSG_DELRULE:
+ if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
+ continue;
+
+ err = nft_flow_offload_rule(trans->ctx.chain,
+ nft_trans_rule(trans),
+ nft_trans_flow_rule(trans),
+ FLOW_CLS_REPLACE);
+ break;
+ }
+
+ if (WARN_ON_ONCE(err))
+ break;
+ }
}
int nft_flow_rule_offload_commit(struct net *net)
@@ -355,14 +471,14 @@ int nft_flow_rule_offload_commit(struct net *net)
continue;
if (trans->ctx.flags & NLM_F_REPLACE ||
- !(trans->ctx.flags & NLM_F_APPEND))
- return -EOPNOTSUPP;
-
+ !(trans->ctx.flags & NLM_F_APPEND)) {
+ err = -EOPNOTSUPP;
+ break;
+ }
err = nft_flow_offload_rule(trans->ctx.chain,
nft_trans_rule(trans),
nft_trans_flow_rule(trans),
FLOW_CLS_REPLACE);
- nft_flow_rule_destroy(nft_trans_flow_rule(trans));
break;
case NFT_MSG_DELRULE:
if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
@@ -370,13 +486,31 @@ int nft_flow_rule_offload_commit(struct net *net)
err = nft_flow_offload_rule(trans->ctx.chain,
nft_trans_rule(trans),
- nft_trans_flow_rule(trans),
- FLOW_CLS_DESTROY);
+ NULL, FLOW_CLS_DESTROY);
break;
}
- if (err)
- return err;
+ if (err) {
+ nft_flow_rule_offload_abort(net, trans);
+ break;
+ }
+ }
+
+ list_for_each_entry(trans, &net->nft.commit_list, list) {
+ if (trans->ctx.family != NFPROTO_NETDEV)
+ continue;
+
+ switch (trans->msg_type) {
+ case NFT_MSG_NEWRULE:
+ case NFT_MSG_DELRULE:
+ if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
+ continue;
+
+ nft_flow_rule_destroy(nft_trans_flow_rule(trans));
+ break;
+ default:
+ break;
+ }
}
return err;
@@ -386,6 +520,7 @@ static struct nft_chain *__nft_offload_get_chain(struct net_device *dev)
{
struct nft_base_chain *basechain;
struct net *net = dev_net(dev);
+ struct nft_hook *hook, *found;
const struct nft_table *table;
struct nft_chain *chain;
@@ -398,8 +533,16 @@ static struct nft_chain *__nft_offload_get_chain(struct net_device *dev)
!(chain->flags & NFT_CHAIN_HW_OFFLOAD))
continue;
+ found = NULL;
basechain = nft_base_chain(chain);
- if (strncmp(basechain->dev_name, dev->name, IFNAMSIZ))
+ list_for_each_entry(hook, &basechain->hook_list, list) {
+ if (hook->ops.dev != dev)
+ continue;
+
+ found = hook;
+ break;
+ }
+ if (!found)
continue;
return chain;
@@ -427,18 +570,6 @@ static void nft_indr_block_cb(struct net_device *dev,
mutex_unlock(&net->nft.commit_mutex);
}
-static void nft_offload_chain_clean(struct nft_chain *chain)
-{
- struct nft_rule *rule;
-
- list_for_each_entry(rule, &chain->rules, list) {
- nft_flow_offload_rule(chain, rule,
- NULL, FLOW_CLS_DESTROY);
- }
-
- nft_flow_offload_chain(chain, NULL, FLOW_BLOCK_UNBIND);
-}
-
static int nft_offload_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
@@ -449,7 +580,9 @@ static int nft_offload_netdev_event(struct notifier_block *this,
mutex_lock(&net->nft.commit_mutex);
chain = __nft_offload_get_chain(dev);
if (chain)
- nft_offload_chain_clean(chain);
+ nft_flow_block_chain(nft_base_chain(chain), dev,
+ FLOW_BLOCK_UNBIND);
+
mutex_unlock(&net->nft.commit_mutex);
return NOTIFY_DONE;
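The unwind in nft_flow_block_chain() above is the classic count-and-rollback idiom: count each successful bind, and on failure walk the list again, undoing only that many. A minimal userspace sketch of the same shape; do_bind()/do_unbind()/NDEVS are hypothetical stand-ins for nft_chain_offload_cmd() and the hook list, not kernel API:

	#include <stdio.h>

	#define NDEVS 4

	static int do_bind(int dev)    { return dev == 2 ? -1 : 0; } /* dev 2 fails */
	static void do_unbind(int dev) { printf("unbind %d\n", dev); }

	static int bind_all(void)
	{
		int i, n = 0;

		for (i = 0; i < NDEVS; i++) {
			if (do_bind(i) < 0)
				goto err_rollback;
			n++;
		}
		return 0;

	err_rollback:
		/* Undo only the binds that succeeded, mirroring err_flow_block. */
		while (n-- > 0)
			do_unbind(n);
		return -1;
	}

	int main(void)
	{
		return bind_all() ? 1 : 0;
	}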
diff --git a/net/netfilter/nft_chain_filter.c b/net/netfilter/nft_chain_filter.c
index b5d5d071d765..c78d01bc02e9 100644
--- a/net/netfilter/nft_chain_filter.c
+++ b/net/netfilter/nft_chain_filter.c
@@ -287,28 +287,35 @@ static void nft_netdev_event(unsigned long event, struct net_device *dev,
struct nft_ctx *ctx)
{
struct nft_base_chain *basechain = nft_base_chain(ctx->chain);
+ struct nft_hook *hook, *found = NULL;
+ int n = 0;
- switch (event) {
- case NETDEV_UNREGISTER:
- if (strcmp(basechain->dev_name, dev->name) != 0)
- return;
-
- /* UNREGISTER events are also happpening on netns exit.
- *
- * Altough nf_tables core releases all tables/chains, only
- * this event handler provides guarantee that
- * basechain.ops->dev is still accessible, so we cannot
- * skip exiting net namespaces.
- */
- __nft_release_basechain(ctx);
- break;
- case NETDEV_CHANGENAME:
- if (dev->ifindex != basechain->ops.dev->ifindex)
- return;
+ if (event != NETDEV_UNREGISTER)
+ return;
- strncpy(basechain->dev_name, dev->name, IFNAMSIZ);
- break;
+ list_for_each_entry(hook, &basechain->hook_list, list) {
+ if (hook->ops.dev == dev)
+ found = hook;
+
+ n++;
}
+ if (!found)
+ return;
+
+ if (n > 1) {
+ nf_unregister_net_hook(ctx->net, &found->ops);
+ list_del_rcu(&found->list);
+ kfree_rcu(found, rcu);
+ return;
+ }
+
+ /* UNREGISTER events are also happening on netns exit.
+ *
+ * Although nf_tables core releases all tables/chains, only this event
+ * handler provides a guarantee that hook->ops.dev is still accessible,
+ * so we cannot skip exiting net namespaces.
+ */
+ __nft_release_basechain(ctx);
}
static int nf_tables_netdev_event(struct notifier_block *this,
diff --git a/net/netfilter/nft_cmp.c b/net/netfilter/nft_cmp.c
index 0744b2bb46da..b8092069f868 100644
--- a/net/netfilter/nft_cmp.c
+++ b/net/netfilter/nft_cmp.c
@@ -10,6 +10,7 @@
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
+#include <linux/if_arp.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
#include <net/netfilter/nf_tables_offload.h>
@@ -125,6 +126,11 @@ static int __nft_cmp_offload(struct nft_offload_ctx *ctx,
flow->match.dissector.used_keys |= BIT(reg->key);
flow->match.dissector.offset[reg->key] = reg->base_offset;
+ if (reg->key == FLOW_DISSECTOR_KEY_META &&
+ reg->offset == offsetof(struct nft_flow_key, meta.ingress_iftype) &&
+ nft_reg_load16(priv->data.data) != ARPHRD_ETHER)
+ return -EOPNOTSUPP;
+
nft_offload_update_dependency(ctx, &priv->data, priv->len);
return 0;
diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c
index f29bbc74c4bf..dd82ff2ee19f 100644
--- a/net/netfilter/nft_flow_offload.c
+++ b/net/netfilter/nft_flow_offload.c
@@ -115,10 +115,13 @@ static void nft_flow_offload_eval(const struct nft_expr *expr,
if (nft_flow_route(pkt, ct, &route, dir) < 0)
goto err_flow_route;
- flow = flow_offload_alloc(ct, &route);
+ flow = flow_offload_alloc(ct);
if (!flow)
goto err_flow_alloc;
+ if (flow_offload_route_init(flow, &route) < 0)
+ goto err_flow_add;
+
if (tcph) {
ct->proto.tcp.seen[0].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
ct->proto.tcp.seen[1].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c
index 317e3a9e8c5b..9740b554fdb3 100644
--- a/net/netfilter/nft_meta.c
+++ b/net/netfilter/nft_meta.c
@@ -33,19 +33,19 @@
static DEFINE_PER_CPU(struct rnd_state, nft_prandom_state);
-static u8 nft_meta_weekday(unsigned long secs)
+static u8 nft_meta_weekday(time64_t secs)
{
unsigned int dse;
u8 wday;
secs -= NFT_META_SECS_PER_MINUTE * sys_tz.tz_minuteswest;
- dse = secs / NFT_META_SECS_PER_DAY;
+ dse = div_u64(secs, NFT_META_SECS_PER_DAY);
wday = (4 + dse) % NFT_META_DAYS_PER_WEEK;
return wday;
}
-static u32 nft_meta_hour(unsigned long secs)
+static u32 nft_meta_hour(time64_t secs)
{
struct tm tm;
@@ -250,10 +250,10 @@ void nft_meta_get_eval(const struct nft_expr *expr,
nft_reg_store64(dest, ktime_get_real_ns());
break;
case NFT_META_TIME_DAY:
- nft_reg_store8(dest, nft_meta_weekday(get_seconds()));
+ nft_reg_store8(dest, nft_meta_weekday(ktime_get_real_seconds()));
break;
case NFT_META_TIME_HOUR:
- *dest = nft_meta_hour(get_seconds());
+ *dest = nft_meta_hour(ktime_get_real_seconds());
break;
default:
WARN_ON(1);
@@ -547,6 +547,14 @@ static int nft_meta_get_offload(struct nft_offload_ctx *ctx,
sizeof(__u8), reg);
nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_TRANSPORT);
break;
+ case NFT_META_IIF:
+ NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_META, meta,
+ ingress_ifindex, sizeof(__u32), reg);
+ break;
+ case NFT_META_IIFTYPE:
+ NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_META, meta,
+ ingress_iftype, sizeof(__u16), reg);
+ break;
default:
return -EOPNOTSUPP;
}
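nft_meta_weekday() above relies on 1970-01-01 having been a Thursday, hence the "4 +" offset before the modulo. A standalone sketch of the same arithmetic, with plain 64-bit division standing in for the kernel's div_u64() (assumes non-negative seconds, as the kernel code does):

	#include <stdio.h>
	#include <stdint.h>

	#define SECS_PER_DAY 86400

	/* Returns 0 = Sunday .. 6 = Saturday. */
	static unsigned int weekday(int64_t secs)
	{
		uint32_t dse = (uint64_t)secs / SECS_PER_DAY; /* days since epoch */

		return (4 + dse) % 7; /* epoch day (dse == 0) was a Thursday */
	}

	int main(void)
	{
		printf("%u\n", weekday(0));                    /* 4, Thursday */
		printf("%u\n", weekday(3LL * SECS_PER_DAY));   /* 0, Sunday   */
		return 0;
	}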
diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c
index 5cb2d8908d2a..1993af3a2979 100644
--- a/net/netfilter/nft_payload.c
+++ b/net/netfilter/nft_payload.c
@@ -23,50 +23,58 @@
#include <linux/ip.h>
#include <linux/ipv6.h>
+static bool nft_payload_rebuild_vlan_hdr(const struct sk_buff *skb, int mac_off,
+ struct vlan_ethhdr *veth)
+{
+ if (skb_copy_bits(skb, mac_off, veth, ETH_HLEN))
+ return false;
+
+ veth->h_vlan_proto = skb->vlan_proto;
+ veth->h_vlan_TCI = htons(skb_vlan_tag_get(skb));
+ veth->h_vlan_encapsulated_proto = skb->protocol;
+
+ return true;
+}
+
/* add vlan header into the user buffer if the tag was removed by offloads */
static bool
nft_payload_copy_vlan(u32 *d, const struct sk_buff *skb, u8 offset, u8 len)
{
int mac_off = skb_mac_header(skb) - skb->data;
- u8 vlan_len, *vlanh, *dst_u8 = (u8 *) d;
+ u8 *vlanh, *dst_u8 = (u8 *) d;
struct vlan_ethhdr veth;
+ u8 vlan_hlen = 0;
+
+ if ((skb->protocol == htons(ETH_P_8021AD) ||
+ skb->protocol == htons(ETH_P_8021Q)) &&
+ offset >= VLAN_ETH_HLEN && offset < VLAN_ETH_HLEN + VLAN_HLEN)
+ vlan_hlen += VLAN_HLEN;
vlanh = (u8 *) &veth;
- if (offset < ETH_HLEN) {
- u8 ethlen = min_t(u8, len, ETH_HLEN - offset);
+ if (offset < VLAN_ETH_HLEN + vlan_hlen) {
+ u8 ethlen = len;
- if (skb_copy_bits(skb, mac_off, &veth, ETH_HLEN))
+ if (vlan_hlen &&
+ skb_copy_bits(skb, mac_off, &veth, VLAN_ETH_HLEN) < 0)
+ return false;
+ else if (!nft_payload_rebuild_vlan_hdr(skb, mac_off, &veth))
return false;
- veth.h_vlan_proto = skb->vlan_proto;
+ if (offset + len > VLAN_ETH_HLEN + vlan_hlen)
+ ethlen -= offset + len - (VLAN_ETH_HLEN + vlan_hlen);
- memcpy(dst_u8, vlanh + offset, ethlen);
+ memcpy(dst_u8, vlanh + offset - vlan_hlen, ethlen);
len -= ethlen;
if (len == 0)
return true;
dst_u8 += ethlen;
- offset = ETH_HLEN;
- } else if (offset >= VLAN_ETH_HLEN) {
- offset -= VLAN_HLEN;
- goto skip;
+ offset = ETH_HLEN + vlan_hlen;
+ } else {
+ offset -= VLAN_HLEN + vlan_hlen;
}
- veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));
- veth.h_vlan_encapsulated_proto = skb->protocol;
-
- vlanh += offset;
-
- vlan_len = min_t(u8, len, VLAN_ETH_HLEN - offset);
- memcpy(dst_u8, vlanh, vlan_len);
-
- len -= vlan_len;
- if (!len)
- return true;
-
- dst_u8 += vlan_len;
- skip:
return skb_copy_bits(skb, offset + mac_off, dst_u8, len) == 0;
}
@@ -174,6 +182,44 @@ static int nft_payload_offload_ll(struct nft_offload_ctx *ctx,
NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_ETH_ADDRS, eth_addrs,
dst, ETH_ALEN, reg);
break;
+ case offsetof(struct ethhdr, h_proto):
+ if (priv->len != sizeof(__be16))
+ return -EOPNOTSUPP;
+
+ NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic,
+ n_proto, sizeof(__be16), reg);
+ nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_NETWORK);
+ break;
+ case offsetof(struct vlan_ethhdr, h_vlan_TCI):
+ if (priv->len != sizeof(__be16))
+ return -EOPNOTSUPP;
+
+ NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_VLAN, vlan,
+ vlan_tci, sizeof(__be16), reg);
+ break;
+ case offsetof(struct vlan_ethhdr, h_vlan_encapsulated_proto):
+ if (priv->len != sizeof(__be16))
+ return -EOPNOTSUPP;
+
+ NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_VLAN, vlan,
+ vlan_tpid, sizeof(__be16), reg);
+ nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_NETWORK);
+ break;
+ case offsetof(struct vlan_ethhdr, h_vlan_TCI) + sizeof(struct vlan_hdr):
+ if (priv->len != sizeof(__be16))
+ return -EOPNOTSUPP;
+
+ NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_CVLAN, vlan,
+ vlan_tci, sizeof(__be16), reg);
+ break;
+ case offsetof(struct vlan_ethhdr, h_vlan_encapsulated_proto) +
+ sizeof(struct vlan_hdr):
+ if (priv->len != sizeof(__be16))
+ return -EOPNOTSUPP;
+
+ NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_CVLAN, vlan,
+ vlan_tpid, sizeof(__be16), reg);
+ break;
default:
return -EOPNOTSUPP;
}
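nft_payload_rebuild_vlan_hdr() above re-synthesizes the 802.1Q header from skb metadata when hardware has stripped the tag before the packet reached the hook. A hedged userspace sketch of that reconstruction; struct vlan_eth and the sample values are made up for illustration, not the kernel's struct vlan_ethhdr:

	#include <stdio.h>
	#include <stdint.h>
	#include <string.h>
	#include <arpa/inet.h>

	/* Simplified stand-in for struct vlan_ethhdr; illustrative only. */
	struct vlan_eth {
		uint8_t  dst[6], src[6];
		uint16_t tpid;   /* tag protocol id, e.g. 0x8100 */
		uint16_t tci;    /* PCP/DEI/VID */
		uint16_t encap;  /* original EtherType */
	} __attribute__((packed));

	static void rebuild(struct vlan_eth *v, const uint8_t *mac_hdr,
			    uint16_t tpid, uint16_t tci, uint16_t proto)
	{
		memcpy(v, mac_hdr, 12);   /* dst + src MAC, as skb_copy_bits() does */
		v->tpid  = htons(tpid);   /* reinsert the offloaded tag */
		v->tci   = htons(tci);
		v->encap = htons(proto);
	}

	int main(void)
	{
		uint8_t hdr[14] = { 0 };
		struct vlan_eth v;

		rebuild(&v, hdr, 0x8100, 100, 0x0800);
		printf("tci=%u\n", ntohs(v.tci));
		return 0;
	}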
diff --git a/net/netfilter/xt_HMARK.c b/net/netfilter/xt_HMARK.c
index be7798a50546..713fb38541df 100644
--- a/net/netfilter/xt_HMARK.c
+++ b/net/netfilter/xt_HMARK.c
@@ -239,11 +239,7 @@ static int get_inner_hdr(const struct sk_buff *skb, int iphsz, int *nhoff)
return 0;
/* Error message? */
- if (icmph->type != ICMP_DEST_UNREACH &&
- icmph->type != ICMP_SOURCE_QUENCH &&
- icmph->type != ICMP_TIME_EXCEEDED &&
- icmph->type != ICMP_PARAMETERPROB &&
- icmph->type != ICMP_REDIRECT)
+ if (!icmp_is_err(icmph->type))
return 0;
*nhoff += iphsz + sizeof(_ih);
diff --git a/net/netfilter/xt_time.c b/net/netfilter/xt_time.c
index 8dbb4d48f2ed..67cb98489415 100644
--- a/net/netfilter/xt_time.c
+++ b/net/netfilter/xt_time.c
@@ -77,12 +77,12 @@ static inline bool is_leap(unsigned int y)
* This is done in three separate functions so that the most expensive
* calculations are done last, in case a "simple match" can be found earlier.
*/
-static inline unsigned int localtime_1(struct xtm *r, time_t time)
+static inline unsigned int localtime_1(struct xtm *r, time64_t time)
{
unsigned int v, w;
/* Each day has 86400s, so finding the hour/minute is actually easy. */
- v = time % SECONDS_PER_DAY;
+ div_u64_rem(time, SECONDS_PER_DAY, &v);
r->second = v % 60;
w = v / 60;
r->minute = w % 60;
@@ -90,13 +90,13 @@ static inline unsigned int localtime_1(struct xtm *r, time_t time)
return v;
}
-static inline void localtime_2(struct xtm *r, time_t time)
+static inline void localtime_2(struct xtm *r, time64_t time)
{
/*
* Here comes the rest (weekday, monthday). First, divide the SSTE
* by seconds-per-day to get the number of _days_ since the epoch.
*/
- r->dse = time / 86400;
+ r->dse = div_u64(time, SECONDS_PER_DAY);
/*
* 1970-01-01 (w=0) was a Thursday (4).
@@ -105,7 +105,7 @@ static inline void localtime_2(struct xtm *r, time_t time)
r->weekday = (4 + r->dse - 1) % 7 + 1;
}
-static void localtime_3(struct xtm *r, time_t time)
+static void localtime_3(struct xtm *r, time64_t time)
{
unsigned int year, i, w = r->dse;
@@ -160,7 +160,7 @@ time_mt(const struct sk_buff *skb, struct xt_action_param *par)
const struct xt_time_info *info = par->matchinfo;
unsigned int packet_time;
struct xtm current_time;
- s64 stamp;
+ time64_t stamp;
/*
* We need real time here, but we can neither use skb->tstamp
@@ -173,14 +173,14 @@ time_mt(const struct sk_buff *skb, struct xt_action_param *par)
* 1. match before 13:00
* 2. match after 13:00
*
- * If you match against processing time (get_seconds) it
+ * If you match against processing time (ktime_get_real_seconds) it
* may happen that the same packet matches both rules if
* it arrived at the right moment before 13:00, so it would be
* better to check skb->tstamp and set it via __net_timestamp()
* if needed. This however breaks outgoing packets tx timestamp,
* and causes them to get delayed forever by fq packet scheduler.
*/
- stamp = get_seconds();
+ stamp = ktime_get_real_seconds();
if (info->flags & XT_TIME_LOCAL_TZ)
/* Adjust for local timezone */
@@ -193,6 +193,9 @@ time_mt(const struct sk_buff *skb, struct xt_action_param *par)
* - 'now' is in the weekday mask
* - 'now' is in the daytime range time_start..time_end
* (and by default, libxt_time will set these so as to match)
+ *
+ * note: info->date_start/stop are unsigned 32-bit values that
+ * can hold values beyond y2038, but not after y2106.
*/
if (stamp < info->date_start || stamp > info->date_stop)
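The time64_t conversion above keeps the divide/remainder shape of the old code, only with explicit 64-bit helpers so 32-bit builds survive past y2038. A standalone sketch of the localtime_1()/localtime_2() decomposition, with plain C division standing in for div_u64_rem()/div_u64():

	#include <stdio.h>
	#include <stdint.h>

	#define SECONDS_PER_DAY 86400

	static void split(uint64_t t)
	{
		uint32_t v = t % SECONDS_PER_DAY;   /* seconds into the day */
		uint32_t dse = t / SECONDS_PER_DAY; /* days since the epoch */

		printf("dse=%u %02u:%02u:%02u\n",
		       dse, v / 3600, (v / 60) % 60, v % 60);
	}

	int main(void)
	{
		split(1000000000); /* dse=11574 01:46:40 */
		return 0;
	}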
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index efccd1ac9a66..0522b2b1fd95 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -458,10 +458,63 @@ void *genlmsg_put(struct sk_buff *skb, u32 portid, u32 seq,
}
EXPORT_SYMBOL(genlmsg_put);
+static struct genl_dumpit_info *genl_dumpit_info_alloc(void)
+{
+ return kmalloc(sizeof(struct genl_dumpit_info), GFP_KERNEL);
+}
+
+static void genl_dumpit_info_free(const struct genl_dumpit_info *info)
+{
+ kfree(info);
+}
+
+static struct nlattr **
+genl_family_rcv_msg_attrs_parse(const struct genl_family *family,
+ struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack,
+ const struct genl_ops *ops,
+ int hdrlen,
+ enum genl_validate_flags no_strict_flag,
+ bool parallel)
+{
+ enum netlink_validation validate = ops->validate & no_strict_flag ?
+ NL_VALIDATE_LIBERAL :
+ NL_VALIDATE_STRICT;
+ struct nlattr **attrbuf;
+ int err;
+
+ if (!family->maxattr)
+ return NULL;
+
+ if (parallel) {
+ attrbuf = kmalloc_array(family->maxattr + 1,
+ sizeof(struct nlattr *), GFP_KERNEL);
+ if (!attrbuf)
+ return ERR_PTR(-ENOMEM);
+ } else {
+ attrbuf = family->attrbuf;
+ }
+
+ err = __nlmsg_parse(nlh, hdrlen, attrbuf, family->maxattr,
+ family->policy, validate, extack);
+ if (err && parallel) {
+ kfree(attrbuf);
+ return ERR_PTR(err);
+ }
+ return attrbuf;
+}
+
+static void genl_family_rcv_msg_attrs_free(const struct genl_family *family,
+ struct nlattr **attrbuf,
+ bool parallel)
+{
+ if (parallel)
+ kfree(attrbuf);
+}
+
static int genl_lock_start(struct netlink_callback *cb)
{
- /* our ops are always const - netlink API doesn't propagate that */
- const struct genl_ops *ops = cb->data;
+ const struct genl_ops *ops = genl_dumpit_info(cb)->ops;
int rc = 0;
if (ops->start) {
@@ -474,8 +527,7 @@ static int genl_lock_start(struct netlink_callback *cb)
static int genl_lock_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
- /* our ops are always const - netlink API doesn't propagate that */
- const struct genl_ops *ops = cb->data;
+ const struct genl_ops *ops = genl_dumpit_info(cb)->ops;
int rc;
genl_lock();
@@ -486,8 +538,8 @@ static int genl_lock_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
static int genl_lock_done(struct netlink_callback *cb)
{
- /* our ops are always const - netlink API doesn't propagate that */
- const struct genl_ops *ops = cb->data;
+ const struct genl_dumpit_info *info = genl_dumpit_info(cb);
+ const struct genl_ops *ops = info->ops;
int rc = 0;
if (ops->done) {
@@ -495,120 +547,111 @@ static int genl_lock_done(struct netlink_callback *cb)
rc = ops->done(cb);
genl_unlock();
}
+ genl_family_rcv_msg_attrs_free(info->family, info->attrs, true);
+ genl_dumpit_info_free(info);
return rc;
}
-static int genl_family_rcv_msg(const struct genl_family *family,
- struct sk_buff *skb,
- struct nlmsghdr *nlh,
- struct netlink_ext_ack *extack)
+static int genl_parallel_done(struct netlink_callback *cb)
{
- const struct genl_ops *ops;
- struct net *net = sock_net(skb->sk);
- struct genl_info info;
- struct genlmsghdr *hdr = nlmsg_data(nlh);
- struct nlattr **attrbuf;
- int hdrlen, err;
+ const struct genl_dumpit_info *info = genl_dumpit_info(cb);
+ const struct genl_ops *ops = info->ops;
+ int rc = 0;
- /* this family doesn't exist in this netns */
- if (!family->netnsok && !net_eq(net, &init_net))
- return -ENOENT;
+ if (ops->done)
+ rc = ops->done(cb);
+ genl_family_rcv_msg_attrs_free(info->family, info->attrs, true);
+ genl_dumpit_info_free(info);
+ return rc;
+}
- hdrlen = GENL_HDRLEN + family->hdrsize;
- if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen))
- return -EINVAL;
+static int genl_family_rcv_msg_dumpit(const struct genl_family *family,
+ struct sk_buff *skb,
+ struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack,
+ const struct genl_ops *ops,
+ int hdrlen, struct net *net)
+{
+ struct genl_dumpit_info *info;
+ struct nlattr **attrs = NULL;
+ int err;
- ops = genl_get_cmd(hdr->cmd, family);
- if (ops == NULL)
+ if (!ops->dumpit)
return -EOPNOTSUPP;
- if ((ops->flags & GENL_ADMIN_PERM) &&
- !netlink_capable(skb, CAP_NET_ADMIN))
- return -EPERM;
-
- if ((ops->flags & GENL_UNS_ADMIN_PERM) &&
- !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
- return -EPERM;
-
- if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) {
- int rc;
-
- if (ops->dumpit == NULL)
- return -EOPNOTSUPP;
-
- if (!(ops->validate & GENL_DONT_VALIDATE_DUMP)) {
- int hdrlen = GENL_HDRLEN + family->hdrsize;
-
- if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen))
- return -EINVAL;
+ if (ops->validate & GENL_DONT_VALIDATE_DUMP)
+ goto no_attrs;
- if (family->maxattr) {
- unsigned int validate = NL_VALIDATE_STRICT;
-
- if (ops->validate &
- GENL_DONT_VALIDATE_DUMP_STRICT)
- validate = NL_VALIDATE_LIBERAL;
- rc = __nla_validate(nlmsg_attrdata(nlh, hdrlen),
- nlmsg_attrlen(nlh, hdrlen),
- family->maxattr,
- family->policy,
- validate, extack);
- if (rc)
- return rc;
- }
- }
+ if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen))
+ return -EINVAL;
- if (!family->parallel_ops) {
- struct netlink_dump_control c = {
- .module = family->module,
- /* we have const, but the netlink API doesn't */
- .data = (void *)ops,
- .start = genl_lock_start,
- .dump = genl_lock_dumpit,
- .done = genl_lock_done,
- };
+ attrs = genl_family_rcv_msg_attrs_parse(family, nlh, extack,
+ ops, hdrlen,
+ GENL_DONT_VALIDATE_DUMP_STRICT,
+ true);
+ if (IS_ERR(attrs))
+ return PTR_ERR(attrs);
+
+no_attrs:
+ /* Allocate dumpit info. It is going to be freed by done() callback. */
+ info = genl_dumpit_info_alloc();
+ if (!info) {
+ genl_family_rcv_msg_attrs_free(family, attrs, true);
+ return -ENOMEM;
+ }
- genl_unlock();
- rc = __netlink_dump_start(net->genl_sock, skb, nlh, &c);
- genl_lock();
+ info->family = family;
+ info->ops = ops;
+ info->attrs = attrs;
- } else {
- struct netlink_dump_control c = {
- .module = family->module,
- .start = ops->start,
- .dump = ops->dumpit,
- .done = ops->done,
- };
+ if (!family->parallel_ops) {
+ struct netlink_dump_control c = {
+ .module = family->module,
+ .data = info,
+ .start = genl_lock_start,
+ .dump = genl_lock_dumpit,
+ .done = genl_lock_done,
+ };
- rc = __netlink_dump_start(net->genl_sock, skb, nlh, &c);
- }
+ genl_unlock();
+ err = __netlink_dump_start(net->genl_sock, skb, nlh, &c);
+ genl_lock();
- return rc;
+ } else {
+ struct netlink_dump_control c = {
+ .module = family->module,
+ .data = info,
+ .start = ops->start,
+ .dump = ops->dumpit,
+ .done = genl_parallel_done,
+ };
+
+ err = __netlink_dump_start(net->genl_sock, skb, nlh, &c);
}
- if (ops->doit == NULL)
- return -EOPNOTSUPP;
-
- if (family->maxattr && family->parallel_ops) {
- attrbuf = kmalloc_array(family->maxattr + 1,
- sizeof(struct nlattr *),
- GFP_KERNEL);
- if (attrbuf == NULL)
- return -ENOMEM;
- } else
- attrbuf = family->attrbuf;
+ return err;
+}
- if (attrbuf) {
- enum netlink_validation validate = NL_VALIDATE_STRICT;
+static int genl_family_rcv_msg_doit(const struct genl_family *family,
+ struct sk_buff *skb,
+ struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack,
+ const struct genl_ops *ops,
+ int hdrlen, struct net *net)
+{
+ struct nlattr **attrbuf;
+ struct genl_info info;
+ int err;
- if (ops->validate & GENL_DONT_VALIDATE_STRICT)
- validate = NL_VALIDATE_LIBERAL;
+ if (!ops->doit)
+ return -EOPNOTSUPP;
- err = __nlmsg_parse(nlh, hdrlen, attrbuf, family->maxattr,
- family->policy, validate, extack);
- if (err < 0)
- goto out;
- }
+ attrbuf = genl_family_rcv_msg_attrs_parse(family, nlh, extack,
+ ops, hdrlen,
+ GENL_DONT_VALIDATE_STRICT,
+ family->parallel_ops);
+ if (IS_ERR(attrbuf))
+ return PTR_ERR(attrbuf);
info.snd_seq = nlh->nlmsg_seq;
info.snd_portid = NETLINK_CB(skb).portid;
@@ -632,12 +675,49 @@ static int genl_family_rcv_msg(const struct genl_family *family,
family->post_doit(ops, skb, &info);
out:
- if (family->parallel_ops)
- kfree(attrbuf);
+ genl_family_rcv_msg_attrs_free(family, attrbuf, family->parallel_ops);
return err;
}
+static int genl_family_rcv_msg(const struct genl_family *family,
+ struct sk_buff *skb,
+ struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack)
+{
+ const struct genl_ops *ops;
+ struct net *net = sock_net(skb->sk);
+ struct genlmsghdr *hdr = nlmsg_data(nlh);
+ int hdrlen;
+
+ /* this family doesn't exist in this netns */
+ if (!family->netnsok && !net_eq(net, &init_net))
+ return -ENOENT;
+
+ hdrlen = GENL_HDRLEN + family->hdrsize;
+ if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen))
+ return -EINVAL;
+
+ ops = genl_get_cmd(hdr->cmd, family);
+ if (ops == NULL)
+ return -EOPNOTSUPP;
+
+ if ((ops->flags & GENL_ADMIN_PERM) &&
+ !netlink_capable(skb, CAP_NET_ADMIN))
+ return -EPERM;
+
+ if ((ops->flags & GENL_UNS_ADMIN_PERM) &&
+ !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
+ return -EPERM;
+
+ if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP)
+ return genl_family_rcv_msg_dumpit(family, skb, nlh, extack,
+ ops, hdrlen, net);
+ else
+ return genl_family_rcv_msg_doit(family, skb, nlh, extack,
+ ops, hdrlen, net);
+}
+
static int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
@@ -1088,25 +1168,6 @@ problem:
subsys_initcall(genl_init);
-/**
- * genl_family_attrbuf - return family's attrbuf
- * @family: the family
- *
- * Return the family's attrbuf, while validating that it's
- * actually valid to access it.
- *
- * You cannot use this function with a family that has parallel_ops
- * and you can only use it within (pre/post) doit/dumpit callbacks.
- */
-struct nlattr **genl_family_attrbuf(const struct genl_family *family)
-{
- if (!WARN_ON(family->parallel_ops))
- lockdep_assert_held(&genl_mutex);
-
- return family->attrbuf;
-}
-EXPORT_SYMBOL(genl_family_attrbuf);
-
static int genlmsg_mcast(struct sk_buff *skb, u32 portid, unsigned long group,
gfp_t flags)
{
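The genetlink refactor above moves per-dump state (the ops pointer plus the parsed attribute array) into a heap-allocated genl_dumpit_info that rides along in the netlink callback and is freed exactly once in the done() callback. A toy userspace sketch of that ownership pattern; every name here is hypothetical, standing in for genl_dumpit_info_alloc()/genl_dumpit_info_free() and the dump/done pair:

	#include <stdio.h>
	#include <stdlib.h>

	struct dump_info {
		int attr; /* stands in for the parsed attribute array */
	};

	struct callback {
		void *data; /* carried through every dump pass */
	};

	static int dump_one(struct callback *cb)
	{
		struct dump_info *info = cb->data;

		printf("dumping with attr=%d\n", info->attr);
		return 0;
	}

	static int done(struct callback *cb)
	{
		free(cb->data); /* sole owner; mirrors genl_dumpit_info_free() */
		return 0;
	}

	int main(void)
	{
		struct dump_info *info = malloc(sizeof(*info));
		struct callback cb;

		if (!info)
			return 1;
		info->attr = 42;
		cb.data = info;
		dump_one(&cb);
		return done(&cb);
	}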
diff --git a/net/nfc/hci/Kconfig b/net/nfc/hci/Kconfig
index 97bd3a2c5c98..4822d6f46947 100644
--- a/net/nfc/hci/Kconfig
+++ b/net/nfc/hci/Kconfig
@@ -1,12 +1,12 @@
# SPDX-License-Identifier: GPL-2.0-only
config NFC_HCI
- depends on NFC
- tristate "NFC HCI implementation"
- default n
- help
- Say Y here if you want to build support for a kernel NFC HCI
- implementation. This is mostly needed for devices that only process
- HCI frames, like for example the NXP pn544.
+ depends on NFC
+ tristate "NFC HCI implementation"
+ default n
+ help
+ Say Y here if you want to build support for a kernel NFC HCI
+ implementation. This is mostly needed for devices that only process
+ HCI frames, like for example the NXP pn544.
config NFC_SHDLC
depends on NFC_HCI
diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
index afde0d763039..eee0dddb7749 100644
--- a/net/nfc/netlink.c
+++ b/net/nfc/netlink.c
@@ -102,22 +102,14 @@ nla_put_failure:
static struct nfc_dev *__get_device_from_cb(struct netlink_callback *cb)
{
- struct nlattr **attrbuf = genl_family_attrbuf(&nfc_genl_family);
+ const struct genl_dumpit_info *info = genl_dumpit_info(cb);
struct nfc_dev *dev;
- int rc;
u32 idx;
- rc = nlmsg_parse_deprecated(cb->nlh,
- GENL_HDRLEN + nfc_genl_family.hdrsize,
- attrbuf, nfc_genl_family.maxattr,
- nfc_genl_policy, NULL);
- if (rc < 0)
- return ERR_PTR(rc);
-
- if (!attrbuf[NFC_ATTR_DEVICE_INDEX])
+ if (!info->attrs[NFC_ATTR_DEVICE_INDEX])
return ERR_PTR(-EINVAL);
- idx = nla_get_u32(attrbuf[NFC_ATTR_DEVICE_INDEX]);
+ idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);
dev = nfc_get_device(idx);
if (!dev)
@@ -1695,7 +1687,8 @@ static const struct genl_ops nfc_genl_ops[] = {
},
{
.cmd = NFC_CMD_GET_TARGET,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .validate = GENL_DONT_VALIDATE_STRICT |
+ GENL_DONT_VALIDATE_DUMP_STRICT,
.dumpit = nfc_genl_dump_targets,
.done = nfc_genl_dump_targets_done,
},
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index 1c77f520f474..12936c151cc0 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -200,7 +200,7 @@ static int set_mpls(struct sk_buff *skb, struct sw_flow_key *flow_key,
if (err)
return err;
- flow_key->mpls.top_lse = lse;
+ flow_key->mpls.lse[0] = lse;
return 0;
}
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index 05249eb45082..df9c80bf621d 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -971,6 +971,8 @@ static int __ovs_ct_lookup(struct net *net, struct sw_flow_key *key,
ct = nf_ct_get(skb, &ctinfo);
if (ct) {
+ bool add_helper = false;
+
/* Packets starting a new connection must be NATted before the
* helper, so that the helper knows about the NAT. We enforce
* this by delaying both NAT and helper calls for unconfirmed
@@ -988,16 +990,17 @@ static int __ovs_ct_lookup(struct net *net, struct sw_flow_key *key,
}
/* Userspace may decide to perform a ct lookup without a helper
- * specified followed by a (recirculate and) commit with one.
- * Therefore, for unconfirmed connections which we will commit,
- * we need to attach the helper here.
+ * specified followed by a (recirculate and) commit with one,
+ * or attach a helper in a later commit. Therefore, for
+ * connections which we will commit, we may need to attach
+ * the helper here.
*/
- if (!nf_ct_is_confirmed(ct) && info->commit &&
- info->helper && !nfct_help(ct)) {
+ if (info->commit && info->helper && !nfct_help(ct)) {
int err = __nf_ct_try_assign_helper(ct, info->ct,
GFP_ATOMIC);
if (err)
return err;
+ add_helper = true;
/* helper installed, add seqadj if NAT is required */
if (info->nat && !nfct_seqadj(ct)) {
@@ -1007,11 +1010,13 @@ static int __ovs_ct_lookup(struct net *net, struct sw_flow_key *key,
}
/* Call the helper only if:
- * - nf_conntrack_in() was executed above ("!cached") for a
- * confirmed connection, or
+ * - nf_conntrack_in() was executed above ("!cached") or a
+ * helper was just attached ("add_helper") for a confirmed
+ * connection, or
* - When committing an unconfirmed connection.
*/
- if ((nf_ct_is_confirmed(ct) ? !cached : info->commit) &&
+ if ((nf_ct_is_confirmed(ct) ? !cached || add_helper :
+ info->commit) &&
ovs_ct_helper(skb, info->family) != NF_ACCEPT) {
return -EINVAL;
}
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index d8c364d637b1..93d4991ddc1f 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -227,7 +227,8 @@ void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key)
stats = this_cpu_ptr(dp->stats_percpu);
/* Look up flow. */
- flow = ovs_flow_tbl_lookup_stats(&dp->table, key, &n_mask_hit);
+ flow = ovs_flow_tbl_lookup_stats(&dp->table, key, skb_get_hash(skb),
+ &n_mask_hit);
if (unlikely(!flow)) {
struct dp_upcall_info upcall;
@@ -349,7 +350,8 @@ static size_t upcall_msg_size(const struct dp_upcall_info *upcall_info,
size_t size = NLMSG_ALIGN(sizeof(struct ovs_header))
+ nla_total_size(hdrlen) /* OVS_PACKET_ATTR_PACKET */
+ nla_total_size(ovs_key_attr_size()) /* OVS_PACKET_ATTR_KEY */
- + nla_total_size(sizeof(unsigned int)); /* OVS_PACKET_ATTR_LEN */
+ + nla_total_size(sizeof(unsigned int)) /* OVS_PACKET_ATTR_LEN */
+ + nla_total_size(sizeof(u64)); /* OVS_PACKET_ATTR_HASH */
/* OVS_PACKET_ATTR_USERDATA */
if (upcall_info->userdata)
@@ -392,6 +394,7 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
size_t len;
unsigned int hlen;
int err, dp_ifindex;
+ u64 hash;
dp_ifindex = get_dpifindex(dp);
if (!dp_ifindex)
@@ -484,23 +487,30 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
}
/* Add OVS_PACKET_ATTR_MRU */
- if (upcall_info->mru) {
- if (nla_put_u16(user_skb, OVS_PACKET_ATTR_MRU,
- upcall_info->mru)) {
- err = -ENOBUFS;
- goto out;
- }
- pad_packet(dp, user_skb);
+ if (upcall_info->mru &&
+ nla_put_u16(user_skb, OVS_PACKET_ATTR_MRU, upcall_info->mru)) {
+ err = -ENOBUFS;
+ goto out;
}
/* Add OVS_PACKET_ATTR_LEN when packet is truncated */
- if (cutlen > 0) {
- if (nla_put_u32(user_skb, OVS_PACKET_ATTR_LEN,
- skb->len)) {
- err = -ENOBUFS;
- goto out;
- }
- pad_packet(dp, user_skb);
+ if (cutlen > 0 &&
+ nla_put_u32(user_skb, OVS_PACKET_ATTR_LEN, skb->len)) {
+ err = -ENOBUFS;
+ goto out;
+ }
+
+ /* Add OVS_PACKET_ATTR_HASH */
+ hash = skb_get_hash_raw(skb);
+ if (skb->sw_hash)
+ hash |= OVS_PACKET_HASH_SW_BIT;
+
+ if (skb->l4_hash)
+ hash |= OVS_PACKET_HASH_L4_BIT;
+
+ if (nla_put(user_skb, OVS_PACKET_ATTR_HASH, sizeof(u64), &hash)) {
+ err = -ENOBUFS;
+ goto out;
}
/* Only reserve room for attribute header, packet data is added
@@ -542,6 +552,7 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
struct datapath *dp;
struct vport *input_vport;
u16 mru = 0;
+ u64 hash;
int len;
int err;
bool log = !a[OVS_PACKET_ATTR_PROBE];
@@ -567,6 +578,14 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
}
OVS_CB(packet)->mru = mru;
+ if (a[OVS_PACKET_ATTR_HASH]) {
+ hash = nla_get_u64(a[OVS_PACKET_ATTR_HASH]);
+
+ __skb_set_hash(packet, hash & 0xFFFFFFFFULL,
+ !!(hash & OVS_PACKET_HASH_SW_BIT),
+ !!(hash & OVS_PACKET_HASH_L4_BIT));
+ }
+
/* Build an sw_flow for sending this packet. */
flow = ovs_flow_alloc();
err = PTR_ERR(flow);
@@ -1575,6 +1594,31 @@ static int ovs_dp_change(struct datapath *dp, struct nlattr *a[])
return 0;
}
+static int ovs_dp_stats_init(struct datapath *dp)
+{
+ dp->stats_percpu = netdev_alloc_pcpu_stats(struct dp_stats_percpu);
+ if (!dp->stats_percpu)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int ovs_dp_vport_init(struct datapath *dp)
+{
+ int i;
+
+ dp->ports = kmalloc_array(DP_VPORT_HASH_BUCKETS,
+ sizeof(struct hlist_head),
+ GFP_KERNEL);
+ if (!dp->ports)
+ return -ENOMEM;
+
+ for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++)
+ INIT_HLIST_HEAD(&dp->ports[i]);
+
+ return 0;
+}
+
static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
{
struct nlattr **a = info->attrs;
@@ -1583,7 +1627,7 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
struct datapath *dp;
struct vport *vport;
struct ovs_net *ovs_net;
- int err, i;
+ int err;
err = -EINVAL;
if (!a[OVS_DP_ATTR_NAME] || !a[OVS_DP_ATTR_UPCALL_PID])
@@ -1596,35 +1640,26 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
err = -ENOMEM;
dp = kzalloc(sizeof(*dp), GFP_KERNEL);
if (dp == NULL)
- goto err_free_reply;
+ goto err_destroy_reply;
ovs_dp_set_net(dp, sock_net(skb->sk));
/* Allocate table. */
err = ovs_flow_tbl_init(&dp->table);
if (err)
- goto err_free_dp;
+ goto err_destroy_dp;
- dp->stats_percpu = netdev_alloc_pcpu_stats(struct dp_stats_percpu);
- if (!dp->stats_percpu) {
- err = -ENOMEM;
+ err = ovs_dp_stats_init(dp);
+ if (err)
goto err_destroy_table;
- }
- dp->ports = kmalloc_array(DP_VPORT_HASH_BUCKETS,
- sizeof(struct hlist_head),
- GFP_KERNEL);
- if (!dp->ports) {
- err = -ENOMEM;
- goto err_destroy_percpu;
- }
-
- for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++)
- INIT_HLIST_HEAD(&dp->ports[i]);
+ err = ovs_dp_vport_init(dp);
+ if (err)
+ goto err_destroy_stats;
err = ovs_meters_init(dp);
if (err)
- goto err_destroy_ports_array;
+ goto err_destroy_ports;
/* Set up our datapath device. */
parms.name = nla_data(a[OVS_DP_ATTR_NAME]);
@@ -1656,6 +1691,7 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
ovs_dp_reset_user_features(skb, info);
}
+ ovs_unlock();
goto err_destroy_meters;
}
@@ -1672,17 +1708,16 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
return 0;
err_destroy_meters:
- ovs_unlock();
ovs_meters_exit(dp);
-err_destroy_ports_array:
+err_destroy_ports:
kfree(dp->ports);
-err_destroy_percpu:
+err_destroy_stats:
free_percpu(dp->stats_percpu);
err_destroy_table:
ovs_flow_tbl_destroy(&dp->table);
-err_free_dp:
+err_destroy_dp:
kfree(dp);
-err_free_reply:
+err_destroy_reply:
kfree_skb(reply);
err:
return err;
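The OVS_PACKET_ATTR_HASH plumbing above packs the 32-bit skb hash into the low word of a u64 and parks the sw_hash/l4_hash flags in bits 32 and 33, so one attribute round-trips all three through userspace. A standalone sketch of that encoding, using the same bit positions as the enum added in datapath.h:

	#include <stdio.h>
	#include <stdint.h>

	#define HASH_SW_BIT (1ULL << 32)
	#define HASH_L4_BIT (1ULL << 33)

	int main(void)
	{
		uint64_t h = 0xdeadbeefULL | HASH_SW_BIT | HASH_L4_BIT;

		/* Unpacking mirrors the __skb_set_hash() call above. */
		printf("hash=%08x sw=%d l4=%d\n",
		       (unsigned)(h & 0xFFFFFFFFULL),
		       !!(h & HASH_SW_BIT), !!(h & HASH_L4_BIT));
		return 0;
	}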
diff --git a/net/openvswitch/datapath.h b/net/openvswitch/datapath.h
index 81e85dde8217..e239a46c2f94 100644
--- a/net/openvswitch/datapath.h
+++ b/net/openvswitch/datapath.h
@@ -139,6 +139,18 @@ struct ovs_net {
bool xt_label;
};
+/**
+ * enum ovs_pkt_hash_types - hash info to include with a packet
+ * to send to userspace.
+ * @OVS_PACKET_HASH_SW_BIT: indicates the hash was computed in the software stack.
+ * @OVS_PACKET_HASH_L4_BIT: indicates hash is a canonical 4-tuple hash
+ * over transport ports.
+ */
+enum ovs_pkt_hash_types {
+ OVS_PACKET_HASH_SW_BIT = (1ULL << 32),
+ OVS_PACKET_HASH_L4_BIT = (1ULL << 33),
+};
+
extern unsigned int ovs_net_id;
void ovs_lock(void);
void ovs_unlock(void);
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index 38147e6a20f5..9d375e74b607 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -637,27 +637,35 @@ static int key_extract_l3l4(struct sk_buff *skb, struct sw_flow_key *key)
memset(&key->ipv4, 0, sizeof(key->ipv4));
}
} else if (eth_p_mpls(key->eth.type)) {
- size_t stack_len = MPLS_HLEN;
+ u8 label_count = 1;
+ memset(&key->mpls, 0, sizeof(key->mpls));
skb_set_inner_network_header(skb, skb->mac_len);
while (1) {
__be32 lse;
- error = check_header(skb, skb->mac_len + stack_len);
+ error = check_header(skb, skb->mac_len +
+ label_count * MPLS_HLEN);
if (unlikely(error))
return 0;
memcpy(&lse, skb_inner_network_header(skb), MPLS_HLEN);
- if (stack_len == MPLS_HLEN)
- memcpy(&key->mpls.top_lse, &lse, MPLS_HLEN);
+ if (label_count <= MPLS_LABEL_DEPTH)
+ memcpy(&key->mpls.lse[label_count - 1], &lse,
+ MPLS_HLEN);
- skb_set_inner_network_header(skb, skb->mac_len + stack_len);
+ skb_set_inner_network_header(skb, skb->mac_len +
+ label_count * MPLS_HLEN);
if (lse & htonl(MPLS_LS_S_MASK))
break;
- stack_len += MPLS_HLEN;
+ label_count++;
}
+ if (label_count > MPLS_LABEL_DEPTH)
+ label_count = MPLS_LABEL_DEPTH;
+
+ key->mpls.num_labels_mask = GENMASK(label_count - 1, 0);
} else if (key->eth.type == htons(ETH_P_IPV6)) {
int nh_len; /* IPv6 Header + Extensions */
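key_extract_l3l4() above now records up to MPLS_LABEL_DEPTH label stack entries and encodes how many were captured as a low-bit mask, GENMASK(n - 1, 0) in kernel terms. A standalone sketch of that walk over a made-up label stack; the bottom-of-stack flag is handled in host byte order here for simplicity, where the kernel compares against htonl(MPLS_LS_S_MASK):

	#include <stdio.h>
	#include <stdint.h>

	#define MPLS_LABEL_DEPTH 3
	#define MPLS_LS_S_MASK 0x00000100u /* bottom-of-stack flag */

	int main(void)
	{
		/* Two made-up label stack entries; the second has S set. */
		uint32_t stack[] = { 0x00064000, 0x00065100 };
		uint32_t lse[MPLS_LABEL_DEPTH] = { 0 };
		unsigned int n = 0, total = sizeof(stack) / sizeof(stack[0]);

		while (n < total) {
			if (n < MPLS_LABEL_DEPTH)
				lse[n] = stack[n];
			n++;
			if (stack[n - 1] & MPLS_LS_S_MASK)
				break; /* bottom of stack reached */
		}

		if (n > MPLS_LABEL_DEPTH)
			n = MPLS_LABEL_DEPTH;
		printf("labels=%u num_labels_mask=0x%x first_lse=0x%08x\n",
		       n, (1u << n) - 1, (unsigned)lse[0]);
		return 0;
	}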
diff --git a/net/openvswitch/flow.h b/net/openvswitch/flow.h
index b830d5ff7af4..fd8ed766bdd1 100644
--- a/net/openvswitch/flow.h
+++ b/net/openvswitch/flow.h
@@ -30,6 +30,7 @@ enum sw_flow_mac_proto {
MAC_PROTO_ETHERNET,
};
#define SW_FLOW_KEY_INVALID 0x80
+#define MPLS_LABEL_DEPTH 3
/* Store options at the end of the array if they are less than the
* maximum size. This allows us to get the benefits of variable length
@@ -85,9 +86,6 @@ struct sw_flow_key {
*/
union {
struct {
- __be32 top_lse; /* top label stack entry */
- } mpls;
- struct {
u8 proto; /* IP protocol or lower 8 bits of ARP opcode. */
u8 tos; /* IP ToS. */
u8 ttl; /* IP TTL/hop limit. */
@@ -135,6 +133,11 @@ struct sw_flow_key {
} nd;
};
} ipv6;
+ struct {
+ u32 num_labels_mask; /* labels present bitmap of effective length MPLS_LABEL_DEPTH */
+ __be32 lse[MPLS_LABEL_DEPTH]; /* label stack entry */
+ } mpls;
+
struct ovs_key_nsh nsh; /* network service header */
};
struct {
@@ -166,7 +169,6 @@ struct sw_flow_key_range {
struct sw_flow_mask {
int ref_count;
struct rcu_head rcu;
- struct list_head list;
struct sw_flow_key_range range;
struct sw_flow_key key;
};
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index d7559c64795d..65c2e3458ff5 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -424,7 +424,7 @@ static const struct ovs_len_tbl ovs_key_lens[OVS_KEY_ATTR_MAX + 1] = {
[OVS_KEY_ATTR_DP_HASH] = { .len = sizeof(u32) },
[OVS_KEY_ATTR_TUNNEL] = { .len = OVS_ATTR_NESTED,
.next = ovs_tunnel_key_lens, },
- [OVS_KEY_ATTR_MPLS] = { .len = sizeof(struct ovs_key_mpls) },
+ [OVS_KEY_ATTR_MPLS] = { .len = OVS_ATTR_VARIABLE },
[OVS_KEY_ATTR_CT_STATE] = { .len = sizeof(u32) },
[OVS_KEY_ATTR_CT_ZONE] = { .len = sizeof(u16) },
[OVS_KEY_ATTR_CT_MARK] = { .len = sizeof(u32) },
@@ -1628,10 +1628,25 @@ static int ovs_key_from_nlattrs(struct net *net, struct sw_flow_match *match,
if (attrs & (1 << OVS_KEY_ATTR_MPLS)) {
const struct ovs_key_mpls *mpls_key;
+ u32 hdr_len;
+ u32 label_count, label_count_mask, i;
mpls_key = nla_data(a[OVS_KEY_ATTR_MPLS]);
- SW_FLOW_KEY_PUT(match, mpls.top_lse,
- mpls_key->mpls_lse, is_mask);
+ hdr_len = nla_len(a[OVS_KEY_ATTR_MPLS]);
+ label_count = hdr_len / sizeof(struct ovs_key_mpls);
+
+ if (label_count == 0 || label_count > MPLS_LABEL_DEPTH ||
+ hdr_len % sizeof(struct ovs_key_mpls))
+ return -EINVAL;
+
+ label_count_mask = GENMASK(label_count - 1, 0);
+
+ for (i = 0; i < label_count; i++)
+ SW_FLOW_KEY_PUT(match, mpls.lse[i],
+ mpls_key[i].mpls_lse, is_mask);
+
+ SW_FLOW_KEY_PUT(match, mpls.num_labels_mask,
+ label_count_mask, is_mask);
attrs &= ~(1 << OVS_KEY_ATTR_MPLS);
}
@@ -2114,13 +2129,18 @@ static int __ovs_nla_put_key(const struct sw_flow_key *swkey,
ether_addr_copy(arp_key->arp_sha, output->ipv4.arp.sha);
ether_addr_copy(arp_key->arp_tha, output->ipv4.arp.tha);
} else if (eth_p_mpls(swkey->eth.type)) {
+ u8 i, num_labels;
struct ovs_key_mpls *mpls_key;
- nla = nla_reserve(skb, OVS_KEY_ATTR_MPLS, sizeof(*mpls_key));
+ num_labels = hweight_long(output->mpls.num_labels_mask);
+ nla = nla_reserve(skb, OVS_KEY_ATTR_MPLS,
+ num_labels * sizeof(*mpls_key));
if (!nla)
goto nla_put_failure;
+
mpls_key = nla_data(nla);
- mpls_key->mpls_lse = output->mpls.top_lse;
+ for (i = 0; i < num_labels; i++)
+ mpls_key[i].mpls_lse = output->mpls.lse[i];
}
if ((swkey->eth.type == htons(ETH_P_IP) ||
@@ -2406,13 +2426,14 @@ static inline void add_nested_action_end(struct sw_flow_actions *sfa,
static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
const struct sw_flow_key *key,
struct sw_flow_actions **sfa,
- __be16 eth_type, __be16 vlan_tci, bool log);
+ __be16 eth_type, __be16 vlan_tci,
+ u32 mpls_label_count, bool log);
static int validate_and_copy_sample(struct net *net, const struct nlattr *attr,
const struct sw_flow_key *key,
struct sw_flow_actions **sfa,
__be16 eth_type, __be16 vlan_tci,
- bool log, bool last)
+ u32 mpls_label_count, bool log, bool last)
{
const struct nlattr *attrs[OVS_SAMPLE_ATTR_MAX + 1];
const struct nlattr *probability, *actions;
@@ -2463,7 +2484,7 @@ static int validate_and_copy_sample(struct net *net, const struct nlattr *attr,
return err;
err = __ovs_nla_copy_actions(net, actions, key, sfa,
- eth_type, vlan_tci, log);
+ eth_type, vlan_tci, mpls_label_count, log);
if (err)
return err;
@@ -2478,7 +2499,7 @@ static int validate_and_copy_clone(struct net *net,
const struct sw_flow_key *key,
struct sw_flow_actions **sfa,
__be16 eth_type, __be16 vlan_tci,
- bool log, bool last)
+ u32 mpls_label_count, bool log, bool last)
{
int start, err;
u32 exec;
@@ -2498,7 +2519,7 @@ static int validate_and_copy_clone(struct net *net,
return err;
err = __ovs_nla_copy_actions(net, attr, key, sfa,
- eth_type, vlan_tci, log);
+ eth_type, vlan_tci, mpls_label_count, log);
if (err)
return err;
@@ -2864,6 +2885,7 @@ static int validate_and_copy_check_pkt_len(struct net *net,
const struct sw_flow_key *key,
struct sw_flow_actions **sfa,
__be16 eth_type, __be16 vlan_tci,
+ u32 mpls_label_count,
bool log, bool last)
{
const struct nlattr *acts_if_greater, *acts_if_lesser_eq;
@@ -2912,7 +2934,7 @@ static int validate_and_copy_check_pkt_len(struct net *net,
return nested_acts_start;
err = __ovs_nla_copy_actions(net, acts_if_lesser_eq, key, sfa,
- eth_type, vlan_tci, log);
+ eth_type, vlan_tci, mpls_label_count, log);
if (err)
return err;
@@ -2925,7 +2947,7 @@ static int validate_and_copy_check_pkt_len(struct net *net,
return nested_acts_start;
err = __ovs_nla_copy_actions(net, acts_if_greater, key, sfa,
- eth_type, vlan_tci, log);
+ eth_type, vlan_tci, mpls_label_count, log);
if (err)
return err;
@@ -2952,7 +2974,8 @@ static int copy_action(const struct nlattr *from,
static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
const struct sw_flow_key *key,
struct sw_flow_actions **sfa,
- __be16 eth_type, __be16 vlan_tci, bool log)
+ __be16 eth_type, __be16 vlan_tci,
+ u32 mpls_label_count, bool log)
{
u8 mac_proto = ovs_key_mac_proto(key);
const struct nlattr *a;
@@ -3065,25 +3088,36 @@ static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
!eth_p_mpls(eth_type)))
return -EINVAL;
eth_type = mpls->mpls_ethertype;
+ mpls_label_count++;
break;
}
- case OVS_ACTION_ATTR_POP_MPLS:
+ case OVS_ACTION_ATTR_POP_MPLS: {
+ __be16 proto;
if (vlan_tci & htons(VLAN_CFI_MASK) ||
!eth_p_mpls(eth_type))
return -EINVAL;
- /* Disallow subsequent L2.5+ set and mpls_pop actions
- * as there is no check here to ensure that the new
- * eth_type is valid and thus set actions could
- * write off the end of the packet or otherwise
- * corrupt it.
+ /* Disallow subsequent L2.5+ set actions and mpls_pop
+ * actions once the last MPLS label in the packet
+ * is popped, as there is no check here to ensure that
+ * the new eth type is valid and thus set actions could
+ * write off the end of the packet or otherwise corrupt
+ * it.
*
* Support for these actions is planned using packet
* recirculation.
*/
- eth_type = htons(0);
+ proto = nla_get_be16(a);
+ mpls_label_count--;
+
+ if (!eth_p_mpls(proto) || !mpls_label_count)
+ eth_type = htons(0);
+ else
+ eth_type = proto;
+
break;
+ }
case OVS_ACTION_ATTR_SET:
err = validate_set(a, key, sfa,
@@ -3106,6 +3140,7 @@ static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
err = validate_and_copy_sample(net, a, key, sfa,
eth_type, vlan_tci,
+ mpls_label_count,
log, last);
if (err)
return err;
@@ -3176,6 +3211,7 @@ static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
err = validate_and_copy_clone(net, a, key, sfa,
eth_type, vlan_tci,
+ mpls_label_count,
log, last);
if (err)
return err;
@@ -3188,8 +3224,9 @@ static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
err = validate_and_copy_check_pkt_len(net, a, key, sfa,
eth_type,
- vlan_tci, log,
- last);
+ vlan_tci,
+ mpls_label_count,
+ log, last);
if (err)
return err;
skip_copy = true;
@@ -3219,14 +3256,18 @@ int ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
struct sw_flow_actions **sfa, bool log)
{
int err;
+ u32 mpls_label_count = 0;
*sfa = nla_alloc_flow_actions(min(nla_len(attr), MAX_ACTIONS_BUFSIZE));
if (IS_ERR(*sfa))
return PTR_ERR(*sfa);
+ if (eth_p_mpls(key->eth.type))
+ mpls_label_count = hweight_long(key->mpls.num_labels_mask);
+
(*sfa)->orig_len = nla_len(attr);
err = __ovs_nla_copy_actions(net, attr, key, sfa, key->eth.type,
- key->eth.vlan.tci, log);
+ key->eth.vlan.tci, mpls_label_count, log);
if (err)
ovs_nla_free_flow_actions(*sfa);
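With OVS_KEY_ATTR_MPLS now variable-length, ovs_key_from_nlattrs() above derives the label count from the attribute payload size and rejects anything empty, ragged, or deeper than MPLS_LABEL_DEPTH. A standalone sketch of that validation; LSE_SIZE stands in for sizeof(struct ovs_key_mpls):

	#include <stdio.h>
	#include <stdint.h>

	#define MPLS_LABEL_DEPTH 3
	#define LSE_SIZE 4 /* sizeof(struct ovs_key_mpls) */

	static int label_count(uint32_t attr_len)
	{
		uint32_t n = attr_len / LSE_SIZE;

		if (n == 0 || n > MPLS_LABEL_DEPTH || attr_len % LSE_SIZE)
			return -1; /* -EINVAL in the kernel */
		return n;
	}

	int main(void)
	{
		/* prints "2 -1 -1": valid, ragged length, too deep */
		printf("%d %d %d\n",
		       label_count(8), label_count(6), label_count(16));
		return 0;
	}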
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
index cf3582c5ed70..5904e93e5765 100644
--- a/net/openvswitch/flow_table.c
+++ b/net/openvswitch/flow_table.c
@@ -34,8 +34,13 @@
#include <net/ndisc.h>
#define TBL_MIN_BUCKETS 1024
+#define MASK_ARRAY_SIZE_MIN 16
#define REHASH_INTERVAL (10 * 60 * HZ)
+#define MC_HASH_SHIFT 8
+#define MC_HASH_ENTRIES (1u << MC_HASH_SHIFT)
+#define MC_HASH_SEGS ((sizeof(uint32_t) * 8) / MC_HASH_SHIFT)
+
static struct kmem_cache *flow_cache;
struct kmem_cache *flow_stats_cache __read_mostly;
@@ -164,14 +169,133 @@ static struct table_instance *table_instance_alloc(int new_size)
return ti;
}
+static struct mask_array *tbl_mask_array_alloc(int size)
+{
+ struct mask_array *new;
+
+ size = max(MASK_ARRAY_SIZE_MIN, size);
+ new = kzalloc(sizeof(struct mask_array) +
+ sizeof(struct sw_flow_mask *) * size, GFP_KERNEL);
+ if (!new)
+ return NULL;
+
+ new->count = 0;
+ new->max = size;
+
+ return new;
+}
+
+static int tbl_mask_array_realloc(struct flow_table *tbl, int size)
+{
+ struct mask_array *old;
+ struct mask_array *new;
+
+ new = tbl_mask_array_alloc(size);
+ if (!new)
+ return -ENOMEM;
+
+ old = ovsl_dereference(tbl->mask_array);
+ if (old) {
+ int i;
+
+ for (i = 0; i < old->max; i++) {
+ if (ovsl_dereference(old->masks[i]))
+ new->masks[new->count++] = old->masks[i];
+ }
+ }
+
+ rcu_assign_pointer(tbl->mask_array, new);
+ kfree_rcu(old, rcu);
+
+ return 0;
+}
+
+static int tbl_mask_array_add_mask(struct flow_table *tbl,
+ struct sw_flow_mask *new)
+{
+ struct mask_array *ma = ovsl_dereference(tbl->mask_array);
+ int err, ma_count = READ_ONCE(ma->count);
+
+ if (ma_count >= ma->max) {
+ err = tbl_mask_array_realloc(tbl, ma->max +
+ MASK_ARRAY_SIZE_MIN);
+ if (err)
+ return err;
+
+ ma = ovsl_dereference(tbl->mask_array);
+ }
+
+ BUG_ON(ovsl_dereference(ma->masks[ma_count]));
+
+ rcu_assign_pointer(ma->masks[ma_count], new);
+ WRITE_ONCE(ma->count, ma_count + 1);
+
+ return 0;
+}
+
+static void tbl_mask_array_del_mask(struct flow_table *tbl,
+ struct sw_flow_mask *mask)
+{
+ struct mask_array *ma = ovsl_dereference(tbl->mask_array);
+ int i, ma_count = READ_ONCE(ma->count);
+
+ /* Remove the deleted mask pointers from the array */
+ for (i = 0; i < ma_count; i++) {
+ if (mask == ovsl_dereference(ma->masks[i]))
+ goto found;
+ }
+
+ BUG();
+ return;
+
+found:
+ WRITE_ONCE(ma->count, ma_count - 1);
+
+ rcu_assign_pointer(ma->masks[i], ma->masks[ma_count - 1]);
+ RCU_INIT_POINTER(ma->masks[ma_count - 1], NULL);
+
+ kfree_rcu(mask, rcu);
+
+ /* Shrink the mask array if necessary. */
+ if (ma->max >= (MASK_ARRAY_SIZE_MIN * 2) &&
+ ma_count <= (ma->max / 3))
+ tbl_mask_array_realloc(tbl, ma->max / 2);
+}
+
+/* Remove 'mask' from the mask list, if it is not needed any more. */
+static void flow_mask_remove(struct flow_table *tbl, struct sw_flow_mask *mask)
+{
+ if (mask) {
+ /* ovs-lock is required to protect mask-refcount and
+ * mask list.
+ */
+ ASSERT_OVSL();
+ BUG_ON(!mask->ref_count);
+ mask->ref_count--;
+
+ if (!mask->ref_count)
+ tbl_mask_array_del_mask(tbl, mask);
+ }
+}
+
int ovs_flow_tbl_init(struct flow_table *table)
{
struct table_instance *ti, *ufid_ti;
+ struct mask_array *ma;
- ti = table_instance_alloc(TBL_MIN_BUCKETS);
+ table->mask_cache = __alloc_percpu(sizeof(struct mask_cache_entry) *
+ MC_HASH_ENTRIES,
+ __alignof__(struct mask_cache_entry));
+ if (!table->mask_cache)
+ return -ENOMEM;
+ ma = tbl_mask_array_alloc(MASK_ARRAY_SIZE_MIN);
+ if (!ma)
+ goto free_mask_cache;
+
+ ti = table_instance_alloc(TBL_MIN_BUCKETS);
if (!ti)
- return -ENOMEM;
+ goto free_mask_array;
ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
if (!ufid_ti)
@@ -179,7 +303,7 @@ int ovs_flow_tbl_init(struct flow_table *table)
rcu_assign_pointer(table->ti, ti);
rcu_assign_pointer(table->ufid_ti, ufid_ti);
- INIT_LIST_HEAD(&table->mask_list);
+ rcu_assign_pointer(table->mask_array, ma);
table->last_rehash = jiffies;
table->count = 0;
table->ufid_count = 0;
@@ -187,6 +311,10 @@ int ovs_flow_tbl_init(struct flow_table *table)
free_ti:
__table_instance_destroy(ti);
+free_mask_array:
+ kfree(ma);
+free_mask_cache:
+ free_percpu(table->mask_cache);
return -ENOMEM;
}
@@ -197,7 +325,28 @@ static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
__table_instance_destroy(ti);
}
-static void table_instance_destroy(struct table_instance *ti,
+static void table_instance_flow_free(struct flow_table *table,
+ struct table_instance *ti,
+ struct table_instance *ufid_ti,
+ struct sw_flow *flow,
+ bool count)
+{
+ hlist_del_rcu(&flow->flow_table.node[ti->node_ver]);
+ if (count)
+ table->count--;
+
+ if (ovs_identifier_is_ufid(&flow->id)) {
+ hlist_del_rcu(&flow->ufid_table.node[ufid_ti->node_ver]);
+
+ if (count)
+ table->ufid_count--;
+ }
+
+ flow_mask_remove(table, flow->mask);
+}
+
+static void table_instance_destroy(struct flow_table *table,
+ struct table_instance *ti,
struct table_instance *ufid_ti,
bool deferred)
{
@@ -214,13 +363,12 @@ static void table_instance_destroy(struct table_instance *ti,
struct sw_flow *flow;
struct hlist_head *head = &ti->buckets[i];
struct hlist_node *n;
- int ver = ti->node_ver;
- int ufid_ver = ufid_ti->node_ver;
- hlist_for_each_entry_safe(flow, n, head, flow_table.node[ver]) {
- hlist_del_rcu(&flow->flow_table.node[ver]);
- if (ovs_identifier_is_ufid(&flow->id))
- hlist_del_rcu(&flow->ufid_table.node[ufid_ver]);
+ hlist_for_each_entry_safe(flow, n, head,
+ flow_table.node[ti->node_ver]) {
+
+ table_instance_flow_free(table, ti, ufid_ti,
+ flow, false);
ovs_flow_free(flow, deferred);
}
}
@@ -243,7 +391,9 @@ void ovs_flow_tbl_destroy(struct flow_table *table)
struct table_instance *ti = rcu_dereference_raw(table->ti);
struct table_instance *ufid_ti = rcu_dereference_raw(table->ufid_ti);
- table_instance_destroy(ti, ufid_ti, false);
+ free_percpu(table->mask_cache);
+ kfree_rcu(rcu_dereference_raw(table->mask_array), rcu);
+ table_instance_destroy(table, ti, ufid_ti, false);
}
struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
@@ -359,7 +509,7 @@ int ovs_flow_tbl_flush(struct flow_table *flow_table)
flow_table->count = 0;
flow_table->ufid_count = 0;
- table_instance_destroy(old_ti, old_ufid_ti, true);
+ table_instance_destroy(flow_table, old_ti, old_ufid_ti, true);
return 0;
err_free_ti:
@@ -370,13 +520,10 @@ err_free_ti:
static u32 flow_hash(const struct sw_flow_key *key,
const struct sw_flow_key_range *range)
{
- int key_start = range->start;
- int key_end = range->end;
- const u32 *hash_key = (const u32 *)((const u8 *)key + key_start);
- int hash_u32s = (key_end - key_start) >> 2;
+ const u32 *hash_key = (const u32 *)((const u8 *)key + range->start);
/* Make sure number of hash bytes are multiple of u32. */
- BUILD_BUG_ON(sizeof(long) % sizeof(u32));
+ int hash_u32s = range_n_bytes(range) >> 2;
return jhash2(hash_key, hash_u32s, 0);
}
@@ -425,7 +572,8 @@ static bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
const struct sw_flow_key *unmasked,
- const struct sw_flow_mask *mask)
+ const struct sw_flow_mask *mask,
+ u32 *n_mask_hit)
{
struct sw_flow *flow;
struct hlist_head *head;
@@ -435,6 +583,8 @@ static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
ovs_flow_mask_key(&masked_key, unmasked, false, mask);
hash = flow_hash(&masked_key, &mask->range);
head = find_bucket(ti, hash);
+ (*n_mask_hit)++;
+
hlist_for_each_entry_rcu(flow, head, flow_table.node[ti->node_ver]) {
if (flow->mask == mask && flow->flow_table.hash == hash &&
flow_cmp_masked_key(flow, &masked_key, &mask->range))
@@ -443,46 +593,147 @@ static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
return NULL;
}
-struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
- const struct sw_flow_key *key,
- u32 *n_mask_hit)
+/* Flow lookup does full lookup on flow table. It starts with
+ * mask from index passed in *index.
+ */
+static struct sw_flow *flow_lookup(struct flow_table *tbl,
+ struct table_instance *ti,
+ struct mask_array *ma,
+ const struct sw_flow_key *key,
+ u32 *n_mask_hit,
+ u32 *index)
{
- struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
+ struct sw_flow *flow;
struct sw_flow_mask *mask;
+ int i;
+
+ if (likely(*index < ma->max)) {
+ mask = rcu_dereference_ovsl(ma->masks[*index]);
+ if (mask) {
+ flow = masked_flow_lookup(ti, key, mask, n_mask_hit);
+ if (flow)
+ return flow;
+ }
+ }
+
+ for (i = 0; i < ma->max; i++) {
+
+ if (i == *index)
+ continue;
+
+ mask = rcu_dereference_ovsl(ma->masks[i]);
+ if (unlikely(!mask))
+ break;
+
+ flow = masked_flow_lookup(ti, key, mask, n_mask_hit);
+ if (flow) { /* Found */
+ *index = i;
+ return flow;
+ }
+ }
+
+ return NULL;
+}
+
+/*
+ * mask_cache maps a flow to its probable mask. The cache is not
+ * tightly coupled: updates to the mask list can leave inconsistent
+ * entries in the mask cache.
+ * This is a per-cpu cache divided into MC_HASH_SEGS segments.
+ * On a hash collision the entry is hashed into the next segment.
+ */
+struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
+ const struct sw_flow_key *key,
+ u32 skb_hash,
+ u32 *n_mask_hit)
+{
+ struct mask_array *ma = rcu_dereference(tbl->mask_array);
+ struct table_instance *ti = rcu_dereference(tbl->ti);
+ struct mask_cache_entry *entries, *ce;
struct sw_flow *flow;
+ u32 hash;
+ int seg;
*n_mask_hit = 0;
- list_for_each_entry_rcu(mask, &tbl->mask_list, list) {
- (*n_mask_hit)++;
- flow = masked_flow_lookup(ti, key, mask);
- if (flow) /* Found */
+ if (unlikely(!skb_hash)) {
+ u32 mask_index = 0;
+
+ return flow_lookup(tbl, ti, ma, key, n_mask_hit, &mask_index);
+ }
+
+ /* Pre and post recirculation flows usually have the same skb_hash
+ * value. To avoid hash collisions, rehash the 'skb_hash' with
+ * 'recirc_id'. */
+ if (key->recirc_id)
+ skb_hash = jhash_1word(skb_hash, key->recirc_id);
+
+ ce = NULL;
+ hash = skb_hash;
+ entries = this_cpu_ptr(tbl->mask_cache);
+
+ /* Find the cache entry 'ce' to operate on. */
+ for (seg = 0; seg < MC_HASH_SEGS; seg++) {
+ int index = hash & (MC_HASH_ENTRIES - 1);
+ struct mask_cache_entry *e;
+
+ e = &entries[index];
+ if (e->skb_hash == skb_hash) {
+ flow = flow_lookup(tbl, ti, ma, key, n_mask_hit,
+ &e->mask_index);
+ if (!flow)
+ e->skb_hash = 0;
return flow;
+ }
+
+ if (!ce || e->skb_hash < ce->skb_hash)
+ ce = e; /* A better replacement cache candidate. */
+
+ hash >>= MC_HASH_SHIFT;
}
- return NULL;
+
+ /* Cache miss, do full lookup. */
+ flow = flow_lookup(tbl, ti, ma, key, n_mask_hit, &ce->mask_index);
+ if (flow)
+ ce->skb_hash = skb_hash;
+
+ return flow;
}
struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
const struct sw_flow_key *key)
{
+ struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
+ struct mask_array *ma = rcu_dereference_ovsl(tbl->mask_array);
u32 __always_unused n_mask_hit;
+ u32 index = 0;
- return ovs_flow_tbl_lookup_stats(tbl, key, &n_mask_hit);
+ return flow_lookup(tbl, ti, ma, key, &n_mask_hit, &index);
}
struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
const struct sw_flow_match *match)
{
- struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
- struct sw_flow_mask *mask;
- struct sw_flow *flow;
+ struct mask_array *ma = ovsl_dereference(tbl->mask_array);
+ int i;
/* Always called under ovs-mutex. */
- list_for_each_entry(mask, &tbl->mask_list, list) {
- flow = masked_flow_lookup(ti, match->key, mask);
+ for (i = 0; i < ma->max; i++) {
+ struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
+ u32 __always_unused n_mask_hit;
+ struct sw_flow_mask *mask;
+ struct sw_flow *flow;
+
+ mask = ovsl_dereference(ma->masks[i]);
+ if (!mask)
+ continue;
+
+ flow = masked_flow_lookup(ti, match->key, mask, &n_mask_hit);
if (flow && ovs_identifier_is_key(&flow->id) &&
- ovs_flow_cmp_unmasked_key(flow, match))
+ ovs_flow_cmp_unmasked_key(flow, match)) {
return flow;
+ }
}
+
return NULL;
}
@@ -528,13 +779,8 @@ struct sw_flow *ovs_flow_tbl_lookup_ufid(struct flow_table *tbl,
int ovs_flow_tbl_num_masks(const struct flow_table *table)
{
- struct sw_flow_mask *mask;
- int num = 0;
-
- list_for_each_entry(mask, &table->mask_list, list)
- num++;
-
- return num;
+ struct mask_array *ma = rcu_dereference_ovsl(table->mask_array);
+ return READ_ONCE(ma->count);
}
static struct table_instance *table_instance_expand(struct table_instance *ti,
@@ -543,24 +789,6 @@ static struct table_instance *table_instance_expand(struct table_instance *ti,
return table_instance_rehash(ti, ti->n_buckets * 2, ufid);
}
-/* Remove 'mask' from the mask list, if it is not needed any more. */
-static void flow_mask_remove(struct flow_table *tbl, struct sw_flow_mask *mask)
-{
- if (mask) {
- /* ovs-lock is required to protect mask-refcount and
- * mask list.
- */
- ASSERT_OVSL();
- BUG_ON(!mask->ref_count);
- mask->ref_count--;
-
- if (!mask->ref_count) {
- list_del_rcu(&mask->list);
- kfree_rcu(mask, rcu);
- }
- }
-}
-
/* Must be called with OVS mutex held. */
void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
{
@@ -568,17 +796,7 @@ void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
struct table_instance *ufid_ti = ovsl_dereference(table->ufid_ti);
BUG_ON(table->count == 0);
- hlist_del_rcu(&flow->flow_table.node[ti->node_ver]);
- table->count--;
- if (ovs_identifier_is_ufid(&flow->id)) {
- hlist_del_rcu(&flow->ufid_table.node[ufid_ti->node_ver]);
- table->ufid_count--;
- }
-
- /* RCU delete the mask. 'flow->mask' is not NULLed, as it should be
- * accessible as long as the RCU read lock is held.
- */
- flow_mask_remove(table, flow->mask);
+ table_instance_flow_free(table, ti, ufid_ti, flow, true);
}
static struct sw_flow_mask *mask_alloc(void)
@@ -606,13 +824,16 @@ static bool mask_equal(const struct sw_flow_mask *a,
static struct sw_flow_mask *flow_mask_find(const struct flow_table *tbl,
const struct sw_flow_mask *mask)
{
- struct list_head *ml;
+ struct mask_array *ma;
+ int i;
+
+ ma = ovsl_dereference(tbl->mask_array);
+ for (i = 0; i < ma->max; i++) {
+ struct sw_flow_mask *t;
+ t = ovsl_dereference(ma->masks[i]);
- list_for_each(ml, &tbl->mask_list) {
- struct sw_flow_mask *m;
- m = container_of(ml, struct sw_flow_mask, list);
- if (mask_equal(mask, m))
- return m;
+ if (t && mask_equal(mask, t))
+ return t;
}
return NULL;
@@ -623,6 +844,7 @@ static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
const struct sw_flow_mask *new)
{
struct sw_flow_mask *mask;
+
mask = flow_mask_find(tbl, new);
if (!mask) {
/* Allocate a new mask if none exists. */
@@ -631,7 +853,12 @@ static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
return -ENOMEM;
mask->key = new->key;
mask->range = new->range;
- list_add_rcu(&mask->list, &tbl->mask_list);
+
+ /* Add mask to mask-list. */
+ if (tbl_mask_array_add_mask(tbl, mask)) {
+ kfree(mask);
+ return -ENOMEM;
+ }
} else {
BUG_ON(!mask->ref_count);
mask->ref_count++;
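
A minimal userspace model of the cached-index lookup that flow_lookup() implements above, stripped of RCU and the OVS types; all names here are illustrative, and match() stands in for masked_flow_lookup(). The hinted slot is probed first, and the hint is rewritten whenever the fallback scan finds the match elsewhere:

    #include <stddef.h>
    #include <stdint.h>

    #define MA_MAX 8

    struct mask { uint32_t id; };

    /* a mask "matches" when its id equals the key (stand-in predicate) */
    static int match(const struct mask *m, uint32_t key)
    {
            return m && m->id == key;
    }

    static const struct mask *lookup(const struct mask *ma[MA_MAX],
                                     uint32_t key, uint32_t *hint)
    {
            uint32_t i;

            /* hot path: probe the hinted index first */
            if (*hint < MA_MAX && match(ma[*hint], key))
                    return ma[*hint];

            /* cold path: scan the rest of the (packed) array */
            for (i = 0; i < MA_MAX; i++) {
                    if (i == *hint)
                            continue;
                    if (!ma[i])             /* first hole ends the scan */
                            break;
                    if (match(ma[i], key)) {
                            *hint = i;      /* remember for the next lookup */
                            return ma[i];
                    }
            }
            return NULL;
    }
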
diff --git a/net/openvswitch/flow_table.h b/net/openvswitch/flow_table.h
index bc52045b63ff..8a5cea6ae111 100644
--- a/net/openvswitch/flow_table.h
+++ b/net/openvswitch/flow_table.h
@@ -22,6 +22,17 @@
#include "flow.h"
+struct mask_cache_entry {
+ u32 skb_hash;
+ u32 mask_index;
+};
+
+struct mask_array {
+ struct rcu_head rcu;
+ int count, max;
+ struct sw_flow_mask __rcu *masks[];
+};
+
struct table_instance {
struct hlist_head *buckets;
unsigned int n_buckets;
@@ -34,7 +45,8 @@ struct table_instance {
struct flow_table {
struct table_instance __rcu *ti;
struct table_instance __rcu *ufid_ti;
- struct list_head mask_list;
+ struct mask_cache_entry __percpu *mask_cache;
+ struct mask_array __rcu *mask_array;
unsigned long last_rehash;
unsigned int count;
unsigned int ufid_count;
@@ -60,8 +72,9 @@ int ovs_flow_tbl_num_masks(const struct flow_table *table);
struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *table,
u32 *bucket, u32 *idx);
struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *,
- const struct sw_flow_key *,
- u32 *n_mask_hit);
+ const struct sw_flow_key *,
+ u32 skb_hash,
+ u32 *n_mask_hit);
struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *,
const struct sw_flow_key *);
struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
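
mask_cache_entry above is the per-CPU slot that ovs_flow_tbl_lookup_stats() probes; the sizing constants live in flow_table.c and are not part of this hunk. Assuming the plausible values MC_HASH_SHIFT = 8 (so MC_HASH_ENTRIES = 256 and MC_HASH_SEGS = 4), each probe consumes 8 bits of skb_hash per segment:

    #include <stddef.h>
    #include <stdint.h>

    #define MC_HASH_SHIFT   8               /* assumed; not in this hunk */
    #define MC_HASH_ENTRIES (1u << MC_HASH_SHIFT)
    #define MC_HASH_SEGS    ((sizeof(uint32_t) * 8) / MC_HASH_SHIFT)

    struct mask_cache_entry {
            uint32_t skb_hash;
            uint32_t mask_index;
    };

    /* Probe up to MC_HASH_SEGS slots, shifting in fresh hash bits for
     * each segment; return the hit or NULL so the caller can evict. */
    static struct mask_cache_entry *
    mc_probe(struct mask_cache_entry *entries, uint32_t skb_hash)
    {
            uint32_t hash = skb_hash;
            unsigned int seg;

            for (seg = 0; seg < MC_HASH_SEGS; seg++) {
                    struct mask_cache_entry *e =
                            &entries[hash & (MC_HASH_ENTRIES - 1)];

                    if (e->skb_hash == skb_hash)
                            return e;
                    hash >>= MC_HASH_SHIFT;
            }
            return NULL;
    }
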
diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c
index 3fc38d16c456..5da9392b03d6 100644
--- a/net/openvswitch/vport.c
+++ b/net/openvswitch/vport.c
@@ -403,8 +403,9 @@ u32 ovs_vport_find_upcall_portid(const struct vport *vport, struct sk_buff *skb)
ids = rcu_dereference(vport->upcall_portids);
- if (ids->n_ids == 1 && ids->ids[0] == 0)
- return 0;
+ /* If there is only one portid, select it in the fast-path. */
+ if (ids->n_ids == 1)
+ return ids->ids[0];
hash = skb_get_hash(skb);
ids_index = hash - ids->n_ids * reciprocal_divide(hash, ids->rn_ids);
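
The retained expression computes hash % n_ids without a hardware divide: the quotient comes from a precomputed multiplicative reciprocal, and the remainder by back-multiplication. A userspace sketch of the classic multiply-shift form (the kernel's linux/reciprocal_div.h uses a more careful variant; here n must be >= 2):

    #include <stdint.h>

    struct recip { uint32_t m; };                   /* ~ ceil(2^32 / n) */

    static struct recip recip_value(uint32_t n)     /* precompute once */
    {
            struct recip r = { (uint32_t)(((1ULL << 32) + n - 1) / n) };
            return r;
    }

    static uint32_t recip_divide(uint32_t a, struct recip r)
    {
            return (uint32_t)(((uint64_t)a * r.m) >> 32);   /* ~ a / n */
    }

    static uint32_t mod_n(uint32_t a, uint32_t n, struct recip r)
    {
            return a - n * recip_divide(a, r);      /* a % n, divide-free */
    }
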
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 82a50e850245..53c1d41fb1c9 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1295,15 +1295,21 @@ static void packet_sock_destruct(struct sock *sk)
static bool fanout_flow_is_huge(struct packet_sock *po, struct sk_buff *skb)
{
- u32 rxhash;
+ u32 *history = po->rollover->history;
+ u32 victim, rxhash;
int i, count = 0;
rxhash = skb_get_hash(skb);
for (i = 0; i < ROLLOVER_HLEN; i++)
- if (po->rollover->history[i] == rxhash)
+ if (READ_ONCE(history[i]) == rxhash)
count++;
- po->rollover->history[prandom_u32() % ROLLOVER_HLEN] = rxhash;
+ victim = prandom_u32() % ROLLOVER_HLEN;
+
+ /* Avoid dirtying the cache line if possible */
+ if (READ_ONCE(history[victim]) != rxhash)
+ WRITE_ONCE(history[victim], rxhash);
+
return count > (ROLLOVER_HLEN >> 1);
}
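
The fanout_flow_is_huge() change is an instance of a general pattern: probe with READ_ONCE() before storing, so a slot whose value is unchanged never dirties a cache line that other CPUs are reading concurrently. A minimal sketch, with userspace stand-ins for the kernel macros:

    #include <stdint.h>

    /* stand-ins; the kernel macros also cope with wider types */
    #define READ_ONCE(x)      (*(const volatile __typeof__(x) *)&(x))
    #define WRITE_ONCE(x, v)  (*(volatile __typeof__(x) *)&(x) = (v))

    static void store_if_changed(uint32_t *slot, uint32_t val)
    {
            /* read-only probe: the line can stay in shared state */
            if (READ_ONCE(*slot) != val)
                    /* write only when the value actually changes */
                    WRITE_ONCE(*slot, val);
    }
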
diff --git a/net/qrtr/tun.c b/net/qrtr/tun.c
index e35869e81766..15ce9b642b25 100644
--- a/net/qrtr/tun.c
+++ b/net/qrtr/tun.c
@@ -111,15 +111,11 @@ static __poll_t qrtr_tun_poll(struct file *filp, poll_table *wait)
static int qrtr_tun_release(struct inode *inode, struct file *filp)
{
struct qrtr_tun *tun = filp->private_data;
- struct sk_buff *skb;
qrtr_endpoint_unregister(&tun->ep);
/* Discard all SKBs */
- while (!skb_queue_empty(&tun->queue)) {
- skb = skb_dequeue(&tun->queue);
- kfree_skb(skb);
- }
+ skb_queue_purge(&tun->queue);
kfree(tun);
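
skb_queue_purge() is the canonical spelling of the loop it replaces here; its core is roughly (see net/core/skbuff.c):

    void skb_queue_purge(struct sk_buff_head *list)
    {
            struct sk_buff *skb;

            while ((skb = skb_dequeue(list)) != NULL)
                    kfree_skb(skb);
    }
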
diff --git a/net/rds/ib.c b/net/rds/ib.c
index 9de2ae22d583..3fd5f40189bd 100644
--- a/net/rds/ib.c
+++ b/net/rds/ib.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2019 Oracle and/or its affiliates. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -30,6 +30,7 @@
* SOFTWARE.
*
*/
+#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/in.h>
#include <linux/if.h>
@@ -107,6 +108,7 @@ static void rds_ib_dev_free(struct work_struct *work)
rds_ib_destroy_mr_pool(rds_ibdev->mr_1m_pool);
if (rds_ibdev->pd)
ib_dealloc_pd(rds_ibdev->pd);
+ dma_pool_destroy(rds_ibdev->rid_hdrs_pool);
list_for_each_entry_safe(i_ipaddr, i_next, &rds_ibdev->ipaddr_list, list) {
list_del(&i_ipaddr->list);
@@ -182,6 +184,12 @@ static void rds_ib_add_one(struct ib_device *device)
rds_ibdev->pd = NULL;
goto put_dev;
}
+ rds_ibdev->rid_hdrs_pool = dma_pool_create(device->name,
+ device->dma_device,
+ sizeof(struct rds_header),
+ L1_CACHE_BYTES, 0);
+ if (!rds_ibdev->rid_hdrs_pool)
+ goto put_dev;
rds_ibdev->mr_1m_pool =
rds_ib_create_mr_pool(rds_ibdev, RDS_IB_MR_1M_POOL);
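
These rds/ib.c hunks move the per-connection header buffers from one large coherent allocation to a per-device struct dma_pool of fixed-size, cache-line-aligned blocks. The pool lifecycle, condensed from the calls this patch adds (error handling trimmed):

    struct dma_pool *pool;
    struct rds_header *hdr;
    dma_addr_t daddr;

    /* one pool per device: fixed block size, L1 alignment,
     * no boundary-crossing restriction */
    pool = dma_pool_create(device->name, device->dma_device,
                           sizeof(struct rds_header), L1_CACHE_BYTES, 0);

    /* one zeroed block per ring slot; daddr receives its DMA address */
    hdr = dma_pool_zalloc(pool, GFP_KERNEL, &daddr);

    /* teardown: each block back to the pool, then the pool itself */
    dma_pool_free(pool, hdr, daddr);
    dma_pool_destroy(pool);
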
diff --git a/net/rds/ib.h b/net/rds/ib.h
index f2b558e8b5ea..6e6f24753998 100644
--- a/net/rds/ib.h
+++ b/net/rds/ib.h
@@ -165,8 +165,8 @@ struct rds_ib_connection {
/* tx */
struct rds_ib_work_ring i_send_ring;
struct rm_data_op *i_data_op;
- struct rds_header *i_send_hdrs;
- dma_addr_t i_send_hdrs_dma;
+ struct rds_header **i_send_hdrs;
+ dma_addr_t *i_send_hdrs_dma;
struct rds_ib_send_work *i_sends;
atomic_t i_signaled_sends;
@@ -175,8 +175,8 @@ struct rds_ib_connection {
struct rds_ib_work_ring i_recv_ring;
struct rds_ib_incoming *i_ibinc;
u32 i_recv_data_rem;
- struct rds_header *i_recv_hdrs;
- dma_addr_t i_recv_hdrs_dma;
+ struct rds_header **i_recv_hdrs;
+ dma_addr_t *i_recv_hdrs_dma;
struct rds_ib_recv_work *i_recvs;
u64 i_ack_recv; /* last ACK received */
struct rds_ib_refill_cache i_cache_incs;
@@ -246,6 +246,7 @@ struct rds_ib_device {
struct list_head conn_list;
struct ib_device *dev;
struct ib_pd *pd;
+ struct dma_pool *rid_hdrs_pool; /* RDS headers DMA pool */
bool use_fastreg;
unsigned int max_mrs;
@@ -381,7 +382,11 @@ int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,
int rds_ib_cm_initiate_connect(struct rdma_cm_id *cm_id, bool isv6);
void rds_ib_cm_connect_complete(struct rds_connection *conn,
struct rdma_cm_event *event);
-
+struct rds_header **rds_dma_hdrs_alloc(struct ib_device *ibdev,
+ struct dma_pool *pool,
+ dma_addr_t **dma_addrs, u32 num_hdrs);
+void rds_dma_hdrs_free(struct dma_pool *pool, struct rds_header **hdrs,
+ dma_addr_t *dma_addrs, u32 num_hdrs);
#define rds_ib_conn_error(conn, fmt...) \
__rds_ib_conn_error(conn, KERN_WARNING "RDS/IB: " fmt)
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index 233f1368162b..c71f4328d138 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2019 Oracle and/or its affiliates. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -30,6 +30,7 @@
* SOFTWARE.
*
*/
+#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/in.h>
#include <linux/slab.h>
@@ -439,6 +440,68 @@ static inline void ibdev_put_vector(struct rds_ib_device *rds_ibdev, int index)
rds_ibdev->vector_load[index]--;
}
+/* Allocate DMA coherent memory to be used to store struct rds_header for
+ * sending/receiving packets. The pointers to the DMA memory and the
+ * associated DMA addresses are stored in two arrays.
+ *
+ * @ibdev: the IB device
+ * @pool: the DMA memory pool
+ * @dma_addrs: pointer to the array for storing DMA addresses
+ * @num_hdrs: number of headers to allocate
+ *
+ * It returns a pointer to the array storing the DMA memory pointers, or
+ * a NULL pointer on error.
+ */
+struct rds_header **rds_dma_hdrs_alloc(struct ib_device *ibdev,
+ struct dma_pool *pool,
+ dma_addr_t **dma_addrs, u32 num_hdrs)
+{
+ struct rds_header **hdrs;
+ dma_addr_t *hdr_daddrs;
+ u32 i;
+
+ hdrs = kvmalloc_node(sizeof(*hdrs) * num_hdrs, GFP_KERNEL,
+ ibdev_to_node(ibdev));
+ if (!hdrs)
+ return NULL;
+
+ hdr_daddrs = kvmalloc_node(sizeof(*hdr_daddrs) * num_hdrs, GFP_KERNEL,
+ ibdev_to_node(ibdev));
+ if (!hdr_daddrs) {
+ kvfree(hdrs);
+ return NULL;
+ }
+
+ for (i = 0; i < num_hdrs; i++) {
+ hdrs[i] = dma_pool_zalloc(pool, GFP_KERNEL, &hdr_daddrs[i]);
+ if (!hdrs[i]) {
+ rds_dma_hdrs_free(pool, hdrs, hdr_daddrs, i);
+ return NULL;
+ }
+ }
+
+ *dma_addrs = hdr_daddrs;
+ return hdrs;
+}
+
+/* Free the DMA memory used to store struct rds_header.
+ *
+ * @pool: the DMA memory pool
+ * @hdrs: pointer to the array storing DMA memory pointers
+ * @dma_addrs: pointer to the array storing DMA addresses
+ * @num_hdrs: number of headers to free.
+ */
+void rds_dma_hdrs_free(struct dma_pool *pool, struct rds_header **hdrs,
+ dma_addr_t *dma_addrs, u32 num_hdrs)
+{
+ u32 i;
+
+ for (i = 0; i < num_hdrs; i++)
+ dma_pool_free(pool, hdrs[i], dma_addrs[i]);
+ kvfree(hdrs);
+ kvfree(dma_addrs);
+}
+
/*
* This needs to be very careful to not leave IS_ERR pointers around for
* cleanup to trip over.
@@ -450,7 +513,9 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
struct ib_qp_init_attr attr;
struct ib_cq_init_attr cq_attr = {};
struct rds_ib_device *rds_ibdev;
+ unsigned long max_wrs;
int ret, fr_queue_space;
+ struct dma_pool *pool;
/*
* It's normal to see a null device if an incoming connection races
@@ -469,10 +534,15 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
/* add the conn now so that connection establishment has the dev */
rds_ib_add_conn(rds_ibdev, conn);
- if (rds_ibdev->max_wrs < ic->i_send_ring.w_nr + 1)
- rds_ib_ring_resize(&ic->i_send_ring, rds_ibdev->max_wrs - 1);
- if (rds_ibdev->max_wrs < ic->i_recv_ring.w_nr + 1)
- rds_ib_ring_resize(&ic->i_recv_ring, rds_ibdev->max_wrs - 1);
+ max_wrs = rds_ibdev->max_wrs < rds_ib_sysctl_max_send_wr + 1 ?
+ rds_ibdev->max_wrs - 1 : rds_ib_sysctl_max_send_wr;
+ if (ic->i_send_ring.w_nr != max_wrs)
+ rds_ib_ring_resize(&ic->i_send_ring, max_wrs);
+
+ max_wrs = rds_ibdev->max_wrs < rds_ib_sysctl_max_recv_wr + 1 ?
+ rds_ibdev->max_wrs - 1 : rds_ib_sysctl_max_recv_wr;
+ if (ic->i_recv_ring.w_nr != max_wrs)
+ rds_ib_ring_resize(&ic->i_recv_ring, max_wrs);
/* Protection domain and memory range */
ic->i_pd = rds_ibdev->pd;
@@ -541,31 +611,28 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
goto recv_cq_out;
}
- ic->i_send_hdrs = ib_dma_alloc_coherent(dev,
- ic->i_send_ring.w_nr *
- sizeof(struct rds_header),
- &ic->i_send_hdrs_dma, GFP_KERNEL);
+ pool = rds_ibdev->rid_hdrs_pool;
+ ic->i_send_hdrs = rds_dma_hdrs_alloc(dev, pool, &ic->i_send_hdrs_dma,
+ ic->i_send_ring.w_nr);
if (!ic->i_send_hdrs) {
ret = -ENOMEM;
- rdsdebug("ib_dma_alloc_coherent send failed\n");
+ rdsdebug("DMA send hdrs alloc failed\n");
goto qp_out;
}
- ic->i_recv_hdrs = ib_dma_alloc_coherent(dev,
- ic->i_recv_ring.w_nr *
- sizeof(struct rds_header),
- &ic->i_recv_hdrs_dma, GFP_KERNEL);
+ ic->i_recv_hdrs = rds_dma_hdrs_alloc(dev, pool, &ic->i_recv_hdrs_dma,
+ ic->i_recv_ring.w_nr);
if (!ic->i_recv_hdrs) {
ret = -ENOMEM;
- rdsdebug("ib_dma_alloc_coherent recv failed\n");
+ rdsdebug("DMA recv hdrs alloc failed\n");
goto send_hdrs_dma_out;
}
- ic->i_ack = ib_dma_alloc_coherent(dev, sizeof(struct rds_header),
- &ic->i_ack_dma, GFP_KERNEL);
+ ic->i_ack = dma_pool_zalloc(pool, GFP_KERNEL,
+ &ic->i_ack_dma);
if (!ic->i_ack) {
ret = -ENOMEM;
- rdsdebug("ib_dma_alloc_coherent ack failed\n");
+ rdsdebug("DMA ack header alloc failed\n");
goto recv_hdrs_dma_out;
}
@@ -596,17 +663,23 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
sends_out:
vfree(ic->i_sends);
+
ack_dma_out:
- ib_dma_free_coherent(dev, sizeof(struct rds_header),
- ic->i_ack, ic->i_ack_dma);
+ dma_pool_free(pool, ic->i_ack, ic->i_ack_dma);
+ ic->i_ack = NULL;
+
recv_hdrs_dma_out:
- ib_dma_free_coherent(dev, ic->i_recv_ring.w_nr *
- sizeof(struct rds_header),
- ic->i_recv_hdrs, ic->i_recv_hdrs_dma);
+ rds_dma_hdrs_free(pool, ic->i_recv_hdrs, ic->i_recv_hdrs_dma,
+ ic->i_recv_ring.w_nr);
+ ic->i_recv_hdrs = NULL;
+ ic->i_recv_hdrs_dma = NULL;
+
send_hdrs_dma_out:
- ib_dma_free_coherent(dev, ic->i_send_ring.w_nr *
- sizeof(struct rds_header),
- ic->i_send_hdrs, ic->i_send_hdrs_dma);
+ rds_dma_hdrs_free(pool, ic->i_send_hdrs, ic->i_send_hdrs_dma,
+ ic->i_send_ring.w_nr);
+ ic->i_send_hdrs = NULL;
+ ic->i_send_hdrs_dma = NULL;
+
qp_out:
rdma_destroy_qp(ic->i_cm_id);
recv_cq_out:
@@ -984,8 +1057,6 @@ void rds_ib_conn_path_shutdown(struct rds_conn_path *cp)
ic->i_cm_id ? ic->i_cm_id->qp : NULL);
if (ic->i_cm_id) {
- struct ib_device *dev = ic->i_cm_id->device;
-
rdsdebug("disconnecting cm %p\n", ic->i_cm_id);
err = rdma_disconnect(ic->i_cm_id);
if (err) {
@@ -1035,24 +1106,39 @@ void rds_ib_conn_path_shutdown(struct rds_conn_path *cp)
ib_destroy_cq(ic->i_recv_cq);
}
- /* then free the resources that ib callbacks use */
- if (ic->i_send_hdrs)
- ib_dma_free_coherent(dev,
- ic->i_send_ring.w_nr *
- sizeof(struct rds_header),
- ic->i_send_hdrs,
- ic->i_send_hdrs_dma);
-
- if (ic->i_recv_hdrs)
- ib_dma_free_coherent(dev,
- ic->i_recv_ring.w_nr *
- sizeof(struct rds_header),
- ic->i_recv_hdrs,
- ic->i_recv_hdrs_dma);
-
- if (ic->i_ack)
- ib_dma_free_coherent(dev, sizeof(struct rds_header),
- ic->i_ack, ic->i_ack_dma);
+ if (ic->rds_ibdev) {
+ struct dma_pool *pool;
+
+ pool = ic->rds_ibdev->rid_hdrs_pool;
+
+ /* then free the resources that ib callbacks use */
+ if (ic->i_send_hdrs) {
+ rds_dma_hdrs_free(pool, ic->i_send_hdrs,
+ ic->i_send_hdrs_dma,
+ ic->i_send_ring.w_nr);
+ ic->i_send_hdrs = NULL;
+ ic->i_send_hdrs_dma = NULL;
+ }
+
+ if (ic->i_recv_hdrs) {
+ rds_dma_hdrs_free(pool, ic->i_recv_hdrs,
+ ic->i_recv_hdrs_dma,
+ ic->i_recv_ring.w_nr);
+ ic->i_recv_hdrs = NULL;
+ ic->i_recv_hdrs_dma = NULL;
+ }
+
+ if (ic->i_ack) {
+ dma_pool_free(pool, ic->i_ack, ic->i_ack_dma);
+ ic->i_ack = NULL;
+ }
+ } else {
+ WARN_ON(ic->i_send_hdrs);
+ WARN_ON(ic->i_send_hdrs_dma);
+ WARN_ON(ic->i_recv_hdrs);
+ WARN_ON(ic->i_recv_hdrs_dma);
+ WARN_ON(ic->i_ack);
+ }
if (ic->i_sends)
rds_ib_send_clear_ring(ic);
@@ -1071,9 +1157,6 @@ void rds_ib_conn_path_shutdown(struct rds_conn_path *cp)
ic->i_pd = NULL;
ic->i_send_cq = NULL;
ic->i_recv_cq = NULL;
- ic->i_send_hdrs = NULL;
- ic->i_recv_hdrs = NULL;
- ic->i_ack = NULL;
}
BUG_ON(ic->rds_ibdev);
@@ -1099,8 +1182,9 @@ void rds_ib_conn_path_shutdown(struct rds_conn_path *cp)
ic->i_flowctl = 0;
atomic_set(&ic->i_credits, 0);
- rds_ib_ring_init(&ic->i_send_ring, rds_ib_sysctl_max_send_wr);
- rds_ib_ring_init(&ic->i_recv_ring, rds_ib_sysctl_max_recv_wr);
+ /* Re-init rings, but retain sizes. */
+ rds_ib_ring_init(&ic->i_send_ring, ic->i_send_ring.w_nr);
+ rds_ib_ring_init(&ic->i_recv_ring, ic->i_recv_ring.w_nr);
if (ic->i_ibinc) {
rds_inc_put(&ic->i_ibinc->ii_inc);
@@ -1147,8 +1231,8 @@ int rds_ib_conn_alloc(struct rds_connection *conn, gfp_t gfp)
* rds_ib_conn_shutdown() waits for these to be emptied so they
* must be initialized before it can be called.
*/
- rds_ib_ring_init(&ic->i_send_ring, rds_ib_sysctl_max_send_wr);
- rds_ib_ring_init(&ic->i_recv_ring, rds_ib_sysctl_max_recv_wr);
+ rds_ib_ring_init(&ic->i_send_ring, 0);
+ rds_ib_ring_init(&ic->i_recv_ring, 0);
ic->conn = conn;
conn->c_transport_data = ic;
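
The ring-resize hunk in rds_ib_setup_qp() above encodes a clamp as a ternary; an equivalent and arguably clearer form, using the kernel's typed min macro:

    /* ring size = the sysctl request, bounded by the device limit - 1 */
    max_wrs = min_t(unsigned long, rds_ib_sysctl_max_send_wr,
                    rds_ibdev->max_wrs - 1);
    if (ic->i_send_ring.w_nr != max_wrs)
            rds_ib_ring_resize(&ic->i_send_ring, max_wrs);
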
diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
index a0f99bbf362c..694d411dc72f 100644
--- a/net/rds/ib_recv.c
+++ b/net/rds/ib_recv.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006, 2017 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2019 Oracle and/or its affiliates. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -61,7 +61,7 @@ void rds_ib_recv_init_ring(struct rds_ib_connection *ic)
recv->r_wr.num_sge = RDS_IB_RECV_SGE;
sge = &recv->r_sge[0];
- sge->addr = ic->i_recv_hdrs_dma + (i * sizeof(struct rds_header));
+ sge->addr = ic->i_recv_hdrs_dma[i];
sge->length = sizeof(struct rds_header);
sge->lkey = ic->i_pd->local_dma_lkey;
@@ -343,7 +343,7 @@ static int rds_ib_recv_refill_one(struct rds_connection *conn,
WARN_ON(ret != 1);
sge = &recv->r_sge[0];
- sge->addr = ic->i_recv_hdrs_dma + (recv - ic->i_recvs) * sizeof(struct rds_header);
+ sge->addr = ic->i_recv_hdrs_dma[recv - ic->i_recvs];
sge->length = sizeof(struct rds_header);
sge = &recv->r_sge[1];
@@ -861,7 +861,7 @@ static void rds_ib_process_recv(struct rds_connection *conn,
}
data_len -= sizeof(struct rds_header);
- ihdr = &ic->i_recv_hdrs[recv - ic->i_recvs];
+ ihdr = ic->i_recv_hdrs[recv - ic->i_recvs];
/* Validate the checksum. */
if (!rds_message_verify_checksum(ihdr)) {
@@ -993,10 +993,11 @@ void rds_ib_recv_cqe_handler(struct rds_ib_connection *ic,
} else {
/* We expect errors as the qp is drained during shutdown */
if (rds_conn_up(conn) || rds_conn_connecting(conn))
- rds_ib_conn_error(conn, "recv completion on <%pI6c,%pI6c, %d> had status %u (%s), disconnecting and reconnecting\n",
+ rds_ib_conn_error(conn, "recv completion on <%pI6c,%pI6c, %d> had status %u (%s), vendor err 0x%x, disconnecting and reconnecting\n",
&conn->c_laddr, &conn->c_faddr,
conn->c_tos, wc->status,
- ib_wc_status_msg(wc->status));
+ ib_wc_status_msg(wc->status),
+ wc->vendor_err);
}
/* rds_ib_process_recv() doesn't always consume the frag, and
diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c
index dfe6237dafe2..d1cc1d7778d8 100644
--- a/net/rds/ib_send.c
+++ b/net/rds/ib_send.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006, 2017 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2019 Oracle and/or its affiliates. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -201,7 +201,8 @@ void rds_ib_send_init_ring(struct rds_ib_connection *ic)
send->s_wr.ex.imm_data = 0;
sge = &send->s_sge[0];
- sge->addr = ic->i_send_hdrs_dma + (i * sizeof(struct rds_header));
+ sge->addr = ic->i_send_hdrs_dma[i];
+
sge->length = sizeof(struct rds_header);
sge->lkey = ic->i_pd->local_dma_lkey;
@@ -300,10 +301,10 @@ void rds_ib_send_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc)
/* We expect errors as the qp is drained during shutdown */
if (wc->status != IB_WC_SUCCESS && rds_conn_up(conn)) {
- rds_ib_conn_error(conn, "send completion on <%pI6c,%pI6c,%d> had status %u (%s), disconnecting and reconnecting\n",
+ rds_ib_conn_error(conn, "send completion on <%pI6c,%pI6c,%d> had status %u (%s), vendor err 0x%x, disconnecting and reconnecting\n",
&conn->c_laddr, &conn->c_faddr,
conn->c_tos, wc->status,
- ib_wc_status_msg(wc->status));
+ ib_wc_status_msg(wc->status), wc->vendor_err);
}
}
@@ -631,11 +632,13 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
send->s_queued = jiffies;
send->s_op = NULL;
- send->s_sge[0].addr = ic->i_send_hdrs_dma
- + (pos * sizeof(struct rds_header));
+ send->s_sge[0].addr = ic->i_send_hdrs_dma[pos];
+
send->s_sge[0].length = sizeof(struct rds_header);
- memcpy(&ic->i_send_hdrs[pos], &rm->m_inc.i_hdr, sizeof(struct rds_header));
+ memcpy(ic->i_send_hdrs[pos], &rm->m_inc.i_hdr,
+ sizeof(struct rds_header));
+
/* Set up the data, if present */
if (i < work_alloc
@@ -674,7 +677,7 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
&send->s_wr, send->s_wr.num_sge, send->s_wr.next);
if (ic->i_flowctl && adv_credits) {
- struct rds_header *hdr = &ic->i_send_hdrs[pos];
+ struct rds_header *hdr = ic->i_send_hdrs[pos];
/* add credit and redo the header checksum */
hdr->h_credit = adv_credits;
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index f9b08a6d8dbe..0bf9bf1ceb8f 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -1316,10 +1316,12 @@ static const struct file_operations rfkill_fops = {
.llseek = no_llseek,
};
+#define RFKILL_NAME "rfkill"
+
static struct miscdevice rfkill_miscdev = {
- .name = "rfkill",
.fops = &rfkill_fops,
- .minor = MISC_DYNAMIC_MINOR,
+ .name = RFKILL_NAME,
+ .minor = RFKILL_MINOR,
};
static int __init rfkill_init(void)
@@ -1371,3 +1373,6 @@ static void __exit rfkill_exit(void)
class_unregister(&rfkill_class);
}
module_exit(rfkill_exit);
+
+MODULE_ALIAS_MISCDEV(RFKILL_MINOR);
+MODULE_ALIAS("devname:" RFKILL_NAME);
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index 6a0df7c8a939..46b8ff24020d 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -906,7 +906,7 @@ static int rose_accept(struct socket *sock, struct socket *newsock, int flags,
/* Now attach up the new socket */
skb->sk = NULL;
kfree_skb(skb);
- sk->sk_ack_backlog--;
+ sk_acceptq_removed(sk);
out_release:
release_sock(sk);
@@ -1011,7 +1011,7 @@ int rose_rx_call_request(struct sk_buff *skb, struct net_device *dev, struct ros
make_rose->va = 0;
make_rose->vr = 0;
make_rose->vl = 0;
- sk->sk_ack_backlog++;
+ sk_acceptq_added(sk);
rose_insert_socket(make);
diff --git a/net/rxrpc/Kconfig b/net/rxrpc/Kconfig
index 05610c3a3d25..57ebb29c26ad 100644
--- a/net/rxrpc/Kconfig
+++ b/net/rxrpc/Kconfig
@@ -49,7 +49,7 @@ config RXKAD
depends on AF_RXRPC
select CRYPTO
select CRYPTO_MANAGER
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_PCBC
select CRYPTO_FCRYPT
help
diff --git a/net/rxrpc/peer_object.c b/net/rxrpc/peer_object.c
index 64830d8c1fdb..452163eadb98 100644
--- a/net/rxrpc/peer_object.c
+++ b/net/rxrpc/peer_object.c
@@ -209,6 +209,7 @@ static void rxrpc_assess_MTU_size(struct rxrpc_sock *rx,
*/
struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *local, gfp_t gfp)
{
+ const void *here = __builtin_return_address(0);
struct rxrpc_peer *peer;
_enter("");
@@ -230,6 +231,7 @@ struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *local, gfp_t gfp)
peer->cong_cwnd = 3;
else
peer->cong_cwnd = 4;
+ trace_rxrpc_peer(peer->debug_id, rxrpc_peer_new, 1, here);
}
_leave(" = %p", peer);
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 69d4676a402f..90a31b15585f 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -88,7 +88,7 @@ struct tcf_chain *tcf_action_set_ctrlact(struct tc_action *a, int action,
struct tcf_chain *goto_chain)
{
a->tcfa_action = action;
- rcu_swap_protected(a->goto_chain, goto_chain, 1);
+ goto_chain = rcu_replace_pointer(a->goto_chain, goto_chain, 1);
return goto_chain;
}
EXPORT_SYMBOL(tcf_action_set_ctrlact);
@@ -188,6 +188,8 @@ static size_t tcf_action_shared_attrs_size(const struct tc_action *act)
+ nla_total_size(0) /* TCA_ACT_STATS nested */
/* TCA_STATS_BASIC */
+ nla_total_size_64bit(sizeof(struct gnet_stats_basic))
+ /* TCA_STATS_PKT64 */
+ + nla_total_size_64bit(sizeof(u64))
/* TCA_STATS_QUEUE */
+ nla_total_size_64bit(sizeof(struct gnet_stats_queue))
+ nla_total_size(0) /* TCA_OPTIONS nested */
@@ -399,7 +401,7 @@ static int tcf_idr_delete_index(struct tcf_idrinfo *idrinfo, u32 index)
int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
struct tc_action **a, const struct tc_action_ops *ops,
- int bind, bool cpustats)
+ int bind, bool cpustats, u32 flags)
{
struct tc_action *p = kzalloc(ops->size, GFP_KERNEL);
struct tcf_idrinfo *idrinfo = tn->idrinfo;
@@ -427,6 +429,7 @@ int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
p->tcfa_tm.install = jiffies;
p->tcfa_tm.lastuse = jiffies;
p->tcfa_tm.firstuse = 0;
+ p->tcfa_flags = flags;
if (est) {
err = gen_new_estimator(&p->tcfa_bstats, p->cpu_bstats,
&p->tcfa_rate_est,
@@ -451,6 +454,17 @@ err1:
}
EXPORT_SYMBOL(tcf_idr_create);
+int tcf_idr_create_from_flags(struct tc_action_net *tn, u32 index,
+ struct nlattr *est, struct tc_action **a,
+ const struct tc_action_ops *ops, int bind,
+ u32 flags)
+{
+ /* Set cpustats according to the action's flags. */
+ return tcf_idr_create(tn, index, est, a, ops, bind,
+ !(flags & TCA_ACT_FLAGS_NO_PERCPU_STATS), flags);
+}
+EXPORT_SYMBOL(tcf_idr_create_from_flags);
+
void tcf_idr_insert(struct tc_action_net *tn, struct tc_action *a)
{
struct tcf_idrinfo *idrinfo = tn->idrinfo;
@@ -773,6 +787,14 @@ tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
}
rcu_read_unlock();
+ if (a->tcfa_flags) {
+ struct nla_bitfield32 flags = { a->tcfa_flags,
+ a->tcfa_flags, };
+
+ if (nla_put(skb, TCA_ACT_FLAGS, sizeof(flags), &flags))
+ goto nla_put_failure;
+ }
+
nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
if (nest == NULL)
goto nla_put_failure;
@@ -831,12 +853,15 @@ static struct tc_cookie *nla_memdup_cookie(struct nlattr **tb)
return c;
}
+static const u32 tca_act_flags_allowed = TCA_ACT_FLAGS_NO_PERCPU_STATS;
static const struct nla_policy tcf_action_policy[TCA_ACT_MAX + 1] = {
[TCA_ACT_KIND] = { .type = NLA_STRING },
[TCA_ACT_INDEX] = { .type = NLA_U32 },
[TCA_ACT_COOKIE] = { .type = NLA_BINARY,
.len = TC_COOKIE_MAX_SIZE },
[TCA_ACT_OPTIONS] = { .type = NLA_NESTED },
+ [TCA_ACT_FLAGS] = { .type = NLA_BITFIELD32,
+ .validation_data = &tca_act_flags_allowed },
};
struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
@@ -845,6 +870,7 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
bool rtnl_held,
struct netlink_ext_ack *extack)
{
+ struct nla_bitfield32 flags = { 0, 0 };
struct tc_action *a;
struct tc_action_ops *a_o;
struct tc_cookie *cookie = NULL;
@@ -876,6 +902,8 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
goto err_out;
}
}
+ if (tb[TCA_ACT_FLAGS])
+ flags = nla_get_bitfield32(tb[TCA_ACT_FLAGS]);
} else {
if (strlcpy(act_name, name, IFNAMSIZ) >= IFNAMSIZ) {
NL_SET_ERR_MSG(extack, "TC action name too long");
@@ -914,10 +942,10 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
/* backward compatibility for policer */
if (name == NULL)
err = a_o->init(net, tb[TCA_ACT_OPTIONS], est, &a, ovr, bind,
- rtnl_held, tp, extack);
+ rtnl_held, tp, flags.value, extack);
else
err = a_o->init(net, nla, est, &a, ovr, bind, rtnl_held,
- tp, extack);
+ tp, flags.value, extack);
if (err < 0)
goto err_mod;
@@ -975,7 +1003,6 @@ int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla,
err = PTR_ERR(act);
goto err;
}
- act->order = i;
sz += tcf_action_fill_size(act);
/* Start from index 0 */
actions[i - 1] = act;
@@ -989,6 +1016,29 @@ err:
return err;
}
+void tcf_action_update_stats(struct tc_action *a, u64 bytes, u32 packets,
+ bool drop, bool hw)
+{
+ if (a->cpu_bstats) {
+ _bstats_cpu_update(this_cpu_ptr(a->cpu_bstats), bytes, packets);
+
+ if (drop)
+ this_cpu_ptr(a->cpu_qstats)->drops += packets;
+
+ if (hw)
+ _bstats_cpu_update(this_cpu_ptr(a->cpu_bstats_hw),
+ bytes, packets);
+ return;
+ }
+
+ _bstats_update(&a->tcfa_bstats, bytes, packets);
+ if (drop)
+ a->tcfa_qstats.drops += packets;
+ if (hw)
+ _bstats_update(&a->tcfa_bstats_hw, bytes, packets);
+}
+EXPORT_SYMBOL(tcf_action_update_stats);
+
int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *p,
int compat_mode)
{
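
The act_api.c changes thread one decision through every action below: TCA_ACT_FLAGS arrives as an NLA_BITFIELD32 (a {value, selector} pair that the policy validates against tca_act_flags_allowed), and tcf_idr_create_from_flags() turns its one defined bit into the cpustats argument. Condensed:

    /* parse: the policy has already rejected unknown selector bits */
    struct nla_bitfield32 fl = nla_get_bitfield32(tb[TCA_ACT_FLAGS]);

    /* per-CPU stats unless userspace opted for the lock-protected ones */
    bool cpustats = !(fl.value & TCA_ACT_FLAGS_NO_PERCPU_STATS);

    err = tcf_idr_create(tn, index, est, a, ops, bind, cpustats, fl.value);
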
diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
index 04b7bd4ec751..46f47e58b3be 100644
--- a/net/sched/act_bpf.c
+++ b/net/sched/act_bpf.c
@@ -275,7 +275,8 @@ static void tcf_bpf_prog_fill_cfg(const struct tcf_bpf *prog,
static int tcf_bpf_init(struct net *net, struct nlattr *nla,
struct nlattr *est, struct tc_action **act,
int replace, int bind, bool rtnl_held,
- struct tcf_proto *tp, struct netlink_ext_ack *extack)
+ struct tcf_proto *tp, u32 flags,
+ struct netlink_ext_ack *extack)
{
struct tc_action_net *tn = net_generic(net, bpf_net_id);
struct nlattr *tb[TCA_ACT_BPF_MAX + 1];
@@ -303,7 +304,7 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
ret = tcf_idr_check_alloc(tn, &index, act, bind);
if (!ret) {
ret = tcf_idr_create(tn, index, est, act,
- &act_bpf_ops, bind, true);
+ &act_bpf_ops, bind, true, 0);
if (ret < 0) {
tcf_idr_cleanup(tn, index);
return ret;
diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c
index 2b43cacf82af..43a243081e7d 100644
--- a/net/sched/act_connmark.c
+++ b/net/sched/act_connmark.c
@@ -94,7 +94,7 @@ static const struct nla_policy connmark_policy[TCA_CONNMARK_MAX + 1] = {
static int tcf_connmark_init(struct net *net, struct nlattr *nla,
struct nlattr *est, struct tc_action **a,
int ovr, int bind, bool rtnl_held,
- struct tcf_proto *tp,
+ struct tcf_proto *tp, u32 flags,
struct netlink_ext_ack *extack)
{
struct tc_action_net *tn = net_generic(net, connmark_net_id);
@@ -121,7 +121,7 @@ static int tcf_connmark_init(struct net *net, struct nlattr *nla,
ret = tcf_idr_check_alloc(tn, &index, a, bind);
if (!ret) {
ret = tcf_idr_create(tn, index, est, a,
- &act_connmark_ops, bind, false);
+ &act_connmark_ops, bind, false, 0);
if (ret) {
tcf_idr_cleanup(tn, index);
return ret;
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index d3cfad88dc3a..cb8608f0a77a 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -43,7 +43,7 @@ static struct tc_action_ops act_csum_ops;
static int tcf_csum_init(struct net *net, struct nlattr *nla,
struct nlattr *est, struct tc_action **a, int ovr,
int bind, bool rtnl_held, struct tcf_proto *tp,
- struct netlink_ext_ack *extack)
+ u32 flags, struct netlink_ext_ack *extack)
{
struct tc_action_net *tn = net_generic(net, csum_net_id);
struct tcf_csum_params *params_new;
@@ -68,8 +68,8 @@ static int tcf_csum_init(struct net *net, struct nlattr *nla,
index = parm->index;
err = tcf_idr_check_alloc(tn, &index, a, bind);
if (!err) {
- ret = tcf_idr_create(tn, index, est, a,
- &act_csum_ops, bind, true);
+ ret = tcf_idr_create_from_flags(tn, index, est, a,
+ &act_csum_ops, bind, flags);
if (ret) {
tcf_idr_cleanup(tn, index);
return ret;
@@ -101,8 +101,8 @@ static int tcf_csum_init(struct net *net, struct nlattr *nla,
spin_lock_bh(&p->tcf_lock);
goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
- rcu_swap_protected(p->params, params_new,
- lockdep_is_held(&p->tcf_lock));
+ params_new = rcu_replace_pointer(p->params, params_new,
+ lockdep_is_held(&p->tcf_lock));
spin_unlock_bh(&p->tcf_lock);
if (goto_ch)
@@ -580,7 +580,7 @@ static int tcf_csum_act(struct sk_buff *skb, const struct tc_action *a,
params = rcu_dereference_bh(p->params);
tcf_lastuse_update(&p->tcf_tm);
- bstats_cpu_update(this_cpu_ptr(p->common.cpu_bstats), skb);
+ tcf_action_update_bstats(&p->common, skb);
action = READ_ONCE(p->tcf_action);
if (unlikely(action == TC_ACT_SHOT))
@@ -624,7 +624,7 @@ out:
return action;
drop:
- qstats_drop_inc(this_cpu_ptr(p->common.cpu_qstats));
+ tcf_action_inc_drop_qstats(&p->common);
action = TC_ACT_SHOT;
goto out;
}
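
Every rcu_swap_protected() -> rcu_replace_pointer() conversion in these hunks has the same update-side shape: publish the new parameter block under the action lock, then reclaim the displaced block after a grace period. Spelled out with an explicit old/new pair (the hunks reuse one variable for both roles):

    spin_lock_bh(&p->tcf_lock);
    goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
    params_old = rcu_replace_pointer(p->params, params_new,
                                     lockdep_is_held(&p->tcf_lock));
    spin_unlock_bh(&p->tcf_lock);

    if (params_old)
            kfree_rcu(params_old, rcu);     /* free once readers drain */
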
diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
index fcc46025e790..ae0de372b1c8 100644
--- a/net/sched/act_ct.c
+++ b/net/sched/act_ct.c
@@ -465,16 +465,15 @@ out_push:
skb_push_rcsum(skb, nh_ofs);
out:
- bstats_cpu_update(this_cpu_ptr(a->cpu_bstats), skb);
+ tcf_action_update_bstats(&c->common, skb);
return retval;
drop:
- qstats_drop_inc(this_cpu_ptr(a->cpu_qstats));
+ tcf_action_inc_drop_qstats(&c->common);
return TC_ACT_SHOT;
}
static const struct nla_policy ct_policy[TCA_CT_MAX + 1] = {
- [TCA_CT_UNSPEC] = { .strict_start_type = TCA_CT_UNSPEC + 1 },
[TCA_CT_ACTION] = { .type = NLA_U16 },
[TCA_CT_PARMS] = { .type = NLA_EXACT_LEN, .len = sizeof(struct tc_ct) },
[TCA_CT_ZONE] = { .type = NLA_U16 },
@@ -656,7 +655,7 @@ static int tcf_ct_fill_params(struct net *net,
static int tcf_ct_init(struct net *net, struct nlattr *nla,
struct nlattr *est, struct tc_action **a,
int replace, int bind, bool rtnl_held,
- struct tcf_proto *tp,
+ struct tcf_proto *tp, u32 flags,
struct netlink_ext_ack *extack)
{
struct tc_action_net *tn = net_generic(net, ct_net_id);
@@ -688,8 +687,8 @@ static int tcf_ct_init(struct net *net, struct nlattr *nla,
return err;
if (!err) {
- err = tcf_idr_create(tn, index, est, a,
- &act_ct_ops, bind, true);
+ err = tcf_idr_create_from_flags(tn, index, est, a,
+ &act_ct_ops, bind, flags);
if (err) {
tcf_idr_cleanup(tn, index);
return err;
@@ -722,7 +721,8 @@ static int tcf_ct_init(struct net *net, struct nlattr *nla,
spin_lock_bh(&c->tcf_lock);
goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
- rcu_swap_protected(c->params, params, lockdep_is_held(&c->tcf_lock));
+ params = rcu_replace_pointer(c->params, params,
+ lockdep_is_held(&c->tcf_lock));
spin_unlock_bh(&c->tcf_lock);
if (goto_ch)
@@ -905,11 +905,7 @@ static void tcf_stats_update(struct tc_action *a, u64 bytes, u32 packets,
{
struct tcf_ct *c = to_ct(a);
- _bstats_cpu_update(this_cpu_ptr(a->cpu_bstats), bytes, packets);
-
- if (hw)
- _bstats_cpu_update(this_cpu_ptr(a->cpu_bstats_hw),
- bytes, packets);
+ tcf_action_update_stats(a, bytes, packets, false, hw);
c->tcf_tm.lastuse = max_t(u64, c->tcf_tm.lastuse, lastuse);
}
diff --git a/net/sched/act_ctinfo.c b/net/sched/act_ctinfo.c
index 0dbcfd1dca7b..40038c321b4a 100644
--- a/net/sched/act_ctinfo.c
+++ b/net/sched/act_ctinfo.c
@@ -153,7 +153,7 @@ static const struct nla_policy ctinfo_policy[TCA_CTINFO_MAX + 1] = {
static int tcf_ctinfo_init(struct net *net, struct nlattr *nla,
struct nlattr *est, struct tc_action **a,
int ovr, int bind, bool rtnl_held,
- struct tcf_proto *tp,
+ struct tcf_proto *tp, u32 flags,
struct netlink_ext_ack *extack)
{
struct tc_action_net *tn = net_generic(net, ctinfo_net_id);
@@ -210,7 +210,7 @@ static int tcf_ctinfo_init(struct net *net, struct nlattr *nla,
err = tcf_idr_check_alloc(tn, &index, a, bind);
if (!err) {
ret = tcf_idr_create(tn, index, est, a,
- &act_ctinfo_ops, bind, false);
+ &act_ctinfo_ops, bind, false, 0);
if (ret) {
tcf_idr_cleanup(tn, index);
return ret;
@@ -257,8 +257,8 @@ static int tcf_ctinfo_init(struct net *net, struct nlattr *nla,
spin_lock_bh(&ci->tcf_lock);
goto_ch = tcf_action_set_ctrlact(*a, actparm->action, goto_ch);
- rcu_swap_protected(ci->params, cp_new,
- lockdep_is_held(&ci->tcf_lock));
+ cp_new = rcu_replace_pointer(ci->params, cp_new,
+ lockdep_is_held(&ci->tcf_lock));
spin_unlock_bh(&ci->tcf_lock);
if (goto_ch)
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index 324f1d1f6d47..416065772719 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -53,7 +53,8 @@ static const struct nla_policy gact_policy[TCA_GACT_MAX + 1] = {
static int tcf_gact_init(struct net *net, struct nlattr *nla,
struct nlattr *est, struct tc_action **a,
int ovr, int bind, bool rtnl_held,
- struct tcf_proto *tp, struct netlink_ext_ack *extack)
+ struct tcf_proto *tp, u32 flags,
+ struct netlink_ext_ack *extack)
{
struct tc_action_net *tn = net_generic(net, gact_net_id);
struct nlattr *tb[TCA_GACT_MAX + 1];
@@ -98,8 +99,8 @@ static int tcf_gact_init(struct net *net, struct nlattr *nla,
err = tcf_idr_check_alloc(tn, &index, a, bind);
if (!err) {
- ret = tcf_idr_create(tn, index, est, a,
- &act_gact_ops, bind, true);
+ ret = tcf_idr_create_from_flags(tn, index, est, a,
+ &act_gact_ops, bind, flags);
if (ret) {
tcf_idr_cleanup(tn, index);
return ret;
@@ -161,9 +162,9 @@ static int tcf_gact_act(struct sk_buff *skb, const struct tc_action *a,
action = gact_rand[ptype](gact);
}
#endif
- bstats_cpu_update(this_cpu_ptr(gact->common.cpu_bstats), skb);
+ tcf_action_update_bstats(&gact->common, skb);
if (action == TC_ACT_SHOT)
- qstats_drop_inc(this_cpu_ptr(gact->common.cpu_qstats));
+ tcf_action_inc_drop_qstats(&gact->common);
tcf_lastuse_update(&gact->tcf_tm);
@@ -177,15 +178,7 @@ static void tcf_gact_stats_update(struct tc_action *a, u64 bytes, u32 packets,
int action = READ_ONCE(gact->tcf_action);
struct tcf_t *tm = &gact->tcf_tm;
- _bstats_cpu_update(this_cpu_ptr(gact->common.cpu_bstats), bytes,
- packets);
- if (action == TC_ACT_SHOT)
- this_cpu_ptr(gact->common.cpu_qstats)->drops += packets;
-
- if (hw)
- _bstats_cpu_update(this_cpu_ptr(gact->common.cpu_bstats_hw),
- bytes, packets);
-
+ tcf_action_update_stats(a, bytes, packets, action == TC_ACT_SHOT, hw);
tm->lastuse = max_t(u64, tm->lastuse, lastuse);
}
diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c
index 3a31e241c647..5e6379028fc3 100644
--- a/net/sched/act_ife.c
+++ b/net/sched/act_ife.c
@@ -465,7 +465,8 @@ static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb,
static int tcf_ife_init(struct net *net, struct nlattr *nla,
struct nlattr *est, struct tc_action **a,
int ovr, int bind, bool rtnl_held,
- struct tcf_proto *tp, struct netlink_ext_ack *extack)
+ struct tcf_proto *tp, u32 flags,
+ struct netlink_ext_ack *extack)
{
struct tc_action_net *tn = net_generic(net, ife_net_id);
struct nlattr *tb[TCA_IFE_MAX + 1];
@@ -522,7 +523,7 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
if (!exists) {
ret = tcf_idr_create(tn, index, est, a, &act_ife_ops,
- bind, true);
+ bind, true, 0);
if (ret) {
tcf_idr_cleanup(tn, index);
kfree(p);
@@ -594,7 +595,7 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
spin_lock_bh(&ife->tcf_lock);
/* protected by tcf_lock when modifying existing action */
goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
- rcu_swap_protected(ife->params, p, 1);
+ p = rcu_replace_pointer(ife->params, p, 1);
if (exists)
spin_unlock_bh(&ife->tcf_lock);
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index 214a03d405cf..400a2cfe8452 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -95,7 +95,7 @@ static const struct nla_policy ipt_policy[TCA_IPT_MAX + 1] = {
static int __tcf_ipt_init(struct net *net, unsigned int id, struct nlattr *nla,
struct nlattr *est, struct tc_action **a,
const struct tc_action_ops *ops, int ovr, int bind,
- struct tcf_proto *tp)
+ struct tcf_proto *tp, u32 flags)
{
struct tc_action_net *tn = net_generic(net, id);
struct nlattr *tb[TCA_IPT_MAX + 1];
@@ -144,7 +144,7 @@ static int __tcf_ipt_init(struct net *net, unsigned int id, struct nlattr *nla,
if (!exists) {
ret = tcf_idr_create(tn, index, est, a, ops, bind,
- false);
+ false, 0);
if (ret) {
tcf_idr_cleanup(tn, index);
return ret;
@@ -205,19 +205,19 @@ err1:
static int tcf_ipt_init(struct net *net, struct nlattr *nla,
struct nlattr *est, struct tc_action **a, int ovr,
int bind, bool rtnl_held, struct tcf_proto *tp,
- struct netlink_ext_ack *extack)
+ u32 flags, struct netlink_ext_ack *extack)
{
return __tcf_ipt_init(net, ipt_net_id, nla, est, a, &act_ipt_ops, ovr,
- bind, tp);
+ bind, tp, flags);
}
static int tcf_xt_init(struct net *net, struct nlattr *nla,
struct nlattr *est, struct tc_action **a, int ovr,
int bind, bool unlocked, struct tcf_proto *tp,
- struct netlink_ext_ack *extack)
+ u32 flags, struct netlink_ext_ack *extack)
{
return __tcf_ipt_init(net, xt_net_id, nla, est, a, &act_xt_ops, ovr,
- bind, tp);
+ bind, tp, flags);
}
static int tcf_ipt_act(struct sk_buff *skb, const struct tc_action *a,
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 08923b21e566..1e3eb3a97532 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -93,7 +93,7 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
struct nlattr *est, struct tc_action **a,
int ovr, int bind, bool rtnl_held,
struct tcf_proto *tp,
- struct netlink_ext_ack *extack)
+ u32 flags, struct netlink_ext_ack *extack)
{
struct tc_action_net *tn = net_generic(net, mirred_net_id);
struct nlattr *tb[TCA_MIRRED_MAX + 1];
@@ -148,8 +148,8 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
NL_SET_ERR_MSG_MOD(extack, "Specified device does not exist");
return -EINVAL;
}
- ret = tcf_idr_create(tn, index, est, a,
- &act_mirred_ops, bind, true);
+ ret = tcf_idr_create_from_flags(tn, index, est, a,
+ &act_mirred_ops, bind, flags);
if (ret) {
tcf_idr_cleanup(tn, index);
return ret;
@@ -178,8 +178,8 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
goto put_chain;
}
mac_header_xmit = dev_is_mac_header_xmit(dev);
- rcu_swap_protected(m->tcfm_dev, dev,
- lockdep_is_held(&m->tcf_lock));
+ dev = rcu_replace_pointer(m->tcfm_dev, dev,
+ lockdep_is_held(&m->tcf_lock));
if (dev)
dev_put(dev);
m->tcfm_mac_header_xmit = mac_header_xmit;
@@ -231,7 +231,7 @@ static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a,
}
tcf_lastuse_update(&m->tcf_tm);
- bstats_cpu_update(this_cpu_ptr(m->common.cpu_bstats), skb);
+ tcf_action_update_bstats(&m->common, skb);
m_mac_header_xmit = READ_ONCE(m->tcfm_mac_header_xmit);
m_eaction = READ_ONCE(m->tcfm_eaction);
@@ -289,8 +289,8 @@ static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a,
/* let the caller reinsert the packet, if possible */
if (use_reinsert) {
res->ingress = want_ingress;
- res->qstats = this_cpu_ptr(m->common.cpu_qstats);
- skb_tc_reinsert(skb, res);
+ if (skb_tc_reinsert(skb, res))
+ tcf_action_inc_overlimit_qstats(&m->common);
__this_cpu_dec(mirred_rec_level);
return TC_ACT_CONSUMED;
}
@@ -303,7 +303,7 @@ static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a,
if (err) {
out:
- qstats_overlimit_inc(this_cpu_ptr(m->common.cpu_qstats));
+ tcf_action_inc_overlimit_qstats(&m->common);
if (tcf_mirred_is_act_redirect(m_eaction))
retval = TC_ACT_SHOT;
}
@@ -318,10 +318,7 @@ static void tcf_stats_update(struct tc_action *a, u64 bytes, u32 packets,
struct tcf_mirred *m = to_mirred(a);
struct tcf_t *tm = &m->tcf_tm;
- _bstats_cpu_update(this_cpu_ptr(a->cpu_bstats), bytes, packets);
- if (hw)
- _bstats_cpu_update(this_cpu_ptr(a->cpu_bstats_hw),
- bytes, packets);
+ tcf_action_update_stats(a, bytes, packets, false, hw);
tm->lastuse = max_t(u64, tm->lastuse, lastuse);
}
diff --git a/net/sched/act_mpls.c b/net/sched/act_mpls.c
index 4cf6c553bb0b..325eddcc6621 100644
--- a/net/sched/act_mpls.c
+++ b/net/sched/act_mpls.c
@@ -119,7 +119,6 @@ static int valid_label(const struct nlattr *attr,
}
static const struct nla_policy mpls_policy[TCA_MPLS_MAX + 1] = {
- [TCA_MPLS_UNSPEC] = { .strict_start_type = TCA_MPLS_UNSPEC + 1 },
[TCA_MPLS_PARMS] = NLA_POLICY_EXACT_LEN(sizeof(struct tc_mpls)),
[TCA_MPLS_PROTO] = { .type = NLA_U16 },
[TCA_MPLS_LABEL] = NLA_POLICY_VALIDATE_FN(NLA_U32, valid_label),
@@ -131,7 +130,8 @@ static const struct nla_policy mpls_policy[TCA_MPLS_MAX + 1] = {
static int tcf_mpls_init(struct net *net, struct nlattr *nla,
struct nlattr *est, struct tc_action **a,
int ovr, int bind, bool rtnl_held,
- struct tcf_proto *tp, struct netlink_ext_ack *extack)
+ struct tcf_proto *tp, u32 flags,
+ struct netlink_ext_ack *extack)
{
struct tc_action_net *tn = net_generic(net, mpls_net_id);
struct nlattr *tb[TCA_MPLS_MAX + 1];
@@ -224,7 +224,7 @@ static int tcf_mpls_init(struct net *net, struct nlattr *nla,
if (!exists) {
ret = tcf_idr_create(tn, index, est, a,
- &act_mpls_ops, bind, true);
+ &act_mpls_ops, bind, true, 0);
if (ret) {
tcf_idr_cleanup(tn, index);
return ret;
@@ -262,7 +262,7 @@ static int tcf_mpls_init(struct net *net, struct nlattr *nla,
spin_lock_bh(&m->tcf_lock);
goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
- rcu_swap_protected(m->mpls_p, p, lockdep_is_held(&m->tcf_lock));
+ p = rcu_replace_pointer(m->mpls_p, p, lockdep_is_held(&m->tcf_lock));
spin_unlock_bh(&m->tcf_lock);
if (goto_ch)
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
index ea4c5359e7df..855a6fa16a62 100644
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -36,7 +36,7 @@ static const struct nla_policy nat_policy[TCA_NAT_MAX + 1] = {
static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est,
struct tc_action **a, int ovr, int bind,
bool rtnl_held, struct tcf_proto *tp,
- struct netlink_ext_ack *extack)
+ u32 flags, struct netlink_ext_ack *extack)
{
struct tc_action_net *tn = net_generic(net, nat_net_id);
struct nlattr *tb[TCA_NAT_MAX + 1];
@@ -61,7 +61,7 @@ static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est,
err = tcf_idr_check_alloc(tn, &index, a, bind);
if (!err) {
ret = tcf_idr_create(tn, index, est, a,
- &act_nat_ops, bind, false);
+ &act_nat_ops, bind, false, 0);
if (ret) {
tcf_idr_cleanup(tn, index);
return ret;
@@ -206,9 +206,7 @@ static int tcf_nat_act(struct sk_buff *skb, const struct tc_action *a,
icmph = (void *)(skb_network_header(skb) + ihl);
- if ((icmph->type != ICMP_DEST_UNREACH) &&
- (icmph->type != ICMP_TIME_EXCEEDED) &&
- (icmph->type != ICMP_PARAMETERPROB))
+ if (!icmp_is_err(icmph->type))
break;
if (!pskb_may_pull(skb, ihl + sizeof(*icmph) + sizeof(*iph) +
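
The act_nat.c hunk folds the open-coded ICMP type checks into icmp_is_err(). A sketch of that helper's likely shape; note the real helper in include/linux/icmp.h may also cover ICMP_SOURCE_QUENCH and ICMP_REDIRECT, which would slightly widen what this code treats as an ICMP error:

    static inline bool icmp_is_err(int type)
    {
            switch (type) {
            case ICMP_DEST_UNREACH:
            case ICMP_TIME_EXCEEDED:
            case ICMP_PARAMETERPROB:
                    return true;
            }
            return false;
    }
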
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index cdfaa79382a2..3ad718576304 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -43,7 +43,7 @@ static struct tcf_pedit_key_ex *tcf_pedit_keys_ex_parse(struct nlattr *nla,
int err = -EINVAL;
int rem;
- if (!nla || !n)
+ if (!nla)
return NULL;
keys_ex = kcalloc(n, sizeof(*k), GFP_KERNEL);
@@ -137,7 +137,8 @@ nla_failure:
static int tcf_pedit_init(struct net *net, struct nlattr *nla,
struct nlattr *est, struct tc_action **a,
int ovr, int bind, bool rtnl_held,
- struct tcf_proto *tp, struct netlink_ext_ack *extack)
+ struct tcf_proto *tp, u32 flags,
+ struct netlink_ext_ack *extack)
{
struct tc_action_net *tn = net_generic(net, pedit_net_id);
struct nlattr *tb[TCA_PEDIT_MAX + 1];
@@ -170,6 +171,10 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
}
parm = nla_data(pattr);
+ if (!parm->nkeys) {
+ NL_SET_ERR_MSG_MOD(extack, "Pedit requires keys to be passed");
+ return -EINVAL;
+ }
ksize = parm->nkeys * sizeof(struct tc_pedit_key);
if (nla_len(pattr) < sizeof(*parm) + ksize) {
NL_SET_ERR_MSG_ATTR(extack, pattr, "Length of TCA_PEDIT_PARMS or TCA_PEDIT_PARMS_EX pedit attribute is invalid");
@@ -183,14 +188,8 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
index = parm->index;
err = tcf_idr_check_alloc(tn, &index, a, bind);
if (!err) {
- if (!parm->nkeys) {
- tcf_idr_cleanup(tn, index);
- NL_SET_ERR_MSG_MOD(extack, "Pedit requires keys to be passed");
- ret = -EINVAL;
- goto out_free;
- }
ret = tcf_idr_create(tn, index, est, a,
- &act_pedit_ops, bind, false);
+ &act_pedit_ops, bind, false, 0);
if (ret) {
tcf_idr_cleanup(tn, index);
goto out_free;
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 89c04c52af3d..8b7a0ac96c51 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -47,7 +47,7 @@ static const struct nla_policy police_policy[TCA_POLICE_MAX + 1] = {
static int tcf_police_init(struct net *net, struct nlattr *nla,
struct nlattr *est, struct tc_action **a,
int ovr, int bind, bool rtnl_held,
- struct tcf_proto *tp,
+ struct tcf_proto *tp, u32 flags,
struct netlink_ext_ack *extack)
{
int ret = 0, tcfp_result = TC_ACT_OK, err, size;
@@ -87,7 +87,7 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
if (!exists) {
ret = tcf_idr_create(tn, index, NULL, a,
- &act_police_ops, bind, true);
+ &act_police_ops, bind, true, 0);
if (ret) {
tcf_idr_cleanup(tn, index);
return ret;
@@ -191,9 +191,9 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
police->tcfp_ptoks = new->tcfp_mtu_ptoks;
spin_unlock_bh(&police->tcfp_lock);
goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
- rcu_swap_protected(police->params,
- new,
- lockdep_is_held(&police->tcf_lock));
+ new = rcu_replace_pointer(police->params,
+ new,
+ lockdep_is_held(&police->tcf_lock));
spin_unlock_bh(&police->tcf_lock);
if (goto_ch)
@@ -294,10 +294,7 @@ static void tcf_police_stats_update(struct tc_action *a,
struct tcf_police *police = to_police(a);
struct tcf_t *tm = &police->tcf_tm;
- _bstats_cpu_update(this_cpu_ptr(a->cpu_bstats), bytes, packets);
- if (hw)
- _bstats_cpu_update(this_cpu_ptr(a->cpu_bstats_hw),
- bytes, packets);
+ tcf_action_update_stats(a, bytes, packets, false, hw);
tm->lastuse = max_t(u64, tm->lastuse, lastuse);
}
@@ -345,10 +342,7 @@ static int tcf_police_dump(struct sk_buff *skb, struct tc_action *a,
nla_put_u32(skb, TCA_POLICE_AVRATE, p->tcfp_ewma_rate))
goto nla_put_failure;
- t.install = jiffies_to_clock_t(jiffies - police->tcf_tm.install);
- t.lastuse = jiffies_to_clock_t(jiffies - police->tcf_tm.lastuse);
- t.firstuse = jiffies_to_clock_t(jiffies - police->tcf_tm.firstuse);
- t.expires = jiffies_to_clock_t(police->tcf_tm.expires);
+ tcf_tm_dump(&t, &police->tcf_tm);
if (nla_put_64bit(skb, TCA_POLICE_TM, sizeof(t), &t, TCA_POLICE_PAD))
goto nla_put_failure;
spin_unlock_bh(&police->tcf_lock);
diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c
index 514456a0b9a8..ce948c1e24dc 100644
--- a/net/sched/act_sample.c
+++ b/net/sched/act_sample.c
@@ -36,7 +36,7 @@ static const struct nla_policy sample_policy[TCA_SAMPLE_MAX + 1] = {
static int tcf_sample_init(struct net *net, struct nlattr *nla,
struct nlattr *est, struct tc_action **a, int ovr,
int bind, bool rtnl_held, struct tcf_proto *tp,
- struct netlink_ext_ack *extack)
+ u32 flags, struct netlink_ext_ack *extack)
{
struct tc_action_net *tn = net_generic(net, sample_net_id);
struct nlattr *tb[TCA_SAMPLE_MAX + 1];
@@ -69,7 +69,7 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
if (!exists) {
ret = tcf_idr_create(tn, index, est, a,
- &act_sample_ops, bind, true);
+ &act_sample_ops, bind, true, 0);
if (ret) {
tcf_idr_cleanup(tn, index);
return ret;
@@ -102,8 +102,8 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
s->rate = rate;
s->psample_group_num = psample_group_num;
- rcu_swap_protected(s->psample_group, psample_group,
- lockdep_is_held(&s->tcf_lock));
+ psample_group = rcu_replace_pointer(s->psample_group, psample_group,
+ lockdep_is_held(&s->tcf_lock));
if (tb[TCA_SAMPLE_TRUNC_SIZE]) {
s->truncate = true;
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index 6120e56117ca..9813ca4006dd 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -35,7 +35,7 @@ static int tcf_simp_act(struct sk_buff *skb, const struct tc_action *a,
* For example, if this were the 3rd packet and the string was "hello",
* it would look like "hello_3" (without quotes)
*/
- pr_info("simple: %s_%d\n",
+ pr_info("simple: %s_%llu\n",
(char *)d->tcfd_defdata, d->tcf_bstats.packets);
spin_unlock(&d->tcf_lock);
return d->tcf_action;
@@ -86,7 +86,8 @@ static const struct nla_policy simple_policy[TCA_DEF_MAX + 1] = {
static int tcf_simp_init(struct net *net, struct nlattr *nla,
struct nlattr *est, struct tc_action **a,
int ovr, int bind, bool rtnl_held,
- struct tcf_proto *tp, struct netlink_ext_ack *extack)
+ struct tcf_proto *tp, u32 flags,
+ struct netlink_ext_ack *extack)
{
struct tc_action_net *tn = net_generic(net, simp_net_id);
struct nlattr *tb[TCA_DEF_MAX + 1];
@@ -127,7 +128,7 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
if (!exists) {
ret = tcf_idr_create(tn, index, est, a,
- &act_simp_ops, bind, false);
+ &act_simp_ops, bind, false, 0);
if (ret) {
tcf_idr_cleanup(tn, index);
return ret;
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index 6a8d3337c577..e857424c387c 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -86,7 +86,7 @@ static const struct nla_policy skbedit_policy[TCA_SKBEDIT_MAX + 1] = {
static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
struct nlattr *est, struct tc_action **a,
int ovr, int bind, bool rtnl_held,
- struct tcf_proto *tp,
+ struct tcf_proto *tp, u32 act_flags,
struct netlink_ext_ack *extack)
{
struct tc_action_net *tn = net_generic(net, skbedit_net_id);
@@ -165,7 +165,7 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
if (!exists) {
ret = tcf_idr_create(tn, index, est, a,
- &act_skbedit_ops, bind, true);
+ &act_skbedit_ops, bind, true, 0);
if (ret) {
tcf_idr_cleanup(tn, index);
return ret;
@@ -206,8 +206,8 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
spin_lock_bh(&d->tcf_lock);
goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
- rcu_swap_protected(d->params, params_new,
- lockdep_is_held(&d->tcf_lock));
+ params_new = rcu_replace_pointer(d->params, params_new,
+ lockdep_is_held(&d->tcf_lock));
spin_unlock_bh(&d->tcf_lock);
if (params_new)
kfree_rcu(params_new, rcu);
diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c
index 888437f97ba6..39e6d94cfafb 100644
--- a/net/sched/act_skbmod.c
+++ b/net/sched/act_skbmod.c
@@ -79,7 +79,7 @@ static const struct nla_policy skbmod_policy[TCA_SKBMOD_MAX + 1] = {
static int tcf_skbmod_init(struct net *net, struct nlattr *nla,
struct nlattr *est, struct tc_action **a,
int ovr, int bind, bool rtnl_held,
- struct tcf_proto *tp,
+ struct tcf_proto *tp, u32 flags,
struct netlink_ext_ack *extack)
{
struct tc_action_net *tn = net_generic(net, skbmod_net_id);
@@ -143,7 +143,7 @@ static int tcf_skbmod_init(struct net *net, struct nlattr *nla,
if (!exists) {
ret = tcf_idr_create(tn, index, est, a,
- &act_skbmod_ops, bind, true);
+ &act_skbmod_ops, bind, true, 0);
if (ret) {
tcf_idr_cleanup(tn, index);
return ret;
diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c
index 2f83a79f76aa..536c4bc31be6 100644
--- a/net/sched/act_tunnel_key.c
+++ b/net/sched/act_tunnel_key.c
@@ -10,6 +10,8 @@
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <net/geneve.h>
+#include <net/vxlan.h>
+#include <net/erspan.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/dst.h>
@@ -31,7 +33,7 @@ static int tunnel_key_act(struct sk_buff *skb, const struct tc_action *a,
params = rcu_dereference_bh(t->params);
tcf_lastuse_update(&t->tcf_tm);
- bstats_cpu_update(this_cpu_ptr(t->common.cpu_bstats), skb);
+ tcf_action_update_bstats(&t->common, skb);
action = READ_ONCE(t->tcf_action);
switch (params->tcft_action) {
@@ -53,7 +55,11 @@ static int tunnel_key_act(struct sk_buff *skb, const struct tc_action *a,
static const struct nla_policy
enc_opts_policy[TCA_TUNNEL_KEY_ENC_OPTS_MAX + 1] = {
+ [TCA_TUNNEL_KEY_ENC_OPTS_UNSPEC] = {
+ .strict_start_type = TCA_TUNNEL_KEY_ENC_OPTS_VXLAN },
[TCA_TUNNEL_KEY_ENC_OPTS_GENEVE] = { .type = NLA_NESTED },
+ [TCA_TUNNEL_KEY_ENC_OPTS_VXLAN] = { .type = NLA_NESTED },
+ [TCA_TUNNEL_KEY_ENC_OPTS_ERSPAN] = { .type = NLA_NESTED },
};
static const struct nla_policy
@@ -64,6 +70,19 @@ geneve_opt_policy[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX + 1] = {
.len = 128 },
};
+static const struct nla_policy
+vxlan_opt_policy[TCA_TUNNEL_KEY_ENC_OPT_VXLAN_MAX + 1] = {
+ [TCA_TUNNEL_KEY_ENC_OPT_VXLAN_GBP] = { .type = NLA_U32 },
+};
+
+static const struct nla_policy
+erspan_opt_policy[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_MAX + 1] = {
+ [TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_VER] = { .type = NLA_U8 },
+ [TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_INDEX] = { .type = NLA_U32 },
+ [TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_DIR] = { .type = NLA_U8 },
+ [TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_HWID] = { .type = NLA_U8 },
+};
+
static int
tunnel_key_copy_geneve_opt(const struct nlattr *nla, void *dst, int dst_len,
struct netlink_ext_ack *extack)
@@ -116,10 +135,89 @@ tunnel_key_copy_geneve_opt(const struct nlattr *nla, void *dst, int dst_len,
return opt_len;
}
+static int
+tunnel_key_copy_vxlan_opt(const struct nlattr *nla, void *dst, int dst_len,
+ struct netlink_ext_ack *extack)
+{
+ struct nlattr *tb[TCA_TUNNEL_KEY_ENC_OPT_VXLAN_MAX + 1];
+ int err;
+
+ err = nla_parse_nested(tb, TCA_TUNNEL_KEY_ENC_OPT_VXLAN_MAX, nla,
+ vxlan_opt_policy, extack);
+ if (err < 0)
+ return err;
+
+ if (!tb[TCA_TUNNEL_KEY_ENC_OPT_VXLAN_GBP]) {
+ NL_SET_ERR_MSG(extack, "Missing tunnel key vxlan option gbp");
+ return -EINVAL;
+ }
+
+ if (dst) {
+ struct vxlan_metadata *md = dst;
+
+ md->gbp = nla_get_u32(tb[TCA_TUNNEL_KEY_ENC_OPT_VXLAN_GBP]);
+ }
+
+ return sizeof(struct vxlan_metadata);
+}
+
+static int
+tunnel_key_copy_erspan_opt(const struct nlattr *nla, void *dst, int dst_len,
+ struct netlink_ext_ack *extack)
+{
+ struct nlattr *tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_MAX + 1];
+ int err;
+ u8 ver;
+
+ err = nla_parse_nested(tb, TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_MAX, nla,
+ erspan_opt_policy, extack);
+ if (err < 0)
+ return err;
+
+ if (!tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_VER]) {
+ NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option ver");
+ return -EINVAL;
+ }
+
+ ver = nla_get_u8(tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_VER]);
+ if (ver == 1) {
+ if (!tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_INDEX]) {
+ NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option index");
+ return -EINVAL;
+ }
+ } else if (ver == 2) {
+ if (!tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_DIR] ||
+ !tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_HWID]) {
+ NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option dir or hwid");
+ return -EINVAL;
+ }
+ } else {
+ NL_SET_ERR_MSG(extack, "Tunnel key erspan option ver is incorrect");
+ return -EINVAL;
+ }
+
+ if (dst) {
+ struct erspan_metadata *md = dst;
+
+ md->version = ver;
+ if (ver == 1) {
+ nla = tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_INDEX];
+ md->u.index = nla_get_be32(nla);
+ } else {
+ nla = tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_DIR];
+ md->u.md2.dir = nla_get_u8(nla);
+ nla = tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_HWID];
+ set_hwid(&md->u.md2, nla_get_u8(nla));
+ }
+ }
+
+ return sizeof(struct erspan_metadata);
+}
+
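The "if (dst)" guards in tunnel_key_copy_vxlan_opt() and tunnel_key_copy_erspan_opt() point at the usual two-pass pattern: a sizing pass with a NULL destination, then a fill pass into the allocated metadata. A hedged sketch of a caller; the sizing call is an assumption here, only the fill call is visible in this patch:

	/* pass 1: dst == NULL, attributes are validated and lengths summed */
	opts_len = tunnel_key_copy_opts(nla, NULL, 0, extack);
	if (opts_len < 0)
		return opts_len;

	/* ... allocate tunnel metadata with room for opts_len option bytes ... */

	/* pass 2: the same walk, now copying into the metadata area */
	err = tunnel_key_copy_opts(nla, ip_tunnel_info_opts(info),
				   opts_len, extack);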
static int tunnel_key_copy_opts(const struct nlattr *nla, u8 *dst,
int dst_len, struct netlink_ext_ack *extack)
{
- int err, rem, opt_len, len = nla_len(nla), opts_len = 0;
+ int err, rem, opt_len, len = nla_len(nla), opts_len = 0, type = 0;
const struct nlattr *attr, *head = nla_data(nla);
err = nla_validate_deprecated(head, len, TCA_TUNNEL_KEY_ENC_OPTS_MAX,
@@ -130,15 +228,48 @@ static int tunnel_key_copy_opts(const struct nlattr *nla, u8 *dst,
nla_for_each_attr(attr, head, len, rem) {
switch (nla_type(attr)) {
case TCA_TUNNEL_KEY_ENC_OPTS_GENEVE:
+ if (type && type != TUNNEL_GENEVE_OPT) {
+ NL_SET_ERR_MSG(extack, "Duplicate type for geneve options");
+ return -EINVAL;
+ }
opt_len = tunnel_key_copy_geneve_opt(attr, dst,
dst_len, extack);
if (opt_len < 0)
return opt_len;
opts_len += opt_len;
+ if (opts_len > IP_TUNNEL_OPTS_MAX) {
+ NL_SET_ERR_MSG(extack, "Tunnel options exceeds max size");
+ return -EINVAL;
+ }
if (dst) {
dst_len -= opt_len;
dst += opt_len;
}
+ type = TUNNEL_GENEVE_OPT;
+ break;
+ case TCA_TUNNEL_KEY_ENC_OPTS_VXLAN:
+ if (type) {
+ NL_SET_ERR_MSG(extack, "Duplicate type for vxlan options");
+ return -EINVAL;
+ }
+ opt_len = tunnel_key_copy_vxlan_opt(attr, dst,
+ dst_len, extack);
+ if (opt_len < 0)
+ return opt_len;
+ opts_len += opt_len;
+ type = TUNNEL_VXLAN_OPT;
+ break;
+ case TCA_TUNNEL_KEY_ENC_OPTS_ERSPAN:
+ if (type) {
+ NL_SET_ERR_MSG(extack, "Duplicate type for erspan options");
+ return -EINVAL;
+ }
+ opt_len = tunnel_key_copy_erspan_opt(attr, dst,
+ dst_len, extack);
+ if (opt_len < 0)
+ return opt_len;
+ opts_len += opt_len;
+ type = TUNNEL_ERSPAN_OPT;
break;
}
}
@@ -175,6 +306,22 @@ static int tunnel_key_opts_set(struct nlattr *nla, struct ip_tunnel_info *info,
#else
return -EAFNOSUPPORT;
#endif
+ case TCA_TUNNEL_KEY_ENC_OPTS_VXLAN:
+#if IS_ENABLED(CONFIG_INET)
+ info->key.tun_flags |= TUNNEL_VXLAN_OPT;
+ return tunnel_key_copy_opts(nla, ip_tunnel_info_opts(info),
+ opts_len, extack);
+#else
+ return -EAFNOSUPPORT;
+#endif
+ case TCA_TUNNEL_KEY_ENC_OPTS_ERSPAN:
+#if IS_ENABLED(CONFIG_INET)
+ info->key.tun_flags |= TUNNEL_ERSPAN_OPT;
+ return tunnel_key_copy_opts(nla, ip_tunnel_info_opts(info),
+ opts_len, extack);
+#else
+ return -EAFNOSUPPORT;
+#endif
default:
NL_SET_ERR_MSG(extack, "Cannot set tunnel options for unknown tunnel type");
return -EINVAL;
@@ -208,7 +355,7 @@ static void tunnel_key_release_params(struct tcf_tunnel_key_params *p)
static int tunnel_key_init(struct net *net, struct nlattr *nla,
struct nlattr *est, struct tc_action **a,
int ovr, int bind, bool rtnl_held,
- struct tcf_proto *tp,
+ struct tcf_proto *tp, u32 act_flags,
struct netlink_ext_ack *extack)
{
struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);
@@ -347,8 +494,9 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
}
if (!exists) {
- ret = tcf_idr_create(tn, index, est, a,
- &act_tunnel_key_ops, bind, true);
+ ret = tcf_idr_create_from_flags(tn, index, est, a,
+ &act_tunnel_key_ops, bind,
+ act_flags);
if (ret) {
NL_SET_ERR_MSG(extack, "Cannot create TC IDR");
goto release_tun_meta;
@@ -381,8 +529,8 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
spin_lock_bh(&t->tcf_lock);
goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
- rcu_swap_protected(t->params, params_new,
- lockdep_is_held(&t->tcf_lock));
+ params_new = rcu_replace_pointer(t->params, params_new,
+ lockdep_is_held(&t->tcf_lock));
spin_unlock_bh(&t->tcf_lock);
tunnel_key_release_params(params_new);
if (goto_ch)
@@ -450,6 +598,56 @@ static int tunnel_key_geneve_opts_dump(struct sk_buff *skb,
return 0;
}
+static int tunnel_key_vxlan_opts_dump(struct sk_buff *skb,
+ const struct ip_tunnel_info *info)
+{
+ struct vxlan_metadata *md = (struct vxlan_metadata *)(info + 1);
+ struct nlattr *start;
+
+ start = nla_nest_start_noflag(skb, TCA_TUNNEL_KEY_ENC_OPTS_VXLAN);
+ if (!start)
+ return -EMSGSIZE;
+
+ if (nla_put_u32(skb, TCA_TUNNEL_KEY_ENC_OPT_VXLAN_GBP, md->gbp)) {
+ nla_nest_cancel(skb, start);
+ return -EMSGSIZE;
+ }
+
+ nla_nest_end(skb, start);
+ return 0;
+}
+
+static int tunnel_key_erspan_opts_dump(struct sk_buff *skb,
+ const struct ip_tunnel_info *info)
+{
+ struct erspan_metadata *md = (struct erspan_metadata *)(info + 1);
+ struct nlattr *start;
+
+ start = nla_nest_start_noflag(skb, TCA_TUNNEL_KEY_ENC_OPTS_ERSPAN);
+ if (!start)
+ return -EMSGSIZE;
+
+ if (nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_VER, md->version))
+ goto err;
+
+ if (md->version == 1 &&
+ nla_put_be32(skb, TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_INDEX, md->u.index))
+ goto err;
+
+ if (md->version == 2 &&
+ (nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_DIR,
+ md->u.md2.dir) ||
+ nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_HWID,
+ get_hwid(&md->u.md2))))
+ goto err;
+
+ nla_nest_end(skb, start);
+ return 0;
+err:
+ nla_nest_cancel(skb, start);
+ return -EMSGSIZE;
+}
+
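Both dump helpers above use the standard netlink nesting idiom: open a nest, emit attributes, and rewind the whole nest on any failure so a partially built message is never left in the skb. A minimal sketch with hypothetical attribute types:

	static int dump_one_u32(struct sk_buff *skb, int nest_type,
				int attr_type, u32 value)
	{
		struct nlattr *start;

		start = nla_nest_start_noflag(skb, nest_type);
		if (!start)
			return -EMSGSIZE;

		if (nla_put_u32(skb, attr_type, value)) {
			nla_nest_cancel(skb, start);	/* undo the partial nest */
			return -EMSGSIZE;
		}

		nla_nest_end(skb, start);	/* patch in the final nest length */
		return 0;
	}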
static int tunnel_key_opts_dump(struct sk_buff *skb,
const struct ip_tunnel_info *info)
{
@@ -467,6 +665,14 @@ static int tunnel_key_opts_dump(struct sk_buff *skb,
err = tunnel_key_geneve_opts_dump(skb, info);
if (err)
goto err_out;
+ } else if (info->key.tun_flags & TUNNEL_VXLAN_OPT) {
+ err = tunnel_key_vxlan_opts_dump(skb, info);
+ if (err)
+ goto err_out;
+ } else if (info->key.tun_flags & TUNNEL_ERSPAN_OPT) {
+ err = tunnel_key_erspan_opts_dump(skb, info);
+ if (err)
+ goto err_out;
} else {
err_out:
nla_nest_cancel(skb, start);
diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c
index 08aaf719a70f..c91d3958fcbb 100644
--- a/net/sched/act_vlan.c
+++ b/net/sched/act_vlan.c
@@ -29,7 +29,7 @@ static int tcf_vlan_act(struct sk_buff *skb, const struct tc_action *a,
u16 tci;
tcf_lastuse_update(&v->tcf_tm);
- bstats_cpu_update(this_cpu_ptr(v->common.cpu_bstats), skb);
+ tcf_action_update_bstats(&v->common, skb);
/* Ensure 'data' points at mac_header prior to calling vlan manipulating
* functions.
@@ -88,7 +88,7 @@ out:
return action;
drop:
- qstats_drop_inc(this_cpu_ptr(v->common.cpu_qstats));
+ tcf_action_inc_drop_qstats(&v->common);
return TC_ACT_SHOT;
}
@@ -102,7 +102,8 @@ static const struct nla_policy vlan_policy[TCA_VLAN_MAX + 1] = {
static int tcf_vlan_init(struct net *net, struct nlattr *nla,
struct nlattr *est, struct tc_action **a,
int ovr, int bind, bool rtnl_held,
- struct tcf_proto *tp, struct netlink_ext_ack *extack)
+ struct tcf_proto *tp, u32 flags,
+ struct netlink_ext_ack *extack)
{
struct tc_action_net *tn = net_generic(net, vlan_net_id);
struct nlattr *tb[TCA_VLAN_MAX + 1];
@@ -188,8 +189,8 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
action = parm->v_action;
if (!exists) {
- ret = tcf_idr_create(tn, index, est, a,
- &act_vlan_ops, bind, true);
+ ret = tcf_idr_create_from_flags(tn, index, est, a,
+ &act_vlan_ops, bind, flags);
if (ret) {
tcf_idr_cleanup(tn, index);
return ret;
@@ -220,7 +221,7 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
spin_lock_bh(&v->tcf_lock);
goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
- rcu_swap_protected(v->vlan_p, p, lockdep_is_held(&v->tcf_lock));
+ p = rcu_replace_pointer(v->vlan_p, p, lockdep_is_held(&v->tcf_lock));
spin_unlock_bh(&v->tcf_lock);
if (goto_ch)
@@ -307,10 +308,7 @@ static void tcf_vlan_stats_update(struct tc_action *a, u64 bytes, u32 packets,
struct tcf_vlan *v = to_vlan(a);
struct tcf_t *tm = &v->tcf_tm;
- _bstats_cpu_update(this_cpu_ptr(a->cpu_bstats), bytes, packets);
- if (hw)
- _bstats_cpu_update(this_cpu_ptr(a->cpu_bstats_hw),
- bytes, packets);
+ tcf_action_update_stats(a, bytes, packets, false, hw);
tm->lastuse = max_t(u64, tm->lastuse, lastuse);
}
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index 74221e3351c3..c307ee1d6ca6 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -22,6 +22,8 @@
#include <net/ip.h>
#include <net/flow_dissector.h>
#include <net/geneve.h>
+#include <net/vxlan.h>
+#include <net/erspan.h>
#include <net/dst.h>
#include <net/dst_metadata.h>
@@ -688,7 +690,11 @@ static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
static const struct nla_policy
enc_opts_policy[TCA_FLOWER_KEY_ENC_OPTS_MAX + 1] = {
+ [TCA_FLOWER_KEY_ENC_OPTS_UNSPEC] = {
+ .strict_start_type = TCA_FLOWER_KEY_ENC_OPTS_VXLAN },
[TCA_FLOWER_KEY_ENC_OPTS_GENEVE] = { .type = NLA_NESTED },
+ [TCA_FLOWER_KEY_ENC_OPTS_VXLAN] = { .type = NLA_NESTED },
+ [TCA_FLOWER_KEY_ENC_OPTS_ERSPAN] = { .type = NLA_NESTED },
};
static const struct nla_policy
@@ -699,6 +705,19 @@ geneve_opt_policy[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1] = {
.len = 128 },
};
+static const struct nla_policy
+vxlan_opt_policy[TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX + 1] = {
+ [TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP] = { .type = NLA_U32 },
+};
+
+static const struct nla_policy
+erspan_opt_policy[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX + 1] = {
+ [TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER] = { .type = NLA_U8 },
+ [TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX] = { .type = NLA_U32 },
+ [TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR] = { .type = NLA_U8 },
+ [TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID] = { .type = NLA_U8 },
+};
+
static void fl_set_key_val(struct nlattr **tb,
void *val, int val_type,
void *mask, int mask_type, int len)
@@ -928,6 +947,105 @@ static int fl_set_geneve_opt(const struct nlattr *nla, struct fl_flow_key *key,
return sizeof(struct geneve_opt) + data_len;
}
+static int fl_set_vxlan_opt(const struct nlattr *nla, struct fl_flow_key *key,
+ int depth, int option_len,
+ struct netlink_ext_ack *extack)
+{
+ struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX + 1];
+ struct vxlan_metadata *md;
+ int err;
+
+ md = (struct vxlan_metadata *)&key->enc_opts.data[key->enc_opts.len];
+ memset(md, 0xff, sizeof(*md));
+
+ if (!depth)
+ return sizeof(*md);
+
+ if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_VXLAN) {
+ NL_SET_ERR_MSG(extack, "Non-vxlan option type for mask");
+ return -EINVAL;
+ }
+
+ err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX, nla,
+ vxlan_opt_policy, extack);
+ if (err < 0)
+ return err;
+
+ if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]) {
+ NL_SET_ERR_MSG(extack, "Missing tunnel key vxlan option gbp");
+ return -EINVAL;
+ }
+
+ if (tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP])
+ md->gbp = nla_get_u32(tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]);
+
+ return sizeof(*md);
+}
+
+static int fl_set_erspan_opt(const struct nlattr *nla, struct fl_flow_key *key,
+ int depth, int option_len,
+ struct netlink_ext_ack *extack)
+{
+ struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX + 1];
+ struct erspan_metadata *md;
+ int err;
+
+ md = (struct erspan_metadata *)&key->enc_opts.data[key->enc_opts.len];
+ memset(md, 0xff, sizeof(*md));
+ md->version = 1;
+
+ if (!depth)
+ return sizeof(*md);
+
+ if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_ERSPAN) {
+ NL_SET_ERR_MSG(extack, "Non-erspan option type for mask");
+ return -EINVAL;
+ }
+
+ err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX, nla,
+ erspan_opt_policy, extack);
+ if (err < 0)
+ return err;
+
+ if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]) {
+ NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option ver");
+ return -EINVAL;
+ }
+
+ if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER])
+ md->version = nla_get_u8(tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]);
+
+ if (md->version == 1) {
+ if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) {
+ NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option index");
+ return -EINVAL;
+ }
+ if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) {
+ nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX];
+ md->u.index = nla_get_be32(nla);
+ }
+ } else if (md->version == 2) {
+ if (!option_len && (!tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR] ||
+ !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID])) {
+ NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option dir or hwid");
+ return -EINVAL;
+ }
+ if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR]) {
+ nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR];
+ md->u.md2.dir = nla_get_u8(nla);
+ }
+ if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID]) {
+ nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID];
+ set_hwid(&md->u.md2, nla_get_u8(nla));
+ }
+ } else {
+ NL_SET_ERR_MSG(extack, "Tunnel key erspan option ver is incorrect");
+ return -EINVAL;
+ }
+
+ return sizeof(*md);
+}
+
static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
struct fl_flow_key *mask,
struct netlink_ext_ack *extack)
@@ -958,6 +1076,11 @@ static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS]), key_depth) {
switch (nla_type(nla_opt_key)) {
case TCA_FLOWER_KEY_ENC_OPTS_GENEVE:
+ if (key->enc_opts.dst_opt_type &&
+ key->enc_opts.dst_opt_type != TUNNEL_GENEVE_OPT) {
+ NL_SET_ERR_MSG(extack, "Duplicate type for geneve options");
+ return -EINVAL;
+ }
option_len = 0;
key->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT;
option_len = fl_set_geneve_opt(nla_opt_key, key,
@@ -986,6 +1109,72 @@ static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
if (msk_depth)
nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
break;
+ case TCA_FLOWER_KEY_ENC_OPTS_VXLAN:
+ if (key->enc_opts.dst_opt_type) {
+ NL_SET_ERR_MSG(extack, "Duplicate type for vxlan options");
+ return -EINVAL;
+ }
+ option_len = 0;
+ key->enc_opts.dst_opt_type = TUNNEL_VXLAN_OPT;
+ option_len = fl_set_vxlan_opt(nla_opt_key, key,
+ key_depth, option_len,
+ extack);
+ if (option_len < 0)
+ return option_len;
+
+ key->enc_opts.len += option_len;
+ /* Parse the mask in the same pass so that the exact-match
+ * and mask attribute lengths can be verified to agree.
+ */
+ mask->enc_opts.dst_opt_type = TUNNEL_VXLAN_OPT;
+ option_len = fl_set_vxlan_opt(nla_opt_msk, mask,
+ msk_depth, option_len,
+ extack);
+ if (option_len < 0)
+ return option_len;
+
+ mask->enc_opts.len += option_len;
+ if (key->enc_opts.len != mask->enc_opts.len) {
+ NL_SET_ERR_MSG(extack, "Key and mask miss aligned");
+ return -EINVAL;
+ }
+
+ if (msk_depth)
+ nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
+ break;
+ case TCA_FLOWER_KEY_ENC_OPTS_ERSPAN:
+ if (key->enc_opts.dst_opt_type) {
+ NL_SET_ERR_MSG(extack, "Duplicate type for erspan options");
+ return -EINVAL;
+ }
+ option_len = 0;
+ key->enc_opts.dst_opt_type = TUNNEL_ERSPAN_OPT;
+ option_len = fl_set_erspan_opt(nla_opt_key, key,
+ key_depth, option_len,
+ extack);
+ if (option_len < 0)
+ return option_len;
+
+ key->enc_opts.len += option_len;
+ /* Parse the mask in the same pass so that the exact-match
+ * and mask attribute lengths can be verified to agree.
+ */
+ mask->enc_opts.dst_opt_type = TUNNEL_ERSPAN_OPT;
+ option_len = fl_set_erspan_opt(nla_opt_msk, mask,
+ msk_depth, option_len,
+ extack);
+ if (option_len < 0)
+ return option_len;
+
+ mask->enc_opts.len += option_len;
+ if (key->enc_opts.len != mask->enc_opts.len) {
+ NL_SET_ERR_MSG(extack, "Key and mask miss aligned");
+ return -EINVAL;
+ }
+
+ if (msk_depth)
+ nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
+ break;
default:
NL_SET_ERR_MSG(extack, "Unknown tunnel option type");
return -EINVAL;
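Each new case parses the option twice, once into the key and once into the mask, then insists the running lengths agree. Since fl_set_vxlan_opt() and fl_set_erspan_opt() memset their metadata to 0xff before parsing, an absent mask nest (depth == 0) leaves an all-ones mask, i.e. an exact match on the supplied option. Condensed from the hunk above:

	/* key pass */
	option_len = fl_set_vxlan_opt(nla_opt_key, key, key_depth, 0, extack);
	if (option_len < 0)
		return option_len;
	key->enc_opts.len += option_len;

	/* mask pass; msk_depth == 0 yields the default all-ones mask */
	option_len = fl_set_vxlan_opt(nla_opt_msk, mask, msk_depth,
				      option_len, extack);
	if (option_len < 0)
		return option_len;
	mask->enc_opts.len += option_len;

	if (key->enc_opts.len != mask->enc_opts.len)
		return -EINVAL;	/* key and mask must describe equal lengths */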
@@ -2135,6 +2324,61 @@ nla_put_failure:
return -EMSGSIZE;
}
+static int fl_dump_key_vxlan_opt(struct sk_buff *skb,
+ struct flow_dissector_key_enc_opts *enc_opts)
+{
+ struct vxlan_metadata *md;
+ struct nlattr *nest;
+
+ nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_VXLAN);
+ if (!nest)
+ goto nla_put_failure;
+
+ md = (struct vxlan_metadata *)&enc_opts->data[0];
+ if (nla_put_u32(skb, TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP, md->gbp))
+ goto nla_put_failure;
+
+ nla_nest_end(skb, nest);
+ return 0;
+
+nla_put_failure:
+ nla_nest_cancel(skb, nest);
+ return -EMSGSIZE;
+}
+
+static int fl_dump_key_erspan_opt(struct sk_buff *skb,
+ struct flow_dissector_key_enc_opts *enc_opts)
+{
+ struct erspan_metadata *md;
+ struct nlattr *nest;
+
+ nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_ERSPAN);
+ if (!nest)
+ goto nla_put_failure;
+
+ md = (struct erspan_metadata *)&enc_opts->data[0];
+ if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER, md->version))
+ goto nla_put_failure;
+
+ if (md->version == 1 &&
+ nla_put_be32(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX, md->u.index))
+ goto nla_put_failure;
+
+ if (md->version == 2 &&
+ (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR,
+ md->u.md2.dir) ||
+ nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID,
+ get_hwid(&md->u.md2))))
+ goto nla_put_failure;
+
+ nla_nest_end(skb, nest);
+ return 0;
+
+nla_put_failure:
+ nla_nest_cancel(skb, nest);
+ return -EMSGSIZE;
+}
+
static int fl_dump_key_ct(struct sk_buff *skb,
struct flow_dissector_key_ct *key,
struct flow_dissector_key_ct *mask)
@@ -2188,6 +2432,16 @@ static int fl_dump_key_options(struct sk_buff *skb, int enc_opt_type,
if (err)
goto nla_put_failure;
break;
+ case TUNNEL_VXLAN_OPT:
+ err = fl_dump_key_vxlan_opt(skb, enc_opts);
+ if (err)
+ goto nla_put_failure;
+ break;
+ case TUNNEL_ERSPAN_OPT:
+ err = fl_dump_key_erspan_opt(skb, enc_opts);
+ if (err)
+ goto nla_put_failure;
+ break;
default:
goto nla_put_failure;
}
diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c
index 3177dcb17316..d99966a55c84 100644
--- a/net/sched/em_meta.c
+++ b/net/sched/em_meta.c
@@ -521,7 +521,7 @@ META_COLLECTOR(int_sk_ack_bl)
*err = -1;
return;
}
- dst->value = sk->sk_ack_backlog;
+ dst->value = READ_ONCE(sk->sk_ack_backlog);
}
META_COLLECTOR(int_sk_max_ack_bl)
@@ -532,7 +532,7 @@ META_COLLECTOR(int_sk_max_ack_bl)
*err = -1;
return;
}
- dst->value = sk->sk_max_ack_backlog;
+ dst->value = READ_ONCE(sk->sk_max_ack_backlog);
}
META_COLLECTOR(int_sk_prio)
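sk_ack_backlog and sk_max_ack_backlog are updated by other contexts without the meta collector holding the socket lock, so the loads are now annotated. A one-function sketch of the rule this hunk (and the matching WRITE_ONCE() conversions in the sctp portion of this patch) applies:

	static u32 snapshot_ack_backlog(const struct sock *sk)
	{
		/* pairs with WRITE_ONCE() on the update side: one untorn
		 * load, and the intentional data race is visible to KCSAN
		 */
		return READ_ONCE(sk->sk_ack_backlog);
	}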
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index 98dd87ce1510..b1c7e726ce5d 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -530,8 +530,7 @@ begin:
fq_flow_set_throttled(q, f);
goto begin;
}
- if (time_next_packet &&
- (s64)(now - time_next_packet - q->ce_threshold) > 0) {
+ if ((s64)(now - time_next_packet - q->ce_threshold) > 0) {
INET_ECN_set_ce(skb);
q->stat_ce_mark++;
}
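The surviving condition is the canonical wrap-safe time test: subtract two u64 nanosecond clocks and look at the sign of the s64 difference. A small sketch of the idiom; the dropped "time_next_packet &&" guard was evidently judged redundant, since a zero deadline just degenerates to comparing now against the threshold:

	/* wrap-safe "a is later than b by more than thresh", u64 ns clocks */
	static bool after_by(u64 a, u64 b, u64 thresh)
	{
		return (s64)(a - b - thresh) > 0;
	}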
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
index c261c0a18868..968519ff36e9 100644
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -14,7 +14,6 @@
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/skbuff.h>
-#include <linux/jhash.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/netlink.h>
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 8769b4b8807d..5ab696efca95 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -382,13 +382,8 @@ void __qdisc_run(struct Qdisc *q)
int packets;
while (qdisc_restart(q, &packets)) {
- /*
- * Ordered by possible occurrence: Postpone processing if
- * 1. we've exceeded packet quota
- * 2. another process needs the CPU;
- */
quota -= packets;
- if (quota <= 0 || need_resched()) {
+ if (quota <= 0) {
__netif_schedule(q);
break;
}
@@ -657,7 +652,7 @@ static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
if (likely(skb)) {
qdisc_update_stats_at_dequeue(qdisc, skb);
} else {
- qdisc->empty = true;
+ WRITE_ONCE(qdisc->empty, true);
}
return skb;
@@ -1214,8 +1209,13 @@ void dev_deactivate_many(struct list_head *head)
/* Wait for outstanding qdisc_run calls. */
list_for_each_entry(dev, head, close_list) {
- while (some_qdisc_is_busy(dev))
- yield();
+ while (some_qdisc_is_busy(dev)) {
+ /* wait_event() would avoid this sleep-loop but would
+ * require expensive checks in the fast paths of packet
+ * processing which isn't worth it.
+ */
+ schedule_timeout_uninterruptible(1);
+ }
/* The new qdisc is assigned at this point so we can safely
* unwind stale skb lists and qdisc statistics
*/
diff --git a/net/sched/sch_pie.c b/net/sched/sch_pie.c
index df98a887eb89..b0b0dc46af61 100644
--- a/net/sched/sch_pie.c
+++ b/net/sched/sch_pie.c
@@ -22,6 +22,7 @@
#define QUEUE_THRESHOLD 16384
#define DQCOUNT_INVALID -1
+#define DTIME_INVALID 0xffffffffffffffff
#define MAX_PROB 0xffffffffffffffff
#define PIE_SCALE 8
@@ -34,6 +35,7 @@ struct pie_params {
u32 beta; /* and are used for shift relative to 1 */
bool ecn; /* true if ecn is enabled */
bool bytemode; /* to scale drop early prob based on pkt size */
+ u8 dq_rate_estimator; /* to calculate delay using Little's law */
};
/* variables used */
@@ -77,11 +79,34 @@ static void pie_params_init(struct pie_params *params)
params->target = PSCHED_NS2TICKS(15 * NSEC_PER_MSEC); /* 15 ms */
params->ecn = false;
params->bytemode = false;
+ params->dq_rate_estimator = false;
+}
+
+/* private skb vars */
+struct pie_skb_cb {
+ psched_time_t enqueue_time;
+};
+
+static struct pie_skb_cb *get_pie_cb(const struct sk_buff *skb)
+{
+ qdisc_cb_private_validate(skb, sizeof(struct pie_skb_cb));
+ return (struct pie_skb_cb *)qdisc_skb_cb(skb)->data;
+}
+
+static psched_time_t pie_get_enqueue_time(const struct sk_buff *skb)
+{
+ return get_pie_cb(skb)->enqueue_time;
+}
+
+static void pie_set_enqueue_time(struct sk_buff *skb)
+{
+ get_pie_cb(skb)->enqueue_time = psched_get_time();
}
static void pie_vars_init(struct pie_vars *vars)
{
vars->dq_count = DQCOUNT_INVALID;
+ vars->dq_tstamp = DTIME_INVALID;
vars->accu_prob = 0;
vars->avg_dq_rate = 0;
/* default of 150 ms in pschedtime */
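With the rate estimator disabled, PIE measures queue delay per packet: the enqueue path stamps the skb through the private cb helpers above and the dequeue path subtracts. A condensed sketch of the flow these helpers enable:

	/* enqueue side */
	pie_set_enqueue_time(skb);	/* store psched_get_time() in the cb */

	/* dequeue side */
	psched_time_t now = psched_get_time();
	psched_time_t qdelay = now - pie_get_enqueue_time(skb);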
@@ -172,6 +197,10 @@ static int pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
/* we can enqueue the packet */
if (enqueue) {
+ /* Set enqueue time only when dq_rate_estimator is disabled. */
+ if (!q->params.dq_rate_estimator)
+ pie_set_enqueue_time(skb);
+
q->stats.packets_in++;
if (qdisc_qlen(sch) > q->stats.maxq)
q->stats.maxq = qdisc_qlen(sch);
@@ -194,6 +223,7 @@ static const struct nla_policy pie_policy[TCA_PIE_MAX + 1] = {
[TCA_PIE_BETA] = {.type = NLA_U32},
[TCA_PIE_ECN] = {.type = NLA_U32},
[TCA_PIE_BYTEMODE] = {.type = NLA_U32},
+ [TCA_PIE_DQ_RATE_ESTIMATOR] = {.type = NLA_U32},
};
static int pie_change(struct Qdisc *sch, struct nlattr *opt,
@@ -247,6 +277,10 @@ static int pie_change(struct Qdisc *sch, struct nlattr *opt,
if (tb[TCA_PIE_BYTEMODE])
q->params.bytemode = nla_get_u32(tb[TCA_PIE_BYTEMODE]);
+ if (tb[TCA_PIE_DQ_RATE_ESTIMATOR])
+ q->params.dq_rate_estimator =
+ nla_get_u32(tb[TCA_PIE_DQ_RATE_ESTIMATOR]);
+
/* Drop excess packets if new limit is lower */
qlen = sch->q.qlen;
while (sch->q.qlen > sch->limit) {
@@ -266,6 +300,28 @@ static void pie_process_dequeue(struct Qdisc *sch, struct sk_buff *skb)
{
struct pie_sched_data *q = qdisc_priv(sch);
int qlen = sch->qstats.backlog; /* current queue size in bytes */
+ psched_time_t now = psched_get_time();
+ u32 dtime = 0;
+
+ /* If dq_rate_estimator is disabled, calculate qdelay using the
+ * packet timestamp.
+ */
+ if (!q->params.dq_rate_estimator) {
+ q->vars.qdelay = now - pie_get_enqueue_time(skb);
+
+ if (q->vars.dq_tstamp != DTIME_INVALID)
+ dtime = now - q->vars.dq_tstamp;
+
+ q->vars.dq_tstamp = now;
+
+ if (qlen == 0)
+ q->vars.qdelay = 0;
+
+ if (dtime == 0)
+ return;
+
+ goto burst_allowance_reduction;
+ }
/* If current queue is about 10 packets or more and dq_count is unset
* we have enough packets to calculate the drain rate. Save
@@ -289,10 +345,10 @@ static void pie_process_dequeue(struct Qdisc *sch, struct sk_buff *skb)
q->vars.dq_count += skb->len;
if (q->vars.dq_count >= QUEUE_THRESHOLD) {
- psched_time_t now = psched_get_time();
- u32 dtime = now - q->vars.dq_tstamp;
u32 count = q->vars.dq_count << PIE_SCALE;
+ dtime = now - q->vars.dq_tstamp;
+
if (dtime == 0)
return;
@@ -317,14 +373,19 @@ static void pie_process_dequeue(struct Qdisc *sch, struct sk_buff *skb)
q->vars.dq_tstamp = psched_get_time();
}
- if (q->vars.burst_time > 0) {
- if (q->vars.burst_time > dtime)
- q->vars.burst_time -= dtime;
- else
- q->vars.burst_time = 0;
- }
+ goto burst_allowance_reduction;
}
}
+
+ return;
+
+burst_allowance_reduction:
+ if (q->vars.burst_time > 0) {
+ if (q->vars.burst_time > dtime)
+ q->vars.burst_time -= dtime;
+ else
+ q->vars.burst_time = 0;
+ }
}
static void calculate_probability(struct Qdisc *sch)
@@ -332,19 +393,25 @@ static void calculate_probability(struct Qdisc *sch)
struct pie_sched_data *q = qdisc_priv(sch);
u32 qlen = sch->qstats.backlog; /* queue size in bytes */
psched_time_t qdelay = 0; /* in pschedtime */
- psched_time_t qdelay_old = q->vars.qdelay; /* in pschedtime */
+ psched_time_t qdelay_old = 0; /* in pschedtime */
s64 delta = 0; /* determines the change in probability */
u64 oldprob;
u64 alpha, beta;
u32 power;
bool update_prob = true;
- q->vars.qdelay_old = q->vars.qdelay;
+ if (q->params.dq_rate_estimator) {
+ qdelay_old = q->vars.qdelay;
+ q->vars.qdelay_old = q->vars.qdelay;
- if (q->vars.avg_dq_rate > 0)
- qdelay = (qlen << PIE_SCALE) / q->vars.avg_dq_rate;
- else
- qdelay = 0;
+ if (q->vars.avg_dq_rate > 0)
+ qdelay = (qlen << PIE_SCALE) / q->vars.avg_dq_rate;
+ else
+ qdelay = 0;
+ } else {
+ qdelay = q->vars.qdelay;
+ qdelay_old = q->vars.qdelay_old;
+ }
/* If qdelay is zero and qlen is not, it means qlen is very small, less
* than dequeue_rate, so we do not update probability in this round
@@ -430,14 +497,18 @@ static void calculate_probability(struct Qdisc *sch)
/* We restart the measurement cycle if the following conditions are met
* 1. If the delay has been low for 2 consecutive Tupdate periods
* 2. Calculated drop probability is zero
- * 3. We have atleast one estimate for the avg_dq_rate ie.,
- * is a non-zero value
+ * 3. If the dq_rate_estimator is enabled, we have at least one
+ * estimate for avg_dq_rate, i.e., it is a non-zero value
*/
if ((q->vars.qdelay < q->params.target / 2) &&
(q->vars.qdelay_old < q->params.target / 2) &&
q->vars.prob == 0 &&
- q->vars.avg_dq_rate > 0)
+ (!q->params.dq_rate_estimator || q->vars.avg_dq_rate > 0)) {
pie_vars_init(&q->vars);
+ }
+
+ if (!q->params.dq_rate_estimator)
+ q->vars.qdelay_old = qdelay;
}
static void pie_timer(struct timer_list *t)
@@ -497,7 +568,9 @@ static int pie_dump(struct Qdisc *sch, struct sk_buff *skb)
nla_put_u32(skb, TCA_PIE_ALPHA, q->params.alpha) ||
nla_put_u32(skb, TCA_PIE_BETA, q->params.beta) ||
nla_put_u32(skb, TCA_PIE_ECN, q->params.ecn) ||
- nla_put_u32(skb, TCA_PIE_BYTEMODE, q->params.bytemode))
+ nla_put_u32(skb, TCA_PIE_BYTEMODE, q->params.bytemode) ||
+ nla_put_u32(skb, TCA_PIE_DQ_RATE_ESTIMATOR,
+ q->params.dq_rate_estimator))
goto nla_put_failure;
return nla_nest_end(skb, opts);
@@ -514,9 +587,6 @@ static int pie_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
.prob = q->vars.prob,
.delay = ((u32)PSCHED_TICKS2NS(q->vars.qdelay)) /
NSEC_PER_USEC,
- /* unscale and return dq_rate in bytes per sec */
- .avg_dq_rate = q->vars.avg_dq_rate *
- (PSCHED_TICKS_PER_SEC) >> PIE_SCALE,
.packets_in = q->stats.packets_in,
.overlimit = q->stats.overlimit,
.maxq = q->stats.maxq,
@@ -524,6 +594,14 @@ static int pie_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
.ecn_mark = q->stats.ecn_mark,
};
+ /* avg_dq_rate is only valid if dq_rate_estimator is enabled */
+ st.dq_rate_estimating = q->params.dq_rate_estimator;
+
+ /* unscale and return dq_rate in bytes per sec */
+ if (q->params.dq_rate_estimator)
+ st.avg_dq_rate = q->vars.avg_dq_rate *
+ (PSCHED_TICKS_PER_SEC) >> PIE_SCALE;
+
return gnet_stats_copy_app(d, &st, sizeof(st));
}
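The relocated unscaling is a unit conversion: avg_dq_rate is maintained as (bytes << PIE_SCALE) per psched tick, so multiplying by PSCHED_TICKS_PER_SEC and shifting right by PIE_SCALE yields plain bytes per second, and it is only meaningful when the estimator actually ran. A sketch of the arithmetic:

	/* (bytes << 8)/tick * ticks/sec >> 8  ==  bytes/sec */
	u64 bytes_per_sec = (u64)q->vars.avg_dq_rate *
			    PSCHED_TICKS_PER_SEC >> PIE_SCALE;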
diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
index 7cd68628c637..c609373c8661 100644
--- a/net/sched/sch_taprio.c
+++ b/net/sched/sch_taprio.c
@@ -922,7 +922,7 @@ static int taprio_parse_mqprio_opt(struct net_device *dev,
}
/* Verify priority mapping uses valid tcs */
- for (i = 0; i < TC_BITMASK + 1; i++) {
+ for (i = 0; i <= TC_BITMASK; i++) {
if (qopt->prio_tc_map[i] >= qopt->num_tc) {
NL_SET_ERR_MSG(extack, "Invalid traffic class in priority to traffic class mapping");
return -EINVAL;
@@ -1347,6 +1347,26 @@ out:
return err;
}
+static int taprio_mqprio_cmp(const struct net_device *dev,
+ const struct tc_mqprio_qopt *mqprio)
+{
+ int i;
+
+ if (!mqprio || mqprio->num_tc != dev->num_tc)
+ return -1;
+
+ for (i = 0; i < mqprio->num_tc; i++)
+ if (dev->tc_to_txq[i].count != mqprio->count[i] ||
+ dev->tc_to_txq[i].offset != mqprio->offset[i])
+ return -1;
+
+ for (i = 0; i <= TC_BITMASK; i++)
+ if (dev->prio_tc_map[i] != mqprio->prio_tc_map[i])
+ return -1;
+
+ return 0;
+}
+
static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
struct netlink_ext_ack *extack)
{
@@ -1398,6 +1418,10 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
admin = rcu_dereference(q->admin_sched);
rcu_read_unlock();
+ /* no changes - no new mqprio settings */
+ if (!taprio_mqprio_cmp(dev, mqprio))
+ mqprio = NULL;
+
if (mqprio && (oper || admin)) {
NL_SET_ERR_MSG(extack, "Changing the traffic mapping of a running schedule is not supported");
err = -ENOTSUPP;
@@ -1455,7 +1479,7 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
mqprio->offset[i]);
/* Always use supplied priority mappings */
- for (i = 0; i < TC_BITMASK + 1; i++)
+ for (i = 0; i <= TC_BITMASK; i++)
netdev_set_prio_tc_map(dev, i,
mqprio->prio_tc_map[i]);
}
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index d2ffc9a0ba3a..bbd5004a5d09 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -64,6 +64,7 @@ static struct sctp_association *sctp_association_init(
/* Discarding const is appropriate here. */
asoc->ep = (struct sctp_endpoint *)ep;
asoc->base.sk = (struct sock *)sk;
+ asoc->base.net = sock_net(sk);
sctp_endpoint_hold(asoc->ep);
sock_hold(asoc->base.sk);
@@ -86,6 +87,8 @@ static struct sctp_association *sctp_association_init(
*/
asoc->max_retrans = sp->assocparams.sasoc_asocmaxrxt;
asoc->pf_retrans = sp->pf_retrans;
+ asoc->ps_retrans = sp->ps_retrans;
+ asoc->pf_expose = sp->pf_expose;
asoc->rto_initial = msecs_to_jiffies(sp->rtoinfo.srto_initial);
asoc->rto_max = msecs_to_jiffies(sp->rtoinfo.srto_max);
@@ -324,7 +327,7 @@ void sctp_association_free(struct sctp_association *asoc)
* socket.
*/
if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
- sk->sk_ack_backlog--;
+ sk_acceptq_removed(sk);
}
/* Mark as dead, so other users can know this structure is
@@ -429,6 +432,8 @@ void sctp_assoc_set_primary(struct sctp_association *asoc,
changeover = 1 ;
asoc->peer.primary_path = transport;
+ sctp_ulpevent_nofity_peer_addr_change(transport,
+ SCTP_ADDR_MADE_PRIM, 0);
/* Set a default msg_name for events. */
memcpy(&asoc->peer.primary_addr, &transport->ipaddr,
@@ -569,6 +574,7 @@ void sctp_assoc_rm_peer(struct sctp_association *asoc,
asoc->peer.transport_count--;
+ sctp_ulpevent_nofity_peer_addr_change(peer, SCTP_ADDR_REMOVED, 0);
sctp_transport_free(peer);
}
@@ -624,6 +630,8 @@ struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
/* And the partial failure retrans threshold */
peer->pf_retrans = asoc->pf_retrans;
+ /* And the primary path switchover retrans threshold */
+ peer->ps_retrans = asoc->ps_retrans;
/* Initialize the peer's SACK delay timeout based on the
* association configured value.
@@ -707,6 +715,8 @@ struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
list_add_tail_rcu(&peer->transports, &asoc->peer.transport_addr_list);
asoc->peer.transport_count++;
+ sctp_ulpevent_nofity_peer_addr_change(peer, SCTP_ADDR_ADDED, 0);
+
/* If we do not yet have a primary path, set one. */
if (!asoc->peer.primary_path) {
sctp_assoc_set_primary(asoc, peer);
@@ -781,9 +791,7 @@ void sctp_assoc_control_transport(struct sctp_association *asoc,
enum sctp_transport_cmd command,
sctp_sn_error_t error)
{
- struct sctp_ulpevent *event;
- struct sockaddr_storage addr;
- int spc_state = 0;
+ int spc_state = SCTP_ADDR_AVAILABLE;
bool ulp_notify = true;
/* Record the transition on the transport. */
@@ -793,19 +801,13 @@ void sctp_assoc_control_transport(struct sctp_association *asoc,
* to heartbeat success, report the SCTP_ADDR_CONFIRMED
* state to the user, otherwise report SCTP_ADDR_AVAILABLE.
*/
- if (SCTP_UNCONFIRMED == transport->state &&
- SCTP_HEARTBEAT_SUCCESS == error)
- spc_state = SCTP_ADDR_CONFIRMED;
- else
- spc_state = SCTP_ADDR_AVAILABLE;
- /* Don't inform ULP about transition from PF to
- * active state and set cwnd to 1 MTU, see SCTP
- * Quick failover draft section 5.1, point 5
- */
- if (transport->state == SCTP_PF) {
+ if (transport->state == SCTP_PF &&
+ asoc->pf_expose != SCTP_PF_EXPOSE_ENABLE)
ulp_notify = false;
- transport->cwnd = asoc->pathmtu;
- }
+ else if (transport->state == SCTP_UNCONFIRMED &&
+ error == SCTP_HEARTBEAT_SUCCESS)
+ spc_state = SCTP_ADDR_CONFIRMED;
+
transport->state = SCTP_ACTIVE;
break;
@@ -814,19 +816,21 @@ void sctp_assoc_control_transport(struct sctp_association *asoc,
* to inactive state. Also, release the cached route since
* there may be a better route next time.
*/
- if (transport->state != SCTP_UNCONFIRMED)
+ if (transport->state != SCTP_UNCONFIRMED) {
transport->state = SCTP_INACTIVE;
- else {
+ spc_state = SCTP_ADDR_UNREACHABLE;
+ } else {
sctp_transport_dst_release(transport);
ulp_notify = false;
}
-
- spc_state = SCTP_ADDR_UNREACHABLE;
break;
case SCTP_TRANSPORT_PF:
transport->state = SCTP_PF;
- ulp_notify = false;
+ if (asoc->pf_expose != SCTP_PF_EXPOSE_ENABLE)
+ ulp_notify = false;
+ else
+ spc_state = SCTP_ADDR_POTENTIALLY_FAILED;
break;
default:
@@ -836,16 +840,9 @@ void sctp_assoc_control_transport(struct sctp_association *asoc,
/* Generate and send a SCTP_PEER_ADDR_CHANGE notification
* to the user.
*/
- if (ulp_notify) {
- memset(&addr, 0, sizeof(struct sockaddr_storage));
- memcpy(&addr, &transport->ipaddr,
- transport->af_specific->sockaddr_len);
-
- event = sctp_ulpevent_make_peer_addr_change(asoc, &addr,
- 0, spc_state, error, GFP_ATOMIC);
- if (event)
- asoc->stream.si->enqueue_event(&asoc->ulpq, event);
- }
+ if (ulp_notify)
+ sctp_ulpevent_nofity_peer_addr_change(transport,
+ spc_state, error);
/* Select new active and retran paths. */
sctp_select_active_and_retran_path(asoc);
@@ -1077,7 +1074,7 @@ void sctp_assoc_migrate(struct sctp_association *assoc, struct sock *newsk)
/* Decrement the backlog value for a TCP-style socket. */
if (sctp_style(oldsk, TCP))
- oldsk->sk_ack_backlog--;
+ sk_acceptq_removed(oldsk);
/* Release references to the old endpoint and the sock. */
sctp_endpoint_put(assoc->ep);
diff --git a/net/sctp/chunk.c b/net/sctp/chunk.c
index cc0405c79dfc..cc3ce5d80b08 100644
--- a/net/sctp/chunk.c
+++ b/net/sctp/chunk.c
@@ -75,41 +75,39 @@ static void sctp_datamsg_destroy(struct sctp_datamsg *msg)
struct list_head *pos, *temp;
struct sctp_chunk *chunk;
struct sctp_ulpevent *ev;
- int error = 0, notify;
-
- /* If we failed, we may need to notify. */
- notify = msg->send_failed ? -1 : 0;
+ int error, sent;
/* Release all references. */
list_for_each_safe(pos, temp, &msg->chunks) {
list_del_init(pos);
chunk = list_entry(pos, struct sctp_chunk, frag_list);
- /* Check whether we _really_ need to notify. */
- if (notify < 0) {
- asoc = chunk->asoc;
- if (msg->send_error)
- error = msg->send_error;
- else
- error = asoc->outqueue.error;
-
- notify = sctp_ulpevent_type_enabled(asoc->subscribe,
- SCTP_SEND_FAILED);
+
+ if (!msg->send_failed) {
+ sctp_chunk_put(chunk);
+ continue;
}
- /* Generate a SEND FAILED event only if enabled. */
- if (notify > 0) {
- int sent;
- if (chunk->has_tsn)
- sent = SCTP_DATA_SENT;
- else
- sent = SCTP_DATA_UNSENT;
+ asoc = chunk->asoc;
+ error = msg->send_error ?: asoc->outqueue.error;
+ sent = chunk->has_tsn ? SCTP_DATA_SENT : SCTP_DATA_UNSENT;
+ if (sctp_ulpevent_type_enabled(asoc->subscribe,
+ SCTP_SEND_FAILED)) {
ev = sctp_ulpevent_make_send_failed(asoc, chunk, sent,
error, GFP_ATOMIC);
if (ev)
asoc->stream.si->enqueue_event(&asoc->ulpq, ev);
}
+ if (sctp_ulpevent_type_enabled(asoc->subscribe,
+ SCTP_SEND_FAILED_EVENT)) {
+ ev = sctp_ulpevent_make_send_failed_event(asoc, chunk,
+ sent, error,
+ GFP_ATOMIC);
+ if (ev)
+ asoc->stream.si->enqueue_event(&asoc->ulpq, ev);
+ }
+
sctp_chunk_put(chunk);
}
diff --git a/net/sctp/diag.c b/net/sctp/diag.c
index 0851166b9175..8a15146faaeb 100644
--- a/net/sctp/diag.c
+++ b/net/sctp/diag.c
@@ -425,8 +425,8 @@ static void sctp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
r->idiag_rqueue = atomic_read(&infox->asoc->rmem_alloc);
r->idiag_wqueue = infox->asoc->sndbuf_used;
} else {
- r->idiag_rqueue = sk->sk_ack_backlog;
- r->idiag_wqueue = sk->sk_max_ack_backlog;
+ r->idiag_rqueue = READ_ONCE(sk->sk_ack_backlog);
+ r->idiag_wqueue = READ_ONCE(sk->sk_max_ack_backlog);
}
if (infox->sctpinfo)
sctp_get_sctp_info(sk, infox->asoc, infox->sctpinfo);
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index ea53049d1db6..3ccab7440c9e 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -110,6 +110,7 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
/* Remember who we are attached to. */
ep->base.sk = sk;
+ ep->base.net = sock_net(sk);
sock_hold(ep->base.sk);
return ep;
@@ -164,7 +165,7 @@ void sctp_endpoint_add_asoc(struct sctp_endpoint *ep,
/* Increment the backlog value for a TCP-style listening socket. */
if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
- sk->sk_ack_backlog++;
+ sk_acceptq_added(sk);
}
/* Free the endpoint structure. Delay cleanup until
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 2277981559d0..4d2bcfc9d7f8 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -882,7 +882,7 @@ static inline int sctp_hash_cmp(struct rhashtable_compare_arg *arg,
if (!sctp_transport_hold(t))
return err;
- if (!net_eq(sock_net(t->asoc->base.sk), x->net))
+ if (!net_eq(t->asoc->base.net, x->net))
goto out;
if (x->lport != htons(t->asoc->base.bind_addr.port))
goto out;
@@ -897,7 +897,7 @@ static inline __u32 sctp_hash_obj(const void *data, u32 len, u32 seed)
{
const struct sctp_transport *t = data;
- return sctp_hashfn(sock_net(t->asoc->base.sk),
+ return sctp_hashfn(t->asoc->base.net,
htons(t->asoc->base.bind_addr.port),
&t->ipaddr, seed);
}
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 08d14d86ecfb..fbbf19128c2d 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -1217,9 +1217,15 @@ static int __net_init sctp_defaults_init(struct net *net)
/* Max.Burst - 4 */
net->sctp.max_burst = SCTP_DEFAULT_MAX_BURST;
+ /* Disable Primary Path Switchover by default */
+ net->sctp.ps_retrans = SCTP_PS_RETRANS_MAX;
+
/* Enable pf state by default */
net->sctp.pf_enable = 1;
+ /* Leave the pf exposure feature unset by default */
+ net->sctp.pf_expose = SCTP_PF_EXPOSE_UNSET;
+
/* Association.Max.Retrans - 10 attempts
* Path.Max.Retrans - 5 attempts (per destination address)
* Max.Init.Retransmits - 8 attempts
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index e52b2128e43b..acd737d4c0e0 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -567,6 +567,11 @@ static void sctp_do_8_2_transport_strike(struct sctp_cmd_seq *commands,
SCTP_FAILED_THRESHOLD);
}
+ if (transport->error_count > transport->ps_retrans &&
+ asoc->peer.primary_path == transport &&
+ asoc->peer.active_path != transport)
+ sctp_assoc_set_primary(asoc, asoc->peer.active_path);
+
/* E2) For the destination address for which the timer
* expires, set RTO <- RTO * 2 ("back off the timer"). The
* maximum value discussed in rule C7 above (RTO.max) may be
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 0c21c52fc408..4ab8208a2dd4 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -2160,8 +2160,10 @@ enum sctp_disposition sctp_sf_do_5_2_4_dupcook(
/* Update socket peer label if first association. */
if (security_sctp_assoc_request((struct sctp_endpoint *)ep,
- chunk->skb))
+ chunk->skb)) {
+ sctp_association_free(new_asoc);
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+ }
/* Set temp so that it won't be added into hashtable */
new_asoc->temp = 1;
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index ffd3262b7a41..83e4ca1fabda 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -3943,18 +3943,22 @@ static int sctp_setsockopt_auto_asconf(struct sock *sk, char __user *optval,
*/
static int sctp_setsockopt_paddr_thresholds(struct sock *sk,
char __user *optval,
- unsigned int optlen)
+ unsigned int optlen, bool v2)
{
- struct sctp_paddrthlds val;
+ struct sctp_paddrthlds_v2 val;
struct sctp_transport *trans;
struct sctp_association *asoc;
+ int len;
- if (optlen < sizeof(struct sctp_paddrthlds))
+ len = v2 ? sizeof(val) : sizeof(struct sctp_paddrthlds);
+ if (optlen < len)
return -EINVAL;
- if (copy_from_user(&val, (struct sctp_paddrthlds __user *)optval,
- sizeof(struct sctp_paddrthlds)))
+ if (copy_from_user(&val, optval, len))
return -EFAULT;
+ if (v2 && val.spt_pathpfthld > val.spt_pathcpthld)
+ return -EINVAL;
+
if (!sctp_is_any(sk, (const union sctp_addr *)&val.spt_address)) {
trans = sctp_addr_id2transport(sk, &val.spt_address,
val.spt_assoc_id);
@@ -3963,6 +3967,8 @@ static int sctp_setsockopt_paddr_thresholds(struct sock *sk,
if (val.spt_pathmaxrxt)
trans->pathmaxrxt = val.spt_pathmaxrxt;
+ if (v2)
+ trans->ps_retrans = val.spt_pathcpthld;
trans->pf_retrans = val.spt_pathpfthld;
return 0;
@@ -3978,17 +3984,23 @@ static int sctp_setsockopt_paddr_thresholds(struct sock *sk,
transports) {
if (val.spt_pathmaxrxt)
trans->pathmaxrxt = val.spt_pathmaxrxt;
+ if (v2)
+ trans->ps_retrans = val.spt_pathcpthld;
trans->pf_retrans = val.spt_pathpfthld;
}
if (val.spt_pathmaxrxt)
asoc->pathmaxrxt = val.spt_pathmaxrxt;
+ if (v2)
+ asoc->ps_retrans = val.spt_pathcpthld;
asoc->pf_retrans = val.spt_pathpfthld;
} else {
struct sctp_sock *sp = sctp_sk(sk);
if (val.spt_pathmaxrxt)
sp->pathmaxrxt = val.spt_pathmaxrxt;
+ if (v2)
+ sp->ps_retrans = val.spt_pathcpthld;
sp->pf_retrans = val.spt_pathpfthld;
}
@@ -4589,6 +4601,40 @@ out:
return retval;
}
+static int sctp_setsockopt_pf_expose(struct sock *sk,
+ char __user *optval,
+ unsigned int optlen)
+{
+ struct sctp_assoc_value params;
+ struct sctp_association *asoc;
+ int retval = -EINVAL;
+
+ if (optlen != sizeof(params))
+ goto out;
+
+ if (copy_from_user(&params, optval, optlen)) {
+ retval = -EFAULT;
+ goto out;
+ }
+
+ if (params.assoc_value > SCTP_PF_EXPOSE_MAX)
+ goto out;
+
+ asoc = sctp_id2assoc(sk, params.assoc_id);
+ if (!asoc && params.assoc_id != SCTP_FUTURE_ASSOC &&
+ sctp_style(sk, UDP))
+ goto out;
+
+ if (asoc)
+ asoc->pf_expose = params.assoc_value;
+ else
+ sctp_sk(sk)->pf_expose = params.assoc_value;
+ retval = 0;
+
+out:
+ return retval;
+}
+
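From userspace the new option takes the familiar sctp_assoc_value shape. A hedged sketch, assuming the uapi constants SCTP_EXPOSE_POTENTIALLY_FAILED_STATE, SCTP_FUTURE_ASSOC and SCTP_PF_EXPOSE_ENABLE exported alongside this series, that enables PF-state exposure for future associations:

	#include <sys/socket.h>
	#include <netinet/in.h>
	#include <netinet/sctp.h>

	static int enable_pf_expose(int fd)
	{
		struct sctp_assoc_value val = {
			.assoc_id    = SCTP_FUTURE_ASSOC,
			.assoc_value = SCTP_PF_EXPOSE_ENABLE,
		};

		return setsockopt(fd, IPPROTO_SCTP,
				  SCTP_EXPOSE_POTENTIALLY_FAILED_STATE,
				  &val, sizeof(val));
	}

With exposure disabled, the sctp_getsockopt_peer_addr_info() change later in this patch returns -EACCES for a transport in the PF state, so callers can tell "hidden by policy" apart from a bad address.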
/* API 6.2 setsockopt(), getsockopt()
*
* Applications use setsockopt() and getsockopt() to set or retrieve
@@ -4744,7 +4790,12 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname,
retval = sctp_setsockopt_auto_asconf(sk, optval, optlen);
break;
case SCTP_PEER_ADDR_THLDS:
- retval = sctp_setsockopt_paddr_thresholds(sk, optval, optlen);
+ retval = sctp_setsockopt_paddr_thresholds(sk, optval, optlen,
+ false);
+ break;
+ case SCTP_PEER_ADDR_THLDS_V2:
+ retval = sctp_setsockopt_paddr_thresholds(sk, optval, optlen,
+ true);
break;
case SCTP_RECVRCVINFO:
retval = sctp_setsockopt_recvrcvinfo(sk, optval, optlen);
@@ -4798,6 +4849,9 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname,
case SCTP_ECN_SUPPORTED:
retval = sctp_setsockopt_ecn_supported(sk, optval, optlen);
break;
+ case SCTP_EXPOSE_POTENTIALLY_FAILED_STATE:
+ retval = sctp_setsockopt_pf_expose(sk, optval, optlen);
+ break;
default:
retval = -ENOPROTOOPT;
break;
@@ -5041,6 +5095,8 @@ static int sctp_init_sock(struct sock *sk)
sp->hbinterval = net->sctp.hb_interval;
sp->pathmaxrxt = net->sctp.max_retrans_path;
sp->pf_retrans = net->sctp.pf_retrans;
+ sp->ps_retrans = net->sctp.ps_retrans;
+ sp->pf_expose = net->sctp.pf_expose;
sp->pathmtu = 0; /* allow default discovery */
sp->sackdelay = net->sctp.sack_timeout;
sp->sackfreq = 2;
@@ -5521,8 +5577,16 @@ static int sctp_getsockopt_peer_addr_info(struct sock *sk, int len,
transport = sctp_addr_id2transport(sk, &pinfo.spinfo_address,
pinfo.spinfo_assoc_id);
- if (!transport)
- return -EINVAL;
+ if (!transport) {
+ retval = -EINVAL;
+ goto out;
+ }
+
+ if (transport->state == SCTP_PF &&
+ transport->asoc->pf_expose == SCTP_PF_EXPOSE_DISABLE) {
+ retval = -EACCES;
+ goto out;
+ }
pinfo.spinfo_assoc_id = sctp_assoc2id(transport->asoc);
pinfo.spinfo_state = transport->state;
@@ -7170,18 +7234,19 @@ static int sctp_getsockopt_assoc_ids(struct sock *sk, int len,
* http://www.ietf.org/id/draft-nishida-tsvwg-sctp-failover-05.txt
*/
static int sctp_getsockopt_paddr_thresholds(struct sock *sk,
- char __user *optval,
- int len,
- int __user *optlen)
+ char __user *optval, int len,
+ int __user *optlen, bool v2)
{
- struct sctp_paddrthlds val;
+ struct sctp_paddrthlds_v2 val;
struct sctp_transport *trans;
struct sctp_association *asoc;
+ int min;
- if (len < sizeof(struct sctp_paddrthlds))
+ min = v2 ? sizeof(val) : sizeof(struct sctp_paddrthlds);
+ if (len < min)
return -EINVAL;
- len = sizeof(struct sctp_paddrthlds);
- if (copy_from_user(&val, (struct sctp_paddrthlds __user *)optval, len))
+ len = min;
+ if (copy_from_user(&val, optval, len))
return -EFAULT;
if (!sctp_is_any(sk, (const union sctp_addr *)&val.spt_address)) {
@@ -7192,6 +7257,7 @@ static int sctp_getsockopt_paddr_thresholds(struct sock *sk,
val.spt_pathmaxrxt = trans->pathmaxrxt;
val.spt_pathpfthld = trans->pf_retrans;
+ val.spt_pathcpthld = trans->ps_retrans;
goto out;
}
@@ -7204,11 +7270,13 @@ static int sctp_getsockopt_paddr_thresholds(struct sock *sk,
if (asoc) {
val.spt_pathpfthld = asoc->pf_retrans;
val.spt_pathmaxrxt = asoc->pathmaxrxt;
+ val.spt_pathcpthld = asoc->ps_retrans;
} else {
struct sctp_sock *sp = sctp_sk(sk);
val.spt_pathpfthld = sp->pf_retrans;
val.spt_pathmaxrxt = sp->pathmaxrxt;
+ val.spt_pathcpthld = sp->ps_retrans;
}
out:
@@ -7900,6 +7968,45 @@ out:
return retval;
}
+static int sctp_getsockopt_pf_expose(struct sock *sk, int len,
+ char __user *optval,
+ int __user *optlen)
+{
+ struct sctp_assoc_value params;
+ struct sctp_association *asoc;
+ int retval = -EFAULT;
+
+ if (len < sizeof(params)) {
+ retval = -EINVAL;
+ goto out;
+ }
+
+ len = sizeof(params);
+ if (copy_from_user(&params, optval, len))
+ goto out;
+
+ asoc = sctp_id2assoc(sk, params.assoc_id);
+ if (!asoc && params.assoc_id != SCTP_FUTURE_ASSOC &&
+ sctp_style(sk, UDP)) {
+ retval = -EINVAL;
+ goto out;
+ }
+
+ params.assoc_value = asoc ? asoc->pf_expose
+ : sctp_sk(sk)->pf_expose;
+
+ if (put_user(len, optlen))
+ goto out;
+
+ if (copy_to_user(optval, &params, len))
+ goto out;
+
+ retval = 0;
+
+out:
+ return retval;
+}
+
static int sctp_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen)
{
@@ -8049,7 +8156,12 @@ static int sctp_getsockopt(struct sock *sk, int level, int optname,
retval = sctp_getsockopt_auto_asconf(sk, len, optval, optlen);
break;
case SCTP_PEER_ADDR_THLDS:
- retval = sctp_getsockopt_paddr_thresholds(sk, optval, len, optlen);
+ retval = sctp_getsockopt_paddr_thresholds(sk, optval, len,
+ optlen, false);
+ break;
+ case SCTP_PEER_ADDR_THLDS_V2:
+ retval = sctp_getsockopt_paddr_thresholds(sk, optval, len,
+ optlen, true);
break;
case SCTP_GET_ASSOC_STATS:
retval = sctp_getsockopt_assoc_stats(sk, len, optval, optlen);
@@ -8112,6 +8224,9 @@ static int sctp_getsockopt(struct sock *sk, int level, int optname,
case SCTP_ECN_SUPPORTED:
retval = sctp_getsockopt_ecn_supported(sk, len, optval, optlen);
break;
+ case SCTP_EXPOSE_POTENTIALLY_FAILED_STATE:
+ retval = sctp_getsockopt_pf_expose(sk, len, optval, optlen);
+ break;
default:
retval = -ENOPROTOOPT;
break;
@@ -8376,7 +8491,7 @@ static int sctp_listen_start(struct sock *sk, int backlog)
}
}
- sk->sk_max_ack_backlog = backlog;
+ WRITE_ONCE(sk->sk_max_ack_backlog, backlog);
return sctp_hash_endpoint(ep);
}
@@ -8430,7 +8545,7 @@ int sctp_inet_listen(struct socket *sock, int backlog)
/* If we are already listening, just update the backlog */
if (sctp_sstate(sk, LISTENING))
- sk->sk_max_ack_backlog = backlog;
+ WRITE_ONCE(sk->sk_max_ack_backlog, backlog);
else {
err = sctp_listen_start(sk, backlog);
if (err)
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
index 238cf1737576..4740aa70e652 100644
--- a/net/sctp/sysctl.c
+++ b/net/sctp/sysctl.c
@@ -34,6 +34,8 @@ static int rto_alpha_min = 0;
static int rto_beta_min = 0;
static int rto_alpha_max = 1000;
static int rto_beta_max = 1000;
+static int pf_expose_max = SCTP_PF_EXPOSE_MAX;
+static int ps_retrans_max = SCTP_PS_RETRANS_MAX;
static unsigned long max_autoclose_min = 0;
static unsigned long max_autoclose_max =
@@ -212,7 +214,16 @@ static struct ctl_table sctp_net_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = SYSCTL_ZERO,
- .extra2 = SYSCTL_INT_MAX,
+ .extra2 = &init_net.sctp.ps_retrans,
+ },
+ {
+ .procname = "ps_retrans",
+ .data = &init_net.sctp.ps_retrans,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &init_net.sctp.pf_retrans,
+ .extra2 = &ps_retrans_max,
},
{
.procname = "sndbuf_policy",
@@ -318,6 +329,15 @@ static struct ctl_table sctp_net_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec,
},
+ {
+ .procname = "pf_expose",
+ .data = &init_net.sctp.pf_expose,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = &pf_expose_max,
+ },
{ /* sentinel */ }
};
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c
index e0cc1edf49a0..c82dbdcf13f2 100644
--- a/net/sctp/ulpevent.c
+++ b/net/sctp/ulpevent.c
@@ -238,7 +238,7 @@ fail:
* When a destination address on a multi-homed peer encounters a change
* an interface details event is sent.
*/
-struct sctp_ulpevent *sctp_ulpevent_make_peer_addr_change(
+static struct sctp_ulpevent *sctp_ulpevent_make_peer_addr_change(
const struct sctp_association *asoc,
const struct sockaddr_storage *aaddr,
int flags, int state, int error, gfp_t gfp)
@@ -336,6 +336,22 @@ fail:
return NULL;
}
+void sctp_ulpevent_nofity_peer_addr_change(struct sctp_transport *transport,
+ int state, int error)
+{
+ struct sctp_association *asoc = transport->asoc;
+ struct sockaddr_storage addr;
+ struct sctp_ulpevent *event;
+
+ memset(&addr, 0, sizeof(struct sockaddr_storage));
+ memcpy(&addr, &transport->ipaddr, transport->af_specific->sockaddr_len);
+
+ event = sctp_ulpevent_make_peer_addr_change(asoc, &addr, 0, state,
+ error, GFP_ATOMIC);
+ if (event)
+ asoc->stream.si->enqueue_event(&asoc->ulpq, event);
+}
+
/* Create and initialize an SCTP_REMOTE_ERROR notification.
*
* Note: This assumes that the chunk->skb->data already points to the
@@ -511,6 +527,45 @@ fail:
return NULL;
}
+struct sctp_ulpevent *sctp_ulpevent_make_send_failed_event(
+ const struct sctp_association *asoc, struct sctp_chunk *chunk,
+ __u16 flags, __u32 error, gfp_t gfp)
+{
+ struct sctp_send_failed_event *ssf;
+ struct sctp_ulpevent *event;
+ struct sk_buff *skb;
+ int len;
+
+ skb = skb_copy_expand(chunk->skb, sizeof(*ssf), 0, gfp);
+ if (!skb)
+ return NULL;
+
+ len = ntohs(chunk->chunk_hdr->length);
+ len -= sctp_datachk_len(&asoc->stream);
+
+ skb_pull(skb, sctp_datachk_len(&asoc->stream));
+ event = sctp_skb2event(skb);
+ sctp_ulpevent_init(event, MSG_NOTIFICATION, skb->truesize);
+
+ ssf = skb_push(skb, sizeof(*ssf));
+ ssf->ssf_type = SCTP_SEND_FAILED_EVENT;
+ ssf->ssf_flags = flags;
+ ssf->ssf_length = sizeof(*ssf) + len;
+ skb_trim(skb, ssf->ssf_length);
+ ssf->ssf_error = error;
+
+ ssf->ssfe_info.snd_sid = chunk->sinfo.sinfo_stream;
+ ssf->ssfe_info.snd_ppid = chunk->sinfo.sinfo_ppid;
+ ssf->ssfe_info.snd_context = chunk->sinfo.sinfo_context;
+ ssf->ssfe_info.snd_assoc_id = chunk->sinfo.sinfo_assoc_id;
+ ssf->ssfe_info.snd_flags = chunk->chunk_hdr->flags;
+
+ sctp_ulpevent_set_owner(event, asoc);
+ ssf->ssf_assoc_id = sctp_assoc2id(asoc);
+
+ return event;
+}
+
/* Create and initialize a SCTP_SHUTDOWN_EVENT notification.
*
* Socket Extensions for SCTP - draft-01
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index 47946f489fd4..b997072c72e5 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -25,6 +25,7 @@
#include <linux/in.h>
#include <linux/sched/signal.h>
#include <linux/if_vlan.h>
+#include <linux/rcupdate_wait.h>
#include <net/sock.h>
#include <net/tcp.h>
@@ -174,6 +175,7 @@ static int smc_release(struct socket *sock)
if (!sk)
goto out;
+ sock_hold(sk); /* sock_put below */
smc = smc_sk(sk);
/* cleanup for a dangling non-blocking connect */
@@ -196,6 +198,7 @@ static int smc_release(struct socket *sock)
sock->sk = NULL;
release_sock(sk);
+ sock_put(sk); /* sock_hold above */
sock_put(sk); /* final sock_put */
out:
return rc;
@@ -796,6 +799,7 @@ static void smc_connect_work(struct work_struct *work)
smc->sk.sk_err = EPIPE;
else if (signal_pending(current))
smc->sk.sk_err = -sock_intr_errno(timeo);
+ sock_put(&smc->sk); /* passive closing */
goto out;
}
@@ -977,12 +981,14 @@ void smc_close_non_accepted(struct sock *sk)
{
struct smc_sock *smc = smc_sk(sk);
+ sock_hold(sk); /* sock_put below */
lock_sock(sk);
if (!sk->sk_lingertime)
/* wait for peer closing */
sk->sk_lingertime = SMC_MAX_STREAM_WAIT_TIMEOUT;
__smc_release(smc);
release_sock(sk);
+ sock_put(sk); /* sock_hold above */
sock_put(sk); /* final sock_put */
}
@@ -1731,7 +1737,7 @@ static int smc_setsockopt(struct socket *sock, int level, int optname,
case TCP_FASTOPEN_KEY:
case TCP_FASTOPEN_NO_COOKIE:
/* option not supported by SMC */
- if (sk->sk_state == SMC_INIT) {
+ if (sk->sk_state == SMC_INIT && !smc->connect_nonblock) {
smc_switch_to_fallback(smc);
smc->fallback_rsn = SMC_CLC_DECL_OPTUNSUPP;
} else {
@@ -2034,22 +2040,28 @@ static int __init smc_init(void)
if (rc)
goto out_pernet_subsys;
+ rc = smc_core_init();
+ if (rc) {
+ pr_err("%s: smc_core_init fails with %d\n", __func__, rc);
+ goto out_pnet;
+ }
+
rc = smc_llc_init();
if (rc) {
pr_err("%s: smc_llc_init fails with %d\n", __func__, rc);
- goto out_pnet;
+ goto out_core;
}
rc = smc_cdc_init();
if (rc) {
pr_err("%s: smc_cdc_init fails with %d\n", __func__, rc);
- goto out_pnet;
+ goto out_core;
}
rc = proto_register(&smc_proto, 1);
if (rc) {
pr_err("%s: proto_register(v4) fails with %d\n", __func__, rc);
- goto out_pnet;
+ goto out_core;
}
rc = proto_register(&smc_proto6, 1);
@@ -2081,6 +2093,8 @@ out_proto6:
proto_unregister(&smc_proto6);
out_proto:
proto_unregister(&smc_proto);
+out_core:
+ smc_core_exit();
out_pnet:
smc_pnet_exit();
out_pernet_subsys:
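smc_init() gains a smc_core_init() step, and every later failure path is retargeted to the new out_core label so the unwind still runs in exact reverse order of initialization. A compact sketch of this goto-ladder idiom (the step_* helpers are hypothetical):

    static int __init demo_module_init(void)
    {
        int rc;

        rc = step_a_init();          /* hypothetical init steps */
        if (rc)
            return rc;
        rc = step_b_init();
        if (rc)
            goto out_a;
        rc = step_c_init();
        if (rc)
            goto out_b;
        return 0;

    out_b:                           /* unwind in reverse order */
        step_b_exit();
    out_a:
        step_a_exit();
        return rc;
    }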
@@ -2091,14 +2105,15 @@ out_pernet_subsys:
static void __exit smc_exit(void)
{
- smc_core_exit();
static_branch_disable(&tcp_have_smc);
- smc_ib_unregister_client();
sock_unregister(PF_SMC);
+ smc_core_exit();
+ smc_ib_unregister_client();
proto_unregister(&smc_proto6);
proto_unregister(&smc_proto);
smc_pnet_exit();
unregister_pernet_subsys(&smc_net_ops);
+ rcu_barrier();
}
module_init(smc_init);
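The reordered smc_exit() now ends with rcu_barrier(), matching the new <linux/rcupdate_wait.h> include: call_rcu() callbacks still queued at unload time may reference module text, so the module must wait for them to run. Sketch under that assumption (demo names illustrative):

    static void __exit demo_module_exit(void)
    {
        demo_unregister_everything(); /* hypothetical: stop queueing new call_rcu() work */
        rcu_barrier();                /* wait for all in-flight RCU callbacks */
    }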
diff --git a/net/smc/smc.h b/net/smc/smc.h
index 878313f8d6c1..be11ba41190f 100644
--- a/net/smc/smc.h
+++ b/net/smc/smc.h
@@ -188,6 +188,7 @@ struct smc_connection {
* 0 for SMC-R, 32 for SMC-D
*/
u64 peer_token; /* SMC-D token of peer */
+ u8 killed : 1; /* abnormal termination */
};
struct smc_sock { /* smc sock container */
diff --git a/net/smc/smc_cdc.c b/net/smc/smc_cdc.c
index d0b0f4c865b4..164f1584861b 100644
--- a/net/smc/smc_cdc.c
+++ b/net/smc/smc_cdc.c
@@ -63,7 +63,7 @@ int smc_cdc_get_free_slot(struct smc_connection *conn,
rc = smc_wr_tx_get_free_slot(link, smc_cdc_tx_handler, wr_buf,
wr_rdma_buf,
(struct smc_wr_tx_pend_priv **)pend);
- if (!conn->alert_token_local)
+ if (conn->killed)
/* abnormal termination */
rc = -EPIPE;
return rc;
@@ -131,6 +131,9 @@ int smc_cdc_get_slot_and_msg_send(struct smc_connection *conn)
{
int rc;
+ if (!conn->lgr || (conn->lgr->is_smcd && conn->lgr->peer_shutdown))
+ return -EPIPE;
+
if (conn->lgr->is_smcd) {
spin_lock_bh(&conn->send_lock);
rc = smcd_cdc_msg_send(conn);
@@ -328,7 +331,7 @@ static void smcd_cdc_rx_tsklet(unsigned long data)
struct smcd_cdc_msg cdc;
struct smc_sock *smc;
- if (!conn)
+ if (!conn || conn->killed)
return;
data_cdc = (struct smcd_cdc_msg *)conn->rmb_desc->cpu_addr;
diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c
index 49bcebff6378..0879f7bed967 100644
--- a/net/smc/smc_clc.c
+++ b/net/smc/smc_clc.c
@@ -349,7 +349,7 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
smc->peer_diagnosis = ntohl(dclc->peer_diagnosis);
if (((struct smc_clc_msg_decline *)buf)->hdr.flag) {
smc->conn.lgr->sync_err = 1;
- smc_lgr_terminate(smc->conn.lgr);
+ smc_lgr_terminate(smc->conn.lgr, true);
}
}
diff --git a/net/smc/smc_close.c b/net/smc/smc_close.c
index fc06720b53c1..290270c821ca 100644
--- a/net/smc/smc_close.c
+++ b/net/smc/smc_close.c
@@ -13,14 +13,13 @@
#include <linux/sched/signal.h>
#include <net/sock.h>
+#include <net/tcp.h>
#include "smc.h"
#include "smc_tx.h"
#include "smc_cdc.h"
#include "smc_close.h"
-#define SMC_CLOSE_WAIT_LISTEN_CLCSOCK_TIME (5 * HZ)
-
/* release the clcsock that is assigned to the smc_sock */
void smc_clcsock_release(struct smc_sock *smc)
{
@@ -65,8 +64,9 @@ static void smc_close_stream_wait(struct smc_sock *smc, long timeout)
rc = sk_wait_event(sk, &timeout,
!smc_tx_prepared_sends(&smc->conn) ||
- (sk->sk_err == ECONNABORTED) ||
- (sk->sk_err == ECONNRESET),
+ sk->sk_err == ECONNABORTED ||
+ sk->sk_err == ECONNRESET ||
+ smc->conn.killed,
&wait);
if (rc)
break;
@@ -95,68 +95,73 @@ static int smc_close_final(struct smc_connection *conn)
conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
else
conn->local_tx_ctrl.conn_state_flags.peer_conn_closed = 1;
+ if (conn->killed)
+ return -EPIPE;
return smc_cdc_get_slot_and_msg_send(conn);
}
-static int smc_close_abort(struct smc_connection *conn)
+int smc_close_abort(struct smc_connection *conn)
{
conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
return smc_cdc_get_slot_and_msg_send(conn);
}
+static void smc_close_cancel_work(struct smc_sock *smc)
+{
+ struct sock *sk = &smc->sk;
+
+ release_sock(sk);
+ cancel_work_sync(&smc->conn.close_work);
+ cancel_delayed_work_sync(&smc->conn.tx_work);
+ lock_sock(sk);
+ sk->sk_state = SMC_CLOSED;
+}
+
/* terminate smc socket abnormally - active abort
* link group is terminated, i.e. RDMA communication no longer possible
*/
-static void smc_close_active_abort(struct smc_sock *smc)
+void smc_close_active_abort(struct smc_sock *smc)
{
struct sock *sk = &smc->sk;
-
- struct smc_cdc_conn_state_flags *txflags =
- &smc->conn.local_tx_ctrl.conn_state_flags;
+ bool release_clcsock = false;
if (sk->sk_state != SMC_INIT && smc->clcsock && smc->clcsock->sk) {
sk->sk_err = ECONNABORTED;
- if (smc->clcsock && smc->clcsock->sk) {
- smc->clcsock->sk->sk_err = ECONNABORTED;
- smc->clcsock->sk->sk_state_change(smc->clcsock->sk);
- }
+ if (smc->clcsock && smc->clcsock->sk)
+ tcp_abort(smc->clcsock->sk, ECONNABORTED);
}
switch (sk->sk_state) {
case SMC_ACTIVE:
sk->sk_state = SMC_PEERABORTWAIT;
- release_sock(sk);
- cancel_delayed_work_sync(&smc->conn.tx_work);
- lock_sock(sk);
+ smc_close_cancel_work(smc);
+ sk->sk_state = SMC_CLOSED;
sock_put(sk); /* passive closing */
break;
case SMC_APPCLOSEWAIT1:
case SMC_APPCLOSEWAIT2:
- if (!smc_cdc_rxed_any_close(&smc->conn))
- sk->sk_state = SMC_PEERABORTWAIT;
- else
- sk->sk_state = SMC_CLOSED;
- release_sock(sk);
- cancel_delayed_work_sync(&smc->conn.tx_work);
- lock_sock(sk);
+ smc_close_cancel_work(smc);
+ sk->sk_state = SMC_CLOSED;
+ sock_put(sk); /* postponed passive closing */
break;
case SMC_PEERCLOSEWAIT1:
case SMC_PEERCLOSEWAIT2:
- if (!txflags->peer_conn_closed) {
- /* just SHUTDOWN_SEND done */
- sk->sk_state = SMC_PEERABORTWAIT;
- } else {
- sk->sk_state = SMC_CLOSED;
- }
+ case SMC_PEERFINCLOSEWAIT:
+ sk->sk_state = SMC_PEERABORTWAIT;
+ smc_close_cancel_work(smc);
+ sk->sk_state = SMC_CLOSED;
+ smc_conn_free(&smc->conn);
+ release_clcsock = true;
sock_put(sk); /* passive closing */
break;
case SMC_PROCESSABORT:
case SMC_APPFINCLOSEWAIT:
+ sk->sk_state = SMC_PEERABORTWAIT;
+ smc_close_cancel_work(smc);
sk->sk_state = SMC_CLOSED;
- break;
- case SMC_PEERFINCLOSEWAIT:
- sock_put(sk); /* passive closing */
+ smc_conn_free(&smc->conn);
+ release_clcsock = true;
break;
case SMC_INIT:
case SMC_PEERABORTWAIT:
@@ -166,6 +171,12 @@ static void smc_close_active_abort(struct smc_sock *smc)
sock_set_flag(sk, SOCK_DEAD);
sk->sk_state_change(sk);
+
+ if (release_clcsock) {
+ release_sock(sk);
+ smc_clcsock_release(smc);
+ lock_sock(sk);
+ }
}
static inline bool smc_close_sent_any_close(struct smc_connection *conn)
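smc_close_cancel_work() drops the socket lock around the cancel calls because both work handlers take that lock themselves; cancelling synchronously while holding it would deadlock. The core of that pattern as a sketch (demo name illustrative):

    static void demo_cancel_under_sock_lock(struct sock *sk, struct work_struct *w)
    {
        release_sock(sk);     /* let a running handler acquire the lock and finish */
        cancel_work_sync(w);  /* sleeps until the handler is idle */
        lock_sock(sk);        /* re-take the lock; re-check state afterwards */
    }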
@@ -215,8 +226,6 @@ again:
if (sk->sk_state == SMC_ACTIVE) {
/* send close request */
rc = smc_close_final(conn);
- if (rc)
- break;
sk->sk_state = SMC_PEERCLOSEWAIT1;
} else {
/* peer event has changed the state */
@@ -229,8 +238,6 @@ again:
!smc_close_sent_any_close(conn)) {
/* just shutdown wr done, send close request */
rc = smc_close_final(conn);
- if (rc)
- break;
}
sk->sk_state = SMC_CLOSED;
break;
@@ -246,8 +253,6 @@ again:
goto again;
/* confirm close from peer */
rc = smc_close_final(conn);
- if (rc)
- break;
if (smc_cdc_rxed_any_close(conn)) {
/* peer has closed the socket already */
sk->sk_state = SMC_CLOSED;
@@ -263,8 +268,6 @@ again:
!smc_close_sent_any_close(conn)) {
/* just shutdown wr done, send close request */
rc = smc_close_final(conn);
- if (rc)
- break;
}
/* peer sending PeerConnectionClosed will cause transition */
break;
@@ -272,10 +275,12 @@ again:
/* peer sending PeerConnectionClosed will cause transition */
break;
case SMC_PROCESSABORT:
- smc_close_abort(conn);
+ rc = smc_close_abort(conn);
sk->sk_state = SMC_CLOSED;
break;
case SMC_PEERABORTWAIT:
+ sk->sk_state = SMC_CLOSED;
+ break;
case SMC_CLOSED:
/* nothing to do, add tracing in future patch */
break;
@@ -344,12 +349,6 @@ static void smc_close_passive_work(struct work_struct *work)
lock_sock(sk);
old_state = sk->sk_state;
- if (!conn->alert_token_local) {
- /* abnormal termination */
- smc_close_active_abort(smc);
- goto wakeup;
- }
-
rxflags = &conn->local_rx_ctrl.conn_state_flags;
if (rxflags->peer_conn_abort) {
/* peer has not received all data */
@@ -451,8 +450,6 @@ again:
goto again;
/* send close wr request */
rc = smc_close_wr(conn);
- if (rc)
- break;
sk->sk_state = SMC_PEERCLOSEWAIT1;
break;
case SMC_APPCLOSEWAIT1:
@@ -466,8 +463,6 @@ again:
goto again;
/* confirm close from peer */
rc = smc_close_wr(conn);
- if (rc)
- break;
sk->sk_state = SMC_APPCLOSEWAIT2;
break;
case SMC_APPCLOSEWAIT2:
diff --git a/net/smc/smc_close.h b/net/smc/smc_close.h
index e0e3b5df25d2..634fea2b7c95 100644
--- a/net/smc/smc_close.h
+++ b/net/smc/smc_close.h
@@ -24,5 +24,7 @@ int smc_close_active(struct smc_sock *smc);
int smc_close_shutdown_write(struct smc_sock *smc);
void smc_close_init(struct smc_sock *smc);
void smc_clcsock_release(struct smc_sock *smc);
+int smc_close_abort(struct smc_connection *conn);
+void smc_close_active_abort(struct smc_sock *smc);
#endif /* SMC_CLOSE_H */
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index 2ba97ff325a5..bb92c7c6214c 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -13,6 +13,8 @@
#include <linux/if_vlan.h>
#include <linux/random.h>
#include <linux/workqueue.h>
+#include <linux/wait.h>
+#include <linux/reboot.h>
#include <net/tcp.h>
#include <net/sock.h>
#include <rdma/ib_verbs.h>
@@ -39,23 +41,46 @@ static struct smc_lgr_list smc_lgr_list = { /* established link groups */
.num = 0,
};
+static atomic_t lgr_cnt; /* number of existing link groups */
+static DECLARE_WAIT_QUEUE_HEAD(lgrs_deleted);
+
static void smc_buf_free(struct smc_link_group *lgr, bool is_rmb,
struct smc_buf_desc *buf_desc);
+/* return head of link group list and its lock for a given link group */
+static inline struct list_head *smc_lgr_list_head(struct smc_link_group *lgr,
+ spinlock_t **lgr_lock)
+{
+ if (lgr->is_smcd) {
+ *lgr_lock = &lgr->smcd->lgr_lock;
+ return &lgr->smcd->lgr_list;
+ }
+
+ *lgr_lock = &smc_lgr_list.lock;
+ return &smc_lgr_list.list;
+}
+
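With SMC-D link groups now kept on a per-device list, smc_lgr_list_head() hands callers both the right list and the matching lock in one call. A usage sketch (the demo function is illustrative):

    static void demo_lgr_add(struct smc_link_group *lgr)
    {
        struct list_head *head;
        spinlock_t *lgr_lock;

        head = smc_lgr_list_head(lgr, &lgr_lock);
        spin_lock_bh(lgr_lock);
        list_add(&lgr->list, head);   /* same code path for SMC-R and SMC-D */
        spin_unlock_bh(lgr_lock);
    }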
static void smc_lgr_schedule_free_work(struct smc_link_group *lgr)
{
/* client link group creation always follows the server link group
* creation. For client use a somewhat higher removal delay time,
* otherwise there is a risk of out-of-sync link groups.
*/
- mod_delayed_work(system_wq, &lgr->free_work,
- (!lgr->is_smcd && lgr->role == SMC_CLNT) ?
- SMC_LGR_FREE_DELAY_CLNT : SMC_LGR_FREE_DELAY_SERV);
+ if (!lgr->freeing && !lgr->freefast) {
+ mod_delayed_work(system_wq, &lgr->free_work,
+ (!lgr->is_smcd && lgr->role == SMC_CLNT) ?
+ SMC_LGR_FREE_DELAY_CLNT :
+ SMC_LGR_FREE_DELAY_SERV);
+ }
}
void smc_lgr_schedule_free_work_fast(struct smc_link_group *lgr)
{
- mod_delayed_work(system_wq, &lgr->free_work, SMC_LGR_FREE_DELAY_FAST);
+ if (!lgr->freeing && !lgr->freefast) {
+ lgr->freefast = 1;
+ mod_delayed_work(system_wq, &lgr->free_work,
+ SMC_LGR_FREE_DELAY_FAST);
+ }
}
/* Register connection's alert token in our lookup structure.
@@ -134,16 +159,17 @@ static void smc_lgr_unregister_conn(struct smc_connection *conn)
__smc_lgr_unregister_conn(conn);
}
write_unlock_bh(&lgr->conns_lock);
+ conn->lgr = NULL;
}
/* Send delete link, either as client to request the initiation
* of the DELETE LINK sequence from server; or as server to
* initiate the delete processing. See smc_llc_rx_delete_link().
*/
-static int smc_link_send_delete(struct smc_link *lnk)
+static int smc_link_send_delete(struct smc_link *lnk, bool orderly)
{
if (lnk->state == SMC_LNK_ACTIVE &&
- !smc_llc_send_delete_link(lnk, SMC_LLC_REQ, true)) {
+ !smc_llc_send_delete_link(lnk, SMC_LLC_REQ, orderly)) {
smc_llc_link_deleting(lnk);
return 0;
}
@@ -157,48 +183,62 @@ static void smc_lgr_free_work(struct work_struct *work)
struct smc_link_group *lgr = container_of(to_delayed_work(work),
struct smc_link_group,
free_work);
+ spinlock_t *lgr_lock;
+ struct smc_link *lnk;
bool conns;
- spin_lock_bh(&smc_lgr_list.lock);
+ smc_lgr_list_head(lgr, &lgr_lock);
+ spin_lock_bh(lgr_lock);
+ if (lgr->freeing) {
+ spin_unlock_bh(lgr_lock);
+ return;
+ }
read_lock_bh(&lgr->conns_lock);
conns = RB_EMPTY_ROOT(&lgr->conns_all);
read_unlock_bh(&lgr->conns_lock);
if (!conns) { /* number of lgr connections is no longer zero */
- spin_unlock_bh(&smc_lgr_list.lock);
+ spin_unlock_bh(lgr_lock);
return;
}
- if (!list_empty(&lgr->list))
- list_del_init(&lgr->list); /* remove from smc_lgr_list */
- spin_unlock_bh(&smc_lgr_list.lock);
+ list_del_init(&lgr->list); /* remove from smc_lgr_list */
+ lnk = &lgr->lnk[SMC_SINGLE_LINK];
if (!lgr->is_smcd && !lgr->terminating) {
- struct smc_link *lnk = &lgr->lnk[SMC_SINGLE_LINK];
-
/* try to send del link msg, on error free lgr immediately */
if (lnk->state == SMC_LNK_ACTIVE &&
- !smc_link_send_delete(lnk)) {
+ !smc_link_send_delete(lnk, true)) {
/* reschedule in case we never receive a response */
smc_lgr_schedule_free_work(lgr);
+ spin_unlock_bh(lgr_lock);
return;
}
}
+ lgr->freeing = 1; /* this instance does the freeing, no new schedule */
+ spin_unlock_bh(lgr_lock);
+ cancel_delayed_work(&lgr->free_work);
+
+ if (!lgr->is_smcd && lnk->state != SMC_LNK_INACTIVE)
+ smc_llc_link_inactive(lnk);
+ if (lgr->is_smcd && !lgr->terminating)
+ smc_ism_signal_shutdown(lgr);
+ smc_lgr_free(lgr);
+}
- if (!delayed_work_pending(&lgr->free_work)) {
- struct smc_link *lnk = &lgr->lnk[SMC_SINGLE_LINK];
+static void smc_lgr_terminate_work(struct work_struct *work)
+{
+ struct smc_link_group *lgr = container_of(work, struct smc_link_group,
+ terminate_work);
- if (!lgr->is_smcd && lnk->state != SMC_LNK_INACTIVE)
- smc_llc_link_inactive(lnk);
- if (lgr->is_smcd)
- smc_ism_signal_shutdown(lgr);
- smc_lgr_free(lgr);
- }
+ smc_lgr_terminate(lgr, true);
}
/* create a new SMC link group */
static int smc_lgr_create(struct smc_sock *smc, struct smc_init_info *ini)
{
struct smc_link_group *lgr;
+ struct list_head *lgr_list;
struct smc_link *lnk;
+ spinlock_t *lgr_lock;
u8 rndvec[3];
int rc = 0;
int i;
@@ -217,6 +257,9 @@ static int smc_lgr_create(struct smc_sock *smc, struct smc_init_info *ini)
}
lgr->is_smcd = ini->is_smcd;
lgr->sync_err = 0;
+ lgr->terminating = 0;
+ lgr->freefast = 0;
+ lgr->freeing = 0;
lgr->vlan_id = ini->vlan_id;
rwlock_init(&lgr->sndbufs_lock);
rwlock_init(&lgr->rmbs_lock);
@@ -228,13 +271,20 @@ static int smc_lgr_create(struct smc_sock *smc, struct smc_init_info *ini)
smc_lgr_list.num += SMC_LGR_NUM_INCR;
memcpy(&lgr->id, (u8 *)&smc_lgr_list.num, SMC_LGR_ID_SIZE);
INIT_DELAYED_WORK(&lgr->free_work, smc_lgr_free_work);
+ INIT_WORK(&lgr->terminate_work, smc_lgr_terminate_work);
lgr->conns_all = RB_ROOT;
if (ini->is_smcd) {
/* SMC-D specific settings */
+ get_device(&ini->ism_dev->dev);
lgr->peer_gid = ini->ism_gid;
lgr->smcd = ini->ism_dev;
+ lgr_list = &ini->ism_dev->lgr_list;
+ lgr_lock = &lgr->smcd->lgr_lock;
+ lgr->peer_shutdown = 0;
+ atomic_inc(&ini->ism_dev->lgr_cnt);
} else {
/* SMC-R specific settings */
+ get_device(&ini->ib_dev->ibdev->dev);
lgr->role = smc->listen_smc ? SMC_SERV : SMC_CLNT;
memcpy(lgr->peer_systemid, ini->ib_lcl->id_for_peer,
SMC_SYSTEMID_LEN);
@@ -245,6 +295,8 @@ static int smc_lgr_create(struct smc_sock *smc, struct smc_init_info *ini)
lnk->link_id = SMC_SINGLE_LINK;
lnk->smcibdev = ini->ib_dev;
lnk->ibport = ini->ib_port;
+ lgr_list = &smc_lgr_list.list;
+ lgr_lock = &smc_lgr_list.lock;
lnk->path_mtu =
ini->ib_dev->pattr[ini->ib_port - 1].active_mtu;
if (!ini->ib_dev->initialized)
@@ -272,11 +324,13 @@ static int smc_lgr_create(struct smc_sock *smc, struct smc_init_info *ini)
rc = smc_wr_create_link(lnk);
if (rc)
goto destroy_qp;
+ atomic_inc(&lgr_cnt);
+ atomic_inc(&ini->ib_dev->lnk_cnt);
}
smc->conn.lgr = lgr;
- spin_lock_bh(&smc_lgr_list.lock);
- list_add(&lgr->list, &smc_lgr_list.list);
- spin_unlock_bh(&smc_lgr_list.lock);
+ spin_lock_bh(lgr_lock);
+ list_add(&lgr->list, lgr_list);
+ spin_unlock_bh(lgr_lock);
return 0;
destroy_qp:
@@ -309,7 +363,7 @@ static void smc_buf_unuse(struct smc_connection *conn,
conn->sndbuf_desc->used = 0;
if (conn->rmb_desc) {
if (!conn->rmb_desc->regerr) {
- if (!lgr->is_smcd) {
+ if (!lgr->is_smcd && !list_empty(&lgr->list)) {
/* unregister rmb with peer */
smc_llc_do_delete_rkey(
&lgr->lnk[SMC_SINGLE_LINK],
@@ -335,14 +389,16 @@ void smc_conn_free(struct smc_connection *conn)
if (!lgr)
return;
if (lgr->is_smcd) {
- smc_ism_unset_conn(conn);
+ if (!list_empty(&lgr->list))
+ smc_ism_unset_conn(conn);
tasklet_kill(&conn->rx_tsklet);
} else {
smc_cdc_tx_dismiss_slots(conn);
}
- smc_lgr_unregister_conn(conn);
- smc_buf_unuse(conn, lgr); /* allow buffer reuse */
- conn->lgr = NULL;
+ if (!list_empty(&lgr->list)) {
+ smc_lgr_unregister_conn(conn);
+ smc_buf_unuse(conn, lgr); /* allow buffer reuse */
+ }
if (!lgr->conns_num)
smc_lgr_schedule_free_work(lgr);
@@ -357,6 +413,8 @@ static void smc_link_clear(struct smc_link *lnk)
smc_ib_destroy_queue_pair(lnk);
smc_ib_dealloc_protection_domain(lnk);
smc_wr_free_link_mem(lnk);
+ if (!atomic_dec_return(&lnk->smcibdev->lnk_cnt))
+ wake_up(&lnk->smcibdev->lnks_deleted);
}
static void smcr_buf_free(struct smc_link_group *lgr, bool is_rmb,
@@ -433,24 +491,101 @@ static void smc_lgr_free_bufs(struct smc_link_group *lgr)
static void smc_lgr_free(struct smc_link_group *lgr)
{
smc_lgr_free_bufs(lgr);
- if (lgr->is_smcd)
- smc_ism_put_vlan(lgr->smcd, lgr->vlan_id);
- else
+ if (lgr->is_smcd) {
+ if (!lgr->terminating) {
+ smc_ism_put_vlan(lgr->smcd, lgr->vlan_id);
+ put_device(&lgr->smcd->dev);
+ }
+ if (!atomic_dec_return(&lgr->smcd->lgr_cnt))
+ wake_up(&lgr->smcd->lgrs_deleted);
+ } else {
smc_link_clear(&lgr->lnk[SMC_SINGLE_LINK]);
+ put_device(&lgr->lnk[SMC_SINGLE_LINK].smcibdev->ibdev->dev);
+ if (!atomic_dec_return(&lgr_cnt))
+ wake_up(&lgrs_deleted);
+ }
kfree(lgr);
}
void smc_lgr_forget(struct smc_link_group *lgr)
{
- spin_lock_bh(&smc_lgr_list.lock);
+ struct list_head *lgr_list;
+ spinlock_t *lgr_lock;
+
+ lgr_list = smc_lgr_list_head(lgr, &lgr_lock);
+ spin_lock_bh(lgr_lock);
/* do not use this link group for new connections */
- if (!list_empty(&lgr->list))
- list_del_init(&lgr->list);
- spin_unlock_bh(&smc_lgr_list.lock);
+ if (!list_empty(lgr_list))
+ list_del_init(lgr_list);
+ spin_unlock_bh(lgr_lock);
+}
+
+static void smcd_unregister_all_dmbs(struct smc_link_group *lgr)
+{
+ int i;
+
+ for (i = 0; i < SMC_RMBE_SIZES; i++) {
+ struct smc_buf_desc *buf_desc;
+
+ list_for_each_entry(buf_desc, &lgr->rmbs[i], list) {
+ buf_desc->len += sizeof(struct smcd_cdc_msg);
+ smc_ism_unregister_dmb(lgr->smcd, buf_desc);
+ }
+ }
}
-/* terminate linkgroup abnormally */
-static void __smc_lgr_terminate(struct smc_link_group *lgr)
+static void smc_sk_wake_ups(struct smc_sock *smc)
+{
+ smc->sk.sk_write_space(&smc->sk);
+ smc->sk.sk_data_ready(&smc->sk);
+ smc->sk.sk_state_change(&smc->sk);
+}
+
+/* kill a connection */
+static void smc_conn_kill(struct smc_connection *conn, bool soft)
+{
+ struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
+
+ if (conn->lgr->is_smcd && conn->lgr->peer_shutdown)
+ conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
+ else
+ smc_close_abort(conn);
+ conn->killed = 1;
+ smc->sk.sk_err = ECONNABORTED;
+ smc_sk_wake_ups(smc);
+ if (conn->lgr->is_smcd) {
+ smc_ism_unset_conn(conn);
+ if (soft)
+ tasklet_kill(&conn->rx_tsklet);
+ else
+ tasklet_unlock_wait(&conn->rx_tsklet);
+ } else {
+ smc_cdc_tx_dismiss_slots(conn);
+ }
+ smc_lgr_unregister_conn(conn);
+ smc_close_active_abort(smc);
+}
+
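smc_conn_kill() sets sk_err and then calls smc_sk_wake_ups(), invoking all three socket callbacks so blocked senders, receivers, and pollers each notice the abort. Sketch of that wake-everything step (demo name illustrative):

    static void demo_abort_wakeups(struct sock *sk)
    {
        sk->sk_err = ECONNABORTED;
        sk->sk_write_space(sk);   /* wake senders blocked on buffer space */
        sk->sk_data_ready(sk);    /* wake blocked receivers */
        sk->sk_state_change(sk);  /* wake pollers and state waiters */
    }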
+static void smc_lgr_cleanup(struct smc_link_group *lgr)
+{
+ if (lgr->is_smcd) {
+ smc_ism_signal_shutdown(lgr);
+ smcd_unregister_all_dmbs(lgr);
+ smc_ism_put_vlan(lgr->smcd, lgr->vlan_id);
+ put_device(&lgr->smcd->dev);
+ } else {
+ struct smc_link *lnk = &lgr->lnk[SMC_SINGLE_LINK];
+
+ wake_up(&lnk->wr_reg_wait);
+ if (lnk->state != SMC_LNK_INACTIVE) {
+ smc_link_send_delete(lnk, false);
+ smc_llc_link_inactive(lnk);
+ }
+ }
+}
+
+/* terminate link group */
+static void __smc_lgr_terminate(struct smc_link_group *lgr, bool soft)
{
struct smc_connection *conn;
struct smc_sock *smc;
@@ -458,80 +593,161 @@ static void __smc_lgr_terminate(struct smc_link_group *lgr)
if (lgr->terminating)
return; /* lgr already terminating */
+ if (!soft)
+ cancel_delayed_work_sync(&lgr->free_work);
lgr->terminating = 1;
- if (!list_empty(&lgr->list)) /* forget lgr */
- list_del_init(&lgr->list);
if (!lgr->is_smcd)
smc_llc_link_inactive(&lgr->lnk[SMC_SINGLE_LINK]);
- write_lock_bh(&lgr->conns_lock);
+ /* kill remaining link group connections */
+ read_lock_bh(&lgr->conns_lock);
node = rb_first(&lgr->conns_all);
while (node) {
+ read_unlock_bh(&lgr->conns_lock);
conn = rb_entry(node, struct smc_connection, alert_node);
smc = container_of(conn, struct smc_sock, conn);
- sock_hold(&smc->sk); /* sock_put in close work */
- conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
- __smc_lgr_unregister_conn(conn);
- conn->lgr = NULL;
- write_unlock_bh(&lgr->conns_lock);
- if (!schedule_work(&conn->close_work))
- sock_put(&smc->sk);
- write_lock_bh(&lgr->conns_lock);
+ sock_hold(&smc->sk); /* sock_put below */
+ lock_sock(&smc->sk);
+ smc_conn_kill(conn, soft);
+ release_sock(&smc->sk);
+ sock_put(&smc->sk); /* sock_hold above */
+ read_lock_bh(&lgr->conns_lock);
node = rb_first(&lgr->conns_all);
}
- write_unlock_bh(&lgr->conns_lock);
- if (!lgr->is_smcd)
- wake_up(&lgr->lnk[SMC_SINGLE_LINK].wr_reg_wait);
- smc_lgr_schedule_free_work(lgr);
+ read_unlock_bh(&lgr->conns_lock);
+ smc_lgr_cleanup(lgr);
+ if (soft)
+ smc_lgr_schedule_free_work_fast(lgr);
+ else
+ smc_lgr_free(lgr);
}
-void smc_lgr_terminate(struct smc_link_group *lgr)
+/* unlink and terminate link group
+ * @soft: true if link group shutdown can take its time
+ * false if immediate link group shutdown is required
+ */
+void smc_lgr_terminate(struct smc_link_group *lgr, bool soft)
{
- spin_lock_bh(&smc_lgr_list.lock);
- __smc_lgr_terminate(lgr);
- spin_unlock_bh(&smc_lgr_list.lock);
+ spinlock_t *lgr_lock;
+
+ smc_lgr_list_head(lgr, &lgr_lock);
+ spin_lock_bh(lgr_lock);
+ if (lgr->terminating) {
+ spin_unlock_bh(lgr_lock);
+ return; /* lgr already terminating */
+ }
+ if (!soft)
+ lgr->freeing = 1;
+ list_del_init(&lgr->list);
+ spin_unlock_bh(lgr_lock);
+ __smc_lgr_terminate(lgr, soft);
}
/* Called when IB port is terminated */
void smc_port_terminate(struct smc_ib_device *smcibdev, u8 ibport)
{
struct smc_link_group *lgr, *l;
+ LIST_HEAD(lgr_free_list);
spin_lock_bh(&smc_lgr_list.lock);
list_for_each_entry_safe(lgr, l, &smc_lgr_list.list, list) {
if (!lgr->is_smcd &&
lgr->lnk[SMC_SINGLE_LINK].smcibdev == smcibdev &&
- lgr->lnk[SMC_SINGLE_LINK].ibport == ibport)
- __smc_lgr_terminate(lgr);
+ lgr->lnk[SMC_SINGLE_LINK].ibport == ibport) {
+ list_move(&lgr->list, &lgr_free_list);
+ lgr->freeing = 1;
+ }
}
spin_unlock_bh(&smc_lgr_list.lock);
+
+ list_for_each_entry_safe(lgr, l, &lgr_free_list, list) {
+ list_del_init(&lgr->list);
+ __smc_lgr_terminate(lgr, false);
+ }
}
-/* Called when SMC-D device is terminated or peer is lost */
+/* Called when a peer lgr shutdown (orderly or abnormal) is received */
void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid, unsigned short vlan)
{
struct smc_link_group *lgr, *l;
LIST_HEAD(lgr_free_list);
/* run common cleanup function and build free list */
- spin_lock_bh(&smc_lgr_list.lock);
- list_for_each_entry_safe(lgr, l, &smc_lgr_list.list, list) {
- if (lgr->is_smcd && lgr->smcd == dev &&
- (!peer_gid || lgr->peer_gid == peer_gid) &&
+ spin_lock_bh(&dev->lgr_lock);
+ list_for_each_entry_safe(lgr, l, &dev->lgr_list, list) {
+ if ((!peer_gid || lgr->peer_gid == peer_gid) &&
(vlan == VLAN_VID_MASK || lgr->vlan_id == vlan)) {
- __smc_lgr_terminate(lgr);
+ if (peer_gid) /* peer triggered termination */
+ lgr->peer_shutdown = 1;
list_move(&lgr->list, &lgr_free_list);
}
}
- spin_unlock_bh(&smc_lgr_list.lock);
+ spin_unlock_bh(&dev->lgr_lock);
/* cancel the regular free workers and actually free lgrs */
list_for_each_entry_safe(lgr, l, &lgr_free_list, list) {
list_del_init(&lgr->list);
- cancel_delayed_work_sync(&lgr->free_work);
- if (!peer_gid && vlan == VLAN_VID_MASK) /* dev terminated? */
- smc_ism_signal_shutdown(lgr);
- smc_lgr_free(lgr);
+ schedule_work(&lgr->terminate_work);
+ }
+}
+
+/* Called when an SMCD device is removed or the smc module is unloaded */
+void smc_smcd_terminate_all(struct smcd_dev *smcd)
+{
+ struct smc_link_group *lgr, *lg;
+ LIST_HEAD(lgr_free_list);
+
+ spin_lock_bh(&smcd->lgr_lock);
+ list_splice_init(&smcd->lgr_list, &lgr_free_list);
+ list_for_each_entry(lgr, &lgr_free_list, list)
+ lgr->freeing = 1;
+ spin_unlock_bh(&smcd->lgr_lock);
+
+ list_for_each_entry_safe(lgr, lg, &lgr_free_list, list) {
+ list_del_init(&lgr->list);
+ __smc_lgr_terminate(lgr, false);
+ }
+
+ if (atomic_read(&smcd->lgr_cnt))
+ wait_event(smcd->lgrs_deleted, !atomic_read(&smcd->lgr_cnt));
+}
+
+/* Called when an SMCR device is removed or the smc module is unloaded.
+ * If smcibdev is given, all SMCR link groups using this device are terminated.
+ * If smcibdev is NULL, all SMCR link groups are terminated.
+ */
+void smc_smcr_terminate_all(struct smc_ib_device *smcibdev)
+{
+ struct smc_link_group *lgr, *lg;
+ LIST_HEAD(lgr_free_list);
+
+ spin_lock_bh(&smc_lgr_list.lock);
+ if (!smcibdev) {
+ list_splice_init(&smc_lgr_list.list, &lgr_free_list);
+ list_for_each_entry(lgr, &lgr_free_list, list)
+ lgr->freeing = 1;
+ } else {
+ list_for_each_entry_safe(lgr, lg, &smc_lgr_list.list, list) {
+ if (lgr->lnk[SMC_SINGLE_LINK].smcibdev == smcibdev) {
+ list_move(&lgr->list, &lgr_free_list);
+ lgr->freeing = 1;
+ }
+ }
+ }
+ spin_unlock_bh(&smc_lgr_list.lock);
+
+ list_for_each_entry_safe(lgr, lg, &lgr_free_list, list) {
+ list_del_init(&lgr->list);
+ __smc_lgr_terminate(lgr, false);
+ }
+
+ if (smcibdev) {
+ if (atomic_read(&smcibdev->lnk_cnt))
+ wait_event(smcibdev->lnks_deleted,
+ !atomic_read(&smcibdev->lnk_cnt));
+ } else {
+ if (atomic_read(&lgr_cnt))
+ wait_event(lgrs_deleted, !atomic_read(&lgr_cnt));
}
}
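The new lgr_cnt/lnk_cnt counters implement a counted-teardown handshake: creation increments, freeing decrements, and the last decrement wakes whoever is draining in smc_smcr_terminate_all() or smc_smcd_terminate_all(). The bare pattern as a sketch (demo names illustrative):

    static atomic_t demo_obj_cnt = ATOMIC_INIT(0);
    static DECLARE_WAIT_QUEUE_HEAD(demo_objs_deleted);

    static void demo_obj_created(void)
    {
        atomic_inc(&demo_obj_cnt);
    }

    static void demo_obj_freed(void)
    {
        if (!atomic_dec_return(&demo_obj_cnt))
            wake_up(&demo_objs_deleted);  /* last object gone */
    }

    static void demo_wait_all_freed(void)
    {
        if (atomic_read(&demo_obj_cnt))
            wait_event(demo_objs_deleted, !atomic_read(&demo_obj_cnt));
    }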
@@ -607,10 +823,14 @@ static bool smcd_lgr_match(struct smc_link_group *lgr,
int smc_conn_create(struct smc_sock *smc, struct smc_init_info *ini)
{
struct smc_connection *conn = &smc->conn;
+ struct list_head *lgr_list;
struct smc_link_group *lgr;
enum smc_lgr_role role;
+ spinlock_t *lgr_lock;
int rc = 0;
+ lgr_list = ini->is_smcd ? &ini->ism_dev->lgr_list : &smc_lgr_list.list;
+ lgr_lock = ini->is_smcd ? &ini->ism_dev->lgr_lock : &smc_lgr_list.lock;
ini->cln_first_contact = SMC_FIRST_CONTACT;
role = smc->listen_smc ? SMC_SERV : SMC_CLNT;
if (role == SMC_CLNT && ini->srv_first_contact)
@@ -618,8 +838,8 @@ int smc_conn_create(struct smc_sock *smc, struct smc_init_info *ini)
goto create;
/* determine if an existing link group can be reused */
- spin_lock_bh(&smc_lgr_list.lock);
- list_for_each_entry(lgr, &smc_lgr_list.list, list) {
+ spin_lock_bh(lgr_lock);
+ list_for_each_entry(lgr, lgr_list, list) {
write_lock_bh(&lgr->conns_lock);
if ((ini->is_smcd ?
smcd_lgr_match(lgr, ini->ism_dev, ini->ism_gid) :
@@ -639,7 +859,7 @@ int smc_conn_create(struct smc_sock *smc, struct smc_init_info *ini)
}
write_unlock_bh(&lgr->conns_lock);
}
- spin_unlock_bh(&smc_lgr_list.lock);
+ spin_unlock_bh(lgr_lock);
if (role == SMC_CLNT && !ini->srv_first_contact &&
ini->cln_first_contact == SMC_FIRST_CONTACT) {
@@ -1027,29 +1247,63 @@ int smc_rmb_rtoken_handling(struct smc_connection *conn,
return 0;
}
-/* Called (from smc_exit) when module is removed */
-void smc_core_exit(void)
+static void smc_core_going_away(void)
{
- struct smc_link_group *lgr, *lg;
- LIST_HEAD(lgr_freeing_list);
+ struct smc_ib_device *smcibdev;
+ struct smcd_dev *smcd;
- spin_lock_bh(&smc_lgr_list.lock);
- if (!list_empty(&smc_lgr_list.list))
- list_splice_init(&smc_lgr_list.list, &lgr_freeing_list);
- spin_unlock_bh(&smc_lgr_list.lock);
- list_for_each_entry_safe(lgr, lg, &lgr_freeing_list, list) {
- list_del_init(&lgr->list);
- if (!lgr->is_smcd) {
- struct smc_link *lnk = &lgr->lnk[SMC_SINGLE_LINK];
+ spin_lock(&smc_ib_devices.lock);
+ list_for_each_entry(smcibdev, &smc_ib_devices.list, list) {
+ int i;
- if (lnk->state == SMC_LNK_ACTIVE)
- smc_llc_send_delete_link(lnk, SMC_LLC_REQ,
- false);
- smc_llc_link_inactive(lnk);
- }
- cancel_delayed_work_sync(&lgr->free_work);
- if (lgr->is_smcd)
- smc_ism_signal_shutdown(lgr);
- smc_lgr_free(lgr); /* free link group */
+ for (i = 0; i < SMC_MAX_PORTS; i++)
+ set_bit(i, smcibdev->ports_going_away);
+ }
+ spin_unlock(&smc_ib_devices.lock);
+
+ spin_lock(&smcd_dev_list.lock);
+ list_for_each_entry(smcd, &smcd_dev_list.list, list) {
+ smcd->going_away = 1;
}
+ spin_unlock(&smcd_dev_list.lock);
+}
+
+/* Clean up all SMC link groups */
+static void smc_lgrs_shutdown(void)
+{
+ struct smcd_dev *smcd;
+
+ smc_core_going_away();
+
+ smc_smcr_terminate_all(NULL);
+
+ spin_lock(&smcd_dev_list.lock);
+ list_for_each_entry(smcd, &smcd_dev_list.list, list)
+ smc_smcd_terminate_all(smcd);
+ spin_unlock(&smcd_dev_list.lock);
+}
+
+static int smc_core_reboot_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ smc_lgrs_shutdown();
+
+ return 0;
+}
+
+static struct notifier_block smc_reboot_notifier = {
+ .notifier_call = smc_core_reboot_event,
+};
+
+int __init smc_core_init(void)
+{
+ atomic_set(&lgr_cnt, 0);
+ return register_reboot_notifier(&smc_reboot_notifier);
+}
+
+/* Called (from smc_exit) when module is removed */
+void smc_core_exit(void)
+{
+ unregister_reboot_notifier(&smc_reboot_notifier);
+ smc_lgrs_shutdown();
}
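smc_core_init() now hooks the reboot path so link groups are torn down before the machine goes away. The general shape of a reboot notifier as a sketch (demo names illustrative; the real callback is smc_core_reboot_event() above):

    static int demo_reboot_event(struct notifier_block *nb,
                                 unsigned long event, void *ptr)
    {
        /* flush subsystem state; runs on reboot/halt/poweroff */
        return NOTIFY_DONE;
    }

    static struct notifier_block demo_reboot_nb = {
        .notifier_call = demo_reboot_event,
    };

    static int __init demo_reboot_init(void)
    {
        return register_reboot_notifier(&demo_reboot_nb);
    }

    static void __exit demo_reboot_exit(void)
    {
        unregister_reboot_notifier(&demo_reboot_nb);
    }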
diff --git a/net/smc/smc_core.h b/net/smc/smc_core.h
index c00ac61dc129..c472e12951d1 100644
--- a/net/smc/smc_core.h
+++ b/net/smc/smc_core.h
@@ -202,8 +202,11 @@ struct smc_link_group {
u8 id[SMC_LGR_ID_SIZE]; /* unique lgr id */
struct delayed_work free_work; /* delayed freeing of an lgr */
+ struct work_struct terminate_work; /* abnormal lgr termination */
u8 sync_err : 1; /* lgr no longer fits to peer */
u8 terminating : 1;/* lgr is terminating */
+ u8 freefast : 1; /* free worker scheduled fast */
+ u8 freeing : 1; /* lgr is being freed */
bool is_smcd; /* SMC-R or SMC-D */
union {
@@ -225,6 +228,8 @@ struct smc_link_group {
/* Peer GID (remote) */
struct smcd_dev *smcd;
/* ISM device for VLAN reg. */
+ u8 peer_shutdown : 1;
+ /* peer triggered shutdown */
};
};
};
@@ -280,15 +285,23 @@ static inline struct smc_connection *smc_lgr_find_conn(
return res;
}
+static inline void smc_lgr_terminate_sched(struct smc_link_group *lgr)
+{
+ if (!lgr->terminating && !lgr->freeing)
+ schedule_work(&lgr->terminate_work);
+}
+
struct smc_sock;
struct smc_clc_msg_accept_confirm;
struct smc_clc_msg_local;
void smc_lgr_forget(struct smc_link_group *lgr);
-void smc_lgr_terminate(struct smc_link_group *lgr);
+void smc_lgr_terminate(struct smc_link_group *lgr, bool soft);
void smc_port_terminate(struct smc_ib_device *smcibdev, u8 ibport);
void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid,
unsigned short vlan);
+void smc_smcd_terminate_all(struct smcd_dev *dev);
+void smc_smcr_terminate_all(struct smc_ib_device *smcibdev);
int smc_buf_create(struct smc_sock *smc, bool is_smcd);
int smc_uncompress_bufsize(u8 compressed);
int smc_rmb_rtoken_handling(struct smc_connection *conn,
@@ -305,6 +318,7 @@ void smc_conn_free(struct smc_connection *conn);
int smc_conn_create(struct smc_sock *smc, struct smc_init_info *ini);
void smcd_conn_free(struct smc_connection *conn);
void smc_lgr_schedule_free_work_fast(struct smc_link_group *lgr);
+int smc_core_init(void);
void smc_core_exit(void);
static inline struct smc_link_group *smc_get_lgr(struct smc_link *link)
diff --git a/net/smc/smc_ib.c b/net/smc/smc_ib.c
index d14ca4af6f94..548632621f4b 100644
--- a/net/smc/smc_ib.c
+++ b/net/smc/smc_ib.c
@@ -15,6 +15,7 @@
#include <linux/random.h>
#include <linux/workqueue.h>
#include <linux/scatterlist.h>
+#include <linux/wait.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>
@@ -242,8 +243,12 @@ static void smc_ib_port_event_work(struct work_struct *work)
for_each_set_bit(port_idx, &smcibdev->port_event_mask, SMC_MAX_PORTS) {
smc_ib_remember_port_attr(smcibdev, port_idx + 1);
clear_bit(port_idx, &smcibdev->port_event_mask);
- if (!smc_ib_port_active(smcibdev, port_idx + 1))
+ if (!smc_ib_port_active(smcibdev, port_idx + 1)) {
+ set_bit(port_idx, smcibdev->ports_going_away);
smc_port_terminate(smcibdev, port_idx + 1);
+ } else {
+ clear_bit(port_idx, smcibdev->ports_going_away);
+ }
}
}
@@ -259,8 +264,10 @@ static void smc_ib_global_event_handler(struct ib_event_handler *handler,
switch (ibevent->event) {
case IB_EVENT_DEVICE_FATAL:
/* terminate all ports on device */
- for (port_idx = 0; port_idx < SMC_MAX_PORTS; port_idx++)
+ for (port_idx = 0; port_idx < SMC_MAX_PORTS; port_idx++) {
set_bit(port_idx, &smcibdev->port_event_mask);
+ set_bit(port_idx, smcibdev->ports_going_away);
+ }
schedule_work(&smcibdev->port_event_work);
break;
case IB_EVENT_PORT_ERR:
@@ -269,6 +276,10 @@ static void smc_ib_global_event_handler(struct ib_event_handler *handler,
port_idx = ibevent->element.port_num - 1;
if (port_idx < SMC_MAX_PORTS) {
set_bit(port_idx, &smcibdev->port_event_mask);
+ if (ibevent->event == IB_EVENT_PORT_ERR)
+ set_bit(port_idx, smcibdev->ports_going_away);
+ else if (ibevent->event == IB_EVENT_PORT_ACTIVE)
+ clear_bit(port_idx, smcibdev->ports_going_away);
schedule_work(&smcibdev->port_event_work);
}
break;
@@ -307,6 +318,7 @@ static void smc_ib_qp_event_handler(struct ib_event *ibevent, void *priv)
port_idx = ibevent->element.qp->port - 1;
if (port_idx < SMC_MAX_PORTS) {
set_bit(port_idx, &smcibdev->port_event_mask);
+ set_bit(port_idx, smcibdev->ports_going_away);
schedule_work(&smcibdev->port_event_work);
}
break;
@@ -509,9 +521,9 @@ static void smc_ib_cleanup_per_ibdev(struct smc_ib_device *smcibdev)
if (!smcibdev->initialized)
return;
smcibdev->initialized = 0;
- smc_wr_remove_dev(smcibdev);
ib_destroy_cq(smcibdev->roce_cq_recv);
ib_destroy_cq(smcibdev->roce_cq_send);
+ smc_wr_remove_dev(smcibdev);
}
static struct ib_client smc_ib_client;
@@ -532,7 +544,8 @@ static void smc_ib_add_dev(struct ib_device *ibdev)
smcibdev->ibdev = ibdev;
INIT_WORK(&smcibdev->port_event_work, smc_ib_port_event_work);
-
+ atomic_set(&smcibdev->lnk_cnt, 0);
+ init_waitqueue_head(&smcibdev->lnks_deleted);
spin_lock(&smc_ib_devices.lock);
list_add_tail(&smcibdev->list, &smc_ib_devices.list);
spin_unlock(&smc_ib_devices.lock);
@@ -554,7 +567,7 @@ static void smc_ib_add_dev(struct ib_device *ibdev)
schedule_work(&smcibdev->port_event_work);
}
-/* callback function for ib_register_client() */
+/* callback function for ib_unregister_client() */
static void smc_ib_remove_dev(struct ib_device *ibdev, void *client_data)
{
struct smc_ib_device *smcibdev;
@@ -564,6 +577,7 @@ static void smc_ib_remove_dev(struct ib_device *ibdev, void *client_data)
spin_lock(&smc_ib_devices.lock);
list_del_init(&smcibdev->list); /* remove from smc_ib_devices */
spin_unlock(&smc_ib_devices.lock);
+ smc_smcr_terminate_all(smcibdev);
smc_ib_cleanup_per_ibdev(smcibdev);
ib_unregister_event_handler(&smcibdev->event_handler);
kfree(smcibdev);
diff --git a/net/smc/smc_ib.h b/net/smc/smc_ib.h
index da60ab9e8d70..255db87547d3 100644
--- a/net/smc/smc_ib.h
+++ b/net/smc/smc_ib.h
@@ -14,6 +14,7 @@
#include <linux/interrupt.h>
#include <linux/if_ether.h>
+#include <linux/wait.h>
#include <rdma/ib_verbs.h>
#include <net/smc.h>
@@ -47,6 +48,9 @@ struct smc_ib_device { /* ib-device infos for smc */
u8 initialized : 1; /* ib dev CQ, evthdl done */
struct work_struct port_event_work;
unsigned long port_event_mask;
+ DECLARE_BITMAP(ports_going_away, SMC_MAX_PORTS);
+ atomic_t lnk_cnt; /* number of links on ibdev */
+ wait_queue_head_t lnks_deleted; /* wait for removal of all links */
};
struct smc_buf_desc;
diff --git a/net/smc/smc_ism.c b/net/smc/smc_ism.c
index e89e918b88e0..5c4727d5066e 100644
--- a/net/smc/smc_ism.c
+++ b/net/smc/smc_ism.c
@@ -146,6 +146,10 @@ out:
int smc_ism_unregister_dmb(struct smcd_dev *smcd, struct smc_buf_desc *dmb_desc)
{
struct smcd_dmb dmb;
+ int rc = 0;
+
+ if (!dmb_desc->dma_addr)
+ return rc;
memset(&dmb, 0, sizeof(dmb));
dmb.dmb_tok = dmb_desc->token;
@@ -153,7 +157,13 @@ int smc_ism_unregister_dmb(struct smcd_dev *smcd, struct smc_buf_desc *dmb_desc)
dmb.cpu_addr = dmb_desc->cpu_addr;
dmb.dma_addr = dmb_desc->dma_addr;
dmb.dmb_len = dmb_desc->len;
- return smcd->ops->unregister_dmb(smcd, &dmb);
+ rc = smcd->ops->unregister_dmb(smcd, &dmb);
+ if (!rc || rc == ISM_ERROR) {
+ dmb_desc->cpu_addr = NULL;
+ dmb_desc->dma_addr = 0;
+ }
+
+ return rc;
}
int smc_ism_register_dmb(struct smc_link_group *lgr, int dmb_len,
@@ -226,6 +236,9 @@ int smc_ism_signal_shutdown(struct smc_link_group *lgr)
int rc;
union smcd_sw_event_info ev_info;
+ if (lgr->peer_shutdown)
+ return 0;
+
memcpy(ev_info.uid, lgr->id, SMC_LGR_ID_SIZE);
ev_info.vlan_id = lgr->vlan_id;
ev_info.code = ISM_EVENT_REQUEST;
@@ -286,7 +299,10 @@ struct smcd_dev *smcd_alloc_dev(struct device *parent, const char *name,
smc_pnetid_by_dev_port(parent, 0, smcd->pnetid);
spin_lock_init(&smcd->lock);
+ spin_lock_init(&smcd->lgr_lock);
INIT_LIST_HEAD(&smcd->vlan);
+ INIT_LIST_HEAD(&smcd->lgr_list);
+ init_waitqueue_head(&smcd->lgrs_deleted);
smcd->event_wq = alloc_ordered_workqueue("ism_evt_wq-%s",
WQ_MEM_RECLAIM, name);
if (!smcd->event_wq) {
@@ -311,11 +327,12 @@ EXPORT_SYMBOL_GPL(smcd_register_dev);
void smcd_unregister_dev(struct smcd_dev *smcd)
{
spin_lock(&smcd_dev_list.lock);
- list_del(&smcd->list);
+ list_del_init(&smcd->list);
spin_unlock(&smcd_dev_list.lock);
+ smcd->going_away = 1;
+ smc_smcd_terminate_all(smcd);
flush_workqueue(smcd->event_wq);
destroy_workqueue(smcd->event_wq);
- smc_smcd_terminate(smcd, 0, VLAN_VID_MASK);
device_del(&smcd->dev);
}
@@ -342,6 +359,8 @@ void smcd_handle_event(struct smcd_dev *smcd, struct smcd_event *event)
{
struct smc_ism_event_work *wrk;
+ if (smcd->going_away)
+ return;
/* copy event to event work queue, and let it be handled there */
wrk = kmalloc(sizeof(*wrk), GFP_ATOMIC);
if (!wrk)
@@ -367,7 +386,7 @@ void smcd_handle_irq(struct smcd_dev *smcd, unsigned int dmbno)
spin_lock_irqsave(&smcd->lock, flags);
conn = smcd->conn[dmbno];
- if (conn)
+ if (conn && !conn->killed)
tasklet_schedule(&conn->rx_tsklet);
spin_unlock_irqrestore(&smcd->lock, flags);
}
diff --git a/net/smc/smc_llc.c b/net/smc/smc_llc.c
index 4fd60c522802..a9f6431dd69a 100644
--- a/net/smc/smc_llc.c
+++ b/net/smc/smc_llc.c
@@ -475,7 +475,7 @@ static void smc_llc_rx_delete_link(struct smc_link *link,
smc_llc_prep_delete_link(llc, link, SMC_LLC_RESP, true);
}
smc_llc_send_message(link, llc, sizeof(*llc));
- smc_lgr_schedule_free_work_fast(lgr);
+ smc_lgr_terminate_sched(lgr);
}
}
@@ -614,7 +614,7 @@ static void smc_llc_testlink_work(struct work_struct *work)
rc = wait_for_completion_interruptible_timeout(&link->llc_testlink_resp,
SMC_LLC_WAIT_TIME);
if (rc <= 0) {
- smc_lgr_terminate(smc_get_lgr(link));
+ smc_lgr_terminate(smc_get_lgr(link), true);
return;
}
next_interval = link->llc_testlink_time;
@@ -656,6 +656,7 @@ void smc_llc_link_active(struct smc_link *link, int testlink_time)
void smc_llc_link_deleting(struct smc_link *link)
{
link->state = SMC_LNK_DELETING;
+ smc_wr_wakeup_tx_wait(link);
}
/* called in tasklet context */
@@ -663,6 +664,8 @@ void smc_llc_link_inactive(struct smc_link *link)
{
link->state = SMC_LNK_INACTIVE;
cancel_delayed_work(&link->llc_testlink_wrk);
+ smc_wr_wakeup_reg_wait(link);
+ smc_wr_wakeup_tx_wait(link);
}
/* called in worker context */
@@ -695,9 +698,11 @@ int smc_llc_do_confirm_rkey(struct smc_link *link,
int smc_llc_do_delete_rkey(struct smc_link *link,
struct smc_buf_desc *rmb_desc)
{
- int rc;
+ int rc = 0;
mutex_lock(&link->llc_delete_rkey_mutex);
+ if (link->state != SMC_LNK_ACTIVE)
+ goto out;
reinit_completion(&link->llc_delete_rkey);
rc = smc_llc_send_delete_rkey(link, rmb_desc);
if (rc)
diff --git a/net/smc/smc_pnet.c b/net/smc/smc_pnet.c
index 571e6d84da3b..82dedf052d86 100644
--- a/net/smc/smc_pnet.c
+++ b/net/smc/smc_pnet.c
@@ -779,6 +779,7 @@ static void smc_pnet_find_rdma_dev(struct net_device *netdev,
dev_put(ndev);
if (netdev == ndev &&
smc_ib_port_active(ibdev, i) &&
+ !test_bit(i - 1, ibdev->ports_going_away) &&
!smc_ib_determine_gid(ibdev, i, ini->vlan_id,
ini->ib_gid, NULL)) {
ini->ib_dev = ibdev;
@@ -818,6 +819,7 @@ static void smc_pnet_find_roce_by_pnetid(struct net_device *ndev,
continue;
if (smc_pnet_match(ibdev->pnetid[i - 1], ndev_pnetid) &&
smc_ib_port_active(ibdev, i) &&
+ !test_bit(i - 1, ibdev->ports_going_away) &&
!smc_ib_determine_gid(ibdev, i, ini->vlan_id,
ini->ib_gid, NULL)) {
ini->ib_dev = ibdev;
@@ -844,7 +846,8 @@ static void smc_pnet_find_ism_by_pnetid(struct net_device *ndev,
spin_lock(&smcd_dev_list.lock);
list_for_each_entry(ismdev, &smcd_dev_list.list, list) {
- if (smc_pnet_match(ismdev->pnetid, ndev_pnetid)) {
+ if (smc_pnet_match(ismdev->pnetid, ndev_pnetid) &&
+ !ismdev->going_away) {
ini->ism_dev = ismdev;
break;
}
diff --git a/net/smc/smc_rx.c b/net/smc/smc_rx.c
index 97e8369002d7..39d7b34d06d2 100644
--- a/net/smc/smc_rx.c
+++ b/net/smc/smc_rx.c
@@ -201,6 +201,8 @@ int smc_rx_wait(struct smc_sock *smc, long *timeo,
{
DEFINE_WAIT_FUNC(wait, woken_wake_function);
struct smc_connection *conn = &smc->conn;
+ struct smc_cdc_conn_state_flags *cflags =
+ &conn->local_tx_ctrl.conn_state_flags;
struct sock *sk = &smc->sk;
int rc;
@@ -210,7 +212,9 @@ int smc_rx_wait(struct smc_sock *smc, long *timeo,
add_wait_queue(sk_sleep(sk), &wait);
rc = sk_wait_event(sk, timeo,
sk->sk_err ||
+ cflags->peer_conn_abort ||
sk->sk_shutdown & RCV_SHUTDOWN ||
+ conn->killed ||
fcrit(conn),
&wait);
remove_wait_queue(sk_sleep(sk), &wait);
@@ -314,11 +318,13 @@ int smc_rx_recvmsg(struct smc_sock *smc, struct msghdr *msg,
if (read_done >= target || (pipe && read_done))
break;
+ if (conn->killed)
+ break;
+
if (smc_rx_recvmsg_data_available(smc))
goto copy;
- if (sk->sk_shutdown & RCV_SHUTDOWN ||
- conn->local_tx_ctrl.conn_state_flags.peer_conn_abort) {
+ if (sk->sk_shutdown & RCV_SHUTDOWN) {
/* smc_cdc_msg_recv_action() could have run after
* above smc_rx_recvmsg_data_available()
*/
diff --git a/net/smc/smc_tx.c b/net/smc/smc_tx.c
index 6c8f09c1ce51..0d42e7716b91 100644
--- a/net/smc/smc_tx.c
+++ b/net/smc/smc_tx.c
@@ -86,6 +86,7 @@ static int smc_tx_wait(struct smc_sock *smc, int flags)
sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
if (sk->sk_err ||
(sk->sk_shutdown & SEND_SHUTDOWN) ||
+ conn->killed ||
conn->local_tx_ctrl.conn_state_flags.peer_done_writing) {
rc = -EPIPE;
break;
@@ -155,7 +156,7 @@ int smc_tx_sendmsg(struct smc_sock *smc, struct msghdr *msg, size_t len)
return -ENOTCONN;
if (smc->sk.sk_shutdown & SEND_SHUTDOWN ||
(smc->sk.sk_err == ECONNABORTED) ||
- conn->local_tx_ctrl.conn_state_flags.peer_conn_abort)
+ conn->killed)
return -EPIPE;
if (smc_cdc_rxed_any_close(conn))
return send_done ?: -ECONNRESET;
@@ -282,10 +283,8 @@ static int smc_tx_rdma_write(struct smc_connection *conn, int peer_rmbe_offset,
peer_rmbe_offset;
rdma_wr->rkey = lgr->rtokens[conn->rtoken_idx][SMC_SINGLE_LINK].rkey;
rc = ib_post_send(link->roce_qp, &rdma_wr->wr, NULL);
- if (rc) {
- conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
- smc_lgr_terminate(lgr);
- }
+ if (rc)
+ smc_lgr_terminate(lgr, true);
return rc;
}
@@ -495,10 +494,11 @@ static int smcr_tx_sndbuf_nonempty(struct smc_connection *conn)
if (smc->sk.sk_err == ECONNABORTED)
return sock_error(&smc->sk);
+ if (conn->killed)
+ return -EPIPE;
rc = 0;
- if (conn->alert_token_local) /* connection healthy */
- mod_delayed_work(system_wq, &conn->tx_work,
- SMC_TX_WORK_DELAY);
+ mod_delayed_work(system_wq, &conn->tx_work,
+ SMC_TX_WORK_DELAY);
}
return rc;
}
@@ -547,6 +547,9 @@ int smc_tx_sndbuf_nonempty(struct smc_connection *conn)
{
int rc;
+ if (conn->killed ||
+ conn->local_rx_ctrl.conn_state_flags.peer_conn_abort)
+ return -EPIPE; /* connection being aborted */
if (conn->lgr->is_smcd)
rc = smcd_tx_sndbuf_nonempty(conn);
else
@@ -573,9 +576,7 @@ void smc_tx_work(struct work_struct *work)
int rc;
lock_sock(&smc->sk);
- if (smc->sk.sk_err ||
- !conn->alert_token_local ||
- conn->local_rx_ctrl.conn_state_flags.peer_conn_abort)
+ if (smc->sk.sk_err)
goto out;
rc = smc_tx_sndbuf_nonempty(conn);
@@ -608,8 +609,11 @@ void smc_tx_consumer_update(struct smc_connection *conn, bool force)
((to_confirm > conn->rmbe_update_limit) &&
((sender_free <= (conn->rmb_desc->len / 2)) ||
conn->local_rx_ctrl.prod_flags.write_blocked))) {
+ if (conn->killed ||
+ conn->local_rx_ctrl.conn_state_flags.peer_conn_abort)
+ return;
if ((smc_cdc_get_slot_and_msg_send(conn) < 0) &&
- conn->alert_token_local) { /* connection healthy */
+ !conn->killed) {
schedule_delayed_work(&conn->tx_work,
SMC_TX_WORK_DELAY);
return;
diff --git a/net/smc/smc_wr.c b/net/smc/smc_wr.c
index 253aa75dc2b6..337ee52ad3d3 100644
--- a/net/smc/smc_wr.c
+++ b/net/smc/smc_wr.c
@@ -50,6 +50,26 @@ struct smc_wr_tx_pend { /* control data for a pending send request */
/*------------------------------- completion --------------------------------*/
+/* returns true if at least one tx work request is pending on the given link */
+static inline bool smc_wr_is_tx_pend(struct smc_link *link)
+{
+ if (find_first_bit(link->wr_tx_mask, link->wr_tx_cnt) !=
+ link->wr_tx_cnt) {
+ return true;
+ }
+ return false;
+}
+
+/* wait till all pending tx work requests on the given link are completed */
+static inline int smc_wr_tx_wait_no_pending_sends(struct smc_link *link)
+{
+ if (wait_event_timeout(link->wr_tx_wait, !smc_wr_is_tx_pend(link),
+ SMC_WR_TX_WAIT_PENDING_TIME))
+ return 0;
+ else /* timeout */
+ return -EPIPE;
+}
+
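smc_wr_tx_wait_no_pending_sends() leans on the wait_event_timeout() convention: it returns 0 on timeout and the remaining jiffies (at least 1) once the condition holds, so a nonzero return means success. Standalone sketch (demo names illustrative):

    static int demo_drain(wait_queue_head_t *wq, atomic_t *pending)
    {
        /* returns remaining jiffies (>= 1) on success, 0 on timeout */
        if (wait_event_timeout(*wq, !atomic_read(pending), 5 * HZ))
            return 0;
        return -EPIPE;  /* timed out with work still outstanding */
    }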
static inline int smc_wr_tx_find_pending_index(struct smc_link *link, u64 wr_id)
{
u32 i;
@@ -75,7 +95,7 @@ static inline void smc_wr_tx_process_cqe(struct ib_wc *wc)
link->wr_reg_state = FAILED;
else
link->wr_reg_state = CONFIRMED;
- wake_up(&link->wr_reg_wait);
+ smc_wr_wakeup_reg_wait(link);
return;
}
@@ -101,7 +121,7 @@ static inline void smc_wr_tx_process_cqe(struct ib_wc *wc)
clear_bit(i, link->wr_tx_mask);
}
/* terminate connections of this link group abnormally */
- smc_lgr_terminate(smc_get_lgr(link));
+ smc_lgr_terminate_sched(smc_get_lgr(link));
}
if (pnd_snd.handler)
pnd_snd.handler(&pnd_snd.priv, link, wc->status);
@@ -171,6 +191,7 @@ int smc_wr_tx_get_free_slot(struct smc_link *link,
struct smc_rdma_wr **wr_rdma_buf,
struct smc_wr_tx_pend_priv **wr_pend_priv)
{
+ struct smc_link_group *lgr = smc_get_lgr(link);
struct smc_wr_tx_pend *wr_pend;
u32 idx = link->wr_tx_cnt;
struct ib_send_wr *wr_ib;
@@ -179,19 +200,20 @@ int smc_wr_tx_get_free_slot(struct smc_link *link,
*wr_buf = NULL;
*wr_pend_priv = NULL;
- if (in_softirq()) {
+ if (in_softirq() || lgr->terminating) {
rc = smc_wr_tx_get_free_slot_index(link, &idx);
if (rc)
return rc;
} else {
- rc = wait_event_timeout(
+ rc = wait_event_interruptible_timeout(
link->wr_tx_wait,
link->state == SMC_LNK_INACTIVE ||
+ lgr->terminating ||
(smc_wr_tx_get_free_slot_index(link, &idx) != -EBUSY),
SMC_WR_TX_WAIT_FREE_SLOT_TIME);
if (!rc) {
/* timeout - terminate connections */
- smc_lgr_terminate(smc_get_lgr(link));
+ smc_lgr_terminate_sched(lgr);
return -EPIPE;
}
if (idx == link->wr_tx_cnt)
@@ -227,6 +249,7 @@ int smc_wr_tx_put_slot(struct smc_link *link,
memset(&link->wr_tx_bufs[idx], 0,
sizeof(link->wr_tx_bufs[idx]));
test_and_clear_bit(idx, link->wr_tx_mask);
+ wake_up(&link->wr_tx_wait);
return 1;
}
@@ -247,7 +270,7 @@ int smc_wr_tx_send(struct smc_link *link, struct smc_wr_tx_pend_priv *priv)
rc = ib_post_send(link->roce_qp, &link->wr_tx_ibs[pend->idx], NULL);
if (rc) {
smc_wr_tx_put_slot(link, priv);
- smc_lgr_terminate(smc_get_lgr(link));
+ smc_lgr_terminate_sched(smc_get_lgr(link));
}
return rc;
}
@@ -272,7 +295,7 @@ int smc_wr_reg_send(struct smc_link *link, struct ib_mr *mr)
SMC_WR_REG_MR_WAIT_TIME);
if (!rc) {
/* timeout - terminate connections */
- smc_lgr_terminate(smc_get_lgr(link));
+ smc_lgr_terminate_sched(smc_get_lgr(link));
return -EPIPE;
}
if (rc == -ERESTARTSYS)
@@ -373,7 +396,7 @@ static inline void smc_wr_rx_process_cqes(struct ib_wc wc[], int num)
/* terminate connections of this link group
* abnormally
*/
- smc_lgr_terminate(smc_get_lgr(link));
+ smc_lgr_terminate_sched(smc_get_lgr(link));
break;
default:
smc_wr_rx_post(link); /* refill WR RX */
@@ -510,8 +533,10 @@ void smc_wr_free_link(struct smc_link *lnk)
{
struct ib_device *ibdev;
- memset(lnk->wr_tx_mask, 0,
- BITS_TO_LONGS(SMC_WR_BUF_CNT) * sizeof(*lnk->wr_tx_mask));
+ if (smc_wr_tx_wait_no_pending_sends(lnk))
+ memset(lnk->wr_tx_mask, 0,
+ BITS_TO_LONGS(SMC_WR_BUF_CNT) *
+ sizeof(*lnk->wr_tx_mask));
if (!lnk->smcibdev)
return;
diff --git a/net/smc/smc_wr.h b/net/smc/smc_wr.h
index 09bf32fd3959..3ac99c898418 100644
--- a/net/smc/smc_wr.h
+++ b/net/smc/smc_wr.h
@@ -60,6 +60,16 @@ static inline void smc_wr_tx_set_wr_id(atomic_long_t *wr_tx_id, long val)
atomic_long_set(wr_tx_id, val);
}
+static inline void smc_wr_wakeup_tx_wait(struct smc_link *lnk)
+{
+ wake_up_all(&lnk->wr_tx_wait);
+}
+
+static inline void smc_wr_wakeup_reg_wait(struct smc_link *lnk)
+{
+ wake_up(&lnk->wr_reg_wait);
+}
+
/* post a new receive work request to fill a completed old work request entry */
static inline int smc_wr_rx_post(struct smc_link *link)
{
diff --git a/net/socket.c b/net/socket.c
index 6a9ab7a8b1d2..17bc1eee198a 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -404,6 +404,7 @@ struct file *sock_alloc_file(struct socket *sock, int flags, const char *dname)
sock->file = file;
file->private_data = sock;
+ stream_open(SOCK_INODE(sock), file);
return file;
}
EXPORT_SYMBOL(sock_alloc_file);
@@ -1690,24 +1691,13 @@ SYSCALL_DEFINE2(listen, int, fd, int, backlog)
return __sys_listen(fd, backlog);
}
-/*
- * For accept, we attempt to create a new socket, set up the link
- * with the client, wake up the client, then return the new
- * connected fd. We collect the address of the connector in kernel
- * space and move it to user at the very end. This is unclean because
- * we open the socket then return an error.
- *
- * 1003.1g adds the ability to recvmsg() to query connection pending
- * status to recvmsg. We need to add that support in a way thats
- * clean when we restructure accept also.
- */
-
-int __sys_accept4(int fd, struct sockaddr __user *upeer_sockaddr,
- int __user *upeer_addrlen, int flags)
+int __sys_accept4_file(struct file *file, unsigned file_flags,
+ struct sockaddr __user *upeer_sockaddr,
+ int __user *upeer_addrlen, int flags)
{
struct socket *sock, *newsock;
struct file *newfile;
- int err, len, newfd, fput_needed;
+ int err, len, newfd;
struct sockaddr_storage address;
if (flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
@@ -1716,14 +1706,14 @@ int __sys_accept4(int fd, struct sockaddr __user *upeer_sockaddr,
if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
- sock = sockfd_lookup_light(fd, &err, &fput_needed);
+ sock = sock_from_file(file, &err);
if (!sock)
goto out;
err = -ENFILE;
newsock = sock_alloc();
if (!newsock)
- goto out_put;
+ goto out;
newsock->type = sock->type;
newsock->ops = sock->ops;
@@ -1738,20 +1728,21 @@ int __sys_accept4(int fd, struct sockaddr __user *upeer_sockaddr,
if (unlikely(newfd < 0)) {
err = newfd;
sock_release(newsock);
- goto out_put;
+ goto out;
}
newfile = sock_alloc_file(newsock, flags, sock->sk->sk_prot_creator->name);
if (IS_ERR(newfile)) {
err = PTR_ERR(newfile);
put_unused_fd(newfd);
- goto out_put;
+ goto out;
}
err = security_socket_accept(sock, newsock);
if (err)
goto out_fd;
- err = sock->ops->accept(sock, newsock, sock->file->f_flags, false);
+ err = sock->ops->accept(sock, newsock, sock->file->f_flags | file_flags,
+ false);
if (err < 0)
goto out_fd;
@@ -1772,15 +1763,42 @@ int __sys_accept4(int fd, struct sockaddr __user *upeer_sockaddr,
fd_install(newfd, newfile);
err = newfd;
-
-out_put:
- fput_light(sock->file, fput_needed);
out:
return err;
out_fd:
fput(newfile);
put_unused_fd(newfd);
- goto out_put;
+ goto out;
+
+}
+
+/*
+ * For accept, we attempt to create a new socket, set up the link
+ * with the client, wake up the client, then return the new
+ * connected fd. We collect the address of the connector in kernel
+ * space and move it to user at the very end. This is unclean because
+ * we open the socket then return an error.
+ *
+ * 1003.1g adds the ability to recvmsg() to query connection pending
+ * status to recvmsg. We need to add that support in a way that's
+ * clean when we restructure accept also.
+ */
+
+int __sys_accept4(int fd, struct sockaddr __user *upeer_sockaddr,
+ int __user *upeer_addrlen, int flags)
+{
+ int ret = -EBADF;
+ struct fd f;
+
+ f = fdget(fd);
+ if (f.file) {
+ ret = __sys_accept4_file(f.file, 0, upeer_sockaddr,
+ upeer_addrlen, flags);
+ if (f.flags)
+ fput(f.file);
+ }
+
+ return ret;
}
SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
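The rewritten __sys_accept4() resolves the descriptor with fdget() and open-codes the conditional fput(): f.flags is nonzero only when fdget() actually took a file reference (the usual shorthand for this pair is fdput(f)). Sketch of the lookup pattern (demo name illustrative):

    static int demo_op_on_fd(int fd)
    {
        struct fd f = fdget(fd);
        int ret = -EBADF;

        if (f.file) {
            ret = 0;           /* ... operate on f.file here ... */
            if (f.flags)       /* reference taken? then drop it */
                fput(f.file);
        }
        return ret;
    }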
diff --git a/net/tipc/Kconfig b/net/tipc/Kconfig
index b83e16ade4d2..716b61a701a8 100644
--- a/net/tipc/Kconfig
+++ b/net/tipc/Kconfig
@@ -35,6 +35,21 @@ config TIPC_MEDIA_UDP
Saying Y here will enable support for running TIPC over IP/UDP
bool
default y
+config TIPC_CRYPTO
+ bool "TIPC encryption support"
+ depends on TIPC
+ select CRYPTO
+ select CRYPTO_AES
+ select CRYPTO_GCM
+ help
+ Saying Y here will enable support for TIPC encryption.
+ All TIPC messages will be encrypted/decrypted using the AEAD AES-GCM
+ algorithm (as in IPsec or TLS) before leaving/entering the TIPC stack.
+ Key setting from user-space is performed via netlink by a user program
+ (e.g. the iproute2 'tipc' tool).
config TIPC_DIAG
tristate "TIPC: socket monitoring interface"
diff --git a/net/tipc/Makefile b/net/tipc/Makefile
index c86aba0282af..11255e970dd4 100644
--- a/net/tipc/Makefile
+++ b/net/tipc/Makefile
@@ -16,6 +16,7 @@ CFLAGS_trace.o += -I$(src)
tipc-$(CONFIG_TIPC_MEDIA_UDP) += udp_media.o
tipc-$(CONFIG_TIPC_MEDIA_IB) += ib_media.o
tipc-$(CONFIG_SYSCTL) += sysctl.o
+tipc-$(CONFIG_TIPC_CRYPTO) += crypto.o
obj-$(CONFIG_TIPC_DIAG) += diag.o
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index 6ef1abdd525f..55aeba681cf4 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -84,12 +84,12 @@ static struct tipc_bc_base *tipc_bc_base(struct net *net)
*/
int tipc_bcast_get_mtu(struct net *net)
{
- return tipc_link_mtu(tipc_bc_sndlink(net)) - INT_H_SIZE;
+ return tipc_link_mss(tipc_bc_sndlink(net));
}
-void tipc_bcast_disable_rcast(struct net *net)
+void tipc_bcast_toggle_rcast(struct net *net, bool supp)
{
- tipc_bc_base(net)->rcast_support = false;
+ tipc_bc_base(net)->rcast_support = supp;
}
static void tipc_bcbase_calc_bc_threshold(struct net *net)
diff --git a/net/tipc/bcast.h b/net/tipc/bcast.h
index dadad953e2be..9e847d9617d3 100644
--- a/net/tipc/bcast.h
+++ b/net/tipc/bcast.h
@@ -85,7 +85,7 @@ void tipc_bcast_remove_peer(struct net *net, struct tipc_link *rcv_bcl);
void tipc_bcast_inc_bearer_dst_cnt(struct net *net, int bearer_id);
void tipc_bcast_dec_bearer_dst_cnt(struct net *net, int bearer_id);
int tipc_bcast_get_mtu(struct net *net);
-void tipc_bcast_disable_rcast(struct net *net);
+void tipc_bcast_toggle_rcast(struct net *net, bool supp);
int tipc_mcast_xmit(struct net *net, struct sk_buff_head *pkts,
struct tipc_mc_method *method, struct tipc_nlist *dests,
u16 *cong_link_cnt);
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 0214aa1c4427..d7ec26bd739d 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -44,6 +44,7 @@
#include "netlink.h"
#include "udp_media.h"
#include "trace.h"
+#include "crypto.h"
#define MAX_ADDR_STR 60
@@ -315,6 +316,7 @@ static int tipc_enable_bearer(struct net *net, const char *name,
b->net_plane = bearer_id + 'A';
b->priority = prio;
test_and_set_bit_lock(0, &b->up);
+ refcount_set(&b->refcnt, 1);
res = tipc_disc_create(net, b, &b->bcast_addr, &skb);
if (res) {
@@ -351,6 +353,17 @@ static int tipc_reset_bearer(struct net *net, struct tipc_bearer *b)
return 0;
}
+bool tipc_bearer_hold(struct tipc_bearer *b)
+{
+ return (b && refcount_inc_not_zero(&b->refcnt));
+}
+
+void tipc_bearer_put(struct tipc_bearer *b)
+{
+ if (b && refcount_dec_and_test(&b->refcnt))
+ kfree_rcu(b, rcu);
+}
+
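tipc_bearer_hold()/tipc_bearer_put() introduce lookup-safe reference counting: a lookup succeeds only while the count is nonzero, and the final put frees the bearer after an RCU grace period so concurrent RCU readers are unharmed. Generic sketch (demo names illustrative; the object is assumed created with its refcount set to 1):

    struct demo_obj {
        refcount_t refcnt;       /* refcount_set(&o->refcnt, 1) by the creator */
        struct rcu_head rcu;
    };

    static bool demo_hold(struct demo_obj *o)
    {
        /* fails once the object is already on its way out */
        return o && refcount_inc_not_zero(&o->refcnt);
    }

    static void demo_put(struct demo_obj *o)
    {
        if (o && refcount_dec_and_test(&o->refcnt))
            kfree_rcu(o, rcu);   /* free after RCU readers finish */
    }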
/**
* bearer_disable
*
@@ -369,7 +382,7 @@ static void bearer_disable(struct net *net, struct tipc_bearer *b)
if (b->disc)
tipc_disc_delete(b->disc);
RCU_INIT_POINTER(tn->bearer_list[bearer_id], NULL);
- kfree_rcu(b, rcu);
+ tipc_bearer_put(b);
tipc_mon_delete(net, bearer_id);
}
@@ -504,10 +517,15 @@ void tipc_bearer_xmit_skb(struct net *net, u32 bearer_id,
rcu_read_lock();
b = bearer_get(net, bearer_id);
- if (likely(b && (test_bit(0, &b->up) || msg_is_reset(hdr))))
- b->media->send_msg(net, skb, b, dest);
- else
+ if (likely(b && (test_bit(0, &b->up) || msg_is_reset(hdr)))) {
+#ifdef CONFIG_TIPC_CRYPTO
+ tipc_crypto_xmit(net, &skb, b, dest, NULL);
+ if (skb)
+#endif
+ b->media->send_msg(net, skb, b, dest);
+ } else {
kfree_skb(skb);
+ }
rcu_read_unlock();
}
@@ -515,7 +533,8 @@ void tipc_bearer_xmit_skb(struct net *net, u32 bearer_id,
*/
void tipc_bearer_xmit(struct net *net, u32 bearer_id,
struct sk_buff_head *xmitq,
- struct tipc_media_addr *dst)
+ struct tipc_media_addr *dst,
+ struct tipc_node *__dnode)
{
struct tipc_bearer *b;
struct sk_buff *skb, *tmp;
@@ -529,10 +548,15 @@ void tipc_bearer_xmit(struct net *net, u32 bearer_id,
__skb_queue_purge(xmitq);
skb_queue_walk_safe(xmitq, skb, tmp) {
__skb_dequeue(xmitq);
- if (likely(test_bit(0, &b->up) || msg_is_reset(buf_msg(skb))))
- b->media->send_msg(net, skb, b, dst);
- else
+ if (likely(test_bit(0, &b->up) || msg_is_reset(buf_msg(skb)))) {
+#ifdef CONFIG_TIPC_CRYPTO
+ tipc_crypto_xmit(net, &skb, b, dst, __dnode);
+ if (skb)
+#endif
+ b->media->send_msg(net, skb, b, dst);
+ } else {
kfree_skb(skb);
+ }
}
rcu_read_unlock();
}
@@ -543,6 +567,7 @@ void tipc_bearer_bc_xmit(struct net *net, u32 bearer_id,
struct sk_buff_head *xmitq)
{
struct tipc_net *tn = tipc_net(net);
+ struct tipc_media_addr *dst;
int net_id = tn->net_id;
struct tipc_bearer *b;
struct sk_buff *skb, *tmp;
@@ -557,7 +582,12 @@ void tipc_bearer_bc_xmit(struct net *net, u32 bearer_id,
msg_set_non_seq(hdr, 1);
msg_set_mc_netid(hdr, net_id);
__skb_dequeue(xmitq);
- b->media->send_msg(net, skb, b, &b->bcast_addr);
+ dst = &b->bcast_addr;
+#ifdef CONFIG_TIPC_CRYPTO
+ tipc_crypto_xmit(net, &skb, b, dst, NULL);
+ if (skb)
+#endif
+ b->media->send_msg(net, skb, b, dst);
}
rcu_read_unlock();
}
@@ -584,6 +614,7 @@ static int tipc_l2_rcv_msg(struct sk_buff *skb, struct net_device *dev,
if (likely(b && test_bit(0, &b->up) &&
(skb->pkt_type <= PACKET_MULTICAST))) {
skb_mark_not_on_list(skb);
+ TIPC_SKB_CB(skb)->flags = 0;
tipc_rcv(dev_net(b->pt.dev), skb, b);
rcu_read_unlock();
return NET_RX_SUCCESS;
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
index ea0f3c49cbed..d0c79cc6c0c2 100644
--- a/net/tipc/bearer.h
+++ b/net/tipc/bearer.h
@@ -165,6 +165,7 @@ struct tipc_bearer {
struct tipc_discoverer *disc;
char net_plane;
unsigned long up;
+ refcount_t refcnt;
};
struct tipc_bearer_names {
@@ -210,6 +211,8 @@ int tipc_media_set_window(const char *name, u32 new_value);
int tipc_media_addr_printf(char *buf, int len, struct tipc_media_addr *a);
int tipc_enable_l2_media(struct net *net, struct tipc_bearer *b,
struct nlattr *attrs[]);
+bool tipc_bearer_hold(struct tipc_bearer *b);
+void tipc_bearer_put(struct tipc_bearer *b);
void tipc_disable_l2_media(struct tipc_bearer *b);
int tipc_l2_send_msg(struct net *net, struct sk_buff *buf,
struct tipc_bearer *b, struct tipc_media_addr *dest);
@@ -229,7 +232,8 @@ void tipc_bearer_xmit_skb(struct net *net, u32 bearer_id,
struct tipc_media_addr *dest);
void tipc_bearer_xmit(struct net *net, u32 bearer_id,
struct sk_buff_head *xmitq,
- struct tipc_media_addr *dst);
+ struct tipc_media_addr *dst,
+ struct tipc_node *__dnode);
void tipc_bearer_bc_xmit(struct net *net, u32 bearer_id,
struct sk_buff_head *xmitq);
void tipc_clone_to_loopback(struct net *net, struct sk_buff_head *pkts);
diff --git a/net/tipc/core.c b/net/tipc/core.c
index 23cb379a93d6..7532a00ac73d 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -34,8 +34,6 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
#include "core.h"
#include "name_table.h"
#include "subscr.h"
@@ -44,6 +42,7 @@
#include "socket.h"
#include "bcast.h"
#include "node.h"
+#include "crypto.h"
#include <linux/module.h>
@@ -68,6 +67,11 @@ static int __net_init tipc_init_net(struct net *net)
INIT_LIST_HEAD(&tn->node_list);
spin_lock_init(&tn->node_list_lock);
+#ifdef CONFIG_TIPC_CRYPTO
+ err = tipc_crypto_start(&tn->crypto_tx, net, NULL);
+ if (err)
+ goto out_crypto;
+#endif
err = tipc_sk_rht_init(net);
if (err)
goto out_sk_rht;
@@ -93,6 +97,11 @@ out_bclink:
out_nametbl:
tipc_sk_rht_destroy(net);
out_sk_rht:
+
+#ifdef CONFIG_TIPC_CRYPTO
+ tipc_crypto_stop(&tn->crypto_tx);
+out_crypto:
+#endif
return err;
}
@@ -103,8 +112,20 @@ static void __net_exit tipc_exit_net(struct net *net)
tipc_bcast_stop(net);
tipc_nametbl_stop(net);
tipc_sk_rht_destroy(net);
+#ifdef CONFIG_TIPC_CRYPTO
+ tipc_crypto_stop(&tipc_net(net)->crypto_tx);
+#endif
+}
+
+static void __net_exit tipc_pernet_pre_exit(struct net *net)
+{
+ tipc_node_pre_cleanup_net(net);
}
+static struct pernet_operations tipc_pernet_pre_exit_ops = {
+ .pre_exit = tipc_pernet_pre_exit,
+};
+
static struct pernet_operations tipc_net_ops = {
.init = tipc_init_net,
.exit = tipc_exit_net,
@@ -151,6 +172,10 @@ static int __init tipc_init(void)
if (err)
goto out_pernet_topsrv;
+ err = register_pernet_subsys(&tipc_pernet_pre_exit_ops);
+ if (err)
+ goto out_register_pernet_subsys;
+
err = tipc_bearer_setup();
if (err)
goto out_bearer;
@@ -158,6 +183,8 @@ static int __init tipc_init(void)
pr_info("Started in single node mode\n");
return 0;
out_bearer:
+ unregister_pernet_subsys(&tipc_pernet_pre_exit_ops);
+out_register_pernet_subsys:
unregister_pernet_device(&tipc_topsrv_net_ops);
out_pernet_topsrv:
tipc_socket_stop();
@@ -177,6 +204,7 @@ out_netlink:
static void __exit tipc_exit(void)
{
tipc_bearer_cleanup();
+ unregister_pernet_subsys(&tipc_pernet_pre_exit_ops);
unregister_pernet_device(&tipc_topsrv_net_ops);
tipc_socket_stop();
unregister_pernet_device(&tipc_net_ops);
diff --git a/net/tipc/core.h b/net/tipc/core.h
index 60d829581068..631d83c9705f 100644
--- a/net/tipc/core.h
+++ b/net/tipc/core.h
@@ -59,6 +59,13 @@
#include <net/netns/generic.h>
#include <linux/rhashtable.h>
#include <net/genetlink.h>
+#include <net/netns/hash.h>
+
+#ifdef pr_fmt
+#undef pr_fmt
+#endif
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
struct tipc_node;
struct tipc_bearer;
@@ -67,6 +74,9 @@ struct tipc_link;
struct tipc_name_table;
struct tipc_topsrv;
struct tipc_monitor;
+#ifdef CONFIG_TIPC_CRYPTO
+struct tipc_crypto;
+#endif
#define TIPC_MOD_VER "2.0.0"
@@ -128,6 +138,11 @@ struct tipc_net {
/* Tracing of node internal messages */
struct packet_type loopback_pt;
+
+#ifdef CONFIG_TIPC_CRYPTO
+ /* TX crypto handler */
+ struct tipc_crypto *crypto_tx;
+#endif
};
static inline struct tipc_net *tipc_net(struct net *net)
@@ -185,6 +200,11 @@ static inline int in_range(u16 val, u16 min, u16 max)
return !less(val, min) && !more(val, max);
}
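+/* Mix of the init/current netns hashes with a random value, e.g. usable as a
+ * hash salt
+ */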
+static inline u32 tipc_net_hash_mixes(struct net *net, int tn_rand)
+{
+ return net_hash_mix(&init_net) ^ net_hash_mix(net) ^ tn_rand;
+}
+
#ifdef CONFIG_SYSCTL
int tipc_register_sysctl(void);
void tipc_unregister_sysctl(void);
diff --git a/net/tipc/crypto.c b/net/tipc/crypto.c
new file mode 100644
index 000000000000..990a872cec46
--- /dev/null
+++ b/net/tipc/crypto.c
@@ -0,0 +1,1986 @@
+// SPDX-License-Identifier: GPL-2.0
+/**
+ * net/tipc/crypto.c: TIPC crypto for key handling & packet en/decryption
+ *
+ * Copyright (c) 2019, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <crypto/aead.h>
+#include <crypto/aes.h>
+#include "crypto.h"
+
+#define TIPC_TX_PROBE_LIM msecs_to_jiffies(1000) /* > 1s */
+#define TIPC_TX_LASTING_LIM msecs_to_jiffies(120000) /* 2 mins */
+#define TIPC_RX_ACTIVE_LIM msecs_to_jiffies(3000) /* 3s */
+#define TIPC_RX_PASSIVE_LIM msecs_to_jiffies(180000) /* 3 mins */
+#define TIPC_MAX_TFMS_DEF 10
+#define TIPC_MAX_TFMS_LIM 1000
+
+/**
+ * TIPC Key ids
+ */
+enum {
+ KEY_UNUSED = 0,
+ KEY_MIN,
+ KEY_1 = KEY_MIN,
+ KEY_2,
+ KEY_3,
+ KEY_MAX = KEY_3,
+};
+
+/**
+ * TIPC Crypto statistics
+ */
+enum {
+ STAT_OK,
+ STAT_NOK,
+ STAT_ASYNC,
+ STAT_ASYNC_OK,
+ STAT_ASYNC_NOK,
+ STAT_BADKEYS, /* tx only */
+ STAT_BADMSGS = STAT_BADKEYS, /* rx only */
+ STAT_NOKEYS,
+ STAT_SWITCHES,
+
+ MAX_STATS,
+};
+
+/* TIPC crypto statistics' header */
+static const char *hstats[MAX_STATS] = {"ok", "nok", "async", "async_ok",
+ "async_nok", "badmsgs", "nokeys",
+ "switches"};
+
+/* Max number of TFMs per key */
+int sysctl_tipc_max_tfms __read_mostly = TIPC_MAX_TFMS_DEF;
+
+/**
+ * struct tipc_key - TIPC keys' status indicator
+ *
+ * 7 6 5 4 3 2 1 0
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ * key: | (reserved)|passive idx| active idx|pending idx|
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ */
+struct tipc_key {
+#define KEY_BITS (2)
+#define KEY_MASK ((1 << KEY_BITS) - 1)
+ union {
+ struct {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ u8 pending:2,
+ active:2,
+ passive:2, /* rx only */
+ reserved:2;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ u8 reserved:2,
+ passive:2, /* rx only */
+ active:2,
+ pending:2;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+ } __packed;
+ u8 keys;
+ };
+};
+
+/**
+ * struct tipc_tfm - TIPC TFM structure to form a list of TFMs
+ */
+struct tipc_tfm {
+ struct crypto_aead *tfm;
+ struct list_head list;
+};
+
+/**
+ * struct tipc_aead - TIPC AEAD key structure
+ * @tfm_entry: per-cpu pointer to one entry in the TFM list
+ * @crypto: the TIPC crypto that owns this key
+ * @cloned: reference to the source key in case of cloning
+ * @users: the number of the key users (TX/RX)
+ * @salt: the key's SALT value
+ * @authsize: authentication tag size (max = 16)
+ * @mode: the crypto mode applied to the key
+ * @hint: a hint (the first few characters) of the user key
+ * @rcu: struct rcu_head
+ * @seqno: the key seqno (cluster scope)
+ * @refcnt: the key reference counter
+ */
+struct tipc_aead {
+#define TIPC_AEAD_HINT_LEN (5)
+ struct tipc_tfm * __percpu *tfm_entry;
+ struct tipc_crypto *crypto;
+ struct tipc_aead *cloned;
+ atomic_t users;
+ u32 salt;
+ u8 authsize;
+ u8 mode;
+ char hint[TIPC_AEAD_HINT_LEN + 1];
+ struct rcu_head rcu;
+
+ atomic64_t seqno ____cacheline_aligned;
+ refcount_t refcnt ____cacheline_aligned;
+
+} ____cacheline_aligned;
+
+/**
+ * struct tipc_crypto_stats - TIPC Crypto statistics
+ */
+struct tipc_crypto_stats {
+ unsigned int stat[MAX_STATS];
+};
+
+/**
+ * struct tipc_crypto - TIPC TX/RX crypto structure
+ * @net: struct net
+ * @node: TIPC node (RX)
+ * @aead: array of pointers to AEAD keys for encryption/decryption
+ * @peer_rx_active: replicated peer RX active key index
+ * @key: the key states
+ * @working: whether the crypto is working or not
+ * @stats: the crypto statistics
+ * @sndnxt: the per-peer sndnxt (TX)
+ * @timer1: general timer 1 (jiffies)
+ * @timer2: general timer 2 (jiffies)
+ * @lock: tipc_key lock
+ */
+struct tipc_crypto {
+ struct net *net;
+ struct tipc_node *node;
+ struct tipc_aead __rcu *aead[KEY_MAX + 1]; /* key[0] is UNUSED */
+ atomic_t peer_rx_active;
+ struct tipc_key key;
+ u8 working:1;
+ struct tipc_crypto_stats __percpu *stats;
+
+ atomic64_t sndnxt ____cacheline_aligned;
+ unsigned long timer1;
+ unsigned long timer2;
+ spinlock_t lock; /* crypto lock */
+
+} ____cacheline_aligned;
+
+/* struct tipc_crypto_tx_ctx - TX context for callbacks */
+struct tipc_crypto_tx_ctx {
+ struct tipc_aead *aead;
+ struct tipc_bearer *bearer;
+ struct tipc_media_addr dst;
+};
+
+/* struct tipc_crypto_rx_ctx - RX context for callbacks */
+struct tipc_crypto_rx_ctx {
+ struct tipc_aead *aead;
+ struct tipc_bearer *bearer;
+};
+
+static struct tipc_aead *tipc_aead_get(struct tipc_aead __rcu *aead);
+static inline void tipc_aead_put(struct tipc_aead *aead);
+static void tipc_aead_free(struct rcu_head *rp);
+static int tipc_aead_users(struct tipc_aead __rcu *aead);
+static void tipc_aead_users_inc(struct tipc_aead __rcu *aead, int lim);
+static void tipc_aead_users_dec(struct tipc_aead __rcu *aead, int lim);
+static void tipc_aead_users_set(struct tipc_aead __rcu *aead, int val);
+static struct crypto_aead *tipc_aead_tfm_next(struct tipc_aead *aead);
+static int tipc_aead_init(struct tipc_aead **aead, struct tipc_aead_key *ukey,
+ u8 mode);
+static int tipc_aead_clone(struct tipc_aead **dst, struct tipc_aead *src);
+static void *tipc_aead_mem_alloc(struct crypto_aead *tfm,
+ unsigned int crypto_ctx_size,
+ u8 **iv, struct aead_request **req,
+ struct scatterlist **sg, int nsg);
+static int tipc_aead_encrypt(struct tipc_aead *aead, struct sk_buff *skb,
+ struct tipc_bearer *b,
+ struct tipc_media_addr *dst,
+ struct tipc_node *__dnode);
+static void tipc_aead_encrypt_done(struct crypto_async_request *base, int err);
+static int tipc_aead_decrypt(struct net *net, struct tipc_aead *aead,
+ struct sk_buff *skb, struct tipc_bearer *b);
+static void tipc_aead_decrypt_done(struct crypto_async_request *base, int err);
+static inline int tipc_ehdr_size(struct tipc_ehdr *ehdr);
+static int tipc_ehdr_build(struct net *net, struct tipc_aead *aead,
+ u8 tx_key, struct sk_buff *skb,
+ struct tipc_crypto *__rx);
+static inline void tipc_crypto_key_set_state(struct tipc_crypto *c,
+ u8 new_passive,
+ u8 new_active,
+ u8 new_pending);
+static int tipc_crypto_key_attach(struct tipc_crypto *c,
+ struct tipc_aead *aead, u8 pos);
+static bool tipc_crypto_key_try_align(struct tipc_crypto *rx, u8 new_pending);
+static struct tipc_aead *tipc_crypto_key_pick_tx(struct tipc_crypto *tx,
+ struct tipc_crypto *rx,
+ struct sk_buff *skb);
+static void tipc_crypto_key_synch(struct tipc_crypto *rx, u8 new_rx_active,
+ struct tipc_msg *hdr);
+static int tipc_crypto_key_revoke(struct net *net, u8 tx_key);
+static void tipc_crypto_rcv_complete(struct net *net, struct tipc_aead *aead,
+ struct tipc_bearer *b,
+ struct sk_buff **skb, int err);
+static void tipc_crypto_do_cmd(struct net *net, int cmd);
+static char *tipc_crypto_key_dump(struct tipc_crypto *c, char *buf);
+#ifdef TIPC_CRYPTO_DEBUG
+static char *tipc_key_change_dump(struct tipc_key old, struct tipc_key new,
+ char *buf);
+#endif
+
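+/* Get the next key id, cycling 1 -> 2 -> 3 -> 1 (id 0 is KEY_UNUSED) */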
+#define key_next(cur) ((cur) % KEY_MAX + 1)
+
+#define tipc_aead_rcu_ptr(rcu_ptr, lock) \
+ rcu_dereference_protected((rcu_ptr), lockdep_is_held(lock))
+
+#define tipc_aead_rcu_swap(rcu_ptr, ptr, lock) \
+ rcu_swap_protected((rcu_ptr), (ptr), lockdep_is_held(lock))
+
+#define tipc_aead_rcu_replace(rcu_ptr, ptr, lock) \
+do { \
+ typeof(rcu_ptr) __tmp = rcu_dereference_protected((rcu_ptr), \
+ lockdep_is_held(lock)); \
+ rcu_assign_pointer((rcu_ptr), (ptr)); \
+ tipc_aead_put(__tmp); \
+} while (0)
+
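+/* Detaching a key means replacing it with NULL and dropping its reference */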
+#define tipc_crypto_key_detach(rcu_ptr, lock) \
+ tipc_aead_rcu_replace((rcu_ptr), NULL, lock)
+
+/**
+ * tipc_aead_key_validate - Validate an AEAD user key
+ */
+int tipc_aead_key_validate(struct tipc_aead_key *ukey)
+{
+ int keylen;
+
+ /* Check if algorithm exists */
+ if (unlikely(!crypto_has_alg(ukey->alg_name, 0, 0))) {
+ pr_info("Not found cipher: \"%s\"!\n", ukey->alg_name);
+ return -ENODEV;
+ }
+
+ /* Currently, we only support the "gcm(aes)" cipher algorithm */
+ if (strcmp(ukey->alg_name, "gcm(aes)"))
+ return -ENOTSUPP;
+
+ /* Check if key size is correct */
+ keylen = ukey->keylen - TIPC_AES_GCM_SALT_SIZE;
+ if (unlikely(keylen != TIPC_AES_GCM_KEY_SIZE_128 &&
+ keylen != TIPC_AES_GCM_KEY_SIZE_192 &&
+ keylen != TIPC_AES_GCM_KEY_SIZE_256))
+ return -EINVAL;
+
+ return 0;
+}
+
+static struct tipc_aead *tipc_aead_get(struct tipc_aead __rcu *aead)
+{
+ struct tipc_aead *tmp;
+
+ rcu_read_lock();
+ tmp = rcu_dereference(aead);
+ if (unlikely(!tmp || !refcount_inc_not_zero(&tmp->refcnt)))
+ tmp = NULL;
+ rcu_read_unlock();
+
+ return tmp;
+}
+
+static inline void tipc_aead_put(struct tipc_aead *aead)
+{
+ if (aead && refcount_dec_and_test(&aead->refcnt))
+ call_rcu(&aead->rcu, tipc_aead_free);
+}
+
+/**
+ * tipc_aead_free - Release AEAD key incl. all the TFMs in the list
+ * @rp: rcu head pointer
+ */
+static void tipc_aead_free(struct rcu_head *rp)
+{
+ struct tipc_aead *aead = container_of(rp, struct tipc_aead, rcu);
+ struct tipc_tfm *tfm_entry, *head, *tmp;
+
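+ /* A cloned key shares its TFM list with the source key, so here we
+ * only need to drop the reference to the source.
+ */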
+ if (aead->cloned) {
+ tipc_aead_put(aead->cloned);
+ } else {
+ head = *this_cpu_ptr(aead->tfm_entry);
+ list_for_each_entry_safe(tfm_entry, tmp, &head->list, list) {
+ crypto_free_aead(tfm_entry->tfm);
+ list_del(&tfm_entry->list);
+ kfree(tfm_entry);
+ }
+ /* Free the head */
+ crypto_free_aead(head->tfm);
+ list_del(&head->list);
+ kfree(head);
+ }
+ free_percpu(aead->tfm_entry);
+ kfree(aead);
+}
+
+static int tipc_aead_users(struct tipc_aead __rcu *aead)
+{
+ struct tipc_aead *tmp;
+ int users = 0;
+
+ rcu_read_lock();
+ tmp = rcu_dereference(aead);
+ if (tmp)
+ users = atomic_read(&tmp->users);
+ rcu_read_unlock();
+
+ return users;
+}
+
+static void tipc_aead_users_inc(struct tipc_aead __rcu *aead, int lim)
+{
+ struct tipc_aead *tmp;
+
+ rcu_read_lock();
+ tmp = rcu_dereference(aead);
+ if (tmp)
+ atomic_add_unless(&tmp->users, 1, lim);
+ rcu_read_unlock();
+}
+
+static void tipc_aead_users_dec(struct tipc_aead __rcu *aead, int lim)
+{
+ struct tipc_aead *tmp;
+
+ rcu_read_lock();
+ tmp = rcu_dereference(aead);
+ if (tmp)
+ atomic_add_unless(&rcu_dereference(aead)->users, -1, lim);
+ rcu_read_unlock();
+}
+
+static void tipc_aead_users_set(struct tipc_aead __rcu *aead, int val)
+{
+ struct tipc_aead *tmp;
+ int cur;
+
+ rcu_read_lock();
+ tmp = rcu_dereference(aead);
+ if (tmp) {
+ do {
+ cur = atomic_read(&tmp->users);
+ if (cur == val)
+ break;
+ } while (atomic_cmpxchg(&tmp->users, cur, val) != cur);
+ }
+ rcu_read_unlock();
+}
+
+/**
+ * tipc_aead_tfm_next - Move the TFM entry to the next one in the list and return it
+ */
+static struct crypto_aead *tipc_aead_tfm_next(struct tipc_aead *aead)
+{
+ struct tipc_tfm **tfm_entry = this_cpu_ptr(aead->tfm_entry);
+
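+ /* Advance round-robin over this CPU's circular TFM list */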
+ *tfm_entry = list_next_entry(*tfm_entry, list);
+ return (*tfm_entry)->tfm;
+}
+
+/**
+ * tipc_aead_init - Initiate TIPC AEAD
+ * @aead: returned new TIPC AEAD key handle pointer
+ * @ukey: pointer to user key data
+ * @mode: the key mode
+ *
+ * Allocate a (list of) new cipher transformation (TFM) with the specific user
+ * key data if valid. The number of TFMs to be allocated can be set via the
+ * sysctl "net/tipc/max_tfms" first.
+ * All the other AEAD data are initialized as well.
+ *
+ * Return: 0 if the initiation is successful, otherwise: < 0
+ */
+static int tipc_aead_init(struct tipc_aead **aead, struct tipc_aead_key *ukey,
+ u8 mode)
+{
+ struct tipc_tfm *tfm_entry, *head;
+ struct crypto_aead *tfm;
+ struct tipc_aead *tmp;
+ int keylen, err, cpu;
+ int tfm_cnt = 0;
+
+ if (unlikely(*aead))
+ return -EEXIST;
+
+ /* Allocate a new AEAD */
+ tmp = kzalloc(sizeof(*tmp), GFP_ATOMIC);
+ if (unlikely(!tmp))
+ return -ENOMEM;
+
+ /* The key consists of two parts: [AES-KEY][SALT] */
+ keylen = ukey->keylen - TIPC_AES_GCM_SALT_SIZE;
+
+ /* Allocate per-cpu TFM entry pointer */
+ tmp->tfm_entry = alloc_percpu(struct tipc_tfm *);
+ if (!tmp->tfm_entry) {
+ kzfree(tmp);
+ return -ENOMEM;
+ }
+
+ /* Make a list of TFMs with the user key data */
+ do {
+ tfm = crypto_alloc_aead(ukey->alg_name, 0, 0);
+ if (IS_ERR(tfm)) {
+ err = PTR_ERR(tfm);
+ break;
+ }
+
+ if (unlikely(!tfm_cnt &&
+ crypto_aead_ivsize(tfm) != TIPC_AES_GCM_IV_SIZE)) {
+ crypto_free_aead(tfm);
+ err = -ENOTSUPP;
+ break;
+ }
+
+ err = crypto_aead_setauthsize(tfm, TIPC_AES_GCM_TAG_SIZE);
+ err |= crypto_aead_setkey(tfm, ukey->key, keylen);
+ if (unlikely(err)) {
+ crypto_free_aead(tfm);
+ break;
+ }
+
+ tfm_entry = kmalloc(sizeof(*tfm_entry), GFP_KERNEL);
+ if (unlikely(!tfm_entry)) {
+ crypto_free_aead(tfm);
+ err = -ENOMEM;
+ break;
+ }
+ INIT_LIST_HEAD(&tfm_entry->list);
+ tfm_entry->tfm = tfm;
+
+ /* First entry? */
+ if (!tfm_cnt) {
+ head = tfm_entry;
+ for_each_possible_cpu(cpu) {
+ *per_cpu_ptr(tmp->tfm_entry, cpu) = head;
+ }
+ } else {
+ list_add_tail(&tfm_entry->list, &head->list);
+ }
+
+ } while (++tfm_cnt < sysctl_tipc_max_tfms);
+
+ /* Were no TFMs allocated at all? */
+ if (!tfm_cnt) {
+ free_percpu(tmp->tfm_entry);
+ kzfree(tmp);
+ return err;
+ }
+
+ /* Copy some chars from the user key as a hint */
+ memcpy(tmp->hint, ukey->key, TIPC_AEAD_HINT_LEN);
+ tmp->hint[TIPC_AEAD_HINT_LEN] = '\0';
+
+ /* Initialize the other data */
+ tmp->mode = mode;
+ tmp->cloned = NULL;
+ tmp->authsize = TIPC_AES_GCM_TAG_SIZE;
+ memcpy(&tmp->salt, ukey->key + keylen, TIPC_AES_GCM_SALT_SIZE);
+ atomic_set(&tmp->users, 0);
+ atomic64_set(&tmp->seqno, 0);
+ refcount_set(&tmp->refcnt, 1);
+
+ *aead = tmp;
+ return 0;
+}
+
+/**
+ * tipc_aead_clone - Clone a TIPC AEAD key
+ * @dst: dest key for the cloning
+ * @src: source key to clone from
+ *
+ * Make a "copy" of the source AEAD key data to the dest, the TFMs list is
+ * common for the keys.
+ * A reference to the source is held in the "cloned" pointer for later
+ * freeing purposes.
+ *
+ * Note: this must be done in cluster-key mode only!
+ * Return: 0 in case of success, otherwise < 0
+ */
+static int tipc_aead_clone(struct tipc_aead **dst, struct tipc_aead *src)
+{
+ struct tipc_aead *aead;
+ int cpu;
+
+ if (!src)
+ return -ENOKEY;
+
+ if (src->mode != CLUSTER_KEY)
+ return -EINVAL;
+
+ if (unlikely(*dst))
+ return -EEXIST;
+
+ aead = kzalloc(sizeof(*aead), GFP_ATOMIC);
+ if (unlikely(!aead))
+ return -ENOMEM;
+
+ aead->tfm_entry = alloc_percpu_gfp(struct tipc_tfm *, GFP_ATOMIC);
+ if (unlikely(!aead->tfm_entry)) {
+ kzfree(aead);
+ return -ENOMEM;
+ }
+
+ for_each_possible_cpu(cpu) {
+ *per_cpu_ptr(aead->tfm_entry, cpu) =
+ *per_cpu_ptr(src->tfm_entry, cpu);
+ }
+
+ memcpy(aead->hint, src->hint, sizeof(src->hint));
+ aead->mode = src->mode;
+ aead->salt = src->salt;
+ aead->authsize = src->authsize;
+ atomic_set(&aead->users, 0);
+ atomic64_set(&aead->seqno, 0);
+ refcount_set(&aead->refcnt, 1);
+
+ WARN_ON(!refcount_inc_not_zero(&src->refcnt));
+ aead->cloned = src;
+
+ *dst = aead;
+ return 0;
+}
+
+/**
+ * tipc_aead_mem_alloc - Allocate memory for AEAD request operations
+ * @tfm: cipher handle to be registered with the request
+ * @crypto_ctx_size: size of crypto context for callback
+ * @iv: returned pointer to IV data
+ * @req: returned pointer to AEAD request data
+ * @sg: returned pointer to SG lists
+ * @nsg: number of SG lists to be allocated
+ *
+ * Allocate memory to store the crypto context data, AEAD request, IV and SG
+ * lists, the memory layout is as follows:
+ * crypto_ctx || iv || aead_req || sg[]
+ *
+ * Return: the pointer to the memory areas in case of success, otherwise NULL
+ */
+static void *tipc_aead_mem_alloc(struct crypto_aead *tfm,
+ unsigned int crypto_ctx_size,
+ u8 **iv, struct aead_request **req,
+ struct scatterlist **sg, int nsg)
+{
+ unsigned int iv_size, req_size;
+ unsigned int len;
+ u8 *mem;
+
+ iv_size = crypto_aead_ivsize(tfm);
+ req_size = sizeof(**req) + crypto_aead_reqsize(tfm);
+
+ len = crypto_ctx_size;
+ len += iv_size;
+ len += crypto_aead_alignmask(tfm) & ~(crypto_tfm_ctx_alignment() - 1);
+ len = ALIGN(len, crypto_tfm_ctx_alignment());
+ len += req_size;
+ len = ALIGN(len, __alignof__(struct scatterlist));
+ len += nsg * sizeof(**sg);
+
+ mem = kmalloc(len, GFP_ATOMIC);
+ if (!mem)
+ return NULL;
+
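+ /* Carve the buffer into the areas per the layout described above */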
+ *iv = (u8 *)PTR_ALIGN(mem + crypto_ctx_size,
+ crypto_aead_alignmask(tfm) + 1);
+ *req = (struct aead_request *)PTR_ALIGN(*iv + iv_size,
+ crypto_tfm_ctx_alignment());
+ *sg = (struct scatterlist *)PTR_ALIGN((u8 *)*req + req_size,
+ __alignof__(struct scatterlist));
+
+ return (void *)mem;
+}
+
+/**
+ * tipc_aead_encrypt - Encrypt a message
+ * @aead: TIPC AEAD key for the message encryption
+ * @skb: the input/output skb
+ * @b: TIPC bearer where the message will be delivered after the encryption
+ * @dst: the destination media address
+ * @__dnode: TIPC dest node if "known"
+ *
+ * Return:
+ * 0 : if the encryption has completed
+ * -EINPROGRESS/-EBUSY : if a callback will be performed
+ * < 0 : the encryption has failed
+ */
+static int tipc_aead_encrypt(struct tipc_aead *aead, struct sk_buff *skb,
+ struct tipc_bearer *b,
+ struct tipc_media_addr *dst,
+ struct tipc_node *__dnode)
+{
+ struct crypto_aead *tfm = tipc_aead_tfm_next(aead);
+ struct tipc_crypto_tx_ctx *tx_ctx;
+ struct aead_request *req;
+ struct sk_buff *trailer;
+ struct scatterlist *sg;
+ struct tipc_ehdr *ehdr;
+ int ehsz, len, tailen, nsg, rc;
+ void *ctx;
+ u32 salt;
+ u8 *iv;
+
+ /* Make sure the message length is at least 4-byte aligned */
+ len = ALIGN(skb->len, 4);
+ tailen = len - skb->len + aead->authsize;
+
+ /* Expand skb tail for the authentication tag:
+ * For simplicity, we have made sure the skb gets enough tailroom for
+ * the authentication tag at skb allocation time. Even when the skb is
+ * nonlinear but has no frag_list, it should still be fine!
+ * Otherwise, we must cow it into a writable buffer with the tailroom.
+ */
+#ifdef TIPC_CRYPTO_DEBUG
+ SKB_LINEAR_ASSERT(skb);
+ if (tailen > skb_tailroom(skb)) {
+ pr_warn("TX: skb tailroom is not enough: %d, requires: %d\n",
+ skb_tailroom(skb), tailen);
+ }
+#endif
+
+ if (unlikely(!skb_cloned(skb) && tailen <= skb_tailroom(skb))) {
+ nsg = 1;
+ trailer = skb;
+ } else {
+ /* TODO: We could avoid skb_cow_data() if the skb has no frag_list,
+ * e.g. by using skb_fill_page_desc() to add another page to the skb
+ * with the wanted tailen... However, page skbs do not appear often,
+ * so keep it simple for now!
+ * For cloned skbs, e.g. from link_xmit(), there seems to be no choice though :(
+ */
+ nsg = skb_cow_data(skb, tailen, &trailer);
+ if (unlikely(nsg < 0)) {
+ pr_err("TX: skb_cow_data() returned %d\n", nsg);
+ return nsg;
+ }
+ }
+
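+ /* Extend the skb tail to cover the padding + authentication tag */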
+ pskb_put(skb, trailer, tailen);
+
+ /* Allocate memory for the AEAD operation */
+ ctx = tipc_aead_mem_alloc(tfm, sizeof(*tx_ctx), &iv, &req, &sg, nsg);
+ if (unlikely(!ctx))
+ return -ENOMEM;
+ TIPC_SKB_CB(skb)->crypto_ctx = ctx;
+
+ /* Map skb to the sg lists */
+ sg_init_table(sg, nsg);
+ rc = skb_to_sgvec(skb, sg, 0, skb->len);
+ if (unlikely(rc < 0)) {
+ pr_err("TX: skb_to_sgvec() returned %d, nsg %d!\n", rc, nsg);
+ goto exit;
+ }
+
+ /* Prepare IV: [SALT (4 octets)][SEQNO (8 octets)]
+ * In case we're in cluster-key mode, SALT is varied by xor-ing with
+ * the source address (or w0 of id), otherwise with the dest address
+ * if dest is known.
+ */
+ ehdr = (struct tipc_ehdr *)skb->data;
+ salt = aead->salt;
+ if (aead->mode == CLUSTER_KEY)
+ salt ^= ehdr->addr; /* __be32 */
+ else if (__dnode)
+ salt ^= tipc_node_get_addr(__dnode);
+ memcpy(iv, &salt, 4);
+ memcpy(iv + 4, (u8 *)&ehdr->seqno, 8);
+
+ /* Prepare request */
+ ehsz = tipc_ehdr_size(ehdr);
+ aead_request_set_tfm(req, tfm);
+ aead_request_set_ad(req, ehsz);
+ aead_request_set_crypt(req, sg, sg, len - ehsz, iv);
+
+ /* Set callback function & data */
+ aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ tipc_aead_encrypt_done, skb);
+ tx_ctx = (struct tipc_crypto_tx_ctx *)ctx;
+ tx_ctx->aead = aead;
+ tx_ctx->bearer = b;
+ memcpy(&tx_ctx->dst, dst, sizeof(*dst));
+
+ /* Hold bearer */
+ if (unlikely(!tipc_bearer_hold(b))) {
+ rc = -ENODEV;
+ goto exit;
+ }
+
+ /* Now, do encrypt */
+ rc = crypto_aead_encrypt(req);
+ if (rc == -EINPROGRESS || rc == -EBUSY)
+ return rc;
+
+ tipc_bearer_put(b);
+
+exit:
+ kfree(ctx);
+ TIPC_SKB_CB(skb)->crypto_ctx = NULL;
+ return rc;
+}
+
+static void tipc_aead_encrypt_done(struct crypto_async_request *base, int err)
+{
+ struct sk_buff *skb = base->data;
+ struct tipc_crypto_tx_ctx *tx_ctx = TIPC_SKB_CB(skb)->crypto_ctx;
+ struct tipc_bearer *b = tx_ctx->bearer;
+ struct tipc_aead *aead = tx_ctx->aead;
+ struct tipc_crypto *tx = aead->crypto;
+ struct net *net = tx->net;
+
+ switch (err) {
+ case 0:
+ this_cpu_inc(tx->stats->stat[STAT_ASYNC_OK]);
+ if (likely(test_bit(0, &b->up)))
+ b->media->send_msg(net, skb, b, &tx_ctx->dst);
+ else
+ kfree_skb(skb);
+ break;
+ case -EINPROGRESS:
+ return;
+ default:
+ this_cpu_inc(tx->stats->stat[STAT_ASYNC_NOK]);
+ kfree_skb(skb);
+ break;
+ }
+
+ kfree(tx_ctx);
+ tipc_bearer_put(b);
+ tipc_aead_put(aead);
+}
+
+/**
+ * tipc_aead_decrypt - Decrypt an encrypted message
+ * @net: struct net
+ * @aead: TIPC AEAD for the message decryption
+ * @skb: the input/output skb
+ * @b: TIPC bearer where the message has been received
+ *
+ * Return:
+ * 0 : if the decryption has completed
+ * -EINPROGRESS/-EBUSY : if a callback will be performed
+ * < 0 : the decryption has failed
+ */
+static int tipc_aead_decrypt(struct net *net, struct tipc_aead *aead,
+ struct sk_buff *skb, struct tipc_bearer *b)
+{
+ struct tipc_crypto_rx_ctx *rx_ctx;
+ struct aead_request *req;
+ struct crypto_aead *tfm;
+ struct sk_buff *unused;
+ struct scatterlist *sg;
+ struct tipc_ehdr *ehdr;
+ int ehsz, nsg, rc;
+ void *ctx;
+ u32 salt;
+ u8 *iv;
+
+ if (unlikely(!aead))
+ return -ENOKEY;
+
+ /* Cow skb data if needed */
+ if (likely(!skb_cloned(skb) &&
+ (!skb_is_nonlinear(skb) || !skb_has_frag_list(skb)))) {
+ nsg = 1 + skb_shinfo(skb)->nr_frags;
+ } else {
+ nsg = skb_cow_data(skb, 0, &unused);
+ if (unlikely(nsg < 0)) {
+ pr_err("RX: skb_cow_data() returned %d\n", nsg);
+ return nsg;
+ }
+ }
+
+ /* Allocate memory for the AEAD operation */
+ tfm = tipc_aead_tfm_next(aead);
+ ctx = tipc_aead_mem_alloc(tfm, sizeof(*rx_ctx), &iv, &req, &sg, nsg);
+ if (unlikely(!ctx))
+ return -ENOMEM;
+ TIPC_SKB_CB(skb)->crypto_ctx = ctx;
+
+ /* Map skb to the sg lists */
+ sg_init_table(sg, nsg);
+ rc = skb_to_sgvec(skb, sg, 0, skb->len);
+ if (unlikely(rc < 0)) {
+ pr_err("RX: skb_to_sgvec() returned %d, nsg %d\n", rc, nsg);
+ goto exit;
+ }
+
+ /* Reconstruct the IV: [SALT (4 octets)][SEQNO (8 octets)] */
+ ehdr = (struct tipc_ehdr *)skb->data;
+ salt = aead->salt;
+ if (aead->mode == CLUSTER_KEY)
+ salt ^= ehdr->addr; /* __be32 */
+ else if (ehdr->destined)
+ salt ^= tipc_own_addr(net);
+ memcpy(iv, &salt, 4);
+ memcpy(iv + 4, (u8 *)&ehdr->seqno, 8);
+
+ /* Prepare request */
+ ehsz = tipc_ehdr_size(ehdr);
+ aead_request_set_tfm(req, tfm);
+ aead_request_set_ad(req, ehsz);
+ aead_request_set_crypt(req, sg, sg, skb->len - ehsz, iv);
+
+ /* Set callback function & data */
+ aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ tipc_aead_decrypt_done, skb);
+ rx_ctx = (struct tipc_crypto_rx_ctx *)ctx;
+ rx_ctx->aead = aead;
+ rx_ctx->bearer = b;
+
+ /* Hold bearer */
+ if (unlikely(!tipc_bearer_hold(b))) {
+ rc = -ENODEV;
+ goto exit;
+ }
+
+ /* Now, do decrypt */
+ rc = crypto_aead_decrypt(req);
+ if (rc == -EINPROGRESS || rc == -EBUSY)
+ return rc;
+
+ tipc_bearer_put(b);
+
+exit:
+ kfree(ctx);
+ TIPC_SKB_CB(skb)->crypto_ctx = NULL;
+ return rc;
+}
+
+static void tipc_aead_decrypt_done(struct crypto_async_request *base, int err)
+{
+ struct sk_buff *skb = base->data;
+ struct tipc_crypto_rx_ctx *rx_ctx = TIPC_SKB_CB(skb)->crypto_ctx;
+ struct tipc_bearer *b = rx_ctx->bearer;
+ struct tipc_aead *aead = rx_ctx->aead;
+ struct tipc_crypto_stats __percpu *stats = aead->crypto->stats;
+ struct net *net = aead->crypto->net;
+
+ switch (err) {
+ case 0:
+ this_cpu_inc(stats->stat[STAT_ASYNC_OK]);
+ break;
+ case -EINPROGRESS:
+ return;
+ default:
+ this_cpu_inc(stats->stat[STAT_ASYNC_NOK]);
+ break;
+ }
+
+ kfree(rx_ctx);
+ tipc_crypto_rcv_complete(net, aead, b, &skb, err);
+ if (likely(skb)) {
+ if (likely(test_bit(0, &b->up)))
+ tipc_rcv(net, skb, b);
+ else
+ kfree_skb(skb);
+ }
+
+ tipc_bearer_put(b);
+}
+
+static inline int tipc_ehdr_size(struct tipc_ehdr *ehdr)
+{
+ return (ehdr->user != LINK_CONFIG) ? EHDR_SIZE : EHDR_CFG_SIZE;
+}
+
+/**
+ * tipc_ehdr_validate - Validate an encryption message
+ * @skb: the message buffer
+ *
+ * Returns "true" if this is a valid encryption message, otherwise "false"
+ */
+bool tipc_ehdr_validate(struct sk_buff *skb)
+{
+ struct tipc_ehdr *ehdr;
+ int ehsz;
+
+ if (unlikely(!pskb_may_pull(skb, EHDR_MIN_SIZE)))
+ return false;
+
+ ehdr = (struct tipc_ehdr *)skb->data;
+ if (unlikely(ehdr->version != TIPC_EVERSION))
+ return false;
+ ehsz = tipc_ehdr_size(ehdr);
+ if (unlikely(!pskb_may_pull(skb, ehsz)))
+ return false;
+ if (unlikely(skb->len <= ehsz + TIPC_AES_GCM_TAG_SIZE))
+ return false;
+ if (unlikely(!ehdr->tx_key))
+ return false;
+
+ return true;
+}
+
+/**
+ * tipc_ehdr_build - Build TIPC encryption message header
+ * @net: struct net
+ * @aead: TX AEAD key to be used for the message encryption
+ * @tx_key: key id used for the message encryption
+ * @skb: input/output message skb
+ * @__rx: RX crypto handle if dest is "known"
+ *
+ * Return: the header size if the building is successful, otherwise < 0
+ */
+static int tipc_ehdr_build(struct net *net, struct tipc_aead *aead,
+ u8 tx_key, struct sk_buff *skb,
+ struct tipc_crypto *__rx)
+{
+ struct tipc_msg *hdr = buf_msg(skb);
+ struct tipc_ehdr *ehdr;
+ u32 user = msg_user(hdr);
+ u64 seqno;
+ int ehsz;
+
+ /* Make room for encryption header */
+ ehsz = (user != LINK_CONFIG) ? EHDR_SIZE : EHDR_CFG_SIZE;
+ WARN_ON(skb_headroom(skb) < ehsz);
+ ehdr = (struct tipc_ehdr *)skb_push(skb, ehsz);
+
+ /* Obtain a seqno first:
+ * Use the key seqno (= cluster wide) if the dest is unknown or we are in
+ * cluster key mode; otherwise a per-peer seqno is better!
+ */
+ if (!__rx || aead->mode == CLUSTER_KEY)
+ seqno = atomic64_inc_return(&aead->seqno);
+ else
+ seqno = atomic64_inc_return(&__rx->sndnxt);
+
+ /* Revoke the key if seqno is wrapped around */
+ if (unlikely(!seqno))
+ return tipc_crypto_key_revoke(net, tx_key);
+
+ /* Word 1-2 */
+ ehdr->seqno = cpu_to_be64(seqno);
+
+ /* Words 0, 3- */
+ ehdr->version = TIPC_EVERSION;
+ ehdr->user = 0;
+ ehdr->keepalive = 0;
+ ehdr->tx_key = tx_key;
+ ehdr->destined = (__rx) ? 1 : 0;
+ ehdr->rx_key_active = (__rx) ? __rx->key.active : 0;
+ ehdr->reserved_1 = 0;
+ ehdr->reserved_2 = 0;
+
+ switch (user) {
+ case LINK_CONFIG:
+ ehdr->user = LINK_CONFIG;
+ memcpy(ehdr->id, tipc_own_id(net), NODE_ID_LEN);
+ break;
+ default:
+ if (user == LINK_PROTOCOL && msg_type(hdr) == STATE_MSG) {
+ ehdr->user = LINK_PROTOCOL;
+ ehdr->keepalive = msg_is_keepalive(hdr);
+ }
+ ehdr->addr = hdr->hdr[3];
+ break;
+ }
+
+ return ehsz;
+}
+
+static inline void tipc_crypto_key_set_state(struct tipc_crypto *c,
+ u8 new_passive,
+ u8 new_active,
+ u8 new_pending)
+{
+#ifdef TIPC_CRYPTO_DEBUG
+ struct tipc_key old = c->key;
+ char buf[32];
+#endif
+
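+ /* Pack the three 2-bit indexes into one byte (cf. struct tipc_key) */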
+ c->key.keys = ((new_passive & KEY_MASK) << (KEY_BITS * 2)) |
+ ((new_active & KEY_MASK) << (KEY_BITS)) |
+ ((new_pending & KEY_MASK));
+
+#ifdef TIPC_CRYPTO_DEBUG
+ pr_info("%s(%s): key changing %s ::%pS\n",
+ (c->node) ? "RX" : "TX",
+ (c->node) ? tipc_node_get_id_str(c->node) :
+ tipc_own_id_string(c->net),
+ tipc_key_change_dump(old, c->key, buf),
+ __builtin_return_address(0));
+#endif
+}
+
+/**
+ * tipc_crypto_key_init - Initiate a new user / AEAD key
+ * @c: TIPC crypto to which new key is attached
+ * @ukey: the user key
+ * @mode: the key mode (CLUSTER_KEY or PER_NODE_KEY)
+ *
+ * A new TIPC AEAD key will be allocated and initiated with the specified user
+ * key, then attached to the TIPC crypto.
+ *
+ * Return: new key id in case of success, otherwise: < 0
+ */
+int tipc_crypto_key_init(struct tipc_crypto *c, struct tipc_aead_key *ukey,
+ u8 mode)
+{
+ struct tipc_aead *aead = NULL;
+ int rc = 0;
+
+ /* Initiate with the new user key */
+ rc = tipc_aead_init(&aead, ukey, mode);
+
+ /* Attach it to the crypto */
+ if (likely(!rc)) {
+ rc = tipc_crypto_key_attach(c, aead, 0);
+ if (rc < 0)
+ tipc_aead_free(&aead->rcu);
+ }
+
+ pr_info("%s(%s): key initiating, rc %d!\n",
+ (c->node) ? "RX" : "TX",
+ (c->node) ? tipc_node_get_id_str(c->node) :
+ tipc_own_id_string(c->net),
+ rc);
+
+ return rc;
+}
+
+/**
+ * tipc_crypto_key_attach - Attach a new AEAD key to TIPC crypto
+ * @c: TIPC crypto to which the new AEAD key is attached
+ * @aead: the new AEAD key pointer
+ * @pos: desired slot in the crypto key array, = 0 if any!
+ *
+ * Return: new key id in case of success, otherwise: -EBUSY
+ */
+static int tipc_crypto_key_attach(struct tipc_crypto *c,
+ struct tipc_aead *aead, u8 pos)
+{
+ u8 new_pending, new_passive, new_key;
+ struct tipc_key key;
+ int rc = -EBUSY;
+
+ spin_lock_bh(&c->lock);
+ key = c->key;
+ if (key.active && key.passive)
+ goto exit;
+ if (key.passive && !tipc_aead_users(c->aead[key.passive]))
+ goto exit;
+ if (key.pending) {
+ if (pos)
+ goto exit;
+ if (tipc_aead_users(c->aead[key.pending]) > 0)
+ goto exit;
+ /* Replace it */
+ new_pending = key.pending;
+ new_passive = key.passive;
+ new_key = new_pending;
+ } else {
+ if (pos) {
+ if (key.active && pos != key_next(key.active)) {
+ new_pending = key.pending;
+ new_passive = pos;
+ new_key = new_passive;
+ goto attach;
+ } else if (!key.active && !key.passive) {
+ new_pending = pos;
+ new_passive = key.passive;
+ new_key = new_pending;
+ goto attach;
+ }
+ }
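+ /* By default, take the slot after the current active (or passive) key */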
+ new_pending = key_next(key.active ?: key.passive);
+ new_passive = key.passive;
+ new_key = new_pending;
+ }
+
+attach:
+ aead->crypto = c;
+ tipc_crypto_key_set_state(c, new_passive, key.active, new_pending);
+ tipc_aead_rcu_replace(c->aead[new_key], aead, &c->lock);
+
+ c->working = 1;
+ c->timer1 = jiffies;
+ c->timer2 = jiffies;
+ rc = new_key;
+
+exit:
+ spin_unlock_bh(&c->lock);
+ return rc;
+}
+
+void tipc_crypto_key_flush(struct tipc_crypto *c)
+{
+ int k;
+
+ spin_lock_bh(&c->lock);
+ c->working = 0;
+ tipc_crypto_key_set_state(c, 0, 0, 0);
+ for (k = KEY_MIN; k <= KEY_MAX; k++)
+ tipc_crypto_key_detach(c->aead[k], &c->lock);
+ atomic_set(&c->peer_rx_active, 0);
+ atomic64_set(&c->sndnxt, 0);
+ spin_unlock_bh(&c->lock);
+}
+
+/**
+ * tipc_crypto_key_try_align - Align RX keys if possible
+ * @rx: RX crypto handle
+ * @new_pending: new pending slot if aligned (= TX key from peer)
+ *
+ * Peer has used an unknown key slot; this only happens when the peer has left
+ * and rejoined, or we are a newcomer.
+ * That means there must be no active key but a pending key at an unaligned
+ * slot. If so, we try to move the pending key to the new slot.
+ * Note: A potential passive key can exist; it will be shifted correspondingly!
+ *
+ * Return: "true" if key is successfully aligned, otherwise "false"
+ */
+static bool tipc_crypto_key_try_align(struct tipc_crypto *rx, u8 new_pending)
+{
+ struct tipc_aead *tmp1, *tmp2 = NULL;
+ struct tipc_key key;
+ bool aligned = false;
+ u8 new_passive = 0;
+ int x;
+
+ spin_lock(&rx->lock);
+ key = rx->key;
+ if (key.pending == new_pending) {
+ aligned = true;
+ goto exit;
+ }
+ if (key.active)
+ goto exit;
+ if (!key.pending)
+ goto exit;
+ if (tipc_aead_users(rx->aead[key.pending]) > 0)
+ goto exit;
+
+ /* Try to "isolate" this pending key first */
+ tmp1 = tipc_aead_rcu_ptr(rx->aead[key.pending], &rx->lock);
+ if (!refcount_dec_if_one(&tmp1->refcnt))
+ goto exit;
+ rcu_assign_pointer(rx->aead[key.pending], NULL);
+
+ /* Move passive key if any */
+ if (key.passive) {
+ tipc_aead_rcu_swap(rx->aead[key.passive], tmp2, &rx->lock);
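+ /* Shift the passive slot by the same distance the pending key moves */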
+ x = (key.passive - key.pending + new_pending) % KEY_MAX;
+ new_passive = (x <= 0) ? x + KEY_MAX : x;
+ }
+
+ /* Re-allocate the key(s) */
+ tipc_crypto_key_set_state(rx, new_passive, 0, new_pending);
+ rcu_assign_pointer(rx->aead[new_pending], tmp1);
+ if (new_passive)
+ rcu_assign_pointer(rx->aead[new_passive], tmp2);
+ refcount_set(&tmp1->refcnt, 1);
+ aligned = true;
+ pr_info("RX(%s): key is aligned!\n", tipc_node_get_id_str(rx->node));
+
+exit:
+ spin_unlock(&rx->lock);
+ return aligned;
+}
+
+/**
+ * tipc_crypto_key_pick_tx - Pick one TX key for message decryption
+ * @tx: TX crypto handle
+ * @rx: RX crypto handle (can be NULL)
+ * @skb: the message skb which will be decrypted later
+ *
+ * This function looks up the existing TX keys and picks one that is suitable
+ * for the message decryption: it must be a cluster key and must not have been
+ * used before on the same message (i.e. recursively).
+ *
+ * Return: the TX AEAD key handle in case of success, otherwise NULL
+ */
+static struct tipc_aead *tipc_crypto_key_pick_tx(struct tipc_crypto *tx,
+ struct tipc_crypto *rx,
+ struct sk_buff *skb)
+{
+ struct tipc_skb_cb *skb_cb = TIPC_SKB_CB(skb);
+ struct tipc_aead *aead = NULL;
+ struct tipc_key key = tx->key;
+ u8 k, i = 0;
+
+ /* Initialize data if not yet */
+ if (!skb_cb->tx_clone_deferred) {
+ skb_cb->tx_clone_deferred = 1;
+ memset(&skb_cb->tx_clone_ctx, 0, sizeof(skb_cb->tx_clone_ctx));
+ }
+
+ skb_cb->tx_clone_ctx.rx = rx;
+ if (++skb_cb->tx_clone_ctx.recurs > 2)
+ return NULL;
+
+ /* Pick one TX key */
+ spin_lock(&tx->lock);
+ do {
+ k = (i == 0) ? key.pending :
+ ((i == 1) ? key.active : key.passive);
+ if (!k)
+ continue;
+ aead = tipc_aead_rcu_ptr(tx->aead[k], &tx->lock);
+ if (!aead)
+ continue;
+ if (aead->mode != CLUSTER_KEY ||
+ aead == skb_cb->tx_clone_ctx.last) {
+ aead = NULL;
+ continue;
+ }
+ /* Ok, found one cluster key */
+ skb_cb->tx_clone_ctx.last = aead;
+ WARN_ON(skb->next);
+ skb->next = skb_clone(skb, GFP_ATOMIC);
+ if (unlikely(!skb->next))
+ pr_warn("Failed to clone skb for next round if any\n");
+ WARN_ON(!refcount_inc_not_zero(&aead->refcnt));
+ break;
+ } while (++i < 3);
+ spin_unlock(&tx->lock);
+
+ return aead;
+}
+
+/**
+ * tipc_crypto_key_synch: Synch own key data according to peer key status
+ * @rx: RX crypto handle
+ * @new_rx_active: latest RX active key from peer
+ * @hdr: TIPCv2 message
+ *
+ * This function updates the peer node related data when the peer RX active key
+ * has changed, so the numbers of users of the TX keys on this node are
+ * increased and decreased correspondingly.
+ *
+ * The "per-peer" sndnxt is also reset when the peer key has switched.
+ */
+static void tipc_crypto_key_synch(struct tipc_crypto *rx, u8 new_rx_active,
+ struct tipc_msg *hdr)
+{
+ struct net *net = rx->net;
+ struct tipc_crypto *tx = tipc_net(net)->crypto_tx;
+ u8 cur_rx_active;
+
+ /* TX might not even be ready yet */
+ if (unlikely(!tx->key.active && !tx->key.pending))
+ return;
+
+ cur_rx_active = atomic_read(&rx->peer_rx_active);
+ if (likely(cur_rx_active == new_rx_active))
+ return;
+
+ /* Make sure this message is destined for this node */
+ if (unlikely(msg_short(hdr) ||
+ msg_destnode(hdr) != tipc_own_addr(net)))
+ return;
+
+ /* Peer RX active key has changed, try to update own data & TX key users */
+ if (atomic_cmpxchg(&rx->peer_rx_active,
+ cur_rx_active,
+ new_rx_active) == cur_rx_active) {
+ if (new_rx_active)
+ tipc_aead_users_inc(tx->aead[new_rx_active], INT_MAX);
+ if (cur_rx_active)
+ tipc_aead_users_dec(tx->aead[cur_rx_active], 0);
+
+ atomic64_set(&rx->sndnxt, 0);
+ /* Mark the point TX key users changed */
+ tx->timer1 = jiffies;
+
+#ifdef TIPC_CRYPTO_DEBUG
+ pr_info("TX(%s): key users changed %d-- %d++, peer RX(%s)\n",
+ tipc_own_id_string(net), cur_rx_active,
+ new_rx_active, tipc_node_get_id_str(rx->node));
+#endif
+ }
+}
+
+static int tipc_crypto_key_revoke(struct net *net, u8 tx_key)
+{
+ struct tipc_crypto *tx = tipc_net(net)->crypto_tx;
+ struct tipc_key key;
+
+ spin_lock(&tx->lock);
+ key = tx->key;
+ WARN_ON(!key.active || tx_key != key.active);
+
+ /* Free the active key */
+ tipc_crypto_key_set_state(tx, key.passive, 0, key.pending);
+ tipc_crypto_key_detach(tx->aead[key.active], &tx->lock);
+ spin_unlock(&tx->lock);
+
+ pr_warn("TX(%s): key is revoked!\n", tipc_own_id_string(net));
+ return -EKEYREVOKED;
+}
+
+int tipc_crypto_start(struct tipc_crypto **crypto, struct net *net,
+ struct tipc_node *node)
+{
+ struct tipc_crypto *c;
+
+ if (*crypto)
+ return -EEXIST;
+
+ /* Allocate crypto */
+ c = kzalloc(sizeof(*c), GFP_ATOMIC);
+ if (!c)
+ return -ENOMEM;
+
+ /* Allocate statistic structure */
+ c->stats = alloc_percpu_gfp(struct tipc_crypto_stats, GFP_ATOMIC);
+ if (!c->stats) {
+ kzfree(c);
+ return -ENOMEM;
+ }
+
+ c->working = 0;
+ c->net = net;
+ c->node = node;
+ tipc_crypto_key_set_state(c, 0, 0, 0);
+ atomic_set(&c->peer_rx_active, 0);
+ atomic64_set(&c->sndnxt, 0);
+ c->timer1 = jiffies;
+ c->timer2 = jiffies;
+ spin_lock_init(&c->lock);
+ *crypto = c;
+
+ return 0;
+}
+
+void tipc_crypto_stop(struct tipc_crypto **crypto)
+{
+ struct tipc_crypto *c, *tx, *rx;
+ bool is_rx;
+ u8 k;
+
+ if (!*crypto)
+ return;
+
+ rcu_read_lock();
+ /* RX stopping? => decrease TX key users if any */
+ is_rx = !!((*crypto)->node);
+ if (is_rx) {
+ rx = *crypto;
+ tx = tipc_net(rx->net)->crypto_tx;
+ k = atomic_read(&rx->peer_rx_active);
+ if (k) {
+ tipc_aead_users_dec(tx->aead[k], 0);
+ /* Mark the point TX key users changed */
+ tx->timer1 = jiffies;
+ }
+ }
+
+ /* Release AEAD keys */
+ c = *crypto;
+ for (k = KEY_MIN; k <= KEY_MAX; k++)
+ tipc_aead_put(rcu_dereference(c->aead[k]));
+ rcu_read_unlock();
+
+ pr_warn("%s(%s) has been purged, node left!\n",
+ (is_rx) ? "RX" : "TX",
+ (is_rx) ? tipc_node_get_id_str((*crypto)->node) :
+ tipc_own_id_string((*crypto)->net));
+
+ /* Free this crypto statistics */
+ free_percpu(c->stats);
+
+ *crypto = NULL;
+ kzfree(c);
+}
+
+void tipc_crypto_timeout(struct tipc_crypto *rx)
+{
+ struct tipc_net *tn = tipc_net(rx->net);
+ struct tipc_crypto *tx = tn->crypto_tx;
+ struct tipc_key key;
+ u8 new_pending, new_passive;
+ int cmd;
+
+ /* TX key activating:
+ * The pending key (users > 0) -> active
+ * The active key if any (users == 0) -> free
+ */
+ spin_lock(&tx->lock);
+ key = tx->key;
+ if (key.active && tipc_aead_users(tx->aead[key.active]) > 0)
+ goto s1;
+ if (!key.pending || tipc_aead_users(tx->aead[key.pending]) <= 0)
+ goto s1;
+ if (time_before(jiffies, tx->timer1 + TIPC_TX_LASTING_LIM))
+ goto s1;
+
+ tipc_crypto_key_set_state(tx, key.passive, key.pending, 0);
+ if (key.active)
+ tipc_crypto_key_detach(tx->aead[key.active], &tx->lock);
+ this_cpu_inc(tx->stats->stat[STAT_SWITCHES]);
+ pr_info("TX(%s): key %d is activated!\n", tipc_own_id_string(tx->net),
+ key.pending);
+
+s1:
+ spin_unlock(&tx->lock);
+
+ /* RX key activating:
+ * The pending key (users > 0) -> active
+ * The active key if any -> passive, freed later
+ */
+ spin_lock(&rx->lock);
+ key = rx->key;
+ if (!key.pending || tipc_aead_users(rx->aead[key.pending]) <= 0)
+ goto s2;
+
+ new_pending = (key.passive &&
+ !tipc_aead_users(rx->aead[key.passive])) ?
+ key.passive : 0;
+ new_passive = (key.active) ?: ((new_pending) ? 0 : key.passive);
+ tipc_crypto_key_set_state(rx, new_passive, key.pending, new_pending);
+ this_cpu_inc(rx->stats->stat[STAT_SWITCHES]);
+ pr_info("RX(%s): key %d is activated!\n",
+ tipc_node_get_id_str(rx->node), key.pending);
+ goto s5;
+
+s2:
+ /* RX key "faulty" switching:
+ * The faulty pending key (users < -30) -> passive
+ * The passive key (users = 0) -> pending
+ * Note: This only happens after RX has been deactivated - see s3!
+ */
+ key = rx->key;
+ if (!key.pending || tipc_aead_users(rx->aead[key.pending]) > -30)
+ goto s3;
+ if (!key.passive || tipc_aead_users(rx->aead[key.passive]) != 0)
+ goto s3;
+
+ new_pending = key.passive;
+ new_passive = key.pending;
+ tipc_crypto_key_set_state(rx, new_passive, key.active, new_pending);
+ goto s5;
+
+s3:
+ /* RX key deactivating:
+ * The passive key if any -> pending
+ * The active key -> passive (users = 0) / pending
+ * The pending key if any -> passive (users = 0)
+ */
+ key = rx->key;
+ if (!key.active)
+ goto s4;
+ if (time_before(jiffies, rx->timer1 + TIPC_RX_ACTIVE_LIM))
+ goto s4;
+
+ new_pending = (key.passive) ?: key.active;
+ new_passive = (key.passive) ? key.active : key.pending;
+ tipc_aead_users_set(rx->aead[new_pending], 0);
+ if (new_passive)
+ tipc_aead_users_set(rx->aead[new_passive], 0);
+ tipc_crypto_key_set_state(rx, new_passive, 0, new_pending);
+ pr_info("RX(%s): key %d is deactivated!\n",
+ tipc_node_get_id_str(rx->node), key.active);
+ goto s5;
+
+s4:
+ /* RX key passive -> freed: */
+ key = rx->key;
+ if (!key.passive || !tipc_aead_users(rx->aead[key.passive]))
+ goto s5;
+ if (time_before(jiffies, rx->timer2 + TIPC_RX_PASSIVE_LIM))
+ goto s5;
+
+ tipc_crypto_key_set_state(rx, 0, key.active, key.pending);
+ tipc_crypto_key_detach(rx->aead[key.passive], &rx->lock);
+ pr_info("RX(%s): key %d is freed!\n", tipc_node_get_id_str(rx->node),
+ key.passive);
+
+s5:
+ spin_unlock(&rx->lock);
+
+ /* Limit max_tfms & do debug commands if needed */
+ if (likely(sysctl_tipc_max_tfms <= TIPC_MAX_TFMS_LIM))
+ return;
+
+ cmd = sysctl_tipc_max_tfms;
+ sysctl_tipc_max_tfms = TIPC_MAX_TFMS_DEF;
+ tipc_crypto_do_cmd(rx->net, cmd);
+}
+
+/**
+ * tipc_crypto_xmit - Build & encrypt TIPC message for xmit
+ * @net: struct net
+ * @skb: input/output message skb pointer
+ * @b: bearer used for xmit later
+ * @dst: destination media address
+ * @__dnode: destination node for reference if any
+ *
+ * First, build an encryption message header on top of the message, then
+ * encrypt the original TIPC message by using the active or pending TX key.
+ * If the encryption is successful, the encrypted skb is returned directly or
+ * via the callback.
+ * Otherwise, the skb is freed!
+ *
+ * Return:
+ * 0 : the encryption has succeeded (or no encryption)
+ * -EINPROGRESS/-EBUSY : the encryption is ongoing, a callback will be made
+ * -ENOKEY : the encryption has failed due to no key
+ * -EKEYREVOKED : the encryption has failed due to key revoked
+ * -ENOMEM : the encryption has failed due to no memory
+ * < 0 : the encryption has failed due to other reasons
+ */
+int tipc_crypto_xmit(struct net *net, struct sk_buff **skb,
+ struct tipc_bearer *b, struct tipc_media_addr *dst,
+ struct tipc_node *__dnode)
+{
+ struct tipc_crypto *__rx = tipc_node_crypto_rx(__dnode);
+ struct tipc_crypto *tx = tipc_net(net)->crypto_tx;
+ struct tipc_crypto_stats __percpu *stats = tx->stats;
+ struct tipc_key key = tx->key;
+ struct tipc_aead *aead = NULL;
+ struct sk_buff *probe;
+ int rc = -ENOKEY;
+ u8 tx_key;
+
+ /* No encryption? */
+ if (!tx->working)
+ return 0;
+
+ /* Try with the pending key if available and:
+ * 1) This is the only choice (i.e. no active key) or;
+ * 2) Peer has switched to this key (unicast only) or;
+ * 3) It is time to do a pending key probe;
+ */
+ if (unlikely(key.pending)) {
+ tx_key = key.pending;
+ if (!key.active)
+ goto encrypt;
+ if (__rx && atomic_read(&__rx->peer_rx_active) == tx_key)
+ goto encrypt;
+ if (TIPC_SKB_CB(*skb)->probe)
+ goto encrypt;
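+ /* With no known destination: is it time to probe the pending key? */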
+ if (!__rx &&
+ time_after(jiffies, tx->timer2 + TIPC_TX_PROBE_LIM)) {
+ tx->timer2 = jiffies;
+ probe = skb_clone(*skb, GFP_ATOMIC);
+ if (probe) {
+ TIPC_SKB_CB(probe)->probe = 1;
+ tipc_crypto_xmit(net, &probe, b, dst, __dnode);
+ if (probe)
+ b->media->send_msg(net, probe, b, dst);
+ }
+ }
+ }
+ /* Else, use the active key if any */
+ if (likely(key.active)) {
+ tx_key = key.active;
+ goto encrypt;
+ }
+ goto exit;
+
+encrypt:
+ aead = tipc_aead_get(tx->aead[tx_key]);
+ if (unlikely(!aead))
+ goto exit;
+ rc = tipc_ehdr_build(net, aead, tx_key, *skb, __rx);
+ if (likely(rc > 0))
+ rc = tipc_aead_encrypt(aead, *skb, b, dst, __dnode);
+
+exit:
+ switch (rc) {
+ case 0:
+ this_cpu_inc(stats->stat[STAT_OK]);
+ break;
+ case -EINPROGRESS:
+ case -EBUSY:
+ this_cpu_inc(stats->stat[STAT_ASYNC]);
+ *skb = NULL;
+ return rc;
+ default:
+ this_cpu_inc(stats->stat[STAT_NOK]);
+ if (rc == -ENOKEY)
+ this_cpu_inc(stats->stat[STAT_NOKEYS]);
+ else if (rc == -EKEYREVOKED)
+ this_cpu_inc(stats->stat[STAT_BADKEYS]);
+ kfree_skb(*skb);
+ *skb = NULL;
+ break;
+ }
+
+ tipc_aead_put(aead);
+ return rc;
+}
+
+/**
+ * tipc_crypto_rcv - Decrypt an encrypted TIPC message from peer
+ * @net: struct net
+ * @rx: RX crypto handle
+ * @skb: input/output message skb pointer
+ * @b: bearer where the message has been received
+ *
+ * If the decryption is successful, the decrypted skb is returned directly or
+ * via the callback; the encryption header and auth tag will be trimmed off
+ * before forwarding to tipc_rcv() via tipc_crypto_rcv_complete().
+ * Otherwise, the skb will be freed!
+ * Note: RX key(s) can be re-aligned, or in case no suitable key is found, TX
+ * cluster key(s) can be taken for the decryption (i.e. recursively).
+ *
+ * Return:
+ * 0 : the decryption has successfully completed
+ * -EINPROGRESS/-EBUSY : the decryption is ongoing, a callback will be made
+ * -ENOKEY : the decryption has failed due to no key
+ * -EBADMSG : the decryption has failed due to bad message
+ * -ENOMEM : the decryption has failed due to no memory
+ * < 0 : the decryption has failed due to other reasons
+ */
+int tipc_crypto_rcv(struct net *net, struct tipc_crypto *rx,
+ struct sk_buff **skb, struct tipc_bearer *b)
+{
+ struct tipc_crypto *tx = tipc_net(net)->crypto_tx;
+ struct tipc_crypto_stats __percpu *stats;
+ struct tipc_aead *aead = NULL;
+ struct tipc_key key;
+ int rc = -ENOKEY;
+ u8 tx_key = 0;
+
+ /* New peer?
+ * Let's try with TX key (i.e. cluster mode) & verify the skb first!
+ */
+ if (unlikely(!rx))
+ goto pick_tx;
+
+ /* Pick RX key according to TX key, three cases are possible:
+ * 1) The current active key (likely) or;
+ * 2) The pending (new or deactivated) key (if any) or;
+ * 3) The passive or old active key (i.e. users > 0);
+ */
+ tx_key = ((struct tipc_ehdr *)(*skb)->data)->tx_key;
+ key = rx->key;
+ if (likely(tx_key == key.active))
+ goto decrypt;
+ if (tx_key == key.pending)
+ goto decrypt;
+ if (tx_key == key.passive) {
+ rx->timer2 = jiffies;
+ if (tipc_aead_users(rx->aead[key.passive]) > 0)
+ goto decrypt;
+ }
+
+ /* Unknown key, let's try to align RX key(s) */
+ if (tipc_crypto_key_try_align(rx, tx_key))
+ goto decrypt;
+
+pick_tx:
+ /* No suitable key? Try to pick one from TX... */
+ aead = tipc_crypto_key_pick_tx(tx, rx, *skb);
+ if (aead)
+ goto decrypt;
+ goto exit;
+
+decrypt:
+ rcu_read_lock();
+ if (!aead)
+ aead = tipc_aead_get(rx->aead[tx_key]);
+ rc = tipc_aead_decrypt(net, aead, *skb, b);
+ rcu_read_unlock();
+
+exit:
+ stats = ((rx) ?: tx)->stats;
+ switch (rc) {
+ case 0:
+ this_cpu_inc(stats->stat[STAT_OK]);
+ break;
+ case -EINPROGRESS:
+ case -EBUSY:
+ this_cpu_inc(stats->stat[STAT_ASYNC]);
+ *skb = NULL;
+ return rc;
+ default:
+ this_cpu_inc(stats->stat[STAT_NOK]);
+ if (rc == -ENOKEY) {
+ kfree_skb(*skb);
+ *skb = NULL;
+ if (rx)
+ tipc_node_put(rx->node);
+ this_cpu_inc(stats->stat[STAT_NOKEYS]);
+ return rc;
+ } else if (rc == -EBADMSG) {
+ this_cpu_inc(stats->stat[STAT_BADMSGS]);
+ }
+ break;
+ }
+
+ tipc_crypto_rcv_complete(net, aead, b, skb, rc);
+ return rc;
+}
+
+static void tipc_crypto_rcv_complete(struct net *net, struct tipc_aead *aead,
+ struct tipc_bearer *b,
+ struct sk_buff **skb, int err)
+{
+ struct tipc_skb_cb *skb_cb = TIPC_SKB_CB(*skb);
+ struct tipc_crypto *rx = aead->crypto;
+ struct tipc_aead *tmp = NULL;
+ struct tipc_ehdr *ehdr;
+ struct tipc_node *n;
+ u8 rx_key_active;
+ bool destined;
+
+ /* Was this completed by TX (i.e. decrypted with a picked TX key)? */
+ if (unlikely(!rx->node)) {
+ rx = skb_cb->tx_clone_ctx.rx;
+#ifdef TIPC_CRYPTO_DEBUG
+ pr_info("TX->RX(%s): err %d, aead %p, skb->next %p, flags %x\n",
+ (rx) ? tipc_node_get_id_str(rx->node) : "-", err, aead,
+ (*skb)->next, skb_cb->flags);
+ pr_info("skb_cb [recurs %d, last %p], tx->aead [%p %p %p]\n",
+ skb_cb->tx_clone_ctx.recurs, skb_cb->tx_clone_ctx.last,
+ aead->crypto->aead[1], aead->crypto->aead[2],
+ aead->crypto->aead[3]);
+#endif
+ if (unlikely(err)) {
+ if (err == -EBADMSG && (*skb)->next)
+ tipc_rcv(net, (*skb)->next, b);
+ goto free_skb;
+ }
+
+ if (likely((*skb)->next)) {
+ kfree_skb((*skb)->next);
+ (*skb)->next = NULL;
+ }
+ ehdr = (struct tipc_ehdr *)(*skb)->data;
+ if (!rx) {
+ WARN_ON(ehdr->user != LINK_CONFIG);
+ n = tipc_node_create(net, 0, ehdr->id, 0xffffu, 0,
+ true);
+ rx = tipc_node_crypto_rx(n);
+ if (unlikely(!rx))
+ goto free_skb;
+ }
+
+ /* Skip cloning this time as we had an RX pending key */
+ if (rx->key.pending)
+ goto rcv;
+ if (tipc_aead_clone(&tmp, aead) < 0)
+ goto rcv;
+ if (tipc_crypto_key_attach(rx, tmp, ehdr->tx_key) < 0) {
+ tipc_aead_free(&tmp->rcu);
+ goto rcv;
+ }
+ tipc_aead_put(aead);
+ aead = tipc_aead_get(tmp);
+ }
+
+ if (unlikely(err)) {
+ tipc_aead_users_dec(aead, INT_MIN);
+ goto free_skb;
+ }
+
+ /* Set the RX key's user */
+ tipc_aead_users_set(aead, 1);
+
+rcv:
+ /* Mark this point, RX works */
+ rx->timer1 = jiffies;
+
+ /* Remove ehdr & auth. tag prior to tipc_rcv() */
+ ehdr = (struct tipc_ehdr *)(*skb)->data;
+ destined = ehdr->destined;
+ rx_key_active = ehdr->rx_key_active;
+ skb_pull(*skb, tipc_ehdr_size(ehdr));
+ pskb_trim(*skb, (*skb)->len - aead->authsize);
+
+ /* Validate TIPCv2 message */
+ if (unlikely(!tipc_msg_validate(skb))) {
+ pr_err_ratelimited("Packet dropped after decryption!\n");
+ goto free_skb;
+ }
+
+ /* Update peer RX active key & TX users */
+ if (destined)
+ tipc_crypto_key_synch(rx, rx_key_active, buf_msg(*skb));
+
+ /* Mark skb decrypted */
+ skb_cb->decrypted = 1;
+
+ /* Clear the clone context, if any */
+ if (likely(!skb_cb->tx_clone_deferred))
+ goto exit;
+ skb_cb->tx_clone_deferred = 0;
+ memset(&skb_cb->tx_clone_ctx, 0, sizeof(skb_cb->tx_clone_ctx));
+ goto exit;
+
+free_skb:
+ kfree_skb(*skb);
+ *skb = NULL;
+
+exit:
+ tipc_aead_put(aead);
+ if (rx)
+ tipc_node_put(rx->node);
+}
+
+static void tipc_crypto_do_cmd(struct net *net, int cmd)
+{
+ struct tipc_net *tn = tipc_net(net);
+ struct tipc_crypto *tx = tn->crypto_tx, *rx;
+ struct list_head *p;
+ unsigned int stat;
+ int i, j, cpu;
+ char buf[200];
+
+ /* Currently only one command is supported */
+ switch (cmd) {
+ case 0xfff1:
+ goto print_stats;
+ default:
+ return;
+ }
+
+print_stats:
+ /* Print a header */
+ pr_info("\n=============== TIPC Crypto Statistics ===============\n\n");
+
+ /* Print key status */
+ pr_info("Key status:\n");
+ pr_info("TX(%7.7s)\n%s", tipc_own_id_string(net),
+ tipc_crypto_key_dump(tx, buf));
+
+ rcu_read_lock();
+ for (p = tn->node_list.next; p != &tn->node_list; p = p->next) {
+ rx = tipc_node_crypto_rx_by_list(p);
+ pr_info("RX(%7.7s)\n%s", tipc_node_get_id_str(rx->node),
+ tipc_crypto_key_dump(rx, buf));
+ }
+ rcu_read_unlock();
+
+ /* Print crypto statistics */
+ for (i = 0, j = 0; i < MAX_STATS; i++)
+ j += scnprintf(buf + j, 200 - j, "|%11s ", hstats[i]);
+ pr_info("\nCounter %s", buf);
+
+ memset(buf, '-', 115);
+ buf[115] = '\0';
+ pr_info("%s\n", buf);
+
+ j = scnprintf(buf, 200, "TX(%7.7s) ", tipc_own_id_string(net));
+ for_each_possible_cpu(cpu) {
+ for (i = 0; i < MAX_STATS; i++) {
+ stat = per_cpu_ptr(tx->stats, cpu)->stat[i];
+ j += scnprintf(buf + j, 200 - j, "|%11d ", stat);
+ }
+ pr_info("%s", buf);
+ j = scnprintf(buf, 200, "%12s", " ");
+ }
+
+ rcu_read_lock();
+ for (p = tn->node_list.next; p != &tn->node_list; p = p->next) {
+ rx = tipc_node_crypto_rx_by_list(p);
+ j = scnprintf(buf, 200, "RX(%7.7s) ",
+ tipc_node_get_id_str(rx->node));
+ for_each_possible_cpu(cpu) {
+ for (i = 0; i < MAX_STATS; i++) {
+ stat = per_cpu_ptr(rx->stats, cpu)->stat[i];
+ j += scnprintf(buf + j, 200 - j, "|%11d ",
+ stat);
+ }
+ pr_info("%s", buf);
+ j = scnprintf(buf, 200, "%12s", " ");
+ }
+ }
+ rcu_read_unlock();
+
+ pr_info("\n======================== Done ========================\n");
+}
+
+static char *tipc_crypto_key_dump(struct tipc_crypto *c, char *buf)
+{
+ struct tipc_key key = c->key;
+ struct tipc_aead *aead;
+ int k, i = 0;
+ char *s;
+
+ for (k = KEY_MIN; k <= KEY_MAX; k++) {
+ if (k == key.passive)
+ s = "PAS";
+ else if (k == key.active)
+ s = "ACT";
+ else if (k == key.pending)
+ s = "PEN";
+ else
+ s = "-";
+ i += scnprintf(buf + i, 200 - i, "\tKey%d: %s", k, s);
+
+ rcu_read_lock();
+ aead = rcu_dereference(c->aead[k]);
+ if (aead)
+ i += scnprintf(buf + i, 200 - i,
+ "{\"%s...\", \"%s\"}/%d:%d",
+ aead->hint,
+ (aead->mode == CLUSTER_KEY) ? "c" : "p",
+ atomic_read(&aead->users),
+ refcount_read(&aead->refcnt));
+ rcu_read_unlock();
+ i += scnprintf(buf + i, 200 - i, "\n");
+ }
+
+ if (c->node)
+ i += scnprintf(buf + i, 200 - i, "\tPeer RX active: %d\n",
+ atomic_read(&c->peer_rx_active));
+
+ return buf;
+}
+
+#ifdef TIPC_CRYPTO_DEBUG
+static char *tipc_key_change_dump(struct tipc_key old, struct tipc_key new,
+ char *buf)
+{
+ struct tipc_key *key = &old;
+ int k, i = 0;
+ char *s;
+
+ /* Output format: "[%s %s %s] -> [%s %s %s]", max len = 32 */
+again:
+ i += scnprintf(buf + i, 32 - i, "[");
+ for (k = KEY_MIN; k <= KEY_MAX; k++) {
+ if (k == key->passive)
+ s = "pas";
+ else if (k == key->active)
+ s = "act";
+ else if (k == key->pending)
+ s = "pen";
+ else
+ s = "-";
+ i += scnprintf(buf + i, 32 - i,
+ (k != KEY_MAX) ? "%s " : "%s", s);
+ }
+ if (key != &new) {
+ i += scnprintf(buf + i, 32 - i, "] -> ");
+ key = &new;
+ goto again;
+ }
+ i += scnprintf(buf + i, 32 - i, "]");
+ return buf;
+}
+#endif
diff --git a/net/tipc/crypto.h b/net/tipc/crypto.h
new file mode 100644
index 000000000000..c3de769f49e8
--- /dev/null
+++ b/net/tipc/crypto.h
@@ -0,0 +1,167 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/**
+ * net/tipc/crypto.h: Include file for TIPC crypto
+ *
+ * Copyright (c) 2019, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifdef CONFIG_TIPC_CRYPTO
+#ifndef _TIPC_CRYPTO_H
+#define _TIPC_CRYPTO_H
+
+#include "core.h"
+#include "node.h"
+#include "msg.h"
+#include "bearer.h"
+
+#define TIPC_EVERSION 7
+
+/* AEAD aes(gcm) */
+#define TIPC_AES_GCM_KEY_SIZE_128 16
+#define TIPC_AES_GCM_KEY_SIZE_192 24
+#define TIPC_AES_GCM_KEY_SIZE_256 32
+
+#define TIPC_AES_GCM_SALT_SIZE 4
+#define TIPC_AES_GCM_IV_SIZE 12
+#define TIPC_AES_GCM_TAG_SIZE 16
+
+/**
+ * TIPC crypto modes:
+ * - CLUSTER_KEY:
+ * A single key is used for both TX & RX in all nodes in the cluster.
+ * - PER_NODE_KEY:
+ * Each node in the cluster has its own TX key; for RX, a node needs to
+ * know its peers' TX keys in order to decrypt messages from those nodes.
+ */
+enum {
+ CLUSTER_KEY = 1,
+ PER_NODE_KEY = (1 << 1),
+};
+
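The practical difference between the two modes is which key a receiver must hold. A standalone sketch of that lookup rule, stated under no assumptions beyond the comment above (this is not a TIPC API demo):

```c
#include <stdio.h>

enum { CLUSTER_KEY = 1, PER_NODE_KEY = 2 };

/* Which key must a receiver hold to decrypt traffic from 'tx_node'? */
static void rx_key_needed(int mode, const char *tx_node)
{
	if (mode == CLUSTER_KEY)
		printf("from %s: the single shared cluster key\n", tx_node);
	else
		printf("from %s: %s's own TX key\n", tx_node, tx_node);
}

int main(void)
{
	rx_key_needed(CLUSTER_KEY, "node-A");
	rx_key_needed(PER_NODE_KEY, "node-A");
	return 0;
}
```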
+extern int sysctl_tipc_max_tfms __read_mostly;
+
+/**
+ * TIPC encryption message format:
+ *
+ * 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0
+ * 1 0 9 8 7 6 5 4|3 2 1 0 9 8 7 6|5 4 3 2 1 0 9 8|7 6 5 4 3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * w0:|Ver=7| User |D|TX |RX |K| Rsvd |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * w1:| Seqno |
+ * w2:| (8 octets) |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * w3:\ Prevnode \
+ * / (4 or 16 octets) /
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * \ \
+ * / Encrypted complete TIPC V2 header and user data /
+ * \ \
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | |
+ * | AuthTag |
+ * | (16 octets) |
+ * | |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * Word0:
+ * Ver : = 7 i.e. TIPC encryption message version
+ * User : = 7 (for LINK_PROTOCOL); = 13 (for LINK_CONFIG) or = 0
+ * D : The "destined" bit, i.e. whether the message's destination
+ * node was known at the time of encryption
+ * TX : TX key used for the message encryption
+ * RX : Currently RX active key corresponding to the destination
+ * node's TX key (when the "D" bit is set)
+ * K : Keep-alive bit (for RPS, LINK_PROTOCOL/STATE_MSG only)
+ * Rsvd : Reserved bits
+ * Word1-2:
+ * Seqno : The 64-bit sequence number of the encrypted message, also
+ * part of the nonce used for the message encryption/decryption
+ * Word3-:
+ * Prevnode: The source node address, or the node ID (for LINK_CONFIG only)
+ * AuthTag : The authentication tag for the message integrity checking
+ * generated by the message encryption
+ */
+struct tipc_ehdr {
+ union {
+ struct {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8 destined:1,
+ user:4,
+ version:3;
+ __u8 reserved_1:3,
+ keepalive:1,
+ rx_key_active:2,
+ tx_key:2;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ __u8 version:3,
+ user:4,
+ destined:1;
+ __u8 tx_key:2,
+ rx_key_active:2,
+ keepalive:1,
+ reserved_1:3;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+ __be16 reserved_2;
+ } __packed;
+ __be32 w0;
+ };
+ __be64 seqno;
+ union {
+ __be32 addr;
+ __u8 id[NODE_ID_LEN]; /* For a LINK_CONFIG message only! */
+ };
+#define EHDR_SIZE (offsetof(struct tipc_ehdr, addr) + sizeof(__be32))
+#define EHDR_CFG_SIZE (sizeof(struct tipc_ehdr))
+#define EHDR_MIN_SIZE (EHDR_SIZE)
+#define EHDR_MAX_SIZE (EHDR_CFG_SIZE)
+#define EMSG_OVERHEAD (EHDR_SIZE + TIPC_AES_GCM_TAG_SIZE)
+} __packed;
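The size macros above follow directly from the packed layout: the 4-byte word 0 plus the 8-byte seqno give a 12-byte prefix, so the 32-bit address variant is 16 bytes and the 16-byte node-id variant (LINK_CONFIG) is 28. A standalone model of that arithmetic, using fixed-width stand-ins for the `__be` types:

```c
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define NODE_ID_LEN 16
#define TAG_SIZE    16			/* TIPC_AES_GCM_TAG_SIZE */

/* Fixed-width stand-in for struct tipc_ehdr above */
struct ehdr_model {
	uint32_t w0;			/* version/user/D/TX/RX/K bit fields */
	uint64_t seqno;
	union {
		uint32_t addr;
		uint8_t id[NODE_ID_LEN];	/* LINK_CONFIG only */
	};
} __attribute__((packed));

int main(void)
{
	size_t ehdr_size = offsetof(struct ehdr_model, addr) + sizeof(uint32_t);

	printf("EHDR_SIZE     = %zu\n", ehdr_size);		     /* 16 */
	printf("EHDR_CFG_SIZE = %zu\n", sizeof(struct ehdr_model)); /* 28 */
	printf("EMSG_OVERHEAD = %zu\n", ehdr_size + TAG_SIZE);	     /* 32 */
	return 0;
}
```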
+
+int tipc_crypto_start(struct tipc_crypto **crypto, struct net *net,
+ struct tipc_node *node);
+void tipc_crypto_stop(struct tipc_crypto **crypto);
+void tipc_crypto_timeout(struct tipc_crypto *rx);
+int tipc_crypto_xmit(struct net *net, struct sk_buff **skb,
+ struct tipc_bearer *b, struct tipc_media_addr *dst,
+ struct tipc_node *__dnode);
+int tipc_crypto_rcv(struct net *net, struct tipc_crypto *rx,
+ struct sk_buff **skb, struct tipc_bearer *b);
+int tipc_crypto_key_init(struct tipc_crypto *c, struct tipc_aead_key *ukey,
+ u8 mode);
+void tipc_crypto_key_flush(struct tipc_crypto *c);
+int tipc_aead_key_validate(struct tipc_aead_key *ukey);
+bool tipc_ehdr_validate(struct sk_buff *skb);
+
+#endif /* _TIPC_CRYPTO_H */
+#endif
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
index c138d68e8a69..b043e8c6397a 100644
--- a/net/tipc/discover.c
+++ b/net/tipc/discover.c
@@ -94,6 +94,7 @@ static void tipc_disc_init_msg(struct net *net, struct sk_buff *skb,
msg_set_dest_domain(hdr, dest_domain);
msg_set_bc_netid(hdr, tn->net_id);
b->media->addr2msg(msg_media_addr(hdr), &b->addr);
+ msg_set_peer_net_hash(hdr, tipc_net_hash_mixes(net, tn->random));
msg_set_node_id(hdr, tipc_own_id(net));
}
@@ -242,7 +243,8 @@ void tipc_disc_rcv(struct net *net, struct sk_buff *skb,
if (!tipc_in_scope(legacy, b->domain, src))
return;
tipc_node_check_dest(net, src, peer_id, b, caps, signature,
- &maddr, &respond, &dupl_addr);
+ msg_peer_net_hash(hdr), &maddr, &respond,
+ &dupl_addr);
if (dupl_addr)
disc_dupl_alert(b, src, &maddr);
if (!respond)
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 999eab592de8..24d4d10756d3 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -44,6 +44,7 @@
#include "netlink.h"
#include "monitor.h"
#include "trace.h"
+#include "crypto.h"
#include <linux/pkt_sched.h>
@@ -397,6 +398,15 @@ int tipc_link_mtu(struct tipc_link *l)
return l->mtu;
}
+int tipc_link_mss(struct tipc_link *l)
+{
+#ifdef CONFIG_TIPC_CRYPTO
+ return l->mtu - INT_H_SIZE - EMSG_OVERHEAD;
+#else
+ return l->mtu - INT_H_SIZE;
+#endif
+}
+
u16 tipc_link_rcv_nxt(struct tipc_link *l)
{
return l->rcv_nxt;
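With crypto enabled, the usable MSS shrinks by the encryption header plus the AES-GCM tag. A worked example, assuming a plain 1500-byte Ethernet MTU (INT_H_SIZE = 40 and EMSG_OVERHEAD = 32 follow from definitions elsewhere in this patch):

```c
#include <stdio.h>

#define INT_H_SIZE    40	/* TIPC internal header size */
#define EMSG_OVERHEAD 32	/* EHDR_SIZE (16) + AES-GCM tag (16) */

int main(void)
{
	int mtu = 1500;		/* assumed Ethernet MTU */

	printf("mss, crypto on : %d\n", mtu - INT_H_SIZE - EMSG_OVERHEAD); /* 1428 */
	printf("mss, crypto off: %d\n", mtu - INT_H_SIZE);		   /* 1460 */
	return 0;
}
```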
@@ -540,7 +550,7 @@ bool tipc_link_bc_create(struct net *net, u32 ownnode, u32 peer,
/* Disable replicast if even a single peer doesn't support it */
if (link_is_bc_rcvlink(l) && !(peer_caps & TIPC_BCAST_RCAST))
- tipc_bcast_disable_rcast(net);
+ tipc_bcast_toggle_rcast(net, false);
return true;
}
@@ -940,16 +950,18 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
struct sk_buff_head *xmitq)
{
struct tipc_msg *hdr = buf_msg(skb_peek(list));
- unsigned int maxwin = l->window;
- int imp = msg_importance(hdr);
- unsigned int mtu = l->mtu;
+ struct sk_buff_head *backlogq = &l->backlogq;
+ struct sk_buff_head *transmq = &l->transmq;
+ struct sk_buff *skb, *_skb;
+ u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
u16 ack = l->rcv_nxt - 1;
u16 seqno = l->snd_nxt;
- u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
- struct sk_buff_head *transmq = &l->transmq;
- struct sk_buff_head *backlogq = &l->backlogq;
- struct sk_buff *skb, *_skb, **tskb;
int pkt_cnt = skb_queue_len(list);
+ int imp = msg_importance(hdr);
+ unsigned int mss = tipc_link_mss(l);
+ unsigned int maxwin = l->window;
+ unsigned int mtu = l->mtu;
+ bool new_bundle;
int rc = 0;
if (unlikely(msg_size(hdr) > mtu)) {
@@ -975,20 +987,18 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
}
/* Prepare each packet for sending, and add to relevant queue: */
- while (skb_queue_len(list)) {
- skb = skb_peek(list);
- hdr = buf_msg(skb);
- msg_set_seqno(hdr, seqno);
- msg_set_ack(hdr, ack);
- msg_set_bcast_ack(hdr, bc_ack);
-
+ while ((skb = __skb_dequeue(list))) {
if (likely(skb_queue_len(transmq) < maxwin)) {
+ hdr = buf_msg(skb);
+ msg_set_seqno(hdr, seqno);
+ msg_set_ack(hdr, ack);
+ msg_set_bcast_ack(hdr, bc_ack);
_skb = skb_clone(skb, GFP_ATOMIC);
if (!_skb) {
+ kfree_skb(skb);
__skb_queue_purge(list);
return -ENOBUFS;
}
- __skb_dequeue(list);
__skb_queue_tail(transmq, skb);
/* next retransmit attempt */
if (link_is_bc_sndlink(l))
@@ -1000,22 +1010,25 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
seqno++;
continue;
}
- tskb = &l->backlog[imp].target_bskb;
- if (tipc_msg_bundle(*tskb, hdr, mtu)) {
- kfree_skb(__skb_dequeue(list));
- l->stats.sent_bundled++;
- continue;
- }
- if (tipc_msg_make_bundle(tskb, hdr, mtu, l->addr)) {
- kfree_skb(__skb_dequeue(list));
- __skb_queue_tail(backlogq, *tskb);
- l->backlog[imp].len++;
- l->stats.sent_bundled++;
- l->stats.sent_bundles++;
+ if (tipc_msg_try_bundle(l->backlog[imp].target_bskb, &skb,
+ mss, l->addr, &new_bundle)) {
+ if (skb) {
+ /* Keep a ref. to the skb for next try */
+ l->backlog[imp].target_bskb = skb;
+ l->backlog[imp].len++;
+ __skb_queue_tail(backlogq, skb);
+ } else {
+ if (new_bundle) {
+ l->stats.sent_bundles++;
+ l->stats.sent_bundled++;
+ }
+ l->stats.sent_bundled++;
+ }
continue;
}
l->backlog[imp].target_bskb = NULL;
- l->backlog[imp].len += skb_queue_len(list);
+ l->backlog[imp].len += (1 + skb_queue_len(list));
+ __skb_queue_tail(backlogq, skb);
skb_queue_splice_tail_init(list, backlogq);
}
l->snd_nxt = seqno;
@@ -1084,7 +1097,7 @@ static bool link_retransmit_failure(struct tipc_link *l, struct tipc_link *r,
return false;
if (!time_after(jiffies, TIPC_SKB_CB(skb)->retr_stamp +
- msecs_to_jiffies(r->tolerance)))
+ msecs_to_jiffies(r->tolerance * 10)))
return false;
hdr = buf_msg(skb);
@@ -1151,7 +1164,7 @@ static int tipc_link_bc_retrans(struct tipc_link *l, struct tipc_link *r,
if (time_before(jiffies, TIPC_SKB_CB(skb)->nxt_retr))
continue;
TIPC_SKB_CB(skb)->nxt_retr = TIPC_BC_RETR_LIM;
- _skb = __pskb_copy(skb, LL_MAX_HEADER + MIN_H_SIZE, GFP_ATOMIC);
+ _skb = pskb_copy(skb, GFP_ATOMIC);
if (!_skb)
return 0;
hdr = buf_msg(_skb);
@@ -1427,8 +1440,7 @@ next_gap_ack:
if (time_before(jiffies, TIPC_SKB_CB(skb)->nxt_retr))
continue;
TIPC_SKB_CB(skb)->nxt_retr = TIPC_UC_RETR_TIME;
- _skb = __pskb_copy(skb, LL_MAX_HEADER + MIN_H_SIZE,
- GFP_ATOMIC);
+ _skb = pskb_copy(skb, GFP_ATOMIC);
if (!_skb)
continue;
hdr = buf_msg(_skb);
@@ -1728,21 +1740,6 @@ void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
return;
__skb_queue_head_init(&tnlq);
- __skb_queue_head_init(&tmpxq);
- __skb_queue_head_init(&frags);
-
- /* At least one packet required for safe algorithm => add dummy */
- skb = tipc_msg_create(TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
- BASIC_H_SIZE, 0, l->addr, tipc_own_addr(l->net),
- 0, 0, TIPC_ERR_NO_PORT);
- if (!skb) {
- pr_warn("%sunable to create tunnel packet\n", link_co_err);
- return;
- }
- __skb_queue_tail(&tnlq, skb);
- tipc_link_xmit(l, &tnlq, &tmpxq);
- __skb_queue_purge(&tmpxq);
-
/* Link Synching:
* From now on, send only one single ("dummy") SYNCH message
* to peer. The SYNCH message does not contain any data, just
@@ -1768,6 +1765,20 @@ void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
return;
}
+ __skb_queue_head_init(&tmpxq);
+ __skb_queue_head_init(&frags);
+ /* At least one packet required for safe algorithm => add dummy */
+ skb = tipc_msg_create(TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
+ BASIC_H_SIZE, 0, l->addr, tipc_own_addr(l->net),
+ 0, 0, TIPC_ERR_NO_PORT);
+ if (!skb) {
+ pr_warn("%sunable to create tunnel packet\n", link_co_err);
+ return;
+ }
+ __skb_queue_tail(&tnlq, skb);
+ tipc_link_xmit(l, &tnlq, &tmpxq);
+ __skb_queue_purge(&tmpxq);
+
/* Initialize reusable tunnel packet header */
tipc_msg_init(tipc_own_addr(l->net), &tnlhdr, TUNNEL_PROTOCOL,
mtyp, INT_H_SIZE, l->addr);
@@ -1873,7 +1884,7 @@ void tipc_link_failover_prepare(struct tipc_link *l, struct tipc_link *tnl,
tipc_link_create_dummy_tnl_msg(tnl, xmitq);
- /* This failover link enpoint was never established before,
+ /* This failover link endpoint was never established before,
* so it has not received anything from peer.
* Otherwise, it must be a normal failover situation or the
* node has entered SELF_DOWN_PEER_LEAVING and both peer nodes
diff --git a/net/tipc/link.h b/net/tipc/link.h
index adcad65e761c..c09e9d49d0a3 100644
--- a/net/tipc/link.h
+++ b/net/tipc/link.h
@@ -141,6 +141,7 @@ void tipc_link_remove_bc_peer(struct tipc_link *snd_l,
int tipc_link_bc_peers(struct tipc_link *l);
void tipc_link_set_mtu(struct tipc_link *l, int mtu);
int tipc_link_mtu(struct tipc_link *l);
+int tipc_link_mss(struct tipc_link *l);
void tipc_link_bc_ack_rcv(struct tipc_link *l, u16 acked,
struct sk_buff_head *xmitq);
void tipc_link_build_bc_sync_msg(struct tipc_link *l,
diff --git a/net/tipc/monitor.c b/net/tipc/monitor.c
index 6a6eae88442f..58708b4c7719 100644
--- a/net/tipc/monitor.c
+++ b/net/tipc/monitor.c
@@ -665,6 +665,21 @@ void tipc_mon_delete(struct net *net, int bearer_id)
kfree(mon);
}
+void tipc_mon_reinit_self(struct net *net)
+{
+ struct tipc_monitor *mon;
+ int bearer_id;
+
+ for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) {
+ mon = tipc_monitor(net, bearer_id);
+ if (!mon)
+ continue;
+ write_lock_bh(&mon->lock);
+ mon->self->addr = tipc_own_addr(net);
+ write_unlock_bh(&mon->lock);
+ }
+}
+
int tipc_nl_monitor_set_threshold(struct net *net, u32 cluster_size)
{
struct tipc_net *tn = tipc_net(net);
diff --git a/net/tipc/monitor.h b/net/tipc/monitor.h
index 2a21b93e0d04..ed63d2e650b0 100644
--- a/net/tipc/monitor.h
+++ b/net/tipc/monitor.h
@@ -77,6 +77,7 @@ int __tipc_nl_add_monitor(struct net *net, struct tipc_nl_msg *msg,
u32 bearer_id);
int tipc_nl_add_monitor_peer(struct net *net, struct tipc_nl_msg *msg,
u32 bearer_id, u32 *prev_node);
+void tipc_mon_reinit_self(struct net *net);
extern const int tipc_max_domain_size;
#endif
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index 922d262e153f..0d515d20b056 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -39,10 +39,16 @@
#include "msg.h"
#include "addr.h"
#include "name_table.h"
+#include "crypto.h"
#define MAX_FORWARD_SIZE 1024
+#ifdef CONFIG_TIPC_CRYPTO
+#define BUF_HEADROOM ALIGN(((LL_MAX_HEADER + 48) + EHDR_MAX_SIZE), 16)
+#define BUF_TAILROOM (TIPC_AES_GCM_TAG_SIZE)
+#else
#define BUF_HEADROOM (LL_MAX_HEADER + 48)
#define BUF_TAILROOM 16
+#endif
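In effect the crypto build reserves extra headroom for the largest possible encryption header and tailroom for the authentication tag. A sketch of the resulting allocation size, assuming LL_MAX_HEADER = 128 (that constant is kernel-config dependent):

```c
#include <stdio.h>

#define ALIGN16(x)	(((x) + 15) & ~15u)
#define LL_MAX_HEADER	128	/* assumed; the real value is config dependent */
#define EHDR_MAX_SIZE	28	/* from crypto.h in this patch */
#define TAG_SIZE	16	/* TIPC_AES_GCM_TAG_SIZE */

int main(void)
{
	unsigned int headroom = ALIGN16(LL_MAX_HEADER + 48 + EHDR_MAX_SIZE);
	unsigned int size = 1000;	/* arbitrary payload size */
	unsigned int buf_size = (headroom + size + TAG_SIZE + 3) & ~3u;

	printf("headroom = %u, total alloc = %u\n", headroom, buf_size);
	/* headroom = 208, total alloc = 1224 */
	return 0;
}
```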
static unsigned int align(unsigned int i)
{
@@ -61,7 +67,11 @@ static unsigned int align(unsigned int i)
struct sk_buff *tipc_buf_acquire(u32 size, gfp_t gfp)
{
struct sk_buff *skb;
+#ifdef CONFIG_TIPC_CRYPTO
+ unsigned int buf_size = (BUF_HEADROOM + size + BUF_TAILROOM + 3) & ~3u;
+#else
unsigned int buf_size = (BUF_HEADROOM + size + 3) & ~3u;
+#endif
skb = alloc_skb_fclone(buf_size, gfp);
if (skb) {
@@ -173,7 +183,7 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
}
if (fragid == LAST_FRAGMENT) {
- TIPC_SKB_CB(head)->validated = false;
+ TIPC_SKB_CB(head)->validated = 0;
if (unlikely(!tipc_msg_validate(&head)))
goto err;
*buf = head;
@@ -190,6 +200,59 @@ err:
return 0;
}
+/**
+ * tipc_msg_append(): Append data to tail of an existing buffer queue
+ * @_hdr: header to be used
+ * @m: the data to be appended
+ * @dlen: size of data to be appended
+ * @mss: max allowable size of buffer
+ * @txq: queue to append to
+ * Returns the number of 1k blocks appended or errno value
+ */
+int tipc_msg_append(struct tipc_msg *_hdr, struct msghdr *m, int dlen,
+ int mss, struct sk_buff_head *txq)
+{
+ struct sk_buff *skb, *prev;
+ int accounted, total, curr;
+ int mlen, cpy, rem = dlen;
+ struct tipc_msg *hdr;
+
+ skb = skb_peek_tail(txq);
+ accounted = skb ? msg_blocks(buf_msg(skb)) : 0;
+ total = accounted;
+
+ while (rem) {
+ if (!skb || skb->len >= mss) {
+ prev = skb;
+ skb = tipc_buf_acquire(mss, GFP_KERNEL);
+ if (unlikely(!skb))
+ return -ENOMEM;
+ skb_orphan(skb);
+ skb_trim(skb, MIN_H_SIZE);
+ hdr = buf_msg(skb);
+ skb_copy_to_linear_data(skb, _hdr, MIN_H_SIZE);
+ msg_set_hdr_sz(hdr, MIN_H_SIZE);
+ msg_set_size(hdr, MIN_H_SIZE);
+ __skb_queue_tail(txq, skb);
+ total += 1;
+ if (prev)
+ msg_set_ack_required(buf_msg(prev), 0);
+ msg_set_ack_required(hdr, 1);
+ }
+ hdr = buf_msg(skb);
+ curr = msg_blocks(hdr);
+ mlen = msg_size(hdr);
+ cpy = min_t(int, rem, mss - mlen);
+ if (cpy != copy_from_iter(skb->data + mlen, cpy, &m->msg_iter))
+ return -EFAULT;
+ msg_set_size(hdr, mlen + cpy);
+ skb_put(skb, cpy);
+ rem -= cpy;
+ total += msg_blocks(hdr) - curr;
+ }
+ return total - accounted;
+}
+
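tipc_msg_append() keeps filling the tail buffer until it reaches the MSS, then starts a fresh buffer with a copied header. The userspace model below reproduces only that chunking arithmetic (MIN_H_SIZE modeled as 24 bytes); skb handling and block accounting are omitted:

```c
#include <stdio.h>

int main(void)
{
	int mss = 1428;			/* max buffer size, header inclusive */
	int hdr = 24;			/* models MIN_H_SIZE */
	int rem = 5000;			/* bytes left to append */
	int len = hdr, nbufs = 1;	/* first buffer holds a header copy */

	while (rem) {
		if (len >= mss) {	/* tail is full: start a new buffer */
			nbufs++;
			len = hdr;
		}
		int cpy = rem < mss - len ? rem : mss - len;

		len += cpy;
		rem -= cpy;
	}
	printf("5000 bytes -> %d buffers, last one %d bytes\n", nbufs, len);
	/* 5000 bytes -> 4 buffers, last one 812 bytes */
	return 0;
}
```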
/* tipc_msg_validate - validate basic format of received message
*
* This routine ensures a TIPC message has an acceptable header, and at least
@@ -218,6 +281,7 @@ bool tipc_msg_validate(struct sk_buff **_skb)
if (unlikely(TIPC_SKB_CB(skb)->validated))
return true;
+
if (unlikely(!pskb_may_pull(skb, MIN_H_SIZE)))
return false;
@@ -239,7 +303,7 @@ bool tipc_msg_validate(struct sk_buff **_skb)
if (unlikely(skb->len < msz))
return false;
- TIPC_SKB_CB(skb)->validated = true;
+ TIPC_SKB_CB(skb)->validated = 1;
return true;
}
@@ -419,48 +483,98 @@ error:
}
/**
- * tipc_msg_bundle(): Append contents of a buffer to tail of an existing one
- * @skb: the buffer to append to ("bundle")
- * @msg: message to be appended
- * @mtu: max allowable size for the bundle buffer
- * Consumes buffer if successful
- * Returns true if bundling could be performed, otherwise false
+ * tipc_msg_bundle - Append contents of a buffer to tail of an existing one
+ * @bskb: the bundle buffer to append to
+ * @msg: message to be appended
+ * @max: max allowable size for the bundle buffer
+ *
+ * Returns "true" if bundling has been performed, otherwise "false"
*/
-bool tipc_msg_bundle(struct sk_buff *skb, struct tipc_msg *msg, u32 mtu)
+static bool tipc_msg_bundle(struct sk_buff *bskb, struct tipc_msg *msg,
+ u32 max)
{
- struct tipc_msg *bmsg;
- unsigned int bsz;
- unsigned int msz = msg_size(msg);
- u32 start, pad;
- u32 max = mtu - INT_H_SIZE;
+ struct tipc_msg *bmsg = buf_msg(bskb);
+ u32 msz, bsz, offset, pad;
- if (likely(msg_user(msg) == MSG_FRAGMENTER))
- return false;
- if (!skb)
- return false;
- bmsg = buf_msg(skb);
+ msz = msg_size(msg);
bsz = msg_size(bmsg);
- start = align(bsz);
- pad = start - bsz;
+ offset = align(bsz);
+ pad = offset - bsz;
- if (unlikely(msg_user(msg) == TUNNEL_PROTOCOL))
+ if (unlikely(skb_tailroom(bskb) < (pad + msz)))
return false;
- if (unlikely(msg_user(msg) == BCAST_PROTOCOL))
+ if (unlikely(max < (offset + msz)))
return false;
- if (unlikely(msg_user(bmsg) != MSG_BUNDLER))
+
+ skb_put(bskb, pad + msz);
+ skb_copy_to_linear_data_offset(bskb, offset, msg, msz);
+ msg_set_size(bmsg, offset + msz);
+ msg_set_msgcnt(bmsg, msg_msgcnt(bmsg) + 1);
+ return true;
+}
+
+/**
+ * tipc_msg_try_bundle - Try to bundle a new message to the last one
+ * @tskb: the last/target message to which the new one will be appended
+ * @skb: the new message skb pointer
+ * @mss: max message size (header inclusive)
+ * @dnode: destination node for the message
+ * @new_bundle: whether this call created a new bundle
+ *
+ * Return: "true" if the new message skb has bundling potential, now or
+ * later; if bundling was actually done this time, the skb is consumed and
+ * the skb pointer is set to NULL.
+ * Otherwise, "false" if the skb cannot be bundled at all.
+ */
+bool tipc_msg_try_bundle(struct sk_buff *tskb, struct sk_buff **skb, u32 mss,
+ u32 dnode, bool *new_bundle)
+{
+ struct tipc_msg *msg, *inner, *outer;
+ u32 tsz;
+
+ /* First, check if the new buffer is suitable for bundling */
+ msg = buf_msg(*skb);
+ if (msg_user(msg) == MSG_FRAGMENTER)
return false;
- if (unlikely(skb_tailroom(skb) < (pad + msz)))
+ if (msg_user(msg) == TUNNEL_PROTOCOL)
return false;
- if (unlikely(max < (start + msz)))
+ if (msg_user(msg) == BCAST_PROTOCOL)
return false;
- if ((msg_importance(msg) < TIPC_SYSTEM_IMPORTANCE) &&
- (msg_importance(bmsg) == TIPC_SYSTEM_IMPORTANCE))
+ if (mss <= INT_H_SIZE + msg_size(msg))
return false;
- skb_put(skb, pad + msz);
- skb_copy_to_linear_data_offset(skb, start, msg, msz);
- msg_set_size(bmsg, start + msz);
- msg_set_msgcnt(bmsg, msg_msgcnt(bmsg) + 1);
+ /* OK so far, but is there a last/target buffer to bundle with? */
+ if (unlikely(!tskb))
+ return true;
+
+ /* Is it a bundle already? Try to bundle the new message to it */
+ if (msg_user(buf_msg(tskb)) == MSG_BUNDLER) {
+ *new_bundle = false;
+ goto bundle;
+ }
+
+ /* Make a new bundle of the two messages if possible */
+ tsz = msg_size(buf_msg(tskb));
+ if (unlikely(mss < align(INT_H_SIZE + tsz) + msg_size(msg)))
+ return true;
+ if (unlikely(pskb_expand_head(tskb, INT_H_SIZE, mss - tsz - INT_H_SIZE,
+ GFP_ATOMIC)))
+ return true;
+ inner = buf_msg(tskb);
+ skb_push(tskb, INT_H_SIZE);
+ outer = buf_msg(tskb);
+ tipc_msg_init(msg_prevnode(inner), outer, MSG_BUNDLER, 0, INT_H_SIZE,
+ dnode);
+ msg_set_importance(outer, msg_importance(inner));
+ msg_set_size(outer, INT_H_SIZE + tsz);
+ msg_set_msgcnt(outer, 1);
+ *new_bundle = true;
+
+bundle:
+ if (likely(tipc_msg_bundle(tskb, msg, mss))) {
+ consume_skb(*skb);
+ *skb = NULL;
+ }
return true;
}
@@ -510,49 +624,6 @@ none:
}
/**
- * tipc_msg_make_bundle(): Create bundle buf and append message to its tail
- * @list: the buffer chain, where head is the buffer to replace/append
- * @skb: buffer to be created, appended to and returned in case of success
- * @msg: message to be appended
- * @mtu: max allowable size for the bundle buffer, inclusive header
- * @dnode: destination node for message. (Not always present in header)
- * Returns true if success, otherwise false
- */
-bool tipc_msg_make_bundle(struct sk_buff **skb, struct tipc_msg *msg,
- u32 mtu, u32 dnode)
-{
- struct sk_buff *_skb;
- struct tipc_msg *bmsg;
- u32 msz = msg_size(msg);
- u32 max = mtu - INT_H_SIZE;
-
- if (msg_user(msg) == MSG_FRAGMENTER)
- return false;
- if (msg_user(msg) == TUNNEL_PROTOCOL)
- return false;
- if (msg_user(msg) == BCAST_PROTOCOL)
- return false;
- if (msz > (max / 2))
- return false;
-
- _skb = tipc_buf_acquire(max, GFP_ATOMIC);
- if (!_skb)
- return false;
-
- skb_trim(_skb, INT_H_SIZE);
- bmsg = buf_msg(_skb);
- tipc_msg_init(msg_prevnode(msg), bmsg, MSG_BUNDLER, 0,
- INT_H_SIZE, dnode);
- msg_set_importance(bmsg, msg_importance(msg));
- msg_set_seqno(bmsg, msg_seqno(msg));
- msg_set_ack(bmsg, msg_ack(msg));
- msg_set_bcast_ack(bmsg, msg_bcast_ack(msg));
- tipc_msg_bundle(_skb, msg, mtu);
- *skb = _skb;
- return true;
-}
-
-/**
* tipc_msg_reverse(): swap source and destination addresses and add error code
* @own_node: originating node id for reversed message
* @skb: buffer containing message to be reversed; will be consumed
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index 0daa6f04ca81..6d466ebdb64f 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -102,16 +102,42 @@ struct plist;
#define TIPC_MEDIA_INFO_OFFSET 5
struct tipc_skb_cb {
- struct sk_buff *tail;
- unsigned long nxt_retr;
- unsigned long retr_stamp;
- u32 bytes_read;
- u32 orig_member;
- u16 chain_imp;
- u16 ackers;
- u16 retr_cnt;
- bool validated;
-};
+ union {
+ struct {
+ struct sk_buff *tail;
+ unsigned long nxt_retr;
+ unsigned long retr_stamp;
+ u32 bytes_read;
+ u32 orig_member;
+ u16 chain_imp;
+ u16 ackers;
+ u16 retr_cnt;
+ } __packed;
+#ifdef CONFIG_TIPC_CRYPTO
+ struct {
+ struct tipc_crypto *rx;
+ struct tipc_aead *last;
+ u8 recurs;
+ } tx_clone_ctx __packed;
+#endif
+ } __packed;
+ union {
+ struct {
+ u8 validated:1;
+#ifdef CONFIG_TIPC_CRYPTO
+ u8 encrypted:1;
+ u8 decrypted:1;
+ u8 probe:1;
+ u8 tx_clone_deferred:1;
+#endif
+ };
+ u8 flags;
+ };
+ u8 reserved;
+#ifdef CONFIG_TIPC_CRYPTO
+ void *crypto_ctx;
+#endif
+} __packed;
#define TIPC_SKB_CB(__skb) ((struct tipc_skb_cb *)&((__skb)->cb[0]))
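The reworked control block must still fit in the fixed 48-byte skb->cb array. A standalone size check of an equivalent layout, assuming a 64-bit build (8-byte pointers and longs):

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Fixed-width model of the tipc_skb_cb layout above */
struct cb_model {
	union {
		struct {
			void *tail;
			unsigned long nxt_retr;
			unsigned long retr_stamp;
			uint32_t bytes_read;
			uint32_t orig_member;
			uint16_t chain_imp;
			uint16_t ackers;
			uint16_t retr_cnt;
		} __attribute__((packed));
		struct {
			void *rx;
			void *last;
			uint8_t recurs;
		} __attribute__((packed)) tx_clone_ctx;
	} __attribute__((packed));
	uint8_t flags;		/* validated/encrypted/decrypted/... bits */
	uint8_t reserved;
	void *crypto_ctx;
} __attribute__((packed));

int main(void)
{
	printf("sizeof(cb_model) = %zu (must be <= 48)\n",
	       sizeof(struct cb_model));
	assert(sizeof(struct cb_model) <= 48);
	return 0;
}
```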
@@ -290,6 +316,16 @@ static inline void msg_set_src_droppable(struct tipc_msg *m, u32 d)
msg_set_bits(m, 0, 18, 1, d);
}
+static inline int msg_ack_required(struct tipc_msg *m)
+{
+ return msg_bits(m, 0, 18, 1);
+}
+
+static inline void msg_set_ack_required(struct tipc_msg *m, u32 d)
+{
+ msg_set_bits(m, 0, 18, 1, d);
+}
+
static inline bool msg_is_rcast(struct tipc_msg *m)
{
return msg_bits(m, 0, 18, 0x1);
@@ -1026,6 +1062,20 @@ static inline bool msg_is_reset(struct tipc_msg *hdr)
return (msg_user(hdr) == LINK_PROTOCOL) && (msg_type(hdr) == RESET_MSG);
}
+/* Word 13
+ */
+static inline void msg_set_peer_net_hash(struct tipc_msg *m, u32 n)
+{
+ msg_set_word(m, 13, n);
+}
+
+static inline u32 msg_peer_net_hash(struct tipc_msg *m)
+{
+ return msg_word(m, 13);
+}
+
+/* Word 14
+ */
static inline u32 msg_sugg_node_addr(struct tipc_msg *m)
{
return msg_word(m, 14);
@@ -1057,14 +1107,15 @@ struct sk_buff *tipc_msg_create(uint user, uint type, uint hdr_sz,
uint data_sz, u32 dnode, u32 onode,
u32 dport, u32 oport, int errcode);
int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf);
-bool tipc_msg_bundle(struct sk_buff *skb, struct tipc_msg *msg, u32 mtu);
-bool tipc_msg_make_bundle(struct sk_buff **skb, struct tipc_msg *msg,
- u32 mtu, u32 dnode);
+bool tipc_msg_try_bundle(struct sk_buff *tskb, struct sk_buff **skb, u32 mss,
+ u32 dnode, bool *new_bundle);
bool tipc_msg_extract(struct sk_buff *skb, struct sk_buff **iskb, int *pos);
int tipc_msg_fragment(struct sk_buff *skb, const struct tipc_msg *hdr,
int pktmax, struct sk_buff_head *frags);
int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m,
int offset, int dsz, int mtu, struct sk_buff_head *list);
+int tipc_msg_append(struct tipc_msg *hdr, struct msghdr *m, int dlen,
+ int mss, struct sk_buff_head *txq);
bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err);
bool tipc_msg_assemble(struct sk_buff_head *list);
bool tipc_msg_reassemble(struct sk_buff_head *list, struct sk_buff_head *rcvq);
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index 836e629e8f4a..5feaf3b67380 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -146,7 +146,7 @@ static void named_distribute(struct net *net, struct sk_buff_head *list,
struct publication *publ;
struct sk_buff *skb = NULL;
struct distr_item *item = NULL;
- u32 msg_dsz = ((tipc_node_get_mtu(net, dnode, 0) - INT_H_SIZE) /
+ u32 msg_dsz = ((tipc_node_get_mtu(net, dnode, 0, false) - INT_H_SIZE) /
ITEM_SIZE) * ITEM_SIZE;
u32 msg_rem = msg_dsz;
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
index 66a65c2cdb23..92d04dc2a44b 100644
--- a/net/tipc/name_table.c
+++ b/net/tipc/name_table.c
@@ -35,6 +35,7 @@
*/
#include <net/sock.h>
+#include <linux/list_sort.h>
#include "core.h"
#include "netlink.h"
#include "name_table.h"
@@ -66,6 +67,7 @@ struct service_range {
/**
* struct tipc_service - container for all published instances of a service type
* @type: 32 bit 'type' value for service
+ * @publ_cnt: increasing counter for publications in this service
* @ranges: rb tree containing all service ranges for this service
* @service_list: links to adjacent name ranges in hash chain
* @subscriptions: list of subscriptions for this service type
@@ -74,6 +76,7 @@ struct service_range {
*/
struct tipc_service {
u32 type;
+ u32 publ_cnt;
struct rb_root ranges;
struct hlist_node service_list;
struct list_head subscriptions;
@@ -109,6 +112,7 @@ static struct publication *tipc_publ_create(u32 type, u32 lower, u32 upper,
INIT_LIST_HEAD(&publ->binding_node);
INIT_LIST_HEAD(&publ->local_publ);
INIT_LIST_HEAD(&publ->all_publ);
+ INIT_LIST_HEAD(&publ->list);
return publ;
}
@@ -244,6 +248,8 @@ static struct publication *tipc_service_insert_publ(struct net *net,
p = tipc_publ_create(type, lower, upper, scope, node, port, key);
if (!p)
goto err;
+ /* Assume the gap between publication ids never gets huge, i.e. >INT_MAX */
+ p->id = sc->publ_cnt++;
if (in_own_node(net, node))
list_add(&p->local_publ, &sr->local_publ);
list_add(&p->all_publ, &sr->all_publ);
@@ -278,6 +284,20 @@ static struct publication *tipc_service_remove_publ(struct service_range *sr,
}
/**
+ * Wrap-safe comparison of publication ids: reuse time_after32()
+ */
+#define publication_after(pa, pb) time_after32((pa)->id, (pb)->id)
+static int tipc_publ_sort(void *priv, struct list_head *a,
+ struct list_head *b)
+{
+ struct publication *pa, *pb;
+
+ pa = container_of(a, struct publication, list);
+ pb = container_of(b, struct publication, list);
+ return publication_after(pa, pb);
+}
+
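Reusing time_after32() makes the publication-id comparison safe across 32-bit counter wraparound, which a plain '>' is not. A minimal demonstration:

```c
#include <stdint.h>
#include <stdio.h>

/* Same trick as the kernel's time_after32(): true if 'a' is after 'b' */
static int after32(uint32_t a, uint32_t b)
{
	return (int32_t)(b - a) < 0;
}

int main(void)
{
	uint32_t old_id = 0xfffffffeu;	/* issued just before the wrap */
	uint32_t new_id = 0x00000001u;	/* issued just after the wrap */

	printf("plain >   says newer: %d\n", new_id > old_id);	        /* 0, wrong */
	printf("after32() says newer: %d\n", after32(new_id, old_id)); /* 1, right */
	return 0;
}
```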
+/**
* tipc_service_subscribe - attach a subscription, and optionally
* issue the prescribed number of events if there is any service
* range overlapping with the requested range
@@ -286,36 +306,51 @@ static void tipc_service_subscribe(struct tipc_service *service,
struct tipc_subscription *sub)
{
struct tipc_subscr *sb = &sub->evt.s;
+ struct publication *p, *first, *tmp;
+ struct list_head publ_list;
struct service_range *sr;
struct tipc_name_seq ns;
- struct publication *p;
struct rb_node *n;
- bool first;
+ u32 filter;
ns.type = tipc_sub_read(sb, seq.type);
ns.lower = tipc_sub_read(sb, seq.lower);
ns.upper = tipc_sub_read(sb, seq.upper);
+ filter = tipc_sub_read(sb, filter);
tipc_sub_get(sub);
list_add(&sub->service_list, &service->subscriptions);
- if (tipc_sub_read(sb, filter) & TIPC_SUB_NO_STATUS)
+ if (filter & TIPC_SUB_NO_STATUS)
return;
+ INIT_LIST_HEAD(&publ_list);
for (n = rb_first(&service->ranges); n; n = rb_next(n)) {
sr = container_of(n, struct service_range, tree_node);
if (sr->lower > ns.upper)
break;
if (!tipc_sub_check_overlap(&ns, sr->lower, sr->upper))
continue;
- first = true;
+ first = NULL;
list_for_each_entry(p, &sr->all_publ, all_publ) {
- tipc_sub_report_overlap(sub, sr->lower, sr->upper,
- TIPC_PUBLISHED, p->port,
- p->node, p->scope, first);
- first = false;
+ if (filter & TIPC_SUB_PORTS)
+ list_add_tail(&p->list, &publ_list);
+ else if (!first || publication_after(first, p))
+ /* Pick this range's *first* publication */
+ first = p;
}
+ if (first)
+ list_add_tail(&first->list, &publ_list);
+ }
+
+ /* Sort the publications before reporting */
+ list_sort(NULL, &publ_list, tipc_publ_sort);
+ list_for_each_entry_safe(p, tmp, &publ_list, list) {
+ tipc_sub_report_overlap(sub, p->lower, p->upper,
+ TIPC_PUBLISHED, p->port, p->node,
+ p->scope, true);
+ list_del_init(&p->list);
}
}
diff --git a/net/tipc/name_table.h b/net/tipc/name_table.h
index f79066334cc8..728bc7016c38 100644
--- a/net/tipc/name_table.h
+++ b/net/tipc/name_table.h
@@ -58,6 +58,7 @@ struct tipc_group;
* @node: network address of publishing socket's node
* @port: publishing port
* @key: publication key, unique across the cluster
+ * @id: publication id
* @binding_node: all publications from the same node which bound this one
* - Remote publications: in node->publ_list
* Used by node/name distr to withdraw publications when node is lost
@@ -69,6 +70,7 @@ struct tipc_group;
* Used by closest_first and multicast receive lookup algorithms
* @all_publ: all publications identical to this one, whatever node and scope
* Used by round-robin lookup algorithm
+ * @list: to form a list of publications in temporal order
* @rcu: RCU callback head used for deferred freeing
*/
struct publication {
@@ -79,10 +81,12 @@ struct publication {
u32 node;
u32 port;
u32 key;
+ u32 id;
struct list_head binding_node;
struct list_head binding_sock;
struct list_head local_publ;
struct list_head all_publ;
+ struct list_head list;
struct rcu_head rcu;
};
diff --git a/net/tipc/net.c b/net/tipc/net.c
index 85707c185360..2de3cec9929d 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -42,6 +42,7 @@
#include "node.h"
#include "bcast.h"
#include "netlink.h"
+#include "monitor.h"
/*
* The TIPC locking policy is designed to ensure a very fine locking
@@ -136,6 +137,7 @@ static void tipc_net_finalize(struct net *net, u32 addr)
tipc_set_node_addr(net, addr);
tipc_named_reinit(net);
tipc_sk_reinit(net);
+ tipc_mon_reinit_self(net);
tipc_nametbl_publish(net, TIPC_CFG_SRV, addr, addr,
TIPC_CLUSTER_SCOPE, 0, addr);
}
diff --git a/net/tipc/netlink.c b/net/tipc/netlink.c
index d6165ad384c0..e53231bd23b4 100644
--- a/net/tipc/netlink.c
+++ b/net/tipc/netlink.c
@@ -102,7 +102,11 @@ const struct nla_policy tipc_nl_link_policy[TIPC_NLA_LINK_MAX + 1] = {
const struct nla_policy tipc_nl_node_policy[TIPC_NLA_NODE_MAX + 1] = {
[TIPC_NLA_NODE_UNSPEC] = { .type = NLA_UNSPEC },
[TIPC_NLA_NODE_ADDR] = { .type = NLA_U32 },
- [TIPC_NLA_NODE_UP] = { .type = NLA_FLAG }
+ [TIPC_NLA_NODE_UP] = { .type = NLA_FLAG },
+ [TIPC_NLA_NODE_ID] = { .type = NLA_BINARY,
+ .len = TIPC_NODEID_LEN},
+ [TIPC_NLA_NODE_KEY] = { .type = NLA_BINARY,
+ .len = TIPC_AEAD_KEY_SIZE_MAX},
};
/* Properties valid for media, bearer and link */
@@ -176,7 +180,8 @@ static const struct genl_ops tipc_genl_v2_ops[] = {
},
{
.cmd = TIPC_NL_PUBL_GET,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .validate = GENL_DONT_VALIDATE_STRICT |
+ GENL_DONT_VALIDATE_DUMP_STRICT,
.dumpit = tipc_nl_publ_dump,
},
{
@@ -239,7 +244,8 @@ static const struct genl_ops tipc_genl_v2_ops[] = {
},
{
.cmd = TIPC_NL_MON_PEER_GET,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .validate = GENL_DONT_VALIDATE_STRICT |
+ GENL_DONT_VALIDATE_DUMP_STRICT,
.dumpit = tipc_nl_node_dump_monitor_peer,
},
{
@@ -250,10 +256,23 @@ static const struct genl_ops tipc_genl_v2_ops[] = {
#ifdef CONFIG_TIPC_MEDIA_UDP
{
.cmd = TIPC_NL_UDP_GET_REMOTEIP,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .validate = GENL_DONT_VALIDATE_STRICT |
+ GENL_DONT_VALIDATE_DUMP_STRICT,
.dumpit = tipc_udp_nl_dump_remoteip,
},
#endif
+#ifdef CONFIG_TIPC_CRYPTO
+ {
+ .cmd = TIPC_NL_KEY_SET,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = tipc_nl_node_set_key,
+ },
+ {
+ .cmd = TIPC_NL_KEY_FLUSH,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = tipc_nl_node_flush_key,
+ },
+#endif
};
struct genl_family tipc_genl_family __ro_after_init = {
@@ -268,18 +287,6 @@ struct genl_family tipc_genl_family __ro_after_init = {
.n_ops = ARRAY_SIZE(tipc_genl_v2_ops),
};
-int tipc_nlmsg_parse(const struct nlmsghdr *nlh, struct nlattr ***attr)
-{
- u32 maxattr = tipc_genl_family.maxattr;
-
- *attr = genl_family_attrbuf(&tipc_genl_family);
- if (!*attr)
- return -EOPNOTSUPP;
-
- return nlmsg_parse_deprecated(nlh, GENL_HDRLEN, *attr, maxattr,
- tipc_nl_policy, NULL);
-}
-
int __init tipc_netlink_start(void)
{
int res;
diff --git a/net/tipc/netlink.h b/net/tipc/netlink.h
index 4ba0ad422110..7cf777723e3e 100644
--- a/net/tipc/netlink.h
+++ b/net/tipc/netlink.h
@@ -38,7 +38,6 @@
#include <net/netlink.h>
extern struct genl_family tipc_genl_family;
-int tipc_nlmsg_parse(const struct nlmsghdr *nlh, struct nlattr ***buf);
struct tipc_nl_msg {
struct sk_buff *skb;
diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
index e135d4e11231..17a529739f8d 100644
--- a/net/tipc/netlink_compat.c
+++ b/net/tipc/netlink_compat.c
@@ -181,15 +181,18 @@ static int __tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
struct tipc_nl_compat_msg *msg,
struct sk_buff *arg)
{
+ struct genl_dumpit_info info;
int len = 0;
int err;
struct sk_buff *buf;
struct nlmsghdr *nlmsg;
struct netlink_callback cb;
+ struct nlattr **attrbuf;
memset(&cb, 0, sizeof(cb));
cb.nlh = (struct nlmsghdr *)arg->data;
cb.skb = arg;
+ cb.data = &info;
buf = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
if (!buf)
@@ -201,19 +204,35 @@ static int __tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
return -ENOMEM;
}
+ attrbuf = kmalloc_array(tipc_genl_family.maxattr + 1,
+ sizeof(struct nlattr *), GFP_KERNEL);
+ if (!attrbuf) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ info.attrs = attrbuf;
+ err = nlmsg_parse_deprecated(cb.nlh, GENL_HDRLEN, attrbuf,
+ tipc_genl_family.maxattr,
+ tipc_genl_family.policy, NULL);
+ if (err)
+ goto err_out;
+
do {
int rem;
len = (*cmd->dumpit)(buf, &cb);
nlmsg_for_each_msg(nlmsg, nlmsg_hdr(buf), len, rem) {
- struct nlattr **attrs;
-
- err = tipc_nlmsg_parse(nlmsg, &attrs);
+ err = nlmsg_parse_deprecated(nlmsg, GENL_HDRLEN,
+ attrbuf,
+ tipc_genl_family.maxattr,
+ tipc_genl_family.policy,
+ NULL);
if (err)
goto err_out;
- err = (*cmd->format)(msg, attrs);
+ err = (*cmd->format)(msg, attrbuf);
if (err)
goto err_out;
@@ -231,6 +250,7 @@ static int __tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
err = 0;
err_out:
+ kfree(attrbuf);
tipc_dump_done(&cb);
kfree_skb(buf);
diff --git a/net/tipc/node.c b/net/tipc/node.c
index c8f6177dd5a2..ab04e00cb95b 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -44,6 +44,7 @@
#include "discover.h"
#include "netlink.h"
#include "trace.h"
+#include "crypto.h"
#define INVALID_NODE_SIG 0x10000
#define NODE_CLEANUP_AFTER 300000
@@ -89,6 +90,7 @@ struct tipc_bclink_entry {
* @links: array containing references to all links to node
* @action_flags: bit mask of different types of node actions
* @state: connectivity state vs peer node
+ * @preliminary: whether this is a preliminary node
* @sync_point: sequence number where synch/failover is finished
* @list: links to adjacent nodes in sorted list of cluster's nodes
* @working_links: number of working links to node (both active and standby)
@@ -99,6 +101,7 @@ struct tipc_bclink_entry {
* @publ_list: list of publications
* @rcu: rcu struct for tipc_node
* @delete_at: indicates the time for deleting a down node
+ * @crypto_rx: RX crypto handler
*/
struct tipc_node {
u32 addr;
@@ -112,6 +115,7 @@ struct tipc_node {
int action_flags;
struct list_head list;
int state;
+ bool preliminary;
bool failover_sent;
u16 sync_point;
int link_cnt;
@@ -120,12 +124,18 @@ struct tipc_node {
u32 signature;
u32 link_id;
u8 peer_id[16];
+ char peer_id_string[NODE_ID_STR_LEN];
struct list_head publ_list;
struct list_head conn_sks;
unsigned long keepalive_intv;
struct timer_list timer;
struct rcu_head rcu;
unsigned long delete_at;
+ struct net *peer_net;
+ u32 peer_hash_mix;
+#ifdef CONFIG_TIPC_CRYPTO
+ struct tipc_crypto *crypto_rx;
+#endif
};
/* Node FSM states and events:
@@ -163,7 +173,6 @@ static void tipc_node_timeout(struct timer_list *t);
static void tipc_node_fsm_evt(struct tipc_node *n, int evt);
static struct tipc_node *tipc_node_find(struct net *net, u32 addr);
static struct tipc_node *tipc_node_find_by_id(struct net *net, u8 *id);
-static void tipc_node_put(struct tipc_node *node);
static bool node_is_up(struct tipc_node *n);
static void tipc_node_delete_from_list(struct tipc_node *node);
@@ -184,7 +193,7 @@ static struct tipc_link *node_active_link(struct tipc_node *n, int sel)
return n->links[bearer_id].link;
}
-int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel)
+int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel, bool connected)
{
struct tipc_node *n;
int bearer_id;
@@ -194,6 +203,14 @@ int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel)
if (unlikely(!n))
return mtu;
+ /* Allow MAX_MSG_SIZE when building a connection-oriented message
+ * if both nodes are in the same core network
+ */
+ if (n->peer_net && connected) {
+ tipc_node_put(n);
+ return mtu;
+ }
+
bearer_id = n->active_links[sel & 1];
if (likely(bearer_id != INVALID_BEARER_ID))
mtu = n->links[bearer_id].mtu;
@@ -235,15 +252,51 @@ u16 tipc_node_get_capabilities(struct net *net, u32 addr)
return caps;
}
+u32 tipc_node_get_addr(struct tipc_node *node)
+{
+ return (node) ? node->addr : 0;
+}
+
+char *tipc_node_get_id_str(struct tipc_node *node)
+{
+ return node->peer_id_string;
+}
+
+#ifdef CONFIG_TIPC_CRYPTO
+/**
+ * tipc_node_crypto_rx - Retrieve crypto RX handle from node
+ * Note: node ref counter must be held first!
+ */
+struct tipc_crypto *tipc_node_crypto_rx(struct tipc_node *__n)
+{
+ return (__n) ? __n->crypto_rx : NULL;
+}
+
+struct tipc_crypto *tipc_node_crypto_rx_by_list(struct list_head *pos)
+{
+ return container_of(pos, struct tipc_node, list)->crypto_rx;
+}
+#endif
+
+void tipc_node_free(struct rcu_head *rp)
+{
+ struct tipc_node *n = container_of(rp, struct tipc_node, rcu);
+
+#ifdef CONFIG_TIPC_CRYPTO
+ tipc_crypto_stop(&n->crypto_rx);
+#endif
+ kfree(n);
+}
+
static void tipc_node_kref_release(struct kref *kref)
{
struct tipc_node *n = container_of(kref, struct tipc_node, kref);
kfree(n->bc_entry.link);
- kfree_rcu(n, rcu);
+ call_rcu(&n->rcu, tipc_node_free);
}
-static void tipc_node_put(struct tipc_node *node)
+void tipc_node_put(struct tipc_node *node)
{
kref_put(&node->kref, tipc_node_kref_release);
}
@@ -264,7 +317,7 @@ static struct tipc_node *tipc_node_find(struct net *net, u32 addr)
rcu_read_lock();
hlist_for_each_entry_rcu(node, &tn->node_htable[thash], hash) {
- if (node->addr != addr)
+ if (node->addr != addr || node->preliminary)
continue;
if (!kref_get_unless_zero(&node->kref))
node = NULL;
@@ -360,18 +413,71 @@ static void tipc_node_write_unlock(struct tipc_node *n)
}
}
-static struct tipc_node *tipc_node_create(struct net *net, u32 addr,
- u8 *peer_id, u16 capabilities)
+static void tipc_node_assign_peer_net(struct tipc_node *n, u32 hash_mixes)
+{
+ int net_id = tipc_netid(n->net);
+ struct tipc_net *tn_peer;
+ struct net *tmp;
+ u32 hash_chk;
+
+ if (n->peer_net)
+ return;
+
+ for_each_net_rcu(tmp) {
+ tn_peer = tipc_net(tmp);
+ if (!tn_peer)
+ continue;
+ /* Check whether the peer node exists in this namespace */
+ if (tn_peer->net_id != net_id)
+ continue;
+ if (memcmp(n->peer_id, tn_peer->node_id, NODE_ID_LEN))
+ continue;
+ hash_chk = tipc_net_hash_mixes(tmp, tn_peer->random);
+ if (hash_mixes ^ hash_chk)
+ continue;
+ n->peer_net = tmp;
+ n->peer_hash_mix = hash_mixes;
+ break;
+ }
+}
+
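tipc_node_assign_peer_net() accepts a peer as a local container only when the net id, the 16-byte node id, and the random-derived hash all agree. A userspace model of that triple match (names and data here are illustrative, not kernel APIs):

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative stand-in for a network namespace's TIPC identity */
struct ns_model {
	int net_id;
	uint8_t node_id[16];
	uint32_t hash;		/* models tipc_net_hash_mixes() output */
};

static const struct ns_model *find_peer_net(const struct ns_model *tbl,
					    int n, int net_id,
					    const uint8_t *peer_id,
					    uint32_t hash_mixes)
{
	for (int i = 0; i < n; i++) {
		if (tbl[i].net_id != net_id)
			continue;
		if (memcmp(peer_id, tbl[i].node_id, 16))
			continue;
		if (hash_mixes ^ tbl[i].hash)
			continue;
		return &tbl[i];		/* all three criteria agree */
	}
	return NULL;
}

int main(void)
{
	struct ns_model tbl[] = {
		{ 4711, "node-A", 0x1234 },
		{ 4711, "node-B", 0x5678 },
	};
	uint8_t peer[16] = "node-B";

	printf("match: %s\n",
	       find_peer_net(tbl, 2, 4711, peer, 0x5678) ? "yes" : "no");
	return 0;
}
```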
+struct tipc_node *tipc_node_create(struct net *net, u32 addr, u8 *peer_id,
+ u16 capabilities, u32 hash_mixes,
+ bool preliminary)
{
struct tipc_net *tn = net_generic(net, tipc_net_id);
struct tipc_node *n, *temp_node;
struct tipc_link *l;
+ unsigned long intv;
int bearer_id;
int i;
spin_lock_bh(&tn->node_list_lock);
- n = tipc_node_find(net, addr);
+ n = tipc_node_find(net, addr) ?:
+ tipc_node_find_by_id(net, peer_id);
if (n) {
+ if (!n->preliminary)
+ goto update;
+ if (preliminary)
+ goto exit;
+ /* A preliminary node becomes "real" now, refresh its data */
+ tipc_node_write_lock(n);
+ n->preliminary = false;
+ n->addr = addr;
+ hlist_del_rcu(&n->hash);
+ hlist_add_head_rcu(&n->hash,
+ &tn->node_htable[tipc_hashfn(addr)]);
+ list_del_rcu(&n->list);
+ list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
+ if (n->addr < temp_node->addr)
+ break;
+ }
+ list_add_tail_rcu(&n->list, &temp_node->list);
+ tipc_node_write_unlock_fast(n);
+
+update:
+ if (n->peer_hash_mix ^ hash_mixes)
+ tipc_node_assign_peer_net(n, hash_mixes);
if (n->capabilities == capabilities)
goto exit;
/* Same node may come back with new capabilities */
@@ -389,6 +495,10 @@ static struct tipc_node *tipc_node_create(struct net *net, u32 addr,
list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
tn->capabilities &= temp_node->capabilities;
}
+
+ tipc_bcast_toggle_rcast(net,
+ (tn->capabilities & TIPC_BCAST_RCAST));
+
goto exit;
}
n = kzalloc(sizeof(*n), GFP_ATOMIC);
@@ -396,9 +506,23 @@ static struct tipc_node *tipc_node_create(struct net *net, u32 addr,
pr_warn("Node creation failed, no memory\n");
goto exit;
}
+ tipc_nodeid2string(n->peer_id_string, peer_id);
+#ifdef CONFIG_TIPC_CRYPTO
+ if (unlikely(tipc_crypto_start(&n->crypto_rx, net, n))) {
+ pr_warn("Failed to start crypto RX(%s)!\n", n->peer_id_string);
+ kfree(n);
+ n = NULL;
+ goto exit;
+ }
+#endif
n->addr = addr;
+ n->preliminary = preliminary;
memcpy(&n->peer_id, peer_id, 16);
n->net = net;
+ n->peer_net = NULL;
+ n->peer_hash_mix = 0;
+ /* Assign kernel-local namespace, if one exists */
+ tipc_node_assign_peer_net(n, hash_mixes);
n->capabilities = capabilities;
kref_init(&n->kref);
rwlock_init(&n->lock);
@@ -417,22 +541,14 @@ static struct tipc_node *tipc_node_create(struct net *net, u32 addr,
n->signature = INVALID_NODE_SIG;
n->active_links[0] = INVALID_BEARER_ID;
n->active_links[1] = INVALID_BEARER_ID;
- if (!tipc_link_bc_create(net, tipc_own_addr(net),
- addr, U16_MAX,
- tipc_link_window(tipc_bc_sndlink(net)),
- n->capabilities,
- &n->bc_entry.inputq1,
- &n->bc_entry.namedq,
- tipc_bc_sndlink(net),
- &n->bc_entry.link)) {
- pr_warn("Broadcast rcv link creation failed, no memory\n");
- kfree(n);
- n = NULL;
- goto exit;
- }
+ n->bc_entry.link = NULL;
tipc_node_get(n);
timer_setup(&n->timer, tipc_node_timeout, 0);
- n->keepalive_intv = U32_MAX;
+ /* Start a slow timer anyway, crypto needs it */
+ n->keepalive_intv = 10000;
+ intv = jiffies + msecs_to_jiffies(n->keepalive_intv);
+ if (!mod_timer(&n->timer, intv))
+ tipc_node_get(n);
hlist_add_head_rcu(&n->hash, &tn->node_htable[tipc_hashfn(addr)]);
list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
if (n->addr < temp_node->addr)
@@ -444,6 +560,7 @@ static struct tipc_node *tipc_node_create(struct net *net, u32 addr,
list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
tn->capabilities &= temp_node->capabilities;
}
+ tipc_bcast_toggle_rcast(net, (tn->capabilities & TIPC_BCAST_RCAST));
trace_tipc_node_create(n, true, " ");
exit:
spin_unlock_bh(&tn->node_list_lock);
@@ -617,12 +734,18 @@ static bool tipc_node_cleanup(struct tipc_node *peer)
}
tipc_node_write_unlock(peer);
+ if (!deleted) {
+ spin_unlock_bh(&tn->node_list_lock);
+ return deleted;
+ }
+
/* Calculate cluster capabilities */
tn->capabilities = TIPC_NODE_CAPABILITIES;
list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
tn->capabilities &= temp_node->capabilities;
}
-
+ tipc_bcast_toggle_rcast(peer->net,
+ (tn->capabilities & TIPC_BCAST_RCAST));
spin_unlock_bh(&tn->node_list_lock);
return deleted;
}
@@ -645,6 +768,10 @@ static void tipc_node_timeout(struct timer_list *t)
return;
}
+#ifdef CONFIG_TIPC_CRYPTO
+ /* Take any crypto key related actions first */
+ tipc_crypto_timeout(n->crypto_rx);
+#endif
__skb_queue_head_init(&xmitq);
/* Initial node interval to value larger (10 seconds), then it will be
@@ -665,7 +792,7 @@ static void tipc_node_timeout(struct timer_list *t)
remains--;
}
tipc_node_read_unlock(n);
- tipc_bearer_xmit(n->net, bearer_id, &xmitq, &le->maddr);
+ tipc_bearer_xmit(n->net, bearer_id, &xmitq, &le->maddr, n);
if (rc & TIPC_LINK_DOWN_EVT)
tipc_node_link_down(n, bearer_id, false);
}
@@ -697,7 +824,7 @@ static void __tipc_node_link_up(struct tipc_node *n, int bearer_id,
n->link_id = tipc_link_id(nl);
/* Leave room for tunnel header when returning 'mtu' to users: */
- n->links[bearer_id].mtu = tipc_link_mtu(nl) - INT_H_SIZE;
+ n->links[bearer_id].mtu = tipc_link_mss(nl);
tipc_bearer_add_dest(n->net, bearer_id, n->addr);
tipc_bcast_inc_bearer_dst_cnt(n->net, bearer_id);
@@ -751,7 +878,7 @@ static void tipc_node_link_up(struct tipc_node *n, int bearer_id,
tipc_node_write_lock(n);
__tipc_node_link_up(n, bearer_id, xmitq);
maddr = &n->links[bearer_id].maddr;
- tipc_bearer_xmit(n->net, bearer_id, xmitq, maddr);
+ tipc_bearer_xmit(n->net, bearer_id, xmitq, maddr, n);
tipc_node_write_unlock(n);
}
@@ -906,7 +1033,7 @@ static void tipc_node_link_down(struct tipc_node *n, int bearer_id, bool delete)
if (delete)
tipc_mon_remove_peer(n->net, n->addr, old_bearer_id);
if (!skb_queue_empty(&xmitq))
- tipc_bearer_xmit(n->net, bearer_id, &xmitq, maddr);
+ tipc_bearer_xmit(n->net, bearer_id, &xmitq, maddr, n);
tipc_sk_rcv(n->net, &le->inputq);
}
@@ -950,6 +1077,8 @@ u32 tipc_node_try_addr(struct net *net, u8 *id, u32 addr)
{
struct tipc_net *tn = tipc_net(net);
struct tipc_node *n;
+ bool preliminary;
+ u32 sugg_addr;
/* Suggest new address if some other peer is using this one */
n = tipc_node_find(net, addr);
@@ -965,9 +1094,11 @@ u32 tipc_node_try_addr(struct net *net, u8 *id, u32 addr)
/* Suggest previously used address if peer is known */
n = tipc_node_find_by_id(net, id);
if (n) {
- addr = n->addr;
+ sugg_addr = n->addr;
+ preliminary = n->preliminary;
tipc_node_put(n);
- return addr;
+ if (!preliminary)
+ return sugg_addr;
}
/* Even this node may be in conflict */
@@ -979,12 +1110,12 @@ u32 tipc_node_try_addr(struct net *net, u8 *id, u32 addr)
void tipc_node_check_dest(struct net *net, u32 addr,
u8 *peer_id, struct tipc_bearer *b,
- u16 capabilities, u32 signature,
+ u16 capabilities, u32 signature, u32 hash_mixes,
struct tipc_media_addr *maddr,
bool *respond, bool *dupl_addr)
{
struct tipc_node *n;
- struct tipc_link *l;
+ struct tipc_link *l, *snd_l;
struct tipc_link_entry *le;
bool addr_match = false;
bool sign_match = false;
@@ -998,11 +1129,27 @@ void tipc_node_check_dest(struct net *net, u32 addr,
*dupl_addr = false;
*respond = false;
- n = tipc_node_create(net, addr, peer_id, capabilities);
+ n = tipc_node_create(net, addr, peer_id, capabilities, hash_mixes,
+ false);
if (!n)
return;
tipc_node_write_lock(n);
+ if (unlikely(!n->bc_entry.link)) {
+ snd_l = tipc_bc_sndlink(net);
+ if (!tipc_link_bc_create(net, tipc_own_addr(net),
+ addr, U16_MAX,
+ tipc_link_window(snd_l),
+ n->capabilities,
+ &n->bc_entry.inputq1,
+ &n->bc_entry.namedq, snd_l,
+ &n->bc_entry.link)) {
+ pr_warn("Broadcast rcv link creation failed, no mem\n");
+ tipc_node_write_unlock_fast(n);
+ tipc_node_put(n);
+ return;
+ }
+ }
le = &n->links[b->identity];
@@ -1017,6 +1164,9 @@ void tipc_node_check_dest(struct net *net, u32 addr,
if (sign_match && addr_match && link_up) {
/* All is fine. Do nothing. */
reset = false;
+ /* Peer node is not a container/local namespace */
+ if (!n->peer_hash_mix)
+ n->peer_hash_mix = hash_mixes;
} else if (sign_match && addr_match && !link_up) {
/* Respond. The link will come up in due time */
*respond = true;
@@ -1342,7 +1492,8 @@ static void node_lost_contact(struct tipc_node *n,
/* Notify publications from this node */
n->action_flags |= TIPC_NOTIFY_NODE_DOWN;
-
+ n->peer_net = NULL;
+ n->peer_hash_mix = 0;
/* Notify sockets connected to node */
list_for_each_entry_safe(conn, safe, conns, list) {
skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,
@@ -1424,6 +1575,56 @@ msg_full:
return -EMSGSIZE;
}
+static void tipc_lxc_xmit(struct net *peer_net, struct sk_buff_head *list)
+{
+ struct tipc_msg *hdr = buf_msg(skb_peek(list));
+ struct sk_buff_head inputq;
+
+ switch (msg_user(hdr)) {
+ case TIPC_LOW_IMPORTANCE:
+ case TIPC_MEDIUM_IMPORTANCE:
+ case TIPC_HIGH_IMPORTANCE:
+ case TIPC_CRITICAL_IMPORTANCE:
+ if (msg_connected(hdr) || msg_named(hdr)) {
+ tipc_loopback_trace(peer_net, list);
+ spin_lock_init(&list->lock);
+ tipc_sk_rcv(peer_net, list);
+ return;
+ }
+ if (msg_mcast(hdr)) {
+ tipc_loopback_trace(peer_net, list);
+ skb_queue_head_init(&inputq);
+ tipc_sk_mcast_rcv(peer_net, list, &inputq);
+ __skb_queue_purge(list);
+ skb_queue_purge(&inputq);
+ return;
+ }
+ return;
+ case MSG_FRAGMENTER:
+ if (tipc_msg_assemble(list)) {
+ tipc_loopback_trace(peer_net, list);
+ skb_queue_head_init(&inputq);
+ tipc_sk_mcast_rcv(peer_net, list, &inputq);
+ __skb_queue_purge(list);
+ skb_queue_purge(&inputq);
+ }
+ return;
+ case GROUP_PROTOCOL:
+ case CONN_MANAGER:
+ tipc_loopback_trace(peer_net, list);
+ spin_lock_init(&list->lock);
+ tipc_sk_rcv(peer_net, list);
+ return;
+ case LINK_PROTOCOL:
+ case NAME_DISTRIBUTOR:
+ case TUNNEL_PROTOCOL:
+ case BCAST_PROTOCOL:
+ return;
+ default:
+ return;
+ }
+}
+
/**
* tipc_node_xmit() is the general link level function for message sending
* @net: the applicable net namespace
@@ -1439,6 +1640,7 @@ int tipc_node_xmit(struct net *net, struct sk_buff_head *list,
struct tipc_link_entry *le = NULL;
struct tipc_node *n;
struct sk_buff_head xmitq;
+ bool node_up = false;
int bearer_id;
int rc;
@@ -1456,6 +1658,17 @@ int tipc_node_xmit(struct net *net, struct sk_buff_head *list,
}
tipc_node_read_lock(n);
+ node_up = node_is_up(n);
+ if (node_up && n->peer_net && check_net(n->peer_net)) {
+ /* xmit inner linux container */
+ tipc_lxc_xmit(n->peer_net, list);
+ if (likely(skb_queue_empty(list))) {
+ tipc_node_read_unlock(n);
+ tipc_node_put(n);
+ return 0;
+ }
+ }
+
bearer_id = n->active_links[selector & 1];
if (unlikely(bearer_id == INVALID_BEARER_ID)) {
tipc_node_read_unlock(n);
@@ -1474,7 +1687,7 @@ int tipc_node_xmit(struct net *net, struct sk_buff_head *list,
if (unlikely(rc == -ENOBUFS))
tipc_node_link_down(n, bearer_id, false);
else
- tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr);
+ tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr, n);
tipc_node_put(n);
@@ -1622,7 +1835,7 @@ static void tipc_node_bc_rcv(struct net *net, struct sk_buff *skb, int bearer_id
}
if (!skb_queue_empty(&xmitq))
- tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr);
+ tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr, n);
if (!skb_queue_empty(&be->inputq1))
tipc_node_mcast_rcv(n);
@@ -1800,20 +2013,38 @@ static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
{
struct sk_buff_head xmitq;
- struct tipc_node *n;
+ struct tipc_link_entry *le;
struct tipc_msg *hdr;
+ struct tipc_node *n;
int bearer_id = b->identity;
- struct tipc_link_entry *le;
u32 self = tipc_own_addr(net);
int usr, rc = 0;
u16 bc_ack;
+#ifdef CONFIG_TIPC_CRYPTO
+ struct tipc_ehdr *ehdr;
- __skb_queue_head_init(&xmitq);
+ /* Check if message must be decrypted first */
+ if (TIPC_SKB_CB(skb)->decrypted || !tipc_ehdr_validate(skb))
+ goto rcv;
+
+ ehdr = (struct tipc_ehdr *)skb->data;
+ if (likely(ehdr->user != LINK_CONFIG)) {
+ n = tipc_node_find(net, ntohl(ehdr->addr));
+ if (unlikely(!n))
+ goto discard;
+ } else {
+ n = tipc_node_find_by_id(net, ehdr->id);
+ }
+ tipc_crypto_rcv(net, (n) ? n->crypto_rx : NULL, &skb, b);
+ if (!skb)
+ return;
+rcv:
+#endif
/* Ensure message is well-formed before touching the header */
- TIPC_SKB_CB(skb)->validated = false;
if (unlikely(!tipc_msg_validate(&skb)))
goto discard;
+ __skb_queue_head_init(&xmitq);
hdr = buf_msg(skb);
usr = msg_user(hdr);
bc_ack = msg_bcast_ack(hdr);
@@ -1884,7 +2115,7 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
tipc_sk_rcv(net, &le->inputq);
if (!skb_queue_empty(&xmitq))
- tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr);
+ tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr, n);
tipc_node_put(n);
discard:
@@ -1915,7 +2146,7 @@ void tipc_node_apply_property(struct net *net, struct tipc_bearer *b,
tipc_link_set_mtu(e->link, b->mtu);
}
tipc_node_write_unlock(n);
- tipc_bearer_xmit(net, bearer_id, &xmitq, &e->maddr);
+ tipc_bearer_xmit(net, bearer_id, &xmitq, &e->maddr, NULL);
}
rcu_read_unlock();
@@ -1926,7 +2157,7 @@ int tipc_nl_peer_rm(struct sk_buff *skb, struct genl_info *info)
struct net *net = sock_net(skb->sk);
struct tipc_net *tn = net_generic(net, tipc_net_id);
struct nlattr *attrs[TIPC_NLA_NET_MAX + 1];
- struct tipc_node *peer;
+ struct tipc_node *peer, *temp_node;
u32 addr;
int err;
@@ -1967,6 +2198,12 @@ int tipc_nl_peer_rm(struct sk_buff *skb, struct genl_info *info)
tipc_node_write_unlock(peer);
tipc_node_delete(peer);
+ /* Calculate cluster capabilities */
+ tn->capabilities = TIPC_NODE_CAPABILITIES;
+ list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
+ tn->capabilities &= temp_node->capabilities;
+ }
+ tipc_bcast_toggle_rcast(net, (tn->capabilities & TIPC_BCAST_RCAST));
err = 0;
err_out:
tipc_node_put(peer);
@@ -2011,6 +2248,8 @@ int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb)
}
list_for_each_entry_rcu(node, &tn->node_list, list) {
+ if (node->preliminary)
+ continue;
if (last_addr) {
if (node->addr == last_addr)
last_addr = 0;
@@ -2150,7 +2389,8 @@ int tipc_nl_node_set_link(struct sk_buff *skb, struct genl_info *info)
out:
tipc_node_read_unlock(node);
- tipc_bearer_xmit(net, bearer_id, &xmitq, &node->links[bearer_id].maddr);
+ tipc_bearer_xmit(net, bearer_id, &xmitq, &node->links[bearer_id].maddr,
+ NULL);
return res;
}
@@ -2484,13 +2724,9 @@ int tipc_nl_node_dump_monitor_peer(struct sk_buff *skb,
int err;
if (!prev_node) {
- struct nlattr **attrs;
+ struct nlattr **attrs = genl_dumpit_info(cb)->attrs;
struct nlattr *mon[TIPC_NLA_MON_MAX + 1];
- err = tipc_nlmsg_parse(cb->nlh, &attrs);
- if (err)
- return err;
-
if (!attrs[TIPC_NLA_MON])
return -EINVAL;
@@ -2530,11 +2766,141 @@ int tipc_nl_node_dump_monitor_peer(struct sk_buff *skb,
return skb->len;
}
-u32 tipc_node_get_addr(struct tipc_node *node)
+#ifdef CONFIG_TIPC_CRYPTO
+static int tipc_nl_retrieve_key(struct nlattr **attrs,
+ struct tipc_aead_key **key)
{
- return (node) ? node->addr : 0;
+ struct nlattr *attr = attrs[TIPC_NLA_NODE_KEY];
+
+ if (!attr)
+ return -ENODATA;
+
+ *key = (struct tipc_aead_key *)nla_data(attr);
+ if (nla_len(attr) < tipc_aead_key_size(*key))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int tipc_nl_retrieve_nodeid(struct nlattr **attrs, u8 **node_id)
+{
+ struct nlattr *attr = attrs[TIPC_NLA_NODE_ID];
+
+ if (!attr)
+ return -ENODATA;
+
+ if (nla_len(attr) < TIPC_NODEID_LEN)
+ return -EINVAL;
+
+ *node_id = (u8 *)nla_data(attr);
+ return 0;
+}
+
+int __tipc_nl_node_set_key(struct sk_buff *skb, struct genl_info *info)
+{
+ struct nlattr *attrs[TIPC_NLA_NODE_MAX + 1];
+ struct net *net = sock_net(skb->sk);
+ struct tipc_net *tn = tipc_net(net);
+ struct tipc_node *n = NULL;
+ struct tipc_aead_key *ukey;
+ struct tipc_crypto *c;
+ u8 *id, *own_id;
+ int rc = 0;
+
+ if (!info->attrs[TIPC_NLA_NODE])
+ return -EINVAL;
+
+ rc = nla_parse_nested(attrs, TIPC_NLA_NODE_MAX,
+ info->attrs[TIPC_NLA_NODE],
+ tipc_nl_node_policy, info->extack);
+ if (rc)
+ goto exit;
+
+ own_id = tipc_own_id(net);
+ if (!own_id) {
+ rc = -EPERM;
+ goto exit;
+ }
+
+ rc = tipc_nl_retrieve_key(attrs, &ukey);
+ if (rc)
+ goto exit;
+
+ rc = tipc_aead_key_validate(ukey);
+ if (rc)
+ goto exit;
+
+ rc = tipc_nl_retrieve_nodeid(attrs, &id);
+ switch (rc) {
+ case -ENODATA:
+ /* Cluster key mode */
+ rc = tipc_crypto_key_init(tn->crypto_tx, ukey, CLUSTER_KEY);
+ break;
+ case 0:
+ /* Per-node key mode */
+ if (!memcmp(id, own_id, NODE_ID_LEN)) {
+ c = tn->crypto_tx;
+ } else {
+ n = tipc_node_find_by_id(net, id) ?:
+ tipc_node_create(net, 0, id, 0xffffu, 0, true);
+ if (unlikely(!n)) {
+ rc = -ENOMEM;
+ break;
+ }
+ c = n->crypto_rx;
+ }
+
+ rc = tipc_crypto_key_init(c, ukey, PER_NODE_KEY);
+ if (n)
+ tipc_node_put(n);
+ break;
+ default:
+ break;
+ }
+
+exit:
+ return (rc < 0) ? rc : 0;
+}
+
+int tipc_nl_node_set_key(struct sk_buff *skb, struct genl_info *info)
+{
+ int err;
+
+ rtnl_lock();
+ err = __tipc_nl_node_set_key(skb, info);
+ rtnl_unlock();
+
+ return err;
+}
+
+int __tipc_nl_node_flush_key(struct sk_buff *skb, struct genl_info *info)
+{
+ struct net *net = sock_net(skb->sk);
+ struct tipc_net *tn = tipc_net(net);
+ struct tipc_node *n;
+
+ tipc_crypto_key_flush(tn->crypto_tx);
+ rcu_read_lock();
+ list_for_each_entry_rcu(n, &tn->node_list, list)
+ tipc_crypto_key_flush(n->crypto_rx);
+ rcu_read_unlock();
+
+ pr_info("All keys are flushed!\n");
+ return 0;
}
+int tipc_nl_node_flush_key(struct sk_buff *skb, struct genl_info *info)
+{
+ int err;
+
+ rtnl_lock();
+ err = __tipc_nl_node_flush_key(skb, info);
+ rtnl_unlock();
+
+ return err;
+}
+#endif
+
/**
* tipc_node_dump - dump TIPC node data
* @n: tipc node to be dumped
@@ -2591,3 +2957,33 @@ int tipc_node_dump(struct tipc_node *n, bool more, char *buf)
return i;
}
+
+void tipc_node_pre_cleanup_net(struct net *exit_net)
+{
+ struct tipc_node *n;
+ struct tipc_net *tn;
+ struct net *tmp;
+
+ rcu_read_lock();
+ for_each_net_rcu(tmp) {
+ if (tmp == exit_net)
+ continue;
+ tn = tipc_net(tmp);
+ if (!tn)
+ continue;
+ spin_lock_bh(&tn->node_list_lock);
+ list_for_each_entry_rcu(n, &tn->node_list, list) {
+ if (!n->peer_net)
+ continue;
+ if (n->peer_net != exit_net)
+ continue;
+ tipc_node_write_lock(n);
+ n->peer_net = NULL;
+ n->peer_hash_mix = 0;
+ tipc_node_write_unlock_fast(n);
+ break;
+ }
+ spin_unlock_bh(&tn->node_list_lock);
+ }
+ rcu_read_unlock();
+}
diff --git a/net/tipc/node.h b/net/tipc/node.h
index 291d0ecd4101..a6803b449a2c 100644
--- a/net/tipc/node.h
+++ b/net/tipc/node.h
@@ -54,7 +54,8 @@ enum {
TIPC_LINK_PROTO_SEQNO = (1 << 6),
TIPC_MCAST_RBCTL = (1 << 7),
TIPC_GAP_ACK_BLOCK = (1 << 8),
- TIPC_TUNNEL_ENHANCED = (1 << 9)
+ TIPC_TUNNEL_ENHANCED = (1 << 9),
+ TIPC_NAGLE = (1 << 10)
};
#define TIPC_NODE_CAPABILITIES (TIPC_SYN_BIT | \
@@ -66,16 +67,27 @@ enum {
TIPC_LINK_PROTO_SEQNO | \
TIPC_MCAST_RBCTL | \
TIPC_GAP_ACK_BLOCK | \
- TIPC_TUNNEL_ENHANCED)
+ TIPC_TUNNEL_ENHANCED | \
+ TIPC_NAGLE)
+
#define INVALID_BEARER_ID -1
void tipc_node_stop(struct net *net);
bool tipc_node_get_id(struct net *net, u32 addr, u8 *id);
u32 tipc_node_get_addr(struct tipc_node *node);
+char *tipc_node_get_id_str(struct tipc_node *node);
+void tipc_node_put(struct tipc_node *node);
+struct tipc_node *tipc_node_create(struct net *net, u32 addr, u8 *peer_id,
+ u16 capabilities, u32 hash_mixes,
+ bool preliminary);
+#ifdef CONFIG_TIPC_CRYPTO
+struct tipc_crypto *tipc_node_crypto_rx(struct tipc_node *__n);
+struct tipc_crypto *tipc_node_crypto_rx_by_list(struct list_head *pos);
+#endif
u32 tipc_node_try_addr(struct net *net, u8 *id, u32 addr);
void tipc_node_check_dest(struct net *net, u32 onode, u8 *peer_id128,
struct tipc_bearer *bearer,
- u16 capabilities, u32 signature,
+ u16 capabilities, u32 signature, u32 hash_mixes,
struct tipc_media_addr *maddr,
bool *respond, bool *dupl_addr);
void tipc_node_delete_links(struct net *net, int bearer_id);
@@ -92,7 +104,7 @@ void tipc_node_unsubscribe(struct net *net, struct list_head *subscr, u32 addr);
void tipc_node_broadcast(struct net *net, struct sk_buff *skb);
int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port);
void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port);
-int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel);
+int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel, bool connected);
bool tipc_node_is_up(struct net *net, u32 addr);
u16 tipc_node_get_capabilities(struct net *net, u32 addr);
int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb);
@@ -107,4 +119,9 @@ int tipc_nl_node_get_monitor(struct sk_buff *skb, struct genl_info *info);
int tipc_nl_node_dump_monitor(struct sk_buff *skb, struct netlink_callback *cb);
int tipc_nl_node_dump_monitor_peer(struct sk_buff *skb,
struct netlink_callback *cb);
+#ifdef CONFIG_TIPC_CRYPTO
+int tipc_nl_node_set_key(struct sk_buff *skb, struct genl_info *info);
+int tipc_nl_node_flush_key(struct sk_buff *skb, struct genl_info *info);
+#endif
+void tipc_node_pre_cleanup_net(struct net *exit_net);
#endif
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 4b92b196cfa6..a1c8d722ca20 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -75,6 +75,7 @@ struct sockaddr_pair {
* @conn_instance: TIPC instance used when connection was established
* @published: non-zero if port has one or more associated names
* @max_pkt: maximum packet size "hint" used when building messages sent by port
+ * @maxnagle: maximum size of a message that can be subject to Nagle batching
* @portid: unique port identity in TIPC socket hash table
* @phdr: preformatted message header used when sending messages
* @cong_links: list of congested links
@@ -97,6 +98,7 @@ struct tipc_sock {
u32 conn_instance;
int published;
u32 max_pkt;
+ u32 maxnagle;
u32 portid;
struct tipc_msg phdr;
struct list_head cong_links;
@@ -116,6 +118,10 @@ struct tipc_sock {
struct tipc_mc_method mc_method;
struct rcu_head rcu;
struct tipc_group *group;
+ u32 oneway;
+ u16 snd_backlog;
+ bool expect_ack;
+ bool nodelay;
bool group_is_open;
};
@@ -137,6 +143,7 @@ static int tipc_sk_insert(struct tipc_sock *tsk);
static void tipc_sk_remove(struct tipc_sock *tsk);
static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz);
static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz);
+static void tipc_sk_push_backlog(struct tipc_sock *tsk);
static const struct proto_ops packet_ops;
static const struct proto_ops stream_ops;
@@ -227,6 +234,26 @@ static u16 tsk_inc(struct tipc_sock *tsk, int msglen)
return 1;
}
+/* tsk_set_nagle - enable/disable the Nagle property by manipulating maxnagle
+ */
+static void tsk_set_nagle(struct tipc_sock *tsk)
+{
+ struct sock *sk = &tsk->sk;
+
+ tsk->maxnagle = 0;
+ if (sk->sk_type != SOCK_STREAM)
+ return;
+ if (tsk->nodelay)
+ return;
+ if (!(tsk->peer_caps & TIPC_NAGLE))
+ return;
+ /* Limit node local buffer size to avoid receive queue overflow */
+ if (tsk->max_pkt == MAX_MSG_SIZE)
+ tsk->maxnagle = 1500;
+ else
+ tsk->maxnagle = tsk->max_pkt;
+}
+
/**
* tsk_advance_rx_queue - discard first buffer in socket receive queue
*
@@ -446,6 +473,7 @@ static int tipc_sk_create(struct net *net, struct socket *sock,
tsk = tipc_sk(sk);
tsk->max_pkt = MAX_PKT_DEFAULT;
+ tsk->maxnagle = 0;
INIT_LIST_HEAD(&tsk->publications);
INIT_LIST_HEAD(&tsk->cong_links);
msg = &tsk->phdr;
@@ -512,8 +540,12 @@ static void __tipc_shutdown(struct socket *sock, int error)
tipc_wait_for_cond(sock, &timeout, (!tsk->cong_link_cnt &&
!tsk_conn_cong(tsk)));
- /* Remove any pending SYN message */
- __skb_queue_purge(&sk->sk_write_queue);
+ /* Push out unsent messages or remove if pending SYN */
+ skb = skb_peek(&sk->sk_write_queue);
+ if (skb && !msg_is_syn(buf_msg(skb)))
+ tipc_sk_push_backlog(tsk);
+ else
+ __skb_queue_purge(&sk->sk_write_queue);
/* Reject all unreceived messages, except on an active connection
* (which disconnects locally & sends a 'FIN+' to peer).
@@ -854,7 +886,7 @@ static int tipc_send_group_msg(struct net *net, struct tipc_sock *tsk,
/* Build message as chain of buffers */
__skb_queue_head_init(&pkts);
- mtu = tipc_node_get_mtu(net, dnode, tsk->portid);
+ mtu = tipc_node_get_mtu(net, dnode, tsk->portid, false);
rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
if (unlikely(rc != dlen))
return rc;
@@ -1208,6 +1240,27 @@ void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
tipc_sk_rcv(net, inputq);
}
+/* tipc_sk_push_backlog(): send accumulated buffers in socket write queue
+ * when socket is in Nagle mode
+ */
+static void tipc_sk_push_backlog(struct tipc_sock *tsk)
+{
+ struct sk_buff_head *txq = &tsk->sk.sk_write_queue;
+ struct net *net = sock_net(&tsk->sk);
+ u32 dnode = tsk_peer_node(tsk);
+ int rc;
+
+ if (skb_queue_empty(txq) || tsk->cong_link_cnt)
+ return;
+
+ tsk->snt_unacked += tsk->snd_backlog;
+ tsk->snd_backlog = 0;
+ tsk->expect_ack = true;
+ rc = tipc_node_xmit(net, txq, dnode, tsk->portid);
+ if (rc == -ELINKCONG)
+ tsk->cong_link_cnt = 1;
+}
+
/**
* tipc_sk_conn_proto_rcv - receive a connection mng protocol message
* @tsk: receiving socket
@@ -1221,7 +1274,7 @@ static void tipc_sk_conn_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb,
u32 onode = tsk_own_node(tsk);
struct sock *sk = &tsk->sk;
int mtyp = msg_type(hdr);
- bool conn_cong;
+ bool was_cong;
/* Ignore if connection cannot be validated: */
if (!tsk_peer_msg(tsk, hdr)) {
@@ -1254,11 +1307,13 @@ static void tipc_sk_conn_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb,
__skb_queue_tail(xmitq, skb);
return;
} else if (mtyp == CONN_ACK) {
- conn_cong = tsk_conn_cong(tsk);
+ was_cong = tsk_conn_cong(tsk);
+ tsk->expect_ack = false;
+ tipc_sk_push_backlog(tsk);
tsk->snt_unacked -= msg_conn_ack(hdr);
if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
tsk->snd_win = msg_adv_win(hdr);
- if (conn_cong)
+ if (was_cong && !tsk_conn_cong(tsk))
sk->sk_write_space(sk);
} else if (mtyp != CONN_PROBE_REPLY) {
pr_warn("Received unknown CONN_PROTO msg\n");
@@ -1388,7 +1443,7 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
return rc;
__skb_queue_head_init(&pkts);
- mtu = tipc_node_get_mtu(net, dnode, tsk->portid);
+ mtu = tipc_node_get_mtu(net, dnode, tsk->portid, false);
rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
if (unlikely(rc != dlen))
return rc;
@@ -1437,15 +1492,15 @@ static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dlen)
struct sock *sk = sock->sk;
DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
+ struct sk_buff_head *txq = &sk->sk_write_queue;
struct tipc_sock *tsk = tipc_sk(sk);
struct tipc_msg *hdr = &tsk->phdr;
struct net *net = sock_net(sk);
- struct sk_buff_head pkts;
u32 dnode = tsk_peer_node(tsk);
+ int maxnagle = tsk->maxnagle;
+ int maxpkt = tsk->max_pkt;
int send, sent = 0;
- int rc = 0;
-
- __skb_queue_head_init(&pkts);
+ int blocks, rc = 0;
if (unlikely(dlen > INT_MAX))
return -EMSGSIZE;
@@ -1467,21 +1522,35 @@ static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dlen)
tipc_sk_connected(sk)));
if (unlikely(rc))
break;
-
send = min_t(size_t, dlen - sent, TIPC_MAX_USER_MSG_SIZE);
- rc = tipc_msg_build(hdr, m, sent, send, tsk->max_pkt, &pkts);
- if (unlikely(rc != send))
- break;
-
- trace_tipc_sk_sendstream(sk, skb_peek(&pkts),
+ blocks = tsk->snd_backlog;
+ if (tsk->oneway++ >= 4 && send <= maxnagle) {
+ rc = tipc_msg_append(hdr, m, send, maxnagle, txq);
+ if (unlikely(rc < 0))
+ break;
+ blocks += rc;
+ if (blocks <= 64 && tsk->expect_ack) {
+ tsk->snd_backlog = blocks;
+ sent += send;
+ break;
+ }
+ tsk->expect_ack = true;
+ } else {
+ rc = tipc_msg_build(hdr, m, sent, send, maxpkt, txq);
+ if (unlikely(rc != send))
+ break;
+ blocks += tsk_inc(tsk, send + MIN_H_SIZE);
+ }
+ trace_tipc_sk_sendstream(sk, skb_peek(txq),
TIPC_DUMP_SK_SNDQ, " ");
- rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
+ rc = tipc_node_xmit(net, txq, dnode, tsk->portid);
if (unlikely(rc == -ELINKCONG)) {
tsk->cong_link_cnt = 1;
rc = 0;
}
if (likely(!rc)) {
- tsk->snt_unacked += tsk_inc(tsk, send + MIN_H_SIZE);
+ tsk->snt_unacked += blocks;
+ tsk->snd_backlog = 0;
sent += send;
}
} while (sent < dlen && !rc);
@@ -1526,8 +1595,9 @@ static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port,
sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTV);
tipc_set_sk_state(sk, TIPC_ESTABLISHED);
tipc_node_add_conn(net, peer_node, tsk->portid, peer_port);
- tsk->max_pkt = tipc_node_get_mtu(net, peer_node, tsk->portid);
+ tsk->max_pkt = tipc_node_get_mtu(net, peer_node, tsk->portid, true);
tsk->peer_caps = tipc_node_get_capabilities(net, peer_node);
+ tsk_set_nagle(tsk);
__skb_queue_purge(&sk->sk_write_queue);
if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
return;
@@ -1848,6 +1918,7 @@ static int tipc_recvstream(struct socket *sock, struct msghdr *m,
bool peek = flags & MSG_PEEK;
int offset, required, copy, copied = 0;
int hlen, dlen, err, rc;
+ bool ack = false;
long timeout;
/* Catch invalid receive attempts */
@@ -1892,6 +1963,7 @@ static int tipc_recvstream(struct socket *sock, struct msghdr *m,
/* Copy data if msg ok, otherwise return error/partial data */
if (likely(!err)) {
+ ack = msg_ack_required(hdr);
offset = skb_cb->bytes_read;
copy = min_t(int, dlen - offset, buflen - copied);
rc = skb_copy_datagram_msg(skb, hlen + offset, m, copy);
@@ -1919,7 +1991,7 @@ static int tipc_recvstream(struct socket *sock, struct msghdr *m,
/* Send connection flow control advertisement when applicable */
tsk->rcv_unacked += tsk_inc(tsk, hlen + dlen);
- if (unlikely(tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE))
+ if (ack || tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE)
tipc_sk_send_ack(tsk);
/* Exit if all requested data or FIN/error received */
@@ -1990,6 +2062,7 @@ static void tipc_sk_proto_rcv(struct sock *sk,
smp_wmb();
tsk->cong_link_cnt--;
wakeup = true;
+ tipc_sk_push_backlog(tsk);
break;
case GROUP_PROTOCOL:
tipc_group_proto_rcv(grp, &wakeup, hdr, inputq, xmitq);
@@ -2029,6 +2102,7 @@ static bool tipc_sk_filter_connect(struct tipc_sock *tsk, struct sk_buff *skb)
if (unlikely(msg_mcast(hdr)))
return false;
+ tsk->oneway = 0;
switch (sk->sk_state) {
case TIPC_CONNECTING:
@@ -2074,6 +2148,8 @@ static bool tipc_sk_filter_connect(struct tipc_sock *tsk, struct sk_buff *skb)
return true;
return false;
case TIPC_ESTABLISHED:
+ if (!skb_queue_empty(&sk->sk_write_queue))
+ tipc_sk_push_backlog(tsk);
/* Accept only connection-based messages sent by peer */
if (likely(con_msg && !err && pport == oport && pnode == onode))
return true;
@@ -2804,7 +2880,7 @@ static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid)
struct tipc_sock *tsk;
rcu_read_lock();
- tsk = rhashtable_lookup_fast(&tn->sk_rht, &portid, tsk_rht_params);
+ tsk = rhashtable_lookup(&tn->sk_rht, &portid, tsk_rht_params);
if (tsk)
sock_hold(&tsk->sk);
rcu_read_unlock();
@@ -2959,6 +3035,7 @@ static int tipc_setsockopt(struct socket *sock, int lvl, int opt,
case TIPC_SRC_DROPPABLE:
case TIPC_DEST_DROPPABLE:
case TIPC_CONN_TIMEOUT:
+ case TIPC_NODELAY:
if (ol < sizeof(value))
return -EINVAL;
if (get_user(value, (u32 __user *)ov))
@@ -3007,6 +3084,10 @@ static int tipc_setsockopt(struct socket *sock, int lvl, int opt,
case TIPC_GROUP_LEAVE:
res = tipc_sk_leave(tsk);
break;
+ case TIPC_NODELAY:
+ tsk->nodelay = !!value;
+ tsk_set_nagle(tsk);
+ break;
default:
res = -EINVAL;
}
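
As a usage sketch: a hypothetical userspace helper toggling the new
TIPC_NODELAY option through the setsockopt() path above. SOL_TIPC and the
u32 option value follow the TIPC UAPI; the wrapper name is illustrative only.

    /* Hypothetical helper: disable Nagle batching on a TIPC stream socket. */
    #include <sys/socket.h>
    #include <linux/tipc.h>

    static int tipc_set_nodelay(int sd, int on)
    {
            __u32 val = !!on;

            /* the kernel reads the value as a u32 (see get_user() above) */
            return setsockopt(sd, SOL_TIPC, TIPC_NODELAY, &val, sizeof(val));
    }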
@@ -3588,13 +3669,9 @@ int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb)
struct tipc_sock *tsk;
if (!tsk_portid) {
- struct nlattr **attrs;
+ struct nlattr **attrs = genl_dumpit_info(cb)->attrs;
struct nlattr *sock[TIPC_NLA_SOCK_MAX + 1];
- err = tipc_nlmsg_parse(cb->nlh, &attrs);
- if (err)
- return err;
-
if (!attrs[TIPC_NLA_SOCK])
return -EINVAL;
diff --git a/net/tipc/sysctl.c b/net/tipc/sysctl.c
index 6159d327db76..58ab3d6dcdce 100644
--- a/net/tipc/sysctl.c
+++ b/net/tipc/sysctl.c
@@ -35,6 +35,7 @@
#include "core.h"
#include "trace.h"
+#include "crypto.h"
#include <linux/sysctl.h>
@@ -64,6 +65,16 @@ static struct ctl_table tipc_table[] = {
.mode = 0644,
.proc_handler = proc_doulongvec_minmax,
},
+#ifdef CONFIG_TIPC_CRYPTO
+ {
+ .procname = "max_tfms",
+ .data = &sysctl_tipc_max_tfms,
+ .maxlen = sizeof(sysctl_tipc_max_tfms),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ONE,
+ },
+#endif
{}
};
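
A hedged run-time sketch for the new knob; the /proc path is inferred from
the existing net.tipc sysctl table, and the helper name is illustrative.

    /* Hypothetical: raise the TIPC crypto TFM limit via procfs
     * (path assumed: /proc/sys/net/tipc/max_tfms). */
    #include <stdio.h>

    static int tipc_set_max_tfms(int n)
    {
            FILE *f = fopen("/proc/sys/net/tipc/max_tfms", "w");

            if (!f)
                    return -1;
            fprintf(f, "%d\n", n);
            return fclose(f);
    }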
diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c
index 287df68721df..86aaa4d3e781 100644
--- a/net/tipc/udp_media.c
+++ b/net/tipc/udp_media.c
@@ -372,6 +372,7 @@ static int tipc_udp_recv(struct sock *sk, struct sk_buff *skb)
goto out;
if (b && test_bit(0, &b->up)) {
+ TIPC_SKB_CB(skb)->flags = 0;
tipc_rcv(sock_net(sk), skb, b);
return 0;
}
@@ -448,15 +449,11 @@ int tipc_udp_nl_dump_remoteip(struct sk_buff *skb, struct netlink_callback *cb)
int i;
if (!bid && !skip_cnt) {
+ struct nlattr **attrs = genl_dumpit_info(cb)->attrs;
struct net *net = sock_net(skb->sk);
struct nlattr *battrs[TIPC_NLA_BEARER_MAX + 1];
- struct nlattr **attrs;
char *bname;
- err = tipc_nlmsg_parse(cb->nlh, &attrs);
- if (err)
- return err;
-
if (!attrs[TIPC_NLA_BEARER])
return -EINVAL;
diff --git a/net/tls/Kconfig b/net/tls/Kconfig
index e4328b3b72eb..61ec78521a60 100644
--- a/net/tls/Kconfig
+++ b/net/tls/Kconfig
@@ -26,3 +26,13 @@ config TLS_DEVICE
Enable kernel support for HW offload of the TLS protocol.
If unsure, say N.
+
+config TLS_TOE
+ bool "Transport Layer Security TCP stack bypass"
+ depends on TLS
+ default n
+ help
+ Enable kernel support for legacy HW offload of the TLS protocol,
+ which is incompatible with the Linux networking stack semantics.
+
+ If unsure, say N.
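
A minimal config fragment exercising the new option (values illustrative;
TLS_TOE simply follows the dependency declared above):

    CONFIG_TLS=y
    CONFIG_TLS_TOE=y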
diff --git a/net/tls/Makefile b/net/tls/Makefile
index ef0dc74ce8f9..f1ffbfe8968d 100644
--- a/net/tls/Makefile
+++ b/net/tls/Makefile
@@ -3,8 +3,11 @@
# Makefile for the TLS subsystem.
#
+CFLAGS_trace.o := -I$(src)
+
obj-$(CONFIG_TLS) += tls.o
-tls-y := tls_main.o tls_sw.o
+tls-y := tls_main.o tls_sw.o tls_proc.o trace.o
+tls-$(CONFIG_TLS_TOE) += tls_toe.o
tls-$(CONFIG_TLS_DEVICE) += tls_device.o tls_device_fallback.o
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
index 683d00837693..0683788bbef0 100644
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -38,6 +38,8 @@
#include <net/tcp.h>
#include <net/tls.h>
+#include "trace.h"
+
/* device_offload_lock is used to synchronize tls_dev_add
* against NETDEV_DOWN notifications.
*/
@@ -202,6 +204,15 @@ void tls_device_free_resources_tx(struct sock *sk)
tls_free_partial_record(sk, tls_ctx);
}
+void tls_offload_tx_resync_request(struct sock *sk, u32 got_seq, u32 exp_seq)
+{
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+
+ trace_tls_device_tx_resync_req(sk, got_seq, exp_seq);
+ WARN_ON(test_and_set_bit(TLS_TX_SYNC_SCHED, &tls_ctx->flags));
+}
+EXPORT_SYMBOL_GPL(tls_offload_tx_resync_request);
+
static void tls_device_resync_tx(struct sock *sk, struct tls_context *tls_ctx,
u32 seq)
{
@@ -216,6 +227,7 @@ static void tls_device_resync_tx(struct sock *sk, struct tls_context *tls_ctx,
rcd_sn = tls_ctx->tx.rec_seq;
+ trace_tls_device_tx_resync_send(sk, seq, rcd_sn);
down_read(&device_offload_lock);
netdev = tls_ctx->netdev;
if (netdev)
@@ -419,7 +431,7 @@ static int tls_push_data(struct sock *sk,
~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL | MSG_SENDPAGE_NOTLAST))
return -ENOTSUPP;
- if (sk->sk_err)
+ if (unlikely(sk->sk_err))
return -sk->sk_err;
flags |= MSG_SENDPAGE_DECRYPTED;
@@ -440,9 +452,8 @@ static int tls_push_data(struct sock *sk,
max_open_record_len = TLS_MAX_PAYLOAD_SIZE +
prot->prepend_size;
do {
- rc = tls_do_allocation(sk, ctx, pfrag,
- prot->prepend_size);
- if (rc) {
+ rc = tls_do_allocation(sk, ctx, pfrag, prot->prepend_size);
+ if (unlikely(rc)) {
rc = sk_stream_wait_memory(sk, &timeo);
if (!rc)
continue;
@@ -645,15 +656,19 @@ void tls_device_write_space(struct sock *sk, struct tls_context *ctx)
static void tls_device_resync_rx(struct tls_context *tls_ctx,
struct sock *sk, u32 seq, u8 *rcd_sn)
{
+ struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);
struct net_device *netdev;
if (WARN_ON(test_and_set_bit(TLS_RX_SYNC_RUNNING, &tls_ctx->flags)))
return;
+
+ trace_tls_device_rx_resync_send(sk, seq, rcd_sn, rx_ctx->resync_type);
netdev = READ_ONCE(tls_ctx->netdev);
if (netdev)
netdev->tlsdev_ops->tls_dev_resync(netdev, sk, seq, rcd_sn,
TLS_OFFLOAD_CTX_DIR_RX);
clear_bit_unlock(TLS_RX_SYNC_RUNNING, &tls_ctx->flags);
+ TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXDEVICERESYNC);
}
void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq)
@@ -661,8 +676,8 @@ void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq)
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_offload_context_rx *rx_ctx;
u8 rcd_sn[TLS_MAX_REC_SEQ_SIZE];
+ u32 sock_data, is_req_pending;
struct tls_prot_info *prot;
- u32 is_req_pending;
s64 resync_req;
u32 req_seq;
@@ -691,8 +706,12 @@ void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq)
/* head of next rec is already in, note that the sock_inq will
* include the currently parsed message when called from parser
*/
- if (tcp_inq(sk) > rcd_len)
+ sock_data = tcp_inq(sk);
+ if (sock_data > rcd_len) {
+ trace_tls_device_rx_resync_nh_delay(sk, sock_data,
+ rcd_len);
return;
+ }
rx_ctx->resync_nh_do_now = 0;
seq += rcd_len;
@@ -736,6 +755,7 @@ static void tls_device_core_ctrl_rx_resync(struct tls_context *tls_ctx,
/* head of next rec is already in, parser will sync for us */
if (tcp_inq(sk) > rxm->full_len) {
+ trace_tls_device_rx_resync_nh_schedule(sk);
ctx->resync_nh_do_now = 1;
} else {
struct tls_prot_info *prot = &tls_ctx->prot_info;
@@ -834,9 +854,9 @@ free_buf:
return err;
}
-int tls_device_decrypted(struct sock *sk, struct sk_buff *skb)
+int tls_device_decrypted(struct sock *sk, struct tls_context *tls_ctx,
+ struct sk_buff *skb, struct strp_msg *rxm)
{
- struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_offload_context_rx *ctx = tls_offload_ctx_rx(tls_ctx);
int is_decrypted = skb->decrypted;
int is_encrypted = !is_decrypted;
@@ -848,6 +868,10 @@ int tls_device_decrypted(struct sock *sk, struct sk_buff *skb)
is_encrypted &= !skb_iter->decrypted;
}
+ trace_tls_device_decrypted(sk, tcp_sk(sk)->copied_seq - rxm->full_len,
+ tls_ctx->rx.rec_seq, rxm->full_len,
+ is_encrypted, is_decrypted);
+
ctx->sw.decrypted |= is_decrypted;
/* Return immediately if the record is either entirely plaintext or
@@ -1021,6 +1045,8 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_TX,
&ctx->crypto_send.info,
tcp_sk(sk)->write_seq);
+ trace_tls_device_offload_set(sk, TLS_OFFLOAD_CTX_DIR_TX,
+ tcp_sk(sk)->write_seq, rec_seq, rc);
if (rc)
goto release_lock;
@@ -1057,6 +1083,7 @@ free_marker_record:
int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
{
+ struct tls12_crypto_info_aes_gcm_128 *info;
struct tls_offload_context_rx *context;
struct net_device *netdev;
int rc = 0;
@@ -1104,6 +1131,9 @@ int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_RX,
&ctx->crypto_recv.info,
tcp_sk(sk)->copied_seq);
+ info = (void *)&ctx->crypto_recv.info;
+ trace_tls_device_offload_set(sk, TLS_OFFLOAD_CTX_DIR_RX,
+ tcp_sk(sk)->copied_seq, info->rec_seq, rc);
if (rc)
goto free_sw_resources;
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index 0775ae40fcfb..bdca31ffe6da 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -41,7 +41,9 @@
#include <linux/inetdevice.h>
#include <linux/inet_diag.h>
+#include <net/snmp.h>
#include <net/tls.h>
+#include <net/tls_toe.h>
MODULE_AUTHOR("Mellanox Technologies");
MODULE_DESCRIPTION("Transport Layer Security Support");
@@ -58,14 +60,12 @@ static struct proto *saved_tcpv6_prot;
static DEFINE_MUTEX(tcpv6_prot_mutex);
static struct proto *saved_tcpv4_prot;
static DEFINE_MUTEX(tcpv4_prot_mutex);
-static LIST_HEAD(device_list);
-static DEFINE_SPINLOCK(device_spinlock);
static struct proto tls_prots[TLS_NUM_PROTS][TLS_NUM_CONFIG][TLS_NUM_CONFIG];
static struct proto_ops tls_sw_proto_ops;
static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
struct proto *base);
-static void update_sk_prot(struct sock *sk, struct tls_context *ctx)
+void update_sk_prot(struct sock *sk, struct tls_context *ctx)
{
int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4;
@@ -287,14 +287,19 @@ static void tls_sk_proto_cleanup(struct sock *sk,
kfree(ctx->tx.rec_seq);
kfree(ctx->tx.iv);
tls_sw_release_resources_tx(sk);
+ TLS_DEC_STATS(sock_net(sk), LINUX_MIB_TLSCURRTXSW);
} else if (ctx->tx_conf == TLS_HW) {
tls_device_free_resources_tx(sk);
+ TLS_DEC_STATS(sock_net(sk), LINUX_MIB_TLSCURRTXDEVICE);
}
- if (ctx->rx_conf == TLS_SW)
+ if (ctx->rx_conf == TLS_SW) {
tls_sw_release_resources_rx(sk);
- else if (ctx->rx_conf == TLS_HW)
+ TLS_DEC_STATS(sock_net(sk), LINUX_MIB_TLSCURRRXSW);
+ } else if (ctx->rx_conf == TLS_HW) {
tls_device_offload_cleanup_rx(sk);
+ TLS_DEC_STATS(sock_net(sk), LINUX_MIB_TLSCURRRXDEVICE);
+ }
}
static void tls_sk_proto_close(struct sock *sk, long timeout)
@@ -535,19 +540,29 @@ static int do_tls_setsockopt_conf(struct sock *sk, char __user *optval,
if (tx) {
rc = tls_set_device_offload(sk, ctx);
conf = TLS_HW;
- if (rc) {
+ if (!rc) {
+ TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSTXDEVICE);
+ TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSCURRTXDEVICE);
+ } else {
rc = tls_set_sw_offload(sk, ctx, 1);
if (rc)
goto err_crypto_info;
+ TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSTXSW);
+ TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSCURRTXSW);
conf = TLS_SW;
}
} else {
rc = tls_set_device_offload_rx(sk, ctx);
conf = TLS_HW;
- if (rc) {
+ if (!rc) {
+ TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXDEVICE);
+ TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSCURRRXDEVICE);
+ } else {
rc = tls_set_sw_offload(sk, ctx, 0);
if (rc)
goto err_crypto_info;
+ TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXSW);
+ TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSCURRRXSW);
conf = TLS_SW;
}
tls_sw_strparser_arm(sk, ctx);
@@ -604,7 +619,7 @@ static int tls_setsockopt(struct sock *sk, int level, int optname,
return do_tls_setsockopt(sk, optname, optval, optlen);
}
-static struct tls_context *create_ctx(struct sock *sk)
+struct tls_context *tls_ctx_create(struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
struct tls_context *ctx;
@@ -645,90 +660,6 @@ static void tls_build_proto(struct sock *sk)
}
}
-static void tls_hw_sk_destruct(struct sock *sk)
-{
- struct tls_context *ctx = tls_get_ctx(sk);
- struct inet_connection_sock *icsk = inet_csk(sk);
-
- ctx->sk_destruct(sk);
- /* Free ctx */
- rcu_assign_pointer(icsk->icsk_ulp_data, NULL);
- tls_ctx_free(sk, ctx);
-}
-
-static int tls_hw_prot(struct sock *sk)
-{
- struct tls_context *ctx;
- struct tls_device *dev;
- int rc = 0;
-
- spin_lock_bh(&device_spinlock);
- list_for_each_entry(dev, &device_list, dev_list) {
- if (dev->feature && dev->feature(dev)) {
- ctx = create_ctx(sk);
- if (!ctx)
- goto out;
-
- spin_unlock_bh(&device_spinlock);
- tls_build_proto(sk);
- ctx->sk_destruct = sk->sk_destruct;
- sk->sk_destruct = tls_hw_sk_destruct;
- ctx->rx_conf = TLS_HW_RECORD;
- ctx->tx_conf = TLS_HW_RECORD;
- update_sk_prot(sk, ctx);
- spin_lock_bh(&device_spinlock);
- rc = 1;
- break;
- }
- }
-out:
- spin_unlock_bh(&device_spinlock);
- return rc;
-}
-
-static void tls_hw_unhash(struct sock *sk)
-{
- struct tls_context *ctx = tls_get_ctx(sk);
- struct tls_device *dev;
-
- spin_lock_bh(&device_spinlock);
- list_for_each_entry(dev, &device_list, dev_list) {
- if (dev->unhash) {
- kref_get(&dev->kref);
- spin_unlock_bh(&device_spinlock);
- dev->unhash(dev, sk);
- kref_put(&dev->kref, dev->release);
- spin_lock_bh(&device_spinlock);
- }
- }
- spin_unlock_bh(&device_spinlock);
- ctx->sk_proto->unhash(sk);
-}
-
-static int tls_hw_hash(struct sock *sk)
-{
- struct tls_context *ctx = tls_get_ctx(sk);
- struct tls_device *dev;
- int err;
-
- err = ctx->sk_proto->hash(sk);
- spin_lock_bh(&device_spinlock);
- list_for_each_entry(dev, &device_list, dev_list) {
- if (dev->hash) {
- kref_get(&dev->kref);
- spin_unlock_bh(&device_spinlock);
- err |= dev->hash(dev, sk);
- kref_put(&dev->kref, dev->release);
- spin_lock_bh(&device_spinlock);
- }
- }
- spin_unlock_bh(&device_spinlock);
-
- if (err)
- tls_hw_unhash(sk);
- return err;
-}
-
static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
struct proto *base)
{
@@ -766,10 +697,11 @@ static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
prot[TLS_HW][TLS_HW] = prot[TLS_HW][TLS_SW];
#endif
-
+#ifdef CONFIG_TLS_TOE
prot[TLS_HW_RECORD][TLS_HW_RECORD] = *base;
- prot[TLS_HW_RECORD][TLS_HW_RECORD].hash = tls_hw_hash;
- prot[TLS_HW_RECORD][TLS_HW_RECORD].unhash = tls_hw_unhash;
+ prot[TLS_HW_RECORD][TLS_HW_RECORD].hash = tls_toe_hash;
+ prot[TLS_HW_RECORD][TLS_HW_RECORD].unhash = tls_toe_unhash;
+#endif
}
static int tls_init(struct sock *sk)
@@ -777,8 +709,12 @@ static int tls_init(struct sock *sk)
struct tls_context *ctx;
int rc = 0;
- if (tls_hw_prot(sk))
+ tls_build_proto(sk);
+
+#ifdef CONFIG_TLS_TOE
+ if (tls_toe_bypass(sk))
return 0;
+#endif
/* The TLS ulp is currently supported only for TCP sockets
* in ESTABLISHED state.
@@ -789,11 +725,9 @@ static int tls_init(struct sock *sk)
if (sk->sk_state != TCP_ESTABLISHED)
return -ENOTSUPP;
- tls_build_proto(sk);
-
/* allocate tls context */
write_lock_bh(&sk->sk_callback_lock);
- ctx = create_ctx(sk);
+ ctx = tls_ctx_create(sk);
if (!ctx) {
rc = -ENOMEM;
goto out;
@@ -879,21 +813,34 @@ static size_t tls_get_info_size(const struct sock *sk)
return size;
}
-void tls_register_device(struct tls_device *device)
+static int __net_init tls_init_net(struct net *net)
{
- spin_lock_bh(&device_spinlock);
- list_add_tail(&device->dev_list, &device_list);
- spin_unlock_bh(&device_spinlock);
+ int err;
+
+ net->mib.tls_statistics = alloc_percpu(struct linux_tls_mib);
+ if (!net->mib.tls_statistics)
+ return -ENOMEM;
+
+ err = tls_proc_init(net);
+ if (err)
+ goto err_free_stats;
+
+ return 0;
+err_free_stats:
+ free_percpu(net->mib.tls_statistics);
+ return err;
}
-EXPORT_SYMBOL(tls_register_device);
-void tls_unregister_device(struct tls_device *device)
+static void __net_exit tls_exit_net(struct net *net)
{
- spin_lock_bh(&device_spinlock);
- list_del(&device->dev_list);
- spin_unlock_bh(&device_spinlock);
+ tls_proc_fini(net);
+ free_percpu(net->mib.tls_statistics);
}
-EXPORT_SYMBOL(tls_unregister_device);
+
+static struct pernet_operations tls_proc_ops = {
+ .init = tls_init_net,
+ .exit = tls_exit_net,
+};
static struct tcp_ulp_ops tcp_tls_ulp_ops __read_mostly = {
.name = "tls",
@@ -906,8 +853,15 @@ static struct tcp_ulp_ops tcp_tls_ulp_ops __read_mostly = {
static int __init tls_register(void)
{
+ int err;
+
+ err = register_pernet_subsys(&tls_proc_ops);
+ if (err)
+ return err;
+
tls_sw_proto_ops = inet_stream_ops;
tls_sw_proto_ops.splice_read = tls_sw_splice_read;
+ tls_sw_proto_ops.sendpage_locked = tls_sw_sendpage_locked;
tls_device_init();
tcp_register_ulp(&tcp_tls_ulp_ops);
@@ -919,6 +873,7 @@ static void __exit tls_unregister(void)
{
tcp_unregister_ulp(&tcp_tls_ulp_ops);
tls_device_cleanup();
+ unregister_pernet_subsys(&tls_proc_ops);
}
module_init(tls_register);
diff --git a/net/tls/tls_proc.c b/net/tls/tls_proc.c
new file mode 100644
index 000000000000..3a5dd1e07233
--- /dev/null
+++ b/net/tls/tls_proc.c
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2019 Netronome Systems, Inc. */
+
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <net/snmp.h>
+#include <net/tls.h>
+
+#ifdef CONFIG_PROC_FS
+static const struct snmp_mib tls_mib_list[] = {
+ SNMP_MIB_ITEM("TlsCurrTxSw", LINUX_MIB_TLSCURRTXSW),
+ SNMP_MIB_ITEM("TlsCurrRxSw", LINUX_MIB_TLSCURRRXSW),
+ SNMP_MIB_ITEM("TlsCurrTxDevice", LINUX_MIB_TLSCURRTXDEVICE),
+ SNMP_MIB_ITEM("TlsCurrRxDevice", LINUX_MIB_TLSCURRRXDEVICE),
+ SNMP_MIB_ITEM("TlsTxSw", LINUX_MIB_TLSTXSW),
+ SNMP_MIB_ITEM("TlsRxSw", LINUX_MIB_TLSRXSW),
+ SNMP_MIB_ITEM("TlsTxDevice", LINUX_MIB_TLSTXDEVICE),
+ SNMP_MIB_ITEM("TlsRxDevice", LINUX_MIB_TLSRXDEVICE),
+ SNMP_MIB_ITEM("TlsDecryptError", LINUX_MIB_TLSDECRYPTERROR),
+ SNMP_MIB_ITEM("TlsRxDeviceResync", LINUX_MIB_TLSRXDEVICERESYNC),
+ SNMP_MIB_SENTINEL
+};
+
+static int tls_statistics_seq_show(struct seq_file *seq, void *v)
+{
+ unsigned long buf[LINUX_MIB_TLSMAX] = {};
+ struct net *net = seq->private;
+ int i;
+
+ snmp_get_cpu_field_batch(buf, tls_mib_list, net->mib.tls_statistics);
+ for (i = 0; tls_mib_list[i].name; i++)
+ seq_printf(seq, "%-32s\t%lu\n", tls_mib_list[i].name, buf[i]);
+
+ return 0;
+}
+#endif
+
+int __net_init tls_proc_init(struct net *net)
+{
+ if (!proc_create_net_single("tls_stat", 0444, net->proc_net,
+ tls_statistics_seq_show, NULL))
+ return -ENOMEM;
+ return 0;
+}
+
+void __net_exit tls_proc_fini(struct net *net)
+{
+ remove_proc_entry("tls_stat", net->proc_net);
+}
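
The counters registered above appear per network namespace as
/proc/net/tls_stat; a minimal reader, assuming nothing beyond that path:

    /* Hypothetical sketch: dump the per-netns TLS MIB counters. */
    #include <stdio.h>

    int main(void)
    {
            char line[128];
            FILE *f = fopen("/proc/net/tls_stat", "r");

            if (!f)
                    return 1;
            while (fgets(line, sizeof(line), f))
                    fputs(line, stdout);
            return fclose(f);
    }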
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index 446f23c1f3ce..da9f9ce51e7b 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -168,6 +168,9 @@ static void tls_decrypt_done(struct crypto_async_request *req, int err)
/* Propagate if there was an err */
if (err) {
+ if (err == -EBADMSG)
+ TLS_INC_STATS(sock_net(skb->sk),
+ LINUX_MIB_TLSDECRYPTERROR);
ctx->async_wait.err = err;
tls_err_abort(skb->sk, err);
} else {
@@ -253,6 +256,8 @@ static int tls_do_decryption(struct sock *sk,
return ret;
ret = crypto_wait_req(ret, &ctx->async_wait);
+ } else if (ret == -EBADMSG) {
+ TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTERROR);
}
if (async)
@@ -1204,6 +1209,17 @@ sendpage_end:
return copied ? copied : ret;
}
+int tls_sw_sendpage_locked(struct sock *sk, struct page *page,
+ int offset, size_t size, int flags)
+{
+ if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
+ MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY |
+ MSG_NO_SHARED_FRAGS))
+ return -ENOTSUPP;
+
+ return tls_sw_do_sendpage(sk, page, offset, size, flags);
+}
+
int tls_sw_sendpage(struct sock *sk, struct page *page,
int offset, size_t size, int flags)
{
@@ -1481,7 +1497,7 @@ static int decrypt_skb_update(struct sock *sk, struct sk_buff *skb,
if (!ctx->decrypted) {
if (tls_ctx->rx_conf == TLS_HW) {
- err = tls_device_decrypted(sk, skb);
+ err = tls_device_decrypted(sk, tls_ctx, skb, rxm);
if (err < 0)
return err;
}
@@ -1509,7 +1525,7 @@ static int decrypt_skb_update(struct sock *sk, struct sk_buff *skb,
rxm->offset += prot->prepend_size;
rxm->full_len -= prot->overhead_size;
tls_advance_record_sn(sk, prot, &tls_ctx->rx);
- ctx->decrypted = true;
+ ctx->decrypted = 1;
ctx->saved_data_ready(sk);
} else {
*zc = false;
@@ -1919,7 +1935,7 @@ ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
tls_err_abort(sk, EBADMSG);
goto splice_read_end;
}
- ctx->decrypted = true;
+ ctx->decrypted = 1;
}
rxm = strp_msg(skb);
@@ -2020,7 +2036,7 @@ static void tls_queue(struct strparser *strp, struct sk_buff *skb)
struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
- ctx->decrypted = false;
+ ctx->decrypted = 0;
ctx->recv_pkt = skb;
strp_pause(strp);
@@ -2376,10 +2392,11 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
tfm = crypto_aead_tfm(sw_ctx_rx->aead_recv);
if (crypto_info->version == TLS_1_3_VERSION)
- sw_ctx_rx->async_capable = false;
+ sw_ctx_rx->async_capable = 0;
else
sw_ctx_rx->async_capable =
- tfm->__crt_alg->cra_flags & CRYPTO_ALG_ASYNC;
+ !!(tfm->__crt_alg->cra_flags &
+ CRYPTO_ALG_ASYNC);
/* Set up strparser */
memset(&cb, 0, sizeof(cb));
diff --git a/net/tls/tls_toe.c b/net/tls/tls_toe.c
new file mode 100644
index 000000000000..7e1330f19165
--- /dev/null
+++ b/net/tls/tls_toe.c
@@ -0,0 +1,139 @@
+/*
+ * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/list.h>
+#include <linux/rcupdate.h>
+#include <linux/spinlock.h>
+#include <net/inet_connection_sock.h>
+#include <net/tls.h>
+#include <net/tls_toe.h>
+
+static LIST_HEAD(device_list);
+static DEFINE_SPINLOCK(device_spinlock);
+
+static void tls_toe_sk_destruct(struct sock *sk)
+{
+ struct inet_connection_sock *icsk = inet_csk(sk);
+ struct tls_context *ctx = tls_get_ctx(sk);
+
+ ctx->sk_destruct(sk);
+ /* Free ctx */
+ rcu_assign_pointer(icsk->icsk_ulp_data, NULL);
+ tls_ctx_free(sk, ctx);
+}
+
+int tls_toe_bypass(struct sock *sk)
+{
+ struct tls_toe_device *dev;
+ struct tls_context *ctx;
+ int rc = 0;
+
+ spin_lock_bh(&device_spinlock);
+ list_for_each_entry(dev, &device_list, dev_list) {
+ if (dev->feature && dev->feature(dev)) {
+ ctx = tls_ctx_create(sk);
+ if (!ctx)
+ goto out;
+
+ ctx->sk_destruct = sk->sk_destruct;
+ sk->sk_destruct = tls_toe_sk_destruct;
+ ctx->rx_conf = TLS_HW_RECORD;
+ ctx->tx_conf = TLS_HW_RECORD;
+ update_sk_prot(sk, ctx);
+ rc = 1;
+ break;
+ }
+ }
+out:
+ spin_unlock_bh(&device_spinlock);
+ return rc;
+}
+
+void tls_toe_unhash(struct sock *sk)
+{
+ struct tls_context *ctx = tls_get_ctx(sk);
+ struct tls_toe_device *dev;
+
+ spin_lock_bh(&device_spinlock);
+ list_for_each_entry(dev, &device_list, dev_list) {
+ if (dev->unhash) {
+ kref_get(&dev->kref);
+ spin_unlock_bh(&device_spinlock);
+ dev->unhash(dev, sk);
+ kref_put(&dev->kref, dev->release);
+ spin_lock_bh(&device_spinlock);
+ }
+ }
+ spin_unlock_bh(&device_spinlock);
+ ctx->sk_proto->unhash(sk);
+}
+
+int tls_toe_hash(struct sock *sk)
+{
+ struct tls_context *ctx = tls_get_ctx(sk);
+ struct tls_toe_device *dev;
+ int err;
+
+ err = ctx->sk_proto->hash(sk);
+ spin_lock_bh(&device_spinlock);
+ list_for_each_entry(dev, &device_list, dev_list) {
+ if (dev->hash) {
+ kref_get(&dev->kref);
+ spin_unlock_bh(&device_spinlock);
+ err |= dev->hash(dev, sk);
+ kref_put(&dev->kref, dev->release);
+ spin_lock_bh(&device_spinlock);
+ }
+ }
+ spin_unlock_bh(&device_spinlock);
+
+ if (err)
+ tls_toe_unhash(sk);
+ return err;
+}
+
+void tls_toe_register_device(struct tls_toe_device *device)
+{
+ spin_lock_bh(&device_spinlock);
+ list_add_tail(&device->dev_list, &device_list);
+ spin_unlock_bh(&device_spinlock);
+}
+EXPORT_SYMBOL(tls_toe_register_device);
+
+void tls_toe_unregister_device(struct tls_toe_device *device)
+{
+ spin_lock_bh(&device_spinlock);
+ list_del(&device->dev_list);
+ spin_unlock_bh(&device_spinlock);
+}
+EXPORT_SYMBOL(tls_toe_unregister_device);
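
A driver-side sketch of the relocated registration API. The callback
prototypes are inferred from the call sites above (feature/hash/unhash take
the device, release is a kref release callback); every name below is an
assumption, not part of this patch.

    /* Hypothetical TOE driver skeleton; hash/unhash are optional and
     * omitted here, since the list walks above check them for NULL. */
    #include <linux/kref.h>
    #include <linux/module.h>
    #include <net/tls_toe.h>

    static int my_feature(struct tls_toe_device *dev)
    {
            return 1;       /* claim TOE capability for every socket */
    }

    static void my_release(struct kref *kref)
    {
            /* free driver state embedding the kref, if any */
    }

    static struct tls_toe_device my_toe_dev = {
            .feature = my_feature,
            .release = my_release,
    };

    static int __init my_toe_init(void)
    {
            kref_init(&my_toe_dev.kref);
            tls_toe_register_device(&my_toe_dev);
            return 0;
    }
    module_init(my_toe_init);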
diff --git a/net/tls/trace.c b/net/tls/trace.c
new file mode 100644
index 000000000000..e374913cf9c9
--- /dev/null
+++ b/net/tls/trace.c
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2019 Netronome Systems, Inc. */
+
+#include <linux/module.h>
+
+#ifndef __CHECKER__
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
+#endif
diff --git a/net/tls/trace.h b/net/tls/trace.h
new file mode 100644
index 000000000000..9ba5f600ea43
--- /dev/null
+++ b/net/tls/trace.h
@@ -0,0 +1,202 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2019 Netronome Systems, Inc. */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM tls
+
+#if !defined(_TLS_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _TLS_TRACE_H_
+
+#include <asm/unaligned.h>
+#include <linux/tracepoint.h>
+
+struct sock;
+
+TRACE_EVENT(tls_device_offload_set,
+
+ TP_PROTO(struct sock *sk, int dir, u32 tcp_seq, u8 *rec_no, int ret),
+
+ TP_ARGS(sk, dir, tcp_seq, rec_no, ret),
+
+ TP_STRUCT__entry(
+ __field( struct sock *, sk )
+ __field( u64, rec_no )
+ __field( int, dir )
+ __field( u32, tcp_seq )
+ __field( int, ret )
+ ),
+
+ TP_fast_assign(
+ __entry->sk = sk;
+ __entry->rec_no = get_unaligned_be64(rec_no);
+ __entry->dir = dir;
+ __entry->tcp_seq = tcp_seq;
+ __entry->ret = ret;
+ ),
+
+ TP_printk(
+ "sk=%p direction=%d tcp_seq=%u rec_no=%llu ret=%d",
+ __entry->sk, __entry->dir, __entry->tcp_seq, __entry->rec_no,
+ __entry->ret
+ )
+);
+
+TRACE_EVENT(tls_device_decrypted,
+
+ TP_PROTO(struct sock *sk, u32 tcp_seq, u8 *rec_no, u32 rec_len,
+ bool encrypted, bool decrypted),
+
+ TP_ARGS(sk, tcp_seq, rec_no, rec_len, encrypted, decrypted),
+
+ TP_STRUCT__entry(
+ __field( struct sock *, sk )
+ __field( u64, rec_no )
+ __field( u32, tcp_seq )
+ __field( u32, rec_len )
+ __field( bool, encrypted )
+ __field( bool, decrypted )
+ ),
+
+ TP_fast_assign(
+ __entry->sk = sk;
+ __entry->rec_no = get_unaligned_be64(rec_no);
+ __entry->tcp_seq = tcp_seq;
+ __entry->rec_len = rec_len;
+ __entry->encrypted = encrypted;
+ __entry->decrypted = decrypted;
+ ),
+
+ TP_printk(
+ "sk=%p tcp_seq=%u rec_no=%llu len=%u encrypted=%d decrypted=%d",
+ __entry->sk, __entry->tcp_seq,
+ __entry->rec_no, __entry->rec_len,
+ __entry->encrypted, __entry->decrypted
+ )
+);
+
+TRACE_EVENT(tls_device_rx_resync_send,
+
+ TP_PROTO(struct sock *sk, u32 tcp_seq, u8 *rec_no, int sync_type),
+
+ TP_ARGS(sk, tcp_seq, rec_no, sync_type),
+
+ TP_STRUCT__entry(
+ __field( struct sock *, sk )
+ __field( u64, rec_no )
+ __field( u32, tcp_seq )
+ __field( int, sync_type )
+ ),
+
+ TP_fast_assign(
+ __entry->sk = sk;
+ __entry->rec_no = get_unaligned_be64(rec_no);
+ __entry->tcp_seq = tcp_seq;
+ __entry->sync_type = sync_type;
+ ),
+
+ TP_printk(
+ "sk=%p tcp_seq=%u rec_no=%llu sync_type=%d",
+ __entry->sk, __entry->tcp_seq, __entry->rec_no,
+ __entry->sync_type
+ )
+);
+
+TRACE_EVENT(tls_device_rx_resync_nh_schedule,
+
+ TP_PROTO(struct sock *sk),
+
+ TP_ARGS(sk),
+
+ TP_STRUCT__entry(
+ __field( struct sock *, sk )
+ ),
+
+ TP_fast_assign(
+ __entry->sk = sk;
+ ),
+
+ TP_printk(
+ "sk=%p", __entry->sk
+ )
+);
+
+TRACE_EVENT(tls_device_rx_resync_nh_delay,
+
+ TP_PROTO(struct sock *sk, u32 sock_data, u32 rec_len),
+
+ TP_ARGS(sk, sock_data, rec_len),
+
+ TP_STRUCT__entry(
+ __field( struct sock *, sk )
+ __field( u32, sock_data )
+ __field( u32, rec_len )
+ ),
+
+ TP_fast_assign(
+ __entry->sk = sk;
+ __entry->sock_data = sock_data;
+ __entry->rec_len = rec_len;
+ ),
+
+ TP_printk(
+ "sk=%p sock_data=%u rec_len=%u",
+ __entry->sk, __entry->sock_data, __entry->rec_len
+ )
+);
+
+TRACE_EVENT(tls_device_tx_resync_req,
+
+ TP_PROTO(struct sock *sk, u32 tcp_seq, u32 exp_tcp_seq),
+
+ TP_ARGS(sk, tcp_seq, exp_tcp_seq),
+
+ TP_STRUCT__entry(
+ __field( struct sock *, sk )
+ __field( u32, tcp_seq )
+ __field( u32, exp_tcp_seq )
+ ),
+
+ TP_fast_assign(
+ __entry->sk = sk;
+ __entry->tcp_seq = tcp_seq;
+ __entry->exp_tcp_seq = exp_tcp_seq;
+ ),
+
+ TP_printk(
+ "sk=%p tcp_seq=%u exp_tcp_seq=%u",
+ __entry->sk, __entry->tcp_seq, __entry->exp_tcp_seq
+ )
+);
+
+TRACE_EVENT(tls_device_tx_resync_send,
+
+ TP_PROTO(struct sock *sk, u32 tcp_seq, u8 *rec_no),
+
+ TP_ARGS(sk, tcp_seq, rec_no),
+
+ TP_STRUCT__entry(
+ __field( struct sock *, sk )
+ __field( u64, rec_no )
+ __field( u32, tcp_seq )
+ ),
+
+ TP_fast_assign(
+ __entry->sk = sk;
+ __entry->rec_no = get_unaligned_be64(rec_no);
+ __entry->tcp_seq = tcp_seq;
+ ),
+
+ TP_printk(
+ "sk=%p tcp_seq=%u rec_no=%llu",
+ __entry->sk, __entry->tcp_seq, __entry->rec_no
+ )
+);
+
+#endif /* _TLS_TRACE_H_ */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+#include <trace/define_trace.h>
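
These events land under the "tls" group in tracefs; a hedged snippet for
enabling them programmatically (the tracefs mount point is assumed):

    /* Hypothetical sketch: enable all "tls" trace events, assuming
     * tracefs is mounted at /sys/kernel/tracing. */
    #include <stdio.h>

    static int enable_tls_events(void)
    {
            FILE *f = fopen("/sys/kernel/tracing/events/tls/enable", "w");

            if (!f)
                    return -1;
            fputc('1', f);
            return fclose(f);
    }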
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 0d8da809bea2..193cba2d777b 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -284,11 +284,9 @@ static struct sock *__unix_find_socket_byname(struct net *net,
if (u->addr->len == len &&
!memcmp(u->addr->name, sunname, len))
- goto found;
+ return s;
}
- s = NULL;
-found:
- return s;
+ return NULL;
}
static inline struct sock *unix_find_socket_byname(struct net *net,
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index 582a3e4dfce2..74db4cd637a7 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -126,19 +126,18 @@ static struct proto vsock_proto = {
*/
#define VSOCK_DEFAULT_CONNECT_TIMEOUT (2 * HZ)
-static const struct vsock_transport *transport;
+#define VSOCK_DEFAULT_BUFFER_SIZE (1024 * 256)
+#define VSOCK_DEFAULT_BUFFER_MAX_SIZE (1024 * 256)
+#define VSOCK_DEFAULT_BUFFER_MIN_SIZE 128
+
+/* Transport used for host->guest communication */
+static const struct vsock_transport *transport_h2g;
+/* Transport used for guest->host communication */
+static const struct vsock_transport *transport_g2h;
+/* Transport used for DGRAM communication */
+static const struct vsock_transport *transport_dgram;
static DEFINE_MUTEX(vsock_register_mutex);
-/**** EXPORTS ****/
-
-/* Get the ID of the local context. This is transport dependent. */
-
-int vm_sockets_get_local_cid(void)
-{
- return transport->get_local_cid();
-}
-EXPORT_SYMBOL_GPL(vm_sockets_get_local_cid);
-
/**** UTILS ****/
/* Each bound VSocket is stored in the bind hash table and each connected
@@ -188,7 +187,7 @@ static int vsock_auto_bind(struct vsock_sock *vsk)
return __vsock_bind(sk, &local_addr);
}
-static int __init vsock_init_tables(void)
+static void vsock_init_tables(void)
{
int i;
@@ -197,7 +196,6 @@ static int __init vsock_init_tables(void)
for (i = 0; i < ARRAY_SIZE(vsock_connected_table); i++)
INIT_LIST_HEAD(&vsock_connected_table[i]);
- return 0;
}
static void __vsock_insert_bound(struct list_head *list,
@@ -230,9 +228,15 @@ static struct sock *__vsock_find_bound_socket(struct sockaddr_vm *addr)
{
struct vsock_sock *vsk;
- list_for_each_entry(vsk, vsock_bound_sockets(addr), bound_table)
- if (addr->svm_port == vsk->local_addr.svm_port)
+ list_for_each_entry(vsk, vsock_bound_sockets(addr), bound_table) {
+ if (vsock_addr_equals_addr(addr, &vsk->local_addr))
+ return sk_vsock(vsk);
+
+ if (addr->svm_port == vsk->local_addr.svm_port &&
+ (vsk->local_addr.svm_cid == VMADDR_CID_ANY ||
+ addr->svm_cid == VMADDR_CID_ANY))
return sk_vsock(vsk);
+ }
return NULL;
}
@@ -382,6 +386,88 @@ void vsock_enqueue_accept(struct sock *listener, struct sock *connected)
}
EXPORT_SYMBOL_GPL(vsock_enqueue_accept);
+static void vsock_deassign_transport(struct vsock_sock *vsk)
+{
+ if (!vsk->transport)
+ return;
+
+ vsk->transport->destruct(vsk);
+ module_put(vsk->transport->module);
+ vsk->transport = NULL;
+}
+
+/* Assign a transport to a socket and call the .init transport callback.
+ *
+ * Note: for stream sockets this must be called when vsk->remote_addr is set
+ * (e.g. during the connect() or when a connection request on a listener
+ * socket is received).
+ * The vsk->remote_addr is used to decide which transport to use:
+ * - remote CID <= VMADDR_CID_HOST will use guest->host transport;
+ * - remote CID == local_cid (guest->host transport) will use guest->host
+ * transport for loopback (host->guest transports don't support loopback);
+ * - remote CID > VMADDR_CID_HOST will use host->guest transport;
+ */
+int vsock_assign_transport(struct vsock_sock *vsk, struct vsock_sock *psk)
+{
+ const struct vsock_transport *new_transport;
+ struct sock *sk = sk_vsock(vsk);
+ unsigned int remote_cid = vsk->remote_addr.svm_cid;
+ int ret;
+
+ switch (sk->sk_type) {
+ case SOCK_DGRAM:
+ new_transport = transport_dgram;
+ break;
+ case SOCK_STREAM:
+ if (remote_cid <= VMADDR_CID_HOST ||
+ (transport_g2h &&
+ remote_cid == transport_g2h->get_local_cid()))
+ new_transport = transport_g2h;
+ else
+ new_transport = transport_h2g;
+ break;
+ default:
+ return -ESOCKTNOSUPPORT;
+ }
+
+ if (vsk->transport) {
+ if (vsk->transport == new_transport)
+ return 0;
+
+ vsk->transport->release(vsk);
+ vsock_deassign_transport(vsk);
+ }
+
+ /* We increase the module refcnt to prevent the transport unloading
+ * while there are open sockets assigned to it.
+ */
+ if (!new_transport || !try_module_get(new_transport->module))
+ return -ENODEV;
+
+ ret = new_transport->init(vsk, psk);
+ if (ret) {
+ module_put(new_transport->module);
+ return ret;
+ }
+
+ vsk->transport = new_transport;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vsock_assign_transport);
+
+bool vsock_find_cid(unsigned int cid)
+{
+ if (transport_g2h && cid == transport_g2h->get_local_cid())
+ return true;
+
+ if (transport_h2g && cid == VMADDR_CID_HOST)
+ return true;
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(vsock_find_cid);
+
static struct sock *vsock_dequeue_accept(struct sock *listener)
{
struct vsock_sock *vlistener;
@@ -418,7 +504,12 @@ static bool vsock_is_pending(struct sock *sk)
static int vsock_send_shutdown(struct sock *sk, int mode)
{
- return transport->shutdown(vsock_sk(sk), mode);
+ struct vsock_sock *vsk = vsock_sk(sk);
+
+ if (!vsk->transport)
+ return -ENODEV;
+
+ return vsk->transport->shutdown(vsk, mode);
}
static void vsock_pending_work(struct work_struct *work)
@@ -439,7 +530,7 @@ static void vsock_pending_work(struct work_struct *work)
if (vsock_is_pending(sk)) {
vsock_remove_pending(listener, sk);
- listener->sk_ack_backlog--;
+ sk_acceptq_removed(listener);
} else if (!vsk->rejected) {
/* We are not on the pending list and accept() did not reject
* us, so we must have been accepted by our user process. We
@@ -528,13 +619,12 @@ static int __vsock_bind_stream(struct vsock_sock *vsk,
static int __vsock_bind_dgram(struct vsock_sock *vsk,
struct sockaddr_vm *addr)
{
- return transport->dgram_bind(vsk, addr);
+ return vsk->transport->dgram_bind(vsk, addr);
}
static int __vsock_bind(struct sock *sk, struct sockaddr_vm *addr)
{
struct vsock_sock *vsk = vsock_sk(sk);
- u32 cid;
int retval;
/* First ensure this socket isn't already bound. */
@@ -544,10 +634,9 @@ static int __vsock_bind(struct sock *sk, struct sockaddr_vm *addr)
/* Now bind to the provided address or select appropriate values if
* none are provided (VMADDR_CID_ANY and VMADDR_PORT_ANY). Note that
* like AF_INET prevents binding to a non-local IP address (in most
- * cases), we only allow binding to the local CID.
+ * cases), we only allow binding to a local CID.
*/
- cid = transport->get_local_cid();
- if (addr->svm_cid != cid && addr->svm_cid != VMADDR_CID_ANY)
+ if (addr->svm_cid != VMADDR_CID_ANY && !vsock_find_cid(addr->svm_cid))
return -EADDRNOTAVAIL;
switch (sk->sk_socket->type) {
@@ -571,12 +660,12 @@ static int __vsock_bind(struct sock *sk, struct sockaddr_vm *addr)
static void vsock_connect_timeout(struct work_struct *work);
-struct sock *__vsock_create(struct net *net,
- struct socket *sock,
- struct sock *parent,
- gfp_t priority,
- unsigned short type,
- int kern)
+static struct sock *__vsock_create(struct net *net,
+ struct socket *sock,
+ struct sock *parent,
+ gfp_t priority,
+ unsigned short type,
+ int kern)
{
struct sock *sk;
struct vsock_sock *psk;
@@ -620,28 +709,24 @@ struct sock *__vsock_create(struct net *net,
vsk->trusted = psk->trusted;
vsk->owner = get_cred(psk->owner);
vsk->connect_timeout = psk->connect_timeout;
+ vsk->buffer_size = psk->buffer_size;
+ vsk->buffer_min_size = psk->buffer_min_size;
+ vsk->buffer_max_size = psk->buffer_max_size;
} else {
vsk->trusted = capable(CAP_NET_ADMIN);
vsk->owner = get_current_cred();
vsk->connect_timeout = VSOCK_DEFAULT_CONNECT_TIMEOUT;
+ vsk->buffer_size = VSOCK_DEFAULT_BUFFER_SIZE;
+ vsk->buffer_min_size = VSOCK_DEFAULT_BUFFER_MIN_SIZE;
+ vsk->buffer_max_size = VSOCK_DEFAULT_BUFFER_MAX_SIZE;
}
- if (transport->init(vsk, psk) < 0) {
- sk_free(sk);
- return NULL;
- }
-
- if (sock)
- vsock_insert_unbound(vsk);
-
return sk;
}
-EXPORT_SYMBOL_GPL(__vsock_create);
static void __vsock_release(struct sock *sk, int level)
{
if (sk) {
- struct sk_buff *skb;
struct sock *pending;
struct vsock_sock *vsk;
@@ -651,7 +736,10 @@ static void __vsock_release(struct sock *sk, int level)
/* The release call is supposed to use lock_sock_nested()
* rather than lock_sock(), if a sock lock should be acquired.
*/
- transport->release(vsk);
+ if (vsk->transport)
+ vsk->transport->release(vsk);
+ else if (sk->sk_type == SOCK_STREAM)
+ vsock_remove_sock(vsk);
/* When "level" is SINGLE_DEPTH_NESTING, use the nested
* version to avoid the warning "possible recursive locking
@@ -662,8 +750,7 @@ static void __vsock_release(struct sock *sk, int level)
sock_orphan(sk);
sk->sk_shutdown = SHUTDOWN_MASK;
- while ((skb = skb_dequeue(&sk->sk_receive_queue)))
- kfree_skb(skb);
+ skb_queue_purge(&sk->sk_receive_queue);
/* Clean up any sockets that never were accepted. */
while ((pending = vsock_dequeue_accept(sk)) != NULL) {
@@ -680,7 +767,7 @@ static void vsock_sk_destruct(struct sock *sk)
{
struct vsock_sock *vsk = vsock_sk(sk);
- transport->destruct(vsk);
+ vsock_deassign_transport(vsk);
/* When clearing these addresses, there's no need to set the family and
* possibly register the address family with the kernel.
@@ -702,15 +789,22 @@ static int vsock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
return err;
}
+struct sock *vsock_create_connected(struct sock *parent)
+{
+ return __vsock_create(sock_net(parent), NULL, parent, GFP_KERNEL,
+ parent->sk_type, 0);
+}
+EXPORT_SYMBOL_GPL(vsock_create_connected);
+
s64 vsock_stream_has_data(struct vsock_sock *vsk)
{
- return transport->stream_has_data(vsk);
+ return vsk->transport->stream_has_data(vsk);
}
EXPORT_SYMBOL_GPL(vsock_stream_has_data);
s64 vsock_stream_has_space(struct vsock_sock *vsk)
{
- return transport->stream_has_space(vsk);
+ return vsk->transport->stream_has_space(vsk);
}
EXPORT_SYMBOL_GPL(vsock_stream_has_space);
@@ -879,6 +973,7 @@ static __poll_t vsock_poll(struct file *file, struct socket *sock,
mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
} else if (sock->type == SOCK_STREAM) {
+ const struct vsock_transport *transport = vsk->transport;
lock_sock(sk);
/* Listening sockets that have connections in their accept
@@ -889,7 +984,7 @@ static __poll_t vsock_poll(struct file *file, struct socket *sock,
mask |= EPOLLIN | EPOLLRDNORM;
/* If there is something in the queue then we can read. */
- if (transport->stream_is_active(vsk) &&
+ if (transport && transport->stream_is_active(vsk) &&
!(sk->sk_shutdown & RCV_SHUTDOWN)) {
bool data_ready_now = false;
int ret = transport->notify_poll_in(
@@ -954,6 +1049,7 @@ static int vsock_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
struct sock *sk;
struct vsock_sock *vsk;
struct sockaddr_vm *remote_addr;
+ const struct vsock_transport *transport;
if (msg->msg_flags & MSG_OOB)
return -EOPNOTSUPP;
@@ -962,6 +1058,7 @@ static int vsock_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
err = 0;
sk = sock->sk;
vsk = vsock_sk(sk);
+ transport = vsk->transport;
lock_sock(sk);
@@ -1046,8 +1143,8 @@ static int vsock_dgram_connect(struct socket *sock,
if (err)
goto out;
- if (!transport->dgram_allow(remote_addr->svm_cid,
- remote_addr->svm_port)) {
+ if (!vsk->transport->dgram_allow(remote_addr->svm_cid,
+ remote_addr->svm_port)) {
err = -EINVAL;
goto out;
}
@@ -1063,7 +1160,9 @@ out:
static int vsock_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
size_t len, int flags)
{
- return transport->dgram_dequeue(vsock_sk(sock->sk), msg, len, flags);
+ struct vsock_sock *vsk = vsock_sk(sock->sk);
+
+ return vsk->transport->dgram_dequeue(vsk, msg, len, flags);
}
static const struct proto_ops vsock_dgram_ops = {
@@ -1089,6 +1188,8 @@ static const struct proto_ops vsock_dgram_ops = {
static int vsock_transport_cancel_pkt(struct vsock_sock *vsk)
{
+ const struct vsock_transport *transport = vsk->transport;
+
if (!transport->cancel_pkt)
return -EOPNOTSUPP;
@@ -1125,6 +1226,7 @@ static int vsock_stream_connect(struct socket *sock, struct sockaddr *addr,
int err;
struct sock *sk;
struct vsock_sock *vsk;
+ const struct vsock_transport *transport;
struct sockaddr_vm *remote_addr;
long timeout;
DEFINE_WAIT(wait);
@@ -1159,19 +1261,26 @@ static int vsock_stream_connect(struct socket *sock, struct sockaddr *addr,
goto out;
}
+ /* Set the remote address that we are connecting to. */
+ memcpy(&vsk->remote_addr, remote_addr,
+ sizeof(vsk->remote_addr));
+
+ err = vsock_assign_transport(vsk, NULL);
+ if (err)
+ goto out;
+
+ transport = vsk->transport;
+
/* The hypervisor and well-known contexts do not have socket
* endpoints.
*/
- if (!transport->stream_allow(remote_addr->svm_cid,
+ if (!transport ||
+ !transport->stream_allow(remote_addr->svm_cid,
remote_addr->svm_port)) {
err = -ENETUNREACH;
goto out;
}
- /* Set the remote address that we are connecting to. */
- memcpy(&vsk->remote_addr, remote_addr,
- sizeof(vsk->remote_addr));
-
err = vsock_auto_bind(vsk);
if (err)
goto out;
@@ -1301,7 +1410,7 @@ static int vsock_accept(struct socket *sock, struct socket *newsock, int flags,
err = -listener->sk_err;
if (connected) {
- listener->sk_ack_backlog--;
+ sk_acceptq_removed(listener);
lock_sock_nested(connected, SINGLE_DEPTH_NESTING);
vconnected = vsock_sk(connected);
@@ -1366,6 +1475,23 @@ out:
return err;
}
+static void vsock_update_buffer_size(struct vsock_sock *vsk,
+ const struct vsock_transport *transport,
+ u64 val)
+{
+ if (val > vsk->buffer_max_size)
+ val = vsk->buffer_max_size;
+
+ if (val < vsk->buffer_min_size)
+ val = vsk->buffer_min_size;
+
+ if (val != vsk->buffer_size &&
+ transport && transport->notify_buffer_size)
+ transport->notify_buffer_size(vsk, &val);
+
+ vsk->buffer_size = val;
+}
+
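
As a minimal userspace sketch of the resulting behavior: the SO_VM_SOCKETS_BUFFER_* options and their u64 payload are the existing AF_VSOCK ABI, while the helper below and its values are purely illustrative.

#include <sys/socket.h>
#include <linux/vm_sockets.h>

/* The core clamps SO_VM_SOCKETS_BUFFER_SIZE to [buffer_min_size,
 * buffer_max_size]; the transport only gets a notify_buffer_size()
 * callback when the effective value changes.
 */
static int tune_vsock_buffer(int fd)
{
	unsigned long long val = 128 * 1024;	/* options take a u64 */

	if (setsockopt(fd, AF_VSOCK, SO_VM_SOCKETS_BUFFER_MAX_SIZE,
		       &val, sizeof(val)) < 0)
		return -1;
	return setsockopt(fd, AF_VSOCK, SO_VM_SOCKETS_BUFFER_SIZE,
			  &val, sizeof(val));
}
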
static int vsock_stream_setsockopt(struct socket *sock,
int level,
int optname,
@@ -1375,6 +1501,7 @@ static int vsock_stream_setsockopt(struct socket *sock,
int err;
struct sock *sk;
struct vsock_sock *vsk;
+ const struct vsock_transport *transport;
u64 val;
if (level != AF_VSOCK)
@@ -1395,23 +1522,26 @@ static int vsock_stream_setsockopt(struct socket *sock,
err = 0;
sk = sock->sk;
vsk = vsock_sk(sk);
+ transport = vsk->transport;
lock_sock(sk);
switch (optname) {
case SO_VM_SOCKETS_BUFFER_SIZE:
COPY_IN(val);
- transport->set_buffer_size(vsk, val);
+ vsock_update_buffer_size(vsk, transport, val);
break;
case SO_VM_SOCKETS_BUFFER_MAX_SIZE:
COPY_IN(val);
- transport->set_max_buffer_size(vsk, val);
+ vsk->buffer_max_size = val;
+ vsock_update_buffer_size(vsk, transport, vsk->buffer_size);
break;
case SO_VM_SOCKETS_BUFFER_MIN_SIZE:
COPY_IN(val);
- transport->set_min_buffer_size(vsk, val);
+ vsk->buffer_min_size = val;
+ vsock_update_buffer_size(vsk, transport, vsk->buffer_size);
break;
case SO_VM_SOCKETS_CONNECT_TIMEOUT: {
@@ -1478,17 +1608,17 @@ static int vsock_stream_getsockopt(struct socket *sock,
switch (optname) {
case SO_VM_SOCKETS_BUFFER_SIZE:
- val = transport->get_buffer_size(vsk);
+ val = vsk->buffer_size;
COPY_OUT(val);
break;
case SO_VM_SOCKETS_BUFFER_MAX_SIZE:
- val = transport->get_max_buffer_size(vsk);
+ val = vsk->buffer_max_size;
COPY_OUT(val);
break;
case SO_VM_SOCKETS_BUFFER_MIN_SIZE:
- val = transport->get_min_buffer_size(vsk);
+ val = vsk->buffer_min_size;
COPY_OUT(val);
break;
@@ -1519,6 +1649,7 @@ static int vsock_stream_sendmsg(struct socket *sock, struct msghdr *msg,
{
struct sock *sk;
struct vsock_sock *vsk;
+ const struct vsock_transport *transport;
ssize_t total_written;
long timeout;
int err;
@@ -1527,6 +1658,7 @@ static int vsock_stream_sendmsg(struct socket *sock, struct msghdr *msg,
sk = sock->sk;
vsk = vsock_sk(sk);
+ transport = vsk->transport;
total_written = 0;
err = 0;
@@ -1548,7 +1680,7 @@ static int vsock_stream_sendmsg(struct socket *sock, struct msghdr *msg,
goto out;
}
- if (sk->sk_state != TCP_ESTABLISHED ||
+ if (!transport || sk->sk_state != TCP_ESTABLISHED ||
!vsock_addr_bound(&vsk->local_addr)) {
err = -ENOTCONN;
goto out;
@@ -1658,6 +1790,7 @@ vsock_stream_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
{
struct sock *sk;
struct vsock_sock *vsk;
+ const struct vsock_transport *transport;
int err;
size_t target;
ssize_t copied;
@@ -1668,11 +1801,12 @@ vsock_stream_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
sk = sock->sk;
vsk = vsock_sk(sk);
+ transport = vsk->transport;
err = 0;
lock_sock(sk);
- if (sk->sk_state != TCP_ESTABLISHED) {
+ if (!transport || sk->sk_state != TCP_ESTABLISHED) {
/* Recvmsg is supposed to return 0 if a peer performs an
* orderly shutdown. Differentiate between that case and when a
* peer has not connected or a local shutdown occurred with the
@@ -1846,6 +1980,10 @@ static const struct proto_ops vsock_stream_ops = {
static int vsock_create(struct net *net, struct socket *sock,
int protocol, int kern)
{
+ struct vsock_sock *vsk;
+ struct sock *sk;
+ int ret;
+
if (!sock)
return -EINVAL;
@@ -1865,7 +2003,23 @@ static int vsock_create(struct net *net, struct socket *sock,
sock->state = SS_UNCONNECTED;
- return __vsock_create(net, sock, NULL, GFP_KERNEL, 0, kern) ? 0 : -ENOMEM;
+ sk = __vsock_create(net, sock, NULL, GFP_KERNEL, 0, kern);
+ if (!sk)
+ return -ENOMEM;
+
+ vsk = vsock_sk(sk);
+
+ if (sock->type == SOCK_DGRAM) {
+ ret = vsock_assign_transport(vsk, NULL);
+ if (ret < 0) {
+ sock_put(sk);
+ return ret;
+ }
+ }
+
+ vsock_insert_unbound(vsk);
+
+ return 0;
}
static const struct net_proto_family vsock_family_ops = {
@@ -1878,11 +2032,20 @@ static long vsock_dev_do_ioctl(struct file *filp,
unsigned int cmd, void __user *ptr)
{
u32 __user *p = ptr;
+ u32 cid = VMADDR_CID_ANY;
int retval = 0;
switch (cmd) {
case IOCTL_VM_SOCKETS_GET_LOCAL_CID:
- if (put_user(transport->get_local_cid(), p) != 0)
+ /* To be compatible with the VMCI behavior, we prioritize the
+ * guest CID instead of the well-known host CID (VMADDR_CID_HOST).
+ */
+ if (transport_g2h)
+ cid = transport_g2h->get_local_cid();
+ else if (transport_h2g)
+ cid = transport_h2g->get_local_cid();
+
+ if (put_user(cid, p) != 0)
retval = -EFAULT;
break;
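
For reference, this is the ioctl userspace issues on /dev/vsock to learn its own CID; a minimal sketch using the existing ABI from <linux/vm_sockets.h>:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/vm_sockets.h>

int main(void)
{
	unsigned int cid;
	int fd = open("/dev/vsock", O_RDONLY);

	if (fd < 0 || ioctl(fd, IOCTL_VM_SOCKETS_GET_LOCAL_CID, &cid) < 0)
		return 1;
	/* In a guest this reports the guest CID; with only an H2G
	 * transport registered it now reports the host-side CID instead.
	 */
	printf("local CID: %u\n", cid);
	close(fd);
	return 0;
}
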
@@ -1922,24 +2085,13 @@ static struct miscdevice vsock_device = {
.fops = &vsock_device_ops,
};
-int __vsock_core_init(const struct vsock_transport *t, struct module *owner)
+static int __init vsock_init(void)
{
- int err = mutex_lock_interruptible(&vsock_register_mutex);
+ int err = 0;
- if (err)
- return err;
-
- if (transport) {
- err = -EBUSY;
- goto err_busy;
- }
-
- /* Transport must be the owner of the protocol so that it can't
- * unload while there are open sockets.
- */
- vsock_proto.owner = owner;
- transport = t;
+ vsock_init_tables();
+ vsock_proto.owner = THIS_MODULE;
vsock_device.minor = MISC_DYNAMIC_MINOR;
err = misc_register(&vsock_device);
if (err) {
@@ -1960,7 +2112,6 @@ int __vsock_core_init(const struct vsock_transport *t, struct module *owner)
goto err_unregister_proto;
}
- mutex_unlock(&vsock_register_mutex);
return 0;
err_unregister_proto:
@@ -1968,44 +2119,86 @@ err_unregister_proto:
err_deregister_misc:
misc_deregister(&vsock_device);
err_reset_transport:
- transport = NULL;
-err_busy:
- mutex_unlock(&vsock_register_mutex);
return err;
}
-EXPORT_SYMBOL_GPL(__vsock_core_init);
-void vsock_core_exit(void)
+static void __exit vsock_exit(void)
{
- mutex_lock(&vsock_register_mutex);
-
misc_deregister(&vsock_device);
sock_unregister(AF_VSOCK);
proto_unregister(&vsock_proto);
-
- /* We do not want the assignment below re-ordered. */
- mb();
- transport = NULL;
-
- mutex_unlock(&vsock_register_mutex);
}
-EXPORT_SYMBOL_GPL(vsock_core_exit);
-const struct vsock_transport *vsock_core_get_transport(void)
+const struct vsock_transport *vsock_core_get_transport(struct vsock_sock *vsk)
{
- /* vsock_register_mutex not taken since only the transport uses this
- * function and only while registered.
- */
- return transport;
+ return vsk->transport;
}
EXPORT_SYMBOL_GPL(vsock_core_get_transport);
-static void __exit vsock_exit(void)
+int vsock_core_register(const struct vsock_transport *t, int features)
+{
+ const struct vsock_transport *t_h2g, *t_g2h, *t_dgram;
+ int err = mutex_lock_interruptible(&vsock_register_mutex);
+
+ if (err)
+ return err;
+
+ t_h2g = transport_h2g;
+ t_g2h = transport_g2h;
+ t_dgram = transport_dgram;
+
+ if (features & VSOCK_TRANSPORT_F_H2G) {
+ if (t_h2g) {
+ err = -EBUSY;
+ goto err_busy;
+ }
+ t_h2g = t;
+ }
+
+ if (features & VSOCK_TRANSPORT_F_G2H) {
+ if (t_g2h) {
+ err = -EBUSY;
+ goto err_busy;
+ }
+ t_g2h = t;
+ }
+
+ if (features & VSOCK_TRANSPORT_F_DGRAM) {
+ if (t_dgram) {
+ err = -EBUSY;
+ goto err_busy;
+ }
+ t_dgram = t;
+ }
+
+ transport_h2g = t_h2g;
+ transport_g2h = t_g2h;
+ transport_dgram = t_dgram;
+
+err_busy:
+ mutex_unlock(&vsock_register_mutex);
+ return err;
+}
+EXPORT_SYMBOL_GPL(vsock_core_register);
+
+void vsock_core_unregister(const struct vsock_transport *t)
{
- /* Do nothing. This function makes this module removable. */
+ mutex_lock(&vsock_register_mutex);
+
+ if (transport_h2g == t)
+ transport_h2g = NULL;
+
+ if (transport_g2h == t)
+ transport_g2h = NULL;
+
+ if (transport_dgram == t)
+ transport_dgram = NULL;
+
+ mutex_unlock(&vsock_register_mutex);
}
+EXPORT_SYMBOL_GPL(vsock_core_unregister);
-module_init(vsock_init_tables);
+module_init(vsock_init);
module_exit(vsock_exit);
MODULE_AUTHOR("VMware, Inc.");
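
With __vsock_core_init()/vsock_core_exit() gone, each transport registers for the features it serves. A hypothetical skeleton of what a new transport module would do (my_transport and its init/exit functions are placeholders, not part of this patch; the pattern mirrors the hvs, virtio and vmci changes below):

static struct vsock_transport my_transport = {
	.module = THIS_MODULE,	/* pinned per-socket by try_module_get() */
	/* .init, .release, .shutdown, ... usual transport callbacks */
};

static int __init my_transport_init(void)
{
	/* One transport per feature; vsock_core_register() returns -EBUSY
	 * if the requested slot (H2G, G2H or DGRAM) is already taken.
	 */
	return vsock_core_register(&my_transport, VSOCK_TRANSPORT_F_H2G);
}

static void __exit my_transport_exit(void)
{
	vsock_core_unregister(&my_transport);
}
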
diff --git a/net/vmw_vsock/hyperv_transport.c b/net/vmw_vsock/hyperv_transport.c
index c443db7af8d4..3c7d07a99fc5 100644
--- a/net/vmw_vsock/hyperv_transport.c
+++ b/net/vmw_vsock/hyperv_transport.c
@@ -13,15 +13,16 @@
#include <linux/hyperv.h>
#include <net/sock.h>
#include <net/af_vsock.h>
+#include <asm/hyperv-tlfs.h>
/* Older (VMBUS version 'VERSION_WIN10' or before) Windows hosts have some
- * stricter requirements on the hv_sock ring buffer size of six 4K pages. Newer
- * hosts don't have this limitation; but, keep the defaults the same for compat.
+ * stricter requirements on the hv_sock ring buffer size of six 4K pages.
+ * hyperv-tlfs defines HV_HYP_PAGE_SIZE as 4K. Newer hosts don't have this
+ * limitation, but keep the defaults the same for compat.
*/
-#define PAGE_SIZE_4K 4096
-#define RINGBUFFER_HVS_RCV_SIZE (PAGE_SIZE_4K * 6)
-#define RINGBUFFER_HVS_SND_SIZE (PAGE_SIZE_4K * 6)
-#define RINGBUFFER_HVS_MAX_SIZE (PAGE_SIZE_4K * 64)
+#define RINGBUFFER_HVS_RCV_SIZE (HV_HYP_PAGE_SIZE * 6)
+#define RINGBUFFER_HVS_SND_SIZE (HV_HYP_PAGE_SIZE * 6)
+#define RINGBUFFER_HVS_MAX_SIZE (HV_HYP_PAGE_SIZE * 64)
/* The MTU is 16KB per the host side's design */
#define HVS_MTU_SIZE (1024 * 16)
@@ -54,7 +55,8 @@ struct hvs_recv_buf {
* ringbuffer APIs that allow us to directly copy data from userspace buffer
* to VMBus ringbuffer.
*/
-#define HVS_SEND_BUF_SIZE (PAGE_SIZE_4K - sizeof(struct vmpipe_proto_header))
+#define HVS_SEND_BUF_SIZE \
+ (HV_HYP_PAGE_SIZE - sizeof(struct vmpipe_proto_header))
struct hvs_send_buf {
/* The header before the payload data */
@@ -163,6 +165,8 @@ static const guid_t srv_id_template =
GUID_INIT(0x00000000, 0xfacb, 0x11e6, 0xbd, 0x58,
0x64, 0x00, 0x6a, 0x79, 0x86, 0xd3);
+static bool hvs_check_transport(struct vsock_sock *vsk);
+
static bool is_valid_srv_id(const guid_t *id)
{
return !memcmp(&id->b[4], &srv_id_template.b[4], sizeof(guid_t) - 4);
@@ -186,7 +190,8 @@ static void hvs_remote_addr_init(struct sockaddr_vm *remote,
static u32 host_ephemeral_port = MIN_HOST_EPHEMERAL_PORT;
struct sock *sk;
- vsock_addr_init(remote, VMADDR_CID_ANY, VMADDR_PORT_ANY);
+ /* Remote peer is always the host */
+ vsock_addr_init(remote, VMADDR_CID_HOST, VMADDR_PORT_ANY);
while (1) {
/* Wrap around? */
@@ -358,13 +363,24 @@ static void hvs_open_connection(struct vmbus_channel *chan)
if (sk->sk_ack_backlog >= sk->sk_max_ack_backlog)
goto out;
- new = __vsock_create(sock_net(sk), NULL, sk, GFP_KERNEL,
- sk->sk_type, 0);
+ new = vsock_create_connected(sk);
if (!new)
goto out;
new->sk_state = TCP_SYN_SENT;
vnew = vsock_sk(new);
+
+ hvs_addr_init(&vnew->local_addr, if_type);
+ hvs_remote_addr_init(&vnew->remote_addr, &vnew->local_addr);
+
+ ret = vsock_assign_transport(vnew, vsock_sk(sk));
+ /* The transport assigned (based on remote_addr) must be the
+ * same one on which we received the request.
+ */
+ if (ret || !hvs_check_transport(vnew)) {
+ sock_put(new);
+ goto out;
+ }
hvs_new = vnew->trans;
hvs_new->chan = chan;
} else {
@@ -393,10 +409,10 @@ static void hvs_open_connection(struct vmbus_channel *chan)
} else {
sndbuf = max_t(int, sk->sk_sndbuf, RINGBUFFER_HVS_SND_SIZE);
sndbuf = min_t(int, sndbuf, RINGBUFFER_HVS_MAX_SIZE);
- sndbuf = ALIGN(sndbuf, PAGE_SIZE);
+ sndbuf = ALIGN(sndbuf, HV_HYP_PAGE_SIZE);
rcvbuf = max_t(int, sk->sk_rcvbuf, RINGBUFFER_HVS_RCV_SIZE);
rcvbuf = min_t(int, rcvbuf, RINGBUFFER_HVS_MAX_SIZE);
- rcvbuf = ALIGN(rcvbuf, PAGE_SIZE);
+ rcvbuf = ALIGN(rcvbuf, HV_HYP_PAGE_SIZE);
}
ret = vmbus_open(chan, sndbuf, rcvbuf, NULL, 0, hvs_channel_cb,
@@ -426,10 +442,7 @@ static void hvs_open_connection(struct vmbus_channel *chan)
if (conn_from_host) {
new->sk_state = TCP_ESTABLISHED;
- sk->sk_ack_backlog++;
-
- hvs_addr_init(&vnew->local_addr, if_type);
- hvs_remote_addr_init(&vnew->remote_addr, &vnew->local_addr);
+ sk_acceptq_added(sk);
hvs_new->vm_srv_id = *if_type;
hvs_new->host_srv_id = *if_instance;
@@ -670,7 +683,7 @@ static ssize_t hvs_stream_enqueue(struct vsock_sock *vsk, struct msghdr *msg,
ssize_t ret = 0;
ssize_t bytes_written = 0;
- BUILD_BUG_ON(sizeof(*send_buf) != PAGE_SIZE_4K);
+ BUILD_BUG_ON(sizeof(*send_buf) != HV_HYP_PAGE_SIZE);
send_buf = kmalloc(sizeof(*send_buf), GFP_KERNEL);
if (!send_buf)
@@ -843,37 +856,9 @@ int hvs_notify_send_post_enqueue(struct vsock_sock *vsk, ssize_t written,
return 0;
}
-static void hvs_set_buffer_size(struct vsock_sock *vsk, u64 val)
-{
- /* Ignored. */
-}
-
-static void hvs_set_min_buffer_size(struct vsock_sock *vsk, u64 val)
-{
- /* Ignored. */
-}
-
-static void hvs_set_max_buffer_size(struct vsock_sock *vsk, u64 val)
-{
- /* Ignored. */
-}
-
-static u64 hvs_get_buffer_size(struct vsock_sock *vsk)
-{
- return -ENOPROTOOPT;
-}
-
-static u64 hvs_get_min_buffer_size(struct vsock_sock *vsk)
-{
- return -ENOPROTOOPT;
-}
-
-static u64 hvs_get_max_buffer_size(struct vsock_sock *vsk)
-{
- return -ENOPROTOOPT;
-}
-
static struct vsock_transport hvs_transport = {
+ .module = THIS_MODULE,
+
.get_local_cid = hvs_get_local_cid,
.init = hvs_sock_init,
@@ -906,14 +891,13 @@ static struct vsock_transport hvs_transport = {
.notify_send_pre_enqueue = hvs_notify_send_pre_enqueue,
.notify_send_post_enqueue = hvs_notify_send_post_enqueue,
- .set_buffer_size = hvs_set_buffer_size,
- .set_min_buffer_size = hvs_set_min_buffer_size,
- .set_max_buffer_size = hvs_set_max_buffer_size,
- .get_buffer_size = hvs_get_buffer_size,
- .get_min_buffer_size = hvs_get_min_buffer_size,
- .get_max_buffer_size = hvs_get_max_buffer_size,
};
+static bool hvs_check_transport(struct vsock_sock *vsk)
+{
+ return vsk->transport == &hvs_transport;
+}
+
static int hvs_probe(struct hv_device *hdev,
const struct hv_vmbus_device_id *dev_id)
{
@@ -962,7 +946,7 @@ static int __init hvs_init(void)
if (ret != 0)
return ret;
- ret = vsock_core_init(&hvs_transport);
+ ret = vsock_core_register(&hvs_transport, VSOCK_TRANSPORT_F_G2H);
if (ret) {
vmbus_driver_unregister(&hvs_drv);
return ret;
@@ -973,7 +957,7 @@ static int __init hvs_init(void)
static void __exit hvs_exit(void)
{
- vsock_core_exit();
+ vsock_core_unregister(&hvs_transport);
vmbus_driver_unregister(&hvs_drv);
}
diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
index 082a30936690..1458c5c8b64d 100644
--- a/net/vmw_vsock/virtio_transport.c
+++ b/net/vmw_vsock/virtio_transport.c
@@ -86,33 +86,6 @@ out_rcu:
return ret;
}
-static void virtio_transport_loopback_work(struct work_struct *work)
-{
- struct virtio_vsock *vsock =
- container_of(work, struct virtio_vsock, loopback_work);
- LIST_HEAD(pkts);
-
- spin_lock_bh(&vsock->loopback_list_lock);
- list_splice_init(&vsock->loopback_list, &pkts);
- spin_unlock_bh(&vsock->loopback_list_lock);
-
- mutex_lock(&vsock->rx_lock);
-
- if (!vsock->rx_run)
- goto out;
-
- while (!list_empty(&pkts)) {
- struct virtio_vsock_pkt *pkt;
-
- pkt = list_first_entry(&pkts, struct virtio_vsock_pkt, list);
- list_del_init(&pkt->list);
-
- virtio_transport_recv_pkt(pkt);
- }
-out:
- mutex_unlock(&vsock->rx_lock);
-}
-
static int virtio_transport_send_pkt_loopback(struct virtio_vsock *vsock,
struct virtio_vsock_pkt *pkt)
{
@@ -370,59 +343,6 @@ static bool virtio_transport_more_replies(struct virtio_vsock *vsock)
return val < virtqueue_get_vring_size(vq);
}
-static void virtio_transport_rx_work(struct work_struct *work)
-{
- struct virtio_vsock *vsock =
- container_of(work, struct virtio_vsock, rx_work);
- struct virtqueue *vq;
-
- vq = vsock->vqs[VSOCK_VQ_RX];
-
- mutex_lock(&vsock->rx_lock);
-
- if (!vsock->rx_run)
- goto out;
-
- do {
- virtqueue_disable_cb(vq);
- for (;;) {
- struct virtio_vsock_pkt *pkt;
- unsigned int len;
-
- if (!virtio_transport_more_replies(vsock)) {
- /* Stop rx until the device processes already
- * pending replies. Leave rx virtqueue
- * callbacks disabled.
- */
- goto out;
- }
-
- pkt = virtqueue_get_buf(vq, &len);
- if (!pkt) {
- break;
- }
-
- vsock->rx_buf_nr--;
-
- /* Drop short/long packets */
- if (unlikely(len < sizeof(pkt->hdr) ||
- len > sizeof(pkt->hdr) + pkt->len)) {
- virtio_transport_free_pkt(pkt);
- continue;
- }
-
- pkt->len = len - sizeof(pkt->hdr);
- virtio_transport_deliver_tap_pkt(pkt);
- virtio_transport_recv_pkt(pkt);
- }
- } while (!virtqueue_enable_cb(vq));
-
-out:
- if (vsock->rx_buf_nr < vsock->rx_buf_max_nr / 2)
- virtio_vsock_rx_fill(vsock);
- mutex_unlock(&vsock->rx_lock);
-}
-
/* event_lock must be held */
static int virtio_vsock_event_fill_one(struct virtio_vsock *vsock,
struct virtio_vsock_event *event)
@@ -542,6 +462,8 @@ static void virtio_vsock_rx_done(struct virtqueue *vq)
static struct virtio_transport virtio_transport = {
.transport = {
+ .module = THIS_MODULE,
+
.get_local_cid = virtio_transport_get_local_cid,
.init = virtio_transport_do_socket_init,
@@ -574,18 +496,92 @@ static struct virtio_transport virtio_transport = {
.notify_send_pre_block = virtio_transport_notify_send_pre_block,
.notify_send_pre_enqueue = virtio_transport_notify_send_pre_enqueue,
.notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue,
-
- .set_buffer_size = virtio_transport_set_buffer_size,
- .set_min_buffer_size = virtio_transport_set_min_buffer_size,
- .set_max_buffer_size = virtio_transport_set_max_buffer_size,
- .get_buffer_size = virtio_transport_get_buffer_size,
- .get_min_buffer_size = virtio_transport_get_min_buffer_size,
- .get_max_buffer_size = virtio_transport_get_max_buffer_size,
+ .notify_buffer_size = virtio_transport_notify_buffer_size,
},
.send_pkt = virtio_transport_send_pkt,
};
+static void virtio_transport_loopback_work(struct work_struct *work)
+{
+ struct virtio_vsock *vsock =
+ container_of(work, struct virtio_vsock, loopback_work);
+ LIST_HEAD(pkts);
+
+ spin_lock_bh(&vsock->loopback_list_lock);
+ list_splice_init(&vsock->loopback_list, &pkts);
+ spin_unlock_bh(&vsock->loopback_list_lock);
+
+ mutex_lock(&vsock->rx_lock);
+
+ if (!vsock->rx_run)
+ goto out;
+
+ while (!list_empty(&pkts)) {
+ struct virtio_vsock_pkt *pkt;
+
+ pkt = list_first_entry(&pkts, struct virtio_vsock_pkt, list);
+ list_del_init(&pkt->list);
+
+ virtio_transport_recv_pkt(&virtio_transport, pkt);
+ }
+out:
+ mutex_unlock(&vsock->rx_lock);
+}
+
+static void virtio_transport_rx_work(struct work_struct *work)
+{
+ struct virtio_vsock *vsock =
+ container_of(work, struct virtio_vsock, rx_work);
+ struct virtqueue *vq;
+
+ vq = vsock->vqs[VSOCK_VQ_RX];
+
+ mutex_lock(&vsock->rx_lock);
+
+ if (!vsock->rx_run)
+ goto out;
+
+ do {
+ virtqueue_disable_cb(vq);
+ for (;;) {
+ struct virtio_vsock_pkt *pkt;
+ unsigned int len;
+
+ if (!virtio_transport_more_replies(vsock)) {
+ /* Stop rx until the device processes already
+ * pending replies. Leave rx virtqueue
+ * callbacks disabled.
+ */
+ goto out;
+ }
+
+ pkt = virtqueue_get_buf(vq, &len);
+ if (!pkt) {
+ break;
+ }
+
+ vsock->rx_buf_nr--;
+
+ /* Drop short/long packets */
+ if (unlikely(len < sizeof(pkt->hdr) ||
+ len > sizeof(pkt->hdr) + pkt->len)) {
+ virtio_transport_free_pkt(pkt);
+ continue;
+ }
+
+ pkt->len = len - sizeof(pkt->hdr);
+ virtio_transport_deliver_tap_pkt(pkt);
+ virtio_transport_recv_pkt(&virtio_transport, pkt);
+ }
+ } while (!virtqueue_enable_cb(vq));
+
+out:
+ if (vsock->rx_buf_nr < vsock->rx_buf_max_nr / 2)
+ virtio_vsock_rx_fill(vsock);
+ mutex_unlock(&vsock->rx_lock);
+}
+
static int virtio_vsock_probe(struct virtio_device *vdev)
{
vq_callback_t *callbacks[] = {
@@ -776,7 +772,8 @@ static int __init virtio_vsock_init(void)
if (!virtio_vsock_workqueue)
return -ENOMEM;
- ret = vsock_core_init(&virtio_transport.transport);
+ ret = vsock_core_register(&virtio_transport.transport,
+ VSOCK_TRANSPORT_F_G2H);
if (ret)
goto out_wq;
@@ -787,7 +784,7 @@ static int __init virtio_vsock_init(void)
return 0;
out_vci:
- vsock_core_exit();
+ vsock_core_unregister(&virtio_transport.transport);
out_wq:
destroy_workqueue(virtio_vsock_workqueue);
return ret;
@@ -796,7 +793,7 @@ out_wq:
static void __exit virtio_vsock_exit(void)
{
unregister_virtio_driver(&virtio_vsock_driver);
- vsock_core_exit();
+ vsock_core_unregister(&virtio_transport.transport);
destroy_workqueue(virtio_vsock_workqueue);
}
diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
index fb2060dffb0a..e5ea29c6bca7 100644
--- a/net/vmw_vsock/virtio_transport_common.c
+++ b/net/vmw_vsock/virtio_transport_common.c
@@ -29,9 +29,10 @@
/* Threshold for detecting small packets to copy */
#define GOOD_COPY_LEN 128
-static const struct virtio_transport *virtio_transport_get_ops(void)
+static const struct virtio_transport *
+virtio_transport_get_ops(struct vsock_sock *vsk)
{
- const struct vsock_transport *t = vsock_core_get_transport();
+ const struct vsock_transport *t = vsock_core_get_transport(vsk);
return container_of(t, struct virtio_transport, transport);
}
@@ -168,7 +169,7 @@ static int virtio_transport_send_pkt_info(struct vsock_sock *vsk,
struct virtio_vsock_pkt *pkt;
u32 pkt_len = info->pkt_len;
- src_cid = vm_sockets_get_local_cid();
+ src_cid = virtio_transport_get_ops(vsk)->transport.get_local_cid();
src_port = vsk->local_addr.svm_port;
if (!info->remote_cid) {
dst_cid = vsk->remote_addr.svm_cid;
@@ -201,7 +202,7 @@ static int virtio_transport_send_pkt_info(struct vsock_sock *vsk,
virtio_transport_inc_tx_pkt(vvs, pkt);
- return virtio_transport_get_ops()->send_pkt(pkt);
+ return virtio_transport_get_ops(vsk)->send_pkt(pkt);
}
static bool virtio_transport_inc_rx_pkt(struct virtio_vsock_sock *vvs,
@@ -268,6 +269,55 @@ static int virtio_transport_send_credit_update(struct vsock_sock *vsk,
}
static ssize_t
+virtio_transport_stream_do_peek(struct vsock_sock *vsk,
+ struct msghdr *msg,
+ size_t len)
+{
+ struct virtio_vsock_sock *vvs = vsk->trans;
+ struct virtio_vsock_pkt *pkt;
+ size_t bytes, total = 0, off;
+ int err = -EFAULT;
+
+ spin_lock_bh(&vvs->rx_lock);
+
+ list_for_each_entry(pkt, &vvs->rx_queue, list) {
+ off = pkt->off;
+
+ if (total == len)
+ break;
+
+ while (total < len && off < pkt->len) {
+ bytes = len - total;
+ if (bytes > pkt->len - off)
+ bytes = pkt->len - off;
+
+ /* sk_lock is held by caller so no one else can dequeue.
+ * Unlock rx_lock since memcpy_to_msg() may sleep.
+ */
+ spin_unlock_bh(&vvs->rx_lock);
+
+ err = memcpy_to_msg(msg, pkt->buf + off, bytes);
+ if (err)
+ goto out;
+
+ spin_lock_bh(&vvs->rx_lock);
+
+ total += bytes;
+ off += bytes;
+ }
+ }
+
+ spin_unlock_bh(&vvs->rx_lock);
+
+ return total;
+
+out:
+ if (total)
+ err = total;
+ return err;
+}
+
+static ssize_t
virtio_transport_stream_do_dequeue(struct vsock_sock *vsk,
struct msghdr *msg,
size_t len)
@@ -339,9 +389,9 @@ virtio_transport_stream_dequeue(struct vsock_sock *vsk,
size_t len, int flags)
{
if (flags & MSG_PEEK)
- return -EOPNOTSUPP;
-
- return virtio_transport_stream_do_dequeue(vsk, msg, len);
+ return virtio_transport_stream_do_peek(vsk, msg, len);
+ else
+ return virtio_transport_stream_do_dequeue(vsk, msg, len);
}
EXPORT_SYMBOL_GPL(virtio_transport_stream_dequeue);
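
With the peek path above, MSG_PEEK on a virtio stream socket no longer fails with -EOPNOTSUPP. A small illustrative userspace helper (fd is assumed to be a connected AF_VSOCK stream socket):

#include <sys/socket.h>

static ssize_t peek_then_read(int fd, char *buf, size_t len)
{
	/* MSG_PEEK copies queued data without consuming it... */
	ssize_t peeked = recv(fd, buf, len, MSG_PEEK);

	if (peeked < 0)
		return peeked;
	/* ...so a subsequent plain recv() returns the same bytes. */
	return recv(fd, buf, len, 0);
}
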
@@ -403,20 +453,16 @@ int virtio_transport_do_socket_init(struct vsock_sock *vsk,
vsk->trans = vvs;
vvs->vsk = vsk;
- if (psk) {
+ if (psk && psk->trans) {
struct virtio_vsock_sock *ptrans = psk->trans;
- vvs->buf_size = ptrans->buf_size;
- vvs->buf_size_min = ptrans->buf_size_min;
- vvs->buf_size_max = ptrans->buf_size_max;
vvs->peer_buf_alloc = ptrans->peer_buf_alloc;
- } else {
- vvs->buf_size = VIRTIO_VSOCK_DEFAULT_BUF_SIZE;
- vvs->buf_size_min = VIRTIO_VSOCK_DEFAULT_MIN_BUF_SIZE;
- vvs->buf_size_max = VIRTIO_VSOCK_DEFAULT_MAX_BUF_SIZE;
}
- vvs->buf_alloc = vvs->buf_size;
+ if (vsk->buffer_size > VIRTIO_VSOCK_MAX_BUF_SIZE)
+ vsk->buffer_size = VIRTIO_VSOCK_MAX_BUF_SIZE;
+
+ vvs->buf_alloc = vsk->buffer_size;
spin_lock_init(&vvs->rx_lock);
spin_lock_init(&vvs->tx_lock);
@@ -426,71 +472,20 @@ int virtio_transport_do_socket_init(struct vsock_sock *vsk,
}
EXPORT_SYMBOL_GPL(virtio_transport_do_socket_init);
-u64 virtio_transport_get_buffer_size(struct vsock_sock *vsk)
-{
- struct virtio_vsock_sock *vvs = vsk->trans;
-
- return vvs->buf_size;
-}
-EXPORT_SYMBOL_GPL(virtio_transport_get_buffer_size);
-
-u64 virtio_transport_get_min_buffer_size(struct vsock_sock *vsk)
-{
- struct virtio_vsock_sock *vvs = vsk->trans;
-
- return vvs->buf_size_min;
-}
-EXPORT_SYMBOL_GPL(virtio_transport_get_min_buffer_size);
-
-u64 virtio_transport_get_max_buffer_size(struct vsock_sock *vsk)
+/* sk_lock held by the caller */
+void virtio_transport_notify_buffer_size(struct vsock_sock *vsk, u64 *val)
{
struct virtio_vsock_sock *vvs = vsk->trans;
- return vvs->buf_size_max;
-}
-EXPORT_SYMBOL_GPL(virtio_transport_get_max_buffer_size);
+ if (*val > VIRTIO_VSOCK_MAX_BUF_SIZE)
+ *val = VIRTIO_VSOCK_MAX_BUF_SIZE;
-void virtio_transport_set_buffer_size(struct vsock_sock *vsk, u64 val)
-{
- struct virtio_vsock_sock *vvs = vsk->trans;
-
- if (val > VIRTIO_VSOCK_MAX_BUF_SIZE)
- val = VIRTIO_VSOCK_MAX_BUF_SIZE;
- if (val < vvs->buf_size_min)
- vvs->buf_size_min = val;
- if (val > vvs->buf_size_max)
- vvs->buf_size_max = val;
- vvs->buf_size = val;
- vvs->buf_alloc = val;
+ vvs->buf_alloc = *val;
virtio_transport_send_credit_update(vsk, VIRTIO_VSOCK_TYPE_STREAM,
NULL);
}
-EXPORT_SYMBOL_GPL(virtio_transport_set_buffer_size);
-
-void virtio_transport_set_min_buffer_size(struct vsock_sock *vsk, u64 val)
-{
- struct virtio_vsock_sock *vvs = vsk->trans;
-
- if (val > VIRTIO_VSOCK_MAX_BUF_SIZE)
- val = VIRTIO_VSOCK_MAX_BUF_SIZE;
- if (val > vvs->buf_size)
- vvs->buf_size = val;
- vvs->buf_size_min = val;
-}
-EXPORT_SYMBOL_GPL(virtio_transport_set_min_buffer_size);
-
-void virtio_transport_set_max_buffer_size(struct vsock_sock *vsk, u64 val)
-{
- struct virtio_vsock_sock *vvs = vsk->trans;
-
- if (val > VIRTIO_VSOCK_MAX_BUF_SIZE)
- val = VIRTIO_VSOCK_MAX_BUF_SIZE;
- if (val < vvs->buf_size)
- vvs->buf_size = val;
- vvs->buf_size_max = val;
-}
-EXPORT_SYMBOL_GPL(virtio_transport_set_max_buffer_size);
+EXPORT_SYMBOL_GPL(virtio_transport_notify_buffer_size);
int
virtio_transport_notify_poll_in(struct vsock_sock *vsk,
@@ -582,9 +577,7 @@ EXPORT_SYMBOL_GPL(virtio_transport_notify_send_post_enqueue);
u64 virtio_transport_stream_rcvhiwat(struct vsock_sock *vsk)
{
- struct virtio_vsock_sock *vvs = vsk->trans;
-
- return vvs->buf_size;
+ return vsk->buffer_size;
}
EXPORT_SYMBOL_GPL(virtio_transport_stream_rcvhiwat);
@@ -696,9 +689,9 @@ static int virtio_transport_reset(struct vsock_sock *vsk,
/* Normally packets are associated with a socket. There may be no socket if an
* attempt was made to connect to a socket that does not exist.
*/
-static int virtio_transport_reset_no_sock(struct virtio_vsock_pkt *pkt)
+static int virtio_transport_reset_no_sock(const struct virtio_transport *t,
+ struct virtio_vsock_pkt *pkt)
{
- const struct virtio_transport *t;
struct virtio_vsock_pkt *reply;
struct virtio_vsock_pkt_info info = {
.op = VIRTIO_VSOCK_OP_RST,
@@ -718,7 +711,6 @@ static int virtio_transport_reset_no_sock(struct virtio_vsock_pkt *pkt)
if (!reply)
return -ENOMEM;
- t = virtio_transport_get_ops();
if (!t) {
virtio_transport_free_pkt(reply);
return -ENOTCONN;
@@ -994,13 +986,39 @@ virtio_transport_send_response(struct vsock_sock *vsk,
return virtio_transport_send_pkt_info(vsk, &info);
}
+static bool virtio_transport_space_update(struct sock *sk,
+ struct virtio_vsock_pkt *pkt)
+{
+ struct vsock_sock *vsk = vsock_sk(sk);
+ struct virtio_vsock_sock *vvs = vsk->trans;
+ bool space_available;
+
+ /* Listener sockets are not associated with any transport, so we
+ * cannot check their state to see whether space is available in the
+ * remote peer. Since they are only used to receive requests, we can
+ * assume that the other peer always has space available.
+ */
+ if (!vvs)
+ return true;
+
+ /* buf_alloc and fwd_cnt are always included in the hdr */
+ spin_lock_bh(&vvs->tx_lock);
+ vvs->peer_buf_alloc = le32_to_cpu(pkt->hdr.buf_alloc);
+ vvs->peer_fwd_cnt = le32_to_cpu(pkt->hdr.fwd_cnt);
+ space_available = virtio_transport_has_space(vsk);
+ spin_unlock_bh(&vvs->tx_lock);
+ return space_available;
+}
+
/* Handle server socket */
static int
-virtio_transport_recv_listen(struct sock *sk, struct virtio_vsock_pkt *pkt)
+virtio_transport_recv_listen(struct sock *sk, struct virtio_vsock_pkt *pkt,
+ struct virtio_transport *t)
{
struct vsock_sock *vsk = vsock_sk(sk);
struct vsock_sock *vchild;
struct sock *child;
+ int ret;
if (le16_to_cpu(pkt->hdr.op) != VIRTIO_VSOCK_OP_REQUEST) {
virtio_transport_reset(vsk, pkt);
@@ -1012,14 +1030,13 @@ virtio_transport_recv_listen(struct sock *sk, struct virtio_vsock_pkt *pkt)
return -ENOMEM;
}
- child = __vsock_create(sock_net(sk), NULL, sk, GFP_KERNEL,
- sk->sk_type, 0);
+ child = vsock_create_connected(sk);
if (!child) {
virtio_transport_reset(vsk, pkt);
return -ENOMEM;
}
- sk->sk_ack_backlog++;
+ sk_acceptq_added(sk);
lock_sock_nested(child, SINGLE_DEPTH_NESTING);
@@ -1031,6 +1048,20 @@ virtio_transport_recv_listen(struct sock *sk, struct virtio_vsock_pkt *pkt)
vsock_addr_init(&vchild->remote_addr, le64_to_cpu(pkt->hdr.src_cid),
le32_to_cpu(pkt->hdr.src_port));
+ ret = vsock_assign_transport(vchild, vsk);
+ /* The transport assigned (based on remote_addr) must be the same
+ * one on which we received the request.
+ */
+ if (ret || vchild->transport != &t->transport) {
+ release_sock(child);
+ virtio_transport_reset(vsk, pkt);
+ sock_put(child);
+ return ret;
+ }
+
+ if (virtio_transport_space_update(child, pkt))
+ child->sk_write_space(child);
+
vsock_insert_connected(vchild);
vsock_enqueue_accept(sk, child);
virtio_transport_send_response(vchild, pkt);
@@ -1041,26 +1072,11 @@ virtio_transport_recv_listen(struct sock *sk, struct virtio_vsock_pkt *pkt)
return 0;
}
-static bool virtio_transport_space_update(struct sock *sk,
- struct virtio_vsock_pkt *pkt)
-{
- struct vsock_sock *vsk = vsock_sk(sk);
- struct virtio_vsock_sock *vvs = vsk->trans;
- bool space_available;
-
- /* buf_alloc and fwd_cnt is always included in the hdr */
- spin_lock_bh(&vvs->tx_lock);
- vvs->peer_buf_alloc = le32_to_cpu(pkt->hdr.buf_alloc);
- vvs->peer_fwd_cnt = le32_to_cpu(pkt->hdr.fwd_cnt);
- space_available = virtio_transport_has_space(vsk);
- spin_unlock_bh(&vvs->tx_lock);
- return space_available;
-}
-
/* We are under the virtio-vsock's vsock->rx_lock or vhost-vsock's vq->mutex
* lock.
*/
-void virtio_transport_recv_pkt(struct virtio_vsock_pkt *pkt)
+void virtio_transport_recv_pkt(struct virtio_transport *t,
+ struct virtio_vsock_pkt *pkt)
{
struct sockaddr_vm src, dst;
struct vsock_sock *vsk;
@@ -1082,7 +1098,7 @@ void virtio_transport_recv_pkt(struct virtio_vsock_pkt *pkt)
le32_to_cpu(pkt->hdr.fwd_cnt));
if (le16_to_cpu(pkt->hdr.type) != VIRTIO_VSOCK_TYPE_STREAM) {
- (void)virtio_transport_reset_no_sock(pkt);
+ (void)virtio_transport_reset_no_sock(t, pkt);
goto free_pkt;
}
@@ -1093,7 +1109,7 @@ void virtio_transport_recv_pkt(struct virtio_vsock_pkt *pkt)
if (!sk) {
sk = vsock_find_bound_socket(&dst);
if (!sk) {
- (void)virtio_transport_reset_no_sock(pkt);
+ (void)virtio_transport_reset_no_sock(t, pkt);
goto free_pkt;
}
}
@@ -1112,7 +1128,7 @@ void virtio_transport_recv_pkt(struct virtio_vsock_pkt *pkt)
switch (sk->sk_state) {
case TCP_LISTEN:
- virtio_transport_recv_listen(sk, pkt);
+ virtio_transport_recv_listen(sk, pkt, t);
virtio_transport_free_pkt(pkt);
break;
case TCP_SYN_SENT:
@@ -1130,6 +1146,7 @@ void virtio_transport_recv_pkt(struct virtio_vsock_pkt *pkt)
virtio_transport_free_pkt(pkt);
break;
}
+
release_sock(sk);
/* Release refcnt obtained when we fetched this socket out of the
diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c
index 8c9c4ed90fa7..644d32e43d23 100644
--- a/net/vmw_vsock/vmci_transport.c
+++ b/net/vmw_vsock/vmci_transport.c
@@ -57,6 +57,7 @@ static bool vmci_transport_old_proto_override(bool *old_pkt_proto);
static u16 vmci_transport_new_proto_supported_versions(void);
static bool vmci_transport_proto_to_notify_struct(struct sock *sk, u16 *proto,
bool old_pkt_proto);
+static bool vmci_check_transport(struct vsock_sock *vsk);
struct vmci_transport_recv_pkt_info {
struct work_struct work;
@@ -74,15 +75,6 @@ static u32 vmci_transport_qp_resumed_sub_id = VMCI_INVALID_ID;
static int PROTOCOL_OVERRIDE = -1;
-#define VMCI_TRANSPORT_DEFAULT_QP_SIZE_MIN 128
-#define VMCI_TRANSPORT_DEFAULT_QP_SIZE 262144
-#define VMCI_TRANSPORT_DEFAULT_QP_SIZE_MAX 262144
-
-/* The default peer timeout indicates how long we will wait for a peer response
- * to a control message.
- */
-#define VSOCK_DEFAULT_CONNECT_TIMEOUT (2 * HZ)
-
/* Helper function to convert from a VMCI error code to a VSock error code. */
static s32 vmci_transport_error_to_vsock_error(s32 vmci_error)
@@ -1013,8 +1005,7 @@ static int vmci_transport_recv_listen(struct sock *sk,
return -ECONNREFUSED;
}
- pending = __vsock_create(sock_net(sk), NULL, sk, GFP_KERNEL,
- sk->sk_type, 0);
+ pending = vsock_create_connected(sk);
if (!pending) {
vmci_transport_send_reset(sk, pkt);
return -ENOMEM;
@@ -1027,14 +1018,24 @@ static int vmci_transport_recv_listen(struct sock *sk,
vsock_addr_init(&vpending->remote_addr, pkt->dg.src.context,
pkt->src_port);
+ err = vsock_assign_transport(vpending, vsock_sk(sk));
+ /* The transport assigned (based on remote_addr) must be the same
+ * one on which we received the request.
+ */
+ if (err || !vmci_check_transport(vpending)) {
+ vmci_transport_send_reset(sk, pkt);
+ sock_put(pending);
+ return err;
+ }
+
/* If the proposed size fits within our min/max, accept it. Otherwise
* propose our own size.
*/
- if (pkt->u.size >= vmci_trans(vpending)->queue_pair_min_size &&
- pkt->u.size <= vmci_trans(vpending)->queue_pair_max_size) {
+ if (pkt->u.size >= vpending->buffer_min_size &&
+ pkt->u.size <= vpending->buffer_max_size) {
qp_size = pkt->u.size;
} else {
- qp_size = vmci_trans(vpending)->queue_pair_size;
+ qp_size = vpending->buffer_size;
}
/* Figure out if we are using old or new requests based on the
@@ -1098,12 +1099,12 @@ static int vmci_transport_recv_listen(struct sock *sk,
}
vsock_add_pending(sk, pending);
- sk->sk_ack_backlog++;
+ sk_acceptq_added(sk);
pending->sk_state = TCP_SYN_SENT;
vmci_trans(vpending)->produce_size =
vmci_trans(vpending)->consume_size = qp_size;
- vmci_trans(vpending)->queue_pair_size = qp_size;
+ vpending->buffer_size = qp_size;
vmci_trans(vpending)->notify_ops->process_request(pending);
@@ -1397,8 +1398,8 @@ static int vmci_transport_recv_connecting_client_negotiate(
vsk->ignore_connecting_rst = false;
/* Verify that we're OK with the proposed queue pair size */
- if (pkt->u.size < vmci_trans(vsk)->queue_pair_min_size ||
- pkt->u.size > vmci_trans(vsk)->queue_pair_max_size) {
+ if (pkt->u.size < vsk->buffer_min_size ||
+ pkt->u.size > vsk->buffer_max_size) {
err = -EINVAL;
goto destroy;
}
@@ -1503,8 +1504,7 @@ vmci_transport_recv_connecting_client_invalid(struct sock *sk,
vsk->sent_request = false;
vsk->ignore_connecting_rst = true;
- err = vmci_transport_send_conn_request(
- sk, vmci_trans(vsk)->queue_pair_size);
+ err = vmci_transport_send_conn_request(sk, vsk->buffer_size);
if (err < 0)
err = vmci_transport_error_to_vsock_error(err);
else
@@ -1588,21 +1588,6 @@ static int vmci_transport_socket_init(struct vsock_sock *vsk,
INIT_LIST_HEAD(&vmci_trans(vsk)->elem);
vmci_trans(vsk)->sk = &vsk->sk;
spin_lock_init(&vmci_trans(vsk)->lock);
- if (psk) {
- vmci_trans(vsk)->queue_pair_size =
- vmci_trans(psk)->queue_pair_size;
- vmci_trans(vsk)->queue_pair_min_size =
- vmci_trans(psk)->queue_pair_min_size;
- vmci_trans(vsk)->queue_pair_max_size =
- vmci_trans(psk)->queue_pair_max_size;
- } else {
- vmci_trans(vsk)->queue_pair_size =
- VMCI_TRANSPORT_DEFAULT_QP_SIZE;
- vmci_trans(vsk)->queue_pair_min_size =
- VMCI_TRANSPORT_DEFAULT_QP_SIZE_MIN;
- vmci_trans(vsk)->queue_pair_max_size =
- VMCI_TRANSPORT_DEFAULT_QP_SIZE_MAX;
- }
return 0;
}
@@ -1818,8 +1803,7 @@ static int vmci_transport_connect(struct vsock_sock *vsk)
if (vmci_transport_old_proto_override(&old_pkt_proto) &&
old_pkt_proto) {
- err = vmci_transport_send_conn_request(
- sk, vmci_trans(vsk)->queue_pair_size);
+ err = vmci_transport_send_conn_request(sk, vsk->buffer_size);
if (err < 0) {
sk->sk_state = TCP_CLOSE;
return err;
@@ -1827,8 +1811,7 @@ static int vmci_transport_connect(struct vsock_sock *vsk)
} else {
int supported_proto_versions =
vmci_transport_new_proto_supported_versions();
- err = vmci_transport_send_conn_request2(
- sk, vmci_trans(vsk)->queue_pair_size,
+ err = vmci_transport_send_conn_request2(sk, vsk->buffer_size,
supported_proto_versions);
if (err < 0) {
sk->sk_state = TCP_CLOSE;
@@ -1881,46 +1864,6 @@ static bool vmci_transport_stream_is_active(struct vsock_sock *vsk)
return !vmci_handle_is_invalid(vmci_trans(vsk)->qp_handle);
}
-static u64 vmci_transport_get_buffer_size(struct vsock_sock *vsk)
-{
- return vmci_trans(vsk)->queue_pair_size;
-}
-
-static u64 vmci_transport_get_min_buffer_size(struct vsock_sock *vsk)
-{
- return vmci_trans(vsk)->queue_pair_min_size;
-}
-
-static u64 vmci_transport_get_max_buffer_size(struct vsock_sock *vsk)
-{
- return vmci_trans(vsk)->queue_pair_max_size;
-}
-
-static void vmci_transport_set_buffer_size(struct vsock_sock *vsk, u64 val)
-{
- if (val < vmci_trans(vsk)->queue_pair_min_size)
- vmci_trans(vsk)->queue_pair_min_size = val;
- if (val > vmci_trans(vsk)->queue_pair_max_size)
- vmci_trans(vsk)->queue_pair_max_size = val;
- vmci_trans(vsk)->queue_pair_size = val;
-}
-
-static void vmci_transport_set_min_buffer_size(struct vsock_sock *vsk,
- u64 val)
-{
- if (val > vmci_trans(vsk)->queue_pair_size)
- vmci_trans(vsk)->queue_pair_size = val;
- vmci_trans(vsk)->queue_pair_min_size = val;
-}
-
-static void vmci_transport_set_max_buffer_size(struct vsock_sock *vsk,
- u64 val)
-{
- if (val < vmci_trans(vsk)->queue_pair_size)
- vmci_trans(vsk)->queue_pair_size = val;
- vmci_trans(vsk)->queue_pair_max_size = val;
-}
-
static int vmci_transport_notify_poll_in(
struct vsock_sock *vsk,
size_t target,
@@ -2076,7 +2019,8 @@ static u32 vmci_transport_get_local_cid(void)
return vmci_get_context_id();
}
-static const struct vsock_transport vmci_transport = {
+static struct vsock_transport vmci_transport = {
+ .module = THIS_MODULE,
.init = vmci_transport_socket_init,
.destruct = vmci_transport_destruct,
.release = vmci_transport_release,
@@ -2103,15 +2047,26 @@ static const struct vsock_transport vmci_transport = {
.notify_send_pre_enqueue = vmci_transport_notify_send_pre_enqueue,
.notify_send_post_enqueue = vmci_transport_notify_send_post_enqueue,
.shutdown = vmci_transport_shutdown,
- .set_buffer_size = vmci_transport_set_buffer_size,
- .set_min_buffer_size = vmci_transport_set_min_buffer_size,
- .set_max_buffer_size = vmci_transport_set_max_buffer_size,
- .get_buffer_size = vmci_transport_get_buffer_size,
- .get_min_buffer_size = vmci_transport_get_min_buffer_size,
- .get_max_buffer_size = vmci_transport_get_max_buffer_size,
.get_local_cid = vmci_transport_get_local_cid,
};
+static bool vmci_check_transport(struct vsock_sock *vsk)
+{
+ return vsk->transport == &vmci_transport;
+}
+
+void vmci_vsock_transport_cb(bool is_host)
+{
+ int features;
+
+ if (is_host)
+ features = VSOCK_TRANSPORT_F_H2G;
+ else
+ features = VSOCK_TRANSPORT_F_G2H;
+
+ vsock_core_register(&vmci_transport, features);
+}
+
static int __init vmci_transport_init(void)
{
int err;
@@ -2128,7 +2083,6 @@ static int __init vmci_transport_init(void)
pr_err("Unable to create datagram handle. (%d)\n", err);
return vmci_transport_error_to_vsock_error(err);
}
-
err = vmci_event_subscribe(VMCI_EVENT_QP_RESUMED,
vmci_transport_qp_resumed_cb,
NULL, &vmci_transport_qp_resumed_sub_id);
@@ -2139,12 +2093,21 @@ static int __init vmci_transport_init(void)
goto err_destroy_stream_handle;
}
- err = vsock_core_init(&vmci_transport);
+ /* Register only the dgram feature; the other features (H2G, G2H) will
+ * be registered when the first host or guest becomes active.
+ */
+ err = vsock_core_register(&vmci_transport, VSOCK_TRANSPORT_F_DGRAM);
if (err < 0)
goto err_unsubscribe;
+ err = vmci_register_vsock_callback(vmci_vsock_transport_cb);
+ if (err < 0)
+ goto err_unregister;
+
return 0;
+err_unregister:
+ vsock_core_unregister(&vmci_transport);
err_unsubscribe:
vmci_event_unsubscribe(vmci_transport_qp_resumed_sub_id);
err_destroy_stream_handle:
@@ -2170,7 +2133,8 @@ static void __exit vmci_transport_exit(void)
vmci_transport_qp_resumed_sub_id = VMCI_INVALID_ID;
}
- vsock_core_exit();
+ vmci_register_vsock_callback(NULL);
+ vsock_core_unregister(&vmci_transport);
}
module_exit(vmci_transport_exit);
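
The per-feature registration above is what makes nested setups work: an L1 host that is itself a guest can have a G2H transport (e.g. virtio) and VMCI's H2G side loaded at the same time, and the core routes each connection by remote CID. A userspace sketch (the helper, CID and port values are illustrative):

#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/vm_sockets.h>

/* vsock_assign_transport() picks G2H for VMADDR_CID_HOST (or our own
 * guest CID) and H2G for any other CID, so the same call reaches either
 * the outer host or a nested guest.
 */
static int vsock_connect(unsigned int cid, unsigned int port)
{
	struct sockaddr_vm addr;
	int fd = socket(AF_VSOCK, SOCK_STREAM, 0);

	if (fd < 0)
		return -1;
	memset(&addr, 0, sizeof(addr));
	addr.svm_family = AF_VSOCK;
	addr.svm_cid = cid;	/* e.g. VMADDR_CID_HOST or a guest's CID */
	addr.svm_port = port;
	if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}
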
diff --git a/net/vmw_vsock/vmci_transport.h b/net/vmw_vsock/vmci_transport.h
index 1ca1e8640b31..b7b072194282 100644
--- a/net/vmw_vsock/vmci_transport.h
+++ b/net/vmw_vsock/vmci_transport.h
@@ -108,9 +108,6 @@ struct vmci_transport {
struct vmci_qp *qpair;
u64 produce_size;
u64 consume_size;
- u64 queue_pair_size;
- u64 queue_pair_min_size;
- u64 queue_pair_max_size;
u32 detach_sub_id;
union vmci_transport_notify notify;
const struct vmci_transport_notify_ops *notify_ops;
diff --git a/net/vmw_vsock/vmci_transport_notify.h b/net/vmw_vsock/vmci_transport_notify.h
index 7843f08d4290..a1aa5a998c0e 100644
--- a/net/vmw_vsock/vmci_transport_notify.h
+++ b/net/vmw_vsock/vmci_transport_notify.h
@@ -11,7 +11,6 @@
#include <linux/types.h>
#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
-#include <linux/vm_sockets.h>
#include "vmci_transport.h"
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 7b72286922f7..da5262b2298b 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -624,6 +624,7 @@ const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
.len = SAE_PASSWORD_MAX_LEN },
[NL80211_ATTR_TWT_RESPONDER] = { .type = NLA_FLAG },
[NL80211_ATTR_HE_OBSS_PD] = NLA_POLICY_NESTED(he_obss_pd_policy),
+ [NL80211_ATTR_VLAN_ID] = NLA_POLICY_RANGE(NLA_U16, 1, VLAN_N_VID - 2),
};
/* policy for the key attributes */
@@ -3940,6 +3941,10 @@ static int nl80211_new_key(struct sk_buff *skb, struct genl_info *info)
key.type != NL80211_KEYTYPE_GROUP)
return -EINVAL;
+ if (key.type == NL80211_KEYTYPE_GROUP &&
+ info->attrs[NL80211_ATTR_VLAN_ID])
+ key.p.vlan_id = nla_get_u16(info->attrs[NL80211_ATTR_VLAN_ID]);
+
if (!rdev->ops->add_key)
return -EOPNOTSUPP;
@@ -5711,6 +5716,9 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
if (info->attrs[NL80211_ATTR_STA_AID])
params.aid = nla_get_u16(info->attrs[NL80211_ATTR_STA_AID]);
+ if (info->attrs[NL80211_ATTR_VLAN_ID])
+ params.vlan_id = nla_get_u16(info->attrs[NL80211_ATTR_VLAN_ID]);
+
if (info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL])
params.listen_interval =
nla_get_u16(info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL]);
@@ -5856,6 +5864,9 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
params.listen_interval =
nla_get_u16(info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL]);
+ if (info->attrs[NL80211_ATTR_VLAN_ID])
+ params.vlan_id = nla_get_u16(info->attrs[NL80211_ATTR_VLAN_ID]);
+
if (info->attrs[NL80211_ATTR_STA_SUPPORT_P2P_PS]) {
params.support_p2p_ps =
nla_get_u8(info->attrs[NL80211_ATTR_STA_SUPPORT_P2P_PS]);
@@ -8265,10 +8276,8 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
/* leave request id zero for legacy request
* or if driver does not support multi-scheduled scan
*/
- if (want_multi && rdev->wiphy.max_sched_scan_reqs > 1) {
- while (!sched_scan_req->reqid)
- sched_scan_req->reqid = cfg80211_assign_cookie(rdev);
- }
+ if (want_multi && rdev->wiphy.max_sched_scan_reqs > 1)
+ sched_scan_req->reqid = cfg80211_assign_cookie(rdev);
err = rdev_sched_scan_start(rdev, dev, sched_scan_req);
if (err)
diff --git a/net/wireless/reg.h b/net/wireless/reg.h
index dc8f689bd469..f9e83031a40a 100644
--- a/net/wireless/reg.h
+++ b/net/wireless/reg.h
@@ -114,7 +114,7 @@ void regulatory_hint_country_ie(struct wiphy *wiphy,
u8 country_ie_len);
/**
- * regulatory_hint_disconnect - informs all devices have been disconneted
+ * regulatory_hint_disconnect - informs that all devices have been disconnected
*
* Regulatory rules can be enhanced further upon scanning and upon
* connection to an AP. These rules become stale if we disconnect
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index 6aee9f5e8e71..c34f7d077604 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -891,7 +891,7 @@ static int x25_accept(struct socket *sock, struct socket *newsock, int flags,
/* Now attach up the new socket */
skb->sk = NULL;
kfree_skb(skb);
- sk->sk_ack_backlog--;
+ sk_acceptq_removed(sk);
newsock->state = SS_CONNECTED;
rc = 0;
out2:
@@ -1062,7 +1062,7 @@ int x25_rx_call_request(struct sk_buff *skb, struct x25_neigh *nb,
skb_copy_from_linear_data(skb, makex25->calluserdata.cuddata, skb->len);
makex25->calluserdata.cudlength = skb->len;
- sk->sk_ack_backlog++;
+ sk_acceptq_added(sk);
x25_insert_socket(make);
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index 9044073fbf22..956793893c9d 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -196,7 +196,7 @@ static bool xsk_is_bound(struct xdp_sock *xs)
return false;
}
-int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
+static int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
u32 len;
@@ -212,7 +212,7 @@ int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
__xsk_rcv_zc(xs, xdp, len) : __xsk_rcv(xs, xdp, len);
}
-void xsk_flush(struct xdp_sock *xs)
+static void xsk_flush(struct xdp_sock *xs)
{
xskq_produce_flush_desc(xs->rx);
xs->sk.sk_data_ready(&xs->sk);
@@ -264,6 +264,35 @@ out_unlock:
return err;
}
+int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp,
+ struct xdp_sock *xs)
+{
+ struct xsk_map *m = container_of(map, struct xsk_map, map);
+ struct list_head *flush_list = this_cpu_ptr(m->flush_list);
+ int err;
+
+ err = xsk_rcv(xs, xdp);
+ if (err)
+ return err;
+
+ if (!xs->flush_node.prev)
+ list_add(&xs->flush_node, flush_list);
+
+ return 0;
+}
+
+void __xsk_map_flush(struct bpf_map *map)
+{
+ struct xsk_map *m = container_of(map, struct xsk_map, map);
+ struct list_head *flush_list = this_cpu_ptr(m->flush_list);
+ struct xdp_sock *xs, *tmp;
+
+ list_for_each_entry_safe(xs, tmp, flush_list, flush_node) {
+ xsk_flush(xs);
+ __list_del_clearprev(&xs->flush_node);
+ }
+}
+
void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries)
{
xskq_produce_flush_addr_n(umem->cq, nb_entries);
@@ -418,10 +447,10 @@ static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
return __xsk_sendmsg(sk);
}
-static unsigned int xsk_poll(struct file *file, struct socket *sock,
+static __poll_t xsk_poll(struct file *file, struct socket *sock,
struct poll_table_struct *wait)
{
- unsigned int mask = datagram_poll(file, sock, wait);
+ __poll_t mask = datagram_poll(file, sock, wait);
struct sock *sk = sock->sk;
struct xdp_sock *xs = xdp_sk(sk);
struct net_device *dev;
@@ -443,9 +472,9 @@ static unsigned int xsk_poll(struct file *file, struct socket *sock,
}
if (xs->rx && !xskq_empty_desc(xs->rx))
- mask |= POLLIN | POLLRDNORM;
+ mask |= EPOLLIN | EPOLLRDNORM;
if (xs->tx && !xskq_full_desc(xs->tx))
- mask |= POLLOUT | POLLWRNORM;
+ mask |= EPOLLOUT | EPOLLWRNORM;
return mask;
}
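
For context, __xsk_map_redirect()/__xsk_map_flush() are reached from the generic XDP redirect helpers rather than called by drivers directly; a simplified kernel-side sketch of the call pattern in a driver's NAPI poll (function name and loop structure are illustrative):

#include <linux/filter.h>
#include <linux/netdevice.h>

static void napi_poll_sketch(struct net_device *dev, struct bpf_prog *prog,
			     struct xdp_buff **bufs, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		/* XDP_REDIRECT to an XSKMAP ends up in __xsk_map_redirect(),
		 * which queues the socket on this CPU's flush_list.
		 */
		if (bpf_prog_run_xdp(prog, bufs[i]) == XDP_REDIRECT)
			xdp_do_redirect(dev, bufs[i], prog);
	}
	/* One flush per poll cycle: __xsk_map_flush() wakes every queued
	 * socket via xsk_flush().
	 */
	xdp_do_flush_map();
}
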
diff --git a/net/xfrm/Kconfig b/net/xfrm/Kconfig
index 51bb6018f3bf..6921a18201a0 100644
--- a/net/xfrm/Kconfig
+++ b/net/xfrm/Kconfig
@@ -3,20 +3,20 @@
# XFRM configuration
#
config XFRM
- bool
- depends on INET
- select GRO_CELLS
- select SKB_EXTENSIONS
+ bool
+ depends on INET
+ select GRO_CELLS
+ select SKB_EXTENSIONS
config XFRM_OFFLOAD
- bool
+ bool
config XFRM_ALGO
tristate
select XFRM
select CRYPTO
select CRYPTO_HASH
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
if INET
config XFRM_USER
diff --git a/net/xfrm/xfrm_algo.c b/net/xfrm/xfrm_algo.c
index 32a378e7011f..4dae3ab8d030 100644
--- a/net/xfrm/xfrm_algo.c
+++ b/net/xfrm/xfrm_algo.c
@@ -626,8 +626,8 @@ static const struct xfrm_algo_list xfrm_aalg_list = {
static const struct xfrm_algo_list xfrm_ealg_list = {
.algs = ealg_list,
.entries = ARRAY_SIZE(ealg_list),
- .type = CRYPTO_ALG_TYPE_BLKCIPHER,
- .mask = CRYPTO_ALG_TYPE_BLKCIPHER_MASK,
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .mask = CRYPTO_ALG_TYPE_MASK,
};
static const struct xfrm_algo_list xfrm_calg_list = {
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
index 9b599ed66d97..2c86a2fc3915 100644
--- a/net/xfrm/xfrm_input.c
+++ b/net/xfrm/xfrm_input.c
@@ -480,6 +480,9 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
else
XFRM_INC_STATS(net,
LINUX_MIB_XFRMINSTATEINVALID);
+
+ if (encap_type == -1)
+ dev_put(skb->dev);
goto drop;
}
diff --git a/net/xfrm/xfrm_interface.c b/net/xfrm/xfrm_interface.c
index 0f5131bc3342..7ac1542feaf8 100644
--- a/net/xfrm/xfrm_interface.c
+++ b/net/xfrm/xfrm_interface.c
@@ -732,30 +732,7 @@ static struct rtnl_link_ops xfrmi_link_ops __read_mostly = {
.get_link_net = xfrmi_get_link_net,
};
-static void __net_exit xfrmi_destroy_interfaces(struct xfrmi_net *xfrmn)
-{
- struct xfrm_if *xi;
- LIST_HEAD(list);
-
- xi = rtnl_dereference(xfrmn->xfrmi[0]);
- if (!xi)
- return;
-
- unregister_netdevice_queue(xi->dev, &list);
- unregister_netdevice_many(&list);
-}
-
-static void __net_exit xfrmi_exit_net(struct net *net)
-{
- struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id);
-
- rtnl_lock();
- xfrmi_destroy_interfaces(xfrmn);
- rtnl_unlock();
-}
-
static struct pernet_operations xfrmi_net_ops = {
- .exit = xfrmi_exit_net,
.id = &xfrmi_net_id,
.size = sizeof(struct xfrmi_net),
};
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index c6f3c4a1bd99..f3423562d933 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -495,6 +495,8 @@ static void ___xfrm_state_destroy(struct xfrm_state *x)
x->type->destructor(x);
xfrm_put_type(x->type);
}
+ if (x->xfrag.page)
+ put_page(x->xfrag.page);
xfrm_dev_state_free(x);
security_xfrm_state_free(x);
xfrm_state_free(x);
diff --git a/samples/Kconfig b/samples/Kconfig
index c8dacb4dda80..b663d9d24114 100644
--- a/samples/Kconfig
+++ b/samples/Kconfig
@@ -169,4 +169,11 @@ config SAMPLE_VFS
as mount API and statx(). Note that this is restricted to the x86
arch whilst it accesses system calls that aren't yet in all arches.
+config SAMPLE_INTEL_MEI
+ bool "Build example program working with intel mei driver"
+ depends on INTEL_MEI
+ help
+ Build a sample program to work with the mei device.
+
endif # SAMPLES
diff --git a/samples/Makefile b/samples/Makefile
index 7d6e4ca28d69..d6062ab25347 100644
--- a/samples/Makefile
+++ b/samples/Makefile
@@ -20,3 +20,4 @@ obj-$(CONFIG_SAMPLE_TRACE_PRINTK) += trace_printk/
obj-$(CONFIG_VIDEO_PCI_SKELETON) += v4l/
obj-y += vfio-mdev/
subdir-$(CONFIG_SAMPLE_VFS) += vfs
+obj-$(CONFIG_SAMPLE_INTEL_MEI) += mei/
diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index 42b571cde177..1fc42ad8ff49 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -4,55 +4,53 @@ BPF_SAMPLES_PATH ?= $(abspath $(srctree)/$(src))
TOOLS_PATH := $(BPF_SAMPLES_PATH)/../../tools
# List of programs to build
-hostprogs-y := test_lru_dist
-hostprogs-y += sock_example
-hostprogs-y += fds_example
-hostprogs-y += sockex1
-hostprogs-y += sockex2
-hostprogs-y += sockex3
-hostprogs-y += tracex1
-hostprogs-y += tracex2
-hostprogs-y += tracex3
-hostprogs-y += tracex4
-hostprogs-y += tracex5
-hostprogs-y += tracex6
-hostprogs-y += tracex7
-hostprogs-y += test_probe_write_user
-hostprogs-y += trace_output
-hostprogs-y += lathist
-hostprogs-y += offwaketime
-hostprogs-y += spintest
-hostprogs-y += map_perf_test
-hostprogs-y += test_overhead
-hostprogs-y += test_cgrp2_array_pin
-hostprogs-y += test_cgrp2_attach
-hostprogs-y += test_cgrp2_sock
-hostprogs-y += test_cgrp2_sock2
-hostprogs-y += xdp1
-hostprogs-y += xdp2
-hostprogs-y += xdp_router_ipv4
-hostprogs-y += test_current_task_under_cgroup
-hostprogs-y += trace_event
-hostprogs-y += sampleip
-hostprogs-y += tc_l2_redirect
-hostprogs-y += lwt_len_hist
-hostprogs-y += xdp_tx_iptunnel
-hostprogs-y += test_map_in_map
-hostprogs-y += per_socket_stats_example
-hostprogs-y += xdp_redirect
-hostprogs-y += xdp_redirect_map
-hostprogs-y += xdp_redirect_cpu
-hostprogs-y += xdp_monitor
-hostprogs-y += xdp_rxq_info
-hostprogs-y += syscall_tp
-hostprogs-y += cpustat
-hostprogs-y += xdp_adjust_tail
-hostprogs-y += xdpsock
-hostprogs-y += xdp_fwd
-hostprogs-y += task_fd_query
-hostprogs-y += xdp_sample_pkts
-hostprogs-y += ibumad
-hostprogs-y += hbm
+tprogs-y := test_lru_dist
+tprogs-y += sock_example
+tprogs-y += fds_example
+tprogs-y += sockex1
+tprogs-y += sockex2
+tprogs-y += sockex3
+tprogs-y += tracex1
+tprogs-y += tracex2
+tprogs-y += tracex3
+tprogs-y += tracex4
+tprogs-y += tracex5
+tprogs-y += tracex6
+tprogs-y += tracex7
+tprogs-y += test_probe_write_user
+tprogs-y += trace_output
+tprogs-y += lathist
+tprogs-y += offwaketime
+tprogs-y += spintest
+tprogs-y += map_perf_test
+tprogs-y += test_overhead
+tprogs-y += test_cgrp2_array_pin
+tprogs-y += test_cgrp2_attach
+tprogs-y += test_cgrp2_sock
+tprogs-y += test_cgrp2_sock2
+tprogs-y += xdp1
+tprogs-y += xdp2
+tprogs-y += xdp_router_ipv4
+tprogs-y += test_current_task_under_cgroup
+tprogs-y += trace_event
+tprogs-y += sampleip
+tprogs-y += tc_l2_redirect
+tprogs-y += lwt_len_hist
+tprogs-y += xdp_tx_iptunnel
+tprogs-y += test_map_in_map
+tprogs-y += xdp_redirect_map
+tprogs-y += xdp_redirect_cpu
+tprogs-y += xdp_monitor
+tprogs-y += xdp_rxq_info
+tprogs-y += syscall_tp
+tprogs-y += cpustat
+tprogs-y += xdp_adjust_tail
+tprogs-y += xdpsock
+tprogs-y += xdp_fwd
+tprogs-y += task_fd_query
+tprogs-y += xdp_sample_pkts
+tprogs-y += ibumad
+tprogs-y += hbm
# Libbpf dependencies
LIBBPF = $(TOOLS_PATH)/lib/bpf/libbpf.a
@@ -111,7 +109,7 @@ ibumad-objs := bpf_load.o ibumad_user.o $(TRACE_HELPERS)
hbm-objs := bpf_load.o hbm.o $(CGROUP_HELPERS)
# Tell kbuild to always build the programs
-always := $(hostprogs-y)
+always := $(tprogs-y)
always += sockex1_kern.o
always += sockex2_kern.o
always += sockex3_kern.o
@@ -145,7 +143,6 @@ always += sampleip_kern.o
always += lwt_len_hist_kern.o
always += xdp_tx_iptunnel_kern.o
always += test_map_in_map_kern.o
-always += cookie_uid_helper_example.o
always += tcp_synrto_kern.o
always += tcp_rwnd_kern.o
always += tcp_bufs_kern.o
@@ -170,25 +167,44 @@ always += xdp_sample_pkts_kern.o
always += ibumad_kern.o
always += hbm_out_kern.o
always += hbm_edt_kern.o
+always += xdpsock_kern.o
-KBUILD_HOSTCFLAGS += -I$(objtree)/usr/include
-KBUILD_HOSTCFLAGS += -I$(srctree)/tools/lib/bpf/
-KBUILD_HOSTCFLAGS += -I$(srctree)/tools/testing/selftests/bpf/
-KBUILD_HOSTCFLAGS += -I$(srctree)/tools/lib/ -I$(srctree)/tools/include
-KBUILD_HOSTCFLAGS += -I$(srctree)/tools/perf
-KBUILD_HOSTCFLAGS += -DHAVE_ATTR_TEST=0
+ifeq ($(ARCH), arm)
+# Strip all flags except the -D__LINUX_ARM_ARCH__ option, which is needed to
+# handle Linux headers when ARM instruction set identification is requested.
+ARM_ARCH_SELECTOR := $(filter -D__LINUX_ARM_ARCH__%, $(KBUILD_CFLAGS))
+BPF_EXTRA_CFLAGS := $(ARM_ARCH_SELECTOR)
+TPROGS_CFLAGS += $(ARM_ARCH_SELECTOR)
+endif
+
+TPROGS_CFLAGS += -Wall -O2
+TPROGS_CFLAGS += -Wmissing-prototypes
+TPROGS_CFLAGS += -Wstrict-prototypes
+
+TPROGS_CFLAGS += -I$(objtree)/usr/include
+TPROGS_CFLAGS += -I$(srctree)/tools/lib/bpf/
+TPROGS_CFLAGS += -I$(srctree)/tools/testing/selftests/bpf/
+TPROGS_CFLAGS += -I$(srctree)/tools/lib/
+TPROGS_CFLAGS += -I$(srctree)/tools/include
+TPROGS_CFLAGS += -I$(srctree)/tools/perf
+TPROGS_CFLAGS += -DHAVE_ATTR_TEST=0
-HOSTCFLAGS_bpf_load.o += -I$(objtree)/usr/include -Wno-unused-variable
+ifdef SYSROOT
+TPROGS_CFLAGS += --sysroot=$(SYSROOT)
+TPROGS_LDFLAGS := -L$(SYSROOT)/usr/lib
+endif
+
+TPROGCFLAGS_bpf_load.o += -Wno-unused-variable
-KBUILD_HOSTLDLIBS += $(LIBBPF) -lelf
-HOSTLDLIBS_tracex4 += -lrt
-HOSTLDLIBS_trace_output += -lrt
-HOSTLDLIBS_map_perf_test += -lrt
-HOSTLDLIBS_test_overhead += -lrt
-HOSTLDLIBS_xdpsock += -pthread
+TPROGS_LDLIBS += $(LIBBPF) -lelf
+TPROGLDLIBS_tracex4 += -lrt
+TPROGLDLIBS_trace_output += -lrt
+TPROGLDLIBS_map_perf_test += -lrt
+TPROGLDLIBS_test_overhead += -lrt
+TPROGLDLIBS_xdpsock += -pthread
# Allows pointing LLC/CLANG to a LLVM backend with bpf support, redefine on cmdline:
-# make samples/bpf/ LLC=~/git/llvm/build/bin/llc CLANG=~/git/llvm/build/bin/clang
+# make M=samples/bpf/ LLC=~/git/llvm/build/bin/llc CLANG=~/git/llvm/build/bin/clang
LLC ?= llc
CLANG ?= clang
LLVM_OBJCOPY ?= llvm-objcopy
@@ -196,15 +212,14 @@ BTF_PAHOLE ?= pahole
# Detect that we're cross compiling and use the cross compiler
ifdef CROSS_COMPILE
-HOSTCC = $(CROSS_COMPILE)gcc
-CLANG_ARCH_ARGS = -target $(ARCH)
+CLANG_ARCH_ARGS = --target=$(notdir $(CROSS_COMPILE:%-=%))
endif
# Don't evaluate probes and warnings if we need to run make recursively
ifneq ($(src),)
-HDR_PROBE := $(shell echo "\#include <linux/types.h>\n struct list_head { int a; }; int main() { return 0; }" | \
- $(HOSTCC) $(KBUILD_HOSTCFLAGS) -x c - -o /dev/null 2>/dev/null && \
- echo okay)
+HDR_PROBE := $(shell printf "\#include <linux/types.h>\n struct list_head { int a; }; int main() { return 0; }" | \
+ $(CC) $(TPROGS_CFLAGS) $(TPROGS_LDFLAGS) -x c - \
+ -o /dev/null 2>/dev/null && echo okay)
ifeq ($(HDR_PROBE),)
$(warning WARNING: Detected possible issues with include path.)
@@ -220,10 +235,10 @@ BTF_LLVM_PROBE := $(shell echo "int main() { return 0; }" | \
/bin/rm -f ./llvm_btf_verify.o)
ifneq ($(BTF_LLVM_PROBE),)
- EXTRA_CFLAGS += -g
+ BPF_EXTRA_CFLAGS += -g
else
ifneq ($(and $(BTF_LLC_PROBE),$(BTF_PAHOLE_PROBE),$(BTF_OBJCOPY_PROBE)),)
- EXTRA_CFLAGS += -g
+ BPF_EXTRA_CFLAGS += -g
LLC_FLAGS += -mattr=dwarfris
DWARF2BTF = y
endif
@@ -232,7 +247,7 @@ endif
# Trick to allow make to be run from this directory
all:
- $(MAKE) -C ../../ $(CURDIR)/ BPF_SAMPLES_PATH=$(CURDIR)
+ $(MAKE) -C ../../ M=$(CURDIR) BPF_SAMPLES_PATH=$(CURDIR)
clean:
$(MAKE) -C ../../ M=$(CURDIR) clean
@@ -240,7 +255,8 @@ clean:
$(LIBBPF): FORCE
# Fix up variables inherited from Kbuild that tools/ build system won't like
- $(MAKE) -C $(dir $@) RM='rm -rf' LDFLAGS= srctree=$(BPF_SAMPLES_PATH)/../../ O=
+ $(MAKE) -C $(dir $@) RM='rm -rf' EXTRA_CFLAGS="$(TPROGS_CFLAGS)" \
+ LDFLAGS=$(TPROGS_LDFLAGS) srctree=$(BPF_SAMPLES_PATH)/../../ O=
$(obj)/syscall_nrs.h: $(obj)/syscall_nrs.s FORCE
$(call filechk,offsets,__SYSCALL_NRS_H__)
@@ -277,13 +293,16 @@ $(obj)/hbm_out_kern.o: $(src)/hbm.h $(src)/hbm_kern.h
$(obj)/hbm.o: $(src)/hbm.h
$(obj)/hbm_edt_kern.o: $(src)/hbm.h $(src)/hbm_kern.h
+-include $(BPF_SAMPLES_PATH)/Makefile.target
+
# asm/sysreg.h - inline assembly used by it is incompatible with llvm.
# But, there is no easy way to fix it, so just exclude it since it is
# useless for BPF samples.
$(obj)/%.o: $(src)/%.c
@echo " CLANG-bpf " $@
- $(Q)$(CLANG) $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(EXTRA_CFLAGS) -I$(obj) \
- -I$(srctree)/tools/testing/selftests/bpf/ \
+ $(Q)$(CLANG) $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(BPF_EXTRA_CFLAGS) \
+ -I$(obj) -I$(srctree)/tools/testing/selftests/bpf/ \
+ -I$(srctree)/tools/lib/bpf/ \
-D__KERNEL__ -D__BPF_TRACING__ -Wno-unused-value -Wno-pointer-sign \
-D__TARGET_ARCH_$(SRCARCH) -Wno-compare-distinct-pointer-types \
-Wno-gnu-variable-sized-type-not-at-end \
diff --git a/samples/bpf/Makefile.target b/samples/bpf/Makefile.target
new file mode 100644
index 000000000000..7621f55e2947
--- /dev/null
+++ b/samples/bpf/Makefile.target
@@ -0,0 +1,75 @@
+# SPDX-License-Identifier: GPL-2.0
+# ==========================================================================
+# Building binaries on the host system
+# These binaries are not used during kernel compilation; they are intended
+# to be built for the target board (which may, of course, be the host).
+# This file exists so that such binaries can be built to run on the target
+# rather than on the host system.
+#
+# Sample syntax
+# tprogs-y := xsk_example
+# Will compile xsk_example.c and create an executable named xsk_example
+#
+# tprogs-y := xdpsock
+# xdpsock-objs := xdpsock_1.o xdpsock_2.o
+# Will compile xdpsock_1.c and xdpsock_2.c, and then link the executable
+# xdpsock, based on xdpsock_1.o and xdpsock_2.o
+#
+# Derived from scripts/Makefile.host
+#
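+# Per-program compile and link flags can be supplied the same way as in
+# samples/bpf/Makefile (the names below are illustrative only):
+# TPROGCFLAGS_xdpsock_1.o := -Wno-unused-variable
+# TPROGLDLIBS_xdpsock := -pthread
+#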
+__tprogs := $(sort $(tprogs-y))
+
+# C code
+# Executables compiled from a single .c file
+tprog-csingle := $(foreach m,$(__tprogs), \
+ $(if $($(m)-objs),,$(m)))
+
+# C executables linked based on several .o files
+tprog-cmulti := $(foreach m,$(__tprogs),\
+ $(if $($(m)-objs),$(m)))
+
+# Object (.o) files compiled from .c files
+tprog-cobjs := $(sort $(foreach m,$(__tprogs),$($(m)-objs)))
+
+tprog-csingle := $(addprefix $(obj)/,$(tprog-csingle))
+tprog-cmulti := $(addprefix $(obj)/,$(tprog-cmulti))
+tprog-cobjs := $(addprefix $(obj)/,$(tprog-cobjs))
+
+#####
+# Handle options to gcc. Support building with separate output directory
+
+_tprogc_flags = $(TPROGS_CFLAGS) \
+ $(TPROGCFLAGS_$(basetarget).o)
+
+# $(objtree)/$(obj) for including generated headers from checkin source files
+ifeq ($(KBUILD_EXTMOD),)
+ifdef building_out_of_srctree
+_tprogc_flags += -I $(objtree)/$(obj)
+endif
+endif
+
+tprogc_flags = -Wp,-MD,$(depfile) $(_tprogc_flags)
+
+# Create executable from a single .c file
+# tprog-csingle -> Executable
+quiet_cmd_tprog-csingle = CC $@
+ cmd_tprog-csingle = $(CC) $(tprogc_flags) $(TPROGS_LDFLAGS) -o $@ $< \
+ $(TPROGS_LDLIBS) $(TPROGLDLIBS_$(@F))
+$(tprog-csingle): $(obj)/%: $(src)/%.c FORCE
+ $(call if_changed_dep,tprog-csingle)
+
+# Link an executable based on list of .o files, all plain c
+# tprog-cmulti -> executable
+quiet_cmd_tprog-cmulti = LD $@
+ cmd_tprog-cmulti = $(CC) $(tprogc_flags) $(TPROGS_LDFLAGS) -o $@ \
+ $(addprefix $(obj)/,$($(@F)-objs)) \
+ $(TPROGS_LDLIBS) $(TPROGLDLIBS_$(@F))
+$(tprog-cmulti): $(tprog-cobjs) FORCE
+ $(call if_changed,tprog-cmulti)
+$(call multi_depend, $(tprog-cmulti), , -objs)
+
+# Create .o file from a single .c file
+# tprog-cobjs -> .o
+quiet_cmd_tprog-cobjs = CC $@
+ cmd_tprog-cobjs = $(CC) $(tprogc_flags) -c -o $@ $<
+$(tprog-cobjs): $(obj)/%.o: $(src)/%.c FORCE
+ $(call if_changed_dep,tprog-cobjs)
diff --git a/samples/bpf/README.rst b/samples/bpf/README.rst
index 5f27e4faca50..dd34b2d26f1c 100644
--- a/samples/bpf/README.rst
+++ b/samples/bpf/README.rst
@@ -14,6 +14,20 @@ Compiling requires having installed:
Note that LLVM's tool 'llc' must support target 'bpf', list version
and supported targets with command: ``llc --version``
+Clean and configuration
+-----------------------
+
+It may be necessary to clean the tools, samples, or kernel before trying a new
+arch or after making changes (on demand)::
+
+ make -C tools clean
+ make -C samples/bpf clean
+ make clean
+
+Configure the kernel, for instance with defconfig::
+
+ make defconfig
+
Kernel headers
--------------
@@ -32,12 +46,10 @@ Compiling
For building the BPF samples, issue the below command from the kernel
top level directory::
- make samples/bpf/
-
-Do notice the "/" slash after the directory name.
+ make M=samples/bpf
It is also possible to call make from this directory. This will just
-hide the the invocation of make as above with the appended "/".
+hide the invocation of make as above.
Manually compiling LLVM with 'bpf' support
------------------------------------------
@@ -63,14 +75,31 @@ Quick sniplet for manually compiling LLVM and clang
It is also possible to point make to the newly compiled 'llc' or
'clang' command via redefining LLC or CLANG on the make command line::
- make samples/bpf/ LLC=~/git/llvm/build/bin/llc CLANG=~/git/llvm/build/bin/clang
+ make M=samples/bpf LLC=~/git/llvm/build/bin/llc CLANG=~/git/llvm/build/bin/clang
Cross compiling samples
-----------------------
In order to cross-compile, say for arm64 targets, export CROSS_COMPILE and ARCH
-environment variables before calling make. This will direct make to build
-samples for the cross target.
+environment variables before calling make. Do this before the clean,
+configuration, and header-install steps described above. This will direct make
+to build the samples for the cross target::
+
+ export ARCH=arm64
+ export CROSS_COMPILE="aarch64-linux-gnu-"
+
+Headers can also be installed on the RFS of the target board if they need to be
+kept in sync (this is optional; it also creates a local "usr/include" directory)::
+
+ make INSTALL_HDR_PATH=~/some_sysroot/usr headers_install
+
+Pointing to LLC and CLANG is not necessary if they are installed on the host
+and their supported targets include the appropriate arm64 arch (they usually
+support several arches). Build the samples::
+
+ make M=samples/bpf
+
+Or build the samples with SYSROOT if some header or library (say, libelf) is
+absent from the toolchain; provide the path to a file system containing the
+headers and libs, which can be the RFS of the target board::
-export ARCH=arm64
-export CROSS_COMPILE="aarch64-linux-gnu-"
-make samples/bpf/ LLC=~/git/llvm/build/bin/llc CLANG=~/git/llvm/build/bin/clang
+ make M=samples/bpf SYSROOT=~/some_sysroot
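+
+Putting it together, a complete cross-build flow might look like this (paths
+are illustrative)::
+
+ export ARCH=arm64
+ export CROSS_COMPILE="aarch64-linux-gnu-"
+ make defconfig
+ make INSTALL_HDR_PATH=~/some_sysroot/usr headers_install
+ make M=samples/bpf SYSROOT=~/some_sysroot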
diff --git a/samples/bpf/hbm.c b/samples/bpf/hbm.c
index e0fbab9bec83..829b68d87687 100644
--- a/samples/bpf/hbm.c
+++ b/samples/bpf/hbm.c
@@ -147,7 +147,7 @@ static int prog_load(char *prog)
}
if (ret) {
- printf("ERROR: load_bpf_file failed for: %s\n", prog);
+ printf("ERROR: bpf_prog_load_xattr failed for: %s\n", prog);
printf(" Output from verifier:\n%s\n------\n", bpf_log_buf);
ret = -1;
} else {
diff --git a/samples/bpf/hbm_kern.h b/samples/bpf/hbm_kern.h
index aa207a2eebbd..4edaf47876ca 100644
--- a/samples/bpf/hbm_kern.h
+++ b/samples/bpf/hbm_kern.h
@@ -59,21 +59,18 @@
#define BYTES_PER_NS(delta, rate) ((((u64)(delta)) * (rate)) >> 20)
#define BYTES_TO_NS(bytes, rate) div64_u64(((u64)(bytes)) << 20, (u64)(rate))
-struct bpf_map_def SEC("maps") queue_state = {
- .type = BPF_MAP_TYPE_CGROUP_STORAGE,
- .key_size = sizeof(struct bpf_cgroup_storage_key),
- .value_size = sizeof(struct hbm_vqueue),
-};
-BPF_ANNOTATE_KV_PAIR(queue_state, struct bpf_cgroup_storage_key,
- struct hbm_vqueue);
-
-struct bpf_map_def SEC("maps") queue_stats = {
- .type = BPF_MAP_TYPE_ARRAY,
- .key_size = sizeof(u32),
- .value_size = sizeof(struct hbm_queue_stats),
- .max_entries = 1,
-};
-BPF_ANNOTATE_KV_PAIR(queue_stats, int, struct hbm_queue_stats);
+struct {
+ __uint(type, BPF_MAP_TYPE_CGROUP_STORAGE);
+ __type(key, struct bpf_cgroup_storage_key);
+ __type(value, struct hbm_vqueue);
+} queue_state SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, u32);
+ __type(value, struct hbm_queue_stats);
+} queue_stats SEC(".maps");
struct hbm_pkt_info {
int cwnd;
diff --git a/samples/bpf/map_perf_test_kern.c b/samples/bpf/map_perf_test_kern.c
index 2b2ffb97018b..281bcdaee58e 100644
--- a/samples/bpf/map_perf_test_kern.c
+++ b/samples/bpf/map_perf_test_kern.c
@@ -9,25 +9,27 @@
#include <linux/version.h>
#include <uapi/linux/bpf.h>
#include "bpf_helpers.h"
+#include "bpf_legacy.h"
+#include "bpf_tracing.h"
#define MAX_ENTRIES 1000
#define MAX_NR_CPUS 1024
-struct bpf_map_def SEC("maps") hash_map = {
+struct bpf_map_def_legacy SEC("maps") hash_map = {
.type = BPF_MAP_TYPE_HASH,
.key_size = sizeof(u32),
.value_size = sizeof(long),
.max_entries = MAX_ENTRIES,
};
-struct bpf_map_def SEC("maps") lru_hash_map = {
+struct bpf_map_def_legacy SEC("maps") lru_hash_map = {
.type = BPF_MAP_TYPE_LRU_HASH,
.key_size = sizeof(u32),
.value_size = sizeof(long),
.max_entries = 10000,
};
-struct bpf_map_def SEC("maps") nocommon_lru_hash_map = {
+struct bpf_map_def_legacy SEC("maps") nocommon_lru_hash_map = {
.type = BPF_MAP_TYPE_LRU_HASH,
.key_size = sizeof(u32),
.value_size = sizeof(long),
@@ -35,7 +37,7 @@ struct bpf_map_def SEC("maps") nocommon_lru_hash_map = {
.map_flags = BPF_F_NO_COMMON_LRU,
};
-struct bpf_map_def SEC("maps") inner_lru_hash_map = {
+struct bpf_map_def_legacy SEC("maps") inner_lru_hash_map = {
.type = BPF_MAP_TYPE_LRU_HASH,
.key_size = sizeof(u32),
.value_size = sizeof(long),
@@ -44,20 +46,20 @@ struct bpf_map_def SEC("maps") inner_lru_hash_map = {
.numa_node = 0,
};
-struct bpf_map_def SEC("maps") array_of_lru_hashs = {
+struct bpf_map_def_legacy SEC("maps") array_of_lru_hashs = {
.type = BPF_MAP_TYPE_ARRAY_OF_MAPS,
.key_size = sizeof(u32),
.max_entries = MAX_NR_CPUS,
};
-struct bpf_map_def SEC("maps") percpu_hash_map = {
+struct bpf_map_def_legacy SEC("maps") percpu_hash_map = {
.type = BPF_MAP_TYPE_PERCPU_HASH,
.key_size = sizeof(u32),
.value_size = sizeof(long),
.max_entries = MAX_ENTRIES,
};
-struct bpf_map_def SEC("maps") hash_map_alloc = {
+struct bpf_map_def_legacy SEC("maps") hash_map_alloc = {
.type = BPF_MAP_TYPE_HASH,
.key_size = sizeof(u32),
.value_size = sizeof(long),
@@ -65,7 +67,7 @@ struct bpf_map_def SEC("maps") hash_map_alloc = {
.map_flags = BPF_F_NO_PREALLOC,
};
-struct bpf_map_def SEC("maps") percpu_hash_map_alloc = {
+struct bpf_map_def_legacy SEC("maps") percpu_hash_map_alloc = {
.type = BPF_MAP_TYPE_PERCPU_HASH,
.key_size = sizeof(u32),
.value_size = sizeof(long),
@@ -73,7 +75,7 @@ struct bpf_map_def SEC("maps") percpu_hash_map_alloc = {
.map_flags = BPF_F_NO_PREALLOC,
};
-struct bpf_map_def SEC("maps") lpm_trie_map_alloc = {
+struct bpf_map_def_legacy SEC("maps") lpm_trie_map_alloc = {
.type = BPF_MAP_TYPE_LPM_TRIE,
.key_size = 8,
.value_size = sizeof(long),
@@ -81,14 +83,14 @@ struct bpf_map_def SEC("maps") lpm_trie_map_alloc = {
.map_flags = BPF_F_NO_PREALLOC,
};
-struct bpf_map_def SEC("maps") array_map = {
+struct bpf_map_def_legacy SEC("maps") array_map = {
.type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(u32),
.value_size = sizeof(long),
.max_entries = MAX_ENTRIES,
};
-struct bpf_map_def SEC("maps") lru_hash_lookup_map = {
+struct bpf_map_def_legacy SEC("maps") lru_hash_lookup_map = {
.type = BPF_MAP_TYPE_LRU_HASH,
.key_size = sizeof(u32),
.value_size = sizeof(long),
@@ -179,8 +181,8 @@ int stress_lru_hmap_alloc(struct pt_regs *ctx)
if (addrlen != sizeof(*in6))
return 0;
- ret = bpf_probe_read(test_params.dst6, sizeof(test_params.dst6),
- &in6->sin6_addr);
+ ret = bpf_probe_read_user(test_params.dst6, sizeof(test_params.dst6),
+ &in6->sin6_addr);
if (ret)
goto done;
diff --git a/samples/bpf/offwaketime_kern.c b/samples/bpf/offwaketime_kern.c
index e7d9a0a3d45b..9cb5207a692f 100644
--- a/samples/bpf/offwaketime_kern.c
+++ b/samples/bpf/offwaketime_kern.c
@@ -6,6 +6,7 @@
*/
#include <uapi/linux/bpf.h>
#include "bpf_helpers.h"
+#include "bpf_tracing.h"
#include <uapi/linux/ptrace.h>
#include <uapi/linux/perf_event.h>
#include <linux/version.h>
diff --git a/samples/bpf/parse_ldabs.c b/samples/bpf/parse_ldabs.c
index 6db6b21fdc6d..ef5892377beb 100644
--- a/samples/bpf/parse_ldabs.c
+++ b/samples/bpf/parse_ldabs.c
@@ -12,6 +12,7 @@
#include <linux/udp.h>
#include <uapi/linux/bpf.h>
#include "bpf_helpers.h"
+#include "bpf_legacy.h"
#define DEFAULT_PKTGEN_UDP_PORT 9
#define IP_MF 0x2000
diff --git a/samples/bpf/sampleip_kern.c b/samples/bpf/sampleip_kern.c
index ceabf31079cf..4a190893894f 100644
--- a/samples/bpf/sampleip_kern.c
+++ b/samples/bpf/sampleip_kern.c
@@ -9,6 +9,7 @@
#include <uapi/linux/bpf.h>
#include <uapi/linux/bpf_perf_event.h>
#include "bpf_helpers.h"
+#include "bpf_tracing.h"
#define MAX_IPS 8192
diff --git a/samples/bpf/sockex1_kern.c b/samples/bpf/sockex1_kern.c
index ed18e9a4909c..2408dbfb7a21 100644
--- a/samples/bpf/sockex1_kern.c
+++ b/samples/bpf/sockex1_kern.c
@@ -3,13 +3,14 @@
#include <uapi/linux/if_packet.h>
#include <uapi/linux/ip.h>
#include "bpf_helpers.h"
+#include "bpf_legacy.h"
-struct bpf_map_def SEC("maps") my_map = {
- .type = BPF_MAP_TYPE_ARRAY,
- .key_size = sizeof(u32),
- .value_size = sizeof(long),
- .max_entries = 256,
-};
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, u32);
+ __type(value, long);
+ __uint(max_entries, 256);
+} my_map SEC(".maps");
SEC("socket1")
int bpf_prog1(struct __sk_buff *skb)
diff --git a/samples/bpf/sockex2_kern.c b/samples/bpf/sockex2_kern.c
index f2f9dbc021b0..a7bcd03bf529 100644
--- a/samples/bpf/sockex2_kern.c
+++ b/samples/bpf/sockex2_kern.c
@@ -1,5 +1,6 @@
#include <uapi/linux/bpf.h>
#include "bpf_helpers.h"
+#include "bpf_legacy.h"
#include <uapi/linux/in.h>
#include <uapi/linux/if.h>
#include <uapi/linux/if_ether.h>
@@ -189,12 +190,12 @@ struct pair {
long bytes;
};
-struct bpf_map_def SEC("maps") hash_map = {
- .type = BPF_MAP_TYPE_HASH,
- .key_size = sizeof(__be32),
- .value_size = sizeof(struct pair),
- .max_entries = 1024,
-};
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, __be32);
+ __type(value, struct pair);
+ __uint(max_entries, 1024);
+} hash_map SEC(".maps");
SEC("socket2")
int bpf_prog2(struct __sk_buff *skb)
diff --git a/samples/bpf/sockex3_kern.c b/samples/bpf/sockex3_kern.c
index c527b57d3ec8..151dd842ecc0 100644
--- a/samples/bpf/sockex3_kern.c
+++ b/samples/bpf/sockex3_kern.c
@@ -6,6 +6,7 @@
*/
#include <uapi/linux/bpf.h>
#include "bpf_helpers.h"
+#include "bpf_legacy.h"
#include <uapi/linux/in.h>
#include <uapi/linux/if.h>
#include <uapi/linux/if_ether.h>
diff --git a/samples/bpf/spintest_kern.c b/samples/bpf/spintest_kern.c
index ce0167d09cdc..6e9478aa2938 100644
--- a/samples/bpf/spintest_kern.c
+++ b/samples/bpf/spintest_kern.c
@@ -10,6 +10,7 @@
#include <uapi/linux/bpf.h>
#include <uapi/linux/perf_event.h>
#include "bpf_helpers.h"
+#include "bpf_tracing.h"
struct bpf_map_def SEC("maps") my_map = {
.type = BPF_MAP_TYPE_HASH,
diff --git a/samples/bpf/tcbpf1_kern.c b/samples/bpf/tcbpf1_kern.c
index 274c884c87fe..ff43341bdfce 100644
--- a/samples/bpf/tcbpf1_kern.c
+++ b/samples/bpf/tcbpf1_kern.c
@@ -8,6 +8,7 @@
#include <uapi/linux/filter.h>
#include <uapi/linux/pkt_cls.h>
#include "bpf_helpers.h"
+#include "bpf_legacy.h"
/* compiler workaround */
#define _htonl __builtin_bswap32
diff --git a/samples/bpf/test_map_in_map_kern.c b/samples/bpf/test_map_in_map_kern.c
index 42c44d091dd1..32ee752f19df 100644
--- a/samples/bpf/test_map_in_map_kern.c
+++ b/samples/bpf/test_map_in_map_kern.c
@@ -11,11 +11,13 @@
#include <uapi/linux/bpf.h>
#include <uapi/linux/in6.h>
#include "bpf_helpers.h"
+#include "bpf_legacy.h"
+#include "bpf_tracing.h"
#define MAX_NR_PORTS 65536
/* map #0 */
-struct bpf_map_def SEC("maps") port_a = {
+struct bpf_map_def_legacy SEC("maps") port_a = {
.type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(u32),
.value_size = sizeof(int),
@@ -23,7 +25,7 @@ struct bpf_map_def SEC("maps") port_a = {
};
/* map #1 */
-struct bpf_map_def SEC("maps") port_h = {
+struct bpf_map_def_legacy SEC("maps") port_h = {
.type = BPF_MAP_TYPE_HASH,
.key_size = sizeof(u32),
.value_size = sizeof(int),
@@ -31,7 +33,7 @@ struct bpf_map_def SEC("maps") port_h = {
};
/* map #2 */
-struct bpf_map_def SEC("maps") reg_result_h = {
+struct bpf_map_def_legacy SEC("maps") reg_result_h = {
.type = BPF_MAP_TYPE_HASH,
.key_size = sizeof(u32),
.value_size = sizeof(int),
@@ -39,7 +41,7 @@ struct bpf_map_def SEC("maps") reg_result_h = {
};
/* map #3 */
-struct bpf_map_def SEC("maps") inline_result_h = {
+struct bpf_map_def_legacy SEC("maps") inline_result_h = {
.type = BPF_MAP_TYPE_HASH,
.key_size = sizeof(u32),
.value_size = sizeof(int),
@@ -47,7 +49,7 @@ struct bpf_map_def SEC("maps") inline_result_h = {
};
/* map #4 */ /* Test case #0 */
-struct bpf_map_def SEC("maps") a_of_port_a = {
+struct bpf_map_def_legacy SEC("maps") a_of_port_a = {
.type = BPF_MAP_TYPE_ARRAY_OF_MAPS,
.key_size = sizeof(u32),
.inner_map_idx = 0, /* map_fd[0] is port_a */
@@ -55,7 +57,7 @@ struct bpf_map_def SEC("maps") a_of_port_a = {
};
/* map #5 */ /* Test case #1 */
-struct bpf_map_def SEC("maps") h_of_port_a = {
+struct bpf_map_def_legacy SEC("maps") h_of_port_a = {
.type = BPF_MAP_TYPE_HASH_OF_MAPS,
.key_size = sizeof(u32),
.inner_map_idx = 0, /* map_fd[0] is port_a */
@@ -63,7 +65,7 @@ struct bpf_map_def SEC("maps") h_of_port_a = {
};
/* map #6 */ /* Test case #2 */
-struct bpf_map_def SEC("maps") h_of_port_h = {
+struct bpf_map_def_legacy SEC("maps") h_of_port_h = {
.type = BPF_MAP_TYPE_HASH_OF_MAPS,
.key_size = sizeof(u32),
.inner_map_idx = 1, /* map_fd[1] is port_h */
@@ -116,7 +118,7 @@ int trace_sys_connect(struct pt_regs *ctx)
if (addrlen != sizeof(*in6))
return 0;
- ret = bpf_probe_read(dst6, sizeof(dst6), &in6->sin6_addr);
+ ret = bpf_probe_read_user(dst6, sizeof(dst6), &in6->sin6_addr);
if (ret) {
inline_ret = ret;
goto done;
@@ -127,7 +129,7 @@ int trace_sys_connect(struct pt_regs *ctx)
test_case = dst6[7];
- ret = bpf_probe_read(&port, sizeof(port), &in6->sin6_port);
+ ret = bpf_probe_read_user(&port, sizeof(port), &in6->sin6_port);
if (ret) {
inline_ret = ret;
goto done;
diff --git a/samples/bpf/test_overhead_kprobe_kern.c b/samples/bpf/test_overhead_kprobe_kern.c
index 468a66a92ef9..8d2518e68db9 100644
--- a/samples/bpf/test_overhead_kprobe_kern.c
+++ b/samples/bpf/test_overhead_kprobe_kern.c
@@ -8,6 +8,7 @@
#include <linux/ptrace.h>
#include <uapi/linux/bpf.h>
#include "bpf_helpers.h"
+#include "bpf_tracing.h"
#define _(P) ({typeof(P) val = 0; bpf_probe_read(&val, sizeof(val), &P); val;})
diff --git a/samples/bpf/test_probe_write_user_kern.c b/samples/bpf/test_probe_write_user_kern.c
index 3a677c807044..b7c48f37132c 100644
--- a/samples/bpf/test_probe_write_user_kern.c
+++ b/samples/bpf/test_probe_write_user_kern.c
@@ -9,6 +9,7 @@
#include <uapi/linux/bpf.h>
#include <linux/version.h>
#include "bpf_helpers.h"
+#include "bpf_tracing.h"
struct bpf_map_def SEC("maps") dnat_map = {
.type = BPF_MAP_TYPE_HASH,
@@ -36,7 +37,7 @@ int bpf_prog1(struct pt_regs *ctx)
if (sockaddr_len > sizeof(orig_addr))
return 0;
- if (bpf_probe_read(&orig_addr, sizeof(orig_addr), sockaddr_arg) != 0)
+ if (bpf_probe_read_user(&orig_addr, sizeof(orig_addr), sockaddr_arg) != 0)
return 0;
mapped_addr = bpf_map_lookup_elem(&dnat_map, &orig_addr);
diff --git a/samples/bpf/trace_event_kern.c b/samples/bpf/trace_event_kern.c
index 7068fbdde951..8dc18d233a27 100644
--- a/samples/bpf/trace_event_kern.c
+++ b/samples/bpf/trace_event_kern.c
@@ -10,6 +10,7 @@
#include <uapi/linux/bpf_perf_event.h>
#include <uapi/linux/perf_event.h>
#include "bpf_helpers.h"
+#include "bpf_tracing.h"
struct key_t {
char comm[TASK_COMM_LEN];
diff --git a/samples/bpf/tracex1_kern.c b/samples/bpf/tracex1_kern.c
index 107da148820f..1a15f6605129 100644
--- a/samples/bpf/tracex1_kern.c
+++ b/samples/bpf/tracex1_kern.c
@@ -9,6 +9,7 @@
#include <uapi/linux/bpf.h>
#include <linux/version.h>
#include "bpf_helpers.h"
+#include "bpf_tracing.h"
#define _(P) ({typeof(P) val = 0; bpf_probe_read(&val, sizeof(val), &P); val;})
diff --git a/samples/bpf/tracex2_kern.c b/samples/bpf/tracex2_kern.c
index 5e11c20ce5ec..d70b3ea79ea7 100644
--- a/samples/bpf/tracex2_kern.c
+++ b/samples/bpf/tracex2_kern.c
@@ -9,6 +9,7 @@
#include <linux/version.h>
#include <uapi/linux/bpf.h>
#include "bpf_helpers.h"
+#include "bpf_tracing.h"
struct bpf_map_def SEC("maps") my_map = {
.type = BPF_MAP_TYPE_HASH,
diff --git a/samples/bpf/tracex3_kern.c b/samples/bpf/tracex3_kern.c
index ea1d4c19c132..9af546bebfa9 100644
--- a/samples/bpf/tracex3_kern.c
+++ b/samples/bpf/tracex3_kern.c
@@ -9,6 +9,7 @@
#include <linux/version.h>
#include <uapi/linux/bpf.h>
#include "bpf_helpers.h"
+#include "bpf_tracing.h"
struct bpf_map_def SEC("maps") my_map = {
.type = BPF_MAP_TYPE_HASH,
diff --git a/samples/bpf/tracex4_kern.c b/samples/bpf/tracex4_kern.c
index 6dd8e384de96..2a02cbe9d9a1 100644
--- a/samples/bpf/tracex4_kern.c
+++ b/samples/bpf/tracex4_kern.c
@@ -8,6 +8,7 @@
#include <linux/version.h>
#include <uapi/linux/bpf.h>
#include "bpf_helpers.h"
+#include "bpf_tracing.h"
struct pair {
u64 val;
diff --git a/samples/bpf/tracex5_kern.c b/samples/bpf/tracex5_kern.c
index 35cb0eed3be5..b3557b21a8fe 100644
--- a/samples/bpf/tracex5_kern.c
+++ b/samples/bpf/tracex5_kern.c
@@ -11,6 +11,7 @@
#include <uapi/linux/unistd.h>
#include "syscall_nrs.h"
#include "bpf_helpers.h"
+#include "bpf_tracing.h"
#define PROG(F) SEC("kprobe/"__stringify(F)) int bpf_func_##F
diff --git a/samples/bpf/xdp1_kern.c b/samples/bpf/xdp1_kern.c
index 219742106bfd..db6870aee42c 100644
--- a/samples/bpf/xdp1_kern.c
+++ b/samples/bpf/xdp1_kern.c
@@ -14,12 +14,12 @@
#include <linux/ipv6.h>
#include "bpf_helpers.h"
-struct bpf_map_def SEC("maps") rxcnt = {
- .type = BPF_MAP_TYPE_PERCPU_ARRAY,
- .key_size = sizeof(u32),
- .value_size = sizeof(long),
- .max_entries = 256,
-};
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __type(key, u32);
+ __type(value, long);
+ __uint(max_entries, 256);
+} rxcnt SEC(".maps");
static int parse_ipv4(void *data, u64 nh_off, void *data_end)
{
diff --git a/samples/bpf/xdp1_user.c b/samples/bpf/xdp1_user.c
index a8e5fa02e8a8..3e553eed95a7 100644
--- a/samples/bpf/xdp1_user.c
+++ b/samples/bpf/xdp1_user.c
@@ -139,7 +139,7 @@ int main(int argc, char **argv)
map_fd = bpf_map__fd(map);
if (!prog_fd) {
- printf("load_bpf_file: %s\n", strerror(errno));
+ printf("bpf_prog_load_xattr: %s\n", strerror(errno));
return 1;
}
diff --git a/samples/bpf/xdp2_kern.c b/samples/bpf/xdp2_kern.c
index e01288867d15..c74b52c6d945 100644
--- a/samples/bpf/xdp2_kern.c
+++ b/samples/bpf/xdp2_kern.c
@@ -14,12 +14,12 @@
#include <linux/ipv6.h>
#include "bpf_helpers.h"
-struct bpf_map_def SEC("maps") rxcnt = {
- .type = BPF_MAP_TYPE_PERCPU_ARRAY,
- .key_size = sizeof(u32),
- .value_size = sizeof(long),
- .max_entries = 256,
-};
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __type(key, u32);
+ __type(value, long);
+ __uint(max_entries, 256);
+} rxcnt SEC(".maps");
static void swap_src_dst_mac(void *data)
{
diff --git a/samples/bpf/xdp_adjust_tail_kern.c b/samples/bpf/xdp_adjust_tail_kern.c
index 411fdb21f8bc..0f707e0fb375 100644
--- a/samples/bpf/xdp_adjust_tail_kern.c
+++ b/samples/bpf/xdp_adjust_tail_kern.c
@@ -25,12 +25,15 @@
#define ICMP_TOOBIG_SIZE 98
#define ICMP_TOOBIG_PAYLOAD_SIZE 92
-struct bpf_map_def SEC("maps") icmpcnt = {
- .type = BPF_MAP_TYPE_ARRAY,
- .key_size = sizeof(__u32),
- .value_size = sizeof(__u64),
- .max_entries = 1,
-};
+/* volatile to prevent compiler optimizations */
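+/* (userspace may update the value at runtime via the .data section map) */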
+static volatile __u32 max_pcktsz = MAX_PCKT_SIZE;
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, __u32);
+ __type(value, __u64);
+ __uint(max_entries, 1);
+} icmpcnt SEC(".maps");
static __always_inline void count_icmp(void)
{
@@ -92,7 +95,7 @@ static __always_inline int send_icmp4_too_big(struct xdp_md *xdp)
orig_iph = data + off;
icmp_hdr->type = ICMP_DEST_UNREACH;
icmp_hdr->code = ICMP_FRAG_NEEDED;
- icmp_hdr->un.frag.mtu = htons(MAX_PCKT_SIZE-sizeof(struct ethhdr));
+ icmp_hdr->un.frag.mtu = htons(max_pcktsz - sizeof(struct ethhdr));
icmp_hdr->checksum = 0;
ipv4_csum(icmp_hdr, ICMP_TOOBIG_PAYLOAD_SIZE, &csum);
icmp_hdr->checksum = csum;
@@ -121,7 +124,7 @@ static __always_inline int handle_ipv4(struct xdp_md *xdp)
int pckt_size = data_end - data;
int offset;
- if (pckt_size > MAX_PCKT_SIZE) {
+ if (pckt_size > max(max_pcktsz, ICMP_TOOBIG_SIZE)) {
offset = pckt_size - ICMP_TOOBIG_SIZE;
if (bpf_xdp_adjust_tail(xdp, 0 - offset))
return XDP_PASS;
diff --git a/samples/bpf/xdp_adjust_tail_user.c b/samples/bpf/xdp_adjust_tail_user.c
index a3596b617c4c..d86e9ad0356b 100644
--- a/samples/bpf/xdp_adjust_tail_user.c
+++ b/samples/bpf/xdp_adjust_tail_user.c
@@ -23,6 +23,7 @@
#include "libbpf.h"
#define STATS_INTERVAL_S 2U
+#define MAX_PCKT_SIZE 600
static int ifindex = -1;
static __u32 xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST;
@@ -72,6 +73,7 @@ static void usage(const char *cmd)
printf("Usage: %s [...]\n", cmd);
printf(" -i <ifname|ifindex> Interface\n");
printf(" -T <stop-after-X-seconds> Default: 0 (forever)\n");
+ printf(" -P <MAX_PCKT_SIZE> Default: %u\n", MAX_PCKT_SIZE);
printf(" -S use skb-mode\n");
printf(" -N enforce native mode\n");
printf(" -F force loading prog\n");
@@ -85,13 +87,14 @@ int main(int argc, char **argv)
.prog_type = BPF_PROG_TYPE_XDP,
};
unsigned char opt_flags[256] = {};
- const char *optstr = "i:T:SNFh";
+ const char *optstr = "i:T:P:SNFh";
struct bpf_prog_info info = {};
__u32 info_len = sizeof(info);
unsigned int kill_after_s = 0;
int i, prog_fd, map_fd, opt;
struct bpf_object *obj;
- struct bpf_map *map;
+ __u32 max_pckt_size = 0;
+ __u32 key = 0;
char filename[256];
int err;
@@ -110,6 +113,9 @@ int main(int argc, char **argv)
case 'T':
kill_after_s = atoi(optarg);
break;
+ case 'P':
+ max_pckt_size = atoi(optarg);
+ break;
case 'S':
xdp_flags |= XDP_FLAGS_SKB_MODE;
break;
@@ -150,15 +156,20 @@ int main(int argc, char **argv)
if (bpf_prog_load_xattr(&prog_load_attr, &obj, &prog_fd))
return 1;
- map = bpf_map__next(NULL, obj);
- if (!map) {
- printf("finding a map in obj file failed\n");
- return 1;
+ /* static global var 'max_pcktsz' is accessible from .data section */
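+ /* libbpf names the data map "<object name>.data", truncated to fit the
+ * kernel's map-name limit, hence "xdp_adju.data" below.
+ */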
+ if (max_pckt_size) {
+ map_fd = bpf_object__find_map_fd_by_name(obj, "xdp_adju.data");
+ if (map_fd < 0) {
+ printf("finding a max_pcktsz map in obj file failed\n");
+ return 1;
+ }
+ bpf_map_update_elem(map_fd, &key, &max_pckt_size, BPF_ANY);
}
- map_fd = bpf_map__fd(map);
- if (!prog_fd) {
- printf("load_bpf_file: %s\n", strerror(errno));
+ /* fetch icmpcnt map */
+ map_fd = bpf_object__find_map_fd_by_name(obj, "icmpcnt");
+ if (map_fd < 0) {
+ printf("finding a icmpcnt map in obj file failed\n");
return 1;
}
diff --git a/samples/bpf/xdp_fwd_kern.c b/samples/bpf/xdp_fwd_kern.c
index 701a30f258b1..d013029aeaa2 100644
--- a/samples/bpf/xdp_fwd_kern.c
+++ b/samples/bpf/xdp_fwd_kern.c
@@ -23,13 +23,12 @@
#define IPV6_FLOWINFO_MASK cpu_to_be32(0x0FFFFFFF)
-/* For TX-traffic redirect requires net_device ifindex to be in this devmap */
-struct bpf_map_def SEC("maps") xdp_tx_ports = {
- .type = BPF_MAP_TYPE_DEVMAP,
- .key_size = sizeof(int),
- .value_size = sizeof(int),
- .max_entries = 64,
-};
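+/* For TX-traffic redirect, the net_device ifindex must be in this devmap */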
+struct {
+ __uint(type, BPF_MAP_TYPE_DEVMAP);
+ __uint(key_size, sizeof(int));
+ __uint(value_size, sizeof(int));
+ __uint(max_entries, 64);
+} xdp_tx_ports SEC(".maps");
/* from include/net/ip.h */
static __always_inline int ip_decrease_ttl(struct iphdr *iph)
diff --git a/samples/bpf/xdp_redirect_cpu_kern.c b/samples/bpf/xdp_redirect_cpu_kern.c
index a306d1c75622..cfcc31e51197 100644
--- a/samples/bpf/xdp_redirect_cpu_kern.c
+++ b/samples/bpf/xdp_redirect_cpu_kern.c
@@ -18,12 +18,12 @@
#define MAX_CPUS 64 /* WARNING - sync with _user.c */
/* Special map type that can XDP_REDIRECT frames to another CPU */
-struct bpf_map_def SEC("maps") cpu_map = {
- .type = BPF_MAP_TYPE_CPUMAP,
- .key_size = sizeof(u32),
- .value_size = sizeof(u32),
- .max_entries = MAX_CPUS,
-};
+struct {
+ __uint(type, BPF_MAP_TYPE_CPUMAP);
+ __uint(key_size, sizeof(u32));
+ __uint(value_size, sizeof(u32));
+ __uint(max_entries, MAX_CPUS);
+} cpu_map SEC(".maps");
/* Common stats data record to keep userspace more simple */
struct datarec {
@@ -35,67 +35,67 @@ struct datarec {
/* Count RX packets, as XDP bpf_prog doesn't get direct TX-success
* feedback. Redirect TX errors can be caught via a tracepoint.
*/
-struct bpf_map_def SEC("maps") rx_cnt = {
- .type = BPF_MAP_TYPE_PERCPU_ARRAY,
- .key_size = sizeof(u32),
- .value_size = sizeof(struct datarec),
- .max_entries = 1,
-};
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __type(key, u32);
+ __type(value, struct datarec);
+ __uint(max_entries, 1);
+} rx_cnt SEC(".maps");
/* Used by trace point */
-struct bpf_map_def SEC("maps") redirect_err_cnt = {
- .type = BPF_MAP_TYPE_PERCPU_ARRAY,
- .key_size = sizeof(u32),
- .value_size = sizeof(struct datarec),
- .max_entries = 2,
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __type(key, u32);
+ __type(value, struct datarec);
+ __uint(max_entries, 2);
/* TODO: have entries for all possible errno's */
-};
+} redirect_err_cnt SEC(".maps");
/* Used by trace point */
-struct bpf_map_def SEC("maps") cpumap_enqueue_cnt = {
- .type = BPF_MAP_TYPE_PERCPU_ARRAY,
- .key_size = sizeof(u32),
- .value_size = sizeof(struct datarec),
- .max_entries = MAX_CPUS,
-};
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __type(key, u32);
+ __type(value, struct datarec);
+ __uint(max_entries, MAX_CPUS);
+} cpumap_enqueue_cnt SEC(".maps");
/* Used by trace point */
-struct bpf_map_def SEC("maps") cpumap_kthread_cnt = {
- .type = BPF_MAP_TYPE_PERCPU_ARRAY,
- .key_size = sizeof(u32),
- .value_size = sizeof(struct datarec),
- .max_entries = 1,
-};
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __type(key, u32);
+ __type(value, struct datarec);
+ __uint(max_entries, 1);
+} cpumap_kthread_cnt SEC(".maps");
/* Set of maps controlling available CPU, and for iterating through
* selectable redirect CPUs.
*/
-struct bpf_map_def SEC("maps") cpus_available = {
- .type = BPF_MAP_TYPE_ARRAY,
- .key_size = sizeof(u32),
- .value_size = sizeof(u32),
- .max_entries = MAX_CPUS,
-};
-struct bpf_map_def SEC("maps") cpus_count = {
- .type = BPF_MAP_TYPE_ARRAY,
- .key_size = sizeof(u32),
- .value_size = sizeof(u32),
- .max_entries = 1,
-};
-struct bpf_map_def SEC("maps") cpus_iterator = {
- .type = BPF_MAP_TYPE_PERCPU_ARRAY,
- .key_size = sizeof(u32),
- .value_size = sizeof(u32),
- .max_entries = 1,
-};
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, u32);
+ __type(value, u32);
+ __uint(max_entries, MAX_CPUS);
+} cpus_available SEC(".maps");
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, u32);
+ __type(value, u32);
+ __uint(max_entries, 1);
+} cpus_count SEC(".maps");
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __type(key, u32);
+ __type(value, u32);
+ __uint(max_entries, 1);
+} cpus_iterator SEC(".maps");
/* Used by trace point */
-struct bpf_map_def SEC("maps") exception_cnt = {
- .type = BPF_MAP_TYPE_PERCPU_ARRAY,
- .key_size = sizeof(u32),
- .value_size = sizeof(struct datarec),
- .max_entries = 1,
-};
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __type(key, u32);
+ __type(value, struct datarec);
+ __uint(max_entries, 1);
+} exception_cnt SEC(".maps");
/* Helper parse functions */
diff --git a/samples/bpf/xdp_redirect_kern.c b/samples/bpf/xdp_redirect_kern.c
index 8abb151e385f..1f0b7d05abb2 100644
--- a/samples/bpf/xdp_redirect_kern.c
+++ b/samples/bpf/xdp_redirect_kern.c
@@ -19,22 +19,22 @@
#include <linux/ipv6.h>
#include "bpf_helpers.h"
-struct bpf_map_def SEC("maps") tx_port = {
- .type = BPF_MAP_TYPE_ARRAY,
- .key_size = sizeof(int),
- .value_size = sizeof(int),
- .max_entries = 1,
-};
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, int);
+ __type(value, int);
+ __uint(max_entries, 1);
+} tx_port SEC(".maps");
/* Count RX packets, as XDP bpf_prog doesn't get direct TX-success
* feedback. Redirect TX errors can be caught via a tracepoint.
*/
-struct bpf_map_def SEC("maps") rxcnt = {
- .type = BPF_MAP_TYPE_PERCPU_ARRAY,
- .key_size = sizeof(u32),
- .value_size = sizeof(long),
- .max_entries = 1,
-};
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __type(key, u32);
+ __type(value, long);
+ __uint(max_entries, 1);
+} rxcnt SEC(".maps");
static void swap_src_dst_mac(void *data)
{
diff --git a/samples/bpf/xdp_redirect_map_kern.c b/samples/bpf/xdp_redirect_map_kern.c
index 740a529ba84f..4631b484c432 100644
--- a/samples/bpf/xdp_redirect_map_kern.c
+++ b/samples/bpf/xdp_redirect_map_kern.c
@@ -19,22 +19,22 @@
#include <linux/ipv6.h>
#include "bpf_helpers.h"
-struct bpf_map_def SEC("maps") tx_port = {
- .type = BPF_MAP_TYPE_DEVMAP,
- .key_size = sizeof(int),
- .value_size = sizeof(int),
- .max_entries = 100,
-};
+struct {
+ __uint(type, BPF_MAP_TYPE_DEVMAP);
+ __uint(key_size, sizeof(int));
+ __uint(value_size, sizeof(int));
+ __uint(max_entries, 100);
+} tx_port SEC(".maps");
/* Count RX packets, as XDP bpf_prog doesn't get direct TX-success
* feedback. Redirect TX errors can be caught via a tracepoint.
*/
-struct bpf_map_def SEC("maps") rxcnt = {
- .type = BPF_MAP_TYPE_PERCPU_ARRAY,
- .key_size = sizeof(u32),
- .value_size = sizeof(long),
- .max_entries = 1,
-};
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __type(key, u32);
+ __type(value, long);
+ __uint(max_entries, 1);
+} rxcnt SEC(".maps");
static void swap_src_dst_mac(void *data)
{
diff --git a/samples/bpf/xdp_router_ipv4_kern.c b/samples/bpf/xdp_router_ipv4_kern.c
index 993f56bc7b9a..bf11efc8e949 100644
--- a/samples/bpf/xdp_router_ipv4_kern.c
+++ b/samples/bpf/xdp_router_ipv4_kern.c
@@ -42,44 +42,44 @@ struct direct_map {
};
/* Map for trie implementation*/
-struct bpf_map_def SEC("maps") lpm_map = {
- .type = BPF_MAP_TYPE_LPM_TRIE,
- .key_size = 8,
- .value_size = sizeof(struct trie_value),
- .max_entries = 50,
- .map_flags = BPF_F_NO_PREALLOC,
-};
+struct {
+ __uint(type, BPF_MAP_TYPE_LPM_TRIE);
+ __uint(key_size, 8);
+ __uint(value_size, sizeof(struct trie_value));
+ __uint(max_entries, 50);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+} lpm_map SEC(".maps");
/* Map for counter*/
-struct bpf_map_def SEC("maps") rxcnt = {
- .type = BPF_MAP_TYPE_PERCPU_ARRAY,
- .key_size = sizeof(u32),
- .value_size = sizeof(u64),
- .max_entries = 256,
-};
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __type(key, u32);
+ __type(value, u64);
+ __uint(max_entries, 256);
+} rxcnt SEC(".maps");
/* Map for ARP table*/
-struct bpf_map_def SEC("maps") arp_table = {
- .type = BPF_MAP_TYPE_HASH,
- .key_size = sizeof(__be32),
- .value_size = sizeof(__be64),
- .max_entries = 50,
-};
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, __be32);
+ __type(value, __be64);
+ __uint(max_entries, 50);
+} arp_table SEC(".maps");
/* Map to keep the exact match entries in the route table*/
-struct bpf_map_def SEC("maps") exact_match = {
- .type = BPF_MAP_TYPE_HASH,
- .key_size = sizeof(__be32),
- .value_size = sizeof(struct direct_map),
- .max_entries = 50,
-};
-
-struct bpf_map_def SEC("maps") tx_port = {
- .type = BPF_MAP_TYPE_DEVMAP,
- .key_size = sizeof(int),
- .value_size = sizeof(int),
- .max_entries = 100,
-};
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, __be32);
+ __type(value, struct direct_map);
+ __uint(max_entries, 50);
+} exact_match SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_DEVMAP);
+ __uint(key_size, sizeof(int));
+ __uint(value_size, sizeof(int));
+ __uint(max_entries, 100);
+} tx_port SEC(".maps");
/* Function to set source and destination mac of the packet */
static inline void set_src_dst_mac(void *data, void *src, void *dst)
diff --git a/samples/bpf/xdp_rxq_info_kern.c b/samples/bpf/xdp_rxq_info_kern.c
index 222a83eed1cb..272d0f82a6b5 100644
--- a/samples/bpf/xdp_rxq_info_kern.c
+++ b/samples/bpf/xdp_rxq_info_kern.c
@@ -23,12 +23,13 @@ enum cfg_options_flags {
READ_MEM = 0x1U,
SWAP_MAC = 0x2U,
};
-struct bpf_map_def SEC("maps") config_map = {
- .type = BPF_MAP_TYPE_ARRAY,
- .key_size = sizeof(int),
- .value_size = sizeof(struct config),
- .max_entries = 1,
-};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, int);
+ __type(value, struct config);
+ __uint(max_entries, 1);
+} config_map SEC(".maps");
/* Common stats data record (shared with userspace) */
struct datarec {
@@ -36,22 +37,22 @@ struct datarec {
__u64 issue;
};
-struct bpf_map_def SEC("maps") stats_global_map = {
- .type = BPF_MAP_TYPE_PERCPU_ARRAY,
- .key_size = sizeof(u32),
- .value_size = sizeof(struct datarec),
- .max_entries = 1,
-};
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __type(key, u32);
+ __type(value, struct datarec);
+ __uint(max_entries, 1);
+} stats_global_map SEC(".maps");
#define MAX_RXQs 64
/* Stats per rx_queue_index (per CPU) */
-struct bpf_map_def SEC("maps") rx_queue_index_map = {
- .type = BPF_MAP_TYPE_PERCPU_ARRAY,
- .key_size = sizeof(u32),
- .value_size = sizeof(struct datarec),
- .max_entries = MAX_RXQs + 1,
-};
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __type(key, u32);
+ __type(value, struct datarec);
+ __uint(max_entries, MAX_RXQs + 1);
+} rx_queue_index_map SEC(".maps");
static __always_inline
void swap_src_dst_mac(void *data)
diff --git a/samples/bpf/xdp_rxq_info_user.c b/samples/bpf/xdp_rxq_info_user.c
index c7e4e45d824a..51e0d810e070 100644
--- a/samples/bpf/xdp_rxq_info_user.c
+++ b/samples/bpf/xdp_rxq_info_user.c
@@ -51,8 +51,8 @@ static const struct option long_options[] = {
{"sec", required_argument, NULL, 's' },
{"no-separators", no_argument, NULL, 'z' },
{"action", required_argument, NULL, 'a' },
- {"readmem", no_argument, NULL, 'r' },
- {"swapmac", no_argument, NULL, 'm' },
+ {"readmem", no_argument, NULL, 'r' },
+ {"swapmac", no_argument, NULL, 'm' },
{"force", no_argument, NULL, 'F' },
{0, 0, NULL, 0 }
};
@@ -499,7 +499,7 @@ int main(int argc, char **argv)
map_fd = bpf_map__fd(map);
if (!prog_fd) {
- fprintf(stderr, "ERR: load_bpf_file: %s\n", strerror(errno));
+ fprintf(stderr, "ERR: bpf_prog_load_xattr: %s\n", strerror(errno));
return EXIT_FAIL;
}
diff --git a/samples/bpf/xdp_sample_pkts_user.c b/samples/bpf/xdp_sample_pkts_user.c
index 3002714e3cd5..a5760e8bf2c4 100644
--- a/samples/bpf/xdp_sample_pkts_user.c
+++ b/samples/bpf/xdp_sample_pkts_user.c
@@ -150,7 +150,7 @@ int main(int argc, char **argv)
return 1;
if (!prog_fd) {
- printf("load_bpf_file: %s\n", strerror(errno));
+ printf("bpf_prog_load_xattr: %s\n", strerror(errno));
return 1;
}
diff --git a/samples/bpf/xdp_tx_iptunnel_kern.c b/samples/bpf/xdp_tx_iptunnel_kern.c
index 0f4f6e8c8611..6db450a5c1ca 100644
--- a/samples/bpf/xdp_tx_iptunnel_kern.c
+++ b/samples/bpf/xdp_tx_iptunnel_kern.c
@@ -19,19 +19,19 @@
#include "bpf_helpers.h"
#include "xdp_tx_iptunnel_common.h"
-struct bpf_map_def SEC("maps") rxcnt = {
- .type = BPF_MAP_TYPE_PERCPU_ARRAY,
- .key_size = sizeof(__u32),
- .value_size = sizeof(__u64),
- .max_entries = 256,
-};
-
-struct bpf_map_def SEC("maps") vip2tnl = {
- .type = BPF_MAP_TYPE_HASH,
- .key_size = sizeof(struct vip),
- .value_size = sizeof(struct iptnl_info),
- .max_entries = MAX_IPTNL_ENTRIES,
-};
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __type(key, __u32);
+ __type(value, __u64);
+ __uint(max_entries, 256);
+} rxcnt SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, struct vip);
+ __type(value, struct iptnl_info);
+ __uint(max_entries, MAX_IPTNL_ENTRIES);
+} vip2tnl SEC(".maps");
static __always_inline void count_tx(u32 protocol)
{
diff --git a/samples/bpf/xdp_tx_iptunnel_user.c b/samples/bpf/xdp_tx_iptunnel_user.c
index dfb68582e243..2fe4c7f5ffe5 100644
--- a/samples/bpf/xdp_tx_iptunnel_user.c
+++ b/samples/bpf/xdp_tx_iptunnel_user.c
@@ -268,7 +268,7 @@ int main(int argc, char **argv)
return 1;
if (!prog_fd) {
- printf("load_bpf_file: %s\n", strerror(errno));
+ printf("bpf_prog_load_xattr: %s\n", strerror(errno));
return 1;
}
diff --git a/samples/bpf/xdpsock.h b/samples/bpf/xdpsock.h
new file mode 100644
index 000000000000..b7eca15c78cc
--- /dev/null
+++ b/samples/bpf/xdpsock.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright(c) 2019 Intel Corporation.
+ */
+
+#ifndef XDPSOCK_H_
+#define XDPSOCK_H_
+
+#define MAX_SOCKS 4
+
+#endif /* XDPSOCK_H_ */
diff --git a/samples/bpf/xdpsock_kern.c b/samples/bpf/xdpsock_kern.c
new file mode 100644
index 000000000000..a06177c262cd
--- /dev/null
+++ b/samples/bpf/xdpsock_kern.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include "bpf_helpers.h"
+#include "xdpsock.h"
+
+/* This XDP program is only needed for the XDP_SHARED_UMEM mode.
+ * If you do not use this mode, libbpf can supply an XDP program for you.
+ */
+
+struct {
+ __uint(type, BPF_MAP_TYPE_XSKMAP);
+ __uint(max_entries, MAX_SOCKS);
+ __uint(key_size, sizeof(int));
+ __uint(value_size, sizeof(int));
+} xsks_map SEC(".maps");
+
+static unsigned int rr;
+
+SEC("xdp_sock") int xdp_sock_prog(struct xdp_md *ctx)
+{
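+ /* Round-robin across the sockets; the mask below works because
+ * MAX_SOCKS is a power of two.
+ */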
+ rr = (rr + 1) & (MAX_SOCKS - 1);
+
+ return bpf_redirect_map(&xsks_map, rr, XDP_DROP);
+}
diff --git a/samples/bpf/xdpsock_user.c b/samples/bpf/xdpsock_user.c
index df011ac33402..a15480010828 100644
--- a/samples/bpf/xdpsock_user.c
+++ b/samples/bpf/xdpsock_user.c
@@ -29,6 +29,7 @@
#include "libbpf.h"
#include "xsk.h"
+#include "xdpsock.h"
#include <bpf/bpf.h>
#ifndef SOL_XDP
@@ -47,7 +48,6 @@
#define BATCH_SIZE 64
#define DEBUG_HEXDUMP 0
-#define MAX_SOCKS 8
typedef __u64 u64;
typedef __u32 u32;
@@ -75,7 +75,8 @@ static u32 opt_xdp_bind_flags;
static int opt_xsk_frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE;
static int opt_timeout = 1000;
static bool opt_need_wakeup = true;
-static __u32 prog_id;
+static u32 opt_num_xsks = 1;
+static u32 prog_id;
struct xsk_umem_info {
struct xsk_ring_prod fq;
@@ -179,7 +180,7 @@ static void *poller(void *arg)
static void remove_xdp_program(void)
{
- __u32 curr_prog_id = 0;
+ u32 curr_prog_id = 0;
if (bpf_get_link_xdp_id(opt_ifindex, &curr_prog_id, opt_xdp_flags)) {
printf("bpf_get_link_xdp_id failed\n");
@@ -196,11 +197,11 @@ static void remove_xdp_program(void)
static void int_exit(int sig)
{
struct xsk_umem *umem = xsks[0]->umem->umem;
-
- (void)sig;
+ int i;
dump_stats();
- xsk_socket__delete(xsks[0]->xsk);
+ for (i = 0; i < num_socks; i++)
+ xsk_socket__delete(xsks[i]->xsk);
(void)xsk_umem__delete(umem);
remove_xdp_program();
@@ -290,7 +291,6 @@ static struct xsk_umem_info *xsk_configure_umem(void *buffer, u64 size)
.frame_headroom = XSK_UMEM__DEFAULT_FRAME_HEADROOM,
.flags = opt_umem_flags
};
-
int ret;
umem = calloc(1, sizeof(*umem));
@@ -299,7 +299,6 @@ static struct xsk_umem_info *xsk_configure_umem(void *buffer, u64 size)
ret = xsk_umem__create(&umem->umem, buffer, size, &umem->fq, &umem->cq,
&cfg);
-
if (ret)
exit_with_error(-ret);
@@ -307,13 +306,29 @@ static struct xsk_umem_info *xsk_configure_umem(void *buffer, u64 size)
return umem;
}
-static struct xsk_socket_info *xsk_configure_socket(struct xsk_umem_info *umem)
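+/* Pre-populate the fill ring so the kernel has buffers to receive into. */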
+static void xsk_populate_fill_ring(struct xsk_umem_info *umem)
+{
+ int ret, i;
+ u32 idx;
+
+ ret = xsk_ring_prod__reserve(&umem->fq,
+ XSK_RING_PROD__DEFAULT_NUM_DESCS, &idx);
+ if (ret != XSK_RING_PROD__DEFAULT_NUM_DESCS)
+ exit_with_error(-ret);
+ for (i = 0; i < XSK_RING_PROD__DEFAULT_NUM_DESCS; i++)
+ *xsk_ring_prod__fill_addr(&umem->fq, idx++) =
+ i * opt_xsk_frame_size;
+ xsk_ring_prod__submit(&umem->fq, XSK_RING_PROD__DEFAULT_NUM_DESCS);
+}
+
+static struct xsk_socket_info *xsk_configure_socket(struct xsk_umem_info *umem,
+ bool rx, bool tx)
{
struct xsk_socket_config cfg;
struct xsk_socket_info *xsk;
+ struct xsk_ring_cons *rxr;
+ struct xsk_ring_prod *txr;
int ret;
- u32 idx;
- int i;
xsk = calloc(1, sizeof(*xsk));
if (!xsk)
@@ -322,11 +337,17 @@ static struct xsk_socket_info *xsk_configure_socket(struct xsk_umem_info *umem)
xsk->umem = umem;
cfg.rx_size = XSK_RING_CONS__DEFAULT_NUM_DESCS;
cfg.tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS;
- cfg.libbpf_flags = 0;
+ if (opt_num_xsks > 1)
+ cfg.libbpf_flags = XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD;
+ else
+ cfg.libbpf_flags = 0;
cfg.xdp_flags = opt_xdp_flags;
cfg.bind_flags = opt_xdp_bind_flags;
+
+ rxr = rx ? &xsk->rx : NULL;
+ txr = tx ? &xsk->tx : NULL;
ret = xsk_socket__create(&xsk->xsk, opt_if, opt_queue, umem->umem,
- &xsk->rx, &xsk->tx, &cfg);
+ rxr, txr, &cfg);
if (ret)
exit_with_error(-ret);
@@ -334,17 +355,6 @@ static struct xsk_socket_info *xsk_configure_socket(struct xsk_umem_info *umem)
if (ret)
exit_with_error(-ret);
- ret = xsk_ring_prod__reserve(&xsk->umem->fq,
- XSK_RING_PROD__DEFAULT_NUM_DESCS,
- &idx);
- if (ret != XSK_RING_PROD__DEFAULT_NUM_DESCS)
- exit_with_error(-ret);
- for (i = 0; i < XSK_RING_PROD__DEFAULT_NUM_DESCS; i++)
- *xsk_ring_prod__fill_addr(&xsk->umem->fq, idx++) =
- i * opt_xsk_frame_size;
- xsk_ring_prod__submit(&xsk->umem->fq,
- XSK_RING_PROD__DEFAULT_NUM_DESCS);
-
return xsk;
}
@@ -363,6 +373,8 @@ static struct option long_options[] = {
{"frame-size", required_argument, 0, 'f'},
{"no-need-wakeup", no_argument, 0, 'm'},
{"unaligned", no_argument, 0, 'u'},
+ {"shared-umem", no_argument, 0, 'M'},
+ {"force", no_argument, 0, 'F'},
{0, 0, 0, 0}
};
@@ -378,14 +390,15 @@ static void usage(const char *prog)
" -q, --queue=n Use queue n (default 0)\n"
" -p, --poll Use poll syscall\n"
" -S, --xdp-skb=n Use XDP skb-mod\n"
- " -N, --xdp-native=n Enfore XDP native mode\n"
+ " -N, --xdp-native=n Enforce XDP native mode\n"
" -n, --interval=n Specify statistics update interval (default 1 sec).\n"
" -z, --zero-copy Force zero-copy mode.\n"
" -c, --copy Force copy mode.\n"
- " -f, --frame-size=n Set the frame size (must be a power of two, default is %d).\n"
" -m, --no-need-wakeup Turn off use of driver need wakeup flag.\n"
" -f, --frame-size=n Set the frame size (must be a power of two in aligned mode, default is %d).\n"
" -u, --unaligned Enable unaligned chunk placement\n"
+ " -M, --shared-umem Enable XDP_SHARED_UMEM\n"
+ " -F, --force Force loading the XDP prog\n"
"\n";
fprintf(stderr, str, prog, XSK_UMEM__DEFAULT_FRAME_SIZE);
exit(EXIT_FAILURE);
@@ -398,7 +411,7 @@ static void parse_command_line(int argc, char **argv)
opterr = 0;
for (;;) {
- c = getopt_long(argc, argv, "Frtli:q:psSNn:czf:mu",
+ c = getopt_long(argc, argv, "Frtli:q:psSNn:czf:muM",
long_options, &option_index);
if (c == -1)
break;
@@ -448,11 +461,14 @@ static void parse_command_line(int argc, char **argv)
break;
case 'f':
opt_xsk_frame_size = atoi(optarg);
+ break;
case 'm':
opt_need_wakeup = false;
opt_xdp_bind_flags &= ~XDP_USE_NEED_WAKEUP;
break;
-
+ case 'M':
+ opt_num_xsks = MAX_SOCKS;
+ break;
default:
usage(basename(argv[0]));
}
@@ -586,11 +602,9 @@ static void rx_drop(struct xsk_socket_info *xsk, struct pollfd *fds)
static void rx_drop_all(void)
{
- struct pollfd fds[MAX_SOCKS + 1];
+ struct pollfd fds[MAX_SOCKS] = {};
int i, ret;
- memset(fds, 0, sizeof(fds));
-
for (i = 0; i < num_socks; i++) {
fds[i].fd = xsk_socket__fd(xsks[i]->xsk);
fds[i].events = POLLIN;
@@ -633,11 +647,10 @@ static void tx_only(struct xsk_socket_info *xsk, u32 frame_nb)
static void tx_only_all(void)
{
- struct pollfd fds[MAX_SOCKS];
+ struct pollfd fds[MAX_SOCKS] = {};
u32 frame_nb[MAX_SOCKS] = {};
int i, ret;
- memset(fds, 0, sizeof(fds));
for (i = 0; i < num_socks; i++) {
fds[0].fd = xsk_socket__fd(xsks[i]->xsk);
fds[0].events = POLLOUT;
@@ -706,11 +719,9 @@ static void l2fwd(struct xsk_socket_info *xsk, struct pollfd *fds)
static void l2fwd_all(void)
{
- struct pollfd fds[MAX_SOCKS];
+ struct pollfd fds[MAX_SOCKS] = {};
int i, ret;
- memset(fds, 0, sizeof(fds));
-
for (i = 0; i < num_socks; i++) {
fds[i].fd = xsk_socket__fd(xsks[i]->xsk);
fds[i].events = POLLOUT | POLLIN;
@@ -728,13 +739,66 @@ static void l2fwd_all(void)
}
}
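+/* With XDP_SHARED_UMEM the sample loads its own XDP program (libbpf's
+ * default loader is inhibited above) and enters every socket fd into
+ * xsks_map.
+ */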
+static void load_xdp_program(char **argv, struct bpf_object **obj)
+{
+ struct bpf_prog_load_attr prog_load_attr = {
+ .prog_type = BPF_PROG_TYPE_XDP,
+ };
+ char xdp_filename[256];
+ int prog_fd;
+
+ snprintf(xdp_filename, sizeof(xdp_filename), "%s_kern.o", argv[0]);
+ prog_load_attr.file = xdp_filename;
+
+ if (bpf_prog_load_xattr(&prog_load_attr, obj, &prog_fd))
+ exit(EXIT_FAILURE);
+ if (prog_fd < 0) {
+ fprintf(stderr, "ERROR: no program found: %s\n",
+ strerror(prog_fd));
+ exit(EXIT_FAILURE);
+ }
+
+ if (bpf_set_link_xdp_fd(opt_ifindex, prog_fd, opt_xdp_flags) < 0) {
+ fprintf(stderr, "ERROR: link set xdp fd failed\n");
+ exit(EXIT_FAILURE);
+ }
+}
+
+static void enter_xsks_into_map(struct bpf_object *obj)
+{
+ struct bpf_map *map;
+ int i, xsks_map;
+
+ map = bpf_object__find_map_by_name(obj, "xsks_map");
+ xsks_map = bpf_map__fd(map);
+ if (xsks_map < 0) {
+ fprintf(stderr, "ERROR: no xsks map found: %s\n",
+ strerror(xsks_map));
+ exit(EXIT_FAILURE);
+ }
+
+ for (i = 0; i < num_socks; i++) {
+ int fd = xsk_socket__fd(xsks[i]->xsk);
+ int key, ret;
+
+ key = i;
+ ret = bpf_map_update_elem(xsks_map, &key, &fd, 0);
+ if (ret) {
+ fprintf(stderr, "ERROR: bpf_map_update_elem %d\n", i);
+ exit(EXIT_FAILURE);
+ }
+ }
+}
+
int main(int argc, char **argv)
{
struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
+ bool rx = false, tx = false;
struct xsk_umem_info *umem;
+ struct bpf_object *obj;
pthread_t pt;
+ int i, ret;
void *bufs;
- int ret;
parse_command_line(argc, argv);
@@ -744,6 +808,9 @@ int main(int argc, char **argv)
exit(EXIT_FAILURE);
}
+ if (opt_num_xsks > 1)
+ load_xdp_program(argv, &obj);
+
/* Reserve memory for the umem. Use hugepages if unaligned chunk mode */
bufs = mmap(NULL, NUM_FRAMES * opt_xsk_frame_size,
PROT_READ | PROT_WRITE,
@@ -752,16 +819,24 @@ int main(int argc, char **argv)
printf("ERROR: mmap failed\n");
exit(EXIT_FAILURE);
}
- /* Create sockets... */
- umem = xsk_configure_umem(bufs, NUM_FRAMES * opt_xsk_frame_size);
- xsks[num_socks++] = xsk_configure_socket(umem);
- if (opt_bench == BENCH_TXONLY) {
- int i;
+ /* Create sockets... */
+ umem = xsk_configure_umem(bufs, NUM_FRAMES * opt_xsk_frame_size);
+ if (opt_bench == BENCH_RXDROP || opt_bench == BENCH_L2FWD) {
+ rx = true;
+ xsk_populate_fill_ring(umem);
+ }
+ if (opt_bench == BENCH_L2FWD || opt_bench == BENCH_TXONLY)
+ tx = true;
+ for (i = 0; i < opt_num_xsks; i++)
+ xsks[num_socks++] = xsk_configure_socket(umem, rx, tx);
+ if (opt_bench == BENCH_TXONLY)
for (i = 0; i < NUM_FRAMES; i++)
- (void)gen_eth_frame(umem, i * opt_xsk_frame_size);
- }
+ gen_eth_frame(umem, i * opt_xsk_frame_size);
+
+ if (opt_num_xsks > 1 && opt_bench != BENCH_TXONLY)
+ enter_xsks_into_map(obj);
signal(SIGINT, int_exit);
signal(SIGTERM, int_exit);
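The loader above expects a companion BPF object, built as "%s_kern.o" from argv[0], whose job is to spread traffic across the sockets that enter_xsks_into_map() registers in "xsks_map". A minimal sketch of such a kernel-side program, assuming the legacy bpf_map_def map style and a round-robin policy (both illustrative, not taken from this patch):

	#include <linux/bpf.h>
	#include "bpf_helpers.h"

	#define MAX_SOCKS 4

	struct bpf_map_def SEC("maps") xsks_map = {
		.type = BPF_MAP_TYPE_XSKMAP,
		.key_size = sizeof(int),
		.value_size = sizeof(int),
		.max_entries = MAX_SOCKS,
	};

	static unsigned int rr;

	SEC("xdp_sock")
	int xdp_sock_prog(struct xdp_md *ctx)
	{
		rr = (rr + 1) & (MAX_SOCKS - 1);
		/* Fall back to XDP_DROP if no socket is bound at this index. */
		return bpf_redirect_map(&xsks_map, rr, XDP_DROP);
	}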
diff --git a/samples/mei/Makefile b/samples/mei/Makefile
index c7e52e9e92ca..27f37efdadb4 100644
--- a/samples/mei/Makefile
+++ b/samples/mei/Makefile
@@ -1,10 +1,10 @@
# SPDX-License-Identifier: GPL-2.0
-CC := $(CROSS_COMPILE)gcc
-CFLAGS := -I../../usr/include
+# Copyright (c) 2012-2019, Intel Corporation. All rights reserved.
-PROGS := mei-amt-version
+hostprogs-y := mei-amt-version
-all: $(PROGS)
+HOSTCFLAGS_mei-amt-version.o += -I$(objtree)/usr/include
-clean:
- rm -fr $(PROGS)
+always := $(hostprogs-y)
+
+all: mei-amt-version
diff --git a/samples/pktgen/README.rst b/samples/pktgen/README.rst
index fd39215db508..3f6483e8b2df 100644
--- a/samples/pktgen/README.rst
+++ b/samples/pktgen/README.rst
@@ -18,7 +18,7 @@ across the sample scripts. Usage example is printed on errors::
Usage: ./pktgen_sample01_simple.sh [-vx] -i ethX
-i : ($DEV) output interface/device (required)
-s : ($PKT_SIZE) packet size
- -d : ($DEST_IP) destination IP
+ -d : ($DEST_IP) destination IP. CIDR (e.g. 198.18.0.0/15) is also allowed
-m : ($DST_MAC) destination MAC-addr
-p : ($DST_PORT) destination PORT range (e.g. 433-444) is also allowed
-t : ($THREADS) threads to start
diff --git a/samples/pktgen/functions.sh b/samples/pktgen/functions.sh
index 4af4046d71be..dae06d5b38fa 100644
--- a/samples/pktgen/functions.sh
+++ b/samples/pktgen/functions.sh
@@ -5,6 +5,8 @@
# Author: Jesper Dangaaard Brouer
# License: GPL
+set -o errexit
+
## -- General shell logging cmds --
function err() {
local exitcode=$1
@@ -58,6 +60,7 @@ function pg_set() {
function proc_cmd() {
local result
local proc_file=$1
+ local status=0
# after shift, the remaining args are contained in $@
shift
local proc_ctrl=${PROC_DIR}/$proc_file
@@ -73,13 +76,13 @@ function proc_cmd() {
echo "cmd: $@ > $proc_ctrl"
fi
# Quoting of "$@" is important for space expansion
- echo "$@" > "$proc_ctrl"
- local status=$?
+ echo "$@" > "$proc_ctrl" || status=$?
- result=$(grep "Result: OK:" $proc_ctrl)
- # Due to pgctrl, cannot use exit code $? from grep
- if [[ "$result" == "" ]]; then
- grep "Result:" $proc_ctrl >&2
+ if [[ "$proc_file" != "pgctrl" ]]; then
+ result=$(grep "Result: OK:" $proc_ctrl) || true
+ if [[ "$result" == "" ]]; then
+ grep "Result:" $proc_ctrl >&2
+ fi
fi
if (( $status != 0 )); then
err 5 "Write error($status) occurred cmd: \"$@ > $proc_ctrl\""
@@ -105,6 +108,8 @@ function pgset() {
fi
}
+[[ $EUID -eq 0 ]] && trap 'pg_ctrl "reset"' EXIT
+
## -- General shell tricks --
function root_check_run_with_sudo() {
@@ -163,6 +168,137 @@ function get_node_cpus()
echo $node_cpu_list
}
+# Check $1 is in between $2, $3 ($2 <= $1 <= $3)
+function in_between() { [[ ($1 -ge $2) && ($1 -le $3) ]] ; }
+
+# Extend shrunken IPv6 address.
+# fe80::42:bcff:fe84:e10a => fe80:0:0:0:42:bcff:fe84:e10a
+function extend_addr6()
+{
+ local addr=$1
+ local sep=: sep2=::
+ local sep_cnt=$(tr -cd $sep <<< $1 | wc -c)
+ local shrink
+
+ # separator count should be (2 <= $sep_cnt <= 7)
+ if ! (in_between $sep_cnt 2 7); then
+ err 5 "Invalid IP6 address: $1"
+ fi
+
+ # if the '::' shrink marker occurs more than once, the address is malformed.
+ shrink=( $(egrep -o "$sep{2,}" <<< $addr) )
+ if [[ ${#shrink[@]} -ne 0 ]]; then
+ if [[ ${#shrink[@]} -gt 1 || ( ${shrink[0]} != $sep2 ) ]]; then
+ err 5 "Invalid IP6 address: $1"
+ fi
+ fi
+
+ # add 0 at begin & end, and extend addr by adding :0
+ [[ ${addr:0:1} == $sep ]] && addr=0${addr}
+ [[ ${addr: -1} == $sep ]] && addr=${addr}0
+ echo "${addr/$sep2/$(printf ':0%.s' $(seq $[8-sep_cnt])):}"
+}
+
+# Check whether a single IP(v4/v6) address is valid.
+function validate_addr()
+{
+ # check function is called with (funcname)6
+ [[ ${FUNCNAME[1]: -1} == 6 ]] && local IP6=6
+ local bitlen=$[ IP6 ? 128 : 32 ]
+ local len=$[ IP6 ? 8 : 4 ]
+ local max=$[ 2**(len*2)-1 ]
+ local net prefix
+ local addr sep
+
+ IFS='/' read net prefix <<< $1
+ [[ $IP6 ]] && net=$(extend_addr6 $net)
+
+ # if prefix exists, check (0 <= $prefix <= $bitlen)
+ if [[ -n $prefix ]]; then
+ if ! (in_between $prefix 0 $bitlen); then
+ err 5 "Invalid prefix: /$prefix"
+ fi
+ fi
+
+ # set separator for each IP(v4/v6)
+ [[ $IP6 ]] && sep=: || sep=.
+ IFS=$sep read -a addr <<< $net
+
+ # array length
+ if [[ ${#addr[@]} != $len ]]; then
+ err 5 "Invalid IP$IP6 address: $1"
+ fi
+
+ # check each digit (0 <= $digit <= $max)
+ for digit in "${addr[@]}"; do
+ [[ $IP6 ]] && digit=$[ 16#$digit ]
+ if ! (in_between $digit 0 $max); then
+ err 5 "Invalid IP$IP6 address: $1"
+ fi
+ done
+
+ return 0
+}
+
+function validate_addr6() { validate_addr $@ ; }
+
+# Given a single IP(v4/v6) or CIDR, return minimum and maximum IP addr.
+function parse_addr()
+{
+ # check function is called with (funcname)6
+ [[ ${FUNCNAME[1]: -1} == 6 ]] && local IP6=6
+ local net prefix
+ local min_ip max_ip
+
+ IFS='/' read net prefix <<< $1
+ [[ $IP6 ]] && net=$(extend_addr6 $net)
+
+ if [[ -z $prefix ]]; then
+ min_ip=$net
+ max_ip=$net
+ else
+ # defining array for converting Decimal 2 Binary
+ # 00000000 00000001 00000010 00000011 00000100 ...
+ local d2b='{0..1}{0..1}{0..1}{0..1}{0..1}{0..1}{0..1}{0..1}'
+ [[ $IP6 ]] && d2b+=$d2b
+ eval local D2B=($d2b)
+
+ local bitlen=$[ IP6 ? 128 : 32 ]
+ local remain=$[ bitlen-prefix ]
+ local octet=$[ IP6 ? 16 : 8 ]
+ local min_mask max_mask
+ local min max
+ local ip_bit
+ local ip sep
+
+ # set separator for each IP(v4/v6)
+ [[ $IP6 ]] && sep=: || sep=.
+ IFS=$sep read -ra ip <<< $net
+
+ min_mask="$(printf '1%.s' $(seq $prefix))$(printf '0%.s' $(seq $remain))"
+ max_mask="$(printf '0%.s' $(seq $prefix))$(printf '1%.s' $(seq $remain))"
+
+ # calculate min/max ip with &,| operator
+ for i in "${!ip[@]}"; do
+ digit=$[ IP6 ? 16#${ip[$i]} : ${ip[$i]} ]
+ ip_bit=${D2B[$digit]}
+
+ idx=$[ octet*i ]
+ min[$i]=$[ 2#$ip_bit & 2#${min_mask:$idx:$octet} ]
+ max[$i]=$[ 2#$ip_bit | 2#${max_mask:$idx:$octet} ]
+ [[ $IP6 ]] && { min[$i]=$(printf '%X' ${min[$i]});
+ max[$i]=$(printf '%X' ${max[$i]}); }
+ done
+
+ min_ip=$(IFS=$sep; echo "${min[*]}")
+ max_ip=$(IFS=$sep; echo "${max[*]}")
+ fi
+
+ echo $min_ip $max_ip
+}
+
+function parse_addr6() { parse_addr $@ ; }
+
# Given a single or range of port(s), return minimum and maximum port number.
function parse_ports()
{
@@ -185,9 +321,9 @@ function validate_ports()
local min_port=$1
local max_port=$2
- # 0 < port < 65536
- if [[ $min_port -gt 0 && $min_port -lt 65536 ]]; then
- if [[ $max_port -gt 0 && $max_port -lt 65536 ]]; then
+ # 1 <= port <= 65535
+ if (in_between $min_port 1 65535); then
+ if (in_between $max_port 1 65535); then
if [[ $min_port -le $max_port ]]; then
return 0
fi
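The min/max split performed by parse_addr() above is ordinary netmask arithmetic: AND the address with the prefix mask for the lowest address, OR it with the inverted mask for the highest. The same computation in C, as a hedged illustration (function and variable names are ours, not part of these scripts):

	#include <stdint.h>
	#include <stdio.h>
	#include <inttypes.h>
	#include <arpa/inet.h>

	/* Lowest/highest IPv4 address of a CIDR block. */
	static void cidr_min_max(uint32_t addr, int prefix,
				 uint32_t *min, uint32_t *max)
	{
		uint32_t mask = prefix ? ~0u << (32 - prefix) : 0;

		*min = addr & mask;	/* network address   */
		*max = addr | ~mask;	/* last host address */
	}

	int main(void)
	{
		struct in_addr a;
		uint32_t min, max;

		inet_pton(AF_INET, "198.18.0.0", &a);
		cidr_min_max(ntohl(a.s_addr), 15, &min, &max);
		/* prints: c6120000 c613ffff */
		printf("%08" PRIx32 " %08" PRIx32 "\n", min, max);
		return 0;
	}

For 198.18.0.0/15 this yields 198.18.0.0 and 198.19.255.255, matching the hard-coded source range that the many-flows sample replaces further below.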
diff --git a/samples/pktgen/parameters.sh b/samples/pktgen/parameters.sh
index a06b00a0c7b6..ff0ed474fee9 100644
--- a/samples/pktgen/parameters.sh
+++ b/samples/pktgen/parameters.sh
@@ -8,7 +8,7 @@ function usage() {
echo "Usage: $0 [-vx] -i ethX"
echo " -i : (\$DEV) output interface/device (required)"
echo " -s : (\$PKT_SIZE) packet size"
- echo " -d : (\$DEST_IP) destination IP"
+ echo " -d : (\$DEST_IP) destination IP. CIDR (e.g. 198.18.0.0/15) is also allowed"
echo " -m : (\$DST_MAC) destination MAC-addr"
echo " -p : (\$DST_PORT) destination PORT range (e.g. 433-444) is also allowed"
echo " -t : (\$THREADS) threads to start"
diff --git a/samples/pktgen/pktgen_bench_xmit_mode_netif_receive.sh b/samples/pktgen/pktgen_bench_xmit_mode_netif_receive.sh
index e14b1a9144d9..1b6204125d2d 100755
--- a/samples/pktgen/pktgen_bench_xmit_mode_netif_receive.sh
+++ b/samples/pktgen/pktgen_bench_xmit_mode_netif_receive.sh
@@ -41,9 +41,13 @@ fi
[ -z "$DST_MAC" ] && DST_MAC="90:e2:ba:ff:ff:ff"
[ -z "$BURST" ] && BURST=1024
[ -z "$COUNT" ] && COUNT="10000000" # Zero means indefinitely
+if [ -n "$DEST_IP" ]; then
+ validate_addr${IP6} $DEST_IP
+ read -r DST_MIN DST_MAX <<< $(parse_addr${IP6} $DEST_IP)
+fi
if [ -n "$DST_PORT" ]; then
- read -r DST_MIN DST_MAX <<< $(parse_ports $DST_PORT)
- validate_ports $DST_MIN $DST_MAX
+ read -r UDP_DST_MIN UDP_DST_MAX <<< $(parse_ports $DST_PORT)
+ validate_ports $UDP_DST_MIN $UDP_DST_MAX
fi
# Base Config
@@ -71,13 +75,14 @@ for ((thread = $F_THREAD; thread <= $L_THREAD; thread++)); do
# Destination
pg_set $dev "dst_mac $DST_MAC"
- pg_set $dev "dst$IP6 $DEST_IP"
+ pg_set $dev "dst${IP6}_min $DST_MIN"
+ pg_set $dev "dst${IP6}_max $DST_MAX"
if [ -n "$DST_PORT" ]; then
# Single destination port or random port range
pg_set $dev "flag UDPDST_RND"
- pg_set $dev "udp_dst_min $DST_MIN"
- pg_set $dev "udp_dst_max $DST_MAX"
+ pg_set $dev "udp_dst_min $UDP_DST_MIN"
+ pg_set $dev "udp_dst_max $UDP_DST_MAX"
fi
# Inject packet into RX path of stack
diff --git a/samples/pktgen/pktgen_bench_xmit_mode_queue_xmit.sh b/samples/pktgen/pktgen_bench_xmit_mode_queue_xmit.sh
index 82c3e504e056..e607cb369b20 100755
--- a/samples/pktgen/pktgen_bench_xmit_mode_queue_xmit.sh
+++ b/samples/pktgen/pktgen_bench_xmit_mode_queue_xmit.sh
@@ -24,9 +24,13 @@ if [[ -n "$BURST" ]]; then
err 1 "Bursting not supported for this mode"
fi
[ -z "$COUNT" ] && COUNT="10000000" # Zero means indefinitely
+if [ -n "$DEST_IP" ]; then
+ validate_addr${IP6} $DEST_IP
+ read -r DST_MIN DST_MAX <<< $(parse_addr${IP6} $DEST_IP)
+fi
if [ -n "$DST_PORT" ]; then
- read -r DST_MIN DST_MAX <<< $(parse_ports $DST_PORT)
- validate_ports $DST_MIN $DST_MAX
+ read -r UDP_DST_MIN UDP_DST_MAX <<< $(parse_ports $DST_PORT)
+ validate_ports $UDP_DST_MIN $UDP_DST_MAX
fi
# Base Config
@@ -54,13 +58,14 @@ for ((thread = $F_THREAD; thread <= $L_THREAD; thread++)); do
# Destination
pg_set $dev "dst_mac $DST_MAC"
- pg_set $dev "dst$IP6 $DEST_IP"
+ pg_set $dev "dst${IP6}_min $DST_MIN"
+ pg_set $dev "dst${IP6}_max $DST_MAX"
if [ -n "$DST_PORT" ]; then
# Single destination port or random port range
pg_set $dev "flag UDPDST_RND"
- pg_set $dev "udp_dst_min $DST_MIN"
- pg_set $dev "udp_dst_max $DST_MAX"
+ pg_set $dev "udp_dst_min $UDP_DST_MIN"
+ pg_set $dev "udp_dst_max $UDP_DST_MAX"
fi
# Inject packet into TX qdisc egress path of stack
diff --git a/samples/pktgen/pktgen_sample01_simple.sh b/samples/pktgen/pktgen_sample01_simple.sh
index d1702fdde8f3..a4e250b45dce 100755
--- a/samples/pktgen/pktgen_sample01_simple.sh
+++ b/samples/pktgen/pktgen_sample01_simple.sh
@@ -22,17 +22,21 @@ fi
# Example enforce param "-m" for dst_mac
[ -z "$DST_MAC" ] && usage && err 2 "Must specify -m dst_mac"
[ -z "$COUNT" ] && COUNT="100000" # Zero means indefinitely
+if [ -n "$DEST_IP" ]; then
+ validate_addr${IP6} $DEST_IP
+ read -r DST_MIN DST_MAX <<< $(parse_addr${IP6} $DEST_IP)
+fi
if [ -n "$DST_PORT" ]; then
- read -r DST_MIN DST_MAX <<< $(parse_ports $DST_PORT)
- validate_ports $DST_MIN $DST_MAX
+ read -r UDP_DST_MIN UDP_DST_MAX <<< $(parse_ports $DST_PORT)
+ validate_ports $UDP_DST_MIN $UDP_DST_MAX
fi
# Base Config
DELAY="0" # Zero means max speed
# Flow variation random source port between min and max
-UDP_MIN=9
-UDP_MAX=109
+UDP_SRC_MIN=9
+UDP_SRC_MAX=109
# General cleanup everything since last run
# (especially important if other threads were configured by other scripts)
@@ -61,19 +65,20 @@ pg_set $DEV "flag NO_TIMESTAMP"
# Destination
pg_set $DEV "dst_mac $DST_MAC"
-pg_set $DEV "dst$IP6 $DEST_IP"
+pg_set $DEV "dst${IP6}_min $DST_MIN"
+pg_set $DEV "dst${IP6}_max $DST_MAX"
if [ -n "$DST_PORT" ]; then
# Single destination port or random port range
pg_set $DEV "flag UDPDST_RND"
- pg_set $DEV "udp_dst_min $DST_MIN"
- pg_set $DEV "udp_dst_max $DST_MAX"
+ pg_set $DEV "udp_dst_min $UDP_DST_MIN"
+ pg_set $DEV "udp_dst_max $UDP_DST_MAX"
fi
# Setup random UDP port src range
pg_set $DEV "flag UDPSRC_RND"
-pg_set $DEV "udp_src_min $UDP_MIN"
-pg_set $DEV "udp_src_max $UDP_MAX"
+pg_set $DEV "udp_src_min $UDP_SRC_MIN"
+pg_set $DEV "udp_src_max $UDP_SRC_MAX"
# start_run
echo "Running... ctrl^C to stop" >&2
diff --git a/samples/pktgen/pktgen_sample02_multiqueue.sh b/samples/pktgen/pktgen_sample02_multiqueue.sh
index 7f7a9a27548f..cb2495fcdc60 100755
--- a/samples/pktgen/pktgen_sample02_multiqueue.sh
+++ b/samples/pktgen/pktgen_sample02_multiqueue.sh
@@ -21,17 +21,21 @@ DELAY="0" # Zero means max speed
[ -z "$CLONE_SKB" ] && CLONE_SKB="0"
# Flow variation random source port between min and max
-UDP_MIN=9
-UDP_MAX=109
+UDP_SRC_MIN=9
+UDP_SRC_MAX=109
# (example of setting default params in your script)
if [ -z "$DEST_IP" ]; then
[ -z "$IP6" ] && DEST_IP="198.18.0.42" || DEST_IP="FD00::1"
fi
[ -z "$DST_MAC" ] && DST_MAC="90:e2:ba:ff:ff:ff"
+if [ -n "$DEST_IP" ]; then
+ validate_addr${IP6} $DEST_IP
+ read -r DST_MIN DST_MAX <<< $(parse_addr${IP6} $DEST_IP)
+fi
if [ -n "$DST_PORT" ]; then
- read -r DST_MIN DST_MAX <<< $(parse_ports $DST_PORT)
- validate_ports $DST_MIN $DST_MAX
+ read -r UDP_DST_MIN UDP_DST_MAX <<< $(parse_ports $DST_PORT)
+ validate_ports $UDP_DST_MIN $UDP_DST_MAX
fi
# General cleanup everything since last run
@@ -62,19 +66,20 @@ for ((thread = $F_THREAD; thread <= $L_THREAD; thread++)); do
# Destination
pg_set $dev "dst_mac $DST_MAC"
- pg_set $dev "dst$IP6 $DEST_IP"
+ pg_set $dev "dst${IP6}_min $DST_MIN"
+ pg_set $dev "dst${IP6}_max $DST_MAX"
if [ -n "$DST_PORT" ]; then
# Single destination port or random port range
pg_set $dev "flag UDPDST_RND"
- pg_set $dev "udp_dst_min $DST_MIN"
- pg_set $dev "udp_dst_max $DST_MAX"
+ pg_set $dev "udp_dst_min $UDP_DST_MIN"
+ pg_set $dev "udp_dst_max $UDP_DST_MAX"
fi
# Setup random UDP port src range
pg_set $dev "flag UDPSRC_RND"
- pg_set $dev "udp_src_min $UDP_MIN"
- pg_set $dev "udp_src_max $UDP_MAX"
+ pg_set $dev "udp_src_min $UDP_SRC_MIN"
+ pg_set $dev "udp_src_max $UDP_SRC_MAX"
done
# start_run
diff --git a/samples/pktgen/pktgen_sample03_burst_single_flow.sh b/samples/pktgen/pktgen_sample03_burst_single_flow.sh
index b520637817ce..fff50765a5aa 100755
--- a/samples/pktgen/pktgen_sample03_burst_single_flow.sh
+++ b/samples/pktgen/pktgen_sample03_burst_single_flow.sh
@@ -33,9 +33,13 @@ fi
[ -z "$BURST" ] && BURST=32
[ -z "$CLONE_SKB" ] && CLONE_SKB="0" # No need for clones when bursting
[ -z "$COUNT" ] && COUNT="0" # Zero means indefinitely
+if [ -n "$DEST_IP" ]; then
+ validate_addr${IP6} $DEST_IP
+ read -r DST_MIN DST_MAX <<< $(parse_addr${IP6} $DEST_IP)
+fi
if [ -n "$DST_PORT" ]; then
- read -r DST_MIN DST_MAX <<< $(parse_ports $DST_PORT)
- validate_ports $DST_MIN $DST_MAX
+ read -r UDP_DST_MIN UDP_DST_MAX <<< $(parse_ports $DST_PORT)
+ validate_ports $UDP_DST_MIN $UDP_DST_MAX
fi
# Base Config
@@ -62,13 +66,14 @@ for ((thread = $F_THREAD; thread <= $L_THREAD; thread++)); do
# Destination
pg_set $dev "dst_mac $DST_MAC"
- pg_set $dev "dst$IP6 $DEST_IP"
+ pg_set $dev "dst${IP6}_min $DST_MIN"
+ pg_set $dev "dst${IP6}_max $DST_MAX"
if [ -n "$DST_PORT" ]; then
# Single destination port or random port range
pg_set $dev "flag UDPDST_RND"
- pg_set $dev "udp_dst_min $DST_MIN"
- pg_set $dev "udp_dst_max $DST_MAX"
+ pg_set $dev "udp_dst_min $UDP_DST_MIN"
+ pg_set $dev "udp_dst_max $UDP_DST_MAX"
fi
# Setup burst, for easy testing -b 0 disable bursting
diff --git a/samples/pktgen/pktgen_sample04_many_flows.sh b/samples/pktgen/pktgen_sample04_many_flows.sh
index 5b6e9d9cb5b5..2cd6b701400d 100755
--- a/samples/pktgen/pktgen_sample04_many_flows.sh
+++ b/samples/pktgen/pktgen_sample04_many_flows.sh
@@ -17,9 +17,13 @@ source ${basedir}/parameters.sh
[ -z "$DST_MAC" ] && DST_MAC="90:e2:ba:ff:ff:ff"
[ -z "$CLONE_SKB" ] && CLONE_SKB="0"
[ -z "$COUNT" ] && COUNT="0" # Zero means indefinitely
+if [ -n "$DEST_IP" ]; then
+ validate_addr $DEST_IP
+ read -r DST_MIN DST_MAX <<< $(parse_addr $DEST_IP)
+fi
if [ -n "$DST_PORT" ]; then
- read -r DST_MIN DST_MAX <<< $(parse_ports $DST_PORT)
- validate_ports $DST_MIN $DST_MAX
+ read -r UDP_DST_MIN UDP_DST_MAX <<< $(parse_ports $DST_PORT)
+ validate_ports $UDP_DST_MIN $UDP_DST_MAX
fi
# NOTICE: Script specific settings
@@ -37,6 +41,9 @@ if [[ -n "$BURST" ]]; then
err 1 "Bursting not supported for this mode"
fi
+# 198.18.0.0 / 198.19.255.255
+read -r SRC_MIN SRC_MAX <<< $(parse_addr 198.18.0.0/15)
+
# General cleanup everything since last run
pg_ctrl "reset"
@@ -58,19 +65,20 @@ for ((thread = $F_THREAD; thread <= $L_THREAD; thread++)); do
# Single destination
pg_set $dev "dst_mac $DST_MAC"
- pg_set $dev "dst $DEST_IP"
+ pg_set $dev "dst_min $DST_MIN"
+ pg_set $dev "dst_max $DST_MAX"
if [ -n "$DST_PORT" ]; then
# Single destination port or random port range
pg_set $dev "flag UDPDST_RND"
- pg_set $dev "udp_dst_min $DST_MIN"
- pg_set $dev "udp_dst_max $DST_MAX"
+ pg_set $dev "udp_dst_min $UDP_DST_MIN"
+ pg_set $dev "udp_dst_max $UDP_DST_MAX"
fi
# Randomize source IP-addresses
pg_set $dev "flag IPSRC_RND"
- pg_set $dev "src_min 198.18.0.0"
- pg_set $dev "src_max 198.19.255.255"
+ pg_set $dev "src_min $SRC_MIN"
+ pg_set $dev "src_max $SRC_MAX"
# Limit number of flows (max 65535)
pg_set $dev "flows $FLOWS"
diff --git a/samples/pktgen/pktgen_sample05_flow_per_thread.sh b/samples/pktgen/pktgen_sample05_flow_per_thread.sh
index 0c06e63fbe97..4cb6252ade39 100755
--- a/samples/pktgen/pktgen_sample05_flow_per_thread.sh
+++ b/samples/pktgen/pktgen_sample05_flow_per_thread.sh
@@ -22,9 +22,13 @@ source ${basedir}/parameters.sh
[ -z "$CLONE_SKB" ] && CLONE_SKB="0"
[ -z "$BURST" ] && BURST=32
[ -z "$COUNT" ] && COUNT="0" # Zero means indefinitely
+if [ -n "$DEST_IP" ]; then
+ validate_addr $DEST_IP
+ read -r DST_MIN DST_MAX <<< $(parse_addr $DEST_IP)
+fi
if [ -n "$DST_PORT" ]; then
- read -r DST_MIN DST_MAX <<< $(parse_ports $DST_PORT)
- validate_ports $DST_MIN $DST_MAX
+ read -r UDP_DST_MIN UDP_DST_MAX <<< $(parse_ports $DST_PORT)
+ validate_ports $UDP_DST_MIN $UDP_DST_MAX
fi
# Base Config
@@ -51,13 +55,14 @@ for ((thread = $F_THREAD; thread <= $L_THREAD; thread++)); do
# Single destination
pg_set $dev "dst_mac $DST_MAC"
- pg_set $dev "dst $DEST_IP"
+ pg_set $dev "dst_min $DST_MIN"
+ pg_set $dev "dst_max $DST_MAX"
if [ -n "$DST_PORT" ]; then
# Single destination port or random port range
pg_set $dev "flag UDPDST_RND"
- pg_set $dev "udp_dst_min $DST_MIN"
- pg_set $dev "udp_dst_max $DST_MAX"
+ pg_set $dev "udp_dst_min $UDP_DST_MIN"
+ pg_set $dev "udp_dst_max $UDP_DST_MAX"
fi
# Setup source IP-addresses based on thread number
diff --git a/samples/pktgen/pktgen_sample06_numa_awared_queue_irq_affinity.sh b/samples/pktgen/pktgen_sample06_numa_awared_queue_irq_affinity.sh
index 97f0266c0356..728106060a02 100755
--- a/samples/pktgen/pktgen_sample06_numa_awared_queue_irq_affinity.sh
+++ b/samples/pktgen/pktgen_sample06_numa_awared_queue_irq_affinity.sh
@@ -20,8 +20,8 @@ DELAY="0" # Zero means max speed
[ -z "$CLONE_SKB" ] && CLONE_SKB="0"
# Flow variation random source port between min and max
-UDP_MIN=9
-UDP_MAX=109
+UDP_SRC_MIN=9
+UDP_SRC_MAX=109
node=`get_iface_node $DEV`
irq_array=(`get_iface_irqs $DEV`)
@@ -35,9 +35,13 @@ if [ -z "$DEST_IP" ]; then
[ -z "$IP6" ] && DEST_IP="198.18.0.42" || DEST_IP="FD00::1"
fi
[ -z "$DST_MAC" ] && DST_MAC="90:e2:ba:ff:ff:ff"
+if [ -n "$DEST_IP" ]; then
+ validate_addr${IP6} $DEST_IP
+ read -r DST_MIN DST_MAX <<< $(parse_addr${IP6} $DEST_IP)
+fi
if [ -n "$DST_PORT" ]; then
- read -r DST_MIN DST_MAX <<< $(parse_ports $DST_PORT)
- validate_ports $DST_MIN $DST_MAX
+ read -r UDP_DST_MIN UDP_DST_MAX <<< $(parse_ports $DST_PORT)
+ validate_ports $UDP_DST_MIN $UDP_DST_MAX
fi
# General cleanup everything since last run
@@ -79,19 +83,20 @@ for ((i = 0; i < $THREADS; i++)); do
# Destination
pg_set $dev "dst_mac $DST_MAC"
- pg_set $dev "dst$IP6 $DEST_IP"
+ pg_set $dev "dst${IP6}_min $DST_MIN"
+ pg_set $dev "dst${IP6}_max $DST_MAX"
if [ -n "$DST_PORT" ]; then
# Single destination port or random port range
pg_set $dev "flag UDPDST_RND"
- pg_set $dev "udp_dst_min $DST_MIN"
- pg_set $dev "udp_dst_max $DST_MAX"
+ pg_set $dev "udp_dst_min $UDP_DST_MIN"
+ pg_set $dev "udp_dst_max $UDP_DST_MAX"
fi
# Setup random UDP port src range
pg_set $dev "flag UDPSRC_RND"
- pg_set $dev "udp_src_min $UDP_MIN"
- pg_set $dev "udp_src_max $UDP_MAX"
+ pg_set $dev "udp_src_min $UDP_SRC_MIN"
+ pg_set $dev "udp_src_max $UDP_SRC_MAX"
done
# start_run
diff --git a/scripts/bpf_helpers_doc.py b/scripts/bpf_helpers_doc.py
index 894cc58c1a03..7548569e8076 100755
--- a/scripts/bpf_helpers_doc.py
+++ b/scripts/bpf_helpers_doc.py
@@ -391,6 +391,154 @@ SEE ALSO
print('')
+class PrinterHelpers(Printer):
+ """
+ A printer for dumping collected information about helpers as a C header
+ to be included from BPF programs.
+ @helpers: array of Helper objects to print to standard output
+ """
+
+ type_fwds = [
+ 'struct bpf_fib_lookup',
+ 'struct bpf_perf_event_data',
+ 'struct bpf_perf_event_value',
+ 'struct bpf_sock',
+ 'struct bpf_sock_addr',
+ 'struct bpf_sock_ops',
+ 'struct bpf_sock_tuple',
+ 'struct bpf_spin_lock',
+ 'struct bpf_sysctl',
+ 'struct bpf_tcp_sock',
+ 'struct bpf_tunnel_key',
+ 'struct bpf_xfrm_state',
+ 'struct pt_regs',
+ 'struct sk_reuseport_md',
+ 'struct sockaddr',
+ 'struct tcphdr',
+
+ 'struct __sk_buff',
+ 'struct sk_msg_md',
+ 'struct xdp_md',
+ ]
+ known_types = {
+ '...',
+ 'void',
+ 'const void',
+ 'char',
+ 'const char',
+ 'int',
+ 'long',
+ 'unsigned long',
+
+ '__be16',
+ '__be32',
+ '__wsum',
+
+ 'struct bpf_fib_lookup',
+ 'struct bpf_perf_event_data',
+ 'struct bpf_perf_event_value',
+ 'struct bpf_sock',
+ 'struct bpf_sock_addr',
+ 'struct bpf_sock_ops',
+ 'struct bpf_sock_tuple',
+ 'struct bpf_spin_lock',
+ 'struct bpf_sysctl',
+ 'struct bpf_tcp_sock',
+ 'struct bpf_tunnel_key',
+ 'struct bpf_xfrm_state',
+ 'struct pt_regs',
+ 'struct sk_reuseport_md',
+ 'struct sockaddr',
+ 'struct tcphdr',
+ }
+ mapped_types = {
+ 'u8': '__u8',
+ 'u16': '__u16',
+ 'u32': '__u32',
+ 'u64': '__u64',
+ 's8': '__s8',
+ 's16': '__s16',
+ 's32': '__s32',
+ 's64': '__s64',
+ 'size_t': 'unsigned long',
+ 'struct bpf_map': 'void',
+ 'struct sk_buff': 'struct __sk_buff',
+ 'const struct sk_buff': 'const struct __sk_buff',
+ 'struct sk_msg_buff': 'struct sk_msg_md',
+ 'struct xdp_buff': 'struct xdp_md',
+ }
+
+ def print_header(self):
+ header = '''\
+/* This is auto-generated file. See bpf_helpers_doc.py for details. */
+
+/* Forward declarations of BPF structs */'''
+
+ print(header)
+ for fwd in self.type_fwds:
+ print('%s;' % fwd)
+ print('')
+
+ def print_footer(self):
+ footer = ''
+ print(footer)
+
+ def map_type(self, t):
+ if t in self.known_types:
+ return t
+ if t in self.mapped_types:
+ return self.mapped_types[t]
+ print("Unrecognized type '%s', please add it to known types!" % t,
+ file=sys.stderr)
+ sys.exit(1)
+
+ seen_helpers = set()
+
+ def print_one(self, helper):
+ proto = helper.proto_break_down()
+
+ if proto['name'] in self.seen_helpers:
+ return
+ self.seen_helpers.add(proto['name'])
+
+ print('/*')
+ print(" * %s" % proto['name'])
+ print(" *")
+ if (helper.desc):
+ # Do not strip all newline characters: formatted code at the end of
+ # a section must be followed by a blank line.
+ for line in re.sub('\n$', '', helper.desc, count=1).split('\n'):
+ print(' *{}{}'.format(' \t' if line else '', line))
+
+ if (helper.ret):
+ print(' *')
+ print(' * Returns')
+ for line in helper.ret.rstrip().split('\n'):
+ print(' *{}{}'.format(' \t' if line else '', line))
+
+ print(' */')
+ print('static %s %s(*%s)(' % (self.map_type(proto['ret_type']),
+ proto['ret_star'], proto['name']), end='')
+ comma = ''
+ for i, a in enumerate(proto['args']):
+ t = a['type']
+ n = a['name']
+ if proto['name'] == 'bpf_get_socket_cookie' and i == 0:
+ t = 'void'
+ n = 'ctx'
+ one_arg = '{}{}'.format(comma, self.map_type(t))
+ if n:
+ if a['star']:
+ one_arg += ' {}'.format(a['star'])
+ else:
+ one_arg += ' '
+ one_arg += '{}'.format(n)
+ comma = ', '
+ print(one_arg, end='')
+
+ print(') = (void *) %d;' % len(self.seen_helpers))
+ print('')
+
###############################################################################
# If script is launched from scripts/ from kernel tree and can access
@@ -405,6 +553,8 @@ Parse eBPF header file and generate documentation for eBPF helper functions.
The RST-formatted output produced can be turned into a manual page with the
rst2man utility.
""")
+argParser.add_argument('--header', action='store_true',
+ help='generate C header file')
if (os.path.isfile(bpfh)):
argParser.add_argument('--filename', help='path to include/uapi/linux/bpf.h',
default=bpfh)
@@ -417,5 +567,8 @@ headerParser = HeaderParser(args.filename)
headerParser.run()
# Print formatted output to standard output.
-printer = PrinterRST(headerParser.helpers)
+if args.header:
+ printer = PrinterHelpers(headerParser.helpers)
+else:
+ printer = PrinterRST(headerParser.helpers)
printer.print_all()
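With the new --header mode, the script emits a C header in which every helper becomes a function pointer cast from its helper ID. A hedged example of the shape PrinterHelpers.print_one() produces for a single helper (the exact comment text comes from the parsed bpf.h):

	/*
	 * bpf_map_lookup_elem
	 *
	 * 	Perform a lookup in *map* for an entry associated to *key*.
	 *
	 * Returns
	 * 	Map value associated to *key*, or **NULL** if no entry was found.
	 */
	static void *(*bpf_map_lookup_elem)(void *map, const void *key) = (void *) 1;

Note how 'struct bpf_map *' is rewritten to 'void *' via mapped_types, and how the trailing integer is the 1-based position in seen_helpers, which lines up with the helper's call number.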
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 6fcc66afb088..64890be3c8fd 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -6015,14 +6015,18 @@ sub process {
for (my $count = $linenr; $count <= $lc; $count++) {
my $specifier;
my $extension;
+ my $qualifier;
my $bad_specifier = "";
my $fmt = get_quoted_string($lines[$count - 1], raw_line($count, 0));
$fmt =~ s/%%//g;
- while ($fmt =~ /(\%[\*\d\.]*p(\w))/g) {
+ while ($fmt =~ /(\%[\*\d\.]*p(\w)(\w*))/g) {
$specifier = $1;
$extension = $2;
- if ($extension !~ /[SsBKRraEhMmIiUDdgVCbGNOxt]/) {
+ $qualifier = $3;
+ if ($extension !~ /[SsBKRraEehMmIiUDdgVCbGNOxtf]/ ||
+ ($extension eq "f" &&
+ defined $qualifier && $qualifier !~ /^w/)) {
$bad_specifier = $specifier;
last;
}
@@ -6039,7 +6043,6 @@ sub process {
my $ext_type = "Invalid";
my $use = "";
if ($bad_specifier =~ /p[Ff]/) {
- $ext_type = "Deprecated";
$use = " - use %pS instead";
$use =~ s/pS/ps/ if ($bad_specifier =~ /pf/);
}
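The widened check accepts the newer %pe (symbolic error pointers) and %pfw (fwnode path) extensions while still rejecting %pf with any qualifier other than a leading 'w'. Hedged usage examples of the now-tolerated specifiers ('fwnode' here is an assumed struct fwnode_handle pointer, not taken from this hunk):

	pr_warn("probe failed: %pe\n", ERR_PTR(-ENOMEM));	/* symbolic errno */
	pr_info("resolved: %pfw\n", fwnode);			/* full node path */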
diff --git a/scripts/tools-support-relr.sh b/scripts/tools-support-relr.sh
index 97a2c844a95e..45e8aa360b45 100755
--- a/scripts/tools-support-relr.sh
+++ b/scripts/tools-support-relr.sh
@@ -4,13 +4,13 @@
tmp_file=$(mktemp)
trap "rm -f $tmp_file.o $tmp_file $tmp_file.bin" EXIT
-cat << "END" | "$CC" -c -x c - -o $tmp_file.o >/dev/null 2>&1
+cat << "END" | $CC -c -x c - -o $tmp_file.o >/dev/null 2>&1
void *p = &p;
END
-"$LD" $tmp_file.o -shared -Bsymbolic --pack-dyn-relocs=relr -o $tmp_file
+$LD $tmp_file.o -shared -Bsymbolic --pack-dyn-relocs=relr -o $tmp_file
# Despite printing an error message, GNU nm still exits with exit code 0 if it
# sees a relr section. So we need to check that nothing is printed to stderr.
-test -z "$("$NM" $tmp_file 2>&1 >/dev/null)"
+test -z "$($NM $tmp_file 2>&1 >/dev/null)"
-"$OBJCOPY" -O binary $tmp_file $tmp_file.bin
+$OBJCOPY -O binary $tmp_file $tmp_file.bin
diff --git a/security/keys/Makefile b/security/keys/Makefile
index 9cef54064f60..074f27538f55 100644
--- a/security/keys/Makefile
+++ b/security/keys/Makefile
@@ -28,5 +28,5 @@ obj-$(CONFIG_ASYMMETRIC_KEY_TYPE) += keyctl_pkey.o
# Key types
#
obj-$(CONFIG_BIG_KEYS) += big_key.o
-obj-$(CONFIG_TRUSTED_KEYS) += trusted.o
+obj-$(CONFIG_TRUSTED_KEYS) += trusted-keys/
obj-$(CONFIG_ENCRYPTED_KEYS) += encrypted-keys/
diff --git a/security/keys/trusted-keys/Makefile b/security/keys/trusted-keys/Makefile
new file mode 100644
index 000000000000..7b73cebbb378
--- /dev/null
+++ b/security/keys/trusted-keys/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for trusted keys
+#
+
+obj-$(CONFIG_TRUSTED_KEYS) += trusted.o
+trusted-y += trusted_tpm1.o
+trusted-y += trusted_tpm2.o
diff --git a/security/keys/trusted.c b/security/keys/trusted-keys/trusted_tpm1.c
index 1fbd77816610..d2c5ec1e040b 100644
--- a/security/keys/trusted.c
+++ b/security/keys/trusted-keys/trusted_tpm1.c
@@ -27,7 +27,7 @@
#include <linux/tpm.h>
#include <linux/tpm_command.h>
-#include <keys/trusted.h>
+#include <keys/trusted_tpm.h>
static const char hmac_alg[] = "hmac(sha1)";
static const char hash_alg[] = "sha1";
@@ -406,13 +406,10 @@ static int osap(struct tpm_buf *tb, struct osapsess *s,
if (ret != TPM_NONCE_SIZE)
return ret;
- INIT_BUF(tb);
- store16(tb, TPM_TAG_RQU_COMMAND);
- store32(tb, TPM_OSAP_SIZE);
- store32(tb, TPM_ORD_OSAP);
- store16(tb, type);
- store32(tb, handle);
- storebytes(tb, ononce, TPM_NONCE_SIZE);
+ tpm_buf_reset(tb, TPM_TAG_RQU_COMMAND, TPM_ORD_OSAP);
+ tpm_buf_append_u16(tb, type);
+ tpm_buf_append_u32(tb, handle);
+ tpm_buf_append(tb, ononce, TPM_NONCE_SIZE);
ret = trusted_tpm_send(tb->data, MAX_BUF_SIZE);
if (ret < 0)
@@ -437,10 +434,7 @@ int oiap(struct tpm_buf *tb, uint32_t *handle, unsigned char *nonce)
if (!chip)
return -ENODEV;
- INIT_BUF(tb);
- store16(tb, TPM_TAG_RQU_COMMAND);
- store32(tb, TPM_OIAP_SIZE);
- store32(tb, TPM_ORD_OIAP);
+ tpm_buf_reset(tb, TPM_TAG_RQU_COMMAND, TPM_ORD_OIAP);
ret = trusted_tpm_send(tb->data, MAX_BUF_SIZE);
if (ret < 0)
return ret;
@@ -535,20 +529,17 @@ static int tpm_seal(struct tpm_buf *tb, uint16_t keytype,
goto out;
/* build and send the TPM request packet */
- INIT_BUF(tb);
- store16(tb, TPM_TAG_RQU_AUTH1_COMMAND);
- store32(tb, TPM_SEAL_SIZE + pcrinfosize + datalen);
- store32(tb, TPM_ORD_SEAL);
- store32(tb, keyhandle);
- storebytes(tb, td->encauth, SHA1_DIGEST_SIZE);
- store32(tb, pcrinfosize);
- storebytes(tb, pcrinfo, pcrinfosize);
- store32(tb, datalen);
- storebytes(tb, data, datalen);
- store32(tb, sess.handle);
- storebytes(tb, td->nonceodd, TPM_NONCE_SIZE);
- store8(tb, cont);
- storebytes(tb, td->pubauth, SHA1_DIGEST_SIZE);
+ tpm_buf_reset(tb, TPM_TAG_RQU_AUTH1_COMMAND, TPM_ORD_SEAL);
+ tpm_buf_append_u32(tb, keyhandle);
+ tpm_buf_append(tb, td->encauth, SHA1_DIGEST_SIZE);
+ tpm_buf_append_u32(tb, pcrinfosize);
+ tpm_buf_append(tb, pcrinfo, pcrinfosize);
+ tpm_buf_append_u32(tb, datalen);
+ tpm_buf_append(tb, data, datalen);
+ tpm_buf_append_u32(tb, sess.handle);
+ tpm_buf_append(tb, td->nonceodd, TPM_NONCE_SIZE);
+ tpm_buf_append_u8(tb, cont);
+ tpm_buf_append(tb, td->pubauth, SHA1_DIGEST_SIZE);
ret = trusted_tpm_send(tb->data, MAX_BUF_SIZE);
if (ret < 0)
@@ -594,7 +585,6 @@ static int tpm_unseal(struct tpm_buf *tb,
uint32_t authhandle2 = 0;
unsigned char cont = 0;
uint32_t ordinal;
- uint32_t keyhndl;
int ret;
/* sessions for unsealing key and data */
@@ -610,7 +600,6 @@ static int tpm_unseal(struct tpm_buf *tb,
}
ordinal = htonl(TPM_ORD_UNSEAL);
- keyhndl = htonl(SRKHANDLE);
ret = tpm_get_random(chip, nonceodd, TPM_NONCE_SIZE);
if (ret != TPM_NONCE_SIZE) {
pr_info("trusted_key: tpm_get_random failed (%d)\n", ret);
@@ -628,20 +617,17 @@ static int tpm_unseal(struct tpm_buf *tb,
return ret;
/* build and send TPM request packet */
- INIT_BUF(tb);
- store16(tb, TPM_TAG_RQU_AUTH2_COMMAND);
- store32(tb, TPM_UNSEAL_SIZE + bloblen);
- store32(tb, TPM_ORD_UNSEAL);
- store32(tb, keyhandle);
- storebytes(tb, blob, bloblen);
- store32(tb, authhandle1);
- storebytes(tb, nonceodd, TPM_NONCE_SIZE);
- store8(tb, cont);
- storebytes(tb, authdata1, SHA1_DIGEST_SIZE);
- store32(tb, authhandle2);
- storebytes(tb, nonceodd, TPM_NONCE_SIZE);
- store8(tb, cont);
- storebytes(tb, authdata2, SHA1_DIGEST_SIZE);
+ tpm_buf_reset(tb, TPM_TAG_RQU_AUTH2_COMMAND, TPM_ORD_UNSEAL);
+ tpm_buf_append_u32(tb, keyhandle);
+ tpm_buf_append(tb, blob, bloblen);
+ tpm_buf_append_u32(tb, authhandle1);
+ tpm_buf_append(tb, nonceodd, TPM_NONCE_SIZE);
+ tpm_buf_append_u8(tb, cont);
+ tpm_buf_append(tb, authdata1, SHA1_DIGEST_SIZE);
+ tpm_buf_append_u32(tb, authhandle2);
+ tpm_buf_append(tb, nonceodd, TPM_NONCE_SIZE);
+ tpm_buf_append_u8(tb, cont);
+ tpm_buf_append(tb, authdata2, SHA1_DIGEST_SIZE);
ret = trusted_tpm_send(tb->data, MAX_BUF_SIZE);
if (ret < 0) {
@@ -670,23 +656,23 @@ static int tpm_unseal(struct tpm_buf *tb,
static int key_seal(struct trusted_key_payload *p,
struct trusted_key_options *o)
{
- struct tpm_buf *tb;
+ struct tpm_buf tb;
int ret;
- tb = kzalloc(sizeof *tb, GFP_KERNEL);
- if (!tb)
- return -ENOMEM;
+ ret = tpm_buf_init(&tb, 0, 0);
+ if (ret)
+ return ret;
/* include migratable flag at end of sealed key */
p->key[p->key_len] = p->migratable;
- ret = tpm_seal(tb, o->keytype, o->keyhandle, o->keyauth,
+ ret = tpm_seal(&tb, o->keytype, o->keyhandle, o->keyauth,
p->key, p->key_len + 1, p->blob, &p->blob_len,
o->blobauth, o->pcrinfo, o->pcrinfo_len);
if (ret < 0)
pr_info("trusted_key: srkseal failed (%d)\n", ret);
- kzfree(tb);
+ tpm_buf_destroy(&tb);
return ret;
}
@@ -696,14 +682,14 @@ static int key_seal(struct trusted_key_payload *p,
static int key_unseal(struct trusted_key_payload *p,
struct trusted_key_options *o)
{
- struct tpm_buf *tb;
+ struct tpm_buf tb;
int ret;
- tb = kzalloc(sizeof *tb, GFP_KERNEL);
- if (!tb)
- return -ENOMEM;
+ ret = tpm_buf_init(&tb, 0, 0);
+ if (ret)
+ return ret;
- ret = tpm_unseal(tb, o->keyhandle, o->keyauth, p->blob, p->blob_len,
+ ret = tpm_unseal(&tb, o->keyhandle, o->keyauth, p->blob, p->blob_len,
o->blobauth, p->key, &p->key_len);
if (ret < 0)
pr_info("trusted_key: srkunseal failed (%d)\n", ret);
@@ -711,7 +697,7 @@ static int key_unseal(struct trusted_key_payload *p,
/* pull migratable flag out of sealed key */
p->migratable = p->key[--p->key_len];
- kzfree(tb);
+ tpm_buf_destroy(&tb);
return ret;
}
@@ -1016,7 +1002,7 @@ static int trusted_instantiate(struct key *key,
switch (key_cmd) {
case Opt_load:
if (tpm2)
- ret = tpm_unseal_trusted(chip, payload, options);
+ ret = tpm2_unseal_trusted(chip, payload, options);
else
ret = key_unseal(payload, options);
dump_payload(payload);
@@ -1032,7 +1018,7 @@ static int trusted_instantiate(struct key *key,
goto out;
}
if (tpm2)
- ret = tpm_seal_trusted(chip, payload, options);
+ ret = tpm2_seal_trusted(chip, payload, options);
else
ret = key_seal(payload, options);
if (ret < 0)
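The rewrite above replaces the open-coded store8/store16/store32/storebytes sequences with the tpm_buf API, which sizes the buffer, keeps the running length in the command header up to date, and flags overflow. The general pattern, sketched (error handling abbreviated; the real code pairs tpm_buf_init() in the key_seal()/key_unseal() callers with tpm_buf_reset() in the command builders):

	struct tpm_buf buf;
	int rc;

	rc = tpm_buf_init(&buf, TPM_TAG_RQU_COMMAND, TPM_ORD_OSAP);
	if (rc)
		return rc;

	tpm_buf_append_u16(&buf, type);		/* each append also bumps  */
	tpm_buf_append_u32(&buf, handle);	/* the length field in the */
	tpm_buf_append(&buf, nonce, TPM_NONCE_SIZE); /* command header     */

	rc = trusted_tpm_send(buf.data, MAX_BUF_SIZE);
	tpm_buf_destroy(&buf);
	return rc;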
diff --git a/security/keys/trusted-keys/trusted_tpm2.c b/security/keys/trusted-keys/trusted_tpm2.c
new file mode 100644
index 000000000000..a9810ac2776f
--- /dev/null
+++ b/security/keys/trusted-keys/trusted_tpm2.c
@@ -0,0 +1,314 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2004 IBM Corporation
+ * Copyright (C) 2014 Intel Corporation
+ */
+
+#include <linux/string.h>
+#include <linux/err.h>
+#include <linux/tpm.h>
+#include <linux/tpm_command.h>
+
+#include <keys/trusted-type.h>
+#include <keys/trusted_tpm.h>
+
+static struct tpm2_hash tpm2_hash_map[] = {
+ {HASH_ALGO_SHA1, TPM_ALG_SHA1},
+ {HASH_ALGO_SHA256, TPM_ALG_SHA256},
+ {HASH_ALGO_SHA384, TPM_ALG_SHA384},
+ {HASH_ALGO_SHA512, TPM_ALG_SHA512},
+ {HASH_ALGO_SM3_256, TPM_ALG_SM3_256},
+};
+
+/**
+ * tpm_buf_append_auth() - append TPMS_AUTH_COMMAND to the buffer.
+ *
+ * @buf: an allocated tpm_buf instance
+ * @session_handle: session handle
+ * @nonce: the session nonce, may be NULL if not used
+ * @nonce_len: the session nonce length, may be 0 if not used
+ * @attributes: the session attributes
+ * @hmac: the session HMAC or password, may be NULL if not used
+ * @hmac_len: the session HMAC or password length, may be 0 if not used
+ */
+static void tpm2_buf_append_auth(struct tpm_buf *buf, u32 session_handle,
+ const u8 *nonce, u16 nonce_len,
+ u8 attributes,
+ const u8 *hmac, u16 hmac_len)
+{
+ tpm_buf_append_u32(buf, 9 + nonce_len + hmac_len);
+ tpm_buf_append_u32(buf, session_handle);
+ tpm_buf_append_u16(buf, nonce_len);
+
+ if (nonce && nonce_len)
+ tpm_buf_append(buf, nonce, nonce_len);
+
+ tpm_buf_append_u8(buf, attributes);
+ tpm_buf_append_u16(buf, hmac_len);
+
+ if (hmac && hmac_len)
+ tpm_buf_append(buf, hmac, hmac_len);
+}
+
+/**
+ * tpm2_seal_trusted() - seal the payload of a trusted key
+ *
+ * @chip: TPM chip to use
+ * @payload: the key data in clear and encrypted form
+ * @options: authentication values and other options
+ *
+ * Return: < 0 on error and 0 on success.
+ */
+int tpm2_seal_trusted(struct tpm_chip *chip,
+ struct trusted_key_payload *payload,
+ struct trusted_key_options *options)
+{
+ unsigned int blob_len;
+ struct tpm_buf buf;
+ u32 hash;
+ int i;
+ int rc;
+
+ for (i = 0; i < ARRAY_SIZE(tpm2_hash_map); i++) {
+ if (options->hash == tpm2_hash_map[i].crypto_id) {
+ hash = tpm2_hash_map[i].tpm_id;
+ break;
+ }
+ }
+
+ if (i == ARRAY_SIZE(tpm2_hash_map))
+ return -EINVAL;
+
+ rc = tpm_buf_init(&buf, TPM2_ST_SESSIONS, TPM2_CC_CREATE);
+ if (rc)
+ return rc;
+
+ tpm_buf_append_u32(&buf, options->keyhandle);
+ tpm2_buf_append_auth(&buf, TPM2_RS_PW,
+ NULL /* nonce */, 0,
+ 0 /* session_attributes */,
+ options->keyauth /* hmac */,
+ TPM_DIGEST_SIZE);
+
+ /* sensitive */
+ tpm_buf_append_u16(&buf, 4 + TPM_DIGEST_SIZE + payload->key_len + 1);
+
+ tpm_buf_append_u16(&buf, TPM_DIGEST_SIZE);
+ tpm_buf_append(&buf, options->blobauth, TPM_DIGEST_SIZE);
+ tpm_buf_append_u16(&buf, payload->key_len + 1);
+ tpm_buf_append(&buf, payload->key, payload->key_len);
+ tpm_buf_append_u8(&buf, payload->migratable);
+
+ /* public */
+ tpm_buf_append_u16(&buf, 14 + options->policydigest_len);
+ tpm_buf_append_u16(&buf, TPM_ALG_KEYEDHASH);
+ tpm_buf_append_u16(&buf, hash);
+
+ /* policy */
+ if (options->policydigest_len) {
+ tpm_buf_append_u32(&buf, 0);
+ tpm_buf_append_u16(&buf, options->policydigest_len);
+ tpm_buf_append(&buf, options->policydigest,
+ options->policydigest_len);
+ } else {
+ tpm_buf_append_u32(&buf, TPM2_OA_USER_WITH_AUTH);
+ tpm_buf_append_u16(&buf, 0);
+ }
+
+ /* public parameters */
+ tpm_buf_append_u16(&buf, TPM_ALG_NULL);
+ tpm_buf_append_u16(&buf, 0);
+
+ /* outside info */
+ tpm_buf_append_u16(&buf, 0);
+
+ /* creation PCR */
+ tpm_buf_append_u32(&buf, 0);
+
+ if (buf.flags & TPM_BUF_OVERFLOW) {
+ rc = -E2BIG;
+ goto out;
+ }
+
+ rc = tpm_send(chip, buf.data, tpm_buf_length(&buf));
+ if (rc)
+ goto out;
+
+ blob_len = be32_to_cpup((__be32 *) &buf.data[TPM_HEADER_SIZE]);
+ if (blob_len > MAX_BLOB_SIZE) {
+ rc = -E2BIG;
+ goto out;
+ }
+ if (tpm_buf_length(&buf) < TPM_HEADER_SIZE + 4 + blob_len) {
+ rc = -EFAULT;
+ goto out;
+ }
+
+ memcpy(payload->blob, &buf.data[TPM_HEADER_SIZE + 4], blob_len);
+ payload->blob_len = blob_len;
+
+out:
+ tpm_buf_destroy(&buf);
+
+ if (rc > 0) {
+ if (tpm2_rc_value(rc) == TPM2_RC_HASH)
+ rc = -EINVAL;
+ else
+ rc = -EPERM;
+ }
+
+ return rc;
+}
+
+/**
+ * tpm2_load_cmd() - execute a TPM2_Load command
+ *
+ * @chip: TPM chip to use
+ * @payload: the key data in clear and encrypted form
+ * @options: authentication values and other options
+ * @blob_handle: returned blob handle
+ *
+ * Return: 0 on success.
+ * -E2BIG on wrong payload size.
+ * -EPERM on tpm error status.
+ * < 0 error from tpm_send.
+ */
+static int tpm2_load_cmd(struct tpm_chip *chip,
+ struct trusted_key_payload *payload,
+ struct trusted_key_options *options,
+ u32 *blob_handle)
+{
+ struct tpm_buf buf;
+ unsigned int private_len;
+ unsigned int public_len;
+ unsigned int blob_len;
+ int rc;
+
+ private_len = be16_to_cpup((__be16 *) &payload->blob[0]);
+ if (private_len > (payload->blob_len - 2))
+ return -E2BIG;
+
+ public_len = be16_to_cpup((__be16 *) &payload->blob[2 + private_len]);
+ blob_len = private_len + public_len + 4;
+ if (blob_len > payload->blob_len)
+ return -E2BIG;
+
+ rc = tpm_buf_init(&buf, TPM2_ST_SESSIONS, TPM2_CC_LOAD);
+ if (rc)
+ return rc;
+
+ tpm_buf_append_u32(&buf, options->keyhandle);
+ tpm2_buf_append_auth(&buf, TPM2_RS_PW,
+ NULL /* nonce */, 0,
+ 0 /* session_attributes */,
+ options->keyauth /* hmac */,
+ TPM_DIGEST_SIZE);
+
+ tpm_buf_append(&buf, payload->blob, blob_len);
+
+ if (buf.flags & TPM_BUF_OVERFLOW) {
+ rc = -E2BIG;
+ goto out;
+ }
+
+ rc = tpm_send(chip, buf.data, tpm_buf_length(&buf));
+ if (!rc)
+ *blob_handle = be32_to_cpup(
+ (__be32 *) &buf.data[TPM_HEADER_SIZE]);
+
+out:
+ tpm_buf_destroy(&buf);
+
+ if (rc > 0)
+ rc = -EPERM;
+
+ return rc;
+}
+
+/**
+ * tpm2_unseal_cmd() - execute a TPM2_Unseal command
+ *
+ * @chip: TPM chip to use
+ * @payload: the key data in clear and encrypted form
+ * @options: authentication values and other options
+ * @blob_handle: blob handle
+ *
+ * Return: 0 on success
+ * -EPERM on tpm error status
+ * < 0 error from tpm_send
+ */
+static int tpm2_unseal_cmd(struct tpm_chip *chip,
+ struct trusted_key_payload *payload,
+ struct trusted_key_options *options,
+ u32 blob_handle)
+{
+ struct tpm_buf buf;
+ u16 data_len;
+ u8 *data;
+ int rc;
+
+ rc = tpm_buf_init(&buf, TPM2_ST_SESSIONS, TPM2_CC_UNSEAL);
+ if (rc)
+ return rc;
+
+ tpm_buf_append_u32(&buf, blob_handle);
+ tpm2_buf_append_auth(&buf,
+ options->policyhandle ?
+ options->policyhandle : TPM2_RS_PW,
+ NULL /* nonce */, 0,
+ TPM2_SA_CONTINUE_SESSION,
+ options->blobauth /* hmac */,
+ TPM_DIGEST_SIZE);
+
+ rc = tpm_send(chip, buf.data, tpm_buf_length(&buf));
+ if (rc > 0)
+ rc = -EPERM;
+
+ if (!rc) {
+ data_len = be16_to_cpup(
+ (__be16 *) &buf.data[TPM_HEADER_SIZE + 4]);
+ if (data_len < MIN_KEY_SIZE || data_len > MAX_KEY_SIZE + 1) {
+ rc = -EFAULT;
+ goto out;
+ }
+
+ if (tpm_buf_length(&buf) < TPM_HEADER_SIZE + 6 + data_len) {
+ rc = -EFAULT;
+ goto out;
+ }
+ data = &buf.data[TPM_HEADER_SIZE + 6];
+
+ memcpy(payload->key, data, data_len - 1);
+ payload->key_len = data_len - 1;
+ payload->migratable = data[data_len - 1];
+ }
+
+out:
+ tpm_buf_destroy(&buf);
+ return rc;
+}
+
+/**
+ * tpm2_unseal_trusted() - unseal the payload of a trusted key
+ *
+ * @chip: TPM chip to use
+ * @payload: the key data in clear and encrypted form
+ * @options: authentication values and other options
+ *
+ * Return: Same as with tpm_send.
+ */
+int tpm2_unseal_trusted(struct tpm_chip *chip,
+ struct trusted_key_payload *payload,
+ struct trusted_key_options *options)
+{
+ u32 blob_handle;
+ int rc;
+
+ rc = tpm2_load_cmd(chip, payload, options, &blob_handle);
+ if (rc)
+ return rc;
+
+ rc = tpm2_unseal_cmd(chip, payload, options, blob_handle);
+
+ return rc;
+}
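tpm2_load_cmd() above consumes the sealed blob as two big-endian, length-prefixed TPM2B structures laid end to end. The offsets it computes correspond to this layout (an illustration derived from the code, not a separate spec excerpt):

	/*
	 * payload->blob as parsed by tpm2_load_cmd():
	 *
	 *   [0]                  __be16 private_len
	 *   [2]                  TPM2B_PRIVATE body, private_len bytes
	 *   [2 + private_len]    __be16 public_len
	 *   [4 + private_len]    TPM2B_PUBLIC body, public_len bytes
	 *
	 * blob_len = private_len + public_len + 4 must not exceed
	 * payload->blob_len, which is what the two -E2BIG checks enforce.
	 */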
diff --git a/security/safesetid/securityfs.c b/security/safesetid/securityfs.c
index 74a13d432ed8..f8bc574cea9c 100644
--- a/security/safesetid/securityfs.c
+++ b/security/safesetid/securityfs.c
@@ -179,8 +179,8 @@ out_free_rule:
* doesn't currently exist, just use a spinlock for now.
*/
mutex_lock(&policy_update_lock);
- rcu_swap_protected(safesetid_setuid_rules, pol,
- lockdep_is_held(&policy_update_lock));
+ pol = rcu_replace_pointer(safesetid_setuid_rules, pol,
+ lockdep_is_held(&policy_update_lock));
mutex_unlock(&policy_update_lock);
err = len;
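Unlike rcu_swap_protected(), rcu_replace_pointer() returns the old pointer instead of swapping it back through its argument, which makes the publish-then-free pattern read naturally. A generic sketch of that pattern (all names hypothetical):

	struct ruleset *old;

	mutex_lock(&update_lock);
	old = rcu_replace_pointer(global_rules, new_rules,
				  lockdep_is_held(&update_lock));
	mutex_unlock(&update_lock);

	synchronize_rcu();	/* no reader can still hold 'old' */
	kfree(old);

In the safesetid code the old ruleset lands back in 'pol', so the function's existing cleanup path after the unlock can release it unchanged.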
diff --git a/security/security.c b/security/security.c
index 1bc000f834e2..cd2d18d2d279 100644
--- a/security/security.c
+++ b/security/security.c
@@ -2404,3 +2404,30 @@ int security_locked_down(enum lockdown_reason what)
return call_int_hook(locked_down, 0, what);
}
EXPORT_SYMBOL(security_locked_down);
+
+#ifdef CONFIG_PERF_EVENTS
+int security_perf_event_open(struct perf_event_attr *attr, int type)
+{
+ return call_int_hook(perf_event_open, 0, attr, type);
+}
+
+int security_perf_event_alloc(struct perf_event *event)
+{
+ return call_int_hook(perf_event_alloc, 0, event);
+}
+
+void security_perf_event_free(struct perf_event *event)
+{
+ call_void_hook(perf_event_free, event);
+}
+
+int security_perf_event_read(struct perf_event *event)
+{
+ return call_int_hook(perf_event_read, 0, event);
+}
+
+int security_perf_event_write(struct perf_event *event)
+{
+ return call_int_hook(perf_event_write, 0, event);
+}
+#endif /* CONFIG_PERF_EVENTS */
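These wrappers are the LSM entry points the perf core is expected to call at open, allocation, teardown, and on read/write access. A hedged sketch of the open-path call site (simplified, not a verbatim excerpt from kernel/events/core.c):

	int err = security_perf_event_open(&attr, PERF_SECURITY_OPEN);
	if (err)
		return err;

with security_perf_event_alloc()/security_perf_event_free() bracketing the event's lifetime and the read/write hooks guarding access to an existing event.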
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 9625b99e677f..28eb05490d59 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -6795,6 +6795,67 @@ struct lsm_blob_sizes selinux_blob_sizes __lsm_ro_after_init = {
.lbs_msg_msg = sizeof(struct msg_security_struct),
};
+#ifdef CONFIG_PERF_EVENTS
+static int selinux_perf_event_open(struct perf_event_attr *attr, int type)
+{
+ u32 requested, sid = current_sid();
+
+ if (type == PERF_SECURITY_OPEN)
+ requested = PERF_EVENT__OPEN;
+ else if (type == PERF_SECURITY_CPU)
+ requested = PERF_EVENT__CPU;
+ else if (type == PERF_SECURITY_KERNEL)
+ requested = PERF_EVENT__KERNEL;
+ else if (type == PERF_SECURITY_TRACEPOINT)
+ requested = PERF_EVENT__TRACEPOINT;
+ else
+ return -EINVAL;
+
+ return avc_has_perm(&selinux_state, sid, sid, SECCLASS_PERF_EVENT,
+ requested, NULL);
+}
+
+static int selinux_perf_event_alloc(struct perf_event *event)
+{
+ struct perf_event_security_struct *perfsec;
+
+ perfsec = kzalloc(sizeof(*perfsec), GFP_KERNEL);
+ if (!perfsec)
+ return -ENOMEM;
+
+ perfsec->sid = current_sid();
+ event->security = perfsec;
+
+ return 0;
+}
+
+static void selinux_perf_event_free(struct perf_event *event)
+{
+ struct perf_event_security_struct *perfsec = event->security;
+
+ event->security = NULL;
+ kfree(perfsec);
+}
+
+static int selinux_perf_event_read(struct perf_event *event)
+{
+ struct perf_event_security_struct *perfsec = event->security;
+ u32 sid = current_sid();
+
+ return avc_has_perm(&selinux_state, sid, perfsec->sid,
+ SECCLASS_PERF_EVENT, PERF_EVENT__READ, NULL);
+}
+
+static int selinux_perf_event_write(struct perf_event *event)
+{
+ struct perf_event_security_struct *perfsec = event->security;
+ u32 sid = current_sid();
+
+ return avc_has_perm(&selinux_state, sid, perfsec->sid,
+ SECCLASS_PERF_EVENT, PERF_EVENT__WRITE, NULL);
+}
+#endif
+
static struct security_hook_list selinux_hooks[] __lsm_ro_after_init = {
LSM_HOOK_INIT(binder_set_context_mgr, selinux_binder_set_context_mgr),
LSM_HOOK_INIT(binder_transaction, selinux_binder_transaction),
@@ -7030,6 +7091,14 @@ static struct security_hook_list selinux_hooks[] __lsm_ro_after_init = {
LSM_HOOK_INIT(bpf_map_free_security, selinux_bpf_map_free),
LSM_HOOK_INIT(bpf_prog_free_security, selinux_bpf_prog_free),
#endif
+
+#ifdef CONFIG_PERF_EVENTS
+ LSM_HOOK_INIT(perf_event_open, selinux_perf_event_open),
+ LSM_HOOK_INIT(perf_event_alloc, selinux_perf_event_alloc),
+ LSM_HOOK_INIT(perf_event_free, selinux_perf_event_free),
+ LSM_HOOK_INIT(perf_event_read, selinux_perf_event_read),
+ LSM_HOOK_INIT(perf_event_write, selinux_perf_event_write),
+#endif
};
static __init int selinux_init(void)
diff --git a/security/selinux/include/classmap.h b/security/selinux/include/classmap.h
index 32e9b03be3dd..7db24855e12d 100644
--- a/security/selinux/include/classmap.h
+++ b/security/selinux/include/classmap.h
@@ -244,6 +244,8 @@ struct security_class_mapping secclass_map[] = {
{"map_create", "map_read", "map_write", "prog_load", "prog_run"} },
{ "xdp_socket",
{ COMMON_SOCK_PERMS, NULL } },
+ { "perf_event",
+ {"open", "cpu", "kernel", "tracepoint", "read", "write"} },
{ NULL }
};
diff --git a/security/selinux/include/objsec.h b/security/selinux/include/objsec.h
index 586b7abd0aa7..a4a86cbcfb0a 100644
--- a/security/selinux/include/objsec.h
+++ b/security/selinux/include/objsec.h
@@ -141,7 +141,11 @@ struct pkey_security_struct {
};
struct bpf_security_struct {
- u32 sid; /*SID of bpf obj creater*/
+ u32 sid; /* SID of bpf obj creator */
+};
+
+struct perf_event_security_struct {
+ u32 sid; /* SID of perf_event obj creator */
};
extern struct lsm_blob_sizes selinux_blob_sizes;
diff --git a/security/selinux/nlmsgtab.c b/security/selinux/nlmsgtab.c
index 58345ba0528e..c97fdae8f71b 100644
--- a/security/selinux/nlmsgtab.c
+++ b/security/selinux/nlmsgtab.c
@@ -83,6 +83,8 @@ static const struct nlmsg_perm nlmsg_route_perms[] =
{ RTM_NEWNEXTHOP, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
{ RTM_DELNEXTHOP, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
{ RTM_GETNEXTHOP, NETLINK_ROUTE_SOCKET__NLMSG_READ },
+ { RTM_NEWLINKPROP, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+ { RTM_DELLINKPROP, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
};
static const struct nlmsg_perm nlmsg_tcpdiag_perms[] =
@@ -166,7 +168,7 @@ int selinux_nlmsg_lookup(u16 sclass, u16 nlmsg_type, u32 *perm)
* structures at the top of this file with the new mappings
* before updating the BUILD_BUG_ON() macro!
*/
- BUILD_BUG_ON(RTM_MAX != (RTM_NEWNEXTHOP + 3));
+ BUILD_BUG_ON(RTM_MAX != (RTM_NEWLINKPROP + 3));
err = nlmsg_perm(nlmsg_type, perm, nlmsg_route_perms,
sizeof(nlmsg_route_perms));
break;
diff --git a/sound/aoa/soundbus/i2sbus/pcm.c b/sound/aoa/soundbus/i2sbus/pcm.c
index 7f0754dd3d7d..a94e4023fadf 100644
--- a/sound/aoa/soundbus/i2sbus/pcm.c
+++ b/sound/aoa/soundbus/i2sbus/pcm.c
@@ -1028,7 +1028,7 @@ i2sbus_attach_codec(struct soundbus_dev *dev, struct snd_card *card,
/* well, we really should support scatter/gather DMA */
snd_pcm_lib_preallocate_pages_for_all(
dev->pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(macio_get_pci_dev(i2sdev->macio)),
+ &macio_get_pci_dev(i2sdev->macio)->dev,
64 * 1024, 64 * 1024);
return 0;
diff --git a/sound/arm/pxa2xx-pcm-lib.c b/sound/arm/pxa2xx-pcm-lib.c
index 54500bd098f9..a86c95d89824 100644
--- a/sound/arm/pxa2xx-pcm-lib.c
+++ b/sound/arm/pxa2xx-pcm-lib.c
@@ -175,7 +175,15 @@ void pxa2xx_pcm_free_dma_buffers(struct snd_pcm *pcm)
}
EXPORT_SYMBOL(pxa2xx_pcm_free_dma_buffers);
-int pxa2xx_soc_pcm_new(struct snd_soc_pcm_runtime *rtd)
+void pxa2xx_soc_pcm_free(struct snd_soc_component *component,
+ struct snd_pcm *pcm)
+{
+ pxa2xx_pcm_free_dma_buffers(pcm);
+}
+EXPORT_SYMBOL(pxa2xx_soc_pcm_free);
+
+int pxa2xx_soc_pcm_new(struct snd_soc_component *component,
+ struct snd_soc_pcm_runtime *rtd)
{
struct snd_card *card = rtd->card->snd_card;
struct snd_pcm *pcm = rtd->pcm;
@@ -203,18 +211,64 @@ int pxa2xx_soc_pcm_new(struct snd_soc_pcm_runtime *rtd)
}
EXPORT_SYMBOL(pxa2xx_soc_pcm_new);
-const struct snd_pcm_ops pxa2xx_pcm_ops = {
- .open = pxa2xx_pcm_open,
- .close = pxa2xx_pcm_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = pxa2xx_pcm_hw_params,
- .hw_free = pxa2xx_pcm_hw_free,
- .prepare = pxa2xx_pcm_prepare,
- .trigger = pxa2xx_pcm_trigger,
- .pointer = pxa2xx_pcm_pointer,
- .mmap = pxa2xx_pcm_mmap,
-};
-EXPORT_SYMBOL(pxa2xx_pcm_ops);
+int pxa2xx_soc_pcm_open(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
+{
+ return pxa2xx_pcm_open(substream);
+}
+EXPORT_SYMBOL(pxa2xx_soc_pcm_open);
+
+int pxa2xx_soc_pcm_close(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
+{
+ return pxa2xx_pcm_close(substream);
+}
+EXPORT_SYMBOL(pxa2xx_soc_pcm_close);
+
+int pxa2xx_soc_pcm_hw_params(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ return pxa2xx_pcm_hw_params(substream, params);
+}
+EXPORT_SYMBOL(pxa2xx_soc_pcm_hw_params);
+
+int pxa2xx_soc_pcm_hw_free(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
+{
+ return pxa2xx_pcm_hw_free(substream);
+}
+EXPORT_SYMBOL(pxa2xx_soc_pcm_hw_free);
+
+int pxa2xx_soc_pcm_prepare(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
+{
+ return pxa2xx_pcm_prepare(substream);
+}
+EXPORT_SYMBOL(pxa2xx_soc_pcm_prepare);
+
+int pxa2xx_soc_pcm_trigger(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream, int cmd)
+{
+ return pxa2xx_pcm_trigger(substream, cmd);
+}
+EXPORT_SYMBOL(pxa2xx_soc_pcm_trigger);
+
+snd_pcm_uframes_t
+pxa2xx_soc_pcm_pointer(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
+{
+ return pxa2xx_pcm_pointer(substream);
+}
+EXPORT_SYMBOL(pxa2xx_soc_pcm_pointer);
+
+int pxa2xx_soc_pcm_mmap(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ struct vm_area_struct *vma)
+{
+ return pxa2xx_pcm_mmap(substream, vma);
+}
+EXPORT_SYMBOL(pxa2xx_soc_pcm_mmap);
MODULE_AUTHOR("Nicolas Pitre");
MODULE_DESCRIPTION("Intel PXA2xx sound library");
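These one-line wrappers exist because ASoC moved the PCM callbacks out of struct snd_pcm_ops and into the component driver, where each op receives the component as its first argument. A platform driver would then wire them up roughly as follows; the field names are assumed from that component-driver conversion, since the core-side change is not shown in this patch:

	static const struct snd_soc_component_driver pxa2xx_soc_component = {
		.open		= pxa2xx_soc_pcm_open,
		.close		= pxa2xx_soc_pcm_close,
		.hw_params	= pxa2xx_soc_pcm_hw_params,
		.hw_free	= pxa2xx_soc_pcm_hw_free,
		.prepare	= pxa2xx_soc_pcm_prepare,
		.trigger	= pxa2xx_soc_pcm_trigger,
		.pointer	= pxa2xx_soc_pcm_pointer,
		.mmap		= pxa2xx_soc_pcm_mmap,
		.pcm_construct	= pxa2xx_soc_pcm_new,
		.pcm_destruct	= pxa2xx_soc_pcm_free,
	};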
diff --git a/sound/core/Kconfig b/sound/core/Kconfig
index 4ee79ad6ae22..4044c42d8595 100644
--- a/sound/core/Kconfig
+++ b/sound/core/Kconfig
@@ -72,11 +72,11 @@ config SND_PCM_OSS
config SND_PCM_OSS_PLUGINS
bool "OSS PCM (digital audio) API - Include plugin system"
depends on SND_PCM_OSS
- default y
+ default y
help
- If you disable this option, the ALSA's OSS PCM API will not
- support conversion of channels, formats and rates. It will
- behave like most of new OSS/Free drivers in 2.4/2.6 kernels.
+ If you disable this option, the ALSA's OSS PCM API will not
+ support conversion of channels, formats and rates. It will
+ behave like most of new OSS/Free drivers in 2.4/2.6 kernels.
config SND_PCM_TIMER
bool "PCM timer interface" if EXPERT
@@ -128,13 +128,13 @@ config SND_SUPPORT_OLD_API
or older).
config SND_PROC_FS
- bool "Sound Proc FS Support" if EXPERT
- depends on PROC_FS
- default y
- help
- Say 'N' to disable Sound proc FS, which may reduce code size about
- 9KB on x86_64 platform.
- If unsure say Y.
+ bool "Sound Proc FS Support" if EXPERT
+ depends on PROC_FS
+ default y
+ help
+ Say 'N' to disable Sound proc FS, which may reduce code size about
+ 9KB on x86_64 platform.
+ If unsure say Y.
config SND_VERBOSE_PROCFS
bool "Verbose procfs contents"
@@ -142,8 +142,8 @@ config SND_VERBOSE_PROCFS
default y
help
Say Y here to include code for verbose procfs contents (provides
- useful information to developers when a problem occurs). On the
- other side, it makes the ALSA subsystem larger.
+ useful information to developers when a problem occurs). On the
+ other side, it makes the ALSA subsystem larger.
config SND_VERBOSE_PRINTK
bool "Verbose printk"
@@ -164,7 +164,7 @@ config SND_DEBUG_VERBOSE
depends on SND_DEBUG
help
Say Y here to enable extra-verbose debugging messages.
-
+
Let me repeat: it enables EXTRA-VERBOSE DEBUGGING messages.
So, say Y only if you are ready to be annoyed.
diff --git a/sound/core/init.c b/sound/core/init.c
index db99b7fad6ad..faa9f03c01ca 100644
--- a/sound/core/init.c
+++ b/sound/core/init.c
@@ -215,6 +215,7 @@ int snd_card_new(struct device *parent, int idx, const char *xid,
init_waitqueue_head(&card->power_sleep);
#endif
init_waitqueue_head(&card->remove_sleep);
+ card->sync_irq = -1;
device_initialize(&card->card_dev);
card->card_dev.parent = parent;
diff --git a/sound/core/memalloc.c b/sound/core/memalloc.c
index 6850d13aa98c..a83553fbedf0 100644
--- a/sound/core/memalloc.c
+++ b/sound/core/memalloc.c
@@ -10,6 +10,7 @@
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/genalloc.h>
+#include <linux/vmalloc.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif
@@ -99,6 +100,14 @@ static void snd_free_dev_iram(struct snd_dma_buffer *dmab)
*
*/
+static inline gfp_t snd_mem_get_gfp_flags(const struct device *dev,
+ gfp_t default_gfp)
+{
+ if (!dev)
+ return default_gfp;
+ else
+ return (__force gfp_t)(unsigned long)dev;
+}
/**
* snd_dma_alloc_pages - allocate the buffer area according to the given type
@@ -116,20 +125,25 @@ static void snd_free_dev_iram(struct snd_dma_buffer *dmab)
int snd_dma_alloc_pages(int type, struct device *device, size_t size,
struct snd_dma_buffer *dmab)
{
+ gfp_t gfp;
+
if (WARN_ON(!size))
return -ENXIO;
if (WARN_ON(!dmab))
return -ENXIO;
- if (WARN_ON(!device))
- return -EINVAL;
dmab->dev.type = type;
dmab->dev.dev = device;
dmab->bytes = 0;
switch (type) {
case SNDRV_DMA_TYPE_CONTINUOUS:
- dmab->area = alloc_pages_exact(size,
- (__force gfp_t)(unsigned long)device);
+ gfp = snd_mem_get_gfp_flags(device, GFP_KERNEL);
+ dmab->area = alloc_pages_exact(size, gfp);
+ dmab->addr = 0;
+ break;
+ case SNDRV_DMA_TYPE_VMALLOC:
+ gfp = snd_mem_get_gfp_flags(device, GFP_KERNEL | __GFP_HIGHMEM);
+ dmab->area = __vmalloc(size, gfp, PAGE_KERNEL);
dmab->addr = 0;
break;
#ifdef CONFIG_HAS_DMA
@@ -215,6 +229,9 @@ void snd_dma_free_pages(struct snd_dma_buffer *dmab)
case SNDRV_DMA_TYPE_CONTINUOUS:
free_pages_exact(dmab->area, dmab->bytes);
break;
+ case SNDRV_DMA_TYPE_VMALLOC:
+ vfree(dmab->area);
+ break;
#ifdef CONFIG_HAS_DMA
#ifdef CONFIG_GENERIC_ALLOCATOR
case SNDRV_DMA_TYPE_DEV_IRAM:
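The new SNDRV_DMA_TYPE_VMALLOC case gives callers a physically non-contiguous buffer through the same allocator interface; per snd_mem_get_gfp_flags(), a NULL device selects the default flags (GFP_KERNEL | __GFP_HIGHMEM here). A short usage sketch:

	struct snd_dma_buffer dmab;
	int err;

	err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_VMALLOC, NULL,
				  64 * 1024, &dmab);
	if (err < 0)
		return err;

	memset(dmab.area, 0, dmab.bytes);	/* kernel virtual address */
	/* dmab.addr stays 0: there is no single DMA address to report */

	snd_dma_free_pages(&dmab);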
diff --git a/sound/core/pcm_dmaengine.c b/sound/core/pcm_dmaengine.c
index 89a05926ac73..5749a8a49784 100644
--- a/sound/core/pcm_dmaengine.c
+++ b/sound/core/pcm_dmaengine.c
@@ -369,4 +369,87 @@ int snd_dmaengine_pcm_close_release_chan(struct snd_pcm_substream *substream)
}
EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_close_release_chan);
+/**
+ * snd_dmaengine_pcm_refine_runtime_hwparams - Refine runtime hw params
+ * @substream: PCM substream
+ * @dma_data: DAI DMA data
+ * @hw: PCM hw params
+ * @chan: DMA channel to use for data transfers
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ *
+ * This function will query DMA capability, then refine the pcm hardware
+ * parameters.
+ */
+int snd_dmaengine_pcm_refine_runtime_hwparams(
+ struct snd_pcm_substream *substream,
+ struct snd_dmaengine_dai_dma_data *dma_data,
+ struct snd_pcm_hardware *hw,
+ struct dma_chan *chan)
+{
+ struct dma_slave_caps dma_caps;
+ u32 addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+ snd_pcm_format_t i;
+ int ret = 0;
+
+ if (!hw || !chan || !dma_data)
+ return -EINVAL;
+
+ ret = dma_get_slave_caps(chan, &dma_caps);
+ if (ret == 0) {
+ if (dma_caps.cmd_pause && dma_caps.cmd_resume)
+ hw->info |= SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME;
+ if (dma_caps.residue_granularity <= DMA_RESIDUE_GRANULARITY_SEGMENT)
+ hw->info |= SNDRV_PCM_INFO_BATCH;
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ addr_widths = dma_caps.dst_addr_widths;
+ else
+ addr_widths = dma_caps.src_addr_widths;
+ }
+
+ /*
+ * If SND_DMAENGINE_PCM_DAI_FLAG_PACK is set, keep
+ * hw.formats set to 0, meaning no restrictions are in place.
+ * In this case it's the responsibility of the DAI driver to
+ * provide the supported format information.
+ */
+ if (!(dma_data->flags & SND_DMAENGINE_PCM_DAI_FLAG_PACK))
+ /*
+ * Prepare the formats mask for valid/allowed sample types. If the
+ * DMA does not support a given physical word size, it needs to be
+ * masked out so user space cannot use a format that would produce
+ * corrupted audio.
+ * If the DMA driver does not implement slave_caps, the default
+ * assumption is that it supports 1, 2 and 4 byte widths.
+ */
+ for (i = SNDRV_PCM_FORMAT_FIRST; i <= SNDRV_PCM_FORMAT_LAST; i++) {
+ int bits = snd_pcm_format_physical_width(i);
+
+ /*
+ * Enable only samples with DMA supported physical
+ * widths
+ */
+ switch (bits) {
+ case 8:
+ case 16:
+ case 24:
+ case 32:
+ case 64:
+ if (addr_widths & (1 << (bits / 8)))
+ hw->formats |= pcm_format_to_bits(i);
+ break;
+ default:
+ /* Unsupported types */
+ break;
+ }
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_refine_runtime_hwparams);
+
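/*
 * A hedged sketch of a caller: a dmaengine-based platform driver could
 * invoke the new helper from its PCM open callback to trim the reported
 * hardware capabilities.  The my_priv structure and its fields are
 * assumptions for illustration, not part of this patch.
 */
static int my_pcm_open(struct snd_pcm_substream *substream)
{
	struct my_priv *priv = substream->private_data;	/* assumed layout */
	int err;

	err = snd_dmaengine_pcm_refine_runtime_hwparams(substream,
							&priv->dma_data,
							&priv->pcm_hardware,
							priv->dma_chan);
	if (err < 0)
		return err;
	return snd_soc_set_runtime_hwparams(substream, &priv->pcm_hardware);
}
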
MODULE_LICENSE("GPL");
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index d80041ea4e01..2236b5e0c1f2 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -1782,11 +1782,14 @@ void snd_pcm_period_elapsed(struct snd_pcm_substream *substream)
struct snd_pcm_runtime *runtime;
unsigned long flags;
- if (PCM_RUNTIME_CHECK(substream))
+ if (snd_BUG_ON(!substream))
return;
- runtime = substream->runtime;
snd_pcm_stream_lock_irqsave(substream, flags);
+ if (PCM_RUNTIME_CHECK(substream))
+ goto _unlock;
+ runtime = substream->runtime;
+
if (!snd_pcm_running(substream) ||
snd_pcm_update_hw_ptr0(substream, 1) < 0)
goto _end;
@@ -1797,6 +1800,7 @@ void snd_pcm_period_elapsed(struct snd_pcm_substream *substream)
#endif
_end:
kill_fasync(&runtime->fasync, SIGIO, POLL_IN);
+ _unlock:
snd_pcm_stream_unlock_irqrestore(substream, flags);
}
EXPORT_SYMBOL(snd_pcm_period_elapsed);
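
/*
 * Typical interrupt-side caller, sketched with made-up my_* names: with
 * the reordered check above, snd_pcm_period_elapsed() is now also safe
 * when it races with a stream being closed, since the runtime pointer is
 * validated under the stream lock.
 */
static irqreturn_t my_interrupt(int irq, void *dev_id)
{
	struct my_chip *chip = dev_id;

	if (!my_irq_pending(chip))	/* assumed driver-specific test */
		return IRQ_NONE;
	my_ack_irq(chip);
	snd_pcm_period_elapsed(chip->substream);
	return IRQ_HANDLED;
}
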
diff --git a/sound/core/pcm_local.h b/sound/core/pcm_local.h
index 1161ab2d6a5b..384efd002984 100644
--- a/sound/core/pcm_local.h
+++ b/sound/core/pcm_local.h
@@ -67,4 +67,11 @@ static inline void snd_pcm_timer_done(struct snd_pcm_substream *substream) {}
void __snd_pcm_xrun(struct snd_pcm_substream *substream);
void snd_pcm_group_init(struct snd_pcm_group *group);
+#ifdef CONFIG_SND_DMA_SGBUF
+struct page *snd_pcm_sgbuf_ops_page(struct snd_pcm_substream *substream,
+ unsigned long offset);
+#endif
+
+#define PCM_RUNTIME_CHECK(sub) snd_BUG_ON(!(sub) || !(sub)->runtime)
+
#endif /* __SOUND_CORE_PCM_LOCAL_H */
diff --git a/sound/core/pcm_memory.c b/sound/core/pcm_memory.c
index 7600dcdf5fd4..d4702cc1d376 100644
--- a/sound/core/pcm_memory.c
+++ b/sound/core/pcm_memory.c
@@ -15,6 +15,7 @@
#include <sound/pcm.h>
#include <sound/info.h>
#include <sound/initval.h>
+#include "pcm_local.h"
static int preallocate_dma = 1;
module_param(preallocate_dma, int, 0444);
@@ -193,9 +194,15 @@ static inline void preallocate_info_init(struct snd_pcm_substream *substream)
/*
* pre-allocate the buffer and create a proc file for the substream
*/
-static void snd_pcm_lib_preallocate_pages1(struct snd_pcm_substream *substream,
- size_t size, size_t max)
+static void preallocate_pages(struct snd_pcm_substream *substream,
+ int type, struct device *data,
+ size_t size, size_t max, bool managed)
{
+ if (snd_BUG_ON(substream->dma_buffer.dev.type))
+ return;
+
+ substream->dma_buffer.dev.type = type;
+ substream->dma_buffer.dev.dev = data;
if (size > 0 && preallocate_dma && substream->number < maximum_substreams)
preallocate_pcm_pages(substream, size);
@@ -203,9 +210,25 @@ static void snd_pcm_lib_preallocate_pages1(struct snd_pcm_substream *substream,
if (substream->dma_buffer.bytes > 0)
substream->buffer_bytes_max = substream->dma_buffer.bytes;
substream->dma_max = max;
- preallocate_info_init(substream);
+ if (max > 0)
+ preallocate_info_init(substream);
+ if (managed)
+ substream->managed_buffer_alloc = 1;
}
+static void preallocate_pages_for_all(struct snd_pcm *pcm, int type,
+ void *data, size_t size, size_t max,
+ bool managed)
+{
+ struct snd_pcm_substream *substream;
+ int stream;
+
+ for (stream = 0; stream < 2; stream++)
+ for (substream = pcm->streams[stream].substream; substream;
+ substream = substream->next)
+ preallocate_pages(substream, type, data, size, max,
+ managed);
+}
/**
* snd_pcm_lib_preallocate_pages - pre-allocation for the given DMA type
@@ -221,9 +244,7 @@ void snd_pcm_lib_preallocate_pages(struct snd_pcm_substream *substream,
int type, struct device *data,
size_t size, size_t max)
{
- substream->dma_buffer.dev.type = type;
- substream->dma_buffer.dev.dev = data;
- snd_pcm_lib_preallocate_pages1(substream, size, max);
+ preallocate_pages(substream, type, data, size, max, false);
}
EXPORT_SYMBOL(snd_pcm_lib_preallocate_pages);
@@ -242,17 +263,57 @@ void snd_pcm_lib_preallocate_pages_for_all(struct snd_pcm *pcm,
int type, void *data,
size_t size, size_t max)
{
- struct snd_pcm_substream *substream;
- int stream;
-
- for (stream = 0; stream < 2; stream++)
- for (substream = pcm->streams[stream].substream; substream; substream = substream->next)
- snd_pcm_lib_preallocate_pages(substream, type, data, size, max);
+ preallocate_pages_for_all(pcm, type, data, size, max, false);
}
EXPORT_SYMBOL(snd_pcm_lib_preallocate_pages_for_all);
-#ifdef CONFIG_SND_DMA_SGBUF
/**
+ * snd_pcm_set_managed_buffer - set up buffer management for a substream
+ * @substream: the pcm substream instance
+ * @type: DMA type (SNDRV_DMA_TYPE_*)
+ * @data: DMA type dependent data
+ * @size: the requested pre-allocation size in bytes
+ * @max: the max. allowed pre-allocation size
+ *
+ * Do pre-allocation for the given DMA buffer type, and enable the managed
+ * buffer allocation mode for the given substream.
+ * In this mode, the PCM core allocates a buffer automatically before the
+ * PCM hw_params ops call and releases it after the PCM hw_free ops call,
+ * so that the driver doesn't need to invoke the allocation and the release
+ * explicitly in its callbacks.
+ * When a buffer is actually allocated before the PCM hw_params call, the
+ * runtime buffer_changed flag is turned on so that drivers can change
+ * their h/w parameters accordingly.
+ */
+void snd_pcm_set_managed_buffer(struct snd_pcm_substream *substream, int type,
+ struct device *data, size_t size, size_t max)
+{
+ preallocate_pages(substream, type, data, size, max, true);
+}
+EXPORT_SYMBOL(snd_pcm_set_managed_buffer);
+
+/**
+ * snd_pcm_set_managed_buffer_all - set up buffer management for all substreams
+ * @pcm: the pcm instance
+ * @type: DMA type (SNDRV_DMA_TYPE_*)
+ * @data: DMA type dependent data
+ * @size: the requested pre-allocation size in bytes
+ * @max: the max. allowed pre-allocation size
+ *
+ * Do pre-allocation for all substreams of the given pcm for the specified
+ * DMA type and size, and set the managed_buffer_alloc flag on each substream.
+ */
+void snd_pcm_set_managed_buffer_all(struct snd_pcm *pcm, int type,
+ struct device *data,
+ size_t size, size_t max)
+{
+ preallocate_pages_for_all(pcm, type, data, size, max, true);
+}
+EXPORT_SYMBOL(snd_pcm_set_managed_buffer_all);
+
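/*
 * A minimal sketch of the managed mode documented above: the driver sets
 * it up once at PCM construction time and can then drop the explicit
 * snd_pcm_lib_malloc_pages()/snd_pcm_lib_free_pages() calls from its
 * hw_params/hw_free callbacks.  The 64k/1M sizes are arbitrary examples.
 */
static int my_pcm_new(struct snd_pcm *pcm, struct device *dev)
{
	snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_DEV, dev,
				       64 * 1024, 1024 * 1024);
	return 0;
}
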
+#ifdef CONFIG_SND_DMA_SGBUF
+/*
* snd_pcm_sgbuf_ops_page - get the page struct at the given offset
* @substream: the pcm substream instance
* @offset: the buffer offset
@@ -270,7 +331,6 @@ struct page *snd_pcm_sgbuf_ops_page(struct snd_pcm_substream *substream, unsigne
return NULL;
return sgbuf->page_table[idx];
}
-EXPORT_SYMBOL(snd_pcm_sgbuf_ops_page);
#endif /* CONFIG_SND_DMA_SGBUF */
/**
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 91c6ad58729f..1fe581167b7b 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -13,6 +13,7 @@
#include <linux/pm_qos.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
+#include <linux/vmalloc.h>
#include <sound/core.h>
#include <sound/control.h>
#include <sound/info.h>
@@ -177,6 +178,16 @@ void snd_pcm_stream_unlock_irqrestore(struct snd_pcm_substream *substream,
}
EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock_irqrestore);
+/* Run PCM ioctl ops */
+static int snd_pcm_ops_ioctl(struct snd_pcm_substream *substream,
+ unsigned cmd, void *arg)
+{
+ if (substream->ops->ioctl)
+ return substream->ops->ioctl(substream, cmd, arg);
+ else
+ return snd_pcm_lib_ioctl(substream, cmd, arg);
+}
+
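/*
 * With the fallback above, a driver that only ever used the default
 * handler may leave .ioctl unset; a sketch of such an ops table (the
 * other my_* callbacks are assumed to exist elsewhere):
 */
static const struct snd_pcm_ops my_pcm_ops = {
	.open		= my_pcm_open,
	/* .ioctl omitted: snd_pcm_ops_ioctl() falls back to
	 * snd_pcm_lib_ioctl() automatically
	 */
	.hw_params	= my_hw_params,
	.trigger	= my_trigger,
	.pointer	= my_pointer,
};
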
int snd_pcm_info(struct snd_pcm_substream *substream, struct snd_pcm_info *info)
{
struct snd_pcm *pcm = substream->pcm;
@@ -222,7 +233,8 @@ static bool hw_support_mmap(struct snd_pcm_substream *substream)
return false;
if (substream->ops->mmap ||
- substream->dma_buffer.dev.type != SNDRV_DMA_TYPE_DEV)
+ (substream->dma_buffer.dev.type != SNDRV_DMA_TYPE_DEV &&
+ substream->dma_buffer.dev.type != SNDRV_DMA_TYPE_DEV_UC))
return true;
return dma_can_mmap(substream->dma_buffer.dev.dev);
@@ -446,8 +458,9 @@ static int fixup_unreferenced_params(struct snd_pcm_substream *substream,
m = hw_param_mask_c(params, SNDRV_PCM_HW_PARAM_FORMAT);
i = hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_CHANNELS);
if (snd_mask_single(m) && snd_interval_single(i)) {
- err = substream->ops->ioctl(substream,
- SNDRV_PCM_IOCTL1_FIFO_SIZE, params);
+ err = snd_pcm_ops_ioctl(substream,
+ SNDRV_PCM_IOCTL1_FIFO_SIZE,
+ params);
if (err < 0)
return err;
}
@@ -555,6 +568,17 @@ static inline void snd_pcm_timer_notify(struct snd_pcm_substream *substream,
#endif
}
+static void snd_pcm_sync_stop(struct snd_pcm_substream *substream)
+{
+ if (substream->runtime->stop_operating) {
+ substream->runtime->stop_operating = false;
+ if (substream->ops->sync_stop)
+ substream->ops->sync_stop(substream);
+ else if (substream->pcm->card->sync_irq > 0)
+ synchronize_irq(substream->pcm->card->sync_irq);
+ }
+}
+
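/*
 * Sketch of a driver-side sync_stop implementation matching the hook
 * called above; useful when the stop is deferred to a work item rather
 * than finished in the trigger callback.  The my_chip type and its
 * stop_work member are made-up names.
 */
static int my_sync_stop(struct snd_pcm_substream *substream)
{
	struct my_chip *chip = snd_pcm_substream_chip(substream);

	flush_work(&chip->stop_work);	/* wait for the in-flight stop */
	return 0;
}
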
/**
* snd_pcm_hw_param_choose - choose a configuration defined by @params
* @pcm: PCM instance
@@ -647,6 +671,8 @@ static int snd_pcm_hw_params(struct snd_pcm_substream *substream,
if (atomic_read(&substream->mmap_count))
return -EBADFD;
+ snd_pcm_sync_stop(substream);
+
params->rmask = ~0U;
err = snd_pcm_hw_refine(substream, params);
if (err < 0)
@@ -660,6 +686,14 @@ static int snd_pcm_hw_params(struct snd_pcm_substream *substream,
if (err < 0)
goto _error;
+ if (substream->managed_buffer_alloc) {
+ err = snd_pcm_lib_malloc_pages(substream,
+ params_buffer_bytes(params));
+ if (err < 0)
+ goto _error;
+ runtime->buffer_changed = err > 0;
+ }
+
if (substream->ops->hw_params != NULL) {
err = substream->ops->hw_params(substream, params);
if (err < 0)
@@ -721,6 +755,8 @@ static int snd_pcm_hw_params(struct snd_pcm_substream *substream,
snd_pcm_set_state(substream, SNDRV_PCM_STATE_OPEN);
if (substream->ops->hw_free != NULL)
substream->ops->hw_free(substream);
+ if (substream->managed_buffer_alloc)
+ snd_pcm_lib_free_pages(substream);
return err;
}
@@ -765,8 +801,11 @@ static int snd_pcm_hw_free(struct snd_pcm_substream *substream)
snd_pcm_stream_unlock_irq(substream);
if (atomic_read(&substream->mmap_count))
return -EBADFD;
+ snd_pcm_sync_stop(substream);
if (substream->ops->hw_free)
result = substream->ops->hw_free(substream);
+ if (substream->managed_buffer_alloc)
+ snd_pcm_lib_free_pages(substream);
snd_pcm_set_state(substream, SNDRV_PCM_STATE_OPEN);
pm_qos_remove_request(&substream->latency_pm_qos_req);
return result;
@@ -957,7 +996,7 @@ static int snd_pcm_channel_info(struct snd_pcm_substream *substream,
return -EINVAL;
memset(info, 0, sizeof(*info));
info->channel = channel;
- return substream->ops->ioctl(substream, SNDRV_PCM_IOCTL1_CHANNEL_INFO, info);
+ return snd_pcm_ops_ioctl(substream, SNDRV_PCM_IOCTL1_CHANNEL_INFO, info);
}
static int snd_pcm_channel_info_user(struct snd_pcm_substream *substream,
@@ -1288,6 +1327,7 @@ static void snd_pcm_post_stop(struct snd_pcm_substream *substream, int state)
runtime->status->state = state;
snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MSTOP);
}
+ runtime->stop_operating = true;
wake_up(&runtime->sleep);
wake_up(&runtime->tsleep);
}
@@ -1564,6 +1604,7 @@ static void snd_pcm_post_resume(struct snd_pcm_substream *substream, int state)
snd_pcm_trigger_tstamp(substream);
runtime->status->state = runtime->status->suspended_state;
snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MRESUME);
+ snd_pcm_sync_stop(substream);
}
static const struct action_ops snd_pcm_action_resume = {
@@ -1633,7 +1674,7 @@ static int snd_pcm_pre_reset(struct snd_pcm_substream *substream, int state)
static int snd_pcm_do_reset(struct snd_pcm_substream *substream, int state)
{
struct snd_pcm_runtime *runtime = substream->runtime;
- int err = substream->ops->ioctl(substream, SNDRV_PCM_IOCTL1_RESET, NULL);
+ int err = snd_pcm_ops_ioctl(substream, SNDRV_PCM_IOCTL1_RESET, NULL);
if (err < 0)
return err;
runtime->hw_ptr_base = 0;
@@ -1684,6 +1725,7 @@ static int snd_pcm_pre_prepare(struct snd_pcm_substream *substream,
static int snd_pcm_do_prepare(struct snd_pcm_substream *substream, int state)
{
int err;
+ snd_pcm_sync_stop(substream);
err = substream->ops->prepare(substream);
if (err < 0)
return err;
@@ -3334,7 +3376,18 @@ static inline struct page *
snd_pcm_default_page_ops(struct snd_pcm_substream *substream, unsigned long ofs)
{
void *vaddr = substream->runtime->dma_area + ofs;
- return virt_to_page(vaddr);
+
+ switch (substream->dma_buffer.dev.type) {
+#ifdef CONFIG_SND_DMA_SGBUF
+ case SNDRV_DMA_TYPE_DEV_SG:
+ case SNDRV_DMA_TYPE_DEV_UC_SG:
+ return snd_pcm_sgbuf_ops_page(substream, ofs);
+#endif /* CONFIG_SND_DMA_SGBUF */
+ case SNDRV_DMA_TYPE_VMALLOC:
+ return vmalloc_to_page(vaddr);
+ default:
+ return virt_to_page(vaddr);
+ }
}
/*
@@ -3403,7 +3456,8 @@ int snd_pcm_lib_default_mmap(struct snd_pcm_substream *substream,
#endif /* CONFIG_GENERIC_ALLOCATOR */
#ifndef CONFIG_X86 /* for avoiding warnings arch/x86/mm/pat.c */
if (IS_ENABLED(CONFIG_HAS_DMA) && !substream->ops->page &&
- substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV)
+ (substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV ||
+ substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV_UC))
return dma_mmap_coherent(substream->dma_buffer.dev.dev,
area,
substream->runtime->dma_area,
diff --git a/sound/core/seq/seq_timer.c b/sound/core/seq/seq_timer.c
index 161f3170bd7e..63dc7bdb622d 100644
--- a/sound/core/seq/seq_timer.c
+++ b/sound/core/seq/seq_timer.c
@@ -272,7 +272,13 @@ int snd_seq_timer_open(struct snd_seq_queue *q)
return -EINVAL;
if (tmr->alsa_id.dev_class != SNDRV_TIMER_CLASS_SLAVE)
tmr->alsa_id.dev_sclass = SNDRV_TIMER_SCLASS_SEQUENCER;
- err = snd_timer_open(&t, str, &tmr->alsa_id, q->queue);
+ t = snd_timer_instance_new(str);
+ if (!t)
+ return -ENOMEM;
+ t->callback = snd_seq_timer_interrupt;
+ t->callback_data = q;
+ t->flags |= SNDRV_TIMER_IFLG_AUTO;
+ err = snd_timer_open(t, &tmr->alsa_id, q->queue);
if (err < 0 && tmr->alsa_id.dev_class != SNDRV_TIMER_CLASS_SLAVE) {
if (tmr->alsa_id.dev_class != SNDRV_TIMER_CLASS_GLOBAL ||
tmr->alsa_id.device != SNDRV_TIMER_GLOBAL_SYSTEM) {
@@ -282,16 +288,14 @@ int snd_seq_timer_open(struct snd_seq_queue *q)
tid.dev_sclass = SNDRV_TIMER_SCLASS_SEQUENCER;
tid.card = -1;
tid.device = SNDRV_TIMER_GLOBAL_SYSTEM;
- err = snd_timer_open(&t, str, &tid, q->queue);
+ err = snd_timer_open(t, &tid, q->queue);
}
}
if (err < 0) {
pr_err("ALSA: seq fatal error: cannot create timer (%i)\n", err);
+ snd_timer_instance_free(t);
return err;
}
- t->callback = snd_seq_timer_interrupt;
- t->callback_data = q;
- t->flags |= SNDRV_TIMER_IFLG_AUTO;
spin_lock_irq(&tmr->lock);
tmr->timeri = t;
spin_unlock_irq(&tmr->lock);
@@ -310,8 +314,10 @@ int snd_seq_timer_close(struct snd_seq_queue *q)
t = tmr->timeri;
tmr->timeri = NULL;
spin_unlock_irq(&tmr->lock);
- if (t)
+ if (t) {
snd_timer_close(t);
+ snd_timer_instance_free(t);
+ }
return 0;
}
diff --git a/sound/core/timer.c b/sound/core/timer.c
index 59ae21b0bb93..24fed5c78273 100644
--- a/sound/core/timer.c
+++ b/sound/core/timer.c
@@ -74,6 +74,9 @@ static LIST_HEAD(snd_timer_slave_list);
/* lock for slave active lists */
static DEFINE_SPINLOCK(slave_active_lock);
+#define MAX_SLAVE_INSTANCES 1000
+static int num_slaves;
+
static DEFINE_MUTEX(register_mutex);
static int snd_timer_free(struct snd_timer *timer);
@@ -85,12 +88,11 @@ static void snd_timer_reschedule(struct snd_timer * timer, unsigned long ticks_l
/*
* create a timer instance with the given owner string.
- * when timer is not NULL, increments the module counter
*/
-static struct snd_timer_instance *snd_timer_instance_new(char *owner,
- struct snd_timer *timer)
+struct snd_timer_instance *snd_timer_instance_new(const char *owner)
{
struct snd_timer_instance *timeri;
+
timeri = kzalloc(sizeof(*timeri), GFP_KERNEL);
if (timeri == NULL)
return NULL;
@@ -105,15 +107,20 @@ static struct snd_timer_instance *snd_timer_instance_new(char *owner,
INIT_LIST_HEAD(&timeri->slave_list_head);
INIT_LIST_HEAD(&timeri->slave_active_head);
- timeri->timer = timer;
- if (timer && !try_module_get(timer->module)) {
+ return timeri;
+}
+EXPORT_SYMBOL(snd_timer_instance_new);
+
+void snd_timer_instance_free(struct snd_timer_instance *timeri)
+{
+ if (timeri) {
+ if (timeri->private_free)
+ timeri->private_free(timeri);
kfree(timeri->owner);
kfree(timeri);
- return NULL;
}
-
- return timeri;
}
+EXPORT_SYMBOL(snd_timer_instance_free);
/*
* find a timer instance from the given timer id
@@ -160,6 +167,28 @@ static void snd_timer_request(struct snd_timer_id *tid)
#endif
+/* move the slave if it belongs to the master; return 1 if match */
+static int check_matching_master_slave(struct snd_timer_instance *master,
+ struct snd_timer_instance *slave)
+{
+ if (slave->slave_class != master->slave_class ||
+ slave->slave_id != master->slave_id)
+ return 0;
+ if (master->timer->num_instances >= master->timer->max_instances)
+ return -EBUSY;
+ list_move_tail(&slave->open_list, &master->slave_list_head);
+ master->timer->num_instances++;
+ spin_lock_irq(&slave_active_lock);
+ spin_lock(&master->timer->lock);
+ slave->master = master;
+ slave->timer = master->timer;
+ if (slave->flags & SNDRV_TIMER_IFLG_RUNNING)
+ list_add_tail(&slave->active_list, &master->slave_active_head);
+ spin_unlock(&master->timer->lock);
+ spin_unlock_irq(&slave_active_lock);
+ return 1;
+}
+
/*
* look for a master instance matching with the slave id of the given slave.
* when found, relink the open_link of the slave.
@@ -170,27 +199,18 @@ static int snd_timer_check_slave(struct snd_timer_instance *slave)
{
struct snd_timer *timer;
struct snd_timer_instance *master;
+ int err = 0;
/* FIXME: it's really dumb to look up all entries.. */
list_for_each_entry(timer, &snd_timer_list, device_list) {
list_for_each_entry(master, &timer->open_list_head, open_list) {
- if (slave->slave_class == master->slave_class &&
- slave->slave_id == master->slave_id) {
- if (master->timer->num_instances >=
- master->timer->max_instances)
- return -EBUSY;
- list_move_tail(&slave->open_list,
- &master->slave_list_head);
- master->timer->num_instances++;
- spin_lock_irq(&slave_active_lock);
- slave->master = master;
- slave->timer = master->timer;
- spin_unlock_irq(&slave_active_lock);
- return 0;
- }
+ err = check_matching_master_slave(master, slave);
+ if (err != 0) /* match found or error */
+ goto out;
}
}
- return 0;
+ out:
+ return err < 0 ? err : 0;
}
/*
@@ -202,43 +222,29 @@ static int snd_timer_check_slave(struct snd_timer_instance *slave)
static int snd_timer_check_master(struct snd_timer_instance *master)
{
struct snd_timer_instance *slave, *tmp;
+ int err = 0;
/* check all pending slaves */
list_for_each_entry_safe(slave, tmp, &snd_timer_slave_list, open_list) {
- if (slave->slave_class == master->slave_class &&
- slave->slave_id == master->slave_id) {
- if (master->timer->num_instances >=
- master->timer->max_instances)
- return -EBUSY;
- list_move_tail(&slave->open_list, &master->slave_list_head);
- master->timer->num_instances++;
- spin_lock_irq(&slave_active_lock);
- spin_lock(&master->timer->lock);
- slave->master = master;
- slave->timer = master->timer;
- if (slave->flags & SNDRV_TIMER_IFLG_RUNNING)
- list_add_tail(&slave->active_list,
- &master->slave_active_head);
- spin_unlock(&master->timer->lock);
- spin_unlock_irq(&slave_active_lock);
- }
+ err = check_matching_master_slave(master, slave);
+ if (err < 0)
+ break;
}
- return 0;
+ return err < 0 ? err : 0;
}
-static int snd_timer_close_locked(struct snd_timer_instance *timeri,
- struct device **card_devp_to_put);
+static void snd_timer_close_locked(struct snd_timer_instance *timeri,
+ struct device **card_devp_to_put);
/*
* open a timer instance
* when opening a master, the slave id must be here given.
*/
-int snd_timer_open(struct snd_timer_instance **ti,
- char *owner, struct snd_timer_id *tid,
+int snd_timer_open(struct snd_timer_instance *timeri,
+ struct snd_timer_id *tid,
unsigned int slave_id)
{
struct snd_timer *timer;
- struct snd_timer_instance *timeri = NULL;
struct device *card_dev_to_put = NULL;
int err;
@@ -252,21 +258,17 @@ int snd_timer_open(struct snd_timer_instance **ti,
err = -EINVAL;
goto unlock;
}
- timeri = snd_timer_instance_new(owner, NULL);
- if (!timeri) {
- err = -ENOMEM;
+ if (num_slaves >= MAX_SLAVE_INSTANCES) {
+ err = -EBUSY;
goto unlock;
}
timeri->slave_class = tid->dev_sclass;
timeri->slave_id = tid->device;
timeri->flags |= SNDRV_TIMER_IFLG_SLAVE;
list_add_tail(&timeri->open_list, &snd_timer_slave_list);
+ num_slaves++;
err = snd_timer_check_slave(timeri);
- if (err < 0) {
- snd_timer_close_locked(timeri, &card_dev_to_put);
- timeri = NULL;
- }
- goto unlock;
+ goto list_added;
}
/* open a master instance */
@@ -296,45 +298,40 @@ int snd_timer_open(struct snd_timer_instance **ti,
err = -EBUSY;
goto unlock;
}
- timeri = snd_timer_instance_new(owner, timer);
- if (!timeri) {
- err = -ENOMEM;
+ if (!try_module_get(timer->module)) {
+ err = -EBUSY;
goto unlock;
}
/* take a card refcount for safe disconnection */
- if (timer->card)
+ if (timer->card) {
get_device(&timer->card->card_dev);
- timeri->slave_class = tid->dev_sclass;
- timeri->slave_id = slave_id;
+ card_dev_to_put = &timer->card->card_dev;
+ }
if (list_empty(&timer->open_list_head) && timer->hw.open) {
err = timer->hw.open(timer);
if (err) {
- kfree(timeri->owner);
- kfree(timeri);
- timeri = NULL;
-
- if (timer->card)
- card_dev_to_put = &timer->card->card_dev;
module_put(timer->module);
goto unlock;
}
}
+ timeri->timer = timer;
+ timeri->slave_class = tid->dev_sclass;
+ timeri->slave_id = slave_id;
+
list_add_tail(&timeri->open_list, &timer->open_list_head);
timer->num_instances++;
err = snd_timer_check_master(timeri);
- if (err < 0) {
+list_added:
+ if (err < 0)
snd_timer_close_locked(timeri, &card_dev_to_put);
- timeri = NULL;
- }
unlock:
mutex_unlock(&register_mutex);
/* put_device() is called after unlock for avoiding deadlock */
- if (card_dev_to_put)
+ if (err < 0 && card_dev_to_put)
put_device(card_dev_to_put);
- *ti = timeri;
return err;
}
EXPORT_SYMBOL(snd_timer_open);
@@ -343,8 +340,8 @@ EXPORT_SYMBOL(snd_timer_open);
* close a timer instance
* call this with register_mutex down.
*/
-static int snd_timer_close_locked(struct snd_timer_instance *timeri,
- struct device **card_devp_to_put)
+static void snd_timer_close_locked(struct snd_timer_instance *timeri,
+ struct device **card_devp_to_put)
{
struct snd_timer *timer = timeri->timer;
struct snd_timer_instance *slave, *tmp;
@@ -355,7 +352,11 @@ static int snd_timer_close_locked(struct snd_timer_instance *timeri,
spin_unlock_irq(&timer->lock);
}
- list_del(&timeri->open_list);
+ if (!list_empty(&timeri->open_list)) {
+ list_del_init(&timeri->open_list);
+ if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE)
+ num_slaves--;
+ }
/* force to stop the timer */
snd_timer_stop(timeri);
@@ -374,6 +375,7 @@ static int snd_timer_close_locked(struct snd_timer_instance *timeri,
/* remove slave links */
spin_lock_irq(&slave_active_lock);
spin_lock(&timer->lock);
+ timeri->timer = NULL;
list_for_each_entry_safe(slave, tmp, &timeri->slave_list_head,
open_list) {
list_move_tail(&slave->open_list, &snd_timer_slave_list);
@@ -391,11 +393,6 @@ static int snd_timer_close_locked(struct snd_timer_instance *timeri,
timer = NULL;
}
- if (timeri->private_free)
- timeri->private_free(timeri);
- kfree(timeri->owner);
- kfree(timeri);
-
if (timer) {
if (list_empty(&timer->open_list_head) && timer->hw.close)
timer->hw.close(timer);
@@ -404,28 +401,24 @@ static int snd_timer_close_locked(struct snd_timer_instance *timeri,
*card_devp_to_put = &timer->card->card_dev;
module_put(timer->module);
}
-
- return 0;
}
/*
* close a timer instance
*/
-int snd_timer_close(struct snd_timer_instance *timeri)
+void snd_timer_close(struct snd_timer_instance *timeri)
{
struct device *card_dev_to_put = NULL;
- int err;
if (snd_BUG_ON(!timeri))
- return -ENXIO;
+ return;
mutex_lock(&register_mutex);
- err = snd_timer_close_locked(timeri, &card_dev_to_put);
+ snd_timer_close_locked(timeri, &card_dev_to_put);
mutex_unlock(&register_mutex);
/* put_device() is called after unlock for avoiding deadlock */
if (card_dev_to_put)
put_device(card_dev_to_put);
- return err;
}
EXPORT_SYMBOL(snd_timer_close);
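
/*
 * The in-kernel open/close sequence after this refactor, sketched:
 * allocation is now separate from snd_timer_open(), and the caller is
 * responsible for freeing the instance after snd_timer_close().  The
 * my_timer_callback function is an assumed client callback with the
 * instance callback signature.
 */
static struct snd_timer_instance *my_timer_acquire(struct snd_timer_id *tid)
{
	struct snd_timer_instance *ti;

	ti = snd_timer_instance_new("my-client");
	if (!ti)
		return NULL;
	ti->callback = my_timer_callback;
	if (snd_timer_open(ti, tid, 0) < 0) {
		snd_timer_instance_free(ti);
		return NULL;
	}
	return ti;
}

static void my_timer_release(struct snd_timer_instance *ti)
{
	snd_timer_close(ti);
	snd_timer_instance_free(ti);
}
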
@@ -1474,8 +1467,10 @@ static int snd_timer_user_release(struct inode *inode, struct file *file)
tu = file->private_data;
file->private_data = NULL;
mutex_lock(&tu->ioctl_lock);
- if (tu->timeri)
+ if (tu->timeri) {
snd_timer_close(tu->timeri);
+ snd_timer_instance_free(tu->timeri);
+ }
mutex_unlock(&tu->ioctl_lock);
kfree(tu->queue);
kfree(tu->tqueue);
@@ -1716,6 +1711,7 @@ static int snd_timer_user_tselect(struct file *file,
tu = file->private_data;
if (tu->timeri) {
snd_timer_close(tu->timeri);
+ snd_timer_instance_free(tu->timeri);
tu->timeri = NULL;
}
if (copy_from_user(&tselect, _tselect, sizeof(tselect))) {
@@ -1725,9 +1721,11 @@ static int snd_timer_user_tselect(struct file *file,
sprintf(str, "application %i", current->pid);
if (tselect.id.dev_class != SNDRV_TIMER_CLASS_SLAVE)
tselect.id.dev_sclass = SNDRV_TIMER_SCLASS_APPLICATION;
- err = snd_timer_open(&tu->timeri, str, &tselect.id, current->pid);
- if (err < 0)
+ tu->timeri = snd_timer_instance_new(str);
+ if (!tu->timeri) {
+ err = -ENOMEM;
goto __err;
+ }
tu->timeri->flags |= SNDRV_TIMER_IFLG_FAST;
tu->timeri->callback = tu->tread
@@ -1736,6 +1734,12 @@ static int snd_timer_user_tselect(struct file *file,
tu->timeri->callback_data = (void *)tu;
tu->timeri->disconnect = snd_timer_user_disconnect;
+ err = snd_timer_open(tu->timeri, &tselect.id, current->pid);
+ if (err < 0) {
+ snd_timer_instance_free(tu->timeri);
+ tu->timeri = NULL;
+ }
+
__err:
return err;
}
diff --git a/sound/drivers/Kconfig b/sound/drivers/Kconfig
index 09932cc98e9d..577c8e03ec4d 100644
--- a/sound/drivers/Kconfig
+++ b/sound/drivers/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config SND_MPU401_UART
- tristate
- select SND_RAWMIDI
+ tristate
+ select SND_RAWMIDI
config SND_OPL3_LIB
tristate
@@ -90,16 +90,17 @@ config SND_DUMMY
will be called snd-dummy.
config SND_ALOOP
- tristate "Generic loopback driver (PCM)"
- select SND_PCM
- help
- Say 'Y' or 'M' to include support for the PCM loopback device.
+ tristate "Generic loopback driver (PCM)"
+ select SND_PCM
+ select SND_TIMER
+ help
+ Say 'Y' or 'M' to include support for the PCM loopback device.
This module returns played samples back to the user space using
the standard ALSA PCM device. The devices are routed 0->1 and
- 1->0, where first number is the playback PCM device and second
+ 1->0, where the first number is the playback PCM device and the second
number is the capture device. Module creates two PCM devices and
configured number of substreams (see the pcm_substreams module
- parameter).
+ parameter).
The loopback device allows time synchronization with an external
timing source using the time shift universal control (+-20%
@@ -142,12 +143,12 @@ config SND_MTS64
select SND_RAWMIDI
help
The ESI Miditerminal 4140 is a 4 In 4 Out MIDI Interface with
- additional SMPTE Timecode capabilities for the parallel port.
+ additional SMPTE Timecode capabilities for the parallel port.
Say 'Y' to include support for this device.
To compile this driver as a module, chose 'M' here: the module
- will be called snd-mts64.
+ will be called snd-mts64.
config SND_SERIAL_U16550
tristate "UART16550 serial MIDI driver"
diff --git a/sound/drivers/aloop.c b/sound/drivers/aloop.c
index 9ccdad89c288..0ebfbe70db00 100644
--- a/sound/drivers/aloop.c
+++ b/sound/drivers/aloop.c
@@ -28,6 +28,7 @@
#include <sound/pcm_params.h>
#include <sound/info.h>
#include <sound/initval.h>
+#include <sound/timer.h>
MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>");
MODULE_DESCRIPTION("A loopback soundcard");
@@ -41,6 +42,7 @@ static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
static bool enable[SNDRV_CARDS] = {1, [1 ... (SNDRV_CARDS - 1)] = 0};
static int pcm_substreams[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = 8};
static int pcm_notify[SNDRV_CARDS];
+static char *timer_source[SNDRV_CARDS];
module_param_array(index, int, NULL, 0444);
MODULE_PARM_DESC(index, "Index value for loopback soundcard.");
@@ -52,11 +54,48 @@ module_param_array(pcm_substreams, int, NULL, 0444);
MODULE_PARM_DESC(pcm_substreams, "PCM substreams # (1-8) for loopback driver.");
module_param_array(pcm_notify, int, NULL, 0444);
MODULE_PARM_DESC(pcm_notify, "Break capture when PCM format/rate/channels changes.");
+module_param_array(timer_source, charp, NULL, 0444);
+MODULE_PARM_DESC(timer_source, "Sound card name or number and device/subdevice number of timer to be used. Empty string for jiffies timer [default].");
#define NO_PITCH 100000
+#define CABLE_VALID_PLAYBACK BIT(SNDRV_PCM_STREAM_PLAYBACK)
+#define CABLE_VALID_CAPTURE BIT(SNDRV_PCM_STREAM_CAPTURE)
+#define CABLE_VALID_BOTH (CABLE_VALID_PLAYBACK | CABLE_VALID_CAPTURE)
+
+struct loopback_cable;
struct loopback_pcm;
+struct loopback_ops {
+ /* optional
+ * call in loopback->cable_lock
+ */
+ int (*open)(struct loopback_pcm *dpcm);
+ /* required
+ * call in cable->lock
+ */
+ int (*start)(struct loopback_pcm *dpcm);
+ /* required
+ * call in cable->lock
+ */
+ int (*stop)(struct loopback_pcm *dpcm);
+ /* optional */
+ int (*stop_sync)(struct loopback_pcm *dpcm);
+ /* optional */
+ int (*close_substream)(struct loopback_pcm *dpcm);
+ /* optional
+ * call in loopback->cable_lock
+ */
+ int (*close_cable)(struct loopback_pcm *dpcm);
+ /* optional
+ * call in cable->lock
+ */
+ unsigned int (*pos_update)(struct loopback_cable *cable);
+ /* optional */
+ void (*dpcm_info)(struct loopback_pcm *dpcm,
+ struct snd_info_buffer *buffer);
+};
+
struct loopback_cable {
spinlock_t lock;
struct loopback_pcm *streams[2];
@@ -65,6 +104,15 @@ struct loopback_cable {
unsigned int valid;
unsigned int running;
unsigned int pause;
+ /* timer specific */
+ struct loopback_ops *ops;
+ /* If sound timer is used */
+ struct {
+ int stream;
+ struct snd_timer_id id;
+ struct tasklet_struct event_tasklet;
+ struct snd_timer_instance *instance;
+ } snd_timer;
};
struct loopback_setup {
@@ -85,6 +133,7 @@ struct loopback {
struct loopback_cable *cables[MAX_PCM_SUBSTREAMS][2];
struct snd_pcm *pcm[2];
struct loopback_setup setup[MAX_PCM_SUBSTREAMS][2];
+ const char *timer_source;
};
struct loopback_pcm {
@@ -102,10 +151,13 @@ struct loopback_pcm {
/* flags */
unsigned int period_update_pending :1;
/* timer stuff */
- unsigned int irq_pos; /* fractional IRQ position */
- unsigned int period_size_frac;
+ unsigned int irq_pos; /* fractional IRQ position in jiffies
+ * ticks
+ */
+ unsigned int period_size_frac; /* period size in jiffies ticks */
unsigned int last_drift;
unsigned long last_jiffies;
+ /* If jiffies timer is used */
struct timer_list timer;
};
@@ -153,7 +205,7 @@ static inline unsigned int get_rate_shift(struct loopback_pcm *dpcm)
}
/* call in cable->lock */
-static void loopback_timer_start(struct loopback_pcm *dpcm)
+static int loopback_jiffies_timer_start(struct loopback_pcm *dpcm)
{
unsigned long tick;
unsigned int rate_shift = get_rate_shift(dpcm);
@@ -169,23 +221,102 @@ static void loopback_timer_start(struct loopback_pcm *dpcm)
tick = dpcm->period_size_frac - dpcm->irq_pos;
tick = (tick + dpcm->pcm_bps - 1) / dpcm->pcm_bps;
mod_timer(&dpcm->timer, jiffies + tick);
+
+ return 0;
}
/* call in cable->lock */
-static inline void loopback_timer_stop(struct loopback_pcm *dpcm)
+static int loopback_snd_timer_start(struct loopback_pcm *dpcm)
+{
+ struct loopback_cable *cable = dpcm->cable;
+ int err;
+
+ /* The loopback device has to use the same period as the timer card.
+ * Therefore wake up for each snd_pcm_period_elapsed() call of the
+ * timer card.
+ */
+ err = snd_timer_start(cable->snd_timer.instance, 1);
+ if (err < 0) {
+ /* do not report an error when trying to start a timer
+ * that is already running, e.g. when started by the
+ * opposite substream of the same cable
+ */
+ if (err == -EBUSY)
+ return 0;
+
+ pcm_err(dpcm->substream->pcm,
+ "snd_timer_start(%d,%d,%d) failed with %d",
+ cable->snd_timer.id.card,
+ cable->snd_timer.id.device,
+ cable->snd_timer.id.subdevice,
+ err);
+ }
+
+ return err;
+}
+
+/* call in cable->lock */
+static inline int loopback_jiffies_timer_stop(struct loopback_pcm *dpcm)
{
del_timer(&dpcm->timer);
dpcm->timer.expires = 0;
+
+ return 0;
}
-static inline void loopback_timer_stop_sync(struct loopback_pcm *dpcm)
+/* call in cable->lock */
+static int loopback_snd_timer_stop(struct loopback_pcm *dpcm)
+{
+ struct loopback_cable *cable = dpcm->cable;
+ int err;
+
+ /* only stop if both devices (playback and capture) are not running */
+ if (cable->running ^ cable->pause)
+ return 0;
+
+ err = snd_timer_stop(cable->snd_timer.instance);
+ if (err < 0) {
+ pcm_err(dpcm->substream->pcm,
+ "snd_timer_stop(%d,%d,%d) failed with %d",
+ cable->snd_timer.id.card,
+ cable->snd_timer.id.device,
+ cable->snd_timer.id.subdevice,
+ err);
+ }
+
+ return err;
+}
+
+static inline int loopback_jiffies_timer_stop_sync(struct loopback_pcm *dpcm)
{
del_timer_sync(&dpcm->timer);
+
+ return 0;
}
-#define CABLE_VALID_PLAYBACK (1 << SNDRV_PCM_STREAM_PLAYBACK)
-#define CABLE_VALID_CAPTURE (1 << SNDRV_PCM_STREAM_CAPTURE)
-#define CABLE_VALID_BOTH (CABLE_VALID_PLAYBACK|CABLE_VALID_CAPTURE)
+/* call in loopback->cable_lock */
+static int loopback_snd_timer_close_cable(struct loopback_pcm *dpcm)
+{
+ struct loopback_cable *cable = dpcm->cable;
+
+ /* snd_timer was not opened */
+ if (!cable->snd_timer.instance)
+ return 0;
+
+ /* This is only called from free_cable() after the other stream was
+ * already closed. The other stream cannot be reopened as long as
+ * loopback->cable_lock is held, so there is no need to take
+ * cable->lock.
+ */
+ snd_timer_close(cable->snd_timer.instance);
+
+ /* wait till drain tasklet has finished if requested */
+ tasklet_kill(&cable->snd_timer.event_tasklet);
+
+ snd_timer_instance_free(cable->snd_timer.instance);
+ memset(&cable->snd_timer, 0, sizeof(cable->snd_timer));
+
+ return 0;
+}
static int loopback_check_format(struct loopback_cable *cable, int stream)
{
@@ -249,7 +380,7 @@ static int loopback_trigger(struct snd_pcm_substream *substream, int cmd)
struct snd_pcm_runtime *runtime = substream->runtime;
struct loopback_pcm *dpcm = runtime->private_data;
struct loopback_cable *cable = dpcm->cable;
- int err, stream = 1 << substream->stream;
+ int err = 0, stream = 1 << substream->stream;
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
@@ -262,7 +393,7 @@ static int loopback_trigger(struct snd_pcm_substream *substream, int cmd)
spin_lock(&cable->lock);
cable->running |= stream;
cable->pause &= ~stream;
- loopback_timer_start(dpcm);
+ err = cable->ops->start(dpcm);
spin_unlock(&cable->lock);
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
loopback_active_notify(dpcm);
@@ -271,7 +402,7 @@ static int loopback_trigger(struct snd_pcm_substream *substream, int cmd)
spin_lock(&cable->lock);
cable->running &= ~stream;
cable->pause &= ~stream;
- loopback_timer_stop(dpcm);
+ err = cable->ops->stop(dpcm);
spin_unlock(&cable->lock);
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
loopback_active_notify(dpcm);
@@ -280,7 +411,7 @@ static int loopback_trigger(struct snd_pcm_substream *substream, int cmd)
case SNDRV_PCM_TRIGGER_SUSPEND:
spin_lock(&cable->lock);
cable->pause |= stream;
- loopback_timer_stop(dpcm);
+ err = cable->ops->stop(dpcm);
spin_unlock(&cable->lock);
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
loopback_active_notify(dpcm);
@@ -290,7 +421,7 @@ static int loopback_trigger(struct snd_pcm_substream *substream, int cmd)
spin_lock(&cable->lock);
dpcm->last_jiffies = jiffies;
cable->pause &= ~stream;
- loopback_timer_start(dpcm);
+ err = cable->ops->start(dpcm);
spin_unlock(&cable->lock);
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
loopback_active_notify(dpcm);
@@ -298,7 +429,7 @@ static int loopback_trigger(struct snd_pcm_substream *substream, int cmd)
default:
return -EINVAL;
}
- return 0;
+ return err;
}
static void params_change(struct snd_pcm_substream *substream)
@@ -312,6 +443,13 @@ static void params_change(struct snd_pcm_substream *substream)
cable->hw.rate_max = runtime->rate;
cable->hw.channels_min = runtime->channels;
cable->hw.channels_max = runtime->channels;
+
+ if (cable->snd_timer.instance) {
+ cable->hw.period_bytes_min =
+ frames_to_bytes(runtime, runtime->period_size);
+ cable->hw.period_bytes_max = cable->hw.period_bytes_min;
+ }
+
}
static int loopback_prepare(struct snd_pcm_substream *substream)
@@ -319,9 +457,13 @@ static int loopback_prepare(struct snd_pcm_substream *substream)
struct snd_pcm_runtime *runtime = substream->runtime;
struct loopback_pcm *dpcm = runtime->private_data;
struct loopback_cable *cable = dpcm->cable;
- int bps, salign;
+ int err, bps, salign;
- loopback_timer_stop_sync(dpcm);
+ if (cable->ops->stop_sync) {
+ err = cable->ops->stop_sync(dpcm);
+ if (err < 0)
+ return err;
+ }
salign = (snd_pcm_format_physical_width(runtime->format) *
runtime->channels) / 8;
@@ -457,7 +599,8 @@ static inline void bytepos_finish(struct loopback_pcm *dpcm,
}
/* call in cable->lock */
-static unsigned int loopback_pos_update(struct loopback_cable *cable)
+static unsigned int loopback_jiffies_timer_pos_update
+ (struct loopback_cable *cable)
{
struct loopback_pcm *dpcm_play =
cable->streams[SNDRV_PCM_STREAM_PLAYBACK];
@@ -510,14 +653,15 @@ static unsigned int loopback_pos_update(struct loopback_cable *cable)
return running;
}
-static void loopback_timer_function(struct timer_list *t)
+static void loopback_jiffies_timer_function(struct timer_list *t)
{
struct loopback_pcm *dpcm = from_timer(dpcm, t, timer);
unsigned long flags;
spin_lock_irqsave(&dpcm->cable->lock, flags);
- if (loopback_pos_update(dpcm->cable) & (1 << dpcm->substream->stream)) {
- loopback_timer_start(dpcm);
+ if (loopback_jiffies_timer_pos_update(dpcm->cable) &
+ (1 << dpcm->substream->stream)) {
+ loopback_jiffies_timer_start(dpcm);
if (dpcm->period_update_pending) {
dpcm->period_update_pending = 0;
spin_unlock_irqrestore(&dpcm->cable->lock, flags);
@@ -529,6 +673,193 @@ static void loopback_timer_function(struct timer_list *t)
spin_unlock_irqrestore(&dpcm->cable->lock, flags);
}
+/* call in cable->lock */
+static int loopback_snd_timer_check_resolution(struct snd_pcm_runtime *runtime,
+ unsigned long resolution)
+{
+ if (resolution != runtime->timer_resolution) {
+ struct loopback_pcm *dpcm = runtime->private_data;
+ struct loopback_cable *cable = dpcm->cable;
+ /* Worst-case estimation of possible values for resolution:
+ * resolution <= (512 * 1024) frames / 8kHz in nsec
+ * resolution <= 65,536,000,000 nsec
+ *
+ * period_size <= 65,536,000,000 nsec / 1000 nsec/usec * 192kHz +
+ * 500,000
+ * period_size <= 12,582,912,000,000 < 64bit
+ * / 1,000,000 usec/sec
+ */
+ snd_pcm_uframes_t period_size_usec =
+ resolution / 1000 * runtime->rate;
+ /* round to nearest sample rate */
+ snd_pcm_uframes_t period_size =
+ (period_size_usec + 500 * 1000) / (1000 * 1000);
+
+ pcm_err(dpcm->substream->pcm,
+ "Period size (%lu frames) of loopback device is not corresponding to timer resolution (%lu nsec = %lu frames) of card timer %d,%d,%d. Use period size of %lu frames for loopback device.",
+ runtime->period_size, resolution, period_size,
+ cable->snd_timer.id.card,
+ cable->snd_timer.id.device,
+ cable->snd_timer.id.subdevice,
+ period_size);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void loopback_snd_timer_period_elapsed(struct loopback_cable *cable,
+ int event,
+ unsigned long resolution)
+{
+ struct loopback_pcm *dpcm_play, *dpcm_capt;
+ struct snd_pcm_substream *substream_play, *substream_capt;
+ struct snd_pcm_runtime *valid_runtime;
+ unsigned int running, elapsed_bytes;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cable->lock, flags);
+ running = cable->running ^ cable->pause;
+ /* no need to do anything if no stream is running */
+ if (!running) {
+ spin_unlock_irqrestore(&cable->lock, flags);
+ return;
+ }
+
+ dpcm_play = cable->streams[SNDRV_PCM_STREAM_PLAYBACK];
+ dpcm_capt = cable->streams[SNDRV_PCM_STREAM_CAPTURE];
+ substream_play = (running & (1 << SNDRV_PCM_STREAM_PLAYBACK)) ?
+ dpcm_play->substream : NULL;
+ substream_capt = (running & (1 << SNDRV_PCM_STREAM_CAPTURE)) ?
+ dpcm_capt->substream : NULL;
+
+ if (event == SNDRV_TIMER_EVENT_MSTOP) {
+ if (!dpcm_play ||
+ dpcm_play->substream->runtime->status->state !=
+ SNDRV_PCM_STATE_DRAINING) {
+ spin_unlock_irqrestore(&cable->lock, flags);
+ return;
+ }
+ }
+
+ valid_runtime = (running & (1 << SNDRV_PCM_STREAM_PLAYBACK)) ?
+ dpcm_play->substream->runtime :
+ dpcm_capt->substream->runtime;
+
+ /* resolution is only valid for SNDRV_TIMER_EVENT_TICK events */
+ if (event == SNDRV_TIMER_EVENT_TICK) {
+ /* The hardware rules guarantee that the playback and capture
+ * periods are the same. Therefore only one device has to be
+ * checked here.
+ */
+ if (loopback_snd_timer_check_resolution(valid_runtime,
+ resolution) < 0) {
+ spin_unlock_irqrestore(&cable->lock, flags);
+ if (substream_play)
+ snd_pcm_stop_xrun(substream_play);
+ if (substream_capt)
+ snd_pcm_stop_xrun(substream_capt);
+ return;
+ }
+ }
+
+ elapsed_bytes = frames_to_bytes(valid_runtime,
+ valid_runtime->period_size);
+ /* The same timer interrupt is used for playback and capture device */
+ if ((running & (1 << SNDRV_PCM_STREAM_PLAYBACK)) &&
+ (running & (1 << SNDRV_PCM_STREAM_CAPTURE))) {
+ copy_play_buf(dpcm_play, dpcm_capt, elapsed_bytes);
+ bytepos_finish(dpcm_play, elapsed_bytes);
+ bytepos_finish(dpcm_capt, elapsed_bytes);
+ } else if (running & (1 << SNDRV_PCM_STREAM_PLAYBACK)) {
+ bytepos_finish(dpcm_play, elapsed_bytes);
+ } else if (running & (1 << SNDRV_PCM_STREAM_CAPTURE)) {
+ clear_capture_buf(dpcm_capt, elapsed_bytes);
+ bytepos_finish(dpcm_capt, elapsed_bytes);
+ }
+ spin_unlock_irqrestore(&cable->lock, flags);
+
+ if (substream_play)
+ snd_pcm_period_elapsed(substream_play);
+ if (substream_capt)
+ snd_pcm_period_elapsed(substream_capt);
+}
+
+static void loopback_snd_timer_function(struct snd_timer_instance *timeri,
+ unsigned long resolution,
+ unsigned long ticks)
+{
+ struct loopback_cable *cable = timeri->callback_data;
+
+ loopback_snd_timer_period_elapsed(cable, SNDRV_TIMER_EVENT_TICK,
+ resolution);
+}
+
+static void loopback_snd_timer_tasklet(unsigned long arg)
+{
+ struct snd_timer_instance *timeri = (struct snd_timer_instance *)arg;
+ struct loopback_cable *cable = timeri->callback_data;
+
+ loopback_snd_timer_period_elapsed(cable, SNDRV_TIMER_EVENT_MSTOP, 0);
+}
+
+static void loopback_snd_timer_event(struct snd_timer_instance *timeri,
+ int event,
+ struct timespec *tstamp,
+ unsigned long resolution)
+{
+ /* Do not take cable->lock here because timer->lock is already held.
+ * Other functions take cable->lock first and then timer->lock, e.g.
+ * loopback_trigger()
+ * spin_lock(&cable->lock)
+ * loopback_snd_timer_start()
+ * snd_timer_start()
+ * spin_lock(&timer->lock)
+ * Taking the locks in the opposite order here could therefore
+ * result in a deadlock.
+ */
+
+ if (event == SNDRV_TIMER_EVENT_MSTOP) {
+ struct loopback_cable *cable = timeri->callback_data;
+
+ /* The sound card of the timer was stopped, so there will be no
+ * further timer callbacks. Therefore forward the audio data from
+ * here if we are in the draining state. While still in the running
+ * state, streaming will be aborted by the usual timeout instead; it
+ * should not be aborted here, because the timer sound card may just
+ * be recovering and the timer may come back soon.
+ * The tasklet scheduled here runs loopback_snd_timer_tasklet().
+ */
+ tasklet_schedule(&cable->snd_timer.event_tasklet);
+ }
+}
+
+static void loopback_jiffies_timer_dpcm_info(struct loopback_pcm *dpcm,
+ struct snd_info_buffer *buffer)
+{
+ snd_iprintf(buffer, " update_pending:\t%u\n",
+ dpcm->period_update_pending);
+ snd_iprintf(buffer, " irq_pos:\t\t%u\n", dpcm->irq_pos);
+ snd_iprintf(buffer, " period_frac:\t%u\n", dpcm->period_size_frac);
+ snd_iprintf(buffer, " last_jiffies:\t%lu (%lu)\n",
+ dpcm->last_jiffies, jiffies);
+ snd_iprintf(buffer, " timer_expires:\t%lu\n", dpcm->timer.expires);
+}
+
+static void loopback_snd_timer_dpcm_info(struct loopback_pcm *dpcm,
+ struct snd_info_buffer *buffer)
+{
+ struct loopback_cable *cable = dpcm->cable;
+
+ snd_iprintf(buffer, " sound timer:\thw:%d,%d,%d\n",
+ cable->snd_timer.id.card,
+ cable->snd_timer.id.device,
+ cable->snd_timer.id.subdevice);
+ snd_iprintf(buffer, " timer open:\t\t%s\n",
+ (cable->snd_timer.stream == SNDRV_PCM_STREAM_CAPTURE) ?
+ "capture" : "playback");
+}
+
static snd_pcm_uframes_t loopback_pointer(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
@@ -536,7 +867,8 @@ static snd_pcm_uframes_t loopback_pointer(struct snd_pcm_substream *substream)
snd_pcm_uframes_t pos;
spin_lock(&dpcm->cable->lock);
- loopback_pos_update(dpcm->cable);
+ if (dpcm->cable->ops->pos_update)
+ dpcm->cable->ops->pos_update(dpcm->cable);
pos = dpcm->buf_pos;
spin_unlock(&dpcm->cable->lock);
return bytes_to_frames(runtime, pos);
@@ -576,8 +908,7 @@ static void loopback_runtime_free(struct snd_pcm_runtime *runtime)
static int loopback_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
- return snd_pcm_lib_alloc_vmalloc_buffer(substream,
- params_buffer_bytes(params));
+ return snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(params));
}
static int loopback_hw_free(struct snd_pcm_substream *substream)
@@ -589,7 +920,7 @@ static int loopback_hw_free(struct snd_pcm_substream *substream)
mutex_lock(&dpcm->loopback->cable_lock);
cable->valid &= ~(1 << substream->stream);
mutex_unlock(&dpcm->loopback->cable_lock);
- return snd_pcm_lib_free_vmalloc_buffer(substream);
+ return snd_pcm_lib_free_pages(substream);
}
static unsigned int get_cable_index(struct snd_pcm_substream *substream)
@@ -647,6 +978,23 @@ static int rule_channels(struct snd_pcm_hw_params *params,
return snd_interval_refine(hw_param_interval(params, rule->var), &t);
}
+static int rule_period_bytes(struct snd_pcm_hw_params *params,
+ struct snd_pcm_hw_rule *rule)
+{
+ struct loopback_pcm *dpcm = rule->private;
+ struct loopback_cable *cable = dpcm->cable;
+ struct snd_interval t;
+
+ mutex_lock(&dpcm->loopback->cable_lock);
+ t.min = cable->hw.period_bytes_min;
+ t.max = cable->hw.period_bytes_max;
+ mutex_unlock(&dpcm->loopback->cable_lock);
+ t.openmin = 0;
+ t.openmax = 0;
+ t.integer = 0;
+ return snd_interval_refine(hw_param_interval(params, rule->var), &t);
+}
+
static void free_cable(struct snd_pcm_substream *substream)
{
struct loopback *loopback = substream->private_data;
@@ -662,12 +1010,183 @@ static void free_cable(struct snd_pcm_substream *substream)
cable->streams[substream->stream] = NULL;
spin_unlock_irq(&cable->lock);
} else {
+ struct loopback_pcm *dpcm = substream->runtime->private_data;
+
+ if (cable->ops && cable->ops->close_cable && dpcm)
+ cable->ops->close_cable(dpcm);
/* free the cable */
loopback->cables[substream->number][dev] = NULL;
kfree(cable);
}
}
+static int loopback_jiffies_timer_open(struct loopback_pcm *dpcm)
+{
+ timer_setup(&dpcm->timer, loopback_jiffies_timer_function, 0);
+
+ return 0;
+}
+
+static struct loopback_ops loopback_jiffies_timer_ops = {
+ .open = loopback_jiffies_timer_open,
+ .start = loopback_jiffies_timer_start,
+ .stop = loopback_jiffies_timer_stop,
+ .stop_sync = loopback_jiffies_timer_stop_sync,
+ .close_substream = loopback_jiffies_timer_stop_sync,
+ .pos_update = loopback_jiffies_timer_pos_update,
+ .dpcm_info = loopback_jiffies_timer_dpcm_info,
+};
+
+static int loopback_parse_timer_id(const char *str,
+ struct snd_timer_id *tid)
+{
+ /* [<pref>:](<card name>|<card idx>)[{.,}<dev idx>[{.,}<subdev idx>]] */
+ const char * const sep_dev = ".,";
+ const char * const sep_pref = ":";
+ const char *name = str;
+ char *sep, save = '\0';
+ int card_idx = 0, dev = 0, subdev = 0;
+ int err;
+
+ sep = strpbrk(str, sep_pref);
+ if (sep)
+ name = sep + 1;
+ sep = strpbrk(name, sep_dev);
+ if (sep) {
+ save = *sep;
+ *sep = '\0';
+ }
+ err = kstrtoint(name, 0, &card_idx);
+ if (err == -EINVAL) {
+ /* Must be the name, not number */
+ for (card_idx = 0; card_idx < snd_ecards_limit; card_idx++) {
+ struct snd_card *card = snd_card_ref(card_idx);
+
+ if (card) {
+ if (!strcmp(card->id, name))
+ err = 0;
+ snd_card_unref(card);
+ }
+ if (!err)
+ break;
+ }
+ }
+ if (sep) {
+ *sep = save;
+ if (!err) {
+ char *sep2, save2 = '\0';
+
+ sep2 = strpbrk(sep + 1, sep_dev);
+ if (sep2) {
+ save2 = *sep2;
+ *sep2 = '\0';
+ }
+ err = kstrtoint(sep + 1, 0, &dev);
+ if (sep2) {
+ *sep2 = save2;
+ if (!err)
+ err = kstrtoint(sep2 + 1, 0, &subdev);
+ }
+ }
+ }
+ if (!err && tid) {
+ tid->card = card_idx;
+ tid->device = dev;
+ tid->subdevice = subdev;
+ }
+ return err;
+}
+
+/* call in loopback->cable_lock */
+static int loopback_snd_timer_open(struct loopback_pcm *dpcm)
+{
+ int err = 0;
+ struct snd_timer_id tid = {
+ .dev_class = SNDRV_TIMER_CLASS_PCM,
+ .dev_sclass = SNDRV_TIMER_SCLASS_APPLICATION,
+ };
+ struct snd_timer_instance *timeri;
+ struct loopback_cable *cable = dpcm->cable;
+
+ /* check if timer was already opened. It is only opened once
+ * per playback and capture subdevice (aka cable).
+ */
+ if (cable->snd_timer.instance)
+ goto exit;
+
+ err = loopback_parse_timer_id(dpcm->loopback->timer_source, &tid);
+ if (err < 0) {
+ pcm_err(dpcm->substream->pcm,
+ "Parsing timer source \'%s\' failed with %d",
+ dpcm->loopback->timer_source, err);
+ goto exit;
+ }
+
+ cable->snd_timer.stream = dpcm->substream->stream;
+ cable->snd_timer.id = tid;
+
+ timeri = snd_timer_instance_new(dpcm->loopback->card->id);
+ if (!timeri) {
+ err = -ENOMEM;
+ goto exit;
+ }
+ /* The callback has to run from another tasklet. If
+ * SNDRV_TIMER_IFLG_FAST were specified, it would be called directly
+ * from the snd_pcm_period_elapsed() call of the selected sound card,
+ * which holds snd_pcm_stream_lock_irqsave(). Our callback
+ * loopback_snd_timer_function() itself calls snd_pcm_period_elapsed(),
+ * which takes the same lock, so this would end up in a deadlock.
+ */
+ timeri->flags |= SNDRV_TIMER_IFLG_AUTO;
+ timeri->callback = loopback_snd_timer_function;
+ timeri->callback_data = (void *)cable;
+ timeri->ccallback = loopback_snd_timer_event;
+
+ /* initialise a tasklet used for draining */
+ tasklet_init(&cable->snd_timer.event_tasklet,
+ loopback_snd_timer_tasklet, (unsigned long)timeri);
+
+ /* The mutex loopback->cable_lock is kept locked.
+ * Therefore snd_timer_open() cannot be called a second time
+ * by the other device of the same cable.
+ * Therefore the following issue cannot happen:
+ * [proc1] Call loopback_timer_open() ->
+ * Unlock cable->lock for snd_timer_close/open() call
+ * [proc2] Call loopback_timer_open() -> snd_timer_open(),
+ * snd_timer_start()
+ * [proc1] Call snd_timer_open() and overwrite running timer
+ * instance
+ */
+ err = snd_timer_open(timeri, &cable->snd_timer.id, current->pid);
+ if (err < 0) {
+ pcm_err(dpcm->substream->pcm,
+ "snd_timer_open (%d,%d,%d) failed with %d",
+ cable->snd_timer.id.card,
+ cable->snd_timer.id.device,
+ cable->snd_timer.id.subdevice,
+ err);
+ snd_timer_instance_free(timeri);
+ goto exit;
+ }
+
+ cable->snd_timer.instance = timeri;
+
+exit:
+ return err;
+}
+
+/* stop_sync() is not required for the sound timer because it does not need
+ * to be restarted in loopback_prepare() on XRUN recovery
+ */
+static struct loopback_ops loopback_snd_timer_ops = {
+ .open = loopback_snd_timer_open,
+ .start = loopback_snd_timer_start,
+ .stop = loopback_snd_timer_stop,
+ .close_cable = loopback_snd_timer_close_cable,
+ .dpcm_info = loopback_snd_timer_dpcm_info,
+};
+
static int loopback_open(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
@@ -685,7 +1204,6 @@ static int loopback_open(struct snd_pcm_substream *substream)
}
dpcm->loopback = loopback;
dpcm->substream = substream;
- timer_setup(&dpcm->timer, loopback_timer_function, 0);
cable = loopback->cables[substream->number][dev];
if (!cable) {
@@ -696,9 +1214,20 @@ static int loopback_open(struct snd_pcm_substream *substream)
}
spin_lock_init(&cable->lock);
cable->hw = loopback_pcm_hardware;
+ if (loopback->timer_source)
+ cable->ops = &loopback_snd_timer_ops;
+ else
+ cable->ops = &loopback_jiffies_timer_ops;
loopback->cables[substream->number][dev] = cable;
}
dpcm->cable = cable;
+ runtime->private_data = dpcm;
+
+ if (cable->ops->open) {
+ err = cable->ops->open(dpcm);
+ if (err < 0)
+ goto unlock;
+ }
snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
@@ -724,7 +1253,22 @@ static int loopback_open(struct snd_pcm_substream *substream)
if (err < 0)
goto unlock;
- runtime->private_data = dpcm;
+ /* When a sound timer is used, the period time of both devices of the
+ * same loop has to be the same.
+ * This rule only takes effect if a sound timer was chosen.
+ */
+ if (cable->snd_timer.instance) {
+ err = snd_pcm_hw_rule_add(runtime, 0,
+ SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
+ rule_period_bytes, dpcm,
+ SNDRV_PCM_HW_PARAM_PERIOD_BYTES, -1);
+ if (err < 0)
+ goto unlock;
+ }
+
+ /* loopback_runtime_free() must not be called if kfree(dpcm) was
+ * already done above; otherwise it would end up in a double free.
+ */
runtime->private_free = loopback_runtime_free;
if (get_notify(dpcm))
runtime->hw = loopback_pcm_hardware;
@@ -748,12 +1292,14 @@ static int loopback_close(struct snd_pcm_substream *substream)
{
struct loopback *loopback = substream->private_data;
struct loopback_pcm *dpcm = substream->runtime->private_data;
+ int err = 0;
- loopback_timer_stop_sync(dpcm);
+ if (dpcm->cable->ops->close_substream)
+ err = dpcm->cable->ops->close_substream(dpcm);
mutex_lock(&loopback->cable_lock);
free_cable(substream);
mutex_unlock(&loopback->cable_lock);
- return 0;
+ return err;
}
static const struct snd_pcm_ops loopback_pcm_ops = {
@@ -765,7 +1311,6 @@ static const struct snd_pcm_ops loopback_pcm_ops = {
.prepare = loopback_prepare,
.trigger = loopback_trigger,
.pointer = loopback_pointer,
- .page = snd_pcm_lib_get_vmalloc_page,
};
static int loopback_pcm_new(struct loopback *loopback,
@@ -780,6 +1325,8 @@ static int loopback_pcm_new(struct loopback *loopback,
return err;
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &loopback_pcm_ops);
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &loopback_pcm_ops);
+ snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_VMALLOC,
+ NULL, 0, 0);
pcm->private_data = loopback;
pcm->info_flags = 0;
@@ -1076,13 +1623,8 @@ static void print_dpcm_info(struct snd_info_buffer *buffer,
snd_iprintf(buffer, " bytes_per_sec:\t%u\n", dpcm->pcm_bps);
snd_iprintf(buffer, " sample_align:\t%u\n", dpcm->pcm_salign);
snd_iprintf(buffer, " rate_shift:\t\t%u\n", dpcm->pcm_rate_shift);
- snd_iprintf(buffer, " update_pending:\t%u\n",
- dpcm->period_update_pending);
- snd_iprintf(buffer, " irq_pos:\t\t%u\n", dpcm->irq_pos);
- snd_iprintf(buffer, " period_frac:\t%u\n", dpcm->period_size_frac);
- snd_iprintf(buffer, " last_jiffies:\t%lu (%lu)\n",
- dpcm->last_jiffies, jiffies);
- snd_iprintf(buffer, " timer_expires:\t%lu\n", dpcm->timer.expires);
+ if (dpcm->cable->ops->dpcm_info)
+ dpcm->cable->ops->dpcm_info(dpcm, buffer);
}
static void print_substream_info(struct snd_info_buffer *buffer,
@@ -1118,7 +1660,7 @@ static void print_cable_info(struct snd_info_entry *entry,
mutex_unlock(&loopback->cable_lock);
}
-static int loopback_proc_new(struct loopback *loopback, int cidx)
+static int loopback_cable_proc_new(struct loopback *loopback, int cidx)
{
char name[32];
@@ -1127,6 +1669,48 @@ static int loopback_proc_new(struct loopback *loopback, int cidx)
print_cable_info);
}
+static void loopback_set_timer_source(struct loopback *loopback,
+ const char *value)
+{
+ if (loopback->timer_source) {
+ devm_kfree(loopback->card->dev, loopback->timer_source);
+ loopback->timer_source = NULL;
+ }
+ if (value && *value)
+ loopback->timer_source = devm_kstrdup(loopback->card->dev,
+ value, GFP_KERNEL);
+}
+
+static void print_timer_source_info(struct snd_info_entry *entry,
+ struct snd_info_buffer *buffer)
+{
+ struct loopback *loopback = entry->private_data;
+
+ mutex_lock(&loopback->cable_lock);
+ snd_iprintf(buffer, "%s\n",
+ loopback->timer_source ? loopback->timer_source : "");
+ mutex_unlock(&loopback->cable_lock);
+}
+
+static void change_timer_source_info(struct snd_info_entry *entry,
+ struct snd_info_buffer *buffer)
+{
+ struct loopback *loopback = entry->private_data;
+ char line[64];
+
+ mutex_lock(&loopback->cable_lock);
+ if (!snd_info_get_line(buffer, line, sizeof(line)))
+ loopback_set_timer_source(loopback, strim(line));
+ mutex_unlock(&loopback->cable_lock);
+}
+
+static int loopback_timer_source_proc_new(struct loopback *loopback)
+{
+ return snd_card_rw_proc_new(loopback->card, "timer_source", loopback,
+ print_timer_source_info,
+ change_timer_source_info);
+}
+
static int loopback_probe(struct platform_device *devptr)
{
struct snd_card *card;
@@ -1146,6 +1730,8 @@ static int loopback_probe(struct platform_device *devptr)
pcm_substreams[dev] = MAX_PCM_SUBSTREAMS;
loopback->card = card;
+ loopback_set_timer_source(loopback, timer_source[dev]);
+
mutex_init(&loopback->cable_lock);
err = loopback_pcm_new(loopback, 0, pcm_substreams[dev]);
@@ -1157,8 +1743,9 @@ static int loopback_probe(struct platform_device *devptr)
err = loopback_mixer_new(loopback, pcm_notify[dev] ? 1 : 0);
if (err < 0)
goto __nodev;
- loopback_proc_new(loopback, 0);
- loopback_proc_new(loopback, 1);
+ loopback_cable_proc_new(loopback, 0);
+ loopback_cable_proc_new(loopback, 1);
+ loopback_timer_source_proc_new(loopback);
strcpy(card->driver, "Loopback");
strcpy(card->shortname, "Loopback");
sprintf(card->longname, "Loopback %i", dev + 1);
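The new timer_source node can be driven from userspace at runtime. A hedged sketch follows; the card index in the proc path is an example, and the value string is only a placeholder for whatever timer specification the driver's timer_source module parameter accepts:

	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>

	static int set_loopback_timer_source(const char *value)
	{
		/* e.g. value = "hw:0" -- placeholder format. */
		int fd = open("/proc/asound/card1/timer_source", O_WRONLY);

		if (fd < 0)
			return -1;
		if (write(fd, value, strlen(value)) < 0) {
			close(fd);
			return -1;
		}
		return close(fd);
	}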
diff --git a/sound/drivers/dummy.c b/sound/drivers/dummy.c
index aee7c04d49e5..022a0db692e0 100644
--- a/sound/drivers/dummy.c
+++ b/sound/drivers/dummy.c
@@ -702,7 +702,7 @@ static int snd_card_dummy_pcm(struct snd_dummy *dummy, int device,
if (!fake_buffer) {
snd_pcm_lib_preallocate_pages_for_all(pcm,
SNDRV_DMA_TYPE_CONTINUOUS,
- snd_dma_continuous_data(GFP_KERNEL),
+ NULL,
0, 64*1024);
}
return 0;
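The same conversion recurs in several hunks below: for SNDRV_DMA_TYPE_CONTINUOUS (and SNDRV_DMA_TYPE_VMALLOC) buffers, a NULL device-data argument now selects the default GFP_KERNEL allocation, so callers no longer need the snd_dma_continuous_data() wrapper. A sketch of the resulting idiom, with example sizes:

	#include <sound/pcm.h>

	static void preallocate_sketch(struct snd_pcm *pcm)
	{
		/* NULL device data: default kernel pages for the
		 * continuous type; sizes are examples only. */
		snd_pcm_lib_preallocate_pages_for_all(pcm,
				SNDRV_DMA_TYPE_CONTINUOUS,
				NULL, 64 * 1024, 128 * 1024);
	}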
diff --git a/sound/drivers/ml403-ac97cr.c b/sound/drivers/ml403-ac97cr.c
index a3c1c064d1b5..70a6d1832698 100644
--- a/sound/drivers/ml403-ac97cr.c
+++ b/sound/drivers/ml403-ac97cr.c
@@ -1242,7 +1242,7 @@ snd_ml403_ac97cr_pcm(struct snd_ml403_ac97cr *ml403_ac97cr, int device)
ml403_ac97cr->pcm = pcm;
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS,
- snd_dma_continuous_data(GFP_KERNEL),
+ NULL,
64 * 1024,
128 * 1024);
return 0;
diff --git a/sound/drivers/pcsp/pcsp_lib.c b/sound/drivers/pcsp/pcsp_lib.c
index 8f0f05bbc081..f91316bf01cb 100644
--- a/sound/drivers/pcsp/pcsp_lib.c
+++ b/sound/drivers/pcsp/pcsp_lib.c
@@ -352,8 +352,8 @@ int snd_pcsp_new_pcm(struct snd_pcsp *chip)
snd_pcm_lib_preallocate_pages_for_all(chip->pcm,
SNDRV_DMA_TYPE_CONTINUOUS,
- snd_dma_continuous_data
- (GFP_KERNEL), PCSP_BUFFER_SIZE,
+ NULL,
+ PCSP_BUFFER_SIZE,
PCSP_BUFFER_SIZE);
return 0;
diff --git a/sound/drivers/vx/vx_pcm.c b/sound/drivers/vx/vx_pcm.c
index 4705c50fbf4f..f17e0a76c73c 100644
--- a/sound/drivers/vx/vx_pcm.c
+++ b/sound/drivers/vx/vx_pcm.c
@@ -778,8 +778,7 @@ static snd_pcm_uframes_t vx_pcm_playback_pointer(struct snd_pcm_substream *subs)
static int vx_pcm_hw_params(struct snd_pcm_substream *subs,
struct snd_pcm_hw_params *hw_params)
{
- return snd_pcm_lib_alloc_vmalloc_32_buffer
- (subs, params_buffer_bytes(hw_params));
+ return snd_pcm_lib_malloc_pages(subs, params_buffer_bytes(hw_params));
}
/*
@@ -787,7 +786,7 @@ static int vx_pcm_hw_params(struct snd_pcm_substream *subs,
*/
static int vx_pcm_hw_free(struct snd_pcm_substream *subs)
{
- return snd_pcm_lib_free_vmalloc_buffer(subs);
+ return snd_pcm_lib_free_pages(subs);
}
/*
@@ -867,7 +866,6 @@ static const struct snd_pcm_ops vx_pcm_playback_ops = {
.prepare = vx_pcm_prepare,
.trigger = vx_pcm_trigger,
.pointer = vx_pcm_playback_pointer,
- .page = snd_pcm_lib_get_vmalloc_page,
};
@@ -1088,7 +1086,6 @@ static const struct snd_pcm_ops vx_pcm_capture_ops = {
.prepare = vx_pcm_prepare,
.trigger = vx_pcm_trigger,
.pointer = vx_pcm_capture_pointer,
- .page = snd_pcm_lib_get_vmalloc_page,
};
@@ -1233,6 +1230,9 @@ int snd_vx_pcm_new(struct vx_core *chip)
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &vx_pcm_playback_ops);
if (ins)
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &vx_pcm_capture_ops);
+ snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_VMALLOC,
+ snd_dma_continuous_data(GFP_KERNEL | GFP_DMA32),
+ 0, 0);
pcm->private_data = chip;
pcm->private_free = snd_vx_pcm_free;
diff --git a/sound/firewire/Kconfig b/sound/firewire/Kconfig
index b0a904cdb932..995c2cefc222 100644
--- a/sound/firewire/Kconfig
+++ b/sound/firewire/Kconfig
@@ -77,7 +77,7 @@ config SND_BEBOB
tristate "BridgeCo DM1000/DM1100/DM1500 with BeBoB firmware"
select SND_FIREWIRE_LIB
select SND_HWDEP
- help
+ help
Say Y here to include support for FireWire devices based
on BridgeCo DM1000/DM1100/DM1500 with BeBoB firmware:
* Edirol FA-66/FA-101
@@ -111,8 +111,8 @@ config SND_BEBOB
* M-Audio FireWire 1814/ProjectMix IO
* Digidesign Mbox 2 Pro
- To compile this driver as a module, choose M here: the module
- will be called snd-bebob.
+ To compile this driver as a module, choose M here: the module
+ will be called snd-bebob.
config SND_FIREWIRE_DIGI00X
tristate "Digidesign Digi 002/003 family support"
diff --git a/sound/firewire/amdtp-stream.c b/sound/firewire/amdtp-stream.c
index e50e28f77e74..37d38efb4c87 100644
--- a/sound/firewire/amdtp-stream.c
+++ b/sound/firewire/amdtp-stream.c
@@ -9,6 +9,7 @@
#include <linux/device.h>
#include <linux/err.h>
#include <linux/firewire.h>
+#include <linux/firewire-constants.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <sound/pcm.h>
@@ -52,10 +53,6 @@
#define CIP_FMT_AM 0x10
#define AMDTP_FDF_NO_DATA 0xff
-/* TODO: make these configurable */
-#define INTERRUPT_INTERVAL 16
-#define QUEUE_LENGTH 48
-
// For iso header, tstamp and 2 CIP header.
#define IR_CTX_HEADER_SIZE_CIP 16
// For iso header and tstamp.
@@ -180,6 +177,8 @@ int amdtp_stream_add_pcm_hw_constraints(struct amdtp_stream *s,
struct snd_pcm_runtime *runtime)
{
struct snd_pcm_hardware *hw = &runtime->hw;
+ unsigned int ctx_header_size;
+ unsigned int maximum_usec_per_period;
int err;
hw->info = SNDRV_PCM_INFO_BATCH |
@@ -200,19 +199,36 @@ int amdtp_stream_add_pcm_hw_constraints(struct amdtp_stream *s,
hw->period_bytes_max = hw->period_bytes_min * 2048;
hw->buffer_bytes_max = hw->period_bytes_max * hw->periods_min;
- /*
- * Currently firewire-lib processes 16 packets in one software
- * interrupt callback. This equals to 2msec but actually the
- * interval of the interrupts has a jitter.
- * Additionally, even if adding a constraint to fit period size to
- * 2msec, actual calculated frames per period doesn't equal to 2msec,
- * depending on sampling rate.
- * Anyway, the interval to call snd_pcm_period_elapsed() cannot 2msec.
- * Here let us use 5msec for safe period interrupt.
- */
+ // The Linux driver for the 1394 OHCI controller voluntarily flushes
+ // an isoc context when the total size of accumulated context headers
+ // reaches PAGE_SIZE. This kicks the tasklet for the isoc context and
+ // brings a callback in the middle of scheduled interrupts.
+ // Although AMDTP streams in the same domain use the same events per
+ // IRQ, use the largest size of context header between IT/IR contexts.
+ // Here, the size of the context header in the IR context is used for
+ // both contexts.
+ if (!(s->flags & CIP_NO_HEADER))
+ ctx_header_size = IR_CTX_HEADER_SIZE_CIP;
+ else
+ ctx_header_size = IR_CTX_HEADER_SIZE_NO_CIP;
+ maximum_usec_per_period = USEC_PER_SEC * PAGE_SIZE /
+ CYCLES_PER_SECOND / ctx_header_size;
+
+ // In IEC 61883-6, one isoc packet can transfer events up to the value
+ // of the syt interval. This comes from the interval of the isoc cycle.
+ // As the 1394 OHCI controller can generate a hardware IRQ per isoc
+ // packet, the interval is 125 usec.
+ // However, there are two modes of transmission in IEC 61883-6:
+ // blocking and non-blocking. In blocking mode, the sequence of isoc
+ // packets includes 'empty' or 'NODATA' packets which include no event.
+ // In non-blocking mode, the number of events per packet is variable up
+ // to the syt interval.
+ // Due to the above protocol design, the minimum PCM frames per
+ // interrupt should be double the value of the syt interval, thus it is
+ // 250 usec.
err = snd_pcm_hw_constraint_minmax(runtime,
SNDRV_PCM_HW_PARAM_PERIOD_TIME,
- 5000, UINT_MAX);
+ 250, maximum_usec_per_period);
if (err < 0)
goto end;
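The two bounds introduced above can be sanity-checked with the constants involved; a sketch of the arithmetic only, assuming PAGE_SIZE of 4096, CYCLES_PER_SECOND of 8000 and the 16-byte CIP context header:

	#define SKETCH_PAGE_SIZE	4096u
	#define SKETCH_CYCLES_PER_SEC	8000u
	#define SKETCH_CTX_HEADER_CIP	16u

	static unsigned int max_usec_per_period(void)
	{
		/* 1000000 * 4096 / 8000 / 16 = 32000 usec: with CIP headers
		 * the PAGE_SIZE flush caps the period at 32 msec. The lower
		 * bound is two isoc cycles: 2 * 125 usec = 250 usec. */
		return 1000000u * SKETCH_PAGE_SIZE / SKETCH_CYCLES_PER_SEC /
		       SKETCH_CTX_HEADER_CIP;
	}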
@@ -436,11 +452,12 @@ static void pcm_period_tasklet(unsigned long data)
snd_pcm_period_elapsed(pcm);
}
-static int queue_packet(struct amdtp_stream *s, struct fw_iso_packet *params)
+static int queue_packet(struct amdtp_stream *s, struct fw_iso_packet *params,
+ bool sched_irq)
{
int err;
- params->interrupt = IS_ALIGNED(s->packet_index + 1, INTERRUPT_INTERVAL);
+ params->interrupt = sched_irq;
params->tag = s->tag;
params->sy = 0;
@@ -451,18 +468,18 @@ static int queue_packet(struct amdtp_stream *s, struct fw_iso_packet *params)
goto end;
}
- if (++s->packet_index >= QUEUE_LENGTH)
+ if (++s->packet_index >= s->queue_size)
s->packet_index = 0;
end:
return err;
}
static inline int queue_out_packet(struct amdtp_stream *s,
- struct fw_iso_packet *params)
+ struct fw_iso_packet *params, bool sched_irq)
{
params->skip =
!!(params->header_length == 0 && params->payload_length == 0);
- return queue_packet(s, params);
+ return queue_packet(s, params, sched_irq);
}
static inline int queue_in_packet(struct amdtp_stream *s,
@@ -472,7 +489,7 @@ static inline int queue_in_packet(struct amdtp_stream *s,
params->header_length = s->ctx_data.tx.ctx_header_size;
params->payload_length = s->ctx_data.tx.max_ctx_payload_length;
params->skip = false;
- return queue_packet(s, params);
+ return queue_packet(s, params, false);
}
static void generate_cip_header(struct amdtp_stream *s, __be32 cip_header[2],
@@ -669,13 +686,14 @@ static inline u32 increment_cycle_count(u32 cycle, unsigned int addend)
}
// Align to actual cycle count for the packet which is going to be scheduled.
-// This module queued the same number of isochronous cycle as QUEUE_LENGTH to
-// skip isochronous cycle, therefore it's OK to just increment the cycle by
-// QUEUE_LENGTH for scheduled cycle.
-static inline u32 compute_it_cycle(const __be32 ctx_header_tstamp)
+// This module queues the same number of isochronous cycles as the size of the
+// queue to skip isochronous cycles, therefore it's OK to just increment the
+// cycle by the size of the queue for the scheduled cycle.
+static inline u32 compute_it_cycle(const __be32 ctx_header_tstamp,
+ unsigned int queue_size)
{
u32 cycle = compute_cycle_count(ctx_header_tstamp);
- return increment_cycle_count(cycle, QUEUE_LENGTH);
+ return increment_cycle_count(cycle, queue_size);
}
static int generate_device_pkt_descs(struct amdtp_stream *s,
@@ -689,7 +707,7 @@ static int generate_device_pkt_descs(struct amdtp_stream *s,
for (i = 0; i < packets; ++i) {
struct pkt_desc *desc = descs + i;
- unsigned int index = (s->packet_index + i) % QUEUE_LENGTH;
+ unsigned int index = (s->packet_index + i) % s->queue_size;
unsigned int cycle;
unsigned int payload_length;
unsigned int data_blocks;
@@ -730,9 +748,9 @@ static void generate_ideal_pkt_descs(struct amdtp_stream *s,
for (i = 0; i < packets; ++i) {
struct pkt_desc *desc = descs + i;
- unsigned int index = (s->packet_index + i) % QUEUE_LENGTH;
+ unsigned int index = (s->packet_index + i) % s->queue_size;
- desc->cycle = compute_it_cycle(*ctx_header);
+ desc->cycle = compute_it_cycle(*ctx_header, s->queue_size);
desc->syt = calculate_syt(s, desc->cycle);
desc->data_blocks = calculate_data_blocks(s, desc->syt);
@@ -773,22 +791,40 @@ static void process_ctx_payloads(struct amdtp_stream *s,
update_pcm_pointers(s, pcm, pcm_frames);
}
+static void amdtp_stream_master_callback(struct fw_iso_context *context,
+ u32 tstamp, size_t header_length,
+ void *header, void *private_data);
+
+static void amdtp_stream_master_first_callback(struct fw_iso_context *context,
+ u32 tstamp, size_t header_length,
+ void *header, void *private_data);
+
static void out_stream_callback(struct fw_iso_context *context, u32 tstamp,
size_t header_length, void *header,
void *private_data)
{
struct amdtp_stream *s = private_data;
const __be32 *ctx_header = header;
- unsigned int packets = header_length / sizeof(*ctx_header);
+ unsigned int events_per_period = s->ctx_data.rx.events_per_period;
+ unsigned int event_count = s->ctx_data.rx.event_count;
+ unsigned int packets;
+ bool is_irq_target;
int i;
if (s->packet_index < 0)
return;
+ // Calculate the number of packets in the buffer and check for XRUN.
+ packets = header_length / sizeof(*ctx_header);
+
generate_ideal_pkt_descs(s, s->pkt_descs, ctx_header, packets);
process_ctx_payloads(s, s->pkt_descs, packets);
+ is_irq_target =
+ !!(context->callback.sc == amdtp_stream_master_callback ||
+ context->callback.sc == amdtp_stream_master_first_callback);
+
for (i = 0; i < packets; ++i) {
const struct pkt_desc *desc = s->pkt_descs + i;
unsigned int syt;
@@ -796,6 +832,7 @@ static void out_stream_callback(struct fw_iso_context *context, u32 tstamp,
struct fw_iso_packet params;
__be32 header[IT_PKT_HEADER_SIZE_CIP / sizeof(__be32)];
} template = { {0}, {0} };
+ bool sched_irq = false;
if (s->ctx_data.rx.syt_override < 0)
syt = desc->syt;
@@ -806,13 +843,21 @@ static void out_stream_callback(struct fw_iso_context *context, u32 tstamp,
desc->data_blocks, desc->data_block_counter,
syt, i);
- if (queue_out_packet(s, &template.params) < 0) {
+ if (is_irq_target) {
+ event_count += desc->data_blocks;
+ if (event_count >= events_per_period) {
+ event_count -= events_per_period;
+ sched_irq = true;
+ }
+ }
+
+ if (queue_out_packet(s, &template.params, sched_irq) < 0) {
cancel_stream(s);
return;
}
}
- fw_iso_context_queue_flush(s->context);
+ s->ctx_data.rx.event_count = event_count;
}
static void in_stream_callback(struct fw_iso_context *context, u32 tstamp,
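Isolated from the callback above, the constant-IRQ policy reduces to accumulating the events carried by each queued packet and requesting a hardware IRQ whenever a period boundary is crossed. A minimal sketch of that accounting, not the patch's code verbatim:

	static bool period_boundary_crossed(unsigned int *event_count,
					    unsigned int data_blocks,
					    unsigned int events_per_period)
	{
		/* Accumulate events (PCM frames) per queued packet. */
		*event_count += data_blocks;
		if (*event_count >= events_per_period) {
			*event_count -= events_per_period;
			return true;	/* schedule an IRQ for this packet */
		}
		return false;
	}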
@@ -820,15 +865,15 @@ static void in_stream_callback(struct fw_iso_context *context, u32 tstamp,
void *private_data)
{
struct amdtp_stream *s = private_data;
- unsigned int packets;
__be32 *ctx_header = header;
+ unsigned int packets;
int i;
int err;
if (s->packet_index < 0)
return;
- // The number of packets in buffer.
+ // Calculate the number of packets in the buffer and check for XRUN.
packets = header_length / s->ctx_data.tx.ctx_header_size;
err = generate_device_pkt_descs(s, s->pkt_descs, ctx_header, packets);
@@ -849,11 +894,40 @@ static void in_stream_callback(struct fw_iso_context *context, u32 tstamp,
return;
}
}
+}
+
+static void amdtp_stream_master_callback(struct fw_iso_context *context,
+ u32 tstamp, size_t header_length,
+ void *header, void *private_data)
+{
+ struct amdtp_domain *d = private_data;
+ struct amdtp_stream *irq_target = d->irq_target;
+ struct amdtp_stream *s;
+
+ out_stream_callback(context, tstamp, header_length, header, irq_target);
+ if (amdtp_streaming_error(irq_target))
+ goto error;
+
+ list_for_each_entry(s, &d->streams, list) {
+ if (s != irq_target && amdtp_stream_running(s)) {
+ fw_iso_context_flush_completions(s->context);
+ if (amdtp_streaming_error(s))
+ goto error;
+ }
+ }
- fw_iso_context_queue_flush(s->context);
+ return;
+error:
+ if (amdtp_stream_running(irq_target))
+ cancel_stream(irq_target);
+
+ list_for_each_entry(s, &d->streams, list) {
+ if (amdtp_stream_running(s))
+ cancel_stream(s);
+ }
}
-/* this is executed one time */
+// This is executed only one time.
static void amdtp_stream_first_callback(struct fw_iso_context *context,
u32 tstamp, size_t header_length,
void *header, void *private_data)
@@ -874,7 +948,7 @@ static void amdtp_stream_first_callback(struct fw_iso_context *context,
context->callback.sc = in_stream_callback;
} else {
- cycle = compute_it_cycle(*ctx_header);
+ cycle = compute_it_cycle(*ctx_header, s->queue_size);
context->callback.sc = out_stream_callback;
}
@@ -884,17 +958,42 @@ static void amdtp_stream_first_callback(struct fw_iso_context *context,
context->callback.sc(context, tstamp, header_length, header, s);
}
+static void amdtp_stream_master_first_callback(struct fw_iso_context *context,
+ u32 tstamp, size_t header_length,
+ void *header, void *private_data)
+{
+ struct amdtp_domain *d = private_data;
+ struct amdtp_stream *s = d->irq_target;
+ const __be32 *ctx_header = header;
+
+ s->callbacked = true;
+ wake_up(&s->callback_wait);
+
+ s->start_cycle = compute_it_cycle(*ctx_header, s->queue_size);
+
+ context->callback.sc = amdtp_stream_master_callback;
+
+ context->callback.sc(context, tstamp, header_length, header, d);
+}
+
/**
* amdtp_stream_start - start transferring packets
* @s: the AMDTP stream to start
* @channel: the isochronous channel on the bus
* @speed: firewire speed code
+ * @d: the AMDTP domain to which the AMDTP stream belongs
+ * @is_irq_target: whether the isoc context for the AMDTP stream is used to
+ * generate hardware IRQ.
+ * @start_cycle: the isochronous cycle to start the context. Start immediately
+ * if a negative value is given.
*
* The stream cannot be started until it has been configured with
* amdtp_stream_set_parameters() and it must be started before any PCM or MIDI
* device can be started.
*/
-static int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed)
+static int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed,
+ struct amdtp_domain *d, bool is_irq_target,
+ int start_cycle)
{
static const struct {
unsigned int data_block;
@@ -908,10 +1007,15 @@ static int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed)
[CIP_SFC_88200] = { 0, 67 },
[CIP_SFC_176400] = { 0, 67 },
};
+ unsigned int events_per_buffer = d->events_per_buffer;
+ unsigned int events_per_period = d->events_per_period;
+ unsigned int idle_irq_interval;
unsigned int ctx_header_size;
unsigned int max_ctx_payload_size;
enum dma_data_direction dir;
int type, tag, err;
+ fw_iso_callback_t ctx_cb;
+ void *ctx_data;
mutex_lock(&s->mutex);
@@ -922,6 +1026,12 @@ static int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed)
}
if (s->direction == AMDTP_IN_STREAM) {
+ // NOTE: IT context should be used for constant IRQ.
+ if (is_irq_target) {
+ err = -EINVAL;
+ goto err_unlock;
+ }
+
s->data_block_counter = UINT_MAX;
} else {
entry = &initial_state[s->sfc];
@@ -953,14 +1063,37 @@ static int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed)
max_ctx_payload_size -= IT_PKT_HEADER_SIZE_CIP;
}
- err = iso_packets_buffer_init(&s->buffer, s->unit, QUEUE_LENGTH,
+ // This is the case in which AMDTP streams in the domain run just for a
+ // MIDI substream. Use the number of events equivalent to 10 msec as
+ // the interval of hardware IRQ.
+ if (events_per_period == 0)
+ events_per_period = amdtp_rate_table[s->sfc] / 100;
+ if (events_per_buffer == 0)
+ events_per_buffer = events_per_period * 3;
+
+ idle_irq_interval = DIV_ROUND_UP(CYCLES_PER_SECOND * events_per_period,
+ amdtp_rate_table[s->sfc]);
+ s->queue_size = DIV_ROUND_UP(CYCLES_PER_SECOND * events_per_buffer,
+ amdtp_rate_table[s->sfc]);
+
+ err = iso_packets_buffer_init(&s->buffer, s->unit, s->queue_size,
max_ctx_payload_size, dir);
if (err < 0)
goto err_unlock;
+ if (is_irq_target) {
+ s->ctx_data.rx.events_per_period = events_per_period;
+ s->ctx_data.rx.event_count = 0;
+ ctx_cb = amdtp_stream_master_first_callback;
+ ctx_data = d;
+ } else {
+ ctx_cb = amdtp_stream_first_callback;
+ ctx_data = s;
+ }
+
s->context = fw_iso_context_create(fw_parent_device(s->unit)->card,
type, channel, speed, ctx_header_size,
- amdtp_stream_first_callback, s);
+ ctx_cb, ctx_data);
if (IS_ERR(s->context)) {
err = PTR_ERR(s->context);
if (err == -EBUSY)
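Worked numbers for the sizing above, assuming 48000 frames/sec, the 10-msec default period and the 3x buffer default: events_per_period = 48000 / 100 = 480 frames, idle_irq_interval = DIV_ROUND_UP(8000 * 480, 48000) = 80 cycles, and queue_size = DIV_ROUND_UP(8000 * 1440, 48000) = 240 packets. A sketch of the conversion in isolation:

	static unsigned int cycles_for_events(unsigned int events,
					      unsigned int rate)
	{
		/* Open-coded DIV_ROUND_UP over the 8000 cycles/sec bus. */
		return (8000u * events + rate - 1) / rate;
	}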
@@ -981,7 +1114,7 @@ static int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed)
else
s->tag = TAG_CIP;
- s->pkt_descs = kcalloc(INTERRUPT_INTERVAL, sizeof(*s->pkt_descs),
+ s->pkt_descs = kcalloc(s->queue_size, sizeof(*s->pkt_descs),
GFP_KERNEL);
if (!s->pkt_descs) {
err = -ENOMEM;
@@ -991,12 +1124,21 @@ static int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed)
s->packet_index = 0;
do {
struct fw_iso_packet params;
+
if (s->direction == AMDTP_IN_STREAM) {
err = queue_in_packet(s, &params);
} else {
+ bool sched_irq = false;
+
params.header_length = 0;
params.payload_length = 0;
- err = queue_out_packet(s, &params);
+
+ if (is_irq_target) {
+ sched_irq = !((s->packet_index + 1) %
+ idle_irq_interval);
+ }
+
+ err = queue_out_packet(s, &params, sched_irq);
}
if (err < 0)
goto err_pkt_descs;
@@ -1008,7 +1150,7 @@ static int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed)
tag |= FW_ISO_CONTEXT_MATCH_TAG0;
s->callbacked = false;
- err = fw_iso_context_start(s->context, -1, 0, tag);
+ err = fw_iso_context_start(s->context, start_cycle, 0, tag);
if (err < 0)
goto err_pkt_descs;
@@ -1029,54 +1171,69 @@ err_unlock:
}
/**
- * amdtp_stream_pcm_pointer - get the PCM buffer position
+ * amdtp_domain_stream_pcm_pointer - get the PCM buffer position
+ * @d: the AMDTP domain.
* @s: the AMDTP stream that transports the PCM data
*
* Returns the current buffer position, in frames.
*/
-unsigned long amdtp_stream_pcm_pointer(struct amdtp_stream *s)
+unsigned long amdtp_domain_stream_pcm_pointer(struct amdtp_domain *d,
+ struct amdtp_stream *s)
{
- /*
- * This function is called in software IRQ context of period_tasklet or
- * process context.
- *
- * When the software IRQ context was scheduled by software IRQ context
- * of IR/IT contexts, queued packets were already handled. Therefore,
- * no need to flush the queue in buffer anymore.
- *
- * When the process context reach here, some packets will be already
- * queued in the buffer. These packets should be handled immediately
- * to keep better granularity of PCM pointer.
- *
- * Later, the process context will sometimes schedules software IRQ
- * context of the period_tasklet. Then, no need to flush the queue by
- * the same reason as described for IR/IT contexts.
- */
- if (!in_interrupt() && amdtp_stream_running(s))
- fw_iso_context_flush_completions(s->context);
+ struct amdtp_stream *irq_target = d->irq_target;
+
+ if (irq_target && amdtp_stream_running(irq_target)) {
+ // This function is called in the software IRQ context of the
+ // period_tasklet or in process context.
+ //
+ // When the software IRQ context was scheduled by the software
+ // IRQ context of IT contexts, queued packets were already
+ // handled. Therefore, there is no need to flush the queue in
+ // the buffer any further.
+ //
+ // When the process context reaches here, some packets will
+ // already be queued in the buffer. These packets should be
+ // handled immediately to keep better granularity of the PCM
+ // pointer.
+ //
+ // Later, the process context will sometimes schedule the
+ // software IRQ context of the period_tasklet. Then, there is
+ // no need to flush the queue for the same reason as described
+ // above.
+ if (!in_interrupt()) {
+ // Queued packets should be processed without any kernel
+ // preemption to keep latency low against the bus cycle.
+ preempt_disable();
+ fw_iso_context_flush_completions(irq_target->context);
+ preempt_enable();
+ }
+ }
return READ_ONCE(s->pcm_buffer_pointer);
}
-EXPORT_SYMBOL(amdtp_stream_pcm_pointer);
+EXPORT_SYMBOL_GPL(amdtp_domain_stream_pcm_pointer);
/**
- * amdtp_stream_pcm_ack - acknowledge queued PCM frames
+ * amdtp_domain_stream_pcm_ack - acknowledge queued PCM frames
+ * @d: the AMDTP domain.
* @s: the AMDTP stream that transfers the PCM frames
*
* Returns zero always.
*/
-int amdtp_stream_pcm_ack(struct amdtp_stream *s)
+int amdtp_domain_stream_pcm_ack(struct amdtp_domain *d, struct amdtp_stream *s)
{
- /*
- * Process isochronous packets for recent isochronous cycle to handle
- * queued PCM frames.
- */
- if (amdtp_stream_running(s))
- fw_iso_context_flush_completions(s->context);
+ struct amdtp_stream *irq_target = d->irq_target;
+
+ // Process isochronous packets for the recent isochronous cycle to
+ // handle queued PCM frames.
+ if (irq_target && amdtp_stream_running(irq_target)) {
+ // Queued packets should be processed without any kernel
+ // preemption to keep latency low against the bus cycle.
+ preempt_disable();
+ fw_iso_context_flush_completions(irq_target->context);
+ preempt_enable();
+ }
return 0;
}
-EXPORT_SYMBOL(amdtp_stream_pcm_ack);
+EXPORT_SYMBOL_GPL(amdtp_domain_stream_pcm_ack);
/**
* amdtp_stream_update - update the stream after a bus reset
@@ -1143,6 +1300,8 @@ int amdtp_domain_init(struct amdtp_domain *d)
{
INIT_LIST_HEAD(&d->streams);
+ d->events_per_period = 0;
+
return 0;
}
EXPORT_SYMBOL_GPL(amdtp_domain_init);
@@ -1184,26 +1343,105 @@ int amdtp_domain_add_stream(struct amdtp_domain *d, struct amdtp_stream *s,
}
EXPORT_SYMBOL_GPL(amdtp_domain_add_stream);
+static int get_current_cycle_time(struct fw_card *fw_card, int *cur_cycle)
+{
+ int generation;
+ int rcode;
+ __be32 reg;
+ u32 data;
+
+ // This is a request to the local 1394 OHCI controller and is expected
+ // to complete without any event waiting.
+ generation = fw_card->generation;
+ smp_rmb(); // node_id vs. generation.
+ rcode = fw_run_transaction(fw_card, TCODE_READ_QUADLET_REQUEST,
+ fw_card->node_id, generation, SCODE_100,
+ CSR_REGISTER_BASE + CSR_CYCLE_TIME,
+ &reg, sizeof(reg));
+ if (rcode != RCODE_COMPLETE)
+ return -EIO;
+
+ data = be32_to_cpu(reg);
+ *cur_cycle = data >> 12;
+
+ return 0;
+}
+
/**
* amdtp_domain_start - start sending packets for isoc context in the domain.
* @d: the AMDTP domain.
+ * @ir_delay_cycle: the cycle delay to start all IR contexts.
*/
-int amdtp_domain_start(struct amdtp_domain *d)
+int amdtp_domain_start(struct amdtp_domain *d, unsigned int ir_delay_cycle)
{
struct amdtp_stream *s;
- int err = 0;
+ int cycle;
+ int err;
+ // Select an IT context as IRQ target.
list_for_each_entry(s, &d->streams, list) {
- err = amdtp_stream_start(s, s->channel, s->speed);
- if (err < 0)
+ if (s->direction == AMDTP_OUT_STREAM)
break;
}
+ if (!s)
+ return -ENXIO;
+ d->irq_target = s;
- if (err < 0) {
- list_for_each_entry(s, &d->streams, list)
- amdtp_stream_stop(s);
+ if (ir_delay_cycle > 0) {
+ struct fw_card *fw_card = fw_parent_device(s->unit)->card;
+
+ err = get_current_cycle_time(fw_card, &cycle);
+ if (err < 0)
+ return err;
+
+ // No need to care about overflow in the cycle field because it
+ // is wide enough.
+ cycle += ir_delay_cycle;
+
+ // Round up to sec field.
+ if ((cycle & 0x00001fff) >= CYCLES_PER_SECOND) {
+ unsigned int sec;
+
+ // The sec field can overflow.
+ sec = (cycle & 0xffffe000) >> 13;
+ cycle = (++sec << 13) |
+ ((cycle & 0x00001fff) / CYCLES_PER_SECOND);
+ }
+
+ // In the OHCI 1394 specification, the lower 2 bits are
+ // available for the sec field.
+ cycle &= 0x00007fff;
+ } else {
+ cycle = -1;
+ }
+
+ list_for_each_entry(s, &d->streams, list) {
+ int cycle_match;
+
+ if (s->direction == AMDTP_IN_STREAM) {
+ cycle_match = cycle;
+ } else {
+ // IT context starts immediately.
+ cycle_match = -1;
+ }
+
+ if (s != d->irq_target) {
+ err = amdtp_stream_start(s, s->channel, s->speed, d,
+ false, cycle_match);
+ if (err < 0)
+ goto error;
+ }
}
+ s = d->irq_target;
+ err = amdtp_stream_start(s, s->channel, s->speed, d, true, -1);
+ if (err < 0)
+ goto error;
+
+ return 0;
+error:
+ list_for_each_entry(s, &d->streams, list)
+ amdtp_stream_stop(s);
return err;
}
EXPORT_SYMBOL_GPL(amdtp_domain_start);
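The cycle arithmetic above leans on the CYCLE_TIME register layout from IEEE 1394/OHCI: secondCount in bits 31..25, cycleCount in bits 24..12, cycleOffset in bits 11..0. A sketch of the representation produced by the `data >> 12` shift, and why the masks 0x1fff/0xffffe000/0x7fff appear:

	static unsigned int parse_cycle_time(u32 reg)
	{
		unsigned int sec = (reg >> 25) & 0x7f;		/* secondCount */
		unsigned int cycle = (reg >> 12) & 0x1fff;	/* cycleCount */

		/* Same representation as *cur_cycle above: seconds in bits
		 * 13 and up, cycle count (0..7999) in bits 12..0. The final
		 * mask with 0x00007fff keeps 2 sec bits plus 13 cycle bits,
		 * which is what the OHCI cycleMatch field can hold. */
		return (sec << 13) | cycle;
	}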
@@ -1216,10 +1454,17 @@ void amdtp_domain_stop(struct amdtp_domain *d)
{
struct amdtp_stream *s, *next;
+ if (d->irq_target)
+ amdtp_stream_stop(d->irq_target);
+
list_for_each_entry_safe(s, next, &d->streams, list) {
list_del(&s->list);
- amdtp_stream_stop(s);
+ if (s != d->irq_target)
+ amdtp_stream_stop(s);
}
+
+ d->events_per_period = 0;
+ d->irq_target = NULL;
}
EXPORT_SYMBOL_GPL(amdtp_domain_stop);
diff --git a/sound/firewire/amdtp-stream.h b/sound/firewire/amdtp-stream.h
index bbbca964b9b4..f2d44e2dc3c8 100644
--- a/sound/firewire/amdtp-stream.h
+++ b/sound/firewire/amdtp-stream.h
@@ -117,6 +117,7 @@ struct amdtp_stream {
/* For packet processing. */
struct fw_iso_context *context;
struct iso_packets_buffer buffer;
+ unsigned int queue_size;
int packet_index;
struct pkt_desc *pkt_descs;
int tag;
@@ -142,6 +143,10 @@ struct amdtp_stream {
// To generate CIP header.
unsigned int fdf;
int syt_override;
+
+ // To generate constant hardware IRQ.
+ unsigned int event_count;
+ unsigned int events_per_period;
} rx;
} ctx_data;
@@ -194,8 +199,6 @@ int amdtp_stream_add_pcm_hw_constraints(struct amdtp_stream *s,
struct snd_pcm_runtime *runtime);
void amdtp_stream_pcm_prepare(struct amdtp_stream *s);
-unsigned long amdtp_stream_pcm_pointer(struct amdtp_stream *s);
-int amdtp_stream_pcm_ack(struct amdtp_stream *s);
void amdtp_stream_pcm_abort(struct amdtp_stream *s);
extern const unsigned int amdtp_syt_intervals[CIP_SFC_COUNT];
@@ -272,6 +275,11 @@ static inline bool amdtp_stream_wait_callback(struct amdtp_stream *s,
struct amdtp_domain {
struct list_head streams;
+
+ unsigned int events_per_period;
+ unsigned int events_per_buffer;
+
+ struct amdtp_stream *irq_target;
};
int amdtp_domain_init(struct amdtp_domain *d);
@@ -280,7 +288,21 @@ void amdtp_domain_destroy(struct amdtp_domain *d);
int amdtp_domain_add_stream(struct amdtp_domain *d, struct amdtp_stream *s,
int channel, int speed);
-int amdtp_domain_start(struct amdtp_domain *d);
+int amdtp_domain_start(struct amdtp_domain *d, unsigned int ir_delay_cycle);
void amdtp_domain_stop(struct amdtp_domain *d);
+static inline int amdtp_domain_set_events_per_period(struct amdtp_domain *d,
+ unsigned int events_per_period,
+ unsigned int events_per_buffer)
+{
+ d->events_per_period = events_per_period;
+ d->events_per_buffer = events_per_buffer;
+
+ return 0;
+}
+
+unsigned long amdtp_domain_stream_pcm_pointer(struct amdtp_domain *d,
+ struct amdtp_stream *s);
+int amdtp_domain_stream_pcm_ack(struct amdtp_domain *d, struct amdtp_stream *s);
+
#endif
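Taken together, the reworked header exposes the following call sequence to drivers. A hedged usage sketch with error handling elided; the channel, speed and geometry values are placeholders, not values any particular driver uses:

	static int start_domain_sketch(struct amdtp_domain *d,
				       struct amdtp_stream *rx,
				       struct amdtp_stream *tx)
	{
		amdtp_domain_init(d);
		amdtp_domain_add_stream(d, rx, /* channel */ 0, /* speed */ 2);
		amdtp_domain_add_stream(d, tx, /* channel */ 1, /* speed */ 2);
		/* Propagate the PCM period/buffer geometry before start. */
		amdtp_domain_set_events_per_period(d, 480, 1440);
		/* Second argument: IR context start delay in isoc cycles. */
		return amdtp_domain_start(d, 0);
	}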
diff --git a/sound/firewire/bebob/bebob.h b/sound/firewire/bebob/bebob.h
index 356d6ba60959..d1ad9a8451bc 100644
--- a/sound/firewire/bebob/bebob.h
+++ b/sound/firewire/bebob/bebob.h
@@ -217,7 +217,9 @@ int snd_bebob_stream_get_clock_src(struct snd_bebob *bebob,
enum snd_bebob_clock_type *src);
int snd_bebob_stream_discover(struct snd_bebob *bebob);
int snd_bebob_stream_init_duplex(struct snd_bebob *bebob);
-int snd_bebob_stream_reserve_duplex(struct snd_bebob *bebob, unsigned int rate);
+int snd_bebob_stream_reserve_duplex(struct snd_bebob *bebob, unsigned int rate,
+ unsigned int frames_per_period,
+ unsigned int frames_per_buffer);
int snd_bebob_stream_start_duplex(struct snd_bebob *bebob);
void snd_bebob_stream_stop_duplex(struct snd_bebob *bebob);
void snd_bebob_stream_destroy_duplex(struct snd_bebob *bebob);
diff --git a/sound/firewire/bebob/bebob_midi.c b/sound/firewire/bebob/bebob_midi.c
index 4d8805fa8a00..6f597d03e7c1 100644
--- a/sound/firewire/bebob/bebob_midi.c
+++ b/sound/firewire/bebob/bebob_midi.c
@@ -17,7 +17,7 @@ static int midi_open(struct snd_rawmidi_substream *substream)
return err;
mutex_lock(&bebob->mutex);
- err = snd_bebob_stream_reserve_duplex(bebob, 0);
+ err = snd_bebob_stream_reserve_duplex(bebob, 0, 0, 0);
if (err >= 0) {
++bebob->substreams_counter;
err = snd_bebob_stream_start_duplex(bebob);
diff --git a/sound/firewire/bebob/bebob_pcm.c b/sound/firewire/bebob/bebob_pcm.c
index 0fb9eed46837..d4edd06d32cf 100644
--- a/sound/firewire/bebob/bebob_pcm.c
+++ b/sound/firewire/bebob/bebob_pcm.c
@@ -129,18 +129,17 @@ end:
return err;
}
-static int
-pcm_open(struct snd_pcm_substream *substream)
+static int pcm_open(struct snd_pcm_substream *substream)
{
struct snd_bebob *bebob = substream->private_data;
const struct snd_bebob_rate_spec *spec = bebob->spec->rate;
- unsigned int sampling_rate;
+ struct amdtp_domain *d = &bebob->domain;
enum snd_bebob_clock_type src;
int err;
err = snd_bebob_stream_lock_try(bebob);
if (err < 0)
- goto end;
+ return err;
err = pcm_init_hw_params(bebob, substream);
if (err < 0)
@@ -150,15 +149,20 @@ pcm_open(struct snd_pcm_substream *substream)
if (err < 0)
goto err_locked;
- /*
- * When source of clock is internal or any PCM stream are running,
- * the available sampling rate is limited at current sampling rate.
- */
+ mutex_lock(&bebob->mutex);
+
+ // When the source of clock is not internal or any stream is reserved
+ // for transmission of PCM frames, the available sampling rate is
+ // limited to the current one.
if (src == SND_BEBOB_CLOCK_TYPE_EXTERNAL ||
- amdtp_stream_pcm_running(&bebob->tx_stream) ||
- amdtp_stream_pcm_running(&bebob->rx_stream)) {
+ (bebob->substreams_counter > 0 && d->events_per_period > 0)) {
+ unsigned int frames_per_period = d->events_per_period;
+ unsigned int frames_per_buffer = d->events_per_buffer;
+ unsigned int sampling_rate;
+
err = spec->get(bebob, &sampling_rate);
if (err < 0) {
+ mutex_unlock(&bebob->mutex);
dev_err(&bebob->unit->device,
"fail to get sampling rate: %d\n", err);
goto err_locked;
@@ -166,11 +170,31 @@ pcm_open(struct snd_pcm_substream *substream)
substream->runtime->hw.rate_min = sampling_rate;
substream->runtime->hw.rate_max = sampling_rate;
+
+ if (frames_per_period > 0) {
+ err = snd_pcm_hw_constraint_minmax(substream->runtime,
+ SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
+ frames_per_period, frames_per_period);
+ if (err < 0) {
+ mutex_unlock(&bebob->mutex);
+ goto err_locked;
+ }
+
+ err = snd_pcm_hw_constraint_minmax(substream->runtime,
+ SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
+ frames_per_buffer, frames_per_buffer);
+ if (err < 0) {
+ mutex_unlock(&bebob->mutex);
+ goto err_locked;
+ }
+ }
}
+ mutex_unlock(&bebob->mutex);
+
snd_pcm_set_sync(substream);
-end:
- return err;
+
+ return 0;
err_locked:
snd_bebob_stream_lock_release(bebob);
return err;
@@ -190,16 +214,18 @@ static int pcm_hw_params(struct snd_pcm_substream *substream,
struct snd_bebob *bebob = substream->private_data;
int err;
- err = snd_pcm_lib_alloc_vmalloc_buffer(substream,
- params_buffer_bytes(hw_params));
+ err = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params));
if (err < 0)
return err;
if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN) {
unsigned int rate = params_rate(hw_params);
+ unsigned int frames_per_period = params_period_size(hw_params);
+ unsigned int frames_per_buffer = params_buffer_size(hw_params);
mutex_lock(&bebob->mutex);
- err = snd_bebob_stream_reserve_duplex(bebob, rate);
+ err = snd_bebob_stream_reserve_duplex(bebob, rate,
+ frames_per_period, frames_per_buffer);
if (err >= 0)
++bebob->substreams_counter;
mutex_unlock(&bebob->mutex);
@@ -221,7 +247,7 @@ static int pcm_hw_free(struct snd_pcm_substream *substream)
mutex_unlock(&bebob->mutex);
- return snd_pcm_lib_free_vmalloc_buffer(substream);
+ return snd_pcm_lib_free_pages(substream);
}
static int
@@ -286,31 +312,33 @@ pcm_playback_trigger(struct snd_pcm_substream *substream, int cmd)
return 0;
}
-static snd_pcm_uframes_t
-pcm_capture_pointer(struct snd_pcm_substream *sbstrm)
+static snd_pcm_uframes_t pcm_capture_pointer(struct snd_pcm_substream *sbstrm)
{
struct snd_bebob *bebob = sbstrm->private_data;
- return amdtp_stream_pcm_pointer(&bebob->tx_stream);
+
+ return amdtp_domain_stream_pcm_pointer(&bebob->domain,
+ &bebob->tx_stream);
}
-static snd_pcm_uframes_t
-pcm_playback_pointer(struct snd_pcm_substream *sbstrm)
+static snd_pcm_uframes_t pcm_playback_pointer(struct snd_pcm_substream *sbstrm)
{
struct snd_bebob *bebob = sbstrm->private_data;
- return amdtp_stream_pcm_pointer(&bebob->rx_stream);
+
+ return amdtp_domain_stream_pcm_pointer(&bebob->domain,
+ &bebob->rx_stream);
}
static int pcm_capture_ack(struct snd_pcm_substream *substream)
{
struct snd_bebob *bebob = substream->private_data;
- return amdtp_stream_pcm_ack(&bebob->tx_stream);
+ return amdtp_domain_stream_pcm_ack(&bebob->domain, &bebob->tx_stream);
}
static int pcm_playback_ack(struct snd_pcm_substream *substream)
{
struct snd_bebob *bebob = substream->private_data;
- return amdtp_stream_pcm_ack(&bebob->rx_stream);
+ return amdtp_domain_stream_pcm_ack(&bebob->domain, &bebob->rx_stream);
}
int snd_bebob_create_pcm_devices(struct snd_bebob *bebob)
@@ -325,7 +353,6 @@ int snd_bebob_create_pcm_devices(struct snd_bebob *bebob)
.trigger = pcm_capture_trigger,
.pointer = pcm_capture_pointer,
.ack = pcm_capture_ack,
- .page = snd_pcm_lib_get_vmalloc_page,
};
static const struct snd_pcm_ops playback_ops = {
.open = pcm_open,
@@ -337,7 +364,6 @@ int snd_bebob_create_pcm_devices(struct snd_bebob *bebob)
.trigger = pcm_playback_trigger,
.pointer = pcm_playback_pointer,
.ack = pcm_playback_ack,
- .page = snd_pcm_lib_get_vmalloc_page,
};
struct snd_pcm *pcm;
int err;
@@ -351,6 +377,8 @@ int snd_bebob_create_pcm_devices(struct snd_bebob *bebob)
"%s PCM", bebob->card->shortname);
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &playback_ops);
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &capture_ops);
+ snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_VMALLOC,
+ NULL, 0, 0);
end:
return err;
}
diff --git a/sound/firewire/bebob/bebob_stream.c b/sound/firewire/bebob/bebob_stream.c
index 6c1497d9f52b..bbae04793c50 100644
--- a/sound/firewire/bebob/bebob_stream.c
+++ b/sound/firewire/bebob/bebob_stream.c
@@ -7,7 +7,7 @@
#include "./bebob.h"
-#define CALLBACK_TIMEOUT 2000
+#define CALLBACK_TIMEOUT 2500
#define FW_ISO_RESOURCE_DELAY 1000
/*
@@ -398,36 +398,19 @@ check_connection_used_by_others(struct snd_bebob *bebob, struct amdtp_stream *s)
return err;
}
-static int make_both_connections(struct snd_bebob *bebob)
-{
- int err = 0;
-
- err = cmp_connection_establish(&bebob->out_conn);
- if (err < 0)
- return err;
-
- err = cmp_connection_establish(&bebob->in_conn);
- if (err < 0) {
- cmp_connection_break(&bebob->out_conn);
- return err;
- }
-
- return 0;
-}
-
-static void
-break_both_connections(struct snd_bebob *bebob)
+static void break_both_connections(struct snd_bebob *bebob)
{
cmp_connection_break(&bebob->in_conn);
cmp_connection_break(&bebob->out_conn);
- /* These models seems to be in transition state for a longer time. */
- if (bebob->maudio_special_quirk != NULL)
- msleep(200);
+ // These models seem to be in a transition state for a longer time.
+ // When accessed in this state, any transaction is corrupted. In the
+ // worst case, the device is going to reboot.
}
-static int
-start_stream(struct snd_bebob *bebob, struct amdtp_stream *stream)
+static int start_stream(struct snd_bebob *bebob, struct amdtp_stream *stream)
{
struct cmp_connection *conn;
int err = 0;
@@ -437,18 +420,19 @@ start_stream(struct snd_bebob *bebob, struct amdtp_stream *stream)
else
conn = &bebob->out_conn;
- /* channel mapping */
+ // channel mapping.
if (bebob->maudio_special_quirk == NULL) {
err = map_data_channels(bebob, stream);
if (err < 0)
- goto end;
+ return err;
}
- // start amdtp stream.
- err = amdtp_domain_add_stream(&bebob->domain, stream,
- conn->resources.channel, conn->speed);
-end:
- return err;
+ err = cmp_connection_establish(conn);
+ if (err < 0)
+ return err;
+
+ return amdtp_domain_add_stream(&bebob->domain, stream,
+ conn->resources.channel, conn->speed);
}
static int init_stream(struct snd_bebob *bebob, struct amdtp_stream *stream)
@@ -553,7 +537,9 @@ static int keep_resources(struct snd_bebob *bebob, struct amdtp_stream *stream,
return cmp_connection_reserve(conn, amdtp_stream_get_max_payload(stream));
}
-int snd_bebob_stream_reserve_duplex(struct snd_bebob *bebob, unsigned int rate)
+int snd_bebob_stream_reserve_duplex(struct snd_bebob *bebob, unsigned int rate,
+ unsigned int frames_per_period,
+ unsigned int frames_per_buffer)
{
unsigned int curr_rate;
int err;
@@ -606,6 +592,14 @@ int snd_bebob_stream_reserve_duplex(struct snd_bebob *bebob, unsigned int rate)
cmp_connection_release(&bebob->out_conn);
return err;
}
+
+ err = amdtp_domain_set_events_per_period(&bebob->domain,
+ frames_per_period, frames_per_buffer);
+ if (err < 0) {
+ cmp_connection_release(&bebob->out_conn);
+ cmp_connection_release(&bebob->in_conn);
+ return err;
+ }
}
return 0;
@@ -627,7 +621,10 @@ int snd_bebob_stream_start_duplex(struct snd_bebob *bebob)
}
if (!amdtp_stream_running(&bebob->rx_stream)) {
+ enum snd_bebob_clock_type src;
+ struct amdtp_stream *master, *slave;
unsigned int curr_rate;
+ unsigned int ir_delay_cycle;
if (bebob->maudio_special_quirk) {
err = bebob->spec->rate->get(bebob, &curr_rate);
@@ -635,19 +632,40 @@ int snd_bebob_stream_start_duplex(struct snd_bebob *bebob)
return err;
}
- err = make_both_connections(bebob);
+ err = snd_bebob_stream_get_clock_src(bebob, &src);
if (err < 0)
return err;
- err = start_stream(bebob, &bebob->rx_stream);
+ if (src != SND_BEBOB_CLOCK_TYPE_SYT) {
+ master = &bebob->tx_stream;
+ slave = &bebob->rx_stream;
+ } else {
+ master = &bebob->rx_stream;
+ slave = &bebob->tx_stream;
+ }
+
+ err = start_stream(bebob, master);
if (err < 0)
goto error;
- err = start_stream(bebob, &bebob->tx_stream);
+ err = start_stream(bebob, slave);
if (err < 0)
goto error;
- err = amdtp_domain_start(&bebob->domain);
+ // The device mostly postpones the start of transmission for
+ // 1 sec after it first receives packets. To be safe, the IR
+ // context starts 0.4 sec (=3200 cycles) later for version 1 or
+ // 2 firmware, and 2.0 sec (=16000 cycles) for version 3
+ // firmware. This is within 2.5 sec (=CALLBACK_TIMEOUT).
+ // Furthermore, some devices transfer isoc packets with a
+ // discontinuous counter at the beginning of packet streaming.
+ // The delay helps to avoid detecting this discontinuity.
+ if (bebob->version < 2)
+ ir_delay_cycle = 3200;
+ else
+ ir_delay_cycle = 16000;
+ err = amdtp_domain_start(&bebob->domain, ir_delay_cycle);
if (err < 0)
goto error;
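For reference, the two delays convert as sketched below under the 8000 cycles/sec isoc bus clock; both stay under the 2500-msec CALLBACK_TIMEOUT raised earlier in this patch:

	static unsigned int msec_to_isoc_cycles(unsigned int msec)
	{
		return msec * 8000u / 1000u; /* 400 -> 3200, 2000 -> 16000 */
	}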
diff --git a/sound/firewire/dice/dice-midi.c b/sound/firewire/dice/dice-midi.c
index c9e19bddfc09..4c2998034313 100644
--- a/sound/firewire/dice/dice-midi.c
+++ b/sound/firewire/dice/dice-midi.c
@@ -17,7 +17,7 @@ static int midi_open(struct snd_rawmidi_substream *substream)
mutex_lock(&dice->mutex);
- err = snd_dice_stream_reserve_duplex(dice, 0);
+ err = snd_dice_stream_reserve_duplex(dice, 0, 0, 0);
if (err >= 0) {
++dice->substreams_counter;
err = snd_dice_stream_start_duplex(dice);
diff --git a/sound/firewire/dice/dice-pcm.c b/sound/firewire/dice/dice-pcm.c
index 94a4dccfc381..be79d659eedf 100644
--- a/sound/firewire/dice/dice-pcm.c
+++ b/sound/firewire/dice/dice-pcm.c
@@ -164,13 +164,14 @@ static int init_hw_info(struct snd_dice *dice,
static int pcm_open(struct snd_pcm_substream *substream)
{
struct snd_dice *dice = substream->private_data;
+ struct amdtp_domain *d = &dice->domain;
unsigned int source;
bool internal;
int err;
err = snd_dice_stream_lock_try(dice);
if (err < 0)
- goto end;
+ return err;
err = init_hw_info(dice, substream);
if (err < 0)
@@ -195,27 +196,56 @@ static int pcm_open(struct snd_pcm_substream *substream)
break;
}
- /*
- * When source of clock is not internal or any PCM streams are running,
- * available sampling rate is limited at current sampling rate.
- */
+ mutex_lock(&dice->mutex);
+
+ // When the source of clock is not internal or any stream is reserved
+ // for transmission of PCM frames, the available sampling rate is
+ // limited to the current one.
if (!internal ||
- amdtp_stream_pcm_running(&dice->tx_stream[0]) ||
- amdtp_stream_pcm_running(&dice->tx_stream[1]) ||
- amdtp_stream_pcm_running(&dice->rx_stream[0]) ||
- amdtp_stream_pcm_running(&dice->rx_stream[1])) {
+ (dice->substreams_counter > 0 && d->events_per_period > 0)) {
+ unsigned int frames_per_period = d->events_per_period;
+ unsigned int frames_per_buffer = d->events_per_buffer;
unsigned int rate;
err = snd_dice_transaction_get_rate(dice, &rate);
- if (err < 0)
+ if (err < 0) {
+ mutex_unlock(&dice->mutex);
goto err_locked;
+ }
+
substream->runtime->hw.rate_min = rate;
substream->runtime->hw.rate_max = rate;
+
+ if (frames_per_period > 0) {
+ // For double_pcm_frame quirk.
+ if (rate > 96000) {
+ frames_per_period *= 2;
+ frames_per_buffer *= 2;
+ }
+
+ err = snd_pcm_hw_constraint_minmax(substream->runtime,
+ SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
+ frames_per_period, frames_per_period);
+ if (err < 0) {
+ mutex_unlock(&dice->mutex);
+ goto err_locked;
+ }
+
+ err = snd_pcm_hw_constraint_minmax(substream->runtime,
+ SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
+ frames_per_buffer, frames_per_buffer);
+ if (err < 0) {
+ mutex_unlock(&dice->mutex);
+ goto err_locked;
+ }
+ }
}
+ mutex_unlock(&dice->mutex);
+
snd_pcm_set_sync(substream);
-end:
- return err;
+
+ return 0;
err_locked:
snd_dice_stream_lock_release(dice);
return err;
@@ -236,16 +266,23 @@ static int pcm_hw_params(struct snd_pcm_substream *substream,
struct snd_dice *dice = substream->private_data;
int err;
- err = snd_pcm_lib_alloc_vmalloc_buffer(substream,
- params_buffer_bytes(hw_params));
+ err = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params));
if (err < 0)
return err;
if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN) {
unsigned int rate = params_rate(hw_params);
+ unsigned int events_per_period = params_period_size(hw_params);
+ unsigned int events_per_buffer = params_buffer_size(hw_params);
mutex_lock(&dice->mutex);
- err = snd_dice_stream_reserve_duplex(dice, rate);
+ // For double_pcm_frame quirk.
+ if (rate > 96000) {
+ events_per_period /= 2;
+ events_per_buffer /= 2;
+ }
+ err = snd_dice_stream_reserve_duplex(dice, rate,
+ events_per_period, events_per_buffer);
if (err >= 0)
++dice->substreams_counter;
mutex_unlock(&dice->mutex);
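The two halves of the double_pcm_frame handling above are inverses of each other: pcm_open doubles the constraints shown to ALSA, while hw_params (in the next hunk) halves the values handed to the AMDTP domain. A sketch of the conversion, assuming that above 96 kHz one data block carries two PCM frames:

	static unsigned int pcm_frames_to_amdtp_events(unsigned int rate,
						       unsigned int frames)
	{
		/* The domain works in events (data blocks); at 176.4 and
		 * 192 kHz each event carries two PCM frames. */
		if (rate > 96000)
			return frames / 2;
		return frames;
	}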
@@ -267,7 +304,7 @@ static int pcm_hw_free(struct snd_pcm_substream *substream)
mutex_unlock(&dice->mutex);
- return snd_pcm_lib_free_vmalloc_buffer(substream);
+ return snd_pcm_lib_free_pages(substream);
}
static int capture_prepare(struct snd_pcm_substream *substream)
@@ -341,14 +378,14 @@ static snd_pcm_uframes_t capture_pointer(struct snd_pcm_substream *substream)
struct snd_dice *dice = substream->private_data;
struct amdtp_stream *stream = &dice->tx_stream[substream->pcm->device];
- return amdtp_stream_pcm_pointer(stream);
+ return amdtp_domain_stream_pcm_pointer(&dice->domain, stream);
}
static snd_pcm_uframes_t playback_pointer(struct snd_pcm_substream *substream)
{
struct snd_dice *dice = substream->private_data;
struct amdtp_stream *stream = &dice->rx_stream[substream->pcm->device];
- return amdtp_stream_pcm_pointer(stream);
+ return amdtp_domain_stream_pcm_pointer(&dice->domain, stream);
}
static int capture_ack(struct snd_pcm_substream *substream)
@@ -356,7 +393,7 @@ static int capture_ack(struct snd_pcm_substream *substream)
struct snd_dice *dice = substream->private_data;
struct amdtp_stream *stream = &dice->tx_stream[substream->pcm->device];
- return amdtp_stream_pcm_ack(stream);
+ return amdtp_domain_stream_pcm_ack(&dice->domain, stream);
}
static int playback_ack(struct snd_pcm_substream *substream)
@@ -364,7 +401,7 @@ static int playback_ack(struct snd_pcm_substream *substream)
struct snd_dice *dice = substream->private_data;
struct amdtp_stream *stream = &dice->rx_stream[substream->pcm->device];
- return amdtp_stream_pcm_ack(stream);
+ return amdtp_domain_stream_pcm_ack(&dice->domain, stream);
}
int snd_dice_create_pcm(struct snd_dice *dice)
@@ -379,7 +416,6 @@ int snd_dice_create_pcm(struct snd_dice *dice)
.trigger = capture_trigger,
.pointer = capture_pointer,
.ack = capture_ack,
- .page = snd_pcm_lib_get_vmalloc_page,
};
static const struct snd_pcm_ops playback_ops = {
.open = pcm_open,
@@ -391,7 +427,6 @@ int snd_dice_create_pcm(struct snd_dice *dice)
.trigger = playback_trigger,
.pointer = playback_pointer,
.ack = playback_ack,
- .page = snd_pcm_lib_get_vmalloc_page,
};
struct snd_pcm *pcm;
unsigned int capture, playback;
@@ -421,6 +456,10 @@ int snd_dice_create_pcm(struct snd_dice *dice)
if (playback > 0)
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK,
&playback_ops);
+
+ snd_pcm_lib_preallocate_pages_for_all(pcm,
+ SNDRV_DMA_TYPE_VMALLOC,
+ NULL, 0, 0);
}
return 0;
diff --git a/sound/firewire/dice/dice-stream.c b/sound/firewire/dice/dice-stream.c
index f6a8627ae5a2..6a3d60913e10 100644
--- a/sound/firewire/dice/dice-stream.c
+++ b/sound/firewire/dice/dice-stream.c
@@ -278,7 +278,9 @@ static void finish_session(struct snd_dice *dice, struct reg_params *tx_params,
snd_dice_transaction_clear_enable(dice);
}
-int snd_dice_stream_reserve_duplex(struct snd_dice *dice, unsigned int rate)
+int snd_dice_stream_reserve_duplex(struct snd_dice *dice, unsigned int rate,
+ unsigned int events_per_period,
+ unsigned int events_per_buffer)
{
unsigned int curr_rate;
int err;
@@ -324,6 +326,11 @@ int snd_dice_stream_reserve_duplex(struct snd_dice *dice, unsigned int rate)
&rx_params);
if (err < 0)
goto error;
+
+ err = amdtp_domain_set_events_per_period(&dice->domain,
+ events_per_period, events_per_buffer);
+ if (err < 0)
+ goto error;
}
return 0;
@@ -455,7 +462,7 @@ int snd_dice_stream_start_duplex(struct snd_dice *dice)
goto error;
}
- err = amdtp_domain_start(&dice->domain);
+ err = amdtp_domain_start(&dice->domain, 0);
if (err < 0)
goto error;
diff --git a/sound/firewire/dice/dice.h b/sound/firewire/dice/dice.h
index fa6d74303f54..16366773e22e 100644
--- a/sound/firewire/dice/dice.h
+++ b/sound/firewire/dice/dice.h
@@ -210,7 +210,9 @@ int snd_dice_stream_start_duplex(struct snd_dice *dice);
void snd_dice_stream_stop_duplex(struct snd_dice *dice);
int snd_dice_stream_init_duplex(struct snd_dice *dice);
void snd_dice_stream_destroy_duplex(struct snd_dice *dice);
-int snd_dice_stream_reserve_duplex(struct snd_dice *dice, unsigned int rate);
+int snd_dice_stream_reserve_duplex(struct snd_dice *dice, unsigned int rate,
+ unsigned int events_per_period,
+ unsigned int events_per_buffer);
void snd_dice_stream_update_duplex(struct snd_dice *dice);
int snd_dice_stream_detect_current_formats(struct snd_dice *dice);
diff --git a/sound/firewire/digi00x/digi00x-midi.c b/sound/firewire/digi00x/digi00x-midi.c
index 2b57ece89101..68eb8c39afa6 100644
--- a/sound/firewire/digi00x/digi00x-midi.c
+++ b/sound/firewire/digi00x/digi00x-midi.c
@@ -17,7 +17,7 @@ static int midi_open(struct snd_rawmidi_substream *substream)
return err;
mutex_lock(&dg00x->mutex);
- err = snd_dg00x_stream_reserve_duplex(dg00x, 0);
+ err = snd_dg00x_stream_reserve_duplex(dg00x, 0, 0, 0);
if (err >= 0) {
++dg00x->substreams_counter;
err = snd_dg00x_stream_start_duplex(dg00x);
diff --git a/sound/firewire/digi00x/digi00x-pcm.c b/sound/firewire/digi00x/digi00x-pcm.c
index 18e561b26625..57cbce4fd836 100644
--- a/sound/firewire/digi00x/digi00x-pcm.c
+++ b/sound/firewire/digi00x/digi00x-pcm.c
@@ -100,14 +100,14 @@ static int pcm_init_hw_params(struct snd_dg00x *dg00x,
static int pcm_open(struct snd_pcm_substream *substream)
{
struct snd_dg00x *dg00x = substream->private_data;
+ struct amdtp_domain *d = &dg00x->domain;
enum snd_dg00x_clock clock;
bool detect;
- unsigned int rate;
int err;
err = snd_dg00x_stream_lock_try(dg00x);
if (err < 0)
- goto end;
+ return err;
err = pcm_init_hw_params(dg00x, substream);
if (err < 0)
@@ -127,19 +127,49 @@ static int pcm_open(struct snd_pcm_substream *substream)
}
}
+ mutex_lock(&dg00x->mutex);
+
+ // When the source of clock is not internal or any stream is reserved
+ // for transmission of PCM frames, the available sampling rate is
+ // limited to the current one.
if ((clock != SND_DG00X_CLOCK_INTERNAL) ||
- amdtp_stream_pcm_running(&dg00x->rx_stream) ||
- amdtp_stream_pcm_running(&dg00x->tx_stream)) {
+ (dg00x->substreams_counter > 0 && d->events_per_period > 0)) {
+ unsigned int frames_per_period = d->events_per_period;
+ unsigned int frames_per_buffer = d->events_per_buffer;
+ unsigned int rate;
+
err = snd_dg00x_stream_get_external_rate(dg00x, &rate);
- if (err < 0)
+ if (err < 0) {
+ mutex_unlock(&dg00x->mutex);
goto err_locked;
+ }
substream->runtime->hw.rate_min = rate;
substream->runtime->hw.rate_max = rate;
+
+ if (frames_per_period > 0) {
+ err = snd_pcm_hw_constraint_minmax(substream->runtime,
+ SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
+ frames_per_period, frames_per_period);
+ if (err < 0) {
+ mutex_unlock(&dg00x->mutex);
+ goto err_locked;
+ }
+
+ err = snd_pcm_hw_constraint_minmax(substream->runtime,
+ SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
+ frames_per_buffer, frames_per_buffer);
+ if (err < 0) {
+ mutex_unlock(&dg00x->mutex);
+ goto err_locked;
+ }
+ }
}
+ mutex_unlock(&dg00x->mutex);
+
snd_pcm_set_sync(substream);
-end:
- return err;
+
+ return 0;
err_locked:
snd_dg00x_stream_lock_release(dg00x);
return err;
@@ -160,16 +190,18 @@ static int pcm_hw_params(struct snd_pcm_substream *substream,
struct snd_dg00x *dg00x = substream->private_data;
int err;
- err = snd_pcm_lib_alloc_vmalloc_buffer(substream,
- params_buffer_bytes(hw_params));
+ err = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params));
if (err < 0)
return err;
if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN) {
unsigned int rate = params_rate(hw_params);
+ unsigned int frames_per_period = params_period_size(hw_params);
+ unsigned int frames_per_buffer = params_buffer_size(hw_params);
mutex_lock(&dg00x->mutex);
- err = snd_dg00x_stream_reserve_duplex(dg00x, rate);
+ err = snd_dg00x_stream_reserve_duplex(dg00x, rate,
+ frames_per_period, frames_per_buffer);
if (err >= 0)
++dg00x->substreams_counter;
mutex_unlock(&dg00x->mutex);
@@ -191,7 +223,7 @@ static int pcm_hw_free(struct snd_pcm_substream *substream)
mutex_unlock(&dg00x->mutex);
- return snd_pcm_lib_free_vmalloc_buffer(substream);
+ return snd_pcm_lib_free_pages(substream);
}
static int pcm_capture_prepare(struct snd_pcm_substream *substream)
@@ -268,28 +300,28 @@ static snd_pcm_uframes_t pcm_capture_pointer(struct snd_pcm_substream *sbstrm)
{
struct snd_dg00x *dg00x = sbstrm->private_data;
- return amdtp_stream_pcm_pointer(&dg00x->tx_stream);
+ return amdtp_domain_stream_pcm_pointer(&dg00x->domain, &dg00x->tx_stream);
}
static snd_pcm_uframes_t pcm_playback_pointer(struct snd_pcm_substream *sbstrm)
{
struct snd_dg00x *dg00x = sbstrm->private_data;
- return amdtp_stream_pcm_pointer(&dg00x->rx_stream);
+ return amdtp_domain_stream_pcm_pointer(&dg00x->domain, &dg00x->rx_stream);
}
static int pcm_capture_ack(struct snd_pcm_substream *substream)
{
struct snd_dg00x *dg00x = substream->private_data;
- return amdtp_stream_pcm_ack(&dg00x->tx_stream);
+ return amdtp_domain_stream_pcm_ack(&dg00x->domain, &dg00x->tx_stream);
}
static int pcm_playback_ack(struct snd_pcm_substream *substream)
{
struct snd_dg00x *dg00x = substream->private_data;
- return amdtp_stream_pcm_ack(&dg00x->rx_stream);
+ return amdtp_domain_stream_pcm_ack(&dg00x->domain, &dg00x->rx_stream);
}
int snd_dg00x_create_pcm_devices(struct snd_dg00x *dg00x)
@@ -304,7 +336,6 @@ int snd_dg00x_create_pcm_devices(struct snd_dg00x *dg00x)
.trigger = pcm_capture_trigger,
.pointer = pcm_capture_pointer,
.ack = pcm_capture_ack,
- .page = snd_pcm_lib_get_vmalloc_page,
};
static const struct snd_pcm_ops playback_ops = {
.open = pcm_open,
@@ -316,7 +347,6 @@ int snd_dg00x_create_pcm_devices(struct snd_dg00x *dg00x)
.trigger = pcm_playback_trigger,
.pointer = pcm_playback_pointer,
.ack = pcm_playback_ack,
- .page = snd_pcm_lib_get_vmalloc_page,
};
struct snd_pcm *pcm;
int err;
@@ -330,6 +360,8 @@ int snd_dg00x_create_pcm_devices(struct snd_dg00x *dg00x)
"%s PCM", dg00x->card->shortname);
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &playback_ops);
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &capture_ops);
+ snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_VMALLOC,
+ NULL, 0, 0);
return 0;
}
diff --git a/sound/firewire/digi00x/digi00x-stream.c b/sound/firewire/digi00x/digi00x-stream.c
index d6a92460060f..405d6903bfbc 100644
--- a/sound/firewire/digi00x/digi00x-stream.c
+++ b/sound/firewire/digi00x/digi00x-stream.c
@@ -283,7 +283,9 @@ void snd_dg00x_stream_destroy_duplex(struct snd_dg00x *dg00x)
destroy_stream(dg00x, &dg00x->tx_stream);
}
-int snd_dg00x_stream_reserve_duplex(struct snd_dg00x *dg00x, unsigned int rate)
+int snd_dg00x_stream_reserve_duplex(struct snd_dg00x *dg00x, unsigned int rate,
+ unsigned int frames_per_period,
+ unsigned int frames_per_buffer)
{
unsigned int curr_rate;
int err;
@@ -315,6 +317,14 @@ int snd_dg00x_stream_reserve_duplex(struct snd_dg00x *dg00x, unsigned int rate)
fw_iso_resources_free(&dg00x->rx_resources);
return err;
}
+
+ err = amdtp_domain_set_events_per_period(&dg00x->domain,
+ frames_per_period, frames_per_buffer);
+ if (err < 0) {
+ fw_iso_resources_free(&dg00x->rx_resources);
+ fw_iso_resources_free(&dg00x->tx_resources);
+ return err;
+ }
}
return 0;
@@ -365,7 +375,7 @@ int snd_dg00x_stream_start_duplex(struct snd_dg00x *dg00x)
if (err < 0)
goto error;
- err = amdtp_domain_start(&dg00x->domain);
+ err = amdtp_domain_start(&dg00x->domain, 0);
if (err < 0)
goto error;
diff --git a/sound/firewire/digi00x/digi00x.h b/sound/firewire/digi00x/digi00x.h
index 8041c65f2736..129de8edd5ea 100644
--- a/sound/firewire/digi00x/digi00x.h
+++ b/sound/firewire/digi00x/digi00x.h
@@ -141,7 +141,9 @@ int snd_dg00x_stream_get_clock(struct snd_dg00x *dg00x,
int snd_dg00x_stream_check_external_clock(struct snd_dg00x *dg00x,
bool *detect);
int snd_dg00x_stream_init_duplex(struct snd_dg00x *dg00x);
-int snd_dg00x_stream_reserve_duplex(struct snd_dg00x *dg00x, unsigned int rate);
+int snd_dg00x_stream_reserve_duplex(struct snd_dg00x *dg00x, unsigned int rate,
+ unsigned int frames_per_period,
+ unsigned int frames_per_buffer);
int snd_dg00x_stream_start_duplex(struct snd_dg00x *dg00x);
void snd_dg00x_stream_stop_duplex(struct snd_dg00x *dg00x);
void snd_dg00x_stream_update_duplex(struct snd_dg00x *dg00x);
diff --git a/sound/firewire/fireface/ff-pcm.c b/sound/firewire/fireface/ff-pcm.c
index 9eab3ad283ce..4e3bd9a2bec0 100644
--- a/sound/firewire/fireface/ff-pcm.c
+++ b/sound/firewire/fireface/ff-pcm.c
@@ -139,6 +139,7 @@ static int pcm_init_hw_params(struct snd_ff *ff,
static int pcm_open(struct snd_pcm_substream *substream)
{
struct snd_ff *ff = substream->private_data;
+ struct amdtp_domain *d = &ff->domain;
unsigned int rate;
enum snd_ff_clock_src src;
int i, err;
@@ -155,16 +156,21 @@ static int pcm_open(struct snd_pcm_substream *substream)
if (err < 0)
goto release_lock;
+ mutex_lock(&ff->mutex);
+
+ // When the source of clock is not internal or any stream is reserved for
+ // transmission of PCM frames, the available sampling rate is limited
+ // to the current one.
if (src != SND_FF_CLOCK_SRC_INTERNAL) {
for (i = 0; i < CIP_SFC_COUNT; ++i) {
if (amdtp_rate_table[i] == rate)
break;
}
- /*
- * The unit is configured at sampling frequency which packet
- * streaming engine can't support.
- */
+
+ // The unit is configured at a sampling frequency which the packet
+ // streaming engine can't support.
if (i >= CIP_SFC_COUNT) {
+ mutex_unlock(&ff->mutex);
err = -EIO;
goto release_lock;
}
@@ -172,14 +178,34 @@ static int pcm_open(struct snd_pcm_substream *substream)
substream->runtime->hw.rate_min = rate;
substream->runtime->hw.rate_max = rate;
} else {
- if (amdtp_stream_pcm_running(&ff->rx_stream) ||
- amdtp_stream_pcm_running(&ff->tx_stream)) {
+ if (ff->substreams_counter > 0) {
+ unsigned int frames_per_period = d->events_per_period;
+ unsigned int frames_per_buffer = d->events_per_buffer;
+
rate = amdtp_rate_table[ff->rx_stream.sfc];
substream->runtime->hw.rate_min = rate;
substream->runtime->hw.rate_max = rate;
+
+ err = snd_pcm_hw_constraint_minmax(substream->runtime,
+ SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
+ frames_per_period, frames_per_period);
+ if (err < 0) {
+ mutex_unlock(&ff->mutex);
+ goto release_lock;
+ }
+
+ err = snd_pcm_hw_constraint_minmax(substream->runtime,
+ SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
+ frames_per_buffer, frames_per_buffer);
+ if (err < 0) {
+ mutex_unlock(&ff->mutex);
+ goto release_lock;
+ }
}
}
+ mutex_unlock(&ff->mutex);
+
snd_pcm_set_sync(substream);
return 0;
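[Editor's note: the period/buffer pinning above recurs nearly verbatim in the fireworks, motu, oxfw and tascam hunks below. A hypothetical helper distilling it: once the domain already carries events-per-period/buffer values, a newly opened substream must adopt exactly those sizes, so each parameter is constrained with min == max:]

	static int example_pin_frames(struct snd_pcm_runtime *runtime,
				      struct amdtp_domain *d)
	{
		int err;

		// Collapse the period size to the single value the running
		// domain was configured with.
		err = snd_pcm_hw_constraint_minmax(runtime,
				SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
				d->events_per_period, d->events_per_period);
		if (err < 0)
			return err;

		// Likewise for the buffer size.
		return snd_pcm_hw_constraint_minmax(runtime,
				SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
				d->events_per_buffer, d->events_per_buffer);
	}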
@@ -204,16 +230,18 @@ static int pcm_hw_params(struct snd_pcm_substream *substream,
struct snd_ff *ff = substream->private_data;
int err;
- err = snd_pcm_lib_alloc_vmalloc_buffer(substream,
- params_buffer_bytes(hw_params));
+ err = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params));
if (err < 0)
return err;
if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN) {
unsigned int rate = params_rate(hw_params);
+ unsigned int frames_per_period = params_period_size(hw_params);
+ unsigned int frames_per_buffer = params_buffer_size(hw_params);
mutex_lock(&ff->mutex);
- err = snd_ff_stream_reserve_duplex(ff, rate);
+ err = snd_ff_stream_reserve_duplex(ff, rate, frames_per_period,
+ frames_per_buffer);
if (err >= 0)
++ff->substreams_counter;
mutex_unlock(&ff->mutex);
@@ -235,7 +263,7 @@ static int pcm_hw_free(struct snd_pcm_substream *substream)
mutex_unlock(&ff->mutex);
- return snd_pcm_lib_free_vmalloc_buffer(substream);
+ return snd_pcm_lib_free_pages(substream);
}
static int pcm_capture_prepare(struct snd_pcm_substream *substream)
@@ -312,28 +340,28 @@ static snd_pcm_uframes_t pcm_capture_pointer(struct snd_pcm_substream *sbstrm)
{
struct snd_ff *ff = sbstrm->private_data;
- return amdtp_stream_pcm_pointer(&ff->tx_stream);
+ return amdtp_domain_stream_pcm_pointer(&ff->domain, &ff->tx_stream);
}
static snd_pcm_uframes_t pcm_playback_pointer(struct snd_pcm_substream *sbstrm)
{
struct snd_ff *ff = sbstrm->private_data;
- return amdtp_stream_pcm_pointer(&ff->rx_stream);
+ return amdtp_domain_stream_pcm_pointer(&ff->domain, &ff->rx_stream);
}
static int pcm_capture_ack(struct snd_pcm_substream *substream)
{
struct snd_ff *ff = substream->private_data;
- return amdtp_stream_pcm_ack(&ff->tx_stream);
+ return amdtp_domain_stream_pcm_ack(&ff->domain, &ff->tx_stream);
}
static int pcm_playback_ack(struct snd_pcm_substream *substream)
{
struct snd_ff *ff = substream->private_data;
- return amdtp_stream_pcm_ack(&ff->rx_stream);
+ return amdtp_domain_stream_pcm_ack(&ff->domain, &ff->rx_stream);
}
int snd_ff_create_pcm_devices(struct snd_ff *ff)
@@ -348,7 +376,6 @@ int snd_ff_create_pcm_devices(struct snd_ff *ff)
.trigger = pcm_capture_trigger,
.pointer = pcm_capture_pointer,
.ack = pcm_capture_ack,
- .page = snd_pcm_lib_get_vmalloc_page,
};
static const struct snd_pcm_ops pcm_playback_ops = {
.open = pcm_open,
@@ -360,7 +387,6 @@ int snd_ff_create_pcm_devices(struct snd_ff *ff)
.trigger = pcm_playback_trigger,
.pointer = pcm_playback_pointer,
.ack = pcm_playback_ack,
- .page = snd_pcm_lib_get_vmalloc_page,
};
struct snd_pcm *pcm;
int err;
@@ -374,6 +400,8 @@ int snd_ff_create_pcm_devices(struct snd_ff *ff)
"%s PCM", ff->card->shortname);
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &pcm_playback_ops);
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &pcm_capture_ops);
+ snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_VMALLOC,
+ NULL, 0, 0);
return 0;
}
diff --git a/sound/firewire/fireface/ff-stream.c b/sound/firewire/fireface/ff-stream.c
index e8e6f9fd6433..63b79c4a5405 100644
--- a/sound/firewire/fireface/ff-stream.c
+++ b/sound/firewire/fireface/ff-stream.c
@@ -106,7 +106,9 @@ void snd_ff_stream_destroy_duplex(struct snd_ff *ff)
destroy_stream(ff, &ff->tx_stream);
}
-int snd_ff_stream_reserve_duplex(struct snd_ff *ff, unsigned int rate)
+int snd_ff_stream_reserve_duplex(struct snd_ff *ff, unsigned int rate,
+ unsigned int frames_per_period,
+ unsigned int frames_per_buffer)
{
unsigned int curr_rate;
enum snd_ff_clock_src src;
@@ -150,6 +152,14 @@ int snd_ff_stream_reserve_duplex(struct snd_ff *ff, unsigned int rate)
err = ff->spec->protocol->allocate_resources(ff, rate);
if (err < 0)
return err;
+
+ err = amdtp_domain_set_events_per_period(&ff->domain,
+ frames_per_period, frames_per_buffer);
+ if (err < 0) {
+ fw_iso_resources_free(&ff->tx_resources);
+ fw_iso_resources_free(&ff->rx_resources);
+ return err;
+ }
}
return 0;
@@ -174,6 +184,7 @@ int snd_ff_stream_start_duplex(struct snd_ff *ff, unsigned int rate)
*/
if (!amdtp_stream_running(&ff->rx_stream)) {
int spd = fw_parent_device(ff->unit)->max_speed;
+ unsigned int ir_delay_cycle;
err = ff->spec->protocol->begin_session(ff, rate);
if (err < 0)
@@ -189,7 +200,14 @@ int snd_ff_stream_start_duplex(struct snd_ff *ff, unsigned int rate)
if (err < 0)
goto error;
- err = amdtp_domain_start(&ff->domain);
+ // The device typically postpones the start of transmission for several
+ // cycles after it first receives packets.
+ if (ff->spec->protocol == &snd_ff_protocol_ff800)
+ ir_delay_cycle = 800; // = 100 msec
+ else
+ ir_delay_cycle = 16; // = 2 msec
+
+ err = amdtp_domain_start(&ff->domain, ir_delay_cycle);
if (err < 0)
goto error;
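[Editor's note: the delay constants above follow from the IEEE 1394 cycle clock, which ticks 8000 times per second (one isochronous cycle every 125 usec), so 800 cycles = 100 msec and 16 cycles = 2 msec; other drivers in this series pass 0 for no delay. A hypothetical converter making the arithmetic explicit:]

	static unsigned int example_msec_to_cycles(unsigned int msec)
	{
		// 8000 isochronous cycles elapse per second on the bus;
		// scale milliseconds accordingly.
		return msec * 8000 / 1000;
	}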
diff --git a/sound/firewire/fireface/ff.h b/sound/firewire/fireface/ff.h
index b4c22ca6079e..dc7a20f75983 100644
--- a/sound/firewire/fireface/ff.h
+++ b/sound/firewire/fireface/ff.h
@@ -139,7 +139,9 @@ int snd_ff_stream_get_multiplier_mode(enum cip_sfc sfc,
enum snd_ff_stream_mode *mode);
int snd_ff_stream_init_duplex(struct snd_ff *ff);
void snd_ff_stream_destroy_duplex(struct snd_ff *ff);
-int snd_ff_stream_reserve_duplex(struct snd_ff *ff, unsigned int rate);
+int snd_ff_stream_reserve_duplex(struct snd_ff *ff, unsigned int rate,
+ unsigned int frames_per_period,
+ unsigned int frames_per_buffer);
int snd_ff_stream_start_duplex(struct snd_ff *ff, unsigned int rate);
void snd_ff_stream_stop_duplex(struct snd_ff *ff);
void snd_ff_stream_update_duplex(struct snd_ff *ff);
diff --git a/sound/firewire/fireworks/fireworks.h b/sound/firewire/fireworks/fireworks.h
index 4cda297f8438..dda797209a27 100644
--- a/sound/firewire/fireworks/fireworks.h
+++ b/sound/firewire/fireworks/fireworks.h
@@ -207,7 +207,9 @@ int snd_efw_command_get_sampling_rate(struct snd_efw *efw, unsigned int *rate);
int snd_efw_command_set_sampling_rate(struct snd_efw *efw, unsigned int rate);
int snd_efw_stream_init_duplex(struct snd_efw *efw);
-int snd_efw_stream_reserve_duplex(struct snd_efw *efw, unsigned int rate);
+int snd_efw_stream_reserve_duplex(struct snd_efw *efw, unsigned int rate,
+ unsigned int frames_per_period,
+ unsigned int frames_per_buffer);
int snd_efw_stream_start_duplex(struct snd_efw *efw);
void snd_efw_stream_stop_duplex(struct snd_efw *efw);
void snd_efw_stream_update_duplex(struct snd_efw *efw);
diff --git a/sound/firewire/fireworks/fireworks_midi.c b/sound/firewire/fireworks/fireworks_midi.c
index a9f4a9630d15..84621e356848 100644
--- a/sound/firewire/fireworks/fireworks_midi.c
+++ b/sound/firewire/fireworks/fireworks_midi.c
@@ -17,7 +17,7 @@ static int midi_open(struct snd_rawmidi_substream *substream)
goto end;
mutex_lock(&efw->mutex);
- err = snd_efw_stream_reserve_duplex(efw, 0);
+ err = snd_efw_stream_reserve_duplex(efw, 0, 0, 0);
if (err >= 0) {
++efw->substreams_counter;
err = snd_efw_stream_start_duplex(efw);
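[Editor's note: for a MIDI-only substream no rate or frame sizing is imposed. Reading the surrounding hunks, a rate of 0 appears to mean "keep the current sampling rate", and the trailing 0/0 leaves the domain's events-per-period/buffer unconfigured; this is an inference from context, not stated in the excerpt:]

	// Join whatever PCM configuration is (or will be) in place.
	err = snd_efw_stream_reserve_duplex(efw, 0, 0, 0);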
diff --git a/sound/firewire/fireworks/fireworks_pcm.c b/sound/firewire/fireworks/fireworks_pcm.c
index a7025dccc754..e69896d748df 100644
--- a/sound/firewire/fireworks/fireworks_pcm.c
+++ b/sound/firewire/fireworks/fireworks_pcm.c
@@ -173,13 +173,13 @@ end:
static int pcm_open(struct snd_pcm_substream *substream)
{
struct snd_efw *efw = substream->private_data;
- unsigned int sampling_rate;
+ struct amdtp_domain *d = &efw->domain;
enum snd_efw_clock_source clock_source;
int err;
err = snd_efw_stream_lock_try(efw);
if (err < 0)
- goto end;
+ return err;
err = pcm_init_hw_params(efw, substream);
if (err < 0)
@@ -189,23 +189,49 @@ static int pcm_open(struct snd_pcm_substream *substream)
if (err < 0)
goto err_locked;
- /*
- * When source of clock is not internal or any PCM streams are running,
- * available sampling rate is limited at current sampling rate.
- */
+ mutex_lock(&efw->mutex);
+
+ // When the source of clock is not internal or any stream is reserved for
+ // transmission of PCM frames, the available sampling rate is limited
+ // to the current one.
if ((clock_source != SND_EFW_CLOCK_SOURCE_INTERNAL) ||
- amdtp_stream_pcm_running(&efw->tx_stream) ||
- amdtp_stream_pcm_running(&efw->rx_stream)) {
+ (efw->substreams_counter > 0 && d->events_per_period > 0)) {
+ unsigned int frames_per_period = d->events_per_period;
+ unsigned int frames_per_buffer = d->events_per_buffer;
+ unsigned int sampling_rate;
+
err = snd_efw_command_get_sampling_rate(efw, &sampling_rate);
- if (err < 0)
+ if (err < 0) {
+ mutex_unlock(&efw->mutex);
goto err_locked;
+ }
substream->runtime->hw.rate_min = sampling_rate;
substream->runtime->hw.rate_max = sampling_rate;
+
+ if (frames_per_period > 0) {
+ err = snd_pcm_hw_constraint_minmax(substream->runtime,
+ SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
+ frames_per_period, frames_per_period);
+ if (err < 0) {
+ mutex_unlock(&efw->mutex);
+ goto err_locked;
+ }
+
+ err = snd_pcm_hw_constraint_minmax(substream->runtime,
+ SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
+ frames_per_buffer, frames_per_buffer);
+ if (err < 0) {
+ mutex_unlock(&efw->mutex);
+ goto err_locked;
+ }
+ }
}
+ mutex_unlock(&efw->mutex);
+
snd_pcm_set_sync(substream);
-end:
- return err;
+
+ return 0;
err_locked:
snd_efw_stream_lock_release(efw);
return err;
@@ -224,16 +250,18 @@ static int pcm_hw_params(struct snd_pcm_substream *substream,
struct snd_efw *efw = substream->private_data;
int err;
- err = snd_pcm_lib_alloc_vmalloc_buffer(substream,
- params_buffer_bytes(hw_params));
+ err = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params));
if (err < 0)
return err;
if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN) {
unsigned int rate = params_rate(hw_params);
+ unsigned int frames_per_period = params_period_size(hw_params);
+ unsigned int frames_per_buffer = params_buffer_size(hw_params);
mutex_lock(&efw->mutex);
- err = snd_efw_stream_reserve_duplex(efw, rate);
+ err = snd_efw_stream_reserve_duplex(efw, rate,
+ frames_per_period, frames_per_buffer);
if (err >= 0)
++efw->substreams_counter;
mutex_unlock(&efw->mutex);
@@ -255,7 +283,7 @@ static int pcm_hw_free(struct snd_pcm_substream *substream)
mutex_unlock(&efw->mutex);
- return snd_pcm_lib_free_vmalloc_buffer(substream);
+ return snd_pcm_lib_free_pages(substream);
}
static int pcm_capture_prepare(struct snd_pcm_substream *substream)
@@ -319,26 +347,28 @@ static int pcm_playback_trigger(struct snd_pcm_substream *substream, int cmd)
static snd_pcm_uframes_t pcm_capture_pointer(struct snd_pcm_substream *sbstrm)
{
struct snd_efw *efw = sbstrm->private_data;
- return amdtp_stream_pcm_pointer(&efw->tx_stream);
+
+ return amdtp_domain_stream_pcm_pointer(&efw->domain, &efw->tx_stream);
}
static snd_pcm_uframes_t pcm_playback_pointer(struct snd_pcm_substream *sbstrm)
{
struct snd_efw *efw = sbstrm->private_data;
- return amdtp_stream_pcm_pointer(&efw->rx_stream);
+
+ return amdtp_domain_stream_pcm_pointer(&efw->domain, &efw->rx_stream);
}
static int pcm_capture_ack(struct snd_pcm_substream *substream)
{
struct snd_efw *efw = substream->private_data;
- return amdtp_stream_pcm_ack(&efw->tx_stream);
+ return amdtp_domain_stream_pcm_ack(&efw->domain, &efw->tx_stream);
}
static int pcm_playback_ack(struct snd_pcm_substream *substream)
{
struct snd_efw *efw = substream->private_data;
- return amdtp_stream_pcm_ack(&efw->rx_stream);
+ return amdtp_domain_stream_pcm_ack(&efw->domain, &efw->rx_stream);
}
int snd_efw_create_pcm_devices(struct snd_efw *efw)
@@ -353,7 +383,6 @@ int snd_efw_create_pcm_devices(struct snd_efw *efw)
.trigger = pcm_capture_trigger,
.pointer = pcm_capture_pointer,
.ack = pcm_capture_ack,
- .page = snd_pcm_lib_get_vmalloc_page,
};
static const struct snd_pcm_ops playback_ops = {
.open = pcm_open,
@@ -365,7 +394,6 @@ int snd_efw_create_pcm_devices(struct snd_efw *efw)
.trigger = pcm_playback_trigger,
.pointer = pcm_playback_pointer,
.ack = pcm_playback_ack,
- .page = snd_pcm_lib_get_vmalloc_page,
};
struct snd_pcm *pcm;
int err;
@@ -378,6 +406,8 @@ int snd_efw_create_pcm_devices(struct snd_efw *efw)
snprintf(pcm->name, sizeof(pcm->name), "%s PCM", efw->card->shortname);
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &playback_ops);
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &capture_ops);
+ snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_VMALLOC,
+ NULL, 0, 0);
end:
return err;
}
diff --git a/sound/firewire/fireworks/fireworks_stream.c b/sound/firewire/fireworks/fireworks_stream.c
index f2de304d2f26..2206af0fef42 100644
--- a/sound/firewire/fireworks/fireworks_stream.c
+++ b/sound/firewire/fireworks/fireworks_stream.c
@@ -181,7 +181,9 @@ static int keep_resources(struct snd_efw *efw, struct amdtp_stream *stream,
return cmp_connection_reserve(conn, amdtp_stream_get_max_payload(stream));
}
-int snd_efw_stream_reserve_duplex(struct snd_efw *efw, unsigned int rate)
+int snd_efw_stream_reserve_duplex(struct snd_efw *efw, unsigned int rate,
+ unsigned int frames_per_period,
+ unsigned int frames_per_buffer)
{
unsigned int curr_rate;
int err;
@@ -228,6 +230,14 @@ int snd_efw_stream_reserve_duplex(struct snd_efw *efw, unsigned int rate)
cmp_connection_release(&efw->in_conn);
return err;
}
+
+ err = amdtp_domain_set_events_per_period(&efw->domain,
+ frames_per_period, frames_per_buffer);
+ if (err < 0) {
+ cmp_connection_release(&efw->in_conn);
+ cmp_connection_release(&efw->out_conn);
+ return err;
+ }
}
return 0;
@@ -262,7 +272,7 @@ int snd_efw_stream_start_duplex(struct snd_efw *efw)
if (err < 0)
goto error;
- err = amdtp_domain_start(&efw->domain);
+ err = amdtp_domain_start(&efw->domain, 0);
if (err < 0)
goto error;
diff --git a/sound/firewire/isight.c b/sound/firewire/isight.c
index a16beda7c530..d9f1b962bfef 100644
--- a/sound/firewire/isight.c
+++ b/sound/firewire/isight.c
@@ -288,8 +288,7 @@ static int isight_hw_params(struct snd_pcm_substream *substream,
struct isight *isight = substream->private_data;
int err;
- err = snd_pcm_lib_alloc_vmalloc_buffer(substream,
- params_buffer_bytes(hw_params));
+ err = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params));
if (err < 0)
return err;
@@ -337,7 +336,7 @@ static int isight_hw_free(struct snd_pcm_substream *substream)
isight_stop_streaming(isight);
mutex_unlock(&isight->mutex);
- return snd_pcm_lib_free_vmalloc_buffer(substream);
+ return snd_pcm_lib_free_pages(substream);
}
static int isight_start_streaming(struct isight *isight)
@@ -453,7 +452,6 @@ static int isight_create_pcm(struct isight *isight)
.prepare = isight_prepare,
.trigger = isight_trigger,
.pointer = isight_pointer,
- .page = snd_pcm_lib_get_vmalloc_page,
};
struct snd_pcm *pcm;
int err;
@@ -465,6 +463,8 @@ static int isight_create_pcm(struct isight *isight)
strcpy(pcm->name, "iSight");
isight->pcm = pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream;
isight->pcm->ops = &ops;
+ snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_VMALLOC,
+ NULL, 0, 0);
return 0;
}
diff --git a/sound/firewire/motu/motu-midi.c b/sound/firewire/motu/motu-midi.c
index 46a0035df31e..2365f7dfde26 100644
--- a/sound/firewire/motu/motu-midi.c
+++ b/sound/firewire/motu/motu-midi.c
@@ -17,7 +17,7 @@ static int midi_open(struct snd_rawmidi_substream *substream)
mutex_lock(&motu->mutex);
- err = snd_motu_stream_reserve_duplex(motu, 0);
+ err = snd_motu_stream_reserve_duplex(motu, 0, 0, 0);
if (err >= 0) {
++motu->substreams_counter;
err = snd_motu_stream_start_duplex(motu);
diff --git a/sound/firewire/motu/motu-pcm.c b/sound/firewire/motu/motu-pcm.c
index aa2e584da6fe..349b4d09e84f 100644
--- a/sound/firewire/motu/motu-pcm.c
+++ b/sound/firewire/motu/motu-pcm.c
@@ -134,8 +134,8 @@ static int pcm_open(struct snd_pcm_substream *substream)
{
struct snd_motu *motu = substream->private_data;
const struct snd_motu_protocol *const protocol = motu->spec->protocol;
+ struct amdtp_domain *d = &motu->domain;
enum snd_motu_clock_source src;
- unsigned int rate;
int err;
err = snd_motu_stream_lock_try(motu);
@@ -152,28 +152,51 @@ static int pcm_open(struct snd_pcm_substream *substream)
if (err < 0)
goto err_locked;
- /*
- * When source of clock is not internal or any PCM streams are running,
- * available sampling rate is limited at current sampling rate.
- */
err = protocol->get_clock_source(motu, &src);
if (err < 0)
goto err_locked;
- if (src != SND_MOTU_CLOCK_SOURCE_INTERNAL ||
- amdtp_stream_pcm_running(&motu->tx_stream) ||
- amdtp_stream_pcm_running(&motu->rx_stream)) {
+
+ // When the source of clock is neither internal nor SPH, or any stream
+ // is reserved for transmission of PCM frames, the available sampling
+ // rate is limited to the current one.
+ if ((src != SND_MOTU_CLOCK_SOURCE_INTERNAL &&
+ src != SND_MOTU_CLOCK_SOURCE_SPH) ||
+ (motu->substreams_counter > 0 && d->events_per_period > 0)) {
+ unsigned int frames_per_period = d->events_per_period;
+ unsigned int frames_per_buffer = d->events_per_buffer;
+ unsigned int rate;
+
err = protocol->get_clock_rate(motu, &rate);
if (err < 0)
goto err_locked;
+
substream->runtime->hw.rate_min = rate;
substream->runtime->hw.rate_max = rate;
+
+ if (frames_per_period > 0) {
+ err = snd_pcm_hw_constraint_minmax(substream->runtime,
+ SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
+ frames_per_period, frames_per_period);
+ if (err < 0) {
+ mutex_unlock(&motu->mutex);
+ goto err_locked;
+ }
+
+ err = snd_pcm_hw_constraint_minmax(substream->runtime,
+ SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
+ frames_per_buffer, frames_per_buffer);
+ if (err < 0) {
+ mutex_unlock(&motu->mutex);
+ goto err_locked;
+ }
+ }
}
snd_pcm_set_sync(substream);
mutex_unlock(&motu->mutex);
- return err;
+ return 0;
err_locked:
mutex_unlock(&motu->mutex);
snd_motu_stream_lock_release(motu);
@@ -195,16 +218,18 @@ static int pcm_hw_params(struct snd_pcm_substream *substream,
struct snd_motu *motu = substream->private_data;
int err;
- err = snd_pcm_lib_alloc_vmalloc_buffer(substream,
- params_buffer_bytes(hw_params));
+ err = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params));
if (err < 0)
return err;
if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN) {
unsigned int rate = params_rate(hw_params);
+ unsigned int frames_per_period = params_period_size(hw_params);
+ unsigned int frames_per_buffer = params_buffer_size(hw_params);
mutex_lock(&motu->mutex);
- err = snd_motu_stream_reserve_duplex(motu, rate);
+ err = snd_motu_stream_reserve_duplex(motu, rate,
+ frames_per_period, frames_per_buffer);
if (err >= 0)
++motu->substreams_counter;
mutex_unlock(&motu->mutex);
@@ -226,7 +251,7 @@ static int pcm_hw_free(struct snd_pcm_substream *substream)
mutex_unlock(&motu->mutex);
- return snd_pcm_lib_free_vmalloc_buffer(substream);
+ return snd_pcm_lib_free_pages(substream);
}
static int capture_prepare(struct snd_pcm_substream *substream)
@@ -295,27 +320,27 @@ static snd_pcm_uframes_t capture_pointer(struct snd_pcm_substream *substream)
{
struct snd_motu *motu = substream->private_data;
- return amdtp_stream_pcm_pointer(&motu->tx_stream);
+ return amdtp_domain_stream_pcm_pointer(&motu->domain, &motu->tx_stream);
}
static snd_pcm_uframes_t playback_pointer(struct snd_pcm_substream *substream)
{
struct snd_motu *motu = substream->private_data;
- return amdtp_stream_pcm_pointer(&motu->rx_stream);
+ return amdtp_domain_stream_pcm_pointer(&motu->domain, &motu->rx_stream);
}
static int capture_ack(struct snd_pcm_substream *substream)
{
struct snd_motu *motu = substream->private_data;
- return amdtp_stream_pcm_ack(&motu->tx_stream);
+ return amdtp_domain_stream_pcm_ack(&motu->domain, &motu->tx_stream);
}
static int playback_ack(struct snd_pcm_substream *substream)
{
struct snd_motu *motu = substream->private_data;
- return amdtp_stream_pcm_ack(&motu->rx_stream);
+ return amdtp_domain_stream_pcm_ack(&motu->domain, &motu->rx_stream);
}
int snd_motu_create_pcm_devices(struct snd_motu *motu)
@@ -330,7 +355,6 @@ int snd_motu_create_pcm_devices(struct snd_motu *motu)
.trigger = capture_trigger,
.pointer = capture_pointer,
.ack = capture_ack,
- .page = snd_pcm_lib_get_vmalloc_page,
};
static const struct snd_pcm_ops playback_ops = {
.open = pcm_open,
@@ -342,7 +366,6 @@ int snd_motu_create_pcm_devices(struct snd_motu *motu)
.trigger = playback_trigger,
.pointer = playback_pointer,
.ack = playback_ack,
- .page = snd_pcm_lib_get_vmalloc_page,
};
struct snd_pcm *pcm;
int err;
@@ -355,6 +378,8 @@ int snd_motu_create_pcm_devices(struct snd_motu *motu)
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &capture_ops);
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &playback_ops);
+ snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_VMALLOC,
+ NULL, 0, 0);
return 0;
}
diff --git a/sound/firewire/motu/motu-proc.c b/sound/firewire/motu/motu-proc.c
index ea46fb4c1b5a..187f6abd878c 100644
--- a/sound/firewire/motu/motu-proc.c
+++ b/sound/firewire/motu/motu-proc.c
@@ -16,9 +16,11 @@ static const char *const clock_names[] = {
[SND_MOTU_CLOCK_SOURCE_SPDIF_ON_OPT] = "S/PDIF on optical interface",
[SND_MOTU_CLOCK_SOURCE_SPDIF_ON_OPT_A] = "S/PDIF on optical interface A",
[SND_MOTU_CLOCK_SOURCE_SPDIF_ON_OPT_B] = "S/PDIF on optical interface B",
- [SND_MOTU_CLOCK_SOURCE_SPDIF_ON_COAX] = "S/PCIF on coaxial interface",
+ [SND_MOTU_CLOCK_SOURCE_SPDIF_ON_COAX] = "S/PDIF on coaxial interface",
[SND_MOTU_CLOCK_SOURCE_AESEBU_ON_XLR] = "AESEBU on XLR interface",
[SND_MOTU_CLOCK_SOURCE_WORD_ON_BNC] = "Word clock on BNC interface",
+ [SND_MOTU_CLOCK_SOURCE_SPH] = "Source packet header",
+ [SND_MOTU_CLOCK_SOURCE_UNKNOWN] = "Unknown",
};
static void proc_read_clock(struct snd_info_entry *entry,
diff --git a/sound/firewire/motu/motu-protocol-v2.c b/sound/firewire/motu/motu-protocol-v2.c
index 9e2f16eebe0a..619b6ae73f62 100644
--- a/sound/firewire/motu/motu-protocol-v2.c
+++ b/sound/firewire/motu/motu-protocol-v2.c
@@ -12,10 +12,8 @@
#define V2_CLOCK_RATE_SHIFT 3
#define V2_CLOCK_SRC_MASK 0x00000007
#define V2_CLOCK_SRC_SHIFT 0
-#define V2_CLOCK_TRAVELER_FETCH_DISABLE 0x04000000
-#define V2_CLOCK_TRAVELER_FETCH_ENABLE 0x03000000
-#define V2_CLOCK_8PRE_FETCH_DISABLE 0x02000000
-#define V2_CLOCK_8PRE_FETCH_ENABLE 0x00000000
+#define V2_CLOCK_FETCH_ENABLE 0x02000000
+#define V2_CLOCK_MODEL_SPECIFIC 0x04000000
#define V2_IN_OUT_CONF_OFFSET 0x0c04
#define V2_OPT_OUT_IFACE_MASK 0x00000c00
@@ -26,10 +24,20 @@
#define V2_OPT_IFACE_MODE_ADAT 1
#define V2_OPT_IFACE_MODE_SPDIF 2
+static int get_clock_rate(u32 data, unsigned int *rate)
+{
+ unsigned int index = (data & V2_CLOCK_RATE_MASK) >> V2_CLOCK_RATE_SHIFT;
+ if (index >= ARRAY_SIZE(snd_motu_clock_rates))
+ return -EIO;
+
+ *rate = snd_motu_clock_rates[index];
+
+ return 0;
+}
+
static int v2_get_clock_rate(struct snd_motu *motu, unsigned int *rate)
{
__be32 reg;
- unsigned int index;
int err;
err = snd_motu_transaction_read(motu, V2_CLOCK_STATUS_OFFSET, &reg,
@@ -37,13 +45,7 @@ static int v2_get_clock_rate(struct snd_motu *motu, unsigned int *rate)
if (err < 0)
return err;
- index = (be32_to_cpu(reg) & V2_CLOCK_RATE_MASK) >> V2_CLOCK_RATE_SHIFT;
- if (index >= ARRAY_SIZE(snd_motu_clock_rates))
- return -EIO;
-
- *rate = snd_motu_clock_rates[index];
-
- return 0;
+ return get_clock_rate(be32_to_cpu(reg), rate);
}
static int v2_set_clock_rate(struct snd_motu *motu, unsigned int rate)
@@ -69,51 +71,44 @@ static int v2_set_clock_rate(struct snd_motu *motu, unsigned int rate)
data &= ~V2_CLOCK_RATE_MASK;
data |= i << V2_CLOCK_RATE_SHIFT;
- if (motu->spec == &snd_motu_spec_traveler) {
- data &= ~V2_CLOCK_TRAVELER_FETCH_ENABLE;
- data |= V2_CLOCK_TRAVELER_FETCH_DISABLE;
- }
-
reg = cpu_to_be32(data);
return snd_motu_transaction_write(motu, V2_CLOCK_STATUS_OFFSET, &reg,
sizeof(reg));
}
-static int v2_get_clock_source(struct snd_motu *motu,
- enum snd_motu_clock_source *src)
+static int get_clock_source(struct snd_motu *motu, u32 data,
+ enum snd_motu_clock_source *src)
{
- __be32 reg;
- unsigned int index;
- int err;
-
- err = snd_motu_transaction_read(motu, V2_CLOCK_STATUS_OFFSET, &reg,
- sizeof(reg));
- if (err < 0)
- return err;
-
- index = be32_to_cpu(reg) & V2_CLOCK_SRC_MASK;
+ unsigned int index = data & V2_CLOCK_SRC_MASK;
if (index > 5)
return -EIO;
- /* To check the configuration of optical interface. */
- err = snd_motu_transaction_read(motu, V2_IN_OUT_CONF_OFFSET, &reg,
- sizeof(reg));
- if (err < 0)
- return err;
-
switch (index) {
case 0:
*src = SND_MOTU_CLOCK_SOURCE_INTERNAL;
break;
case 1:
+ {
+ __be32 reg;
+
+ // Check the configuration of the optical interface.
+ int err = snd_motu_transaction_read(motu, V2_IN_OUT_CONF_OFFSET,
+ &reg, sizeof(reg));
+ if (err < 0)
+ return err;
+
if (be32_to_cpu(reg) & 0x00000200)
*src = SND_MOTU_CLOCK_SOURCE_SPDIF_ON_OPT;
else
*src = SND_MOTU_CLOCK_SOURCE_ADAT_ON_OPT;
break;
+ }
case 2:
*src = SND_MOTU_CLOCK_SOURCE_SPDIF_ON_COAX;
break;
+ case 3:
+ *src = SND_MOTU_CLOCK_SOURCE_SPH;
+ break;
case 4:
*src = SND_MOTU_CLOCK_SOURCE_WORD_ON_BNC;
break;
@@ -127,44 +122,65 @@ static int v2_get_clock_source(struct snd_motu *motu,
return 0;
}
+static int v2_get_clock_source(struct snd_motu *motu,
+ enum snd_motu_clock_source *src)
+{
+ __be32 reg;
+ int err;
+
+ err = snd_motu_transaction_read(motu, V2_CLOCK_STATUS_OFFSET, &reg,
+ sizeof(reg));
+ if (err < 0)
+ return err;
+
+ return get_clock_source(motu, be32_to_cpu(reg), src);
+}
+
static int v2_switch_fetching_mode(struct snd_motu *motu, bool enable)
{
+ enum snd_motu_clock_source src;
__be32 reg;
u32 data;
int err = 0;
- if (motu->spec == &snd_motu_spec_traveler ||
- motu->spec == &snd_motu_spec_8pre) {
- err = snd_motu_transaction_read(motu, V2_CLOCK_STATUS_OFFSET,
- &reg, sizeof(reg));
+ // 828mkII implements Altera ACEX 1K EP1K30. Nothing to do.
+ if (motu->spec == &snd_motu_spec_828mk2)
+ return 0;
+
+ err = snd_motu_transaction_read(motu, V2_CLOCK_STATUS_OFFSET, &reg,
+ sizeof(reg));
+ if (err < 0)
+ return err;
+ data = be32_to_cpu(reg);
+
+ err = get_clock_source(motu, data, &src);
+ if (err < 0)
+ return err;
+
+ data &= ~(V2_CLOCK_FETCH_ENABLE | V2_CLOCK_MODEL_SPECIFIC);
+ if (enable)
+ data |= V2_CLOCK_FETCH_ENABLE;
+
+ if (motu->spec->flags & SND_MOTU_SPEC_SUPPORT_CLOCK_X4) {
+ // Expected for Traveler and 896HD, which implement Altera
+ // Cyclone EP1C3.
+ data |= V2_CLOCK_MODEL_SPECIFIC;
+ } else {
+ // For UltraLite and 8pre, which implement Xilinx Spartan
+ // XC3S200.
+ unsigned int rate;
+
+ err = get_clock_rate(data, &rate);
if (err < 0)
return err;
- data = be32_to_cpu(reg);
-
- if (motu->spec == &snd_motu_spec_traveler) {
- data &= ~(V2_CLOCK_TRAVELER_FETCH_DISABLE |
- V2_CLOCK_TRAVELER_FETCH_ENABLE);
-
- if (enable)
- data |= V2_CLOCK_TRAVELER_FETCH_ENABLE;
- else
- data |= V2_CLOCK_TRAVELER_FETCH_DISABLE;
- } else if (motu->spec == &snd_motu_spec_8pre) {
- data &= ~(V2_CLOCK_8PRE_FETCH_DISABLE |
- V2_CLOCK_8PRE_FETCH_ENABLE);
-
- if (enable)
- data |= V2_CLOCK_8PRE_FETCH_DISABLE;
- else
- data |= V2_CLOCK_8PRE_FETCH_ENABLE;
- }
- reg = cpu_to_be32(data);
- err = snd_motu_transaction_write(motu, V2_CLOCK_STATUS_OFFSET,
- &reg, sizeof(reg));
+ if (src == SND_MOTU_CLOCK_SOURCE_SPH && rate > 48000)
+ data |= V2_CLOCK_MODEL_SPECIFIC;
}
- return err;
+ reg = cpu_to_be32(data);
+ return snd_motu_transaction_write(motu, V2_CLOCK_STATUS_OFFSET, &reg,
+ sizeof(reg));
}
static void calculate_fixed_part(struct snd_motu_packet_format *formats,
@@ -191,7 +207,7 @@ static void calculate_fixed_part(struct snd_motu_packet_format *formats,
pcm_chunks[1] += 2;
}
} else {
- if (flags & SND_MOTU_SPEC_RX_SEPARETED_MAIN) {
+ if (flags & SND_MOTU_SPEC_RX_SEPARATED_MAIN) {
pcm_chunks[0] += 2;
pcm_chunks[1] += 2;
}
diff --git a/sound/firewire/motu/motu-protocol-v3.c b/sound/firewire/motu/motu-protocol-v3.c
index 5eafa506e8a9..d1545e2b5caa 100644
--- a/sound/firewire/motu/motu-protocol-v3.c
+++ b/sound/firewire/motu/motu-protocol-v3.c
@@ -104,6 +104,8 @@ static int v3_get_clock_source(struct snd_motu *motu,
*src = SND_MOTU_CLOCK_SOURCE_INTERNAL;
} else if (val == 0x01) {
*src = SND_MOTU_CLOCK_SOURCE_WORD_ON_BNC;
+ } else if (val == 0x02) {
+ *src = SND_MOTU_CLOCK_SOURCE_SPH;
} else if (val == 0x10) {
*src = SND_MOTU_CLOCK_SOURCE_SPDIF_ON_COAX;
} else if (val == 0x18 || val == 0x19) {
@@ -187,7 +189,7 @@ static void calculate_fixed_part(struct snd_motu_packet_format *formats,
pcm_chunks[1] += 2;
}
} else {
- if (flags & SND_MOTU_SPEC_RX_SEPARETED_MAIN) {
+ if (flags & SND_MOTU_SPEC_RX_SEPARATED_MAIN) {
pcm_chunks[0] += 2;
pcm_chunks[1] += 2;
}
diff --git a/sound/firewire/motu/motu-stream.c b/sound/firewire/motu/motu-stream.c
index 813e38e6a86e..a17ddceb1bec 100644
--- a/sound/firewire/motu/motu-stream.c
+++ b/sound/firewire/motu/motu-stream.c
@@ -133,7 +133,9 @@ int snd_motu_stream_cache_packet_formats(struct snd_motu *motu)
return 0;
}
-int snd_motu_stream_reserve_duplex(struct snd_motu *motu, unsigned int rate)
+int snd_motu_stream_reserve_duplex(struct snd_motu *motu, unsigned int rate,
+ unsigned int frames_per_period,
+ unsigned int frames_per_buffer)
{
unsigned int curr_rate;
int err;
@@ -171,6 +173,14 @@ int snd_motu_stream_reserve_duplex(struct snd_motu *motu, unsigned int rate)
fw_iso_resources_free(&motu->tx_resources);
return err;
}
+
+ err = amdtp_domain_set_events_per_period(&motu->domain,
+ frames_per_period, frames_per_buffer);
+ if (err < 0) {
+ fw_iso_resources_free(&motu->tx_resources);
+ fw_iso_resources_free(&motu->rx_resources);
+ return err;
+ }
}
return 0;
@@ -250,7 +260,7 @@ int snd_motu_stream_start_duplex(struct snd_motu *motu)
if (err < 0)
goto stop_streams;
- err = amdtp_domain_start(&motu->domain);
+ err = amdtp_domain_start(&motu->domain, 0);
if (err < 0)
goto stop_streams;
diff --git a/sound/firewire/motu/motu.c b/sound/firewire/motu/motu.c
index 72908b4de77c..f2080d720aa9 100644
--- a/sound/firewire/motu/motu.c
+++ b/sound/firewire/motu/motu.c
@@ -172,13 +172,13 @@ static void motu_bus_update(struct fw_unit *unit)
snd_motu_transaction_reregister(motu);
}
-static const struct snd_motu_spec motu_828mk2 = {
+const struct snd_motu_spec snd_motu_spec_828mk2 = {
.name = "828mk2",
.protocol = &snd_motu_protocol_v2,
.flags = SND_MOTU_SPEC_SUPPORT_CLOCK_X2 |
SND_MOTU_SPEC_TX_MICINST_CHUNK |
SND_MOTU_SPEC_TX_RETURN_CHUNK |
- SND_MOTU_SPEC_RX_SEPARETED_MAIN |
+ SND_MOTU_SPEC_RX_SEPARATED_MAIN |
SND_MOTU_SPEC_HAS_OPT_IFACE_A |
SND_MOTU_SPEC_RX_MIDI_2ND_Q |
SND_MOTU_SPEC_TX_MIDI_2ND_Q,
@@ -187,7 +187,7 @@ static const struct snd_motu_spec motu_828mk2 = {
.analog_out_ports = 8,
};
-const struct snd_motu_spec snd_motu_spec_traveler = {
+static const struct snd_motu_spec motu_traveler = {
.name = "Traveler",
.protocol = &snd_motu_protocol_v2,
.flags = SND_MOTU_SPEC_SUPPORT_CLOCK_X2 |
@@ -202,7 +202,20 @@ const struct snd_motu_spec snd_motu_spec_traveler = {
.analog_out_ports = 8,
};
-const struct snd_motu_spec snd_motu_spec_8pre = {
+static const struct snd_motu_spec motu_ultralite = {
+ .name = "UltraLite",
+ .protocol = &snd_motu_protocol_v2,
+ .flags = SND_MOTU_SPEC_SUPPORT_CLOCK_X2 |
+ SND_MOTU_SPEC_TX_MICINST_CHUNK | // padding.
+ SND_MOTU_SPEC_TX_RETURN_CHUNK |
+ SND_MOTU_SPEC_RX_MIDI_2ND_Q |
+ SND_MOTU_SPEC_TX_MIDI_2ND_Q |
+ SND_MOTU_SPEC_RX_SEPARATED_MAIN,
+ .analog_in_ports = 8,
+ .analog_out_ports = 8,
+};
+
+static const struct snd_motu_spec motu_8pre = {
.name = "8pre",
.protocol = &snd_motu_protocol_v2,
// In tx, use coax chunks for mix-return 1/2. In rx, use coax chunks for
@@ -224,7 +237,7 @@ static const struct snd_motu_spec motu_828mk3 = {
SND_MOTU_SPEC_TX_MICINST_CHUNK |
SND_MOTU_SPEC_TX_RETURN_CHUNK |
SND_MOTU_SPEC_TX_REVERB_CHUNK |
- SND_MOTU_SPEC_RX_SEPARETED_MAIN |
+ SND_MOTU_SPEC_RX_SEPARATED_MAIN |
SND_MOTU_SPEC_HAS_OPT_IFACE_A |
SND_MOTU_SPEC_HAS_OPT_IFACE_B |
SND_MOTU_SPEC_RX_MIDI_3RD_Q |
@@ -240,7 +253,7 @@ static const struct snd_motu_spec motu_audio_express = {
.flags = SND_MOTU_SPEC_SUPPORT_CLOCK_X2 |
SND_MOTU_SPEC_TX_MICINST_CHUNK |
SND_MOTU_SPEC_TX_RETURN_CHUNK |
- SND_MOTU_SPEC_RX_SEPARETED_MAIN |
+ SND_MOTU_SPEC_RX_SEPARATED_MAIN |
SND_MOTU_SPEC_RX_MIDI_2ND_Q |
SND_MOTU_SPEC_TX_MIDI_3RD_Q,
.analog_in_ports = 2,
@@ -253,7 +266,7 @@ static const struct snd_motu_spec motu_4pre = {
.flags = SND_MOTU_SPEC_SUPPORT_CLOCK_X2 |
SND_MOTU_SPEC_TX_MICINST_CHUNK |
SND_MOTU_SPEC_TX_RETURN_CHUNK |
- SND_MOTU_SPEC_RX_SEPARETED_MAIN,
+ SND_MOTU_SPEC_RX_SEPARATED_MAIN,
.analog_in_ports = 2,
.analog_out_ports = 2,
};
@@ -270,9 +283,10 @@ static const struct snd_motu_spec motu_4pre = {
}
static const struct ieee1394_device_id motu_id_table[] = {
- SND_MOTU_DEV_ENTRY(0x000003, &motu_828mk2),
- SND_MOTU_DEV_ENTRY(0x000009, &snd_motu_spec_traveler),
- SND_MOTU_DEV_ENTRY(0x00000f, &snd_motu_spec_8pre),
+ SND_MOTU_DEV_ENTRY(0x000003, &snd_motu_spec_828mk2),
+ SND_MOTU_DEV_ENTRY(0x000009, &motu_traveler),
+ SND_MOTU_DEV_ENTRY(0x00000d, &motu_ultralite),
+ SND_MOTU_DEV_ENTRY(0x00000f, &motu_8pre),
SND_MOTU_DEV_ENTRY(0x000015, &motu_828mk3), /* FireWire only. */
SND_MOTU_DEV_ENTRY(0x000035, &motu_828mk3), /* Hybrid. */
SND_MOTU_DEV_ENTRY(0x000033, &motu_audio_express),
diff --git a/sound/firewire/motu/motu.h b/sound/firewire/motu/motu.h
index 350ee2c16f4a..6efbde405a0d 100644
--- a/sound/firewire/motu/motu.h
+++ b/sound/firewire/motu/motu.h
@@ -86,7 +86,7 @@ enum snd_motu_spec_flags {
SND_MOTU_SPEC_RX_MIDI_3RD_Q = 0x0200,
SND_MOTU_SPEC_TX_MIDI_2ND_Q = 0x0400,
SND_MOTU_SPEC_TX_MIDI_3RD_Q = 0x0800,
- SND_MOTU_SPEC_RX_SEPARETED_MAIN = 0x1000,
+ SND_MOTU_SPEC_RX_SEPARATED_MAIN = 0x1000,
};
#define SND_MOTU_CLOCK_RATE_COUNT 6
@@ -104,6 +104,7 @@ enum snd_motu_clock_source {
SND_MOTU_CLOCK_SOURCE_SPDIF_ON_COAX,
SND_MOTU_CLOCK_SOURCE_AESEBU_ON_XLR,
SND_MOTU_CLOCK_SOURCE_WORD_ON_BNC,
+ SND_MOTU_CLOCK_SOURCE_SPH,
SND_MOTU_CLOCK_SOURCE_UNKNOWN,
};
@@ -129,8 +130,7 @@ struct snd_motu_spec {
extern const struct snd_motu_protocol snd_motu_protocol_v2;
extern const struct snd_motu_protocol snd_motu_protocol_v3;
-extern const struct snd_motu_spec snd_motu_spec_traveler;
-extern const struct snd_motu_spec snd_motu_spec_8pre;
+extern const struct snd_motu_spec snd_motu_spec_828mk2;
int amdtp_motu_init(struct amdtp_stream *s, struct fw_unit *unit,
enum amdtp_stream_direction dir,
@@ -154,7 +154,9 @@ void snd_motu_transaction_unregister(struct snd_motu *motu);
int snd_motu_stream_init_duplex(struct snd_motu *motu);
void snd_motu_stream_destroy_duplex(struct snd_motu *motu);
int snd_motu_stream_cache_packet_formats(struct snd_motu *motu);
-int snd_motu_stream_reserve_duplex(struct snd_motu *motu, unsigned int rate);
+int snd_motu_stream_reserve_duplex(struct snd_motu *motu, unsigned int rate,
+ unsigned int frames_per_period,
+ unsigned int frames_per_buffer);
int snd_motu_stream_start_duplex(struct snd_motu *motu);
void snd_motu_stream_stop_duplex(struct snd_motu *motu);
int snd_motu_stream_lock_try(struct snd_motu *motu);
diff --git a/sound/firewire/oxfw/oxfw-midi.c b/sound/firewire/oxfw/oxfw-midi.c
index 9bdec08cb8ea..775cba3f1f02 100644
--- a/sound/firewire/oxfw/oxfw-midi.c
+++ b/sound/firewire/oxfw/oxfw-midi.c
@@ -18,7 +18,7 @@ static int midi_capture_open(struct snd_rawmidi_substream *substream)
mutex_lock(&oxfw->mutex);
- err = snd_oxfw_stream_reserve_duplex(oxfw, &oxfw->tx_stream, 0, 0);
+ err = snd_oxfw_stream_reserve_duplex(oxfw, &oxfw->tx_stream, 0, 0, 0, 0);
if (err >= 0) {
++oxfw->substreams_count;
err = snd_oxfw_stream_start_duplex(oxfw);
@@ -45,7 +45,7 @@ static int midi_playback_open(struct snd_rawmidi_substream *substream)
mutex_lock(&oxfw->mutex);
- err = snd_oxfw_stream_reserve_duplex(oxfw, &oxfw->rx_stream, 0, 0);
+ err = snd_oxfw_stream_reserve_duplex(oxfw, &oxfw->rx_stream, 0, 0, 0, 0);
if (err >= 0) {
++oxfw->substreams_count;
err = snd_oxfw_stream_start_duplex(oxfw);
diff --git a/sound/firewire/oxfw/oxfw-pcm.c b/sound/firewire/oxfw/oxfw-pcm.c
index 7c6d1c277d4d..9124603edabe 100644
--- a/sound/firewire/oxfw/oxfw-pcm.c
+++ b/sound/firewire/oxfw/oxfw-pcm.c
@@ -170,30 +170,56 @@ end:
static int pcm_open(struct snd_pcm_substream *substream)
{
struct snd_oxfw *oxfw = substream->private_data;
+ struct amdtp_domain *d = &oxfw->domain;
int err;
err = snd_oxfw_stream_lock_try(oxfw);
if (err < 0)
- goto end;
+ return err;
err = init_hw_params(oxfw, substream);
if (err < 0)
goto err_locked;
- /*
- * When any PCM streams are already running, the available sampling
- * rate is limited at current value.
- */
- if (amdtp_stream_pcm_running(&oxfw->tx_stream) ||
- amdtp_stream_pcm_running(&oxfw->rx_stream)) {
+ mutex_lock(&oxfw->mutex);
+
+ // When any stream is already reserved for transmission of PCM frames,
+ // the available sampling rate is limited to the current one, and the
+ // period and buffer sizes are pinned as well.
+ if (oxfw->substreams_count > 0 && d->events_per_period > 0) {
+ unsigned int frames_per_period = d->events_per_period;
+ unsigned int frames_per_buffer = d->events_per_buffer;
+
err = limit_to_current_params(substream);
- if (err < 0)
- goto end;
+ if (err < 0) {
+ mutex_unlock(&oxfw->mutex);
+ goto err_locked;
+ }
+
+ if (frames_per_period > 0) {
+ err = snd_pcm_hw_constraint_minmax(substream->runtime,
+ SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
+ frames_per_period, frames_per_period);
+ if (err < 0) {
+ mutex_unlock(&oxfw->mutex);
+ goto err_locked;
+ }
+
+ err = snd_pcm_hw_constraint_minmax(substream->runtime,
+ SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
+ frames_per_buffer, frames_per_buffer);
+ if (err < 0) {
+ mutex_unlock(&oxfw->mutex);
+ goto err_locked;
+ }
+ }
}
+ mutex_unlock(&oxfw->mutex);
+
snd_pcm_set_sync(substream);
-end:
- return err;
+
+ return 0;
err_locked:
snd_oxfw_stream_lock_release(oxfw);
return err;
@@ -213,18 +239,20 @@ static int pcm_capture_hw_params(struct snd_pcm_substream *substream,
struct snd_oxfw *oxfw = substream->private_data;
int err;
- err = snd_pcm_lib_alloc_vmalloc_buffer(substream,
- params_buffer_bytes(hw_params));
+ err = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params));
if (err < 0)
return err;
if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN) {
unsigned int rate = params_rate(hw_params);
unsigned int channels = params_channels(hw_params);
+ unsigned int frames_per_period = params_period_size(hw_params);
+ unsigned int frames_per_buffer = params_buffer_size(hw_params);
mutex_lock(&oxfw->mutex);
err = snd_oxfw_stream_reserve_duplex(oxfw, &oxfw->tx_stream,
- rate, channels);
+ rate, channels, frames_per_period,
+ frames_per_buffer);
if (err >= 0)
++oxfw->substreams_count;
mutex_unlock(&oxfw->mutex);
@@ -238,18 +266,20 @@ static int pcm_playback_hw_params(struct snd_pcm_substream *substream,
struct snd_oxfw *oxfw = substream->private_data;
int err;
- err = snd_pcm_lib_alloc_vmalloc_buffer(substream,
- params_buffer_bytes(hw_params));
+ err = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params));
if (err < 0)
return err;
if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN) {
unsigned int rate = params_rate(hw_params);
unsigned int channels = params_channels(hw_params);
+ unsigned int frames_per_period = params_period_size(hw_params);
+ unsigned int frames_per_buffer = params_buffer_size(hw_params);
mutex_lock(&oxfw->mutex);
err = snd_oxfw_stream_reserve_duplex(oxfw, &oxfw->rx_stream,
- rate, channels);
+ rate, channels, frames_per_period,
+ frames_per_buffer);
if (err >= 0)
++oxfw->substreams_count;
mutex_unlock(&oxfw->mutex);
@@ -271,7 +301,7 @@ static int pcm_capture_hw_free(struct snd_pcm_substream *substream)
mutex_unlock(&oxfw->mutex);
- return snd_pcm_lib_free_vmalloc_buffer(substream);
+ return snd_pcm_lib_free_pages(substream);
}
static int pcm_playback_hw_free(struct snd_pcm_substream *substream)
{
@@ -286,7 +316,7 @@ static int pcm_playback_hw_free(struct snd_pcm_substream *substream)
mutex_unlock(&oxfw->mutex);
- return snd_pcm_lib_free_vmalloc_buffer(substream);
+ return snd_pcm_lib_free_pages(substream);
}
static int pcm_capture_prepare(struct snd_pcm_substream *substream)
@@ -361,27 +391,27 @@ static snd_pcm_uframes_t pcm_capture_pointer(struct snd_pcm_substream *sbstm)
{
struct snd_oxfw *oxfw = sbstm->private_data;
- return amdtp_stream_pcm_pointer(&oxfw->tx_stream);
+ return amdtp_domain_stream_pcm_pointer(&oxfw->domain, &oxfw->tx_stream);
}
static snd_pcm_uframes_t pcm_playback_pointer(struct snd_pcm_substream *sbstm)
{
struct snd_oxfw *oxfw = sbstm->private_data;
- return amdtp_stream_pcm_pointer(&oxfw->rx_stream);
+ return amdtp_domain_stream_pcm_pointer(&oxfw->domain, &oxfw->rx_stream);
}
static int pcm_capture_ack(struct snd_pcm_substream *substream)
{
struct snd_oxfw *oxfw = substream->private_data;
- return amdtp_stream_pcm_ack(&oxfw->tx_stream);
+ return amdtp_domain_stream_pcm_ack(&oxfw->domain, &oxfw->tx_stream);
}
static int pcm_playback_ack(struct snd_pcm_substream *substream)
{
struct snd_oxfw *oxfw = substream->private_data;
- return amdtp_stream_pcm_ack(&oxfw->rx_stream);
+ return amdtp_domain_stream_pcm_ack(&oxfw->domain, &oxfw->rx_stream);
}
int snd_oxfw_create_pcm(struct snd_oxfw *oxfw)
@@ -396,7 +426,6 @@ int snd_oxfw_create_pcm(struct snd_oxfw *oxfw)
.trigger = pcm_capture_trigger,
.pointer = pcm_capture_pointer,
.ack = pcm_capture_ack,
- .page = snd_pcm_lib_get_vmalloc_page,
};
static const struct snd_pcm_ops playback_ops = {
.open = pcm_open,
@@ -408,7 +437,6 @@ int snd_oxfw_create_pcm(struct snd_oxfw *oxfw)
.trigger = pcm_playback_trigger,
.pointer = pcm_playback_pointer,
.ack = pcm_playback_ack,
- .page = snd_pcm_lib_get_vmalloc_page,
};
struct snd_pcm *pcm;
unsigned int cap = 0;
@@ -426,6 +454,8 @@ int snd_oxfw_create_pcm(struct snd_oxfw *oxfw)
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &playback_ops);
if (cap > 0)
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &capture_ops);
+ snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_VMALLOC,
+ NULL, 0, 0);
return 0;
}
diff --git a/sound/firewire/oxfw/oxfw-stream.c b/sound/firewire/oxfw/oxfw-stream.c
index 3c9a796b6526..501a80094bf7 100644
--- a/sound/firewire/oxfw/oxfw-stream.c
+++ b/sound/firewire/oxfw/oxfw-stream.c
@@ -244,7 +244,9 @@ static int keep_resources(struct snd_oxfw *oxfw, struct amdtp_stream *stream)
int snd_oxfw_stream_reserve_duplex(struct snd_oxfw *oxfw,
struct amdtp_stream *stream,
- unsigned int rate, unsigned int pcm_channels)
+ unsigned int rate, unsigned int pcm_channels,
+ unsigned int frames_per_period,
+ unsigned int frames_per_buffer)
{
struct snd_oxfw_stream_formation formation;
enum avc_general_plug_dir dir;
@@ -305,6 +307,15 @@ int snd_oxfw_stream_reserve_duplex(struct snd_oxfw *oxfw,
return err;
}
}
+
+ err = amdtp_domain_set_events_per_period(&oxfw->domain,
+ frames_per_period, frames_per_buffer);
+ if (err < 0) {
+ cmp_connection_release(&oxfw->in_conn);
+ if (oxfw->has_output)
+ cmp_connection_release(&oxfw->out_conn);
+ return err;
+ }
}
return 0;
@@ -344,7 +355,7 @@ int snd_oxfw_stream_start_duplex(struct snd_oxfw *oxfw)
}
}
- err = amdtp_domain_start(&oxfw->domain);
+ err = amdtp_domain_start(&oxfw->domain, 0);
if (err < 0)
goto error;
diff --git a/sound/firewire/oxfw/oxfw.h b/sound/firewire/oxfw/oxfw.h
index c9627b8c5d6e..c30e537087b0 100644
--- a/sound/firewire/oxfw/oxfw.h
+++ b/sound/firewire/oxfw/oxfw.h
@@ -103,7 +103,9 @@ int avc_general_inquiry_sig_fmt(struct fw_unit *unit, unsigned int rate,
int snd_oxfw_stream_init_duplex(struct snd_oxfw *oxfw);
int snd_oxfw_stream_reserve_duplex(struct snd_oxfw *oxfw,
struct amdtp_stream *stream,
- unsigned int rate, unsigned int pcm_channels);
+ unsigned int rate, unsigned int pcm_channels,
+ unsigned int frames_per_period,
+ unsigned int frames_per_buffer);
int snd_oxfw_stream_start_duplex(struct snd_oxfw *oxfw);
void snd_oxfw_stream_stop_duplex(struct snd_oxfw *oxfw);
void snd_oxfw_stream_destroy_duplex(struct snd_oxfw *oxfw);
diff --git a/sound/firewire/tascam/tascam-pcm.c b/sound/firewire/tascam/tascam-pcm.c
index 2377732caa52..8e9b444c8bff 100644
--- a/sound/firewire/tascam/tascam-pcm.c
+++ b/sound/firewire/tascam/tascam-pcm.c
@@ -43,13 +43,13 @@ static int pcm_init_hw_params(struct snd_tscm *tscm,
static int pcm_open(struct snd_pcm_substream *substream)
{
struct snd_tscm *tscm = substream->private_data;
+ struct amdtp_domain *d = &tscm->domain;
enum snd_tscm_clock clock;
- unsigned int rate;
int err;
err = snd_tscm_stream_lock_try(tscm);
if (err < 0)
- goto end;
+ return err;
err = pcm_init_hw_params(tscm, substream);
if (err < 0)
@@ -59,19 +59,46 @@ static int pcm_open(struct snd_pcm_substream *substream)
if (err < 0)
goto err_locked;
- if (clock != SND_TSCM_CLOCK_INTERNAL ||
- amdtp_stream_pcm_running(&tscm->rx_stream) ||
- amdtp_stream_pcm_running(&tscm->tx_stream)) {
+ mutex_lock(&tscm->mutex);
+
+ // When the source of clock is not internal or any stream is reserved for
+ // transmission of PCM frames, the available sampling rate is limited
+ // to the current one.
+ if (clock != SND_TSCM_CLOCK_INTERNAL || tscm->substreams_counter > 0) {
+ unsigned int frames_per_period = d->events_per_period;
+ unsigned int frames_per_buffer = d->events_per_buffer;
+ unsigned int rate;
+
err = snd_tscm_stream_get_rate(tscm, &rate);
- if (err < 0)
+ if (err < 0) {
+ mutex_unlock(&tscm->mutex);
goto err_locked;
+ }
substream->runtime->hw.rate_min = rate;
substream->runtime->hw.rate_max = rate;
+
+ err = snd_pcm_hw_constraint_minmax(substream->runtime,
+ SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
+ frames_per_period, frames_per_period);
+ if (err < 0) {
+ mutex_unlock(&tscm->mutex);
+ goto err_locked;
+ }
+
+ err = snd_pcm_hw_constraint_minmax(substream->runtime,
+ SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
+ frames_per_buffer, frames_per_buffer);
+ if (err < 0) {
+ mutex_unlock(&tscm->mutex);
+ goto err_locked;
+ }
}
+ mutex_unlock(&tscm->mutex);
+
snd_pcm_set_sync(substream);
-end:
- return err;
+
+ return 0;
err_locked:
snd_tscm_stream_lock_release(tscm);
return err;
@@ -92,16 +119,18 @@ static int pcm_hw_params(struct snd_pcm_substream *substream,
struct snd_tscm *tscm = substream->private_data;
int err;
- err = snd_pcm_lib_alloc_vmalloc_buffer(substream,
- params_buffer_bytes(hw_params));
+ err = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params));
if (err < 0)
return err;
if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN) {
unsigned int rate = params_rate(hw_params);
+ unsigned int frames_per_period = params_period_size(hw_params);
+ unsigned int frames_per_buffer = params_buffer_size(hw_params);
mutex_lock(&tscm->mutex);
- err = snd_tscm_stream_reserve_duplex(tscm, rate);
+ err = snd_tscm_stream_reserve_duplex(tscm, rate,
+ frames_per_period, frames_per_buffer);
if (err >= 0)
++tscm->substreams_counter;
mutex_unlock(&tscm->mutex);
@@ -123,7 +152,7 @@ static int pcm_hw_free(struct snd_pcm_substream *substream)
mutex_unlock(&tscm->mutex);
- return snd_pcm_lib_free_vmalloc_buffer(substream);
+ return snd_pcm_lib_free_pages(substream);
}
static int pcm_capture_prepare(struct snd_pcm_substream *substream)
@@ -200,28 +229,28 @@ static snd_pcm_uframes_t pcm_capture_pointer(struct snd_pcm_substream *sbstrm)
{
struct snd_tscm *tscm = sbstrm->private_data;
- return amdtp_stream_pcm_pointer(&tscm->tx_stream);
+ return amdtp_domain_stream_pcm_pointer(&tscm->domain, &tscm->tx_stream);
}
static snd_pcm_uframes_t pcm_playback_pointer(struct snd_pcm_substream *sbstrm)
{
struct snd_tscm *tscm = sbstrm->private_data;
- return amdtp_stream_pcm_pointer(&tscm->rx_stream);
+ return amdtp_domain_stream_pcm_pointer(&tscm->domain, &tscm->rx_stream);
}
static int pcm_capture_ack(struct snd_pcm_substream *substream)
{
struct snd_tscm *tscm = substream->private_data;
- return amdtp_stream_pcm_ack(&tscm->tx_stream);
+ return amdtp_domain_stream_pcm_ack(&tscm->domain, &tscm->tx_stream);
}
static int pcm_playback_ack(struct snd_pcm_substream *substream)
{
struct snd_tscm *tscm = substream->private_data;
- return amdtp_stream_pcm_ack(&tscm->rx_stream);
+ return amdtp_domain_stream_pcm_ack(&tscm->domain, &tscm->rx_stream);
}
int snd_tscm_create_pcm_devices(struct snd_tscm *tscm)
@@ -236,7 +265,6 @@ int snd_tscm_create_pcm_devices(struct snd_tscm *tscm)
.trigger = pcm_capture_trigger,
.pointer = pcm_capture_pointer,
.ack = pcm_capture_ack,
- .page = snd_pcm_lib_get_vmalloc_page,
};
static const struct snd_pcm_ops playback_ops = {
.open = pcm_open,
@@ -248,7 +276,6 @@ int snd_tscm_create_pcm_devices(struct snd_tscm *tscm)
.trigger = pcm_playback_trigger,
.pointer = pcm_playback_pointer,
.ack = pcm_playback_ack,
- .page = snd_pcm_lib_get_vmalloc_page,
};
struct snd_pcm *pcm;
int err;
@@ -262,6 +289,8 @@ int snd_tscm_create_pcm_devices(struct snd_tscm *tscm)
"%s PCM", tscm->card->shortname);
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &playback_ops);
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &capture_ops);
+ snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_VMALLOC,
+ NULL, 0, 0);
return 0;
}
diff --git a/sound/firewire/tascam/tascam-stream.c b/sound/firewire/tascam/tascam-stream.c
index adf69a520b80..eb07e1decf9b 100644
--- a/sound/firewire/tascam/tascam-stream.c
+++ b/sound/firewire/tascam/tascam-stream.c
@@ -383,7 +383,9 @@ void snd_tscm_stream_destroy_duplex(struct snd_tscm *tscm)
destroy_stream(tscm, &tscm->tx_stream);
}
-int snd_tscm_stream_reserve_duplex(struct snd_tscm *tscm, unsigned int rate)
+int snd_tscm_stream_reserve_duplex(struct snd_tscm *tscm, unsigned int rate,
+ unsigned int frames_per_period,
+ unsigned int frames_per_buffer)
{
unsigned int curr_rate;
int err;
@@ -413,6 +415,14 @@ int snd_tscm_stream_reserve_duplex(struct snd_tscm *tscm, unsigned int rate)
fw_iso_resources_free(&tscm->tx_resources);
return err;
}
+
+ err = amdtp_domain_set_events_per_period(&tscm->domain,
+ frames_per_period, frames_per_buffer);
+ if (err < 0) {
+ fw_iso_resources_free(&tscm->tx_resources);
+ fw_iso_resources_free(&tscm->rx_resources);
+ return err;
+ }
}
return 0;
@@ -463,7 +473,7 @@ int snd_tscm_stream_start_duplex(struct snd_tscm *tscm, unsigned int rate)
if (err < 0)
goto error;
- err = amdtp_domain_start(&tscm->domain);
+ err = amdtp_domain_start(&tscm->domain, 0);
if (err < 0)
return err;
diff --git a/sound/firewire/tascam/tascam.h b/sound/firewire/tascam/tascam.h
index 15bd335fa07f..78b7a08986a1 100644
--- a/sound/firewire/tascam/tascam.h
+++ b/sound/firewire/tascam/tascam.h
@@ -168,7 +168,9 @@ int snd_tscm_stream_get_clock(struct snd_tscm *tscm,
int snd_tscm_stream_init_duplex(struct snd_tscm *tscm);
void snd_tscm_stream_update_duplex(struct snd_tscm *tscm);
void snd_tscm_stream_destroy_duplex(struct snd_tscm *tscm);
-int snd_tscm_stream_reserve_duplex(struct snd_tscm *tscm, unsigned int rate);
+int snd_tscm_stream_reserve_duplex(struct snd_tscm *tscm, unsigned int rate,
+ unsigned int frames_per_period,
+ unsigned int frames_per_buffer);
int snd_tscm_stream_start_duplex(struct snd_tscm *tscm, unsigned int rate);
void snd_tscm_stream_stop_duplex(struct snd_tscm *tscm);
diff --git a/sound/hda/Kconfig b/sound/hda/Kconfig
index 3d33fc1757ba..b0c88fe040ee 100644
--- a/sound/hda/Kconfig
+++ b/sound/hda/Kconfig
@@ -34,6 +34,12 @@ config SND_HDA_PREALLOC_SIZE
via a proc file (/proc/asound/card*/pcm*/sub*/prealloc), too.
config SND_INTEL_NHLT
- tristate
+ bool
# this config should be selected only for Intel ACPI platforms.
- # A fallback is provided so that the code compiles in all cases.
\ No newline at end of file
+ # A fallback is provided so that the code compiles in all cases.
+
+config SND_INTEL_DSP_CONFIG
+ tristate
+ select SND_INTEL_NHLT if ACPI
+ # this config should be selected only for Intel DSP platforms.
+ # A fallback is provided so that the code compiles in all cases.
diff --git a/sound/hda/Makefile b/sound/hda/Makefile
index 8560f6ef1b19..601e617918b8 100644
--- a/sound/hda/Makefile
+++ b/sound/hda/Makefile
@@ -14,5 +14,6 @@ obj-$(CONFIG_SND_HDA_CORE) += snd-hda-core.o
#extended hda
obj-$(CONFIG_SND_HDA_EXT_CORE) += ext/
-snd-intel-nhlt-objs := intel-nhlt.o
-obj-$(CONFIG_SND_INTEL_NHLT) += snd-intel-nhlt.o
+snd-intel-dspcfg-objs := intel-dsp-config.o
+snd-intel-dspcfg-$(CONFIG_SND_INTEL_NHLT) += intel-nhlt.o
+obj-$(CONFIG_SND_INTEL_DSP_CONFIG) += snd-intel-dspcfg.o
diff --git a/sound/hda/hdac_regmap.c b/sound/hda/hdac_regmap.c
index 286361ecd640..906b1e20bae0 100644
--- a/sound/hda/hdac_regmap.c
+++ b/sound/hda/hdac_regmap.c
@@ -363,6 +363,7 @@ static const struct regmap_config hda_regmap_cfg = {
.reg_write = hda_reg_write,
.use_single_read = true,
.use_single_write = true,
+ .disable_locking = true,
};
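[Editor's note: with .disable_locking set, regmap no longer serializes register access internally, so correctness depends on every caller holding its own lock around accesses. A hedged illustration; the codec-level lock name is hypothetical:]

	mutex_lock(&codec->regmap_lock);	/* hypothetical caller-side lock */
	err = regmap_write(codec->regmap, reg, val);
	mutex_unlock(&codec->regmap_lock);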
/**
diff --git a/sound/hda/intel-dsp-config.c b/sound/hda/intel-dsp-config.c
new file mode 100644
index 000000000000..be1df80ed013
--- /dev/null
+++ b/sound/hda/intel-dsp-config.c
@@ -0,0 +1,357 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2019 Jaroslav Kysela <perex@perex.cz>
+
+#include <linux/bits.h>
+#include <linux/dmi.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <sound/core.h>
+#include <sound/intel-dsp-config.h>
+#include <sound/intel-nhlt.h>
+
+static int dsp_driver;
+
+module_param(dsp_driver, int, 0444);
+MODULE_PARM_DESC(dsp_driver, "Force the DSP driver for Intel DSP (0=auto, 1=legacy, 2=SST, 3=SOF)");
+
+#define FLAG_SST BIT(0)
+#define FLAG_SOF BIT(1)
+#define FLAG_SOF_ONLY_IF_DMIC BIT(16)
+
+struct config_entry {
+ u32 flags;
+ u16 device;
+ const struct dmi_system_id *dmi_table;
+};
+
+/*
+ * configuration table
+ * - the order of similar PCI ID entries is important!
+ * - the first successful match will win
+ */
+static const struct config_entry config_table[] = {
+/* Merrifield */
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_MERRIFIELD)
+ {
+ .flags = FLAG_SOF,
+ .device = 0x119a,
+ },
+#endif
+/* Broxton-T */
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_APOLLOLAKE)
+ {
+ .flags = FLAG_SOF,
+ .device = 0x1a98,
+ },
+#endif
+/*
+ * Apollolake (Broxton-P)
+ * the legacy HDaudio driver is used except on Up Squared (SOF) and
+ * Chromebooks (SST)
+ */
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_APOLLOLAKE)
+ {
+ .flags = FLAG_SOF,
+ .device = 0x5a98,
+ .dmi_table = (const struct dmi_system_id []) {
+ {
+ .ident = "Up Squared",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "AAEON"),
+ DMI_MATCH(DMI_BOARD_NAME, "UP-APL01"),
+ }
+ },
+ {}
+ }
+ },
+#endif
+#if IS_ENABLED(CONFIG_SND_SOC_INTEL_APL)
+ {
+ .flags = FLAG_SST,
+ .device = 0x5a98,
+ .dmi_table = (const struct dmi_system_id []) {
+ {
+ .ident = "Google Chromebooks",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Google"),
+ }
+ },
+ {}
+ }
+ },
+#endif
+/*
+ * Skylake and Kabylake use legacy HDaudio driver except for Google
+ * Chromebooks (SST)
+ */
+
+/* Sunrise Point-LP */
+#if IS_ENABLED(CONFIG_SND_SOC_INTEL_SKL)
+ {
+ .flags = FLAG_SST,
+ .device = 0x9d70,
+ .dmi_table = (const struct dmi_system_id []) {
+ {
+ .ident = "Google Chromebooks",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Google"),
+ }
+ },
+ {}
+ }
+ },
+#endif
+/* Kabylake-LP */
+#if IS_ENABLED(CONFIG_SND_SOC_INTEL_KBL)
+ {
+ .flags = FLAG_SST,
+ .device = 0x9d71,
+ .dmi_table = (const struct dmi_system_id []) {
+ {
+ .ident = "Google Chromebooks",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Google"),
+ }
+ },
+ {}
+ }
+ },
+#endif
+
+/*
+ * Geminilake uses legacy HDaudio driver except for Google
+ * Chromebooks
+ */
+/* Geminilake */
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_GEMINILAKE)
+ {
+ .flags = FLAG_SOF,
+ .device = 0x3198,
+ .dmi_table = (const struct dmi_system_id []) {
+ {
+ .ident = "Google Chromebooks",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Google"),
+ }
+ },
+ {}
+ }
+ },
+#endif
+
+/*
+ * CoffeeLake, CannonLake, CometLake, IceLake, TigerLake use legacy
+ * HDaudio driver except for Google Chromebooks and when DMICs are
+ * present. Two cases are required since Coreboot does not expose NHLT
+ * tables.
+ *
+ * When the Chromebook quirk is not present, it's based on information
+ * that no such device exists. When the quirk is present, it could be
+ * either based on product information or a placeholder.
+ */
+
+/* Cannonlake */
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_CANNONLAKE)
+ {
+ .flags = FLAG_SOF,
+ .device = 0x9dc8,
+ .dmi_table = (const struct dmi_system_id []) {
+ {
+ .ident = "Google Chromebooks",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Google"),
+ }
+ },
+ {}
+ }
+ },
+ {
+ .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC,
+ .device = 0x9dc8,
+ },
+#endif
+
+/* Coffeelake */
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_COFFEELAKE)
+ {
+ .flags = FLAG_SOF,
+ .device = 0xa348,
+ .dmi_table = (const struct dmi_system_id []) {
+ {
+ .ident = "Google Chromebooks",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Google"),
+ }
+ },
+ {}
+ }
+ },
+ {
+ .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC,
+ .device = 0xa348,
+ },
+#endif
+
+/* Cometlake-LP */
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_COMETLAKE_LP)
+ {
+ .flags = FLAG_SOF,
+ .device = 0x02c8,
+ .dmi_table = (const struct dmi_system_id []) {
+ {
+ .ident = "Google Chromebooks",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Google"),
+ }
+ },
+ {}
+ }
+ },
+ {
+ .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC,
+ .device = 0x02c8,
+ },
+#endif
+/* Cometlake-H */
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_COMETLAKE_H)
+ {
+ .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC,
+ .device = 0x06c8,
+ },
+#endif
+
+/* Icelake */
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_ICELAKE)
+ {
+ .flags = FLAG_SOF,
+ .device = 0x34c8,
+ .dmi_table = (const struct dmi_system_id []) {
+ {
+ .ident = "Google Chromebooks",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Google"),
+ }
+ },
+ {}
+ }
+ },
+ {
+ .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC,
+ .device = 0x34c8,
+ },
+#endif
+
+/* Tigerlake */
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_TIGERLAKE)
+ {
+ .flags = FLAG_SOF,
+ .device = 0xa0c8,
+ .dmi_table = (const struct dmi_system_id []) {
+ {
+ .ident = "Google Chromebooks",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Google"),
+ }
+ },
+ {}
+ }
+ },
+
+ {
+ .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC,
+ .device = 0xa0c8,
+ },
+#endif
+
+/* Elkhart Lake */
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_ELKHARTLAKE)
+ {
+ .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC,
+ .device = 0x4b55,
+ },
+#endif
+
+};
+
+static const struct config_entry *snd_intel_dsp_find_config
+ (struct pci_dev *pci, const struct config_entry *table, u32 len)
+{
+ u16 device;
+
+ device = pci->device;
+ for (; len > 0; len--, table++) {
+ if (table->device != device)
+ continue;
+ if (table->dmi_table && !dmi_check_system(table->dmi_table))
+ continue;
+ return table;
+ }
+ return NULL;
+}
+
+static int snd_intel_dsp_check_dmic(struct pci_dev *pci)
+{
+ struct nhlt_acpi_table *nhlt;
+ int ret = 0;
+
+ nhlt = intel_nhlt_init(&pci->dev);
+ if (nhlt) {
+ if (intel_nhlt_get_dmic_geo(&pci->dev, nhlt))
+ ret = 1;
+ intel_nhlt_free(nhlt);
+ }
+ return ret;
+}
+
+int snd_intel_dsp_driver_probe(struct pci_dev *pci)
+{
+ const struct config_entry *cfg;
+
+ /* Intel vendor only */
+ if (pci->vendor != 0x8086)
+ return SND_INTEL_DSP_DRIVER_ANY;
+
+ if (dsp_driver > 0 && dsp_driver <= SND_INTEL_DSP_DRIVER_LAST)
+ return dsp_driver;
+
+ /*
+ * detect DSP by checking class/subclass/prog-id information
+ * class=04 subclass 03 prog-if 00: no DSP, use legacy driver
+ * class=04 subclass 01 prog-if 00: DSP is present
+ * (and may be required e.g. for DMIC or SSP support)
+ * class=04 subclass 03 prog-if 80: use DSP or legacy mode
+ */
+ if (pci->class == 0x040300)
+ return SND_INTEL_DSP_DRIVER_LEGACY;
+ if (pci->class != 0x040100 && pci->class != 0x040380) {
+ dev_err(&pci->dev, "Unknown PCI class/subclass/prog-if information (0x%06x) found, selecting HDA legacy driver\n", pci->class);
+ return SND_INTEL_DSP_DRIVER_LEGACY;
+ }
+
+ dev_info(&pci->dev, "DSP detected with PCI class/subclass/prog-if info 0x%06x\n", pci->class);
+
+ /* find the configuration for the specific device */
+ cfg = snd_intel_dsp_find_config(pci, config_table, ARRAY_SIZE(config_table));
+ if (!cfg)
+ return SND_INTEL_DSP_DRIVER_ANY;
+
+ if (cfg->flags & FLAG_SOF) {
+ if (cfg->flags & FLAG_SOF_ONLY_IF_DMIC) {
+ if (snd_intel_dsp_check_dmic(pci)) {
+ dev_info(&pci->dev, "Digital mics found on Skylake+ platform, using SOF driver\n");
+ return SND_INTEL_DSP_DRIVER_SOF;
+ }
+ } else {
+ return SND_INTEL_DSP_DRIVER_SOF;
+ }
+ }
+
+ if (cfg->flags & FLAG_SST)
+ return SND_INTEL_DSP_DRIVER_SST;
+
+ return SND_INTEL_DSP_DRIVER_LEGACY;
+}
+EXPORT_SYMBOL_GPL(snd_intel_dsp_driver_probe);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Intel DSP config driver");
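The driver selection above reduces to a first-match scan keyed on the PCI device ID, optionally narrowed by a DMI quirk, followed by a flag test. A simplified standalone sketch of that control flow in plain userspace C (chromebook_only stands in for the dmi_table pointer, have_dmic for the NHLT query; the SST/legacy fallthrough is omitted):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define FLAG_SST		(1u << 0)
#define FLAG_SOF		(1u << 1)
#define FLAG_SOF_ONLY_IF_DMIC	(1u << 16)

struct entry {
	unsigned int flags;
	unsigned short device;
	bool chromebook_only;	/* stand-in for the DMI table pointer */
};

static const struct entry table[] = {
	{ FLAG_SOF, 0x9dc8, true },	/* CNL Chromebooks */
	{ FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC, 0x9dc8, false },
};

/* first successful match wins, as in snd_intel_dsp_find_config() */
static const struct entry *find(unsigned short dev, bool chromebook)
{
	for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
		if (table[i].device != dev)
			continue;
		if (table[i].chromebook_only && !chromebook)
			continue;
		return &table[i];
	}
	return NULL;
}

int main(void)
{
	bool have_dmic = true;	/* would come from the NHLT table */
	const struct entry *e = find(0x9dc8, false);

	if (e && (e->flags & FLAG_SOF) &&
	    (!(e->flags & FLAG_SOF_ONLY_IF_DMIC) || have_dmic))
		puts("SOF driver");
	else
		puts("legacy driver");
	return 0;
}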
diff --git a/sound/hda/intel-nhlt.c b/sound/hda/intel-nhlt.c
index daede96f28ee..097ff6c10099 100644
--- a/sound/hda/intel-nhlt.c
+++ b/sound/hda/intel-nhlt.c
@@ -102,6 +102,3 @@ int intel_nhlt_get_dmic_geo(struct device *dev, struct nhlt_acpi_table *nhlt)
return dmic_geo;
}
EXPORT_SYMBOL_GPL(intel_nhlt_get_dmic_geo);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("Intel NHLT driver");
diff --git a/sound/isa/Kconfig b/sound/isa/Kconfig
index b690ed937cbe..6ffa48dd5983 100644
--- a/sound/isa/Kconfig
+++ b/sound/isa/Kconfig
@@ -2,22 +2,22 @@
# ALSA ISA drivers
config SND_WSS_LIB
- tristate
- select SND_PCM
+ tristate
+ select SND_PCM
select SND_TIMER
config SND_SB_COMMON
- tristate
+ tristate
config SND_SB8_DSP
- tristate
- select SND_PCM
- select SND_SB_COMMON
+ tristate
+ select SND_PCM
+ select SND_SB_COMMON
config SND_SB16_DSP
- tristate
- select SND_PCM
- select SND_SB_COMMON
+ tristate
+ select SND_PCM
+ select SND_SB_COMMON
menuconfig SND_ISA
bool "ISA sound devices"
diff --git a/sound/isa/cs423x/cs4236.c b/sound/isa/cs423x/cs4236.c
index 78dd213589b4..fa3c39cff5f8 100644
--- a/sound/isa/cs423x/cs4236.c
+++ b/sound/isa/cs423x/cs4236.c
@@ -278,7 +278,8 @@ static int snd_cs423x_pnp_init_mpu(int dev, struct pnp_dev *pdev)
} else {
mpu_port[dev] = pnp_port_start(pdev, 0);
if (mpu_irq[dev] >= 0 &&
- pnp_irq_valid(pdev, 0) && pnp_irq(pdev, 0) >= 0) {
+ pnp_irq_valid(pdev, 0) &&
+ pnp_irq(pdev, 0) != (resource_size_t)-1) {
mpu_irq[dev] = pnp_irq(pdev, 0);
} else {
mpu_irq[dev] = -1; /* disable interrupt */
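The cs4236 hunk above fixes a classic pitfall: pnp_irq() returns resource_size_t, an unsigned type, so the old ">= 0" test was always true and never rejected the "no IRQ" marker of -1. A standalone demonstration (the typedef is a simplification of the kernel's definition):

#include <stdio.h>

typedef unsigned long resource_size_t;	/* simplified stand-in */

int main(void)
{
	resource_size_t irq = (resource_size_t)-1;	/* "no IRQ" marker */

	/* always true for an unsigned type; compilers usually warn here */
	if (irq >= 0)
		puts("old check: accepts the invalid IRQ");

	if (irq != (resource_size_t)-1)
		puts("new check: valid IRQ");
	else
		puts("new check: invalid IRQ correctly rejected");

	return 0;
}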
diff --git a/sound/mips/Kconfig b/sound/mips/Kconfig
index 8a33402fd415..b497b803c834 100644
--- a/sound/mips/Kconfig
+++ b/sound/mips/Kconfig
@@ -14,15 +14,15 @@ config SND_SGI_O2
tristate "SGI O2 Audio"
depends on SGI_IP32
select SND_PCM
- help
- Sound support for the SGI O2 Workstation.
+ help
+ Sound support for the SGI O2 Workstation.
config SND_SGI_HAL2
- tristate "SGI HAL2 Audio"
- depends on SGI_HAS_HAL2
+ tristate "SGI HAL2 Audio"
+ depends on SGI_HAS_HAL2
select SND_PCM
- help
- Sound support for the SGI Indy and Indigo2 Workstation.
+ help
+ Sound support for the SGI Indy and Indigo2 Workstation.
endif # SND_MIPS
diff --git a/sound/mips/hal2.c b/sound/mips/hal2.c
index 6676bcbd769f..c9e060939708 100644
--- a/sound/mips/hal2.c
+++ b/sound/mips/hal2.c
@@ -741,8 +741,7 @@ static int hal2_pcm_create(struct snd_hal2 *hal2)
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE,
&hal2_capture_ops);
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS,
- snd_dma_continuous_data(GFP_KERNEL),
- 0, 1024 * 1024);
+ NULL, 0, 1024 * 1024);
return 0;
}
diff --git a/sound/mips/sgio2audio.c b/sound/mips/sgio2audio.c
index fadc1194b136..9d20ce6118a0 100644
--- a/sound/mips/sgio2audio.c
+++ b/sound/mips/sgio2audio.c
@@ -582,14 +582,13 @@ static int snd_sgio2audio_pcm_close(struct snd_pcm_substream *substream)
static int snd_sgio2audio_pcm_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *hw_params)
{
- return snd_pcm_lib_alloc_vmalloc_buffer(substream,
- params_buffer_bytes(hw_params));
+ return snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params));
}
/* hw_free callback */
static int snd_sgio2audio_pcm_hw_free(struct snd_pcm_substream *substream)
{
- return snd_pcm_lib_free_vmalloc_buffer(substream);
+ return snd_pcm_lib_free_pages(substream);
}
/* prepare callback */
@@ -670,7 +669,6 @@ static const struct snd_pcm_ops snd_sgio2audio_playback1_ops = {
.prepare = snd_sgio2audio_pcm_prepare,
.trigger = snd_sgio2audio_pcm_trigger,
.pointer = snd_sgio2audio_pcm_pointer,
- .page = snd_pcm_lib_get_vmalloc_page,
};
static const struct snd_pcm_ops snd_sgio2audio_playback2_ops = {
@@ -682,7 +680,6 @@ static const struct snd_pcm_ops snd_sgio2audio_playback2_ops = {
.prepare = snd_sgio2audio_pcm_prepare,
.trigger = snd_sgio2audio_pcm_trigger,
.pointer = snd_sgio2audio_pcm_pointer,
- .page = snd_pcm_lib_get_vmalloc_page,
};
static const struct snd_pcm_ops snd_sgio2audio_capture_ops = {
@@ -694,7 +691,6 @@ static const struct snd_pcm_ops snd_sgio2audio_capture_ops = {
.prepare = snd_sgio2audio_pcm_prepare,
.trigger = snd_sgio2audio_pcm_trigger,
.pointer = snd_sgio2audio_pcm_pointer,
- .page = snd_pcm_lib_get_vmalloc_page,
};
/*
@@ -720,6 +716,8 @@ static int snd_sgio2audio_new_pcm(struct snd_sgio2audio *chip)
&snd_sgio2audio_playback1_ops);
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE,
&snd_sgio2audio_capture_ops);
+ snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_VMALLOC,
+ NULL, 0, 0);
/* create second pcm device with one output and no input */
err = snd_pcm_new(chip->card, "SGI O2 Audio", 1, 1, 0, &pcm);
@@ -732,6 +730,8 @@ static int snd_sgio2audio_new_pcm(struct snd_sgio2audio *chip)
/* set operators */
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK,
&snd_sgio2audio_playback2_ops);
+ snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_VMALLOC,
+ NULL, 0, 0);
return 0;
}
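The sgio2audio conversion shows the general pattern for retiring the vmalloc-specific helpers: declare a SNDRV_DMA_TYPE_VMALLOC buffer once at PCM creation (size 0 defers the actual allocation), use the generic page helpers in hw_params/hw_free, and drop the .page callback, which the core now derives from the buffer type. A hedged sketch of the resulting driver shape (foo_* names are illustrative):

#include <sound/pcm.h>
#include <sound/pcm_params.h>

static int foo_hw_params(struct snd_pcm_substream *ss,
			 struct snd_pcm_hw_params *hw)
{
	/* generic helper; vmalloc is implied by the buffer type */
	return snd_pcm_lib_malloc_pages(ss, params_buffer_bytes(hw));
}

static int foo_hw_free(struct snd_pcm_substream *ss)
{
	return snd_pcm_lib_free_pages(ss);
}

static void foo_pcm_setup(struct snd_pcm *pcm)
{
	/* record the buffer type only; 0/0 means no preallocation */
	snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_VMALLOC,
					      NULL, 0, 0);
}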
diff --git a/sound/pci/Kconfig b/sound/pci/Kconfig
index 7630f808d087..93bc9bef7641 100644
--- a/sound/pci/Kconfig
+++ b/sound/pci/Kconfig
@@ -217,7 +217,7 @@ config SND_CMIPCI
will be called snd-cmipci.
config SND_OXYGEN_LIB
- tristate
+ tristate
config SND_OXYGEN
tristate "C-Media 8786, 8787, 8788 (Oxygen)"
diff --git a/sound/pci/ad1889.c b/sound/pci/ad1889.c
index 4b2451287e2c..5b6452df8bbd 100644
--- a/sound/pci/ad1889.c
+++ b/sound/pci/ad1889.c
@@ -633,9 +633,9 @@ snd_ad1889_pcm_init(struct snd_ad1889 *chip, int device)
chip->csubs = NULL;
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(chip->pci),
- BUFFER_BYTES_MAX / 2,
- BUFFER_BYTES_MAX);
+ &chip->pci->dev,
+ BUFFER_BYTES_MAX / 2,
+ BUFFER_BYTES_MAX);
return 0;
}
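Most of the churn in the PCI drivers below is one mechanical substitution: snd_dma_pci_data() was a thin macro around the PCI device's struct device, and callers now spell the pointer out. A sketch of the equivalence; the macro body here is renamed to avoid clashing with the real header, but matches the historical definition in <sound/memalloc.h>:

#include <linux/pci.h>
#include <sound/pcm.h>

/* matches the historical snd_dma_pci_data() definition */
#define foo_dma_pci_data(pci)	(&(pci)->dev)

static void foo_preallocate(struct snd_pcm *pcm, struct pci_dev *pci)
{
	/* shown twice only to illustrate the equivalence: both calls
	 * expand to the same thing after preprocessing
	 */
	snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
					      foo_dma_pci_data(pci),
					      64 * 1024, 128 * 1024);
	snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
					      &pci->dev,
					      64 * 1024, 128 * 1024);
}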
diff --git a/sound/pci/ali5451/ali5451.c b/sound/pci/ali5451/ali5451.c
index 6e28e381c21a..ae29df085ae1 100644
--- a/sound/pci/ali5451/ali5451.c
+++ b/sound/pci/ali5451/ali5451.c
@@ -1672,7 +1672,7 @@ static int snd_ali_pcm(struct snd_ali *codec, int device,
desc->capture_ops);
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(codec->pci),
+ &codec->pci->dev,
64*1024, 128*1024);
pcm->info_flags = 0;
diff --git a/sound/pci/als300.c b/sound/pci/als300.c
index 530799c8d3ce..cfbb8cacaaac 100644
--- a/sound/pci/als300.c
+++ b/sound/pci/als300.c
@@ -592,7 +592,8 @@ static int snd_als300_new_pcm(struct snd_als300 *chip)
/* pre-allocation of buffers */
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(chip->pci), 64*1024, 64*1024);
+ &chip->pci->dev,
+ 64*1024, 64*1024);
return 0;
}
diff --git a/sound/pci/als4000.c b/sound/pci/als4000.c
index b06c3dbb525d..d6f5487afe52 100644
--- a/sound/pci/als4000.c
+++ b/sound/pci/als4000.c
@@ -693,7 +693,8 @@ static int snd_als4000_pcm(struct snd_sb *chip, int device)
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_als4000_playback_ops);
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_als4000_capture_ops);
- snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(chip->pci),
+ snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
+ &chip->pci->dev,
64*1024, 64*1024);
chip->pcm = pcm;
diff --git a/sound/pci/asihpi/asihpi.c b/sound/pci/asihpi/asihpi.c
index 2a21a3d99719..147005fdd3ea 100644
--- a/sound/pci/asihpi/asihpi.c
+++ b/sound/pci/asihpi/asihpi.c
@@ -1325,8 +1325,8 @@ static int snd_card_asihpi_pcm_new(struct snd_card_asihpi *asihpi, int device)
/*? do we want to emulate MMAP for non-BBM cards?
Jack doesn't work with ALSAs MMAP emulation - WHY NOT? */
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(asihpi->pci),
- 64*1024, BUFFER_BYTES_MAX);
+ &asihpi->pci->dev,
+ 64*1024, BUFFER_BYTES_MAX);
return 0;
}
diff --git a/sound/pci/atiixp.c b/sound/pci/atiixp.c
index c953bd73a48c..1e1ededf8eb2 100644
--- a/sound/pci/atiixp.c
+++ b/sound/pci/atiixp.c
@@ -353,7 +353,7 @@ static int atiixp_build_dma_packets(struct atiixp *chip, struct atiixp_dma *dma,
if (dma->desc_buf.area == NULL) {
if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(chip->pci),
+ &chip->pci->dev,
ATI_DESC_LIST_SIZE,
&dma->desc_buf) < 0)
return -ENOMEM;
@@ -1284,7 +1284,7 @@ static int snd_atiixp_pcm_new(struct atiixp *chip)
chip->pcmdevs[ATI_PCMDEV_ANALOG] = pcm;
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(chip->pci),
+ &chip->pci->dev,
64*1024, 128*1024);
err = snd_pcm_add_chmap_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK,
@@ -1317,7 +1317,7 @@ static int snd_atiixp_pcm_new(struct atiixp *chip)
chip->pcmdevs[ATI_PCMDEV_DIGITAL] = pcm;
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(chip->pci),
+ &chip->pci->dev,
64*1024, 128*1024);
/* pre-select AC97 SPDIF slots 10/11 */
diff --git a/sound/pci/atiixp_modem.c b/sound/pci/atiixp_modem.c
index 95d209f96581..6f088c1949f3 100644
--- a/sound/pci/atiixp_modem.c
+++ b/sound/pci/atiixp_modem.c
@@ -321,7 +321,7 @@ static int atiixp_build_dma_packets(struct atiixp_modem *chip,
return -ENOMEM;
if (dma->desc_buf.area == NULL) {
- if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(chip->pci),
+ if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, &chip->pci->dev,
ATI_DESC_LIST_SIZE, &dma->desc_buf) < 0)
return -ENOMEM;
dma->period_bytes = dma->periods = 0; /* clear */
@@ -995,7 +995,7 @@ static int snd_atiixp_pcm_new(struct atiixp_modem *chip)
chip->pcmdevs[ATI_PCMDEV_ANALOG] = pcm;
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(chip->pci),
+ &chip->pci->dev,
64*1024, 128*1024);
return 0;
diff --git a/sound/pci/au88x0/au88x0_pcm.c b/sound/pci/au88x0/au88x0_pcm.c
index 39ea9ef00f47..a2dcf43beedf 100644
--- a/sound/pci/au88x0/au88x0_pcm.c
+++ b/sound/pci/au88x0/au88x0_pcm.c
@@ -436,7 +436,6 @@ static const struct snd_pcm_ops snd_vortex_playback_ops = {
.prepare = snd_vortex_pcm_prepare,
.trigger = snd_vortex_pcm_trigger,
.pointer = snd_vortex_pcm_pointer,
- .page = snd_pcm_sgbuf_ops_page,
};
/*
@@ -638,7 +637,7 @@ static int snd_vortex_new_pcm(vortex_t *chip, int idx, int nr)
/* pre-allocation of Scatter-Gather buffers */
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV_SG,
- snd_dma_pci_data(chip->pci_dev),
+ &chip->pci_dev->dev,
0x10000, 0x10000);
switch (VORTEX_PCM_TYPE(pcm)) {
diff --git a/sound/pci/aw2/aw2-alsa.c b/sound/pci/aw2/aw2-alsa.c
index e413414181df..1cbfae856a2a 100644
--- a/sound/pci/aw2/aw2-alsa.c
+++ b/sound/pci/aw2/aw2-alsa.c
@@ -613,7 +613,7 @@ static int snd_aw2_new_pcm(struct aw2 *chip)
/* Preallocate continuous pages. */
snd_pcm_lib_preallocate_pages_for_all(pcm_playback_ana,
SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(chip->pci),
+ &chip->pci->dev,
64 * 1024, 64 * 1024);
err = snd_pcm_new(chip->card, "Audiowerk2 digital playback", 1, 1, 0,
@@ -645,7 +645,7 @@ static int snd_aw2_new_pcm(struct aw2 *chip)
/* Preallocate continuous pages. */
snd_pcm_lib_preallocate_pages_for_all(pcm_playback_num,
SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(chip->pci),
+ &chip->pci->dev,
64 * 1024, 64 * 1024);
err = snd_pcm_new(chip->card, "Audiowerk2 capture", 2, 0, 1,
@@ -678,7 +678,7 @@ static int snd_aw2_new_pcm(struct aw2 *chip)
/* Preallocate continuous pages. */
snd_pcm_lib_preallocate_pages_for_all(pcm_capture,
SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(chip->pci),
+ &chip->pci->dev,
64 * 1024, 64 * 1024);
/* Create control */
diff --git a/sound/pci/azt3328.c b/sound/pci/azt3328.c
index f92c9cbb955a..f475370faaaa 100644
--- a/sound/pci/azt3328.c
+++ b/sound/pci/azt3328.c
@@ -2135,8 +2135,8 @@ snd_azf3328_pcm(struct snd_azf3328 *chip)
chip->pcm[AZF_CODEC_CAPTURE] = pcm;
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(chip->pci),
- 64*1024, 64*1024);
+ &chip->pci->dev,
+ 64*1024, 64*1024);
err = snd_pcm_new(chip->card, "AZF3328 I2S OUT", AZF_PCMDEV_I2S_OUT,
1, 0, &pcm);
@@ -2151,8 +2151,8 @@ snd_azf3328_pcm(struct snd_azf3328 *chip)
chip->pcm[AZF_CODEC_I2S_OUT] = pcm;
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(chip->pci),
- 64*1024, 64*1024);
+ &chip->pci->dev,
+ 64*1024, 64*1024);
return 0;
}
diff --git a/sound/pci/bt87x.c b/sound/pci/bt87x.c
index 66a5a24e7558..6bf5ac3600c5 100644
--- a/sound/pci/bt87x.c
+++ b/sound/pci/bt87x.c
@@ -217,7 +217,7 @@ static int snd_bt87x_create_risc(struct snd_bt87x *chip, struct snd_pcm_substrea
__le32 *risc;
if (chip->dma_risc.area == NULL) {
- if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(chip->pci),
+ if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, &chip->pci->dev,
PAGE_ALIGN(MAX_RISC_SIZE), &chip->dma_risc) < 0)
return -ENOMEM;
}
@@ -545,7 +545,6 @@ static const struct snd_pcm_ops snd_bt87x_pcm_ops = {
.prepare = snd_bt87x_prepare,
.trigger = snd_bt87x_trigger,
.pointer = snd_bt87x_pointer,
- .page = snd_pcm_sgbuf_ops_page,
};
static int snd_bt87x_capture_volume_info(struct snd_kcontrol *kcontrol,
@@ -701,7 +700,7 @@ static int snd_bt87x_pcm(struct snd_bt87x *chip, int device, char *name)
strcpy(pcm->name, name);
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_bt87x_pcm_ops);
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV_SG,
- snd_dma_pci_data(chip->pci),
+ &chip->pci->dev,
128 * 1024,
ALIGN(255 * 4092, 1024));
return 0;
diff --git a/sound/pci/ca0106/ca0106_main.c b/sound/pci/ca0106/ca0106_main.c
index 478412e0aa3c..abc2440dc2d9 100644
--- a/sound/pci/ca0106/ca0106_main.c
+++ b/sound/pci/ca0106/ca0106_main.c
@@ -1389,7 +1389,7 @@ static int snd_ca0106_pcm(struct snd_ca0106 *emu, int device)
substream;
substream = substream->next) {
snd_pcm_lib_preallocate_pages(substream, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(emu->pci),
+ &emu->pci->dev,
64*1024, 64*1024);
}
@@ -1397,7 +1397,7 @@ static int snd_ca0106_pcm(struct snd_ca0106 *emu, int device)
substream;
substream = substream->next) {
snd_pcm_lib_preallocate_pages(substream, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(emu->pci),
+ &emu->pci->dev,
64*1024, 64*1024);
}
@@ -1692,7 +1692,7 @@ static int snd_ca0106_create(int dev, struct snd_card *card,
chip->irq = pci->irq;
/* This stores the periods table. */
- if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci),
+ if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, &pci->dev,
1024, &chip->buffer) < 0) {
snd_ca0106_free(chip);
return -ENOMEM;
diff --git a/sound/pci/cmipci.c b/sound/pci/cmipci.c
index df720881eb99..dd9d62e2b633 100644
--- a/sound/pci/cmipci.c
+++ b/sound/pci/cmipci.c
@@ -1902,7 +1902,7 @@ static int snd_cmipci_pcm_new(struct cmipci *cm, int device)
cm->pcm = pcm;
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(cm->pci), 64*1024, 128*1024);
+ &cm->pci->dev, 64*1024, 128*1024);
return 0;
}
@@ -1924,7 +1924,7 @@ static int snd_cmipci_pcm2_new(struct cmipci *cm, int device)
cm->pcm2 = pcm;
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(cm->pci), 64*1024, 128*1024);
+ &cm->pci->dev, 64*1024, 128*1024);
return 0;
}
@@ -1947,7 +1947,7 @@ static int snd_cmipci_pcm_spdif_new(struct cmipci *cm, int device)
cm->pcm_spdif = pcm;
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(cm->pci), 64*1024, 128*1024);
+ &cm->pci->dev, 64*1024, 128*1024);
err = snd_pcm_add_chmap_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK,
snd_pcm_alt_chmaps, cm->max_channels, 0,
diff --git a/sound/pci/cs4281.c b/sound/pci/cs4281.c
index 04c712647853..058c1414b777 100644
--- a/sound/pci/cs4281.c
+++ b/sound/pci/cs4281.c
@@ -975,7 +975,8 @@ static int snd_cs4281_pcm(struct cs4281 *chip, int device)
chip->pcm = pcm;
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(chip->pci), 64*1024, 512*1024);
+ &chip->pci->dev,
+ 64*1024, 512*1024);
return 0;
}
diff --git a/sound/pci/cs46xx/cs46xx_lib.c b/sound/pci/cs46xx/cs46xx_lib.c
index 5b888b795f7e..102a62965ac1 100644
--- a/sound/pci/cs46xx/cs46xx_lib.c
+++ b/sound/pci/cs46xx/cs46xx_lib.c
@@ -1494,7 +1494,7 @@ static int _cs46xx_playback_open_channel (struct snd_pcm_substream *substream,in
cpcm = kzalloc(sizeof(*cpcm), GFP_KERNEL);
if (cpcm == NULL)
return -ENOMEM;
- if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(chip->pci),
+ if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, &chip->pci->dev,
PAGE_SIZE, &cpcm->hw_buf) < 0) {
kfree(cpcm);
return -ENOMEM;
@@ -1582,7 +1582,7 @@ static int snd_cs46xx_capture_open(struct snd_pcm_substream *substream)
{
struct snd_cs46xx *chip = snd_pcm_substream_chip(substream);
- if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(chip->pci),
+ if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, &chip->pci->dev,
PAGE_SIZE, &chip->capt.hw_buf) < 0)
return -ENOMEM;
chip->capt.substream = substream;
@@ -1784,7 +1784,8 @@ int snd_cs46xx_pcm(struct snd_cs46xx *chip, int device)
chip->pcm = pcm;
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(chip->pci), 64*1024, 256*1024);
+ &chip->pci->dev,
+ 64*1024, 256*1024);
return 0;
}
@@ -1809,7 +1810,8 @@ int snd_cs46xx_pcm_rear(struct snd_cs46xx *chip, int device)
chip->pcm_rear = pcm;
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(chip->pci), 64*1024, 256*1024);
+ &chip->pci->dev,
+ 64*1024, 256*1024);
return 0;
}
@@ -1832,7 +1834,8 @@ int snd_cs46xx_pcm_center_lfe(struct snd_cs46xx *chip, int device)
chip->pcm_center_lfe = pcm;
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(chip->pci), 64*1024, 256*1024);
+ &chip->pci->dev,
+ 64*1024, 256*1024);
return 0;
}
@@ -1855,7 +1858,8 @@ int snd_cs46xx_pcm_iec958(struct snd_cs46xx *chip, int device)
chip->pcm_iec958 = pcm;
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(chip->pci), 64*1024, 256*1024);
+ &chip->pci->dev,
+ 64*1024, 256*1024);
return 0;
}
diff --git a/sound/pci/cs5535audio/cs5535audio_pcm.c b/sound/pci/cs5535audio/cs5535audio_pcm.c
index 04822bf2f987..4642e5384e83 100644
--- a/sound/pci/cs5535audio/cs5535audio_pcm.c
+++ b/sound/pci/cs5535audio/cs5535audio_pcm.c
@@ -117,7 +117,7 @@ static int cs5535audio_build_dma_packets(struct cs5535audio *cs5535au,
if (dma->desc_buf.area == NULL) {
if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(cs5535au->pci),
+ &cs5535au->pci->dev,
CS5535AUDIO_DESC_LIST_SIZE+1,
&dma->desc_buf) < 0)
return -ENOMEM;
@@ -432,8 +432,8 @@ int snd_cs5535audio_pcm(struct cs5535audio *cs5535au)
strcpy(pcm->name, "CS5535 Audio");
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(cs5535au->pci),
- 64*1024, 128*1024);
+ &cs5535au->pci->dev,
+ 64*1024, 128*1024);
cs5535au->pcm = pcm;
return 0;
diff --git a/sound/pci/ctxfi/ctpcm.c b/sound/pci/ctxfi/ctpcm.c
index 89923399e646..7ae5b238703c 100644
--- a/sound/pci/ctxfi/ctpcm.c
+++ b/sound/pci/ctxfi/ctpcm.c
@@ -379,7 +379,6 @@ static const struct snd_pcm_ops ct_pcm_playback_ops = {
.prepare = ct_pcm_playback_prepare,
.trigger = ct_pcm_playback_trigger,
.pointer = ct_pcm_playback_pointer,
- .page = snd_pcm_sgbuf_ops_page,
};
/* PCM operators for capture */
@@ -392,7 +391,6 @@ static const struct snd_pcm_ops ct_pcm_capture_ops = {
.prepare = ct_pcm_capture_prepare,
.trigger = ct_pcm_capture_trigger,
.pointer = ct_pcm_capture_pointer,
- .page = snd_pcm_sgbuf_ops_page,
};
static const struct snd_pcm_chmap_elem surround_map[] = {
@@ -452,7 +450,8 @@ int ct_alsa_pcm_create(struct ct_atc *atc,
SNDRV_PCM_STREAM_CAPTURE, &ct_pcm_capture_ops);
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV_SG,
- snd_dma_pci_data(atc->pci), 128*1024, 128*1024);
+ &atc->pci->dev,
+ 128*1024, 128*1024);
chs = 2;
switch (device) {
diff --git a/sound/pci/ctxfi/ctvmem.c b/sound/pci/ctxfi/ctvmem.c
index 2e80b17a7104..bde28aa9e139 100644
--- a/sound/pci/ctxfi/ctvmem.c
+++ b/sound/pci/ctxfi/ctvmem.c
@@ -183,7 +183,7 @@ int ct_vm_create(struct ct_vm **rvm, struct pci_dev *pci)
/* Allocate page table pages */
for (i = 0; i < CT_PTP_NUM; i++) {
err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(pci),
+ &pci->dev,
PAGE_SIZE, &vm->ptp[i]);
if (err < 0)
break;
diff --git a/sound/pci/echoaudio/echoaudio.c b/sound/pci/echoaudio/echoaudio.c
index ca9125726be2..1465813bf7c6 100644
--- a/sound/pci/echoaudio/echoaudio.c
+++ b/sound/pci/echoaudio/echoaudio.c
@@ -324,7 +324,7 @@ static int pcm_open(struct snd_pcm_substream *substream,
/* Finally allocate a page for the scatter-gather list */
if ((err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(chip->pci),
+ &chip->pci->dev,
PAGE_SIZE, &pipe->sgpage)) < 0) {
dev_err(chip->card->dev, "s-g list allocation failed\n");
return err;
@@ -824,7 +824,6 @@ static const struct snd_pcm_ops analog_playback_ops = {
.prepare = pcm_prepare,
.trigger = pcm_trigger,
.pointer = pcm_pointer,
- .page = snd_pcm_sgbuf_ops_page,
};
static const struct snd_pcm_ops analog_capture_ops = {
.open = pcm_analog_in_open,
@@ -835,7 +834,6 @@ static const struct snd_pcm_ops analog_capture_ops = {
.prepare = pcm_prepare,
.trigger = pcm_trigger,
.pointer = pcm_pointer,
- .page = snd_pcm_sgbuf_ops_page,
};
#ifdef ECHOCARD_HAS_DIGITAL_IO
#ifndef ECHOCARD_HAS_VMIXER
@@ -848,7 +846,6 @@ static const struct snd_pcm_ops digital_playback_ops = {
.prepare = pcm_prepare,
.trigger = pcm_trigger,
.pointer = pcm_pointer,
- .page = snd_pcm_sgbuf_ops_page,
};
#endif /* !ECHOCARD_HAS_VMIXER */
static const struct snd_pcm_ops digital_capture_ops = {
@@ -860,7 +857,6 @@ static const struct snd_pcm_ops digital_capture_ops = {
.prepare = pcm_prepare,
.trigger = pcm_trigger,
.pointer = pcm_pointer,
- .page = snd_pcm_sgbuf_ops_page,
};
#endif /* ECHOCARD_HAS_DIGITAL_IO */
@@ -869,7 +865,7 @@ static const struct snd_pcm_ops digital_capture_ops = {
/* Preallocate memory only for the first substream because it's the most
* used one
*/
-static int snd_echo_preallocate_pages(struct snd_pcm *pcm, struct device *dev)
+static void snd_echo_preallocate_pages(struct snd_pcm *pcm, struct device *dev)
{
struct snd_pcm_substream *ss;
int stream;
@@ -880,8 +876,6 @@ static int snd_echo_preallocate_pages(struct snd_pcm *pcm, struct device *dev)
dev,
ss->number ? 0 : 128<<10,
256<<10);
-
- return 0;
}
@@ -908,8 +902,7 @@ static int snd_echo_new_pcm(struct echoaudio *chip)
strcpy(pcm->name, chip->card->shortname);
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &analog_playback_ops);
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &analog_capture_ops);
- if ((err = snd_echo_preallocate_pages(pcm, snd_dma_pci_data(chip->pci))) < 0)
- return err;
+ snd_echo_preallocate_pages(pcm, &chip->pci->dev);
#ifdef ECHOCARD_HAS_DIGITAL_IO
/* PCM#1 Digital inputs, no outputs */
@@ -920,8 +913,7 @@ static int snd_echo_new_pcm(struct echoaudio *chip)
chip->digital_pcm = pcm;
strcpy(pcm->name, chip->card->shortname);
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &digital_capture_ops);
- if ((err = snd_echo_preallocate_pages(pcm, snd_dma_pci_data(chip->pci))) < 0)
- return err;
+ snd_echo_preallocate_pages(pcm, &chip->pci->dev);
#endif /* ECHOCARD_HAS_DIGITAL_IO */
#else /* ECHOCARD_HAS_VMIXER */
@@ -941,8 +933,7 @@ static int snd_echo_new_pcm(struct echoaudio *chip)
strcpy(pcm->name, chip->card->shortname);
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &analog_playback_ops);
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &analog_capture_ops);
- if ((err = snd_echo_preallocate_pages(pcm, snd_dma_pci_data(chip->pci))) < 0)
- return err;
+ snd_echo_preallocate_pages(pcm, &chip->pci->dev);
#ifdef ECHOCARD_HAS_DIGITAL_IO
/* PCM#1 Digital i/o */
@@ -955,8 +946,7 @@ static int snd_echo_new_pcm(struct echoaudio *chip)
strcpy(pcm->name, chip->card->shortname);
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &digital_playback_ops);
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &digital_capture_ops);
- if ((err = snd_echo_preallocate_pages(pcm, snd_dma_pci_data(chip->pci))) < 0)
- return err;
+ snd_echo_preallocate_pages(pcm, &chip->pci->dev);
#endif /* ECHOCARD_HAS_DIGITAL_IO */
#endif /* ECHOCARD_HAS_VMIXER */
@@ -1958,7 +1948,7 @@ static int snd_echo_create(struct snd_card *card,
/* Create the DSP comm page - this is the area of memory used for most
of the communication with the DSP, which accesses it via bus mastering */
- if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(chip->pci),
+ if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, &chip->pci->dev,
sizeof(struct comm_page),
&chip->commpage_dma_buf) < 0) {
dev_err(chip->card->dev, "cannot allocate the comm page\n");
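The echoaudio helper above can turn void because, in this series, snd_pcm_lib_preallocate_pages() no longer reports failure: when preallocation cannot be satisfied, the buffer is simply allocated later in hw_params, so the callers lose a whole error path. A condensed sketch of the resulting helper, assuming that void-returning variant (foo_* is illustrative):

#include <sound/pcm.h>

static void foo_preallocate(struct snd_pcm *pcm, struct device *dev)
{
	struct snd_pcm_substream *ss;
	int stream;

	/* nothing can fail here anymore, hence the void return */
	for (stream = 0; stream < 2; stream++)
		for (ss = pcm->streams[stream].substream; ss;
		     ss = ss->next)
			snd_pcm_lib_preallocate_pages(ss,
						      SNDRV_DMA_TYPE_DEV,
						      dev,
						      ss->number ? 0 : 128 << 10,
						      256 << 10);
}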
diff --git a/sound/pci/emu10k1/emu10k1.c b/sound/pci/emu10k1/emu10k1.c
index f208b6e217fd..29b7720d7961 100644
--- a/sound/pci/emu10k1/emu10k1.c
+++ b/sound/pci/emu10k1/emu10k1.c
@@ -124,8 +124,9 @@ static int snd_card_emu10k1_probe(struct pci_dev *pci,
goto error;
/* This stores the periods table. */
if (emu->card_capabilities->ca0151_chip) { /* P16V */
- if ((err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci),
- 1024, &emu->p16v_buffer)) < 0)
+ err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, &pci->dev,
+ 1024, &emu->p16v_buffer);
+ if (err < 0)
goto error;
}
diff --git a/sound/pci/emu10k1/emu10k1x.c b/sound/pci/emu10k1/emu10k1x.c
index 9cf81832259c..241b4a0631ab 100644
--- a/sound/pci/emu10k1/emu10k1x.c
+++ b/sound/pci/emu10k1/emu10k1x.c
@@ -877,7 +877,7 @@ static int snd_emu10k1x_pcm(struct emu10k1x *emu, int device)
emu->pcm = pcm;
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(emu->pci),
+ &emu->pci->dev,
32*1024, 32*1024);
return snd_pcm_add_chmap_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK, map, 2,
@@ -936,8 +936,8 @@ static int snd_emu10k1x_create(struct snd_card *card,
}
chip->irq = pci->irq;
- if(snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci),
- 4 * 1024, &chip->dma_buffer) < 0) {
+ if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, &pci->dev,
+ 4 * 1024, &chip->dma_buffer) < 0) {
snd_emu10k1x_free(chip);
return -ENOMEM;
}
diff --git a/sound/pci/emu10k1/emufx.c b/sound/pci/emu10k1/emufx.c
index e053f0d58bdd..a31adecfe608 100644
--- a/sound/pci/emu10k1/emufx.c
+++ b/sound/pci/emu10k1/emufx.c
@@ -2464,7 +2464,7 @@ int snd_emu10k1_fx8010_tram_setup(struct snd_emu10k1 *emu, u32 size)
}
if (size > 0) {
- if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(emu->pci),
+ if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, &emu->pci->dev,
size * 2, &emu->fx8010.etram_pages) < 0)
return -ENOMEM;
memset(emu->fx8010.etram_pages.area, 0, size * 2);
diff --git a/sound/pci/emu10k1/emupcm.c b/sound/pci/emu10k1/emupcm.c
index 6530a55fb878..9a8cf3c7dd67 100644
--- a/sound/pci/emu10k1/emupcm.c
+++ b/sound/pci/emu10k1/emupcm.c
@@ -1366,7 +1366,6 @@ static const struct snd_pcm_ops snd_emu10k1_playback_ops = {
.prepare = snd_emu10k1_playback_prepare,
.trigger = snd_emu10k1_playback_trigger,
.pointer = snd_emu10k1_playback_pointer,
- .page = snd_pcm_sgbuf_ops_page,
};
static const struct snd_pcm_ops snd_emu10k1_capture_ops = {
@@ -1390,7 +1389,6 @@ static const struct snd_pcm_ops snd_emu10k1_efx_playback_ops = {
.prepare = snd_emu10k1_efx_playback_prepare,
.trigger = snd_emu10k1_efx_playback_trigger,
.pointer = snd_emu10k1_efx_playback_pointer,
- .page = snd_pcm_sgbuf_ops_page,
};
int snd_emu10k1_pcm(struct snd_emu10k1 *emu, int device)
@@ -1414,12 +1412,12 @@ int snd_emu10k1_pcm(struct snd_emu10k1 *emu, int device)
for (substream = pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream; substream; substream = substream->next)
snd_pcm_lib_preallocate_pages(substream, SNDRV_DMA_TYPE_DEV_SG,
- snd_dma_pci_data(emu->pci),
+ &emu->pci->dev,
64*1024, 64*1024);
for (substream = pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream; substream; substream = substream->next)
snd_pcm_lib_preallocate_pages(substream, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(emu->pci),
+ &emu->pci->dev,
64*1024, 64*1024);
return 0;
@@ -1445,7 +1443,7 @@ int snd_emu10k1_pcm_multi(struct snd_emu10k1 *emu, int device)
for (substream = pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream; substream; substream = substream->next)
snd_pcm_lib_preallocate_pages(substream, SNDRV_DMA_TYPE_DEV_SG,
- snd_dma_pci_data(emu->pci),
+ &emu->pci->dev,
64*1024, 64*1024);
return 0;
@@ -1480,7 +1478,7 @@ int snd_emu10k1_pcm_mic(struct snd_emu10k1 *emu, int device)
emu->pcm_mic = pcm;
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(emu->pci),
+ &emu->pci->dev,
64*1024, 64*1024);
return 0;
@@ -1855,7 +1853,7 @@ int snd_emu10k1_pcm_efx(struct snd_emu10k1 *emu, int device)
return err;
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(emu->pci),
+ &emu->pci->dev,
64*1024, 64*1024);
return 0;
diff --git a/sound/pci/emu10k1/memory.c b/sound/pci/emu10k1/memory.c
index 135e26544275..94b8d5b08225 100644
--- a/sound/pci/emu10k1/memory.c
+++ b/sound/pci/emu10k1/memory.c
@@ -387,7 +387,7 @@ int snd_emu10k1_alloc_pages_maybe_wider(struct snd_emu10k1 *emu, size_t size,
}
return snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(emu->pci), size, dmab);
+ &emu->pci->dev, size, dmab);
}
/*
@@ -477,7 +477,7 @@ static void __synth_free_pages(struct snd_emu10k1 *emu, int first_page,
int page;
dmab.dev.type = SNDRV_DMA_TYPE_DEV;
- dmab.dev.dev = snd_dma_pci_data(emu->pci);
+ dmab.dev.dev = &emu->pci->dev;
for (page = first_page; page <= last_page; page++) {
if (emu->page_ptr_table[page] == NULL)
diff --git a/sound/pci/emu10k1/p16v.c b/sound/pci/emu10k1/p16v.c
index eeaed555185c..ab8876855989 100644
--- a/sound/pci/emu10k1/p16v.c
+++ b/sound/pci/emu10k1/p16v.c
@@ -643,7 +643,7 @@ int snd_p16v_pcm(struct snd_emu10k1 *emu, int device)
substream;
substream = substream->next) {
snd_pcm_lib_preallocate_pages(substream, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(emu->pci),
+ &emu->pci->dev,
(65536 - 64) * 8,
(65536 - 64) * 8);
/*
@@ -656,7 +656,7 @@ int snd_p16v_pcm(struct snd_emu10k1 *emu, int device)
substream;
substream = substream->next) {
snd_pcm_lib_preallocate_pages(substream, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(emu->pci),
+ &emu->pci->dev,
65536 - 64, 65536 - 64);
/*
dev_dbg(emu->card->dev,
diff --git a/sound/pci/ens1370.c b/sound/pci/ens1370.c
index b767df8181b5..0499dc863202 100644
--- a/sound/pci/ens1370.c
+++ b/sound/pci/ens1370.c
@@ -1275,7 +1275,8 @@ static int snd_ensoniq_pcm(struct ensoniq *ensoniq, int device)
ensoniq->pcm1 = pcm;
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(ensoniq->pci), 64*1024, 128*1024);
+ &ensoniq->pci->dev,
+ 64*1024, 128*1024);
#ifdef CHIP1370
err = snd_pcm_add_chmap_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK,
@@ -1307,7 +1308,8 @@ static int snd_ensoniq_pcm2(struct ensoniq *ensoniq, int device)
ensoniq->pcm2 = pcm;
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(ensoniq->pci), 64*1024, 128*1024);
+ &ensoniq->pci->dev,
+ 64*1024, 128*1024);
#ifdef CHIP1370
err = snd_pcm_add_chmap_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK,
@@ -2095,7 +2097,7 @@ static int snd_ensoniq_create(struct snd_card *card,
}
ensoniq->irq = pci->irq;
#ifdef CHIP1370
- if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci),
+ if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, &pci->dev,
16, &ensoniq->dma_bug) < 0) {
dev_err(card->dev, "unable to allocate space for phantom area - dma_bug\n");
snd_ensoniq_free(ensoniq);
diff --git a/sound/pci/es1938.c b/sound/pci/es1938.c
index ecf77c8c9e59..c571c5d380ca 100644
--- a/sound/pci/es1938.c
+++ b/sound/pci/es1938.c
@@ -1032,7 +1032,8 @@ static int snd_es1938_new_pcm(struct es1938 *chip, int device)
strcpy(pcm->name, "ESS Solo-1");
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(chip->pci), 64*1024, 64*1024);
+ &chip->pci->dev,
+ 64*1024, 64*1024);
chip->pcm = pcm;
return 0;
diff --git a/sound/pci/es1968.c b/sound/pci/es1968.c
index 974142535a25..7017ca9dea4a 100644
--- a/sound/pci/es1968.c
+++ b/sound/pci/es1968.c
@@ -1422,10 +1422,8 @@ snd_es1968_init_dmabuf(struct es1968 *chip)
int err;
struct esm_memory *chunk;
- chip->dma.dev.type = SNDRV_DMA_TYPE_DEV;
- chip->dma.dev.dev = snd_dma_pci_data(chip->pci);
err = snd_dma_alloc_pages_fallback(SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(chip->pci),
+ &chip->pci->dev,
chip->total_bufsize, &chip->dma);
if (err < 0 || ! chip->dma.area) {
dev_err(chip->card->dev,
diff --git a/sound/pci/fm801.c b/sound/pci/fm801.c
index 3ef7d507eb9b..a7f8109acced 100644
--- a/sound/pci/fm801.c
+++ b/sound/pci/fm801.c
@@ -721,7 +721,7 @@ static int snd_fm801_pcm(struct fm801 *chip, int device)
chip->pcm = pcm;
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(pdev),
+ &pdev->dev,
chip->multichannel ? 128*1024 : 64*1024, 128*1024);
return snd_pcm_add_chmap_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK,
diff --git a/sound/pci/hda/Kconfig b/sound/pci/hda/Kconfig
index dae47a45b2b8..bd48335d09d7 100644
--- a/sound/pci/hda/Kconfig
+++ b/sound/pci/hda/Kconfig
@@ -12,7 +12,7 @@ config SND_HDA_INTEL
tristate "HD Audio PCI"
depends on SND_PCI
select SND_HDA
- select SND_INTEL_NHLT if ACPI
+ select SND_INTEL_DSP_CONFIG
help
Say Y here to include support for Intel "High Definition
Audio" (Azalia) and its compatible devices.
@@ -23,15 +23,6 @@ config SND_HDA_INTEL
To compile this driver as a module, choose M here: the module
will be called snd-hda-intel.
-config SND_HDA_INTEL_DETECT_DMIC
- bool "DMIC detection and probe abort"
- depends on SND_HDA_INTEL
- help
- Say Y to detect digital microphones on SKL+ devices. DMICs
- cannot be handled by the HDaudio legacy driver and are
- currently only supported by the SOF driver.
- If unsure say N.
-
config SND_HDA_TEGRA
tristate "NVIDIA Tegra HD Audio"
depends on ARCH_TEGRA
diff --git a/sound/pci/hda/hda_bind.c b/sound/pci/hda/hda_bind.c
index 8272b50b8349..6a8564566375 100644
--- a/sound/pci/hda/hda_bind.c
+++ b/sound/pci/hda/hda_bind.c
@@ -43,6 +43,10 @@ static void hda_codec_unsol_event(struct hdac_device *dev, unsigned int ev)
{
struct hda_codec *codec = container_of(dev, struct hda_codec, core);
+ /* ignore unsol events during shutdown */
+ if (codec->bus->shutdown)
+ return;
+
if (codec->patch_ops.unsol_event)
codec->patch_ops.unsol_event(codec, ev);
}
diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c
index 6387c7e90918..2f3b7a35f2d9 100644
--- a/sound/pci/hda/hda_controller.c
+++ b/sound/pci/hda/hda_controller.c
@@ -701,7 +701,6 @@ static const struct snd_pcm_ops azx_pcm_ops = {
.pointer = azx_pcm_pointer,
.get_time_info = azx_get_time_info,
.mmap = azx_pcm_mmap,
- .page = snd_pcm_sgbuf_ops_page,
};
static void azx_pcm_free(struct snd_pcm *pcm)
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index cf53fbd872ee..e76a0bb6d3cf 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -46,7 +46,7 @@
#include <sound/initval.h>
#include <sound/hdaudio.h>
#include <sound/hda_i915.h>
-#include <sound/intel-nhlt.h>
+#include <sound/intel-dsp-config.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/firmware.h>
@@ -124,7 +124,7 @@ static char *patch[SNDRV_CARDS];
static bool beep_mode[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS-1)] =
CONFIG_SND_HDA_INPUT_BEEP_MODE};
#endif
-static bool dmic_detect = IS_ENABLED(CONFIG_SND_HDA_INTEL_DETECT_DMIC);
+static bool dsp_driver = 1;
module_param_array(index, int, NULL, 0444);
MODULE_PARM_DESC(index, "Index value for Intel HD audio interface.");
@@ -159,8 +159,9 @@ module_param_array(beep_mode, bool, NULL, 0444);
MODULE_PARM_DESC(beep_mode, "Select HDA Beep registration mode "
"(0=off, 1=on) (default=1).");
#endif
-module_param(dmic_detect, bool, 0444);
-MODULE_PARM_DESC(dmic_detect, "DMIC detect on SKL+ platforms");
+module_param(dsp_driver, bool, 0444);
+MODULE_PARM_DESC(dsp_driver, "Allow DSP driver selection (bypass this driver) "
+ "(0=off, 1=on) (default=1)");
#ifdef CONFIG_PM
static int param_set_xint(const char *val, const struct kernel_param *kp);
@@ -368,8 +369,6 @@ enum {
((pci)->device == 0x160c))
#define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98)
-#define IS_CFL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa348)
-#define IS_CNL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9dc8)
static char *driver_short_names[] = {
[AZX_DRIVER_ICH] = "HDA Intel",
@@ -1280,11 +1279,17 @@ static void init_vga_switcheroo(struct azx *chip)
{
struct hda_intel *hda = container_of(chip, struct hda_intel, chip);
struct pci_dev *p = get_bound_vga(chip->pci);
+ struct pci_dev *parent;
if (p) {
dev_info(chip->card->dev,
"Handle vga_switcheroo audio client\n");
hda->use_vga_switcheroo = 1;
- chip->bus.keep_power = 1; /* cleared in either gpu_bound op or codec probe */
+
+ /* cleared in either gpu_bound op or codec probe, or when its
+ * upstream port has _PR3 (i.e. dGPU).
+ */
+ parent = pci_upstream_bridge(p);
+ chip->bus.keep_power = parent ? !pci_pr3_present(parent) : 1;
chip->driver_caps |= AZX_DCAPS_PM_RUNTIME;
pci_dev_put(p);
}
@@ -1382,8 +1387,11 @@ static int azx_free(struct azx *chip)
static int azx_dev_disconnect(struct snd_device *device)
{
struct azx *chip = device->device_data;
+ struct hdac_bus *bus = azx_bus(chip);
chip->bus.shutdown = 1;
+ cancel_work_sync(&bus->unsol_work);
+
return 0;
}
@@ -1753,10 +1761,6 @@ static int azx_create(struct snd_card *card, struct pci_dev *pci,
if (!azx_snoop(chip))
azx_bus(chip)->dma_type = SNDRV_DMA_TYPE_DEV_UC;
- /* Workaround for a communication error on CFL (bko#199007) and CNL */
- if (IS_CFL(pci) || IS_CNL(pci))
- azx_bus(chip)->polling_mode = 1;
-
if (chip->driver_type == AZX_DRIVER_NVIDIA) {
dev_dbg(chip->card->dev, "Enable delay in RIRB handling\n");
chip->bus.needs_damn_long_delay = 1;
@@ -2020,25 +2024,6 @@ static const struct hda_controller_ops pci_hda_ops = {
.position_check = azx_position_check,
};
-static int azx_check_dmic(struct pci_dev *pci, struct azx *chip)
-{
- struct nhlt_acpi_table *nhlt;
- int ret = 0;
-
- if (chip->driver_type == AZX_DRIVER_SKL &&
- pci->class != 0x040300) {
- nhlt = intel_nhlt_init(&pci->dev);
- if (nhlt) {
- if (intel_nhlt_get_dmic_geo(&pci->dev, nhlt)) {
- ret = -ENODEV;
- dev_info(&pci->dev, "Digital mics found on Skylake+ platform, aborting probe\n");
- }
- intel_nhlt_free(nhlt);
- }
- }
- return ret;
-}
-
static int azx_probe(struct pci_dev *pci,
const struct pci_device_id *pci_id)
{
@@ -2056,6 +2041,16 @@ static int azx_probe(struct pci_dev *pci,
return -ENOENT;
}
+ /*
+ * stop the probe if another Intel DSP driver should be activated
+ */
+ if (dsp_driver) {
+ err = snd_intel_dsp_driver_probe(pci);
+ if (err != SND_INTEL_DSP_DRIVER_ANY &&
+ err != SND_INTEL_DSP_DRIVER_LEGACY)
+ return -ENODEV;
+ }
+
err = snd_card_new(&pci->dev, index[dev], id[dev], THIS_MODULE,
0, &card);
if (err < 0) {
@@ -2069,17 +2064,6 @@ static int azx_probe(struct pci_dev *pci,
card->private_data = chip;
hda = container_of(chip, struct hda_intel, chip);
- /*
- * stop probe if digital microphones detected on Skylake+ platform
- * with the DSP enabled. This is an opt-in behavior defined at build
- * time or at run-time with a module parameter
- */
- if (dmic_detect) {
- err = azx_check_dmic(pci, chip);
- if (err < 0)
- goto out_free;
- }
-
pci_set_drvdata(pci, card);
err = register_vga_switcheroo(chip);
@@ -2396,6 +2380,9 @@ static const struct pci_device_id azx_ids[] = {
/* CometLake-H */
{ PCI_DEVICE(0x8086, 0x06C8),
.driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
+ /* CometLake-S */
+ { PCI_DEVICE(0x8086, 0xa3f0),
+ .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
/* Icelake */
{ PCI_DEVICE(0x8086, 0x34c8),
.driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
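For reference when reading the dsp_driver gate added to azx_probe(): snd_intel_dsp_driver_probe() keys off pci->class, which packs the base class, subclass and programming interface into 24 bits. A standalone decoder for the values named in the driver's comments (0x040300 legacy HDA, 0x040100 DSP present, 0x040380 either):

#include <stdio.h>

static void decode(unsigned int class)
{
	printf("0x%06x: class=%02x subclass=%02x prog-if=%02x\n",
	       class,
	       (class >> 16) & 0xff,	/* 0x04 = multimedia */
	       (class >> 8) & 0xff,	/* 0x03 = HDA, 0x01 = DSP present */
	       class & 0xff);
}

int main(void)
{
	decode(0x040300);	/* no DSP, legacy driver */
	decode(0x040100);	/* DSP present */
	decode(0x040380);	/* DSP or legacy mode */
	return 0;
}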
diff --git a/sound/pci/hda/hda_jack.c b/sound/pci/hda/hda_jack.c
index 1fb7b06457ae..bf0255cb0515 100644
--- a/sound/pci/hda/hda_jack.c
+++ b/sound/pci/hda/hda_jack.c
@@ -43,7 +43,7 @@ bool is_jack_detectable(struct hda_codec *codec, hda_nid_t nid)
EXPORT_SYMBOL_GPL(is_jack_detectable);
/* execute pin sense measurement */
-static u32 read_pin_sense(struct hda_codec *codec, hda_nid_t nid)
+static u32 read_pin_sense(struct hda_codec *codec, hda_nid_t nid, int dev_id)
{
u32 pincap;
u32 val;
@@ -55,19 +55,20 @@ static u32 read_pin_sense(struct hda_codec *codec, hda_nid_t nid)
AC_VERB_SET_PIN_SENSE, 0);
}
val = snd_hda_codec_read(codec, nid, 0,
- AC_VERB_GET_PIN_SENSE, 0);
+ AC_VERB_GET_PIN_SENSE, dev_id);
if (codec->inv_jack_detect)
val ^= AC_PINSENSE_PRESENCE;
return val;
}
/**
- * snd_hda_jack_tbl_get - query the jack-table entry for the given NID
+ * snd_hda_jack_tbl_get_mst - query the jack-table entry for the given NID
* @codec: the HDA codec
* @nid: pin NID to refer to
+ * @dev_id: pin device entry id
*/
struct hda_jack_tbl *
-snd_hda_jack_tbl_get(struct hda_codec *codec, hda_nid_t nid)
+snd_hda_jack_tbl_get_mst(struct hda_codec *codec, hda_nid_t nid, int dev_id)
{
struct hda_jack_tbl *jack = codec->jacktbl.list;
int i;
@@ -75,19 +76,21 @@ snd_hda_jack_tbl_get(struct hda_codec *codec, hda_nid_t nid)
if (!nid || !jack)
return NULL;
for (i = 0; i < codec->jacktbl.used; i++, jack++)
- if (jack->nid == nid)
+ if (jack->nid == nid && jack->dev_id == dev_id)
return jack;
return NULL;
}
-EXPORT_SYMBOL_GPL(snd_hda_jack_tbl_get);
+EXPORT_SYMBOL_GPL(snd_hda_jack_tbl_get_mst);
/**
* snd_hda_jack_tbl_get_from_tag - query the jack-table entry for the given tag
* @codec: the HDA codec
* @tag: tag value to refer to
+ * @dev_id: pin device entry id
*/
struct hda_jack_tbl *
-snd_hda_jack_tbl_get_from_tag(struct hda_codec *codec, unsigned char tag)
+snd_hda_jack_tbl_get_from_tag(struct hda_codec *codec,
+ unsigned char tag, int dev_id)
{
struct hda_jack_tbl *jack = codec->jacktbl.list;
int i;
@@ -95,29 +98,62 @@ snd_hda_jack_tbl_get_from_tag(struct hda_codec *codec, unsigned char tag)
if (!tag || !jack)
return NULL;
for (i = 0; i < codec->jacktbl.used; i++, jack++)
- if (jack->tag == tag)
+ if (jack->tag == tag && jack->dev_id == dev_id)
return jack;
return NULL;
}
EXPORT_SYMBOL_GPL(snd_hda_jack_tbl_get_from_tag);
+static struct hda_jack_tbl *
+any_jack_tbl_get_from_nid(struct hda_codec *codec, hda_nid_t nid)
+{
+ struct hda_jack_tbl *jack = codec->jacktbl.list;
+ int i;
+
+ if (!nid || !jack)
+ return NULL;
+ for (i = 0; i < codec->jacktbl.used; i++, jack++)
+ if (jack->nid == nid)
+ return jack;
+ return NULL;
+}
+
/**
* snd_hda_jack_tbl_new - create a jack-table entry for the given NID
* @codec: the HDA codec
* @nid: pin NID to assign
*/
static struct hda_jack_tbl *
-snd_hda_jack_tbl_new(struct hda_codec *codec, hda_nid_t nid)
+snd_hda_jack_tbl_new(struct hda_codec *codec, hda_nid_t nid, int dev_id)
{
- struct hda_jack_tbl *jack = snd_hda_jack_tbl_get(codec, nid);
+ struct hda_jack_tbl *jack =
+ snd_hda_jack_tbl_get_mst(codec, nid, dev_id);
+ struct hda_jack_tbl *existing_nid_jack =
+ any_jack_tbl_get_from_nid(codec, nid);
+
+ WARN_ON(dev_id != 0 && !codec->dp_mst);
+
if (jack)
return jack;
jack = snd_array_new(&codec->jacktbl);
if (!jack)
return NULL;
jack->nid = nid;
+ jack->dev_id = dev_id;
jack->jack_dirty = 1;
- jack->tag = codec->jacktbl.used;
+ if (existing_nid_jack) {
+ jack->tag = existing_nid_jack->tag;
+
+ /*
+ * Copy jack_detect from existing_nid_jack to avoid
+ * snd_hda_jack_detect_enable_callback_mst() making multiple
+ * SET_UNSOLICITED_ENABLE calls on the same pin.
+ */
+ jack->jack_detect = existing_nid_jack->jack_detect;
+ } else {
+ jack->tag = codec->jacktbl.used;
+ }
+
return jack;
}
@@ -153,10 +189,12 @@ static void jack_detect_update(struct hda_codec *codec,
if (jack->phantom_jack)
jack->pin_sense = AC_PINSENSE_PRESENCE;
else
- jack->pin_sense = read_pin_sense(codec, jack->nid);
+ jack->pin_sense = read_pin_sense(codec, jack->nid,
+ jack->dev_id);
/* A gating jack indicates the jack is invalid if gating is unplugged */
- if (jack->gating_jack && !snd_hda_jack_detect(codec, jack->gating_jack))
+ if (jack->gating_jack &&
+ !snd_hda_jack_detect_mst(codec, jack->gating_jack, jack->dev_id))
jack->pin_sense &= ~AC_PINSENSE_PRESENCE;
jack->jack_dirty = 0;
@@ -164,7 +202,8 @@ static void jack_detect_update(struct hda_codec *codec,
/* If a jack is gated by this one update it. */
if (jack->gated_jack) {
struct hda_jack_tbl *gated =
- snd_hda_jack_tbl_get(codec, jack->gated_jack);
+ snd_hda_jack_tbl_get_mst(codec, jack->gated_jack,
+ jack->dev_id);
if (gated) {
gated->jack_dirty = 1;
jack_detect_update(codec, gated);
@@ -191,63 +230,69 @@ void snd_hda_jack_set_dirty_all(struct hda_codec *codec)
EXPORT_SYMBOL_GPL(snd_hda_jack_set_dirty_all);
/**
- * snd_hda_pin_sense - execute pin sense measurement
+ * snd_hda_jack_pin_sense - execute pin sense measurement
* @codec: the CODEC to sense
* @nid: the pin NID to sense
*
* Execute necessary pin sense measurement and return its Presence Detect,
* Impedance, ELD Valid etc. status bits.
*/
-u32 snd_hda_pin_sense(struct hda_codec *codec, hda_nid_t nid)
+u32 snd_hda_jack_pin_sense(struct hda_codec *codec, hda_nid_t nid, int dev_id)
{
- struct hda_jack_tbl *jack = snd_hda_jack_tbl_get(codec, nid);
+ struct hda_jack_tbl *jack =
+ snd_hda_jack_tbl_get_mst(codec, nid, dev_id);
if (jack) {
jack_detect_update(codec, jack);
return jack->pin_sense;
}
- return read_pin_sense(codec, nid);
+ return read_pin_sense(codec, nid, dev_id);
}
-EXPORT_SYMBOL_GPL(snd_hda_pin_sense);
+EXPORT_SYMBOL_GPL(snd_hda_jack_pin_sense);
/**
- * snd_hda_jack_detect_state - query pin Presence Detect status
+ * snd_hda_jack_detect_state_mst - query pin Presence Detect status
* @codec: the CODEC to sense
* @nid: the pin NID to sense
+ * @dev_id: pin device entry id
*
* Query and return the pin's Presence Detect status, as either
* HDA_JACK_NOT_PRESENT, HDA_JACK_PRESENT or HDA_JACK_PHANTOM.
*/
-int snd_hda_jack_detect_state(struct hda_codec *codec, hda_nid_t nid)
+int snd_hda_jack_detect_state_mst(struct hda_codec *codec,
+ hda_nid_t nid, int dev_id)
{
- struct hda_jack_tbl *jack = snd_hda_jack_tbl_get(codec, nid);
+ struct hda_jack_tbl *jack =
+ snd_hda_jack_tbl_get_mst(codec, nid, dev_id);
if (jack && jack->phantom_jack)
return HDA_JACK_PHANTOM;
- else if (snd_hda_pin_sense(codec, nid) & AC_PINSENSE_PRESENCE)
+ else if (snd_hda_jack_pin_sense(codec, nid, dev_id) &
+ AC_PINSENSE_PRESENCE)
return HDA_JACK_PRESENT;
else
return HDA_JACK_NOT_PRESENT;
}
-EXPORT_SYMBOL_GPL(snd_hda_jack_detect_state);
+EXPORT_SYMBOL_GPL(snd_hda_jack_detect_state_mst);
/**
- * snd_hda_jack_detect_enable - enable the jack-detection
+ * snd_hda_jack_detect_enable_callback_mst - enable the jack-detection
* @codec: the HDA codec
* @nid: pin NID to enable
+ * @dev_id: pin device entry id
* @func: callback function to register
*
* In the case of error, the return value will be a pointer embedded with
* errno. Check and handle the return value appropriately with standard
* macros such as @IS_ERR() and @PTR_ERR().
*/
struct hda_jack_callback *
-snd_hda_jack_detect_enable_callback(struct hda_codec *codec, hda_nid_t nid,
- hda_jack_callback_fn func)
+snd_hda_jack_detect_enable_callback_mst(struct hda_codec *codec, hda_nid_t nid,
+ int dev_id, hda_jack_callback_fn func)
{
struct hda_jack_tbl *jack;
struct hda_jack_callback *callback = NULL;
int err;
- jack = snd_hda_jack_tbl_new(codec, nid);
+ jack = snd_hda_jack_tbl_new(codec, nid, dev_id);
if (!jack)
return ERR_PTR(-ENOMEM);
if (func) {
@@ -256,6 +301,7 @@ snd_hda_jack_detect_enable_callback(struct hda_codec *codec, hda_nid_t nid,
return ERR_PTR(-ENOMEM);
callback->func = func;
callback->nid = jack->nid;
+ callback->dev_id = jack->dev_id;
callback->next = jack->callback;
jack->callback = callback;
}
@@ -272,19 +318,24 @@ snd_hda_jack_detect_enable_callback(struct hda_codec *codec, hda_nid_t nid,
return ERR_PTR(err);
return callback;
}
-EXPORT_SYMBOL_GPL(snd_hda_jack_detect_enable_callback);
+EXPORT_SYMBOL_GPL(snd_hda_jack_detect_enable_callback_mst);
/**
* snd_hda_jack_detect_enable - Enable the jack detection on the given pin
* @codec: the HDA codec
* @nid: pin NID to enable jack detection
+ * @dev_id: pin device entry id
*
* Enable the jack detection with the default callback. Returns zero if
* successful or a negative error code.
*/
-int snd_hda_jack_detect_enable(struct hda_codec *codec, hda_nid_t nid)
+int snd_hda_jack_detect_enable(struct hda_codec *codec, hda_nid_t nid,
+ int dev_id)
{
- return PTR_ERR_OR_ZERO(snd_hda_jack_detect_enable_callback(codec, nid, NULL));
+ return PTR_ERR_OR_ZERO(snd_hda_jack_detect_enable_callback_mst(codec,
+ nid,
+ dev_id,
+ NULL));
}
EXPORT_SYMBOL_GPL(snd_hda_jack_detect_enable);
@@ -299,8 +350,11 @@ EXPORT_SYMBOL_GPL(snd_hda_jack_detect_enable);
int snd_hda_jack_set_gating_jack(struct hda_codec *codec, hda_nid_t gated_nid,
hda_nid_t gating_nid)
{
- struct hda_jack_tbl *gated = snd_hda_jack_tbl_new(codec, gated_nid);
- struct hda_jack_tbl *gating = snd_hda_jack_tbl_new(codec, gating_nid);
+ struct hda_jack_tbl *gated = snd_hda_jack_tbl_new(codec, gated_nid, 0);
+ struct hda_jack_tbl *gating =
+ snd_hda_jack_tbl_new(codec, gating_nid, 0);
+
+ WARN_ON(codec->dp_mst);
if (!gated || !gating)
return -EINVAL;
@@ -376,9 +430,10 @@ static void hda_free_jack_priv(struct snd_jack *jack)
}
/**
- * snd_hda_jack_add_kctl - Add a kctl for the given pin
+ * snd_hda_jack_add_kctl_mst - Add a kctl for the given pin
* @codec: the HDA codec
* @nid: pin NID to assign
+ * @dev_id: pin device entry id
* @name: string name for the jack
* @phantom_jack: flag to deal as a phantom jack
* @type: jack type bits to be reported, 0 for guessing from pincfg
@@ -387,15 +442,15 @@ static void hda_free_jack_priv(struct snd_jack *jack)
* This assigns a jack-detection kctl to the given pin. The kcontrol
* will have the given name and index.
*/
-int snd_hda_jack_add_kctl(struct hda_codec *codec, hda_nid_t nid,
- const char *name, bool phantom_jack,
- int type, const struct hda_jack_keymap *keymap)
+int snd_hda_jack_add_kctl_mst(struct hda_codec *codec, hda_nid_t nid,
+ int dev_id, const char *name, bool phantom_jack,
+ int type, const struct hda_jack_keymap *keymap)
{
struct hda_jack_tbl *jack;
const struct hda_jack_keymap *map;
int err, state, buttons;
- jack = snd_hda_jack_tbl_new(codec, nid);
+ jack = snd_hda_jack_tbl_new(codec, nid, dev_id);
if (!jack)
return 0;
if (jack->jack)
@@ -425,12 +480,12 @@ int snd_hda_jack_add_kctl(struct hda_codec *codec, hda_nid_t nid,
snd_jack_set_key(jack->jack, map->type, map->key);
}
- state = snd_hda_jack_detect(codec, nid);
+ state = snd_hda_jack_detect_mst(codec, nid, dev_id);
snd_jack_report(jack->jack, state ? jack->type : 0);
return 0;
}
-EXPORT_SYMBOL_GPL(snd_hda_jack_add_kctl);
+EXPORT_SYMBOL_GPL(snd_hda_jack_add_kctl_mst);
static int add_jack_kctl(struct hda_codec *codec, hda_nid_t nid,
const struct auto_pin_cfg *cfg,
@@ -441,6 +496,8 @@ static int add_jack_kctl(struct hda_codec *codec, hda_nid_t nid,
int err;
bool phantom_jack;
+ WARN_ON(codec->dp_mst);
+
if (!nid)
return 0;
def_conf = snd_hda_codec_get_pincfg(codec, nid);
@@ -462,7 +519,7 @@ static int add_jack_kctl(struct hda_codec *codec, hda_nid_t nid,
return err;
if (!phantom_jack)
- return snd_hda_jack_detect_enable(codec, nid);
+ return snd_hda_jack_detect_enable(codec, nid, 0);
return 0;
}
@@ -540,7 +597,8 @@ static void call_jack_callback(struct hda_codec *codec, unsigned int res,
}
if (jack->gated_jack) {
struct hda_jack_tbl *gated =
- snd_hda_jack_tbl_get(codec, jack->gated_jack);
+ snd_hda_jack_tbl_get_mst(codec, jack->gated_jack,
+ jack->dev_id);
if (gated) {
for (cb = gated->callback; cb; cb = cb->next) {
cb->jack = gated;
@@ -561,7 +619,14 @@ void snd_hda_jack_unsol_event(struct hda_codec *codec, unsigned int res)
struct hda_jack_tbl *event;
int tag = (res & AC_UNSOL_RES_TAG) >> AC_UNSOL_RES_TAG_SHIFT;
- event = snd_hda_jack_tbl_get_from_tag(codec, tag);
+ if (codec->dp_mst) {
+ int dev_entry =
+ (res & AC_UNSOL_RES_DE) >> AC_UNSOL_RES_DE_SHIFT;
+
+ event = snd_hda_jack_tbl_get_from_tag(codec, tag, dev_entry);
+ } else {
+ event = snd_hda_jack_tbl_get_from_tag(codec, tag, 0);
+ }
if (!event)
return;
event->jack_dirty = 1;
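
For reference, both decode paths in this hunk follow the HD-audio unsolicited-response layout: the tag and, on DP-MST codecs, the Device Entry field are extracted with the existing AC_UNSOL_RES_* masks and shifts. A minimal consolidated sketch, where lookup_unsol_event() is a hypothetical helper name:

    static struct hda_jack_tbl *
    lookup_unsol_event(struct hda_codec *codec, unsigned int res)
    {
    	int tag = (res & AC_UNSOL_RES_TAG) >> AC_UNSOL_RES_TAG_SHIFT;
    	/* the Device Entry field is meaningful only on DP-MST codecs */
    	int dev_entry = codec->dp_mst ?
    		(res & AC_UNSOL_RES_DE) >> AC_UNSOL_RES_DE_SHIFT : 0;

    	return snd_hda_jack_tbl_get_from_tag(codec, tag, dev_entry);
    }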
diff --git a/sound/pci/hda/hda_jack.h b/sound/pci/hda/hda_jack.h
index 22fe7ee43e82..727b6d3ba454 100644
--- a/sound/pci/hda/hda_jack.h
+++ b/sound/pci/hda/hda_jack.h
@@ -19,6 +19,7 @@ typedef void (*hda_jack_callback_fn) (struct hda_codec *, struct hda_jack_callba
struct hda_jack_callback {
hda_nid_t nid;
+ int dev_id;
hda_jack_callback_fn func;
unsigned int private_data; /* arbitrary data */
unsigned int unsol_res; /* unsolicited event bits */
@@ -28,6 +29,7 @@ struct hda_jack_callback {
struct hda_jack_tbl {
hda_nid_t nid;
+ int dev_id;
unsigned char tag; /* unsol event tag */
struct hda_jack_callback *callback;
/* jack-detection stuff */
@@ -49,46 +51,129 @@ struct hda_jack_keymap {
};
struct hda_jack_tbl *
-snd_hda_jack_tbl_get(struct hda_codec *codec, hda_nid_t nid);
+snd_hda_jack_tbl_get_mst(struct hda_codec *codec, hda_nid_t nid, int dev_id);
+
+/**
+ * snd_hda_jack_tbl_get - query the jack-table entry for the given NID
+ * @codec: the HDA codec
+ * @nid: pin NID to refer to
+ */
+static inline struct hda_jack_tbl *
+snd_hda_jack_tbl_get(struct hda_codec *codec, hda_nid_t nid)
+{
+ return snd_hda_jack_tbl_get_mst(codec, nid, 0);
+}
+
struct hda_jack_tbl *
-snd_hda_jack_tbl_get_from_tag(struct hda_codec *codec, unsigned char tag);
+snd_hda_jack_tbl_get_from_tag(struct hda_codec *codec,
+ unsigned char tag, int dev_id);
void snd_hda_jack_tbl_clear(struct hda_codec *codec);
void snd_hda_jack_set_dirty_all(struct hda_codec *codec);
-int snd_hda_jack_detect_enable(struct hda_codec *codec, hda_nid_t nid);
+int snd_hda_jack_detect_enable(struct hda_codec *codec, hda_nid_t nid,
+ int dev_id);
+
struct hda_jack_callback *
+snd_hda_jack_detect_enable_callback_mst(struct hda_codec *codec, hda_nid_t nid,
+ int dev_id, hda_jack_callback_fn cb);
+
+/**
+ * snd_hda_jack_detect_enable_callback - enable jack detection with a callback
+ * @codec: the HDA codec
+ * @nid: pin NID to enable
+ * @cb: callback function to register
+ *
+ * In the case of error, the return value will be an error pointer with the
+ * errno embedded. Check and handle the return value with standard macros
+ * such as IS_ERR() and PTR_ERR().
+ */
+static inline struct hda_jack_callback *
snd_hda_jack_detect_enable_callback(struct hda_codec *codec, hda_nid_t nid,
- hda_jack_callback_fn cb);
+ hda_jack_callback_fn cb)
+{
+ return snd_hda_jack_detect_enable_callback_mst(codec, nid, 0, cb);
+}
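
The wrapper above illustrates the compatibility scheme used throughout this patch: the _mst variants take an explicit device entry, and the old entry points become inline wrappers that pass dev_id 0. A hedged usage sketch, where pin NID 0x05, device entry 1 and my_cb are made-up example values (error handling of the returned pointer omitted):

    static void my_cb(struct hda_codec *codec, struct hda_jack_callback *cb)
    {
    	/* example callback body intentionally left empty */
    }

    static void example_enable(struct hda_codec *codec)
    {
    	/* legacy caller: unchanged API, implicitly device entry 0 */
    	snd_hda_jack_detect_enable_callback(codec, 0x05, my_cb);

    	/* MST-aware caller: address a specific device entry on the pin */
    	snd_hda_jack_detect_enable_callback_mst(codec, 0x05, 1, my_cb);
    }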
int snd_hda_jack_set_gating_jack(struct hda_codec *codec, hda_nid_t gated_nid,
hda_nid_t gating_nid);
-u32 snd_hda_pin_sense(struct hda_codec *codec, hda_nid_t nid);
+u32 snd_hda_jack_pin_sense(struct hda_codec *codec, hda_nid_t nid, int dev_id);
/* the jack state returned from snd_hda_jack_detect_state() */
enum {
HDA_JACK_NOT_PRESENT, HDA_JACK_PRESENT, HDA_JACK_PHANTOM,
};
-int snd_hda_jack_detect_state(struct hda_codec *codec, hda_nid_t nid);
+int snd_hda_jack_detect_state_mst(struct hda_codec *codec, hda_nid_t nid,
+ int dev_id);
+
+/**
+ * snd_hda_jack_detect_state - query pin Presence Detect status
+ * @codec: the CODEC to sense
+ * @nid: the pin NID to sense
+ *
+ * Query and return the pin's Presence Detect status, as either
+ * HDA_JACK_NOT_PRESENT, HDA_JACK_PRESENT or HDA_JACK_PHANTOM.
+ */
+static inline int
+snd_hda_jack_detect_state(struct hda_codec *codec, hda_nid_t nid)
+{
+ return snd_hda_jack_detect_state_mst(codec, nid, 0);
+}
+
+/**
+ * snd_hda_jack_detect_mst - Detect the jack
+ * @codec: the HDA codec
+ * @nid: pin NID to check jack detection
+ * @dev_id: pin device entry id
+ */
+static inline bool
+snd_hda_jack_detect_mst(struct hda_codec *codec, hda_nid_t nid, int dev_id)
+{
+ return snd_hda_jack_detect_state_mst(codec, nid, dev_id) !=
+ HDA_JACK_NOT_PRESENT;
+}
/**
* snd_hda_jack_detect - Detect the jack
* @codec: the HDA codec
* @nid: pin NID to check jack detection
*/
-static inline bool snd_hda_jack_detect(struct hda_codec *codec, hda_nid_t nid)
+static inline bool
+snd_hda_jack_detect(struct hda_codec *codec, hda_nid_t nid)
{
- return snd_hda_jack_detect_state(codec, nid) != HDA_JACK_NOT_PRESENT;
+ return snd_hda_jack_detect_mst(codec, nid, 0);
}
bool is_jack_detectable(struct hda_codec *codec, hda_nid_t nid);
-int snd_hda_jack_add_kctl(struct hda_codec *codec, hda_nid_t nid,
- const char *name, bool phantom_jack,
- int type, const struct hda_jack_keymap *keymap);
+int snd_hda_jack_add_kctl_mst(struct hda_codec *codec, hda_nid_t nid,
+ int dev_id, const char *name, bool phantom_jack,
+ int type, const struct hda_jack_keymap *keymap);
+
+/**
+ * snd_hda_jack_add_kctl - Add a kctl for the given pin
+ * @codec: the HDA codec
+ * @nid: pin NID to assign
+ * @name: string name for the jack
+ * @phantom_jack: flag to deal as a phantom jack
+ * @type: jack type bits to be reported, 0 for guessing from pincfg
+ * @keymap: optional jack / key mapping
+ *
+ * This assigns a jack-detection kctl to the given pin. The kcontrol
+ * will have the given name and index.
+ */
+static inline int
+snd_hda_jack_add_kctl(struct hda_codec *codec, hda_nid_t nid,
+ const char *name, bool phantom_jack,
+ int type, const struct hda_jack_keymap *keymap)
+{
+ return snd_hda_jack_add_kctl_mst(codec, nid, 0,
+ name, phantom_jack, type, keymap);
+}
+
int snd_hda_jack_add_kctls(struct hda_codec *codec,
const struct auto_pin_cfg *cfg);
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 968d3caab6ac..90aa0f400a57 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -910,6 +910,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
SND_PCI_QUIRK(0x103c, 0x837f, "HP ProBook 470 G5", CXT_FIXUP_MUTE_LED_GPIO),
SND_PCI_QUIRK(0x103c, 0x8299, "HP 800 G3 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x103c, 0x829a, "HP 800 G3 DM", CXT_FIXUP_HP_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x103c, 0x8402, "HP ProBook 645 G4", CXT_FIXUP_MUTE_LED_GPIO),
SND_PCI_QUIRK(0x103c, 0x8455, "HP Z2 G4", CXT_FIXUP_HP_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x103c, 0x8456, "HP Z2 G4 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x103c, 0x8457, "HP Z2 G4 mini", CXT_FIXUP_HP_MIC_NO_PRESENCE),
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 3c720703ebb8..bffde594e204 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -37,23 +37,6 @@ static bool static_hdmi_pcm;
module_param(static_hdmi_pcm, bool, 0644);
MODULE_PARM_DESC(static_hdmi_pcm, "Don't restrict PCM parameters per ELD info");
-#define is_haswell(codec) ((codec)->core.vendor_id == 0x80862807)
-#define is_broadwell(codec) ((codec)->core.vendor_id == 0x80862808)
-#define is_skylake(codec) ((codec)->core.vendor_id == 0x80862809)
-#define is_broxton(codec) ((codec)->core.vendor_id == 0x8086280a)
-#define is_kabylake(codec) ((codec)->core.vendor_id == 0x8086280b)
-#define is_geminilake(codec) (((codec)->core.vendor_id == 0x8086280d) || \
- ((codec)->core.vendor_id == 0x80862800))
-#define is_cannonlake(codec) ((codec)->core.vendor_id == 0x8086280c)
-#define is_icelake(codec) ((codec)->core.vendor_id == 0x8086280f)
-#define is_haswell_plus(codec) (is_haswell(codec) || is_broadwell(codec) \
- || is_skylake(codec) || is_broxton(codec) \
- || is_kabylake(codec) || is_geminilake(codec) \
- || is_cannonlake(codec) || is_icelake(codec))
-#define is_valleyview(codec) ((codec)->core.vendor_id == 0x80862882)
-#define is_cherryview(codec) ((codec)->core.vendor_id == 0x80862883)
-#define is_valleyview_plus(codec) (is_valleyview(codec) || is_cherryview(codec))
-
struct hdmi_spec_per_cvt {
hda_nid_t cvt_nid;
int assigned;
@@ -97,16 +80,19 @@ struct hdmi_spec_per_pin {
/* operations used by generic code that can be overridden by patches */
struct hdmi_ops {
int (*pin_get_eld)(struct hda_codec *codec, hda_nid_t pin_nid,
- unsigned char *buf, int *eld_size);
+ int dev_id, unsigned char *buf, int *eld_size);
void (*pin_setup_infoframe)(struct hda_codec *codec, hda_nid_t pin_nid,
+ int dev_id,
int ca, int active_channels, int conn_type);
/* enable/disable HBR (HD passthrough) */
- int (*pin_hbr_setup)(struct hda_codec *codec, hda_nid_t pin_nid, bool hbr);
+ int (*pin_hbr_setup)(struct hda_codec *codec, hda_nid_t pin_nid,
+ int dev_id, bool hbr);
int (*setup_stream)(struct hda_codec *codec, hda_nid_t cvt_nid,
- hda_nid_t pin_nid, u32 stream_tag, int format);
+ hda_nid_t pin_nid, int dev_id, u32 stream_tag,
+ int format);
void (*pin_cvt_fixup)(struct hda_codec *codec,
struct hdmi_spec_per_pin *per_pin,
@@ -160,6 +146,7 @@ struct hdmi_spec {
bool dyn_pin_out;
bool dyn_pcm_assign;
+ bool intel_hsw_fixup; /* apply Intel platform-specific fixups */
/*
* Non-generic VIA/NVIDIA specific
*/
@@ -652,8 +639,16 @@ static bool hdmi_infoframe_uptodate(struct hda_codec *codec, hda_nid_t pin_nid,
return true;
}
+static int hdmi_pin_get_eld(struct hda_codec *codec, hda_nid_t nid,
+ int dev_id, unsigned char *buf, int *eld_size)
+{
+ snd_hda_set_dev_select(codec, nid, dev_id);
+
+ return snd_hdmi_get_eld(codec, nid, buf, eld_size);
+}
+
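
hdmi_pin_get_eld() above shows the pattern used for every per-device-entry verb in this patch: select the device entry on the pin first, then issue the verb, because the selection is stateful per pin widget. A minimal sketch of the same pattern for a pin-sense read (read_pin_sense_mst is a hypothetical name):

    static u32 read_pin_sense_mst(struct hda_codec *codec, hda_nid_t pin_nid,
    			      int dev_id)
    {
    	/* route subsequent verbs on this pin to the chosen device entry */
    	snd_hda_set_dev_select(codec, pin_nid, dev_id);

    	return snd_hda_codec_read(codec, pin_nid, 0,
    				  AC_VERB_GET_PIN_SENSE, 0);
    }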
static void hdmi_pin_setup_infoframe(struct hda_codec *codec,
- hda_nid_t pin_nid,
+ hda_nid_t pin_nid, int dev_id,
int ca, int active_channels,
int conn_type)
{
@@ -683,6 +678,8 @@ static void hdmi_pin_setup_infoframe(struct hda_codec *codec,
return;
}
+ snd_hda_set_dev_select(codec, pin_nid, dev_id);
+
/*
* sizeof(ai) is used instead of sizeof(*hdmi_ai) or
* sizeof(*dp_ai) to avoid partial match/update problems when
@@ -708,6 +705,7 @@ static void hdmi_setup_audio_infoframe(struct hda_codec *codec,
struct hdmi_spec *spec = codec->spec;
struct hdac_chmap *chmap = &spec->chmap;
hda_nid_t pin_nid = per_pin->pin_nid;
+ int dev_id = per_pin->dev_id;
int channels = per_pin->channels;
int active_channels;
struct hdmi_eld *eld;
@@ -716,6 +714,8 @@ static void hdmi_setup_audio_infoframe(struct hda_codec *codec,
if (!channels)
return;
+ snd_hda_set_dev_select(codec, pin_nid, dev_id);
+
/* some HW (e.g. HSW+) needs the amp reprogrammed each time */
if (get_wcaps(codec, pin_nid) & AC_WCAP_OUT_AMP)
snd_hda_codec_write(codec, pin_nid, 0,
@@ -741,8 +741,8 @@ static void hdmi_setup_audio_infoframe(struct hda_codec *codec,
pin_nid, non_pcm, ca, channels,
per_pin->chmap, per_pin->chmap_set);
- spec->ops.pin_setup_infoframe(codec, pin_nid, ca, active_channels,
- eld->info.conn_type);
+ spec->ops.pin_setup_infoframe(codec, pin_nid, dev_id,
+ ca, active_channels, eld->info.conn_type);
per_pin->non_pcm = non_pcm;
}
@@ -774,34 +774,32 @@ static void jack_callback(struct hda_codec *codec,
if (codec_has_acomp(codec))
return;
- /* hda_jack don't support DP MST */
- check_presence_and_report(codec, jack->nid, 0);
+ check_presence_and_report(codec, jack->nid, jack->dev_id);
}
static void hdmi_intrinsic_event(struct hda_codec *codec, unsigned int res)
{
int tag = res >> AC_UNSOL_RES_TAG_SHIFT;
struct hda_jack_tbl *jack;
- int dev_entry = (res & AC_UNSOL_RES_DE) >> AC_UNSOL_RES_DE_SHIFT;
- /*
- * assume DP MST uses dyn_pcm_assign and acomp and
- * never comes here
- * if DP MST supports unsol event, below code need
- * consider dev_entry
- */
- jack = snd_hda_jack_tbl_get_from_tag(codec, tag);
+ if (codec->dp_mst) {
+ int dev_entry =
+ (res & AC_UNSOL_RES_DE) >> AC_UNSOL_RES_DE_SHIFT;
+
+ jack = snd_hda_jack_tbl_get_from_tag(codec, tag, dev_entry);
+ } else {
+ jack = snd_hda_jack_tbl_get_from_tag(codec, tag, 0);
+ }
if (!jack)
return;
jack->jack_dirty = 1;
codec_dbg(codec,
"HDMI hot plug event: Codec=%d Pin=%d Device=%d Inactive=%d Presence_Detect=%d ELD_Valid=%d\n",
- codec->addr, jack->nid, dev_entry, !!(res & AC_UNSOL_RES_IA),
+ codec->addr, jack->nid, jack->dev_id, !!(res & AC_UNSOL_RES_IA),
!!(res & AC_UNSOL_RES_PD), !!(res & AC_UNSOL_RES_ELDV));
- /* hda_jack don't support DP MST */
- check_presence_and_report(codec, jack->nid, 0);
+ check_presence_and_report(codec, jack->nid, jack->dev_id);
}
static void hdmi_non_intrinsic_event(struct hda_codec *codec, unsigned int res)
@@ -831,11 +829,21 @@ static void hdmi_unsol_event(struct hda_codec *codec, unsigned int res)
{
int tag = res >> AC_UNSOL_RES_TAG_SHIFT;
int subtag = (res & AC_UNSOL_RES_SUBTAG) >> AC_UNSOL_RES_SUBTAG_SHIFT;
+ struct hda_jack_tbl *jack;
if (codec_has_acomp(codec))
return;
- if (!snd_hda_jack_tbl_get_from_tag(codec, tag)) {
+ if (codec->dp_mst) {
+ int dev_entry =
+ (res & AC_UNSOL_RES_DE) >> AC_UNSOL_RES_DE_SHIFT;
+
+ jack = snd_hda_jack_tbl_get_from_tag(codec, tag, dev_entry);
+ } else {
+ jack = snd_hda_jack_tbl_get_from_tag(codec, tag, 0);
+ }
+
+ if (!jack) {
codec_dbg(codec, "Unexpected HDMI event tag 0x%x\n", tag);
return;
}
@@ -876,11 +884,12 @@ static void haswell_verify_D0(struct hda_codec *codec,
((format & AC_FMT_TYPE_NON_PCM) && (format & AC_FMT_CHAN_MASK) == 7)
static int hdmi_pin_hbr_setup(struct hda_codec *codec, hda_nid_t pin_nid,
- bool hbr)
+ int dev_id, bool hbr)
{
int pinctl, new_pinctl;
if (snd_hda_query_pin_caps(codec, pin_nid) & AC_PINCAP_HBR) {
+ snd_hda_set_dev_select(codec, pin_nid, dev_id);
pinctl = snd_hda_codec_read(codec, pin_nid, 0,
AC_VERB_GET_PIN_WIDGET_CONTROL, 0);
@@ -910,20 +919,22 @@ static int hdmi_pin_hbr_setup(struct hda_codec *codec, hda_nid_t pin_nid,
}
static int hdmi_setup_stream(struct hda_codec *codec, hda_nid_t cvt_nid,
- hda_nid_t pin_nid, u32 stream_tag, int format)
+ hda_nid_t pin_nid, int dev_id,
+ u32 stream_tag, int format)
{
struct hdmi_spec *spec = codec->spec;
unsigned int param;
int err;
- err = spec->ops.pin_hbr_setup(codec, pin_nid, is_hbr_format(format));
+ err = spec->ops.pin_hbr_setup(codec, pin_nid, dev_id,
+ is_hbr_format(format));
if (err) {
codec_dbg(codec, "hdmi_setup_stream: HBR is not supported\n");
return err;
}
- if (is_haswell_plus(codec)) {
+ if (spec->intel_hsw_fixup) {
/*
* on recent platforms IEC Coding Type is required for HBR
@@ -1290,6 +1301,7 @@ static int hdmi_read_pin_conn(struct hda_codec *codec, int pin_idx)
struct hdmi_spec *spec = codec->spec;
struct hdmi_spec_per_pin *per_pin = get_pin(spec, pin_idx);
hda_nid_t pin_nid = per_pin->pin_nid;
+ int dev_id = per_pin->dev_id;
if (!(get_wcaps(codec, pin_nid) & AC_WCAP_CONN_LIST)) {
codec_warn(codec,
@@ -1298,24 +1310,43 @@ static int hdmi_read_pin_conn(struct hda_codec *codec, int pin_idx)
return -EINVAL;
}
+ snd_hda_set_dev_select(codec, pin_nid, dev_id);
+
/* all the device entries on the same pin have the same conn list */
- per_pin->num_mux_nids = snd_hda_get_connections(codec, pin_nid,
- per_pin->mux_nids,
- HDA_MAX_CONNECTIONS);
+ per_pin->num_mux_nids =
+ snd_hda_get_raw_connections(codec, pin_nid, per_pin->mux_nids,
+ HDA_MAX_CONNECTIONS);
return 0;
}
static int hdmi_find_pcm_slot(struct hdmi_spec *spec,
- struct hdmi_spec_per_pin *per_pin)
+ struct hdmi_spec_per_pin *per_pin)
{
int i;
- /* try the prefer PCM */
- if (!test_bit(per_pin->pin_nid_idx, &spec->pcm_bitmap))
+ /*
+ * generic_hdmi_build_pcms() allocates (num_nids + dev_num - 1)
+ * PCMs in total.
+ *
+ * A per_pin with pin_nid_idx=n and dev_id=m prefers to get pcm-n
+ * when m==0. This guarantees that dynamic PCM assignments stay
+ * compatible with the legacy static per_pin-to-PCM assignment that
+ * existed in the days before DP-MST.
+ *
+ * A per_pin with m!=0 prefers to get pcm=(num_nids + (m - 1)).
+ */
+ if (per_pin->dev_id == 0 &&
+ !test_bit(per_pin->pin_nid_idx, &spec->pcm_bitmap))
return per_pin->pin_nid_idx;
- /* have a second try; check the "reserved area" over num_pins */
+ if (per_pin->dev_id != 0 &&
+ !(test_bit(spec->num_nids + (per_pin->dev_id - 1),
+ &spec->pcm_bitmap))) {
+ return spec->num_nids + (per_pin->dev_id - 1);
+ }
+
+ /* have a second try; check the area over num_nids */
for (i = spec->num_nids; i < spec->pcm_used; i++) {
if (!test_bit(i, &spec->pcm_bitmap))
return i;
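
Worked example of the preference rule above, with num_nids = 3 and dev_num = 2: per_pins with dev_id 0 prefer pcm 0..2 (matching the legacy layout), and any per_pin with dev_id 1 prefers pcm 3. A sketch of just the rule, with the bitmap fallback omitted:

    static int preferred_pcm_slot(struct hdmi_spec *spec,
    			      struct hdmi_spec_per_pin *per_pin)
    {
    	if (per_pin->dev_id == 0)
    		return per_pin->pin_nid_idx;

    	return spec->num_nids + (per_pin->dev_id - 1);
    }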
@@ -1509,6 +1540,7 @@ static bool hdmi_present_sense_via_verbs(struct hdmi_spec_per_pin *per_pin,
struct hdmi_spec *spec = codec->spec;
struct hdmi_eld *eld = &spec->temp_eld;
hda_nid_t pin_nid = per_pin->pin_nid;
+ int dev_id = per_pin->dev_id;
/*
* Always execute a GetPinSense verb here, even when called from
* hdmi_intrinsic_event; for some NVIDIA HW, the unsolicited
@@ -1521,7 +1553,7 @@ static bool hdmi_present_sense_via_verbs(struct hdmi_spec_per_pin *per_pin,
bool ret;
bool do_repoll = false;
- present = snd_hda_pin_sense(codec, pin_nid);
+ present = snd_hda_jack_pin_sense(codec, pin_nid, dev_id);
mutex_lock(&per_pin->lock);
eld->monitor_present = !!(present & AC_PINSENSE_PRESENCE);
@@ -1535,8 +1567,8 @@ static bool hdmi_present_sense_via_verbs(struct hdmi_spec_per_pin *per_pin,
codec->addr, pin_nid, eld->monitor_present, eld->eld_valid);
if (eld->eld_valid) {
- if (spec->ops.pin_get_eld(codec, pin_nid, eld->eld_buffer,
- &eld->eld_size) < 0)
+ if (spec->ops.pin_get_eld(codec, pin_nid, dev_id,
+ eld->eld_buffer, &eld->eld_size) < 0)
eld->eld_valid = false;
else {
if (snd_hdmi_parse_eld(codec, &eld->info, eld->eld_buffer,
@@ -1554,7 +1586,7 @@ static bool hdmi_present_sense_via_verbs(struct hdmi_spec_per_pin *per_pin,
ret = !repoll || !eld->monitor_present || eld->eld_valid;
- jack = snd_hda_jack_tbl_get(codec, pin_nid);
+ jack = snd_hda_jack_tbl_get_mst(codec, pin_nid, per_pin->dev_id);
if (jack) {
jack->block_report = !ret;
jack->pin_sense = (eld->monitor_present && eld->eld_valid) ?
@@ -1585,7 +1617,8 @@ static struct snd_jack *pin_idx_to_jack(struct hda_codec *codec,
* DP MST will use dyn_pcm_assign,
* so DP MST will never come here
*/
- jack_tbl = snd_hda_jack_tbl_get(codec, per_pin->pin_nid);
+ jack_tbl = snd_hda_jack_tbl_get_mst(codec, per_pin->pin_nid,
+ per_pin->dev_id);
if (jack_tbl)
jack = jack_tbl->jack;
}
@@ -1666,7 +1699,8 @@ static void hdmi_repoll_eld(struct work_struct *work)
struct hdmi_spec *spec = codec->spec;
struct hda_jack_tbl *jack;
- jack = snd_hda_jack_tbl_get(codec, per_pin->pin_nid);
+ jack = snd_hda_jack_tbl_get_mst(codec, per_pin->pin_nid,
+ per_pin->dev_id);
if (jack)
jack->jack_dirty = 1;
@@ -1707,7 +1741,7 @@ static int hdmi_add_pin(struct hda_codec *codec, hda_nid_t pin_nid)
* To simplify the implementation, malloc all
* the virtual pins in the initialization statically
*/
- if (is_haswell_plus(codec)) {
+ if (spec->intel_hsw_fixup) {
/*
* On Intel platforms, the number of device entries is
* changed dynamically. If there is a DP MST
@@ -1756,7 +1790,7 @@ static int hdmi_add_pin(struct hda_codec *codec, hda_nid_t pin_nid)
per_pin->dev_id = i;
per_pin->non_pcm = false;
snd_hda_set_dev_select(codec, pin_nid, i);
- if (is_haswell_plus(codec))
+ if (spec->intel_hsw_fixup)
intel_haswell_fixup_connect_list(codec, pin_nid);
err = hdmi_read_pin_conn(codec, pin_idx);
if (err < 0)
@@ -1871,7 +1905,6 @@ static int generic_hdmi_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
struct hdmi_spec *spec = codec->spec;
int pin_idx;
struct hdmi_spec_per_pin *per_pin;
- hda_nid_t pin_nid;
struct snd_pcm_runtime *runtime = substream->runtime;
bool non_pcm;
int pinctl, stripe;
@@ -1895,7 +1928,6 @@ static int generic_hdmi_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
goto unlock;
}
per_pin = get_pin(spec, pin_idx);
- pin_nid = per_pin->pin_nid;
/* Verify pin:cvt selections to avoid silent audio after S3.
* After S3, the audio driver restores pin:cvt selections
@@ -1910,8 +1942,8 @@ static int generic_hdmi_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
/* Call sync_audio_rate to set the N/CTS/M manually if necessary */
/* Todo: add DP1.2 MST audio support later */
if (codec_has_acomp(codec))
- snd_hdac_sync_audio_rate(&codec->core, pin_nid, per_pin->dev_id,
- runtime->rate);
+ snd_hdac_sync_audio_rate(&codec->core, per_pin->pin_nid,
+ per_pin->dev_id, runtime->rate);
non_pcm = check_non_pcm_per_cvt(codec, cvt_nid);
mutex_lock(&per_pin->lock);
@@ -1929,16 +1961,18 @@ static int generic_hdmi_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
hdmi_setup_audio_infoframe(codec, per_pin, non_pcm);
mutex_unlock(&per_pin->lock);
if (spec->dyn_pin_out) {
- pinctl = snd_hda_codec_read(codec, pin_nid, 0,
+ snd_hda_set_dev_select(codec, per_pin->pin_nid,
+ per_pin->dev_id);
+ pinctl = snd_hda_codec_read(codec, per_pin->pin_nid, 0,
AC_VERB_GET_PIN_WIDGET_CONTROL, 0);
- snd_hda_codec_write(codec, pin_nid, 0,
+ snd_hda_codec_write(codec, per_pin->pin_nid, 0,
AC_VERB_SET_PIN_WIDGET_CONTROL,
pinctl | PIN_OUT);
}
/* snd_hda_set_dev_select() has been called before */
- err = spec->ops.setup_stream(codec, cvt_nid, pin_nid,
- stream_tag, format);
+ err = spec->ops.setup_stream(codec, cvt_nid, per_pin->pin_nid,
+ per_pin->dev_id, stream_tag, format);
unlock:
mutex_unlock(&spec->pcm_lock);
return err;
@@ -1990,6 +2024,8 @@ static int hdmi_pcm_close(struct hda_pcm_stream *hinfo,
per_pin = get_pin(spec, pin_idx);
if (spec->dyn_pin_out) {
+ snd_hda_set_dev_select(codec, per_pin->pin_nid,
+ per_pin->dev_id);
pinctl = snd_hda_codec_read(codec, per_pin->pin_nid, 0,
AC_VERB_GET_PIN_WIDGET_CONTROL, 0);
snd_hda_codec_write(codec, per_pin->pin_nid, 0,
@@ -2073,15 +2109,24 @@ static bool is_hdmi_pcm_attached(struct hdac_device *hdac, int pcm_idx)
static int generic_hdmi_build_pcms(struct hda_codec *codec)
{
struct hdmi_spec *spec = codec->spec;
- int idx;
+ int idx, pcm_num;
/*
* for non-mst mode, pcm number is the same as before
- * for DP MST mode, pcm number is (nid number + dev_num - 1)
- * dev_num is the device entry number in a pin
- *
+ * for DP MST mode without extra PCMs, the pcm number is the same
+ * for DP MST mode with extra PCMs, the pcm number is
+ * (nid number + dev_num - 1), where dev_num is the number of
+ * device entries on a pin
*/
- for (idx = 0; idx < spec->num_nids + spec->dev_num - 1; idx++) {
+
+ if (codec->mst_no_extra_pcms)
+ pcm_num = spec->num_nids;
+ else
+ pcm_num = spec->num_nids + spec->dev_num - 1;
+
+ codec_dbg(codec, "hdmi: pcm_num set to %d\n", pcm_num);
+
+ for (idx = 0; idx < pcm_num; idx++) {
struct hda_pcm *info;
struct hda_pcm_stream *pstr;
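
For instance, a codec exposing num_nids = 3 pins with dev_num = 3 device entries per pin builds 3 PCMs when mst_no_extra_pcms is set, and 5 (= 3 + 3 - 1) otherwise. The computation condenses to:

    /* same computation as above, as a single expression */
    int pcm_num = codec->mst_no_extra_pcms ?
    	spec->num_nids : spec->num_nids + spec->dev_num - 1;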
@@ -2158,11 +2203,13 @@ static int generic_hdmi_build_jack(struct hda_codec *codec, int pcm_idx)
if (phantom_jack)
strncat(hdmi_str, " Phantom",
sizeof(hdmi_str) - strlen(hdmi_str) - 1);
- ret = snd_hda_jack_add_kctl(codec, per_pin->pin_nid, hdmi_str,
- phantom_jack, 0, NULL);
+ ret = snd_hda_jack_add_kctl_mst(codec, per_pin->pin_nid,
+ per_pin->dev_id, hdmi_str, phantom_jack,
+ 0, NULL);
if (ret < 0)
return ret;
- jack = snd_hda_jack_tbl_get(codec, per_pin->pin_nid);
+ jack = snd_hda_jack_tbl_get_mst(codec, per_pin->pin_nid,
+ per_pin->dev_id);
if (jack == NULL)
return 0;
/* assign jack->jack to pcm_rec[].jack to
@@ -2271,10 +2318,11 @@ static int generic_hdmi_init(struct hda_codec *codec)
if (codec_has_acomp(codec))
continue;
if (spec->use_jack_detect)
- snd_hda_jack_detect_enable(codec, pin_nid);
+ snd_hda_jack_detect_enable(codec, pin_nid, dev_id);
else
- snd_hda_jack_detect_enable_callback(codec, pin_nid,
- jack_callback);
+ snd_hda_jack_detect_enable_callback_mst(codec, pin_nid,
+ dev_id,
+ jack_callback);
}
mutex_unlock(&spec->bind_lock);
return 0;
@@ -2313,8 +2361,8 @@ static void generic_hdmi_free(struct hda_codec *codec)
snd_hdac_acomp_exit(&codec->bus->core);
} else if (codec_has_acomp(codec)) {
snd_hdac_acomp_register_notifier(&codec->bus->core, NULL);
- codec->relaxed_resume = 0;
}
+ codec->relaxed_resume = 0;
for (pin_idx = 0; pin_idx < spec->num_pins; pin_idx++) {
struct hdmi_spec_per_pin *per_pin = get_pin(spec, pin_idx);
@@ -2364,7 +2412,7 @@ static const struct hda_codec_ops generic_hdmi_patch_ops = {
};
static const struct hdmi_ops generic_standard_hdmi_ops = {
- .pin_get_eld = snd_hdmi_get_eld,
+ .pin_get_eld = hdmi_pin_get_eld,
.pin_setup_infoframe = hdmi_pin_setup_infoframe,
.pin_hbr_setup = hdmi_pin_hbr_setup,
.setup_stream = hdmi_setup_stream,
@@ -2424,11 +2472,11 @@ static int patch_generic_hdmi(struct hda_codec *codec)
/* turn on / off the unsol event jack detection dynamically */
static void reprogram_jack_detect(struct hda_codec *codec, hda_nid_t nid,
- bool use_acomp)
+ int dev_id, bool use_acomp)
{
struct hda_jack_tbl *tbl;
- tbl = snd_hda_jack_tbl_get(codec, nid);
+ tbl = snd_hda_jack_tbl_get_mst(codec, nid, dev_id);
if (tbl) {
/* clear unsol even if component notifier is used, or re-enable
* if notifier is cleared
@@ -2441,7 +2489,7 @@ static void reprogram_jack_detect(struct hda_codec *codec, hda_nid_t nid,
* at need (i.e. only when notifier is cleared)
*/
if (!use_acomp)
- snd_hda_jack_detect_enable(codec, nid);
+ snd_hda_jack_detect_enable(codec, nid, dev_id);
}
}
@@ -2461,6 +2509,7 @@ static void generic_acomp_notifier_set(struct drm_audio_component *acomp,
for (i = 0; i < spec->num_pins; i++)
reprogram_jack_detect(spec->codec,
get_pin(spec, i)->pin_nid,
+ get_pin(spec, i)->dev_id,
use_acomp);
}
mutex_unlock(&spec->bind_lock);
@@ -2561,7 +2610,8 @@ static void intel_haswell_fixup_connect_list(struct hda_codec *codec,
hda_nid_t conns[4];
int nconns;
- nconns = snd_hda_get_connections(codec, nid, conns, ARRAY_SIZE(conns));
+ nconns = snd_hda_get_raw_connections(codec, nid, conns,
+ ARRAY_SIZE(conns));
if (nconns == spec->num_cvts &&
!memcmp(conns, spec->cvt_nids, spec->num_cvts * sizeof(hda_nid_t)))
return;
@@ -2662,7 +2712,7 @@ static int intel_pin2port(void *audio_ptr, int pin_nid)
base_nid = intel_base_nid(codec);
if (WARN_ON(pin_nid < base_nid || pin_nid >= base_nid + 3))
return -1;
- return pin_nid - base_nid + 1; /* intel port is 1-based */
+ return pin_nid - base_nid + 1;
}
/*
@@ -2671,10 +2721,9 @@ static int intel_pin2port(void *audio_ptr, int pin_nid)
*/
for (i = 0; i < spec->port_num; i++) {
if (pin_nid == spec->port_map[i])
- return i + 1;
+ return i;
}
- /* return -1 if pin number exceeds our expectation */
codec_info(codec, "Can't find the HDMI/DP port for pin %d\n", pin_nid);
return -1;
}
@@ -2687,13 +2736,12 @@ static int intel_port2pin(struct hda_codec *codec, int port)
/* we assume only from port-B to port-D */
if (port < 1 || port > 3)
return 0;
- /* intel port is 1-based */
return port + intel_base_nid(codec) - 1;
}
- if (port < 1 || port > spec->port_num)
+ if (port < 0 || port >= spec->port_num)
return 0;
- return spec->port_map[port - 1];
+ return spec->port_map[port];
}
static void intel_pin_eld_notify(void *audio_ptr, int port, int pipe)
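
With 0-based indexing, the two lookups above are inverses over port_map: intel_port2pin() reads port_map[port], and intel_pin2port() scans for the pin and returns its index. A hedged round-trip sketch reusing the ICL table values from later in this patch:

    static const int example_map[] = {0x0, 0x4, 0x6, 0x8, 0xa, 0xb};

    static void check_port_pin_round_trip(void)
    {
    	int port, back;

    	for (port = 0; port < ARRAY_SIZE(example_map); port++) {
    		int pin = example_map[port];		/* port -> pin */

    		for (back = 0; back < ARRAY_SIZE(example_map); back++)
    			if (example_map[back] == pin)	/* pin -> port */
    				break;
    		WARN_ON(back != port);
    	}
    }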
@@ -2739,10 +2787,12 @@ static void register_i915_notifier(struct hda_codec *codec)
/* setup_stream ops override for HSW+ */
static int i915_hsw_setup_stream(struct hda_codec *codec, hda_nid_t cvt_nid,
- hda_nid_t pin_nid, u32 stream_tag, int format)
+ hda_nid_t pin_nid, int dev_id, u32 stream_tag,
+ int format)
{
haswell_verify_D0(codec, cvt_nid, pin_nid);
- return hdmi_setup_stream(codec, cvt_nid, pin_nid, stream_tag, format);
+ return hdmi_setup_stream(codec, cvt_nid, pin_nid, dev_id,
+ stream_tag, format);
}
/* pin_cvt_fixup ops override for HSW+ and VLV+ */
@@ -2814,6 +2864,7 @@ static int intel_hsw_common_init(struct hda_codec *codec, hda_nid_t vendor_nid,
spec->vendor_nid = vendor_nid;
spec->port_map = port_map;
spec->port_num = port_num;
+ spec->intel_hsw_fixup = true;
intel_haswell_enable_all_pins(codec, true);
intel_haswell_fixup_enable_dp12(codec);
@@ -2844,9 +2895,9 @@ static int patch_i915_icl_hdmi(struct hda_codec *codec)
{
/*
* pin to port mapping table where the value indicates the pin number and
- * the index indicate the port number with 1 base.
+ * the index indicates the port number.
*/
- static const int map[] = {0x4, 0x6, 0x8, 0xa, 0xb};
+ static const int map[] = {0x0, 0x4, 0x6, 0x8, 0xa, 0xb};
return intel_hsw_common_init(codec, 0x02, map, ARRAY_SIZE(map));
}
@@ -2855,14 +2906,13 @@ static int patch_i915_tgl_hdmi(struct hda_codec *codec)
{
/*
* pin to port mapping table where the value indicates the pin number and
- * the index indicate the port number with 1 base.
+ * the index indicates the port number.
*/
static const int map[] = {0x4, 0x6, 0x8, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf};
return intel_hsw_common_init(codec, 0x02, map, ARRAY_SIZE(map));
}
-
/* Intel Baytrail and Braswell; with eld notifier */
static int patch_i915_byt_hdmi(struct hda_codec *codec)
{
@@ -2968,7 +3018,7 @@ static int simple_playback_init(struct hda_codec *codec)
if (get_wcaps(codec, pin) & AC_WCAP_OUT_AMP)
snd_hda_codec_write(codec, pin, 0, AC_VERB_SET_AMP_GAIN_MUTE,
AMP_OUT_UNMUTE);
- snd_hda_jack_detect_enable(codec, pin);
+ snd_hda_jack_detect_enable(codec, pin, per_pin->dev_id);
return 0;
}
@@ -3477,11 +3527,22 @@ static int patch_nvhdmi(struct hda_codec *codec)
struct hdmi_spec *spec;
int err;
- err = patch_generic_hdmi(codec);
- if (err)
+ err = alloc_generic_hdmi(codec);
+ if (err < 0)
return err;
+ codec->dp_mst = true;
spec = codec->spec;
+ spec->dyn_pcm_assign = true;
+
+ err = hdmi_parse_codec(codec);
+ if (err < 0) {
+ generic_spec_free(codec);
+ return err;
+ }
+
+ generic_hdmi_init_per_pins(codec);
+
spec->dyn_pin_out = true;
spec->chmap.ops.chmap_cea_alloc_validate_get_type =
@@ -3495,6 +3556,27 @@ static int patch_nvhdmi(struct hda_codec *codec)
return 0;
}
+static int patch_nvhdmi_legacy(struct hda_codec *codec)
+{
+ struct hdmi_spec *spec;
+ int err;
+
+ err = patch_generic_hdmi(codec);
+ if (err)
+ return err;
+
+ spec = codec->spec;
+ spec->dyn_pin_out = true;
+
+ spec->chmap.ops.chmap_cea_alloc_validate_get_type =
+ nvhdmi_chmap_cea_alloc_validate_get_type;
+ spec->chmap.ops.chmap_validate = nvhdmi_chmap_validate;
+
+ codec->link_down_at_suspend = 1;
+
+ return 0;
+}
+
/*
* The HDA codec on NVIDIA Tegra contains two scratch registers that are
* accessed using vendor-defined verbs. These registers can be used for
@@ -3708,16 +3790,19 @@ static int patch_tegra_hdmi(struct hda_codec *codec)
#define ATI_HBR_ENABLE 0x10
static int atihdmi_pin_get_eld(struct hda_codec *codec, hda_nid_t nid,
- unsigned char *buf, int *eld_size)
+ int dev_id, unsigned char *buf, int *eld_size)
{
+ WARN_ON(dev_id != 0);
/* call hda_eld.c ATI/AMD-specific function */
return snd_hdmi_get_eld_ati(codec, nid, buf, eld_size,
is_amdhdmi_rev3_or_later(codec));
}
-static void atihdmi_pin_setup_infoframe(struct hda_codec *codec, hda_nid_t pin_nid, int ca,
+static void atihdmi_pin_setup_infoframe(struct hda_codec *codec,
+ hda_nid_t pin_nid, int dev_id, int ca,
int active_channels, int conn_type)
{
+ WARN_ON(dev_id != 0);
snd_hda_codec_write(codec, pin_nid, 0, ATI_VERB_SET_CHANNEL_ALLOCATION, ca);
}
@@ -3908,10 +3993,12 @@ static void atihdmi_paired_cea_alloc_to_tlv_chmap(struct hdac_chmap *hchmap,
}
static int atihdmi_pin_hbr_setup(struct hda_codec *codec, hda_nid_t pin_nid,
- bool hbr)
+ int dev_id, bool hbr)
{
int hbr_ctl, hbr_ctl_new;
+ WARN_ON(dev_id != 0);
+
hbr_ctl = snd_hda_codec_read(codec, pin_nid, 0, ATI_VERB_GET_HBR_CONTROL, 0);
if (hbr_ctl >= 0 && (hbr_ctl & ATI_HBR_CAPABLE)) {
if (hbr)
@@ -3937,9 +4024,9 @@ static int atihdmi_pin_hbr_setup(struct hda_codec *codec, hda_nid_t pin_nid,
}
static int atihdmi_setup_stream(struct hda_codec *codec, hda_nid_t cvt_nid,
- hda_nid_t pin_nid, u32 stream_tag, int format)
+ hda_nid_t pin_nid, int dev_id,
+ u32 stream_tag, int format)
{
-
if (is_amdhdmi_rev3_or_later(codec)) {
int ramp_rate = 180; /* default as per AMD spec */
/* disable ramp-up/down for non-pcm as per AMD spec */
@@ -3949,7 +4036,8 @@ static int atihdmi_setup_stream(struct hda_codec *codec, hda_nid_t cvt_nid,
snd_hda_codec_write(codec, cvt_nid, 0, ATI_VERB_SET_RAMP_RATE, ramp_rate);
}
- return hdmi_setup_stream(codec, cvt_nid, pin_nid, stream_tag, format);
+ return hdmi_setup_stream(codec, cvt_nid, pin_nid, dev_id,
+ stream_tag, format);
}
@@ -4079,25 +4167,25 @@ HDA_CODEC_ENTRY(0x10de0004, "GPU 04 HDMI", patch_nvhdmi_8ch_7x),
HDA_CODEC_ENTRY(0x10de0005, "MCP77/78 HDMI", patch_nvhdmi_8ch_7x),
HDA_CODEC_ENTRY(0x10de0006, "MCP77/78 HDMI", patch_nvhdmi_8ch_7x),
HDA_CODEC_ENTRY(0x10de0007, "MCP79/7A HDMI", patch_nvhdmi_8ch_7x),
-HDA_CODEC_ENTRY(0x10de0008, "GPU 08 HDMI/DP", patch_nvhdmi),
-HDA_CODEC_ENTRY(0x10de0009, "GPU 09 HDMI/DP", patch_nvhdmi),
-HDA_CODEC_ENTRY(0x10de000a, "GPU 0a HDMI/DP", patch_nvhdmi),
-HDA_CODEC_ENTRY(0x10de000b, "GPU 0b HDMI/DP", patch_nvhdmi),
-HDA_CODEC_ENTRY(0x10de000c, "MCP89 HDMI", patch_nvhdmi),
-HDA_CODEC_ENTRY(0x10de000d, "GPU 0d HDMI/DP", patch_nvhdmi),
-HDA_CODEC_ENTRY(0x10de0010, "GPU 10 HDMI/DP", patch_nvhdmi),
-HDA_CODEC_ENTRY(0x10de0011, "GPU 11 HDMI/DP", patch_nvhdmi),
-HDA_CODEC_ENTRY(0x10de0012, "GPU 12 HDMI/DP", patch_nvhdmi),
-HDA_CODEC_ENTRY(0x10de0013, "GPU 13 HDMI/DP", patch_nvhdmi),
-HDA_CODEC_ENTRY(0x10de0014, "GPU 14 HDMI/DP", patch_nvhdmi),
-HDA_CODEC_ENTRY(0x10de0015, "GPU 15 HDMI/DP", patch_nvhdmi),
-HDA_CODEC_ENTRY(0x10de0016, "GPU 16 HDMI/DP", patch_nvhdmi),
+HDA_CODEC_ENTRY(0x10de0008, "GPU 08 HDMI/DP", patch_nvhdmi_legacy),
+HDA_CODEC_ENTRY(0x10de0009, "GPU 09 HDMI/DP", patch_nvhdmi_legacy),
+HDA_CODEC_ENTRY(0x10de000a, "GPU 0a HDMI/DP", patch_nvhdmi_legacy),
+HDA_CODEC_ENTRY(0x10de000b, "GPU 0b HDMI/DP", patch_nvhdmi_legacy),
+HDA_CODEC_ENTRY(0x10de000c, "MCP89 HDMI", patch_nvhdmi_legacy),
+HDA_CODEC_ENTRY(0x10de000d, "GPU 0d HDMI/DP", patch_nvhdmi_legacy),
+HDA_CODEC_ENTRY(0x10de0010, "GPU 10 HDMI/DP", patch_nvhdmi_legacy),
+HDA_CODEC_ENTRY(0x10de0011, "GPU 11 HDMI/DP", patch_nvhdmi_legacy),
+HDA_CODEC_ENTRY(0x10de0012, "GPU 12 HDMI/DP", patch_nvhdmi_legacy),
+HDA_CODEC_ENTRY(0x10de0013, "GPU 13 HDMI/DP", patch_nvhdmi_legacy),
+HDA_CODEC_ENTRY(0x10de0014, "GPU 14 HDMI/DP", patch_nvhdmi_legacy),
+HDA_CODEC_ENTRY(0x10de0015, "GPU 15 HDMI/DP", patch_nvhdmi_legacy),
+HDA_CODEC_ENTRY(0x10de0016, "GPU 16 HDMI/DP", patch_nvhdmi_legacy),
/* 17 is known to be absent */
-HDA_CODEC_ENTRY(0x10de0018, "GPU 18 HDMI/DP", patch_nvhdmi),
-HDA_CODEC_ENTRY(0x10de0019, "GPU 19 HDMI/DP", patch_nvhdmi),
-HDA_CODEC_ENTRY(0x10de001a, "GPU 1a HDMI/DP", patch_nvhdmi),
-HDA_CODEC_ENTRY(0x10de001b, "GPU 1b HDMI/DP", patch_nvhdmi),
-HDA_CODEC_ENTRY(0x10de001c, "GPU 1c HDMI/DP", patch_nvhdmi),
+HDA_CODEC_ENTRY(0x10de0018, "GPU 18 HDMI/DP", patch_nvhdmi_legacy),
+HDA_CODEC_ENTRY(0x10de0019, "GPU 19 HDMI/DP", patch_nvhdmi_legacy),
+HDA_CODEC_ENTRY(0x10de001a, "GPU 1a HDMI/DP", patch_nvhdmi_legacy),
+HDA_CODEC_ENTRY(0x10de001b, "GPU 1b HDMI/DP", patch_nvhdmi_legacy),
+HDA_CODEC_ENTRY(0x10de001c, "GPU 1c HDMI/DP", patch_nvhdmi_legacy),
HDA_CODEC_ENTRY(0x10de0020, "Tegra30 HDMI", patch_tegra_hdmi),
HDA_CODEC_ENTRY(0x10de0022, "Tegra114 HDMI", patch_tegra_hdmi),
HDA_CODEC_ENTRY(0x10de0028, "Tegra124 HDMI", patch_tegra_hdmi),
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 80f66ba85f87..d2bf70a1d2fd 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -5892,6 +5892,7 @@ enum {
ALC299_FIXUP_PREDATOR_SPK,
ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC,
ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE,
+ ALC294_FIXUP_ASUS_INTSPK_GPIO,
};
static const struct hda_fixup alc269_fixups[] = {
@@ -6982,6 +6983,13 @@ static const struct hda_fixup alc269_fixups[] = {
.chained = true,
.chain_id = ALC256_FIXUP_ASUS_HEADSET_MODE
},
+ [ALC294_FIXUP_ASUS_INTSPK_GPIO] = {
+ .type = HDA_FIXUP_FUNC,
+ /* The GPIO must be pulled to initialize the AMP */
+ .v.func = alc_fixup_gpio4,
+ .chained = true,
+ .chain_id = ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC
+ },
};
static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -7141,7 +7149,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK),
SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
- SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_INTSPK_GPIO),
SND_PCI_QUIRK(0x1043, 0x18b1, "Asus MJ401TA", ALC256_FIXUP_ASUS_HEADSET_MIC),
SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
SND_PCI_QUIRK(0x1043, 0x1a30, "ASUS X705UD", ALC256_FIXUP_ASUS_MIC),
@@ -7248,6 +7256,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
SND_PCI_QUIRK(0x19e5, 0x3204, "Huawei MACH-WX9", ALC256_FIXUP_HUAWEI_MACH_WX9_PINS),
SND_PCI_QUIRK(0x1b7d, 0xa831, "Ordissimo EVE2 ", ALC269VB_FIXUP_ORDISSIMO_EVE2), /* Also known as Malata PC-B1303 */
+ SND_PCI_QUIRK(0x1d72, 0x1901, "RedmiBook 14", ALC256_FIXUP_ASUS_HEADSET_MIC),
SND_PCI_QUIRK(0x10ec, 0x118c, "Medion EE4254 MD62100", ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE),
#if 0
@@ -7512,20 +7521,6 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
{0x19, 0x02a11020},
{0x1a, 0x02a11030},
{0x21, 0x0221101f}),
- SND_HDA_PIN_QUIRK(0x10ec0236, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
- {0x12, 0x90a60140},
- {0x14, 0x90170110},
- {0x21, 0x02211020}),
- SND_HDA_PIN_QUIRK(0x10ec0236, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
- {0x12, 0x90a60140},
- {0x14, 0x90170150},
- {0x21, 0x02211020}),
- SND_HDA_PIN_QUIRK(0x10ec0236, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
- {0x21, 0x02211020}),
- SND_HDA_PIN_QUIRK(0x10ec0236, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
- {0x12, 0x40000000},
- {0x14, 0x90170110},
- {0x21, 0x02211020}),
SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE,
{0x14, 0x90170110},
{0x21, 0x02211020}),
@@ -7608,38 +7603,6 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
{0x1b, 0x01011020},
{0x21, 0x02211010}),
- SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
- {0x12, 0x90a60130},
- {0x14, 0x90170110},
- {0x1b, 0x01011020},
- {0x21, 0x0221101f}),
- SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
- {0x12, 0x90a60160},
- {0x14, 0x90170120},
- {0x21, 0x02211030}),
- SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
- {0x12, 0x90a60170},
- {0x14, 0x90170120},
- {0x21, 0x02211030}),
- SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell Inspiron 5468", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
- {0x12, 0x90a60180},
- {0x14, 0x90170120},
- {0x21, 0x02211030}),
- SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
- {0x12, 0xb7a60130},
- {0x14, 0x90170110},
- {0x21, 0x02211020}),
- SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
- {0x12, 0x90a60130},
- {0x14, 0x90170110},
- {0x14, 0x01011020},
- {0x21, 0x0221101f}),
- SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
- ALC256_STANDARD_PINS),
- SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
- {0x14, 0x90170110},
- {0x1b, 0x01011020},
- {0x21, 0x0221101f}),
SND_HDA_PIN_QUIRK(0x10ec0256, 0x1043, "ASUS", ALC256_FIXUP_ASUS_MIC,
{0x14, 0x90170110},
{0x1b, 0x90a70130},
@@ -7852,6 +7815,12 @@ static const struct snd_hda_pin_quirk alc269_fallback_pin_fixup_tbl[] = {
SND_HDA_PIN_QUIRK(0x10ec0289, 0x1028, "Dell", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE,
{0x19, 0x40000000},
{0x1b, 0x40000000}),
+ SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+ {0x19, 0x40000000},
+ {0x1a, 0x40000000}),
+ SND_HDA_PIN_QUIRK(0x10ec0236, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+ {0x19, 0x40000000},
+ {0x1a, 0x40000000}),
{}
};
diff --git a/sound/pci/ice1712/ice1712.c b/sound/pci/ice1712/ice1712.c
index 4b0dea7f7669..deadba40131c 100644
--- a/sound/pci/ice1712/ice1712.c
+++ b/sound/pci/ice1712/ice1712.c
@@ -884,7 +884,8 @@ static int snd_ice1712_pcm(struct snd_ice1712 *ice, int device)
ice->pcm = pcm;
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(ice->pci), 64*1024, 64*1024);
+ &ice->pci->dev,
+ 64*1024, 64*1024);
dev_warn(ice->card->dev,
"Consumer PCM code does not work well at the moment --jk\n");
@@ -909,7 +910,8 @@ static int snd_ice1712_pcm_ds(struct snd_ice1712 *ice, int device)
ice->pcm_ds = pcm;
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(ice->pci), 64*1024, 128*1024);
+ &ice->pci->dev,
+ 64*1024, 128*1024);
return 0;
}
@@ -1253,7 +1255,8 @@ static int snd_ice1712_pcm_profi(struct snd_ice1712 *ice, int device)
strcpy(pcm->name, "ICE1712 multi");
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(ice->pci), 256*1024, 256*1024);
+ &ice->pci->dev,
+ 256*1024, 256*1024);
ice->pcm_pro = pcm;
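
The snd_dma_pci_data() conversions in this and the following drivers are mechanical, assuming the macro's historical definition in include/sound/memalloc.h simply took the PCI device's embedded struct device:

    /* assumed historical definition; the replacement is then 1:1 */
    #define snd_dma_pci_data(pci)	(&(pci)->dev)

Likewise, the rme32.c hunks below replace snd_dma_continuous_data(GFP_KERNEL) with a NULL device for SNDRV_DMA_TYPE_CONTINUOUS preallocation, NULL being treated as the default GFP_KERNEL case.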
diff --git a/sound/pci/ice1712/ice1724.c b/sound/pci/ice1712/ice1724.c
index e62c11816683..c80a16ee6e76 100644
--- a/sound/pci/ice1712/ice1724.c
+++ b/sound/pci/ice1712/ice1724.c
@@ -1143,7 +1143,7 @@ static int snd_vt1724_pcm_profi(struct snd_ice1712 *ice, int device)
strcpy(pcm->name, "ICE1724");
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(ice->pci),
+ &ice->pci->dev,
256*1024, 256*1024);
ice->pcm_pro = pcm;
@@ -1341,7 +1341,7 @@ static int snd_vt1724_pcm_spdif(struct snd_ice1712 *ice, int device)
strcpy(pcm->name, name);
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(ice->pci),
+ &ice->pci->dev,
256*1024, 256*1024);
ice->pcm = pcm;
@@ -1455,7 +1455,7 @@ static int snd_vt1724_pcm_indep(struct snd_ice1712 *ice, int device)
strcpy(pcm->name, "ICE1724 Surround PCM");
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(ice->pci),
+ &ice->pci->dev,
256*1024, 256*1024);
ice->pcm_ds = pcm;
diff --git a/sound/pci/intel8x0.c b/sound/pci/intel8x0.c
index 6ff94d8ad86e..12374ba08ca2 100644
--- a/sound/pci/intel8x0.c
+++ b/sound/pci/intel8x0.c
@@ -1488,7 +1488,7 @@ static int snd_intel8x0_pcm1(struct intel8x0 *chip, int device,
chip->pcm[device] = pcm;
snd_pcm_lib_preallocate_pages_for_all(pcm, intel8x0_dma_type(chip),
- snd_dma_pci_data(chip->pci),
+ &chip->pci->dev,
rec->prealloc_size, rec->prealloc_max_size);
if (rec->playback_ops &&
@@ -3047,7 +3047,7 @@ static int snd_intel8x0_create(struct snd_card *card,
/* allocate buffer descriptor lists */
/* the start of each lists must be aligned to 8 bytes */
- if (snd_dma_alloc_pages(intel8x0_dma_type(chip), snd_dma_pci_data(pci),
+ if (snd_dma_alloc_pages(intel8x0_dma_type(chip), &pci->dev,
chip->bdbars_count * sizeof(u32) * ICH_MAX_FRAGS * 2,
&chip->bdbars) < 0) {
snd_intel8x0_free(chip);
diff --git a/sound/pci/intel8x0m.c b/sound/pci/intel8x0m.c
index 2f960fb092df..a9add5fedfcb 100644
--- a/sound/pci/intel8x0m.c
+++ b/sound/pci/intel8x0m.c
@@ -734,7 +734,7 @@ static int snd_intel8x0m_pcm1(struct intel8x0m *chip, int device,
chip->pcm[device] = pcm;
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(chip->pci),
+ &chip->pci->dev,
rec->prealloc_size,
rec->prealloc_max_size);
@@ -1176,7 +1176,7 @@ static int snd_intel8x0m_create(struct snd_card *card,
/* allocate buffer descriptor lists */
/* the start of each lists must be aligned to 8 bytes */
- if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci),
+ if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, &pci->dev,
chip->bdbars_count * sizeof(u32) * ICH_MAX_FRAGS * 2,
&chip->bdbars) < 0) {
snd_intel8x0m_free(chip);
diff --git a/sound/pci/korg1212/korg1212.c b/sound/pci/korg1212/korg1212.c
index 0d81eac0a478..2b8204a13c69 100644
--- a/sound/pci/korg1212/korg1212.c
+++ b/sound/pci/korg1212/korg1212.c
@@ -2275,7 +2275,7 @@ static int snd_korg1212_create(struct snd_card *card, struct pci_dev *pci,
korg1212->idRegPtr,
stateName[korg1212->cardState]);
- if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci),
+ if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, &pci->dev,
sizeof(struct KorgSharedBuffer), &korg1212->dma_shared) < 0) {
snd_printk(KERN_ERR "korg1212: can not allocate shared buffer memory (%zd bytes)\n", sizeof(struct KorgSharedBuffer));
snd_korg1212_free(korg1212);
@@ -2290,7 +2290,7 @@ static int snd_korg1212_create(struct snd_card *card, struct pci_dev *pci,
korg1212->DataBufsSize = sizeof(struct KorgAudioBuffer) * kNumBuffers;
- if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci),
+ if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, &pci->dev,
korg1212->DataBufsSize, &korg1212->dma_play) < 0) {
snd_printk(KERN_ERR "korg1212: can not allocate play data buffer memory (%d bytes)\n", korg1212->DataBufsSize);
snd_korg1212_free(korg1212);
@@ -2302,7 +2302,7 @@ static int snd_korg1212_create(struct snd_card *card, struct pci_dev *pci,
K1212_DEBUG_PRINTK("K1212_DEBUG: Play Data Area = 0x%p (0x%08x), %d bytes\n",
korg1212->playDataBufsPtr, korg1212->PlayDataPhy, korg1212->DataBufsSize);
- if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci),
+ if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, &pci->dev,
korg1212->DataBufsSize, &korg1212->dma_rec) < 0) {
snd_printk(KERN_ERR "korg1212: can not allocate record data buffer memory (%d bytes)\n", korg1212->DataBufsSize);
snd_korg1212_free(korg1212);
@@ -2337,7 +2337,7 @@ static int snd_korg1212_create(struct snd_card *card, struct pci_dev *pci,
return err;
}
- if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci),
+ if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, &pci->dev,
dsp_code->size, &korg1212->dma_dsp) < 0) {
snd_printk(KERN_ERR "korg1212: cannot allocate dsp code memory (%zd bytes)\n", dsp_code->size);
snd_korg1212_free(korg1212);
diff --git a/sound/pci/lola/lola.c b/sound/pci/lola/lola.c
index 5cda3488ceab..21ac9d003e8e 100644
--- a/sound/pci/lola/lola.c
+++ b/sound/pci/lola/lola.c
@@ -350,7 +350,7 @@ static int setup_corb_rirb(struct lola *chip)
unsigned long end_time;
err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(chip->pci),
+ &chip->pci->dev,
PAGE_SIZE, &chip->rb);
if (err < 0)
return err;
diff --git a/sound/pci/lola/lola_pcm.c b/sound/pci/lola/lola_pcm.c
index 151f7cf5ce0e..856bcca60128 100644
--- a/sound/pci/lola/lola_pcm.c
+++ b/sound/pci/lola/lola_pcm.c
@@ -582,7 +582,6 @@ static const struct snd_pcm_ops lola_pcm_ops = {
.prepare = lola_pcm_prepare,
.trigger = lola_pcm_trigger,
.pointer = lola_pcm_pointer,
- .page = snd_pcm_sgbuf_ops_page,
};
int lola_create_pcm(struct lola *chip)
@@ -592,7 +591,7 @@ int lola_create_pcm(struct lola *chip)
for (i = 0; i < 2; i++) {
err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(chip->pci),
+ &chip->pci->dev,
PAGE_SIZE, &chip->pcm[i].bdl);
if (err < 0)
return err;
@@ -612,7 +611,7 @@ int lola_create_pcm(struct lola *chip)
}
/* buffer pre-allocation */
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV_SG,
- snd_dma_pci_data(chip->pci),
+ &chip->pci->dev,
1024 * 64, 32 * 1024 * 1024);
return 0;
}
diff --git a/sound/pci/lx6464es/lx6464es.c b/sound/pci/lx6464es/lx6464es.c
index fe10714380f2..d0f63fa54121 100644
--- a/sound/pci/lx6464es/lx6464es.c
+++ b/sound/pci/lx6464es/lx6464es.c
@@ -846,7 +846,7 @@ static int lx_pcm_create(struct lx6464es *chip)
strcpy(pcm->name, card_name);
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(chip->pci),
+ &chip->pci->dev,
size, size);
chip->pcm = pcm;
diff --git a/sound/pci/maestro3.c b/sound/pci/maestro3.c
index 19fa73df0846..cc8594d76c70 100644
--- a/sound/pci/maestro3.c
+++ b/sound/pci/maestro3.c
@@ -1861,7 +1861,8 @@ snd_m3_pcm(struct snd_m3 * chip, int device)
chip->pcm = pcm;
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(chip->pci), 64*1024, 64*1024);
+ &chip->pci->dev,
+ 64*1024, 64*1024);
return 0;
}
diff --git a/sound/pci/mixart/mixart.c b/sound/pci/mixart/mixart.c
index e5279ce54ee1..674d37ec96b3 100644
--- a/sound/pci/mixart/mixart.c
+++ b/sound/pci/mixart/mixart.c
@@ -948,7 +948,8 @@ static void preallocate_buffers(struct snd_mixart *chip, struct snd_pcm *pcm)
}
#endif
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(chip->mgr->pci), 32*1024, 32*1024);
+ &chip->mgr->pci->dev,
+ 32*1024, 32*1024);
}
/*
@@ -1360,7 +1361,7 @@ static int snd_mixart_probe(struct pci_dev *pci,
/* create array of streaminfo */
size = PAGE_ALIGN( (MIXART_MAX_STREAM_PER_CARD * MIXART_MAX_CARDS *
sizeof(struct mixart_flowinfo)) );
- if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci),
+ if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, &pci->dev,
size, &mgr->flowinfo) < 0) {
snd_mixart_free(mgr);
return -ENOMEM;
@@ -1371,7 +1372,7 @@ static int snd_mixart_probe(struct pci_dev *pci,
/* create array of bufferinfo */
size = PAGE_ALIGN( (MIXART_MAX_STREAM_PER_CARD * MIXART_MAX_CARDS *
sizeof(struct mixart_bufferinfo)) );
- if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci),
+ if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, &pci->dev,
size, &mgr->bufferinfo) < 0) {
snd_mixart_free(mgr);
return -ENOMEM;
diff --git a/sound/pci/oxygen/oxygen_pcm.c b/sound/pci/oxygen/oxygen_pcm.c
index e6aa16646fd4..203c8fe48a01 100644
--- a/sound/pci/oxygen/oxygen_pcm.c
+++ b/sound/pci/oxygen/oxygen_pcm.c
@@ -713,13 +713,13 @@ int oxygen_pcm_init(struct oxygen *chip)
if (outs)
snd_pcm_lib_preallocate_pages(pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream,
SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(chip->pci),
+ &chip->pci->dev,
DEFAULT_BUFFER_BYTES_MULTICH,
BUFFER_BYTES_MAX_MULTICH);
if (ins)
snd_pcm_lib_preallocate_pages(pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream,
SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(chip->pci),
+ &chip->pci->dev,
DEFAULT_BUFFER_BYTES,
BUFFER_BYTES_MAX);
}
@@ -739,7 +739,7 @@ int oxygen_pcm_init(struct oxygen *chip)
pcm->private_data = chip;
strcpy(pcm->name, "Digital");
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(chip->pci),
+ &chip->pci->dev,
DEFAULT_BUFFER_BYTES,
BUFFER_BYTES_MAX);
}
@@ -769,7 +769,7 @@ int oxygen_pcm_init(struct oxygen *chip)
pcm->private_data = chip;
strcpy(pcm->name, outs ? "Front Panel" : "Analog 2");
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(chip->pci),
+ &chip->pci->dev,
DEFAULT_BUFFER_BYTES,
BUFFER_BYTES_MAX);
}
@@ -787,7 +787,7 @@ int oxygen_pcm_init(struct oxygen *chip)
pcm->private_data = chip;
strcpy(pcm->name, "Analog 3");
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(chip->pci),
+ &chip->pci->dev,
DEFAULT_BUFFER_BYTES,
BUFFER_BYTES_MAX);
}
diff --git a/sound/pci/pcxhr/pcxhr.c b/sound/pci/pcxhr/pcxhr.c
index e493962d8455..4af34d6d92df 100644
--- a/sound/pci/pcxhr/pcxhr.c
+++ b/sound/pci/pcxhr/pcxhr.c
@@ -1171,7 +1171,7 @@ int pcxhr_create_pcm(struct snd_pcxhr *chip)
strcpy(pcm->name, name);
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(chip->mgr->pci),
+ &chip->mgr->pci->dev,
32*1024, 32*1024);
chip->pcm = pcm;
return 0;
@@ -1644,7 +1644,7 @@ static int pcxhr_probe(struct pci_dev *pci,
/* create hostport purgebuffer */
size = PAGE_ALIGN(sizeof(struct pcxhr_hostport));
- if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci),
+ if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, &pci->dev,
size, &mgr->hostport) < 0) {
pcxhr_free(mgr);
return -ENOMEM;
diff --git a/sound/pci/riptide/riptide.c b/sound/pci/riptide/riptide.c
index 58771ae0ed63..abcea86045ec 100644
--- a/sound/pci/riptide/riptide.c
+++ b/sound/pci/riptide/riptide.c
@@ -1550,7 +1550,7 @@ snd_riptide_hw_params(struct snd_pcm_substream *substream,
if (sgdlist->area)
snd_dma_free_pages(sgdlist);
if ((err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(chip->pci),
+ &chip->pci->dev,
sizeof(struct sgd) * (DESC_MAX_MASK + 1),
sgdlist)) < 0) {
snd_printk(KERN_ERR "Riptide: failed to alloc %d dma bytes\n",
@@ -1661,7 +1661,6 @@ static const struct snd_pcm_ops snd_riptide_playback_ops = {
.hw_params = snd_riptide_hw_params,
.hw_free = snd_riptide_hw_free,
.prepare = snd_riptide_prepare,
- .page = snd_pcm_sgbuf_ops_page,
.trigger = snd_riptide_trigger,
.pointer = snd_riptide_pointer,
};
@@ -1672,7 +1671,6 @@ static const struct snd_pcm_ops snd_riptide_capture_ops = {
.hw_params = snd_riptide_hw_params,
.hw_free = snd_riptide_hw_free,
.prepare = snd_riptide_prepare,
- .page = snd_pcm_sgbuf_ops_page,
.trigger = snd_riptide_trigger,
.pointer = snd_riptide_pointer,
};
@@ -1695,7 +1693,7 @@ static int snd_riptide_pcm(struct snd_riptide *chip, int device)
strcpy(pcm->name, "RIPTIDE");
chip->pcm = pcm;
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV_SG,
- snd_dma_pci_data(chip->pci),
+ &chip->pci->dev,
64 * 1024, 128 * 1024);
return 0;
}
diff --git a/sound/pci/rme32.c b/sound/pci/rme32.c
index 40cc6ca88f7b..58a4b8df25d4 100644
--- a/sound/pci/rme32.c
+++ b/sound/pci/rme32.c
@@ -1375,7 +1375,7 @@ static int snd_rme32_create(struct rme32 *rme32)
snd_pcm_set_ops(rme32->spdif_pcm, SNDRV_PCM_STREAM_CAPTURE,
&snd_rme32_capture_spdif_fd_ops);
snd_pcm_lib_preallocate_pages_for_all(rme32->spdif_pcm, SNDRV_DMA_TYPE_CONTINUOUS,
- snd_dma_continuous_data(GFP_KERNEL),
+ NULL,
0, RME32_MID_BUFFER_SIZE);
rme32->spdif_pcm->info_flags = SNDRV_PCM_INFO_JOINT_DUPLEX;
} else {
@@ -1407,7 +1407,7 @@ static int snd_rme32_create(struct rme32 *rme32)
snd_pcm_set_ops(rme32->adat_pcm, SNDRV_PCM_STREAM_CAPTURE,
&snd_rme32_capture_adat_fd_ops);
snd_pcm_lib_preallocate_pages_for_all(rme32->adat_pcm, SNDRV_DMA_TYPE_CONTINUOUS,
- snd_dma_continuous_data(GFP_KERNEL),
+ NULL,
0, RME32_MID_BUFFER_SIZE);
rme32->adat_pcm->info_flags = SNDRV_PCM_INFO_JOINT_DUPLEX;
} else {
diff --git a/sound/pci/rme9652/hdsp.c b/sound/pci/rme9652/hdsp.c
index 5cbdc9be9c7e..cd20af465d8e 100644
--- a/sound/pci/rme9652/hdsp.c
+++ b/sound/pci/rme9652/hdsp.c
@@ -569,12 +569,7 @@ static char channel_map_H9632_qs[HDSP_MAX_CHANNELS] = {
static int snd_hammerfall_get_buffer(struct pci_dev *pci, struct snd_dma_buffer *dmab, size_t size)
{
- dmab->dev.type = SNDRV_DMA_TYPE_DEV;
- dmab->dev.dev = snd_dma_pci_data(pci);
- if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci),
- size, dmab) < 0)
- return -ENOMEM;
- return 0;
+ return snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, &pci->dev, size, dmab);
}
static void snd_hammerfall_free_buffer(struct snd_dma_buffer *dmab, struct pci_dev *pci)
diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c
index 81a6f4b2bd3c..75c06a7cc779 100644
--- a/sound/pci/rme9652/hdspm.c
+++ b/sound/pci/rme9652/hdspm.c
@@ -6368,7 +6368,6 @@ static const struct snd_pcm_ops snd_hdspm_ops = {
.prepare = snd_hdspm_prepare,
.trigger = snd_hdspm_trigger,
.pointer = snd_hdspm_hw_pointer,
- .page = snd_pcm_sgbuf_ops_page,
};
static int snd_hdspm_create_hwdep(struct snd_card *card,
@@ -6407,7 +6406,7 @@ static int snd_hdspm_preallocate_memory(struct hdspm *hdspm)
wanted = HDSPM_DMA_AREA_BYTES;
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV_SG,
- snd_dma_pci_data(hdspm->pci),
+ &hdspm->pci->dev,
wanted, wanted);
dev_dbg(hdspm->card->dev, " Preallocated %zd Bytes\n", wanted);
return 0;
diff --git a/sound/pci/rme9652/rme9652.c b/sound/pci/rme9652/rme9652.c
index 4c851f8dcaf8..ef5c2f8e17c7 100644
--- a/sound/pci/rme9652/rme9652.c
+++ b/sound/pci/rme9652/rme9652.c
@@ -279,12 +279,7 @@ static char channel_map_9636_ds[26] = {
static int snd_hammerfall_get_buffer(struct pci_dev *pci, struct snd_dma_buffer *dmab, size_t size)
{
- dmab->dev.type = SNDRV_DMA_TYPE_DEV;
- dmab->dev.dev = snd_dma_pci_data(pci);
- if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci),
- size, dmab) < 0)
- return -ENOMEM;
- return 0;
+ return snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, &pci->dev, size, dmab);
}
static void snd_hammerfall_free_buffer(struct snd_dma_buffer *dmab, struct pci_dev *pci)
diff --git a/sound/pci/sis7019.c b/sound/pci/sis7019.c
index b0b5e74e776c..ef7dd290ae05 100644
--- a/sound/pci/sis7019.c
+++ b/sound/pci/sis7019.c
@@ -905,7 +905,8 @@ static int sis_pcm_create(struct sis7019 *sis)
* world if this fails.
*/
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(sis->pci), 64*1024, 128*1024);
+ &sis->pci->dev,
+ 64*1024, 128*1024);
return 0;
}
diff --git a/sound/pci/sonicvibes.c b/sound/pci/sonicvibes.c
index 13103f5c309b..31cbc811ad37 100644
--- a/sound/pci/sonicvibes.c
+++ b/sound/pci/sonicvibes.c
@@ -884,7 +884,8 @@ static int snd_sonicvibes_pcm(struct sonicvibes *sonic, int device)
sonic->pcm = pcm;
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(sonic->pci), 64*1024, 128*1024);
+ &sonic->pci->dev,
+ 64*1024, 128*1024);
return 0;
}
diff --git a/sound/pci/trident/trident_main.c b/sound/pci/trident/trident_main.c
index 1a6f6202fd16..07022c0dad40 100644
--- a/sound/pci/trident/trident_main.c
+++ b/sound/pci/trident/trident_main.c
@@ -2076,7 +2076,6 @@ static const struct snd_pcm_ops snd_trident_nx_playback_ops = {
.prepare = snd_trident_playback_prepare,
.trigger = snd_trident_trigger,
.pointer = snd_trident_playback_pointer,
- .page = snd_pcm_sgbuf_ops_page,
};
static const struct snd_pcm_ops snd_trident_capture_ops = {
@@ -2121,7 +2120,6 @@ static const struct snd_pcm_ops snd_trident_nx_foldback_ops = {
.prepare = snd_trident_foldback_prepare,
.trigger = snd_trident_trigger,
.pointer = snd_trident_playback_pointer,
- .page = snd_pcm_sgbuf_ops_page,
};
static const struct snd_pcm_ops snd_trident_spdif_ops = {
@@ -2186,14 +2184,16 @@ int snd_trident_pcm(struct snd_trident *trident, int device)
struct snd_pcm_substream *substream;
for (substream = pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream; substream; substream = substream->next)
snd_pcm_lib_preallocate_pages(substream, SNDRV_DMA_TYPE_DEV_SG,
- snd_dma_pci_data(trident->pci),
+ &trident->pci->dev,
64*1024, 128*1024);
snd_pcm_lib_preallocate_pages(pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream,
- SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(trident->pci),
+ SNDRV_DMA_TYPE_DEV,
+ &trident->pci->dev,
64*1024, 128*1024);
} else {
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(trident->pci), 64*1024, 128*1024);
+ &trident->pci->dev,
+ 64*1024, 128*1024);
}
return 0;
@@ -2243,10 +2243,12 @@ int snd_trident_foldback_pcm(struct snd_trident *trident, int device)
if (trident->tlb.entries)
snd_pcm_lib_preallocate_pages_for_all(foldback, SNDRV_DMA_TYPE_DEV_SG,
- snd_dma_pci_data(trident->pci), 0, 128*1024);
+ &trident->pci->dev,
+ 0, 128*1024);
else
snd_pcm_lib_preallocate_pages_for_all(foldback, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(trident->pci), 64*1024, 128*1024);
+ &trident->pci->dev,
+ 64*1024, 128*1024);
return 0;
}
@@ -2280,7 +2282,9 @@ int snd_trident_spdif_pcm(struct snd_trident *trident, int device)
strcpy(spdif->name, "Trident 4DWave IEC958");
trident->spdif = spdif;
- snd_pcm_lib_preallocate_pages_for_all(spdif, SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(trident->pci), 64*1024, 128*1024);
+ snd_pcm_lib_preallocate_pages_for_all(spdif, SNDRV_DMA_TYPE_DEV,
+ &trident->pci->dev,
+ 64*1024, 128*1024);
return 0;
}
@@ -3338,7 +3342,7 @@ static int snd_trident_tlb_alloc(struct snd_trident *trident)
/* TLB array must be aligned to 16kB !!! so we allocate
32kB region and correct offset when necessary */
- if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(trident->pci),
+ if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, &trident->pci->dev,
2 * SNDRV_TRIDENT_MAX_PAGES * 4, &trident->tlb.buffer) < 0) {
dev_err(trident->card->dev, "unable to allocate TLB buffer\n");
return -ENOMEM;
@@ -3353,7 +3357,7 @@ static int snd_trident_tlb_alloc(struct snd_trident *trident)
return -ENOMEM;
/* allocate and setup silent page and initialise TLB entries */
- if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(trident->pci),
+ if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, &trident->pci->dev,
SNDRV_TRIDENT_PAGE_SIZE, &trident->tlb.silent_page) < 0) {
dev_err(trident->card->dev, "unable to allocate silent page\n");
return -ENOMEM;
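For context on the "aligned to 16kB" comment above: the driver allocates twice the required table size and then rounds the base address up, so the aligned pointer is guaranteed to stay inside the buffer. A sketch of that fix-up, with tlb_base as an illustrative local (the real assignment lives outside the hunk shown):

/* 2 * SNDRV_TRIDENT_MAX_PAGES * 4 bytes were allocated, so rounding
 * up to the 16 kB boundary cannot run past the end of the buffer */
tlb_base = ALIGN((unsigned long)trident->tlb.buffer.area,
		 SNDRV_TRIDENT_MAX_PAGES * 4);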
diff --git a/sound/pci/via82xx.c b/sound/pci/via82xx.c
index 38601d0dfb73..30c817b6b635 100644
--- a/sound/pci/via82xx.c
+++ b/sound/pci/via82xx.c
@@ -419,7 +419,7 @@ static int build_via_table(struct viadev *dev, struct snd_pcm_substream *substre
/* the start of each lists must be aligned to 8 bytes,
* but the kernel pages are much bigger, so we don't care
*/
- if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(chip->pci),
+ if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, &chip->pci->dev,
PAGE_ALIGN(VIA_TABLE_SIZE * 2 * 8),
&dev->table) < 0)
return -ENOMEM;
@@ -1363,7 +1363,6 @@ static const struct snd_pcm_ops snd_via686_playback_ops = {
.prepare = snd_via686_playback_prepare,
.trigger = snd_via82xx_pcm_trigger,
.pointer = snd_via686_pcm_pointer,
- .page = snd_pcm_sgbuf_ops_page,
};
/* via686 capture callbacks */
@@ -1376,7 +1375,6 @@ static const struct snd_pcm_ops snd_via686_capture_ops = {
.prepare = snd_via686_capture_prepare,
.trigger = snd_via82xx_pcm_trigger,
.pointer = snd_via686_pcm_pointer,
- .page = snd_pcm_sgbuf_ops_page,
};
/* via823x DSX playback callbacks */
@@ -1389,7 +1387,6 @@ static const struct snd_pcm_ops snd_via8233_playback_ops = {
.prepare = snd_via8233_playback_prepare,
.trigger = snd_via82xx_pcm_trigger,
.pointer = snd_via8233_pcm_pointer,
- .page = snd_pcm_sgbuf_ops_page,
};
/* via823x multi-channel playback callbacks */
@@ -1402,7 +1399,6 @@ static const struct snd_pcm_ops snd_via8233_multi_ops = {
.prepare = snd_via8233_multi_prepare,
.trigger = snd_via82xx_pcm_trigger,
.pointer = snd_via8233_pcm_pointer,
- .page = snd_pcm_sgbuf_ops_page,
};
/* via823x capture callbacks */
@@ -1415,7 +1411,6 @@ static const struct snd_pcm_ops snd_via8233_capture_ops = {
.prepare = snd_via8233_capture_prepare,
.trigger = snd_via82xx_pcm_trigger,
.pointer = snd_via8233_pcm_pointer,
- .page = snd_pcm_sgbuf_ops_page,
};
@@ -1459,7 +1454,7 @@ static int snd_via8233_pcm_new(struct via82xx *chip)
init_viadev(chip, chip->capture_devno, VIA_REG_CAPTURE_8233_STATUS, 6, 1);
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV_SG,
- snd_dma_pci_data(chip->pci),
+ &chip->pci->dev,
64*1024, VIA_MAX_BUFSIZE);
err = snd_pcm_add_chmap_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK,
@@ -1483,7 +1478,7 @@ static int snd_via8233_pcm_new(struct via82xx *chip)
init_viadev(chip, chip->capture_devno + 1, VIA_REG_CAPTURE_8233_STATUS + 0x10, 7, 1);
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV_SG,
- snd_dma_pci_data(chip->pci),
+ &chip->pci->dev,
64*1024, VIA_MAX_BUFSIZE);
err = snd_pcm_add_chmap_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK,
@@ -1526,7 +1521,7 @@ static int snd_via8233a_pcm_new(struct via82xx *chip)
init_viadev(chip, chip->capture_devno, VIA_REG_CAPTURE_8233_STATUS, 6, 1);
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV_SG,
- snd_dma_pci_data(chip->pci),
+ &chip->pci->dev,
64*1024, VIA_MAX_BUFSIZE);
err = snd_pcm_add_chmap_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK,
@@ -1552,7 +1547,7 @@ static int snd_via8233a_pcm_new(struct via82xx *chip)
init_viadev(chip, chip->playback_devno, 0x30, 3, 0);
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV_SG,
- snd_dma_pci_data(chip->pci),
+ &chip->pci->dev,
64*1024, VIA_MAX_BUFSIZE);
return 0;
}
@@ -1582,7 +1577,7 @@ static int snd_via686_pcm_new(struct via82xx *chip)
init_viadev(chip, 1, VIA_REG_CAPTURE_STATUS, 0, 1);
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV_SG,
- snd_dma_pci_data(chip->pci),
+ &chip->pci->dev,
64*1024, VIA_MAX_BUFSIZE);
return 0;
}
diff --git a/sound/pci/via82xx_modem.c b/sound/pci/via82xx_modem.c
index bfb5e1b89d5f..0edb9ea6e8a6 100644
--- a/sound/pci/via82xx_modem.c
+++ b/sound/pci/via82xx_modem.c
@@ -272,7 +272,7 @@ static int build_via_table(struct viadev *dev, struct snd_pcm_substream *substre
/* the start of each lists must be aligned to 8 bytes,
* but the kernel pages are much bigger, so we don't care
*/
- if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(chip->pci),
+ if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, &chip->pci->dev,
PAGE_ALIGN(VIA_TABLE_SIZE * 2 * 8),
&dev->table) < 0)
return -ENOMEM;
@@ -801,7 +801,6 @@ static const struct snd_pcm_ops snd_via686_playback_ops = {
.prepare = snd_via82xx_pcm_prepare,
.trigger = snd_via82xx_pcm_trigger,
.pointer = snd_via686_pcm_pointer,
- .page = snd_pcm_sgbuf_ops_page,
};
/* via686 capture callbacks */
@@ -814,7 +813,6 @@ static const struct snd_pcm_ops snd_via686_capture_ops = {
.prepare = snd_via82xx_pcm_prepare,
.trigger = snd_via82xx_pcm_trigger,
.pointer = snd_via686_pcm_pointer,
- .page = snd_pcm_sgbuf_ops_page,
};
@@ -852,7 +850,7 @@ static int snd_via686_pcm_new(struct via82xx_modem *chip)
init_viadev(chip, 1, VIA_REG_MI_STATUS, 1);
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV_SG,
- snd_dma_pci_data(chip->pci),
+ &chip->pci->dev,
64*1024, 128*1024);
return 0;
}
diff --git a/sound/pci/ymfpci/ymfpci_main.c b/sound/pci/ymfpci/ymfpci_main.c
index 90400ebb64af..125c11ed5064 100644
--- a/sound/pci/ymfpci/ymfpci_main.c
+++ b/sound/pci/ymfpci/ymfpci_main.c
@@ -587,7 +587,7 @@ static void snd_ymfpci_pcm_init_voice(struct snd_ymfpci_pcm *ypcm, unsigned int
static int snd_ymfpci_ac3_init(struct snd_ymfpci *chip)
{
- if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(chip->pci),
+ if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, &chip->pci->dev,
4096, &chip->ac3_tmp_base) < 0)
return -ENOMEM;
@@ -1149,7 +1149,8 @@ int snd_ymfpci_pcm(struct snd_ymfpci *chip, int device)
chip->pcm = pcm;
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(chip->pci), 64*1024, 256*1024);
+ &chip->pci->dev,
+ 64*1024, 256*1024);
return snd_pcm_add_chmap_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK,
snd_pcm_std_chmaps, 2, 0, NULL);
@@ -1184,7 +1185,8 @@ int snd_ymfpci_pcm2(struct snd_ymfpci *chip, int device)
chip->pcm2 = pcm;
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(chip->pci), 64*1024, 256*1024);
+ &chip->pci->dev,
+ 64*1024, 256*1024);
return 0;
}
@@ -1217,7 +1219,8 @@ int snd_ymfpci_pcm_spdif(struct snd_ymfpci *chip, int device)
chip->pcm_spdif = pcm;
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(chip->pci), 64*1024, 256*1024);
+ &chip->pci->dev,
+ 64*1024, 256*1024);
return 0;
}
@@ -1258,7 +1261,8 @@ int snd_ymfpci_pcm_4ch(struct snd_ymfpci *chip, int device)
chip->pcm_4ch = pcm;
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(chip->pci), 64*1024, 256*1024);
+ &chip->pci->dev,
+ 64*1024, 256*1024);
return snd_pcm_add_chmap_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK,
surround_map, 2, 0, NULL);
@@ -2108,7 +2112,7 @@ static int snd_ymfpci_memalloc(struct snd_ymfpci *chip)
chip->work_size;
/* work_ptr must be aligned to 256 bytes, but it's already
covered with the kernel page allocation mechanism */
- if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(chip->pci),
+ if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, &chip->pci->dev,
size, &chip->work_ptr) < 0)
return -ENOMEM;
ptr = chip->work_ptr.area;
diff --git a/sound/pcmcia/pdaudiocf/pdaudiocf_pcm.c b/sound/pcmcia/pdaudiocf/pdaudiocf_pcm.c
index c21fec60cd98..067b1c3a3e02 100644
--- a/sound/pcmcia/pdaudiocf/pdaudiocf_pcm.c
+++ b/sound/pcmcia/pdaudiocf/pdaudiocf_pcm.c
@@ -89,8 +89,7 @@ static int pdacf_pcm_trigger(struct snd_pcm_substream *subs, int cmd)
static int pdacf_pcm_hw_params(struct snd_pcm_substream *subs,
struct snd_pcm_hw_params *hw_params)
{
- return snd_pcm_lib_alloc_vmalloc_32_buffer
- (subs, params_buffer_bytes(hw_params));
+ return snd_pcm_lib_malloc_pages(subs, params_buffer_bytes(hw_params));
}
/*
@@ -98,7 +97,7 @@ static int pdacf_pcm_hw_params(struct snd_pcm_substream *subs,
*/
static int pdacf_pcm_hw_free(struct snd_pcm_substream *subs)
{
- return snd_pcm_lib_free_vmalloc_buffer(subs);
+ return snd_pcm_lib_free_pages(subs);
}
/*
@@ -262,7 +261,6 @@ static const struct snd_pcm_ops pdacf_pcm_capture_ops = {
.prepare = pdacf_pcm_prepare,
.trigger = pdacf_pcm_trigger,
.pointer = pdacf_pcm_capture_pointer,
- .page = snd_pcm_lib_get_vmalloc_page,
};
@@ -279,6 +277,9 @@ int snd_pdacf_pcm_new(struct snd_pdacf *chip)
return err;
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &pdacf_pcm_capture_ops);
+ snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_VMALLOC,
+ snd_dma_continuous_data(GFP_KERNEL | GFP_DMA32),
+ 0, 0);
pcm->private_data = chip;
pcm->info_flags = 0;
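The pdaudiocf conversion above is the recipe used to retire the snd_pcm_lib_*_vmalloc_* helpers: declare a SNDRV_DMA_TYPE_VMALLOC preallocation (here carrying the GFP_DMA32 mask that the old _vmalloc_32_ variant implied) and let the generic page helpers do the rest. With 0/0 as the sizes nothing is allocated up front; the vmalloc buffer is created at hw_params time. A condensed sketch with hypothetical foo_* names:

static int foo_hw_params(struct snd_pcm_substream *subs,
			 struct snd_pcm_hw_params *hw_params)
{
	/* backed by the SNDRV_DMA_TYPE_VMALLOC preallocation below */
	return snd_pcm_lib_malloc_pages(subs, params_buffer_bytes(hw_params));
}

snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_VMALLOC,
		snd_dma_continuous_data(GFP_KERNEL | GFP_DMA32), 0, 0);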
diff --git a/sound/sh/aica.c b/sound/sh/aica.c
index 52e9cfb4f819..bf1fb0d8a930 100644
--- a/sound/sh/aica.c
+++ b/sound/sh/aica.c
@@ -443,7 +443,7 @@ static int __init snd_aicapcmchip(struct snd_card_aica
/* Allocate the DMA buffers */
snd_pcm_lib_preallocate_pages_for_all(pcm,
SNDRV_DMA_TYPE_CONTINUOUS,
- snd_dma_continuous_data(GFP_KERNEL),
+ NULL,
AICA_BUFFER_SIZE,
AICA_BUFFER_SIZE);
return 0;
diff --git a/sound/sh/sh_dac_audio.c b/sound/sh/sh_dac_audio.c
index ed877a138965..f9e36abc98ac 100644
--- a/sound/sh/sh_dac_audio.c
+++ b/sound/sh/sh_dac_audio.c
@@ -268,7 +268,7 @@ static int snd_sh_dac_pcm(struct snd_sh_dac *chip, int device)
/* buffer size=48K */
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS,
- snd_dma_continuous_data(GFP_KERNEL),
+ NULL,
48 * 1024,
48 * 1024);
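Here, as in the aica.c hunk above, a NULL device for SNDRV_DMA_TYPE_CONTINUOUS now stands for plain kernel pages allocated with GFP_KERNEL; the snd_dma_continuous_data() trick of casting a GFP mask to a fake device pointer is only still needed for non-default masks, such as the GFP_DMA32 case earlier. Sketch:

/* NULL device == default continuous pages, GFP_KERNEL */
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS,
				      NULL, 48 * 1024, 48 * 1024);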
diff --git a/sound/soc/amd/acp-pcm-dma.c b/sound/soc/amd/acp-pcm-dma.c
index 52225b4b6382..4b9a27e25206 100644
--- a/sound/soc/amd/acp-pcm-dma.c
+++ b/sound/soc/amd/acp-pcm-dma.c
@@ -759,14 +759,12 @@ static irqreturn_t dma_irq_handler(int irq, void *arg)
return IRQ_NONE;
}
-static int acp_dma_open(struct snd_pcm_substream *substream)
+static int acp_dma_open(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
u16 bank;
int ret = 0;
struct snd_pcm_runtime *runtime = substream->runtime;
- struct snd_soc_pcm_runtime *prtd = substream->private_data;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(prtd,
- DRV_NAME);
struct audio_drv_data *intr_data = dev_get_drvdata(component->dev);
struct audio_substream_data *adata =
kzalloc(sizeof(struct audio_substream_data), GFP_KERNEL);
@@ -834,7 +832,8 @@ static int acp_dma_open(struct snd_pcm_substream *substream)
return 0;
}
-static int acp_dma_hw_params(struct snd_pcm_substream *substream,
+static int acp_dma_hw_params(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
int status;
@@ -843,8 +842,6 @@ static int acp_dma_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_runtime *runtime;
struct audio_substream_data *rtd;
struct snd_soc_pcm_runtime *prtd = substream->private_data;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(prtd,
- DRV_NAME);
struct audio_drv_data *adata = dev_get_drvdata(component->dev);
struct snd_soc_card *card = prtd->card;
struct acp_platform_info *pinfo = snd_soc_card_get_drvdata(card);
@@ -995,7 +992,8 @@ static int acp_dma_hw_params(struct snd_pcm_substream *substream,
return status;
}
-static int acp_dma_hw_free(struct snd_pcm_substream *substream)
+static int acp_dma_hw_free(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
return snd_pcm_lib_free_pages(substream);
}
@@ -1011,7 +1009,8 @@ static u64 acp_get_byte_count(struct audio_substream_data *rtd)
return byte_count.bytescount;
}
-static snd_pcm_uframes_t acp_dma_pointer(struct snd_pcm_substream *substream)
+static snd_pcm_uframes_t acp_dma_pointer(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
u32 buffersize;
u32 pos = 0;
@@ -1053,13 +1052,15 @@ static snd_pcm_uframes_t acp_dma_pointer(struct snd_pcm_substream *substream)
return bytes_to_frames(runtime, pos);
}
-static int acp_dma_mmap(struct snd_pcm_substream *substream,
+static int acp_dma_mmap(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
struct vm_area_struct *vma)
{
return snd_pcm_lib_default_mmap(substream, vma);
}
-static int acp_dma_prepare(struct snd_pcm_substream *substream)
+static int acp_dma_prepare(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct audio_substream_data *rtd = runtime->private_data;
@@ -1086,7 +1087,8 @@ static int acp_dma_prepare(struct snd_pcm_substream *substream)
return 0;
}
-static int acp_dma_trigger(struct snd_pcm_substream *substream, int cmd)
+static int acp_dma_trigger(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream, int cmd)
{
int ret;
@@ -1132,10 +1134,9 @@ static int acp_dma_trigger(struct snd_pcm_substream *substream, int cmd)
return ret;
}
-static int acp_dma_new(struct snd_soc_pcm_runtime *rtd)
+static int acp_dma_new(struct snd_soc_component *component,
+ struct snd_soc_pcm_runtime *rtd)
{
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd,
- DRV_NAME);
struct audio_drv_data *adata = dev_get_drvdata(component->dev);
struct device *parent = component->dev->parent;
@@ -1158,14 +1159,12 @@ static int acp_dma_new(struct snd_soc_pcm_runtime *rtd)
return 0;
}
-static int acp_dma_close(struct snd_pcm_substream *substream)
+static int acp_dma_close(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
u16 bank;
struct snd_pcm_runtime *runtime = substream->runtime;
struct audio_substream_data *rtd = runtime->private_data;
- struct snd_soc_pcm_runtime *prtd = substream->private_data;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(prtd,
- DRV_NAME);
struct audio_drv_data *adata = dev_get_drvdata(component->dev);
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
@@ -1216,22 +1215,18 @@ static int acp_dma_close(struct snd_pcm_substream *substream)
return 0;
}
-static const struct snd_pcm_ops acp_dma_ops = {
- .open = acp_dma_open,
- .close = acp_dma_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = acp_dma_hw_params,
- .hw_free = acp_dma_hw_free,
- .trigger = acp_dma_trigger,
- .pointer = acp_dma_pointer,
- .mmap = acp_dma_mmap,
- .prepare = acp_dma_prepare,
-};
-
static const struct snd_soc_component_driver acp_asoc_platform = {
- .name = DRV_NAME,
- .ops = &acp_dma_ops,
- .pcm_new = acp_dma_new,
+ .name = DRV_NAME,
+ .open = acp_dma_open,
+ .close = acp_dma_close,
+ .ioctl = snd_soc_pcm_lib_ioctl,
+ .hw_params = acp_dma_hw_params,
+ .hw_free = acp_dma_hw_free,
+ .trigger = acp_dma_trigger,
+ .pointer = acp_dma_pointer,
+ .mmap = acp_dma_mmap,
+ .prepare = acp_dma_prepare,
+ .pcm_construct = acp_dma_new,
};
static int acp_audio_probe(struct platform_device *pdev)
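The shape of the acp-pcm-dma.c change repeats in every ASoC platform driver converted below: the snd_pcm_ops table is folded into snd_soc_component_driver, every PCM callback gains the component as its first argument (so the snd_soc_rtdcom_lookup(rtd, DRV_NAME) round trip is gone), pcm_new/pcm_free become pcm_construct/pcm_destruct, and the generic ioctl is the component-level snd_soc_pcm_lib_ioctl(). A minimal sketch with hypothetical foo_* names:

static int foo_open(struct snd_soc_component *component,
		    struct snd_pcm_substream *substream)
{
	/* driver state is reachable directly from the component */
	struct foo_priv *priv = dev_get_drvdata(component->dev);

	return priv ? 0 : -ENODEV;
}

static int foo_pcm_new(struct snd_soc_component *component,
		       struct snd_soc_pcm_runtime *rtd)
{
	snd_pcm_lib_preallocate_pages_for_all(rtd->pcm, SNDRV_DMA_TYPE_DEV,
					      component->dev->parent,
					      64 * 1024, 128 * 1024);
	return 0;
}

static const struct snd_soc_component_driver foo_component = {
	.name		= DRV_NAME,
	.open		= foo_open,
	.ioctl		= snd_soc_pcm_lib_ioctl,
	.pcm_construct	= foo_pcm_new,
};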
diff --git a/sound/soc/amd/raven/acp3x-pcm-dma.c b/sound/soc/amd/raven/acp3x-pcm-dma.c
index bc4dfafdfcd1..60709e3ba99d 100644
--- a/sound/soc/amd/raven/acp3x-pcm-dma.c
+++ b/sound/soc/amd/raven/acp3x-pcm-dma.c
@@ -275,16 +275,12 @@ static void config_acp3x_dma(struct i2s_stream_instance *rtd, int direction)
rtd->acp3x_base + mmACP_EXTERNAL_INTR_CNTL);
}
-static int acp3x_dma_open(struct snd_pcm_substream *substream)
+static int acp3x_dma_open(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
int ret = 0;
-
struct snd_pcm_runtime *runtime = substream->runtime;
- struct snd_soc_pcm_runtime *prtd = substream->private_data;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(prtd,
- DRV_NAME);
struct i2s_dev_data *adata = dev_get_drvdata(component->dev);
-
struct i2s_stream_instance *i2s_data = kzalloc(sizeof(struct i2s_stream_instance),
GFP_KERNEL);
if (!i2s_data)
@@ -334,7 +330,8 @@ static u64 acp_get_byte_count(struct i2s_stream_instance *rtd, int direction)
return byte_count;
}
-static int acp3x_dma_hw_params(struct snd_pcm_substream *substream,
+static int acp3x_dma_hw_params(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
int status;
@@ -362,7 +359,8 @@ static int acp3x_dma_hw_params(struct snd_pcm_substream *substream,
return status;
}
-static snd_pcm_uframes_t acp3x_dma_pointer(struct snd_pcm_substream *substream)
+static snd_pcm_uframes_t acp3x_dma_pointer(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
u32 pos = 0;
u32 buffersize = 0;
@@ -379,33 +377,32 @@ static snd_pcm_uframes_t acp3x_dma_pointer(struct snd_pcm_substream *substream)
return bytes_to_frames(substream->runtime, pos);
}
-static int acp3x_dma_new(struct snd_soc_pcm_runtime *rtd)
+static int acp3x_dma_new(struct snd_soc_component *component,
+ struct snd_soc_pcm_runtime *rtd)
{
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd,
- DRV_NAME);
struct device *parent = component->dev->parent;
snd_pcm_lib_preallocate_pages_for_all(rtd->pcm, SNDRV_DMA_TYPE_DEV,
parent, MIN_BUFFER, MAX_BUFFER);
return 0;
}
-static int acp3x_dma_hw_free(struct snd_pcm_substream *substream)
+static int acp3x_dma_hw_free(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
return snd_pcm_lib_free_pages(substream);
}
-static int acp3x_dma_mmap(struct snd_pcm_substream *substream,
+static int acp3x_dma_mmap(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
struct vm_area_struct *vma)
{
return snd_pcm_lib_default_mmap(substream, vma);
}
-static int acp3x_dma_close(struct snd_pcm_substream *substream)
+static int acp3x_dma_close(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
- struct snd_soc_pcm_runtime *prtd = substream->private_data;
struct i2s_stream_instance *rtd = substream->runtime->private_data;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(prtd,
- DRV_NAME);
struct i2s_dev_data *adata = dev_get_drvdata(component->dev);
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
@@ -422,17 +419,6 @@ static int acp3x_dma_close(struct snd_pcm_substream *substream)
return 0;
}
-static struct snd_pcm_ops acp3x_dma_ops = {
- .open = acp3x_dma_open,
- .close = acp3x_dma_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = acp3x_dma_hw_params,
- .hw_free = acp3x_dma_hw_free,
- .pointer = acp3x_dma_pointer,
- .mmap = acp3x_dma_mmap,
-};
-
-
static int acp3x_dai_i2s_set_fmt(struct snd_soc_dai *cpu_dai, unsigned int fmt)
{
@@ -610,9 +596,15 @@ static struct snd_soc_dai_driver acp3x_i2s_dai_driver = {
};
static const struct snd_soc_component_driver acp3x_i2s_component = {
- .name = DRV_NAME,
- .ops = &acp3x_dma_ops,
- .pcm_new = acp3x_dma_new,
+ .name = DRV_NAME,
+ .open = acp3x_dma_open,
+ .close = acp3x_dma_close,
+ .ioctl = snd_soc_pcm_lib_ioctl,
+ .hw_params = acp3x_dma_hw_params,
+ .hw_free = acp3x_dma_hw_free,
+ .pointer = acp3x_dma_pointer,
+ .mmap = acp3x_dma_mmap,
+ .pcm_construct = acp3x_dma_new,
};
static int acp3x_audio_probe(struct platform_device *pdev)
@@ -631,7 +623,7 @@ static int acp3x_audio_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(&pdev->dev, "IORESOURCE_IRQ FAILED\n");
- return -ENODEV;
+ return -ENODEV;
}
adata = devm_kzalloc(&pdev->dev, sizeof(*adata), GFP_KERNEL);
diff --git a/sound/soc/atmel/atmel-pcm-pdc.c b/sound/soc/atmel/atmel-pcm-pdc.c
index ed095af866db..18a2fd02fffe 100644
--- a/sound/soc/atmel/atmel-pcm-pdc.c
+++ b/sound/soc/atmel/atmel-pcm-pdc.c
@@ -56,15 +56,17 @@ static int atmel_pcm_preallocate_dma_buffer(struct snd_pcm *pcm,
return 0;
}
-static int atmel_pcm_mmap(struct snd_pcm_substream *substream,
- struct vm_area_struct *vma)
+static int atmel_pcm_mmap(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ struct vm_area_struct *vma)
{
return remap_pfn_range(vma, vma->vm_start,
substream->dma_buffer.addr >> PAGE_SHIFT,
vma->vm_end - vma->vm_start, vma->vm_page_prot);
}
-static int atmel_pcm_new(struct snd_soc_pcm_runtime *rtd)
+static int atmel_pcm_new(struct snd_soc_component *component,
+ struct snd_soc_pcm_runtime *rtd)
{
struct snd_card *card = rtd->card->snd_card;
struct snd_pcm *pcm = rtd->pcm;
@@ -93,7 +95,8 @@ static int atmel_pcm_new(struct snd_soc_pcm_runtime *rtd)
return ret;
}
-static void atmel_pcm_free(struct snd_pcm *pcm)
+static void atmel_pcm_free(struct snd_soc_component *component,
+ struct snd_pcm *pcm)
{
struct snd_pcm_substream *substream;
struct snd_dma_buffer *buf;
@@ -196,8 +199,9 @@ static void atmel_pcm_dma_irq(u32 ssc_sr,
/*--------------------------------------------------------------------------*\
* PCM operations
\*--------------------------------------------------------------------------*/
-static int atmel_pcm_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params)
+static int atmel_pcm_hw_params(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct atmel_runtime_data *prtd = runtime->private_data;
@@ -225,7 +229,8 @@ static int atmel_pcm_hw_params(struct snd_pcm_substream *substream,
return 0;
}
-static int atmel_pcm_hw_free(struct snd_pcm_substream *substream)
+static int atmel_pcm_hw_free(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct atmel_runtime_data *prtd = substream->runtime->private_data;
struct atmel_pcm_dma_params *params = prtd->params;
@@ -239,7 +244,8 @@ static int atmel_pcm_hw_free(struct snd_pcm_substream *substream)
return 0;
}
-static int atmel_pcm_prepare(struct snd_pcm_substream *substream)
+static int atmel_pcm_prepare(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct atmel_runtime_data *prtd = substream->runtime->private_data;
struct atmel_pcm_dma_params *params = prtd->params;
@@ -251,8 +257,8 @@ static int atmel_pcm_prepare(struct snd_pcm_substream *substream)
return 0;
}
-static int atmel_pcm_trigger(struct snd_pcm_substream *substream,
- int cmd)
+static int atmel_pcm_trigger(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream, int cmd)
{
struct snd_pcm_runtime *rtd = substream->runtime;
struct atmel_runtime_data *prtd = rtd->private_data;
@@ -317,8 +323,8 @@ static int atmel_pcm_trigger(struct snd_pcm_substream *substream,
return ret;
}
-static snd_pcm_uframes_t atmel_pcm_pointer(
- struct snd_pcm_substream *substream)
+static snd_pcm_uframes_t atmel_pcm_pointer(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct atmel_runtime_data *prtd = runtime->private_data;
@@ -335,7 +341,8 @@ static snd_pcm_uframes_t atmel_pcm_pointer(
return x;
}
-static int atmel_pcm_open(struct snd_pcm_substream *substream)
+static int atmel_pcm_open(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct atmel_runtime_data *prtd;
@@ -360,7 +367,8 @@ static int atmel_pcm_open(struct snd_pcm_substream *substream)
return ret;
}
-static int atmel_pcm_close(struct snd_pcm_substream *substream)
+static int atmel_pcm_close(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct atmel_runtime_data *prtd = substream->runtime->private_data;
@@ -368,22 +376,18 @@ static int atmel_pcm_close(struct snd_pcm_substream *substream)
return 0;
}
-static const struct snd_pcm_ops atmel_pcm_ops = {
+static const struct snd_soc_component_driver atmel_soc_platform = {
.open = atmel_pcm_open,
.close = atmel_pcm_close,
- .ioctl = snd_pcm_lib_ioctl,
+ .ioctl = snd_soc_pcm_lib_ioctl,
.hw_params = atmel_pcm_hw_params,
.hw_free = atmel_pcm_hw_free,
.prepare = atmel_pcm_prepare,
.trigger = atmel_pcm_trigger,
.pointer = atmel_pcm_pointer,
.mmap = atmel_pcm_mmap,
-};
-
-static struct snd_soc_component_driver atmel_soc_platform = {
- .ops = &atmel_pcm_ops,
- .pcm_new = atmel_pcm_new,
- .pcm_free = atmel_pcm_free,
+ .pcm_construct = atmel_pcm_new,
+ .pcm_destruct = atmel_pcm_free,
};
int atmel_pcm_pdc_platform_register(struct device *dev)
diff --git a/sound/soc/au1x/dbdma2.c b/sound/soc/au1x/dbdma2.c
index d56092a5ee11..4553108ec92a 100644
--- a/sound/soc/au1x/dbdma2.c
+++ b/sound/soc/au1x/dbdma2.c
@@ -182,15 +182,15 @@ out:
return 0;
}
-static inline struct au1xpsc_audio_dmadata *to_dmadata(struct snd_pcm_substream *ss)
+static inline struct au1xpsc_audio_dmadata *to_dmadata(struct snd_pcm_substream *ss,
+ struct snd_soc_component *component)
{
- struct snd_soc_pcm_runtime *rtd = ss->private_data;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct au1xpsc_audio_dmadata *pcd = snd_soc_component_get_drvdata(component);
return &pcd[ss->stream];
}
-static int au1xpsc_pcm_hw_params(struct snd_pcm_substream *substream,
+static int au1xpsc_pcm_hw_params(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_pcm_runtime *runtime = substream->runtime;
@@ -202,7 +202,7 @@ static int au1xpsc_pcm_hw_params(struct snd_pcm_substream *substream,
goto out;
stype = substream->stream;
- pcd = to_dmadata(substream);
+ pcd = to_dmadata(substream, component);
DBG("runtime->dma_area = 0x%08lx dma_addr_t = 0x%08lx dma_size = %zu "
"runtime->min_align %lu\n",
@@ -232,15 +232,17 @@ out:
return ret;
}
-static int au1xpsc_pcm_hw_free(struct snd_pcm_substream *substream)
+static int au1xpsc_pcm_hw_free(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
snd_pcm_lib_free_pages(substream);
return 0;
}
-static int au1xpsc_pcm_prepare(struct snd_pcm_substream *substream)
+static int au1xpsc_pcm_prepare(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
- struct au1xpsc_audio_dmadata *pcd = to_dmadata(substream);
+ struct au1xpsc_audio_dmadata *pcd = to_dmadata(substream, component);
au1xxx_dbdma_reset(pcd->ddma_chan);
@@ -255,9 +257,10 @@ static int au1xpsc_pcm_prepare(struct snd_pcm_substream *substream)
return 0;
}
-static int au1xpsc_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+static int au1xpsc_pcm_trigger(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream, int cmd)
{
- u32 c = to_dmadata(substream)->ddma_chan;
+ u32 c = to_dmadata(substream, component)->ddma_chan;
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
@@ -275,14 +278,17 @@ static int au1xpsc_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
}
static snd_pcm_uframes_t
-au1xpsc_pcm_pointer(struct snd_pcm_substream *substream)
+au1xpsc_pcm_pointer(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
- return bytes_to_frames(substream->runtime, to_dmadata(substream)->pos);
+ return bytes_to_frames(substream->runtime,
+ to_dmadata(substream, component)->pos);
}
-static int au1xpsc_pcm_open(struct snd_pcm_substream *substream)
+static int au1xpsc_pcm_open(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
- struct au1xpsc_audio_dmadata *pcd = to_dmadata(substream);
+ struct au1xpsc_audio_dmadata *pcd = to_dmadata(substream, component);
struct snd_soc_pcm_runtime *rtd = substream->private_data;
int stype = substream->stream, *dmaids;
@@ -296,24 +302,15 @@ static int au1xpsc_pcm_open(struct snd_pcm_substream *substream)
return 0;
}
-static int au1xpsc_pcm_close(struct snd_pcm_substream *substream)
+static int au1xpsc_pcm_close(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
- au1x_pcm_dbdma_free(to_dmadata(substream));
+ au1x_pcm_dbdma_free(to_dmadata(substream, component));
return 0;
}
-static const struct snd_pcm_ops au1xpsc_pcm_ops = {
- .open = au1xpsc_pcm_open,
- .close = au1xpsc_pcm_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = au1xpsc_pcm_hw_params,
- .hw_free = au1xpsc_pcm_hw_free,
- .prepare = au1xpsc_pcm_prepare,
- .trigger = au1xpsc_pcm_trigger,
- .pointer = au1xpsc_pcm_pointer,
-};
-
-static int au1xpsc_pcm_new(struct snd_soc_pcm_runtime *rtd)
+static int au1xpsc_pcm_new(struct snd_soc_component *component,
+ struct snd_soc_pcm_runtime *rtd)
{
struct snd_card *card = rtd->card->snd_card;
struct snd_pcm *pcm = rtd->pcm;
@@ -327,8 +324,15 @@ static int au1xpsc_pcm_new(struct snd_soc_pcm_runtime *rtd)
/* au1xpsc audio platform */
static struct snd_soc_component_driver au1xpsc_soc_component = {
.name = DRV_NAME,
- .ops = &au1xpsc_pcm_ops,
- .pcm_new = au1xpsc_pcm_new,
+ .open = au1xpsc_pcm_open,
+ .close = au1xpsc_pcm_close,
+ .ioctl = snd_soc_pcm_lib_ioctl,
+ .hw_params = au1xpsc_pcm_hw_params,
+ .hw_free = au1xpsc_pcm_hw_free,
+ .prepare = au1xpsc_pcm_prepare,
+ .trigger = au1xpsc_pcm_trigger,
+ .pointer = au1xpsc_pcm_pointer,
+ .pcm_construct = au1xpsc_pcm_new,
};
static int au1xpsc_pcm_drvprobe(struct platform_device *pdev)
diff --git a/sound/soc/au1x/dma.c b/sound/soc/au1x/dma.c
index 1e98cc4f9e27..054dfda89d3e 100644
--- a/sound/soc/au1x/dma.c
+++ b/sound/soc/au1x/dma.c
@@ -174,22 +174,23 @@ static const struct snd_pcm_hardware alchemy_pcm_hardware = {
.fifo_size = 16,
};
-static inline struct alchemy_pcm_ctx *ss_to_ctx(struct snd_pcm_substream *ss)
+static inline struct alchemy_pcm_ctx *ss_to_ctx(struct snd_pcm_substream *ss,
+ struct snd_soc_component *component)
{
- struct snd_soc_pcm_runtime *rtd = ss->private_data;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd, DRV_NAME);
return snd_soc_component_get_drvdata(component);
}
-static inline struct audio_stream *ss_to_as(struct snd_pcm_substream *ss)
+static inline struct audio_stream *ss_to_as(struct snd_pcm_substream *ss,
+ struct snd_soc_component *component)
{
- struct alchemy_pcm_ctx *ctx = ss_to_ctx(ss);
+ struct alchemy_pcm_ctx *ctx = ss_to_ctx(ss, component);
return &(ctx->stream[ss->stream]);
}
-static int alchemy_pcm_open(struct snd_pcm_substream *substream)
+static int alchemy_pcm_open(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
- struct alchemy_pcm_ctx *ctx = ss_to_ctx(substream);
+ struct alchemy_pcm_ctx *ctx = ss_to_ctx(substream, component);
struct snd_soc_pcm_runtime *rtd = substream->private_data;
int *dmaids, s = substream->stream;
char *name;
@@ -213,9 +214,10 @@ static int alchemy_pcm_open(struct snd_pcm_substream *substream)
return 0;
}
-static int alchemy_pcm_close(struct snd_pcm_substream *substream)
+static int alchemy_pcm_close(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
- struct alchemy_pcm_ctx *ctx = ss_to_ctx(substream);
+ struct alchemy_pcm_ctx *ctx = ss_to_ctx(substream, component);
int stype = substream->stream;
ctx->stream[stype].substream = NULL;
@@ -224,10 +226,11 @@ static int alchemy_pcm_close(struct snd_pcm_substream *substream)
return 0;
}
-static int alchemy_pcm_hw_params(struct snd_pcm_substream *substream,
+static int alchemy_pcm_hw_params(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *hw_params)
{
- struct audio_stream *stream = ss_to_as(substream);
+ struct audio_stream *stream = ss_to_as(substream, component);
int err;
err = snd_pcm_lib_malloc_pages(substream,
@@ -243,16 +246,18 @@ static int alchemy_pcm_hw_params(struct snd_pcm_substream *substream,
return err;
}
-static int alchemy_pcm_hw_free(struct snd_pcm_substream *substream)
+static int alchemy_pcm_hw_free(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
- struct audio_stream *stream = ss_to_as(substream);
+ struct audio_stream *stream = ss_to_as(substream, component);
au1000_release_dma_link(stream);
return snd_pcm_lib_free_pages(substream);
}
-static int alchemy_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+static int alchemy_pcm_trigger(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream, int cmd)
{
- struct audio_stream *stream = ss_to_as(substream);
+ struct audio_stream *stream = ss_to_as(substream, component);
int err = 0;
switch (cmd) {
@@ -269,9 +274,10 @@ static int alchemy_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
return err;
}
-static snd_pcm_uframes_t alchemy_pcm_pointer(struct snd_pcm_substream *ss)
+static snd_pcm_uframes_t alchemy_pcm_pointer(struct snd_soc_component *component,
+ struct snd_pcm_substream *ss)
{
- struct audio_stream *stream = ss_to_as(ss);
+ struct audio_stream *stream = ss_to_as(ss, component);
long location;
location = get_dma_residue(stream->dma);
@@ -281,30 +287,27 @@ static snd_pcm_uframes_t alchemy_pcm_pointer(struct snd_pcm_substream *ss)
return bytes_to_frames(ss->runtime, location);
}
-static const struct snd_pcm_ops alchemy_pcm_ops = {
- .open = alchemy_pcm_open,
- .close = alchemy_pcm_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = alchemy_pcm_hw_params,
- .hw_free = alchemy_pcm_hw_free,
- .trigger = alchemy_pcm_trigger,
- .pointer = alchemy_pcm_pointer,
-};
-
-static int alchemy_pcm_new(struct snd_soc_pcm_runtime *rtd)
+static int alchemy_pcm_new(struct snd_soc_component *component,
+ struct snd_soc_pcm_runtime *rtd)
{
struct snd_pcm *pcm = rtd->pcm;
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS,
- snd_dma_continuous_data(GFP_KERNEL), 65536, (4096 * 1024) - 1);
+ NULL, 65536, (4096 * 1024) - 1);
return 0;
}
static struct snd_soc_component_driver alchemy_pcm_soc_component = {
.name = DRV_NAME,
- .ops = &alchemy_pcm_ops,
- .pcm_new = alchemy_pcm_new,
+ .open = alchemy_pcm_open,
+ .close = alchemy_pcm_close,
+ .ioctl = snd_soc_pcm_lib_ioctl,
+ .hw_params = alchemy_pcm_hw_params,
+ .hw_free = alchemy_pcm_hw_free,
+ .trigger = alchemy_pcm_trigger,
+ .pointer = alchemy_pcm_pointer,
+ .pcm_construct = alchemy_pcm_new,
};
static int alchemy_pcm_drvprobe(struct platform_device *pdev)
diff --git a/sound/soc/bcm/cygnus-pcm.c b/sound/soc/bcm/cygnus-pcm.c
index 8966b02844dc..c65408085c1d 100644
--- a/sound/soc/bcm/cygnus-pcm.c
+++ b/sound/soc/bcm/cygnus-pcm.c
@@ -376,7 +376,8 @@ static void disable_intr(struct snd_pcm_substream *substream)
}
-static int cygnus_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+static int cygnus_pcm_trigger(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream, int cmd)
{
int ret = 0;
@@ -577,7 +578,8 @@ static irqreturn_t cygnus_dma_irq(int irq, void *data)
return IRQ_HANDLED;
}
-static int cygnus_pcm_open(struct snd_pcm_substream *substream)
+static int cygnus_pcm_open(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_pcm_runtime *runtime = substream->runtime;
@@ -613,7 +615,8 @@ static int cygnus_pcm_open(struct snd_pcm_substream *substream)
return 0;
}
-static int cygnus_pcm_close(struct snd_pcm_substream *substream)
+static int cygnus_pcm_close(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct cygnus_aio_port *aio;
@@ -633,8 +636,9 @@ static int cygnus_pcm_close(struct snd_pcm_substream *substream)
return 0;
}
-static int cygnus_pcm_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params)
+static int cygnus_pcm_hw_params(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_pcm_runtime *runtime = substream->runtime;
@@ -649,7 +653,8 @@ static int cygnus_pcm_hw_params(struct snd_pcm_substream *substream,
return 0;
}
-static int cygnus_pcm_hw_free(struct snd_pcm_substream *substream)
+static int cygnus_pcm_hw_free(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct cygnus_aio_port *aio;
@@ -661,7 +666,8 @@ static int cygnus_pcm_hw_free(struct snd_pcm_substream *substream)
return 0;
}
-static int cygnus_pcm_prepare(struct snd_pcm_substream *substream)
+static int cygnus_pcm_prepare(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_pcm_runtime *runtime = substream->runtime;
@@ -694,7 +700,8 @@ static int cygnus_pcm_prepare(struct snd_pcm_substream *substream)
return 0;
}
-static snd_pcm_uframes_t cygnus_pcm_pointer(struct snd_pcm_substream *substream)
+static snd_pcm_uframes_t cygnus_pcm_pointer(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct cygnus_aio_port *aio;
unsigned int res = 0, cur = 0, base = 0;
@@ -750,19 +757,8 @@ static int cygnus_pcm_preallocate_dma_buffer(struct snd_pcm *pcm, int stream)
return 0;
}
-
-static const struct snd_pcm_ops cygnus_pcm_ops = {
- .open = cygnus_pcm_open,
- .close = cygnus_pcm_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = cygnus_pcm_hw_params,
- .hw_free = cygnus_pcm_hw_free,
- .prepare = cygnus_pcm_prepare,
- .trigger = cygnus_pcm_trigger,
- .pointer = cygnus_pcm_pointer,
-};
-
-static void cygnus_dma_free_dma_buffers(struct snd_pcm *pcm)
+static void cygnus_dma_free_dma_buffers(struct snd_soc_component *component,
+ struct snd_pcm *pcm)
{
struct snd_pcm_substream *substream;
struct snd_dma_buffer *buf;
@@ -788,7 +784,8 @@ static void cygnus_dma_free_dma_buffers(struct snd_pcm *pcm)
}
}
-static int cygnus_dma_new(struct snd_soc_pcm_runtime *rtd)
+static int cygnus_dma_new(struct snd_soc_component *component,
+ struct snd_soc_pcm_runtime *rtd)
{
struct snd_card *card = rtd->card->snd_card;
struct snd_pcm *pcm = rtd->pcm;
@@ -810,7 +807,7 @@ static int cygnus_dma_new(struct snd_soc_pcm_runtime *rtd)
ret = cygnus_pcm_preallocate_dma_buffer(pcm,
SNDRV_PCM_STREAM_CAPTURE);
if (ret) {
- cygnus_dma_free_dma_buffers(pcm);
+ cygnus_dma_free_dma_buffers(component, pcm);
return ret;
}
}
@@ -819,9 +816,16 @@ static int cygnus_dma_new(struct snd_soc_pcm_runtime *rtd)
}
static struct snd_soc_component_driver cygnus_soc_platform = {
- .ops = &cygnus_pcm_ops,
- .pcm_new = cygnus_dma_new,
- .pcm_free = cygnus_dma_free_dma_buffers,
+ .open = cygnus_pcm_open,
+ .close = cygnus_pcm_close,
+ .ioctl = snd_soc_pcm_lib_ioctl,
+ .hw_params = cygnus_pcm_hw_params,
+ .hw_free = cygnus_pcm_hw_free,
+ .prepare = cygnus_pcm_prepare,
+ .trigger = cygnus_pcm_trigger,
+ .pointer = cygnus_pcm_pointer,
+ .pcm_construct = cygnus_dma_new,
+ .pcm_destruct = cygnus_dma_free_dma_buffers,
};
int cygnus_soc_platform_register(struct device *dev,
diff --git a/sound/soc/cirrus/Kconfig b/sound/soc/cirrus/Kconfig
index 2333efac758a..8039a8febefa 100644
--- a/sound/soc/cirrus/Kconfig
+++ b/sound/soc/cirrus/Kconfig
@@ -33,13 +33,13 @@ config SND_EP93XX_SOC_AC97
select SND_SOC_AC97_BUS
config SND_EP93XX_SOC_SNAPPERCL15
- tristate "SoC Audio support for Bluewater Systems Snapper CL15 module"
- depends on SND_EP93XX_SOC && MACH_SNAPPER_CL15 && I2C
- select SND_EP93XX_SOC_I2S
- select SND_SOC_TLV320AIC23_I2C
- help
- Say Y or M here if you want to add support for I2S audio on the
- Bluewater Systems Snapper CL15 module.
+ tristate "SoC Audio support for Bluewater Systems Snapper CL15 module"
+ depends on SND_EP93XX_SOC && MACH_SNAPPER_CL15 && I2C
+ select SND_EP93XX_SOC_I2S
+ select SND_SOC_TLV320AIC23_I2C
+ help
+ Say Y or M here if you want to add support for I2S audio on the
+ Bluewater Systems Snapper CL15 module.
config SND_EP93XX_SOC_SIMONE
tristate "SoC Audio support for Simplemachines Sim.One board"
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index 229cc89f8c5a..4abf37b5083f 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -34,6 +34,8 @@ config SND_SOC_ALL_CODECS
select SND_SOC_ADAU1977_I2C if I2C
select SND_SOC_ADAU1701 if I2C
select SND_SOC_ADAU7002
+ select SND_SOC_ADAU7118_I2C if I2C
+ select SND_SOC_ADAU7118_HW
select SND_SOC_ADS117X
select SND_SOC_AK4104 if SPI_MASTER
select SND_SOC_AK4118 if I2C
@@ -179,6 +181,8 @@ config SND_SOC_ALL_CODECS
select SND_SOC_STAC9766 if SND_SOC_AC97_BUS
select SND_SOC_STI_SAS
select SND_SOC_TAS2552 if I2C
+ select SND_SOC_TAS2562 if I2C
+ select SND_SOC_TAS2770 if I2C
select SND_SOC_TAS5086 if I2C
select SND_SOC_TAS571X if I2C
select SND_SOC_TAS5720 if I2C
@@ -257,16 +261,16 @@ config SND_SOC_ALL_CODECS
select SND_SOC_WM9705 if (SND_SOC_AC97_BUS || SND_SOC_AC97_BUS_NEW)
select SND_SOC_WM9712 if (SND_SOC_AC97_BUS || SND_SOC_AC97_BUS_NEW)
select SND_SOC_WM9713 if (SND_SOC_AC97_BUS || SND_SOC_AC97_BUS_NEW)
- help
- Normally ASoC codec drivers are only built if a machine driver which
- uses them is also built since they are only usable with a machine
- driver. Selecting this option will allow these drivers to be built
- without an explicit machine driver for test and development purposes.
+ help
+ Normally ASoC codec drivers are only built if a machine driver which
+ uses them is also built since they are only usable with a machine
+ driver. Selecting this option will allow these drivers to be built
+ without an explicit machine driver for test and development purposes.
Support for the bus types used to access the codecs to be built must
be selected separately.
- If unsure select "N".
+ If unsure select "N".
config SND_SOC_88PM860X
tristate
@@ -395,6 +399,33 @@ config SND_SOC_ADAU1977_I2C
config SND_SOC_ADAU7002
tristate "Analog Devices ADAU7002 Stereo PDM-to-I2S/TDM Converter"
+config SND_SOC_ADAU7118
+ tristate
+
+config SND_SOC_ADAU7118_HW
+ tristate "Analog Devices ADAU7118 8 Channel PDM-to-I2S/TDM Converter - HW Mode"
+ select SND_SOC_ADAU7118
+ help
+ Enable support for the Analog Devices ADAU7118 8 Channel PDM-to-I2S/TDM
+ Converter. In this mode, the device works in standalone mode, which
+ means that there is no bus to communicate with it. Stereo mode is not
+ supported in this mode.
+
+ To compile this driver as a module, choose M here: the module
+ will be called snd-soc-adau7118-hw.
+
+config SND_SOC_ADAU7118_I2C
+ tristate "Analog Devices ADAU7118 8 Channel PDM-to-I2S/TDM Converter - I2C"
+ depends on I2C
+ select SND_SOC_ADAU7118
+ select REGMAP_I2C
+ help
+ Enable support for the Analog Devices ADAU7118 8 Channel PDM-to-I2S/TDM
+ Converter over I2C. This gives full control over the device.
+
+ To compile this driver as a module, choose M here: the module
+ will be called snd-soc-adau7118-i2c.
+
config SND_SOC_ADAV80X
tristate
@@ -478,6 +509,8 @@ config SND_SOC_CQ0093VC
config SND_SOC_CROS_EC_CODEC
tristate "codec driver for ChromeOS EC"
depends on CROS_EC
+ select CRYPTO
+ select CRYPTO_SHA256
help
If you say yes here you will get support for the
ChromeOS Embedded Controller's Audio Codec.
@@ -570,8 +603,8 @@ config SND_SOC_CS42XX8_I2C
# Cirrus Logic CS43130 HiFi DAC
config SND_SOC_CS43130
- tristate "Cirrus Logic CS43130 CODEC"
- depends on I2C
+ tristate "Cirrus Logic CS43130 CODEC"
+ depends on I2C
config SND_SOC_CS4341
tristate "Cirrus Logic CS4341 CODEC"
@@ -643,19 +676,20 @@ config SND_SOC_L3
tristate
config SND_SOC_DA7210
- tristate
+ tristate
config SND_SOC_DA7213
- tristate
+ tristate "Dialog DA7213 CODEC"
+ depends on I2C
config SND_SOC_DA7218
tristate
config SND_SOC_DA7219
- tristate
+ tristate
config SND_SOC_DA732X
- tristate
+ tristate
config SND_SOC_DA9055
tristate
@@ -717,7 +751,7 @@ config SND_SOC_INNO_RK3036
select REGMAP_MMIO
config SND_SOC_ISABELLE
- tristate
+ tristate
config SND_SOC_LM49453
tristate
@@ -988,7 +1022,7 @@ config SND_SOC_RT5640
tristate
config SND_SOC_RT5645
- tristate
+ tristate
config SND_SOC_RT5651
tristate
@@ -1104,6 +1138,14 @@ config SND_SOC_TAS2552
tristate "Texas Instruments TAS2552 Mono Audio amplifier"
depends on I2C
+config SND_SOC_TAS2562
+ tristate "Texas Instruments TAS2562 Mono Audio amplifier"
+ depends on I2C
+
+config SND_SOC_TAS2770
+ tristate "Texas Instruments TAS2770 speaker amplifier"
+ depends on I2C
+
config SND_SOC_TAS5086
tristate "Texas Instruments TAS5086 speaker amplifier"
depends on I2C
@@ -1220,7 +1262,7 @@ config SND_SOC_UDA134X
tristate
config SND_SOC_UDA1380
- tristate
+ tristate
depends on I2C
config SND_SOC_WCD9335
@@ -1348,7 +1390,7 @@ config SND_SOC_WM8904
depends on I2C
config SND_SOC_WM8940
- tristate
+ tristate
config SND_SOC_WM8955
tristate
diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile
index c498373dcc5f..ddfd07071925 100644
--- a/sound/soc/codecs/Makefile
+++ b/sound/soc/codecs/Makefile
@@ -22,6 +22,9 @@ snd-soc-adau1977-objs := adau1977.o
snd-soc-adau1977-spi-objs := adau1977-spi.o
snd-soc-adau1977-i2c-objs := adau1977-i2c.o
snd-soc-adau7002-objs := adau7002.o
+snd-soc-adau7118-objs := adau7118.o
+snd-soc-adau7118-i2c-objs := adau7118-i2c.o
+snd-soc-adau7118-hw-objs := adau7118-hw.o
snd-soc-adav80x-objs := adav80x.o
snd-soc-adav801-objs := adav801.o
snd-soc-adav803-objs := adav803.o
@@ -196,6 +199,7 @@ snd-soc-tas571x-objs := tas571x.o
snd-soc-tas5720-objs := tas5720.o
snd-soc-tas6424-objs := tas6424.o
snd-soc-tda7419-objs := tda7419.o
+snd-soc-tas2770-objs := tas2770.o
snd-soc-tfa9879-objs := tfa9879.o
snd-soc-tlv320aic23-objs := tlv320aic23.o
snd-soc-tlv320aic23-i2c-objs := tlv320aic23-i2c.o
@@ -280,6 +284,7 @@ snd-soc-max98504-objs := max98504.o
snd-soc-simple-amplifier-objs := simple-amplifier.o
snd-soc-tpa6130a2-objs := tpa6130a2.o
snd-soc-tas2552-objs := tas2552.o
+snd-soc-tas2562-objs := tas2562.o
obj-$(CONFIG_SND_SOC_88PM860X) += snd-soc-88pm860x.o
obj-$(CONFIG_SND_SOC_AB8500_CODEC) += snd-soc-ab8500-codec.o
@@ -304,6 +309,9 @@ obj-$(CONFIG_SND_SOC_ADAU1977) += snd-soc-adau1977.o
obj-$(CONFIG_SND_SOC_ADAU1977_SPI) += snd-soc-adau1977-spi.o
obj-$(CONFIG_SND_SOC_ADAU1977_I2C) += snd-soc-adau1977-i2c.o
obj-$(CONFIG_SND_SOC_ADAU7002) += snd-soc-adau7002.o
+obj-$(CONFIG_SND_SOC_ADAU7118) += snd-soc-adau7118.o
+obj-$(CONFIG_SND_SOC_ADAU7118_I2C) += snd-soc-adau7118-i2c.o
+obj-$(CONFIG_SND_SOC_ADAU7118_HW) += snd-soc-adau7118-hw.o
obj-$(CONFIG_SND_SOC_ADAV80X) += snd-soc-adav80x.o
obj-$(CONFIG_SND_SOC_ADAV801) += snd-soc-adav801.o
obj-$(CONFIG_SND_SOC_ADAV803) += snd-soc-adav803.o
@@ -474,11 +482,13 @@ obj-$(CONFIG_SND_SOC_STA529) += snd-soc-sta529.o
obj-$(CONFIG_SND_SOC_STAC9766) += snd-soc-stac9766.o
obj-$(CONFIG_SND_SOC_STI_SAS) += snd-soc-sti-sas.o
obj-$(CONFIG_SND_SOC_TAS2552) += snd-soc-tas2552.o
+obj-$(CONFIG_SND_SOC_TAS2562) += snd-soc-tas2562.o
obj-$(CONFIG_SND_SOC_TAS5086) += snd-soc-tas5086.o
obj-$(CONFIG_SND_SOC_TAS571X) += snd-soc-tas571x.o
obj-$(CONFIG_SND_SOC_TAS5720) += snd-soc-tas5720.o
obj-$(CONFIG_SND_SOC_TAS6424) += snd-soc-tas6424.o
obj-$(CONFIG_SND_SOC_TDA7419) += snd-soc-tda7419.o
+obj-$(CONFIG_SND_SOC_TAS2770) += snd-soc-tas2770.o
obj-$(CONFIG_SND_SOC_TFA9879) += snd-soc-tfa9879.o
obj-$(CONFIG_SND_SOC_TLV320AIC23) += snd-soc-tlv320aic23.o
obj-$(CONFIG_SND_SOC_TLV320AIC23_I2C) += snd-soc-tlv320aic23-i2c.o
diff --git a/sound/soc/codecs/adau1761.c b/sound/soc/codecs/adau1761.c
index 977f5a63be3f..5ca9b744b7d8 100644
--- a/sound/soc/codecs/adau1761.c
+++ b/sound/soc/codecs/adau1761.c
@@ -28,6 +28,10 @@
#define ADAU1761_REC_MIXER_RIGHT1 0x400d
#define ADAU1761_LEFT_DIFF_INPUT_VOL 0x400e
#define ADAU1761_RIGHT_DIFF_INPUT_VOL 0x400f
+#define ADAU1761_ALC_CTRL0 0x4011
+#define ADAU1761_ALC_CTRL1 0x4012
+#define ADAU1761_ALC_CTRL2 0x4013
+#define ADAU1761_ALC_CTRL3 0x4014
#define ADAU1761_PLAY_LR_MIXER_LEFT 0x4020
#define ADAU1761_PLAY_MIXER_LEFT0 0x401c
#define ADAU1761_PLAY_MIXER_LEFT1 0x401d
@@ -71,6 +75,10 @@ static const struct reg_default adau1761_reg_defaults[] = {
{ ADAU1761_REC_MIXER_RIGHT0, 0x00 },
{ ADAU1761_REC_MIXER_RIGHT1, 0x00 },
{ ADAU1761_LEFT_DIFF_INPUT_VOL, 0x00 },
+ { ADAU1761_ALC_CTRL0, 0x00 },
+ { ADAU1761_ALC_CTRL1, 0x00 },
+ { ADAU1761_ALC_CTRL2, 0x00 },
+ { ADAU1761_ALC_CTRL3, 0x00 },
{ ADAU1761_RIGHT_DIFF_INPUT_VOL, 0x00 },
{ ADAU1761_PLAY_LR_MIXER_LEFT, 0x00 },
{ ADAU1761_PLAY_MIXER_LEFT0, 0x00 },
@@ -121,6 +129,10 @@ static const DECLARE_TLV_DB_SCALE(adau1761_sidetone_tlv, -1800, 300, 1);
static const DECLARE_TLV_DB_SCALE(adau1761_boost_tlv, -600, 600, 1);
static const DECLARE_TLV_DB_SCALE(adau1761_pga_boost_tlv, -2000, 2000, 1);
+static const DECLARE_TLV_DB_SCALE(adau1761_alc_max_gain_tlv, -1200, 600, 0);
+static const DECLARE_TLV_DB_SCALE(adau1761_alc_target_tlv, -2850, 150, 0);
+static const DECLARE_TLV_DB_SCALE(adau1761_alc_ng_threshold_tlv, -7650, 150, 0);
+
static const unsigned int adau1761_bias_select_values[] = {
0, 2, 3,
};
@@ -147,6 +159,103 @@ static SOC_VALUE_ENUM_SINGLE_DECL(adau1761_capture_bias_enum,
ADAU17X1_REC_POWER_MGMT, 1, 0x3, adau1761_bias_select_text,
adau1761_bias_select_values);
+static const unsigned int adau1761_pga_slew_time_values[] = {
+ 3, 0, 1, 2,
+};
+
+static const char * const adau1761_pga_slew_time_text[] = {
+ "Off",
+ "24 ms",
+ "48 ms",
+ "96 ms",
+};
+
+static const char * const adau1761_alc_function_text[] = {
+ "Off",
+ "Right",
+ "Left",
+ "Stereo",
+ "DSP control",
+};
+
+static const char * const adau1761_alc_hold_time_text[] = {
+ "2.67 ms",
+ "5.34 ms",
+ "10.68 ms",
+ "21.36 ms",
+ "42.72 ms",
+ "85.44 ms",
+ "170.88 ms",
+ "341.76 ms",
+ "683.52 ms",
+ "1367 ms",
+ "2734.1 ms",
+ "5468.2 ms",
+ "10936 ms",
+ "21873 ms",
+ "43745 ms",
+ "87491 ms",
+};
+
+static const char * const adau1761_alc_attack_time_text[] = {
+ "6 ms",
+ "12 ms",
+ "24 ms",
+ "48 ms",
+ "96 ms",
+ "192 ms",
+ "384 ms",
+ "768 ms",
+ "1540 ms",
+ "3070 ms",
+ "6140 ms",
+ "12290 ms",
+ "24580 ms",
+ "49150 ms",
+ "98300 ms",
+ "196610 ms",
+};
+
+static const char * const adau1761_alc_decay_time_text[] = {
+ "24 ms",
+ "48 ms",
+ "96 ms",
+ "192 ms",
+ "384 ms",
+ "768 ms",
+ "15400 ms",
+ "30700 ms",
+ "61400 ms",
+ "12290 ms",
+ "24580 ms",
+ "49150 ms",
+ "98300 ms",
+ "196610 ms",
+ "393220 ms",
+ "786430 ms",
+};
+
+static const char * const adau1761_alc_ng_type_text[] = {
+ "Hold",
+ "Mute",
+ "Fade",
+ "Fade + Mute",
+};
+
+static SOC_VALUE_ENUM_SINGLE_DECL(adau1761_pga_slew_time_enum,
+ ADAU1761_ALC_CTRL0, 6, 0x3, adau1761_pga_slew_time_text,
+ adau1761_pga_slew_time_values);
+static SOC_ENUM_SINGLE_DECL(adau1761_alc_function_enum,
+ ADAU1761_ALC_CTRL0, 0, adau1761_alc_function_text);
+static SOC_ENUM_SINGLE_DECL(adau1761_alc_hold_time_enum,
+ ADAU1761_ALC_CTRL1, 4, adau1761_alc_hold_time_text);
+static SOC_ENUM_SINGLE_DECL(adau1761_alc_attack_time_enum,
+ ADAU1761_ALC_CTRL2, 4, adau1761_alc_attack_time_text);
+static SOC_ENUM_SINGLE_DECL(adau1761_alc_decay_time_enum,
+ ADAU1761_ALC_CTRL2, 0, adau1761_alc_decay_time_text);
+static SOC_ENUM_SINGLE_DECL(adau1761_alc_ng_type_enum,
+ ADAU1761_ALC_CTRL3, 6, adau1761_alc_ng_type_text);
+
static const struct snd_kcontrol_new adau1761_jack_detect_controls[] = {
SOC_SINGLE("Speaker Auto-mute Switch", ADAU1761_DIGMIC_JACKDETECT,
4, 1, 0),
@@ -161,6 +270,22 @@ static const struct snd_kcontrol_new adau1761_differential_mode_controls[] = {
SOC_DOUBLE_R_TLV("PGA Boost Capture Volume", ADAU1761_REC_MIXER_LEFT1,
ADAU1761_REC_MIXER_RIGHT1, 3, 2, 0, adau1761_pga_boost_tlv),
+
+ SOC_ENUM("PGA Capture Slew Time", adau1761_pga_slew_time_enum),
+
+ SOC_SINGLE_TLV("ALC Capture Max Gain Volume", ADAU1761_ALC_CTRL0,
+ 3, 7, 0, adau1761_alc_max_gain_tlv),
+ SOC_ENUM("ALC Capture Function", adau1761_alc_function_enum),
+ SOC_ENUM("ALC Capture Hold Time", adau1761_alc_hold_time_enum),
+ SOC_SINGLE_TLV("ALC Capture Target Volume", ADAU1761_ALC_CTRL1,
+ 0, 15, 0, adau1761_alc_target_tlv),
+ SOC_ENUM("ALC Capture Attack Time", adau1761_alc_decay_time_enum),
+ SOC_ENUM("ALC Capture Decay Time", adau1761_alc_attack_time_enum),
+ SOC_ENUM("ALC Capture Noise Gate Type", adau1761_alc_ng_type_enum),
+ SOC_SINGLE("ALC Capture Noise Gate Switch",
+ ADAU1761_ALC_CTRL3, 5, 1, 0),
+ SOC_SINGLE_TLV("ALC Capture Noise Gate Threshold Volume",
+ ADAU1761_ALC_CTRL3, 0, 31, 0, adau1761_alc_ng_threshold_tlv),
};
static const struct snd_kcontrol_new adau1761_single_mode_controls[] = {
@@ -632,6 +757,10 @@ static bool adau1761_readable_register(struct device *dev, unsigned int reg)
case ADAU1761_DEJITTER:
case ADAU1761_CLK_ENABLE0:
case ADAU1761_CLK_ENABLE1:
+ case ADAU1761_ALC_CTRL0:
+ case ADAU1761_ALC_CTRL1:
+ case ADAU1761_ALC_CTRL2:
+ case ADAU1761_ALC_CTRL3:
return true;
default:
break;
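A worked example for the TLV scales added above: DECLARE_TLV_DB_SCALE() takes centi-dB values, so adau1761_alc_max_gain_tlv (min -1200, step 600) maps the 3-bit field behind the "ALC Capture Max Gain Volume" control (raw 0..7) onto -12 dB through +30 dB. A hypothetical helper making the arithmetic explicit:

/* TLV values are in 0.01 dB: -1200 + raw * 600 over raw 0..7 */
static int alc_max_gain_db(unsigned int raw)
{
	return (-1200 + (int)raw * 600) / 100;	/* -12 .. +30 dB */
}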
diff --git a/sound/soc/codecs/adau7118-hw.c b/sound/soc/codecs/adau7118-hw.c
new file mode 100644
index 000000000000..45a5d2dcc0f2
--- /dev/null
+++ b/sound/soc/codecs/adau7118-hw.c
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Analog Devices ADAU7118 8 channel PDM-to-I2S/TDM Converter Standalone Hw
+// driver
+//
+// Copyright 2019 Analog Devices Inc.
+
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+
+#include "adau7118.h"
+
+static int adau7118_probe_hw(struct platform_device *pdev)
+{
+ return adau7118_probe(&pdev->dev, NULL, true);
+}
+
+static const struct of_device_id adau7118_of_match[] = {
+ { .compatible = "adi,adau7118" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, adau7118_of_match);
+
+static const struct platform_device_id adau7118_id[] = {
+ { .name = "adau7118" },
+ { }
+};
+MODULE_DEVICE_TABLE(platform, adau7118_id);
+
+static struct platform_driver adau7118_driver_hw = {
+ .driver = {
+ .name = "adau7118",
+ .of_match_table = adau7118_of_match,
+ },
+ .probe = adau7118_probe_hw,
+ .id_table = adau7118_id,
+};
+module_platform_driver(adau7118_driver_hw);
+
+MODULE_AUTHOR("Nuno Sa <nuno.sa@analog.com>");
+MODULE_DESCRIPTION("ADAU7118 8 channel PDM-to-I2S/TDM Converter driver for standalone hw mode");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/adau7118-i2c.c b/sound/soc/codecs/adau7118-i2c.c
new file mode 100644
index 000000000000..a8211362fe82
--- /dev/null
+++ b/sound/soc/codecs/adau7118-i2c.c
@@ -0,0 +1,82 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Analog Devices ADAU7118 8 channel PDM-to-I2S/TDM Converter driver over I2C
+//
+// Copyright 2019 Analog Devices Inc.
+
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+
+#include "adau7118.h"
+
+static const struct reg_default adau7118_reg_defaults[] = {
+ { ADAU7118_REG_VENDOR_ID, 0x41 },
+ { ADAU7118_REG_DEVICE_ID1, 0x71 },
+ { ADAU7118_REG_DEVICE_ID2, 0x18 },
+ { ADAU7118_REG_REVISION_ID, 0x00 },
+ { ADAU7118_REG_ENABLES, 0x3F },
+ { ADAU7118_REG_DEC_RATIO_CLK_MAP, 0xC0 },
+ { ADAU7118_REG_HPF_CONTROL, 0xD0 },
+ { ADAU7118_REG_SPT_CTRL1, 0x41 },
+ { ADAU7118_REG_SPT_CTRL2, 0x00 },
+ { ADAU7118_REG_SPT_CX(0), 0x01 },
+ { ADAU7118_REG_SPT_CX(1), 0x11 },
+ { ADAU7118_REG_SPT_CX(2), 0x21 },
+ { ADAU7118_REG_SPT_CX(3), 0x31 },
+ { ADAU7118_REG_SPT_CX(4), 0x41 },
+ { ADAU7118_REG_SPT_CX(5), 0x51 },
+ { ADAU7118_REG_SPT_CX(6), 0x61 },
+ { ADAU7118_REG_SPT_CX(7), 0x71 },
+ { ADAU7118_REG_DRIVE_STRENGTH, 0x2a },
+ { ADAU7118_REG_RESET, 0x00 },
+};
+
+static const struct regmap_config adau7118_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .reg_defaults = adau7118_reg_defaults,
+ .num_reg_defaults = ARRAY_SIZE(adau7118_reg_defaults),
+ .cache_type = REGCACHE_RBTREE,
+ .max_register = ADAU7118_REG_RESET,
+};
+
+static int adau7118_probe_i2c(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct regmap *map;
+
+ map = devm_regmap_init_i2c(i2c, &adau7118_regmap_config);
+ if (IS_ERR(map)) {
+ dev_err(&i2c->dev, "Failed to init regmap %ld\n", PTR_ERR(map));
+ return PTR_ERR(map);
+ }
+
+ return adau7118_probe(&i2c->dev, map, false);
+}
+
+static const struct of_device_id adau7118_of_match[] = {
+ { .compatible = "adi,adau7118" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, adau7118_of_match);
+
+static const struct i2c_device_id adau7118_id[] = {
+ {"adau7118", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, adau7118_id);
+
+static struct i2c_driver adau7118_driver = {
+ .driver = {
+ .name = "adau7118",
+ .of_match_table = adau7118_of_match,
+ },
+ .probe = adau7118_probe_i2c,
+ .id_table = adau7118_id,
+};
+module_i2c_driver(adau7118_driver);
+
+MODULE_AUTHOR("Nuno Sa <nuno.sa@analog.com>");
+MODULE_DESCRIPTION("ADAU7118 8 channel PDM-to-I2S/TDM Converter driver over I2C");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/adau7118.c b/sound/soc/codecs/adau7118.c
new file mode 100644
index 000000000000..841229dcbca1
--- /dev/null
+++ b/sound/soc/codecs/adau7118.c
@@ -0,0 +1,586 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Analog Devices ADAU7118 8 channel PDM-to-I2S/TDM Converter driver
+//
+// Copyright 2019 Analog Devices Inc.
+
+#include <linux/bitfield.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+
+#include "adau7118.h"
+
+#define ADAU7118_DEC_RATIO_MASK GENMASK(1, 0)
+#define ADAU7118_DEC_RATIO(x) FIELD_PREP(ADAU7118_DEC_RATIO_MASK, x)
+#define ADAU7118_CLK_MAP_MASK GENMASK(7, 4)
+#define ADAU7118_SLOT_WIDTH_MASK GENMASK(5, 4)
+#define ADAU7118_SLOT_WIDTH(x) FIELD_PREP(ADAU7118_SLOT_WIDTH_MASK, x)
+#define ADAU7118_TRISTATE_MASK BIT(6)
+#define ADAU7118_TRISTATE(x) FIELD_PREP(ADAU7118_TRISTATE_MASK, x)
+#define ADAU7118_DATA_FMT_MASK GENMASK(3, 1)
+#define ADAU7118_DATA_FMT(x) FIELD_PREP(ADAU7118_DATA_FMT_MASK, x)
+#define ADAU7118_SAI_MODE_MASK BIT(0)
+#define ADAU7118_SAI_MODE(x) FIELD_PREP(ADAU7118_SAI_MODE_MASK, x)
+#define ADAU7118_LRCLK_BCLK_POL_MASK GENMASK(1, 0)
+#define ADAU7118_LRCLK_BCLK_POL(x) \
+ FIELD_PREP(ADAU7118_LRCLK_BCLK_POL_MASK, x)
+#define ADAU7118_SPT_SLOT_MASK GENMASK(7, 4)
+#define ADAU7118_SPT_SLOT(x) FIELD_PREP(ADAU7118_SPT_SLOT_MASK, x)
+#define ADAU7118_FULL_SOFT_R_MASK BIT(1)
+#define ADAU7118_FULL_SOFT_R(x) FIELD_PREP(ADAU7118_FULL_SOFT_R_MASK, x)
+
+struct adau7118_data {
+ struct regmap *map;
+ struct device *dev;
+ struct regulator *iovdd;
+ struct regulator *dvdd;
+ u32 slot_width;
+ u32 slots;
+ bool hw_mode;
+ bool right_j;
+};
+
+/* Input Enable */
+static const struct snd_kcontrol_new adau7118_dapm_pdm_control[4] = {
+ SOC_DAPM_SINGLE("Capture Switch", ADAU7118_REG_ENABLES, 0, 1, 0),
+ SOC_DAPM_SINGLE("Capture Switch", ADAU7118_REG_ENABLES, 1, 1, 0),
+ SOC_DAPM_SINGLE("Capture Switch", ADAU7118_REG_ENABLES, 2, 1, 0),
+ SOC_DAPM_SINGLE("Capture Switch", ADAU7118_REG_ENABLES, 3, 1, 0),
+};
+
+static const struct snd_soc_dapm_widget adau7118_widgets_sw[] = {
+ /* Input Enable Switches */
+ SND_SOC_DAPM_SWITCH("PDM0", SND_SOC_NOPM, 0, 0,
+ &adau7118_dapm_pdm_control[0]),
+ SND_SOC_DAPM_SWITCH("PDM1", SND_SOC_NOPM, 0, 0,
+ &adau7118_dapm_pdm_control[1]),
+ SND_SOC_DAPM_SWITCH("PDM2", SND_SOC_NOPM, 0, 0,
+ &adau7118_dapm_pdm_control[2]),
+ SND_SOC_DAPM_SWITCH("PDM3", SND_SOC_NOPM, 0, 0,
+ &adau7118_dapm_pdm_control[3]),
+
+ /* PDM Clocks */
+ SND_SOC_DAPM_SUPPLY("PDM_CLK0", ADAU7118_REG_ENABLES, 4, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("PDM_CLK1", ADAU7118_REG_ENABLES, 5, 0, NULL, 0),
+
+ /* Output channels */
+ SND_SOC_DAPM_AIF_OUT("AIF1TX1", "Capture", 0, ADAU7118_REG_SPT_CX(0),
+ 0, 0),
+ SND_SOC_DAPM_AIF_OUT("AIF1TX2", "Capture", 0, ADAU7118_REG_SPT_CX(1),
+ 0, 0),
+ SND_SOC_DAPM_AIF_OUT("AIF1TX3", "Capture", 0, ADAU7118_REG_SPT_CX(2),
+ 0, 0),
+ SND_SOC_DAPM_AIF_OUT("AIF1TX4", "Capture", 0, ADAU7118_REG_SPT_CX(3),
+ 0, 0),
+ SND_SOC_DAPM_AIF_OUT("AIF1TX5", "Capture", 0, ADAU7118_REG_SPT_CX(4),
+ 0, 0),
+ SND_SOC_DAPM_AIF_OUT("AIF1TX6", "Capture", 0, ADAU7118_REG_SPT_CX(5),
+ 0, 0),
+ SND_SOC_DAPM_AIF_OUT("AIF1TX7", "Capture", 0, ADAU7118_REG_SPT_CX(6),
+ 0, 0),
+ SND_SOC_DAPM_AIF_OUT("AIF1TX8", "Capture", 0, ADAU7118_REG_SPT_CX(7),
+ 0, 0),
+};
+
+static const struct snd_soc_dapm_route adau7118_routes_sw[] = {
+ { "PDM0", "Capture Switch", "PDM_DAT0" },
+ { "PDM1", "Capture Switch", "PDM_DAT1" },
+ { "PDM2", "Capture Switch", "PDM_DAT2" },
+ { "PDM3", "Capture Switch", "PDM_DAT3" },
+ { "AIF1TX1", NULL, "PDM0" },
+ { "AIF1TX2", NULL, "PDM0" },
+ { "AIF1TX3", NULL, "PDM1" },
+ { "AIF1TX4", NULL, "PDM1" },
+ { "AIF1TX5", NULL, "PDM2" },
+ { "AIF1TX6", NULL, "PDM2" },
+ { "AIF1TX7", NULL, "PDM3" },
+ { "AIF1TX8", NULL, "PDM3" },
+ { "Capture", NULL, "PDM_CLK0" },
+ { "Capture", NULL, "PDM_CLK1" },
+};
+
+static const struct snd_soc_dapm_widget adau7118_widgets_hw[] = {
+ SND_SOC_DAPM_AIF_OUT("AIF1TX", "Capture", 0, SND_SOC_NOPM, 0, 0),
+};
+
+static const struct snd_soc_dapm_route adau7118_routes_hw[] = {
+ { "AIF1TX", NULL, "PDM_DAT0" },
+ { "AIF1TX", NULL, "PDM_DAT1" },
+ { "AIF1TX", NULL, "PDM_DAT2" },
+ { "AIF1TX", NULL, "PDM_DAT3" },
+};
+
+static const struct snd_soc_dapm_widget adau7118_widgets[] = {
+ SND_SOC_DAPM_INPUT("PDM_DAT0"),
+ SND_SOC_DAPM_INPUT("PDM_DAT1"),
+ SND_SOC_DAPM_INPUT("PDM_DAT2"),
+ SND_SOC_DAPM_INPUT("PDM_DAT3"),
+};
+
+static int adau7118_set_channel_map(struct snd_soc_dai *dai,
+ unsigned int tx_num, unsigned int *tx_slot,
+ unsigned int rx_num, unsigned int *rx_slot)
+{
+ struct adau7118_data *st =
+ snd_soc_component_get_drvdata(dai->component);
+ int chan, ret;
+
+ dev_dbg(st->dev, "Set channel map, %d", tx_num);
+
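+ /* Bits [7:4] of each serial-port channel register select its TDM output slot. */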
+ for (chan = 0; chan < tx_num; chan++) {
+ ret = snd_soc_component_update_bits(dai->component,
+ ADAU7118_REG_SPT_CX(chan),
+ ADAU7118_SPT_SLOT_MASK,
+ ADAU7118_SPT_SLOT(tx_slot[chan]));
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int adau7118_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+ struct adau7118_data *st =
+ snd_soc_component_get_drvdata(dai->component);
+ int ret = 0;
+ u32 regval;
+
+ dev_dbg(st->dev, "Set format, fmt:%d\n", fmt);
+
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_I2S:
+ ret = snd_soc_component_update_bits(dai->component,
+ ADAU7118_REG_SPT_CTRL1,
+ ADAU7118_DATA_FMT_MASK,
+ ADAU7118_DATA_FMT(0));
+ break;
+ case SND_SOC_DAIFMT_LEFT_J:
+ ret = snd_soc_component_update_bits(dai->component,
+ ADAU7118_REG_SPT_CTRL1,
+ ADAU7118_DATA_FMT_MASK,
+ ADAU7118_DATA_FMT(1));
+ break;
+ case SND_SOC_DAIFMT_RIGHT_J:
+ st->right_j = true;
+ break;
+ default:
+ dev_err(st->dev, "Invalid format %d",
+ fmt & SND_SOC_DAIFMT_FORMAT_MASK);
+ return -EINVAL;
+ }
+
+ if (ret < 0)
+ return ret;
+
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_NF:
+ regval = ADAU7118_LRCLK_BCLK_POL(0);
+ break;
+ case SND_SOC_DAIFMT_NB_IF:
+ regval = ADAU7118_LRCLK_BCLK_POL(2);
+ break;
+ case SND_SOC_DAIFMT_IB_NF:
+ regval = ADAU7118_LRCLK_BCLK_POL(1);
+ break;
+ case SND_SOC_DAIFMT_IB_IF:
+ regval = ADAU7118_LRCLK_BCLK_POL(3);
+ break;
+ default:
+ dev_err(st->dev, "Invalid Inv mask %d",
+ fmt & SND_SOC_DAIFMT_INV_MASK);
+ return -EINVAL;
+ }
+
+ ret = snd_soc_component_update_bits(dai->component,
+ ADAU7118_REG_SPT_CTRL2,
+ ADAU7118_LRCLK_BCLK_POL_MASK,
+ regval);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int adau7118_set_tristate(struct snd_soc_dai *dai, int tristate)
+{
+ struct adau7118_data *st =
+ snd_soc_component_get_drvdata(dai->component);
+ int ret;
+
+ dev_dbg(st->dev, "Set tristate, %d\n", tristate);
+
+ ret = snd_soc_component_update_bits(dai->component,
+ ADAU7118_REG_SPT_CTRL1,
+ ADAU7118_TRISTATE_MASK,
+ ADAU7118_TRISTATE(tristate));
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int adau7118_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask,
+ unsigned int rx_mask, int slots,
+ int slot_width)
+{
+ struct adau7118_data *st =
+ snd_soc_component_get_drvdata(dai->component);
+ int ret = 0;
+ u32 regval;
+
+ dev_dbg(st->dev, "Set tdm, slots:%d width:%d\n", slots, slot_width);
+
+ switch (slot_width) {
+ case 32:
+ regval = ADAU7118_SLOT_WIDTH(0);
+ break;
+ case 24:
+ regval = ADAU7118_SLOT_WIDTH(2);
+ break;
+ case 16:
+ regval = ADAU7118_SLOT_WIDTH(1);
+ break;
+ default:
+ dev_err(st->dev, "Invalid slot width:%d\n", slot_width);
+ return -EINVAL;
+ }
+
+ ret = snd_soc_component_update_bits(dai->component,
+ ADAU7118_REG_SPT_CTRL1,
+ ADAU7118_SLOT_WIDTH_MASK, regval);
+ if (ret < 0)
+ return ret;
+
+ st->slot_width = slot_width;
+ st->slots = slots;
+
+ return 0;
+}
+
+static int adau7118_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct adau7118_data *st =
+ snd_soc_component_get_drvdata(dai->component);
+ u32 data_width = params_width(params), slots_width;
+ int ret;
+ u32 regval;
+
+ if (!st->slots) {
+ /* set stereo mode */
+ ret = snd_soc_component_update_bits(dai->component,
+ ADAU7118_REG_SPT_CTRL1,
+ ADAU7118_SAI_MODE_MASK,
+ ADAU7118_SAI_MODE(0));
+ if (ret < 0)
+ return ret;
+
+ slots_width = 32;
+ } else {
+ slots_width = st->slot_width;
+ }
+
+ if (data_width > slots_width) {
+ dev_err(st->dev, "Invalid data_width:%d, slots_width:%d",
+ data_width, slots_width);
+ return -EINVAL;
+ }
+
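+ /*
+ * Emulate right-justified mode by delaying the data by the number of
+ * unused MSBs in the slot (slot width minus data width).
+ */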
+ if (st->right_j) {
+ switch (slots_width - data_width) {
+ case 8:
+ /* delay bclk by 8 */
+ regval = ADAU7118_DATA_FMT(2);
+ break;
+ case 12:
+ /* delay bclk by 12 */
+ regval = ADAU7118_DATA_FMT(3);
+ break;
+ case 16:
+ /* delay bclk by 16 */
+ regval = ADAU7118_DATA_FMT(4);
+ break;
+ default:
+ dev_err(st->dev,
+ "Cannot set right_j setting, slot_w:%d, data_w:%d\n",
+ slots_width, data_width);
+ return -EINVAL;
+ }
+
+ ret = snd_soc_component_update_bits(dai->component,
+ ADAU7118_REG_SPT_CTRL1,
+ ADAU7118_DATA_FMT_MASK,
+ regval);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int adau7118_set_bias_level(struct snd_soc_component *component,
+ enum snd_soc_bias_level level)
+{
+ struct adau7118_data *st = snd_soc_component_get_drvdata(component);
+ int ret = 0;
+
+ dev_dbg(st->dev, "Set bias level %d\n", level);
+
+ switch (level) {
+ case SND_SOC_BIAS_ON:
+ case SND_SOC_BIAS_PREPARE:
+ break;
+
+ case SND_SOC_BIAS_STANDBY:
+ if (snd_soc_component_get_bias_level(component) ==
+ SND_SOC_BIAS_OFF) {
+ /* power on */
+ ret = regulator_enable(st->iovdd);
+ if (ret)
+ return ret;
+
+ /* there are no timing constraints on enabling dvdd */
+ ret = regulator_enable(st->dvdd);
+ if (ret) {
+ regulator_disable(st->iovdd);
+ return ret;
+ }
+
+ if (st->hw_mode)
+ return 0;
+
+ regcache_cache_only(st->map, false);
+ /* sync cache */
+ ret = snd_soc_component_cache_sync(component);
+ }
+ break;
+ case SND_SOC_BIAS_OFF:
+ /* power off */
+ ret = regulator_disable(st->dvdd);
+ if (ret)
+ return ret;
+
+ ret = regulator_disable(st->iovdd);
+ if (ret)
+ return ret;
+
+ if (st->hw_mode)
+ return 0;
+
+ /* cache only */
+ regcache_mark_dirty(st->map);
+ regcache_cache_only(st->map, true);
+
+ break;
+ }
+
+ return ret;
+}
+
+static int adau7118_component_probe(struct snd_soc_component *component)
+{
+ struct adau7118_data *st = snd_soc_component_get_drvdata(component);
+ struct snd_soc_dapm_context *dapm =
+ snd_soc_component_get_dapm(component);
+ int ret = 0;
+
+ if (st->hw_mode) {
+ ret = snd_soc_dapm_new_controls(dapm, adau7118_widgets_hw,
+ ARRAY_SIZE(adau7118_widgets_hw));
+ if (ret)
+ return ret;
+
+ ret = snd_soc_dapm_add_routes(dapm, adau7118_routes_hw,
+ ARRAY_SIZE(adau7118_routes_hw));
+ } else {
+ snd_soc_component_init_regmap(component, st->map);
+ ret = snd_soc_dapm_new_controls(dapm, adau7118_widgets_sw,
+ ARRAY_SIZE(adau7118_widgets_sw));
+ if (ret)
+ return ret;
+
+ ret = snd_soc_dapm_add_routes(dapm, adau7118_routes_sw,
+ ARRAY_SIZE(adau7118_routes_sw));
+ }
+
+ return ret;
+}
+
+static const struct snd_soc_dai_ops adau7118_ops = {
+ .hw_params = adau7118_hw_params,
+ .set_channel_map = adau7118_set_channel_map,
+ .set_fmt = adau7118_set_fmt,
+ .set_tdm_slot = adau7118_set_tdm_slot,
+ .set_tristate = adau7118_set_tristate,
+};
+
+static struct snd_soc_dai_driver adau7118_dai = {
+ .name = "adau7118-hifi-capture",
+ .capture = {
+ .stream_name = "Capture",
+ .channels_min = 1,
+ .channels_max = 8,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |
+ SNDRV_PCM_FMTBIT_S20_LE | SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S24_3LE,
+ .rates = SNDRV_PCM_RATE_CONTINUOUS,
+ .rate_min = 4000,
+ .rate_max = 192000,
+ .sig_bits = 24,
+ },
+};
+
+static const struct snd_soc_component_driver adau7118_component_driver = {
+ .probe = adau7118_component_probe,
+ .set_bias_level = adau7118_set_bias_level,
+ .dapm_widgets = adau7118_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(adau7118_widgets),
+ .use_pmdown_time = 1,
+ .endianness = 1,
+ .non_legacy_dai_naming = 1,
+};
+
+static void adau7118_regulator_disable(void *data)
+{
+ struct adau7118_data *st = data;
+ int ret;
+ /*
+ * If we fail to disable DVDD, don't bother trying IOVDD. We
+ * must not be left in a state where DVDD is enabled while
+ * IOVDD is disabled.
+ */
+ ret = regulator_disable(st->dvdd);
+ if (ret)
+ return;
+
+ regulator_disable(st->iovdd);
+}
+
+static int adau7118_regulator_setup(struct adau7118_data *st)
+{
+ st->iovdd = devm_regulator_get(st->dev, "iovdd");
+ if (IS_ERR(st->iovdd)) {
+ dev_err(st->dev, "Could not get iovdd: %ld\n",
+ PTR_ERR(st->iovdd));
+ return PTR_ERR(st->iovdd);
+ }
+
+ st->dvdd = devm_regulator_get(st->dev, "dvdd");
+ if (IS_ERR(st->dvdd)) {
+ dev_err(st->dev, "Could not get dvdd: %ld\n",
+ PTR_ERR(st->dvdd));
+ return PTR_ERR(st->dvdd);
+ }
+ /* just assume the device is in reset */
+ if (!st->hw_mode) {
+ regcache_mark_dirty(st->map);
+ regcache_cache_only(st->map, true);
+ }
+
+ return devm_add_action_or_reset(st->dev, adau7118_regulator_disable,
+ st);
+}
+
+static int adau7118_parse_dt(const struct adau7118_data *st)
+{
+ int ret;
+ u32 dec_ratio = 0;
+ /* 4 inputs */
+ u32 clk_map[4], regval;
+
+ if (st->hw_mode)
+ return 0;
+
+ ret = device_property_read_u32(st->dev, "adi,decimation-ratio",
+ &dec_ratio);
+ if (!ret) {
+ switch (dec_ratio) {
+ case 64:
+ regval = ADAU7118_DEC_RATIO(0);
+ break;
+ case 32:
+ regval = ADAU7118_DEC_RATIO(1);
+ break;
+ case 16:
+ regval = ADAU7118_DEC_RATIO(2);
+ break;
+ default:
+ dev_err(st->dev, "Invalid dec ratio: %u", dec_ratio);
+ return -EINVAL;
+ }
+
+ ret = regmap_update_bits(st->map,
+ ADAU7118_REG_DEC_RATIO_CLK_MAP,
+ ADAU7118_DEC_RATIO_MASK, regval);
+ if (ret)
+ return ret;
+ }
+
+ ret = device_property_read_u32_array(st->dev, "adi,pdm-clk-map",
+ clk_map, ARRAY_SIZE(clk_map));
+ if (!ret) {
+ int pdm;
+ u32 _clk_map = 0;
+
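+ /* Pack one clock-select bit per PDM input into bits [7:4]. */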
+ for (pdm = 0; pdm < ARRAY_SIZE(clk_map); pdm++)
+ _clk_map |= (clk_map[pdm] << (pdm + 4));
+
+ ret = regmap_update_bits(st->map,
+ ADAU7118_REG_DEC_RATIO_CLK_MAP,
+ ADAU7118_CLK_MAP_MASK, _clk_map);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int adau7118_probe(struct device *dev, struct regmap *map, bool hw_mode)
+{
+ struct adau7118_data *st;
+ int ret;
+
+ st = devm_kzalloc(dev, sizeof(*st), GFP_KERNEL);
+ if (!st)
+ return -ENOMEM;
+
+ st->dev = dev;
+ st->hw_mode = hw_mode;
+ dev_set_drvdata(dev, st);
+
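+ /* In hardware mode there is no control interface, so there is nothing to reset. */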
+ if (!hw_mode) {
+ st->map = map;
+ adau7118_dai.ops = &adau7118_ops;
+ /*
+ * Perform a full soft reset. This resets all registers to
+ * their default values.
+ */
+ ret = regmap_update_bits(map, ADAU7118_REG_RESET,
+ ADAU7118_FULL_SOFT_R_MASK,
+ ADAU7118_FULL_SOFT_R(1));
+ if (ret)
+ return ret;
+ }
+
+ ret = adau7118_parse_dt(st);
+ if (ret)
+ return ret;
+
+ ret = adau7118_regulator_setup(st);
+ if (ret)
+ return ret;
+
+ return devm_snd_soc_register_component(dev,
+ &adau7118_component_driver,
+ &adau7118_dai, 1);
+}
+EXPORT_SYMBOL_GPL(adau7118_probe);
+
+MODULE_AUTHOR("Nuno Sa <nuno.sa@analog.com>");
+MODULE_DESCRIPTION("ADAU7118 8 channel PDM-to-I2S/TDM Converter driver");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/adau7118.h b/sound/soc/codecs/adau7118.h
new file mode 100644
index 000000000000..c65679a4dff1
--- /dev/null
+++ b/sound/soc/codecs/adau7118.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_ADAU7118_H
+#define _LINUX_ADAU7118_H
+
+struct regmap;
+struct device;
+
+/* register map */
+#define ADAU7118_REG_VENDOR_ID 0x00
+#define ADAU7118_REG_DEVICE_ID1 0x01
+#define ADAU7118_REG_DEVICE_ID2 0x02
+#define ADAU7118_REG_REVISION_ID 0x03
+#define ADAU7118_REG_ENABLES 0x04
+#define ADAU7118_REG_DEC_RATIO_CLK_MAP 0x05
+#define ADAU7118_REG_HPF_CONTROL 0x06
+#define ADAU7118_REG_SPT_CTRL1 0x07
+#define ADAU7118_REG_SPT_CTRL2 0x08
+#define ADAU7118_REG_SPT_CX(num) (0x09 + (num))
+#define ADAU7118_REG_DRIVE_STRENGTH 0x11
+#define ADAU7118_REG_RESET 0x12
+
+int adau7118_probe(struct device *dev, struct regmap *map, bool hw_mode);
+
+#endif
diff --git a/sound/soc/codecs/cros_ec_codec.c b/sound/soc/codecs/cros_ec_codec.c
index 3c1bd24a1057..7b17f39a6a10 100644
--- a/sound/soc/codecs/cros_ec_codec.c
+++ b/sound/soc/codecs/cros_ec_codec.c
@@ -1,15 +1,23 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Driver for ChromeOS Embedded Controller codec.
+ * Copyright 2019 Google, Inc.
+ *
+ * ChromeOS Embedded Controller codec driver.
*
* This driver uses the cros-ec interface to communicate with the ChromeOS
* EC for audio function.
*/
+#include <crypto/hash.h>
+#include <crypto/sha.h>
#include <linux/delay.h>
#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/platform_data/cros_ec_commands.h>
#include <linux/platform_data/cros_ec_proto.h>
#include <linux/platform_device.h>
@@ -18,92 +26,279 @@
#include <sound/soc.h>
#include <sound/tlv.h>
-#define DRV_NAME "cros-ec-codec"
-
-/**
- * struct cros_ec_codec_data - ChromeOS EC codec driver data.
- * @dev: Device structure used in sysfs.
- * @ec_device: cros_ec_device structure to talk to the physical device.
- * @component: Pointer to the component.
- * @max_dmic_gain: Maximum gain in dB supported by EC codec.
- */
-struct cros_ec_codec_data {
+struct cros_ec_codec_priv {
struct device *dev;
struct cros_ec_device *ec_device;
- struct snd_soc_component *component;
- unsigned int max_dmic_gain;
+
+ /* common */
+ uint32_t ec_capabilities;
+
+ uint64_t ec_shm_addr;
+ uint32_t ec_shm_len;
+
+ uint64_t ap_shm_phys_addr;
+ uint32_t ap_shm_len;
+ uint64_t ap_shm_addr;
+ uint64_t ap_shm_last_alloc;
+
+ /* DMIC */
+ atomic_t dmic_probed;
+
+ /* WoV */
+ bool wov_enabled;
+ uint8_t *wov_audio_shm_p;
+ uint32_t wov_audio_shm_len;
+ uint8_t wov_audio_shm_type;
+ uint8_t *wov_lang_shm_p;
+ uint32_t wov_lang_shm_len;
+ uint8_t wov_lang_shm_type;
+
+ struct mutex wov_dma_lock;
+ uint8_t wov_buf[64000];
+ uint32_t wov_rp, wov_wp;
+ size_t wov_dma_offset;
+ bool wov_burst_read;
+ struct snd_pcm_substream *wov_substream;
+ struct delayed_work wov_copy_work;
+ struct notifier_block wov_notifier;
};
-static const DECLARE_TLV_DB_SCALE(ec_mic_gain_tlv, 0, 100, 0);
+static int ec_codec_capable(struct cros_ec_codec_priv *priv, uint8_t cap)
+{
+ return priv->ec_capabilities & BIT(cap);
+}
-static int ec_command_get_gain(struct snd_soc_component *component,
- struct ec_param_codec_i2s *param,
- struct ec_codec_i2s_gain *resp)
+static int send_ec_host_command(struct cros_ec_device *ec_dev, uint32_t cmd,
+ uint8_t *out, size_t outsize,
+ uint8_t *in, size_t insize)
{
- struct cros_ec_codec_data *codec_data =
- snd_soc_component_get_drvdata(component);
- struct cros_ec_device *ec_device = codec_data->ec_device;
- u8 buffer[sizeof(struct cros_ec_command) +
- max(sizeof(struct ec_param_codec_i2s),
- sizeof(struct ec_codec_i2s_gain))];
- struct cros_ec_command *msg = (struct cros_ec_command *)&buffer;
int ret;
+ struct cros_ec_command *msg;
+
+ msg = kmalloc(sizeof(*msg) + max(outsize, insize), GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
msg->version = 0;
- msg->command = EC_CMD_CODEC_I2S;
- msg->outsize = sizeof(struct ec_param_codec_i2s);
- msg->insize = sizeof(struct ec_codec_i2s_gain);
+ msg->command = cmd;
+ msg->outsize = outsize;
+ msg->insize = insize;
- memcpy(msg->data, param, msg->outsize);
+ if (outsize)
+ memcpy(msg->data, out, outsize);
- ret = cros_ec_cmd_xfer_status(ec_device, msg);
- if (ret > 0)
- memcpy(resp, msg->data, msg->insize);
+ ret = cros_ec_cmd_xfer_status(ec_dev, msg);
+ if (ret < 0)
+ goto error;
+
+ if (insize)
+ memcpy(in, msg->data, insize);
+ ret = 0;
+error:
+ kfree(msg);
return ret;
}
-/*
- * Wrapper for EC command without response.
- */
-static int ec_command_no_resp(struct snd_soc_component *component,
- struct ec_param_codec_i2s *param)
+static int calculate_sha256(struct cros_ec_codec_priv *priv,
+ uint8_t *buf, uint32_t size, uint8_t *digest)
{
- struct cros_ec_codec_data *codec_data =
+ struct crypto_shash *tfm;
+
+ tfm = crypto_alloc_shash("sha256", CRYPTO_ALG_TYPE_SHASH, 0);
+ if (IS_ERR(tfm)) {
+ dev_err(priv->dev, "can't alloc shash\n");
+ return PTR_ERR(tfm);
+ }
+
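+ /* The block scope bounds the lifetime of the on-stack shash descriptor. */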
+ {
+ SHASH_DESC_ON_STACK(desc, tfm);
+
+ desc->tfm = tfm;
+
+ crypto_shash_digest(desc, buf, size, digest);
+ shash_desc_zero(desc);
+ }
+
+ crypto_free_shash(tfm);
+
+#ifdef DEBUG
+ {
+ char digest_str[65];
+
+ bin2hex(digest_str, digest, 32);
+ digest_str[64] = 0;
+ dev_dbg(priv->dev, "hash=%s\n", digest_str);
+ }
+#endif
+
+ return 0;
+}
+
+static int dmic_get_gain(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *component =
+ snd_soc_kcontrol_component(kcontrol);
+ struct cros_ec_codec_priv *priv =
snd_soc_component_get_drvdata(component);
- struct cros_ec_device *ec_device = codec_data->ec_device;
- u8 buffer[sizeof(struct cros_ec_command) +
- sizeof(struct ec_param_codec_i2s)];
- struct cros_ec_command *msg = (struct cros_ec_command *)&buffer;
+ struct ec_param_ec_codec_dmic p;
+ struct ec_response_ec_codec_dmic_get_gain_idx r;
+ int ret;
- msg->version = 0;
- msg->command = EC_CMD_CODEC_I2S;
- msg->outsize = sizeof(struct ec_param_codec_i2s);
- msg->insize = 0;
+ p.cmd = EC_CODEC_DMIC_GET_GAIN_IDX;
+ p.get_gain_idx_param.channel = EC_CODEC_DMIC_CHANNEL_0;
+ ret = send_ec_host_command(priv->ec_device, EC_CMD_EC_CODEC_DMIC,
+ (uint8_t *)&p, sizeof(p),
+ (uint8_t *)&r, sizeof(r));
+ if (ret < 0)
+ return ret;
+ ucontrol->value.integer.value[0] = r.gain;
- memcpy(msg->data, param, msg->outsize);
+ p.cmd = EC_CODEC_DMIC_GET_GAIN_IDX;
+ p.get_gain_idx_param.channel = EC_CODEC_DMIC_CHANNEL_1;
+ ret = send_ec_host_command(priv->ec_device, EC_CMD_EC_CODEC_DMIC,
+ (uint8_t *)&p, sizeof(p),
+ (uint8_t *)&r, sizeof(r));
+ if (ret < 0)
+ return ret;
+ ucontrol->value.integer.value[1] = r.gain;
- return cros_ec_cmd_xfer_status(ec_device, msg);
+ return 0;
}
-static int set_i2s_config(struct snd_soc_component *component,
- enum ec_i2s_config i2s_config)
+static int dmic_put_gain(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
{
- struct ec_param_codec_i2s param;
+ struct snd_soc_component *component =
+ snd_soc_kcontrol_component(kcontrol);
+ struct cros_ec_codec_priv *priv =
+ snd_soc_component_get_drvdata(component);
+ struct soc_mixer_control *control =
+ (struct soc_mixer_control *)kcontrol->private_value;
+ int max_dmic_gain = control->max;
+ int left = ucontrol->value.integer.value[0];
+ int right = ucontrol->value.integer.value[1];
+ struct ec_param_ec_codec_dmic p;
+ int ret;
+
+ if (left > max_dmic_gain || right > max_dmic_gain)
+ return -EINVAL;
- dev_dbg(component->dev, "%s set I2S format to %u\n", __func__,
- i2s_config);
+ dev_dbg(component->dev, "set mic gain to %u, %u\n", left, right);
- param.cmd = EC_CODEC_I2S_SET_CONFIG;
- param.i2s_config = i2s_config;
+ p.cmd = EC_CODEC_DMIC_SET_GAIN_IDX;
+ p.set_gain_idx_param.channel = EC_CODEC_DMIC_CHANNEL_0;
+ p.set_gain_idx_param.gain = left;
+ ret = send_ec_host_command(priv->ec_device, EC_CMD_EC_CODEC_DMIC,
+ (uint8_t *)&p, sizeof(p), NULL, 0);
+ if (ret < 0)
+ return ret;
+
+ p.cmd = EC_CODEC_DMIC_SET_GAIN_IDX;
+ p.set_gain_idx_param.channel = EC_CODEC_DMIC_CHANNEL_1;
+ p.set_gain_idx_param.gain = right;
+ return send_ec_host_command(priv->ec_device, EC_CMD_EC_CODEC_DMIC,
+ (uint8_t *)&p, sizeof(p), NULL, 0);
+}
+
+static const DECLARE_TLV_DB_SCALE(dmic_gain_tlv, 0, 100, 0);
+
+enum {
+ DMIC_CTL_GAIN = 0,
+};
+
+static struct snd_kcontrol_new dmic_controls[] = {
+ [DMIC_CTL_GAIN] =
+ SOC_DOUBLE_EXT_TLV("EC Mic Gain", SND_SOC_NOPM, SND_SOC_NOPM,
+ 0, 0, 0, dmic_get_gain, dmic_put_gain,
+ dmic_gain_tlv),
+};
+
+static int dmic_probe(struct snd_soc_component *component)
+{
+ struct cros_ec_codec_priv *priv =
+ snd_soc_component_get_drvdata(component);
+ struct device *dev = priv->dev;
+ struct soc_mixer_control *control;
+ struct ec_param_ec_codec_dmic p;
+ struct ec_response_ec_codec_dmic_get_max_gain r;
+ int ret;
+
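+ /* Called from both the I2S RX and WoV component probes; run only once. */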
+ if (!atomic_add_unless(&priv->dmic_probed, 1, 1))
+ return 0;
+
+ p.cmd = EC_CODEC_DMIC_GET_MAX_GAIN;
+
+ ret = send_ec_host_command(priv->ec_device, EC_CMD_EC_CODEC_DMIC,
+ (uint8_t *)&p, sizeof(p),
+ (uint8_t *)&r, sizeof(r));
+ if (ret < 0) {
+ dev_warn(dev, "get_max_gain() unsupported\n");
+ return 0;
+ }
+
+ dev_dbg(dev, "max gain = %d\n", r.max_gain);
+
+ control = (struct soc_mixer_control *)
+ dmic_controls[DMIC_CTL_GAIN].private_value;
+ control->max = r.max_gain;
+ control->platform_max = r.max_gain;
- return ec_command_no_resp(component, &param);
+ return snd_soc_add_component_controls(component,
+ &dmic_controls[DMIC_CTL_GAIN], 1);
}
-static int cros_ec_i2s_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+static int i2s_rx_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
{
struct snd_soc_component *component = dai->component;
- enum ec_i2s_config i2s_config;
+ struct cros_ec_codec_priv *priv =
+ snd_soc_component_get_drvdata(component);
+ struct ec_param_ec_codec_i2s_rx p;
+ enum ec_codec_i2s_rx_sample_depth depth;
+ int ret;
+
+ if (params_rate(params) != 48000)
+ return -EINVAL;
+
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ depth = EC_CODEC_I2S_RX_SAMPLE_DEPTH_16;
+ break;
+ case SNDRV_PCM_FORMAT_S24_LE:
+ depth = EC_CODEC_I2S_RX_SAMPLE_DEPTH_24;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ dev_dbg(component->dev, "set depth to %u\n", depth);
+
+ p.cmd = EC_CODEC_I2S_RX_SET_SAMPLE_DEPTH;
+ p.set_sample_depth_param.depth = depth;
+ ret = send_ec_host_command(priv->ec_device, EC_CMD_EC_CODEC_I2S_RX,
+ (uint8_t *)&p, sizeof(p), NULL, 0);
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(component->dev, "set bclk to %u\n",
+ snd_soc_params_to_bclk(params));
+
+ p.cmd = EC_CODEC_I2S_RX_SET_BCLK;
+ p.set_bclk_param.bclk = snd_soc_params_to_bclk(params);
+ return send_ec_host_command(priv->ec_device, EC_CMD_EC_CODEC_I2S_RX,
+ (uint8_t *)&p, sizeof(p), NULL, 0);
+}
+
+static int i2s_rx_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+ struct snd_soc_component *component = dai->component;
+ struct cros_ec_codec_priv *priv =
+ snd_soc_component_get_drvdata(component);
+ struct ec_param_ec_codec_i2s_rx p;
+ enum ec_codec_i2s_rx_daifmt daifmt;
switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
case SND_SOC_DAIFMT_CBS_CFS:
@@ -121,300 +316,727 @@ static int cros_ec_i2s_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
case SND_SOC_DAIFMT_I2S:
- i2s_config = EC_DAI_FMT_I2S;
+ daifmt = EC_CODEC_I2S_RX_DAIFMT_I2S;
break;
-
case SND_SOC_DAIFMT_RIGHT_J:
- i2s_config = EC_DAI_FMT_RIGHT_J;
+ daifmt = EC_CODEC_I2S_RX_DAIFMT_RIGHT_J;
break;
-
case SND_SOC_DAIFMT_LEFT_J:
- i2s_config = EC_DAI_FMT_LEFT_J;
+ daifmt = EC_CODEC_I2S_RX_DAIFMT_LEFT_J;
break;
+ default:
+ return -EINVAL;
+ }
- case SND_SOC_DAIFMT_DSP_A:
- i2s_config = EC_DAI_FMT_PCM_A;
- break;
+ dev_dbg(component->dev, "set format to %u\n", daifmt);
- case SND_SOC_DAIFMT_DSP_B:
- i2s_config = EC_DAI_FMT_PCM_B;
- break;
+ p.cmd = EC_CODEC_I2S_RX_SET_DAIFMT;
+ p.set_daifmt_param.daifmt = daifmt;
+ return send_ec_host_command(priv->ec_device, EC_CMD_EC_CODEC_I2S_RX,
+ (uint8_t *)&p, sizeof(p), NULL, 0);
+}
+
+static const struct snd_soc_dai_ops i2s_rx_dai_ops = {
+ .hw_params = i2s_rx_hw_params,
+ .set_fmt = i2s_rx_set_fmt,
+};
+static int i2s_rx_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_component *component =
+ snd_soc_dapm_to_component(w->dapm);
+ struct cros_ec_codec_priv *priv =
+ snd_soc_component_get_drvdata(component);
+ struct ec_param_ec_codec_i2s_rx p;
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ dev_dbg(component->dev, "enable I2S RX\n");
+ p.cmd = EC_CODEC_I2S_RX_ENABLE;
+ break;
+ case SND_SOC_DAPM_PRE_PMD:
+ dev_dbg(component->dev, "disable I2S RX\n");
+ p.cmd = EC_CODEC_I2S_RX_DISABLE;
+ break;
default:
- return -EINVAL;
+ return 0;
}
- return set_i2s_config(component, i2s_config);
+ return send_ec_host_command(priv->ec_device, EC_CMD_EC_CODEC_I2S_RX,
+ (uint8_t *)&p, sizeof(p), NULL, 0);
+}
+
+static struct snd_soc_dapm_widget i2s_rx_dapm_widgets[] = {
+ SND_SOC_DAPM_INPUT("DMIC"),
+ SND_SOC_DAPM_SUPPLY("I2S RX Enable", SND_SOC_NOPM, 0, 0, i2s_rx_event,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD),
+ SND_SOC_DAPM_AIF_OUT("I2S RX", "I2S Capture", 0, SND_SOC_NOPM, 0, 0),
+};
+
+static struct snd_soc_dapm_route i2s_rx_dapm_routes[] = {
+ {"I2S RX", NULL, "DMIC"},
+ {"I2S RX", NULL, "I2S RX Enable"},
+};
+
+static struct snd_soc_dai_driver i2s_rx_dai_driver = {
+ .name = "EC Codec I2S RX",
+ .capture = {
+ .stream_name = "I2S Capture",
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_48000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE,
+ },
+ .ops = &i2s_rx_dai_ops,
+};
+
+static int i2s_rx_probe(struct snd_soc_component *component)
+{
+ return dmic_probe(component);
}
-static int set_i2s_sample_depth(struct snd_soc_component *component,
- enum ec_sample_depth_value depth)
+static const struct snd_soc_component_driver i2s_rx_component_driver = {
+ .probe = i2s_rx_probe,
+ .dapm_widgets = i2s_rx_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(i2s_rx_dapm_widgets),
+ .dapm_routes = i2s_rx_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(i2s_rx_dapm_routes),
+};
+
+static void *wov_map_shm(struct cros_ec_codec_priv *priv,
+ uint8_t shm_id, uint32_t *len, uint8_t *type)
{
- struct ec_param_codec_i2s param;
+ struct ec_param_ec_codec p;
+ struct ec_response_ec_codec_get_shm_addr r;
+ uint32_t req, offset;
+
+ p.cmd = EC_CODEC_GET_SHM_ADDR;
+ p.get_shm_addr_param.shm_id = shm_id;
+ if (send_ec_host_command(priv->ec_device, EC_CMD_EC_CODEC,
+ (uint8_t *)&p, sizeof(p),
+ (uint8_t *)&r, sizeof(r)) < 0) {
+ dev_err(priv->dev, "failed to EC_CODEC_GET_SHM_ADDR\n");
+ return NULL;
+ }
- dev_dbg(component->dev, "%s set depth to %u\n", __func__, depth);
+ dev_dbg(priv->dev, "phys_addr=%#llx, len=%#x\n", r.phys_addr, r.len);
+
+ *len = r.len;
+ *type = r.type;
+
+ switch (r.type) {
+ case EC_CODEC_SHM_TYPE_EC_RAM:
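+ /* r.phys_addr is an offset within the EC's shared-memory window. */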
+ return (void __force *)devm_ioremap_wc(priv->dev,
+ r.phys_addr + priv->ec_shm_addr, r.len);
+ case EC_CODEC_SHM_TYPE_SYSTEM_RAM:
+ if (r.phys_addr) {
+ dev_err(priv->dev, "unknown status\n");
+ return NULL;
+ }
+
+ req = round_up(r.len, PAGE_SIZE);
+ dev_dbg(priv->dev, "round up from %u to %u\n", r.len, req);
+
+ if (priv->ap_shm_last_alloc + req >
+ priv->ap_shm_phys_addr + priv->ap_shm_len) {
+ dev_err(priv->dev, "insufficient space for AP SHM\n");
+ return NULL;
+ }
+
+ dev_dbg(priv->dev, "alloc AP SHM addr=%#llx, len=%#x\n",
+ priv->ap_shm_last_alloc, req);
+
+ p.cmd = EC_CODEC_SET_SHM_ADDR;
+ p.set_shm_addr_param.phys_addr = priv->ap_shm_last_alloc;
+ p.set_shm_addr_param.len = req;
+ p.set_shm_addr_param.shm_id = shm_id;
+ if (send_ec_host_command(priv->ec_device, EC_CMD_EC_CODEC,
+ (uint8_t *)&p, sizeof(p),
+ NULL, 0) < 0) {
+ dev_err(priv->dev, "failed to EC_CODEC_SET_SHM_ADDR\n");
+ return NULL;
+ }
+
+ /*
+ * Note: the EC codec only requested `r.len' bytes, but we
+ * allocate `req' bytes, rounded up to PAGE_SIZE.
+ */
+ offset = priv->ap_shm_last_alloc - priv->ap_shm_phys_addr;
+ priv->ap_shm_last_alloc += req;
+
+ return (void *)(uintptr_t)(priv->ap_shm_addr + offset);
+ default:
+ return NULL;
+ }
+}
- param.cmd = EC_CODEC_SET_SAMPLE_DEPTH;
- param.depth = depth;
+static bool wov_queue_full(struct cros_ec_codec_priv *priv)
+{
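+ /* The ring is full when advancing the write pointer would meet the read pointer. */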
+ return ((priv->wov_wp + 1) % sizeof(priv->wov_buf)) == priv->wov_rp;
+}
- return ec_command_no_resp(component, &param);
+static size_t wov_queue_size(struct cros_ec_codec_priv *priv)
+{
+ if (priv->wov_wp >= priv->wov_rp)
+ return priv->wov_wp - priv->wov_rp;
+ else
+ return sizeof(priv->wov_buf) - priv->wov_rp + priv->wov_wp;
}
-static int set_i2s_bclk(struct snd_soc_component *component, uint32_t bclk)
+static void wov_queue_dequeue(struct cros_ec_codec_priv *priv, size_t len)
{
- struct ec_param_codec_i2s param;
+ struct snd_pcm_runtime *runtime = priv->wov_substream->runtime;
+ size_t req;
+
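+ /*
+ * Copy in bounded chunks: each pass is capped by the PCM DMA buffer
+ * wrap and by the ring-buffer wrap.
+ */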
+ while (len) {
+ req = min(len, runtime->dma_bytes - priv->wov_dma_offset);
+ if (priv->wov_wp >= priv->wov_rp)
+ req = min(req, (size_t)priv->wov_wp - priv->wov_rp);
+ else
+ req = min(req, sizeof(priv->wov_buf) - priv->wov_rp);
- dev_dbg(component->dev, "%s set i2s bclk to %u\n", __func__, bclk);
+ memcpy(runtime->dma_area + priv->wov_dma_offset,
+ priv->wov_buf + priv->wov_rp, req);
- param.cmd = EC_CODEC_I2S_SET_BCLK;
- param.bclk = bclk;
+ priv->wov_dma_offset += req;
+ if (priv->wov_dma_offset == runtime->dma_bytes)
+ priv->wov_dma_offset = 0;
- return ec_command_no_resp(component, &param);
+ priv->wov_rp += req;
+ if (priv->wov_rp == sizeof(priv->wov_buf))
+ priv->wov_rp = 0;
+
+ len -= req;
+ }
+
+ snd_pcm_period_elapsed(priv->wov_substream);
}
-static int cros_ec_i2s_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params,
- struct snd_soc_dai *dai)
+static void wov_queue_try_dequeue(struct cros_ec_codec_priv *priv)
{
- struct snd_soc_component *component = dai->component;
- unsigned int rate, bclk;
- int ret;
+ size_t period_bytes = snd_pcm_lib_period_bytes(priv->wov_substream);
- rate = params_rate(params);
- if (rate != 48000)
- return -EINVAL;
+ while (period_bytes && wov_queue_size(priv) >= period_bytes) {
+ wov_queue_dequeue(priv, period_bytes);
+ period_bytes = snd_pcm_lib_period_bytes(priv->wov_substream);
+ }
+}
- switch (params_format(params)) {
- case SNDRV_PCM_FORMAT_S16_LE:
- ret = set_i2s_sample_depth(component, EC_CODEC_SAMPLE_DEPTH_16);
- break;
- case SNDRV_PCM_FORMAT_S24_LE:
- ret = set_i2s_sample_depth(component, EC_CODEC_SAMPLE_DEPTH_24);
- break;
- default:
- return -EINVAL;
+static void wov_queue_enqueue(struct cros_ec_codec_priv *priv,
+ uint8_t *addr, size_t len, bool iomem)
+{
+ size_t req;
+
+ while (len) {
+ if (wov_queue_full(priv)) {
+ wov_queue_try_dequeue(priv);
+
+ if (wov_queue_full(priv)) {
+ dev_err(priv->dev, "overrun detected\n");
+ return;
+ }
+ }
+
+ if (priv->wov_wp >= priv->wov_rp)
+ req = sizeof(priv->wov_buf) - priv->wov_wp;
+ else
+ /* Note: waste 1-byte to differentiate full and empty */
+ req = priv->wov_rp - priv->wov_wp - 1;
+ req = min(req, len);
+
+ if (iomem)
+ memcpy_fromio(priv->wov_buf + priv->wov_wp,
+ (void __force __iomem *)addr, req);
+ else
+ memcpy(priv->wov_buf + priv->wov_wp, addr, req);
+
+ priv->wov_wp += req;
+ if (priv->wov_wp == sizeof(priv->wov_buf))
+ priv->wov_wp = 0;
+
+ addr += req;
+ len -= req;
}
- if (ret < 0)
+
+ wov_queue_try_dequeue(priv);
+}
+
+static int wov_read_audio_shm(struct cros_ec_codec_priv *priv)
+{
+ struct ec_param_ec_codec_wov p;
+ struct ec_response_ec_codec_wov_read_audio_shm r;
+ int ret;
+
+ p.cmd = EC_CODEC_WOV_READ_AUDIO_SHM;
+ ret = send_ec_host_command(priv->ec_device, EC_CMD_EC_CODEC_WOV,
+ (uint8_t *)&p, sizeof(p),
+ (uint8_t *)&r, sizeof(r));
+ if (ret) {
+ dev_err(priv->dev, "failed to EC_CODEC_WOV_READ_AUDIO_SHM\n");
return ret;
+ }
- bclk = snd_soc_params_to_bclk(params);
- return set_i2s_bclk(component, bclk);
+ if (!r.len)
+ dev_dbg(priv->dev, "no data, sleep\n");
+ else
+ wov_queue_enqueue(priv, priv->wov_audio_shm_p + r.offset, r.len,
+ priv->wov_audio_shm_type == EC_CODEC_SHM_TYPE_EC_RAM);
+ return -EAGAIN;
}
-static const struct snd_soc_dai_ops cros_ec_i2s_dai_ops = {
- .hw_params = cros_ec_i2s_hw_params,
- .set_fmt = cros_ec_i2s_set_dai_fmt,
-};
+static int wov_read_audio(struct cros_ec_codec_priv *priv)
+{
+ struct ec_param_ec_codec_wov p;
+ struct ec_response_ec_codec_wov_read_audio r;
+ int remain = priv->wov_burst_read ? 16000 : 320;
+ int ret;
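+ /*
+ * In burst mode drain up to 16000 bytes per pass; otherwise read
+ * 320 bytes, i.e. 10 ms of 16 kHz mono S16 audio.
+ */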
-static struct snd_soc_dai_driver cros_ec_dai[] = {
- {
- .name = "cros_ec_codec I2S",
- .id = 0,
- .capture = {
- .stream_name = "I2S Capture",
- .channels_min = 2,
- .channels_max = 2,
- .rates = SNDRV_PCM_RATE_48000,
- .formats = SNDRV_PCM_FMTBIT_S16_LE |
- SNDRV_PCM_FMTBIT_S24_LE,
- },
- .ops = &cros_ec_i2s_dai_ops,
+ while (remain >= 0) {
+ p.cmd = EC_CODEC_WOV_READ_AUDIO;
+ ret = send_ec_host_command(priv->ec_device, EC_CMD_EC_CODEC_WOV,
+ (uint8_t *)&p, sizeof(p),
+ (uint8_t *)&r, sizeof(r));
+ if (ret) {
+ dev_err(priv->dev,
+ "failed to EC_CODEC_WOV_READ_AUDIO\n");
+ return ret;
+ }
+
+ if (!r.len) {
+ dev_dbg(priv->dev, "no data, sleep\n");
+ priv->wov_burst_read = false;
+ break;
+ }
+
+ wov_queue_enqueue(priv, r.buf, r.len, false);
+ remain -= r.len;
}
-};
-static int get_ec_mic_gain(struct snd_soc_component *component,
- u8 *left, u8 *right)
+ return -EAGAIN;
+}
+
+static void wov_copy_work(struct work_struct *w)
{
- struct ec_param_codec_i2s param;
- struct ec_codec_i2s_gain resp;
+ struct cros_ec_codec_priv *priv =
+ container_of(w, struct cros_ec_codec_priv, wov_copy_work.work);
int ret;
- param.cmd = EC_CODEC_GET_GAIN;
+ mutex_lock(&priv->wov_dma_lock);
+ if (!priv->wov_substream) {
+ dev_warn(priv->dev, "no pcm substream\n");
+ goto leave;
+ }
- ret = ec_command_get_gain(component, &param, &resp);
- if (ret < 0)
- return ret;
+ if (ec_codec_capable(priv, EC_CODEC_CAP_WOV_AUDIO_SHM))
+ ret = wov_read_audio_shm(priv);
+ else
+ ret = wov_read_audio(priv);
+
+ if (ret == -EAGAIN)
+ schedule_delayed_work(&priv->wov_copy_work,
+ msecs_to_jiffies(10));
+ else if (ret)
+ dev_err(priv->dev, "failed to read audio data\n");
+leave:
+ mutex_unlock(&priv->wov_dma_lock);
+}
- *left = resp.left;
- *right = resp.right;
+static int wov_enable_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *c = snd_soc_kcontrol_component(kcontrol);
+ struct cros_ec_codec_priv *priv = snd_soc_component_get_drvdata(c);
+ ucontrol->value.integer.value[0] = priv->wov_enabled;
return 0;
}
-static int mic_gain_get(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
+static int wov_enable_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_component *component =
- snd_soc_kcontrol_component(kcontrol);
- u8 left, right;
+ struct snd_soc_component *c = snd_soc_kcontrol_component(kcontrol);
+ struct cros_ec_codec_priv *priv = snd_soc_component_get_drvdata(c);
+ int enabled = ucontrol->value.integer.value[0];
+ struct ec_param_ec_codec_wov p;
int ret;
- ret = get_ec_mic_gain(component, &left, &right);
- if (ret)
- return ret;
-
- ucontrol->value.integer.value[0] = left;
- ucontrol->value.integer.value[1] = right;
+ if (priv->wov_enabled != enabled) {
+ if (enabled)
+ p.cmd = EC_CODEC_WOV_ENABLE;
+ else
+ p.cmd = EC_CODEC_WOV_DISABLE;
+
+ ret = send_ec_host_command(priv->ec_device, EC_CMD_EC_CODEC_WOV,
+ (uint8_t *)&p, sizeof(p), NULL, 0);
+ if (ret) {
+ dev_err(priv->dev, "failed to %s wov\n",
+ enabled ? "enable" : "disable");
+ return ret;
+ }
+
+ priv->wov_enabled = enabled;
+ }
return 0;
}
-static int set_ec_mic_gain(struct snd_soc_component *component,
- u8 left, u8 right)
+static int wov_set_lang_shm(struct cros_ec_codec_priv *priv,
+ uint8_t *buf, size_t size, uint8_t *digest)
{
- struct ec_param_codec_i2s param;
+ struct ec_param_ec_codec_wov p;
+ struct ec_param_ec_codec_wov_set_lang_shm *pp = &p.set_lang_shm_param;
+ int ret;
- dev_dbg(component->dev, "%s set mic gain to %u, %u\n",
- __func__, left, right);
+ if (size > priv->wov_lang_shm_len) {
+ dev_err(priv->dev, "no enough SHM size: %d\n",
+ priv->wov_lang_shm_len);
+ return -EIO;
+ }
- param.cmd = EC_CODEC_SET_GAIN;
- param.gain.left = left;
- param.gain.right = right;
+ switch (priv->wov_lang_shm_type) {
+ case EC_CODEC_SHM_TYPE_EC_RAM:
+ memcpy_toio((void __force __iomem *)priv->wov_lang_shm_p,
+ buf, size);
+ memset_io((void __force __iomem *)priv->wov_lang_shm_p + size,
+ 0, priv->wov_lang_shm_len - size);
+ break;
+ case EC_CODEC_SHM_TYPE_SYSTEM_RAM:
+ memcpy(priv->wov_lang_shm_p, buf, size);
+ memset(priv->wov_lang_shm_p + size, 0,
+ priv->wov_lang_shm_len - size);
- return ec_command_no_resp(component, &param);
+ /* make sure write to memory before calling host command */
+ wmb();
+ break;
+ }
+
+ p.cmd = EC_CODEC_WOV_SET_LANG_SHM;
+ memcpy(pp->hash, digest, SHA256_DIGEST_SIZE);
+ pp->total_len = size;
+ ret = send_ec_host_command(priv->ec_device, EC_CMD_EC_CODEC_WOV,
+ (uint8_t *)&p, sizeof(p), NULL, 0);
+ if (ret) {
+ dev_err(priv->dev, "failed to EC_CODEC_WOV_SET_LANG_SHM\n");
+ return ret;
+ }
+
+ return 0;
}
-static int mic_gain_put(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
+static int wov_set_lang(struct cros_ec_codec_priv *priv,
+ uint8_t *buf, size_t size, uint8_t *digest)
{
- struct snd_soc_component *component =
- snd_soc_kcontrol_component(kcontrol);
- struct cros_ec_codec_data *codec_data =
- snd_soc_component_get_drvdata(component);
- int left = ucontrol->value.integer.value[0];
- int right = ucontrol->value.integer.value[1];
- unsigned int max_dmic_gain = codec_data->max_dmic_gain;
+ struct ec_param_ec_codec_wov p;
+ struct ec_param_ec_codec_wov_set_lang *pp = &p.set_lang_param;
+ size_t i, req;
+ int ret;
- if (left > max_dmic_gain || right > max_dmic_gain)
- return -EINVAL;
+ for (i = 0; i < size; i += req) {
+ req = min(size - i, ARRAY_SIZE(pp->buf));
+
+ p.cmd = EC_CODEC_WOV_SET_LANG;
+ memcpy(pp->hash, digest, SHA256_DIGEST_SIZE);
+ pp->total_len = size;
+ pp->offset = i;
+ memcpy(pp->buf, buf + i, req);
+ pp->len = req;
+ ret = send_ec_host_command(priv->ec_device, EC_CMD_EC_CODEC_WOV,
+ (uint8_t *)&p, sizeof(p), NULL, 0);
+ if (ret) {
+ dev_err(priv->dev, "failed to EC_CODEC_WOV_SET_LANG\n");
+ return ret;
+ }
+ }
- return set_ec_mic_gain(component, (u8)left, (u8)right);
+ return 0;
}
-static struct snd_kcontrol_new mic_gain_control =
- SOC_DOUBLE_EXT_TLV("EC Mic Gain", SND_SOC_NOPM, SND_SOC_NOPM, 0, 0, 0,
- mic_gain_get, mic_gain_put, ec_mic_gain_tlv);
-
-static int enable_i2s(struct snd_soc_component *component, int enable)
+static int wov_hotword_model_put(struct snd_kcontrol *kcontrol,
+ const unsigned int __user *bytes,
+ unsigned int size)
{
- struct ec_param_codec_i2s param;
+ struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
+ struct cros_ec_codec_priv *priv =
+ snd_soc_component_get_drvdata(component);
+ struct ec_param_ec_codec_wov p;
+ struct ec_response_ec_codec_wov_get_lang r;
+ uint8_t digest[SHA256_DIGEST_SIZE];
+ uint8_t *buf;
+ int ret;
+
+ /* Skip the 8-byte TLV header (two 32-bit words). */
+ bytes += 2;
+ size -= 8;
+
+ dev_dbg(priv->dev, "%s: size=%d\n", __func__, size);
+
+ buf = memdup_user(bytes, size);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ ret = calculate_sha256(priv, buf, size, digest);
+ if (ret)
+ goto leave;
+
+ p.cmd = EC_CODEC_WOV_GET_LANG;
+ ret = send_ec_host_command(priv->ec_device, EC_CMD_EC_CODEC_WOV,
+ (uint8_t *)&p, sizeof(p),
+ (uint8_t *)&r, sizeof(r));
+ if (ret)
+ goto leave;
- dev_dbg(component->dev, "%s set i2s to %u\n", __func__, enable);
+ if (memcmp(digest, r.hash, SHA256_DIGEST_SIZE) == 0) {
+ dev_dbg(priv->dev, "not updated");
+ goto leave;
+ }
- param.cmd = EC_CODEC_I2S_ENABLE;
- param.i2s_enable = enable;
+ if (ec_codec_capable(priv, EC_CODEC_CAP_WOV_LANG_SHM))
+ ret = wov_set_lang_shm(priv, buf, size, digest);
+ else
+ ret = wov_set_lang(priv, buf, size, digest);
- return ec_command_no_resp(component, &param);
+leave:
+ kfree(buf);
+ return ret;
}
-static int cros_ec_i2s_enable_event(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *kcontrol, int event)
+static struct snd_kcontrol_new wov_controls[] = {
+ SOC_SINGLE_BOOL_EXT("Wake-on-Voice Switch", 0,
+ wov_enable_get, wov_enable_put),
+ SND_SOC_BYTES_TLV("Hotword Model", 0x11000, NULL,
+ wov_hotword_model_put),
+};
+
+static struct snd_soc_dai_driver wov_dai_driver = {
+ .name = "Wake on Voice",
+ .capture = {
+ .stream_name = "WoV Capture",
+ .channels_min = 1,
+ .channels_max = 1,
+ .rates = SNDRV_PCM_RATE_16000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ },
+};
+
+static int wov_host_event(struct notifier_block *nb,
+ unsigned long queued_during_suspend, void *notify)
{
- struct snd_soc_component *component =
- snd_soc_dapm_to_component(w->dapm);
+ struct cros_ec_codec_priv *priv =
+ container_of(nb, struct cros_ec_codec_priv, wov_notifier);
+ u32 host_event;
+
+ dev_dbg(priv->dev, "%s\n", __func__);
+
+ host_event = cros_ec_get_host_event(priv->ec_device);
+ if (host_event & EC_HOST_EVENT_MASK(EC_HOST_EVENT_WOV)) {
+ schedule_delayed_work(&priv->wov_copy_work, 0);
+ return NOTIFY_OK;
+ } else {
+ return NOTIFY_DONE;
+ }
+}
- switch (event) {
- case SND_SOC_DAPM_PRE_PMU:
- dev_dbg(component->dev,
- "%s got SND_SOC_DAPM_PRE_PMU event\n", __func__);
- return enable_i2s(component, 1);
+static int wov_probe(struct snd_soc_component *component)
+{
+ struct cros_ec_codec_priv *priv =
+ snd_soc_component_get_drvdata(component);
+ int ret;
- case SND_SOC_DAPM_PRE_PMD:
- dev_dbg(component->dev,
- "%s got SND_SOC_DAPM_PRE_PMD event\n", __func__);
- return enable_i2s(component, 0);
+ mutex_init(&priv->wov_dma_lock);
+ INIT_DELAYED_WORK(&priv->wov_copy_work, wov_copy_work);
+
+ priv->wov_notifier.notifier_call = wov_host_event;
+ ret = blocking_notifier_chain_register(
+ &priv->ec_device->event_notifier, &priv->wov_notifier);
+ if (ret)
+ return ret;
+
+ if (ec_codec_capable(priv, EC_CODEC_CAP_WOV_LANG_SHM)) {
+ priv->wov_lang_shm_p = wov_map_shm(priv,
+ EC_CODEC_SHM_ID_WOV_LANG,
+ &priv->wov_lang_shm_len,
+ &priv->wov_lang_shm_type);
+ if (!priv->wov_lang_shm_p)
+ return -EFAULT;
}
- return 0;
+ if (ec_codec_capable(priv, EC_CODEC_CAP_WOV_AUDIO_SHM)) {
+ priv->wov_audio_shm_p = wov_map_shm(priv,
+ EC_CODEC_SHM_ID_WOV_AUDIO,
+ &priv->wov_audio_shm_len,
+ &priv->wov_audio_shm_type);
+ if (!priv->wov_audio_shm_p)
+ return -EFAULT;
+ }
+
+ return dmic_probe(component);
}
-/*
- * The goal of this DAPM route is to turn on/off I2S using EC
- * host command when capture stream is started/stopped.
- */
-static const struct snd_soc_dapm_widget cros_ec_codec_dapm_widgets[] = {
- SND_SOC_DAPM_INPUT("DMIC"),
+static void wov_remove(struct snd_soc_component *component)
+{
+ struct cros_ec_codec_priv *priv =
+ snd_soc_component_get_drvdata(component);
- /*
- * Control EC to enable/disable I2S.
- */
- SND_SOC_DAPM_SUPPLY("I2S Enable", SND_SOC_NOPM,
- 0, 0, cros_ec_i2s_enable_event,
- SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD),
+ blocking_notifier_chain_unregister(
+ &priv->ec_device->event_notifier, &priv->wov_notifier);
+}
- SND_SOC_DAPM_AIF_OUT("I2STX", "I2S Capture", 0, SND_SOC_NOPM, 0, 0),
-};
+static int wov_pcm_open(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
+{
+ static const struct snd_pcm_hardware hw_param = {
+ .info = SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_MMAP_VALID,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .rates = SNDRV_PCM_RATE_16000,
+ .channels_min = 1,
+ .channels_max = 1,
+ .period_bytes_min = PAGE_SIZE,
+ .period_bytes_max = 0x20000 / 8,
+ .periods_min = 8,
+ .periods_max = 8,
+ .buffer_bytes_max = 0x20000,
+ };
+
+ return snd_soc_set_runtime_hwparams(substream, &hw_param);
+}
-static const struct snd_soc_dapm_route cros_ec_codec_dapm_routes[] = {
- { "I2STX", NULL, "DMIC" },
- { "I2STX", NULL, "I2S Enable" },
-};
+static int wov_pcm_hw_params(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *hw_params)
+{
+ struct cros_ec_codec_priv *priv =
+ snd_soc_component_get_drvdata(component);
-/*
- * Read maximum gain from device property and set it to mixer control.
- */
-static int cros_ec_set_gain_range(struct device *dev)
+ mutex_lock(&priv->wov_dma_lock);
+ priv->wov_substream = substream;
+ priv->wov_rp = priv->wov_wp = 0;
+ priv->wov_dma_offset = 0;
+ priv->wov_burst_read = true;
+ mutex_unlock(&priv->wov_dma_lock);
+
+ return snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params));
+}
+
+static int wov_pcm_hw_free(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
- struct soc_mixer_control *control;
- struct cros_ec_codec_data *codec_data = dev_get_drvdata(dev);
- int rc;
+ struct cros_ec_codec_priv *priv =
+ snd_soc_component_get_drvdata(component);
- rc = device_property_read_u32(dev, "max-dmic-gain",
- &codec_data->max_dmic_gain);
- if (rc)
- return rc;
+ mutex_lock(&priv->wov_dma_lock);
+ wov_queue_dequeue(priv, wov_queue_size(priv));
+ priv->wov_substream = NULL;
+ mutex_unlock(&priv->wov_dma_lock);
- control = (struct soc_mixer_control *)
- mic_gain_control.private_value;
- control->max = codec_data->max_dmic_gain;
- control->platform_max = codec_data->max_dmic_gain;
+ cancel_delayed_work_sync(&priv->wov_copy_work);
- return 0;
+ return snd_pcm_lib_free_pages(substream);
}
-static int cros_ec_codec_probe(struct snd_soc_component *component)
+static snd_pcm_uframes_t wov_pcm_pointer(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
- int rc;
-
- struct cros_ec_codec_data *codec_data =
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct cros_ec_codec_priv *priv =
snd_soc_component_get_drvdata(component);
- rc = cros_ec_set_gain_range(codec_data->dev);
- if (rc)
- return rc;
+ return bytes_to_frames(runtime, priv->wov_dma_offset);
+}
- return snd_soc_add_component_controls(component, &mic_gain_control, 1);
+static int wov_pcm_new(struct snd_soc_component *component,
+ struct snd_soc_pcm_runtime *rtd)
+{
+ snd_pcm_lib_preallocate_pages_for_all(rtd->pcm, SNDRV_DMA_TYPE_VMALLOC,
+ NULL, 0, 0);
+ return 0;
}
-static const struct snd_soc_component_driver cros_ec_component_driver = {
- .probe = cros_ec_codec_probe,
- .dapm_widgets = cros_ec_codec_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(cros_ec_codec_dapm_widgets),
- .dapm_routes = cros_ec_codec_dapm_routes,
- .num_dapm_routes = ARRAY_SIZE(cros_ec_codec_dapm_routes),
+static const struct snd_soc_component_driver wov_component_driver = {
+ .probe = wov_probe,
+ .remove = wov_remove,
+ .controls = wov_controls,
+ .num_controls = ARRAY_SIZE(wov_controls),
+ .open = wov_pcm_open,
+ .hw_params = wov_pcm_hw_params,
+ .hw_free = wov_pcm_hw_free,
+ .pointer = wov_pcm_pointer,
+ .pcm_construct = wov_pcm_new,
};
-/*
- * Platform device and platform driver fro cros-ec-codec.
- */
-static int cros_ec_codec_platform_probe(struct platform_device *pd)
+static int cros_ec_codec_platform_probe(struct platform_device *pdev)
{
- struct device *dev = &pd->dev;
- struct cros_ec_device *ec_device = dev_get_drvdata(pd->dev.parent);
- struct cros_ec_codec_data *codec_data;
+ struct device *dev = &pdev->dev;
+ struct cros_ec_device *ec_device = dev_get_drvdata(pdev->dev.parent);
+ struct cros_ec_codec_priv *priv;
+ struct ec_param_ec_codec p;
+ struct ec_response_ec_codec_get_capabilities r;
+ int ret;
+#ifdef CONFIG_OF
+ struct device_node *node;
+ struct resource res;
+ u64 ec_shm_size;
+ const __be32 *regaddr_p;
+#endif
- codec_data = devm_kzalloc(dev, sizeof(struct cros_ec_codec_data),
- GFP_KERNEL);
- if (!codec_data)
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
return -ENOMEM;
- codec_data->dev = dev;
- codec_data->ec_device = ec_device;
+#ifdef CONFIG_OF
+ regaddr_p = of_get_address(dev->of_node, 0, &ec_shm_size, NULL);
+ if (regaddr_p) {
+ priv->ec_shm_addr = of_read_number(regaddr_p, 2);
+ priv->ec_shm_len = ec_shm_size;
- platform_set_drvdata(pd, codec_data);
+ dev_dbg(dev, "ec_shm_addr=%#llx len=%#x\n",
+ priv->ec_shm_addr, priv->ec_shm_len);
+ }
+
+ node = of_parse_phandle(dev->of_node, "memory-region", 0);
+ if (node) {
+ ret = of_address_to_resource(node, 0, &res);
+ if (!ret) {
+ priv->ap_shm_phys_addr = res.start;
+ priv->ap_shm_len = resource_size(&res);
+ priv->ap_shm_addr =
+ (uint64_t)(uintptr_t)devm_ioremap_wc(
+ dev, priv->ap_shm_phys_addr,
+ priv->ap_shm_len);
+ priv->ap_shm_last_alloc = priv->ap_shm_phys_addr;
+
+ dev_dbg(dev, "ap_shm_phys_addr=%#llx len=%#x\n",
+ priv->ap_shm_phys_addr, priv->ap_shm_len);
+ }
+ }
+#endif
+
+ priv->dev = dev;
+ priv->ec_device = ec_device;
+ atomic_set(&priv->dmic_probed, 0);
+
+ p.cmd = EC_CODEC_GET_CAPABILITIES;
+ ret = send_ec_host_command(priv->ec_device, EC_CMD_EC_CODEC,
+ (uint8_t *)&p, sizeof(p),
+ (uint8_t *)&r, sizeof(r));
+ if (ret) {
+ dev_err(dev, "failed to EC_CODEC_GET_CAPABILITIES\n");
+ return ret;
+ }
+ priv->ec_capabilities = r.capabilities;
+
+ platform_set_drvdata(pdev, priv);
+
+ ret = devm_snd_soc_register_component(dev, &i2s_rx_component_driver,
+ &i2s_rx_dai_driver, 1);
+ if (ret)
+ return ret;
- return devm_snd_soc_register_component(dev, &cros_ec_component_driver,
- cros_ec_dai, ARRAY_SIZE(cros_ec_dai));
+ return devm_snd_soc_register_component(dev, &wov_component_driver,
+ &wov_dai_driver, 1);
}
#ifdef CONFIG_OF
@@ -427,7 +1049,7 @@ MODULE_DEVICE_TABLE(of, cros_ec_codec_of_match);
static struct platform_driver cros_ec_codec_platform_driver = {
.driver = {
- .name = DRV_NAME,
+ .name = "cros-ec-codec",
.of_match_table = of_match_ptr(cros_ec_codec_of_match),
},
.probe = cros_ec_codec_platform_probe,
@@ -438,4 +1060,4 @@ module_platform_driver(cros_ec_codec_platform_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("ChromeOS EC codec driver");
MODULE_AUTHOR("Cheng-Yi Chiang <cychiang@chromium.org>");
-MODULE_ALIAS("platform:" DRV_NAME);
+MODULE_ALIAS("platform:cros-ec-codec");
diff --git a/sound/soc/codecs/cx2072x.c b/sound/soc/codecs/cx2072x.c
index 1c1ba7bea4d8..2ad00ed21bec 100644
--- a/sound/soc/codecs/cx2072x.c
+++ b/sound/soc/codecs/cx2072x.c
@@ -1507,7 +1507,7 @@ static int cx2072x_probe(struct snd_soc_component *codec)
regmap_multi_reg_write(cx2072x->regmap, cx2072x_reg_init,
ARRAY_SIZE(cx2072x_reg_init));
- /* configre PortC as input device */
+ /* configure PortC as input device */
regmap_update_bits(cx2072x->regmap, CX2072X_PORTC_PIN_CTRL,
0x20, 0x20);
diff --git a/sound/soc/codecs/hdac_hda.c b/sound/soc/codecs/hdac_hda.c
index 4570f662fb48..6803d39e09a5 100644
--- a/sound/soc/codecs/hdac_hda.c
+++ b/sound/soc/codecs/hdac_hda.c
@@ -14,13 +14,11 @@
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/hdaudio_ext.h>
+#include <sound/hda_i915.h>
#include <sound/hda_codec.h>
#include <sound/hda_register.h>
-#include "hdac_hda.h"
-#define HDAC_ANALOG_DAI_ID 0
-#define HDAC_DIGITAL_DAI_ID 1
-#define HDAC_ALT_ANALOG_DAI_ID 2
+#include "hdac_hda.h"
#define STUB_FORMATS (SNDRV_PCM_FMTBIT_S8 | \
SNDRV_PCM_FMTBIT_U8 | \
@@ -32,6 +30,11 @@
SNDRV_PCM_FMTBIT_U32_LE | \
SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_LE)
+#define STUB_HDMI_RATES (SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 |\
+ SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_88200 |\
+ SNDRV_PCM_RATE_96000 | SNDRV_PCM_RATE_176400 |\
+ SNDRV_PCM_RATE_192000)
+
static int hdac_hda_dai_open(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai);
static void hdac_hda_dai_close(struct snd_pcm_substream *substream,
@@ -121,7 +124,46 @@ static struct snd_soc_dai_driver hdac_hda_dais[] = {
.formats = STUB_FORMATS,
.sig_bits = 24,
},
-}
+},
+{
+ .id = HDAC_HDMI_0_DAI_ID,
+ .name = "intel-hdmi-hifi1",
+ .ops = &hdac_hda_dai_ops,
+ .playback = {
+ .stream_name = "hifi1",
+ .channels_min = 1,
+ .channels_max = 32,
+ .rates = STUB_HDMI_RATES,
+ .formats = STUB_FORMATS,
+ .sig_bits = 24,
+ },
+},
+{
+ .id = HDAC_HDMI_1_DAI_ID,
+ .name = "intel-hdmi-hifi2",
+ .ops = &hdac_hda_dai_ops,
+ .playback = {
+ .stream_name = "hifi2",
+ .channels_min = 1,
+ .channels_max = 32,
+ .rates = STUB_HDMI_RATES,
+ .formats = STUB_FORMATS,
+ .sig_bits = 24,
+ },
+},
+{
+ .id = HDAC_HDMI_2_DAI_ID,
+ .name = "intel-hdmi-hifi3",
+ .ops = &hdac_hda_dai_ops,
+ .playback = {
+ .stream_name = "hifi3",
+ .channels_min = 1,
+ .channels_max = 32,
+ .rates = STUB_HDMI_RATES,
+ .formats = STUB_FORMATS,
+ .sig_bits = 24,
+ },
+},
};
@@ -135,10 +177,11 @@ static int hdac_hda_dai_set_tdm_slot(struct snd_soc_dai *dai,
hda_pvt = snd_soc_component_get_drvdata(component);
pcm = &hda_pvt->pcm[dai->id];
+
if (tx_mask)
- pcm[dai->id].stream_tag[SNDRV_PCM_STREAM_PLAYBACK] = tx_mask;
+ pcm->stream_tag[SNDRV_PCM_STREAM_PLAYBACK] = tx_mask;
else
- pcm[dai->id].stream_tag[SNDRV_PCM_STREAM_CAPTURE] = rx_mask;
+ pcm->stream_tag[SNDRV_PCM_STREAM_CAPTURE] = rx_mask;
return 0;
}
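
The hunk above fixes a double-indexing bug: pcm already points at the per-DAI element, so indexing it again read hda_pvt->pcm[2 * dai->id] for any nonzero DAI ID. A minimal sketch of the before/after, using the names from the patch:

	struct hdac_hda_pcm *pcm = &hda_pvt->pcm[dai->id];

	/* before: indexes the element pointer a second time,
	 * effectively touching hda_pvt->pcm[2 * dai->id]
	 */
	pcm[dai->id].stream_tag[SNDRV_PCM_STREAM_PLAYBACK] = tx_mask;

	/* after: dereference the element the pointer already selects */
	pcm->stream_tag[SNDRV_PCM_STREAM_PLAYBACK] = tx_mask;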
@@ -278,6 +321,12 @@ static struct hda_pcm *snd_soc_find_pcm_from_dai(struct hdac_hda_priv *hda_pvt,
struct hda_pcm *cpcm;
const char *pcm_name;
+ /*
+ * map the DAI ID to the closest matching PCM name, following the naming
+ * scheme used by snd_hda_gen_build_pcms() in hda-codec (and by
+ * patch_hdmi.c for HDMI)
+ */
+
switch (dai->id) {
case HDAC_ANALOG_DAI_ID:
pcm_name = "Analog";
@@ -288,13 +337,22 @@ static struct hda_pcm *snd_soc_find_pcm_from_dai(struct hdac_hda_priv *hda_pvt,
case HDAC_ALT_ANALOG_DAI_ID:
pcm_name = "Alt Analog";
break;
+ case HDAC_HDMI_0_DAI_ID:
+ pcm_name = "HDMI 0";
+ break;
+ case HDAC_HDMI_1_DAI_ID:
+ pcm_name = "HDMI 1";
+ break;
+ case HDAC_HDMI_2_DAI_ID:
+ pcm_name = "HDMI 2";
+ break;
default:
dev_err(&hcodec->core.dev, "invalid dai id %d\n", dai->id);
return NULL;
}
list_for_each_entry(cpcm, &hcodec->pcm_list_head, list) {
- if (strpbrk(cpcm->name, pcm_name))
+ if (strstr(cpcm->name, pcm_name))
return cpcm;
}
@@ -302,6 +360,18 @@ static struct hda_pcm *snd_soc_find_pcm_from_dai(struct hdac_hda_priv *hda_pvt,
return NULL;
}
+static bool is_hdmi_codec(struct hda_codec *hcodec)
+{
+ struct hda_pcm *cpcm;
+
+ list_for_each_entry(cpcm, &hcodec->pcm_list_head, list) {
+ if (cpcm->pcm_type == HDA_PCM_TYPE_HDMI)
+ return true;
+ }
+
+ return false;
+}
+
static int hdac_hda_codec_probe(struct snd_soc_component *component)
{
struct hdac_hda_priv *hda_pvt =
@@ -322,6 +392,15 @@ static int hdac_hda_codec_probe(struct snd_soc_component *component)
snd_hdac_ext_bus_link_get(hdev->bus, hlink);
+ /*
+ * Ensure any HDA display is powered at codec probe.
+ * After snd_hda_codec_device_new(), display power is
+ * managed by runtime PM.
+ */
+ if (hda_pvt->need_display_power)
+ snd_hdac_display_power(hdev->bus,
+ HDA_CODEC_IDX_CONTROLLER, true);
+
ret = snd_hda_codec_device_new(hcodec->bus, component->card->snd_card,
hdev->addr, hcodec);
if (ret < 0) {
@@ -366,20 +445,31 @@ static int hdac_hda_codec_probe(struct snd_soc_component *component)
dev_dbg(&hdev->dev, "no patch file found\n");
}
+ /* configure codec for 1:1 PCM:DAI mapping */
+ hcodec->mst_no_extra_pcms = 1;
+
ret = snd_hda_codec_parse_pcms(hcodec);
if (ret < 0) {
dev_err(&hdev->dev, "unable to map pcms to dai %d\n", ret);
goto error;
}
- ret = snd_hda_codec_build_controls(hcodec);
- if (ret < 0) {
- dev_err(&hdev->dev, "unable to create controls %d\n", ret);
- goto error;
+ /* HDMI controls need to be created in machine drivers */
+ if (!is_hdmi_codec(hcodec)) {
+ ret = snd_hda_codec_build_controls(hcodec);
+ if (ret < 0) {
+ dev_err(&hdev->dev, "unable to create controls %d\n",
+ ret);
+ goto error;
+ }
}
hcodec->core.lazy_cache = true;
+ if (hda_pvt->need_display_power)
+ snd_hdac_display_power(hdev->bus,
+ HDA_CODEC_IDX_CONTROLLER, false);
+
/*
* hdac_device core already sets the state to active and calls
* get_noresume. So enable runtime and set the device to suspend.
diff --git a/sound/soc/codecs/hdac_hda.h b/sound/soc/codecs/hdac_hda.h
index 6b1bd4f428e7..e145cec085b8 100644
--- a/sound/soc/codecs/hdac_hda.h
+++ b/sound/soc/codecs/hdac_hda.h
@@ -6,6 +6,16 @@
#ifndef __HDAC_HDA_H__
#define __HDAC_HDA_H__
+enum {
+ HDAC_ANALOG_DAI_ID = 0,
+ HDAC_DIGITAL_DAI_ID,
+ HDAC_ALT_ANALOG_DAI_ID,
+ HDAC_HDMI_0_DAI_ID,
+ HDAC_HDMI_1_DAI_ID,
+ HDAC_HDMI_2_DAI_ID,
+ HDAC_LAST_DAI_ID = HDAC_HDMI_2_DAI_ID,
+};
+
struct hdac_hda_pcm {
int stream_tag[2];
unsigned int format_val[2];
@@ -13,7 +23,8 @@ struct hdac_hda_pcm {
struct hdac_hda_priv {
struct hda_codec codec;
- struct hdac_hda_pcm pcm[2];
+ struct hdac_hda_pcm pcm[HDAC_LAST_DAI_ID];
+ bool need_display_power;
};
#define hdac_to_hda_priv(_hdac) \
diff --git a/sound/soc/codecs/madera.h b/sound/soc/codecs/madera.h
index 1f3e8e230cf2..6d8938a3fb64 100644
--- a/sound/soc/codecs/madera.h
+++ b/sound/soc/codecs/madera.h
@@ -27,6 +27,7 @@
#define MADERA_FLL_SRC_NONE -1
#define MADERA_FLL_SRC_MCLK1 0
#define MADERA_FLL_SRC_MCLK2 1
+#define MADERA_FLL_SRC_MCLK3 2
#define MADERA_FLL_SRC_SLIMCLK 3
#define MADERA_FLL_SRC_FLL1 4
#define MADERA_FLL_SRC_FLL2 5
@@ -51,6 +52,7 @@
#define MADERA_CLK_SRC_MCLK1 0x0
#define MADERA_CLK_SRC_MCLK2 0x1
+#define MADERA_CLK_SRC_MCLK3 0x2
#define MADERA_CLK_SRC_FLL1 0x4
#define MADERA_CLK_SRC_FLL2 0x5
#define MADERA_CLK_SRC_FLL3 0x6
diff --git a/sound/soc/codecs/msm8916-wcd-analog.c b/sound/soc/codecs/msm8916-wcd-analog.c
index e3d311fb510e..f53235be77d9 100644
--- a/sound/soc/codecs/msm8916-wcd-analog.c
+++ b/sound/soc/codecs/msm8916-wcd-analog.c
@@ -228,6 +228,10 @@
#define CDC_A_RX_EAR_CTL (0xf19E)
#define RX_EAR_CTL_SPK_VBAT_LDO_EN_MASK BIT(0)
#define RX_EAR_CTL_SPK_VBAT_LDO_EN_ENABLE BIT(0)
+#define RX_EAR_CTL_PA_EAR_PA_EN_MASK BIT(6)
+#define RX_EAR_CTL_PA_EAR_PA_EN_ENABLE BIT(6)
+#define RX_EAR_CTL_PA_SEL_MASK BIT(7)
+#define RX_EAR_CTL_PA_SEL BIT(7)
#define CDC_A_SPKR_DAC_CTL (0xf1B0)
#define SPKR_DAC_CTL_DAC_RESET_MASK BIT(4)
@@ -312,6 +316,7 @@ static const char *const hph_text[] = { "ZERO", "Switch", };
static const struct soc_enum hph_enum = SOC_ENUM_SINGLE_VIRT(
ARRAY_SIZE(hph_text), hph_text);
+static const struct snd_kcontrol_new ear_mux = SOC_DAPM_ENUM("EAR_S", hph_enum);
static const struct snd_kcontrol_new hphl_mux = SOC_DAPM_ENUM("HPHL", hph_enum);
static const struct snd_kcontrol_new hphr_mux = SOC_DAPM_ENUM("HPHR", hph_enum);
@@ -685,6 +690,34 @@ static int pm8916_wcd_analog_enable_spk_pa(struct snd_soc_dapm_widget *w,
return 0;
}
+static int pm8916_wcd_analog_enable_ear_pa(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol,
+ int event)
+{
+ struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ snd_soc_component_update_bits(component, CDC_A_RX_EAR_CTL,
+ RX_EAR_CTL_PA_SEL_MASK, RX_EAR_CTL_PA_SEL);
+ break;
+ case SND_SOC_DAPM_POST_PMU:
+ snd_soc_component_update_bits(component, CDC_A_RX_EAR_CTL,
+ RX_EAR_CTL_PA_EAR_PA_EN_MASK,
+ RX_EAR_CTL_PA_EAR_PA_EN_ENABLE);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ snd_soc_component_update_bits(component, CDC_A_RX_EAR_CTL,
+ RX_EAR_CTL_PA_EAR_PA_EN_MASK, 0);
+ /* Delay to reduce the ear turn-off pop */
+ usleep_range(7000, 7100);
+ snd_soc_component_update_bits(component, CDC_A_RX_EAR_CTL,
+ RX_EAR_CTL_PA_SEL_MASK, 0);
+ break;
+ }
+ return 0;
+}
+
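A note on the handler above: the DAPM core invokes it around the widget's own power transition, so the sequencing it implements is roughly the following (a summary of the code, not new behaviour):

	/*
	 * stream start: PRE_PMU  - route the DACs to the ear PA (PA_SEL)
	 *               POST_PMU - enable the ear PA output stage
	 * stream stop:  POST_PMD - disable the PA, wait ~7 ms for the
	 *                          output to settle, then clear PA_SEL,
	 *                          reducing the audible turn-off pop
	 */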
static const struct reg_default wcd_reg_defaults_2_0[] = {
{CDC_A_RX_COM_OCP_CTL, 0xD1},
{CDC_A_RX_COM_OCP_COUNT, 0xFF},
@@ -801,12 +834,20 @@ static const struct snd_soc_dapm_route pm8916_wcd_analog_audio_map[] = {
{"PDM_TX", NULL, "A_MCLK2"},
{"A_MCLK2", NULL, "A_MCLK"},
+ /* Earpiece (RX MIX1) */
+ {"EAR", NULL, "EAR_S"},
+ {"EAR_S", "Switch", "EAR PA"},
+ {"EAR PA", NULL, "RX_BIAS"},
+ {"EAR PA", NULL, "HPHL DAC"},
+ {"EAR PA", NULL, "HPHR DAC"},
+ {"EAR PA", NULL, "EAR CP"},
+
/* Headset (RX MIX1 and RX MIX2) */
{"HEADPHONE", NULL, "HPHL PA"},
{"HEADPHONE", NULL, "HPHR PA"},
- {"HPHL PA", NULL, "EAR_HPHL_CLK"},
- {"HPHR PA", NULL, "EAR_HPHR_CLK"},
+ {"HPHL DAC", NULL, "EAR_HPHL_CLK"},
+ {"HPHR DAC", NULL, "EAR_HPHR_CLK"},
{"CP", NULL, "NCP_CLK"},
@@ -847,11 +888,20 @@ static const struct snd_soc_dapm_widget pm8916_wcd_analog_dapm_widgets[] = {
SND_SOC_DAPM_INPUT("AMIC1"),
SND_SOC_DAPM_INPUT("AMIC3"),
SND_SOC_DAPM_INPUT("AMIC2"),
+ SND_SOC_DAPM_OUTPUT("EAR"),
SND_SOC_DAPM_OUTPUT("HEADPHONE"),
/* RX stuff */
SND_SOC_DAPM_SUPPLY("INT_LDO_H", SND_SOC_NOPM, 1, 0, NULL, 0),
+ SND_SOC_DAPM_PGA_E("EAR PA", SND_SOC_NOPM,
+ 0, 0, NULL, 0,
+ pm8916_wcd_analog_enable_ear_pa,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_MUX("EAR_S", SND_SOC_NOPM, 0, 0, &ear_mux),
+ SND_SOC_DAPM_SUPPLY("EAR CP", CDC_A_NCP_EN, 4, 0, NULL, 0),
+
SND_SOC_DAPM_PGA("HPHL PA", CDC_A_RX_HPH_CNP_EN, 5, 0, NULL, 0),
SND_SOC_DAPM_MUX("HPHL", SND_SOC_NOPM, 0, 0, &hphl_mux),
SND_SOC_DAPM_MIXER("HPHL DAC", CDC_A_RX_HPH_L_PA_DAC_CTL, 3, 0, NULL,
diff --git a/sound/soc/codecs/mt6358.c b/sound/soc/codecs/mt6358.c
index bb737fd678cc..1b830ea4f6ed 100644
--- a/sound/soc/codecs/mt6358.c
+++ b/sound/soc/codecs/mt6358.c
@@ -93,6 +93,8 @@ struct mt6358_priv {
int mtkaif_protocol;
struct regulator *avdd_reg;
+
+ int wov_enabled;
};
int mt6358_set_mtkaif_protocol(struct snd_soc_component *cmpnt,
@@ -464,6 +466,106 @@ static int mt6358_put_volsw(struct snd_kcontrol *kcontrol,
return ret;
}
+static void mt6358_restore_pga(struct mt6358_priv *priv);
+
+static int mt6358_enable_wov_phase2(struct mt6358_priv *priv)
+{
+ /* analog */
+ regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON13,
+ 0xffff, 0x0000);
+ regmap_update_bits(priv->regmap, MT6358_DCXO_CW14, 0xffff, 0xa2b5);
+ regmap_update_bits(priv->regmap, MT6358_AUDENC_ANA_CON1,
+ 0xffff, 0x0800);
+ mt6358_restore_pga(priv);
+
+ regmap_update_bits(priv->regmap, MT6358_DCXO_CW13, 0xffff, 0x9929);
+ regmap_update_bits(priv->regmap, MT6358_AUDENC_ANA_CON9,
+ 0xffff, 0x0025);
+ regmap_update_bits(priv->regmap, MT6358_AUDENC_ANA_CON8,
+ 0xffff, 0x0005);
+
+ /* digital */
+ regmap_update_bits(priv->regmap, MT6358_AUD_TOP_CKPDN_CON0,
+ 0xffff, 0x0000);
+ regmap_update_bits(priv->regmap, MT6358_GPIO_MODE3, 0xffff, 0x0120);
+ regmap_update_bits(priv->regmap, MT6358_AFE_VOW_CFG0, 0xffff, 0xffff);
+ regmap_update_bits(priv->regmap, MT6358_AFE_VOW_CFG1, 0xffff, 0x0200);
+ regmap_update_bits(priv->regmap, MT6358_AFE_VOW_CFG2, 0xffff, 0x2424);
+ regmap_update_bits(priv->regmap, MT6358_AFE_VOW_CFG3, 0xffff, 0xdbac);
+ regmap_update_bits(priv->regmap, MT6358_AFE_VOW_CFG4, 0xffff, 0x029e);
+ regmap_update_bits(priv->regmap, MT6358_AFE_VOW_CFG5, 0xffff, 0x0000);
+ regmap_update_bits(priv->regmap, MT6358_AFE_VOW_POSDIV_CFG0,
+ 0xffff, 0x0000);
+ regmap_update_bits(priv->regmap, MT6358_AFE_VOW_HPF_CFG0,
+ 0xffff, 0x0451);
+ regmap_update_bits(priv->regmap, MT6358_AFE_VOW_TOP, 0xffff, 0x68d1);
+
+ return 0;
+}
+
+static int mt6358_disable_wov_phase2(struct mt6358_priv *priv)
+{
+ /* digital */
+ regmap_update_bits(priv->regmap, MT6358_AFE_VOW_TOP, 0xffff, 0xc000);
+ regmap_update_bits(priv->regmap, MT6358_AFE_VOW_HPF_CFG0,
+ 0xffff, 0x0450);
+ regmap_update_bits(priv->regmap, MT6358_AFE_VOW_POSDIV_CFG0,
+ 0xffff, 0x0c00);
+ regmap_update_bits(priv->regmap, MT6358_AFE_VOW_CFG5, 0xffff, 0x0100);
+ regmap_update_bits(priv->regmap, MT6358_AFE_VOW_CFG4, 0xffff, 0x006c);
+ regmap_update_bits(priv->regmap, MT6358_AFE_VOW_CFG3, 0xffff, 0xa879);
+ regmap_update_bits(priv->regmap, MT6358_AFE_VOW_CFG2, 0xffff, 0x2323);
+ regmap_update_bits(priv->regmap, MT6358_AFE_VOW_CFG1, 0xffff, 0x0400);
+ regmap_update_bits(priv->regmap, MT6358_AFE_VOW_CFG0, 0xffff, 0x0000);
+ regmap_update_bits(priv->regmap, MT6358_GPIO_MODE3, 0xffff, 0x02d8);
+ regmap_update_bits(priv->regmap, MT6358_AUD_TOP_CKPDN_CON0,
+ 0xffff, 0x0000);
+
+ /* analog */
+ regmap_update_bits(priv->regmap, MT6358_AUDENC_ANA_CON8,
+ 0xffff, 0x0004);
+ regmap_update_bits(priv->regmap, MT6358_AUDENC_ANA_CON9,
+ 0xffff, 0x0000);
+ regmap_update_bits(priv->regmap, MT6358_DCXO_CW13, 0xffff, 0x9829);
+ regmap_update_bits(priv->regmap, MT6358_AUDENC_ANA_CON1,
+ 0xffff, 0x0000);
+ mt6358_restore_pga(priv);
+ regmap_update_bits(priv->regmap, MT6358_DCXO_CW14, 0xffff, 0xa2b5);
+ regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON13,
+ 0xffff, 0x0010);
+
+ return 0;
+}
+
+static int mt6358_get_wov(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *c = snd_soc_kcontrol_component(kcontrol);
+ struct mt6358_priv *priv = snd_soc_component_get_drvdata(c);
+
+ ucontrol->value.integer.value[0] = priv->wov_enabled;
+ return 0;
+}
+
+static int mt6358_put_wov(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *c = snd_soc_kcontrol_component(kcontrol);
+ struct mt6358_priv *priv = snd_soc_component_get_drvdata(c);
+ int enabled = ucontrol->value.integer.value[0];
+
+ if (priv->wov_enabled != enabled) {
+ if (enabled)
+ mt6358_enable_wov_phase2(priv);
+ else
+ mt6358_disable_wov_phase2(priv);
+
+ priv->wov_enabled = enabled;
+ }
+
+ return 0;
+}
+
static const DECLARE_TLV_DB_SCALE(playback_tlv, -1000, 100, 0);
static const DECLARE_TLV_DB_SCALE(pga_tlv, 0, 600, 0);
@@ -483,6 +585,9 @@ static const struct snd_kcontrol_new mt6358_snd_controls[] = {
MT6358_AUDENC_ANA_CON0, MT6358_AUDENC_ANA_CON1,
8, 4, 0,
snd_soc_get_volsw, mt6358_put_volsw, pga_tlv),
+
+ SOC_SINGLE_BOOL_EXT("Wake-on-Voice Phase2 Switch", 0,
+ mt6358_get_wov, mt6358_put_wov),
};
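
The Wake-on-Voice control above relies on SOC_SINGLE_BOOL_EXT(), which builds a virtual boolean control backed by driver state rather than a codec register; a minimal sketch of the pattern (the example_* names are hypothetical):

	static int example_get(struct snd_kcontrol *kcontrol,
			       struct snd_ctl_elem_value *ucontrol)
	{
		struct snd_soc_component *c = snd_soc_kcontrol_component(kcontrol);
		struct example_priv *priv = snd_soc_component_get_drvdata(c);

		ucontrol->value.integer.value[0] = priv->enabled;
		return 0;
	}

	/* example_put() mirrors this and applies the new state */

	static const struct snd_kcontrol_new example_controls[] = {
		/* the 0 is the control's private_value, unused here */
		SOC_SINGLE_BOOL_EXT("Example Switch", 0,
				    example_get, example_put),
	};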
/* MUX */
diff --git a/sound/soc/codecs/pcm3168a.c b/sound/soc/codecs/pcm3168a.c
index 88b75695fbf7..9711fab296eb 100644
--- a/sound/soc/codecs/pcm3168a.c
+++ b/sound/soc/codecs/pcm3168a.c
@@ -9,7 +9,9 @@
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
#include <linux/module.h>
+#include <linux/of_gpio.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
@@ -59,9 +61,11 @@ struct pcm3168a_priv {
struct regulator_bulk_data supplies[PCM3168A_NUM_SUPPLIES];
struct regmap *regmap;
struct clk *scki;
+ struct gpio_desc *gpio_rst;
unsigned long sysclk;
struct pcm3168a_io_params io_params[2];
+ struct snd_soc_dai_driver dai_drv[2];
};
static const char *const pcm3168a_roll_off[] = { "Sharp", "Slow" };
@@ -314,6 +318,34 @@ static int pcm3168a_set_dai_sysclk(struct snd_soc_dai *dai,
return 0;
}
+static void pcm3168a_update_fixup_pcm_stream(struct snd_soc_dai *dai)
+{
+ struct snd_soc_component *component = dai->component;
+ struct pcm3168a_priv *pcm3168a = snd_soc_component_get_drvdata(component);
+ u64 formats = SNDRV_PCM_FMTBIT_S24_3LE | SNDRV_PCM_FMTBIT_S24_LE;
+ unsigned int channel_max = dai->id == PCM3168A_DAI_DAC ? 8 : 6;
+
+ if (pcm3168a->io_params[dai->id].fmt == PCM3168A_FMT_RIGHT_J) {
+ /* S16_LE is only supported in RIGHT_J mode */
+ formats |= SNDRV_PCM_FMTBIT_S16_LE;
+
+ /*
+ * If multi DIN/DOUT is not selected, RIGHT_J can only support
+ * two channels (no TDM support)
+ */
+ if (pcm3168a->io_params[dai->id].tdm_slots != 2)
+ channel_max = 2;
+ }
+
+ if (dai->id == PCM3168A_DAI_DAC) {
+ dai->driver->playback.channels_max = channel_max;
+ dai->driver->playback.formats = formats;
+ } else {
+ dai->driver->capture.channels_max = channel_max;
+ dai->driver->capture.formats = formats;
+ }
+}
+
static int pcm3168a_set_dai_fmt(struct snd_soc_dai *dai, unsigned int format)
{
struct snd_soc_component *component = dai->component;
@@ -376,6 +408,8 @@ static int pcm3168a_set_dai_fmt(struct snd_soc_dai *dai, unsigned int format)
regmap_update_bits(pcm3168a->regmap, reg, mask, fmt << shift);
+ pcm3168a_update_fixup_pcm_stream(dai);
+
return 0;
}
@@ -409,6 +443,8 @@ static int pcm3168a_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask,
else
io_params->tdm_mask = rx_mask;
+ pcm3168a_update_fixup_pcm_stream(dai);
+
return 0;
}
@@ -530,63 +566,7 @@ static int pcm3168a_hw_params(struct snd_pcm_substream *substream,
return 0;
}
-static int pcm3168a_startup(struct snd_pcm_substream *substream,
- struct snd_soc_dai *dai)
-{
- struct snd_soc_component *component = dai->component;
- struct pcm3168a_priv *pcm3168a = snd_soc_component_get_drvdata(component);
- unsigned int sample_min;
- unsigned int channel_max;
- unsigned int channel_maxs[] = {
- 8, /* DAC */
- 6 /* ADC */
- };
-
- /*
- * Available Data Bits
- *
- * RIGHT_J : 24 / 16
- * LEFT_J : 24
- * I2S : 24
- *
- * TDM available
- *
- * I2S
- * LEFT_J
- */
- switch (pcm3168a->io_params[dai->id].fmt) {
- case PCM3168A_FMT_RIGHT_J:
- sample_min = 16;
- channel_max = 2;
- break;
- case PCM3168A_FMT_LEFT_J:
- case PCM3168A_FMT_I2S:
- case PCM3168A_FMT_DSP_A:
- case PCM3168A_FMT_DSP_B:
- sample_min = 24;
- channel_max = channel_maxs[dai->id];
- break;
- default:
- sample_min = 24;
- channel_max = 2;
- }
-
- snd_pcm_hw_constraint_minmax(substream->runtime,
- SNDRV_PCM_HW_PARAM_SAMPLE_BITS,
- sample_min, 32);
-
- /* Allow all channels in multi DIN/DOUT mode */
- if (pcm3168a->io_params[dai->id].tdm_slots == 2)
- channel_max = channel_maxs[dai->id];
-
- snd_pcm_hw_constraint_minmax(substream->runtime,
- SNDRV_PCM_HW_PARAM_CHANNELS,
- 2, channel_max);
-
- return 0;
-}
static const struct snd_soc_dai_ops pcm3168a_dai_ops = {
- .startup = pcm3168a_startup,
.set_fmt = pcm3168a_set_dai_fmt,
.set_sysclk = pcm3168a_set_dai_sysclk,
.hw_params = pcm3168a_hw_params,
@@ -666,6 +646,7 @@ static bool pcm3168a_readable_register(struct device *dev, unsigned int reg)
static bool pcm3168a_volatile_register(struct device *dev, unsigned int reg)
{
switch (reg) {
+ case PCM3168A_RST_SMODE:
case PCM3168A_DAC_ZERO:
case PCM3168A_ADC_OV:
return true;
@@ -725,6 +706,25 @@ int pcm3168a_probe(struct device *dev, struct regmap *regmap)
dev_set_drvdata(dev, pcm3168a);
+ /*
+ * Request the reset GPIO line (connected to the RST pin) as non-exclusive,
+ * since the same reset line may be connected to multiple pcm3168a codecs.
+ *
+ * RST is active low and the line must be high initially, so request the
+ * initial level as LOW, which in practice means DEASSERTED:
+ * the deasserted level of a GPIO_ACTIVE_LOW line is HIGH.
+ */
+ pcm3168a->gpio_rst = devm_gpiod_get_optional(dev, "reset",
+ GPIOD_OUT_LOW |
+ GPIOD_FLAGS_BIT_NONEXCLUSIVE);
+ if (IS_ERR(pcm3168a->gpio_rst)) {
+ ret = PTR_ERR(pcm3168a->gpio_rst);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "failed to acquire RST gpio: %d\n", ret);
+
+ return ret;
+ }
+
pcm3168a->scki = devm_clk_get(dev, "scki");
if (IS_ERR(pcm3168a->scki)) {
ret = PTR_ERR(pcm3168a->scki);
@@ -766,18 +766,28 @@ int pcm3168a_probe(struct device *dev, struct regmap *regmap)
goto err_regulator;
}
- ret = pcm3168a_reset(pcm3168a);
- if (ret) {
- dev_err(dev, "Failed to reset device: %d\n", ret);
- goto err_regulator;
+ if (pcm3168a->gpio_rst) {
+ /*
+ * The device is taken out from reset via GPIO line, wait for
+ * 3846 SCKI clock cycles for the internal reset de-assertion
+ */
+ msleep(DIV_ROUND_UP(3846 * 1000, pcm3168a->sysclk));
+ } else {
+ ret = pcm3168a_reset(pcm3168a);
+ if (ret) {
+ dev_err(dev, "Failed to reset device: %d\n", ret);
+ goto err_regulator;
+ }
}
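
The msleep() above rounds the 3846-SCKI-cycle reset time up to whole milliseconds; a worked example (the 24.576 MHz rate is assumed for illustration):

	/* SCKI = 24.576 MHz:
	 *   3846 cycles / 24576000 Hz ~= 157 us
	 *   DIV_ROUND_UP(3846 * 1000, 24576000) = 1  ->  msleep(1)
	 */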
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
pm_runtime_idle(dev);
- ret = devm_snd_soc_register_component(dev, &pcm3168a_driver, pcm3168a_dais,
- ARRAY_SIZE(pcm3168a_dais));
+ memcpy(pcm3168a->dai_drv, pcm3168a_dais, sizeof(pcm3168a->dai_drv));
+ ret = devm_snd_soc_register_component(dev, &pcm3168a_driver,
+ pcm3168a->dai_drv,
+ ARRAY_SIZE(pcm3168a->dai_drv));
if (ret) {
dev_err(dev, "failed to register component: %d\n", ret);
goto err_regulator;
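
The memcpy() above is what makes the runtime stream fixups safe: pcm3168a_update_fixup_pcm_stream() now rewrites channels_max and formats in the DAI driver from set_fmt()/set_tdm_slot(), so each device instance needs its own writable copy; registering the static pcm3168a_dais table directly would let two codec instances clobber each other's settings.

	/*
	 * dai->driver now points into pcm3168a->dai_drv[], so
	 *   dai->driver->playback.channels_max = channel_max;
	 * only affects this one device instance.
	 */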
@@ -806,6 +816,15 @@ static void pcm3168a_disable(struct device *dev)
void pcm3168a_remove(struct device *dev)
{
+ struct pcm3168a_priv *pcm3168a = dev_get_drvdata(dev);
+
+ /*
+ * RST is active low and the line must be low when the driver is
+ * removed, so set the value to 1, which in practice means ASSERTED:
+ * the asserted level of a GPIO_ACTIVE_LOW line is LOW.
+ */
+ gpiod_set_value_cansleep(pcm3168a->gpio_rst, 1);
pm_runtime_disable(dev);
#ifndef CONFIG_PM
pcm3168a_disable(dev);
diff --git a/sound/soc/codecs/rt1011.c b/sound/soc/codecs/rt1011.c
index be1e276e3631..2552073e54ce 100644
--- a/sound/soc/codecs/rt1011.c
+++ b/sound/soc/codecs/rt1011.c
@@ -61,7 +61,6 @@ static const struct reg_sequence init_list[] = {
{ RT1011_DAC_SET_1, 0xe702 },
{ RT1011_DAC_SET_3, 0x2004 },
};
-#define RT1011_INIT_REG_LEN ARRAY_SIZE(init_list)
static const struct reg_default rt1011_reg[] = {
{0x0000, 0x0000},
@@ -684,7 +683,8 @@ static int rt1011_reg_init(struct snd_soc_component *component)
{
struct rt1011_priv *rt1011 = snd_soc_component_get_drvdata(component);
- regmap_multi_reg_write(rt1011->regmap, init_list, RT1011_INIT_REG_LEN);
+ regmap_multi_reg_write(rt1011->regmap,
+ init_list, ARRAY_SIZE(init_list));
return 0;
}
@@ -989,7 +989,7 @@ static SOC_ENUM_SINGLE_DECL(rt1011_din_source_enum, RT1011_CROSS_BQ_SET_1, 5,
static const char * const rt1011_tdm_data_out_select[] = {
"TDM_O_LR", "BQ1", "DVOL", "BQ10", "ALC", "DMIX", "ADC_SRC_LR",
- "ADC_O_LR", "ADC_MONO", "RSPK_BPF_LR", "DMIX_ADD", "ENVELOPE_FS",
+ "ADC_O_LR", "ADC_MONO", "RSPK_BPF_LR", "DMIX_ADD", "ENVELOPE_FS",
"SEP_O_GAIN", "ALC_BK_GAIN", "STP_V_C", "DMIX_ABST"
};
@@ -1002,7 +1002,7 @@ static SOC_ENUM_SINGLE_DECL(rt1011_tdm2_l_dac1_enum, RT1011_TDM2_SET_4, 12,
rt1011_tdm_l_ch_data_select);
static SOC_ENUM_SINGLE_DECL(rt1011_tdm1_adc1_dat_enum,
- RT1011_ADCDAT_OUT_SOURCE, 0, rt1011_tdm_data_out_select);
+ RT1011_ADCDAT_OUT_SOURCE, 0, rt1011_tdm_data_out_select);
static SOC_ENUM_SINGLE_DECL(rt1011_tdm1_adc1_loc_enum, RT1011_TDM1_SET_2, 0,
rt1011_tdm_l_ch_data_select);
@@ -1024,9 +1024,9 @@ static const char * const rt1011_tdm_adc_swap_select[] = {
"L/R", "R/L", "L/L", "R/R"
};
-static SOC_ENUM_SINGLE_DECL(rt1011_tdm_adc1_1_enum, RT1011_TDM1_SET_3, 6,
+static SOC_ENUM_SINGLE_DECL(rt1011_tdm_adc1_1_enum, RT1011_TDM1_SET_3, 6,
rt1011_tdm_adc_swap_select);
-static SOC_ENUM_SINGLE_DECL(rt1011_tdm_adc2_1_enum, RT1011_TDM1_SET_3, 4,
+static SOC_ENUM_SINGLE_DECL(rt1011_tdm_adc2_1_enum, RT1011_TDM1_SET_3, 4,
rt1011_tdm_adc_swap_select);
static void rt1011_reset(struct regmap *regmap)
@@ -1092,9 +1092,9 @@ static bool rt1011_validate_bq_drc_coeff(unsigned short reg)
{
if ((reg == RT1011_DAC_SET_1) |
(reg >= RT1011_ADC_SET && reg <= RT1011_ADC_SET_1) |
- (reg == RT1011_ADC_SET_4) | (reg == RT1011_ADC_SET_5) |
+ (reg == RT1011_ADC_SET_4) | (reg == RT1011_ADC_SET_5) |
(reg == RT1011_MIXER_1) |
- (reg == RT1011_A_TIMING_1) | (reg >= RT1011_POWER_7 &&
+ (reg == RT1011_A_TIMING_1) | (reg >= RT1011_POWER_7 &&
reg <= RT1011_POWER_8) |
(reg == RT1011_CLASS_D_POS) | (reg == RT1011_ANALOG_CTRL) |
(reg >= RT1011_SPK_TEMP_PROTECT_0 &&
@@ -1163,9 +1163,6 @@ static int rt1011_bq_drc_coeff_put(struct snd_kcontrol *kcontrol,
(struct rt1011_bq_drc_params *)ucontrol->value.integer.value;
unsigned int i, mode_idx = 0;
- if (!component->card->instantiated)
- return 0;
-
if (strstr(ucontrol->id.name, "AdvanceMode Initial Set"))
mode_idx = RT1011_ADVMODE_INITIAL_SET;
else if (strstr(ucontrol->id.name, "AdvanceMode SEP BQ Coeff"))
@@ -1236,9 +1233,6 @@ static int rt1011_r0_cali_put(struct snd_kcontrol *kcontrol,
struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
struct rt1011_priv *rt1011 = snd_soc_component_get_drvdata(component);
- if (!component->card->instantiated)
- return 0;
-
rt1011->cali_done = 0;
if (snd_soc_component_get_bias_level(component) == SND_SOC_BIAS_OFF &&
ucontrol->value.integer.value[0])
@@ -1284,9 +1278,6 @@ static int rt1011_r0_load_mode_put(struct snd_kcontrol *kcontrol,
if (ucontrol->value.integer.value[0] == rt1011->r0_reg)
return 0;
- if (!component->card->instantiated)
- return 0;
-
if (ucontrol->value.integer.value[0] == 0)
return -EINVAL;
@@ -1298,7 +1289,7 @@ static int rt1011_r0_load_mode_put(struct snd_kcontrol *kcontrol,
r0_integer = format / rt1011->r0_reg / 128;
r0_factor = ((format / rt1011->r0_reg * 100) / 128)
- (r0_integer * 100);
- dev_info(dev, "New r0 resistance about %d.%02d ohm, reg=0x%X\n",
+ dev_info(dev, "New r0 resistance about %d.%02d ohm, reg=0x%X\n",
r0_integer, r0_factor, rt1011->r0_reg);
if (rt1011->r0_reg)
@@ -1640,6 +1631,7 @@ static int rt1011_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
break;
default:
ret = -EINVAL;
+ goto _set_fmt_err_;
}
switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
@@ -1650,6 +1642,7 @@ static int rt1011_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
break;
default:
ret = -EINVAL;
+ goto _set_fmt_err_;
}
switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
@@ -1666,6 +1659,7 @@ static int rt1011_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
break;
default:
ret = -EINVAL;
+ goto _set_fmt_err_;
}
switch (dai->id) {
@@ -1683,6 +1677,7 @@ static int rt1011_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
ret = -EINVAL;
}
+_set_fmt_err_:
snd_soc_dapm_mutex_unlock(dapm);
return ret;
}
@@ -1778,7 +1773,8 @@ static int rt1011_set_component_pll(struct snd_soc_component *component,
ret = rl6231_pll_calc(freq_in, freq_out, &pll_code);
if (ret < 0) {
- dev_err(component->dev, "Unsupport input clock %d\n", freq_in);
+ dev_err(component->dev, "Unsupported input clock %d\n",
+ freq_in);
return ret;
}
@@ -1805,8 +1801,8 @@ static int rt1011_set_tdm_slot(struct snd_soc_dai *dai,
struct snd_soc_component *component = dai->component;
struct snd_soc_dapm_context *dapm =
snd_soc_component_get_dapm(component);
- unsigned int val = 0, tdm_en = 0;
- int ret = 0;
+ unsigned int val = 0, tdm_en = 0, rx_slotnum, tx_slotnum;
+ int ret = 0, first_bit, last_bit;
snd_soc_dapm_mutex_lock(dapm);
if (rx_mask || tx_mask)
@@ -1829,6 +1825,7 @@ static int rt1011_set_tdm_slot(struct snd_soc_dai *dai,
break;
default:
ret = -EINVAL;
+ goto _set_tdm_err_;
}
switch (slot_width) {
@@ -1848,22 +1845,153 @@ static int rt1011_set_tdm_slot(struct snd_soc_dai *dai,
break;
default:
ret = -EINVAL;
+ goto _set_tdm_err_;
+ }
+
+ /* Rx slot configuration */
+ rx_slotnum = hweight_long(rx_mask);
+ first_bit = find_next_bit((unsigned long *)&rx_mask, 32, 0);
+ if (rx_slotnum > 1 || rx_slotnum == 0) {
+ ret = -EINVAL;
+ dev_dbg(component->dev, "too many rx slots or zero slot\n");
+ goto _set_tdm_err_;
+ }
+
+ switch (first_bit) {
+ case 0:
+ case 2:
+ case 4:
+ case 6:
+ snd_soc_component_update_bits(component,
+ RT1011_CROSS_BQ_SET_1, RT1011_MONO_LR_SEL_MASK,
+ RT1011_MONO_L_CHANNEL);
+ snd_soc_component_update_bits(component,
+ RT1011_TDM1_SET_4,
+ RT1011_TDM_I2S_TX_L_DAC1_1_MASK |
+ RT1011_TDM_I2S_TX_R_DAC1_1_MASK,
+ (first_bit << RT1011_TDM_I2S_TX_L_DAC1_1_SFT) |
+ ((first_bit+1) << RT1011_TDM_I2S_TX_R_DAC1_1_SFT));
+ break;
+ case 1:
+ case 3:
+ case 5:
+ case 7:
+ snd_soc_component_update_bits(component,
+ RT1011_CROSS_BQ_SET_1, RT1011_MONO_LR_SEL_MASK,
+ RT1011_MONO_R_CHANNEL);
+ snd_soc_component_update_bits(component,
+ RT1011_TDM1_SET_4,
+ RT1011_TDM_I2S_TX_L_DAC1_1_MASK |
+ RT1011_TDM_I2S_TX_R_DAC1_1_MASK,
+ ((first_bit-1) << RT1011_TDM_I2S_TX_L_DAC1_1_SFT) |
+ (first_bit << RT1011_TDM_I2S_TX_R_DAC1_1_SFT));
+ break;
+ default:
+ ret = -EINVAL;
+ goto _set_tdm_err_;
+ }
+
+ /* Tx slot configuration */
+ tx_slotnum = hweight_long(tx_mask);
+ first_bit = find_next_bit((unsigned long *)&tx_mask, 32, 0);
+ last_bit = find_last_bit((unsigned long *)&tx_mask, 32);
+ if (tx_slotnum > 2 || (last_bit-first_bit) > 1) {
+ ret = -EINVAL;
+ dev_dbg(component->dev, "too many tx slots or tx slot location error\n");
+ goto _set_tdm_err_;
+ }
+
+ if (tx_slotnum == 1) {
+ snd_soc_component_update_bits(component, RT1011_TDM1_SET_2,
+ RT1011_TDM_I2S_DOCK_ADCDAT_LEN_1_MASK |
+ RT1011_TDM_ADCDAT1_DATA_LOCATION, first_bit);
+ switch (first_bit) {
+ case 1:
+ snd_soc_component_update_bits(component,
+ RT1011_TDM1_SET_3,
+ RT1011_TDM_I2S_RX_ADC1_1_MASK,
+ RT1011_TDM_I2S_RX_ADC1_1_LL);
+ break;
+ case 3:
+ snd_soc_component_update_bits(component,
+ RT1011_TDM1_SET_3,
+ RT1011_TDM_I2S_RX_ADC2_1_MASK,
+ RT1011_TDM_I2S_RX_ADC2_1_LL);
+ break;
+ case 5:
+ snd_soc_component_update_bits(component,
+ RT1011_TDM1_SET_3,
+ RT1011_TDM_I2S_RX_ADC3_1_MASK,
+ RT1011_TDM_I2S_RX_ADC3_1_LL);
+ break;
+ case 7:
+ snd_soc_component_update_bits(component,
+ RT1011_TDM1_SET_3,
+ RT1011_TDM_I2S_RX_ADC4_1_MASK,
+ RT1011_TDM_I2S_RX_ADC4_1_LL);
+ break;
+ case 0:
+ snd_soc_component_update_bits(component,
+ RT1011_TDM1_SET_3,
+ RT1011_TDM_I2S_RX_ADC1_1_MASK, 0);
+ break;
+ case 2:
+ snd_soc_component_update_bits(component,
+ RT1011_TDM1_SET_3,
+ RT1011_TDM_I2S_RX_ADC2_1_MASK, 0);
+ break;
+ case 4:
+ snd_soc_component_update_bits(component,
+ RT1011_TDM1_SET_3,
+ RT1011_TDM_I2S_RX_ADC3_1_MASK, 0);
+ break;
+ case 6:
+ snd_soc_component_update_bits(component,
+ RT1011_TDM1_SET_3,
+ RT1011_TDM_I2S_RX_ADC4_1_MASK, 0);
+ break;
+ default:
+ ret = -EINVAL;
+ dev_dbg(component->dev,
+ "tx slot location error\n");
+ goto _set_tdm_err_;
+ }
+ } else if (tx_slotnum == 2) {
+ switch (first_bit) {
+ case 0:
+ case 2:
+ case 4:
+ case 6:
+ snd_soc_component_update_bits(component,
+ RT1011_TDM1_SET_2,
+ RT1011_TDM_I2S_DOCK_ADCDAT_LEN_1_MASK |
+ RT1011_TDM_ADCDAT1_DATA_LOCATION,
+ RT1011_TDM_I2S_DOCK_ADCDAT_2CH | first_bit);
+ break;
+ default:
+ ret = -EINVAL;
+ dev_dbg(component->dev,
+ "tx slot location should be paired and start from slot0/2/4/6\n");
+ goto _set_tdm_err_;
+ }
}
snd_soc_component_update_bits(component, RT1011_TDM1_SET_1,
RT1011_I2S_CH_TX_MASK | RT1011_I2S_CH_RX_MASK |
- RT1011_I2S_CH_TX_LEN_MASK | RT1011_I2S_CH_RX_LEN_MASK, val);
+ RT1011_I2S_CH_TX_LEN_MASK | RT1011_I2S_CH_RX_LEN_MASK, val);
snd_soc_component_update_bits(component, RT1011_TDM2_SET_1,
RT1011_I2S_CH_TX_MASK | RT1011_I2S_CH_RX_MASK |
- RT1011_I2S_CH_TX_LEN_MASK | RT1011_I2S_CH_RX_LEN_MASK, val);
+ RT1011_I2S_CH_TX_LEN_MASK | RT1011_I2S_CH_RX_LEN_MASK, val);
snd_soc_component_update_bits(component, RT1011_TDM1_SET_2,
- RT1011_TDM_I2S_DOCK_EN_1_MASK, tdm_en);
+ RT1011_TDM_I2S_DOCK_EN_1_MASK, tdm_en);
snd_soc_component_update_bits(component, RT1011_TDM2_SET_2,
- RT1011_TDM_I2S_DOCK_EN_2_MASK, tdm_en);
- snd_soc_component_update_bits(component, RT1011_TDM_TOTAL_SET,
- RT1011_ADCDAT1_PIN_CONFIG | RT1011_ADCDAT2_PIN_CONFIG,
- RT1011_ADCDAT1_OUTPUT | RT1011_ADCDAT2_OUTPUT);
+ RT1011_TDM_I2S_DOCK_EN_2_MASK, tdm_en);
+ if (tx_slotnum)
+ snd_soc_component_update_bits(component, RT1011_TDM_TOTAL_SET,
+ RT1011_ADCDAT1_PIN_CONFIG | RT1011_ADCDAT2_PIN_CONFIG,
+ RT1011_ADCDAT1_OUTPUT | RT1011_ADCDAT2_OUTPUT);
+_set_tdm_err_:
snd_soc_dapm_mutex_unlock(dapm);
return ret;
}
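
A machine driver drives this through the standard TDM API; a hypothetical call placing playback in slot 0 (mono left) and the two ADCDAT feedback slots in 0-1 of an 8-slot, 32-bit frame:

	/* hypothetical machine-driver hw_params() fragment */
	ret = snd_soc_dai_set_tdm_slot(codec_dai,
				       0x3,	/* tx: ADCDAT out, slots 0-1 */
				       0x1,	/* rx: playback slot 0 -> mono L */
				       8, 32);
	if (ret < 0)
		return ret;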
@@ -1982,7 +2110,7 @@ static const struct snd_soc_component_driver soc_component_dev_rt1011 = {
.remove = rt1011_remove,
.suspend = rt1011_suspend,
.resume = rt1011_resume,
- .set_bias_level = rt1011_set_bias_level,
+ .set_bias_level = rt1011_set_bias_level,
.controls = rt1011_snd_controls,
.num_controls = ARRAY_SIZE(rt1011_snd_controls),
.dapm_widgets = rt1011_dapm_widgets,
@@ -1991,9 +2119,9 @@ static const struct snd_soc_component_driver soc_component_dev_rt1011 = {
.num_dapm_routes = ARRAY_SIZE(rt1011_dapm_routes),
.set_sysclk = rt1011_set_component_sysclk,
.set_pll = rt1011_set_component_pll,
- .use_pmdown_time = 1,
- .endianness = 1,
- .non_legacy_dai_naming = 1,
+ .use_pmdown_time = 1,
+ .endianness = 1,
+ .non_legacy_dai_naming = 1,
};
static const struct regmap_config rt1011_regmap = {
@@ -2095,17 +2223,17 @@ static int rt1011_calibrate(struct rt1011_priv *rt1011, unsigned char cali_flag)
dc_offset = value << 16;
regmap_read(rt1011->regmap, RT1011_EFUSE_ADC_OFFSET_15_0, &value);
dc_offset |= (value & 0xffff);
- dev_info(dev, "ADC offset=0x%x\n", dc_offset);
+ dev_info(dev, "ADC offset=0x%x\n", dc_offset);
regmap_read(rt1011->regmap, RT1011_EFUSE_DAC_OFFSET_G0_20_16, &value);
dc_offset = value << 16;
regmap_read(rt1011->regmap, RT1011_EFUSE_DAC_OFFSET_G0_15_0, &value);
dc_offset |= (value & 0xffff);
- dev_info(dev, "Gain0 offset=0x%x\n", dc_offset);
+ dev_info(dev, "Gain0 offset=0x%x\n", dc_offset);
regmap_read(rt1011->regmap, RT1011_EFUSE_DAC_OFFSET_G1_20_16, &value);
dc_offset = value << 16;
regmap_read(rt1011->regmap, RT1011_EFUSE_DAC_OFFSET_G1_15_0, &value);
dc_offset |= (value & 0xffff);
- dev_info(dev, "Gain1 offset=0x%x\n", dc_offset);
+ dev_info(dev, "Gain1 offset=0x%x\n", dc_offset);
if (cali_flag) {
@@ -2125,7 +2253,7 @@ static int rt1011_calibrate(struct rt1011_priv *rt1011, unsigned char cali_flag)
while (count < chk_cnt) {
msleep(100);
regmap_read(rt1011->regmap,
- RT1011_INIT_RECIPROCAL_SYN_24_16, &value);
+ RT1011_INIT_RECIPROCAL_SYN_24_16, &value);
r0[count%3] = value << 16;
regmap_read(rt1011->regmap,
RT1011_INIT_RECIPROCAL_SYN_15_0, &value);
@@ -2140,7 +2268,7 @@ static int rt1011_calibrate(struct rt1011_priv *rt1011, unsigned char cali_flag)
break;
}
if (count > chk_cnt) {
- dev_err(dev, "Calibrate R0 Failure\n");
+ dev_err(dev, "Calibrate R0 Failure\n");
ret = -EAGAIN;
} else {
format = 2147483648U; /* 2^24 * 128 */
@@ -2149,7 +2277,7 @@ static int rt1011_calibrate(struct rt1011_priv *rt1011, unsigned char cali_flag)
- (r0_integer * 100);
rt1011->r0_reg = r0[0];
rt1011->cali_done = 1;
- dev_info(dev, "r0 resistance about %d.%02d ohm, reg=0x%X\n",
+ dev_info(dev, "r0 resistance about %d.%02d ohm, reg=0x%X\n",
r0_integer, r0_factor, r0[0]);
}
}
@@ -2196,8 +2324,12 @@ static void rt1011_calibration_work(struct work_struct *work)
struct rt1011_priv *rt1011 =
container_of(work, struct rt1011_priv, cali_work);
struct snd_soc_component *component = rt1011->component;
+ unsigned int r0_integer, r0_factor, format;
- rt1011_calibrate(rt1011, 1);
+ if (rt1011->r0_calib)
+ rt1011_calibrate(rt1011, 0);
+ else
+ rt1011_calibrate(rt1011, 1);
/*
* This flag should reset after booting.
@@ -2208,6 +2340,40 @@ static void rt1011_calibration_work(struct work_struct *work)
/* initial */
rt1011_reg_init(component);
+
+ /* Apply temperature and calibration data from device property */
+ if (rt1011->temperature_calib <= 0xff &&
+ rt1011->temperature_calib > 0) {
+ snd_soc_component_update_bits(component,
+ RT1011_STP_INITIAL_RESISTANCE_TEMP, 0x3ff,
+ (rt1011->temperature_calib << 2));
+ }
+
+ if (rt1011->r0_calib) {
+ rt1011->r0_reg = rt1011->r0_calib;
+
+ format = 2147483648U; /* 2^24 * 128 */
+ r0_integer = format / rt1011->r0_reg / 128;
+ r0_factor = ((format / rt1011->r0_reg * 100) / 128)
+ - (r0_integer * 100);
+ dev_info(component->dev, "DP r0 resistance about %d.%02d ohm, reg=0x%X\n",
+ r0_integer, r0_factor, rt1011->r0_reg);
+
+ rt1011_r0_load(rt1011);
+ }
+}
+
+static int rt1011_parse_dp(struct rt1011_priv *rt1011, struct device *dev)
+{
+ device_property_read_u32(dev, "realtek,temperature_calib",
+ &rt1011->temperature_calib);
+ device_property_read_u32(dev, "realtek,r0_calib",
+ &rt1011->r0_calib);
+
+ dev_dbg(dev, "%s: r0_calib: 0x%x, temperature_calib: 0x%x\n",
+ __func__, rt1011->r0_calib, rt1011->temperature_calib);
+
+ return 0;
}
static int rt1011_i2c_probe(struct i2c_client *i2c,
@@ -2219,11 +2385,13 @@ static int rt1011_i2c_probe(struct i2c_client *i2c,
rt1011 = devm_kzalloc(&i2c->dev, sizeof(struct rt1011_priv),
GFP_KERNEL);
- if (rt1011 == NULL)
+ if (!rt1011)
return -ENOMEM;
i2c_set_clientdata(i2c, rt1011);
+ rt1011_parse_dp(rt1011, &i2c->dev);
+
rt1011->regmap = devm_regmap_init_i2c(i2c, &rt1011_regmap);
if (IS_ERR(rt1011->regmap)) {
ret = PTR_ERR(rt1011->regmap);
@@ -2254,7 +2422,6 @@ static void rt1011_i2c_shutdown(struct i2c_client *client)
rt1011_reset(rt1011->regmap);
}
-
static struct i2c_driver rt1011_i2c_driver = {
.driver = {
.name = "rt1011",
diff --git a/sound/soc/codecs/rt1011.h b/sound/soc/codecs/rt1011.h
index 2d65983f3d0f..68fadc15fa8c 100644
--- a/sound/soc/codecs/rt1011.h
+++ b/sound/soc/codecs/rt1011.h
@@ -460,6 +460,23 @@
#define RT1011_TDM_I2S_DOCK_EN_1_MASK (0x1 << 3)
#define RT1011_TDM_I2S_DOCK_EN_1_SFT 3
#define RT1011_TDM_I2S_DOCK_EN_1 (0x1 << 3)
+#define RT1011_TDM_ADCDAT1_DATA_LOCATION (0x7 << 0)
+
+/* TDM1 Setting-3 (0x0118) */
+#define RT1011_TDM_I2S_RX_ADC1_1_MASK (0x3 << 6)
+#define RT1011_TDM_I2S_RX_ADC2_1_MASK (0x3 << 4)
+#define RT1011_TDM_I2S_RX_ADC3_1_MASK (0x3 << 2)
+#define RT1011_TDM_I2S_RX_ADC4_1_MASK (0x3 << 0)
+#define RT1011_TDM_I2S_RX_ADC1_1_LL (0x2 << 6)
+#define RT1011_TDM_I2S_RX_ADC2_1_LL (0x2 << 4)
+#define RT1011_TDM_I2S_RX_ADC3_1_LL (0x2 << 2)
+#define RT1011_TDM_I2S_RX_ADC4_1_LL (0x2 << 0)
+
+/* TDM1 Setting-4 (0x011a) */
+#define RT1011_TDM_I2S_TX_L_DAC1_1_MASK (0x7 << 12)
+#define RT1011_TDM_I2S_TX_R_DAC1_1_MASK (0x7 << 8)
+#define RT1011_TDM_I2S_TX_L_DAC1_1_SFT 12
+#define RT1011_TDM_I2S_TX_R_DAC1_1_SFT 8
/* TDM2 Setting-2 (0x0120) */
#define RT1011_TDM_I2S_DOCK_ADCDAT_LEN_2_MASK (0x7 << 13)
@@ -585,6 +602,12 @@
#define RT1011_STP_T0_EN_BIT 6
#define RT1011_STP_T0_EN (0x1 << 6)
+/* Cross Biquad Setting-1 (0x0702) */
+#define RT1011_MONO_LR_SEL_MASK (0x3 << 5)
+#define RT1011_MONO_L_CHANNEL (0x0 << 5)
+#define RT1011_MONO_R_CHANNEL (0x1 << 5)
+#define RT1011_MONO_LR_MIX_CHANNEL (0x2 << 5)
+
/* ClassD Internal Setting-1 (0x1300) */
#define RT1011_DRIVER_READY_SPK (0x1 << 12)
#define RT1011_DRIVER_READY_SPK_BIT 12
@@ -667,6 +690,7 @@ struct rt1011_priv {
int bq_drc_set;
unsigned int r0_reg, cali_done;
+ unsigned int r0_calib, temperature_calib;
int recv_spk_mode;
};
diff --git a/sound/soc/codecs/rt5514-spi.c b/sound/soc/codecs/rt5514-spi.c
index 892ea406a69b..f1b7b947ecbd 100644
--- a/sound/soc/codecs/rt5514-spi.c
+++ b/sound/soc/codecs/rt5514-spi.c
@@ -201,26 +201,25 @@ static irqreturn_t rt5514_spi_irq(int irq, void *data)
}
/* PCM for streaming audio from the DSP buffer */
-static int rt5514_spi_pcm_open(struct snd_pcm_substream *substream)
+static int rt5514_spi_pcm_open(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
snd_soc_set_runtime_hwparams(substream, &rt5514_spi_pcm_hardware);
return 0;
}
-static int rt5514_spi_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *hw_params)
+static int rt5514_spi_hw_params(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *hw_params)
{
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct rt5514_dsp *rt5514_dsp =
snd_soc_component_get_drvdata(component);
int ret;
u8 buf[8];
mutex_lock(&rt5514_dsp->dma_lock);
- ret = snd_pcm_lib_alloc_vmalloc_buffer(substream,
- params_buffer_bytes(hw_params));
+ ret = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params));
rt5514_dsp->substream = substream;
rt5514_dsp->dma_offset = 0;
@@ -234,10 +233,9 @@ static int rt5514_spi_hw_params(struct snd_pcm_substream *substream,
return ret;
}
-static int rt5514_spi_hw_free(struct snd_pcm_substream *substream)
+static int rt5514_spi_hw_free(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct rt5514_dsp *rt5514_dsp =
snd_soc_component_get_drvdata(component);
@@ -247,28 +245,20 @@ static int rt5514_spi_hw_free(struct snd_pcm_substream *substream)
cancel_delayed_work_sync(&rt5514_dsp->copy_work);
- return snd_pcm_lib_free_vmalloc_buffer(substream);
+ return snd_pcm_lib_free_pages(substream);
}
static snd_pcm_uframes_t rt5514_spi_pcm_pointer(
+ struct snd_soc_component *component,
struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct rt5514_dsp *rt5514_dsp =
snd_soc_component_get_drvdata(component);
return bytes_to_frames(runtime, rt5514_dsp->dma_offset);
}
-static const struct snd_pcm_ops rt5514_spi_pcm_ops = {
- .open = rt5514_spi_pcm_open,
- .hw_params = rt5514_spi_hw_params,
- .hw_free = rt5514_spi_hw_free,
- .pointer = rt5514_spi_pcm_pointer,
- .page = snd_pcm_lib_get_vmalloc_page,
-};
static int rt5514_spi_pcm_probe(struct snd_soc_component *component)
{
@@ -301,10 +291,22 @@ static int rt5514_spi_pcm_probe(struct snd_soc_component *component)
return 0;
}
+static int rt5514_spi_pcm_new(struct snd_soc_component *component,
+ struct snd_soc_pcm_runtime *rtd)
+{
+ snd_pcm_lib_preallocate_pages_for_all(rtd->pcm, SNDRV_DMA_TYPE_VMALLOC,
+ NULL, 0, 0);
+ return 0;
+}
+
static const struct snd_soc_component_driver rt5514_spi_component = {
- .name = DRV_NAME,
- .probe = rt5514_spi_pcm_probe,
- .ops = &rt5514_spi_pcm_ops,
+ .name = DRV_NAME,
+ .probe = rt5514_spi_pcm_probe,
+ .open = rt5514_spi_pcm_open,
+ .hw_params = rt5514_spi_hw_params,
+ .hw_free = rt5514_spi_hw_free,
+ .pointer = rt5514_spi_pcm_pointer,
+ .pcm_construct = rt5514_spi_pcm_new,
};
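
This conversion follows the ASoC framework change that folded the PCM operations into snd_soc_component_driver: each callback now receives the component directly, removing the snd_soc_rtdcom_lookup(rtd, DRV_NAME) boilerplate, and the vmalloc-backed buffer is preallocated in pcm_construct instead of needing a .page callback. Roughly:

	/*
	 *   snd_pcm_ops.open      -> component open(component, substream)
	 *   snd_pcm_ops.hw_params -> component hw_params(component, ...)
	 *   snd_pcm_ops.hw_free   -> component hw_free(component, ...)
	 *   snd_pcm_ops.pointer   -> component pointer(component, ...)
	 *   snd_pcm_ops.page      -> dropped; pcm_construct preallocates
	 *                            with SNDRV_DMA_TYPE_VMALLOC
	 */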
/**
diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c
index 1c06b3b9218c..92d67010aeed 100644
--- a/sound/soc/codecs/rt5645.c
+++ b/sound/soc/codecs/rt5645.c
@@ -3270,6 +3270,9 @@ static void rt5645_jack_detect_work(struct work_struct *work)
snd_soc_jack_report(rt5645->mic_jack,
report, SND_JACK_MICROPHONE);
return;
+ case 4:
+ val = snd_soc_component_read32(rt5645->component, RT5645_A_JD_CTRL1) & 0x0020;
+ break;
default: /* read rt5645 jd1_1 status */
val = snd_soc_component_read32(rt5645->component, RT5645_INT_IRQ_ST) & 0x1000;
break;
@@ -3603,7 +3606,7 @@ static const struct rt5645_platform_data intel_braswell_platform_data = {
static const struct rt5645_platform_data buddy_platform_data = {
.dmic1_data_pin = RT5645_DMIC_DATA_GPIO5,
.dmic2_data_pin = RT5645_DMIC_DATA_IN2P,
- .jd_mode = 3,
+ .jd_mode = 4,
.level_trigger_irq = true,
};
@@ -3636,6 +3639,12 @@ static const struct rt5645_platform_data lattepanda_board_platform_data = {
.inv_jd1_1 = true
};
+static const struct rt5645_platform_data kahlee_platform_data = {
+ .dmic1_data_pin = RT5645_DMIC_DATA_GPIO5,
+ .dmic2_data_pin = RT5645_DMIC_DATA_IN2P,
+ .jd_mode = 3,
+};
+
static const struct dmi_system_id dmi_platform_data[] = {
{
.ident = "Chrome Buddy",
@@ -3742,6 +3751,13 @@ static const struct dmi_system_id dmi_platform_data[] = {
},
.driver_data = (void *)&lattepanda_board_platform_data,
},
+ {
+ .ident = "Chrome Kahlee",
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "Kahlee"),
+ },
+ .driver_data = (void *)&kahlee_platform_data,
+ },
{ }
};
@@ -3999,6 +4015,7 @@ static int rt5645_i2c_probe(struct i2c_client *i2c,
RT5645_JD1_MODE_1);
break;
case 3:
+ case 4:
regmap_update_bits(rt5645->regmap, RT5645_A_JD_CTRL1,
RT5645_JD1_MODE_MASK,
RT5645_JD1_MODE_2);
diff --git a/sound/soc/codecs/rt5663.c b/sound/soc/codecs/rt5663.c
index 2943692f66ed..e6c1ec6c426e 100644
--- a/sound/soc/codecs/rt5663.c
+++ b/sound/soc/codecs/rt5663.c
@@ -3644,7 +3644,7 @@ static int rt5663_i2c_probe(struct i2c_client *i2c,
regmap_update_bits(rt5663->regmap, RT5663_PWR_ANLG_1,
RT5663_LDO1_DVO_MASK | RT5663_AMP_HP_MASK,
RT5663_LDO1_DVO_0_9V | RT5663_AMP_HP_3X);
- break;
+ break;
case CODEC_VER_0:
regmap_update_bits(rt5663->regmap, RT5663_DIG_MISC,
RT5663_DIG_GATE_CTRL_MASK, RT5663_DIG_GATE_CTRL_EN);
@@ -3663,7 +3663,7 @@ static int rt5663_i2c_probe(struct i2c_client *i2c,
regmap_update_bits(rt5663->regmap, RT5663_TDM_2,
RT5663_DATA_SWAP_ADCDAT1_MASK,
RT5663_DATA_SWAP_ADCDAT1_LL);
- break;
+ break;
default:
dev_err(&i2c->dev, "%s:Unknown codec type\n", __func__);
}
diff --git a/sound/soc/codecs/rt5677-spi.c b/sound/soc/codecs/rt5677-spi.c
index d681488f5312..7810b1d7de32 100644
--- a/sound/soc/codecs/rt5677-spi.c
+++ b/sound/soc/codecs/rt5677-spi.c
@@ -24,6 +24,9 @@
#include <linux/firmware.h>
#include <linux/acpi.h>
+#include <sound/soc.h>
+
+#include "rt5677.h"
#include "rt5677-spi.h"
#define DRV_NAME "rt5677spi"
@@ -45,9 +48,367 @@
#define RT5677_SPI_WRITE_16 0x1
#define RT5677_SPI_READ_16 0x0
+#define RT5677_BUF_BYTES_TOTAL 0x20000
+#define RT5677_MIC_BUF_ADDR 0x60030000
+#define RT5677_MODEL_ADDR 0x5FFC9800
+#define RT5677_MIC_BUF_BYTES ((u32)(RT5677_BUF_BYTES_TOTAL - \
+ sizeof(u32)))
+#define RT5677_MIC_BUF_FIRST_READ_SIZE 0x10000
+
static struct spi_device *g_spi;
static DEFINE_MUTEX(spi_mutex);
+struct rt5677_dsp {
+ struct device *dev;
+ struct delayed_work copy_work;
+ struct mutex dma_lock;
+ struct snd_pcm_substream *substream;
+ size_t dma_offset; /* zero-based offset into runtime->dma_area */
+ size_t avail_bytes; /* number of new bytes since last period */
+ u32 mic_read_offset; /* zero-based offset into DSP's mic buffer */
+ bool new_hotword; /* a new hotword is fired */
+};
+
+static const struct snd_pcm_hardware rt5677_spi_pcm_hardware = {
+ .info = SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_INTERLEAVED,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .period_bytes_min = PAGE_SIZE,
+ .period_bytes_max = RT5677_BUF_BYTES_TOTAL / 8,
+ .periods_min = 8,
+ .periods_max = 8,
+ .channels_min = 1,
+ .channels_max = 1,
+ .buffer_bytes_max = RT5677_BUF_BYTES_TOTAL,
+};
+
+static struct snd_soc_dai_driver rt5677_spi_dai = {
+ /* The DAI name "rt5677-dsp-cpu-dai" is not used. The actual DAI name
+ * registered with ASoC is the name of the device "spi-RT5677AA:00",
+ * because we only have one DAI. See snd_soc_register_dais().
+ */
+ .name = "rt5677-dsp-cpu-dai",
+ .id = 0,
+ .capture = {
+ .stream_name = "DSP Capture",
+ .channels_min = 1,
+ .channels_max = 1,
+ .rates = SNDRV_PCM_RATE_16000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ },
+};
+
+/* PCM for streaming audio from the DSP buffer */
+static int rt5677_spi_pcm_open(
+ struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
+{
+ snd_soc_set_runtime_hwparams(substream, &rt5677_spi_pcm_hardware);
+ return 0;
+}
+
+static int rt5677_spi_pcm_close(
+ struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_component *codec_component =
+ snd_soc_rtdcom_lookup(rtd, "rt5677");
+ struct rt5677_priv *rt5677 =
+ snd_soc_component_get_drvdata(codec_component);
+ struct rt5677_dsp *rt5677_dsp =
+ snd_soc_component_get_drvdata(component);
+
+ cancel_delayed_work_sync(&rt5677_dsp->copy_work);
+ rt5677->set_dsp_vad(codec_component, false);
+ return 0;
+}
+
+static int rt5677_spi_hw_params(
+ struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *hw_params)
+{
+ struct rt5677_dsp *rt5677_dsp =
+ snd_soc_component_get_drvdata(component);
+ int ret;
+
+ mutex_lock(&rt5677_dsp->dma_lock);
+ ret = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params));
+ rt5677_dsp->substream = substream;
+ mutex_unlock(&rt5677_dsp->dma_lock);
+
+ return ret;
+}
+
+static int rt5677_spi_hw_free(
+ struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
+{
+ struct rt5677_dsp *rt5677_dsp =
+ snd_soc_component_get_drvdata(component);
+
+ mutex_lock(&rt5677_dsp->dma_lock);
+ rt5677_dsp->substream = NULL;
+ mutex_unlock(&rt5677_dsp->dma_lock);
+
+ return snd_pcm_lib_free_pages(substream);
+}
+
+static int rt5677_spi_prepare(
+ struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_component *rt5677_component =
+ snd_soc_rtdcom_lookup(rtd, "rt5677");
+ struct rt5677_priv *rt5677 =
+ snd_soc_component_get_drvdata(rt5677_component);
+ struct rt5677_dsp *rt5677_dsp =
+ snd_soc_component_get_drvdata(component);
+
+ rt5677->set_dsp_vad(rt5677_component, true);
+ rt5677_dsp->dma_offset = 0;
+ rt5677_dsp->avail_bytes = 0;
+ return 0;
+}
+
+static snd_pcm_uframes_t rt5677_spi_pcm_pointer(
+ struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct rt5677_dsp *rt5677_dsp =
+ snd_soc_component_get_drvdata(component);
+
+ return bytes_to_frames(runtime, rt5677_dsp->dma_offset);
+}
+
+static int rt5677_spi_mic_write_offset(u32 *mic_write_offset)
+{
+ int ret;
+ /* Grab the first 4 bytes that hold the write pointer on the
+ * dsp, and check to make sure that it points somewhere inside the
+ * buffer.
+ */
+ ret = rt5677_spi_read(RT5677_MIC_BUF_ADDR, mic_write_offset,
+ sizeof(u32));
+ if (ret)
+ return ret;
+ /* Adjust the offset so that it's zero-based */
+ *mic_write_offset = *mic_write_offset - sizeof(u32);
+ return *mic_write_offset < RT5677_MIC_BUF_BYTES ? 0 : -EFAULT;
+}
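
The DSP mic buffer layout assumed here, reconstructed from the constants above:

	/*
	 * RT5677_MIC_BUF_ADDR                     + RT5677_BUF_BYTES_TOTAL
	 * |                                       |
	 * +----------------+----------------------+
	 * | u32 write ptr  | samples (RT5677_MIC_BUF_BYTES)
	 * +----------------+----------------------+
	 *
	 * The stored write pointer counts from RT5677_MIC_BUF_ADDR and thus
	 * includes the u32 header, hence the sizeof(u32) subtraction to make
	 * it zero-based within the sample area.
	 */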
+
+/*
+ * Copy one contiguous block of audio samples from the DSP mic buffer to the
+ * dma_area of the pcm runtime. The receiving buffer may wrap around.
+ * @begin: start offset of the block to copy, in bytes.
+ * @end: offset of the first byte after the block to copy, must be greater
+ * than or equal to begin.
+ *
+ * Return: Zero if successful, or a negative error code on failure.
+ */
+static int rt5677_spi_copy_block(struct rt5677_dsp *rt5677_dsp,
+ u32 begin, u32 end)
+{
+ struct snd_pcm_runtime *runtime = rt5677_dsp->substream->runtime;
+ size_t bytes_per_frame = frames_to_bytes(runtime, 1);
+ size_t first_chunk_len, second_chunk_len;
+ int ret;
+
+ if (begin > end || runtime->dma_bytes < 2 * bytes_per_frame) {
+ dev_err(rt5677_dsp->dev,
+ "Invalid copy from (%u, %u), dma_area size %zu\n",
+ begin, end, runtime->dma_bytes);
+ return -EINVAL;
+ }
+
+ /* The block to copy is empty */
+ if (begin == end)
+ return 0;
+
+ /* If the incoming chunk is too big for the receiving buffer, only the
+ * last "receiving buffer size - one frame" bytes are copied.
+ */
+ if (end - begin > runtime->dma_bytes - bytes_per_frame)
+ begin = end - (runtime->dma_bytes - bytes_per_frame);
+
+ /* May need to split to two chunks, calculate the size of each */
+ first_chunk_len = end - begin;
+ second_chunk_len = 0;
+ if (rt5677_dsp->dma_offset + first_chunk_len > runtime->dma_bytes) {
+ /* Receiving buffer wrapped around */
+ second_chunk_len = first_chunk_len;
+ first_chunk_len = runtime->dma_bytes - rt5677_dsp->dma_offset;
+ second_chunk_len -= first_chunk_len;
+ }
+
+ /* Copy first chunk */
+ ret = rt5677_spi_read(RT5677_MIC_BUF_ADDR + sizeof(u32) + begin,
+ runtime->dma_area + rt5677_dsp->dma_offset,
+ first_chunk_len);
+ if (ret)
+ return ret;
+ rt5677_dsp->dma_offset += first_chunk_len;
+ if (rt5677_dsp->dma_offset == runtime->dma_bytes)
+ rt5677_dsp->dma_offset = 0;
+
+ /* Copy second chunk */
+ if (second_chunk_len) {
+ ret = rt5677_spi_read(RT5677_MIC_BUF_ADDR + sizeof(u32) +
+ begin + first_chunk_len, runtime->dma_area,
+ second_chunk_len);
+ if (!ret)
+ rt5677_dsp->dma_offset = second_chunk_len;
+ }
+ return ret;
+}
+
+/*
+ * Copy a given amount of audio samples from the DSP mic buffer starting at
+ * mic_read_offset, to the dma_area of the pcm runtime. The source buffer may
+ * wrap around. mic_read_offset is updated after successful copy.
+ * @amount: amount of samples to copy, in bytes.
+ *
+ * Return: Zero if successful, or a negative error code on failure.
+ */
+static int rt5677_spi_copy(struct rt5677_dsp *rt5677_dsp, u32 amount)
+{
+ int ret = 0;
+ u32 target;
+
+ if (amount == 0)
+ return ret;
+
+ target = rt5677_dsp->mic_read_offset + amount;
+ /* Copy the first chunk in DSP's mic buffer */
+ ret |= rt5677_spi_copy_block(rt5677_dsp, rt5677_dsp->mic_read_offset,
+ min(target, RT5677_MIC_BUF_BYTES));
+
+ if (target >= RT5677_MIC_BUF_BYTES) {
+ /* Wrap around, copy the second chunk */
+ target -= RT5677_MIC_BUF_BYTES;
+ ret |= rt5677_spi_copy_block(rt5677_dsp, 0, target);
+ }
+
+ if (!ret)
+ rt5677_dsp->mic_read_offset = target;
+ return ret;
+}
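
A worked example of the wraparound split (RT5677_MIC_BUF_BYTES = 0x1fffc):

	/*
	 * mic_read_offset = 0x1fff0, amount = 0x20:
	 *   target = 0x20010
	 *   first block:  [0x1fff0, 0x1fffc)  - 12 bytes, buffer tail
	 *   target -= 0x1fffc                ->  0x14
	 *   second block: [0x0, 0x14)        - 20 bytes, buffer head
	 */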
+
+/*
+ * A delayed work that streams audio samples from the DSP mic buffer to the
+ * dma_area of the pcm runtime via SPI.
+ */
+static void rt5677_spi_copy_work(struct work_struct *work)
+{
+ struct rt5677_dsp *rt5677_dsp =
+ container_of(work, struct rt5677_dsp, copy_work.work);
+ struct snd_pcm_runtime *runtime;
+ u32 mic_write_offset;
+ size_t new_bytes, copy_bytes, period_bytes;
+ unsigned int delay;
+ int ret = 0;
+
+ /* Ensure runtime->dma_area buffer does not go away while copying. */
+ mutex_lock(&rt5677_dsp->dma_lock);
+ if (!rt5677_dsp->substream) {
+ dev_err(rt5677_dsp->dev, "No pcm substream\n");
+ goto done;
+ }
+
+ runtime = rt5677_dsp->substream->runtime;
+
+ if (rt5677_spi_mic_write_offset(&mic_write_offset)) {
+ dev_err(rt5677_dsp->dev, "No mic_write_offset\n");
+ goto done;
+ }
+
+ /* On the first streaming read after a hotword fires, start reading
+ * from 2 seconds of audio before wherever mic_write_offset currently
+ * points.
+ */
+ if (rt5677_dsp->new_hotword) {
+ rt5677_dsp->new_hotword = false;
+ /* See if buffer wraparound happens */
+ if (mic_write_offset < RT5677_MIC_BUF_FIRST_READ_SIZE)
+ rt5677_dsp->mic_read_offset = RT5677_MIC_BUF_BYTES -
+ (RT5677_MIC_BUF_FIRST_READ_SIZE -
+ mic_write_offset);
+ else
+ rt5677_dsp->mic_read_offset = mic_write_offset -
+ RT5677_MIC_BUF_FIRST_READ_SIZE;
+ }
+
+ /* Calculate the amount of new samples in bytes */
+ if (rt5677_dsp->mic_read_offset <= mic_write_offset)
+ new_bytes = mic_write_offset - rt5677_dsp->mic_read_offset;
+ else
+ new_bytes = RT5677_MIC_BUF_BYTES + mic_write_offset
+ - rt5677_dsp->mic_read_offset;
+
+ /* Copy all new samples from DSP mic buffer, one period at a time */
+ period_bytes = snd_pcm_lib_period_bytes(rt5677_dsp->substream);
+ while (new_bytes) {
+ copy_bytes = min(new_bytes, period_bytes
+ - rt5677_dsp->avail_bytes);
+ ret = rt5677_spi_copy(rt5677_dsp, copy_bytes);
+ if (ret) {
+ dev_err(rt5677_dsp->dev, "Copy failed %d\n", ret);
+ goto done;
+ }
+ rt5677_dsp->avail_bytes += copy_bytes;
+ if (rt5677_dsp->avail_bytes >= period_bytes) {
+ snd_pcm_period_elapsed(rt5677_dsp->substream);
+ rt5677_dsp->avail_bytes = 0;
+ }
+ new_bytes -= copy_bytes;
+ }
+
+ delay = bytes_to_frames(runtime, period_bytes) / (runtime->rate / 1000);
+ schedule_delayed_work(&rt5677_dsp->copy_work, msecs_to_jiffies(delay));
+done:
+ mutex_unlock(&rt5677_dsp->dma_lock);
+}
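
The rescheduling delay works out to one period expressed in milliseconds; e.g. at the DAI's fixed 16 kHz mono S16 format with a 16 KiB period:

	/*
	 *   bytes_to_frames(runtime, 16384) = 16384 / 2 = 8192 frames
	 *   delay = 8192 / (16000 / 1000)   = 512 ms
	 */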
+
+static int rt5677_spi_pcm_new(struct snd_soc_component *component,
+ struct snd_soc_pcm_runtime *rtd)
+{
+ snd_pcm_lib_preallocate_pages_for_all(rtd->pcm, SNDRV_DMA_TYPE_VMALLOC,
+ NULL, 0, 0);
+ return 0;
+}
+
+static int rt5677_spi_pcm_probe(struct snd_soc_component *component)
+{
+ struct rt5677_dsp *rt5677_dsp;
+
+ rt5677_dsp = devm_kzalloc(component->dev, sizeof(*rt5677_dsp),
+ GFP_KERNEL);
+ if (!rt5677_dsp)
+ return -ENOMEM;
+ rt5677_dsp->dev = &g_spi->dev;
+ mutex_init(&rt5677_dsp->dma_lock);
+ INIT_DELAYED_WORK(&rt5677_dsp->copy_work, rt5677_spi_copy_work);
+
+ snd_soc_component_set_drvdata(component, rt5677_dsp);
+ return 0;
+}
+
+static const struct snd_soc_component_driver rt5677_spi_dai_component = {
+ .name = DRV_NAME,
+ .probe = rt5677_spi_pcm_probe,
+ .open = rt5677_spi_pcm_open,
+ .close = rt5677_spi_pcm_close,
+ .hw_params = rt5677_spi_hw_params,
+ .hw_free = rt5677_spi_hw_free,
+ .prepare = rt5677_spi_prepare,
+ .pointer = rt5677_spi_pcm_pointer,
+ .pcm_construct = rt5677_spi_pcm_new,
+};
+
/* Select a suitable transfer command for the next transfer to ensure
* the transfer address is always naturally aligned while minimizing
* the total number of transfers required.
@@ -218,9 +579,45 @@ int rt5677_spi_write_firmware(u32 addr, const struct firmware *fw)
}
EXPORT_SYMBOL_GPL(rt5677_spi_write_firmware);
+void rt5677_spi_hotword_detected(void)
+{
+ struct rt5677_dsp *rt5677_dsp;
+
+ if (!g_spi)
+ return;
+
+ rt5677_dsp = dev_get_drvdata(&g_spi->dev);
+ if (!rt5677_dsp) {
+ dev_err(&g_spi->dev, "Can't get rt5677_dsp\n");
+ return;
+ }
+
+ mutex_lock(&rt5677_dsp->dma_lock);
+ dev_info(rt5677_dsp->dev, "Hotword detected\n");
+ rt5677_dsp->new_hotword = true;
+ mutex_unlock(&rt5677_dsp->dma_lock);
+
+ schedule_delayed_work(&rt5677_dsp->copy_work, 0);
+}
+EXPORT_SYMBOL_GPL(rt5677_spi_hotword_detected);
+
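The expected caller is the rt5677 codec driver's interrupt path, once the DSP reports a hotword; a hypothetical sketch (the flag name is illustrative only):

	/* in the codec driver's IRQ handler */
	if (irq_status & HOTWORD_FLAG)	/* hypothetical condition */
		rt5677_spi_hotword_detected();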
static int rt5677_spi_probe(struct spi_device *spi)
{
+ int ret;
+
g_spi = spi;
+
+ ret = snd_soc_register_component(&spi->dev, &rt5677_spi_dai_component,
+ &rt5677_spi_dai, 1);
+ if (ret < 0)
+ dev_err(&spi->dev, "Failed to register component.\n");
+
+ return ret;
+}
+
+static int rt5677_spi_remove(struct spi_device *spi)
+{
+ snd_soc_unregister_component(&spi->dev);
return 0;
}
@@ -236,6 +633,7 @@ static struct spi_driver rt5677_spi_driver = {
.acpi_match_table = ACPI_PTR(rt5677_spi_acpi_id),
},
.probe = rt5677_spi_probe,
+ .remove = rt5677_spi_remove,
};
module_spi_driver(rt5677_spi_driver);
diff --git a/sound/soc/codecs/rt5677-spi.h b/sound/soc/codecs/rt5677-spi.h
index 6ba3369dc235..3af36ec928e9 100644
--- a/sound/soc/codecs/rt5677-spi.h
+++ b/sound/soc/codecs/rt5677-spi.h
@@ -12,5 +12,6 @@
int rt5677_spi_read(u32 addr, void *rxbuf, size_t len);
int rt5677_spi_write(u32 addr, const void *txbuf, size_t len);
int rt5677_spi_write_firmware(u32 addr, const struct firmware *fw);
+void rt5677_spi_hotword_detected(void);
#endif /* __RT5677_SPI_H__ */
diff --git a/sound/soc/codecs/rt5677.c b/sound/soc/codecs/rt5677.c
index 315a3d39bc09..e9a051a50ab2 100644
--- a/sound/soc/codecs/rt5677.c
+++ b/sound/soc/codecs/rt5677.c
@@ -38,6 +38,10 @@
#define RT5677_DEVICE_ID 0x6327
+/* Register controlling boot vector */
+#define RT5677_DSP_BOOT_VECTOR 0x1801f090
+#define RT5677_MODEL_ADDR 0x5FFC9800
+
#define RT5677_PR_RANGE_BASE (0xff + 1)
#define RT5677_PR_SPACING 0x100
@@ -298,6 +302,7 @@ static bool rt5677_volatile_register(struct device *dev, unsigned int reg)
case RT5677_I2C_MASTER_CTRL7:
case RT5677_I2C_MASTER_CTRL8:
case RT5677_HAP_GENE_CTRL2:
+ case RT5677_PWR_ANLG2: /* Modified by DSP firmware */
case RT5677_PWR_DSP_ST:
case RT5677_PRIV_DATA:
case RT5677_ASRC_22:
@@ -308,6 +313,8 @@ static bool rt5677_volatile_register(struct device *dev, unsigned int reg)
case RT5677_IRQ_CTRL1:
case RT5677_IRQ_CTRL2:
case RT5677_GPIO_ST:
+ case RT5677_GPIO_CTRL1: /* Modified by DSP firmware */
+ case RT5677_GPIO_CTRL2: /* Modified by DSP firmware */
case RT5677_DSP_INB1_SRC_CTRL4:
case RT5677_DSP_INB2_SRC_CTRL4:
case RT5677_DSP_INB3_SRC_CTRL4:
@@ -686,10 +693,8 @@ static int rt5677_dsp_mode_i2c_read(
return ret;
}
-static void rt5677_set_dsp_mode(struct snd_soc_component *component, bool on)
+static void rt5677_set_dsp_mode(struct rt5677_priv *rt5677, bool on)
{
- struct rt5677_priv *rt5677 = snd_soc_component_get_drvdata(component);
-
if (on) {
regmap_update_bits(rt5677->regmap, RT5677_PWR_DSP1,
RT5677_PWR_DSP, RT5677_PWR_DSP);
@@ -701,86 +706,259 @@ static void rt5677_set_dsp_mode(struct snd_soc_component *component, bool on)
}
}
+static unsigned int rt5677_set_vad_source(struct rt5677_priv *rt5677)
+{
+ struct snd_soc_dapm_context *dapm =
+ snd_soc_component_get_dapm(rt5677->component);
+ /* Force dapm to sync before we enable the
+ * DSP to prevent write corruption
+ */
+ snd_soc_dapm_sync(dapm);
+
+ /* DMIC1 power = enabled
+ * DMIC CLK = 256 * fs / 12
+ */
+ regmap_update_bits(rt5677->regmap, RT5677_DMIC_CTRL1,
+ RT5677_DMIC_CLK_MASK, 5 << RT5677_DMIC_CLK_SFT);
+
+ /* I2S pre divide 2 = /6 (clk_sys2) */
+ regmap_update_bits(rt5677->regmap, RT5677_CLK_TREE_CTRL1,
+ RT5677_I2S_PD2_MASK, RT5677_I2S_PD2_6);
+
+ /* DSP Clock = MCLK1 (bypassed PLL2) */
+ regmap_write(rt5677->regmap, RT5677_GLB_CLK2,
+ RT5677_DSP_CLK_SRC_BYPASS);
+
+ /* SAD Threshold1 */
+ regmap_write(rt5677->regmap, RT5677_VAD_CTRL2, 0x013f);
+ /* SAD Threshold2 */
+ regmap_write(rt5677->regmap, RT5677_VAD_CTRL3, 0x0ae5);
+ /* SAD Sample Rate Converter = Up 6 (8K to 48K)
+ * SAD Output Sample Rate = Same as I2S
+ * SAD Threshold3
+ */
+ regmap_update_bits(rt5677->regmap, RT5677_VAD_CTRL4,
+ RT5677_VAD_OUT_SRC_RATE_MASK | RT5677_VAD_OUT_SRC_MASK |
+ RT5677_VAD_LV_DIFF_MASK, 0x7f << RT5677_VAD_LV_DIFF_SFT);
+ /* Minimum frame level within a pre-determined duration = 32 frames
+ * Bypass ADPCM Encoder/Decoder = Bypass ADPCM
+ * Automatic Push Data to SAD Buffer Once SAD Flag is triggered = enable
+ * SAD Buffer Over-Writing = enable
+ * SAD Buffer Pop Mode Control = disable
+ * SAD Buffer Push Mode Control = enable
+ * SAD Detector Control = enable
+ * SAD Function Control = enable
+ * SAD Function Reset = normal
+ */
+ regmap_write(rt5677->regmap, RT5677_VAD_CTRL1,
+ RT5677_VAD_FUNC_RESET | RT5677_VAD_FUNC_ENABLE |
+ RT5677_VAD_DET_ENABLE | RT5677_VAD_BUF_PUSH |
+ RT5677_VAD_BUF_OW | RT5677_VAD_FG2ENC |
+ RT5677_VAD_ADPCM_BYPASS | 1 << RT5677_VAD_MIN_DUR_SFT);
+
+ /* VAD/SAD is not routed to the IRQ output (i.e. MX-BE[14] = 0), but it
+ * is routed to DSP_IRQ_0, so DSP firmware may use it to sleep and save
+ * power. See ALC5677 datasheet section 9.17 "GPIO, Interrupt and Jack
+ * Detection" for more info.
+ */
+
+ /* Private register, no doc */
+ regmap_update_bits(rt5677->regmap, RT5677_PR_BASE + RT5677_BIAS_CUR4,
+ 0x0f00, 0x0100);
+
+ /* LDO2 output = 1.2V
+ * LDO1 output = 1.2V (LDO_IN = 1.8V)
+ */
+ regmap_update_bits(rt5677->regmap, RT5677_PWR_ANLG1,
+ RT5677_LDO1_SEL_MASK | RT5677_LDO2_SEL_MASK,
+ 5 << RT5677_LDO1_SEL_SFT | 5 << RT5677_LDO2_SEL_SFT);
+
+ /* Codec core power = power on
+ * LDO1 power = power on
+ */
+ regmap_update_bits(rt5677->regmap, RT5677_PWR_ANLG2,
+ RT5677_PWR_CORE | RT5677_PWR_LDO1,
+ RT5677_PWR_CORE | RT5677_PWR_LDO1);
+
+ /* Isolation for DCVDD4 = normal (set during probe)
+ * Isolation for DCVDD2 = normal (set during probe)
+ * Isolation for DSP = normal
+ * Isolation for Band 0~7 = disable
+ * Isolation for InBound 4~10 and OutBound 4~10 = disable
+ */
+ regmap_write(rt5677->regmap, RT5677_PWR_DSP2,
+ RT5677_PWR_CORE_ISO | RT5677_PWR_DSP_ISO |
+ RT5677_PWR_SR7_ISO | RT5677_PWR_SR6_ISO |
+ RT5677_PWR_SR5_ISO | RT5677_PWR_SR4_ISO |
+ RT5677_PWR_SR3_ISO | RT5677_PWR_SR2_ISO |
+ RT5677_PWR_SR1_ISO | RT5677_PWR_SR0_ISO |
+ RT5677_PWR_MLT_ISO);
+
+ /* System Band 0~7 = power on
+ * InBound 4~10 and OutBound 4~10 = power on
+ * DSP = power on
+ * DSP CPU = stop (will be set to "run" after firmware loaded)
+ */
+ regmap_write(rt5677->regmap, RT5677_PWR_DSP1,
+ RT5677_PWR_SR7 | RT5677_PWR_SR6 |
+ RT5677_PWR_SR5 | RT5677_PWR_SR4 |
+ RT5677_PWR_SR3 | RT5677_PWR_SR2 |
+ RT5677_PWR_SR1 | RT5677_PWR_SR0 |
+ RT5677_PWR_MLT | RT5677_PWR_DSP |
+ RT5677_PWR_DSP_CPU);
+
+ return 0;
+}
+
+static int rt5677_parse_and_load_dsp(struct rt5677_priv *rt5677, const u8 *buf,
+ unsigned int len)
+{
+ struct snd_soc_component *component = rt5677->component;
+ Elf32_Ehdr *elf_hdr;
+ Elf32_Phdr *pr_hdr;
+ Elf32_Half i;
+ int ret = 0;
+
+ if (!buf || (len < sizeof(Elf32_Ehdr)))
+ return -ENOMEM;
+
+ elf_hdr = (Elf32_Ehdr *)buf;
+#ifndef EM_XTENSA
+#define EM_XTENSA 94
+#endif
+ if (strncmp(elf_hdr->e_ident, ELFMAG, sizeof(ELFMAG) - 1))
+ dev_err(component->dev, "Wrong ELF header prefix\n");
+ if (elf_hdr->e_ehsize != sizeof(Elf32_Ehdr))
+ dev_err(component->dev, "Wrong Elf header size\n");
+ if (elf_hdr->e_machine != EM_XTENSA)
+ dev_err(component->dev, "Wrong DSP code file\n");
+
+ if (len < elf_hdr->e_phoff)
+ return -ENOMEM;
+ pr_hdr = (Elf32_Phdr *)(buf + elf_hdr->e_phoff);
+ for (i = 0; i < elf_hdr->e_phnum; i++) {
+ /* TODO: handle p_memsz != p_filesz */
+ if (pr_hdr->p_paddr && pr_hdr->p_filesz) {
+ dev_info(component->dev, "Load 0x%x bytes to 0x%x\n",
+ pr_hdr->p_filesz, pr_hdr->p_paddr);
+
+ ret = rt5677_spi_write(pr_hdr->p_paddr,
+ buf + pr_hdr->p_offset,
+ pr_hdr->p_filesz);
+ if (ret)
+ dev_err(component->dev, "Load firmware failed %d\n",
+ ret);
+ }
+ pr_hdr++;
+ }
+ return ret;
+}
+
+static int rt5677_load_dsp_from_file(struct rt5677_priv *rt5677)
+{
+ const struct firmware *fwp;
+ struct device *dev = rt5677->component->dev;
+ int ret = 0;
+
+ /* Load dsp firmware from rt5677_elf_vad file */
+ ret = request_firmware(&fwp, "rt5677_elf_vad", dev);
+ if (ret) {
+ dev_err(dev, "Request rt5677_elf_vad failed %d\n", ret);
+ return ret;
+ }
+ dev_info(dev, "Requested rt5677_elf_vad (%zu)\n", fwp->size);
+
+ ret = rt5677_parse_and_load_dsp(rt5677, fwp->data, fwp->size);
+ release_firmware(fwp);
+ return ret;
+}
+
static int rt5677_set_dsp_vad(struct snd_soc_component *component, bool on)
{
struct rt5677_priv *rt5677 = snd_soc_component_get_drvdata(component);
- static bool activity;
- int ret;
+ rt5677->dsp_vad_en_request = on;
+ rt5677->dsp_vad_en = on;
if (!IS_ENABLED(CONFIG_SND_SOC_RT5677_SPI))
return -ENXIO;
- if (on && !activity) {
+ schedule_delayed_work(&rt5677->dsp_work, 0);
+ return 0;
+}
+
+static void rt5677_dsp_work(struct work_struct *work)
+{
+ struct rt5677_priv *rt5677 =
+ container_of(work, struct rt5677_priv, dsp_work.work);
+ static bool activity;
+ bool enable = rt5677->dsp_vad_en;
+ int i, val;
+
+ dev_info(rt5677->component->dev, "DSP VAD: enable=%d, activity=%d\n",
+ enable, activity);
+
+ if (enable && !activity) {
activity = true;
- regcache_cache_only(rt5677->regmap, false);
- regcache_cache_bypass(rt5677->regmap, true);
+ /* Before a hotword is detected, GPIO1 pin is configured as IRQ
+ * output so that jack detect works. When a hotword is detected,
+ * the DSP firmware configures the GPIO1 pin as GPIO1 and
+ * drives a 1. rt5677_irq() is called after a rising edge on
+ * the GPIO1 pin, due to either jack detect event or hotword
+ * event, or both. All possible events are checked and handled
+ * in rt5677_irq() where GPIO1 pin is configured back to IRQ
+ * output if a hotword is detected.
+ */
- regmap_update_bits(rt5677->regmap, RT5677_DIG_MISC, 0x1, 0x1);
- regmap_update_bits(rt5677->regmap,
- RT5677_PR_BASE + RT5677_BIAS_CUR4, 0x0f00, 0x0f00);
- regmap_update_bits(rt5677->regmap, RT5677_PWR_ANLG1,
- RT5677_LDO1_SEL_MASK, 0x0);
- regmap_update_bits(rt5677->regmap, RT5677_PWR_ANLG2,
- RT5677_PWR_LDO1, RT5677_PWR_LDO1);
- switch (rt5677->type) {
- case RT5677:
- regmap_update_bits(rt5677->regmap, RT5677_GLB_CLK1,
- RT5677_MCLK_SRC_MASK, RT5677_MCLK2_SRC);
- regmap_update_bits(rt5677->regmap, RT5677_GLB_CLK2,
- RT5677_PLL2_PR_SRC_MASK |
- RT5677_DSP_CLK_SRC_MASK,
- RT5677_PLL2_PR_SRC_MCLK2 |
- RT5677_DSP_CLK_SRC_BYPASS);
- break;
- case RT5676:
- regmap_update_bits(rt5677->regmap, RT5677_GLB_CLK2,
- RT5677_DSP_CLK_SRC_MASK,
- RT5677_DSP_CLK_SRC_BYPASS);
- break;
- default:
- break;
+ rt5677_set_vad_source(rt5677);
+ rt5677_set_dsp_mode(rt5677, true);
+
+#define RT5677_BOOT_RETRY 20
+ for (i = 0; i < RT5677_BOOT_RETRY; i++) {
+ regmap_read(rt5677->regmap, RT5677_PWR_DSP_ST, &val);
+ if (val == 0x3ff)
+ break;
+ udelay(500);
}
- regmap_write(rt5677->regmap, RT5677_PWR_DSP2, 0x07ff);
- regmap_write(rt5677->regmap, RT5677_PWR_DSP1, 0x07fd);
- rt5677_set_dsp_mode(component, true);
-
- ret = request_firmware(&rt5677->fw1, RT5677_FIRMWARE1,
- component->dev);
- if (ret == 0) {
- rt5677_spi_write_firmware(0x50000000, rt5677->fw1);
- release_firmware(rt5677->fw1);
+ if (i == RT5677_BOOT_RETRY && val != 0x3ff) {
+ dev_err(rt5677->component->dev, "DSP Boot Timed Out!\n");
+ return;
}
- ret = request_firmware(&rt5677->fw2, RT5677_FIRMWARE2,
- component->dev);
- if (ret == 0) {
- rt5677_spi_write_firmware(0x60000000, rt5677->fw2);
- release_firmware(rt5677->fw2);
- }
+ /* Boot the firmware from IRAM instead of SRAM0. */
+ rt5677_dsp_mode_i2c_write_addr(rt5677, RT5677_DSP_BOOT_VECTOR,
+ 0x0009, 0x0003);
+ rt5677_dsp_mode_i2c_write_addr(rt5677, RT5677_DSP_BOOT_VECTOR,
+ 0x0019, 0x0003);
+ rt5677_dsp_mode_i2c_write_addr(rt5677, RT5677_DSP_BOOT_VECTOR,
+ 0x0009, 0x0003);
- regmap_update_bits(rt5677->regmap, RT5677_PWR_DSP1, 0x1, 0x0);
+ rt5677_load_dsp_from_file(rt5677);
- regcache_cache_bypass(rt5677->regmap, false);
- regcache_cache_only(rt5677->regmap, true);
- } else if (!on && activity) {
+ /* Set DSP CPU to Run */
+ regmap_update_bits(rt5677->regmap, RT5677_PWR_DSP1,
+ RT5677_PWR_DSP_CPU, 0x0);
+ } else if (!enable && activity) {
activity = false;
- regcache_cache_only(rt5677->regmap, false);
- regcache_cache_bypass(rt5677->regmap, true);
+ /* Don't turn off the DSP while handling irqs */
+ mutex_lock(&rt5677->irq_lock);
+ /* Set DSP CPU to Stop */
+ regmap_update_bits(rt5677->regmap, RT5677_PWR_DSP1,
+ RT5677_PWR_DSP_CPU, RT5677_PWR_DSP_CPU);
- regmap_update_bits(rt5677->regmap, RT5677_PWR_DSP1, 0x1, 0x1);
- rt5677_set_dsp_mode(component, false);
- regmap_write(rt5677->regmap, RT5677_PWR_DSP1, 0x0001);
+ rt5677_set_dsp_mode(rt5677, false);
- regmap_write(rt5677->regmap, RT5677_RESET, 0x10ec);
+ /* Disable and clear VAD interrupt */
+ regmap_write(rt5677->regmap, RT5677_VAD_CTRL1, 0x2184);
- regcache_cache_bypass(rt5677->regmap, false);
- regcache_mark_dirty(rt5677->regmap);
- regcache_sync(rt5677->regmap);
- }
+ /* Set GPIO1 pin back to be IRQ output for jack detect */
+ regmap_update_bits(rt5677->regmap, RT5677_GPIO_CTRL1,
+ RT5677_GPIO1_PIN_MASK, RT5677_GPIO1_PIN_IRQ);
- return 0;
+ mutex_unlock(&rt5677->irq_lock);
+ }
}
static const DECLARE_TLV_DB_SCALE(dac_vol_tlv, -6525, 75, 0);
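rt5677_parse_and_load_dsp() in the hunk above walks standard ELF32 program headers and writes each loadable segment to the DSP over SPI. A hypothetical host-side checker, not part of this patch, could vet an rt5677_elf_vad image the same way before deploying it:

#include <elf.h>     /* Elf32_Ehdr, Elf32_Phdr, ELFMAG, SELFMAG */
#include <stdio.h>
#include <string.h>

static int check_vad_elf(const unsigned char *buf, size_t len)
{
	const Elf32_Ehdr *eh = (const Elf32_Ehdr *)buf;
	const Elf32_Phdr *ph;
	int i;

	if (len < sizeof(*eh) || memcmp(eh->e_ident, ELFMAG, SELFMAG))
		return -1;               /* too short, or not an ELF image */
	if (eh->e_machine != 94)         /* EM_XTENSA, as in the driver */
		return -1;
	ph = (const Elf32_Phdr *)(buf + eh->e_phoff);
	for (i = 0; i < eh->e_phnum; i++, ph++)
		if (ph->p_paddr && ph->p_filesz)
			printf("segment %d: 0x%x bytes at 0x%x\n", i,
			       (unsigned int)ph->p_filesz,
			       (unsigned int)ph->p_paddr);
	return 0;
}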
@@ -805,7 +983,7 @@ static int rt5677_dsp_vad_get(struct snd_kcontrol *kcontrol,
struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
struct rt5677_priv *rt5677 = snd_soc_component_get_drvdata(component);
- ucontrol->value.integer.value[0] = rt5677->dsp_vad_en;
+ ucontrol->value.integer.value[0] = rt5677->dsp_vad_en_request;
return 0;
}
@@ -814,12 +992,8 @@ static int rt5677_dsp_vad_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
- struct rt5677_priv *rt5677 = snd_soc_component_get_drvdata(component);
-
- rt5677->dsp_vad_en = !!ucontrol->value.integer.value[0];
- if (snd_soc_component_get_bias_level(component) == SND_SOC_BIAS_OFF)
- rt5677_set_dsp_vad(component, rt5677->dsp_vad_en);
+ rt5677_set_dsp_vad(component, !!ucontrol->value.integer.value[0]);
return 0;
}
@@ -3010,6 +3184,7 @@ static const struct snd_soc_dapm_widget rt5677_dapm_widgets[] = {
SND_SOC_DAPM_AIF_OUT("AIF4TX", "AIF4 Capture", 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_IN("SLBRX", "SLIMBus Playback", 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_OUT("SLBTX", "SLIMBus Capture", 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("DSPTX", "DSP Buffer", 0, SND_SOC_NOPM, 0, 0),
/* Sidetone Mux */
SND_SOC_DAPM_MUX("Sidetone Mux", SND_SOC_NOPM, 0, 0,
@@ -3544,11 +3719,24 @@ static const struct snd_soc_dapm_route rt5677_dapm_routes[] = {
{ "SLBTX", NULL, "SLB ADC3 Mux" },
{ "SLBTX", NULL, "SLB ADC4 Mux" },
+ { "DSPTX", NULL, "IB01 Bypass Mux" },
+
{ "IB01 Mux", "IF1 DAC 01", "IF1 DAC01" },
{ "IB01 Mux", "IF2 DAC 01", "IF2 DAC01" },
{ "IB01 Mux", "SLB DAC 01", "SLB DAC01" },
{ "IB01 Mux", "STO1 ADC MIX", "Stereo1 ADC MIX" },
- { "IB01 Mux", "VAD ADC/DAC1 FS", "DAC1 FS" },
+ /* The IB01 Mux controls the source for InBound0 and InBound1.
+ * When the mux option "VAD ADC/DAC1 FS" is selected, "VAD ADC" goes to
+ * InBound0 and "DAC1 FS" goes to InBound1. "VAD ADC" is used for
+ * hotwording. "DAC1 FS" is not used currently.
+ *
+ * Creating a common widget node for "VAD ADC" + "DAC1 FS" and
+ * connecting the common widget to IB01 Mux causes the issue where
+ * there is an active path going from system playback -> "DAC1 FS" ->
+ * IB01 Mux -> DSP Buffer -> hotword stream. This wrong path confuses
+ * DAPM. Therefore "DAC1 FS" is ignored for now.
+ */
+ { "IB01 Mux", "VAD ADC/DAC1 FS", "VAD ADC Mux" },
{ "IB01 Bypass Mux", "Bypass", "IB01 Mux" },
{ "IB01 Bypass Mux", "Pass SRC", "IB01 Mux" },
@@ -4457,14 +4645,15 @@ static int rt5677_set_bias_level(struct snd_soc_component *component,
enum snd_soc_bias_level level)
{
struct rt5677_priv *rt5677 = snd_soc_component_get_drvdata(component);
+ enum snd_soc_bias_level prev_bias =
+ snd_soc_component_get_bias_level(component);
switch (level) {
case SND_SOC_BIAS_ON:
break;
case SND_SOC_BIAS_PREPARE:
- if (snd_soc_component_get_bias_level(component) == SND_SOC_BIAS_STANDBY) {
- rt5677_set_dsp_vad(component, false);
+ if (prev_bias == SND_SOC_BIAS_STANDBY) {
regmap_update_bits(rt5677->regmap, RT5677_PWR_ANLG1,
RT5677_LDO1_SEL_MASK | RT5677_LDO2_SEL_MASK,
@@ -4488,9 +4677,25 @@ static int rt5677_set_bias_level(struct snd_soc_component *component,
break;
case SND_SOC_BIAS_STANDBY:
+ if (prev_bias == SND_SOC_BIAS_OFF &&
+ rt5677->dsp_vad_en_request) {
+ /* Re-enable the DSP if it was turned off at suspend */
+ rt5677->dsp_vad_en = true;
+ /* The delay is to wait for MCLK */
+ schedule_delayed_work(&rt5677->dsp_work,
+ msecs_to_jiffies(1000));
+ }
break;
case SND_SOC_BIAS_OFF:
+ flush_delayed_work(&rt5677->dsp_work);
+ if (rt5677->is_dsp_mode) {
+ /* Turn off the DSP before suspend */
+ rt5677->dsp_vad_en = false;
+ schedule_delayed_work(&rt5677->dsp_work, 0);
+ flush_delayed_work(&rt5677->dsp_work);
+ }
+
regmap_update_bits(rt5677->regmap, RT5677_DIG_MISC, 0x1, 0x0);
regmap_write(rt5677->regmap, RT5677_PWR_DIG1, 0x0000);
regmap_write(rt5677->regmap, RT5677_PWR_ANLG1,
@@ -4740,6 +4945,8 @@ static void rt5677_remove(struct snd_soc_component *component)
{
struct rt5677_priv *rt5677 = snd_soc_component_get_drvdata(component);
+ cancel_delayed_work_sync(&rt5677->dsp_work);
+
regmap_write(rt5677->regmap, RT5677_RESET, 0x10ec);
gpiod_set_value_cansleep(rt5677->pow_ldo2, 0);
gpiod_set_value_cansleep(rt5677->reset_pin, 1);
@@ -4750,6 +4957,11 @@ static int rt5677_suspend(struct snd_soc_component *component)
{
struct rt5677_priv *rt5677 = snd_soc_component_get_drvdata(component);
+ if (rt5677->irq) {
+ cancel_delayed_work_sync(&rt5677->resume_irq_check);
+ disable_irq(rt5677->irq);
+ }
+
if (!rt5677->dsp_vad_en) {
regcache_cache_only(rt5677->regmap, true);
regcache_mark_dirty(rt5677->regmap);
@@ -4778,6 +4990,11 @@ static int rt5677_resume(struct snd_soc_component *component)
regcache_sync(rt5677->regmap);
}
+ if (rt5677->irq) {
+ enable_irq(rt5677->irq);
+ schedule_delayed_work(&rt5677->resume_irq_check, 0);
+ }
+
return 0;
}
#else
@@ -4842,6 +5059,11 @@ static const struct snd_soc_dai_ops rt5677_aif_dai_ops = {
.set_tdm_slot = rt5677_set_tdm_slot,
};
+static const struct snd_soc_dai_ops rt5677_dsp_dai_ops = {
+ .set_sysclk = rt5677_set_dai_sysclk,
+ .set_pll = rt5677_set_dai_pll,
+};
+
static struct snd_soc_dai_driver rt5677_dai[] = {
{
.name = "rt5677-aif1",
@@ -4938,6 +5160,18 @@ static struct snd_soc_dai_driver rt5677_dai[] = {
},
.ops = &rt5677_aif_dai_ops,
},
+ {
+ .name = "rt5677-dspbuffer",
+ .id = RT5677_DSPBUFF,
+ .capture = {
+ .stream_name = "DSP Buffer",
+ .channels_min = 1,
+ .channels_max = 1,
+ .rates = SNDRV_PCM_RATE_16000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ },
+ .ops = &rt5677_dsp_dai_ops,
+ },
};
static const struct snd_soc_component_driver soc_component_dev_rt5677 = {
@@ -5073,6 +5307,28 @@ static const struct rt5677_irq_desc rt5677_irq_descs[] = {
},
};
+static bool rt5677_check_hotword(struct rt5677_priv *rt5677)
+{
+ int reg_gpio;
+
+ if (!rt5677->is_dsp_mode)
+ return false;
+
+ if (regmap_read(rt5677->regmap, RT5677_GPIO_CTRL1, &reg_gpio))
+ return false;
+
+ /* Firmware sets GPIO1 pin to be GPIO1 after hotword is detected */
+ if ((reg_gpio & RT5677_GPIO1_PIN_MASK) == RT5677_GPIO1_PIN_IRQ)
+ return false;
+
+ /* Set GPIO1 pin back to be IRQ output for jack detect */
+ regmap_update_bits(rt5677->regmap, RT5677_GPIO_CTRL1,
+ RT5677_GPIO1_PIN_MASK, RT5677_GPIO1_PIN_IRQ);
+
+ rt5677_spi_hotword_detected();
+ return true;
+}
+
static irqreturn_t rt5677_irq(int unused, void *data)
{
struct rt5677_priv *rt5677 = data;
@@ -5118,7 +5374,13 @@ static irqreturn_t rt5677_irq(int unused, void *data)
reg_irq ^= rt5677_irq_descs[i].polarity_mask;
}
}
- if (!irq_fired)
+
+ /* Exit the loop only when we know for sure that GPIO1 pin
+ * was low at some point since irq_lock was acquired. Any event
+ * after that point creates a rising edge that triggers another
+ * call to rt5677_irq().
+ */
+ if (!irq_fired && !rt5677_check_hotword(rt5677))
goto exit;
ret = regmap_write(rt5677->regmap, RT5677_IRQ_CTRL1, reg_irq);
@@ -5129,6 +5391,7 @@ static irqreturn_t rt5677_irq(int unused, void *data)
}
}
exit:
+ WARN_ON_ONCE(loop == 20);
mutex_unlock(&rt5677->irq_lock);
if (irq_fired)
return IRQ_HANDLED;
@@ -5136,6 +5399,39 @@ exit:
return IRQ_NONE;
}
+static void rt5677_resume_irq_check(struct work_struct *work)
+{
+ int i, virq;
+ struct rt5677_priv *rt5677 =
+ container_of(work, struct rt5677_priv, resume_irq_check.work);
+
+ /* This is needed to check and clear the interrupt status register
+ * at resume. If the headset is plugged/unplugged when the device is
+ * fully suspended, there won't be a rising edge at resume to trigger
+ * the interrupt. Without this, we miss the next unplug/plug event.
+ */
+ rt5677_irq(0, rt5677);
+
+ /* Call all enabled jack detect irq handlers again. This is needed in
+ * addition to the above check for a corner case caused by jack gpio
+ * debounce. After codec irq is disabled at suspend, the delayed work
+ * scheduled by soc-jack may run and read wrong jack gpio values, since
+ * the regmap is in cache only mode. At resume, there is no irq because
+ * rt5677_irq has already run and cleared the irq status at suspend.
+ * Without this explicit check, if the headset is unplugged right after
+ * suspend starts, it is still shown as plugged in after resume.
+ */
+ mutex_lock(&rt5677->irq_lock);
+ for (i = 0; i < RT5677_IRQ_NUM; i++) {
+ if (rt5677->irq_en & rt5677_irq_descs[i].enable_mask) {
+ virq = irq_find_mapping(rt5677->domain, i);
+ if (virq)
+ handle_nested_irq(virq);
+ }
+ }
+ mutex_unlock(&rt5677->irq_lock);
+}
+
static void rt5677_irq_bus_lock(struct irq_data *data)
{
struct rt5677_priv *rt5677 = irq_data_get_irq_chip_data(data);
@@ -5211,6 +5507,7 @@ static int rt5677_init_irq(struct i2c_client *i2c)
}
mutex_init(&rt5677->irq_lock);
+ INIT_DELAYED_WORK(&rt5677->resume_irq_check, rt5677_resume_irq_check);
/*
* Select RC as the debounce clock so that GPIO works even when
@@ -5256,6 +5553,8 @@ static int rt5677_init_irq(struct i2c_client *i2c)
if (ret)
dev_err(&i2c->dev, "Failed to request IRQ: %d\n", ret);
+ rt5677->irq = i2c->irq;
+
return ret;
}
@@ -5271,6 +5570,8 @@ static int rt5677_i2c_probe(struct i2c_client *i2c)
return -ENOMEM;
rt5677->dev = &i2c->dev;
+ rt5677->set_dsp_vad = rt5677_set_dsp_vad;
+ INIT_DELAYED_WORK(&rt5677->dsp_work, rt5677_dsp_work);
i2c_set_clientdata(i2c, rt5677);
if (i2c->dev.of_node) {
diff --git a/sound/soc/codecs/rt5677.h b/sound/soc/codecs/rt5677.h
index 213f4b8ca269..944ae02aafc2 100644
--- a/sound/soc/codecs/rt5677.h
+++ b/sound/soc/codecs/rt5677.h
@@ -1336,6 +1336,8 @@
#define RT5677_PLL_M_SFT 12
#define RT5677_PLL_M_BP (0x1 << 11)
#define RT5677_PLL_M_BP_SFT 11
+#define RT5677_PLL_UPDATE_PLL1 (0x1 << 1)
+#define RT5677_PLL_UPDATE_PLL1_SFT 1
/* Global Clock Control 1 (0x80) */
#define RT5677_SCLK_SRC_MASK (0x3 << 14)
@@ -1730,6 +1732,7 @@ enum {
RT5677_AIF4,
RT5677_AIF5,
RT5677_AIFS,
+ RT5677_DSPBUFF,
};
enum {
@@ -1845,14 +1848,20 @@ struct rt5677_priv {
#ifdef CONFIG_GPIOLIB
struct gpio_chip gpio_chip;
#endif
- bool dsp_vad_en;
+ bool dsp_vad_en_request; /* DSP VAD enable/disable request */
+ bool dsp_vad_en; /* dsp_work parameter */
bool is_dsp_mode;
bool is_vref_slow;
+ struct delayed_work dsp_work;
/* Interrupt handling */
struct irq_domain *domain;
struct mutex irq_lock;
unsigned int irq_en;
+ struct delayed_work resume_irq_check;
+ int irq;
+
+ int (*set_dsp_vad)(struct snd_soc_component *component, bool on);
};
int rt5677_sel_asrc_clk_src(struct snd_soc_component *component,
diff --git a/sound/soc/codecs/rt5682.c b/sound/soc/codecs/rt5682.c
index c50b75ce82e0..b1713fffa3eb 100644
--- a/sound/soc/codecs/rt5682.c
+++ b/sound/soc/codecs/rt5682.c
@@ -44,6 +44,7 @@ static const struct rt5682_platform_data i2s_default_platform_data = {
.dmic1_data_pin = RT5682_DMIC1_DATA_GPIO2,
.dmic1_clk_pin = RT5682_DMIC1_CLK_GPIO3,
.jd_src = RT5682_JD1,
+ .btndet_delay = 16,
};
struct rt5682_priv {
@@ -1002,6 +1003,7 @@ static int rt5682_set_jack_detect(struct snd_soc_component *component,
RT5682_JD1_EN_MASK, RT5682_JD1_DIS);
regmap_update_bits(rt5682->regmap, RT5682_RC_CLK_CTRL,
RT5682_POW_JDH | RT5682_POW_JDL, 0);
+ cancel_delayed_work_sync(&rt5682->jack_detect_work);
return 0;
}
@@ -1026,6 +1028,18 @@ static int rt5682_set_jack_detect(struct snd_soc_component *component,
regmap_update_bits(rt5682->regmap, RT5682_IRQ_CTRL_2,
RT5682_JD1_EN_MASK | RT5682_JD1_POL_MASK,
RT5682_JD1_EN | RT5682_JD1_POL_NOR);
+ regmap_update_bits(rt5682->regmap, RT5682_4BTN_IL_CMD_4,
+ 0x7f7f, (rt5682->pdata.btndet_delay << 8 |
+ rt5682->pdata.btndet_delay));
+ regmap_update_bits(rt5682->regmap, RT5682_4BTN_IL_CMD_5,
+ 0x7f7f, (rt5682->pdata.btndet_delay << 8 |
+ rt5682->pdata.btndet_delay));
+ regmap_update_bits(rt5682->regmap, RT5682_4BTN_IL_CMD_6,
+ 0x7f7f, (rt5682->pdata.btndet_delay << 8 |
+ rt5682->pdata.btndet_delay));
+ regmap_update_bits(rt5682->regmap, RT5682_4BTN_IL_CMD_7,
+ 0x7f7f, (rt5682->pdata.btndet_delay << 8 |
+ rt5682->pdata.btndet_delay));
mod_delayed_work(system_power_efficient_wq,
&rt5682->jack_detect_work, msecs_to_jiffies(250));
break;
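The RT5682_4BTN_IL_CMD writes above duplicate btndet_delay into the two 7-bit debounce fields selected by the 0x7f7f mask. A quick standalone check of that packing (the value 16 mirrors the pdata default above):

#include <assert.h>

int main(void)
{
	unsigned int btndet_delay = 16;
	unsigned int val = btndet_delay << 8 | btndet_delay;

	assert(val == 0x1010);
	assert((val & 0x7f7f) == val);  /* fits both masked fields */
	return 0;
}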
@@ -1450,28 +1464,6 @@ static const struct snd_kcontrol_new hpor_switch =
SOC_DAPM_SINGLE_AUTODISABLE("Switch", RT5682_HP_CTRL_1,
RT5682_R_MUTE_SFT, 1, 1);
-static int rt5682_charge_pump_event(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *kcontrol, int event)
-{
- struct snd_soc_component *component =
- snd_soc_dapm_to_component(w->dapm);
-
- switch (event) {
- case SND_SOC_DAPM_PRE_PMU:
- snd_soc_component_update_bits(component,
- RT5682_HP_CHARGE_PUMP_1, RT5682_PM_HP_MASK, RT5682_PM_HP_HV);
- break;
- case SND_SOC_DAPM_POST_PMD:
- snd_soc_component_update_bits(component,
- RT5682_HP_CHARGE_PUMP_1, RT5682_PM_HP_MASK, RT5682_PM_HP_LV);
- break;
- default:
- return 0;
- }
-
- return 0;
-}
-
static int rt5682_hp_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
@@ -1755,8 +1747,7 @@ static const struct snd_soc_dapm_widget rt5682_dapm_widgets[] = {
SND_SOC_DAPM_SUPPLY("HP Amp R", RT5682_PWR_ANLG_1,
RT5682_PWR_HA_R_BIT, 0, NULL, 0),
SND_SOC_DAPM_SUPPLY_S("Charge Pump", 1, RT5682_DEPOP_1,
- RT5682_PUMP_EN_SFT, 0, rt5682_charge_pump_event,
- SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+ RT5682_PUMP_EN_SFT, 0, NULL, 0),
SND_SOC_DAPM_SUPPLY_S("Capless", 2, RT5682_DEPOP_1,
RT5682_CAPLESS_EN_SFT, 0, NULL, 0),
@@ -2467,6 +2458,8 @@ static int rt5682_parse_dt(struct rt5682_priv *rt5682, struct device *dev)
&rt5682->pdata.dmic1_clk_pin);
device_property_read_u32(dev, "realtek,jd-src",
&rt5682->pdata.jd_src);
+ device_property_read_u32(dev, "realtek,btndet-delay",
+ &rt5682->pdata.btndet_delay);
rt5682->pdata.ldo1_en = of_get_named_gpio(dev->of_node,
"realtek,ldo1-en-gpios", 0);
@@ -2654,6 +2647,8 @@ static int rt5682_i2c_probe(struct i2c_client *i2c,
RT5682_HPA_CP_BIAS_CTRL_MASK, RT5682_HPA_CP_BIAS_3UA);
regmap_update_bits(rt5682->regmap, RT5682_CHARGE_PUMP_1,
RT5682_CP_CLK_HP_MASK, RT5682_CP_CLK_HP_300KHZ);
+ regmap_update_bits(rt5682->regmap, RT5682_HP_CHARGE_PUMP_1,
+ RT5682_PM_HP_MASK, RT5682_PM_HP_HV);
INIT_DELAYED_WORK(&rt5682->jack_detect_work,
rt5682_jack_detect_handler);
diff --git a/sound/soc/codecs/tas2562.c b/sound/soc/codecs/tas2562.c
new file mode 100644
index 000000000000..729acd874c48
--- /dev/null
+++ b/sound/soc/codecs/tas2562.c
@@ -0,0 +1,590 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Driver for the Texas Instruments TAS2562 CODEC
+// Copyright (C) 2019 Texas Instruments Inc.
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/i2c.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/gpio/consumer.h>
+#include <linux/regulator/consumer.h>
+#include <linux/delay.h>
+
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/tlv.h>
+
+#include "tas2562.h"
+
+#define TAS2562_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE |\
+ SNDRV_PCM_FMTBIT_S32_LE)
+
+struct tas2562_data {
+ struct snd_soc_component *component;
+ struct gpio_desc *sdz_gpio;
+ struct regmap *regmap;
+ struct device *dev;
+ struct i2c_client *client;
+ int v_sense_slot;
+ int i_sense_slot;
+};
+
+static int tas2562_set_bias_level(struct snd_soc_component *component,
+ enum snd_soc_bias_level level)
+{
+ struct tas2562_data *tas2562 =
+ snd_soc_component_get_drvdata(component);
+
+ switch (level) {
+ case SND_SOC_BIAS_ON:
+ snd_soc_component_update_bits(component,
+ TAS2562_PWR_CTRL,
+ TAS2562_MODE_MASK, TAS2562_ACTIVE);
+ break;
+ case SND_SOC_BIAS_STANDBY:
+ case SND_SOC_BIAS_PREPARE:
+ snd_soc_component_update_bits(component,
+ TAS2562_PWR_CTRL,
+ TAS2562_MODE_MASK, TAS2562_MUTE);
+ break;
+ case SND_SOC_BIAS_OFF:
+ snd_soc_component_update_bits(component,
+ TAS2562_PWR_CTRL,
+ TAS2562_MODE_MASK, TAS2562_SHUTDOWN);
+ break;
+
+ default:
+ dev_err(tas2562->dev,
+ "wrong power level setting %d\n", level);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int tas2562_set_samplerate(struct tas2562_data *tas2562, int samplerate)
+{
+ int samp_rate;
+ int ramp_rate;
+
+ switch (samplerate) {
+ case 7350:
+ ramp_rate = TAS2562_TDM_CFG0_RAMPRATE_44_1;
+ samp_rate = TAS2562_TDM_CFG0_SAMPRATE_7305_8KHZ;
+ break;
+ case 8000:
+ ramp_rate = 0;
+ samp_rate = TAS2562_TDM_CFG0_SAMPRATE_7305_8KHZ;
+ break;
+ case 14700:
+ ramp_rate = TAS2562_TDM_CFG0_RAMPRATE_44_1;
+ samp_rate = TAS2562_TDM_CFG0_SAMPRATE_14_7_16KHZ;
+ break;
+ case 16000:
+ ramp_rate = 0;
+ samp_rate = TAS2562_TDM_CFG0_SAMPRATE_14_7_16KHZ;
+ break;
+ case 22050:
+ ramp_rate = TAS2562_TDM_CFG0_RAMPRATE_44_1;
+ samp_rate = TAS2562_TDM_CFG0_SAMPRATE_22_05_24KHZ;
+ break;
+ case 24000:
+ ramp_rate = 0;
+ samp_rate = TAS2562_TDM_CFG0_SAMPRATE_22_05_24KHZ;
+ break;
+ case 29400:
+ ramp_rate = TAS2562_TDM_CFG0_RAMPRATE_44_1;
+ samp_rate = TAS2562_TDM_CFG0_SAMPRATE_29_4_32KHZ;
+ break;
+ case 32000:
+ ramp_rate = 0;
+ samp_rate = TAS2562_TDM_CFG0_SAMPRATE_29_4_32KHZ;
+ break;
+ case 44100:
+ ramp_rate = TAS2562_TDM_CFG0_RAMPRATE_44_1;
+ samp_rate = TAS2562_TDM_CFG0_SAMPRATE_44_1_48KHZ;
+ break;
+ case 48000:
+ ramp_rate = 0;
+ samp_rate = TAS2562_TDM_CFG0_SAMPRATE_44_1_48KHZ;
+ break;
+ case 88200:
+ ramp_rate = TAS2562_TDM_CFG0_RAMPRATE_44_1;
+ samp_rate = TAS2562_TDM_CFG0_SAMPRATE_88_2_96KHZ;
+ break;
+ case 96000:
+ ramp_rate = 0;
+ samp_rate = TAS2562_TDM_CFG0_SAMPRATE_88_2_96KHZ;
+ break;
+ case 176400:
+ ramp_rate = TAS2562_TDM_CFG0_RAMPRATE_44_1;
+ samp_rate = TAS2562_TDM_CFG0_SAMPRATE_176_4_192KHZ;
+ break;
+ case 192000:
+ ramp_rate = 0;
+ samp_rate = TAS2562_TDM_CFG0_SAMPRATE_176_4_192KHZ;
+ break;
+ default:
+ dev_info(tas2562->dev, "%s, unsupported sample rate, %d\n",
+ __func__, samplerate);
+ return -EINVAL;
+ }
+
+ snd_soc_component_update_bits(tas2562->component, TAS2562_TDM_CFG0,
+ TAS2562_TDM_CFG0_RAMPRATE_MASK, ramp_rate);
+ snd_soc_component_update_bits(tas2562->component, TAS2562_TDM_CFG0,
+ TAS2562_TDM_CFG0_SAMPRATE_MASK, samp_rate);
+
+ return 0;
+}
+
+static int tas2562_set_dai_tdm_slot(struct snd_soc_dai *dai,
+ unsigned int tx_mask, unsigned int rx_mask,
+ int slots, int slot_width)
+{
+ struct snd_soc_component *component = dai->component;
+ struct tas2562_data *tas2562 = snd_soc_component_get_drvdata(component);
+ int ret = 0;
+
+ switch (slot_width) {
+ case 16:
+ ret = snd_soc_component_update_bits(component,
+ TAS2562_TDM_CFG2,
+ TAS2562_TDM_CFG2_RXLEN_MASK,
+ TAS2562_TDM_CFG2_RXLEN_16B);
+ break;
+ case 24:
+ ret = snd_soc_component_update_bits(component,
+ TAS2562_TDM_CFG2,
+ TAS2562_TDM_CFG2_RXLEN_MASK,
+ TAS2562_TDM_CFG2_RXLEN_24B);
+ break;
+ case 32:
+ ret = snd_soc_component_update_bits(component,
+ TAS2562_TDM_CFG2,
+ TAS2562_TDM_CFG2_RXLEN_MASK,
+ TAS2562_TDM_CFG2_RXLEN_32B);
+ break;
+
+ case 0:
+ /* Do not change slot width */
+ break;
+ default:
+ dev_err(tas2562->dev, "slot width not supported");
+ ret = -EINVAL;
+ }
+
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int tas2562_set_bitwidth(struct tas2562_data *tas2562, int bitwidth)
+{
+ int ret;
+
+ switch (bitwidth) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ snd_soc_component_update_bits(tas2562->component,
+ TAS2562_TDM_CFG2,
+ TAS2562_TDM_CFG2_RXWLEN_MASK,
+ TAS2562_TDM_CFG2_RXWLEN_16B);
+ tas2562->v_sense_slot = tas2562->i_sense_slot + 2;
+ break;
+ case SNDRV_PCM_FORMAT_S24_LE:
+ snd_soc_component_update_bits(tas2562->component,
+ TAS2562_TDM_CFG2,
+ TAS2562_TDM_CFG2_RXWLEN_MASK,
+ TAS2562_TDM_CFG2_RXWLEN_24B);
+ tas2562->v_sense_slot = tas2562->i_sense_slot + 4;
+ break;
+ case SNDRV_PCM_FORMAT_S32_LE:
+ snd_soc_component_update_bits(tas2562->component,
+ TAS2562_TDM_CFG2,
+ TAS2562_TDM_CFG2_RXWLEN_MASK,
+ TAS2562_TDM_CFG2_RXWLEN_32B);
+ tas2562->v_sense_slot = tas2562->i_sense_slot + 4;
+ break;
+
+ default:
+ dev_info(tas2562->dev, "Not supported params format\n");
+ }
+
+ ret = snd_soc_component_update_bits(tas2562->component,
+ TAS2562_TDM_CFG5,
+ TAS2562_TDM_CFG5_VSNS_EN | TAS2562_TDM_CFG5_VSNS_SLOT_MASK,
+ TAS2562_TDM_CFG5_VSNS_EN | tas2562->v_sense_slot);
+ if (ret < 0)
+ return ret;
+
+ ret = snd_soc_component_update_bits(tas2562->component,
+ TAS2562_TDM_CFG6,
+ TAS2562_TDM_CFG6_ISNS_EN | TAS2562_TDM_CFG6_ISNS_SLOT_MASK,
+ TAS2562_TDM_CFG6_ISNS_EN | tas2562->i_sense_slot);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int tas2562_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_component *component = dai->component;
+ struct tas2562_data *tas2562 = snd_soc_component_get_drvdata(component);
+ int ret;
+
+ ret = tas2562_set_bitwidth(tas2562, params_format(params));
+ if (ret) {
+ dev_err(tas2562->dev, "set bitwidth failed, %d\n", ret);
+ return ret;
+ }
+
+ ret = tas2562_set_samplerate(tas2562, params_rate(params));
+ if (ret)
+ dev_err(tas2562->dev, "set bitwidth failed, %d\n", ret);
+
+ return ret;
+}
+
+static int tas2562_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+ struct snd_soc_component *component = dai->component;
+ struct tas2562_data *tas2562 = snd_soc_component_get_drvdata(component);
+ u8 tdm_rx_start_slot = 0, asi_cfg_1 = 0;
+ int ret;
+
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_NF:
+ asi_cfg_1 = 0;
+ break;
+ case SND_SOC_DAIFMT_IB_NF:
+ asi_cfg_1 |= TAS2562_TDM_CFG1_RX_FALLING;
+ break;
+ default:
+ dev_err(tas2562->dev, "ASI format Inverse is not found\n");
+ return -EINVAL;
+ }
+
+ ret = snd_soc_component_update_bits(component, TAS2562_TDM_CFG1,
+ TAS2562_TDM_CFG1_RX_EDGE_MASK,
+ asi_cfg_1);
+ if (ret < 0) {
+ dev_err(tas2562->dev, "Failed to set RX edge\n");
+ return ret;
+ }
+
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case (SND_SOC_DAIFMT_I2S):
+ case (SND_SOC_DAIFMT_DSP_A):
+ case (SND_SOC_DAIFMT_DSP_B):
+ tdm_rx_start_slot = BIT(1);
+ break;
+ case (SND_SOC_DAIFMT_LEFT_J):
+ tdm_rx_start_slot = 0;
+ break;
+ default:
+ dev_err(tas2562->dev, "DAI Format is not found, fmt=0x%x\n",
+ fmt);
+ ret = -EINVAL;
+ break;
+ }
+
+ ret = snd_soc_component_update_bits(component, TAS2562_TDM_CFG1,
+ TAS2562_TDM_CFG1_RX_OFFSET_MASK,
+ tdm_rx_start_slot);
+
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int tas2562_mute(struct snd_soc_dai *dai, int mute)
+{
+ struct snd_soc_component *component = dai->component;
+
+ return snd_soc_component_update_bits(component, TAS2562_PWR_CTRL,
+ TAS2562_MODE_MASK,
+ mute ? TAS2562_MUTE : 0);
+}
+
+static int tas2562_codec_probe(struct snd_soc_component *component)
+{
+ struct tas2562_data *tas2562 = snd_soc_component_get_drvdata(component);
+ int ret;
+
+ tas2562->component = component;
+
+ if (tas2562->sdz_gpio)
+ gpiod_set_value_cansleep(tas2562->sdz_gpio, 1);
+
+ ret = snd_soc_component_update_bits(component, TAS2562_PWR_CTRL,
+ TAS2562_MODE_MASK, TAS2562_MUTE);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int tas2562_suspend(struct snd_soc_component *component)
+{
+ struct tas2562_data *tas2562 = snd_soc_component_get_drvdata(component);
+
+ regcache_cache_only(tas2562->regmap, true);
+ regcache_mark_dirty(tas2562->regmap);
+
+ if (tas2562->sdz_gpio)
+ gpiod_set_value_cansleep(tas2562->sdz_gpio, 0);
+
+ return 0;
+}
+
+static int tas2562_resume(struct snd_soc_component *component)
+{
+ struct tas2562_data *tas2562 = snd_soc_component_get_drvdata(component);
+
+ if (tas2562->sdz_gpio)
+ gpiod_set_value_cansleep(tas2562->sdz_gpio, 1);
+
+ regcache_cache_only(tas2562->regmap, false);
+
+ return regcache_sync(tas2562->regmap);
+}
+#else
+#define tas2562_suspend NULL
+#define tas2562_resume NULL
+#endif
+
+static const char * const tas2562_ASI1_src[] = {
+ "I2C offset", "Left", "Right", "LeftRightDiv2",
+};
+
+static SOC_ENUM_SINGLE_DECL(tas2562_ASI1_src_enum, TAS2562_TDM_CFG2, 4,
+ tas2562_ASI1_src);
+
+static const struct snd_kcontrol_new tas2562_asi1_mux =
+ SOC_DAPM_ENUM("ASI1 Source", tas2562_ASI1_src_enum);
+
+static int tas2562_dac_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_component *component =
+ snd_soc_dapm_to_component(w->dapm);
+ struct tas2562_data *tas2562 = snd_soc_component_get_drvdata(component);
+
+ switch (event) {
+ case SND_SOC_DAPM_POST_PMU:
+ dev_info(tas2562->dev, "SND_SOC_DAPM_POST_PMU\n");
+ break;
+ case SND_SOC_DAPM_PRE_PMD:
+ dev_info(tas2562->dev, "SND_SOC_DAPM_PRE_PMD\n");
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static DECLARE_TLV_DB_SCALE(tas2562_dac_tlv, 850, 50, 0);
+
+static const struct snd_kcontrol_new isense_switch =
+ SOC_DAPM_SINGLE("Switch", TAS2562_PWR_CTRL, TAS2562_ISENSE_POWER_EN,
+ 1, 1);
+
+static const struct snd_kcontrol_new vsense_switch =
+ SOC_DAPM_SINGLE("Switch", TAS2562_PWR_CTRL, TAS2562_VSENSE_POWER_EN,
+ 1, 1);
+
+static const struct snd_kcontrol_new tas2562_snd_controls[] = {
+ SOC_SINGLE_TLV("Amp Gain Volume", TAS2562_PB_CFG1, 0, 0x1c, 0,
+ tas2562_dac_tlv),
+};
+
+static const struct snd_soc_dapm_widget tas2562_dapm_widgets[] = {
+ SND_SOC_DAPM_AIF_IN("ASI1", "ASI1 Playback", 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_MUX("ASI1 Sel", SND_SOC_NOPM, 0, 0, &tas2562_asi1_mux),
+ SND_SOC_DAPM_AIF_IN("DAC IN", "Playback", 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_DAC_E("DAC", NULL, SND_SOC_NOPM, 0, 0, tas2562_dac_event,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
+ SND_SOC_DAPM_SWITCH("ISENSE", TAS2562_PWR_CTRL, 3, 1, &isense_switch),
+ SND_SOC_DAPM_SWITCH("VSENSE", TAS2562_PWR_CTRL, 2, 1, &vsense_switch),
+ SND_SOC_DAPM_SIGGEN("VMON"),
+ SND_SOC_DAPM_SIGGEN("IMON"),
+ SND_SOC_DAPM_OUTPUT("OUT"),
+};
+
+static const struct snd_soc_dapm_route tas2562_audio_map[] = {
+ {"ASI1 Sel", "I2C offset", "ASI1"},
+ {"ASI1 Sel", "Left", "ASI1"},
+ {"ASI1 Sel", "Right", "ASI1"},
+ {"ASI1 Sel", "LeftRightDiv2", "ASI1"},
+ { "DAC", NULL, "DAC IN" },
+ { "OUT", NULL, "DAC" },
+ {"ISENSE", "Switch", "IMON"},
+ {"VSENSE", "Switch", "VMON"},
+};
+
+static const struct snd_soc_component_driver soc_component_dev_tas2562 = {
+ .probe = tas2562_codec_probe,
+ .suspend = tas2562_suspend,
+ .resume = tas2562_resume,
+ .set_bias_level = tas2562_set_bias_level,
+ .controls = tas2562_snd_controls,
+ .num_controls = ARRAY_SIZE(tas2562_snd_controls),
+ .dapm_widgets = tas2562_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(tas2562_dapm_widgets),
+ .dapm_routes = tas2562_audio_map,
+ .num_dapm_routes = ARRAY_SIZE(tas2562_audio_map),
+ .idle_bias_on = 1,
+ .use_pmdown_time = 1,
+ .endianness = 1,
+ .non_legacy_dai_naming = 1,
+};
+
+static const struct snd_soc_dai_ops tas2562_speaker_dai_ops = {
+ .hw_params = tas2562_hw_params,
+ .set_fmt = tas2562_set_dai_fmt,
+ .set_tdm_slot = tas2562_set_dai_tdm_slot,
+ .digital_mute = tas2562_mute,
+};
+
+static struct snd_soc_dai_driver tas2562_dai[] = {
+ {
+ .name = "tas2562-amplifier",
+ .id = 0,
+ .playback = {
+ .stream_name = "ASI1 Playback",
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_8000_192000,
+ .formats = TAS2562_FORMATS,
+ },
+ .ops = &tas2562_speaker_dai_ops,
+ },
+};
+
+static const struct regmap_range_cfg tas2562_ranges[] = {
+ {
+ .range_min = 0,
+ .range_max = 5 * 128,
+ .selector_reg = TAS2562_PAGE_CTRL,
+ .selector_mask = 0xff,
+ .selector_shift = 0,
+ .window_start = 0,
+ .window_len = 128,
+ },
+};
+
+static const struct reg_default tas2562_reg_defaults[] = {
+ { TAS2562_PAGE_CTRL, 0x00 },
+ { TAS2562_SW_RESET, 0x00 },
+ { TAS2562_PWR_CTRL, 0x0e },
+ { TAS2562_PB_CFG1, 0x20 },
+ { TAS2562_TDM_CFG0, 0x09 },
+ { TAS2562_TDM_CFG1, 0x02 },
+};
+
+static const struct regmap_config tas2562_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = 5 * 128,
+ .cache_type = REGCACHE_RBTREE,
+ .reg_defaults = tas2562_reg_defaults,
+ .num_reg_defaults = ARRAY_SIZE(tas2562_reg_defaults),
+ .ranges = tas2562_ranges,
+ .num_ranges = ARRAY_SIZE(tas2562_ranges),
+};
+
+static int tas2562_parse_dt(struct tas2562_data *tas2562)
+{
+ struct device *dev = tas2562->dev;
+ int ret = 0;
+
+ tas2562->sdz_gpio = devm_gpiod_get_optional(dev, "shut-down-gpio",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(tas2562->sdz_gpio)) {
+ if (PTR_ERR(tas2562->sdz_gpio) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ /* Any other error: treat the shutdown GPIO as absent */
+ tas2562->sdz_gpio = NULL;
+ }
+
+ ret = fwnode_property_read_u32(dev->fwnode, "ti,imon-slot-no",
+ &tas2562->i_sense_slot);
+ if (ret)
+ dev_err(dev, "Looking up %s property failed %d\n",
+ "ti,imon-slot-no", ret);
+
+ return ret;
+}
+
+static int tas2562_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct tas2562_data *data;
+ int ret;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->client = client;
+ data->dev = &client->dev;
+
+ tas2562_parse_dt(data);
+
+ data->regmap = devm_regmap_init_i2c(client, &tas2562_regmap_config);
+ if (IS_ERR(data->regmap)) {
+ ret = PTR_ERR(data->regmap);
+ dev_err(dev, "failed to allocate register map: %d\n", ret);
+ return ret;
+ }
+
+ dev_set_drvdata(&client->dev, data);
+
+ return devm_snd_soc_register_component(dev, &soc_component_dev_tas2562,
+ tas2562_dai,
+ ARRAY_SIZE(tas2562_dai));
+
+}
+
+static const struct i2c_device_id tas2562_id[] = {
+ { "tas2562", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, tas2562_id);
+
+static const struct of_device_id tas2562_of_match[] = {
+ { .compatible = "ti,tas2562", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, tas2562_of_match);
+
+static struct i2c_driver tas2562_i2c_driver = {
+ .driver = {
+ .name = "tas2562",
+ .of_match_table = of_match_ptr(tas2562_of_match),
+ },
+ .probe = tas2562_probe,
+ .id_table = tas2562_id,
+};
+
+module_i2c_driver(tas2562_i2c_driver);
+
+MODULE_AUTHOR("Dan Murphy <dmurphy@ti.com>");
+MODULE_DESCRIPTION("TAS2562 Audio amplifier driver");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/tas2562.h b/sound/soc/codecs/tas2562.h
new file mode 100644
index 000000000000..62e659ab786d
--- /dev/null
+++ b/sound/soc/codecs/tas2562.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * tas2562.h - ALSA SoC Texas Instruments TAS2562 Mono Audio Amplifier
+ *
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * Author: Dan Murphy <dmurphy@ti.com>
+ */
+
+#ifndef __TAS2562_H__
+#define __TAS2562_H__
+
+#define TAS2562_PAGE_CTRL 0x00
+
+#define TAS2562_REG(page, reg) (((page) * 128) + (reg))
+
+#define TAS2562_SW_RESET TAS2562_REG(0, 0x01)
+#define TAS2562_PWR_CTRL TAS2562_REG(0, 0x02)
+#define TAS2562_PB_CFG1 TAS2562_REG(0, 0x03)
+#define TAS2562_MISC_CFG1 TAS2562_REG(0, 0x04)
+#define TAS2562_MISC_CFG2 TAS2562_REG(0, 0x05)
+
+#define TAS2562_TDM_CFG0 TAS2562_REG(0, 0x06)
+#define TAS2562_TDM_CFG1 TAS2562_REG(0, 0x07)
+#define TAS2562_TDM_CFG2 TAS2562_REG(0, 0x08)
+#define TAS2562_TDM_CFG3 TAS2562_REG(0, 0x09)
+#define TAS2562_TDM_CFG4 TAS2562_REG(0, 0x0a)
+#define TAS2562_TDM_CFG5 TAS2562_REG(0, 0x0b)
+#define TAS2562_TDM_CFG6 TAS2562_REG(0, 0x0c)
+#define TAS2562_TDM_CFG7 TAS2562_REG(0, 0x0d)
+#define TAS2562_TDM_CFG8 TAS2562_REG(0, 0x0e)
+#define TAS2562_TDM_CFG9 TAS2562_REG(0, 0x0f)
+#define TAS2562_TDM_CFG10 TAS2562_REG(0, 0x10)
+#define TAS2562_TDM_DET TAS2562_REG(0, 0x11)
+#define TAS2562_REV_ID TAS2562_REG(0, 0x7d)
+
+/* Page 2 */
+#define TAS2562_DVC_CFG1 TAS2562_REG(2, 0x01)
+#define TAS2562_DVC_CFG2 TAS2562_REG(2, 0x02)
+
+#define TAS2562_RESET BIT(0)
+
+#define TAS2562_MODE_MASK 0x3
+#define TAS2562_ACTIVE 0x0
+#define TAS2562_MUTE 0x1
+#define TAS2562_SHUTDOWN 0x2
+
+#define TAS2562_TDM_CFG1_RX_EDGE_MASK BIT(0)
+#define TAS2562_TDM_CFG1_RX_FALLING 1
+#define TAS2562_TDM_CFG1_RX_OFFSET_MASK GENMASK(4, 0)
+
+#define TAS2562_TDM_CFG0_RAMPRATE_MASK BIT(5)
+#define TAS2562_TDM_CFG0_RAMPRATE_44_1 BIT(5)
+#define TAS2562_TDM_CFG0_SAMPRATE_MASK GENMASK(3, 1)
+#define TAS2562_TDM_CFG0_SAMPRATE_7305_8KHZ 0x0
+#define TAS2562_TDM_CFG0_SAMPRATE_14_7_16KHZ 0x1
+#define TAS2562_TDM_CFG0_SAMPRATE_22_05_24KHZ 0x2
+#define TAS2562_TDM_CFG0_SAMPRATE_29_4_32KHZ 0x3
+#define TAS2562_TDM_CFG0_SAMPRATE_44_1_48KHZ 0x4
+#define TAS2562_TDM_CFG0_SAMPRATE_88_2_96KHZ 0x5
+#define TAS2562_TDM_CFG0_SAMPRATE_176_4_192KHZ 0x6
+
+#define TAS2562_TDM_CFG2_RIGHT_JUSTIFY BIT(6)
+
+#define TAS2562_TDM_CFG2_RXLEN_MASK GENMASK(1, 0)
+#define TAS2562_TDM_CFG2_RXLEN_16B 0x0
+#define TAS2562_TDM_CFG2_RXLEN_24B BIT(0)
+#define TAS2562_TDM_CFG2_RXLEN_32B BIT(1)
+
+#define TAS2562_TDM_CFG2_RXWLEN_MASK GENMASK(3, 2)
+#define TAS2562_TDM_CFG2_RXWLEN_16B 0x0
+#define TAS2562_TDM_CFG2_RXWLEN_20B BIT(2)
+#define TAS2562_TDM_CFG2_RXWLEN_24B BIT(3)
+#define TAS2562_TDM_CFG2_RXWLEN_32B (BIT(2) | BIT(3))
+
+#define TAS2562_VSENSE_POWER_EN BIT(2)
+#define TAS2562_ISENSE_POWER_EN BIT(3)
+
+#define TAS2562_TDM_CFG5_VSNS_EN BIT(6)
+#define TAS2562_TDM_CFG5_VSNS_SLOT_MASK GENMASK(5, 0)
+
+#define TAS2562_TDM_CFG6_ISNS_EN BIT(6)
+#define TAS2562_TDM_CFG6_ISNS_SLOT_MASK GENMASK(5, 0)
+
+#endif /* __TAS2562_H__ */
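TAS2562_REG() flattens a (page, register) pair into one linear address; the regmap range config in tas2562.c then splits it back into a page-select write to TAS2562_PAGE_CTRL plus an offset inside the 128-byte window. A minimal standalone sketch of that arithmetic, assuming the window layout above:

#include <stdio.h>

#define TAS2562_REG(page, reg) (((page) * 128) + (reg))

int main(void)
{
	unsigned int flat = TAS2562_REG(2, 0x01); /* TAS2562_DVC_CFG1 -> 0x101 */
	unsigned int page = flat / 128;           /* written to TAS2562_PAGE_CTRL */
	unsigned int offset = flat % 128;         /* 8-bit register in the window */

	printf("flat=0x%x page=%u offset=0x%x\n", flat, page, offset);
	return 0;
}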
diff --git a/sound/soc/codecs/tas2770.c b/sound/soc/codecs/tas2770.c
new file mode 100644
index 000000000000..54c8135fe43c
--- /dev/null
+++ b/sound/soc/codecs/tas2770.c
@@ -0,0 +1,819 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// ALSA SoC Texas Instruments TAS2770 20-W Digital Input Mono Class-D
+// Audio Amplifier with Speaker I/V Sense
+//
+// Copyright (C) 2016-2017 Texas Instruments Incorporated - http://www.ti.com/
+// Author: Tracy Yi <tracy-yi@ti.com>
+// Frank Shi <shifu0704@thundersoft.com>
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+#include <linux/firmware.h>
+#include <linux/regmap.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/slab.h>
+#include <sound/soc.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/initval.h>
+#include <sound/tlv.h>
+
+#include "tas2770.h"
+
+#define TAS2770_MDELAY 0xFFFFFFFE
+
+static void tas2770_reset(struct tas2770_priv *tas2770)
+{
+ if (tas2770->reset_gpio) {
+ gpiod_set_value_cansleep(tas2770->reset_gpio, 0);
+ msleep(20);
+ gpiod_set_value_cansleep(tas2770->reset_gpio, 1);
+ }
+ snd_soc_component_write(tas2770->component, TAS2770_SW_RST,
+ TAS2770_RST);
+}
+
+static int tas2770_set_bias_level(struct snd_soc_component *component,
+ enum snd_soc_bias_level level)
+{
+ struct tas2770_priv *tas2770 =
+ snd_soc_component_get_drvdata(component);
+
+ switch (level) {
+ case SND_SOC_BIAS_ON:
+ snd_soc_component_update_bits(component,
+ TAS2770_PWR_CTRL,
+ TAS2770_PWR_CTRL_MASK,
+ TAS2770_PWR_CTRL_ACTIVE);
+ break;
+
+ case SND_SOC_BIAS_OFF:
+ snd_soc_component_update_bits(component,
+ TAS2770_PWR_CTRL,
+ TAS2770_PWR_CTRL_MASK,
+ TAS2770_PWR_CTRL_SHUTDOWN);
+ break;
+
+ default:
+ dev_err(tas2770->dev,
+ "wrong power level setting %d\n", level);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int tas2770_codec_suspend(struct snd_soc_component *component)
+{
+ int ret;
+
+ ret = snd_soc_component_update_bits(component,
+ TAS2770_PWR_CTRL,
+ TAS2770_PWR_CTRL_MASK,
+ TAS2770_PWR_CTRL_SHUTDOWN);
+
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int tas2770_codec_resume(struct snd_soc_component *component)
+{
+ int ret;
+
+ ret = snd_soc_component_update_bits(component,
+ TAS2770_PWR_CTRL,
+ TAS2770_PWR_CTRL_MASK,
+ TAS2770_PWR_CTRL_ACTIVE);
+
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+#else
+#define tas2770_codec_suspend NULL
+#define tas2770_codec_resume NULL
+#endif
+
+static const char * const tas2770_ASI1_src[] = {
+ "I2C offset", "Left", "Right", "LeftRightDiv2",
+};
+
+static SOC_ENUM_SINGLE_DECL(
+ tas2770_ASI1_src_enum, TAS2770_TDM_CFG_REG2,
+ 4, tas2770_ASI1_src);
+
+static const struct snd_kcontrol_new tas2770_asi1_mux =
+ SOC_DAPM_ENUM("ASI1 Source", tas2770_ASI1_src_enum);
+
+static int tas2770_dac_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_component *component =
+ snd_soc_dapm_to_component(w->dapm);
+ struct tas2770_priv *tas2770 =
+ snd_soc_component_get_drvdata(component);
+ int ret;
+
+ switch (event) {
+ case SND_SOC_DAPM_POST_PMU:
+ ret = snd_soc_component_update_bits(component,
+ TAS2770_PWR_CTRL,
+ TAS2770_PWR_CTRL_MASK,
+ TAS2770_PWR_CTRL_MUTE);
+ if (ret)
+ goto end;
+ break;
+ case SND_SOC_DAPM_PRE_PMD:
+ ret = snd_soc_component_update_bits(component,
+ TAS2770_PWR_CTRL,
+ TAS2770_PWR_CTRL_MASK,
+ TAS2770_PWR_CTRL_SHUTDOWN);
+ if (ret)
+ goto end;
+ break;
+ default:
+ dev_err(tas2770->dev, "Not supported evevt\n");
+ return -EINVAL;
+ }
+
+end:
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static const struct snd_kcontrol_new isense_switch =
+ SOC_DAPM_SINGLE("Switch", TAS2770_PWR_CTRL, 3, 1, 1);
+static const struct snd_kcontrol_new vsense_switch =
+ SOC_DAPM_SINGLE("Switch", TAS2770_PWR_CTRL, 2, 1, 1);
+
+static const struct snd_soc_dapm_widget tas2770_dapm_widgets[] = {
+ SND_SOC_DAPM_AIF_IN("ASI1", "ASI1 Playback", 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_MUX("ASI1 Sel", SND_SOC_NOPM, 0, 0,
+ &tas2770_asi1_mux),
+ SND_SOC_DAPM_SWITCH("ISENSE", TAS2770_PWR_CTRL, 3, 1,
+ &isense_switch),
+ SND_SOC_DAPM_SWITCH("VSENSE", TAS2770_PWR_CTRL, 2, 1,
+ &vsense_switch),
+ SND_SOC_DAPM_DAC_E("DAC", NULL, SND_SOC_NOPM, 0, 0, tas2770_dac_event,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
+ SND_SOC_DAPM_OUTPUT("OUT"),
+ SND_SOC_DAPM_SIGGEN("VMON"),
+ SND_SOC_DAPM_SIGGEN("IMON")
+};
+
+static const struct snd_soc_dapm_route tas2770_audio_map[] = {
+ {"ASI1 Sel", "I2C offset", "ASI1"},
+ {"ASI1 Sel", "Left", "ASI1"},
+ {"ASI1 Sel", "Right", "ASI1"},
+ {"ASI1 Sel", "LeftRightDiv2", "ASI1"},
+ {"DAC", NULL, "ASI1 Sel"},
+ {"OUT", NULL, "DAC"},
+ {"ISENSE", "Switch", "IMON"},
+ {"VSENSE", "Switch", "VMON"},
+};
+
+static int tas2770_mute(struct snd_soc_dai *dai, int mute)
+{
+ struct snd_soc_component *component = dai->component;
+ int ret;
+
+ if (mute)
+ ret = snd_soc_component_update_bits(component,
+ TAS2770_PWR_CTRL,
+ TAS2770_PWR_CTRL_MASK,
+ TAS2770_PWR_CTRL_MUTE);
+ else
+ ret = snd_soc_component_update_bits(component,
+ TAS2770_PWR_CTRL,
+ TAS2770_PWR_CTRL_MASK,
+ TAS2770_PWR_CTRL_ACTIVE);
+
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int tas2770_set_bitwidth(struct tas2770_priv *tas2770, int bitwidth)
+{
+ int ret;
+ struct snd_soc_component *component = tas2770->component;
+
+ switch (bitwidth) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ ret = snd_soc_component_update_bits(component,
+ TAS2770_TDM_CFG_REG2,
+ TAS2770_TDM_CFG_REG2_RXW_MASK,
+ TAS2770_TDM_CFG_REG2_RXW_16BITS);
+ tas2770->v_sense_slot = tas2770->i_sense_slot + 2;
+ break;
+ case SNDRV_PCM_FORMAT_S24_LE:
+ ret = snd_soc_component_update_bits(component,
+ TAS2770_TDM_CFG_REG2,
+ TAS2770_TDM_CFG_REG2_RXW_MASK,
+ TAS2770_TDM_CFG_REG2_RXW_24BITS);
+ tas2770->v_sense_slot = tas2770->i_sense_slot + 4;
+ break;
+ case SNDRV_PCM_FORMAT_S32_LE:
+ ret = snd_soc_component_update_bits(component,
+ TAS2770_TDM_CFG_REG2,
+ TAS2770_TDM_CFG_REG2_RXW_MASK,
+ TAS2770_TDM_CFG_REG2_RXW_32BITS);
+ tas2770->v_sense_slot = tas2770->i_sense_slot + 4;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ tas2770->channel_size = bitwidth;
+
+ ret = snd_soc_component_update_bits(component,
+ TAS2770_TDM_CFG_REG5,
+ TAS2770_TDM_CFG_REG5_VSNS_MASK |
+ TAS2770_TDM_CFG_REG5_50_MASK,
+ TAS2770_TDM_CFG_REG5_VSNS_ENABLE |
+ tas2770->v_sense_slot);
+ if (ret)
+ goto end;
+ ret = snd_soc_component_update_bits(component,
+ TAS2770_TDM_CFG_REG6,
+ TAS2770_TDM_CFG_REG6_ISNS_MASK |
+ TAS2770_TDM_CFG_REG6_50_MASK,
+ TAS2770_TDM_CFG_REG6_ISNS_ENABLE |
+ tas2770->i_sense_slot);
+
+end:
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int tas2770_set_samplerate(struct tas2770_priv *tas2770, int samplerate)
+{
+ int ret;
+ struct snd_soc_component *component = tas2770->component;
+
+ switch (samplerate) {
+ case 48000:
+ ret = snd_soc_component_update_bits(component,
+ TAS2770_TDM_CFG_REG0,
+ TAS2770_TDM_CFG_REG0_SMP_MASK,
+ TAS2770_TDM_CFG_REG0_SMP_48KHZ);
+ if (ret)
+ goto end;
+ ret = snd_soc_component_update_bits(component,
+ TAS2770_TDM_CFG_REG0,
+ TAS2770_TDM_CFG_REG0_31_MASK,
+ TAS2770_TDM_CFG_REG0_31_44_1_48KHZ);
+ if (ret)
+ goto end;
+ break;
+ case 44100:
+ ret = snd_soc_component_update_bits(component,
+ TAS2770_TDM_CFG_REG0,
+ TAS2770_TDM_CFG_REG0_SMP_MASK,
+ TAS2770_TDM_CFG_REG0_SMP_44_1KHZ);
+ if (ret)
+ goto end;
+ ret = snd_soc_component_update_bits(component,
+ TAS2770_TDM_CFG_REG0,
+ TAS2770_TDM_CFG_REG0_31_MASK,
+ TAS2770_TDM_CFG_REG0_31_44_1_48KHZ);
+ if (ret)
+ goto end;
+ break;
+ case 96000:
+ ret = snd_soc_component_update_bits(component,
+ TAS2770_TDM_CFG_REG0,
+ TAS2770_TDM_CFG_REG0_SMP_MASK,
+ TAS2770_TDM_CFG_REG0_SMP_48KHZ);
+ if (ret)
+ goto end;
+ ret = snd_soc_component_update_bits(component,
+ TAS2770_TDM_CFG_REG0,
+ TAS2770_TDM_CFG_REG0_31_MASK,
+ TAS2770_TDM_CFG_REG0_31_88_2_96KHZ);
+ break;
+ case 88200:
+ ret = snd_soc_component_update_bits(component,
+ TAS2770_TDM_CFG_REG0,
+ TAS2770_TDM_CFG_REG0_SMP_MASK,
+ TAS2770_TDM_CFG_REG0_SMP_44_1KHZ);
+ if (ret)
+ goto end;
+ ret = snd_soc_component_update_bits(component,
+ TAS2770_TDM_CFG_REG0,
+ TAS2770_TDM_CFG_REG0_31_MASK,
+ TAS2770_TDM_CFG_REG0_31_88_2_96KHZ);
+ break;
+ case 192000:
+ ret = snd_soc_component_update_bits(component,
+ TAS2770_TDM_CFG_REG0,
+ TAS2770_TDM_CFG_REG0_SMP_MASK,
+ TAS2770_TDM_CFG_REG0_SMP_48KHZ);
+ if (ret)
+ goto end;
+ ret = snd_soc_component_update_bits(component,
+ TAS2770_TDM_CFG_REG0,
+ TAS2770_TDM_CFG_REG0_31_MASK,
+ TAS2770_TDM_CFG_REG0_31_176_4_192KHZ);
+ if (ret)
+ goto end;
+ break;
+ case 176400:
+ ret = snd_soc_component_update_bits(component,
+ TAS2770_TDM_CFG_REG0,
+ TAS2770_TDM_CFG_REG0_SMP_MASK,
+ TAS2770_TDM_CFG_REG0_SMP_44_1KHZ);
+ if (ret)
+ goto end;
+ ret = snd_soc_component_update_bits(component,
+ TAS2770_TDM_CFG_REG0,
+ TAS2770_TDM_CFG_REG0_31_MASK,
+ TAS2770_TDM_CFG_REG0_31_176_4_192KHZ);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+end:
+ if (ret < 0)
+ return ret;
+
+ tas2770->sampling_rate = samplerate;
+ return 0;
+}
+
+static int tas2770_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_component *component = dai->component;
+ struct tas2770_priv *tas2770 =
+ snd_soc_component_get_drvdata(component);
+ int ret;
+
+ ret = tas2770_set_bitwidth(tas2770, params_format(params));
+ if (ret < 0)
+ goto end;
+
+ ret = tas2770_set_samplerate(tas2770, params_rate(params));
+
+end:
+ return ret;
+}
+
+static int tas2770_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+ u8 tdm_rx_start_slot = 0, asi_cfg_1 = 0;
+ int ret;
+ struct snd_soc_component *component = dai->component;
+ struct tas2770_priv *tas2770 =
+ snd_soc_component_get_drvdata(component);
+
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBS_CFS:
+ break;
+ default:
+ dev_err(tas2770->dev, "ASI format master is not found\n");
+ return -EINVAL;
+ }
+
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_NF:
+ asi_cfg_1 |= TAS2770_TDM_CFG_REG1_RX_RSING;
+ break;
+ case SND_SOC_DAIFMT_IB_NF:
+ asi_cfg_1 |= TAS2770_TDM_CFG_REG1_RX_FALING;
+ break;
+ default:
+ dev_err(tas2770->dev, "ASI format Inverse is not found\n");
+ return -EINVAL;
+ }
+
+ ret = snd_soc_component_update_bits(component, TAS2770_TDM_CFG_REG1,
+ TAS2770_TDM_CFG_REG1_RX_MASK,
+ asi_cfg_1);
+ if (ret < 0)
+ return ret;
+
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_I2S:
+ tdm_rx_start_slot = 1;
+ break;
+ case SND_SOC_DAIFMT_DSP_A:
+ tdm_rx_start_slot = 0;
+ break;
+ case SND_SOC_DAIFMT_DSP_B:
+ tdm_rx_start_slot = 1;
+ break;
+ case SND_SOC_DAIFMT_LEFT_J:
+ tdm_rx_start_slot = 0;
+ break;
+ default:
+ dev_err(tas2770->dev,
+ "Unsupported DAI format, fmt=0x%x\n", fmt);
+ return -EINVAL;
+ }
+
+ ret = snd_soc_component_update_bits(component, TAS2770_TDM_CFG_REG1,
+ TAS2770_TDM_CFG_REG1_MASK,
+ (tdm_rx_start_slot << TAS2770_TDM_CFG_REG1_51_SHIFT));
+ if (ret < 0)
+ return ret;
+
+ tas2770->asi_format = fmt;
+
+ return 0;
+}
+
+static int tas2770_set_dai_tdm_slot(struct snd_soc_dai *dai,
+ unsigned int tx_mask,
+ unsigned int rx_mask,
+ int slots, int slot_width)
+{
+ struct snd_soc_component *component = dai->component;
+ struct tas2770_priv *tas2770 =
+ snd_soc_component_get_drvdata(component);
+ int left_slot, right_slot;
+ int ret;
+
+ if (tx_mask == 0 || rx_mask != 0)
+ return -EINVAL;
+
+ if (slots == 1) {
+ if (tx_mask != 1)
+ return -EINVAL;
+ left_slot = 0;
+ right_slot = 0;
+ } else {
+ left_slot = __ffs(tx_mask);
+ tx_mask &= ~(1 << left_slot);
+ if (tx_mask == 0) {
+ right_slot = left_slot;
+ } else {
+ right_slot = __ffs(tx_mask);
+ tx_mask &= ~(1 << right_slot);
+ }
+ }
+
+ if (tx_mask != 0 || left_slot >= slots || right_slot >= slots)
+ return -EINVAL;
+
+ ret = snd_soc_component_update_bits(component, TAS2770_TDM_CFG_REG3,
+ TAS2770_TDM_CFG_REG3_30_MASK,
+ (left_slot << TAS2770_TDM_CFG_REG3_30_SHIFT));
+ if (ret < 0)
+ return ret;
+ ret = snd_soc_component_update_bits(component, TAS2770_TDM_CFG_REG3,
+ TAS2770_TDM_CFG_REG3_RXS_MASK,
+ (right_slot << TAS2770_TDM_CFG_REG3_RXS_SHIFT));
+ if (ret < 0)
+ return ret;
+
+ switch (slot_width) {
+ case 16:
+ ret = snd_soc_component_update_bits(component,
+ TAS2770_TDM_CFG_REG2,
+ TAS2770_TDM_CFG_REG2_RXS_MASK,
+ TAS2770_TDM_CFG_REG2_RXS_16BITS);
+ break;
+
+ case 24:
+ ret = snd_soc_component_update_bits(component,
+ TAS2770_TDM_CFG_REG2,
+ TAS2770_TDM_CFG_REG2_RXS_MASK,
+ TAS2770_TDM_CFG_REG2_RXS_24BITS);
+ break;
+
+ case 32:
+ ret = snd_soc_component_update_bits(component,
+ TAS2770_TDM_CFG_REG2,
+ TAS2770_TDM_CFG_REG2_RXS_MASK,
+ TAS2770_TDM_CFG_REG2_RXS_32BITS);
+ break;
+
+ case 0:
+ /* Do not change slot width */
+ ret = 0;
+ break;
+
+ default:
+ ret = -EINVAL;
+ }
+
+ if (ret < 0)
+ return ret;
+
+ tas2770->slot_width = slot_width;
+ return 0;
+}
+
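To make the tx_mask decomposition in tas2770_set_dai_tdm_slot() above concrete, here is a standalone sketch using __builtin_ctz() in place of the kernel's __ffs(); the slot values are hypothetical:

#include <stdio.h>

int main(void)
{
	unsigned int tx_mask = 0x30;  /* request TX slots 4 and 5 of 8 */
	int left_slot = __builtin_ctz(tx_mask);  /* 4; __ffs() in the kernel */
	int right_slot;

	tx_mask &= ~(1u << left_slot);
	right_slot = tx_mask ? __builtin_ctz(tx_mask) : left_slot;
	printf("left=%d right=%d\n", left_slot, right_slot); /* left=4 right=5 */
	return 0;
}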
+static struct snd_soc_dai_ops tas2770_dai_ops = {
+ .digital_mute = tas2770_mute,
+ .hw_params = tas2770_hw_params,
+ .set_fmt = tas2770_set_fmt,
+ .set_tdm_slot = tas2770_set_dai_tdm_slot,
+};
+
+#define TAS2770_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\
+ SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)
+
+#define TAS2770_RATES (SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 |\
+ SNDRV_PCM_RATE_96000 |\
+ SNDRV_PCM_RATE_192000\
+ )
+
+static struct snd_soc_dai_driver tas2770_dai_driver[] = {
+ {
+ .name = "tas2770 ASI1",
+ .id = 0,
+ .playback = {
+ .stream_name = "ASI1 Playback",
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = TAS2770_RATES,
+ .formats = TAS2770_FORMATS,
+ },
+ .capture = {
+ .stream_name = "ASI1 Capture",
+ .channels_min = 0,
+ .channels_max = 2,
+ .rates = TAS2770_RATES,
+ .formats = TAS2770_FORMATS,
+ },
+ .ops = &tas2770_dai_ops,
+ .symmetric_rates = 1,
+ },
+};
+
+static int tas2770_codec_probe(struct snd_soc_component *component)
+{
+ struct tas2770_priv *tas2770 =
+ snd_soc_component_get_drvdata(component);
+
+ tas2770->component = component;
+
+ return 0;
+}
+
+static DECLARE_TLV_DB_SCALE(tas2770_digital_tlv, 1100, 50, 0);
+static DECLARE_TLV_DB_SCALE(tas2770_playback_volume, -12750, 50, 0);
+
+static const struct snd_kcontrol_new tas2770_snd_controls[] = {
+ SOC_SINGLE_TLV("Speaker Playback Volume", TAS2770_PLAY_CFG_REG2,
+ 0, TAS2770_PLAY_CFG_REG2_VMAX, 1,
+ tas2770_playback_volume),
+ SOC_SINGLE_TLV("Amp Gain Volume", TAS2770_PLAY_CFG_REG0,
+ 0, 0x14, 0,
+ tas2770_digital_tlv),
+};
+
+static const struct snd_soc_component_driver soc_component_driver_tas2770 = {
+ .probe = tas2770_codec_probe,
+ .suspend = tas2770_codec_suspend,
+ .resume = tas2770_codec_resume,
+ .set_bias_level = tas2770_set_bias_level,
+ .controls = tas2770_snd_controls,
+ .num_controls = ARRAY_SIZE(tas2770_snd_controls),
+ .dapm_widgets = tas2770_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(tas2770_dapm_widgets),
+ .dapm_routes = tas2770_audio_map,
+ .num_dapm_routes = ARRAY_SIZE(tas2770_audio_map),
+ .idle_bias_on = 1,
+ .endianness = 1,
+ .non_legacy_dai_naming = 1,
+};
+
+static int tas2770_register_codec(struct tas2770_priv *tas2770)
+{
+ return devm_snd_soc_register_component(tas2770->dev,
+ &soc_component_driver_tas2770,
+ tas2770_dai_driver, ARRAY_SIZE(tas2770_dai_driver));
+}
+
+static const struct reg_default tas2770_reg_defaults[] = {
+ { TAS2770_PAGE, 0x00 },
+ { TAS2770_SW_RST, 0x00 },
+ { TAS2770_PWR_CTRL, 0x0e },
+ { TAS2770_PLAY_CFG_REG0, 0x10 },
+ { TAS2770_PLAY_CFG_REG1, 0x01 },
+ { TAS2770_PLAY_CFG_REG2, 0x00 },
+ { TAS2770_MSC_CFG_REG0, 0x07 },
+ { TAS2770_TDM_CFG_REG1, 0x02 },
+ { TAS2770_TDM_CFG_REG2, 0x0a },
+ { TAS2770_TDM_CFG_REG3, 0x10 },
+ { TAS2770_INT_MASK_REG0, 0xfc },
+ { TAS2770_INT_MASK_REG1, 0xb1 },
+ { TAS2770_INT_CFG, 0x05 },
+ { TAS2770_MISC_IRQ, 0x81 },
+ { TAS2770_CLK_CGF, 0x0c },
+};
+
+static bool tas2770_volatile(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case TAS2770_PAGE: /* regmap implementation requires this */
+ case TAS2770_SW_RST: /* always clears after write */
+ case TAS2770_BO_PRV_REG0:/* has a self clearing bit */
+ case TAS2770_LVE_INT_REG0:
+ case TAS2770_LVE_INT_REG1:
+ case TAS2770_LAT_INT_REG0:/* Sticky interrupt flags */
+ case TAS2770_LAT_INT_REG1:/* Sticky interrupt flags */
+ case TAS2770_VBAT_MSB:
+ case TAS2770_VBAT_LSB:
+ case TAS2770_TEMP_MSB:
+ case TAS2770_TEMP_LSB:
+ return true;
+ }
+ return false;
+}
+
+static bool tas2770_writeable(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case TAS2770_LVE_INT_REG0:
+ case TAS2770_LVE_INT_REG1:
+ case TAS2770_LAT_INT_REG0:
+ case TAS2770_LAT_INT_REG1:
+ case TAS2770_VBAT_MSB:
+ case TAS2770_VBAT_LSB:
+ case TAS2770_TEMP_MSB:
+ case TAS2770_TEMP_LSB:
+ case TAS2770_TDM_CLK_DETC:
+ case TAS2770_REV_AND_GPID:
+ return false;
+ }
+ return true;
+}
+
+static const struct regmap_range_cfg tas2770_regmap_ranges[] = {
+ {
+ .range_min = 0,
+ .range_max = 1 * 128,
+ .selector_reg = TAS2770_PAGE,
+ .selector_mask = 0xff,
+ .selector_shift = 0,
+ .window_start = 0,
+ .window_len = 128,
+ },
+};
+
+static const struct regmap_config tas2770_i2c_regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .writeable_reg = tas2770_writeable,
+ .volatile_reg = tas2770_volatile,
+ .reg_defaults = tas2770_reg_defaults,
+ .num_reg_defaults = ARRAY_SIZE(tas2770_reg_defaults),
+ .cache_type = REGCACHE_RBTREE,
+ .ranges = tas2770_regmap_ranges,
+ .num_ranges = ARRAY_SIZE(tas2770_regmap_ranges),
+ .max_register = 1 * 128,
+};
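
For illustration, the paging arithmetic behind this range configuration, mirrored in plain C (the driver itself lets regmap do this): a virtual register built by TAS2770_REG(page, reg) splits into a page-select write and an offset within the 128-byte window.

    #include <stdio.h>

    int main(void)
    {
            unsigned int virt = 1 * 128 + 0x0B;  /* hypothetical page-1 register */

            /* regmap writes (virt / 128) to TAS2770_PAGE, then accesses
             * (virt % 128) inside the window
             */
            printf("page=%u offset=0x%02x\n", virt / 128, virt % 128);
            return 0;
    }
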
+
+static int tas2770_parse_dt(struct device *dev, struct tas2770_priv *tas2770)
+{
+ int rc = 0;
+
+ rc = fwnode_property_read_u32(dev->fwnode, "ti,asi-format",
+ &tas2770->asi_format);
+ if (rc) {
+ dev_err(tas2770->dev, "Looking up %s property failed %d\n",
+ "ti,asi-format", rc);
+ goto end;
+ }
+
+ rc = fwnode_property_read_u32(dev->fwnode, "ti,imon-slot-no",
+ &tas2770->i_sense_slot);
+ if (rc) {
+ dev_err(tas2770->dev, "Looking up %s property failed %d\n",
+ "ti,imon-slot-no", rc);
+ goto end;
+ }
+
+ rc = fwnode_property_read_u32(dev->fwnode, "ti,vmon-slot-no",
+ &tas2770->v_sense_slot);
+ if (rc) {
+ dev_err(tas2770->dev, "Looking up %s property failed %d\n",
+ "ti,vmon-slot-no", rc);
+ goto end;
+ }
+
+end:
+ return rc;
+}
+
+static int tas2770_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct tas2770_priv *tas2770;
+ int result;
+
+ tas2770 = devm_kzalloc(&client->dev,
+ sizeof(struct tas2770_priv), GFP_KERNEL);
+ if (!tas2770)
+ return -ENOMEM;
+ tas2770->dev = &client->dev;
+
+ i2c_set_clientdata(client, tas2770);
+ dev_set_drvdata(&client->dev, tas2770);
+ tas2770->power_state = TAS2770_POWER_SHUTDOWN;
+
+ tas2770->regmap = devm_regmap_init_i2c(client, &tas2770_i2c_regmap);
+ if (IS_ERR(tas2770->regmap)) {
+ result = PTR_ERR(tas2770->regmap);
+ dev_err(&client->dev, "Failed to allocate register map: %d\n",
+ result);
+ goto end;
+ }
+
+ if (client->dev.of_node) {
+ result = tas2770_parse_dt(&client->dev, tas2770);
+ if (result) {
+ dev_err(tas2770->dev, "%s: Failed to parse devicetree\n",
+ __func__);
+ goto end;
+ }
+ }
+
+ tas2770->reset_gpio = devm_gpiod_get_optional(tas2770->dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(tas2770->reset_gpio)) {
+ if (PTR_ERR(tas2770->reset_gpio) == -EPROBE_DEFER) {
+ tas2770->reset_gpio = NULL;
+ return -EPROBE_DEFER;
+ }
+ }
+
+ tas2770->channel_size = 0;
+ tas2770->slot_width = 0;
+
+ tas2770_reset(tas2770);
+
+ result = tas2770_register_codec(tas2770);
+ if (result)
+ dev_err(tas2770->dev, "Register codec failed.\n");
+
+end:
+ return result;
+}
+
+static int tas2770_i2c_remove(struct i2c_client *client)
+{
+ pm_runtime_disable(&client->dev);
+ return 0;
+}
+
+static const struct i2c_device_id tas2770_i2c_id[] = {
+ { "tas2770", 0},
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, tas2770_i2c_id);
+
+#if defined(CONFIG_OF)
+static const struct of_device_id tas2770_of_match[] = {
+ { .compatible = "ti,tas2770" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, tas2770_of_match);
+#endif
+
+static struct i2c_driver tas2770_i2c_driver = {
+ .driver = {
+ .name = "tas2770",
+ .of_match_table = of_match_ptr(tas2770_of_match),
+ },
+ .probe = tas2770_i2c_probe,
+ .remove = tas2770_i2c_remove,
+ .id_table = tas2770_i2c_id,
+};
+
+module_i2c_driver(tas2770_i2c_driver);
+
+MODULE_AUTHOR("Shi Fu <shifu0704@thundersoft.com>");
+MODULE_DESCRIPTION("TAS2770 I2C Smart Amplifier driver");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/tas2770.h b/sound/soc/codecs/tas2770.h
new file mode 100644
index 000000000000..cbb858369fe6
--- /dev/null
+++ b/sound/soc/codecs/tas2770.h
@@ -0,0 +1,143 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * ALSA SoC TAS2770 codec driver
+ *
+ * Copyright (C) 2016-2017 Texas Instruments Incorporated - http://www.ti.com/
+ */
+#ifndef __TAS2770__
+#define __TAS2770__
+
+/* Book Control Register (available in page0 of each book) */
+#define TAS2770_BOOKCTL_PAGE 0
+#define TAS2770_BOOKCTL_REG 127
+#define TAS2770_REG(page, reg) (((page) * 128) + (reg))
+ /* Page */
+#define TAS2770_PAGE TAS2770_REG(0X0, 0x00)
+#define TAS2770_PAGE_PAGE_MASK 255
+ /* Software Reset */
+#define TAS2770_SW_RST TAS2770_REG(0X0, 0x01)
+#define TAS2770_RST BIT(0)
+ /* Power Control */
+#define TAS2770_PWR_CTRL TAS2770_REG(0X0, 0x02)
+#define TAS2770_PWR_CTRL_MASK 0x3
+#define TAS2770_PWR_CTRL_ACTIVE 0x0
+#define TAS2770_PWR_CTRL_MUTE BIT(0)
+#define TAS2770_PWR_CTRL_SHUTDOWN 0x2
+ /* Playback Configuration Reg0 */
+#define TAS2770_PLAY_CFG_REG0 TAS2770_REG(0X0, 0x03)
+ /* Playback Configuration Reg1 */
+#define TAS2770_PLAY_CFG_REG1 TAS2770_REG(0X0, 0x04)
+ /* Playback Configuration Reg2 */
+#define TAS2770_PLAY_CFG_REG2 TAS2770_REG(0X0, 0x05)
+#define TAS2770_PLAY_CFG_REG2_VMAX 0xc9
+ /* Misc Configuration Reg0 */
+#define TAS2770_MSC_CFG_REG0 TAS2770_REG(0X0, 0x07)
+ /* TDM Configuration Reg0 */
+#define TAS2770_TDM_CFG_REG0 TAS2770_REG(0X0, 0x0A)
+#define TAS2770_TDM_CFG_REG0_SMP_MASK BIT(5)
+#define TAS2770_TDM_CFG_REG0_SMP_48KHZ 0x0
+#define TAS2770_TDM_CFG_REG0_SMP_44_1KHZ BIT(5)
+#define TAS2770_TDM_CFG_REG0_31_MASK 0xe
+#define TAS2770_TDM_CFG_REG0_31_44_1_48KHZ 0x6
+#define TAS2770_TDM_CFG_REG0_31_88_2_96KHZ 0x8
+#define TAS2770_TDM_CFG_REG0_31_176_4_192KHZ 0xa
+ /* TDM Configuration Reg1 */
+#define TAS2770_TDM_CFG_REG1 TAS2770_REG(0X0, 0x0B)
+#define TAS2770_TDM_CFG_REG1_MASK 0x3e
+#define TAS2770_TDM_CFG_REG1_51_SHIFT 1
+#define TAS2770_TDM_CFG_REG1_RX_MASK BIT(0)
+#define TAS2770_TDM_CFG_REG1_RX_RSING 0x0
+#define TAS2770_TDM_CFG_REG1_RX_FALING BIT(0)
+ /* TDM Configuration Reg2 */
+#define TAS2770_TDM_CFG_REG2 TAS2770_REG(0X0, 0x0C)
+#define TAS2770_TDM_CFG_REG2_RXW_MASK 0xc
+#define TAS2770_TDM_CFG_REG2_RXW_16BITS 0x0
+#define TAS2770_TDM_CFG_REG2_RXW_24BITS 0x8
+#define TAS2770_TDM_CFG_REG2_RXW_32BITS 0xc
+#define TAS2770_TDM_CFG_REG2_RXS_MASK 0x3
+#define TAS2770_TDM_CFG_REG2_RXS_16BITS 0x0
+#define TAS2770_TDM_CFG_REG2_RXS_24BITS BIT(0)
+#define TAS2770_TDM_CFG_REG2_RXS_32BITS 0x2
+ /* TDM Configuration Reg3 */
+#define TAS2770_TDM_CFG_REG3 TAS2770_REG(0X0, 0x0D)
+#define TAS2770_TDM_CFG_REG3_RXS_MASK 0xf0
+#define TAS2770_TDM_CFG_REG3_RXS_SHIFT 0x4
+#define TAS2770_TDM_CFG_REG3_30_MASK 0xf
+#define TAS2770_TDM_CFG_REG3_30_SHIFT 0
+ /* TDM Configuration Reg5 */
+#define TAS2770_TDM_CFG_REG5 TAS2770_REG(0X0, 0x0F)
+#define TAS2770_TDM_CFG_REG5_VSNS_MASK BIT(6)
+#define TAS2770_TDM_CFG_REG5_VSNS_ENABLE BIT(6)
+#define TAS2770_TDM_CFG_REG5_50_MASK 0x3f
+ /* TDM Configuration Reg6 */
+#define TAS2770_TDM_CFG_REG6 TAS2770_REG(0X0, 0x10)
+#define TAS2770_TDM_CFG_REG6_ISNS_MASK BIT(6)
+#define TAS2770_TDM_CFG_REG6_ISNS_ENABLE BIT(6)
+#define TAS2770_TDM_CFG_REG6_50_MASK 0x3f
+ /* Brown Out Prevention Reg0 */
+#define TAS2770_BO_PRV_REG0 TAS2770_REG(0X0, 0x1B)
+ /* Interrupt MASK Reg0 */
+#define TAS2770_INT_MASK_REG0 TAS2770_REG(0X0, 0x20)
+#define TAS2770_INT_REG0_DEFAULT 0xfc
+#define TAS2770_INT_MASK_REG0_DISABLE 0xff
+ /* Interrupt MASK Reg1 */
+#define TAS2770_INT_MASK_REG1 TAS2770_REG(0X0, 0x21)
+#define TAS2770_INT_REG1_DEFAULT 0xb1
+#define TAS2770_INT_MASK_REG1_DISABLE 0xff
+ /* Live-Interrupt Reg0 */
+#define TAS2770_LVE_INT_REG0 TAS2770_REG(0X0, 0x22)
+ /* Live-Interrupt Reg1 */
+#define TAS2770_LVE_INT_REG1 TAS2770_REG(0X0, 0x23)
+ /* Latched-Interrupt Reg0 */
+#define TAS2770_LAT_INT_REG0 TAS2770_REG(0X0, 0x24)
+#define TAS2770_LAT_INT_REG0_OCE_FLG BIT(1)
+#define TAS2770_LAT_INT_REG0_OTE_FLG BIT(0)
+ /* Latched-Interrupt Reg1 */
+#define TAS2770_LAT_INT_REG1 TAS2770_REG(0X0, 0x25)
+#define TAS2770_LAT_INT_REG1_VBA_TOV BIT(3)
+#define TAS2770_LAT_INT_REG1_VBA_TUV BIT(2)
+#define TAS2770_LAT_INT_REG1_BOUT_FLG BIT(1)
+ /* VBAT MSB */
+#define TAS2770_VBAT_MSB TAS2770_REG(0X0, 0x27)
+ /* VBAT LSB */
+#define TAS2770_VBAT_LSB TAS2770_REG(0X0, 0x28)
+ /* TEMP MSB */
+#define TAS2770_TEMP_MSB TAS2770_REG(0X0, 0x29)
+ /* TEMP LSB */
+#define TAS2770_TEMP_LSB TAS2770_REG(0X0, 0x2A)
+ /* Interrupt Configuration */
+#define TAS2770_INT_CFG TAS2770_REG(0X0, 0x30)
+ /* Misc IRQ */
+#define TAS2770_MISC_IRQ TAS2770_REG(0X0, 0x32)
+ /* Clock Configuration */
+#define TAS2770_CLK_CGF TAS2770_REG(0X0, 0x3C)
+ /* TDM Clock detection monitor */
+#define TAS2770_TDM_CLK_DETC TAS2770_REG(0X0, 0x77)
+ /* Revision and PG ID */
+#define TAS2770_REV_AND_GPID TAS2770_REG(0X0, 0x7D)
+
+#define TAS2770_POWER_ACTIVE 0
+#define TAS2770_POWER_MUTE 1
+#define TAS2770_POWER_SHUTDOWN 2
+#define ERROR_OVER_CURRENT 0x00000001
+#define ERROR_DIE_OVERTEMP 0x00000002
+#define ERROR_OVER_VOLTAGE 0x00000004
+#define ERROR_UNDER_VOLTAGE 0x00000008
+#define ERROR_BROWNOUT 0x00000010
+#define ERROR_CLASSD_PWR 0x00000020
+
+struct tas2770_priv {
+ struct device *dev;
+ struct regmap *regmap;
+ struct snd_soc_component *component;
+ int power_state;
+ int asi_format;
+ struct gpio_desc *reset_gpio;
+ int sampling_rate;
+ int channel_size;
+ int slot_width;
+ int v_sense_slot;
+ int i_sense_slot;
+};
+
+#endif /* __TAS2770__ */
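
A sketch of how the latched-interrupt bits and ERROR_* codes above could be tied together in an IRQ handler; this helper is assumed for illustration and is not part of the patch:

    static unsigned int tas2770_decode_irq(unsigned int reg0, unsigned int reg1)
    {
            unsigned int errors = 0;

            if (reg0 & TAS2770_LAT_INT_REG0_OCE_FLG)
                    errors |= ERROR_OVER_CURRENT;
            if (reg0 & TAS2770_LAT_INT_REG0_OTE_FLG)
                    errors |= ERROR_DIE_OVERTEMP;
            if (reg1 & TAS2770_LAT_INT_REG1_VBA_TOV)
                    errors |= ERROR_OVER_VOLTAGE;
            if (reg1 & TAS2770_LAT_INT_REG1_VBA_TUV)
                    errors |= ERROR_UNDER_VOLTAGE;
            if (reg1 & TAS2770_LAT_INT_REG1_BOUT_FLG)
                    errors |= ERROR_BROWNOUT;

            return errors;
    }
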
diff --git a/sound/soc/codecs/tlv320aic31xx.c b/sound/soc/codecs/tlv320aic31xx.c
index df627a08def9..f6f19fdc72f5 100644
--- a/sound/soc/codecs/tlv320aic31xx.c
+++ b/sound/soc/codecs/tlv320aic31xx.c
@@ -171,6 +171,7 @@ struct aic31xx_priv {
int rate_div_line;
bool master_dapm_route_applied;
int irq;
+ u8 ocmv; /* output common-mode voltage */
};
struct aic31xx_rate_divs {
@@ -1312,6 +1313,11 @@ static int aic31xx_codec_probe(struct snd_soc_component *component)
if (ret)
return ret;
+ /* set output common-mode voltage */
+ snd_soc_component_update_bits(component, AIC31XX_HPDRIVER,
+ AIC31XX_HPD_OCMV_MASK,
+ aic31xx->ocmv << AIC31XX_HPD_OCMV_SHIFT);
+
return 0;
}
@@ -1501,6 +1507,43 @@ exit:
return IRQ_NONE;
}
+static void aic31xx_configure_ocmv(struct aic31xx_priv *priv)
+{
+ struct device *dev = priv->dev;
+ int dvdd, avdd;
+ u32 value;
+
+ if (dev->fwnode &&
+ !fwnode_property_read_u32(dev->fwnode, "ai31xx-ocmv", &value)) {
+ /* OCMV setting is forced by DT */
+ if (value <= 3) {
+ priv->ocmv = value;
+ return;
+ }
+ }
+
+ avdd = regulator_get_voltage(priv->supplies[3].consumer);
+ dvdd = regulator_get_voltage(priv->supplies[5].consumer);
+
+ if (avdd > 3600000 || dvdd > 1950000) {
+ dev_warn(dev,
+ "Too high supply voltage(s) AVDD: %d, DVDD: %d\n",
+ avdd, dvdd);
+ } else if (avdd == 3600000 && dvdd == 1950000) {
+ priv->ocmv = AIC31XX_HPD_OCMV_1_8V;
+ } else if (avdd >= 3300000 && dvdd >= 1800000) {
+ priv->ocmv = AIC31XX_HPD_OCMV_1_65V;
+ } else if (avdd >= 3000000 && dvdd >= 1650000) {
+ priv->ocmv = AIC31XX_HPD_OCMV_1_5V;
+ } else if (avdd >= 2700000 && dvdd >= 1525000) {
+ priv->ocmv = AIC31XX_HPD_OCMV_1_35V;
+ } else {
+ dev_warn(dev,
+ "Invalid supply voltage(s) AVDD: %d, DVDD: %d\n",
+ avdd, dvdd);
+ }
+}
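
Worked example of the ladder above: AVDD = 3.3 V and DVDD = 1.8 V pass the over-voltage check, miss the exact 3.6 V/1.95 V match, and land on the 1.65 V branch. A condensed restatement, assuming supplies already validated:

    static u8 example_pick_ocmv(int avdd, int dvdd)
    {
            if (avdd == 3600000 && dvdd == 1950000)
                    return AIC31XX_HPD_OCMV_1_8V;
            if (avdd >= 3300000 && dvdd >= 1800000)
                    return AIC31XX_HPD_OCMV_1_65V;  /* 3.3 V / 1.8 V ends here */
            if (avdd >= 3000000 && dvdd >= 1650000)
                    return AIC31XX_HPD_OCMV_1_5V;
            return AIC31XX_HPD_OCMV_1_35V;
    }
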
+
static int aic31xx_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
@@ -1570,6 +1613,8 @@ static int aic31xx_i2c_probe(struct i2c_client *i2c,
return ret;
}
+ aic31xx_configure_ocmv(aic31xx);
+
if (aic31xx->irq > 0) {
regmap_update_bits(aic31xx->regmap, AIC31XX_GPIO1,
AIC31XX_GPIO1_FUNC_MASK,
diff --git a/sound/soc/codecs/tlv320aic31xx.h b/sound/soc/codecs/tlv320aic31xx.h
index cb024955c978..83a8c7604cc3 100644
--- a/sound/soc/codecs/tlv320aic31xx.h
+++ b/sound/soc/codecs/tlv320aic31xx.h
@@ -232,6 +232,14 @@ struct aic31xx_pdata {
#define AIC31XX_HSD_HP 0x01
#define AIC31XX_HSD_HS 0x03
+/* AIC31XX_HPDRIVER */
+#define AIC31XX_HPD_OCMV_MASK GENMASK(4, 3)
+#define AIC31XX_HPD_OCMV_SHIFT 3
+#define AIC31XX_HPD_OCMV_1_35V 0x0
+#define AIC31XX_HPD_OCMV_1_5V 0x1
+#define AIC31XX_HPD_OCMV_1_65V 0x2
+#define AIC31XX_HPD_OCMV_1_8V 0x3
+
/* AIC31XX_MICBIAS */
#define AIC31XX_MICBIAS_MASK GENMASK(1, 0)
#define AIC31XX_MICBIAS_SHIFT 0
diff --git a/sound/soc/codecs/tlv320aic32x4.c b/sound/soc/codecs/tlv320aic32x4.c
index 68165de1c8de..b4e9a6c73f90 100644
--- a/sound/soc/codecs/tlv320aic32x4.c
+++ b/sound/soc/codecs/tlv320aic32x4.c
@@ -573,6 +573,9 @@ static int aic32x4_set_dai_sysclk(struct snd_soc_dai *codec_dai,
struct clk *pll;
pll = devm_clk_get(component->dev, "pll");
+ if (IS_ERR(pll))
+ return PTR_ERR(pll);
+
mclk = clk_get_parent(pll);
return clk_set_rate(mclk, freq);
diff --git a/sound/soc/codecs/wcd9335.c b/sound/soc/codecs/wcd9335.c
index f318403133e9..f11ffa28683b 100644
--- a/sound/soc/codecs/wcd9335.c
+++ b/sound/soc/codecs/wcd9335.c
@@ -2837,11 +2837,11 @@ static int wcd9335_codec_enable_dec(struct snd_soc_dapm_widget *w,
TX_HPF_CUT_OFF_FREQ_MASK) >> 5;
snd_soc_component_update_bits(comp, tx_vol_ctl_reg, 0x10, 0x10);
snd_soc_component_update_bits(comp, dec_cfg_reg, 0x08, 0x00);
- if (hpf_coff_freq != CF_MIN_3DB_150HZ) {
- snd_soc_component_update_bits(comp, dec_cfg_reg,
- TX_HPF_CUT_OFF_FREQ_MASK,
- hpf_coff_freq << 5);
- }
+ if (hpf_coff_freq != CF_MIN_3DB_150HZ) {
+ snd_soc_component_update_bits(comp, dec_cfg_reg,
+ TX_HPF_CUT_OFF_FREQ_MASK,
+ hpf_coff_freq << 5);
+ }
break;
case SND_SOC_DAPM_POST_PMD:
snd_soc_component_update_bits(comp, tx_vol_ctl_reg, 0x10, 0x00);
diff --git a/sound/soc/codecs/wm2200.c b/sound/soc/codecs/wm2200.c
index cf64e109c658..7b087d94141b 100644
--- a/sound/soc/codecs/wm2200.c
+++ b/sound/soc/codecs/wm2200.c
@@ -2410,6 +2410,8 @@ static int wm2200_i2c_probe(struct i2c_client *i2c,
err_pm_runtime:
pm_runtime_disable(&i2c->dev);
+ if (i2c->irq)
+ free_irq(i2c->irq, wm2200);
err_reset:
if (wm2200->pdata.reset)
gpio_set_value_cansleep(wm2200->pdata.reset, 0);
@@ -2426,12 +2428,15 @@ static int wm2200_i2c_remove(struct i2c_client *i2c)
{
struct wm2200_priv *wm2200 = i2c_get_clientdata(i2c);
+ pm_runtime_disable(&i2c->dev);
if (i2c->irq)
free_irq(i2c->irq, wm2200);
if (wm2200->pdata.reset)
gpio_set_value_cansleep(wm2200->pdata.reset, 0);
if (wm2200->pdata.ldo_ena)
gpio_set_value_cansleep(wm2200->pdata.ldo_ena, 0);
+ regulator_bulk_disable(ARRAY_SIZE(wm2200->core_supplies),
+ wm2200->core_supplies);
return 0;
}
diff --git a/sound/soc/codecs/wm5100.c b/sound/soc/codecs/wm5100.c
index 4af0e519e623..91cc63c5a51f 100644
--- a/sound/soc/codecs/wm5100.c
+++ b/sound/soc/codecs/wm5100.c
@@ -2617,6 +2617,7 @@ static int wm5100_i2c_probe(struct i2c_client *i2c,
return ret;
err_reset:
+ pm_runtime_disable(&i2c->dev);
if (i2c->irq)
free_irq(i2c->irq, wm5100);
wm5100_free_gpio(i2c);
@@ -2640,6 +2641,7 @@ static int wm5100_i2c_remove(struct i2c_client *i2c)
{
struct wm5100_priv *wm5100 = i2c_get_clientdata(i2c);
+ pm_runtime_disable(&i2c->dev);
if (i2c->irq)
free_irq(i2c->irq, wm5100);
wm5100_free_gpio(i2c);
diff --git a/sound/soc/codecs/wm8904.c b/sound/soc/codecs/wm8904.c
index bcb3c9d5abf0..7d7ea15d73e0 100644
--- a/sound/soc/codecs/wm8904.c
+++ b/sound/soc/codecs/wm8904.c
@@ -1410,34 +1410,6 @@ static int wm8904_hw_params(struct snd_pcm_substream *substream,
return 0;
}
-
-static int wm8904_set_sysclk(struct snd_soc_dai *dai, int clk_id,
- unsigned int freq, int dir)
-{
- struct snd_soc_component *component = dai->component;
- struct wm8904_priv *priv = snd_soc_component_get_drvdata(component);
-
- switch (clk_id) {
- case WM8904_CLK_MCLK:
- priv->sysclk_src = clk_id;
- priv->mclk_rate = freq;
- break;
-
- case WM8904_CLK_FLL:
- priv->sysclk_src = clk_id;
- break;
-
- default:
- return -EINVAL;
- }
-
- dev_dbg(dai->dev, "Clock source is %d at %uHz\n", clk_id, freq);
-
- wm8904_configure_clocking(component);
-
- return 0;
-}
-
static int wm8904_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
{
struct snd_soc_component *component = dai->component;
@@ -1824,6 +1796,50 @@ out:
return 0;
}
+static int wm8904_set_sysclk(struct snd_soc_dai *dai, int clk_id,
+ unsigned int freq, int dir)
+{
+ struct snd_soc_component *component = dai->component;
+ struct wm8904_priv *priv = snd_soc_component_get_drvdata(component);
+ unsigned long mclk_freq;
+ int ret;
+
+ switch (clk_id) {
+ case WM8904_CLK_AUTO:
+ mclk_freq = clk_get_rate(priv->mclk);
+ /* enable FLL if a different sysclk is desired */
+ if (mclk_freq != freq) {
+ priv->sysclk_src = WM8904_CLK_FLL;
+ ret = wm8904_set_fll(dai, WM8904_FLL_MCLK,
+ WM8904_FLL_MCLK,
+ mclk_freq, freq);
+ if (ret)
+ return ret;
+ break;
+ }
+ clk_id = WM8904_CLK_MCLK;
+ /* fallthrough */
+
+ case WM8904_CLK_MCLK:
+ priv->sysclk_src = clk_id;
+ priv->mclk_rate = freq;
+ break;
+
+ case WM8904_CLK_FLL:
+ priv->sysclk_src = clk_id;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ dev_dbg(dai->dev, "Clock source is %d at %uHz\n", clk_id, freq);
+
+ wm8904_configure_clocking(component);
+
+ return 0;
+}
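
Usage sketch for the new WM8904_CLK_AUTO id (rates illustrative): with a fixed 12.288 MHz MCLK, asking for 11.2896 MHz makes the codec bring up its FLL, while asking for 12.288 MHz falls through to plain MCLK.

    static int example_set_sysclk(struct snd_soc_dai *codec_dai)
    {
            /* differs from the MCLK rate, so the FLL path is taken */
            return snd_soc_dai_set_sysclk(codec_dai, WM8904_CLK_AUTO,
                                          11289600, SND_SOC_CLOCK_IN);
    }
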
+
static int wm8904_digital_mute(struct snd_soc_dai *codec_dai, int mute)
{
struct snd_soc_component *component = codec_dai->component;
@@ -1917,6 +1933,7 @@ static int wm8904_set_bias_level(struct snd_soc_component *component,
snd_soc_component_update_bits(component, WM8904_BIAS_CONTROL_0,
WM8904_BIAS_ENA, 0);
+ snd_soc_component_write(component, WM8904_SW_RESET_AND_ID, 0);
regcache_cache_only(wm8904->regmap, true);
regcache_mark_dirty(wm8904->regmap);
diff --git a/sound/soc/codecs/wm8904.h b/sound/soc/codecs/wm8904.h
index c1bca52f9927..de6340446b1f 100644
--- a/sound/soc/codecs/wm8904.h
+++ b/sound/soc/codecs/wm8904.h
@@ -10,6 +10,7 @@
#ifndef _WM8904_H
#define _WM8904_H
+#define WM8904_CLK_AUTO 0
#define WM8904_CLK_MCLK 1
#define WM8904_CLK_FLL 2
diff --git a/sound/soc/codecs/wm8958-dsp2.c b/sound/soc/codecs/wm8958-dsp2.c
index 18535b326680..ca42445b649d 100644
--- a/sound/soc/codecs/wm8958-dsp2.c
+++ b/sound/soc/codecs/wm8958-dsp2.c
@@ -25,6 +25,8 @@
#include <linux/mfd/wm8994/pdata.h>
#include <linux/mfd/wm8994/gpio.h>
+#include <asm/unaligned.h>
+
#include "wm8994.h"
#define WM_FW_BLOCK_INFO 0xff
@@ -58,18 +60,15 @@ static int wm8958_dsp2_fw(struct snd_soc_component *component, const char *name,
}
if (memcmp(fw->data, "WMFW", 4) != 0) {
- memcpy(&data32, fw->data, sizeof(data32));
- data32 = be32_to_cpu(data32);
+ data32 = get_unaligned_be32(fw->data);
dev_err(component->dev, "%s: firmware has bad file magic %08x\n",
name, data32);
goto err;
}
- memcpy(&data32, fw->data + 4, sizeof(data32));
- len = be32_to_cpu(data32);
+ len = get_unaligned_be32(fw->data + 4);
+ data32 = get_unaligned_be32(fw->data + 8);
- memcpy(&data32, fw->data + 8, sizeof(data32));
- data32 = be32_to_cpu(data32);
if ((data32 >> 24) & 0xff) {
dev_err(component->dev, "%s: unsupported firmware version %d\n",
name, (data32 >> 24) & 0xff);
@@ -87,9 +86,8 @@ static int wm8958_dsp2_fw(struct snd_soc_component *component, const char *name,
}
if (check) {
- memcpy(&data64, fw->data + 24, sizeof(u64));
- dev_info(component->dev, "%s timestamp %llx\n",
- name, be64_to_cpu(data64));
+ data64 = get_unaligned_be64(fw->data + 24);
+ dev_info(component->dev, "%s timestamp %llx\n", name, data64);
} else {
snd_soc_component_write(component, 0x102, 0x2);
snd_soc_component_write(component, 0x900, 0x2);
@@ -104,8 +102,7 @@ static int wm8958_dsp2_fw(struct snd_soc_component *component, const char *name,
goto err;
}
- memcpy(&data32, data + 4, sizeof(data32));
- block_len = be32_to_cpu(data32);
+ block_len = get_unaligned_be32(data + 4);
if (block_len + 8 > len) {
dev_err(component->dev, "%zd byte block longer than file\n",
block_len);
@@ -116,8 +113,7 @@ static int wm8958_dsp2_fw(struct snd_soc_component *component, const char *name,
goto err;
}
- memcpy(&data32, data, sizeof(data32));
- data32 = be32_to_cpu(data32);
+ data32 = get_unaligned_be32(data);
switch ((data32 >> 24) & 0xff) {
case WM_FW_BLOCK_INFO:
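
The conversion above relies on get_unaligned_be32() matching the old memcpy()-plus-be32_to_cpu() pair; a standalone check of that equivalence:

    #include <assert.h>
    #include <stdint.h>

    static uint32_t be32_read(const uint8_t *p)
    {
            return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
                   ((uint32_t)p[2] << 8) | p[3];
    }

    int main(void)
    {
            const uint8_t fw[8] = { 'W', 'M', 'F', 'W', 0x00, 0x00, 0x01, 0x00 };

            assert(be32_read(fw) == 0x574d4657);    /* the "WMFW" magic */
            assert(be32_read(fw + 4) == 0x100);     /* the length field */
            return 0;
    }
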
diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
index d5fb7f5dd551..15ce64a48a87 100644
--- a/sound/soc/codecs/wm8994.c
+++ b/sound/soc/codecs/wm8994.c
@@ -167,12 +167,12 @@ static int configure_aif_clock(struct snd_soc_component *component, int aif)
switch (wm8994->sysclk[aif]) {
case WM8994_SYSCLK_MCLK1:
- rate = wm8994->mclk[0];
+ rate = wm8994->mclk_rate[0];
break;
case WM8994_SYSCLK_MCLK2:
reg1 |= 0x8;
- rate = wm8994->mclk[1];
+ rate = wm8994->mclk_rate[1];
break;
case WM8994_SYSCLK_FLL1:
@@ -1038,6 +1038,45 @@ static bool wm8994_check_class_w_digital(struct snd_soc_component *component)
return true;
}
+static int aif_mclk_set(struct snd_soc_component *component, int aif, bool enable)
+{
+ struct wm8994_priv *wm8994 = snd_soc_component_get_drvdata(component);
+ unsigned int offset, val, clk_idx;
+ int ret;
+
+ if (aif)
+ offset = 4;
+ else
+ offset = 0;
+
+ val = snd_soc_component_read32(component, WM8994_AIF1_CLOCKING_1 + offset);
+ val &= WM8994_AIF1CLK_SRC_MASK;
+
+ switch (val) {
+ case 0:
+ clk_idx = WM8994_MCLK1;
+ break;
+ case 1:
+ clk_idx = WM8994_MCLK2;
+ break;
+ default:
+ return 0;
+ }
+
+ if (enable) {
+ ret = clk_prepare_enable(wm8994->mclk[clk_idx].clk);
+ if (ret < 0) {
+ dev_err(component->dev, "Failed to enable MCLK%d\n",
+ clk_idx);
+ return ret;
+ }
+ } else {
+ clk_disable_unprepare(wm8994->mclk[clk_idx].clk);
+ }
+
+ return 0;
+}
+
static int aif1clk_ev(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
@@ -1045,7 +1084,7 @@ static int aif1clk_ev(struct snd_soc_dapm_widget *w,
struct wm8994_priv *wm8994 = snd_soc_component_get_drvdata(component);
struct wm8994 *control = wm8994->wm8994;
int mask = WM8994_AIF1DAC1L_ENA | WM8994_AIF1DAC1R_ENA;
- int i;
+ int ret, i;
int dac;
int adc;
int val;
@@ -1061,6 +1100,10 @@ static int aif1clk_ev(struct snd_soc_dapm_widget *w,
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
+ ret = aif_mclk_set(component, 0, true);
+ if (ret < 0)
+ return ret;
+
/* Don't enable timeslot 2 if not in use */
if (wm8994->channels[0] <= 2)
mask &= ~(WM8994_AIF1DAC2L_ENA | WM8994_AIF1DAC2R_ENA);
@@ -1133,6 +1176,12 @@ static int aif1clk_ev(struct snd_soc_dapm_widget *w,
break;
}
+ switch (event) {
+ case SND_SOC_DAPM_POST_PMD:
+ aif_mclk_set(component, 0, false);
+ break;
+ }
+
return 0;
}
@@ -1140,13 +1189,17 @@ static int aif2clk_ev(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
- int i;
+ int ret, i;
int dac;
int adc;
int val;
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
+ ret = aif_mclk_set(component, 1, true);
+ if (ret < 0)
+ return ret;
+
val = snd_soc_component_read32(component, WM8994_AIF2_CONTROL_1);
if ((val & WM8994_AIF2ADCL_SRC) &&
(val & WM8994_AIF2ADCR_SRC))
@@ -1218,6 +1271,12 @@ static int aif2clk_ev(struct snd_soc_dapm_widget *w,
break;
}
+ switch (event) {
+ case SND_SOC_DAPM_POST_PMD:
+ aif_mclk_set(component, 1, false);
+ break;
+ }
+
return 0;
}
@@ -1623,10 +1682,10 @@ SND_SOC_DAPM_POST("Late Disable PGA", late_disable_ev)
static const struct snd_soc_dapm_widget wm8994_lateclk_widgets[] = {
SND_SOC_DAPM_SUPPLY("AIF1CLK", WM8994_AIF1_CLOCKING_1, 0, 0, aif1clk_ev,
SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
- SND_SOC_DAPM_PRE_PMD),
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
SND_SOC_DAPM_SUPPLY("AIF2CLK", WM8994_AIF2_CLOCKING_1, 0, 0, aif2clk_ev,
SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
- SND_SOC_DAPM_PRE_PMD),
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
SND_SOC_DAPM_PGA("Direct Voice", SND_SOC_NOPM, 0, 0, NULL, 0),
SND_SOC_DAPM_MIXER("SPKL", WM8994_POWER_MANAGEMENT_3, 8, 0,
left_speaker_mixer, ARRAY_SIZE(left_speaker_mixer)),
@@ -2141,6 +2200,7 @@ static int _wm8994_set_fll(struct snd_soc_component *component, int id, int src,
u16 reg, clk1, aif_reg, aif_src;
unsigned long timeout;
bool was_enabled;
+ struct clk *mclk;
switch (id) {
case WM8994_FLL1:
@@ -2216,6 +2276,27 @@ static int _wm8994_set_fll(struct snd_soc_component *component, int id, int src,
snd_soc_component_update_bits(component, WM8994_FLL1_CONTROL_1 + reg_offset,
WM8994_FLL1_ENA, 0);
+ /* Disable MCLK if needed before we possibly change to new clock parent */
+ if (was_enabled) {
+ reg = snd_soc_component_read32(component, WM8994_FLL1_CONTROL_5
+ + reg_offset);
+ reg = ((reg & WM8994_FLL1_REFCLK_SRC_MASK)
+ >> WM8994_FLL1_REFCLK_SRC_SHIFT) + 1;
+
+ switch (reg) {
+ case WM8994_FLL_SRC_MCLK1:
+ mclk = wm8994->mclk[WM8994_MCLK1].clk;
+ break;
+ case WM8994_FLL_SRC_MCLK2:
+ mclk = wm8994->mclk[WM8994_MCLK2].clk;
+ break;
+ default:
+ mclk = NULL;
+ }
+
+ clk_disable_unprepare(mclk);
+ }
+
if (wm8994->fll_byp && src == WM8994_FLL_SRC_BCLK &&
freq_in == freq_out && freq_out) {
dev_dbg(component->dev, "Bypassing FLL%d\n", id + 1);
@@ -2260,10 +2341,29 @@ static int _wm8994_set_fll(struct snd_soc_component *component, int id, int src,
/* Clear any pending completion from a previous failure */
try_wait_for_completion(&wm8994->fll_locked[id]);
+ switch (src) {
+ case WM8994_FLL_SRC_MCLK1:
+ mclk = wm8994->mclk[WM8994_MCLK1].clk;
+ break;
+ case WM8994_FLL_SRC_MCLK2:
+ mclk = wm8994->mclk[WM8994_MCLK2].clk;
+ break;
+ default:
+ mclk = NULL;
+ }
+
/* Enable (with fractional mode if required) */
if (freq_out) {
+ ret = clk_prepare_enable(mclk);
+ if (ret < 0) {
+ dev_err(component->dev, "Failed to enable MCLK for FLL%d\n",
+ id + 1);
+ return ret;
+ }
+
/* Enable VMID if we need it */
if (!was_enabled) {
active_reference(component);
switch (control->type) {
@@ -2372,12 +2472,29 @@ static int wm8994_set_fll(struct snd_soc_dai *dai, int id, int src,
return _wm8994_set_fll(dai->component, id, src, freq_in, freq_out);
}
+static int wm8994_set_mclk_rate(struct wm8994_priv *wm8994, unsigned int id,
+ unsigned int *freq)
+{
+ int ret;
+
+ if (!wm8994->mclk[id].clk || *freq == wm8994->mclk_rate[id])
+ return 0;
+
+ ret = clk_set_rate(wm8994->mclk[id].clk, *freq);
+ if (ret < 0)
+ return ret;
+
+ *freq = clk_get_rate(wm8994->mclk[id].clk);
+
+ return 0;
+}
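
With this change, a machine driver's sysclk request (sketch below, names illustrative) propagates into clk_set_rate() on the MCLK provider, and the codec records the rate the clock tree actually granted rather than the rate that was asked for:

    static int example_card_init(struct snd_soc_pcm_runtime *rtd)
    {
            /* the codec may store a rounded rate if MCLK1 cannot hit
             * 11.2896 MHz exactly
             */
            return snd_soc_dai_set_sysclk(rtd->codec_dai, WM8994_SYSCLK_MCLK1,
                                          11289600, SND_SOC_CLOCK_IN);
    }
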
+
static int wm8994_set_dai_sysclk(struct snd_soc_dai *dai,
int clk_id, unsigned int freq, int dir)
{
struct snd_soc_component *component = dai->component;
struct wm8994_priv *wm8994 = snd_soc_component_get_drvdata(component);
- int i;
+ int ret, i;
switch (dai->id) {
case 1:
@@ -2392,7 +2509,12 @@ static int wm8994_set_dai_sysclk(struct snd_soc_dai *dai,
switch (clk_id) {
case WM8994_SYSCLK_MCLK1:
wm8994->sysclk[dai->id - 1] = WM8994_SYSCLK_MCLK1;
- wm8994->mclk[0] = freq;
+
+ ret = wm8994_set_mclk_rate(wm8994, dai->id - 1, &freq);
+ if (ret < 0)
+ return ret;
+
+ wm8994->mclk_rate[0] = freq;
dev_dbg(dai->dev, "AIF%d using MCLK1 at %uHz\n",
dai->id, freq);
break;
@@ -2400,7 +2522,12 @@ static int wm8994_set_dai_sysclk(struct snd_soc_dai *dai,
case WM8994_SYSCLK_MCLK2:
/* TODO: Set GPIO AF */
wm8994->sysclk[dai->id - 1] = WM8994_SYSCLK_MCLK2;
- wm8994->mclk[1] = freq;
+
+ ret = wm8994_set_mclk_rate(wm8994, dai->id - 1, &freq);
+ if (ret < 0)
+ return ret;
+
+ wm8994->mclk_rate[1] = freq;
dev_dbg(dai->dev, "AIF%d using MCLK2 at %uHz\n",
dai->id, freq);
break;
@@ -4456,6 +4583,7 @@ static const struct snd_soc_component_driver soc_component_dev_wm8994 = {
static int wm8994_probe(struct platform_device *pdev)
{
struct wm8994_priv *wm8994;
+ int ret;
wm8994 = devm_kzalloc(&pdev->dev, sizeof(struct wm8994_priv),
GFP_KERNEL);
@@ -4467,6 +4595,16 @@ static int wm8994_probe(struct platform_device *pdev)
wm8994->wm8994 = dev_get_drvdata(pdev->dev.parent);
+ wm8994->mclk[WM8994_MCLK1].id = "MCLK1";
+ wm8994->mclk[WM8994_MCLK2].id = "MCLK2";
+
+ ret = devm_clk_bulk_get_optional(pdev->dev.parent, ARRAY_SIZE(wm8994->mclk),
+ wm8994->mclk);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to get clocks: %d\n", ret);
+ return ret;
+ }
+
pm_runtime_enable(&pdev->dev);
pm_runtime_idle(&pdev->dev);
diff --git a/sound/soc/codecs/wm8994.h b/sound/soc/codecs/wm8994.h
index 1d6f2abe1c11..41c4b126114d 100644
--- a/sound/soc/codecs/wm8994.h
+++ b/sound/soc/codecs/wm8994.h
@@ -6,6 +6,7 @@
#ifndef _WM8994_H
#define _WM8994_H
+#include <linux/clk.h>
#include <sound/soc.h>
#include <linux/firmware.h>
#include <linux/completion.h>
@@ -14,6 +15,12 @@
#include "wm_hubs.h"
+enum {
+ WM8994_MCLK1,
+ WM8994_MCLK2,
+ WM8994_NUM_MCLK
+};
+
/* Sources for AIF1/2 SYSCLK - use with set_dai_sysclk() */
#define WM8994_SYSCLK_MCLK1 1
#define WM8994_SYSCLK_MCLK2 2
@@ -73,9 +80,10 @@ struct wm8994;
struct wm8994_priv {
struct wm_hubs_data hubs;
struct wm8994 *wm8994;
+ struct clk_bulk_data mclk[WM8994_NUM_MCLK];
int sysclk[2];
int sysclk_rate[2];
- int mclk[2];
+ int mclk_rate[2];
int aifclk[2];
int aifdiv[2];
int channels[2];
diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
index 9b8bb7bbe945..2a9b610f6d43 100644
--- a/sound/soc/codecs/wm_adsp.c
+++ b/sound/soc/codecs/wm_adsp.c
@@ -599,6 +599,9 @@ struct wm_coeff_ctl_ops {
struct wm_coeff_ctl {
const char *name;
const char *fw_name;
+ /* Subname is needed to match with firmware */
+ const char *subname;
+ unsigned int subname_len;
struct wm_adsp_alg_region alg_region;
struct wm_coeff_ctl_ops ops;
struct wm_adsp *dsp;
@@ -1399,6 +1402,7 @@ static void wm_adsp_free_ctl_blk(struct wm_coeff_ctl *ctl)
{
kfree(ctl->cache);
kfree(ctl->name);
+ kfree(ctl->subname);
kfree(ctl);
}
@@ -1472,6 +1476,15 @@ static int wm_adsp_create_control(struct wm_adsp *dsp,
ret = -ENOMEM;
goto err_ctl;
}
+ if (subname) {
+ ctl->subname_len = subname_len;
+ ctl->subname = kmemdup(subname,
+ strlen(subname) + 1, GFP_KERNEL);
+ if (!ctl->subname) {
+ ret = -ENOMEM;
+ goto err_ctl_name;
+ }
+ }
ctl->enabled = 1;
ctl->set = 0;
ctl->ops.xget = wm_coeff_get;
@@ -1485,7 +1498,7 @@ static int wm_adsp_create_control(struct wm_adsp *dsp,
ctl->cache = kzalloc(ctl->len, GFP_KERNEL);
if (!ctl->cache) {
ret = -ENOMEM;
- goto err_ctl_name;
+ goto err_ctl_subname;
}
list_add(&ctl->list, &dsp->ctl_list);
@@ -1508,6 +1521,8 @@ static int wm_adsp_create_control(struct wm_adsp *dsp,
err_ctl_cache:
kfree(ctl->cache);
+err_ctl_subname:
+ kfree(ctl->subname);
err_ctl_name:
kfree(ctl->name);
err_ctl:
@@ -1995,6 +2010,70 @@ out:
return ret;
}
+/*
+ * Find the wm_coeff_ctl whose subname matches the given name.
+ * Return NULL if no match is found.
+ */
+static struct wm_coeff_ctl *wm_adsp_get_ctl(struct wm_adsp *dsp,
+ const char *name, int type,
+ unsigned int alg)
+{
+ struct wm_coeff_ctl *pos, *rslt = NULL;
+
+ list_for_each_entry(pos, &dsp->ctl_list, list) {
+ if (!pos->subname)
+ continue;
+ if (strncmp(pos->subname, name, pos->subname_len) == 0 &&
+ pos->alg_region.alg == alg &&
+ pos->alg_region.type == type) {
+ rslt = pos;
+ break;
+ }
+ }
+
+ return rslt;
+}
+
+int wm_adsp_write_ctl(struct wm_adsp *dsp, const char *name, int type,
+ unsigned int alg, void *buf, size_t len)
+{
+ struct wm_coeff_ctl *ctl;
+ struct snd_kcontrol *kcontrol;
+ int ret;
+
+ ctl = wm_adsp_get_ctl(dsp, name, type, alg);
+ if (!ctl)
+ return -EINVAL;
+
+ if (len > ctl->len)
+ return -EINVAL;
+
+ ret = wm_coeff_write_control(ctl, buf, len);
+
+ kcontrol = snd_soc_card_get_kcontrol(dsp->component->card, ctl->name);
+ if (!kcontrol) {
+ adsp_err(dsp, "Can't find kcontrol %s\n", ctl->name);
+ return -EINVAL;
+ }
+
+ snd_ctl_notify(dsp->component->card->snd_card,
+ SNDRV_CTL_EVENT_MASK_VALUE, &kcontrol->id);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(wm_adsp_write_ctl);
+
+int wm_adsp_read_ctl(struct wm_adsp *dsp, const char *name, int type,
+ unsigned int alg, void *buf, size_t len)
+{
+ struct wm_coeff_ctl *ctl;
+
+ ctl = wm_adsp_get_ctl(dsp, name, type, alg);
+ if (!ctl)
+ return -EINVAL;
+
+ if (len > ctl->len)
+ return -EINVAL;
+
+ return wm_coeff_read_control(ctl, buf, len);
+}
+EXPORT_SYMBOL_GPL(wm_adsp_read_ctl);
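
Usage sketch for the new exports; the control name, algorithm id and buffer are made up for illustration, and WMFW_ADSP2_YM is assumed from the wmfw memory-type definitions:

    static int example_push_coeff(struct wm_adsp *dsp)
    {
            u32 buf[4] = { 0 };     /* illustrative coefficient payload */

            return wm_adsp_write_ctl(dsp, "COEFF", WMFW_ADSP2_YM,
                                     0x100, buf, sizeof(buf));
    }
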
+
static void wm_adsp_ctl_fixup_base(struct wm_adsp *dsp,
const struct wm_adsp_alg_region *alg_region)
{
diff --git a/sound/soc/codecs/wm_adsp.h b/sound/soc/codecs/wm_adsp.h
index aa634ef6c9f5..4c481cf20275 100644
--- a/sound/soc/codecs/wm_adsp.h
+++ b/sound/soc/codecs/wm_adsp.h
@@ -201,5 +201,9 @@ int wm_adsp_compr_pointer(struct snd_compr_stream *stream,
struct snd_compr_tstamp *tstamp);
int wm_adsp_compr_copy(struct snd_compr_stream *stream,
char __user *buf, size_t count);
+int wm_adsp_write_ctl(struct wm_adsp *dsp, const char *name, int type,
+ unsigned int alg, void *buf, size_t len);
+int wm_adsp_read_ctl(struct wm_adsp *dsp, const char *name, int type,
+ unsigned int alg, void *buf, size_t len);
#endif
diff --git a/sound/soc/dwc/dwc-pcm.c b/sound/soc/dwc/dwc-pcm.c
index a9ae91c4597f..4771eb5fbe2a 100644
--- a/sound/soc/dwc/dwc-pcm.c
+++ b/sound/soc/dwc/dwc-pcm.c
@@ -135,7 +135,8 @@ void dw_pcm_pop_rx(struct dw_i2s_dev *dev)
dw_pcm_transfer(dev, false);
}
-static int dw_pcm_open(struct snd_pcm_substream *substream)
+static int dw_pcm_open(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_soc_pcm_runtime *rtd = substream->private_data;
@@ -148,14 +149,16 @@ static int dw_pcm_open(struct snd_pcm_substream *substream)
return 0;
}
-static int dw_pcm_close(struct snd_pcm_substream *substream)
+static int dw_pcm_close(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
synchronize_rcu();
return 0;
}
-static int dw_pcm_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *hw_params)
+static int dw_pcm_hw_params(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *hw_params)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct dw_i2s_dev *dev = runtime->private_data;
@@ -192,12 +195,14 @@ static int dw_pcm_hw_params(struct snd_pcm_substream *substream,
return 0;
}
-static int dw_pcm_hw_free(struct snd_pcm_substream *substream)
+static int dw_pcm_hw_free(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
return snd_pcm_lib_free_pages(substream);
}
-static int dw_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+static int dw_pcm_trigger(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream, int cmd)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct dw_i2s_dev *dev = runtime->private_data;
@@ -231,7 +236,8 @@ static int dw_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
return ret;
}
-static snd_pcm_uframes_t dw_pcm_pointer(struct snd_pcm_substream *substream)
+static snd_pcm_uframes_t dw_pcm_pointer(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct dw_i2s_dev *dev = runtime->private_data;
@@ -245,35 +251,33 @@ static snd_pcm_uframes_t dw_pcm_pointer(struct snd_pcm_substream *substream)
return pos < runtime->buffer_size ? pos : 0;
}
-static int dw_pcm_new(struct snd_soc_pcm_runtime *rtd)
+static int dw_pcm_new(struct snd_soc_component *component,
+ struct snd_soc_pcm_runtime *rtd)
{
size_t size = dw_pcm_hardware.buffer_bytes_max;
snd_pcm_lib_preallocate_pages_for_all(rtd->pcm,
SNDRV_DMA_TYPE_CONTINUOUS,
- snd_dma_continuous_data(GFP_KERNEL), size, size);
+ NULL, size, size);
return 0;
}
-static void dw_pcm_free(struct snd_pcm *pcm)
+static void dw_pcm_free(struct snd_soc_component *component,
+ struct snd_pcm *pcm)
{
snd_pcm_lib_preallocate_free_for_all(pcm);
}
-static const struct snd_pcm_ops dw_pcm_ops = {
- .open = dw_pcm_open,
- .close = dw_pcm_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = dw_pcm_hw_params,
- .hw_free = dw_pcm_hw_free,
- .trigger = dw_pcm_trigger,
- .pointer = dw_pcm_pointer,
-};
-
static const struct snd_soc_component_driver dw_pcm_component = {
- .pcm_new = dw_pcm_new,
- .pcm_free = dw_pcm_free,
- .ops = &dw_pcm_ops,
+ .open = dw_pcm_open,
+ .close = dw_pcm_close,
+ .ioctl = snd_soc_pcm_lib_ioctl,
+ .hw_params = dw_pcm_hw_params,
+ .hw_free = dw_pcm_hw_free,
+ .trigger = dw_pcm_trigger,
+ .pointer = dw_pcm_pointer,
+ .pcm_construct = dw_pcm_new,
+ .pcm_destruct = dw_pcm_free,
};
int dw_pcm_register(struct platform_device *pdev)
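
The dwc-pcm hunks above are one instance of the tree-wide ASoC conversion: each PCM op gains the struct snd_soc_component pointer as its first argument and moves from snd_pcm_ops into snd_soc_component_driver, with pcm_new/pcm_free renamed to pcm_construct/pcm_destruct. A minimal skeleton of the new shape (names illustrative):

    static int example_open(struct snd_soc_component *component,
                            struct snd_pcm_substream *substream)
    {
            return 0;
    }

    static const struct snd_soc_component_driver example_component = {
            .open = example_open,           /* was snd_pcm_ops.open */
            .ioctl = snd_soc_pcm_lib_ioctl, /* was snd_pcm_lib_ioctl */
    };
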
diff --git a/sound/soc/fsl/Kconfig b/sound/soc/fsl/Kconfig
index aa99c008a925..65e8cd4be930 100644
--- a/sound/soc/fsl/Kconfig
+++ b/sound/soc/fsl/Kconfig
@@ -25,6 +25,16 @@ config SND_SOC_FSL_SAI
This option is only useful for out-of-tree drivers since
in-tree drivers select it automatically.
+config SND_SOC_FSL_MQS
+ tristate "Medium Quality Sound (MQS) module support"
+ depends on SND_SOC_FSL_SAI
+ select REGMAP_MMIO
+ help
+ Say Y if you want to add Medium Quality Sound (MQS)
+ support for the Freescale CPUs.
+ This option is only useful for out-of-tree drivers since
+ in-tree drivers select it automatically.
+
config SND_SOC_FSL_AUDMIX
tristate "Audio Mixer (AUDMIX) module support"
select REGMAP_MMIO
diff --git a/sound/soc/fsl/Makefile b/sound/soc/fsl/Makefile
index c0dd04422fe9..8cde88c72d93 100644
--- a/sound/soc/fsl/Makefile
+++ b/sound/soc/fsl/Makefile
@@ -23,6 +23,7 @@ snd-soc-fsl-esai-objs := fsl_esai.o
snd-soc-fsl-micfil-objs := fsl_micfil.o
snd-soc-fsl-utils-objs := fsl_utils.o
snd-soc-fsl-dma-objs := fsl_dma.o
+snd-soc-fsl-mqs-objs := fsl_mqs.o
obj-$(CONFIG_SND_SOC_FSL_AUDMIX) += snd-soc-fsl-audmix.o
obj-$(CONFIG_SND_SOC_FSL_ASOC_CARD) += snd-soc-fsl-asoc-card.o
@@ -33,6 +34,7 @@ obj-$(CONFIG_SND_SOC_FSL_SPDIF) += snd-soc-fsl-spdif.o
obj-$(CONFIG_SND_SOC_FSL_ESAI) += snd-soc-fsl-esai.o
obj-$(CONFIG_SND_SOC_FSL_MICFIL) += snd-soc-fsl-micfil.o
obj-$(CONFIG_SND_SOC_FSL_UTILS) += snd-soc-fsl-utils.o
+obj-$(CONFIG_SND_SOC_FSL_MQS) += snd-soc-fsl-mqs.o
obj-$(CONFIG_SND_SOC_POWERPC_DMA) += snd-soc-fsl-dma.o
# MPC5200 Platform Support
diff --git a/sound/soc/fsl/fsl_asrc.c b/sound/soc/fsl/fsl_asrc.c
index cfa40ef6b1ca..a3cfceea7d2f 100644
--- a/sound/soc/fsl/fsl_asrc.c
+++ b/sound/soc/fsl/fsl_asrc.c
@@ -115,7 +115,7 @@ static void fsl_asrc_sel_proc(int inrate, int outrate,
* within range [ANCA, ANCA+ANCB-1], depends on the channels of pair A
* while pair A and pair C are comparatively independent.
*/
-static int fsl_asrc_request_pair(int channels, struct fsl_asrc_pair *pair)
+int fsl_asrc_request_pair(int channels, struct fsl_asrc_pair *pair)
{
enum asrc_pair_index index = ASRC_INVALID_PAIR;
struct fsl_asrc *asrc_priv = pair->asrc_priv;
@@ -158,7 +158,7 @@ static int fsl_asrc_request_pair(int channels, struct fsl_asrc_pair *pair)
*
* It clears the resource from asrc_priv and releases the occupied channels.
*/
-static void fsl_asrc_release_pair(struct fsl_asrc_pair *pair)
+void fsl_asrc_release_pair(struct fsl_asrc_pair *pair)
{
struct fsl_asrc *asrc_priv = pair->asrc_priv;
enum asrc_pair_index index = pair->index;
@@ -259,14 +259,24 @@ static int fsl_asrc_set_ideal_ratio(struct fsl_asrc_pair *pair,
* It configures those ASRC registers according to a configuration instance
* of struct asrc_config which includes in/output sample rate, width, channel
* and clock settings.
+ *
+ * Note:
+ * The ideal ratio configuration can work with a flexible clock rate setting.
+ * Using IDEAL_RATIO_RATE gives a faster conversion speed but may overload
+ * the ASRC. For regular audio playback, the clock rate should not be slower
+ * than a clock rate aligned with the output sample rate; for a use case
+ * requiring faster conversion, set use_ideal_rate to get the faster speed.
*/
-static int fsl_asrc_config_pair(struct fsl_asrc_pair *pair)
+static int fsl_asrc_config_pair(struct fsl_asrc_pair *pair, bool use_ideal_rate)
{
struct asrc_config *config = pair->config;
struct fsl_asrc *asrc_priv = pair->asrc_priv;
enum asrc_pair_index index = pair->index;
+ enum asrc_word_width input_word_width;
+ enum asrc_word_width output_word_width;
u32 inrate, outrate, indiv, outdiv;
- u32 clk_index[2], div[2];
+ u32 clk_index[2], div[2], rem[2];
+ u64 clk_rate;
int in, out, channels;
int pre_proc, post_proc;
struct clk *clk;
@@ -283,9 +293,32 @@ static int fsl_asrc_config_pair(struct fsl_asrc_pair *pair)
return -EINVAL;
}
- /* Validate output width */
- if (config->output_word_width == ASRC_WIDTH_8_BIT) {
- pair_err("does not support 8bit width output\n");
+ switch (snd_pcm_format_width(config->input_format)) {
+ case 8:
+ input_word_width = ASRC_WIDTH_8_BIT;
+ break;
+ case 16:
+ input_word_width = ASRC_WIDTH_16_BIT;
+ break;
+ case 24:
+ input_word_width = ASRC_WIDTH_24_BIT;
+ break;
+ default:
+ pair_err("does not support this input format, %d\n",
+ config->input_format);
+ return -EINVAL;
+ }
+
+ switch (snd_pcm_format_width(config->output_format)) {
+ case 16:
+ output_word_width = ASRC_WIDTH_16_BIT;
+ break;
+ case 24:
+ output_word_width = ASRC_WIDTH_24_BIT;
+ break;
+ default:
+ pair_err("does not support this output format, %d\n",
+ config->output_format);
return -EINVAL;
}
@@ -326,27 +359,42 @@ static int fsl_asrc_config_pair(struct fsl_asrc_pair *pair)
/* We only have output clock for ideal ratio mode */
clk = asrc_priv->asrck_clk[clk_index[ideal ? OUT : IN]];
- div[IN] = clk_get_rate(clk) / inrate;
- if (div[IN] == 0) {
+ clk_rate = clk_get_rate(clk);
+ rem[IN] = do_div(clk_rate, inrate);
+ div[IN] = (u32)clk_rate;
+
+ /*
+ * The divider range is [1, 1024], defined by the hardware. For a
+ * non-ideal-ratio configuration, the clock rate has to be strictly
+ * aligned with the sample rate. For an ideal-ratio configuration,
+ * clock rates only result in different conversion speeds, so the
+ * remainder does not matter as long as the divider stays within its
+ * valid range.
+ */
+ if (div[IN] == 0 || (!ideal && (div[IN] > 1024 || rem[IN] != 0))) {
pair_err("failed to support input sample rate %dHz by asrck_%x\n",
inrate, clk_index[ideal ? OUT : IN]);
return -EINVAL;
}
- clk = asrc_priv->asrck_clk[clk_index[OUT]];
+ div[IN] = min_t(u32, 1024, div[IN]);
- /* Use fixed output rate for Ideal Ratio mode (INCLK_NONE) */
- if (ideal)
- div[OUT] = clk_get_rate(clk) / IDEAL_RATIO_RATE;
+ clk = asrc_priv->asrck_clk[clk_index[OUT]];
+ clk_rate = clk_get_rate(clk);
+ if (ideal && use_ideal_rate)
+ rem[OUT] = do_div(clk_rate, IDEAL_RATIO_RATE);
else
- div[OUT] = clk_get_rate(clk) / outrate;
+ rem[OUT] = do_div(clk_rate, outrate);
+ div[OUT] = clk_rate;
- if (div[OUT] == 0) {
+ /* Output divider has the same limitation as the input one */
+ if (div[OUT] == 0 || (!ideal && (div[OUT] > 1024 || rem[OUT] != 0))) {
pair_err("failed to support output sample rate %dHz by asrck_%x\n",
outrate, clk_index[OUT]);
return -EINVAL;
}
+ div[OUT] = min_t(u32, 1024, div[OUT]);
+
/* Set the channel number */
channels = config->channel_num;
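
Concrete numbers for the divider check above (standalone C; in the driver, do_div() divides the u64 in place and hands back the remainder): a 24.576 MHz ASRC clock against 44.1 kHz leaves a non-zero remainder, so a non-ideal-ratio configuration must be rejected, while 48 kHz divides cleanly to 512.

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t clk = 24576000;

            printf("44.1k: div=%llu rem=%llu\n",
                   (unsigned long long)(clk / 44100),
                   (unsigned long long)(clk % 44100));  /* div=557 rem=12300 */
            printf("48k:   div=%llu rem=%llu\n",
                   (unsigned long long)(clk / 48000),
                   (unsigned long long)(clk % 48000));  /* div=512 rem=0 */
            return 0;
    }
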
@@ -383,8 +431,8 @@ static int fsl_asrc_config_pair(struct fsl_asrc_pair *pair)
/* Implement word_width configurations */
regmap_update_bits(asrc_priv->regmap, REG_ASRMCR1(index),
ASRMCR1i_OW16_MASK | ASRMCR1i_IWD_MASK,
- ASRMCR1i_OW16(config->output_word_width) |
- ASRMCR1i_IWD(config->input_word_width));
+ ASRMCR1i_OW16(output_word_width) |
+ ASRMCR1i_IWD(input_word_width));
/* Enable BUFFER STALL */
regmap_update_bits(asrc_priv->regmap, REG_ASRMCR(index),
@@ -497,13 +545,13 @@ static int fsl_asrc_dai_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct fsl_asrc *asrc_priv = snd_soc_dai_get_drvdata(dai);
- int width = params_width(params);
struct snd_pcm_runtime *runtime = substream->runtime;
struct fsl_asrc_pair *pair = runtime->private_data;
unsigned int channels = params_channels(params);
unsigned int rate = params_rate(params);
struct asrc_config config;
- int word_width, ret;
+ snd_pcm_format_t format;
+ int ret;
ret = fsl_asrc_request_pair(channels, pair);
if (ret) {
@@ -513,15 +561,10 @@ static int fsl_asrc_dai_hw_params(struct snd_pcm_substream *substream,
pair->config = &config;
- if (width == 16)
- width = ASRC_WIDTH_16_BIT;
- else
- width = ASRC_WIDTH_24_BIT;
-
if (asrc_priv->asrc_width == 16)
- word_width = ASRC_WIDTH_16_BIT;
+ format = SNDRV_PCM_FORMAT_S16_LE;
else
- word_width = ASRC_WIDTH_24_BIT;
+ format = SNDRV_PCM_FORMAT_S24_LE;
config.pair = pair->index;
config.channel_num = channels;
@@ -529,18 +572,18 @@ static int fsl_asrc_dai_hw_params(struct snd_pcm_substream *substream,
config.outclk = OUTCLK_ASRCK1_CLK;
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
- config.input_word_width = width;
- config.output_word_width = word_width;
+ config.input_format = params_format(params);
+ config.output_format = format;
config.input_sample_rate = rate;
config.output_sample_rate = asrc_priv->asrc_rate;
} else {
- config.input_word_width = word_width;
- config.output_word_width = width;
+ config.input_format = format;
+ config.output_format = params_format(params);
config.input_sample_rate = asrc_priv->asrc_rate;
config.output_sample_rate = rate;
}
- ret = fsl_asrc_config_pair(pair);
+ ret = fsl_asrc_config_pair(pair, false);
if (ret) {
dev_err(dai->dev, "fail to config asrc pair\n");
return ret;
@@ -604,7 +647,7 @@ static int fsl_asrc_dai_probe(struct snd_soc_dai *dai)
#define FSL_ASRC_FORMATS (SNDRV_PCM_FMTBIT_S24_LE | \
SNDRV_PCM_FMTBIT_S16_LE | \
- SNDRV_PCM_FMTBIT_S20_3LE)
+ SNDRV_PCM_FMTBIT_S24_3LE)
static struct snd_soc_dai_driver fsl_asrc_dai = {
.probe = fsl_asrc_dai_probe,
@@ -615,7 +658,8 @@ static struct snd_soc_dai_driver fsl_asrc_dai = {
.rate_min = 5512,
.rate_max = 192000,
.rates = SNDRV_PCM_RATE_KNOT,
- .formats = FSL_ASRC_FORMATS,
+ .formats = FSL_ASRC_FORMATS |
+ SNDRV_PCM_FMTBIT_S8,
},
.capture = {
.stream_name = "ASRC-Capture",
diff --git a/sound/soc/fsl/fsl_asrc.h b/sound/soc/fsl/fsl_asrc.h
index c60075112570..2b57e8c53728 100644
--- a/sound/soc/fsl/fsl_asrc.h
+++ b/sound/soc/fsl/fsl_asrc.h
@@ -342,8 +342,8 @@ struct asrc_config {
unsigned int dma_buffer_size;
unsigned int input_sample_rate;
unsigned int output_sample_rate;
- enum asrc_word_width input_word_width;
- enum asrc_word_width output_word_width;
+ snd_pcm_format_t input_format;
+ snd_pcm_format_t output_format;
enum asrc_inclk inclk;
enum asrc_outclk outclk;
};
@@ -462,4 +462,7 @@ struct fsl_asrc {
#define DRV_NAME "fsl-asrc-dai"
extern struct snd_soc_component_driver fsl_asrc_component;
struct dma_chan *fsl_asrc_get_dma_channel(struct fsl_asrc_pair *pair, bool dir);
+int fsl_asrc_request_pair(int channels, struct fsl_asrc_pair *pair);
+void fsl_asrc_release_pair(struct fsl_asrc_pair *pair);
+
#endif /* _FSL_ASRC_H */
diff --git a/sound/soc/fsl/fsl_asrc_dma.c b/sound/soc/fsl/fsl_asrc_dma.c
index 01052a0808b0..d6146de9acd2 100644
--- a/sound/soc/fsl/fsl_asrc_dma.c
+++ b/sound/soc/fsl/fsl_asrc_dma.c
@@ -16,13 +16,11 @@
#define FSL_ASRC_DMABUF_SIZE (256 * 1024)
-static const struct snd_pcm_hardware snd_imx_hardware = {
+static struct snd_pcm_hardware snd_imx_hardware = {
.info = SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_BLOCK_TRANSFER |
SNDRV_PCM_INFO_MMAP |
- SNDRV_PCM_INFO_MMAP_VALID |
- SNDRV_PCM_INFO_PAUSE |
- SNDRV_PCM_INFO_RESUME,
+ SNDRV_PCM_INFO_MMAP_VALID,
.buffer_bytes_max = FSL_ASRC_DMABUF_SIZE,
.period_bytes_min = 128,
.period_bytes_max = 65535, /* Limited by SDMA engine */
@@ -54,13 +52,12 @@ static void fsl_asrc_dma_complete(void *arg)
snd_pcm_period_elapsed(substream);
}
-static int fsl_asrc_dma_prepare_and_submit(struct snd_pcm_substream *substream)
+static int fsl_asrc_dma_prepare_and_submit(struct snd_pcm_substream *substream,
+ struct snd_soc_component *component)
{
u8 dir = substream->stream == SNDRV_PCM_STREAM_PLAYBACK ? OUT : IN;
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_pcm_runtime *runtime = substream->runtime;
struct fsl_asrc_pair *pair = runtime->private_data;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct device *dev = component->dev;
unsigned long flags = DMA_CTRL_ACK;
@@ -97,7 +94,8 @@ static int fsl_asrc_dma_prepare_and_submit(struct snd_pcm_substream *substream)
return 0;
}
-static int fsl_asrc_dma_trigger(struct snd_pcm_substream *substream, int cmd)
+static int fsl_asrc_dma_trigger(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream, int cmd)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct fsl_asrc_pair *pair = runtime->private_data;
@@ -107,7 +105,7 @@ static int fsl_asrc_dma_trigger(struct snd_pcm_substream *substream, int cmd)
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_RESUME:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
- ret = fsl_asrc_dma_prepare_and_submit(substream);
+ ret = fsl_asrc_dma_prepare_and_submit(substream, component);
if (ret)
return ret;
dma_async_issue_pending(pair->dma_chan[IN]);
@@ -126,7 +124,8 @@ static int fsl_asrc_dma_trigger(struct snd_pcm_substream *substream, int cmd)
return 0;
}
-static int fsl_asrc_dma_hw_params(struct snd_pcm_substream *substream,
+static int fsl_asrc_dma_hw_params(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
enum dma_slave_buswidth buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
@@ -134,7 +133,6 @@ static int fsl_asrc_dma_hw_params(struct snd_pcm_substream *substream,
bool tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
struct snd_dmaengine_dai_dma_data *dma_params_fe = NULL;
struct snd_dmaengine_dai_dma_data *dma_params_be = NULL;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct snd_pcm_runtime *runtime = substream->runtime;
struct fsl_asrc_pair *pair = runtime->private_data;
struct fsl_asrc *asrc_priv = pair->asrc_priv;
@@ -249,7 +247,8 @@ static int fsl_asrc_dma_hw_params(struct snd_pcm_substream *substream,
return 0;
}
-static int fsl_asrc_dma_hw_free(struct snd_pcm_substream *substream)
+static int fsl_asrc_dma_hw_free(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct fsl_asrc_pair *pair = runtime->private_data;
@@ -268,14 +267,27 @@ static int fsl_asrc_dma_hw_free(struct snd_pcm_substream *substream)
return 0;
}
-static int fsl_asrc_dma_startup(struct snd_pcm_substream *substream)
+static int fsl_asrc_dma_startup(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
+ bool tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_pcm_runtime *runtime = substream->runtime;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd, DRV_NAME);
+ struct snd_dmaengine_dai_dma_data *dma_data;
struct device *dev = component->dev;
struct fsl_asrc *asrc_priv = dev_get_drvdata(dev);
struct fsl_asrc_pair *pair;
+ struct dma_chan *tmp_chan = NULL;
+ u8 dir = tx ? OUT : IN;
+ bool release_pair = true;
+ int ret = 0;
+
+ ret = snd_pcm_hw_constraint_integer(substream->runtime,
+ SNDRV_PCM_HW_PARAM_PERIODS);
+ if (ret < 0) {
+ dev_err(dev, "failed to set pcm hw params periods\n");
+ return ret;
+ }
pair = kzalloc(sizeof(struct fsl_asrc_pair), GFP_KERNEL);
if (!pair)
@@ -285,14 +297,54 @@ static int fsl_asrc_dma_startup(struct snd_pcm_substream *substream)
runtime->private_data = pair;
- snd_pcm_hw_constraint_integer(substream->runtime,
- SNDRV_PCM_HW_PARAM_PERIODS);
+ /*
+ * Request a dummy pair, which will be released later.
+ * The pair-request function needs a channel count as input; for this
+ * dummy pair, we just request one channel temporarily.
+ */
+ ret = fsl_asrc_request_pair(1, pair);
+ if (ret < 0) {
+ dev_err(dev, "failed to request asrc pair\n");
+ goto req_pair_err;
+ }
+
+ /* Request a dummy DMA channel, which will be released later. */
+ tmp_chan = fsl_asrc_get_dma_channel(pair, dir);
+ if (!tmp_chan) {
+ dev_err(dev, "failed to get dma channel\n");
+ ret = -EINVAL;
+ goto dma_chan_err;
+ }
+
+ dma_data = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
+
+ /* Refine the snd_imx_hardware according to caps of DMA. */
+ ret = snd_dmaengine_pcm_refine_runtime_hwparams(substream,
+ dma_data,
+ &snd_imx_hardware,
+ tmp_chan);
+ if (ret < 0) {
+ dev_err(dev, "failed to refine runtime hwparams\n");
+ goto out;
+ }
+
+ release_pair = false;
snd_soc_set_runtime_hwparams(substream, &snd_imx_hardware);
- return 0;
+out:
+ dma_release_channel(tmp_chan);
+
+dma_chan_err:
+ fsl_asrc_release_pair(pair);
+
+req_pair_err:
+ if (release_pair)
+ kfree(pair);
+
+ return ret;
}
-static int fsl_asrc_dma_shutdown(struct snd_pcm_substream *substream)
+static int fsl_asrc_dma_shutdown(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct fsl_asrc_pair *pair = runtime->private_data;
@@ -311,7 +363,9 @@ static int fsl_asrc_dma_shutdown(struct snd_pcm_substream *substream)
return 0;
}
-static snd_pcm_uframes_t fsl_asrc_dma_pcm_pointer(struct snd_pcm_substream *substream)
+static snd_pcm_uframes_t
+fsl_asrc_dma_pcm_pointer(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct fsl_asrc_pair *pair = runtime->private_data;
@@ -319,17 +373,8 @@ static snd_pcm_uframes_t fsl_asrc_dma_pcm_pointer(struct snd_pcm_substream *subs
return bytes_to_frames(substream->runtime, pair->pos);
}
-static const struct snd_pcm_ops fsl_asrc_dma_pcm_ops = {
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = fsl_asrc_dma_hw_params,
- .hw_free = fsl_asrc_dma_hw_free,
- .trigger = fsl_asrc_dma_trigger,
- .open = fsl_asrc_dma_startup,
- .close = fsl_asrc_dma_shutdown,
- .pointer = fsl_asrc_dma_pcm_pointer,
-};
-
-static int fsl_asrc_dma_pcm_new(struct snd_soc_pcm_runtime *rtd)
+static int fsl_asrc_dma_pcm_new(struct snd_soc_component *component,
+ struct snd_soc_pcm_runtime *rtd)
{
struct snd_card *card = rtd->card->snd_card;
struct snd_pcm_substream *substream;
@@ -364,7 +409,8 @@ err:
return ret;
}
-static void fsl_asrc_dma_pcm_free(struct snd_pcm *pcm)
+static void fsl_asrc_dma_pcm_free(struct snd_soc_component *component,
+ struct snd_pcm *pcm)
{
struct snd_pcm_substream *substream;
int i;
@@ -382,8 +428,14 @@ static void fsl_asrc_dma_pcm_free(struct snd_pcm *pcm)
struct snd_soc_component_driver fsl_asrc_component = {
.name = DRV_NAME,
- .ops = &fsl_asrc_dma_pcm_ops,
- .pcm_new = fsl_asrc_dma_pcm_new,
- .pcm_free = fsl_asrc_dma_pcm_free,
+ .ioctl = snd_soc_pcm_lib_ioctl,
+ .hw_params = fsl_asrc_dma_hw_params,
+ .hw_free = fsl_asrc_dma_hw_free,
+ .trigger = fsl_asrc_dma_trigger,
+ .open = fsl_asrc_dma_startup,
+ .close = fsl_asrc_dma_shutdown,
+ .pointer = fsl_asrc_dma_pcm_pointer,
+ .pcm_construct = fsl_asrc_dma_pcm_new,
+ .pcm_destruct = fsl_asrc_dma_pcm_free,
};
EXPORT_SYMBOL_GPL(fsl_asrc_component);
diff --git a/sound/soc/fsl/fsl_audmix.c b/sound/soc/fsl/fsl_audmix.c
index c7e4e9757dce..a1db1bce330f 100644
--- a/sound/soc/fsl/fsl_audmix.c
+++ b/sound/soc/fsl/fsl_audmix.c
@@ -286,6 +286,7 @@ static int fsl_audmix_dai_trigger(struct snd_pcm_substream *substream, int cmd,
struct snd_soc_dai *dai)
{
struct fsl_audmix *priv = snd_soc_dai_get_drvdata(dai);
+ unsigned long lock_flags;
/* Capture stream shall not be handled */
if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
@@ -295,12 +296,16 @@ static int fsl_audmix_dai_trigger(struct snd_pcm_substream *substream, int cmd,
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_RESUME:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ spin_lock_irqsave(&priv->lock, lock_flags);
priv->tdms |= BIT(dai->driver->id);
+ spin_unlock_irqrestore(&priv->lock, lock_flags);
break;
case SNDRV_PCM_TRIGGER_STOP:
case SNDRV_PCM_TRIGGER_SUSPEND:
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ spin_lock_irqsave(&priv->lock, lock_flags);
priv->tdms &= ~BIT(dai->driver->id);
+ spin_unlock_irqrestore(&priv->lock, lock_flags);
break;
default:
return -EINVAL;
@@ -491,6 +496,7 @@ static int fsl_audmix_probe(struct platform_device *pdev)
return PTR_ERR(priv->ipg_clk);
}
+ spin_lock_init(&priv->lock);
platform_set_drvdata(pdev, priv);
pm_runtime_enable(dev);
diff --git a/sound/soc/fsl/fsl_audmix.h b/sound/soc/fsl/fsl_audmix.h
index 7812ffec45c5..479f05695d53 100644
--- a/sound/soc/fsl/fsl_audmix.h
+++ b/sound/soc/fsl/fsl_audmix.h
@@ -96,6 +96,7 @@ struct fsl_audmix {
struct platform_device *pdev;
struct regmap *regmap;
struct clk *ipg_clk;
+ spinlock_t lock; /* Protect tdms */
u8 tdms;
};
diff --git a/sound/soc/fsl/fsl_dma.c b/sound/soc/fsl/fsl_dma.c
index e22508301412..2868c4f97cb2 100644
--- a/sound/soc/fsl/fsl_dma.c
+++ b/sound/soc/fsl/fsl_dma.c
@@ -201,8 +201,7 @@ static irqreturn_t fsl_dma_isr(int irq, void *dev_id)
struct fsl_dma_private *dma_private = dev_id;
struct snd_pcm_substream *substream = dma_private->substream;
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd, DRV_NAME);
- struct device *dev = component->dev;
+ struct device *dev = rtd->dev;
struct ccsr_dma_channel __iomem *dma_channel = dma_private->dma_channel;
irqreturn_t ret = IRQ_NONE;
u32 sr, sr2 = 0;
@@ -280,7 +279,8 @@ static irqreturn_t fsl_dma_isr(int irq, void *dev_id)
* Regardless of where the memory is actually allocated, since the device can
* technically DMA to any 36-bit address, we do need to set the DMA mask to 36.
*/
-static int fsl_dma_new(struct snd_soc_pcm_runtime *rtd)
+static int fsl_dma_new(struct snd_soc_component *component,
+ struct snd_soc_pcm_runtime *rtd)
{
struct snd_card *card = rtd->card->snd_card;
struct snd_pcm *pcm = rtd->pcm;
@@ -380,11 +380,10 @@ static int fsl_dma_new(struct snd_soc_pcm_runtime *rtd)
* buffer, which is what ALSA expects. We're just dividing it into
* contiguous parts, and creating a link descriptor for each one.
*/
-static int fsl_dma_open(struct snd_pcm_substream *substream)
+static int fsl_dma_open(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct device *dev = component->dev;
struct dma_object *dma =
container_of(component->driver, struct dma_object, dai);
@@ -533,13 +532,12 @@ static int fsl_dma_open(struct snd_pcm_substream *substream)
* and 8 bytes at a time). So we do not support packed 24-bit samples.
* 24-bit data must be padded to 32 bits.
*/
-static int fsl_dma_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *hw_params)
+static int fsl_dma_hw_params(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *hw_params)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct fsl_dma_private *dma_private = runtime->private_data;
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct device *dev = component->dev;
/* Number of bits per sample */
@@ -698,12 +696,11 @@ static int fsl_dma_hw_params(struct snd_pcm_substream *substream,
* The base address of the buffer is stored in the source_addr field of the
* first link descriptor.
*/
-static snd_pcm_uframes_t fsl_dma_pointer(struct snd_pcm_substream *substream)
+static snd_pcm_uframes_t fsl_dma_pointer(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct fsl_dma_private *dma_private = runtime->private_data;
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct device *dev = component->dev;
struct ccsr_dma_channel __iomem *dma_channel = dma_private->dma_channel;
dma_addr_t position;
@@ -763,7 +760,8 @@ static snd_pcm_uframes_t fsl_dma_pointer(struct snd_pcm_substream *substream)
*
* This function can be called multiple times.
*/
-static int fsl_dma_hw_free(struct snd_pcm_substream *substream)
+static int fsl_dma_hw_free(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct fsl_dma_private *dma_private = runtime->private_data;
@@ -796,12 +794,11 @@ static int fsl_dma_hw_free(struct snd_pcm_substream *substream)
/**
* fsl_dma_close: close the stream.
*/
-static int fsl_dma_close(struct snd_pcm_substream *substream)
+static int fsl_dma_close(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct fsl_dma_private *dma_private = runtime->private_data;
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct device *dev = component->dev;
struct dma_object *dma =
container_of(component->driver, struct dma_object, dai);
@@ -824,7 +821,8 @@ static int fsl_dma_close(struct snd_pcm_substream *substream)
/*
* Remove this PCM driver.
*/
-static void fsl_dma_free_dma_buffers(struct snd_pcm *pcm)
+static void fsl_dma_free_dma_buffers(struct snd_soc_component *component,
+ struct snd_pcm *pcm)
{
struct snd_pcm_substream *substream;
unsigned int i;
@@ -872,15 +870,6 @@ static struct device_node *find_ssi_node(struct device_node *dma_channel_np)
return NULL;
}
-static const struct snd_pcm_ops fsl_dma_ops = {
- .open = fsl_dma_open,
- .close = fsl_dma_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = fsl_dma_hw_params,
- .hw_free = fsl_dma_hw_free,
- .pointer = fsl_dma_pointer,
-};
-
static int fsl_soc_dma_probe(struct platform_device *pdev)
{
struct dma_object *dma;
@@ -912,9 +901,14 @@ static int fsl_soc_dma_probe(struct platform_device *pdev)
}
dma->dai.name = DRV_NAME;
- dma->dai.ops = &fsl_dma_ops;
- dma->dai.pcm_new = fsl_dma_new;
- dma->dai.pcm_free = fsl_dma_free_dma_buffers;
+ dma->dai.open = fsl_dma_open;
+ dma->dai.close = fsl_dma_close;
+ dma->dai.ioctl = snd_soc_pcm_lib_ioctl;
+ dma->dai.hw_params = fsl_dma_hw_params;
+ dma->dai.hw_free = fsl_dma_hw_free;
+ dma->dai.pointer = fsl_dma_pointer;
+ dma->dai.pcm_construct = fsl_dma_new;
+ dma->dai.pcm_destruct = fsl_dma_free_dma_buffers;
/* Store the SSI-specific information that we need */
dma->ssi_stx_phys = res.start + REG_SSI_STX0;
diff --git a/sound/soc/fsl/fsl_esai.c b/sound/soc/fsl/fsl_esai.c
index a78e4ab478df..c7a49d03463a 100644
--- a/sound/soc/fsl/fsl_esai.c
+++ b/sound/soc/fsl/fsl_esai.c
@@ -33,6 +33,7 @@
* @fsysclk: system clock source to derive HCK, SCK and FS
* @spbaclk: SPBA clock (optional, depending on SoC design)
* @task: tasklet to handle the reset operation
+ * @lock: spin lock between hw_reset() and trigger()
* @fifo_depth: depth of tx/rx FIFO
* @slot_width: width of each DAI slot
* @slots: number of slots
@@ -56,6 +57,7 @@ struct fsl_esai {
struct clk *fsysclk;
struct clk *spbaclk;
struct tasklet_struct task;
+ spinlock_t lock; /* Protect hw_reset and trigger */
u32 fifo_depth;
u32 slot_width;
u32 slots;
@@ -676,8 +678,10 @@ static void fsl_esai_hw_reset(unsigned long arg)
{
struct fsl_esai *esai_priv = (struct fsl_esai *)arg;
bool tx = true, rx = false, enabled[2];
+ unsigned long lock_flags;
u32 tfcr, rfcr;
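+ /* Serialize against trigger(): a reset must not race with stream start/stop */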
+ spin_lock_irqsave(&esai_priv->lock, lock_flags);
/* Save the registers */
regmap_read(esai_priv->regmap, REG_ESAI_TFCR, &tfcr);
regmap_read(esai_priv->regmap, REG_ESAI_RFCR, &rfcr);
@@ -715,6 +719,8 @@ static void fsl_esai_hw_reset(unsigned long arg)
fsl_esai_trigger_start(esai_priv, tx);
if (enabled[rx])
fsl_esai_trigger_start(esai_priv, rx);
+
+ spin_unlock_irqrestore(&esai_priv->lock, lock_flags);
}
static int fsl_esai_trigger(struct snd_pcm_substream *substream, int cmd,
@@ -722,6 +728,7 @@ static int fsl_esai_trigger(struct snd_pcm_substream *substream, int cmd,
{
struct fsl_esai *esai_priv = snd_soc_dai_get_drvdata(dai);
bool tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
+ unsigned long lock_flags;
esai_priv->channels[tx] = substream->runtime->channels;
@@ -729,12 +736,16 @@ static int fsl_esai_trigger(struct snd_pcm_substream *substream, int cmd,
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_RESUME:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ spin_lock_irqsave(&esai_priv->lock, lock_flags);
fsl_esai_trigger_start(esai_priv, tx);
+ spin_unlock_irqrestore(&esai_priv->lock, lock_flags);
break;
case SNDRV_PCM_TRIGGER_SUSPEND:
case SNDRV_PCM_TRIGGER_STOP:
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ spin_lock_irqsave(&esai_priv->lock, lock_flags);
fsl_esai_trigger_stop(esai_priv, tx);
+ spin_unlock_irqrestore(&esai_priv->lock, lock_flags);
break;
default:
return -EINVAL;
@@ -1002,6 +1013,7 @@ static int fsl_esai_probe(struct platform_device *pdev)
dev_set_drvdata(&pdev->dev, esai_priv);
+ spin_lock_init(&esai_priv->lock);
ret = fsl_esai_hw_init(esai_priv);
if (ret)
return ret;
diff --git a/sound/soc/fsl/fsl_mqs.c b/sound/soc/fsl/fsl_mqs.c
new file mode 100644
index 000000000000..0c813a45bba7
--- /dev/null
+++ b/sound/soc/fsl/fsl_mqs.c
@@ -0,0 +1,335 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// ALSA SoC IMX MQS driver
+//
+// Copyright (C) 2014-2015 Freescale Semiconductor, Inc.
+// Copyright 2019 NXP
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
+#include <linux/pm_runtime.h>
+#include <linux/of.h>
+#include <linux/pm.h>
+#include <linux/slab.h>
+#include <sound/soc.h>
+#include <sound/pcm.h>
+#include <sound/initval.h>
+
+#define REG_MQS_CTRL 0x00
+
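+/* MQS_CTRL bit fields: enable, software reset, oversample rate and clock divider */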
+#define MQS_EN_MASK (0x1 << 28)
+#define MQS_EN_SHIFT (28)
+#define MQS_SW_RST_MASK (0x1 << 24)
+#define MQS_SW_RST_SHIFT (24)
+#define MQS_OVERSAMPLE_MASK (0x1 << 20)
+#define MQS_OVERSAMPLE_SHIFT (20)
+#define MQS_CLK_DIV_MASK (0xFF << 0)
+#define MQS_CLK_DIV_SHIFT (0)
+
+/* codec private data */
+struct fsl_mqs {
+ struct regmap *regmap;
+ struct clk *mclk;
+ struct clk *ipg;
+
+ unsigned int reg_iomuxc_gpr2;
+ unsigned int reg_mqs_ctrl;
+ bool use_gpr;
+};
+
+#define FSL_MQS_RATES (SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000)
+#define FSL_MQS_FORMATS SNDRV_PCM_FMTBIT_S16_LE
+
+static int fsl_mqs_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_component *component = dai->component;
+ struct fsl_mqs *mqs_priv = snd_soc_component_get_drvdata(component);
+ unsigned long mclk_rate;
+ int div, res;
+ int lrclk;
+
+ mclk_rate = clk_get_rate(mqs_priv->mclk);
+ lrclk = params_rate(params);
+
+ /*
+ * mclk_rate / (oversample(32,64) * FS * 2 * divider) = repeat_rate;
+ * if repeat_rate is 8, MQS can achieve better quality.
+ * The oversample rate is fixed to 32 currently.
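+ * e.g. a 24.576MHz mclk with a 48kHz LRCLK gives
+ * div = 24576000 / (32 * 48000 * 2 * 8) = 1, res = 0.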
+ */
+ div = mclk_rate / (32 * lrclk * 2 * 8);
+ res = mclk_rate % (32 * lrclk * 2 * 8);
+
+ if (res == 0 && div > 0 && div <= 256) {
+ if (mqs_priv->use_gpr) {
+ regmap_update_bits(mqs_priv->regmap, IOMUXC_GPR2,
+ IMX6SX_GPR2_MQS_CLK_DIV_MASK,
+ (div - 1) << IMX6SX_GPR2_MQS_CLK_DIV_SHIFT);
+ regmap_update_bits(mqs_priv->regmap, IOMUXC_GPR2,
+ IMX6SX_GPR2_MQS_OVERSAMPLE_MASK, 0);
+ } else {
+ regmap_update_bits(mqs_priv->regmap, REG_MQS_CTRL,
+ MQS_CLK_DIV_MASK,
+ (div - 1) << MQS_CLK_DIV_SHIFT);
+ regmap_update_bits(mqs_priv->regmap, REG_MQS_CTRL,
+ MQS_OVERSAMPLE_MASK, 0);
+ }
+ } else {
+ dev_err(component->dev, "can't get proper divider\n");
+ }
+
+ return 0;
+}
+
+static int fsl_mqs_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+ /* Only LEFT_J format and slave (CBS_CFS) mode are supported. */
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_LEFT_J:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_NF:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBS_CFS:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int fsl_mqs_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_component *component = dai->component;
+ struct fsl_mqs *mqs_priv = snd_soc_component_get_drvdata(component);
+
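+ /* Set the MQS enable bit: in IOMUXC GPR2 on i.MX6SX, in MQS_CTRL otherwise */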
+ if (mqs_priv->use_gpr)
+ regmap_update_bits(mqs_priv->regmap, IOMUXC_GPR2,
+ IMX6SX_GPR2_MQS_EN_MASK,
+ 1 << IMX6SX_GPR2_MQS_EN_SHIFT);
+ else
+ regmap_update_bits(mqs_priv->regmap, REG_MQS_CTRL,
+ MQS_EN_MASK,
+ 1 << MQS_EN_SHIFT);
+ return 0;
+}
+
+static void fsl_mqs_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_component *component = dai->component;
+ struct fsl_mqs *mqs_priv = snd_soc_component_get_drvdata(component);
+
+ if (mqs_priv->use_gpr)
+ regmap_update_bits(mqs_priv->regmap, IOMUXC_GPR2,
+ IMX6SX_GPR2_MQS_EN_MASK, 0);
+ else
+ regmap_update_bits(mqs_priv->regmap, REG_MQS_CTRL,
+ MQS_EN_MASK, 0);
+}
+
+static const struct snd_soc_component_driver soc_codec_fsl_mqs = {
+ .idle_bias_on = 1,
+ .non_legacy_dai_naming = 1,
+};
+
+static const struct snd_soc_dai_ops fsl_mqs_dai_ops = {
+ .startup = fsl_mqs_startup,
+ .shutdown = fsl_mqs_shutdown,
+ .hw_params = fsl_mqs_hw_params,
+ .set_fmt = fsl_mqs_set_dai_fmt,
+};
+
+static struct snd_soc_dai_driver fsl_mqs_dai = {
+ .name = "fsl-mqs-dai",
+ .playback = {
+ .stream_name = "Playback",
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = FSL_MQS_RATES,
+ .formats = FSL_MQS_FORMATS,
+ },
+ .ops = &fsl_mqs_dai_ops,
+};
+
+static const struct regmap_config fsl_mqs_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = REG_MQS_CTRL,
+ .cache_type = REGCACHE_NONE,
+};
+
+static int fsl_mqs_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct device_node *gpr_np = NULL;
+ struct fsl_mqs *mqs_priv;
+ void __iomem *regs;
+ int ret;
+
+ mqs_priv = devm_kzalloc(&pdev->dev, sizeof(*mqs_priv), GFP_KERNEL);
+ if (!mqs_priv)
+ return -ENOMEM;
+
+ /* On i.MX6SX the MQS control register lives in the GPR domain,
+ * but on i.MX8QM/i.MX8QXP the control register has moved to
+ * its own register domain.
+ */
+ if (of_device_is_compatible(np, "fsl,imx8qm-mqs"))
+ mqs_priv->use_gpr = false;
+ else
+ mqs_priv->use_gpr = true;
+
+ if (mqs_priv->use_gpr) {
+ gpr_np = of_parse_phandle(np, "gpr", 0);
+ if (!gpr_np) {
+ dev_err(&pdev->dev, "failed to get gpr node by phandle\n");
+ return -EINVAL;
+ }
+
+ mqs_priv->regmap = syscon_node_to_regmap(gpr_np);
+ if (IS_ERR(mqs_priv->regmap)) {
+ dev_err(&pdev->dev, "failed to get gpr regmap\n");
+ ret = PTR_ERR(mqs_priv->regmap);
+ goto err_free_gpr_np;
+ }
+ } else {
+ regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+
+ mqs_priv->regmap = devm_regmap_init_mmio_clk(&pdev->dev,
+ "core",
+ regs,
+ &fsl_mqs_regmap_config);
+ if (IS_ERR(mqs_priv->regmap)) {
+ dev_err(&pdev->dev, "failed to init regmap: %ld\n",
+ PTR_ERR(mqs_priv->regmap));
+ return PTR_ERR(mqs_priv->regmap);
+ }
+
+ mqs_priv->ipg = devm_clk_get(&pdev->dev, "core");
+ if (IS_ERR(mqs_priv->ipg)) {
+ dev_err(&pdev->dev, "failed to get the clock: %ld\n",
+ PTR_ERR(mqs_priv->ipg));
+ return PTR_ERR(mqs_priv->ipg);
+ }
+ }
+
+ mqs_priv->mclk = devm_clk_get(&pdev->dev, "mclk");
+ if (IS_ERR(mqs_priv->mclk)) {
+ dev_err(&pdev->dev, "failed to get the clock: %ld\n",
+ PTR_ERR(mqs_priv->mclk));
+ ret = PTR_ERR(mqs_priv->mclk);
+ goto err_free_gpr_np;
+ }
+
+ dev_set_drvdata(&pdev->dev, mqs_priv);
+ pm_runtime_enable(&pdev->dev);
+
+ ret = devm_snd_soc_register_component(&pdev->dev, &soc_codec_fsl_mqs,
+ &fsl_mqs_dai, 1);
+ if (ret)
+ goto err_free_gpr_np;
+ return 0;
+
+err_free_gpr_np:
+ of_node_put(gpr_np);
+
+ return ret;
+}
+
+static int fsl_mqs_remove(struct platform_device *pdev)
+{
+ pm_runtime_disable(&pdev->dev);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int fsl_mqs_runtime_resume(struct device *dev)
+{
+ struct fsl_mqs *mqs_priv = dev_get_drvdata(dev);
+
+ if (mqs_priv->ipg)
+ clk_prepare_enable(mqs_priv->ipg);
+
+ if (mqs_priv->mclk)
+ clk_prepare_enable(mqs_priv->mclk);
+
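+ /* Restore the control register contents saved by runtime_suspend() */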
+ if (mqs_priv->use_gpr)
+ regmap_write(mqs_priv->regmap, IOMUXC_GPR2,
+ mqs_priv->reg_iomuxc_gpr2);
+ else
+ regmap_write(mqs_priv->regmap, REG_MQS_CTRL,
+ mqs_priv->reg_mqs_ctrl);
+ return 0;
+}
+
+static int fsl_mqs_runtime_suspend(struct device *dev)
+{
+ struct fsl_mqs *mqs_priv = dev_get_drvdata(dev);
+
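+ /* Save the control register contents so runtime_resume() can restore them */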
+ if (mqs_priv->use_gpr)
+ regmap_read(mqs_priv->regmap, IOMUXC_GPR2,
+ &mqs_priv->reg_iomuxc_gpr2);
+ else
+ regmap_read(mqs_priv->regmap, REG_MQS_CTRL,
+ &mqs_priv->reg_mqs_ctrl);
+
+ if (mqs_priv->mclk)
+ clk_disable_unprepare(mqs_priv->mclk);
+
+ if (mqs_priv->ipg)
+ clk_disable_unprepare(mqs_priv->ipg);
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops fsl_mqs_pm_ops = {
+ SET_RUNTIME_PM_OPS(fsl_mqs_runtime_suspend,
+ fsl_mqs_runtime_resume,
+ NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+};
+
+static const struct of_device_id fsl_mqs_dt_ids[] = {
+ { .compatible = "fsl,imx8qm-mqs", },
+ { .compatible = "fsl,imx6sx-mqs", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, fsl_mqs_dt_ids);
+
+static struct platform_driver fsl_mqs_driver = {
+ .probe = fsl_mqs_probe,
+ .remove = fsl_mqs_remove,
+ .driver = {
+ .name = "fsl-mqs",
+ .of_match_table = fsl_mqs_dt_ids,
+ .pm = &fsl_mqs_pm_ops,
+ },
+};
+
+module_platform_driver(fsl_mqs_driver);
+
+MODULE_AUTHOR("Shengjiu Wang <Shengjiu.Wang@nxp.com>");
+MODULE_DESCRIPTION("MQS codec driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform: fsl-mqs");
diff --git a/sound/soc/fsl/imx-pcm-fiq.c b/sound/soc/fsl/imx-pcm-fiq.c
index c49aea4fba56..08131d147983 100644
--- a/sound/soc/fsl/imx-pcm-fiq.c
+++ b/sound/soc/fsl/imx-pcm-fiq.c
@@ -69,8 +69,9 @@ static struct fiq_handler fh = {
.name = DRV_NAME,
};
-static int snd_imx_pcm_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params)
+static int snd_imx_pcm_hw_params(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct imx_pcm_runtime_data *iprtd = runtime->private_data;
@@ -85,7 +86,8 @@ static int snd_imx_pcm_hw_params(struct snd_pcm_substream *substream,
return 0;
}
-static int snd_imx_pcm_prepare(struct snd_pcm_substream *substream)
+static int snd_imx_pcm_prepare(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct imx_pcm_runtime_data *iprtd = runtime->private_data;
@@ -104,7 +106,8 @@ static int snd_imx_pcm_prepare(struct snd_pcm_substream *substream)
static int imx_pcm_fiq;
-static int snd_imx_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+static int snd_imx_pcm_trigger(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream, int cmd)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct imx_pcm_runtime_data *iprtd = runtime->private_data;
@@ -141,7 +144,9 @@ static int snd_imx_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
return 0;
}
-static snd_pcm_uframes_t snd_imx_pcm_pointer(struct snd_pcm_substream *substream)
+static snd_pcm_uframes_t
+snd_imx_pcm_pointer(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct imx_pcm_runtime_data *iprtd = runtime->private_data;
@@ -165,7 +170,8 @@ static const struct snd_pcm_hardware snd_imx_hardware = {
.fifo_size = 0,
};
-static int snd_imx_open(struct snd_pcm_substream *substream)
+static int snd_imx_open(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct imx_pcm_runtime_data *iprtd;
@@ -194,7 +200,8 @@ static int snd_imx_open(struct snd_pcm_substream *substream)
return 0;
}
-static int snd_imx_close(struct snd_pcm_substream *substream)
+static int snd_imx_close(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct imx_pcm_runtime_data *iprtd = runtime->private_data;
@@ -206,8 +213,9 @@ static int snd_imx_close(struct snd_pcm_substream *substream)
return 0;
}
-static int snd_imx_pcm_mmap(struct snd_pcm_substream *substream,
- struct vm_area_struct *vma)
+static int snd_imx_pcm_mmap(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ struct vm_area_struct *vma)
{
struct snd_pcm_runtime *runtime = substream->runtime;
int ret;
@@ -222,17 +230,6 @@ static int snd_imx_pcm_mmap(struct snd_pcm_substream *substream,
return ret;
}
-static const struct snd_pcm_ops imx_pcm_ops = {
- .open = snd_imx_open,
- .close = snd_imx_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = snd_imx_pcm_hw_params,
- .prepare = snd_imx_pcm_prepare,
- .trigger = snd_imx_pcm_trigger,
- .pointer = snd_imx_pcm_pointer,
- .mmap = snd_imx_pcm_mmap,
-};
-
static int imx_pcm_preallocate_dma_buffer(struct snd_pcm *pcm, int stream)
{
struct snd_pcm_substream *substream = pcm->streams[stream].substream;
@@ -279,7 +276,8 @@ static int imx_pcm_new(struct snd_soc_pcm_runtime *rtd)
static int ssi_irq;
-static int imx_pcm_fiq_new(struct snd_soc_pcm_runtime *rtd)
+static int snd_imx_pcm_new(struct snd_soc_component *component,
+ struct snd_soc_pcm_runtime *rtd)
{
struct snd_pcm *pcm = rtd->pcm;
struct snd_pcm_substream *substream;
@@ -329,7 +327,8 @@ static void imx_pcm_free(struct snd_pcm *pcm)
}
}
-static void imx_pcm_fiq_free(struct snd_pcm *pcm)
+static void snd_imx_pcm_free(struct snd_soc_component *component,
+ struct snd_pcm *pcm)
{
mxc_set_irq_fiq(ssi_irq, 0);
release_fiq(&fh);
@@ -337,9 +336,16 @@ static void imx_pcm_fiq_free(struct snd_pcm *pcm)
}
static const struct snd_soc_component_driver imx_soc_component_fiq = {
- .ops = &imx_pcm_ops,
- .pcm_new = imx_pcm_fiq_new,
- .pcm_free = imx_pcm_fiq_free,
+ .open = snd_imx_open,
+ .close = snd_imx_close,
+ .ioctl = snd_soc_pcm_lib_ioctl,
+ .hw_params = snd_imx_pcm_hw_params,
+ .prepare = snd_imx_pcm_prepare,
+ .trigger = snd_imx_pcm_trigger,
+ .pointer = snd_imx_pcm_pointer,
+ .mmap = snd_imx_pcm_mmap,
+ .pcm_construct = snd_imx_pcm_new,
+ .pcm_destruct = snd_imx_pcm_free,
};
int imx_pcm_fiq_init(struct platform_device *pdev,
diff --git a/sound/soc/fsl/mpc5200_dma.c b/sound/soc/fsl/mpc5200_dma.c
index ccf9301889fe..5237ac96b756 100644
--- a/sound/soc/fsl/mpc5200_dma.c
+++ b/sound/soc/fsl/mpc5200_dma.c
@@ -98,7 +98,8 @@ static irqreturn_t psc_dma_bcom_irq(int irq, void *_psc_dma_stream)
return IRQ_HANDLED;
}
-static int psc_dma_hw_free(struct snd_pcm_substream *substream)
+static int psc_dma_hw_free(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
snd_pcm_set_runtime_buffer(substream, NULL);
return 0;
@@ -110,7 +111,8 @@ static int psc_dma_hw_free(struct snd_pcm_substream *substream)
* This function is called by ALSA to start, stop, pause, and resume the DMA
* transfer of data.
*/
-static int psc_dma_trigger(struct snd_pcm_substream *substream, int cmd)
+static int psc_dma_trigger(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream, int cmd)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct psc_dma *psc_dma = snd_soc_dai_get_drvdata(rtd->cpu_dai);
@@ -210,7 +212,8 @@ static const struct snd_pcm_hardware psc_dma_hardware = {
.fifo_size = 512,
};
-static int psc_dma_open(struct snd_pcm_substream *substream)
+static int psc_dma_open(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_soc_pcm_runtime *rtd = substream->private_data;
@@ -238,7 +241,8 @@ static int psc_dma_open(struct snd_pcm_substream *substream)
return 0;
}
-static int psc_dma_close(struct snd_pcm_substream *substream)
+static int psc_dma_close(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct psc_dma *psc_dma = snd_soc_dai_get_drvdata(rtd->cpu_dai);
@@ -263,7 +267,8 @@ static int psc_dma_close(struct snd_pcm_substream *substream)
}
static snd_pcm_uframes_t
-psc_dma_pointer(struct snd_pcm_substream *substream)
+psc_dma_pointer(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct psc_dma *psc_dma = snd_soc_dai_get_drvdata(rtd->cpu_dai);
@@ -280,29 +285,19 @@ psc_dma_pointer(struct snd_pcm_substream *substream)
return bytes_to_frames(substream->runtime, count);
}
-static int
-psc_dma_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params)
+static int psc_dma_hw_params(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
{
snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
return 0;
}
-static const struct snd_pcm_ops psc_dma_ops = {
- .open = psc_dma_open,
- .close = psc_dma_close,
- .hw_free = psc_dma_hw_free,
- .ioctl = snd_pcm_lib_ioctl,
- .pointer = psc_dma_pointer,
- .trigger = psc_dma_trigger,
- .hw_params = psc_dma_hw_params,
-};
-
-static int psc_dma_new(struct snd_soc_pcm_runtime *rtd)
+static int psc_dma_new(struct snd_soc_component *component,
+ struct snd_soc_pcm_runtime *rtd)
{
struct snd_card *card = rtd->card->snd_card;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct snd_soc_dai *dai = rtd->cpu_dai;
struct snd_pcm *pcm = rtd->pcm;
size_t size = psc_dma_hardware.buffer_bytes_max;
@@ -341,10 +336,10 @@ static int psc_dma_new(struct snd_soc_pcm_runtime *rtd)
return -ENOMEM;
}
-static void psc_dma_free(struct snd_pcm *pcm)
+static void psc_dma_free(struct snd_soc_component *component,
+ struct snd_pcm *pcm)
{
struct snd_soc_pcm_runtime *rtd = pcm->private_data;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct snd_pcm_substream *substream;
int stream;
@@ -362,9 +357,15 @@ static void psc_dma_free(struct snd_pcm *pcm)
static const struct snd_soc_component_driver mpc5200_audio_dma_component = {
.name = DRV_NAME,
- .ops = &psc_dma_ops,
- .pcm_new = &psc_dma_new,
- .pcm_free = &psc_dma_free,
+ .open = psc_dma_open,
+ .close = psc_dma_close,
+ .hw_free = psc_dma_hw_free,
+ .ioctl = snd_soc_pcm_lib_ioctl,
+ .pointer = psc_dma_pointer,
+ .trigger = psc_dma_trigger,
+ .hw_params = psc_dma_hw_params,
+ .pcm_construct = psc_dma_new,
+ .pcm_destruct = psc_dma_free,
};
int mpc5200_audio_dma_create(struct platform_device *op)
diff --git a/sound/soc/generic/audio-graph-card.c b/sound/soc/generic/audio-graph-card.c
index 6007e6305735..9ad35d9940fe 100644
--- a/sound/soc/generic/audio-graph-card.c
+++ b/sound/soc/generic/audio-graph-card.c
@@ -232,7 +232,7 @@ static int graph_dai_link_of_dpcm(struct asoc_simple_priv *priv,
if (li->cpu) {
int is_single_links = 0;
- /* BE is dummy */
+ /* Codec is dummy */
codecs->of_node = NULL;
codecs->dai_name = "snd-soc-dummy-dai";
codecs->name = "snd-soc-dummy";
@@ -263,7 +263,7 @@ static int graph_dai_link_of_dpcm(struct asoc_simple_priv *priv,
} else {
struct snd_soc_codec_conf *cconf;
- /* FE is dummy */
+ /* CPU is dummy */
cpus->of_node = NULL;
cpus->dai_name = "snd-soc-dummy-dai";
cpus->name = "snd-soc-dummy";
diff --git a/sound/soc/generic/simple-card.c b/sound/soc/generic/simple-card.c
index fc9c753db8dd..10b82bf043d1 100644
--- a/sound/soc/generic/simple-card.c
+++ b/sound/soc/generic/simple-card.c
@@ -149,7 +149,7 @@ static int simple_dai_link_of_dpcm(struct asoc_simple_priv *priv,
if (li->cpu) {
int is_single_links = 0;
- /* BE is dummy */
+ /* Codec is dummy */
codecs->of_node = NULL;
codecs->dai_name = "snd-soc-dummy-dai";
codecs->name = "snd-soc-dummy";
@@ -179,7 +179,7 @@ static int simple_dai_link_of_dpcm(struct asoc_simple_priv *priv,
} else {
struct snd_soc_codec_conf *cconf;
- /* FE is dummy */
+ /* CPU is dummy */
cpus->of_node = NULL;
cpus->dai_name = "snd-soc-dummy-dai";
cpus->name = "snd-soc-dummy";
diff --git a/sound/soc/intel/Kconfig b/sound/soc/intel/Kconfig
index 01c99750212a..c8de0bb5bed9 100644
--- a/sound/soc/intel/Kconfig
+++ b/sound/soc/intel/Kconfig
@@ -59,10 +59,13 @@ config SND_SOC_INTEL_HASWELL
If you have a Intel Haswell or Broadwell platform connected to
an I2S codec, then enable this option by saying Y or m. This is
typically used for Chromebooks. This is a recommended option.
+ This option is mutually exclusive with the SOF support on
+ Broadwell. If you want to enable SOF on Broadwell, you need to
+ deselect this option first.
config SND_SOC_INTEL_BAYTRAIL
tristate "Baytrail (legacy) Platforms"
- depends on DMADEVICES && ACPI && SND_SST_ATOM_HIFI2_PLATFORM=n
+ depends on DMADEVICES && ACPI && SND_SST_ATOM_HIFI2_PLATFORM=n && SND_SOC_SOF_BAYTRAIL=n
select SND_SOC_INTEL_SST
select SND_SOC_INTEL_SST_ACPI
select SND_SOC_INTEL_SST_FIRMWARE
@@ -101,6 +104,9 @@ config SND_SST_ATOM_HIFI2_PLATFORM_ACPI
If you have a Intel Baytrail or Cherrytrail platform with an I2S
codec, then enable this option by saying Y or m. This is a
recommended option
+ This option is mutually exclusive with the SOF support on
+ Baytrail/Cherrytrail. If you want to enable SOF on
+ Baytrail/Cherrytrail, you need to deselect this option first.
config SND_SOC_INTEL_SKYLAKE
tristate "All Skylake/SST Platforms"
@@ -113,7 +119,7 @@ config SND_SOC_INTEL_SKYLAKE
select SND_SOC_INTEL_CNL
select SND_SOC_INTEL_CFL
help
- This is a backwards-compatible option to select all devices
+ This is a backwards-compatible option to select all devices
supported by the Intel SST/Skylake driver. This option is no
longer recommended and will be deprecated when the SOF
driver is introduced. Distributions should explicitly
@@ -203,9 +209,12 @@ config SND_SOC_INTEL_SKYLAKE_SSP_CLK
config SND_SOC_INTEL_SKYLAKE_HDAUDIO_CODEC
bool "HDAudio codec support"
help
- If you have a Intel Skylake/Broxton/ApolloLake/KabyLake/
- GeminiLake or CannonLake platform with an HDaudio codec
- then enable this option by saying Y
+ This option broke audio on Linus' Skylake laptop in December 2018,
+ and the race conditions during probe have not been fixed since.
+ This option is DEPRECATED; all HDaudio codec support needs
+ to be handled by the SOF driver.
+ Distributions should not enable this option, and there are no known
+ users of this capability.
config SND_SOC_INTEL_SKYLAKE_COMMON
tristate
@@ -215,7 +224,7 @@ config SND_SOC_INTEL_SKYLAKE_COMMON
select SND_SOC_INTEL_SST
select SND_SOC_HDAC_HDA if SND_SOC_INTEL_SKYLAKE_HDAUDIO_CODEC
select SND_SOC_ACPI_INTEL_MATCH
- select SND_INTEL_NHLT if ACPI
+ select SND_INTEL_DSP_CONFIG
help
If you have a Intel Skylake/Broxton/ApolloLake/KabyLake/
GeminiLake or CannonLake platform with the DSP enabled in the BIOS
diff --git a/sound/soc/intel/atom/sst-mfld-platform-pcm.c b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
index 8cc3cc363eb0..47e3d1943d7e 100644
--- a/sound/soc/intel/atom/sst-mfld-platform-pcm.c
+++ b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
@@ -586,7 +586,8 @@ static struct snd_soc_dai_driver sst_platform_dai[] = {
},
};
-static int sst_platform_open(struct snd_pcm_substream *substream)
+static int sst_soc_open(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime;
@@ -598,15 +599,15 @@ static int sst_platform_open(struct snd_pcm_substream *substream)
return 0;
}
-static int sst_platform_pcm_trigger(struct snd_pcm_substream *substream,
- int cmd)
+static int sst_soc_trigger(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream, int cmd)
{
int ret_val = 0, str_id;
struct sst_runtime_stream *stream;
int status;
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- dev_dbg(rtd->dev, "sst_platform_pcm_trigger called\n");
+ dev_dbg(rtd->dev, "%s called\n", __func__);
if (substream->pcm->internal)
return 0;
stream = substream->runtime->private_data;
@@ -646,8 +647,8 @@ static int sst_platform_pcm_trigger(struct snd_pcm_substream *substream,
}
-static snd_pcm_uframes_t sst_platform_pcm_pointer
- (struct snd_pcm_substream *substream)
+static snd_pcm_uframes_t sst_soc_pointer(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct sst_runtime_stream *stream;
int ret_val, status;
@@ -668,14 +669,8 @@ static snd_pcm_uframes_t sst_platform_pcm_pointer
return str_info->buffer_ptr;
}
-static const struct snd_pcm_ops sst_platform_ops = {
- .open = sst_platform_open,
- .ioctl = snd_pcm_lib_ioctl,
- .trigger = sst_platform_pcm_trigger,
- .pointer = sst_platform_pcm_pointer,
-};
-
-static int sst_pcm_new(struct snd_soc_pcm_runtime *rtd)
+static int sst_soc_pcm_new(struct snd_soc_component *component,
+ struct snd_soc_pcm_runtime *rtd)
{
struct snd_soc_dai *dai = rtd->cpu_dai;
struct snd_pcm *pcm = rtd->pcm;
@@ -709,9 +704,12 @@ static const struct snd_soc_component_driver sst_soc_platform_drv = {
.name = DRV_NAME,
.probe = sst_soc_probe,
.remove = sst_soc_remove,
- .ops = &sst_platform_ops,
+ .open = sst_soc_open,
+ .ioctl = snd_soc_pcm_lib_ioctl,
+ .trigger = sst_soc_trigger,
+ .pointer = sst_soc_pointer,
.compr_ops = &sst_platform_compr_ops,
- .pcm_new = sst_pcm_new,
+ .pcm_construct = sst_soc_pcm_new,
};
static int sst_platform_probe(struct platform_device *pdev)
diff --git a/sound/soc/intel/baytrail/sst-baytrail-pcm.c b/sound/soc/intel/baytrail/sst-baytrail-pcm.c
index 54f2ee3010ee..1d780fcc448c 100644
--- a/sound/soc/intel/baytrail/sst-baytrail-pcm.c
+++ b/sound/soc/intel/baytrail/sst-baytrail-pcm.c
@@ -58,11 +58,11 @@ struct sst_byt_priv_data {
};
/* this may get called several times by oss emulation */
-static int sst_byt_pcm_hw_params(struct snd_pcm_substream *substream,
+static int sst_byt_pcm_hw_params(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct sst_byt_priv_data *pdata = snd_soc_component_get_drvdata(component);
struct sst_byt_pcm_data *pcm_data = &pdata->pcm[substream->stream];
struct sst_byt *byt = pdata->byt;
@@ -121,7 +121,8 @@ static int sst_byt_pcm_hw_params(struct snd_pcm_substream *substream,
return 0;
}
-static int sst_byt_pcm_hw_free(struct snd_pcm_substream *substream)
+static int sst_byt_pcm_hw_free(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
@@ -164,10 +165,10 @@ static void sst_byt_pcm_work(struct work_struct *work)
sst_byt_pcm_restore_stream_context(pcm_data->substream);
}
-static int sst_byt_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+static int sst_byt_pcm_trigger(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream, int cmd)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct sst_byt_priv_data *pdata = snd_soc_component_get_drvdata(component);
struct sst_byt_pcm_data *pcm_data = &pdata->pcm[substream->stream];
struct sst_byt *byt = pdata->byt;
@@ -228,11 +229,11 @@ static u32 byt_notify_pointer(struct sst_byt_stream *stream, void *data)
return pos;
}
-static snd_pcm_uframes_t sst_byt_pcm_pointer(struct snd_pcm_substream *substream)
+static snd_pcm_uframes_t sst_byt_pcm_pointer(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_pcm_runtime *runtime = substream->runtime;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct sst_byt_priv_data *pdata = snd_soc_component_get_drvdata(component);
struct sst_byt_pcm_data *pcm_data = &pdata->pcm[substream->stream];
@@ -241,10 +242,10 @@ static snd_pcm_uframes_t sst_byt_pcm_pointer(struct snd_pcm_substream *substream
return bytes_to_frames(runtime, pcm_data->hw_ptr);
}
-static int sst_byt_pcm_open(struct snd_pcm_substream *substream)
+static int sst_byt_pcm_open(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct sst_byt_priv_data *pdata = snd_soc_component_get_drvdata(component);
struct sst_byt_pcm_data *pcm_data = &pdata->pcm[substream->stream];
struct sst_byt *byt = pdata->byt;
@@ -269,10 +270,10 @@ static int sst_byt_pcm_open(struct snd_pcm_substream *substream)
return 0;
}
-static int sst_byt_pcm_close(struct snd_pcm_substream *substream)
+static int sst_byt_pcm_close(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct sst_byt_priv_data *pdata = snd_soc_component_get_drvdata(component);
struct sst_byt_pcm_data *pcm_data = &pdata->pcm[substream->stream];
struct sst_byt *byt = pdata->byt;
@@ -294,7 +295,8 @@ out:
return ret;
}
-static int sst_byt_pcm_mmap(struct snd_pcm_substream *substream,
+static int sst_byt_pcm_mmap(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
struct vm_area_struct *vma)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
@@ -303,22 +305,11 @@ static int sst_byt_pcm_mmap(struct snd_pcm_substream *substream,
return snd_pcm_lib_default_mmap(substream, vma);
}
-static const struct snd_pcm_ops sst_byt_pcm_ops = {
- .open = sst_byt_pcm_open,
- .close = sst_byt_pcm_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = sst_byt_pcm_hw_params,
- .hw_free = sst_byt_pcm_hw_free,
- .trigger = sst_byt_pcm_trigger,
- .pointer = sst_byt_pcm_pointer,
- .mmap = sst_byt_pcm_mmap,
-};
-
-static int sst_byt_pcm_new(struct snd_soc_pcm_runtime *rtd)
+static int sst_byt_pcm_new(struct snd_soc_component *component,
+ struct snd_soc_pcm_runtime *rtd)
{
struct snd_pcm *pcm = rtd->pcm;
size_t size;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct sst_pdata *pdata = dev_get_platdata(component->dev);
if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream ||
@@ -380,8 +371,15 @@ static int sst_byt_pcm_probe(struct snd_soc_component *component)
static const struct snd_soc_component_driver byt_dai_component = {
.name = DRV_NAME,
.probe = sst_byt_pcm_probe,
- .ops = &sst_byt_pcm_ops,
- .pcm_new = sst_byt_pcm_new,
+ .open = sst_byt_pcm_open,
+ .close = sst_byt_pcm_close,
+ .ioctl = snd_soc_pcm_lib_ioctl,
+ .hw_params = sst_byt_pcm_hw_params,
+ .hw_free = sst_byt_pcm_hw_free,
+ .trigger = sst_byt_pcm_trigger,
+ .pointer = sst_byt_pcm_pointer,
+ .mmap = sst_byt_pcm_mmap,
+ .pcm_construct = sst_byt_pcm_new,
};
#ifdef CONFIG_PM
diff --git a/sound/soc/intel/boards/Kconfig b/sound/soc/intel/boards/Kconfig
index 5c27f7ab4a5f..ef20316e83d1 100644
--- a/sound/soc/intel/boards/Kconfig
+++ b/sound/soc/intel/boards/Kconfig
@@ -3,13 +3,13 @@ menuconfig SND_SOC_INTEL_MACH
bool "Intel Machine drivers"
depends on SND_SOC_INTEL_SST_TOPLEVEL || SND_SOC_SOF_INTEL_TOPLEVEL
help
- Intel ASoC Machine Drivers. If you have a Intel machine that
- has an audio controller with a DSP and I2S or DMIC port, then
- enable this option by saying Y
+ Intel ASoC Machine Drivers. If you have an Intel machine that
+ has an audio controller with a DSP and I2S or DMIC port, then
+ enable this option by saying Y
- Note that the answer to this question doesn't directly affect the
- kernel: saying N will just cause the configurator to skip all
- the questions about Intel ASoC machine drivers.
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Intel ASoC machine drivers.
if SND_SOC_INTEL_MACH
@@ -114,11 +114,11 @@ config SND_SOC_INTEL_CHT_BSW_RT5672_MACH
depends on X86_INTEL_LPSS || COMPILE_TEST
select SND_SOC_ACPI
select SND_SOC_RT5670
- help
- This adds support for ASoC machine driver for Intel(R) Cherrytrail & Braswell
- platforms with RT5672 audio codec.
- Say Y or m if you have such a device. This is a recommended option.
- If unsure select "N".
+ help
+ This adds support for ASoC machine driver for Intel(R) Cherrytrail & Braswell
+ platforms with RT5672 audio codec.
+ Say Y or m if you have such a device. This is a recommended option.
+ If unsure select "N".
config SND_SOC_INTEL_CHT_BSW_RT5645_MACH
tristate "Cherrytrail & Braswell with RT5645/5650 codec"
@@ -263,14 +263,17 @@ config SND_SOC_INTEL_DA7219_MAX98357A_GENERIC
select SND_SOC_DMIC
select SND_SOC_HDAC_HDMI
+config SND_SOC_INTEL_BXT_DA7219_MAX98357A_COMMON
+ tristate
+ select SND_SOC_INTEL_DA7219_MAX98357A_GENERIC
+
if SND_SOC_INTEL_APL
config SND_SOC_INTEL_BXT_DA7219_MAX98357A_MACH
tristate "Broxton with DA7219 and MAX98357A in I2S Mode"
depends on I2C && ACPI
depends on MFD_INTEL_LPSS || COMPILE_TEST
- select SND_SOC_INTEL_DA7219_MAX98357A_GENERIC
- select SND_HDA_DSP_LOADER
+ select SND_SOC_INTEL_BXT_DA7219_MAX98357A_COMMON
help
This adds support for ASoC machine driver for Broxton-P platforms
with DA7219 + MAX98357A I2S audio codec.
@@ -284,7 +287,6 @@ config SND_SOC_INTEL_BXT_RT298_MACH
select SND_SOC_RT298
select SND_SOC_DMIC
select SND_SOC_HDAC_HDMI
- select SND_HDA_DSP_LOADER
help
This adds support for ASoC machine driver for Broxton platforms
with RT286 I2S audio codec.
@@ -311,20 +313,21 @@ config SND_SOC_INTEL_KBL_RT5663_MAX98927_MACH
If unsure select "N".
config SND_SOC_INTEL_KBL_RT5663_RT5514_MAX98927_MACH
- tristate "KBL with RT5663, RT5514 and MAX98927 in I2S Mode"
+ tristate "KBL with RT5663, RT5514 and MAX98927 in I2S Mode"
depends on I2C && ACPI
depends on MFD_INTEL_LPSS || COMPILE_TEST
- depends on SPI
- select SND_SOC_RT5663
- select SND_SOC_RT5514
- select SND_SOC_RT5514_SPI
- select SND_SOC_MAX98927
- select SND_SOC_HDAC_HDMI
- help
- This adds support for ASoC Onboard Codec I2S machine driver. This will
- create an alsa sound card for RT5663 + RT5514 + MAX98927.
- Say Y or m if you have such a device. This is a recommended option.
- If unsure select "N".
+ depends on SPI
+ select SND_SOC_RT5663
+ select SND_SOC_RT5514
+ select SND_SOC_RT5514_SPI
+ select SND_SOC_MAX98927
+ select SND_SOC_HDAC_HDMI
+ select SND_SOC_INTEL_SKYLAKE_SSP_CLK
+ help
+ This adds support for ASoC Onboard Codec I2S machine driver. This will
+ create an alsa sound card for RT5663 + RT5514 + MAX98927.
+ Say Y or m if you have such a device. This is a recommended option.
+ If unsure select "N".
config SND_SOC_INTEL_KBL_DA7219_MAX98357A_MACH
tristate "KBL with DA7219 and MAX98357A in I2S Mode"
@@ -364,7 +367,18 @@ config SND_SOC_INTEL_KBL_RT5660_MACH
endif ## SND_SOC_INTEL_KBL
-if SND_SOC_INTEL_GLK || (SND_SOC_SOF_GEMINILAKE && SND_SOC_SOF_HDA_LINK)
+if SND_SOC_SOF_GEMINILAKE && SND_SOC_SOF_HDA_LINK
+
+config SND_SOC_INTEL_GLK_DA7219_MAX98357A_MACH
+ tristate "GLK with DA7219 and MAX98357A in I2S Mode"
+ depends on I2C && ACPI
+ depends on MFD_INTEL_LPSS || COMPILE_TEST
+ select SND_SOC_INTEL_BXT_DA7219_MAX98357A_COMMON
+ help
+ This adds support for ASoC machine driver for Geminilake platforms
+ with DA7219 + MAX98357A I2S audio codec.
+ Say Y or m if you have such a device. This is a recommended option.
+ If unsure select "N".
config SND_SOC_INTEL_GLK_RT5682_MAX98357A_MACH
tristate "GLK with RT5682 and MAX98357A in I2S Mode"
@@ -374,14 +388,13 @@ config SND_SOC_INTEL_GLK_RT5682_MAX98357A_MACH
select SND_SOC_MAX98357A
select SND_SOC_DMIC
select SND_SOC_HDAC_HDMI
- select SND_HDA_DSP_LOADER
help
This adds support for ASoC machine driver for Geminilake platforms
with RT5682 + MAX98357A I2S audio codec.
Say Y if you have such a device.
If unsure select "N".
-endif ## SND_SOC_INTEL_GLK || (SND_SOC_SOF_GEMINILAKE && SND_SOC_SOF_HDA_LINK)
+endif ## SND_SOC_SOF_GEMINILAKE && SND_SOC_SOF_HDA_LINK
if SND_SOC_INTEL_SKYLAKE_HDAUDIO_CODEC || SND_SOC_SOF_HDA_AUDIO_CODEC
@@ -393,16 +406,16 @@ config SND_SOC_INTEL_SKL_HDA_DSP_GENERIC_MACH
help
This adds support for ASoC machine driver for Intel platforms
SKL/KBL/BXT/APL with iDisp, HDA audio codecs.
- Say Y or m if you have such a device. This is a recommended option.
+ Say Y or m if you have such a device. This is a recommended option.
If unsure select "N".
endif ## SND_SOC_INTEL_SKYLAKE_HDAUDIO_CODEC || SND_SOC_SOF_HDA_AUDIO_CODEC
-if SND_SOC_SOF_HDA_COMMON || SND_SOC_SOF_BAYTRAIL
+if SND_SOC_SOF_HDA_LINK || SND_SOC_SOF_BAYTRAIL
config SND_SOC_INTEL_SOF_RT5682_MACH
tristate "SOF with rt5682 codec in I2S Mode"
depends on I2C && ACPI
- depends on (SND_SOC_SOF_HDA_COMMON && (MFD_INTEL_LPSS || COMPILE_TEST)) ||\
+ depends on (SND_SOC_SOF_HDA_LINK && (MFD_INTEL_LPSS || COMPILE_TEST)) ||\
(SND_SOC_SOF_BAYTRAIL && (X86_INTEL_LPSS || COMPILE_TEST))
select SND_SOC_RT5682
select SND_SOC_DMIC
@@ -412,7 +425,7 @@ config SND_SOC_INTEL_SOF_RT5682_MACH
with rt5682 codec.
Say Y if you have such a device.
If unsure select "N".
-endif ## SND_SOC_SOF_HDA_COMMON || SND_SOC_SOF_BAYTRAIL
+endif ## SND_SOC_SOF_HDA_LINK || SND_SOC_SOF_BAYTRAIL
if (SND_SOC_SOF_COMETLAKE_LP && SND_SOC_SOF_HDA_LINK)
@@ -420,7 +433,26 @@ config SND_SOC_INTEL_CML_LP_DA7219_MAX98357A_MACH
tristate "CML_LP with DA7219 and MAX98357A in I2S Mode"
depends on I2C && ACPI
depends on MFD_INTEL_LPSS || COMPILE_TEST
- select SND_SOC_INTEL_DA7219_MAX98357A_GENERIC
+ select SND_SOC_INTEL_BXT_DA7219_MAX98357A_COMMON
+ help
+ This adds support for ASoC machine driver for Cometlake platforms
+ with DA7219 + MAX98357A I2S audio codec.
+ Say Y or m if you have such a device. This is a recommended option.
+ If unsure select "N".
+
+config SND_SOC_INTEL_SOF_CML_RT1011_RT5682_MACH
+ tristate "CML with RT1011 and RT5682 in I2S Mode"
+ depends on I2C && ACPI
+ depends on MFD_INTEL_LPSS || COMPILE_TEST
+ select SND_SOC_RT1011
+ select SND_SOC_RT5682
+ select SND_SOC_DMIC
+ select SND_SOC_HDAC_HDMI
+ help
+ This adds support for ASoC machine driver for SOF platform with
+ RT1011 + RT5682 I2S codec.
+ Say Y if you have such a device.
+ If unsure select "N".
endif ## SND_SOC_SOF_COMETLAKE_LP && SND_SOC_SOF_HDA_LINK
diff --git a/sound/soc/intel/boards/Makefile b/sound/soc/intel/boards/Makefile
index 6445f90ea542..ba1aa89db09d 100644
--- a/sound/soc/intel/boards/Makefile
+++ b/sound/soc/intel/boards/Makefile
@@ -4,9 +4,9 @@ snd-soc-sst-byt-rt5640-mach-objs := byt-rt5640.o
snd-soc-sst-byt-max98090-mach-objs := byt-max98090.o
snd-soc-sst-bdw-rt5677-mach-objs := bdw-rt5677.o
snd-soc-sst-broadwell-objs := broadwell.o
-snd-soc-sst-bxt-da7219_max98357a-objs := bxt_da7219_max98357a.o
-snd-soc-sst-bxt-rt298-objs := bxt_rt298.o
-snd-soc-sst-glk-rt5682_max98357a-objs := glk_rt5682_max98357a.o
+snd-soc-sst-bxt-da7219_max98357a-objs := bxt_da7219_max98357a.o hda_dsp_common.o
+snd-soc-sst-bxt-rt298-objs := bxt_rt298.o hda_dsp_common.o
+snd-soc-sst-glk-rt5682_max98357a-objs := glk_rt5682_max98357a.o hda_dsp_common.o
snd-soc-sst-bytcr-rt5640-objs := bytcr_rt5640.o
snd-soc-sst-bytcr-rt5651-objs := bytcr_rt5651.o
snd-soc-sst-cht-bsw-rt5672-objs := cht_bsw_rt5672.o
@@ -17,14 +17,15 @@ snd-soc-sst-byt-cht-cx2072x-objs := bytcht_cx2072x.o
snd-soc-sst-byt-cht-da7213-objs := bytcht_da7213.o
snd-soc-sst-byt-cht-es8316-objs := bytcht_es8316.o
snd-soc-sst-byt-cht-nocodec-objs := bytcht_nocodec.o
-snd-soc-sof_rt5682-objs := sof_rt5682.o
+snd-soc-sof_rt5682-objs := sof_rt5682.o hda_dsp_common.o
+snd-soc-cml_rt1011_rt5682-objs := cml_rt1011_rt5682.o hda_dsp_common.o
snd-soc-kbl_da7219_max98357a-objs := kbl_da7219_max98357a.o
snd-soc-kbl_da7219_max98927-objs := kbl_da7219_max98927.o
snd-soc-kbl_rt5663_max98927-objs := kbl_rt5663_max98927.o
snd-soc-kbl_rt5663_rt5514_max98927-objs := kbl_rt5663_rt5514_max98927.o
snd-soc-kbl_rt5660-objs := kbl_rt5660.o
snd-soc-skl_rt286-objs := skl_rt286.o
-snd-soc-skl_hda_dsp-objs := skl_hda_dsp_generic.o skl_hda_dsp_common.o
+snd-soc-skl_hda_dsp-objs := skl_hda_dsp_generic.o skl_hda_dsp_common.o hda_dsp_common.o
snd-skl_nau88l25_max98357a-objs := skl_nau88l25_max98357a.o
snd-soc-skl_nau88l25_ssm4567-objs := skl_nau88l25_ssm4567.o
@@ -32,7 +33,7 @@ obj-$(CONFIG_SND_SOC_INTEL_SOF_RT5682_MACH) += snd-soc-sof_rt5682.o
obj-$(CONFIG_SND_SOC_INTEL_HASWELL_MACH) += snd-soc-sst-haswell.o
obj-$(CONFIG_SND_SOC_INTEL_BYT_RT5640_MACH) += snd-soc-sst-byt-rt5640-mach.o
obj-$(CONFIG_SND_SOC_INTEL_BYT_MAX98090_MACH) += snd-soc-sst-byt-max98090-mach.o
-obj-$(CONFIG_SND_SOC_INTEL_BXT_DA7219_MAX98357A_MACH) += snd-soc-sst-bxt-da7219_max98357a.o
+obj-$(CONFIG_SND_SOC_INTEL_BXT_DA7219_MAX98357A_COMMON) += snd-soc-sst-bxt-da7219_max98357a.o
obj-$(CONFIG_SND_SOC_INTEL_BXT_RT298_MACH) += snd-soc-sst-bxt-rt298.o
obj-$(CONFIG_SND_SOC_INTEL_GLK_RT5682_MAX98357A_MACH) += snd-soc-sst-glk-rt5682_max98357a.o
obj-$(CONFIG_SND_SOC_INTEL_BROADWELL_MACH) += snd-soc-sst-broadwell.o
@@ -47,6 +48,7 @@ obj-$(CONFIG_SND_SOC_INTEL_BYT_CHT_CX2072X_MACH) += snd-soc-sst-byt-cht-cx2072x.
obj-$(CONFIG_SND_SOC_INTEL_BYT_CHT_DA7213_MACH) += snd-soc-sst-byt-cht-da7213.o
obj-$(CONFIG_SND_SOC_INTEL_BYT_CHT_ES8316_MACH) += snd-soc-sst-byt-cht-es8316.o
obj-$(CONFIG_SND_SOC_INTEL_BYT_CHT_NOCODEC_MACH) += snd-soc-sst-byt-cht-nocodec.o
+obj-$(CONFIG_SND_SOC_INTEL_SOF_CML_RT1011_RT5682_MACH) += snd-soc-cml_rt1011_rt5682.o
obj-$(CONFIG_SND_SOC_INTEL_KBL_DA7219_MAX98357A_MACH) += snd-soc-kbl_da7219_max98357a.o
obj-$(CONFIG_SND_SOC_INTEL_KBL_DA7219_MAX98927_MACH) += snd-soc-kbl_da7219_max98927.o
obj-$(CONFIG_SND_SOC_INTEL_KBL_RT5663_MAX98927_MACH) += snd-soc-kbl_rt5663_max98927.o
diff --git a/sound/soc/intel/boards/bdw-rt5677.c b/sound/soc/intel/boards/bdw-rt5677.c
index 4a4d3353e26d..2af8e5a62da8 100644
--- a/sound/soc/intel/boards/bdw-rt5677.c
+++ b/sound/soc/intel/boards/bdw-rt5677.c
@@ -74,6 +74,11 @@ static const struct snd_soc_dapm_route bdw_rt5677_map[] = {
/* CODEC BE connections */
{"SSP0 CODEC IN", NULL, "AIF1 Capture"},
{"AIF1 Playback", NULL, "SSP0 CODEC OUT"},
+ {"DSP Capture", NULL, "DSP Buffer"},
+
+ /* DSP Clock Connections */
+ { "DSP Buffer", NULL, "SSP0 CODEC IN" },
+ { "SSP0 CODEC IN", NULL, "DSPTX" },
};
static const struct snd_kcontrol_new bdw_rt5677_controls[] = {
@@ -165,10 +170,37 @@ static int bdw_rt5677_hw_params(struct snd_pcm_substream *substream,
return ret;
}
+static int bdw_rt5677_dsp_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ int ret;
+
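+ /* Derive a 24.576MHz sysclk from the 24MHz MCLK via PLL1 */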
+ ret = snd_soc_dai_set_sysclk(codec_dai, RT5677_SCLK_S_PLL1, 24576000,
+ SND_SOC_CLOCK_IN);
+ if (ret < 0) {
+ dev_err(rtd->dev, "can't set codec sysclk configuration\n");
+ return ret;
+ }
+ ret = snd_soc_dai_set_pll(codec_dai, 0, RT5677_PLL1_S_MCLK,
+ 24000000, 24576000);
+ if (ret < 0) {
+ dev_err(rtd->dev, "can't set codec pll configuration\n");
+ return ret;
+ }
+
+ return 0;
+}
+
static const struct snd_soc_ops bdw_rt5677_ops = {
.hw_params = bdw_rt5677_hw_params,
};
+static const struct snd_soc_ops bdw_rt5677_dsp_ops = {
+ .hw_params = bdw_rt5677_dsp_hw_params,
+};
+
#if !IS_ENABLED(CONFIG_SND_SOC_SOF_BROADWELL)
static int bdw_rt5677_rtd_init(struct snd_soc_pcm_runtime *rtd)
{
@@ -208,6 +240,11 @@ static int bdw_rt5677_init(struct snd_soc_pcm_runtime *rtd)
rt5677_sel_asrc_clk_src(component, RT5677_DA_STEREO_FILTER |
RT5677_AD_STEREO1_FILTER | RT5677_I2S1_SOURCE,
RT5677_CLK_SEL_I2S1_ASRC);
+ /* Enable codec ASRC function for Mono ADC L.
+ * The ASRC clock source is clk_sys2_asrc.
+ */
+ rt5677_sel_asrc_clk_src(component, RT5677_AD_MONO_L_FILTER,
+ RT5677_CLK_SEL_SYS2);
/* Request rt5677 GPIO for headphone amp control */
bdw_rt5677->gpio_hp_en = devm_gpiod_get(component->dev, "headphone-enable",
@@ -258,6 +295,12 @@ SND_SOC_DAILINK_DEF(platform,
SND_SOC_DAILINK_DEF(be,
DAILINK_COMP_ARRAY(COMP_CODEC("i2c-RT5677CE:00", "rt5677-aif1")));
+/* Wake on voice interface */
+SND_SOC_DAILINK_DEFS(dsp,
+ DAILINK_COMP_ARRAY(COMP_CPU("spi-RT5677AA:00")),
+ DAILINK_COMP_ARRAY(COMP_CODEC("i2c-RT5677CE:00", "rt5677-dspbuffer")),
+ DAILINK_COMP_ARRAY(COMP_PLATFORM("spi-RT5677AA:00")));
+
static struct snd_soc_dai_link bdw_rt5677_dais[] = {
/* Front End DAI links */
{
@@ -276,6 +319,14 @@ static struct snd_soc_dai_link bdw_rt5677_dais[] = {
SND_SOC_DAILINK_REG(fe, dummy, platform),
},
+ /* Non-DPCM links */
+ {
+ .name = "Codec DSP",
+ .stream_name = "Wake on Voice",
+ .ops = &bdw_rt5677_dsp_ops,
+ SND_SOC_DAILINK_REG(dsp),
+ },
+
/* Back End DAI links */
{
/* SSP0 - Codec */
diff --git a/sound/soc/intel/boards/bxt_da7219_max98357a.c b/sound/soc/intel/boards/bxt_da7219_max98357a.c
index ac1dea5f9d11..5873abb46441 100644
--- a/sound/soc/intel/boards/bxt_da7219_max98357a.c
+++ b/sound/soc/intel/boards/bxt_da7219_max98357a.c
@@ -21,6 +21,7 @@
#include "../../codecs/da7219.h"
#include "../../codecs/da7219-aad.h"
#include "../common/soc-intel-quirks.h"
+#include "hda_dsp_common.h"
#define BXT_DIALOG_CODEC_DAI "da7219-hifi"
#define BXT_MAXIM_CODEC_DAI "HiFi"
@@ -38,6 +39,7 @@ struct bxt_hdmi_pcm {
struct bxt_card_private {
struct list_head hdmi_pcm_list;
+ bool common_hdmi_codec_drv;
};
enum {
@@ -615,6 +617,13 @@ static int bxt_card_late_probe(struct snd_soc_card *card)
snd_soc_dapm_add_routes(&card->dapm, broxton_map,
ARRAY_SIZE(broxton_map));
+ pcm = list_first_entry(&ctx->hdmi_pcm_list, struct bxt_hdmi_pcm,
+ head);
+ component = pcm->codec_dai->component;
+
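+ /* When the common HDA codec driver handles HDMI, let it build the jack controls */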
+ if (ctx->common_hdmi_codec_drv)
+ return hda_dsp_hdmi_build_controls(card, component);
+
list_for_each_entry(pcm, &ctx->hdmi_pcm_list, head) {
component = pcm->codec_dai->component;
snprintf(jack_name, sizeof(jack_name),
@@ -720,6 +729,8 @@ static int broxton_audio_probe(struct platform_device *pdev)
if (ret)
return ret;
+ ctx->common_hdmi_codec_drv = mach->mach_params.common_hdmi_codec_drv;
+
return devm_snd_soc_register_card(&pdev->dev, &broxton_audio_card);
}
diff --git a/sound/soc/intel/boards/bxt_rt298.c b/sound/soc/intel/boards/bxt_rt298.c
index adf416a49b48..eabf9d8468ae 100644
--- a/sound/soc/intel/boards/bxt_rt298.c
+++ b/sound/soc/intel/boards/bxt_rt298.c
@@ -18,6 +18,7 @@
#include <sound/pcm_params.h>
#include "../../codecs/hdac_hdmi.h"
#include "../../codecs/rt298.h"
+#include "hda_dsp_common.h"
/* Headset jack detection DAPM pins */
static struct snd_soc_jack broxton_headset;
@@ -31,6 +32,7 @@ struct bxt_hdmi_pcm {
struct bxt_rt286_private {
struct list_head hdmi_pcm_list;
+ bool common_hdmi_codec_drv;
};
enum {
@@ -527,6 +529,13 @@ static int bxt_card_late_probe(struct snd_soc_card *card)
int err, i = 0;
char jack_name[NAME_SIZE];
+ pcm = list_first_entry(&ctx->hdmi_pcm_list, struct bxt_hdmi_pcm,
+ head);
+ component = pcm->codec_dai->component;
+
+ if (ctx->common_hdmi_codec_drv)
+ return hda_dsp_hdmi_build_controls(card, component);
+
list_for_each_entry(pcm, &ctx->hdmi_pcm_list, head) {
component = pcm->codec_dai->component;
snprintf(jack_name, sizeof(jack_name),
@@ -626,6 +635,8 @@ static int broxton_audio_probe(struct platform_device *pdev)
if (ret)
return ret;
+ ctx->common_hdmi_codec_drv = mach->mach_params.common_hdmi_codec_drv;
+
return devm_snd_soc_register_card(&pdev->dev, card);
}
diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
index 9c1aa4ec9cba..dd2b5ad08659 100644
--- a/sound/soc/intel/boards/bytcr_rt5640.c
+++ b/sound/soc/intel/boards/bytcr_rt5640.c
@@ -405,10 +405,12 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
DMI_MATCH(DMI_PRODUCT_NAME, "Aspire SW5-012"),
},
- .driver_data = (void *)(BYT_RT5640_IN1_MAP |
- BYT_RT5640_MCLK_EN |
- BYT_RT5640_SSP0_AIF1),
-
+ .driver_data = (void *)(BYT_RT5640_DMIC1_MAP |
+ BYT_RT5640_JD_SRC_JD2_IN4N |
+ BYT_RT5640_OVCD_TH_2000UA |
+ BYT_RT5640_OVCD_SF_0P75 |
+ BYT_RT5640_SSP0_AIF1 |
+ BYT_RT5640_MCLK_EN),
},
{
.matches = {
diff --git a/sound/soc/intel/boards/cht_bsw_rt5645.c b/sound/soc/intel/boards/cht_bsw_rt5645.c
index 8879c3be29d5..c68a5b85a4a0 100644
--- a/sound/soc/intel/boards/cht_bsw_rt5645.c
+++ b/sound/soc/intel/boards/cht_bsw_rt5645.c
@@ -48,6 +48,7 @@ struct cht_mc_private {
#define CHT_RT5645_SSP2_AIF2 BIT(16) /* default is using AIF1 */
#define CHT_RT5645_SSP0_AIF1 BIT(17)
#define CHT_RT5645_SSP0_AIF2 BIT(18)
+#define CHT_RT5645_PMC_PLT_CLK_0 BIT(19)
static unsigned long cht_rt5645_quirk = 0;
@@ -59,6 +60,8 @@ static void log_quirks(struct device *dev)
dev_info(dev, "quirk SSP0_AIF1 enabled");
if (cht_rt5645_quirk & CHT_RT5645_SSP0_AIF2)
dev_info(dev, "quirk SSP0_AIF2 enabled");
+ if (cht_rt5645_quirk & CHT_RT5645_PMC_PLT_CLK_0)
+ dev_info(dev, "quirk PMC_PLT_CLK_0 enabled");
}
static int platform_clock_control(struct snd_soc_dapm_widget *w,
@@ -226,16 +229,22 @@ static int cht_aif1_hw_params(struct snd_pcm_substream *substream,
return 0;
}
-/* uncomment when we have a real quirk
static int cht_rt5645_quirk_cb(const struct dmi_system_id *id)
{
cht_rt5645_quirk = (unsigned long)id->driver_data;
return 1;
}
-*/
static const struct dmi_system_id cht_rt5645_quirk_table[] = {
{
+ /* Strago family Chromebooks */
+ .callback = cht_rt5645_quirk_cb,
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_FAMILY, "Intel_Strago"),
+ },
+ .driver_data = (void *)CHT_RT5645_PMC_PLT_CLK_0,
+ },
+ {
},
};
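/*
 * How the table above is consumed, as a minimal sketch: the probe path
 * runs dmi_check_system(), which walks the table and fires the
 * .callback of every entry whose DMI data matches, so on Strago family
 * Chromebooks cht_rt5645_quirk gains CHT_RT5645_PMC_PLT_CLK_0 before
 * the clock is looked up. Hypothetical standalone illustration:
 */
static void cht_apply_dmi_quirks_sketch(struct device *dev)
{
	/* each match runs cht_rt5645_quirk_cb(), storing driver_data */
	dmi_check_system(cht_rt5645_quirk_table);
	log_quirks(dev);	/* prints any quirks now set */
}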
@@ -526,6 +535,7 @@ static int snd_cht_mc_probe(struct platform_device *pdev)
int dai_index = 0;
int ret_val = 0;
int i;
+ const char *mclk_name;
drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
if (!drv)
@@ -662,11 +672,15 @@ static int snd_cht_mc_probe(struct platform_device *pdev)
if (ret_val)
return ret_val;
- drv->mclk = devm_clk_get(&pdev->dev, "pmc_plt_clk_3");
+ if (cht_rt5645_quirk & CHT_RT5645_PMC_PLT_CLK_0)
+ mclk_name = "pmc_plt_clk_0";
+ else
+ mclk_name = "pmc_plt_clk_3";
+
+ drv->mclk = devm_clk_get(&pdev->dev, mclk_name);
if (IS_ERR(drv->mclk)) {
- dev_err(&pdev->dev,
- "Failed to get MCLK from pmc_plt_clk_3: %ld\n",
- PTR_ERR(drv->mclk));
+ dev_err(&pdev->dev, "Failed to get MCLK from %s: %ld\n",
+ mclk_name, PTR_ERR(drv->mclk));
return PTR_ERR(drv->mclk);
}
diff --git a/sound/soc/intel/boards/cml_rt1011_rt5682.c b/sound/soc/intel/boards/cml_rt1011_rt5682.c
new file mode 100644
index 000000000000..a22f97234201
--- /dev/null
+++ b/sound/soc/intel/boards/cml_rt1011_rt5682.c
@@ -0,0 +1,487 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright(c) 2019 Intel Corporation.
+
+/*
+ * Intel Cometlake I2S Machine driver for RT1011 + RT5682 codec
+ */
+
+#include <linux/input.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/dmi.h>
+#include <linux/slab.h>
+#include <asm/cpu_device_id.h>
+#include <linux/acpi.h>
+#include <sound/core.h>
+#include <sound/jack.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/rt5682.h>
+#include <sound/soc-acpi.h>
+#include "../../codecs/rt1011.h"
+#include "../../codecs/rt5682.h"
+#include "../../codecs/hdac_hdmi.h"
+#include "hda_dsp_common.h"
+
+/* The platform clock outputs a 24 MHz clock to the codec as I2S MCLK */
+#define CML_PLAT_CLK 24000000
+#define CML_RT1011_CODEC_DAI "rt1011-aif"
+#define CML_RT5682_CODEC_DAI "rt5682-aif1"
+#define NAME_SIZE 32
+
+static struct snd_soc_jack hdmi_jack[3];
+
+struct hdmi_pcm {
+ struct list_head head;
+ struct snd_soc_dai *codec_dai;
+ int device;
+};
+
+struct card_private {
+ char codec_name[SND_ACPI_I2C_ID_LEN];
+ struct snd_soc_jack headset;
+ struct list_head hdmi_pcm_list;
+ bool common_hdmi_codec_drv;
+};
+
+static const struct snd_kcontrol_new cml_controls[] = {
+ SOC_DAPM_PIN_SWITCH("Headphone Jack"),
+ SOC_DAPM_PIN_SWITCH("Headset Mic"),
+ SOC_DAPM_PIN_SWITCH("TL Ext Spk"),
+ SOC_DAPM_PIN_SWITCH("TR Ext Spk"),
+ SOC_DAPM_PIN_SWITCH("WL Ext Spk"),
+ SOC_DAPM_PIN_SWITCH("WR Ext Spk"),
+};
+
+static const struct snd_soc_dapm_widget cml_rt1011_rt5682_widgets[] = {
+ SND_SOC_DAPM_SPK("TL Ext Spk", NULL),
+ SND_SOC_DAPM_SPK("TR Ext Spk", NULL),
+ SND_SOC_DAPM_SPK("WL Ext Spk", NULL),
+ SND_SOC_DAPM_SPK("WR Ext Spk", NULL),
+ SND_SOC_DAPM_HP("Headphone Jack", NULL),
+ SND_SOC_DAPM_MIC("Headset Mic", NULL),
+ SND_SOC_DAPM_MIC("SoC DMIC", NULL),
+};
+
+static const struct snd_soc_dapm_route cml_rt1011_rt5682_map[] = {
+	/* speakers */
+ {"TL Ext Spk", NULL, "TL SPO"},
+ {"TR Ext Spk", NULL, "TR SPO"},
+ {"WL Ext Spk", NULL, "WL SPO"},
+ {"WR Ext Spk", NULL, "WR SPO"},
+
+ /* HP jack connectors - unknown if we have jack detection */
+ { "Headphone Jack", NULL, "HPOL" },
+ { "Headphone Jack", NULL, "HPOR" },
+
+ /* other jacks */
+ { "IN1P", NULL, "Headset Mic" },
+
+ /* DMIC */
+ {"DMic", NULL, "SoC DMIC"},
+};
+
+static int cml_rt5682_codec_init(struct snd_soc_pcm_runtime *rtd)
+{
+ struct card_private *ctx = snd_soc_card_get_drvdata(rtd->card);
+ struct snd_soc_component *component = rtd->codec_dai->component;
+ struct snd_soc_jack *jack;
+ int ret;
+
+	/* need to enable the ASRC function for the 24 MHz MCLK rate */
+ rt5682_sel_asrc_clk_src(component, RT5682_DA_STEREO1_FILTER |
+ RT5682_AD_STEREO1_FILTER,
+ RT5682_CLK_SEL_I2S1_ASRC);
+
+ /*
+	 * Headset buttons map to the Google Reference headset.
+ * These can be configured by userspace.
+ */
+ ret = snd_soc_card_jack_new(rtd->card, "Headset Jack",
+ SND_JACK_HEADSET | SND_JACK_BTN_0 |
+ SND_JACK_BTN_1 | SND_JACK_BTN_2 |
+ SND_JACK_BTN_3,
+ &ctx->headset, NULL, 0);
+ if (ret) {
+ dev_err(rtd->dev, "Headset Jack creation failed: %d\n", ret);
+ return ret;
+ }
+
+ jack = &ctx->headset;
+
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_0, KEY_PLAYPAUSE);
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_1, KEY_VOICECOMMAND);
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_2, KEY_VOLUMEUP);
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_3, KEY_VOLUMEDOWN);
+ ret = snd_soc_component_set_jack(component, jack, NULL);
+ if (ret)
+ dev_err(rtd->dev, "Headset Jack call-back failed: %d\n", ret);
+
+ return ret;
+};
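/*
 * The snd_jack_set_key() calls above expose the four headset buttons
 * to userspace as an input device reporting KEY_PLAYPAUSE,
 * KEY_VOICECOMMAND, KEY_VOLUMEUP and KEY_VOLUMEDOWN; plug/unplug
 * events are reported separately through the SND_JACK_HEADSET bits.
 */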
+
+static int cml_rt5682_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ int clk_id, clk_freq, pll_out, ret;
+
+ clk_id = RT5682_PLL1_S_MCLK;
+ clk_freq = CML_PLAT_CLK;
+
+ pll_out = params_rate(params) * 512;
+
+ ret = snd_soc_dai_set_pll(codec_dai, 0, clk_id, clk_freq, pll_out);
+ if (ret < 0)
+ dev_warn(rtd->dev, "snd_soc_dai_set_pll err = %d\n", ret);
+
+ /* Configure sysclk for codec */
+ ret = snd_soc_dai_set_sysclk(codec_dai, RT5682_SCLK_S_PLL1,
+ pll_out, SND_SOC_CLOCK_IN);
+ if (ret < 0)
+ dev_warn(rtd->dev, "snd_soc_dai_set_sysclk err = %d\n", ret);
+
+	/*
+	 * slot_width should be equal to or larger than the data length,
+	 * so set them to be the same
+	 */
+ ret = snd_soc_dai_set_tdm_slot(codec_dai, 0x0, 0x0, 2,
+ params_width(params));
+ if (ret < 0)
+ dev_warn(rtd->dev, "set TDM slot err:%d\n", ret);
+ return ret;
+}
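/*
 * Clocking sketch for cml_rt5682_hw_params() above: the PLL input is
 * the fixed 24 MHz platform MCLK and the output is 512 * Fs, so a
 * 48 kHz stream runs the PLL (and the codec sysclk fed from it) at
 * 24.576 MHz. Hypothetical helper showing just the arithmetic:
 */
static unsigned int cml_rt5682_pll_out_hz(unsigned int rate_hz)
{
	/* 512 * Fs: 48000 -> 24576000, 96000 -> 49152000 */
	return rate_hz * 512;
}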
+
+static int cml_rt1011_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *codec_dai;
+ struct snd_soc_card *card = rtd->card;
+ int srate, i, ret = 0;
+
+ srate = params_rate(params);
+
+ for (i = 0; i < rtd->num_codecs; i++) {
+ codec_dai = rtd->codec_dais[i];
+
+		/* PLL sourced from the 100 * Fs BCLK to drive 24-bit data */
+ ret = snd_soc_dai_set_pll(codec_dai, 0, RT1011_PLL1_S_BCLK,
+ 100 * srate, 256 * srate);
+ if (ret < 0) {
+ dev_err(card->dev, "codec_dai clock not set\n");
+ return ret;
+ }
+
+ ret = snd_soc_dai_set_sysclk(codec_dai,
+ RT1011_FS_SYS_PRE_S_PLL1,
+ 256 * srate, SND_SOC_CLOCK_IN);
+ if (ret < 0) {
+ dev_err(card->dev, "codec_dai clock not set\n");
+ return ret;
+ }
+
+		/*
+		 * Codec TDM is configured for 24-bit capture/playback.
+		 * 2-channel playback is spread over 4 codecs: 2 woofers
+		 * and 2 tweeters. The left woofer and tweeter play the
+		 * left playback data, and likewise on the right, so the
+		 * 2 codecs of each woofer/tweeter pair share the same
+		 * Rx slot. The feedback is captured for each codec
+		 * individually, so all 4 codecs use 1 Tx slot each for
+		 * feedback.
+		 */
+ if (!strcmp(codec_dai->component->name, "i2c-10EC1011:00")) {
+ ret = snd_soc_dai_set_tdm_slot(codec_dai,
+ 0x4, 0x1, 4, 24);
+ if (ret < 0)
+ break;
+ }
+ if (!strcmp(codec_dai->component->name, "i2c-10EC1011:02")) {
+ ret = snd_soc_dai_set_tdm_slot(codec_dai,
+ 0x1, 0x1, 4, 24);
+ if (ret < 0)
+ break;
+ }
+ /* TDM Rx slot 2 is used for Right Woofer & Tweeters pair */
+ if (!strcmp(codec_dai->component->name, "i2c-10EC1011:01")) {
+ ret = snd_soc_dai_set_tdm_slot(codec_dai,
+ 0x8, 0x2, 4, 24);
+ if (ret < 0)
+ break;
+ }
+ if (!strcmp(codec_dai->component->name, "i2c-10EC1011:03")) {
+ ret = snd_soc_dai_set_tdm_slot(codec_dai,
+ 0x2, 0x2, 4, 24);
+ if (ret < 0)
+ break;
+ }
+ }
+ if (ret < 0)
+ dev_err(rtd->dev,
+ "set codec TDM slot for %s failed with error %d\n",
+ codec_dai->component->name, ret);
+ return ret;
+}
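/*
 * Slot layout programmed by cml_rt1011_hw_params() above, with
 * snd_soc_dai_set_tdm_slot(dai, tx_mask, rx_mask, slots, width):
 *
 *	device           role  tx_mask  rx_mask
 *	i2c-10EC1011:00  WL    0x4      0x1
 *	i2c-10EC1011:01  WR    0x8      0x2
 *	i2c-10EC1011:02  TL    0x1      0x1
 *	i2c-10EC1011:03  TR    0x2      0x2
 *
 * A table-driven equivalent of the strcmp() chain could look like the
 * hypothetical sketch below (same masks, 4 slots of 24 bits):
 */
static const struct {
	const char *name;
	unsigned int tx_mask, rx_mask;
} cml_rt1011_tdm_map[] = {
	{ "i2c-10EC1011:00", 0x4, 0x1 },	/* WL */
	{ "i2c-10EC1011:01", 0x8, 0x2 },	/* WR */
	{ "i2c-10EC1011:02", 0x1, 0x1 },	/* TL */
	{ "i2c-10EC1011:03", 0x2, 0x2 },	/* TR */
};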
+
+static struct snd_soc_ops cml_rt5682_ops = {
+ .hw_params = cml_rt5682_hw_params,
+};
+
+static const struct snd_soc_ops cml_rt1011_ops = {
+ .hw_params = cml_rt1011_hw_params,
+};
+
+static int sof_card_late_probe(struct snd_soc_card *card)
+{
+ struct card_private *ctx = snd_soc_card_get_drvdata(card);
+ struct snd_soc_component *component = NULL;
+ char jack_name[NAME_SIZE];
+ struct hdmi_pcm *pcm;
+ int ret, i = 0;
+
+ pcm = list_first_entry(&ctx->hdmi_pcm_list, struct hdmi_pcm,
+ head);
+ component = pcm->codec_dai->component;
+
+ if (ctx->common_hdmi_codec_drv)
+ return hda_dsp_hdmi_build_controls(card, component);
+
+ list_for_each_entry(pcm, &ctx->hdmi_pcm_list, head) {
+ component = pcm->codec_dai->component;
+ snprintf(jack_name, sizeof(jack_name),
+ "HDMI/DP, pcm=%d Jack", pcm->device);
+ ret = snd_soc_card_jack_new(card, jack_name,
+ SND_JACK_AVOUT, &hdmi_jack[i],
+ NULL, 0);
+ if (ret)
+ return ret;
+
+ ret = hdac_hdmi_jack_init(pcm->codec_dai, pcm->device,
+ &hdmi_jack[i]);
+ if (ret < 0)
+ return ret;
+
+ i++;
+ }
+ if (!component)
+ return -EINVAL;
+
+ return hdac_hdmi_jack_port_init(component, &card->dapm);
+}
+
+static int hdmi_init(struct snd_soc_pcm_runtime *rtd)
+{
+ struct card_private *ctx = snd_soc_card_get_drvdata(rtd->card);
+ struct snd_soc_dai *dai = rtd->codec_dai;
+ struct hdmi_pcm *pcm;
+
+ pcm = devm_kzalloc(rtd->card->dev, sizeof(*pcm), GFP_KERNEL);
+ if (!pcm)
+ return -ENOMEM;
+
+ pcm->device = dai->id;
+ pcm->codec_dai = dai;
+
+ list_add_tail(&pcm->head, &ctx->hdmi_pcm_list);
+
+ return 0;
+}
+
+/* Cometlake digital audio interface glue - connects codec <--> CPU */
+
+SND_SOC_DAILINK_DEF(ssp0_pin,
+ DAILINK_COMP_ARRAY(COMP_CPU("SSP0 Pin")));
+SND_SOC_DAILINK_DEF(ssp0_codec,
+ DAILINK_COMP_ARRAY(COMP_CODEC("i2c-10EC5682:00",
+ CML_RT5682_CODEC_DAI)));
+
+SND_SOC_DAILINK_DEF(ssp1_pin,
+ DAILINK_COMP_ARRAY(COMP_CPU("SSP1 Pin")));
+SND_SOC_DAILINK_DEF(ssp1_codec,
+ DAILINK_COMP_ARRAY(
+ /* WL */ COMP_CODEC("i2c-10EC1011:00", CML_RT1011_CODEC_DAI),
+ /* WR */ COMP_CODEC("i2c-10EC1011:01", CML_RT1011_CODEC_DAI),
+ /* TL */ COMP_CODEC("i2c-10EC1011:02", CML_RT1011_CODEC_DAI),
+ /* TR */ COMP_CODEC("i2c-10EC1011:03", CML_RT1011_CODEC_DAI)));
+
+SND_SOC_DAILINK_DEF(dmic_pin,
+ DAILINK_COMP_ARRAY(COMP_CPU("DMIC01 Pin")));
+
+SND_SOC_DAILINK_DEF(dmic16k_pin,
+ DAILINK_COMP_ARRAY(COMP_CPU("DMIC16k Pin")));
+
+SND_SOC_DAILINK_DEF(dmic_codec,
+ DAILINK_COMP_ARRAY(COMP_CODEC("dmic-codec", "dmic-hifi")));
+
+SND_SOC_DAILINK_DEF(idisp1_pin,
+ DAILINK_COMP_ARRAY(COMP_CPU("iDisp1 Pin")));
+SND_SOC_DAILINK_DEF(idisp1_codec,
+ DAILINK_COMP_ARRAY(COMP_CODEC("ehdaudio0D2", "intel-hdmi-hifi1")));
+
+SND_SOC_DAILINK_DEF(idisp2_pin,
+ DAILINK_COMP_ARRAY(COMP_CPU("iDisp2 Pin")));
+SND_SOC_DAILINK_DEF(idisp2_codec,
+ DAILINK_COMP_ARRAY(COMP_CODEC("ehdaudio0D2", "intel-hdmi-hifi2")));
+
+SND_SOC_DAILINK_DEF(idisp3_pin,
+ DAILINK_COMP_ARRAY(COMP_CPU("iDisp3 Pin")));
+SND_SOC_DAILINK_DEF(idisp3_codec,
+ DAILINK_COMP_ARRAY(COMP_CODEC("ehdaudio0D2", "intel-hdmi-hifi3")));
+
+SND_SOC_DAILINK_DEF(platform,
+ DAILINK_COMP_ARRAY(COMP_PLATFORM("0000:00:1f.3")));
+
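/*
 * DAI-link wiring sketch: each SND_SOC_DAILINK_DEF() above expands to
 * an array of snd_soc_dai_link_component entries, and
 * SND_SOC_DAILINK_REG(cpu, codec, platform) in the table below stitches
 * one cpu/codec/platform triple into a snd_soc_dai_link. The four-entry
 * ssp1_codec array is what lets a single SSP1 back end drive all four
 * RT1011 amplifiers.
 */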
+static struct snd_soc_dai_link cml_rt1011_rt5682_dailink[] = {
+ /* Back End DAI links */
+ {
+ /* SSP0 - Codec */
+ .name = "SSP0-Codec",
+ .id = 0,
+ .init = cml_rt5682_codec_init,
+ .ignore_pmdown_time = 1,
+ .ops = &cml_rt5682_ops,
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+ .no_pcm = 1,
+ SND_SOC_DAILINK_REG(ssp0_pin, ssp0_codec, platform),
+ },
+ {
+ .name = "dmic01",
+ .id = 1,
+ .ignore_suspend = 1,
+ .dpcm_capture = 1,
+ .no_pcm = 1,
+ SND_SOC_DAILINK_REG(dmic_pin, dmic_codec, platform),
+ },
+ {
+ .name = "dmic16k",
+ .id = 2,
+ .ignore_suspend = 1,
+ .dpcm_capture = 1,
+ .no_pcm = 1,
+ SND_SOC_DAILINK_REG(dmic16k_pin, dmic_codec, platform),
+ },
+ {
+ .name = "iDisp1",
+ .id = 3,
+ .init = hdmi_init,
+ .dpcm_playback = 1,
+ .no_pcm = 1,
+ SND_SOC_DAILINK_REG(idisp1_pin, idisp1_codec, platform),
+ },
+ {
+ .name = "iDisp2",
+ .id = 4,
+ .init = hdmi_init,
+ .dpcm_playback = 1,
+ .no_pcm = 1,
+ SND_SOC_DAILINK_REG(idisp2_pin, idisp2_codec, platform),
+ },
+ {
+ .name = "iDisp3",
+ .id = 5,
+ .init = hdmi_init,
+ .dpcm_playback = 1,
+ .no_pcm = 1,
+ SND_SOC_DAILINK_REG(idisp3_pin, idisp3_codec, platform),
+ },
+ {
+		/*
+		 * SSP1 - Codec: added at the end of the list so that
+		 * common topologies can be reused for the other
+		 * endpoints, changing only SSP1's codec.
+		 */
+ .name = "SSP1-Codec",
+ .id = 6,
+ .dpcm_playback = 1,
+ .dpcm_capture = 1, /* Capture stream provides Feedback */
+ .no_pcm = 1,
+ .ops = &cml_rt1011_ops,
+ SND_SOC_DAILINK_REG(ssp1_pin, ssp1_codec, platform),
+ },
+};
+
+static struct snd_soc_codec_conf rt1011_conf[] = {
+ {
+ .dev_name = "i2c-10EC1011:00",
+ .name_prefix = "WL",
+ },
+ {
+ .dev_name = "i2c-10EC1011:01",
+ .name_prefix = "WR",
+ },
+ {
+ .dev_name = "i2c-10EC1011:02",
+ .name_prefix = "TL",
+ },
+ {
+ .dev_name = "i2c-10EC1011:03",
+ .name_prefix = "TR",
+ },
+};
+
+/* Cometlake audio machine driver for RT1011 and RT5682 */
+static struct snd_soc_card snd_soc_card_cml = {
+ .name = "cml_rt1011_rt5682",
+ .dai_link = cml_rt1011_rt5682_dailink,
+ .num_links = ARRAY_SIZE(cml_rt1011_rt5682_dailink),
+ .codec_conf = rt1011_conf,
+ .num_configs = ARRAY_SIZE(rt1011_conf),
+ .dapm_widgets = cml_rt1011_rt5682_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(cml_rt1011_rt5682_widgets),
+ .dapm_routes = cml_rt1011_rt5682_map,
+ .num_dapm_routes = ARRAY_SIZE(cml_rt1011_rt5682_map),
+ .controls = cml_controls,
+ .num_controls = ARRAY_SIZE(cml_controls),
+ .fully_routed = true,
+ .late_probe = sof_card_late_probe,
+};
+
+static int snd_cml_rt1011_probe(struct platform_device *pdev)
+{
+ struct card_private *ctx;
+ struct snd_soc_acpi_mach *mach;
+ const char *platform_name;
+ int ret;
+
+ ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_ATOMIC);
+ if (!ctx)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&ctx->hdmi_pcm_list);
+ mach = (&pdev->dev)->platform_data;
+ snd_soc_card_cml.dev = &pdev->dev;
+ platform_name = mach->mach_params.platform;
+
+ /* set platform name for each dailink */
+ ret = snd_soc_fixup_dai_links_platform_name(&snd_soc_card_cml,
+ platform_name);
+ if (ret)
+ return ret;
+
+ ctx->common_hdmi_codec_drv = mach->mach_params.common_hdmi_codec_drv;
+
+ snd_soc_card_set_drvdata(&snd_soc_card_cml, ctx);
+
+ return devm_snd_soc_register_card(&pdev->dev, &snd_soc_card_cml);
+}
+
+static struct platform_driver snd_cml_rt1011_rt5682_driver = {
+ .probe = snd_cml_rt1011_probe,
+ .driver = {
+ .name = "cml_rt1011_rt5682",
+ .pm = &snd_soc_pm_ops,
+ },
+};
+module_platform_driver(snd_cml_rt1011_rt5682_driver);
+
+/* Module information */
+MODULE_DESCRIPTION("Cometlake Audio Machine driver - RT1011 and RT5682 in I2S mode");
+MODULE_AUTHOR("Naveen Manohar <naveen.m@intel.com>");
+MODULE_AUTHOR("Sathya Prakash M R <sathya.prakash.m.r@intel.com>");
+MODULE_AUTHOR("Shuming Fan <shumingf@realtek.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:cml_rt1011_rt5682");
diff --git a/sound/soc/intel/boards/glk_rt5682_max98357a.c b/sound/soc/intel/boards/glk_rt5682_max98357a.c
index bd2d371f2acd..b36264d1d1cd 100644
--- a/sound/soc/intel/boards/glk_rt5682_max98357a.c
+++ b/sound/soc/intel/boards/glk_rt5682_max98357a.c
@@ -19,6 +19,7 @@
#include <sound/soc-acpi.h>
#include "../../codecs/rt5682.h"
#include "../../codecs/hdac_hdmi.h"
+#include "hda_dsp_common.h"
/* The platform clock outputs 19.2Mhz clock to codec as I2S MCLK */
#define GLK_PLAT_CLK_FREQ 19200000
@@ -41,6 +42,7 @@ struct glk_hdmi_pcm {
struct glk_card_private {
struct snd_soc_jack geminilake_headset;
struct list_head hdmi_pcm_list;
+ bool common_hdmi_codec_drv;
};
enum {
@@ -545,6 +547,13 @@ static int glk_card_late_probe(struct snd_soc_card *card)
int err = 0;
int i = 0;
+ pcm = list_first_entry(&ctx->hdmi_pcm_list, struct glk_hdmi_pcm,
+ head);
+ component = pcm->codec_dai->component;
+
+ if (ctx->common_hdmi_codec_drv)
+ return hda_dsp_hdmi_build_controls(card, component);
+
list_for_each_entry(pcm, &ctx->hdmi_pcm_list, head) {
component = pcm->codec_dai->component;
snprintf(jack_name, sizeof(jack_name),
@@ -612,6 +621,8 @@ static int geminilake_audio_probe(struct platform_device *pdev)
if (ret)
return ret;
+ ctx->common_hdmi_codec_drv = mach->mach_params.common_hdmi_codec_drv;
+
return devm_snd_soc_register_card(&pdev->dev, card);
}
diff --git a/sound/soc/intel/boards/hda_dsp_common.c b/sound/soc/intel/boards/hda_dsp_common.c
new file mode 100644
index 000000000000..ed36b68d6705
--- /dev/null
+++ b/sound/soc/intel/boards/hda_dsp_common.c
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright(c) 2019 Intel Corporation. All rights reserved.
+
+#include <sound/pcm.h>
+#include <sound/soc.h>
+#include <sound/hda_codec.h>
+#include <sound/hda_i915.h>
+#include "../../codecs/hdac_hda.h"
+
+#include "hda_dsp_common.h"
+
+/*
+ * Search the card topology and return the PCM handle
+ * matching the Nth HDMI device (zero-based index).
+ */
+struct snd_pcm *hda_dsp_hdmi_pcm_handle(struct snd_soc_card *card,
+ int hdmi_idx)
+{
+ struct snd_soc_pcm_runtime *rtd;
+ struct snd_pcm *spcm;
+ int i = 0;
+
+ for_each_card_rtds(card, rtd) {
+ spcm = rtd->pcm ?
+ rtd->pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].pcm : NULL;
+ if (spcm && strstr(spcm->id, "HDMI")) {
+ if (i == hdmi_idx)
+ return rtd->pcm;
+ ++i;
+ }
+ }
+
+ return NULL;
+}
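/*
 * Usage sketch for the helper above: HDMI converters are numbered in
 * the order their PCMs appear on the card, so index N returns the Nth
 * playback PCM whose id contains "HDMI". Hypothetical example:
 */
static void hda_dsp_hdmi_pcm_example(struct snd_soc_card *card)
{
	struct snd_pcm *spcm = hda_dsp_hdmi_pcm_handle(card, 0);

	if (spcm)
		dev_dbg(card->dev, "first HDMI PCM is device %d\n",
			spcm->device);
}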
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_AUDIO_CODEC)
+/*
+ * Search card topology and register HDMI PCM related controls
+ * to codec driver.
+ */
+int hda_dsp_hdmi_build_controls(struct snd_soc_card *card,
+ struct snd_soc_component *comp)
+{
+ struct hdac_hda_priv *hda_pvt;
+ struct hda_codec *hcodec;
+ struct snd_pcm *spcm;
+ struct hda_pcm *hpcm;
+ int err = 0, i = 0;
+
+ if (!comp)
+ return -EINVAL;
+
+ hda_pvt = snd_soc_component_get_drvdata(comp);
+ hcodec = &hda_pvt->codec;
+
+ list_for_each_entry(hpcm, &hcodec->pcm_list_head, list) {
+ spcm = hda_dsp_hdmi_pcm_handle(card, i);
+ if (spcm) {
+ hpcm->pcm = spcm;
+ hpcm->device = spcm->device;
+ dev_dbg(card->dev,
+ "%s: mapping HDMI converter %d to PCM %d (%p)\n",
+ __func__, i, hpcm->device, spcm);
+ } else {
+			hpcm->pcm = NULL;
+ hpcm->device = SNDRV_PCM_INVALID_DEVICE;
+ dev_warn(card->dev,
+				 "%s: no PCM in topology for HDMI converter %d\n",
+ __func__, i);
+ }
+ i++;
+ }
+ snd_hdac_display_power(hcodec->core.bus,
+ HDA_CODEC_IDX_CONTROLLER, true);
+ err = snd_hda_codec_build_controls(hcodec);
+ if (err < 0)
+ dev_err(card->dev, "unable to create controls %d\n", err);
+ snd_hdac_display_power(hcodec->core.bus,
+ HDA_CODEC_IDX_CONTROLLER, false);
+
+ return err;
+}
+
+#endif
diff --git a/sound/soc/intel/boards/hda_dsp_common.h b/sound/soc/intel/boards/hda_dsp_common.h
new file mode 100644
index 000000000000..431f7f09dccb
--- /dev/null
+++ b/sound/soc/intel/boards/hda_dsp_common.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright(c) 2019 Intel Corporation.
+ */
+
+/*
+ * This file defines helper functions used by multiple
+ * Intel HDA based machine drivers.
+ */
+
+#ifndef __HDA_DSP_COMMON_H
+#define __HDA_DSP_COMMON_H
+
+#include <sound/hda_codec.h>
+#include <sound/hda_i915.h>
+#include "../../codecs/hdac_hda.h"
+
+struct snd_pcm *hda_dsp_hdmi_pcm_handle(struct snd_soc_card *card,
+ int hdmi_idx);
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_AUDIO_CODEC)
+int hda_dsp_hdmi_build_controls(struct snd_soc_card *card,
+ struct snd_soc_component *comp);
+#else
+static inline int hda_dsp_hdmi_build_controls(struct snd_soc_card *card,
+ struct snd_soc_component *comp)
+{
+ return -EINVAL;
+}
+#endif
+
+#endif /* __HDA_DSP_COMMON_H */
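/*
 * The header above uses the usual IS_ENABLED() stub pattern: callers
 * keep a single unconditional call site, and when the HDA codec support
 * is compiled out the static inline returns -EINVAL and lets the
 * compiler drop the dead branch. Generic form of the pattern, with
 * hypothetical names:
 */
#if IS_ENABLED(CONFIG_FOO)
int foo_do_thing(struct foo_ctx *ctx);
#else
static inline int foo_do_thing(struct foo_ctx *ctx)
{
	return -EINVAL;	/* feature compiled out */
}
#endif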
diff --git a/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c b/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c
index 74dda8784f1a..3e5f6bead229 100644
--- a/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c
+++ b/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c
@@ -22,6 +22,9 @@
#include "../../codecs/rt5514.h"
#include "../../codecs/rt5663.h"
#include "../../codecs/hdac_hdmi.h"
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
#define KBL_REALTEK_CODEC_DAI "rt5663-aif"
#define KBL_REALTEK_DMIC_CODEC_DAI "rt5514-aif1"
@@ -50,6 +53,8 @@ struct kbl_codec_private {
struct snd_soc_jack kabylake_headset;
struct list_head hdmi_pcm_list;
struct snd_soc_jack kabylake_hdmi[2];
+ struct clk *mclk;
+ struct clk *sclk;
};
enum {
@@ -71,6 +76,61 @@ static const struct snd_kcontrol_new kabylake_controls[] = {
SOC_DAPM_PIN_SWITCH("DMIC"),
};
+static int platform_clock_control(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *k, int event)
+{
+ struct snd_soc_dapm_context *dapm = w->dapm;
+ struct snd_soc_card *card = dapm->card;
+ struct kbl_codec_private *priv = snd_soc_card_get_drvdata(card);
+ int ret = 0;
+
+	/*
+	 * MCLK/SCLK need to be ON early for successful synchronization of
+	 * the codec's internal clock, and the clocks are turned off during
+	 * POST_PMD after the stream is stopped.
+	 */
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ /* Enable MCLK */
+ ret = clk_set_rate(priv->mclk, 24000000);
+ if (ret < 0) {
+ dev_err(card->dev, "Can't set rate for mclk, err: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(priv->mclk);
+ if (ret < 0) {
+ dev_err(card->dev, "Can't enable mclk, err: %d\n", ret);
+ return ret;
+ }
+
+ /* Enable SCLK */
+ ret = clk_set_rate(priv->sclk, 3072000);
+ if (ret < 0) {
+ dev_err(card->dev, "Can't set rate for sclk, err: %d\n",
+ ret);
+ clk_disable_unprepare(priv->mclk);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(priv->sclk);
+ if (ret < 0) {
+ dev_err(card->dev, "Can't enable sclk, err: %d\n", ret);
+ clk_disable_unprepare(priv->mclk);
+ }
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ clk_disable_unprepare(priv->mclk);
+ clk_disable_unprepare(priv->sclk);
+ break;
+ default:
+ return 0;
+ }
+
+ return 0;
+}
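/*
 * Rate note for the fixed values above: SCLK is set to 3.072 MHz,
 * which is 64 bit-clocks per frame at 48 kHz (48000 * 64 = 3072000),
 * alongside the 24 MHz MCLK. Also note the unwind order: a failed
 * SCLK step disables the already-enabled MCLK before returning.
 */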
+
static const struct snd_soc_dapm_widget kabylake_widgets[] = {
SND_SOC_DAPM_HP("Headphone Jack", NULL),
SND_SOC_DAPM_MIC("Headset Mic", NULL),
@@ -79,11 +139,15 @@ static const struct snd_soc_dapm_widget kabylake_widgets[] = {
SND_SOC_DAPM_MIC("DMIC", NULL),
SND_SOC_DAPM_SPK("HDMI1", NULL),
SND_SOC_DAPM_SPK("HDMI2", NULL),
+ SND_SOC_DAPM_SUPPLY("Platform Clock", SND_SOC_NOPM, 0, 0,
+ platform_clock_control, SND_SOC_DAPM_PRE_PMU |
+ SND_SOC_DAPM_POST_PMD),
};
static const struct snd_soc_dapm_route kabylake_map[] = {
/* Headphones */
+ { "Headphone Jack", NULL, "Platform Clock" },
{ "Headphone Jack", NULL, "HPOL" },
{ "Headphone Jack", NULL, "HPOR" },
@@ -92,6 +156,7 @@ static const struct snd_soc_dapm_route kabylake_map[] = {
{ "Right Spk", NULL, "Right BE_OUT" },
/* other jacks */
+ { "Headset Mic", NULL, "Platform Clock" },
{ "IN1P", NULL, "Headset Mic" },
{ "IN1N", NULL, "Headset Mic" },
@@ -400,6 +465,9 @@ static int kabylake_dmic_startup(struct snd_pcm_substream *substream)
snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
dmic_constraints);
+ runtime->hw.formats = SNDRV_PCM_FMTBIT_S16_LE;
+ snd_pcm_hw_constraint_msbits(runtime, 0, 16, 16);
+
return snd_pcm_hw_constraint_list(substream->runtime, 0,
SNDRV_PCM_HW_PARAM_RATE, &constraints_rates);
}
@@ -588,6 +656,55 @@ static struct snd_soc_dai_link kabylake_dais[] = {
},
};
+static int kabylake_set_bias_level(struct snd_soc_card *card,
+ struct snd_soc_dapm_context *dapm, enum snd_soc_bias_level level)
+{
+ struct snd_soc_component *component = dapm->component;
+ struct kbl_codec_private *priv = snd_soc_card_get_drvdata(card);
+ int ret = 0;
+
+ if (!component || strcmp(component->name, RT5514_DEV_NAME))
+ return 0;
+
+ if (IS_ERR(priv->mclk))
+ return 0;
+
+	/*
+	 * The mclk must be controlled directly in the set_bias_level
+	 * function for the rt5514 codec, or recording can break.
+	 */
+ switch (level) {
+ case SND_SOC_BIAS_PREPARE:
+ if (dapm->bias_level == SND_SOC_BIAS_ON) {
+ dev_dbg(card->dev, "Disable mclk");
+ clk_disable_unprepare(priv->mclk);
+ } else {
+ dev_dbg(card->dev, "Enable mclk");
+ ret = clk_set_rate(priv->mclk, 24000000);
+ if (ret) {
+ dev_err(card->dev, "Can't set rate for mclk, err: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(priv->mclk);
+ if (ret) {
+ dev_err(card->dev, "Can't enable mclk, err: %d\n",
+ ret);
+
+ /* mclk is already enabled in FW */
+ ret = 0;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
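/*
 * Transition sketch for kabylake_set_bias_level() above, which only
 * acts on the rt5514 component:
 *
 *	STANDBY -> PREPARE : set mclk to 24 MHz and enable it
 *	ON      -> PREPARE : disable mclk (stream stopping)
 *
 * An enable failure is deliberately swallowed (ret = 0) because the
 * firmware may already hold the clock enabled.
 */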
+
static int kabylake_card_late_probe(struct snd_soc_card *card)
{
struct kbl_codec_private *ctx = snd_soc_card_get_drvdata(card);
@@ -623,10 +740,11 @@ static int kabylake_card_late_probe(struct snd_soc_card *card)
* kabylake audio machine driver for MAX98927 + RT5514 + RT5663
*/
static struct snd_soc_card kabylake_audio_card = {
- .name = "kbl_r5514_5663_max",
+ .name = "kbl-r5514-5663-max",
.owner = THIS_MODULE,
.dai_link = kabylake_dais,
.num_links = ARRAY_SIZE(kabylake_dais),
+ .set_bias_level = kabylake_set_bias_level,
.controls = kabylake_controls,
.num_controls = ARRAY_SIZE(kabylake_controls),
.dapm_widgets = kabylake_widgets,
@@ -643,6 +761,7 @@ static int kabylake_audio_probe(struct platform_device *pdev)
{
struct kbl_codec_private *ctx;
struct snd_soc_acpi_mach *mach;
+ int ret = 0;
ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
@@ -658,6 +777,34 @@ static int kabylake_audio_probe(struct platform_device *pdev)
dmic_constraints = mach->mach_params.dmic_num == 2 ?
&constraints_dmic_2ch : &constraints_dmic_channels;
+ ctx->mclk = devm_clk_get(&pdev->dev, "ssp1_mclk");
+ if (IS_ERR(ctx->mclk)) {
+ ret = PTR_ERR(ctx->mclk);
+ if (ret == -ENOENT) {
+ dev_info(&pdev->dev,
+ "Failed to get ssp1_mclk, defer probe\n");
+ return -EPROBE_DEFER;
+ }
+
+ dev_err(&pdev->dev, "Failed to get ssp1_mclk with err:%d\n",
+ ret);
+ return ret;
+ }
+
+ ctx->sclk = devm_clk_get(&pdev->dev, "ssp1_sclk");
+ if (IS_ERR(ctx->sclk)) {
+ ret = PTR_ERR(ctx->sclk);
+ if (ret == -ENOENT) {
+ dev_info(&pdev->dev,
+ "Failed to get ssp1_sclk, defer probe\n");
+ return -EPROBE_DEFER;
+ }
+
+ dev_err(&pdev->dev, "Failed to get ssp1_sclk with err:%d\n",
+ ret);
+ return ret;
+ }
+
return devm_snd_soc_register_card(&pdev->dev, &kabylake_audio_card);
}
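/*
 * The clock lookups above use the standard probe-defer idiom: -ENOENT
 * from devm_clk_get() means the clock provider has not registered yet,
 * so the probe returns -EPROBE_DEFER and the driver core retries it
 * later. Condensed form of the idiom (hypothetical clock id):
 */
static struct clk *example_get_clk_or_defer(struct device *dev)
{
	struct clk *clk = devm_clk_get(dev, "example_clk");

	if (IS_ERR(clk) && PTR_ERR(clk) == -ENOENT)
		return ERR_PTR(-EPROBE_DEFER);
	return clk;
}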
diff --git a/sound/soc/intel/boards/skl_hda_dsp_common.c b/sound/soc/intel/boards/skl_hda_dsp_common.c
index 58409b6e476e..eb419e1ec42b 100644
--- a/sound/soc/intel/boards/skl_hda_dsp_common.c
+++ b/sound/soc/intel/boards/skl_hda_dsp_common.c
@@ -14,6 +14,9 @@
#include "../../codecs/hdac_hdmi.h"
#include "skl_hda_dsp_common.h"
+#include <sound/hda_codec.h>
+#include "../../codecs/hdac_hda.h"
+
#define NAME_SIZE 32
int skl_hda_hdmi_add_pcm(struct snd_soc_card *card, int device)
@@ -136,6 +139,9 @@ int skl_hda_hdmi_jack_init(struct snd_soc_card *card)
char jack_name[NAME_SIZE];
int err;
+ if (ctx->common_hdmi_codec_drv)
+ return skl_hda_hdmi_build_controls(card);
+
list_for_each_entry(pcm, &ctx->hdmi_pcm_list, head) {
component = pcm->codec_dai->component;
snprintf(jack_name, sizeof(jack_name),
diff --git a/sound/soc/intel/boards/skl_hda_dsp_common.h b/sound/soc/intel/boards/skl_hda_dsp_common.h
index daa582e513b2..d6150670ca05 100644
--- a/sound/soc/intel/boards/skl_hda_dsp_common.h
+++ b/sound/soc/intel/boards/skl_hda_dsp_common.h
@@ -8,12 +8,15 @@
* platforms with HDA Codecs.
*/
-#ifndef __SOUND_SOC_HDA_DSP_COMMON_H
-#define __SOUND_SOC_HDA_DSP_COMMON_H
+#ifndef __SKL_HDA_DSP_COMMON_H
+#define __SKL_HDA_DSP_COMMON_H
#include <linux/module.h>
#include <linux/platform_device.h>
#include <sound/core.h>
#include <sound/jack.h>
+#include <sound/hda_codec.h>
+#include "../../codecs/hdac_hda.h"
+#include "hda_dsp_common.h"
#define HDA_DSP_MAX_BE_DAI_LINKS 7
@@ -29,10 +32,30 @@ struct skl_hda_private {
int pcm_count;
int dai_index;
const char *platform_name;
+ bool common_hdmi_codec_drv;
};
extern struct snd_soc_dai_link skl_hda_be_dai_links[HDA_DSP_MAX_BE_DAI_LINKS];
int skl_hda_hdmi_jack_init(struct snd_soc_card *card);
int skl_hda_hdmi_add_pcm(struct snd_soc_card *card, int device);
+/*
+ * Search card topology and register HDMI PCM related controls
+ * to codec driver.
+ */
+static inline int skl_hda_hdmi_build_controls(struct snd_soc_card *card)
+{
+ struct skl_hda_private *ctx = snd_soc_card_get_drvdata(card);
+ struct snd_soc_component *component;
+ struct skl_hda_hdmi_pcm *pcm;
+
+ pcm = list_first_entry(&ctx->hdmi_pcm_list, struct skl_hda_hdmi_pcm,
+ head);
+ component = pcm->codec_dai->component;
+ if (!component)
+ return -EINVAL;
+
+ return hda_dsp_hdmi_build_controls(card, component);
+}
+
#endif /* __SOUND_SOC_HDA_DSP_COMMON_H */
diff --git a/sound/soc/intel/boards/skl_hda_dsp_generic.c b/sound/soc/intel/boards/skl_hda_dsp_generic.c
index 1778acdc367c..4e45901e3a2f 100644
--- a/sound/soc/intel/boards/skl_hda_dsp_generic.c
+++ b/sound/soc/intel/boards/skl_hda_dsp_generic.c
@@ -90,7 +90,7 @@ skl_hda_add_dai_link(struct snd_soc_card *card, struct snd_soc_dai_link *link)
}
static struct snd_soc_card hda_soc_card = {
- .name = "skl_hda_card",
+ .name = "hda-dsp",
.owner = THIS_MODULE,
.dai_link = skl_hda_be_dai_links,
.dapm_widgets = skl_hda_widgets,
@@ -178,6 +178,7 @@ static int skl_hda_audio_probe(struct platform_device *pdev)
ctx->pcm_count = hda_soc_card.num_links;
ctx->dai_index = 1; /* hdmi codec dai name starts from index 1 */
ctx->platform_name = mach->mach_params.platform;
+ ctx->common_hdmi_codec_drv = mach->mach_params.common_hdmi_codec_drv;
hda_soc_card.dev = &pdev->dev;
snd_soc_card_set_drvdata(&hda_soc_card, ctx);
diff --git a/sound/soc/intel/boards/sof_rt5682.c b/sound/soc/intel/boards/sof_rt5682.c
index 4f6e58c3954a..751b8ea6ae1f 100644
--- a/sound/soc/intel/boards/sof_rt5682.c
+++ b/sound/soc/intel/boards/sof_rt5682.c
@@ -21,6 +21,7 @@
#include "../../codecs/rt5682.h"
#include "../../codecs/hdac_hdmi.h"
#include "../common/soc-intel-quirks.h"
+#include "hda_dsp_common.h"
#define NAME_SIZE 32
@@ -53,6 +54,7 @@ struct sof_card_private {
struct clk *mclk;
struct snd_soc_jack sof_headset;
struct list_head hdmi_pcm_list;
+ bool common_hdmi_codec_drv;
};
static int sof_rt5682_quirk_cb(const struct dmi_system_id *id)
@@ -274,6 +276,13 @@ static int sof_card_late_probe(struct snd_soc_card *card)
if (is_legacy_cpu)
return 0;
+ pcm = list_first_entry(&ctx->hdmi_pcm_list, struct sof_hdmi_pcm,
+ head);
+ component = pcm->codec_dai->component;
+
+ if (ctx->common_hdmi_codec_drv)
+ return hda_dsp_hdmi_build_controls(card, component);
+
list_for_each_entry(pcm, &ctx->hdmi_pcm_list, head) {
component = pcm->codec_dai->component;
snprintf(jack_name, sizeof(jack_name),
@@ -370,7 +379,7 @@ static int dmic_init(struct snd_soc_pcm_runtime *rtd)
/* sof audio machine driver for rt5682 codec */
static struct snd_soc_card sof_audio_card_rt5682 = {
- .name = "sof_rt5682",
+ .name = "rt5682", /* the sof- prefix is added by the core */
.owner = THIS_MODULE,
.controls = sof_controls,
.num_controls = ARRAY_SIZE(sof_controls),
@@ -651,6 +660,8 @@ static int sof_audio_probe(struct platform_device *pdev)
if (ret)
return ret;
+ ctx->common_hdmi_codec_drv = mach->mach_params.common_hdmi_codec_drv;
+
snd_soc_card_set_drvdata(&sof_audio_card_rt5682, ctx);
return devm_snd_soc_register_card(&pdev->dev,
diff --git a/sound/soc/intel/common/Makefile b/sound/soc/intel/common/Makefile
index 18d9630ae9a2..bd352878f89a 100644
--- a/sound/soc/intel/common/Makefile
+++ b/sound/soc/intel/common/Makefile
@@ -7,8 +7,10 @@ snd-soc-acpi-intel-match-objs := soc-acpi-intel-byt-match.o soc-acpi-intel-cht-m
soc-acpi-intel-hsw-bdw-match.o \
soc-acpi-intel-skl-match.o soc-acpi-intel-kbl-match.o \
soc-acpi-intel-bxt-match.o soc-acpi-intel-glk-match.o \
- soc-acpi-intel-cnl-match.o soc-acpi-intel-icl-match.o \
+ soc-acpi-intel-cnl-match.o soc-acpi-intel-cfl-match.o \
+ soc-acpi-intel-cml-match.o soc-acpi-intel-icl-match.o \
soc-acpi-intel-tgl-match.o soc-acpi-intel-ehl-match.o \
+ soc-acpi-intel-jsl-match.o \
soc-acpi-intel-hda-match.o
obj-$(CONFIG_SND_SOC_INTEL_SST) += snd-soc-sst-dsp.o snd-soc-sst-ipc.o
diff --git a/sound/soc/intel/common/soc-acpi-intel-cfl-match.c b/sound/soc/intel/common/soc-acpi-intel-cfl-match.c
new file mode 100644
index 000000000000..d6fd2026d0b8
--- /dev/null
+++ b/sound/soc/intel/common/soc-acpi-intel-cfl-match.c
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * soc-acpi-intel-cfl-match.c - tables and support for CFL ACPI enumeration.
+ *
+ * Copyright (c) 2019, Intel Corporation.
+ *
+ */
+
+#include <sound/soc-acpi.h>
+#include <sound/soc-acpi-intel-match.h>
+
+struct snd_soc_acpi_mach snd_soc_acpi_intel_cfl_machines[] = {
+ {},
+};
+EXPORT_SYMBOL_GPL(snd_soc_acpi_intel_cfl_machines);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Intel Common ACPI Match module");
diff --git a/sound/soc/intel/common/soc-acpi-intel-cml-match.c b/sound/soc/intel/common/soc-acpi-intel-cml-match.c
new file mode 100644
index 000000000000..5d08ae066738
--- /dev/null
+++ b/sound/soc/intel/common/soc-acpi-intel-cml-match.c
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * soc-acpi-intel-cml-match.c - tables and support for CML ACPI enumeration.
+ *
+ * Copyright (c) 2019, Intel Corporation.
+ *
+ */
+
+#include <sound/soc-acpi.h>
+#include <sound/soc-acpi-intel-match.h>
+
+static struct snd_soc_acpi_codecs cml_codecs = {
+ .num_codecs = 1,
+ .codecs = {"10EC5682"}
+};
+
+static struct snd_soc_acpi_codecs cml_spk_codecs = {
+ .num_codecs = 1,
+ .codecs = {"MX98357A"}
+};
+
+struct snd_soc_acpi_mach snd_soc_acpi_intel_cml_machines[] = {
+ {
+ .id = "DLGS7219",
+ .drv_name = "cml_da7219_max98357a",
+ .quirk_data = &cml_spk_codecs,
+ .sof_fw_filename = "sof-cml.ri",
+ .sof_tplg_filename = "sof-cml-da7219-max98357a.tplg",
+ },
+ {
+ .id = "MX98357A",
+ .drv_name = "sof_rt5682",
+ .quirk_data = &cml_codecs,
+ .sof_fw_filename = "sof-cml.ri",
+ .sof_tplg_filename = "sof-cml-rt5682-max98357a.tplg",
+ },
+ {
+ .id = "10EC1011",
+ .drv_name = "cml_rt1011_rt5682",
+ .quirk_data = &cml_codecs,
+ .sof_fw_filename = "sof-cml.ri",
+ .sof_tplg_filename = "sof-cml-rt1011-rt5682.tplg",
+ },
+ {
+ .id = "10EC5682",
+ .drv_name = "sof_rt5682",
+ .sof_fw_filename = "sof-cml.ri",
+ .sof_tplg_filename = "sof-cml-rt5682.tplg",
+ },
+
+ {},
+};
+EXPORT_SYMBOL_GPL(snd_soc_acpi_intel_cml_machines);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Intel Common ACPI Match module");
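/*
 * Matching sketch for the table above: at enumeration time the core
 * walks the array with snd_soc_acpi_find_machine(), comparing each .id
 * against the ACPI HIDs present on the platform; .quirk_data lists
 * companion codecs that a machine-level quirk check can require before
 * accepting an entry. The first hit wins, so the more constrained
 * "DLGS7219", "MX98357A" and "10EC1011" entries precede the plain
 * "10EC5682" fallback.
 */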
diff --git a/sound/soc/intel/common/soc-acpi-intel-cnl-match.c b/sound/soc/intel/common/soc-acpi-intel-cnl-match.c
index 985aa366c9e8..27588841c8b0 100644
--- a/sound/soc/intel/common/soc-acpi-intel-cnl-match.c
+++ b/sound/soc/intel/common/soc-acpi-intel-cnl-match.c
@@ -14,16 +14,6 @@ static struct skl_machine_pdata cnl_pdata = {
.use_tplg_pcm = true,
};
-static struct snd_soc_acpi_codecs cml_codecs = {
- .num_codecs = 1,
- .codecs = {"10EC5682"}
-};
-
-static struct snd_soc_acpi_codecs cml_spk_codecs = {
- .num_codecs = 1,
- .codecs = {"MX98357A"}
-};
-
struct snd_soc_acpi_mach snd_soc_acpi_intel_cnl_machines[] = {
{
.id = "INT34C2",
@@ -33,27 +23,6 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_cnl_machines[] = {
.sof_fw_filename = "sof-cnl.ri",
.sof_tplg_filename = "sof-cnl-rt274.tplg",
},
- {
- .id = "DLGS7219",
- .drv_name = "cml_da7219_max98357a",
- .quirk_data = &cml_spk_codecs,
- .sof_fw_filename = "sof-cnl.ri",
- .sof_tplg_filename = "sof-cml-da7219-max98357a.tplg",
- },
- {
- .id = "MX98357A",
- .drv_name = "sof_rt5682",
- .quirk_data = &cml_codecs,
- .sof_fw_filename = "sof-cnl.ri",
- .sof_tplg_filename = "sof-cml-rt5682-max98357a.tplg",
- },
- {
- .id = "10EC5682",
- .drv_name = "sof_rt5682",
- .sof_fw_filename = "sof-cnl.ri",
- .sof_tplg_filename = "sof-cml-rt5682.tplg",
- },
-
{},
};
EXPORT_SYMBOL_GPL(snd_soc_acpi_intel_cnl_machines);
diff --git a/sound/soc/intel/common/soc-acpi-intel-jsl-match.c b/sound/soc/intel/common/soc-acpi-intel-jsl-match.c
new file mode 100644
index 000000000000..1c68a04f0c6e
--- /dev/null
+++ b/sound/soc/intel/common/soc-acpi-intel-jsl-match.c
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * soc-acpi-intel-jsl-match.c - tables and support for JSL ACPI enumeration.
+ *
+ * Copyright (c) 2019, Intel Corporation.
+ *
+ */
+
+#include <sound/soc-acpi.h>
+#include <sound/soc-acpi-intel-match.h>
+
+struct snd_soc_acpi_mach snd_soc_acpi_intel_jsl_machines[] = {
+ {},
+};
+EXPORT_SYMBOL_GPL(snd_soc_acpi_intel_jsl_machines);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Intel Common ACPI Match module");
diff --git a/sound/soc/intel/haswell/sst-haswell-pcm.c b/sound/soc/intel/haswell/sst-haswell-pcm.c
index 7f4f6b755760..a3a5bba2fbd9 100644
--- a/sound/soc/intel/haswell/sst-haswell-pcm.c
+++ b/sound/soc/intel/haswell/sst-haswell-pcm.c
@@ -458,12 +458,12 @@ static int create_adsp_page_table(struct snd_pcm_substream *substream,
}
/* this may get called several times by oss emulation */
-static int hsw_pcm_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params)
+static int hsw_pcm_hw_params(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_pcm_runtime *runtime = substream->runtime;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct hsw_priv_data *pdata = snd_soc_component_get_drvdata(component);
struct hsw_pcm_data *pcm_data;
struct sst_hsw *hsw = pdata->hsw;
@@ -656,16 +656,17 @@ static int hsw_pcm_hw_params(struct snd_pcm_substream *substream,
return 0;
}
-static int hsw_pcm_hw_free(struct snd_pcm_substream *substream)
+static int hsw_pcm_hw_free(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
snd_pcm_lib_free_pages(substream);
return 0;
}
-static int hsw_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+static int hsw_pcm_trigger(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream, int cmd)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct hsw_priv_data *pdata = snd_soc_component_get_drvdata(component);
struct hsw_pcm_data *pcm_data;
struct sst_hsw_stream *sst_stream;
@@ -770,11 +771,11 @@ static u32 hsw_notify_pointer(struct sst_hsw_stream *stream, void *data)
return pos;
}
-static snd_pcm_uframes_t hsw_pcm_pointer(struct snd_pcm_substream *substream)
+static snd_pcm_uframes_t hsw_pcm_pointer(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_pcm_runtime *runtime = substream->runtime;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct hsw_priv_data *pdata = snd_soc_component_get_drvdata(component);
struct hsw_pcm_data *pcm_data;
struct sst_hsw *hsw = pdata->hsw;
@@ -795,10 +796,10 @@ static snd_pcm_uframes_t hsw_pcm_pointer(struct snd_pcm_substream *substream)
return offset;
}
-static int hsw_pcm_open(struct snd_pcm_substream *substream)
+static int hsw_pcm_open(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct hsw_priv_data *pdata = snd_soc_component_get_drvdata(component);
struct hsw_pcm_data *pcm_data;
struct sst_hsw *hsw = pdata->hsw;
@@ -828,10 +829,10 @@ static int hsw_pcm_open(struct snd_pcm_substream *substream)
return 0;
}
-static int hsw_pcm_close(struct snd_pcm_substream *substream)
+static int hsw_pcm_close(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct hsw_priv_data *pdata = snd_soc_component_get_drvdata(component);
struct hsw_pcm_data *pcm_data;
struct sst_hsw *hsw = pdata->hsw;
@@ -862,17 +863,6 @@ out:
return ret;
}
-static const struct snd_pcm_ops hsw_pcm_ops = {
- .open = hsw_pcm_open,
- .close = hsw_pcm_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = hsw_pcm_hw_params,
- .hw_free = hsw_pcm_hw_free,
- .trigger = hsw_pcm_trigger,
- .pointer = hsw_pcm_pointer,
- .page = snd_pcm_sgbuf_ops_page,
-};
-
static int hsw_pcm_create_modules(struct hsw_priv_data *pdata)
{
struct sst_hsw *hsw = pdata->hsw;
@@ -930,10 +920,10 @@ static void hsw_pcm_free_modules(struct hsw_priv_data *pdata)
}
}
-static int hsw_pcm_new(struct snd_soc_pcm_runtime *rtd)
+static int hsw_pcm_new(struct snd_soc_component *component,
+ struct snd_soc_pcm_runtime *rtd)
{
struct snd_pcm *pcm = rtd->pcm;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct sst_pdata *pdata = dev_get_platdata(component->dev);
struct hsw_priv_data *priv_data = dev_get_drvdata(component->dev);
struct device *dev = pdata->dma_dev;
@@ -1121,8 +1111,14 @@ static const struct snd_soc_component_driver hsw_dai_component = {
.name = DRV_NAME,
.probe = hsw_pcm_probe,
.remove = hsw_pcm_remove,
- .ops = &hsw_pcm_ops,
- .pcm_new = hsw_pcm_new,
+ .open = hsw_pcm_open,
+ .close = hsw_pcm_close,
+ .hw_params = hsw_pcm_hw_params,
+ .hw_free = hsw_pcm_hw_free,
+ .trigger = hsw_pcm_trigger,
+ .pointer = hsw_pcm_pointer,
+ .ioctl = snd_soc_pcm_lib_ioctl,
+ .pcm_construct = hsw_pcm_new,
.controls = hsw_volume_controls,
.num_controls = ARRAY_SIZE(hsw_volume_controls),
.dapm_widgets = widgets,
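/*
 * Conversion pattern applied to this and the following drivers: the
 * separate struct snd_pcm_ops (and its snd_soc_rtdcom_lookup() calls)
 * goes away, and the PCM callbacks move into snd_soc_component_driver
 * itself, each taking the component as an explicit first argument:
 *
 *	before                        after
 *	.ops      = &foo_pcm_ops;     .open          = foo_open,
 *	.pcm_new  = foo_pcm_new;      .hw_params     = foo_hw_params,
 *	.pcm_free = foo_pcm_free;     .pointer       = foo_pointer,
 *	                              .pcm_construct = foo_pcm_new,
 *	                              .pcm_destruct  = foo_pcm_free,
 */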
diff --git a/sound/soc/intel/skylake/skl-pcm.c b/sound/soc/intel/skylake/skl-pcm.c
index 7f287424af9b..8b9abb79a69e 100644
--- a/sound/soc/intel/skylake/skl-pcm.c
+++ b/sound/soc/intel/skylake/skl-pcm.c
@@ -1081,7 +1081,8 @@ int skl_dai_load(struct snd_soc_component *cmp, int index,
return 0;
}
-static int skl_platform_open(struct snd_pcm_substream *substream)
+static int skl_platform_soc_open(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_dai_link *dai_link = rtd->dai_link;
@@ -1167,8 +1168,9 @@ static int skl_coupled_trigger(struct snd_pcm_substream *substream,
return 0;
}
-static int skl_platform_pcm_trigger(struct snd_pcm_substream *substream,
- int cmd)
+static int skl_platform_soc_trigger(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ int cmd)
{
struct hdac_bus *bus = get_bus_ctx(substream);
@@ -1178,8 +1180,9 @@ static int skl_platform_pcm_trigger(struct snd_pcm_substream *substream,
return 0;
}
-static snd_pcm_uframes_t skl_platform_pcm_pointer
- (struct snd_pcm_substream *substream)
+static snd_pcm_uframes_t skl_platform_soc_pointer(
+ struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct hdac_ext_stream *hstream = get_hdac_ext_stream(substream);
struct hdac_bus *bus = get_bus_ctx(substream);
@@ -1225,6 +1228,13 @@ static snd_pcm_uframes_t skl_platform_pcm_pointer
return bytes_to_frames(substream->runtime, pos);
}
+static int skl_platform_soc_mmap(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ struct vm_area_struct *area)
+{
+ return snd_pcm_lib_default_mmap(substream, area);
+}
+
static u64 skl_adjust_codec_delay(struct snd_pcm_substream *substream,
u64 nsec)
{
@@ -1245,7 +1255,9 @@ static u64 skl_adjust_codec_delay(struct snd_pcm_substream *substream,
return (nsec > codec_nsecs) ? nsec - codec_nsecs : 0;
}
-static int skl_get_time_info(struct snd_pcm_substream *substream,
+static int skl_platform_soc_get_time_info(
+ struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
struct timespec *system_ts, struct timespec *audio_ts,
struct snd_pcm_audio_tstamp_config *audio_tstamp_config,
struct snd_pcm_audio_tstamp_report *audio_tstamp_report)
@@ -1277,24 +1289,16 @@ static int skl_get_time_info(struct snd_pcm_substream *substream,
return 0;
}
-static const struct snd_pcm_ops skl_platform_ops = {
- .open = skl_platform_open,
- .ioctl = snd_pcm_lib_ioctl,
- .trigger = skl_platform_pcm_trigger,
- .pointer = skl_platform_pcm_pointer,
- .get_time_info = skl_get_time_info,
- .mmap = snd_pcm_lib_default_mmap,
- .page = snd_pcm_sgbuf_ops_page,
-};
-
-static void skl_pcm_free(struct snd_pcm *pcm)
+static void skl_platform_soc_free(struct snd_soc_component *component,
+ struct snd_pcm *pcm)
{
snd_pcm_lib_preallocate_free_for_all(pcm);
}
#define MAX_PREALLOC_SIZE (32 * 1024 * 1024)
-static int skl_pcm_new(struct snd_soc_pcm_runtime *rtd)
+static int skl_platform_soc_new(struct snd_soc_component *component,
+ struct snd_soc_pcm_runtime *rtd)
{
struct snd_soc_dai *dai = rtd->cpu_dai;
struct hdac_bus *bus = dev_get_drvdata(dai->dev);
@@ -1310,7 +1314,7 @@ static int skl_pcm_new(struct snd_soc_pcm_runtime *rtd)
size = MAX_PREALLOC_SIZE;
snd_pcm_lib_preallocate_pages_for_all(pcm,
SNDRV_DMA_TYPE_DEV_SG,
- snd_dma_pci_data(skl->pci),
+ &skl->pci->dev,
size, MAX_PREALLOC_SIZE);
}
@@ -1458,7 +1462,7 @@ static int skl_platform_soc_probe(struct snd_soc_component *component)
return 0;
}
-static void skl_pcm_remove(struct snd_soc_component *component)
+static void skl_platform_soc_remove(struct snd_soc_component *component)
{
struct hdac_bus *bus = dev_get_drvdata(component->dev);
struct skl_dev *skl = bus_to_skl(bus);
@@ -1471,10 +1475,15 @@ static void skl_pcm_remove(struct snd_soc_component *component)
static const struct snd_soc_component_driver skl_component = {
.name = "pcm",
.probe = skl_platform_soc_probe,
- .remove = skl_pcm_remove,
- .ops = &skl_platform_ops,
- .pcm_new = skl_pcm_new,
- .pcm_free = skl_pcm_free,
+ .remove = skl_platform_soc_remove,
+ .open = skl_platform_soc_open,
+ .ioctl = snd_soc_pcm_lib_ioctl,
+ .trigger = skl_platform_soc_trigger,
+ .pointer = skl_platform_soc_pointer,
+ .get_time_info = skl_platform_soc_get_time_info,
+ .mmap = skl_platform_soc_mmap,
+ .pcm_construct = skl_platform_soc_new,
+ .pcm_destruct = skl_platform_soc_free,
.module_get_upon_open = 1, /* increment refcount when a pcm is opened */
};
diff --git a/sound/soc/intel/skylake/skl.c b/sound/soc/intel/skylake/skl.c
index 141dbbf975ac..58ba3e9469ba 100644
--- a/sound/soc/intel/skylake/skl.c
+++ b/sound/soc/intel/skylake/skl.c
@@ -27,6 +27,7 @@
#include <sound/hda_i915.h>
#include <sound/hda_codec.h>
#include <sound/intel-nhlt.h>
+#include <sound/intel-dsp-config.h>
#include "skl.h"
#include "skl-sst-dsp.h"
#include "skl-sst-ipc.h"
@@ -987,22 +988,10 @@ static int skl_probe(struct pci_dev *pci,
switch (skl_pci_binding) {
case SND_SKL_PCI_BIND_AUTO:
- /*
- * detect DSP by checking class/subclass/prog-id information
- * class=04 subclass 03 prog-if 00: no DSP, use legacy driver
- * class=04 subclass 01 prog-if 00: DSP is present
- * (and may be required e.g. for DMIC or SSP support)
- * class=04 subclass 03 prog-if 80: use DSP or legacy mode
- */
- if (pci->class == 0x040300) {
- dev_info(&pci->dev, "The DSP is not enabled on this platform, aborting probe\n");
+ err = snd_intel_dsp_driver_probe(pci);
+ if (err != SND_INTEL_DSP_DRIVER_ANY &&
+ err != SND_INTEL_DSP_DRIVER_SST)
return -ENODEV;
- }
- if (pci->class != 0x040100 && pci->class != 0x040380) {
- dev_err(&pci->dev, "Unknown PCI class/subclass/prog-if information (0x%06x) found, aborting probe\n", pci->class);
- return -ENODEV;
- }
- dev_info(&pci->dev, "DSP detected with PCI class/subclass/prog-if info 0x%06x\n", pci->class);
break;
case SND_SKL_PCI_BIND_LEGACY:
dev_info(&pci->dev, "Module parameter forced binding with HDaudio legacy, aborting probe\n");
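/*
 * The open-coded PCI class/subclass check removed above is replaced by
 * the shared snd_intel_dsp_driver_probe() helper from
 * <sound/intel-dsp-config.h>; SST binds only when the helper reports
 * SND_INTEL_DSP_DRIVER_ANY or SND_INTEL_DSP_DRIVER_SST for the device.
 */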
diff --git a/sound/soc/jz4740/jz4740-i2s.c b/sound/soc/jz4740/jz4740-i2s.c
index 13408de34055..38d48d101783 100644
--- a/sound/soc/jz4740/jz4740-i2s.c
+++ b/sound/soc/jz4740/jz4740-i2s.c
@@ -497,15 +497,13 @@ static int jz4740_i2s_dev_probe(struct platform_device *pdev)
struct jz4740_i2s *i2s;
struct resource *mem;
int ret;
- const struct of_device_id *match;
i2s = devm_kzalloc(&pdev->dev, sizeof(*i2s), GFP_KERNEL);
if (!i2s)
return -ENOMEM;
- match = of_match_device(jz4740_of_matches, &pdev->dev);
- if (match)
- i2s->version = (enum jz47xx_i2s_version)match->data;
+ i2s->version =
+ (enum jz47xx_i2s_version)of_device_get_match_data(&pdev->dev);
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
i2s->base = devm_ioremap_resource(&pdev->dev, mem);
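/*
 * of_device_get_match_data() replaces the open-coded of_match_device()
 * lookup above: it returns the .data pointer of the matching OF table
 * entry directly (or NULL when there is no match), which is why the
 * local match variable can go away.
 */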
diff --git a/sound/soc/kirkwood/kirkwood-dma.c b/sound/soc/kirkwood/kirkwood-dma.c
index 6f69f314f2c2..e28fb3449f1d 100644
--- a/sound/soc/kirkwood/kirkwood-dma.c
+++ b/sound/soc/kirkwood/kirkwood-dma.c
@@ -98,7 +98,8 @@ kirkwood_dma_conf_mbus_windows(void __iomem *base, int win,
}
}
-static int kirkwood_dma_open(struct snd_pcm_substream *substream)
+static int kirkwood_dma_open(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
int err;
struct snd_pcm_runtime *runtime = substream->runtime;
@@ -132,7 +133,7 @@ static int kirkwood_dma_open(struct snd_pcm_substream *substream)
err = request_irq(priv->irq, kirkwood_dma_irq, IRQF_SHARED,
"kirkwood-i2s", priv);
if (err)
- return -EBUSY;
+ return err;
/*
* Enable Error interrupts. We're only ack'ing them but
@@ -160,7 +161,8 @@ static int kirkwood_dma_open(struct snd_pcm_substream *substream)
return 0;
}
-static int kirkwood_dma_close(struct snd_pcm_substream *substream)
+static int kirkwood_dma_close(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct kirkwood_dma_data *priv = kirkwood_priv(substream);
@@ -180,8 +182,9 @@ static int kirkwood_dma_close(struct snd_pcm_substream *substream)
return 0;
}
-static int kirkwood_dma_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params)
+static int kirkwood_dma_hw_params(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
{
struct snd_pcm_runtime *runtime = substream->runtime;
@@ -191,13 +194,15 @@ static int kirkwood_dma_hw_params(struct snd_pcm_substream *substream,
return 0;
}
-static int kirkwood_dma_hw_free(struct snd_pcm_substream *substream)
+static int kirkwood_dma_hw_free(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
snd_pcm_set_runtime_buffer(substream, NULL);
return 0;
}
-static int kirkwood_dma_prepare(struct snd_pcm_substream *substream)
+static int kirkwood_dma_prepare(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct kirkwood_dma_data *priv = kirkwood_priv(substream);
@@ -222,8 +227,9 @@ static int kirkwood_dma_prepare(struct snd_pcm_substream *substream)
return 0;
}
-static snd_pcm_uframes_t kirkwood_dma_pointer(struct snd_pcm_substream
- *substream)
+static snd_pcm_uframes_t kirkwood_dma_pointer(
+ struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct kirkwood_dma_data *priv = kirkwood_priv(substream);
snd_pcm_uframes_t count;
@@ -238,16 +244,6 @@ static snd_pcm_uframes_t kirkwood_dma_pointer(struct snd_pcm_substream
return count;
}
-static const struct snd_pcm_ops kirkwood_dma_ops = {
- .open = kirkwood_dma_open,
- .close = kirkwood_dma_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = kirkwood_dma_hw_params,
- .hw_free = kirkwood_dma_hw_free,
- .prepare = kirkwood_dma_prepare,
- .pointer = kirkwood_dma_pointer,
-};
-
static int kirkwood_dma_preallocate_dma_buffer(struct snd_pcm *pcm,
int stream)
{
@@ -267,7 +263,8 @@ static int kirkwood_dma_preallocate_dma_buffer(struct snd_pcm *pcm,
return 0;
}
-static int kirkwood_dma_new(struct snd_soc_pcm_runtime *rtd)
+static int kirkwood_dma_new(struct snd_soc_component *component,
+ struct snd_soc_pcm_runtime *rtd)
{
struct snd_card *card = rtd->card->snd_card;
struct snd_pcm *pcm = rtd->pcm;
@@ -294,7 +291,8 @@ static int kirkwood_dma_new(struct snd_soc_pcm_runtime *rtd)
return 0;
}
-static void kirkwood_dma_free_dma_buffers(struct snd_pcm *pcm)
+static void kirkwood_dma_free_dma_buffers(struct snd_soc_component *component,
+ struct snd_pcm *pcm)
{
struct snd_pcm_substream *substream;
struct snd_dma_buffer *buf;
@@ -316,7 +314,13 @@ static void kirkwood_dma_free_dma_buffers(struct snd_pcm *pcm)
const struct snd_soc_component_driver kirkwood_soc_component = {
.name = DRV_NAME,
- .ops = &kirkwood_dma_ops,
- .pcm_new = kirkwood_dma_new,
- .pcm_free = kirkwood_dma_free_dma_buffers,
+ .open = kirkwood_dma_open,
+ .close = kirkwood_dma_close,
+ .ioctl = snd_soc_pcm_lib_ioctl,
+ .hw_params = kirkwood_dma_hw_params,
+ .hw_free = kirkwood_dma_hw_free,
+ .prepare = kirkwood_dma_prepare,
+ .pointer = kirkwood_dma_pointer,
+ .pcm_construct = kirkwood_dma_new,
+ .pcm_destruct = kirkwood_dma_free_dma_buffers,
};
diff --git a/sound/soc/mediatek/Kconfig b/sound/soc/mediatek/Kconfig
index 111e44b64b38..a656d2014127 100644
--- a/sound/soc/mediatek/Kconfig
+++ b/sound/soc/mediatek/Kconfig
@@ -125,6 +125,7 @@ config SND_SOC_MT8183_MT6358_TS3A227E_MAX98357A
select SND_SOC_MAX98357A
select SND_SOC_BT_SCO
select SND_SOC_TS3A227E
+ select SND_SOC_CROS_EC_CODEC if CROS_EC
help
This adds ASoC driver for Mediatek MT8183 boards
with the MT6358 TS3A227E MAX98357A audio codec.
diff --git a/sound/soc/mediatek/common/mtk-afe-platform-driver.c b/sound/soc/mediatek/common/mtk-afe-platform-driver.c
index 3ce527ce30ce..b6624d8d084b 100644
--- a/sound/soc/mediatek/common/mtk-afe-platform-driver.c
+++ b/sound/soc/mediatek/common/mtk-afe-platform-driver.c
@@ -77,11 +77,10 @@ int mtk_afe_add_sub_dai_control(struct snd_soc_component *component)
}
EXPORT_SYMBOL_GPL(mtk_afe_add_sub_dai_control);
-static snd_pcm_uframes_t mtk_afe_pcm_pointer
- (struct snd_pcm_substream *substream)
+snd_pcm_uframes_t mtk_afe_pcm_pointer(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd, AFE_PCM_NAME);
struct mtk_base_afe *afe = snd_soc_component_get_drvdata(component);
struct mtk_base_afe_memif *memif = &afe->memif[rtd->cpu_dai->id];
const struct mtk_base_memif_data *memif_data = memif->data;
@@ -111,18 +110,13 @@ static snd_pcm_uframes_t mtk_afe_pcm_pointer
POINTER_RETURN_FRAMES:
return bytes_to_frames(substream->runtime, pcm_ptr_bytes);
}
+EXPORT_SYMBOL_GPL(mtk_afe_pcm_pointer);
-const struct snd_pcm_ops mtk_afe_pcm_ops = {
- .ioctl = snd_pcm_lib_ioctl,
- .pointer = mtk_afe_pcm_pointer,
-};
-EXPORT_SYMBOL_GPL(mtk_afe_pcm_ops);
-
-int mtk_afe_pcm_new(struct snd_soc_pcm_runtime *rtd)
+int mtk_afe_pcm_new(struct snd_soc_component *component,
+ struct snd_soc_pcm_runtime *rtd)
{
size_t size;
struct snd_pcm *pcm = rtd->pcm;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd, AFE_PCM_NAME);
struct mtk_base_afe *afe = snd_soc_component_get_drvdata(component);
size = afe->mtk_afe_hardware->buffer_bytes_max;
@@ -132,17 +126,19 @@ int mtk_afe_pcm_new(struct snd_soc_pcm_runtime *rtd)
}
EXPORT_SYMBOL_GPL(mtk_afe_pcm_new);
-void mtk_afe_pcm_free(struct snd_pcm *pcm)
+void mtk_afe_pcm_free(struct snd_soc_component *component,
+ struct snd_pcm *pcm)
{
snd_pcm_lib_preallocate_free_for_all(pcm);
}
EXPORT_SYMBOL_GPL(mtk_afe_pcm_free);
const struct snd_soc_component_driver mtk_afe_pcm_platform = {
- .name = AFE_PCM_NAME,
- .ops = &mtk_afe_pcm_ops,
- .pcm_new = mtk_afe_pcm_new,
- .pcm_free = mtk_afe_pcm_free,
+ .name = AFE_PCM_NAME,
+ .ioctl = snd_soc_pcm_lib_ioctl,
+ .pointer = mtk_afe_pcm_pointer,
+ .pcm_construct = mtk_afe_pcm_new,
+ .pcm_destruct = mtk_afe_pcm_free,
};
EXPORT_SYMBOL_GPL(mtk_afe_pcm_platform);
diff --git a/sound/soc/mediatek/common/mtk-afe-platform-driver.h b/sound/soc/mediatek/common/mtk-afe-platform-driver.h
index 88df6797732f..e550d11568c3 100644
--- a/sound/soc/mediatek/common/mtk-afe-platform-driver.h
+++ b/sound/soc/mediatek/common/mtk-afe-platform-driver.h
@@ -10,7 +10,6 @@
#define _MTK_AFE_PLATFORM_DRIVER_H_
#define AFE_PCM_NAME "mtk-afe-pcm"
-extern const struct snd_pcm_ops mtk_afe_pcm_ops;
extern const struct snd_soc_component_driver mtk_afe_pcm_platform;
struct mtk_base_afe;
@@ -18,9 +17,12 @@ struct snd_pcm;
struct snd_soc_component;
struct snd_soc_pcm_runtime;
-
-int mtk_afe_pcm_new(struct snd_soc_pcm_runtime *rtd);
-void mtk_afe_pcm_free(struct snd_pcm *pcm);
+snd_pcm_uframes_t mtk_afe_pcm_pointer(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream);
+int mtk_afe_pcm_new(struct snd_soc_component *component,
+ struct snd_soc_pcm_runtime *rtd);
+void mtk_afe_pcm_free(struct snd_soc_component *component,
+ struct snd_pcm *pcm);
int mtk_afe_combine_sub_dai(struct mtk_base_afe *afe);
int mtk_afe_add_sub_dai_control(struct snd_soc_component *component);
diff --git a/sound/soc/mediatek/common/mtk-btcvsd.c b/sound/soc/mediatek/common/mtk-btcvsd.c
index d00608c73c6e..2b490ae2e642 100644
--- a/sound/soc/mediatek/common/mtk-btcvsd.c
+++ b/sound/soc/mediatek/common/mtk-btcvsd.c
@@ -875,11 +875,9 @@ static const struct snd_pcm_hardware mtk_btcvsd_hardware = {
.fifo_size = 0,
};
-static int mtk_pcm_btcvsd_open(struct snd_pcm_substream *substream)
+static int mtk_pcm_btcvsd_open(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_component *component =
- snd_soc_rtdcom_lookup(rtd, BTCVSD_SND_NAME);
struct mtk_btcvsd_snd *bt = snd_soc_component_get_drvdata(component);
int ret;
@@ -899,11 +897,9 @@ static int mtk_pcm_btcvsd_open(struct snd_pcm_substream *substream)
return ret;
}
-static int mtk_pcm_btcvsd_close(struct snd_pcm_substream *substream)
+static int mtk_pcm_btcvsd_close(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_component *component =
- snd_soc_rtdcom_lookup(rtd, BTCVSD_SND_NAME);
struct mtk_btcvsd_snd *bt = snd_soc_component_get_drvdata(component);
struct mtk_btcvsd_snd_stream *bt_stream = get_bt_stream(bt, substream);
@@ -914,12 +910,10 @@ static int mtk_pcm_btcvsd_close(struct snd_pcm_substream *substream)
return 0;
}
-static int mtk_pcm_btcvsd_hw_params(struct snd_pcm_substream *substream,
+static int mtk_pcm_btcvsd_hw_params(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *hw_params)
{
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_component *component =
- snd_soc_rtdcom_lookup(rtd, BTCVSD_SND_NAME);
struct mtk_btcvsd_snd *bt = snd_soc_component_get_drvdata(component);
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
@@ -934,11 +928,9 @@ static int mtk_pcm_btcvsd_hw_params(struct snd_pcm_substream *substream,
return 0;
}
-static int mtk_pcm_btcvsd_hw_free(struct snd_pcm_substream *substream)
+static int mtk_pcm_btcvsd_hw_free(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_component *component =
- snd_soc_rtdcom_lookup(rtd, BTCVSD_SND_NAME);
struct mtk_btcvsd_snd *bt = snd_soc_component_get_drvdata(component);
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
@@ -947,11 +939,9 @@ static int mtk_pcm_btcvsd_hw_free(struct snd_pcm_substream *substream)
return 0;
}
-static int mtk_pcm_btcvsd_prepare(struct snd_pcm_substream *substream)
+static int mtk_pcm_btcvsd_prepare(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_component *component =
- snd_soc_rtdcom_lookup(rtd, BTCVSD_SND_NAME);
struct mtk_btcvsd_snd *bt = snd_soc_component_get_drvdata(component);
struct mtk_btcvsd_snd_stream *bt_stream = get_bt_stream(bt, substream);
@@ -961,11 +951,9 @@ static int mtk_pcm_btcvsd_prepare(struct snd_pcm_substream *substream)
return 0;
}
-static int mtk_pcm_btcvsd_trigger(struct snd_pcm_substream *substream, int cmd)
+static int mtk_pcm_btcvsd_trigger(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream, int cmd)
{
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_component *component =
- snd_soc_rtdcom_lookup(rtd, BTCVSD_SND_NAME);
struct mtk_btcvsd_snd *bt = snd_soc_component_get_drvdata(component);
struct mtk_btcvsd_snd_stream *bt_stream = get_bt_stream(bt, substream);
int stream = substream->stream;
@@ -993,12 +981,10 @@ static int mtk_pcm_btcvsd_trigger(struct snd_pcm_substream *substream, int cmd)
}
}
-static snd_pcm_uframes_t mtk_pcm_btcvsd_pointer
- (struct snd_pcm_substream *substream)
+static snd_pcm_uframes_t mtk_pcm_btcvsd_pointer(
+ struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_component *component =
- snd_soc_rtdcom_lookup(rtd, BTCVSD_SND_NAME);
struct mtk_btcvsd_snd *bt = snd_soc_component_get_drvdata(component);
struct mtk_btcvsd_snd_stream *bt_stream;
snd_pcm_uframes_t frame = 0;
@@ -1044,13 +1030,11 @@ static snd_pcm_uframes_t mtk_pcm_btcvsd_pointer
return frame;
}
-static int mtk_pcm_btcvsd_copy(struct snd_pcm_substream *substream,
+static int mtk_pcm_btcvsd_copy(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
int channel, unsigned long pos,
void __user *buf, unsigned long count)
{
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_component *component =
- snd_soc_rtdcom_lookup(rtd, BTCVSD_SND_NAME);
struct mtk_btcvsd_snd *bt = snd_soc_component_get_drvdata(component);
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
@@ -1061,18 +1045,6 @@ static int mtk_pcm_btcvsd_copy(struct snd_pcm_substream *substream,
return 0;
}
-static struct snd_pcm_ops mtk_btcvsd_ops = {
- .open = mtk_pcm_btcvsd_open,
- .close = mtk_pcm_btcvsd_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = mtk_pcm_btcvsd_hw_params,
- .hw_free = mtk_pcm_btcvsd_hw_free,
- .prepare = mtk_pcm_btcvsd_prepare,
- .trigger = mtk_pcm_btcvsd_trigger,
- .pointer = mtk_pcm_btcvsd_pointer,
- .copy_user = mtk_pcm_btcvsd_copy,
-};
-
/* kcontrol */
static const char *const btsco_band_str[] = {"NB", "WB"};
@@ -1295,9 +1267,17 @@ static int mtk_btcvsd_snd_component_probe(struct snd_soc_component *component)
}
static const struct snd_soc_component_driver mtk_btcvsd_snd_platform = {
- .name = BTCVSD_SND_NAME,
- .ops = &mtk_btcvsd_ops,
- .probe = mtk_btcvsd_snd_component_probe,
+ .name = BTCVSD_SND_NAME,
+ .probe = mtk_btcvsd_snd_component_probe,
+ .open = mtk_pcm_btcvsd_open,
+ .close = mtk_pcm_btcvsd_close,
+ .ioctl = snd_soc_pcm_lib_ioctl,
+ .hw_params = mtk_pcm_btcvsd_hw_params,
+ .hw_free = mtk_pcm_btcvsd_hw_free,
+ .prepare = mtk_pcm_btcvsd_prepare,
+ .trigger = mtk_pcm_btcvsd_trigger,
+ .pointer = mtk_pcm_btcvsd_pointer,
+ .copy_user = mtk_pcm_btcvsd_copy,
};
static int mtk_btcvsd_snd_probe(struct platform_device *pdev)
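
Because the component is now handed to each callback directly, the deleted preambles above — fishing the component back out of the runtime by name — are no longer needed. The simplification, sketched with a hypothetical foo driver:

	/* before: recover the component from the substream */
	static int foo_open(struct snd_pcm_substream *substream)
	{
		struct snd_soc_pcm_runtime *rtd = substream->private_data;
		struct snd_soc_component *component =
			snd_soc_rtdcom_lookup(rtd, FOO_NAME);
		struct foo_priv *priv = snd_soc_component_get_drvdata(component);
		/* ... */
	}

	/* after: the core passes the component in */
	static int foo_open(struct snd_soc_component *component,
			    struct snd_pcm_substream *substream)
	{
		struct foo_priv *priv = snd_soc_component_get_drvdata(component);
		/* ... */
	}
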
diff --git a/sound/soc/mediatek/mt6797/mt6797-afe-pcm.c b/sound/soc/mediatek/mt6797/mt6797-afe-pcm.c
index e52c032d53aa..033c07fb599c 100644
--- a/sound/soc/mediatek/mt6797/mt6797-afe-pcm.c
+++ b/sound/soc/mediatek/mt6797/mt6797-afe-pcm.c
@@ -710,11 +710,12 @@ static int mt6797_afe_component_probe(struct snd_soc_component *component)
}
static const struct snd_soc_component_driver mt6797_afe_component = {
- .name = AFE_PCM_NAME,
- .ops = &mtk_afe_pcm_ops,
- .pcm_new = mtk_afe_pcm_new,
- .pcm_free = mtk_afe_pcm_free,
- .probe = mt6797_afe_component_probe,
+ .name = AFE_PCM_NAME,
+ .probe = mt6797_afe_component_probe,
+ .ioctl = snd_soc_pcm_lib_ioctl,
+ .pointer = mtk_afe_pcm_pointer,
+ .pcm_construct = mtk_afe_pcm_new,
+ .pcm_destruct = mtk_afe_pcm_free,
};
static int mt6797_dai_memif_register(struct mtk_base_afe *afe)
diff --git a/sound/soc/mediatek/mt8183/mt8183-afe-pcm.c b/sound/soc/mediatek/mt8183/mt8183-afe-pcm.c
index 4a31106d3471..76af09d8f1af 100644
--- a/sound/soc/mediatek/mt8183/mt8183-afe-pcm.c
+++ b/sound/soc/mediatek/mt8183/mt8183-afe-pcm.c
@@ -11,6 +11,7 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/pm_runtime.h>
+#include <linux/reset.h>
#include "mt8183-afe-common.h"
#include "mt8183-afe-clk.h"
@@ -1047,11 +1048,12 @@ static int mt8183_afe_component_probe(struct snd_soc_component *component)
}
static const struct snd_soc_component_driver mt8183_afe_component = {
- .name = AFE_PCM_NAME,
- .ops = &mtk_afe_pcm_ops,
- .pcm_new = mtk_afe_pcm_new,
- .pcm_free = mtk_afe_pcm_free,
- .probe = mt8183_afe_component_probe,
+ .name = AFE_PCM_NAME,
+ .probe = mt8183_afe_component_probe,
+ .ioctl = snd_soc_pcm_lib_ioctl,
+ .pointer = mtk_afe_pcm_pointer,
+ .pcm_construct = mtk_afe_pcm_new,
+ .pcm_destruct = mtk_afe_pcm_free,
};
static int mt8183_dai_memif_register(struct mtk_base_afe *afe)
@@ -1089,6 +1091,7 @@ static int mt8183_afe_pcm_dev_probe(struct platform_device *pdev)
struct mtk_base_afe *afe;
struct mt8183_afe_private *afe_priv;
struct device *dev;
+ struct reset_control *rstc;
int i, irq_id, ret;
afe = devm_kzalloc(&pdev->dev, sizeof(*afe), GFP_KERNEL);
@@ -1126,6 +1129,19 @@ static int mt8183_afe_pcm_dev_probe(struct platform_device *pdev)
return ret;
}
+ rstc = devm_reset_control_get(dev, "audiosys");
+ if (IS_ERR(rstc)) {
+ ret = PTR_ERR(rstc);
+ dev_err(dev, "could not get audiosys reset:%d\n", ret);
+ return ret;
+ }
+
+ ret = reset_control_reset(rstc);
+ if (ret) {
+ dev_err(dev, "failed to trigger audio reset:%d\n", ret);
+ return ret;
+ }
+
/* enable clock for regcache get default value from hw */
afe_priv->pm_runtime_bypass_reg_ctl = true;
pm_runtime_get_sync(&pdev->dev);
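
Independent of the ops conversion, the mt8183 probe above also gains a reset of the audio subsystem before the clocks are enabled and the register defaults are read back into the regcache. It uses the generic reset-consumer API; condensed from the hunk:

	#include <linux/reset.h>

	struct reset_control *rstc;

	rstc = devm_reset_control_get(dev, "audiosys");  /* name from the DT node */
	if (IS_ERR(rstc))
		return PTR_ERR(rstc);

	ret = reset_control_reset(rstc);  /* assert and deassert in one call */
	if (ret)
		return ret;
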
diff --git a/sound/soc/mediatek/mt8183/mt8183-mt6358-ts3a227-max98357.c b/sound/soc/mediatek/mt8183/mt8183-mt6358-ts3a227-max98357.c
index bb9cdc0d6552..0555f7d73d05 100644
--- a/sound/soc/mediatek/mt8183/mt8183-mt6358-ts3a227-max98357.c
+++ b/sound/soc/mediatek/mt8183/mt8183-mt6358-ts3a227-max98357.c
@@ -19,11 +19,12 @@ enum PINCTRL_PIN_STATE {
PIN_STATE_DEFAULT = 0,
PIN_TDM_OUT_ON,
PIN_TDM_OUT_OFF,
+ PIN_WOV,
PIN_STATE_MAX
};
static const char * const mt8183_pin_str[PIN_STATE_MAX] = {
- "default", "aud_tdm_out_on", "aud_tdm_out_off",
+ "default", "aud_tdm_out_on", "aud_tdm_out_off", "wov",
};
struct mt8183_mt6358_ts3a227_max98357_priv {
@@ -142,6 +143,11 @@ SND_SOC_DAILINK_DEFS(playback_hdmi,
DAILINK_COMP_ARRAY(COMP_DUMMY()),
DAILINK_COMP_ARRAY(COMP_EMPTY()));
+SND_SOC_DAILINK_DEFS(wake_on_voice,
+ DAILINK_COMP_ARRAY(COMP_DUMMY()),
+ DAILINK_COMP_ARRAY(COMP_DUMMY()),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+
/* BE */
SND_SOC_DAILINK_DEFS(primary_codec,
DAILINK_COMP_ARRAY(COMP_CPU("ADDA")),
@@ -229,6 +235,41 @@ static struct snd_soc_ops mt8183_mt6358_tdm_ops = {
.shutdown = mt8183_mt6358_tdm_shutdown,
};
+static int
+mt8183_mt6358_ts3a227_max98357_wov_startup(
+ struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_card *card = rtd->card;
+ struct mt8183_mt6358_ts3a227_max98357_priv *priv =
+ snd_soc_card_get_drvdata(card);
+
+ return pinctrl_select_state(priv->pinctrl,
+ priv->pin_states[PIN_WOV]);
+}
+
+static void
+mt8183_mt6358_ts3a227_max98357_wov_shutdown(
+ struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_card *card = rtd->card;
+ struct mt8183_mt6358_ts3a227_max98357_priv *priv =
+ snd_soc_card_get_drvdata(card);
+ int ret;
+
+ ret = pinctrl_select_state(priv->pinctrl,
+ priv->pin_states[PIN_STATE_DEFAULT]);
+ if (ret)
+ dev_err(card->dev, "%s failed to select state %d\n",
+ __func__, ret);
+}
+
+static const struct snd_soc_ops mt8183_mt6358_ts3a227_max98357_wov_ops = {
+ .startup = mt8183_mt6358_ts3a227_max98357_wov_startup,
+ .shutdown = mt8183_mt6358_ts3a227_max98357_wov_shutdown,
+};
+
static struct snd_soc_dai_link
mt8183_mt6358_ts3a227_max98357_dai_links[] = {
/* FE */
@@ -306,6 +347,15 @@ mt8183_mt6358_ts3a227_max98357_dai_links[] = {
.dpcm_playback = 1,
SND_SOC_DAILINK_REG(playback_hdmi),
},
+ {
+ .name = "Wake on Voice",
+ .stream_name = "Wake on Voice",
+ .ignore_suspend = 1,
+ .ignore = 1,
+ SND_SOC_DAILINK_REG(wake_on_voice),
+ .ops = &mt8183_mt6358_ts3a227_max98357_wov_ops,
+ },
+
/* BE */
{
.name = "Primary Codec",
@@ -429,7 +479,7 @@ static int
mt8183_mt6358_ts3a227_max98357_dev_probe(struct platform_device *pdev)
{
struct snd_soc_card *card = &mt8183_mt6358_ts3a227_max98357_card;
- struct device_node *platform_node;
+ struct device_node *platform_node, *ec_codec;
struct snd_soc_dai_link *dai_link;
struct mt8183_mt6358_ts3a227_max98357_priv *priv;
int ret;
@@ -444,10 +494,24 @@ mt8183_mt6358_ts3a227_max98357_dev_probe(struct platform_device *pdev)
return -EINVAL;
}
+ ec_codec = of_parse_phandle(pdev->dev.of_node, "mediatek,ec-codec", 0);
+
for_each_card_prelinks(card, i, dai_link) {
if (dai_link->platforms->name)
continue;
- dai_link->platforms->of_node = platform_node;
+
+ if (ec_codec && strcmp(dai_link->name, "Wake on Voice") == 0) {
+ dai_link->cpus[0].name = NULL;
+ dai_link->cpus[0].of_node = ec_codec;
+ dai_link->cpus[0].dai_name = NULL;
+ dai_link->codecs[0].name = NULL;
+ dai_link->codecs[0].of_node = ec_codec;
+ dai_link->codecs[0].dai_name = "Wake on Voice";
+ dai_link->platforms[0].of_node = ec_codec;
+ dai_link->ignore = 0;
+ } else {
+ dai_link->platforms->of_node = platform_node;
+ }
}
mt8183_mt6358_ts3a227_max98357_headset_dev.dlc.of_node =
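
The machine-driver change above adds a "Wake on Voice" front end that stays dormant (.ignore = 1) unless the optional mediatek,ec-codec phandle is present in the device tree, in which case the link's CPU, codec and platform ends are all rebound to the EC codec node. Condensed from the probe hunk (property and link names as in the patch):

	ec_codec = of_parse_phandle(pdev->dev.of_node, "mediatek,ec-codec", 0);

	if (ec_codec && strcmp(dai_link->name, "Wake on Voice") == 0) {
		dai_link->cpus[0].of_node      = ec_codec;  /* name/dai_name cleared */
		dai_link->codecs[0].of_node    = ec_codec;
		dai_link->codecs[0].dai_name   = "Wake on Voice";
		dai_link->platforms[0].of_node = ec_codec;
		dai_link->ignore = 0;  /* activate the link */
	}
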
diff --git a/sound/soc/meson/axg-fifo.c b/sound/soc/meson/axg-fifo.c
index 5a3749938900..d6f3eefb8f09 100644
--- a/sound/soc/meson/axg-fifo.c
+++ b/sound/soc/meson/axg-fifo.c
@@ -70,7 +70,8 @@ static void __dma_enable(struct axg_fifo *fifo, bool enable)
enable ? CTRL0_DMA_EN : 0);
}
-static int axg_fifo_pcm_trigger(struct snd_pcm_substream *ss, int cmd)
+int axg_fifo_pcm_trigger(struct snd_soc_component *component,
+ struct snd_pcm_substream *ss, int cmd)
{
struct axg_fifo *fifo = axg_fifo_data(ss);
@@ -91,8 +92,10 @@ static int axg_fifo_pcm_trigger(struct snd_pcm_substream *ss, int cmd)
return 0;
}
+EXPORT_SYMBOL_GPL(axg_fifo_pcm_trigger);
-static snd_pcm_uframes_t axg_fifo_pcm_pointer(struct snd_pcm_substream *ss)
+snd_pcm_uframes_t axg_fifo_pcm_pointer(struct snd_soc_component *component,
+ struct snd_pcm_substream *ss)
{
struct axg_fifo *fifo = axg_fifo_data(ss);
struct snd_pcm_runtime *runtime = ss->runtime;
@@ -102,9 +105,11 @@ static snd_pcm_uframes_t axg_fifo_pcm_pointer(struct snd_pcm_substream *ss)
return bytes_to_frames(runtime, addr - (unsigned int)runtime->dma_addr);
}
+EXPORT_SYMBOL_GPL(axg_fifo_pcm_pointer);
-static int axg_fifo_pcm_hw_params(struct snd_pcm_substream *ss,
- struct snd_pcm_hw_params *params)
+int axg_fifo_pcm_hw_params(struct snd_soc_component *component,
+ struct snd_pcm_substream *ss,
+ struct snd_pcm_hw_params *params)
{
struct snd_pcm_runtime *runtime = ss->runtime;
struct axg_fifo *fifo = axg_fifo_data(ss);
@@ -132,15 +137,17 @@ static int axg_fifo_pcm_hw_params(struct snd_pcm_substream *ss,
return 0;
}
+EXPORT_SYMBOL_GPL(axg_fifo_pcm_hw_params);
-static int g12a_fifo_pcm_hw_params(struct snd_pcm_substream *ss,
- struct snd_pcm_hw_params *params)
+int g12a_fifo_pcm_hw_params(struct snd_soc_component *component,
+ struct snd_pcm_substream *ss,
+ struct snd_pcm_hw_params *params)
{
struct axg_fifo *fifo = axg_fifo_data(ss);
struct snd_pcm_runtime *runtime = ss->runtime;
int ret;
- ret = axg_fifo_pcm_hw_params(ss, params);
+ ret = axg_fifo_pcm_hw_params(component, ss, params);
if (ret)
return ret;
@@ -149,8 +156,10 @@ static int g12a_fifo_pcm_hw_params(struct snd_pcm_substream *ss,
return 0;
}
+EXPORT_SYMBOL_GPL(g12a_fifo_pcm_hw_params);
-static int axg_fifo_pcm_hw_free(struct snd_pcm_substream *ss)
+int axg_fifo_pcm_hw_free(struct snd_soc_component *component,
+ struct snd_pcm_substream *ss)
{
struct axg_fifo *fifo = axg_fifo_data(ss);
@@ -160,6 +169,7 @@ static int axg_fifo_pcm_hw_free(struct snd_pcm_substream *ss)
return snd_pcm_lib_free_pages(ss);
}
+EXPORT_SYMBOL_GPL(axg_fifo_pcm_hw_free);
static void axg_fifo_ack_irq(struct axg_fifo *fifo, u8 mask)
{
@@ -194,7 +204,8 @@ static irqreturn_t axg_fifo_pcm_irq_block(int irq, void *dev_id)
return IRQ_RETVAL(status);
}
-static int axg_fifo_pcm_open(struct snd_pcm_substream *ss)
+int axg_fifo_pcm_open(struct snd_soc_component *component,
+ struct snd_pcm_substream *ss)
{
struct axg_fifo *fifo = axg_fifo_data(ss);
struct device *dev = axg_fifo_dev(ss);
@@ -250,8 +261,10 @@ static int axg_fifo_pcm_open(struct snd_pcm_substream *ss)
return ret;
}
+EXPORT_SYMBOL_GPL(axg_fifo_pcm_open);
-static int axg_fifo_pcm_close(struct snd_pcm_substream *ss)
+int axg_fifo_pcm_close(struct snd_soc_component *component,
+ struct snd_pcm_substream *ss)
{
struct axg_fifo *fifo = axg_fifo_data(ss);
int ret;
@@ -267,28 +280,7 @@ static int axg_fifo_pcm_close(struct snd_pcm_substream *ss)
return ret;
}
-
-const struct snd_pcm_ops axg_fifo_pcm_ops = {
- .open = axg_fifo_pcm_open,
- .close = axg_fifo_pcm_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = axg_fifo_pcm_hw_params,
- .hw_free = axg_fifo_pcm_hw_free,
- .pointer = axg_fifo_pcm_pointer,
- .trigger = axg_fifo_pcm_trigger,
-};
-EXPORT_SYMBOL_GPL(axg_fifo_pcm_ops);
-
-const struct snd_pcm_ops g12a_fifo_pcm_ops = {
- .open = axg_fifo_pcm_open,
- .close = axg_fifo_pcm_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = g12a_fifo_pcm_hw_params,
- .hw_free = axg_fifo_pcm_hw_free,
- .pointer = axg_fifo_pcm_pointer,
- .trigger = axg_fifo_pcm_trigger,
-};
-EXPORT_SYMBOL_GPL(g12a_fifo_pcm_ops);
+EXPORT_SYMBOL_GPL(axg_fifo_pcm_close);
int axg_fifo_pcm_new(struct snd_soc_pcm_runtime *rtd, unsigned int type)
{
diff --git a/sound/soc/meson/axg-fifo.h b/sound/soc/meson/axg-fifo.h
index bb1e2ce50256..cf928d43b558 100644
--- a/sound/soc/meson/axg-fifo.h
+++ b/sound/soc/meson/axg-fifo.h
@@ -15,7 +15,7 @@ struct reset_control;
struct snd_soc_component_driver;
struct snd_soc_dai;
struct snd_soc_dai_driver;
-struct snd_pcm_ops;
+
struct snd_soc_pcm_runtime;
#define AXG_FIFO_CH_MAX 128
@@ -75,8 +75,22 @@ struct axg_fifo_match_data {
struct snd_soc_dai_driver *dai_drv;
};
-extern const struct snd_pcm_ops axg_fifo_pcm_ops;
-extern const struct snd_pcm_ops g12a_fifo_pcm_ops;
+int axg_fifo_pcm_open(struct snd_soc_component *component,
+ struct snd_pcm_substream *ss);
+int axg_fifo_pcm_close(struct snd_soc_component *component,
+ struct snd_pcm_substream *ss);
+int axg_fifo_pcm_hw_params(struct snd_soc_component *component,
+ struct snd_pcm_substream *ss,
+ struct snd_pcm_hw_params *params);
+int g12a_fifo_pcm_hw_params(struct snd_soc_component *component,
+ struct snd_pcm_substream *ss,
+ struct snd_pcm_hw_params *params);
+int axg_fifo_pcm_hw_free(struct snd_soc_component *component,
+ struct snd_pcm_substream *ss);
+snd_pcm_uframes_t axg_fifo_pcm_pointer(struct snd_soc_component *component,
+ struct snd_pcm_substream *ss);
+int axg_fifo_pcm_trigger(struct snd_soc_component *component,
+ struct snd_pcm_substream *ss, int cmd);
int axg_fifo_pcm_new(struct snd_soc_pcm_runtime *rtd, unsigned int type);
int axg_fifo_probe(struct platform_device *pdev);
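
Of the two ops tables removed above, everything but hw_params was shared between the AXG and G12A FIFOs, so the callbacks are exported and each component driver below spells the set out inline. A sketch of the resulting composition (driver name illustrative):

	static const struct snd_soc_component_driver example_fifo_drv = {
		.open      = axg_fifo_pcm_open,
		.close     = axg_fifo_pcm_close,
		.ioctl     = snd_soc_pcm_lib_ioctl,
		.hw_params = g12a_fifo_pcm_hw_params,  /* axg_fifo_pcm_hw_params on AXG */
		.hw_free   = axg_fifo_pcm_hw_free,
		.pointer   = axg_fifo_pcm_pointer,
		.trigger   = axg_fifo_pcm_trigger,
	};

Note that g12a_fifo_pcm_hw_params() chains to axg_fifo_pcm_hw_params() and only adds the G12A-specific init-address programming, so the split costs no duplication.
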
diff --git a/sound/soc/meson/axg-frddr.c b/sound/soc/meson/axg-frddr.c
index 6ab111c31b28..665d75d49d7b 100644
--- a/sound/soc/meson/axg-frddr.c
+++ b/sound/soc/meson/axg-frddr.c
@@ -149,7 +149,13 @@ static const struct snd_soc_component_driver axg_frddr_component_drv = {
.num_dapm_widgets = ARRAY_SIZE(axg_frddr_dapm_widgets),
.dapm_routes = axg_frddr_dapm_routes,
.num_dapm_routes = ARRAY_SIZE(axg_frddr_dapm_routes),
- .ops = &axg_fifo_pcm_ops
+ .open = axg_fifo_pcm_open,
+ .close = axg_fifo_pcm_close,
+ .ioctl = snd_soc_pcm_lib_ioctl,
+ .hw_params = axg_fifo_pcm_hw_params,
+ .hw_free = axg_fifo_pcm_hw_free,
+ .pointer = axg_fifo_pcm_pointer,
+ .trigger = axg_fifo_pcm_trigger,
};
static const struct axg_fifo_match_data axg_frddr_match_data = {
@@ -267,7 +273,13 @@ static const struct snd_soc_component_driver g12a_frddr_component_drv = {
.num_dapm_widgets = ARRAY_SIZE(g12a_frddr_dapm_widgets),
.dapm_routes = g12a_frddr_dapm_routes,
.num_dapm_routes = ARRAY_SIZE(g12a_frddr_dapm_routes),
- .ops = &g12a_fifo_pcm_ops
+ .open = axg_fifo_pcm_open,
+ .close = axg_fifo_pcm_close,
+ .ioctl = snd_soc_pcm_lib_ioctl,
+ .hw_params = g12a_fifo_pcm_hw_params,
+ .hw_free = axg_fifo_pcm_hw_free,
+ .pointer = axg_fifo_pcm_pointer,
+ .trigger = axg_fifo_pcm_trigger,
};
static const struct axg_fifo_match_data g12a_frddr_match_data = {
@@ -331,7 +343,13 @@ static const struct snd_soc_component_driver sm1_frddr_component_drv = {
.num_dapm_widgets = ARRAY_SIZE(sm1_frddr_dapm_widgets),
.dapm_routes = g12a_frddr_dapm_routes,
.num_dapm_routes = ARRAY_SIZE(g12a_frddr_dapm_routes),
- .ops = &g12a_fifo_pcm_ops
+ .open = axg_fifo_pcm_open,
+ .close = axg_fifo_pcm_close,
+ .ioctl = snd_soc_pcm_lib_ioctl,
+ .hw_params = g12a_fifo_pcm_hw_params,
+ .hw_free = axg_fifo_pcm_hw_free,
+ .pointer = axg_fifo_pcm_pointer,
+ .trigger = axg_fifo_pcm_trigger,
};
static const struct axg_fifo_match_data sm1_frddr_match_data = {
diff --git a/sound/soc/meson/axg-toddr.c b/sound/soc/meson/axg-toddr.c
index c8ea2145f576..7fef0b961496 100644
--- a/sound/soc/meson/axg-toddr.c
+++ b/sound/soc/meson/axg-toddr.c
@@ -181,7 +181,13 @@ static const struct snd_soc_component_driver axg_toddr_component_drv = {
.num_dapm_widgets = ARRAY_SIZE(axg_toddr_dapm_widgets),
.dapm_routes = axg_toddr_dapm_routes,
.num_dapm_routes = ARRAY_SIZE(axg_toddr_dapm_routes),
- .ops = &axg_fifo_pcm_ops
+ .open = axg_fifo_pcm_open,
+ .close = axg_fifo_pcm_close,
+ .ioctl = snd_soc_pcm_lib_ioctl,
+ .hw_params = axg_fifo_pcm_hw_params,
+ .hw_free = axg_fifo_pcm_hw_free,
+ .pointer = axg_fifo_pcm_pointer,
+ .trigger = axg_fifo_pcm_trigger,
};
static const struct axg_fifo_match_data axg_toddr_match_data = {
@@ -214,7 +220,13 @@ static const struct snd_soc_component_driver g12a_toddr_component_drv = {
.num_dapm_widgets = ARRAY_SIZE(axg_toddr_dapm_widgets),
.dapm_routes = axg_toddr_dapm_routes,
.num_dapm_routes = ARRAY_SIZE(axg_toddr_dapm_routes),
- .ops = &g12a_fifo_pcm_ops
+ .open = axg_fifo_pcm_open,
+ .close = axg_fifo_pcm_close,
+ .ioctl = snd_soc_pcm_lib_ioctl,
+ .hw_params = g12a_fifo_pcm_hw_params,
+ .hw_free = axg_fifo_pcm_hw_free,
+ .pointer = axg_fifo_pcm_pointer,
+ .trigger = axg_fifo_pcm_trigger,
};
static const struct axg_fifo_match_data g12a_toddr_match_data = {
@@ -278,7 +290,13 @@ static const struct snd_soc_component_driver sm1_toddr_component_drv = {
.num_dapm_widgets = ARRAY_SIZE(sm1_toddr_dapm_widgets),
.dapm_routes = sm1_toddr_dapm_routes,
.num_dapm_routes = ARRAY_SIZE(sm1_toddr_dapm_routes),
- .ops = &g12a_fifo_pcm_ops
+ .open = axg_fifo_pcm_open,
+ .close = axg_fifo_pcm_close,
+ .ioctl = snd_soc_pcm_lib_ioctl,
+ .hw_params = g12a_fifo_pcm_hw_params,
+ .hw_free = axg_fifo_pcm_hw_free,
+ .pointer = axg_fifo_pcm_pointer,
+ .trigger = axg_fifo_pcm_trigger,
};
static const struct axg_fifo_match_data sm1_toddr_match_data = {
diff --git a/sound/soc/pxa/Kconfig b/sound/soc/pxa/Kconfig
index 213d4dab0346..295cfffa4646 100644
--- a/sound/soc/pxa/Kconfig
+++ b/sound/soc/pxa/Kconfig
@@ -190,14 +190,14 @@ config SND_PXA2XX_SOC_MAGICIAN
HTC Magician.
config SND_PXA2XX_SOC_MIOA701
- tristate "SoC Audio support for MIO A701"
- depends on SND_PXA2XX_SOC && MACH_MIOA701
+ tristate "SoC Audio support for MIO A701"
+ depends on SND_PXA2XX_SOC && MACH_MIOA701
depends on AC97_BUS=n
- select SND_PXA2XX_SOC_AC97
- select SND_SOC_WM9713
- help
- Say Y if you want to add support for SoC audio on the
- MIO A701.
+ select SND_PXA2XX_SOC_AC97
+ select SND_SOC_WM9713
+ help
+ Say Y if you want to add support for SoC audio on the
+ MIO A701.
config SND_PXA2XX_SOC_IMOTE2
tristate "SoC Audio support for IMote 2"
@@ -205,7 +205,7 @@ config SND_PXA2XX_SOC_IMOTE2
select SND_PXA2XX_SOC_I2S
select SND_SOC_WM8940
help
- Say Y if you want to add support for SoC audio on the
+ Say Y if you want to add support for SoC audio on the
IMote 2.
config SND_MMP_SOC_BROWNSTONE
diff --git a/sound/soc/pxa/mmp-pcm.c b/sound/soc/pxa/mmp-pcm.c
index 7096b5263e25..54a4c9213e83 100644
--- a/sound/soc/pxa/mmp-pcm.c
+++ b/sound/soc/pxa/mmp-pcm.c
@@ -55,8 +55,9 @@ static struct snd_pcm_hardware mmp_pcm_hardware[] = {
},
};
-static int mmp_pcm_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params)
+static int mmp_pcm_hw_params(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
{
struct dma_chan *chan = snd_dmaengine_pcm_get_chan(substream);
struct dma_slave_config slave_config;
@@ -77,6 +78,18 @@ static int mmp_pcm_hw_params(struct snd_pcm_substream *substream,
return 0;
}
+static int mmp_pcm_trigger(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream, int cmd)
+{
+ return snd_dmaengine_pcm_trigger(substream, cmd);
+}
+
+static snd_pcm_uframes_t mmp_pcm_pointer(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
+{
+ return snd_dmaengine_pcm_pointer(substream);
+}
+
static bool filter(struct dma_chan *chan, void *param)
{
struct mmp_dma_data *dma_data = param;
@@ -94,10 +107,10 @@ static bool filter(struct dma_chan *chan, void *param)
return found;
}
-static int mmp_pcm_open(struct snd_pcm_substream *substream)
+static int mmp_pcm_open(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct platform_device *pdev = to_platform_device(component->dev);
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
struct mmp_dma_data dma_data;
@@ -117,8 +130,15 @@ static int mmp_pcm_open(struct snd_pcm_substream *substream)
&dma_data);
}
-static int mmp_pcm_mmap(struct snd_pcm_substream *substream,
- struct vm_area_struct *vma)
+static int mmp_pcm_close(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
+{
+ return snd_dmaengine_pcm_close_release_chan(substream);
+}
+
+static int mmp_pcm_mmap(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ struct vm_area_struct *vma)
{
struct snd_pcm_runtime *runtime = substream->runtime;
unsigned long off = vma->vm_pgoff;
@@ -129,17 +149,8 @@ static int mmp_pcm_mmap(struct snd_pcm_substream *substream,
vma->vm_end - vma->vm_start, vma->vm_page_prot);
}
-static const struct snd_pcm_ops mmp_pcm_ops = {
- .open = mmp_pcm_open,
- .close = snd_dmaengine_pcm_close_release_chan,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = mmp_pcm_hw_params,
- .trigger = snd_dmaengine_pcm_trigger,
- .pointer = snd_dmaengine_pcm_pointer,
- .mmap = mmp_pcm_mmap,
-};
-
-static void mmp_pcm_free_dma_buffers(struct snd_pcm *pcm)
+static void mmp_pcm_free_dma_buffers(struct snd_soc_component *component,
+ struct snd_pcm *pcm)
{
struct snd_pcm_substream *substream;
struct snd_dma_buffer *buf;
@@ -188,7 +199,8 @@ static int mmp_pcm_preallocate_dma_buffer(struct snd_pcm_substream *substream,
return 0;
}
-static int mmp_pcm_new(struct snd_soc_pcm_runtime *rtd)
+static int mmp_pcm_new(struct snd_soc_component *component,
+ struct snd_soc_pcm_runtime *rtd)
{
struct snd_pcm_substream *substream;
struct snd_pcm *pcm = rtd->pcm;
@@ -205,15 +217,21 @@ static int mmp_pcm_new(struct snd_soc_pcm_runtime *rtd)
return 0;
err:
- mmp_pcm_free_dma_buffers(pcm);
+ mmp_pcm_free_dma_buffers(component, pcm);
return ret;
}
static const struct snd_soc_component_driver mmp_soc_component = {
.name = DRV_NAME,
- .ops = &mmp_pcm_ops,
- .pcm_new = mmp_pcm_new,
- .pcm_free = mmp_pcm_free_dma_buffers,
+ .open = mmp_pcm_open,
+ .close = mmp_pcm_close,
+ .ioctl = snd_soc_pcm_lib_ioctl,
+ .hw_params = mmp_pcm_hw_params,
+ .trigger = mmp_pcm_trigger,
+ .pointer = mmp_pcm_pointer,
+ .mmap = mmp_pcm_mmap,
+ .pcm_construct = mmp_pcm_new,
+ .pcm_destruct = mmp_pcm_free_dma_buffers,
};
static int mmp_pcm_probe(struct platform_device *pdev)
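
A wrinkle of the new prototypes shows up in this file: generic helpers such as snd_dmaengine_pcm_trigger(), snd_dmaengine_pcm_pointer() and snd_dmaengine_pcm_close_release_chan() do not take a component argument, so they can no longer be assigned to the component driver directly and instead get one-line wrappers of this shape:

	static int foo_trigger(struct snd_soc_component *component,
			       struct snd_pcm_substream *substream, int cmd)
	{
		/* component is unused; forward to the generic dmaengine helper */
		return snd_dmaengine_pcm_trigger(substream, cmd);
	}
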
diff --git a/sound/soc/pxa/mmp-sspa.c b/sound/soc/pxa/mmp-sspa.c
index e3e5425b5c62..e701637a9ae9 100644
--- a/sound/soc/pxa/mmp-sspa.c
+++ b/sound/soc/pxa/mmp-sspa.c
@@ -177,7 +177,7 @@ static int mmp_sspa_set_dai_fmt(struct snd_soc_dai *cpu_dai,
/* we can only change the settings if the port is not in use */
if ((mmp_sspa_read_reg(sspa, SSPA_TXSP) & SSPA_SP_S_EN) ||
(mmp_sspa_read_reg(sspa, SSPA_RXSP) & SSPA_SP_S_EN)) {
- dev_err(&sspa->pdev->dev,
+ dev_err(sspa->dev,
"can't change hardware dai format: stream is in use\n");
return -EINVAL;
}
diff --git a/sound/soc/pxa/poodle.c b/sound/soc/pxa/poodle.c
index 48d5c2252b10..59ef04d0467a 100644
--- a/sound/soc/pxa/poodle.c
+++ b/sound/soc/pxa/poodle.c
@@ -56,7 +56,7 @@ static void poodle_ext_control(struct snd_soc_dapm_context *dapm)
snd_soc_dapm_disable_pin(dapm, "Headphone Jack");
}
- /* set the enpoints to their new connetion states */
+ /* set the endpoints to their new connection states */
if (poodle_spk_func == POODLE_SPK_ON)
snd_soc_dapm_enable_pin(dapm, "Ext Spk");
else
diff --git a/sound/soc/pxa/pxa-ssp.c b/sound/soc/pxa/pxa-ssp.c
index 5fdd1a24c232..76fdce54f007 100644
--- a/sound/soc/pxa/pxa-ssp.c
+++ b/sound/soc/pxa/pxa-ssp.c
@@ -52,11 +52,11 @@ struct ssp_priv {
static void dump_registers(struct ssp_device *ssp)
{
- dev_dbg(&ssp->pdev->dev, "SSCR0 0x%08x SSCR1 0x%08x SSTO 0x%08x\n",
+ dev_dbg(ssp->dev, "SSCR0 0x%08x SSCR1 0x%08x SSTO 0x%08x\n",
pxa_ssp_read_reg(ssp, SSCR0), pxa_ssp_read_reg(ssp, SSCR1),
pxa_ssp_read_reg(ssp, SSTO));
- dev_dbg(&ssp->pdev->dev, "SSPSP 0x%08x SSSR 0x%08x SSACD 0x%08x\n",
+ dev_dbg(ssp->dev, "SSPSP 0x%08x SSSR 0x%08x SSACD 0x%08x\n",
pxa_ssp_read_reg(ssp, SSPSP), pxa_ssp_read_reg(ssp, SSSR),
pxa_ssp_read_reg(ssp, SSACD));
}
@@ -223,7 +223,7 @@ static int pxa_ssp_set_dai_sysclk(struct snd_soc_dai *cpu_dai,
clk_id = PXA_SSP_CLK_EXT;
}
- dev_dbg(&ssp->pdev->dev,
+ dev_dbg(ssp->dev,
"pxa_ssp_set_dai_sysclk id: %d, clk_id %d, freq %u\n",
cpu_dai->id, clk_id, freq);
@@ -316,7 +316,7 @@ static int pxa_ssp_set_pll(struct ssp_priv *priv, unsigned int freq)
ssacd |= (0x6 << 4);
- dev_dbg(&ssp->pdev->dev,
+ dev_dbg(ssp->dev,
"Using SSACDD %x to supply %uHz\n",
val, freq);
break;
@@ -687,7 +687,7 @@ static int pxa_ssp_hw_params(struct snd_pcm_substream *substream,
* - complain loudly and fail if they've not been set up yet.
*/
if ((sscr0 & SSCR0_MOD) && !ttsa) {
- dev_err(&ssp->pdev->dev, "No TDM timeslot configured\n");
+ dev_err(ssp->dev, "No TDM timeslot configured\n");
return -EINVAL;
}
@@ -869,9 +869,17 @@ static struct snd_soc_dai_driver pxa_ssp_dai = {
static const struct snd_soc_component_driver pxa_ssp_component = {
.name = "pxa-ssp",
- .ops = &pxa2xx_pcm_ops,
- .pcm_new = pxa2xx_soc_pcm_new,
- .pcm_free = pxa2xx_pcm_free_dma_buffers,
+ .pcm_construct = pxa2xx_soc_pcm_new,
+ .pcm_destruct = pxa2xx_soc_pcm_free,
+ .open = pxa2xx_soc_pcm_open,
+ .close = pxa2xx_soc_pcm_close,
+ .ioctl = snd_soc_pcm_lib_ioctl,
+ .hw_params = pxa2xx_soc_pcm_hw_params,
+ .hw_free = pxa2xx_soc_pcm_hw_free,
+ .prepare = pxa2xx_soc_pcm_prepare,
+ .trigger = pxa2xx_soc_pcm_trigger,
+ .pointer = pxa2xx_soc_pcm_pointer,
+ .mmap = pxa2xx_soc_pcm_mmap,
};
#ifdef CONFIG_OF
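
The pxa-ssp hunks above also switch all diagnostics from &ssp->pdev->dev to ssp->dev. That presumes struct ssp_device carries its own device pointer; the field is inferred here from its use in the patch, since the companion change to the SSP header is not part of this dump:

	/* assumed shape, inferred from the ssp->dev uses above */
	struct ssp_device {
		struct platform_device *pdev;
		struct device *dev;  /* handle used for dev_dbg()/dev_err() */
		/* ... */
	};
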
diff --git a/sound/soc/pxa/pxa2xx-ac97.c b/sound/soc/pxa/pxa2xx-ac97.c
index bf28187315db..31e81a6f616f 100644
--- a/sound/soc/pxa/pxa2xx-ac97.c
+++ b/sound/soc/pxa/pxa2xx-ac97.c
@@ -204,9 +204,17 @@ static struct snd_soc_dai_driver pxa_ac97_dai_driver[] = {
static const struct snd_soc_component_driver pxa_ac97_component = {
.name = "pxa-ac97",
- .ops = &pxa2xx_pcm_ops,
- .pcm_new = pxa2xx_soc_pcm_new,
- .pcm_free = pxa2xx_pcm_free_dma_buffers,
+ .pcm_construct = pxa2xx_soc_pcm_new,
+ .pcm_destruct = pxa2xx_soc_pcm_free,
+ .open = pxa2xx_soc_pcm_open,
+ .close = pxa2xx_soc_pcm_close,
+ .ioctl = snd_soc_pcm_lib_ioctl,
+ .hw_params = pxa2xx_soc_pcm_hw_params,
+ .hw_free = pxa2xx_soc_pcm_hw_free,
+ .prepare = pxa2xx_soc_pcm_prepare,
+ .trigger = pxa2xx_soc_pcm_trigger,
+ .pointer = pxa2xx_soc_pcm_pointer,
+ .mmap = pxa2xx_soc_pcm_mmap,
};
#ifdef CONFIG_OF
diff --git a/sound/soc/pxa/pxa2xx-i2s.c b/sound/soc/pxa/pxa2xx-i2s.c
index 9f7fb7335ac0..e77d707efde7 100644
--- a/sound/soc/pxa/pxa2xx-i2s.c
+++ b/sound/soc/pxa/pxa2xx-i2s.c
@@ -360,9 +360,17 @@ static struct snd_soc_dai_driver pxa_i2s_dai = {
static const struct snd_soc_component_driver pxa_i2s_component = {
.name = "pxa-i2s",
- .ops = &pxa2xx_pcm_ops,
- .pcm_new = pxa2xx_soc_pcm_new,
- .pcm_free = pxa2xx_pcm_free_dma_buffers,
+ .pcm_construct = pxa2xx_soc_pcm_new,
+ .pcm_destruct = pxa2xx_soc_pcm_free,
+ .open = pxa2xx_soc_pcm_open,
+ .close = pxa2xx_soc_pcm_close,
+ .ioctl = snd_soc_pcm_lib_ioctl,
+ .hw_params = pxa2xx_soc_pcm_hw_params,
+ .hw_free = pxa2xx_soc_pcm_hw_free,
+ .prepare = pxa2xx_soc_pcm_prepare,
+ .trigger = pxa2xx_soc_pcm_trigger,
+ .pointer = pxa2xx_soc_pcm_pointer,
+ .mmap = pxa2xx_soc_pcm_mmap,
};
static int pxa2xx_i2s_drv_probe(struct platform_device *pdev)
diff --git a/sound/soc/pxa/pxa2xx-pcm.c b/sound/soc/pxa/pxa2xx-pcm.c
index 74b56fa0870f..07b3455a6f23 100644
--- a/sound/soc/pxa/pxa2xx-pcm.c
+++ b/sound/soc/pxa/pxa2xx-pcm.c
@@ -18,9 +18,17 @@
#include <sound/dmaengine_pcm.h>
static const struct snd_soc_component_driver pxa2xx_soc_platform = {
- .ops = &pxa2xx_pcm_ops,
- .pcm_new = pxa2xx_soc_pcm_new,
- .pcm_free = pxa2xx_pcm_free_dma_buffers,
+ .pcm_construct = pxa2xx_soc_pcm_new,
+ .pcm_destruct = pxa2xx_soc_pcm_free,
+ .open = pxa2xx_soc_pcm_open,
+ .close = pxa2xx_soc_pcm_close,
+ .ioctl = snd_soc_pcm_lib_ioctl,
+ .hw_params = pxa2xx_soc_pcm_hw_params,
+ .hw_free = pxa2xx_soc_pcm_hw_free,
+ .prepare = pxa2xx_soc_pcm_prepare,
+ .trigger = pxa2xx_soc_pcm_trigger,
+ .pointer = pxa2xx_soc_pcm_pointer,
+ .mmap = pxa2xx_soc_pcm_mmap,
};
static int pxa2xx_soc_platform_probe(struct platform_device *pdev)
diff --git a/sound/soc/qcom/Kconfig b/sound/soc/qcom/Kconfig
index 60086858e920..6530d2462a9e 100644
--- a/sound/soc/qcom/Kconfig
+++ b/sound/soc/qcom/Kconfig
@@ -3,8 +3,8 @@ config SND_SOC_QCOM
tristate "ASoC support for QCOM platforms"
depends on ARCH_QCOM || COMPILE_TEST
help
- Say Y or M if you want to add support to use audio devices
- in Qualcomm Technologies SOC-based platforms.
+ Say Y or M if you want to add support to use audio devices
+ in Qualcomm Technologies SOC-based platforms.
config SND_SOC_LPASS_CPU
tristate
@@ -30,17 +30,17 @@ config SND_SOC_STORM
select SND_SOC_LPASS_IPQ806X
select SND_SOC_MAX98357A
help
- Say Y or M if you want add support for SoC audio on the
- Qualcomm Technologies IPQ806X-based Storm board.
+ Say Y or M if you want to add support for SoC audio on the
+ Qualcomm Technologies IPQ806X-based Storm board.
config SND_SOC_APQ8016_SBC
tristate "SoC Audio support for APQ8016 SBC platforms"
depends on SND_SOC_QCOM
select SND_SOC_LPASS_APQ8016
help
- Support for Qualcomm Technologies LPASS audio block in
- APQ8016 SOC-based systems.
- Say Y if you want to use audio devices on MI2S.
+ Support for Qualcomm Technologies LPASS audio block in
+ APQ8016 SOC-based systems.
+ Say Y if you want to use audio devices on MI2S.
config SND_SOC_QCOM_COMMON
tristate
@@ -93,9 +93,9 @@ config SND_SOC_MSM8996
select SND_SOC_QDSP6
select SND_SOC_QCOM_COMMON
help
- Support for Qualcomm Technologies LPASS audio block in
- APQ8096 SoC-based systems.
- Say Y if you want to use audio device on this SoCs
+ Support for Qualcomm Technologies LPASS audio block in
+ APQ8096 SoC-based systems.
+ Say Y if you want to use audio devices on this SoC.
config SND_SOC_SDM845
tristate "SoC Machine driver for SDM845 boards"
diff --git a/sound/soc/qcom/lpass-platform.c b/sound/soc/qcom/lpass-platform.c
index 4c745baa39f7..2e8892316423 100644
--- a/sound/soc/qcom/lpass-platform.c
+++ b/sound/soc/qcom/lpass-platform.c
@@ -50,12 +50,12 @@ static const struct snd_pcm_hardware lpass_platform_pcm_hardware = {
.fifo_size = 0,
};
-static int lpass_platform_pcmops_open(struct snd_pcm_substream *substream)
+static int lpass_platform_pcmops_open(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
struct snd_soc_dai *cpu_dai = soc_runtime->cpu_dai;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(soc_runtime, DRV_NAME);
struct lpass_data *drvdata = snd_soc_component_get_drvdata(component);
struct lpass_variant *v = drvdata->variant;
int ret, dma_ch, dir = substream->stream;
@@ -105,11 +105,10 @@ static int lpass_platform_pcmops_open(struct snd_pcm_substream *substream)
return 0;
}
-static int lpass_platform_pcmops_close(struct snd_pcm_substream *substream)
+static int lpass_platform_pcmops_close(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
- struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(soc_runtime, DRV_NAME);
struct lpass_data *drvdata = snd_soc_component_get_drvdata(component);
struct lpass_variant *v = drvdata->variant;
struct lpass_pcm_data *data;
@@ -122,11 +121,11 @@ static int lpass_platform_pcmops_close(struct snd_pcm_substream *substream)
return 0;
}
-static int lpass_platform_pcmops_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params)
+static int lpass_platform_pcmops_hw_params(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(soc_runtime, DRV_NAME);
struct lpass_data *drvdata = snd_soc_component_get_drvdata(component);
struct snd_pcm_runtime *rt = substream->runtime;
struct lpass_pcm_data *pcm_data = rt->private_data;
@@ -216,10 +215,10 @@ static int lpass_platform_pcmops_hw_params(struct snd_pcm_substream *substream,
return 0;
}
-static int lpass_platform_pcmops_hw_free(struct snd_pcm_substream *substream)
+static int lpass_platform_pcmops_hw_free(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(soc_runtime, DRV_NAME);
struct lpass_data *drvdata = snd_soc_component_get_drvdata(component);
struct snd_pcm_runtime *rt = substream->runtime;
struct lpass_pcm_data *pcm_data = rt->private_data;
@@ -236,11 +235,11 @@ static int lpass_platform_pcmops_hw_free(struct snd_pcm_substream *substream)
return ret;
}
-static int lpass_platform_pcmops_prepare(struct snd_pcm_substream *substream)
+static int lpass_platform_pcmops_prepare(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(soc_runtime, DRV_NAME);
struct lpass_data *drvdata = snd_soc_component_get_drvdata(component);
struct snd_pcm_runtime *rt = substream->runtime;
struct lpass_pcm_data *pcm_data = rt->private_data;
@@ -288,11 +287,11 @@ static int lpass_platform_pcmops_prepare(struct snd_pcm_substream *substream)
return 0;
}
-static int lpass_platform_pcmops_trigger(struct snd_pcm_substream *substream,
- int cmd)
+static int lpass_platform_pcmops_trigger(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ int cmd)
{
struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(soc_runtime, DRV_NAME);
struct lpass_data *drvdata = snd_soc_component_get_drvdata(component);
struct snd_pcm_runtime *rt = substream->runtime;
struct lpass_pcm_data *pcm_data = rt->private_data;
@@ -363,10 +362,10 @@ static int lpass_platform_pcmops_trigger(struct snd_pcm_substream *substream,
}
static snd_pcm_uframes_t lpass_platform_pcmops_pointer(
+ struct snd_soc_component *component,
struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(soc_runtime, DRV_NAME);
struct lpass_data *drvdata = snd_soc_component_get_drvdata(component);
struct snd_pcm_runtime *rt = substream->runtime;
struct lpass_pcm_data *pcm_data = rt->private_data;
@@ -395,8 +394,9 @@ static snd_pcm_uframes_t lpass_platform_pcmops_pointer(
return bytes_to_frames(substream->runtime, curr_addr - base_addr);
}
-static int lpass_platform_pcmops_mmap(struct snd_pcm_substream *substream,
- struct vm_area_struct *vma)
+static int lpass_platform_pcmops_mmap(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ struct vm_area_struct *vma)
{
struct snd_pcm_runtime *runtime = substream->runtime;
@@ -405,18 +405,6 @@ static int lpass_platform_pcmops_mmap(struct snd_pcm_substream *substream,
runtime->dma_bytes);
}
-static const struct snd_pcm_ops lpass_platform_pcm_ops = {
- .open = lpass_platform_pcmops_open,
- .close = lpass_platform_pcmops_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = lpass_platform_pcmops_hw_params,
- .hw_free = lpass_platform_pcmops_hw_free,
- .prepare = lpass_platform_pcmops_prepare,
- .trigger = lpass_platform_pcmops_trigger,
- .pointer = lpass_platform_pcmops_pointer,
- .mmap = lpass_platform_pcmops_mmap,
-};
-
static irqreturn_t lpass_dma_interrupt_handler(
struct snd_pcm_substream *substream,
struct lpass_data *drvdata,
@@ -499,11 +487,11 @@ static irqreturn_t lpass_platform_lpaif_irq(int irq, void *data)
return IRQ_HANDLED;
}
-static int lpass_platform_pcm_new(struct snd_soc_pcm_runtime *soc_runtime)
+static int lpass_platform_pcm_new(struct snd_soc_component *component,
+ struct snd_soc_pcm_runtime *soc_runtime)
{
struct snd_pcm *pcm = soc_runtime->pcm;
struct snd_pcm_substream *psubstream, *csubstream;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(soc_runtime, DRV_NAME);
int ret = -EINVAL;
size_t size = lpass_platform_pcm_hardware.buffer_bytes_max;
@@ -535,7 +523,8 @@ static int lpass_platform_pcm_new(struct snd_soc_pcm_runtime *soc_runtime)
return 0;
}
-static void lpass_platform_pcm_free(struct snd_pcm *pcm)
+static void lpass_platform_pcm_free(struct snd_soc_component *component,
+ struct snd_pcm *pcm)
{
struct snd_pcm_substream *substream;
int i;
@@ -552,9 +541,18 @@ static void lpass_platform_pcm_free(struct snd_pcm *pcm)
static const struct snd_soc_component_driver lpass_component_driver = {
.name = DRV_NAME,
- .pcm_new = lpass_platform_pcm_new,
- .pcm_free = lpass_platform_pcm_free,
- .ops = &lpass_platform_pcm_ops,
+ .open = lpass_platform_pcmops_open,
+ .close = lpass_platform_pcmops_close,
+ .ioctl = snd_soc_pcm_lib_ioctl,
+ .hw_params = lpass_platform_pcmops_hw_params,
+ .hw_free = lpass_platform_pcmops_hw_free,
+ .prepare = lpass_platform_pcmops_prepare,
+ .trigger = lpass_platform_pcmops_trigger,
+ .pointer = lpass_platform_pcmops_pointer,
+ .mmap = lpass_platform_pcmops_mmap,
+ .pcm_construct = lpass_platform_pcm_new,
+ .pcm_destruct = lpass_platform_pcm_free,
+
};
int asoc_qcom_lpass_platform_register(struct platform_device *pdev)
diff --git a/sound/soc/qcom/qdsp6/q6asm-dai.c b/sound/soc/qcom/qdsp6/q6asm-dai.c
index 548eb4fa2da6..8150c10f081e 100644
--- a/sound/soc/qcom/qdsp6/q6asm-dai.c
+++ b/sound/soc/qcom/qdsp6/q6asm-dai.c
@@ -206,16 +206,16 @@ static void event_handler(uint32_t opcode, uint32_t token,
}
}
-static int q6asm_dai_prepare(struct snd_pcm_substream *substream)
+static int q6asm_dai_prepare(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_soc_pcm_runtime *soc_prtd = substream->private_data;
struct q6asm_dai_rtd *prtd = runtime->private_data;
- struct snd_soc_component *c = snd_soc_rtdcom_lookup(soc_prtd, DRV_NAME);
struct q6asm_dai_data *pdata;
int ret, i;
- pdata = snd_soc_component_get_drvdata(c);
+ pdata = snd_soc_component_get_drvdata(component);
if (!pdata)
return -EINVAL;
@@ -294,7 +294,8 @@ static int q6asm_dai_prepare(struct snd_pcm_substream *substream)
return 0;
}
-static int q6asm_dai_trigger(struct snd_pcm_substream *substream, int cmd)
+static int q6asm_dai_trigger(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream, int cmd)
{
int ret = 0;
struct snd_pcm_runtime *runtime = substream->runtime;
@@ -322,21 +323,21 @@ static int q6asm_dai_trigger(struct snd_pcm_substream *substream, int cmd)
return ret;
}
-static int q6asm_dai_open(struct snd_pcm_substream *substream)
+static int q6asm_dai_open(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_soc_pcm_runtime *soc_prtd = substream->private_data;
struct snd_soc_dai *cpu_dai = soc_prtd->cpu_dai;
- struct snd_soc_component *c = snd_soc_rtdcom_lookup(soc_prtd, DRV_NAME);
struct q6asm_dai_rtd *prtd;
struct q6asm_dai_data *pdata;
- struct device *dev = c->dev;
+ struct device *dev = component->dev;
int ret = 0;
int stream_id;
stream_id = cpu_dai->driver->id;
- pdata = snd_soc_component_get_drvdata(c);
+ pdata = snd_soc_component_get_drvdata(component);
if (!pdata) {
pr_err("Drv data not found ..\n");
return -EINVAL;
@@ -414,7 +415,8 @@ static int q6asm_dai_open(struct snd_pcm_substream *substream)
return 0;
}
-static int q6asm_dai_close(struct snd_pcm_substream *substream)
+static int q6asm_dai_close(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_soc_pcm_runtime *soc_prtd = substream->private_data;
@@ -435,7 +437,8 @@ static int q6asm_dai_close(struct snd_pcm_substream *substream)
return 0;
}
-static snd_pcm_uframes_t q6asm_dai_pointer(struct snd_pcm_substream *substream)
+static snd_pcm_uframes_t q6asm_dai_pointer(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
@@ -447,22 +450,21 @@ static snd_pcm_uframes_t q6asm_dai_pointer(struct snd_pcm_substream *substream)
return bytes_to_frames(runtime, (prtd->pcm_irq_pos));
}
-static int q6asm_dai_mmap(struct snd_pcm_substream *substream,
- struct vm_area_struct *vma)
+static int q6asm_dai_mmap(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ struct vm_area_struct *vma)
{
-
struct snd_pcm_runtime *runtime = substream->runtime;
- struct snd_soc_pcm_runtime *soc_prtd = substream->private_data;
- struct snd_soc_component *c = snd_soc_rtdcom_lookup(soc_prtd, DRV_NAME);
- struct device *dev = c->dev;
+ struct device *dev = component->dev;
return dma_mmap_coherent(dev, vma,
runtime->dma_area, runtime->dma_addr,
runtime->dma_bytes);
}
-static int q6asm_dai_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params)
+static int q6asm_dai_hw_params(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct q6asm_dai_rtd *prtd = runtime->private_data;
@@ -482,17 +484,6 @@ static int q6asm_dai_hw_params(struct snd_pcm_substream *substream,
return 0;
}
-static struct snd_pcm_ops q6asm_dai_ops = {
- .open = q6asm_dai_open,
- .hw_params = q6asm_dai_hw_params,
- .close = q6asm_dai_close,
- .ioctl = snd_pcm_lib_ioctl,
- .prepare = q6asm_dai_prepare,
- .trigger = q6asm_dai_trigger,
- .pointer = q6asm_dai_pointer,
- .mmap = q6asm_dai_mmap,
-};
-
static void compress_event_handler(uint32_t opcode, uint32_t token,
uint32_t *payload, void *priv)
{
@@ -635,8 +626,14 @@ static int q6asm_dai_compr_set_params(struct snd_compr_stream *stream,
struct snd_soc_component *c = snd_soc_rtdcom_lookup(rtd, DRV_NAME);
int dir = stream->direction;
struct q6asm_dai_data *pdata;
+ struct q6asm_flac_cfg flac_cfg;
struct device *dev = c->dev;
int ret;
+ union snd_codec_options *codec_options;
+ struct snd_dec_flac *flac;
+
+ codec_options = &(prtd->codec_param.codec.options);
+
memcpy(&prtd->codec_param, params, sizeof(*params));
@@ -673,6 +670,32 @@ static int q6asm_dai_compr_set_params(struct snd_compr_stream *stream,
return ret;
}
+ switch (params->codec.id) {
+ case SND_AUDIOCODEC_FLAC:
+
+ memset(&flac_cfg, 0x0, sizeof(struct q6asm_flac_cfg));
+ flac = &codec_options->flac_d;
+
+ flac_cfg.ch_cfg = params->codec.ch_in;
+ flac_cfg.sample_rate = params->codec.sample_rate;
+ flac_cfg.stream_info_present = 1;
+ flac_cfg.sample_size = flac->sample_size;
+ flac_cfg.min_blk_size = flac->min_blk_size;
+ flac_cfg.max_blk_size = flac->max_blk_size;
+ flac_cfg.max_frame_size = flac->max_frame_size;
+ flac_cfg.min_frame_size = flac->min_frame_size;
+
+ ret = q6asm_stream_media_format_block_flac(prtd->audio_client,
+ &flac_cfg);
+ if (ret < 0) {
+ dev_err(dev, "FLAC CMD Format block failed:%d\n", ret);
+ return -EIO;
+ }
+ break;
+ default:
+ break;
+ }
+
ret = q6asm_map_memory_regions(dir, prtd->audio_client, prtd->phys,
(prtd->pcm_size / prtd->periods),
prtd->periods);
@@ -768,8 +791,9 @@ static int q6asm_dai_compr_get_caps(struct snd_compr_stream *stream,
caps->max_fragment_size = COMPR_PLAYBACK_MAX_FRAGMENT_SIZE;
caps->min_fragments = COMPR_PLAYBACK_MIN_NUM_FRAGMENTS;
caps->max_fragments = COMPR_PLAYBACK_MAX_NUM_FRAGMENTS;
- caps->num_codecs = 1;
+ caps->num_codecs = 2;
caps->codecs[0] = SND_AUDIOCODEC_MP3;
+ caps->codecs[1] = SND_AUDIOCODEC_FLAC;
return 0;
}
@@ -800,15 +824,15 @@ static struct snd_compr_ops q6asm_dai_compr_ops = {
.ack = q6asm_dai_compr_ack,
};
-static int q6asm_dai_pcm_new(struct snd_soc_pcm_runtime *rtd)
+static int q6asm_dai_pcm_new(struct snd_soc_component *component,
+ struct snd_soc_pcm_runtime *rtd)
{
struct snd_pcm_substream *psubstream, *csubstream;
- struct snd_soc_component *c = snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct snd_pcm *pcm = rtd->pcm;
struct device *dev;
int size, ret;
- dev = c->dev;
+ dev = component->dev;
size = q6asm_dai_hardware_playback.buffer_bytes_max;
psubstream = pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream;
if (psubstream) {
@@ -835,7 +859,8 @@ static int q6asm_dai_pcm_new(struct snd_soc_pcm_runtime *rtd)
return 0;
}
-static void q6asm_dai_pcm_free(struct snd_pcm *pcm)
+static void q6asm_dai_pcm_free(struct snd_soc_component *component,
+ struct snd_pcm *pcm)
{
struct snd_pcm_substream *substream;
int i;
@@ -852,9 +877,16 @@ static void q6asm_dai_pcm_free(struct snd_pcm *pcm)
static const struct snd_soc_component_driver q6asm_fe_dai_component = {
.name = DRV_NAME,
- .ops = &q6asm_dai_ops,
- .pcm_new = q6asm_dai_pcm_new,
- .pcm_free = q6asm_dai_pcm_free,
+ .open = q6asm_dai_open,
+ .hw_params = q6asm_dai_hw_params,
+ .close = q6asm_dai_close,
+ .ioctl = snd_soc_pcm_lib_ioctl,
+ .prepare = q6asm_dai_prepare,
+ .trigger = q6asm_dai_trigger,
+ .pointer = q6asm_dai_pointer,
+ .mmap = q6asm_dai_mmap,
+ .pcm_construct = q6asm_dai_pcm_new,
+ .pcm_destruct = q6asm_dai_pcm_free,
.compr_ops = &q6asm_dai_compr_ops,
};
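
Interleaved with the ops conversion, q6asm-dai.c also grows FLAC support on the compressed path: the decoder options carried in snd_compr_params are repacked into the DSP's q6asm_flac_cfg before the stream starts, and the advertised codec list grows from one entry to two. Condensed from the set_params hunk above:

	union snd_codec_options *opts = &prtd->codec_param.codec.options;
	struct snd_dec_flac *flac = &opts->flac_d;
	struct q6asm_flac_cfg flac_cfg = {
		.ch_cfg              = params->codec.ch_in,
		.sample_rate         = params->codec.sample_rate,
		.stream_info_present = 1,
		.sample_size         = flac->sample_size,
		.min_blk_size        = flac->min_blk_size,
		.max_blk_size        = flac->max_blk_size,
		.min_frame_size      = flac->min_frame_size,
		.max_frame_size      = flac->max_frame_size,
	};

	ret = q6asm_stream_media_format_block_flac(prtd->audio_client, &flac_cfg);
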
diff --git a/sound/soc/qcom/qdsp6/q6asm.c b/sound/soc/qcom/qdsp6/q6asm.c
index e8141a33a55e..36e0eab13a98 100644
--- a/sound/soc/qcom/qdsp6/q6asm.c
+++ b/sound/soc/qcom/qdsp6/q6asm.c
@@ -38,6 +38,7 @@
#define ASM_SESSION_CMD_RUN_V2 0x00010DAA
#define ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V2 0x00010DA5
#define ASM_MEDIA_FMT_MP3 0x00010BE9
+#define ASM_MEDIA_FMT_FLAC 0x00010C16
#define ASM_DATA_CMD_WRITE_V2 0x00010DAB
#define ASM_DATA_CMD_READ_V2 0x00010DAC
#define ASM_SESSION_CMD_SUSPEND 0x00010DEC
@@ -89,6 +90,20 @@ struct asm_multi_channel_pcm_fmt_blk_v2 {
u8 channel_mapping[PCM_MAX_NUM_CHANNEL];
} __packed;
+struct asm_flac_fmt_blk_v2 {
+ struct asm_data_cmd_media_fmt_update_v2 fmt_blk;
+ u16 is_stream_info_present;
+ u16 num_channels;
+ u16 min_blk_size;
+ u16 max_blk_size;
+ u16 md5_sum[8];
+ u32 sample_rate;
+ u32 min_frame_size;
+ u32 max_frame_size;
+ u16 sample_size;
+ u16 reserved;
+} __packed;
+
struct asm_stream_cmd_set_encdec_param {
u32 param_id;
u32 param_size;
@@ -876,6 +891,9 @@ int q6asm_open_write(struct audio_client *ac, uint32_t format,
case FORMAT_LINEAR_PCM:
open->dec_fmt_id = ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V2;
break;
+ case SND_AUDIOCODEC_FLAC:
+ open->dec_fmt_id = ASM_MEDIA_FMT_FLAC;
+ break;
default:
dev_err(ac->dev, "Invalid format 0x%x\n", format);
rc = -EINVAL;
@@ -1021,6 +1039,42 @@ err:
}
EXPORT_SYMBOL_GPL(q6asm_media_format_block_multi_ch_pcm);
+
+int q6asm_stream_media_format_block_flac(struct audio_client *ac,
+ struct q6asm_flac_cfg *cfg)
+{
+ struct asm_flac_fmt_blk_v2 *fmt;
+ struct apr_pkt *pkt;
+ void *p;
+ int rc, pkt_size;
+
+ pkt_size = APR_HDR_SIZE + sizeof(*fmt);
+ p = kzalloc(pkt_size, GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ pkt = p;
+ fmt = p + APR_HDR_SIZE;
+
+ q6asm_add_hdr(ac, &pkt->hdr, pkt_size, true, ac->stream_id);
+
+ pkt->hdr.opcode = ASM_DATA_CMD_MEDIA_FMT_UPDATE_V2;
+ fmt->fmt_blk.fmt_blk_size = sizeof(*fmt) - sizeof(fmt->fmt_blk);
+ fmt->is_stream_info_present = cfg->stream_info_present;
+ fmt->num_channels = cfg->ch_cfg;
+ fmt->min_blk_size = cfg->min_blk_size;
+ fmt->max_blk_size = cfg->max_blk_size;
+ fmt->sample_rate = cfg->sample_rate;
+ fmt->min_frame_size = cfg->min_frame_size;
+ fmt->max_frame_size = cfg->max_frame_size;
+ fmt->sample_size = cfg->sample_size;
+
+ rc = q6asm_ac_send_cmd_sync(ac, pkt);
+ kfree(pkt);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(q6asm_stream_media_format_block_flac);
/**
* q6asm_enc_cfg_blk_pcm_format_support() - setup pcm configuration for capture
*
@@ -1075,6 +1129,7 @@ err:
}
EXPORT_SYMBOL_GPL(q6asm_enc_cfg_blk_pcm_format_support);
+
/**
* q6asm_read() - read data of period size from audio client
*
diff --git a/sound/soc/qcom/qdsp6/q6asm.h b/sound/soc/qcom/qdsp6/q6asm.h
index 9f5fb573e4a0..6764f55f7078 100644
--- a/sound/soc/qcom/qdsp6/q6asm.h
+++ b/sound/soc/qcom/qdsp6/q6asm.h
@@ -32,6 +32,19 @@ enum {
#define NO_TIMESTAMP 0xFF00
#define FORMAT_LINEAR_PCM 0x0000
+struct q6asm_flac_cfg {
+ u32 sample_rate;
+ u32 ext_sample_rate;
+ u32 min_frame_size;
+ u32 max_frame_size;
+ u16 stream_info_present;
+ u16 min_blk_size;
+ u16 max_blk_size;
+ u16 ch_cfg;
+ u16 sample_size;
+ u16 md5_sum;
+};
+
typedef void (*q6asm_cb) (uint32_t opcode, uint32_t token,
void *payload, void *priv);
struct audio_client;
@@ -54,6 +67,8 @@ int q6asm_media_format_block_multi_ch_pcm(struct audio_client *ac,
uint32_t rate, uint32_t channels,
u8 channel_map[PCM_MAX_NUM_CHANNEL],
uint16_t bits_per_sample);
+int q6asm_stream_media_format_block_flac(struct audio_client *ac,
+ struct q6asm_flac_cfg *cfg);
int q6asm_run(struct audio_client *ac, uint32_t flags, uint32_t msw_ts,
uint32_t lsw_ts);
int q6asm_run_nowait(struct audio_client *ac, uint32_t flags, uint32_t msw_ts,
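
The header now exposes both the FLAC configuration struct and the command that ships it to the DSP. A minimal caller sketch — the literal values are hypothetical; real ones come from the stream's STREAMINFO metadata as shown in q6asm-dai.c, after the stream has been opened for writing with SND_AUDIOCODEC_FLAC:

	struct q6asm_flac_cfg cfg = {
		.sample_rate         = 48000,  /* hypothetical values */
		.ch_cfg              = 2,
		.sample_size         = 16,
		.stream_info_present = 1,
	};
	int ret = q6asm_stream_media_format_block_flac(ac, &cfg);
	if (ret < 0)
		dev_err(dev, "FLAC format block failed: %d\n", ret);
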
diff --git a/sound/soc/qcom/qdsp6/q6routing.c b/sound/soc/qcom/qdsp6/q6routing.c
index ddcd9978cf57..20724102e85a 100644
--- a/sound/soc/qcom/qdsp6/q6routing.c
+++ b/sound/soc/qcom/qdsp6/q6routing.c
@@ -939,12 +939,12 @@ static const struct snd_soc_dapm_route intercon[] = {
};
-static int routing_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params)
+static int routing_hw_params(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_component *c = snd_soc_rtdcom_lookup(rtd, DRV_NAME);
- struct msm_routing_data *data = dev_get_drvdata(c->dev);
+ struct msm_routing_data *data = dev_get_drvdata(component->dev);
unsigned int be_id = rtd->cpu_dai->id;
struct session_data *session;
int path_type;
@@ -980,10 +980,6 @@ static int routing_hw_params(struct snd_pcm_substream *substream,
return 0;
}
-static struct snd_pcm_ops q6pcm_routing_ops = {
- .hw_params = routing_hw_params,
-};
-
static int msm_routing_probe(struct snd_soc_component *c)
{
int i;
@@ -997,9 +993,9 @@ static int msm_routing_probe(struct snd_soc_component *c)
}
static const struct snd_soc_component_driver msm_soc_routing_component = {
- .ops = &q6pcm_routing_ops,
.probe = msm_routing_probe,
.name = DRV_NAME,
+ .hw_params = routing_hw_params,
.dapm_widgets = msm_qdsp6_widgets,
.num_dapm_widgets = ARRAY_SIZE(msm_qdsp6_widgets),
.dapm_routes = intercon,
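
The routing driver is the degenerate case of the conversion: its ops table held a single callback, so the table disappears entirely and hw_params is assigned straight into the component driver. The callback also picks its driver data off component->dev instead of going through snd_soc_rtdcom_lookup():

	/* before */
	struct snd_soc_component *c = snd_soc_rtdcom_lookup(rtd, DRV_NAME);
	struct msm_routing_data *data = dev_get_drvdata(c->dev);

	/* after */
	struct msm_routing_data *data = dev_get_drvdata(component->dev);
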
diff --git a/sound/soc/rockchip/Kconfig b/sound/soc/rockchip/Kconfig
index b43657e6e655..d610b553ea3b 100644
--- a/sound/soc/rockchip/Kconfig
+++ b/sound/soc/rockchip/Kconfig
@@ -40,9 +40,10 @@ config SND_SOC_ROCKCHIP_MAX98090
select SND_SOC_ROCKCHIP_I2S
select SND_SOC_MAX98090
select SND_SOC_TS3A227E
+ select SND_SOC_HDMI_CODEC
help
Say Y or M here if you want to add support for SoC audio on Rockchip
- boards using the MAX98090 codec, such as Veyron.
+ boards using the MAX98090 codec and HDMI codec, such as Veyron.
config SND_SOC_ROCKCHIP_RT5645
tristate "ASoC support for Rockchip boards using a RT5645/RT5650 codec"
diff --git a/sound/soc/rockchip/rockchip_max98090.c b/sound/soc/rockchip/rockchip_max98090.c
index e80b09143b63..60930fa85aa4 100644
--- a/sound/soc/rockchip/rockchip_max98090.c
+++ b/sound/soc/rockchip/rockchip_max98090.c
@@ -6,11 +6,13 @@
*/
#include <linux/module.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/gpio.h>
#include <linux/of_gpio.h>
#include <sound/core.h>
+#include <sound/hdmi-codec.h>
#include <sound/jack.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
@@ -36,28 +38,73 @@ static struct snd_soc_jack_pin headset_jack_pins[] = {
};
-static const struct snd_soc_dapm_widget rk_dapm_widgets[] = {
- SND_SOC_DAPM_HP("Headphone", NULL),
- SND_SOC_DAPM_MIC("Headset Mic", NULL),
- SND_SOC_DAPM_MIC("Int Mic", NULL),
- SND_SOC_DAPM_SPK("Speaker", NULL),
+#define RK_MAX98090_WIDGETS \
+ SND_SOC_DAPM_HP("Headphone", NULL), \
+ SND_SOC_DAPM_MIC("Headset Mic", NULL), \
+ SND_SOC_DAPM_MIC("Int Mic", NULL), \
+ SND_SOC_DAPM_SPK("Speaker", NULL)
+
+#define RK_HDMI_WIDGETS \
+ SND_SOC_DAPM_LINE("HDMI", NULL)
+
+static const struct snd_soc_dapm_widget rk_max98090_dapm_widgets[] = {
+ RK_MAX98090_WIDGETS,
+};
+
+static const struct snd_soc_dapm_widget rk_hdmi_dapm_widgets[] = {
+ RK_HDMI_WIDGETS,
+};
+
+static const struct snd_soc_dapm_widget rk_max98090_hdmi_dapm_widgets[] = {
+ RK_MAX98090_WIDGETS,
+ RK_HDMI_WIDGETS,
+};
+
+#define RK_MAX98090_AUDIO_MAP \
+ {"IN34", NULL, "Headset Mic"}, \
+ {"Headset Mic", NULL, "MICBIAS"}, \
+ {"DMICL", NULL, "Int Mic"}, \
+ {"Headphone", NULL, "HPL"}, \
+ {"Headphone", NULL, "HPR"}, \
+ {"Speaker", NULL, "SPKL"}, \
+ {"Speaker", NULL, "SPKR"}
+
+#define RK_HDMI_AUDIO_MAP \
+ {"HDMI", NULL, "TX"}
+
+static const struct snd_soc_dapm_route rk_max98090_audio_map[] = {
+ RK_MAX98090_AUDIO_MAP,
+};
+
+static const struct snd_soc_dapm_route rk_hdmi_audio_map[] = {
+ RK_HDMI_AUDIO_MAP,
+};
+
+static const struct snd_soc_dapm_route rk_max98090_hdmi_audio_map[] = {
+ RK_MAX98090_AUDIO_MAP,
+ RK_HDMI_AUDIO_MAP,
+};
+
+#define RK_MAX98090_CONTROLS \
+ SOC_DAPM_PIN_SWITCH("Headphone"), \
+ SOC_DAPM_PIN_SWITCH("Headset Mic"), \
+ SOC_DAPM_PIN_SWITCH("Int Mic"), \
+ SOC_DAPM_PIN_SWITCH("Speaker")
+
+#define RK_HDMI_CONTROLS \
+ SOC_DAPM_PIN_SWITCH("HDMI")
+
+static const struct snd_kcontrol_new rk_max98090_controls[] = {
+ RK_MAX98090_CONTROLS,
};
-static const struct snd_soc_dapm_route rk_audio_map[] = {
- {"IN34", NULL, "Headset Mic"},
- {"Headset Mic", NULL, "MICBIAS"},
- {"DMICL", NULL, "Int Mic"},
- {"Headphone", NULL, "HPL"},
- {"Headphone", NULL, "HPR"},
- {"Speaker", NULL, "SPKL"},
- {"Speaker", NULL, "SPKR"},
+static const struct snd_kcontrol_new rk_hdmi_controls[] = {
+ RK_HDMI_CONTROLS,
};
-static const struct snd_kcontrol_new rk_mc_controls[] = {
- SOC_DAPM_PIN_SWITCH("Headphone"),
- SOC_DAPM_PIN_SWITCH("Headset Mic"),
- SOC_DAPM_PIN_SWITCH("Int Mic"),
- SOC_DAPM_PIN_SWITCH("Speaker"),
+static const struct snd_kcontrol_new rk_max98090_hdmi_controls[] = {
+ RK_MAX98090_CONTROLS,
+ RK_HDMI_CONTROLS,
};
static int rk_jack_event(struct notifier_block *nb, unsigned long event,
@@ -125,15 +172,20 @@ static int rk_aif1_hw_params(struct snd_pcm_substream *substream,
ret = snd_soc_dai_set_sysclk(cpu_dai, 0, mclk,
SND_SOC_CLOCK_OUT);
- if (ret < 0) {
- dev_err(codec_dai->dev, "Can't set codec clock %d\n", ret);
+ if (ret) {
+ dev_err(cpu_dai->dev, "Can't set cpu dai clock %d\n", ret);
return ret;
}
ret = snd_soc_dai_set_sysclk(codec_dai, 0, mclk,
SND_SOC_CLOCK_IN);
- if (ret < 0) {
- dev_err(codec_dai->dev, "Can't set codec clock %d\n", ret);
+
+ /* HDMI codec dai does not need to set sysclk. */
+ if (!strcmp(rtd->dai_link->name, "HDMI"))
+ return 0;
+
+ if (ret) {
+ dev_err(codec_dai->dev, "Can't set codec dai clock %d\n", ret);
return ret;
}
@@ -155,20 +207,88 @@ static const struct snd_soc_ops rk_aif1_ops = {
.startup = rk_aif1_startup,
};
-SND_SOC_DAILINK_DEFS(hifi,
- DAILINK_COMP_ARRAY(COMP_EMPTY()),
- DAILINK_COMP_ARRAY(COMP_CODEC(NULL, "HiFi")),
- DAILINK_COMP_ARRAY(COMP_EMPTY()));
-
-static struct snd_soc_dai_link rk_dailink = {
- .name = "max98090",
- .stream_name = "Audio",
- .init = rk_init,
- .ops = &rk_aif1_ops,
- /* set max98090 as slave */
- .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
- SND_SOC_DAIFMT_CBS_CFS,
- SND_SOC_DAILINK_REG(hifi),
+SND_SOC_DAILINK_DEFS(analog,
+ DAILINK_COMP_ARRAY(COMP_EMPTY()),
+ DAILINK_COMP_ARRAY(COMP_CODEC(NULL, "HiFi")),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+
+SND_SOC_DAILINK_DEFS(hdmi,
+ DAILINK_COMP_ARRAY(COMP_EMPTY()),
+ DAILINK_COMP_ARRAY(COMP_CODEC(NULL, "i2s-hifi")),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+
+enum {
+ DAILINK_MAX98090,
+ DAILINK_HDMI,
+};
+
+static struct snd_soc_jack rk_hdmi_jack;
+
+static int rk_hdmi_init(struct snd_soc_pcm_runtime *runtime)
+{
+ struct snd_soc_card *card = runtime->card;
+ struct snd_soc_component *component = runtime->codec_dai->component;
+ int ret;
+
+ /* enable jack detection */
+ ret = snd_soc_card_jack_new(card, "HDMI Jack", SND_JACK_LINEOUT,
+ &rk_hdmi_jack, NULL, 0);
+ if (ret) {
+ dev_err(card->dev, "Can't new HDMI Jack %d\n", ret);
+ return ret;
+ }
+
+ return hdmi_codec_set_jack_detect(component, &rk_hdmi_jack);
+}
+
+/* max98090 dai_link */
+static struct snd_soc_dai_link rk_max98090_dailinks[] = {
+ {
+ .name = "max98090",
+ .stream_name = "Analog",
+ .init = rk_init,
+ .ops = &rk_aif1_ops,
+ /* set max98090 as slave */
+ .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBS_CFS,
+ SND_SOC_DAILINK_REG(analog),
+ },
+};
+
+/* HDMI codec dai_link */
+static struct snd_soc_dai_link rk_hdmi_dailinks[] = {
+ {
+ .name = "HDMI",
+ .stream_name = "HDMI",
+ .init = rk_hdmi_init,
+ .ops = &rk_aif1_ops,
+ .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBS_CFS,
+ SND_SOC_DAILINK_REG(hdmi),
+ }
+};
+
+/* max98090 and HDMI codec dai_link */
+static struct snd_soc_dai_link rk_max98090_hdmi_dailinks[] = {
+ [DAILINK_MAX98090] = {
+ .name = "max98090",
+ .stream_name = "Analog",
+ .init = rk_init,
+ .ops = &rk_aif1_ops,
+ /* set max98090 as slave */
+ .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBS_CFS,
+ SND_SOC_DAILINK_REG(analog),
+ },
+ [DAILINK_HDMI] = {
+ .name = "HDMI",
+ .stream_name = "HDMI",
+ .init = rk_hdmi_init,
+ .ops = &rk_aif1_ops,
+ .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBS_CFS,
+ SND_SOC_DAILINK_REG(hdmi),
+ }
};
static int rk_98090_headset_init(struct snd_soc_component *component);
@@ -178,19 +298,47 @@ static struct snd_soc_aux_dev rk_98090_headset_dev = {
.init = rk_98090_headset_init,
};
-static struct snd_soc_card snd_soc_card_rk = {
+static struct snd_soc_card rockchip_max98090_card = {
.name = "ROCKCHIP-I2S",
.owner = THIS_MODULE,
- .dai_link = &rk_dailink,
- .num_links = 1,
+ .dai_link = rk_max98090_dailinks,
+ .num_links = ARRAY_SIZE(rk_max98090_dailinks),
+ .aux_dev = &rk_98090_headset_dev,
+ .num_aux_devs = 1,
+ .dapm_widgets = rk_max98090_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(rk_max98090_dapm_widgets),
+ .dapm_routes = rk_max98090_audio_map,
+ .num_dapm_routes = ARRAY_SIZE(rk_max98090_audio_map),
+ .controls = rk_max98090_controls,
+ .num_controls = ARRAY_SIZE(rk_max98090_controls),
+};
+
+static struct snd_soc_card rockchip_hdmi_card = {
+ .name = "ROCKCHIP-HDMI",
+ .owner = THIS_MODULE,
+ .dai_link = rk_hdmi_dailinks,
+ .num_links = ARRAY_SIZE(rk_hdmi_dailinks),
+ .dapm_widgets = rk_hdmi_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(rk_hdmi_dapm_widgets),
+ .dapm_routes = rk_hdmi_audio_map,
+ .num_dapm_routes = ARRAY_SIZE(rk_hdmi_audio_map),
+ .controls = rk_hdmi_controls,
+ .num_controls = ARRAY_SIZE(rk_hdmi_controls),
+};
+
+static struct snd_soc_card rockchip_max98090_hdmi_card = {
+ .name = "ROCKCHIP-MAX98090-HDMI",
+ .owner = THIS_MODULE,
+ .dai_link = rk_max98090_hdmi_dailinks,
+ .num_links = ARRAY_SIZE(rk_max98090_hdmi_dailinks),
.aux_dev = &rk_98090_headset_dev,
.num_aux_devs = 1,
- .dapm_widgets = rk_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(rk_dapm_widgets),
- .dapm_routes = rk_audio_map,
- .num_dapm_routes = ARRAY_SIZE(rk_audio_map),
- .controls = rk_mc_controls,
- .num_controls = ARRAY_SIZE(rk_mc_controls),
+ .dapm_widgets = rk_max98090_hdmi_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(rk_max98090_hdmi_dapm_widgets),
+ .dapm_routes = rk_max98090_hdmi_audio_map,
+ .num_dapm_routes = ARRAY_SIZE(rk_max98090_hdmi_audio_map),
+ .controls = rk_max98090_hdmi_controls,
+ .num_controls = ARRAY_SIZE(rk_max98090_hdmi_controls),
};
static int rk_98090_headset_init(struct snd_soc_component *component)
@@ -198,7 +346,7 @@ static int rk_98090_headset_init(struct snd_soc_component *component)
int ret;
/* Enable Headset and 4 Buttons Jack detection */
- ret = snd_soc_card_jack_new(&snd_soc_card_rk, "Headset Jack",
+ ret = snd_soc_card_jack_new(component->card, "Headset Jack",
SND_JACK_HEADSET |
SND_JACK_BTN_0 | SND_JACK_BTN_1 |
SND_JACK_BTN_2 | SND_JACK_BTN_3,
@@ -213,41 +361,75 @@ static int rk_98090_headset_init(struct snd_soc_component *component)
return ret;
}
+static int rk_parse_headset_from_of(struct device *dev, struct device_node *np)
+{
+ rk_98090_headset_dev.dlc.of_node = of_parse_phandle(
+ np, "rockchip,headset-codec", 0);
+ if (!rk_98090_headset_dev.dlc.of_node) {
+ dev_err(dev,
+ "Property 'rockchip,headset-codec' missing/invalid\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
static int snd_rk_mc_probe(struct platform_device *pdev)
{
int ret = 0;
- struct snd_soc_card *card = &snd_soc_card_rk;
+ struct snd_soc_card *card;
+ struct device *dev = &pdev->dev;
struct device_node *np = pdev->dev.of_node;
+ struct device_node *np_cpu;
+ struct device_node *np_audio, *np_hdmi;
- /* register the soc card */
- card->dev = &pdev->dev;
+ /* Parse DTS for I2S controller. */
+ np_cpu = of_parse_phandle(np, "rockchip,i2s-controller", 0);
- rk_dailink.codecs->of_node = of_parse_phandle(np,
- "rockchip,audio-codec", 0);
- if (!rk_dailink.codecs->of_node) {
+ if (!np_cpu) {
dev_err(&pdev->dev,
- "Property 'rockchip,audio-codec' missing or invalid\n");
+ "Property 'rockchip,i2s-controller missing or invalid\n");
return -EINVAL;
}
- rk_dailink.cpus->of_node = of_parse_phandle(np,
- "rockchip,i2s-controller", 0);
- if (!rk_dailink.cpus->of_node) {
- dev_err(&pdev->dev,
- "Property 'rockchip,i2s-controller' missing or invalid\n");
+ /*
+ * Find the card to use based on the presence of the audio codec
+ * and HDMI codec in the device properties. Set their of_node accordingly.
+ */
+ np_audio = of_parse_phandle(np, "rockchip,audio-codec", 0);
+ np_hdmi = of_parse_phandle(np, "rockchip,hdmi-codec", 0);
+ if (np_audio && np_hdmi) {
+ card = &rockchip_max98090_hdmi_card;
+ card->dai_link[DAILINK_MAX98090].codecs->of_node = np_audio;
+ card->dai_link[DAILINK_HDMI].codecs->of_node = np_hdmi;
+ card->dai_link[DAILINK_MAX98090].cpus->of_node = np_cpu;
+ card->dai_link[DAILINK_MAX98090].platforms->of_node = np_cpu;
+ card->dai_link[DAILINK_HDMI].cpus->of_node = np_cpu;
+ card->dai_link[DAILINK_HDMI].platforms->of_node = np_cpu;
+ } else if (np_audio) {
+ card = &rockchip_max98090_card;
+ card->dai_link[0].codecs->of_node = np_audio;
+ card->dai_link[0].cpus->of_node = np_cpu;
+ card->dai_link[0].platforms->of_node = np_cpu;
+ } else if (np_hdmi) {
+ card = &rockchip_hdmi_card;
+ card->dai_link[0].codecs->of_node = np_hdmi;
+ card->dai_link[0].cpus->of_node = np_cpu;
+ card->dai_link[0].platforms->of_node = np_cpu;
+ } else {
+ dev_err(dev, "At least one of codecs should be specified\n");
return -EINVAL;
}
- rk_dailink.platforms->of_node = rk_dailink.cpus->of_node;
+ card->dev = dev;
- rk_98090_headset_dev.dlc.of_node = of_parse_phandle(np,
- "rockchip,headset-codec", 0);
- if (!rk_98090_headset_dev.dlc.of_node) {
- dev_err(&pdev->dev,
- "Property 'rockchip,headset-codec' missing/invalid\n");
- return -EINVAL;
+ /* Parse headset detection codec. */
+ if (np_audio) {
+ ret = rk_parse_headset_from_of(dev, np);
+ if (ret)
+ return ret;
}
+ /* Parse card name. */
ret = snd_soc_of_parse_card_name(card, "rockchip,model");
if (ret) {
dev_err(&pdev->dev,
@@ -255,6 +437,7 @@ static int snd_rk_mc_probe(struct platform_device *pdev)
return ret;
}
+ /* register the soc card */
ret = devm_snd_soc_register_card(&pdev->dev, card);
if (ret) {
dev_err(&pdev->dev,
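
A subtlety in the rk_aif1_hw_params() hunk above: for the "HDMI" link the function returns 0 before inspecting the codec-side snd_soc_dai_set_sysclk() result, deliberately ignoring any error because the hdmi-codec DAI has nothing to clock. An alternative ordering that skips the call outright would read as follows (a sketch of an alternative, not what the patch does; it omits the cpu-dai half and assumes a 256*fs MCLK):

#include <linux/string.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>

static int rk_aif1_hw_params_alt(struct snd_pcm_substream *substream,
				 struct snd_pcm_hw_params *params)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct snd_soc_dai *codec_dai = rtd->codec_dai;
	unsigned int mclk = 256 * params_rate(params);
	int ret;

	/* The hdmi-codec DAI has no sysclk, so do not even call into it. */
	if (!strcmp(rtd->dai_link->name, "HDMI"))
		return 0;

	ret = snd_soc_dai_set_sysclk(codec_dai, 0, mclk, SND_SOC_CLOCK_IN);
	if (ret)
		dev_err(codec_dai->dev, "Can't set codec dai clock %d\n", ret);
	return ret;
}
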
diff --git a/sound/soc/samsung/Kconfig b/sound/soc/samsung/Kconfig
index 638983123d8f..1a0b163ca47b 100644
--- a/sound/soc/samsung/Kconfig
+++ b/sound/soc/samsung/Kconfig
@@ -194,11 +194,13 @@ config SND_SOC_ODROID
help
Say Y here to enable audio support for the Odroid XU3/XU4.
-config SND_SOC_ARNDALE_RT5631_ALC5631
- tristate "Audio support for RT5631(ALC5631) on Arndale Board"
- depends on I2C
- select SND_SAMSUNG_I2S
- select SND_SOC_RT5631
+config SND_SOC_ARNDALE
+ tristate "Audio support for Arndale Board"
+ depends on I2C
+ select SND_SAMSUNG_I2S
+ select SND_SOC_RT5631
+ select MFD_WM8994
+ select SND_SOC_WM8994
config SND_SOC_SAMSUNG_TM2_WM5110
tristate "SoC I2S Audio support for WM5110 on TM2 board"
diff --git a/sound/soc/samsung/Makefile b/sound/soc/samsung/Makefile
index c3b76035f69c..8f5dfe20b9f1 100644
--- a/sound/soc/samsung/Makefile
+++ b/sound/soc/samsung/Makefile
@@ -39,7 +39,7 @@ snd-soc-lowland-objs := lowland.o
snd-soc-littlemill-objs := littlemill.o
snd-soc-bells-objs := bells.o
snd-soc-odroid-objs := odroid.o
-snd-soc-arndale-rt5631-objs := arndale_rt5631.o
+snd-soc-arndale-objs := arndale.o
snd-soc-tm2-wm5110-objs := tm2_wm5110.o
obj-$(CONFIG_SND_SOC_SAMSUNG_JIVE_WM8750) += snd-soc-jive-wm8750.o
@@ -62,5 +62,5 @@ obj-$(CONFIG_SND_SOC_LOWLAND) += snd-soc-lowland.o
obj-$(CONFIG_SND_SOC_LITTLEMILL) += snd-soc-littlemill.o
obj-$(CONFIG_SND_SOC_BELLS) += snd-soc-bells.o
obj-$(CONFIG_SND_SOC_ODROID) += snd-soc-odroid.o
-obj-$(CONFIG_SND_SOC_ARNDALE_RT5631_ALC5631) += snd-soc-arndale-rt5631.o
+obj-$(CONFIG_SND_SOC_ARNDALE) += snd-soc-arndale.o
obj-$(CONFIG_SND_SOC_SAMSUNG_TM2_WM5110) += snd-soc-tm2-wm5110.o
diff --git a/sound/soc/samsung/arndale.c b/sound/soc/samsung/arndale.c
new file mode 100644
index 000000000000..d64602950cbd
--- /dev/null
+++ b/sound/soc/samsung/arndale.c
@@ -0,0 +1,217 @@
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Copyright (c) 2014, Insignal Co., Ltd.
+//
+// Author: Claude <claude@insignal.co.kr>
+
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+
+#include "../codecs/wm8994.h"
+#include "i2s.h"
+
+static int arndale_rt5631_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ int rfs, ret;
+ unsigned long rclk;
+
+ rfs = 256;
+
+ rclk = params_rate(params) * rfs;
+
+ ret = snd_soc_dai_set_sysclk(cpu_dai, SAMSUNG_I2S_CDCLK,
+ 0, SND_SOC_CLOCK_OUT);
+ if (ret < 0)
+ return ret;
+
+ ret = snd_soc_dai_set_sysclk(cpu_dai, SAMSUNG_I2S_RCLKSRC_0,
+ 0, SND_SOC_CLOCK_OUT);
+
+ if (ret < 0)
+ return ret;
+
+ ret = snd_soc_dai_set_sysclk(codec_dai, 0, rclk, SND_SOC_CLOCK_OUT);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static struct snd_soc_ops arndale_rt5631_ops = {
+ .hw_params = arndale_rt5631_hw_params,
+};
+
+static int arndale_wm1811_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ unsigned int rfs, rclk;
+
+ /* Ensure AIF1CLK is >= 3 MHz for optimal performance */
+ if (params_width(params) == 24)
+ rfs = 384;
+ else if (params_rate(params) == 8000 || params_rate(params) == 11025)
+ rfs = 512;
+ else
+ rfs = 256;
+
+ rclk = params_rate(params) * rfs;
+
+ /*
+ * We add 1 to the frequency value to ensure proper EPLL setting
+ * for each audio sampling rate (see epll_24mhz_tbl in drivers/clk/
+ * samsung/clk-exynos5250.c for the list of available EPLL rates).
+ * The CODEC uses the clk API and the value will be rounded, so the
+ * MCLK1 clock's frequency will still be an exact multiple of the
+ * sample rate.
+ */
+ return snd_soc_dai_set_sysclk(codec_dai, WM8994_SYSCLK_MCLK1,
+ rclk + 1, SND_SOC_CLOCK_IN);
+}
+
+static struct snd_soc_ops arndale_wm1811_ops = {
+ .hw_params = arndale_wm1811_hw_params,
+};
+
+SND_SOC_DAILINK_DEFS(rt5631_hifi,
+ DAILINK_COMP_ARRAY(COMP_EMPTY()),
+ DAILINK_COMP_ARRAY(COMP_CODEC(NULL, "rt5631-aif1")),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+
+static struct snd_soc_dai_link arndale_rt5631_dai[] = {
+ {
+ .name = "RT5631 HiFi",
+ .stream_name = "Primary",
+ .dai_fmt = SND_SOC_DAIFMT_I2S
+ | SND_SOC_DAIFMT_NB_NF
+ | SND_SOC_DAIFMT_CBS_CFS,
+ .ops = &arndale_rt5631_ops,
+ SND_SOC_DAILINK_REG(rt5631_hifi),
+ },
+};
+
+SND_SOC_DAILINK_DEFS(wm1811_hifi,
+ DAILINK_COMP_ARRAY(COMP_EMPTY()),
+ DAILINK_COMP_ARRAY(COMP_CODEC(NULL, "wm8994-aif1")),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+
+static struct snd_soc_dai_link arndale_wm1811_dai[] = {
+ {
+ .name = "WM1811 HiFi",
+ .stream_name = "Primary",
+ .dai_fmt = SND_SOC_DAIFMT_I2S
+ | SND_SOC_DAIFMT_NB_NF
+ | SND_SOC_DAIFMT_CBM_CFM,
+ .ops = &arndale_wm1811_ops,
+ SND_SOC_DAILINK_REG(wm1811_hifi),
+ },
+};
+
+static struct snd_soc_card arndale_rt5631 = {
+ .name = "Arndale RT5631",
+ .owner = THIS_MODULE,
+ .dai_link = arndale_rt5631_dai,
+ .num_links = ARRAY_SIZE(arndale_rt5631_dai),
+};
+
+static struct snd_soc_card arndale_wm1811 = {
+ .name = "Arndale WM1811",
+ .owner = THIS_MODULE,
+ .dai_link = arndale_wm1811_dai,
+ .num_links = ARRAY_SIZE(arndale_wm1811_dai),
+};
+
+static void arndale_put_of_nodes(struct snd_soc_card *card)
+{
+ struct snd_soc_dai_link *dai_link;
+ int i;
+
+ for_each_card_prelinks(card, i, dai_link) {
+ of_node_put(dai_link->cpus->of_node);
+ of_node_put(dai_link->codecs->of_node);
+ }
+}
+
+static int arndale_audio_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct snd_soc_card *card;
+ struct snd_soc_dai_link *dai_link;
+ int ret;
+
+ card = (struct snd_soc_card *)of_device_get_match_data(&pdev->dev);
+ card->dev = &pdev->dev;
+ dai_link = card->dai_link;
+
+ dai_link->cpus->of_node = of_parse_phandle(np, "samsung,audio-cpu", 0);
+ if (!dai_link->cpus->of_node) {
+ dev_err(&pdev->dev,
+ "Property 'samsung,audio-cpu' missing or invalid\n");
+ return -EINVAL;
+ }
+
+ if (!dai_link->platforms->name)
+ dai_link->platforms->of_node = dai_link->cpus->of_node;
+
+ dai_link->codecs->of_node = of_parse_phandle(np, "samsung,audio-codec", 0);
+ if (!dai_link->codecs->of_node) {
+ dev_err(&pdev->dev,
+ "Property 'samsung,audio-codec' missing or invalid\n");
+ ret = -EINVAL;
+ goto err_put_of_nodes;
+ }
+
+ ret = devm_snd_soc_register_card(card->dev, card);
+ if (ret) {
+ dev_err(&pdev->dev, "snd_soc_register_card() failed: %d\n", ret);
+ goto err_put_of_nodes;
+ }
+ return 0;
+
+err_put_of_nodes:
+ arndale_put_of_nodes(card);
+ return ret;
+}
+
+static int arndale_audio_remove(struct platform_device *pdev)
+{
+ struct snd_soc_card *card = platform_get_drvdata(pdev);
+
+ arndale_put_of_nodes(card);
+ return 0;
+}
+
+static const struct of_device_id arndale_audio_of_match[] = {
+ { .compatible = "samsung,arndale-rt5631", .data = &arndale_rt5631 },
+ { .compatible = "samsung,arndale-alc5631", .data = &arndale_rt5631 },
+ { .compatible = "samsung,arndale-wm1811", .data = &arndale_wm1811 },
+ {},
+};
+MODULE_DEVICE_TABLE(of, arndale_audio_of_match);
+
+static struct platform_driver arndale_audio_driver = {
+ .driver = {
+ .name = "arndale-audio",
+ .pm = &snd_soc_pm_ops,
+ .of_match_table = arndale_audio_of_match,
+ },
+ .probe = arndale_audio_probe,
+ .remove = arndale_audio_remove,
+};
+
+module_platform_driver(arndale_audio_driver);
+
+MODULE_AUTHOR("Claude <claude@insignal.co.kr>");
+MODULE_DESCRIPTION("ALSA SoC Driver for Arndale Board");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/samsung/arndale_rt5631.c b/sound/soc/samsung/arndale_rt5631.c
deleted file mode 100644
index fd8c6642fb0d..000000000000
--- a/sound/soc/samsung/arndale_rt5631.c
+++ /dev/null
@@ -1,164 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-//
-// Copyright (c) 2014, Insignal Co., Ltd.
-//
-// Author: Claude <claude@insginal.co.kr>
-
-#include <linux/module.h>
-#include <linux/of_device.h>
-#include <linux/platform_device.h>
-#include <linux/clk.h>
-
-#include <sound/soc.h>
-#include <sound/soc-dapm.h>
-#include <sound/pcm.h>
-#include <sound/pcm_params.h>
-
-#include "i2s.h"
-
-static int arndale_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params)
-{
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
- int rfs, ret;
- unsigned long rclk;
-
- rfs = 256;
-
- rclk = params_rate(params) * rfs;
-
- ret = snd_soc_dai_set_sysclk(cpu_dai, SAMSUNG_I2S_CDCLK,
- 0, SND_SOC_CLOCK_OUT);
- if (ret < 0)
- return ret;
-
- ret = snd_soc_dai_set_sysclk(cpu_dai, SAMSUNG_I2S_RCLKSRC_0,
- 0, SND_SOC_CLOCK_OUT);
-
- if (ret < 0)
- return ret;
-
- ret = snd_soc_dai_set_sysclk(codec_dai, 0, rclk, SND_SOC_CLOCK_OUT);
- if (ret < 0)
- return ret;
-
- return 0;
-}
-
-static struct snd_soc_ops arndale_ops = {
- .hw_params = arndale_hw_params,
-};
-
-SND_SOC_DAILINK_DEFS(rt5631_hifi,
- DAILINK_COMP_ARRAY(COMP_EMPTY()),
- DAILINK_COMP_ARRAY(COMP_CODEC(NULL, "rt5631-hifi")),
- DAILINK_COMP_ARRAY(COMP_EMPTY()));
-
-static struct snd_soc_dai_link arndale_rt5631_dai[] = {
- {
- .name = "RT5631 HiFi",
- .stream_name = "Primary",
- .dai_fmt = SND_SOC_DAIFMT_I2S
- | SND_SOC_DAIFMT_NB_NF
- | SND_SOC_DAIFMT_CBS_CFS,
- .ops = &arndale_ops,
- SND_SOC_DAILINK_REG(rt5631_hifi),
- },
-};
-
-static struct snd_soc_card arndale_rt5631 = {
- .name = "Arndale RT5631",
- .owner = THIS_MODULE,
- .dai_link = arndale_rt5631_dai,
- .num_links = ARRAY_SIZE(arndale_rt5631_dai),
-};
-
-static void arndale_put_of_nodes(struct snd_soc_card *card)
-{
- struct snd_soc_dai_link *dai_link;
- int i;
-
- for_each_card_prelinks(card, i, dai_link) {
- of_node_put(dai_link->cpus->of_node);
- of_node_put(dai_link->codecs->of_node);
- }
-}
-
-static int arndale_audio_probe(struct platform_device *pdev)
-{
- int n, ret;
- struct device_node *np = pdev->dev.of_node;
- struct snd_soc_card *card = &arndale_rt5631;
-
- card->dev = &pdev->dev;
-
- for (n = 0; np && n < ARRAY_SIZE(arndale_rt5631_dai); n++) {
- if (!arndale_rt5631_dai[n].cpus->dai_name) {
- arndale_rt5631_dai[n].cpus->of_node = of_parse_phandle(np,
- "samsung,audio-cpu", n);
-
- if (!arndale_rt5631_dai[n].cpus->of_node) {
- dev_err(&pdev->dev,
- "Property 'samsung,audio-cpu' missing or invalid\n");
- return -EINVAL;
- }
- }
- if (!arndale_rt5631_dai[n].platforms->name)
- arndale_rt5631_dai[n].platforms->of_node =
- arndale_rt5631_dai[n].cpus->of_node;
-
- arndale_rt5631_dai[n].codecs->name = NULL;
- arndale_rt5631_dai[n].codecs->of_node = of_parse_phandle(np,
- "samsung,audio-codec", n);
- if (!arndale_rt5631_dai[0].codecs->of_node) {
- dev_err(&pdev->dev,
- "Property 'samsung,audio-codec' missing or invalid\n");
- ret = -EINVAL;
- goto err_put_of_nodes;
- }
- }
-
- ret = devm_snd_soc_register_card(card->dev, card);
- if (ret) {
- dev_err(&pdev->dev, "snd_soc_register_card() failed: %d\n", ret);
- goto err_put_of_nodes;
- }
- return 0;
-
-err_put_of_nodes:
- arndale_put_of_nodes(card);
- return ret;
-}
-
-static int arndale_audio_remove(struct platform_device *pdev)
-{
- struct snd_soc_card *card = platform_get_drvdata(pdev);
-
- arndale_put_of_nodes(card);
- return 0;
-}
-
-static const struct of_device_id samsung_arndale_rt5631_of_match[] __maybe_unused = {
- { .compatible = "samsung,arndale-rt5631", },
- { .compatible = "samsung,arndale-alc5631", },
- {},
-};
-MODULE_DEVICE_TABLE(of, samsung_arndale_rt5631_of_match);
-
-static struct platform_driver arndale_audio_driver = {
- .driver = {
- .name = "arndale-audio",
- .pm = &snd_soc_pm_ops,
- .of_match_table = of_match_ptr(samsung_arndale_rt5631_of_match),
- },
- .probe = arndale_audio_probe,
- .remove = arndale_audio_remove,
-};
-
-module_platform_driver(arndale_audio_driver);
-
-MODULE_AUTHOR("Claude <claude@insignal.co.kr>");
-MODULE_DESCRIPTION("ALSA SoC Driver for Arndale Board");
-MODULE_LICENSE("GPL");
diff --git a/sound/soc/samsung/idma.c b/sound/soc/samsung/idma.c
index 65497cd477a5..294dce111b05 100644
--- a/sound/soc/samsung/idma.c
+++ b/sound/soc/samsung/idma.c
@@ -137,8 +137,9 @@ static void idma_done(void *id, int bytes_xfer)
snd_pcm_period_elapsed(substream);
}
-static int idma_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params)
+static int idma_hw_params(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct idma_ctrl *prtd = substream->runtime->private_data;
@@ -163,14 +164,16 @@ static int idma_hw_params(struct snd_pcm_substream *substream,
return 0;
}
-static int idma_hw_free(struct snd_pcm_substream *substream)
+static int idma_hw_free(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
snd_pcm_set_runtime_buffer(substream, NULL);
return 0;
}
-static int idma_prepare(struct snd_pcm_substream *substream)
+static int idma_prepare(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct idma_ctrl *prtd = substream->runtime->private_data;
@@ -183,7 +186,8 @@ static int idma_prepare(struct snd_pcm_substream *substream)
return 0;
}
-static int idma_trigger(struct snd_pcm_substream *substream, int cmd)
+static int idma_trigger(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream, int cmd)
{
struct idma_ctrl *prtd = substream->runtime->private_data;
int ret = 0;
@@ -216,7 +220,8 @@ static int idma_trigger(struct snd_pcm_substream *substream, int cmd)
}
static snd_pcm_uframes_t
- idma_pointer(struct snd_pcm_substream *substream)
+idma_pointer(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct idma_ctrl *prtd = runtime->private_data;
@@ -233,7 +238,8 @@ static snd_pcm_uframes_t
return bytes_to_frames(substream->runtime, res);
}
-static int idma_mmap(struct snd_pcm_substream *substream,
+static int idma_mmap(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
struct vm_area_struct *vma)
{
struct snd_pcm_runtime *runtime = substream->runtime;
@@ -278,7 +284,8 @@ static irqreturn_t iis_irq(int irqno, void *dev_id)
return IRQ_HANDLED;
}
-static int idma_open(struct snd_pcm_substream *substream)
+static int idma_open(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct idma_ctrl *prtd;
@@ -304,7 +311,8 @@ static int idma_open(struct snd_pcm_substream *substream)
return 0;
}
-static int idma_close(struct snd_pcm_substream *substream)
+static int idma_close(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct idma_ctrl *prtd = runtime->private_data;
@@ -319,19 +327,8 @@ static int idma_close(struct snd_pcm_substream *substream)
return 0;
}
-static const struct snd_pcm_ops idma_ops = {
- .open = idma_open,
- .close = idma_close,
- .ioctl = snd_pcm_lib_ioctl,
- .trigger = idma_trigger,
- .pointer = idma_pointer,
- .mmap = idma_mmap,
- .hw_params = idma_hw_params,
- .hw_free = idma_hw_free,
- .prepare = idma_prepare,
-};
-
-static void idma_free(struct snd_pcm *pcm)
+static void idma_free(struct snd_soc_component *component,
+ struct snd_pcm *pcm)
{
struct snd_pcm_substream *substream;
struct snd_dma_buffer *buf;
@@ -367,7 +364,8 @@ static int preallocate_idma_buffer(struct snd_pcm *pcm, int stream)
return 0;
}
-static int idma_new(struct snd_soc_pcm_runtime *rtd)
+static int idma_new(struct snd_soc_component *component,
+ struct snd_soc_pcm_runtime *rtd)
{
struct snd_card *card = rtd->card->snd_card;
struct snd_pcm *pcm = rtd->pcm;
@@ -394,9 +392,17 @@ void idma_reg_addr_init(void __iomem *regs, dma_addr_t addr)
EXPORT_SYMBOL_GPL(idma_reg_addr_init);
static const struct snd_soc_component_driver asoc_idma_platform = {
- .ops = &idma_ops,
- .pcm_new = idma_new,
- .pcm_free = idma_free,
+ .open = idma_open,
+ .close = idma_close,
+ .ioctl = snd_soc_pcm_lib_ioctl,
+ .trigger = idma_trigger,
+ .pointer = idma_pointer,
+ .mmap = idma_mmap,
+ .hw_params = idma_hw_params,
+ .hw_free = idma_hw_free,
+ .prepare = idma_prepare,
+ .pcm_construct = idma_new,
+ .pcm_destruct = idma_free,
};
static int asoc_idma_platform_probe(struct platform_device *pdev)
diff --git a/sound/soc/samsung/s3c-i2s-v2.c b/sound/soc/samsung/s3c-i2s-v2.c
index 7e196b599be1..593be1b668d6 100644
--- a/sound/soc/samsung/s3c-i2s-v2.c
+++ b/sound/soc/samsung/s3c-i2s-v2.c
@@ -672,13 +672,13 @@ static int s3c2412_i2s_suspend(struct snd_soc_dai *dai)
iismod = readl(i2s->regs + S3C2412_IISMOD);
if (iismod & S3C2412_IISCON_RXDMA_ACTIVE)
- pr_warning("%s: RXDMA active?\n", __func__);
+ pr_warn("%s: RXDMA active?\n", __func__);
if (iismod & S3C2412_IISCON_TXDMA_ACTIVE)
- pr_warning("%s: TXDMA active?\n", __func__);
+ pr_warn("%s: TXDMA active?\n", __func__);
if (iismod & S3C2412_IISCON_IIS_ACTIVE)
- pr_warning("%s: IIS active\n", __func__);
+ pr_warn("%s: IIS active\n", __func__);
}
return 0;
diff --git a/sound/soc/sh/dma-sh7760.c b/sound/soc/sh/dma-sh7760.c
index 5aee11c94f2a..2b0eca02a8b9 100644
--- a/sound/soc/sh/dma-sh7760.c
+++ b/sound/soc/sh/dma-sh7760.c
@@ -115,7 +115,8 @@ static void camelot_rxdma(void *data)
snd_pcm_period_elapsed(cam->rx_ss);
}
-static int camelot_pcm_open(struct snd_pcm_substream *substream)
+static int camelot_pcm_open(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct camelot_pcm *cam = &cam_pcm_data[rtd->cpu_dai->id];
@@ -148,7 +149,8 @@ static int camelot_pcm_open(struct snd_pcm_substream *substream)
return 0;
}
-static int camelot_pcm_close(struct snd_pcm_substream *substream)
+static int camelot_pcm_close(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct camelot_pcm *cam = &cam_pcm_data[rtd->cpu_dai->id];
@@ -168,7 +170,8 @@ static int camelot_pcm_close(struct snd_pcm_substream *substream)
return 0;
}
-static int camelot_hw_params(struct snd_pcm_substream *substream,
+static int camelot_hw_params(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *hw_params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
@@ -191,12 +194,14 @@ static int camelot_hw_params(struct snd_pcm_substream *substream,
return 0;
}
-static int camelot_hw_free(struct snd_pcm_substream *substream)
+static int camelot_hw_free(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
return snd_pcm_lib_free_pages(substream);
}
-static int camelot_prepare(struct snd_pcm_substream *substream)
+static int camelot_prepare(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_soc_pcm_runtime *rtd = substream->private_data;
@@ -244,7 +249,8 @@ static inline void dmabrg_rec_dma_stop(struct camelot_pcm *cam)
BRGREG(BRGACR) = acr | ACR_RDS;
}
-static int camelot_trigger(struct snd_pcm_substream *substream, int cmd)
+static int camelot_trigger(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream, int cmd)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct camelot_pcm *cam = &cam_pcm_data[rtd->cpu_dai->id];
@@ -270,7 +276,8 @@ static int camelot_trigger(struct snd_pcm_substream *substream, int cmd)
return 0;
}
-static snd_pcm_uframes_t camelot_pos(struct snd_pcm_substream *substream)
+static snd_pcm_uframes_t camelot_pos(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_soc_pcm_runtime *rtd = substream->private_data;
@@ -292,18 +299,8 @@ static snd_pcm_uframes_t camelot_pos(struct snd_pcm_substream *substream)
return bytes_to_frames(runtime, pos);
}
-static const struct snd_pcm_ops camelot_pcm_ops = {
- .open = camelot_pcm_open,
- .close = camelot_pcm_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = camelot_hw_params,
- .hw_free = camelot_hw_free,
- .prepare = camelot_prepare,
- .trigger = camelot_trigger,
- .pointer = camelot_pos,
-};
-
-static int camelot_pcm_new(struct snd_soc_pcm_runtime *rtd)
+static int camelot_pcm_new(struct snd_soc_component *component,
+ struct snd_soc_pcm_runtime *rtd)
{
struct snd_pcm *pcm = rtd->pcm;
@@ -312,15 +309,22 @@ static int camelot_pcm_new(struct snd_soc_pcm_runtime *rtd)
*/
snd_pcm_lib_preallocate_pages_for_all(pcm,
SNDRV_DMA_TYPE_CONTINUOUS,
- snd_dma_continuous_data(GFP_KERNEL),
+ NULL,
DMABRG_PREALLOC_BUFFER, DMABRG_PREALLOC_BUFFER_MAX);
return 0;
}
static const struct snd_soc_component_driver sh7760_soc_component = {
- .ops = &camelot_pcm_ops,
- .pcm_new = camelot_pcm_new,
+ .open = camelot_pcm_open,
+ .close = camelot_pcm_close,
+ .ioctl = snd_soc_pcm_lib_ioctl,
+ .hw_params = camelot_hw_params,
+ .hw_free = camelot_hw_free,
+ .prepare = camelot_prepare,
+ .trigger = camelot_trigger,
+ .pointer = camelot_pos,
+ .pcm_construct = camelot_pcm_new,
};
static int sh7760_soc_platform_probe(struct platform_device *pdev)
diff --git a/sound/soc/sh/fsi.c b/sound/soc/sh/fsi.c
index 3447dbdba1f1..e384fdc8d60e 100644
--- a/sound/soc/sh/fsi.c
+++ b/sound/soc/sh/fsi.c
@@ -1718,7 +1718,8 @@ static const struct snd_pcm_hardware fsi_pcm_hardware = {
.fifo_size = 256,
};
-static int fsi_pcm_open(struct snd_pcm_substream *substream)
+static int fsi_pcm_open(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
int ret = 0;
@@ -1731,19 +1732,22 @@ static int fsi_pcm_open(struct snd_pcm_substream *substream)
return ret;
}
-static int fsi_hw_params(struct snd_pcm_substream *substream,
+static int fsi_hw_params(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *hw_params)
{
return snd_pcm_lib_malloc_pages(substream,
params_buffer_bytes(hw_params));
}
-static int fsi_hw_free(struct snd_pcm_substream *substream)
+static int fsi_hw_free(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
return snd_pcm_lib_free_pages(substream);
}
-static snd_pcm_uframes_t fsi_pointer(struct snd_pcm_substream *substream)
+static snd_pcm_uframes_t fsi_pointer(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct fsi_priv *fsi = fsi_get_priv(substream);
struct fsi_stream *io = fsi_stream_get(fsi, substream);
@@ -1751,14 +1755,6 @@ static snd_pcm_uframes_t fsi_pointer(struct snd_pcm_substream *substream)
return fsi_sample2frame(fsi, io->buff_sample_pos);
}
-static const struct snd_pcm_ops fsi_pcm_ops = {
- .open = fsi_pcm_open,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = fsi_hw_params,
- .hw_free = fsi_hw_free,
- .pointer = fsi_pointer,
-};
-
/*
* snd_soc_component
*/
@@ -1766,7 +1762,8 @@ static const struct snd_pcm_ops fsi_pcm_ops = {
#define PREALLOC_BUFFER (32 * 1024)
#define PREALLOC_BUFFER_MAX (32 * 1024)
-static int fsi_pcm_new(struct snd_soc_pcm_runtime *rtd)
+static int fsi_pcm_new(struct snd_soc_component *component,
+ struct snd_soc_pcm_runtime *rtd)
{
snd_pcm_lib_preallocate_pages_for_all(
rtd->pcm,
@@ -1817,8 +1814,12 @@ static struct snd_soc_dai_driver fsi_soc_dai[] = {
static const struct snd_soc_component_driver fsi_soc_component = {
.name = "fsi",
- .ops = &fsi_pcm_ops,
- .pcm_new = fsi_pcm_new,
+ .open = fsi_pcm_open,
+ .ioctl = snd_soc_pcm_lib_ioctl,
+ .hw_params = fsi_hw_params,
+ .hw_free = fsi_hw_free,
+ .pointer = fsi_pointer,
+ .pcm_construct = fsi_pcm_new,
};
/*
diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c
index e9596c2096cd..399dc6e9bde5 100644
--- a/sound/soc/sh/rcar/core.c
+++ b/sound/soc/sh/rcar/core.c
@@ -302,7 +302,7 @@ int rsnd_runtime_channel_after_ctu_with_params(struct rsnd_dai_stream *io,
int rsnd_channel_normalization(int chan)
{
- if ((chan > 8) || (chan < 0))
+ if (WARN_ON((chan > 8) || (chan < 0)))
return 0;
/* TDM Extend Mode needs 8ch */
@@ -376,6 +376,17 @@ u32 rsnd_get_adinr_bit(struct rsnd_mod *mod, struct rsnd_dai_stream *io)
*/
u32 rsnd_get_dalign(struct rsnd_mod *mod, struct rsnd_dai_stream *io)
{
+ static const u32 dalign_values[8][2] = {
+ {0x76543210, 0x67452301},
+ {0x00000032, 0x00000023},
+ {0x00007654, 0x00006745},
+ {0x00000076, 0x00000067},
+ {0xfedcba98, 0xefcdab89},
+ {0x000000ba, 0x000000ab},
+ {0x0000fedc, 0x0000efcd},
+ {0x000000fe, 0x000000ef},
+ };
+ int id = 0, inv;
struct rsnd_mod *ssiu = rsnd_io_to_mod_ssiu(io);
struct rsnd_mod *target;
struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
@@ -411,13 +422,18 @@ u32 rsnd_get_dalign(struct rsnd_mod *mod, struct rsnd_dai_stream *io)
target = cmd ? cmd : ssiu;
}
+ if (mod == ssiu)
+ id = rsnd_mod_id_sub(mod);
+
/* Non target mod or non 16bit needs normal DALIGN */
if ((snd_pcm_format_width(runtime->format) != 16) ||
(mod != target))
- return 0x76543210;
+ inv = 0;
/* Target mod needs inverted DALIGN when 16bit */
else
- return 0x67452301;
+ inv = 1;
+
+ return dalign_values[id][inv];
}
u32 rsnd_get_busif_shift(struct rsnd_dai_stream *io, struct rsnd_mod *mod)
@@ -1076,7 +1092,10 @@ static void rsnd_parse_tdm_split_mode(struct rsnd_priv *priv,
j++;
}
+ of_node_put(node);
}
+
+ of_node_put(ssiu_np);
}
static void rsnd_parse_connect_simple(struct rsnd_priv *priv,
@@ -1094,11 +1113,13 @@ static void rsnd_parse_connect_graph(struct rsnd_priv *priv,
struct device_node *endpoint)
{
struct device *dev = rsnd_priv_to_dev(priv);
- struct device_node *remote_node = of_graph_get_remote_port_parent(endpoint);
+ struct device_node *remote_node;
if (!rsnd_io_to_mod_ssi(io))
return;
+ remote_node = of_graph_get_remote_port_parent(endpoint);
+
/* HDMI0 */
if (strstr(remote_node->full_name, "hdmi@fead0000")) {
rsnd_flags_set(io, RSND_STREAM_HDMI0);
@@ -1112,6 +1133,8 @@ static void rsnd_parse_connect_graph(struct rsnd_priv *priv,
}
rsnd_parse_tdm_split_mode(priv, io, endpoint);
+
+ of_node_put(remote_node);
}
void rsnd_parse_connect_common(struct rsnd_dai *rdai,
@@ -1374,8 +1397,9 @@ static int rsnd_dai_probe(struct rsnd_priv *priv)
/*
* pcm ops
*/
-static int rsnd_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *hw_params)
+static int rsnd_hw_params(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *hw_params)
{
struct snd_soc_dai *dai = rsnd_substream_to_dai(substream);
struct rsnd_dai *rdai = rsnd_dai_to_rdai(dai);
@@ -1422,7 +1446,8 @@ static int rsnd_hw_params(struct snd_pcm_substream *substream,
params_buffer_bytes(hw_params));
}
-static int rsnd_hw_free(struct snd_pcm_substream *substream)
+static int rsnd_hw_free(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_soc_dai *dai = rsnd_substream_to_dai(substream);
struct rsnd_dai *rdai = rsnd_dai_to_rdai(dai);
@@ -1436,7 +1461,8 @@ static int rsnd_hw_free(struct snd_pcm_substream *substream)
return snd_pcm_lib_free_pages(substream);
}
-static snd_pcm_uframes_t rsnd_pointer(struct snd_pcm_substream *substream)
+static snd_pcm_uframes_t rsnd_pointer(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_soc_dai *dai = rsnd_substream_to_dai(substream);
struct rsnd_dai *rdai = rsnd_dai_to_rdai(dai);
@@ -1448,13 +1474,6 @@ static snd_pcm_uframes_t rsnd_pointer(struct snd_pcm_substream *substream)
return pointer;
}
-static const struct snd_pcm_ops rsnd_pcm_ops = {
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = rsnd_hw_params,
- .hw_free = rsnd_hw_free,
- .pointer = rsnd_pointer,
-};
-
/*
* snd_kcontrol
*/
@@ -1648,8 +1667,11 @@ int rsnd_kctrl_new(struct rsnd_mod *mod,
* snd_soc_component
*/
static const struct snd_soc_component_driver rsnd_soc_component = {
- .ops = &rsnd_pcm_ops,
.name = "rsnd",
+ .ioctl = snd_soc_pcm_lib_ioctl,
+ .hw_params = rsnd_hw_params,
+ .hw_free = rsnd_hw_free,
+ .pointer = rsnd_pointer,
};
static int rsnd_rdai_continuance_probe(struct rsnd_priv *priv,
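
The dalign_values table added to rsnd_get_dalign() has a regular structure: each row's second column is the first column with the two nibbles of every byte swapped, which is what the inverted DALIGN for 16-bit samples amounts to. A stand-alone check of that property (an observation about the table, not code from the patch):

#include <stdint.h>
#include <stdio.h>

static uint32_t swap_nibbles(uint32_t v)
{
	/* Swap the two 4-bit lanes inside each byte. */
	return ((v & 0x0f0f0f0fu) << 4) | ((v >> 4) & 0x0f0f0f0fu);
}

int main(void)
{
	static const uint32_t dalign_values[8][2] = {
		{0x76543210, 0x67452301},
		{0x00000032, 0x00000023},
		{0x00007654, 0x00006745},
		{0x00000076, 0x00000067},
		{0xfedcba98, 0xefcdab89},
		{0x000000ba, 0x000000ab},
		{0x0000fedc, 0x0000efcd},
		{0x000000fe, 0x000000ef},
	};

	for (int id = 0; id < 8; id++)
		printf("row %d: %s\n", id,
		       swap_nibbles(dalign_values[id][0]) ==
		       dalign_values[id][1] ? "inverted column matches"
					    : "mismatch");
	return 0;
}
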
diff --git a/sound/soc/sh/rcar/dma.c b/sound/soc/sh/rcar/dma.c
index 28f65eba2bb4..95aa26d62e4f 100644
--- a/sound/soc/sh/rcar/dma.c
+++ b/sound/soc/sh/rcar/dma.c
@@ -165,14 +165,40 @@ static int rsnd_dmaen_start(struct rsnd_mod *mod,
struct device *dev = rsnd_priv_to_dev(priv);
struct dma_async_tx_descriptor *desc;
struct dma_slave_config cfg = {};
+ enum dma_slave_buswidth buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
int is_play = rsnd_io_is_play(io);
int ret;
+ /*
+ * When monaural data is written or read through the Audio-DMAC,
+ * the data is always in Left-Justified format, so both the src and
+ * dst DMA bus widths must be set equal to the physical data width.
+ */
+ if (rsnd_runtime_channel_original(io) == 1) {
+ struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
+ int bits = snd_pcm_format_physical_width(runtime->format);
+
+ switch (bits) {
+ case 8:
+ buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ break;
+ case 16:
+ buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ break;
+ case 32:
+ buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ break;
+ default:
+ dev_err(dev, "invalid format width %d\n", bits);
+ return -EINVAL;
+ }
+ }
+
cfg.direction = is_play ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
cfg.src_addr = dma->src_addr;
cfg.dst_addr = dma->dst_addr;
- cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
- cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ cfg.src_addr_width = buswidth;
+ cfg.dst_addr_width = buswidth;
dev_dbg(dev, "%s %pad -> %pad\n",
rsnd_mod_name(mod),
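
For the monaural bus-width change above, the DMA bus width follows the format's physical width, which is not always the nominal bit depth: S24_LE, for instance, occupies a 32-bit container, so mono S24_LE still uses 4-byte transfers. A small illustration of the resulting mapping (under that assumption about ALSA physical widths):

#include <stdio.h>

/* Physical widths per ALSA: S24_LE occupies a 32-bit container. */
struct fmt { const char *name; int phys_bits; };

int main(void)
{
	static const struct fmt fmts[] = {
		{ "S8",     8 },
		{ "S16_LE", 16 },
		{ "S24_LE", 32 },
		{ "S32_LE", 32 },
	};

	for (unsigned int i = 0; i < 4; i++)
		printf("%-6s -> DMA_SLAVE_BUSWIDTH_%d_BYTES\n",
		       fmts[i].name, fmts[i].phys_bits / 8);
	return 0;
}
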
diff --git a/sound/soc/sh/siu_pcm.c b/sound/soc/sh/siu_pcm.c
index 78c3145b4109..a5e21e554da2 100644
--- a/sound/soc/sh/siu_pcm.c
+++ b/sound/soc/sh/siu_pcm.c
@@ -281,7 +281,8 @@ static int siu_pcm_stmread_stop(struct siu_port *port_info)
return 0;
}
-static int siu_pcm_hw_params(struct snd_pcm_substream *ss,
+static int siu_pcm_hw_params(struct snd_soc_component *component,
+ struct snd_pcm_substream *ss,
struct snd_pcm_hw_params *hw_params)
{
struct siu_info *info = siu_i2s_data;
@@ -297,7 +298,8 @@ static int siu_pcm_hw_params(struct snd_pcm_substream *ss,
return ret;
}
-static int siu_pcm_hw_free(struct snd_pcm_substream *ss)
+static int siu_pcm_hw_free(struct snd_soc_component *component,
+ struct snd_pcm_substream *ss)
{
struct siu_info *info = siu_i2s_data;
struct siu_port *port_info = siu_port_info(ss);
@@ -324,11 +326,10 @@ static bool filter(struct dma_chan *chan, void *slave)
return true;
}
-static int siu_pcm_open(struct snd_pcm_substream *ss)
+static int siu_pcm_open(struct snd_soc_component *component,
+ struct snd_pcm_substream *ss)
{
/* Playback / Capture */
- struct snd_soc_pcm_runtime *rtd = ss->private_data;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct siu_platform *pdata = component->dev->platform_data;
struct siu_info *info = siu_i2s_data;
struct siu_port *port_info = siu_port_info(ss);
@@ -367,7 +368,8 @@ static int siu_pcm_open(struct snd_pcm_substream *ss)
return 0;
}
-static int siu_pcm_close(struct snd_pcm_substream *ss)
+static int siu_pcm_close(struct snd_soc_component *component,
+ struct snd_pcm_substream *ss)
{
struct siu_info *info = siu_i2s_data;
struct device *dev = ss->pcm->card->dev;
@@ -389,7 +391,8 @@ static int siu_pcm_close(struct snd_pcm_substream *ss)
return 0;
}
-static int siu_pcm_prepare(struct snd_pcm_substream *ss)
+static int siu_pcm_prepare(struct snd_soc_component *component,
+ struct snd_pcm_substream *ss)
{
struct siu_info *info = siu_i2s_data;
struct siu_port *port_info = siu_port_info(ss);
@@ -435,7 +438,8 @@ static int siu_pcm_prepare(struct snd_pcm_substream *ss)
return 0;
}
-static int siu_pcm_trigger(struct snd_pcm_substream *ss, int cmd)
+static int siu_pcm_trigger(struct snd_soc_component *component,
+ struct snd_pcm_substream *ss, int cmd)
{
struct siu_info *info = siu_i2s_data;
struct device *dev = ss->pcm->card->dev;
@@ -477,7 +481,9 @@ static int siu_pcm_trigger(struct snd_pcm_substream *ss, int cmd)
* So far only resolution of one period is supported, subject to extending the
* dmaengine API
*/
-static snd_pcm_uframes_t siu_pcm_pointer_dma(struct snd_pcm_substream *ss)
+static snd_pcm_uframes_t
+siu_pcm_pointer_dma(struct snd_soc_component *component,
+ struct snd_pcm_substream *ss)
{
struct device *dev = ss->pcm->card->dev;
struct siu_info *info = siu_i2s_data;
@@ -512,7 +518,8 @@ static snd_pcm_uframes_t siu_pcm_pointer_dma(struct snd_pcm_substream *ss)
return bytes_to_frames(ss->runtime, ptr);
}
-static int siu_pcm_new(struct snd_soc_pcm_runtime *rtd)
+static int siu_pcm_new(struct snd_soc_component *component,
+ struct snd_soc_pcm_runtime *rtd)
{
/* card->dev == socdev->dev, see snd_soc_new_pcms() */
struct snd_card *card = rtd->card->snd_card;
@@ -558,7 +565,8 @@ static int siu_pcm_new(struct snd_soc_pcm_runtime *rtd)
return 0;
}
-static void siu_pcm_free(struct snd_pcm *pcm)
+static void siu_pcm_free(struct snd_soc_component *component,
+ struct snd_pcm *pcm)
{
struct platform_device *pdev = to_platform_device(pcm->card->dev);
struct siu_port *port_info = siu_ports[pdev->id];
@@ -571,21 +579,17 @@ static void siu_pcm_free(struct snd_pcm *pcm)
dev_dbg(pcm->card->dev, "%s\n", __func__);
}
-static const struct snd_pcm_ops siu_pcm_ops = {
+const struct snd_soc_component_driver siu_component = {
+ .name = DRV_NAME,
.open = siu_pcm_open,
.close = siu_pcm_close,
- .ioctl = snd_pcm_lib_ioctl,
+ .ioctl = snd_soc_pcm_lib_ioctl,
.hw_params = siu_pcm_hw_params,
.hw_free = siu_pcm_hw_free,
.prepare = siu_pcm_prepare,
.trigger = siu_pcm_trigger,
.pointer = siu_pcm_pointer_dma,
-};
-
-struct snd_soc_component_driver siu_component = {
- .name = DRV_NAME,
- .ops = &siu_pcm_ops,
- .pcm_new = siu_pcm_new,
- .pcm_free = siu_pcm_free,
+ .pcm_construct = siu_pcm_new,
+ .pcm_destruct = siu_pcm_free,
};
EXPORT_SYMBOL_GPL(siu_component);
diff --git a/sound/soc/soc-component.c b/sound/soc/soc-component.c
index 79ffc2820ba9..9054558ce386 100644
--- a/sound/soc/soc-component.c
+++ b/sound/soc/soc-component.c
@@ -314,30 +314,24 @@ void snd_soc_component_module_put(struct snd_soc_component *component,
int snd_soc_component_open(struct snd_soc_component *component,
struct snd_pcm_substream *substream)
{
- if (component->driver->ops &&
- component->driver->ops->open)
- return component->driver->ops->open(substream);
-
+ if (component->driver->open)
+ return component->driver->open(component, substream);
return 0;
}
int snd_soc_component_close(struct snd_soc_component *component,
struct snd_pcm_substream *substream)
{
- if (component->driver->ops &&
- component->driver->ops->close)
- return component->driver->ops->close(substream);
-
+ if (component->driver->close)
+ return component->driver->close(component, substream);
return 0;
}
int snd_soc_component_prepare(struct snd_soc_component *component,
struct snd_pcm_substream *substream)
{
- if (component->driver->ops &&
- component->driver->ops->prepare)
- return component->driver->ops->prepare(substream);
-
+ if (component->driver->prepare)
+ return component->driver->prepare(component, substream);
return 0;
}
@@ -345,20 +339,17 @@ int snd_soc_component_hw_params(struct snd_soc_component *component,
struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
- if (component->driver->ops &&
- component->driver->ops->hw_params)
- return component->driver->ops->hw_params(substream, params);
-
+ if (component->driver->hw_params)
+ return component->driver->hw_params(component,
+ substream, params);
return 0;
}
int snd_soc_component_hw_free(struct snd_soc_component *component,
struct snd_pcm_substream *substream)
{
- if (component->driver->ops &&
- component->driver->ops->hw_free)
- return component->driver->ops->hw_free(substream);
-
+ if (component->driver->hw_free)
+ return component->driver->hw_free(component, substream);
return 0;
}
@@ -366,10 +357,8 @@ int snd_soc_component_trigger(struct snd_soc_component *component,
struct snd_pcm_substream *substream,
int cmd)
{
- if (component->driver->ops &&
- component->driver->ops->trigger)
- return component->driver->ops->trigger(substream, cmd);
-
+ if (component->driver->trigger)
+ return component->driver->trigger(component, substream, cmd);
return 0;
}
@@ -431,14 +420,10 @@ int snd_soc_pcm_component_pointer(struct snd_pcm_substream *substream)
struct snd_soc_component *component;
struct snd_soc_rtdcom_list *rtdcom;
- for_each_rtdcom(rtd, rtdcom) {
- component = rtdcom->component;
-
- /* FIXME: use 1st pointer */
- if (component->driver->ops &&
- component->driver->ops->pointer)
- return component->driver->ops->pointer(substream);
- }
+ /* FIXME: use 1st pointer */
+ for_each_rtd_components(rtd, rtdcom, component)
+ if (component->driver->pointer)
+ return component->driver->pointer(component, substream);
return 0;
}
@@ -450,17 +435,32 @@ int snd_soc_pcm_component_ioctl(struct snd_pcm_substream *substream,
struct snd_soc_component *component;
struct snd_soc_rtdcom_list *rtdcom;
- for_each_rtdcom(rtd, rtdcom) {
- component = rtdcom->component;
+ /* FIXME: use 1st ioctl */
+ for_each_rtd_components(rtd, rtdcom, component)
+ if (component->driver->ioctl)
+ return component->driver->ioctl(component, substream,
+ cmd, arg);
+
+ return snd_pcm_lib_ioctl(substream, cmd, arg);
+}
+
+int snd_soc_pcm_component_sync_stop(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_component *component;
+ struct snd_soc_rtdcom_list *rtdcom;
+ int ret;
- /* FIXME: use 1st ioctl */
- if (component->driver->ops &&
- component->driver->ops->ioctl)
- return component->driver->ops->ioctl(substream,
- cmd, arg);
+ for_each_rtd_components(rtd, rtdcom, component) {
+ if (component->driver->sync_stop) {
+ ret = component->driver->sync_stop(component,
+ substream);
+ if (ret < 0)
+ return ret;
+ }
}
- return snd_pcm_lib_ioctl(substream, cmd, arg);
+ return 0;
}
int snd_soc_pcm_component_copy_user(struct snd_pcm_substream *substream,
@@ -471,15 +471,11 @@ int snd_soc_pcm_component_copy_user(struct snd_pcm_substream *substream,
struct snd_soc_rtdcom_list *rtdcom;
struct snd_soc_component *component;
- for_each_rtdcom(rtd, rtdcom) {
- component = rtdcom->component;
-
- /* FIXME. it returns 1st copy now */
- if (component->driver->ops &&
- component->driver->ops->copy_user)
- return component->driver->ops->copy_user(
- substream, channel, pos, buf, bytes);
- }
+ /* FIXME. it returns 1st copy now */
+ for_each_rtd_components(rtd, rtdcom, component)
+ if (component->driver->copy_user)
+ return component->driver->copy_user(
+ component, substream, channel, pos, buf, bytes);
return -EINVAL;
}
@@ -492,13 +488,11 @@ struct page *snd_soc_pcm_component_page(struct snd_pcm_substream *substream,
struct snd_soc_component *component;
struct page *page;
- for_each_rtdcom(rtd, rtdcom) {
- component = rtdcom->component;
-
- /* FIXME. it returns 1st page now */
- if (component->driver->ops &&
- component->driver->ops->page) {
- page = component->driver->ops->page(substream, offset);
+ /* FIXME. it returns 1st page now */
+ for_each_rtd_components(rtd, rtdcom, component) {
+ if (component->driver->page) {
+ page = component->driver->page(component,
+ substream, offset);
if (page)
return page;
}
@@ -514,30 +508,24 @@ int snd_soc_pcm_component_mmap(struct snd_pcm_substream *substream,
struct snd_soc_rtdcom_list *rtdcom;
struct snd_soc_component *component;
- for_each_rtdcom(rtd, rtdcom) {
- component = rtdcom->component;
-
- /* FIXME. it returns 1st mmap now */
- if (component->driver->ops &&
- component->driver->ops->mmap)
- return component->driver->ops->mmap(substream, vma);
- }
+ /* FIXME. it returns 1st mmap now */
+ for_each_rtd_components(rtd, rtdcom, component)
+ if (component->driver->mmap)
+ return component->driver->mmap(component,
+ substream, vma);
return -EINVAL;
}
-int snd_soc_pcm_component_new(struct snd_pcm *pcm)
+int snd_soc_pcm_component_new(struct snd_soc_pcm_runtime *rtd)
{
- struct snd_soc_pcm_runtime *rtd = pcm->private_data;
struct snd_soc_rtdcom_list *rtdcom;
struct snd_soc_component *component;
int ret;
- for_each_rtdcom(rtd, rtdcom) {
- component = rtdcom->component;
-
- if (component->driver->pcm_new) {
- ret = component->driver->pcm_new(rtd);
+ for_each_rtd_components(rtd, rtdcom, component) {
+ if (component->driver->pcm_construct) {
+ ret = component->driver->pcm_construct(component, rtd);
if (ret < 0)
return ret;
}
@@ -546,16 +534,12 @@ int snd_soc_pcm_component_new(struct snd_pcm *pcm)
return 0;
}
-void snd_soc_pcm_component_free(struct snd_pcm *pcm)
+void snd_soc_pcm_component_free(struct snd_soc_pcm_runtime *rtd)
{
- struct snd_soc_pcm_runtime *rtd = pcm->private_data;
struct snd_soc_rtdcom_list *rtdcom;
struct snd_soc_component *component;
- for_each_rtdcom(rtd, rtdcom) {
- component = rtdcom->component;
-
- if (component->driver->pcm_free)
- component->driver->pcm_free(pcm);
- }
+ for_each_rtd_components(rtd, rtdcom, component)
+ if (component->driver->pcm_destruct)
+ component->driver->pcm_destruct(component, rtd->pcm);
}
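
The for_each_rtdcom() to for_each_rtd_components() conversion threads the component through the iterator itself instead of dereferencing rtdcom->component at every use site. A plausible definition of such an iterator over the rtd's component_list (a sketch under that assumption; the in-tree definition may differ in detail):

#include <linux/list.h>

/* Sketch: yield each component attached to the runtime. */
#define for_each_rtd_components(rtd, rtdcom, _component)		\
	for ((rtdcom) = list_first_entry(&(rtd)->component_list,	\
					 typeof(*(rtdcom)), list);	\
	     &(rtdcom)->list != &(rtd)->component_list &&		\
	     ((_component) = (rtdcom)->component);			\
	     (rtdcom) = list_next_entry((rtdcom), list))
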
diff --git a/sound/soc/soc-compress.c b/sound/soc/soc-compress.c
index 9e54d8ae6d2c..61f230324164 100644
--- a/sound/soc/soc-compress.c
+++ b/sound/soc/soc-compress.c
@@ -28,9 +28,7 @@ static int soc_compr_components_open(struct snd_compr_stream *cstream,
struct snd_soc_rtdcom_list *rtdcom;
int ret;
- for_each_rtdcom(rtd, rtdcom) {
- component = rtdcom->component;
-
+ for_each_rtd_components(rtd, rtdcom, component) {
if (!component->driver->compr_ops ||
!component->driver->compr_ops->open)
continue;
@@ -57,9 +55,7 @@ static int soc_compr_components_free(struct snd_compr_stream *cstream,
struct snd_soc_component *component;
struct snd_soc_rtdcom_list *rtdcom;
- for_each_rtdcom(rtd, rtdcom) {
- component = rtdcom->component;
-
+ for_each_rtd_components(rtd, rtdcom, component) {
if (component == last)
break;
@@ -353,9 +349,7 @@ static int soc_compr_components_trigger(struct snd_compr_stream *cstream,
struct snd_soc_rtdcom_list *rtdcom;
int ret;
- for_each_rtdcom(rtd, rtdcom) {
- component = rtdcom->component;
-
+ for_each_rtd_components(rtd, rtdcom, component) {
if (!component->driver->compr_ops ||
!component->driver->compr_ops->trigger)
continue;
@@ -458,9 +452,7 @@ static int soc_compr_components_set_params(struct snd_compr_stream *cstream,
struct snd_soc_rtdcom_list *rtdcom;
int ret;
- for_each_rtdcom(rtd, rtdcom) {
- component = rtdcom->component;
-
+ for_each_rtd_components(rtd, rtdcom, component) {
if (!component->driver->compr_ops ||
!component->driver->compr_ops->set_params)
continue;
@@ -601,9 +593,7 @@ static int soc_compr_get_params(struct snd_compr_stream *cstream,
goto err;
}
- for_each_rtdcom(rtd, rtdcom) {
- component = rtdcom->component;
-
+ for_each_rtd_components(rtd, rtdcom, component) {
if (!component->driver->compr_ops ||
!component->driver->compr_ops->get_params)
continue;
@@ -627,9 +617,7 @@ static int soc_compr_get_caps(struct snd_compr_stream *cstream,
mutex_lock_nested(&rtd->card->pcm_mutex, rtd->card->pcm_subclass);
- for_each_rtdcom(rtd, rtdcom) {
- component = rtdcom->component;
-
+ for_each_rtd_components(rtd, rtdcom, component) {
if (!component->driver->compr_ops ||
!component->driver->compr_ops->get_caps)
continue;
@@ -652,9 +640,7 @@ static int soc_compr_get_codec_caps(struct snd_compr_stream *cstream,
mutex_lock_nested(&rtd->card->pcm_mutex, rtd->card->pcm_subclass);
- for_each_rtdcom(rtd, rtdcom) {
- component = rtdcom->component;
-
+ for_each_rtd_components(rtd, rtdcom, component) {
if (!component->driver->compr_ops ||
!component->driver->compr_ops->get_codec_caps)
continue;
@@ -684,9 +670,7 @@ static int soc_compr_ack(struct snd_compr_stream *cstream, size_t bytes)
goto err;
}
- for_each_rtdcom(rtd, rtdcom) {
- component = rtdcom->component;
-
+ for_each_rtd_components(rtd, rtdcom, component) {
if (!component->driver->compr_ops ||
!component->driver->compr_ops->ack)
continue;
@@ -715,9 +699,7 @@ static int soc_compr_pointer(struct snd_compr_stream *cstream,
if (cpu_dai->driver->cops && cpu_dai->driver->cops->pointer)
cpu_dai->driver->cops->pointer(cstream, tstamp, cpu_dai);
- for_each_rtdcom(rtd, rtdcom) {
- component = rtdcom->component;
-
+ for_each_rtd_components(rtd, rtdcom, component) {
if (!component->driver->compr_ops ||
!component->driver->compr_ops->pointer)
continue;
@@ -740,9 +722,7 @@ static int soc_compr_copy(struct snd_compr_stream *cstream,
mutex_lock_nested(&rtd->card->pcm_mutex, rtd->card->pcm_subclass);
- for_each_rtdcom(rtd, rtdcom) {
- component = rtdcom->component;
-
+ for_each_rtd_components(rtd, rtdcom, component) {
if (!component->driver->compr_ops ||
!component->driver->compr_ops->copy)
continue;
@@ -770,9 +750,7 @@ static int soc_compr_set_metadata(struct snd_compr_stream *cstream,
return ret;
}
- for_each_rtdcom(rtd, rtdcom) {
- component = rtdcom->component;
-
+ for_each_rtd_components(rtd, rtdcom, component) {
if (!component->driver->compr_ops ||
!component->driver->compr_ops->set_metadata)
continue;
@@ -801,9 +779,7 @@ static int soc_compr_get_metadata(struct snd_compr_stream *cstream,
return ret;
}
- for_each_rtdcom(rtd, rtdcom) {
- component = rtdcom->component;
-
+ for_each_rtd_components(rtd, rtdcom, component) {
if (!component->driver->compr_ops ||
!component->driver->compr_ops->get_metadata)
continue;
@@ -932,9 +908,7 @@ int snd_soc_new_compress(struct snd_soc_pcm_runtime *rtd, int num)
memcpy(compr->ops, &soc_compr_ops, sizeof(soc_compr_ops));
}
- for_each_rtdcom(rtd, rtdcom) {
- component = rtdcom->component;
-
+ for_each_rtd_components(rtd, rtdcom, component) {
if (!component->driver->compr_ops ||
!component->driver->compr_ops->copy)
continue;
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 88978a3036c4..062653ab03a3 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -125,6 +125,9 @@ static umode_t soc_dev_attr_is_visible(struct kobject *kobj,
struct device *dev = kobj_to_dev(kobj);
struct snd_soc_pcm_runtime *rtd = dev_get_drvdata(dev);
+ if (!rtd)
+ return 0;
+
if (attr == &dev_attr_pmdown_time.attr)
return attr->mode; /* always visible */
return rtd->num_codecs ? attr->mode : 0; /* enabled only with codec */
@@ -274,43 +277,58 @@ static inline void snd_soc_debugfs_exit(void)
#endif
+/*
+ * This is glue code between snd_pcm_lib_ioctl() and
+ * snd_soc_component_driver :: ioctl
+ */
+int snd_soc_pcm_lib_ioctl(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ unsigned int cmd, void *arg)
+{
+ return snd_pcm_lib_ioctl(substream, cmd, arg);
+}
+EXPORT_SYMBOL_GPL(snd_soc_pcm_lib_ioctl);
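/*
 * A component driver can plug this in directly as its .ioctl callback;
 * the soc-generic-dmaengine-pcm hunks later in this patch do exactly
 * that.  Sketch (foo_component is a made-up name):
 *
 *	static const struct snd_soc_component_driver foo_component = {
 *		.ioctl = snd_soc_pcm_lib_ioctl,
 *	};
 */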
+
static int snd_soc_rtdcom_add(struct snd_soc_pcm_runtime *rtd,
struct snd_soc_component *component)
{
struct snd_soc_rtdcom_list *rtdcom;
+ struct snd_soc_component *comp;
- for_each_rtdcom(rtd, rtdcom) {
+ for_each_rtd_components(rtd, rtdcom, comp) {
/* already connected */
- if (rtdcom->component == component)
+ if (comp == component)
return 0;
}
- rtdcom = kmalloc(sizeof(*rtdcom), GFP_KERNEL);
+ /*
+ * the rtdcom created here will be freed when rtd->dev is freed;
+ * see
+ * soc_free_pcm_runtime() :: device_unregister(rtd->dev)
+ */
+ rtdcom = devm_kzalloc(rtd->dev, sizeof(*rtdcom), GFP_KERNEL);
if (!rtdcom)
return -ENOMEM;
rtdcom->component = component;
INIT_LIST_HEAD(&rtdcom->list);
+ /*
+ * When rtd is freed, the rtdcom created here is freed
+ * along with it, so there is no need to call
+ * list_del(&rtdcom->list) at that point.
+ */
list_add_tail(&rtdcom->list, &rtd->component_list);
return 0;
}
-static void snd_soc_rtdcom_del_all(struct snd_soc_pcm_runtime *rtd)
-{
- struct snd_soc_rtdcom_list *rtdcom1, *rtdcom2;
-
- for_each_rtdcom_safe(rtd, rtdcom1, rtdcom2)
- kfree(rtdcom1);
-
- INIT_LIST_HEAD(&rtd->component_list);
-}
-
struct snd_soc_component *snd_soc_rtdcom_lookup(struct snd_soc_pcm_runtime *rtd,
const char *driver_name)
{
struct snd_soc_rtdcom_list *rtdcom;
+ struct snd_soc_component *component;
if (!driver_name)
return NULL;
@@ -323,8 +341,8 @@ struct snd_soc_component *snd_soc_rtdcom_lookup(struct snd_soc_pcm_runtime *rtd,
* But if many components sharing the same driver name are connected
* to one rtd, this function will return the first component found.
*/
- for_each_rtdcom(rtd, rtdcom) {
- const char *component_name = rtdcom->component->driver->name;
+ for_each_rtd_components(rtd, rtdcom, component) {
+ const char *component_name = component->driver->name;
if (!component_name)
continue;
@@ -338,6 +356,39 @@ struct snd_soc_component *snd_soc_rtdcom_lookup(struct snd_soc_pcm_runtime *rtd,
}
EXPORT_SYMBOL_GPL(snd_soc_rtdcom_lookup);
+static struct snd_soc_component
+*snd_soc_lookup_component_nolocked(struct device *dev, const char *driver_name)
+{
+ struct snd_soc_component *component;
+ struct snd_soc_component *found_component;
+
+ found_component = NULL;
+ for_each_component(component) {
+ if ((dev == component->dev) &&
+ (!driver_name ||
+ (driver_name == component->driver->name) ||
+ (strcmp(component->driver->name, driver_name) == 0))) {
+ found_component = component;
+ break;
+ }
+ }
+
+ return found_component;
+}
+
+struct snd_soc_component *snd_soc_lookup_component(struct device *dev,
+ const char *driver_name)
+{
+ struct snd_soc_component *component;
+
+ mutex_lock(&client_mutex);
+ component = snd_soc_lookup_component_nolocked(dev, driver_name);
+ mutex_unlock(&client_mutex);
+
+ return component;
+}
+EXPORT_SYMBOL_GPL(snd_soc_lookup_component);
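/*
 * The _nolocked variant is for callers that already hold client_mutex:
 * snd_soc_unregister_component() below keeps the mutex held across its
 * whole removal loop and calls the _nolocked lookup each iteration,
 * while this wrapper takes the mutex for a single lookup only.
 */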
+
struct snd_pcm_substream *snd_soc_get_dai_substream(struct snd_soc_card *card,
const char *dai_link, int stream)
{
@@ -355,58 +406,104 @@ EXPORT_SYMBOL_GPL(snd_soc_get_dai_substream);
static const struct snd_soc_ops null_snd_soc_ops;
+static void soc_release_rtd_dev(struct device *dev)
+{
+ /* "dev" means "rtd->dev" */
+ kfree(dev);
+}
+
+static void soc_free_pcm_runtime(struct snd_soc_pcm_runtime *rtd)
+{
+ if (!rtd)
+ return;
+
+ list_del(&rtd->list);
+
+ flush_delayed_work(&rtd->delayed_work);
+ snd_soc_pcm_component_free(rtd);
+
+ /*
+ * we don't need to call kfree() for rtd->dev;
+ * see
+ * soc_release_rtd_dev()
+ *
+ * We don't need a NULL check on rtd->dev, because
+ * it is allocated *before* rtd;
+ * see
+ * soc_new_pcm_runtime()
+ */
+ device_unregister(rtd->dev);
+}
+
static struct snd_soc_pcm_runtime *soc_new_pcm_runtime(
struct snd_soc_card *card, struct snd_soc_dai_link *dai_link)
{
struct snd_soc_pcm_runtime *rtd;
+ struct device *dev;
+ int ret;
- rtd = kzalloc(sizeof(struct snd_soc_pcm_runtime), GFP_KERNEL);
- if (!rtd)
+ /*
+ * for rtd->dev
+ */
+ dev = kzalloc(sizeof(struct device), GFP_KERNEL);
+ if (!dev)
return NULL;
- INIT_LIST_HEAD(&rtd->component_list);
- rtd->card = card;
- rtd->dai_link = dai_link;
- if (!rtd->dai_link->ops)
- rtd->dai_link->ops = &null_snd_soc_ops;
+ dev->parent = card->dev;
+ dev->release = soc_release_rtd_dev;
+ dev->groups = soc_dev_attr_groups;
- rtd->codec_dais = kcalloc(dai_link->num_codecs,
- sizeof(struct snd_soc_dai *),
- GFP_KERNEL);
- if (!rtd->codec_dais) {
- kfree(rtd);
+ dev_set_name(dev, "%s", dai_link->name);
+
+ ret = device_register(dev);
+ if (ret < 0) {
+ put_device(dev); /* soc_release_rtd_dev */
return NULL;
}
- return rtd;
-}
+ /*
+ * for rtd
+ */
+ rtd = devm_kzalloc(dev, sizeof(*rtd), GFP_KERNEL);
+ if (!rtd)
+ goto free_rtd;
-static void soc_free_pcm_runtime(struct snd_soc_pcm_runtime *rtd)
-{
- kfree(rtd->codec_dais);
- snd_soc_rtdcom_del_all(rtd);
- kfree(rtd);
-}
+ rtd->dev = dev;
+ dev_set_drvdata(dev, rtd);
+
+ /*
+ * for rtd->codec_dais
+ */
+ rtd->codec_dais = devm_kcalloc(dev, dai_link->num_codecs,
+ sizeof(struct snd_soc_dai *),
+ GFP_KERNEL);
+ if (!rtd->codec_dais)
+ goto free_rtd;
+
+ /*
+ * rtd remaining settings
+ */
+ INIT_LIST_HEAD(&rtd->component_list);
+ INIT_LIST_HEAD(&rtd->dpcm[SNDRV_PCM_STREAM_PLAYBACK].be_clients);
+ INIT_LIST_HEAD(&rtd->dpcm[SNDRV_PCM_STREAM_CAPTURE].be_clients);
+ INIT_LIST_HEAD(&rtd->dpcm[SNDRV_PCM_STREAM_PLAYBACK].fe_clients);
+ INIT_LIST_HEAD(&rtd->dpcm[SNDRV_PCM_STREAM_CAPTURE].fe_clients);
+
+ rtd->card = card;
+ rtd->dai_link = dai_link;
+ if (!rtd->dai_link->ops)
+ rtd->dai_link->ops = &null_snd_soc_ops;
-static void soc_add_pcm_runtime(struct snd_soc_card *card,
- struct snd_soc_pcm_runtime *rtd)
-{
/* see for_each_card_rtds */
list_add_tail(&rtd->list, &card->rtd_list);
rtd->num = card->num_rtd;
card->num_rtd++;
-}
-static void soc_remove_pcm_runtimes(struct snd_soc_card *card)
-{
- struct snd_soc_pcm_runtime *rtd, *_rtd;
-
- for_each_card_rtds_safe(card, rtd, _rtd) {
- list_del(&rtd->list);
- soc_free_pcm_runtime(rtd);
- }
+ return rtd;
- card->num_rtd = 0;
+free_rtd:
+ soc_free_pcm_runtime(rtd);
+ return NULL;
}
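/*
 * Ownership sketch for the allocations above: everything hangs off
 * rtd->dev, so the single device_unregister() in soc_free_pcm_runtime()
 * tears the whole runtime down:
 *
 *	dev             = kzalloc(...);           freed by soc_release_rtd_dev()
 *	rtd             = devm_kzalloc(dev, ...); released with dev
 *	rtd->codec_dais = devm_kcalloc(dev, ...); released with dev
 */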
struct snd_soc_pcm_runtime *snd_soc_get_pcm_runtime(struct snd_soc_card *card,
@@ -859,37 +956,168 @@ struct snd_soc_dai_link *snd_soc_find_dai_link(struct snd_soc_card *card,
}
EXPORT_SYMBOL_GPL(snd_soc_find_dai_link);
-static bool soc_is_dai_link_bound(struct snd_soc_card *card,
- struct snd_soc_dai_link *dai_link)
+static int soc_dai_link_sanity_check(struct snd_soc_card *card,
+ struct snd_soc_dai_link *link)
{
- struct snd_soc_pcm_runtime *rtd;
+ int i;
+ struct snd_soc_dai_link_component *codec, *platform;
- for_each_card_rtds(card, rtd) {
- if (rtd->dai_link == dai_link)
- return true;
+ for_each_link_codecs(link, i, codec) {
+ /*
+ * Codec must be specified by 1 of name or OF node,
+ * not both or neither.
+ */
+ if (!!codec->name == !!codec->of_node) {
+ dev_err(card->dev, "ASoC: Neither/both codec name/of_node are set for %s\n",
+ link->name);
+ return -EINVAL;
+ }
+
+ /* Codec DAI name must be specified */
+ if (!codec->dai_name) {
+ dev_err(card->dev, "ASoC: codec_dai_name not set for %s\n",
+ link->name);
+ return -EINVAL;
+ }
+
+ /*
+ * Defer card registration if codec component is not added to
+ * component list.
+ */
+ if (!soc_find_component(codec))
+ return -EPROBE_DEFER;
}
- return false;
+ for_each_link_platforms(link, i, platform) {
+ /*
+ * Platform may be specified by either name or OF node, but it
+ * may also be left unspecified; in that case no components will
+ * be inserted in the rtdcom list
+ */
+ if (!!platform->name == !!platform->of_node) {
+ dev_err(card->dev,
+ "ASoC: Neither/both platform name/of_node are set for %s\n",
+ link->name);
+ return -EINVAL;
+ }
+
+ /*
+ * Defer card registration if platform component is not added to
+ * component list.
+ */
+ if (!soc_find_component(platform))
+ return -EPROBE_DEFER;
+ }
+
+ /* FIXME */
+ if (link->num_cpus > 1) {
+ dev_err(card->dev,
+ "ASoC: multi cpu is not yet supported %s\n",
+ link->name);
+ return -EINVAL;
+ }
+
+ /*
+ * CPU device may be specified by either name or OF node, but
+ * may also be left unspecified; matching is then based on the
+ * DAI name alone.
+ */
+ if (link->cpus->name && link->cpus->of_node) {
+ dev_err(card->dev,
+ "ASoC: Neither/both cpu name/of_node are set for %s\n",
+ link->name);
+ return -EINVAL;
+ }
+
+ /*
+ * Defer card registration if cpu dai component is not added to
+ * component list.
+ */
+ if ((link->cpus->of_node || link->cpus->name) &&
+ !soc_find_component(link->cpus))
+ return -EPROBE_DEFER;
+
+ /*
+ * At least one of CPU DAI name or CPU device name/node must be
+ * specified
+ */
+ if (!link->cpus->dai_name &&
+ !(link->cpus->name || link->cpus->of_node)) {
+ dev_err(card->dev,
+ "ASoC: Neither cpu_dai_name nor cpu_name/of_node are set for %s\n",
+ link->name);
+ return -EINVAL;
+ }
+
+ return 0;
+}
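/*
 * The "!!x->name == !!x->of_node" tests above act as an exclusive-or:
 * exactly one of the two may be set.  For example:
 *
 *	name = NULL, of_node = NULL  ->  0 == 0  ->  -EINVAL
 *	name = "x",  of_node = NULL  ->  1 != 0  ->  ok
 *	name = "x",  of_node = set   ->  1 == 1  ->  -EINVAL
 */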
+
+/**
+ * snd_soc_remove_dai_link - Remove a DAI link from the list
+ * @card: The ASoC card that owns the link
+ * @dai_link: The DAI link to remove
+ *
+ * This function removes a DAI link from the ASoC card's link list.
+ *
+ * For DAI links previously added by topology, topology should
+ * remove them by using the dobj embedded in the link.
+ */
+void snd_soc_remove_dai_link(struct snd_soc_card *card,
+ struct snd_soc_dai_link *dai_link)
+{
+ struct snd_soc_pcm_runtime *rtd;
+
+ lockdep_assert_held(&client_mutex);
+
+ /*
+ * Notify the machine driver for extra destruction
+ */
+ if (card->remove_dai_link)
+ card->remove_dai_link(card, dai_link);
+
+ list_del(&dai_link->list);
+
+ rtd = snd_soc_get_pcm_runtime(card, dai_link->name);
+ if (rtd)
+ soc_free_pcm_runtime(rtd);
}
+EXPORT_SYMBOL_GPL(snd_soc_remove_dai_link);
-static int soc_bind_dai_link(struct snd_soc_card *card,
- struct snd_soc_dai_link *dai_link)
+/**
+ * snd_soc_add_dai_link - Add a DAI link dynamically
+ * @card: The ASoC card to which the DAI link is added
+ * @dai_link: The new DAI link to add
+ *
+ * This function adds a DAI link to the ASoC card's link list.
+ *
+ * Note: Topology can use this API to add DAI links when probing the
+ * topology component; machine drivers can still define static
+ * DAI links in the dai_link array.
+ */
+int snd_soc_add_dai_link(struct snd_soc_card *card,
+ struct snd_soc_dai_link *dai_link)
{
struct snd_soc_pcm_runtime *rtd;
struct snd_soc_dai_link_component *codec, *platform;
struct snd_soc_component *component;
- int i;
+ int i, ret;
+
+ lockdep_assert_held(&client_mutex);
+
+ /*
+ * Notify the machine driver for extra initialization
+ */
+ if (card->add_dai_link)
+ card->add_dai_link(card, dai_link);
if (dai_link->ignore)
return 0;
dev_dbg(card->dev, "ASoC: binding %s\n", dai_link->name);
- if (soc_is_dai_link_bound(card, dai_link)) {
- dev_dbg(card->dev, "ASoC: dai link %s already bound\n",
- dai_link->name);
- return 0;
- }
+ ret = soc_dai_link_sanity_check(card, dai_link);
+ if (ret < 0)
+ return ret;
rtd = soc_new_pcm_runtime(card, dai_link);
if (!rtd)
@@ -930,13 +1158,16 @@ static int soc_bind_dai_link(struct snd_soc_card *card,
}
}
- soc_add_pcm_runtime(card, rtd);
+ /* see for_each_card_links */
+ list_add_tail(&dai_link->list, &card->dai_link_list);
+
return 0;
_err_defer:
soc_free_pcm_runtime(rtd);
return -EPROBE_DEFER;
}
+EXPORT_SYMBOL_GPL(snd_soc_add_dai_link);
static void soc_set_of_name_prefix(struct snd_soc_component *component)
{
@@ -973,8 +1204,16 @@ static void soc_set_name_prefix(struct snd_soc_card *card,
soc_set_of_name_prefix(component);
}
-static void soc_cleanup_component(struct snd_soc_component *component)
+static void soc_remove_component(struct snd_soc_component *component,
+ int probed)
{
+
+ if (!component->card)
+ return;
+
+ if (probed)
+ snd_soc_component_remove(component);
+
/* For framework level robustness */
snd_soc_component_set_jack(component, NULL, NULL);
@@ -985,22 +1224,13 @@ static void soc_cleanup_component(struct snd_soc_component *component)
snd_soc_component_module_put_when_remove(component);
}
-static void soc_remove_component(struct snd_soc_component *component)
-{
- if (!component->card)
- return;
-
- snd_soc_component_remove(component);
-
- soc_cleanup_component(component);
-}
-
static int soc_probe_component(struct snd_soc_card *card,
struct snd_soc_component *component)
{
struct snd_soc_dapm_context *dapm =
snd_soc_component_get_dapm(component);
struct snd_soc_dai *dai;
+ int probed = 0;
int ret;
if (!strcmp(component->name, "snd-soc-dummy"))
@@ -1056,6 +1286,7 @@ static int soc_probe_component(struct snd_soc_card *card,
dapm->bias_level != SND_SOC_BIAS_OFF,
"codec %s can not start from non-off bias with idle_bias_off==1\n",
component->name);
+ probed = 1;
/* machine specific init */
if (component->init) {
@@ -1084,7 +1315,7 @@ static int soc_probe_component(struct snd_soc_card *card,
err_probe:
if (ret < 0)
- soc_cleanup_component(component);
+ soc_remove_component(component, probed);
return ret;
}
@@ -1126,7 +1357,6 @@ static int soc_probe_dai(struct snd_soc_dai *dai, int order)
return 0;
}
-static void soc_rtd_free(struct snd_soc_pcm_runtime *rtd); /* remove me */
static void soc_remove_link_dais(struct snd_soc_card *card)
{
int i;
@@ -1136,10 +1366,6 @@ static void soc_remove_link_dais(struct snd_soc_card *card)
for_each_comp_order(order) {
for_each_card_rtds(card, rtd) {
-
- /* finalize rtd device */
- soc_rtd_free(rtd);
-
/* remove the CODEC DAI */
for_each_rtd_codec_dai(rtd, i, codec_dai)
soc_remove_dai(codec_dai, order);
@@ -1187,13 +1413,11 @@ static void soc_remove_link_components(struct snd_soc_card *card)
for_each_comp_order(order) {
for_each_card_rtds(card, rtd) {
- for_each_rtdcom(rtd, rtdcom) {
- component = rtdcom->component;
-
+ for_each_rtd_components(rtd, rtdcom, component) {
if (component->driver->remove_order != order)
continue;
- soc_remove_component(component);
+ soc_remove_component(component, 1);
}
}
}
@@ -1208,9 +1432,7 @@ static int soc_probe_link_components(struct snd_soc_card *card)
for_each_comp_order(order) {
for_each_card_rtds(card, rtd) {
- for_each_rtdcom(rtd, rtdcom) {
- component = rtdcom->component;
-
+ for_each_rtd_components(rtd, rtdcom, component) {
if (component->driver->probe_order != order)
continue;
@@ -1224,119 +1446,6 @@ static int soc_probe_link_components(struct snd_soc_card *card)
return 0;
}
-static void soc_remove_dai_links(struct snd_soc_card *card)
-{
- struct snd_soc_dai_link *link, *_link;
-
- soc_remove_link_dais(card);
-
- soc_remove_link_components(card);
-
- for_each_card_links_safe(card, link, _link) {
- if (link->dobj.type == SND_SOC_DOBJ_DAI_LINK)
- dev_warn(card->dev, "Topology forgot to remove link %s?\n",
- link->name);
-
- list_del(&link->list);
- }
-}
-
-static int soc_init_dai_link(struct snd_soc_card *card,
- struct snd_soc_dai_link *link)
-{
- int i;
- struct snd_soc_dai_link_component *codec, *platform;
-
- for_each_link_codecs(link, i, codec) {
- /*
- * Codec must be specified by 1 of name or OF node,
- * not both or neither.
- */
- if (!!codec->name == !!codec->of_node) {
- dev_err(card->dev, "ASoC: Neither/both codec name/of_node are set for %s\n",
- link->name);
- return -EINVAL;
- }
-
- /* Codec DAI name must be specified */
- if (!codec->dai_name) {
- dev_err(card->dev, "ASoC: codec_dai_name not set for %s\n",
- link->name);
- return -EINVAL;
- }
-
- /*
- * Defer card registration if codec component is not added to
- * component list.
- */
- if (!soc_find_component(codec))
- return -EPROBE_DEFER;
- }
-
- for_each_link_platforms(link, i, platform) {
- /*
- * Platform may be specified by either name or OF node, but it
- * can be left unspecified, then no components will be inserted
- * in the rtdcom list
- */
- if (!!platform->name == !!platform->of_node) {
- dev_err(card->dev,
- "ASoC: Neither/both platform name/of_node are set for %s\n",
- link->name);
- return -EINVAL;
- }
-
- /*
- * Defer card registration if platform component is not added to
- * component list.
- */
- if (!soc_find_component(platform))
- return -EPROBE_DEFER;
- }
-
- /* FIXME */
- if (link->num_cpus > 1) {
- dev_err(card->dev,
- "ASoC: multi cpu is not yet supported %s\n",
- link->name);
- return -EINVAL;
- }
-
- /*
- * CPU device may be specified by either name or OF node, but
- * can be left unspecified, and will be matched based on DAI
- * name alone..
- */
- if (link->cpus->name && link->cpus->of_node) {
- dev_err(card->dev,
- "ASoC: Neither/both cpu name/of_node are set for %s\n",
- link->name);
- return -EINVAL;
- }
-
- /*
- * Defer card registartion if cpu dai component is not added to
- * component list.
- */
- if ((link->cpus->of_node || link->cpus->name) &&
- !soc_find_component(link->cpus))
- return -EPROBE_DEFER;
-
- /*
- * At least one of CPU DAI name or CPU device name/node must be
- * specified
- */
- if (!link->cpus->dai_name &&
- !(link->cpus->name || link->cpus->of_node)) {
- dev_err(card->dev,
- "ASoC: Neither cpu_dai_name nor cpu_name/of_node are set for %s\n",
- link->name);
- return -EINVAL;
- }
-
- return 0;
-}
-
void snd_soc_disconnect_sync(struct device *dev)
{
struct snd_soc_component *component =
@@ -1349,117 +1458,6 @@ void snd_soc_disconnect_sync(struct device *dev)
}
EXPORT_SYMBOL_GPL(snd_soc_disconnect_sync);
-/**
- * snd_soc_add_dai_link - Add a DAI link dynamically
- * @card: The ASoC card to which the DAI link is added
- * @dai_link: The new DAI link to add
- *
- * This function adds a DAI link to the ASoC card's link list.
- *
- * Note: Topology can use this API to add DAI links when probing the
- * topology component. And machine drivers can still define static
- * DAI links in dai_link array.
- */
-int snd_soc_add_dai_link(struct snd_soc_card *card,
- struct snd_soc_dai_link *dai_link)
-{
- if (dai_link->dobj.type
- && dai_link->dobj.type != SND_SOC_DOBJ_DAI_LINK) {
- dev_err(card->dev, "Invalid dai link type %d\n",
- dai_link->dobj.type);
- return -EINVAL;
- }
-
- lockdep_assert_held(&client_mutex);
- /*
- * Notify the machine driver for extra initialization
- * on the link created by topology.
- */
- if (dai_link->dobj.type && card->add_dai_link)
- card->add_dai_link(card, dai_link);
-
- /* see for_each_card_links */
- list_add_tail(&dai_link->list, &card->dai_link_list);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(snd_soc_add_dai_link);
-
-/**
- * snd_soc_remove_dai_link - Remove a DAI link from the list
- * @card: The ASoC card that owns the link
- * @dai_link: The DAI link to remove
- *
- * This function removes a DAI link from the ASoC card's link list.
- *
- * For DAI links previously added by topology, topology should
- * remove them by using the dobj embedded in the link.
- */
-void snd_soc_remove_dai_link(struct snd_soc_card *card,
- struct snd_soc_dai_link *dai_link)
-{
- if (dai_link->dobj.type
- && dai_link->dobj.type != SND_SOC_DOBJ_DAI_LINK) {
- dev_err(card->dev, "Invalid dai link type %d\n",
- dai_link->dobj.type);
- return;
- }
-
- lockdep_assert_held(&client_mutex);
- /*
- * Notify the machine driver for extra destruction
- * on the link created by topology.
- */
- if (dai_link->dobj.type && card->remove_dai_link)
- card->remove_dai_link(card, dai_link);
-
- list_del(&dai_link->list);
-}
-EXPORT_SYMBOL_GPL(snd_soc_remove_dai_link);
-
-static void soc_rtd_free(struct snd_soc_pcm_runtime *rtd)
-{
- if (rtd->dev_registered) {
- /* we don't need to call kfree() for rtd->dev */
- device_unregister(rtd->dev);
- rtd->dev_registered = 0;
- }
-}
-
-static void soc_rtd_release(struct device *dev)
-{
- kfree(dev);
-}
-
-static int soc_rtd_init(struct snd_soc_pcm_runtime *rtd, const char *name)
-{
- int ret = 0;
-
- /* register the rtd device */
- rtd->dev = kzalloc(sizeof(struct device), GFP_KERNEL);
- if (!rtd->dev)
- return -ENOMEM;
- rtd->dev->parent = rtd->card->dev;
- rtd->dev->release = soc_rtd_release;
- rtd->dev->groups = soc_dev_attr_groups;
- dev_set_name(rtd->dev, "%s", name);
- dev_set_drvdata(rtd->dev, rtd);
- INIT_LIST_HEAD(&rtd->dpcm[SNDRV_PCM_STREAM_PLAYBACK].be_clients);
- INIT_LIST_HEAD(&rtd->dpcm[SNDRV_PCM_STREAM_CAPTURE].be_clients);
- INIT_LIST_HEAD(&rtd->dpcm[SNDRV_PCM_STREAM_PLAYBACK].fe_clients);
- INIT_LIST_HEAD(&rtd->dpcm[SNDRV_PCM_STREAM_CAPTURE].fe_clients);
- ret = device_register(rtd->dev);
- if (ret < 0) {
- /* calling put_device() here to free the rtd->dev */
- put_device(rtd->dev);
- dev_err(rtd->card->dev,
- "ASoC: failed to register runtime device: %d\n", ret);
- return ret;
- }
- rtd->dev_registered = 1;
- return 0;
-}
-
static int soc_link_dai_pcm_new(struct snd_soc_dai **dais, int num_dais,
struct snd_soc_pcm_runtime *rtd)
{
@@ -1509,10 +1507,6 @@ static int soc_link_init(struct snd_soc_card *card,
return ret;
}
- ret = soc_rtd_init(rtd, dai_link->name);
- if (ret)
- return ret;
-
/* add DPCM sysfs entries */
soc_dpcm_debugfs_add(rtd);
@@ -1523,9 +1517,7 @@ static int soc_link_init(struct snd_soc_card *card,
* topology based drivers can use the DAI link id field to set PCM
* device number and then use rtd + a base offset of the BEs.
*/
- for_each_rtdcom(rtd, rtdcom) {
- component = rtdcom->component;
-
+ for_each_rtd_components(rtd, rtdcom, component) {
if (!component->driver->use_dai_pcm_id)
continue;
@@ -1590,21 +1582,18 @@ static int soc_bind_aux_dev(struct snd_soc_card *card)
static int soc_probe_aux_devices(struct snd_soc_card *card)
{
- struct snd_soc_component *comp;
+ struct snd_soc_component *component;
int order;
int ret;
for_each_comp_order(order) {
- for_each_card_auxs(card, comp) {
- if (comp->driver->probe_order == order) {
- ret = soc_probe_component(card, comp);
- if (ret < 0) {
- dev_err(card->dev,
- "ASoC: failed to probe aux component %s %d\n",
- comp->name, ret);
- return ret;
- }
- }
+ for_each_card_auxs(card, component) {
+ if (component->driver->probe_order != order)
+ continue;
+
+ ret = soc_probe_component(card, component);
+ if (ret < 0)
+ return ret;
}
}
@@ -1619,7 +1608,7 @@ static void soc_remove_aux_devices(struct snd_soc_card *card)
for_each_comp_order(order) {
for_each_card_auxs_safe(card, comp, _comp) {
if (comp->driver->remove_order == order)
- soc_remove_component(comp);
+ soc_remove_component(comp, 1);
}
}
}
@@ -1729,6 +1718,23 @@ static int is_dmi_valid(const char *field)
return 1;
}
+/*
+ * Append a string to card->dmi_longname with character cleanups.
+ */
+static void append_dmi_string(struct snd_soc_card *card, const char *str)
+{
+ char *dst = card->dmi_longname;
+ size_t dst_len = sizeof(card->dmi_longname);
+ size_t len;
+
+ len = strlen(dst);
+ snprintf(dst + len, dst_len - len, "-%s", str);
+
+ len++; /* skip the separator "-" */
+ if (len < dst_len)
+ cleanup_dmi_name(dst + len);
+}
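/*
 * Worked example with made-up DMI strings, assuming cleanup_dmi_name()
 * maps disallowed characters to '_': with dmi_longname == "FooVendor",
 * append_dmi_string(card, "Bar Board?") leaves "FooVendor-Bar_Board_".
 * The '-' separator is written first, and only the newly appended part
 * is passed to cleanup_dmi_name().
 */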
+
/**
* snd_soc_set_dmi_name() - Register DMI names to card
* @card: The card to register DMI names
@@ -1763,61 +1769,37 @@ static int is_dmi_valid(const char *field)
int snd_soc_set_dmi_name(struct snd_soc_card *card, const char *flavour)
{
const char *vendor, *product, *product_version, *board;
- size_t longname_buf_size = sizeof(card->snd_card->longname);
- size_t len;
if (card->long_name)
return 0; /* long name already set by driver or from DMI */
- /* make up dmi long name as: vendor.product.version.board */
+ /* make up dmi long name as: vendor-product-version-board */
vendor = dmi_get_system_info(DMI_BOARD_VENDOR);
if (!vendor || !is_dmi_valid(vendor)) {
dev_warn(card->dev, "ASoC: no DMI vendor name!\n");
return 0;
}
- snprintf(card->dmi_longname, sizeof(card->snd_card->longname),
- "%s", vendor);
+ snprintf(card->dmi_longname, sizeof(card->dmi_longname), "%s", vendor);
cleanup_dmi_name(card->dmi_longname);
product = dmi_get_system_info(DMI_PRODUCT_NAME);
if (product && is_dmi_valid(product)) {
- len = strlen(card->dmi_longname);
- snprintf(card->dmi_longname + len,
- longname_buf_size - len,
- "-%s", product);
-
- len++; /* skip the separator "-" */
- if (len < longname_buf_size)
- cleanup_dmi_name(card->dmi_longname + len);
+ append_dmi_string(card, product);
/*
* some vendors like Lenovo may only put a self-explanatory
* name in the product version field
*/
product_version = dmi_get_system_info(DMI_PRODUCT_VERSION);
- if (product_version && is_dmi_valid(product_version)) {
- len = strlen(card->dmi_longname);
- snprintf(card->dmi_longname + len,
- longname_buf_size - len,
- "-%s", product_version);
-
- len++;
- if (len < longname_buf_size)
- cleanup_dmi_name(card->dmi_longname + len);
- }
+ if (product_version && is_dmi_valid(product_version))
+ append_dmi_string(card, product_version);
}
board = dmi_get_system_info(DMI_BOARD_NAME);
if (board && is_dmi_valid(board)) {
- len = strlen(card->dmi_longname);
- snprintf(card->dmi_longname + len,
- longname_buf_size - len,
- "-%s", board);
-
- len++;
- if (len < longname_buf_size)
- cleanup_dmi_name(card->dmi_longname + len);
+ if (!product || strcasecmp(board, product))
+ append_dmi_string(card, board);
} else if (!product) {
/* fall back to using legacy name */
dev_warn(card->dev, "ASoC: no DMI board/product name!\n");
@@ -1825,16 +1807,8 @@ int snd_soc_set_dmi_name(struct snd_soc_card *card, const char *flavour)
}
/* Add flavour to dmi long name */
- if (flavour) {
- len = strlen(card->dmi_longname);
- snprintf(card->dmi_longname + len,
- longname_buf_size - len,
- "-%s", flavour);
-
- len++;
- if (len < longname_buf_size)
- cleanup_dmi_name(card->dmi_longname + len);
- }
+ if (flavour)
+ append_dmi_string(card, flavour);
/* set the card long name */
card->long_name = card->dmi_longname;
@@ -1853,7 +1827,7 @@ static void soc_check_tplg_fes(struct snd_soc_card *card)
for_each_component(component) {
- /* does this component override FEs ? */
+ /* does this component override BEs ? */
if (!component->driver->ignore_machine)
continue;
@@ -1874,7 +1848,7 @@ match:
continue;
}
- dev_info(card->dev, "info: override FE DAI link %s\n",
+ dev_info(card->dev, "info: override BE DAI link %s\n",
card->dai_link[i].name);
/* override platform component */
@@ -1918,17 +1892,58 @@ match:
}
}
-static void soc_cleanup_card_resources(struct snd_soc_card *card)
+#define soc_setup_card_name(name, name1, name2, norm) \
+ __soc_setup_card_name(name, sizeof(name), name1, name2, norm)
+static void __soc_setup_card_name(char *name, int len,
+ const char *name1, const char *name2,
+ int normalization)
{
- /* free the ALSA card at first; this syncs with pending operations */
- if (card->snd_card) {
- snd_card_free(card->snd_card);
- card->snd_card = NULL;
+ int i;
+
+ snprintf(name, len, "%s", name1 ? name1 : name2);
+
+ if (!normalization)
+ return;
+
+ /*
+ * Name normalization
+ *
+ * The driver name is somewhat special, as it's used as a key for
+ * searches in user space.
+ *
+ * e.g.
+ * "abcd??efg" -> "abcd__efg"
+ */
+ for (i = 0; i < len; i++) {
+ switch (name[i]) {
+ case '_':
+ case '-':
+ case '\0':
+ break;
+ default:
+ if (!isalnum(name[i]))
+ name[i] = '_';
+ break;
+ }
}
+}
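/*
 * name1 is the preferred string, name2 the fallback: e.g. passing
 * (card->long_name, card->name) uses the driver-supplied long name when
 * set and falls back to the card name otherwise.  Normalization is
 * requested only for the ->driver field (see the call sites below).
 */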
+
+static void soc_cleanup_card_resources(struct snd_soc_card *card,
+ int card_probed)
+{
+ struct snd_soc_dai_link *link, *_link;
+
+ if (card->snd_card)
+ snd_card_disconnect_sync(card->snd_card);
+
+ snd_soc_dapm_shutdown(card);
/* remove and free each DAI */
- soc_remove_dai_links(card);
- soc_remove_pcm_runtimes(card);
+ soc_remove_link_dais(card);
+ soc_remove_link_components(card);
+
+ for_each_card_links_safe(card, link, _link)
+ snd_soc_remove_dai_link(card, link);
/* remove auxiliary devices */
soc_remove_aux_devices(card);
@@ -1938,26 +1953,39 @@ static void soc_cleanup_card_resources(struct snd_soc_card *card)
soc_cleanup_card_debugfs(card);
/* remove the card */
- if (card->remove)
+ if (card_probed && card->remove)
card->remove(card);
+
+ if (card->snd_card) {
+ snd_card_free(card->snd_card);
+ card->snd_card = NULL;
+ }
+}
+
+static void snd_soc_unbind_card(struct snd_soc_card *card, bool unregister)
+{
+ if (card->instantiated) {
+ int card_probed = 1;
+
+ card->instantiated = false;
+ snd_soc_flush_all_delayed_work(card);
+
+ soc_cleanup_card_resources(card, card_probed);
+ if (!unregister)
+ list_add(&card->list, &unbind_card_list);
+ } else {
+ if (unregister)
+ list_del(&card->list);
+ }
}
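/*
 * card_probed tells soc_cleanup_card_resources() whether card->probe()
 * already ran, so that card->remove() is only invoked for a card that
 * was actually probed.
 */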
-static int snd_soc_instantiate_card(struct snd_soc_card *card)
+static int snd_soc_bind_card(struct snd_soc_card *card)
{
struct snd_soc_pcm_runtime *rtd;
struct snd_soc_dai_link *dai_link;
- int ret, i;
+ int ret, i, card_probed = 0;
mutex_lock(&client_mutex);
- for_each_card_prelinks(card, i, dai_link) {
- ret = soc_init_dai_link(card, dai_link);
- if (ret) {
- dev_err(card->dev, "ASoC: failed to init link %s: %d\n",
- dai_link->name, ret);
- mutex_unlock(&client_mutex);
- return ret;
- }
- }
mutex_lock_nested(&card->mutex, SND_SOC_CARD_CLASS_INIT);
snd_soc_dapm_init(&card->dapm, card, NULL);
@@ -1965,19 +1993,13 @@ static int snd_soc_instantiate_card(struct snd_soc_card *card)
/* check whether any platform is ignore machine FE and using topology */
soc_check_tplg_fes(card);
- /* bind DAIs */
- for_each_card_prelinks(card, i, dai_link) {
- ret = soc_bind_dai_link(card, dai_link);
- if (ret != 0)
- goto probe_end;
- }
-
/* bind aux_devs too */
ret = soc_bind_aux_dev(card);
if (ret < 0)
goto probe_end;
/* add predefined DAI links to the list */
+ card->num_rtd = 0;
for_each_card_prelinks(card, i, dai_link) {
ret = snd_soc_add_dai_link(card, dai_link);
if (ret < 0)
@@ -2013,6 +2035,7 @@ static int snd_soc_instantiate_card(struct snd_soc_card *card)
ret = card->probe(card);
if (ret < 0)
goto probe_end;
+ card_probed = 1;
}
/* probe all components used by DAI links on this card */
@@ -2025,23 +2048,10 @@ static int snd_soc_instantiate_card(struct snd_soc_card *card)
/* probe auxiliary components */
ret = soc_probe_aux_devices(card);
- if (ret < 0)
+ if (ret < 0) {
+ dev_err(card->dev,
+ "ASoC: failed to probe aux component %d\n", ret);
goto probe_end;
-
- /*
- * Find new DAI links added during probing components and bind them.
- * Components with topology may bring new DAIs and DAI links.
- */
- for_each_card_links(card, dai_link) {
- if (soc_is_dai_link_bound(card, dai_link))
- continue;
-
- ret = soc_init_dai_link(card, dai_link);
- if (ret)
- goto probe_end;
- ret = soc_bind_dai_link(card, dai_link);
- if (ret)
- goto probe_end;
}
/* probe all DAI links on this card */
@@ -2076,22 +2086,23 @@ static int snd_soc_instantiate_card(struct snd_soc_card *card)
/* try to set some sane longname if DMI is available */
snd_soc_set_dmi_name(card, NULL);
- snprintf(card->snd_card->shortname, sizeof(card->snd_card->shortname),
- "%s", card->name);
- snprintf(card->snd_card->longname, sizeof(card->snd_card->longname),
- "%s", card->long_name ? card->long_name : card->name);
- snprintf(card->snd_card->driver, sizeof(card->snd_card->driver),
- "%s", card->driver_name ? card->driver_name : card->name);
- for (i = 0; i < ARRAY_SIZE(card->snd_card->driver); i++) {
- switch (card->snd_card->driver[i]) {
- case '_':
- case '-':
- case '\0':
- break;
- default:
- if (!isalnum(card->snd_card->driver[i]))
- card->snd_card->driver[i] = '_';
- break;
+ soc_setup_card_name(card->snd_card->shortname,
+ card->name, NULL, 0);
+ soc_setup_card_name(card->snd_card->longname,
+ card->long_name, card->name, 0);
+ soc_setup_card_name(card->snd_card->driver,
+ card->driver_name, card->name, 1);
+
+ if (card->components) {
+ /*
+ * the current implementation of snd_component_add() accepts
+ * multiple components in the string separated by spaces,
+ * but its string-collision (identical string) check might
+ * not work correctly
+ */
+ ret = snd_component_add(card->snd_card, card->components);
+ if (ret < 0) {
+ dev_err(card->dev, "ASoC: %s snd_component_add() failed: %d\n",
+ card->name, ret);
+ goto probe_end;
}
}
@@ -2103,6 +2114,7 @@ static int snd_soc_instantiate_card(struct snd_soc_card *card)
goto probe_end;
}
}
+ card_probed = 1;
snd_soc_dapm_new_widgets(card);
@@ -2117,9 +2129,22 @@ static int snd_soc_instantiate_card(struct snd_soc_card *card)
dapm_mark_endpoints_dirty(card);
snd_soc_dapm_sync(&card->dapm);
+ /* deactivate pins to sleep state */
+ for_each_card_rtds(card, rtd) {
+ struct snd_soc_dai *dai;
+
+ for_each_rtd_codec_dai(rtd, i, dai) {
+ if (!dai->active)
+ pinctrl_pm_select_sleep_state(dai->dev);
+ }
+
+ if (!rtd->cpu_dai->active)
+ pinctrl_pm_select_sleep_state(rtd->cpu_dai->dev);
+ }
+
probe_end:
if (ret < 0)
- soc_cleanup_card_resources(card);
+ soc_cleanup_card_resources(card, card_probed);
mutex_unlock(&card->mutex);
mutex_unlock(&client_mutex);
@@ -2349,33 +2374,6 @@ int snd_soc_add_dai_controls(struct snd_soc_dai *dai,
}
EXPORT_SYMBOL_GPL(snd_soc_add_dai_controls);
-static int snd_soc_bind_card(struct snd_soc_card *card)
-{
- struct snd_soc_pcm_runtime *rtd;
- int ret;
-
- ret = snd_soc_instantiate_card(card);
- if (ret != 0)
- return ret;
-
- /* deactivate pins to sleep state */
- for_each_card_rtds(card, rtd) {
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
- struct snd_soc_dai *codec_dai;
- int j;
-
- for_each_rtd_codec_dai(rtd, j, codec_dai) {
- if (!codec_dai->active)
- pinctrl_pm_select_sleep_state(codec_dai->dev);
- }
-
- if (!cpu_dai->active)
- pinctrl_pm_select_sleep_state(cpu_dai->dev);
- }
-
- return ret;
-}
-
/**
* snd_soc_register_card - Register a card with the ASoC core
*
@@ -2400,7 +2398,6 @@ int snd_soc_register_card(struct snd_soc_card *card)
INIT_LIST_HEAD(&card->dapm_dirty);
INIT_LIST_HEAD(&card->dobj_list);
- card->num_rtd = 0;
card->instantiated = 0;
mutex_init(&card->mutex);
mutex_init(&card->dapm_mutex);
@@ -2411,25 +2408,6 @@ int snd_soc_register_card(struct snd_soc_card *card)
}
EXPORT_SYMBOL_GPL(snd_soc_register_card);
-static void snd_soc_unbind_card(struct snd_soc_card *card, bool unregister)
-{
- if (card->instantiated) {
- card->instantiated = false;
- snd_soc_dapm_shutdown(card);
- snd_soc_flush_all_delayed_work(card);
-
- /* remove all components used by DAI links on this card */
- soc_remove_link_components(card);
-
- soc_cleanup_card_resources(card);
- if (!unregister)
- list_add(&card->list, &unbind_card_list);
- } else {
- if (unregister)
- list_del(&card->list);
- }
-}
-
/**
* snd_soc_unregister_card - Unregister a card with the ASoC core
*
@@ -2488,7 +2466,7 @@ static char *fmt_single_name(struct device *dev, int *id)
*id = 0;
}
- return kstrdup(name, GFP_KERNEL);
+ return devm_kstrdup(dev, name, GFP_KERNEL);
}
/*
@@ -2505,38 +2483,38 @@ static inline char *fmt_multiple_name(struct device *dev,
return NULL;
}
- return kstrdup(dai_drv->name, GFP_KERNEL);
+ return devm_kstrdup(dev, dai_drv->name, GFP_KERNEL);
}
-/**
- * snd_soc_unregister_dai - Unregister DAIs from the ASoC core
- *
- * @component: The component for which the DAIs should be unregistered
- */
-static void snd_soc_unregister_dais(struct snd_soc_component *component)
+void snd_soc_unregister_dai(struct snd_soc_dai *dai)
{
- struct snd_soc_dai *dai, *_dai;
-
- for_each_component_dais_safe(component, dai, _dai) {
- dev_dbg(component->dev, "ASoC: Unregistered DAI '%s'\n",
- dai->name);
- list_del(&dai->list);
- kfree(dai->name);
- kfree(dai);
- }
+ dev_dbg(dai->dev, "ASoC: Unregistered DAI '%s'\n", dai->name);
+ list_del(&dai->list);
}
+EXPORT_SYMBOL_GPL(snd_soc_unregister_dai);
-/* Create a DAI and add it to the component's DAI list */
-static struct snd_soc_dai *soc_add_dai(struct snd_soc_component *component,
- struct snd_soc_dai_driver *dai_drv,
- bool legacy_dai_naming)
+/**
+ * snd_soc_register_dai - Register a DAI dynamically & create its widgets
+ *
+ * @component: The component the DAIs are registered for
+ * @dai_drv: DAI driver to use for the DAI
+ *
+ * Topology can use this API to register DAIs when probing a component.
+ * These DAIs' widgets will be freed in the card cleanup and the DAIs
+ * will be freed in the component cleanup.
+ */
+struct snd_soc_dai *snd_soc_register_dai(struct snd_soc_component *component,
+ struct snd_soc_dai_driver *dai_drv,
+ bool legacy_dai_naming)
{
struct device *dev = component->dev;
struct snd_soc_dai *dai;
dev_dbg(dev, "ASoC: dynamically register DAI %s\n", dev_name(dev));
- dai = kzalloc(sizeof(struct snd_soc_dai), GFP_KERNEL);
+ lockdep_assert_held(&client_mutex);
+
+ dai = devm_kzalloc(dev, sizeof(*dai), GFP_KERNEL);
if (dai == NULL)
return NULL;
@@ -2558,10 +2536,8 @@ static struct snd_soc_dai *soc_add_dai(struct snd_soc_component *component,
else
dai->id = component->num_dai;
}
- if (dai->name == NULL) {
- kfree(dai);
+ if (!dai->name)
return NULL;
- }
dai->component = component;
dai->dev = dev;
@@ -2578,6 +2554,19 @@ static struct snd_soc_dai *soc_add_dai(struct snd_soc_component *component,
}
/**
+ * snd_soc_unregister_dai - Unregister DAIs from the ASoC core
+ *
+ * @component: The component for which the DAIs should be unregistered
+ */
+static void snd_soc_unregister_dais(struct snd_soc_component *component)
+{
+ struct snd_soc_dai *dai, *_dai;
+
+ for_each_component_dais_safe(component, dai, _dai)
+ snd_soc_unregister_dai(dai);
+}
+
+/**
* snd_soc_register_dais - Register a DAI with the ASoC core
*
* @component: The component the DAIs are registered for
@@ -2588,16 +2577,12 @@ static int snd_soc_register_dais(struct snd_soc_component *component,
struct snd_soc_dai_driver *dai_drv,
size_t count)
{
- struct device *dev = component->dev;
struct snd_soc_dai *dai;
unsigned int i;
int ret;
- dev_dbg(dev, "ASoC: dai register %s #%zu\n", dev_name(dev), count);
-
for (i = 0; i < count; i++) {
-
- dai = soc_add_dai(component, dai_drv + i, count == 1 &&
+ dai = snd_soc_register_dai(component, dai_drv + i, count == 1 &&
!component->driver->non_legacy_dai_naming);
if (dai == NULL) {
ret = -ENOMEM;
@@ -2613,49 +2598,6 @@ err:
return ret;
}
-/**
- * snd_soc_register_dai - Register a DAI dynamically & create its widgets
- *
- * @component: The component the DAIs are registered for
- * @dai_drv: DAI driver to use for the DAI
- *
- * Topology can use this API to register DAIs when probing a component.
- * These DAIs's widgets will be freed in the card cleanup and the DAIs
- * will be freed in the component cleanup.
- */
-int snd_soc_register_dai(struct snd_soc_component *component,
- struct snd_soc_dai_driver *dai_drv)
-{
- struct snd_soc_dapm_context *dapm =
- snd_soc_component_get_dapm(component);
- struct snd_soc_dai *dai;
- int ret;
-
- if (dai_drv->dobj.type != SND_SOC_DOBJ_PCM) {
- dev_err(component->dev, "Invalid dai type %d\n",
- dai_drv->dobj.type);
- return -EINVAL;
- }
-
- lockdep_assert_held(&client_mutex);
- dai = soc_add_dai(component, dai_drv, false);
- if (!dai)
- return -ENOMEM;
-
- /*
- * Create the DAI widgets here. After adding DAIs, topology may
- * also add routes that need these widgets as source or sink.
- */
- ret = snd_soc_dapm_new_dai_widgets(dapm, dai);
- if (ret != 0) {
- dev_err(component->dev,
- "Failed to create DAI widgets %d\n", ret);
- }
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(snd_soc_register_dai);
-
static int snd_soc_component_initialize(struct snd_soc_component *component,
const struct snd_soc_component_driver *driver, struct device *dev)
{
@@ -2726,40 +2668,6 @@ EXPORT_SYMBOL_GPL(snd_soc_component_exit_regmap);
#endif
-static void snd_soc_component_add(struct snd_soc_component *component)
-{
- mutex_lock(&client_mutex);
-
- if (!component->driver->write && !component->driver->read) {
- if (!component->regmap)
- component->regmap = dev_get_regmap(component->dev,
- NULL);
- if (component->regmap)
- snd_soc_component_setup_regmap(component);
- }
-
- /* see for_each_component */
- list_add(&component->list, &component_list);
-
- mutex_unlock(&client_mutex);
-}
-
-static void snd_soc_component_cleanup(struct snd_soc_component *component)
-{
- snd_soc_unregister_dais(component);
- kfree(component->name);
-}
-
-static void snd_soc_component_del_unlocked(struct snd_soc_component *component)
-{
- struct snd_soc_card *card = component->card;
-
- if (card)
- snd_soc_unbind_card(card, false);
-
- list_del(&component->list);
-}
-
#define ENDIANNESS_MAP(name) \
(SNDRV_PCM_FMTBIT_##name##LE | SNDRV_PCM_FMTBIT_##name##BE)
static u64 endianness_format_map[] = {
@@ -2804,6 +2712,18 @@ static void snd_soc_try_rebind_card(void)
list_del(&card->list);
}
+static void snd_soc_del_component_unlocked(struct snd_soc_component *component)
+{
+ struct snd_soc_card *card = component->card;
+
+ snd_soc_unregister_dais(component);
+
+ if (card)
+ snd_soc_unbind_card(card, false);
+
+ list_del(&component->list);
+}
+
int snd_soc_add_component(struct device *dev,
struct snd_soc_component *component,
const struct snd_soc_component_driver *component_driver,
@@ -2813,6 +2733,8 @@ int snd_soc_add_component(struct device *dev,
int ret;
int i;
+ mutex_lock(&client_mutex);
+
ret = snd_soc_component_initialize(component, component_driver, dev);
if (ret)
goto err_free;
@@ -2830,14 +2752,26 @@ int snd_soc_add_component(struct device *dev,
goto err_cleanup;
}
- snd_soc_component_add(component);
- snd_soc_try_rebind_card();
+ if (!component->driver->write && !component->driver->read) {
+ if (!component->regmap)
+ component->regmap = dev_get_regmap(component->dev,
+ NULL);
+ if (component->regmap)
+ snd_soc_component_setup_regmap(component);
+ }
- return 0;
+ /* see for_each_component */
+ list_add(&component->list, &component_list);
err_cleanup:
- snd_soc_component_cleanup(component);
+ if (ret < 0)
+ snd_soc_del_component_unlocked(component);
err_free:
+ mutex_unlock(&client_mutex);
+
+ if (ret == 0)
+ snd_soc_try_rebind_card();
+
return ret;
}
EXPORT_SYMBOL_GPL(snd_soc_add_component);
@@ -2864,62 +2798,21 @@ EXPORT_SYMBOL_GPL(snd_soc_register_component);
*
* @dev: The device to unregister
*/
-static int __snd_soc_unregister_component(struct device *dev)
-{
- struct snd_soc_component *component;
- int found = 0;
-
- mutex_lock(&client_mutex);
- for_each_component(component) {
- if (dev != component->dev)
- continue;
-
- snd_soc_tplg_component_remove(component,
- SND_SOC_TPLG_INDEX_ALL);
- snd_soc_component_del_unlocked(component);
- found = 1;
- break;
- }
- mutex_unlock(&client_mutex);
-
- if (found)
- snd_soc_component_cleanup(component);
-
- return found;
-}
-
void snd_soc_unregister_component(struct device *dev)
{
- while (__snd_soc_unregister_component(dev))
- ;
-}
-EXPORT_SYMBOL_GPL(snd_soc_unregister_component);
-
-struct snd_soc_component *snd_soc_lookup_component(struct device *dev,
- const char *driver_name)
-{
struct snd_soc_component *component;
- struct snd_soc_component *ret;
- ret = NULL;
mutex_lock(&client_mutex);
- for_each_component(component) {
- if (dev != component->dev)
- continue;
-
- if (driver_name &&
- (driver_name != component->driver->name) &&
- (strcmp(component->driver->name, driver_name) != 0))
- continue;
+ while (1) {
+ component = snd_soc_lookup_component_nolocked(dev, NULL);
+ if (!component)
+ break;
- ret = component;
- break;
+ snd_soc_del_component_unlocked(component);
}
mutex_unlock(&client_mutex);
-
- return ret;
}
-EXPORT_SYMBOL_GPL(snd_soc_lookup_component);
+EXPORT_SYMBOL_GPL(snd_soc_unregister_component);
/* Retrieve a card's name from device tree */
int snd_soc_of_parse_card_name(struct snd_soc_card *card,
diff --git a/sound/soc/soc-generic-dmaengine-pcm.c b/sound/soc/soc-generic-dmaengine-pcm.c
index 5552c66ca642..a428ff393ea2 100644
--- a/sound/soc/soc-generic-dmaengine-pcm.c
+++ b/sound/soc/soc-generic-dmaengine-pcm.c
@@ -75,12 +75,10 @@ int snd_dmaengine_pcm_prepare_slave_config(struct snd_pcm_substream *substream,
}
EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_prepare_slave_config);
-static int dmaengine_pcm_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params)
+static int dmaengine_pcm_hw_params(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
{
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_component *component =
- snd_soc_rtdcom_lookup(rtd, SND_DMAENGINE_PCM_DRV_NAME);
struct dmaengine_pcm *pcm = soc_component_to_pcm(component);
struct dma_chan *chan = snd_dmaengine_pcm_get_chan(substream);
int (*prepare_slave_config)(struct snd_pcm_substream *substream,
@@ -109,21 +107,16 @@ static int dmaengine_pcm_hw_params(struct snd_pcm_substream *substream,
return snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(params));
}
-static int dmaengine_pcm_set_runtime_hwparams(struct snd_pcm_substream *substream)
+static int
+dmaengine_pcm_set_runtime_hwparams(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_component *component =
- snd_soc_rtdcom_lookup(rtd, SND_DMAENGINE_PCM_DRV_NAME);
struct dmaengine_pcm *pcm = soc_component_to_pcm(component);
struct device *dma_dev = dmaengine_dma_dev(pcm, substream);
struct dma_chan *chan = pcm->chan[substream->stream];
struct snd_dmaengine_dai_dma_data *dma_data;
- struct dma_slave_caps dma_caps;
struct snd_pcm_hardware hw;
- u32 addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
- BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
- BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
- snd_pcm_format_t i;
int ret;
if (pcm->config && pcm->config->pcm_hardware)
@@ -145,82 +138,53 @@ static int dmaengine_pcm_set_runtime_hwparams(struct snd_pcm_substream *substrea
if (pcm->flags & SND_DMAENGINE_PCM_FLAG_NO_RESIDUE)
hw.info |= SNDRV_PCM_INFO_BATCH;
- ret = dma_get_slave_caps(chan, &dma_caps);
- if (ret == 0) {
- if (dma_caps.cmd_pause && dma_caps.cmd_resume)
- hw.info |= SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME;
- if (dma_caps.residue_granularity <= DMA_RESIDUE_GRANULARITY_SEGMENT)
- hw.info |= SNDRV_PCM_INFO_BATCH;
-
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
- addr_widths = dma_caps.dst_addr_widths;
- else
- addr_widths = dma_caps.src_addr_widths;
- }
-
- /*
- * If SND_DMAENGINE_PCM_DAI_FLAG_PACK is set keep
- * hw.formats set to 0, meaning no restrictions are in place.
- * In this case it's the responsibility of the DAI driver to
- * provide the supported format information.
- */
- if (!(dma_data->flags & SND_DMAENGINE_PCM_DAI_FLAG_PACK))
- /*
- * Prepare formats mask for valid/allowed sample types. If the
- * dma does not have support for the given physical word size,
- * it needs to be masked out so user space can not use the
- * format which produces corrupted audio.
- * In case the dma driver does not implement the slave_caps the
- * default assumption is that it supports 1, 2 and 4 bytes
- * widths.
- */
- for (i = SNDRV_PCM_FORMAT_FIRST; i <= SNDRV_PCM_FORMAT_LAST; i++) {
- int bits = snd_pcm_format_physical_width(i);
-
- /*
- * Enable only samples with DMA supported physical
- * widths
- */
- switch (bits) {
- case 8:
- case 16:
- case 24:
- case 32:
- case 64:
- if (addr_widths & (1 << (bits / 8)))
- hw.formats |= pcm_format_to_bits(i);
- break;
- default:
- /* Unsupported types */
- break;
- }
- }
+ ret = snd_dmaengine_pcm_refine_runtime_hwparams(substream,
+ dma_data,
+ &hw,
+ chan);
+ if (ret)
+ return ret;
return snd_soc_set_runtime_hwparams(substream, &hw);
}
-static int dmaengine_pcm_open(struct snd_pcm_substream *substream)
+static int dmaengine_pcm_open(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_component *component =
- snd_soc_rtdcom_lookup(rtd, SND_DMAENGINE_PCM_DRV_NAME);
struct dmaengine_pcm *pcm = soc_component_to_pcm(component);
struct dma_chan *chan = pcm->chan[substream->stream];
int ret;
- ret = dmaengine_pcm_set_runtime_hwparams(substream);
+ ret = dmaengine_pcm_set_runtime_hwparams(component, substream);
if (ret)
return ret;
return snd_dmaengine_pcm_open(substream, chan);
}
+static int dmaengine_pcm_close(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
+{
+ return snd_dmaengine_pcm_close(substream);
+}
+
+static int dmaengine_pcm_hw_free(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
+{
+ return snd_pcm_lib_free_pages(substream);
+}
+
+static int dmaengine_pcm_trigger(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream, int cmd)
+{
+ return snd_dmaengine_pcm_trigger(substream, cmd);
+}
+
static struct dma_chan *dmaengine_pcm_compat_request_channel(
+ struct snd_soc_component *component,
struct snd_soc_pcm_runtime *rtd,
struct snd_pcm_substream *substream)
{
- struct snd_soc_component *component =
- snd_soc_rtdcom_lookup(rtd, SND_DMAENGINE_PCM_DRV_NAME);
struct dmaengine_pcm *pcm = soc_component_to_pcm(component);
struct snd_dmaengine_dai_dma_data *dma_data;
dma_filter_fn fn = NULL;
@@ -258,10 +222,9 @@ static bool dmaengine_pcm_can_report_residue(struct device *dev,
return true;
}
-static int dmaengine_pcm_new(struct snd_soc_pcm_runtime *rtd)
+static int dmaengine_pcm_new(struct snd_soc_component *component,
+ struct snd_soc_pcm_runtime *rtd)
{
- struct snd_soc_component *component =
- snd_soc_rtdcom_lookup(rtd, SND_DMAENGINE_PCM_DRV_NAME);
struct dmaengine_pcm *pcm = soc_component_to_pcm(component);
const struct snd_dmaengine_pcm_config *config = pcm->config;
struct device *dev = component->dev;
@@ -288,8 +251,8 @@ static int dmaengine_pcm_new(struct snd_soc_pcm_runtime *rtd)
config->chan_names[i]);
if (!pcm->chan[i] && (pcm->flags & SND_DMAENGINE_PCM_FLAG_COMPAT)) {
- pcm->chan[i] = dmaengine_pcm_compat_request_channel(rtd,
- substream);
+ pcm->chan[i] = dmaengine_pcm_compat_request_channel(
+ component, rtd, substream);
}
if (!pcm->chan[i]) {
@@ -318,11 +281,9 @@ static int dmaengine_pcm_new(struct snd_soc_pcm_runtime *rtd)
}
static snd_pcm_uframes_t dmaengine_pcm_pointer(
+ struct snd_soc_component *component,
struct snd_pcm_substream *substream)
{
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_component *component =
- snd_soc_rtdcom_lookup(rtd, SND_DMAENGINE_PCM_DRV_NAME);
struct dmaengine_pcm *pcm = soc_component_to_pcm(component);
if (pcm->flags & SND_DMAENGINE_PCM_FLAG_NO_RESIDUE)
@@ -331,13 +292,11 @@ static snd_pcm_uframes_t dmaengine_pcm_pointer(
return snd_dmaengine_pcm_pointer(substream);
}
-static int dmaengine_copy_user(struct snd_pcm_substream *substream,
+static int dmaengine_copy_user(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
int channel, unsigned long hwoff,
void __user *buf, unsigned long bytes)
{
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_component *component =
- snd_soc_rtdcom_lookup(rtd, SND_DMAENGINE_PCM_DRV_NAME);
struct snd_pcm_runtime *runtime = substream->runtime;
struct dmaengine_pcm *pcm = soc_component_to_pcm(component);
int (*process)(struct snd_pcm_substream *substream,
@@ -365,39 +324,31 @@ static int dmaengine_copy_user(struct snd_pcm_substream *substream,
return 0;
}
-static const struct snd_pcm_ops dmaengine_pcm_ops = {
+static const struct snd_soc_component_driver dmaengine_pcm_component = {
+ .name = SND_DMAENGINE_PCM_DRV_NAME,
+ .probe_order = SND_SOC_COMP_ORDER_LATE,
.open = dmaengine_pcm_open,
- .close = snd_dmaengine_pcm_close,
- .ioctl = snd_pcm_lib_ioctl,
+ .close = dmaengine_pcm_close,
+ .ioctl = snd_soc_pcm_lib_ioctl,
.hw_params = dmaengine_pcm_hw_params,
- .hw_free = snd_pcm_lib_free_pages,
- .trigger = snd_dmaengine_pcm_trigger,
+ .hw_free = dmaengine_pcm_hw_free,
+ .trigger = dmaengine_pcm_trigger,
.pointer = dmaengine_pcm_pointer,
+ .pcm_construct = dmaengine_pcm_new,
};
-static const struct snd_pcm_ops dmaengine_pcm_process_ops = {
+static const struct snd_soc_component_driver dmaengine_pcm_component_process = {
+ .name = SND_DMAENGINE_PCM_DRV_NAME,
+ .probe_order = SND_SOC_COMP_ORDER_LATE,
.open = dmaengine_pcm_open,
- .close = snd_dmaengine_pcm_close,
- .ioctl = snd_pcm_lib_ioctl,
+ .close = dmaengine_pcm_close,
+ .ioctl = snd_soc_pcm_lib_ioctl,
.hw_params = dmaengine_pcm_hw_params,
- .hw_free = snd_pcm_lib_free_pages,
- .trigger = snd_dmaengine_pcm_trigger,
+ .hw_free = dmaengine_pcm_hw_free,
+ .trigger = dmaengine_pcm_trigger,
.pointer = dmaengine_pcm_pointer,
.copy_user = dmaengine_copy_user,
-};
-
-static const struct snd_soc_component_driver dmaengine_pcm_component = {
- .name = SND_DMAENGINE_PCM_DRV_NAME,
- .probe_order = SND_SOC_COMP_ORDER_LATE,
- .ops = &dmaengine_pcm_ops,
- .pcm_new = dmaengine_pcm_new,
-};
-
-static const struct snd_soc_component_driver dmaengine_pcm_component_process = {
- .name = SND_DMAENGINE_PCM_DRV_NAME,
- .probe_order = SND_SOC_COMP_ORDER_LATE,
- .ops = &dmaengine_pcm_process_ops,
- .pcm_new = dmaengine_pcm_new,
+ .pcm_construct = dmaengine_pcm_new,
};
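/*
 * The thin wrappers above (dmaengine_pcm_close(), dmaengine_pcm_hw_free(),
 * dmaengine_pcm_trigger()) exist purely to adapt signatures: component
 * callbacks now receive the snd_soc_component directly, so the per-call
 * snd_soc_rtdcom_lookup(rtd, SND_DMAENGINE_PCM_DRV_NAME) that each op
 * used to perform is no longer needed.
 */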
static const char * const dmaengine_pcm_dma_channel_names[] = {
@@ -436,7 +387,7 @@ static int dmaengine_pcm_request_chan_of(struct dmaengine_pcm *pcm,
name = dmaengine_pcm_dma_channel_names[i];
if (config && config->chan_names[i])
name = config->chan_names[i];
- chan = dma_request_slave_channel_reason(dev, name);
+ chan = dma_request_chan(dev, name);
if (IS_ERR(chan)) {
if (PTR_ERR(chan) == -EPROBE_DEFER)
return -EPROBE_DEFER;
diff --git a/sound/soc/soc-jack.c b/sound/soc/soc-jack.c
index a71d2340eb05..b5748dcd490f 100644
--- a/sound/soc/soc-jack.c
+++ b/sound/soc/soc-jack.c
@@ -82,10 +82,9 @@ void snd_soc_jack_report(struct snd_soc_jack *jack, int status, int mask)
unsigned int sync = 0;
int enable;
- trace_snd_soc_jack_report(jack, mask, status);
-
if (!jack)
return;
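	/*
	 * trace only after the NULL check: the tracepoint presumably
	 * dereferences jack, so a NULL report must bail out first
	 */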
+ trace_snd_soc_jack_report(jack, mask, status);
dapm = &jack->card->dapm;
diff --git a/sound/soc/soc-ops.c b/sound/soc/soc-ops.c
index f4dc3d445aae..652657dc6809 100644
--- a/sound/soc/soc-ops.c
+++ b/sound/soc/soc-ops.c
@@ -592,23 +592,16 @@ EXPORT_SYMBOL_GPL(snd_soc_get_volsw_range);
int snd_soc_limit_volume(struct snd_soc_card *card,
const char *name, int max)
{
- struct snd_card *snd_card = card->snd_card;
struct snd_kcontrol *kctl;
struct soc_mixer_control *mc;
- int found = 0;
int ret = -EINVAL;
/* Sanity check for name and max */
if (unlikely(!name || max <= 0))
return -EINVAL;
- list_for_each_entry(kctl, &snd_card->controls, list) {
- if (!strncmp(kctl->id.name, name, sizeof(kctl->id.name))) {
- found = 1;
- break;
- }
- }
- if (found) {
+ kctl = snd_soc_card_get_kcontrol(card, name);
+ if (kctl) {
mc = (struct soc_mixer_control *)kctl->private_value;
if (max <= mc->max) {
mc->platform_max = max;
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index b600d3eaaf5c..76b7ee637e86 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -118,11 +118,8 @@ bool snd_soc_runtime_ignore_pmdown_time(struct snd_soc_pcm_runtime *rtd)
if (!rtd->pmdown_time || rtd->dai_link->ignore_pmdown_time)
return true;
- for_each_rtdcom(rtd, rtdcom) {
- component = rtdcom->component;
-
+ for_each_rtd_components(rtd, rtdcom, component)
ignore &= !component->driver->use_pmdown_time;
- }
return ignore;
}
@@ -435,8 +432,7 @@ static int soc_pcm_components_open(struct snd_pcm_substream *substream,
struct snd_soc_component *component;
int ret = 0;
- for_each_rtdcom(rtd, rtdcom) {
- component = rtdcom->component;
+ for_each_rtd_components(rtd, rtdcom, component) {
*last = component;
ret = snd_soc_component_module_get_when_open(component);
@@ -467,9 +463,7 @@ static int soc_pcm_components_close(struct snd_pcm_substream *substream,
struct snd_soc_component *component;
int ret = 0;
- for_each_rtdcom(rtd, rtdcom) {
- component = rtdcom->component;
-
+ for_each_rtd_components(rtd, rtdcom, component) {
if (component == last)
break;
@@ -500,9 +494,7 @@ static int soc_pcm_open(struct snd_pcm_substream *substream)
for_each_rtd_codec_dai(rtd, i, codec_dai)
pinctrl_pm_select_default_state(codec_dai->dev);
- for_each_rtdcom(rtd, rtdcom) {
- component = rtdcom->component;
-
+ for_each_rtd_components(rtd, rtdcom, component) {
pm_runtime_get_sync(component->dev);
}
@@ -625,9 +617,7 @@ component_err:
out:
mutex_unlock(&rtd->card->pcm_mutex);
- for_each_rtdcom(rtd, rtdcom) {
- component = rtdcom->component;
-
+ for_each_rtd_components(rtd, rtdcom, component) {
pm_runtime_mark_last_busy(component->dev);
pm_runtime_put_autosuspend(component->dev);
}
@@ -740,9 +730,7 @@ static int soc_pcm_close(struct snd_pcm_substream *substream)
mutex_unlock(&rtd->card->pcm_mutex);
- for_each_rtdcom(rtd, rtdcom) {
- component = rtdcom->component;
-
+ for_each_rtd_components(rtd, rtdcom, component) {
pm_runtime_mark_last_busy(component->dev);
pm_runtime_put_autosuspend(component->dev);
}
@@ -782,9 +770,7 @@ static int soc_pcm_prepare(struct snd_pcm_substream *substream)
}
}
- for_each_rtdcom(rtd, rtdcom) {
- component = rtdcom->component;
-
+ for_each_rtd_components(rtd, rtdcom, component) {
ret = snd_soc_component_prepare(component, substream);
if (ret < 0) {
dev_err(component->dev,
@@ -849,9 +835,7 @@ static int soc_pcm_components_hw_free(struct snd_pcm_substream *substream,
struct snd_soc_component *component;
int ret = 0;
- for_each_rtdcom(rtd, rtdcom) {
- component = rtdcom->component;
-
+ for_each_rtd_components(rtd, rtdcom, component) {
if (component == last)
break;
@@ -877,6 +861,11 @@ static int soc_pcm_hw_params(struct snd_pcm_substream *substream,
int i, ret = 0;
mutex_lock_nested(&rtd->card->pcm_mutex, rtd->card->pcm_subclass);
+
+ ret = soc_pcm_params_symmetry(substream, params);
+ if (ret)
+ goto out;
+
if (rtd->dai_link->ops->hw_params) {
ret = rtd->dai_link->ops->hw_params(substream, params);
if (ret < 0) {
@@ -945,9 +934,7 @@ static int soc_pcm_hw_params(struct snd_pcm_substream *substream,
snd_soc_dapm_update_dai(substream, params, cpu_dai);
- for_each_rtdcom(rtd, rtdcom) {
- component = rtdcom->component;
-
+ for_each_rtd_components(rtd, rtdcom, component) {
ret = snd_soc_component_hw_params(component, substream, params);
if (ret < 0) {
dev_err(component->dev,
@@ -958,9 +945,6 @@ static int soc_pcm_hw_params(struct snd_pcm_substream *substream,
}
component = NULL;
- ret = soc_pcm_params_symmetry(substream, params);
- if (ret)
- goto component_err;
out:
mutex_unlock(&rtd->card->pcm_mutex);
return ret;
@@ -1047,7 +1031,7 @@ static int soc_pcm_hw_free(struct snd_pcm_substream *substream)
return 0;
}
-static int soc_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+static int soc_pcm_trigger_start(struct snd_pcm_substream *substream, int cmd)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_component *component;
@@ -1056,16 +1040,42 @@ static int soc_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
struct snd_soc_dai *codec_dai;
int i, ret;
+ if (rtd->dai_link->ops->trigger) {
+ ret = rtd->dai_link->ops->trigger(substream, cmd);
+ if (ret < 0)
+ return ret;
+ }
+
+ for_each_rtd_components(rtd, rtdcom, component) {
+ ret = snd_soc_component_trigger(component, substream, cmd);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = snd_soc_dai_trigger(cpu_dai, substream, cmd);
+ if (ret < 0)
+ return ret;
+
for_each_rtd_codec_dai(rtd, i, codec_dai) {
ret = snd_soc_dai_trigger(codec_dai, substream, cmd);
if (ret < 0)
return ret;
}
- for_each_rtdcom(rtd, rtdcom) {
- component = rtdcom->component;
+ return 0;
+}
- ret = snd_soc_component_trigger(component, substream, cmd);
+static int soc_pcm_trigger_stop(struct snd_pcm_substream *substream, int cmd)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_component *component;
+ struct snd_soc_rtdcom_list *rtdcom;
+ struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_dai *codec_dai;
+ int i, ret;
+
+ for_each_rtd_codec_dai(rtd, i, codec_dai) {
+ ret = snd_soc_dai_trigger(codec_dai, substream, cmd);
if (ret < 0)
return ret;
}
@@ -1074,6 +1084,12 @@ static int soc_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
if (ret < 0)
return ret;
+ for_each_rtd_components(rtd, rtdcom, component) {
+ ret = snd_soc_component_trigger(component, substream, cmd);
+ if (ret < 0)
+ return ret;
+ }
+
if (rtd->dai_link->ops->trigger) {
ret = rtd->dai_link->ops->trigger(substream, cmd);
if (ret < 0)
@@ -1083,6 +1099,28 @@ static int soc_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
return 0;
}
+static int soc_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+ int ret;
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ ret = soc_pcm_trigger_start(substream, cmd);
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ ret = soc_pcm_trigger_stop(substream, cmd);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
static int soc_pcm_bespoke_trigger(struct snd_pcm_substream *substream,
int cmd)
{
@@ -1146,7 +1184,9 @@ static int dpcm_be_connect(struct snd_soc_pcm_runtime *fe,
{
struct snd_soc_dpcm *dpcm;
unsigned long flags;
+#ifdef CONFIG_DEBUG_FS
char *name;
+#endif
/* only add new dpcms */
for_each_dpcm_be(fe, stream, dpcm) {
@@ -1385,6 +1425,7 @@ static int dpcm_prune_paths(struct snd_soc_pcm_runtime *fe, int stream,
struct snd_soc_dapm_widget *widget;
struct snd_soc_dai *dai;
int prune = 0;
+ int do_prune;
/* Destroy any old FE <--> BE connections */
for_each_dpcm_be(fe, stream, dpcm) {
@@ -1398,13 +1439,16 @@ static int dpcm_prune_paths(struct snd_soc_pcm_runtime *fe, int stream,
continue;
/* is there a valid CODEC DAI widget for this BE */
+ do_prune = 1;
for_each_rtd_codec_dai(dpcm->be, i, dai) {
widget = dai_get_widget(dai, stream);
/* prune the BE if it's no longer in our active list */
if (widget && widget_in_list(list, widget))
- continue;
+ do_prune = 0;
}
+ if (!do_prune)
+ continue;
dev_dbg(fe->dev, "ASoC: pruning %s BE %s for %s\n",
stream ? "capture" : "playback",
@@ -2289,42 +2333,81 @@ int dpcm_be_dai_trigger(struct snd_soc_pcm_runtime *fe, int stream,
}
EXPORT_SYMBOL_GPL(dpcm_be_dai_trigger);
+static int dpcm_dai_trigger_fe_be(struct snd_pcm_substream *substream,
+ int cmd, bool fe_first)
+{
+ struct snd_soc_pcm_runtime *fe = substream->private_data;
+ int ret;
+
+ /* call trigger on the frontend before the backend. */
+ if (fe_first) {
+ dev_dbg(fe->dev, "ASoC: pre trigger FE %s cmd %d\n",
+ fe->dai_link->name, cmd);
+
+ ret = soc_pcm_trigger(substream, cmd);
+ if (ret < 0)
+ return ret;
+
+ ret = dpcm_be_dai_trigger(fe, substream->stream, cmd);
+ return ret;
+ }
+
+ /* call trigger on the frontend after the backend. */
+ ret = dpcm_be_dai_trigger(fe, substream->stream, cmd);
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(fe->dev, "ASoC: post trigger FE %s cmd %d\n",
+ fe->dai_link->name, cmd);
+
+ ret = soc_pcm_trigger(substream, cmd);
+
+ return ret;
+}
+
static int dpcm_fe_dai_do_trigger(struct snd_pcm_substream *substream, int cmd)
{
struct snd_soc_pcm_runtime *fe = substream->private_data;
- int stream = substream->stream, ret;
+ int stream = substream->stream;
+ int ret = 0;
enum snd_soc_dpcm_trigger trigger = fe->dai_link->trigger[stream];
fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE;
switch (trigger) {
case SND_SOC_DPCM_TRIGGER_PRE:
- /* call trigger on the frontend before the backend. */
-
- dev_dbg(fe->dev, "ASoC: pre trigger FE %s cmd %d\n",
- fe->dai_link->name, cmd);
-
- ret = soc_pcm_trigger(substream, cmd);
- if (ret < 0) {
- dev_err(fe->dev,"ASoC: trigger FE failed %d\n", ret);
- goto out;
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ ret = dpcm_dai_trigger_fe_be(substream, cmd, true);
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ ret = dpcm_dai_trigger_fe_be(substream, cmd, false);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
}
-
- ret = dpcm_be_dai_trigger(fe, substream->stream, cmd);
break;
case SND_SOC_DPCM_TRIGGER_POST:
- /* call trigger on the frontend after the backend. */
-
- ret = dpcm_be_dai_trigger(fe, substream->stream, cmd);
- if (ret < 0) {
- dev_err(fe->dev,"ASoC: trigger FE failed %d\n", ret);
- goto out;
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ ret = dpcm_dai_trigger_fe_be(substream, cmd, false);
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ ret = dpcm_dai_trigger_fe_be(substream, cmd, true);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
}
-
- dev_dbg(fe->dev, "ASoC: post trigger FE %s cmd %d\n",
- fe->dai_link->name, cmd);
-
- ret = soc_pcm_trigger(substream, cmd);
break;
case SND_SOC_DPCM_TRIGGER_BESPOKE:
/* bespoke trigger() - handles both FE and BEs */
@@ -2333,10 +2416,6 @@ static int dpcm_fe_dai_do_trigger(struct snd_pcm_substream *substream, int cmd)
fe->dai_link->name, cmd);
ret = soc_pcm_bespoke_trigger(substream, cmd);
- if (ret < 0) {
- dev_err(fe->dev,"ASoC: trigger FE failed %d\n", ret);
- goto out;
- }
break;
default:
dev_err(fe->dev, "ASoC: invalid trigger cmd %d for %s\n", cmd,
@@ -2345,6 +2424,12 @@ static int dpcm_fe_dai_do_trigger(struct snd_pcm_substream *substream, int cmd)
goto out;
}
+ if (ret < 0) {
+ dev_err(fe->dev, "ASoC: trigger FE cmd: %d failed: %d\n",
+ cmd, ret);
+ goto out;
+ }
+
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_RESUME:
@@ -2809,21 +2894,13 @@ static int dpcm_fe_dai_close(struct snd_pcm_substream *fe_substream)
return ret;
}
-static void soc_pcm_private_free(struct snd_pcm *pcm)
-{
- struct snd_soc_pcm_runtime *rtd = pcm->private_data;
-
- /* need to sync the delayed work before releasing resources */
- flush_delayed_work(&rtd->delayed_work);
- snd_soc_pcm_component_free(pcm);
-}
-
/* create a new pcm */
int soc_new_pcm(struct snd_soc_pcm_runtime *rtd, int num)
{
struct snd_soc_dai *codec_dai;
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
struct snd_soc_rtdcom_list *rtdcom;
+ struct snd_soc_component *component;
struct snd_pcm *pcm;
char new_name[64];
int ret = 0, playback = 0, capture = 0;
@@ -2923,7 +3000,6 @@ int soc_new_pcm(struct snd_soc_pcm_runtime *rtd, int num)
rtd->ops.hw_free = dpcm_fe_dai_hw_free;
rtd->ops.close = dpcm_fe_dai_close;
rtd->ops.pointer = soc_pcm_pointer;
- rtd->ops.ioctl = snd_soc_pcm_component_ioctl;
} else {
rtd->ops.open = soc_pcm_open;
rtd->ops.hw_params = soc_pcm_hw_params;
@@ -2932,20 +3008,20 @@ int soc_new_pcm(struct snd_soc_pcm_runtime *rtd, int num)
rtd->ops.hw_free = soc_pcm_hw_free;
rtd->ops.close = soc_pcm_close;
rtd->ops.pointer = soc_pcm_pointer;
- rtd->ops.ioctl = snd_soc_pcm_component_ioctl;
}
- for_each_rtdcom(rtd, rtdcom) {
- const struct snd_pcm_ops *ops = rtdcom->component->driver->ops;
-
- if (!ops)
- continue;
+ for_each_rtd_components(rtd, rtdcom, component) {
+ const struct snd_soc_component_driver *drv = component->driver;
- if (ops->copy_user)
+ if (drv->ioctl)
+ rtd->ops.ioctl = snd_soc_pcm_component_ioctl;
+ if (drv->sync_stop)
+ rtd->ops.sync_stop = snd_soc_pcm_component_sync_stop;
+ if (drv->copy_user)
rtd->ops.copy_user = snd_soc_pcm_component_copy_user;
- if (ops->page)
+ if (drv->page)
rtd->ops.page = snd_soc_pcm_component_page;
- if (ops->mmap)
+ if (drv->mmap)
rtd->ops.mmap = snd_soc_pcm_component_mmap;
}
@@ -2955,13 +3031,12 @@ int soc_new_pcm(struct snd_soc_pcm_runtime *rtd, int num)
if (capture)
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &rtd->ops);
- ret = snd_soc_pcm_component_new(pcm);
+ ret = snd_soc_pcm_component_new(rtd);
if (ret < 0) {
dev_err(rtd->dev, "ASoC: pcm constructor failed: %d\n", ret);
return ret;
}
- pcm->private_free = soc_pcm_private_free;
pcm->no_device_suspend = true;
out:
dev_info(rtd->card->dev, "%s <-> %s mapping ok\n",
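
The trigger rework above splits soc_pcm_trigger() into dedicated start and stop paths: start-class commands run the dai_link ops, then the components, then the CPU DAI, then the codec DAIs, and stop-class commands walk the same stages in reverse. A self-contained sketch of that dispatch pattern, with the four stages reduced to labeled stubs (the names are illustrative, not the ASoC call chain):

#include <stdio.h>

enum trig { TRIG_START, TRIG_RESUME, TRIG_STOP, TRIG_SUSPEND };

static int stage(const char *name)
{
	printf("trigger %s\n", name);
	return 0; /* a real stage would propagate errors */
}

static int trigger_start(void)
{
	/* start: dai_link ops first, codec DAIs last */
	static const char * const order[] = {
		"link", "components", "cpu-dai", "codec-dais"
	};
	unsigned int i;

	for (i = 0; i < 4; i++)
		if (stage(order[i]) < 0)
			return -1;
	return 0;
}

static int trigger_stop(void)
{
	/* stop: exact mirror of the start path */
	static const char * const order[] = {
		"codec-dais", "cpu-dai", "components", "link"
	};
	unsigned int i;

	for (i = 0; i < 4; i++)
		if (stage(order[i]) < 0)
			return -1;
	return 0;
}

static int trigger(enum trig cmd)
{
	switch (cmd) {
	case TRIG_START:
	case TRIG_RESUME:
		return trigger_start();
	case TRIG_STOP:
	case TRIG_SUSPEND:
		return trigger_stop();
	default:
		return -1; /* unknown command, like -EINVAL */
	}
}

int main(void)
{
	trigger(TRIG_START);
	trigger(TRIG_STOP);
	return 0;
}
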
diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
index 0fd032914a31..81d2af000a5c 100644
--- a/sound/soc/soc-topology.c
+++ b/sound/soc/soc-topology.c
@@ -1800,6 +1800,9 @@ static int soc_tplg_dai_create(struct soc_tplg *tplg,
struct snd_soc_dai_driver *dai_drv;
struct snd_soc_pcm_stream *stream;
struct snd_soc_tplg_stream_caps *caps;
+ struct snd_soc_dai *dai;
+ struct snd_soc_dapm_context *dapm =
+ snd_soc_component_get_dapm(tplg->comp);
int ret;
dai_drv = kzalloc(sizeof(struct snd_soc_dai_driver), GFP_KERNEL);
@@ -1842,7 +1845,19 @@ static int soc_tplg_dai_create(struct soc_tplg *tplg,
list_add(&dai_drv->dobj.list, &tplg->comp->dobj_list);
/* register the DAI to the component */
- return snd_soc_register_dai(tplg->comp, dai_drv);
+ dai = snd_soc_register_dai(tplg->comp, dai_drv, false);
+ if (!dai)
+ return -ENOMEM;
+
+ /* Create the DAI widgets here */
+ ret = snd_soc_dapm_new_dai_widgets(dapm, dai);
+ if (ret != 0) {
+ dev_err(dai->dev, "Failed to create DAI widgets %d\n", ret);
+ snd_soc_unregister_dai(dai);
+ return ret;
+ }
+
+ return ret;
}
static void set_link_flags(struct snd_soc_dai_link *link,
diff --git a/sound/soc/soc-utils.c b/sound/soc/soc-utils.c
index 54dcece52b0c..2fd4562f5e63 100644
--- a/sound/soc/soc-utils.c
+++ b/sound/soc/soc-utils.c
@@ -63,7 +63,8 @@ static const struct snd_pcm_hardware dummy_dma_hardware = {
.periods_max = 128,
};
-static int dummy_dma_open(struct snd_pcm_substream *substream)
+static int dummy_dma_open(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
@@ -74,13 +75,9 @@ static int dummy_dma_open(struct snd_pcm_substream *substream)
return 0;
}
-static const struct snd_pcm_ops snd_dummy_dma_ops = {
- .open = dummy_dma_open,
- .ioctl = snd_pcm_lib_ioctl,
-};
-
static const struct snd_soc_component_driver dummy_platform = {
- .ops = &snd_dummy_dma_ops,
+ .open = dummy_dma_open,
+ .ioctl = snd_soc_pcm_lib_ioctl,
};
static const struct snd_soc_component_driver dummy_codec = {
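
soc-utils.c shows the wider migration from a standalone struct snd_pcm_ops to PCM callbacks hosted on the component driver, mirrored in soc-pcm.c where rtd->ops entries are now installed only when some component driver provides the callback. A minimal sketch of that conditional wiring, with simplified stand-in types:

#include <stddef.h>
#include <stdio.h>

/* simplified stand-ins for the component driver and runtime ops */
struct drv {
	int (*open)(void);
	int (*ioctl)(void);
};

struct rtd_ops {
	int (*open)(void);
	int (*ioctl)(void);
};

static int dma_open(void) { return 0; }

int main(void)
{
	struct drv drivers[] = { { dma_open, NULL }, { NULL, NULL } };
	struct rtd_ops ops = { NULL, NULL };
	size_t i;

	/* wire up an op only when some component driver provides it */
	for (i = 0; i < sizeof(drivers) / sizeof(drivers[0]); i++) {
		if (drivers[i].open)
			ops.open = drivers[i].open;
		if (drivers[i].ioctl)
			ops.ioctl = drivers[i].ioctl;
	}

	printf("open %s, ioctl %s\n",
	       ops.open ? "set" : "unset",
	       ops.ioctl ? "set" : "unset");
	return 0;
}
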
diff --git a/sound/soc/sof/Kconfig b/sound/soc/sof/Kconfig
index bb8036ae567e..71a0fc075a63 100644
--- a/sound/soc/sof/Kconfig
+++ b/sound/soc/sof/Kconfig
@@ -14,8 +14,6 @@ config SND_SOC_SOF_PCI
depends on PCI
select SND_SOC_SOF
select SND_SOC_ACPI if ACPI
- select SND_SOC_SOF_OPTIONS
- select SND_SOC_SOF_INTEL_PCI if SND_SOC_SOF_INTEL_TOPLEVEL
help
This adds support for PCI enumeration. This option is
required to enable Intel Skylake+ devices
@@ -27,8 +25,6 @@ config SND_SOC_SOF_ACPI
depends on ACPI || COMPILE_TEST
select SND_SOC_SOF
select SND_SOC_ACPI if ACPI
- select SND_SOC_SOF_OPTIONS
- select SND_SOC_SOF_INTEL_ACPI if SND_SOC_SOF_INTEL_TOPLEVEL
select IOSF_MBI if X86 && PCI
help
This adds support for ACPI enumeration. This option is required
@@ -40,19 +36,23 @@ config SND_SOC_SOF_OF
tristate "SOF OF enumeration support"
depends on OF || COMPILE_TEST
select SND_SOC_SOF
- select SND_SOC_SOF_OPTIONS
help
This adds support for Device Tree enumeration. This option is
required to enable i.MX8 devices.
Say Y if you need this option. If unsure select "N".
-config SND_SOC_SOF_OPTIONS
- tristate
+config SND_SOC_SOF_DEVELOPER_SUPPORT
+ bool "SOF developer options support"
+ depends on EXPERT
help
- This option is not user-selectable but automagically handled by
- 'select' statements at a higher level
+ This option unlocks SOF developer options for debugging,
+ performance tuning and code hardening.
+ Distributions should not select this option; only SOF development
+ teams should select it.
+ Say Y if you are involved in SOF development and need this option.
+ If not, select "N".
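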
-if SND_SOC_SOF_OPTIONS
+if SND_SOC_SOF_DEVELOPER_SUPPORT
config SND_SOC_SOF_NOCODEC
tristate
@@ -64,6 +64,11 @@ config SND_SOC_SOF_NOCODEC_SUPPORT
option if no known codec is detected. This is typically only
enabled for developers or devices where the sound card is
controlled externally
+ This option is mutually exclusive with the Intel HDaudio support;
+ selecting it may have negative impacts and prevent e.g. microphone
+ functionality from being enabled on Intel CoffeeLake and later
+ platforms.
+ Distributions should not select this option!
Say Y if you need this nocodec fallback option
If unsure select "N".
@@ -142,6 +147,14 @@ config SND_SOC_SOF_DEBUG_ENABLE_DEBUGFS_CACHE
Say Y if you want to enable caching the memory windows.
If unsure, select "N".
+config SND_SOC_SOF_DEBUG_ENABLE_FIRMWARE_TRACE
+ bool "SOF enable firmware trace"
+ help
+ The firmware trace can be enabled either at build-time with
+ this option, or dynamically by setting flags in the SOF core
+ module parameter (similar to dynamic debug).
+ If unsure, select "N".
+
config SND_SOC_SOF_DEBUG_IPC_FLOOD_TEST
bool "SOF enable IPC flood test"
help
@@ -150,9 +163,17 @@ config SND_SOC_SOF_DEBUG_IPC_FLOOD_TEST
Say Y if you want to enable IPC flood test.
If unsure, select "N".
+config SND_SOC_SOF_DEBUG_RETAIN_DSP_CONTEXT
+ bool "SOF retain DSP context on any FW exceptions"
+ help
+ This option keeps the DSP in D0 state so that firmware debug
+ information can be retained and dumped to userspace.
+ Say Y if you want to retain DSP context for FW exceptions.
+ If unsure, select "N".
+
endif ## SND_SOC_SOF_DEBUG
-endif ## SND_SOC_SOF_OPTIONS
+endif ## SND_SOC_SOF_DEVELOPER_SUPPORT
config SND_SOC_SOF
tristate
diff --git a/sound/soc/sof/control.c b/sound/soc/sof/control.c
index 2b8711eda362..7baf7f1507c3 100644
--- a/sound/soc/sof/control.c
+++ b/sound/soc/sof/control.c
@@ -11,8 +11,39 @@
/* Mixer Controls */
#include <linux/pm_runtime.h>
+#include <linux/leds.h>
#include "sof-priv.h"
+static void update_mute_led(struct snd_sof_control *scontrol,
+ struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ unsigned int temp = 0;
+ unsigned int mask;
+ int i;
+
+ mask = 1U << snd_ctl_get_ioffidx(kcontrol, &ucontrol->id);
+
+ for (i = 0; i < scontrol->num_channels; i++) {
+ if (ucontrol->value.integer.value[i]) {
+ temp |= mask;
+ break;
+ }
+ }
+
+ if (temp == scontrol->led_ctl.led_value)
+ return;
+
+ scontrol->led_ctl.led_value = temp;
+
+#if IS_REACHABLE(CONFIG_LEDS_TRIGGER_AUDIO)
+ if (!scontrol->led_ctl.direction)
+ ledtrig_audio_set(LED_AUDIO_MUTE, temp ? LED_OFF : LED_ON);
+ else
+ ledtrig_audio_set(LED_AUDIO_MICMUTE, temp ? LED_OFF : LED_ON);
+#endif
+}
+
static inline u32 mixer_to_ipc(unsigned int value, u32 *volume_map, int size)
{
if (value >= size)
@@ -118,6 +149,9 @@ int snd_sof_switch_put(struct snd_kcontrol *kcontrol,
cdata->chanv[i].value = value;
}
+ if (scontrol->led_ctl.use_led)
+ update_mute_led(scontrol, kcontrol, ucontrol);
+
/* notify DSP of mixer updates */
if (pm_runtime_active(sdev->dev))
snd_sof_ipc_set_get_comp_data(sdev->ipc, scontrol,
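
update_mute_led() above reduces the per-channel switch values to a single "any channel on" bit, caches it, and only pokes the LED trigger when that bit changes. A standalone model of the reduction and change detection, with the LED call stubbed and the per-control channel mask omitted for brevity:

#include <stdbool.h>
#include <stdio.h>

static unsigned int cached_led_value;

static void led_set(bool on)
{
	printf("mute LED %s\n", on ? "on" : "off");
}

/* Any non-zero channel means "unmuted"; the mute LED is lit only
 * when every channel is muted (reduced value == 0).
 */
static void update_mute_led(const long *chan, int num_channels)
{
	unsigned int value = 0;
	int i;

	for (i = 0; i < num_channels; i++) {
		if (chan[i]) {
			value = 1;
			break;
		}
	}

	if (value == cached_led_value)
		return; /* no change, skip the LED trigger */

	cached_led_value = value;
	led_set(value == 0);
}

int main(void)
{
	long muted[2] = { 0, 0 }, live[2] = { 1, 0 };

	update_mute_led(live, 2);  /* unmuted -> LED off */
	update_mute_led(muted, 2); /* muted   -> LED on  */
	return 0;
}
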
diff --git a/sound/soc/sof/core.c b/sound/soc/sof/core.c
index 81f28f7ff1a0..805918d3bcc0 100644
--- a/sound/soc/sof/core.c
+++ b/sound/soc/sof/core.c
@@ -16,6 +16,11 @@
#include "sof-priv.h"
#include "ops.h"
+/* see SOF_DBG_ flags */
+int sof_core_debug;
+module_param_named(sof_debug, sof_core_debug, int, 0444);
+MODULE_PARM_DESC(sof_debug, "SOF core debug options (0x0 all off)");
+
/* SOF defaults if not provided by the platform in ms */
#define TIMEOUT_DEFAULT_IPC_MS 500
#define TIMEOUT_DEFAULT_BOOT_MS 2000
@@ -127,6 +132,19 @@ struct snd_sof_dai *snd_sof_find_dai(struct snd_sof_dev *sdev,
return NULL;
}
+bool snd_sof_dsp_d0i3_on_suspend(struct snd_sof_dev *sdev)
+{
+ struct snd_sof_pcm *spcm;
+
+ list_for_each_entry(spcm, &sdev->pcm_list, list) {
+ if (spcm->stream[SNDRV_PCM_STREAM_PLAYBACK].suspend_ignored ||
+ spcm->stream[SNDRV_PCM_STREAM_CAPTURE].suspend_ignored)
+ return true;
+ }
+
+ return false;
+}
+
/*
* FW Panic/fault handling.
*/
@@ -350,12 +368,20 @@ static int sof_probe_continue(struct snd_sof_dev *sdev)
goto fw_run_err;
}
- /* init DMA trace */
- ret = snd_sof_init_trace(sdev);
- if (ret < 0) {
- /* non fatal */
- dev_warn(sdev->dev,
- "warning: failed to initialize trace %d\n", ret);
+ if (IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_ENABLE_FIRMWARE_TRACE) ||
+ (sof_core_debug & SOF_DBG_ENABLE_TRACE)) {
+ sdev->dtrace_is_supported = true;
+
+ /* init DMA trace */
+ ret = snd_sof_init_trace(sdev);
+ if (ret < 0) {
+ /* non fatal */
+ dev_warn(sdev->dev,
+ "warning: failed to initialize trace %d\n",
+ ret);
+ }
+ } else {
+ dev_dbg(sdev->dev, "SOF firmware trace disabled\n");
}
/* hereafter all FW boot flows are for PM reasons */
@@ -445,6 +471,9 @@ int snd_sof_device_probe(struct device *dev, struct snd_sof_pdata *plat_data)
/* initialize sof device */
sdev->dev = dev;
+ /* initialize default D0 sub-state */
+ sdev->d0_substate = SOF_DSP_D0I0;
+
sdev->pdata = plat_data;
sdev->first_boot = true;
dev_set_drvdata(dev, sdev);
@@ -453,7 +482,8 @@ int snd_sof_device_probe(struct device *dev, struct snd_sof_pdata *plat_data)
if (!sof_ops(sdev) || !sof_ops(sdev)->probe || !sof_ops(sdev)->run ||
!sof_ops(sdev)->block_read || !sof_ops(sdev)->block_write ||
!sof_ops(sdev)->send_msg || !sof_ops(sdev)->load_firmware ||
- !sof_ops(sdev)->ipc_msg_data || !sof_ops(sdev)->ipc_pcm_params)
+ !sof_ops(sdev)->ipc_msg_data || !sof_ops(sdev)->ipc_pcm_params ||
+ !sof_ops(sdev)->fw_ready)
return -EINVAL;
INIT_LIST_HEAD(&sdev->pcm_list);
diff --git a/sound/soc/sof/debug.c b/sound/soc/sof/debug.c
index 5529e8eeca46..d2b3b99d3a20 100644
--- a/sound/soc/sof/debug.c
+++ b/sound/soc/sof/debug.c
@@ -463,3 +463,19 @@ void snd_sof_free_debug(struct snd_sof_dev *sdev)
debugfs_remove_recursive(sdev->debugfs_root);
}
EXPORT_SYMBOL_GPL(snd_sof_free_debug);
+
+void snd_sof_handle_fw_exception(struct snd_sof_dev *sdev)
+{
+ if (IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_RETAIN_DSP_CONTEXT) ||
+ (sof_core_debug & SOF_DBG_RETAIN_CTX)) {
+ /* should we prevent DSP entering D3 ? */
+ dev_info(sdev->dev, "info: preventing DSP entering D3 state to preserve context\n");
+ pm_runtime_get_noresume(sdev->dev);
+ }
+
+ /* dump vital information to the logs */
+ snd_sof_dsp_dbg_dump(sdev, SOF_DBG_REGS | SOF_DBG_MBOX);
+ snd_sof_ipc_dump(sdev);
+ snd_sof_trace_notify_for_error(sdev);
+}
+EXPORT_SYMBOL(snd_sof_handle_fw_exception);
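
core.c and debug.c both gate behaviour on the new sof_debug bitmask module parameter OR'd with build-time Kconfig defaults, in the style of dynamic debug. A small sketch of that dual gate; the flag values and CONFIG name here are placeholders, not SOF's:

#include <stdbool.h>
#include <stdio.h>

#define DBG_ENABLE_TRACE (1 << 0) /* illustrative values, not SOF_DBG_* */
#define DBG_RETAIN_CTX   (1 << 1)

/* build-time default standing in for the Kconfig option */
#ifndef CONFIG_FIRMWARE_TRACE
#define CONFIG_FIRMWARE_TRACE 0
#endif

static int core_debug; /* would be a module parameter in the driver */

static bool trace_enabled(void)
{
	/* enabled either at build time or at module-load time */
	return CONFIG_FIRMWARE_TRACE || (core_debug & DBG_ENABLE_TRACE);
}

int main(void)
{
	printf("trace: %d\n", trace_enabled());
	core_debug |= DBG_ENABLE_TRACE; /* like sof_debug=0x1 on modprobe */
	printf("trace: %d\n", trace_enabled());
	return 0;
}
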
diff --git a/sound/soc/sof/imx/Kconfig b/sound/soc/sof/imx/Kconfig
index 5acae75f5750..bae4f7bf5f75 100644
--- a/sound/soc/sof/imx/Kconfig
+++ b/sound/soc/sof/imx/Kconfig
@@ -5,19 +5,23 @@ config SND_SOC_SOF_IMX_TOPLEVEL
depends on ARM64 || COMPILE_TEST
depends on SND_SOC_SOF_OF
help
- This adds support for Sound Open Firmware for NXP i.MX platforms.
- Say Y if you have such a device.
- If unsure select "N".
+ This adds support for Sound Open Firmware for NXP i.MX platforms.
+ Say Y if you have such a device.
+ If unsure select "N".
if SND_SOC_SOF_IMX_TOPLEVEL
-config SND_SOC_SOF_IMX8
- tristate "SOF support for i.MX8"
+config SND_SOC_SOF_IMX8_SUPPORT
+ bool "SOF support for i.MX8"
depends on IMX_SCU
depends on IMX_DSP
help
- This adds support for Sound Open Firmware for NXP i.MX8 platforms
- Say Y if you have such a device.
- If unsure select "N".
+ This adds support for Sound Open Firmware for NXP i.MX8 platforms
+ Say Y if you have such a device.
+ If unsure select "N".
+
+config SND_SOC_SOF_IMX8
+ def_tristate SND_SOC_SOF_OF
+ depends on SND_SOC_SOF_IMX8_SUPPORT
endif ## SND_SOC_SOF_IMX_TOPLEVEL
diff --git a/sound/soc/sof/imx/imx8.c b/sound/soc/sof/imx/imx8.c
index 2a22b18e5ec0..cfefcfd92798 100644
--- a/sound/soc/sof/imx/imx8.c
+++ b/sound/soc/sof/imx/imx8.c
@@ -388,6 +388,13 @@ struct snd_sof_dsp_ops sof_imx8_ops = {
/* DAI drivers */
.drv = imx8_dai,
.num_drv = 1, /* we have only 1 ESAI interface on i.MX8 */
+
+ /* ALSA HW info flags */
+ .hw_info = SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_PAUSE |
+ SNDRV_PCM_INFO_NO_PERIOD_WAKEUP
};
EXPORT_SYMBOL(sof_imx8_ops);
diff --git a/sound/soc/sof/intel/Kconfig b/sound/soc/sof/intel/Kconfig
index d62f51d33be1..cc09bb606f7d 100644
--- a/sound/soc/sof/intel/Kconfig
+++ b/sound/soc/sof/intel/Kconfig
@@ -10,7 +10,7 @@ config SND_SOC_SOF_INTEL_TOPLEVEL
if SND_SOC_SOF_INTEL_TOPLEVEL
config SND_SOC_SOF_INTEL_ACPI
- tristate
+ def_tristate SND_SOC_SOF_ACPI
select SND_SOC_SOF_BAYTRAIL if SND_SOC_SOF_BAYTRAIL_SUPPORT
select SND_SOC_SOF_BROADWELL if SND_SOC_SOF_BROADWELL_SUPPORT
help
@@ -18,7 +18,7 @@ config SND_SOC_SOF_INTEL_ACPI
'select' statements at a higher level
config SND_SOC_SOF_INTEL_PCI
- tristate
+ def_tristate SND_SOC_SOF_PCI
select SND_SOC_SOF_MERRIFIELD if SND_SOC_SOF_MERRIFIELD_SUPPORT
select SND_SOC_SOF_APOLLOLAKE if SND_SOC_SOF_APOLLOLAKE_SUPPORT
select SND_SOC_SOF_GEMINILAKE if SND_SOC_SOF_GEMINILAKE_SUPPORT
@@ -29,6 +29,7 @@ config SND_SOC_SOF_INTEL_PCI
select SND_SOC_SOF_COMETLAKE_H if SND_SOC_SOF_COMETLAKE_H_SUPPORT
select SND_SOC_SOF_TIGERLAKE if SND_SOC_SOF_TIGERLAKE_SUPPORT
select SND_SOC_SOF_ELKHARTLAKE if SND_SOC_SOF_ELKHARTLAKE_SUPPORT
+ select SND_SOC_SOF_JASPERLAKE if SND_SOC_SOF_JASPERLAKE_SUPPORT
help
This option is not user-selectable but automagically handled by
'select' statements at a higher level
@@ -36,7 +37,7 @@ config SND_SOC_SOF_INTEL_PCI
config SND_SOC_SOF_INTEL_HIFI_EP_IPC
tristate
help
- This option is not user-selectable but automagically handled by
+ This option is not user-selectable but automagically handled by
'select' statements at a higher level
config SND_SOC_SOF_INTEL_ATOM_HIFI_EP
@@ -61,10 +62,18 @@ if SND_SOC_SOF_INTEL_ACPI
config SND_SOC_SOF_BAYTRAIL_SUPPORT
bool "SOF support for Baytrail, Braswell and Cherrytrail"
+ depends on SND_SST_ATOM_HIFI2_PLATFORM_ACPI=n
help
This adds support for Sound Open Firmware for Intel(R) platforms
using the Baytrail, Braswell or Cherrytrail processors.
- Say Y if you have such a device.
+ This option is mutually exclusive with the Atom/SST and Baytrail
+ legacy drivers. If you want to enable SOF on Baytrail/Cherrytrail,
+ you need to deselect those options first.
+ SOF does not support Baytrail-CR for now, so this option is not
+ recommended for distros. At some point all legacy drivers will be
+ deprecated but not before all userspace firmware/topology/UCM files
+ are made available to downstream distros.
+ Say Y if you want to enable SOF on Baytrail/Cherrytrail.
If unsure select "N".
config SND_SOC_SOF_BAYTRAIL
@@ -76,10 +85,18 @@ config SND_SOC_SOF_BAYTRAIL
config SND_SOC_SOF_BROADWELL_SUPPORT
bool "SOF support for Broadwell"
+ depends on SND_SOC_INTEL_HASWELL=n
help
This adds support for Sound Open Firmware for Intel(R) platforms
using the Broadwell processors.
- Say Y if you have such a device.
+ This option is mutually exclusive with the Haswell/Broadwell legacy
+ driver. If you want to enable SOF on Broadwell you need to deselect
+ the legacy driver first.
+ SOF does not fully support Broadwell yet, so this option is not
+ recommended for distros. At some point all legacy drivers will be
+ deprecated but not before all userspace firmware/topology/UCM files
+ are made available to downstream distros.
+ Say Y if you want to enable SOF on Broadwell.
If unsure select "N".
config SND_SOC_SOF_BROADWELL
@@ -217,31 +234,46 @@ config SND_SOC_SOF_COMETLAKE_H_SUPPORT
config SND_SOC_SOF_TIGERLAKE_SUPPORT
bool "SOF support for Tigerlake"
help
- This adds support for Sound Open Firmware for Intel(R) platforms
- using the Tigerlake processors.
- Say Y if you have such a device.
- If unsure select "N".
+ This adds support for Sound Open Firmware for Intel(R) platforms
+ using the Tigerlake processors.
+ Say Y if you have such a device.
+ If unsure select "N".
config SND_SOC_SOF_TIGERLAKE
tristate
select SND_SOC_SOF_HDA_COMMON
help
- This option is not user-selectable but automagically handled by
+ This option is not user-selectable but automagically handled by
'select' statements at a higher level
config SND_SOC_SOF_ELKHARTLAKE_SUPPORT
bool "SOF support for ElkhartLake"
help
- This adds support for Sound Open Firmware for Intel(R) platforms
- using the ElkhartLake processors.
- Say Y if you have such a device.
- If unsure select "N".
+ This adds support for Sound Open Firmware for Intel(R) platforms
+ using the ElkhartLake processors.
+ Say Y if you have such a device.
+ If unsure select "N".
config SND_SOC_SOF_ELKHARTLAKE
tristate
select SND_SOC_SOF_HDA_COMMON
help
- This option is not user-selectable but automagically handled by
+ This option is not user-selectable but automagically handled by
+ 'select' statements at a higher level
+
+config SND_SOC_SOF_JASPERLAKE_SUPPORT
+ bool "SOF support for JasperLake"
+ help
+ This adds support for Sound Open Firmware for Intel(R) platforms
+ using the JasperLake processors.
+ Say Y if you have such a device.
+ If unsure select "N".
+
+config SND_SOC_SOF_JASPERLAKE
+ tristate
+ select SND_SOC_SOF_HDA_COMMON
+ help
+ This option is not user-selectable but automagically handled by
'select' statements at a higher level
config SND_SOC_SOF_HDA_COMMON
@@ -283,6 +315,16 @@ config SND_SOC_SOF_HDA_ALWAYS_ENABLE_DMI_L1
Say Y if you want to enable DMI Link L1
If unsure, select "N".
+config SND_SOC_SOF_HDA_COMMON_HDMI_CODEC
+ bool "SOF common HDA HDMI codec driver"
+ depends on SND_SOC_SOF_HDA_LINK
+ depends on SND_HDA_CODEC_HDMI
+ help
+ This adds support for HDMI audio by using the common HDA
+ HDMI/DisplayPort codec driver.
+ Say Y if you want to use the common codec driver with SOF.
+ If unsure select "Y".
+
endif ## SND_SOC_SOF_HDA_COMMON
config SND_SOC_SOF_HDA_LINK_BASELINE
@@ -296,7 +338,7 @@ config SND_SOC_SOF_HDA
tristate
select SND_HDA_EXT_CORE if SND_SOC_SOF_HDA_LINK
select SND_SOC_HDAC_HDA if SND_SOC_SOF_HDA_AUDIO_CODEC
- select SND_INTEL_NHLT if ACPI
+ select SND_INTEL_DSP_CONFIG
help
This option is not user-selectable but automagically handled by
'select' statements at a higher level
diff --git a/sound/soc/sof/intel/apl.c b/sound/soc/sof/intel/apl.c
index 8dc7a5558da4..7daa8eb456c8 100644
--- a/sound/soc/sof/intel/apl.c
+++ b/sound/soc/sof/intel/apl.c
@@ -97,6 +97,14 @@ const struct snd_sof_dsp_ops sof_apl_ops = {
.runtime_resume = hda_dsp_runtime_resume,
.runtime_idle = hda_dsp_runtime_idle,
.set_hw_params_upon_resume = hda_dsp_set_hw_params_upon_resume,
+ .set_power_state = hda_dsp_set_power_state,
+
+ /* ALSA HW info flags */
+ .hw_info = SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_PAUSE |
+ SNDRV_PCM_INFO_NO_PERIOD_WAKEUP,
};
EXPORT_SYMBOL(sof_apl_ops);
diff --git a/sound/soc/sof/intel/bdw.c b/sound/soc/sof/intel/bdw.c
index 80e2826fb447..141dad554764 100644
--- a/sound/soc/sof/intel/bdw.c
+++ b/sound/soc/sof/intel/bdw.c
@@ -247,7 +247,7 @@ static void bdw_dump(struct snd_sof_dev *sdev, u32 flags)
struct sof_ipc_dsp_oops_xtensa xoops;
struct sof_ipc_panic_info panic_info;
u32 stack[BDW_STACK_DUMP_SIZE];
- u32 status, panic;
+ u32 status, panic, imrx, imrd;
/* now try generic SOF status messages */
status = snd_sof_dsp_read(sdev, BDW_DSP_BAR, SHIM_IPCD);
@@ -256,6 +256,26 @@ static void bdw_dump(struct snd_sof_dev *sdev, u32 flags)
BDW_STACK_DUMP_SIZE);
snd_sof_get_status(sdev, status, panic, &xoops, &panic_info, stack,
BDW_STACK_DUMP_SIZE);
+
+ /* provide some context for firmware debug */
+ imrx = snd_sof_dsp_read(sdev, BDW_DSP_BAR, SHIM_IMRX);
+ imrd = snd_sof_dsp_read(sdev, BDW_DSP_BAR, SHIM_IMRD);
+ dev_err(sdev->dev,
+ "error: ipc host -> DSP: pending %s complete %s raw 0x%8.8x\n",
+ (panic & SHIM_IPCX_BUSY) ? "yes" : "no",
+ (panic & SHIM_IPCX_DONE) ? "yes" : "no", panic);
+ dev_err(sdev->dev,
+ "error: mask host: pending %s complete %s raw 0x%8.8x\n",
+ (imrx & SHIM_IMRX_BUSY) ? "yes" : "no",
+ (imrx & SHIM_IMRX_DONE) ? "yes" : "no", imrx);
+ dev_err(sdev->dev,
+ "error: ipc DSP -> host: pending %s complete %s raw 0x%8.8x\n",
+ (status & SHIM_IPCD_BUSY) ? "yes" : "no",
+ (status & SHIM_IPCD_DONE) ? "yes" : "no", status);
+ dev_err(sdev->dev,
+ "error: mask DSP: pending %s complete %s raw 0x%8.8x\n",
+ (imrd & SHIM_IMRD_BUSY) ? "yes" : "no",
+ (imrd & SHIM_IMRD_DONE) ? "yes" : "no", imrd);
}
/*
@@ -571,7 +591,14 @@ const struct snd_sof_dsp_ops sof_bdw_ops = {
/* DAI drivers */
.drv = bdw_dai,
- .num_drv = ARRAY_SIZE(bdw_dai)
+ .num_drv = ARRAY_SIZE(bdw_dai),
+
+ /* ALSA HW info flags */
+ .hw_info = SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_PAUSE |
+ SNDRV_PCM_INFO_BATCH,
};
EXPORT_SYMBOL(sof_bdw_ops);
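
bdw_dump() now decodes four shim registers into pending/complete pairs for both IPC directions (the byt.c hunk below does the same). A compact sketch of that decode; the bit positions are placeholders rather than the SHIM_* layout:

#include <stdio.h>

#define BUSY (1u << 31) /* placeholder bits, not SHIM_IPC*_BUSY/DONE */
#define DONE (1u << 30)

static void decode(const char *what, unsigned int reg)
{
	printf("%s: pending %s complete %s raw 0x%8.8x\n", what,
	       (reg & BUSY) ? "yes" : "no",
	       (reg & DONE) ? "yes" : "no", reg);
}

int main(void)
{
	decode("ipc host -> DSP", BUSY);
	decode("ipc DSP -> host", DONE | 0x42);
	return 0;
}
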
diff --git a/sound/soc/sof/intel/byt.c b/sound/soc/sof/intel/byt.c
index a1e514f71739..2abf80b3eb52 100644
--- a/sound/soc/sof/intel/byt.c
+++ b/sound/soc/sof/intel/byt.c
@@ -145,7 +145,7 @@ static void byt_dump(struct snd_sof_dev *sdev, u32 flags)
struct sof_ipc_dsp_oops_xtensa xoops;
struct sof_ipc_panic_info panic_info;
u32 stack[BYT_STACK_DUMP_SIZE];
- u32 status, panic;
+ u32 status, panic, imrd, imrx;
/* now try generic SOF status messages */
status = snd_sof_dsp_read(sdev, BYT_DSP_BAR, SHIM_IPCD);
@@ -154,6 +154,27 @@ static void byt_dump(struct snd_sof_dev *sdev, u32 flags)
BYT_STACK_DUMP_SIZE);
snd_sof_get_status(sdev, status, panic, &xoops, &panic_info, stack,
BYT_STACK_DUMP_SIZE);
+
+ /* provide some context for firmware debug */
+ imrx = snd_sof_dsp_read(sdev, BYT_DSP_BAR, SHIM_IMRX);
+ imrd = snd_sof_dsp_read(sdev, BYT_DSP_BAR, SHIM_IMRD);
+ dev_err(sdev->dev,
+ "error: ipc host -> DSP: pending %s complete %s raw 0x%8.8x\n",
+ (panic & SHIM_IPCX_BUSY) ? "yes" : "no",
+ (panic & SHIM_IPCX_DONE) ? "yes" : "no", panic);
+ dev_err(sdev->dev,
+ "error: mask host: pending %s complete %s raw 0x%8.8x\n",
+ (imrx & SHIM_IMRX_BUSY) ? "yes" : "no",
+ (imrx & SHIM_IMRX_DONE) ? "yes" : "no", imrx);
+ dev_err(sdev->dev,
+ "error: ipc DSP -> host: pending %s complete %s raw 0x%8.8x\n",
+ (status & SHIM_IPCD_BUSY) ? "yes" : "no",
+ (status & SHIM_IPCD_DONE) ? "yes" : "no", status);
+ dev_err(sdev->dev,
+ "error: mask DSP: pending %s complete %s raw 0x%8.8x\n",
+ (imrd & SHIM_IMRD_BUSY) ? "yes" : "no",
+ (imrd & SHIM_IMRD_DONE) ? "yes" : "no", imrd);
}
/*
@@ -511,6 +532,13 @@ const struct snd_sof_dsp_ops sof_tng_ops = {
/* DAI drivers */
.drv = byt_dai,
.num_drv = 3, /* we have only 3 SSPs on byt */
+
+ /* ALSA HW info flags */
+ .hw_info = SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_PAUSE |
+ SNDRV_PCM_INFO_BATCH,
};
EXPORT_SYMBOL(sof_tng_ops);
@@ -672,6 +700,13 @@ const struct snd_sof_dsp_ops sof_byt_ops = {
/* DAI drivers */
.drv = byt_dai,
.num_drv = 3, /* we have only 3 SSPs on byt */
+
+ /* ALSA HW info flags */
+ .hw_info = SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_PAUSE |
+ SNDRV_PCM_INFO_BATCH,
};
EXPORT_SYMBOL(sof_byt_ops);
@@ -732,6 +767,13 @@ const struct snd_sof_dsp_ops sof_cht_ops = {
.drv = byt_dai,
/* all 6 SSPs may be available for cherrytrail */
.num_drv = ARRAY_SIZE(byt_dai),
+
+ /* ALSA HW info flags */
+ .hw_info = SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_PAUSE |
+ SNDRV_PCM_INFO_BATCH,
};
EXPORT_SYMBOL(sof_cht_ops);
diff --git a/sound/soc/sof/intel/cnl.c b/sound/soc/sof/intel/cnl.c
index 4ddd73762d81..0e1e265f3f3b 100644
--- a/sound/soc/sof/intel/cnl.c
+++ b/sound/soc/sof/intel/cnl.c
@@ -17,6 +17,7 @@
#include "../ops.h"
#include "hda.h"
+#include "hda-ipc.h"
static const struct snd_sof_debugfs_map cnl_dsp_debugfs[] = {
{"hda", HDA_DSP_HDA_BAR, 0, 0x4000, SOF_DEBUGFS_ACCESS_ALWAYS},
@@ -150,14 +151,45 @@ static void cnl_ipc_dsp_done(struct snd_sof_dev *sdev)
CNL_DSP_REG_HIPCCTL_DONE);
}
+static bool cnl_compact_ipc_compress(struct snd_sof_ipc_msg *msg,
+ u32 *dr, u32 *dd)
+{
+ struct sof_ipc_pm_gate *pm_gate;
+
+ if (msg->header == (SOF_IPC_GLB_PM_MSG | SOF_IPC_PM_GATE)) {
+ pm_gate = msg->msg_data;
+
+ /* send the compact message via the primary register */
+ *dr = HDA_IPC_MSG_COMPACT | HDA_IPC_PM_GATE;
+
+ /* send payload via the extended data register */
+ *dd = pm_gate->flags;
+
+ return true;
+ }
+
+ return false;
+}
+
static int cnl_ipc_send_msg(struct snd_sof_dev *sdev,
struct snd_sof_ipc_msg *msg)
{
- /* send the message */
- sof_mailbox_write(sdev, sdev->host_box.offset, msg->msg_data,
- msg->msg_size);
- snd_sof_dsp_write(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCIDR,
- CNL_DSP_REG_HIPCIDR_BUSY);
+ u32 dr = 0;
+ u32 dd = 0;
+
+ if (cnl_compact_ipc_compress(msg, &dr, &dd)) {
+ /* send the message via IPC registers */
+ snd_sof_dsp_write(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCIDD,
+ dd);
+ snd_sof_dsp_write(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCIDR,
+ CNL_DSP_REG_HIPCIDR_BUSY | dr);
+ } else {
+ /* send the message via mailbox */
+ sof_mailbox_write(sdev, sdev->host_box.offset, msg->msg_data,
+ msg->msg_size);
+ snd_sof_dsp_write(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCIDR,
+ CNL_DSP_REG_HIPCIDR_BUSY);
+ }
return 0;
}
@@ -255,6 +287,14 @@ const struct snd_sof_dsp_ops sof_cnl_ops = {
.runtime_resume = hda_dsp_runtime_resume,
.runtime_idle = hda_dsp_runtime_idle,
.set_hw_params_upon_resume = hda_dsp_set_hw_params_upon_resume,
+ .set_power_state = hda_dsp_set_power_state,
+
+ /* ALSA HW info flags */
+ .hw_info = SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_PAUSE |
+ SNDRV_PCM_INFO_NO_PERIOD_WAKEUP,
};
EXPORT_SYMBOL(sof_cnl_ops);
@@ -327,3 +367,20 @@ const struct sof_intel_dsp_desc ehl_chip_info = {
.ssp_base_offset = CNL_SSP_BASE_OFFSET,
};
EXPORT_SYMBOL(ehl_chip_info);
+
+const struct sof_intel_dsp_desc jsl_chip_info = {
+ /* Jasperlake */
+ .cores_num = 2,
+ .init_core_mask = 1,
+ .cores_mask = HDA_DSP_CORE_MASK(0) |
+ HDA_DSP_CORE_MASK(1),
+ .ipc_req = CNL_DSP_REG_HIPCIDR,
+ .ipc_req_mask = CNL_DSP_REG_HIPCIDR_BUSY,
+ .ipc_ack = CNL_DSP_REG_HIPCIDA,
+ .ipc_ack_mask = CNL_DSP_REG_HIPCIDA_DONE,
+ .ipc_ctl = CNL_DSP_REG_HIPCCTL,
+ .rom_init_timeout = 300,
+ .ssp_count = ICL_SSP_COUNT,
+ .ssp_base_offset = CNL_SSP_BASE_OFFSET,
+};
+EXPORT_SYMBOL(jsl_chip_info);
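
cnl_compact_ipc_compress() routes small PM_GATE messages through the doorbell registers instead of the mailbox: the primary register carries the compact-message marker and message type, the extended register carries the payload. Reusing the bit layout from the new hda-ipc.h, a hedged sketch of building those two words (the pm_gate struct is simplified):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* bit layout taken from hda-ipc.h in this patch */
#define HDA_IPC_MSG_COMPACT (1u << 30)
#define HDA_IPC_TYPE(x)     ((uint32_t)(x) << 24)
#define HDA_IPC_PM_GATE     HDA_IPC_TYPE(0x8u)
#define HDA_PM_PPG          (1u << 2)

struct pm_gate { uint32_t flags; }; /* simplified stand-in */

/* returns true and fills dr/dd when the message fits the compact form */
static bool compact_compress(const struct pm_gate *msg,
			     uint32_t *dr, uint32_t *dd)
{
	*dr = HDA_IPC_MSG_COMPACT | HDA_IPC_PM_GATE; /* primary register */
	*dd = msg->flags;                            /* extended register */
	return true;
}

int main(void)
{
	struct pm_gate g = { .flags = HDA_PM_PPG };
	uint32_t dr, dd;

	if (compact_compress(&g, &dr, &dd))
		printf("dr=0x%08x dd=0x%08x\n", dr, dd);
	return 0;
}
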
diff --git a/sound/soc/sof/intel/hda-codec.c b/sound/soc/sof/intel/hda-codec.c
index 3ca6795a89ba..827f84a0722e 100644
--- a/sound/soc/sof/intel/hda-codec.c
+++ b/sound/soc/sof/intel/hda-codec.c
@@ -84,6 +84,8 @@ static int hda_codec_probe(struct snd_sof_dev *sdev, int address)
{
#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_AUDIO_CODEC)
struct hdac_hda_priv *hda_priv;
+ struct snd_soc_acpi_mach_params *mach_params = NULL;
+ struct snd_sof_pdata *pdata = sdev->pdata;
#endif
struct hda_bus *hbus = sof_to_hbus(sdev);
struct hdac_device *hdev;
@@ -113,8 +115,19 @@ static int hda_codec_probe(struct snd_sof_dev *sdev, int address)
if (ret < 0)
return ret;
- /* use legacy bus only for HDA codecs, idisp uses ext bus */
- if ((resp & 0xFFFF0000) != IDISP_VID_INTEL) {
+ if (pdata->machine)
+ mach_params = (struct snd_soc_acpi_mach_params *)
+ &pdata->machine->mach_params;
+
+ if ((resp & 0xFFFF0000) == IDISP_VID_INTEL)
+ hda_priv->need_display_power = true;
+
+ /*
+ * if common HDMI codec driver is not used, codec load
+ * is skipped here and hdac_hdmi is used instead
+ */
+ if ((mach_params && mach_params->common_hdmi_codec_drv) ||
+ (resp & 0xFFFF0000) != IDISP_VID_INTEL) {
hdev->type = HDA_DEV_LEGACY;
hda_codec_load_module(&hda_priv->codec);
}
@@ -155,7 +168,8 @@ int hda_codec_probe_bus(struct snd_sof_dev *sdev)
}
EXPORT_SYMBOL(hda_codec_probe_bus);
-#if IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)
+#if IS_ENABLED(CONFIG_SND_HDA_CODEC_HDMI) || \
+ IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)
void hda_codec_i915_get(struct snd_sof_dev *sdev)
{
@@ -204,6 +218,6 @@ int hda_codec_i915_exit(struct snd_sof_dev *sdev)
}
EXPORT_SYMBOL(hda_codec_i915_exit);
-#endif /* CONFIG_SND_SOC_HDAC_HDMI */
+#endif
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/sound/soc/sof/intel/hda-dsp.c b/sound/soc/sof/intel/hda-dsp.c
index fb55a3c5afd0..4a4d318f97ff 100644
--- a/sound/soc/sof/intel/hda-dsp.c
+++ b/sound/soc/sof/intel/hda-dsp.c
@@ -19,6 +19,7 @@
#include <sound/hda_register.h>
#include "../ops.h"
#include "hda.h"
+#include "hda-ipc.h"
/*
* DSP Core control.
@@ -42,6 +43,12 @@ int hda_dsp_core_reset_enter(struct snd_sof_dev *sdev, unsigned int core_mask)
((adspcs & reset) == reset),
HDA_DSP_REG_POLL_INTERVAL_US,
HDA_DSP_RESET_TIMEOUT_US);
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "error: %s: timeout on HDA_DSP_REG_ADSPCS read\n",
+ __func__);
+ return ret;
+ }
/* has core entered reset ? */
adspcs = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
@@ -77,6 +84,13 @@ int hda_dsp_core_reset_leave(struct snd_sof_dev *sdev, unsigned int core_mask)
HDA_DSP_REG_POLL_INTERVAL_US,
HDA_DSP_RESET_TIMEOUT_US);
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "error: %s: timeout on HDA_DSP_REG_ADSPCS read\n",
+ __func__);
+ return ret;
+ }
+
/* has core left reset ? */
adspcs = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
HDA_DSP_REG_ADSPCS);
@@ -151,8 +165,12 @@ int hda_dsp_core_power_up(struct snd_sof_dev *sdev, unsigned int core_mask)
(adspcs & cpa) == cpa,
HDA_DSP_REG_POLL_INTERVAL_US,
HDA_DSP_RESET_TIMEOUT_US);
- if (ret < 0)
- dev_err(sdev->dev, "error: timeout on core powerup\n");
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "error: %s: timeout on HDA_DSP_REG_ADSPCS read\n",
+ __func__);
+ return ret;
+ }
/* did core power up ? */
adspcs = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
@@ -171,17 +189,24 @@ int hda_dsp_core_power_up(struct snd_sof_dev *sdev, unsigned int core_mask)
int hda_dsp_core_power_down(struct snd_sof_dev *sdev, unsigned int core_mask)
{
u32 adspcs;
+ int ret;
/* update bits */
snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
HDA_DSP_REG_ADSPCS,
HDA_DSP_ADSPCS_SPA_MASK(core_mask), 0);
- return snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
+ ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
HDA_DSP_REG_ADSPCS, adspcs,
!(adspcs & HDA_DSP_ADSPCS_SPA_MASK(core_mask)),
HDA_DSP_REG_POLL_INTERVAL_US,
HDA_DSP_PD_TIMEOUT * USEC_PER_MSEC);
+ if (ret < 0)
+ dev_err(sdev->dev,
+ "error: %s: timeout on HDA_DSP_REG_ADSPCS read\n",
+ __func__);
+
+ return ret;
}
bool hda_dsp_core_is_enabled(struct snd_sof_dev *sdev,
@@ -282,6 +307,80 @@ void hda_dsp_ipc_int_disable(struct snd_sof_dev *sdev)
HDA_DSP_REG_HIPCCTL_BUSY | HDA_DSP_REG_HIPCCTL_DONE, 0);
}
+static int hda_dsp_wait_d0i3c_done(struct snd_sof_dev *sdev)
+{
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ int retry = HDA_DSP_REG_POLL_RETRY_COUNT;
+
+ while (snd_hdac_chip_readb(bus, VS_D0I3C) & SOF_HDA_VS_D0I3C_CIP) {
+ if (!retry--)
+ return -ETIMEDOUT;
+ usleep_range(10, 15);
+ }
+
+ return 0;
+}
+
+static int hda_dsp_send_pm_gate_ipc(struct snd_sof_dev *sdev, u32 flags)
+{
+ struct sof_ipc_pm_gate pm_gate;
+ struct sof_ipc_reply reply;
+
+ memset(&pm_gate, 0, sizeof(pm_gate));
+
+ /* configure pm_gate ipc message */
+ pm_gate.hdr.size = sizeof(pm_gate);
+ pm_gate.hdr.cmd = SOF_IPC_GLB_PM_MSG | SOF_IPC_PM_GATE;
+ pm_gate.flags = flags;
+
+ /* send pm_gate ipc to dsp */
+ return sof_ipc_tx_message(sdev->ipc, pm_gate.hdr.cmd, &pm_gate,
+ sizeof(pm_gate), &reply, sizeof(reply));
+}
+
+int hda_dsp_set_power_state(struct snd_sof_dev *sdev,
+ enum sof_d0_substate d0_substate)
+{
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ u32 flags;
+ int ret;
+ u8 value;
+
+ /* Write to D0I3C after Command-In-Progress bit is cleared */
+ ret = hda_dsp_wait_d0i3c_done(sdev);
+ if (ret < 0) {
+ dev_err(bus->dev, "CIP timeout before D0I3C update!\n");
+ return ret;
+ }
+
+ /* Update D0I3C register */
+ value = d0_substate == SOF_DSP_D0I3 ? SOF_HDA_VS_D0I3C_I3 : 0;
+ snd_hdac_chip_updateb(bus, VS_D0I3C, SOF_HDA_VS_D0I3C_I3, value);
+
+ /* Wait for cmd in progress to be cleared before exiting the function */
+ ret = hda_dsp_wait_d0i3c_done(sdev);
+ if (ret < 0) {
+ dev_err(bus->dev, "CIP timeout after D0I3C update!\n");
+ return ret;
+ }
+
+ dev_vdbg(bus->dev, "D0I3C updated, register = 0x%x\n",
+ snd_hdac_chip_readb(bus, VS_D0I3C));
+
+ if (d0_substate == SOF_DSP_D0I0)
+ flags = HDA_PM_PPG; /* prevent power gating in D0 */
+ else
+ flags = HDA_PM_NO_DMA_TRACE; /* disable DMA trace in D0I3 */
+
+ /* sending pm_gate IPC */
+ ret = hda_dsp_send_pm_gate_ipc(sdev, flags);
+ if (ret < 0)
+ dev_err(sdev->dev,
+ "error: PM_GATE ipc error %d\n", ret);
+
+ return ret;
+}
+
static int hda_suspend(struct snd_sof_dev *sdev, bool runtime_suspend)
{
struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
@@ -379,6 +478,22 @@ static int hda_resume(struct snd_sof_dev *sdev, bool runtime_resume)
int hda_dsp_resume(struct snd_sof_dev *sdev)
{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ struct pci_dev *pci = to_pci_dev(sdev->dev);
+
+ if (sdev->s0_suspend) {
+ /* restore L1SEN bit */
+ if (hda->l1_support_changed)
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
+ HDA_VS_INTEL_EM2,
+ HDA_VS_INTEL_EM2_L1SEN, 0);
+
+ /* restore and disable the system wakeup */
+ pci_restore_state(pci);
+ disable_irq_wake(pci->irq);
+ return 0;
+ }
+
/* init hda controller. DSP cores will be powered up during fw boot */
return hda_resume(sdev, false);
}
@@ -410,9 +525,25 @@ int hda_dsp_runtime_suspend(struct snd_sof_dev *sdev)
int hda_dsp_suspend(struct snd_sof_dev *sdev)
{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
struct hdac_bus *bus = sof_to_bus(sdev);
+ struct pci_dev *pci = to_pci_dev(sdev->dev);
int ret;
+ if (sdev->s0_suspend) {
+ /* enable L1SEN to make sure the system can enter S0Ix */
+ hda->l1_support_changed =
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
+ HDA_VS_INTEL_EM2,
+ HDA_VS_INTEL_EM2_L1SEN,
+ HDA_VS_INTEL_EM2_L1SEN);
+
+ /* enable the system waking up via IPC IRQ */
+ enable_irq_wake(pci->irq);
+ pci_save_state(pci);
+ return 0;
+ }
+
/* stop hda controller and power dsp off */
ret = hda_suspend(sdev, false);
if (ret < 0) {
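
hda_dsp_wait_d0i3c_done() polls the Command-In-Progress bit with a fixed retry budget and a short sleep, both before and after the D0I3C update. A self-contained sketch of that bounded-poll idiom with the register read stubbed out:

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

#define POLL_RETRY_COUNT 50 /* matches HDA_DSP_REG_POLL_RETRY_COUNT */

/* stub: pretend the controller clears CIP after a few reads */
static int read_cip(void)
{
	static int countdown = 3;
	return countdown-- > 0;
}

static int wait_cip_clear(void)
{
	int retry = POLL_RETRY_COUNT;

	while (read_cip()) {
		if (!retry--)
			return -ETIMEDOUT;
		usleep(10); /* the driver uses usleep_range(10, 15) */
	}
	return 0;
}

int main(void)
{
	printf("CIP wait: %s\n", wait_cip_clear() ? "timeout" : "clear");
	return 0;
}
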
diff --git a/sound/soc/sof/intel/hda-ipc.c b/sound/soc/sof/intel/hda-ipc.c
index 6aae6f18b3dc..0fd2153c1769 100644
--- a/sound/soc/sof/intel/hda-ipc.c
+++ b/sound/soc/sof/intel/hda-ipc.c
@@ -83,10 +83,12 @@ void hda_dsp_ipc_get_reply(struct snd_sof_dev *sdev)
}
hdr = msg->msg_data;
- if (hdr->cmd == (SOF_IPC_GLB_PM_MSG | SOF_IPC_PM_CTX_SAVE)) {
+ if (hdr->cmd == (SOF_IPC_GLB_PM_MSG | SOF_IPC_PM_CTX_SAVE) ||
+ hdr->cmd == (SOF_IPC_GLB_PM_MSG | SOF_IPC_PM_GATE)) {
/*
* memory windows are powered off before sending IPC reply,
- * so we can't read the mailbox for CTX_SAVE reply.
+ * so we can't read the mailbox for CTX_SAVE and PM_GATE
+ * replies.
*/
reply.error = 0;
reply.hdr.cmd = SOF_IPC_GLB_REPLY;
diff --git a/sound/soc/sof/intel/hda-ipc.h b/sound/soc/sof/intel/hda-ipc.h
new file mode 100644
index 000000000000..aef0ceac9803
--- /dev/null
+++ b/sound/soc/sof/intel/hda-ipc.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * Copyright(c) 2019 Intel Corporation. All rights reserved.
+ *
+ * Author: Keyon Jie <yang.jie@linux.intel.com>
+ */
+
+#ifndef __SOF_INTEL_HDA_IPC_H
+#define __SOF_INTEL_HDA_IPC_H
+
+/*
+ * Primary register, mapped to
+ * - DIPCTDR (HIPCIDR) in sideband IPC (cAVS 1.8+)
+ * - DIPCT in cAVS 1.5 IPC
+ *
+ * Secondary register, mapped to:
+ * - DIPCTDD (HIPCIDD) in sideband IPC (cAVS 1.8+)
+ * - DIPCTE in cAVS 1.5 IPC
+ */
+
+/* Common bits in primary register */
+
+/* Reserved for doorbell */
+#define HDA_IPC_RSVD_31 BIT(31)
+/* Target, 0 - normal message, 1 - compact message(cAVS compatible) */
+#define HDA_IPC_MSG_COMPACT BIT(30)
+/* Direction, 0 - request, 1 - response */
+#define HDA_IPC_RSP BIT(29)
+
+#define HDA_IPC_TYPE_SHIFT 24
+#define HDA_IPC_TYPE_MASK GENMASK(28, 24)
+#define HDA_IPC_TYPE(x) ((x) << HDA_IPC_TYPE_SHIFT)
+
+#define HDA_IPC_PM_GATE HDA_IPC_TYPE(0x8U)
+
+/* Command specific payload bits in secondary register */
+
+/* Disable DMA tracing (0 - keep tracing, 1 - disable DMA trace) */
+#define HDA_PM_NO_DMA_TRACE BIT(4)
+/* Prevent clock gating (0 - cg allowed, 1 - DSP clock always on) */
+#define HDA_PM_PCG BIT(3)
+/* Prevent power gating (0 - deep power state transitions allowed, 1 - prevent power gating) */
+#define HDA_PM_PPG BIT(2)
+/* Indicates whether streaming is active */
+#define HDA_PM_PG_STREAMING BIT(1)
+#define HDA_PM_PG_RSVD BIT(0)
+
+#endif
diff --git a/sound/soc/sof/intel/hda-loader.c b/sound/soc/sof/intel/hda-loader.c
index 65c2af3fcaab..b1783360fe10 100644
--- a/sound/soc/sof/intel/hda-loader.c
+++ b/sound/soc/sof/intel/hda-loader.c
@@ -126,7 +126,8 @@ static int cl_dsp_init(struct snd_sof_dev *sdev, const void *fwdata,
HDA_DSP_INIT_TIMEOUT_US);
if (ret < 0) {
- dev_err(sdev->dev, "error: waiting for HIPCIE done\n");
+ dev_err(sdev->dev, "error: %s: timeout for HIPCIE done\n",
+ __func__);
goto err;
}
@@ -152,6 +153,10 @@ static int cl_dsp_init(struct snd_sof_dev *sdev, const void *fwdata,
if (!ret)
return 0;
+ dev_err(sdev->dev,
+ "error: %s: timeout HDA_DSP_SRAM_REG_ROM_STATUS read\n",
+ __func__);
+
err:
hda_dsp_dump(sdev, SOF_DBG_REGS | SOF_DBG_PCI | SOF_DBG_MBOX);
hda_dsp_core_reset_power_down(sdev, chip->cores_mask);
@@ -253,10 +258,22 @@ static int cl_copy_fw(struct snd_sof_dev *sdev, struct hdac_ext_stream *stream)
HDA_DSP_REG_POLL_INTERVAL_US,
HDA_DSP_BASEFW_TIMEOUT_US);
+ /*
+ * even in case of errors we still need to stop the DMAs,
+ * but we return the initial error should the DMA stop also fail
+ */
+
+ if (status < 0) {
+ dev_err(sdev->dev,
+ "error: %s: timeout HDA_DSP_SRAM_REG_ROM_STATUS read\n",
+ __func__);
+ }
+
ret = cl_trigger(sdev, stream, SNDRV_PCM_TRIGGER_STOP);
if (ret < 0) {
dev_err(sdev->dev, "error: DMA trigger stop failed\n");
- return ret;
+ if (!status)
+ status = ret;
}
return status;
@@ -341,13 +358,15 @@ cleanup:
/*
* Perform codeloader stream cleanup.
* This should be done even if firmware loading fails.
+ * If the cleanup also fails, we return the initial error
*/
ret1 = cl_cleanup(sdev, &sdev->dmab, stream);
if (ret1 < 0) {
dev_err(sdev->dev, "error: Code loader DSP cleanup failed\n");
/* set return value to indicate cleanup failure */
- ret = ret1;
+ if (!ret)
+ ret = ret1;
}
/*
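
Both hda-loader.c hunks adopt the same error-propagation rule: when the primary operation has already failed, a failing cleanup must not overwrite the first error. The rule in isolation:

#include <stdio.h>

static int do_work(void)    { return -5; }  /* primary op fails */
static int do_cleanup(void) { return -22; } /* cleanup also fails */

int main(void)
{
	int ret = do_work();
	int ret1 = do_cleanup(); /* always runs, even after failure */

	/* report the cleanup failure only if the primary op succeeded */
	if (ret1 < 0 && !ret)
		ret = ret1;

	printf("ret=%d (first error preserved)\n", ret);
	return 0;
}
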
diff --git a/sound/soc/sof/intel/hda-pcm.c b/sound/soc/sof/intel/hda-pcm.c
index 9b730f183529..575f5f5877d8 100644
--- a/sound/soc/sof/intel/hda-pcm.c
+++ b/sound/soc/sof/intel/hda-pcm.c
@@ -89,6 +89,7 @@ int hda_dsp_pcm_hw_params(struct snd_sof_dev *sdev,
struct hdac_ext_stream *stream = stream_to_hdac_ext_stream(hstream);
struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
struct snd_dma_buffer *dmab;
+ struct sof_ipc_fw_version *v = &sdev->fw_ready.version;
int ret;
u32 size, rate, bits;
@@ -116,9 +117,17 @@ int hda_dsp_pcm_hw_params(struct snd_sof_dev *sdev,
/* disable SPIB, to enable buffer wrap for stream */
hda_dsp_stream_spib_config(sdev, stream, HDA_DSP_SPIB_DISABLE, 0);
- /* set host_period_bytes to 0 if no IPC position */
- if (hda && hda->no_ipc_position)
- ipc_params->host_period_bytes = 0;
+ /* update no_stream_position flag for ipc params */
+ if (hda && hda->no_ipc_position) {
+ /* For older ABIs set host_period_bytes to zero to inform
+ * FW we don't want position updates. Newer versions use
+ * no_stream_position for this purpose.
+ */
+ if (v->abi_version < SOF_ABI_VER(3, 10, 0))
+ ipc_params->host_period_bytes = 0;
+ else
+ ipc_params->no_stream_position = 1;
+ }
ipc_params->stream_tag = hstream->stream_tag;
diff --git a/sound/soc/sof/intel/hda-stream.c b/sound/soc/sof/intel/hda-stream.c
index 0c11fceb28a7..29ab43281670 100644
--- a/sound/soc/sof/intel/hda-stream.c
+++ b/sound/soc/sof/intel/hda-stream.c
@@ -275,8 +275,12 @@ int hda_dsp_stream_trigger(struct snd_sof_dev *sdev,
HDA_DSP_REG_POLL_INTERVAL_US,
HDA_DSP_STREAM_RUN_TIMEOUT);
- if (ret)
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "error: %s: cmd %d: timeout on STREAM_SD_OFFSET read\n",
+ __func__, cmd);
return ret;
+ }
hstream->running = true;
break;
@@ -294,8 +298,12 @@ int hda_dsp_stream_trigger(struct snd_sof_dev *sdev,
HDA_DSP_REG_POLL_INTERVAL_US,
HDA_DSP_STREAM_RUN_TIMEOUT);
- if (ret)
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "error: %s: cmd %d: timeout on STREAM_SD_OFFSET read\n",
+ __func__, cmd);
return ret;
+ }
snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR, sd_offset +
SOF_HDA_ADSP_REG_CL_SD_STS,
@@ -356,8 +364,12 @@ int hda_dsp_stream_hw_params(struct snd_sof_dev *sdev,
HDA_DSP_REG_POLL_INTERVAL_US,
HDA_DSP_STREAM_RUN_TIMEOUT);
- if (ret)
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "error: %s: timeout on STREAM_SD_OFFSET read1\n",
+ __func__);
return ret;
+ }
snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
sd_offset + SOF_HDA_ADSP_REG_CL_SD_STS,
@@ -418,8 +430,12 @@ int hda_dsp_stream_hw_params(struct snd_sof_dev *sdev,
HDA_DSP_REG_POLL_INTERVAL_US,
HDA_DSP_STREAM_RUN_TIMEOUT);
- if (ret)
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "error: %s: timeout on STREAM_SD_OFFSET read2\n",
+ __func__);
return ret;
+ }
snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
sd_offset + SOF_HDA_ADSP_REG_CL_SD_STS,
diff --git a/sound/soc/sof/intel/hda.c b/sound/soc/sof/intel/hda.c
index 06e84679087b..91bd88fddac7 100644
--- a/sound/soc/sof/intel/hda.c
+++ b/sound/soc/sof/intel/hda.c
@@ -32,9 +32,6 @@
/* platform specific devices */
#include "shim.h"
-#define IS_CFL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa348)
-#define IS_CNL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9dc8)
-
#define EXCEPT_MAX_HDR_SIZE 0x400
/*
@@ -56,6 +53,11 @@ MODULE_PARM_DESC(use_msi, "SOF HDA use PCI MSI mode");
static int hda_dmic_num = -1;
module_param_named(dmic_num, hda_dmic_num, int, 0444);
MODULE_PARM_DESC(dmic_num, "SOF HDA DMIC number");
+
+static bool hda_codec_use_common_hdmi =
+ IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_COMMON_HDMI_CODEC);
+module_param_named(use_common_hdmi, hda_codec_use_common_hdmi, bool, 0444);
+MODULE_PARM_DESC(use_common_hdmi, "SOF HDA use common HDMI codec driver");
#endif
static const struct hda_dsp_msg_code hda_dsp_rom_msg[] = {
@@ -262,12 +264,9 @@ static int hda_init(struct snd_sof_dev *sdev)
/* HDA bus init */
sof_hda_bus_init(bus, &pci->dev);
- /* Workaround for a communication error on CFL (bko#199007) and CNL */
- if (IS_CFL(pci) || IS_CNL(pci))
- bus->polling_mode = 1;
-
bus->use_posbuf = 1;
bus->bdl_pos_adj = 0;
+ bus->sync_write = 1;
mutex_init(&hbus->prepare_mutex);
hbus->pci = pci;
@@ -416,9 +415,16 @@ static int hda_init_caps(struct snd_sof_dev *sdev)
pdata->tplg_filename =
hda_mach->sof_tplg_filename;
- /* firmware: pick the first in machine list */
+ /*
+ * firmware: pick the first in machine list,
+ * or use nocodec firmware name if list is empty
+ */
mach = pdata->desc->machines;
- pdata->fw_filename = mach->sof_fw_filename;
+ if (mach->id[0])
+ pdata->fw_filename = mach->sof_fw_filename;
+ else
+ pdata->fw_filename =
+ pdata->desc->nocodec_fw_filename;
dev_info(bus->dev, "using HDA machine driver %s now\n",
hda_mach->drv_name);
@@ -465,6 +471,7 @@ static int hda_init_caps(struct snd_sof_dev *sdev)
&pdata->machine->mach_params;
mach_params->codec_mask = bus->codec_mask;
mach_params->platform = dev_name(sdev->dev);
+ mach_params->common_hdmi_codec_drv = hda_codec_use_common_hdmi;
}
/* create codec instances */
diff --git a/sound/soc/sof/intel/hda.h b/sound/soc/sof/intel/hda.h
index 23e430d3e056..18d7e72bf9b7 100644
--- a/sound/soc/sof/intel/hda.h
+++ b/sound/soc/sof/intel/hda.h
@@ -64,6 +64,13 @@
#define SOF_HDA_PPCTL_PIE BIT(31)
#define SOF_HDA_PPCTL_GPROCEN BIT(30)
+/* Vendor Specific Registers */
+#define SOF_HDA_VS_D0I3C 0x104A
+
+/* D0I3C Register fields */
+#define SOF_HDA_VS_D0I3C_CIP BIT(0) /* Command-In-Progress */
+#define SOF_HDA_VS_D0I3C_I3 BIT(2) /* D0i3 enable bit */
+
/* DPIB entry size: 8 Bytes = 2 DWords */
#define SOF_HDA_DPIB_ENTRY_SIZE 0x8
@@ -207,6 +214,7 @@
#define HDA_DSP_CTRL_RESET_TIMEOUT 100
#define HDA_DSP_WAIT_TIMEOUT 500 /* 500 msec */
#define HDA_DSP_REG_POLL_INTERVAL_US 500 /* 0.5 msec */
+#define HDA_DSP_REG_POLL_RETRY_COUNT 50
#define HDA_DSP_ADSPIC_IPC 1
#define HDA_DSP_ADSPIS_IPC 1
@@ -304,6 +312,7 @@
#define CNL_DSP_REG_HIPCTDD (CNL_DSP_IPC_BASE + 0x08)
#define CNL_DSP_REG_HIPCIDR (CNL_DSP_IPC_BASE + 0x10)
#define CNL_DSP_REG_HIPCIDA (CNL_DSP_IPC_BASE + 0x14)
+#define CNL_DSP_REG_HIPCIDD (CNL_DSP_IPC_BASE + 0x18)
#define CNL_DSP_REG_HIPCCTL (CNL_DSP_IPC_BASE + 0x28)
/* HIPCI */
@@ -399,6 +408,9 @@ struct sof_intel_hda_dev {
int irq;
+ /* PM related */
+ bool l1_support_changed; /* during suspend, is L1SEN changed or not */
+
/* DMIC device */
struct platform_device *dmic_dev;
};
@@ -455,6 +467,9 @@ int hda_dsp_core_reset_power_down(struct snd_sof_dev *sdev,
void hda_dsp_ipc_int_enable(struct snd_sof_dev *sdev);
void hda_dsp_ipc_int_disable(struct snd_sof_dev *sdev);
+int hda_dsp_set_power_state(struct snd_sof_dev *sdev,
+ enum sof_d0_substate d0_substate);
+
int hda_dsp_suspend(struct snd_sof_dev *sdev);
int hda_dsp_resume(struct snd_sof_dev *sdev);
int hda_dsp_runtime_suspend(struct snd_sof_dev *sdev);
@@ -565,7 +580,9 @@ void hda_codec_jack_check(struct snd_sof_dev *sdev);
#endif /* CONFIG_SND_SOC_SOF_HDA */
-#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA) && IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA) && \
+ (IS_ENABLED(CONFIG_SND_HDA_CODEC_HDMI) || \
+ IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI))
void hda_codec_i915_get(struct snd_sof_dev *sdev);
void hda_codec_i915_put(struct snd_sof_dev *sdev);
@@ -579,7 +596,7 @@ static inline void hda_codec_i915_put(struct snd_sof_dev *sdev) { }
static inline int hda_codec_i915_init(struct snd_sof_dev *sdev) { return 0; }
static inline int hda_codec_i915_exit(struct snd_sof_dev *sdev) { return 0; }
-#endif /* CONFIG_SND_SOC_SOF_HDA && CONFIG_SND_SOC_HDAC_HDMI */
+#endif
/*
* Trace Control.
@@ -596,7 +613,6 @@ extern struct snd_soc_dai_driver skl_dai[];
*/
extern const struct snd_sof_dsp_ops sof_apl_ops;
extern const struct snd_sof_dsp_ops sof_cnl_ops;
-extern const struct snd_sof_dsp_ops sof_skl_ops;
extern const struct sof_intel_dsp_desc apl_chip_info;
extern const struct sof_intel_dsp_desc cnl_chip_info;
@@ -604,5 +620,6 @@ extern const struct sof_intel_dsp_desc skl_chip_info;
extern const struct sof_intel_dsp_desc icl_chip_info;
extern const struct sof_intel_dsp_desc tgl_chip_info;
extern const struct sof_intel_dsp_desc ehl_chip_info;
+extern const struct sof_intel_dsp_desc jsl_chip_info;
#endif
diff --git a/sound/soc/sof/ipc.c b/sound/soc/sof/ipc.c
index 086eeeab8679..5994e1073364 100644
--- a/sound/soc/sof/ipc.c
+++ b/sound/soc/sof/ipc.c
@@ -210,9 +210,7 @@ static int tx_wait_done(struct snd_sof_ipc *ipc, struct snd_sof_ipc_msg *msg,
if (ret == 0) {
dev_err(sdev->dev, "error: ipc timed out for 0x%x size %d\n",
hdr->cmd, hdr->size);
- snd_sof_dsp_dbg_dump(ipc->sdev, SOF_DBG_REGS | SOF_DBG_MBOX);
- snd_sof_ipc_dump(ipc->sdev);
- snd_sof_trace_notify_for_error(ipc->sdev);
+ snd_sof_handle_fw_exception(ipc->sdev);
ret = -ETIMEDOUT;
} else {
/* copy the data returned from DSP */
@@ -796,12 +794,6 @@ struct snd_sof_ipc *snd_sof_ipc_init(struct snd_sof_dev *sdev)
struct snd_sof_ipc *ipc;
struct snd_sof_ipc_msg *msg;
- /* check if mandatory ops required for ipc are defined */
- if (!sof_ops(sdev)->fw_ready) {
- dev_err(sdev->dev, "error: ipc mandatory ops not defined\n");
- return NULL;
- }
-
ipc = devm_kzalloc(sdev->dev, sizeof(*ipc), GFP_KERNEL);
if (!ipc)
return NULL;
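The three dump calls removed from tx_wait_done() are folded into the new snd_sof_handle_fw_exception() helper declared in sof-priv.h further down; the fw_ready NULL check can go because sof-priv.h now documents that op as mandatory. A plausible sketch of the helper, assuming it only centralizes the removed calls and honors the new SOF_DBG_RETAIN_CTX flag:

void snd_sof_handle_fw_exception(struct snd_sof_dev *sdev)
{
	if (sof_core_debug & SOF_DBG_RETAIN_CTX)
		/* hold a runtime PM reference so the DSP stays out of
		 * D3 and the firmware context can still be inspected
		 */
		pm_runtime_get_noresume(sdev->dev);

	/* dump vital information to the logs */
	snd_sof_dsp_dbg_dump(sdev, SOF_DBG_REGS | SOF_DBG_MBOX);
	snd_sof_ipc_dump(sdev);
	snd_sof_trace_notify_for_error(sdev);
}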
diff --git a/sound/soc/sof/ops.h b/sound/soc/sof/ops.h
index 824d36fe59fd..93512dcbaacd 100644
--- a/sound/soc/sof/ops.h
+++ b/sound/soc/sof/ops.h
@@ -193,6 +193,16 @@ static inline int snd_sof_dsp_set_clk(struct snd_sof_dev *sdev, u32 freq)
return 0;
}
+static inline int snd_sof_dsp_set_power_state(struct snd_sof_dev *sdev,
+ enum sof_d0_substate substate)
+{
+ if (sof_ops(sdev)->set_power_state)
+ return sof_ops(sdev)->set_power_state(sdev, substate);
+
+ /* D0 substate is not supported */
+ return -ENOTSUPP;
+}
+
/* debug */
static inline void snd_sof_dsp_dbg_dump(struct snd_sof_dev *sdev, u32 flags)
{
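Platforms opt into the new op simply by filling the set_power_state slot; anything that leaves it NULL gets -ENOTSUPP from the wrapper above, which the PM code below treats as "fall back to D3". An illustrative fragment (the ops-table name is hypothetical; hda_dsp_set_power_state is the real helper declared in hda.h earlier):

static const struct snd_sof_dsp_ops sof_example_ops = {
	/* ... mandatory ops elided ... */
	.set_power_state = hda_dsp_set_power_state,
};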
diff --git a/sound/soc/sof/pcm.c b/sound/soc/sof/pcm.c
index 2b876d497447..549238a98b2a 100644
--- a/sound/soc/sof/pcm.c
+++ b/sound/soc/sof/pcm.c
@@ -19,12 +19,11 @@
#define DRV_NAME "sof-audio-component"
/* Create DMA buffer page table for DSP */
-static int create_page_table(struct snd_pcm_substream *substream,
+static int create_page_table(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
unsigned char *dma_area, size_t size)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_component *component =
- snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(component);
struct snd_sof_pcm *spcm;
struct snd_dma_buffer *dmab = snd_pcm_get_dma_buf(substream);
@@ -95,13 +94,12 @@ void snd_sof_pcm_period_elapsed(struct snd_pcm_substream *substream)
EXPORT_SYMBOL(snd_sof_pcm_period_elapsed);
/* this may get called several times by oss emulation */
-static int sof_pcm_hw_params(struct snd_pcm_substream *substream,
+static int sof_pcm_hw_params(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_pcm_runtime *runtime = substream->runtime;
- struct snd_soc_component *component =
- snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(component);
struct snd_sof_pcm *spcm;
struct sof_ipc_pcm_params pcm;
@@ -135,7 +133,7 @@ static int sof_pcm_hw_params(struct snd_pcm_substream *substream,
* ret == 0 means the buffer is not changed
* so no need to regenerate the page table
*/
- ret = create_page_table(substream, runtime->dma_area,
+ ret = create_page_table(component, substream, runtime->dma_area,
runtime->dma_bytes);
if (ret < 0)
return ret;
@@ -237,11 +235,10 @@ static int sof_pcm_dsp_pcm_free(struct snd_pcm_substream *substream,
return ret;
}
-static int sof_pcm_hw_free(struct snd_pcm_substream *substream)
+static int sof_pcm_hw_free(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_component *component =
- snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(component);
struct snd_sof_pcm *spcm;
int ret, err = 0;
@@ -276,11 +273,10 @@ static int sof_pcm_hw_free(struct snd_pcm_substream *substream)
return err;
}
-static int sof_pcm_prepare(struct snd_pcm_substream *substream)
+static int sof_pcm_prepare(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_component *component =
- snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(component);
struct snd_sof_pcm *spcm;
int ret;
@@ -300,7 +296,8 @@ static int sof_pcm_prepare(struct snd_pcm_substream *substream)
substream->stream);
/* set hw_params */
- ret = sof_pcm_hw_params(substream, &spcm->params[substream->stream]);
+ ret = sof_pcm_hw_params(component,
+ substream, &spcm->params[substream->stream]);
if (ret < 0) {
dev_err(sdev->dev, "error: set pcm hw_params after resume\n");
return ret;
@@ -313,11 +310,10 @@ static int sof_pcm_prepare(struct snd_pcm_substream *substream)
* FE dai link trigger actions are always executed in non-atomic context because
* they involve IPCs.
*/
-static int sof_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+static int sof_pcm_trigger(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream, int cmd)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_component *component =
- snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(component);
struct snd_sof_pcm *spcm;
struct sof_ipc_stream stream;
@@ -350,8 +346,18 @@ static int sof_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
stream.hdr.cmd |= SOF_IPC_STREAM_TRIG_RELEASE;
break;
case SNDRV_PCM_TRIGGER_RESUME:
+ if (spcm->stream[substream->stream].suspend_ignored) {
+ /*
+ * This case is triggered when INFO_RESUME is
+ * supported; there is no need to resume streams that
+ * remained enabled in D0ix.
+ */
+ spcm->stream[substream->stream].suspend_ignored = false;
+ return 0;
+ }
+
/* set up hw_params */
- ret = sof_pcm_prepare(substream);
+ ret = sof_pcm_prepare(component, substream);
if (ret < 0) {
dev_err(sdev->dev,
"error: failed to set up hw_params upon resume\n");
@@ -360,9 +366,30 @@ static int sof_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
/* fallthrough */
case SNDRV_PCM_TRIGGER_START:
+ if (spcm->stream[substream->stream].suspend_ignored) {
+ /*
+ * This case is triggered when INFO_RESUME is
+ * not supported; there is no need to re-start streams
+ * that remained enabled in D0ix.
+ */
+ spcm->stream[substream->stream].suspend_ignored = false;
+ return 0;
+ }
stream.hdr.cmd |= SOF_IPC_STREAM_TRIG_START;
break;
case SNDRV_PCM_TRIGGER_SUSPEND:
+ if (sdev->s0_suspend &&
+ spcm->stream[substream->stream].d0i3_compatible) {
+ /*
+ * Trap the event: do not send trigger stop, so the FW
+ * pipelines are not stopped, and set the flag to ignore
+ * the upcoming DAPM PM events.
+ */
+ spcm->stream[substream->stream].suspend_ignored = true;
+ return 0;
+ }
+ /* fallthrough */
case SNDRV_PCM_TRIGGER_STOP:
stream.hdr.cmd |= SOF_IPC_STREAM_TRIG_STOP;
ipc_first = true;
@@ -395,11 +422,10 @@ static int sof_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
return ret;
}
-static snd_pcm_uframes_t sof_pcm_pointer(struct snd_pcm_substream *substream)
+static snd_pcm_uframes_t sof_pcm_pointer(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_component *component =
- snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(component);
struct snd_sof_pcm *spcm;
snd_pcm_uframes_t host, dai;
@@ -428,13 +454,13 @@ static snd_pcm_uframes_t sof_pcm_pointer(struct snd_pcm_substream *substream)
return host;
}
-static int sof_pcm_open(struct snd_pcm_substream *substream)
+static int sof_pcm_open(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_pcm_runtime *runtime = substream->runtime;
- struct snd_soc_component *component =
- snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(component);
+ const struct snd_sof_dsp_ops *ops = sof_ops(sdev);
struct snd_sof_pcm *spcm;
struct snd_soc_tplg_stream_caps *caps;
int ret;
@@ -464,11 +490,8 @@ static int sof_pcm_open(struct snd_pcm_substream *substream)
le32_to_cpu(caps->period_size_min));
/* set runtime config */
- runtime->hw.info = SNDRV_PCM_INFO_MMAP |
- SNDRV_PCM_INFO_MMAP_VALID |
- SNDRV_PCM_INFO_INTERLEAVED |
- SNDRV_PCM_INFO_PAUSE |
- SNDRV_PCM_INFO_NO_PERIOD_WAKEUP;
+ runtime->hw.info = ops->hw_info; /* platform-specific */
+
runtime->hw.formats = le64_to_cpu(caps->formats);
runtime->hw.period_bytes_min = le32_to_cpu(caps->period_size_min);
runtime->hw.period_bytes_max = le32_to_cpu(caps->period_size_max);
@@ -505,11 +528,10 @@ static int sof_pcm_open(struct snd_pcm_substream *substream)
return ret;
}
-static int sof_pcm_close(struct snd_pcm_substream *substream)
+static int sof_pcm_close(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_component *component =
- snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(component);
struct snd_sof_pcm *spcm;
int err;
@@ -538,27 +560,14 @@ static int sof_pcm_close(struct snd_pcm_substream *substream)
return 0;
}
-static struct snd_pcm_ops sof_pcm_ops = {
- .open = sof_pcm_open,
- .close = sof_pcm_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = sof_pcm_hw_params,
- .prepare = sof_pcm_prepare,
- .hw_free = sof_pcm_hw_free,
- .trigger = sof_pcm_trigger,
- .pointer = sof_pcm_pointer,
- .page = snd_pcm_sgbuf_ops_page,
-};
-
/*
* Pre-allocate playback/capture audio buffer pages.
* no need to explicitly release memory preallocated by sof_pcm_new in pcm_free
* snd_pcm_lib_preallocate_free_for_all() is called by the core.
*/
-static int sof_pcm_new(struct snd_soc_pcm_runtime *rtd)
+static int sof_pcm_new(struct snd_soc_component *component,
+ struct snd_soc_pcm_runtime *rtd)
{
- struct snd_soc_component *component =
- snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(component);
struct snd_sof_pcm *spcm;
struct snd_pcm *pcm = rtd->pcm;
@@ -691,6 +700,14 @@ static int sof_pcm_dai_link_fixup(struct snd_soc_pcm_runtime *rtd,
case SOF_DAI_INTEL_ALH:
/* do nothing for ALH dai_link */
break;
+ case SOF_DAI_IMX_ESAI:
+ channels->min = dai->dai_config->esai.tdm_slots;
+ channels->max = dai->dai_config->esai.tdm_slots;
+
+ dev_dbg(sdev->dev,
+ "channels_min: %d channels_max: %d\n",
+ channels->min, channels->max);
+ break;
default:
dev_err(sdev->dev, "error: invalid DAI type %d\n",
dai->dai_config->type);
@@ -752,11 +769,19 @@ void snd_sof_new_platform_drv(struct snd_sof_dev *sdev)
pd->name = "sof-audio-component";
pd->probe = sof_pcm_probe;
pd->remove = sof_pcm_remove;
- pd->ops = &sof_pcm_ops;
+ pd->open = sof_pcm_open;
+ pd->close = sof_pcm_close;
+ pd->ioctl = snd_soc_pcm_lib_ioctl;
+ pd->hw_params = sof_pcm_hw_params;
+ pd->prepare = sof_pcm_prepare;
+ pd->hw_free = sof_pcm_hw_free;
+ pd->trigger = sof_pcm_trigger;
+ pd->pointer = sof_pcm_pointer;
+
#if IS_ENABLED(CONFIG_SND_SOC_SOF_COMPRESS)
pd->compr_ops = &sof_compressed_ops;
#endif
- pd->pcm_new = sof_pcm_new;
+ pd->pcm_construct = sof_pcm_new;
pd->ignore_machine = drv_name;
pd->be_hw_params_fixup = sof_pcm_dai_link_fixup;
pd->be_pcm_base = SOF_BE_PCM_BASE;
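The conversion above — dropping struct snd_pcm_ops plus the snd_soc_rtdcom_lookup() dance in favor of component-driver callbacks that receive the component directly — is the same mechanical change applied to the sprd and stm32 drivers later in this diff. Its shape in miniature (driver names are hypothetical):

static int foo_pcm_open(struct snd_soc_component *component,
			struct snd_pcm_substream *substream)
{
	/* the component arrives as an argument; the old
	 * snd_soc_rtdcom_lookup(rtd, DRV_NAME) lookup is gone
	 */
	dev_dbg(component->dev, "pcm open\n");
	return 0;
}

static const struct snd_soc_component_driver foo_component = {
	.name	= "foo",
	.open	= foo_pcm_open,
	.ioctl	= snd_soc_pcm_lib_ioctl,
};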
diff --git a/sound/soc/sof/pm.c b/sound/soc/sof/pm.c
index e23beaeefe00..0fd5567237a8 100644
--- a/sound/soc/sof/pm.c
+++ b/sound/soc/sof/pm.c
@@ -197,7 +197,7 @@ static int sof_restore_pipelines(struct snd_sof_dev *sdev)
return ret;
}
-static int sof_send_pm_ipc(struct snd_sof_dev *sdev, int cmd)
+static int sof_send_pm_ctx_ipc(struct snd_sof_dev *sdev, int cmd)
{
struct sof_ipc_pm_ctx pm_ctx;
struct sof_ipc_reply reply;
@@ -320,12 +320,15 @@ static int sof_resume(struct device *dev, bool runtime_resume)
}
/* notify DSP of system resume */
- ret = sof_send_pm_ipc(sdev, SOF_IPC_PM_CTX_RESTORE);
+ ret = sof_send_pm_ctx_ipc(sdev, SOF_IPC_PM_CTX_RESTORE);
if (ret < 0)
dev_err(sdev->dev,
"error: ctx_restore ipc error during resume %d\n",
ret);
+ /* initialize default D0 sub-state */
+ sdev->d0_substate = SOF_DSP_D0I0;
+
return ret;
}
@@ -358,7 +361,7 @@ static int sof_suspend(struct device *dev, bool runtime_suspend)
sof_cache_debugfs(sdev);
#endif
/* notify DSP of upcoming power down */
- ret = sof_send_pm_ipc(sdev, SOF_IPC_PM_CTX_SAVE);
+ ret = sof_send_pm_ctx_ipc(sdev, SOF_IPC_PM_CTX_SAVE);
if (ret == -EBUSY || ret == -EAGAIN) {
/*
* runtime PM has logic to handle -EBUSY/-EAGAIN so
@@ -408,14 +411,135 @@ int snd_sof_runtime_resume(struct device *dev)
}
EXPORT_SYMBOL(snd_sof_runtime_resume);
+int snd_sof_set_d0_substate(struct snd_sof_dev *sdev,
+ enum sof_d0_substate d0_substate)
+{
+ int ret;
+
+ if (sdev->d0_substate == d0_substate)
+ return 0;
+
+ /* do platform specific set_state */
+ ret = snd_sof_dsp_set_power_state(sdev, d0_substate);
+ if (ret < 0)
+ return ret;
+
+ /* update dsp D0 sub-state */
+ sdev->d0_substate = d0_substate;
+
+ return 0;
+}
+EXPORT_SYMBOL(snd_sof_set_d0_substate);
+
+/*
+ * Audio DSP states may transition as below:
+ *
+ * D0I3 compatible stream
+ * Runtime +---------------------+ opened only, timeout
+ * suspend | +--------------------+
+ * +------------+ D0(active) | |
+ * | | <---------------+ |
+ * | +--------> | | |
+ * | |Runtime +--^--+---------^--+--+ The last | |
+ * | |resume | | | | opened D0I3 | |
+ * | | | | | | compatible | |
+ * | | resume| | | | stream closed | |
+ * | | from | | D3 | | | |
+ * | | D3 | |suspend | | d0i3 | |
+ * | | | | | |suspend | |
+ * | | | | | | | |
+ * | | | | | | | |
+ * +-v---+-----------+--v-------+ | | +------+----v----+
+ * | | | +-----------> |
+ * | D3 (suspended) | | | D0I3 +-----+
+ * | | +--------------+ | |
+ * | | resume from | | |
+ * +-------------------^--------+ d0i3 suspend +----------------+ |
+ * | |
+ * | D3 suspend |
+ * +------------------------------------------------+
+ *
+ * d0i3_suspend = s0_suspend && D0I3-compatible stream opened,
+ * D3 suspend = !d0i3_suspend
+ */
+
int snd_sof_resume(struct device *dev)
{
+ struct snd_sof_dev *sdev = dev_get_drvdata(dev);
+ int ret;
+
+ if (snd_sof_dsp_d0i3_on_suspend(sdev)) {
+ /* resume from D0I3 */
+ dev_dbg(sdev->dev, "DSP will exit from D0i3...\n");
+ ret = snd_sof_set_d0_substate(sdev, SOF_DSP_D0I0);
+ if (ret == -ENOTSUPP) {
+ /* fallback to resume from D3 */
+ dev_dbg(sdev->dev, "D0i3 not supported, fall back to resume from D3...\n");
+ goto d3_resume;
+ } else if (ret < 0) {
+ dev_err(sdev->dev, "error: failed to exit from D0I3 %d\n",
+ ret);
+ return ret;
+ }
+
+ /* platform-specific resume from D0i3 */
+ return snd_sof_dsp_resume(sdev);
+ }
+
+d3_resume:
+ /* resume from D3 */
return sof_resume(dev, false);
}
EXPORT_SYMBOL(snd_sof_resume);
int snd_sof_suspend(struct device *dev)
{
+ struct snd_sof_dev *sdev = dev_get_drvdata(dev);
+ int ret;
+
+ if (snd_sof_dsp_d0i3_on_suspend(sdev)) {
+ /* suspend to D0i3 */
+ dev_dbg(sdev->dev, "DSP is trying to enter D0i3...\n");
+ ret = snd_sof_set_d0_substate(sdev, SOF_DSP_D0I3);
+ if (ret == -ENOTSUPP) {
+ /* fallback to D3 suspend */
+ dev_dbg(sdev->dev, "D0i3 not supported, fall back to D3...\n");
+ goto d3_suspend;
+ } else if (ret < 0) {
+ dev_err(sdev->dev, "error: failed to enter D0I3, %d\n",
+ ret);
+ return ret;
+ }
+
+ /* platform-specific suspend to D0i3 */
+ return snd_sof_dsp_suspend(sdev);
+ }
+
+d3_suspend:
+ /* suspend to D3 */
return sof_suspend(dev, false);
}
EXPORT_SYMBOL(snd_sof_suspend);
+
+int snd_sof_prepare(struct device *dev)
+{
+ struct snd_sof_dev *sdev = dev_get_drvdata(dev);
+
+#if defined(CONFIG_ACPI)
+ sdev->s0_suspend = acpi_target_system_state() == ACPI_STATE_S0;
+#else
+ /* will suspend to S3 by default */
+ sdev->s0_suspend = false;
+#endif
+
+ return 0;
+}
+EXPORT_SYMBOL(snd_sof_prepare);
+
+void snd_sof_complete(struct device *dev)
+{
+ struct snd_sof_dev *sdev = dev_get_drvdata(dev);
+
+ sdev->s0_suspend = false;
+}
+EXPORT_SYMBOL(snd_sof_complete);
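snd_sof_dsp_d0i3_on_suspend() itself is only declared in this series (see sof-priv.h below). Per the diagram's "d0i3_suspend = s0_suspend && D0I3 stream opened" condition, one plausible implementation — a sketch, not the actual body — walks the PCM list for streams the trigger handler left running:

bool snd_sof_dsp_d0i3_on_suspend(struct snd_sof_dev *sdev)
{
	struct snd_sof_pcm *spcm;

	/* sketch only: a D0I3-compatible stream trapped at
	 * SNDRV_PCM_TRIGGER_SUSPEND has suspend_ignored set,
	 * which already implies sdev->s0_suspend was true
	 */
	list_for_each_entry(spcm, &sdev->pcm_list, list) {
		if (spcm->stream[SNDRV_PCM_STREAM_PLAYBACK].suspend_ignored ||
		    spcm->stream[SNDRV_PCM_STREAM_CAPTURE].suspend_ignored)
			return true;
	}

	return false;
}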
diff --git a/sound/soc/sof/sof-acpi-dev.c b/sound/soc/sof/sof-acpi-dev.c
index ea7b8b895412..df318f50dd0b 100644
--- a/sound/soc/sof/sof-acpi-dev.c
+++ b/sound/soc/sof/sof-acpi-dev.c
@@ -29,6 +29,12 @@ static char *tplg_path;
module_param(tplg_path, charp, 0444);
MODULE_PARM_DESC(tplg_path, "alternate path for SOF topology.");
+static int sof_acpi_debug;
+module_param_named(sof_acpi_debug, sof_acpi_debug, int, 0444);
+MODULE_PARM_DESC(sof_acpi_debug, "SOF ACPI debug options (0x0 all off)");
+
+#define SOF_ACPI_DISABLE_PM_RUNTIME BIT(0)
+
#if IS_ENABLED(CONFIG_SND_SOC_SOF_HASWELL)
static const struct sof_dev_desc sof_acpi_haswell_desc = {
.machines = snd_soc_acpi_intel_haswell_machines,
@@ -121,6 +127,9 @@ static const struct dev_pm_ops sof_acpi_pm = {
static void sof_acpi_probe_complete(struct device *dev)
{
+ if (sof_acpi_debug & SOF_ACPI_DISABLE_PM_RUNTIME)
+ return;
+
/* allow runtime_pm */
pm_runtime_set_autosuspend_delay(dev, SND_SOF_SUSPEND_DELAY_MS);
pm_runtime_use_autosuspend(dev);
@@ -221,7 +230,8 @@ static int sof_acpi_probe(struct platform_device *pdev)
static int sof_acpi_remove(struct platform_device *pdev)
{
- pm_runtime_disable(&pdev->dev);
+ if (!(sof_acpi_debug & SOF_ACPI_DISABLE_PM_RUNTIME))
+ pm_runtime_disable(&pdev->dev);
/* call sof helper for DSP hardware remove */
snd_sof_device_remove(&pdev->dev);
diff --git a/sound/soc/sof/sof-pci-dev.c b/sound/soc/sof/sof-pci-dev.c
index d66412a77873..bbeffd932de7 100644
--- a/sound/soc/sof/sof-pci-dev.c
+++ b/sound/soc/sof/sof-pci-dev.c
@@ -12,6 +12,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
+#include <sound/intel-dsp-config.h>
#include <sound/soc-acpi.h>
#include <sound/soc-acpi-intel-match.h>
#include <sound/sof.h>
@@ -29,6 +30,12 @@ static char *tplg_path;
module_param(tplg_path, charp, 0444);
MODULE_PARM_DESC(tplg_path, "alternate path for SOF topology.");
+static int sof_pci_debug;
+module_param_named(sof_pci_debug, sof_pci_debug, int, 0444);
+MODULE_PARM_DESC(sof_pci_debug, "SOF PCI debug options (0x0 all off)");
+
+#define SOF_PCI_DISABLE_PM_RUNTIME BIT(0)
+
#if IS_ENABLED(CONFIG_SND_SOC_SOF_APOLLOLAKE)
static const struct sof_dev_desc bxt_desc = {
.machines = snd_soc_acpi_intel_bxt_machines,
@@ -113,7 +120,7 @@ static const struct sof_dev_desc cnl_desc = {
#if IS_ENABLED(CONFIG_SND_SOC_SOF_COFFEELAKE)
static const struct sof_dev_desc cfl_desc = {
- .machines = snd_soc_acpi_intel_cnl_machines,
+ .machines = snd_soc_acpi_intel_cfl_machines,
.resindex_lpe_base = 0,
.resindex_pcicfg_base = -1,
.resindex_imr_base = -1,
@@ -122,7 +129,7 @@ static const struct sof_dev_desc cfl_desc = {
.chip_info = &cnl_chip_info,
.default_fw_path = "intel/sof",
.default_tplg_path = "intel/sof-tplg",
- .nocodec_fw_filename = "sof-cnl.ri",
+ .nocodec_fw_filename = "sof-cfl.ri",
.nocodec_tplg_filename = "sof-cnl-nocodec.tplg",
.ops = &sof_cnl_ops,
.arch_ops = &sof_xtensa_arch_ops
@@ -133,7 +140,7 @@ static const struct sof_dev_desc cfl_desc = {
IS_ENABLED(CONFIG_SND_SOC_SOF_COMETLAKE_H)
static const struct sof_dev_desc cml_desc = {
- .machines = snd_soc_acpi_intel_cnl_machines,
+ .machines = snd_soc_acpi_intel_cml_machines,
.resindex_lpe_base = 0,
.resindex_pcicfg_base = -1,
.resindex_imr_base = -1,
@@ -142,7 +149,7 @@ static const struct sof_dev_desc cml_desc = {
.chip_info = &cnl_chip_info,
.default_fw_path = "intel/sof",
.default_tplg_path = "intel/sof-tplg",
- .nocodec_fw_filename = "sof-cnl.ri",
+ .nocodec_fw_filename = "sof-cml.ri",
.nocodec_tplg_filename = "sof-cnl-nocodec.tplg",
.ops = &sof_cnl_ops,
.arch_ops = &sof_xtensa_arch_ops
@@ -167,42 +174,6 @@ static const struct sof_dev_desc icl_desc = {
};
#endif
-#if IS_ENABLED(CONFIG_SND_SOC_SOF_SKYLAKE)
-static const struct sof_dev_desc skl_desc = {
- .machines = snd_soc_acpi_intel_skl_machines,
- .resindex_lpe_base = 0,
- .resindex_pcicfg_base = -1,
- .resindex_imr_base = -1,
- .irqindex_host_ipc = -1,
- .resindex_dma_base = -1,
- .chip_info = &skl_chip_info,
- .default_fw_path = "intel/sof",
- .default_tplg_path = "intel/sof-tplg",
- .nocodec_fw_filename = "sof-skl.ri",
- .nocodec_tplg_filename = "sof-skl-nocodec.tplg",
- .ops = &sof_skl_ops,
- .arch_ops = &sof_xtensa_arch_ops
-};
-#endif
-
-#if IS_ENABLED(CONFIG_SND_SOC_SOF_KABYLAKE)
-static const struct sof_dev_desc kbl_desc = {
- .machines = snd_soc_acpi_intel_kbl_machines,
- .resindex_lpe_base = 0,
- .resindex_pcicfg_base = -1,
- .resindex_imr_base = -1,
- .irqindex_host_ipc = -1,
- .resindex_dma_base = -1,
- .chip_info = &skl_chip_info,
- .default_fw_path = "intel/sof",
- .default_tplg_path = "intel/sof-tplg",
- .nocodec_fw_filename = "sof-kbl.ri",
- .nocodec_tplg_filename = "sof-kbl-nocodec.tplg",
- .ops = &sof_skl_ops,
- .arch_ops = &sof_xtensa_arch_ops
-};
-#endif
-
#if IS_ENABLED(CONFIG_SND_SOC_SOF_TIGERLAKE)
static const struct sof_dev_desc tgl_desc = {
.machines = snd_soc_acpi_intel_tgl_machines,
@@ -239,7 +210,27 @@ static const struct sof_dev_desc ehl_desc = {
};
#endif
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_JASPERLAKE)
+static const struct sof_dev_desc jsl_desc = {
+ .machines = snd_soc_acpi_intel_jsl_machines,
+ .resindex_lpe_base = 0,
+ .resindex_pcicfg_base = -1,
+ .resindex_imr_base = -1,
+ .irqindex_host_ipc = -1,
+ .resindex_dma_base = -1,
+ .chip_info = &jsl_chip_info,
+ .default_fw_path = "intel/sof",
+ .default_tplg_path = "intel/sof-tplg",
+ .nocodec_fw_filename = "sof-jsl.ri",
+ .nocodec_tplg_filename = "sof-jsl-nocodec.tplg",
+ .ops = &sof_cnl_ops,
+ .arch_ops = &sof_xtensa_arch_ops
+};
+#endif
+
static const struct dev_pm_ops sof_pci_pm = {
+ .prepare = snd_sof_prepare,
+ .complete = snd_sof_complete,
SET_SYSTEM_SLEEP_PM_OPS(snd_sof_suspend, snd_sof_resume)
SET_RUNTIME_PM_OPS(snd_sof_runtime_suspend, snd_sof_runtime_resume,
snd_sof_runtime_idle)
@@ -249,6 +240,9 @@ static void sof_pci_probe_complete(struct device *dev)
{
dev_dbg(dev, "Completing SOF PCI probe");
+ if (sof_pci_debug & SOF_PCI_DISABLE_PM_RUNTIME)
+ return;
+
/* allow runtime_pm */
pm_runtime_set_autosuspend_delay(dev, SND_SOF_SUSPEND_DELAY_MS);
pm_runtime_use_autosuspend(dev);
@@ -277,6 +271,11 @@ static int sof_pci_probe(struct pci_dev *pci,
const struct snd_sof_dsp_ops *ops;
int ret;
+ ret = snd_intel_dsp_driver_probe(pci);
+ if (ret != SND_INTEL_DSP_DRIVER_ANY &&
+ ret != SND_INTEL_DSP_DRIVER_SOF)
+ return -ENODEV;
+
dev_dbg(&pci->dev, "PCI DSP detected");
/* get ops for platform */
@@ -370,7 +369,8 @@ static void sof_pci_remove(struct pci_dev *pci)
snd_sof_device_remove(&pci->dev);
/* follow recommendation in pci-driver.c to increment usage counter */
- pm_runtime_get_noresume(&pci->dev);
+ if (!(sof_pci_debug & SOF_PCI_DISABLE_PM_RUNTIME))
+ pm_runtime_get_noresume(&pci->dev);
/* release pci regions and disable device */
pci_release_regions(pci);
@@ -401,18 +401,14 @@ static const struct pci_device_id sof_pci_ids[] = {
{ PCI_DEVICE(0x8086, 0xa348),
.driver_data = (unsigned long)&cfl_desc},
#endif
-#if IS_ENABLED(CONFIG_SND_SOC_SOF_KABYLAKE)
- { PCI_DEVICE(0x8086, 0x9d71),
- .driver_data = (unsigned long)&kbl_desc},
-#endif
-#if IS_ENABLED(CONFIG_SND_SOC_SOF_SKYLAKE)
- { PCI_DEVICE(0x8086, 0x9d70),
- .driver_data = (unsigned long)&skl_desc},
-#endif
#if IS_ENABLED(CONFIG_SND_SOC_SOF_ICELAKE)
{ PCI_DEVICE(0x8086, 0x34C8),
.driver_data = (unsigned long)&icl_desc},
#endif
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_JASPERLAKE)
+ { PCI_DEVICE(0x8086, 0x38c8),
+ .driver_data = (unsigned long)&jsl_desc},
+#endif
#if IS_ENABLED(CONFIG_SND_SOC_SOF_COMETLAKE_LP)
{ PCI_DEVICE(0x8086, 0x02c8),
.driver_data = (unsigned long)&cml_desc},
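Both new debug module parameters gate runtime PM the same way: with bit 0 set, the probe-complete hook returns before enabling autosuspend and the remove path skips the matching pm_runtime calls. For example, loading the PCI driver with "sof_pci_debug=0x1" (e.g. "modprobe snd-sof-pci sof_pci_debug=0x1", assuming the usual module name) keeps the DSP from ever runtime-suspending, which is handy when debugging power-state transitions.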
diff --git a/sound/soc/sof/sof-priv.h b/sound/soc/sof/sof-priv.h
index 730f3259dd02..c7c2c70ee4d0 100644
--- a/sound/soc/sof/sof-priv.h
+++ b/sound/soc/sof/sof-priv.h
@@ -15,6 +15,7 @@
#include <sound/hdaudio.h>
#include <sound/soc.h>
+#include <sound/control.h>
#include <sound/sof.h>
#include <sound/sof/stream.h> /* needs to be included before control.h */
@@ -28,10 +29,15 @@
#include <uapi/sound/sof/fw.h>
/* debug flags */
-#define SOF_DBG_REGS BIT(1)
-#define SOF_DBG_MBOX BIT(2)
-#define SOF_DBG_TEXT BIT(3)
-#define SOF_DBG_PCI BIT(4)
+#define SOF_DBG_ENABLE_TRACE BIT(0)
+#define SOF_DBG_REGS BIT(1)
+#define SOF_DBG_MBOX BIT(2)
+#define SOF_DBG_TEXT BIT(3)
+#define SOF_DBG_PCI BIT(4)
+#define SOF_DBG_RETAIN_CTX BIT(5) /* prevent DSP D3 on FW exception */
+
+/* global debug state set by SOF_DBG_ flags */
+extern int sof_core_debug;
/* max BARs mmaped devices can use */
#define SND_SOF_BARS 8
@@ -62,6 +68,12 @@
#define DMA_CHAN_INVALID 0xFFFFFFFF
+/* DSP D0ix sub-state */
+enum sof_d0_substate {
+ SOF_DSP_D0I0 = 0, /* DSP default D0 substate */
+ SOF_DSP_D0I3, /* DSP D0i3 (low power) substate */
+};
+
struct snd_sof_dev;
struct snd_sof_ipc_msg;
struct snd_sof_ipc;
@@ -128,7 +140,7 @@ struct snd_sof_dsp_ops {
* FW ready checks for ABI compatibility and creates
* memory windows at first boot
*/
- int (*fw_ready)(struct snd_sof_dev *sdev, u32 msg_id); /* optional */
+ int (*fw_ready)(struct snd_sof_dev *sdev, u32 msg_id); /* mandatory */
/* connect pcm substream to a host stream */
int (*pcm_open)(struct snd_sof_dev *sdev,
@@ -177,6 +189,8 @@ struct snd_sof_dsp_ops {
int (*runtime_resume)(struct snd_sof_dev *sof_dev); /* optional */
int (*runtime_idle)(struct snd_sof_dev *sof_dev); /* optional */
int (*set_hw_params_upon_resume)(struct snd_sof_dev *sdev); /* optional */
+ int (*set_power_state)(struct snd_sof_dev *sdev,
+ enum sof_d0_substate d0_substate); /* optional */
/* DSP clocking */
int (*set_clk)(struct snd_sof_dev *sof_dev, u32 freq); /* optional */
@@ -205,6 +219,9 @@ struct snd_sof_dsp_ops {
/* DAI ops */
struct snd_soc_dai_driver *drv;
int num_drv;
+
+ /* ALSA HW info flags, stored in snd_pcm_runtime.hw.info */
+ u32 hw_info;
};
/* DSP architecture specific callbacks for oops and stack dumps */
@@ -293,6 +310,12 @@ struct snd_sof_pcm_stream {
struct sof_ipc_stream_posn posn;
struct snd_pcm_substream *substream;
struct work_struct period_elapsed_work;
+ bool d0i3_compatible; /* DSP can be in D0I3 when this pcm is opened */
+ /*
+ * flag indicating whether the DSP pipelines should be
+ * kept active while suspending the stream
+ */
+ bool suspend_ignored;
};
/* ALSA SOF PCM device */
@@ -305,6 +328,12 @@ struct snd_sof_pcm {
bool prepared[2]; /* PCM_PARAMS set successfully */
};
+struct snd_sof_led_control {
+ unsigned int use_led;
+ unsigned int direction;
+ unsigned int led_value;
+};
+
/* ALSA SOF Kcontrol device */
struct snd_sof_control {
struct snd_sof_dev *sdev;
@@ -319,6 +348,8 @@ struct snd_sof_control {
u32 *volume_table; /* volume table computed from tlv data*/
struct list_head list; /* list in sdev control list */
+
+ struct snd_sof_led_control led_ctl;
};
/* ASoC SOF DAPM widget */
@@ -370,6 +401,11 @@ struct snd_sof_dev {
*/
struct snd_soc_component_driver plat_drv;
+ /* power states related */
+ enum sof_d0_substate d0_substate;
+ /* flag to track if the intended power target of suspend is S0ix */
+ bool s0_suspend;
+
/* DSP firmware boot */
wait_queue_head_t boot_wait;
u32 boot_complete;
@@ -434,6 +470,7 @@ struct snd_sof_dev {
int dma_trace_pages;
wait_queue_head_t trace_sleep;
u32 host_offset;
+ u32 dtrace_is_supported; /* set with Kconfig or module parameter */
u32 dtrace_is_enabled;
u32 dtrace_error;
u32 dtrace_draining;
@@ -455,6 +492,10 @@ int snd_sof_runtime_resume(struct device *dev);
int snd_sof_runtime_idle(struct device *dev);
int snd_sof_resume(struct device *dev);
int snd_sof_suspend(struct device *dev);
+int snd_sof_prepare(struct device *dev);
+void snd_sof_complete(struct device *dev);
+int snd_sof_set_d0_substate(struct snd_sof_dev *sdev,
+ enum sof_d0_substate d0_substate);
void snd_sof_new_platform_drv(struct snd_sof_dev *sdev);
@@ -512,6 +553,8 @@ struct snd_sof_pcm *snd_sof_find_spcm_dai(struct snd_sof_dev *sdev,
return NULL;
}
+bool snd_sof_dsp_d0i3_on_suspend(struct snd_sof_dev *sdev);
+
struct snd_sof_pcm *snd_sof_find_spcm_name(struct snd_sof_dev *sdev,
const char *name);
struct snd_sof_pcm *snd_sof_find_spcm_comp(struct snd_sof_dev *sdev,
@@ -575,6 +618,7 @@ void snd_sof_get_status(struct snd_sof_dev *sdev, u32 panic_code,
struct sof_ipc_panic_info *panic_info,
void *stack, size_t stack_words);
int snd_sof_init_trace_ipc(struct snd_sof_dev *sdev);
+void snd_sof_handle_fw_exception(struct snd_sof_dev *sdev);
/*
* Platform specific ops.
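The new hw_info field moves the runtime flags that sof_pcm_open() used to hardcode (see the pcm.c hunk earlier) into per-platform ops. Illustrative only — the ops-table name is made up, and the flag set shown is simply the one removed from pcm.c:

static const struct snd_sof_dsp_ops sof_some_platform_ops = {
	/* ... other ops elided ... */
	.hw_info = SNDRV_PCM_INFO_MMAP |
		   SNDRV_PCM_INFO_MMAP_VALID |
		   SNDRV_PCM_INFO_INTERLEAVED |
		   SNDRV_PCM_INFO_PAUSE |
		   SNDRV_PCM_INFO_NO_PERIOD_WAKEUP,
};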
diff --git a/sound/soc/sof/topology.c b/sound/soc/sof/topology.c
index 4452594c2e17..d82ab981e840 100644
--- a/sound/soc/sof/topology.c
+++ b/sound/soc/sof/topology.c
@@ -135,7 +135,9 @@ static int sof_keyword_dapm_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *k, int event)
{
struct snd_sof_widget *swidget = w->dobj.private;
+ int stream = SNDRV_PCM_STREAM_CAPTURE;
struct snd_sof_dev *sdev;
+ struct snd_sof_pcm *spcm;
int ret = 0;
if (!swidget)
@@ -146,11 +148,24 @@ static int sof_keyword_dapm_event(struct snd_soc_dapm_widget *w,
dev_dbg(sdev->dev, "received event %d for widget %s\n",
event, w->name);
+ /* get runtime PCM params using widget's stream name */
+ spcm = snd_sof_find_spcm_name(sdev, swidget->widget->sname);
+ if (!spcm) {
+ dev_err(sdev->dev, "error: cannot find PCM for %s\n",
+ swidget->widget->name);
+ return -EINVAL;
+ }
+
/* process events */
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
+ if (spcm->stream[stream].suspend_ignored) {
+ dev_dbg(sdev->dev, "PRE_PMU event ignored, KWD pipeline is already RUNNING\n");
+ return 0;
+ }
+
/* set pcm params */
- ret = ipc_pcm_params(swidget, SOF_IPC_STREAM_CAPTURE);
+ ret = ipc_pcm_params(swidget, stream);
if (ret < 0) {
dev_err(sdev->dev,
"error: failed to set pcm params for widget %s\n",
@@ -166,6 +181,11 @@ static int sof_keyword_dapm_event(struct snd_soc_dapm_widget *w,
swidget->widget->name);
break;
case SND_SOC_DAPM_POST_PMD:
+ if (spcm->stream[stream].suspend_ignored) {
+ dev_dbg(sdev->dev, "POST_PMD event ignored, KWD pipeline will remain RUNNING\n");
+ return 0;
+ }
+
/* stop trigger */
ret = ipc_trigger(swidget, SOF_IPC_STREAM_TRIG_STOP);
if (ret < 0)
@@ -433,164 +453,6 @@ static enum sof_comp_type find_process_comp_type(enum sof_ipc_process_type type)
}
/*
- * Standard Kcontrols.
- */
-
-static int sof_control_load_volume(struct snd_soc_component *scomp,
- struct snd_sof_control *scontrol,
- struct snd_kcontrol_new *kc,
- struct snd_soc_tplg_ctl_hdr *hdr)
-{
- struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
- struct snd_soc_tplg_mixer_control *mc =
- container_of(hdr, struct snd_soc_tplg_mixer_control, hdr);
- struct sof_ipc_ctrl_data *cdata;
- int tlv[TLV_ITEMS];
- unsigned int i;
- int ret;
-
- /* validate topology data */
- if (le32_to_cpu(mc->num_channels) > SND_SOC_TPLG_MAX_CHAN)
- return -EINVAL;
-
- /* init the volume get/put data */
- scontrol->size = struct_size(scontrol->control_data, chanv,
- le32_to_cpu(mc->num_channels));
- scontrol->control_data = kzalloc(scontrol->size, GFP_KERNEL);
- if (!scontrol->control_data)
- return -ENOMEM;
-
- scontrol->comp_id = sdev->next_comp_id;
- scontrol->min_volume_step = le32_to_cpu(mc->min);
- scontrol->max_volume_step = le32_to_cpu(mc->max);
- scontrol->num_channels = le32_to_cpu(mc->num_channels);
-
- /* set cmd for mixer control */
- if (le32_to_cpu(mc->max) == 1) {
- scontrol->cmd = SOF_CTRL_CMD_SWITCH;
- goto out;
- }
-
- scontrol->cmd = SOF_CTRL_CMD_VOLUME;
-
- /* extract tlv data */
- if (get_tlv_data(kc->tlv.p, tlv) < 0) {
- dev_err(sdev->dev, "error: invalid TLV data\n");
- return -EINVAL;
- }
-
- /* set up volume table */
- ret = set_up_volume_table(scontrol, tlv, le32_to_cpu(mc->max) + 1);
- if (ret < 0) {
- dev_err(sdev->dev, "error: setting up volume table\n");
- return ret;
- }
-
- /* set default volume values to 0dB in control */
- cdata = scontrol->control_data;
- for (i = 0; i < scontrol->num_channels; i++) {
- cdata->chanv[i].channel = i;
- cdata->chanv[i].value = VOL_ZERO_DB;
- }
-
-out:
- dev_dbg(sdev->dev, "tplg: load kcontrol index %d chans %d\n",
- scontrol->comp_id, scontrol->num_channels);
-
- return 0;
-}
-
-static int sof_control_load_enum(struct snd_soc_component *scomp,
- struct snd_sof_control *scontrol,
- struct snd_kcontrol_new *kc,
- struct snd_soc_tplg_ctl_hdr *hdr)
-{
- struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
- struct snd_soc_tplg_enum_control *ec =
- container_of(hdr, struct snd_soc_tplg_enum_control, hdr);
-
- /* validate topology data */
- if (le32_to_cpu(ec->num_channels) > SND_SOC_TPLG_MAX_CHAN)
- return -EINVAL;
-
- /* init the enum get/put data */
- scontrol->size = struct_size(scontrol->control_data, chanv,
- le32_to_cpu(ec->num_channels));
- scontrol->control_data = kzalloc(scontrol->size, GFP_KERNEL);
- if (!scontrol->control_data)
- return -ENOMEM;
-
- scontrol->comp_id = sdev->next_comp_id;
- scontrol->num_channels = le32_to_cpu(ec->num_channels);
-
- scontrol->cmd = SOF_CTRL_CMD_ENUM;
-
- dev_dbg(sdev->dev, "tplg: load kcontrol index %d chans %d comp_id %d\n",
- scontrol->comp_id, scontrol->num_channels, scontrol->comp_id);
-
- return 0;
-}
-
-static int sof_control_load_bytes(struct snd_soc_component *scomp,
- struct snd_sof_control *scontrol,
- struct snd_kcontrol_new *kc,
- struct snd_soc_tplg_ctl_hdr *hdr)
-{
- struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
- struct sof_ipc_ctrl_data *cdata;
- struct snd_soc_tplg_bytes_control *control =
- container_of(hdr, struct snd_soc_tplg_bytes_control, hdr);
- struct soc_bytes_ext *sbe = (struct soc_bytes_ext *)kc->private_value;
- int max_size = sbe->max;
-
- /* init the get/put bytes data */
- scontrol->size = sizeof(struct sof_ipc_ctrl_data) +
- le32_to_cpu(control->priv.size);
-
- if (scontrol->size > max_size) {
- dev_err(sdev->dev, "err: bytes data size %d exceeds max %d.\n",
- scontrol->size, max_size);
- return -EINVAL;
- }
-
- scontrol->control_data = kzalloc(max_size, GFP_KERNEL);
- cdata = scontrol->control_data;
- if (!scontrol->control_data)
- return -ENOMEM;
-
- scontrol->comp_id = sdev->next_comp_id;
- scontrol->cmd = SOF_CTRL_CMD_BINARY;
-
- dev_dbg(sdev->dev, "tplg: load kcontrol index %d chans %d\n",
- scontrol->comp_id, scontrol->num_channels);
-
- if (le32_to_cpu(control->priv.size) > 0) {
- memcpy(cdata->data, control->priv.data,
- le32_to_cpu(control->priv.size));
-
- if (cdata->data->magic != SOF_ABI_MAGIC) {
- dev_err(sdev->dev, "error: Wrong ABI magic 0x%08x.\n",
- cdata->data->magic);
- return -EINVAL;
- }
- if (SOF_ABI_VERSION_INCOMPATIBLE(SOF_ABI_VERSION,
- cdata->data->abi)) {
- dev_err(sdev->dev,
- "error: Incompatible ABI version 0x%08x.\n",
- cdata->data->abi);
- return -EINVAL;
- }
- if (cdata->data->size + sizeof(const struct sof_abi_hdr) !=
- le32_to_cpu(control->priv.size)) {
- dev_err(sdev->dev,
- "error: Conflict in bytes vs. priv size.\n");
- return -EINVAL;
- }
- }
- return 0;
-}
-
-/*
* Topology Token Parsing.
* New tokens should be added to headers and parsing tables below.
*/
@@ -725,6 +587,16 @@ static const struct sof_topology_token pcm_tokens[] = {
offsetof(struct sof_ipc_comp_host, dmac_config), 0},
};
+/* PCM */
+static const struct sof_topology_token stream_tokens[] = {
+ {SOF_TKN_STREAM_PLAYBACK_COMPATIBLE_D0I3,
+ SND_SOC_TPLG_TUPLE_TYPE_BOOL, get_token_u16,
+ offsetof(struct snd_sof_pcm, stream[0].d0i3_compatible), 0},
+ {SOF_TKN_STREAM_CAPTURE_COMPATIBLE_D0I3,
+ SND_SOC_TPLG_TUPLE_TYPE_BOOL, get_token_u16,
+ offsetof(struct snd_sof_pcm, stream[1].d0i3_compatible), 0},
+};
+
/* Generic components */
static const struct sof_topology_token comp_tokens[] = {
{SOF_TKN_COMP_PERIOD_SINK_COUNT,
@@ -799,6 +671,13 @@ static const struct sof_topology_token dmic_tokens[] = {
};
+/* ESAI */
+static const struct sof_topology_token esai_tokens[] = {
+ {SOF_TKN_IMX_ESAI_MCLK_ID,
+ SND_SOC_TPLG_TUPLE_TYPE_SHORT, get_token_u16,
+ offsetof(struct sof_ipc_dai_esai_params, mclk_id), 0},
+};
+
/*
* DMIC PDM Tokens
* SOF_TKN_INTEL_DMIC_PDM_CTRL_ID should be the first token
@@ -840,6 +719,14 @@ static const struct sof_topology_token dmic_pdm_tokens[] = {
static const struct sof_topology_token hda_tokens[] = {
};
+/* Leds */
+static const struct sof_topology_token led_tokens[] = {
+ {SOF_TKN_MUTE_LED_USE, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct snd_sof_led_control, use_led), 0},
+ {SOF_TKN_MUTE_LED_DIRECTION, SND_SOC_TPLG_TUPLE_TYPE_WORD,
+ get_token_u32, offsetof(struct snd_sof_led_control, direction), 0},
+};
+
static void sof_parse_uuid_tokens(struct snd_soc_component *scomp,
void *object,
const struct sof_topology_token *tokens,
@@ -1040,6 +927,200 @@ static void sof_dbg_comp_config(struct snd_soc_component *scomp,
config->frame_fmt);
}
+/*
+ * Standard Kcontrols.
+ */
+
+static int sof_control_load_volume(struct snd_soc_component *scomp,
+ struct snd_sof_control *scontrol,
+ struct snd_kcontrol_new *kc,
+ struct snd_soc_tplg_ctl_hdr *hdr)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct snd_soc_tplg_mixer_control *mc =
+ container_of(hdr, struct snd_soc_tplg_mixer_control, hdr);
+ struct sof_ipc_ctrl_data *cdata;
+ int tlv[TLV_ITEMS];
+ unsigned int i;
+ int ret = 0;
+
+ /* validate topology data */
+ if (le32_to_cpu(mc->num_channels) > SND_SOC_TPLG_MAX_CHAN) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* init the volume get/put data */
+ scontrol->size = struct_size(scontrol->control_data, chanv,
+ le32_to_cpu(mc->num_channels));
+ scontrol->control_data = kzalloc(scontrol->size, GFP_KERNEL);
+ if (!scontrol->control_data) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ scontrol->comp_id = sdev->next_comp_id;
+ scontrol->min_volume_step = le32_to_cpu(mc->min);
+ scontrol->max_volume_step = le32_to_cpu(mc->max);
+ scontrol->num_channels = le32_to_cpu(mc->num_channels);
+
+ /* set cmd for mixer control */
+ if (le32_to_cpu(mc->max) == 1) {
+ scontrol->cmd = SOF_CTRL_CMD_SWITCH;
+ goto skip;
+ }
+
+ scontrol->cmd = SOF_CTRL_CMD_VOLUME;
+
+ /* extract tlv data */
+ if (get_tlv_data(kc->tlv.p, tlv) < 0) {
+ dev_err(sdev->dev, "error: invalid TLV data\n");
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ /* set up volume table */
+ ret = set_up_volume_table(scontrol, tlv, le32_to_cpu(mc->max) + 1);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: setting up volume table\n");
+ goto out_free;
+ }
+
+ /* set default volume values to 0dB in control */
+ cdata = scontrol->control_data;
+ for (i = 0; i < scontrol->num_channels; i++) {
+ cdata->chanv[i].channel = i;
+ cdata->chanv[i].value = VOL_ZERO_DB;
+ }
+
+skip:
+ /* set up possible led control from mixer private data */
+ ret = sof_parse_tokens(scomp, &scontrol->led_ctl, led_tokens,
+ ARRAY_SIZE(led_tokens), mc->priv.array,
+ le32_to_cpu(mc->priv.size));
+ if (ret != 0) {
+ dev_err(sdev->dev, "error: parse led tokens failed %d\n",
+ le32_to_cpu(mc->priv.size));
+ goto out_free_table;
+ }
+
+ dev_dbg(sdev->dev, "tplg: load kcontrol index %d chans %d\n",
+ scontrol->comp_id, scontrol->num_channels);
+
+ return ret;
+
+out_free_table:
+ if (le32_to_cpu(mc->max) > 1)
+ kfree(scontrol->volume_table);
+out_free:
+ kfree(scontrol->control_data);
+out:
+ return ret;
+}
+
+static int sof_control_load_enum(struct snd_soc_component *scomp,
+ struct snd_sof_control *scontrol,
+ struct snd_kcontrol_new *kc,
+ struct snd_soc_tplg_ctl_hdr *hdr)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct snd_soc_tplg_enum_control *ec =
+ container_of(hdr, struct snd_soc_tplg_enum_control, hdr);
+
+ /* validate topology data */
+ if (le32_to_cpu(ec->num_channels) > SND_SOC_TPLG_MAX_CHAN)
+ return -EINVAL;
+
+ /* init the enum get/put data */
+ scontrol->size = struct_size(scontrol->control_data, chanv,
+ le32_to_cpu(ec->num_channels));
+ scontrol->control_data = kzalloc(scontrol->size, GFP_KERNEL);
+ if (!scontrol->control_data)
+ return -ENOMEM;
+
+ scontrol->comp_id = sdev->next_comp_id;
+ scontrol->num_channels = le32_to_cpu(ec->num_channels);
+
+ scontrol->cmd = SOF_CTRL_CMD_ENUM;
+
+ dev_dbg(sdev->dev, "tplg: load kcontrol index %d chans %d comp_id %d\n",
+ scontrol->comp_id, scontrol->num_channels, scontrol->comp_id);
+
+ return 0;
+}
+
+static int sof_control_load_bytes(struct snd_soc_component *scomp,
+ struct snd_sof_control *scontrol,
+ struct snd_kcontrol_new *kc,
+ struct snd_soc_tplg_ctl_hdr *hdr)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct sof_ipc_ctrl_data *cdata;
+ struct snd_soc_tplg_bytes_control *control =
+ container_of(hdr, struct snd_soc_tplg_bytes_control, hdr);
+ struct soc_bytes_ext *sbe = (struct soc_bytes_ext *)kc->private_value;
+ int max_size = sbe->max;
+ int ret = 0;
+
+ /* init the get/put bytes data */
+ scontrol->size = sizeof(struct sof_ipc_ctrl_data) +
+ le32_to_cpu(control->priv.size);
+
+ if (scontrol->size > max_size) {
+ dev_err(sdev->dev, "err: bytes data size %d exceeds max %d.\n",
+ scontrol->size, max_size);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ scontrol->control_data = kzalloc(max_size, GFP_KERNEL);
+ cdata = scontrol->control_data;
+ if (!scontrol->control_data) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ scontrol->comp_id = sdev->next_comp_id;
+ scontrol->cmd = SOF_CTRL_CMD_BINARY;
+
+ dev_dbg(sdev->dev, "tplg: load kcontrol index %d chans %d\n",
+ scontrol->comp_id, scontrol->num_channels);
+
+ if (le32_to_cpu(control->priv.size) > 0) {
+ memcpy(cdata->data, control->priv.data,
+ le32_to_cpu(control->priv.size));
+
+ if (cdata->data->magic != SOF_ABI_MAGIC) {
+ dev_err(sdev->dev, "error: Wrong ABI magic 0x%08x.\n",
+ cdata->data->magic);
+ ret = -EINVAL;
+ goto out_free;
+ }
+ if (SOF_ABI_VERSION_INCOMPATIBLE(SOF_ABI_VERSION,
+ cdata->data->abi)) {
+ dev_err(sdev->dev,
+ "error: Incompatible ABI version 0x%08x.\n",
+ cdata->data->abi);
+ ret = -EINVAL;
+ goto out_free;
+ }
+ if (cdata->data->size + sizeof(const struct sof_abi_hdr) !=
+ le32_to_cpu(control->priv.size)) {
+ dev_err(sdev->dev,
+ "error: Conflict in bytes vs. priv size.\n");
+ ret = -EINVAL;
+ goto out_free;
+ }
+ }
+
+ return ret;
+
+out_free:
+ kfree(scontrol->control_data);
+out:
+ return ret;
+}
+
/* external kcontrol init - used for any driver specific init */
static int sof_control_load(struct snd_soc_component *scomp, int index,
struct snd_kcontrol_new *kc,
@@ -1095,6 +1176,11 @@ static int sof_control_load(struct snd_soc_component *scomp, int index,
return 0;
}
+ if (ret < 0) {
+ kfree(scontrol);
+ return ret;
+ }
+
dobj->private = scontrol;
list_add(&scontrol->list, &sdev->kcontrol_list);
return ret;
@@ -1581,7 +1667,7 @@ static int sof_widget_load_pga(struct snd_soc_component *scomp, int index,
if (!volume)
return -ENOMEM;
- if (le32_to_cpu(tw->num_kcontrols) != 1) {
+ if (!le32_to_cpu(tw->num_kcontrols)) {
dev_err(sdev->dev, "error: invalid kcontrol count %d for volume\n",
tw->num_kcontrols);
ret = -EINVAL;
@@ -1618,7 +1704,8 @@ static int sof_widget_load_pga(struct snd_soc_component *scomp, int index,
swidget->private = volume;
list_for_each_entry(scontrol, &sdev->kcontrol_list, list) {
- if (scontrol->comp_id == swidget->comp_id) {
+ if (scontrol->comp_id == swidget->comp_id &&
+ scontrol->volume_table) {
min_step = scontrol->min_volume_step;
max_step = scontrol->max_volume_step;
volume->min_value = scontrol->volume_table[min_step];
@@ -2272,6 +2359,7 @@ static int sof_dai_load(struct snd_soc_component *scomp, int index,
{
struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
struct snd_soc_tplg_stream_caps *caps;
+ struct snd_soc_tplg_private *private = &pcm->priv;
struct snd_sof_pcm *spcm;
int stream = SNDRV_PCM_STREAM_PLAYBACK;
int ret = 0;
@@ -2288,17 +2376,28 @@ static int sof_dai_load(struct snd_soc_component *scomp, int index,
spcm->stream[SNDRV_PCM_STREAM_PLAYBACK].comp_id = COMP_ID_UNASSIGNED;
spcm->stream[SNDRV_PCM_STREAM_CAPTURE].comp_id = COMP_ID_UNASSIGNED;
- if (pcm) {
- spcm->pcm = *pcm;
- dev_dbg(sdev->dev, "tplg: load pcm %s\n", pcm->dai_name);
- }
+ spcm->pcm = *pcm;
+ dev_dbg(sdev->dev, "tplg: load pcm %s\n", pcm->dai_name);
+
dai_drv->dobj.private = spcm;
list_add(&spcm->list, &sdev->pcm_list);
+ ret = sof_parse_tokens(scomp, spcm, stream_tokens,
+ ARRAY_SIZE(stream_tokens), private->array,
+ le32_to_cpu(private->size));
+ if (ret) {
+ dev_err(sdev->dev, "error: parse stream tokens failed %d\n",
+ le32_to_cpu(private->size));
+ return ret;
+ }
+
/* do we need to allocate playback PCM DMA pages */
if (!spcm->pcm.playback)
goto capture;
+ dev_vdbg(sdev->dev, "tplg: pcm %s stream tokens: playback d0i3:%d\n",
+ spcm->pcm.pcm_name, spcm->stream[0].d0i3_compatible);
+
caps = &spcm->pcm.caps[stream];
/* allocate playback page table buffer */
@@ -2326,6 +2425,9 @@ capture:
if (!spcm->pcm.capture)
return ret;
+ dev_vdbg(sdev->dev, "tplg: pcm %s stream tokens: capture d0i3:%d\n",
+ spcm->pcm.pcm_name, spcm->stream[1].d0i3_compatible);
+
caps = &spcm->pcm.caps[stream];
/* allocate capture page table buffer */
@@ -2536,8 +2638,66 @@ static int sof_link_esai_load(struct snd_soc_component *scomp, int index,
struct snd_soc_tplg_hw_config *hw_config,
struct sof_ipc_dai_config *config)
{
- /*TODO: Add implementation */
- return 0;
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct snd_soc_tplg_private *private = &cfg->priv;
+ struct sof_ipc_reply reply;
+ u32 size = sizeof(*config);
+ int ret;
+
+ /* handle master/slave and inverted clocks */
+ sof_dai_set_format(hw_config, config);
+
+ /* init IPC */
+ memset(&config->esai, 0, sizeof(struct sof_ipc_dai_esai_params));
+ config->hdr.size = size;
+
+ ret = sof_parse_tokens(scomp, &config->esai, esai_tokens,
+ ARRAY_SIZE(esai_tokens), private->array,
+ le32_to_cpu(private->size));
+ if (ret != 0) {
+ dev_err(sdev->dev, "error: parse esai tokens failed %d\n",
+ le32_to_cpu(private->size));
+ return ret;
+ }
+
+ config->esai.mclk_rate = le32_to_cpu(hw_config->mclk_rate);
+ config->esai.bclk_rate = le32_to_cpu(hw_config->bclk_rate);
+ config->esai.fsync_rate = le32_to_cpu(hw_config->fsync_rate);
+ config->esai.mclk_direction = hw_config->mclk_direction;
+ config->esai.tdm_slots = le32_to_cpu(hw_config->tdm_slots);
+ config->esai.tdm_slot_width = le32_to_cpu(hw_config->tdm_slot_width);
+ config->esai.rx_slots = le32_to_cpu(hw_config->rx_slots);
+ config->esai.tx_slots = le32_to_cpu(hw_config->tx_slots);
+
+ dev_info(sdev->dev,
+ "tplg: config ESAI%d fmt 0x%x mclk %d width %d slots %d mclk id %d\n",
+ config->dai_index, config->format,
+ config->esai.mclk_rate, config->esai.tdm_slot_width,
+ config->esai.tdm_slots, config->esai.mclk_id);
+
+ if (config->esai.tdm_slots < 1 || config->esai.tdm_slots > 8) {
+ dev_err(sdev->dev, "error: invalid channel count for ESAI%d\n",
+ config->dai_index);
+ return -EINVAL;
+ }
+
+ /* send message to DSP */
+ ret = sof_ipc_tx_message(sdev->ipc,
+ config->hdr.cmd, config, size, &reply,
+ sizeof(reply));
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: failed to set DAI config for ESAI%d\n",
+ config->dai_index);
+ return ret;
+ }
+
+ /* set config for all DAI's with name matching the link name */
+ ret = sof_set_dai_config(sdev, size, link, config);
+ if (ret < 0)
+ dev_err(sdev->dev, "error: failed to save DAI config for ESAI%d\n",
+ config->dai_index);
+
+ return ret;
}
static int sof_link_dmic_load(struct snd_soc_component *scomp, int index,
@@ -2828,6 +2988,10 @@ static int sof_link_load(struct snd_soc_component *scomp, int index,
if (!link->no_pcm) {
link->nonatomic = true;
+ /* set trigger order */
+ link->trigger[0] = SND_SOC_DPCM_TRIGGER_POST;
+ link->trigger[1] = SND_SOC_DPCM_TRIGGER_POST;
+
/* nothing more to do for FE dai links */
return 0;
}
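The stream_tokens, esai_tokens and led_tokens tables added above all follow the same pattern: each entry binds a topology token ID to a getter and a byte offset into the destination struct, and sof_parse_tokens() applies every matching tuple from the topology private data. A sketch of the u16 getter shape those tables rely on (the driver's real get_token_u16 predates this patch and is not shown in the diff):

static int get_token_u16(void *elem, void *object, u32 offset, u32 size)
{
	struct snd_soc_tplg_vendor_value_elem *velem = elem;
	u16 *val = (u16 *)((u8 *)object + offset);

	*val = (u16)le32_to_cpu(velem->value);
	return 0;
}

This is why SOF_TKN_STREAM_*_COMPATIBLE_D0I3 can land directly in stream[0]/stream[1].d0i3_compatible: the table encodes the offsetof() and the parser does the rest.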
diff --git a/sound/soc/sof/trace.c b/sound/soc/sof/trace.c
index 4c3cff031fd6..b0e4556c8536 100644
--- a/sound/soc/sof/trace.c
+++ b/sound/soc/sof/trace.c
@@ -162,6 +162,9 @@ int snd_sof_init_trace_ipc(struct snd_sof_dev *sdev)
struct sof_ipc_reply ipc_reply;
int ret;
+ if (!sdev->dtrace_is_supported)
+ return 0;
+
if (sdev->dtrace_is_enabled || !sdev->dma_trace_pages)
return -EINVAL;
@@ -222,6 +225,9 @@ int snd_sof_init_trace(struct snd_sof_dev *sdev)
{
int ret;
+ if (!sdev->dtrace_is_supported)
+ return 0;
+
/* set false before start initialization */
sdev->dtrace_is_enabled = false;
@@ -277,6 +283,9 @@ EXPORT_SYMBOL(snd_sof_init_trace);
int snd_sof_trace_update_pos(struct snd_sof_dev *sdev,
struct sof_ipc_dma_trace_posn *posn)
{
+ if (!sdev->dtrace_is_supported)
+ return 0;
+
if (sdev->dtrace_is_enabled && sdev->host_offset != posn->host_offset) {
sdev->host_offset = posn->host_offset;
wake_up(&sdev->trace_sleep);
@@ -293,6 +302,9 @@ int snd_sof_trace_update_pos(struct snd_sof_dev *sdev,
/* an error has occurred within the DSP that prevents further trace */
void snd_sof_trace_notify_for_error(struct snd_sof_dev *sdev)
{
+ if (!sdev->dtrace_is_supported)
+ return;
+
if (sdev->dtrace_is_enabled) {
dev_err(sdev->dev, "error: waking up any trace sleepers\n");
sdev->dtrace_error = true;
@@ -305,7 +317,7 @@ void snd_sof_release_trace(struct snd_sof_dev *sdev)
{
int ret;
- if (!sdev->dtrace_is_enabled)
+ if (!sdev->dtrace_is_supported || !sdev->dtrace_is_enabled)
return;
ret = snd_sof_dma_trace_trigger(sdev, SNDRV_PCM_TRIGGER_STOP);
@@ -326,6 +338,9 @@ EXPORT_SYMBOL(snd_sof_release_trace);
void snd_sof_free_trace(struct snd_sof_dev *sdev)
{
+ if (!sdev->dtrace_is_supported)
+ return;
+
snd_sof_release_trace(sdev);
snd_dma_free_pages(&sdev->dmatb);
diff --git a/sound/soc/sprd/sprd-pcm-dma.c b/sound/soc/sprd/sprd-pcm-dma.c
index d38ebbbbf169..da4b8f5f192b 100644
--- a/sound/soc/sprd/sprd-pcm-dma.c
+++ b/sound/soc/sprd/sprd-pcm-dma.c
@@ -46,12 +46,10 @@ static const struct snd_pcm_hardware sprd_pcm_hardware = {
.buffer_bytes_max = 64 * 1024,
};
-static int sprd_pcm_open(struct snd_pcm_substream *substream)
+static int sprd_pcm_open(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_component *component =
- snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct device *dev = component->dev;
struct sprd_pcm_dma_private *dma_private;
int hw_chan = SPRD_PCM_CHANNEL_MAX;
@@ -111,13 +109,11 @@ error:
return ret;
}
-static int sprd_pcm_close(struct snd_pcm_substream *substream)
+static int sprd_pcm_close(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct sprd_pcm_dma_private *dma_private = runtime->private_data;
- struct snd_soc_component *component =
- snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct device *dev = component->dev;
int size = runtime->hw.periods_max * SPRD_PCM_DMA_LINKLIST_SIZE;
int i;
@@ -157,14 +153,12 @@ static void sprd_pcm_release_dma_channel(struct snd_pcm_substream *substream)
}
}
-static int sprd_pcm_request_dma_channel(struct snd_pcm_substream *substream,
+static int sprd_pcm_request_dma_channel(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
int channels)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct sprd_pcm_dma_private *dma_private = runtime->private_data;
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_component *component =
- snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct device *dev = component->dev;
struct sprd_pcm_dma_params *dma_params = dma_private->params;
int i;
@@ -190,14 +184,13 @@ static int sprd_pcm_request_dma_channel(struct snd_pcm_substream *substream,
return 0;
}
-static int sprd_pcm_hw_params(struct snd_pcm_substream *substream,
+static int sprd_pcm_hw_params(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct sprd_pcm_dma_private *dma_private = runtime->private_data;
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_component *component =
- snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct sprd_pcm_dma_params *dma_params;
size_t totsize = params_buffer_bytes(params);
size_t period = params_period_bytes(params);
@@ -218,7 +211,8 @@ static int sprd_pcm_hw_params(struct snd_pcm_substream *substream,
if (!dma_private->params) {
dma_private->params = dma_params;
- ret = sprd_pcm_request_dma_channel(substream, channels);
+ ret = sprd_pcm_request_dma_channel(component,
+ substream, channels);
if (ret)
return ret;
}
@@ -313,7 +307,8 @@ sg_err:
return ret;
}
-static int sprd_pcm_hw_free(struct snd_pcm_substream *substream)
+static int sprd_pcm_hw_free(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
snd_pcm_set_runtime_buffer(substream, NULL);
sprd_pcm_release_dma_channel(substream);
@@ -321,13 +316,11 @@ static int sprd_pcm_hw_free(struct snd_pcm_substream *substream)
return 0;
}
-static int sprd_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+static int sprd_pcm_trigger(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream, int cmd)
{
struct sprd_pcm_dma_private *dma_private =
substream->runtime->private_data;
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_component *component =
- snd_soc_rtdcom_lookup(rtd, DRV_NAME);
int ret = 0, i;
switch (cmd) {
@@ -387,13 +380,11 @@ static int sprd_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
return ret;
}
-static snd_pcm_uframes_t sprd_pcm_pointer(struct snd_pcm_substream *substream)
+static snd_pcm_uframes_t sprd_pcm_pointer(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct sprd_pcm_dma_private *dma_private = runtime->private_data;
- struct snd_soc_component *component =
- snd_soc_rtdcom_lookup(rtd, DRV_NAME);
int pointer[SPRD_PCM_CHANNEL_MAX];
int bytes_of_pointer = 0, sel_max = 0, i;
snd_pcm_uframes_t x;
@@ -444,7 +435,8 @@ static snd_pcm_uframes_t sprd_pcm_pointer(struct snd_pcm_substream *substream)
return x;
}
-static int sprd_pcm_mmap(struct snd_pcm_substream *substream,
+static int sprd_pcm_mmap(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
struct vm_area_struct *vma)
{
struct snd_pcm_runtime *runtime = substream->runtime;
@@ -456,18 +448,8 @@ static int sprd_pcm_mmap(struct snd_pcm_substream *substream,
vma->vm_page_prot);
}
-static struct snd_pcm_ops sprd_pcm_ops = {
- .open = sprd_pcm_open,
- .close = sprd_pcm_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = sprd_pcm_hw_params,
- .hw_free = sprd_pcm_hw_free,
- .trigger = sprd_pcm_trigger,
- .pointer = sprd_pcm_pointer,
- .mmap = sprd_pcm_mmap,
-};
-
-static int sprd_pcm_new(struct snd_soc_pcm_runtime *rtd)
+static int sprd_pcm_new(struct snd_soc_component *component,
+ struct snd_soc_pcm_runtime *rtd)
{
struct snd_card *card = rtd->card->snd_card;
struct snd_pcm *pcm = rtd->pcm;
@@ -506,7 +488,8 @@ static int sprd_pcm_new(struct snd_soc_pcm_runtime *rtd)
return 0;
}
-static void sprd_pcm_free(struct snd_pcm *pcm)
+static void sprd_pcm_free(struct snd_soc_component *component,
+ struct snd_pcm *pcm)
{
struct snd_pcm_substream *substream;
int i;
@@ -523,10 +506,17 @@ static void sprd_pcm_free(struct snd_pcm *pcm)
static const struct snd_soc_component_driver sprd_soc_component = {
.name = DRV_NAME,
- .ops = &sprd_pcm_ops,
+ .open = sprd_pcm_open,
+ .close = sprd_pcm_close,
+ .ioctl = snd_soc_pcm_lib_ioctl,
+ .hw_params = sprd_pcm_hw_params,
+ .hw_free = sprd_pcm_hw_free,
+ .trigger = sprd_pcm_trigger,
+ .pointer = sprd_pcm_pointer,
+ .mmap = sprd_pcm_mmap,
+ .pcm_construct = sprd_pcm_new,
+ .pcm_destruct = sprd_pcm_free,
.compr_ops = &sprd_platform_compr_ops,
- .pcm_new = sprd_pcm_new,
- .pcm_free = sprd_pcm_free,
};
static int sprd_soc_platform_probe(struct platform_device *pdev)
diff --git a/sound/soc/stm/stm32_adfsdm.c b/sound/soc/stm/stm32_adfsdm.c
index 3c9a9deec9af..81c407da15c5 100644
--- a/sound/soc/stm/stm32_adfsdm.c
+++ b/sound/soc/stm/stm32_adfsdm.c
@@ -210,7 +210,8 @@ static int stm32_afsdm_pcm_cb(const void *data, size_t size, void *private)
return 0;
}
-static int stm32_adfsdm_trigger(struct snd_pcm_substream *substream, int cmd)
+static int stm32_adfsdm_trigger(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream, int cmd)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct stm32_adfsdm_priv *priv =
@@ -230,7 +231,8 @@ static int stm32_adfsdm_trigger(struct snd_pcm_substream *substream, int cmd)
return -EINVAL;
}
-static int stm32_adfsdm_pcm_open(struct snd_pcm_substream *substream)
+static int stm32_adfsdm_pcm_open(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct stm32_adfsdm_priv *priv = snd_soc_dai_get_drvdata(rtd->cpu_dai);
@@ -243,7 +245,8 @@ static int stm32_adfsdm_pcm_open(struct snd_pcm_substream *substream)
return ret;
}
-static int stm32_adfsdm_pcm_close(struct snd_pcm_substream *substream)
+static int stm32_adfsdm_pcm_close(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct stm32_adfsdm_priv *priv =
@@ -256,6 +259,7 @@ static int stm32_adfsdm_pcm_close(struct snd_pcm_substream *substream)
}
static snd_pcm_uframes_t stm32_adfsdm_pcm_pointer(
+ struct snd_soc_component *component,
struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
@@ -265,7 +269,8 @@ static snd_pcm_uframes_t stm32_adfsdm_pcm_pointer(
return bytes_to_frames(substream->runtime, priv->pos);
}
-static int stm32_adfsdm_pcm_hw_params(struct snd_pcm_substream *substream,
+static int stm32_adfsdm_pcm_hw_params(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
@@ -282,23 +287,16 @@ static int stm32_adfsdm_pcm_hw_params(struct snd_pcm_substream *substream,
params_period_size(params));
}
-static int stm32_adfsdm_pcm_hw_free(struct snd_pcm_substream *substream)
+static int stm32_adfsdm_pcm_hw_free(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
snd_pcm_lib_free_pages(substream);
return 0;
}
-static struct snd_pcm_ops stm32_adfsdm_pcm_ops = {
- .open = stm32_adfsdm_pcm_open,
- .close = stm32_adfsdm_pcm_close,
- .hw_params = stm32_adfsdm_pcm_hw_params,
- .hw_free = stm32_adfsdm_pcm_hw_free,
- .trigger = stm32_adfsdm_trigger,
- .pointer = stm32_adfsdm_pcm_pointer,
-};
-
-static int stm32_adfsdm_pcm_new(struct snd_soc_pcm_runtime *rtd)
+static int stm32_adfsdm_pcm_new(struct snd_soc_component *component,
+ struct snd_soc_pcm_runtime *rtd)
{
struct snd_pcm *pcm = rtd->pcm;
struct stm32_adfsdm_priv *priv =
@@ -310,7 +308,8 @@ static int stm32_adfsdm_pcm_new(struct snd_soc_pcm_runtime *rtd)
return 0;
}
-static void stm32_adfsdm_pcm_free(struct snd_pcm *pcm)
+static void stm32_adfsdm_pcm_free(struct snd_soc_component *component,
+ struct snd_pcm *pcm)
{
struct snd_pcm_substream *substream;
@@ -320,9 +319,14 @@ static void stm32_adfsdm_pcm_free(struct snd_pcm *pcm)
}
static struct snd_soc_component_driver stm32_adfsdm_soc_platform = {
- .ops = &stm32_adfsdm_pcm_ops,
- .pcm_new = stm32_adfsdm_pcm_new,
- .pcm_free = stm32_adfsdm_pcm_free,
+ .open = stm32_adfsdm_pcm_open,
+ .close = stm32_adfsdm_pcm_close,
+ .hw_params = stm32_adfsdm_pcm_hw_params,
+ .hw_free = stm32_adfsdm_pcm_hw_free,
+ .trigger = stm32_adfsdm_trigger,
+ .pointer = stm32_adfsdm_pcm_pointer,
+ .pcm_construct = stm32_adfsdm_pcm_new,
+ .pcm_destruct = stm32_adfsdm_pcm_free,
};
static const struct of_device_id stm32_adfsdm_of_match[] = {
diff --git a/sound/soc/stm/stm32_sai.c b/sound/soc/stm/stm32_sai.c
index ef4273361d0d..e20267504b16 100644
--- a/sound/soc/stm/stm32_sai.c
+++ b/sound/soc/stm/stm32_sai.c
@@ -100,7 +100,7 @@ static int stm32_sai_sync_conf_provider(struct stm32_sai_data *sai, int synco)
dev_err(&sai->pdev->dev, "%pOFn%s already set as sync provider\n",
sai->pdev->dev.of_node,
prev_synco == STM_SAI_SYNC_OUT_A ? "A" : "B");
- stm32_sai_pclk_disable(&sai->pdev->dev);
+ stm32_sai_pclk_disable(&sai->pdev->dev);
return -EINVAL;
}
diff --git a/sound/soc/stm/stm32_spdifrx.c b/sound/soc/stm/stm32_spdifrx.c
index cd4b235fce57..3fd28ee01675 100644
--- a/sound/soc/stm/stm32_spdifrx.c
+++ b/sound/soc/stm/stm32_spdifrx.c
@@ -351,6 +351,8 @@ static int stm32_spdifrx_start_sync(struct stm32_spdifrx_data *spdifrx)
SPDIFRX_CR_CUMSK | SPDIFRX_CR_PTMSK | SPDIFRX_CR_RXSTEO;
cr_mask = cr;
+ cr |= SPDIFRX_CR_NBTRSET(SPDIFRX_NBTR_63);
+ cr_mask |= SPDIFRX_CR_NBTR_MASK;
cr |= SPDIFRX_CR_SPDIFENSET(SPDIFRX_SPDIFEN_SYNC);
cr_mask |= SPDIFRX_CR_SPDIFEN_MASK;
ret = regmap_update_bits(spdifrx->regmap, STM32_SPDIFRX_CR,
@@ -666,7 +668,7 @@ static irqreturn_t stm32_spdifrx_isr(int irq, void *devid)
struct snd_pcm_substream *substream = spdifrx->substream;
struct platform_device *pdev = spdifrx->pdev;
unsigned int cr, mask, sr, imr;
- unsigned int flags;
+ unsigned int flags, sync_state;
int err = 0, err_xrun = 0;
regmap_read(spdifrx->regmap, STM32_SPDIFRX_SR, &sr);
@@ -726,11 +728,23 @@ static irqreturn_t stm32_spdifrx_isr(int irq, void *devid)
}
if (err) {
- /* SPDIFRX in STATE_STOP. Disable SPDIFRX to clear errors */
+ regmap_read(spdifrx->regmap, STM32_SPDIFRX_CR, &cr);
+ sync_state = FIELD_GET(SPDIFRX_CR_SPDIFEN_MASK, cr) &&
+ SPDIFRX_SPDIFEN_SYNC;
+
+ /* SPDIFRX is in STATE_STOP. Disable SPDIFRX to clear errors */
cr = SPDIFRX_CR_SPDIFENSET(SPDIFRX_SPDIFEN_DISABLE);
regmap_update_bits(spdifrx->regmap, STM32_SPDIFRX_CR,
SPDIFRX_CR_SPDIFEN_MASK, cr);
+ /* If SPDIFRX was in STATE_SYNC, retry synchro */
+ if (sync_state) {
+ cr = SPDIFRX_CR_SPDIFENSET(SPDIFRX_SPDIFEN_SYNC);
+ regmap_update_bits(spdifrx->regmap, STM32_SPDIFRX_CR,
+ SPDIFRX_CR_SPDIFEN_MASK, cr);
+ return IRQ_HANDLED;
+ }
+
if (substream)
snd_pcm_stop(substream, SNDRV_PCM_STATE_DISCONNECTED);
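
The retry path above reads the SPDIFEN state back with FIELD_GET() from <linux/bitfield.h> before disabling the receiver, so it can restart synchronization instead of tearing down the stream. The helper in isolation, with a hypothetical two-bit field:

#include <linux/bitfield.h>
#include <linux/types.h>

#define CTRL_MODE_MASK	GENMASK(2, 1)	/* hypothetical 2-bit field */

static inline u32 ctrl_get_mode(u32 reg)
{
	/* mask out the field and shift it down to bit 0 */
	return FIELD_GET(CTRL_MODE_MASK, reg);
}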
diff --git a/sound/soc/sunxi/sun4i-codec.c b/sound/soc/sunxi/sun4i-codec.c
index ee448d5e07a6..34f3e0be3058 100644
--- a/sound/soc/sunxi/sun4i-codec.c
+++ b/sound/soc/sunxi/sun4i-codec.c
@@ -1442,7 +1442,7 @@ static struct snd_soc_card *sun8i_a23_codec_create_card(struct device *dev)
if (!aux_dev.dlc.of_node) {
dev_err(dev, "Can't find analog controls for codec.\n");
return ERR_PTR(-EINVAL);
- };
+ }
card->dai_link = sun4i_codec_create_link(dev, &card->num_links);
if (!card->dai_link)
@@ -1480,7 +1480,7 @@ static struct snd_soc_card *sun8i_h3_codec_create_card(struct device *dev)
if (!aux_dev.dlc.of_node) {
dev_err(dev, "Can't find analog controls for codec.\n");
return ERR_PTR(-EINVAL);
- };
+ }
card->dai_link = sun4i_codec_create_link(dev, &card->num_links);
if (!card->dai_link)
@@ -1518,7 +1518,7 @@ static struct snd_soc_card *sun8i_v3s_codec_create_card(struct device *dev)
if (!aux_dev.dlc.of_node) {
dev_err(dev, "Can't find analog controls for codec.\n");
return ERR_PTR(-EINVAL);
- };
+ }
card->dai_link = sun4i_codec_create_link(dev, &card->num_links);
if (!card->dai_link)
diff --git a/sound/soc/tegra/tegra30_i2s.c b/sound/soc/tegra/tegra30_i2s.c
index e6d548fa980b..dbed3c5408e7 100644
--- a/sound/soc/tegra/tegra30_i2s.c
+++ b/sound/soc/tegra/tegra30_i2s.c
@@ -127,7 +127,7 @@ static int tegra30_i2s_hw_params(struct snd_pcm_substream *substream,
struct device *dev = dai->dev;
struct tegra30_i2s *i2s = snd_soc_dai_get_drvdata(dai);
unsigned int mask, val, reg;
- int ret, sample_size, srate, i2sclock, bitcnt;
+ int ret, sample_size, srate, i2sclock, bitcnt, audio_bits;
struct tegra30_ahub_cif_conf cif_conf;
if (params_channels(params) != 2)
@@ -137,8 +137,19 @@ static int tegra30_i2s_hw_params(struct snd_pcm_substream *substream,
switch (params_format(params)) {
case SNDRV_PCM_FORMAT_S16_LE:
val = TEGRA30_I2S_CTRL_BIT_SIZE_16;
+ audio_bits = TEGRA30_AUDIOCIF_BITS_16;
sample_size = 16;
break;
+ case SNDRV_PCM_FORMAT_S24_LE:
+ val = TEGRA30_I2S_CTRL_BIT_SIZE_24;
+ audio_bits = TEGRA30_AUDIOCIF_BITS_24;
+ sample_size = 24;
+ break;
+ case SNDRV_PCM_FORMAT_S32_LE:
+ val = TEGRA30_I2S_CTRL_BIT_SIZE_32;
+ audio_bits = TEGRA30_AUDIOCIF_BITS_32;
+ sample_size = 32;
+ break;
default:
return -EINVAL;
}
@@ -170,8 +181,8 @@ static int tegra30_i2s_hw_params(struct snd_pcm_substream *substream,
cif_conf.threshold = 0;
cif_conf.audio_channels = 2;
cif_conf.client_channels = 2;
- cif_conf.audio_bits = TEGRA30_AUDIOCIF_BITS_16;
- cif_conf.client_bits = TEGRA30_AUDIOCIF_BITS_16;
+ cif_conf.audio_bits = audio_bits;
+ cif_conf.client_bits = audio_bits;
cif_conf.expand = 0;
cif_conf.stereo_conv = 0;
cif_conf.replicate = 0;
@@ -220,9 +231,9 @@ static void tegra30_i2s_start_capture(struct tegra30_i2s *i2s)
static void tegra30_i2s_stop_capture(struct tegra30_i2s *i2s)
{
- tegra30_ahub_disable_rx_fifo(i2s->capture_fifo_cif);
regmap_update_bits(i2s->regmap, TEGRA30_I2S_CTRL,
TEGRA30_I2S_CTRL_XFER_EN_RX, 0);
+ tegra30_ahub_disable_rx_fifo(i2s->capture_fifo_cif);
}
static int tegra30_i2s_trigger(struct snd_pcm_substream *substream, int cmd,
@@ -254,6 +265,34 @@ static int tegra30_i2s_trigger(struct snd_pcm_substream *substream, int cmd,
return 0;
}
+static int tegra30_i2s_set_tdm(struct snd_soc_dai *dai,
+ unsigned int tx_mask, unsigned int rx_mask,
+ int slots, int slot_width)
+{
+ struct tegra30_i2s *i2s = snd_soc_dai_get_drvdata(dai);
+ unsigned int mask, val;
+
+ dev_dbg(dai->dev, "%s: txmask=0x%08x rxmask=0x%08x slots=%d width=%d\n",
+ __func__, tx_mask, rx_mask, slots, slot_width);
+
+ mask = TEGRA30_I2S_SLOT_CTRL_TOTAL_SLOTS_MASK |
+ TEGRA30_I2S_SLOT_CTRL_RX_SLOT_ENABLES_MASK |
+ TEGRA30_I2S_SLOT_CTRL_TX_SLOT_ENABLES_MASK;
+
+ val = (tx_mask << TEGRA30_I2S_SLOT_CTRL_TX_SLOT_ENABLES_SHIFT) |
+ (rx_mask << TEGRA30_I2S_SLOT_CTRL_RX_SLOT_ENABLES_SHIFT) |
+ ((slots - 1) << TEGRA30_I2S_SLOT_CTRL_TOTAL_SLOTS_SHIFT);
+
+ pm_runtime_get_sync(dai->dev);
+ regmap_update_bits(i2s->regmap, TEGRA30_I2S_SLOT_CTRL, mask, val);
+ /* set the fsync width to minimum of 1 clock width */
+ regmap_update_bits(i2s->regmap, TEGRA30_I2S_CH_CTRL,
+ TEGRA30_I2S_CH_CTRL_FSYNC_WIDTH_MASK, 0x0);
+ pm_runtime_put(dai->dev);
+
+ return 0;
+}
+
static int tegra30_i2s_probe(struct snd_soc_dai *dai)
{
struct tegra30_i2s *i2s = snd_soc_dai_get_drvdata(dai);
@@ -268,6 +307,7 @@ static const struct snd_soc_dai_ops tegra30_i2s_dai_ops = {
.set_fmt = tegra30_i2s_set_fmt,
.hw_params = tegra30_i2s_hw_params,
.trigger = tegra30_i2s_trigger,
+ .set_tdm_slot = tegra30_i2s_set_tdm,
};
static const struct snd_soc_dai_driver tegra30_i2s_dai_template = {
@@ -277,14 +317,18 @@ static const struct snd_soc_dai_driver tegra30_i2s_dai_template = {
.channels_min = 2,
.channels_max = 2,
.rates = SNDRV_PCM_RATE_8000_96000,
- .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .formats = SNDRV_PCM_FMTBIT_S32_LE |
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S16_LE,
},
.capture = {
.stream_name = "Capture",
.channels_min = 2,
.channels_max = 2,
.rates = SNDRV_PCM_RATE_8000_96000,
- .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .formats = SNDRV_PCM_FMTBIT_S32_LE |
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S16_LE,
},
.ops = &tegra30_i2s_dai_ops,
.symmetric_rates = 1,
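
With .set_tdm_slot wired into tegra30_i2s_dai_ops, a machine driver can presumably program the slot map through the generic DAI helper; a hedged sketch for a four-slot link (masks and widths are illustrative only):

#include <sound/soc.h>

static int foo_machine_hw_params(struct snd_pcm_substream *substream,
				 struct snd_pcm_hw_params *params)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;

	/* enable TX/RX slots 0-3 (mask 0xf), 4 slots, 32 bits each */
	return snd_soc_dai_set_tdm_slot(rtd->cpu_dai, 0xf, 0xf, 4, 32);
}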
diff --git a/sound/soc/ti/davinci-mcasp.c b/sound/soc/ti/davinci-mcasp.c
index 7aa3c32e4a49..8e5371801d88 100644
--- a/sound/soc/ti/davinci-mcasp.c
+++ b/sound/soc/ti/davinci-mcasp.c
@@ -1867,7 +1867,7 @@ static int davinci_mcasp_get_dma_type(struct davinci_mcasp *mcasp)
return PCM_EDMA;
tmp = mcasp->dma_data[SNDRV_PCM_STREAM_PLAYBACK].filter_data;
- chan = dma_request_slave_channel_reason(mcasp->dev, tmp);
+ chan = dma_request_chan(mcasp->dev, tmp);
if (IS_ERR(chan)) {
if (PTR_ERR(chan) != -EPROBE_DEFER)
dev_err(mcasp->dev,
diff --git a/sound/soc/txx9/txx9aclc.c b/sound/soc/txx9/txx9aclc.c
index 66044559f70f..33c78d33e5a1 100644
--- a/sound/soc/txx9/txx9aclc.c
+++ b/sound/soc/txx9/txx9aclc.c
@@ -47,12 +47,12 @@ static const struct snd_pcm_hardware txx9aclc_pcm_hardware = {
.buffer_bytes_max = 32 * 1024,
};
-static int txx9aclc_pcm_hw_params(struct snd_pcm_substream *substream,
+static int txx9aclc_pcm_hw_params(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = snd_pcm_substream_chip(substream);
struct snd_pcm_runtime *runtime = substream->runtime;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct txx9aclc_dmadata *dmadata = runtime->private_data;
int ret;
@@ -76,12 +76,14 @@ static int txx9aclc_pcm_hw_params(struct snd_pcm_substream *substream,
return 0;
}
-static int txx9aclc_pcm_hw_free(struct snd_pcm_substream *substream)
+static int txx9aclc_pcm_hw_free(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
return snd_pcm_lib_free_pages(substream);
}
-static int txx9aclc_pcm_prepare(struct snd_pcm_substream *substream)
+static int txx9aclc_pcm_prepare(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct txx9aclc_dmadata *dmadata = runtime->private_data;
@@ -203,7 +205,8 @@ static void txx9aclc_dma_tasklet(unsigned long data)
spin_unlock_irqrestore(&dmadata->dma_lock, flags);
}
-static int txx9aclc_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+static int txx9aclc_pcm_trigger(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream, int cmd)
{
struct txx9aclc_dmadata *dmadata = substream->runtime->private_data;
struct txx9aclc_plat_drvdata *drvdata = txx9aclc_drvdata;
@@ -236,14 +239,16 @@ static int txx9aclc_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
}
static snd_pcm_uframes_t
-txx9aclc_pcm_pointer(struct snd_pcm_substream *substream)
+txx9aclc_pcm_pointer(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct txx9aclc_dmadata *dmadata = substream->runtime->private_data;
return bytes_to_frames(substream->runtime, dmadata->pos);
}
-static int txx9aclc_pcm_open(struct snd_pcm_substream *substream)
+static int txx9aclc_pcm_open(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct txx9aclc_soc_device *dev = &txx9aclc_soc_device;
struct txx9aclc_dmadata *dmadata = &dev->dmadata[substream->stream];
@@ -261,7 +266,8 @@ static int txx9aclc_pcm_open(struct snd_pcm_substream *substream)
return 0;
}
-static int txx9aclc_pcm_close(struct snd_pcm_substream *substream)
+static int txx9aclc_pcm_close(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct txx9aclc_dmadata *dmadata = substream->runtime->private_data;
struct dma_chan *chan = dmadata->dma_chan;
@@ -271,23 +277,12 @@ static int txx9aclc_pcm_close(struct snd_pcm_substream *substream)
return 0;
}
-static const struct snd_pcm_ops txx9aclc_pcm_ops = {
- .open = txx9aclc_pcm_open,
- .close = txx9aclc_pcm_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = txx9aclc_pcm_hw_params,
- .hw_free = txx9aclc_pcm_hw_free,
- .prepare = txx9aclc_pcm_prepare,
- .trigger = txx9aclc_pcm_trigger,
- .pointer = txx9aclc_pcm_pointer,
-};
-
-static int txx9aclc_pcm_new(struct snd_soc_pcm_runtime *rtd)
+static int txx9aclc_pcm_new(struct snd_soc_component *component,
+ struct snd_soc_pcm_runtime *rtd)
{
struct snd_card *card = rtd->card->snd_card;
struct snd_soc_dai *dai = rtd->cpu_dai;
struct snd_pcm *pcm = rtd->pcm;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct platform_device *pdev = to_platform_device(component->dev);
struct txx9aclc_soc_device *dev;
struct resource *r;
@@ -409,8 +404,15 @@ static const struct snd_soc_component_driver txx9aclc_soc_component = {
.name = DRV_NAME,
.probe = txx9aclc_pcm_probe,
.remove = txx9aclc_pcm_remove,
- .ops = &txx9aclc_pcm_ops,
- .pcm_new = txx9aclc_pcm_new,
+ .open = txx9aclc_pcm_open,
+ .close = txx9aclc_pcm_close,
+ .ioctl = snd_soc_pcm_lib_ioctl,
+ .hw_params = txx9aclc_pcm_hw_params,
+ .hw_free = txx9aclc_pcm_hw_free,
+ .prepare = txx9aclc_pcm_prepare,
+ .trigger = txx9aclc_pcm_trigger,
+ .pointer = txx9aclc_pcm_pointer,
+ .pcm_construct = txx9aclc_pcm_new,
};
static int txx9aclc_soc_platform_probe(struct platform_device *pdev)
diff --git a/sound/soc/uniphier/aio-dma.c b/sound/soc/uniphier/aio-dma.c
index e8446cc4e8f8..700d936ed94e 100644
--- a/sound/soc/uniphier/aio-dma.c
+++ b/sound/soc/uniphier/aio-dma.c
@@ -93,7 +93,8 @@ static irqreturn_t aiodma_irq(int irq, void *p)
return ret;
}
-static int uniphier_aiodma_open(struct snd_pcm_substream *substream)
+static int uniphier_aiodma_open(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
@@ -103,7 +104,8 @@ static int uniphier_aiodma_open(struct snd_pcm_substream *substream)
SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 256);
}
-static int uniphier_aiodma_hw_params(struct snd_pcm_substream *substream,
+static int uniphier_aiodma_hw_params(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
@@ -112,7 +114,8 @@ static int uniphier_aiodma_hw_params(struct snd_pcm_substream *substream,
return 0;
}
-static int uniphier_aiodma_hw_free(struct snd_pcm_substream *substream)
+static int uniphier_aiodma_hw_free(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
snd_pcm_set_runtime_buffer(substream, NULL);
substream->runtime->dma_bytes = 0;
@@ -120,7 +123,8 @@ static int uniphier_aiodma_hw_free(struct snd_pcm_substream *substream)
return 0;
}
-static int uniphier_aiodma_prepare(struct snd_pcm_substream *substream)
+static int uniphier_aiodma_prepare(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_soc_pcm_runtime *rtd = snd_pcm_substream_chip(substream);
@@ -146,7 +150,8 @@ static int uniphier_aiodma_prepare(struct snd_pcm_substream *substream)
return 0;
}
-static int uniphier_aiodma_trigger(struct snd_pcm_substream *substream, int cmd)
+static int uniphier_aiodma_trigger(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream, int cmd)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_soc_pcm_runtime *rtd = snd_pcm_substream_chip(substream);
@@ -181,6 +186,7 @@ static int uniphier_aiodma_trigger(struct snd_pcm_substream *substream, int cmd)
}
static snd_pcm_uframes_t uniphier_aiodma_pointer(
+ struct snd_soc_component *component,
struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
@@ -204,7 +210,8 @@ static snd_pcm_uframes_t uniphier_aiodma_pointer(
return pos;
}
-static int uniphier_aiodma_mmap(struct snd_pcm_substream *substream,
+static int uniphier_aiodma_mmap(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
struct vm_area_struct *vma)
{
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
@@ -214,18 +221,8 @@ static int uniphier_aiodma_mmap(struct snd_pcm_substream *substream,
vma->vm_end - vma->vm_start, vma->vm_page_prot);
}
-static const struct snd_pcm_ops uniphier_aiodma_ops = {
- .open = uniphier_aiodma_open,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = uniphier_aiodma_hw_params,
- .hw_free = uniphier_aiodma_hw_free,
- .prepare = uniphier_aiodma_prepare,
- .trigger = uniphier_aiodma_trigger,
- .pointer = uniphier_aiodma_pointer,
- .mmap = uniphier_aiodma_mmap,
-};
-
-static int uniphier_aiodma_new(struct snd_soc_pcm_runtime *rtd)
+static int uniphier_aiodma_new(struct snd_soc_component *component,
+ struct snd_soc_pcm_runtime *rtd)
{
struct device *dev = rtd->card->snd_card->dev;
struct snd_pcm *pcm = rtd->pcm;
@@ -242,16 +239,24 @@ static int uniphier_aiodma_new(struct snd_soc_pcm_runtime *rtd)
return 0;
}
-static void uniphier_aiodma_free(struct snd_pcm *pcm)
+static void uniphier_aiodma_free(struct snd_soc_component *component,
+ struct snd_pcm *pcm)
{
snd_pcm_lib_preallocate_free_for_all(pcm);
}
static const struct snd_soc_component_driver uniphier_soc_platform = {
- .pcm_new = uniphier_aiodma_new,
- .pcm_free = uniphier_aiodma_free,
- .ops = &uniphier_aiodma_ops,
- .compr_ops = &uniphier_aio_compr_ops,
+ .open = uniphier_aiodma_open,
+ .ioctl = snd_soc_pcm_lib_ioctl,
+ .hw_params = uniphier_aiodma_hw_params,
+ .hw_free = uniphier_aiodma_hw_free,
+ .prepare = uniphier_aiodma_prepare,
+ .trigger = uniphier_aiodma_trigger,
+ .pointer = uniphier_aiodma_pointer,
+ .mmap = uniphier_aiodma_mmap,
+ .pcm_construct = uniphier_aiodma_new,
+ .pcm_destruct = uniphier_aiodma_free,
+ .compr_ops = &uniphier_aio_compr_ops,
};
static const struct regmap_config aiodma_regmap_config = {
diff --git a/sound/soc/ux500/ux500_msp_i2s.c b/sound/soc/ux500/ux500_msp_i2s.c
index a90e0d7f0b73..394d8b2a4a16 100644
--- a/sound/soc/ux500/ux500_msp_i2s.c
+++ b/sound/soc/ux500/ux500_msp_i2s.c
@@ -533,7 +533,6 @@ static void disable_msp_tx(struct ux500_msp *msp)
static int disable_msp(struct ux500_msp *msp, unsigned int dir)
{
u32 reg_val_GCR;
- int status = 0;
unsigned int disable_tx, disable_rx;
reg_val_GCR = readl(msp->registers + MSP_GCR);
@@ -566,7 +565,7 @@ static int disable_msp(struct ux500_msp *msp, unsigned int dir)
else if (disable_rx)
disable_msp_rx(msp);
- return status;
+ return 0;
}
int ux500_msp_i2s_trigger(struct ux500_msp *msp, int cmd, int direction)
diff --git a/sound/soc/xilinx/Kconfig b/sound/soc/xilinx/Kconfig
index 69973179ef15..1d3586b68db7 100644
--- a/sound/soc/xilinx/Kconfig
+++ b/sound/soc/xilinx/Kconfig
@@ -9,15 +9,15 @@ config SND_SOC_XILINX_I2S
encapsulates PCM in AES format and sends AES data.
config SND_SOC_XILINX_AUDIO_FORMATTER
- tristate "Audio support for the the Xilinx audio formatter"
- help
- Select this option to enable Xilinx audio formatter
- support. This provides DMA platform device support for
- audio functionality.
+ tristate "Audio support for the the Xilinx audio formatter"
+ help
+ Select this option to enable Xilinx audio formatter
+ support. This provides DMA platform device support for
+ audio functionality.
config SND_SOC_XILINX_SPDIF
- tristate "Audio support for the the Xilinx SPDIF"
- help
- Select this option to enable Xilinx SPDIF Audio.
- This provides playback and capture of SPDIF audio in
- AES format.
+ tristate "Audio support for the the Xilinx SPDIF"
+ help
+ Select this option to enable Xilinx SPDIF Audio.
+ This provides playback and capture of SPDIF audio in
+ AES format.
diff --git a/sound/soc/xilinx/xlnx_formatter_pcm.c b/sound/soc/xilinx/xlnx_formatter_pcm.c
index 48970efe7838..296c4caf96a0 100644
--- a/sound/soc/xilinx/xlnx_formatter_pcm.c
+++ b/sound/soc/xilinx/xlnx_formatter_pcm.c
@@ -313,16 +313,14 @@ static irqreturn_t xlnx_s2mm_irq_handler(int irq, void *arg)
return IRQ_NONE;
}
-static int xlnx_formatter_pcm_open(struct snd_pcm_substream *substream)
+static int xlnx_formatter_pcm_open(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
int err;
u32 val, data_format_mode;
u32 ch_count_mask, ch_count_shift, data_xfer_mode, data_xfer_shift;
struct xlnx_pcm_stream_param *stream_data;
struct snd_pcm_runtime *runtime = substream->runtime;
- struct snd_soc_pcm_runtime *prtd = substream->private_data;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(prtd,
- DRV_NAME);
struct xlnx_pcm_drv_data *adata = dev_get_drvdata(component->dev);
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
@@ -387,14 +385,12 @@ static int xlnx_formatter_pcm_open(struct snd_pcm_substream *substream)
return 0;
}
-static int xlnx_formatter_pcm_close(struct snd_pcm_substream *substream)
+static int xlnx_formatter_pcm_close(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
int ret;
struct xlnx_pcm_stream_param *stream_data =
substream->runtime->private_data;
- struct snd_soc_pcm_runtime *prtd = substream->private_data;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(prtd,
- DRV_NAME);
ret = xlnx_formatter_pcm_reset(stream_data->mmio);
if (ret) {
@@ -409,7 +405,8 @@ err_reset:
}
static snd_pcm_uframes_t
-xlnx_formatter_pcm_pointer(struct snd_pcm_substream *substream)
+xlnx_formatter_pcm_pointer(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
u32 pos;
struct snd_pcm_runtime *runtime = substream->runtime;
@@ -423,16 +420,14 @@ xlnx_formatter_pcm_pointer(struct snd_pcm_substream *substream)
return bytes_to_frames(runtime, pos);
}
-static int xlnx_formatter_pcm_hw_params(struct snd_pcm_substream *substream,
+static int xlnx_formatter_pcm_hw_params(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
u32 low, high, active_ch, val, bytes_per_ch, bits_per_sample;
u32 aes_reg1_val, aes_reg2_val;
int status;
u64 size;
- struct snd_soc_pcm_runtime *prtd = substream->private_data;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(prtd,
- DRV_NAME);
struct snd_pcm_runtime *runtime = substream->runtime;
struct xlnx_pcm_stream_param *stream_data = runtime->private_data;
@@ -500,12 +495,14 @@ static int xlnx_formatter_pcm_hw_params(struct snd_pcm_substream *substream,
return 0;
}
-static int xlnx_formatter_pcm_hw_free(struct snd_pcm_substream *substream)
+static int xlnx_formatter_pcm_hw_free(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
return snd_pcm_lib_free_pages(substream);
}
-static int xlnx_formatter_pcm_trigger(struct snd_pcm_substream *substream,
+static int xlnx_formatter_pcm_trigger(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
int cmd)
{
u32 val;
@@ -532,10 +529,9 @@ static int xlnx_formatter_pcm_trigger(struct snd_pcm_substream *substream,
return 0;
}
-static int xlnx_formatter_pcm_new(struct snd_soc_pcm_runtime *rtd)
+static int xlnx_formatter_pcm_new(struct snd_soc_component *component,
+ struct snd_soc_pcm_runtime *rtd)
{
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd,
- DRV_NAME);
snd_pcm_lib_preallocate_pages_for_all(rtd->pcm,
SNDRV_DMA_TYPE_DEV, component->dev,
xlnx_pcm_hardware.buffer_bytes_max,
@@ -543,20 +539,16 @@ static int xlnx_formatter_pcm_new(struct snd_soc_pcm_runtime *rtd)
return 0;
}
-static const struct snd_pcm_ops xlnx_formatter_pcm_ops = {
- .open = xlnx_formatter_pcm_open,
- .close = xlnx_formatter_pcm_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = xlnx_formatter_pcm_hw_params,
- .hw_free = xlnx_formatter_pcm_hw_free,
- .trigger = xlnx_formatter_pcm_trigger,
- .pointer = xlnx_formatter_pcm_pointer,
-};
-
static const struct snd_soc_component_driver xlnx_asoc_component = {
- .name = DRV_NAME,
- .ops = &xlnx_formatter_pcm_ops,
- .pcm_new = xlnx_formatter_pcm_new,
+ .name = DRV_NAME,
+ .open = xlnx_formatter_pcm_open,
+ .close = xlnx_formatter_pcm_close,
+ .ioctl = snd_soc_pcm_lib_ioctl,
+ .hw_params = xlnx_formatter_pcm_hw_params,
+ .hw_free = xlnx_formatter_pcm_hw_free,
+ .trigger = xlnx_formatter_pcm_trigger,
+ .pointer = xlnx_formatter_pcm_pointer,
+ .pcm_construct = xlnx_formatter_pcm_new,
};
static int xlnx_formatter_pcm_probe(struct platform_device *pdev)
@@ -564,7 +556,6 @@ static int xlnx_formatter_pcm_probe(struct platform_device *pdev)
int ret;
u32 val;
struct xlnx_pcm_drv_data *aud_drv_data;
- struct resource *res;
struct device *dev = &pdev->dev;
aud_drv_data = devm_kzalloc(dev, sizeof(*aud_drv_data), GFP_KERNEL);
@@ -584,13 +575,7 @@ static int xlnx_formatter_pcm_probe(struct platform_device *pdev)
return ret;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(dev, "audio formatter node:addr to resource failed\n");
- ret = -ENXIO;
- goto clk_err;
- }
- aud_drv_data->mmio = devm_ioremap_resource(dev, res);
+ aud_drv_data->mmio = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(aud_drv_data->mmio)) {
dev_err(dev, "audio formatter ioremap failed\n");
ret = PTR_ERR(aud_drv_data->mmio);
diff --git a/sound/soc/xtensa/xtfpga-i2s.c b/sound/soc/xtensa/xtfpga-i2s.c
index efd374f114a0..e08f4fee932a 100644
--- a/sound/soc/xtensa/xtfpga-i2s.c
+++ b/sound/soc/xtensa/xtfpga-i2s.c
@@ -365,7 +365,8 @@ static const struct snd_pcm_hardware xtfpga_pcm_hardware = {
.fifo_size = 16,
};
-static int xtfpga_pcm_open(struct snd_pcm_substream *substream)
+static int xtfpga_pcm_open(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_soc_pcm_runtime *rtd = substream->private_data;
@@ -378,13 +379,15 @@ static int xtfpga_pcm_open(struct snd_pcm_substream *substream)
return 0;
}
-static int xtfpga_pcm_close(struct snd_pcm_substream *substream)
+static int xtfpga_pcm_close(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
synchronize_rcu();
return 0;
}
-static int xtfpga_pcm_hw_params(struct snd_pcm_substream *substream,
+static int xtfpga_pcm_hw_params(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *hw_params)
{
int ret;
@@ -424,7 +427,8 @@ static int xtfpga_pcm_hw_params(struct snd_pcm_substream *substream,
return ret;
}
-static int xtfpga_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+static int xtfpga_pcm_trigger(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream, int cmd)
{
int ret = 0;
struct snd_pcm_runtime *runtime = substream->runtime;
@@ -452,7 +456,8 @@ static int xtfpga_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
return ret;
}
-static snd_pcm_uframes_t xtfpga_pcm_pointer(struct snd_pcm_substream *substream)
+static snd_pcm_uframes_t xtfpga_pcm_pointer(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct xtfpga_i2s *i2s = runtime->private_data;
@@ -461,7 +466,8 @@ static snd_pcm_uframes_t xtfpga_pcm_pointer(struct snd_pcm_substream *substream)
return pos < runtime->buffer_size ? pos : 0;
}
-static int xtfpga_pcm_new(struct snd_soc_pcm_runtime *rtd)
+static int xtfpga_pcm_new(struct snd_soc_component *component,
+ struct snd_soc_pcm_runtime *rtd)
{
struct snd_card *card = rtd->card->snd_card;
size_t size = xtfpga_pcm_hardware.buffer_bytes_max;
@@ -471,19 +477,15 @@ static int xtfpga_pcm_new(struct snd_soc_pcm_runtime *rtd)
return 0;
}
-static const struct snd_pcm_ops xtfpga_pcm_ops = {
+static const struct snd_soc_component_driver xtfpga_i2s_component = {
+ .name = DRV_NAME,
.open = xtfpga_pcm_open,
.close = xtfpga_pcm_close,
- .ioctl = snd_pcm_lib_ioctl,
+ .ioctl = snd_soc_pcm_lib_ioctl,
.hw_params = xtfpga_pcm_hw_params,
.trigger = xtfpga_pcm_trigger,
.pointer = xtfpga_pcm_pointer,
-};
-
-static const struct snd_soc_component_driver xtfpga_i2s_component = {
- .name = DRV_NAME,
- .pcm_new = xtfpga_pcm_new,
- .ops = &xtfpga_pcm_ops,
+ .pcm_construct = xtfpga_pcm_new,
};
static const struct snd_soc_dai_ops xtfpga_i2s_dai_ops = {
diff --git a/sound/soc/zte/Kconfig b/sound/soc/zte/Kconfig
index a7842e4b791c..a23d4f13ca19 100644
--- a/sound/soc/zte/Kconfig
+++ b/sound/soc/zte/Kconfig
@@ -18,9 +18,9 @@ config ZX_I2S
ZTE ZX I2S interface
config ZX_TDM
- tristate "ZTE ZX TDM Driver Support"
- depends on COMMON_CLK
- select SND_SOC_GENERIC_DMAENGINE_PCM
- help
- Say Y or M if you want to add support for codecs attached to the
- ZTE ZX TDM interface
+ tristate "ZTE ZX TDM Driver Support"
+ depends on COMMON_CLK
+ select SND_SOC_GENERIC_DMAENGINE_PCM
+ help
+ Say Y or M if you want to add support for codecs attached to the
+ ZTE ZX TDM interface
diff --git a/sound/sparc/amd7930.c b/sound/sparc/amd7930.c
index 441222c8e223..d4b8ccc61dc2 100644
--- a/sound/sparc/amd7930.c
+++ b/sound/sparc/amd7930.c
@@ -777,7 +777,7 @@ static int snd_amd7930_pcm(struct snd_amd7930 *amd)
amd->pcm = pcm;
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS,
- snd_dma_continuous_data(GFP_KERNEL),
+ NULL,
64*1024, 64*1024);
return 0;
diff --git a/sound/sparc/dbri.c b/sound/sparc/dbri.c
index 6e065d44060e..4911103421ff 100644
--- a/sound/sparc/dbri.c
+++ b/sound/sparc/dbri.c
@@ -2249,7 +2249,7 @@ static int snd_dbri_pcm(struct snd_card *card)
strcpy(pcm->name, card->shortname);
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS,
- snd_dma_continuous_data(GFP_KERNEL),
+ NULL,
64 * 1024, 64 * 1024);
return 0;
}
diff --git a/sound/usb/6fire/pcm.c b/sound/usb/6fire/pcm.c
index 88ac1c4ee163..cdc5dd7fbe16 100644
--- a/sound/usb/6fire/pcm.c
+++ b/sound/usb/6fire/pcm.c
@@ -449,13 +449,13 @@ static int usb6fire_pcm_close(struct snd_pcm_substream *alsa_sub)
static int usb6fire_pcm_hw_params(struct snd_pcm_substream *alsa_sub,
struct snd_pcm_hw_params *hw_params)
{
- return snd_pcm_lib_alloc_vmalloc_buffer(alsa_sub,
- params_buffer_bytes(hw_params));
+ return snd_pcm_lib_malloc_pages(alsa_sub,
+ params_buffer_bytes(hw_params));
}
static int usb6fire_pcm_hw_free(struct snd_pcm_substream *alsa_sub)
{
- return snd_pcm_lib_free_vmalloc_buffer(alsa_sub);
+ return snd_pcm_lib_free_pages(alsa_sub);
}
static int usb6fire_pcm_prepare(struct snd_pcm_substream *alsa_sub)
@@ -560,7 +560,6 @@ static const struct snd_pcm_ops pcm_ops = {
.prepare = usb6fire_pcm_prepare,
.trigger = usb6fire_pcm_trigger,
.pointer = usb6fire_pcm_pointer,
- .page = snd_pcm_lib_get_vmalloc_page,
};
static void usb6fire_pcm_init_urb(struct pcm_urb *urb,
@@ -659,14 +658,9 @@ int usb6fire_pcm_init(struct sfire_chip *chip)
strcpy(pcm->name, "DMX 6Fire USB");
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &pcm_ops);
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &pcm_ops);
+ snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_VMALLOC,
+ NULL, 0, 0);
- if (ret) {
- usb6fire_pcm_buffers_destroy(rt);
- kfree(rt);
- dev_err(&chip->dev->dev,
- "error preallocating pcm buffers.\n");
- return ret;
- }
rt->instance = pcm;
chip->pcm = rt;
diff --git a/sound/usb/Kconfig b/sound/usb/Kconfig
index e2c53a0841da..059242f15d75 100644
--- a/sound/usb/Kconfig
+++ b/sound/usb/Kconfig
@@ -107,24 +107,24 @@ config SND_USB_US122L
will be called snd-usb-us122l.
config SND_USB_6FIRE
- tristate "TerraTec DMX 6Fire USB"
- select FW_LOADER
- select BITREVERSE
- select SND_RAWMIDI
- select SND_PCM
- select SND_VMASTER
- help
- Say Y here to include support for TerraTec 6fire DMX USB interface.
-
- You will need firmware files in order to be able to use the device
- after it has been coldstarted. An install script for the firmware
- and further help can be found at
- http://sixfireusb.sourceforge.net
+ tristate "TerraTec DMX 6Fire USB"
+ select FW_LOADER
+ select BITREVERSE
+ select SND_RAWMIDI
+ select SND_PCM
+ select SND_VMASTER
+ help
+ Say Y here to include support for the TerraTec 6fire DMX USB interface.
+
+ You will need firmware files in order to be able to use the device
+ after it has been coldstarted. An install script for the firmware
+ and further help can be found at
+ http://sixfireusb.sourceforge.net
config SND_USB_HIFACE
- tristate "M2Tech hiFace USB-SPDIF driver"
- select SND_PCM
- help
+ tristate "M2Tech hiFace USB-SPDIF driver"
+ select SND_PCM
+ help
Select this option to include support for M2Tech hiFace USB-SPDIF
interface.
diff --git a/sound/usb/caiaq/audio.c b/sound/usb/caiaq/audio.c
index 444bb637ce13..970eb0865ba3 100644
--- a/sound/usb/caiaq/audio.c
+++ b/sound/usb/caiaq/audio.c
@@ -170,15 +170,14 @@ static int snd_usb_caiaq_substream_close(struct snd_pcm_substream *substream)
static int snd_usb_caiaq_pcm_hw_params(struct snd_pcm_substream *sub,
struct snd_pcm_hw_params *hw_params)
{
- return snd_pcm_lib_alloc_vmalloc_buffer(sub,
- params_buffer_bytes(hw_params));
+ return snd_pcm_lib_malloc_pages(sub, params_buffer_bytes(hw_params));
}
static int snd_usb_caiaq_pcm_hw_free(struct snd_pcm_substream *sub)
{
struct snd_usb_caiaqdev *cdev = snd_pcm_substream_chip(sub);
deactivate_substream(cdev, sub);
- return snd_pcm_lib_free_vmalloc_buffer(sub);
+ return snd_pcm_lib_free_pages(sub);
}
/* this should probably go upstream */
@@ -334,7 +333,6 @@ static const struct snd_pcm_ops snd_usb_caiaq_ops = {
.prepare = snd_usb_caiaq_pcm_prepare,
.trigger = snd_usb_caiaq_pcm_trigger,
.pointer = snd_usb_caiaq_pcm_pointer,
- .page = snd_pcm_lib_get_vmalloc_page,
};
static void check_for_elapsed_periods(struct snd_usb_caiaqdev *cdev,
@@ -843,6 +841,8 @@ int snd_usb_caiaq_audio_init(struct snd_usb_caiaqdev *cdev)
&snd_usb_caiaq_ops);
snd_pcm_set_ops(cdev->pcm, SNDRV_PCM_STREAM_CAPTURE,
&snd_usb_caiaq_ops);
+ snd_pcm_lib_preallocate_pages_for_all(cdev->pcm, SNDRV_DMA_TYPE_VMALLOC,
+ NULL, 0, 0);
cdev->data_cb_info =
kmalloc_array(N_URBS, sizeof(struct snd_usb_caiaq_cb_info),
diff --git a/sound/usb/card.c b/sound/usb/card.c
index db91dc76cc91..9f743ebae615 100644
--- a/sound/usb/card.c
+++ b/sound/usb/card.c
@@ -74,6 +74,7 @@ static bool autoclock = true;
static char *quirk_alias[SNDRV_CARDS];
bool snd_usb_use_vmalloc = true;
+bool snd_usb_skip_validation;
module_param_array(index, int, NULL, 0444);
MODULE_PARM_DESC(index, "Index value for the USB audio adapter.");
@@ -96,6 +97,8 @@ module_param_array(quirk_alias, charp, NULL, 0444);
MODULE_PARM_DESC(quirk_alias, "Quirk aliases, e.g. 0123abcd:5678beef.");
module_param_named(use_vmalloc, snd_usb_use_vmalloc, bool, 0444);
MODULE_PARM_DESC(use_vmalloc, "Use vmalloc for PCM intermediate buffers (default: yes).");
+module_param_named(skip_validation, snd_usb_skip_validation, bool, 0444);
+MODULE_PARM_DESC(skip_validation, "Skip unit descriptor validation (default: no).");
/*
* we keep the snd_usb_audio_t instances by ourselves for merging
diff --git a/sound/usb/clock.c b/sound/usb/clock.c
index 6b8c14f9b5d4..018b1ecb5404 100644
--- a/sound/usb/clock.c
+++ b/sound/usb/clock.c
@@ -165,21 +165,21 @@ static bool uac_clock_source_is_valid(struct snd_usb_audio *chip,
snd_usb_find_clock_source_v3(chip->ctrl_intf, source_id);
if (!cs_desc)
- return 0;
+ return false;
bmControls = le32_to_cpu(cs_desc->bmControls);
} else { /* UAC_VERSION_1/2 */
struct uac_clock_source_descriptor *cs_desc =
snd_usb_find_clock_source(chip->ctrl_intf, source_id);
if (!cs_desc)
- return 0;
+ return false;
bmControls = cs_desc->bmControls;
}
/* If a clock source can't tell us whether it's valid, we assume it is */
if (!uac_v2v3_control_is_readable(bmControls,
UAC2_CS_CONTROL_CLOCK_VALID))
- return 1;
+ return true;
err = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), UAC2_CS_CUR,
USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
@@ -191,10 +191,10 @@ static bool uac_clock_source_is_valid(struct snd_usb_audio *chip,
dev_warn(&dev->dev,
"%s(): cannot get clock validity for id %d\n",
__func__, source_id);
- return 0;
+ return false;
}
- return !!data;
+ return data ? true : false;
}
static int __uac_clock_find_source(struct snd_usb_audio *chip, int entity_id,
diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
index a2ab8e8d3a93..4a9a2f6ef5a4 100644
--- a/sound/usb/endpoint.c
+++ b/sound/usb/endpoint.c
@@ -388,6 +388,9 @@ static void snd_complete_urb(struct urb *urb)
}
prepare_outbound_urb(ep, ctx);
+ /* can be stopped during prepare callback */
+ if (unlikely(!test_bit(EP_FLAG_RUNNING, &ep->flags)))
+ goto exit_clear;
} else {
retire_inbound_urb(ep, ctx);
/* can be stopped during retire callback */
diff --git a/sound/usb/hiface/pcm.c b/sound/usb/hiface/pcm.c
index c406497c5919..e0de71917274 100644
--- a/sound/usb/hiface/pcm.c
+++ b/sound/usb/hiface/pcm.c
@@ -418,13 +418,13 @@ static int hiface_pcm_close(struct snd_pcm_substream *alsa_sub)
static int hiface_pcm_hw_params(struct snd_pcm_substream *alsa_sub,
struct snd_pcm_hw_params *hw_params)
{
- return snd_pcm_lib_alloc_vmalloc_buffer(alsa_sub,
- params_buffer_bytes(hw_params));
+ return snd_pcm_lib_malloc_pages(alsa_sub,
+ params_buffer_bytes(hw_params));
}
static int hiface_pcm_hw_free(struct snd_pcm_substream *alsa_sub)
{
- return snd_pcm_lib_free_vmalloc_buffer(alsa_sub);
+ return snd_pcm_lib_free_pages(alsa_sub);
}
static int hiface_pcm_prepare(struct snd_pcm_substream *alsa_sub)
@@ -518,7 +518,6 @@ static const struct snd_pcm_ops pcm_ops = {
.prepare = hiface_pcm_prepare,
.trigger = hiface_pcm_trigger,
.pointer = hiface_pcm_pointer,
- .page = snd_pcm_lib_get_vmalloc_page,
};
static int hiface_pcm_init_urb(struct pcm_urb *urb,
@@ -614,6 +613,8 @@ int hiface_pcm_init(struct hiface_chip *chip, u8 extra_freq)
strlcpy(pcm->name, "USB-SPDIF Audio", sizeof(pcm->name));
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &pcm_ops);
+ snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_VMALLOC,
+ NULL, 0, 0);
rt->instance = pcm;
diff --git a/sound/usb/line6/pcm.c b/sound/usb/line6/pcm.c
index f70211e6b174..9c437c716cfd 100644
--- a/sound/usb/line6/pcm.c
+++ b/sound/usb/line6/pcm.c
@@ -502,9 +502,7 @@ static int snd_line6_new_pcm(struct usb_line6 *line6, struct snd_pcm **pcm_ret)
/* pre-allocation of buffers */
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS,
- snd_dma_continuous_data
- (GFP_KERNEL), 64 * 1024,
- 128 * 1024);
+ NULL, 64 * 1024, 128 * 1024);
return 0;
}
diff --git a/sound/usb/misc/ua101.c b/sound/usb/misc/ua101.c
index 307b72d5fffa..566a4a31528a 100644
--- a/sound/usb/misc/ua101.c
+++ b/sound/usb/misc/ua101.c
@@ -733,8 +733,8 @@ static int capture_pcm_hw_params(struct snd_pcm_substream *substream,
if (err < 0)
return err;
- return snd_pcm_lib_alloc_vmalloc_buffer(substream,
- params_buffer_bytes(hw_params));
+ return snd_pcm_lib_malloc_pages(substream,
+ params_buffer_bytes(hw_params));
}
static int playback_pcm_hw_params(struct snd_pcm_substream *substream,
@@ -751,13 +751,13 @@ static int playback_pcm_hw_params(struct snd_pcm_substream *substream,
if (err < 0)
return err;
- return snd_pcm_lib_alloc_vmalloc_buffer(substream,
- params_buffer_bytes(hw_params));
+ return snd_pcm_lib_malloc_pages(substream,
+ params_buffer_bytes(hw_params));
}
static int ua101_pcm_hw_free(struct snd_pcm_substream *substream)
{
- return snd_pcm_lib_free_vmalloc_buffer(substream);
+ return snd_pcm_lib_free_pages(substream);
}
static int capture_pcm_prepare(struct snd_pcm_substream *substream)
@@ -889,7 +889,6 @@ static const struct snd_pcm_ops capture_pcm_ops = {
.prepare = capture_pcm_prepare,
.trigger = capture_pcm_trigger,
.pointer = capture_pcm_pointer,
- .page = snd_pcm_lib_get_vmalloc_page,
};
static const struct snd_pcm_ops playback_pcm_ops = {
@@ -901,7 +900,6 @@ static const struct snd_pcm_ops playback_pcm_ops = {
.prepare = playback_pcm_prepare,
.trigger = playback_pcm_trigger,
.pointer = playback_pcm_pointer,
- .page = snd_pcm_lib_get_vmalloc_page,
};
static const struct uac_format_type_i_discrete_descriptor *
@@ -1296,6 +1294,8 @@ static int ua101_probe(struct usb_interface *interface,
strcpy(ua->pcm->name, name);
snd_pcm_set_ops(ua->pcm, SNDRV_PCM_STREAM_PLAYBACK, &playback_pcm_ops);
snd_pcm_set_ops(ua->pcm, SNDRV_PCM_STREAM_CAPTURE, &capture_pcm_ops);
+ snd_pcm_lib_preallocate_pages_for_all(ua->pcm, SNDRV_DMA_TYPE_VMALLOC,
+ NULL, 0, 0);
err = snd_usbmidi_create(card, ua->intf[INTF_MIDI],
&ua->midi_list, &midi_quirk);
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index 3fd1d1749edf..6cd4ff09c5ee 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -1229,7 +1229,8 @@ static int get_min_max_with_quirks(struct usb_mixer_elem_info *cval,
if (cval->min + cval->res < cval->max) {
int last_valid_res = cval->res;
int saved, test, check;
- get_cur_mix_raw(cval, minchn, &saved);
+ if (get_cur_mix_raw(cval, minchn, &saved) < 0)
+ goto no_res_check;
for (;;) {
test = saved;
if (test < cval->max)
@@ -1249,6 +1250,7 @@ static int get_min_max_with_quirks(struct usb_mixer_elem_info *cval,
snd_usb_set_cur_mix_value(cval, minchn, 0, saved);
}
+no_res_check:
cval->initialized = 1;
}
@@ -2928,6 +2930,9 @@ static int snd_usb_mixer_controls_badd(struct usb_mixer_interface *mixer,
continue;
iface = usb_ifnum_to_if(dev, intf);
+ if (!iface)
+ continue;
+
num = iface->num_altsetting;
if (num < 2)
diff --git a/sound/usb/mixer_scarlett.c b/sound/usb/mixer_scarlett.c
index 83715fd8dfd6..9d10cbf1b5ed 100644
--- a/sound/usb/mixer_scarlett.c
+++ b/sound/usb/mixer_scarlett.c
@@ -142,6 +142,7 @@ enum {
SCARLETT_OUTPUTS,
SCARLETT_SWITCH_IMPEDANCE,
SCARLETT_SWITCH_PAD,
+ SCARLETT_SWITCH_GAIN,
};
enum {
@@ -192,6 +193,15 @@ static const struct scarlett_mixer_elem_enum_info opt_pad = {
}
};
+static const struct scarlett_mixer_elem_enum_info opt_gain = {
+ .start = 0,
+ .len = 2,
+ .offsets = {},
+ .names = (char const * const []){
+ "Lo", "Hi"
+ }
+};
+
static const struct scarlett_mixer_elem_enum_info opt_impedance = {
.start = 0,
.len = 2,
@@ -652,8 +662,8 @@ static struct scarlett_device_info s6i6_info = {
{ .num = 1, .type = SCARLETT_SWITCH_PAD, .name = NULL},
{ .num = 2, .type = SCARLETT_SWITCH_IMPEDANCE, .name = NULL},
{ .num = 2, .type = SCARLETT_SWITCH_PAD, .name = NULL},
- { .num = 3, .type = SCARLETT_SWITCH_PAD, .name = NULL},
- { .num = 4, .type = SCARLETT_SWITCH_PAD, .name = NULL},
+ { .num = 3, .type = SCARLETT_SWITCH_GAIN, .name = NULL},
+ { .num = 4, .type = SCARLETT_SWITCH_GAIN, .name = NULL},
},
.matrix_mux_init = {
@@ -883,6 +893,15 @@ static int scarlett_controls_create_generic(struct usb_mixer_interface *mixer,
if (err < 0)
return err;
break;
+ case SCARLETT_SWITCH_GAIN:
+ sprintf(mx, "Input %d Gain Switch", ctl->num);
+ err = add_new_ctl(mixer, &usb_scarlett_ctl_enum,
+ scarlett_ctl_enum_resume, 0x01,
+ 0x08, ctl->num, USB_MIXER_S16, 1, mx,
+ &opt_gain, &elem);
+ if (err < 0)
+ return err;
+ break;
}
}
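
Once registered, the new enum control is reachable through the usual ALSA tooling; a hedged usage example (the card index and input number depend on the system):

    amixer -c 1 cset name='Input 3 Gain Switch' Hi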
diff --git a/sound/usb/mixer_scarlett_gen2.c b/sound/usb/mixer_scarlett_gen2.c
index 7d460b1f1735..94b903d95afa 100644
--- a/sound/usb/mixer_scarlett_gen2.c
+++ b/sound/usb/mixer_scarlett_gen2.c
@@ -261,34 +261,34 @@ static const struct scarlett2_device_info s6i6_gen2_info = {
},
.ports = {
- {
+ [SCARLETT2_PORT_TYPE_NONE] = {
.id = 0x000,
.num = { 1, 0, 8, 8, 8 },
.src_descr = "Off",
.src_num_offset = 0,
},
- {
+ [SCARLETT2_PORT_TYPE_ANALOGUE] = {
.id = 0x080,
.num = { 4, 4, 4, 4, 4 },
.src_descr = "Analogue %d",
.src_num_offset = 1,
.dst_descr = "Analogue Output %02d Playback"
},
- {
+ [SCARLETT2_PORT_TYPE_SPDIF] = {
.id = 0x180,
.num = { 2, 2, 2, 2, 2 },
.src_descr = "S/PDIF %d",
.src_num_offset = 1,
.dst_descr = "S/PDIF Output %d Playback"
},
- {
+ [SCARLETT2_PORT_TYPE_MIX] = {
.id = 0x300,
.num = { 10, 18, 18, 18, 18 },
.src_descr = "Mix %c",
.src_num_offset = 65,
.dst_descr = "Mixer Input %02d Capture"
},
- {
+ [SCARLETT2_PORT_TYPE_PCM] = {
.id = 0x600,
.num = { 6, 6, 6, 6, 6 },
.src_descr = "PCM %d",
@@ -317,44 +317,44 @@ static const struct scarlett2_device_info s18i8_gen2_info = {
},
.ports = {
- {
+ [SCARLETT2_PORT_TYPE_NONE] = {
.id = 0x000,
.num = { 1, 0, 8, 8, 4 },
.src_descr = "Off",
.src_num_offset = 0,
},
- {
+ [SCARLETT2_PORT_TYPE_ANALOGUE] = {
.id = 0x080,
.num = { 8, 6, 6, 6, 6 },
.src_descr = "Analogue %d",
.src_num_offset = 1,
.dst_descr = "Analogue Output %02d Playback"
},
- {
+ [SCARLETT2_PORT_TYPE_SPDIF] = {
+ .id = 0x180,
/* S/PDIF outputs aren't available at 192KHz
* but are included in the USB mux I/O
* assignment message anyway
*/
- .id = 0x180,
.num = { 2, 2, 2, 2, 2 },
.src_descr = "S/PDIF %d",
.src_num_offset = 1,
.dst_descr = "S/PDIF Output %d Playback"
},
- {
+ [SCARLETT2_PORT_TYPE_ADAT] = {
.id = 0x200,
.num = { 8, 0, 0, 0, 0 },
.src_descr = "ADAT %d",
.src_num_offset = 1,
},
- {
+ [SCARLETT2_PORT_TYPE_MIX] = {
.id = 0x300,
.num = { 10, 18, 18, 18, 18 },
.src_descr = "Mix %c",
.src_num_offset = 65,
.dst_descr = "Mixer Input %02d Capture"
},
- {
+ [SCARLETT2_PORT_TYPE_PCM] = {
.id = 0x600,
.num = { 20, 18, 18, 14, 10 },
.src_descr = "PCM %d",
@@ -387,20 +387,20 @@ static const struct scarlett2_device_info s18i20_gen2_info = {
},
.ports = {
- {
+ [SCARLETT2_PORT_TYPE_NONE] = {
.id = 0x000,
.num = { 1, 0, 8, 8, 6 },
.src_descr = "Off",
.src_num_offset = 0,
},
- {
+ [SCARLETT2_PORT_TYPE_ANALOGUE] = {
.id = 0x080,
.num = { 8, 10, 10, 10, 10 },
.src_descr = "Analogue %d",
.src_num_offset = 1,
.dst_descr = "Analogue Output %02d Playback"
},
- {
+ [SCARLETT2_PORT_TYPE_SPDIF] = {
/* S/PDIF outputs aren't available at 192KHz
* but are included in the USB mux I/O
* assignment message anyway
@@ -411,21 +411,21 @@ static const struct scarlett2_device_info s18i20_gen2_info = {
.src_num_offset = 1,
.dst_descr = "S/PDIF Output %d Playback"
},
- {
+ [SCARLETT2_PORT_TYPE_ADAT] = {
.id = 0x200,
.num = { 8, 8, 8, 4, 0 },
.src_descr = "ADAT %d",
.src_num_offset = 1,
.dst_descr = "ADAT Output %d Playback"
},
- {
+ [SCARLETT2_PORT_TYPE_MIX] = {
.id = 0x300,
.num = { 10, 18, 18, 18, 18 },
.src_descr = "Mix %c",
.src_num_offset = 65,
.dst_descr = "Mixer Input %02d Capture"
},
- {
+ [SCARLETT2_PORT_TYPE_PCM] = {
.id = 0x600,
.num = { 20, 18, 18, 14, 10 },
.src_descr = "PCM %d",
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index ff5ab24f3bd1..9c8930bb00c8 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -785,12 +785,8 @@ static int snd_usb_hw_params(struct snd_pcm_substream *substream,
if (ret)
return ret;
- if (snd_usb_use_vmalloc)
- ret = snd_pcm_lib_alloc_vmalloc_buffer(substream,
- params_buffer_bytes(hw_params));
- else
- ret = snd_pcm_lib_malloc_pages(substream,
- params_buffer_bytes(hw_params));
+ ret = snd_pcm_lib_malloc_pages(substream,
+ params_buffer_bytes(hw_params));
if (ret < 0)
goto stop_pipeline;
@@ -857,10 +853,7 @@ static int snd_usb_hw_free(struct snd_pcm_substream *substream)
snd_usb_unlock_shutdown(subs->stream->chip);
}
- if (snd_usb_use_vmalloc)
- return snd_pcm_lib_free_vmalloc_buffer(substream);
- else
- return snd_pcm_lib_free_pages(substream);
+ return snd_pcm_lib_free_pages(substream);
}
/*
@@ -1781,7 +1774,6 @@ static const struct snd_pcm_ops snd_usb_playback_ops = {
.prepare = snd_usb_pcm_prepare,
.trigger = snd_usb_substream_playback_trigger,
.pointer = snd_usb_pcm_pointer,
- .page = snd_pcm_lib_get_vmalloc_page,
};
static const struct snd_pcm_ops snd_usb_capture_ops = {
@@ -1793,43 +1785,14 @@ static const struct snd_pcm_ops snd_usb_capture_ops = {
.prepare = snd_usb_pcm_prepare,
.trigger = snd_usb_substream_capture_trigger,
.pointer = snd_usb_pcm_pointer,
- .page = snd_pcm_lib_get_vmalloc_page,
-};
-
-static const struct snd_pcm_ops snd_usb_playback_dev_ops = {
- .open = snd_usb_pcm_open,
- .close = snd_usb_pcm_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = snd_usb_hw_params,
- .hw_free = snd_usb_hw_free,
- .prepare = snd_usb_pcm_prepare,
- .trigger = snd_usb_substream_playback_trigger,
- .pointer = snd_usb_pcm_pointer,
- .page = snd_pcm_sgbuf_ops_page,
-};
-
-static const struct snd_pcm_ops snd_usb_capture_dev_ops = {
- .open = snd_usb_pcm_open,
- .close = snd_usb_pcm_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = snd_usb_hw_params,
- .hw_free = snd_usb_hw_free,
- .prepare = snd_usb_pcm_prepare,
- .trigger = snd_usb_substream_capture_trigger,
- .pointer = snd_usb_pcm_pointer,
- .page = snd_pcm_sgbuf_ops_page,
};
void snd_usb_set_pcm_ops(struct snd_pcm *pcm, int stream)
{
const struct snd_pcm_ops *ops;
- if (snd_usb_use_vmalloc)
- ops = stream == SNDRV_PCM_STREAM_PLAYBACK ?
+ ops = stream == SNDRV_PCM_STREAM_PLAYBACK ?
&snd_usb_playback_ops : &snd_usb_capture_ops;
- else
- ops = stream == SNDRV_PCM_STREAM_PLAYBACK ?
- &snd_usb_playback_dev_ops : &snd_usb_capture_dev_ops;
snd_pcm_set_ops(pcm, stream, ops);
}
@@ -1839,7 +1802,10 @@ void snd_usb_preallocate_buffer(struct snd_usb_substream *subs)
struct snd_pcm_substream *s = pcm->streams[subs->direction].substream;
struct device *dev = subs->dev->bus->controller;
- if (!snd_usb_use_vmalloc)
+ if (snd_usb_use_vmalloc)
+ snd_pcm_lib_preallocate_pages(s, SNDRV_DMA_TYPE_VMALLOC,
+ NULL, 0, 0);
+ else
snd_pcm_lib_preallocate_pages(s, SNDRV_DMA_TYPE_DEV_SG,
dev, 64*1024, 512*1024);
}
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 0bbe1201a6ac..349e1e52996d 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -248,8 +248,8 @@ static int create_yamaha_midi_quirk(struct snd_usb_audio *chip,
NULL, USB_MS_MIDI_OUT_JACK);
if (!injd && !outjd)
return -ENODEV;
- if (!(injd && snd_usb_validate_midi_desc(injd)) ||
- !(outjd && snd_usb_validate_midi_desc(outjd)))
+ if ((injd && !snd_usb_validate_midi_desc(injd)) ||
+ (outjd && !snd_usb_validate_midi_desc(outjd)))
return -ENODEV;
if (injd && (injd->bLength < 5 ||
(injd->bJackType != USB_MS_EMBEDDED &&
diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h
index feb30f9c1716..ff3cbf653de8 100644
--- a/sound/usb/usbaudio.h
+++ b/sound/usb/usbaudio.h
@@ -120,5 +120,6 @@ int snd_usb_lock_shutdown(struct snd_usb_audio *chip);
void snd_usb_unlock_shutdown(struct snd_usb_audio *chip);
extern bool snd_usb_use_vmalloc;
+extern bool snd_usb_skip_validation;
#endif /* __USBAUDIO_H */
diff --git a/sound/usb/usx2y/usbusx2yaudio.c b/sound/usb/usx2y/usbusx2yaudio.c
index 89fa287678fc..25a0939f410a 100644
--- a/sound/usb/usx2y/usbusx2yaudio.c
+++ b/sound/usb/usx2y/usbusx2yaudio.c
@@ -970,13 +970,13 @@ static int usX2Y_audio_stream_new(struct snd_card *card, int playback_endpoint,
if (playback_endpoint) {
snd_pcm_lib_preallocate_pages(pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream,
SNDRV_DMA_TYPE_CONTINUOUS,
- snd_dma_continuous_data(GFP_KERNEL),
+ NULL,
64*1024, 128*1024);
}
snd_pcm_lib_preallocate_pages(pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream,
SNDRV_DMA_TYPE_CONTINUOUS,
- snd_dma_continuous_data(GFP_KERNEL),
+ NULL,
64*1024, 128*1024);
usX2Y(card)->pcm_devs++;
diff --git a/sound/usb/usx2y/usx2yhwdeppcm.c b/sound/usb/usx2y/usx2yhwdeppcm.c
index ac8960b6b299..997493e839ee 100644
--- a/sound/usb/usx2y/usx2yhwdeppcm.c
+++ b/sound/usb/usx2y/usx2yhwdeppcm.c
@@ -728,11 +728,11 @@ int usX2Y_hwdep_pcm_new(struct snd_card *card)
sprintf(pcm->name, NAME_ALLCAPS" hwdep Audio");
snd_pcm_lib_preallocate_pages(pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream,
SNDRV_DMA_TYPE_CONTINUOUS,
- snd_dma_continuous_data(GFP_KERNEL),
+ NULL,
64*1024, 128*1024);
snd_pcm_lib_preallocate_pages(pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream,
SNDRV_DMA_TYPE_CONTINUOUS,
- snd_dma_continuous_data(GFP_KERNEL),
+ NULL,
64*1024, 128*1024);
return 0;
diff --git a/sound/usb/validate.c b/sound/usb/validate.c
index a5e584b60dcd..36ae78c3da3d 100644
--- a/sound/usb/validate.c
+++ b/sound/usb/validate.c
@@ -81,9 +81,9 @@ static bool validate_processing_unit(const void *p,
switch (v->protocol) {
case UAC_VERSION_1:
default:
- /* bNrChannels, wChannelConfig, iChannelNames, bControlSize */
- len += 1 + 2 + 1 + 1;
- if (d->bLength < len) /* bControlSize */
+ /* bNrChannels, wChannelConfig, iChannelNames */
+ len += 1 + 2 + 1;
+ if (d->bLength < len + 1) /* bControlSize */
return false;
m = hdr[len];
len += 1 + m + 1; /* bControlSize, bmControls, iProcessing */
@@ -322,11 +322,28 @@ static bool validate_desc(unsigned char *hdr, int protocol,
bool snd_usb_validate_audio_desc(void *p, int protocol)
{
- return validate_desc(p, protocol, audio_validators);
+ unsigned char *c = p;
+ bool valid;
+
+ valid = validate_desc(p, protocol, audio_validators);
+ if (!valid && snd_usb_skip_validation) {
+ print_hex_dump(KERN_ERR, "USB-audio: buggy audio desc: ",
+ DUMP_PREFIX_NONE, 16, 1, c, c[0], true);
+ valid = true;
+ }
+ return valid;
}
bool snd_usb_validate_midi_desc(void *p)
{
- return validate_desc(p, UAC_VERSION_1, midi_validators);
+ unsigned char *c = p;
+ bool valid;
+
+ valid = validate_desc(p, UAC_VERSION_1, midi_validators);
+ if (!valid && snd_usb_skip_validation) {
+ print_hex_dump(KERN_ERR, "USB-audio: buggy midi desc: ",
+ DUMP_PREFIX_NONE, 16, 1, c, c[0], true);
+ valid = true;
+ }
+ return valid;
}
-
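
The fallback stays opt-in: when a device's unit descriptors trip the validator, loading the driver with the new parameter dumps the offending bytes to the kernel log and continues anyway, e.g.

    modprobe snd-usb-audio skip_validation=1

Validation exists to reject malformed units, so this is a debugging aid rather than a recommended default.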
diff --git a/sound/x86/intel_hdmi_audio.c b/sound/x86/intel_hdmi_audio.c
index 5fd4e32247a6..cd389d21219a 100644
--- a/sound/x86/intel_hdmi_audio.c
+++ b/sound/x86/intel_hdmi_audio.c
@@ -1708,10 +1708,8 @@ static int hdmi_lpe_audio_probe(struct platform_device *pdev)
/* get resources */
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "Could not get irq resource: %d\n", irq);
+ if (irq < 0)
return irq;
- }
res_mmio = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res_mmio) {
diff --git a/tools/arch/x86/include/asm/disabled-features.h b/tools/arch/x86/include/asm/disabled-features.h
index a5ea841cc6d2..8e1d0bb46361 100644
--- a/tools/arch/x86/include/asm/disabled-features.h
+++ b/tools/arch/x86/include/asm/disabled-features.h
@@ -22,7 +22,7 @@
# define DISABLE_SMAP (1<<(X86_FEATURE_SMAP & 31))
#endif
-#ifdef CONFIG_X86_INTEL_UMIP
+#ifdef CONFIG_X86_UMIP
# define DISABLE_UMIP 0
#else
# define DISABLE_UMIP (1<<(X86_FEATURE_UMIP & 31))
diff --git a/tools/arch/x86/include/asm/emulate_prefix.h b/tools/arch/x86/include/asm/emulate_prefix.h
new file mode 100644
index 000000000000..70f5b98a5286
--- /dev/null
+++ b/tools/arch/x86/include/asm/emulate_prefix.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_X86_EMULATE_PREFIX_H
+#define _ASM_X86_EMULATE_PREFIX_H
+
+/*
+ * Virt escape sequences to trigger instruction emulation;
+ * ideally these would decode to 'whole' instruction and not destroy
+ * the instruction stream; sadly this is not true for the 'kvm' one :/
+ */
+
+#define __XEN_EMULATE_PREFIX 0x0f,0x0b,0x78,0x65,0x6e /* ud2 ; .ascii "xen" */
+#define __KVM_EMULATE_PREFIX 0x0f,0x0b,0x6b,0x76,0x6d /* ud2 ; .ascii "kvm" */
+
+#endif
diff --git a/tools/arch/x86/include/asm/insn.h b/tools/arch/x86/include/asm/insn.h
index 37a4c390750b..568854b14d0a 100644
--- a/tools/arch/x86/include/asm/insn.h
+++ b/tools/arch/x86/include/asm/insn.h
@@ -45,6 +45,7 @@ struct insn {
struct insn_field immediate2; /* for 64bit imm or seg16 */
};
+ int emulate_prefix_size;
insn_attr_t attr;
unsigned char opnd_bytes;
unsigned char addr_bytes;
@@ -128,6 +129,11 @@ static inline int insn_is_evex(struct insn *insn)
return (insn->vex_prefix.nbytes == 4);
}
+static inline int insn_has_emulate_prefix(struct insn *insn)
+{
+ return !!insn->emulate_prefix_size;
+}
+
/* Ensure this instruction is decoded completely */
static inline int insn_complete(struct insn *insn)
{
diff --git a/tools/arch/x86/include/asm/irq_vectors.h b/tools/arch/x86/include/asm/irq_vectors.h
new file mode 100644
index 000000000000..889f8b1b5b7f
--- /dev/null
+++ b/tools/arch/x86/include/asm/irq_vectors.h
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_X86_IRQ_VECTORS_H
+#define _ASM_X86_IRQ_VECTORS_H
+
+#include <linux/threads.h>
+/*
+ * Linux IRQ vector layout.
+ *
+ * There are 256 IDT entries (per CPU - each entry is 8 bytes) which can
+ * be defined by Linux. They are used as a jump table by the CPU when a
+ * given vector is triggered - by a CPU-external, CPU-internal or
+ * software-triggered event.
+ *
+ * Linux sets the kernel code address each entry jumps to early during
+ * bootup, and never changes them. This is the general layout of the
+ * IDT entries:
+ *
+ * Vectors 0 ... 31 : system traps and exceptions - hardcoded events
+ * Vectors 32 ... 127 : device interrupts
+ * Vector 128 : legacy int80 syscall interface
+ * Vectors 129 ... LOCAL_TIMER_VECTOR-1
+ * Vectors LOCAL_TIMER_VECTOR ... 255 : special interrupts
+ *
+ * 64-bit x86 has per-CPU IDT tables, 32-bit has one shared IDT table.
+ *
+ * This file enumerates the exact layout of them:
+ */
+
+#define NMI_VECTOR 0x02
+#define MCE_VECTOR 0x12
+
+/*
+ * IDT vectors usable for external interrupt sources start at 0x20.
+ * (0x80 is the syscall vector, 0x30-0x3f are for ISA)
+ */
+#define FIRST_EXTERNAL_VECTOR 0x20
+
+/*
+ * Reserve the lowest usable vector (and hence lowest priority) 0x20 for
+ * triggering cleanup after irq migration. 0x21-0x2f will still be used
+ * for device interrupts.
+ */
+#define IRQ_MOVE_CLEANUP_VECTOR FIRST_EXTERNAL_VECTOR
+
+#define IA32_SYSCALL_VECTOR 0x80
+
+/*
+ * Vectors 0x30-0x3f are used for ISA interrupts.
+ * Round up to the next 16-vector boundary.
+ */
+#define ISA_IRQ_VECTOR(irq) (((FIRST_EXTERNAL_VECTOR + 16) & ~15) + irq)
+
+/*
+ * Special IRQ vectors used by the SMP architecture, 0xf0-0xff
+ *
+ * Some of the following vectors are 'rare'; they are merged
+ * into a single vector (CALL_FUNCTION_VECTOR) to save vector space.
+ * TLB, reschedule and local APIC vectors are performance-critical.
+ */
+
+#define SPURIOUS_APIC_VECTOR 0xff
+/*
+ * Sanity check
+ */
+#if ((SPURIOUS_APIC_VECTOR & 0x0F) != 0x0F)
+# error SPURIOUS_APIC_VECTOR definition error
+#endif
+
+#define ERROR_APIC_VECTOR 0xfe
+#define RESCHEDULE_VECTOR 0xfd
+#define CALL_FUNCTION_VECTOR 0xfc
+#define CALL_FUNCTION_SINGLE_VECTOR 0xfb
+#define THERMAL_APIC_VECTOR 0xfa
+#define THRESHOLD_APIC_VECTOR 0xf9
+#define REBOOT_VECTOR 0xf8
+
+/*
+ * Generic system vector for platform specific use
+ */
+#define X86_PLATFORM_IPI_VECTOR 0xf7
+
+/*
+ * IRQ work vector:
+ */
+#define IRQ_WORK_VECTOR 0xf6
+
+#define UV_BAU_MESSAGE 0xf5
+#define DEFERRED_ERROR_VECTOR 0xf4
+
+/* Vector on which hypervisor callbacks will be delivered */
+#define HYPERVISOR_CALLBACK_VECTOR 0xf3
+
+/* Vector for KVM to deliver posted interrupt IPI */
+#ifdef CONFIG_HAVE_KVM
+#define POSTED_INTR_VECTOR 0xf2
+#define POSTED_INTR_WAKEUP_VECTOR 0xf1
+#define POSTED_INTR_NESTED_VECTOR 0xf0
+#endif
+
+#define MANAGED_IRQ_SHUTDOWN_VECTOR 0xef
+
+#if IS_ENABLED(CONFIG_HYPERV)
+#define HYPERV_REENLIGHTENMENT_VECTOR 0xee
+#define HYPERV_STIMER0_VECTOR 0xed
+#endif
+
+#define LOCAL_TIMER_VECTOR 0xec
+
+#define NR_VECTORS 256
+
+#ifdef CONFIG_X86_LOCAL_APIC
+#define FIRST_SYSTEM_VECTOR LOCAL_TIMER_VECTOR
+#else
+#define FIRST_SYSTEM_VECTOR NR_VECTORS
+#endif
+
+/*
+ * Size the maximum number of interrupts.
+ *
+ * If the irq_desc[] array has a sparse layout, we can size things
+ * generously - it scales linearly with the larger of the maximum
+ * number of CPUs and the maximum number of IO-APICs.
+ *
+ * In other cases we size more conservatively, to not create too large
+ * static arrays.
+ */
+
+#define NR_IRQS_LEGACY 16
+
+#define CPU_VECTOR_LIMIT (64 * NR_CPUS)
+#define IO_APIC_VECTOR_LIMIT (32 * MAX_IO_APICS)
+
+#if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_PCI_MSI)
+#define NR_IRQS \
+ (CPU_VECTOR_LIMIT > IO_APIC_VECTOR_LIMIT ? \
+ (NR_VECTORS + CPU_VECTOR_LIMIT) : \
+ (NR_VECTORS + IO_APIC_VECTOR_LIMIT))
+#elif defined(CONFIG_X86_IO_APIC)
+#define NR_IRQS (NR_VECTORS + IO_APIC_VECTOR_LIMIT)
+#elif defined(CONFIG_PCI_MSI)
+#define NR_IRQS (NR_VECTORS + CPU_VECTOR_LIMIT)
+#else
+#define NR_IRQS NR_IRQS_LEGACY
+#endif
+
+#endif /* _ASM_X86_IRQ_VECTORS_H */
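Editor's note: to make the vector/IRQ arithmetic above concrete, here is a standalone sketch that evaluates ISA_IRQ_VECTOR() and the IO-APIC-plus-MSI branch of NR_IRQS. NR_CPUS and MAX_IO_APICS are illustrative config values, not taken from the patch.

#include <stdio.h>

#define FIRST_EXTERNAL_VECTOR 0x20
#define ISA_IRQ_VECTOR(irq) (((FIRST_EXTERNAL_VECTOR + 16) & ~15) + irq)
#define NR_VECTORS 256
#define NR_CPUS 64              /* illustrative config value */
#define MAX_IO_APICS 128        /* illustrative config value */
#define CPU_VECTOR_LIMIT (64 * NR_CPUS)
#define IO_APIC_VECTOR_LIMIT (32 * MAX_IO_APICS)

int main(void)
{
        /* ISA IRQ 3 lands on vector 0x33: (0x20 + 16) & ~15 == 0x30 */
        printf("ISA_IRQ_VECTOR(3) = 0x%x\n", ISA_IRQ_VECTOR(3));

        /* With IO-APIC + MSI, NR_IRQS takes the larger of the two
         * limits: 256 + max(4096, 4096) = 4352 for these values. */
        int nr_irqs = NR_VECTORS + (CPU_VECTOR_LIMIT > IO_APIC_VECTOR_LIMIT ?
                                    CPU_VECTOR_LIMIT : IO_APIC_VECTOR_LIMIT);
        printf("NR_IRQS = %d\n", nr_irqs);
        return 0;
}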
diff --git a/tools/arch/x86/include/asm/msr-index.h b/tools/arch/x86/include/asm/msr-index.h
new file mode 100644
index 000000000000..20ce682a2540
--- /dev/null
+++ b/tools/arch/x86/include/asm/msr-index.h
@@ -0,0 +1,857 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_X86_MSR_INDEX_H
+#define _ASM_X86_MSR_INDEX_H
+
+#include <linux/bits.h>
+
+/*
+ * CPU model specific register (MSR) numbers.
+ *
+ * Do not add new entries to this file unless the definitions are shared
+ * between multiple compilation units.
+ */
+
+/* x86-64 specific MSRs */
+#define MSR_EFER 0xc0000080 /* extended feature register */
+#define MSR_STAR 0xc0000081 /* legacy mode SYSCALL target */
+#define MSR_LSTAR 0xc0000082 /* long mode SYSCALL target */
+#define MSR_CSTAR 0xc0000083 /* compat mode SYSCALL target */
+#define MSR_SYSCALL_MASK 0xc0000084 /* EFLAGS mask for syscall */
+#define MSR_FS_BASE 0xc0000100 /* 64bit FS base */
+#define MSR_GS_BASE 0xc0000101 /* 64bit GS base */
+#define MSR_KERNEL_GS_BASE 0xc0000102 /* SwapGS GS shadow */
+#define MSR_TSC_AUX 0xc0000103 /* Auxiliary TSC */
+
+/* EFER bits: */
+#define _EFER_SCE 0 /* SYSCALL/SYSRET */
+#define _EFER_LME 8 /* Long mode enable */
+#define _EFER_LMA 10 /* Long mode active (read-only) */
+#define _EFER_NX 11 /* No execute enable */
+#define _EFER_SVME 12 /* Enable virtualization */
+#define _EFER_LMSLE 13 /* Long Mode Segment Limit Enable */
+#define _EFER_FFXSR 14 /* Enable Fast FXSAVE/FXRSTOR */
+
+#define EFER_SCE (1<<_EFER_SCE)
+#define EFER_LME (1<<_EFER_LME)
+#define EFER_LMA (1<<_EFER_LMA)
+#define EFER_NX (1<<_EFER_NX)
+#define EFER_SVME (1<<_EFER_SVME)
+#define EFER_LMSLE (1<<_EFER_LMSLE)
+#define EFER_FFXSR (1<<_EFER_FFXSR)
+
+/* Intel MSRs. Some also available on other CPUs */
+
+#define MSR_IA32_SPEC_CTRL 0x00000048 /* Speculation Control */
+#define SPEC_CTRL_IBRS BIT(0) /* Indirect Branch Restricted Speculation */
+#define SPEC_CTRL_STIBP_SHIFT 1 /* Single Thread Indirect Branch Predictor (STIBP) bit */
+#define SPEC_CTRL_STIBP BIT(SPEC_CTRL_STIBP_SHIFT) /* STIBP mask */
+#define SPEC_CTRL_SSBD_SHIFT 2 /* Speculative Store Bypass Disable bit */
+#define SPEC_CTRL_SSBD BIT(SPEC_CTRL_SSBD_SHIFT) /* Speculative Store Bypass Disable */
+
+#define MSR_IA32_PRED_CMD 0x00000049 /* Prediction Command */
+#define PRED_CMD_IBPB BIT(0) /* Indirect Branch Prediction Barrier */
+
+#define MSR_PPIN_CTL 0x0000004e
+#define MSR_PPIN 0x0000004f
+
+#define MSR_IA32_PERFCTR0 0x000000c1
+#define MSR_IA32_PERFCTR1 0x000000c2
+#define MSR_FSB_FREQ 0x000000cd
+#define MSR_PLATFORM_INFO 0x000000ce
+#define MSR_PLATFORM_INFO_CPUID_FAULT_BIT 31
+#define MSR_PLATFORM_INFO_CPUID_FAULT BIT_ULL(MSR_PLATFORM_INFO_CPUID_FAULT_BIT)
+
+#define MSR_IA32_UMWAIT_CONTROL 0xe1
+#define MSR_IA32_UMWAIT_CONTROL_C02_DISABLE BIT(0)
+#define MSR_IA32_UMWAIT_CONTROL_RESERVED BIT(1)
+/*
+ * The time field occupies bits [31:2], representing a 32-bit value
+ * with bits [1:0] zero.
+ */
+#define MSR_IA32_UMWAIT_CONTROL_TIME_MASK (~0x03U)
+
+#define MSR_PKG_CST_CONFIG_CONTROL 0x000000e2
+#define NHM_C3_AUTO_DEMOTE (1UL << 25)
+#define NHM_C1_AUTO_DEMOTE (1UL << 26)
+#define ATM_LNC_C6_AUTO_DEMOTE (1UL << 25)
+#define SNB_C3_AUTO_UNDEMOTE (1UL << 27)
+#define SNB_C1_AUTO_UNDEMOTE (1UL << 28)
+
+#define MSR_MTRRcap 0x000000fe
+
+#define MSR_IA32_ARCH_CAPABILITIES 0x0000010a
+#define ARCH_CAP_RDCL_NO BIT(0) /* Not susceptible to Meltdown */
+#define ARCH_CAP_IBRS_ALL BIT(1) /* Enhanced IBRS support */
+#define ARCH_CAP_SKIP_VMENTRY_L1DFLUSH BIT(3) /* Skip L1D flush on vmentry */
+#define ARCH_CAP_SSB_NO BIT(4) /*
+ * Not susceptible to Speculative Store Bypass
+ * attack, so no Speculative Store Bypass
+ * control required.
+ */
+#define ARCH_CAP_MDS_NO BIT(5) /*
+ * Not susceptible to
+ * Microarchitectural Data
+ * Sampling (MDS) vulnerabilities.
+ */
+
+#define MSR_IA32_FLUSH_CMD 0x0000010b
+#define L1D_FLUSH BIT(0) /*
+ * Writeback and invalidate the
+ * L1 data cache.
+ */
+
+#define MSR_IA32_BBL_CR_CTL 0x00000119
+#define MSR_IA32_BBL_CR_CTL3 0x0000011e
+
+#define MSR_IA32_SYSENTER_CS 0x00000174
+#define MSR_IA32_SYSENTER_ESP 0x00000175
+#define MSR_IA32_SYSENTER_EIP 0x00000176
+
+#define MSR_IA32_MCG_CAP 0x00000179
+#define MSR_IA32_MCG_STATUS 0x0000017a
+#define MSR_IA32_MCG_CTL 0x0000017b
+#define MSR_IA32_MCG_EXT_CTL 0x000004d0
+
+#define MSR_OFFCORE_RSP_0 0x000001a6
+#define MSR_OFFCORE_RSP_1 0x000001a7
+#define MSR_TURBO_RATIO_LIMIT 0x000001ad
+#define MSR_TURBO_RATIO_LIMIT1 0x000001ae
+#define MSR_TURBO_RATIO_LIMIT2 0x000001af
+
+#define MSR_LBR_SELECT 0x000001c8
+#define MSR_LBR_TOS 0x000001c9
+#define MSR_LBR_NHM_FROM 0x00000680
+#define MSR_LBR_NHM_TO 0x000006c0
+#define MSR_LBR_CORE_FROM 0x00000040
+#define MSR_LBR_CORE_TO 0x00000060
+
+#define MSR_LBR_INFO_0 0x00000dc0 /* ... 0xddf for _31 */
+#define LBR_INFO_MISPRED BIT_ULL(63)
+#define LBR_INFO_IN_TX BIT_ULL(62)
+#define LBR_INFO_ABORT BIT_ULL(61)
+#define LBR_INFO_CYCLES 0xffff
+
+#define MSR_IA32_PEBS_ENABLE 0x000003f1
+#define MSR_PEBS_DATA_CFG 0x000003f2
+#define MSR_IA32_DS_AREA 0x00000600
+#define MSR_IA32_PERF_CAPABILITIES 0x00000345
+#define MSR_PEBS_LD_LAT_THRESHOLD 0x000003f6
+
+#define MSR_IA32_RTIT_CTL 0x00000570
+#define RTIT_CTL_TRACEEN BIT(0)
+#define RTIT_CTL_CYCLEACC BIT(1)
+#define RTIT_CTL_OS BIT(2)
+#define RTIT_CTL_USR BIT(3)
+#define RTIT_CTL_PWR_EVT_EN BIT(4)
+#define RTIT_CTL_FUP_ON_PTW BIT(5)
+#define RTIT_CTL_FABRIC_EN BIT(6)
+#define RTIT_CTL_CR3EN BIT(7)
+#define RTIT_CTL_TOPA BIT(8)
+#define RTIT_CTL_MTC_EN BIT(9)
+#define RTIT_CTL_TSC_EN BIT(10)
+#define RTIT_CTL_DISRETC BIT(11)
+#define RTIT_CTL_PTW_EN BIT(12)
+#define RTIT_CTL_BRANCH_EN BIT(13)
+#define RTIT_CTL_MTC_RANGE_OFFSET 14
+#define RTIT_CTL_MTC_RANGE (0x0full << RTIT_CTL_MTC_RANGE_OFFSET)
+#define RTIT_CTL_CYC_THRESH_OFFSET 19
+#define RTIT_CTL_CYC_THRESH (0x0full << RTIT_CTL_CYC_THRESH_OFFSET)
+#define RTIT_CTL_PSB_FREQ_OFFSET 24
+#define RTIT_CTL_PSB_FREQ (0x0full << RTIT_CTL_PSB_FREQ_OFFSET)
+#define RTIT_CTL_ADDR0_OFFSET 32
+#define RTIT_CTL_ADDR0 (0x0full << RTIT_CTL_ADDR0_OFFSET)
+#define RTIT_CTL_ADDR1_OFFSET 36
+#define RTIT_CTL_ADDR1 (0x0full << RTIT_CTL_ADDR1_OFFSET)
+#define RTIT_CTL_ADDR2_OFFSET 40
+#define RTIT_CTL_ADDR2 (0x0full << RTIT_CTL_ADDR2_OFFSET)
+#define RTIT_CTL_ADDR3_OFFSET 44
+#define RTIT_CTL_ADDR3 (0x0full << RTIT_CTL_ADDR3_OFFSET)
+#define MSR_IA32_RTIT_STATUS 0x00000571
+#define RTIT_STATUS_FILTEREN BIT(0)
+#define RTIT_STATUS_CONTEXTEN BIT(1)
+#define RTIT_STATUS_TRIGGEREN BIT(2)
+#define RTIT_STATUS_BUFFOVF BIT(3)
+#define RTIT_STATUS_ERROR BIT(4)
+#define RTIT_STATUS_STOPPED BIT(5)
+#define RTIT_STATUS_BYTECNT_OFFSET 32
+#define RTIT_STATUS_BYTECNT (0x1ffffull << RTIT_STATUS_BYTECNT_OFFSET)
+#define MSR_IA32_RTIT_ADDR0_A 0x00000580
+#define MSR_IA32_RTIT_ADDR0_B 0x00000581
+#define MSR_IA32_RTIT_ADDR1_A 0x00000582
+#define MSR_IA32_RTIT_ADDR1_B 0x00000583
+#define MSR_IA32_RTIT_ADDR2_A 0x00000584
+#define MSR_IA32_RTIT_ADDR2_B 0x00000585
+#define MSR_IA32_RTIT_ADDR3_A 0x00000586
+#define MSR_IA32_RTIT_ADDR3_B 0x00000587
+#define MSR_IA32_RTIT_CR3_MATCH 0x00000572
+#define MSR_IA32_RTIT_OUTPUT_BASE 0x00000560
+#define MSR_IA32_RTIT_OUTPUT_MASK 0x00000561
+
+#define MSR_MTRRfix64K_00000 0x00000250
+#define MSR_MTRRfix16K_80000 0x00000258
+#define MSR_MTRRfix16K_A0000 0x00000259
+#define MSR_MTRRfix4K_C0000 0x00000268
+#define MSR_MTRRfix4K_C8000 0x00000269
+#define MSR_MTRRfix4K_D0000 0x0000026a
+#define MSR_MTRRfix4K_D8000 0x0000026b
+#define MSR_MTRRfix4K_E0000 0x0000026c
+#define MSR_MTRRfix4K_E8000 0x0000026d
+#define MSR_MTRRfix4K_F0000 0x0000026e
+#define MSR_MTRRfix4K_F8000 0x0000026f
+#define MSR_MTRRdefType 0x000002ff
+
+#define MSR_IA32_CR_PAT 0x00000277
+
+#define MSR_IA32_DEBUGCTLMSR 0x000001d9
+#define MSR_IA32_LASTBRANCHFROMIP 0x000001db
+#define MSR_IA32_LASTBRANCHTOIP 0x000001dc
+#define MSR_IA32_LASTINTFROMIP 0x000001dd
+#define MSR_IA32_LASTINTTOIP 0x000001de
+
+/* DEBUGCTLMSR bits (others vary by model): */
+#define DEBUGCTLMSR_LBR (1UL << 0) /* last branch recording */
+#define DEBUGCTLMSR_BTF_SHIFT 1
+#define DEBUGCTLMSR_BTF (1UL << 1) /* single-step on branches */
+#define DEBUGCTLMSR_TR (1UL << 6)
+#define DEBUGCTLMSR_BTS (1UL << 7)
+#define DEBUGCTLMSR_BTINT (1UL << 8)
+#define DEBUGCTLMSR_BTS_OFF_OS (1UL << 9)
+#define DEBUGCTLMSR_BTS_OFF_USR (1UL << 10)
+#define DEBUGCTLMSR_FREEZE_LBRS_ON_PMI (1UL << 11)
+#define DEBUGCTLMSR_FREEZE_PERFMON_ON_PMI (1UL << 12)
+#define DEBUGCTLMSR_FREEZE_IN_SMM_BIT 14
+#define DEBUGCTLMSR_FREEZE_IN_SMM (1UL << DEBUGCTLMSR_FREEZE_IN_SMM_BIT)
+
+#define MSR_PEBS_FRONTEND 0x000003f7
+
+#define MSR_IA32_POWER_CTL 0x000001fc
+
+#define MSR_IA32_MC0_CTL 0x00000400
+#define MSR_IA32_MC0_STATUS 0x00000401
+#define MSR_IA32_MC0_ADDR 0x00000402
+#define MSR_IA32_MC0_MISC 0x00000403
+
+/* C-state Residency Counters */
+#define MSR_PKG_C3_RESIDENCY 0x000003f8
+#define MSR_PKG_C6_RESIDENCY 0x000003f9
+#define MSR_ATOM_PKG_C6_RESIDENCY 0x000003fa
+#define MSR_PKG_C7_RESIDENCY 0x000003fa
+#define MSR_CORE_C3_RESIDENCY 0x000003fc
+#define MSR_CORE_C6_RESIDENCY 0x000003fd
+#define MSR_CORE_C7_RESIDENCY 0x000003fe
+#define MSR_KNL_CORE_C6_RESIDENCY 0x000003ff
+#define MSR_PKG_C2_RESIDENCY 0x0000060d
+#define MSR_PKG_C8_RESIDENCY 0x00000630
+#define MSR_PKG_C9_RESIDENCY 0x00000631
+#define MSR_PKG_C10_RESIDENCY 0x00000632
+
+/* Interrupt Response Limit */
+#define MSR_PKGC3_IRTL 0x0000060a
+#define MSR_PKGC6_IRTL 0x0000060b
+#define MSR_PKGC7_IRTL 0x0000060c
+#define MSR_PKGC8_IRTL 0x00000633
+#define MSR_PKGC9_IRTL 0x00000634
+#define MSR_PKGC10_IRTL 0x00000635
+
+/* Run Time Average Power Limiting (RAPL) Interface */
+
+#define MSR_RAPL_POWER_UNIT 0x00000606
+
+#define MSR_PKG_POWER_LIMIT 0x00000610
+#define MSR_PKG_ENERGY_STATUS 0x00000611
+#define MSR_PKG_PERF_STATUS 0x00000613
+#define MSR_PKG_POWER_INFO 0x00000614
+
+#define MSR_DRAM_POWER_LIMIT 0x00000618
+#define MSR_DRAM_ENERGY_STATUS 0x00000619
+#define MSR_DRAM_PERF_STATUS 0x0000061b
+#define MSR_DRAM_POWER_INFO 0x0000061c
+
+#define MSR_PP0_POWER_LIMIT 0x00000638
+#define MSR_PP0_ENERGY_STATUS 0x00000639
+#define MSR_PP0_POLICY 0x0000063a
+#define MSR_PP0_PERF_STATUS 0x0000063b
+
+#define MSR_PP1_POWER_LIMIT 0x00000640
+#define MSR_PP1_ENERGY_STATUS 0x00000641
+#define MSR_PP1_POLICY 0x00000642
+
+/* Config TDP MSRs */
+#define MSR_CONFIG_TDP_NOMINAL 0x00000648
+#define MSR_CONFIG_TDP_LEVEL_1 0x00000649
+#define MSR_CONFIG_TDP_LEVEL_2 0x0000064A
+#define MSR_CONFIG_TDP_CONTROL 0x0000064B
+#define MSR_TURBO_ACTIVATION_RATIO 0x0000064C
+
+#define MSR_PLATFORM_ENERGY_STATUS 0x0000064D
+
+#define MSR_PKG_WEIGHTED_CORE_C0_RES 0x00000658
+#define MSR_PKG_ANY_CORE_C0_RES 0x00000659
+#define MSR_PKG_ANY_GFXE_C0_RES 0x0000065A
+#define MSR_PKG_BOTH_CORE_GFXE_C0_RES 0x0000065B
+
+#define MSR_CORE_C1_RES 0x00000660
+#define MSR_MODULE_C6_RES_MS 0x00000664
+
+#define MSR_CC6_DEMOTION_POLICY_CONFIG 0x00000668
+#define MSR_MC6_DEMOTION_POLICY_CONFIG 0x00000669
+
+#define MSR_ATOM_CORE_RATIOS 0x0000066a
+#define MSR_ATOM_CORE_VIDS 0x0000066b
+#define MSR_ATOM_CORE_TURBO_RATIOS 0x0000066c
+#define MSR_ATOM_CORE_TURBO_VIDS 0x0000066d
+
+
+#define MSR_CORE_PERF_LIMIT_REASONS 0x00000690
+#define MSR_GFX_PERF_LIMIT_REASONS 0x000006B0
+#define MSR_RING_PERF_LIMIT_REASONS 0x000006B1
+
+/* Hardware P state interface */
+#define MSR_PPERF 0x0000064e
+#define MSR_PERF_LIMIT_REASONS 0x0000064f
+#define MSR_PM_ENABLE 0x00000770
+#define MSR_HWP_CAPABILITIES 0x00000771
+#define MSR_HWP_REQUEST_PKG 0x00000772
+#define MSR_HWP_INTERRUPT 0x00000773
+#define MSR_HWP_REQUEST 0x00000774
+#define MSR_HWP_STATUS 0x00000777
+
+/* CPUID.6.EAX */
+#define HWP_BASE_BIT (1<<7)
+#define HWP_NOTIFICATIONS_BIT (1<<8)
+#define HWP_ACTIVITY_WINDOW_BIT (1<<9)
+#define HWP_ENERGY_PERF_PREFERENCE_BIT (1<<10)
+#define HWP_PACKAGE_LEVEL_REQUEST_BIT (1<<11)
+
+/* IA32_HWP_CAPABILITIES */
+#define HWP_HIGHEST_PERF(x) (((x) >> 0) & 0xff)
+#define HWP_GUARANTEED_PERF(x) (((x) >> 8) & 0xff)
+#define HWP_MOSTEFFICIENT_PERF(x) (((x) >> 16) & 0xff)
+#define HWP_LOWEST_PERF(x) (((x) >> 24) & 0xff)
+
+/* IA32_HWP_REQUEST */
+#define HWP_MIN_PERF(x) (x & 0xff)
+#define HWP_MAX_PERF(x) ((x & 0xff) << 8)
+#define HWP_DESIRED_PERF(x) ((x & 0xff) << 16)
+#define HWP_ENERGY_PERF_PREFERENCE(x) (((unsigned long long) x & 0xff) << 24)
+#define HWP_EPP_PERFORMANCE 0x00
+#define HWP_EPP_BALANCE_PERFORMANCE 0x80
+#define HWP_EPP_BALANCE_POWERSAVE 0xC0
+#define HWP_EPP_POWERSAVE 0xFF
+#define HWP_ACTIVITY_WINDOW(x) ((unsigned long long)(x & 0xff3) << 32)
+#define HWP_PACKAGE_CONTROL(x) ((unsigned long long)(x & 0x1) << 42)
+
+/* IA32_HWP_STATUS */
+#define HWP_GUARANTEED_CHANGE(x) (x & 0x1)
+#define HWP_EXCURSION_TO_MINIMUM(x) (x & 0x4)
+
+/* IA32_HWP_INTERRUPT */
+#define HWP_CHANGE_TO_GUARANTEED_INT(x) (x & 0x1)
+#define HWP_EXCURSION_TO_MINIMUM_INT(x) (x & 0x2)
+
+#define MSR_AMD64_MC0_MASK 0xc0010044
+
+#define MSR_IA32_MCx_CTL(x) (MSR_IA32_MC0_CTL + 4*(x))
+#define MSR_IA32_MCx_STATUS(x) (MSR_IA32_MC0_STATUS + 4*(x))
+#define MSR_IA32_MCx_ADDR(x) (MSR_IA32_MC0_ADDR + 4*(x))
+#define MSR_IA32_MCx_MISC(x) (MSR_IA32_MC0_MISC + 4*(x))
+
+#define MSR_AMD64_MCx_MASK(x) (MSR_AMD64_MC0_MASK + (x))
+
+/* These are consecutive and not in the normal 4er MCE bank block */
+#define MSR_IA32_MC0_CTL2 0x00000280
+#define MSR_IA32_MCx_CTL2(x) (MSR_IA32_MC0_CTL2 + (x))
+
+#define MSR_P6_PERFCTR0 0x000000c1
+#define MSR_P6_PERFCTR1 0x000000c2
+#define MSR_P6_EVNTSEL0 0x00000186
+#define MSR_P6_EVNTSEL1 0x00000187
+
+#define MSR_KNC_PERFCTR0 0x00000020
+#define MSR_KNC_PERFCTR1 0x00000021
+#define MSR_KNC_EVNTSEL0 0x00000028
+#define MSR_KNC_EVNTSEL1 0x00000029
+
+/* Alternative perfctr range with full access. */
+#define MSR_IA32_PMC0 0x000004c1
+
+/* Auto-reload via MSR instead of DS area */
+#define MSR_RELOAD_PMC0 0x000014c1
+#define MSR_RELOAD_FIXED_CTR0 0x00001309
+
+/*
+ * AMD64 MSRs. Not complete. See the architecture manual for a more
+ * complete list.
+ */
+#define MSR_AMD64_PATCH_LEVEL 0x0000008b
+#define MSR_AMD64_TSC_RATIO 0xc0000104
+#define MSR_AMD64_NB_CFG 0xc001001f
+#define MSR_AMD64_CPUID_FN_1 0xc0011004
+#define MSR_AMD64_PATCH_LOADER 0xc0010020
+#define MSR_AMD_PERF_CTL 0xc0010062
+#define MSR_AMD_PERF_STATUS 0xc0010063
+#define MSR_AMD_PSTATE_DEF_BASE 0xc0010064
+#define MSR_AMD64_OSVW_ID_LENGTH 0xc0010140
+#define MSR_AMD64_OSVW_STATUS 0xc0010141
+#define MSR_AMD64_LS_CFG 0xc0011020
+#define MSR_AMD64_DC_CFG 0xc0011022
+#define MSR_AMD64_BU_CFG2 0xc001102a
+#define MSR_AMD64_IBSFETCHCTL 0xc0011030
+#define MSR_AMD64_IBSFETCHLINAD 0xc0011031
+#define MSR_AMD64_IBSFETCHPHYSAD 0xc0011032
+#define MSR_AMD64_IBSFETCH_REG_COUNT 3
+#define MSR_AMD64_IBSFETCH_REG_MASK ((1UL<<MSR_AMD64_IBSFETCH_REG_COUNT)-1)
+#define MSR_AMD64_IBSOPCTL 0xc0011033
+#define MSR_AMD64_IBSOPRIP 0xc0011034
+#define MSR_AMD64_IBSOPDATA 0xc0011035
+#define MSR_AMD64_IBSOPDATA2 0xc0011036
+#define MSR_AMD64_IBSOPDATA3 0xc0011037
+#define MSR_AMD64_IBSDCLINAD 0xc0011038
+#define MSR_AMD64_IBSDCPHYSAD 0xc0011039
+#define MSR_AMD64_IBSOP_REG_COUNT 7
+#define MSR_AMD64_IBSOP_REG_MASK ((1UL<<MSR_AMD64_IBSOP_REG_COUNT)-1)
+#define MSR_AMD64_IBSCTL 0xc001103a
+#define MSR_AMD64_IBSBRTARGET 0xc001103b
+#define MSR_AMD64_IBSOPDATA4 0xc001103d
+#define MSR_AMD64_IBS_REG_COUNT_MAX 8 /* includes MSR_AMD64_IBSBRTARGET */
+#define MSR_AMD64_SEV 0xc0010131
+#define MSR_AMD64_SEV_ENABLED_BIT 0
+#define MSR_AMD64_SEV_ENABLED BIT_ULL(MSR_AMD64_SEV_ENABLED_BIT)
+
+#define MSR_AMD64_VIRT_SPEC_CTRL 0xc001011f
+
+/* Fam 17h MSRs */
+#define MSR_F17H_IRPERF 0xc00000e9
+
+/* Fam 16h MSRs */
+#define MSR_F16H_L2I_PERF_CTL 0xc0010230
+#define MSR_F16H_L2I_PERF_CTR 0xc0010231
+#define MSR_F16H_DR1_ADDR_MASK 0xc0011019
+#define MSR_F16H_DR2_ADDR_MASK 0xc001101a
+#define MSR_F16H_DR3_ADDR_MASK 0xc001101b
+#define MSR_F16H_DR0_ADDR_MASK 0xc0011027
+
+/* Fam 15h MSRs */
+#define MSR_F15H_PERF_CTL 0xc0010200
+#define MSR_F15H_PERF_CTL0 MSR_F15H_PERF_CTL
+#define MSR_F15H_PERF_CTL1 (MSR_F15H_PERF_CTL + 2)
+#define MSR_F15H_PERF_CTL2 (MSR_F15H_PERF_CTL + 4)
+#define MSR_F15H_PERF_CTL3 (MSR_F15H_PERF_CTL + 6)
+#define MSR_F15H_PERF_CTL4 (MSR_F15H_PERF_CTL + 8)
+#define MSR_F15H_PERF_CTL5 (MSR_F15H_PERF_CTL + 10)
+
+#define MSR_F15H_PERF_CTR 0xc0010201
+#define MSR_F15H_PERF_CTR0 MSR_F15H_PERF_CTR
+#define MSR_F15H_PERF_CTR1 (MSR_F15H_PERF_CTR + 2)
+#define MSR_F15H_PERF_CTR2 (MSR_F15H_PERF_CTR + 4)
+#define MSR_F15H_PERF_CTR3 (MSR_F15H_PERF_CTR + 6)
+#define MSR_F15H_PERF_CTR4 (MSR_F15H_PERF_CTR + 8)
+#define MSR_F15H_PERF_CTR5 (MSR_F15H_PERF_CTR + 10)
+
+#define MSR_F15H_NB_PERF_CTL 0xc0010240
+#define MSR_F15H_NB_PERF_CTR 0xc0010241
+#define MSR_F15H_PTSC 0xc0010280
+#define MSR_F15H_IC_CFG 0xc0011021
+#define MSR_F15H_EX_CFG 0xc001102c
+
+/* Fam 10h MSRs */
+#define MSR_FAM10H_MMIO_CONF_BASE 0xc0010058
+#define FAM10H_MMIO_CONF_ENABLE (1<<0)
+#define FAM10H_MMIO_CONF_BUSRANGE_MASK 0xf
+#define FAM10H_MMIO_CONF_BUSRANGE_SHIFT 2
+#define FAM10H_MMIO_CONF_BASE_MASK 0xfffffffULL
+#define FAM10H_MMIO_CONF_BASE_SHIFT 20
+#define MSR_FAM10H_NODE_ID 0xc001100c
+#define MSR_F10H_DECFG 0xc0011029
+#define MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT 1
+#define MSR_F10H_DECFG_LFENCE_SERIALIZE BIT_ULL(MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT)
+
+/* K8 MSRs */
+#define MSR_K8_TOP_MEM1 0xc001001a
+#define MSR_K8_TOP_MEM2 0xc001001d
+#define MSR_K8_SYSCFG 0xc0010010
+#define MSR_K8_SYSCFG_MEM_ENCRYPT_BIT 23
+#define MSR_K8_SYSCFG_MEM_ENCRYPT BIT_ULL(MSR_K8_SYSCFG_MEM_ENCRYPT_BIT)
+#define MSR_K8_INT_PENDING_MSG 0xc0010055
+/* C1E active bits in int pending message */
+#define K8_INTP_C1E_ACTIVE_MASK 0x18000000
+#define MSR_K8_TSEG_ADDR 0xc0010112
+#define MSR_K8_TSEG_MASK 0xc0010113
+#define K8_MTRRFIXRANGE_DRAM_ENABLE 0x00040000 /* MtrrFixDramEn bit */
+#define K8_MTRRFIXRANGE_DRAM_MODIFY 0x00080000 /* MtrrFixDramModEn bit */
+#define K8_MTRR_RDMEM_WRMEM_MASK 0x18181818 /* Mask: RdMem|WrMem */
+
+/* K7 MSRs */
+#define MSR_K7_EVNTSEL0 0xc0010000
+#define MSR_K7_PERFCTR0 0xc0010004
+#define MSR_K7_EVNTSEL1 0xc0010001
+#define MSR_K7_PERFCTR1 0xc0010005
+#define MSR_K7_EVNTSEL2 0xc0010002
+#define MSR_K7_PERFCTR2 0xc0010006
+#define MSR_K7_EVNTSEL3 0xc0010003
+#define MSR_K7_PERFCTR3 0xc0010007
+#define MSR_K7_CLK_CTL 0xc001001b
+#define MSR_K7_HWCR 0xc0010015
+#define MSR_K7_HWCR_SMMLOCK_BIT 0
+#define MSR_K7_HWCR_SMMLOCK BIT_ULL(MSR_K7_HWCR_SMMLOCK_BIT)
+#define MSR_K7_FID_VID_CTL 0xc0010041
+#define MSR_K7_FID_VID_STATUS 0xc0010042
+
+/* K6 MSRs */
+#define MSR_K6_WHCR 0xc0000082
+#define MSR_K6_UWCCR 0xc0000085
+#define MSR_K6_EPMR 0xc0000086
+#define MSR_K6_PSOR 0xc0000087
+#define MSR_K6_PFIR 0xc0000088
+
+/* Centaur-Hauls/IDT defined MSRs. */
+#define MSR_IDT_FCR1 0x00000107
+#define MSR_IDT_FCR2 0x00000108
+#define MSR_IDT_FCR3 0x00000109
+#define MSR_IDT_FCR4 0x0000010a
+
+#define MSR_IDT_MCR0 0x00000110
+#define MSR_IDT_MCR1 0x00000111
+#define MSR_IDT_MCR2 0x00000112
+#define MSR_IDT_MCR3 0x00000113
+#define MSR_IDT_MCR4 0x00000114
+#define MSR_IDT_MCR5 0x00000115
+#define MSR_IDT_MCR6 0x00000116
+#define MSR_IDT_MCR7 0x00000117
+#define MSR_IDT_MCR_CTRL 0x00000120
+
+/* VIA Cyrix defined MSRs */
+#define MSR_VIA_FCR 0x00001107
+#define MSR_VIA_LONGHAUL 0x0000110a
+#define MSR_VIA_RNG 0x0000110b
+#define MSR_VIA_BCR2 0x00001147
+
+/* Transmeta defined MSRs */
+#define MSR_TMTA_LONGRUN_CTRL 0x80868010
+#define MSR_TMTA_LONGRUN_FLAGS 0x80868011
+#define MSR_TMTA_LRTI_READOUT 0x80868018
+#define MSR_TMTA_LRTI_VOLT_MHZ 0x8086801a
+
+/* Intel defined MSRs. */
+#define MSR_IA32_P5_MC_ADDR 0x00000000
+#define MSR_IA32_P5_MC_TYPE 0x00000001
+#define MSR_IA32_TSC 0x00000010
+#define MSR_IA32_PLATFORM_ID 0x00000017
+#define MSR_IA32_EBL_CR_POWERON 0x0000002a
+#define MSR_EBC_FREQUENCY_ID 0x0000002c
+#define MSR_SMI_COUNT 0x00000034
+#define MSR_IA32_FEATURE_CONTROL 0x0000003a
+#define MSR_IA32_TSC_ADJUST 0x0000003b
+#define MSR_IA32_BNDCFGS 0x00000d90
+
+#define MSR_IA32_BNDCFGS_RSVD 0x00000ffc
+
+#define MSR_IA32_XSS 0x00000da0
+
+#define FEATURE_CONTROL_LOCKED (1<<0)
+#define FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX (1<<1)
+#define FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX (1<<2)
+#define FEATURE_CONTROL_LMCE (1<<20)
+
+#define MSR_IA32_APICBASE 0x0000001b
+#define MSR_IA32_APICBASE_BSP (1<<8)
+#define MSR_IA32_APICBASE_ENABLE (1<<11)
+#define MSR_IA32_APICBASE_BASE (0xfffff<<12)
+
+#define MSR_IA32_TSCDEADLINE 0x000006e0
+
+#define MSR_IA32_UCODE_WRITE 0x00000079
+#define MSR_IA32_UCODE_REV 0x0000008b
+
+#define MSR_IA32_SMM_MONITOR_CTL 0x0000009b
+#define MSR_IA32_SMBASE 0x0000009e
+
+#define MSR_IA32_PERF_STATUS 0x00000198
+#define MSR_IA32_PERF_CTL 0x00000199
+#define INTEL_PERF_CTL_MASK 0xffff
+
+#define MSR_IA32_MPERF 0x000000e7
+#define MSR_IA32_APERF 0x000000e8
+
+#define MSR_IA32_THERM_CONTROL 0x0000019a
+#define MSR_IA32_THERM_INTERRUPT 0x0000019b
+
+#define THERM_INT_HIGH_ENABLE (1 << 0)
+#define THERM_INT_LOW_ENABLE (1 << 1)
+#define THERM_INT_PLN_ENABLE (1 << 24)
+
+#define MSR_IA32_THERM_STATUS 0x0000019c
+
+#define THERM_STATUS_PROCHOT (1 << 0)
+#define THERM_STATUS_POWER_LIMIT (1 << 10)
+
+#define MSR_THERM2_CTL 0x0000019d
+
+#define MSR_THERM2_CTL_TM_SELECT (1ULL << 16)
+
+#define MSR_IA32_MISC_ENABLE 0x000001a0
+
+#define MSR_IA32_TEMPERATURE_TARGET 0x000001a2
+
+#define MSR_MISC_FEATURE_CONTROL 0x000001a4
+#define MSR_MISC_PWR_MGMT 0x000001aa
+
+#define MSR_IA32_ENERGY_PERF_BIAS 0x000001b0
+#define ENERGY_PERF_BIAS_PERFORMANCE 0
+#define ENERGY_PERF_BIAS_BALANCE_PERFORMANCE 4
+#define ENERGY_PERF_BIAS_NORMAL 6
+#define ENERGY_PERF_BIAS_BALANCE_POWERSAVE 8
+#define ENERGY_PERF_BIAS_POWERSAVE 15
+
+#define MSR_IA32_PACKAGE_THERM_STATUS 0x000001b1
+
+#define PACKAGE_THERM_STATUS_PROCHOT (1 << 0)
+#define PACKAGE_THERM_STATUS_POWER_LIMIT (1 << 10)
+
+#define MSR_IA32_PACKAGE_THERM_INTERRUPT 0x000001b2
+
+#define PACKAGE_THERM_INT_HIGH_ENABLE (1 << 0)
+#define PACKAGE_THERM_INT_LOW_ENABLE (1 << 1)
+#define PACKAGE_THERM_INT_PLN_ENABLE (1 << 24)
+
+/* Thermal Thresholds Support */
+#define THERM_INT_THRESHOLD0_ENABLE (1 << 15)
+#define THERM_SHIFT_THRESHOLD0 8
+#define THERM_MASK_THRESHOLD0 (0x7f << THERM_SHIFT_THRESHOLD0)
+#define THERM_INT_THRESHOLD1_ENABLE (1 << 23)
+#define THERM_SHIFT_THRESHOLD1 16
+#define THERM_MASK_THRESHOLD1 (0x7f << THERM_SHIFT_THRESHOLD1)
+#define THERM_STATUS_THRESHOLD0 (1 << 6)
+#define THERM_LOG_THRESHOLD0 (1 << 7)
+#define THERM_STATUS_THRESHOLD1 (1 << 8)
+#define THERM_LOG_THRESHOLD1 (1 << 9)
+
+/* MISC_ENABLE bits: architectural */
+#define MSR_IA32_MISC_ENABLE_FAST_STRING_BIT 0
+#define MSR_IA32_MISC_ENABLE_FAST_STRING (1ULL << MSR_IA32_MISC_ENABLE_FAST_STRING_BIT)
+#define MSR_IA32_MISC_ENABLE_TCC_BIT 1
+#define MSR_IA32_MISC_ENABLE_TCC (1ULL << MSR_IA32_MISC_ENABLE_TCC_BIT)
+#define MSR_IA32_MISC_ENABLE_EMON_BIT 7
+#define MSR_IA32_MISC_ENABLE_EMON (1ULL << MSR_IA32_MISC_ENABLE_EMON_BIT)
+#define MSR_IA32_MISC_ENABLE_BTS_UNAVAIL_BIT 11
+#define MSR_IA32_MISC_ENABLE_BTS_UNAVAIL (1ULL << MSR_IA32_MISC_ENABLE_BTS_UNAVAIL_BIT)
+#define MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL_BIT 12
+#define MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL (1ULL << MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL_BIT)
+#define MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP_BIT 16
+#define MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP (1ULL << MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP_BIT)
+#define MSR_IA32_MISC_ENABLE_MWAIT_BIT 18
+#define MSR_IA32_MISC_ENABLE_MWAIT (1ULL << MSR_IA32_MISC_ENABLE_MWAIT_BIT)
+#define MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT 22
+#define MSR_IA32_MISC_ENABLE_LIMIT_CPUID (1ULL << MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT)
+#define MSR_IA32_MISC_ENABLE_XTPR_DISABLE_BIT 23
+#define MSR_IA32_MISC_ENABLE_XTPR_DISABLE (1ULL << MSR_IA32_MISC_ENABLE_XTPR_DISABLE_BIT)
+#define MSR_IA32_MISC_ENABLE_XD_DISABLE_BIT 34
+#define MSR_IA32_MISC_ENABLE_XD_DISABLE (1ULL << MSR_IA32_MISC_ENABLE_XD_DISABLE_BIT)
+
+/* MISC_ENABLE bits: model-specific, meaning may vary from core to core */
+#define MSR_IA32_MISC_ENABLE_X87_COMPAT_BIT 2
+#define MSR_IA32_MISC_ENABLE_X87_COMPAT (1ULL << MSR_IA32_MISC_ENABLE_X87_COMPAT_BIT)
+#define MSR_IA32_MISC_ENABLE_TM1_BIT 3
+#define MSR_IA32_MISC_ENABLE_TM1 (1ULL << MSR_IA32_MISC_ENABLE_TM1_BIT)
+#define MSR_IA32_MISC_ENABLE_SPLIT_LOCK_DISABLE_BIT 4
+#define MSR_IA32_MISC_ENABLE_SPLIT_LOCK_DISABLE (1ULL << MSR_IA32_MISC_ENABLE_SPLIT_LOCK_DISABLE_BIT)
+#define MSR_IA32_MISC_ENABLE_L3CACHE_DISABLE_BIT 6
+#define MSR_IA32_MISC_ENABLE_L3CACHE_DISABLE (1ULL << MSR_IA32_MISC_ENABLE_L3CACHE_DISABLE_BIT)
+#define MSR_IA32_MISC_ENABLE_SUPPRESS_LOCK_BIT 8
+#define MSR_IA32_MISC_ENABLE_SUPPRESS_LOCK (1ULL << MSR_IA32_MISC_ENABLE_SUPPRESS_LOCK_BIT)
+#define MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT 9
+#define MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE (1ULL << MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT)
+#define MSR_IA32_MISC_ENABLE_FERR_BIT 10
+#define MSR_IA32_MISC_ENABLE_FERR (1ULL << MSR_IA32_MISC_ENABLE_FERR_BIT)
+#define MSR_IA32_MISC_ENABLE_FERR_MULTIPLEX_BIT 10
+#define MSR_IA32_MISC_ENABLE_FERR_MULTIPLEX (1ULL << MSR_IA32_MISC_ENABLE_FERR_MULTIPLEX_BIT)
+#define MSR_IA32_MISC_ENABLE_TM2_BIT 13
+#define MSR_IA32_MISC_ENABLE_TM2 (1ULL << MSR_IA32_MISC_ENABLE_TM2_BIT)
+#define MSR_IA32_MISC_ENABLE_ADJ_PREF_DISABLE_BIT 19
+#define MSR_IA32_MISC_ENABLE_ADJ_PREF_DISABLE (1ULL << MSR_IA32_MISC_ENABLE_ADJ_PREF_DISABLE_BIT)
+#define MSR_IA32_MISC_ENABLE_SPEEDSTEP_LOCK_BIT 20
+#define MSR_IA32_MISC_ENABLE_SPEEDSTEP_LOCK (1ULL << MSR_IA32_MISC_ENABLE_SPEEDSTEP_LOCK_BIT)
+#define MSR_IA32_MISC_ENABLE_L1D_CONTEXT_BIT 24
+#define MSR_IA32_MISC_ENABLE_L1D_CONTEXT (1ULL << MSR_IA32_MISC_ENABLE_L1D_CONTEXT_BIT)
+#define MSR_IA32_MISC_ENABLE_DCU_PREF_DISABLE_BIT 37
+#define MSR_IA32_MISC_ENABLE_DCU_PREF_DISABLE (1ULL << MSR_IA32_MISC_ENABLE_DCU_PREF_DISABLE_BIT)
+#define MSR_IA32_MISC_ENABLE_TURBO_DISABLE_BIT 38
+#define MSR_IA32_MISC_ENABLE_TURBO_DISABLE (1ULL << MSR_IA32_MISC_ENABLE_TURBO_DISABLE_BIT)
+#define MSR_IA32_MISC_ENABLE_IP_PREF_DISABLE_BIT 39
+#define MSR_IA32_MISC_ENABLE_IP_PREF_DISABLE (1ULL << MSR_IA32_MISC_ENABLE_IP_PREF_DISABLE_BIT)
+
+/* MISC_FEATURES_ENABLES non-architectural features */
+#define MSR_MISC_FEATURES_ENABLES 0x00000140
+
+#define MSR_MISC_FEATURES_ENABLES_CPUID_FAULT_BIT 0
+#define MSR_MISC_FEATURES_ENABLES_CPUID_FAULT BIT_ULL(MSR_MISC_FEATURES_ENABLES_CPUID_FAULT_BIT)
+#define MSR_MISC_FEATURES_ENABLES_RING3MWAIT_BIT 1
+
+#define MSR_IA32_TSC_DEADLINE 0x000006E0
+
+
+#define MSR_TSX_FORCE_ABORT 0x0000010F
+
+#define MSR_TFA_RTM_FORCE_ABORT_BIT 0
+#define MSR_TFA_RTM_FORCE_ABORT BIT_ULL(MSR_TFA_RTM_FORCE_ABORT_BIT)
+
+/* P4/Xeon+ specific */
+#define MSR_IA32_MCG_EAX 0x00000180
+#define MSR_IA32_MCG_EBX 0x00000181
+#define MSR_IA32_MCG_ECX 0x00000182
+#define MSR_IA32_MCG_EDX 0x00000183
+#define MSR_IA32_MCG_ESI 0x00000184
+#define MSR_IA32_MCG_EDI 0x00000185
+#define MSR_IA32_MCG_EBP 0x00000186
+#define MSR_IA32_MCG_ESP 0x00000187
+#define MSR_IA32_MCG_EFLAGS 0x00000188
+#define MSR_IA32_MCG_EIP 0x00000189
+#define MSR_IA32_MCG_RESERVED 0x0000018a
+
+/* Pentium IV performance counter MSRs */
+#define MSR_P4_BPU_PERFCTR0 0x00000300
+#define MSR_P4_BPU_PERFCTR1 0x00000301
+#define MSR_P4_BPU_PERFCTR2 0x00000302
+#define MSR_P4_BPU_PERFCTR3 0x00000303
+#define MSR_P4_MS_PERFCTR0 0x00000304
+#define MSR_P4_MS_PERFCTR1 0x00000305
+#define MSR_P4_MS_PERFCTR2 0x00000306
+#define MSR_P4_MS_PERFCTR3 0x00000307
+#define MSR_P4_FLAME_PERFCTR0 0x00000308
+#define MSR_P4_FLAME_PERFCTR1 0x00000309
+#define MSR_P4_FLAME_PERFCTR2 0x0000030a
+#define MSR_P4_FLAME_PERFCTR3 0x0000030b
+#define MSR_P4_IQ_PERFCTR0 0x0000030c
+#define MSR_P4_IQ_PERFCTR1 0x0000030d
+#define MSR_P4_IQ_PERFCTR2 0x0000030e
+#define MSR_P4_IQ_PERFCTR3 0x0000030f
+#define MSR_P4_IQ_PERFCTR4 0x00000310
+#define MSR_P4_IQ_PERFCTR5 0x00000311
+#define MSR_P4_BPU_CCCR0 0x00000360
+#define MSR_P4_BPU_CCCR1 0x00000361
+#define MSR_P4_BPU_CCCR2 0x00000362
+#define MSR_P4_BPU_CCCR3 0x00000363
+#define MSR_P4_MS_CCCR0 0x00000364
+#define MSR_P4_MS_CCCR1 0x00000365
+#define MSR_P4_MS_CCCR2 0x00000366
+#define MSR_P4_MS_CCCR3 0x00000367
+#define MSR_P4_FLAME_CCCR0 0x00000368
+#define MSR_P4_FLAME_CCCR1 0x00000369
+#define MSR_P4_FLAME_CCCR2 0x0000036a
+#define MSR_P4_FLAME_CCCR3 0x0000036b
+#define MSR_P4_IQ_CCCR0 0x0000036c
+#define MSR_P4_IQ_CCCR1 0x0000036d
+#define MSR_P4_IQ_CCCR2 0x0000036e
+#define MSR_P4_IQ_CCCR3 0x0000036f
+#define MSR_P4_IQ_CCCR4 0x00000370
+#define MSR_P4_IQ_CCCR5 0x00000371
+#define MSR_P4_ALF_ESCR0 0x000003ca
+#define MSR_P4_ALF_ESCR1 0x000003cb
+#define MSR_P4_BPU_ESCR0 0x000003b2
+#define MSR_P4_BPU_ESCR1 0x000003b3
+#define MSR_P4_BSU_ESCR0 0x000003a0
+#define MSR_P4_BSU_ESCR1 0x000003a1
+#define MSR_P4_CRU_ESCR0 0x000003b8
+#define MSR_P4_CRU_ESCR1 0x000003b9
+#define MSR_P4_CRU_ESCR2 0x000003cc
+#define MSR_P4_CRU_ESCR3 0x000003cd
+#define MSR_P4_CRU_ESCR4 0x000003e0
+#define MSR_P4_CRU_ESCR5 0x000003e1
+#define MSR_P4_DAC_ESCR0 0x000003a8
+#define MSR_P4_DAC_ESCR1 0x000003a9
+#define MSR_P4_FIRM_ESCR0 0x000003a4
+#define MSR_P4_FIRM_ESCR1 0x000003a5
+#define MSR_P4_FLAME_ESCR0 0x000003a6
+#define MSR_P4_FLAME_ESCR1 0x000003a7
+#define MSR_P4_FSB_ESCR0 0x000003a2
+#define MSR_P4_FSB_ESCR1 0x000003a3
+#define MSR_P4_IQ_ESCR0 0x000003ba
+#define MSR_P4_IQ_ESCR1 0x000003bb
+#define MSR_P4_IS_ESCR0 0x000003b4
+#define MSR_P4_IS_ESCR1 0x000003b5
+#define MSR_P4_ITLB_ESCR0 0x000003b6
+#define MSR_P4_ITLB_ESCR1 0x000003b7
+#define MSR_P4_IX_ESCR0 0x000003c8
+#define MSR_P4_IX_ESCR1 0x000003c9
+#define MSR_P4_MOB_ESCR0 0x000003aa
+#define MSR_P4_MOB_ESCR1 0x000003ab
+#define MSR_P4_MS_ESCR0 0x000003c0
+#define MSR_P4_MS_ESCR1 0x000003c1
+#define MSR_P4_PMH_ESCR0 0x000003ac
+#define MSR_P4_PMH_ESCR1 0x000003ad
+#define MSR_P4_RAT_ESCR0 0x000003bc
+#define MSR_P4_RAT_ESCR1 0x000003bd
+#define MSR_P4_SAAT_ESCR0 0x000003ae
+#define MSR_P4_SAAT_ESCR1 0x000003af
+#define MSR_P4_SSU_ESCR0 0x000003be
+#define MSR_P4_SSU_ESCR1 0x000003bf /* guess: not in manual */
+
+#define MSR_P4_TBPU_ESCR0 0x000003c2
+#define MSR_P4_TBPU_ESCR1 0x000003c3
+#define MSR_P4_TC_ESCR0 0x000003c4
+#define MSR_P4_TC_ESCR1 0x000003c5
+#define MSR_P4_U2L_ESCR0 0x000003b0
+#define MSR_P4_U2L_ESCR1 0x000003b1
+
+#define MSR_P4_PEBS_MATRIX_VERT 0x000003f2
+
+/* Intel Core-based CPU performance counters */
+#define MSR_CORE_PERF_FIXED_CTR0 0x00000309
+#define MSR_CORE_PERF_FIXED_CTR1 0x0000030a
+#define MSR_CORE_PERF_FIXED_CTR2 0x0000030b
+#define MSR_CORE_PERF_FIXED_CTR_CTRL 0x0000038d
+#define MSR_CORE_PERF_GLOBAL_STATUS 0x0000038e
+#define MSR_CORE_PERF_GLOBAL_CTRL 0x0000038f
+#define MSR_CORE_PERF_GLOBAL_OVF_CTRL 0x00000390
+
+/* PERF_GLOBAL_OVF_CTL bits */
+#define MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI_BIT 55
+#define MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI (1ULL << MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI_BIT)
+#define MSR_CORE_PERF_GLOBAL_OVF_CTRL_OVF_BUF_BIT 62
+#define MSR_CORE_PERF_GLOBAL_OVF_CTRL_OVF_BUF (1ULL << MSR_CORE_PERF_GLOBAL_OVF_CTRL_OVF_BUF_BIT)
+#define MSR_CORE_PERF_GLOBAL_OVF_CTRL_COND_CHGD_BIT 63
+#define MSR_CORE_PERF_GLOBAL_OVF_CTRL_COND_CHGD (1ULL << MSR_CORE_PERF_GLOBAL_OVF_CTRL_COND_CHGD_BIT)
+
+/* Geode defined MSRs */
+#define MSR_GEODE_BUSCONT_CONF0 0x00001900
+
+/* Intel VT MSRs */
+#define MSR_IA32_VMX_BASIC 0x00000480
+#define MSR_IA32_VMX_PINBASED_CTLS 0x00000481
+#define MSR_IA32_VMX_PROCBASED_CTLS 0x00000482
+#define MSR_IA32_VMX_EXIT_CTLS 0x00000483
+#define MSR_IA32_VMX_ENTRY_CTLS 0x00000484
+#define MSR_IA32_VMX_MISC 0x00000485
+#define MSR_IA32_VMX_CR0_FIXED0 0x00000486
+#define MSR_IA32_VMX_CR0_FIXED1 0x00000487
+#define MSR_IA32_VMX_CR4_FIXED0 0x00000488
+#define MSR_IA32_VMX_CR4_FIXED1 0x00000489
+#define MSR_IA32_VMX_VMCS_ENUM 0x0000048a
+#define MSR_IA32_VMX_PROCBASED_CTLS2 0x0000048b
+#define MSR_IA32_VMX_EPT_VPID_CAP 0x0000048c
+#define MSR_IA32_VMX_TRUE_PINBASED_CTLS 0x0000048d
+#define MSR_IA32_VMX_TRUE_PROCBASED_CTLS 0x0000048e
+#define MSR_IA32_VMX_TRUE_EXIT_CTLS 0x0000048f
+#define MSR_IA32_VMX_TRUE_ENTRY_CTLS 0x00000490
+#define MSR_IA32_VMX_VMFUNC 0x00000491
+
+/* VMX_BASIC bits and bitmasks */
+#define VMX_BASIC_VMCS_SIZE_SHIFT 32
+#define VMX_BASIC_TRUE_CTLS (1ULL << 55)
+#define VMX_BASIC_64 0x0001000000000000LLU
+#define VMX_BASIC_MEM_TYPE_SHIFT 50
+#define VMX_BASIC_MEM_TYPE_MASK 0x003c000000000000LLU
+#define VMX_BASIC_MEM_TYPE_WB 6LLU
+#define VMX_BASIC_INOUT 0x0040000000000000LLU
+
+/* MSR_IA32_VMX_MISC bits */
+#define MSR_IA32_VMX_MISC_INTEL_PT (1ULL << 14)
+#define MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS (1ULL << 29)
+#define MSR_IA32_VMX_MISC_PREEMPTION_TIMER_SCALE 0x1F
+/* AMD-V MSRs */
+
+#define MSR_VM_CR 0xc0010114
+#define MSR_VM_IGNNE 0xc0010115
+#define MSR_VM_HSAVE_PA 0xc0010117
+
+#endif /* _ASM_X86_MSR_INDEX_H */
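Editor's note: as a worked example of the HWP accessors defined above, the following sketch composes an IA32_HWP_REQUEST value; the performance levels are made-up illustrative numbers.

#include <stdio.h>

#define HWP_MIN_PERF(x)     (x & 0xff)
#define HWP_MAX_PERF(x)     ((x & 0xff) << 8)
#define HWP_DESIRED_PERF(x) ((x & 0xff) << 16)
#define HWP_ENERGY_PERF_PREFERENCE(x) (((unsigned long long) x & 0xff) << 24)
#define HWP_EPP_BALANCE_PERFORMANCE 0x80

int main(void)
{
        /* min 10, max 40, desired 0 (let HW pick), balanced EPP */
        unsigned long long req = HWP_MIN_PERF(10) | HWP_MAX_PERF(40) |
                                 HWP_DESIRED_PERF(0) |
                                 HWP_ENERGY_PERF_PREFERENCE(HWP_EPP_BALANCE_PERFORMANCE);

        printf("IA32_HWP_REQUEST = 0x%llx\n", req);  /* 0x8000280a */
        return 0;
}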
diff --git a/tools/arch/x86/lib/insn.c b/tools/arch/x86/lib/insn.c
index 79e048f1d902..0151dfc6da61 100644
--- a/tools/arch/x86/lib/insn.c
+++ b/tools/arch/x86/lib/insn.c
@@ -13,6 +13,8 @@
#include "../include/asm/inat.h"
#include "../include/asm/insn.h"
+#include "../include/asm/emulate_prefix.h"
+
/* Verify next sizeof(t) bytes can be on the same instruction */
#define validate_next(t, insn, n) \
((insn)->next_byte + sizeof(t) + n <= (insn)->end_kaddr)
@@ -58,6 +60,36 @@ void insn_init(struct insn *insn, const void *kaddr, int buf_len, int x86_64)
insn->addr_bytes = 4;
}
+static const insn_byte_t xen_prefix[] = { __XEN_EMULATE_PREFIX };
+static const insn_byte_t kvm_prefix[] = { __KVM_EMULATE_PREFIX };
+
+static int __insn_get_emulate_prefix(struct insn *insn,
+ const insn_byte_t *prefix, size_t len)
+{
+ size_t i;
+
+ for (i = 0; i < len; i++) {
+ if (peek_nbyte_next(insn_byte_t, insn, i) != prefix[i])
+ goto err_out;
+ }
+
+ insn->emulate_prefix_size = len;
+ insn->next_byte += len;
+
+ return 1;
+
+err_out:
+ return 0;
+}
+
+static void insn_get_emulate_prefix(struct insn *insn)
+{
+ if (__insn_get_emulate_prefix(insn, xen_prefix, sizeof(xen_prefix)))
+ return;
+
+ __insn_get_emulate_prefix(insn, kvm_prefix, sizeof(kvm_prefix));
+}
+
/**
* insn_get_prefixes - scan x86 instruction prefix bytes
* @insn: &struct insn containing instruction
@@ -76,6 +108,8 @@ void insn_get_prefixes(struct insn *insn)
if (prefixes->got)
return;
+ insn_get_emulate_prefix(insn);
+
nb = 0;
lb = 0;
b = peek_next(insn_byte_t, insn);
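Editor's note: a hedged usage sketch of the new decoder behavior. After insn_get_prefixes(), a buffer that begins with the KVM escape sequence reports the prefix via insn_has_emulate_prefix() and decoding continues at the real opcode. This assumes it is built alongside tools/arch/x86/lib with the same include paths.

#include "../include/asm/insn.h"

static int starts_with_emulate_prefix(void)
{
        unsigned char buf[MAX_INSN_SIZE] = {
                0x0f, 0x0b, 'k', 'v', 'm',     /* __KVM_EMULATE_PREFIX */
                0x0f, 0xa2,                    /* cpuid */
        };
        struct insn insn;

        insn_init(&insn, buf, sizeof(buf), /* x86_64 */ 1);
        insn_get_prefixes(&insn);

        /* insn.next_byte now points at the cpuid opcode */
        return insn_has_emulate_prefix(&insn); /* returns 1 */
}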
diff --git a/tools/arch/x86/lib/x86-opcode-map.txt b/tools/arch/x86/lib/x86-opcode-map.txt
index e0b85930dd77..0a0e9112f284 100644
--- a/tools/arch/x86/lib/x86-opcode-map.txt
+++ b/tools/arch/x86/lib/x86-opcode-map.txt
@@ -333,7 +333,7 @@ AVXcode: 1
06: CLTS
07: SYSRET (o64)
08: INVD
-09: WBINVD
+09: WBINVD | WBNOINVD (F3)
0a:
0b: UD2 (1B)
0c:
@@ -364,7 +364,7 @@ AVXcode: 1
# a ModR/M byte.
1a: BNDCL Gv,Ev (F3) | BNDCU Gv,Ev (F2) | BNDMOV Gv,Ev (66) | BNDLDX Gv,Ev
1b: BNDCN Gv,Ev (F2) | BNDMOV Ev,Gv (66) | BNDMK Gv,Ev (F3) | BNDSTX Ev,Gv
-1c:
+1c: Grp20 (1A),(1C)
1d:
1e:
1f: NOP Ev
@@ -792,6 +792,8 @@ f3: Grp17 (1A)
f5: BZHI Gy,Ey,By (v) | PEXT Gy,By,Ey (F3),(v) | PDEP Gy,By,Ey (F2),(v)
f6: ADCX Gy,Ey (66) | ADOX Gy,Ey (F3) | MULX By,Gy,rDX,Ey (F2),(v)
f7: BEXTR Gy,Ey,By (v) | SHLX Gy,Ey,By (66),(v) | SARX Gy,Ey,By (F3),(v) | SHRX Gy,Ey,By (F2),(v)
+f8: MOVDIR64B Gv,Mdqq (66) | ENQCMD Gv,Mdqq (F2) | ENQCMDS Gv,Mdqq (F3)
+f9: MOVDIRI My,Gy
EndTable
Table: 3-byte opcode 2 (0x0f 0x3a)
@@ -943,9 +945,9 @@ GrpTable: Grp6
EndTable
GrpTable: Grp7
-0: SGDT Ms | VMCALL (001),(11B) | VMLAUNCH (010),(11B) | VMRESUME (011),(11B) | VMXOFF (100),(11B)
-1: SIDT Ms | MONITOR (000),(11B) | MWAIT (001),(11B) | CLAC (010),(11B) | STAC (011),(11B)
-2: LGDT Ms | XGETBV (000),(11B) | XSETBV (001),(11B) | VMFUNC (100),(11B) | XEND (101)(11B) | XTEST (110)(11B)
+0: SGDT Ms | VMCALL (001),(11B) | VMLAUNCH (010),(11B) | VMRESUME (011),(11B) | VMXOFF (100),(11B) | PCONFIG (101),(11B) | ENCLV (000),(11B)
+1: SIDT Ms | MONITOR (000),(11B) | MWAIT (001),(11B) | CLAC (010),(11B) | STAC (011),(11B) | ENCLS (111),(11B)
+2: LGDT Ms | XGETBV (000),(11B) | XSETBV (001),(11B) | VMFUNC (100),(11B) | XEND (101)(11B) | XTEST (110)(11B) | ENCLU (111),(11B)
3: LIDT Ms
4: SMSW Mw/Rv
5: rdpkru (110),(11B) | wrpkru (111),(11B)
@@ -1020,7 +1022,7 @@ GrpTable: Grp15
3: vstmxcsr Md (v1) | WRGSBASE Ry (F3),(11B)
4: XSAVE | ptwrite Ey (F3),(11B)
5: XRSTOR | lfence (11B)
-6: XSAVEOPT | clwb (66) | mfence (11B)
+6: XSAVEOPT | clwb (66) | mfence (11B) | TPAUSE Rd (66),(11B) | UMONITOR Rv (F3),(11B) | UMWAIT Rd (F2),(11B)
7: clflush | clflushopt (66) | sfence (11B)
EndTable
@@ -1051,6 +1053,10 @@ GrpTable: Grp19
6: vscatterpf1qps/d Wx (66),(ev)
EndTable
+GrpTable: Grp20
+0: cldemote Mb
+EndTable
+
# AMD's Prefetch Group
GrpTable: GrpP
0: PREFETCH
diff --git a/tools/arch/x86/tools/gen-insn-attr-x86.awk b/tools/arch/x86/tools/gen-insn-attr-x86.awk
index b02a36b2c14f..a42015b305f4 100644
--- a/tools/arch/x86/tools/gen-insn-attr-x86.awk
+++ b/tools/arch/x86/tools/gen-insn-attr-x86.awk
@@ -69,7 +69,7 @@ BEGIN {
lprefix1_expr = "\\((66|!F3)\\)"
lprefix2_expr = "\\(F3\\)"
- lprefix3_expr = "\\((F2|!F3|66\\&F2)\\)"
+ lprefix3_expr = "\\((F2|!F3|66&F2)\\)"
lprefix_expr = "\\((66|F2|F3)\\)"
max_lprefix = 4
@@ -257,7 +257,7 @@ function convert_operands(count,opnd, i,j,imm,mod)
return add_flags(imm, mod)
}
-/^[0-9a-f]+\:/ {
+/^[0-9a-f]+:/ {
if (NR == 1)
next
# get index
diff --git a/tools/bpf/Makefile b/tools/bpf/Makefile
index 5d1995fd369c..5535650800ab 100644
--- a/tools/bpf/Makefile
+++ b/tools/bpf/Makefile
@@ -16,7 +16,13 @@ CFLAGS += -D__EXPORTED_HEADERS__ -I$(srctree)/include/uapi -I$(srctree)/include
# isn't set and when invoked from the selftests build, where srctree
# is set to ".". building_out_of_srctree is undefined for in-srctree
# builds.
+ifeq ($(srctree),)
+update_srctree := 1
+endif
ifndef building_out_of_srctree
+update_srctree := 1
+endif
+ifeq ($(update_srctree),1)
srctree := $(patsubst %/,%,$(dir $(CURDIR)))
srctree := $(patsubst %/,%,$(dir $(srctree)))
endif
diff --git a/tools/bpf/bpf_exp.y b/tools/bpf/bpf_exp.y
index 56ba1de50784..8d48e896be50 100644
--- a/tools/bpf/bpf_exp.y
+++ b/tools/bpf/bpf_exp.y
@@ -545,6 +545,16 @@ static void bpf_reduce_k_jumps(void)
}
}
+static uint8_t bpf_encode_jt_jf_offset(int off, int i)
+{
+ int delta = off - i - 1;
+
+ if (delta < 0 || delta > 255)
+ fprintf(stderr, "warning: insn #%d jumps to insn #%d, "
+ "which is out of range\n", i, off);
+ return (uint8_t) delta;
+}
+
static void bpf_reduce_jt_jumps(void)
{
int i;
@@ -552,7 +562,7 @@ static void bpf_reduce_jt_jumps(void)
for (i = 0; i < curr_instr; i++) {
if (labels_jt[i]) {
int off = bpf_find_insns_offset(labels_jt[i]);
- out[i].jt = (uint8_t) (off - i -1);
+ out[i].jt = bpf_encode_jt_jf_offset(off, i);
}
}
}
@@ -564,7 +574,7 @@ static void bpf_reduce_jf_jumps(void)
for (i = 0; i < curr_instr; i++) {
if (labels_jf[i]) {
int off = bpf_find_insns_offset(labels_jf[i]);
- out[i].jf = (uint8_t) (off - i - 1);
+ out[i].jf = bpf_encode_jt_jf_offset(off, i);
}
}
}
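Editor's note: the new helper guards the 8-bit jt/jf offset encoding of classic BPF. A standalone sketch of why the warning matters: the cast still truncates, so a delta of 289 silently wraps to 33 unless the user heeds the message.

#include <stdint.h>
#include <stdio.h>

static uint8_t encode(int off, int i)
{
        int delta = off - i - 1;

        if (delta < 0 || delta > 255)
                fprintf(stderr, "warning: delta %d out of range\n", delta);
        return (uint8_t) delta;         /* 289 truncates to 33 */
}

int main(void)
{
        printf("%u\n", encode(300, 10)); /* warns, prints 33 */
        return 0;
}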
diff --git a/tools/bpf/bpftool/btf.c b/tools/bpf/bpftool/btf.c
index 9a9376d1d3df..e5bc97b71ceb 100644
--- a/tools/bpf/bpftool/btf.c
+++ b/tools/bpf/bpftool/btf.c
@@ -12,6 +12,9 @@
#include <libbpf.h>
#include <linux/btf.h>
#include <linux/hashtable.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
#include "btf.h"
#include "json_writer.h"
@@ -388,6 +391,54 @@ done:
return err;
}
+static struct btf *btf__parse_raw(const char *file)
+{
+ struct btf *btf;
+ struct stat st;
+ __u8 *buf;
+ FILE *f;
+
+ if (stat(file, &st))
+ return NULL;
+
+ f = fopen(file, "rb");
+ if (!f)
+ return NULL;
+
+ buf = malloc(st.st_size);
+ if (!buf) {
+ btf = ERR_PTR(-ENOMEM);
+ goto exit_close;
+ }
+
+ if ((size_t) st.st_size != fread(buf, 1, st.st_size, f)) {
+ btf = ERR_PTR(-EINVAL);
+ goto exit_free;
+ }
+
+ btf = btf__new(buf, st.st_size);
+
+exit_free:
+ free(buf);
+exit_close:
+ fclose(f);
+ return btf;
+}
+
+static bool is_btf_raw(const char *file)
+{
+ __u16 magic = 0;
+ int fd, nb_read;
+
+ fd = open(file, O_RDONLY);
+ if (fd < 0)
+ return false;
+
+ nb_read = read(fd, &magic, sizeof(magic));
+ close(fd);
+ return nb_read == sizeof(magic) && magic == BTF_MAGIC;
+}
+
static int do_dump(int argc, char **argv)
{
struct btf *btf = NULL;
@@ -465,7 +516,11 @@ static int do_dump(int argc, char **argv)
}
NEXT_ARG();
} else if (is_prefix(src, "file")) {
- btf = btf__parse_elf(*argv, NULL);
+ if (is_btf_raw(*argv))
+ btf = btf__parse_raw(*argv);
+ else
+ btf = btf__parse_elf(*argv, NULL);
+
if (IS_ERR(btf)) {
err = PTR_ERR(btf);
btf = NULL;
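Editor's note: the raw-vs-ELF detection above hinges on the 16-bit BTF magic (0xeB9F) at the start of raw BTF blobs such as /sys/kernel/btf/vmlinux. A minimal userspace sketch of the same check, assuming a little-endian host just as the bpftool code does:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

#define BTF_MAGIC 0xeB9F        /* from linux/btf.h */

int main(void)
{
        unsigned short magic = 0;
        int fd = open("/sys/kernel/btf/vmlinux", O_RDONLY);

        if (fd < 0)
                return 1;
        if (read(fd, &magic, sizeof(magic)) == (ssize_t) sizeof(magic))
                printf("raw BTF? %s\n", magic == BTF_MAGIC ? "yes" : "no");
        close(fd);
        return 0;
}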
diff --git a/tools/bpf/bpftool/main.c b/tools/bpf/bpftool/main.c
index 93d008687020..4764581ff9ea 100644
--- a/tools/bpf/bpftool/main.c
+++ b/tools/bpf/bpftool/main.c
@@ -27,7 +27,7 @@ bool json_output;
bool show_pinned;
bool block_mount;
bool verifier_logs;
-int bpf_flags;
+bool relaxed_maps;
struct pinned_obj_table prog_table;
struct pinned_obj_table map_table;
@@ -396,7 +396,7 @@ int main(int argc, char **argv)
show_pinned = true;
break;
case 'm':
- bpf_flags = MAPS_RELAX_COMPAT;
+ relaxed_maps = true;
break;
case 'n':
block_mount = true;
diff --git a/tools/bpf/bpftool/main.h b/tools/bpf/bpftool/main.h
index af9ad56c303a..2899095f8254 100644
--- a/tools/bpf/bpftool/main.h
+++ b/tools/bpf/bpftool/main.h
@@ -94,7 +94,7 @@ extern bool json_output;
extern bool show_pinned;
extern bool block_mount;
extern bool verifier_logs;
-extern int bpf_flags;
+extern bool relaxed_maps;
extern struct pinned_obj_table prog_table;
extern struct pinned_obj_table map_table;
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
index 43fdbbfe41bb..4535c863d2cd 100644
--- a/tools/bpf/bpftool/prog.c
+++ b/tools/bpf/bpftool/prog.c
@@ -1091,10 +1091,11 @@ free_data_in:
static int load_with_options(int argc, char **argv, bool first_prog_only)
{
+ enum bpf_prog_type common_prog_type = BPF_PROG_TYPE_UNSPEC;
+ DECLARE_LIBBPF_OPTS(bpf_object_open_opts, open_opts,
+ .relaxed_maps = relaxed_maps,
+ );
struct bpf_object_load_attr load_attr = { 0 };
- struct bpf_object_open_attr open_attr = {
- .prog_type = BPF_PROG_TYPE_UNSPEC,
- };
enum bpf_attach_type expected_attach_type;
struct map_replace *map_replace = NULL;
struct bpf_program *prog = NULL, *pos;
@@ -1105,11 +1106,13 @@ static int load_with_options(int argc, char **argv, bool first_prog_only)
const char *pinfile;
unsigned int i, j;
__u32 ifindex = 0;
+ const char *file;
int idx, err;
+
if (!REQ_ARGS(2))
return -1;
- open_attr.file = GET_ARG();
+ file = GET_ARG();
pinfile = GET_ARG();
while (argc) {
@@ -1118,7 +1121,7 @@ static int load_with_options(int argc, char **argv, bool first_prog_only)
NEXT_ARG();
- if (open_attr.prog_type != BPF_PROG_TYPE_UNSPEC) {
+ if (common_prog_type != BPF_PROG_TYPE_UNSPEC) {
p_err("program type already specified");
goto err_free_reuse_maps;
}
@@ -1135,8 +1138,7 @@ static int load_with_options(int argc, char **argv, bool first_prog_only)
strcat(type, *argv);
strcat(type, "/");
- err = libbpf_prog_type_by_name(type,
- &open_attr.prog_type,
+ err = libbpf_prog_type_by_name(type, &common_prog_type,
&expected_attach_type);
free(type);
if (err < 0)
@@ -1224,16 +1226,16 @@ static int load_with_options(int argc, char **argv, bool first_prog_only)
set_max_rlimit();
- obj = __bpf_object__open_xattr(&open_attr, bpf_flags);
+ obj = bpf_object__open_file(file, &open_opts);
if (IS_ERR_OR_NULL(obj)) {
p_err("failed to open object file");
goto err_free_reuse_maps;
}
bpf_object__for_each_program(pos, obj) {
- enum bpf_prog_type prog_type = open_attr.prog_type;
+ enum bpf_prog_type prog_type = common_prog_type;
- if (open_attr.prog_type == BPF_PROG_TYPE_UNSPEC) {
+ if (prog_type == BPF_PROG_TYPE_UNSPEC) {
const char *sec_name = bpf_program__title(pos, false);
err = libbpf_prog_type_by_name(sec_name, &prog_type,
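Editor's note: a minimal sketch of the opts-based open API this hunk migrates to, assuming libbpf headers of the same vintage; the function name and object path are illustrative.

#include <bpf/libbpf.h>

int open_with_relaxed_maps(const char *path)
{
        DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
                .relaxed_maps = true,   /* replaces MAPS_RELAX_COMPAT */
        );
        struct bpf_object *obj = bpf_object__open_file(path, &opts);

        return libbpf_get_error(obj) ? -1 : 0;
}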
diff --git a/tools/gpio/Build b/tools/gpio/Build
index 620c1937d957..4141f35837db 100644
--- a/tools/gpio/Build
+++ b/tools/gpio/Build
@@ -1,3 +1,4 @@
+gpio-utils-y += gpio-utils.o
lsgpio-y += lsgpio.o gpio-utils.o
gpio-hammer-y += gpio-hammer.o gpio-utils.o
gpio-event-mon-y += gpio-event-mon.o gpio-utils.o
diff --git a/tools/gpio/Makefile b/tools/gpio/Makefile
index 1178d302757e..6080de58861f 100644
--- a/tools/gpio/Makefile
+++ b/tools/gpio/Makefile
@@ -35,11 +35,15 @@ $(OUTPUT)include/linux/gpio.h: ../../include/uapi/linux/gpio.h
prepare: $(OUTPUT)include/linux/gpio.h
GPIO_UTILS_IN := $(OUTPUT)gpio-utils-in.o
+$(GPIO_UTILS_IN): prepare FORCE
+ $(Q)$(MAKE) $(build)=gpio-utils
+
#
# lsgpio
#
LSGPIO_IN := $(OUTPUT)lsgpio-in.o
-$(LSGPIO_IN): prepare FORCE
+$(LSGPIO_IN): prepare FORCE $(OUTPUT)gpio-utils-in.o
$(Q)$(MAKE) $(build)=lsgpio
$(OUTPUT)lsgpio: $(LSGPIO_IN)
$(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $< -o $@
@@ -48,7 +52,7 @@ $(OUTPUT)lsgpio: $(LSGPIO_IN)
# gpio-hammer
#
GPIO_HAMMER_IN := $(OUTPUT)gpio-hammer-in.o
-$(GPIO_HAMMER_IN): prepare FORCE
+$(GPIO_HAMMER_IN): prepare FORCE $(OUTPUT)gpio-utils-in.o
$(Q)$(MAKE) $(build)=gpio-hammer
$(OUTPUT)gpio-hammer: $(GPIO_HAMMER_IN)
$(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $< -o $@
@@ -57,7 +61,7 @@ $(OUTPUT)gpio-hammer: $(GPIO_HAMMER_IN)
# gpio-event-mon
#
GPIO_EVENT_MON_IN := $(OUTPUT)gpio-event-mon-in.o
-$(GPIO_EVENT_MON_IN): prepare FORCE
+$(GPIO_EVENT_MON_IN): prepare FORCE $(OUTPUT)gpio-utils-in.o
$(Q)$(MAKE) $(build)=gpio-event-mon
$(OUTPUT)gpio-event-mon: $(GPIO_EVENT_MON_IN)
$(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $< -o $@
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 77c6be96d676..dbbcf0b02970 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -173,6 +173,7 @@ enum bpf_prog_type {
BPF_PROG_TYPE_CGROUP_SYSCTL,
BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE,
BPF_PROG_TYPE_CGROUP_SOCKOPT,
+ BPF_PROG_TYPE_TRACING,
};
enum bpf_attach_type {
@@ -199,6 +200,9 @@ enum bpf_attach_type {
BPF_CGROUP_UDP6_RECVMSG,
BPF_CGROUP_GETSOCKOPT,
BPF_CGROUP_SETSOCKOPT,
+ BPF_TRACE_RAW_TP,
+ BPF_TRACE_FENTRY,
+ BPF_TRACE_FEXIT,
__MAX_BPF_ATTACH_TYPE
};
@@ -344,6 +348,9 @@ enum bpf_attach_type {
/* Clone map from listener for newly accepted socket */
#define BPF_F_CLONE (1U << 9)
+/* Enable memory-mapping BPF map */
+#define BPF_F_MMAPABLE (1U << 10)
+
/* flags for BPF_PROG_QUERY */
#define BPF_F_QUERY_EFFECTIVE (1U << 0)
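Editor's note: BPF_F_MMAPABLE lets userspace map an array map's values directly. A hedged sketch (requires a kernel with this flag and a matching libbpf) of creating such a map and updating a value without a syscall per write:

#include <bpf/bpf.h>
#include <sys/mman.h>
#include <unistd.h>

int map_mmap_demo(void)
{
        /* one-element array map whose value area is mmap()-able */
        int fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
                                sizeof(long), 1, BPF_F_MMAPABLE);
        long *val;

        if (fd < 0)
                return -1;
        val = mmap(NULL, getpagesize(), PROT_READ | PROT_WRITE,
                   MAP_SHARED, fd, 0);
        if (val == MAP_FAILED)
                return -1;
        *val = 42;      /* visible to BPF programs, no bpf(2) call needed */
        return 0;
}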
@@ -420,6 +427,8 @@ union bpf_attr {
__u32 line_info_rec_size; /* userspace bpf_line_info size */
__aligned_u64 line_info; /* line info */
__u32 line_info_cnt; /* number of bpf_line_info records */
+ __u32 attach_btf_id; /* in-kernel BTF type id to attach to */
+ __u32 attach_prog_fd; /* 0 to attach to vmlinux */
};
struct { /* anonymous struct used by BPF_OBJ_* commands */
@@ -560,10 +569,13 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_probe_read(void *dst, u32 size, const void *src)
+ * int bpf_probe_read(void *dst, u32 size, const void *unsafe_ptr)
* Description
* For tracing programs, safely attempt to read *size* bytes from
- * address *src* and store the data in *dst*.
+ * kernel space address *unsafe_ptr* and store the data in *dst*.
+ *
+ * Generally, use bpf_probe_read_user() or bpf_probe_read_kernel()
+ * instead.
* Return
* 0 on success, or a negative error in case of failure.
*
@@ -794,7 +806,7 @@ union bpf_attr {
* A 64-bit integer containing the current GID and UID, and
* created as such: *current_gid* **<< 32 \|** *current_uid*.
*
- * int bpf_get_current_comm(char *buf, u32 size_of_buf)
+ * int bpf_get_current_comm(void *buf, u32 size_of_buf)
* Description
* Copy the **comm** attribute of the current task into *buf* of
* *size_of_buf*. The **comm** attribute contains the name of
@@ -1023,7 +1035,7 @@ union bpf_attr {
* The realm of the route for the packet associated to *skb*, or 0
* if none was found.
*
- * int bpf_perf_event_output(struct pt_regs *ctx, struct bpf_map *map, u64 flags, void *data, u64 size)
+ * int bpf_perf_event_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size)
* Description
* Write raw *data* blob into a special BPF perf event held by
* *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf
@@ -1068,7 +1080,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_skb_load_bytes(const struct sk_buff *skb, u32 offset, void *to, u32 len)
+ * int bpf_skb_load_bytes(const void *skb, u32 offset, void *to, u32 len)
* Description
* This helper was provided as an easy way to load data from a
* packet. It can be used to load *len* bytes from *offset* from
@@ -1085,7 +1097,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_get_stackid(struct pt_regs *ctx, struct bpf_map *map, u64 flags)
+ * int bpf_get_stackid(void *ctx, struct bpf_map *map, u64 flags)
* Description
* Walk a user or a kernel stack and return its id. To achieve
* this, the helper needs *ctx*, which is a pointer to the context
@@ -1154,7 +1166,7 @@ union bpf_attr {
* The checksum result, or a negative error code in case of
* failure.
*
- * int bpf_skb_get_tunnel_opt(struct sk_buff *skb, u8 *opt, u32 size)
+ * int bpf_skb_get_tunnel_opt(struct sk_buff *skb, void *opt, u32 size)
* Description
* Retrieve tunnel options metadata for the packet associated to
* *skb*, and store the raw tunnel option data to the buffer *opt*
@@ -1172,7 +1184,7 @@ union bpf_attr {
* Return
* The size of the option data retrieved.
*
- * int bpf_skb_set_tunnel_opt(struct sk_buff *skb, u8 *opt, u32 size)
+ * int bpf_skb_set_tunnel_opt(struct sk_buff *skb, void *opt, u32 size)
* Description
* Set tunnel options metadata for the packet associated to *skb*
* to the option data contained in the raw buffer *opt* of *size*.
@@ -1425,45 +1437,14 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_probe_read_str(void *dst, int size, const void *unsafe_ptr)
+ * int bpf_probe_read_str(void *dst, u32 size, const void *unsafe_ptr)
* Description
- * Copy a NUL terminated string from an unsafe address
- * *unsafe_ptr* to *dst*. The *size* should include the
- * terminating NUL byte. In case the string length is smaller than
- * *size*, the target is not padded with further NUL bytes. If the
- * string length is larger than *size*, just *size*-1 bytes are
- * copied and the last byte is set to NUL.
- *
- * On success, the length of the copied string is returned. This
- * makes this helper useful in tracing programs for reading
- * strings, and more importantly to get its length at runtime. See
- * the following snippet:
- *
- * ::
- *
- * SEC("kprobe/sys_open")
- * void bpf_sys_open(struct pt_regs *ctx)
- * {
- * char buf[PATHLEN]; // PATHLEN is defined to 256
- * int res = bpf_probe_read_str(buf, sizeof(buf),
- * ctx->di);
- *
- * // Consume buf, for example push it to
- * // userspace via bpf_perf_event_output(); we
- * // can use res (the string length) as event
- * // size, after checking its boundaries.
- * }
+ * Copy a NUL terminated string from an unsafe kernel address
+ * *unsafe_ptr* to *dst*. See bpf_probe_read_kernel_str() for
+ * more details.
*
- * In comparison, using **bpf_probe_read()** helper here instead
- * to read the string would require to estimate the length at
- * compile time, and would often result in copying more memory
- * than necessary.
- *
- * Another useful use case is when parsing individual process
- * arguments or individual environment variables navigating
- * *current*\ **->mm->arg_start** and *current*\
- * **->mm->env_start**: using this helper and the return value,
- * one can quickly iterate at the right offset of the memory area.
+ * Generally, use bpf_probe_read_user_str() or bpf_probe_read_kernel_str()
+ * instead.
* Return
* On success, the strictly positive length of the string,
* including the trailing NUL character. On error, a negative
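Editor's note: the snippet removed above translates naturally to the new helper. An illustrative kprobe sketch, assuming declarations for bpf_probe_read_kernel_str() and PT_REGS_PARM2() are available from bpf_helpers.h of the same series; the probe target and argument index are illustrative.

#include <linux/ptrace.h>
#include "bpf_helpers.h"

SEC("kprobe/do_sys_open")
int trace_open(struct pt_regs *ctx)
{
        char buf[256];
        int n;

        /* filename is the second argument of do_sys_open() */
        n = bpf_probe_read_kernel_str(buf, sizeof(buf),
                                      (void *) PT_REGS_PARM2(ctx));
        if (n > 0) {
                /* buf holds the NUL-terminated name; n includes the NUL */
        }
        return 0;
}

char _license[] SEC("license") = "GPL";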
@@ -1511,7 +1492,7 @@ union bpf_attr {
* Return
* 0
*
- * int bpf_setsockopt(struct bpf_sock_ops *bpf_socket, int level, int optname, char *optval, int optlen)
+ * int bpf_setsockopt(struct bpf_sock_ops *bpf_socket, int level, int optname, void *optval, int optlen)
* Description
* Emulate a call to **setsockopt()** on the socket associated to
* *bpf_socket*, which must be a full socket. The *level* at
@@ -1595,7 +1576,7 @@ union bpf_attr {
* Return
* **XDP_REDIRECT** on success, or **XDP_ABORTED** on error.
*
- * int bpf_sk_redirect_map(struct bpf_map *map, u32 key, u64 flags)
+ * int bpf_sk_redirect_map(struct sk_buff *skb, struct bpf_map *map, u32 key, u64 flags)
* Description
* Redirect the packet to the socket referenced by *map* (of type
* **BPF_MAP_TYPE_SOCKMAP**) at index *key*. Both ingress and
@@ -1715,7 +1696,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_getsockopt(struct bpf_sock_ops *bpf_socket, int level, int optname, char *optval, int optlen)
+ * int bpf_getsockopt(struct bpf_sock_ops *bpf_socket, int level, int optname, void *optval, int optlen)
* Description
* Emulate a call to **getsockopt()** on the socket associated to
* *bpf_socket*, which must be a full socket. The *level* at
@@ -1947,7 +1928,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_get_stack(struct pt_regs *regs, void *buf, u32 size, u64 flags)
+ * int bpf_get_stack(void *ctx, void *buf, u32 size, u64 flags)
* Description
* Return a user or a kernel stack in bpf program provided buffer.
* To achieve this, the helper needs *ctx*, which is a pointer
@@ -1980,7 +1961,7 @@ union bpf_attr {
* A non-negative value equal to or less than *size* on success,
* or a negative error in case of failure.
*
- * int bpf_skb_load_bytes_relative(const struct sk_buff *skb, u32 offset, void *to, u32 len, u32 start_header)
+ * int bpf_skb_load_bytes_relative(const void *skb, u32 offset, void *to, u32 len, u32 start_header)
* Description
* This helper is similar to **bpf_skb_load_bytes**\ () in that
* it provides an easy way to load *len* bytes from *offset*
@@ -2033,7 +2014,7 @@ union bpf_attr {
* * > 0 one of **BPF_FIB_LKUP_RET_** codes explaining why the
* packet is not forwarded or needs assist from full stack
*
- * int bpf_sock_hash_update(struct bpf_sock_ops_kern *skops, struct bpf_map *map, void *key, u64 flags)
+ * int bpf_sock_hash_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags)
* Description
* Add an entry to, or update a sockhash *map* referencing sockets.
* The *skops* is used as a new value for the entry associated to
@@ -2392,7 +2373,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_msg_push_data(struct sk_buff *skb, u32 start, u32 len, u64 flags)
+ * int bpf_msg_push_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags)
* Description
* For socket policies, insert *len* bytes into *msg* at offset
* *start*.
@@ -2408,9 +2389,9 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_msg_pop_data(struct sk_msg_buff *msg, u32 start, u32 pop, u64 flags)
+ * int bpf_msg_pop_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags)
* Description
- * Will remove *pop* bytes from a *msg* starting at byte *start*.
+ * Will remove *len* bytes from a *msg* starting at byte *start*.
* This may result in **ENOMEM** errors under certain situations if
* an allocation and copy are required due to a full ring buffer.
* However, the helper will try to avoid doing the allocation
@@ -2505,7 +2486,7 @@ union bpf_attr {
* A **struct bpf_tcp_sock** pointer on success, or **NULL** in
* case of failure.
*
- * int bpf_skb_ecn_set_ce(struct sk_buf *skb)
+ * int bpf_skb_ecn_set_ce(struct sk_buff *skb)
* Description
* Set ECN (Explicit Congestion Notification) field of IP header
* to **CE** (Congestion Encountered) if current value is **ECT**
@@ -2750,6 +2731,96 @@ union bpf_attr {
* **-EOPNOTSUPP** kernel configuration does not enable SYN cookies
*
* **-EPROTONOSUPPORT** IP packet version is not 4 or 6
+ *
+ * int bpf_skb_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size)
+ * Description
+ * Write raw *data* blob into a special BPF perf event held by
+ * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf
+ * event must have the following attributes: **PERF_SAMPLE_RAW**
+ * as **sample_type**, **PERF_TYPE_SOFTWARE** as **type**, and
+ * **PERF_COUNT_SW_BPF_OUTPUT** as **config**.
+ *
+ * The *flags* are used to indicate the index in *map* for which
+ * the value must be put, masked with **BPF_F_INDEX_MASK**.
+ * Alternatively, *flags* can be set to **BPF_F_CURRENT_CPU**
+ * to indicate that the index of the current CPU core should be
+ * used.
+ *
 + *		The value to write, of *size*, is passed through the eBPF stack
 + *		and pointed to by *data*.
+ *
+ * *ctx* is a pointer to in-kernel struct sk_buff.
+ *
+ * This helper is similar to **bpf_perf_event_output**\ () but
+ * restricted to raw_tracepoint bpf programs.
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
+ * int bpf_probe_read_user(void *dst, u32 size, const void *unsafe_ptr)
+ * Description
+ * Safely attempt to read *size* bytes from user space address
+ * *unsafe_ptr* and store the data in *dst*.
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
+ * int bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr)
+ * Description
+ * Safely attempt to read *size* bytes from kernel space address
+ * *unsafe_ptr* and store the data in *dst*.
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
+ * int bpf_probe_read_user_str(void *dst, u32 size, const void *unsafe_ptr)
+ * Description
+ * Copy a NUL terminated string from an unsafe user address
+ * *unsafe_ptr* to *dst*. The *size* should include the
+ * terminating NUL byte. In case the string length is smaller than
+ * *size*, the target is not padded with further NUL bytes. If the
+ * string length is larger than *size*, just *size*-1 bytes are
+ * copied and the last byte is set to NUL.
+ *
+ * On success, the length of the copied string is returned. This
+ * makes this helper useful in tracing programs for reading
 + *		strings and, more importantly, for getting their length at runtime. See
+ * the following snippet:
+ *
+ * ::
+ *
+ * SEC("kprobe/sys_open")
+ * void bpf_sys_open(struct pt_regs *ctx)
+ * {
+ * char buf[PATHLEN]; // PATHLEN is defined to 256
+ * int res = bpf_probe_read_user_str(buf, sizeof(buf),
+ * ctx->di);
+ *
+ * // Consume buf, for example push it to
+ * // userspace via bpf_perf_event_output(); we
+ * // can use res (the string length) as event
+ * // size, after checking its boundaries.
+ * }
+ *
 + *		In comparison, using the **bpf_probe_read_user**\ () helper here
 + *		instead to read the string would require estimating the length
 + *		at compile time, and would often result in copying more memory
+ * than necessary.
+ *
 + *		Another common use case is parsing individual process
 + *		arguments or individual environment variables by navigating
+ * *current*\ **->mm->arg_start** and *current*\
+ * **->mm->env_start**: using this helper and the return value,
+ * one can quickly iterate at the right offset of the memory area.
+ * Return
+ * On success, the strictly positive length of the string,
+ * including the trailing NUL character. On error, a negative
+ * value.
+ *
+ * int bpf_probe_read_kernel_str(void *dst, u32 size, const void *unsafe_ptr)
+ * Description
+ * Copy a NUL terminated string from an unsafe kernel address *unsafe_ptr*
+ * to *dst*. Same semantics as with bpf_probe_read_user_str() apply.
+ * Return
+ * On success, the strictly positive length of the string, including
+ * the trailing NUL character. On error, a negative value.
*/
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
@@ -2862,7 +2933,12 @@ union bpf_attr {
FN(sk_storage_get), \
FN(sk_storage_delete), \
FN(send_signal), \
- FN(tcp_gen_syncookie),
+ FN(tcp_gen_syncookie), \
+ FN(skb_output), \
+ FN(probe_read_user), \
+ FN(probe_read_kernel), \
+ FN(probe_read_user_str), \
+ FN(probe_read_kernel_str),
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
* function eBPF program intends to call
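
An illustrative sketch (not part of the patch) of how a tracing program might use the new user-space string helper; the probed function and buffer size are assumptions, and SEC()/bpf_printk()/PT_REGS_PARM2() come from the bpf_helpers.h and bpf_tracing.h headers introduced further down in this series:

	SEC("kprobe/do_sys_open")
	int trace_open(struct pt_regs *ctx)
	{
		char buf[256];
		/* the second do_sys_open() argument is a user-space filename pointer */
		int res = bpf_probe_read_user_str(buf, sizeof(buf),
						  (const void *)PT_REGS_PARM2(ctx));

		if (res > 0) /* res is the copied length, including the trailing NUL */
			bpf_printk("open: %s\n", buf);
		return 0;
	}
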
diff --git a/tools/include/uapi/linux/fcntl.h b/tools/include/uapi/linux/fcntl.h
index 1d338357df8a..1f97b33c840e 100644
--- a/tools/include/uapi/linux/fcntl.h
+++ b/tools/include/uapi/linux/fcntl.h
@@ -58,7 +58,7 @@
* Valid hint values for F_{GET,SET}_RW_HINT. 0 is "not set", or can be
* used to clear any hints previously set.
*/
-#define RWF_WRITE_LIFE_NOT_SET 0
+#define RWH_WRITE_LIFE_NOT_SET 0
#define RWH_WRITE_LIFE_NONE 1
#define RWH_WRITE_LIFE_SHORT 2
#define RWH_WRITE_LIFE_MEDIUM 3
@@ -66,6 +66,13 @@
#define RWH_WRITE_LIFE_EXTREME 5
/*
+ * The originally introduced spelling is retained from the first
+ * versions of the patch set that introduced the feature; see commit
+ * v4.13-rc1~212^2~51.
+ */
+#define RWF_WRITE_LIFE_NOT_SET RWH_WRITE_LIFE_NOT_SET
+
+/*
* Types of directory notifications that may be requested.
*/
#define DN_ACCESS 0x00000001 /* File accessed */
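
Editorial aside: the RWH_* values above are consumed by the F_SET_RW_HINT fcntl, which per fcntl(2) takes a pointer to a 64-bit hint. A minimal sketch, assuming the uapi definitions are in scope (F_SET_RW_HINT lives in linux/fcntl.h on older libcs):

	#include <fcntl.h>	/* fcntl() */
	#include <stdint.h>

	static int mark_short_lived(int fd)
	{
		uint64_t hint = RWH_WRITE_LIFE_SHORT;

		return fcntl(fd, F_SET_RW_HINT, &hint); /* 0 on success, -1 on error */
	}
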
diff --git a/tools/include/uapi/linux/if_link.h b/tools/include/uapi/linux/if_link.h
index 4a8c02cafa9a..8aec8769d944 100644
--- a/tools/include/uapi/linux/if_link.h
+++ b/tools/include/uapi/linux/if_link.h
@@ -167,6 +167,8 @@ enum {
IFLA_NEW_IFINDEX,
IFLA_MIN_MTU,
IFLA_MAX_MTU,
+ IFLA_PROP_LIST,
+ IFLA_ALT_IFNAME, /* Alternative ifname */
__IFLA_MAX
};
diff --git a/tools/include/uapi/linux/perf_event.h b/tools/include/uapi/linux/perf_event.h
index bb7b271397a6..377d794d3105 100644
--- a/tools/include/uapi/linux/perf_event.h
+++ b/tools/include/uapi/linux/perf_event.h
@@ -141,8 +141,9 @@ enum perf_event_sample_format {
PERF_SAMPLE_TRANSACTION = 1U << 17,
PERF_SAMPLE_REGS_INTR = 1U << 18,
PERF_SAMPLE_PHYS_ADDR = 1U << 19,
+ PERF_SAMPLE_AUX = 1U << 20,
 
- PERF_SAMPLE_MAX = 1U << 20, /* non-ABI */
+ PERF_SAMPLE_MAX = 1U << 21, /* non-ABI */
__PERF_SAMPLE_CALLCHAIN_EARLY = 1ULL << 63, /* non-ABI; internal use */
};
@@ -300,6 +301,7 @@ enum perf_event_read_format {
/* add: sample_stack_user */
#define PERF_ATTR_SIZE_VER4 104 /* add: sample_regs_intr */
#define PERF_ATTR_SIZE_VER5 112 /* add: aux_watermark */
+#define PERF_ATTR_SIZE_VER6 120 /* add: aux_sample_size */
/*
* Hardware event_id to monitor via a performance monitoring event:
@@ -424,7 +426,9 @@ struct perf_event_attr {
*/
__u32 aux_watermark;
__u16 sample_max_stack;
- __u16 __reserved_2; /* align to __u64 */
+ __u16 __reserved_2;
+ __u32 aux_sample_size;
+ __u32 __reserved_3;
};
/*
@@ -864,6 +868,8 @@ enum perf_event_type {
* { u64 abi; # enum perf_sample_regs_abi
* u64 regs[weight(mask)]; } && PERF_SAMPLE_REGS_INTR
* { u64 phys_addr;} && PERF_SAMPLE_PHYS_ADDR
+ * { u64 size;
+ * char data[size]; } && PERF_SAMPLE_AUX
* };
*/
PERF_RECORD_SAMPLE = 9,
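
For orientation, an illustrative sketch (not from the patch) of an attribute setup requesting AUX data in samples; the leader event is an assumption, and the event group is expected to contain an AUX-producing event (e.g. Intel PT) for the kernel to have anything to snapshot:

	#include <string.h>
	#include <linux/perf_event.h>

	static void setup_aux_sampling(struct perf_event_attr *attr)
	{
		memset(attr, 0, sizeof(*attr));
		attr->type = PERF_TYPE_HARDWARE;	/* illustrative */
		attr->size = sizeof(*attr);		/* >= PERF_ATTR_SIZE_VER6 */
		attr->config = PERF_COUNT_HW_CPU_CYCLES;
		attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_AUX;
		attr->aux_sample_size = 4096;		/* AUX bytes per sample */
	}
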
diff --git a/tools/lib/api/debug-internal.h b/tools/lib/api/debug-internal.h
index 80c783497d25..5a5820c11db8 100644
--- a/tools/lib/api/debug-internal.h
+++ b/tools/lib/api/debug-internal.h
@@ -10,11 +10,11 @@ do { \
(func)("libapi: " fmt, ##__VA_ARGS__); \
} while (0)
-extern libapi_print_fn_t __pr_warning;
+extern libapi_print_fn_t __pr_warn;
extern libapi_print_fn_t __pr_info;
extern libapi_print_fn_t __pr_debug;
-#define pr_warning(fmt, ...) __pr(__pr_warning, fmt, ##__VA_ARGS__)
+#define pr_warn(fmt, ...) __pr(__pr_warn, fmt, ##__VA_ARGS__)
#define pr_info(fmt, ...) __pr(__pr_info, fmt, ##__VA_ARGS__)
#define pr_debug(fmt, ...) __pr(__pr_debug, fmt, ##__VA_ARGS__)
diff --git a/tools/lib/api/debug.c b/tools/lib/api/debug.c
index 69b1ba3d1ee3..7708f0558e8c 100644
--- a/tools/lib/api/debug.c
+++ b/tools/lib/api/debug.c
@@ -15,7 +15,7 @@ static int __base_pr(const char *format, ...)
return err;
}
-libapi_print_fn_t __pr_warning = __base_pr;
+libapi_print_fn_t __pr_warn = __base_pr;
libapi_print_fn_t __pr_info = __base_pr;
libapi_print_fn_t __pr_debug;
@@ -23,7 +23,7 @@ void libapi_set_print(libapi_print_fn_t warn,
libapi_print_fn_t info,
libapi_print_fn_t debug)
{
- __pr_warning = warn;
+ __pr_warn = warn;
__pr_info = info;
__pr_debug = debug;
}
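
A hedged sketch of a caller after the rename; the printf-like callback signature mirrors how __base_pr is used above, and routing everything to stderr is an arbitrary choice:

	#include <stdarg.h>
	#include <stdio.h>

	static int to_stderr(const char *fmt, ...)
	{
		va_list ap;
		int n;

		va_start(ap, fmt);
		n = vfprintf(stderr, fmt, ap);
		va_end(ap);
		return n;
	}

	static void setup_libapi_logging(void)
	{
		/* warn and info to stderr; debug left NULL (disabled) */
		libapi_set_print(to_stderr, to_stderr, NULL);
	}
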
diff --git a/tools/lib/api/fs/fs.c b/tools/lib/api/fs/fs.c
index 7aba8243a0e7..11b3885e833e 100644
--- a/tools/lib/api/fs/fs.c
+++ b/tools/lib/api/fs/fs.c
@@ -381,8 +381,8 @@ int filename__read_str(const char *filename, char **buf, size_t *sizep)
n = read(fd, bf + size, alloc_size - size);
if (n < 0) {
if (size) {
- pr_warning("read failed %d: %s\n", errno,
- strerror_r(errno, sbuf, sizeof(sbuf)));
+ pr_warn("read failed %d: %s\n", errno,
+ strerror_r(errno, sbuf, sizeof(sbuf)));
err = 0;
} else
err = -errno;
diff --git a/tools/lib/bpf/.gitignore b/tools/lib/bpf/.gitignore
index d9e9dec04605..35bf013e368c 100644
--- a/tools/lib/bpf/.gitignore
+++ b/tools/lib/bpf/.gitignore
@@ -3,3 +3,7 @@ libbpf.pc
FEATURE-DUMP.libbpf
test_libbpf
libbpf.so.*
+TAGS
+tags
+cscope.*
+/bpf_helper_defs.h
diff --git a/tools/lib/bpf/Makefile b/tools/lib/bpf/Makefile
index 56ce6292071b..99425d0be6ff 100644
--- a/tools/lib/bpf/Makefile
+++ b/tools/lib/bpf/Makefile
@@ -56,7 +56,7 @@ ifndef VERBOSE
endif
FEATURE_USER = .libbpf
-FEATURE_TESTS = libelf libelf-mmap bpf reallocarray cxx
+FEATURE_TESTS = libelf libelf-mmap bpf reallocarray
FEATURE_DISPLAY = libelf bpf
INCLUDES = -I. -I$(srctree)/tools/include -I$(srctree)/tools/arch/$(ARCH)/include/uapi -I$(srctree)/tools/include/uapi
@@ -143,6 +143,8 @@ LIB_TARGET := $(addprefix $(OUTPUT),$(LIB_TARGET))
LIB_FILE := $(addprefix $(OUTPUT),$(LIB_FILE))
PC_FILE := $(addprefix $(OUTPUT),$(PC_FILE))
+TAGS_PROG := $(if $(shell which etags 2>/dev/null),etags,ctags)
+
GLOBAL_SYM_COUNT = $(shell readelf -s --wide $(BPF_IN_SHARED) | \
cut -d "@" -f1 | sed 's/_v[0-9]_[0-9]_[0-9].*//' | \
awk '/GLOBAL/ && /DEFAULT/ && !/UND/ {print $$8}' | \
@@ -150,22 +152,14 @@ GLOBAL_SYM_COUNT = $(shell readelf -s --wide $(BPF_IN_SHARED) | \
VERSIONED_SYM_COUNT = $(shell readelf -s --wide $(OUTPUT)libbpf.so | \
grep -Eo '[^ ]+@LIBBPF_' | cut -d@ -f1 | sort -u | wc -l)
-CMD_TARGETS = $(LIB_TARGET) $(PC_FILE)
-
-CXX_TEST_TARGET = $(OUTPUT)test_libbpf
-
-ifeq ($(feature-cxx), 1)
- CMD_TARGETS += $(CXX_TEST_TARGET)
-endif
-
-TARGETS = $(CMD_TARGETS)
+CMD_TARGETS = $(LIB_TARGET) $(PC_FILE) $(OUTPUT)test_libbpf
all: fixdep
$(Q)$(MAKE) all_cmd
all_cmd: $(CMD_TARGETS) check
-$(BPF_IN_SHARED): force elfdep bpfdep
+$(BPF_IN_SHARED): force elfdep bpfdep bpf_helper_defs.h
@(test -f ../../include/uapi/linux/bpf.h -a -f ../../../include/uapi/linux/bpf.h && ( \
(diff -B ../../include/uapi/linux/bpf.h ../../../include/uapi/linux/bpf.h >/dev/null) || \
echo "Warning: Kernel ABI header at 'tools/include/uapi/linux/bpf.h' differs from latest version at 'include/uapi/linux/bpf.h'" >&2 )) || true
@@ -183,22 +177,27 @@ $(BPF_IN_SHARED): force elfdep bpfdep
echo "Warning: Kernel ABI header at 'tools/include/uapi/linux/if_xdp.h' differs from latest version at 'include/uapi/linux/if_xdp.h'" >&2 )) || true
$(Q)$(MAKE) $(build)=libbpf OUTPUT=$(SHARED_OBJDIR) CFLAGS="$(CFLAGS) $(SHLIB_FLAGS)"
-$(BPF_IN_STATIC): force elfdep bpfdep
+$(BPF_IN_STATIC): force elfdep bpfdep bpf_helper_defs.h
$(Q)$(MAKE) $(build)=libbpf OUTPUT=$(STATIC_OBJDIR)
+bpf_helper_defs.h: $(srctree)/include/uapi/linux/bpf.h
+ $(Q)$(srctree)/scripts/bpf_helpers_doc.py --header \
+ --file $(srctree)/include/uapi/linux/bpf.h > bpf_helper_defs.h
+
$(OUTPUT)libbpf.so: $(OUTPUT)libbpf.so.$(LIBBPF_VERSION)
$(OUTPUT)libbpf.so.$(LIBBPF_VERSION): $(BPF_IN_SHARED)
- $(QUIET_LINK)$(CC) --shared -Wl,-soname,libbpf.so.$(LIBBPF_MAJOR_VERSION) \
- -Wl,--version-script=$(VERSION_SCRIPT) $^ -lelf -o $@
+ $(QUIET_LINK)$(CC) $(LDFLAGS) \
+ --shared -Wl,-soname,libbpf.so.$(LIBBPF_MAJOR_VERSION) \
+ -Wl,--version-script=$(VERSION_SCRIPT) $^ -lelf -o $@
@ln -sf $(@F) $(OUTPUT)libbpf.so
@ln -sf $(@F) $(OUTPUT)libbpf.so.$(LIBBPF_MAJOR_VERSION)
$(OUTPUT)libbpf.a: $(BPF_IN_STATIC)
$(QUIET_LINK)$(RM) $@; $(AR) rcs $@ $^
-$(OUTPUT)test_libbpf: test_libbpf.cpp $(OUTPUT)libbpf.a
- $(QUIET_LINK)$(CXX) $(INCLUDES) $^ -lelf -o $@
+$(OUTPUT)test_libbpf: test_libbpf.c $(OUTPUT)libbpf.a
+ $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $(INCLUDES) $^ -lelf -o $@
$(OUTPUT)libbpf.pc:
$(QUIET_GEN)sed -e "s|@PREFIX@|$(prefix)|" \
@@ -247,13 +246,18 @@ install_lib: all_cmd
$(call do_install_mkdir,$(libdir_SQ)); \
cp -fpR $(LIB_FILE) $(DESTDIR)$(libdir_SQ)
-install_headers:
+install_headers: bpf_helper_defs.h
$(call QUIET_INSTALL, headers) \
$(call do_install,bpf.h,$(prefix)/include/bpf,644); \
$(call do_install,libbpf.h,$(prefix)/include/bpf,644); \
$(call do_install,btf.h,$(prefix)/include/bpf,644); \
$(call do_install,libbpf_util.h,$(prefix)/include/bpf,644); \
- $(call do_install,xsk.h,$(prefix)/include/bpf,644);
+ $(call do_install,xsk.h,$(prefix)/include/bpf,644); \
+ $(call do_install,bpf_helpers.h,$(prefix)/include/bpf,644); \
+ $(call do_install,bpf_helper_defs.h,$(prefix)/include/bpf,644); \
+ $(call do_install,bpf_tracing.h,$(prefix)/include/bpf,644); \
+ $(call do_install,bpf_endian.h,$(prefix)/include/bpf,644); \
+ $(call do_install,bpf_core_read.h,$(prefix)/include/bpf,644);
install_pkgconfig: $(PC_FILE)
$(call QUIET_INSTALL, $(PC_FILE)) \
@@ -268,14 +272,15 @@ config-clean:
$(Q)$(MAKE) -C $(srctree)/tools/build/feature/ clean >/dev/null
clean:
- $(call QUIET_CLEAN, libbpf) $(RM) -rf $(TARGETS) $(CXX_TEST_TARGET) \
+ $(call QUIET_CLEAN, libbpf) $(RM) -rf $(CMD_TARGETS) \
*.o *~ *.a *.so *.so.$(LIBBPF_MAJOR_VERSION) .*.d .*.cmd \
- *.pc LIBBPF-CFLAGS $(SHARED_OBJDIR) $(STATIC_OBJDIR)
+ *.pc LIBBPF-CFLAGS bpf_helper_defs.h \
+ $(SHARED_OBJDIR) $(STATIC_OBJDIR)
$(call QUIET_CLEAN, core-gen) $(RM) $(OUTPUT)FEATURE-DUMP.libbpf
-PHONY += force elfdep bpfdep
+PHONY += force elfdep bpfdep cscope tags
force:
elfdep:
@@ -284,6 +289,17 @@ elfdep:
bpfdep:
@if [ "$(feature-bpf)" != "1" ]; then echo "BPF API too old"; exit 1 ; fi
+cscope:
+ ls *.c *.h > cscope.files
+ cscope -b -q -I $(srctree)/include -f cscope.out
+
+tags:
+ rm -f TAGS tags
+ ls *.c *.h | xargs $(TAGS_PROG) -a
+
# Declare the contents of the .PHONY variable as phony. We keep that
# information in a variable so we can use it in if_changed and friends.
.PHONY: $(PHONY)
+
+# Delete partially updated (corrupted) files on error
+.DELETE_ON_ERROR:
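
For reference, the generated bpf_helper_defs.h binds each documented helper prototype to its call number; roughly this shape (illustrative, not verbatim script output):

	/* helper 112 matches FN(probe_read_user)'s position in __BPF_FUNC_MAPPER */
	static int (*bpf_probe_read_user)(void *dst, __u32 size,
					  const void *unsafe_ptr) = (void *) 112;
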
diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
index cbb933532981..98596e15390f 100644
--- a/tools/lib/bpf/bpf.c
+++ b/tools/lib/bpf/bpf.c
@@ -189,7 +189,7 @@ static void *
alloc_zero_tailing_info(const void *orecord, __u32 cnt,
__u32 actual_rec_size, __u32 expected_rec_size)
{
- __u64 info_len = actual_rec_size * cnt;
+ __u64 info_len = (__u64)actual_rec_size * cnt;
void *info, *nrecord;
int i;
@@ -228,6 +228,13 @@ int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
memset(&attr, 0, sizeof(attr));
attr.prog_type = load_attr->prog_type;
attr.expected_attach_type = load_attr->expected_attach_type;
+ if (attr.prog_type == BPF_PROG_TYPE_TRACING) {
+ attr.attach_btf_id = load_attr->attach_btf_id;
+ attr.attach_prog_fd = load_attr->attach_prog_fd;
+ } else {
+ attr.prog_ifindex = load_attr->prog_ifindex;
+ attr.kern_version = load_attr->kern_version;
+ }
attr.insn_cnt = (__u32)load_attr->insns_cnt;
attr.insns = ptr_to_u64(load_attr->insns);
attr.license = ptr_to_u64(load_attr->license);
@@ -241,8 +248,6 @@ int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
attr.log_size = 0;
}
- attr.kern_version = load_attr->kern_version;
- attr.prog_ifindex = load_attr->prog_ifindex;
attr.prog_btf_fd = load_attr->prog_btf_fd;
attr.func_info_rec_size = load_attr->func_info_rec_size;
attr.func_info_cnt = load_attr->func_info_cnt;
diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h
index 0db01334740f..3c791fa8e68e 100644
--- a/tools/lib/bpf/bpf.h
+++ b/tools/lib/bpf/bpf.h
@@ -77,8 +77,14 @@ struct bpf_load_program_attr {
const struct bpf_insn *insns;
size_t insns_cnt;
const char *license;
- __u32 kern_version;
- __u32 prog_ifindex;
+ union {
+ __u32 kern_version;
+ __u32 attach_prog_fd;
+ };
+ union {
+ __u32 prog_ifindex;
+ __u32 attach_btf_id;
+ };
__u32 prog_btf_fd;
__u32 func_info_rec_size;
const void *func_info;
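
A hedged sketch of the two union legs in use for the new tracing program type; insns, insn_cnt, and btf_id are placeholders resolved elsewhere (a BTF id can come from btf__find_by_name_kind(), added later in this patch):

	static int load_tracing_prog(const struct bpf_insn *insns, size_t insn_cnt,
				     __u32 btf_id)
	{
		struct bpf_load_program_attr load_attr = {0};

		load_attr.prog_type = BPF_PROG_TYPE_TRACING;
		load_attr.expected_attach_type = BPF_TRACE_RAW_TP; /* illustrative */
		load_attr.attach_btf_id = btf_id; /* shares storage with prog_ifindex */
		load_attr.insns = insns;
		load_attr.insns_cnt = insn_cnt;
		load_attr.license = "GPL";

		return bpf_load_program_xattr(&load_attr, NULL, 0);
	}
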
diff --git a/tools/lib/bpf/bpf_core_read.h b/tools/lib/bpf/bpf_core_read.h
new file mode 100644
index 000000000000..7009dc90e012
--- /dev/null
+++ b/tools/lib/bpf/bpf_core_read.h
@@ -0,0 +1,263 @@
+/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
+#ifndef __BPF_CORE_READ_H__
+#define __BPF_CORE_READ_H__
+
+/*
+ * enum bpf_field_info_kind is passed as the second argument into the
+ * __builtin_preserve_field_info() built-in to get a specific aspect of
+ * a field, captured as the first argument. __builtin_preserve_field_info(field,
+ * info_kind) returns a __u32 integer and produces a BTF field relocation, which
+ * is understood and processed by libbpf during BPF object loading. See
+ * selftests/bpf for examples.
+ */
+enum bpf_field_info_kind {
+ BPF_FIELD_BYTE_OFFSET = 0, /* field byte offset */
+ BPF_FIELD_BYTE_SIZE = 1,
+ BPF_FIELD_EXISTS = 2, /* field existence in target kernel */
+ BPF_FIELD_SIGNED = 3,
+ BPF_FIELD_LSHIFT_U64 = 4,
+ BPF_FIELD_RSHIFT_U64 = 5,
+};
+
+#define __CORE_RELO(src, field, info) \
+ __builtin_preserve_field_info((src)->field, BPF_FIELD_##info)
+
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+#define __CORE_BITFIELD_PROBE_READ(dst, src, fld) \
+ bpf_probe_read((void *)dst, \
+ __CORE_RELO(src, fld, BYTE_SIZE), \
+ (const void *)src + __CORE_RELO(src, fld, BYTE_OFFSET))
+#else
+/* the semantics of LSHIFT_U64 assume loading values into the low-order
+ * bytes, so for big-endian we need to adjust the destination pointer
+ * accordingly, based on the field byte size
+ */
+#define __CORE_BITFIELD_PROBE_READ(dst, src, fld) \
+ bpf_probe_read((void *)dst + (8 - __CORE_RELO(src, fld, BYTE_SIZE)), \
+ __CORE_RELO(src, fld, BYTE_SIZE), \
+ (const void *)src + __CORE_RELO(src, fld, BYTE_OFFSET))
+#endif
+
+/*
+ * Extract a bitfield, identified by s->field, and return its value as u64.
+ * All of this is done in a relocatable manner, so bitfield changes such as
+ * signedness, bit size, and offset are handled automatically.
+ * This version of the macro uses bpf_probe_read() to read the underlying
+ * integer storage. The macro functions as an expression that evaluates to
+ * the extracted bitfield value (a failed probe read leaves the value at 0).
+ */
+#define BPF_CORE_READ_BITFIELD_PROBED(s, field) ({ \
+ unsigned long long val = 0; \
+ \
+ __CORE_BITFIELD_PROBE_READ(&val, s, field); \
+ val <<= __CORE_RELO(s, field, LSHIFT_U64); \
+ if (__CORE_RELO(s, field, SIGNED)) \
+ val = ((long long)val) >> __CORE_RELO(s, field, RSHIFT_U64); \
+ else \
+ val = val >> __CORE_RELO(s, field, RSHIFT_U64); \
+ val; \
+})
+
+/*
+ * Extract a bitfield, identified by s->field, and return its value as u64.
+ * This version of the macro uses direct memory reads and should be used from
+ * BPF program types that support such functionality (e.g., typed raw
+ * tracepoints).
+ */
+#define BPF_CORE_READ_BITFIELD(s, field) ({ \
+ const void *p = (const void *)s + __CORE_RELO(s, field, BYTE_OFFSET); \
+ unsigned long long val; \
+ \
+ switch (__CORE_RELO(s, field, BYTE_SIZE)) { \
+	case 1: val = *(const unsigned char *)p; break;			      \
+	case 2: val = *(const unsigned short *)p; break;		      \
+	case 4: val = *(const unsigned int *)p; break;			      \
+	case 8: val = *(const unsigned long long *)p; break;		      \
+ } \
+ val <<= __CORE_RELO(s, field, LSHIFT_U64); \
+ if (__CORE_RELO(s, field, SIGNED)) \
+ val = ((long long)val) >> __CORE_RELO(s, field, RSHIFT_U64); \
+ else \
+ val = val >> __CORE_RELO(s, field, RSHIFT_U64); \
+ val; \
+})
+
+/*
+ * Convenience macro to check that the field actually exists in the target kernel.
+ * Returns:
+ * 1, if matching field is present in target kernel;
+ * 0, if no matching field found.
+ */
+#define bpf_core_field_exists(field) \
+ __builtin_preserve_field_info(field, BPF_FIELD_EXISTS)
+
+/*
+ * Convenience macro to get byte size of a field. Works for integers,
+ * struct/unions, pointers, arrays, and enums.
+ */
+#define bpf_core_field_size(field) \
+ __builtin_preserve_field_info(field, BPF_FIELD_BYTE_SIZE)
+
+/*
+ * bpf_core_read() abstracts away bpf_probe_read() call and captures offset
+ * relocation for source address using __builtin_preserve_access_index()
+ * built-in, provided by Clang.
+ *
+ * __builtin_preserve_access_index() takes as an argument an expression that
+ * takes the address of a field within a struct/union. It makes the compiler
+ * emit a relocation, which records the BTF type ID describing the root
+ * struct/union and an accessor string describing the exact embedded field
+ * whose address was taken. See the detailed description of this relocation
+ * format and its semantics in the comments to struct bpf_field_reloc in
+ * libbpf_internal.h.
+ *
+ * This relocation allows libbpf to adjust the BPF instruction to use the
+ * correct actual field offset, based on the target kernel BTF type that
+ * matches the original (local) BTF used to record the relocation.
+ */
+#define bpf_core_read(dst, sz, src) \
+ bpf_probe_read(dst, sz, \
+ (const void *)__builtin_preserve_access_index(src))
+
+/*
+ * bpf_core_read_str() is a thin wrapper around bpf_probe_read_str()
+ * additionally emitting BPF CO-RE field relocation for specified source
+ * argument.
+ */
+#define bpf_core_read_str(dst, sz, src) \
+ bpf_probe_read_str(dst, sz, \
+ (const void *)__builtin_preserve_access_index(src))
+
+#define ___concat(a, b) a ## b
+#define ___apply(fn, n) ___concat(fn, n)
+#define ___nth(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, __11, N, ...) N
+
+/*
+ * return number of provided arguments; used for switch-based variadic macro
+ * definitions (see ___last, ___arrow, etc below)
+ */
+#define ___narg(...) ___nth(_, ##__VA_ARGS__, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
+/*
+ * return 0 if no arguments are passed, N - otherwise; used for
+ * recursively-defined macros to specify termination (0) case, and generic
+ * (N) case (e.g., ___read_ptrs, ___core_read)
+ */
+#define ___empty(...) ___nth(_, ##__VA_ARGS__, N, N, N, N, N, N, N, N, N, N, 0)
+
+#define ___last1(x) x
+#define ___last2(a, x) x
+#define ___last3(a, b, x) x
+#define ___last4(a, b, c, x) x
+#define ___last5(a, b, c, d, x) x
+#define ___last6(a, b, c, d, e, x) x
+#define ___last7(a, b, c, d, e, f, x) x
+#define ___last8(a, b, c, d, e, f, g, x) x
+#define ___last9(a, b, c, d, e, f, g, h, x) x
+#define ___last10(a, b, c, d, e, f, g, h, i, x) x
+#define ___last(...) ___apply(___last, ___narg(__VA_ARGS__))(__VA_ARGS__)
+
+#define ___nolast2(a, _) a
+#define ___nolast3(a, b, _) a, b
+#define ___nolast4(a, b, c, _) a, b, c
+#define ___nolast5(a, b, c, d, _) a, b, c, d
+#define ___nolast6(a, b, c, d, e, _) a, b, c, d, e
+#define ___nolast7(a, b, c, d, e, f, _) a, b, c, d, e, f
+#define ___nolast8(a, b, c, d, e, f, g, _) a, b, c, d, e, f, g
+#define ___nolast9(a, b, c, d, e, f, g, h, _) a, b, c, d, e, f, g, h
+#define ___nolast10(a, b, c, d, e, f, g, h, i, _) a, b, c, d, e, f, g, h, i
+#define ___nolast(...) ___apply(___nolast, ___narg(__VA_ARGS__))(__VA_ARGS__)
+
+#define ___arrow1(a) a
+#define ___arrow2(a, b) a->b
+#define ___arrow3(a, b, c) a->b->c
+#define ___arrow4(a, b, c, d) a->b->c->d
+#define ___arrow5(a, b, c, d, e) a->b->c->d->e
+#define ___arrow6(a, b, c, d, e, f) a->b->c->d->e->f
+#define ___arrow7(a, b, c, d, e, f, g) a->b->c->d->e->f->g
+#define ___arrow8(a, b, c, d, e, f, g, h) a->b->c->d->e->f->g->h
+#define ___arrow9(a, b, c, d, e, f, g, h, i) a->b->c->d->e->f->g->h->i
+#define ___arrow10(a, b, c, d, e, f, g, h, i, j) a->b->c->d->e->f->g->h->i->j
+#define ___arrow(...) ___apply(___arrow, ___narg(__VA_ARGS__))(__VA_ARGS__)
+
+#define ___type(...) typeof(___arrow(__VA_ARGS__))
+
+#define ___read(read_fn, dst, src_type, src, accessor) \
+ read_fn((void *)(dst), sizeof(*(dst)), &((src_type)(src))->accessor)
+
+/* "recursively" read a sequence of inner pointers using local __t var */
+#define ___rd_first(src, a) ___read(bpf_core_read, &__t, ___type(src), src, a);
+#define ___rd_last(...) \
+ ___read(bpf_core_read, &__t, \
+ ___type(___nolast(__VA_ARGS__)), __t, ___last(__VA_ARGS__));
+#define ___rd_p1(...) const void *__t; ___rd_first(__VA_ARGS__)
+#define ___rd_p2(...) ___rd_p1(___nolast(__VA_ARGS__)) ___rd_last(__VA_ARGS__)
+#define ___rd_p3(...) ___rd_p2(___nolast(__VA_ARGS__)) ___rd_last(__VA_ARGS__)
+#define ___rd_p4(...) ___rd_p3(___nolast(__VA_ARGS__)) ___rd_last(__VA_ARGS__)
+#define ___rd_p5(...) ___rd_p4(___nolast(__VA_ARGS__)) ___rd_last(__VA_ARGS__)
+#define ___rd_p6(...) ___rd_p5(___nolast(__VA_ARGS__)) ___rd_last(__VA_ARGS__)
+#define ___rd_p7(...) ___rd_p6(___nolast(__VA_ARGS__)) ___rd_last(__VA_ARGS__)
+#define ___rd_p8(...) ___rd_p7(___nolast(__VA_ARGS__)) ___rd_last(__VA_ARGS__)
+#define ___rd_p9(...) ___rd_p8(___nolast(__VA_ARGS__)) ___rd_last(__VA_ARGS__)
+#define ___read_ptrs(src, ...) \
+ ___apply(___rd_p, ___narg(__VA_ARGS__))(src, __VA_ARGS__)
+
+#define ___core_read0(fn, dst, src, a) \
+ ___read(fn, dst, ___type(src), src, a);
+#define ___core_readN(fn, dst, src, ...) \
+ ___read_ptrs(src, ___nolast(__VA_ARGS__)) \
+ ___read(fn, dst, ___type(src, ___nolast(__VA_ARGS__)), __t, \
+ ___last(__VA_ARGS__));
+#define ___core_read(fn, dst, src, a, ...) \
+ ___apply(___core_read, ___empty(__VA_ARGS__))(fn, dst, \
+ src, a, ##__VA_ARGS__)
+
+/*
+ * BPF_CORE_READ_INTO() is a more performance-conscious variant of
+ * BPF_CORE_READ(), in which the final field is read into user-provided storage.
+ * See BPF_CORE_READ() below for more details on general usage.
+ */
+#define BPF_CORE_READ_INTO(dst, src, a, ...) \
+ ({ \
+ ___core_read(bpf_core_read, dst, src, a, ##__VA_ARGS__) \
+ })
+
+/*
+ * BPF_CORE_READ_STR_INTO() does the same "pointer chasing" as
+ * BPF_CORE_READ() for intermediate pointers, but then executes (and returns
+ * the error code of) bpf_core_read_str() for the final string read.
+ */
+#define BPF_CORE_READ_STR_INTO(dst, src, a, ...) \
+ ({ \
+ ___core_read(bpf_core_read_str, dst, src, a, ##__VA_ARGS__) \
+ })
+
+/*
+ * BPF_CORE_READ() is used to simplify BPF CO-RE relocatable reads, especially
+ * when a few pointer-chasing steps are involved.
+ * E.g., what in non-BPF world (or in BPF w/ BCC) would be something like:
+ * int x = s->a.b.c->d.e->f->g;
+ * can be succinctly achieved using BPF_CORE_READ as:
+ * int x = BPF_CORE_READ(s, a.b.c, d.e, f, g);
+ *
+ * BPF_CORE_READ will decompose the above statement into 4 bpf_core_read (BPF
+ * CO-RE relocatable bpf_probe_read() wrapper) calls, logically equivalent to:
+ * 1. const void *__t = s->a.b.c;
+ * 2. __t = __t->d.e;
+ * 3. __t = __t->f;
+ * 4. return __t->g;
+ *
+ * The equivalence is logical, because heavy type casting/preservation is
+ * involved, and all the reads happen through bpf_probe_read()
+ * calls using __builtin_preserve_access_index() to emit CO-RE relocations.
+ *
+ * N.B. Only up to 9 "field accessors" are supported, which should be more
+ * than enough for any practical purpose.
+ */
+#define BPF_CORE_READ(src, a, ...) \
+ ({ \
+ ___type(src, a, ##__VA_ARGS__) __r; \
+ BPF_CORE_READ_INTO(&__r, src, a, ##__VA_ARGS__); \
+ __r; \
+ })
+
+#endif
+
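Putting the new header to work, an illustrative sketch: it assumes kernel struct layouts are visible to Clang (e.g. via a BTF-generated header) and bpf_get_current_task() from the generated bpf_helper_defs.h; the probed function is arbitrary:

	SEC("kprobe/do_exit")	/* illustrative attach point */
	int probe(void *ctx)
	{
		struct task_struct *task = (void *)bpf_get_current_task();
		struct dentry *dentry;

		/* expands to three CO-RE-relocated bpf_probe_read() steps */
		dentry = BPF_CORE_READ(task, mm, exe_file, f_path.dentry);
		(void)dentry; /* consume as needed */
		return 0;
	}
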
diff --git a/tools/testing/selftests/bpf/bpf_endian.h b/tools/lib/bpf/bpf_endian.h
index fbe28008450f..fbe28008450f 100644
--- a/tools/testing/selftests/bpf/bpf_endian.h
+++ b/tools/lib/bpf/bpf_endian.h
diff --git a/tools/lib/bpf/bpf_helpers.h b/tools/lib/bpf/bpf_helpers.h
new file mode 100644
index 000000000000..0c7d28292898
--- /dev/null
+++ b/tools/lib/bpf/bpf_helpers.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
+#ifndef __BPF_HELPERS__
+#define __BPF_HELPERS__
+
+#include "bpf_helper_defs.h"
+
+#define __uint(name, val) int (*name)[val]
+#define __type(name, val) typeof(val) *name
+
+/* Helper macro to print out debug messages */
+#define bpf_printk(fmt, ...) \
+({ \
+ char ____fmt[] = fmt; \
+ bpf_trace_printk(____fmt, sizeof(____fmt), \
+ ##__VA_ARGS__); \
+})
+
+/*
+ * Helper macro to place programs, maps, and the license in
+ * different sections of the elf_bpf file. Section names
+ * are interpreted by the elf_bpf loader.
+ */
+#define SEC(NAME) __attribute__((section(NAME), used))
+
+#ifndef __always_inline
+#define __always_inline __attribute__((always_inline))
+#endif
+
+/*
+ * Helper structure used by an eBPF C program
+ * to describe BPF map attributes to the libbpf loader.
+ */
+struct bpf_map_def {
+ unsigned int type;
+ unsigned int key_size;
+ unsigned int value_size;
+ unsigned int max_entries;
+ unsigned int map_flags;
+};
+
+enum libbpf_pin_type {
+ LIBBPF_PIN_NONE,
+ /* PIN_BY_NAME: pin maps by name (in /sys/fs/bpf by default) */
+ LIBBPF_PIN_BY_NAME,
+};
+
+#endif
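
An illustrative sketch of these macros in use, in the BTF-defined map style that __uint/__type enable; the map name, sizes, and tracepoint are assumptions, and the helper prototypes come from the generated bpf_helper_defs.h:

	struct {
		__uint(type, BPF_MAP_TYPE_HASH);
		__uint(max_entries, 1024);
		__type(key, __u32);
		__type(value, __u64);
	} counts SEC(".maps");

	SEC("tracepoint/syscalls/sys_enter_execve")
	int count_execve(void *ctx)
	{
		__u32 key = 0;
		__u64 one = 1, *val;

		val = bpf_map_lookup_elem(&counts, &key);
		if (val)
			__sync_fetch_and_add(val, 1);
		else
			bpf_map_update_elem(&counts, &key, &one, BPF_NOEXIST);
		return 0;
	}
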
diff --git a/tools/lib/bpf/bpf_prog_linfo.c b/tools/lib/bpf/bpf_prog_linfo.c
index 8c67561c93b0..3ed1a27b5f7c 100644
--- a/tools/lib/bpf/bpf_prog_linfo.c
+++ b/tools/lib/bpf/bpf_prog_linfo.c
@@ -101,6 +101,7 @@ struct bpf_prog_linfo *bpf_prog_linfo__new(const struct bpf_prog_info *info)
{
struct bpf_prog_linfo *prog_linfo;
__u32 nr_linfo, nr_jited_func;
+ __u64 data_sz;
nr_linfo = info->nr_line_info;
@@ -122,11 +123,11 @@ struct bpf_prog_linfo *bpf_prog_linfo__new(const struct bpf_prog_info *info)
/* Copy xlated line_info */
prog_linfo->nr_linfo = nr_linfo;
prog_linfo->rec_size = info->line_info_rec_size;
- prog_linfo->raw_linfo = malloc(nr_linfo * prog_linfo->rec_size);
+ data_sz = (__u64)nr_linfo * prog_linfo->rec_size;
+ prog_linfo->raw_linfo = malloc(data_sz);
if (!prog_linfo->raw_linfo)
goto err_free;
- memcpy(prog_linfo->raw_linfo, (void *)(long)info->line_info,
- nr_linfo * prog_linfo->rec_size);
+ memcpy(prog_linfo->raw_linfo, (void *)(long)info->line_info, data_sz);
nr_jited_func = info->nr_jited_ksyms;
if (!nr_jited_func ||
@@ -142,13 +143,12 @@ struct bpf_prog_linfo *bpf_prog_linfo__new(const struct bpf_prog_info *info)
/* Copy jited_line_info */
prog_linfo->nr_jited_func = nr_jited_func;
prog_linfo->jited_rec_size = info->jited_line_info_rec_size;
- prog_linfo->raw_jited_linfo = malloc(nr_linfo *
- prog_linfo->jited_rec_size);
+ data_sz = (__u64)nr_linfo * prog_linfo->jited_rec_size;
+ prog_linfo->raw_jited_linfo = malloc(data_sz);
if (!prog_linfo->raw_jited_linfo)
goto err_free;
memcpy(prog_linfo->raw_jited_linfo,
- (void *)(long)info->jited_line_info,
- nr_linfo * prog_linfo->jited_rec_size);
+ (void *)(long)info->jited_line_info, data_sz);
/* Number of jited_line_info per jited func */
prog_linfo->nr_jited_linfo_per_func = malloc(nr_jited_func *
diff --git a/tools/lib/bpf/bpf_tracing.h b/tools/lib/bpf/bpf_tracing.h
new file mode 100644
index 000000000000..b0dafe8b4ebc
--- /dev/null
+++ b/tools/lib/bpf/bpf_tracing.h
@@ -0,0 +1,195 @@
+/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
+#ifndef __BPF_TRACING_H__
+#define __BPF_TRACING_H__
+
+/* Scan the ARCH passed in from ARCH env variable (see Makefile) */
+#if defined(__TARGET_ARCH_x86)
+ #define bpf_target_x86
+ #define bpf_target_defined
+#elif defined(__TARGET_ARCH_s390)
+ #define bpf_target_s390
+ #define bpf_target_defined
+#elif defined(__TARGET_ARCH_arm)
+ #define bpf_target_arm
+ #define bpf_target_defined
+#elif defined(__TARGET_ARCH_arm64)
+ #define bpf_target_arm64
+ #define bpf_target_defined
+#elif defined(__TARGET_ARCH_mips)
+ #define bpf_target_mips
+ #define bpf_target_defined
+#elif defined(__TARGET_ARCH_powerpc)
+ #define bpf_target_powerpc
+ #define bpf_target_defined
+#elif defined(__TARGET_ARCH_sparc)
+ #define bpf_target_sparc
+ #define bpf_target_defined
+#else
+ #undef bpf_target_defined
+#endif
+
+/* Fall back to what the compiler says */
+#ifndef bpf_target_defined
+#if defined(__x86_64__)
+ #define bpf_target_x86
+#elif defined(__s390__)
+ #define bpf_target_s390
+#elif defined(__arm__)
+ #define bpf_target_arm
+#elif defined(__aarch64__)
+ #define bpf_target_arm64
+#elif defined(__mips__)
+ #define bpf_target_mips
+#elif defined(__powerpc__)
+ #define bpf_target_powerpc
+#elif defined(__sparc__)
+ #define bpf_target_sparc
+#endif
+#endif
+
+#if defined(bpf_target_x86)
+
+#ifdef __KERNEL__
+#define PT_REGS_PARM1(x) ((x)->di)
+#define PT_REGS_PARM2(x) ((x)->si)
+#define PT_REGS_PARM3(x) ((x)->dx)
+#define PT_REGS_PARM4(x) ((x)->cx)
+#define PT_REGS_PARM5(x) ((x)->r8)
+#define PT_REGS_RET(x) ((x)->sp)
+#define PT_REGS_FP(x) ((x)->bp)
+#define PT_REGS_RC(x) ((x)->ax)
+#define PT_REGS_SP(x) ((x)->sp)
+#define PT_REGS_IP(x) ((x)->ip)
+#else
+#ifdef __i386__
+/* i386 kernel is built with -mregparm=3 */
+#define PT_REGS_PARM1(x) ((x)->eax)
+#define PT_REGS_PARM2(x) ((x)->edx)
+#define PT_REGS_PARM3(x) ((x)->ecx)
+#define PT_REGS_PARM4(x) 0
+#define PT_REGS_PARM5(x) 0
+#define PT_REGS_RET(x) ((x)->esp)
+#define PT_REGS_FP(x) ((x)->ebp)
+#define PT_REGS_RC(x) ((x)->eax)
+#define PT_REGS_SP(x) ((x)->esp)
+#define PT_REGS_IP(x) ((x)->eip)
+#else
+#define PT_REGS_PARM1(x) ((x)->rdi)
+#define PT_REGS_PARM2(x) ((x)->rsi)
+#define PT_REGS_PARM3(x) ((x)->rdx)
+#define PT_REGS_PARM4(x) ((x)->rcx)
+#define PT_REGS_PARM5(x) ((x)->r8)
+#define PT_REGS_RET(x) ((x)->rsp)
+#define PT_REGS_FP(x) ((x)->rbp)
+#define PT_REGS_RC(x) ((x)->rax)
+#define PT_REGS_SP(x) ((x)->rsp)
+#define PT_REGS_IP(x) ((x)->rip)
+#endif
+#endif
+
+#elif defined(bpf_target_s390)
+
+/* s390 provides user_pt_regs instead of struct pt_regs to userspace */
+struct pt_regs;
+#define PT_REGS_S390 const volatile user_pt_regs
+#define PT_REGS_PARM1(x) (((PT_REGS_S390 *)(x))->gprs[2])
+#define PT_REGS_PARM2(x) (((PT_REGS_S390 *)(x))->gprs[3])
+#define PT_REGS_PARM3(x) (((PT_REGS_S390 *)(x))->gprs[4])
+#define PT_REGS_PARM4(x) (((PT_REGS_S390 *)(x))->gprs[5])
+#define PT_REGS_PARM5(x) (((PT_REGS_S390 *)(x))->gprs[6])
+#define PT_REGS_RET(x) (((PT_REGS_S390 *)(x))->gprs[14])
+/* Works only with CONFIG_FRAME_POINTER */
+#define PT_REGS_FP(x) (((PT_REGS_S390 *)(x))->gprs[11])
+#define PT_REGS_RC(x) (((PT_REGS_S390 *)(x))->gprs[2])
+#define PT_REGS_SP(x) (((PT_REGS_S390 *)(x))->gprs[15])
+#define PT_REGS_IP(x) (((PT_REGS_S390 *)(x))->psw.addr)
+
+#elif defined(bpf_target_arm)
+
+#define PT_REGS_PARM1(x) ((x)->uregs[0])
+#define PT_REGS_PARM2(x) ((x)->uregs[1])
+#define PT_REGS_PARM3(x) ((x)->uregs[2])
+#define PT_REGS_PARM4(x) ((x)->uregs[3])
+#define PT_REGS_PARM5(x) ((x)->uregs[4])
+#define PT_REGS_RET(x) ((x)->uregs[14])
+#define PT_REGS_FP(x) ((x)->uregs[11]) /* Works only with CONFIG_FRAME_POINTER */
+#define PT_REGS_RC(x) ((x)->uregs[0])
+#define PT_REGS_SP(x) ((x)->uregs[13])
+#define PT_REGS_IP(x) ((x)->uregs[12])
+
+#elif defined(bpf_target_arm64)
+
+/* arm64 provides struct user_pt_regs instead of struct pt_regs to userspace */
+struct pt_regs;
+#define PT_REGS_ARM64 const volatile struct user_pt_regs
+#define PT_REGS_PARM1(x) (((PT_REGS_ARM64 *)(x))->regs[0])
+#define PT_REGS_PARM2(x) (((PT_REGS_ARM64 *)(x))->regs[1])
+#define PT_REGS_PARM3(x) (((PT_REGS_ARM64 *)(x))->regs[2])
+#define PT_REGS_PARM4(x) (((PT_REGS_ARM64 *)(x))->regs[3])
+#define PT_REGS_PARM5(x) (((PT_REGS_ARM64 *)(x))->regs[4])
+#define PT_REGS_RET(x) (((PT_REGS_ARM64 *)(x))->regs[30])
+/* Works only with CONFIG_FRAME_POINTER */
+#define PT_REGS_FP(x) (((PT_REGS_ARM64 *)(x))->regs[29])
+#define PT_REGS_RC(x) (((PT_REGS_ARM64 *)(x))->regs[0])
+#define PT_REGS_SP(x) (((PT_REGS_ARM64 *)(x))->sp)
+#define PT_REGS_IP(x) (((PT_REGS_ARM64 *)(x))->pc)
+
+#elif defined(bpf_target_mips)
+
+#define PT_REGS_PARM1(x) ((x)->regs[4])
+#define PT_REGS_PARM2(x) ((x)->regs[5])
+#define PT_REGS_PARM3(x) ((x)->regs[6])
+#define PT_REGS_PARM4(x) ((x)->regs[7])
+#define PT_REGS_PARM5(x) ((x)->regs[8])
+#define PT_REGS_RET(x) ((x)->regs[31])
+#define PT_REGS_FP(x) ((x)->regs[30]) /* Works only with CONFIG_FRAME_POINTER */
+#define PT_REGS_RC(x) ((x)->regs[1])
+#define PT_REGS_SP(x) ((x)->regs[29])
+#define PT_REGS_IP(x) ((x)->cp0_epc)
+
+#elif defined(bpf_target_powerpc)
+
+#define PT_REGS_PARM1(x) ((x)->gpr[3])
+#define PT_REGS_PARM2(x) ((x)->gpr[4])
+#define PT_REGS_PARM3(x) ((x)->gpr[5])
+#define PT_REGS_PARM4(x) ((x)->gpr[6])
+#define PT_REGS_PARM5(x) ((x)->gpr[7])
+#define PT_REGS_RC(x) ((x)->gpr[3])
+#define PT_REGS_SP(x) ((x)->sp)
+#define PT_REGS_IP(x) ((x)->nip)
+
+#elif defined(bpf_target_sparc)
+
+#define PT_REGS_PARM1(x) ((x)->u_regs[UREG_I0])
+#define PT_REGS_PARM2(x) ((x)->u_regs[UREG_I1])
+#define PT_REGS_PARM3(x) ((x)->u_regs[UREG_I2])
+#define PT_REGS_PARM4(x) ((x)->u_regs[UREG_I3])
+#define PT_REGS_PARM5(x) ((x)->u_regs[UREG_I4])
+#define PT_REGS_RET(x) ((x)->u_regs[UREG_I7])
+#define PT_REGS_RC(x) ((x)->u_regs[UREG_I0])
+#define PT_REGS_SP(x) ((x)->u_regs[UREG_FP])
+
+/* Should this also be a bpf_target check for the sparc case? */
+#if defined(__arch64__)
+#define PT_REGS_IP(x) ((x)->tpc)
+#else
+#define PT_REGS_IP(x) ((x)->pc)
+#endif
+
+#endif
+
+#if defined(bpf_target_powerpc)
+#define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ (ip) = (ctx)->link; })
+#define BPF_KRETPROBE_READ_RET_IP BPF_KPROBE_READ_RET_IP
+#elif defined(bpf_target_sparc)
+#define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ (ip) = PT_REGS_RET(ctx); })
+#define BPF_KRETPROBE_READ_RET_IP BPF_KPROBE_READ_RET_IP
+#else
+#define BPF_KPROBE_READ_RET_IP(ip, ctx) \
+ ({ bpf_probe_read(&(ip), sizeof(ip), (void *)PT_REGS_RET(ctx)); })
+#define BPF_KRETPROBE_READ_RET_IP(ip, ctx) \
+ ({ bpf_probe_read(&(ip), sizeof(ip), \
+ (void *)(PT_REGS_FP(ctx) + sizeof(ip))); })
+#endif
+
+#endif
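
An illustrative use of the accessors in a kretprobe, where PT_REGS_RC() names whichever return-value register the target architecture selected above; the probed function is an assumption:

	SEC("kretprobe/vfs_read")
	int trace_vfs_read_ret(struct pt_regs *ctx)
	{
		long ret = (long)PT_REGS_RC(ctx); /* return-value register */

		if (ret < 0)
			bpf_printk("vfs_read failed: %ld\n", ret);
		return 0;
	}
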
diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
index 1aa189a9112a..88efa2bb7137 100644
--- a/tools/lib/bpf/btf.c
+++ b/tools/lib/bpf/btf.c
@@ -269,10 +269,9 @@ __s64 btf__resolve_size(const struct btf *btf, __u32 type_id)
t = btf__type_by_id(btf, type_id);
}
+done:
if (size < 0)
return -EINVAL;
-
-done:
if (nelems && size > UINT32_MAX / nelems)
return -E2BIG;
@@ -317,6 +316,28 @@ __s32 btf__find_by_name(const struct btf *btf, const char *type_name)
return -ENOENT;
}
+__s32 btf__find_by_name_kind(const struct btf *btf, const char *type_name,
+ __u32 kind)
+{
+ __u32 i;
+
+ if (kind == BTF_KIND_UNKN || !strcmp(type_name, "void"))
+ return 0;
+
+ for (i = 1; i <= btf->nr_types; i++) {
+ const struct btf_type *t = btf->types[i];
+ const char *name;
+
+ if (btf_kind(t) != kind)
+ continue;
+ name = btf__name_by_offset(btf, t->name_off);
+ if (name && !strcmp(type_name, name))
+ return i;
+ }
+
+ return -ENOENT;
+}
+
void btf__free(struct btf *btf)
{
if (!btf)
@@ -390,14 +411,14 @@ struct btf *btf__parse_elf(const char *path, struct btf_ext **btf_ext)
GElf_Ehdr ehdr;
if (elf_version(EV_CURRENT) == EV_NONE) {
- pr_warning("failed to init libelf for %s\n", path);
+ pr_warn("failed to init libelf for %s\n", path);
return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
}
fd = open(path, O_RDONLY);
if (fd < 0) {
err = -errno;
- pr_warning("failed to open %s: %s\n", path, strerror(errno));
+ pr_warn("failed to open %s: %s\n", path, strerror(errno));
return ERR_PTR(err);
}
@@ -405,19 +426,19 @@ struct btf *btf__parse_elf(const char *path, struct btf_ext **btf_ext)
elf = elf_begin(fd, ELF_C_READ, NULL);
if (!elf) {
- pr_warning("failed to open %s as ELF file\n", path);
+ pr_warn("failed to open %s as ELF file\n", path);
goto done;
}
if (!gelf_getehdr(elf, &ehdr)) {
- pr_warning("failed to get EHDR from %s\n", path);
+ pr_warn("failed to get EHDR from %s\n", path);
goto done;
}
if (!btf_check_endianness(&ehdr)) {
- pr_warning("non-native ELF endianness is not supported\n");
+ pr_warn("non-native ELF endianness is not supported\n");
goto done;
}
if (!elf_rawdata(elf_getscn(elf, ehdr.e_shstrndx), NULL)) {
- pr_warning("failed to get e_shstrndx from %s\n", path);
+ pr_warn("failed to get e_shstrndx from %s\n", path);
goto done;
}
@@ -427,29 +448,29 @@ struct btf *btf__parse_elf(const char *path, struct btf_ext **btf_ext)
idx++;
if (gelf_getshdr(scn, &sh) != &sh) {
- pr_warning("failed to get section(%d) header from %s\n",
- idx, path);
+ pr_warn("failed to get section(%d) header from %s\n",
+ idx, path);
goto done;
}
name = elf_strptr(elf, ehdr.e_shstrndx, sh.sh_name);
if (!name) {
- pr_warning("failed to get section(%d) name from %s\n",
- idx, path);
+ pr_warn("failed to get section(%d) name from %s\n",
+ idx, path);
goto done;
}
if (strcmp(name, BTF_ELF_SEC) == 0) {
btf_data = elf_getdata(scn, 0);
if (!btf_data) {
- pr_warning("failed to get section(%d, %s) data from %s\n",
- idx, name, path);
+ pr_warn("failed to get section(%d, %s) data from %s\n",
+ idx, name, path);
goto done;
}
continue;
} else if (btf_ext && strcmp(name, BTF_EXT_ELF_SEC) == 0) {
btf_ext_data = elf_getdata(scn, 0);
if (!btf_ext_data) {
- pr_warning("failed to get section(%d, %s) data from %s\n",
- idx, name, path);
+ pr_warn("failed to get section(%d, %s) data from %s\n",
+ idx, name, path);
goto done;
}
continue;
@@ -600,9 +621,9 @@ int btf__load(struct btf *btf)
log_buf, log_buf_size, false);
if (btf->fd < 0) {
err = -errno;
- pr_warning("Error loading BTF: %s(%d)\n", strerror(errno), errno);
+ pr_warn("Error loading BTF: %s(%d)\n", strerror(errno), errno);
if (*log_buf)
- pr_warning("%s\n", log_buf);
+ pr_warn("%s\n", log_buf);
goto done;
}
@@ -707,8 +728,8 @@ int btf__get_map_kv_tids(const struct btf *btf, const char *map_name,
if (snprintf(container_name, max_name, "____btf_map_%s", map_name) ==
max_name) {
- pr_warning("map:%s length of '____btf_map_%s' is too long\n",
- map_name, map_name);
+ pr_warn("map:%s length of '____btf_map_%s' is too long\n",
+ map_name, map_name);
return -EINVAL;
}
@@ -721,14 +742,14 @@ int btf__get_map_kv_tids(const struct btf *btf, const char *map_name,
container_type = btf__type_by_id(btf, container_id);
if (!container_type) {
- pr_warning("map:%s cannot find BTF type for container_id:%u\n",
- map_name, container_id);
+ pr_warn("map:%s cannot find BTF type for container_id:%u\n",
+ map_name, container_id);
return -EINVAL;
}
if (!btf_is_struct(container_type) || btf_vlen(container_type) < 2) {
- pr_warning("map:%s container_name:%s is an invalid container struct\n",
- map_name, container_name);
+ pr_warn("map:%s container_name:%s is an invalid container struct\n",
+ map_name, container_name);
return -EINVAL;
}
@@ -737,25 +758,25 @@ int btf__get_map_kv_tids(const struct btf *btf, const char *map_name,
key_size = btf__resolve_size(btf, key->type);
if (key_size < 0) {
- pr_warning("map:%s invalid BTF key_type_size\n", map_name);
+ pr_warn("map:%s invalid BTF key_type_size\n", map_name);
return key_size;
}
if (expected_key_size != key_size) {
- pr_warning("map:%s btf_key_type_size:%u != map_def_key_size:%u\n",
- map_name, (__u32)key_size, expected_key_size);
+ pr_warn("map:%s btf_key_type_size:%u != map_def_key_size:%u\n",
+ map_name, (__u32)key_size, expected_key_size);
return -EINVAL;
}
value_size = btf__resolve_size(btf, value->type);
if (value_size < 0) {
- pr_warning("map:%s invalid BTF value_type_size\n", map_name);
+ pr_warn("map:%s invalid BTF value_type_size\n", map_name);
return value_size;
}
if (expected_value_size != value_size) {
- pr_warning("map:%s btf_value_type_size:%u != map_def_value_size:%u\n",
- map_name, (__u32)value_size, expected_value_size);
+ pr_warn("map:%s btf_value_type_size:%u != map_def_value_size:%u\n",
+ map_name, (__u32)value_size, expected_value_size);
return -EINVAL;
}
@@ -888,14 +909,14 @@ static int btf_ext_setup_line_info(struct btf_ext *btf_ext)
return btf_ext_setup_info(btf_ext, &param);
}
-static int btf_ext_setup_offset_reloc(struct btf_ext *btf_ext)
+static int btf_ext_setup_field_reloc(struct btf_ext *btf_ext)
{
struct btf_ext_sec_setup_param param = {
- .off = btf_ext->hdr->offset_reloc_off,
- .len = btf_ext->hdr->offset_reloc_len,
- .min_rec_size = sizeof(struct bpf_offset_reloc),
- .ext_info = &btf_ext->offset_reloc_info,
- .desc = "offset_reloc",
+ .off = btf_ext->hdr->field_reloc_off,
+ .len = btf_ext->hdr->field_reloc_len,
+ .min_rec_size = sizeof(struct bpf_field_reloc),
+ .ext_info = &btf_ext->field_reloc_info,
+ .desc = "field_reloc",
};
return btf_ext_setup_info(btf_ext, &param);
@@ -975,9 +996,9 @@ struct btf_ext *btf_ext__new(__u8 *data, __u32 size)
goto done;
if (btf_ext->hdr->hdr_len <
- offsetofend(struct btf_ext_header, offset_reloc_len))
+ offsetofend(struct btf_ext_header, field_reloc_len))
goto done;
- err = btf_ext_setup_offset_reloc(btf_ext);
+ err = btf_ext_setup_field_reloc(btf_ext);
if (err)
goto done;
diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h
index 9cb44b4fbf60..d9ac73a02cde 100644
--- a/tools/lib/bpf/btf.h
+++ b/tools/lib/bpf/btf.h
@@ -60,8 +60,8 @@ struct btf_ext_header {
__u32 line_info_len;
/* optional part of .BTF.ext header */
- __u32 offset_reloc_off;
- __u32 offset_reloc_len;
+ __u32 field_reloc_off;
+ __u32 field_reloc_len;
};
LIBBPF_API void btf__free(struct btf *btf);
@@ -72,6 +72,8 @@ LIBBPF_API int btf__finalize_data(struct bpf_object *obj, struct btf *btf);
LIBBPF_API int btf__load(struct btf *btf);
LIBBPF_API __s32 btf__find_by_name(const struct btf *btf,
const char *type_name);
+LIBBPF_API __s32 btf__find_by_name_kind(const struct btf *btf,
+ const char *type_name, __u32 kind);
LIBBPF_API __u32 btf__get_nr_types(const struct btf *btf);
LIBBPF_API const struct btf_type *btf__type_by_id(const struct btf *btf,
__u32 id);
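
A small sketch of the new lookup; the kind filter is what distinguishes it from btf__find_by_name() when, say, a struct and a typedef share a name:

	static __s32 task_struct_btf_id(const struct btf *btf)
	{
		/* positive type id on success, -ENOENT if no such struct */
		return btf__find_by_name_kind(btf, "task_struct", BTF_KIND_STRUCT);
	}
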
diff --git a/tools/lib/bpf/btf_dump.c b/tools/lib/bpf/btf_dump.c
index ede55fec3618..cb126d8fcf75 100644
--- a/tools/lib/bpf/btf_dump.c
+++ b/tools/lib/bpf/btf_dump.c
@@ -428,7 +428,7 @@ static int btf_dump_order_type(struct btf_dump *d, __u32 id, bool through_ptr)
/* type loop, but resolvable through fwd declaration */
if (btf_is_composite(t) && through_ptr && t->name_off != 0)
return 0;
- pr_warning("unsatisfiable type cycle, id:[%u]\n", id);
+ pr_warn("unsatisfiable type cycle, id:[%u]\n", id);
return -ELOOP;
}
@@ -636,8 +636,8 @@ static void btf_dump_emit_type(struct btf_dump *d, __u32 id, __u32 cont_id)
if (id == cont_id)
return;
if (t->name_off == 0) {
- pr_warning("anonymous struct/union loop, id:[%u]\n",
- id);
+ pr_warn("anonymous struct/union loop, id:[%u]\n",
+ id);
return;
}
btf_dump_emit_struct_fwd(d, id, t);
@@ -782,7 +782,7 @@ static int btf_align_of(const struct btf *btf, __u32 id)
return align;
}
default:
- pr_warning("unsupported BTF_KIND:%u\n", btf_kind(t));
+ pr_warn("unsupported BTF_KIND:%u\n", btf_kind(t));
return 1;
}
}
@@ -876,7 +876,6 @@ static void btf_dump_emit_struct_def(struct btf_dump *d,
__u16 vlen = btf_vlen(t);
packed = is_struct ? btf_is_struct_packed(d->btf, id, t) : 0;
- align = packed ? 1 : btf_align_of(d->btf, id);
btf_dump_printf(d, "%s%s%s {",
is_struct ? "struct" : "union",
@@ -906,6 +905,13 @@ static void btf_dump_emit_struct_def(struct btf_dump *d,
btf_dump_printf(d, ";");
}
+ /* pad at the end, if necessary */
+ if (is_struct) {
+ align = packed ? 1 : btf_align_of(d->btf, id);
+ btf_dump_emit_bit_padding(d, off, t->size * 8, 0, align,
+ lvl + 1);
+ }
+
if (vlen)
btf_dump_printf(d, "\n");
btf_dump_printf(d, "%s}", pfx(lvl));
@@ -969,6 +975,17 @@ static void btf_dump_emit_typedef_def(struct btf_dump *d, __u32 id,
{
const char *name = btf_dump_ident_name(d, id);
+ /*
+	 * Old GCC versions emit an invalid typedef for __gnuc_va_list
+	 * pointing to VOID. This generates warnings from btf_dump() and
+	 * results in an uncompilable header file, so we fix it up here
+	 * with a valid typedef to __builtin_va_list.
+ */
+ if (t->type == 0 && strcmp(name, "__gnuc_va_list") == 0) {
+ btf_dump_printf(d, "typedef __builtin_va_list __gnuc_va_list");
+ return;
+ }
+
btf_dump_printf(d, "typedef ");
btf_dump_emit_type_decl(d, t->type, name, lvl);
}
@@ -1050,7 +1067,7 @@ static void btf_dump_emit_type_decl(struct btf_dump *d, __u32 id,
* chain, restore stack, emit warning, and try to
* proceed nevertheless
*/
- pr_warning("not enough memory for decl stack:%d", err);
+ pr_warn("not enough memory for decl stack:%d", err);
d->decl_stack_cnt = stack_start;
return;
}
@@ -1079,8 +1096,8 @@ static void btf_dump_emit_type_decl(struct btf_dump *d, __u32 id,
case BTF_KIND_TYPEDEF:
goto done;
default:
- pr_warning("unexpected type in decl chain, kind:%u, id:[%u]\n",
- btf_kind(t), id);
+ pr_warn("unexpected type in decl chain, kind:%u, id:[%u]\n",
+ btf_kind(t), id);
goto done;
}
}
@@ -1306,8 +1323,8 @@ static void btf_dump_emit_type_chain(struct btf_dump *d,
return;
}
default:
- pr_warning("unexpected type in decl chain, kind:%u, id:[%u]\n",
- kind, id);
+ pr_warn("unexpected type in decl chain, kind:%u, id:[%u]\n",
+ kind, id);
return;
}
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index e0276520171b..b20f82e58989 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -33,6 +33,7 @@
#include <linux/limits.h>
#include <linux/perf_event.h>
#include <linux/ring_buffer.h>
+#include <linux/version.h>
#include <sys/epoll.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
@@ -104,7 +105,7 @@ void libbpf_print(enum libbpf_print_level level, const char *format, ...)
err = action; \
if (err) \
goto out; \
-} while(0)
+} while (0)
/* Copied from tools/perf/util/util.h */
@@ -141,6 +142,8 @@ struct bpf_capabilities {
__u32 btf_func:1;
/* BTF_KIND_VAR and BTF_KIND_DATASEC support */
__u32 btf_datasec:1;
+ /* BPF_F_MMAPABLE is supported for arrays */
+ __u32 array_mmap:1;
};
/*
@@ -187,6 +190,8 @@ struct bpf_program {
bpf_program_clear_priv_t clear_priv;
enum bpf_attach_type expected_attach_type;
+ __u32 attach_btf_id;
+ __u32 attach_prog_fd;
void *func_info;
__u32 func_info_rec_size;
__u32 func_info_cnt;
@@ -225,6 +230,9 @@ struct bpf_map {
void *priv;
bpf_map_clear_priv_t clear_priv;
enum libbpf_map_type libbpf_type;
+ char *pin_path;
+ bool pinned;
+ bool reused;
};
struct bpf_secdata {
@@ -248,6 +256,7 @@ struct bpf_object {
bool loaded;
bool has_pseudo_calls;
+ bool relaxed_core_relocs;
/*
* Information when doing elf related work. Only valid if fd
@@ -255,7 +264,7 @@ struct bpf_object {
*/
struct {
int fd;
- void *obj_buf;
+ const void *obj_buf;
size_t obj_buf_sz;
Elf *elf;
GElf_Ehdr ehdr;
@@ -267,8 +276,8 @@ struct bpf_object {
struct {
GElf_Shdr shdr;
Elf_Data *data;
- } *reloc;
- int nr_reloc;
+ } *reloc_sects;
+ int nr_reloc_sects;
int maps_shndx;
int btf_maps_shndx;
int text_shndx;
@@ -310,8 +319,8 @@ void bpf_program__unload(struct bpf_program *prog)
for (i = 0; i < prog->instances.nr; i++)
zclose(prog->instances.fds[i]);
} else if (prog->instances.nr != -1) {
- pr_warning("Internal error: instances.nr is %d\n",
- prog->instances.nr);
+ pr_warn("Internal error: instances.nr is %d\n",
+ prog->instances.nr);
}
prog->instances.nr = -1;
@@ -362,8 +371,8 @@ bpf_program__init(void *data, size_t size, char *section_name, int idx,
const size_t bpf_insn_sz = sizeof(struct bpf_insn);
if (size == 0 || size % bpf_insn_sz) {
- pr_warning("corrupted section '%s', size: %zu\n",
- section_name, size);
+ pr_warn("corrupted section '%s', size: %zu\n",
+ section_name, size);
return -EINVAL;
}
@@ -371,22 +380,22 @@ bpf_program__init(void *data, size_t size, char *section_name, int idx,
prog->section_name = strdup(section_name);
if (!prog->section_name) {
- pr_warning("failed to alloc name for prog under section(%d) %s\n",
- idx, section_name);
+ pr_warn("failed to alloc name for prog under section(%d) %s\n",
+ idx, section_name);
goto errout;
}
prog->pin_name = __bpf_program__pin_name(prog);
if (!prog->pin_name) {
- pr_warning("failed to alloc pin name for prog under section(%d) %s\n",
- idx, section_name);
+ pr_warn("failed to alloc pin name for prog under section(%d) %s\n",
+ idx, section_name);
goto errout;
}
prog->insns = malloc(size);
if (!prog->insns) {
- pr_warning("failed to alloc insns for prog under section %s\n",
- section_name);
+ pr_warn("failed to alloc insns for prog under section %s\n",
+ section_name);
goto errout;
}
prog->insns_cnt = size / bpf_insn_sz;
@@ -424,8 +433,8 @@ bpf_object__add_program(struct bpf_object *obj, void *data, size_t size,
* is still valid, so don't need special treat for
* bpf_close_object().
*/
- pr_warning("failed to alloc a new program under section '%s'\n",
- section_name);
+ pr_warn("failed to alloc a new program under section '%s'\n",
+ section_name);
bpf_program__exit(&prog);
return -ENOMEM;
}
@@ -465,8 +474,8 @@ bpf_object__init_prog_names(struct bpf_object *obj)
obj->efile.strtabidx,
sym.st_name);
if (!name) {
- pr_warning("failed to get sym name string for prog %s\n",
- prog->section_name);
+ pr_warn("failed to get sym name string for prog %s\n",
+ prog->section_name);
return -LIBBPF_ERRNO__LIBELF;
}
}
@@ -475,15 +484,15 @@ bpf_object__init_prog_names(struct bpf_object *obj)
name = ".text";
if (!name) {
- pr_warning("failed to find sym for prog %s\n",
- prog->section_name);
+ pr_warn("failed to find sym for prog %s\n",
+ prog->section_name);
return -EINVAL;
}
prog->name = strdup(name);
if (!prog->name) {
- pr_warning("failed to allocate memory for prog sym %s\n",
- name);
+ pr_warn("failed to allocate memory for prog sym %s\n",
+ name);
return -ENOMEM;
}
}
@@ -491,25 +500,43 @@ bpf_object__init_prog_names(struct bpf_object *obj)
return 0;
}
+static __u32 get_kernel_version(void)
+{
+ __u32 major, minor, patch;
+ struct utsname info;
+
+ uname(&info);
+ if (sscanf(info.release, "%u.%u.%u", &major, &minor, &patch) != 3)
+ return 0;
+ return KERNEL_VERSION(major, minor, patch);
+}
+
static struct bpf_object *bpf_object__new(const char *path,
- void *obj_buf,
- size_t obj_buf_sz)
+ const void *obj_buf,
+ size_t obj_buf_sz,
+ const char *obj_name)
{
struct bpf_object *obj;
char *end;
obj = calloc(1, sizeof(struct bpf_object) + strlen(path) + 1);
if (!obj) {
- pr_warning("alloc memory failed for %s\n", path);
+ pr_warn("alloc memory failed for %s\n", path);
return ERR_PTR(-ENOMEM);
}
strcpy(obj->path, path);
- /* Using basename() GNU version which doesn't modify arg. */
- strncpy(obj->name, basename((void *)path), sizeof(obj->name) - 1);
- end = strchr(obj->name, '.');
- if (end)
- *end = 0;
+ if (obj_name) {
+ strncpy(obj->name, obj_name, sizeof(obj->name) - 1);
+ obj->name[sizeof(obj->name) - 1] = 0;
+ } else {
+ /* Using basename() GNU version which doesn't modify arg. */
+ strncpy(obj->name, basename((void *)path),
+ sizeof(obj->name) - 1);
+ end = strchr(obj->name, '.');
+ if (end)
+ *end = 0;
+ }
obj->efile.fd = -1;
/*
@@ -526,6 +553,7 @@ static struct bpf_object *bpf_object__new(const char *path,
obj->efile.rodata_shndx = -1;
obj->efile.bss_shndx = -1;
+ obj->kern_version = get_kernel_version();
obj->loaded = false;
INIT_LIST_HEAD(&obj->list);
@@ -547,8 +575,8 @@ static void bpf_object__elf_finish(struct bpf_object *obj)
obj->efile.rodata = NULL;
obj->efile.bss = NULL;
- zfree(&obj->efile.reloc);
- obj->efile.nr_reloc = 0;
+ zfree(&obj->efile.reloc_sects);
+ obj->efile.nr_reloc_sects = 0;
zclose(obj->efile.fd);
obj->efile.obj_buf = NULL;
obj->efile.obj_buf_sz = 0;
@@ -560,7 +588,7 @@ static int bpf_object__elf_init(struct bpf_object *obj)
GElf_Ehdr *ep;
if (obj_elf_valid(obj)) {
- pr_warning("elf init: internal error\n");
+ pr_warn("elf init: internal error\n");
return -LIBBPF_ERRNO__LIBELF;
}
@@ -569,7 +597,7 @@ static int bpf_object__elf_init(struct bpf_object *obj)
* obj_buf should have been validated by
* bpf_object__open_buffer().
*/
- obj->efile.elf = elf_memory(obj->efile.obj_buf,
+ obj->efile.elf = elf_memory((char *)obj->efile.obj_buf,
obj->efile.obj_buf_sz);
} else {
obj->efile.fd = open(obj->path, O_RDONLY);
@@ -578,7 +606,7 @@ static int bpf_object__elf_init(struct bpf_object *obj)
err = -errno;
cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
- pr_warning("failed to open %s: %s\n", obj->path, cp);
+ pr_warn("failed to open %s: %s\n", obj->path, cp);
return err;
}
@@ -587,13 +615,13 @@ static int bpf_object__elf_init(struct bpf_object *obj)
}
if (!obj->efile.elf) {
- pr_warning("failed to open %s as ELF file\n", obj->path);
+ pr_warn("failed to open %s as ELF file\n", obj->path);
err = -LIBBPF_ERRNO__LIBELF;
goto errout;
}
if (!gelf_getehdr(obj->efile.elf, &obj->efile.ehdr)) {
- pr_warning("failed to get EHDR from %s\n", obj->path);
+ pr_warn("failed to get EHDR from %s\n", obj->path);
err = -LIBBPF_ERRNO__FORMAT;
goto errout;
}
@@ -602,7 +630,7 @@ static int bpf_object__elf_init(struct bpf_object *obj)
/* Old LLVM set e_machine to EM_NONE */
if (ep->e_type != ET_REL ||
(ep->e_machine && ep->e_machine != EM_BPF)) {
- pr_warning("%s is not an eBPF object file\n", obj->path);
+ pr_warn("%s is not an eBPF object file\n", obj->path);
err = -LIBBPF_ERRNO__FORMAT;
goto errout;
}
@@ -624,7 +652,7 @@ static int bpf_object__check_endianness(struct bpf_object *obj)
#else
# error "Unrecognized __BYTE_ORDER__"
#endif
- pr_warning("endianness mismatch.\n");
+ pr_warn("endianness mismatch.\n");
return -LIBBPF_ERRNO__ENDIAN;
}
@@ -642,7 +670,7 @@ bpf_object__init_kversion(struct bpf_object *obj, void *data, size_t size)
__u32 kver;
if (size != sizeof(kver)) {
- pr_warning("invalid kver section in %s\n", obj->path);
+ pr_warn("invalid kver section in %s\n", obj->path);
return -LIBBPF_ERRNO__FORMAT;
}
memcpy(&kver, data, sizeof(kver));
@@ -684,15 +712,15 @@ static int bpf_object_search_section_size(const struct bpf_object *obj,
idx++;
if (gelf_getshdr(scn, &sh) != &sh) {
- pr_warning("failed to get section(%d) header from %s\n",
- idx, obj->path);
+ pr_warn("failed to get section(%d) header from %s\n",
+ idx, obj->path);
return -EIO;
}
sec_name = elf_strptr(elf, ep->e_shstrndx, sh.sh_name);
if (!sec_name) {
- pr_warning("failed to get section(%d) name from %s\n",
- idx, obj->path);
+ pr_warn("failed to get section(%d) name from %s\n",
+ idx, obj->path);
return -EIO;
}
@@ -701,8 +729,8 @@ static int bpf_object_search_section_size(const struct bpf_object *obj,
data = elf_getdata(scn, 0);
if (!data) {
- pr_warning("failed to get section(%d) data from %s(%s)\n",
- idx, name, obj->path);
+ pr_warn("failed to get section(%d) data from %s(%s)\n",
+ idx, name, obj->path);
return -EIO;
}
@@ -762,8 +790,8 @@ int bpf_object__variable_offset(const struct bpf_object *obj, const char *name,
sname = elf_strptr(obj->efile.elf, obj->efile.strtabidx,
sym.st_name);
if (!sname) {
- pr_warning("failed to get sym name string for var %s\n",
- name);
+ pr_warn("failed to get sym name string for var %s\n",
+ name);
return -EIO;
}
if (strcmp(name, sname) == 0) {
@@ -787,7 +815,7 @@ static struct bpf_map *bpf_object__add_map(struct bpf_object *obj)
new_cap = max((size_t)4, obj->maps_cap * 3 / 2);
new_maps = realloc(obj->maps, new_cap * sizeof(*obj->maps));
if (!new_maps) {
- pr_warning("alloc maps for object failed\n");
+ pr_warn("alloc maps for object failed\n");
return ERR_PTR(-ENOMEM);
}
@@ -828,11 +856,9 @@ bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type,
libbpf_type_to_btf_name[type]);
map->name = strdup(map_name);
if (!map->name) {
- pr_warning("failed to alloc map name\n");
+ pr_warn("failed to alloc map name\n");
return -ENOMEM;
}
- pr_debug("map '%s' (global data): at sec_idx %d, offset %zu.\n",
- map_name, map->sec_idx, map->sec_offset);
def = &map->def;
def->type = BPF_MAP_TYPE_ARRAY;
@@ -840,11 +866,17 @@ bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type,
def->value_size = data->d_size;
def->max_entries = 1;
def->map_flags = type == LIBBPF_MAP_RODATA ? BPF_F_RDONLY_PROG : 0;
+ if (obj->caps.array_mmap)
+ def->map_flags |= BPF_F_MMAPABLE;
+
+ pr_debug("map '%s' (global data): at sec_idx %d, offset %zu, flags %x.\n",
+ map_name, map->sec_idx, map->sec_offset, def->map_flags);
+
if (data_buff) {
*data_buff = malloc(data->d_size);
if (!*data_buff) {
zfree(&map->name);
- pr_warning("failed to alloc map content buffer\n");
+ pr_warn("failed to alloc map content buffer\n");
return -ENOMEM;
}
memcpy(*data_buff, data->d_buf, data->d_size);
@@ -906,8 +938,8 @@ static int bpf_object__init_user_maps(struct bpf_object *obj, bool strict)
if (scn)
data = elf_getdata(scn, NULL);
if (!scn || !data) {
- pr_warning("failed to get Elf_Data from map section %d\n",
- obj->efile.maps_shndx);
+ pr_warn("failed to get Elf_Data from map section %d\n",
+ obj->efile.maps_shndx);
return -EINVAL;
}
@@ -932,13 +964,12 @@ static int bpf_object__init_user_maps(struct bpf_object *obj, bool strict)
pr_debug("maps in %s: %d maps in %zd bytes\n",
obj->path, nr_maps, data->d_size);
- map_def_sz = data->d_size / nr_maps;
- if (!data->d_size || (data->d_size % nr_maps) != 0) {
- pr_warning("unable to determine map definition size "
- "section %s, %d maps in %zd bytes\n",
- obj->path, nr_maps, data->d_size);
+ if (!data->d_size || nr_maps == 0 || (data->d_size % nr_maps) != 0) {
+ pr_warn("unable to determine map definition size section %s, %d maps in %zd bytes\n",
+ obj->path, nr_maps, data->d_size);
return -EINVAL;
}
+ map_def_sz = data->d_size / nr_maps;
/* Fill obj->maps using data in "maps" section. */
for (i = 0; i < nr_syms; i++) {
@@ -959,8 +990,8 @@ static int bpf_object__init_user_maps(struct bpf_object *obj, bool strict)
map_name = elf_strptr(obj->efile.elf, obj->efile.strtabidx,
sym.st_name);
if (!map_name) {
- pr_warning("failed to get map #%d name sym string for obj %s\n",
- i, obj->path);
+ pr_warn("failed to get map #%d name sym string for obj %s\n",
+ i, obj->path);
return -LIBBPF_ERRNO__FORMAT;
}
@@ -970,14 +1001,14 @@ static int bpf_object__init_user_maps(struct bpf_object *obj, bool strict)
pr_debug("map '%s' (legacy): at sec_idx %d, offset %zu.\n",
map_name, map->sec_idx, map->sec_offset);
if (sym.st_value + map_def_sz > data->d_size) {
- pr_warning("corrupted maps section in %s: last map \"%s\" too small\n",
- obj->path, map_name);
+ pr_warn("corrupted maps section in %s: last map \"%s\" too small\n",
+ obj->path, map_name);
return -EINVAL;
}
map->name = strdup(map_name);
if (!map->name) {
- pr_warning("failed to alloc map name\n");
+ pr_warn("failed to alloc map name\n");
return -ENOMEM;
}
pr_debug("map %d is \"%s\"\n", i, map->name);
@@ -998,13 +1029,12 @@ static int bpf_object__init_user_maps(struct bpf_object *obj, bool strict)
* incompatible.
*/
char *b;
+
for (b = ((char *)def) + sizeof(struct bpf_map_def);
b < ((char *)def) + map_def_sz; b++) {
if (*b != 0) {
- pr_warning("maps section in %s: \"%s\" "
- "has unrecognized, non-zero "
- "options\n",
- obj->path, map_name);
+ pr_warn("maps section in %s: \"%s\" has unrecognized, non-zero options\n",
+ obj->path, map_name);
if (strict)
return -EINVAL;
}
@@ -1041,27 +1071,28 @@ skip_mods_and_typedefs(const struct btf *btf, __u32 id, __u32 *res_id)
*/
static bool get_map_field_int(const char *map_name, const struct btf *btf,
const struct btf_type *def,
- const struct btf_member *m, __u32 *res) {
+ const struct btf_member *m, __u32 *res)
+{
const struct btf_type *t = skip_mods_and_typedefs(btf, m->type, NULL);
const char *name = btf__name_by_offset(btf, m->name_off);
const struct btf_array *arr_info;
const struct btf_type *arr_t;
if (!btf_is_ptr(t)) {
- pr_warning("map '%s': attr '%s': expected PTR, got %u.\n",
- map_name, name, btf_kind(t));
+ pr_warn("map '%s': attr '%s': expected PTR, got %u.\n",
+ map_name, name, btf_kind(t));
return false;
}
arr_t = btf__type_by_id(btf, t->type);
if (!arr_t) {
- pr_warning("map '%s': attr '%s': type [%u] not found.\n",
- map_name, name, t->type);
+ pr_warn("map '%s': attr '%s': type [%u] not found.\n",
+ map_name, name, t->type);
return false;
}
if (!btf_is_array(arr_t)) {
- pr_warning("map '%s': attr '%s': expected ARRAY, got %u.\n",
- map_name, name, btf_kind(arr_t));
+ pr_warn("map '%s': attr '%s': expected ARRAY, got %u.\n",
+ map_name, name, btf_kind(arr_t));
return false;
}
arr_info = btf_array(arr_t);
@@ -1069,10 +1100,32 @@ static bool get_map_field_int(const char *map_name, const struct btf *btf,
return true;
}
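/*
 * Sketch (not part of the patch): the PTR -> ARRAY shape parsed above is
 * what libbpf's map-definition macros emit. Assuming the usual
 * bpf_helpers.h definitions:
 *
 *   #define __uint(name, val)  int (*name)[val]
 *   #define __type(name, val)  typeof(val) *name
 *
 * a field such as __uint(max_entries, 64) becomes "int (*max_entries)[64]",
 * so the integer value 64 is recovered from the array's element count
 * (btf_array(arr_t)->nelems).
 */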
+static int build_map_pin_path(struct bpf_map *map, const char *path)
+{
+ char buf[PATH_MAX];
+ int err, len;
+
+ if (!path)
+ path = "/sys/fs/bpf";
+
+ len = snprintf(buf, PATH_MAX, "%s/%s", path, bpf_map__name(map));
+ if (len < 0)
+ return -EINVAL;
+ else if (len >= PATH_MAX)
+ return -ENAMETOOLONG;
+
+ err = bpf_map__set_pin_path(map, buf);
+ if (err)
+ return err;
+
+ return 0;
+}
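/*
 * Sketch (not part of the patch): the resulting pin paths, assuming the
 * default bpffs mount point.
 *
 *   build_map_pin_path(map, NULL);        pins at /sys/fs/bpf/<map name>
 *   build_map_pin_path(map, "/bpf/tc");   pins at /bpf/tc/<map name>
 */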
+
static int bpf_object__init_user_btf_map(struct bpf_object *obj,
const struct btf_type *sec,
int var_idx, int sec_idx,
- const Elf_Data *data, bool strict)
+ const Elf_Data *data, bool strict,
+ const char *pin_root_path)
{
const struct btf_type *var, *def, *t;
const struct btf_var_secinfo *vi;
@@ -1089,33 +1142,33 @@ static int bpf_object__init_user_btf_map(struct bpf_object *obj,
vlen = btf_vlen(var);
if (map_name == NULL || map_name[0] == '\0') {
- pr_warning("map #%d: empty name.\n", var_idx);
+ pr_warn("map #%d: empty name.\n", var_idx);
return -EINVAL;
}
if ((__u64)vi->offset + vi->size > data->d_size) {
- pr_warning("map '%s' BTF data is corrupted.\n", map_name);
+ pr_warn("map '%s' BTF data is corrupted.\n", map_name);
return -EINVAL;
}
if (!btf_is_var(var)) {
- pr_warning("map '%s': unexpected var kind %u.\n",
- map_name, btf_kind(var));
+ pr_warn("map '%s': unexpected var kind %u.\n",
+ map_name, btf_kind(var));
return -EINVAL;
}
if (var_extra->linkage != BTF_VAR_GLOBAL_ALLOCATED &&
var_extra->linkage != BTF_VAR_STATIC) {
- pr_warning("map '%s': unsupported var linkage %u.\n",
- map_name, var_extra->linkage);
+ pr_warn("map '%s': unsupported var linkage %u.\n",
+ map_name, var_extra->linkage);
return -EOPNOTSUPP;
}
def = skip_mods_and_typedefs(obj->btf, var->type, NULL);
if (!btf_is_struct(def)) {
- pr_warning("map '%s': unexpected def kind %u.\n",
- map_name, btf_kind(var));
+ pr_warn("map '%s': unexpected def kind %u.\n",
+ map_name, btf_kind(var));
return -EINVAL;
}
if (def->size > vi->size) {
- pr_warning("map '%s': invalid def size.\n", map_name);
+ pr_warn("map '%s': invalid def size.\n", map_name);
return -EINVAL;
}
@@ -1124,7 +1177,7 @@ static int bpf_object__init_user_btf_map(struct bpf_object *obj,
return PTR_ERR(map);
map->name = strdup(map_name);
if (!map->name) {
- pr_warning("map '%s': failed to alloc map name.\n", map_name);
+ pr_warn("map '%s': failed to alloc map name.\n", map_name);
return -ENOMEM;
}
map->libbpf_type = LIBBPF_MAP_UNSPEC;
@@ -1140,8 +1193,7 @@ static int bpf_object__init_user_btf_map(struct bpf_object *obj,
const char *name = btf__name_by_offset(obj->btf, m->name_off);
if (!name) {
- pr_warning("map '%s': invalid field #%d.\n",
- map_name, i);
+ pr_warn("map '%s': invalid field #%d.\n", map_name, i);
return -EINVAL;
}
if (strcmp(name, "type") == 0) {
@@ -1171,8 +1223,8 @@ static int bpf_object__init_user_btf_map(struct bpf_object *obj,
pr_debug("map '%s': found key_size = %u.\n",
map_name, sz);
if (map->def.key_size && map->def.key_size != sz) {
- pr_warning("map '%s': conflicting key size %u != %u.\n",
- map_name, map->def.key_size, sz);
+ pr_warn("map '%s': conflicting key size %u != %u.\n",
+ map_name, map->def.key_size, sz);
return -EINVAL;
}
map->def.key_size = sz;
@@ -1181,26 +1233,26 @@ static int bpf_object__init_user_btf_map(struct bpf_object *obj,
t = btf__type_by_id(obj->btf, m->type);
if (!t) {
- pr_warning("map '%s': key type [%d] not found.\n",
- map_name, m->type);
+ pr_warn("map '%s': key type [%d] not found.\n",
+ map_name, m->type);
return -EINVAL;
}
if (!btf_is_ptr(t)) {
- pr_warning("map '%s': key spec is not PTR: %u.\n",
- map_name, btf_kind(t));
+ pr_warn("map '%s': key spec is not PTR: %u.\n",
+ map_name, btf_kind(t));
return -EINVAL;
}
sz = btf__resolve_size(obj->btf, t->type);
if (sz < 0) {
- pr_warning("map '%s': can't determine key size for type [%u]: %lld.\n",
- map_name, t->type, sz);
+ pr_warn("map '%s': can't determine key size for type [%u]: %lld.\n",
+ map_name, t->type, sz);
return sz;
}
pr_debug("map '%s': found key [%u], sz = %lld.\n",
map_name, t->type, sz);
if (map->def.key_size && map->def.key_size != sz) {
- pr_warning("map '%s': conflicting key size %u != %lld.\n",
- map_name, map->def.key_size, sz);
+ pr_warn("map '%s': conflicting key size %u != %lld.\n",
+ map_name, map->def.key_size, sz);
return -EINVAL;
}
map->def.key_size = sz;
@@ -1214,8 +1266,8 @@ static int bpf_object__init_user_btf_map(struct bpf_object *obj,
pr_debug("map '%s': found value_size = %u.\n",
map_name, sz);
if (map->def.value_size && map->def.value_size != sz) {
- pr_warning("map '%s': conflicting value size %u != %u.\n",
- map_name, map->def.value_size, sz);
+ pr_warn("map '%s': conflicting value size %u != %u.\n",
+ map_name, map->def.value_size, sz);
return -EINVAL;
}
map->def.value_size = sz;
@@ -1224,34 +1276,58 @@ static int bpf_object__init_user_btf_map(struct bpf_object *obj,
t = btf__type_by_id(obj->btf, m->type);
if (!t) {
- pr_warning("map '%s': value type [%d] not found.\n",
- map_name, m->type);
+ pr_warn("map '%s': value type [%d] not found.\n",
+ map_name, m->type);
return -EINVAL;
}
if (!btf_is_ptr(t)) {
- pr_warning("map '%s': value spec is not PTR: %u.\n",
- map_name, btf_kind(t));
+ pr_warn("map '%s': value spec is not PTR: %u.\n",
+ map_name, btf_kind(t));
return -EINVAL;
}
sz = btf__resolve_size(obj->btf, t->type);
if (sz < 0) {
- pr_warning("map '%s': can't determine value size for type [%u]: %lld.\n",
- map_name, t->type, sz);
+ pr_warn("map '%s': can't determine value size for type [%u]: %lld.\n",
+ map_name, t->type, sz);
return sz;
}
pr_debug("map '%s': found value [%u], sz = %lld.\n",
map_name, t->type, sz);
if (map->def.value_size && map->def.value_size != sz) {
- pr_warning("map '%s': conflicting value size %u != %lld.\n",
- map_name, map->def.value_size, sz);
+ pr_warn("map '%s': conflicting value size %u != %lld.\n",
+ map_name, map->def.value_size, sz);
return -EINVAL;
}
map->def.value_size = sz;
map->btf_value_type_id = t->type;
+ } else if (strcmp(name, "pinning") == 0) {
+ __u32 val;
+ int err;
+
+ if (!get_map_field_int(map_name, obj->btf, def, m,
+ &val))
+ return -EINVAL;
+ pr_debug("map '%s': found pinning = %u.\n",
+ map_name, val);
+
+ if (val != LIBBPF_PIN_NONE &&
+ val != LIBBPF_PIN_BY_NAME) {
+ pr_warn("map '%s': invalid pinning value %u.\n",
+ map_name, val);
+ return -EINVAL;
+ }
+ if (val == LIBBPF_PIN_BY_NAME) {
+ err = build_map_pin_path(map, pin_root_path);
+ if (err) {
+ pr_warn("map '%s': couldn't build pin path.\n",
+ map_name);
+ return err;
+ }
+ }
} else {
if (strict) {
- pr_warning("map '%s': unknown field '%s'.\n",
- map_name, name);
+ pr_warn("map '%s': unknown field '%s'.\n",
+ map_name, name);
return -ENOTSUP;
}
pr_debug("map '%s': ignoring unknown field '%s'.\n",
@@ -1260,14 +1336,15 @@ static int bpf_object__init_user_btf_map(struct bpf_object *obj,
}
if (map->def.type == BPF_MAP_TYPE_UNSPEC) {
- pr_warning("map '%s': map type isn't specified.\n", map_name);
+ pr_warn("map '%s': map type isn't specified.\n", map_name);
return -EINVAL;
}
return 0;
}
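/*
 * Illustrative sketch (not part of the patch): a BTF-defined map using
 * the new "pinning" attribute, assuming the usual bpf_helpers.h macros
 * (__uint, __type, SEC) and the LIBBPF_PIN_* enum introduced with this
 * feature.
 */
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 16);
	__type(key, __u32);
	__type(value, __u64);
	__uint(pinning, LIBBPF_PIN_BY_NAME); /* pin/reuse at <pin_root>/<map name> */
} pinned_counters SEC(".maps");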
-static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict)
+static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict,
+ const char *pin_root_path)
{
const struct btf_type *sec = NULL;
int nr_types, i, vlen, err;
@@ -1283,8 +1360,8 @@ static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict)
if (scn)
data = elf_getdata(scn, NULL);
if (!scn || !data) {
- pr_warning("failed to get Elf_Data from map section %d (%s)\n",
- obj->efile.maps_shndx, MAPS_ELF_SEC);
+ pr_warn("failed to get Elf_Data from map section %d (%s)\n",
+ obj->efile.maps_shndx, MAPS_ELF_SEC);
return -EINVAL;
}
@@ -1301,7 +1378,7 @@ static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict)
}
if (!sec) {
- pr_warning("DATASEC '%s' not found.\n", MAPS_ELF_SEC);
+ pr_warn("DATASEC '%s' not found.\n", MAPS_ELF_SEC);
return -ENOENT;
}
@@ -1309,7 +1386,8 @@ static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict)
for (i = 0; i < vlen; i++) {
err = bpf_object__init_user_btf_map(obj, sec, i,
obj->efile.btf_maps_shndx,
- data, strict);
+ data, strict,
+ pin_root_path);
if (err)
return err;
}
@@ -1317,16 +1395,17 @@ static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict)
return 0;
}
-static int bpf_object__init_maps(struct bpf_object *obj, int flags)
+static int bpf_object__init_maps(struct bpf_object *obj, bool relaxed_maps,
+ const char *pin_root_path)
{
- bool strict = !(flags & MAPS_RELAX_COMPAT);
+ bool strict = !relaxed_maps;
int err;
err = bpf_object__init_user_maps(obj, strict);
if (err)
return err;
- err = bpf_object__init_user_btf_maps(obj, strict);
+ err = bpf_object__init_user_btf_maps(obj, strict, pin_root_path);
if (err)
return err;
@@ -1445,14 +1524,13 @@ static int bpf_object__init_btf(struct bpf_object *obj,
if (btf_data) {
obj->btf = btf__new(btf_data->d_buf, btf_data->d_size);
if (IS_ERR(obj->btf)) {
- pr_warning("Error loading ELF section %s: %d.\n",
- BTF_ELF_SEC, err);
+ pr_warn("Error loading ELF section %s: %d.\n",
+ BTF_ELF_SEC, err);
goto out;
}
err = btf__finalize_data(obj, obj->btf);
if (err) {
- pr_warning("Error finalizing %s: %d.\n",
- BTF_ELF_SEC, err);
+ pr_warn("Error finalizing %s: %d.\n", BTF_ELF_SEC, err);
goto out;
}
}
@@ -1465,8 +1543,8 @@ static int bpf_object__init_btf(struct bpf_object *obj,
obj->btf_ext = btf_ext__new(btf_ext_data->d_buf,
btf_ext_data->d_size);
if (IS_ERR(obj->btf_ext)) {
- pr_warning("Error loading ELF section %s: %ld. Ignored and continue.\n",
- BTF_EXT_ELF_SEC, PTR_ERR(obj->btf_ext));
+ pr_warn("Error loading ELF section %s: %ld. Ignored and continue.\n",
+ BTF_EXT_ELF_SEC, PTR_ERR(obj->btf_ext));
obj->btf_ext = NULL;
goto out;
}
@@ -1482,7 +1560,7 @@ out:
obj->btf = NULL;
}
if (btf_required && !obj->btf) {
- pr_warning("BTF is required, but is missing or corrupted.\n");
+ pr_warn("BTF is required, but is missing or corrupted.\n");
return err == 0 ? -ENOENT : err;
}
return 0;
@@ -1500,8 +1578,8 @@ static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
err = btf__load(obj->btf);
if (err) {
- pr_warning("Error loading %s into kernel: %d.\n",
- BTF_ELF_SEC, err);
+ pr_warn("Error loading %s into kernel: %d.\n",
+ BTF_ELF_SEC, err);
btf__free(obj->btf);
obj->btf = NULL;
/* btf_ext can't exist without btf, so free it as well */
@@ -1516,7 +1594,8 @@ static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
return 0;
}
-static int bpf_object__elf_collect(struct bpf_object *obj, int flags)
+static int bpf_object__elf_collect(struct bpf_object *obj, bool relaxed_maps,
+ const char *pin_root_path)
{
Elf *elf = obj->efile.elf;
GElf_Ehdr *ep = &obj->efile.ehdr;
@@ -1527,7 +1606,7 @@ static int bpf_object__elf_collect(struct bpf_object *obj, int flags)
/* Elf is corrupted/truncated, avoid calling elf_strptr. */
if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL)) {
- pr_warning("failed to get e_shstrndx from %s\n", obj->path);
+ pr_warn("failed to get e_shstrndx from %s\n", obj->path);
return -LIBBPF_ERRNO__FORMAT;
}
@@ -1538,22 +1617,22 @@ static int bpf_object__elf_collect(struct bpf_object *obj, int flags)
idx++;
if (gelf_getshdr(scn, &sh) != &sh) {
- pr_warning("failed to get section(%d) header from %s\n",
- idx, obj->path);
+ pr_warn("failed to get section(%d) header from %s\n",
+ idx, obj->path);
return -LIBBPF_ERRNO__FORMAT;
}
name = elf_strptr(elf, ep->e_shstrndx, sh.sh_name);
if (!name) {
- pr_warning("failed to get section(%d) name from %s\n",
- idx, obj->path);
+ pr_warn("failed to get section(%d) name from %s\n",
+ idx, obj->path);
return -LIBBPF_ERRNO__FORMAT;
}
data = elf_getdata(scn, 0);
if (!data) {
- pr_warning("failed to get section(%d) data from %s(%s)\n",
- idx, name, obj->path);
+ pr_warn("failed to get section(%d) data from %s(%s)\n",
+ idx, name, obj->path);
return -LIBBPF_ERRNO__FORMAT;
}
pr_debug("section(%d) %s, size %ld, link %d, flags %lx, type=%d\n",
@@ -1583,8 +1662,8 @@ static int bpf_object__elf_collect(struct bpf_object *obj, int flags)
btf_ext_data = data;
} else if (sh.sh_type == SHT_SYMTAB) {
if (obj->efile.symbols) {
- pr_warning("bpf: multiple SYMTAB in %s\n",
- obj->path);
+ pr_warn("bpf: multiple SYMTAB in %s\n",
+ obj->path);
return -LIBBPF_ERRNO__FORMAT;
}
obj->efile.symbols = data;
@@ -1594,14 +1673,16 @@ static int bpf_object__elf_collect(struct bpf_object *obj, int flags)
if (strcmp(name, ".text") == 0)
obj->efile.text_shndx = idx;
err = bpf_object__add_program(obj, data->d_buf,
- data->d_size, name, idx);
+ data->d_size,
+ name, idx);
if (err) {
char errmsg[STRERR_BUFSIZE];
- char *cp = libbpf_strerror_r(-err, errmsg,
- sizeof(errmsg));
+ char *cp;
- pr_warning("failed to alloc program %s (%s): %s",
- name, obj->path, cp);
+ cp = libbpf_strerror_r(-err, errmsg,
+ sizeof(errmsg));
+ pr_warn("failed to alloc program %s (%s): %s",
+ name, obj->path, cp);
return err;
}
} else if (strcmp(name, ".data") == 0) {
@@ -1614,8 +1695,8 @@ static int bpf_object__elf_collect(struct bpf_object *obj, int flags)
pr_debug("skip section(%d) %s\n", idx, name);
}
} else if (sh.sh_type == SHT_REL) {
- int nr_reloc = obj->efile.nr_reloc;
- void *reloc = obj->efile.reloc;
+ int nr_sects = obj->efile.nr_reloc_sects;
+ void *sects = obj->efile.reloc_sects;
int sec = sh.sh_info; /* points to other section */
/* Only do relo for section with exec instructions */
@@ -1625,18 +1706,18 @@ static int bpf_object__elf_collect(struct bpf_object *obj, int flags)
continue;
}
- reloc = reallocarray(reloc, nr_reloc + 1,
- sizeof(*obj->efile.reloc));
- if (!reloc) {
- pr_warning("realloc failed\n");
+ sects = reallocarray(sects, nr_sects + 1,
+ sizeof(*obj->efile.reloc_sects));
+ if (!sects) {
+ pr_warn("reloc_sects realloc failed\n");
return -ENOMEM;
}
- obj->efile.reloc = reloc;
- obj->efile.nr_reloc++;
+ obj->efile.reloc_sects = sects;
+ obj->efile.nr_reloc_sects++;
- obj->efile.reloc[nr_reloc].shdr = sh;
- obj->efile.reloc[nr_reloc].data = data;
+ obj->efile.reloc_sects[nr_sects].shdr = sh;
+ obj->efile.reloc_sects[nr_sects].data = data;
} else if (sh.sh_type == SHT_NOBITS && strcmp(name, ".bss") == 0) {
obj->efile.bss = data;
obj->efile.bss_shndx = idx;
@@ -1645,13 +1726,13 @@ static int bpf_object__elf_collect(struct bpf_object *obj, int flags)
}
}
- if (!obj->efile.strtabidx || obj->efile.strtabidx >= idx) {
- pr_warning("Corrupted ELF file: index of strtab invalid\n");
+ if (!obj->efile.strtabidx || obj->efile.strtabidx > idx) {
+ pr_warn("Corrupted ELF file: index of strtab invalid\n");
return -LIBBPF_ERRNO__FORMAT;
}
err = bpf_object__init_btf(obj, btf_data, btf_ext_data);
if (!err)
- err = bpf_object__init_maps(obj, flags);
+ err = bpf_object__init_maps(obj, relaxed_maps, pin_root_path);
if (!err)
err = bpf_object__sanitize_and_load_btf(obj);
if (!err)
@@ -1701,14 +1782,6 @@ static bool bpf_object__shndx_is_maps(const struct bpf_object *obj,
shndx == obj->efile.btf_maps_shndx;
}
-static bool bpf_object__relo_in_known_section(const struct bpf_object *obj,
- int shndx)
-{
- return shndx == obj->efile.text_shndx ||
- bpf_object__shndx_is_maps(obj, shndx) ||
- bpf_object__shndx_is_data(obj, shndx);
-}
-
static enum libbpf_map_type
bpf_object__section_to_libbpf_map_type(const struct bpf_object *obj, int shndx)
{
@@ -1722,130 +1795,162 @@ bpf_object__section_to_libbpf_map_type(const struct bpf_object *obj, int shndx)
return LIBBPF_MAP_UNSPEC;
}
+static int bpf_program__record_reloc(struct bpf_program *prog,
+ struct reloc_desc *reloc_desc,
+ __u32 insn_idx, const char *name,
+ const GElf_Sym *sym, const GElf_Rel *rel)
+{
+ struct bpf_insn *insn = &prog->insns[insn_idx];
+ size_t map_idx, nr_maps = prog->obj->nr_maps;
+ struct bpf_object *obj = prog->obj;
+ __u32 shdr_idx = sym->st_shndx;
+ enum libbpf_map_type type;
+ struct bpf_map *map;
+
+ /* sub-program call relocation */
+ if (insn->code == (BPF_JMP | BPF_CALL)) {
+ if (insn->src_reg != BPF_PSEUDO_CALL) {
+ pr_warn("incorrect bpf_call opcode\n");
+ return -LIBBPF_ERRNO__RELOC;
+ }
+ /* text_shndx can be 0, if no default "main" program exists */
+ if (!shdr_idx || shdr_idx != obj->efile.text_shndx) {
+ pr_warn("bad call relo against section %u\n", shdr_idx);
+ return -LIBBPF_ERRNO__RELOC;
+ }
+ if (sym->st_value % 8) {
+ pr_warn("bad call relo offset: %lu\n", sym->st_value);
+ return -LIBBPF_ERRNO__RELOC;
+ }
+ reloc_desc->type = RELO_CALL;
+ reloc_desc->insn_idx = insn_idx;
+ reloc_desc->text_off = sym->st_value / 8;
+ obj->has_pseudo_calls = true;
+ return 0;
+ }
+
+ if (insn->code != (BPF_LD | BPF_IMM | BPF_DW)) {
+ pr_warn("invalid relo for insns[%d].code 0x%x\n",
+ insn_idx, insn->code);
+ return -LIBBPF_ERRNO__RELOC;
+ }
+ if (!shdr_idx || shdr_idx >= SHN_LORESERVE) {
+ pr_warn("invalid relo for \'%s\' in special section 0x%x; forgot to initialize global var?..\n",
+ name, shdr_idx);
+ return -LIBBPF_ERRNO__RELOC;
+ }
+
+ type = bpf_object__section_to_libbpf_map_type(obj, shdr_idx);
+
+ /* generic map reference relocation */
+ if (type == LIBBPF_MAP_UNSPEC) {
+ if (!bpf_object__shndx_is_maps(obj, shdr_idx)) {
+ pr_warn("bad map relo against section %u\n",
+ shdr_idx);
+ return -LIBBPF_ERRNO__RELOC;
+ }
+ for (map_idx = 0; map_idx < nr_maps; map_idx++) {
+ map = &obj->maps[map_idx];
+ if (map->libbpf_type != type ||
+ map->sec_idx != sym->st_shndx ||
+ map->sec_offset != sym->st_value)
+ continue;
+ pr_debug("found map %zd (%s, sec %d, off %zu) for insn %u\n",
+ map_idx, map->name, map->sec_idx,
+ map->sec_offset, insn_idx);
+ break;
+ }
+ if (map_idx >= nr_maps) {
+ pr_warn("map relo failed to find map for sec %u, off %llu\n",
+ shdr_idx, (__u64)sym->st_value);
+ return -LIBBPF_ERRNO__RELOC;
+ }
+ reloc_desc->type = RELO_LD64;
+ reloc_desc->insn_idx = insn_idx;
+ reloc_desc->map_idx = map_idx;
+ return 0;
+ }
+
+ /* global data map relocation */
+ if (!bpf_object__shndx_is_data(obj, shdr_idx)) {
+ pr_warn("bad data relo against section %u\n", shdr_idx);
+ return -LIBBPF_ERRNO__RELOC;
+ }
+ if (!obj->caps.global_data) {
+ pr_warn("relocation: kernel does not support global \'%s\' variable access in insns[%d]\n",
+ name, insn_idx);
+ return -LIBBPF_ERRNO__RELOC;
+ }
+ for (map_idx = 0; map_idx < nr_maps; map_idx++) {
+ map = &obj->maps[map_idx];
+ if (map->libbpf_type != type)
+ continue;
+ pr_debug("found data map %zd (%s, sec %d, off %zu) for insn %u\n",
+ map_idx, map->name, map->sec_idx, map->sec_offset,
+ insn_idx);
+ break;
+ }
+ if (map_idx >= nr_maps) {
+ pr_warn("data relo failed to find map for sec %u\n",
+ shdr_idx);
+ return -LIBBPF_ERRNO__RELOC;
+ }
+
+ reloc_desc->type = RELO_DATA;
+ reloc_desc->insn_idx = insn_idx;
+ reloc_desc->map_idx = map_idx;
+ return 0;
+}
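/*
 * Sketch (not part of the patch): the two instruction shapes classified
 * above. A map or global-data reference compiles to a ld_imm64 whose
 * immediate is patched later, while a sub-program call uses the
 * BPF_PSEUDO_CALL source register:
 *
 *   code == (BPF_LD | BPF_IMM | BPF_DW)     r1 = <map/data>  -> RELO_LD64/RELO_DATA
 *   code == (BPF_JMP | BPF_CALL),
 *   src_reg == BPF_PSEUDO_CALL              call <subprog>   -> RELO_CALL
 */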
+
static int
bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
Elf_Data *data, struct bpf_object *obj)
{
Elf_Data *symbols = obj->efile.symbols;
- struct bpf_map *maps = obj->maps;
- size_t nr_maps = obj->nr_maps;
- int i, nrels;
+ int err, i, nrels;
pr_debug("collecting relocating info for: '%s'\n", prog->section_name);
nrels = shdr->sh_size / shdr->sh_entsize;
prog->reloc_desc = malloc(sizeof(*prog->reloc_desc) * nrels);
if (!prog->reloc_desc) {
- pr_warning("failed to alloc memory in relocation\n");
+ pr_warn("failed to alloc memory in relocation\n");
return -ENOMEM;
}
prog->nr_reloc = nrels;
for (i = 0; i < nrels; i++) {
- struct bpf_insn *insns = prog->insns;
- enum libbpf_map_type type;
- unsigned int insn_idx;
- unsigned int shdr_idx;
const char *name;
- size_t map_idx;
+ __u32 insn_idx;
GElf_Sym sym;
GElf_Rel rel;
if (!gelf_getrel(data, i, &rel)) {
- pr_warning("relocation: failed to get %d reloc\n", i);
+ pr_warn("relocation: failed to get %d reloc\n", i);
return -LIBBPF_ERRNO__FORMAT;
}
-
if (!gelf_getsym(symbols, GELF_R_SYM(rel.r_info), &sym)) {
- pr_warning("relocation: symbol %"PRIx64" not found\n",
- GELF_R_SYM(rel.r_info));
+ pr_warn("relocation: symbol %"PRIx64" not found\n",
+ GELF_R_SYM(rel.r_info));
return -LIBBPF_ERRNO__FORMAT;
}
+ if (rel.r_offset % sizeof(struct bpf_insn))
+ return -LIBBPF_ERRNO__FORMAT;
+ insn_idx = rel.r_offset / sizeof(struct bpf_insn);
name = elf_strptr(obj->efile.elf, obj->efile.strtabidx,
sym.st_name) ? : "<?>";
- pr_debug("relo for %lld value %lld name %d (\'%s\')\n",
- (long long) (rel.r_info >> 32),
- (long long) sym.st_value, sym.st_name, name);
+ pr_debug("relo for shdr %u, symb %llu, value %llu, type %d, bind %d, name %d (\'%s\'), insn %u\n",
+ (__u32)sym.st_shndx, (__u64)GELF_R_SYM(rel.r_info),
+ (__u64)sym.st_value, GELF_ST_TYPE(sym.st_info),
+ GELF_ST_BIND(sym.st_info), sym.st_name, name,
+ insn_idx);
- shdr_idx = sym.st_shndx;
- insn_idx = rel.r_offset / sizeof(struct bpf_insn);
- pr_debug("relocation: insn_idx=%u, shdr_idx=%u\n",
- insn_idx, shdr_idx);
-
- if (shdr_idx >= SHN_LORESERVE) {
- pr_warning("relocation: not yet supported relo for non-static global \'%s\' variable in special section (0x%x) found in insns[%d].code 0x%x\n",
- name, shdr_idx, insn_idx,
- insns[insn_idx].code);
- return -LIBBPF_ERRNO__RELOC;
- }
- if (!bpf_object__relo_in_known_section(obj, shdr_idx)) {
- pr_warning("Program '%s' contains unrecognized relo data pointing to section %u\n",
- prog->section_name, shdr_idx);
- return -LIBBPF_ERRNO__RELOC;
- }
-
- if (insns[insn_idx].code == (BPF_JMP | BPF_CALL)) {
- if (insns[insn_idx].src_reg != BPF_PSEUDO_CALL) {
- pr_warning("incorrect bpf_call opcode\n");
- return -LIBBPF_ERRNO__RELOC;
- }
- prog->reloc_desc[i].type = RELO_CALL;
- prog->reloc_desc[i].insn_idx = insn_idx;
- prog->reloc_desc[i].text_off = sym.st_value;
- obj->has_pseudo_calls = true;
- continue;
- }
-
- if (insns[insn_idx].code != (BPF_LD | BPF_IMM | BPF_DW)) {
- pr_warning("bpf: relocation: invalid relo for insns[%d].code 0x%x\n",
- insn_idx, insns[insn_idx].code);
- return -LIBBPF_ERRNO__RELOC;
- }
-
- if (bpf_object__shndx_is_maps(obj, shdr_idx) ||
- bpf_object__shndx_is_data(obj, shdr_idx)) {
- type = bpf_object__section_to_libbpf_map_type(obj, shdr_idx);
- if (type != LIBBPF_MAP_UNSPEC) {
- if (GELF_ST_BIND(sym.st_info) == STB_GLOBAL) {
- pr_warning("bpf: relocation: not yet supported relo for non-static global \'%s\' variable found in insns[%d].code 0x%x\n",
- name, insn_idx, insns[insn_idx].code);
- return -LIBBPF_ERRNO__RELOC;
- }
- if (!obj->caps.global_data) {
- pr_warning("bpf: relocation: kernel does not support global \'%s\' variable access in insns[%d]\n",
- name, insn_idx);
- return -LIBBPF_ERRNO__RELOC;
- }
- }
-
- for (map_idx = 0; map_idx < nr_maps; map_idx++) {
- if (maps[map_idx].libbpf_type != type)
- continue;
- if (type != LIBBPF_MAP_UNSPEC ||
- (maps[map_idx].sec_idx == sym.st_shndx &&
- maps[map_idx].sec_offset == sym.st_value)) {
- pr_debug("relocation: found map %zd (%s, sec_idx %d, offset %zu) for insn %u\n",
- map_idx, maps[map_idx].name,
- maps[map_idx].sec_idx,
- maps[map_idx].sec_offset,
- insn_idx);
- break;
- }
- }
-
- if (map_idx >= nr_maps) {
- pr_warning("bpf relocation: map_idx %d larger than %d\n",
- (int)map_idx, (int)nr_maps - 1);
- return -LIBBPF_ERRNO__RELOC;
- }
-
- prog->reloc_desc[i].type = type != LIBBPF_MAP_UNSPEC ?
- RELO_DATA : RELO_LD64;
- prog->reloc_desc[i].insn_idx = insn_idx;
- prog->reloc_desc[i].map_idx = map_idx;
- }
+ err = bpf_program__record_reloc(prog, &prog->reloc_desc[i],
+ insn_idx, name, &sym, &rel);
+ if (err)
+ return err;
}
return 0;
}
@@ -1897,16 +2002,22 @@ int bpf_map__reuse_fd(struct bpf_map *map, int fd)
return -errno;
new_fd = open("/", O_RDONLY | O_CLOEXEC);
- if (new_fd < 0)
+ if (new_fd < 0) {
+ err = -errno;
goto err_free_new_name;
+ }
new_fd = dup3(fd, new_fd, O_CLOEXEC);
- if (new_fd < 0)
+ if (new_fd < 0) {
+ err = -errno;
goto err_close_new_fd;
+ }
err = zclose(map->fd);
- if (err)
+ if (err) {
+ err = -errno;
goto err_close_new_fd;
+ }
free(map->name);
map->fd = new_fd;
@@ -1918,6 +2029,7 @@ int bpf_map__reuse_fd(struct bpf_map *map, int fd)
map->def.map_flags = info.map_flags;
map->btf_key_type_id = info.btf_key_type_id;
map->btf_value_type_id = info.btf_value_type_id;
+ map->reused = true;
return 0;
@@ -1925,7 +2037,7 @@ err_close_new_fd:
close(new_fd);
err_free_new_name:
free(new_name);
- return -errno;
+ return err;
}
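/*
 * Sketch (not part of the patch): one reading of the open("/") + dup3()
 * dance above. open() reserves a fresh descriptor number with O_CLOEXEC
 * already set; dup3(fd, new_fd, O_CLOEXEC) then atomically turns that
 * descriptor into a duplicate of the source fd, so the duplicated map
 * fd is never observable without close-on-exec.
 */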
int bpf_map__resize(struct bpf_map *map, __u32 max_entries)
@@ -1964,8 +2076,8 @@ bpf_object__probe_name(struct bpf_object *obj)
ret = bpf_load_program_xattr(&attr, NULL, 0);
if (ret < 0) {
cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
- pr_warning("Error in %s():%s(%d). Couldn't load basic 'r0 = 0' BPF program.\n",
- __func__, cp, errno);
+ pr_warn("Error in %s():%s(%d). Couldn't load basic 'r0 = 0' BPF program.\n",
+ __func__, cp, errno);
return -errno;
}
close(ret);
@@ -2005,8 +2117,8 @@ bpf_object__probe_global_data(struct bpf_object *obj)
map = bpf_create_map_xattr(&map_attr);
if (map < 0) {
cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
- pr_warning("Error in %s():%s(%d). Couldn't create simple array map.\n",
- __func__, cp, errno);
+ pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n",
+ __func__, cp, errno);
return -errno;
}
@@ -2030,7 +2142,7 @@ bpf_object__probe_global_data(struct bpf_object *obj)
static int bpf_object__probe_btf_func(struct bpf_object *obj)
{
- const char strs[] = "\0int\0x\0a";
+ static const char strs[] = "\0int\0x\0a";
/* void x(int a) {} */
__u32 types[] = {
/* int */
@@ -2056,7 +2168,7 @@ static int bpf_object__probe_btf_func(struct bpf_object *obj)
static int bpf_object__probe_btf_datasec(struct bpf_object *obj)
{
- const char strs[] = "\0x\0.data";
+ static const char strs[] = "\0x\0.data";
/* static int a; */
__u32 types[] = {
/* int */
@@ -2081,6 +2193,27 @@ static int bpf_object__probe_btf_datasec(struct bpf_object *obj)
return 0;
}
+static int bpf_object__probe_array_mmap(struct bpf_object *obj)
+{
+ struct bpf_create_map_attr attr = {
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_flags = BPF_F_MMAPABLE,
+ .key_size = sizeof(int),
+ .value_size = sizeof(int),
+ .max_entries = 1,
+ };
+ int fd;
+
+ fd = bpf_create_map_xattr(&attr);
+ if (fd >= 0) {
+ obj->caps.array_mmap = 1;
+ close(fd);
+ return 1;
+ }
+
+ return 0;
+}
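/*
 * Sketch (not part of the patch): what the probed BPF_F_MMAPABLE
 * capability enables. An mmap-able array map's values can be mapped
 * directly, avoiding bpf_map_{lookup,update}_elem() syscalls; the
 * length handling here is a simplifying assumption.
 */
#include <sys/mman.h>

static void *map_values(int map_fd, size_t len)
{
	/* MAP_SHARED so updates are visible to the BPF program as well */
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
		       map_fd, 0);

	return p == MAP_FAILED ? NULL : p;
}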
+
static int
bpf_object__probe_caps(struct bpf_object *obj)
{
@@ -2089,6 +2222,7 @@ bpf_object__probe_caps(struct bpf_object *obj)
bpf_object__probe_global_data,
bpf_object__probe_btf_func,
bpf_object__probe_btf_datasec,
+ bpf_object__probe_array_mmap,
};
int i, ret;
@@ -2101,6 +2235,66 @@ bpf_object__probe_caps(struct bpf_object *obj)
return 0;
}
+static bool map_is_reuse_compat(const struct bpf_map *map, int map_fd)
+{
+ struct bpf_map_info map_info = {};
+ char msg[STRERR_BUFSIZE];
+ __u32 map_info_len;
+
+ map_info_len = sizeof(map_info);
+
+ if (bpf_obj_get_info_by_fd(map_fd, &map_info, &map_info_len)) {
+ pr_warn("failed to get map info for map FD %d: %s\n",
+ map_fd, libbpf_strerror_r(errno, msg, sizeof(msg)));
+ return false;
+ }
+
+ return (map_info.type == map->def.type &&
+ map_info.key_size == map->def.key_size &&
+ map_info.value_size == map->def.value_size &&
+ map_info.max_entries == map->def.max_entries &&
+ map_info.map_flags == map->def.map_flags);
+}
+
+static int
+bpf_object__reuse_map(struct bpf_map *map)
+{
+ char *cp, errmsg[STRERR_BUFSIZE];
+ int err, pin_fd;
+
+ pin_fd = bpf_obj_get(map->pin_path);
+ if (pin_fd < 0) {
+ err = -errno;
+ if (err == -ENOENT) {
+ pr_debug("found no pinned map to reuse at '%s'\n",
+ map->pin_path);
+ return 0;
+ }
+
+ cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
+ pr_warn("couldn't retrieve pinned map '%s': %s\n",
+ map->pin_path, cp);
+ return err;
+ }
+
+ if (!map_is_reuse_compat(map, pin_fd)) {
+ pr_warn("couldn't reuse pinned map at '%s': parameter mismatch\n",
+ map->pin_path);
+ close(pin_fd);
+ return -EINVAL;
+ }
+
+ err = bpf_map__reuse_fd(map, pin_fd);
+ if (err) {
+ close(pin_fd);
+ return err;
+ }
+ map->pinned = true;
+ pr_debug("reused pinned map at '%s'\n", map->pin_path);
+
+ return 0;
+}
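/*
 * Sketch (not part of the patch): the reuse path above, done by hand
 * with public libbpf APIs. "/sys/fs/bpf/counters" is a hypothetical
 * pin path.
 */
#include <errno.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>

static int reuse_by_hand(struct bpf_map *map)
{
	int fd = bpf_obj_get("/sys/fs/bpf/counters");

	if (fd < 0)
		return -errno;             /* nothing pinned there yet */
	return bpf_map__reuse_fd(map, fd); /* map now wraps the pinned fd */
}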
+
static int
bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
{
@@ -2121,8 +2315,8 @@ bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
err = bpf_map_freeze(map->fd);
if (err) {
cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
- pr_warning("Error freezing map(%s) as read-only: %s\n",
- map->name, cp);
+ pr_warn("Error freezing map(%s) as read-only: %s\n",
+ map->name, cp);
err = 0;
}
}
@@ -2143,6 +2337,15 @@ bpf_object__create_maps(struct bpf_object *obj)
char *cp, errmsg[STRERR_BUFSIZE];
int *pfd = &map->fd;
+ if (map->pin_path) {
+ err = bpf_object__reuse_map(map);
+ if (err) {
+ pr_warn("error reusing pinned map %s\n",
+ map->name);
+ return err;
+ }
+ }
+
if (map->fd >= 0) {
pr_debug("skip map create (preset) %s: fd=%d\n",
map->name, map->fd);
@@ -2161,8 +2364,8 @@ bpf_object__create_maps(struct bpf_object *obj)
if (!nr_cpus)
nr_cpus = libbpf_num_possible_cpus();
if (nr_cpus < 0) {
- pr_warning("failed to determine number of system CPUs: %d\n",
- nr_cpus);
+ pr_warn("failed to determine number of system CPUs: %d\n",
+ nr_cpus);
err = nr_cpus;
goto err_out;
}
@@ -2190,8 +2393,8 @@ bpf_object__create_maps(struct bpf_object *obj)
create_attr.btf_value_type_id)) {
err = -errno;
cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
- pr_warning("Error in bpf_create_map_xattr(%s):%s(%d). Retrying without BTF.\n",
- map->name, cp, err);
+ pr_warn("Error in bpf_create_map_xattr(%s):%s(%d). Retrying without BTF.\n",
+ map->name, cp, err);
create_attr.btf_fd = 0;
create_attr.btf_key_type_id = 0;
create_attr.btf_value_type_id = 0;
@@ -2206,8 +2409,8 @@ bpf_object__create_maps(struct bpf_object *obj)
err = -errno;
err_out:
cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
- pr_warning("failed to create map (name: '%s'): %s(%d)\n",
- map->name, cp, err);
+ pr_warn("failed to create map (name: '%s'): %s(%d)\n",
+ map->name, cp, err);
for (j = 0; j < i; j++)
zclose(obj->maps[j].fd);
return err;
@@ -2221,6 +2424,15 @@ err_out:
}
}
+ if (map->pin_path && !map->pinned) {
+ err = bpf_map__pin(map, NULL);
+ if (err) {
+ pr_warn("failed to auto-pin map name '%s' at '%s'\n",
+ map->name, map->pin_path);
+ return err;
+ }
+ }
+
pr_debug("created map %s: fd=%d\n", map->name, *pfd);
}
@@ -2232,8 +2444,8 @@ check_btf_ext_reloc_err(struct bpf_program *prog, int err,
void *btf_prog_info, const char *info_name)
{
if (err != -ENOENT) {
- pr_warning("Error in loading %s for sec %s.\n",
- info_name, prog->section_name);
+ pr_warn("Error in loading %s for sec %s.\n",
+ info_name, prog->section_name);
return err;
}
@@ -2244,14 +2456,14 @@ check_btf_ext_reloc_err(struct bpf_program *prog, int err,
* Some info has already been found but has problem
* in the last btf_ext reloc. Must have to error out.
*/
- pr_warning("Error in relocating %s for sec %s.\n",
- info_name, prog->section_name);
+ pr_warn("Error in relocating %s for sec %s.\n",
+ info_name, prog->section_name);
return err;
}
/* Have problem loading the very first info. Ignore the rest. */
- pr_warning("Cannot find %s for main program sec %s. Ignore all %s.\n",
- info_name, prog->section_name, info_name);
+ pr_warn("Cannot find %s for main program sec %s. Ignore all %s.\n",
+ info_name, prog->section_name, info_name);
return 0;
}
@@ -2315,8 +2527,8 @@ struct bpf_core_spec {
int raw_spec[BPF_CORE_SPEC_MAX_LEN];
/* raw spec length */
int raw_len;
- /* field byte offset represented by spec */
- __u32 offset;
+ /* field bit offset represented by spec */
+ __u32 bit_offset;
};
static bool str_is_empty(const char *s)
@@ -2325,10 +2537,10 @@ static bool str_is_empty(const char *s)
}
/*
- * Turn bpf_offset_reloc into a low- and high-level spec representation,
+ * Turn bpf_field_reloc into a low- and high-level spec representation,
* validating correctness along the way, as well as calculating resulting
- * field offset (in bytes), specified by accessor string. Low-level spec
- * captures every single level of nestedness, including traversing anonymous
+ * field bit offset, specified by accessor string. Low-level spec captures
+ * every single level of nestedness, including traversing anonymous
* struct/union members. High-level one only captures semantically meaningful
* "turning points": named fields and array indices.
* E.g., for this case:
@@ -2400,7 +2612,7 @@ static int bpf_core_spec_parse(const struct btf *btf,
sz = btf__resolve_size(btf, id);
if (sz < 0)
return sz;
- spec->offset = access_idx * sz;
+ spec->bit_offset = access_idx * sz * 8;
for (i = 1; i < spec->raw_len; i++) {
t = skip_mods_and_typedefs(btf, id, &id);
@@ -2411,17 +2623,13 @@ static int bpf_core_spec_parse(const struct btf *btf,
if (btf_is_composite(t)) {
const struct btf_member *m;
- __u32 offset;
+ __u32 bit_offset;
if (access_idx >= btf_vlen(t))
return -EINVAL;
- if (btf_member_bitfield_size(t, access_idx))
- return -EINVAL;
- offset = btf_member_bit_offset(t, access_idx);
- if (offset % 8)
- return -EINVAL;
- spec->offset += offset / 8;
+ bit_offset = btf_member_bit_offset(t, access_idx);
+ spec->bit_offset += bit_offset;
m = btf_members(t) + access_idx;
if (m->name_off) {
@@ -2450,10 +2658,10 @@ static int bpf_core_spec_parse(const struct btf *btf,
sz = btf__resolve_size(btf, id);
if (sz < 0)
return sz;
- spec->offset += access_idx * sz;
+ spec->bit_offset += access_idx * sz * 8;
} else {
- pr_warning("relo for [%u] %s (at idx %d) captures type [%d] of unexpected kind %d\n",
- type_id, spec_str, i, id, btf_kind(t));
+ pr_warn("relo for [%u] %s (at idx %d) captures type [%d] of unexpected kind %d\n",
+ type_id, spec_str, i, id, btf_kind(t));
return -EINVAL;
}
}
@@ -2551,12 +2759,14 @@ err_out:
}
/* Check two types for compatibility, skipping const/volatile/restrict and
- * typedefs, to ensure we are relocating offset to the compatible entities:
+ * typedefs, to ensure we are relocating compatible entities:
* - any two STRUCTs/UNIONs are compatible and can be mixed;
- * - any two FWDs are compatible;
+ * - any two FWDs are compatible, if their names match (modulo flavor suffix);
* - any two PTRs are always compatible;
+ * - for ENUMs, names should be the same (ignoring flavor suffix) or at
+ * least one of enums should be anonymous;
* - for ENUMs, check sizes, names are ignored;
- * - for INT, size and bitness should match, signedness is ignored;
+ * - for INT, size and signedness are ignored;
* - for ARRAY, dimensionality is ignored, element types are checked for
* compatibility recursively;
* - everything else shouldn't be ever a target of relocation.
@@ -2582,23 +2792,36 @@ recur:
return 0;
switch (btf_kind(local_type)) {
- case BTF_KIND_FWD:
case BTF_KIND_PTR:
return 1;
- case BTF_KIND_ENUM:
- return local_type->size == targ_type->size;
+ case BTF_KIND_FWD:
+ case BTF_KIND_ENUM: {
+ const char *local_name, *targ_name;
+ size_t local_len, targ_len;
+
+ local_name = btf__name_by_offset(local_btf,
+ local_type->name_off);
+ targ_name = btf__name_by_offset(targ_btf, targ_type->name_off);
+ local_len = bpf_core_essential_name_len(local_name);
+ targ_len = bpf_core_essential_name_len(targ_name);
+ /* one of them is anonymous or both w/ same flavor-less names */
+ return local_len == 0 || targ_len == 0 ||
+ (local_len == targ_len &&
+ strncmp(local_name, targ_name, local_len) == 0);
+ }
case BTF_KIND_INT:
+ /* just reject deprecated bitfield-like integers; all other
+ * integers are by default compatible between each other
+ */
return btf_int_offset(local_type) == 0 &&
- btf_int_offset(targ_type) == 0 &&
- local_type->size == targ_type->size &&
- btf_int_bits(local_type) == btf_int_bits(targ_type);
+ btf_int_offset(targ_type) == 0;
case BTF_KIND_ARRAY:
local_id = btf_array(local_type)->type;
targ_id = btf_array(targ_type)->type;
goto recur;
default:
- pr_warning("unexpected kind %d relocated, local [%d], target [%d]\n",
- btf_kind(local_type), local_id, targ_id);
+ pr_warn("unexpected kind %d relocated, local [%d], target [%d]\n",
+ btf_kind(local_type), local_id, targ_id);
return 0;
}
}
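/*
 * Sketch (not part of the patch): effect of the flavor-aware name check.
 * bpf_core_essential_name_len() is assumed to strip a "___flavor"
 * suffix, so:
 *
 *   enum pid_type  vs  enum pid_type___v2   -> compatible (same base name)
 *   enum pid_type  vs  anonymous enum       -> compatible (one side unnamed)
 *   enum pid_type  vs  enum hrtimer_mode    -> not compatible
 */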
@@ -2607,7 +2830,7 @@ recur:
* Given single high-level named field accessor in local type, find
* corresponding high-level accessor for a target type. Along the way,
* maintain low-level spec for target as well. Also keep updating target
- * offset.
+ * bit offset.
*
* Searching is performed through recursive exhaustive enumeration of all
* fields of a struct/union. If there are any anonymous (embedded)
@@ -2646,21 +2869,16 @@ static int bpf_core_match_member(const struct btf *local_btf,
n = btf_vlen(targ_type);
m = btf_members(targ_type);
for (i = 0; i < n; i++, m++) {
- __u32 offset;
+ __u32 bit_offset;
- /* bitfield relocations not supported */
- if (btf_member_bitfield_size(targ_type, i))
- continue;
- offset = btf_member_bit_offset(targ_type, i);
- if (offset % 8)
- continue;
+ bit_offset = btf_member_bit_offset(targ_type, i);
/* too deep struct/union/array nesting */
if (spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
return -E2BIG;
/* speculate this member will be the good one */
- spec->offset += offset / 8;
+ spec->bit_offset += bit_offset;
spec->raw_spec[spec->raw_len++] = i;
targ_name = btf__name_by_offset(targ_btf, m->name_off);
@@ -2689,7 +2907,7 @@ static int bpf_core_match_member(const struct btf *local_btf,
return found;
}
/* member turned out not to be what we looked for */
- spec->offset -= offset / 8;
+ spec->bit_offset -= bit_offset;
spec->raw_len--;
}
@@ -2698,7 +2916,7 @@ static int bpf_core_match_member(const struct btf *local_btf,
/*
* Try to match local spec to a target type and, if successful, produce full
- * target spec (high-level, low-level + offset).
+ * target spec (high-level, low-level + bit offset).
*/
static int bpf_core_spec_match(struct bpf_core_spec *local_spec,
const struct btf *targ_btf, __u32 targ_id,
@@ -2761,35 +2979,165 @@ static int bpf_core_spec_match(struct bpf_core_spec *local_spec,
sz = btf__resolve_size(targ_btf, targ_id);
if (sz < 0)
return sz;
- targ_spec->offset += local_acc->idx * sz;
+ targ_spec->bit_offset += local_acc->idx * sz * 8;
}
}
return 1;
}
+static int bpf_core_calc_field_relo(const struct bpf_program *prog,
+ const struct bpf_field_reloc *relo,
+ const struct bpf_core_spec *spec,
+ __u32 *val, bool *validate)
+{
+ const struct bpf_core_accessor *acc = &spec->spec[spec->len - 1];
+ const struct btf_type *t = btf__type_by_id(spec->btf, acc->type_id);
+ __u32 byte_off, byte_sz, bit_off, bit_sz;
+ const struct btf_member *m;
+ const struct btf_type *mt;
+ bool bitfield;
+ __s64 sz;
+
+ /* a[n] accessor needs special handling */
+ if (!acc->name) {
+ if (relo->kind == BPF_FIELD_BYTE_OFFSET) {
+ *val = spec->bit_offset / 8;
+ } else if (relo->kind == BPF_FIELD_BYTE_SIZE) {
+ sz = btf__resolve_size(spec->btf, acc->type_id);
+ if (sz < 0)
+ return -EINVAL;
+ *val = sz;
+ } else {
+ pr_warn("prog '%s': relo %d at insn #%d can't be applied to array access\n",
+ bpf_program__title(prog, false),
+ relo->kind, relo->insn_off / 8);
+ return -EINVAL;
+ }
+ if (validate)
+ *validate = true;
+ return 0;
+ }
+
+ m = btf_members(t) + acc->idx;
+ mt = skip_mods_and_typedefs(spec->btf, m->type, NULL);
+ bit_off = spec->bit_offset;
+ bit_sz = btf_member_bitfield_size(t, acc->idx);
+
+ bitfield = bit_sz > 0;
+ if (bitfield) {
+ byte_sz = mt->size;
+ byte_off = bit_off / 8 / byte_sz * byte_sz;
+ /* figure out smallest int size necessary for bitfield load */
+ while (bit_off + bit_sz - byte_off * 8 > byte_sz * 8) {
+ if (byte_sz >= 8) {
+ /* bitfield can't be read with 64-bit read */
+ pr_warn("prog '%s': relo %d at insn #%d can't be satisfied for bitfield\n",
+ bpf_program__title(prog, false),
+ relo->kind, relo->insn_off / 8);
+ return -E2BIG;
+ }
+ byte_sz *= 2;
+ byte_off = bit_off / 8 / byte_sz * byte_sz;
+ }
+ } else {
+ sz = btf__resolve_size(spec->btf, m->type);
+ if (sz < 0)
+ return -EINVAL;
+ byte_sz = sz;
+ byte_off = spec->bit_offset / 8;
+ bit_sz = byte_sz * 8;
+ }
+
+ /* for bitfields, all the relocatable aspects are ambiguous and we
+ * might disagree with the compiler, so turn off validation of expected
+ * value, except for signedness
+ */
+ if (validate)
+ *validate = !bitfield;
+
+ switch (relo->kind) {
+ case BPF_FIELD_BYTE_OFFSET:
+ *val = byte_off;
+ break;
+ case BPF_FIELD_BYTE_SIZE:
+ *val = byte_sz;
+ break;
+ case BPF_FIELD_SIGNED:
+ /* enums will be assumed unsigned */
+ *val = btf_is_enum(mt) ||
+ (btf_int_encoding(mt) & BTF_INT_SIGNED);
+ if (validate)
+ *validate = true; /* signedness is never ambiguous */
+ break;
+ case BPF_FIELD_LSHIFT_U64:
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+ *val = 64 - (bit_off + bit_sz - byte_off * 8);
+#else
+ *val = (8 - byte_sz) * 8 + (bit_off - byte_off * 8);
+#endif
+ break;
+ case BPF_FIELD_RSHIFT_U64:
+ *val = 64 - bit_sz;
+ if (validate)
+ *validate = true; /* right shift is never ambiguous */
+ break;
+ case BPF_FIELD_EXISTS:
+ default:
+ pr_warn("prog '%s': unknown relo %d at insn #%d\n",
+ bpf_program__title(prog, false),
+ relo->kind, relo->insn_off / 8);
+ return -EINVAL;
+ }
+
+ return 0;
+}
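/*
 * Worked example (not part of the patch) of the bitfield math above, on
 * little-endian, for a hypothetical field with bit_off = 36, bit_sz = 5
 * inside a 4-byte int:
 *
 *   byte_sz  = 4
 *   byte_off = 36 / 8 / 4 * 4 = 4
 *   36 + 5 - 4 * 8 = 9 <= 32, so one 4-byte load at offset 4 covers it
 *
 *   BPF_FIELD_BYTE_OFFSET -> 4
 *   BPF_FIELD_BYTE_SIZE   -> 4
 *   BPF_FIELD_LSHIFT_U64  -> 64 - (36 + 5 - 32) = 55
 *   BPF_FIELD_RSHIFT_U64  -> 64 - 5 = 59
 *
 * i.e. load 4 bytes, shift the 64-bit value left by 55 and right by 59
 * to extract the 5-bit field.
 */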
+
/*
* Patch relocatable BPF instruction.
- * Expected insn->imm value is provided for validation, as well as the new
- * relocated value.
+ *
+ * Patched value is determined by relocation kind and target specification.
+ * For field existence relocation target spec will be NULL if field is not
+ * found.
+ * Expected insn->imm value is determined using relocation kind and local
+ * spec, and is checked before patching instruction. If actual insn->imm value
+ * is wrong, bail out with error.
*
* Currently three kinds of BPF instructions are supported:
* 1. rX = <imm> (assignment with immediate operand);
* 2. rX += <imm> (arithmetic operations with immediate operand);
- * 3. *(rX) = <imm> (indirect memory assignment with immediate operand).
- *
- * If actual insn->imm value is wrong, bail out.
*/
-static int bpf_core_reloc_insn(struct bpf_program *prog, int insn_off,
- __u32 orig_off, __u32 new_off)
+static int bpf_core_reloc_insn(struct bpf_program *prog,
+ const struct bpf_field_reloc *relo,
+ const struct bpf_core_spec *local_spec,
+ const struct bpf_core_spec *targ_spec)
{
+ bool failed = false, validate = true;
+ __u32 orig_val, new_val;
struct bpf_insn *insn;
- int insn_idx;
+ int insn_idx, err;
__u8 class;
- if (insn_off % sizeof(struct bpf_insn))
+ if (relo->insn_off % sizeof(struct bpf_insn))
return -EINVAL;
- insn_idx = insn_off / sizeof(struct bpf_insn);
+ insn_idx = relo->insn_off / sizeof(struct bpf_insn);
+
+ if (relo->kind == BPF_FIELD_EXISTS) {
+ orig_val = 1; /* can't generate EXISTS relo w/o local field */
+ new_val = targ_spec ? 1 : 0;
+ } else if (!targ_spec) {
+ failed = true;
+ new_val = (__u32)-1;
+ } else {
+ err = bpf_core_calc_field_relo(prog, relo, local_spec,
+ &orig_val, &validate);
+ if (err)
+ return err;
+ err = bpf_core_calc_field_relo(prog, relo, targ_spec,
+ &new_val, NULL);
+ if (err)
+ return err;
+ }
insn = &prog->insns[insn_idx];
class = BPF_CLASS(insn->code);
@@ -2797,19 +3145,25 @@ static int bpf_core_reloc_insn(struct bpf_program *prog, int insn_off,
if (class == BPF_ALU || class == BPF_ALU64) {
if (BPF_SRC(insn->code) != BPF_K)
return -EINVAL;
- if (insn->imm != orig_off)
+ if (!failed && validate && insn->imm != orig_val) {
+ pr_warn("prog '%s': unexpected insn #%d value: got %u, exp %u -> %u\n",
+ bpf_program__title(prog, false), insn_idx,
+ insn->imm, orig_val, new_val);
return -EINVAL;
- insn->imm = new_off;
- pr_debug("prog '%s': patched insn #%d (ALU/ALU64) imm %d -> %d\n",
- bpf_program__title(prog, false),
- insn_idx, orig_off, new_off);
+ }
+ orig_val = insn->imm;
+ insn->imm = new_val;
+ pr_debug("prog '%s': patched insn #%d (ALU/ALU64)%s imm %u -> %u\n",
+ bpf_program__title(prog, false), insn_idx,
+ failed ? " w/ failed reloc" : "", orig_val, new_val);
} else {
- pr_warning("prog '%s': trying to relocate unrecognized insn #%d, code:%x, src:%x, dst:%x, off:%x, imm:%x\n",
- bpf_program__title(prog, false),
- insn_idx, insn->code, insn->src_reg, insn->dst_reg,
- insn->off, insn->imm);
+ pr_warn("prog '%s': trying to relocate unrecognized insn #%d, code:%x, src:%x, dst:%x, off:%x, imm:%x\n",
+ bpf_program__title(prog, false),
+ insn_idx, insn->code, insn->src_reg, insn->dst_reg,
+ insn->off, insn->imm);
return -EINVAL;
}
+
return 0;
}
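/*
 * Sketch (not part of the patch): the instruction shape being patched.
 * With __builtin_preserve_access_index(), clang compiles a field access
 * such as BPF_CORE_READ(task, pid) into ALU/ALU64 immediates, e.g.
 * "r3 += 1256", where 1256 is the field offset per the *local* BTF;
 * bpf_core_reloc_insn() rewrites insn->imm to the offset found in the
 * running kernel's BTF (offsets here are hypothetical).
 */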
@@ -2895,7 +3249,7 @@ static struct btf *bpf_core_find_kernel_btf(void)
return btf;
}
- pr_warning("failed to find valid kernel BTF\n");
+ pr_warn("failed to find valid kernel BTF\n");
return ERR_PTR(-ESRCH);
}
@@ -2919,7 +3273,8 @@ static void bpf_core_dump_spec(int level, const struct bpf_core_spec *spec)
libbpf_print(level, "%d%s", spec->raw_spec[i],
i == spec->raw_len - 1 ? " => " : ":");
- libbpf_print(level, "%u @ &x", spec->offset);
+ libbpf_print(level, "%u.%u @ &x",
+ spec->bit_offset / 8, spec->bit_offset % 8);
for (i = 0; i < spec->len; i++) {
if (spec->spec[i].name)
@@ -2976,7 +3331,7 @@ static void *u32_as_hash_key(__u32 x)
* types should be compatible (see bpf_core_fields_are_compat for details).
* 3. It is supported and expected that there might be multiple flavors
* matching the spec. As long as all the specs resolve to the same set of
- * offsets across all candidates, there is not error. If there is any
+ * offsets across all candidates, there is no error. If there is any
* ambiguity, CO-RE relocation will fail. This is necessary to accommodate
* imperfection of BTF deduplication, which can cause slight duplication of
* the same BTF type, if some directly or indirectly referenced (by
@@ -2991,12 +3346,12 @@ static void *u32_as_hash_key(__u32 x)
* CPU-wise compared to prebuilding a map from all local type names to
* a list of candidate type names. It's also sped up by caching resolved
* list of matching candidates per each local "root" type ID, that has at
- * least one bpf_offset_reloc associated with it. This list is shared
+ * least one bpf_field_reloc associated with it. This list is shared
* between multiple relocations for the same type ID and is updated as some
* of the candidates are pruned due to structural incompatibility.
*/
-static int bpf_core_reloc_offset(struct bpf_program *prog,
- const struct bpf_offset_reloc *relo,
+static int bpf_core_reloc_field(struct bpf_program *prog,
+ const struct bpf_field_reloc *relo,
int relo_idx,
const struct btf *local_btf,
const struct btf *targ_btf,
@@ -3027,22 +3382,23 @@ static int bpf_core_reloc_offset(struct bpf_program *prog,
err = bpf_core_spec_parse(local_btf, local_id, spec_str, &local_spec);
if (err) {
- pr_warning("prog '%s': relo #%d: parsing [%d] %s + %s failed: %d\n",
- prog_name, relo_idx, local_id, local_name, spec_str,
- err);
+ pr_warn("prog '%s': relo #%d: parsing [%d] %s + %s failed: %d\n",
+ prog_name, relo_idx, local_id, local_name, spec_str,
+ err);
return -EINVAL;
}
- pr_debug("prog '%s': relo #%d: spec is ", prog_name, relo_idx);
+ pr_debug("prog '%s': relo #%d: kind %d, spec is ", prog_name, relo_idx,
+ relo->kind);
bpf_core_dump_spec(LIBBPF_DEBUG, &local_spec);
libbpf_print(LIBBPF_DEBUG, "\n");
if (!hashmap__find(cand_cache, type_key, (void **)&cand_ids)) {
cand_ids = bpf_core_find_cands(local_btf, local_id, targ_btf);
if (IS_ERR(cand_ids)) {
- pr_warning("prog '%s': relo #%d: target candidate search failed for [%d] %s: %ld",
- prog_name, relo_idx, local_id, local_name,
- PTR_ERR(cand_ids));
+ pr_warn("prog '%s': relo #%d: target candidate search failed for [%d] %s: %ld",
+ prog_name, relo_idx, local_id, local_name,
+ PTR_ERR(cand_ids));
return PTR_ERR(cand_ids);
}
err = hashmap__set(cand_cache, type_key, cand_ids, NULL, NULL);
@@ -3064,8 +3420,8 @@ static int bpf_core_reloc_offset(struct bpf_program *prog,
bpf_core_dump_spec(LIBBPF_DEBUG, &cand_spec);
libbpf_print(LIBBPF_DEBUG, ": %d\n", err);
if (err < 0) {
- pr_warning("prog '%s': relo #%d: matching error: %d\n",
- prog_name, relo_idx, err);
+ pr_warn("prog '%s': relo #%d: matching error: %d\n",
+ prog_name, relo_idx, err);
return err;
}
if (err == 0)
@@ -3073,31 +3429,42 @@ static int bpf_core_reloc_offset(struct bpf_program *prog,
if (j == 0) {
targ_spec = cand_spec;
- } else if (cand_spec.offset != targ_spec.offset) {
+ } else if (cand_spec.bit_offset != targ_spec.bit_offset) {
/* if there are many candidates, they should all
- * resolve to the same offset
+ * resolve to the same bit offset
*/
- pr_warning("prog '%s': relo #%d: offset ambiguity: %u != %u\n",
- prog_name, relo_idx, cand_spec.offset,
- targ_spec.offset);
+ pr_warn("prog '%s': relo #%d: offset ambiguity: %u != %u\n",
+ prog_name, relo_idx, cand_spec.bit_offset,
+ targ_spec.bit_offset);
return -EINVAL;
}
cand_ids->data[j++] = cand_spec.spec[0].type_id;
}
- cand_ids->len = j;
- if (cand_ids->len == 0) {
- pr_warning("prog '%s': relo #%d: no matching targets found for [%d] %s + %s\n",
- prog_name, relo_idx, local_id, local_name, spec_str);
+ /*
+ * For BPF_FIELD_EXISTS relo or when relaxed CO-RE reloc mode is
+ * requested, it's expected that we might not find any candidates.
+ * In this case, if the field wasn't found in any candidate, the list of
+ * candidates shouldn't change at all; we'll just handle the relocation
+ * appropriately, depending on the relo's kind.
+ */
+ if (j > 0)
+ cand_ids->len = j;
+
+ if (j == 0 && !prog->obj->relaxed_core_relocs &&
+ relo->kind != BPF_FIELD_EXISTS) {
+ pr_warn("prog '%s': relo #%d: no matching targets found for [%d] %s + %s\n",
+ prog_name, relo_idx, local_id, local_name, spec_str);
return -ESRCH;
}
- err = bpf_core_reloc_insn(prog, relo->insn_off,
- local_spec.offset, targ_spec.offset);
+ /* bpf_core_reloc_insn should know how to handle missing targ_spec */
+ err = bpf_core_reloc_insn(prog, relo, &local_spec,
+ j ? &targ_spec : NULL);
if (err) {
- pr_warning("prog '%s': relo #%d: failed to patch insn at offset %d: %d\n",
- prog_name, relo_idx, relo->insn_off, err);
+ pr_warn("prog '%s': relo #%d: failed to patch insn at offset %d: %d\n",
+ prog_name, relo_idx, relo->insn_off, err);
return -EINVAL;
}
@@ -3105,10 +3472,10 @@ static int bpf_core_reloc_offset(struct bpf_program *prog,
}
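
For context, BPF_FIELD_EXISTS relocations are what the bpf_core_field_exists() helper from bpf_core_read.h compiles down to. A minimal, hypothetical program-side sketch of the situation the comment above tolerates (the kernel function and field names are examples only):

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>

char LICENSE[] SEC("license") = "GPL";

SEC("kprobe/do_nanosleep")
int probe_sleep(void *ctx)
{
	struct task_struct *task;
	int prio = -1;

	task = (struct task_struct *)bpf_get_current_task();
	/* emits a BPF_FIELD_EXISTS relocation; with the handling above,
	 * a field missing on the target kernel no longer fails the load
	 */
	if (bpf_core_field_exists(task->prio))
		prio = BPF_CORE_READ(task, prio);
	return prio >= 0;
}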
static int
-bpf_core_reloc_offsets(struct bpf_object *obj, const char *targ_btf_path)
+bpf_core_reloc_fields(struct bpf_object *obj, const char *targ_btf_path)
{
const struct btf_ext_info_sec *sec;
- const struct bpf_offset_reloc *rec;
+ const struct bpf_field_reloc *rec;
const struct btf_ext_info *seg;
struct hashmap_entry *entry;
struct hashmap *cand_cache = NULL;
@@ -3122,8 +3489,7 @@ bpf_core_reloc_offsets(struct bpf_object *obj, const char *targ_btf_path)
else
targ_btf = bpf_core_find_kernel_btf();
if (IS_ERR(targ_btf)) {
- pr_warning("failed to get target BTF: %ld\n",
- PTR_ERR(targ_btf));
+ pr_warn("failed to get target BTF: %ld\n", PTR_ERR(targ_btf));
return PTR_ERR(targ_btf);
}
@@ -3133,7 +3499,7 @@ bpf_core_reloc_offsets(struct bpf_object *obj, const char *targ_btf_path)
goto out;
}
- seg = &obj->btf_ext->offset_reloc_info;
+ seg = &obj->btf_ext->field_reloc_info;
for_each_btf_ext_sec(seg, sec) {
sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off);
if (str_is_empty(sec_name)) {
@@ -3142,8 +3508,8 @@ bpf_core_reloc_offsets(struct bpf_object *obj, const char *targ_btf_path)
}
prog = bpf_object__find_program_by_title(obj, sec_name);
if (!prog) {
- pr_warning("failed to find program '%s' for CO-RE offset relocation\n",
- sec_name);
+ pr_warn("failed to find program '%s' for CO-RE offset relocation\n",
+ sec_name);
err = -EINVAL;
goto out;
}
@@ -3152,11 +3518,11 @@ bpf_core_reloc_offsets(struct bpf_object *obj, const char *targ_btf_path)
sec_name, sec->num_info);
for_each_btf_ext_rec(seg, sec, i, rec) {
- err = bpf_core_reloc_offset(prog, rec, i, obj->btf,
- targ_btf, cand_cache);
+ err = bpf_core_reloc_field(prog, rec, i, obj->btf,
+ targ_btf, cand_cache);
if (err) {
- pr_warning("prog '%s': relo #%d: failed to relocate: %d\n",
- sec_name, i, err);
+ pr_warn("prog '%s': relo #%d: failed to relocate: %d\n",
+ sec_name, i, err);
goto out;
}
}
@@ -3178,8 +3544,8 @@ bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path)
{
int err = 0;
- if (obj->btf_ext->offset_reloc_info.len)
- err = bpf_core_reloc_offsets(obj, targ_btf_path);
+ if (obj->btf_ext->field_reloc_info.len)
+ err = bpf_core_reloc_fields(obj, targ_btf_path);
return err;
}
@@ -3197,23 +3563,24 @@ bpf_program__reloc_text(struct bpf_program *prog, struct bpf_object *obj,
return -LIBBPF_ERRNO__RELOC;
if (prog->idx == obj->efile.text_shndx) {
- pr_warning("relo in .text insn %d into off %d\n",
- relo->insn_idx, relo->text_off);
+ pr_warn("relo in .text insn %d into off %d\n",
+ relo->insn_idx, relo->text_off);
return -LIBBPF_ERRNO__RELOC;
}
if (prog->main_prog_cnt == 0) {
text = bpf_object__find_prog_by_idx(obj, obj->efile.text_shndx);
if (!text) {
- pr_warning("no .text section found yet relo into text exist\n");
+ pr_warn("no .text section found yet relo into text exist\n");
return -LIBBPF_ERRNO__RELOC;
}
new_cnt = prog->insns_cnt + text->insns_cnt;
new_insn = reallocarray(prog->insns, new_cnt, sizeof(*insn));
if (!new_insn) {
- pr_warning("oom in prog realloc\n");
+ pr_warn("oom in prog realloc\n");
return -ENOMEM;
}
+ prog->insns = new_insn;
if (obj->btf_ext) {
err = bpf_program_reloc_btf_ext(prog, obj,
@@ -3225,7 +3592,6 @@ bpf_program__reloc_text(struct bpf_program *prog, struct bpf_object *obj,
memcpy(new_insn + prog->insns_cnt, text->insns,
text->insns_cnt * sizeof(*insn));
- prog->insns = new_insn;
prog->main_prog_cnt = prog->insns_cnt;
prog->insns_cnt = new_cnt;
pr_debug("added %zd insn from %s to prog %s\n",
@@ -3233,7 +3599,7 @@ bpf_program__reloc_text(struct bpf_program *prog, struct bpf_object *obj,
prog->section_name);
}
insn = &prog->insns[relo->insn_idx];
- insn->imm += prog->main_prog_cnt - relo->insn_idx;
+ insn->imm += relo->text_off + prog->main_prog_cnt - relo->insn_idx;
return 0;
}
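
The imm adjustment above is the substantive fix in this hunk: the old formula ignored relo->text_off, so every call relocated as if its callee sat at the very start of .text. (Moving the prog->insns assignment up similarly ensures the reallocated buffer is in place before bpf_program_reloc_btf_ext() runs.) An illustrative layout with made-up numbers:

/*
 *   prog->insns[0 .. 99]   original program; call at insn_idx = 5
 *   prog->insns[100 .. ]   appended .text; callee at text_off = 8
 *
 * target index = main_prog_cnt + text_off = 100 + 8 = 108
 * adjustment   = text_off + main_prog_cnt - insn_idx = 8 + 100 - 5 = 103
 *
 * The old "main_prog_cnt - insn_idx" (= 95) always aimed at insns[100],
 * the first instruction of .text, regardless of the actual callee.
 */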
@@ -3266,8 +3632,8 @@ bpf_program__relocate(struct bpf_program *prog, struct bpf_object *obj)
map_idx = prog->reloc_desc[i].map_idx;
if (insn_idx + 1 >= (int)prog->insns_cnt) {
- pr_warning("relocation out of range: '%s'\n",
- prog->section_name);
+ pr_warn("relocation out of range: '%s'\n",
+ prog->section_name);
return -LIBBPF_ERRNO__RELOC;
}
@@ -3301,8 +3667,8 @@ bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path)
if (obj->btf_ext) {
err = bpf_object__relocate_core(obj, targ_btf_path);
if (err) {
- pr_warning("failed to perform CO-RE relocations: %d\n",
- err);
+ pr_warn("failed to perform CO-RE relocations: %d\n",
+ err);
return err;
}
}
@@ -3311,8 +3677,7 @@ bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path)
err = bpf_program__relocate(prog, obj);
if (err) {
- pr_warning("failed to relocate '%s'\n",
- prog->section_name);
+ pr_warn("failed to relocate '%s'\n", prog->section_name);
return err;
}
}
@@ -3324,24 +3689,24 @@ static int bpf_object__collect_reloc(struct bpf_object *obj)
int i, err;
if (!obj_elf_valid(obj)) {
- pr_warning("Internal error: elf object is closed\n");
+ pr_warn("Internal error: elf object is closed\n");
return -LIBBPF_ERRNO__INTERNAL;
}
- for (i = 0; i < obj->efile.nr_reloc; i++) {
- GElf_Shdr *shdr = &obj->efile.reloc[i].shdr;
- Elf_Data *data = obj->efile.reloc[i].data;
+ for (i = 0; i < obj->efile.nr_reloc_sects; i++) {
+ GElf_Shdr *shdr = &obj->efile.reloc_sects[i].shdr;
+ Elf_Data *data = obj->efile.reloc_sects[i].data;
int idx = shdr->sh_info;
struct bpf_program *prog;
if (shdr->sh_type != SHT_REL) {
- pr_warning("internal error at %d\n", __LINE__);
+ pr_warn("internal error at %d\n", __LINE__);
return -LIBBPF_ERRNO__INTERNAL;
}
prog = bpf_object__find_prog_by_idx(obj, idx);
if (!prog) {
- pr_warning("relocation failed: no section(%d)\n", idx);
+ pr_warn("relocation failed: no section(%d)\n", idx);
return -LIBBPF_ERRNO__RELOC;
}
@@ -3373,8 +3738,13 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
load_attr.insns = insns;
load_attr.insns_cnt = insns_cnt;
load_attr.license = license;
- load_attr.kern_version = kern_version;
- load_attr.prog_ifindex = prog->prog_ifindex;
+ if (prog->type == BPF_PROG_TYPE_TRACING) {
+ load_attr.attach_prog_fd = prog->attach_prog_fd;
+ load_attr.attach_btf_id = prog->attach_btf_id;
+ } else {
+ load_attr.kern_version = kern_version;
+ load_attr.prog_ifindex = prog->prog_ifindex;
+ }
/* if .BTF.ext was loaded, kernel supports associated BTF for prog */
if (prog->obj->btf_ext)
btf_fd = bpf_object__btf_fd(prog->obj);
@@ -3393,7 +3763,7 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
retry_load:
log_buf = malloc(log_buf_size);
if (!log_buf)
- pr_warning("Alloc log buffer for bpf loader error, continue without log\n");
+ pr_warn("Alloc log buffer for bpf loader error, continue without log\n");
ret = bpf_load_program_xattr(&load_attr, log_buf, log_buf_size);
@@ -3410,36 +3780,31 @@ retry_load:
free(log_buf);
goto retry_load;
}
- ret = -LIBBPF_ERRNO__LOAD;
+ ret = -errno;
cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
- pr_warning("load bpf program failed: %s\n", cp);
+ pr_warn("load bpf program failed: %s\n", cp);
if (log_buf && log_buf[0] != '\0') {
ret = -LIBBPF_ERRNO__VERIFY;
- pr_warning("-- BEGIN DUMP LOG ---\n");
- pr_warning("\n%s\n", log_buf);
- pr_warning("-- END LOG --\n");
+ pr_warn("-- BEGIN DUMP LOG ---\n");
+ pr_warn("\n%s\n", log_buf);
+ pr_warn("-- END LOG --\n");
} else if (load_attr.insns_cnt >= BPF_MAXINSNS) {
- pr_warning("Program too large (%zu insns), at most %d insns\n",
- load_attr.insns_cnt, BPF_MAXINSNS);
+ pr_warn("Program too large (%zu insns), at most %d insns\n",
+ load_attr.insns_cnt, BPF_MAXINSNS);
ret = -LIBBPF_ERRNO__PROG2BIG;
- } else {
+ } else if (load_attr.prog_type != BPF_PROG_TYPE_KPROBE) {
/* Wrong program type? */
- if (load_attr.prog_type != BPF_PROG_TYPE_KPROBE) {
- int fd;
-
- load_attr.prog_type = BPF_PROG_TYPE_KPROBE;
- load_attr.expected_attach_type = 0;
- fd = bpf_load_program_xattr(&load_attr, NULL, 0);
- if (fd >= 0) {
- close(fd);
- ret = -LIBBPF_ERRNO__PROGTYPE;
- goto out;
- }
- }
+ int fd;
- if (log_buf)
- ret = -LIBBPF_ERRNO__KVER;
+ load_attr.prog_type = BPF_PROG_TYPE_KPROBE;
+ load_attr.expected_attach_type = 0;
+ fd = bpf_load_program_xattr(&load_attr, NULL, 0);
+ if (fd >= 0) {
+ close(fd);
+ ret = -LIBBPF_ERRNO__PROGTYPE;
+ goto out;
+ }
}
out:
@@ -3455,14 +3820,14 @@ bpf_program__load(struct bpf_program *prog,
if (prog->instances.nr < 0 || !prog->instances.fds) {
if (prog->preprocessor) {
- pr_warning("Internal error: can't load program '%s'\n",
- prog->section_name);
+ pr_warn("Internal error: can't load program '%s'\n",
+ prog->section_name);
return -LIBBPF_ERRNO__INTERNAL;
}
prog->instances.fds = malloc(sizeof(int));
if (!prog->instances.fds) {
- pr_warning("Not enough memory for BPF fds\n");
+ pr_warn("Not enough memory for BPF fds\n");
return -ENOMEM;
}
prog->instances.nr = 1;
@@ -3471,8 +3836,8 @@ bpf_program__load(struct bpf_program *prog,
if (!prog->preprocessor) {
if (prog->instances.nr != 1) {
- pr_warning("Program '%s' is inconsistent: nr(%d) != 1\n",
- prog->section_name, prog->instances.nr);
+ pr_warn("Program '%s' is inconsistent: nr(%d) != 1\n",
+ prog->section_name, prog->instances.nr);
}
err = load_program(prog, prog->insns, prog->insns_cnt,
license, kern_version, &fd);
@@ -3489,8 +3854,8 @@ bpf_program__load(struct bpf_program *prog,
err = preprocessor(prog, i, prog->insns,
prog->insns_cnt, &result);
if (err) {
- pr_warning("Preprocessing the %dth instance of program '%s' failed\n",
- i, prog->section_name);
+ pr_warn("Preprocessing the %dth instance of program '%s' failed\n",
+ i, prog->section_name);
goto out;
}
@@ -3508,8 +3873,8 @@ bpf_program__load(struct bpf_program *prog,
license, kern_version, &fd);
if (err) {
- pr_warning("Loading the %dth instance of program '%s' failed\n",
- i, prog->section_name);
+ pr_warn("Loading the %dth instance of program '%s' failed\n",
+ i, prog->section_name);
goto out;
}
@@ -3519,8 +3884,7 @@ bpf_program__load(struct bpf_program *prog,
}
out:
if (err)
- pr_warning("failed to load program '%s'\n",
- prog->section_name);
+ pr_warn("failed to load program '%s'\n", prog->section_name);
zfree(&prog->insns);
prog->insns_cnt = 0;
return err;
@@ -3551,93 +3915,104 @@ bpf_object__load_progs(struct bpf_object *obj, int log_level)
return 0;
}
-static bool bpf_prog_type__needs_kver(enum bpf_prog_type type)
-{
- switch (type) {
- case BPF_PROG_TYPE_SOCKET_FILTER:
- case BPF_PROG_TYPE_SCHED_CLS:
- case BPF_PROG_TYPE_SCHED_ACT:
- case BPF_PROG_TYPE_XDP:
- case BPF_PROG_TYPE_CGROUP_SKB:
- case BPF_PROG_TYPE_CGROUP_SOCK:
- case BPF_PROG_TYPE_LWT_IN:
- case BPF_PROG_TYPE_LWT_OUT:
- case BPF_PROG_TYPE_LWT_XMIT:
- case BPF_PROG_TYPE_LWT_SEG6LOCAL:
- case BPF_PROG_TYPE_SOCK_OPS:
- case BPF_PROG_TYPE_SK_SKB:
- case BPF_PROG_TYPE_CGROUP_DEVICE:
- case BPF_PROG_TYPE_SK_MSG:
- case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
- case BPF_PROG_TYPE_LIRC_MODE2:
- case BPF_PROG_TYPE_SK_REUSEPORT:
- case BPF_PROG_TYPE_FLOW_DISSECTOR:
- case BPF_PROG_TYPE_UNSPEC:
- case BPF_PROG_TYPE_TRACEPOINT:
- case BPF_PROG_TYPE_RAW_TRACEPOINT:
- case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
- case BPF_PROG_TYPE_PERF_EVENT:
- case BPF_PROG_TYPE_CGROUP_SYSCTL:
- case BPF_PROG_TYPE_CGROUP_SOCKOPT:
- return false;
- case BPF_PROG_TYPE_KPROBE:
- default:
- return true;
- }
-}
-
-static int bpf_object__validate(struct bpf_object *obj, bool needs_kver)
-{
- if (needs_kver && obj->kern_version == 0) {
- pr_warning("%s doesn't provide kernel version\n",
- obj->path);
- return -LIBBPF_ERRNO__KVERSION;
- }
- return 0;
-}
-
+static int libbpf_find_attach_btf_id(const char *name,
+ enum bpf_attach_type attach_type,
+ __u32 attach_prog_fd);
static struct bpf_object *
-__bpf_object__open(const char *path, void *obj_buf, size_t obj_buf_sz,
- bool needs_kver, int flags)
+__bpf_object__open(const char *path, const void *obj_buf, size_t obj_buf_sz,
+ struct bpf_object_open_opts *opts)
{
+ const char *pin_root_path;
+ struct bpf_program *prog;
struct bpf_object *obj;
+ const char *obj_name;
+ char tmp_name[64];
+ bool relaxed_maps;
+ __u32 attach_prog_fd;
int err;
if (elf_version(EV_CURRENT) == EV_NONE) {
- pr_warning("failed to init libelf for %s\n", path);
+ pr_warn("failed to init libelf for %s\n",
+ path ? : "(mem buf)");
return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
}
- obj = bpf_object__new(path, obj_buf, obj_buf_sz);
+ if (!OPTS_VALID(opts, bpf_object_open_opts))
+ return ERR_PTR(-EINVAL);
+
+ obj_name = OPTS_GET(opts, object_name, NULL);
+ if (obj_buf) {
+ if (!obj_name) {
+ snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx",
+ (unsigned long)obj_buf,
+ (unsigned long)obj_buf_sz);
+ obj_name = tmp_name;
+ }
+ path = obj_name;
+ pr_debug("loading object '%s' from buffer\n", obj_name);
+ }
+
+ obj = bpf_object__new(path, obj_buf, obj_buf_sz, obj_name);
if (IS_ERR(obj))
return obj;
+ obj->relaxed_core_relocs = OPTS_GET(opts, relaxed_core_relocs, false);
+ relaxed_maps = OPTS_GET(opts, relaxed_maps, false);
+ pin_root_path = OPTS_GET(opts, pin_root_path, NULL);
+ attach_prog_fd = OPTS_GET(opts, attach_prog_fd, 0);
+
CHECK_ERR(bpf_object__elf_init(obj), err, out);
CHECK_ERR(bpf_object__check_endianness(obj), err, out);
CHECK_ERR(bpf_object__probe_caps(obj), err, out);
- CHECK_ERR(bpf_object__elf_collect(obj, flags), err, out);
+ CHECK_ERR(bpf_object__elf_collect(obj, relaxed_maps, pin_root_path),
+ err, out);
CHECK_ERR(bpf_object__collect_reloc(obj), err, out);
- CHECK_ERR(bpf_object__validate(obj, needs_kver), err, out);
-
bpf_object__elf_finish(obj);
+
+ bpf_object__for_each_program(prog, obj) {
+ enum bpf_prog_type prog_type;
+ enum bpf_attach_type attach_type;
+
+ err = libbpf_prog_type_by_name(prog->section_name, &prog_type,
+ &attach_type);
+ if (err == -ESRCH)
+ /* couldn't guess, but user might manually specify */
+ continue;
+ if (err)
+ goto out;
+
+ bpf_program__set_type(prog, prog_type);
+ bpf_program__set_expected_attach_type(prog, attach_type);
+ if (prog_type == BPF_PROG_TYPE_TRACING) {
+ err = libbpf_find_attach_btf_id(prog->section_name,
+ attach_type,
+ attach_prog_fd);
+ if (err <= 0)
+ goto out;
+ prog->attach_btf_id = err;
+ prog->attach_prog_fd = attach_prog_fd;
+ }
+ }
+
return obj;
out:
bpf_object__close(obj);
return ERR_PTR(err);
}
-struct bpf_object *__bpf_object__open_xattr(struct bpf_object_open_attr *attr,
- int flags)
+static struct bpf_object *
+__bpf_object__open_xattr(struct bpf_object_open_attr *attr, int flags)
{
+ DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
+ .relaxed_maps = flags & MAPS_RELAX_COMPAT,
+ );
+
/* param validation */
if (!attr->file)
return NULL;
pr_debug("loading %s\n", attr->file);
-
- return __bpf_object__open(attr->file, NULL, 0,
- bpf_prog_type__needs_kver(attr->prog_type),
- flags);
+ return __bpf_object__open(attr->file, NULL, 0, &opts);
}
struct bpf_object *bpf_object__open_xattr(struct bpf_object_open_attr *attr)
@@ -3655,25 +4030,42 @@ struct bpf_object *bpf_object__open(const char *path)
return bpf_object__open_xattr(&attr);
}
-struct bpf_object *bpf_object__open_buffer(void *obj_buf,
- size_t obj_buf_sz,
- const char *name)
+struct bpf_object *
+bpf_object__open_file(const char *path, struct bpf_object_open_opts *opts)
{
- char tmp_name[64];
+ if (!path)
+ return ERR_PTR(-EINVAL);
- /* param validation */
- if (!obj_buf || obj_buf_sz <= 0)
- return NULL;
+ pr_debug("loading %s\n", path);
- if (!name) {
- snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx",
- (unsigned long)obj_buf,
- (unsigned long)obj_buf_sz);
- name = tmp_name;
- }
- pr_debug("loading object '%s' from buffer\n", name);
+ return __bpf_object__open(path, NULL, 0, opts);
+}
+
+struct bpf_object *
+bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz,
+ struct bpf_object_open_opts *opts)
+{
+ if (!obj_buf || obj_buf_sz == 0)
+ return ERR_PTR(-EINVAL);
- return __bpf_object__open(name, obj_buf, obj_buf_sz, true, true);
+ return __bpf_object__open(NULL, obj_buf, obj_buf_sz, opts);
+}
+
+struct bpf_object *
+bpf_object__open_buffer(const void *obj_buf, size_t obj_buf_sz,
+ const char *name)
+{
+ DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
+ .object_name = name,
+ /* wrong default, but backwards-compatible */
+ .relaxed_maps = true,
+ );
+
+ /* returning NULL is wrong, but backwards-compatible */
+ if (!obj_buf || obj_buf_sz == 0)
+ return NULL;
+
+ return bpf_object__open_mem(obj_buf, obj_buf_sz, &opts);
}
int bpf_object__unload(struct bpf_object *obj)
@@ -3695,7 +4087,7 @@ int bpf_object__unload(struct bpf_object *obj)
int bpf_object__load_xattr(struct bpf_object_load_attr *attr)
{
struct bpf_object *obj;
- int err;
+ int err, i;
if (!attr)
return -EINVAL;
@@ -3704,7 +4096,7 @@ int bpf_object__load_xattr(struct bpf_object_load_attr *attr)
return -EINVAL;
if (obj->loaded) {
- pr_warning("object should not be loaded twice\n");
+ pr_warn("object should not be loaded twice\n");
return -EINVAL;
}
@@ -3716,8 +4108,13 @@ int bpf_object__load_xattr(struct bpf_object_load_attr *attr)
return 0;
out:
+ /* unpin any maps that were auto-pinned during load */
+ for (i = 0; i < obj->nr_maps; i++)
+ if (obj->maps[i].pinned && !obj->maps[i].reused)
+ bpf_map__unpin(&obj->maps[i], NULL);
+
bpf_object__unload(obj);
- pr_warning("failed to load object '%s'\n", obj->path);
+ pr_warn("failed to load object '%s'\n", obj->path);
return err;
}
@@ -3730,6 +4127,28 @@ int bpf_object__load(struct bpf_object *obj)
return bpf_object__load_xattr(&attr);
}
+static int make_parent_dir(const char *path)
+{
+ char *cp, errmsg[STRERR_BUFSIZE];
+ char *dname, *dir;
+ int err = 0;
+
+ dname = strdup(path);
+ if (dname == NULL)
+ return -ENOMEM;
+
+ dir = dirname(dname);
+ if (mkdir(dir, 0700) && errno != EEXIST)
+ err = -errno;
+
+ free(dname);
+ if (err) {
+ cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
+ pr_warn("failed to mkdir %s: %s\n", path, cp);
+ }
+ return err;
+}
+
static int check_path(const char *path)
{
char *cp, errmsg[STRERR_BUFSIZE];
@@ -3747,13 +4166,13 @@ static int check_path(const char *path)
dir = dirname(dname);
if (statfs(dir, &st_fs)) {
cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
- pr_warning("failed to statfs %s: %s\n", dir, cp);
+ pr_warn("failed to statfs %s: %s\n", dir, cp);
err = -errno;
}
free(dname);
if (!err && st_fs.f_type != BPF_FS_MAGIC) {
- pr_warning("specified path %s is not on BPF FS\n", path);
+ pr_warn("specified path %s is not on BPF FS\n", path);
err = -EINVAL;
}
@@ -3766,24 +4185,28 @@ int bpf_program__pin_instance(struct bpf_program *prog, const char *path,
char *cp, errmsg[STRERR_BUFSIZE];
int err;
+ err = make_parent_dir(path);
+ if (err)
+ return err;
+
err = check_path(path);
if (err)
return err;
if (prog == NULL) {
- pr_warning("invalid program pointer\n");
+ pr_warn("invalid program pointer\n");
return -EINVAL;
}
if (instance < 0 || instance >= prog->instances.nr) {
- pr_warning("invalid prog instance %d of prog %s (max %d)\n",
- instance, prog->section_name, prog->instances.nr);
+ pr_warn("invalid prog instance %d of prog %s (max %d)\n",
+ instance, prog->section_name, prog->instances.nr);
return -EINVAL;
}
if (bpf_obj_pin(prog->instances.fds[instance], path)) {
cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
- pr_warning("failed to pin program: %s\n", cp);
+ pr_warn("failed to pin program: %s\n", cp);
return -errno;
}
pr_debug("pinned program '%s'\n", path);
@@ -3801,13 +4224,13 @@ int bpf_program__unpin_instance(struct bpf_program *prog, const char *path,
return err;
if (prog == NULL) {
- pr_warning("invalid program pointer\n");
+ pr_warn("invalid program pointer\n");
return -EINVAL;
}
if (instance < 0 || instance >= prog->instances.nr) {
- pr_warning("invalid prog instance %d of prog %s (max %d)\n",
- instance, prog->section_name, prog->instances.nr);
+ pr_warn("invalid prog instance %d of prog %s (max %d)\n",
+ instance, prog->section_name, prog->instances.nr);
return -EINVAL;
}
@@ -3819,36 +4242,25 @@ int bpf_program__unpin_instance(struct bpf_program *prog, const char *path,
return 0;
}
-static int make_dir(const char *path)
-{
- char *cp, errmsg[STRERR_BUFSIZE];
- int err = 0;
-
- if (mkdir(path, 0700) && errno != EEXIST)
- err = -errno;
-
- if (err) {
- cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
- pr_warning("failed to mkdir %s: %s\n", path, cp);
- }
- return err;
-}
-
int bpf_program__pin(struct bpf_program *prog, const char *path)
{
int i, err;
+ err = make_parent_dir(path);
+ if (err)
+ return err;
+
err = check_path(path);
if (err)
return err;
if (prog == NULL) {
- pr_warning("invalid program pointer\n");
+ pr_warn("invalid program pointer\n");
return -EINVAL;
}
if (prog->instances.nr <= 0) {
- pr_warning("no instances of prog %s to pin\n",
+ pr_warn("no instances of prog %s to pin\n",
prog->section_name);
return -EINVAL;
}
@@ -3858,10 +4270,6 @@ int bpf_program__pin(struct bpf_program *prog, const char *path)
return bpf_program__pin_instance(prog, path, 0);
}
- err = make_dir(path);
- if (err)
- return err;
-
for (i = 0; i < prog->instances.nr; i++) {
char buf[PATH_MAX];
int len;
@@ -3910,12 +4318,12 @@ int bpf_program__unpin(struct bpf_program *prog, const char *path)
return err;
if (prog == NULL) {
- pr_warning("invalid program pointer\n");
+ pr_warn("invalid program pointer\n");
return -EINVAL;
}
if (prog->instances.nr <= 0) {
- pr_warning("no instances of prog %s to pin\n",
+ pr_warn("no instances of prog %s to pin\n",
prog->section_name);
return -EINVAL;
}
@@ -3952,47 +4360,123 @@ int bpf_map__pin(struct bpf_map *map, const char *path)
char *cp, errmsg[STRERR_BUFSIZE];
int err;
- err = check_path(path);
- if (err)
- return err;
-
if (map == NULL) {
- pr_warning("invalid map pointer\n");
+ pr_warn("invalid map pointer\n");
return -EINVAL;
}
- if (bpf_obj_pin(map->fd, path)) {
- cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
- pr_warning("failed to pin map: %s\n", cp);
- return -errno;
+ if (map->pin_path) {
+ if (path && strcmp(path, map->pin_path)) {
+ pr_warn("map '%s' already has pin path '%s' different from '%s'\n",
+ bpf_map__name(map), map->pin_path, path);
+ return -EINVAL;
+ } else if (map->pinned) {
+ pr_debug("map '%s' already pinned at '%s'; not re-pinning\n",
+ bpf_map__name(map), map->pin_path);
+ return 0;
+ }
+ } else {
+ if (!path) {
+ pr_warn("missing a path to pin map '%s' at\n",
+ bpf_map__name(map));
+ return -EINVAL;
+ } else if (map->pinned) {
+ pr_warn("map '%s' already pinned\n", bpf_map__name(map));
+ return -EEXIST;
+ }
+
+ map->pin_path = strdup(path);
+ if (!map->pin_path) {
+ err = -errno;
+ goto out_err;
+ }
}
- pr_debug("pinned map '%s'\n", path);
+ err = make_parent_dir(map->pin_path);
+ if (err)
+ return err;
+
+ err = check_path(map->pin_path);
+ if (err)
+ return err;
+
+ if (bpf_obj_pin(map->fd, map->pin_path)) {
+ err = -errno;
+ goto out_err;
+ }
+
+ map->pinned = true;
+ pr_debug("pinned map '%s'\n", map->pin_path);
return 0;
+
+out_err:
+ cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
+ pr_warn("failed to pin map: %s\n", cp);
+ return err;
}
int bpf_map__unpin(struct bpf_map *map, const char *path)
{
int err;
- err = check_path(path);
- if (err)
- return err;
-
if (map == NULL) {
- pr_warning("invalid map pointer\n");
+ pr_warn("invalid map pointer\n");
+ return -EINVAL;
+ }
+
+ if (map->pin_path) {
+ if (path && strcmp(path, map->pin_path)) {
+ pr_warn("map '%s' already has pin path '%s' different from '%s'\n",
+ bpf_map__name(map), map->pin_path, path);
+ return -EINVAL;
+ }
+ path = map->pin_path;
+ } else if (!path) {
+ pr_warn("no path to unpin map '%s' from\n",
+ bpf_map__name(map));
return -EINVAL;
}
+ err = check_path(path);
+ if (err)
+ return err;
+
err = unlink(path);
if (err != 0)
return -errno;
- pr_debug("unpinned map '%s'\n", path);
+ map->pinned = false;
+ pr_debug("unpinned map '%s' from '%s'\n", bpf_map__name(map), path);
+
+ return 0;
+}
+
+int bpf_map__set_pin_path(struct bpf_map *map, const char *path)
+{
+ char *new = NULL;
+
+ if (path) {
+ new = strdup(path);
+ if (!new)
+ return -errno;
+ }
+
+ free(map->pin_path);
+ map->pin_path = new;
return 0;
}
+const char *bpf_map__get_pin_path(const struct bpf_map *map)
+{
+ return map->pin_path;
+}
+
+bool bpf_map__is_pinned(const struct bpf_map *map)
+{
+ return map->pinned;
+}
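
Taken together, the pin_path plumbing enables a caller flow along these lines (hypothetical; error handling trimmed):

struct bpf_object *obj = bpf_object__open_file("prog.o", NULL);
struct bpf_map *map = bpf_object__find_map_by_name(obj, "my_map");

/* record where the map should live in bpffs */
bpf_map__set_pin_path(map, "/sys/fs/bpf/myapp/my_map");

/* load auto-pins any map that has a pin_path set */
bpf_object__load(obj);

if (!bpf_map__is_pinned(map))     /* not expected to trigger here */
	bpf_map__pin(map, NULL);  /* NULL reuses the stored pin_path */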
+
int bpf_object__pin_maps(struct bpf_object *obj, const char *path)
{
struct bpf_map *map;
@@ -4002,29 +4486,32 @@ int bpf_object__pin_maps(struct bpf_object *obj, const char *path)
return -ENOENT;
if (!obj->loaded) {
- pr_warning("object not yet loaded; load it first\n");
+ pr_warn("object not yet loaded; load it first\n");
return -ENOENT;
}
- err = make_dir(path);
- if (err)
- return err;
-
bpf_object__for_each_map(map, obj) {
+ char *pin_path = NULL;
char buf[PATH_MAX];
- int len;
- len = snprintf(buf, PATH_MAX, "%s/%s", path,
- bpf_map__name(map));
- if (len < 0) {
- err = -EINVAL;
- goto err_unpin_maps;
- } else if (len >= PATH_MAX) {
- err = -ENAMETOOLONG;
- goto err_unpin_maps;
+ if (path) {
+ int len;
+
+ len = snprintf(buf, PATH_MAX, "%s/%s", path,
+ bpf_map__name(map));
+ if (len < 0) {
+ err = -EINVAL;
+ goto err_unpin_maps;
+ } else if (len >= PATH_MAX) {
+ err = -ENAMETOOLONG;
+ goto err_unpin_maps;
+ }
+ pin_path = buf;
+ } else if (!map->pin_path) {
+ continue;
}
- err = bpf_map__pin(map, buf);
+ err = bpf_map__pin(map, pin_path);
if (err)
goto err_unpin_maps;
}
@@ -4033,17 +4520,10 @@ int bpf_object__pin_maps(struct bpf_object *obj, const char *path)
err_unpin_maps:
while ((map = bpf_map__prev(map, obj))) {
- char buf[PATH_MAX];
- int len;
-
- len = snprintf(buf, PATH_MAX, "%s/%s", path,
- bpf_map__name(map));
- if (len < 0)
- continue;
- else if (len >= PATH_MAX)
+ if (!map->pin_path)
continue;
- bpf_map__unpin(map, buf);
+ bpf_map__unpin(map, NULL);
}
return err;
@@ -4058,17 +4538,24 @@ int bpf_object__unpin_maps(struct bpf_object *obj, const char *path)
return -ENOENT;
bpf_object__for_each_map(map, obj) {
+ char *pin_path = NULL;
char buf[PATH_MAX];
- int len;
- len = snprintf(buf, PATH_MAX, "%s/%s", path,
- bpf_map__name(map));
- if (len < 0)
- return -EINVAL;
- else if (len >= PATH_MAX)
- return -ENAMETOOLONG;
+ if (path) {
+ int len;
+
+ len = snprintf(buf, PATH_MAX, "%s/%s", path,
+ bpf_map__name(map));
+ if (len < 0)
+ return -EINVAL;
+ else if (len >= PATH_MAX)
+ return -ENAMETOOLONG;
+ pin_path = buf;
+ } else if (!map->pin_path) {
+ continue;
+ }
- err = bpf_map__unpin(map, buf);
+ err = bpf_map__unpin(map, pin_path);
if (err)
return err;
}
@@ -4085,14 +4572,10 @@ int bpf_object__pin_programs(struct bpf_object *obj, const char *path)
return -ENOENT;
if (!obj->loaded) {
- pr_warning("object not yet loaded; load it first\n");
+ pr_warn("object not yet loaded; load it first\n");
return -ENOENT;
}
- err = make_dir(path);
- if (err)
- return err;
-
bpf_object__for_each_program(prog, obj) {
char buf[PATH_MAX];
int len;
@@ -4193,6 +4676,7 @@ void bpf_object__close(struct bpf_object *obj)
for (i = 0; i < obj->nr_maps; i++) {
zfree(&obj->maps[i].name);
+ zfree(&obj->maps[i].pin_path);
if (obj->maps[i].clear_priv)
obj->maps[i].clear_priv(&obj->maps[i],
obj->maps[i].priv);
@@ -4236,7 +4720,7 @@ bpf_object__next(struct bpf_object *prev)
const char *bpf_object__name(const struct bpf_object *obj)
{
- return obj ? obj->path : ERR_PTR(-EINVAL);
+ return obj ? obj->name : ERR_PTR(-EINVAL);
}
unsigned int bpf_object__kversion(const struct bpf_object *obj)
@@ -4286,7 +4770,7 @@ __bpf_program__iter(const struct bpf_program *p, const struct bpf_object *obj,
&obj->programs[nr_programs - 1];
if (p->obj != obj) {
- pr_warning("error: program handler doesn't match object\n");
+ pr_warn("error: program handler doesn't match object\n");
return NULL;
}
@@ -4349,7 +4833,7 @@ const char *bpf_program__title(const struct bpf_program *prog, bool needs_copy)
if (needs_copy) {
title = strdup(title);
if (!title) {
- pr_warning("failed to strdup program title\n");
+ pr_warn("failed to strdup program title\n");
return ERR_PTR(-ENOMEM);
}
}
@@ -4362,6 +4846,11 @@ int bpf_program__fd(const struct bpf_program *prog)
return bpf_program__nth_fd(prog, 0);
}
+size_t bpf_program__size(const struct bpf_program *prog)
+{
+ return prog->insns_cnt * sizeof(struct bpf_insn);
+}
+
int bpf_program__set_prep(struct bpf_program *prog, int nr_instances,
bpf_program_prep_t prep)
{
@@ -4371,13 +4860,13 @@ int bpf_program__set_prep(struct bpf_program *prog, int nr_instances,
return -EINVAL;
if (prog->instances.nr > 0 || prog->instances.fds) {
- pr_warning("Can't set pre-processor after loading\n");
+ pr_warn("Can't set pre-processor after loading\n");
return -EINVAL;
}
instances_fds = malloc(sizeof(int) * nr_instances);
if (!instances_fds) {
- pr_warning("alloc memory failed for fds\n");
+ pr_warn("alloc memory failed for fds\n");
return -ENOMEM;
}
@@ -4398,21 +4887,26 @@ int bpf_program__nth_fd(const struct bpf_program *prog, int n)
return -EINVAL;
if (n >= prog->instances.nr || n < 0) {
- pr_warning("Can't get the %dth fd from program %s: only %d instances\n",
- n, prog->section_name, prog->instances.nr);
+ pr_warn("Can't get the %dth fd from program %s: only %d instances\n",
+ n, prog->section_name, prog->instances.nr);
return -EINVAL;
}
fd = prog->instances.fds[n];
if (fd < 0) {
- pr_warning("%dth instance of program '%s' is invalid\n",
- n, prog->section_name);
+ pr_warn("%dth instance of program '%s' is invalid\n",
+ n, prog->section_name);
return -ENOENT;
}
return fd;
}
+enum bpf_prog_type bpf_program__get_type(struct bpf_program *prog)
+{
+ return prog->type;
+}
+
void bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type)
{
prog->type = type;
@@ -4446,6 +4940,13 @@ BPF_PROG_TYPE_FNS(tracepoint, BPF_PROG_TYPE_TRACEPOINT);
BPF_PROG_TYPE_FNS(raw_tracepoint, BPF_PROG_TYPE_RAW_TRACEPOINT);
BPF_PROG_TYPE_FNS(xdp, BPF_PROG_TYPE_XDP);
BPF_PROG_TYPE_FNS(perf_event, BPF_PROG_TYPE_PERF_EVENT);
+BPF_PROG_TYPE_FNS(tracing, BPF_PROG_TYPE_TRACING);
+
+enum bpf_attach_type
+bpf_program__get_expected_attach_type(struct bpf_program *prog)
+{
+ return prog->expected_attach_type;
+}
void bpf_program__set_expected_attach_type(struct bpf_program *prog,
enum bpf_attach_type type)
@@ -4453,19 +4954,23 @@ void bpf_program__set_expected_attach_type(struct bpf_program *prog,
prog->expected_attach_type = type;
}
-#define BPF_PROG_SEC_IMPL(string, ptype, eatype, is_attachable, atype) \
- { string, sizeof(string) - 1, ptype, eatype, is_attachable, atype }
+#define BPF_PROG_SEC_IMPL(string, ptype, eatype, is_attachable, btf, atype) \
+ { string, sizeof(string) - 1, ptype, eatype, is_attachable, btf, atype }
/* Programs that can NOT be attached. */
-#define BPF_PROG_SEC(string, ptype) BPF_PROG_SEC_IMPL(string, ptype, 0, 0, 0)
+#define BPF_PROG_SEC(string, ptype) BPF_PROG_SEC_IMPL(string, ptype, 0, 0, 0, 0)
/* Programs that can be attached. */
#define BPF_APROG_SEC(string, ptype, atype) \
- BPF_PROG_SEC_IMPL(string, ptype, 0, 1, atype)
+ BPF_PROG_SEC_IMPL(string, ptype, 0, 1, 0, atype)
/* Programs that must specify expected attach type at load time. */
#define BPF_EAPROG_SEC(string, ptype, eatype) \
- BPF_PROG_SEC_IMPL(string, ptype, eatype, 1, eatype)
+ BPF_PROG_SEC_IMPL(string, ptype, eatype, 1, 0, eatype)
+
+/* Programs that use BTF to identify attach point */
+#define BPF_PROG_BTF(string, ptype, eatype) \
+ BPF_PROG_SEC_IMPL(string, ptype, eatype, 0, 1, 0)
/* Programs that can be attached but attach type can't be identified by section
* name. Kept for backward compatibility.
@@ -4477,16 +4982,27 @@ static const struct {
size_t len;
enum bpf_prog_type prog_type;
enum bpf_attach_type expected_attach_type;
- int is_attachable;
+ bool is_attachable;
+ bool is_attach_btf;
enum bpf_attach_type attach_type;
} section_names[] = {
BPF_PROG_SEC("socket", BPF_PROG_TYPE_SOCKET_FILTER),
BPF_PROG_SEC("kprobe/", BPF_PROG_TYPE_KPROBE),
+ BPF_PROG_SEC("uprobe/", BPF_PROG_TYPE_KPROBE),
BPF_PROG_SEC("kretprobe/", BPF_PROG_TYPE_KPROBE),
+ BPF_PROG_SEC("uretprobe/", BPF_PROG_TYPE_KPROBE),
BPF_PROG_SEC("classifier", BPF_PROG_TYPE_SCHED_CLS),
BPF_PROG_SEC("action", BPF_PROG_TYPE_SCHED_ACT),
BPF_PROG_SEC("tracepoint/", BPF_PROG_TYPE_TRACEPOINT),
+ BPF_PROG_SEC("tp/", BPF_PROG_TYPE_TRACEPOINT),
BPF_PROG_SEC("raw_tracepoint/", BPF_PROG_TYPE_RAW_TRACEPOINT),
+ BPF_PROG_SEC("raw_tp/", BPF_PROG_TYPE_RAW_TRACEPOINT),
+ BPF_PROG_BTF("tp_btf/", BPF_PROG_TYPE_TRACING,
+ BPF_TRACE_RAW_TP),
+ BPF_PROG_BTF("fentry/", BPF_PROG_TYPE_TRACING,
+ BPF_TRACE_FENTRY),
+ BPF_PROG_BTF("fexit/", BPF_PROG_TYPE_TRACING,
+ BPF_TRACE_FEXIT),
BPF_PROG_SEC("xdp", BPF_PROG_TYPE_XDP),
BPF_PROG_SEC("perf_event", BPF_PROG_TYPE_PERF_EVENT),
BPF_PROG_SEC("lwt_in", BPF_PROG_TYPE_LWT_IN),
@@ -4593,14 +5109,105 @@ int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
*expected_attach_type = section_names[i].expected_attach_type;
return 0;
}
- pr_warning("failed to guess program type based on ELF section name '%s'\n", name);
+ pr_warn("failed to guess program type from ELF section '%s'\n", name);
type_names = libbpf_get_type_names(false);
if (type_names != NULL) {
pr_info("supported section(type) names are:%s\n", type_names);
free(type_names);
}
- return -EINVAL;
+ return -ESRCH;
+}
+
+#define BTF_PREFIX "btf_trace_"
+int libbpf_find_vmlinux_btf_id(const char *name,
+ enum bpf_attach_type attach_type)
+{
+ struct btf *btf = bpf_core_find_kernel_btf();
+ char raw_tp_btf[128] = BTF_PREFIX;
+ char *dst = raw_tp_btf + sizeof(BTF_PREFIX) - 1;
+ const char *btf_name;
+ int err = -EINVAL;
+ __u32 kind;
+
+ if (IS_ERR(btf)) {
+ pr_warn("vmlinux BTF is not found\n");
+ return -EINVAL;
+ }
+
+ if (attach_type == BPF_TRACE_RAW_TP) {
+ /* prepend "btf_trace_" prefix per kernel convention */
+ strncat(dst, name, sizeof(raw_tp_btf) - sizeof(BTF_PREFIX));
+ btf_name = raw_tp_btf;
+ kind = BTF_KIND_TYPEDEF;
+ } else {
+ btf_name = name;
+ kind = BTF_KIND_FUNC;
+ }
+ err = btf__find_by_name_kind(btf, btf_name, kind);
+ btf__free(btf);
+ return err;
+}
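
Concretely, the lookup resolves names like this (illustrative):

/*
 *   SEC("tp_btf/sched_switch") + BPF_TRACE_RAW_TP
 *       -> TYPEDEF "btf_trace_sched_switch" in vmlinux BTF
 *
 *   SEC("fentry/do_unlinkat") + BPF_TRACE_FENTRY
 *       -> FUNC "do_unlinkat" in vmlinux BTF
 */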
+
+static int libbpf_find_prog_btf_id(const char *name, __u32 attach_prog_fd)
+{
+ struct bpf_prog_info_linear *info_linear;
+ struct bpf_prog_info *info;
+ struct btf *btf = NULL;
+ int err = -EINVAL;
+
+ info_linear = bpf_program__get_prog_info_linear(attach_prog_fd, 0);
+ if (IS_ERR_OR_NULL(info_linear)) {
+ pr_warn("failed get_prog_info_linear for FD %d\n",
+ attach_prog_fd);
+ return -EINVAL;
+ }
+ info = &info_linear->info;
+ if (!info->btf_id) {
+ pr_warn("The target program doesn't have BTF\n");
+ goto out;
+ }
+ if (btf__get_from_id(info->btf_id, &btf)) {
+ pr_warn("Failed to get BTF of the program\n");
+ goto out;
+ }
+ err = btf__find_by_name_kind(btf, name, BTF_KIND_FUNC);
+ btf__free(btf);
+ if (err <= 0) {
+ pr_warn("%s is not found in prog's BTF\n", name);
+ goto out;
+ }
+out:
+ free(info_linear);
+ return err;
+}
+
+static int libbpf_find_attach_btf_id(const char *name,
+ enum bpf_attach_type attach_type,
+ __u32 attach_prog_fd)
+{
+ int i, err;
+
+ if (!name)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(section_names); i++) {
+ if (!section_names[i].is_attach_btf)
+ continue;
+ if (strncmp(name, section_names[i].sec, section_names[i].len))
+ continue;
+ if (attach_prog_fd)
+ err = libbpf_find_prog_btf_id(name + section_names[i].len,
+ attach_prog_fd);
+ else
+ err = libbpf_find_vmlinux_btf_id(name + section_names[i].len,
+ attach_type);
+ if (err <= 0)
+ pr_warn("%s is not found in vmlinux BTF\n", name);
+ return err;
+ }
+ pr_warn("failed to identify btf_id based on ELF section name '%s'\n", name);
+ return -ESRCH;
}
int libbpf_attach_type_by_name(const char *name,
@@ -4620,7 +5227,7 @@ int libbpf_attach_type_by_name(const char *name,
*attach_type = section_names[i].attach_type;
return 0;
}
- pr_warning("failed to guess attach type based on ELF section name '%s'\n", name);
+ pr_warn("failed to guess attach type based on ELF section name '%s'\n", name);
type_names = libbpf_get_type_names(true);
if (type_names != NULL) {
pr_info("attachable section(type) names are:%s\n", type_names);
@@ -4630,15 +5237,6 @@ int libbpf_attach_type_by_name(const char *name,
return -EINVAL;
}
-static int
-bpf_program__identify_section(struct bpf_program *prog,
- enum bpf_prog_type *prog_type,
- enum bpf_attach_type *expected_attach_type)
-{
- return libbpf_prog_type_by_name(prog->section_name, prog_type,
- expected_attach_type);
-}
-
int bpf_map__fd(const struct bpf_map *map)
{
return map ? map->fd : -EINVAL;
@@ -4703,11 +5301,11 @@ void bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex)
int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd)
{
if (!bpf_map_type__is_map_in_map(map->def.type)) {
- pr_warning("error: unsupported map type\n");
+ pr_warn("error: unsupported map type\n");
return -EINVAL;
}
if (map->inner_map_fd != -1) {
- pr_warning("error: inner_map_fd already specified\n");
+ pr_warn("error: inner_map_fd already specified\n");
return -EINVAL;
}
map->inner_map_fd = fd;
@@ -4727,8 +5325,8 @@ __bpf_map__iter(const struct bpf_map *m, const struct bpf_object *obj, int i)
e = obj->maps + obj->nr_maps;
if ((m < s) || (m >= e)) {
- pr_warning("error in %s: map handler doesn't belong to object\n",
- __func__);
+ pr_warn("error in %s: map handler doesn't belong to object\n",
+ __func__);
return NULL;
}
@@ -4806,8 +5404,6 @@ int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
{
struct bpf_object_open_attr open_attr = {};
struct bpf_program *prog, *first_prog = NULL;
- enum bpf_attach_type expected_attach_type;
- enum bpf_prog_type prog_type;
struct bpf_object *obj;
struct bpf_map *map;
int err;
@@ -4825,26 +5421,27 @@ int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
return -ENOENT;
bpf_object__for_each_program(prog, obj) {
+ enum bpf_attach_type attach_type = attr->expected_attach_type;
/*
- * If type is not specified, try to guess it based on
- * section name.
+ * to preserve backwards compatibility, bpf_prog_load treats
+ * attr->prog_type, if specified, as an override to whatever
+ * bpf_object__open guessed
*/
- prog_type = attr->prog_type;
- prog->prog_ifindex = attr->ifindex;
- expected_attach_type = attr->expected_attach_type;
- if (prog_type == BPF_PROG_TYPE_UNSPEC) {
- err = bpf_program__identify_section(prog, &prog_type,
- &expected_attach_type);
- if (err < 0) {
- bpf_object__close(obj);
- return -EINVAL;
- }
+ if (attr->prog_type != BPF_PROG_TYPE_UNSPEC) {
+ bpf_program__set_type(prog, attr->prog_type);
+ bpf_program__set_expected_attach_type(prog,
+ attach_type);
+ }
+ if (bpf_program__get_type(prog) == BPF_PROG_TYPE_UNSPEC) {
+ /*
+ * we haven't guessed from section name and user
+ * didn't provide a fallback type, too bad...
+ */
+ bpf_object__close(obj);
+ return -EINVAL;
}
- bpf_program__set_type(prog, prog_type);
- bpf_program__set_expected_attach_type(prog,
- expected_attach_type);
-
+ prog->prog_ifindex = attr->ifindex;
prog->log_level = attr->log_level;
prog->prog_flags = attr->prog_flags;
if (!first_prog)
@@ -4857,7 +5454,7 @@ int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
}
if (!first_prog) {
- pr_warning("object file doesn't contain bpf program\n");
+ pr_warn("object file doesn't contain bpf program\n");
bpf_object__close(obj);
return -ENOENT;
}
@@ -4916,14 +5513,14 @@ struct bpf_link *bpf_program__attach_perf_event(struct bpf_program *prog,
int prog_fd, err;
if (pfd < 0) {
- pr_warning("program '%s': invalid perf event FD %d\n",
- bpf_program__title(prog, false), pfd);
+ pr_warn("program '%s': invalid perf event FD %d\n",
+ bpf_program__title(prog, false), pfd);
return ERR_PTR(-EINVAL);
}
prog_fd = bpf_program__fd(prog);
if (prog_fd < 0) {
- pr_warning("program '%s': can't attach BPF program w/o FD (did you load it?)\n",
- bpf_program__title(prog, false));
+ pr_warn("program '%s': can't attach BPF program w/o FD (did you load it?)\n",
+ bpf_program__title(prog, false));
return ERR_PTR(-EINVAL);
}
@@ -4936,16 +5533,16 @@ struct bpf_link *bpf_program__attach_perf_event(struct bpf_program *prog,
if (ioctl(pfd, PERF_EVENT_IOC_SET_BPF, prog_fd) < 0) {
err = -errno;
free(link);
- pr_warning("program '%s': failed to attach to pfd %d: %s\n",
- bpf_program__title(prog, false), pfd,
+ pr_warn("program '%s': failed to attach to pfd %d: %s\n",
+ bpf_program__title(prog, false), pfd,
libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
return ERR_PTR(err);
}
if (ioctl(pfd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
err = -errno;
free(link);
- pr_warning("program '%s': failed to enable pfd %d: %s\n",
- bpf_program__title(prog, false), pfd,
+ pr_warn("program '%s': failed to enable pfd %d: %s\n",
+ bpf_program__title(prog, false), pfd,
libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
return ERR_PTR(err);
}
@@ -5020,9 +5617,9 @@ static int perf_event_open_probe(bool uprobe, bool retprobe, const char *name,
type = uprobe ? determine_uprobe_perf_type()
: determine_kprobe_perf_type();
if (type < 0) {
- pr_warning("failed to determine %s perf type: %s\n",
- uprobe ? "uprobe" : "kprobe",
- libbpf_strerror_r(type, errmsg, sizeof(errmsg)));
+ pr_warn("failed to determine %s perf type: %s\n",
+ uprobe ? "uprobe" : "kprobe",
+ libbpf_strerror_r(type, errmsg, sizeof(errmsg)));
return type;
}
if (retprobe) {
@@ -5030,10 +5627,9 @@ static int perf_event_open_probe(bool uprobe, bool retprobe, const char *name,
: determine_kprobe_retprobe_bit();
if (bit < 0) {
- pr_warning("failed to determine %s retprobe bit: %s\n",
- uprobe ? "uprobe" : "kprobe",
- libbpf_strerror_r(bit, errmsg,
- sizeof(errmsg)));
+ pr_warn("failed to determine %s retprobe bit: %s\n",
+ uprobe ? "uprobe" : "kprobe",
+ libbpf_strerror_r(bit, errmsg, sizeof(errmsg)));
return bit;
}
attr.config |= 1 << bit;
@@ -5050,9 +5646,9 @@ static int perf_event_open_probe(bool uprobe, bool retprobe, const char *name,
-1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
if (pfd < 0) {
err = -errno;
- pr_warning("%s perf_event_open() failed: %s\n",
- uprobe ? "uprobe" : "kprobe",
- libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
+ pr_warn("%s perf_event_open() failed: %s\n",
+ uprobe ? "uprobe" : "kprobe",
+ libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
return err;
}
return pfd;
@@ -5069,20 +5665,20 @@ struct bpf_link *bpf_program__attach_kprobe(struct bpf_program *prog,
pfd = perf_event_open_probe(false /* uprobe */, retprobe, func_name,
0 /* offset */, -1 /* pid */);
if (pfd < 0) {
- pr_warning("program '%s': failed to create %s '%s' perf event: %s\n",
- bpf_program__title(prog, false),
- retprobe ? "kretprobe" : "kprobe", func_name,
- libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
+ pr_warn("program '%s': failed to create %s '%s' perf event: %s\n",
+ bpf_program__title(prog, false),
+ retprobe ? "kretprobe" : "kprobe", func_name,
+ libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
return ERR_PTR(pfd);
}
link = bpf_program__attach_perf_event(prog, pfd);
if (IS_ERR(link)) {
close(pfd);
err = PTR_ERR(link);
- pr_warning("program '%s': failed to attach to %s '%s': %s\n",
- bpf_program__title(prog, false),
- retprobe ? "kretprobe" : "kprobe", func_name,
- libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
+ pr_warn("program '%s': failed to attach to %s '%s': %s\n",
+ bpf_program__title(prog, false),
+ retprobe ? "kretprobe" : "kprobe", func_name,
+ libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
return link;
}
return link;
@@ -5100,22 +5696,22 @@ struct bpf_link *bpf_program__attach_uprobe(struct bpf_program *prog,
pfd = perf_event_open_probe(true /* uprobe */, retprobe,
binary_path, func_offset, pid);
if (pfd < 0) {
- pr_warning("program '%s': failed to create %s '%s:0x%zx' perf event: %s\n",
- bpf_program__title(prog, false),
- retprobe ? "uretprobe" : "uprobe",
- binary_path, func_offset,
- libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
+ pr_warn("program '%s': failed to create %s '%s:0x%zx' perf event: %s\n",
+ bpf_program__title(prog, false),
+ retprobe ? "uretprobe" : "uprobe",
+ binary_path, func_offset,
+ libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
return ERR_PTR(pfd);
}
link = bpf_program__attach_perf_event(prog, pfd);
if (IS_ERR(link)) {
close(pfd);
err = PTR_ERR(link);
- pr_warning("program '%s': failed to attach to %s '%s:0x%zx': %s\n",
- bpf_program__title(prog, false),
- retprobe ? "uretprobe" : "uprobe",
- binary_path, func_offset,
- libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
+ pr_warn("program '%s': failed to attach to %s '%s:0x%zx': %s\n",
+ bpf_program__title(prog, false),
+ retprobe ? "uretprobe" : "uprobe",
+ binary_path, func_offset,
+ libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
return link;
}
return link;
@@ -5149,9 +5745,9 @@ static int perf_event_open_tracepoint(const char *tp_category,
tp_id = determine_tracepoint_id(tp_category, tp_name);
if (tp_id < 0) {
- pr_warning("failed to determine tracepoint '%s/%s' perf event ID: %s\n",
- tp_category, tp_name,
- libbpf_strerror_r(tp_id, errmsg, sizeof(errmsg)));
+ pr_warn("failed to determine tracepoint '%s/%s' perf event ID: %s\n",
+ tp_category, tp_name,
+ libbpf_strerror_r(tp_id, errmsg, sizeof(errmsg)));
return tp_id;
}
@@ -5163,9 +5759,9 @@ static int perf_event_open_tracepoint(const char *tp_category,
-1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
if (pfd < 0) {
err = -errno;
- pr_warning("tracepoint '%s/%s' perf_event_open() failed: %s\n",
- tp_category, tp_name,
- libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
+ pr_warn("tracepoint '%s/%s' perf_event_open() failed: %s\n",
+ tp_category, tp_name,
+ libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
return err;
}
return pfd;
@@ -5181,20 +5777,20 @@ struct bpf_link *bpf_program__attach_tracepoint(struct bpf_program *prog,
pfd = perf_event_open_tracepoint(tp_category, tp_name);
if (pfd < 0) {
- pr_warning("program '%s': failed to create tracepoint '%s/%s' perf event: %s\n",
- bpf_program__title(prog, false),
- tp_category, tp_name,
- libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
+ pr_warn("program '%s': failed to create tracepoint '%s/%s' perf event: %s\n",
+ bpf_program__title(prog, false),
+ tp_category, tp_name,
+ libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
return ERR_PTR(pfd);
}
link = bpf_program__attach_perf_event(prog, pfd);
if (IS_ERR(link)) {
close(pfd);
err = PTR_ERR(link);
- pr_warning("program '%s': failed to attach to tracepoint '%s/%s': %s\n",
- bpf_program__title(prog, false),
- tp_category, tp_name,
- libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
+ pr_warn("program '%s': failed to attach to tracepoint '%s/%s': %s\n",
+ bpf_program__title(prog, false),
+ tp_category, tp_name,
+ libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
return link;
}
return link;
@@ -5216,8 +5812,8 @@ struct bpf_link *bpf_program__attach_raw_tracepoint(struct bpf_program *prog,
prog_fd = bpf_program__fd(prog);
if (prog_fd < 0) {
- pr_warning("program '%s': can't attach before loaded\n",
- bpf_program__title(prog, false));
+ pr_warn("program '%s': can't attach before loaded\n",
+ bpf_program__title(prog, false));
return ERR_PTR(-EINVAL);
}
@@ -5230,9 +5826,40 @@ struct bpf_link *bpf_program__attach_raw_tracepoint(struct bpf_program *prog,
if (pfd < 0) {
pfd = -errno;
free(link);
- pr_warning("program '%s': failed to attach to raw tracepoint '%s': %s\n",
- bpf_program__title(prog, false), tp_name,
- libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
+ pr_warn("program '%s': failed to attach to raw tracepoint '%s': %s\n",
+ bpf_program__title(prog, false), tp_name,
+ libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
+ return ERR_PTR(pfd);
+ }
+ link->fd = pfd;
+ return (struct bpf_link *)link;
+}
+
+struct bpf_link *bpf_program__attach_trace(struct bpf_program *prog)
+{
+ char errmsg[STRERR_BUFSIZE];
+ struct bpf_link_fd *link;
+ int prog_fd, pfd;
+
+ prog_fd = bpf_program__fd(prog);
+ if (prog_fd < 0) {
+ pr_warn("program '%s': can't attach before loaded\n",
+ bpf_program__title(prog, false));
+ return ERR_PTR(-EINVAL);
+ }
+
+ link = malloc(sizeof(*link));
+ if (!link)
+ return ERR_PTR(-ENOMEM);
+ link->link.destroy = &bpf_link__destroy_fd;
+
+ pfd = bpf_raw_tracepoint_open(NULL, prog_fd);
+ if (pfd < 0) {
+ pfd = -errno;
+ free(link);
+ pr_warn("program '%s': failed to attach to trace: %s\n",
+ bpf_program__title(prog, false),
+ libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
return ERR_PTR(pfd);
}
link->fd = pfd;
@@ -5334,7 +5961,7 @@ static void perf_buffer__free_cpu_buf(struct perf_buffer *pb,
return;
if (cpu_buf->base &&
munmap(cpu_buf->base, pb->mmap_size + pb->page_size))
- pr_warning("failed to munmap cpu_buf #%d\n", cpu_buf->cpu);
+ pr_warn("failed to munmap cpu_buf #%d\n", cpu_buf->cpu);
if (cpu_buf->fd >= 0) {
ioctl(cpu_buf->fd, PERF_EVENT_IOC_DISABLE, 0);
close(cpu_buf->fd);
@@ -5384,8 +6011,8 @@ perf_buffer__open_cpu_buf(struct perf_buffer *pb, struct perf_event_attr *attr,
-1, PERF_FLAG_FD_CLOEXEC);
if (cpu_buf->fd < 0) {
err = -errno;
- pr_warning("failed to open perf buffer event on cpu #%d: %s\n",
- cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
+ pr_warn("failed to open perf buffer event on cpu #%d: %s\n",
+ cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
goto error;
}
@@ -5395,15 +6022,15 @@ perf_buffer__open_cpu_buf(struct perf_buffer *pb, struct perf_event_attr *attr,
if (cpu_buf->base == MAP_FAILED) {
cpu_buf->base = NULL;
err = -errno;
- pr_warning("failed to mmap perf buffer on cpu #%d: %s\n",
- cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
+ pr_warn("failed to mmap perf buffer on cpu #%d: %s\n",
+ cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
goto error;
}
if (ioctl(cpu_buf->fd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
err = -errno;
- pr_warning("failed to enable perf buffer event on cpu #%d: %s\n",
- cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
+ pr_warn("failed to enable perf buffer event on cpu #%d: %s\n",
+ cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
goto error;
}
@@ -5463,8 +6090,8 @@ static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
int err, i;
if (page_cnt & (page_cnt - 1)) {
- pr_warning("page count should be power of two, but is %zu\n",
- page_cnt);
+ pr_warn("page count should be power of two, but is %zu\n",
+ page_cnt);
return ERR_PTR(-EINVAL);
}
@@ -5472,14 +6099,14 @@ static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
err = bpf_obj_get_info_by_fd(map_fd, &map, &map_info_len);
if (err) {
err = -errno;
- pr_warning("failed to get map info for map FD %d: %s\n",
- map_fd, libbpf_strerror_r(err, msg, sizeof(msg)));
+ pr_warn("failed to get map info for map FD %d: %s\n",
+ map_fd, libbpf_strerror_r(err, msg, sizeof(msg)));
return ERR_PTR(err);
}
if (map.type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
- pr_warning("map '%s' should be BPF_MAP_TYPE_PERF_EVENT_ARRAY\n",
- map.name);
+ pr_warn("map '%s' should be BPF_MAP_TYPE_PERF_EVENT_ARRAY\n",
+ map.name);
return ERR_PTR(-EINVAL);
}
@@ -5499,8 +6126,8 @@ static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
pb->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
if (pb->epoll_fd < 0) {
err = -errno;
- pr_warning("failed to create epoll instance: %s\n",
- libbpf_strerror_r(err, msg, sizeof(msg)));
+ pr_warn("failed to create epoll instance: %s\n",
+ libbpf_strerror_r(err, msg, sizeof(msg)));
goto error;
}
@@ -5519,13 +6146,13 @@ static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
pb->events = calloc(pb->cpu_cnt, sizeof(*pb->events));
if (!pb->events) {
err = -ENOMEM;
- pr_warning("failed to allocate events: out of memory\n");
+ pr_warn("failed to allocate events: out of memory\n");
goto error;
}
pb->cpu_bufs = calloc(pb->cpu_cnt, sizeof(*pb->cpu_bufs));
if (!pb->cpu_bufs) {
err = -ENOMEM;
- pr_warning("failed to allocate buffers: out of memory\n");
+ pr_warn("failed to allocate buffers: out of memory\n");
goto error;
}
@@ -5548,9 +6175,9 @@ static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
&cpu_buf->fd, 0);
if (err) {
err = -errno;
- pr_warning("failed to set cpu #%d, key %d -> perf FD %d: %s\n",
- cpu, map_key, cpu_buf->fd,
- libbpf_strerror_r(err, msg, sizeof(msg)));
+ pr_warn("failed to set cpu #%d, key %d -> perf FD %d: %s\n",
+ cpu, map_key, cpu_buf->fd,
+ libbpf_strerror_r(err, msg, sizeof(msg)));
goto error;
}
@@ -5559,9 +6186,9 @@ static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
if (epoll_ctl(pb->epoll_fd, EPOLL_CTL_ADD, cpu_buf->fd,
&pb->events[i]) < 0) {
err = -errno;
- pr_warning("failed to epoll_ctl cpu #%d perf FD %d: %s\n",
- cpu, cpu_buf->fd,
- libbpf_strerror_r(err, msg, sizeof(msg)));
+ pr_warn("failed to epoll_ctl cpu #%d perf FD %d: %s\n",
+ cpu, cpu_buf->fd,
+ libbpf_strerror_r(err, msg, sizeof(msg)));
goto error;
}
}
@@ -5614,7 +6241,7 @@ perf_buffer__process_record(struct perf_event_header *e, void *ctx)
break;
}
default:
- pr_warning("unknown perf sample type %d\n", e->type);
+ pr_warn("unknown perf sample type %d\n", e->type);
return LIBBPF_PERF_EVENT_ERROR;
}
return LIBBPF_PERF_EVENT_CONT;
@@ -5644,7 +6271,7 @@ int perf_buffer__poll(struct perf_buffer *pb, int timeout_ms)
err = perf_buffer__process_records(pb, cpu_buf);
if (err) {
- pr_warning("error while processing records: %d\n", err);
+ pr_warn("error while processing records: %d\n", err);
return err;
}
}
@@ -5708,7 +6335,8 @@ static struct bpf_prog_info_array_desc bpf_prog_info_array_desc[] = {
};
-static __u32 bpf_prog_info_read_offset_u32(struct bpf_prog_info *info, int offset)
+static __u32 bpf_prog_info_read_offset_u32(struct bpf_prog_info *info,
+ int offset)
{
__u32 *array = (__u32 *)info;
@@ -5717,7 +6345,8 @@ static __u32 bpf_prog_info_read_offset_u32(struct bpf_prog_info *info, int offse
return -(int)offset;
}
-static __u64 bpf_prog_info_read_offset_u64(struct bpf_prog_info *info, int offset)
+static __u64 bpf_prog_info_read_offset_u64(struct bpf_prog_info *info,
+ int offset)
{
__u64 *array = (__u64 *)info;
@@ -5841,13 +6470,13 @@ bpf_program__get_prog_info_linear(int fd, __u64 arrays)
v2 = bpf_prog_info_read_offset_u32(&info_linear->info,
desc->count_offset);
if (v1 != v2)
- pr_warning("%s: mismatch in element count\n", __func__);
+ pr_warn("%s: mismatch in element count\n", __func__);
v1 = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
v2 = bpf_prog_info_read_offset_u32(&info_linear->info,
desc->size_offset);
if (v1 != v2)
- pr_warning("%s: mismatch in rec size\n", __func__);
+ pr_warn("%s: mismatch in rec size\n", __func__);
}
/* step 7: update info_len and data_len */
@@ -5915,20 +6544,19 @@ int libbpf_num_possible_cpus(void)
fd = open(fcpu, O_RDONLY);
if (fd < 0) {
error = errno;
- pr_warning("Failed to open file %s: %s\n",
- fcpu, strerror(error));
+ pr_warn("Failed to open file %s: %s\n", fcpu, strerror(error));
return -error;
}
len = read(fd, buf, sizeof(buf));
close(fd);
if (len <= 0) {
error = len ? errno : EINVAL;
- pr_warning("Failed to read # of possible cpus from %s: %s\n",
- fcpu, strerror(error));
+ pr_warn("Failed to read # of possible cpus from %s: %s\n",
+ fcpu, strerror(error));
return -error;
}
if (len == sizeof(buf)) {
- pr_warning("File %s size overflow\n", fcpu);
+ pr_warn("File %s size overflow\n", fcpu);
return -EOVERFLOW;
}
buf[len] = '\0';
@@ -5939,8 +6567,8 @@ int libbpf_num_possible_cpus(void)
buf[ir] = '\0';
n = sscanf(&buf[il], "%u-%u", &start, &end);
if (n <= 0) {
- pr_warning("Failed to get # CPUs from %s\n",
- &buf[il]);
+ pr_warn("Failed to get # CPUs from %s\n",
+ &buf[il]);
return -EINVAL;
} else if (n == 1) {
end = start;
@@ -5950,7 +6578,7 @@ int libbpf_num_possible_cpus(void)
}
}
if (tmp_cpus <= 0) {
- pr_warning("Invalid #CPUs %d from %s\n", tmp_cpus, fcpu);
+ pr_warn("Invalid #CPUs %d from %s\n", tmp_cpus, fcpu);
return -EINVAL;
}
diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
index e8f70977d137..0dbf4bfba0c4 100644
--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h
@@ -67,18 +67,80 @@ struct bpf_object_open_attr {
enum bpf_prog_type prog_type;
};
+/* Helper macro to declare and initialize a libbpf options struct
+ *
+ * This dance with an uninitialized declaration, followed by a memset to
+ * zero, followed by assignment using compound literal syntax, is done to
+ * preserve the ability to use a nice struct field initialization syntax and
+ * **hopefully** have all the padding bytes initialized to zero. It's not
+ * guaranteed, though, that the compiler won't copy garbage into the
+ * literal's padding bytes when copying the literal, but that's the best way
+ * I've found and it seems to work in practice.
+ *
+ * The macro declares an opts struct of the given type and name,
+ * zero-initializes it (including any extra padding) with memset(), and then
+ * assigns the initial values provided by the user as struct-initializer
+ * varargs.
+ */
+#define DECLARE_LIBBPF_OPTS(TYPE, NAME, ...) \
+ struct TYPE NAME = ({ \
+ memset(&NAME, 0, sizeof(struct TYPE)); \
+ (struct TYPE) { \
+ .sz = sizeof(struct TYPE), \
+ __VA_ARGS__ \
+ }; \
+ })
+
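+/* A minimal usage sketch (illustrative only; the object file name and
+ * variable names below are assumed, not part of this API):
+ *
+ *	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
+ *		.relaxed_maps = true,
+ *	);
+ *	obj = bpf_object__open_file("prog.bpf.o", &opts);
+ */
+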
+struct bpf_object_open_opts {
+ /* size of this struct, for forward/backward compatibility */
+ size_t sz;
+ /* object name override, if provided:
+ * - for object open from file, this will override setting object
+ * name from file path's base name;
+ * - for object open from memory buffer, this will specify an object
+ * name and will override default "<addr>-<buf-size>" name;
+ */
+ const char *object_name;
+ /* parse map definitions non-strictly, allowing extra attributes/data */
+ bool relaxed_maps;
+ /* process CO-RE relocations non-strictly, allowing them to fail */
+ bool relaxed_core_relocs;
+ /* maps that set the 'pinning' attribute in their definition will have
+ * their pin_path attribute set to a file in this directory, and be
+ * auto-pinned to that path on load; defaults to "/sys/fs/bpf".
+ */
+ const char *pin_root_path;
+ __u32 attach_prog_fd;
+};
+#define bpf_object_open_opts__last_field attach_prog_fd
+
LIBBPF_API struct bpf_object *bpf_object__open(const char *path);
LIBBPF_API struct bpf_object *
+bpf_object__open_file(const char *path, struct bpf_object_open_opts *opts);
+LIBBPF_API struct bpf_object *
+bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz,
+ struct bpf_object_open_opts *opts);
+
+/* deprecated bpf_object__open variants */
+LIBBPF_API struct bpf_object *
+bpf_object__open_buffer(const void *obj_buf, size_t obj_buf_sz,
+ const char *name);
+LIBBPF_API struct bpf_object *
bpf_object__open_xattr(struct bpf_object_open_attr *attr);
-struct bpf_object *__bpf_object__open_xattr(struct bpf_object_open_attr *attr,
- int flags);
-LIBBPF_API struct bpf_object *bpf_object__open_buffer(void *obj_buf,
- size_t obj_buf_sz,
- const char *name);
+
int bpf_object__section_size(const struct bpf_object *obj, const char *name,
__u32 *size);
int bpf_object__variable_offset(const struct bpf_object *obj, const char *name,
__u32 *off);
+
+enum libbpf_pin_type {
+ LIBBPF_PIN_NONE,
+ /* PIN_BY_NAME: pin maps by name (in /sys/fs/bpf by default) */
+ LIBBPF_PIN_BY_NAME,
+};
+
+/* pin_maps and unpin_maps can both be called with a NULL path, in which case
+ * they will use the pin_path attribute of each map (and ignore all maps that
+ * don't have a pin_path set).
+ */
LIBBPF_API int bpf_object__pin_maps(struct bpf_object *obj, const char *path);
LIBBPF_API int bpf_object__unpin_maps(struct bpf_object *obj,
const char *path);
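+
+/* Sketch (assumes the object declares maps with the 'pinning' attribute):
+ * with a NULL path, each such map is pinned at its own pin_path.
+ *
+ *	err = bpf_object__pin_maps(obj, NULL);
+ */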
@@ -127,6 +189,8 @@ libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
enum bpf_attach_type *expected_attach_type);
LIBBPF_API int libbpf_attach_type_by_name(const char *name,
enum bpf_attach_type *attach_type);
+LIBBPF_API int libbpf_find_vmlinux_btf_id(const char *name,
+ enum bpf_attach_type attach_type);
/* Accessors of bpf_program */
struct bpf_program;
@@ -153,6 +217,9 @@ LIBBPF_API void bpf_program__set_ifindex(struct bpf_program *prog,
LIBBPF_API const char *bpf_program__title(const struct bpf_program *prog,
bool needs_copy);
+/* returns program size in bytes */
+LIBBPF_API size_t bpf_program__size(const struct bpf_program *prog);
+
LIBBPF_API int bpf_program__load(struct bpf_program *prog, char *license,
__u32 kern_version);
LIBBPF_API int bpf_program__fd(const struct bpf_program *prog);
@@ -187,6 +254,8 @@ LIBBPF_API struct bpf_link *
bpf_program__attach_raw_tracepoint(struct bpf_program *prog,
const char *tp_name);
+LIBBPF_API struct bpf_link *
+bpf_program__attach_trace(struct bpf_program *prog);
struct bpf_insn;
/*
@@ -262,8 +331,14 @@ LIBBPF_API int bpf_program__set_sched_cls(struct bpf_program *prog);
LIBBPF_API int bpf_program__set_sched_act(struct bpf_program *prog);
LIBBPF_API int bpf_program__set_xdp(struct bpf_program *prog);
LIBBPF_API int bpf_program__set_perf_event(struct bpf_program *prog);
+LIBBPF_API int bpf_program__set_tracing(struct bpf_program *prog);
+
+LIBBPF_API enum bpf_prog_type bpf_program__get_type(struct bpf_program *prog);
LIBBPF_API void bpf_program__set_type(struct bpf_program *prog,
enum bpf_prog_type type);
+
+LIBBPF_API enum bpf_attach_type
+bpf_program__get_expected_attach_type(struct bpf_program *prog);
LIBBPF_API void
bpf_program__set_expected_attach_type(struct bpf_program *prog,
enum bpf_attach_type type);
@@ -276,6 +351,7 @@ LIBBPF_API bool bpf_program__is_sched_cls(const struct bpf_program *prog);
LIBBPF_API bool bpf_program__is_sched_act(const struct bpf_program *prog);
LIBBPF_API bool bpf_program__is_xdp(const struct bpf_program *prog);
LIBBPF_API bool bpf_program__is_perf_event(const struct bpf_program *prog);
+LIBBPF_API bool bpf_program__is_tracing(const struct bpf_program *prog);
/*
* No need for __attribute__((packed)), all members of 'bpf_map_def'
@@ -335,6 +411,9 @@ LIBBPF_API int bpf_map__resize(struct bpf_map *map, __u32 max_entries);
LIBBPF_API bool bpf_map__is_offload_neutral(const struct bpf_map *map);
LIBBPF_API bool bpf_map__is_internal(const struct bpf_map *map);
LIBBPF_API void bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex);
+LIBBPF_API int bpf_map__set_pin_path(struct bpf_map *map, const char *path);
+LIBBPF_API const char *bpf_map__get_pin_path(const struct bpf_map *map);
+LIBBPF_API bool bpf_map__is_pinned(const struct bpf_map *map);
LIBBPF_API int bpf_map__pin(struct bpf_map *map, const char *path);
LIBBPF_API int bpf_map__unpin(struct bpf_map *map, const char *path);
@@ -356,8 +435,18 @@ LIBBPF_API int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
LIBBPF_API int bpf_prog_load(const char *file, enum bpf_prog_type type,
struct bpf_object **pobj, int *prog_fd);
+struct xdp_link_info {
+ __u32 prog_id;
+ __u32 drv_prog_id;
+ __u32 hw_prog_id;
+ __u32 skb_prog_id;
+ __u8 attach_mode;
+};
+
LIBBPF_API int bpf_set_link_xdp_fd(int ifindex, int fd, __u32 flags);
LIBBPF_API int bpf_get_link_xdp_id(int ifindex, __u32 *prog_id, __u32 flags);
+LIBBPF_API int bpf_get_link_xdp_info(int ifindex, struct xdp_link_info *info,
+ size_t info_size, __u32 flags);
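+
+/* Illustrative query (a sketch; ifindex is assumed to refer to a live
+ * device). Passing sizeof(info) is forward/backward safe: the library
+ * copies min(info_size, its internal struct size) and zero-fills the rest.
+ *
+ *	struct xdp_link_info info = {};
+ *	int err = bpf_get_link_xdp_info(ifindex, &info, sizeof(info), 0);
+ */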
struct perf_buffer;
diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map
index d04c7cb623ed..8ddc2c40e482 100644
--- a/tools/lib/bpf/libbpf.map
+++ b/tools/lib/bpf/libbpf.map
@@ -190,3 +190,21 @@ LIBBPF_0.0.5 {
global:
bpf_btf_get_next_id;
} LIBBPF_0.0.4;
+
+LIBBPF_0.0.6 {
+ global:
+ bpf_get_link_xdp_info;
+ bpf_map__get_pin_path;
+ bpf_map__is_pinned;
+ bpf_map__set_pin_path;
+ bpf_object__open_file;
+ bpf_object__open_mem;
+ bpf_program__attach_trace;
+ bpf_program__get_expected_attach_type;
+ bpf_program__get_type;
+ bpf_program__is_tracing;
+ bpf_program__set_tracing;
+ bpf_program__size;
+ btf__find_by_name_kind;
+ libbpf_find_vmlinux_btf_id;
+} LIBBPF_0.0.5;
diff --git a/tools/lib/bpf/libbpf_internal.h b/tools/lib/bpf/libbpf_internal.h
index 98216a69c32f..97ac17a64a58 100644
--- a/tools/lib/bpf/libbpf_internal.h
+++ b/tools/lib/bpf/libbpf_internal.h
@@ -59,10 +59,42 @@ do { \
libbpf_print(level, "libbpf: " fmt, ##__VA_ARGS__); \
} while (0)
-#define pr_warning(fmt, ...) __pr(LIBBPF_WARN, fmt, ##__VA_ARGS__)
+#define pr_warn(fmt, ...) __pr(LIBBPF_WARN, fmt, ##__VA_ARGS__)
#define pr_info(fmt, ...) __pr(LIBBPF_INFO, fmt, ##__VA_ARGS__)
#define pr_debug(fmt, ...) __pr(LIBBPF_DEBUG, fmt, ##__VA_ARGS__)
+static inline bool libbpf_validate_opts(const char *opts,
+ size_t opts_sz, size_t user_sz,
+ const char *type_name)
+{
+ if (user_sz < sizeof(size_t)) {
+ pr_warn("%s size (%zu) is too small\n", type_name, user_sz);
+ return false;
+ }
+ if (user_sz > opts_sz) {
+ size_t i;
+
+ for (i = opts_sz; i < user_sz; i++) {
+ if (opts[i]) {
+ pr_warn("%s has non-zero extra bytes\n",
+ type_name);
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
+#define OPTS_VALID(opts, type) \
+ (!(opts) || libbpf_validate_opts((const char *)opts, \
+ offsetofend(struct type, \
+ type##__last_field), \
+ (opts)->sz, #type))
+#define OPTS_HAS(opts, field) \
+ ((opts) && opts->sz >= offsetofend(typeof(*(opts)), field))
+#define OPTS_GET(opts, field, fallback_value) \
+ (OPTS_HAS(opts, field) ? (opts)->field : fallback_value)
+
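+/* Typical guard at the top of an opts-taking API (a sketch; actual callers
+ * may differ):
+ *
+ *	if (!OPTS_VALID(opts, bpf_object_open_opts))
+ *		return ERR_PTR(-EINVAL);
+ *	obj_name = OPTS_GET(opts, object_name, NULL);
+ */
+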
int libbpf__load_raw_btf(const char *raw_types, size_t types_len,
const char *str_sec, size_t str_len);
@@ -94,7 +126,7 @@ struct btf_ext {
};
struct btf_ext_info func_info;
struct btf_ext_info line_info;
- struct btf_ext_info offset_reloc_info;
+ struct btf_ext_info field_reloc_info;
__u32 data_size;
};
@@ -119,13 +151,27 @@ struct bpf_line_info_min {
__u32 line_col;
};
-/* The minimum bpf_offset_reloc checked by the loader
+/* bpf_field_info_kind encodes which aspect of captured field has to be
+ * adjusted by relocations. Currently supported values are:
+ * - BPF_FIELD_BYTE_OFFSET: field offset (in bytes);
+ * - BPF_FIELD_EXISTS: field existence (1, if field exists; 0, otherwise);
+ */
+enum bpf_field_info_kind {
+ BPF_FIELD_BYTE_OFFSET = 0, /* field byte offset */
+ BPF_FIELD_BYTE_SIZE = 1,
+ BPF_FIELD_EXISTS = 2, /* field existence in target kernel */
+ BPF_FIELD_SIGNED = 3,
+ BPF_FIELD_LSHIFT_U64 = 4,
+ BPF_FIELD_RSHIFT_U64 = 5,
+};
+
+/* The minimum bpf_field_reloc checked by the loader
*
- * Offset relocation captures the following data:
+ * Field relocation captures the following data:
* - insn_off - instruction offset (in bytes) within a BPF program that needs
- * its insn->imm field to be relocated with actual offset;
+ * its insn->imm field to be relocated with actual field info;
* - type_id - BTF type ID of the "root" (containing) entity of a relocatable
- * offset;
+ * field;
* - access_str_off - offset into corresponding .BTF string section. String
* itself encodes an accessed field using a sequence of field and array
 * indices, separated by colon (:). It's conceptually very close to LLVM's
@@ -156,15 +202,16 @@ struct bpf_line_info_min {
* bpf_probe_read(&dst, sizeof(dst),
* __builtin_preserve_access_index(&src->a.b.c));
*
- * In this case Clang will emit offset relocation recording necessary data to
+ * In this case Clang will emit field relocation recording necessary data to
* be able to find offset of embedded `a.b.c` field within `src` struct.
*
* [0] https://llvm.org/docs/LangRef.html#getelementptr-instruction
*/
-struct bpf_offset_reloc {
+struct bpf_field_reloc {
__u32 insn_off;
__u32 type_id;
__u32 access_str_off;
+ enum bpf_field_info_kind kind;
};
#endif /* __LIBBPF_LIBBPF_INTERNAL_H */
diff --git a/tools/lib/bpf/libbpf_probes.c b/tools/lib/bpf/libbpf_probes.c
index 4b0b0364f5fc..a9eb8b322671 100644
--- a/tools/lib/bpf/libbpf_probes.c
+++ b/tools/lib/bpf/libbpf_probes.c
@@ -102,6 +102,7 @@ probe_load(enum bpf_prog_type prog_type, const struct bpf_insn *insns,
case BPF_PROG_TYPE_FLOW_DISSECTOR:
case BPF_PROG_TYPE_CGROUP_SYSCTL:
case BPF_PROG_TYPE_CGROUP_SOCKOPT:
+ case BPF_PROG_TYPE_TRACING:
default:
break;
}
diff --git a/tools/lib/bpf/netlink.c b/tools/lib/bpf/netlink.c
index ce3ec81b71c0..5065c1aa1061 100644
--- a/tools/lib/bpf/netlink.c
+++ b/tools/lib/bpf/netlink.c
@@ -12,6 +12,7 @@
#include "bpf.h"
#include "libbpf.h"
+#include "libbpf_internal.h"
#include "nlattr.h"
#ifndef SOL_NETLINK
@@ -24,7 +25,7 @@ typedef int (*__dump_nlmsg_t)(struct nlmsghdr *nlmsg, libbpf_dump_nlmsg_t,
struct xdp_id_md {
int ifindex;
__u32 flags;
- __u32 id;
+ struct xdp_link_info info;
};
int libbpf_netlink_open(__u32 *nl_pid)
@@ -43,7 +44,7 @@ int libbpf_netlink_open(__u32 *nl_pid)
if (setsockopt(sock, SOL_NETLINK, NETLINK_EXT_ACK,
&one, sizeof(one)) < 0) {
- fprintf(stderr, "Netlink error reporting not supported\n");
+ pr_warn("Netlink error reporting not supported\n");
}
if (bind(sock, (struct sockaddr *)&sa, sizeof(sa)) < 0) {
@@ -202,26 +203,11 @@ static int __dump_link_nlmsg(struct nlmsghdr *nlh,
return dump_link_nlmsg(cookie, ifi, tb);
}
-static unsigned char get_xdp_id_attr(unsigned char mode, __u32 flags)
-{
- if (mode != XDP_ATTACHED_MULTI)
- return IFLA_XDP_PROG_ID;
- if (flags & XDP_FLAGS_DRV_MODE)
- return IFLA_XDP_DRV_PROG_ID;
- if (flags & XDP_FLAGS_HW_MODE)
- return IFLA_XDP_HW_PROG_ID;
- if (flags & XDP_FLAGS_SKB_MODE)
- return IFLA_XDP_SKB_PROG_ID;
-
- return IFLA_XDP_UNSPEC;
-}
-
-static int get_xdp_id(void *cookie, void *msg, struct nlattr **tb)
+static int get_xdp_info(void *cookie, void *msg, struct nlattr **tb)
{
struct nlattr *xdp_tb[IFLA_XDP_MAX + 1];
struct xdp_id_md *xdp_id = cookie;
struct ifinfomsg *ifinfo = msg;
- unsigned char mode, xdp_attr;
int ret;
if (xdp_id->ifindex && xdp_id->ifindex != ifinfo->ifi_index)
@@ -237,27 +223,40 @@ static int get_xdp_id(void *cookie, void *msg, struct nlattr **tb)
if (!xdp_tb[IFLA_XDP_ATTACHED])
return 0;
- mode = libbpf_nla_getattr_u8(xdp_tb[IFLA_XDP_ATTACHED]);
- if (mode == XDP_ATTACHED_NONE)
- return 0;
+ xdp_id->info.attach_mode = libbpf_nla_getattr_u8(
+ xdp_tb[IFLA_XDP_ATTACHED]);
- xdp_attr = get_xdp_id_attr(mode, xdp_id->flags);
- if (!xdp_attr || !xdp_tb[xdp_attr])
+ if (xdp_id->info.attach_mode == XDP_ATTACHED_NONE)
return 0;
- xdp_id->id = libbpf_nla_getattr_u32(xdp_tb[xdp_attr]);
+ if (xdp_tb[IFLA_XDP_PROG_ID])
+ xdp_id->info.prog_id = libbpf_nla_getattr_u32(
+ xdp_tb[IFLA_XDP_PROG_ID]);
+
+ if (xdp_tb[IFLA_XDP_SKB_PROG_ID])
+ xdp_id->info.skb_prog_id = libbpf_nla_getattr_u32(
+ xdp_tb[IFLA_XDP_SKB_PROG_ID]);
+
+ if (xdp_tb[IFLA_XDP_DRV_PROG_ID])
+ xdp_id->info.drv_prog_id = libbpf_nla_getattr_u32(
+ xdp_tb[IFLA_XDP_DRV_PROG_ID]);
+
+ if (xdp_tb[IFLA_XDP_HW_PROG_ID])
+ xdp_id->info.hw_prog_id = libbpf_nla_getattr_u32(
+ xdp_tb[IFLA_XDP_HW_PROG_ID]);
return 0;
}
-int bpf_get_link_xdp_id(int ifindex, __u32 *prog_id, __u32 flags)
+int bpf_get_link_xdp_info(int ifindex, struct xdp_link_info *info,
+ size_t info_size, __u32 flags)
{
struct xdp_id_md xdp_id = {};
int sock, ret;
__u32 nl_pid;
__u32 mask;
- if (flags & ~XDP_FLAGS_MASK)
+ if (flags & ~XDP_FLAGS_MASK || !info_size)
return -EINVAL;
/* Check whether the single {HW,DRV,SKB} mode is set */
@@ -273,14 +272,44 @@ int bpf_get_link_xdp_id(int ifindex, __u32 *prog_id, __u32 flags)
xdp_id.ifindex = ifindex;
xdp_id.flags = flags;
- ret = libbpf_nl_get_link(sock, nl_pid, get_xdp_id, &xdp_id);
- if (!ret)
- *prog_id = xdp_id.id;
+ ret = libbpf_nl_get_link(sock, nl_pid, get_xdp_info, &xdp_id);
+ if (!ret) {
+ size_t sz = min(info_size, sizeof(xdp_id.info));
+
+ memcpy(info, &xdp_id.info, sz);
+ memset((void *) info + sz, 0, info_size - sz);
+ }
close(sock);
return ret;
}
+static __u32 get_xdp_id(struct xdp_link_info *info, __u32 flags)
+{
+ if (info->attach_mode != XDP_ATTACHED_MULTI)
+ return info->prog_id;
+ if (flags & XDP_FLAGS_DRV_MODE)
+ return info->drv_prog_id;
+ if (flags & XDP_FLAGS_HW_MODE)
+ return info->hw_prog_id;
+ if (flags & XDP_FLAGS_SKB_MODE)
+ return info->skb_prog_id;
+
+ return 0;
+}
+
+int bpf_get_link_xdp_id(int ifindex, __u32 *prog_id, __u32 flags)
+{
+ struct xdp_link_info info;
+ int ret;
+
+ ret = bpf_get_link_xdp_info(ifindex, &info, sizeof(info), flags);
+ if (!ret)
+ *prog_id = get_xdp_id(&info, flags);
+
+ return ret;
+}
+
int libbpf_nl_get_link(int sock, unsigned int nl_pid,
libbpf_dump_nlmsg_t dump_link_nlmsg, void *cookie)
{
diff --git a/tools/lib/bpf/nlattr.c b/tools/lib/bpf/nlattr.c
index 1e69c0c8d413..8db44bbfc66d 100644
--- a/tools/lib/bpf/nlattr.c
+++ b/tools/lib/bpf/nlattr.c
@@ -8,6 +8,7 @@
#include <errno.h>
#include "nlattr.h"
+#include "libbpf_internal.h"
#include <linux/rtnetlink.h>
#include <string.h>
#include <stdio.h>
@@ -121,8 +122,8 @@ int libbpf_nla_parse(struct nlattr *tb[], int maxtype, struct nlattr *head,
}
if (tb[type])
- fprintf(stderr, "Attribute of type %#x found multiple times in message, "
- "previous attribute is being ignored.\n", type);
+ pr_warn("Attribute of type %#x found multiple times in message, "
+ "previous attribute is being ignored.\n", type);
tb[type] = nla;
}
@@ -181,15 +182,14 @@ int libbpf_nla_dump_errormsg(struct nlmsghdr *nlh)
if (libbpf_nla_parse(tb, NLMSGERR_ATTR_MAX, attr, alen,
extack_policy) != 0) {
- fprintf(stderr,
- "Failed to parse extended error attributes\n");
+ pr_warn("Failed to parse extended error attributes\n");
return 0;
}
if (tb[NLMSGERR_ATTR_MSG])
errmsg = (char *) libbpf_nla_data(tb[NLMSGERR_ATTR_MSG]);
- fprintf(stderr, "Kernel error message: %s\n", errmsg);
+ pr_warn("Kernel error message: %s\n", errmsg);
return 0;
}
diff --git a/tools/lib/bpf/test_libbpf.cpp b/tools/lib/bpf/test_libbpf.c
index fc134873bb6d..f0eb2727b766 100644
--- a/tools/lib/bpf/test_libbpf.cpp
+++ b/tools/lib/bpf/test_libbpf.c
@@ -7,12 +7,14 @@
int main(int argc, char *argv[])
{
- /* libbpf.h */
- libbpf_set_print(NULL);
+ /* libbpf.h */
+ libbpf_set_print(NULL);
- /* bpf.h */
- bpf_prog_get_fd_by_id(0);
+ /* bpf.h */
+ bpf_prog_get_fd_by_id(0);
- /* btf.h */
- btf__new(NULL, 0);
+ /* btf.h */
+ btf__new(NULL, 0);
+
+ return 0;
}
diff --git a/tools/lib/bpf/xsk.c b/tools/lib/bpf/xsk.c
index a902838f9fcc..8e0ffa800a71 100644
--- a/tools/lib/bpf/xsk.c
+++ b/tools/lib/bpf/xsk.c
@@ -73,6 +73,21 @@ struct xsk_nl_info {
int fd;
};
+/* Up until and including Linux 5.3 */
+struct xdp_ring_offset_v1 {
+ __u64 producer;
+ __u64 consumer;
+ __u64 desc;
+};
+
+/* Up until and including Linux 5.3 */
+struct xdp_mmap_offsets_v1 {
+ struct xdp_ring_offset_v1 rx;
+ struct xdp_ring_offset_v1 tx;
+ struct xdp_ring_offset_v1 fr;
+ struct xdp_ring_offset_v1 cr;
+};
+
int xsk_umem__fd(const struct xsk_umem *umem)
{
return umem ? umem->fd : -EINVAL;
@@ -133,6 +148,58 @@ static int xsk_set_xdp_socket_config(struct xsk_socket_config *cfg,
return 0;
}
+static void xsk_mmap_offsets_v1(struct xdp_mmap_offsets *off)
+{
+ struct xdp_mmap_offsets_v1 off_v1;
+
+ /* getsockopt on a kernel <= 5.3 has no flags fields.
+ * Copy over the offsets to the correct places in the >=5.4 format
+ * and put the flags where they would have been on that kernel.
+ */
+ memcpy(&off_v1, off, sizeof(off_v1));
+
+ off->rx.producer = off_v1.rx.producer;
+ off->rx.consumer = off_v1.rx.consumer;
+ off->rx.desc = off_v1.rx.desc;
+ off->rx.flags = off_v1.rx.consumer + sizeof(__u32);
+
+ off->tx.producer = off_v1.tx.producer;
+ off->tx.consumer = off_v1.tx.consumer;
+ off->tx.desc = off_v1.tx.desc;
+ off->tx.flags = off_v1.tx.consumer + sizeof(__u32);
+
+ off->fr.producer = off_v1.fr.producer;
+ off->fr.consumer = off_v1.fr.consumer;
+ off->fr.desc = off_v1.fr.desc;
+ off->fr.flags = off_v1.fr.consumer + sizeof(__u32);
+
+ off->cr.producer = off_v1.cr.producer;
+ off->cr.consumer = off_v1.cr.consumer;
+ off->cr.desc = off_v1.cr.desc;
+ off->cr.flags = off_v1.cr.consumer + sizeof(__u32);
+}
+
+static int xsk_get_mmap_offsets(int fd, struct xdp_mmap_offsets *off)
+{
+ socklen_t optlen;
+ int err;
+
+ optlen = sizeof(*off);
+ err = getsockopt(fd, SOL_XDP, XDP_MMAP_OFFSETS, off, &optlen);
+ if (err)
+ return err;
+
+ if (optlen == sizeof(*off))
+ return 0;
+
+ if (optlen == sizeof(struct xdp_mmap_offsets_v1)) {
+ xsk_mmap_offsets_v1(off);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
int xsk_umem__create_v0_0_4(struct xsk_umem **umem_ptr, void *umem_area,
__u64 size, struct xsk_ring_prod *fill,
struct xsk_ring_cons *comp,
@@ -141,7 +208,6 @@ int xsk_umem__create_v0_0_4(struct xsk_umem **umem_ptr, void *umem_area,
struct xdp_mmap_offsets off;
struct xdp_umem_reg mr;
struct xsk_umem *umem;
- socklen_t optlen;
void *map;
int err;
@@ -163,6 +229,7 @@ int xsk_umem__create_v0_0_4(struct xsk_umem **umem_ptr, void *umem_area,
umem->umem_area = umem_area;
xsk_set_umem_config(&umem->config, usr_config);
+ memset(&mr, 0, sizeof(mr));
mr.addr = (uintptr_t)umem_area;
mr.len = size;
mr.chunk_size = umem->config.frame_size;
@@ -189,8 +256,7 @@ int xsk_umem__create_v0_0_4(struct xsk_umem **umem_ptr, void *umem_area,
goto out_socket;
}
- optlen = sizeof(off);
- err = getsockopt(umem->fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen);
+ err = xsk_get_mmap_offsets(umem->fd, &off);
if (err) {
err = -errno;
goto out_socket;
@@ -273,33 +339,55 @@ static int xsk_load_xdp_prog(struct xsk_socket *xsk)
/* This is the C-program:
* SEC("xdp_sock") int xdp_sock_prog(struct xdp_md *ctx)
* {
- * int index = ctx->rx_queue_index;
+ * int ret, index = ctx->rx_queue_index;
*
 * // A set entry here means that the corresponding queue_id
* // has an active AF_XDP socket bound to it.
+ * ret = bpf_redirect_map(&xsks_map, index, XDP_PASS);
+ * if (ret > 0)
+ * return ret;
+ *
+ * // Fallback for pre-5.3 kernels, not supporting default
+ * // action in the flags parameter.
* if (bpf_map_lookup_elem(&xsks_map, &index))
* return bpf_redirect_map(&xsks_map, index, 0);
- *
* return XDP_PASS;
* }
*/
struct bpf_insn prog[] = {
- /* r1 = *(u32 *)(r1 + 16) */
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1, 16),
- /* *(u32 *)(r10 - 4) = r1 */
- BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_1, -4),
+ /* r2 = *(u32 *)(r1 + 16) */
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 16),
+ /* *(u32 *)(r10 - 4) = r2 */
+ BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_2, -4),
+ /* r1 = xskmap[] */
+ BPF_LD_MAP_FD(BPF_REG_1, xsk->xsks_map_fd),
+ /* r3 = XDP_PASS */
+ BPF_MOV64_IMM(BPF_REG_3, 2),
+ /* call bpf_redirect_map */
+ BPF_EMIT_CALL(BPF_FUNC_redirect_map),
+ /* if w0 != 0 goto pc+13 */
+ BPF_JMP32_IMM(BPF_JSGT, BPF_REG_0, 0, 13),
+ /* r2 = r10 */
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ /* r2 += -4 */
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ /* r1 = xskmap[] */
BPF_LD_MAP_FD(BPF_REG_1, xsk->xsks_map_fd),
+ /* call bpf_map_lookup_elem */
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ /* r1 = r0 */
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV32_IMM(BPF_REG_0, 2),
- /* if r1 == 0 goto +5 */
+ /* r0 = XDP_PASS */
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ /* if r1 == 0 goto pc+5 */
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 5),
/* r2 = *(u32 *)(r10 - 4) */
- BPF_LD_MAP_FD(BPF_REG_1, xsk->xsks_map_fd),
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_10, -4),
- BPF_MOV32_IMM(BPF_REG_3, 0),
+ /* r1 = xskmap[] */
+ BPF_LD_MAP_FD(BPF_REG_1, xsk->xsks_map_fd),
+ /* r3 = 0 */
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ /* call bpf_redirect_map */
BPF_EMIT_CALL(BPF_FUNC_redirect_map),
/* The jumps are to this instruction */
BPF_EXIT_INSN(),
@@ -310,7 +398,7 @@ static int xsk_load_xdp_prog(struct xsk_socket *xsk)
"LGPL-2.1 or BSD-2-Clause", 0, log_buf,
log_buf_size);
if (prog_fd < 0) {
- pr_warning("BPF log buffer:\n%s", log_buf);
+ pr_warn("BPF log buffer:\n%s", log_buf);
return prog_fd;
}
@@ -343,13 +431,18 @@ static int xsk_get_max_queues(struct xsk_socket *xsk)
goto out;
}
- if (err || channels.max_combined == 0)
+ if (err) {
/* If the device says it has no channels, then all traffic
* is sent to a single stream, so max queues = 1.
*/
ret = 1;
- else
- ret = channels.max_combined;
+ } else {
+ /* Take the max of rx, tx, combined. Drivers return
+ * the number of channels in different ways.
+ */
+ ret = max(channels.max_rx, channels.max_tx);
+ ret = max(ret, (int)channels.max_combined);
+ }
out:
close(fd);
@@ -465,6 +558,8 @@ static int xsk_setup_xdp_prog(struct xsk_socket *xsk)
}
} else {
xsk->prog_fd = bpf_prog_get_fd_by_id(prog_id);
+ if (xsk->prog_fd < 0)
+ return -errno;
err = xsk_lookup_bpf_maps(xsk);
if (err) {
close(xsk->prog_fd);
@@ -472,7 +567,8 @@ static int xsk_setup_xdp_prog(struct xsk_socket *xsk)
}
}
- err = xsk_set_bpf_maps(xsk);
+ if (xsk->rx)
+ err = xsk_set_bpf_maps(xsk);
if (err) {
xsk_delete_bpf_maps(xsk);
close(xsk->prog_fd);
@@ -491,21 +587,26 @@ int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname,
struct sockaddr_xdp sxdp = {};
struct xdp_mmap_offsets off;
struct xsk_socket *xsk;
- socklen_t optlen;
int err;
- if (!umem || !xsk_ptr || !rx || !tx)
+ if (!umem || !xsk_ptr || !(rx || tx))
return -EFAULT;
- if (umem->refcount) {
- pr_warning("Error: shared umems not supported by libbpf.\n");
- return -EBUSY;
- }
-
xsk = calloc(1, sizeof(*xsk));
if (!xsk)
return -ENOMEM;
+ err = xsk_set_xdp_socket_config(&xsk->config, usr_config);
+ if (err)
+ goto out_xsk_alloc;
+
+ if (umem->refcount &&
+ !(xsk->config.libbpf_flags & XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD)) {
+ pr_warn("Error: shared umems not supported by libbpf-supplied XDP program.\n");
+ err = -EBUSY;
+ goto out_xsk_alloc;
+ }
+
if (umem->refcount++ > 0) {
xsk->fd = socket(AF_XDP, SOCK_RAW, 0);
if (xsk->fd < 0) {
@@ -527,10 +628,6 @@ int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname,
memcpy(xsk->ifname, ifname, IFNAMSIZ - 1);
xsk->ifname[IFNAMSIZ - 1] = '\0';
- err = xsk_set_xdp_socket_config(&xsk->config, usr_config);
- if (err)
- goto out_socket;
-
if (rx) {
err = setsockopt(xsk->fd, SOL_XDP, XDP_RX_RING,
&xsk->config.rx_size,
@@ -550,8 +647,7 @@ int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname,
}
}
- optlen = sizeof(off);
- err = getsockopt(xsk->fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen);
+ err = xsk_get_mmap_offsets(xsk->fd, &off);
if (err) {
err = -errno;
goto out_socket;
@@ -599,7 +695,12 @@ int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname,
sxdp.sxdp_family = PF_XDP;
sxdp.sxdp_ifindex = xsk->ifindex;
sxdp.sxdp_queue_id = xsk->queue_id;
- sxdp.sxdp_flags = xsk->config.bind_flags;
+ if (umem->refcount > 1) {
+ sxdp.sxdp_flags = XDP_SHARED_UMEM;
+ sxdp.sxdp_shared_umem_fd = umem->fd;
+ } else {
+ sxdp.sxdp_flags = xsk->config.bind_flags;
+ }
err = bind(xsk->fd, (struct sockaddr *)&sxdp, sizeof(sxdp));
if (err) {
@@ -637,7 +738,6 @@ out_xsk_alloc:
int xsk_umem__delete(struct xsk_umem *umem)
{
struct xdp_mmap_offsets off;
- socklen_t optlen;
int err;
if (!umem)
@@ -646,8 +746,7 @@ int xsk_umem__delete(struct xsk_umem *umem)
if (umem->refcount)
return -EBUSY;
- optlen = sizeof(off);
- err = getsockopt(umem->fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen);
+ err = xsk_get_mmap_offsets(umem->fd, &off);
if (!err) {
munmap(umem->fill->ring - off.fr.desc,
off.fr.desc + umem->config.fill_size * sizeof(__u64));
@@ -665,7 +764,6 @@ void xsk_socket__delete(struct xsk_socket *xsk)
{
size_t desc_sz = sizeof(struct xdp_desc);
struct xdp_mmap_offsets off;
- socklen_t optlen;
int err;
if (!xsk)
@@ -676,8 +774,7 @@ void xsk_socket__delete(struct xsk_socket *xsk)
close(xsk->prog_fd);
}
- optlen = sizeof(off);
- err = getsockopt(xsk->fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen);
+ err = xsk_get_mmap_offsets(xsk->fd, &off);
if (!err) {
if (xsk->rx) {
munmap(xsk->rx->ring - off.rx.desc,
diff --git a/tools/lib/lockdep/include/liblockdep/common.h b/tools/lib/lockdep/include/liblockdep/common.h
index a81d91d4fc78..a6d7ee5f18ba 100644
--- a/tools/lib/lockdep/include/liblockdep/common.h
+++ b/tools/lib/lockdep/include/liblockdep/common.h
@@ -42,8 +42,7 @@ void lockdep_init_map(struct lockdep_map *lock, const char *name,
void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
int trylock, int read, int check,
struct lockdep_map *nest_lock, unsigned long ip);
-void lock_release(struct lockdep_map *lock, int nested,
- unsigned long ip);
+void lock_release(struct lockdep_map *lock, unsigned long ip);
void lockdep_reset_lock(struct lockdep_map *lock);
void lockdep_register_key(struct lock_class_key *key);
void lockdep_unregister_key(struct lock_class_key *key);
diff --git a/tools/lib/lockdep/include/liblockdep/mutex.h b/tools/lib/lockdep/include/liblockdep/mutex.h
index 783dd0df06f9..bd106b82759b 100644
--- a/tools/lib/lockdep/include/liblockdep/mutex.h
+++ b/tools/lib/lockdep/include/liblockdep/mutex.h
@@ -42,7 +42,7 @@ static inline int liblockdep_pthread_mutex_lock(liblockdep_pthread_mutex_t *lock
static inline int liblockdep_pthread_mutex_unlock(liblockdep_pthread_mutex_t *lock)
{
- lock_release(&lock->dep_map, 0, (unsigned long)_RET_IP_);
+ lock_release(&lock->dep_map, (unsigned long)_RET_IP_);
return pthread_mutex_unlock(&lock->mutex);
}
diff --git a/tools/lib/lockdep/include/liblockdep/rwlock.h b/tools/lib/lockdep/include/liblockdep/rwlock.h
index 365762e3a1ea..6d5d2932bf4d 100644
--- a/tools/lib/lockdep/include/liblockdep/rwlock.h
+++ b/tools/lib/lockdep/include/liblockdep/rwlock.h
@@ -44,7 +44,7 @@ static inline int liblockdep_pthread_rwlock_rdlock(liblockdep_pthread_rwlock_t *
static inline int liblockdep_pthread_rwlock_unlock(liblockdep_pthread_rwlock_t *lock)
{
- lock_release(&lock->dep_map, 0, (unsigned long)_RET_IP_);
+ lock_release(&lock->dep_map, (unsigned long)_RET_IP_);
return pthread_rwlock_unlock(&lock->rwlock);
}
diff --git a/tools/lib/lockdep/preload.c b/tools/lib/lockdep/preload.c
index 76245d16196d..8f1adbe887b2 100644
--- a/tools/lib/lockdep/preload.c
+++ b/tools/lib/lockdep/preload.c
@@ -270,7 +270,7 @@ int pthread_mutex_lock(pthread_mutex_t *mutex)
*/
r = ll_pthread_mutex_lock(mutex);
if (r)
- lock_release(&__get_lock(mutex)->dep_map, 0, (unsigned long)_RET_IP_);
+ lock_release(&__get_lock(mutex)->dep_map, (unsigned long)_RET_IP_);
return r;
}
@@ -284,7 +284,7 @@ int pthread_mutex_trylock(pthread_mutex_t *mutex)
lock_acquire(&__get_lock(mutex)->dep_map, 0, 1, 0, 1, NULL, (unsigned long)_RET_IP_);
r = ll_pthread_mutex_trylock(mutex);
if (r)
- lock_release(&__get_lock(mutex)->dep_map, 0, (unsigned long)_RET_IP_);
+ lock_release(&__get_lock(mutex)->dep_map, (unsigned long)_RET_IP_);
return r;
}
@@ -295,7 +295,7 @@ int pthread_mutex_unlock(pthread_mutex_t *mutex)
try_init_preload();
- lock_release(&__get_lock(mutex)->dep_map, 0, (unsigned long)_RET_IP_);
+ lock_release(&__get_lock(mutex)->dep_map, (unsigned long)_RET_IP_);
/*
* Just like taking a lock, only in reverse!
*
@@ -355,7 +355,7 @@ int pthread_rwlock_rdlock(pthread_rwlock_t *rwlock)
lock_acquire(&__get_lock(rwlock)->dep_map, 0, 0, 2, 1, NULL, (unsigned long)_RET_IP_);
r = ll_pthread_rwlock_rdlock(rwlock);
if (r)
- lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_);
+ lock_release(&__get_lock(rwlock)->dep_map, (unsigned long)_RET_IP_);
return r;
}
@@ -369,7 +369,7 @@ int pthread_rwlock_tryrdlock(pthread_rwlock_t *rwlock)
lock_acquire(&__get_lock(rwlock)->dep_map, 0, 1, 2, 1, NULL, (unsigned long)_RET_IP_);
r = ll_pthread_rwlock_tryrdlock(rwlock);
if (r)
- lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_);
+ lock_release(&__get_lock(rwlock)->dep_map, (unsigned long)_RET_IP_);
return r;
}
@@ -383,7 +383,7 @@ int pthread_rwlock_trywrlock(pthread_rwlock_t *rwlock)
lock_acquire(&__get_lock(rwlock)->dep_map, 0, 1, 0, 1, NULL, (unsigned long)_RET_IP_);
r = ll_pthread_rwlock_trywrlock(rwlock);
if (r)
- lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_);
+ lock_release(&__get_lock(rwlock)->dep_map, (unsigned long)_RET_IP_);
return r;
}
@@ -397,7 +397,7 @@ int pthread_rwlock_wrlock(pthread_rwlock_t *rwlock)
lock_acquire(&__get_lock(rwlock)->dep_map, 0, 0, 0, 1, NULL, (unsigned long)_RET_IP_);
r = ll_pthread_rwlock_wrlock(rwlock);
if (r)
- lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_);
+ lock_release(&__get_lock(rwlock)->dep_map, (unsigned long)_RET_IP_);
return r;
}
@@ -408,7 +408,7 @@ int pthread_rwlock_unlock(pthread_rwlock_t *rwlock)
init_preload();
- lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_);
+ lock_release(&__get_lock(rwlock)->dep_map, (unsigned long)_RET_IP_);
r = ll_pthread_rwlock_unlock(rwlock);
if (r)
lock_acquire(&__get_lock(rwlock)->dep_map, 0, 0, 0, 1, NULL, (unsigned long)_RET_IP_);
diff --git a/tools/lib/subcmd/Makefile b/tools/lib/subcmd/Makefile
index 5b2cd5e58df0..1c777a72bb39 100644
--- a/tools/lib/subcmd/Makefile
+++ b/tools/lib/subcmd/Makefile
@@ -19,8 +19,7 @@ MAKEFLAGS += --no-print-directory
LIBFILE = $(OUTPUT)libsubcmd.a
-CFLAGS := $(EXTRA_WARNINGS) $(EXTRA_CFLAGS)
-CFLAGS += -ggdb3 -Wall -Wextra -std=gnu99 -fPIC
+CFLAGS := -ggdb3 -Wall -Wextra -std=gnu99 -fPIC
ifeq ($(DEBUG),0)
ifeq ($(feature-fortify-source), 1)
@@ -28,7 +27,9 @@ ifeq ($(DEBUG),0)
endif
endif
-ifeq ($(CC_NO_CLANG), 0)
+ifeq ($(DEBUG),1)
+ CFLAGS += -O0
+else ifeq ($(CC_NO_CLANG), 0)
CFLAGS += -O3
else
CFLAGS += -O6
@@ -43,6 +44,8 @@ CFLAGS += -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE
CFLAGS += -I$(srctree)/tools/include/
+CFLAGS += $(EXTRA_WARNINGS) $(EXTRA_CFLAGS)
+
SUBCMD_IN := $(OUTPUT)libsubcmd-in.o
all:
diff --git a/tools/lib/traceevent/Makefile b/tools/lib/traceevent/Makefile
index 5315f3787f8d..cbb429f55062 100644
--- a/tools/lib/traceevent/Makefile
+++ b/tools/lib/traceevent/Makefile
@@ -232,10 +232,10 @@ install_pkgconfig:
install_headers:
$(call QUIET_INSTALL, headers) \
- $(call do_install,event-parse.h,$(DESTDIR)$(includedir_SQ),644); \
- $(call do_install,event-utils.h,$(DESTDIR)$(includedir_SQ),644); \
- $(call do_install,trace-seq.h,$(DESTDIR)$(includedir_SQ),644); \
- $(call do_install,kbuffer.h,$(DESTDIR)$(includedir_SQ),644)
+ $(call do_install,event-parse.h,$(includedir_SQ),644); \
+ $(call do_install,event-utils.h,$(includedir_SQ),644); \
+ $(call do_install,trace-seq.h,$(includedir_SQ),644); \
+ $(call do_install,kbuffer.h,$(includedir_SQ),644)
install: install_lib
diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c
index d948475585ce..beaa8b8c08ff 100644
--- a/tools/lib/traceevent/event-parse.c
+++ b/tools/lib/traceevent/event-parse.c
@@ -4395,8 +4395,10 @@ static struct tep_print_arg *make_bprint_args(char *fmt, void *data, int size, s
/* fall through */
case 'd':
case 'u':
- case 'x':
case 'i':
+ case 'x':
+ case 'X':
+ case 'o':
switch (ls) {
case 0:
vsize = 4;
@@ -5078,10 +5080,11 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct tep_e
/* fall through */
case 'd':
+ case 'u':
case 'i':
case 'x':
case 'X':
- case 'u':
+ case 'o':
if (!arg) {
do_warning_event(event, "no argument match");
event->flags |= TEP_EVENT_FL_FAILED;
diff --git a/tools/lib/traceevent/parse-filter.c b/tools/lib/traceevent/parse-filter.c
index 552592d153fb..f3cbf86e51ac 100644
--- a/tools/lib/traceevent/parse-filter.c
+++ b/tools/lib/traceevent/parse-filter.c
@@ -1473,8 +1473,10 @@ static int copy_filter_type(struct tep_event_filter *filter,
if (strcmp(str, "TRUE") == 0 || strcmp(str, "FALSE") == 0) {
/* Add trivial event */
arg = allocate_arg();
- if (arg == NULL)
+ if (arg == NULL) {
+ free(str);
return -1;
+ }
arg->type = TEP_FILTER_ARG_BOOLEAN;
if (strcmp(str, "TRUE") == 0)
@@ -1483,8 +1485,11 @@ static int copy_filter_type(struct tep_event_filter *filter,
arg->boolean.value = 0;
filter_type = add_filter_type(filter, event->id);
- if (filter_type == NULL)
+ if (filter_type == NULL) {
+ free(str);
+ free_arg(arg);
return -1;
+ }
filter_type->filter = arg;
diff --git a/tools/memory-model/Documentation/explanation.txt b/tools/memory-model/Documentation/explanation.txt
index 488f11f6c588..e91a2eb19592 100644
--- a/tools/memory-model/Documentation/explanation.txt
+++ b/tools/memory-model/Documentation/explanation.txt
@@ -27,9 +27,10 @@ Explanation of the Linux-Kernel Memory Consistency Model
19. AND THEN THERE WAS ALPHA
20. THE HAPPENS-BEFORE RELATION: hb
21. THE PROPAGATES-BEFORE RELATION: pb
- 22. RCU RELATIONS: rcu-link, rcu-gp, rcu-rscsi, rcu-fence, and rb
+ 22. RCU RELATIONS: rcu-link, rcu-gp, rcu-rscsi, rcu-order, rcu-fence, and rb
23. LOCKING
- 24. ODDS AND ENDS
+ 24. PLAIN ACCESSES AND DATA RACES
+ 25. ODDS AND ENDS
@@ -42,8 +43,7 @@ linux-kernel.bell and linux-kernel.cat files that make up the formal
version of the model; they are extremely terse and their meanings are
far from clear.
-This document describes the ideas underlying the LKMM, but excluding
-the modeling of bare C (or plain) shared memory accesses. It is meant
+This document describes the ideas underlying the LKMM. It is meant
for people who want to understand how the model was designed. It does
not go into the details of the code in the .bell and .cat files;
rather, it explains in English what the code expresses symbolically.
@@ -206,7 +206,7 @@ goes like this:
P0 stores 1 to buf before storing 1 to flag, since it executes
its instructions in order.
- Since an instruction (in this case, P1's store to flag) cannot
+ Since an instruction (in this case, P0's store to flag) cannot
execute before itself, the specified outcome is impossible.
However, real computer hardware almost never follows the Sequential
@@ -419,7 +419,7 @@ example:
The object code might call f(5) either before or after g(6); the
memory model cannot assume there is a fixed program order relation
-between them. (In fact, if the functions are inlined then the
+between them. (In fact, if the function calls are inlined then the
compiler might even interleave their object code.)
@@ -499,7 +499,7 @@ different CPUs (external reads-from, or rfe).
For our purposes, a memory location's initial value is treated as
though it had been written there by an imaginary initial store that
-executes on a separate CPU before the program runs.
+executes on a separate CPU before the main program runs.
Usage of the rf relation implicitly assumes that loads will always
read from a single store. It doesn't apply properly in the presence
@@ -857,7 +857,7 @@ outlined above. These restrictions involve the necessity of
maintaining cache coherence and the fact that a CPU can't operate on a
value before it knows what that value is, among other things.
-The formal version of the LKMM is defined by five requirements, or
+The formal version of the LKMM is defined by six requirements, or
axioms:
Sequential consistency per variable: This requires that the
@@ -877,10 +877,14 @@ axioms:
grace periods obey the rules of RCU, in particular, the
Grace-Period Guarantee.
+ Plain-coherence: This requires that plain memory accesses
+ (those not using READ_ONCE(), WRITE_ONCE(), etc.) must obey
+ the operational model's rules regarding cache coherence.
+
The first and second are quite common; they can be found in many
memory models (such as those for C11/C++11). The "happens-before" and
"propagation" axioms have analogs in other memory models as well. The
-"rcu" axiom is specific to the LKMM.
+"rcu" and "plain-coherence" axioms are specific to the LKMM.
Each of these axioms is discussed below.
@@ -955,7 +959,7 @@ atomic update. This is what the LKMM's "atomic" axiom says.
THE PRESERVED PROGRAM ORDER RELATION: ppo
-----------------------------------------
-There are many situations where a CPU is obligated to execute two
+There are many situations where a CPU is obliged to execute two
instructions in program order. We amalgamate them into the ppo (for
"preserved program order") relation, which links the po-earlier
instruction to the po-later instruction and is thus a sub-relation of
@@ -1425,8 +1429,8 @@ they execute means that it cannot have cycles. This requirement is
the content of the LKMM's "propagation" axiom.
-RCU RELATIONS: rcu-link, rcu-gp, rcu-rscsi, rcu-fence, and rb
--------------------------------------------------------------
+RCU RELATIONS: rcu-link, rcu-gp, rcu-rscsi, rcu-order, rcu-fence, and rb
+------------------------------------------------------------------------
RCU (Read-Copy-Update) is a powerful synchronization mechanism. It
rests on two concepts: grace periods and read-side critical sections.
@@ -1536,29 +1540,29 @@ Z's CPU before Z begins but doesn't propagate to some other CPU until
after X ends.) Similarly, X ->rcu-rscsi Y ->rcu-link Z says that X is
the end of a critical section which starts before Z begins.
-The LKMM goes on to define the rcu-fence relation as a sequence of
+The LKMM goes on to define the rcu-order relation as a sequence of
rcu-gp and rcu-rscsi links separated by rcu-link links, in which the
number of rcu-gp links is >= the number of rcu-rscsi links. For
example:
X ->rcu-gp Y ->rcu-link Z ->rcu-rscsi T ->rcu-link U ->rcu-gp V
-would imply that X ->rcu-fence V, because this sequence contains two
+would imply that X ->rcu-order V, because this sequence contains two
rcu-gp links and one rcu-rscsi link. (It also implies that
-X ->rcu-fence T and Z ->rcu-fence V.) On the other hand:
+X ->rcu-order T and Z ->rcu-order V.) On the other hand:
X ->rcu-rscsi Y ->rcu-link Z ->rcu-rscsi T ->rcu-link U ->rcu-gp V
-does not imply X ->rcu-fence V, because the sequence contains only
+does not imply X ->rcu-order V, because the sequence contains only
one rcu-gp link but two rcu-rscsi links.
-The rcu-fence relation is important because the Grace Period Guarantee
-means that rcu-fence acts kind of like a strong fence. In particular,
-E ->rcu-fence F implies not only that E begins before F ends, but also
-that any write po-before E will propagate to every CPU before any
-instruction po-after F can execute. (However, it does not imply that
-E must execute before F; in fact, each synchronize_rcu() fence event
-is linked to itself by rcu-fence as a degenerate case.)
+The rcu-order relation is important because the Grace Period Guarantee
+means that rcu-order links act kind of like strong fences. In
+particular, E ->rcu-order F implies not only that E begins before F
+ends, but also that any write po-before E will propagate to every CPU
+before any instruction po-after F can execute. (However, it does not
+imply that E must execute before F; in fact, each synchronize_rcu()
+fence event is linked to itself by rcu-order as a degenerate case.)
To prove this in full generality requires some intellectual effort.
We'll consider just a very simple case:
@@ -1572,7 +1576,7 @@ and there are events X, Y and a read-side critical section C such that:
2. X comes "before" Y in some sense (including rfe, co and fr);
- 2. Y is po-before Z;
+ 3. Y is po-before Z;
4. Z is the rcu_read_unlock() event marking the end of C;
@@ -1585,7 +1589,26 @@ G's CPU before G starts must propagate to every CPU before C starts.
In particular, the write propagates to every CPU before F finishes
executing and hence before any instruction po-after F can execute.
This sort of reasoning can be extended to handle all the situations
-covered by rcu-fence.
+covered by rcu-order.
+
+The rcu-fence relation is a simple extension of rcu-order. While
+rcu-order only links certain fence events (calls to synchronize_rcu(),
+rcu_read_lock(), or rcu_read_unlock()), rcu-fence links any events
+that are separated by an rcu-order link. This is analogous to the way
+the strong-fence relation links events that are separated by an
+smp_mb() fence event (as mentioned above, rcu-order links act kind of
+like strong fences). Written symbolically, X ->rcu-fence Y means
+there are fence events E and F such that:
+
+ X ->po E ->rcu-order F ->po Y.
+
+From the discussion above, we see this implies not only that X
+executes before Y, but also (if X is a store) that X propagates to
+every CPU before Y executes. Thus rcu-fence is sort of a
+"super-strong" fence: Unlike the original strong fences (smp_mb() and
+synchronize_rcu()), rcu-fence is able to link events on different
+CPUs. (Perhaps this fact should lead us to say that rcu-fence isn't
+really a fence at all!)
Finally, the LKMM defines the RCU-before (rb) relation in terms of
rcu-fence. This is done in essentially the same way as the pb
@@ -1596,7 +1619,7 @@ before F, just as E ->pb F does (and for much the same reasons).
Putting this all together, the LKMM expresses the Grace Period
Guarantee by requiring that the rb relation does not contain a cycle.
Equivalently, this "rcu" axiom requires that there are no events E
-and F with E ->rcu-link F ->rcu-fence E. Or to put it a third way,
+and F with E ->rcu-link F ->rcu-order E. Or to put it a third way,
the axiom requires that there are no cycles consisting of rcu-gp and
rcu-rscsi alternating with rcu-link, where the number of rcu-gp links
is >= the number of rcu-rscsi links.
@@ -1750,7 +1773,7 @@ addition to normal RCU. The ideas involved are much the same as
above, with new relations srcu-gp and srcu-rscsi added to represent
SRCU grace periods and read-side critical sections. There is a
restriction on the srcu-gp and srcu-rscsi links that can appear in an
-rcu-fence sequence (the srcu-rscsi links must be paired with srcu-gp
+rcu-order sequence (the srcu-rscsi links must be paired with srcu-gp
links having the same SRCU domain with proper nesting); the details
are relatively unimportant.
@@ -1896,6 +1919,521 @@ architectures supported by the Linux kernel, albeit for various
differing reasons.
+PLAIN ACCESSES AND DATA RACES
+-----------------------------
+
+In the LKMM, memory accesses such as READ_ONCE(x), atomic_inc(&y),
+smp_load_acquire(&z), and so on are collectively referred to as
+"marked" accesses, because they are all annotated with special
+operations of one kind or another. Ordinary C-language memory
+accesses such as x or y = 0 are simply called "plain" accesses.
+
+Early versions of the LKMM had nothing to say about plain accesses.
+The C standard allows compilers to assume that the variables affected
+by plain accesses are not concurrently read or written by any other
+threads or CPUs. This leaves compilers free to implement all manner
+of transformations or optimizations of code containing plain accesses,
+making such code very difficult for a memory model to handle.
+
+Here is just one example of a possible pitfall:
+
+ int a = 6;
+ int *x = &a;
+
+ P0()
+ {
+ int *r1;
+ int r2 = 0;
+
+ r1 = x;
+ if (r1 != NULL)
+ r2 = READ_ONCE(*r1);
+ }
+
+ P1()
+ {
+ WRITE_ONCE(x, NULL);
+ }
+
+On the face of it, one would expect that when this code runs, the only
+possible final values for r2 are 6 and 0, depending on whether or not
+P1's store to x propagates to P0 before P0's load from x executes.
+But since P0's load from x is a plain access, the compiler may decide
+to carry out the load twice (for the comparison against NULL, then again
+for the READ_ONCE()) and eliminate the temporary variable r1. The
+object code generated for P0 could therefore end up looking rather
+like this:
+
+ P0()
+ {
+ int r2 = 0;
+
+ if (x != NULL)
+ r2 = READ_ONCE(*x);
+ }
+
+And now it is obvious that this code runs the risk of dereferencing a
+NULL pointer, because P1's store to x might propagate to P0 after the
+test against NULL has been made but before the READ_ONCE() executes.
+If the original code had said "r1 = READ_ONCE(x)" instead of "r1 = x",
+the compiler would not have performed this optimization and there
+would be no possibility of a NULL-pointer dereference.
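+
+For reference, here is P0 spelled out with the marked load, the version
+that rules out the NULL-pointer dereference:
+
+	P0()
+	{
+		int *r1;
+		int r2 = 0;
+
+		r1 = READ_ONCE(x);
+		if (r1 != NULL)
+			r2 = READ_ONCE(*r1);
+	}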
+
+Given the possibility of transformations like this one, the LKMM
+doesn't try to predict all possible outcomes of code containing plain
+accesses. It is instead content to determine whether the code
+violates the compiler's assumptions, which would render the ultimate
+outcome undefined.
+
+In technical terms, the compiler is allowed to assume that when the
+program executes, there will not be any data races. A "data race"
+occurs when two conflicting memory accesses execute concurrently;
+two memory accesses "conflict" if:
+
+ they access the same location,
+
+ they occur on different CPUs (or in different threads on the
+ same CPU),
+
+ at least one of them is a plain access,
+
+ and at least one of them is a store.
+
+The LKMM tries to determine whether a program contains two conflicting
+accesses which may execute concurrently; if it does then the LKMM says
+there is a potential data race and makes no predictions about the
+program's outcome.
+
+Determining whether two accesses conflict is easy; you can see that
+all the concepts involved in the definition above are already part of
+the memory model. The hard part is telling whether they may execute
+concurrently. The LKMM takes a conservative attitude, assuming that
+accesses may be concurrent unless it can prove they cannot.
+
+If two memory accesses aren't concurrent then one must execute before
+the other. Therefore the LKMM decides two accesses aren't concurrent
+if they can be connected by a sequence of hb, pb, and rb links
+(together referred to as xb, for "executes before"). However, there
+are two complicating factors.
+
+If X is a load and X executes before a store Y, then indeed there is
+no danger of X and Y being concurrent. After all, Y can't have any
+effect on the value obtained by X until the memory subsystem has
+propagated Y from its own CPU to X's CPU, which won't happen until
+some time after Y executes and thus after X executes. But if X is a
+store, then even if X executes before Y it is still possible that X
+will propagate to Y's CPU just as Y is executing. In such a case X
+could very well interfere somehow with Y, and we would have to
+consider X and Y to be concurrent.
+
+Therefore when X is a store, for X and Y to be non-concurrent the LKMM
+requires not only that X must execute before Y but also that X must
+propagate to Y's CPU before Y executes. (Or vice versa, of course, if
+Y executes before X -- then Y must propagate to X's CPU before X
+executes if Y is a store.) This is expressed by the visibility
+relation (vis), where X ->vis Y is defined to hold if there is an
+intermediate event Z such that:
+
+ X is connected to Z by a possibly empty sequence of
+ cumul-fence links followed by an optional rfe link (if none of
+ these links are present, X and Z are the same event),
+
+and either:
+
+ Z is connected to Y by a strong-fence link followed by a
+ possibly empty sequence of xb links,
+
+or:
+
+ Z is on the same CPU as Y and is connected to Y by a possibly
+ empty sequence of xb links (again, if the sequence is empty it
+ means Z and Y are the same event).
+
+The motivations behind this definition are straightforward:
+
+ cumul-fence memory barriers force stores that are po-before
+ the barrier to propagate to other CPUs before stores that are
+ po-after the barrier.
+
+ An rfe link from an event W to an event R says that R reads
+ from W, which certainly means that W must have propagated to
+ R's CPU before R executed.
+
+ strong-fence memory barriers force stores that are po-before
+ the barrier, or that propagate to the barrier's CPU before the
+ barrier executes, to propagate to all CPUs before any events
+ po-after the barrier can execute.
+
+To see how this works out in practice, consider our old friend, the MP
+pattern (with fences and statement labels, but without the conditional
+test):
+
+ int buf = 0, flag = 0;
+
+ P0()
+ {
+ X: WRITE_ONCE(buf, 1);
+ smp_wmb();
+ W: WRITE_ONCE(flag, 1);
+ }
+
+ P1()
+ {
+ int r1;
+ int r2 = 0;
+
+ Z: r1 = READ_ONCE(flag);
+ smp_rmb();
+ Y: r2 = READ_ONCE(buf);
+ }
+
+The smp_wmb() memory barrier gives a cumul-fence link from X to W, and
+assuming r1 = 1 at the end, there is an rfe link from W to Z. This
+means that the store to buf must propagate from P0 to P1 before Z
+executes. Next, Z and Y are on the same CPU and the smp_rmb() fence
+provides an xb link from Z to Y (i.e., it forces Z to execute before
+Y). Therefore we have X ->vis Y: X must propagate to Y's CPU before Y
+executes.
+
+The second complicating factor mentioned above arises from the fact
+that when we are considering data races, some of the memory accesses
+are plain. Now, although we have not said so explicitly, up to this
+point most of the relations defined by the LKMM (ppo, hb, prop,
+cumul-fence, pb, and so on -- including vis) apply only to marked
+accesses.
+
+There are good reasons for this restriction. The compiler is not
+allowed to apply fancy transformations to marked accesses, and
+consequently each such access in the source code corresponds more or
+less directly to a single machine instruction in the object code. But
+plain accesses are a different story; the compiler may combine them,
+split them up, duplicate them, eliminate them, invent new ones, and
+who knows what else. Seeing a plain access in the source code tells
+you almost nothing about what machine instructions will end up in the
+object code.
+
+Fortunately, the compiler isn't completely free; it is subject to some
+limitations. For one, it is not allowed to introduce a data race into
+the object code if the source code does not already contain a data
+race (if it could, memory models would be useless and no multithreaded
+code would be safe!). For another, it cannot move a plain access past
+a compiler barrier.
+
+A compiler barrier is a kind of fence, but as the name implies, it
+only affects the compiler; it does not necessarily have any effect on
+how instructions are executed by the CPU. In Linux kernel source
+code, the barrier() function is a compiler barrier. It doesn't give
+rise directly to any machine instructions in the object code; rather,
+it affects how the compiler generates the rest of the object code.
+Given source code like this:
+
+ ... some memory accesses ...
+ barrier();
+ ... some other memory accesses ...
+
+the barrier() function ensures that the machine instructions
+corresponding to the first group of accesses will all end po-before
+any machine instructions corresponding to the second group of accesses
+-- even if some of the accesses are plain. (Of course, the CPU may
+then execute some of those accesses out of program order, but we
+already know how to deal with such issues.) Without the barrier()
+there would be no such guarantee; the two groups of accesses could be
+intermingled or even reversed in the object code.
+
+The LKMM doesn't say much about the barrier() function, but it does
+require that all fences are also compiler barriers. In addition, it
+requires that the ordering properties of memory barriers such as
+smp_rmb() or smp_store_release() apply to plain accesses as well as to
+marked accesses.
+
+This is the key to analyzing data races. Consider the MP pattern
+again, now using plain accesses for buf:
+
+ int buf = 0, flag = 0;
+
+ P0()
+ {
+ U: buf = 1;
+ smp_wmb();
+ X: WRITE_ONCE(flag, 1);
+ }
+
+ P1()
+ {
+ int r1;
+ int r2 = 0;
+
+ Y: r1 = READ_ONCE(flag);
+ if (r1) {
+ smp_rmb();
+ V: r2 = buf;
+ }
+ }
+
+This program does not contain a data race. Although the U and V
+accesses conflict, the LKMM can prove they are not concurrent as
+follows:
+
+ The smp_wmb() fence in P0 is both a compiler barrier and a
+ cumul-fence. It guarantees that no matter what hash of
+ machine instructions the compiler generates for the plain
+ access U, all those instructions will be po-before the fence.
+ Consequently U's store to buf, no matter how it is carried out
+ at the machine level, must propagate to P1 before X's store to
+ flag does.
+
+ X and Y are both marked accesses. Hence an rfe link from X to
+ Y is a valid indicator that X propagated to P1 before Y
+ executed, i.e., X ->vis Y. (And if there is no rfe link then
+ r1 will be 0, so V will not be executed and ipso facto won't
+ race with U.)
+
+ The smp_rmb() fence in P1 is a compiler barrier as well as a
+ fence. It guarantees that all the machine-level instructions
+ corresponding to the access V will be po-after the fence, and
+ therefore any loads among those instructions will execute
+ after the fence does and hence after Y does.
+
+Thus U's store to buf is forced to propagate to P1 before V's load
+executes (assuming V does execute), ruling out the possibility of a
+data race between them.
+
+This analysis illustrates how the LKMM deals with plain accesses in
+general. Suppose R is a plain load and we want to show that R
+executes before some marked access E. We can do this by finding a
+marked access X such that R and X are ordered by a suitable fence and
+X ->xb* E. If E was also a plain access, we would also look for a
+marked access Y such that X ->xb* Y, and Y and E are ordered by a
+fence. We describe this arrangement by saying that R is
+"post-bounded" by X and E is "pre-bounded" by Y.
+
+In fact, we go one step further: Since R is a read, we say that R is
+"r-post-bounded" by X. Similarly, E would be "r-pre-bounded" or
+"w-pre-bounded" by Y, depending on whether E was a store or a load.
+This distinction is needed because some fences affect only loads
+(i.e., smp_rmb()) and some affect only stores (smp_wmb()); otherwise
+the two types of bounds are the same. And as a degenerate case, we
+say that a marked access pre-bounds and post-bounds itself (e.g., if R
+above were a marked load then X could simply be taken to be R itself.)
+
+The need to distinguish between r- and w-bounding raises yet another
+issue. When the source code contains a plain store, the compiler is
+allowed to put plain loads of the same location into the object code.
+For example, given the source code:
+
+ x = 1;
+
+the compiler is theoretically allowed to generate object code that
+looks like:
+
+ if (x != 1)
+ x = 1;
+
+thereby adding a load (and possibly replacing the store entirely).
+For this reason, whenever the LKMM requires a plain store to be
+w-pre-bounded or w-post-bounded by a marked access, it also requires
+the store to be r-pre-bounded or r-post-bounded, so as to handle cases
+where the compiler adds a load.
+
+(This may be overly cautious. We don't know of any examples where a
+compiler has augmented a store with a load in this fashion, and the
+Linux kernel developers would probably fight pretty hard to change a
+compiler if it ever did this. Still, better safe than sorry.)
+
+Incidentally, the other transformation -- augmenting a plain load by
+adding in a store to the same location -- is not allowed. This is
+because the compiler cannot know whether any other CPUs might perform
+a concurrent load from that location. Two concurrent loads don't
+constitute a race (they can't interfere with each other), but a store
+does race with a concurrent load. Thus adding a store might create a
+data race where one was not already present in the source code,
+something the compiler is forbidden to do. Augmenting a store with a
+load, on the other hand, is acceptable because doing so won't create a
+data race unless one already existed.
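+
+To illustrate the forbidden transformation (a hypothetical example;
+we know of no compiler that does this), given the source code:
+
+	r = x;
+
+the compiler must not generate object code resembling:
+
+	r = x;
+	x = r;		/* added store: races with concurrent loads of x */
+
+since the added store could turn race-free source code into a racy
+program.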
+
+The LKMM includes a second way to pre-bound plain accesses, in
+addition to fences: an address dependency from a marked load. That
+is, in the sequence:
+
+ p = READ_ONCE(ptr);
+ r = *p;
+
+the LKMM says that the marked load of ptr pre-bounds the plain load of
+*p; the marked load must execute before any of the machine
+instructions corresponding to the plain load. This is a reasonable
+stipulation, since after all, the CPU can't perform the load of *p
+until it knows what value p will hold. Furthermore, without some
+assumption like this one, some usages typical of RCU would count as
+data races. For example:
+
+ int a = 1, b;
+ int *ptr = &a;
+
+ P0()
+ {
+ b = 2;
+ rcu_assign_pointer(ptr, &b);
+ }
+
+ P1()
+ {
+ int *p;
+ int r;
+
+ rcu_read_lock();
+ p = rcu_dereference(ptr);
+ r = *p;
+ rcu_read_unlock();
+ }
+
+(In this example the rcu_read_lock() and rcu_read_unlock() calls don't
+really do anything, because there aren't any grace periods. They are
+included merely for the sake of good form; typically P0 would call
+synchronize_rcu() somewhere after the rcu_assign_pointer().)
+
+rcu_assign_pointer() performs a store-release, so the plain store to b
+is definitely w-post-bounded before the store to ptr, and the two
+stores will propagate to P1 in that order. However, rcu_dereference()
+is only equivalent to READ_ONCE(). While it is a marked access, it is
+not a fence or compiler barrier. Hence the only guarantee we have
+that the load of ptr in P1 is r-pre-bounded before the load of *p
+(thus avoiding a race) is the assumption about address dependencies.
+
+This is a situation where the compiler can undermine the memory model,
+and a certain amount of care is required when programming constructs
+like this one. In particular, comparisons between the pointer and
+other known addresses can cause trouble. If you have something like:
+
+ p = rcu_dereference(ptr);
+ if (p == &x)
+ r = *p;
+
+then the compiler just might generate object code resembling:
+
+ p = rcu_dereference(ptr);
+ if (p == &x)
+ r = x;
+
+or even:
+
+ rtemp = x;
+ p = rcu_dereference(ptr);
+ if (p == &x)
+ r = rtemp;
+
+which would invalidate the memory model's assumption, since the CPU
+could now perform the load of x before the load of ptr (there might be
+a control dependency but no address dependency at the machine level).
+
+Finally, it turns out there is a situation in which a plain write does
+not need to be w-post-bounded: when it is separated from the
+conflicting access by a fence. At first glance this may seem
+impossible. After all, to be conflicting the second access has to be
+on a different CPU from the first, and fences don't link events on
+different CPUs. Well, normal fences don't -- but rcu-fence can!
+Here's an example:
+
+ int x, y;
+
+ P0()
+ {
+ WRITE_ONCE(x, 1);
+ synchronize_rcu();
+ y = 3;
+ }
+
+ P1()
+ {
+ rcu_read_lock();
+ if (READ_ONCE(x) == 0)
+ y = 2;
+ rcu_read_unlock();
+ }
+
+Do the plain stores to y race? Clearly not if P1 reads a non-zero
+value for x, so let's assume the READ_ONCE(x) does obtain 0. This
+means that the read-side critical section in P1 must finish executing
+before the grace period in P0 does, because RCU's Grace-Period
+Guarantee says that otherwise P0's store to x would have propagated to
+P1 before the critical section started and so would have been visible
+to the READ_ONCE(). (Another way of putting it is that the fre link
+from the READ_ONCE() to the WRITE_ONCE() gives rise to an rcu-link
+between those two events.)
+
+This means there is an rcu-fence link from P1's "y = 2" store to P0's
+"y = 3" store, and consequently the first must propagate from P1 to P0
+before the second can execute. Therefore the two stores cannot be
+concurrent and there is no race, even though P1's plain store to y
+isn't w-post-bounded by any marked accesses.
+
+Putting all this material together yields the following picture. For
+two conflicting stores W and W', where W ->co W', the LKMM says the
+stores don't race if W can be linked to W' by a
+
+ w-post-bounded ; vis ; w-pre-bounded
+
+sequence. If W is plain then they also have to be linked by an
+
+ r-post-bounded ; xb* ; w-pre-bounded
+
+sequence, and if W' is plain then they also have to be linked by a
+
+ w-post-bounded ; vis ; r-pre-bounded
+
+sequence. For a conflicting load R and store W, the LKMM says the two
+accesses don't race if R can be linked to W by an
+
+ r-post-bounded ; xb* ; w-pre-bounded
+
+sequence or if W can be linked to R by a
+
+ w-post-bounded ; vis ; r-pre-bounded
+
+sequence. For the cases involving a vis link, the LKMM also accepts
+sequences in which W is linked to W' or R by a
+
+ strong-fence ; xb* ; {w and/or r}-pre-bounded
+
+sequence with no post-bounding, and in every case the LKMM also allows
+the link simply to be a fence with no bounding at all. If no sequence
+of the appropriate sort exists, the LKMM says that the accesses race.
+
+There is one more part of the LKMM related to plain accesses (although
+not to data races) we should discuss. Recall that many relations such
+as hb are limited to marked accesses only. As a result, the
+happens-before, propagates-before, and rcu axioms (which state that
+various relations must not contain cycles) don't apply to plain
+accesses. Nevertheless, we do want to rule out such cycles, because
+they don't make sense even for plain accesses.
+
+To this end, the LKMM imposes three extra restrictions, together
+called the "plain-coherence" axiom because of their resemblance to the
+rules used by the operational model to ensure cache coherence (that
+is, the rules governing the memory subsystem's choice of a store to
+satisfy a load request and its determination of where a store will
+fall in the coherence order):
+
+ If R and W conflict and it is possible to link R to W by one
+ of the xb* sequences listed above, then W ->rfe R is not
+ allowed (i.e., a load cannot read from a store that it
+ executes before, even if one or both is plain).
+
+ If W and R conflict and it is possible to link W to R by one
+ of the vis sequences listed above, then R ->fre W is not
+ allowed (i.e., if a store is visible to a load then the load
+ must read from that store or one coherence-after it).
+
+ If W and W' conflict and it is possible to link W to W' by one
+ of the vis sequences listed above, then W' ->co W is not
+ allowed (i.e., if one store is visible to a second then the
+ second must come after the first in the coherence order).
+
+This is the extent to which the LKMM deals with plain accesses.
+Perhaps it could say more (for example, plain accesses might
+contribute to the ppo relation), but at the moment it seems that this
+minimal, conservative approach is good enough.
+
+
ODDS AND ENDS
-------------
@@ -1943,6 +2481,16 @@ treated as READ_ONCE() and rcu_assign_pointer() is treated as
smp_store_release() -- which is basically how the Linux kernel treats
them.
+Although we said that plain accesses are not linked by the ppo
+relation, they do contribute to it indirectly. Namely, when there is
+an address dependency from a marked load R to a plain store W,
+followed by smp_wmb() and then a marked store W', the LKMM creates a
+ppo link from R to W'. The reasoning behind this is perhaps a little
+shaky, but essentially it says there is no way to generate object code
+for this source code in which W' could execute before R. Just as with
+pre-bounding by address dependencies, it is possible for the compiler
+to undermine this relation if sufficient care is not taken.
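+
+For instance (a sketch of our own):
+
+	p = READ_ONCE(ptr);	/* marked load R */
+	*p = 1;			/* plain store W, address-dependent on R */
+	smp_wmb();
+	WRITE_ONCE(x, 2);	/* marked store W': the LKMM adds R ->ppo W' */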
+
There are a few oddball fences which need special treatment:
smp_mb__before_atomic(), smp_mb__after_atomic(), and
smp_mb__after_spinlock(). The LKMM uses fence events with special
diff --git a/tools/memory-model/linux-kernel.cat b/tools/memory-model/linux-kernel.cat
index ea2ff4b94074..2a9b4fe4a84e 100644
--- a/tools/memory-model/linux-kernel.cat
+++ b/tools/memory-model/linux-kernel.cat
@@ -197,7 +197,7 @@ empty (wr-incoh | rw-incoh | ww-incoh) as plain-coherence
(* Actual races *)
let ww-nonrace = ww-vis & ((Marked * W) | rw-xbstar) & ((W * Marked) | wr-vis)
let ww-race = (pre-race & co) \ ww-nonrace
-let wr-race = (pre-race & (co? ; rf)) \ wr-vis
+let wr-race = (pre-race & (co? ; rf)) \ wr-vis \ rw-xbstar^-1
let rw-race = (pre-race & fr) \ rw-xbstar
flag ~empty (ww-race | wr-race | rw-race) as data-race
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index 044c9a3cb247..4768d91c6d68 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -144,6 +144,7 @@ static bool __dead_end_function(struct objtool_file *file, struct symbol *func,
"usercopy_abort",
"machine_real_restart",
"rewind_stack_do_exit",
+ "kunit_try_catch_throw",
};
if (!func)
@@ -481,6 +482,7 @@ static const char *uaccess_safe_builtin[] = {
"ubsan_type_mismatch_common",
"__ubsan_handle_type_mismatch",
"__ubsan_handle_type_mismatch_v1",
+ "__ubsan_handle_shift_out_of_bounds",
/* misc */
"csum_partial_copy_generic",
"__memcpy_mcsafe",
diff --git a/tools/objtool/sync-check.sh b/tools/objtool/sync-check.sh
index 0a832e265a50..9bd04bbed01e 100755
--- a/tools/objtool/sync-check.sh
+++ b/tools/objtool/sync-check.sh
@@ -4,6 +4,7 @@
FILES='
arch/x86/include/asm/inat_types.h
arch/x86/include/asm/orc_types.h
+arch/x86/include/asm/emulate_prefix.h
arch/x86/lib/x86-opcode-map.txt
arch/x86/tools/gen-insn-attr-x86.awk
'
@@ -46,6 +47,6 @@ done
check arch/x86/include/asm/inat.h '-I "^#include [\"<]\(asm/\)*inat_types.h[\">]"'
check arch/x86/include/asm/insn.h '-I "^#include [\"<]\(asm/\)*inat.h[\">]"'
check arch/x86/lib/inat.c '-I "^#include [\"<]\(../include/\)*asm/insn.h[\">]"'
-check arch/x86/lib/insn.c '-I "^#include [\"<]\(../include/\)*asm/in\(at\|sn\).h[\">]"'
+check arch/x86/lib/insn.c '-I "^#include [\"<]\(../include/\)*asm/in\(at\|sn\).h[\">]" -I "^#include [\"<]\(../include/\)*asm/emulate_prefix.h[\">]"'
cd -
diff --git a/tools/perf/Documentation/intel-pt.txt b/tools/perf/Documentation/intel-pt.txt
index e0d9e7dd4f17..2cf2d9e9d0da 100644
--- a/tools/perf/Documentation/intel-pt.txt
+++ b/tools/perf/Documentation/intel-pt.txt
@@ -434,6 +434,56 @@ pwr_evt Enable power events. The power events provide information about
"0" otherwise.
+AUX area sampling option
+------------------------
+
+To select Intel PT "sampling", the AUX area sampling option can be used:
+
+ --aux-sample
+
+Optionally it can be followed by the sample size in bytes e.g.
+
+ --aux-sample=8192
+
+In addition, the Intel PT event to sample must be defined e.g.
+
+ -e intel_pt//u
+
+Samples on other events will be created containing Intel PT data, e.g. the
+following will create Intel PT samples on the branch-misses event; note that
+the events must be grouped using {}:
+
+ perf record --aux-sample -e '{intel_pt//u,branch-misses:u}'
+
+An alternative to '--aux-sample' is to add the config term 'aux-sample-size' to
+events. In this case, the grouping is implied e.g.
+
+ perf record -e intel_pt//u -e branch-misses/aux-sample-size=8192/u
+
+is the same as:
+
+ perf record -e '{intel_pt//u,branch-misses/aux-sample-size=8192/u}'
+
+but also allows using an address filter, e.g.:
+
+ perf record -e intel_pt//u --filter 'filter * @/bin/ls' -e branch-misses/aux-sample-size=8192/u -- ls
+
+It is important to select a sample size that is big enough to contain at least
+one PSB packet. If not, a warning will be displayed:
+
+ Intel PT sample size (%zu) may be too small for PSB period (%zu)
+
+The check used for that is: if sample_size <= psb_period + 256, display the
+warning. When sampling is used, the psb_period config term defaults to 0,
+which corresponds to a period of 2KiB.
+
+The default sample size is 4KiB.
+
+The sample size is passed in aux_sample_size in struct perf_event_attr. The
+sample size is limited by the maximum event size which is 64KiB. It is
+difficult to know how big the event might be without the trace sample attached,
+but the tool validates that the sample size is not greater than 60KiB.
+
+
new snapshot option
-------------------
@@ -487,8 +537,8 @@ their mlock limit (which defaults to 64KiB but is not multiplied by the number
of cpus).
In full-trace mode, powers of two are allowed for buffer size, with a minimum
-size of 2 pages. In snapshot mode, it is the same but the minimum size is
-1 page.
+size of 2 pages. In snapshot mode or sampling mode, it is the same but the
+minimum size is 1 page.
The mmap size and auxtrace mmap size are displayed if the -vv option is used e.g.
@@ -501,12 +551,17 @@ Intel PT modes of operation
Intel PT can be used in 2 modes:
full-trace mode
+ sample mode
snapshot mode
Full-trace mode traces continuously e.g.
perf record -e intel_pt//u uname
+Sample mode attaches an Intel PT sample to other events e.g.
+
+ perf record --aux-sample -e intel_pt//u -e branch-misses:u
+
Snapshot mode captures the available data when a signal is sent e.g.
perf record -v -e intel_pt//u -S ./loopy 1000000000 &
diff --git a/tools/perf/Documentation/perf-config.txt b/tools/perf/Documentation/perf-config.txt
index c599623a1f3d..c4dd23c4b478 100644
--- a/tools/perf/Documentation/perf-config.txt
+++ b/tools/perf/Documentation/perf-config.txt
@@ -561,6 +561,11 @@ trace.*::
trace.show_zeros::
Do not suppress syscall arguments that are equal to zero.
+	trace.tracepoint_beautifiers::
+		Use "libtraceevent" to have that library augment the tracepoint
+		arguments, or "libbeauty" (the default) to use the same argument
+		beautifiers used in the strace-like sys_enter+sys_exit lines.
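+
+		For instance, to pick libtraceevent, an illustrative
+		~/.perfconfig snippet might be:
+
+		[trace]
+			tracepoint_beautifiers = libtraceevent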
+
llvm.*::
llvm.clang-path::
Path to clang. If omit, search it from $PATH.
diff --git a/tools/perf/Documentation/perf-diff.txt b/tools/perf/Documentation/perf-diff.txt
index d5cc15e651cf..f50ca0fef0a4 100644
--- a/tools/perf/Documentation/perf-diff.txt
+++ b/tools/perf/Documentation/perf-diff.txt
@@ -95,6 +95,11 @@ OPTIONS
diff.compute config option. See COMPARISON METHODS section for
more info.
+--cycles-hist::
+	Report a histogram and the standard deviation for cycles data.
+	It can help judge whether the reported cycles data is noisy.
+	This option should be used with '-c cycles'.
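+
+	For example (an illustrative invocation):
+
+	  perf diff -c cycles --cycles-hist perf.data.old perf.data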
+
-p::
--period::
Show period values for both compared hist entries.
diff --git a/tools/perf/Documentation/perf-list.txt b/tools/perf/Documentation/perf-list.txt
index 18ed1b0fceb3..6345db33c533 100644
--- a/tools/perf/Documentation/perf-list.txt
+++ b/tools/perf/Documentation/perf-list.txt
@@ -36,6 +36,9 @@ Enable debugging output.
Print how named events are resolved internally into perf events, and also
any extra expressions computed by perf stat.
+--deprecated::
+Print deprecated events. By default the deprecated events are hidden.
+
[[EVENT_MODIFIERS]]
EVENT MODIFIERS
---------------
diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt
index c6f9f31b6039..b23a4012a606 100644
--- a/tools/perf/Documentation/perf-record.txt
+++ b/tools/perf/Documentation/perf-record.txt
@@ -62,6 +62,9 @@ OPTIONS
like this: name=\'CPU_CLK_UNHALTED.THREAD:cmask=0x1\'.
- 'aux-output': Generate AUX records instead of events. This requires
that an AUX area event is also provided.
+ - 'aux-sample-size': Set sample size for AUX area sampling. If the
+ '--aux-sample' option has been used, set aux-sample-size=0 to disable
+ AUX area sampling for the event.
See the linkperf:perf-list[1] man page for more parameters.
@@ -433,6 +436,12 @@ can be specified in a string that follows this option:
In Snapshot Mode trace data is captured only when signal SIGUSR2 is received
and on exit if the above 'e' option is given.
+--aux-sample[=OPTIONS]::
+Select AUX area sampling. At least one of the events selected by the -e option
+must be an AUX area event. Samples on other events will be created containing
+data from the AUX area. Optionally sample size may be specified, otherwise it
+defaults to 4KiB.
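+
+For example (illustrative):
+
+  perf record --aux-sample -e '{intel_pt//u,branch-misses:u}' -- ls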
+
--proc-map-timeout::
When processing pre-existing threads /proc/XXX/mmap, it may take a long time,
because the file may be huge. A time out is needed in such cases.
@@ -571,6 +580,13 @@ config terms. For example: 'cycles/overwrite/' and 'instructions/no-overwrite/'.
Implies --tail-synthesize.
+--kcore::
+Make a copy of /proc/kcore and place it into a directory with the perf data file.
+
+--max-size=<size>::
+Limit the sample data max size; <size> is expected to be a number with
+an appended unit character - B/K/M/G.
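+
+For example, to cap the output at roughly 100 megabytes (an
+illustrative invocation):
+
+  perf record --max-size=100M -- sleep 10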
+
SEE ALSO
--------
linkperf:perf-stat[1], linkperf:perf-list[1]
diff --git a/tools/perf/Documentation/perf-report.txt b/tools/perf/Documentation/perf-report.txt
index 7315f155803f..8dbe2119686a 100644
--- a/tools/perf/Documentation/perf-report.txt
+++ b/tools/perf/Documentation/perf-report.txt
@@ -525,6 +525,17 @@ include::itrace.txt[]
Configure time quantum for time sort key. Default 100ms.
Accepts s, us, ms, ns units.
+--total-cycles::
+ When --total-cycles is specified, it supports sorting for all blocks by
+ 'Sampled Cycles%'. This is useful to concentrate on the globally hottest
+ blocks. In output, there are some new columns:
+
+ 'Sampled Cycles%' - block sampled cycles aggregation / total sampled cycles
+ 'Sampled Cycles' - block sampled cycles aggregation
+ 'Avg Cycles%' - block average sampled cycles / sum of total block average
+ sampled cycles
+ 'Avg Cycles' - block average sampled cycles
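+
+	For example (illustrative):
+
+	  perf report --total-cycles --stdio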
+
include::callchain-overhead-calculation.txt[]
SEE ALSO
diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt
index 930c51c01201..9431b8066fb4 100644
--- a/tools/perf/Documentation/perf-stat.txt
+++ b/tools/perf/Documentation/perf-stat.txt
@@ -217,6 +217,11 @@ core number and the number of online logical processors on that physical process
Aggregate counts per monitored threads, when monitoring threads (-t option)
or processes (-p option).
+--per-node::
+Aggregate counts per NUMA node for system-wide mode measurements. This
+is a useful mode to detect imbalance between NUMA nodes. To enable this
+mode, use --per-node in addition to -a (system-wide).
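+
+For example (illustrative):
+
+  perf stat -a --per-node -- sleep 1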
+
-D msecs::
--delay msecs::
After starting the program, wait msecs before measuring. This is useful to
@@ -323,6 +328,12 @@ The output is SMI cycles%, equals to (aperf - unhalted core cycles) / aperf
Users who wants to get the actual value can apply --no-metric-only.
+--all-kernel::
+Configure all used events to run in kernel space.
+
+--all-user::
+Configure all used events to run in user space.
+
EXAMPLES
--------
diff --git a/tools/perf/Documentation/perf-trace.txt b/tools/perf/Documentation/perf-trace.txt
index 25b74fdb36fa..abc9b5d83312 100644
--- a/tools/perf/Documentation/perf-trace.txt
+++ b/tools/perf/Documentation/perf-trace.txt
@@ -42,6 +42,11 @@ OPTIONS
Prefixing with ! shows all syscalls but the ones specified. You may
need to escape it.
+--filter=<filter>::
+ Event filter. This option should follow an event selector (-e) which
+ selects tracepoint event(s).
+
+
-D msecs::
--delay msecs::
After starting the program, wait msecs before measuring. This is useful to
@@ -141,6 +146,10 @@ the thread executes on the designated CPUs. Default is to monitor all CPUs.
Show all syscalls followed by a summary by thread with min, max, and
average times (in msec) and relative stddev.
+--errno-summary::
+ To be used with -s or -S, to show stats for the errnos experienced by
+	syscalls; using only this option will trigger --summary.
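+
+	For example (illustrative):
+
+	  perf trace -s --errno-summary -- ls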
+
--tool_stats::
Show tool stats such as number of times fd->pathname was discovered thru
hooking the open syscall return + vfs_getname or via reading /proc/pid/fd, etc.
@@ -219,6 +228,11 @@ the thread executes on the designated CPUs. Default is to monitor all CPUs.
may happen, for instance, when a thread gets migrated to a different CPU
while processing a syscall.
+--libtraceevent_print::
+ Use libtraceevent to print tracepoint arguments. By default 'perf trace' uses
+ the same beautifiers used in the strace-like enter+exit lines to augment the
+ tracepoint arguments.
+
--map-dump::
Dump BPF maps setup by events passed via -e, for instance the augmented_raw_syscalls
living in tools/perf/examples/bpf/augmented_raw_syscalls.c. For now this
diff --git a/tools/perf/Documentation/perf.data-directory-format.txt b/tools/perf/Documentation/perf.data-directory-format.txt
new file mode 100644
index 000000000000..f37fbd29112e
--- /dev/null
+++ b/tools/perf/Documentation/perf.data-directory-format.txt
@@ -0,0 +1,63 @@
+perf.data directory format
+
+DISCLAIMER This is not ABI yet and is subject to possible change
+ in following versions of perf. We will remove this
+ disclaimer once the directory format soaks in.
+
+
+This document describes the on-disk perf.data directory format.
+
+The layout is described by HEADER_DIR_FORMAT feature.
+Currently it holds only the version number (0):
+
+ HEADER_DIR_FORMAT = 24
+
+ struct {
+ uint64_t version;
+ }
+
+Currently, the only version value (0) means that:
+ - there is a single perf.data file named 'data' within the directory.
+ e.g.
+
+ $ tree -ps perf.data
+ perf.data
+ └── [-rw------- 25912] data
+
+Future versions are expected to describe different data file
+layouts according to special needs.
+
+Currently the only 'perf record' option to output to a directory is
+the --kcore option which puts a copy of /proc/kcore into the directory.
+e.g.
+
+ $ sudo perf record --kcore uname
+ Linux
+ [ perf record: Woken up 1 times to write data ]
+ [ perf record: Captured and wrote 0.015 MB perf.data (9 samples) ]
+ $ sudo tree -ps perf.data
+ perf.data
+ ├── [-rw------- 23744] data
+ └── [drwx------ 4096] kcore_dir
+ ├── [-r-------- 6731125] kallsyms
+ ├── [-r-------- 40230912] kcore
+ └── [-r-------- 5419] modules
+
+ 1 directory, 4 files
+ $ sudo perf script -v
+ build id event received for vmlinux: 1eaa285996affce2d74d8e66dcea09a80c9941de
+ build id event received for [vdso]: 8bbaf5dc62a9b644b4d4e4539737e104e4a84541
+ build id event received for /lib/x86_64-linux-gnu/libc-2.28.so: 5b157f49586a3ca84d55837f97ff466767dd3445
+ Samples for 'cycles' event do not have CPU attribute set. Skipping 'cpu' field.
+ Using CPUID GenuineIntel-6-8E-A
+ Using perf.data/kcore_dir/kcore for kernel data
+ Using perf.data/kcore_dir/kallsyms for symbols
+ perf 15316 2060795.480902: 1 cycles: ffffffffa2caa548 native_write_msr+0x8 (vmlinux)
+ perf 15316 2060795.480906: 1 cycles: ffffffffa2caa548 native_write_msr+0x8 (vmlinux)
+ perf 15316 2060795.480908: 7 cycles: ffffffffa2caa548 native_write_msr+0x8 (vmlinux)
+ perf 15316 2060795.480910: 119 cycles: ffffffffa2caa54a native_write_msr+0xa (vmlinux)
+ perf 15316 2060795.480912: 2109 cycles: ffffffffa2c9b7b0 native_apic_msr_write+0x0 (vmlinux)
+ perf 15316 2060795.480914: 37606 cycles: ffffffffa2f121fe perf_event_addr_filters_exec+0x2e (vmlinux)
+ uname 15316 2060795.480924: 588287 cycles: ffffffffa303a56d page_counter_try_charge+0x6d (vmlinux)
+ uname 15316 2060795.481067: 2261945 cycles: ffffffffa301438f kmem_cache_free+0x4f (vmlinux)
+ uname 15316 2060795.481643: 2172167 cycles: 7f1a48c393c0 _IO_un_link+0x0 (/lib/x86_64-linux-gnu/libc-2.28.so)
diff --git a/tools/perf/Documentation/perf.txt b/tools/perf/Documentation/perf.txt
index 401f0ed67439..3f37ded13f8c 100644
--- a/tools/perf/Documentation/perf.txt
+++ b/tools/perf/Documentation/perf.txt
@@ -24,6 +24,8 @@ OPTIONS
data-convert - data convert command debug messages
stderr - write debug output (option -v) to stderr
in browser mode
+ perf-event-open - Print perf_event_open() arguments and
+ return value
--buildid-dir::
Setup buildid cache directory. It has higher priority than
diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
index 46f7fba2306c..1783427da9b0 100644
--- a/tools/perf/Makefile.config
+++ b/tools/perf/Makefile.config
@@ -188,7 +188,7 @@ endif
# Treat warnings as errors unless directed not to
ifneq ($(WERROR),0)
- CFLAGS += -Werror
+ CORE_CFLAGS += -Werror
CXXFLAGS += -Werror
endif
@@ -198,9 +198,9 @@ endif
ifeq ($(DEBUG),0)
ifeq ($(CC_NO_CLANG), 0)
- CFLAGS += -O3
+ CORE_CFLAGS += -O3
else
- CFLAGS += -O6
+ CORE_CFLAGS += -O6
endif
endif
@@ -245,12 +245,12 @@ FEATURE_CHECK_LDFLAGS-libaio = -lrt
FEATURE_CHECK_LDFLAGS-disassembler-four-args = -lbfd -lopcodes -ldl
-CFLAGS += -fno-omit-frame-pointer
-CFLAGS += -ggdb3
-CFLAGS += -funwind-tables
-CFLAGS += -Wall
-CFLAGS += -Wextra
-CFLAGS += -std=gnu99
+CORE_CFLAGS += -fno-omit-frame-pointer
+CORE_CFLAGS += -ggdb3
+CORE_CFLAGS += -funwind-tables
+CORE_CFLAGS += -Wall
+CORE_CFLAGS += -Wextra
+CORE_CFLAGS += -std=gnu99
CXXFLAGS += -std=gnu++11 -fno-exceptions -fno-rtti
CXXFLAGS += -Wall
@@ -265,6 +265,11 @@ LDFLAGS += -Wl,-z,noexecstack
EXTLIBS = -lpthread -lrt -lm -ldl
+ifneq ($(TCMALLOC),)
+ CFLAGS += -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-free
+ EXTLIBS += -ltcmalloc
+endif
+
ifeq ($(FEATURES_DUMP),)
include $(srctree)/tools/build/Makefile.feature
else
@@ -272,12 +277,12 @@ include $(FEATURES_DUMP)
endif
ifeq ($(feature-stackprotector-all), 1)
- CFLAGS += -fstack-protector-all
+ CORE_CFLAGS += -fstack-protector-all
endif
ifeq ($(DEBUG),0)
ifeq ($(feature-fortify-source), 1)
- CFLAGS += -D_FORTIFY_SOURCE=2
+ CORE_CFLAGS += -D_FORTIFY_SOURCE=2
endif
endif
@@ -301,10 +306,12 @@ INC_FLAGS += -I$(src-perf)/util
INC_FLAGS += -I$(src-perf)
INC_FLAGS += -I$(srctree)/tools/lib/
-CFLAGS += $(INC_FLAGS)
+CORE_CFLAGS += -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE
+
+CFLAGS += $(CORE_CFLAGS) $(INC_FLAGS)
CXXFLAGS += $(INC_FLAGS)
-CFLAGS += -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE
+LIBPERF_CFLAGS := $(CORE_CFLAGS) $(EXTRA_CFLAGS)
ifeq ($(feature-sync-compare-and-swap), 1)
CFLAGS += -DHAVE_SYNC_COMPARE_AND_SWAP_SUPPORT
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index 902c792f326a..1cd294468a1f 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -114,6 +114,8 @@ include ../scripts/utilities.mak
# Define NO_LIBZSTD if you do not want support of Zstandard based runtime
# trace compression in record mode.
#
+# Define TCMALLOC to enable tcmalloc heap profiling.
+#
# As per kernel Makefile, avoid funny character set dependencies
unexport LC_ALL
@@ -407,6 +409,7 @@ linux_uapi_dir := $(srctree)/tools/include/uapi/linux
asm_generic_uapi_dir := $(srctree)/tools/include/uapi/asm-generic
arch_asm_uapi_dir := $(srctree)/tools/arch/$(SRCARCH)/include/uapi/asm/
x86_arch_asm_uapi_dir := $(srctree)/tools/arch/x86/include/uapi/asm/
+x86_arch_asm_dir := $(srctree)/tools/arch/x86/include/asm/
beauty_outdir := $(OUTPUT)trace/beauty/generated
beauty_ioctl_outdir := $(beauty_outdir)/ioctl
@@ -543,6 +546,18 @@ x86_arch_prctl_code_tbl := $(srctree)/tools/perf/trace/beauty/x86_arch_prctl.sh
$(x86_arch_prctl_code_array): $(x86_arch_asm_uapi_dir)/prctl.h $(x86_arch_prctl_code_tbl)
$(Q)$(SHELL) '$(x86_arch_prctl_code_tbl)' $(x86_arch_asm_uapi_dir) > $@
+x86_arch_irq_vectors_array := $(beauty_outdir)/x86_arch_irq_vectors_array.c
+x86_arch_irq_vectors_tbl := $(srctree)/tools/perf/trace/beauty/tracepoints/x86_irq_vectors.sh
+
+$(x86_arch_irq_vectors_array): $(x86_arch_asm_dir)/irq_vectors.h $(x86_arch_irq_vectors_tbl)
+ $(Q)$(SHELL) '$(x86_arch_irq_vectors_tbl)' $(x86_arch_asm_dir) > $@
+
+x86_arch_MSRs_array := $(beauty_outdir)/x86_arch_MSRs_array.c
+x86_arch_MSRs_tbl := $(srctree)/tools/perf/trace/beauty/tracepoints/x86_msr.sh
+
+$(x86_arch_MSRs_array): $(x86_arch_asm_dir)/msr-index.h $(x86_arch_MSRs_tbl)
+ $(Q)$(SHELL) '$(x86_arch_MSRs_tbl)' $(x86_arch_asm_dir) > $@
+
rename_flags_array := $(beauty_outdir)/rename_flags_array.c
rename_flags_tbl := $(srctree)/tools/perf/trace/beauty/rename_flags.sh
@@ -677,6 +692,8 @@ prepare: $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)common-cmds.h archheaders $(drm_ioc
$(perf_ioctl_array) \
$(prctl_option_array) \
$(usbdevfs_ioctl_array) \
+ $(x86_arch_irq_vectors_array) \
+ $(x86_arch_MSRs_array) \
$(x86_arch_prctl_code_array) \
$(rename_flags_array) \
$(arch_errno_name_array) \
@@ -761,7 +778,7 @@ $(LIBBPF)-clean:
$(Q)$(MAKE) -C $(BPF_DIR) O=$(OUTPUT) clean >/dev/null
$(LIBPERF): FORCE
- $(Q)$(MAKE) -C $(LIBPERF_DIR) O=$(OUTPUT) $(OUTPUT)libperf.a
+ $(Q)$(MAKE) -C $(LIBPERF_DIR) EXTRA_CFLAGS="$(LIBPERF_CFLAGS)" O=$(OUTPUT) $(OUTPUT)libperf.a
$(LIBPERF)-clean:
$(call QUIET_CLEAN, libperf)
@@ -981,6 +998,8 @@ clean:: $(LIBTRACEEVENT)-clean $(LIBAPI)-clean $(LIBBPF)-clean $(LIBSUBCMD)-clea
$(OUTPUT)$(perf_ioctl_array) \
$(OUTPUT)$(prctl_option_array) \
$(OUTPUT)$(usbdevfs_ioctl_array) \
+ $(OUTPUT)$(x86_arch_irq_vectors_array) \
+ $(OUTPUT)$(x86_arch_MSRs_array) \
$(OUTPUT)$(x86_arch_prctl_code_array) \
$(OUTPUT)$(rename_flags_array) \
$(OUTPUT)$(arch_errno_name_array) \
diff --git a/tools/perf/arch/arm/util/Build b/tools/perf/arch/arm/util/Build
index 296f0eac5e18..37fc63708966 100644
--- a/tools/perf/arch/arm/util/Build
+++ b/tools/perf/arch/arm/util/Build
@@ -1,3 +1,5 @@
+perf-y += perf_regs.o
+
perf-$(CONFIG_DWARF) += dwarf-regs.o
perf-$(CONFIG_LOCAL_LIBUNWIND) += unwind-libunwind.o
diff --git a/tools/perf/arch/arm/util/perf_regs.c b/tools/perf/arch/arm/util/perf_regs.c
new file mode 100644
index 000000000000..2864e2e3776d
--- /dev/null
+++ b/tools/perf/arch/arm/util/perf_regs.c
@@ -0,0 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "../../util/perf_regs.h"
+
+const struct sample_reg sample_reg_masks[] = {
+ SMPL_REG_END
+};
diff --git a/tools/perf/arch/arm64/util/Build b/tools/perf/arch/arm64/util/Build
index 3cde540d2fcf..0a7782c61209 100644
--- a/tools/perf/arch/arm64/util/Build
+++ b/tools/perf/arch/arm64/util/Build
@@ -1,4 +1,5 @@
perf-y += header.o
+perf-y += perf_regs.o
perf-y += sym-handling.o
perf-$(CONFIG_DWARF) += dwarf-regs.o
perf-$(CONFIG_LOCAL_LIBUNWIND) += unwind-libunwind.o
diff --git a/tools/perf/arch/arm64/util/perf_regs.c b/tools/perf/arch/arm64/util/perf_regs.c
new file mode 100644
index 000000000000..2864e2e3776d
--- /dev/null
+++ b/tools/perf/arch/arm64/util/perf_regs.c
@@ -0,0 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "../../util/perf_regs.h"
+
+const struct sample_reg sample_reg_masks[] = {
+ SMPL_REG_END
+};
diff --git a/tools/perf/arch/arm64/util/sym-handling.c b/tools/perf/arch/arm64/util/sym-handling.c
index 5df788985130..8dfa3e5229f1 100644
--- a/tools/perf/arch/arm64/util/sym-handling.c
+++ b/tools/perf/arch/arm64/util/sym-handling.c
@@ -6,9 +6,10 @@
#include "symbol.h" // for the elf__needs_adjust_symbols() prototype
#include <stdbool.h>
-#include <gelf.h>
#ifdef HAVE_LIBELF_SUPPORT
+#include <gelf.h>
+
bool elf__needs_adjust_symbols(GElf_Ehdr ehdr)
{
return ehdr.e_type == ET_EXEC ||
diff --git a/tools/perf/arch/csky/util/Build b/tools/perf/arch/csky/util/Build
index 1160bb2332ba..7d3050134ae0 100644
--- a/tools/perf/arch/csky/util/Build
+++ b/tools/perf/arch/csky/util/Build
@@ -1,2 +1,4 @@
+perf-y += perf_regs.o
+
perf-$(CONFIG_DWARF) += dwarf-regs.o
perf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
diff --git a/tools/perf/arch/csky/util/perf_regs.c b/tools/perf/arch/csky/util/perf_regs.c
new file mode 100644
index 000000000000..2864e2e3776d
--- /dev/null
+++ b/tools/perf/arch/csky/util/perf_regs.c
@@ -0,0 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "../../util/perf_regs.h"
+
+const struct sample_reg sample_reg_masks[] = {
+ SMPL_REG_END
+};
diff --git a/tools/perf/arch/powerpc/util/kvm-stat.c b/tools/perf/arch/powerpc/util/kvm-stat.c
index 9cc1c4a9dec4..16807269317c 100644
--- a/tools/perf/arch/powerpc/util/kvm-stat.c
+++ b/tools/perf/arch/powerpc/util/kvm-stat.c
@@ -113,10 +113,10 @@ static int is_tracepoint_available(const char *str, struct evlist *evlist)
struct parse_events_error err;
int ret;
- err.str = NULL;
+ bzero(&err, sizeof(err));
ret = parse_events(evlist, str, &err);
if (err.str)
- pr_err("%s : %s\n", str, err.str);
+ parse_events_print_error(&err, "tracepoint");
return ret;
}
diff --git a/tools/perf/arch/riscv/util/Build b/tools/perf/arch/riscv/util/Build
index 1160bb2332ba..7d3050134ae0 100644
--- a/tools/perf/arch/riscv/util/Build
+++ b/tools/perf/arch/riscv/util/Build
@@ -1,2 +1,4 @@
+perf-y += perf_regs.o
+
perf-$(CONFIG_DWARF) += dwarf-regs.o
perf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
diff --git a/tools/perf/arch/riscv/util/perf_regs.c b/tools/perf/arch/riscv/util/perf_regs.c
new file mode 100644
index 000000000000..2864e2e3776d
--- /dev/null
+++ b/tools/perf/arch/riscv/util/perf_regs.c
@@ -0,0 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "../../util/perf_regs.h"
+
+const struct sample_reg sample_reg_masks[] = {
+ SMPL_REG_END
+};
diff --git a/tools/perf/arch/s390/annotate/instructions.c b/tools/perf/arch/s390/annotate/instructions.c
index a50e70baf918..2a6662e42f89 100644
--- a/tools/perf/arch/s390/annotate/instructions.c
+++ b/tools/perf/arch/s390/annotate/instructions.c
@@ -7,7 +7,7 @@ static int s390_call__parse(struct arch *arch, struct ins_operands *ops,
char *endptr, *tok, *name;
struct map *map = ms->map;
struct addr_map_symbol target = {
- .map = map,
+ .ms = { .map = map, },
};
tok = strchr(ops->raw, ',');
@@ -38,9 +38,9 @@ static int s390_call__parse(struct arch *arch, struct ins_operands *ops,
return -1;
target.addr = map__objdump_2mem(map, ops->target.addr);
- if (map_groups__find_ams(&target) == 0 &&
- map__rip_2objdump(target.map, map->map_ip(target.map, target.addr)) == ops->target.addr)
- ops->target.sym = target.sym;
+ if (map_groups__find_ams(ms->mg, &target) == 0 &&
+ map__rip_2objdump(target.ms.map, map->map_ip(target.ms.map, target.addr)) == ops->target.addr)
+ ops->target.sym = target.ms.sym;
return 0;
}
diff --git a/tools/perf/arch/s390/util/Build b/tools/perf/arch/s390/util/Build
index 22797f043b84..3d9d0f4f72ca 100644
--- a/tools/perf/arch/s390/util/Build
+++ b/tools/perf/arch/s390/util/Build
@@ -1,5 +1,6 @@
perf-y += header.o
perf-y += kvm-stat.o
+perf-y += perf_regs.o
perf-$(CONFIG_DWARF) += dwarf-regs.o
perf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
diff --git a/tools/perf/arch/s390/util/perf_regs.c b/tools/perf/arch/s390/util/perf_regs.c
new file mode 100644
index 000000000000..2864e2e3776d
--- /dev/null
+++ b/tools/perf/arch/s390/util/perf_regs.c
@@ -0,0 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "../../util/perf_regs.h"
+
+const struct sample_reg sample_reg_masks[] = {
+ SMPL_REG_END
+};
diff --git a/tools/perf/arch/x86/tests/insn-x86-dat-32.c b/tools/perf/arch/x86/tests/insn-x86-dat-32.c
index fab3c6de73fa..58f8f2a095c4 100644
--- a/tools/perf/arch/x86/tests/insn-x86-dat-32.c
+++ b/tools/perf/arch/x86/tests/insn-x86-dat-32.c
@@ -1647,6 +1647,12 @@
"0f ae 30 \txsaveopt (%eax)",},
{{0x0f, 0xae, 0xf0, }, 3, 0, "", "",
"0f ae f0 \tmfence ",},
+{{0x0f, 0x1c, 0x00, }, 3, 0, "", "",
+"0f 1c 00 \tcldemote (%eax)",},
+{{0x0f, 0x1c, 0x05, 0x78, 0x56, 0x34, 0x12, }, 7, 0, "", "",
+"0f 1c 05 78 56 34 12 \tcldemote 0x12345678",},
+{{0x0f, 0x1c, 0x84, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 1c 84 c8 78 56 34 12 \tcldemote 0x12345678(%eax,%ecx,8)",},
{{0x0f, 0xc7, 0x20, }, 3, 0, "", "",
"0f c7 20 \txsavec (%eax)",},
{{0x0f, 0xc7, 0x25, 0x78, 0x56, 0x34, 0x12, }, 7, 0, "", "",
@@ -1677,3 +1683,49 @@
"f3 0f ae 25 78 56 34 12 \tptwritel 0x12345678",},
{{0xf3, 0x0f, 0xae, 0xa4, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
"f3 0f ae a4 c8 78 56 34 12 \tptwritel 0x12345678(%eax,%ecx,8)",},
+{{0x66, 0x0f, 0xae, 0xf3, }, 4, 0, "", "",
+"66 0f ae f3 \ttpause %ebx",},
+{{0x67, 0xf3, 0x0f, 0xae, 0xf0, }, 5, 0, "", "",
+"67 f3 0f ae f0 \tumonitor %ax",},
+{{0xf3, 0x0f, 0xae, 0xf0, }, 4, 0, "", "",
+"f3 0f ae f0 \tumonitor %eax",},
+{{0xf2, 0x0f, 0xae, 0xf0, }, 4, 0, "", "",
+"f2 0f ae f0 \tumwait %eax",},
+{{0x0f, 0x38, 0xf9, 0x03, }, 4, 0, "", "",
+"0f 38 f9 03 \tmovdiri %eax,(%ebx)",},
+{{0x0f, 0x38, 0xf9, 0x88, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 38 f9 88 78 56 34 12 \tmovdiri %ecx,0x12345678(%eax)",},
+{{0x66, 0x0f, 0x38, 0xf8, 0x18, }, 5, 0, "", "",
+"66 0f 38 f8 18 \tmovdir64b (%eax),%ebx",},
+{{0x66, 0x0f, 0x38, 0xf8, 0x88, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"66 0f 38 f8 88 78 56 34 12 \tmovdir64b 0x12345678(%eax),%ecx",},
+{{0x67, 0x66, 0x0f, 0x38, 0xf8, 0x1c, }, 6, 0, "", "",
+"67 66 0f 38 f8 1c \tmovdir64b (%si),%bx",},
+{{0x67, 0x66, 0x0f, 0x38, 0xf8, 0x8c, 0x34, 0x12, }, 8, 0, "", "",
+"67 66 0f 38 f8 8c 34 12 \tmovdir64b 0x1234(%si),%cx",},
+{{0xf2, 0x0f, 0x38, 0xf8, 0x18, }, 5, 0, "", "",
+"f2 0f 38 f8 18 \tenqcmd (%eax),%ebx",},
+{{0xf2, 0x0f, 0x38, 0xf8, 0x88, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f2 0f 38 f8 88 78 56 34 12 \tenqcmd 0x12345678(%eax),%ecx",},
+{{0x67, 0xf2, 0x0f, 0x38, 0xf8, 0x1c, }, 6, 0, "", "",
+"67 f2 0f 38 f8 1c \tenqcmd (%si),%bx",},
+{{0x67, 0xf2, 0x0f, 0x38, 0xf8, 0x8c, 0x34, 0x12, }, 8, 0, "", "",
+"67 f2 0f 38 f8 8c 34 12 \tenqcmd 0x1234(%si),%cx",},
+{{0xf3, 0x0f, 0x38, 0xf8, 0x18, }, 5, 0, "", "",
+"f3 0f 38 f8 18 \tenqcmds (%eax),%ebx",},
+{{0xf3, 0x0f, 0x38, 0xf8, 0x88, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f3 0f 38 f8 88 78 56 34 12 \tenqcmds 0x12345678(%eax),%ecx",},
+{{0x67, 0xf3, 0x0f, 0x38, 0xf8, 0x1c, }, 6, 0, "", "",
+"67 f3 0f 38 f8 1c \tenqcmds (%si),%bx",},
+{{0x67, 0xf3, 0x0f, 0x38, 0xf8, 0x8c, 0x34, 0x12, }, 8, 0, "", "",
+"67 f3 0f 38 f8 8c 34 12 \tenqcmds 0x1234(%si),%cx",},
+{{0x0f, 0x01, 0xcf, }, 3, 0, "", "",
+"0f 01 cf \tencls ",},
+{{0x0f, 0x01, 0xd7, }, 3, 0, "", "",
+"0f 01 d7 \tenclu ",},
+{{0x0f, 0x01, 0xc0, }, 3, 0, "", "",
+"0f 01 c0 \tenclv ",},
+{{0x0f, 0x01, 0xc5, }, 3, 0, "", "",
+"0f 01 c5 \tpconfig ",},
+{{0xf3, 0x0f, 0x09, }, 3, 0, "", "",
+"f3 0f 09 \twbnoinvd ",},
diff --git a/tools/perf/arch/x86/tests/insn-x86-dat-64.c b/tools/perf/arch/x86/tests/insn-x86-dat-64.c
index c57f34603b9b..656f8aed31de 100644
--- a/tools/perf/arch/x86/tests/insn-x86-dat-64.c
+++ b/tools/perf/arch/x86/tests/insn-x86-dat-64.c
@@ -1667,6 +1667,16 @@
"41 0f ae 30 \txsaveopt (%r8)",},
{{0x0f, 0xae, 0xf0, }, 3, 0, "", "",
"0f ae f0 \tmfence ",},
+{{0x0f, 0x1c, 0x00, }, 3, 0, "", "",
+"0f 1c 00 \tcldemote (%rax)",},
+{{0x41, 0x0f, 0x1c, 0x00, }, 4, 0, "", "",
+"41 0f 1c 00 \tcldemote (%r8)",},
+{{0x0f, 0x1c, 0x04, 0x25, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 1c 04 25 78 56 34 12 \tcldemote 0x12345678",},
+{{0x0f, 0x1c, 0x84, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 1c 84 c8 78 56 34 12 \tcldemote 0x12345678(%rax,%rcx,8)",},
+{{0x41, 0x0f, 0x1c, 0x84, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"41 0f 1c 84 c8 78 56 34 12 \tcldemote 0x12345678(%r8,%rcx,8)",},
{{0x0f, 0xc7, 0x20, }, 3, 0, "", "",
"0f c7 20 \txsavec (%rax)",},
{{0x41, 0x0f, 0xc7, 0x20, }, 4, 0, "", "",
@@ -1727,3 +1737,55 @@
"f3 48 0f ae a4 c8 78 56 34 12 \tptwriteq 0x12345678(%rax,%rcx,8)",},
{{0xf3, 0x49, 0x0f, 0xae, 0xa4, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 10, 0, "", "",
"f3 49 0f ae a4 c8 78 56 34 12 \tptwriteq 0x12345678(%r8,%rcx,8)",},
+{{0x66, 0x0f, 0xae, 0xf3, }, 4, 0, "", "",
+"66 0f ae f3 \ttpause %ebx",},
+{{0x66, 0x41, 0x0f, 0xae, 0xf0, }, 5, 0, "", "",
+"66 41 0f ae f0 \ttpause %r8d",},
+{{0x67, 0xf3, 0x0f, 0xae, 0xf0, }, 5, 0, "", "",
+"67 f3 0f ae f0 \tumonitor %eax",},
+{{0xf3, 0x0f, 0xae, 0xf0, }, 4, 0, "", "",
+"f3 0f ae f0 \tumonitor %rax",},
+{{0x67, 0xf3, 0x41, 0x0f, 0xae, 0xf0, }, 6, 0, "", "",
+"67 f3 41 0f ae f0 \tumonitor %r8d",},
+{{0xf2, 0x0f, 0xae, 0xf0, }, 4, 0, "", "",
+"f2 0f ae f0 \tumwait %eax",},
+{{0xf2, 0x41, 0x0f, 0xae, 0xf0, }, 5, 0, "", "",
+"f2 41 0f ae f0 \tumwait %r8d",},
+{{0x48, 0x0f, 0x38, 0xf9, 0x03, }, 5, 0, "", "",
+"48 0f 38 f9 03 \tmovdiri %rax,(%rbx)",},
+{{0x48, 0x0f, 0x38, 0xf9, 0x88, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"48 0f 38 f9 88 78 56 34 12 \tmovdiri %rcx,0x12345678(%rax)",},
+{{0x66, 0x0f, 0x38, 0xf8, 0x18, }, 5, 0, "", "",
+"66 0f 38 f8 18 \tmovdir64b (%rax),%rbx",},
+{{0x66, 0x0f, 0x38, 0xf8, 0x88, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"66 0f 38 f8 88 78 56 34 12 \tmovdir64b 0x12345678(%rax),%rcx",},
+{{0x67, 0x66, 0x0f, 0x38, 0xf8, 0x18, }, 6, 0, "", "",
+"67 66 0f 38 f8 18 \tmovdir64b (%eax),%ebx",},
+{{0x67, 0x66, 0x0f, 0x38, 0xf8, 0x88, 0x78, 0x56, 0x34, 0x12, }, 10, 0, "", "",
+"67 66 0f 38 f8 88 78 56 34 12 \tmovdir64b 0x12345678(%eax),%ecx",},
+{{0xf2, 0x0f, 0x38, 0xf8, 0x18, }, 5, 0, "", "",
+"f2 0f 38 f8 18 \tenqcmd (%rax),%rbx",},
+{{0xf2, 0x0f, 0x38, 0xf8, 0x88, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f2 0f 38 f8 88 78 56 34 12 \tenqcmd 0x12345678(%rax),%rcx",},
+{{0x67, 0xf2, 0x0f, 0x38, 0xf8, 0x18, }, 6, 0, "", "",
+"67 f2 0f 38 f8 18 \tenqcmd (%eax),%ebx",},
+{{0x67, 0xf2, 0x0f, 0x38, 0xf8, 0x88, 0x78, 0x56, 0x34, 0x12, }, 10, 0, "", "",
+"67 f2 0f 38 f8 88 78 56 34 12 \tenqcmd 0x12345678(%eax),%ecx",},
+{{0xf3, 0x0f, 0x38, 0xf8, 0x18, }, 5, 0, "", "",
+"f3 0f 38 f8 18 \tenqcmds (%rax),%rbx",},
+{{0xf3, 0x0f, 0x38, 0xf8, 0x88, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f3 0f 38 f8 88 78 56 34 12 \tenqcmds 0x12345678(%rax),%rcx",},
+{{0x67, 0xf3, 0x0f, 0x38, 0xf8, 0x18, }, 6, 0, "", "",
+"67 f3 0f 38 f8 18 \tenqcmds (%eax),%ebx",},
+{{0x67, 0xf3, 0x0f, 0x38, 0xf8, 0x88, 0x78, 0x56, 0x34, 0x12, }, 10, 0, "", "",
+"67 f3 0f 38 f8 88 78 56 34 12 \tenqcmds 0x12345678(%eax),%ecx",},
+{{0x0f, 0x01, 0xcf, }, 3, 0, "", "",
+"0f 01 cf \tencls ",},
+{{0x0f, 0x01, 0xd7, }, 3, 0, "", "",
+"0f 01 d7 \tenclu ",},
+{{0x0f, 0x01, 0xc0, }, 3, 0, "", "",
+"0f 01 c0 \tenclv ",},
+{{0x0f, 0x01, 0xc5, }, 3, 0, "", "",
+"0f 01 c5 \tpconfig ",},
+{{0xf3, 0x0f, 0x09, }, 3, 0, "", "",
+"f3 0f 09 \twbnoinvd ",},
diff --git a/tools/perf/arch/x86/tests/insn-x86-dat-src.c b/tools/perf/arch/x86/tests/insn-x86-dat-src.c
index 891415b10984..dd85a3afd9ce 100644
--- a/tools/perf/arch/x86/tests/insn-x86-dat-src.c
+++ b/tools/perf/arch/x86/tests/insn-x86-dat-src.c
@@ -1320,6 +1320,14 @@ int main(void)
asm volatile("xsaveopt (%r8)");
asm volatile("mfence");
+ /* cldemote m8 */
+
+ asm volatile("cldemote (%rax)");
+ asm volatile("cldemote (%r8)");
+ asm volatile("cldemote (0x12345678)");
+ asm volatile("cldemote 0x12345678(%rax,%rcx,8)");
+ asm volatile("cldemote 0x12345678(%r8,%rcx,8)");
+
/* xsavec mem */
asm volatile("xsavec (%rax)");
@@ -1364,6 +1372,48 @@ int main(void)
asm volatile("ptwriteq 0x12345678(%rax,%rcx,8)");
asm volatile("ptwriteq 0x12345678(%r8,%rcx,8)");
+ /* tpause */
+
+ asm volatile("tpause %ebx");
+ asm volatile("tpause %r8d");
+
+ /* umonitor */
+
+ asm volatile("umonitor %eax");
+ asm volatile("umonitor %rax");
+ asm volatile("umonitor %r8d");
+
+ /* umwait */
+
+ asm volatile("umwait %eax");
+ asm volatile("umwait %r8d");
+
+ /* movdiri */
+
+ asm volatile("movdiri %rax,(%rbx)");
+ asm volatile("movdiri %rcx,0x12345678(%rax)");
+
+ /* movdir64b */
+
+ asm volatile("movdir64b (%rax),%rbx");
+ asm volatile("movdir64b 0x12345678(%rax),%rcx");
+ asm volatile("movdir64b (%eax),%ebx");
+ asm volatile("movdir64b 0x12345678(%eax),%ecx");
+
+ /* enqcmd */
+
+ asm volatile("enqcmd (%rax),%rbx");
+ asm volatile("enqcmd 0x12345678(%rax),%rcx");
+ asm volatile("enqcmd (%eax),%ebx");
+ asm volatile("enqcmd 0x12345678(%eax),%ecx");
+
+ /* enqcmds */
+
+ asm volatile("enqcmds (%rax),%rbx");
+ asm volatile("enqcmds 0x12345678(%rax),%rcx");
+ asm volatile("enqcmds (%eax),%ebx");
+ asm volatile("enqcmds 0x12345678(%eax),%ecx");
+
#else /* #ifdef __x86_64__ */
/* bound r32, mem (same op code as EVEX prefix) */
@@ -2656,6 +2706,12 @@ int main(void)
asm volatile("xsaveopt (%eax)");
asm volatile("mfence");
+ /* cldemote m8 */
+
+ asm volatile("cldemote (%eax)");
+ asm volatile("cldemote (0x12345678)");
+ asm volatile("cldemote 0x12345678(%eax,%ecx,8)");
+
/* xsavec mem */
asm volatile("xsavec (%eax)");
@@ -2684,8 +2740,61 @@ int main(void)
asm volatile("ptwritel (0x12345678)");
asm volatile("ptwritel 0x12345678(%eax,%ecx,8)");
+ /* tpause */
+
+ asm volatile("tpause %ebx");
+
+ /* umonitor */
+
+ asm volatile("umonitor %ax");
+ asm volatile("umonitor %eax");
+
+ /* umwait */
+
+ asm volatile("umwait %eax");
+
+ /* movdiri */
+
+ asm volatile("movdiri %eax,(%ebx)");
+ asm volatile("movdiri %ecx,0x12345678(%eax)");
+
+ /* movdir64b */
+
+ asm volatile("movdir64b (%eax),%ebx");
+ asm volatile("movdir64b 0x12345678(%eax),%ecx");
+ asm volatile("movdir64b (%si),%bx");
+ asm volatile("movdir64b 0x1234(%si),%cx");
+
+ /* enqcmd */
+
+ asm volatile("enqcmd (%eax),%ebx");
+ asm volatile("enqcmd 0x12345678(%eax),%ecx");
+ asm volatile("enqcmd (%si),%bx");
+ asm volatile("enqcmd 0x1234(%si),%cx");
+
+ /* enqcmds */
+
+ asm volatile("enqcmds (%eax),%ebx");
+ asm volatile("enqcmds 0x12345678(%eax),%ecx");
+ asm volatile("enqcmds (%si),%bx");
+ asm volatile("enqcmds 0x1234(%si),%cx");
+
#endif /* #ifndef __x86_64__ */
+ /* SGX */
+
+ asm volatile("encls");
+ asm volatile("enclu");
+ asm volatile("enclv");
+
+ /* pconfig */
+
+ asm volatile("pconfig");
+
+ /* wbnoinvd */
+
+ asm volatile("wbnoinvd");
+
/* Following line is a marker for the awk script - do not change */
asm volatile("rdtsc"); /* Stop here */
diff --git a/tools/perf/arch/x86/tests/perf-time-to-tsc.c b/tools/perf/arch/x86/tests/perf-time-to-tsc.c
index fa947952c16a..909ead08a6f6 100644
--- a/tools/perf/arch/x86/tests/perf-time-to-tsc.c
+++ b/tools/perf/arch/x86/tests/perf-time-to-tsc.c
@@ -9,6 +9,7 @@
#include <sys/prctl.h>
#include <perf/cpumap.h>
#include <perf/evlist.h>
+#include <perf/mmap.h>
#include "debug.h"
#include "parse-events.h"
@@ -117,10 +118,10 @@ int test__perf_time_to_tsc(struct test *test __maybe_unused, int subtest __maybe
for (i = 0; i < evlist->core.nr_mmaps; i++) {
md = &evlist->mmap[i];
- if (perf_mmap__read_init(md) < 0)
+ if (perf_mmap__read_init(&md->core) < 0)
continue;
- while ((event = perf_mmap__read_event(md)) != NULL) {
+ while ((event = perf_mmap__read_event(&md->core)) != NULL) {
struct perf_sample sample;
if (event->header.type != PERF_RECORD_COMM ||
@@ -139,9 +140,9 @@ int test__perf_time_to_tsc(struct test *test __maybe_unused, int subtest __maybe
comm2_time = sample.time;
}
next_event:
- perf_mmap__consume(md);
+ perf_mmap__consume(&md->core);
}
- perf_mmap__read_done(md);
+ perf_mmap__read_done(&md->core);
}
if (!comm1_time || !comm2_time)
diff --git a/tools/perf/arch/x86/util/auxtrace.c b/tools/perf/arch/x86/util/auxtrace.c
index 96f4a2c11893..7abc9fd4cbec 100644
--- a/tools/perf/arch/x86/util/auxtrace.c
+++ b/tools/perf/arch/x86/util/auxtrace.c
@@ -26,7 +26,11 @@ struct auxtrace_record *auxtrace_record__init_intel(struct evlist *evlist,
bool found_bts = false;
intel_pt_pmu = perf_pmu__find(INTEL_PT_PMU_NAME);
+ if (intel_pt_pmu)
+ intel_pt_pmu->auxtrace = true;
intel_bts_pmu = perf_pmu__find(INTEL_BTS_PMU_NAME);
+ if (intel_bts_pmu)
+ intel_bts_pmu->auxtrace = true;
evlist__for_each_entry(evlist, evsel) {
if (intel_pt_pmu && evsel->core.attr.type == intel_pt_pmu->type)
diff --git a/tools/perf/arch/x86/util/event.c b/tools/perf/arch/x86/util/event.c
index d357c625c09f..d1044df7c0d7 100644
--- a/tools/perf/arch/x86/util/event.c
+++ b/tools/perf/arch/x86/util/event.c
@@ -29,7 +29,7 @@ int perf_event__synthesize_extra_kmaps(struct perf_tool *tool,
return -1;
}
- for (pos = maps__first(maps); pos; pos = map__next(pos)) {
+ maps__for_each_entry(maps, pos) {
struct kmap *kmap;
size_t size;
diff --git a/tools/perf/arch/x86/util/intel-bts.c b/tools/perf/arch/x86/util/intel-bts.c
index f7f68a50a5cd..27d9e214d068 100644
--- a/tools/perf/arch/x86/util/intel-bts.c
+++ b/tools/perf/arch/x86/util/intel-bts.c
@@ -113,6 +113,11 @@ static int intel_bts_recording_options(struct auxtrace_record *itr,
const struct perf_cpu_map *cpus = evlist->core.cpus;
bool privileged = perf_event_paranoid_check(-1);
+ if (opts->auxtrace_sample_mode) {
+ pr_err("Intel BTS does not support AUX area sampling\n");
+ return -EINVAL;
+ }
+
btsr->evlist = evlist;
btsr->snapshot_mode = opts->auxtrace_snapshot_mode;
diff --git a/tools/perf/arch/x86/util/intel-pt.c b/tools/perf/arch/x86/util/intel-pt.c
index d6d26256915f..20df442fdf36 100644
--- a/tools/perf/arch/x86/util/intel-pt.c
+++ b/tools/perf/arch/x86/util/intel-pt.c
@@ -17,6 +17,7 @@
#include "../../util/event.h"
#include "../../util/evlist.h"
#include "../../util/evsel.h"
+#include "../../util/evsel_config.h"
#include "../../util/cpumap.h"
#include "../../util/mmap.h"
#include <subcmd/parse-options.h>
@@ -551,6 +552,43 @@ static int intel_pt_validate_config(struct perf_pmu *intel_pt_pmu,
evsel->core.attr.config);
}
+static void intel_pt_config_sample_mode(struct perf_pmu *intel_pt_pmu,
+ struct evsel *evsel)
+{
+ struct perf_evsel_config_term *term;
+ u64 user_bits = 0, bits;
+
+ term = perf_evsel__get_config_term(evsel, CFG_CHG);
+ if (term)
+ user_bits = term->val.cfg_chg;
+
+ bits = perf_pmu__format_bits(&intel_pt_pmu->format, "psb_period");
+
+ /* Did user change psb_period */
+ if (bits & user_bits)
+ return;
+
+ /* Set psb_period to 0 */
+ evsel->core.attr.config &= ~bits;
+}
+
+static void intel_pt_min_max_sample_sz(struct evlist *evlist,
+ size_t *min_sz, size_t *max_sz)
+{
+ struct evsel *evsel;
+
+ evlist__for_each_entry(evlist, evsel) {
+ size_t sz = evsel->core.attr.aux_sample_size;
+
+ if (!sz)
+ continue;
+ if (min_sz && (sz < *min_sz || !*min_sz))
+ *min_sz = sz;
+ if (max_sz && sz > *max_sz)
+ *max_sz = sz;
+ }
+}
+
/*
* Currently, there is not enough information to disambiguate different PEBS
* events, so only allow one.
@@ -606,6 +644,11 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
return -EINVAL;
}
+ if (opts->auxtrace_snapshot_mode && opts->auxtrace_sample_mode) {
+ pr_err("Snapshot mode (" INTEL_PT_PMU_NAME " PMU) and sample trace cannot be used together\n");
+ return -EINVAL;
+ }
+
if (opts->use_clockid) {
pr_err("Cannot use clockid (-k option) with " INTEL_PT_PMU_NAME "\n");
return -EINVAL;
@@ -617,6 +660,9 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
if (!opts->full_auxtrace)
return 0;
+ if (opts->auxtrace_sample_mode)
+ intel_pt_config_sample_mode(intel_pt_pmu, intel_pt_evsel);
+
err = intel_pt_validate_config(intel_pt_pmu, intel_pt_evsel);
if (err)
return err;
@@ -666,6 +712,34 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
opts->auxtrace_snapshot_size, psb_period);
}
+ /* Set default sizes for sample mode */
+ if (opts->auxtrace_sample_mode) {
+ size_t psb_period = intel_pt_psb_period(intel_pt_pmu, evlist);
+ size_t min_sz = 0, max_sz = 0;
+
+ intel_pt_min_max_sample_sz(evlist, &min_sz, &max_sz);
+ if (!opts->auxtrace_mmap_pages && !privileged &&
+ opts->mmap_pages == UINT_MAX)
+ opts->mmap_pages = KiB(256) / page_size;
+ if (!opts->auxtrace_mmap_pages) {
+ size_t sz = round_up(max_sz, page_size) / page_size;
+
+ opts->auxtrace_mmap_pages = roundup_pow_of_two(sz);
+ }
+ if (max_sz > opts->auxtrace_mmap_pages * (size_t)page_size) {
+ pr_err("Sample size %zu must not be greater than AUX area tracing mmap size %zu\n",
+ max_sz,
+ opts->auxtrace_mmap_pages * (size_t)page_size);
+ return -EINVAL;
+ }
+ pr_debug2("Intel PT min. sample size: %zu max. sample size: %zu\n",
+ min_sz, max_sz);
+ if (psb_period &&
+ min_sz <= psb_period + INTEL_PT_PSB_PERIOD_NEAR)
+ ui__warning("Intel PT sample size (%zu) may be too small for PSB period (%zu)\n",
+ min_sz, psb_period);
+ }
+
/* Set default sizes for full trace mode */
if (opts->full_auxtrace && !opts->auxtrace_mmap_pages) {
if (privileged) {
@@ -682,7 +756,7 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
size_t sz = opts->auxtrace_mmap_pages * (size_t)page_size;
size_t min_sz;
- if (opts->auxtrace_snapshot_mode)
+ if (opts->auxtrace_snapshot_mode || opts->auxtrace_sample_mode)
min_sz = KiB(4);
else
min_sz = KiB(8);
@@ -1136,5 +1210,10 @@ struct auxtrace_record *intel_pt_recording_init(int *err)
ptr->itr.parse_snapshot_options = intel_pt_parse_snapshot_options;
ptr->itr.reference = intel_pt_reference;
ptr->itr.read_finish = intel_pt_read_finish;
+ /*
+ * Decoding starts at a PSB packet. Minimum PSB period is 2K so 4K
+ * should give at least 1 PSB per sample.
+ */
+ ptr->itr.default_aux_sample_size = 4096;
return &ptr->itr;
}
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index 8db8fc9bddef..5898662bc8fb 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -83,7 +83,7 @@ static void process_basic_block(struct addr_map_symbol *start,
struct addr_map_symbol *end,
struct branch_flags *flags)
{
- struct symbol *sym = start->sym;
+ struct symbol *sym = start->ms.sym;
struct annotation *notes = sym ? symbol__annotation(sym) : NULL;
struct block_range_iter iter;
struct block_range *entry;
@@ -201,7 +201,7 @@ static int process_branch_callback(struct evsel *evsel,
if (a.map != NULL)
a.map->dso->hit = 1;
- hist__account_cycles(sample->branch_stack, al, sample, false);
+ hist__account_cycles(sample->branch_stack, al, sample, false, NULL);
ret = hist_entry_iter__add(&iter, &a, PERF_MAX_STACK_DEPTH, ann);
return ret;
@@ -301,9 +301,9 @@ static int hist_entry__tty_annotate(struct hist_entry *he,
struct perf_annotate *ann)
{
if (!ann->use_stdio2)
- return symbol__tty_annotate(he->ms.sym, he->ms.map, evsel, &ann->opts);
+ return symbol__tty_annotate(&he->ms, evsel, &ann->opts);
- return symbol__tty_annotate2(he->ms.sym, he->ms.map, evsel, &ann->opts);
+ return symbol__tty_annotate2(&he->ms, evsel, &ann->opts);
}
static void hists__find_annotations(struct hists *hists,
diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c
index c37a78677955..376dbf10ad64 100644
--- a/tools/perf/builtin-diff.c
+++ b/tools/perf/builtin-diff.c
@@ -23,6 +23,8 @@
#include "util/time-utils.h"
#include "util/annotate.h"
#include "util/map.h"
+#include "util/spark.h"
+#include "util/block-info.h"
#include <linux/err.h>
#include <linux/zalloc.h>
#include <subcmd/pager.h>
@@ -53,6 +55,7 @@ enum {
PERF_HPP_DIFF__FORMULA,
PERF_HPP_DIFF__DELTA_ABS,
PERF_HPP_DIFF__CYCLES,
+ PERF_HPP_DIFF__CYCLES_HIST,
PERF_HPP_DIFF__MAX_INDEX
};
@@ -87,6 +90,7 @@ static bool force;
static bool show_period;
static bool show_formula;
static bool show_baseline_only;
+static bool cycles_hist;
static unsigned int sort_compute = 1;
static s64 compute_wdiff_w1;
@@ -95,8 +99,6 @@ static s64 compute_wdiff_w2;
static const char *cpu_list;
static DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
-static struct addr_location dummy_al;
-
enum {
COMPUTE_DELTA,
COMPUTE_RATIO,
@@ -164,6 +166,10 @@ static struct header_column {
[PERF_HPP_DIFF__CYCLES] = {
.name = "[Program Block Range] Cycles Diff",
.width = 70,
+ },
+ [PERF_HPP_DIFF__CYCLES_HIST] = {
+ .name = "stddev/Hist",
+ .width = NUM_SPARKS + 9,
}
};
@@ -420,7 +426,8 @@ static int diff__process_sample_event(struct perf_tool *tool,
goto out_put;
}
- hist__account_cycles(sample->branch_stack, &al, sample, false);
+ hist__account_cycles(sample->branch_stack, &al, sample, false,
+ NULL);
}
/*
@@ -530,41 +537,6 @@ static void hists__baseline_only(struct hists *hists)
}
}
-static int64_t block_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
- struct hist_entry *left, struct hist_entry *right)
-{
- struct block_info *bi_l = left->block_info;
- struct block_info *bi_r = right->block_info;
- int cmp;
-
- if (!bi_l->sym || !bi_r->sym) {
- if (!bi_l->sym && !bi_r->sym)
- return 0;
- else if (!bi_l->sym)
- return -1;
- else
- return 1;
- }
-
- if (bi_l->sym == bi_r->sym) {
- if (bi_l->start == bi_r->start) {
- if (bi_l->end == bi_r->end)
- return 0;
- else
- return (int64_t)(bi_r->end - bi_l->end);
- } else
- return (int64_t)(bi_r->start - bi_l->start);
- } else {
- cmp = strcmp(bi_l->sym->name, bi_r->sym->name);
- return cmp;
- }
-
- if (bi_l->sym->start != bi_r->sym->start)
- return (int64_t)(bi_r->sym->start - bi_l->sym->start);
-
- return (int64_t)(bi_r->sym->end - bi_l->sym->end);
-}
-
static int64_t block_cycles_diff_cmp(struct hist_entry *left,
struct hist_entry *right)
{
@@ -593,64 +565,13 @@ static void init_block_hist(struct block_hist *bh)
INIT_LIST_HEAD(&bh->block_fmt.list);
INIT_LIST_HEAD(&bh->block_fmt.sort_list);
- bh->block_fmt.cmp = block_cmp;
+ bh->block_fmt.cmp = block_info__cmp;
bh->block_fmt.sort = block_sort;
perf_hpp_list__register_sort_field(&bh->block_list,
&bh->block_fmt);
bh->valid = true;
}
-static void init_block_info(struct block_info *bi, struct symbol *sym,
- struct cyc_hist *ch, int offset)
-{
- bi->sym = sym;
- bi->start = ch->start;
- bi->end = offset;
- bi->cycles = ch->cycles;
- bi->cycles_aggr = ch->cycles_aggr;
- bi->num = ch->num;
- bi->num_aggr = ch->num_aggr;
-}
-
-static int process_block_per_sym(struct hist_entry *he)
-{
- struct annotation *notes;
- struct cyc_hist *ch;
- struct block_hist *bh;
-
- if (!he->ms.map || !he->ms.sym)
- return 0;
-
- notes = symbol__annotation(he->ms.sym);
- if (!notes || !notes->src || !notes->src->cycles_hist)
- return 0;
-
- bh = container_of(he, struct block_hist, he);
- init_block_hist(bh);
-
- ch = notes->src->cycles_hist;
- for (unsigned int i = 0; i < symbol__size(he->ms.sym); i++) {
- if (ch[i].num_aggr) {
- struct block_info *bi;
- struct hist_entry *he_block;
-
- bi = block_info__new();
- if (!bi)
- return -1;
-
- init_block_info(bi, he->ms.sym, &ch[i], i);
- he_block = hists__add_entry_block(&bh->block_hists,
- &dummy_al, bi);
- if (!he_block) {
- block_info__put(bi);
- return -1;
- }
- }
- }
-
- return 0;
-}
-
static int block_pair_cmp(struct hist_entry *a, struct hist_entry *b)
{
struct block_info *bi_a = a->block_info;
@@ -689,6 +610,21 @@ static struct hist_entry *get_block_pair(struct hist_entry *he,
return NULL;
}
+static void init_spark_values(unsigned long *svals, int num)
+{
+ for (int i = 0; i < num; i++)
+ svals[i] = 0;
+}
+
+static void update_spark_value(unsigned long *svals, int num,
+ struct stats *stats, u64 val)
+{
+ int n = stats->n;
+
+ if (n < num)
+ svals[n] = val;
+}
+
static void compute_cycles_diff(struct hist_entry *he,
struct hist_entry *pair)
{
@@ -697,6 +633,26 @@ static void compute_cycles_diff(struct hist_entry *he,
pair->diff.cycles =
pair->block_info->cycles_aggr / pair->block_info->num_aggr -
he->block_info->cycles_aggr / he->block_info->num_aggr;
+
+ if (!cycles_hist)
+ return;
+
+ init_stats(&pair->diff.stats);
+ init_spark_values(pair->diff.svals, NUM_SPARKS);
+
+ for (int i = 0; i < pair->block_info->num; i++) {
+ u64 val;
+
+ if (i >= he->block_info->num || i >= NUM_SPARKS)
+ break;
+
+ val = labs(pair->block_info->cycles_spark[i] -
+ he->block_info->cycles_spark[i]);
+
+ update_spark_value(pair->diff.svals, NUM_SPARKS,
+ &pair->diff.stats, val);
+ update_stats(&pair->diff.stats, val);
+ }
}
}
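
For orientation, a minimal standalone sketch of what the loop above sets up, with the stats update modeled on tools/perf/util/stats.c (Welford's online algorithm) and NUM_SPARKS assumed to be 8 as a stand-in for util/spark.h; everything here is illustrative, not perf code. The first NUM_SPARKS per-iteration diffs are kept verbatim for the sparkline while every diff feeds the running stddev:

#include <stdio.h>

#define NUM_SPARKS 8	/* assumption: stand-in for util/spark.h */

struct stats { double mean, M2; unsigned long n; };

static void demo_update_stats(struct stats *s, unsigned long val)
{
	double delta = val - s->mean;

	s->n++;
	s->mean += delta / s->n;
	s->M2 += delta * (val - s->mean);	/* Welford's online variance */
}

int main(void)
{
	unsigned long svals[NUM_SPARKS] = { 0 };
	unsigned long diffs[] = { 12, 7, 30, 5, 9, 11, 40, 3, 8, 2 };
	struct stats st = { 0 };

	for (unsigned long i = 0; i < sizeof(diffs) / sizeof(diffs[0]); i++) {
		if (st.n < NUM_SPARKS)			/* update_spark_value() */
			svals[st.n] = diffs[i];
		demo_update_stats(&st, diffs[i]);	/* update_stats() */
	}
	printf("svals[0]=%lu, n=%lu, mean=%.1f\n", svals[0], st.n, st.mean);
	return 0;
}
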
@@ -720,13 +676,6 @@ static void block_hists_match(struct hists *hists_base,
}
}
-static int filter_cb(struct hist_entry *he, void *arg __maybe_unused)
-{
- /* Skip the calculation of column length in output_resort */
- he->filtered = true;
- return 0;
-}
-
static void hists__precompute(struct hists *hists)
{
struct rb_root_cached *root;
@@ -747,8 +696,11 @@ static void hists__precompute(struct hists *hists)
he = rb_entry(next, struct hist_entry, rb_node_in);
next = rb_next(&he->rb_node_in);
- if (compute == COMPUTE_CYCLES)
- process_block_per_sym(he);
+ if (compute == COMPUTE_CYCLES) {
+ bh = container_of(he, struct block_hist, he);
+ init_block_hist(bh);
+ block_info__process_sym(he, bh, NULL, 0);
+ }
data__for_each_file_new(i, d) {
pair = get_pair_data(he, d);
@@ -767,16 +719,18 @@ static void hists__precompute(struct hists *hists)
compute_wdiff(he, pair);
break;
case COMPUTE_CYCLES:
- process_block_per_sym(pair);
- bh = container_of(he, struct block_hist, he);
pair_bh = container_of(pair, struct block_hist,
he);
+ init_block_hist(pair_bh);
+ block_info__process_sym(pair, pair_bh, NULL, 0);
+
+ bh = container_of(he, struct block_hist, he);
if (bh->valid && pair_bh->valid) {
block_hists_match(&bh->block_hists,
&pair_bh->block_hists);
- hists__output_resort_cb(&pair_bh->block_hists,
- NULL, filter_cb);
+ hists__output_resort(&pair_bh->block_hists,
+ NULL);
}
break;
default:
@@ -1255,6 +1209,9 @@ static const struct option options[] = {
"Show period values."),
OPT_BOOLEAN('F', "formula", &show_formula,
"Show formula."),
+ OPT_BOOLEAN(0, "cycles-hist", &cycles_hist,
+ "Show cycles histogram and standard deviation "
+ "- WARNING: use only with -c cycles."),
OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
"dump raw trace in ASCII"),
OPT_BOOLEAN('f', "force", &force, "don't complain, do it"),
@@ -1462,6 +1419,90 @@ static int hpp__color_cycles(struct perf_hpp_fmt *fmt,
return __hpp__color_compare(fmt, hpp, he, COMPUTE_CYCLES);
}
+static int all_zero(unsigned long *vals, int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++)
+ if (vals[i] != 0)
+ return 0;
+ return 1;
+}
+
+static int print_cycles_spark(char *bf, int size, unsigned long *svals, u64 n)
+{
+ int printed;
+
+ if (n <= 1)
+ return 0;
+
+ if (n > NUM_SPARKS)
+ n = NUM_SPARKS;
+ if (all_zero(svals, n))
+ return 0;
+
+ printed = print_spark(bf, size, svals, n);
+ printed += scnprintf(bf + printed, size - printed, " ");
+ return printed;
+}
+
+static int hpp__color_cycles_hist(struct perf_hpp_fmt *fmt,
+ struct perf_hpp *hpp, struct hist_entry *he)
+{
+ struct diff_hpp_fmt *dfmt =
+ container_of(fmt, struct diff_hpp_fmt, fmt);
+ struct hist_entry *pair = get_pair_fmt(he, dfmt);
+ struct block_hist *bh = container_of(he, struct block_hist, he);
+ struct block_hist *bh_pair;
+ struct hist_entry *block_he;
+ char spark[32], buf[128];
+ double r;
+ int ret, pad;
+
+ if (!pair) {
+ if (bh->block_idx)
+ hpp->skip = true;
+
+ goto no_print;
+ }
+
+ bh_pair = container_of(pair, struct block_hist, he);
+
+ block_he = hists__get_entry(&bh_pair->block_hists, bh->block_idx);
+ if (!block_he) {
+ hpp->skip = true;
+ goto no_print;
+ }
+
+ ret = print_cycles_spark(spark, sizeof(spark), block_he->diff.svals,
+ block_he->diff.stats.n);
+
+ r = rel_stddev_stats(stddev_stats(&block_he->diff.stats),
+ avg_stats(&block_he->diff.stats));
+
+ if (ret) {
+ /*
+ * Pad with spaces if the number of sparks is less than
+ * NUM_SPARKS, otherwise the output is not aligned.
+ */
+ pad = NUM_SPARKS - ((ret - 1) / 3);
+ scnprintf(buf, sizeof(buf), "%s%5.1f%% %s", "\u00B1", r, spark);
+ ret = scnprintf(hpp->buf, hpp->size, "%*s",
+ dfmt->header_width, buf);
+
+ if (pad) {
+ ret += scnprintf(hpp->buf + ret, hpp->size - ret,
+ "%-*s", pad, " ");
+ }
+
+ return ret;
+ }
+
+no_print:
+ return scnprintf(hpp->buf, hpp->size, "%*s",
+ dfmt->header_width, " ");
+}
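
The pad arithmetic relies on each spark glyph (U+2581..U+2588) encoding to three UTF-8 bytes, plus the one trailing space print_cycles_spark() appends, which is what the (ret - 1) / 3 expression assumes. A hedged sanity check of that byte accounting, with a hypothetical five-glyph line:

#include <assert.h>

#define NUM_SPARKS 8	/* assumption: stand-in for util/spark.h */

int main(void)
{
	/*
	 * Each spark glyph is 3 UTF-8 bytes and print_cycles_spark()
	 * appends one trailing space, so a sparkline of ret bytes
	 * holds (ret - 1) / 3 glyphs.
	 */
	int ret = 3 * 5 + 1;			/* hypothetical: 5 glyphs + ' ' */
	int pad = NUM_SPARKS - ((ret - 1) / 3);	/* alignment padding needed */

	assert(pad == 3);
	return 0;
}
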
+
static void
hpp__entry_unpair(struct hist_entry *he, int idx, char *buf, size_t size)
{
@@ -1667,6 +1708,10 @@ static void data__hpp_register(struct data__file *d, int idx)
fmt->color = hpp__color_cycles;
fmt->sort = hist_entry__cmp_nop;
break;
+ case PERF_HPP_DIFF__CYCLES_HIST:
+ fmt->color = hpp__color_cycles_hist;
+ fmt->sort = hist_entry__cmp_nop;
+ break;
default:
fmt->sort = hist_entry__cmp_nop;
break;
@@ -1692,10 +1737,14 @@ static int ui_init(void)
* PERF_HPP_DIFF__DELTA
* PERF_HPP_DIFF__RATIO
* PERF_HPP_DIFF__WEIGHTED_DIFF
+ * PERF_HPP_DIFF__CYCLES
*/
data__hpp_register(d, i ? compute_2_hpp[compute] :
PERF_HPP_DIFF__BASELINE);
+ if (cycles_hist && i)
+ data__hpp_register(d, PERF_HPP_DIFF__CYCLES_HIST);
+
/*
* And the rest:
*
@@ -1850,6 +1899,9 @@ int cmd_diff(int argc, const char **argv)
if (quiet)
perf_quiet_option();
+ if (cycles_hist && (compute != COMPUTE_CYCLES))
+ usage_with_options(diff_usage, options);
+
symbol__annotation_init();
if (symbol__init(NULL) < 0)
diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c
index 372ecb3e2c06..9664a72a089d 100644
--- a/tools/perf/builtin-inject.c
+++ b/tools/perf/builtin-inject.c
@@ -45,6 +45,7 @@ struct perf_inject {
u64 aux_id;
struct list_head samples;
struct itrace_synth_opts itrace_synth_opts;
+ char event_copy[PERF_SAMPLE_MAX_SIZE];
};
struct event_entry {
@@ -214,6 +215,28 @@ static int perf_event__drop_aux(struct perf_tool *tool,
return 0;
}
+static union perf_event *
+perf_inject__cut_auxtrace_sample(struct perf_inject *inject,
+ union perf_event *event,
+ struct perf_sample *sample)
+{
+ size_t sz1 = sample->aux_sample.data - (void *)event;
+ size_t sz2 = event->header.size - sample->aux_sample.size - sz1;
+ union perf_event *ev = (union perf_event *)inject->event_copy;
+
+ if (sz1 > event->header.size || sz2 > event->header.size ||
+ sz1 + sz2 > event->header.size ||
+ sz1 < sizeof(struct perf_event_header) + sizeof(u64))
+ return event;
+
+ memcpy(ev, event, sz1);
+ memcpy((void *)ev + sz1, (void *)event + event->header.size - sz2, sz2);
+ ev->header.size = sz1 + sz2;
+ ((u64 *)((void *)ev + sz1))[-1] = 0;
+
+ return ev;
+}
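
A self-contained sketch of the splice performed above, on a fake 64-byte buffer with hypothetical sizes (the real function additionally rewrites ev->header.size to sz1 + sz2 and validates the bounds first): keep sz1 bytes of head, drop the aux payload, keep the sz2-byte tail, and zero the u64 just before the cut, which is where the recorded aux_sample.size lives.

#include <assert.h>
#include <string.h>
#include <stdint.h>

int main(void)
{
	char event[64], copy[64];
	size_t total = sizeof(event), aux_sz = 16;
	size_t sz1 = 24;			/* hypothetical start of aux data */
	size_t sz2 = total - aux_sz - sz1;	/* bytes after the aux payload */

	memset(event, 0xab, sizeof(event));
	memcpy(copy, event, sz1);			/* keep the head */
	memcpy(copy + sz1, event + total - sz2, sz2);	/* keep the tail */
	((uint64_t *)(copy + sz1))[-1] = 0;	/* clear the recorded aux size */

	assert(((uint64_t *)(copy + sz1))[-1] == 0);
	return 0;
}
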
+
typedef int (*inject_handler)(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
@@ -226,6 +249,9 @@ static int perf_event__repipe_sample(struct perf_tool *tool,
struct evsel *evsel,
struct machine *machine)
{
+ struct perf_inject *inject = container_of(tool, struct perf_inject,
+ tool);
+
if (evsel && evsel->handler) {
inject_handler f = evsel->handler;
return f(tool, event, sample, evsel, machine);
@@ -233,6 +259,9 @@ static int perf_event__repipe_sample(struct perf_tool *tool,
build_id__mark_dso_hit(tool, event, sample, evsel, machine);
+ if (inject->itrace_synth_opts.set && sample->aux_sample.size)
+ event = perf_inject__cut_auxtrace_sample(inject, event, sample);
+
return perf_event__repipe_synth(tool, event);
}
@@ -578,58 +607,6 @@ static void strip_init(struct perf_inject *inject)
evsel->handler = drop_sample;
}
-static bool has_tracking(struct evsel *evsel)
-{
- return evsel->core.attr.mmap || evsel->core.attr.mmap2 || evsel->core.attr.comm ||
- evsel->core.attr.task;
-}
-
-#define COMPAT_MASK (PERF_SAMPLE_ID | PERF_SAMPLE_TID | PERF_SAMPLE_TIME | \
- PERF_SAMPLE_ID | PERF_SAMPLE_CPU | PERF_SAMPLE_IDENTIFIER)
-
-/*
- * In order that the perf.data file is parsable, tracking events like MMAP need
- * their selected event to exist, except if there is only 1 selected event left
- * and it has a compatible sample type.
- */
-static bool ok_to_remove(struct evlist *evlist,
- struct evsel *evsel_to_remove)
-{
- struct evsel *evsel;
- int cnt = 0;
- bool ok = false;
-
- if (!has_tracking(evsel_to_remove))
- return true;
-
- evlist__for_each_entry(evlist, evsel) {
- if (evsel->handler != drop_sample) {
- cnt += 1;
- if ((evsel->core.attr.sample_type & COMPAT_MASK) ==
- (evsel_to_remove->core.attr.sample_type & COMPAT_MASK))
- ok = true;
- }
- }
-
- return ok && cnt == 1;
-}
-
-static void strip_fini(struct perf_inject *inject)
-{
- struct evlist *evlist = inject->session->evlist;
- struct evsel *evsel, *tmp;
-
- /* Remove non-synthesized evsels if possible */
- evlist__for_each_entry_safe(evlist, tmp, evsel) {
- if (evsel->handler == drop_sample &&
- ok_to_remove(evlist, evsel)) {
- pr_debug("Deleting %s\n", perf_evsel__name(evsel));
- evlist__remove(evlist, evsel);
- evsel__delete(evsel);
- }
- }
-}
-
static int __cmd_inject(struct perf_inject *inject)
{
int ret = -EINVAL;
@@ -729,8 +706,6 @@ static int __cmd_inject(struct perf_inject *inject)
evlist__remove(session->evlist, evsel);
evsel__delete(evsel);
}
- if (inject->strip)
- strip_fini(inject);
}
session->header.data_offset = output_data_offset;
session->header.data_size = inject->bytes_written;
diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c
index 9661671cc26e..003c85f5f56c 100644
--- a/tools/perf/builtin-kmem.c
+++ b/tools/perf/builtin-kmem.c
@@ -412,8 +412,8 @@ static u64 find_callsite(struct evsel *evsel, struct perf_sample *sample)
sizeof(key), callcmp);
if (!caller) {
/* found */
- if (node->map)
- addr = map__unmap_ip(node->map, node->ip);
+ if (node->ms.map)
+ addr = map__unmap_ip(node->ms.map, node->ip);
else
addr = node->ip;
diff --git a/tools/perf/builtin-kvm.c b/tools/perf/builtin-kvm.c
index 58a9e0989491..577af4f3297a 100644
--- a/tools/perf/builtin-kvm.c
+++ b/tools/perf/builtin-kvm.c
@@ -46,6 +46,7 @@
#include <semaphore.h>
#include <signal.h>
#include <math.h>
+#include <perf/mmap.h>
static const char *get_filename_for_perf_kvm(void)
{
@@ -759,14 +760,14 @@ static s64 perf_kvm__mmap_read_idx(struct perf_kvm_stat *kvm, int idx,
*mmap_time = ULLONG_MAX;
md = &evlist->mmap[idx];
- err = perf_mmap__read_init(md);
+ err = perf_mmap__read_init(&md->core);
if (err < 0)
return (err == -EAGAIN) ? 0 : -1;
- while ((event = perf_mmap__read_event(md)) != NULL) {
+ while ((event = perf_mmap__read_event(&md->core)) != NULL) {
err = perf_evlist__parse_sample_timestamp(evlist, event, &timestamp);
if (err) {
- perf_mmap__consume(md);
+ perf_mmap__consume(&md->core);
pr_err("Failed to parse sample\n");
return -1;
}
@@ -776,7 +777,7 @@ static s64 perf_kvm__mmap_read_idx(struct perf_kvm_stat *kvm, int idx,
* FIXME: Here we can't consume the event, as perf_session__queue_event will
* point to it, and it'll get possibly overwritten by the kernel.
*/
- perf_mmap__consume(md);
+ perf_mmap__consume(&md->core);
if (err) {
pr_err("Failed to enqueue sample: %d\n", err);
@@ -793,7 +794,7 @@ static s64 perf_kvm__mmap_read_idx(struct perf_kvm_stat *kvm, int idx,
break;
}
- perf_mmap__read_done(md);
+ perf_mmap__read_done(&md->core);
return n;
}
@@ -997,7 +998,7 @@ static int kvm_events_live_report(struct perf_kvm_stat *kvm)
done = perf_kvm__handle_stdin();
if (!rc && !done)
- err = fdarray__poll(fda, 100);
+ err = evlist__poll(kvm->evlist, 100);
}
evlist__disable(kvm->evlist);
diff --git a/tools/perf/builtin-list.c b/tools/perf/builtin-list.c
index 08e62ae9d37e..965ef017496f 100644
--- a/tools/perf/builtin-list.c
+++ b/tools/perf/builtin-list.c
@@ -26,6 +26,7 @@ int cmd_list(int argc, const char **argv)
int i;
bool raw_dump = false;
bool long_desc_flag = false;
+ bool deprecated = false;
struct option list_options[] = {
OPT_BOOLEAN(0, "raw-dump", &raw_dump, "Dump raw events"),
OPT_BOOLEAN('d', "desc", &desc_flag,
@@ -34,6 +35,8 @@ int cmd_list(int argc, const char **argv)
"Print longer event descriptions."),
OPT_BOOLEAN(0, "details", &details_flag,
"Print information on the perf event names and expressions used internally by events."),
+ OPT_BOOLEAN(0, "deprecated", &deprecated,
+ "Print deprecated events."),
OPT_INCR(0, "debug", &verbose,
"Enable debugging output"),
OPT_END()
@@ -55,7 +58,7 @@ int cmd_list(int argc, const char **argv)
if (argc == 0) {
print_events(NULL, raw_dump, !desc_flag, long_desc_flag,
- details_flag);
+ details_flag, deprecated);
return 0;
}
@@ -78,7 +81,8 @@ int cmd_list(int argc, const char **argv)
print_hwcache_events(NULL, raw_dump);
else if (strcmp(argv[i], "pmu") == 0)
print_pmu_events(NULL, raw_dump, !desc_flag,
- long_desc_flag, details_flag);
+ long_desc_flag, details_flag,
+ deprecated);
else if (strcmp(argv[i], "sdt") == 0)
print_sdt_events(NULL, NULL, raw_dump);
else if (strcmp(argv[i], "metric") == 0 || strcmp(argv[i], "metrics") == 0)
@@ -91,7 +95,8 @@ int cmd_list(int argc, const char **argv)
if (sep == NULL) {
print_events(argv[i], raw_dump, !desc_flag,
long_desc_flag,
- details_flag);
+ details_flag,
+ deprecated);
continue;
}
sep_idx = sep - argv[i];
@@ -117,7 +122,8 @@ int cmd_list(int argc, const char **argv)
print_hwcache_events(s, raw_dump);
print_pmu_events(s, raw_dump, !desc_flag,
long_desc_flag,
- details_flag);
+ details_flag,
+ deprecated);
print_tracepoint_events(NULL, s, raw_dump);
print_sdt_events(NULL, s, raw_dump);
metricgroup__print(true, true, s, raw_dump, details_flag);
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 23332861de6e..b5063d3b6fd0 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -55,6 +55,9 @@
#include <signal.h>
#include <sys/mman.h>
#include <sys/wait.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/time64.h>
@@ -91,8 +94,11 @@ struct record {
struct switch_output switch_output;
unsigned long long samples;
cpu_set_t affinity_mask;
+ unsigned long output_max_size; /* = 0: unlimited */
};
+static volatile int done;
+
static volatile int auxtrace_record__snapshot_started;
static DEFINE_TRIGGER(auxtrace_snapshot_trigger);
static DEFINE_TRIGGER(switch_output_trigger);
@@ -120,6 +126,12 @@ static bool switch_output_time(struct record *rec)
trigger_is_ready(&switch_output_trigger);
}
+static bool record__output_max_size_exceeded(struct record *rec)
+{
+ return rec->output_max_size &&
+ (rec->bytes_written >= rec->output_max_size);
+}
+
static int record__write(struct record *rec, struct mmap *map __maybe_unused,
void *bf, size_t size)
{
@@ -132,6 +144,13 @@ static int record__write(struct record *rec, struct mmap *map __maybe_unused,
rec->bytes_written += size;
+ if (record__output_max_size_exceeded(rec) && !done) {
+ fprintf(stderr, "[ perf record: perf size limit reached (%" PRIu64 " KB),"
+ " stopping session ]\n",
+ rec->bytes_written >> 10);
+ done = 1;
+ }
+
if (switch_output_size(rec))
trigger_hit(&switch_output_trigger);
@@ -197,7 +216,7 @@ static int record__aio_complete(struct mmap *md, struct aiocb *cblock)
* every aio write request started in record__aio_push() so
* decrement it because the request is now complete.
*/
- perf_mmap__put(md);
+ perf_mmap__put(&md->core);
rc = 1;
} else {
/*
@@ -276,7 +295,7 @@ static int record__aio_pushfn(struct mmap *map, void *to, void *buf, size_t size
if (record__comp_enabled(aio->rec)) {
size = zstd_compress(aio->rec->session, aio->data + aio->size,
- perf_mmap__mmap_len(map) - aio->size,
+ mmap__mmap_len(map) - aio->size,
buf, size);
} else {
memcpy(aio->data + aio->size, buf, size);
@@ -293,7 +312,7 @@ static int record__aio_pushfn(struct mmap *map, void *to, void *buf, size_t size
* after started aio request completion or at record__aio_push()
* if the request failed to start.
*/
- perf_mmap__get(map);
+ perf_mmap__get(&map->core);
}
aio->size += size;
@@ -332,7 +351,7 @@ static int record__aio_push(struct record *rec, struct mmap *map, off_t *off)
* map->refcount is decremented in record__aio_complete() after
* aio write operation finishes successfully.
*/
- perf_mmap__put(map);
+ perf_mmap__put(&map->core);
}
return ret;
@@ -488,7 +507,7 @@ static int record__pushfn(struct mmap *map, void *to, void *bf, size_t size)
struct record *rec = to;
if (record__comp_enabled(rec)) {
- size = zstd_compress(rec->session, map->data, perf_mmap__mmap_len(map), bf, size);
+ size = zstd_compress(rec->session, map->data, mmap__mmap_len(map), bf, size);
bf = map->data;
}
@@ -496,7 +515,6 @@ static int record__pushfn(struct mmap *map, void *to, void *bf, size_t size)
return record__write(rec, map, bf, size);
}
-static volatile int done;
static volatile int signr = -1;
static volatile int child_finished;
@@ -537,7 +555,7 @@ static int record__process_auxtrace(struct perf_tool *tool,
size_t padding;
u8 pad[8] = {0};
- if (!perf_data__is_pipe(data) && !perf_data__is_dir(data)) {
+ if (!perf_data__is_pipe(data) && perf_data__is_single_file(data)) {
off_t file_offset;
int fd = perf_data__fd(data);
int err;
@@ -662,6 +680,11 @@ static int record__auxtrace_init(struct record *rec)
if (err)
return err;
+ err = auxtrace_parse_sample_options(rec->itr, rec->evlist, &rec->opts,
+ rec->opts.auxtrace_sample_opts);
+ if (err)
+ return err;
+
return auxtrace_parse_filters(rec->evlist);
}
@@ -699,10 +722,43 @@ static int record__auxtrace_init(struct record *rec __maybe_unused)
#endif
+static bool record__kcore_readable(struct machine *machine)
+{
+ char kcore[PATH_MAX];
+ int fd;
+
+ scnprintf(kcore, sizeof(kcore), "%s/proc/kcore", machine->root_dir);
+
+ fd = open(kcore, O_RDONLY);
+ if (fd < 0)
+ return false;
+
+ close(fd);
+
+ return true;
+}
+
+static int record__kcore_copy(struct machine *machine, struct perf_data *data)
+{
+ char from_dir[PATH_MAX];
+ char kcore_dir[PATH_MAX];
+ int ret;
+
+ snprintf(from_dir, sizeof(from_dir), "%s/proc", machine->root_dir);
+
+ ret = perf_data__make_kcore_dir(data, kcore_dir, sizeof(kcore_dir));
+ if (ret)
+ return ret;
+
+ return kcore_copy(from_dir, kcore_dir);
+}
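
Taken together, these two helpers back the new --kcore mode added further down: record__kcore_readable() gates it on /proc/kcore access, and record__kcore_copy() snapshots the kernel image (kcore_copy() also extracts kallsyms and modules) into a kcore_dir inside the now directory-shaped output, e.g. `perf record --kcore -a -- sleep 1`, so later annotation can disassemble the exact kernel text that was live during the session.
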
+
static int record__mmap_evlist(struct record *rec,
struct evlist *evlist)
{
struct record_opts *opts = &rec->opts;
+ bool auxtrace_overwrite = opts->auxtrace_snapshot_mode ||
+ opts->auxtrace_sample_mode;
char msg[512];
if (opts->affinity != PERF_AFFINITY_SYS)
@@ -710,7 +766,7 @@ static int record__mmap_evlist(struct record *rec,
if (evlist__mmap_ex(evlist, opts->mmap_pages,
opts->auxtrace_mmap_pages,
- opts->auxtrace_snapshot_mode,
+ auxtrace_overwrite,
opts->nr_cblocks, opts->affinity,
opts->mmap_flush, opts->comp_level) < 0) {
if (errno == EPERM) {
@@ -997,6 +1053,7 @@ static int record__mmap_read_evlist(struct record *rec, struct evlist *evlist,
}
if (map->auxtrace_mmap.base && !rec->opts.auxtrace_snapshot_mode &&
+ !rec->opts.auxtrace_sample_mode &&
record__auxtrace_mmap_read(rec, map) != 0) {
rc = -1;
goto out;
@@ -1272,6 +1329,15 @@ static int record__synthesize(struct record *rec, bool tail)
if (err)
goto out;
+ /* Synthesize id_index before auxtrace_info */
+ if (rec->opts.auxtrace_sample_mode) {
+ err = perf_event__synthesize_id_index(tool,
+ process_synthesized_event,
+ session->evlist, machine);
+ if (err)
+ goto out;
+ }
+
if (rec->opts.full_auxtrace) {
err = perf_event__synthesize_auxtrace_info(rec->itr, tool,
session, process_synthesized_event);
@@ -1383,6 +1449,12 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
session->header.env.comp_type = PERF_COMP_ZSTD;
session->header.env.comp_level = rec->opts.comp_level;
+ if (rec->opts.kcore &&
+ !record__kcore_readable(&session->machines.host)) {
+ pr_err("ERROR: kcore is not readable.\n");
+ return -1;
+ }
+
record__init_features(rec);
if (rec->opts.use_clockid && rec->opts.clockid_res_ns)
@@ -1414,6 +1486,14 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
}
session->header.env.comp_mmap_len = session->evlist->core.mmap_len;
+ if (rec->opts.kcore) {
+ err = record__kcore_copy(&session->machines.host, data);
+ if (err) {
+ pr_err("ERROR: Failed to copy kcore\n");
+ goto out_child;
+ }
+ }
+
err = bpf__apply_obj_config();
if (err) {
char errbuf[BUFSIZ];
@@ -1936,6 +2016,33 @@ static int record__parse_affinity(const struct option *opt, const char *str, int
return 0;
}
+static int parse_output_max_size(const struct option *opt,
+ const char *str, int unset)
+{
+ unsigned long *s = (unsigned long *)opt->value;
+ static struct parse_tag tags_size[] = {
+ { .tag = 'B', .mult = 1 },
+ { .tag = 'K', .mult = 1 << 10 },
+ { .tag = 'M', .mult = 1 << 20 },
+ { .tag = 'G', .mult = 1 << 30 },
+ { .tag = 0 },
+ };
+ unsigned long val;
+
+ if (unset) {
+ *s = 0;
+ return 0;
+ }
+
+ val = parse_tag_value(str, tags_size);
+ if (val != (unsigned long) -1) {
+ *s = val;
+ return 0;
+ }
+
+ return -1;
+}
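
A rough standalone equivalent of the suffix handling, for illustration only (the real parsing goes through parse_tag_value(); demo_parse_size() here is hypothetical):

#include <stdio.h>
#include <stdlib.h>

static unsigned long demo_parse_size(const char *str)
{
	char *end;
	unsigned long val = strtoul(str, &end, 10);

	switch (*end) {
	case 'B': return val;
	case 'K': return val << 10;
	case 'M': return val << 20;
	case 'G': return val << 30;
	default:  return (unsigned long)-1;	/* reject, like parse_tag_value() */
	}
}

int main(void)
{
	printf("%lu\n", demo_parse_size("200M"));	/* 209715200 */
	return 0;
}

With this in place, e.g. `perf record --max-size=200M` makes record__write() set done once roughly 200 MiB have been written, as in the record__output_max_size_exceeded() hunk earlier.
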
+
static int record__parse_mmap_pages(const struct option *opt,
const char *str,
int unset __maybe_unused)
@@ -2058,6 +2165,31 @@ static const char * const __record_usage[] = {
};
const char * const *record_usage = __record_usage;
+static int build_id__process_mmap(struct perf_tool *tool, union perf_event *event,
+ struct perf_sample *sample, struct machine *machine)
+{
+ /*
+ * We already have the kernel maps, put in place via perf_session__create_kernel_maps(),
+ * so there is no need to add them twice.
+ */
+ if (!(event->header.misc & PERF_RECORD_MISC_USER))
+ return 0;
+ return perf_event__process_mmap(tool, event, sample, machine);
+}
+
+static int build_id__process_mmap2(struct perf_tool *tool, union perf_event *event,
+ struct perf_sample *sample, struct machine *machine)
+{
+ /*
+ * We already have the kernel maps, put in place via perf_session__create_kernel_maps(),
+ * so there is no need to add them twice.
+ */
+ if (!(event->header.misc & PERF_RECORD_MISC_USER))
+ return 0;
+
+ return perf_event__process_mmap2(tool, event, sample, machine);
+}
+
/*
* XXX Ideally would be local to cmd_record() and passed to a record__new
* because we need to have access to it in record__exit, that is called
@@ -2087,8 +2219,8 @@ static struct record record = {
.exit = perf_event__process_exit,
.comm = perf_event__process_comm,
.namespaces = perf_event__process_namespaces,
- .mmap = perf_event__process_mmap,
- .mmap2 = perf_event__process_mmap2,
+ .mmap = build_id__process_mmap,
+ .mmap2 = build_id__process_mmap2,
.ordered_events = true,
},
};
@@ -2184,6 +2316,7 @@ static struct option __record_options[] = {
parse_cgroups),
OPT_UINTEGER('D', "delay", &record.opts.initial_delay,
"ms to wait before starting measurement after program start"),
+ OPT_BOOLEAN(0, "kcore", &record.opts.kcore, "copy /proc/kcore"),
OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
"user to profile"),
@@ -2213,6 +2346,8 @@ static struct option __record_options[] = {
parse_clockid),
OPT_STRING_OPTARG('S', "snapshot", &record.opts.auxtrace_snapshot_opts,
"opts", "AUX area tracing Snapshot Mode", ""),
+ OPT_STRING_OPTARG(0, "aux-sample", &record.opts.auxtrace_sample_opts,
+ "opts", "sample AUX area", ""),
OPT_UINTEGER(0, "proc-map-timeout", &proc_map_timeout,
"per thread proc mmap processing timeout in ms"),
OPT_BOOLEAN(0, "namespaces", &record.opts.record_namespaces,
@@ -2262,6 +2397,8 @@ static struct option __record_options[] = {
"n", "Compressed records using specified level (default: 1 - fastest compression, 22 - greatest compression)",
record__parse_comp_level),
#endif
+ OPT_CALLBACK(0, "max-size", &record.output_max_size,
+ "size", "Limit the maximum size of the output file", parse_output_max_size),
OPT_END()
};
@@ -2322,6 +2459,9 @@ int cmd_record(int argc, const char **argv)
}
+ if (rec->opts.kcore)
+ rec->data.is_dir = true;
+
if (rec->opts.comp_level != 0) {
pr_debug("Compression enabled, disabling build id collection at the end of the session.\n");
rec->no_buildid = true;
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index aae0e57c60fb..ab0f6e516b03 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -51,6 +51,7 @@
#include "util/util.h" // perf_tip()
#include "ui/ui.h"
#include "ui/progress.h"
+#include "util/block-info.h"
#include <dlfcn.h>
#include <errno.h>
@@ -96,10 +97,13 @@ struct report {
float min_percent;
u64 nr_entries;
u64 queue_size;
+ u64 total_cycles;
int socket_filter;
DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
struct branch_type_stat brtype_stat;
bool symbol_ipc;
+ bool total_cycles_mode;
+ struct block_report *block_reports;
};
static int report__config(const char *var, const char *value, void *cb)
@@ -290,9 +294,10 @@ static int process_sample_event(struct perf_tool *tool,
if (al.map != NULL)
al.map->dso->hit = 1;
- if (ui__has_annotation() || rep->symbol_ipc) {
+ if (ui__has_annotation() || rep->symbol_ipc || rep->total_cycles_mode) {
hist__account_cycles(sample->branch_stack, &al, sample,
- rep->nonany_branch_mode);
+ rep->nonany_branch_mode,
+ &rep->total_cycles);
}
ret = hist_entry_iter__add(&iter, &al, rep->max_stack, rep);
@@ -399,6 +404,13 @@ static int report__setup_sample_type(struct report *rep)
PERF_SAMPLE_BRANCH_ANY))
rep->nonany_branch_mode = true;
+#ifndef HAVE_LIBUNWIND_SUPPORT
+ if (dwarf_callchain_users) {
+ ui__warning("Please install libunwind development packages "
+ "during the perf build.\n");
+ }
+#endif
+
return 0;
}
@@ -473,11 +485,30 @@ static size_t hists__fprintf_nr_sample_events(struct hists *hists, struct report
return ret + fprintf(fp, "\n#\n");
}
+static int perf_evlist__tui_block_hists_browse(struct evlist *evlist,
+ struct report *rep)
+{
+ struct evsel *pos;
+ int i = 0, ret;
+
+ evlist__for_each_entry(evlist, pos) {
+ ret = report__browse_block_hists(&rep->block_reports[i++].hist,
+ rep->min_percent, pos,
+ &rep->session->header.env,
+ &rep->annotation_opts);
+ if (ret != 0)
+ return ret;
+ }
+
+ return 0;
+}
+
static int perf_evlist__tty_browse_hists(struct evlist *evlist,
struct report *rep,
const char *help)
{
struct evsel *pos;
+ int i = 0;
if (!quiet) {
fprintf(stdout, "#\n# Total Lost Samples: %" PRIu64 "\n#\n",
@@ -493,6 +524,14 @@ static int perf_evlist__tty_browse_hists(struct evlist *evlist,
continue;
hists__fprintf_nr_sample_events(hists, rep, evname, stdout);
+
+ if (rep->total_cycles_mode) {
+ report__browse_block_hists(&rep->block_reports[i++].hist,
+ rep->min_percent, pos,
+ NULL, NULL);
+ continue;
+ }
+
hists__fprintf(hists, !quiet, 0, 0, rep->min_percent, stdout,
!(symbol_conf.use_callchain ||
symbol_conf.show_branchflag_count));
@@ -575,6 +614,11 @@ static int report__browse_hists(struct report *rep)
switch (use_browser) {
case 1:
+ if (rep->total_cycles_mode) {
+ ret = perf_evlist__tui_block_hists_browse(evlist, rep);
+ break;
+ }
+
ret = perf_evlist__tui_browse_hists(evlist, help, NULL,
rep->min_percent,
&session->header.env,
@@ -639,7 +683,7 @@ static int hists__resort_cb(struct hist_entry *he, void *arg)
if (rep->symbol_ipc && sym && !sym->annotate2) {
struct evsel *evsel = hists_to_evsel(he->hists);
- symbol__annotate2(sym, he->ms.map, evsel,
+ symbol__annotate2(&he->ms, evsel,
&annotation__default_options, NULL);
}
@@ -720,11 +764,9 @@ static struct task *tasks_list(struct task *task, struct machine *machine)
static size_t maps__fprintf_task(struct maps *maps, int indent, FILE *fp)
{
size_t printed = 0;
- struct rb_node *nd;
-
- for (nd = rb_first(&maps->entries); nd; nd = rb_next(nd)) {
- struct map *map = rb_entry(nd, struct map, rb_node);
+ struct map *map;
+ maps__for_each_entry(maps, map) {
printed += fprintf(fp, "%*s %" PRIx64 "-%" PRIx64 " %c%c%c%c %08" PRIx64 " %" PRIu64 " %s\n",
indent, "", map->start, map->end,
map->prot & PROT_READ ? 'r' : '-',
@@ -732,7 +774,7 @@ static size_t maps__fprintf_task(struct maps *maps, int indent, FILE *fp)
map->prot & PROT_EXEC ? 'x' : '-',
map->flags & MAP_SHARED ? 's' : 'p',
map->pgoff,
- map->ino, map->dso->name);
+ map->dso->id.ino, map->dso->name);
}
return printed;
@@ -920,6 +962,13 @@ static int __cmd_report(struct report *rep)
report__output_resort(rep);
+ if (rep->total_cycles_mode) {
+ rep->block_reports = block_info__create_report(session->evlist,
+ rep->total_cycles);
+ if (!rep->block_reports)
+ return -1;
+ }
+
return report__browse_hists(rep);
}
@@ -1204,6 +1253,8 @@ int cmd_report(int argc, const char **argv)
"Set time quantum for time sort key (default 100ms)",
parse_time_quantum),
OPTS_EVSWITCH(&report.evswitch),
+ OPT_BOOLEAN(0, "total-cycles", &report.total_cycles_mode,
+ "Sort all blocks by 'Sampled Cycles%'"),
OPT_END()
};
struct perf_data data = {
@@ -1366,6 +1417,13 @@ repeat:
goto error;
}
+ if (report.total_cycles_mode) {
+ if (sort__mode != SORT_MODE__BRANCH)
+ report.total_cycles_mode = false;
+ else
+ sort_order = NULL;
+ }
+
if (strcmp(input_name, "-") != 0)
setup_browser(true);
else
@@ -1416,7 +1474,8 @@ repeat:
* so don't allocate extra space that won't be used in the stdio
* implementation.
*/
- if (ui__has_annotation() || report.symbol_ipc) {
+ if (ui__has_annotation() || report.symbol_ipc ||
+ report.total_cycles_mode) {
ret = symbol__annotation_init();
if (ret < 0)
goto error;
@@ -1477,6 +1536,10 @@ error:
itrace_synth_opts__clear_time_range(&itrace_synth_opts);
zfree(&report.ptime_range);
}
+
+ if (report.block_reports)
+ zfree(&report.block_reports);
+
zstd_fini(&(session->zstd_data));
perf_session__delete(session);
return ret;
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index 5cacc4f84c8d..8a12d71364c3 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -2172,7 +2172,7 @@ static void save_task_callchain(struct perf_sched *sched,
if (node == NULL)
break;
- sym = node->sym;
+ sym = node->ms.sym;
if (sym) {
if (!strcmp(sym->name, "schedule") ||
!strcmp(sym->name, "__schedule") ||
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index 67be8d31afab..f86c5cce5b2c 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -3605,11 +3605,6 @@ int cmd_script(int argc, const char **argv)
}
}
- if (script.time_str && reltime) {
- fprintf(stderr, "Don't combine --reltime with --time\n");
- return -1;
- }
-
if (itrace_synth_opts.callchain &&
itrace_synth_opts.callchain_sz > scripting_max_stack)
scripting_max_stack = itrace_synth_opts.callchain_sz;
@@ -3869,10 +3864,11 @@ int cmd_script(int argc, const char **argv)
goto out_delete;
if (script.time_str) {
- err = perf_time__parse_for_ranges(script.time_str, session,
+ err = perf_time__parse_for_ranges_reltime(script.time_str, session,
&script.ptime_range,
&script.range_size,
- &script.range_num);
+ &script.range_num,
+ reltime);
if (err < 0)
goto out_delete;
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 468fc49420ce..0a15253b438c 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -792,6 +792,8 @@ static struct option stat_options[] = {
"aggregate counts per physical processor core", AGGR_CORE),
OPT_SET_UINT(0, "per-thread", &stat_config.aggr_mode,
"aggregate counts per thread", AGGR_THREAD),
+ OPT_SET_UINT(0, "per-node", &stat_config.aggr_mode,
+ "aggregate counts per numa node", AGGR_NODE),
OPT_UINTEGER('D', "delay", &stat_config.initial_delay,
"ms to wait before starting measurement after program start"),
OPT_CALLBACK_NOOPT(0, "metric-only", &stat_config.metric_only, NULL,
@@ -803,6 +805,12 @@ static struct option stat_options[] = {
OPT_CALLBACK('M', "metrics", &evsel_list, "metric/metric group list",
"monitor specified metrics or metric groups (separated by ,)",
parse_metric_groups),
+ OPT_BOOLEAN_FLAG(0, "all-kernel", &stat_config.all_kernel,
+ "Configure all used events to run in kernel space.",
+ PARSE_OPT_EXCLUSIVE),
+ OPT_BOOLEAN_FLAG(0, "all-user", &stat_config.all_user,
+ "Configure all used events to run in user space.",
+ PARSE_OPT_EXCLUSIVE),
OPT_END()
};
@@ -824,6 +832,12 @@ static int perf_stat__get_core(struct perf_stat_config *config __maybe_unused,
return cpu_map__get_core(map, cpu, NULL);
}
+static int perf_stat__get_node(struct perf_stat_config *config __maybe_unused,
+ struct perf_cpu_map *map, int cpu)
+{
+ return cpu_map__get_node(map, cpu, NULL);
+}
+
static int perf_stat__get_aggr(struct perf_stat_config *config,
aggr_get_id_t get_id, struct perf_cpu_map *map, int idx)
{
@@ -858,6 +872,12 @@ static int perf_stat__get_core_cached(struct perf_stat_config *config,
return perf_stat__get_aggr(config, perf_stat__get_core, map, idx);
}
+static int perf_stat__get_node_cached(struct perf_stat_config *config,
+ struct perf_cpu_map *map, int idx)
+{
+ return perf_stat__get_aggr(config, perf_stat__get_node, map, idx);
+}
+
static bool term_percore_set(void)
{
struct evsel *counter;
@@ -896,6 +916,13 @@ static int perf_stat_init_aggr_mode(void)
}
stat_config.aggr_get_id = perf_stat__get_core_cached;
break;
+ case AGGR_NODE:
+ if (cpu_map__build_node_map(evsel_list->core.cpus, &stat_config.aggr_map)) {
+ perror("cannot build node map");
+ return -1;
+ }
+ stat_config.aggr_get_id = perf_stat__get_node_cached;
+ break;
case AGGR_NONE:
if (term_percore_set()) {
if (cpu_map__build_core_map(evsel_list->core.cpus,
@@ -1008,6 +1035,13 @@ static int perf_env__get_core(struct perf_cpu_map *map, int idx, void *data)
return core;
}
+static int perf_env__get_node(struct perf_cpu_map *map, int idx, void *data)
+{
+ int cpu = perf_env__get_cpu(data, map, idx);
+
+ return perf_env__numa_node(data, cpu);
+}
+
static int perf_env__build_socket_map(struct perf_env *env, struct perf_cpu_map *cpus,
struct perf_cpu_map **sockp)
{
@@ -1026,6 +1060,12 @@ static int perf_env__build_core_map(struct perf_env *env, struct perf_cpu_map *c
return cpu_map__build_map(cpus, corep, perf_env__get_core, env);
}
+static int perf_env__build_node_map(struct perf_env *env, struct perf_cpu_map *cpus,
+ struct perf_cpu_map **nodep)
+{
+ return cpu_map__build_map(cpus, nodep, perf_env__get_node, env);
+}
+
static int perf_stat__get_socket_file(struct perf_stat_config *config __maybe_unused,
struct perf_cpu_map *map, int idx)
{
@@ -1043,6 +1083,12 @@ static int perf_stat__get_core_file(struct perf_stat_config *config __maybe_unus
return perf_env__get_core(map, idx, &perf_stat.session->header.env);
}
+static int perf_stat__get_node_file(struct perf_stat_config *config __maybe_unused,
+ struct perf_cpu_map *map, int idx)
+{
+ return perf_env__get_node(map, idx, &perf_stat.session->header.env);
+}
+
static int perf_stat_init_aggr_mode_file(struct perf_stat *st)
{
struct perf_env *env = &st->session->header.env;
@@ -1069,6 +1115,13 @@ static int perf_stat_init_aggr_mode_file(struct perf_stat *st)
}
stat_config.aggr_get_id = perf_stat__get_core_file;
break;
+ case AGGR_NODE:
+ if (perf_env__build_node_map(env, evsel_list->core.cpus, &stat_config.aggr_map)) {
+ perror("cannot build node map");
+ return -1;
+ }
+ stat_config.aggr_get_id = perf_stat__get_node_file;
+ break;
case AGGR_NONE:
case AGGR_GLOBAL:
case AGGR_THREAD:
@@ -1254,6 +1307,7 @@ static int add_default_attributes(void)
if (stat_config.null_run)
return 0;
+ bzero(&errinfo, sizeof(errinfo));
if (transaction_run) {
/* Handle -T as -M transaction. Once platform specific metrics
* support has been added to the json files, all architectures
@@ -1311,6 +1365,7 @@ static int add_default_attributes(void)
return -1;
}
if (err) {
+ parse_events_print_error(&errinfo, smi_cost_attrs);
fprintf(stderr, "Cannot set up SMI cost events\n");
return -1;
}
@@ -1616,6 +1671,8 @@ static int __cmd_report(int argc, const char **argv)
"aggregate counts per processor die", AGGR_DIE),
OPT_SET_UINT(0, "per-core", &perf_stat.aggr_mode,
"aggregate counts per physical processor core", AGGR_CORE),
+ OPT_SET_UINT(0, "per-node", &perf_stat.aggr_mode,
+ "aggregate counts per numa node", AGGR_NODE),
OPT_SET_UINT('A', "no-aggr", &perf_stat.aggr_mode,
"disable CPU count aggregation", AGGR_NONE),
OPT_END()
@@ -1890,6 +1947,9 @@ int cmd_stat(int argc, const char **argv)
}
}
+ if (stat_config.aggr_mode == AGGR_NODE)
+ cpu__setup_cpunode_map();
+
if (stat_config.times && interval)
interval_count = true;
else if (stat_config.times && !interval) {
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 1f60124eb19b..dc80044bc46f 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -82,6 +82,7 @@
#include <linux/err.h>
#include <linux/ctype.h>
+#include <perf/mmap.h>
static volatile int done;
static volatile int resize;
@@ -142,12 +143,12 @@ static int perf_top__parse_source(struct perf_top *top, struct hist_entry *he)
return err;
}
- err = symbol__annotate(sym, map, evsel, 0, &top->annotation_opts, NULL);
+ err = symbol__annotate(&he->ms, evsel, 0, &top->annotation_opts, NULL);
if (err == 0) {
top->sym_filter_entry = he;
} else {
char msg[BUFSIZ];
- symbol__strerror_disassemble(sym, map, err, msg, sizeof(msg));
+ symbol__strerror_disassemble(&he->ms, err, msg, sizeof(msg));
pr_err("Couldn't annotate %s: %s\n", sym->name, msg);
}
@@ -256,7 +257,7 @@ static void perf_top__show_details(struct perf_top *top)
printf("Showing %s for %s\n", perf_evsel__name(top->sym_evsel), symbol->name);
printf(" Events Pcnt (>=%d%%)\n", top->annotation_opts.min_pcnt);
- more = symbol__annotate_printf(symbol, he->ms.map, top->sym_evsel, &top->annotation_opts);
+ more = symbol__annotate_printf(&he->ms, top->sym_evsel, &top->annotation_opts);
if (top->evlist->enabled) {
if (top->zero)
@@ -724,7 +725,8 @@ static int hist_iter__top_callback(struct hist_entry_iter *iter,
perf_top__record_precise_ip(top, he, iter->sample, evsel, al->addr);
hist__account_cycles(iter->sample->branch_stack, al, iter->sample,
- !(top->record_opts.branch_stack & PERF_SAMPLE_BRANCH_ANY));
+ !(top->record_opts.branch_stack & PERF_SAMPLE_BRANCH_ANY),
+ NULL);
return 0;
}
@@ -869,10 +871,10 @@ static void perf_top__mmap_read_idx(struct perf_top *top, int idx)
union perf_event *event;
md = opts->overwrite ? &evlist->overwrite_mmap[idx] : &evlist->mmap[idx];
- if (perf_mmap__read_init(md) < 0)
+ if (perf_mmap__read_init(&md->core) < 0)
return;
- while ((event = perf_mmap__read_event(md)) != NULL) {
+ while ((event = perf_mmap__read_event(&md->core)) != NULL) {
int ret;
ret = perf_evlist__parse_sample_timestamp(evlist, event, &last_timestamp);
@@ -883,7 +885,7 @@ static void perf_top__mmap_read_idx(struct perf_top *top, int idx)
if (ret)
break;
- perf_mmap__consume(md);
+ perf_mmap__consume(&md->core);
if (top->qe.rotate) {
pthread_mutex_lock(&top->qe.mutex);
@@ -893,7 +895,7 @@ static void perf_top__mmap_read_idx(struct perf_top *top, int idx)
}
}
- perf_mmap__read_done(md);
+ perf_mmap__read_done(&md->core);
}
static void perf_top__mmap_read(struct perf_top *top)
@@ -1560,6 +1562,17 @@ int cmd_top(int argc, const char **argv)
status = perf_config(perf_top_config, &top);
if (status)
return status;
+ /*
+ * Since the per arch annotation init routine may need the cpuid, read
+ * it here, as we are not getting it from the perf.data header.
+ */
+ status = perf_env__read_cpuid(&perf_env);
+ if (status) {
+ pr_err("Couldn't read the cpuid for this machine: %s\n",
+ str_error_r(errno, errbuf, sizeof(errbuf)));
+ goto out_delete_evlist;
+ }
+ top.evlist->env = &perf_env;
argc = parse_options(argc, argv, options, top_usage, 0);
if (argc)
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index bb5130d02155..46a72ecac427 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -77,6 +77,7 @@
#include <sys/sysmacros.h>
#include <linux/ctype.h>
+#include <perf/mmap.h>
#ifndef O_CLOEXEC
# define O_CLOEXEC 02000000
@@ -86,6 +87,33 @@
# define F_LINUX_SPECIFIC_BASE 1024
#endif
+/*
+ * strtoul: Go from a string to a value, e.g. for msr: MSR_FS_BASE to 0xc0000100
+ */
+struct syscall_arg_fmt {
+ size_t (*scnprintf)(char *bf, size_t size, struct syscall_arg *arg);
+ bool (*strtoul)(char *bf, size_t size, struct syscall_arg *arg, u64 *val);
+ unsigned long (*mask_val)(struct syscall_arg *arg, unsigned long val);
+ void *parm;
+ const char *name;
+ u16 nr_entries; // for arrays
+ bool show_zero;
+};
+
+struct syscall_fmt {
+ const char *name;
+ const char *alias;
+ struct {
+ const char *sys_enter,
+ *sys_exit;
+ } bpf_prog_name;
+ struct syscall_arg_fmt arg[6];
+ u8 nr_args;
+ bool errpid;
+ bool timeout;
+ bool hexret;
+};
+
struct trace {
struct perf_tool tool;
struct syscalltbl *sctbl;
@@ -147,11 +175,13 @@ struct trace {
bool multiple_threads;
bool summary;
bool summary_only;
+ bool errno_summary;
bool failure_only;
bool show_comm;
bool print_sample;
bool show_tool_stats;
bool trace_syscalls;
+ bool libtraceevent_print;
bool kernel_syscallchains;
s16 args_alignment;
bool show_tstamp;
@@ -162,6 +192,7 @@ struct trace {
bool force;
bool vfs_getname;
int trace_pgfaults;
+ char *perfconfig_events;
struct {
struct ordered_events data;
u64 last;
@@ -254,6 +285,87 @@ struct syscall_tp {
};
};
+/*
+ * The evsel->priv as used by 'perf trace'
+ * sc: for raw_syscalls:sys_{enter,exit} and syscalls:sys_{enter,exit}_SYSCALLNAME
+ * fmt: for all the other tracepoints
+ */
+struct evsel_trace {
+ struct syscall_tp sc;
+ struct syscall_arg_fmt *fmt;
+};
+
+static struct evsel_trace *evsel_trace__new(void)
+{
+ return zalloc(sizeof(struct evsel_trace));
+}
+
+static void evsel_trace__delete(struct evsel_trace *et)
+{
+ if (et == NULL)
+ return;
+
+ zfree(&et->fmt);
+ free(et);
+}
+
+/*
+ * Used with raw_syscalls:sys_{enter,exit} and with the
+ * syscalls:sys_{enter,exit}_SYSCALL tracepoints
+ */
+static inline struct syscall_tp *__evsel__syscall_tp(struct evsel *evsel)
+{
+ struct evsel_trace *et = evsel->priv;
+
+ return &et->sc;
+}
+
+static struct syscall_tp *evsel__syscall_tp(struct evsel *evsel)
+{
+ if (evsel->priv == NULL) {
+ evsel->priv = evsel_trace__new();
+ if (evsel->priv == NULL)
+ return NULL;
+ }
+
+ return __evsel__syscall_tp(evsel);
+}
+
+/*
+ * Used with all the other tracepoints.
+ */
+static inline struct syscall_arg_fmt *__evsel__syscall_arg_fmt(struct evsel *evsel)
+{
+ struct evsel_trace *et = evsel->priv;
+
+ return et->fmt;
+}
+
+static struct syscall_arg_fmt *evsel__syscall_arg_fmt(struct evsel *evsel)
+{
+ struct evsel_trace *et = evsel->priv;
+
+ if (evsel->priv == NULL) {
+ et = evsel->priv = evsel_trace__new();
+
+ if (et == NULL)
+ return NULL;
+ }
+
+ if (et->fmt == NULL) {
+ et->fmt = calloc(evsel->tp_format->format.nr_fields, sizeof(struct syscall_arg_fmt));
+ if (et->fmt == NULL)
+ goto out_delete;
+ }
+
+ return __evsel__syscall_arg_fmt(evsel);
+
+out_delete:
+ evsel_trace__delete(evsel->priv);
+ evsel->priv = NULL;
+ return NULL;
+}
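
The accessor pair above is a lazy two-level allocation: evsel->priv gets its container on first touch, and the per-field fmt array on first touch after that. A reduced sketch of that shape (names here are hypothetical, not perf API):

#include <stdlib.h>

struct arg_fmt { int dummy; };
struct priv { struct arg_fmt *fmt; };

static struct arg_fmt *get_arg_fmt(struct priv **pp, int nr_fields)
{
	if (*pp == NULL && (*pp = calloc(1, sizeof(**pp))) == NULL)
		return NULL;			/* evsel_trace__new() failed */
	if ((*pp)->fmt == NULL)
		(*pp)->fmt = calloc(nr_fields, sizeof(struct arg_fmt));
	return (*pp)->fmt;			/* NULL on allocation failure */
}

int main(void)
{
	struct priv *p = NULL;

	return get_arg_fmt(&p, 6) ? 0 : 1;
}
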
+
static int perf_evsel__init_tp_uint_field(struct evsel *evsel,
struct tp_field *field,
const char *name)
@@ -267,7 +379,7 @@ static int perf_evsel__init_tp_uint_field(struct evsel *evsel,
}
#define perf_evsel__init_sc_tp_uint_field(evsel, name) \
- ({ struct syscall_tp *sc = evsel->priv;\
+ ({ struct syscall_tp *sc = __evsel__syscall_tp(evsel);\
perf_evsel__init_tp_uint_field(evsel, &sc->name, #name); })
static int perf_evsel__init_tp_ptr_field(struct evsel *evsel,
@@ -283,7 +395,7 @@ static int perf_evsel__init_tp_ptr_field(struct evsel *evsel,
}
#define perf_evsel__init_sc_tp_ptr_field(evsel, name) \
- ({ struct syscall_tp *sc = evsel->priv;\
+ ({ struct syscall_tp *sc = __evsel__syscall_tp(evsel);\
perf_evsel__init_tp_ptr_field(evsel, &sc->name, #name); })
static void evsel__delete_priv(struct evsel *evsel)
@@ -294,73 +406,61 @@ static void evsel__delete_priv(struct evsel *evsel)
static int perf_evsel__init_syscall_tp(struct evsel *evsel)
{
- struct syscall_tp *sc = evsel->priv = malloc(sizeof(struct syscall_tp));
+ struct syscall_tp *sc = evsel__syscall_tp(evsel);
- if (evsel->priv != NULL) {
+ if (sc != NULL) {
if (perf_evsel__init_tp_uint_field(evsel, &sc->id, "__syscall_nr") &&
perf_evsel__init_tp_uint_field(evsel, &sc->id, "nr"))
- goto out_delete;
+ return -ENOENT;
return 0;
}
return -ENOMEM;
-out_delete:
- zfree(&evsel->priv);
- return -ENOENT;
}
static int perf_evsel__init_augmented_syscall_tp(struct evsel *evsel, struct evsel *tp)
{
- struct syscall_tp *sc = evsel->priv = malloc(sizeof(struct syscall_tp));
+ struct syscall_tp *sc = evsel__syscall_tp(evsel);
- if (evsel->priv != NULL) {
+ if (sc != NULL) {
struct tep_format_field *syscall_id = perf_evsel__field(tp, "id");
if (syscall_id == NULL)
syscall_id = perf_evsel__field(tp, "__syscall_nr");
- if (syscall_id == NULL)
- goto out_delete;
- if (__tp_field__init_uint(&sc->id, syscall_id->size, syscall_id->offset, evsel->needs_swap))
- goto out_delete;
+ if (syscall_id == NULL ||
+ __tp_field__init_uint(&sc->id, syscall_id->size, syscall_id->offset, evsel->needs_swap))
+ return -EINVAL;
return 0;
}
return -ENOMEM;
-out_delete:
- zfree(&evsel->priv);
- return -EINVAL;
}
static int perf_evsel__init_augmented_syscall_tp_args(struct evsel *evsel)
{
- struct syscall_tp *sc = evsel->priv;
+ struct syscall_tp *sc = __evsel__syscall_tp(evsel);
return __tp_field__init_ptr(&sc->args, sc->id.offset + sizeof(u64));
}
static int perf_evsel__init_augmented_syscall_tp_ret(struct evsel *evsel)
{
- struct syscall_tp *sc = evsel->priv;
+ struct syscall_tp *sc = __evsel__syscall_tp(evsel);
return __tp_field__init_uint(&sc->ret, sizeof(u64), sc->id.offset + sizeof(u64), evsel->needs_swap);
}
static int perf_evsel__init_raw_syscall_tp(struct evsel *evsel, void *handler)
{
- evsel->priv = malloc(sizeof(struct syscall_tp));
- if (evsel->priv != NULL) {
+ if (evsel__syscall_tp(evsel) != NULL) {
if (perf_evsel__init_sc_tp_uint_field(evsel, id))
- goto out_delete;
+ return -ENOENT;
evsel->handler = handler;
return 0;
}
return -ENOMEM;
-
-out_delete:
- zfree(&evsel->priv);
- return -ENOENT;
}
static struct evsel *perf_evsel__raw_syscall_newtp(const char *direction, void *handler)
@@ -385,13 +485,27 @@ out_delete:
}
#define perf_evsel__sc_tp_uint(evsel, name, sample) \
- ({ struct syscall_tp *fields = evsel->priv; \
+ ({ struct syscall_tp *fields = __evsel__syscall_tp(evsel); \
fields->name.integer(&fields->name, sample); })
#define perf_evsel__sc_tp_ptr(evsel, name, sample) \
- ({ struct syscall_tp *fields = evsel->priv; \
+ ({ struct syscall_tp *fields = __evsel__syscall_tp(evsel); \
fields->name.pointer(&fields->name, sample); })
+size_t strarray__scnprintf_suffix(struct strarray *sa, char *bf, size_t size, const char *intfmt, bool show_suffix, int val)
+{
+ int idx = val - sa->offset;
+
+ if (idx < 0 || idx >= sa->nr_entries || sa->entries[idx] == NULL) {
+ size_t printed = scnprintf(bf, size, intfmt, val);
+ if (show_suffix)
+ printed += scnprintf(bf + printed, size - printed, " /* %s??? */", sa->prefix);
+ return printed;
+ }
+
+ return scnprintf(bf, size, "%s%s", sa->entries[idx], show_suffix ? sa->prefix : "");
+}
+
size_t strarray__scnprintf(struct strarray *sa, char *bf, size_t size, const char *intfmt, bool show_prefix, int val)
{
int idx = val - sa->offset;
@@ -421,6 +535,21 @@ static size_t syscall_arg__scnprintf_strarray(char *bf, size_t size,
#define SCA_STRARRAY syscall_arg__scnprintf_strarray
+bool syscall_arg__strtoul_strarray(char *bf, size_t size, struct syscall_arg *arg, u64 *ret)
+{
+ return strarray__strtoul(arg->parm, bf, size, ret);
+}
+
+bool syscall_arg__strtoul_strarray_flags(char *bf, size_t size, struct syscall_arg *arg, u64 *ret)
+{
+ return strarray__strtoul_flags(arg->parm, bf, size, ret);
+}
+
+bool syscall_arg__strtoul_strarrays(char *bf, size_t size, struct syscall_arg *arg, u64 *ret)
+{
+ return strarrays__strtoul(arg->parm, bf, size, ret);
+}
+
size_t syscall_arg__scnprintf_strarray_flags(char *bf, size_t size, struct syscall_arg *arg)
{
return strarray__scnprintf_flags(arg->parm, bf, size, arg->show_string_prefix, arg->val);
@@ -448,6 +577,77 @@ size_t strarrays__scnprintf(struct strarrays *sas, char *bf, size_t size, const
return printed;
}
+bool strarray__strtoul(struct strarray *sa, char *bf, size_t size, u64 *ret)
+{
+ int i;
+
+ for (i = 0; i < sa->nr_entries; ++i) {
+ if (sa->entries[i] && strncmp(sa->entries[i], bf, size) == 0 && sa->entries[i][size] == '\0') {
+ *ret = sa->offset + i;
+ return true;
+ }
+ }
+
+ return false;
+}
+
+bool strarray__strtoul_flags(struct strarray *sa, char *bf, size_t size, u64 *ret)
+{
+ u64 val = 0;
+ char *tok = bf, *sep, *end;
+
+ *ret = 0;
+
+ while (size != 0) {
+ int toklen = size;
+
+ sep = memchr(tok, '|', size);
+ if (sep != NULL) {
+ size -= sep - tok + 1;
+
+ end = sep - 1;
+ while (end > tok && isspace(*end))
+ --end;
+
+ toklen = end - tok + 1;
+ }
+
+ while (isspace(*tok))
+ ++tok;
+
+ if (isalpha(*tok) || *tok == '_') {
+ if (!strarray__strtoul(sa, tok, toklen, &val))
+ return false;
+ } else {
+ bool is_hexa = tok[0] == '0' && (tok[1] == 'x' || tok[1] == 'X');
+
+ val = strtoul(tok, NULL, is_hexa ? 16 : 0);
+ }
+
+ *ret |= (1 << (val - 1));
+
+ if (sep == NULL)
+ break;
+ tok = sep + 1;
+ }
+
+ return true;
+}
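
Reduced illustration of the reverse mapping these helpers provide, turning a string of '|'-separated names back into a bit mask; the toy table below is hypothetical, while the 1 << (val - 1) convention mirrors the code above:

#include <assert.h>
#include <stdint.h>
#include <string.h>

static const char *entries[] = { "READ", "WRITE", "EXEC" };	/* bits 1..3 */

static int lookup(const char *tok, size_t len, uint64_t *val)
{
	for (size_t i = 0; i < 3; i++) {
		if (strlen(entries[i]) == len &&
		    strncmp(entries[i], tok, len) == 0) {
			*val = i + 1;	/* 1-based, like sa->offset + i */
			return 1;
		}
	}
	return 0;
}

int main(void)
{
	uint64_t ret = 0, val;

	/* "READ|EXEC" -> (1 << 0) | (1 << 2), as in *ret |= 1 << (val - 1) */
	assert(lookup("READ", 4, &val));
	ret |= 1ULL << (val - 1);
	assert(lookup("EXEC", 4, &val));
	ret |= 1ULL << (val - 1);
	assert(ret == 0x5);
	return 0;
}
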
+
+bool strarrays__strtoul(struct strarrays *sas, char *bf, size_t size, u64 *ret)
+{
+ int i;
+
+ for (i = 0; i < sas->nr_entries; ++i) {
+ struct strarray *sa = sas->entries[i];
+
+ if (strarray__strtoul(sa, bf, size, ret))
+ return true;
+ }
+
+ return false;
+}
+
size_t syscall_arg__scnprintf_strarrays(char *bf, size_t size,
struct syscall_arg *arg)
{
@@ -499,6 +699,16 @@ size_t syscall_arg__scnprintf_long(char *bf, size_t size, struct syscall_arg *ar
return scnprintf(bf, size, "%ld", arg->val);
}
+static size_t syscall_arg__scnprintf_char_array(char *bf, size_t size, struct syscall_arg *arg)
+{
+ // XXX Hey, maybe for sched:sched_switch prev/next comm fields we can
+ // fill missing comms using thread__set_comm()...
+ // here or in a special syscall_arg__scnprintf_pid_sched_tp...
+ return scnprintf(bf, size, "\"%-.*s\"", arg->fmt->nr_entries ?: arg->len, arg->val);
+}
+
+#define SCA_CHAR_ARRAY syscall_arg__scnprintf_char_array
+
static const char *bpf_cmd[] = {
"MAP_CREATE", "MAP_LOOKUP_ELEM", "MAP_UPDATE_ELEM", "MAP_DELETE_ELEM",
"MAP_GET_NEXT_KEY", "PROG_LOAD",
@@ -672,10 +882,12 @@ static size_t syscall_arg__scnprintf_getrandom_flags(char *bf, size_t size,
#define STRARRAY(name, array) \
{ .scnprintf = SCA_STRARRAY, \
+ .strtoul = STUL_STRARRAY, \
.parm = &strarray__##array, }
#define STRARRAY_FLAGS(name, array) \
{ .scnprintf = SCA_STRARRAY_FLAGS, \
+ .strtoul = STUL_STRARRAY_FLAGS, \
.parm = &strarray__##array, }
#include "trace/beauty/arch_errno_names.c"
@@ -694,27 +906,7 @@ static size_t syscall_arg__scnprintf_getrandom_flags(char *bf, size_t size,
#include "trace/beauty/socket_type.c"
#include "trace/beauty/waitid_options.c"
-struct syscall_arg_fmt {
- size_t (*scnprintf)(char *bf, size_t size, struct syscall_arg *arg);
- unsigned long (*mask_val)(struct syscall_arg *arg, unsigned long val);
- void *parm;
- const char *name;
- bool show_zero;
-};
-
-static struct syscall_fmt {
- const char *name;
- const char *alias;
- struct {
- const char *sys_enter,
- *sys_exit;
- } bpf_prog_name;
- struct syscall_arg_fmt arg[6];
- u8 nr_args;
- bool errpid;
- bool timeout;
- bool hexret;
-} syscall_fmts[] = {
+static struct syscall_fmt syscall_fmts[] = {
{ .name = "access",
.arg = { [1] = { .scnprintf = SCA_ACCMODE, /* mode */ }, }, },
{ .name = "arch_prctl",
@@ -751,7 +943,8 @@ static struct syscall_fmt {
{ .name = "fchownat",
.arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
{ .name = "fcntl",
- .arg = { [1] = { .scnprintf = SCA_FCNTL_CMD, /* cmd */
+ .arg = { [1] = { .scnprintf = SCA_FCNTL_CMD, /* cmd */
+ .strtoul = STUL_STRARRAYS,
.parm = &strarrays__fcntl_cmds_arrays,
.show_zero = true, },
[2] = { .scnprintf = SCA_FCNTL_ARG, /* arg */ }, }, },
@@ -822,7 +1015,9 @@ static struct syscall_fmt {
.alias = "old_mmap",
#endif
.arg = { [2] = { .scnprintf = SCA_MMAP_PROT, /* prot */ },
- [3] = { .scnprintf = SCA_MMAP_FLAGS, /* flags */ },
+ [3] = { .scnprintf = SCA_MMAP_FLAGS, /* flags */
+ .strtoul = STUL_STRARRAY_FLAGS,
+ .parm = &strarray__mmap_flags, },
[5] = { .scnprintf = SCA_HEX, /* offset */ }, }, },
{ .name = "mount",
.arg = { [0] = { .scnprintf = SCA_FILENAME, /* dev_name */ },
@@ -964,24 +1159,35 @@ static int syscall_fmt__cmp(const void *name, const void *fmtp)
return strcmp(name, fmt->name);
}
+static struct syscall_fmt *__syscall_fmt__find(struct syscall_fmt *fmts, const int nmemb, const char *name)
+{
+ return bsearch(name, fmts, nmemb, sizeof(struct syscall_fmt), syscall_fmt__cmp);
+}
+
static struct syscall_fmt *syscall_fmt__find(const char *name)
{
const int nmemb = ARRAY_SIZE(syscall_fmts);
- return bsearch(name, syscall_fmts, nmemb, sizeof(struct syscall_fmt), syscall_fmt__cmp);
+ return __syscall_fmt__find(syscall_fmts, nmemb, name);
}
-static struct syscall_fmt *syscall_fmt__find_by_alias(const char *alias)
+static struct syscall_fmt *__syscall_fmt__find_by_alias(struct syscall_fmt *fmts, const int nmemb, const char *alias)
{
- int i, nmemb = ARRAY_SIZE(syscall_fmts);
+ int i;
for (i = 0; i < nmemb; ++i) {
- if (syscall_fmts[i].alias && strcmp(syscall_fmts[i].alias, alias) == 0)
- return &syscall_fmts[i];
+ if (fmts[i].alias && strcmp(fmts[i].alias, alias) == 0)
+ return &fmts[i];
}
return NULL;
}
+static struct syscall_fmt *syscall_fmt__find_by_alias(const char *alias)
+{
+ const int nmemb = ARRAY_SIZE(syscall_fmts);
+ return __syscall_fmt__find_by_alias(syscall_fmts, nmemb, alias);
+}
+
/*
* is_exit: is this "exit" or "exit_group"?
* is_open: is this "open" or "openat"? To associate the fd returned in sys_exit with the pathname in sys_enter.
@@ -1453,15 +1659,39 @@ static int syscall__alloc_arg_fmts(struct syscall *sc, int nr_args)
return 0;
}
-static int syscall__set_arg_fmts(struct syscall *sc)
+static struct syscall_arg_fmt syscall_arg_fmts__by_name[] = {
+ { .name = "msr", .scnprintf = SCA_X86_MSR, .strtoul = STUL_X86_MSR, },
+ { .name = "vector", .scnprintf = SCA_X86_IRQ_VECTORS, .strtoul = STUL_X86_IRQ_VECTORS, },
+};
+
+static int syscall_arg_fmt__cmp(const void *name, const void *fmtp)
+{
+ const struct syscall_arg_fmt *fmt = fmtp;
+ return strcmp(name, fmt->name);
+}
+
+static struct syscall_arg_fmt *
+__syscall_arg_fmt__find_by_name(struct syscall_arg_fmt *fmts, const int nmemb, const char *name)
+{
+ return bsearch(name, fmts, nmemb, sizeof(struct syscall_arg_fmt), syscall_arg_fmt__cmp);
+}
+
+static struct syscall_arg_fmt *syscall_arg_fmt__find_by_name(const char *name)
{
- struct tep_format_field *field, *last_field = NULL;
- int idx = 0, len;
+ const int nmemb = ARRAY_SIZE(syscall_arg_fmts__by_name);
+ return __syscall_arg_fmt__find_by_name(syscall_arg_fmts__by_name, nmemb, name);
+}
- for (field = sc->args; field; field = field->next, ++idx) {
+static struct tep_format_field *
+syscall_arg_fmt__init_array(struct syscall_arg_fmt *arg, struct tep_format_field *field)
+{
+ struct tep_format_field *last_field = NULL;
+ int len;
+
+ for (; field; field = field->next, ++arg) {
last_field = field;
- if (sc->fmt && sc->fmt->arg[idx].scnprintf)
+ if (arg->scnprintf)
continue;
len = strlen(field->name);
@@ -1469,14 +1699,17 @@ static int syscall__set_arg_fmts(struct syscall *sc)
if (strcmp(field->type, "const char *") == 0 &&
((len >= 4 && strcmp(field->name + len - 4, "name") == 0) ||
strstr(field->name, "path") != NULL))
- sc->arg_fmt[idx].scnprintf = SCA_FILENAME;
+ arg->scnprintf = SCA_FILENAME;
else if ((field->flags & TEP_FIELD_IS_POINTER) || strstr(field->name, "addr"))
- sc->arg_fmt[idx].scnprintf = SCA_PTR;
+ arg->scnprintf = SCA_PTR;
else if (strcmp(field->type, "pid_t") == 0)
- sc->arg_fmt[idx].scnprintf = SCA_PID;
+ arg->scnprintf = SCA_PID;
else if (strcmp(field->type, "umode_t") == 0)
- sc->arg_fmt[idx].scnprintf = SCA_MODE_T;
- else if ((strcmp(field->type, "int") == 0 ||
+ arg->scnprintf = SCA_MODE_T;
+ else if ((field->flags & TEP_FIELD_IS_ARRAY) && strstr(field->type, "char")) {
+ arg->scnprintf = SCA_CHAR_ARRAY;
+ arg->nr_entries = field->arraylen;
+ } else if ((strcmp(field->type, "int") == 0 ||
strcmp(field->type, "unsigned int") == 0 ||
strcmp(field->type, "long") == 0) &&
len >= 2 && strcmp(field->name + len - 2, "fd") == 0) {
@@ -1487,10 +1720,24 @@ static int syscall__set_arg_fmts(struct syscall *sc)
* 23 unsigned int
* 7 unsigned long
*/
- sc->arg_fmt[idx].scnprintf = SCA_FD;
+ arg->scnprintf = SCA_FD;
+ } else {
+ struct syscall_arg_fmt *fmt = syscall_arg_fmt__find_by_name(field->name);
+
+ if (fmt) {
+ arg->scnprintf = fmt->scnprintf;
+ arg->strtoul = fmt->strtoul;
+ }
}
}
+ return last_field;
+}
+
+static int syscall__set_arg_fmts(struct syscall *sc)
+{
+ struct tep_format_field *last_field = syscall_arg_fmt__init_array(sc->arg_fmt, sc->args);
+
if (last_field)
sc->args_size = last_field->offset + last_field->size;
@@ -1552,6 +1799,18 @@ static int trace__read_syscall_info(struct trace *trace, int id)
return syscall__set_arg_fmts(sc);
}
+static int perf_evsel__init_tp_arg_scnprintf(struct evsel *evsel)
+{
+ struct syscall_arg_fmt *fmt = evsel__syscall_arg_fmt(evsel);
+
+ if (fmt != NULL) {
+ syscall_arg_fmt__init_array(fmt, evsel->tp_format->format.fields);
+ return 0;
+ }
+
+ return -ENOMEM;
+}
+
static int intcmp(const void *a, const void *b)
{
const int *one = a, *another = b;
@@ -1680,22 +1939,22 @@ static size_t syscall__scnprintf_name(struct syscall *sc, char *bf, size_t size,
* as mount 'flags' argument that needs ignoring some magic flag, see comment
* in tools/perf/trace/beauty/mount_flags.c
*/
-static unsigned long syscall__mask_val(struct syscall *sc, struct syscall_arg *arg, unsigned long val)
+static unsigned long syscall_arg_fmt__mask_val(struct syscall_arg_fmt *fmt, struct syscall_arg *arg, unsigned long val)
{
- if (sc->arg_fmt && sc->arg_fmt[arg->idx].mask_val)
- return sc->arg_fmt[arg->idx].mask_val(arg, val);
+ if (fmt && fmt->mask_val)
+ return fmt->mask_val(arg, val);
return val;
}
-static size_t syscall__scnprintf_val(struct syscall *sc, char *bf, size_t size,
- struct syscall_arg *arg, unsigned long val)
+static size_t syscall_arg_fmt__scnprintf_val(struct syscall_arg_fmt *fmt, char *bf, size_t size,
+ struct syscall_arg *arg, unsigned long val)
{
- if (sc->arg_fmt && sc->arg_fmt[arg->idx].scnprintf) {
+ if (fmt && fmt->scnprintf) {
arg->val = val;
- if (sc->arg_fmt[arg->idx].parm)
- arg->parm = sc->arg_fmt[arg->idx].parm;
- return sc->arg_fmt[arg->idx].scnprintf(bf, size, arg);
+ if (fmt->parm)
+ arg->parm = fmt->parm;
+ return fmt->scnprintf(bf, size, arg);
}
return scnprintf(bf, size, "%ld", val);
}
@@ -1736,12 +1995,13 @@ static size_t syscall__scnprintf_args(struct syscall *sc, char *bf, size_t size,
if (arg.mask & bit)
continue;
+ arg.fmt = &sc->arg_fmt[arg.idx];
val = syscall_arg__val(&arg, arg.idx);
/*
* Some syscall args need some mask, most don't and
* return val untouched.
*/
- val = syscall__mask_val(sc, &arg, val);
+ val = syscall_arg_fmt__mask_val(&sc->arg_fmt[arg.idx], &arg, val);
/*
* Suppress this argument if its value is zero and
@@ -1762,7 +2022,8 @@ static size_t syscall__scnprintf_args(struct syscall *sc, char *bf, size_t size,
if (trace->show_arg_names)
printed += scnprintf(bf + printed, size - printed, "%s: ", field->name);
- printed += syscall__scnprintf_val(sc, bf + printed, size - printed, &arg, val);
+ printed += syscall_arg_fmt__scnprintf_val(&sc->arg_fmt[arg.idx],
+ bf + printed, size - printed, &arg, val);
}
} else if (IS_ERR(sc->tp_format)) {
/*
@@ -1777,7 +2038,7 @@ static size_t syscall__scnprintf_args(struct syscall *sc, char *bf, size_t size,
if (printed)
printed += scnprintf(bf + printed, size - printed, ", ");
printed += syscall__scnprintf_name(sc, bf + printed, size - printed, &arg);
- printed += syscall__scnprintf_val(sc, bf + printed, size - printed, &arg, val);
+ printed += syscall_arg_fmt__scnprintf_val(&sc->arg_fmt[arg.idx], bf + printed, size - printed, &arg, val);
next_arg:
++arg.idx;
bit <<= 1;
@@ -1844,11 +2105,18 @@ out_cant_read:
return NULL;
}
-static void thread__update_stats(struct thread_trace *ttrace,
- int id, struct perf_sample *sample)
+struct syscall_stats {
+ struct stats stats;
+ u64 nr_failures;
+ int max_errno;
+ u32 *errnos;
+};
+
+static void thread__update_stats(struct thread *thread, struct thread_trace *ttrace,
+ int id, struct perf_sample *sample, long err, bool errno_summary)
{
struct int_node *inode;
- struct stats *stats;
+ struct syscall_stats *stats;
u64 duration = 0;
inode = intlist__findnew(ttrace->syscall_stats, id);
@@ -1857,17 +2125,46 @@ static void thread__update_stats(struct thread_trace *ttrace,
stats = inode->priv;
if (stats == NULL) {
- stats = malloc(sizeof(struct stats));
+ stats = malloc(sizeof(*stats));
if (stats == NULL)
return;
- init_stats(stats);
+
+ stats->nr_failures = 0;
+ stats->max_errno = 0;
+ stats->errnos = NULL;
+ init_stats(&stats->stats);
inode->priv = stats;
}
if (ttrace->entry_time && sample->time > ttrace->entry_time)
duration = sample->time - ttrace->entry_time;
- update_stats(stats, duration);
+ update_stats(&stats->stats, duration);
+
+ if (err < 0) {
+ ++stats->nr_failures;
+
+ if (!errno_summary)
+ return;
+
+ err = -err;
+ if (err > stats->max_errno) {
+ u32 *new_errnos = realloc(stats->errnos, err * sizeof(u32));
+
+ if (new_errnos) {
+ memset(new_errnos + stats->max_errno, 0, (err - stats->max_errno) * sizeof(u32));
+ } else {
+ pr_debug("Not enough memory for errno stats for thread \"%s\"(%d/%d), results will be incomplete\n",
+ thread__comm_str(thread), thread->pid_, thread->tid);
+ return;
+ }
+
+ stats->errnos = new_errnos;
+ stats->max_errno = err;
+ }
+
+ ++stats->errnos[err - 1];
+ }
}
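
The errno accounting above keeps, per syscall, a counter array indexed by errno - 1 that grows on demand, zeroing only the new tail. A self-contained sketch of that pattern (the struct and function names here are illustrative, not the patch's):

    #include <stdlib.h>
    #include <string.h>
    #include <stdint.h>

    struct errno_hist {
            int max_errno;
            uint32_t *counts;
    };

    static int errno_hist__inc(struct errno_hist *h, int err)
    {
            if (err > h->max_errno) {
                    uint32_t *tmp = realloc(h->counts, err * sizeof(uint32_t));

                    if (tmp == NULL)
                            return -1;
                    /* zero only the newly added slots */
                    memset(tmp + h->max_errno, 0, (err - h->max_errno) * sizeof(uint32_t));
                    h->counts = tmp;
                    h->max_errno = err;
            }
            ++h->counts[err - 1];
            return 0;
    }
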
static int trace__printf_interrupted_entry(struct trace *trace)
@@ -2112,11 +2409,11 @@ static int trace__sys_exit(struct trace *trace, struct evsel *evsel,
trace__fprintf_sample(trace, evsel, sample, thread);
- if (trace->summary)
- thread__update_stats(ttrace, id, sample);
-
ret = perf_evsel__sc_tp_uint(evsel, ret, sample);
+ if (trace->summary)
+ thread__update_stats(thread, ttrace, id, sample, ret, trace->errno_summary);
+
if (!trace->fd_path_disabled && sc->is_open && ret >= 0 && ttrace->filename.pending_open) {
trace__set_fd_pathname(thread, ret, ttrace->filename.name);
ttrace->filename.pending_open = false;
@@ -2346,6 +2643,80 @@ static void bpf_output__fprintf(struct trace *trace,
++trace->nr_events_printed;
}
+static size_t trace__fprintf_tp_fields(struct trace *trace, struct evsel *evsel, struct perf_sample *sample,
+ struct thread *thread, void *augmented_args, int augmented_args_size)
+{
+ char bf[2048];
+ size_t size = sizeof(bf);
+ struct tep_format_field *field = evsel->tp_format->format.fields;
+ struct syscall_arg_fmt *arg = __evsel__syscall_arg_fmt(evsel);
+ size_t printed = 0;
+ unsigned long val;
+ u8 bit = 1;
+ struct syscall_arg syscall_arg = {
+ .augmented = {
+ .size = augmented_args_size,
+ .args = augmented_args,
+ },
+ .idx = 0,
+ .mask = 0,
+ .trace = trace,
+ .thread = thread,
+ .show_string_prefix = trace->show_string_prefix,
+ };
+
+ for (; field && arg; field = field->next, ++syscall_arg.idx, bit <<= 1, ++arg) {
+ if (syscall_arg.mask & bit)
+ continue;
+
+ syscall_arg.len = 0;
+ syscall_arg.fmt = arg;
+ if (field->flags & TEP_FIELD_IS_ARRAY) {
+ int offset = field->offset;
+
+ if (field->flags & TEP_FIELD_IS_DYNAMIC) {
+ offset = format_field__intval(field, sample, evsel->needs_swap);
+ syscall_arg.len = offset >> 16;
+ offset &= 0xffff;
+ }
+
+ val = (uintptr_t)(sample->raw_data + offset);
+ } else
+ val = format_field__intval(field, sample, evsel->needs_swap);
+ /*
+ * Some syscall args need some mask, most don't and
+ * return val untouched.
+ */
+ val = syscall_arg_fmt__mask_val(arg, &syscall_arg, val);
+
+ /*
+ * Suppress this argument if its value is zero and
+		 * we don't have a string associated in a
+ * strarray for it.
+ */
+ if (val == 0 &&
+ !trace->show_zeros &&
+ !((arg->show_zero ||
+ arg->scnprintf == SCA_STRARRAY ||
+ arg->scnprintf == SCA_STRARRAYS) &&
+ arg->parm))
+ continue;
+
+ printed += scnprintf(bf + printed, size - printed, "%s", printed ? ", " : "");
+
+ /*
+ * XXX Perhaps we should have a show_tp_arg_names,
+ * leaving show_arg_names just for syscalls?
+ */
+ if (1 || trace->show_arg_names)
+ printed += scnprintf(bf + printed, size - printed, "%s: ", field->name);
+
+ printed += syscall_arg_fmt__scnprintf_val(arg, bf + printed, size - printed, &syscall_arg, val);
+ }
+
+ return printed + fprintf(trace->output, "%s", bf);
+}
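
One detail worth calling out in the loop above: for TEP_FIELD_IS_DYNAMIC fields the per-sample value is an ftrace __data_loc word, (length << 16) | offset into raw_data. A tiny sketch of that decode (the helper name is an assumption):

    /* decode a __data_loc word: upper 16 bits length, lower 16 bits offset */
    static void *dyn_field_ptr(unsigned char *raw_data, unsigned int val, int *len)
    {
            *len = val >> 16;
            return raw_data + (val & 0xffff);
    }
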
+
static int trace__event_handler(struct trace *trace, struct evsel *evsel,
union perf_event *event __maybe_unused,
struct perf_sample *sample)
@@ -2399,32 +2770,37 @@ static int trace__event_handler(struct trace *trace, struct evsel *evsel,
*/
}
- fprintf(trace->output, "%s:", evsel->name);
+ fprintf(trace->output, "%s(", evsel->name);
if (perf_evsel__is_bpf_output(evsel)) {
bpf_output__fprintf(trace, sample);
} else if (evsel->tp_format) {
if (strncmp(evsel->tp_format->name, "sys_enter_", 10) ||
trace__fprintf_sys_enter(trace, evsel, sample)) {
- event_format__fprintf(evsel->tp_format, sample->cpu,
- sample->raw_data, sample->raw_size,
- trace->output);
- ++trace->nr_events_printed;
-
- if (evsel->max_events != ULONG_MAX && ++evsel->nr_events_printed == evsel->max_events) {
- evsel__disable(evsel);
- evsel__close(evsel);
+ if (trace->libtraceevent_print) {
+ event_format__fprintf(evsel->tp_format, sample->cpu,
+ sample->raw_data, sample->raw_size,
+ trace->output);
+ } else {
+ trace__fprintf_tp_fields(trace, evsel, sample, thread, NULL, 0);
}
}
}
newline:
- fprintf(trace->output, "\n");
+ fprintf(trace->output, ")\n");
if (callchain_ret > 0)
trace__fprintf_callchain(trace, sample);
else if (callchain_ret < 0)
pr_err("Problem processing %s callchain, skipping...\n", perf_evsel__name(evsel));
+
+ ++trace->nr_events_printed;
+
+ if (evsel->max_events != ULONG_MAX && ++evsel->nr_events_printed == evsel->max_events) {
+ evsel__disable(evsel);
+ evsel__close(evsel);
+ }
out:
thread__put(thread);
return 0;
@@ -2576,21 +2952,23 @@ static int trace__record(struct trace *trace, int argc, const char **argv)
"-m", "1024",
"-c", "1",
};
-
+ pid_t pid = getpid();
+ char *filter = asprintf__tp_filter_pids(1, &pid);
const char * const sc_args[] = { "-e", };
unsigned int sc_args_nr = ARRAY_SIZE(sc_args);
const char * const majpf_args[] = { "-e", "major-faults" };
unsigned int majpf_args_nr = ARRAY_SIZE(majpf_args);
const char * const minpf_args[] = { "-e", "minor-faults" };
unsigned int minpf_args_nr = ARRAY_SIZE(minpf_args);
+ int err = -1;
- /* +1 is for the event string below */
- rec_argc = ARRAY_SIZE(record_args) + sc_args_nr + 1 +
+ /* +3 is for the event string below and the pid filter */
+ rec_argc = ARRAY_SIZE(record_args) + sc_args_nr + 3 +
majpf_args_nr + minpf_args_nr + argc;
rec_argv = calloc(rec_argc + 1, sizeof(char *));
- if (rec_argv == NULL)
- return -ENOMEM;
+ if (rec_argv == NULL || filter == NULL)
+ goto out_free;
j = 0;
for (i = 0; i < ARRAY_SIZE(record_args); i++)
@@ -2607,11 +2985,13 @@ static int trace__record(struct trace *trace, int argc, const char **argv)
rec_argv[j++] = "syscalls:sys_enter,syscalls:sys_exit";
else {
pr_err("Neither raw_syscalls nor syscalls events exist.\n");
- free(rec_argv);
- return -1;
+ goto out_free;
}
}
+ rec_argv[j++] = "--filter";
+ rec_argv[j++] = filter;
+
if (trace->trace_pgfaults & TRACE_PFMAJ)
for (i = 0; i < majpf_args_nr; i++)
rec_argv[j++] = majpf_args[i];
@@ -2623,7 +3003,11 @@ static int trace__record(struct trace *trace, int argc, const char **argv)
for (i = 0; i < (unsigned int)argc; i++)
rec_argv[j++] = argv[i];
- return cmd_record(j, rec_argv);
+ err = cmd_record(j, rec_argv);
+out_free:
+ free(filter);
+ free(rec_argv);
+ return err;
}
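
The net effect of the two new argv entries is that 'perf trace record' now excludes its own pid at the tracepoint level. A sketch of the single-pid expansion (asprintf__tp_filter_pids() is the real helper; this one-pid form is an assumption of what it produces):

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <unistd.h>

    /* illustrative single-pid case; the real helper handles a pid array */
    static char *tp_filter_one_pid(pid_t pid)
    {
            char *filter = NULL;

            if (asprintf(&filter, "common_pid != %d", pid) < 0)
                    return NULL;
            return filter;
    }
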
static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp);
@@ -2632,11 +3016,18 @@ static bool evlist__add_vfs_getname(struct evlist *evlist)
{
bool found = false;
struct evsel *evsel, *tmp;
- struct parse_events_error err = { .idx = 0, };
- int ret = parse_events(evlist, "probe:vfs_getname*", &err);
+ struct parse_events_error err;
+ int ret;
- if (ret)
+ bzero(&err, sizeof(err));
+ ret = parse_events(evlist, "probe:vfs_getname*", &err);
+ if (ret) {
+ free(err.str);
+ free(err.help);
+ free(err.first_str);
+ free(err.first_help);
return false;
+ }
evlist__for_each_entry_safe(evlist, evsel, tmp) {
if (!strstarts(perf_evsel__name(evsel), "probe:vfs_getname"))
@@ -3103,7 +3494,27 @@ static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace)
return err;
}
-#else
+
+static void trace__delete_augmented_syscalls(struct trace *trace)
+{
+ struct evsel *evsel, *tmp;
+
+ evlist__remove(trace->evlist, trace->syscalls.events.augmented);
+ evsel__delete(trace->syscalls.events.augmented);
+ trace->syscalls.events.augmented = NULL;
+
+ evlist__for_each_entry_safe(trace->evlist, tmp, evsel) {
+ if (evsel->bpf_obj == trace->bpf_obj) {
+ evlist__remove(trace->evlist, evsel);
+ evsel__delete(evsel);
+ }
+
+ }
+
+ bpf_object__close(trace->bpf_obj);
+ trace->bpf_obj = NULL;
+}
+#else // HAVE_LIBBPF_SUPPORT
static int trace__set_ev_qualifier_bpf_filter(struct trace *trace __maybe_unused)
{
return 0;
@@ -3124,8 +3535,27 @@ static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace __maybe_
{
return 0;
}
+
+static void trace__delete_augmented_syscalls(struct trace *trace __maybe_unused)
+{
+}
#endif // HAVE_LIBBPF_SUPPORT
+static bool trace__only_augmented_syscalls_evsels(struct trace *trace)
+{
+ struct evsel *evsel;
+
+ evlist__for_each_entry(trace->evlist, evsel) {
+ if (evsel == trace->syscalls.events.augmented ||
+ evsel->bpf_obj == trace->bpf_obj)
+ continue;
+
+ return false;
+ }
+
+ return true;
+}
+
static int trace__set_ev_qualifier_filter(struct trace *trace)
{
if (trace->syscalls.map)
@@ -3175,7 +3605,7 @@ static int trace__set_filter_loop_pids(struct trace *trace)
thread = parent;
}
- err = perf_evlist__set_tp_filter_pids(trace->evlist, nr, pids);
+ err = perf_evlist__append_tp_filter_pids(trace->evlist, nr, pids);
if (!err && trace->filter_pids.map)
err = bpf_map__set_filter_pids(trace->filter_pids.map, nr, pids);
@@ -3192,8 +3622,8 @@ static int trace__set_filter_pids(struct trace *trace)
* we fork the workload in perf_evlist__prepare_workload.
*/
if (trace->filter_pids.nr > 0) {
- err = perf_evlist__set_tp_filter_pids(trace->evlist, trace->filter_pids.nr,
- trace->filter_pids.entries);
+ err = perf_evlist__append_tp_filter_pids(trace->evlist, trace->filter_pids.nr,
+ trace->filter_pids.entries);
if (!err && trace->filter_pids.map) {
err = bpf_map__set_filter_pids(trace->filter_pids.map, trace->filter_pids.nr,
trace->filter_pids.entries);
@@ -3263,6 +3693,137 @@ static int ordered_events__deliver_event(struct ordered_events *oe,
return __trace__deliver_event(trace, event->event);
}
+static struct syscall_arg_fmt *perf_evsel__syscall_arg_fmt(struct evsel *evsel, char *arg)
+{
+ struct tep_format_field *field;
+ struct syscall_arg_fmt *fmt = __evsel__syscall_arg_fmt(evsel);
+
+ if (evsel->tp_format == NULL || fmt == NULL)
+ return NULL;
+
+ for (field = evsel->tp_format->format.fields; field; field = field->next, ++fmt)
+ if (strcmp(field->name, arg) == 0)
+ return fmt;
+
+ return NULL;
+}
+
+static int trace__expand_filter(struct trace *trace __maybe_unused, struct evsel *evsel)
+{
+ char *tok, *left = evsel->filter, *new_filter = evsel->filter;
+
+ while ((tok = strpbrk(left, "=<>!")) != NULL) {
+ char *right = tok + 1, *right_end;
+
+ if (*right == '=')
+ ++right;
+
+ while (isspace(*right))
+ ++right;
+
+ if (*right == '\0')
+ break;
+
+ while (!isalpha(*left))
+ if (++left == tok) {
+ /*
+ * Bail out, can't find the name of the argument that is being
+ * used in the filter, let it try to set this filter, will fail later.
+ */
+ return 0;
+ }
+
+ right_end = right + 1;
+ while (isalnum(*right_end) || *right_end == '_' || *right_end == '|')
+ ++right_end;
+
+ if (isalpha(*right)) {
+ struct syscall_arg_fmt *fmt;
+ int left_size = tok - left,
+ right_size = right_end - right;
+ char arg[128];
+
+ while (isspace(left[left_size - 1]))
+ --left_size;
+
+ scnprintf(arg, sizeof(arg), "%.*s", left_size, left);
+
+ fmt = perf_evsel__syscall_arg_fmt(evsel, arg);
+ if (fmt == NULL) {
+ pr_err("\"%s\" not found in \"%s\", can't set filter \"%s\"\n",
+ arg, evsel->name, evsel->filter);
+ return -1;
+ }
+
+ pr_debug2("trying to expand \"%s\" \"%.*s\" \"%.*s\" -> ",
+ arg, (int)(right - tok), tok, right_size, right);
+
+ if (fmt->strtoul) {
+ u64 val;
+ struct syscall_arg syscall_arg = {
+ .parm = fmt->parm,
+ };
+
+ if (fmt->strtoul(right, right_size, &syscall_arg, &val)) {
+ char *n, expansion[19];
+				int expansion_length = scnprintf(expansion, sizeof(expansion), "%#" PRIx64, val);
+ int expansion_offset = right - new_filter;
+
+ pr_debug("%s", expansion);
+
+ if (asprintf(&n, "%.*s%s%s", expansion_offset, new_filter, expansion, right_end) < 0) {
+ pr_debug(" out of memory!\n");
+ free(new_filter);
+ return -1;
+ }
+ if (new_filter != evsel->filter)
+ free(new_filter);
+				left = n + expansion_offset + expansion_length;
+ new_filter = n;
+ } else {
+ pr_err("\"%.*s\" not found for \"%s\" in \"%s\", can't set filter \"%s\"\n",
+ right_size, right, arg, evsel->name, evsel->filter);
+ return -1;
+ }
+ } else {
+ pr_err("No resolver (strtoul) for \"%s\" in \"%s\", can't set filter \"%s\"\n",
+ arg, evsel->name, evsel->filter);
+ return -1;
+ }
+
+ pr_debug("\n");
+ } else {
+ left = right_end;
+ }
+ }
+
+ if (new_filter != evsel->filter) {
+ pr_debug("New filter for %s: %s\n", evsel->name, new_filter);
+ perf_evsel__set_filter(evsel, new_filter);
+ free(new_filter);
+ }
+
+ return 0;
+}
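
trace__expand_filter() leans on each argument's ->strtoul resolver to turn a symbolic right-hand side (say 'flags==CLOEXEC') into a number before the string reaches the kernel's tracepoint filter. A minimal sketch of such a resolver over a string table; the strarray layout is an assumption and offset handling is omitted:

    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>

    struct strarray {
            int nr_entries;
            const char **entries;
    };

    static bool strarray__strtoul(struct strarray *sa, char *bf, size_t size, uint64_t *ret)
    {
            int i;

            for (i = 0; i < sa->nr_entries; ++i) {
                    const char *name = sa->entries[i];

                    if (name && strlen(name) == size && strncmp(name, bf, size) == 0) {
                            *ret = i; /* the table index doubles as the value */
                            return true;
                    }
            }

            return false;
    }
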
+
+static int trace__expand_filters(struct trace *trace, struct evsel **err_evsel)
+{
+ struct evlist *evlist = trace->evlist;
+ struct evsel *evsel;
+
+ evlist__for_each_entry(evlist, evsel) {
+ if (evsel->filter == NULL)
+ continue;
+
+ if (trace__expand_filter(trace, evsel)) {
+ *err_evsel = evsel;
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
static int trace__run(struct trace *trace, int argc, const char **argv)
{
struct evlist *evlist = trace->evlist;
@@ -3302,7 +3863,6 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
perf_evlist__add_newtp(evlist, "sched", "sched_stat_runtime",
trace__sched_stat_runtime))
goto out_error_sched_stat_runtime;
-
/*
* If a global cgroup was set, apply it to all the events without an
* explicit cgroup. I.e.:
@@ -3405,6 +3965,9 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
*/
trace->fd_path_disabled = !trace__syscall_enabled(trace, syscalltbl__id(trace->sctbl, "close"));
+ err = trace__expand_filters(trace, &evsel);
+ if (err)
+ goto out_delete_evlist;
err = perf_evlist__apply_filters(evlist, &evsel);
if (err < 0)
goto out_error_apply_filters;
@@ -3450,17 +4013,17 @@ again:
struct mmap *md;
md = &evlist->mmap[i];
- if (perf_mmap__read_init(md) < 0)
+ if (perf_mmap__read_init(&md->core) < 0)
continue;
- while ((event = perf_mmap__read_event(md)) != NULL) {
+ while ((event = perf_mmap__read_event(&md->core)) != NULL) {
++trace->nr_events;
err = trace__deliver_event(trace, event);
if (err)
goto out_disable;
- perf_mmap__consume(md);
+ perf_mmap__consume(&md->core);
if (interrupted)
goto out_disable;
@@ -3470,7 +4033,7 @@ again:
draining = true;
}
}
- perf_mmap__read_done(md);
+ perf_mmap__read_done(&md->core);
}
if (trace->nr_events == before) {
@@ -3665,17 +4228,17 @@ static size_t trace__fprintf_threads_header(FILE *fp)
}
DEFINE_RESORT_RB(syscall_stats, a->msecs > b->msecs,
- struct stats *stats;
- double msecs;
- int syscall;
+ struct syscall_stats *stats;
+ double msecs;
+ int syscall;
)
{
struct int_node *source = rb_entry(nd, struct int_node, rb_node);
- struct stats *stats = source->priv;
+ struct syscall_stats *stats = source->priv;
entry->syscall = source->i;
entry->stats = stats;
- entry->msecs = stats ? (u64)stats->n * (avg_stats(stats) / NSEC_PER_MSEC) : 0;
+ entry->msecs = stats ? (u64)stats->stats.n * (avg_stats(&stats->stats) / NSEC_PER_MSEC) : 0;
}
static size_t thread__dump_stats(struct thread_trace *ttrace,
@@ -3691,27 +4254,37 @@ static size_t thread__dump_stats(struct thread_trace *ttrace,
printed += fprintf(fp, "\n");
- printed += fprintf(fp, " syscall calls total min avg max stddev\n");
- printed += fprintf(fp, " (msec) (msec) (msec) (msec) (%%)\n");
- printed += fprintf(fp, " --------------- -------- --------- --------- --------- --------- ------\n");
+ printed += fprintf(fp, " syscall calls errors total min avg max stddev\n");
+ printed += fprintf(fp, " (msec) (msec) (msec) (msec) (%%)\n");
+ printed += fprintf(fp, " --------------- -------- ------ -------- --------- --------- --------- ------\n");
resort_rb__for_each_entry(nd, syscall_stats) {
- struct stats *stats = syscall_stats_entry->stats;
+ struct syscall_stats *stats = syscall_stats_entry->stats;
if (stats) {
- double min = (double)(stats->min) / NSEC_PER_MSEC;
- double max = (double)(stats->max) / NSEC_PER_MSEC;
- double avg = avg_stats(stats);
+ double min = (double)(stats->stats.min) / NSEC_PER_MSEC;
+ double max = (double)(stats->stats.max) / NSEC_PER_MSEC;
+ double avg = avg_stats(&stats->stats);
double pct;
- u64 n = (u64) stats->n;
+ u64 n = (u64)stats->stats.n;
- pct = avg ? 100.0 * stddev_stats(stats)/avg : 0.0;
+ pct = avg ? 100.0 * stddev_stats(&stats->stats) / avg : 0.0;
avg /= NSEC_PER_MSEC;
sc = &trace->syscalls.table[syscall_stats_entry->syscall];
printed += fprintf(fp, " %-15s", sc->name);
- printed += fprintf(fp, " %8" PRIu64 " %9.3f %9.3f %9.3f",
- n, syscall_stats_entry->msecs, min, avg);
+ printed += fprintf(fp, " %8" PRIu64 " %6" PRIu64 " %9.3f %9.3f %9.3f",
+ n, stats->nr_failures, syscall_stats_entry->msecs, min, avg);
printed += fprintf(fp, " %9.3f %9.2f%%\n", max, pct);
+
+ if (trace->errno_summary && stats->nr_failures) {
+ const char *arch_name = perf_env__arch(trace->host->env);
+ int e;
+
+ for (e = 0; e < stats->max_errno; ++e) {
+ if (stats->errnos[e] != 0)
+ fprintf(fp, "\t\t\t\t%s: %d\n", arch_syscalls__strerrno(arch_name, e + 1), stats->errnos[e]);
+ }
+ }
}
}
@@ -3858,12 +4431,33 @@ static int parse_pagefaults(const struct option *opt, const char *str,
return 0;
}
-static void evlist__set_evsel_handler(struct evlist *evlist, void *handler)
+static void evlist__set_default_evsel_handler(struct evlist *evlist, void *handler)
{
struct evsel *evsel;
- evlist__for_each_entry(evlist, evsel)
- evsel->handler = handler;
+ evlist__for_each_entry(evlist, evsel) {
+ if (evsel->handler == NULL)
+ evsel->handler = handler;
+ }
+}
+
+static void evsel__set_syscall_arg_fmt(struct evsel *evsel, const char *name)
+{
+ struct syscall_arg_fmt *fmt = evsel__syscall_arg_fmt(evsel);
+
+ if (fmt) {
+ struct syscall_fmt *scfmt = syscall_fmt__find(name);
+
+ if (scfmt) {
+ int skip = 0;
+
+ if (strcmp(evsel->tp_format->format.fields->name, "__syscall_nr") == 0 ||
+ strcmp(evsel->tp_format->format.fields->name, "nr") == 0)
+ ++skip;
+
+ memcpy(fmt + skip, scfmt->arg, (evsel->tp_format->format.nr_fields - skip) * sizeof(*fmt));
+ }
+ }
}
static int evlist__set_syscall_tp_fields(struct evlist *evlist)
@@ -3874,22 +4468,28 @@ static int evlist__set_syscall_tp_fields(struct evlist *evlist)
if (evsel->priv || !evsel->tp_format)
continue;
- if (strcmp(evsel->tp_format->system, "syscalls"))
+ if (strcmp(evsel->tp_format->system, "syscalls")) {
+ perf_evsel__init_tp_arg_scnprintf(evsel);
continue;
+ }
if (perf_evsel__init_syscall_tp(evsel))
return -1;
if (!strncmp(evsel->tp_format->name, "sys_enter_", 10)) {
- struct syscall_tp *sc = evsel->priv;
+ struct syscall_tp *sc = __evsel__syscall_tp(evsel);
if (__tp_field__init_ptr(&sc->args, sc->id.offset + sizeof(u64)))
return -1;
+
+ evsel__set_syscall_arg_fmt(evsel, evsel->tp_format->name + sizeof("sys_enter_") - 1);
} else if (!strncmp(evsel->tp_format->name, "sys_exit_", 9)) {
- struct syscall_tp *sc = evsel->priv;
+ struct syscall_tp *sc = __evsel__syscall_tp(evsel);
if (__tp_field__init_uint(&sc->ret, sizeof(u64), sc->id.offset + sizeof(u64), evsel->needs_swap))
return -1;
+
+ evsel__set_syscall_arg_fmt(evsel, evsel->tp_format->name + sizeof("sys_exit_") - 1);
}
}
@@ -4029,15 +4629,11 @@ static int trace__config(const char *var, const char *value, void *arg)
int err = 0;
if (!strcmp(var, "trace.add_events")) {
- struct option o = OPT_CALLBACK('e', "event", &trace->evlist, "event",
- "event selector. use 'perf list' to list available events",
- parse_events_option);
- /*
- * We can't propagate parse_event_option() return, as it is 1
- * for failure while perf_config() expects -1.
- */
- if (parse_events_option(&o, value, 0))
- err = -1;
+ trace->perfconfig_events = strdup(value);
+ if (trace->perfconfig_events == NULL) {
+ pr_err("Not enough memory for %s\n", "trace.add_events");
+ return -1;
+ }
} else if (!strcmp(var, "trace.show_timestamp")) {
trace->show_tstamp = perf_config_bool(var, value);
} else if (!strcmp(var, "trace.show_duration")) {
@@ -4061,6 +4657,11 @@ static int trace__config(const char *var, const char *value, void *arg)
int args_alignment = 0;
if (perf_config_int(&args_alignment, var, value) == 0)
trace->args_alignment = args_alignment;
+ } else if (!strcmp(var, "trace.tracepoint_beautifiers")) {
+ if (strcasecmp(value, "libtraceevent") == 0)
+ trace->libtraceevent_print = true;
+ else if (strcasecmp(value, "libbeauty") == 0)
+ trace->libtraceevent_print = false;
}
out:
return err;
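
For reference, the knobs handled above live in ~/.perfconfig; a hedged example (the event name is illustrative):

    [trace]
            add_events = msr:write_msr
            tracepoint_beautifiers = libbeauty
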
@@ -4103,6 +4704,8 @@ int cmd_trace(int argc, const char **argv)
OPT_CALLBACK('e', "event", &trace, "event",
"event/syscall selector. use 'perf list' to list available events",
trace__parse_events_option),
+ OPT_CALLBACK(0, "filter", &trace.evlist, "filter",
+ "event filter", parse_filter),
OPT_BOOLEAN(0, "comm", &trace.show_comm,
"show the thread COMM next to its id"),
OPT_BOOLEAN(0, "tool_stats", &trace.show_tool_stats, "show tool stats"),
@@ -4143,6 +4746,8 @@ int cmd_trace(int argc, const char **argv)
"Show only syscall summary with statistics"),
OPT_BOOLEAN('S', "with-summary", &trace.summary,
"Show all syscalls and summary with statistics"),
+ OPT_BOOLEAN(0, "errno-summary", &trace.errno_summary,
+ "Show errno stats per syscall, use with -s or -S"),
OPT_CALLBACK_DEFAULT('F', "pf", &trace.trace_pgfaults, "all|maj|min",
"Trace pagefaults", parse_pagefaults, "maj"),
OPT_BOOLEAN(0, "syscalls", &trace.trace_syscalls, "Trace syscalls"),
@@ -4150,6 +4755,8 @@ int cmd_trace(int argc, const char **argv)
OPT_CALLBACK(0, "call-graph", &trace.opts,
"record_mode[,record_size]", record_callchain_help,
&record_parse_callchain_opt),
+ OPT_BOOLEAN(0, "libtraceevent_print", &trace.libtraceevent_print,
+ "Use libtraceevent to print the tracepoint arguments."),
OPT_BOOLEAN(0, "kernel-syscall-graph", &trace.kernel_syscallchains,
"Show the kernel callchains on the syscall exit path"),
OPT_ULONG(0, "max-events", &trace.max_events,
@@ -4210,6 +4817,38 @@ int cmd_trace(int argc, const char **argv)
argc = parse_options_subcommand(argc, argv, trace_options, trace_subcommands,
trace_usage, PARSE_OPT_STOP_AT_NON_OPTION);
+ /*
+	 * Here we have already passed thru trace__parse_events_option() and it
+	 * has already figured out if -e syscall_name was used; if it wasn't,
+	 * but --event foo:bar was, the user is interested _just_ in those,
+	 * say, tracepoint events, not in the strace-like syscall-name-based
+	 * mode.
+	 *
+	 * This is important because we need to check if strace-like mode is
+	 * needed to decide whether to filter out the eBPF
+	 * __augmented_syscalls__ code when it is in the mix, say, via
+	 * .perfconfig trace.add_events.
+ */
+ if (!trace.trace_syscalls && !trace.trace_pgfaults &&
+ trace.evlist->core.nr_entries == 0 /* Was --events used? */) {
+ trace.trace_syscalls = true;
+ }
+ /*
+	 * Now that we have --verbose figured out, let's see if we need to parse
+ * events from .perfconfig, so that if those events fail parsing, say some
+ * BPF program fails, then we'll be able to use --verbose to see what went
+ * wrong in more detail.
+ */
+ if (trace.perfconfig_events != NULL) {
+ struct parse_events_error parse_err;
+
+ bzero(&parse_err, sizeof(parse_err));
+ err = parse_events(trace.evlist, trace.perfconfig_events, &parse_err);
+ if (err) {
+ parse_events_print_error(&parse_err, trace.perfconfig_events);
+ goto out;
+ }
+ }
+
if ((nr_cgroups || trace.cgroup) && !trace.opts.target.system_wide) {
usage_with_options_msg(trace_usage, trace_options,
"cgroup monitoring only available in system-wide mode");
@@ -4238,9 +4877,45 @@ int cmd_trace(int argc, const char **argv)
trace.bpf_obj = evsel->bpf_obj;
- trace__set_bpf_map_filtered_pids(&trace);
- trace__set_bpf_map_syscalls(&trace);
- trace.syscalls.unaugmented_prog = trace__find_bpf_program_by_title(&trace, "!raw_syscalls:unaugmented");
+ /*
+	 * If we have _just_ the augmenter event but don't have an
+ * explicit --syscalls, then assume we want all strace-like
+ * syscalls:
+ */
+ if (!trace.trace_syscalls && trace__only_augmented_syscalls_evsels(&trace))
+ trace.trace_syscalls = true;
+ /*
+ * So, if we have a syscall augmenter, but trace_syscalls, aka
+	 * strace-like syscall tracing, is not set, then we need to throw
+ * away the augmenter, i.e. all the events that were created
+ * from that BPF object file.
+ *
+ * This is more to fix the current .perfconfig trace.add_events
+ * style of setting up the strace-like eBPF based syscall point
+ * payload augmenter.
+ *
+ * All this complexity will be avoided by adding an alternative
+ * to trace.add_events in the form of
+	 * trace.bpf_augmented_syscalls, which will be parsed only if we
+ * need it.
+ *
+ * .perfconfig trace.add_events is still useful if we want, for
+	 * instance, to have msr_write.msr in some .perfconfig profile based
+ * 'perf trace --config determinism.profile' mode, where for some
+ * particular goal/workload type we want a set of events and
+ * output mode (with timings, etc) instead of having to add
+	 * it all via the command line.
+ *
+ * Also --config to specify an alternate .perfconfig file needs
+ * to be implemented.
+ */
+ if (!trace.trace_syscalls) {
+ trace__delete_augmented_syscalls(&trace);
+ } else {
+ trace__set_bpf_map_filtered_pids(&trace);
+ trace__set_bpf_map_syscalls(&trace);
+ trace.syscalls.unaugmented_prog = trace__find_bpf_program_by_title(&trace, "!raw_syscalls:unaugmented");
+ }
}
err = bpf__setup_stdout(trace.evlist);
@@ -4287,7 +4962,7 @@ int cmd_trace(int argc, const char **argv)
}
if (trace.evlist->core.nr_entries > 0) {
- evlist__set_evsel_handler(trace.evlist, trace__event_handler);
+ evlist__set_default_evsel_handler(trace.evlist, trace__event_handler);
if (evlist__set_syscall_tp_fields(trace.evlist)) {
perror("failed to set syscalls:* tracepoint fields");
goto out;
@@ -4348,7 +5023,7 @@ int cmd_trace(int argc, const char **argv)
init_augmented_syscall_tp:
if (perf_evsel__init_augmented_syscall_tp(evsel, evsel))
goto out;
- sc = evsel->priv;
+ sc = __evsel__syscall_tp(evsel);
/*
* For now with BPF raw_augmented we hook into
* raw_syscalls:sys_enter and there we get all
@@ -4379,15 +5054,14 @@ init_augmented_syscall_tp:
if ((argc >= 1) && (strcmp(argv[0], "record") == 0))
return trace__record(&trace, argc-1, &argv[1]);
+ /* Using just --errno-summary will trigger --summary */
+ if (trace.errno_summary && !trace.summary && !trace.summary_only)
+ trace.summary_only = true;
+
/* summary_only implies summary option, but don't overwrite summary if set */
if (trace.summary_only)
trace.summary = trace.summary_only;
- if (!trace.trace_syscalls && !trace.trace_pgfaults &&
- trace.evlist->core.nr_entries == 0 /* Was --events used? */) {
- trace.trace_syscalls = true;
- }
-
if (output_name != NULL) {
err = trace__open_output(&trace, output_name);
if (err < 0) {
@@ -4426,5 +5100,6 @@ out_close:
if (output_name != NULL)
fclose(trace.output);
out:
+ zfree(&trace.perfconfig_events);
return err;
}
diff --git a/tools/perf/check-headers.sh b/tools/perf/check-headers.sh
index cea13cb987d0..a1dc16724352 100755
--- a/tools/perf/check-headers.sh
+++ b/tools/perf/check-headers.sh
@@ -28,6 +28,9 @@ arch/x86/include/asm/disabled-features.h
arch/x86/include/asm/required-features.h
arch/x86/include/asm/cpufeatures.h
arch/x86/include/asm/inat_types.h
+arch/x86/include/asm/emulate_prefix.h
+arch/x86/include/asm/irq_vectors.h
+arch/x86/include/asm/msr-index.h
arch/x86/include/uapi/asm/prctl.h
arch/x86/lib/x86-opcode-map.txt
arch/x86/tools/gen-insn-attr-x86.awk
@@ -116,7 +119,7 @@ check lib/ctype.c '-I "^EXPORT_SYMBOL" -I "^#include <linux/export.h>" -B
check arch/x86/include/asm/inat.h '-I "^#include [\"<]\(asm/\)*inat_types.h[\">]"'
check arch/x86/include/asm/insn.h '-I "^#include [\"<]\(asm/\)*inat.h[\">]"'
check arch/x86/lib/inat.c '-I "^#include [\"<]\(../include/\)*asm/insn.h[\">]"'
-check arch/x86/lib/insn.c '-I "^#include [\"<]\(../include/\)*asm/in\(at\|sn\).h[\">]"'
+check arch/x86/lib/insn.c '-I "^#include [\"<]\(../include/\)*asm/in\(at\|sn\).h[\">]" -I "^#include [\"<]\(../include/\)*asm/emulate_prefix.h[\">]"'
# diff non-symmetric files
check_2 tools/perf/arch/x86/entry/syscalls/syscall_64.tbl arch/x86/entry/syscalls/syscall_64.tbl
diff --git a/tools/perf/lib/Build b/tools/perf/lib/Build
index c31f1c111f8f..2ef9a4ec6d99 100644
--- a/tools/perf/lib/Build
+++ b/tools/perf/lib/Build
@@ -3,6 +3,7 @@ libperf-y += cpumap.o
libperf-y += threadmap.o
libperf-y += evsel.o
libperf-y += evlist.o
+libperf-y += mmap.o
libperf-y += zalloc.o
libperf-y += xyarray.o
libperf-y += lib.o
diff --git a/tools/perf/lib/Makefile b/tools/perf/lib/Makefile
index 85ccb8c439a4..0f233638ef1f 100644
--- a/tools/perf/lib/Makefile
+++ b/tools/perf/lib/Makefile
@@ -107,6 +107,7 @@ else
endif
LIBAPI = $(API_PATH)libapi.a
+export LIBAPI
$(LIBAPI): FORCE
$(Q)$(MAKE) -C $(LIB_DIR) O=$(OUTPUT) $(OUTPUT)libapi.a
@@ -172,8 +173,9 @@ install_headers:
$(call do_install,include/perf/cpumap.h,$(prefix)/include/perf,644); \
$(call do_install,include/perf/threadmap.h,$(prefix)/include/perf,644); \
$(call do_install,include/perf/evlist.h,$(prefix)/include/perf,644); \
- $(call do_install,include/perf/evsel.h,$(prefix)/include/perf,644);
- $(call do_install,include/perf/event.h,$(prefix)/include/perf,644);
+ $(call do_install,include/perf/evsel.h,$(prefix)/include/perf,644); \
+ $(call do_install,include/perf/event.h,$(prefix)/include/perf,644); \
+ $(call do_install,include/perf/mmap.h,$(prefix)/include/perf,644);
install_pkgconfig: $(LIBPERF_PC)
$(call QUIET_INSTALL, $(LIBPERF_PC)) \
diff --git a/tools/perf/lib/core.c b/tools/perf/lib/core.c
index d0b9ae422b9f..58fc894b76c5 100644
--- a/tools/perf/lib/core.c
+++ b/tools/perf/lib/core.c
@@ -5,11 +5,12 @@
#include <stdio.h>
#include <stdarg.h>
#include <unistd.h>
+#include <linux/compiler.h>
#include <perf/core.h>
#include <internal/lib.h>
#include "internal.h"
-static int __base_pr(enum libperf_print_level level, const char *format,
+static int __base_pr(enum libperf_print_level level __maybe_unused, const char *format,
va_list args)
{
return vfprintf(stderr, format, args);
diff --git a/tools/perf/lib/evlist.c b/tools/perf/lib/evlist.c
index d1496fee810c..205ddbb80bc1 100644
--- a/tools/perf/lib/evlist.c
+++ b/tools/perf/lib/evlist.c
@@ -8,13 +8,20 @@
#include <internal/evlist.h>
#include <internal/evsel.h>
#include <internal/xyarray.h>
+#include <internal/mmap.h>
+#include <internal/cpumap.h>
+#include <internal/threadmap.h>
+#include <internal/lib.h>
#include <linux/zalloc.h>
+#include <sys/ioctl.h>
#include <stdlib.h>
#include <errno.h>
#include <unistd.h>
#include <fcntl.h>
#include <signal.h>
#include <poll.h>
+#include <sys/mman.h>
#include <perf/cpumap.h>
#include <perf/threadmap.h>
#include <api/fd/array.h>
@@ -27,6 +34,7 @@ void perf_evlist__init(struct perf_evlist *evlist)
INIT_HLIST_HEAD(&evlist->heads[i]);
INIT_LIST_HEAD(&evlist->entries);
evlist->nr_entries = 0;
+ fdarray__init(&evlist->pollfd, 64);
}
static void __perf_evlist__propagate_maps(struct perf_evlist *evlist,
@@ -101,8 +109,36 @@ perf_evlist__next(struct perf_evlist *evlist, struct perf_evsel *prev)
return next;
}
+static void perf_evlist__purge(struct perf_evlist *evlist)
+{
+ struct perf_evsel *pos, *n;
+
+ perf_evlist__for_each_entry_safe(evlist, n, pos) {
+ list_del_init(&pos->node);
+ perf_evsel__delete(pos);
+ }
+
+ evlist->nr_entries = 0;
+}
+
+void perf_evlist__exit(struct perf_evlist *evlist)
+{
+ perf_cpu_map__put(evlist->cpus);
+ perf_thread_map__put(evlist->threads);
+ evlist->cpus = NULL;
+ evlist->threads = NULL;
+ fdarray__exit(&evlist->pollfd);
+}
+
void perf_evlist__delete(struct perf_evlist *evlist)
{
+ if (evlist == NULL)
+ return;
+
+ perf_evlist__munmap(evlist);
+ perf_evlist__close(evlist);
+ perf_evlist__purge(evlist);
+ perf_evlist__exit(evlist);
free(evlist);
}
@@ -277,7 +313,328 @@ int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd,
return pos;
}
+static void perf_evlist__munmap_filtered(struct fdarray *fda, int fd,
+ void *arg __maybe_unused)
+{
+ struct perf_mmap *map = fda->priv[fd].ptr;
+
+ if (map)
+ perf_mmap__put(map);
+}
+
+int perf_evlist__filter_pollfd(struct perf_evlist *evlist, short revents_and_mask)
+{
+ return fdarray__filter(&evlist->pollfd, revents_and_mask,
+ perf_evlist__munmap_filtered, NULL);
+}
+
int perf_evlist__poll(struct perf_evlist *evlist, int timeout)
{
return fdarray__poll(&evlist->pollfd, timeout);
}
+
+static struct perf_mmap* perf_evlist__alloc_mmap(struct perf_evlist *evlist, bool overwrite)
+{
+ int i;
+ struct perf_mmap *map;
+
+ map = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
+ if (!map)
+ return NULL;
+
+ for (i = 0; i < evlist->nr_mmaps; i++) {
+ struct perf_mmap *prev = i ? &map[i - 1] : NULL;
+
+ /*
+ * When the perf_mmap() call is made we grab one refcount, plus
+ * one extra to let perf_mmap__consume() get the last
+ * events after all real references (perf_mmap__get()) are
+ * dropped.
+ *
+ * Each PERF_EVENT_IOC_SET_OUTPUT points to this mmap and
+ * thus does perf_mmap__get() on it.
+ */
+ perf_mmap__init(&map[i], prev, overwrite, NULL);
+ }
+
+ return map;
+}
+
+static void perf_evlist__set_sid_idx(struct perf_evlist *evlist,
+ struct perf_evsel *evsel, int idx, int cpu,
+ int thread)
+{
+ struct perf_sample_id *sid = SID(evsel, cpu, thread);
+
+ sid->idx = idx;
+ if (evlist->cpus && cpu >= 0)
+ sid->cpu = evlist->cpus->map[cpu];
+ else
+ sid->cpu = -1;
+ if (!evsel->system_wide && evlist->threads && thread >= 0)
+ sid->tid = perf_thread_map__pid(evlist->threads, thread);
+ else
+ sid->tid = -1;
+}
+
+static struct perf_mmap*
+perf_evlist__mmap_cb_get(struct perf_evlist *evlist, bool overwrite, int idx)
+{
+ struct perf_mmap *maps;
+
+ maps = overwrite ? evlist->mmap_ovw : evlist->mmap;
+
+ if (!maps) {
+ maps = perf_evlist__alloc_mmap(evlist, overwrite);
+ if (!maps)
+ return NULL;
+
+ if (overwrite)
+ evlist->mmap_ovw = maps;
+ else
+ evlist->mmap = maps;
+ }
+
+ return &maps[idx];
+}
+
+#define FD(e, x, y) (*(int *) xyarray__entry(e->fd, x, y))
+
+static int
+perf_evlist__mmap_cb_mmap(struct perf_mmap *map, struct perf_mmap_param *mp,
+ int output, int cpu)
+{
+ return perf_mmap__mmap(map, mp, output, cpu);
+}
+
+static void perf_evlist__set_mmap_first(struct perf_evlist *evlist, struct perf_mmap *map,
+ bool overwrite)
+{
+ if (overwrite)
+ evlist->mmap_ovw_first = map;
+ else
+ evlist->mmap_first = map;
+}
+
+static int
+mmap_per_evsel(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
+ int idx, struct perf_mmap_param *mp, int cpu_idx,
+ int thread, int *_output, int *_output_overwrite)
+{
+ int evlist_cpu = perf_cpu_map__cpu(evlist->cpus, cpu_idx);
+ struct perf_evsel *evsel;
+ int revent;
+
+ perf_evlist__for_each_entry(evlist, evsel) {
+ bool overwrite = evsel->attr.write_backward;
+ struct perf_mmap *map;
+ int *output, fd, cpu;
+
+ if (evsel->system_wide && thread)
+ continue;
+
+ cpu = perf_cpu_map__idx(evsel->cpus, evlist_cpu);
+ if (cpu == -1)
+ continue;
+
+ map = ops->get(evlist, overwrite, idx);
+ if (map == NULL)
+ return -ENOMEM;
+
+ if (overwrite) {
+ mp->prot = PROT_READ;
+ output = _output_overwrite;
+ } else {
+ mp->prot = PROT_READ | PROT_WRITE;
+ output = _output;
+ }
+
+ fd = FD(evsel, cpu, thread);
+
+ if (*output == -1) {
+ *output = fd;
+
+ /*
+ * The last one will be done at perf_mmap__consume(), so that we
+ * make sure we don't prevent tools from consuming every last event in
+ * the ring buffer.
+ *
+ * I.e. we can get the POLLHUP meaning that the fd doesn't exist
+ * anymore, but the last events for it are still in the ring buffer,
+ * waiting to be consumed.
+ *
+			 * Tools can choose to ignore this at their own discretion, but the
+ * evlist layer can't just drop it when filtering events in
+ * perf_evlist__filter_pollfd().
+ */
+ refcount_set(&map->refcnt, 2);
+
+ if (ops->mmap(map, mp, *output, evlist_cpu) < 0)
+ return -1;
+
+ if (!idx)
+ perf_evlist__set_mmap_first(evlist, map, overwrite);
+ } else {
+ if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0)
+ return -1;
+
+ perf_mmap__get(map);
+ }
+
+ revent = !overwrite ? POLLIN : 0;
+
+ if (!evsel->system_wide &&
+ perf_evlist__add_pollfd(evlist, fd, map, revent) < 0) {
+ perf_mmap__put(map);
+ return -1;
+ }
+
+ if (evsel->attr.read_format & PERF_FORMAT_ID) {
+ if (perf_evlist__id_add_fd(evlist, evsel, cpu, thread,
+ fd) < 0)
+ return -1;
+ perf_evlist__set_sid_idx(evlist, evsel, idx, cpu,
+ thread);
+ }
+ }
+
+ return 0;
+}
+
+static int
+mmap_per_thread(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
+ struct perf_mmap_param *mp)
+{
+ int thread;
+ int nr_threads = perf_thread_map__nr(evlist->threads);
+
+ for (thread = 0; thread < nr_threads; thread++) {
+ int output = -1;
+ int output_overwrite = -1;
+
+ if (ops->idx)
+ ops->idx(evlist, mp, thread, false);
+
+ if (mmap_per_evsel(evlist, ops, thread, mp, 0, thread,
+ &output, &output_overwrite))
+ goto out_unmap;
+ }
+
+ return 0;
+
+out_unmap:
+ perf_evlist__munmap(evlist);
+ return -1;
+}
+
+static int
+mmap_per_cpu(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
+ struct perf_mmap_param *mp)
+{
+ int nr_threads = perf_thread_map__nr(evlist->threads);
+ int nr_cpus = perf_cpu_map__nr(evlist->cpus);
+ int cpu, thread;
+
+ for (cpu = 0; cpu < nr_cpus; cpu++) {
+ int output = -1;
+ int output_overwrite = -1;
+
+ if (ops->idx)
+ ops->idx(evlist, mp, cpu, true);
+
+ for (thread = 0; thread < nr_threads; thread++) {
+ if (mmap_per_evsel(evlist, ops, cpu, mp, cpu,
+ thread, &output, &output_overwrite))
+ goto out_unmap;
+ }
+ }
+
+ return 0;
+
+out_unmap:
+ perf_evlist__munmap(evlist);
+ return -1;
+}
+
+static int perf_evlist__nr_mmaps(struct perf_evlist *evlist)
+{
+ int nr_mmaps;
+
+ nr_mmaps = perf_cpu_map__nr(evlist->cpus);
+ if (perf_cpu_map__empty(evlist->cpus))
+ nr_mmaps = perf_thread_map__nr(evlist->threads);
+
+ return nr_mmaps;
+}
+
+int perf_evlist__mmap_ops(struct perf_evlist *evlist,
+ struct perf_evlist_mmap_ops *ops,
+ struct perf_mmap_param *mp)
+{
+ struct perf_evsel *evsel;
+ const struct perf_cpu_map *cpus = evlist->cpus;
+ const struct perf_thread_map *threads = evlist->threads;
+
+ if (!ops || !ops->get || !ops->mmap)
+ return -EINVAL;
+
+ mp->mask = evlist->mmap_len - page_size - 1;
+
+ evlist->nr_mmaps = perf_evlist__nr_mmaps(evlist);
+
+ perf_evlist__for_each_entry(evlist, evsel) {
+ if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
+ evsel->sample_id == NULL &&
+ perf_evsel__alloc_id(evsel, perf_cpu_map__nr(cpus), threads->nr) < 0)
+ return -ENOMEM;
+ }
+
+ if (evlist->pollfd.entries == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
+ return -ENOMEM;
+
+ if (perf_cpu_map__empty(cpus))
+ return mmap_per_thread(evlist, ops, mp);
+
+ return mmap_per_cpu(evlist, ops, mp);
+}
+
+int perf_evlist__mmap(struct perf_evlist *evlist, int pages)
+{
+ struct perf_mmap_param mp;
+ struct perf_evlist_mmap_ops ops = {
+ .get = perf_evlist__mmap_cb_get,
+ .mmap = perf_evlist__mmap_cb_mmap,
+ };
+
+ evlist->mmap_len = (pages + 1) * page_size;
+
+ return perf_evlist__mmap_ops(evlist, &ops, &mp);
+}
+
+void perf_evlist__munmap(struct perf_evlist *evlist)
+{
+ int i;
+
+ if (evlist->mmap) {
+ for (i = 0; i < evlist->nr_mmaps; i++)
+ perf_mmap__munmap(&evlist->mmap[i]);
+ }
+
+ if (evlist->mmap_ovw) {
+ for (i = 0; i < evlist->nr_mmaps; i++)
+ perf_mmap__munmap(&evlist->mmap_ovw[i]);
+ }
+
+ zfree(&evlist->mmap);
+ zfree(&evlist->mmap_ovw);
+}
+
+struct perf_mmap*
+perf_evlist__next_mmap(struct perf_evlist *evlist, struct perf_mmap *map,
+ bool overwrite)
+{
+ if (map)
+ return map->next;
+
+ return overwrite ? evlist->mmap_ovw_first : evlist->mmap_first;
+}
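
Putting the new evlist mmap API together, a consumer is expected to drive it roughly like this (a sketch with error handling trimmed; it mirrors the read_init/read_event/consume/read_done loop builtin-trace now uses):

    #include <perf/evlist.h>
    #include <perf/mmap.h>
    #include <perf/event.h>

    static int count_events(struct perf_evlist *evlist)
    {
            struct perf_mmap *map;
            union perf_event *event;
            int n = 0;

            perf_evlist__for_each_mmap(evlist, map, false) {
                    if (perf_mmap__read_init(map) < 0)
                            continue;

                    while ((event = perf_mmap__read_event(map)) != NULL) {
                            n++;
                            perf_mmap__consume(map);
                    }

                    perf_mmap__read_done(map);
            }

            return n;
    }
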
diff --git a/tools/perf/lib/evsel.c b/tools/perf/lib/evsel.c
index a8cb582e2721..5a89857b0381 100644
--- a/tools/perf/lib/evsel.c
+++ b/tools/perf/lib/evsel.c
@@ -120,7 +120,8 @@ void perf_evsel__close_fd(struct perf_evsel *evsel)
for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++)
for (thread = 0; thread < xyarray__max_y(evsel->fd); ++thread) {
- close(FD(evsel, cpu, thread));
+ if (FD(evsel, cpu, thread) >= 0)
+ close(FD(evsel, cpu, thread));
FD(evsel, cpu, thread) = -1;
}
}
diff --git a/tools/perf/lib/include/internal/evlist.h b/tools/perf/lib/include/internal/evlist.h
index 9f440ab12b76..a2fbccf1922f 100644
--- a/tools/perf/lib/include/internal/evlist.h
+++ b/tools/perf/lib/include/internal/evlist.h
@@ -11,6 +11,7 @@
struct perf_cpu_map;
struct perf_thread_map;
+struct perf_mmap_param;
struct perf_evlist {
struct list_head entries;
@@ -22,12 +23,36 @@ struct perf_evlist {
size_t mmap_len;
struct fdarray pollfd;
struct hlist_head heads[PERF_EVLIST__HLIST_SIZE];
+ struct perf_mmap *mmap;
+ struct perf_mmap *mmap_ovw;
+ struct perf_mmap *mmap_first;
+ struct perf_mmap *mmap_ovw_first;
+};
+
+typedef void
+(*perf_evlist_mmap__cb_idx_t)(struct perf_evlist*, struct perf_mmap_param*, int, bool);
+typedef struct perf_mmap*
+(*perf_evlist_mmap__cb_get_t)(struct perf_evlist*, bool, int);
+typedef int
+(*perf_evlist_mmap__cb_mmap_t)(struct perf_mmap*, struct perf_mmap_param*, int, int);
+
+struct perf_evlist_mmap_ops {
+ perf_evlist_mmap__cb_idx_t idx;
+ perf_evlist_mmap__cb_get_t get;
+ perf_evlist_mmap__cb_mmap_t mmap;
};
int perf_evlist__alloc_pollfd(struct perf_evlist *evlist);
int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd,
void *ptr, short revent);
+int perf_evlist__mmap_ops(struct perf_evlist *evlist,
+ struct perf_evlist_mmap_ops *ops,
+ struct perf_mmap_param *mp);
+
+void perf_evlist__init(struct perf_evlist *evlist);
+void perf_evlist__exit(struct perf_evlist *evlist);
+
/**
* __perf_evlist__for_each_entry - iterate thru all the evsels
* @list: list_head instance to iterate
@@ -60,6 +85,24 @@ int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd,
#define perf_evlist__for_each_entry_reverse(evlist, evsel) \
__perf_evlist__for_each_entry_reverse(&(evlist)->entries, evsel)
+/**
+ * __perf_evlist__for_each_entry_safe - safely iterate thru all the evsels
+ * @list: list_head instance to iterate
+ * @tmp: struct evsel temp iterator
+ * @evsel: struct evsel iterator
+ */
+#define __perf_evlist__for_each_entry_safe(list, tmp, evsel) \
+ list_for_each_entry_safe(evsel, tmp, list, node)
+
+/**
+ * perf_evlist__for_each_entry_safe - safely iterate thru all the evsels
+ * @evlist: evlist instance to iterate
+ * @evsel: struct perf_evsel iterator
+ * @tmp: struct perf_evsel temp iterator
+ */
+#define perf_evlist__for_each_entry_safe(evlist, tmp, evsel) \
+ __perf_evlist__for_each_entry_safe(&(evlist)->entries, tmp, evsel)
+
static inline struct perf_evsel *perf_evlist__first(struct perf_evlist *evlist)
{
return list_entry(evlist->entries.next, struct perf_evsel, node);
diff --git a/tools/perf/lib/include/internal/evsel.h b/tools/perf/lib/include/internal/evsel.h
index a69b8299c36f..1ffd083b235e 100644
--- a/tools/perf/lib/include/internal/evsel.h
+++ b/tools/perf/lib/include/internal/evsel.h
@@ -50,6 +50,7 @@ struct perf_evsel {
bool system_wide;
};
+void perf_evsel__init(struct perf_evsel *evsel, struct perf_event_attr *attr);
int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads);
void perf_evsel__close_fd(struct perf_evsel *evsel);
void perf_evsel__free_fd(struct perf_evsel *evsel);
diff --git a/tools/perf/lib/include/internal/mmap.h b/tools/perf/lib/include/internal/mmap.h
index ba1e519c15b9..be7556e0a2b2 100644
--- a/tools/perf/lib/include/internal/mmap.h
+++ b/tools/perf/lib/include/internal/mmap.h
@@ -10,23 +10,46 @@
/* perf sample has 16 bits size limit */
#define PERF_SAMPLE_MAX_SIZE (1 << 16)
+struct perf_mmap;
+
+typedef void (*libperf_unmap_cb_t)(struct perf_mmap *map);
+
/**
* struct perf_mmap - perf's ring buffer mmap details
*
* @refcnt - e.g. code using PERF_EVENT_IOC_SET_OUTPUT to share this
*/
struct perf_mmap {
- void *base;
- int mask;
- int fd;
- int cpu;
- refcount_t refcnt;
- u64 prev;
- u64 start;
- u64 end;
- bool overwrite;
- u64 flush;
- char event_copy[PERF_SAMPLE_MAX_SIZE] __aligned(8);
+ void *base;
+ int mask;
+ int fd;
+ int cpu;
+ refcount_t refcnt;
+ u64 prev;
+ u64 start;
+ u64 end;
+ bool overwrite;
+ u64 flush;
+ libperf_unmap_cb_t unmap_cb;
+ char event_copy[PERF_SAMPLE_MAX_SIZE] __aligned(8);
+ struct perf_mmap *next;
+};
+
+struct perf_mmap_param {
+ int prot;
+ int mask;
};
+size_t perf_mmap__mmap_len(struct perf_mmap *map);
+
+void perf_mmap__init(struct perf_mmap *map, struct perf_mmap *prev,
+ bool overwrite, libperf_unmap_cb_t unmap_cb);
+int perf_mmap__mmap(struct perf_mmap *map, struct perf_mmap_param *mp,
+ int fd, int cpu);
+void perf_mmap__munmap(struct perf_mmap *map);
+void perf_mmap__get(struct perf_mmap *map);
+void perf_mmap__put(struct perf_mmap *map);
+
+u64 perf_mmap__read_head(struct perf_mmap *map);
+
#endif /* __LIBPERF_INTERNAL_MMAP_H */
diff --git a/tools/perf/lib/include/internal/tests.h b/tools/perf/lib/include/internal/tests.h
index b7a20cd24ee1..2093e8868a67 100644
--- a/tools/perf/lib/include/internal/tests.h
+++ b/tools/perf/lib/include/internal/tests.h
@@ -4,14 +4,28 @@
#include <stdio.h>
-#define __T_START fprintf(stdout, "- running %s...", __FILE__)
-#define __T_OK fprintf(stdout, "OK\n")
-#define __T_FAIL fprintf(stdout, "FAIL\n")
+int tests_failed;
+
+#define __T_START \
+do { \
+ fprintf(stdout, "- running %s...", __FILE__); \
+ fflush(NULL); \
+ tests_failed = 0; \
+} while (0)
+
+#define __T_END \
+do { \
+ if (tests_failed) \
+ fprintf(stdout, " FAILED (%d)\n", tests_failed); \
+ else \
+ fprintf(stdout, "OK\n"); \
+} while (0)
#define __T(text, cond) \
do { \
if (!(cond)) { \
fprintf(stderr, "FAILED %s:%d %s\n", __FILE__, __LINE__, text); \
+ tests_failed++; \
return -1; \
} \
} while (0)
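
With __T() now bumping tests_failed, a test file built against libperf's internal headers is expected to look roughly like this (a sketch):

    #include <internal/tests.h>

    static int test_basic(void)
    {
            __T("arithmetic still works", 1 + 1 == 2);
            return 0;
    }

    int main(void)
    {
            __T_START;
            test_basic();
            __T_END;
            return tests_failed ? 1 : 0;
    }
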
diff --git a/tools/perf/lib/include/perf/core.h b/tools/perf/lib/include/perf/core.h
index cfd70e720c1c..a3f6d68edad7 100644
--- a/tools/perf/lib/include/perf/core.h
+++ b/tools/perf/lib/include/perf/core.h
@@ -9,9 +9,12 @@
#endif
enum libperf_print_level {
+ LIBPERF_ERR,
LIBPERF_WARN,
LIBPERF_INFO,
LIBPERF_DEBUG,
+ LIBPERF_DEBUG2,
+ LIBPERF_DEBUG3,
};
typedef int (*libperf_print_fn_t)(enum libperf_print_level level,
diff --git a/tools/perf/lib/include/perf/evlist.h b/tools/perf/lib/include/perf/evlist.h
index 8a2ce0757ab2..0a7479dc13bf 100644
--- a/tools/perf/lib/include/perf/evlist.h
+++ b/tools/perf/lib/include/perf/evlist.h
@@ -3,13 +3,13 @@
#define __LIBPERF_EVLIST_H
#include <perf/core.h>
+#include <stdbool.h>
struct perf_evlist;
struct perf_evsel;
struct perf_cpu_map;
struct perf_thread_map;
-LIBPERF_API void perf_evlist__init(struct perf_evlist *evlist);
LIBPERF_API void perf_evlist__add(struct perf_evlist *evlist,
struct perf_evsel *evsel);
LIBPERF_API void perf_evlist__remove(struct perf_evlist *evlist,
@@ -32,5 +32,18 @@ LIBPERF_API void perf_evlist__set_maps(struct perf_evlist *evlist,
struct perf_cpu_map *cpus,
struct perf_thread_map *threads);
LIBPERF_API int perf_evlist__poll(struct perf_evlist *evlist, int timeout);
+LIBPERF_API int perf_evlist__filter_pollfd(struct perf_evlist *evlist,
+ short revents_and_mask);
+
+LIBPERF_API int perf_evlist__mmap(struct perf_evlist *evlist, int pages);
+LIBPERF_API void perf_evlist__munmap(struct perf_evlist *evlist);
+
+LIBPERF_API struct perf_mmap *perf_evlist__next_mmap(struct perf_evlist *evlist,
+ struct perf_mmap *map,
+ bool overwrite);
+#define perf_evlist__for_each_mmap(evlist, pos, overwrite) \
+ for ((pos) = perf_evlist__next_mmap((evlist), NULL, overwrite); \
+ (pos) != NULL; \
+ (pos) = perf_evlist__next_mmap((evlist), (pos), overwrite))
#endif /* __LIBPERF_EVLIST_H */
diff --git a/tools/perf/lib/include/perf/evsel.h b/tools/perf/lib/include/perf/evsel.h
index 4388667f265c..557f5815a9c9 100644
--- a/tools/perf/lib/include/perf/evsel.h
+++ b/tools/perf/lib/include/perf/evsel.h
@@ -21,8 +21,6 @@ struct perf_counts_values {
};
};
-LIBPERF_API void perf_evsel__init(struct perf_evsel *evsel,
- struct perf_event_attr *attr);
LIBPERF_API struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr);
LIBPERF_API void perf_evsel__delete(struct perf_evsel *evsel);
LIBPERF_API int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus,
diff --git a/tools/perf/lib/include/perf/mmap.h b/tools/perf/lib/include/perf/mmap.h
new file mode 100644
index 000000000000..9508ad90d8b9
--- /dev/null
+++ b/tools/perf/lib/include/perf/mmap.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LIBPERF_MMAP_H
+#define __LIBPERF_MMAP_H
+
+#include <perf/core.h>
+
+struct perf_mmap;
+union perf_event;
+
+LIBPERF_API void perf_mmap__consume(struct perf_mmap *map);
+LIBPERF_API int perf_mmap__read_init(struct perf_mmap *map);
+LIBPERF_API void perf_mmap__read_done(struct perf_mmap *map);
+LIBPERF_API union perf_event *perf_mmap__read_event(struct perf_mmap *map);
+
+#endif /* __LIBPERF_MMAP_H */
diff --git a/tools/perf/lib/internal.h b/tools/perf/lib/internal.h
index dc92f241732e..2c27e158de6b 100644
--- a/tools/perf/lib/internal.h
+++ b/tools/perf/lib/internal.h
@@ -2,6 +2,8 @@
#ifndef __LIBPERF_INTERNAL_H
#define __LIBPERF_INTERNAL_H
+#include <perf/core.h>
+
void libperf_print(enum libperf_print_level level,
const char *format, ...)
__attribute__((format(printf, 2, 3)));
@@ -11,8 +13,11 @@ do { \
libperf_print(level, "libperf: " fmt, ##__VA_ARGS__); \
} while (0)
+#define pr_err(fmt, ...) __pr(LIBPERF_ERR, fmt, ##__VA_ARGS__)
#define pr_warning(fmt, ...) __pr(LIBPERF_WARN, fmt, ##__VA_ARGS__)
#define pr_info(fmt, ...) __pr(LIBPERF_INFO, fmt, ##__VA_ARGS__)
#define pr_debug(fmt, ...) __pr(LIBPERF_DEBUG, fmt, ##__VA_ARGS__)
+#define pr_debug2(fmt, ...) __pr(LIBPERF_DEBUG2, fmt, ##__VA_ARGS__)
+#define pr_debug3(fmt, ...) __pr(LIBPERF_DEBUG3, fmt, ##__VA_ARGS__)
#endif /* __LIBPERF_INTERNAL_H */
diff --git a/tools/perf/lib/libperf.map b/tools/perf/lib/libperf.map
index ab8dbde1136c..7be1af8a546c 100644
--- a/tools/perf/lib/libperf.map
+++ b/tools/perf/lib/libperf.map
@@ -21,7 +21,6 @@ LIBPERF_0.0.1 {
perf_evsel__delete;
perf_evsel__enable;
perf_evsel__disable;
- perf_evsel__init;
perf_evsel__open;
perf_evsel__close;
perf_evsel__read;
@@ -34,12 +33,19 @@ LIBPERF_0.0.1 {
perf_evlist__close;
perf_evlist__enable;
perf_evlist__disable;
- perf_evlist__init;
perf_evlist__add;
perf_evlist__remove;
perf_evlist__next;
perf_evlist__set_maps;
perf_evlist__poll;
+ perf_evlist__mmap;
+ perf_evlist__munmap;
+ perf_evlist__filter_pollfd;
+ perf_evlist__next_mmap;
+ perf_mmap__consume;
+ perf_mmap__read_init;
+ perf_mmap__read_done;
+ perf_mmap__read_event;
local:
*;
};
diff --git a/tools/perf/lib/mmap.c b/tools/perf/lib/mmap.c
new file mode 100644
index 000000000000..79d5ed6c38cc
--- /dev/null
+++ b/tools/perf/lib/mmap.c
@@ -0,0 +1,275 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <sys/mman.h>
+#include <inttypes.h>
+#include <asm/bug.h>
+#include <errno.h>
+#include <string.h>
+#include <linux/ring_buffer.h>
+#include <linux/perf_event.h>
+#include <perf/mmap.h>
+#include <perf/event.h>
+#include <internal/mmap.h>
+#include <internal/lib.h>
+#include <linux/kernel.h>
+#include "internal.h"
+
+void perf_mmap__init(struct perf_mmap *map, struct perf_mmap *prev,
+ bool overwrite, libperf_unmap_cb_t unmap_cb)
+{
+ map->fd = -1;
+ map->overwrite = overwrite;
+ map->unmap_cb = unmap_cb;
+ refcount_set(&map->refcnt, 0);
+ if (prev)
+ prev->next = map;
+}
+
+size_t perf_mmap__mmap_len(struct perf_mmap *map)
+{
+ return map->mask + 1 + page_size;
+}
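
Concretely: with 4 KiB pages and a 128-data-page ring, mask = 128 * 4096 - 1, so perf_mmap__mmap_len() returns 128 * 4096 + 4096 = 528384 bytes; the one extra page at the front of the mapping is the struct perf_event_mmap_page control header.
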
+
+int perf_mmap__mmap(struct perf_mmap *map, struct perf_mmap_param *mp,
+ int fd, int cpu)
+{
+ map->prev = 0;
+ map->mask = mp->mask;
+ map->base = mmap(NULL, perf_mmap__mmap_len(map), mp->prot,
+ MAP_SHARED, fd, 0);
+ if (map->base == MAP_FAILED) {
+ map->base = NULL;
+ return -1;
+ }
+
+ map->fd = fd;
+ map->cpu = cpu;
+ return 0;
+}
+
+void perf_mmap__munmap(struct perf_mmap *map)
+{
+ if (map && map->base != NULL) {
+ munmap(map->base, perf_mmap__mmap_len(map));
+ map->base = NULL;
+ map->fd = -1;
+ refcount_set(&map->refcnt, 0);
+ }
+ if (map && map->unmap_cb)
+ map->unmap_cb(map);
+}
+
+void perf_mmap__get(struct perf_mmap *map)
+{
+ refcount_inc(&map->refcnt);
+}
+
+void perf_mmap__put(struct perf_mmap *map)
+{
+ BUG_ON(map->base && refcount_read(&map->refcnt) == 0);
+
+ if (refcount_dec_and_test(&map->refcnt))
+ perf_mmap__munmap(map);
+}
+
+static inline void perf_mmap__write_tail(struct perf_mmap *md, u64 tail)
+{
+ ring_buffer_write_tail(md->base, tail);
+}
+
+u64 perf_mmap__read_head(struct perf_mmap *map)
+{
+ return ring_buffer_read_head(map->base);
+}
+
+static bool perf_mmap__empty(struct perf_mmap *map)
+{
+ struct perf_event_mmap_page *pc = map->base;
+
+ return perf_mmap__read_head(map) == map->prev && !pc->aux_size;
+}
+
+void perf_mmap__consume(struct perf_mmap *map)
+{
+ if (!map->overwrite) {
+ u64 old = map->prev;
+
+ perf_mmap__write_tail(map, old);
+ }
+
+ if (refcount_read(&map->refcnt) == 1 && perf_mmap__empty(map))
+ perf_mmap__put(map);
+}
+
+static int overwrite_rb_find_range(void *buf, int mask, u64 *start, u64 *end)
+{
+ struct perf_event_header *pheader;
+ u64 evt_head = *start;
+ int size = mask + 1;
+
+ pr_debug2("%s: buf=%p, start=%"PRIx64"\n", __func__, buf, *start);
+ pheader = (struct perf_event_header *)(buf + (*start & mask));
+ while (true) {
+ if (evt_head - *start >= (unsigned int)size) {
+ pr_debug("Finished reading overwrite ring buffer: rewind\n");
+ if (evt_head - *start > (unsigned int)size)
+ evt_head -= pheader->size;
+ *end = evt_head;
+ return 0;
+ }
+
+ pheader = (struct perf_event_header *)(buf + (evt_head & mask));
+
+ if (pheader->size == 0) {
+ pr_debug("Finished reading overwrite ring buffer: get start\n");
+ *end = evt_head;
+ return 0;
+ }
+
+ evt_head += pheader->size;
+ pr_debug3("move evt_head: %"PRIx64"\n", evt_head);
+ }
+ WARN_ONCE(1, "Shouldn't get here\n");
+ return -1;
+}
+
+/*
+ * Report the start and end of the available data in the ring buffer
+ */
+static int __perf_mmap__read_init(struct perf_mmap *md)
+{
+ u64 head = perf_mmap__read_head(md);
+ u64 old = md->prev;
+ unsigned char *data = md->base + page_size;
+ unsigned long size;
+
+ md->start = md->overwrite ? head : old;
+ md->end = md->overwrite ? old : head;
+
+ if ((md->end - md->start) < md->flush)
+ return -EAGAIN;
+
+ size = md->end - md->start;
+ if (size > (unsigned long)(md->mask) + 1) {
+ if (!md->overwrite) {
+ WARN_ONCE(1, "failed to keep up with mmap data. (warn only once)\n");
+
+ md->prev = head;
+ perf_mmap__consume(md);
+ return -EAGAIN;
+ }
+
+ /*
+ * Backward ring buffer is full. We still have a chance to read
+ * most of the data from it.
+ */
+ if (overwrite_rb_find_range(data, md->mask, &md->start, &md->end))
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int perf_mmap__read_init(struct perf_mmap *map)
+{
+ /*
+ * Check if event was unmapped due to a POLLHUP/POLLERR.
+ */
+ if (!refcount_read(&map->refcnt))
+ return -ENOENT;
+
+ return __perf_mmap__read_init(map);
+}
+
+/*
+ * Mandatory for overwrite mode
+ * The direction of overwrite mode is backward.
+ * The last perf_mmap__read() will set tail to map->prev.
+ * Need to correct map->prev to head, which is the end of the next read.
+ */
+void perf_mmap__read_done(struct perf_mmap *map)
+{
+ /*
+ * Check if event was unmapped due to a POLLHUP/POLLERR.
+ */
+ if (!refcount_read(&map->refcnt))
+ return;
+
+ map->prev = perf_mmap__read_head(map);
+}
+
+/* When check_messup is true, 'end' must point to a good entry */
+static union perf_event *perf_mmap__read(struct perf_mmap *map,
+ u64 *startp, u64 end)
+{
+ unsigned char *data = map->base + page_size;
+ union perf_event *event = NULL;
+ int diff = end - *startp;
+
+ if (diff >= (int)sizeof(event->header)) {
+ size_t size;
+
+ event = (union perf_event *)&data[*startp & map->mask];
+ size = event->header.size;
+
+ if (size < sizeof(event->header) || diff < (int)size)
+ return NULL;
+
+ /*
+ * Event straddles the mmap boundary -- header should always
+ * be inside due to u64 alignment of output.
+ */
+ if ((*startp & map->mask) + size != ((*startp + size) & map->mask)) {
+ unsigned int offset = *startp;
+ unsigned int len = min(sizeof(*event), size), cpy;
+ void *dst = map->event_copy;
+
+ do {
+ cpy = min(map->mask + 1 - (offset & map->mask), len);
+ memcpy(dst, &data[offset & map->mask], cpy);
+ offset += cpy;
+ dst += cpy;
+ len -= cpy;
+ } while (len);
+
+ event = (union perf_event *)map->event_copy;
+ }
+
+ *startp += size;
+ }
+
+ return event;
+}
+
+/*
+ * Read events from the ring buffer one by one;
+ * each call returns a single event.
+ *
+ * Usage:
+ * perf_mmap__read_init()
+ * while ((event = perf_mmap__read_event()) != NULL) {
+ *         // process the event
+ *         perf_mmap__consume()
+ * }
+ * perf_mmap__read_done()
+ */
+union perf_event *perf_mmap__read_event(struct perf_mmap *map)
+{
+ union perf_event *event;
+
+ /*
+ * Check if event was unmapped due to a POLLHUP/POLLERR.
+ */
+ if (!refcount_read(&map->refcnt))
+ return NULL;
+
+ /* non-overwrite mode doesn't pause the ring buffer */
+ if (!map->overwrite)
+ map->end = perf_mmap__read_head(map);
+
+ event = perf_mmap__read(map, &map->start, map->end);
+
+ if (!map->overwrite)
+ map->prev = map->start;
+
+ return event;
+}
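
The subtlest part of perf_mmap__read() above is the event that straddles the
end of the ring: because the buffer size is a power of two, the bytes are
stitched into the bounce buffer piecewise through the mask. A standalone
sketch of just that copy logic, using a made-up 16-byte ring (sizes and data
are illustrative only, not taken from the patch):

#include <stdio.h>
#include <string.h>

#define RING_SIZE 16 /* must be a power of two */
#define MASK (RING_SIZE - 1)

static void read_wrapped(const unsigned char *data, unsigned int offset,
			 size_t len, unsigned char *dst)
{
	while (len) {
		/* bytes available before the ring wraps at this offset */
		size_t cpy = RING_SIZE - (offset & MASK);

		if (cpy > len)
			cpy = len;
		memcpy(dst, &data[offset & MASK], cpy);
		offset += cpy;
		dst += cpy;
		len -= cpy;
	}
}

int main(void)
{
	unsigned char ring[RING_SIZE], out[8];
	int i;

	for (i = 0; i < RING_SIZE; i++)
		ring[i] = i;

	/* an 8-byte "event" starting 4 bytes before the wrap point */
	read_wrapped(ring, 12, sizeof(out), out);
	for (i = 0; i < (int)sizeof(out); i++)
		printf("%u ", out[i]); /* prints: 12 13 14 15 0 1 2 3 */
	printf("\n");
	return 0;
}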
diff --git a/tools/perf/lib/tests/Makefile b/tools/perf/lib/tests/Makefile
index 1ee4e9ba848b..a43cd08c5c03 100644
--- a/tools/perf/lib/tests/Makefile
+++ b/tools/perf/lib/tests/Makefile
@@ -16,13 +16,13 @@ all:
include $(srctree)/tools/scripts/Makefile.include
-INCLUDE = -I$(srctree)/tools/perf/lib/include -I$(srctree)/tools/include
+INCLUDE = -I$(srctree)/tools/perf/lib/include -I$(srctree)/tools/include -I$(srctree)/tools/lib
$(TESTS_A): FORCE
- $(QUIET_LINK)$(CC) $(INCLUDE) $(CFLAGS) -o $@ $(subst -a,.c,$@) ../libperf.a
+ $(QUIET_LINK)$(CC) $(INCLUDE) $(CFLAGS) -o $@ $(subst -a,.c,$@) ../libperf.a $(LIBAPI)
$(TESTS_SO): FORCE
- $(QUIET_LINK)$(CC) $(INCLUDE) $(CFLAGS) -L.. -o $@ $(subst -so,.c,$@) -lperf
+ $(QUIET_LINK)$(CC) $(INCLUDE) $(CFLAGS) -L.. -o $@ $(subst -so,.c,$@) $(LIBAPI) -lperf
all: $(TESTS_A) $(TESTS_SO)
diff --git a/tools/perf/lib/tests/test-cpumap.c b/tools/perf/lib/tests/test-cpumap.c
index aa34c20df07e..c8d45091e7c2 100644
--- a/tools/perf/lib/tests/test-cpumap.c
+++ b/tools/perf/lib/tests/test-cpumap.c
@@ -26,6 +26,6 @@ int main(int argc, char **argv)
perf_cpu_map__put(cpus);
perf_cpu_map__put(cpus);
- __T_OK;
+ __T_END;
return 0;
}
diff --git a/tools/perf/lib/tests/test-evlist.c b/tools/perf/lib/tests/test-evlist.c
index e6b2ab2e2bde..6d8ebe0c2504 100644
--- a/tools/perf/lib/tests/test-evlist.c
+++ b/tools/perf/lib/tests/test-evlist.c
@@ -1,12 +1,23 @@
// SPDX-License-Identifier: GPL-2.0
+#define _GNU_SOURCE // needed for sched.h to get sched_[gs]etaffinity and CPU_(ZERO,SET)
+#include <sched.h>
#include <stdio.h>
#include <stdarg.h>
+#include <unistd.h>
+#include <stdlib.h>
#include <linux/perf_event.h>
+#include <linux/limits.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <sys/prctl.h>
#include <perf/cpumap.h>
#include <perf/threadmap.h>
#include <perf/evlist.h>
#include <perf/evsel.h>
+#include <perf/mmap.h>
+#include <perf/event.h>
#include <internal/tests.h>
+#include <api/fs/fs.h>
static int libperf_print(enum libperf_print_level level,
const char *fmt, va_list ap)
@@ -181,6 +192,210 @@ static int test_stat_thread_enable(void)
return 0;
}
+static int test_mmap_thread(void)
+{
+ struct perf_evlist *evlist;
+ struct perf_evsel *evsel;
+ struct perf_mmap *map;
+ struct perf_cpu_map *cpus;
+ struct perf_thread_map *threads;
+ struct perf_event_attr attr = {
+ .type = PERF_TYPE_TRACEPOINT,
+ .sample_period = 1,
+ .wakeup_watermark = 1,
+ .disabled = 1,
+ };
+ char path[PATH_MAX];
+ int id, err, pid, go_pipe[2];
+ union perf_event *event;
+ char bf;
+ int count = 0;
+
+ snprintf(path, PATH_MAX, "%s/kernel/debug/tracing/events/syscalls/sys_enter_prctl/id",
+ sysfs__mountpoint());
+
+ if (filename__read_int(path, &id)) {
+ fprintf(stderr, "error: failed to get tracepoint id: %s\n", path);
+ return -1;
+ }
+
+ attr.config = id;
+
+ err = pipe(go_pipe);
+ __T("failed to create pipe", err == 0);
+
+ fflush(NULL);
+
+ pid = fork();
+ if (!pid) {
+ int i;
+
+ read(go_pipe[0], &bf, 1);
+
+ /* Generate 100 prctl calls. */
+ for (i = 0; i < 100; i++)
+ prctl(0, 0, 0, 0, 0);
+
+ exit(0);
+ }
+
+ threads = perf_thread_map__new_dummy();
+ __T("failed to create threads", threads);
+
+ cpus = perf_cpu_map__dummy_new();
+ __T("failed to create cpus", cpus);
+
+ perf_thread_map__set_pid(threads, 0, pid);
+
+ evlist = perf_evlist__new();
+ __T("failed to create evlist", evlist);
+
+ evsel = perf_evsel__new(&attr);
+ __T("failed to create evsel1", evsel);
+
+ perf_evlist__add(evlist, evsel);
+
+ perf_evlist__set_maps(evlist, cpus, threads);
+
+ err = perf_evlist__open(evlist);
+ __T("failed to open evlist", err == 0);
+
+ err = perf_evlist__mmap(evlist, 4);
+ __T("failed to mmap evlist", err == 0);
+
+ perf_evlist__enable(evlist);
+
+ /* kick the child and wait for it to finish */
+ write(go_pipe[1], &bf, 1);
+ waitpid(pid, NULL, 0);
+
+ /*
+ * There's no need to call perf_evlist__disable(),
+ * the monitored process is dead by now.
+ */
+
+ perf_evlist__for_each_mmap(evlist, map, false) {
+ if (perf_mmap__read_init(map) < 0)
+ continue;
+
+ while ((event = perf_mmap__read_event(map)) != NULL) {
+ count++;
+ perf_mmap__consume(map);
+ }
+
+ perf_mmap__read_done(map);
+ }
+
+ /* calls perf_evlist__munmap/perf_evlist__close */
+ perf_evlist__delete(evlist);
+
+ perf_thread_map__put(threads);
+ perf_cpu_map__put(cpus);
+
+ /*
+ * The generated prctl calls should match the
+ * number of events in the buffer.
+ */
+ __T("failed count", count == 100);
+
+ return 0;
+}
+
+static int test_mmap_cpus(void)
+{
+ struct perf_evlist *evlist;
+ struct perf_evsel *evsel;
+ struct perf_mmap *map;
+ struct perf_cpu_map *cpus;
+ struct perf_event_attr attr = {
+ .type = PERF_TYPE_TRACEPOINT,
+ .sample_period = 1,
+ .wakeup_watermark = 1,
+ .disabled = 1,
+ };
+ cpu_set_t saved_mask;
+ char path[PATH_MAX];
+ int id, err, cpu, tmp;
+ union perf_event *event;
+ int count = 0;
+
+ snprintf(path, PATH_MAX, "%s/kernel/debug/tracing/events/syscalls/sys_enter_prctl/id",
+ sysfs__mountpoint());
+
+ if (filename__read_int(path, &id)) {
+ fprintf(stderr, "error: failed to get tracepoint id: %s\n", path);
+ return -1;
+ }
+
+ attr.config = id;
+
+ cpus = perf_cpu_map__new(NULL);
+ __T("failed to create cpus", cpus);
+
+ evlist = perf_evlist__new();
+ __T("failed to create evlist", evlist);
+
+ evsel = perf_evsel__new(&attr);
+ __T("failed to create evsel1", evsel);
+
+ perf_evlist__add(evlist, evsel);
+
+ perf_evlist__set_maps(evlist, cpus, NULL);
+
+ err = perf_evlist__open(evlist);
+ __T("failed to open evlist", err == 0);
+
+ err = perf_evlist__mmap(evlist, 4);
+ __T("failed to mmap evlist", err == 0);
+
+ perf_evlist__enable(evlist);
+
+ err = sched_getaffinity(0, sizeof(saved_mask), &saved_mask);
+ __T("sched_getaffinity failed", err == 0);
+
+ perf_cpu_map__for_each_cpu(cpu, tmp, cpus) {
+ cpu_set_t mask;
+
+ CPU_ZERO(&mask);
+ CPU_SET(cpu, &mask);
+
+ err = sched_setaffinity(0, sizeof(mask), &mask);
+ __T("sched_setaffinity failed", err == 0);
+
+ prctl(0, 0, 0, 0, 0);
+ }
+
+ err = sched_setaffinity(0, sizeof(saved_mask), &saved_mask);
+ __T("sched_setaffinity failed", err == 0);
+
+ perf_evlist__disable(evlist);
+
+ perf_evlist__for_each_mmap(evlist, map, false) {
+ if (perf_mmap__read_init(map) < 0)
+ continue;
+
+ while ((event = perf_mmap__read_event(map)) != NULL) {
+ count++;
+ perf_mmap__consume(map);
+ }
+
+ perf_mmap__read_done(map);
+ }
+
+ /* calls perf_evlist__munmap/perf_evlist__close */
+ perf_evlist__delete(evlist);
+
+ /*
+ * The number of generated prctl events should match or
+ * exceed the number of cpus (we are monitoring system-wide).
+ */
+ __T("failed count", count >= perf_cpu_map__nr(cpus));
+
+ perf_cpu_map__put(cpus);
+
+ return 0;
+}
+
int main(int argc, char **argv)
{
__T_START;
@@ -190,7 +405,9 @@ int main(int argc, char **argv)
test_stat_cpu();
test_stat_thread();
test_stat_thread_enable();
+ test_mmap_thread();
+ test_mmap_cpus();
- __T_OK;
+ __T_END;
return 0;
}
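
Both new tests begin by resolving the sys_enter_prctl tracepoint id from
tracefs before creating the evsel. For readers without libapi at hand, a
dependency-free equivalent of the sysfs__mountpoint()/filename__read_int()
pair looks roughly like the helper below; it assumes debugfs is mounted at
the conventional path, just as the tests do:

#include <stdio.h>

static int tracepoint_id(const char *sys, const char *name, int *id)
{
	char path[4096];
	FILE *f;
	int ret = -1;

	snprintf(path, sizeof(path),
		 "/sys/kernel/debug/tracing/events/%s/%s/id", sys, name);
	f = fopen(path, "r");
	if (!f)
		return -1; /* tracefs/debugfs not mounted or no permission */
	if (fscanf(f, "%d", id) == 1)
		ret = 0;
	fclose(f);
	return ret;
}

/* usage: int id; if (!tracepoint_id("syscalls", "sys_enter_prctl", &id)) ... */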
diff --git a/tools/perf/lib/tests/test-evsel.c b/tools/perf/lib/tests/test-evsel.c
index 1b6c4285ac2b..135722ac965b 100644
--- a/tools/perf/lib/tests/test-evsel.c
+++ b/tools/perf/lib/tests/test-evsel.c
@@ -130,6 +130,6 @@ int main(int argc, char **argv)
test_stat_thread();
test_stat_thread_enable();
- __T_OK;
+ __T_END;
return 0;
}
diff --git a/tools/perf/lib/tests/test-threadmap.c b/tools/perf/lib/tests/test-threadmap.c
index 8c5f47247d9e..7dc4d6fbedde 100644
--- a/tools/perf/lib/tests/test-threadmap.c
+++ b/tools/perf/lib/tests/test-threadmap.c
@@ -26,6 +26,6 @@ int main(int argc, char **argv)
perf_thread_map__put(threads);
perf_thread_map__put(threads);
- __T_OK;
+ __T_END;
return 0;
}
diff --git a/tools/perf/pmu-events/arch/arm64/ampere/emag/branch.json b/tools/perf/pmu-events/arch/arm64/ampere/emag/branch.json
index abc98b018446..2d15b11e5383 100644
--- a/tools/perf/pmu-events/arch/arm64/ampere/emag/branch.json
+++ b/tools/perf/pmu-events/arch/arm64/ampere/emag/branch.json
@@ -1,12 +1,12 @@
[
{
- "ArchStdEvent": "BR_IMMED_SPEC",
+ "ArchStdEvent": "BR_IMMED_SPEC"
},
{
- "ArchStdEvent": "BR_RETURN_SPEC",
+ "ArchStdEvent": "BR_RETURN_SPEC"
},
{
- "ArchStdEvent": "BR_INDIRECT_SPEC",
+ "ArchStdEvent": "BR_INDIRECT_SPEC"
},
{
"PublicDescription": "Mispredicted or not predicted branch speculatively executed",
@@ -19,5 +19,5 @@
"EventCode": "0x12",
"EventName": "BR_PRED",
"BriefDescription": "Predictable branch"
- },
+ }
]
diff --git a/tools/perf/pmu-events/arch/arm64/ampere/emag/bus.json b/tools/perf/pmu-events/arch/arm64/ampere/emag/bus.json
index 687b2629e1d1..5c1a9a922ca4 100644
--- a/tools/perf/pmu-events/arch/arm64/ampere/emag/bus.json
+++ b/tools/perf/pmu-events/arch/arm64/ampere/emag/bus.json
@@ -1,26 +1,26 @@
[
{
- "ArchStdEvent": "BUS_ACCESS_RD",
+ "ArchStdEvent": "BUS_ACCESS_RD"
},
{
- "ArchStdEvent": "BUS_ACCESS_WR",
+ "ArchStdEvent": "BUS_ACCESS_WR"
},
{
- "ArchStdEvent": "BUS_ACCESS_SHARED",
+ "ArchStdEvent": "BUS_ACCESS_SHARED"
},
{
- "ArchStdEvent": "BUS_ACCESS_NOT_SHARED",
+ "ArchStdEvent": "BUS_ACCESS_NOT_SHARED"
},
{
- "ArchStdEvent": "BUS_ACCESS_NORMAL",
+ "ArchStdEvent": "BUS_ACCESS_NORMAL"
},
{
- "ArchStdEvent": "BUS_ACCESS_PERIPH",
+ "ArchStdEvent": "BUS_ACCESS_PERIPH"
},
{
"PublicDescription": "Bus access",
"EventCode": "0x19",
"EventName": "BUS_ACCESS",
"BriefDescription": "Bus access"
- },
+ }
]
diff --git a/tools/perf/pmu-events/arch/arm64/ampere/emag/cache.json b/tools/perf/pmu-events/arch/arm64/ampere/emag/cache.json
index df9201434cb6..40010a8724b3 100644
--- a/tools/perf/pmu-events/arch/arm64/ampere/emag/cache.json
+++ b/tools/perf/pmu-events/arch/arm64/ampere/emag/cache.json
@@ -1,42 +1,42 @@
[
{
- "ArchStdEvent": "L1D_CACHE_RD",
+ "ArchStdEvent": "L1D_CACHE_RD"
},
{
- "ArchStdEvent": "L1D_CACHE_WR",
+ "ArchStdEvent": "L1D_CACHE_WR"
},
{
- "ArchStdEvent": "L1D_CACHE_REFILL_RD",
+ "ArchStdEvent": "L1D_CACHE_REFILL_RD"
},
{
- "ArchStdEvent": "L1D_CACHE_INVAL",
+ "ArchStdEvent": "L1D_CACHE_INVAL"
},
{
- "ArchStdEvent": "L1D_TLB_REFILL_RD",
+ "ArchStdEvent": "L1D_TLB_REFILL_RD"
},
{
- "ArchStdEvent": "L1D_TLB_REFILL_WR",
+ "ArchStdEvent": "L1D_TLB_REFILL_WR"
},
{
- "ArchStdEvent": "L2D_CACHE_RD",
+ "ArchStdEvent": "L2D_CACHE_RD"
},
{
- "ArchStdEvent": "L2D_CACHE_WR",
+ "ArchStdEvent": "L2D_CACHE_WR"
},
{
- "ArchStdEvent": "L2D_CACHE_REFILL_RD",
+ "ArchStdEvent": "L2D_CACHE_REFILL_RD"
},
{
- "ArchStdEvent": "L2D_CACHE_REFILL_WR",
+ "ArchStdEvent": "L2D_CACHE_REFILL_WR"
},
{
- "ArchStdEvent": "L2D_CACHE_WB_VICTIM",
+ "ArchStdEvent": "L2D_CACHE_WB_VICTIM"
},
{
- "ArchStdEvent": "L2D_CACHE_WB_CLEAN",
+ "ArchStdEvent": "L2D_CACHE_WB_CLEAN"
},
{
- "ArchStdEvent": "L2D_CACHE_INVAL",
+ "ArchStdEvent": "L2D_CACHE_INVAL"
},
{
"PublicDescription": "Level 1 instruction cache refill",
@@ -187,5 +187,5 @@
"EventCode": "0x116",
"EventName": "PAGE_WALK_L2_STAGE2_HIT",
"BriefDescription": "Page walk, L2 stage-2 hit"
- },
+ }
]
diff --git a/tools/perf/pmu-events/arch/arm64/ampere/emag/clock.json b/tools/perf/pmu-events/arch/arm64/ampere/emag/clock.json
index 38cd1f1a70dc..51d1dc1519b2 100644
--- a/tools/perf/pmu-events/arch/arm64/ampere/emag/clock.json
+++ b/tools/perf/pmu-events/arch/arm64/ampere/emag/clock.json
@@ -16,5 +16,5 @@
"EventCode": "0x110",
"EventName": "Wait_CYCLES",
"BriefDescription": "Wait state cycle"
- },
+ }
]
diff --git a/tools/perf/pmu-events/arch/arm64/ampere/emag/exception.json b/tools/perf/pmu-events/arch/arm64/ampere/emag/exception.json
index 3720dc28a15f..66e51bc64b22 100644
--- a/tools/perf/pmu-events/arch/arm64/ampere/emag/exception.json
+++ b/tools/perf/pmu-events/arch/arm64/ampere/emag/exception.json
@@ -1,39 +1,39 @@
[
{
- "ArchStdEvent": "EXC_UNDEF",
+ "ArchStdEvent": "EXC_UNDEF"
},
{
- "ArchStdEvent": "EXC_SVC",
+ "ArchStdEvent": "EXC_SVC"
},
{
- "ArchStdEvent": "EXC_PABORT",
+ "ArchStdEvent": "EXC_PABORT"
},
{
- "ArchStdEvent": "EXC_DABORT",
+ "ArchStdEvent": "EXC_DABORT"
},
{
- "ArchStdEvent": "EXC_IRQ",
+ "ArchStdEvent": "EXC_IRQ"
},
{
- "ArchStdEvent": "EXC_FIQ",
+ "ArchStdEvent": "EXC_FIQ"
},
{
- "ArchStdEvent": "EXC_HVC",
+ "ArchStdEvent": "EXC_HVC"
},
{
- "ArchStdEvent": "EXC_TRAP_PABORT",
+ "ArchStdEvent": "EXC_TRAP_PABORT"
},
{
- "ArchStdEvent": "EXC_TRAP_DABORT",
+ "ArchStdEvent": "EXC_TRAP_DABORT"
},
{
- "ArchStdEvent": "EXC_TRAP_OTHER",
+ "ArchStdEvent": "EXC_TRAP_OTHER"
},
{
- "ArchStdEvent": "EXC_TRAP_IRQ",
+ "ArchStdEvent": "EXC_TRAP_IRQ"
},
{
- "ArchStdEvent": "EXC_TRAP_FIQ",
+ "ArchStdEvent": "EXC_TRAP_FIQ"
},
{
"PublicDescription": "Exception taken",
@@ -46,5 +46,5 @@
"EventCode": "0x0a",
"EventName": "EXC_RETURN",
"BriefDescription": "Exception return"
- },
+ }
]
diff --git a/tools/perf/pmu-events/arch/arm64/ampere/emag/instruction.json b/tools/perf/pmu-events/arch/arm64/ampere/emag/instruction.json
index 82cf753e6472..0d3e46776642 100644
--- a/tools/perf/pmu-events/arch/arm64/ampere/emag/instruction.json
+++ b/tools/perf/pmu-events/arch/arm64/ampere/emag/instruction.json
@@ -1,42 +1,42 @@
[
{
- "ArchStdEvent": "LD_SPEC",
+ "ArchStdEvent": "LD_SPEC"
},
{
- "ArchStdEvent": "ST_SPEC",
+ "ArchStdEvent": "ST_SPEC"
},
{
- "ArchStdEvent": "LDST_SPEC",
+ "ArchStdEvent": "LDST_SPEC"
},
{
- "ArchStdEvent": "DP_SPEC",
+ "ArchStdEvent": "DP_SPEC"
},
{
- "ArchStdEvent": "ASE_SPEC",
+ "ArchStdEvent": "ASE_SPEC"
},
{
- "ArchStdEvent": "VFP_SPEC",
+ "ArchStdEvent": "VFP_SPEC"
},
{
- "ArchStdEvent": "PC_WRITE_SPEC",
+ "ArchStdEvent": "PC_WRITE_SPEC"
},
{
- "ArchStdEvent": "CRYPTO_SPEC",
+ "ArchStdEvent": "CRYPTO_SPEC"
},
{
- "ArchStdEvent": "ISB_SPEC",
+ "ArchStdEvent": "ISB_SPEC"
},
{
- "ArchStdEvent": "DSB_SPEC",
+ "ArchStdEvent": "DSB_SPEC"
},
{
- "ArchStdEvent": "DMB_SPEC",
+ "ArchStdEvent": "DMB_SPEC"
},
{
- "ArchStdEvent": "RC_LD_SPEC",
+ "ArchStdEvent": "RC_LD_SPEC"
},
{
- "ArchStdEvent": "RC_ST_SPEC",
+ "ArchStdEvent": "RC_ST_SPEC"
},
{
"PublicDescription": "Instruction architecturally executed, software increment",
@@ -85,5 +85,5 @@
"EventCode": "0x100",
"EventName": "NOP_SPEC",
"BriefDescription": "Speculatively executed, NOP"
- },
+ }
]
diff --git a/tools/perf/pmu-events/arch/arm64/ampere/emag/intrinsic.json b/tools/perf/pmu-events/arch/arm64/ampere/emag/intrinsic.json
index 2aecc5c2347d..7ecffb989ae0 100644
--- a/tools/perf/pmu-events/arch/arm64/ampere/emag/intrinsic.json
+++ b/tools/perf/pmu-events/arch/arm64/ampere/emag/intrinsic.json
@@ -1,14 +1,14 @@
[
{
- "ArchStdEvent": "LDREX_SPEC",
+ "ArchStdEvent": "LDREX_SPEC"
},
{
- "ArchStdEvent": "STREX_PASS_SPEC",
+ "ArchStdEvent": "STREX_PASS_SPEC"
},
{
- "ArchStdEvent": "STREX_FAIL_SPEC",
+ "ArchStdEvent": "STREX_FAIL_SPEC"
},
{
- "ArchStdEvent": "STREX_SPEC",
- },
+ "ArchStdEvent": "STREX_SPEC"
+ }
]
diff --git a/tools/perf/pmu-events/arch/arm64/ampere/emag/memory.json b/tools/perf/pmu-events/arch/arm64/ampere/emag/memory.json
index 08508697b318..c2fe674df960 100644
--- a/tools/perf/pmu-events/arch/arm64/ampere/emag/memory.json
+++ b/tools/perf/pmu-events/arch/arm64/ampere/emag/memory.json
@@ -1,18 +1,18 @@
[
{
- "ArchStdEvent": "MEM_ACCESS_RD",
+ "ArchStdEvent": "MEM_ACCESS_RD"
},
{
- "ArchStdEvent": "MEM_ACCESS_WR",
+ "ArchStdEvent": "MEM_ACCESS_WR"
},
{
- "ArchStdEvent": "UNALIGNED_LD_SPEC",
+ "ArchStdEvent": "UNALIGNED_LD_SPEC"
},
{
- "ArchStdEvent": "UNALIGNED_ST_SPEC",
+ "ArchStdEvent": "UNALIGNED_ST_SPEC"
},
{
- "ArchStdEvent": "UNALIGNED_LDST_SPEC",
+ "ArchStdEvent": "UNALIGNED_LDST_SPEC"
},
{
"PublicDescription": "Data memory access",
@@ -25,5 +25,5 @@
"EventCode": "0x1a",
"EventName": "MEM_ERROR",
"BriefDescription": "Memory error"
- },
+ }
]
diff --git a/tools/perf/pmu-events/arch/arm64/ampere/emag/pipeline.json b/tools/perf/pmu-events/arch/arm64/ampere/emag/pipeline.json
index e2087de586bf..17c71aba6612 100644
--- a/tools/perf/pmu-events/arch/arm64/ampere/emag/pipeline.json
+++ b/tools/perf/pmu-events/arch/arm64/ampere/emag/pipeline.json
@@ -46,5 +46,5 @@
"EventCode": "0x10f",
"EventName": "FX_STALL",
"BriefDescription": "FX stalled"
- },
+ }
]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a53/branch.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a53/branch.json
index 0b0e6b26605b..8f5cf88aaf38 100644
--- a/tools/perf/pmu-events/arch/arm64/arm/cortex-a53/branch.json
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a53/branch.json
@@ -1,6 +1,6 @@
[
{
- "ArchStdEvent": "BR_INDIRECT_SPEC",
+ "ArchStdEvent": "BR_INDIRECT_SPEC"
},
{
"EventCode": "0xC9",
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a53/bus.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a53/bus.json
index ce33b2553277..0a70b82f753f 100644
--- a/tools/perf/pmu-events/arch/arm64/arm/cortex-a53/bus.json
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a53/bus.json
@@ -1,8 +1,8 @@
[
{
- "ArchStdEvent": "BUS_ACCESS_RD",
+ "ArchStdEvent": "BUS_ACCESS_RD"
},
{
- "ArchStdEvent": "BUS_ACCESS_WR",
+ "ArchStdEvent": "BUS_ACCESS_WR"
}
]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a53/other.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a53/other.json
index 6cc6cbd7bf0b..e9f7e4c3900d 100644
--- a/tools/perf/pmu-events/arch/arm64/arm/cortex-a53/other.json
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a53/other.json
@@ -1,9 +1,9 @@
[
{
- "ArchStdEvent": "EXC_IRQ",
+ "ArchStdEvent": "EXC_IRQ"
},
{
- "ArchStdEvent": "EXC_FIQ",
+ "ArchStdEvent": "EXC_FIQ"
},
{
"EventCode": "0xC6",
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a57-a72/core-imp-def.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a57-a72/core-imp-def.json
index 0ac9b7927450..543c7692677a 100644
--- a/tools/perf/pmu-events/arch/arm64/arm/cortex-a57-a72/core-imp-def.json
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a57-a72/core-imp-def.json
@@ -1,179 +1,179 @@
[
{
- "ArchStdEvent": "L1D_CACHE_RD",
+ "ArchStdEvent": "L1D_CACHE_RD"
},
{
- "ArchStdEvent": "L1D_CACHE_WR",
+ "ArchStdEvent": "L1D_CACHE_WR"
},
{
- "ArchStdEvent": "L1D_CACHE_REFILL_RD",
+ "ArchStdEvent": "L1D_CACHE_REFILL_RD"
},
{
- "ArchStdEvent": "L1D_CACHE_REFILL_WR",
+ "ArchStdEvent": "L1D_CACHE_REFILL_WR"
},
{
- "ArchStdEvent": "L1D_CACHE_WB_VICTIM",
+ "ArchStdEvent": "L1D_CACHE_WB_VICTIM"
},
{
- "ArchStdEvent": "L1D_CACHE_WB_CLEAN",
+ "ArchStdEvent": "L1D_CACHE_WB_CLEAN"
},
{
- "ArchStdEvent": "L1D_CACHE_INVAL",
+ "ArchStdEvent": "L1D_CACHE_INVAL"
},
{
- "ArchStdEvent": "L1D_TLB_REFILL_RD",
+ "ArchStdEvent": "L1D_TLB_REFILL_RD"
},
{
- "ArchStdEvent": "L1D_TLB_REFILL_WR",
+ "ArchStdEvent": "L1D_TLB_REFILL_WR"
},
{
- "ArchStdEvent": "L2D_CACHE_RD",
+ "ArchStdEvent": "L2D_CACHE_RD"
},
{
- "ArchStdEvent": "L2D_CACHE_WR",
+ "ArchStdEvent": "L2D_CACHE_WR"
},
{
- "ArchStdEvent": "L2D_CACHE_REFILL_RD",
+ "ArchStdEvent": "L2D_CACHE_REFILL_RD"
},
{
- "ArchStdEvent": "L2D_CACHE_REFILL_WR",
+ "ArchStdEvent": "L2D_CACHE_REFILL_WR"
},
{
- "ArchStdEvent": "L2D_CACHE_WB_VICTIM",
+ "ArchStdEvent": "L2D_CACHE_WB_VICTIM"
},
{
- "ArchStdEvent": "L2D_CACHE_WB_CLEAN",
+ "ArchStdEvent": "L2D_CACHE_WB_CLEAN"
},
{
- "ArchStdEvent": "L2D_CACHE_INVAL",
+ "ArchStdEvent": "L2D_CACHE_INVAL"
},
{
- "ArchStdEvent": "BUS_ACCESS_RD",
+ "ArchStdEvent": "BUS_ACCESS_RD"
},
{
- "ArchStdEvent": "BUS_ACCESS_WR",
+ "ArchStdEvent": "BUS_ACCESS_WR"
},
{
- "ArchStdEvent": "BUS_ACCESS_SHARED",
+ "ArchStdEvent": "BUS_ACCESS_SHARED"
},
{
- "ArchStdEvent": "BUS_ACCESS_NOT_SHARED",
+ "ArchStdEvent": "BUS_ACCESS_NOT_SHARED"
},
{
- "ArchStdEvent": "BUS_ACCESS_NORMAL",
+ "ArchStdEvent": "BUS_ACCESS_NORMAL"
},
{
- "ArchStdEvent": "BUS_ACCESS_PERIPH",
+ "ArchStdEvent": "BUS_ACCESS_PERIPH"
},
{
- "ArchStdEvent": "MEM_ACCESS_RD",
+ "ArchStdEvent": "MEM_ACCESS_RD"
},
{
- "ArchStdEvent": "MEM_ACCESS_WR",
+ "ArchStdEvent": "MEM_ACCESS_WR"
},
{
- "ArchStdEvent": "UNALIGNED_LD_SPEC",
+ "ArchStdEvent": "UNALIGNED_LD_SPEC"
},
{
- "ArchStdEvent": "UNALIGNED_ST_SPEC",
+ "ArchStdEvent": "UNALIGNED_ST_SPEC"
},
{
- "ArchStdEvent": "UNALIGNED_LDST_SPEC",
+ "ArchStdEvent": "UNALIGNED_LDST_SPEC"
},
{
- "ArchStdEvent": "LDREX_SPEC",
+ "ArchStdEvent": "LDREX_SPEC"
},
{
- "ArchStdEvent": "STREX_PASS_SPEC",
+ "ArchStdEvent": "STREX_PASS_SPEC"
},
{
- "ArchStdEvent": "STREX_FAIL_SPEC",
+ "ArchStdEvent": "STREX_FAIL_SPEC"
},
{
- "ArchStdEvent": "LD_SPEC",
+ "ArchStdEvent": "LD_SPEC"
},
{
- "ArchStdEvent": "ST_SPEC",
+ "ArchStdEvent": "ST_SPEC"
},
{
- "ArchStdEvent": "LDST_SPEC",
+ "ArchStdEvent": "LDST_SPEC"
},
{
- "ArchStdEvent": "DP_SPEC",
+ "ArchStdEvent": "DP_SPEC"
},
{
- "ArchStdEvent": "ASE_SPEC",
+ "ArchStdEvent": "ASE_SPEC"
},
{
- "ArchStdEvent": "VFP_SPEC",
+ "ArchStdEvent": "VFP_SPEC"
},
{
- "ArchStdEvent": "PC_WRITE_SPEC",
+ "ArchStdEvent": "PC_WRITE_SPEC"
},
{
- "ArchStdEvent": "CRYPTO_SPEC",
+ "ArchStdEvent": "CRYPTO_SPEC"
},
{
- "ArchStdEvent": "BR_IMMED_SPEC",
+ "ArchStdEvent": "BR_IMMED_SPEC"
},
{
- "ArchStdEvent": "BR_RETURN_SPEC",
+ "ArchStdEvent": "BR_RETURN_SPEC"
},
{
- "ArchStdEvent": "BR_INDIRECT_SPEC",
+ "ArchStdEvent": "BR_INDIRECT_SPEC"
},
{
- "ArchStdEvent": "ISB_SPEC",
+ "ArchStdEvent": "ISB_SPEC"
},
{
- "ArchStdEvent": "DSB_SPEC",
+ "ArchStdEvent": "DSB_SPEC"
},
{
- "ArchStdEvent": "DMB_SPEC",
+ "ArchStdEvent": "DMB_SPEC"
},
{
- "ArchStdEvent": "EXC_UNDEF",
+ "ArchStdEvent": "EXC_UNDEF"
},
{
- "ArchStdEvent": "EXC_SVC",
+ "ArchStdEvent": "EXC_SVC"
},
{
- "ArchStdEvent": "EXC_PABORT",
+ "ArchStdEvent": "EXC_PABORT"
},
{
- "ArchStdEvent": "EXC_DABORT",
+ "ArchStdEvent": "EXC_DABORT"
},
{
- "ArchStdEvent": "EXC_IRQ",
+ "ArchStdEvent": "EXC_IRQ"
},
{
- "ArchStdEvent": "EXC_FIQ",
+ "ArchStdEvent": "EXC_FIQ"
},
{
- "ArchStdEvent": "EXC_SMC",
+ "ArchStdEvent": "EXC_SMC"
},
{
- "ArchStdEvent": "EXC_HVC",
+ "ArchStdEvent": "EXC_HVC"
},
{
- "ArchStdEvent": "EXC_TRAP_PABORT",
+ "ArchStdEvent": "EXC_TRAP_PABORT"
},
{
- "ArchStdEvent": "EXC_TRAP_DABORT",
+ "ArchStdEvent": "EXC_TRAP_DABORT"
},
{
- "ArchStdEvent": "EXC_TRAP_OTHER",
+ "ArchStdEvent": "EXC_TRAP_OTHER"
},
{
- "ArchStdEvent": "EXC_TRAP_IRQ",
+ "ArchStdEvent": "EXC_TRAP_IRQ"
},
{
- "ArchStdEvent": "EXC_TRAP_FIQ",
+ "ArchStdEvent": "EXC_TRAP_FIQ"
},
{
- "ArchStdEvent": "RC_LD_SPEC",
+ "ArchStdEvent": "RC_LD_SPEC"
},
{
- "ArchStdEvent": "RC_ST_SPEC",
- },
+ "ArchStdEvent": "RC_ST_SPEC"
+ }
]
diff --git a/tools/perf/pmu-events/arch/arm64/armv8-recommended.json b/tools/perf/pmu-events/arch/arm64/armv8-recommended.json
index 6328828c018c..d0a19866563d 100644
--- a/tools/perf/pmu-events/arch/arm64/armv8-recommended.json
+++ b/tools/perf/pmu-events/arch/arm64/armv8-recommended.json
@@ -154,297 +154,297 @@
"EventCode": "0x61",
"EventName": "BUS_ACCESS_WR",
"BriefDescription": "Bus access write"
- }
+ },
{
"PublicDescription": "Bus access, Normal, Cacheable, Shareable",
"EventCode": "0x62",
"EventName": "BUS_ACCESS_SHARED",
"BriefDescription": "Bus access, Normal, Cacheable, Shareable"
- }
+ },
{
"PublicDescription": "Bus access, not Normal, Cacheable, Shareable",
"EventCode": "0x63",
"EventName": "BUS_ACCESS_NOT_SHARED",
"BriefDescription": "Bus access, not Normal, Cacheable, Shareable"
- }
+ },
{
"PublicDescription": "Bus access, Normal",
"EventCode": "0x64",
"EventName": "BUS_ACCESS_NORMAL",
"BriefDescription": "Bus access, Normal"
- }
+ },
{
"PublicDescription": "Bus access, peripheral",
"EventCode": "0x65",
"EventName": "BUS_ACCESS_PERIPH",
"BriefDescription": "Bus access, peripheral"
- }
+ },
{
"PublicDescription": "Data memory access, read",
"EventCode": "0x66",
"EventName": "MEM_ACCESS_RD",
"BriefDescription": "Data memory access, read"
- }
+ },
{
"PublicDescription": "Data memory access, write",
"EventCode": "0x67",
"EventName": "MEM_ACCESS_WR",
"BriefDescription": "Data memory access, write"
- }
+ },
{
"PublicDescription": "Unaligned access, read",
"EventCode": "0x68",
"EventName": "UNALIGNED_LD_SPEC",
"BriefDescription": "Unaligned access, read"
- }
+ },
{
"PublicDescription": "Unaligned access, write",
"EventCode": "0x69",
"EventName": "UNALIGNED_ST_SPEC",
"BriefDescription": "Unaligned access, write"
- }
+ },
{
"PublicDescription": "Unaligned access",
"EventCode": "0x6a",
"EventName": "UNALIGNED_LDST_SPEC",
"BriefDescription": "Unaligned access"
- }
+ },
{
"PublicDescription": "Exclusive operation speculatively executed, LDREX or LDX",
"EventCode": "0x6c",
"EventName": "LDREX_SPEC",
"BriefDescription": "Exclusive operation speculatively executed, LDREX or LDX"
- }
+ },
{
"PublicDescription": "Exclusive operation speculatively executed, STREX or STX pass",
"EventCode": "0x6d",
"EventName": "STREX_PASS_SPEC",
"BriefDescription": "Exclusive operation speculatively executed, STREX or STX pass"
- }
+ },
{
"PublicDescription": "Exclusive operation speculatively executed, STREX or STX fail",
"EventCode": "0x6e",
"EventName": "STREX_FAIL_SPEC",
"BriefDescription": "Exclusive operation speculatively executed, STREX or STX fail"
- }
+ },
{
"PublicDescription": "Exclusive operation speculatively executed, STREX or STX",
"EventCode": "0x6f",
"EventName": "STREX_SPEC",
"BriefDescription": "Exclusive operation speculatively executed, STREX or STX"
- }
+ },
{
"PublicDescription": "Operation speculatively executed, load",
"EventCode": "0x70",
"EventName": "LD_SPEC",
"BriefDescription": "Operation speculatively executed, load"
- }
+ },
{
- "PublicDescription": "Operation speculatively executed, store"
+ "PublicDescription": "Operation speculatively executed, store",
"EventCode": "0x71",
"EventName": "ST_SPEC",
"BriefDescription": "Operation speculatively executed, store"
- }
+ },
{
"PublicDescription": "Operation speculatively executed, load or store",
"EventCode": "0x72",
"EventName": "LDST_SPEC",
"BriefDescription": "Operation speculatively executed, load or store"
- }
+ },
{
"PublicDescription": "Operation speculatively executed, integer data processing",
"EventCode": "0x73",
"EventName": "DP_SPEC",
"BriefDescription": "Operation speculatively executed, integer data processing"
- }
+ },
{
"PublicDescription": "Operation speculatively executed, Advanced SIMD instruction",
"EventCode": "0x74",
"EventName": "ASE_SPEC",
- "BriefDescription": "Operation speculatively executed, Advanced SIMD instruction",
- }
+ "BriefDescription": "Operation speculatively executed, Advanced SIMD instruction"
+ },
{
"PublicDescription": "Operation speculatively executed, floating-point instruction",
"EventCode": "0x75",
"EventName": "VFP_SPEC",
"BriefDescription": "Operation speculatively executed, floating-point instruction"
- }
+ },
{
"PublicDescription": "Operation speculatively executed, software change of the PC",
"EventCode": "0x76",
"EventName": "PC_WRITE_SPEC",
"BriefDescription": "Operation speculatively executed, software change of the PC"
- }
+ },
{
"PublicDescription": "Operation speculatively executed, Cryptographic instruction",
"EventCode": "0x77",
"EventName": "CRYPTO_SPEC",
"BriefDescription": "Operation speculatively executed, Cryptographic instruction"
- }
+ },
{
- "PublicDescription": "Branch speculatively executed, immediate branch"
+ "PublicDescription": "Branch speculatively executed, immediate branch",
"EventCode": "0x78",
"EventName": "BR_IMMED_SPEC",
"BriefDescription": "Branch speculatively executed, immediate branch"
- }
+ },
{
- "PublicDescription": "Branch speculatively executed, procedure return"
+ "PublicDescription": "Branch speculatively executed, procedure return",
"EventCode": "0x79",
"EventName": "BR_RETURN_SPEC",
"BriefDescription": "Branch speculatively executed, procedure return"
- }
+ },
{
- "PublicDescription": "Branch speculatively executed, indirect branch"
+ "PublicDescription": "Branch speculatively executed, indirect branch",
"EventCode": "0x7a",
"EventName": "BR_INDIRECT_SPEC",
"BriefDescription": "Branch speculatively executed, indirect branch"
- }
+ },
{
- "PublicDescription": "Barrier speculatively executed, ISB"
+ "PublicDescription": "Barrier speculatively executed, ISB",
"EventCode": "0x7c",
"EventName": "ISB_SPEC",
"BriefDescription": "Barrier speculatively executed, ISB"
- }
+ },
{
- "PublicDescription": "Barrier speculatively executed, DSB"
+ "PublicDescription": "Barrier speculatively executed, DSB",
"EventCode": "0x7d",
"EventName": "DSB_SPEC",
"BriefDescription": "Barrier speculatively executed, DSB"
- }
+ },
{
- "PublicDescription": "Barrier speculatively executed, DMB"
+ "PublicDescription": "Barrier speculatively executed, DMB",
"EventCode": "0x7e",
"EventName": "DMB_SPEC",
"BriefDescription": "Barrier speculatively executed, DMB"
- }
+ },
{
- "PublicDescription": "Exception taken, Other synchronous"
+ "PublicDescription": "Exception taken, Other synchronous",
"EventCode": "0x81",
"EventName": "EXC_UNDEF",
"BriefDescription": "Exception taken, Other synchronous"
- }
+ },
{
- "PublicDescription": "Exception taken, Supervisor Call"
+ "PublicDescription": "Exception taken, Supervisor Call",
"EventCode": "0x82",
"EventName": "EXC_SVC",
"BriefDescription": "Exception taken, Supervisor Call"
- }
+ },
{
- "PublicDescription": "Exception taken, Instruction Abort"
+ "PublicDescription": "Exception taken, Instruction Abort",
"EventCode": "0x83",
"EventName": "EXC_PABORT",
"BriefDescription": "Exception taken, Instruction Abort"
- }
+ },
{
- "PublicDescription": "Exception taken, Data Abort and SError"
+ "PublicDescription": "Exception taken, Data Abort and SError",
"EventCode": "0x84",
"EventName": "EXC_DABORT",
"BriefDescription": "Exception taken, Data Abort and SError"
- }
+ },
{
- "PublicDescription": "Exception taken, IRQ"
+ "PublicDescription": "Exception taken, IRQ",
"EventCode": "0x86",
"EventName": "EXC_IRQ",
"BriefDescription": "Exception taken, IRQ"
- }
+ },
{
- "PublicDescription": "Exception taken, FIQ"
+ "PublicDescription": "Exception taken, FIQ",
"EventCode": "0x87",
"EventName": "EXC_FIQ",
"BriefDescription": "Exception taken, FIQ"
- }
+ },
{
- "PublicDescription": "Exception taken, Secure Monitor Call"
+ "PublicDescription": "Exception taken, Secure Monitor Call",
"EventCode": "0x88",
"EventName": "EXC_SMC",
"BriefDescription": "Exception taken, Secure Monitor Call"
- }
+ },
{
- "PublicDescription": "Exception taken, Hypervisor Call"
+ "PublicDescription": "Exception taken, Hypervisor Call",
"EventCode": "0x8a",
"EventName": "EXC_HVC",
"BriefDescription": "Exception taken, Hypervisor Call"
- }
+ },
{
- "PublicDescription": "Exception taken, Instruction Abort not taken locally"
+ "PublicDescription": "Exception taken, Instruction Abort not taken locally",
"EventCode": "0x8b",
"EventName": "EXC_TRAP_PABORT",
"BriefDescription": "Exception taken, Instruction Abort not taken locally"
- }
+ },
{
- "PublicDescription": "Exception taken, Data Abort or SError not taken locally"
+ "PublicDescription": "Exception taken, Data Abort or SError not taken locally",
"EventCode": "0x8c",
"EventName": "EXC_TRAP_DABORT",
"BriefDescription": "Exception taken, Data Abort or SError not taken locally"
- }
+ },
{
- "PublicDescription": "Exception taken, Other traps not taken locally"
+ "PublicDescription": "Exception taken, Other traps not taken locally",
"EventCode": "0x8d",
"EventName": "EXC_TRAP_OTHER",
"BriefDescription": "Exception taken, Other traps not taken locally"
- }
+ },
{
- "PublicDescription": "Exception taken, IRQ not taken locally"
+ "PublicDescription": "Exception taken, IRQ not taken locally",
"EventCode": "0x8e",
"EventName": "EXC_TRAP_IRQ",
"BriefDescription": "Exception taken, IRQ not taken locally"
- }
+ },
{
- "PublicDescription": "Exception taken, FIQ not taken locally"
+ "PublicDescription": "Exception taken, FIQ not taken locally",
"EventCode": "0x8f",
"EventName": "EXC_TRAP_FIQ",
"BriefDescription": "Exception taken, FIQ not taken locally"
- }
+ },
{
- "PublicDescription": "Release consistency operation speculatively executed, Load-Acquire"
+ "PublicDescription": "Release consistency operation speculatively executed, Load-Acquire",
"EventCode": "0x90",
"EventName": "RC_LD_SPEC",
"BriefDescription": "Release consistency operation speculatively executed, Load-Acquire"
- }
+ },
{
- "PublicDescription": "Release consistency operation speculatively executed, Store-Release"
+ "PublicDescription": "Release consistency operation speculatively executed, Store-Release",
"EventCode": "0x91",
"EventName": "RC_ST_SPEC",
"BriefDescription": "Release consistency operation speculatively executed, Store-Release"
- }
+ },
{
- "PublicDescription": "Attributable Level 3 data or unified cache access, read"
+ "PublicDescription": "Attributable Level 3 data or unified cache access, read",
"EventCode": "0xa0",
"EventName": "L3D_CACHE_RD",
"BriefDescription": "Attributable Level 3 data or unified cache access, read"
- }
+ },
{
- "PublicDescription": "Attributable Level 3 data or unified cache access, write"
+ "PublicDescription": "Attributable Level 3 data or unified cache access, write",
"EventCode": "0xa1",
"EventName": "L3D_CACHE_WR",
"BriefDescription": "Attributable Level 3 data or unified cache access, write"
- }
+ },
{
- "PublicDescription": "Attributable Level 3 data or unified cache refill, read"
+ "PublicDescription": "Attributable Level 3 data or unified cache refill, read",
"EventCode": "0xa2",
"EventName": "L3D_CACHE_REFILL_RD",
"BriefDescription": "Attributable Level 3 data or unified cache refill, read"
- }
+ },
{
- "PublicDescription": "Attributable Level 3 data or unified cache refill, write"
+ "PublicDescription": "Attributable Level 3 data or unified cache refill, write",
"EventCode": "0xa3",
"EventName": "L3D_CACHE_REFILL_WR",
"BriefDescription": "Attributable Level 3 data or unified cache refill, write"
- }
+ },
{
- "PublicDescription": "Attributable Level 3 data or unified cache Write-Back, victim"
+ "PublicDescription": "Attributable Level 3 data or unified cache Write-Back, victim",
"EventCode": "0xa6",
"EventName": "L3D_CACHE_WB_VICTIM",
"BriefDescription": "Attributable Level 3 data or unified cache Write-Back, victim"
- }
+ },
{
- "PublicDescription": "Attributable Level 3 data or unified cache Write-Back, cache clean"
+ "PublicDescription": "Attributable Level 3 data or unified cache Write-Back, cache clean",
"EventCode": "0xa7",
"EventName": "L3D_CACHE_WB_CLEAN",
"BriefDescription": "Attributable Level 3 data or unified cache Write-Back, cache clean"
- }
+ },
{
- "PublicDescription": "Attributable Level 3 data or unified cache access, invalidate"
+ "PublicDescription": "Attributable Level 3 data or unified cache access, invalidate",
"EventCode": "0xa8",
"EventName": "L3D_CACHE_INVAL",
"BriefDescription": "Attributable Level 3 data or unified cache access, invalidate"
diff --git a/tools/perf/pmu-events/arch/arm64/cavium/thunderx2/core-imp-def.json b/tools/perf/pmu-events/arch/arm64/cavium/thunderx2/core-imp-def.json
index 752e47eb6977..3a87d351cc39 100644
--- a/tools/perf/pmu-events/arch/arm64/cavium/thunderx2/core-imp-def.json
+++ b/tools/perf/pmu-events/arch/arm64/cavium/thunderx2/core-imp-def.json
@@ -1,113 +1,113 @@
[
{
- "ArchStdEvent": "L1D_CACHE_RD",
+ "ArchStdEvent": "L1D_CACHE_RD"
},
{
- "ArchStdEvent": "L1D_CACHE_WR",
+ "ArchStdEvent": "L1D_CACHE_WR"
},
{
- "ArchStdEvent": "L1D_CACHE_REFILL_RD",
+ "ArchStdEvent": "L1D_CACHE_REFILL_RD"
},
{
- "ArchStdEvent": "L1D_CACHE_REFILL_WR",
+ "ArchStdEvent": "L1D_CACHE_REFILL_WR"
},
{
- "ArchStdEvent": "L1D_CACHE_REFILL_INNER",
+ "ArchStdEvent": "L1D_CACHE_REFILL_INNER"
},
{
- "ArchStdEvent": "L1D_CACHE_REFILL_OUTER",
+ "ArchStdEvent": "L1D_CACHE_REFILL_OUTER"
},
{
- "ArchStdEvent": "L1D_CACHE_WB_VICTIM",
+ "ArchStdEvent": "L1D_CACHE_WB_VICTIM"
},
{
- "ArchStdEvent": "L1D_CACHE_WB_CLEAN",
+ "ArchStdEvent": "L1D_CACHE_WB_CLEAN"
},
{
- "ArchStdEvent": "L1D_CACHE_INVAL",
+ "ArchStdEvent": "L1D_CACHE_INVAL"
},
{
- "ArchStdEvent": "L1D_TLB_REFILL_RD",
+ "ArchStdEvent": "L1D_TLB_REFILL_RD"
},
{
- "ArchStdEvent": "L1D_TLB_REFILL_WR",
+ "ArchStdEvent": "L1D_TLB_REFILL_WR"
},
{
- "ArchStdEvent": "L1D_TLB_RD",
+ "ArchStdEvent": "L1D_TLB_RD"
},
{
- "ArchStdEvent": "L1D_TLB_WR",
+ "ArchStdEvent": "L1D_TLB_WR"
},
{
- "ArchStdEvent": "L2D_TLB_REFILL_RD",
+ "ArchStdEvent": "L2D_TLB_REFILL_RD"
},
{
- "ArchStdEvent": "L2D_TLB_REFILL_WR",
+ "ArchStdEvent": "L2D_TLB_REFILL_WR"
},
{
- "ArchStdEvent": "L2D_TLB_RD",
+ "ArchStdEvent": "L2D_TLB_RD"
},
{
- "ArchStdEvent": "L2D_TLB_WR",
+ "ArchStdEvent": "L2D_TLB_WR"
},
{
- "ArchStdEvent": "BUS_ACCESS_RD",
+ "ArchStdEvent": "BUS_ACCESS_RD"
},
{
- "ArchStdEvent": "BUS_ACCESS_WR",
+ "ArchStdEvent": "BUS_ACCESS_WR"
},
{
- "ArchStdEvent": "MEM_ACCESS_RD",
+ "ArchStdEvent": "MEM_ACCESS_RD"
},
{
- "ArchStdEvent": "MEM_ACCESS_WR",
+ "ArchStdEvent": "MEM_ACCESS_WR"
},
{
- "ArchStdEvent": "UNALIGNED_LD_SPEC",
+ "ArchStdEvent": "UNALIGNED_LD_SPEC"
},
{
- "ArchStdEvent": "UNALIGNED_ST_SPEC",
+ "ArchStdEvent": "UNALIGNED_ST_SPEC"
},
{
- "ArchStdEvent": "UNALIGNED_LDST_SPEC",
+ "ArchStdEvent": "UNALIGNED_LDST_SPEC"
},
{
- "ArchStdEvent": "EXC_UNDEF",
+ "ArchStdEvent": "EXC_UNDEF"
},
{
- "ArchStdEvent": "EXC_SVC",
+ "ArchStdEvent": "EXC_SVC"
},
{
- "ArchStdEvent": "EXC_PABORT",
+ "ArchStdEvent": "EXC_PABORT"
},
{
- "ArchStdEvent": "EXC_DABORT",
+ "ArchStdEvent": "EXC_DABORT"
},
{
- "ArchStdEvent": "EXC_IRQ",
+ "ArchStdEvent": "EXC_IRQ"
},
{
- "ArchStdEvent": "EXC_FIQ",
+ "ArchStdEvent": "EXC_FIQ"
},
{
- "ArchStdEvent": "EXC_SMC",
+ "ArchStdEvent": "EXC_SMC"
},
{
- "ArchStdEvent": "EXC_HVC",
+ "ArchStdEvent": "EXC_HVC"
},
{
- "ArchStdEvent": "EXC_TRAP_PABORT",
+ "ArchStdEvent": "EXC_TRAP_PABORT"
},
{
- "ArchStdEvent": "EXC_TRAP_DABORT",
+ "ArchStdEvent": "EXC_TRAP_DABORT"
},
{
- "ArchStdEvent": "EXC_TRAP_OTHER",
+ "ArchStdEvent": "EXC_TRAP_OTHER"
},
{
- "ArchStdEvent": "EXC_TRAP_IRQ",
+ "ArchStdEvent": "EXC_TRAP_IRQ"
},
{
- "ArchStdEvent": "EXC_TRAP_FIQ",
+ "ArchStdEvent": "EXC_TRAP_FIQ"
}
]
diff --git a/tools/perf/pmu-events/arch/arm64/hisilicon/hip08/core-imp-def.json b/tools/perf/pmu-events/arch/arm64/hisilicon/hip08/core-imp-def.json
index 9f0f15d15f75..a4a6408639b4 100644
--- a/tools/perf/pmu-events/arch/arm64/hisilicon/hip08/core-imp-def.json
+++ b/tools/perf/pmu-events/arch/arm64/hisilicon/hip08/core-imp-def.json
@@ -1,122 +1,122 @@
[
{
- "ArchStdEvent": "L1D_CACHE_RD",
+ "ArchStdEvent": "L1D_CACHE_RD"
},
{
- "ArchStdEvent": "L1D_CACHE_WR",
+ "ArchStdEvent": "L1D_CACHE_WR"
},
{
- "ArchStdEvent": "L1D_CACHE_REFILL_RD",
+ "ArchStdEvent": "L1D_CACHE_REFILL_RD"
},
{
- "ArchStdEvent": "L1D_CACHE_REFILL_WR",
+ "ArchStdEvent": "L1D_CACHE_REFILL_WR"
},
{
- "ArchStdEvent": "L1D_CACHE_WB_VICTIM",
+ "ArchStdEvent": "L1D_CACHE_WB_VICTIM"
},
{
- "ArchStdEvent": "L1D_CACHE_WB_CLEAN",
+ "ArchStdEvent": "L1D_CACHE_WB_CLEAN"
},
{
- "ArchStdEvent": "L1D_CACHE_INVAL",
+ "ArchStdEvent": "L1D_CACHE_INVAL"
},
{
- "ArchStdEvent": "L1D_TLB_REFILL_RD",
+ "ArchStdEvent": "L1D_TLB_REFILL_RD"
},
{
- "ArchStdEvent": "L1D_TLB_REFILL_WR",
+ "ArchStdEvent": "L1D_TLB_REFILL_WR"
},
{
- "ArchStdEvent": "L1D_TLB_RD",
+ "ArchStdEvent": "L1D_TLB_RD"
},
{
- "ArchStdEvent": "L1D_TLB_WR",
+ "ArchStdEvent": "L1D_TLB_WR"
},
{
- "ArchStdEvent": "L2D_CACHE_RD",
+ "ArchStdEvent": "L2D_CACHE_RD"
},
{
- "ArchStdEvent": "L2D_CACHE_WR",
+ "ArchStdEvent": "L2D_CACHE_WR"
},
{
- "ArchStdEvent": "L2D_CACHE_REFILL_RD",
+ "ArchStdEvent": "L2D_CACHE_REFILL_RD"
},
{
- "ArchStdEvent": "L2D_CACHE_REFILL_WR",
+ "ArchStdEvent": "L2D_CACHE_REFILL_WR"
},
{
- "ArchStdEvent": "L2D_CACHE_WB_VICTIM",
+ "ArchStdEvent": "L2D_CACHE_WB_VICTIM"
},
{
- "ArchStdEvent": "L2D_CACHE_WB_CLEAN",
+ "ArchStdEvent": "L2D_CACHE_WB_CLEAN"
},
{
- "ArchStdEvent": "L2D_CACHE_INVAL",
+ "ArchStdEvent": "L2D_CACHE_INVAL"
},
{
"PublicDescription": "Level 1 instruction cache prefetch access count",
"EventCode": "0x102e",
"EventName": "L1I_CACHE_PRF",
- "BriefDescription": "L1I cache prefetch access count",
+ "BriefDescription": "L1I cache prefetch access count"
},
{
"PublicDescription": "Level 1 instruction cache miss due to prefetch access count",
"EventCode": "0x102f",
"EventName": "L1I_CACHE_PRF_REFILL",
- "BriefDescription": "L1I cache miss due to prefetch access count",
+ "BriefDescription": "L1I cache miss due to prefetch access count"
},
{
"PublicDescription": "Instruction queue is empty",
"EventCode": "0x1043",
"EventName": "IQ_IS_EMPTY",
- "BriefDescription": "Instruction queue is empty",
+ "BriefDescription": "Instruction queue is empty"
},
{
"PublicDescription": "Instruction fetch stall cycles",
"EventCode": "0x1044",
"EventName": "IF_IS_STALL",
- "BriefDescription": "Instruction fetch stall cycles",
+ "BriefDescription": "Instruction fetch stall cycles"
},
{
"PublicDescription": "Instructions can receive, but not send",
"EventCode": "0x2014",
"EventName": "FETCH_BUBBLE",
- "BriefDescription": "Instructions can receive, but not send",
+ "BriefDescription": "Instructions can receive, but not send"
},
{
"PublicDescription": "Prefetch request from LSU",
"EventCode": "0x6013",
"EventName": "PRF_REQ",
- "BriefDescription": "Prefetch request from LSU",
+ "BriefDescription": "Prefetch request from LSU"
},
{
"PublicDescription": "Hit on prefetched data",
"EventCode": "0x6014",
"EventName": "HIT_ON_PRF",
- "BriefDescription": "Hit on prefetched data",
+ "BriefDescription": "Hit on prefetched data"
},
{
"PublicDescription": "Cycles of that the number of issuing micro operations are less than 4",
"EventCode": "0x7001",
"EventName": "EXE_STALL_CYCLE",
- "BriefDescription": "Cycles of that the number of issue ups are less than 4",
+ "BriefDescription": "Cycles of that the number of issue ups are less than 4"
},
{
"PublicDescription": "No any micro operation is issued and meanwhile any load operation is not resolved",
"EventCode": "0x7004",
"EventName": "MEM_STALL_ANYLOAD",
- "BriefDescription": "No any micro operation is issued and meanwhile any load operation is not resolved",
+ "BriefDescription": "No any micro operation is issued and meanwhile any load operation is not resolved"
},
{
"PublicDescription": "No any micro operation is issued and meanwhile there is any load operation missing L1 cache and pending data refill",
"EventCode": "0x7006",
"EventName": "MEM_STALL_L1MISS",
- "BriefDescription": "No any micro operation is issued and meanwhile there is any load operation missing L1 cache and pending data refill",
+ "BriefDescription": "No any micro operation is issued and meanwhile there is any load operation missing L1 cache and pending data refill"
},
{
"PublicDescription": "No any micro operation is issued and meanwhile there is any load operation missing both L1 and L2 cache and pending data refill from L3 cache",
"EventCode": "0x7007",
"EventName": "MEM_STALL_L2MISS",
- "BriefDescription": "No any micro operation is issued and meanwhile there is any load operation missing both L1 and L2 cache and pending data refill from L3 cache",
- },
+ "BriefDescription": "No any micro operation is issued and meanwhile there is any load operation missing both L1 and L2 cache and pending data refill from L3 cache"
+ }
]
diff --git a/tools/perf/pmu-events/arch/arm64/hisilicon/hip08/uncore-ddrc.json b/tools/perf/pmu-events/arch/arm64/hisilicon/hip08/uncore-ddrc.json
index 0d1556fcdffe..61514d38601b 100644
--- a/tools/perf/pmu-events/arch/arm64/hisilicon/hip08/uncore-ddrc.json
+++ b/tools/perf/pmu-events/arch/arm64/hisilicon/hip08/uncore-ddrc.json
@@ -1,44 +1,58 @@
[
{
+ "EventCode": "0x00",
+ "EventName": "uncore_hisi_ddrc.flux_wr",
+ "BriefDescription": "DDRC total write operations",
+ "PublicDescription": "DDRC total write operations",
+ "Unit": "hisi_sccl,ddrc"
+ },
+ {
+ "EventCode": "0x01",
+ "EventName": "uncore_hisi_ddrc.flux_rd",
+ "BriefDescription": "DDRC total read operations",
+ "PublicDescription": "DDRC total read operations",
+ "Unit": "hisi_sccl,ddrc"
+ },
+ {
"EventCode": "0x02",
"EventName": "uncore_hisi_ddrc.flux_wcmd",
"BriefDescription": "DDRC write commands",
"PublicDescription": "DDRC write commands",
- "Unit": "hisi_sccl,ddrc",
+ "Unit": "hisi_sccl,ddrc"
},
{
"EventCode": "0x03",
"EventName": "uncore_hisi_ddrc.flux_rcmd",
"BriefDescription": "DDRC read commands",
"PublicDescription": "DDRC read commands",
- "Unit": "hisi_sccl,ddrc",
+ "Unit": "hisi_sccl,ddrc"
},
{
"EventCode": "0x04",
- "EventName": "uncore_hisi_ddrc.flux_wr",
+ "EventName": "uncore_hisi_ddrc.pre_cmd",
"BriefDescription": "DDRC precharge commands",
"PublicDescription": "DDRC precharge commands",
- "Unit": "hisi_sccl,ddrc",
+ "Unit": "hisi_sccl,ddrc"
},
{
"EventCode": "0x05",
"EventName": "uncore_hisi_ddrc.act_cmd",
"BriefDescription": "DDRC active commands",
"PublicDescription": "DDRC active commands",
- "Unit": "hisi_sccl,ddrc",
+ "Unit": "hisi_sccl,ddrc"
},
{
"EventCode": "0x06",
"EventName": "uncore_hisi_ddrc.rnk_chg",
"BriefDescription": "DDRC rank commands",
"PublicDescription": "DDRC rank commands",
- "Unit": "hisi_sccl,ddrc",
+ "Unit": "hisi_sccl,ddrc"
},
{
"EventCode": "0x07",
"EventName": "uncore_hisi_ddrc.rw_chg",
"BriefDescription": "DDRC read and write changes",
"PublicDescription": "DDRC read and write changes",
- "Unit": "hisi_sccl,ddrc",
- },
+ "Unit": "hisi_sccl,ddrc"
+ }
]
diff --git a/tools/perf/pmu-events/arch/arm64/hisilicon/hip08/uncore-hha.json b/tools/perf/pmu-events/arch/arm64/hisilicon/hip08/uncore-hha.json
index 447d3064de90..ada86782933f 100644
--- a/tools/perf/pmu-events/arch/arm64/hisilicon/hip08/uncore-hha.json
+++ b/tools/perf/pmu-events/arch/arm64/hisilicon/hip08/uncore-hha.json
@@ -4,48 +4,69 @@
"EventName": "uncore_hisi_hha.rx_ops_num",
"BriefDescription": "The number of all operations received by the HHA",
"PublicDescription": "The number of all operations received by the HHA",
- "Unit": "hisi_sccl,hha",
+ "Unit": "hisi_sccl,hha"
},
{
"EventCode": "0x01",
"EventName": "uncore_hisi_hha.rx_outer",
"BriefDescription": "The number of all operations received by the HHA from another socket",
"PublicDescription": "The number of all operations received by the HHA from another socket",
- "Unit": "hisi_sccl,hha",
+ "Unit": "hisi_sccl,hha"
},
{
"EventCode": "0x02",
"EventName": "uncore_hisi_hha.rx_sccl",
"BriefDescription": "The number of all operations received by the HHA from another SCCL in this socket",
"PublicDescription": "The number of all operations received by the HHA from another SCCL in this socket",
- "Unit": "hisi_sccl,hha",
+ "Unit": "hisi_sccl,hha"
+ },
+ {
+ "EventCode": "0x03",
+ "EventName": "uncore_hisi_hha.rx_ccix",
+ "BriefDescription": "Count of the number of operations that HHA has received from CCIX",
+ "PublicDescription": "Count of the number of operations that HHA has received from CCIX",
+ "Unit": "hisi_sccl,hha"
},
{
"EventCode": "0x1c",
"EventName": "uncore_hisi_hha.rd_ddr_64b",
"BriefDescription": "The number of read operations sent by HHA to DDRC which size is 64 bytes",
"PublicDescription": "The number of read operations sent by HHA to DDRC which size is 64bytes",
- "Unit": "hisi_sccl,hha",
+ "Unit": "hisi_sccl,hha"
},
{
"EventCode": "0x1d",
- "EventName": "uncore_hisi_hha.wr_dr_64b",
+ "EventName": "uncore_hisi_hha.wr_ddr_64b",
"BriefDescription": "The number of write operations sent by HHA to DDRC which size is 64 bytes",
"PublicDescription": "The number of write operations sent by HHA to DDRC which size is 64 bytes",
- "Unit": "hisi_sccl,hha",
+ "Unit": "hisi_sccl,hha"
},
{
"EventCode": "0x1e",
"EventName": "uncore_hisi_hha.rd_ddr_128b",
"BriefDescription": "The number of read operations sent by HHA to DDRC which size is 128 bytes",
"PublicDescription": "The number of read operations sent by HHA to DDRC which size is 128 bytes",
- "Unit": "hisi_sccl,hha",
+ "Unit": "hisi_sccl,hha"
},
{
"EventCode": "0x1f",
"EventName": "uncore_hisi_hha.wr_ddr_128b",
"BriefDescription": "The number of write operations sent by HHA to DDRC which size is 128 bytes",
"PublicDescription": "The number of write operations sent by HHA to DDRC which size is 128 bytes",
- "Unit": "hisi_sccl,hha",
+ "Unit": "hisi_sccl,hha"
},
+ {
+ "EventCode": "0x20",
+ "EventName": "uncore_hisi_hha.spill_num",
+ "BriefDescription": "Count of the number of spill operations that the HHA has sent",
+ "PublicDescription": "Count of the number of spill operations that the HHA has sent",
+ "Unit": "hisi_sccl,hha"
+ },
+ {
+ "EventCode": "0x21",
+ "EventName": "uncore_hisi_hha.spill_success",
+ "BriefDescription": "Count of the number of successful spill operations that the HHA has sent",
+ "PublicDescription": "Count of the number of successful spill operations that the HHA has sent",
+ "Unit": "hisi_sccl,hha"
+ }
]
diff --git a/tools/perf/pmu-events/arch/arm64/hisilicon/hip08/uncore-l3c.json b/tools/perf/pmu-events/arch/arm64/hisilicon/hip08/uncore-l3c.json
index ca48747642e1..67ab19e8cf3a 100644
--- a/tools/perf/pmu-events/arch/arm64/hisilicon/hip08/uncore-l3c.json
+++ b/tools/perf/pmu-events/arch/arm64/hisilicon/hip08/uncore-l3c.json
@@ -4,34 +4,90 @@
"EventName": "uncore_hisi_l3c.rd_cpipe",
"BriefDescription": "Total read accesses",
"PublicDescription": "Total read accesses",
- "Unit": "hisi_sccl,l3c",
+ "Unit": "hisi_sccl,l3c"
},
{
"EventCode": "0x01",
"EventName": "uncore_hisi_l3c.wr_cpipe",
"BriefDescription": "Total write accesses",
"PublicDescription": "Total write accesses",
- "Unit": "hisi_sccl,l3c",
+ "Unit": "hisi_sccl,l3c"
},
{
"EventCode": "0x02",
"EventName": "uncore_hisi_l3c.rd_hit_cpipe",
"BriefDescription": "Total read hits",
"PublicDescription": "Total read hits",
- "Unit": "hisi_sccl,l3c",
+ "Unit": "hisi_sccl,l3c"
},
{
"EventCode": "0x03",
"EventName": "uncore_hisi_l3c.wr_hit_cpipe",
"BriefDescription": "Total write hits",
"PublicDescription": "Total write hits",
- "Unit": "hisi_sccl,l3c",
+ "Unit": "hisi_sccl,l3c"
},
{
"EventCode": "0x04",
"EventName": "uncore_hisi_l3c.victim_num",
"BriefDescription": "l3c precharge commands",
"PublicDescription": "l3c precharge commands",
- "Unit": "hisi_sccl,l3c",
+ "Unit": "hisi_sccl,l3c"
},
+ {
+ "EventCode": "0x20",
+ "EventName": "uncore_hisi_l3c.rd_spipe",
+ "BriefDescription": "Count of the number of read lines that come from this cluster of CPU core in spipe",
+ "PublicDescription": "Count of the number of read lines that come from this cluster of CPU core in spipe",
+ "Unit": "hisi_sccl,l3c"
+ },
+ {
+ "EventCode": "0x21",
+ "EventName": "uncore_hisi_l3c.wr_spipe",
+ "BriefDescription": "Count of the number of write lines that come from this cluster of CPU core in spipe",
+ "PublicDescription": "Count of the number of write lines that come from this cluster of CPU core in spipe",
+ "Unit": "hisi_sccl,l3c"
+ },
+ {
+ "EventCode": "0x22",
+ "EventName": "uncore_hisi_l3c.rd_hit_spipe",
+ "BriefDescription": "Count of the number of read lines that hits in spipe of this L3C",
+ "PublicDescription": "Count of the number of read lines that hits in spipe of this L3C",
+ "Unit": "hisi_sccl,l3c"
+ },
+ {
+ "EventCode": "0x23",
+ "EventName": "uncore_hisi_l3c.wr_hit_spipe",
+ "BriefDescription": "Count of the number of write lines that hits in spipe of this L3C",
+ "PublicDescription": "Count of the number of write lines that hits in spipe of this L3C",
+ "Unit": "hisi_sccl,l3c"
+ },
+ {
+ "EventCode": "0x29",
+ "EventName": "uncore_hisi_l3c.back_invalid",
+ "BriefDescription": "Count of the number of L3C back invalid operations",
+ "PublicDescription": "Count of the number of L3C back invalid operations",
+ "Unit": "hisi_sccl,l3c"
+ },
+ {
+ "EventCode": "0x40",
+ "EventName": "uncore_hisi_l3c.retry_cpu",
+ "BriefDescription": "Count of the number of retry that L3C suppresses the CPU operations",
+ "PublicDescription": "Count of the number of retry that L3C suppresses the CPU operations",
+ "Unit": "hisi_sccl,l3c"
+ },
+ {
+ "EventCode": "0x41",
+ "EventName": "uncore_hisi_l3c.retry_ring",
+ "BriefDescription": "Count of the number of retry that L3C suppresses the ring operations",
+ "PublicDescription": "Count of the number of retry that L3C suppresses the ring operations",
+ "Unit": "hisi_sccl,l3c"
+ },
+ {
+ "EventCode": "0x42",
+ "EventName": "uncore_hisi_l3c.prefetch_drop",
+ "BriefDescription": "Count of the number of prefetch drops from this L3C",
+ "PublicDescription": "Count of the number of prefetch drops from this L3C",
+ "Unit": "hisi_sccl,l3c"
+ }
]
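
Besides the comma cleanup, the L3C hunk adds eight new spipe, back-invalid, retry, and prefetch-drop events (codes 0x20 through 0x42). Once built into perf, events with a "Unit" of hisi_sccl,l3c are addressed through the per-SCCL uncore PMU instances, so usage looks something like perf stat -e hisi_sccl3_l3c0/rd_spipe/ (the exact PMU instance names depend on the machine's SCCL/CCL topology, so treat that spelling as an assumption). A small illustrative helper, assuming the in-tree path above, lists what a file defines and flags accidental EventCode collisions:

    # Illustrative sketch: list the events a pmu-events file defines
    # and flag duplicate EventCode values. Path and output format are
    # assumptions for the example, not kernel tooling.
    import json
    from collections import Counter

    def summarize(path):
        with open(path, encoding="utf-8") as f:
            events = json.load(f)
        codes = Counter(e["EventCode"] for e in events)
        for e in events:
            dup = "  <-- duplicate EventCode" if codes[e["EventCode"]] > 1 else ""
            print(f'{e["EventCode"]:>6}  {e["EventName"]}{dup}')

    summarize("tools/perf/pmu-events/arch/arm64/hisilicon/hip08/uncore-l3c.json")
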
diff --git a/tools/perf/pmu-events/arch/powerpc/power8/cache.json b/tools/perf/pmu-events/arch/powerpc/power8/cache.json
index 4a3daa6b4b96..6b792b2c87e2 100644
--- a/tools/perf/pmu-events/arch/powerpc/power8/cache.json
+++ b/tools/perf/pmu-events/arch/powerpc/power8/cache.json
@@ -1,176 +1,176 @@
[
- {,
+ {
"EventCode": "0x4c048",
"EventName": "PM_DATA_FROM_DL2L3_MOD",
"BriefDescription": "The processor's data cache was reloaded with Modified (M) data from another chip's L2 or L3 on a different Node or Group (Distant), as this chip due to a demand load",
"PublicDescription": "The processor's data cache was reloaded with Modified (M) data from another chip's L2 or L3 on a different Node or Group (Distant), as this chip due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
},
- {,
+ {
"EventCode": "0x3c048",
"EventName": "PM_DATA_FROM_DL2L3_SHR",
"BriefDescription": "The processor's data cache was reloaded with Shared (S) data from another chip's L2 or L3 on a different Node or Group (Distant), as this chip due to a demand load",
"PublicDescription": "The processor's data cache was reloaded with Shared (S) data from another chip's L2 or L3 on a different Node or Group (Distant), as this chip due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
},
- {,
+ {
"EventCode": "0x3c04c",
"EventName": "PM_DATA_FROM_DL4",
"BriefDescription": "The processor's data cache was reloaded from another chip's L4 on a different Node or Group (Distant) due to a demand load",
"PublicDescription": "The processor's data cache was reloaded from another chip's L4 on a different Node or Group (Distant) due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
},
- {,
+ {
"EventCode": "0x1c042",
"EventName": "PM_DATA_FROM_L2",
"BriefDescription": "The processor's data cache was reloaded from local core's L2 due to a demand load",
"PublicDescription": "The processor's data cache was reloaded from local core's L2 due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
},
- {,
+ {
"EventCode": "0x200fe",
"EventName": "PM_DATA_FROM_L2MISS",
"BriefDescription": "Demand LD - L2 Miss (not L2 hit)",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x1c04e",
"EventName": "PM_DATA_FROM_L2MISS_MOD",
"BriefDescription": "The processor's data cache was reloaded from a localtion other than the local core's L2 due to a demand load",
"PublicDescription": "The processor's data cache was reloaded from a localtion other than the local core's L2 due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
},
- {,
+ {
"EventCode": "0x3c040",
"EventName": "PM_DATA_FROM_L2_DISP_CONFLICT_LDHITST",
"BriefDescription": "The processor's data cache was reloaded from local core's L2 with load hit store conflict due to a demand load",
"PublicDescription": "The processor's data cache was reloaded from local core's L2 with load hit store conflict due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
},
- {,
+ {
"EventCode": "0x4c040",
"EventName": "PM_DATA_FROM_L2_DISP_CONFLICT_OTHER",
"BriefDescription": "The processor's data cache was reloaded from local core's L2 with dispatch conflict due to a demand load",
"PublicDescription": "The processor's data cache was reloaded from local core's L2 with dispatch conflict due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
},
- {,
+ {
"EventCode": "0x2c040",
"EventName": "PM_DATA_FROM_L2_MEPF",
"BriefDescription": "The processor's data cache was reloaded from local core's L2 hit without dispatch conflicts on Mepf state due to a demand load",
"PublicDescription": "The processor's data cache was reloaded from local core's L2 hit without dispatch conflicts on Mepf state due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
},
- {,
+ {
"EventCode": "0x1c040",
"EventName": "PM_DATA_FROM_L2_NO_CONFLICT",
"BriefDescription": "The processor's data cache was reloaded from local core's L2 without conflict due to a demand load",
"PublicDescription": "The processor's data cache was reloaded from local core's L2 without conflict due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
},
- {,
+ {
"EventCode": "0x4c042",
"EventName": "PM_DATA_FROM_L3",
"BriefDescription": "The processor's data cache was reloaded from local core's L3 due to a demand load",
"PublicDescription": "The processor's data cache was reloaded from local core's L3 due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
},
- {,
+ {
"EventCode": "0x300fe",
"EventName": "PM_DATA_FROM_L3MISS",
"BriefDescription": "Demand LD - L3 Miss (not L2 hit and not L3 hit)",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x4c04e",
"EventName": "PM_DATA_FROM_L3MISS_MOD",
"BriefDescription": "The processor's data cache was reloaded from a localtion other than the local core's L3 due to a demand load",
"PublicDescription": "The processor's data cache was reloaded from a localtion other than the local core's L3 due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
},
- {,
+ {
"EventCode": "0x3c042",
"EventName": "PM_DATA_FROM_L3_DISP_CONFLICT",
"BriefDescription": "The processor's data cache was reloaded from local core's L3 with dispatch conflict due to a demand load",
"PublicDescription": "The processor's data cache was reloaded from local core's L3 with dispatch conflict due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
},
- {,
+ {
"EventCode": "0x2c042",
"EventName": "PM_DATA_FROM_L3_MEPF",
"BriefDescription": "The processor's data cache was reloaded from local core's L3 without dispatch conflicts hit on Mepf state due to a demand load",
"PublicDescription": "The processor's data cache was reloaded from local core's L3 without dispatch conflicts hit on Mepf state due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
},
- {,
+ {
"EventCode": "0x1c044",
"EventName": "PM_DATA_FROM_L3_NO_CONFLICT",
"BriefDescription": "The processor's data cache was reloaded from local core's L3 without conflict due to a demand load",
"PublicDescription": "The processor's data cache was reloaded from local core's L3 without conflict due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
},
- {,
+ {
"EventCode": "0x1c04c",
"EventName": "PM_DATA_FROM_LL4",
"BriefDescription": "The processor's data cache was reloaded from the local chip's L4 cache due to a demand load",
"PublicDescription": "The processor's data cache was reloaded from the local chip's L4 cache due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
},
- {,
+ {
"EventCode": "0x4c04a",
"EventName": "PM_DATA_FROM_OFF_CHIP_CACHE",
"BriefDescription": "The processor's data cache was reloaded either shared or modified data from another core's L2/L3 on a different chip (remote or distant) due to a demand load",
"PublicDescription": "The processor's data cache was reloaded either shared or modified data from another core's L2/L3 on a different chip (remote or distant) due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
},
- {,
+ {
"EventCode": "0x1c048",
"EventName": "PM_DATA_FROM_ON_CHIP_CACHE",
"BriefDescription": "The processor's data cache was reloaded either shared or modified data from another core's L2/L3 on the same chip due to a demand load",
"PublicDescription": "The processor's data cache was reloaded either shared or modified data from another core's L2/L3 on the same chip due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
},
- {,
+ {
"EventCode": "0x2c046",
"EventName": "PM_DATA_FROM_RL2L3_MOD",
"BriefDescription": "The processor's data cache was reloaded with Modified (M) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to a demand load",
"PublicDescription": "The processor's data cache was reloaded with Modified (M) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
},
- {,
+ {
"EventCode": "0x1c04a",
"EventName": "PM_DATA_FROM_RL2L3_SHR",
"BriefDescription": "The processor's data cache was reloaded with Shared (S) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to a demand load",
"PublicDescription": "The processor's data cache was reloaded with Shared (S) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
},
- {,
+ {
"EventCode": "0x3001a",
"EventName": "PM_DATA_TABLEWALK_CYC",
"BriefDescription": "Tablwalk Cycles (could be 1 or 2 active)",
"PublicDescription": "Data Tablewalk Active"
},
- {,
+ {
"EventCode": "0x4e04e",
"EventName": "PM_DPTEG_FROM_L3MISS",
"BriefDescription": "A Page Table Entry was loaded into the TLB from a localtion other than the local core's L3 due to a data side request",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0xd094",
"EventName": "PM_DSLB_MISS",
"BriefDescription": "Data SLB Miss - Total of all segment sizes",
"PublicDescription": "Data SLB Miss - Total of all segment sizesData SLB misses"
},
- {,
+ {
"EventCode": "0x1002c",
"EventName": "PM_L1_DCACHE_RELOADED_ALL",
"BriefDescription": "L1 data cache reloaded for demand or prefetch",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x300f6",
"EventName": "PM_L1_DCACHE_RELOAD_VALID",
"BriefDescription": "DL1 reloaded due to Demand Load",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x3e054",
"EventName": "PM_LD_MISS_L1",
"BriefDescription": "Load Missed L1",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x100ee",
"EventName": "PM_LD_REF_L1",
"BriefDescription": "All L1 D cache load references counted at finish, gated by reject",
"PublicDescription": "Load Ref count combined for all units"
},
- {,
+ {
"EventCode": "0x300f0",
"EventName": "PM_ST_MISS_L1",
"BriefDescription": "Store Missed L1",
"PublicDescription": ""
- },
+ }
]
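
All of the power8 hunks, from here through the end of the series, apply the same two-part fix: the files carried a malformed "{," object opener on every entry, plus a trailing comma on the final "}," before the closing bracket. Because the change is purely mechanical, the transformation fits in a few lines; the sketch below mimics it with regexes over the raw text. It is an assumption about how one might reproduce the rewrite, not the tool actually used to generate this patch, and a real tool should parse rather than pattern-match (a regex could in principle touch string contents):

    # Sketch of the mechanical rewrite the power8 hunks perform:
    # turn the malformed '{,' object opener into '{' and drop the
    # trailing comma on the last '},' before the closing ']'.
    import re
    import sys

    def repair(text):
        # '{,' object opener -> '{'
        text = re.sub(r"^(\s*)\{,\s*$", r"\1{", text, flags=re.M)
        # '},' immediately before the closing ']' -> '}'
        text = re.sub(r"\},(\s*\])", r"}\1", text)
        return text

    for path in sys.argv[1:]:
        with open(path, encoding="utf-8") as f:
            original = f.read()
        fixed = repair(original)
        if fixed != original:
            with open(path, "w", encoding="utf-8") as f:
                f.write(fixed)
            print(f"rewrote {path}")
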
diff --git a/tools/perf/pmu-events/arch/powerpc/power8/floating-point.json b/tools/perf/pmu-events/arch/powerpc/power8/floating-point.json
index 5f1bb9fca40c..4ef0d01b7f5e 100644
--- a/tools/perf/pmu-events/arch/powerpc/power8/floating-point.json
+++ b/tools/perf/pmu-events/arch/powerpc/power8/floating-point.json
@@ -1,14 +1,14 @@
[
- {,
+ {
"EventCode": "0x2000e",
"EventName": "PM_FXU_BUSY",
"BriefDescription": "fxu0 busy and fxu1 busy",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x1000e",
"EventName": "PM_FXU_IDLE",
"BriefDescription": "fxu0 idle and fxu1 idle",
"PublicDescription": ""
- },
+ }
]
diff --git a/tools/perf/pmu-events/arch/powerpc/power8/frontend.json b/tools/perf/pmu-events/arch/powerpc/power8/frontend.json
index 04c5f1b7bee1..1ddc30655d43 100644
--- a/tools/perf/pmu-events/arch/powerpc/power8/frontend.json
+++ b/tools/perf/pmu-events/arch/powerpc/power8/frontend.json
@@ -1,470 +1,470 @@
[
- {,
+ {
"EventCode": "0x2505e",
"EventName": "PM_BACK_BR_CMPL",
"BriefDescription": "Branch instruction completed with a target address less than current instruction address",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x10068",
"EventName": "PM_BRU_FIN",
"BriefDescription": "Branch Instruction Finished",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x20036",
"EventName": "PM_BR_2PATH",
"BriefDescription": "two path branch",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x40060",
"EventName": "PM_BR_CMPL",
"BriefDescription": "Branch Instruction completed",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x400f6",
"EventName": "PM_BR_MPRED_CMPL",
"BriefDescription": "Number of Branch Mispredicts",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x200fa",
"EventName": "PM_BR_TAKEN_CMPL",
"BriefDescription": "New event for Branch Taken",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x10018",
"EventName": "PM_IC_DEMAND_CYC",
"BriefDescription": "Cycles when a demand ifetch was pending",
"PublicDescription": "Demand ifetch pending"
},
- {,
+ {
"EventCode": "0x100f6",
"EventName": "PM_IERAT_RELOAD",
"BriefDescription": "Number of I-ERAT reloads",
"PublicDescription": "IERAT Reloaded (Miss)"
},
- {,
+ {
"EventCode": "0x4006a",
"EventName": "PM_IERAT_RELOAD_16M",
"BriefDescription": "IERAT Reloaded (Miss) for a 16M page",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x20064",
"EventName": "PM_IERAT_RELOAD_4K",
"BriefDescription": "IERAT Miss (Not implemented as DI on POWER6)",
"PublicDescription": "IERAT Reloaded (Miss) for a 4k page"
},
- {,
+ {
"EventCode": "0x3006a",
"EventName": "PM_IERAT_RELOAD_64K",
"BriefDescription": "IERAT Reloaded (Miss) for a 64k page",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x14050",
"EventName": "PM_INST_CHIP_PUMP_CPRED",
"BriefDescription": "Initial and Final Pump Scope was chip pump (prediction=correct) for an instruction fetch",
"PublicDescription": "Initial and Final Pump Scope and data sourced across this scope was chip pump (prediction=correct) for an instruction fetch"
},
- {,
+ {
"EventCode": "0x2",
"EventName": "PM_INST_CMPL",
"BriefDescription": "Number of PowerPC Instructions that completed",
"PublicDescription": "PPC Instructions Finished (completed)"
},
- {,
+ {
"EventCode": "0x200f2",
"EventName": "PM_INST_DISP",
"BriefDescription": "PPC Dispatched",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x44048",
"EventName": "PM_INST_FROM_DL2L3_MOD",
"BriefDescription": "The processor's Instruction cache was reloaded with Modified (M) data from another chip's L2 or L3 on a different Node or Group (Distant), as this chip due to an instruction fetch (not prefetch)",
"PublicDescription": "The processor's Instruction cache was reloaded with Modified (M) data from another chip's L2 or L3 on a different Node or Group (Distant), as this chip due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
},
- {,
+ {
"EventCode": "0x34048",
"EventName": "PM_INST_FROM_DL2L3_SHR",
"BriefDescription": "The processor's Instruction cache was reloaded with Shared (S) data from another chip's L2 or L3 on a different Node or Group (Distant), as this chip due to an instruction fetch (not prefetch)",
"PublicDescription": "The processor's Instruction cache was reloaded with Shared (S) data from another chip's L2 or L3 on a different Node or Group (Distant), as this chip due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
},
- {,
+ {
"EventCode": "0x3404c",
"EventName": "PM_INST_FROM_DL4",
"BriefDescription": "The processor's Instruction cache was reloaded from another chip's L4 on a different Node or Group (Distant) due to an instruction fetch (not prefetch)",
"PublicDescription": "The processor's Instruction cache was reloaded from another chip's L4 on a different Node or Group (Distant) due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
},
- {,
+ {
"EventCode": "0x4404c",
"EventName": "PM_INST_FROM_DMEM",
"BriefDescription": "The processor's Instruction cache was reloaded from another chip's memory on the same Node or Group (Distant) due to an instruction fetch (not prefetch)",
"PublicDescription": "The processor's Instruction cache was reloaded from another chip's memory on the same Node or Group (Distant) due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
},
- {,
+ {
"EventCode": "0x14042",
"EventName": "PM_INST_FROM_L2",
"BriefDescription": "The processor's Instruction cache was reloaded from local core's L2 due to an instruction fetch (not prefetch)",
"PublicDescription": "The processor's Instruction cache was reloaded from local core's L2 due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
},
- {,
+ {
"EventCode": "0x1404e",
"EventName": "PM_INST_FROM_L2MISS",
"BriefDescription": "The processor's Instruction cache was reloaded from a localtion other than the local core's L2 due to an instruction fetch (not prefetch)",
"PublicDescription": "The processor's Instruction cache was reloaded from a localtion other than the local core's L2 due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
},
- {,
+ {
"EventCode": "0x34040",
"EventName": "PM_INST_FROM_L2_DISP_CONFLICT_LDHITST",
"BriefDescription": "The processor's Instruction cache was reloaded from local core's L2 with load hit store conflict due to an instruction fetch (not prefetch)",
"PublicDescription": "The processor's Instruction cache was reloaded from local core's L2 with load hit store conflict due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
},
- {,
+ {
"EventCode": "0x44040",
"EventName": "PM_INST_FROM_L2_DISP_CONFLICT_OTHER",
"BriefDescription": "The processor's Instruction cache was reloaded from local core's L2 with dispatch conflict due to an instruction fetch (not prefetch)",
"PublicDescription": "The processor's Instruction cache was reloaded from local core's L2 with dispatch conflict due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
},
- {,
+ {
"EventCode": "0x24040",
"EventName": "PM_INST_FROM_L2_MEPF",
"BriefDescription": "The processor's Instruction cache was reloaded from local core's L2 hit without dispatch conflicts on Mepf state. due to an instruction fetch (not prefetch)",
"PublicDescription": "The processor's Instruction cache was reloaded from local core's L2 hit without dispatch conflicts on Mepf state. due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
},
- {,
+ {
"EventCode": "0x14040",
"EventName": "PM_INST_FROM_L2_NO_CONFLICT",
"BriefDescription": "The processor's Instruction cache was reloaded from local core's L2 without conflict due to an instruction fetch (not prefetch)",
"PublicDescription": "The processor's Instruction cache was reloaded from local core's L2 without conflict due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
},
- {,
+ {
"EventCode": "0x44042",
"EventName": "PM_INST_FROM_L3",
"BriefDescription": "The processor's Instruction cache was reloaded from local core's L3 due to an instruction fetch (not prefetch)",
"PublicDescription": "The processor's Instruction cache was reloaded from local core's L3 due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
},
- {,
+ {
"EventCode": "0x300fa",
"EventName": "PM_INST_FROM_L3MISS",
"BriefDescription": "Marked instruction was reloaded from a location beyond the local chiplet",
"PublicDescription": "Inst from L3 miss"
},
- {,
+ {
"EventCode": "0x4404e",
"EventName": "PM_INST_FROM_L3MISS_MOD",
"BriefDescription": "The processor's Instruction cache was reloaded from a localtion other than the local core's L3 due to a instruction fetch",
"PublicDescription": "The processor's Instruction cache was reloaded from a localtion other than the local core's L3 due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
},
- {,
+ {
"EventCode": "0x34042",
"EventName": "PM_INST_FROM_L3_DISP_CONFLICT",
"BriefDescription": "The processor's Instruction cache was reloaded from local core's L3 with dispatch conflict due to an instruction fetch (not prefetch)",
"PublicDescription": "The processor's Instruction cache was reloaded from local core's L3 with dispatch conflict due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
},
- {,
+ {
"EventCode": "0x24042",
"EventName": "PM_INST_FROM_L3_MEPF",
"BriefDescription": "The processor's Instruction cache was reloaded from local core's L3 without dispatch conflicts hit on Mepf state. due to an instruction fetch (not prefetch)",
"PublicDescription": "The processor's Instruction cache was reloaded from local core's L3 without dispatch conflicts hit on Mepf state. due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
},
- {,
+ {
"EventCode": "0x14044",
"EventName": "PM_INST_FROM_L3_NO_CONFLICT",
"BriefDescription": "The processor's Instruction cache was reloaded from local core's L3 without conflict due to an instruction fetch (not prefetch)",
"PublicDescription": "The processor's Instruction cache was reloaded from local core's L3 without conflict due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
},
- {,
+ {
"EventCode": "0x1404c",
"EventName": "PM_INST_FROM_LL4",
"BriefDescription": "The processor's Instruction cache was reloaded from the local chip's L4 cache due to an instruction fetch (not prefetch)",
"PublicDescription": "The processor's Instruction cache was reloaded from the local chip's L4 cache due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
},
- {,
+ {
"EventCode": "0x24048",
"EventName": "PM_INST_FROM_LMEM",
"BriefDescription": "The processor's Instruction cache was reloaded from the local chip's Memory due to an instruction fetch (not prefetch)",
"PublicDescription": "The processor's Instruction cache was reloaded from the local chip's Memory due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
},
- {,
+ {
"EventCode": "0x2404c",
"EventName": "PM_INST_FROM_MEMORY",
"BriefDescription": "The processor's Instruction cache was reloaded from a memory location including L4 from local remote or distant due to an instruction fetch (not prefetch)",
"PublicDescription": "The processor's Instruction cache was reloaded from a memory location including L4 from local remote or distant due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
},
- {,
+ {
"EventCode": "0x4404a",
"EventName": "PM_INST_FROM_OFF_CHIP_CACHE",
"BriefDescription": "The processor's Instruction cache was reloaded either shared or modified data from another core's L2/L3 on a different chip (remote or distant) due to an instruction fetch (not prefetch)",
"PublicDescription": "The processor's Instruction cache was reloaded either shared or modified data from another core's L2/L3 on a different chip (remote or distant) due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
},
- {,
+ {
"EventCode": "0x14048",
"EventName": "PM_INST_FROM_ON_CHIP_CACHE",
"BriefDescription": "The processor's Instruction cache was reloaded either shared or modified data from another core's L2/L3 on the same chip due to an instruction fetch (not prefetch)",
"PublicDescription": "The processor's Instruction cache was reloaded either shared or modified data from another core's L2/L3 on the same chip due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
},
- {,
+ {
"EventCode": "0x24046",
"EventName": "PM_INST_FROM_RL2L3_MOD",
"BriefDescription": "The processor's Instruction cache was reloaded with Modified (M) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to an instruction fetch (not prefetch)",
"PublicDescription": "The processor's Instruction cache was reloaded with Modified (M) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
},
- {,
+ {
"EventCode": "0x1404a",
"EventName": "PM_INST_FROM_RL2L3_SHR",
"BriefDescription": "The processor's Instruction cache was reloaded with Shared (S) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to an instruction fetch (not prefetch)",
"PublicDescription": "The processor's Instruction cache was reloaded with Shared (S) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
},
- {,
+ {
"EventCode": "0x2404a",
"EventName": "PM_INST_FROM_RL4",
"BriefDescription": "The processor's Instruction cache was reloaded from another chip's L4 on the same Node or Group ( Remote) due to an instruction fetch (not prefetch)",
"PublicDescription": "The processor's Instruction cache was reloaded from another chip's L4 on the same Node or Group ( Remote) due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
},
- {,
+ {
"EventCode": "0x3404a",
"EventName": "PM_INST_FROM_RMEM",
"BriefDescription": "The processor's Instruction cache was reloaded from another chip's memory on the same Node or Group ( Remote) due to an instruction fetch (not prefetch)",
"PublicDescription": "The processor's Instruction cache was reloaded from another chip's memory on the same Node or Group ( Remote) due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
},
- {,
+ {
"EventCode": "0x24050",
"EventName": "PM_INST_GRP_PUMP_CPRED",
"BriefDescription": "Initial and Final Pump Scope was group pump (prediction=correct) for an instruction fetch",
"PublicDescription": "Initial and Final Pump Scope and data sourced across this scope was group pump for an instruction fetch"
},
- {,
+ {
"EventCode": "0x24052",
"EventName": "PM_INST_GRP_PUMP_MPRED",
"BriefDescription": "Final Pump Scope (Group) ended up either larger or smaller than Initial Pump Scope for an instruction fetch",
"PublicDescription": "Final Pump Scope(Group) to get data sourced, ended up larger than Initial Pump Scope OR Final Pump Scope(Group) got data from source that was at smaller scope(Chip) Final pump was group pump and initial pump was chip or final and initial pump was gro"
},
- {,
+ {
"EventCode": "0x14052",
"EventName": "PM_INST_GRP_PUMP_MPRED_RTY",
"BriefDescription": "Final Pump Scope (Group) ended up larger than Initial Pump Scope (Chip) for an instruction fetch",
"PublicDescription": "Final Pump Scope(Group) to get data sourced, ended up larger than Initial Pump Scope (Chip) Final pump was group pump and initial pump was chip pumpfor an instruction fetch"
},
- {,
+ {
"EventCode": "0x1003a",
"EventName": "PM_INST_IMC_MATCH_CMPL",
"BriefDescription": "IMC Match Count ( Not architected in P8)",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x14054",
"EventName": "PM_INST_PUMP_CPRED",
"BriefDescription": "Pump prediction correct. Counts across all types of pumps for an instruction fetch",
"PublicDescription": "Pump prediction correct. Counts across all types of pumpsfor an instruction fetch"
},
- {,
+ {
"EventCode": "0x44052",
"EventName": "PM_INST_PUMP_MPRED",
"BriefDescription": "Pump misprediction. Counts across all types of pumps for an instruction fetch",
"PublicDescription": "Pump Mis prediction Counts across all types of pumpsfor an instruction fetch"
},
- {,
+ {
"EventCode": "0x34050",
"EventName": "PM_INST_SYS_PUMP_CPRED",
"BriefDescription": "Initial and Final Pump Scope was system pump (prediction=correct) for an instruction fetch",
"PublicDescription": "Initial and Final Pump Scope and data sourced across this scope was system pump for an instruction fetch"
},
- {,
+ {
"EventCode": "0x34052",
"EventName": "PM_INST_SYS_PUMP_MPRED",
"BriefDescription": "Final Pump Scope (system) mispredicted. Either the original scope was too small (Chip/Group) or the original scope was System and it should have been smaller. Counts for an instruction fetch",
"PublicDescription": "Final Pump Scope(system) to get data sourced, ended up larger than Initial Pump Scope(Chip/Group) OR Final Pump Scope(system) got data from source that was at smaller scope(Chip/group) Final pump was system pump and initial pump was chip or group or"
},
- {,
+ {
"EventCode": "0x44050",
"EventName": "PM_INST_SYS_PUMP_MPRED_RTY",
"BriefDescription": "Final Pump Scope (system) ended up larger than Initial Pump Scope (Chip/Group) for an instruction fetch",
"PublicDescription": "Final Pump Scope(system) to get data sourced, ended up larger than Initial Pump Scope (Chip or Group) for an instruction fetch"
},
- {,
+ {
"EventCode": "0x45048",
"EventName": "PM_IPTEG_FROM_DL2L3_MOD",
"BriefDescription": "A Page Table Entry was loaded into the TLB with Modified (M) data from another chip's L2 or L3 on a different Node or Group (Distant), as this chip due to a instruction side request",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x35048",
"EventName": "PM_IPTEG_FROM_DL2L3_SHR",
"BriefDescription": "A Page Table Entry was loaded into the TLB with Shared (S) data from another chip's L2 or L3 on a different Node or Group (Distant), as this chip due to a instruction side request",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x3504c",
"EventName": "PM_IPTEG_FROM_DL4",
"BriefDescription": "A Page Table Entry was loaded into the TLB from another chip's L4 on a different Node or Group (Distant) due to a instruction side request",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x4504c",
"EventName": "PM_IPTEG_FROM_DMEM",
"BriefDescription": "A Page Table Entry was loaded into the TLB from another chip's memory on the same Node or Group (Distant) due to a instruction side request",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x15042",
"EventName": "PM_IPTEG_FROM_L2",
"BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L2 due to a instruction side request",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x1504e",
"EventName": "PM_IPTEG_FROM_L2MISS",
"BriefDescription": "A Page Table Entry was loaded into the TLB from a localtion other than the local core's L2 due to a instruction side request",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x25040",
"EventName": "PM_IPTEG_FROM_L2_MEPF",
"BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L2 hit without dispatch conflicts on Mepf state. due to a instruction side request",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x15040",
"EventName": "PM_IPTEG_FROM_L2_NO_CONFLICT",
"BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L2 without conflict due to a instruction side request",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x45042",
"EventName": "PM_IPTEG_FROM_L3",
"BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L3 due to a instruction side request",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x4504e",
"EventName": "PM_IPTEG_FROM_L3MISS",
"BriefDescription": "A Page Table Entry was loaded into the TLB from a localtion other than the local core's L3 due to a instruction side request",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x35042",
"EventName": "PM_IPTEG_FROM_L3_DISP_CONFLICT",
"BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L3 with dispatch conflict due to a instruction side request",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x25042",
"EventName": "PM_IPTEG_FROM_L3_MEPF",
"BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L3 without dispatch conflicts hit on Mepf state. due to a instruction side request",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x15044",
"EventName": "PM_IPTEG_FROM_L3_NO_CONFLICT",
"BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L3 without conflict due to a instruction side request",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x1504c",
"EventName": "PM_IPTEG_FROM_LL4",
"BriefDescription": "A Page Table Entry was loaded into the TLB from the local chip's L4 cache due to a instruction side request",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x25048",
"EventName": "PM_IPTEG_FROM_LMEM",
"BriefDescription": "A Page Table Entry was loaded into the TLB from the local chip's Memory due to a instruction side request",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x2504c",
"EventName": "PM_IPTEG_FROM_MEMORY",
"BriefDescription": "A Page Table Entry was loaded into the TLB from a memory location including L4 from local remote or distant due to a instruction side request",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x4504a",
"EventName": "PM_IPTEG_FROM_OFF_CHIP_CACHE",
"BriefDescription": "A Page Table Entry was loaded into the TLB either shared or modified data from another core's L2/L3 on a different chip (remote or distant) due to a instruction side request",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x15048",
"EventName": "PM_IPTEG_FROM_ON_CHIP_CACHE",
"BriefDescription": "A Page Table Entry was loaded into the TLB either shared or modified data from another core's L2/L3 on the same chip due to a instruction side request",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x25046",
"EventName": "PM_IPTEG_FROM_RL2L3_MOD",
"BriefDescription": "A Page Table Entry was loaded into the TLB with Modified (M) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to a instruction side request",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x1504a",
"EventName": "PM_IPTEG_FROM_RL2L3_SHR",
"BriefDescription": "A Page Table Entry was loaded into the TLB with Shared (S) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to a instruction side request",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x2504a",
"EventName": "PM_IPTEG_FROM_RL4",
"BriefDescription": "A Page Table Entry was loaded into the TLB from another chip's L4 on the same Node or Group ( Remote) due to a instruction side request",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x3504a",
"EventName": "PM_IPTEG_FROM_RMEM",
"BriefDescription": "A Page Table Entry was loaded into the TLB from another chip's memory on the same Node or Group ( Remote) due to a instruction side request",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0xd096",
"EventName": "PM_ISLB_MISS",
"BriefDescription": "I SLB Miss",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x400fc",
"EventName": "PM_ITLB_MISS",
"BriefDescription": "ITLB Reloaded (always zero on POWER6)",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x200fd",
"EventName": "PM_L1_ICACHE_MISS",
"BriefDescription": "Demand iCache Miss",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x40012",
"EventName": "PM_L1_ICACHE_RELOADED_ALL",
"BriefDescription": "Counts all Icache reloads includes demand, prefetchm prefetch turned into demand and demand turned into prefetch",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x30068",
"EventName": "PM_L1_ICACHE_RELOADED_PREF",
"BriefDescription": "Counts all Icache prefetch reloads ( includes demand turned into prefetch)",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x300f4",
"EventName": "PM_THRD_CONC_RUN_INST",
"BriefDescription": "PPC Instructions Finished when both threads in run_cycles",
"PublicDescription": "Concurrent Run Instructions"
},
- {,
+ {
"EventCode": "0x30060",
"EventName": "PM_TM_TRANS_RUN_INST",
"BriefDescription": "Instructions completed in transactional state",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x4e014",
"EventName": "PM_TM_TX_PASS_RUN_INST",
"BriefDescription": "run instructions spent in successful transactions",
"PublicDescription": ""
- },
+ }
]
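
The frontend.json hunk carries the identical fix across all 470 lines of the file, and marked.json below continues the pattern. After applying the series, a tree-wide sanity check is cheap; this sketch (illustrative, with the in-tree layout assumed) walks the whole pmu-events directory and reports any file the strict parser still rejects:

    # Illustrative tree-wide check: every .json under pmu-events
    # should now parse as strict JSON. Run from the kernel source root.
    import json
    import pathlib

    root = pathlib.Path("tools/perf/pmu-events")
    failures = []
    for path in sorted(root.rglob("*.json")):
        try:
            json.loads(path.read_text(encoding="utf-8"))
        except json.JSONDecodeError as err:
            failures.append(f"{path}:{err.lineno}: {err.msg}")

    print("\n".join(failures) if failures else "all pmu-events JSON files parse cleanly")
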
diff --git a/tools/perf/pmu-events/arch/powerpc/power8/marked.json b/tools/perf/pmu-events/arch/powerpc/power8/marked.json
index dcdcede3c3fe..94dc58b83b7e 100644
--- a/tools/perf/pmu-events/arch/powerpc/power8/marked.json
+++ b/tools/perf/pmu-events/arch/powerpc/power8/marked.json
@@ -1,794 +1,794 @@
[
- {,
+ {
"EventCode": "0x3515e",
"EventName": "PM_MRK_BACK_BR_CMPL",
"BriefDescription": "Marked branch instruction completed with a target address less than current instruction address",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x2013a",
"EventName": "PM_MRK_BRU_FIN",
"BriefDescription": "bru marked instr finish",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x1016e",
"EventName": "PM_MRK_BR_CMPL",
"BriefDescription": "Branch Instruction completed",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x301e4",
"EventName": "PM_MRK_BR_MPRED_CMPL",
"BriefDescription": "Marked Branch Mispredicted",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x101e2",
"EventName": "PM_MRK_BR_TAKEN_CMPL",
"BriefDescription": "Marked Branch Taken completed",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x4d148",
"EventName": "PM_MRK_DATA_FROM_DL2L3_MOD",
"BriefDescription": "The processor's data cache was reloaded with Modified (M) data from another chip's L2 or L3 on a different Node or Group (Distant), as this chip due to a marked load",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x2d128",
"EventName": "PM_MRK_DATA_FROM_DL2L3_MOD_CYC",
"BriefDescription": "Duration in cycles to reload with Modified (M) data from another chip's L2 or L3 on a different Node or Group (Distant), as this chip due to a marked load",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x3d148",
"EventName": "PM_MRK_DATA_FROM_DL2L3_SHR",
"BriefDescription": "The processor's data cache was reloaded with Shared (S) data from another chip's L2 or L3 on a different Node or Group (Distant), as this chip due to a marked load",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x2c128",
"EventName": "PM_MRK_DATA_FROM_DL2L3_SHR_CYC",
"BriefDescription": "Duration in cycles to reload with Shared (S) data from another chip's L2 or L3 on a different Node or Group (Distant), as this chip due to a marked load",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x3d14c",
"EventName": "PM_MRK_DATA_FROM_DL4",
"BriefDescription": "The processor's data cache was reloaded from another chip's L4 on a different Node or Group (Distant) due to a marked load",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x2c12c",
"EventName": "PM_MRK_DATA_FROM_DL4_CYC",
"BriefDescription": "Duration in cycles to reload from another chip's L4 on a different Node or Group (Distant) due to a marked load",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x4d14c",
"EventName": "PM_MRK_DATA_FROM_DMEM",
"BriefDescription": "The processor's data cache was reloaded from another chip's memory on the same Node or Group (Distant) due to a marked load",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x2d12c",
"EventName": "PM_MRK_DATA_FROM_DMEM_CYC",
"BriefDescription": "Duration in cycles to reload from another chip's memory on the same Node or Group (Distant) due to a marked load",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x1d142",
"EventName": "PM_MRK_DATA_FROM_L2",
"BriefDescription": "The processor's data cache was reloaded from local core's L2 due to a marked load",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x1d14e",
"EventName": "PM_MRK_DATA_FROM_L2MISS",
"BriefDescription": "Data cache reload L2 miss",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x4c12e",
"EventName": "PM_MRK_DATA_FROM_L2MISS_CYC",
"BriefDescription": "Duration in cycles to reload from a localtion other than the local core's L2 due to a marked load",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x4c122",
"EventName": "PM_MRK_DATA_FROM_L2_CYC",
"BriefDescription": "Duration in cycles to reload from local core's L2 due to a marked load",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x3d140",
"EventName": "PM_MRK_DATA_FROM_L2_DISP_CONFLICT_LDHITST",
"BriefDescription": "The processor's data cache was reloaded from local core's L2 with load hit store conflict due to a marked load",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x2c120",
"EventName": "PM_MRK_DATA_FROM_L2_DISP_CONFLICT_LDHITST_CYC",
"BriefDescription": "Duration in cycles to reload from local core's L2 with load hit store conflict due to a marked load",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x4d140",
"EventName": "PM_MRK_DATA_FROM_L2_DISP_CONFLICT_OTHER",
"BriefDescription": "The processor's data cache was reloaded from local core's L2 with dispatch conflict due to a marked load",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x2d120",
"EventName": "PM_MRK_DATA_FROM_L2_DISP_CONFLICT_OTHER_CYC",
"BriefDescription": "Duration in cycles to reload from local core's L2 with dispatch conflict due to a marked load",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x2d140",
"EventName": "PM_MRK_DATA_FROM_L2_MEPF",
"BriefDescription": "The processor's data cache was reloaded from local core's L2 hit without dispatch conflicts on Mepf state. due to a marked load",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x4d120",
"EventName": "PM_MRK_DATA_FROM_L2_MEPF_CYC",
"BriefDescription": "Duration in cycles to reload from local core's L2 hit without dispatch conflicts on Mepf state. due to a marked load",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x1d140",
"EventName": "PM_MRK_DATA_FROM_L2_NO_CONFLICT",
"BriefDescription": "The processor's data cache was reloaded from local core's L2 without conflict due to a marked load",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x4c120",
"EventName": "PM_MRK_DATA_FROM_L2_NO_CONFLICT_CYC",
"BriefDescription": "Duration in cycles to reload from local core's L2 without conflict due to a marked load",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x4d142",
"EventName": "PM_MRK_DATA_FROM_L3",
"BriefDescription": "The processor's data cache was reloaded from local core's L3 due to a marked load",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x201e4",
"EventName": "PM_MRK_DATA_FROM_L3MISS",
"BriefDescription": "The processor's data cache was reloaded from a localtion other than the local core's L3 due to a marked load",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x2d12e",
"EventName": "PM_MRK_DATA_FROM_L3MISS_CYC",
"BriefDescription": "Duration in cycles to reload from a localtion other than the local core's L3 due to a marked load",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x2d122",
"EventName": "PM_MRK_DATA_FROM_L3_CYC",
"BriefDescription": "Duration in cycles to reload from local core's L3 due to a marked load",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x3d142",
"EventName": "PM_MRK_DATA_FROM_L3_DISP_CONFLICT",
"BriefDescription": "The processor's data cache was reloaded from local core's L3 with dispatch conflict due to a marked load",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x2c122",
"EventName": "PM_MRK_DATA_FROM_L3_DISP_CONFLICT_CYC",
"BriefDescription": "Duration in cycles to reload from local core's L3 with dispatch conflict due to a marked load",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x2d142",
"EventName": "PM_MRK_DATA_FROM_L3_MEPF",
"BriefDescription": "The processor's data cache was reloaded from local core's L3 without dispatch conflicts hit on Mepf state. due to a marked load",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x4d122",
"EventName": "PM_MRK_DATA_FROM_L3_MEPF_CYC",
"BriefDescription": "Duration in cycles to reload from local core's L3 without dispatch conflicts hit on Mepf state. due to a marked load",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x1d144",
"EventName": "PM_MRK_DATA_FROM_L3_NO_CONFLICT",
"BriefDescription": "The processor's data cache was reloaded from local core's L3 without conflict due to a marked load",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x4c124",
"EventName": "PM_MRK_DATA_FROM_L3_NO_CONFLICT_CYC",
"BriefDescription": "Duration in cycles to reload from local core's L3 without conflict due to a marked load",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x1d14c",
"EventName": "PM_MRK_DATA_FROM_LL4",
"BriefDescription": "The processor's data cache was reloaded from the local chip's L4 cache due to a marked load",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x4c12c",
"EventName": "PM_MRK_DATA_FROM_LL4_CYC",
"BriefDescription": "Duration in cycles to reload from the local chip's L4 cache due to a marked load",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x2d148",
"EventName": "PM_MRK_DATA_FROM_LMEM",
"BriefDescription": "The processor's data cache was reloaded from the local chip's Memory due to a marked load",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x4d128",
"EventName": "PM_MRK_DATA_FROM_LMEM_CYC",
"BriefDescription": "Duration in cycles to reload from the local chip's Memory due to a marked load",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x2d14c",
"EventName": "PM_MRK_DATA_FROM_MEMORY",
"BriefDescription": "The processor's data cache was reloaded from a memory location including L4 from local remote or distant due to a marked load",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x4d12c",
"EventName": "PM_MRK_DATA_FROM_MEMORY_CYC",
"BriefDescription": "Duration in cycles to reload from a memory location including L4 from local remote or distant due to a marked load",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x4d14a",
"EventName": "PM_MRK_DATA_FROM_OFF_CHIP_CACHE",
"BriefDescription": "The processor's data cache was reloaded either shared or modified data from another core's L2/L3 on a different chip (remote or distant) due to a marked load",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x2d12a",
"EventName": "PM_MRK_DATA_FROM_OFF_CHIP_CACHE_CYC",
"BriefDescription": "Duration in cycles to reload either shared or modified data from another core's L2/L3 on a different chip (remote or distant) due to a marked load",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x1d148",
"EventName": "PM_MRK_DATA_FROM_ON_CHIP_CACHE",
"BriefDescription": "The processor's data cache was reloaded either shared or modified data from another core's L2/L3 on the same chip due to a marked load",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x4c128",
"EventName": "PM_MRK_DATA_FROM_ON_CHIP_CACHE_CYC",
"BriefDescription": "Duration in cycles to reload either shared or modified data from another core's L2/L3 on the same chip due to a marked load",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x2d146",
"EventName": "PM_MRK_DATA_FROM_RL2L3_MOD",
"BriefDescription": "The processor's data cache was reloaded with Modified (M) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to a marked load",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x4d126",
"EventName": "PM_MRK_DATA_FROM_RL2L3_MOD_CYC",
"BriefDescription": "Duration in cycles to reload with Modified (M) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to a marked load",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x1d14a",
"EventName": "PM_MRK_DATA_FROM_RL2L3_SHR",
"BriefDescription": "The processor's data cache was reloaded with Shared (S) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to a marked load",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x4c12a",
"EventName": "PM_MRK_DATA_FROM_RL2L3_SHR_CYC",
"BriefDescription": "Duration in cycles to reload with Shared (S) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to a marked load",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x2d14a",
"EventName": "PM_MRK_DATA_FROM_RL4",
"BriefDescription": "The processor's data cache was reloaded from another chip's L4 on the same Node or Group ( Remote) due to a marked load",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x4d12a",
"EventName": "PM_MRK_DATA_FROM_RL4_CYC",
"BriefDescription": "Duration in cycles to reload from another chip's L4 on the same Node or Group ( Remote) due to a marked load",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x3d14a",
"EventName": "PM_MRK_DATA_FROM_RMEM",
"BriefDescription": "The processor's data cache was reloaded from another chip's memory on the same Node or Group ( Remote) due to a marked load",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x2c12a",
"EventName": "PM_MRK_DATA_FROM_RMEM_CYC",
"BriefDescription": "Duration in cycles to reload from another chip's memory on the same Node or Group ( Remote) due to a marked load",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x40118",
"EventName": "PM_MRK_DCACHE_RELOAD_INTV",
"BriefDescription": "Combined Intervention event",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x301e6",
"EventName": "PM_MRK_DERAT_MISS",
"BriefDescription": "Erat Miss (TLB Access) All page sizes",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x4d154",
"EventName": "PM_MRK_DERAT_MISS_16G",
"BriefDescription": "Marked Data ERAT Miss (Data TLB Access) page size 16G",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x3d154",
"EventName": "PM_MRK_DERAT_MISS_16M",
"BriefDescription": "Marked Data ERAT Miss (Data TLB Access) page size 16M",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x1d156",
"EventName": "PM_MRK_DERAT_MISS_4K",
"BriefDescription": "Marked Data ERAT Miss (Data TLB Access) page size 4K",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x2d154",
"EventName": "PM_MRK_DERAT_MISS_64K",
"BriefDescription": "Marked Data ERAT Miss (Data TLB Access) page size 64K",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x20132",
"EventName": "PM_MRK_DFU_FIN",
"BriefDescription": "Decimal Unit marked Instruction Finish",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x4f148",
"EventName": "PM_MRK_DPTEG_FROM_DL2L3_MOD",
"BriefDescription": "A Page Table Entry was loaded into the TLB with Modified (M) data from another chip's L2 or L3 on a different Node or Group (Distant), as this chip due to a marked data side request",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x3f148",
"EventName": "PM_MRK_DPTEG_FROM_DL2L3_SHR",
"BriefDescription": "A Page Table Entry was loaded into the TLB with Shared (S) data from another chip's L2 or L3 on a different Node or Group (Distant), as this chip due to a marked data side request",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x3f14c",
"EventName": "PM_MRK_DPTEG_FROM_DL4",
"BriefDescription": "A Page Table Entry was loaded into the TLB from another chip's L4 on a different Node or Group (Distant) due to a marked data side request",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x4f14c",
"EventName": "PM_MRK_DPTEG_FROM_DMEM",
"BriefDescription": "A Page Table Entry was loaded into the TLB from another chip's memory on the same Node or Group (Distant) due to a marked data side request",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x1f142",
"EventName": "PM_MRK_DPTEG_FROM_L2",
"BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L2 due to a marked data side request",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x1f14e",
"EventName": "PM_MRK_DPTEG_FROM_L2MISS",
"BriefDescription": "A Page Table Entry was loaded into the TLB from a localtion other than the local core's L2 due to a marked data side request",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x2f140",
"EventName": "PM_MRK_DPTEG_FROM_L2_MEPF",
"BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L2 hit without dispatch conflicts on Mepf state. due to a marked data side request",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x1f140",
"EventName": "PM_MRK_DPTEG_FROM_L2_NO_CONFLICT",
"BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L2 without conflict due to a marked data side request",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x4f142",
"EventName": "PM_MRK_DPTEG_FROM_L3",
"BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L3 due to a marked data side request",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x4f14e",
"EventName": "PM_MRK_DPTEG_FROM_L3MISS",
"BriefDescription": "A Page Table Entry was loaded into the TLB from a localtion other than the local core's L3 due to a marked data side request",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x3f142",
"EventName": "PM_MRK_DPTEG_FROM_L3_DISP_CONFLICT",
"BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L3 with dispatch conflict due to a marked data side request",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x2f142",
"EventName": "PM_MRK_DPTEG_FROM_L3_MEPF",
"BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L3 without dispatch conflicts hit on Mepf state. due to a marked data side request",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x1f144",
"EventName": "PM_MRK_DPTEG_FROM_L3_NO_CONFLICT",
"BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L3 without conflict due to a marked data side request",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x1f14c",
"EventName": "PM_MRK_DPTEG_FROM_LL4",
"BriefDescription": "A Page Table Entry was loaded into the TLB from the local chip's L4 cache due to a marked data side request",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x2f148",
"EventName": "PM_MRK_DPTEG_FROM_LMEM",
"BriefDescription": "A Page Table Entry was loaded into the TLB from the local chip's Memory due to a marked data side request",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x2f14c",
"EventName": "PM_MRK_DPTEG_FROM_MEMORY",
"BriefDescription": "A Page Table Entry was loaded into the TLB from a memory location including L4 from local remote or distant due to a marked data side request",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x4f14a",
"EventName": "PM_MRK_DPTEG_FROM_OFF_CHIP_CACHE",
"BriefDescription": "A Page Table Entry was loaded into the TLB either shared or modified data from another core's L2/L3 on a different chip (remote or distant) due to a marked data side request",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x1f148",
"EventName": "PM_MRK_DPTEG_FROM_ON_CHIP_CACHE",
"BriefDescription": "A Page Table Entry was loaded into the TLB either shared or modified data from another core's L2/L3 on the same chip due to a marked data side request",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x2f146",
"EventName": "PM_MRK_DPTEG_FROM_RL2L3_MOD",
"BriefDescription": "A Page Table Entry was loaded into the TLB with Modified (M) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to a marked data side request",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x1f14a",
"EventName": "PM_MRK_DPTEG_FROM_RL2L3_SHR",
"BriefDescription": "A Page Table Entry was loaded into the TLB with Shared (S) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to a marked data side request",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x2f14a",
"EventName": "PM_MRK_DPTEG_FROM_RL4",
"BriefDescription": "A Page Table Entry was loaded into the TLB from another chip's L4 on the same Node or Group ( Remote) due to a marked data side request",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x3f14a",
"EventName": "PM_MRK_DPTEG_FROM_RMEM",
"BriefDescription": "A Page Table Entry was loaded into the TLB from another chip's memory on the same Node or Group ( Remote) due to a marked data side request",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x401e4",
"EventName": "PM_MRK_DTLB_MISS",
"BriefDescription": "Marked dtlb miss",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x1d158",
"EventName": "PM_MRK_DTLB_MISS_16G",
"BriefDescription": "Marked Data TLB Miss page size 16G",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x4d156",
"EventName": "PM_MRK_DTLB_MISS_16M",
"BriefDescription": "Marked Data TLB Miss page size 16M",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x2d156",
"EventName": "PM_MRK_DTLB_MISS_4K",
"BriefDescription": "Marked Data TLB Miss page size 4k",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x3d156",
"EventName": "PM_MRK_DTLB_MISS_64K",
"BriefDescription": "Marked Data TLB Miss page size 64K",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x40154",
"EventName": "PM_MRK_FAB_RSP_BKILL",
"BriefDescription": "Marked store had to do a bkill",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x2f150",
"EventName": "PM_MRK_FAB_RSP_BKILL_CYC",
"BriefDescription": "cycles L2 RC took for a bkill",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x3015e",
"EventName": "PM_MRK_FAB_RSP_CLAIM_RTY",
"BriefDescription": "Sampled store did a rwitm and got a rty",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x30154",
"EventName": "PM_MRK_FAB_RSP_DCLAIM",
"BriefDescription": "Marked store had to do a dclaim",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x2f152",
"EventName": "PM_MRK_FAB_RSP_DCLAIM_CYC",
"BriefDescription": "cycles L2 RC took for a dclaim",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x4015e",
"EventName": "PM_MRK_FAB_RSP_RD_RTY",
"BriefDescription": "Sampled L2 reads retry count",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x1015e",
"EventName": "PM_MRK_FAB_RSP_RD_T_INTV",
"BriefDescription": "Sampled Read got a T intervention",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x4f150",
"EventName": "PM_MRK_FAB_RSP_RWITM_CYC",
"BriefDescription": "cycles L2 RC took for a rwitm",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x2015e",
"EventName": "PM_MRK_FAB_RSP_RWITM_RTY",
"BriefDescription": "Sampled store did a rwitm and got a rty",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x20134",
"EventName": "PM_MRK_FXU_FIN",
"BriefDescription": "fxu marked instr finish",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x401e0",
"EventName": "PM_MRK_INST_CMPL",
"BriefDescription": "marked instruction completed",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x20130",
"EventName": "PM_MRK_INST_DECODED",
"BriefDescription": "marked instruction decoded",
"PublicDescription": "marked instruction decoded. Name from ISU?"
},
- {,
+ {
"EventCode": "0x101e0",
"EventName": "PM_MRK_INST_DISP",
"BriefDescription": "The thread has dispatched a randomly sampled marked instruction",
"PublicDescription": "Marked Instruction dispatched"
},
- {,
+ {
"EventCode": "0x30130",
"EventName": "PM_MRK_INST_FIN",
"BriefDescription": "marked instruction finished",
"PublicDescription": "marked instr finish any unit"
},
- {,
+ {
"EventCode": "0x401e6",
"EventName": "PM_MRK_INST_FROM_L3MISS",
"BriefDescription": "Marked instruction was reloaded from a location beyond the local chiplet",
"PublicDescription": "n/a"
},
- {,
+ {
"EventCode": "0x10132",
"EventName": "PM_MRK_INST_ISSUED",
"BriefDescription": "Marked instruction issued",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x40134",
"EventName": "PM_MRK_INST_TIMEO",
"BriefDescription": "marked Instruction finish timeout (instruction lost)",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x101e4",
"EventName": "PM_MRK_L1_ICACHE_MISS",
"BriefDescription": "sampled Instruction suffered an icache Miss",
"PublicDescription": "Marked L1 Icache Miss"
},
- {,
+ {
"EventCode": "0x101ea",
"EventName": "PM_MRK_L1_RELOAD_VALID",
"BriefDescription": "Marked demand reload",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x20114",
"EventName": "PM_MRK_L2_RC_DISP",
"BriefDescription": "Marked Instruction RC dispatched in L2",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x3012a",
"EventName": "PM_MRK_L2_RC_DONE",
"BriefDescription": "Marked RC done",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x40116",
"EventName": "PM_MRK_LARX_FIN",
"BriefDescription": "Larx finished",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x1013e",
"EventName": "PM_MRK_LD_MISS_EXPOSED_CYC",
"BriefDescription": "Marked Load exposed Miss cycles",
"PublicDescription": "Marked Load exposed Miss (use edge detect to count #)"
},
- {,
+ {
"EventCode": "0x201e2",
"EventName": "PM_MRK_LD_MISS_L1",
"BriefDescription": "Marked DL1 Demand Miss counted at exec time",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x4013e",
"EventName": "PM_MRK_LD_MISS_L1_CYC",
"BriefDescription": "Marked ld latency",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x40132",
"EventName": "PM_MRK_LSU_FIN",
"BriefDescription": "lsu marked instr finish",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x20112",
"EventName": "PM_MRK_NTF_FIN",
"BriefDescription": "Marked next to finish instruction finished",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x1d15e",
"EventName": "PM_MRK_RUN_CYC",
"BriefDescription": "Marked run cycles",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x3013e",
"EventName": "PM_MRK_STALL_CMPLU_CYC",
"BriefDescription": "Marked Group completion Stall",
"PublicDescription": "Marked Group Completion Stall cycles (use edge detect to count #)"
},
- {,
+ {
"EventCode": "0x3e158",
"EventName": "PM_MRK_STCX_FAIL",
"BriefDescription": "marked stcx failed",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x10134",
"EventName": "PM_MRK_ST_CMPL",
"BriefDescription": "marked store completed and sent to nest",
"PublicDescription": "Marked store completed"
},
- {,
+ {
"EventCode": "0x30134",
"EventName": "PM_MRK_ST_CMPL_INT",
"BriefDescription": "marked store finished with intervention",
"PublicDescription": "marked store complete (data home) with intervention"
},
- {,
+ {
"EventCode": "0x3f150",
"EventName": "PM_MRK_ST_DRAIN_TO_L2DISP_CYC",
"BriefDescription": "cycles to drain st from core to L2",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x3012c",
"EventName": "PM_MRK_ST_FWD",
"BriefDescription": "Marked st forwards",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x1f150",
"EventName": "PM_MRK_ST_L2DISP_TO_CMPL_CYC",
"BriefDescription": "cycles from L2 rc disp to l2 rc completion",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x20138",
"EventName": "PM_MRK_ST_NEST",
"BriefDescription": "Marked store sent to nest",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x30132",
"EventName": "PM_MRK_VSU_FIN",
"BriefDescription": "VSU marked instr finish",
"PublicDescription": "vsu (fpu) marked instr finish"
},
- {,
+ {
"EventCode": "0x3d15e",
"EventName": "PM_MULT_MRK",
"BriefDescription": "mult marked instr",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x15152",
"EventName": "PM_SYNC_MRK_BR_LINK",
"BriefDescription": "Marked Branch and link branch that can cause a synchronous interrupt",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x1515c",
"EventName": "PM_SYNC_MRK_BR_MPRED",
"BriefDescription": "Marked Branch mispredict that can cause a synchronous interrupt",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x15156",
"EventName": "PM_SYNC_MRK_FX_DIVIDE",
"BriefDescription": "Marked fixed point divide that can cause a synchronous interrupt",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x15158",
"EventName": "PM_SYNC_MRK_L2HIT",
"BriefDescription": "Marked L2 Hits that can throw a synchronous interrupt",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x1515a",
"EventName": "PM_SYNC_MRK_L2MISS",
"BriefDescription": "Marked L2 Miss that can throw a synchronous interrupt",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x15154",
"EventName": "PM_SYNC_MRK_L3MISS",
"BriefDescription": "Marked L3 misses that can throw a synchronous interrupt",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x15150",
"EventName": "PM_SYNC_MRK_PROBE_NOP",
"BriefDescription": "Marked probeNops which can cause synchronous interrupts",
"PublicDescription": ""
- },
+ }
]
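
The only change in every hunk above is mechanical: the stray "{," token that opened
each event object is replaced with a bare "{", and the trailing "}," before the
closing "]" loses its comma, which turns the file into strictly valid JSON. As an
aside for anyone wanting to apply the same cleanup elsewhere, the following is a
minimal sketch of that transformation; the script name, the in-place rewrite, and
the use of Python are illustrative assumptions, not part of this patch:

    import json
    import re
    import sys

    def fix_file(path):
        with open(path) as f:
            text = f.read()
        # "{," is not a valid JSON object opener; normalize it to "{".
        # The broken files use it on a line of its own, so anchor on that.
        fixed = re.sub(r"^([ \t]*)\{,[ \t]*$", r"\1{", text, flags=re.M)
        # The final object in each array also carried a trailing comma
        # ("}," immediately before "]"), which strict JSON forbids.
        fixed = re.sub(r"\},(\s*\])", r"}\1", fixed)
        json.loads(fixed)  # raises json.JSONDecodeError if anything is left
        with open(path, "w") as f:
            f.write(fixed)

    if __name__ == "__main__":
        for p in sys.argv[1:]:
            fix_file(p)

The final json.loads() call is the point of the exercise: before this patch the
files could not be parsed by a strict JSON parser at all.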
diff --git a/tools/perf/pmu-events/arch/powerpc/power8/memory.json b/tools/perf/pmu-events/arch/powerpc/power8/memory.json
index 87cdaadba7bd..2ba33420e20d 100644
--- a/tools/perf/pmu-events/arch/powerpc/power8/memory.json
+++ b/tools/perf/pmu-events/arch/powerpc/power8/memory.json
@@ -1,212 +1,212 @@
[
- {,
+ {
"EventCode": "0x10050",
"EventName": "PM_CHIP_PUMP_CPRED",
"BriefDescription": "Initial and Final Pump Scope was chip pump (prediction=correct) for all data types excluding data prefetch (demand load,inst prefetch,inst fetch,xlate)",
"PublicDescription": "Initial and Final Pump Scope and data sourced across this scope was chip pump (prediction=correct) for all data types ( demand load,data,inst prefetch,inst fetch,xlate (I or d)"
},
- {,
+ {
"EventCode": "0x1c050",
"EventName": "PM_DATA_CHIP_PUMP_CPRED",
"BriefDescription": "Initial and Final Pump Scope was chip pump (prediction=correct) for a demand load",
"PublicDescription": "Initial and Final Pump Scope and data sourced across this scope was chip pump (prediction=correct) for a demand load"
},
- {,
+ {
"EventCode": "0x4c04c",
"EventName": "PM_DATA_FROM_DMEM",
"BriefDescription": "The processor's data cache was reloaded from another chip's memory on the same Node or Group (Distant) due to a demand load",
"PublicDescription": "The processor's data cache was reloaded from another chip's memory on the same Node or Group (Distant) due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
},
- {,
+ {
"EventCode": "0x2c048",
"EventName": "PM_DATA_FROM_LMEM",
"BriefDescription": "The processor's data cache was reloaded from the local chip's Memory due to a demand load",
"PublicDescription": "The processor's data cache was reloaded from the local chip's Memory due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
},
- {,
+ {
"EventCode": "0x2c04c",
"EventName": "PM_DATA_FROM_MEMORY",
"BriefDescription": "The processor's data cache was reloaded from a memory location including L4 from local remote or distant due to a demand load",
"PublicDescription": "The processor's data cache was reloaded from a memory location including L4 from local remote or distant due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
},
- {,
+ {
"EventCode": "0x2c04a",
"EventName": "PM_DATA_FROM_RL4",
"BriefDescription": "The processor's data cache was reloaded from another chip's L4 on the same Node or Group ( Remote) due to a demand load",
"PublicDescription": "The processor's data cache was reloaded from another chip's L4 on the same Node or Group ( Remote) due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
},
- {,
+ {
"EventCode": "0x3c04a",
"EventName": "PM_DATA_FROM_RMEM",
"BriefDescription": "The processor's data cache was reloaded from another chip's memory on the same Node or Group ( Remote) due to a demand load",
"PublicDescription": "The processor's data cache was reloaded from another chip's memory on the same Node or Group ( Remote) due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
},
- {,
+ {
"EventCode": "0x2c050",
"EventName": "PM_DATA_GRP_PUMP_CPRED",
"BriefDescription": "Initial and Final Pump Scope was group pump (prediction=correct) for a demand load",
"PublicDescription": "Initial and Final Pump Scope and data sourced across this scope was group pump for a demand load"
},
- {,
+ {
"EventCode": "0x2c052",
"EventName": "PM_DATA_GRP_PUMP_MPRED",
"BriefDescription": "Final Pump Scope (Group) ended up either larger or smaller than Initial Pump Scope for a demand load",
"PublicDescription": "Final Pump Scope(Group) to get data sourced, ended up larger than Initial Pump Scope OR Final Pump Scope(Group) got data from source that was at smaller scope(Chip) Final pump was group pump and initial pump was chip or final and initial pump was gro"
},
- {,
+ {
"EventCode": "0x1c052",
"EventName": "PM_DATA_GRP_PUMP_MPRED_RTY",
"BriefDescription": "Final Pump Scope (Group) ended up larger than Initial Pump Scope (Chip) for a demand load",
"PublicDescription": "Final Pump Scope(Group) to get data sourced, ended up larger than Initial Pump Scope (Chip) Final pump was group pump and initial pump was chip pumpfor a demand load"
},
- {,
+ {
"EventCode": "0x1c054",
"EventName": "PM_DATA_PUMP_CPRED",
"BriefDescription": "Pump prediction correct. Counts across all types of pumps for a demand load",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x4c052",
"EventName": "PM_DATA_PUMP_MPRED",
"BriefDescription": "Pump misprediction. Counts across all types of pumps for a demand load",
"PublicDescription": "Pump Mis prediction Counts across all types of pumpsfor a demand load"
},
- {,
+ {
"EventCode": "0x3c050",
"EventName": "PM_DATA_SYS_PUMP_CPRED",
"BriefDescription": "Initial and Final Pump Scope was system pump (prediction=correct) for a demand load",
"PublicDescription": "Initial and Final Pump Scope and data sourced across this scope was system pump for a demand load"
},
- {,
+ {
"EventCode": "0x3c052",
"EventName": "PM_DATA_SYS_PUMP_MPRED",
"BriefDescription": "Final Pump Scope (system) mispredicted. Either the original scope was too small (Chip/Group) or the original scope was System and it should have been smaller. Counts for a demand load",
"PublicDescription": "Final Pump Scope(system) to get data sourced, ended up larger than Initial Pump Scope(Chip/Group) OR Final Pump Scope(system) got data from source that was at smaller scope(Chip/group) Final pump was system pump and initial pump was chip or group or"
},
- {,
+ {
"EventCode": "0x4c050",
"EventName": "PM_DATA_SYS_PUMP_MPRED_RTY",
"BriefDescription": "Final Pump Scope (system) ended up larger than Initial Pump Scope (Chip/Group) for a demand load",
"PublicDescription": "Final Pump Scope(system) to get data sourced, ended up larger than Initial Pump Scope (Chip or Group) for a demand load"
},
- {,
+ {
"EventCode": "0x3e04c",
"EventName": "PM_DPTEG_FROM_DL4",
"BriefDescription": "A Page Table Entry was loaded into the TLB from another chip's L4 on a different Node or Group (Distant) due to a data side request",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x4e04c",
"EventName": "PM_DPTEG_FROM_DMEM",
"BriefDescription": "A Page Table Entry was loaded into the TLB from another chip's memory on the same Node or Group (Distant) due to a data side request",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x3e04a",
"EventName": "PM_DPTEG_FROM_RMEM",
"BriefDescription": "A Page Table Entry was loaded into the TLB from another chip's memory on the same Node or Group ( Remote) due to a data side request",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x20050",
"EventName": "PM_GRP_PUMP_CPRED",
"BriefDescription": "Initial and Final Pump Scope and data sourced across this scope was group pump for all data types excluding data prefetch (demand load,inst prefetch,inst fetch,xlate)",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x20052",
"EventName": "PM_GRP_PUMP_MPRED",
"BriefDescription": "Final Pump Scope (Group) ended up either larger or smaller than Initial Pump Scope for all data types excluding data prefetch (demand load,inst prefetch,inst fetch,xlate)",
"PublicDescription": "Final Pump Scope(Group) to get data sourced, ended up larger than Initial Pump Scope OR Final Pump Scope(Group) got data from source that was at smaller scope(Chip) Final pump was group pump and initial pump was chip or final and initial pump was gro"
},
- {,
+ {
"EventCode": "0x10052",
"EventName": "PM_GRP_PUMP_MPRED_RTY",
"BriefDescription": "Final Pump Scope (Group) ended up larger than Initial Pump Scope (Chip) for all data types excluding data prefetch (demand load,inst prefetch,inst fetch,xlate)",
"PublicDescription": "Final Pump Scope(Group) to get data sourced, ended up larger than Initial Pump Scope (Chip) Final pump was group pump and initial pump was chip pumpfor all data types excluding data prefetch (demand load,inst prefetch,inst fetch,xlate)"
},
- {,
+ {
"EventCode": "0x18082",
"EventName": "PM_L3_CO_MEPF",
"BriefDescription": "L3 CO of line in Mep state ( includes casthrough",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x4c058",
"EventName": "PM_MEM_CO",
"BriefDescription": "Memory castouts from this lpar",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x10058",
"EventName": "PM_MEM_LOC_THRESH_IFU",
"BriefDescription": "Local Memory above threshold for IFU speculation control",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x40056",
"EventName": "PM_MEM_LOC_THRESH_LSU_HIGH",
"BriefDescription": "Local memory above threshold for LSU medium",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x1c05e",
"EventName": "PM_MEM_LOC_THRESH_LSU_MED",
"BriefDescription": "Local memory above theshold for data prefetch",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x2c058",
"EventName": "PM_MEM_PREF",
"BriefDescription": "Memory prefetch for this lpar. Includes L4",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x10056",
"EventName": "PM_MEM_READ",
"BriefDescription": "Reads from Memory from this lpar (includes data/inst/xlate/l1prefetch/inst prefetch). Includes L4",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x3c05e",
"EventName": "PM_MEM_RWITM",
"BriefDescription": "Memory rwitm for this lpar",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x3006e",
"EventName": "PM_NEST_REF_CLK",
"BriefDescription": "Multiply by 4 to obtain the number of PB cycles",
"PublicDescription": "Nest reference clocks"
},
- {,
+ {
"EventCode": "0x10054",
"EventName": "PM_PUMP_CPRED",
"BriefDescription": "Pump prediction correct. Counts across all types of pumps for all data types excluding data prefetch (demand load,inst prefetch,inst fetch,xlate)",
"PublicDescription": "Pump prediction correct. Counts across all types of pumpsfor all data types excluding data prefetch (demand load,inst prefetch,inst fetch,xlate)"
},
- {,
+ {
"EventCode": "0x40052",
"EventName": "PM_PUMP_MPRED",
"BriefDescription": "Pump misprediction. Counts across all types of pumps for all data types excluding data prefetch (demand load,inst prefetch,inst fetch,xlate)",
"PublicDescription": "Pump Mis prediction Counts across all types of pumpsfor all data types excluding data prefetch (demand load,inst prefetch,inst fetch,xlate)"
},
- {,
+ {
"EventCode": "0x30050",
"EventName": "PM_SYS_PUMP_CPRED",
"BriefDescription": "Initial and Final Pump Scope was system pump for all data types excluding data prefetch (demand load,inst prefetch,inst fetch,xlate)",
"PublicDescription": "Initial and Final Pump Scope and data sourced across this scope was system pump for all data types excluding data prefetch (demand load,inst prefetch,inst fetch,xlate)"
},
- {,
+ {
"EventCode": "0x30052",
"EventName": "PM_SYS_PUMP_MPRED",
"BriefDescription": "Final Pump Scope (system) mispredicted. Either the original scope was too small (Chip/Group) or the original scope was System and it should have been smaller. Counts for all data types excluding data prefetch (demand load,inst prefetch,inst fetch,xlate)",
"PublicDescription": "Final Pump Scope(system) to get data sourced, ended up larger than Initial Pump Scope(Chip/Group) OR Final Pump Scope(system) got data from source that was at smaller scope(Chip/group) Final pump was system pump and initial pump was chip or group or"
},
- {,
+ {
"EventCode": "0x40050",
"EventName": "PM_SYS_PUMP_MPRED_RTY",
"BriefDescription": "Final Pump Scope (system) ended up larger than Initial Pump Scope (Chip/Group) for all data types excluding data prefetch (demand load,inst prefetch,inst fetch,xlate)",
"PublicDescription": "Final Pump Scope(system) to get data sourced, ended up larger than Initial Pump Scope (Chip or Group) for all data types excluding data prefetch (demand load,inst prefetch,inst fetch,xlate)"
- },
+ }
]
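
With the "{," openers gone, each of these files is a plain JSON array of event
objects carrying EventCode, EventName, BriefDescription, and (possibly empty)
PublicDescription fields, so the whole directory can be sanity-checked with a
strict parser. A small verification sketch follows; the path glob, the choice of
required keys, and the script itself are assumptions for illustration only
(mirroring the files shown in this patch), not tooling that ships with perf:

    import glob
    import json

    # Every event object in these files carries at least these three keys;
    # PublicDescription is present but may be the empty string, so it is
    # deliberately not required here.
    REQUIRED = ("EventCode", "EventName", "BriefDescription")

    def check_tree(pattern="tools/perf/pmu-events/arch/powerpc/power8/*.json"):
        bad = []
        for path in sorted(glob.glob(pattern)):
            with open(path) as f:
                events = json.load(f)  # fails loudly on any leftover "{,"
            for ev in events:
                missing = [k for k in REQUIRED if k not in ev]
                if missing:
                    bad.append((path, ev.get("EventName"), missing))
        return bad

    if __name__ == "__main__":
        for path, name, missing in check_tree():
            print(f"{path}: {name} missing {missing}")

Run from the kernel source root, this would print nothing for the post-patch
files, since every object in the hunks above already carries all three keys.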
diff --git a/tools/perf/pmu-events/arch/powerpc/power8/other.json b/tools/perf/pmu-events/arch/powerpc/power8/other.json
index b2a3df07fbc4..f4e760cab111 100644
--- a/tools/perf/pmu-events/arch/powerpc/power8/other.json
+++ b/tools/perf/pmu-events/arch/powerpc/power8/other.json
@@ -1,3446 +1,3446 @@
[
- {,
+ {
"EventCode": "0x1f05e",
"EventName": "PM_1LPAR_CYC",
"BriefDescription": "Number of cycles in single lpar mode. All threads in the core are assigned to the same lpar",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x2006e",
"EventName": "PM_2LPAR_CYC",
"BriefDescription": "Cycles in 2-lpar mode. Threads 0-3 belong to Lpar0 and threads 4-7 belong to Lpar1",
"PublicDescription": "Number of cycles in 2 lpar mode"
},
- {,
+ {
"EventCode": "0x4e05e",
"EventName": "PM_4LPAR_CYC",
"BriefDescription": "Number of cycles in 4 LPAR mode. Threads 0-1 belong to lpar0, threads 2-3 belong to lpar1, threads 4-5 belong to lpar2, and threads 6-7 belong to lpar3",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x610050",
"EventName": "PM_ALL_CHIP_PUMP_CPRED",
"BriefDescription": "Initial and Final Pump Scope was chip pump (prediction=correct) for all data types (demand load,data prefetch,inst prefetch,inst fetch,xlate)",
"PublicDescription": "Initial and Final Pump Scope and data sourced across this scope was chip pump (prediction=correct) for all data types ( demand load,data,inst prefetch,inst fetch,xlate (I or d)"
},
- {,
+ {
"EventCode": "0x520050",
"EventName": "PM_ALL_GRP_PUMP_CPRED",
"BriefDescription": "Initial and Final Pump Scope and data sourced across this scope was group pump for all data types (demand load,data prefetch,inst prefetch,inst fetch,xlate)",
"PublicDescription": "Initial and Final Pump Scope and data sourced across this scope was group pump for all data types excluding data prefetch (demand load,inst prefetch,inst fetch,xlate)"
},
- {,
+ {
"EventCode": "0x620052",
"EventName": "PM_ALL_GRP_PUMP_MPRED",
"BriefDescription": "Final Pump Scope (Group) ended up either larger or smaller than Initial Pump Scope for all data types (demand load,data prefetch,inst prefetch,inst fetch,xlate)",
"PublicDescription": "Final Pump Scope(Group) to get data sourced, ended up larger than Initial Pump Scope OR Final Pump Scope(Group) got data from source that was at smaller scope(Chip) Final pump was group pump and initial pump was chip or final and initial pump was gro"
},
- {,
+ {
"EventCode": "0x610052",
"EventName": "PM_ALL_GRP_PUMP_MPRED_RTY",
"BriefDescription": "Final Pump Scope (Group) ended up larger than Initial Pump Scope (Chip) for all data types (demand load,data prefetch,inst prefetch,inst fetch,xlate)",
"PublicDescription": "Final Pump Scope(Group) to get data sourced, ended up larger than Initial Pump Scope (Chip) Final pump was group pump and initial pump was chip pumpfor all data types excluding data prefetch (demand load,inst prefetch,inst fetch,xlate)"
},
- {,
+ {
"EventCode": "0x610054",
"EventName": "PM_ALL_PUMP_CPRED",
"BriefDescription": "Pump prediction correct. Counts across all types of pumps for all data types (demand load,data prefetch,inst prefetch,inst fetch,xlate)",
"PublicDescription": "Pump prediction correct. Counts across all types of pumpsfor all data types excluding data prefetch (demand load,inst prefetch,inst fetch,xlate)"
},
- {,
+ {
"EventCode": "0x640052",
"EventName": "PM_ALL_PUMP_MPRED",
"BriefDescription": "Pump misprediction. Counts across all types of pumps for all data types (demand load,data prefetch,inst prefetch,inst fetch,xlate)",
"PublicDescription": "Pump Mis prediction Counts across all types of pumpsfor all data types excluding data prefetch (demand load,inst prefetch,inst fetch,xlate)"
},
- {,
+ {
"EventCode": "0x630050",
"EventName": "PM_ALL_SYS_PUMP_CPRED",
"BriefDescription": "Initial and Final Pump Scope was system pump for all data types (demand load,data prefetch,inst prefetch,inst fetch,xlate)",
"PublicDescription": "Initial and Final Pump Scope and data sourced across this scope was system pump for all data types excluding data prefetch (demand load,inst prefetch,inst fetch,xlate)"
},
- {,
+ {
"EventCode": "0x630052",
"EventName": "PM_ALL_SYS_PUMP_MPRED",
"BriefDescription": "Final Pump Scope (system) mispredicted. Either the original scope was too small (Chip/Group) or the original scope was System and it should have been smaller. Counts for all data types (demand load,data prefetch,inst prefetch,inst fetch,xlate)",
"PublicDescription": "Final Pump Scope(system) to get data sourced, ended up larger than Initial Pump Scope(Chip/Group) OR Final Pump Scope(system) got data from source that was at smaller scope(Chip/group) Final pump was system pump and initial pump was chip or group or"
},
- {,
+ {
"EventCode": "0x640050",
"EventName": "PM_ALL_SYS_PUMP_MPRED_RTY",
"BriefDescription": "Final Pump Scope (system) ended up larger than Initial Pump Scope (Chip/Group) for all data types (demand load,data prefetch,inst prefetch,inst fetch,xlate)",
"PublicDescription": "Final Pump Scope(system) to get data sourced, ended up larger than Initial Pump Scope (Chip or Group) for all data types excluding data prefetch (demand load,inst prefetch,inst fetch,xlate)"
},
- {,
+ {
"EventCode": "0x4082",
"EventName": "PM_BANK_CONFLICT",
"BriefDescription": "Read blocked due to interleave conflict. The ifar logic will detect an interleave conflict and kill the data that was read that cycle",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x5086",
"EventName": "PM_BR_BC_8",
"BriefDescription": "Pairable BC+8 branch that has not been converted to a Resolve Finished in the BRU pipeline",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x5084",
"EventName": "PM_BR_BC_8_CONV",
"BriefDescription": "Pairable BC+8 branch that was converted to a Resolve Finished in the BRU pipeline",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x40ac",
"EventName": "PM_BR_MPRED_CCACHE",
"BriefDescription": "Conditional Branch Completed that was Mispredicted due to the Count Cache Target Prediction",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x40b8",
"EventName": "PM_BR_MPRED_CR",
"BriefDescription": "Conditional Branch Completed that was Mispredicted due to the BHT Direction Prediction (taken/not taken)",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x40ae",
"EventName": "PM_BR_MPRED_LSTACK",
"BriefDescription": "Conditional Branch Completed that was Mispredicted due to the Link Stack Target Prediction",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x40ba",
"EventName": "PM_BR_MPRED_TA",
"BriefDescription": "Conditional Branch Completed that was Mispredicted due to the Target Address Prediction from the Count Cache or Link Stack. Only XL-form branches that resolved Taken set this event",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x10138",
"EventName": "PM_BR_MRK_2PATH",
"BriefDescription": "marked two path branch",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x409c",
"EventName": "PM_BR_PRED_BR0",
"BriefDescription": "Conditional Branch Completed on BR0 (1st branch in group) in which the HW predicted the Direction or Target",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x409e",
"EventName": "PM_BR_PRED_BR1",
"BriefDescription": "Conditional Branch Completed on BR1 (2nd branch in group) in which the HW predicted the Direction or Target. Note: BR1 can only be used in Single Thread Mode. In all of the SMT modes, only one branch can complete, thus BR1 is unused",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x489c",
"EventName": "PM_BR_PRED_BR_CMPL",
"BriefDescription": "Completion Time Event. This event can also be calculated from the direct bus as follows: if_pc_br0_br_pred(0) OR if_pc_br0_br_pred(1)",
"PublicDescription": "IFU"
},
- {,
+ {
"EventCode": "0x40a4",
"EventName": "PM_BR_PRED_CCACHE_BR0",
"BriefDescription": "Conditional Branch Completed on BR0 that used the Count Cache for Target Prediction",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x40a6",
"EventName": "PM_BR_PRED_CCACHE_BR1",
"BriefDescription": "Conditional Branch Completed on BR1 that used the Count Cache for Target Prediction",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x48a4",
"EventName": "PM_BR_PRED_CCACHE_CMPL",
"BriefDescription": "Completion Time Event. This event can also be calculated from the direct bus as follows: if_pc_br0_br_pred(0) AND if_pc_br0_pred_type",
"PublicDescription": "IFU"
},
- {,
+ {
"EventCode": "0x40b0",
"EventName": "PM_BR_PRED_CR_BR0",
"BriefDescription": "Conditional Branch Completed on BR0 that had its direction predicted. I-form branches do not set this event. In addition, B-form branches which do not use the BHT do not set this event - these are branches with BO-field set to 'always taken' and branches",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x40b2",
"EventName": "PM_BR_PRED_CR_BR1",
"BriefDescription": "Conditional Branch Completed on BR1 that had its direction predicted. I-form branches do not set this event. In addition, B-form branches which do not use the BHT do not set this event - these are branches with BO-field set to 'always taken' and branches",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x48b0",
"EventName": "PM_BR_PRED_CR_CMPL",
"BriefDescription": "Completion Time Event. This event can also be calculated from the direct bus as follows: if_pc_br0_br_pred(1)='1'",
"PublicDescription": "IFU"
},
- {,
+ {
"EventCode": "0x40a8",
"EventName": "PM_BR_PRED_LSTACK_BR0",
"BriefDescription": "Conditional Branch Completed on BR0 that used the Link Stack for Target Prediction",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x40aa",
"EventName": "PM_BR_PRED_LSTACK_BR1",
"BriefDescription": "Conditional Branch Completed on BR1 that used the Link Stack for Target Prediction",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x48a8",
"EventName": "PM_BR_PRED_LSTACK_CMPL",
"BriefDescription": "Completion Time Event. This event can also be calculated from the direct bus as follows: if_pc_br0_br_pred(0) AND (not if_pc_br0_pred_type)",
"PublicDescription": "IFU"
},
- {,
+ {
"EventCode": "0x40b4",
"EventName": "PM_BR_PRED_TA_BR0",
"BriefDescription": "Conditional Branch Completed on BR0 that had its target address predicted. Only XL-form branches set this event",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x40b6",
"EventName": "PM_BR_PRED_TA_BR1",
"BriefDescription": "Conditional Branch Completed on BR1 that had its target address predicted. Only XL-form branches set this event",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x48b4",
"EventName": "PM_BR_PRED_TA_CMPL",
"BriefDescription": "Completion Time Event. This event can also be calculated from the direct bus as follows: if_pc_br0_br_pred(0)='1'",
"PublicDescription": "IFU"
},
- {,
+ {
"EventCode": "0x40a0",
"EventName": "PM_BR_UNCOND_BR0",
"BriefDescription": "Unconditional Branch Completed on BR0. HW branch prediction was not used for this branch. This can be an I-form branch, a B-form branch with BO-field set to branch always, or a B-form branch which was coverted to a Resolve",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x40a2",
"EventName": "PM_BR_UNCOND_BR1",
"BriefDescription": "Unconditional Branch Completed on BR1. HW branch prediction was not used for this branch. This can be an I-form branch, a B-form branch with BO-field set to branch always, or a B-form branch which was coverted to a Resolve",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x48a0",
"EventName": "PM_BR_UNCOND_CMPL",
"BriefDescription": "Completion Time Event. This event can also be calculated from the direct bus as follows: if_pc_br0_br_pred=00 AND if_pc_br0_completed",
"PublicDescription": "IFU"
},
- {,
+ {
"EventCode": "0x3094",
"EventName": "PM_CASTOUT_ISSUED",
"BriefDescription": "Castouts issued",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x3096",
"EventName": "PM_CASTOUT_ISSUED_GPR",
"BriefDescription": "Castouts issued GPR",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x2090",
"EventName": "PM_CLB_HELD",
"BriefDescription": "CLB Hold: Any Reason",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x2d018",
"EventName": "PM_CMPLU_STALL_BRU_CRU",
"BriefDescription": "Completion stall due to IFU",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x30026",
"EventName": "PM_CMPLU_STALL_COQ_FULL",
"BriefDescription": "Completion stall due to CO q full",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x30038",
"EventName": "PM_CMPLU_STALL_FLUSH",
"BriefDescription": "completion stall due to flush by own thread",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x30028",
"EventName": "PM_CMPLU_STALL_MEM_ECC_DELAY",
"BriefDescription": "Completion stall due to mem ECC delay",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x2e01c",
"EventName": "PM_CMPLU_STALL_NO_NTF",
"BriefDescription": "Completion stall due to nop",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x2e01e",
"EventName": "PM_CMPLU_STALL_NTCG_FLUSH",
"BriefDescription": "Completion stall due to ntcg flush",
"PublicDescription": "Completion stall due to reject (load hit store)"
},
- {,
+ {
"EventCode": "0x4c010",
"EventName": "PM_CMPLU_STALL_REJECT",
"BriefDescription": "Completion stall due to LSU reject",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x2c01a",
"EventName": "PM_CMPLU_STALL_REJECT_LHS",
"BriefDescription": "Completion stall due to reject (load hit store)",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x4c014",
"EventName": "PM_CMPLU_STALL_REJ_LMQ_FULL",
"BriefDescription": "Completion stall due to LSU reject LMQ full",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x4d010",
"EventName": "PM_CMPLU_STALL_SCALAR",
"BriefDescription": "Completion stall due to VSU scalar instruction",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x2d010",
"EventName": "PM_CMPLU_STALL_SCALAR_LONG",
"BriefDescription": "Completion stall due to VSU scalar long latency instruction",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x2c014",
"EventName": "PM_CMPLU_STALL_STORE",
"BriefDescription": "Completion stall by stores this includes store agen finishes in pipe LS0/LS1 and store data finishes in LS2/LS3",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x2d014",
"EventName": "PM_CMPLU_STALL_VECTOR",
"BriefDescription": "Completion stall due to VSU vector instruction",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x4d012",
"EventName": "PM_CMPLU_STALL_VECTOR_LONG",
"BriefDescription": "Completion stall due to VSU vector long instruction",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x2d012",
"EventName": "PM_CMPLU_STALL_VSU",
"BriefDescription": "Completion stall due to VSU instruction",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x16083",
"EventName": "PM_CO0_ALLOC",
"BriefDescription": "CO mach 0 Busy. Used by PMU to sample ave RC livetime(mach0 used as sample point)",
"PublicDescription": "0.0"
},
- {,
+ {
"EventCode": "0x16082",
"EventName": "PM_CO0_BUSY",
"BriefDescription": "CO mach 0 Busy. Used by PMU to sample ave RC livetime(mach0 used as sample point)",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x3608a",
"EventName": "PM_CO_USAGE",
"BriefDescription": "Continuous 16 cycle(2to1) window where this signals rotates thru sampling each L2 CO machine busy. PMU uses this wave to then do 16 cyc count to sample total number of machs running",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x40066",
"EventName": "PM_CRU_FIN",
"BriefDescription": "IFU Finished a (non-branch) instruction",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x61c050",
"EventName": "PM_DATA_ALL_CHIP_PUMP_CPRED",
"BriefDescription": "Initial and Final Pump Scope was chip pump (prediction=correct) for either demand loads or data prefetch",
"PublicDescription": "Initial and Final Pump Scope and data sourced across this scope was chip pump (prediction=correct) for a demand load"
},
- {,
+ {
"EventCode": "0x64c048",
"EventName": "PM_DATA_ALL_FROM_DL2L3_MOD",
"BriefDescription": "The processor's data cache was reloaded with Modified (M) data from another chip's L2 or L3 on a different Node or Group (Distant), as this chip due to either demand loads or data prefetch",
"PublicDescription": "The processor's data cache was reloaded with Modified (M) data from another chip's L2 or L3 on a different Node or Group (Distant), as this chip due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
},
- {,
+ {
"EventCode": "0x63c048",
"EventName": "PM_DATA_ALL_FROM_DL2L3_SHR",
"BriefDescription": "The processor's data cache was reloaded with Shared (S) data from another chip's L2 or L3 on a different Node or Group (Distant), as this chip due to either demand loads or data prefetch",
"PublicDescription": "The processor's data cache was reloaded with Shared (S) data from another chip's L2 or L3 on a different Node or Group (Distant), as this chip due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
},
- {,
+ {
"EventCode": "0x63c04c",
"EventName": "PM_DATA_ALL_FROM_DL4",
"BriefDescription": "The processor's data cache was reloaded from another chip's L4 on a different Node or Group (Distant) due to either demand loads or data prefetch",
"PublicDescription": "The processor's data cache was reloaded from another chip's L4 on a different Node or Group (Distant) due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
},
- {,
+ {
"EventCode": "0x64c04c",
"EventName": "PM_DATA_ALL_FROM_DMEM",
"BriefDescription": "The processor's data cache was reloaded from another chip's memory on the same Node or Group (Distant) due to either demand loads or data prefetch",
"PublicDescription": "The processor's data cache was reloaded from another chip's memory on the same Node or Group (Distant) due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
},
- {,
+ {
"EventCode": "0x61c042",
"EventName": "PM_DATA_ALL_FROM_L2",
"BriefDescription": "The processor's data cache was reloaded from local core's L2 due to either demand loads or data prefetch",
"PublicDescription": "The processor's data cache was reloaded from local core's L2 due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
},
- {,
+ {
"EventCode": "0x64c046",
"EventName": "PM_DATA_ALL_FROM_L21_MOD",
"BriefDescription": "The processor's data cache was reloaded with Modified (M) data from another core's L2 on the same chip due to either demand loads or data prefetch",
"PublicDescription": "The processor's data cache was reloaded with Modified (M) data from another core's L2 on the same chip due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
},
- {,
+ {
"EventCode": "0x63c046",
"EventName": "PM_DATA_ALL_FROM_L21_SHR",
"BriefDescription": "The processor's data cache was reloaded with Shared (S) data from another core's L2 on the same chip due to either demand loads or data prefetch",
"PublicDescription": "The processor's data cache was reloaded with Shared (S) data from another core's L2 on the same chip due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
},
- {,
+ {
"EventCode": "0x61c04e",
"EventName": "PM_DATA_ALL_FROM_L2MISS_MOD",
"BriefDescription": "The processor's data cache was reloaded from a localtion other than the local core's L2 due to either demand loads or data prefetch",
"PublicDescription": "The processor's data cache was reloaded from a localtion other than the local core's L2 due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
},
- {,
+ {
"EventCode": "0x63c040",
"EventName": "PM_DATA_ALL_FROM_L2_DISP_CONFLICT_LDHITST",
"BriefDescription": "The processor's data cache was reloaded from local core's L2 with load hit store conflict due to either demand loads or data prefetch",
"PublicDescription": "The processor's data cache was reloaded from local core's L2 with load hit store conflict due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
},
- {,
+ {
"EventCode": "0x64c040",
"EventName": "PM_DATA_ALL_FROM_L2_DISP_CONFLICT_OTHER",
"BriefDescription": "The processor's data cache was reloaded from local core's L2 with dispatch conflict due to either demand loads or data prefetch",
"PublicDescription": "The processor's data cache was reloaded from local core's L2 with dispatch conflict due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
},
- {,
+ {
"EventCode": "0x62c040",
"EventName": "PM_DATA_ALL_FROM_L2_MEPF",
"BriefDescription": "The processor's data cache was reloaded from local core's L2 hit without dispatch conflicts on Mepf state due to either demand loads or data prefetch",
"PublicDescription": "The processor's data cache was reloaded from local core's L2 hit without dispatch conflicts on Mepf state due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
},
- {,
+ {
"EventCode": "0x61c040",
"EventName": "PM_DATA_ALL_FROM_L2_NO_CONFLICT",
"BriefDescription": "The processor's data cache was reloaded from local core's L2 without conflict due to either demand loads or data prefetch",
"PublicDescription": "The processor's data cache was reloaded from local core's L2 without conflict due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
},
- {,
+ {
"EventCode": "0x64c042",
"EventName": "PM_DATA_ALL_FROM_L3",
"BriefDescription": "The processor's data cache was reloaded from local core's L3 due to either demand loads or data prefetch",
"PublicDescription": "The processor's data cache was reloaded from local core's L3 due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
},
- {,
+ {
"EventCode": "0x64c044",
"EventName": "PM_DATA_ALL_FROM_L31_ECO_MOD",
"BriefDescription": "The processor's data cache was reloaded with Modified (M) data from another core's ECO L3 on the same chip due to either demand loads or data prefetch",
"PublicDescription": "The processor's data cache was reloaded with Modified (M) data from another core's ECO L3 on the same chip due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
},
- {,
+ {
"EventCode": "0x63c044",
"EventName": "PM_DATA_ALL_FROM_L31_ECO_SHR",
"BriefDescription": "The processor's data cache was reloaded with Shared (S) data from another core's ECO L3 on the same chip due to either demand loads or data prefetch",
"PublicDescription": "The processor's data cache was reloaded with Shared (S) data from another core's ECO L3 on the same chip due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
},
- {,
+ {
"EventCode": "0x62c044",
"EventName": "PM_DATA_ALL_FROM_L31_MOD",
"BriefDescription": "The processor's data cache was reloaded with Modified (M) data from another core's L3 on the same chip due to either demand loads or data prefetch",
"PublicDescription": "The processor's data cache was reloaded with Modified (M) data from another core's L3 on the same chip due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
},
- {,
+ {
"EventCode": "0x61c046",
"EventName": "PM_DATA_ALL_FROM_L31_SHR",
"BriefDescription": "The processor's data cache was reloaded with Shared (S) data from another core's L3 on the same chip due to either demand loads or data prefetch",
"PublicDescription": "The processor's data cache was reloaded with Shared (S) data from another core's L3 on the same chip due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
},
- {,
+ {
"EventCode": "0x64c04e",
"EventName": "PM_DATA_ALL_FROM_L3MISS_MOD",
"BriefDescription": "The processor's data cache was reloaded from a localtion other than the local core's L3 due to either demand loads or data prefetch",
"PublicDescription": "The processor's data cache was reloaded from a localtion other than the local core's L3 due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
},
- {,
+ {
"EventCode": "0x63c042",
"EventName": "PM_DATA_ALL_FROM_L3_DISP_CONFLICT",
"BriefDescription": "The processor's data cache was reloaded from local core's L3 with dispatch conflict due to either demand loads or data prefetch",
"PublicDescription": "The processor's data cache was reloaded from local core's L3 with dispatch conflict due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
},
- {,
+ {
"EventCode": "0x62c042",
"EventName": "PM_DATA_ALL_FROM_L3_MEPF",
"BriefDescription": "The processor's data cache was reloaded from local core's L3 without dispatch conflicts hit on Mepf state due to either demand loads or data prefetch",
"PublicDescription": "The processor's data cache was reloaded from local core's L3 without dispatch conflicts hit on Mepf state due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
},
- {,
+ {
"EventCode": "0x61c044",
"EventName": "PM_DATA_ALL_FROM_L3_NO_CONFLICT",
"BriefDescription": "The processor's data cache was reloaded from local core's L3 without conflict due to either demand loads or data prefetch",
"PublicDescription": "The processor's data cache was reloaded from local core's L3 without conflict due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
},
- {,
+ {
"EventCode": "0x61c04c",
"EventName": "PM_DATA_ALL_FROM_LL4",
"BriefDescription": "The processor's data cache was reloaded from the local chip's L4 cache due to either demand loads or data prefetch",
"PublicDescription": "The processor's data cache was reloaded from the local chip's L4 cache due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
},
- {,
+ {
"EventCode": "0x62c048",
"EventName": "PM_DATA_ALL_FROM_LMEM",
"BriefDescription": "The processor's data cache was reloaded from the local chip's Memory due to either demand loads or data prefetch",
"PublicDescription": "The processor's data cache was reloaded from the local chip's Memory due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
},
- {,
+ {
"EventCode": "0x62c04c",
"EventName": "PM_DATA_ALL_FROM_MEMORY",
"BriefDescription": "The processor's data cache was reloaded from a memory location including L4 from local remote or distant due to either demand loads or data prefetch",
"PublicDescription": "The processor's data cache was reloaded from a memory location including L4 from local remote or distant due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
},
- {,
+ {
"EventCode": "0x64c04a",
"EventName": "PM_DATA_ALL_FROM_OFF_CHIP_CACHE",
"BriefDescription": "The processor's data cache was reloaded either shared or modified data from another core's L2/L3 on a different chip (remote or distant) due to either demand loads or data prefetch",
"PublicDescription": "The processor's data cache was reloaded either shared or modified data from another core's L2/L3 on a different chip (remote or distant) due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
},
- {,
+ {
"EventCode": "0x61c048",
"EventName": "PM_DATA_ALL_FROM_ON_CHIP_CACHE",
"BriefDescription": "The processor's data cache was reloaded either shared or modified data from another core's L2/L3 on the same chip due to either demand loads or data prefetch",
"PublicDescription": "The processor's data cache was reloaded either shared or modified data from another core's L2/L3 on the same chip due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
},
- {,
+ {
"EventCode": "0x62c046",
"EventName": "PM_DATA_ALL_FROM_RL2L3_MOD",
"BriefDescription": "The processor's data cache was reloaded with Modified (M) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to either demand loads or data prefetch",
"PublicDescription": "The processor's data cache was reloaded with Modified (M) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
},
- {,
+ {
"EventCode": "0x61c04a",
"EventName": "PM_DATA_ALL_FROM_RL2L3_SHR",
"BriefDescription": "The processor's data cache was reloaded with Shared (S) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to either demand loads or data prefetch",
"PublicDescription": "The processor's data cache was reloaded with Shared (S) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
},
- {,
+ {
"EventCode": "0x62c04a",
"EventName": "PM_DATA_ALL_FROM_RL4",
"BriefDescription": "The processor's data cache was reloaded from another chip's L4 on the same Node or Group ( Remote) due to either demand loads or data prefetch",
"PublicDescription": "The processor's data cache was reloaded from another chip's L4 on the same Node or Group ( Remote) due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
},
- {,
+ {
"EventCode": "0x63c04a",
"EventName": "PM_DATA_ALL_FROM_RMEM",
"BriefDescription": "The processor's data cache was reloaded from another chip's memory on the same Node or Group ( Remote) due to either demand loads or data prefetch",
"PublicDescription": "The processor's data cache was reloaded from another chip's memory on the same Node or Group ( Remote) due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
},
- {,
+ {
"EventCode": "0x62c050",
"EventName": "PM_DATA_ALL_GRP_PUMP_CPRED",
"BriefDescription": "Initial and Final Pump Scope was group pump (prediction=correct) for either demand loads or data prefetch",
"PublicDescription": "Initial and Final Pump Scope and data sourced across this scope was group pump for a demand load"
},
- {,
+ {
"EventCode": "0x62c052",
"EventName": "PM_DATA_ALL_GRP_PUMP_MPRED",
"BriefDescription": "Final Pump Scope (Group) ended up either larger or smaller than Initial Pump Scope for either demand loads or data prefetch",
"PublicDescription": "Final Pump Scope(Group) to get data sourced, ended up larger than Initial Pump Scope OR Final Pump Scope(Group) got data from source that was at smaller scope(Chip) Final pump was group pump and initial pump was chip or final and initial pump was gro"
},
- {,
+ {
"EventCode": "0x61c052",
"EventName": "PM_DATA_ALL_GRP_PUMP_MPRED_RTY",
"BriefDescription": "Final Pump Scope (Group) ended up larger than Initial Pump Scope (Chip) for either demand loads or data prefetch",
"PublicDescription": "Final Pump Scope(Group) to get data sourced, ended up larger than Initial Pump Scope (Chip) Final pump was group pump and initial pump was chip pumpfor a demand load"
},
- {,
+ {
"EventCode": "0x61c054",
"EventName": "PM_DATA_ALL_PUMP_CPRED",
"BriefDescription": "Pump prediction correct. Counts across all types of pumps for either demand loads or data prefetch",
"PublicDescription": "Pump prediction correct. Counts across all types of pumps for a demand load"
},
- {,
+ {
"EventCode": "0x64c052",
"EventName": "PM_DATA_ALL_PUMP_MPRED",
"BriefDescription": "Pump misprediction. Counts across all types of pumps for either demand loads or data prefetch",
"PublicDescription": "Pump Mis prediction Counts across all types of pumpsfor a demand load"
},
- {,
+ {
"EventCode": "0x63c050",
"EventName": "PM_DATA_ALL_SYS_PUMP_CPRED",
"BriefDescription": "Initial and Final Pump Scope was system pump (prediction=correct) for either demand loads or data prefetch",
"PublicDescription": "Initial and Final Pump Scope and data sourced across this scope was system pump for a demand load"
},
- {,
+ {
"EventCode": "0x63c052",
"EventName": "PM_DATA_ALL_SYS_PUMP_MPRED",
"BriefDescription": "Final Pump Scope (system) mispredicted. Either the original scope was too small (Chip/Group) or the original scope was System and it should have been smaller. Counts for either demand loads or data prefetch",
"PublicDescription": "Final Pump Scope(system) to get data sourced, ended up larger than Initial Pump Scope(Chip/Group) OR Final Pump Scope(system) got data from source that was at smaller scope(Chip/group) Final pump was system pump and initial pump was chip or group or"
},
- {,
+ {
"EventCode": "0x64c050",
"EventName": "PM_DATA_ALL_SYS_PUMP_MPRED_RTY",
"BriefDescription": "Final Pump Scope (system) ended up larger than Initial Pump Scope (Chip/Group) for either demand loads or data prefetch",
"PublicDescription": "Final Pump Scope(system) to get data sourced, ended up larger than Initial Pump Scope (Chip or Group) for a demand load"
},
- {,
+ {
"EventCode": "0x4c046",
"EventName": "PM_DATA_FROM_L21_MOD",
"BriefDescription": "The processor's data cache was reloaded with Modified (M) data from another core's L2 on the same chip due to a demand load",
"PublicDescription": "The processor's data cache was reloaded with Modified (M) data from another core's L2 on the same chip due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
},
- {,
+ {
"EventCode": "0x3c046",
"EventName": "PM_DATA_FROM_L21_SHR",
"BriefDescription": "The processor's data cache was reloaded with Shared (S) data from another core's L2 on the same chip due to a demand load",
"PublicDescription": "The processor's data cache was reloaded with Shared (S) data from another core's L2 on the same chip due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
},
- {,
+ {
"EventCode": "0x4c044",
"EventName": "PM_DATA_FROM_L31_ECO_MOD",
"BriefDescription": "The processor's data cache was reloaded with Modified (M) data from another core's ECO L3 on the same chip due to a demand load",
"PublicDescription": "The processor's data cache was reloaded with Modified (M) data from another core's ECO L3 on the same chip due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
},
- {,
+ {
"EventCode": "0x3c044",
"EventName": "PM_DATA_FROM_L31_ECO_SHR",
"BriefDescription": "The processor's data cache was reloaded with Shared (S) data from another core's ECO L3 on the same chip due to a demand load",
"PublicDescription": "The processor's data cache was reloaded with Shared (S) data from another core's ECO L3 on the same chip due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
},
- {,
+ {
"EventCode": "0x2c044",
"EventName": "PM_DATA_FROM_L31_MOD",
"BriefDescription": "The processor's data cache was reloaded with Modified (M) data from another core's L3 on the same chip due to a demand load",
"PublicDescription": "The processor's data cache was reloaded with Modified (M) data from another core's L3 on the same chip due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
},
- {,
+ {
"EventCode": "0x1c046",
"EventName": "PM_DATA_FROM_L31_SHR",
"BriefDescription": "The processor's data cache was reloaded with Shared (S) data from another core's L3 on the same chip due to a demand load",
"PublicDescription": "The processor's data cache was reloaded with Shared (S) data from another core's L3 on the same chip due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
},
- {,
+ {
"EventCode": "0x400fe",
"EventName": "PM_DATA_FROM_MEM",
"BriefDescription": "The processor's data cache was reloaded from a memory location including L4 from local remote or distant due to a demand load",
"PublicDescription": "Data cache reload from memory (including L4)"
},
- {,
+ {
"EventCode": "0xe0bc",
"EventName": "PM_DC_COLLISIONS",
"BriefDescription": "DATA Cache collisions",
"PublicDescription": "DATA Cache collisions42"
},
- {,
+ {
"EventCode": "0x1e050",
"EventName": "PM_DC_PREF_STREAM_ALLOC",
"BriefDescription": "Stream marked valid. The stream could have been allocated through the hardware prefetch mechanism or through software. This is combined ls0 and ls1",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x2e050",
"EventName": "PM_DC_PREF_STREAM_CONF",
"BriefDescription": "A demand load referenced a line in an active prefetch stream. The stream could have been allocated through the hardware prefetch mechanism or through software. Combine up + down",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x4e050",
"EventName": "PM_DC_PREF_STREAM_FUZZY_CONF",
"BriefDescription": "A demand load referenced a line in an active fuzzy prefetch stream. The stream could have been allocated through the hardware prefetch mechanism or through software.Fuzzy stream confirm (out of order effects, or pf cant keep up)",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x3e050",
"EventName": "PM_DC_PREF_STREAM_STRIDED_CONF",
"BriefDescription": "A demand load referenced a line in an active strided prefetch stream. The stream could have been allocated through the hardware prefetch mechanism or through software",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0xb0ba",
"EventName": "PM_DFU",
"BriefDescription": "Finish DFU (all finish)",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0xb0be",
"EventName": "PM_DFU_DCFFIX",
"BriefDescription": "Convert from fixed opcode finish (dcffix,dcffixq)",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0xb0bc",
"EventName": "PM_DFU_DENBCD",
"BriefDescription": "BCD->DPD opcode finish (denbcd, denbcdq)",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0xb0b8",
"EventName": "PM_DFU_MC",
"BriefDescription": "Finish DFU multicycle",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x2092",
"EventName": "PM_DISP_CLB_HELD_BAL",
"BriefDescription": "Dispatch/CLB Hold: Balance",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x2094",
"EventName": "PM_DISP_CLB_HELD_RES",
"BriefDescription": "Dispatch/CLB Hold: Resource",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x20a8",
"EventName": "PM_DISP_CLB_HELD_SB",
"BriefDescription": "Dispatch/CLB Hold: Scoreboard",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x2098",
"EventName": "PM_DISP_CLB_HELD_SYNC",
"BriefDescription": "Dispatch/CLB Hold: Sync type instruction",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x2096",
"EventName": "PM_DISP_CLB_HELD_TLBIE",
"BriefDescription": "Dispatch Hold: Due to TLBIE",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x20006",
"EventName": "PM_DISP_HELD_IQ_FULL",
"BriefDescription": "Dispatch held due to Issue q full",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x1002a",
"EventName": "PM_DISP_HELD_MAP_FULL",
"BriefDescription": "Dispatch for this thread was held because the Mappers were full",
"PublicDescription": "Dispatch held due to Mapper full"
},
- {,
+ {
"EventCode": "0x30018",
"EventName": "PM_DISP_HELD_SRQ_FULL",
"BriefDescription": "Dispatch held due SRQ no room",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x30a6",
"EventName": "PM_DISP_HOLD_GCT_FULL",
"BriefDescription": "Dispatch Hold Due to no space in the GCT",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x30008",
"EventName": "PM_DISP_WT",
"BriefDescription": "Dispatched Starved",
"PublicDescription": "Dispatched Starved (not held, nothing to dispatch)"
},
- {,
+ {
"EventCode": "0x4e046",
"EventName": "PM_DPTEG_FROM_L21_MOD",
"BriefDescription": "A Page Table Entry was loaded into the TLB with Modified (M) data from another core's L2 on the same chip due to a data side request",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x3e046",
"EventName": "PM_DPTEG_FROM_L21_SHR",
"BriefDescription": "A Page Table Entry was loaded into the TLB with Shared (S) data from another core's L2 on the same chip due to a data side request",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x3e040",
"EventName": "PM_DPTEG_FROM_L2_DISP_CONFLICT_LDHITST",
"BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L2 with load hit store conflict due to a data side request",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x4e040",
"EventName": "PM_DPTEG_FROM_L2_DISP_CONFLICT_OTHER",
"BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L2 with dispatch conflict due to a data side request",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x4e044",
"EventName": "PM_DPTEG_FROM_L31_ECO_MOD",
"BriefDescription": "A Page Table Entry was loaded into the TLB with Modified (M) data from another core's ECO L3 on the same chip due to a data side request",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x3e044",
"EventName": "PM_DPTEG_FROM_L31_ECO_SHR",
"BriefDescription": "A Page Table Entry was loaded into the TLB with Shared (S) data from another core's ECO L3 on the same chip due to a data side request",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x2e044",
"EventName": "PM_DPTEG_FROM_L31_MOD",
"BriefDescription": "A Page Table Entry was loaded into the TLB with Modified (M) data from another core's L3 on the same chip due to a data side request",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x1e046",
"EventName": "PM_DPTEG_FROM_L31_SHR",
"BriefDescription": "A Page Table Entry was loaded into the TLB with Shared (S) data from another core's L3 on the same chip due to a data side request",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x50a8",
"EventName": "PM_EAT_FORCE_MISPRED",
"BriefDescription": "XL-form branch was mispredicted due to the predicted target address missing from EAT. The EAT forces a mispredict in this case since there is no predicated target to validate. This is a rare case that may occur when the EAT is full and a branch is issue",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x4084",
"EventName": "PM_EAT_FULL_CYC",
"BriefDescription": "Cycles No room in EAT",
"PublicDescription": "Cycles No room in EATSet on bank conflict and case where no ibuffers available"
},
- {,
+ {
"EventCode": "0x2080",
"EventName": "PM_EE_OFF_EXT_INT",
"BriefDescription": "Ee off and external interrupt",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x20b4",
"EventName": "PM_FAV_TBEGIN",
"BriefDescription": "Dispatch time Favored tbegin",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x100f4",
"EventName": "PM_FLOP",
"BriefDescription": "Floating Point Operation Finished",
"PublicDescription": "Floating Point Operations Finished"
},
- {,
+ {
"EventCode": "0xa0ae",
"EventName": "PM_FLOP_SUM_SCALAR",
"BriefDescription": "flops summary scalar instructions",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0xa0ac",
"EventName": "PM_FLOP_SUM_VEC",
"BriefDescription": "flops summary vector instructions",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x2084",
"EventName": "PM_FLUSH_BR_MPRED",
"BriefDescription": "Flush caused by branch mispredict",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x2082",
"EventName": "PM_FLUSH_DISP",
"BriefDescription": "Dispatch flush",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x208c",
"EventName": "PM_FLUSH_DISP_SB",
"BriefDescription": "Dispatch Flush: Scoreboard",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x2088",
"EventName": "PM_FLUSH_DISP_SYNC",
"BriefDescription": "Dispatch Flush: Sync",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x208a",
"EventName": "PM_FLUSH_DISP_TLBIE",
"BriefDescription": "Dispatch Flush: TLBIE",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x208e",
"EventName": "PM_FLUSH_LSU",
"BriefDescription": "Flush initiated by LSU",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x2086",
"EventName": "PM_FLUSH_PARTIAL",
"BriefDescription": "Partial flush",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0xa0b0",
"EventName": "PM_FPU0_FCONV",
"BriefDescription": "Convert instruction executed",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0xa0b8",
"EventName": "PM_FPU0_FEST",
"BriefDescription": "Estimate instruction executed",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0xa0b4",
"EventName": "PM_FPU0_FRSP",
"BriefDescription": "Round to single precision instruction executed",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0xa0b2",
"EventName": "PM_FPU1_FCONV",
"BriefDescription": "Convert instruction executed",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0xa0ba",
"EventName": "PM_FPU1_FEST",
"BriefDescription": "Estimate instruction executed",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0xa0b6",
"EventName": "PM_FPU1_FRSP",
"BriefDescription": "Round to single precision instruction executed",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x50b0",
"EventName": "PM_FUSION_TOC_GRP0_1",
"BriefDescription": "One pair of instructions fused with TOC in Group0",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x50ae",
"EventName": "PM_FUSION_TOC_GRP0_2",
"BriefDescription": "Two pairs of instructions fused with TOCin Group0",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x50ac",
"EventName": "PM_FUSION_TOC_GRP0_3",
"BriefDescription": "Three pairs of instructions fused with TOC in Group0",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x50b2",
"EventName": "PM_FUSION_TOC_GRP1_1",
"BriefDescription": "One pair of instructions fused with TOX in Group1",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x50b8",
"EventName": "PM_FUSION_VSX_GRP0_1",
"BriefDescription": "One pair of instructions fused with VSX in Group0",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x50b6",
"EventName": "PM_FUSION_VSX_GRP0_2",
"BriefDescription": "Two pairs of instructions fused with VSX in Group0",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x50b4",
"EventName": "PM_FUSION_VSX_GRP0_3",
"BriefDescription": "Three pairs of instructions fused with VSX in Group0",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x50ba",
"EventName": "PM_FUSION_VSX_GRP1_1",
"BriefDescription": "One pair of instructions fused with VSX in Group1",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x3000e",
"EventName": "PM_FXU0_BUSY_FXU1_IDLE",
"BriefDescription": "fxu0 busy and fxu1 idle",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x10004",
"EventName": "PM_FXU0_FIN",
"BriefDescription": "The fixed point unit Unit 0 finished an instruction. Instructions that finish may not necessary complete",
"PublicDescription": "FXU0 Finished"
},
- {,
+ {
"EventCode": "0x4000e",
"EventName": "PM_FXU1_BUSY_FXU0_IDLE",
"BriefDescription": "fxu0 idle and fxu1 busy",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x40004",
"EventName": "PM_FXU1_FIN",
"BriefDescription": "FXU1 Finished",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x20008",
"EventName": "PM_GCT_EMPTY_CYC",
"BriefDescription": "No itags assigned either thread (GCT Empty)",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x30a4",
"EventName": "PM_GCT_MERGE",
"BriefDescription": "Group dispatched on a merged GCT empty. GCT entries can be merged only within the same thread",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x4d01e",
"EventName": "PM_GCT_NOSLOT_BR_MPRED",
"BriefDescription": "Gct empty for this thread due to branch mispred",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x4d01a",
"EventName": "PM_GCT_NOSLOT_BR_MPRED_ICMISS",
"BriefDescription": "Gct empty for this thread due to Icache Miss and branch mispred",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x100f8",
"EventName": "PM_GCT_NOSLOT_CYC",
"BriefDescription": "No itags assigned",
"PublicDescription": "Pipeline empty (No itags assigned , no GCT slots used)"
},
- {,
+ {
"EventCode": "0x2d01e",
"EventName": "PM_GCT_NOSLOT_DISP_HELD_ISSQ",
"BriefDescription": "Gct empty for this thread due to dispatch hold on this thread due to Issue q full",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x4d01c",
"EventName": "PM_GCT_NOSLOT_DISP_HELD_MAP",
"BriefDescription": "Gct empty for this thread due to dispatch hold on this thread due to Mapper full",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x2e010",
"EventName": "PM_GCT_NOSLOT_DISP_HELD_OTHER",
"BriefDescription": "Gct empty for this thread due to dispatch hold on this thread due to sync",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x2d01c",
"EventName": "PM_GCT_NOSLOT_DISP_HELD_SRQ",
"BriefDescription": "Gct empty for this thread due to dispatch hold on this thread due to SRQ full",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x4e010",
"EventName": "PM_GCT_NOSLOT_IC_L3MISS",
"BriefDescription": "Gct empty for this thread due to icach l3 miss",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x2d01a",
"EventName": "PM_GCT_NOSLOT_IC_MISS",
"BriefDescription": "Gct empty for this thread due to Icache Miss",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x20a2",
"EventName": "PM_GCT_UTIL_11_14_ENTRIES",
"BriefDescription": "GCT Utilization 11-14 entries",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x20a4",
"EventName": "PM_GCT_UTIL_15_17_ENTRIES",
"BriefDescription": "GCT Utilization 15-17 entries",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x20a6",
"EventName": "PM_GCT_UTIL_18_ENTRIES",
"BriefDescription": "GCT Utilization 18+ entries",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x209c",
"EventName": "PM_GCT_UTIL_1_2_ENTRIES",
"BriefDescription": "GCT Utilization 1-2 entries",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x209e",
"EventName": "PM_GCT_UTIL_3_6_ENTRIES",
"BriefDescription": "GCT Utilization 3-6 entries",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x20a0",
"EventName": "PM_GCT_UTIL_7_10_ENTRIES",
"BriefDescription": "GCT Utilization 7-10 entries",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x1000a",
"EventName": "PM_GRP_BR_MPRED_NONSPEC",
"BriefDescription": "Group experienced non-speculative branch redirect",
"PublicDescription": "Group experienced Non-speculative br mispredicct"
},
- {,
+ {
"EventCode": "0x30004",
"EventName": "PM_GRP_CMPL",
"BriefDescription": "group completed",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x3000a",
"EventName": "PM_GRP_DISP",
"BriefDescription": "group dispatch",
"PublicDescription": "dispatch_success (Group Dispatched)"
},
- {,
+ {
"EventCode": "0x1000c",
"EventName": "PM_GRP_IC_MISS_NONSPEC",
"BriefDescription": "Group experienced non-speculative I cache miss",
"PublicDescription": "Group experi enced Non-specu lative I cache miss"
},
- {,
+ {
"EventCode": "0x10130",
"EventName": "PM_GRP_MRK",
"BriefDescription": "Instruction Marked",
"PublicDescription": "Instruction marked in idu"
},
- {,
+ {
"EventCode": "0x509c",
"EventName": "PM_GRP_NON_FULL_GROUP",
"BriefDescription": "GROUPs where we did not have 6 non branch instructions in the group(ST mode), in SMT mode 3 non branches",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x50a4",
"EventName": "PM_GRP_TERM_2ND_BRANCH",
"BriefDescription": "There were enough instructions in the Ibuffer, but 2nd branch ends group",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x50a6",
"EventName": "PM_GRP_TERM_FPU_AFTER_BR",
"BriefDescription": "There were enough instructions in the Ibuffer, but FPU OP IN same group after a branch terminates a group, cant do partial flushes",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x509e",
"EventName": "PM_GRP_TERM_NOINST",
"BriefDescription": "Do not fill every slot in the group, Not enough instructions in the Ibuffer. This includes cases where the group started with enough instructions, but some got knocked out by a cache miss or branch redirect (which would also empty the Ibuffer)",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x50a0",
"EventName": "PM_GRP_TERM_OTHER",
"BriefDescription": "There were enough instructions in the Ibuffer, but the group terminated early for some other reason, most likely due to a First or Last",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x50a2",
"EventName": "PM_GRP_TERM_SLOT_LIMIT",
"BriefDescription": "There were enough instructions in the Ibuffer, but 3 src RA/RB/RC , 2 way crack caused a group termination",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x4086",
"EventName": "PM_IBUF_FULL_CYC",
"BriefDescription": "Cycles No room in ibuff",
"PublicDescription": "Cycles No room in ibufffully qualified transfer (if5 valid)"
},
- {,
+ {
"EventCode": "0x4098",
"EventName": "PM_IC_DEMAND_L2_BHT_REDIRECT",
"BriefDescription": "L2 I cache demand request due to BHT redirect, branch redirect ( 2 bubbles 3 cycles)",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x409a",
"EventName": "PM_IC_DEMAND_L2_BR_REDIRECT",
"BriefDescription": "L2 I cache demand request due to branch Mispredict ( 15 cycle path)",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x4088",
"EventName": "PM_IC_DEMAND_REQ",
"BriefDescription": "Demand Instruction fetch request",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x508a",
"EventName": "PM_IC_INVALIDATE",
"BriefDescription": "Ic line invalidated",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x4092",
"EventName": "PM_IC_PREF_CANCEL_HIT",
"BriefDescription": "Prefetch Canceled due to icache hit",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x4094",
"EventName": "PM_IC_PREF_CANCEL_L2",
"BriefDescription": "L2 Squashed request",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x4090",
"EventName": "PM_IC_PREF_CANCEL_PAGE",
"BriefDescription": "Prefetch Canceled due to page boundary",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x408a",
"EventName": "PM_IC_PREF_REQ",
"BriefDescription": "Instruction prefetch requests",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x408e",
"EventName": "PM_IC_PREF_WRITE",
"BriefDescription": "Instruction prefetch written into IL1",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x4096",
"EventName": "PM_IC_RELOAD_PRIVATE",
"BriefDescription": "Reloading line was brought in private for a specific thread. Most lines are brought in shared for all eight thrreads. If RA does not match then invalidates and then brings it shared to other thread. In P7 line brought in private , then line was invalidat",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x5088",
"EventName": "PM_IFU_L2_TOUCH",
"BriefDescription": "L2 touch to update MRU on a line",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x514050",
"EventName": "PM_INST_ALL_CHIP_PUMP_CPRED",
"BriefDescription": "Initial and Final Pump Scope was chip pump (prediction=correct) for instruction fetches and prefetches",
"PublicDescription": "Initial and Final Pump Scope and data sourced across this scope was chip pump (prediction=correct) for an instruction fetch"
},
- {,
+ {
"EventCode": "0x544048",
"EventName": "PM_INST_ALL_FROM_DL2L3_MOD",
"BriefDescription": "The processor's Instruction cache was reloaded with Modified (M) data from another chip's L2 or L3 on a different Node or Group (Distant), as this chip due to instruction fetches and prefetches",
"PublicDescription": "The processor's Instruction cache was reloaded with Modified (M) data from another chip's L2 or L3 on a different Node or Group (Distant), as this chip due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
},
- {,
+ {
"EventCode": "0x534048",
"EventName": "PM_INST_ALL_FROM_DL2L3_SHR",
"BriefDescription": "The processor's Instruction cache was reloaded with Shared (S) data from another chip's L2 or L3 on a different Node or Group (Distant), as this chip due to instruction fetches and prefetches",
"PublicDescription": "The processor's Instruction cache was reloaded with Shared (S) data from another chip's L2 or L3 on a different Node or Group (Distant), as this chip due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
},
- {,
+ {
"EventCode": "0x53404c",
"EventName": "PM_INST_ALL_FROM_DL4",
"BriefDescription": "The processor's Instruction cache was reloaded from another chip's L4 on a different Node or Group (Distant) due to instruction fetches and prefetches",
"PublicDescription": "The processor's Instruction cache was reloaded from another chip's L4 on a different Node or Group (Distant) due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
},
- {,
+ {
"EventCode": "0x54404c",
"EventName": "PM_INST_ALL_FROM_DMEM",
"BriefDescription": "The processor's Instruction cache was reloaded from another chip's memory on the same Node or Group (Distant) due to instruction fetches and prefetches",
"PublicDescription": "The processor's Instruction cache was reloaded from another chip's memory on the same Node or Group (Distant) due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
},
- {,
+ {
"EventCode": "0x514042",
"EventName": "PM_INST_ALL_FROM_L2",
"BriefDescription": "The processor's Instruction cache was reloaded from local core's L2 due to instruction fetches and prefetches",
"PublicDescription": "The processor's Instruction cache was reloaded from local core's L2 due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
},
- {,
+ {
"EventCode": "0x544046",
"EventName": "PM_INST_ALL_FROM_L21_MOD",
"BriefDescription": "The processor's Instruction cache was reloaded with Modified (M) data from another core's L2 on the same chip due to instruction fetches and prefetches",
"PublicDescription": "The processor's Instruction cache was reloaded with Modified (M) data from another core's L2 on the same chip due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
},
- {,
+ {
"EventCode": "0x534046",
"EventName": "PM_INST_ALL_FROM_L21_SHR",
"BriefDescription": "The processor's Instruction cache was reloaded with Shared (S) data from another core's L2 on the same chip due to instruction fetches and prefetches",
"PublicDescription": "The processor's Instruction cache was reloaded with Shared (S) data from another core's L2 on the same chip due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
},
- {,
+ {
"EventCode": "0x51404e",
"EventName": "PM_INST_ALL_FROM_L2MISS",
"BriefDescription": "The processor's Instruction cache was reloaded from a localtion other than the local core's L2 due to instruction fetches and prefetches",
"PublicDescription": "The processor's Instruction cache was reloaded from a localtion other than the local core's L2 due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
},
- {,
+ {
"EventCode": "0x534040",
"EventName": "PM_INST_ALL_FROM_L2_DISP_CONFLICT_LDHITST",
"BriefDescription": "The processor's Instruction cache was reloaded from local core's L2 with load hit store conflict due to instruction fetches and prefetches",
"PublicDescription": "The processor's Instruction cache was reloaded from local core's L2 with load hit store conflict due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
},
- {,
+ {
"EventCode": "0x544040",
"EventName": "PM_INST_ALL_FROM_L2_DISP_CONFLICT_OTHER",
"BriefDescription": "The processor's Instruction cache was reloaded from local core's L2 with dispatch conflict due to instruction fetches and prefetches",
"PublicDescription": "The processor's Instruction cache was reloaded from local core's L2 with dispatch conflict due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
},
- {,
+ {
"EventCode": "0x524040",
"EventName": "PM_INST_ALL_FROM_L2_MEPF",
"BriefDescription": "The processor's Instruction cache was reloaded from local core's L2 hit without dispatch conflicts on Mepf state. due to instruction fetches and prefetches",
"PublicDescription": "The processor's Instruction cache was reloaded from local core's L2 hit without dispatch conflicts on Mepf state. due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
},
- {,
+ {
"EventCode": "0x514040",
"EventName": "PM_INST_ALL_FROM_L2_NO_CONFLICT",
"BriefDescription": "The processor's Instruction cache was reloaded from local core's L2 without conflict due to instruction fetches and prefetches",
"PublicDescription": "The processor's Instruction cache was reloaded from local core's L2 without conflict due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
},
- {,
+ {
"EventCode": "0x544042",
"EventName": "PM_INST_ALL_FROM_L3",
"BriefDescription": "The processor's Instruction cache was reloaded from local core's L3 due to instruction fetches and prefetches",
"PublicDescription": "The processor's Instruction cache was reloaded from local core's L3 due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
},
- {,
+ {
"EventCode": "0x544044",
"EventName": "PM_INST_ALL_FROM_L31_ECO_MOD",
"BriefDescription": "The processor's Instruction cache was reloaded with Modified (M) data from another core's ECO L3 on the same chip due to instruction fetches and prefetches",
"PublicDescription": "The processor's Instruction cache was reloaded with Modified (M) data from another core's ECO L3 on the same chip due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
},
- {,
+ {
"EventCode": "0x534044",
"EventName": "PM_INST_ALL_FROM_L31_ECO_SHR",
"BriefDescription": "The processor's Instruction cache was reloaded with Shared (S) data from another core's ECO L3 on the same chip due to instruction fetches and prefetches",
"PublicDescription": "The processor's Instruction cache was reloaded with Shared (S) data from another core's ECO L3 on the same chip due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
},
- {,
+ {
"EventCode": "0x524044",
"EventName": "PM_INST_ALL_FROM_L31_MOD",
"BriefDescription": "The processor's Instruction cache was reloaded with Modified (M) data from another core's L3 on the same chip due to instruction fetches and prefetches",
"PublicDescription": "The processor's Instruction cache was reloaded with Modified (M) data from another core's L3 on the same chip due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
},
- {,
+ {
"EventCode": "0x514046",
"EventName": "PM_INST_ALL_FROM_L31_SHR",
"BriefDescription": "The processor's Instruction cache was reloaded with Shared (S) data from another core's L3 on the same chip due to instruction fetches and prefetches",
"PublicDescription": "The processor's Instruction cache was reloaded with Shared (S) data from another core's L3 on the same chip due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
},
- {,
+ {
"EventCode": "0x54404e",
"EventName": "PM_INST_ALL_FROM_L3MISS_MOD",
"BriefDescription": "The processor's Instruction cache was reloaded from a localtion other than the local core's L3 due to a instruction fetch",
"PublicDescription": "The processor's Instruction cache was reloaded from a localtion other than the local core's L3 due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
},
- {,
+ {
"EventCode": "0x534042",
"EventName": "PM_INST_ALL_FROM_L3_DISP_CONFLICT",
"BriefDescription": "The processor's Instruction cache was reloaded from local core's L3 with dispatch conflict due to instruction fetches and prefetches",
"PublicDescription": "The processor's Instruction cache was reloaded from local core's L3 with dispatch conflict due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
},
- {,
+ {
"EventCode": "0x524042",
"EventName": "PM_INST_ALL_FROM_L3_MEPF",
"BriefDescription": "The processor's Instruction cache was reloaded from local core's L3 without dispatch conflicts hit on Mepf state. due to instruction fetches and prefetches",
"PublicDescription": "The processor's Instruction cache was reloaded from local core's L3 without dispatch conflicts hit on Mepf state. due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
},
- {,
+ {
"EventCode": "0x514044",
"EventName": "PM_INST_ALL_FROM_L3_NO_CONFLICT",
"BriefDescription": "The processor's Instruction cache was reloaded from local core's L3 without conflict due to instruction fetches and prefetches",
"PublicDescription": "The processor's Instruction cache was reloaded from local core's L3 without conflict due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
},
- {,
+ {
"EventCode": "0x51404c",
"EventName": "PM_INST_ALL_FROM_LL4",
"BriefDescription": "The processor's Instruction cache was reloaded from the local chip's L4 cache due to instruction fetches and prefetches",
"PublicDescription": "The processor's Instruction cache was reloaded from the local chip's L4 cache due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
},
- {,
+ {
"EventCode": "0x524048",
"EventName": "PM_INST_ALL_FROM_LMEM",
"BriefDescription": "The processor's Instruction cache was reloaded from the local chip's Memory due to instruction fetches and prefetches",
"PublicDescription": "The processor's Instruction cache was reloaded from the local chip's Memory due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
},
- {,
+ {
"EventCode": "0x52404c",
"EventName": "PM_INST_ALL_FROM_MEMORY",
"BriefDescription": "The processor's Instruction cache was reloaded from a memory location including L4 from local remote or distant due to instruction fetches and prefetches",
"PublicDescription": "The processor's Instruction cache was reloaded from a memory location including L4 from local remote or distant due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
},
- {,
+ {
"EventCode": "0x54404a",
"EventName": "PM_INST_ALL_FROM_OFF_CHIP_CACHE",
"BriefDescription": "The processor's Instruction cache was reloaded either shared or modified data from another core's L2/L3 on a different chip (remote or distant) due to instruction fetches and prefetches",
"PublicDescription": "The processor's Instruction cache was reloaded either shared or modified data from another core's L2/L3 on a different chip (remote or distant) due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
},
- {,
+ {
"EventCode": "0x514048",
"EventName": "PM_INST_ALL_FROM_ON_CHIP_CACHE",
"BriefDescription": "The processor's Instruction cache was reloaded either shared or modified data from another core's L2/L3 on the same chip due to instruction fetches and prefetches",
"PublicDescription": "The processor's Instruction cache was reloaded either shared or modified data from another core's L2/L3 on the same chip due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
},
- {,
+ {
"EventCode": "0x524046",
"EventName": "PM_INST_ALL_FROM_RL2L3_MOD",
"BriefDescription": "The processor's Instruction cache was reloaded with Modified (M) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to instruction fetches and prefetches",
"PublicDescription": "The processor's Instruction cache was reloaded with Modified (M) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
},
- {,
+ {
"EventCode": "0x51404a",
"EventName": "PM_INST_ALL_FROM_RL2L3_SHR",
"BriefDescription": "The processor's Instruction cache was reloaded with Shared (S) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to instruction fetches and prefetches",
"PublicDescription": "The processor's Instruction cache was reloaded with Shared (S) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
},
- {,
+ {
"EventCode": "0x52404a",
"EventName": "PM_INST_ALL_FROM_RL4",
"BriefDescription": "The processor's Instruction cache was reloaded from another chip's L4 on the same Node or Group ( Remote) due to instruction fetches and prefetches",
"PublicDescription": "The processor's Instruction cache was reloaded from another chip's L4 on the same Node or Group ( Remote) due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
},
- {,
+ {
"EventCode": "0x53404a",
"EventName": "PM_INST_ALL_FROM_RMEM",
"BriefDescription": "The processor's Instruction cache was reloaded from another chip's memory on the same Node or Group ( Remote) due to instruction fetches and prefetches",
"PublicDescription": "The processor's Instruction cache was reloaded from another chip's memory on the same Node or Group ( Remote) due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
},
- {,
+ {
"EventCode": "0x524050",
"EventName": "PM_INST_ALL_GRP_PUMP_CPRED",
"BriefDescription": "Initial and Final Pump Scope was group pump (prediction=correct) for instruction fetches and prefetches",
"PublicDescription": "Initial and Final Pump Scope and data sourced across this scope was group pump for an instruction fetch"
},
- {,
+ {
"EventCode": "0x524052",
"EventName": "PM_INST_ALL_GRP_PUMP_MPRED",
"BriefDescription": "Final Pump Scope (Group) ended up either larger or smaller than Initial Pump Scope for instruction fetches and prefetches",
"PublicDescription": "Final Pump Scope(Group) to get data sourced, ended up larger than Initial Pump Scope OR Final Pump Scope(Group) got data from source that was at smaller scope(Chip) Final pump was group pump and initial pump was chip or final and initial pump was gro"
},
- {,
+ {
"EventCode": "0x514052",
"EventName": "PM_INST_ALL_GRP_PUMP_MPRED_RTY",
"BriefDescription": "Final Pump Scope (Group) ended up larger than Initial Pump Scope (Chip) for instruction fetches and prefetches",
"PublicDescription": "Final Pump Scope(Group) to get data sourced, ended up larger than Initial Pump Scope (Chip) Final pump was group pump and initial pump was chip pumpfor an instruction fetch"
},
- {,
+ {
"EventCode": "0x514054",
"EventName": "PM_INST_ALL_PUMP_CPRED",
"BriefDescription": "Pump prediction correct. Counts across all types of pumps for instruction fetches and prefetches",
"PublicDescription": "Pump prediction correct. Counts across all types of pumpsfor an instruction fetch"
},
- {,
+ {
"EventCode": "0x544052",
"EventName": "PM_INST_ALL_PUMP_MPRED",
"BriefDescription": "Pump misprediction. Counts across all types of pumps for instruction fetches and prefetches",
"PublicDescription": "Pump Mis prediction Counts across all types of pumpsfor an instruction fetch"
},
- {,
+ {
"EventCode": "0x534050",
"EventName": "PM_INST_ALL_SYS_PUMP_CPRED",
"BriefDescription": "Initial and Final Pump Scope was system pump (prediction=correct) for instruction fetches and prefetches",
"PublicDescription": "Initial and Final Pump Scope and data sourced across this scope was system pump for an instruction fetch"
},
- {,
+ {
"EventCode": "0x534052",
"EventName": "PM_INST_ALL_SYS_PUMP_MPRED",
"BriefDescription": "Final Pump Scope (system) mispredicted. Either the original scope was too small (Chip/Group) or the original scope was System and it should have been smaller. Counts for instruction fetches and prefetches",
"PublicDescription": "Final Pump Scope(system) to get data sourced, ended up larger than Initial Pump Scope(Chip/Group) OR Final Pump Scope(system) got data from source that was at smaller scope(Chip/group) Final pump was system pump and initial pump was chip or group or"
},
- {,
+ {
"EventCode": "0x544050",
"EventName": "PM_INST_ALL_SYS_PUMP_MPRED_RTY",
"BriefDescription": "Final Pump Scope (system) ended up larger than Initial Pump Scope (Chip/Group) for instruction fetches and prefetches",
"PublicDescription": "Final Pump Scope(system) to get data sourced, ended up larger than Initial Pump Scope (Chip or Group) for an instruction fetch"
},
- {,
+ {
"EventCode": "0x4080",
"EventName": "PM_INST_FROM_L1",
"BriefDescription": "Instruction fetches from L1",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x44046",
"EventName": "PM_INST_FROM_L21_MOD",
"BriefDescription": "The processor's Instruction cache was reloaded with Modified (M) data from another core's L2 on the same chip due to an instruction fetch (not prefetch)",
"PublicDescription": "The processor's Instruction cache was reloaded with Modified (M) data from another core's L2 on the same chip due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
},
- {,
+ {
"EventCode": "0x34046",
"EventName": "PM_INST_FROM_L21_SHR",
"BriefDescription": "The processor's Instruction cache was reloaded with Shared (S) data from another core's L2 on the same chip due to an instruction fetch (not prefetch)",
"PublicDescription": "The processor's Instruction cache was reloaded with Shared (S) data from another core's L2 on the same chip due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
},
- {,
+ {
"EventCode": "0x44044",
"EventName": "PM_INST_FROM_L31_ECO_MOD",
"BriefDescription": "The processor's Instruction cache was reloaded with Modified (M) data from another core's ECO L3 on the same chip due to an instruction fetch (not prefetch)",
"PublicDescription": "The processor's Instruction cache was reloaded with Modified (M) data from another core's ECO L3 on the same chip due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
},
- {,
+ {
"EventCode": "0x34044",
"EventName": "PM_INST_FROM_L31_ECO_SHR",
"BriefDescription": "The processor's Instruction cache was reloaded with Shared (S) data from another core's ECO L3 on the same chip due to an instruction fetch (not prefetch)",
"PublicDescription": "The processor's Instruction cache was reloaded with Shared (S) data from another core's ECO L3 on the same chip due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
},
- {,
+ {
"EventCode": "0x24044",
"EventName": "PM_INST_FROM_L31_MOD",
"BriefDescription": "The processor's Instruction cache was reloaded with Modified (M) data from another core's L3 on the same chip due to an instruction fetch (not prefetch)",
"PublicDescription": "The processor's Instruction cache was reloaded with Modified (M) data from another core's L3 on the same chip due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
},
- {,
+ {
"EventCode": "0x14046",
"EventName": "PM_INST_FROM_L31_SHR",
"BriefDescription": "The processor's Instruction cache was reloaded with Shared (S) data from another core's L3 on the same chip due to an instruction fetch (not prefetch)",
"PublicDescription": "The processor's Instruction cache was reloaded with Shared (S) data from another core's L3 on the same chip due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
},
- {,
+ {
"EventCode": "0x30016",
"EventName": "PM_INST_IMC_MATCH_DISP",
"BriefDescription": "Matched Instructions Dispatched",
"PublicDescription": "IMC Matches dispatched"
},
- {,
+ {
"EventCode": "0x30014",
"EventName": "PM_IOPS_DISP",
"BriefDescription": "Internal Operations dispatched",
"PublicDescription": "IOPS dispatched"
},
- {,
+ {
"EventCode": "0x45046",
"EventName": "PM_IPTEG_FROM_L21_MOD",
"BriefDescription": "A Page Table Entry was loaded into the TLB with Modified (M) data from another core's L2 on the same chip due to a instruction side request",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x35046",
"EventName": "PM_IPTEG_FROM_L21_SHR",
"BriefDescription": "A Page Table Entry was loaded into the TLB with Shared (S) data from another core's L2 on the same chip due to a instruction side request",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x35040",
"EventName": "PM_IPTEG_FROM_L2_DISP_CONFLICT_LDHITST",
"BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L2 with load hit store conflict due to a instruction side request",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x45040",
"EventName": "PM_IPTEG_FROM_L2_DISP_CONFLICT_OTHER",
"BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L2 with dispatch conflict due to a instruction side request",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x45044",
"EventName": "PM_IPTEG_FROM_L31_ECO_MOD",
"BriefDescription": "A Page Table Entry was loaded into the TLB with Modified (M) data from another core's ECO L3 on the same chip due to a instruction side request",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x35044",
"EventName": "PM_IPTEG_FROM_L31_ECO_SHR",
"BriefDescription": "A Page Table Entry was loaded into the TLB with Shared (S) data from another core's ECO L3 on the same chip due to a instruction side request",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x25044",
"EventName": "PM_IPTEG_FROM_L31_MOD",
"BriefDescription": "A Page Table Entry was loaded into the TLB with Modified (M) data from another core's L3 on the same chip due to a instruction side request",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x15046",
"EventName": "PM_IPTEG_FROM_L31_SHR",
"BriefDescription": "A Page Table Entry was loaded into the TLB with Shared (S) data from another core's L3 on the same chip due to a instruction side request",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x4608e",
"EventName": "PM_ISIDE_L2MEMACC",
"BriefDescription": "valid when first beat of data comes in for an i-side fetch where data came from mem(or L4)",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x30ac",
"EventName": "PM_ISU_REF_FX0",
"BriefDescription": "FX0 ISU reject",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x30ae",
"EventName": "PM_ISU_REF_FX1",
"BriefDescription": "FX1 ISU reject",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x38ac",
"EventName": "PM_ISU_REF_FXU",
"BriefDescription": "FXU ISU reject from either pipe",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x30b0",
"EventName": "PM_ISU_REF_LS0",
"BriefDescription": "LS0 ISU reject",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x30b2",
"EventName": "PM_ISU_REF_LS1",
"BriefDescription": "LS1 ISU reject",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x30b4",
"EventName": "PM_ISU_REF_LS2",
"BriefDescription": "LS2 ISU reject",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x30b6",
"EventName": "PM_ISU_REF_LS3",
"BriefDescription": "LS3 ISU reject",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x309c",
"EventName": "PM_ISU_REJECTS_ALL",
"BriefDescription": "All isu rejects could be more than 1 per cycle",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x30a2",
"EventName": "PM_ISU_REJECT_RES_NA",
"BriefDescription": "ISU reject due to resource not available",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x309e",
"EventName": "PM_ISU_REJECT_SAR_BYPASS",
"BriefDescription": "Reject because of SAR bypass",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x30a0",
"EventName": "PM_ISU_REJECT_SRC_NA",
"BriefDescription": "ISU reject due to source not available",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x30a8",
"EventName": "PM_ISU_REJ_VS0",
"BriefDescription": "VS0 ISU reject",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x30aa",
"EventName": "PM_ISU_REJ_VS1",
"BriefDescription": "VS1 ISU reject",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x38a8",
"EventName": "PM_ISU_REJ_VSU",
"BriefDescription": "VSU ISU reject from either pipe",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x30b8",
"EventName": "PM_ISYNC",
"BriefDescription": "Isync count per thread",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x200301ea",
"EventName": "PM_L1MISS_LAT_EXC_1024",
"BriefDescription": "L1 misses that took longer than 1024 cyles to resolve (miss to reload)",
"PublicDescription": "Reload latency exceeded 1024 cyc"
},
- {,
+ {
"EventCode": "0x200401ec",
"EventName": "PM_L1MISS_LAT_EXC_2048",
"BriefDescription": "L1 misses that took longer than 2048 cyles to resolve (miss to reload)",
"PublicDescription": "Reload latency exceeded 2048 cyc"
},
- {,
+ {
"EventCode": "0x200101e8",
"EventName": "PM_L1MISS_LAT_EXC_256",
"BriefDescription": "L1 misses that took longer than 256 cyles to resolve (miss to reload)",
"PublicDescription": "Reload latency exceeded 256 cyc"
},
- {,
+ {
"EventCode": "0x200201e6",
"EventName": "PM_L1MISS_LAT_EXC_32",
"BriefDescription": "L1 misses that took longer than 32 cyles to resolve (miss to reload)",
"PublicDescription": "Reload latency exceeded 32 cyc"
},
- {,
+ {
"EventCode": "0x26086",
"EventName": "PM_L1PF_L2MEMACC",
"BriefDescription": "valid when first beat of data comes in for an L1pref where data came from mem(or L4)",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x408c",
"EventName": "PM_L1_DEMAND_WRITE",
"BriefDescription": "Instruction Demand sectors wriittent into IL1",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x27084",
"EventName": "PM_L2_CHIP_PUMP",
"BriefDescription": "RC requests that were local on chip pump attempts",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x27086",
"EventName": "PM_L2_GROUP_PUMP",
"BriefDescription": "RC requests that were on Node Pump attempts",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x3708a",
"EventName": "PM_L2_RTY_ST",
"BriefDescription": "RC retries on PB for any store from core",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x17080",
"EventName": "PM_L2_ST",
"BriefDescription": "All successful D-side store dispatches for this thread",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x17082",
"EventName": "PM_L2_ST_MISS",
"BriefDescription": "All successful D-side store dispatches for this thread that were L2 Miss",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x1e05e",
"EventName": "PM_L2_TM_REQ_ABORT",
"BriefDescription": "TM abort",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x3e05c",
"EventName": "PM_L2_TM_ST_ABORT_SISTER",
"BriefDescription": "TM marked store abort",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x819082",
"EventName": "PM_L3_CI_USAGE",
"BriefDescription": "rotating sample of 16 CI or CO actives",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x83908b",
"EventName": "PM_L3_CO0_ALLOC",
"BriefDescription": "lifetime, sample of CO machine 0 valid",
"PublicDescription": "0.0"
},
- {,
+ {
"EventCode": "0x83908a",
"EventName": "PM_L3_CO0_BUSY",
"BriefDescription": "lifetime, sample of CO machine 0 valid",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x28086",
"EventName": "PM_L3_CO_L31",
"BriefDescription": "L3 CO to L3.1 OR of port 0 and 1 ( lossy)",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x28084",
"EventName": "PM_L3_CO_MEM",
"BriefDescription": "L3 CO to memory OR of port 0 and 1 ( lossy)",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x1e052",
"EventName": "PM_L3_LD_PREF",
"BriefDescription": "L3 Load Prefetches",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x84908d",
"EventName": "PM_L3_PF0_ALLOC",
"BriefDescription": "lifetime, sample of PF machine 0 valid",
"PublicDescription": "0.0"
},
- {,
+ {
"EventCode": "0x84908c",
"EventName": "PM_L3_PF0_BUSY",
"BriefDescription": "lifetime, sample of PF machine 0 valid",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x18080",
"EventName": "PM_L3_PF_MISS_L3",
"BriefDescription": "L3 Prefetch missed in L3",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x3808a",
"EventName": "PM_L3_PF_OFF_CHIP_CACHE",
"BriefDescription": "L3 Prefetch from Off chip cache",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x4808e",
"EventName": "PM_L3_PF_OFF_CHIP_MEM",
"BriefDescription": "L3 Prefetch from Off chip memory",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x38088",
"EventName": "PM_L3_PF_ON_CHIP_CACHE",
"BriefDescription": "L3 Prefetch from On chip cache",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x4808c",
"EventName": "PM_L3_PF_ON_CHIP_MEM",
"BriefDescription": "L3 Prefetch from On chip memory",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x829084",
"EventName": "PM_L3_PF_USAGE",
"BriefDescription": "rotating sample of 32 PF actives",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x4e052",
"EventName": "PM_L3_PREF_ALL",
"BriefDescription": "Total HW L3 prefetches(Load+store)",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x84908f",
"EventName": "PM_L3_RD0_ALLOC",
"BriefDescription": "lifetime, sample of RD machine 0 valid",
"PublicDescription": "0.0"
},
- {,
+ {
"EventCode": "0x84908e",
"EventName": "PM_L3_RD0_BUSY",
"BriefDescription": "lifetime, sample of RD machine 0 valid",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x829086",
"EventName": "PM_L3_RD_USAGE",
"BriefDescription": "rotating sample of 16 RD actives",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x839089",
"EventName": "PM_L3_SN0_ALLOC",
"BriefDescription": "lifetime, sample of snooper machine 0 valid",
"PublicDescription": "0.0"
},
- {,
+ {
"EventCode": "0x839088",
"EventName": "PM_L3_SN0_BUSY",
"BriefDescription": "lifetime, sample of snooper machine 0 valid",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x819080",
"EventName": "PM_L3_SN_USAGE",
"BriefDescription": "rotating sample of 8 snoop valids",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x2e052",
"EventName": "PM_L3_ST_PREF",
"BriefDescription": "L3 store Prefetches",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x3e052",
"EventName": "PM_L3_SW_PREF",
"BriefDescription": "Data stream touchto L3",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x18081",
"EventName": "PM_L3_WI0_ALLOC",
"BriefDescription": "lifetime, sample of Write Inject machine 0 valid",
"PublicDescription": "0.0"
},
- {,
+ {
"EventCode": "0xc080",
"EventName": "PM_LD_REF_L1_LSU0",
"BriefDescription": "LS0 L1 D cache load references counted at finish, gated by reject",
"PublicDescription": "LS0 L1 D cache load references counted at finish, gated by rejectLSU0 L1 D cache load references"
},
- {,
+ {
"EventCode": "0xc082",
"EventName": "PM_LD_REF_L1_LSU1",
"BriefDescription": "LS1 L1 D cache load references counted at finish, gated by reject",
"PublicDescription": "LS1 L1 D cache load references counted at finish, gated by rejectLSU1 L1 D cache load references"
},
- {,
+ {
"EventCode": "0xc094",
"EventName": "PM_LD_REF_L1_LSU2",
"BriefDescription": "LS2 L1 D cache load references counted at finish, gated by reject",
"PublicDescription": "LS2 L1 D cache load references counted at finish, gated by reject42"
},
- {,
+ {
"EventCode": "0xc096",
"EventName": "PM_LD_REF_L1_LSU3",
"BriefDescription": "LS3 L1 D cache load references counted at finish, gated by reject",
"PublicDescription": "LS3 L1 D cache load references counted at finish, gated by reject42"
},
- {,
+ {
"EventCode": "0x509a",
"EventName": "PM_LINK_STACK_INVALID_PTR",
"BriefDescription": "A flush were LS ptr is invalid, results in a pop , A lot of interrupts between push and pops",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x5098",
"EventName": "PM_LINK_STACK_WRONG_ADD_PRED",
"BriefDescription": "Link stack predicts wrong address, because of link stack design limitation",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0xe080",
"EventName": "PM_LS0_ERAT_MISS_PREF",
"BriefDescription": "LS0 Erat miss due to prefetch",
"PublicDescription": "LS0 Erat miss due to prefetch42"
},
- {,
+ {
"EventCode": "0xd0b8",
"EventName": "PM_LS0_L1_PREF",
"BriefDescription": "LS0 L1 cache data prefetches",
"PublicDescription": "LS0 L1 cache data prefetches42"
},
- {,
+ {
"EventCode": "0xc098",
"EventName": "PM_LS0_L1_SW_PREF",
"BriefDescription": "Software L1 Prefetches, including SW Transient Prefetches",
"PublicDescription": "Software L1 Prefetches, including SW Transient Prefetches42"
},
- {,
+ {
"EventCode": "0xe082",
"EventName": "PM_LS1_ERAT_MISS_PREF",
"BriefDescription": "LS1 Erat miss due to prefetch",
"PublicDescription": "LS1 Erat miss due to prefetch42"
},
- {,
+ {
"EventCode": "0xd0ba",
"EventName": "PM_LS1_L1_PREF",
"BriefDescription": "LS1 L1 cache data prefetches",
"PublicDescription": "LS1 L1 cache data prefetches42"
},
- {,
+ {
"EventCode": "0xc09a",
"EventName": "PM_LS1_L1_SW_PREF",
"BriefDescription": "Software L1 Prefetches, including SW Transient Prefetches",
"PublicDescription": "Software L1 Prefetches, including SW Transient Prefetches42"
},
- {,
+ {
"EventCode": "0xc0b0",
"EventName": "PM_LSU0_FLUSH_LRQ",
"BriefDescription": "LS0 Flush: LRQ",
"PublicDescription": "LS0 Flush: LRQLSU0 LRQ flushes"
},
- {,
+ {
"EventCode": "0xc0b8",
"EventName": "PM_LSU0_FLUSH_SRQ",
"BriefDescription": "LS0 Flush: SRQ",
"PublicDescription": "LS0 Flush: SRQLSU0 SRQ lhs flushes"
},
- {,
+ {
"EventCode": "0xc0a4",
"EventName": "PM_LSU0_FLUSH_ULD",
"BriefDescription": "LS0 Flush: Unaligned Load",
"PublicDescription": "LS0 Flush: Unaligned LoadLSU0 unaligned load flushes"
},
- {,
+ {
"EventCode": "0xc0ac",
"EventName": "PM_LSU0_FLUSH_UST",
"BriefDescription": "LS0 Flush: Unaligned Store",
"PublicDescription": "LS0 Flush: Unaligned StoreLSU0 unaligned store flushes"
},
- {,
+ {
"EventCode": "0xf088",
"EventName": "PM_LSU0_L1_CAM_CANCEL",
"BriefDescription": "ls0 l1 tm cam cancel",
"PublicDescription": "ls0 l1 tm cam cancel42"
},
- {,
+ {
"EventCode": "0x1e056",
"EventName": "PM_LSU0_LARX_FIN",
"BriefDescription": "Larx finished in LSU pipe0",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0xd08c",
"EventName": "PM_LSU0_LMQ_LHR_MERGE",
"BriefDescription": "LS0 Load Merged with another cacheline request",
"PublicDescription": "LS0 Load Merged with another cacheline request42"
},
- {,
+ {
"EventCode": "0xc08c",
"EventName": "PM_LSU0_NCLD",
"BriefDescription": "LS0 Non-cachable Loads counted at finish",
"PublicDescription": "LS0 Non-cachable Loads counted at finishLSU0 non-cacheable loads"
},
- {,
+ {
"EventCode": "0xe090",
"EventName": "PM_LSU0_PRIMARY_ERAT_HIT",
"BriefDescription": "Primary ERAT hit",
"PublicDescription": "Primary ERAT hit42"
},
- {,
+ {
"EventCode": "0x1e05a",
"EventName": "PM_LSU0_REJECT",
"BriefDescription": "LSU0 reject",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0xc09c",
"EventName": "PM_LSU0_SRQ_STFWD",
"BriefDescription": "LS0 SRQ forwarded data to a load",
"PublicDescription": "LS0 SRQ forwarded data to a loadLSU0 SRQ store forwarded"
},
- {,
+ {
"EventCode": "0xf084",
"EventName": "PM_LSU0_STORE_REJECT",
"BriefDescription": "ls0 store reject",
"PublicDescription": "ls0 store reject42"
},
- {,
+ {
"EventCode": "0xe0a8",
"EventName": "PM_LSU0_TMA_REQ_L2",
"BriefDescription": "addrs only req to L2 only on the first one,Indication that Load footprint is not expanding",
"PublicDescription": "addrs only req to L2 only on the first one,Indication that Load footprint is not expanding42"
},
- {,
+ {
"EventCode": "0xe098",
"EventName": "PM_LSU0_TM_L1_HIT",
"BriefDescription": "Load tm hit in L1",
"PublicDescription": "Load tm hit in L142"
},
- {,
+ {
"EventCode": "0xe0a0",
"EventName": "PM_LSU0_TM_L1_MISS",
"BriefDescription": "Load tm L1 miss",
"PublicDescription": "Load tm L1 miss42"
},
- {,
+ {
"EventCode": "0xc0b2",
"EventName": "PM_LSU1_FLUSH_LRQ",
"BriefDescription": "LS1 Flush: LRQ",
"PublicDescription": "LS1 Flush: LRQLSU1 LRQ flushes"
},
- {,
+ {
"EventCode": "0xc0ba",
"EventName": "PM_LSU1_FLUSH_SRQ",
"BriefDescription": "LS1 Flush: SRQ",
"PublicDescription": "LS1 Flush: SRQLSU1 SRQ lhs flushes"
},
- {,
+ {
"EventCode": "0xc0a6",
"EventName": "PM_LSU1_FLUSH_ULD",
"BriefDescription": "LS 1 Flush: Unaligned Load",
"PublicDescription": "LS 1 Flush: Unaligned LoadLSU1 unaligned load flushes"
},
- {,
+ {
"EventCode": "0xc0ae",
"EventName": "PM_LSU1_FLUSH_UST",
"BriefDescription": "LS1 Flush: Unaligned Store",
"PublicDescription": "LS1 Flush: Unaligned StoreLSU1 unaligned store flushes"
},
- {,
+ {
"EventCode": "0xf08a",
"EventName": "PM_LSU1_L1_CAM_CANCEL",
"BriefDescription": "ls1 l1 tm cam cancel",
"PublicDescription": "ls1 l1 tm cam cancel42"
},
- {,
+ {
"EventCode": "0x2e056",
"EventName": "PM_LSU1_LARX_FIN",
"BriefDescription": "Larx finished in LSU pipe1",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0xd08e",
"EventName": "PM_LSU1_LMQ_LHR_MERGE",
"BriefDescription": "LS1 Load Merge with another cacheline request",
"PublicDescription": "LS1 Load Merge with another cacheline request42"
},
- {,
+ {
"EventCode": "0xc08e",
"EventName": "PM_LSU1_NCLD",
"BriefDescription": "LS1 Non-cachable Loads counted at finish",
"PublicDescription": "LS1 Non-cachable Loads counted at finishLSU1 non-cacheable loads"
},
- {,
+ {
"EventCode": "0xe092",
"EventName": "PM_LSU1_PRIMARY_ERAT_HIT",
"BriefDescription": "Primary ERAT hit",
"PublicDescription": "Primary ERAT hit42"
},
- {,
+ {
"EventCode": "0x2e05a",
"EventName": "PM_LSU1_REJECT",
"BriefDescription": "LSU1 reject",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0xc09e",
"EventName": "PM_LSU1_SRQ_STFWD",
"BriefDescription": "LS1 SRQ forwarded data to a load",
"PublicDescription": "LS1 SRQ forwarded data to a loadLSU1 SRQ store forwarded"
},
- {,
+ {
"EventCode": "0xf086",
"EventName": "PM_LSU1_STORE_REJECT",
"BriefDescription": "ls1 store reject",
"PublicDescription": "ls1 store reject42"
},
- {,
+ {
"EventCode": "0xe0aa",
"EventName": "PM_LSU1_TMA_REQ_L2",
"BriefDescription": "addrs only req to L2 only on the first one,Indication that Load footprint is not expanding",
"PublicDescription": "addrs only req to L2 only on the first one,Indication that Load footprint is not expanding42"
},
- {,
+ {
"EventCode": "0xe09a",
"EventName": "PM_LSU1_TM_L1_HIT",
"BriefDescription": "Load tm hit in L1",
"PublicDescription": "Load tm hit in L142"
},
- {,
+ {
"EventCode": "0xe0a2",
"EventName": "PM_LSU1_TM_L1_MISS",
"BriefDescription": "Load tm L1 miss",
"PublicDescription": "Load tm L1 miss42"
},
- {,
+ {
"EventCode": "0xc0b4",
"EventName": "PM_LSU2_FLUSH_LRQ",
"BriefDescription": "LS02Flush: LRQ",
"PublicDescription": "LS02Flush: LRQ42"
},
- {,
+ {
"EventCode": "0xc0bc",
"EventName": "PM_LSU2_FLUSH_SRQ",
"BriefDescription": "LS2 Flush: SRQ",
"PublicDescription": "LS2 Flush: SRQ42"
},
- {,
+ {
"EventCode": "0xc0a8",
"EventName": "PM_LSU2_FLUSH_ULD",
"BriefDescription": "LS3 Flush: Unaligned Load",
"PublicDescription": "LS3 Flush: Unaligned Load42"
},
- {,
+ {
"EventCode": "0xf08c",
"EventName": "PM_LSU2_L1_CAM_CANCEL",
"BriefDescription": "ls2 l1 tm cam cancel",
"PublicDescription": "ls2 l1 tm cam cancel42"
},
- {,
+ {
"EventCode": "0x3e056",
"EventName": "PM_LSU2_LARX_FIN",
"BriefDescription": "Larx finished in LSU pipe2",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0xc084",
"EventName": "PM_LSU2_LDF",
"BriefDescription": "LS2 Scalar Loads",
"PublicDescription": "LS2 Scalar Loads42"
},
- {,
+ {
"EventCode": "0xc088",
"EventName": "PM_LSU2_LDX",
"BriefDescription": "LS0 Vector Loads",
"PublicDescription": "LS0 Vector Loads42"
},
- {,
+ {
"EventCode": "0xd090",
"EventName": "PM_LSU2_LMQ_LHR_MERGE",
"BriefDescription": "LS0 Load Merged with another cacheline request",
"PublicDescription": "LS0 Load Merged with another cacheline request42"
},
- {,
+ {
"EventCode": "0xe094",
"EventName": "PM_LSU2_PRIMARY_ERAT_HIT",
"BriefDescription": "Primary ERAT hit",
"PublicDescription": "Primary ERAT hit42"
},
- {,
+ {
"EventCode": "0x3e05a",
"EventName": "PM_LSU2_REJECT",
"BriefDescription": "LSU2 reject",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0xc0a0",
"EventName": "PM_LSU2_SRQ_STFWD",
"BriefDescription": "LS2 SRQ forwarded data to a load",
"PublicDescription": "LS2 SRQ forwarded data to a load42"
},
- {,
+ {
"EventCode": "0xe0ac",
"EventName": "PM_LSU2_TMA_REQ_L2",
"BriefDescription": "addrs only req to L2 only on the first one,Indication that Load footprint is not expanding",
"PublicDescription": "addrs only req to L2 only on the first one,Indication that Load footprint is not expanding42"
},
- {,
+ {
"EventCode": "0xe09c",
"EventName": "PM_LSU2_TM_L1_HIT",
"BriefDescription": "Load tm hit in L1",
"PublicDescription": "Load tm hit in L142"
},
- {,
+ {
"EventCode": "0xe0a4",
"EventName": "PM_LSU2_TM_L1_MISS",
"BriefDescription": "Load tm L1 miss",
"PublicDescription": "Load tm L1 miss42"
},
- {,
+ {
"EventCode": "0xc0b6",
"EventName": "PM_LSU3_FLUSH_LRQ",
"BriefDescription": "LS3 Flush: LRQ",
"PublicDescription": "LS3 Flush: LRQ42"
},
- {,
+ {
"EventCode": "0xc0be",
"EventName": "PM_LSU3_FLUSH_SRQ",
"BriefDescription": "LS13 Flush: SRQ",
"PublicDescription": "LS13 Flush: SRQ42"
},
- {,
+ {
"EventCode": "0xc0aa",
"EventName": "PM_LSU3_FLUSH_ULD",
"BriefDescription": "LS 14Flush: Unaligned Load",
"PublicDescription": "LS 14Flush: Unaligned Load42"
},
- {,
+ {
"EventCode": "0xf08e",
"EventName": "PM_LSU3_L1_CAM_CANCEL",
"BriefDescription": "ls3 l1 tm cam cancel",
"PublicDescription": "ls3 l1 tm cam cancel42"
},
- {,
+ {
"EventCode": "0x4e056",
"EventName": "PM_LSU3_LARX_FIN",
"BriefDescription": "Larx finished in LSU pipe3",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0xc086",
"EventName": "PM_LSU3_LDF",
"BriefDescription": "LS3 Scalar Loads",
"PublicDescription": "LS3 Scalar Loads 42"
},
- {,
+ {
"EventCode": "0xc08a",
"EventName": "PM_LSU3_LDX",
"BriefDescription": "LS1 Vector Loads",
"PublicDescription": "LS1 Vector Loads42"
},
- {,
+ {
"EventCode": "0xd092",
"EventName": "PM_LSU3_LMQ_LHR_MERGE",
"BriefDescription": "LS1 Load Merge with another cacheline request",
"PublicDescription": "LS1 Load Merge with another cacheline request42"
},
- {,
+ {
"EventCode": "0xe096",
"EventName": "PM_LSU3_PRIMARY_ERAT_HIT",
"BriefDescription": "Primary ERAT hit",
"PublicDescription": "Primary ERAT hit42"
},
- {,
+ {
"EventCode": "0x4e05a",
"EventName": "PM_LSU3_REJECT",
"BriefDescription": "LSU3 reject",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0xc0a2",
"EventName": "PM_LSU3_SRQ_STFWD",
"BriefDescription": "LS3 SRQ forwarded data to a load",
"PublicDescription": "LS3 SRQ forwarded data to a load42"
},
- {,
+ {
"EventCode": "0xe0ae",
"EventName": "PM_LSU3_TMA_REQ_L2",
"BriefDescription": "addrs only req to L2 only on the first one,Indication that Load footprint is not expanding",
"PublicDescription": "addrs only req to L2 only on the first one,Indication that Load footprint is not expanding42"
},
- {,
+ {
"EventCode": "0xe09e",
"EventName": "PM_LSU3_TM_L1_HIT",
"BriefDescription": "Load tm hit in L1",
"PublicDescription": "Load tm hit in L142"
},
- {,
+ {
"EventCode": "0xe0a6",
"EventName": "PM_LSU3_TM_L1_MISS",
"BriefDescription": "Load tm L1 miss",
"PublicDescription": "Load tm L1 miss42"
},
- {,
+ {
"EventCode": "0xe880",
"EventName": "PM_LSU_ERAT_MISS_PREF",
"BriefDescription": "Erat miss due to prefetch, on either pipe",
"PublicDescription": "LSU"
},
- {,
+ {
"EventCode": "0xc8ac",
"EventName": "PM_LSU_FLUSH_UST",
"BriefDescription": "Unaligned Store Flush on either pipe",
"PublicDescription": "LSU"
},
- {,
+ {
"EventCode": "0xd0a4",
"EventName": "PM_LSU_FOUR_TABLEWALK_CYC",
"BriefDescription": "Cycles when four tablewalks pending on this thread",
"PublicDescription": "Cycles when four tablewalks pending on this thread42"
},
- {,
+ {
"EventCode": "0x10066",
"EventName": "PM_LSU_FX_FIN",
"BriefDescription": "LSU Finished a FX operation (up to 2 per cycle",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0xd8b8",
"EventName": "PM_LSU_L1_PREF",
"BriefDescription": "hw initiated , include sw streaming forms as well , include sw streams as a separate event",
"PublicDescription": "LSU"
},
- {,
+ {
"EventCode": "0xc898",
"EventName": "PM_LSU_L1_SW_PREF",
"BriefDescription": "Software L1 Prefetches, including SW Transient Prefetches, on both pipes",
"PublicDescription": "LSU"
},
- {,
+ {
"EventCode": "0xc884",
"EventName": "PM_LSU_LDF",
"BriefDescription": "FPU loads only on LS2/LS3 ie LU0/LU1",
"PublicDescription": "LSU"
},
- {,
+ {
"EventCode": "0xc888",
"EventName": "PM_LSU_LDX",
"BriefDescription": "Vector loads can issue only on LS2/LS3",
"PublicDescription": "LSU"
},
- {,
+ {
"EventCode": "0xd0a2",
"EventName": "PM_LSU_LMQ_FULL_CYC",
"BriefDescription": "LMQ full",
"PublicDescription": "LMQ fullCycles LMQ full"
},
- {,
+ {
"EventCode": "0xd0a1",
"EventName": "PM_LSU_LMQ_S0_ALLOC",
"BriefDescription": "Per thread - use edge detect to count allocates On a per thread basis, level signal indicating Slot 0 is valid. By instrumenting a single slot we can calculate service time for that slot. Previous machines required a separate signal indicating the slot was allocated. Because any signal can be routed to any counter in P8, we can count level in one PMC and edge detect in another PMC using the same signal",
"PublicDescription": "0.0"
},
- {,
+ {
"EventCode": "0xd0a0",
"EventName": "PM_LSU_LMQ_S0_VALID",
"BriefDescription": "Slot 0 of LMQ valid",
"PublicDescription": "Slot 0 of LMQ validLMQ slot 0 valid"
},
- {,
+ {
"EventCode": "0x3001c",
"EventName": "PM_LSU_LMQ_SRQ_EMPTY_ALL_CYC",
"BriefDescription": "ALL threads lsu empty (lmq and srq empty)",
"PublicDescription": "ALL threads lsu empty (lmq and srq empty). Issue HW016541"
},
- {,
+ {
"EventCode": "0xd09f",
"EventName": "PM_LSU_LRQ_S0_ALLOC",
"BriefDescription": "Per thread - use edge detect to count allocates On a per thread basis, level signal indicating Slot 0 is valid. By instrumenting a single slot we can calculate service time for that slot. Previous machines required a separate signal indicating the slot was allocated. Because any signal can be routed to any counter in P8, we can count level in one PMC and edge detect in another PMC using the same signal",
"PublicDescription": "0.0"
},
- {,
+ {
"EventCode": "0xd09e",
"EventName": "PM_LSU_LRQ_S0_VALID",
"BriefDescription": "Slot 0 of LRQ valid",
"PublicDescription": "Slot 0 of LRQ validLRQ slot 0 valid"
},
- {,
+ {
"EventCode": "0xf091",
"EventName": "PM_LSU_LRQ_S43_ALLOC",
"BriefDescription": "LRQ slot 43 was released",
"PublicDescription": "0.0"
},
- {,
+ {
"EventCode": "0xf090",
"EventName": "PM_LSU_LRQ_S43_VALID",
"BriefDescription": "LRQ slot 43 was busy",
"PublicDescription": "LRQ slot 43 was busy42"
},
- {,
+ {
"EventCode": "0x30162",
"EventName": "PM_LSU_MRK_DERAT_MISS",
"BriefDescription": "DERAT Reloaded (Miss)",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0xc88c",
"EventName": "PM_LSU_NCLD",
"BriefDescription": "count at finish so can return only on ls0 or ls1",
"PublicDescription": "LSU"
},
- {,
+ {
"EventCode": "0xc092",
"EventName": "PM_LSU_NCST",
"BriefDescription": "Non-cachable Stores sent to nest",
"PublicDescription": "Non-cachable Stores sent to nest42"
},
- {,
+ {
"EventCode": "0x10064",
"EventName": "PM_LSU_REJECT",
"BriefDescription": "LSU Reject (up to 4 per cycle)",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0xd082",
"EventName": "PM_LSU_SET_MPRED",
"BriefDescription": "Line already in cache at reload time",
"PublicDescription": "Line already in cache at reload time42"
},
- {,
+ {
"EventCode": "0x40008",
"EventName": "PM_LSU_SRQ_EMPTY_CYC",
"BriefDescription": "ALL threads srq empty",
"PublicDescription": "All threads srq empty"
},
- {,
+ {
"EventCode": "0xd09d",
"EventName": "PM_LSU_SRQ_S0_ALLOC",
"BriefDescription": "Per thread - use edge detect to count allocates On a per thread basis, level signal indicating Slot 0 is valid. By instrumenting a single slot we can calculate service time for that slot. Previous machines required a separate signal indicating the slot was allocated. Because any signal can be routed to any counter in P8, we can count level in one PMC and edge detect in another PMC using the same signal",
"PublicDescription": "0.0"
},
- {,
+ {
"EventCode": "0xd09c",
"EventName": "PM_LSU_SRQ_S0_VALID",
"BriefDescription": "Slot 0 of SRQ valid",
"PublicDescription": "Slot 0 of SRQ validSRQ slot 0 valid"
},
- {,
+ {
"EventCode": "0xf093",
"EventName": "PM_LSU_SRQ_S39_ALLOC",
"BriefDescription": "SRQ slot 39 was released",
"PublicDescription": "0.0"
},
- {,
+ {
"EventCode": "0xf092",
"EventName": "PM_LSU_SRQ_S39_VALID",
"BriefDescription": "SRQ slot 39 was busy",
"PublicDescription": "SRQ slot 39 was busy42"
},
- {,
+ {
"EventCode": "0xd09b",
"EventName": "PM_LSU_SRQ_SYNC",
"BriefDescription": "A sync in the SRQ ended",
"PublicDescription": "0.0"
},
- {,
+ {
"EventCode": "0xd09a",
"EventName": "PM_LSU_SRQ_SYNC_CYC",
"BriefDescription": "A sync is in the SRQ (edge detect to count)",
"PublicDescription": "A sync is in the SRQ (edge detect to count)SRQ sync duration"
},
- {,
+ {
"EventCode": "0xf084",
"EventName": "PM_LSU_STORE_REJECT",
"BriefDescription": "Store reject on either pipe",
"PublicDescription": "LSU"
},
- {,
+ {
"EventCode": "0xd0a6",
"EventName": "PM_LSU_TWO_TABLEWALK_CYC",
"BriefDescription": "Cycles when two tablewalks pending on this thread",
"PublicDescription": "Cycles when two tablewalks pending on this thread42"
},
- {,
+ {
"EventCode": "0x5094",
"EventName": "PM_LWSYNC",
"BriefDescription": "threaded version, IC Misses where we got EA dir hit but no sector valids were on. ICBI took line out",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x209a",
"EventName": "PM_LWSYNC_HELD",
"BriefDescription": "LWSYNC held at dispatch",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x3013a",
"EventName": "PM_MRK_CRU_FIN",
"BriefDescription": "IFU non-branch finished",
"PublicDescription": "IFU non-branch marked instruction finished"
},
- {,
+ {
"EventCode": "0x4d146",
"EventName": "PM_MRK_DATA_FROM_L21_MOD",
"BriefDescription": "The processor's data cache was reloaded with Modified (M) data from another core's L2 on the same chip due to a marked load",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x2d126",
"EventName": "PM_MRK_DATA_FROM_L21_MOD_CYC",
"BriefDescription": "Duration in cycles to reload with Modified (M) data from another core's L2 on the same chip due to a marked load",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x3d146",
"EventName": "PM_MRK_DATA_FROM_L21_SHR",
"BriefDescription": "The processor's data cache was reloaded with Shared (S) data from another core's L2 on the same chip due to a marked load",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x2c126",
"EventName": "PM_MRK_DATA_FROM_L21_SHR_CYC",
"BriefDescription": "Duration in cycles to reload with Shared (S) data from another core's L2 on the same chip due to a marked load",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x4d144",
"EventName": "PM_MRK_DATA_FROM_L31_ECO_MOD",
"BriefDescription": "The processor's data cache was reloaded with Modified (M) data from another core's ECO L3 on the same chip due to a marked load",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x2d124",
"EventName": "PM_MRK_DATA_FROM_L31_ECO_MOD_CYC",
"BriefDescription": "Duration in cycles to reload with Modified (M) data from another core's ECO L3 on the same chip due to a marked load",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x3d144",
"EventName": "PM_MRK_DATA_FROM_L31_ECO_SHR",
"BriefDescription": "The processor's data cache was reloaded with Shared (S) data from another core's ECO L3 on the same chip due to a marked load",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x2c124",
"EventName": "PM_MRK_DATA_FROM_L31_ECO_SHR_CYC",
"BriefDescription": "Duration in cycles to reload with Shared (S) data from another core's ECO L3 on the same chip due to a marked load",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x2d144",
"EventName": "PM_MRK_DATA_FROM_L31_MOD",
"BriefDescription": "The processor's data cache was reloaded with Modified (M) data from another core's L3 on the same chip due to a marked load",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x4d124",
"EventName": "PM_MRK_DATA_FROM_L31_MOD_CYC",
"BriefDescription": "Duration in cycles to reload with Modified (M) data from another core's L3 on the same chip due to a marked load",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x1d146",
"EventName": "PM_MRK_DATA_FROM_L31_SHR",
"BriefDescription": "The processor's data cache was reloaded with Shared (S) data from another core's L3 on the same chip due to a marked load",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x4c126",
"EventName": "PM_MRK_DATA_FROM_L31_SHR_CYC",
"BriefDescription": "Duration in cycles to reload with Shared (S) data from another core's L3 on the same chip due to a marked load",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x201e0",
"EventName": "PM_MRK_DATA_FROM_MEM",
"BriefDescription": "The processor's data cache was reloaded from a memory location including L4 from local remote or distant due to a marked load",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x4f146",
"EventName": "PM_MRK_DPTEG_FROM_L21_MOD",
"BriefDescription": "A Page Table Entry was loaded into the TLB with Modified (M) data from another core's L2 on the same chip due to a marked data side request",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x3f146",
"EventName": "PM_MRK_DPTEG_FROM_L21_SHR",
"BriefDescription": "A Page Table Entry was loaded into the TLB with Shared (S) data from another core's L2 on the same chip due to a marked data side request",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x3f140",
"EventName": "PM_MRK_DPTEG_FROM_L2_DISP_CONFLICT_LDHITST",
"BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L2 with load hit store conflict due to a marked data side request",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x4f140",
"EventName": "PM_MRK_DPTEG_FROM_L2_DISP_CONFLICT_OTHER",
"BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L2 with dispatch conflict due to a marked data side request",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x4f144",
"EventName": "PM_MRK_DPTEG_FROM_L31_ECO_MOD",
"BriefDescription": "A Page Table Entry was loaded into the TLB with Modified (M) data from another core's ECO L3 on the same chip due to a marked data side request",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x3f144",
"EventName": "PM_MRK_DPTEG_FROM_L31_ECO_SHR",
"BriefDescription": "A Page Table Entry was loaded into the TLB with Shared (S) data from another core's ECO L3 on the same chip due to a marked data side request",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x2f144",
"EventName": "PM_MRK_DPTEG_FROM_L31_MOD",
"BriefDescription": "A Page Table Entry was loaded into the TLB with Modified (M) data from another core's L3 on the same chip due to a marked data side request",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x1f146",
"EventName": "PM_MRK_DPTEG_FROM_L31_SHR",
"BriefDescription": "A Page Table Entry was loaded into the TLB with Shared (S) data from another core's L3 on the same chip due to a marked data side request",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x30156",
"EventName": "PM_MRK_FAB_RSP_MATCH",
"BriefDescription": "ttype and cresp matched as specified in MMCR1",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x4f152",
"EventName": "PM_MRK_FAB_RSP_MATCH_CYC",
"BriefDescription": "cresp/ttype match cycles",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x2013c",
"EventName": "PM_MRK_FILT_MATCH",
"BriefDescription": "Marked filter Match",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x1013c",
"EventName": "PM_MRK_FIN_STALL_CYC",
"BriefDescription": "Marked instruction Finish Stall cycles (marked finish after NTC) (use edge detect to count )",
"PublicDescription": "Marked instruction Finish Stall cycles (marked finish after NTC) (use edge detect to count #)"
},
- {,
+ {
"EventCode": "0x40130",
"EventName": "PM_MRK_GRP_CMPL",
"BriefDescription": "marked instruction finished (completed)",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x4013a",
"EventName": "PM_MRK_GRP_IC_MISS",
"BriefDescription": "Marked Group experienced I cache miss",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x3013c",
"EventName": "PM_MRK_GRP_NTC",
"BriefDescription": "Marked group ntc cycles",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x1013f",
"EventName": "PM_MRK_LD_MISS_EXPOSED",
"BriefDescription": "Marked Load exposed Miss (exposed period ended)",
"PublicDescription": "Marked Load exposed Miss (use edge detect to count #)"
},
- {,
+ {
"EventCode": "0xd180",
"EventName": "PM_MRK_LSU_FLUSH",
"BriefDescription": "Flush: (marked) : All Cases",
"PublicDescription": "Flush: (marked) : All Cases42"
},
- {,
+ {
"EventCode": "0xd188",
"EventName": "PM_MRK_LSU_FLUSH_LRQ",
"BriefDescription": "Flush: (marked) LRQ",
"PublicDescription": "Flush: (marked) LRQMarked LRQ flushes"
},
- {,
+ {
"EventCode": "0xd18a",
"EventName": "PM_MRK_LSU_FLUSH_SRQ",
"BriefDescription": "Flush: (marked) SRQ",
"PublicDescription": "Flush: (marked) SRQMarked SRQ lhs flushes"
},
- {,
+ {
"EventCode": "0xd184",
"EventName": "PM_MRK_LSU_FLUSH_ULD",
"BriefDescription": "Flush: (marked) Unaligned Load",
"PublicDescription": "Flush: (marked) Unaligned LoadMarked unaligned load flushes"
},
- {,
+ {
"EventCode": "0xd186",
"EventName": "PM_MRK_LSU_FLUSH_UST",
"BriefDescription": "Flush: (marked) Unaligned Store",
"PublicDescription": "Flush: (marked) Unaligned StoreMarked unaligned store flushes"
},
- {,
+ {
"EventCode": "0x40164",
"EventName": "PM_MRK_LSU_REJECT",
"BriefDescription": "LSU marked reject (up to 2 per cycle)",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x30164",
"EventName": "PM_MRK_LSU_REJECT_ERAT_MISS",
"BriefDescription": "LSU marked reject due to ERAT (up to 2 per cycle)",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x1d15a",
"EventName": "PM_MRK_SRC_PREF_TRACK_EFF",
"BriefDescription": "Marked src pref track was effective",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x3d15a",
"EventName": "PM_MRK_SRC_PREF_TRACK_INEFF",
"BriefDescription": "Prefetch tracked was ineffective for marked src",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x4d15c",
"EventName": "PM_MRK_SRC_PREF_TRACK_MOD",
"BriefDescription": "Prefetch tracked was moderate for marked src",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x1d15c",
"EventName": "PM_MRK_SRC_PREF_TRACK_MOD_L2",
"BriefDescription": "Marked src Prefetch Tracked was moderate (source L2)",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x3d15c",
"EventName": "PM_MRK_SRC_PREF_TRACK_MOD_L3",
"BriefDescription": "Prefetch tracked was moderate (L3 hit) for marked src",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x1c15a",
"EventName": "PM_MRK_TGT_PREF_TRACK_EFF",
"BriefDescription": "Marked target pref track was effective",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x3c15a",
"EventName": "PM_MRK_TGT_PREF_TRACK_INEFF",
"BriefDescription": "Prefetch tracked was ineffective for marked target",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x4c15c",
"EventName": "PM_MRK_TGT_PREF_TRACK_MOD",
"BriefDescription": "Prefetch tracked was moderate for marked target",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x1c15c",
"EventName": "PM_MRK_TGT_PREF_TRACK_MOD_L2",
"BriefDescription": "Marked target Prefetch Tracked was moderate (source L2)",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x3c15c",
"EventName": "PM_MRK_TGT_PREF_TRACK_MOD_L3",
"BriefDescription": "Prefetch tracked was moderate (L3 hit) for marked target",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x20b0",
"EventName": "PM_NESTED_TEND",
"BriefDescription": "Completion time nested tend",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x20b6",
"EventName": "PM_NON_FAV_TBEGIN",
"BriefDescription": "Dispatch time non favored tbegin",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x2001a",
"EventName": "PM_NTCG_ALL_FIN",
"BriefDescription": "Cycles after all instructions have finished to group completed",
"PublicDescription": "Ccycles after all instructions have finished to group completed"
},
- {,
+ {
"EventCode": "0x20ac",
"EventName": "PM_OUTER_TBEGIN",
"BriefDescription": "Completion time outer tbegin",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x20ae",
"EventName": "PM_OUTER_TEND",
"BriefDescription": "Completion time outer tend",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x2005a",
"EventName": "PM_PREF_TRACKED",
"BriefDescription": "Total number of Prefetch Operations that were tracked",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x1005a",
"EventName": "PM_PREF_TRACK_EFF",
"BriefDescription": "Prefetch Tracked was effective",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x3005a",
"EventName": "PM_PREF_TRACK_INEFF",
"BriefDescription": "Prefetch tracked was ineffective",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x4005a",
"EventName": "PM_PREF_TRACK_MOD",
"BriefDescription": "Prefetch tracked was moderate",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x1005c",
"EventName": "PM_PREF_TRACK_MOD_L2",
"BriefDescription": "Prefetch Tracked was moderate (source L2)",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x3005c",
"EventName": "PM_PREF_TRACK_MOD_L3",
"BriefDescription": "Prefetch tracked was moderate (L3)",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0xe084",
"EventName": "PM_PTE_PREFETCH",
"BriefDescription": "PTE prefetches",
"PublicDescription": "PTE prefetches42"
},
- {,
+ {
"EventCode": "0x16081",
"EventName": "PM_RC0_ALLOC",
"BriefDescription": "RC mach 0 Busy. Used by PMU to sample ave RC livetime(mach0 used as sample point)",
"PublicDescription": "0.0"
},
- {,
+ {
"EventCode": "0x16080",
"EventName": "PM_RC0_BUSY",
"BriefDescription": "RC mach 0 Busy. Used by PMU to sample ave RC livetime(mach0 used as sample point)",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x200301ea",
"EventName": "PM_RC_LIFETIME_EXC_1024",
"BriefDescription": "Number of times the RC machine for a sampled instruction was active for more than 1024 cycles",
"PublicDescription": "Reload latency exceeded 1024 cyc"
},
- {,
+ {
"EventCode": "0x200401ec",
"EventName": "PM_RC_LIFETIME_EXC_2048",
"BriefDescription": "Number of times the RC machine for a sampled instruction was active for more than 2048 cycles",
"PublicDescription": "Threshold counter exceeded a value of 2048"
},
- {,
+ {
"EventCode": "0x200101e8",
"EventName": "PM_RC_LIFETIME_EXC_256",
"BriefDescription": "Number of times the RC machine for a sampled instruction was active for more than 256 cycles",
"PublicDescription": "Threshold counter exceed a count of 256"
},
- {,
+ {
"EventCode": "0x200201e6",
"EventName": "PM_RC_LIFETIME_EXC_32",
"BriefDescription": "Number of times the RC machine for a sampled instruction was active for more than 32 cycles",
"PublicDescription": "Reload latency exceeded 32 cyc"
},
- {,
+ {
"EventCode": "0x36088",
"EventName": "PM_RC_USAGE",
"BriefDescription": "Continuous 16 cycle(2to1) window where this signals rotates thru sampling each L2 RC machine busy. PMU uses this wave to then do 16 cyc count to sample total number of machs running",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x20004",
"EventName": "PM_REAL_SRQ_FULL",
"BriefDescription": "Out of real srq entries",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x2006a",
"EventName": "PM_RUN_CYC_SMT2_SHRD_MODE",
"BriefDescription": "cycles this threads run latch is set and the core is in SMT2 shared mode",
"PublicDescription": "Cycles run latch is set and core is in SMT2-shared mode"
},
- {,
+ {
"EventCode": "0x1006a",
"EventName": "PM_RUN_CYC_SMT2_SPLIT_MODE",
"BriefDescription": "Cycles run latch is set and core is in SMT2-split mode",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x4006c",
"EventName": "PM_RUN_CYC_SMT8_MODE",
"BriefDescription": "Cycles run latch is set and core is in SMT8 mode",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0xf082",
"EventName": "PM_SEC_ERAT_HIT",
"BriefDescription": "secondary ERAT Hit",
"PublicDescription": "secondary ERAT Hit42"
},
- {,
+ {
"EventCode": "0x508c",
"EventName": "PM_SHL_CREATED",
"BriefDescription": "Store-Hit-Load Table Entry Created",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x508e",
"EventName": "PM_SHL_ST_CONVERT",
"BriefDescription": "Store-Hit-Load Table Read Hit with entry Enabled",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x5090",
"EventName": "PM_SHL_ST_DISABLE",
"BriefDescription": "Store-Hit-Load Table Read Hit with entry Disabled (entry was disabled due to the entry shown to not prevent the flush)",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x26085",
"EventName": "PM_SN0_ALLOC",
"BriefDescription": "SN mach 0 Busy. Used by PMU to sample ave RC livetime(mach0 used as sample point)",
"PublicDescription": "0.0"
},
- {,
+ {
"EventCode": "0x26084",
"EventName": "PM_SN0_BUSY",
"BriefDescription": "SN mach 0 Busy. Used by PMU to sample ave RC livetime(mach0 used as sample point)",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0xd0b2",
"EventName": "PM_SNOOP_TLBIE",
"BriefDescription": "TLBIE snoop",
"PublicDescription": "TLBIE snoopSnoop TLBIE"
},
- {,
+ {
"EventCode": "0x4608c",
"EventName": "PM_SN_USAGE",
"BriefDescription": "Continuous 16 cycle(2to1) window where this signals rotates thru sampling each L2 SN machine busy. PMU uses this wave to then do 16 cyc count to sample total number of machs running",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x10028",
"EventName": "PM_STALL_END_GCT_EMPTY",
"BriefDescription": "Count ended because GCT went empty",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0xc090",
"EventName": "PM_STCX_LSU",
"BriefDescription": "STCX executed reported at sent to nest",
"PublicDescription": "STCX executed reported at sent to nest42"
},
- {,
+ {
"EventCode": "0x3090",
"EventName": "PM_SWAP_CANCEL",
"BriefDescription": "SWAP cancel , rtag not available",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x3092",
"EventName": "PM_SWAP_CANCEL_GPR",
"BriefDescription": "SWAP cancel , rtag not available for gpr",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x308c",
"EventName": "PM_SWAP_COMPLETE",
"BriefDescription": "swap cast in completed",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x308e",
"EventName": "PM_SWAP_COMPLETE_GPR",
"BriefDescription": "swap cast in completed fpr gpr",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0xe086",
"EventName": "PM_TABLEWALK_CYC_PREF",
"BriefDescription": "tablewalk qualified for pte prefetches",
"PublicDescription": "tablewalk qualified for pte prefetches42"
},
- {,
+ {
"EventCode": "0x20b2",
"EventName": "PM_TABORT_TRECLAIM",
"BriefDescription": "Completion time tabortnoncd, tabortcd, treclaim",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0xe0ba",
"EventName": "PM_TEND_PEND_CYC",
"BriefDescription": "TEND latency per thread",
"PublicDescription": "TEND latency per thread42"
},
- {,
+ {
"EventCode": "0x10012",
"EventName": "PM_THRD_GRP_CMPL_BOTH_CYC",
"BriefDescription": "Cycles group completed on both completion slots by any thread",
"PublicDescription": "Two threads finished same cycle (gated by run latch)"
},
- {,
+ {
"EventCode": "0x40bc",
"EventName": "PM_THRD_PRIO_0_1_CYC",
"BriefDescription": "Cycles thread running at priority level 0 or 1",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x40be",
"EventName": "PM_THRD_PRIO_2_3_CYC",
"BriefDescription": "Cycles thread running at priority level 2 or 3",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x5080",
"EventName": "PM_THRD_PRIO_4_5_CYC",
"BriefDescription": "Cycles thread running at priority level 4 or 5",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x5082",
"EventName": "PM_THRD_PRIO_6_7_CYC",
"BriefDescription": "Cycles thread running at priority level 6 or 7",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x3098",
"EventName": "PM_THRD_REBAL_CYC",
"BriefDescription": "cycles rebalance was active",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x20b8",
"EventName": "PM_TM_BEGIN_ALL",
"BriefDescription": "Tm any tbegin",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x20ba",
"EventName": "PM_TM_END_ALL",
"BriefDescription": "Tm any tend",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x3086",
"EventName": "PM_TM_FAIL_CONF_NON_TM",
"BriefDescription": "TEXAS fail reason @ completion",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x3088",
"EventName": "PM_TM_FAIL_CON_TM",
"BriefDescription": "TEXAS fail reason @ completion",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0xe0b2",
"EventName": "PM_TM_FAIL_DISALLOW",
"BriefDescription": "TM fail disallow",
"PublicDescription": "TM fail disallow42"
},
- {,
+ {
"EventCode": "0x3084",
"EventName": "PM_TM_FAIL_FOOTPRINT_OVERFLOW",
"BriefDescription": "TEXAS fail reason @ completion",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0xe0b8",
"EventName": "PM_TM_FAIL_NON_TX_CONFLICT",
"BriefDescription": "Non transactional conflict from LSU whtver gets repoted to texas",
"PublicDescription": "Non transactional conflict from LSU whtver gets repoted to texas42"
},
- {,
+ {
"EventCode": "0x308a",
"EventName": "PM_TM_FAIL_SELF",
"BriefDescription": "TEXAS fail reason @ completion",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0xe0b4",
"EventName": "PM_TM_FAIL_TLBIE",
"BriefDescription": "TLBIE hit bloom filter",
"PublicDescription": "TLBIE hit bloom filter42"
},
- {,
+ {
"EventCode": "0xe0b6",
"EventName": "PM_TM_FAIL_TX_CONFLICT",
"BriefDescription": "Transactional conflict from LSU, whatever gets reported to texas",
"PublicDescription": "Transactional conflict from LSU, whatever gets reported to texas 42"
},
- {,
+ {
"EventCode": "0x20bc",
"EventName": "PM_TM_TBEGIN",
"BriefDescription": "Tm nested tbegin",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x3080",
"EventName": "PM_TM_TRESUME",
"BriefDescription": "Tm resume",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x20be",
"EventName": "PM_TM_TSUSPEND",
"BriefDescription": "Tm suspend",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0xe08c",
"EventName": "PM_UP_PREF_L3",
"BriefDescription": "Micropartition prefetch",
"PublicDescription": "Micropartition prefetch42"
},
- {,
+ {
"EventCode": "0xe08e",
"EventName": "PM_UP_PREF_POINTER",
"BriefDescription": "Micrpartition pointer prefetches",
"PublicDescription": "Micrpartition pointer prefetches42"
},
- {,
+ {
"EventCode": "0xa0a4",
"EventName": "PM_VSU0_16FLOP",
"BriefDescription": "Sixteen flops operation (SP vector versions of fdiv,fsqrt)",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0xa080",
"EventName": "PM_VSU0_1FLOP",
"BriefDescription": "one flop (fadd, fmul, fsub, fcmp, fsel, fabs, fnabs, fres, fsqrte, fneg) operation finished",
"PublicDescription": "one flop (fadd, fmul, fsub, fcmp, fsel, fabs, fnabs, fres, fsqrte, fneg) operation finishedDecode into 1,2,4 FLOP according to instr IOP, multiplied by #vector elements according to route( eg x1, x2, x4) Only if instr sends finish to ISU"
},
- {,
+ {
"EventCode": "0xa098",
"EventName": "PM_VSU0_2FLOP",
"BriefDescription": "two flops operation (scalar fmadd, fnmadd, fmsub, fnmsub and DP vector versions of single flop instructions)",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0xa09c",
"EventName": "PM_VSU0_4FLOP",
"BriefDescription": "four flops operation (scalar fdiv, fsqrt, DP vector version of fmadd, fnmadd, fmsub, fnmsub, SP vector versions of single flop instructions)",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0xa0a0",
"EventName": "PM_VSU0_8FLOP",
"BriefDescription": "eight flops operation (DP vector versions of fdiv,fsqrt and SP vector versions of fmadd,fnmadd,fmsub,fnmsub)",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0xb0a4",
"EventName": "PM_VSU0_COMPLEX_ISSUED",
"BriefDescription": "Complex VMX instruction issued",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0xb0b4",
"EventName": "PM_VSU0_CY_ISSUED",
"BriefDescription": "Cryptographic instruction RFC02196 Issued",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0xb0a8",
"EventName": "PM_VSU0_DD_ISSUED",
"BriefDescription": "64BIT Decimal Issued",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0xa08c",
"EventName": "PM_VSU0_DP_2FLOP",
"BriefDescription": "DP vector version of fmul, fsub, fcmp, fsel, fabs, fnabs, fres ,fsqrte, fneg",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0xa090",
"EventName": "PM_VSU0_DP_FMA",
"BriefDescription": "DP vector version of fmadd,fnmadd,fmsub,fnmsub",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0xa094",
"EventName": "PM_VSU0_DP_FSQRT_FDIV",
"BriefDescription": "DP vector versions of fdiv,fsqrt",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0xb0ac",
"EventName": "PM_VSU0_DQ_ISSUED",
"BriefDescription": "128BIT Decimal Issued",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0xb0b0",
"EventName": "PM_VSU0_EX_ISSUED",
"BriefDescription": "Direct move 32/64b VRFtoGPR RFC02206 Issued",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0xa0bc",
"EventName": "PM_VSU0_FIN",
"BriefDescription": "VSU0 Finished an instruction",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0xa084",
"EventName": "PM_VSU0_FMA",
"BriefDescription": "two flops operation (fmadd, fnmadd, fmsub, fnmsub) Scalar instructions only!",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0xb098",
"EventName": "PM_VSU0_FPSCR",
"BriefDescription": "Move to/from FPSCR type instruction issued on Pipe 0",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0xa088",
"EventName": "PM_VSU0_FSQRT_FDIV",
"BriefDescription": "four flops operation (fdiv,fsqrt) Scalar Instructions only!",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0xb090",
"EventName": "PM_VSU0_PERMUTE_ISSUED",
"BriefDescription": "Permute VMX Instruction Issued",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0xb088",
"EventName": "PM_VSU0_SCALAR_DP_ISSUED",
"BriefDescription": "Double Precision scalar instruction issued on Pipe0",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0xb094",
"EventName": "PM_VSU0_SIMPLE_ISSUED",
"BriefDescription": "Simple VMX instruction issued",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0xa0a8",
"EventName": "PM_VSU0_SINGLE",
"BriefDescription": "FPU single precision",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0xb09c",
"EventName": "PM_VSU0_SQ",
"BriefDescription": "Store Vector Issued",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0xb08c",
"EventName": "PM_VSU0_STF",
"BriefDescription": "FPU store (SP or DP) issued on Pipe0",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0xb080",
"EventName": "PM_VSU0_VECTOR_DP_ISSUED",
"BriefDescription": "Double Precision vector instruction issued on Pipe0",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0xb084",
"EventName": "PM_VSU0_VECTOR_SP_ISSUED",
"BriefDescription": "Single Precision vector instruction issued (executed)",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0xa0a6",
"EventName": "PM_VSU1_16FLOP",
"BriefDescription": "Sixteen flops operation (SP vector versions of fdiv,fsqrt)",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0xa082",
"EventName": "PM_VSU1_1FLOP",
"BriefDescription": "one flop (fadd, fmul, fsub, fcmp, fsel, fabs, fnabs, fres, fsqrte, fneg) operation finished",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0xa09a",
"EventName": "PM_VSU1_2FLOP",
"BriefDescription": "two flops operation (scalar fmadd, fnmadd, fmsub, fnmsub and DP vector versions of single flop instructions)",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0xa09e",
"EventName": "PM_VSU1_4FLOP",
"BriefDescription": "four flops operation (scalar fdiv, fsqrt, DP vector version of fmadd, fnmadd, fmsub, fnmsub, SP vector versions of single flop instructions)",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0xa0a2",
"EventName": "PM_VSU1_8FLOP",
"BriefDescription": "eight flops operation (DP vector versions of fdiv,fsqrt and SP vector versions of fmadd,fnmadd,fmsub,fnmsub)",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0xb0a6",
"EventName": "PM_VSU1_COMPLEX_ISSUED",
"BriefDescription": "Complex VMX instruction issued",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0xb0b6",
"EventName": "PM_VSU1_CY_ISSUED",
"BriefDescription": "Cryptographic instruction RFC02196 Issued",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0xb0aa",
"EventName": "PM_VSU1_DD_ISSUED",
"BriefDescription": "64BIT Decimal Issued",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0xa08e",
"EventName": "PM_VSU1_DP_2FLOP",
"BriefDescription": "DP vector version of fmul, fsub, fcmp, fsel, fabs, fnabs, fres ,fsqrte, fneg",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0xa092",
"EventName": "PM_VSU1_DP_FMA",
"BriefDescription": "DP vector version of fmadd,fnmadd,fmsub,fnmsub",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0xa096",
"EventName": "PM_VSU1_DP_FSQRT_FDIV",
"BriefDescription": "DP vector versions of fdiv,fsqrt",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0xb0ae",
"EventName": "PM_VSU1_DQ_ISSUED",
"BriefDescription": "128BIT Decimal Issued",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0xb0b2",
"EventName": "PM_VSU1_EX_ISSUED",
"BriefDescription": "Direct move 32/64b VRFtoGPR RFC02206 Issued",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0xa0be",
"EventName": "PM_VSU1_FIN",
"BriefDescription": "VSU1 Finished an instruction",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0xa086",
"EventName": "PM_VSU1_FMA",
"BriefDescription": "two flops operation (fmadd, fnmadd, fmsub, fnmsub) Scalar instructions only!",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0xb09a",
"EventName": "PM_VSU1_FPSCR",
"BriefDescription": "Move to/from FPSCR type instruction issued on Pipe 0",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0xa08a",
"EventName": "PM_VSU1_FSQRT_FDIV",
"BriefDescription": "four flops operation (fdiv,fsqrt) Scalar Instructions only!",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0xb092",
"EventName": "PM_VSU1_PERMUTE_ISSUED",
"BriefDescription": "Permute VMX Instruction Issued",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0xb08a",
"EventName": "PM_VSU1_SCALAR_DP_ISSUED",
"BriefDescription": "Double Precision scalar instruction issued on Pipe1",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0xb096",
"EventName": "PM_VSU1_SIMPLE_ISSUED",
"BriefDescription": "Simple VMX instruction issued",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0xa0aa",
"EventName": "PM_VSU1_SINGLE",
"BriefDescription": "FPU single precision",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0xb09e",
"EventName": "PM_VSU1_SQ",
"BriefDescription": "Store Vector Issued",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0xb08e",
"EventName": "PM_VSU1_STF",
"BriefDescription": "FPU store (SP or DP) issued on Pipe1",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0xb082",
"EventName": "PM_VSU1_VECTOR_DP_ISSUED",
"BriefDescription": "Double Precision vector instruction issued on Pipe1",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0xb086",
"EventName": "PM_VSU1_VECTOR_SP_ISSUED",
"BriefDescription": "Single Precision vector instruction issued (executed)",
"PublicDescription": ""
- },
+ }
]
diff --git a/tools/perf/pmu-events/arch/powerpc/power8/pipeline.json b/tools/perf/pmu-events/arch/powerpc/power8/pipeline.json
index 293f3a4c6901..0acfaaef4772 100644
--- a/tools/perf/pmu-events/arch/powerpc/power8/pipeline.json
+++ b/tools/perf/pmu-events/arch/powerpc/power8/pipeline.json
@@ -1,350 +1,350 @@
[
- {,
+ {
"EventCode": "0x100f2",
"EventName": "PM_1PLUS_PPC_CMPL",
"BriefDescription": "1 or more ppc insts finished",
"PublicDescription": "1 or more ppc insts finished (completed)"
},
- {,
+ {
"EventCode": "0x400f2",
"EventName": "PM_1PLUS_PPC_DISP",
"BriefDescription": "Cycles at least one Instr Dispatched",
"PublicDescription": "Cycles at least one Instr Dispatched. Could be a group with only microcode. Issue HW016521"
},
- {,
+ {
"EventCode": "0x100fa",
"EventName": "PM_ANY_THRD_RUN_CYC",
"BriefDescription": "One of threads in run_cycles",
"PublicDescription": "Any thread in run_cycles (was one thread in run_cycles)"
},
- {,
+ {
"EventCode": "0x4000a",
"EventName": "PM_CMPLU_STALL",
"BriefDescription": "Completion stall",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x4d018",
"EventName": "PM_CMPLU_STALL_BRU",
"BriefDescription": "Completion stall due to a Branch Unit",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x2c012",
"EventName": "PM_CMPLU_STALL_DCACHE_MISS",
"BriefDescription": "Completion stall by Dcache miss",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x2c018",
"EventName": "PM_CMPLU_STALL_DMISS_L21_L31",
"BriefDescription": "Completion stall by Dcache miss which resolved on chip ( excluding local L2/L3)",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x2c016",
"EventName": "PM_CMPLU_STALL_DMISS_L2L3",
"BriefDescription": "Completion stall by Dcache miss which resolved in L2/L3",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x4c016",
"EventName": "PM_CMPLU_STALL_DMISS_L2L3_CONFLICT",
"BriefDescription": "Completion stall due to cache miss that resolves in the L2 or L3 with a conflict",
"PublicDescription": "Completion stall due to cache miss resolving in core's L2/L3 with a conflict"
},
- {,
+ {
"EventCode": "0x4c01a",
"EventName": "PM_CMPLU_STALL_DMISS_L3MISS",
"BriefDescription": "Completion stall due to cache miss resolving missed the L3",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x4c018",
"EventName": "PM_CMPLU_STALL_DMISS_LMEM",
"BriefDescription": "Completion stall due to cache miss that resolves in local memory",
"PublicDescription": "Completion stall due to cache miss resolving in core's Local Memory"
},
- {,
+ {
"EventCode": "0x2c01c",
"EventName": "PM_CMPLU_STALL_DMISS_REMOTE",
"BriefDescription": "Completion stall by Dcache miss which resolved from remote chip (cache or memory)",
"PublicDescription": "Completion stall by Dcache miss which resolved on chip ( excluding local L2/L3)"
},
- {,
+ {
"EventCode": "0x4c012",
"EventName": "PM_CMPLU_STALL_ERAT_MISS",
"BriefDescription": "Completion stall due to LSU reject ERAT miss",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x4d016",
"EventName": "PM_CMPLU_STALL_FXLONG",
"BriefDescription": "Completion stall due to a long latency fixed point instruction",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x2d016",
"EventName": "PM_CMPLU_STALL_FXU",
"BriefDescription": "Completion stall due to FXU",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x30036",
"EventName": "PM_CMPLU_STALL_HWSYNC",
"BriefDescription": "completion stall due to hwsync",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x4d014",
"EventName": "PM_CMPLU_STALL_LOAD_FINISH",
"BriefDescription": "Completion stall due to a Load finish",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x2c010",
"EventName": "PM_CMPLU_STALL_LSU",
"BriefDescription": "Completion stall by LSU instruction",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x10036",
"EventName": "PM_CMPLU_STALL_LWSYNC",
"BriefDescription": "completion stall due to isync/lwsync",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x30006",
"EventName": "PM_CMPLU_STALL_OTHER_CMPL",
"BriefDescription": "Instructions core completed while this tread was stalled",
"PublicDescription": "Instructions core completed while this thread was stalled"
},
- {,
+ {
"EventCode": "0x4c01c",
"EventName": "PM_CMPLU_STALL_ST_FWD",
"BriefDescription": "Completion stall due to store forward",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x1001c",
"EventName": "PM_CMPLU_STALL_THRD",
"BriefDescription": "Completion Stalled due to thread conflict. Group ready to complete but it was another thread's turn",
"PublicDescription": "Completion stall due to thread conflict"
},
- {,
+ {
"EventCode": "0x1e",
"EventName": "PM_CYC",
"BriefDescription": "Cycles",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x10006",
"EventName": "PM_DISP_HELD",
"BriefDescription": "Dispatch Held",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x4003c",
"EventName": "PM_DISP_HELD_SYNC_HOLD",
"BriefDescription": "Dispatch held due to SYNC hold",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x200f8",
"EventName": "PM_EXT_INT",
"BriefDescription": "external interrupt",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x400f8",
"EventName": "PM_FLUSH",
"BriefDescription": "Flush (any type)",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x30012",
"EventName": "PM_FLUSH_COMPLETION",
"BriefDescription": "Completion Flush",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x3000c",
"EventName": "PM_FREQ_DOWN",
"BriefDescription": "Power Management: Below Threshold B",
"PublicDescription": "Frequency is being slewed down due to Power Management"
},
- {,
+ {
"EventCode": "0x4000c",
"EventName": "PM_FREQ_UP",
"BriefDescription": "Power Management: Above Threshold A",
"PublicDescription": "Frequency is being slewed up due to Power Management"
},
- {,
+ {
"EventCode": "0x2000a",
"EventName": "PM_HV_CYC",
"BriefDescription": "Cycles in which msr_hv is high. Note that this event does not take msr_pr into consideration",
"PublicDescription": "cycles in hypervisor mode"
},
- {,
+ {
"EventCode": "0x3405e",
"EventName": "PM_IFETCH_THROTTLE",
"BriefDescription": "Cycles in which Instruction fetch throttle was active",
"PublicDescription": "Cycles instruction fecth was throttled in IFU"
},
- {,
+ {
"EventCode": "0x10014",
"EventName": "PM_IOPS_CMPL",
"BriefDescription": "Internal Operations completed",
"PublicDescription": "IOPS Completed"
},
- {,
+ {
"EventCode": "0x3c058",
"EventName": "PM_LARX_FIN",
"BriefDescription": "Larx finished",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x1002e",
"EventName": "PM_LD_CMPL",
"BriefDescription": "count of Loads completed",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x10062",
"EventName": "PM_LD_L3MISS_PEND_CYC",
"BriefDescription": "Cycles L3 miss was pending for this thread",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x30066",
"EventName": "PM_LSU_FIN",
"BriefDescription": "LSU Finished an instruction (up to 2 per cycle)",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x2003e",
"EventName": "PM_LSU_LMQ_SRQ_EMPTY_CYC",
"BriefDescription": "LSU empty (lmq and srq empty)",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x2e05c",
"EventName": "PM_LSU_REJECT_ERAT_MISS",
"BriefDescription": "LSU Reject due to ERAT (up to 4 per cycles)",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x4e05c",
"EventName": "PM_LSU_REJECT_LHS",
"BriefDescription": "LSU Reject due to LHS (up to 4 per cycle)",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x1e05c",
"EventName": "PM_LSU_REJECT_LMQ_FULL",
"BriefDescription": "LSU reject due to LMQ full ( 4 per cycle)",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x1001a",
"EventName": "PM_LSU_SRQ_FULL_CYC",
"BriefDescription": "Storage Queue is full and is blocking dispatch",
"PublicDescription": "SRQ is Full"
},
- {,
+ {
"EventCode": "0x40014",
"EventName": "PM_PROBE_NOP_DISP",
"BriefDescription": "ProbeNops dispatched",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x600f4",
"EventName": "PM_RUN_CYC",
"BriefDescription": "Run_cycles",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x3006c",
"EventName": "PM_RUN_CYC_SMT2_MODE",
"BriefDescription": "Cycles run latch is set and core is in SMT2 mode",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x2006c",
"EventName": "PM_RUN_CYC_SMT4_MODE",
"BriefDescription": "cycles this threads run latch is set and the core is in SMT4 mode",
"PublicDescription": "Cycles run latch is set and core is in SMT4 mode"
},
- {,
+ {
"EventCode": "0x1006c",
"EventName": "PM_RUN_CYC_ST_MODE",
"BriefDescription": "Cycles run latch is set and core is in ST mode",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x500fa",
"EventName": "PM_RUN_INST_CMPL",
"BriefDescription": "Run_Instructions",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x1e058",
"EventName": "PM_STCX_FAIL",
"BriefDescription": "stcx failed",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x20016",
"EventName": "PM_ST_CMPL",
"BriefDescription": "Store completion count",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x200f0",
"EventName": "PM_ST_FIN",
"BriefDescription": "Store Instructions Finished",
"PublicDescription": "Store Instructions Finished (store sent to nest)"
},
- {,
+ {
"EventCode": "0x20018",
"EventName": "PM_ST_FWD",
"BriefDescription": "Store forwards that finished",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x10026",
"EventName": "PM_TABLEWALK_CYC",
"BriefDescription": "Cycles when a tablewalk (I or D) is active",
"PublicDescription": "Tablewalk Active"
},
- {,
+ {
"EventCode": "0x300f8",
"EventName": "PM_TB_BIT_TRANS",
"BriefDescription": "timebase event",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x2000c",
"EventName": "PM_THRD_ALL_RUN_CYC",
"BriefDescription": "All Threads in Run_cycles (was both threads in run_cycles)",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x30058",
"EventName": "PM_TLBIE_FIN",
"BriefDescription": "tlbie finished",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x10060",
"EventName": "PM_TM_TRANS_RUN_CYC",
"BriefDescription": "run cycles in transactional state",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x2e012",
"EventName": "PM_TM_TX_PASS_RUN_CYC",
"BriefDescription": "cycles spent in successful transactions",
"PublicDescription": "run cycles spent in successful transactions"
- },
+ }
]
diff --git a/tools/perf/pmu-events/arch/powerpc/power8/pmc.json b/tools/perf/pmu-events/arch/powerpc/power8/pmc.json
index 583e4d937621..5e0469f68bea 100644
--- a/tools/perf/pmu-events/arch/powerpc/power8/pmc.json
+++ b/tools/perf/pmu-events/arch/powerpc/power8/pmc.json
@@ -1,140 +1,140 @@
[
- {,
+ {
"EventCode": "0x20010",
"EventName": "PM_PMC1_OVERFLOW",
"BriefDescription": "Overflow from counter 1",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x30010",
"EventName": "PM_PMC2_OVERFLOW",
"BriefDescription": "Overflow from counter 2",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x30020",
"EventName": "PM_PMC2_REWIND",
"BriefDescription": "PMC2 Rewind Event (did not match condition)",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x10022",
"EventName": "PM_PMC2_SAVED",
"BriefDescription": "PMC2 Rewind Value saved",
"PublicDescription": "PMC2 Rewind Value saved (matched condition)"
},
- {,
+ {
"EventCode": "0x40010",
"EventName": "PM_PMC3_OVERFLOW",
"BriefDescription": "Overflow from counter 3",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x10010",
"EventName": "PM_PMC4_OVERFLOW",
"BriefDescription": "Overflow from counter 4",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x10020",
"EventName": "PM_PMC4_REWIND",
"BriefDescription": "PMC4 Rewind Event",
"PublicDescription": "PMC4 Rewind Event (did not match condition)"
},
- {,
+ {
"EventCode": "0x30022",
"EventName": "PM_PMC4_SAVED",
"BriefDescription": "PMC4 Rewind Value saved (matched condition)",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x10024",
"EventName": "PM_PMC5_OVERFLOW",
"BriefDescription": "Overflow from counter 5",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x30024",
"EventName": "PM_PMC6_OVERFLOW",
"BriefDescription": "Overflow from counter 6",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x400f4",
"EventName": "PM_RUN_PURR",
"BriefDescription": "Run_PURR",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x10008",
"EventName": "PM_RUN_SPURR",
"BriefDescription": "Run SPURR",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x0",
"EventName": "PM_SUSPENDED",
"BriefDescription": "Counter OFF",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x301ea",
"EventName": "PM_THRESH_EXC_1024",
"BriefDescription": "Threshold counter exceeded a value of 1024",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x401ea",
"EventName": "PM_THRESH_EXC_128",
"BriefDescription": "Threshold counter exceeded a value of 128",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x401ec",
"EventName": "PM_THRESH_EXC_2048",
"BriefDescription": "Threshold counter exceeded a value of 2048",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x101e8",
"EventName": "PM_THRESH_EXC_256",
"BriefDescription": "Threshold counter exceed a count of 256",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x201e6",
"EventName": "PM_THRESH_EXC_32",
"BriefDescription": "Threshold counter exceeded a value of 32",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x101e6",
"EventName": "PM_THRESH_EXC_4096",
"BriefDescription": "Threshold counter exceed a count of 4096",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x201e8",
"EventName": "PM_THRESH_EXC_512",
"BriefDescription": "Threshold counter exceeded a value of 512",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x301e8",
"EventName": "PM_THRESH_EXC_64",
"BriefDescription": "IFU non-branch finished",
"PublicDescription": "Threshold counter exceeded a value of 64"
},
- {,
+ {
"EventCode": "0x101ec",
"EventName": "PM_THRESH_MET",
"BriefDescription": "threshold exceeded",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x4016e",
"EventName": "PM_THRESH_NOT_MET",
"BriefDescription": "Threshold counter did not meet threshold",
"PublicDescription": ""
- },
+ }
]
diff --git a/tools/perf/pmu-events/arch/powerpc/power8/translation.json b/tools/perf/pmu-events/arch/powerpc/power8/translation.json
index e47a55459bc8..623e7475b010 100644
--- a/tools/perf/pmu-events/arch/powerpc/power8/translation.json
+++ b/tools/perf/pmu-events/arch/powerpc/power8/translation.json
@@ -1,176 +1,176 @@
[
- {,
+ {
"EventCode": "0x4c054",
"EventName": "PM_DERAT_MISS_16G",
"BriefDescription": "Data ERAT Miss (Data TLB Access) page size 16G",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x3c054",
"EventName": "PM_DERAT_MISS_16M",
"BriefDescription": "Data ERAT Miss (Data TLB Access) page size 16M",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x1c056",
"EventName": "PM_DERAT_MISS_4K",
"BriefDescription": "Data ERAT Miss (Data TLB Access) page size 4K",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x2c054",
"EventName": "PM_DERAT_MISS_64K",
"BriefDescription": "Data ERAT Miss (Data TLB Access) page size 64K",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x4e048",
"EventName": "PM_DPTEG_FROM_DL2L3_MOD",
"BriefDescription": "A Page Table Entry was loaded into the TLB with Modified (M) data from another chip's L2 or L3 on a different Node or Group (Distant), as this chip due to a data side request",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x3e048",
"EventName": "PM_DPTEG_FROM_DL2L3_SHR",
"BriefDescription": "A Page Table Entry was loaded into the TLB with Shared (S) data from another chip's L2 or L3 on a different Node or Group (Distant), as this chip due to a data side request",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x1e042",
"EventName": "PM_DPTEG_FROM_L2",
"BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L2 due to a data side request",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x1e04e",
"EventName": "PM_DPTEG_FROM_L2MISS",
"BriefDescription": "A Page Table Entry was loaded into the TLB from a localtion other than the local core's L2 due to a data side request",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x2e040",
"EventName": "PM_DPTEG_FROM_L2_MEPF",
"BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L2 hit without dispatch conflicts on Mepf state. due to a data side request",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x1e040",
"EventName": "PM_DPTEG_FROM_L2_NO_CONFLICT",
"BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L2 without conflict due to a data side request",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x4e042",
"EventName": "PM_DPTEG_FROM_L3",
"BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L3 due to a data side request",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x3e042",
"EventName": "PM_DPTEG_FROM_L3_DISP_CONFLICT",
"BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L3 with dispatch conflict due to a data side request",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x2e042",
"EventName": "PM_DPTEG_FROM_L3_MEPF",
"BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L3 without dispatch conflicts hit on Mepf state. due to a data side request",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x1e044",
"EventName": "PM_DPTEG_FROM_L3_NO_CONFLICT",
"BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L3 without conflict due to a data side request",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x1e04c",
"EventName": "PM_DPTEG_FROM_LL4",
"BriefDescription": "A Page Table Entry was loaded into the TLB from the local chip's L4 cache due to a data side request",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x2e048",
"EventName": "PM_DPTEG_FROM_LMEM",
"BriefDescription": "A Page Table Entry was loaded into the TLB from the local chip's Memory due to a data side request",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x2e04c",
"EventName": "PM_DPTEG_FROM_MEMORY",
"BriefDescription": "A Page Table Entry was loaded into the TLB from a memory location including L4 from local remote or distant due to a data side request",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x4e04a",
"EventName": "PM_DPTEG_FROM_OFF_CHIP_CACHE",
"BriefDescription": "A Page Table Entry was loaded into the TLB either shared or modified data from another core's L2/L3 on a different chip (remote or distant) due to a data side request",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x1e048",
"EventName": "PM_DPTEG_FROM_ON_CHIP_CACHE",
"BriefDescription": "A Page Table Entry was loaded into the TLB either shared or modified data from another core's L2/L3 on the same chip due to a data side request",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x2e046",
"EventName": "PM_DPTEG_FROM_RL2L3_MOD",
"BriefDescription": "A Page Table Entry was loaded into the TLB with Modified (M) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to a data side request",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x1e04a",
"EventName": "PM_DPTEG_FROM_RL2L3_SHR",
"BriefDescription": "A Page Table Entry was loaded into the TLB with Shared (S) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to a data side request",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x2e04a",
"EventName": "PM_DPTEG_FROM_RL4",
"BriefDescription": "A Page Table Entry was loaded into the TLB from another chip's L4 on the same Node or Group ( Remote) due to a data side request",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x300fc",
"EventName": "PM_DTLB_MISS",
"BriefDescription": "Data PTEG reload",
"PublicDescription": "Data PTEG Reloaded (DTLB Miss)"
},
- {,
+ {
"EventCode": "0x1c058",
"EventName": "PM_DTLB_MISS_16G",
"BriefDescription": "Data TLB Miss page size 16G",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x4c056",
"EventName": "PM_DTLB_MISS_16M",
"BriefDescription": "Data TLB Miss page size 16M",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x2c056",
"EventName": "PM_DTLB_MISS_4K",
"BriefDescription": "Data TLB Miss page size 4k",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x3c056",
"EventName": "PM_DTLB_MISS_64K",
"BriefDescription": "Data TLB Miss page size 64K",
"PublicDescription": ""
},
- {,
+ {
"EventCode": "0x200f6",
"EventName": "PM_LSU_DERAT_MISS",
"BriefDescription": "DERAT Reloaded due to a DERAT miss",
"PublicDescription": "DERAT Reloaded (Miss)"
},
- {,
+ {
"EventCode": "0x20066",
"EventName": "PM_TLB_MISS",
"BriefDescription": "TLB Miss (I + D)",
"PublicDescription": ""
- },
+ }
]
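The hunks above and below all apply the same mechanical fix across the power9 pmu-events files: each JSON object previously opened with "{," which strict JSON parsers reject, and the patch rewrites it to a plain "{". As a minimal post-patch sanity check, a sketch like the following Python (the glob path is an assumption taken from the file names in this diff, run from the top of a kernel tree) would confirm that every file now parses:

#!/usr/bin/env python3
# Minimal sketch: confirm each power9 pmu-events file is valid JSON once
# the "{," openers have been rewritten to "{". The path is an assumption
# based on the file names in this diff.
import glob
import json
import sys

failed = False
for path in sorted(glob.glob("tools/perf/pmu-events/arch/powerpc/power9/*.json")):
    try:
        with open(path) as f:
            events = json.load(f)  # raises json.JSONDecodeError on any leftover "{,"
        print(f"{path}: OK, {len(events)} events")
    except json.JSONDecodeError as err:
        print(f"{path}: parse error: {err}")
        failed = True

sys.exit(1 if failed else 0)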
diff --git a/tools/perf/pmu-events/arch/powerpc/power9/cache.json b/tools/perf/pmu-events/arch/powerpc/power9/cache.json
index 851072105054..2984190c2118 100644
--- a/tools/perf/pmu-events/arch/powerpc/power9/cache.json
+++ b/tools/perf/pmu-events/arch/powerpc/power9/cache.json
@@ -1,107 +1,107 @@
[
- {,
+ {
"EventCode": "0x300F4",
"EventName": "PM_THRD_CONC_RUN_INST",
"BriefDescription": "PPC Instructions Finished by this thread when all threads in the core had the run-latch set"
},
- {,
+ {
"EventCode": "0x1E056",
"EventName": "PM_CMPLU_STALL_FLUSH_ANY_THREAD",
"BriefDescription": "Cycles in which the NTC instruction is not allowed to complete because any of the 4 threads in the same core suffered a flush, which blocks completion"
},
- {,
+ {
"EventCode": "0x4D016",
"EventName": "PM_CMPLU_STALL_FXLONG",
"BriefDescription": "Completion stall due to a long latency scalar fixed point instruction (division, square root)"
},
- {,
+ {
"EventCode": "0x2D016",
"EventName": "PM_CMPLU_STALL_FXU",
"BriefDescription": "Finish stall due to a scalar fixed point or CR instruction in the execution pipeline. These instructions get routed to the ALU, ALU2, and DIV pipes"
},
- {,
+ {
"EventCode": "0x4D12A",
"EventName": "PM_MRK_DATA_FROM_RL4_CYC",
"BriefDescription": "Duration in cycles to reload from another chip's L4 on the same Node or Group ( Remote) due to a marked load"
},
- {,
+ {
"EventCode": "0x1003C",
"EventName": "PM_CMPLU_STALL_DMISS_L2L3",
"BriefDescription": "Completion stall by Dcache miss which resolved in L2/L3"
},
- {,
+ {
"EventCode": "0x4C014",
"EventName": "PM_CMPLU_STALL_LMQ_FULL",
"BriefDescription": "Finish stall because the NTF instruction was a load that missed in the L1 and the LMQ was unable to accept this load miss request because it was full"
},
- {,
+ {
"EventCode": "0x14048",
"EventName": "PM_INST_FROM_ON_CHIP_CACHE",
"BriefDescription": "The processor's Instruction cache was reloaded either shared or modified data from another core's L2/L3 on the same chip due to an instruction fetch (not prefetch)"
},
- {,
+ {
"EventCode": "0x4D014",
"EventName": "PM_CMPLU_STALL_LOAD_FINISH",
"BriefDescription": "Finish stall because the NTF instruction was a load instruction with all its dependencies satisfied just going through the LSU pipe to finish"
},
- {,
+ {
"EventCode": "0x2404A",
"EventName": "PM_INST_FROM_RL4",
"BriefDescription": "The processor's Instruction cache was reloaded from another chip's L4 on the same Node or Group ( Remote) due to an instruction fetch (not prefetch)"
},
- {,
+ {
"EventCode": "0x1404A",
"EventName": "PM_INST_FROM_RL2L3_SHR",
"BriefDescription": "The processor's Instruction cache was reloaded with Shared (S) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to an instruction fetch (not prefetch)"
},
- {,
+ {
"EventCode": "0x401EA",
"EventName": "PM_THRESH_EXC_128",
"BriefDescription": "Threshold counter exceeded a value of 128"
},
- {,
+ {
"EventCode": "0x400F6",
"EventName": "PM_BR_MPRED_CMPL",
"BriefDescription": "Number of Branch Mispredicts"
},
- {,
+ {
"EventCode": "0x2F140",
"EventName": "PM_MRK_DPTEG_FROM_L2_MEPF",
"BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L2 hit without dispatch conflicts on Mepf state. due to a marked data side request. When using Radix Page Translation, this count excludes PDE reloads. Only PTE reloads are included"
},
- {,
+ {
"EventCode": "0x101E6",
"EventName": "PM_THRESH_EXC_4096",
"BriefDescription": "Threshold counter exceed a count of 4096"
},
- {,
+ {
"EventCode": "0x3F14A",
"EventName": "PM_MRK_DPTEG_FROM_RMEM",
"BriefDescription": "A Page Table Entry was loaded into the TLB from another chip's memory on the same Node or Group ( Remote) due to a marked data side request. When using Radix Page Translation, this count excludes PDE reloads. Only PTE reloads are included"
},
- {,
+ {
"EventCode": "0x4C016",
"EventName": "PM_CMPLU_STALL_DMISS_L2L3_CONFLICT",
"BriefDescription": "Completion stall due to cache miss that resolves in the L2 or L3 with a conflict"
},
- {,
+ {
"EventCode": "0x2C01A",
"EventName": "PM_CMPLU_STALL_LHS",
"BriefDescription": "Finish stall because the NTF instruction was a load that hit on an older store and it was waiting for store data"
},
- {,
+ {
"EventCode": "0x401E4",
"EventName": "PM_MRK_DTLB_MISS",
"BriefDescription": "Marked dtlb miss"
},
- {,
+ {
"EventCode": "0x24046",
"EventName": "PM_INST_FROM_RL2L3_MOD",
"BriefDescription": "The processor's Instruction cache was reloaded with Modified (M) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to an instruction fetch (not prefetch)"
},
- {,
+ {
"EventCode": "0x1002A",
"EventName": "PM_CMPLU_STALL_LARX",
"BriefDescription": "Finish stall because the NTF instruction was a larx waiting to be satisfied"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/powerpc/power9/floating-point.json b/tools/perf/pmu-events/arch/powerpc/power9/floating-point.json
index 8a83bca26552..d228d6c95ea2 100644
--- a/tools/perf/pmu-events/arch/powerpc/power9/floating-point.json
+++ b/tools/perf/pmu-events/arch/powerpc/power9/floating-point.json
@@ -1,32 +1,32 @@
[
- {,
+ {
"EventCode": "0x1415A",
"EventName": "PM_MRK_DATA_FROM_L2_DISP_CONFLICT_LDHITST_CYC",
"BriefDescription": "Duration in cycles to reload from local core's L2 with load hit store conflict due to a marked load"
},
- {,
+ {
"EventCode": "0x10058",
"EventName": "PM_MEM_LOC_THRESH_IFU",
"BriefDescription": "Local Memory above threshold for IFU speculation control"
},
- {,
+ {
"EventCode": "0x2D028",
"EventName": "PM_RADIX_PWC_L2_PDE_FROM_L2",
"BriefDescription": "A Page Directory Entry was reloaded to a level 2 page walk cache from the core's L2 data cache"
},
- {,
+ {
"EventCode": "0x30012",
"EventName": "PM_FLUSH_COMPLETION",
"BriefDescription": "The instruction that was next to complete did not complete because it suffered a flush"
},
- {,
+ {
"EventCode": "0x2D154",
"EventName": "PM_MRK_DERAT_MISS_64K",
"BriefDescription": "Marked Data ERAT Miss (Data TLB Access) page size 64K"
},
- {,
+ {
"EventCode": "0x4016E",
"EventName": "PM_THRESH_NOT_MET",
"BriefDescription": "Threshold counter did not meet threshold"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/powerpc/power9/frontend.json b/tools/perf/pmu-events/arch/powerpc/power9/frontend.json
index f9fa84b16fb5..c8add9dfaa64 100644
--- a/tools/perf/pmu-events/arch/powerpc/power9/frontend.json
+++ b/tools/perf/pmu-events/arch/powerpc/power9/frontend.json
@@ -1,355 +1,355 @@
[
- {,
+ {
"EventCode": "0x25044",
"EventName": "PM_IPTEG_FROM_L31_MOD",
"BriefDescription": "A Page Table Entry was loaded into the TLB with Modified (M) data from another core's L3 on the same chip due to a instruction side request"
},
- {,
+ {
"EventCode": "0x101E8",
"EventName": "PM_THRESH_EXC_256",
"BriefDescription": "Threshold counter exceed a count of 256"
},
- {,
+ {
"EventCode": "0x4504E",
"EventName": "PM_IPTEG_FROM_L3MISS",
"BriefDescription": "A Page Table Entry was loaded into the TLB from a location other than the local core's L3 due to a instruction side request"
},
- {,
+ {
"EventCode": "0x1006A",
"EventName": "PM_NTC_ISSUE_HELD_DARQ_FULL",
"BriefDescription": "The NTC instruction is being held at dispatch because there are no slots in the DARQ for it"
},
- {,
+ {
"EventCode": "0x4E016",
"EventName": "PM_CMPLU_STALL_LSAQ_ARB",
"BriefDescription": "Finish stall because the NTF instruction was a load or store that was held in LSAQ because an older instruction from SRQ or LRQ won arbitration to the LSU pipe when this instruction tried to launch"
},
- {,
+ {
"EventCode": "0x1001A",
"EventName": "PM_LSU_SRQ_FULL_CYC",
"BriefDescription": "Cycles in which the Store Queue is full on all 4 slices. This is event is not per thread. All the threads will see the same count for this core resource"
},
- {,
+ {
"EventCode": "0x1E15E",
"EventName": "PM_MRK_L2_TM_REQ_ABORT",
"BriefDescription": "TM abort"
},
- {,
+ {
"EventCode": "0x34052",
"EventName": "PM_INST_SYS_PUMP_MPRED",
"BriefDescription": "Final Pump Scope (system) mispredicted. Either the original scope was too small (Chip/Group) or the original scope was System and it should have been smaller. Counts for an instruction fetch"
},
- {,
+ {
"EventCode": "0x20114",
"EventName": "PM_MRK_L2_RC_DISP",
"BriefDescription": "Marked Instruction RC dispatched in L2"
},
- {,
+ {
"EventCode": "0x4C044",
"EventName": "PM_DATA_FROM_L31_ECO_MOD",
"BriefDescription": "The processor's data cache was reloaded with Modified (M) data from another core's ECO L3 on the same chip due to a demand load"
},
- {,
+ {
"EventCode": "0x1C044",
"EventName": "PM_DATA_FROM_L3_NO_CONFLICT",
"BriefDescription": "The processor's data cache was reloaded from local core's L3 without conflict due to a demand load"
},
- {,
+ {
"EventCode": "0x44050",
"EventName": "PM_INST_SYS_PUMP_MPRED_RTY",
"BriefDescription": "Final Pump Scope (system) ended up larger than Initial Pump Scope (Chip/Group) for an instruction fetch"
},
- {,
+ {
"EventCode": "0x30154",
"EventName": "PM_MRK_FAB_RSP_DCLAIM",
"BriefDescription": "Marked store had to do a dclaim"
},
- {,
+ {
"EventCode": "0x30014",
"EventName": "PM_CMPLU_STALL_STORE_FIN_ARB",
"BriefDescription": "Finish stall because the NTF instruction was a store waiting for a slot in the store finish pipe. This means the instruction is ready to finish but there are instructions ahead of it, using the finish pipe"
},
- {,
+ {
"EventCode": "0x3E054",
"EventName": "PM_LD_MISS_L1",
"BriefDescription": "Load Missed L1, counted at execution time (can be greater than loads finished). LMQ merges are not included in this count. i.e. if a load instruction misses on an address that is already allocated on the LMQ, this event will not increment for that load). Note that this count is per slice, so if a load spans multiple slices this event will increment multiple times for a single load."
},
- {,
+ {
"EventCode": "0x2E01A",
"EventName": "PM_CMPLU_STALL_LSU_FLUSH_NEXT",
"BriefDescription": "Completion stall of one cycle because the LSU requested to flush the next iop in the sequence. It takes 1 cycle for the ISU to process this request before the LSU instruction is allowed to complete"
},
- {,
+ {
"EventCode": "0x2D01C",
"EventName": "PM_CMPLU_STALL_STCX",
"BriefDescription": "Finish stall because the NTF instruction was a stcx waiting for response from L2"
},
- {,
+ {
"EventCode": "0x2C010",
"EventName": "PM_CMPLU_STALL_LSU",
"BriefDescription": "Completion stall by LSU instruction"
},
- {,
+ {
"EventCode": "0x2C042",
"EventName": "PM_DATA_FROM_L3_MEPF",
"BriefDescription": "The processor's data cache was reloaded from local core's L3 without dispatch conflicts hit on Mepf state due to a demand load"
},
- {,
+ {
"EventCode": "0x4E012",
"EventName": "PM_CMPLU_STALL_MTFPSCR",
"BriefDescription": "Completion stall because the ISU is updating the register and notifying the Effective Address Table (EAT)"
},
- {,
+ {
"EventCode": "0x100F2",
"EventName": "PM_1PLUS_PPC_CMPL",
"BriefDescription": "1 or more ppc insts finished"
},
- {,
+ {
"EventCode": "0x3001C",
"EventName": "PM_LSU_REJECT_LMQ_FULL",
"BriefDescription": "LSU Reject due to LMQ full (up to 4 per cycles)"
},
- {,
+ {
"EventCode": "0x15046",
"EventName": "PM_IPTEG_FROM_L31_SHR",
"BriefDescription": "A Page Table Entry was loaded into the TLB with Shared (S) data from another core's L3 on the same chip due to a instruction side request"
},
- {,
+ {
"EventCode": "0x1015E",
"EventName": "PM_MRK_FAB_RSP_RD_T_INTV",
"BriefDescription": "Sampled Read got a T intervention"
},
- {,
+ {
"EventCode": "0x101EC",
"EventName": "PM_THRESH_MET",
"BriefDescription": "threshold exceeded"
},
- {,
+ {
"EventCode": "0x10020",
"EventName": "PM_PMC4_REWIND",
"BriefDescription": "PMC4 Rewind Event"
},
- {,
+ {
"EventCode": "0x301EA",
"EventName": "PM_THRESH_EXC_1024",
"BriefDescription": "Threshold counter exceeded a value of 1024"
},
- {,
+ {
"EventCode": "0x34056",
"EventName": "PM_CMPLU_STALL_LSU_MFSPR",
"BriefDescription": "Finish stall because the NTF instruction was a mfspr instruction targeting an LSU SPR and it was waiting for the register data to be returned"
},
- {,
+ {
"EventCode": "0x44056",
"EventName": "PM_VECTOR_ST_CMPL",
"BriefDescription": "Number of vector store instructions completed"
},
- {,
+ {
"EventCode": "0x2C124",
"EventName": "PM_MRK_DATA_FROM_L2_DISP_CONFLICT_OTHER",
"BriefDescription": "The processor's data cache was reloaded from local core's L2 with dispatch conflict due to a marked load"
},
- {,
+ {
"EventCode": "0x4C12A",
"EventName": "PM_MRK_DATA_FROM_RL2L3_SHR_CYC",
"BriefDescription": "Duration in cycles to reload with Shared (S) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to a marked load"
},
- {,
+ {
"EventCode": "0x30060",
"EventName": "PM_TM_TRANS_RUN_INST",
"BriefDescription": "Run instructions completed in transactional state (gated by the run latch)"
},
- {,
+ {
"EventCode": "0x2C014",
"EventName": "PM_CMPLU_STALL_STORE_FINISH",
"BriefDescription": "Finish stall because the NTF instruction was a store with all its dependencies met, just waiting to go through the LSU pipe to finish"
},
- {,
+ {
"EventCode": "0x3515A",
"EventName": "PM_MRK_DATA_FROM_ON_CHIP_CACHE_CYC",
"BriefDescription": "Duration in cycles to reload either shared or modified data from another core's L2/L3 on the same chip due to a marked load"
},
- {,
+ {
"EventCode": "0x34050",
"EventName": "PM_INST_SYS_PUMP_CPRED",
"BriefDescription": "Initial and Final Pump Scope was system pump (prediction=correct) for an instruction fetch"
},
- {,
+ {
"EventCode": "0x3015E",
"EventName": "PM_MRK_FAB_RSP_CLAIM_RTY",
"BriefDescription": "Sampled store did a rwitm and got a rty"
},
- {,
+ {
"EventCode": "0x0",
"EventName": "PM_SUSPENDED",
"BriefDescription": "Counter OFF"
},
- {,
+ {
"EventCode": "0x10010",
"EventName": "PM_PMC4_OVERFLOW",
"BriefDescription": "Overflow from counter 4"
},
- {,
+ {
"EventCode": "0x3E04A",
"EventName": "PM_DPTEG_FROM_RMEM",
"BriefDescription": "A Page Table Entry was loaded into the TLB from another chip's memory on the same Node or Group ( Remote) due to a data side request. When using Radix Page Translation, this count excludes PDE reloads. Only PTE reloads are included"
},
- {,
+ {
"EventCode": "0x2F152",
"EventName": "PM_MRK_FAB_RSP_DCLAIM_CYC",
"BriefDescription": "cycles L2 RC took for a dclaim"
},
- {,
+ {
"EventCode": "0x10004",
"EventName": "PM_CMPLU_STALL_LRQ_OTHER",
"BriefDescription": "Finish stall due to LRQ miscellaneous reasons, lost arbitration to LMQ slot, bank collisions, set prediction cleanup, set prediction multihit and others"
},
- {,
+ {
"EventCode": "0x4F150",
"EventName": "PM_MRK_FAB_RSP_RWITM_CYC",
"BriefDescription": "cycles L2 RC took for a rwitm"
},
- {,
+ {
"EventCode": "0x4E042",
"EventName": "PM_DPTEG_FROM_L3",
"BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L3 due to a data side request. When using Radix Page Translation, this count excludes PDE reloads. Only PTE reloads are included"
},
- {,
+ {
"EventCode": "0x1F054",
"EventName": "PM_TLB_HIT",
"BriefDescription": "Number of times the TLB had the data required by the instruction. Applies to both HPT and RPT"
},
- {,
+ {
"EventCode": "0x2C01E",
"EventName": "PM_CMPLU_STALL_SYNC_PMU_INT",
"BriefDescription": "Cycles in which the NTC instruction is waiting for a synchronous PMU interrupt"
},
- {,
+ {
"EventCode": "0x24050",
"EventName": "PM_IOPS_CMPL",
"BriefDescription": "Internal Operations completed"
},
- {,
+ {
"EventCode": "0x1515C",
"EventName": "PM_SYNC_MRK_BR_MPRED",
"BriefDescription": "Marked Branch mispredict that can cause a synchronous interrupt"
},
- {,
+ {
"EventCode": "0x300FA",
"EventName": "PM_INST_FROM_L3MISS",
"BriefDescription": "Marked instruction was reloaded from a location beyond the local chiplet"
},
- {,
+ {
"EventCode": "0x15044",
"EventName": "PM_IPTEG_FROM_L3_NO_CONFLICT",
"BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L3 without conflict due to a instruction side request"
},
- {,
+ {
"EventCode": "0x15152",
"EventName": "PM_SYNC_MRK_BR_LINK",
"BriefDescription": "Marked Branch and link branch that can cause a synchronous interrupt"
},
- {,
+ {
"EventCode": "0x1E050",
"EventName": "PM_CMPLU_STALL_TEND",
"BriefDescription": "Finish stall because the NTF instruction was a tend instruction awaiting response from L2"
},
- {,
+ {
"EventCode": "0x1013E",
"EventName": "PM_MRK_LD_MISS_EXPOSED_CYC",
"BriefDescription": "Marked Load exposed Miss (use edge detect to count #)"
},
- {,
+ {
"EventCode": "0x25042",
"EventName": "PM_IPTEG_FROM_L3_MEPF",
"BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L3 without dispatch conflicts hit on Mepf state. due to a instruction side request"
},
- {,
+ {
"EventCode": "0x14054",
"EventName": "PM_INST_PUMP_CPRED",
"BriefDescription": "Pump prediction correct. Counts across all types of pumps for an instruction fetch"
},
- {,
+ {
"EventCode": "0x4015E",
"EventName": "PM_MRK_FAB_RSP_RD_RTY",
"BriefDescription": "Sampled L2 reads retry count"
},
- {,
+ {
"EventCode": "0x45048",
"EventName": "PM_IPTEG_FROM_DL2L3_MOD",
"BriefDescription": "A Page Table Entry was loaded into the TLB with Modified (M) data from another chip's L2 or L3 on a different Node or Group (Distant), as this chip due to a instruction side request"
},
- {,
+ {
"EventCode": "0x44052",
"EventName": "PM_INST_PUMP_MPRED",
"BriefDescription": "Pump misprediction. Counts across all types of pumps for an instruction fetch"
},
- {,
+ {
"EventCode": "0x30026",
"EventName": "PM_CMPLU_STALL_STORE_DATA",
"BriefDescription": "Finish stall because the next to finish instruction was a store waiting on data"
},
- {,
+ {
"EventCode": "0x301E6",
"EventName": "PM_MRK_DERAT_MISS",
"BriefDescription": "Erat Miss (TLB Access) All page sizes"
},
- {,
+ {
"EventCode": "0x24154",
"EventName": "PM_THRESH_ACC",
"BriefDescription": "This event increments every time the threshold event counter ticks. Thresholding must be enabled (via MMCRA) and the thresholding start event must occur for this counter to increment. It will stop incrementing when the thresholding stop event occurs or when thresholding is disabled, until the next time a configured thresholding start event occurs."
},
- {,
+ {
"EventCode": "0x2015E",
"EventName": "PM_MRK_FAB_RSP_RWITM_RTY",
"BriefDescription": "Sampled store did a rwitm and got a rty"
},
- {,
+ {
"EventCode": "0x200FA",
"EventName": "PM_BR_TAKEN_CMPL",
"BriefDescription": "New event for Branch Taken"
},
- {,
+ {
"EventCode": "0x35044",
"EventName": "PM_IPTEG_FROM_L31_ECO_SHR",
"BriefDescription": "A Page Table Entry was loaded into the TLB with Shared (S) data from another core's ECO L3 on the same chip due to a instruction side request"
},
- {,
+ {
"EventCode": "0x4C010",
"EventName": "PM_CMPLU_STALL_STORE_PIPE_ARB",
"BriefDescription": "Finish stall because the NTF instruction was a store waiting for the next relaunch opportunity after an internal reject. This means the instruction is ready to relaunch and tried once but lost arbitration"
},
- {,
+ {
"EventCode": "0x4C01C",
"EventName": "PM_CMPLU_STALL_ST_FWD",
"BriefDescription": "Completion stall due to store forward"
},
- {,
+ {
"EventCode": "0x3515C",
"EventName": "PM_MRK_DATA_FROM_RL4",
"BriefDescription": "The processor's data cache was reloaded from another chip's L4 on the same Node or Group ( Remote) due to a marked load"
},
- {,
+ {
"EventCode": "0x2D14C",
"EventName": "PM_MRK_DATA_FROM_L31_ECO_SHR",
"BriefDescription": "The processor's data cache was reloaded with Shared (S) data from another core's ECO L3 on the same chip due to a marked load"
},
- {,
+ {
"EventCode": "0x40116",
"EventName": "PM_MRK_LARX_FIN",
"BriefDescription": "Larx finished"
},
- {,
+ {
"EventCode": "0x1003A",
"EventName": "PM_CMPLU_STALL_LSU_FIN",
"BriefDescription": "Finish stall because the NTF instruction was an LSU op (other than a load or a store) with all its dependencies met and just going through the LSU pipe to finish"
},
- {,
+ {
"EventCode": "0x3012A",
"EventName": "PM_MRK_L2_RC_DONE",
"BriefDescription": "Marked RC done"
},
- {,
+ {
"EventCode": "0x45044",
"EventName": "PM_IPTEG_FROM_L31_ECO_MOD",
"BriefDescription": "A Page Table Entry was loaded into the TLB with Modified (M) data from another core's ECO L3 on the same chip due to a instruction side request"
diff --git a/tools/perf/pmu-events/arch/powerpc/power9/marked.json b/tools/perf/pmu-events/arch/powerpc/power9/marked.json
index b1954c38bab1..b24d25aba2df 100644
--- a/tools/perf/pmu-events/arch/powerpc/power9/marked.json
+++ b/tools/perf/pmu-events/arch/powerpc/power9/marked.json
@@ -1,625 +1,625 @@
[
- {,
+ {
"EventCode": "0x3013E",
"EventName": "PM_MRK_STALL_CMPLU_CYC",
"BriefDescription": "Number of cycles the marked instruction is experiencing a stall while it is next to complete (NTC)"
},
- {,
+ {
"EventCode": "0x4F056",
"EventName": "PM_RADIX_PWC_L1_PDE_FROM_L3MISS",
"BriefDescription": "A Page Directory Entry was reloaded to a level 1 page walk cache from beyond the core's L3 data cache. The source could be local/remote/distant memory or another core's cache"
},
- {,
+ {
"EventCode": "0x24158",
"EventName": "PM_MRK_INST",
"BriefDescription": "An instruction was marked. Includes both Random Instruction Sampling (RIS) at decode time and Random Event Sampling (RES) at the time the configured event happens"
},
- {,
+ {
"EventCode": "0x1E046",
"EventName": "PM_DPTEG_FROM_L31_SHR",
"BriefDescription": "A Page Table Entry was loaded into the TLB with Shared (S) data from another core's L3 on the same chip due to a data side request. When using Radix Page Translation, this count excludes PDE reloads. Only PTE reloads are included"
},
- {,
+ {
"EventCode": "0x3C04A",
"EventName": "PM_DATA_FROM_RMEM",
"BriefDescription": "The processor's data cache was reloaded from another chip's memory on the same Node or Group ( Remote) due to a demand load"
},
- {,
+ {
"EventCode": "0x2C01C",
"EventName": "PM_CMPLU_STALL_DMISS_REMOTE",
"BriefDescription": "Completion stall by Dcache miss which resolved from remote chip (cache or memory)"
},
- {,
+ {
"EventCode": "0x44040",
"EventName": "PM_INST_FROM_L2_DISP_CONFLICT_OTHER",
"BriefDescription": "The processor's Instruction cache was reloaded from local core's L2 with dispatch conflict due to an instruction fetch (not prefetch)"
},
- {,
+ {
"EventCode": "0x2E050",
"EventName": "PM_DARQ0_7_9_ENTRIES",
"BriefDescription": "Cycles in which 7,8, or 9 DARQ entries (out of 12) are in use"
},
- {,
+ {
"EventCode": "0x2D02E",
"EventName": "PM_RADIX_PWC_L3_PTE_FROM_L2",
"BriefDescription": "A Page Table Entry was reloaded to a level 3 page walk cache from the core's L2 data cache. This implies that a level 4 PWC access was not necessary for this translation"
},
- {,
+ {
"EventCode": "0x3F05E",
"EventName": "PM_RADIX_PWC_L3_PTE_FROM_L3",
"BriefDescription": "A Page Table Entry was reloaded to a level 3 page walk cache from the core's L3 data cache. This implies that a level 4 PWC access was not necessary for this translation"
},
- {,
+ {
"EventCode": "0x2E01E",
"EventName": "PM_CMPLU_STALL_NTC_FLUSH",
"BriefDescription": "Completion stall due to ntc flush"
},
- {,
+ {
"EventCode": "0x1F14C",
"EventName": "PM_MRK_DPTEG_FROM_LL4",
"BriefDescription": "A Page Table Entry was loaded into the TLB from the local chip's L4 cache due to a marked data side request.. When using Radix Page Translation, this count excludes PDE reloads. Only PTE reloads are included"
},
- {,
+ {
"EventCode": "0x20130",
"EventName": "PM_MRK_INST_DECODED",
"BriefDescription": "An instruction was marked at decode time. Random Instruction Sampling (RIS) only"
},
- {,
+ {
"EventCode": "0x3F144",
"EventName": "PM_MRK_DPTEG_FROM_L31_ECO_SHR",
"BriefDescription": "A Page Table Entry was loaded into the TLB with Shared (S) data from another core's ECO L3 on the same chip due to a marked data side request. When using Radix Page Translation, this count excludes PDE reloads. Only PTE reloads are included"
},
- {,
+ {
"EventCode": "0x4D058",
"EventName": "PM_VECTOR_FLOP_CMPL",
"BriefDescription": "Vector FP instruction completed"
},
- {,
+ {
"EventCode": "0x14040",
"EventName": "PM_INST_FROM_L2_NO_CONFLICT",
"BriefDescription": "The processor's Instruction cache was reloaded from local core's L2 without conflict due to an instruction fetch (not prefetch)"
},
- {,
+ {
"EventCode": "0x4404E",
"EventName": "PM_INST_FROM_L3MISS_MOD",
"BriefDescription": "The processor's Instruction cache was reloaded from a location other than the local core's L3 due to a instruction fetch"
},
- {,
+ {
"EventCode": "0x3003A",
"EventName": "PM_CMPLU_STALL_EXCEPTION",
"BriefDescription": "Cycles in which the NTC instruction is not allowed to complete because it was interrupted by ANY exception, which has to be serviced before the instruction can complete"
},
- {,
+ {
"EventCode": "0x4F144",
"EventName": "PM_MRK_DPTEG_FROM_L31_ECO_MOD",
"BriefDescription": "A Page Table Entry was loaded into the TLB with Modified (M) data from another core's ECO L3 on the same chip due to a marked data side request. When using Radix Page Translation, this count excludes PDE reloads. Only PTE reloads are included"
},
- {,
+ {
"EventCode": "0x3E044",
"EventName": "PM_DPTEG_FROM_L31_ECO_SHR",
"BriefDescription": "A Page Table Entry was loaded into the TLB with Shared (S) data from another core's ECO L3 on the same chip due to a data side request. When using Radix Page Translation, this count excludes PDE reloads. Only PTE reloads are included"
},
- {,
+ {
"EventCode": "0x300F6",
"EventName": "PM_L1_DCACHE_RELOAD_VALID",
"BriefDescription": "DL1 reloaded due to Demand Load"
},
- {,
+ {
"EventCode": "0x1415E",
"EventName": "PM_MRK_DATA_FROM_L3MISS_CYC",
"BriefDescription": "Duration in cycles to reload from a location other than the local core's L3 due to a marked load"
},
- {,
+ {
"EventCode": "0x1E052",
"EventName": "PM_CMPLU_STALL_SLB",
"BriefDescription": "Finish stall because the NTF instruction was awaiting L2 response for an SLB"
},
- {,
+ {
"EventCode": "0x4404C",
"EventName": "PM_INST_FROM_DMEM",
"BriefDescription": "The processor's Instruction cache was reloaded from another chip's memory on the same Node or Group (Distant) due to an instruction fetch (not prefetch)"
},
- {,
+ {
"EventCode": "0x3000E",
"EventName": "PM_FXU_1PLUS_BUSY",
"BriefDescription": "At least one of the 4 FXU units is busy"
},
- {,
+ {
"EventCode": "0x2C048",
"EventName": "PM_DATA_FROM_LMEM",
"BriefDescription": "The processor's data cache was reloaded from the local chip's Memory due to a demand load"
},
- {,
+ {
"EventCode": "0x3000A",
"EventName": "PM_CMPLU_STALL_PM",
"BriefDescription": "Finish stall because the NTF instruction was issued to the Permute execution pipe and waiting to finish. Includes permute and decimal fixed point instructions (128 bit BCD arithmetic) + a few 128 bit fixpoint add/subtract instructions with carry. Not qualified by vector or multicycle"
},
- {,
+ {
"EventCode": "0x1504E",
"EventName": "PM_IPTEG_FROM_L2MISS",
"BriefDescription": "A Page Table Entry was loaded into the TLB from a location other than the local core's L2 due to a instruction side request"
},
- {,
+ {
"EventCode": "0x1C052",
"EventName": "PM_DATA_GRP_PUMP_MPRED_RTY",
"BriefDescription": "Final Pump Scope (Group) ended up larger than Initial Pump Scope (Chip) for a demand load"
},
- {,
+ {
"EventCode": "0x30008",
"EventName": "PM_DISP_STARVED",
"BriefDescription": "Dispatched Starved"
},
- {,
+ {
"EventCode": "0x14042",
"EventName": "PM_INST_FROM_L2",
"BriefDescription": "The processor's Instruction cache was reloaded from local core's L2 due to an instruction fetch (not prefetch)"
},
- {,
+ {
"EventCode": "0x4000C",
"EventName": "PM_FREQ_UP",
"BriefDescription": "Power Management: Above Threshold A"
},
- {,
+ {
"EventCode": "0x3C050",
"EventName": "PM_DATA_SYS_PUMP_CPRED",
"BriefDescription": "Initial and Final Pump Scope was system pump (prediction=correct) for a demand load"
},
- {,
+ {
"EventCode": "0x25040",
"EventName": "PM_IPTEG_FROM_L2_MEPF",
"BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L2 hit without dispatch conflicts on Mepf state. due to a instruction side request"
},
- {,
+ {
"EventCode": "0x10132",
"EventName": "PM_MRK_INST_ISSUED",
"BriefDescription": "Marked instruction issued"
},
- {,
+ {
"EventCode": "0x1C046",
"EventName": "PM_DATA_FROM_L31_SHR",
"BriefDescription": "The processor's data cache was reloaded with Shared (S) data from another core's L3 on the same chip due to a demand load"
},
- {,
+ {
"EventCode": "0x2C044",
"EventName": "PM_DATA_FROM_L31_MOD",
"BriefDescription": "The processor's data cache was reloaded with Modified (M) data from another core's L3 on the same chip due to a demand load"
},
- {,
+ {
"EventCode": "0x2C04A",
"EventName": "PM_DATA_FROM_RL4",
"BriefDescription": "The processor's data cache was reloaded from another chip's L4 on the same Node or Group ( Remote) due to a demand load"
},
- {,
+ {
"EventCode": "0x24044",
"EventName": "PM_INST_FROM_L31_MOD",
"BriefDescription": "The processor's Instruction cache was reloaded with Modified (M) data from another core's L3 on the same chip due to an instruction fetch (not prefetch)"
},
- {,
+ {
"EventCode": "0x4C050",
"EventName": "PM_DATA_SYS_PUMP_MPRED_RTY",
"BriefDescription": "Final Pump Scope (system) ended up larger than Initial Pump Scope (Chip/Group) for a demand load"
},
- {,
+ {
"EventCode": "0x2C052",
"EventName": "PM_DATA_GRP_PUMP_MPRED",
"BriefDescription": "Final Pump Scope (Group) ended up either larger or smaller than Initial Pump Scope for a demand load"
},
- {,
+ {
"EventCode": "0x2F148",
"EventName": "PM_MRK_DPTEG_FROM_LMEM",
"BriefDescription": "A Page Table Entry was loaded into the TLB from the local chip's Memory due to a marked data side request. When using Radix Page Translation, this count excludes PDE reloads. Only PTE reloads are included"
},
- {,
+ {
"EventCode": "0x4D01A",
"EventName": "PM_CMPLU_STALL_EIEIO",
"BriefDescription": "Finish stall because the NTF instruction is an EIEIO waiting for response from L2"
},
- {,
+ {
"EventCode": "0x4F14E",
"EventName": "PM_MRK_DPTEG_FROM_L3MISS",
"BriefDescription": "A Page Table Entry was loaded into the TLB from a location other than the local core's L3 due to a marked data side request. When using Radix Page Translation, this count excludes PDE reloads. Only PTE reloads are included"
},
- {,
+ {
"EventCode": "0x4F05A",
"EventName": "PM_RADIX_PWC_L4_PTE_FROM_L3",
"BriefDescription": "A Page Table Entry was reloaded to a level 4 page walk cache from the core's L3 data cache. This is the deepest level of PWC possible for a translation"
},
- {,
+ {
"EventCode": "0x1F05A",
"EventName": "PM_RADIX_PWC_L4_PTE_FROM_L2",
"BriefDescription": "A Page Table Entry was reloaded to a level 4 page walk cache from the core's L2 data cache. This is the deepest level of PWC possible for a translation"
},
- {,
+ {
"EventCode": "0x30068",
"EventName": "PM_L1_ICACHE_RELOADED_PREF",
"BriefDescription": "Counts all Icache prefetch reloads ( includes demand turned into prefetch)"
},
- {,
+ {
"EventCode": "0x4C04A",
"EventName": "PM_DATA_FROM_OFF_CHIP_CACHE",
"BriefDescription": "The processor's data cache was reloaded either shared or modified data from another core's L2/L3 on a different chip (remote or distant) due to a demand load"
},
- {,
+ {
"EventCode": "0x400FE",
"EventName": "PM_DATA_FROM_MEMORY",
"BriefDescription": "The processor's data cache was reloaded from a memory location including L4 from local remote or distant due to a demand load"
},
- {,
+ {
"EventCode": "0x3F058",
"EventName": "PM_RADIX_PWC_L1_PDE_FROM_L3",
"BriefDescription": "A Page Directory Entry was reloaded to a level 1 page walk cache from the core's L3 data cache"
},
- {,
+ {
"EventCode": "0x3C052",
"EventName": "PM_DATA_SYS_PUMP_MPRED",
"BriefDescription": "Final Pump Scope (system) mispredicted. Either the original scope was too small (Chip/Group) or the original scope was System and it should have been smaller. Counts for a demand load"
},
- {,
+ {
"EventCode": "0x4D142",
"EventName": "PM_MRK_DATA_FROM_L3",
"BriefDescription": "The processor's data cache was reloaded from local core's L3 due to a marked load"
},
- {,
+ {
"EventCode": "0x30050",
"EventName": "PM_SYS_PUMP_CPRED",
"BriefDescription": "Initial and Final Pump Scope was system pump for all data types excluding data prefetch (demand load,inst prefetch,inst fetch,xlate)"
},
- {,
+ {
"EventCode": "0x30028",
"EventName": "PM_CMPLU_STALL_SPEC_FINISH",
"BriefDescription": "Finish stall while waiting for the non-speculative finish of either a stcx waiting for its result or a load waiting for non-critical sectors of data and ECC"
},
- {,
+ {
"EventCode": "0x400F4",
"EventName": "PM_RUN_PURR",
"BriefDescription": "Run_PURR"
},
- {,
+ {
"EventCode": "0x3404C",
"EventName": "PM_INST_FROM_DL4",
"BriefDescription": "The processor's Instruction cache was reloaded from another chip's L4 on a different Node or Group (Distant) due to an instruction fetch (not prefetch)"
},
- {,
+ {
"EventCode": "0x3D05A",
"EventName": "PM_NTC_ISSUE_HELD_OTHER",
"BriefDescription": "The NTC instruction is being held at dispatch during regular pipeline cycles, or because the VSU is busy with multi-cycle instructions, or because of a write-back collision with VSU"
},
- {,
+ {
"EventCode": "0x2E048",
"EventName": "PM_DPTEG_FROM_LMEM",
"BriefDescription": "A Page Table Entry was loaded into the TLB from the local chip's Memory due to a data side request. When using Radix Page Translation, this count excludes PDE reloads. Only PTE reloads are included"
},
- {,
+ {
"EventCode": "0x2D02A",
"EventName": "PM_RADIX_PWC_L3_PDE_FROM_L2",
"BriefDescription": "A Page Directory Entry was reloaded to a level 3 page walk cache from the core's L2 data cache"
},
- {,
+ {
"EventCode": "0x1F05C",
"EventName": "PM_RADIX_PWC_L3_PDE_FROM_L3",
"BriefDescription": "A Page Directory Entry was reloaded to a level 3 page walk cache from the core's L3 data cache"
},
- {,
+ {
"EventCode": "0x4D04A",
"EventName": "PM_DARQ0_0_3_ENTRIES",
"BriefDescription": "Cycles in which 3 or less DARQ entries (out of 12) are in use"
},
- {,
+ {
"EventCode": "0x1404C",
"EventName": "PM_INST_FROM_LL4",
"BriefDescription": "The processor's Instruction cache was reloaded from the local chip's L4 cache due to an instruction fetch (not prefetch)"
},
- {,
+ {
"EventCode": "0x200FD",
"EventName": "PM_L1_ICACHE_MISS",
"BriefDescription": "Demand iCache Miss"
},
- {,
+ {
"EventCode": "0x34040",
"EventName": "PM_INST_FROM_L2_DISP_CONFLICT_LDHITST",
"BriefDescription": "The processor's Instruction cache was reloaded from local core's L2 with load hit store conflict due to an instruction fetch (not prefetch)"
},
- {,
+ {
"EventCode": "0x20138",
"EventName": "PM_MRK_ST_NEST",
"BriefDescription": "Marked store sent to nest"
},
- {,
+ {
"EventCode": "0x44048",
"EventName": "PM_INST_FROM_DL2L3_MOD",
"BriefDescription": "The processor's Instruction cache was reloaded with Modified (M) data from another chip's L2 or L3 on a different Node or Group (Distant), as this chip due to an instruction fetch (not prefetch)"
},
- {,
+ {
"EventCode": "0x35046",
"EventName": "PM_IPTEG_FROM_L21_SHR",
"BriefDescription": "A Page Table Entry was loaded into the TLB with Shared (S) data from another core's L2 on the same chip due to a instruction side request"
},
- {,
+ {
"EventCode": "0x4C04E",
"EventName": "PM_DATA_FROM_L3MISS_MOD",
"BriefDescription": "The processor's data cache was reloaded from a location other than the local core's L3 due to a demand load"
},
- {,
+ {
"EventCode": "0x401E0",
"EventName": "PM_MRK_INST_CMPL",
"BriefDescription": "marked instruction completed"
},
- {,
+ {
"EventCode": "0x2C128",
"EventName": "PM_MRK_DATA_FROM_DL2L3_SHR_CYC",
"BriefDescription": "Duration in cycles to reload with Shared (S) data from another chip's L2 or L3 on a different Node or Group (Distant), as this chip due to a marked load"
},
- {,
+ {
"EventCode": "0x34044",
"EventName": "PM_INST_FROM_L31_ECO_SHR",
"BriefDescription": "The processor's Instruction cache was reloaded with Shared (S) data from another core's ECO L3 on the same chip due to an instruction fetch (not prefetch)"
},
- {,
+ {
"EventCode": "0x4E018",
"EventName": "PM_CMPLU_STALL_NTC_DISP_FIN",
"BriefDescription": "Finish stall because the NTF instruction was one that must finish at dispatch."
},
- {,
+ {
"EventCode": "0x2E05E",
"EventName": "PM_LMQ_EMPTY_CYC",
"BriefDescription": "Cycles in which the LMQ has no pending load misses for this thread"
},
- {,
+ {
"EventCode": "0x4C122",
"EventName": "PM_DARQ1_0_3_ENTRIES",
"BriefDescription": "Cycles in which 3 or fewer DARQ1 entries (out of 12) are in use"
},
- {,
+ {
"EventCode": "0x4F058",
"EventName": "PM_RADIX_PWC_L2_PTE_FROM_L3",
"BriefDescription": "A Page Table Entry was reloaded to a level 2 page walk cache from the core's L3 data cache. This implies that level 3 and level 4 PWC accesses were not necessary for this translation"
},
- {,
+ {
"EventCode": "0x14046",
"EventName": "PM_INST_FROM_L31_SHR",
"BriefDescription": "The processor's Instruction cache was reloaded with Shared (S) data from another core's L3 on the same chip due to an instruction fetch (not prefetch)"
},
- {,
+ {
"EventCode": "0x3012C",
"EventName": "PM_MRK_ST_FWD",
"BriefDescription": "Marked st forwards"
},
- {,
+ {
"EventCode": "0x101E0",
"EventName": "PM_MRK_INST_DISP",
"BriefDescription": "The thread has dispatched a randomly sampled marked instruction"
},
- {,
+ {
"EventCode": "0x1D058",
"EventName": "PM_DARQ0_10_12_ENTRIES",
"BriefDescription": "Cycles in which 10 or more DARQ entries (out of 12) are in use"
},
- {,
+ {
"EventCode": "0x300FE",
"EventName": "PM_DATA_FROM_L3MISS",
"BriefDescription": "Demand LD - L3 Miss (not L2 hit and not L3 hit)"
},
- {,
+ {
"EventCode": "0x30006",
"EventName": "PM_CMPLU_STALL_OTHER_CMPL",
"BriefDescription": "Instructions the core completed while this tread was stalled"
},
- {,
+ {
"EventCode": "0x1005C",
"EventName": "PM_CMPLU_STALL_DP",
"BriefDescription": "Finish stall because the NTF instruction was a scalar instruction issued to the Double Precision execution pipe and waiting to finish. Includes binary floating point instructions in 32 and 64 bit binary floating point format. Not qualified multicycle. Qualified by NOT vector"
},
- {,
+ {
"EventCode": "0x1E042",
"EventName": "PM_DPTEG_FROM_L2",
"BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L2 due to a data side request. When using Radix Page Translation, this count excludes PDE reloads. Only PTE reloads are included"
},
- {,
+ {
"EventCode": "0x1016E",
"EventName": "PM_MRK_BR_CMPL",
"BriefDescription": "Branch Instruction completed"
},
- {,
+ {
"EventCode": "0x2013A",
"EventName": "PM_MRK_BRU_FIN",
"BriefDescription": "bru marked instr finish"
},
- {,
+ {
"EventCode": "0x4F05E",
"EventName": "PM_RADIX_PWC_L3_PTE_FROM_L3MISS",
"BriefDescription": "A Page Table Entry was reloaded to a level 3 page walk cache from beyond the core's L3 data cache. This implies that a level 4 PWC access was not necessary for this translation. The source could be local/remote/distant memory or another core's cache"
},
- {,
+ {
"EventCode": "0x400FC",
"EventName": "PM_ITLB_MISS",
"BriefDescription": "ITLB Reloaded. Counts 1 per ITLB miss for HPT but multiple for radix depending on number of levels traveresed"
},
- {,
+ {
"EventCode": "0x1E044",
"EventName": "PM_DPTEG_FROM_L3_NO_CONFLICT",
"BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L3 without conflict due to a data side request. When using Radix Page Translation, this count excludes PDE reloads. Only PTE reloads are included"
},
- {,
+ {
"EventCode": "0x4D05A",
"EventName": "PM_NON_MATH_FLOP_CMPL",
"BriefDescription": "Non FLOP operation completed"
},
- {,
+ {
"EventCode": "0x101E2",
"EventName": "PM_MRK_BR_TAKEN_CMPL",
"BriefDescription": "Marked Branch Taken completed"
},
- {,
+ {
"EventCode": "0x3E158",
"EventName": "PM_MRK_STCX_FAIL",
"BriefDescription": "marked stcx failed"
},
- {,
+ {
"EventCode": "0x1C048",
"EventName": "PM_DATA_FROM_ON_CHIP_CACHE",
"BriefDescription": "The processor's data cache was reloaded either shared or modified data from another core's L2/L3 on the same chip due to a demand load"
},
- {,
+ {
"EventCode": "0x1C054",
"EventName": "PM_DATA_PUMP_CPRED",
"BriefDescription": "Pump prediction correct. Counts across all types of pumps for a demand load"
},
- {,
+ {
"EventCode": "0x4405E",
"EventName": "PM_DARQ_STORE_REJECT",
"BriefDescription": "The DARQ attempted to transmit a store into an LSAQ or SRQ entry but It was rejected. Divide by PM_DARQ_STORE_XMIT to get reject ratio"
},
- {,
+ {
"EventCode": "0x1C042",
"EventName": "PM_DATA_FROM_L2",
"BriefDescription": "The processor's data cache was reloaded from local core's L2 due to a demand load"
},
- {,
+ {
"EventCode": "0x1D14C",
"EventName": "PM_MRK_DATA_FROM_LL4",
"BriefDescription": "The processor's data cache was reloaded from the local chip's L4 cache due to a marked load"
},
- {,
+ {
"EventCode": "0x1006C",
"EventName": "PM_RUN_CYC_ST_MODE",
"BriefDescription": "Cycles run latch is set and core is in ST mode"
},
- {,
+ {
"EventCode": "0x3C044",
"EventName": "PM_DATA_FROM_L31_ECO_SHR",
"BriefDescription": "The processor's data cache was reloaded with Shared (S) data from another core's ECO L3 on the same chip due to a demand load"
},
- {,
+ {
"EventCode": "0x4C052",
"EventName": "PM_DATA_PUMP_MPRED",
"BriefDescription": "Pump misprediction. Counts across all types of pumps for a demand load"
},
- {,
+ {
"EventCode": "0x20050",
"EventName": "PM_GRP_PUMP_CPRED",
"BriefDescription": "Initial and Final Pump Scope and data sourced across this scope was group pump for all data types excluding data prefetch (demand load,inst prefetch,inst fetch,xlate)"
},
- {,
+ {
"EventCode": "0x1F150",
"EventName": "PM_MRK_ST_L2DISP_TO_CMPL_CYC",
"BriefDescription": "cycles from L2 rc disp to l2 rc completion"
},
- {,
+ {
"EventCode": "0x4505A",
"EventName": "PM_SP_FLOP_CMPL",
"BriefDescription": "SP instruction completed"
},
- {,
+ {
"EventCode": "0x4000A",
"EventName": "PM_ISQ_36_44_ENTRIES",
"BriefDescription": "Cycles in which 36 or more Issue Queue entries are in use. This is a shared event, not per thread. There are 44 issue queue entries across 4 slices in the whole core"
},
- {,
+ {
"EventCode": "0x2C12E",
"EventName": "PM_MRK_DATA_FROM_LL4_CYC",
"BriefDescription": "Duration in cycles to reload from the local chip's L4 cache due to a marked load"
},
- {,
+ {
"EventCode": "0x2C058",
"EventName": "PM_MEM_PREF",
"BriefDescription": "Memory prefetch for this thread. Includes L4"
},
- {,
+ {
"EventCode": "0x40012",
"EventName": "PM_L1_ICACHE_RELOADED_ALL",
"BriefDescription": "Counts all Icache reloads includes demand, prefetch, prefetch turned into demand and demand turned into prefetch"
},
- {,
+ {
"EventCode": "0x3003C",
"EventName": "PM_CMPLU_STALL_NESTED_TEND",
"BriefDescription": "Completion stall because the ISU is updating the TEXASR to keep track of the nested tend and decrement the TEXASR nested level. This is a short delay"
},
- {,
+ {
"EventCode": "0x3D05C",
"EventName": "PM_DISP_HELD_HB_FULL",
"BriefDescription": "Dispatch held due to History Buffer full. Could be GPR/VSR/VMR/FPR/CR/XVF; CR; XVF (XER/VSCR/FPSCR)"
},
- {,
+ {
"EventCode": "0x30052",
"EventName": "PM_SYS_PUMP_MPRED",
"BriefDescription": "Final Pump Scope (system) mispredicted. Either the original scope was too small (Chip/Group) or the original scope was System and it should have been smaller. Counts for all data types excluding data prefetch (demand load,inst prefetch,inst fetch,xlate)"
},
- {,
+ {
"EventCode": "0x2E044",
"EventName": "PM_DPTEG_FROM_L31_MOD",
"BriefDescription": "A Page Table Entry was loaded into the TLB with Modified (M) data from another core's L3 on the same chip due to a data side request. When using Radix Page Translation, this count excludes PDE reloads. Only PTE reloads are included"
},
- {,
+ {
"EventCode": "0x34048",
"EventName": "PM_INST_FROM_DL2L3_SHR",
"BriefDescription": "The processor's Instruction cache was reloaded with Shared (S) data from another chip's L2 or L3 on a different Node or Group (Distant), as this chip due to an instruction fetch (not prefetch)"
},
- {,
+ {
"EventCode": "0x45042",
"EventName": "PM_IPTEG_FROM_L3",
"BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L3 due to a instruction side request"
},
- {,
+ {
"EventCode": "0x15042",
"EventName": "PM_IPTEG_FROM_L2",
"BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L2 due to a instruction side request"
},
- {,
+ {
"EventCode": "0x1C05E",
"EventName": "PM_MEM_LOC_THRESH_LSU_MED",
"BriefDescription": "Local memory above threshold for data prefetch"
},
- {,
+ {
"EventCode": "0x40134",
"EventName": "PM_MRK_INST_TIMEO",
"BriefDescription": "marked Instruction finish timeout (instruction lost)"
},
- {,
+ {
"EventCode": "0x1002C",
"EventName": "PM_L1_DCACHE_RELOADED_ALL",
"BriefDescription": "L1 data cache reloaded for demand. If MMCR1[16] is 1, prefetches will be included as well"
},
- {,
+ {
"EventCode": "0x30130",
"EventName": "PM_MRK_INST_FIN",
"BriefDescription": "marked instruction finished"
},
- {,
+ {
"EventCode": "0x1F14A",
"EventName": "PM_MRK_DPTEG_FROM_RL2L3_SHR",
"BriefDescription": "A Page Table Entry was loaded into the TLB with Shared (S) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to a marked data side request.. When using Radix Page Translation, this count excludes PDE reloads. Only PTE reloads are included"
},
- {,
+ {
"EventCode": "0x3504E",
"EventName": "PM_DARQ0_4_6_ENTRIES",
"BriefDescription": "Cycles in which 4, 5, or 6 DARQ entries (out of 12) are in use"
},
- {,
+ {
"EventCode": "0x30064",
"EventName": "PM_DARQ_STORE_XMIT",
"BriefDescription": "The DARQ attempted to transmit a store into an LSAQ or SRQ entry. Includes rejects. Not qualified by thread, so it includes counts for the whole core"
},
- {,
+ {
"EventCode": "0x45046",
"EventName": "PM_IPTEG_FROM_L21_MOD",
"BriefDescription": "A Page Table Entry was loaded into the TLB with Modified (M) data from another core's L2 on the same chip due to a instruction side request"
},
- {,
+ {
"EventCode": "0x2C016",
"EventName": "PM_CMPLU_STALL_PASTE",
"BriefDescription": "Finish stall because the NTF instruction was a paste waiting for response from L2"
},
- {,
+ {
"EventCode": "0x24156",
"EventName": "PM_MRK_STCX_FIN",
"BriefDescription": "Number of marked stcx instructions finished. This includes instructions in the speculative path of a branch that may be flushed"
},
- {,
+ {
"EventCode": "0x15150",
"EventName": "PM_SYNC_MRK_PROBE_NOP",
"BriefDescription": "Marked probeNops which can cause synchronous interrupts"
},
- {,
+ {
"EventCode": "0x301E4",
"EventName": "PM_MRK_BR_MPRED_CMPL",
"BriefDescription": "Marked Branch Mispredicted"
diff --git a/tools/perf/pmu-events/arch/powerpc/power9/memory.json b/tools/perf/pmu-events/arch/powerpc/power9/memory.json
index c3bb283e37e9..a3488f3ce429 100644
--- a/tools/perf/pmu-events/arch/powerpc/power9/memory.json
+++ b/tools/perf/pmu-events/arch/powerpc/power9/memory.json
@@ -1,127 +1,127 @@
[
- {,
+ {
"EventCode": "0x3006E",
"EventName": "PM_NEST_REF_CLK",
"BriefDescription": "Multiply by 4 to obtain the number of PB cycles"
},
- {,
+ {
"EventCode": "0x20010",
"EventName": "PM_PMC1_OVERFLOW",
"BriefDescription": "Overflow from counter 1"
},
- {,
+ {
"EventCode": "0x2005A",
"EventName": "PM_DARQ1_7_9_ENTRIES",
"BriefDescription": "Cycles in which 7 to 9 DARQ1 entries (out of 12) are in use"
},
- {,
+ {
"EventCode": "0x3C048",
"EventName": "PM_DATA_FROM_DL2L3_SHR",
"BriefDescription": "The processor's data cache was reloaded with Shared (S) data from another chip's L2 or L3 on a different Node or Group (Distant), as this chip due to a demand load"
},
- {,
+ {
"EventCode": "0x10008",
"EventName": "PM_RUN_SPURR",
"BriefDescription": "Run SPURR"
},
- {,
+ {
"EventCode": "0x200F6",
"EventName": "PM_LSU_DERAT_MISS",
"BriefDescription": "DERAT Reloaded due to a DERAT miss"
},
- {,
+ {
"EventCode": "0x4C048",
"EventName": "PM_DATA_FROM_DL2L3_MOD",
"BriefDescription": "The processor's data cache was reloaded with Modified (M) data from another chip's L2 or L3 on a different Node or Group (Distant), as this chip due to a demand load"
},
- {,
+ {
"EventCode": "0x1D15E",
"EventName": "PM_MRK_RUN_CYC",
"BriefDescription": "Run cycles in which a marked instruction is in the pipeline"
},
- {,
+ {
"EventCode": "0x4003E",
"EventName": "PM_LD_CMPL",
"BriefDescription": "count of Loads completed"
},
- {,
+ {
"EventCode": "0x4C042",
"EventName": "PM_DATA_FROM_L3",
"BriefDescription": "The processor's data cache was reloaded from local core's L3 due to a demand load"
},
- {,
+ {
"EventCode": "0x4D02C",
"EventName": "PM_PMC1_REWIND",
"BriefDescription": "PMC1 rewind event"
},
- {,
+ {
"EventCode": "0x15158",
"EventName": "PM_SYNC_MRK_L2HIT",
"BriefDescription": "Marked L2 Hits that can throw a synchronous interrupt"
},
- {,
+ {
"EventCode": "0x3404A",
"EventName": "PM_INST_FROM_RMEM",
"BriefDescription": "The processor's Instruction cache was reloaded from another chip's memory on the same Node or Group ( Remote) due to an instruction fetch (not prefetch)"
},
- {,
+ {
"EventCode": "0x301E2",
"EventName": "PM_MRK_ST_CMPL",
"BriefDescription": "Marked store completed and sent to nest"
},
- {,
+ {
"EventCode": "0x1C050",
"EventName": "PM_DATA_CHIP_PUMP_CPRED",
"BriefDescription": "Initial and Final Pump Scope was chip pump (prediction=correct) for a demand load"
},
- {,
+ {
"EventCode": "0x4C040",
"EventName": "PM_DATA_FROM_L2_DISP_CONFLICT_OTHER",
"BriefDescription": "The processor's data cache was reloaded from local core's L2 with dispatch conflict due to a demand load"
},
- {,
+ {
"EventCode": "0x2E05C",
"EventName": "PM_LSU_REJECT_ERAT_MISS",
"BriefDescription": "LSU Reject due to ERAT (up to 4 per cycles)"
},
- {,
+ {
"EventCode": "0x1000A",
"EventName": "PM_PMC3_REWIND",
"BriefDescription": "PMC3 rewind event. A rewind happens when a speculative event (such as latency or CPI stack) is selected on PMC3 and the stall reason or reload source did not match the one programmed in PMC3. When this occurs, the count in PMC3 will not change."
},
- {,
+ {
"EventCode": "0x3C058",
"EventName": "PM_LARX_FIN",
"BriefDescription": "Larx finished"
},
- {,
+ {
"EventCode": "0x1C040",
"EventName": "PM_DATA_FROM_L2_NO_CONFLICT",
"BriefDescription": "The processor's data cache was reloaded from local core's L2 without conflict due to a demand load"
},
- {,
+ {
"EventCode": "0x2C040",
"EventName": "PM_DATA_FROM_L2_MEPF",
"BriefDescription": "The processor's data cache was reloaded from local core's L2 hit without dispatch conflicts on Mepf state due to a demand load"
},
- {,
+ {
"EventCode": "0x2E05A",
"EventName": "PM_LRQ_REJECT",
"BriefDescription": "Internal LSU reject from LRQ. Rejects cause the load to go back to LRQ, but it stays contained within the LSU once it gets issued. This event counts the number of times the LRQ attempts to relaunch an instruction after a reject. Any load can suffer multiple rejects"
},
- {,
+ {
"EventCode": "0x2C05C",
"EventName": "PM_INST_GRP_PUMP_CPRED",
"BriefDescription": "Initial and Final Pump Scope was group pump (prediction=correct) for an instruction fetch (demand only)"
},
- {,
+ {
"EventCode": "0x4D056",
"EventName": "PM_NON_FMA_FLOP_CMPL",
"BriefDescription": "Non FMA instruction completed"
},
- {,
+ {
"EventCode": "0x3E050",
"EventName": "PM_DARQ1_4_6_ENTRIES",
"BriefDescription": "Cycles in which 4, 5, or 6 DARQ1 entries (out of 12) are in use"
}
-] \ No newline at end of file
+]
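Several of these files also previously ended without a terminating newline, which is why the hunks close by replacing "]" plus a "\ No newline at end of file" marker with a newline-terminated "]". A companion sketch, under the same assumed path as the parser check above, can flag any file still missing its final newline:

# Minimal sketch: report files still lacking a newline at end of file,
# the condition the "\ No newline at end of file" hunks correct.
import glob

for path in sorted(glob.glob("tools/perf/pmu-events/arch/powerpc/power9/*.json")):
    with open(path, "rb") as f:
        data = f.read()
    if data and not data.endswith(b"\n"):
        print(f"{path}: no newline at end of file")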
diff --git a/tools/perf/pmu-events/arch/powerpc/power9/other.json b/tools/perf/pmu-events/arch/powerpc/power9/other.json
index 62b864269623..3f69422c21f9 100644
--- a/tools/perf/pmu-events/arch/powerpc/power9/other.json
+++ b/tools/perf/pmu-events/arch/powerpc/power9/other.json
@@ -1,2335 +1,2335 @@
[
- {,
+ {
"EventCode": "0x3084",
"EventName": "PM_ISU1_ISS_HOLD_ALL",
"BriefDescription": "All ISU rejects"
},
- {,
+ {
"EventCode": "0xF880",
"EventName": "PM_SNOOP_TLBIE",
"BriefDescription": "TLBIE snoop"
},
- {,
+ {
"EventCode": "0x4088",
"EventName": "PM_IC_DEMAND_REQ",
"BriefDescription": "Demand Instruction fetch request"
},
- {,
+ {
"EventCode": "0x20A4",
"EventName": "PM_TM_TRESUME",
"BriefDescription": "TM resume instruction completed"
},
- {,
+ {
"EventCode": "0x40008",
"EventName": "PM_SRQ_EMPTY_CYC",
"BriefDescription": "Cycles in which the SRQ has at least one (out of four) empty slice"
},
- {,
+ {
"EventCode": "0x20064",
"EventName": "PM_IERAT_RELOAD_4K",
"BriefDescription": "IERAT reloaded (after a miss) for 4K pages"
},
- {,
+ {
"EventCode": "0x260B4",
"EventName": "PM_L3_P2_LCO_RTY",
"BriefDescription": "L3 initiated LCO received retry on port 2 (can try 4 times)"
},
- {,
+ {
"EventCode": "0x20006",
"EventName": "PM_DISP_HELD_ISSQ_FULL",
"BriefDescription": "Dispatch held due to Issue q full. Includes issue queue and branch queue"
},
- {,
+ {
"EventCode": "0x201E4",
"EventName": "PM_MRK_DATA_FROM_L3MISS",
"BriefDescription": "The processor's data cache was reloaded from a location other than the local core's L3 due to a marked load"
},
- {,
+ {
"EventCode": "0x4E044",
"EventName": "PM_DPTEG_FROM_L31_ECO_MOD",
"BriefDescription": "A Page Table Entry was loaded into the TLB with Modified (M) data from another core's ECO L3 on the same chip due to a data side request. When using Radix Page Translation, this count excludes PDE reloads. Only PTE reloads are included"
},
- {,
+ {
"EventCode": "0x40B8",
"EventName": "PM_BR_MPRED_TAKEN_CR",
"BriefDescription": "A Conditional Branch that resolved to taken was mispredicted as not taken (due to the BHT Direction Prediction)."
},
- {,
+ {
"EventCode": "0xF8AC",
"EventName": "PM_DC_DEALLOC_NO_CONF",
"BriefDescription": "A demand load referenced a line in an active fuzzy prefetch stream. The stream could have been allocated through the hardware prefetch mechanism or through software.Fuzzy stream confirm (out of order effects, or pf cant keep up)"
},
- {,
+ {
"EventCode": "0xD090",
"EventName": "PM_LS0_DC_COLLISIONS",
"BriefDescription": "Read-write data cache collisions"
},
- {,
+ {
"EventCode": "0x40BC",
"EventName": "PM_THRD_PRIO_0_1_CYC",
"BriefDescription": "Cycles thread running at priority level 0 or 1"
},
- {,
+ {
"EventCode": "0x4C054",
"EventName": "PM_DERAT_MISS_16G_1G",
"BriefDescription": "Data ERAT Miss (Data TLB Access) page size 16G (hpt mode) or 1G (radix mode)"
},
- {,
+ {
"EventCode": "0x2084",
"EventName": "PM_FLUSH_HB_RESTORE_CYC",
"BriefDescription": "Cycles in which no new instructions can be dispatched to the ICT after a flush. History buffer recovery"
},
- {,
+ {
"EventCode": "0x4F054",
"EventName": "PM_RADIX_PWC_MISS",
"BriefDescription": "A radix translation attempt missed in the TLB and all levels of page walk cache."
},
- {,
+ {
"EventCode": "0x26882",
"EventName": "PM_L2_DC_INV",
"BriefDescription": "D-cache invalidates sent over the reload bus to the core"
},
- {,
+ {
"EventCode": "0x24048",
"EventName": "PM_INST_FROM_LMEM",
"BriefDescription": "The processor's Instruction cache was reloaded from the local chip's Memory due to an instruction fetch (not prefetch)"
},
- {,
+ {
"EventCode": "0xD8B4",
"EventName": "PM_LSU0_LRQ_S0_VALID_CYC",
"BriefDescription": "Slot 0 of LRQ valid"
},
- {,
+ {
"EventCode": "0x2E052",
"EventName": "PM_TM_PASSED",
"BriefDescription": "Number of TM transactions that passed"
},
- {,
+ {
"EventCode": "0xF088",
"EventName": "PM_LSU0_STORE_REJECT",
"BriefDescription": "All internal store rejects cause the instruction to go back to the SRQ and go to sleep until woken up to try again after the condition has been met"
},
- {,
+ {
"EventCode": "0x360B2",
"EventName": "PM_L3_GRP_GUESS_WRONG_LOW",
"BriefDescription": "Prefetch scope predictor selected GS or NNS, but was wrong because scope was LNS"
},
- {,
+ {
"EventCode": "0x168A6",
"EventName": "PM_TM_CAM_OVERFLOW",
"BriefDescription": "L3 TM CAM is full when a L2 castout of TM_SC line occurs. Line is pushed to memory"
},
- {,
+ {
"EventCode": "0xE8B0",
"EventName": "PM_TEND_PEND_CYC",
"BriefDescription": "TEND latency per thread"
},
- {,
+ {
"EventCode": "0x4884",
"EventName": "PM_IBUF_FULL_CYC",
"BriefDescription": "Cycles No room in ibuff"
},
- {,
+ {
"EventCode": "0xD08C",
"EventName": "PM_LSU2_LDMX_FIN",
"BriefDescription": "New P9 instruction LDMX. The definition of this new PMU event is (from the ldmx RFC02491): The thread has executed an ldmx instruction that accessed a doubleword that contains an effective address within an enabled section of the Load Monitored region. This event, therefore, should not occur if the FSCR has disabled the load monitored facility (FSCR[52]) or disabled the EBB facility (FSCR[56])."
},
- {,
+ {
"EventCode": "0x300F8",
"EventName": "PM_TB_BIT_TRANS",
"BriefDescription": "timebase event"
},
- {,
+ {
"EventCode": "0x3C040",
"EventName": "PM_DATA_FROM_L2_DISP_CONFLICT_LDHITST",
"BriefDescription": "The processor's data cache was reloaded from local core's L2 with load hit store conflict due to a demand load"
},
- {,
+ {
"EventCode": "0xE0BC",
"EventName": "PM_LS0_PTE_TABLEWALK_CYC",
"BriefDescription": "Cycles when a tablewalk is pending on this thread on table 0"
},
- {,
+ {
"EventCode": "0x3884",
"EventName": "PM_ISU3_ISS_HOLD_ALL",
"BriefDescription": "All ISU rejects"
},
- {,
+ {
"EventCode": "0x468A0",
"EventName": "PM_L3_PF_OFF_CHIP_MEM",
"BriefDescription": "L3 PF from Off chip memory"
},
- {,
+ {
"EventCode": "0x268AA",
"EventName": "PM_L3_P1_LCO_DATA",
"BriefDescription": "LCO sent with data port 1"
},
- {,
+ {
"EventCode": "0xE894",
"EventName": "PM_LSU1_TM_L1_HIT",
"BriefDescription": "Load tm hit in L1"
},
- {,
+ {
"EventCode": "0x5888",
"EventName": "PM_IC_INVALIDATE",
"BriefDescription": "Ic line invalidated"
},
- {,
+ {
"EventCode": "0x2890",
"EventName": "PM_DISP_CLB_HELD_TLBIE",
"BriefDescription": "Dispatch Hold: Due to TLBIE"
},
- {,
+ {
"EventCode": "0x1001C",
"EventName": "PM_CMPLU_STALL_THRD",
"BriefDescription": "Completion Stalled because the thread was blocked"
},
- {,
+ {
"EventCode": "0x368A6",
"EventName": "PM_SNP_TM_HIT_T",
"BriefDescription": "TM snoop that is a store hits line in L3 in T, Tn or Te state (shared modified)"
},
- {,
+ {
"EventCode": "0x3001A",
"EventName": "PM_DATA_TABLEWALK_CYC",
"BriefDescription": "Data Tablewalk Cycles. Could be 1 or 2 active tablewalks. Includes data prefetches."
},
- {,
+ {
"EventCode": "0xD894",
"EventName": "PM_LS3_DC_COLLISIONS",
"BriefDescription": "Read-write data cache collisions"
},
- {,
+ {
"EventCode": "0x35158",
"EventName": "PM_MRK_DATA_FROM_L31_ECO_MOD_CYC",
"BriefDescription": "Duration in cycles to reload with Modified (M) data from another core's ECO L3 on the same chip due to a marked load"
},
- {,
+ {
"EventCode": "0xF0B4",
"EventName": "PM_DC_PREF_CONS_ALLOC",
"BriefDescription": "Prefetch stream allocated in the conservative phase by either the hardware prefetch mechanism or software prefetch. The sum of this pair subtracted from the total number of allocs will give the total allocs in normal phase"
},
- {,
+ {
"EventCode": "0xF894",
"EventName": "PM_LSU3_L1_CAM_CANCEL",
"BriefDescription": "ls3 l1 tm cam cancel"
},
- {,
+ {
"EventCode": "0x2888",
"EventName": "PM_FLUSH_DISP_TLBIE",
"BriefDescription": "Dispatch Flush: TLBIE"
},
- {,
+ {
"EventCode": "0x4E11E",
"EventName": "PM_MRK_DATA_FROM_DMEM_CYC",
"BriefDescription": "Duration in cycles to reload from another chip's memory on the same Node or Group (Distant) due to a marked load"
},
- {,
+ {
"EventCode": "0x14156",
"EventName": "PM_MRK_DATA_FROM_L2_CYC",
"BriefDescription": "Duration in cycles to reload from local core's L2 due to a marked load"
},
- {,
+ {
"EventCode": "0x468A6",
"EventName": "PM_RD_CLEARING_SC",
"BriefDescription": "Core TM load hits line in L3 in TM_SC state and causes it to be invalidated"
},
- {,
+ {
"EventCode": "0xD0B0",
"EventName": "PM_HWSYNC",
"BriefDescription": "A hwsync instruction was decoded and transferred"
},
- {,
+ {
"EventCode": "0x168B0",
"EventName": "PM_L3_P1_NODE_PUMP",
"BriefDescription": "L3 PF sent with nodal scope port 1, counts even retried requests"
},
- {,
+ {
"EventCode": "0xD0BC",
"EventName": "PM_LSU0_1_LRQF_FULL_CYC",
"BriefDescription": "Counts the number of cycles the LRQF is full. LRQF is the queue that holds loads between finish and completion. If it fills up, instructions stay in LRQ until completion, potentially backing up the LRQ"
},
- {,
+ {
"EventCode": "0x2D148",
"EventName": "PM_MRK_DATA_FROM_L2_DISP_CONFLICT_LDHITST",
"BriefDescription": "The processor's data cache was reloaded from local core's L2 with load hit store conflict due to a marked load"
},
- {,
+ {
"EventCode": "0x468AE",
"EventName": "PM_L3_P3_CO_RTY",
"BriefDescription": "L3 CO received retry port 3 (memory only), every retry counted"
},
- {,
+ {
"EventCode": "0x460A8",
"EventName": "PM_SN_HIT",
"BriefDescription": "Any port snooper hit L3. Up to 4 can happen in a cycle but we only count 1"
},
- {,
+ {
"EventCode": "0x360AA",
"EventName": "PM_L3_P0_CO_MEM",
"BriefDescription": "L3 CO to memory port 0 with or without data"
},
- {,
+ {
"EventCode": "0xF0A4",
"EventName": "PM_DC_PREF_HW_ALLOC",
"BriefDescription": "Prefetch stream allocated by the hardware prefetch mechanism"
},
- {,
+ {
"EventCode": "0xF0BC",
"EventName": "PM_LS2_UNALIGNED_ST",
"BriefDescription": "Store instructions whose data crosses a double-word boundary, which causes it to require an additional slice than than what normally would be required of the Store of that size. If the Store wraps from slice 3 to slice 0, thee is an additional 3-cycle penalty"
},
- {,
+ {
"EventCode": "0xD0AC",
"EventName": "PM_SRQ_SYNC_CYC",
"BriefDescription": "A sync is in the S2Q (edge detect to count)"
},
- {,
+ {
"EventCode": "0x401E6",
"EventName": "PM_MRK_INST_FROM_L3MISS",
"BriefDescription": "Marked instruction was reloaded from a location beyond the local chiplet"
},
- {,
+ {
"EventCode": "0x58A8",
"EventName": "PM_DECODE_HOLD_ICT_FULL",
"BriefDescription": "Counts the number of cycles in which the IFU was not able to decode and transmit one or more instructions because all itags were in use. This means the ICT is full for this thread"
},
- {,
+ {
"EventCode": "0x26082",
"EventName": "PM_L2_IC_INV",
"BriefDescription": "I-cache Invalidates sent over the realod bus to the core"
},
- {,
+ {
"EventCode": "0xC8AC",
"EventName": "PM_LSU_FLUSH_RELAUNCH_MISS",
"BriefDescription": "If a load that has already returned data and has to relaunch for any reason then gets a miss (erat, setp, data cache), it will often be flushed at relaunch time because the data might be inconsistent"
},
- {,
+ {
"EventCode": "0x260A4",
"EventName": "PM_L3_LD_HIT",
"BriefDescription": "L3 Hits for demand LDs"
},
- {,
+ {
"EventCode": "0xF0A0",
"EventName": "PM_DATA_STORE",
"BriefDescription": "All ops that drain from s2q to L2 containing data"
},
- {,
+ {
"EventCode": "0x1D148",
"EventName": "PM_MRK_DATA_FROM_RMEM",
"BriefDescription": "The processor's data cache was reloaded from another chip's memory on the same Node or Group ( Remote) due to a marked load"
},
- {,
+ {
"EventCode": "0x16088",
"EventName": "PM_L2_LOC_GUESS_CORRECT",
"BriefDescription": "L2 guess local (LNS) and guess was correct (ie data local)"
},
- {,
+ {
"EventCode": "0x160A4",
"EventName": "PM_L3_HIT",
"BriefDescription": "L3 Hits (L2 miss hitting L3, including data/instrn/xlate)"
},
- {,
+ {
"EventCode": "0xE09C",
"EventName": "PM_LSU0_TM_L1_MISS",
"BriefDescription": "Load tm L1 miss"
},
- {,
+ {
"EventCode": "0x168B4",
"EventName": "PM_L3_P1_LCO_RTY",
"BriefDescription": "L3 initiated LCO received retry on port 1 (can try 4 times)"
},
- {,
+ {
"EventCode": "0x268AC",
"EventName": "PM_L3_RD_USAGE",
"BriefDescription": "Rotating sample of 16 RD actives"
},
- {,
+ {
"EventCode": "0x1415C",
"EventName": "PM_MRK_DATA_FROM_L3_MEPF_CYC",
"BriefDescription": "Duration in cycles to reload from local core's L3 without dispatch conflicts hit on Mepf state due to a marked load"
},
- {,
+ {
"EventCode": "0xE880",
"EventName": "PM_L1_SW_PREF",
"BriefDescription": "Software L1 Prefetches, including SW Transient Prefetches"
},
- {,
+ {
"EventCode": "0x288C",
"EventName": "PM_DISP_CLB_HELD_BAL",
"BriefDescription": "Dispatch/CLB Hold: Balance Flush"
},
- {,
+ {
"EventCode": "0x101EA",
"EventName": "PM_MRK_L1_RELOAD_VALID",
"BriefDescription": "Marked demand reload"
},
- {,
+ {
"EventCode": "0x1D156",
"EventName": "PM_MRK_LD_MISS_L1_CYC",
"BriefDescription": "Marked ld latency"
},
- {,
+ {
"EventCode": "0x4C01A",
"EventName": "PM_CMPLU_STALL_DMISS_L3MISS",
"BriefDescription": "Completion stall due to cache miss resolving missed the L3"
},
- {,
+ {
"EventCode": "0x2006C",
"EventName": "PM_RUN_CYC_SMT4_MODE",
"BriefDescription": "Cycles in which this thread's run latch is set and the core is in SMT4 mode"
},
- {,
+ {
"EventCode": "0x1D14E",
"EventName": "PM_MRK_DATA_FROM_OFF_CHIP_CACHE_CYC",
"BriefDescription": "Duration in cycles to reload either shared or modified data from another core's L2/L3 on a different chip (remote or distant) due to a marked load"
},
- {,
+ {
"EventCode": "0xF888",
"EventName": "PM_LSU1_STORE_REJECT",
"BriefDescription": "All internal store rejects cause the instruction to go back to the SRQ and go to sleep until woken up to try again after the condition has been met"
},
- {,
+ {
"EventCode": "0xC098",
"EventName": "PM_LS2_UNALIGNED_LD",
"BriefDescription": "Load instructions whose data crosses a double-word boundary, which causes it to require an additional slice than than what normally would be required of the load of that size. If the load wraps from slice 3 to slice 0, thee is an additional 3-cycle penalty"
},
- {,
+ {
"EventCode": "0x20058",
"EventName": "PM_DARQ1_10_12_ENTRIES",
"BriefDescription": "Cycles in which 10 or more DARQ1 entries (out of 12) are in use"
},
- {,
+ {
"EventCode": "0x360A6",
"EventName": "PM_SNP_TM_HIT_M",
"BriefDescription": "TM snoop that is a store hits line in L3 in M or Mu state (exclusive modified)"
},
- {,
+ {
"EventCode": "0x5898",
"EventName": "PM_LINK_STACK_INVALID_PTR",
"BriefDescription": "It is most often caused by certain types of flush where the pointer is not available. Can result in the data in the link stack becoming unusable."
},
- {,
+ {
"EventCode": "0x46088",
"EventName": "PM_L2_CHIP_PUMP",
"BriefDescription": "RC requests that were local (aka chip) pump attempts"
},
- {,
+ {
"EventCode": "0x28A0",
"EventName": "PM_TM_TSUSPEND",
"BriefDescription": "TM suspend instruction completed"
},
- {,
+ {
"EventCode": "0x20054",
"EventName": "PM_L1_PREF",
"BriefDescription": "A data line was written to the L1 due to a hardware or software prefetch"
},
- {,
+ {
"EventCode": "0x2608E",
"EventName": "PM_TM_LD_CONF",
"BriefDescription": "TM Load (fav or non-fav) ran into conflict (failed)"
},
- {,
+ {
"EventCode": "0x1D144",
"EventName": "PM_MRK_DATA_FROM_L3_DISP_CONFLICT",
"BriefDescription": "The processor's data cache was reloaded from local core's L3 with dispatch conflict due to a marked load"
},
- {,
+ {
"EventCode": "0x400FA",
"EventName": "PM_RUN_INST_CMPL",
"BriefDescription": "Run_Instructions"
},
- {,
+ {
"EventCode": "0x15154",
"EventName": "PM_SYNC_MRK_L3MISS",
"BriefDescription": "Marked L3 misses that can throw a synchronous interrupt"
},
- {,
+ {
"EventCode": "0xE0B4",
"EventName": "PM_LS0_TM_DISALLOW",
"BriefDescription": "A TM-ineligible instruction tries to execute inside a transaction and the LSU disallows it"
},
- {,
+ {
"EventCode": "0x26884",
"EventName": "PM_DSIDE_MRU_TOUCH",
"BriefDescription": "D-side L2 MRU touch commands sent to the L2"
},
- {,
+ {
"EventCode": "0x30134",
"EventName": "PM_MRK_ST_CMPL_INT",
"BriefDescription": "marked store finished with intervention"
},
- {,
+ {
"EventCode": "0xC0B8",
"EventName": "PM_LSU_FLUSH_SAO",
"BriefDescription": "A load-hit-load condition with Strong Address Ordering will have address compare disabled and flush"
},
- {,
+ {
"EventCode": "0x50A8",
"EventName": "PM_EAT_FORCE_MISPRED",
"BriefDescription": "XL-form branch was mispredicted due to the predicted target address missing from EAT. The EAT forces a mispredict in this case since there is no predicated target to validate. This is a rare case that may occur when the EAT is full and a branch is issued"
},
- {,
+ {
"EventCode": "0xC094",
"EventName": "PM_LS0_UNALIGNED_LD",
"BriefDescription": "Load instructions whose data crosses a double-word boundary, which causes it to require an additional slice than than what normally would be required of the load of that size. If the load wraps from slice 3 to slice 0, thee is an additional 3-cycle penalty"
},
- {,
+ {
"EventCode": "0xF8BC",
"EventName": "PM_LS3_UNALIGNED_ST",
"BriefDescription": "Store instructions whose data crosses a double-word boundary, which causes it to require an additional slice than than what normally would be required of the Store of that size. If the Store wraps from slice 3 to slice 0, thee is an additional 3-cycle penalty"
},
- {,
+ {
"EventCode": "0x460AE",
"EventName": "PM_L3_P2_CO_RTY",
"BriefDescription": "L3 CO received retry port 2 (memory only), every retry counted"
},
- {,
+ {
"EventCode": "0x58B0",
"EventName": "PM_BTAC_GOOD_RESULT",
"BriefDescription": "BTAC predicts a taken branch and the BHT agrees, and the target address is correct"
},
- {,
+ {
"EventCode": "0x1C04C",
"EventName": "PM_DATA_FROM_LL4",
"BriefDescription": "The processor's data cache was reloaded from the local chip's L4 cache due to a demand load"
},
- {,
+ {
"EventCode": "0x3608E",
"EventName": "PM_TM_ST_CONF",
"BriefDescription": "TM Store (fav or non-fav) ran into conflict (failed)"
},
- {,
+ {
"EventCode": "0xF8A0",
"EventName": "PM_NON_DATA_STORE",
"BriefDescription": "All ops that drain from s2q to L2 and contain no data"
},
- {,
+ {
"EventCode": "0x3F146",
"EventName": "PM_MRK_DPTEG_FROM_L21_SHR",
"BriefDescription": "A Page Table Entry was loaded into the TLB with Shared (S) data from another core's L2 on the same chip due to a marked data side request. When using Radix Page Translation, this count excludes PDE reloads. Only PTE reloads are included"
},
- {,
+ {
"EventCode": "0x40A0",
"EventName": "PM_BR_UNCOND",
"BriefDescription": "Unconditional Branch Completed. HW branch prediction was not used for this branch. This can be an I-form branch, a B-form branch with BO-field set to branch always, or a B-form branch which was covenrted to a Resolve."
},
- {,
+ {
"EventCode": "0xF8A8",
"EventName": "PM_DC_PREF_FUZZY_CONF",
"BriefDescription": "A demand load referenced a line in an active fuzzy prefetch stream. The stream could have been allocated through the hardware prefetch mechanism or through software.Fuzzy stream confirm (out of order effects, or pf cant keep up)"
},
- {,
+ {
"EventCode": "0xF8A4",
"EventName": "PM_DC_PREF_SW_ALLOC",
"BriefDescription": "Prefetch stream allocated by software prefetching"
},
- {,
+ {
"EventCode": "0xE0A0",
"EventName": "PM_LSU2_TM_L1_MISS",
"BriefDescription": "Load tm L1 miss"
},
- {,
+ {
"EventCode": "0xC880",
"EventName": "PM_LS1_LD_VECTOR_FIN",
"BriefDescription": "LS1 finished load vector op"
},
- {,
+ {
"EventCode": "0x2894",
"EventName": "PM_TM_OUTER_TEND",
"BriefDescription": "Completion time outer tend"
},
- {,
+ {
"EventCode": "0xF098",
"EventName": "PM_XLATE_HPT_MODE",
"BriefDescription": "LSU reports every cycle the thread is in HPT translation mode (as opposed to radix mode)"
},
- {,
+ {
"EventCode": "0x2C04E",
"EventName": "PM_LD_MISS_L1_FIN",
"BriefDescription": "Number of load instructions that finished with an L1 miss. Note that even if a load spans multiple slices this event will increment only once per load op."
},
- {,
+ {
"EventCode": "0x30162",
"EventName": "PM_MRK_LSU_DERAT_MISS",
"BriefDescription": "Marked derat reload (miss) for any page size"
},
- {,
+ {
"EventCode": "0x160A0",
"EventName": "PM_L3_PF_MISS_L3",
"BriefDescription": "L3 PF missed in L3"
},
- {,
+ {
"EventCode": "0x1C04A",
"EventName": "PM_DATA_FROM_RL2L3_SHR",
"BriefDescription": "The processor's data cache was reloaded with Shared (S) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to a demand load"
},
- {,
+ {
"EventCode": "0x268B0",
"EventName": "PM_L3_P1_GRP_PUMP",
"BriefDescription": "L3 PF sent with grp scope port 1, counts even retried requests"
},
- {,
+ {
"EventCode": "0x30016",
"EventName": "PM_CMPLU_STALL_SRQ_FULL",
"BriefDescription": "Finish stall because the NTF instruction was a store that was held in LSAQ because the SRQ was full"
},
- {,
+ {
"EventCode": "0x40B4",
"EventName": "PM_BR_PRED_TA",
"BriefDescription": "Conditional Branch Completed that had its target address predicted. Only XL-form branches set this event. This equal the sum of CCACHE, LSTACK, and PCACHE"
},
- {,
+ {
"EventCode": "0x40AC",
"EventName": "PM_BR_MPRED_CCACHE",
"BriefDescription": "Conditional Branch Completed that was Mispredicted due to the Count Cache Target Prediction"
},
- {,
+ {
"EventCode": "0x3688A",
"EventName": "PM_L2_RTY_LD",
"BriefDescription": "RC retries on PB for any load from core (excludes DCBFs)"
},
- {,
+ {
"EventCode": "0xE08C",
"EventName": "PM_LSU0_ERAT_HIT",
"BriefDescription": "Primary ERAT hit. There is no secondary ERAT"
},
- {,
+ {
"EventCode": "0xE088",
"EventName": "PM_LS2_ERAT_MISS_PREF",
"BriefDescription": "LS0 Erat miss due to prefetch"
},
- {,
+ {
"EventCode": "0xF0A8",
"EventName": "PM_DC_PREF_CONF",
"BriefDescription": "A demand load referenced a line in an active prefetch stream. The stream could have been allocated through the hardware prefetch mechanism or through software. Includes forwards and backwards streams"
},
- {,
+ {
"EventCode": "0x16888",
"EventName": "PM_L2_LOC_GUESS_WRONG",
"BriefDescription": "L2 guess local (LNS) and guess was not correct (ie data not on chip)"
},
- {,
+ {
"EventCode": "0xC888",
"EventName": "PM_LSU_DTLB_MISS_64K",
"BriefDescription": "Data TLB Miss page size 64K"
},
- {,
+ {
"EventCode": "0xE0A4",
"EventName": "PM_TMA_REQ_L2",
"BriefDescription": "addrs only req to L2 only on the first one,Indication that Load footprint is not expanding"
},
- {,
+ {
"EventCode": "0xC088",
"EventName": "PM_LSU_DTLB_MISS_4K",
"BriefDescription": "Data TLB Miss page size 4K"
},
- {,
+ {
"EventCode": "0x3C042",
"EventName": "PM_DATA_FROM_L3_DISP_CONFLICT",
"BriefDescription": "The processor's data cache was reloaded from local core's L3 with dispatch conflict due to a demand load"
},
- {,
+ {
"EventCode": "0x168AA",
"EventName": "PM_L3_P1_LCO_NO_DATA",
"BriefDescription": "Dataless L3 LCO sent port 1"
},
- {,
+ {
"EventCode": "0x3D140",
"EventName": "PM_MRK_DATA_FROM_L2_DISP_CONFLICT_OTHER_CYC",
"BriefDescription": "Duration in cycles to reload from local core's L2 with dispatch conflict due to a marked load"
},
- {,
+ {
"EventCode": "0xC89C",
"EventName": "PM_LS1_LAUNCH_HELD_PREF",
"BriefDescription": "Number of times a load or store instruction was unable to launch/relaunch because a high priority prefetch used that relaunch cycle"
},
- {,
+ {
"EventCode": "0x4894",
"EventName": "PM_IC_RELOAD_PRIVATE",
"BriefDescription": "Reloading line was brought in private for a specific thread. Most lines are brought in shared for all eight threads. If RA does not match then invalidates and then brings it shared to other thread. In P7 line brought in private , then line was invalidat"
},
- {,
+ {
"EventCode": "0x1688E",
"EventName": "PM_TM_LD_CAUSED_FAIL",
"BriefDescription": "Non-TM Load caused any thread to fail"
},
- {,
+ {
"EventCode": "0x26084",
"EventName": "PM_L2_RCLD_DISP_FAIL_OTHER",
"BriefDescription": "All D-side-Ld or I-side-instruction-fetch dispatch attempts for this thread that failed due to reasons other than an address collision conflicts with an L2 machines (e.g. Read-Claim/Snoop machine not available)"
},
- {,
+ {
"EventCode": "0x101E4",
"EventName": "PM_MRK_L1_ICACHE_MISS",
"BriefDescription": "sampled Instruction suffered an icache Miss"
},
- {,
+ {
"EventCode": "0x20A0",
"EventName": "PM_TM_NESTED_TBEGIN",
"BriefDescription": "Completion Tm nested tbegin"
},
- {,
+ {
"EventCode": "0x368AA",
"EventName": "PM_L3_P1_CO_MEM",
"BriefDescription": "L3 CO to memory port 1 with or without data"
},
- {,
+ {
"EventCode": "0xC8A4",
"EventName": "PM_LSU3_FALSE_LHS",
"BriefDescription": "False LHS match detected"
},
- {,
+ {
"EventCode": "0xF0B0",
"EventName": "PM_L3_LD_PREF",
"BriefDescription": "L3 load prefetch, sourced from a hardware or software stream, was sent to the nest"
},
- {,
+ {
"EventCode": "0x4D012",
"EventName": "PM_PMC3_SAVED",
"BriefDescription": "PMC3 Rewind Value saved"
},
- {,
+ {
"EventCode": "0xE888",
"EventName": "PM_LS3_ERAT_MISS_PREF",
"BriefDescription": "LS1 Erat miss due to prefetch"
},
- {,
+ {
"EventCode": "0x368B4",
"EventName": "PM_L3_RD0_BUSY",
"BriefDescription": "Lifetime, sample of RD machine 0 valid"
},
- {,
+ {
"EventCode": "0x46080",
"EventName": "PM_L2_DISP_ALL_L2MISS",
"BriefDescription": "All successful D-side-Ld/St or I-side-instruction-fetch dispatches for this thread that were an L2 miss"
},
- {,
+ {
"EventCode": "0xF8B8",
"EventName": "PM_LS1_UNALIGNED_ST",
"BriefDescription": "Store instructions whose data crosses a double-word boundary, which causes it to require an additional slice than than what normally would be required of the Store of that size. If the Store wraps from slice 3 to slice 0, thee is an additional 3-cycle penalty"
},
- {,
+ {
"EventCode": "0x408C",
"EventName": "PM_L1_DEMAND_WRITE",
"BriefDescription": "Instruction Demand sectors written into IL1"
},
- {,
+ {
"EventCode": "0x368A8",
"EventName": "PM_SN_INVL",
"BriefDescription": "Any port snooper detects a store to a line in the Sx state and invalidates the line. Up to 4 can happen in a cycle but we only count 1"
},
- {,
+ {
"EventCode": "0x160B2",
"EventName": "PM_L3_LOC_GUESS_CORRECT",
"BriefDescription": "Prefetch scope predictor selected LNS and was correct"
},
- {,
+ {
"EventCode": "0x48B4",
"EventName": "PM_DECODE_FUSION_CONST_GEN",
"BriefDescription": "32-bit constant generation"
},
- {,
+ {
"EventCode": "0x4D146",
"EventName": "PM_MRK_DATA_FROM_L21_MOD",
"BriefDescription": "The processor's data cache was reloaded with Modified (M) data from another core's L2 on the same chip due to a marked load"
},
- {,
+ {
"EventCode": "0xE080",
"EventName": "PM_S2Q_FULL",
"BriefDescription": "Cycles during which the S2Q is full"
},
- {,
+ {
"EventCode": "0x268B4",
"EventName": "PM_L3_P3_LCO_RTY",
"BriefDescription": "L3 initiated LCO received retry on port 3 (can try 4 times)"
},
- {,
+ {
"EventCode": "0xD8B8",
"EventName": "PM_LSU0_LMQ_S0_VALID",
"BriefDescription": "Slot 0 of LMQ valid"
},
- {,
+ {
"EventCode": "0x2098",
"EventName": "PM_TM_NESTED_TEND",
"BriefDescription": "Completion time nested tend"
},
- {,
+ {
"EventCode": "0x368A0",
"EventName": "PM_L3_PF_OFF_CHIP_CACHE",
"BriefDescription": "L3 PF from Off chip cache"
},
- {,
+ {
"EventCode": "0x20056",
"EventName": "PM_TAKEN_BR_MPRED_CMPL",
"BriefDescription": "Total number of taken branches that were incorrectly predicted as not-taken. This event counts branches completed and does not include speculative instructions"
},
- {,
+ {
"EventCode": "0x4688A",
"EventName": "PM_L2_SYS_PUMP",
"BriefDescription": "RC requests that were system pump attempts"
},
- {,
+ {
"EventCode": "0xE090",
"EventName": "PM_LSU2_ERAT_HIT",
"BriefDescription": "Primary ERAT hit. There is no secondary ERAT"
},
- {,
+ {
"EventCode": "0x4001C",
"EventName": "PM_INST_IMC_MATCH_CMPL",
"BriefDescription": "IMC Match Count"
},
- {,
+ {
"EventCode": "0x40A8",
"EventName": "PM_BR_PRED_LSTACK",
"BriefDescription": "Conditional Branch Completed that used the Link Stack for Target Prediction"
},
- {,
+ {
"EventCode": "0x268A2",
"EventName": "PM_L3_CI_MISS",
"BriefDescription": "L3 castins miss (total count)"
},
- {,
+ {
"EventCode": "0x289C",
"EventName": "PM_TM_NON_FAV_TBEGIN",
"BriefDescription": "Dispatch time non favored tbegin"
},
- {,
+ {
"EventCode": "0xF08C",
"EventName": "PM_LSU2_STORE_REJECT",
"BriefDescription": "All internal store rejects cause the instruction to go back to the SRQ and go to sleep until woken up to try again after the condition has been met"
},
- {,
+ {
"EventCode": "0x360A0",
"EventName": "PM_L3_PF_ON_CHIP_CACHE",
"BriefDescription": "L3 PF from On chip cache"
},
- {,
+ {
"EventCode": "0x35152",
"EventName": "PM_MRK_DATA_FROM_L2MISS_CYC",
"BriefDescription": "Duration in cycles to reload from a location other than the local core's L2 due to a marked load"
},
- {,
+ {
"EventCode": "0x160AC",
"EventName": "PM_L3_SN_USAGE",
"BriefDescription": "Rotating sample of 16 snoop valids"
},
- {,
+ {
"EventCode": "0x1608C",
"EventName": "PM_RC0_BUSY",
"BriefDescription": "RC mach 0 Busy. Used by PMU to sample ave RC lifetime (mach0 used as sample point)"
},
- {,
+ {
"EventCode": "0x36082",
"EventName": "PM_L2_LD_DISP",
"BriefDescription": "All successful D-side-Ld or I-side-instruction-fetch dispatches for this thread"
},
- {,
+ {
"EventCode": "0xF8B0",
"EventName": "PM_L3_SW_PREF",
"BriefDescription": "L3 load prefetch, sourced from a software prefetch stream, was sent to the nest"
},
- {,
+ {
"EventCode": "0xF884",
"EventName": "PM_TABLEWALK_CYC_PREF",
"BriefDescription": "tablewalk qualified for pte prefetches"
},
- {,
+ {
"EventCode": "0x4D144",
"EventName": "PM_MRK_DATA_FROM_L31_ECO_MOD",
"BriefDescription": "The processor's data cache was reloaded with Modified (M) data from another core's ECO L3 on the same chip due to a marked load"
},
- {,
+ {
"EventCode": "0x16884",
"EventName": "PM_L2_RCLD_DISP_FAIL_ADDR",
"BriefDescription": "All D-side-Ld or I-side-instruction-fetch dispatch attempts for this thread that failed due to an address collision conflicts with an L2 machines already working on this line (e.g. ld-hit-stq or Read-claim/Castout/Snoop machines)"
},
- {,
+ {
"EventCode": "0x460A0",
"EventName": "PM_L3_PF_ON_CHIP_MEM",
"BriefDescription": "L3 PF from On chip memory"
},
- {,
+ {
"EventCode": "0xF084",
"EventName": "PM_PTE_PREFETCH",
"BriefDescription": "PTE prefetches"
},
- {,
+ {
"EventCode": "0x2D026",
"EventName": "PM_RADIX_PWC_L1_PDE_FROM_L2",
"BriefDescription": "A Page Directory Entry was reloaded to a level 1 page walk cache from the core's L2 data cache"
},
- {,
+ {
"EventCode": "0x48B0",
"EventName": "PM_BR_MPRED_PCACHE",
"BriefDescription": "Conditional Branch Completed that was Mispredicted due to pattern cache prediction"
},
- {,
+ {
"EventCode": "0x2C126",
"EventName": "PM_MRK_DATA_FROM_L2",
"BriefDescription": "The processor's data cache was reloaded from local core's L2 due to a marked load"
},
- {,
+ {
"EventCode": "0xE0AC",
"EventName": "PM_TM_FAIL_TLBIE",
"BriefDescription": "Transaction failed because there was a TLBIE hit in the bloom filter"
},
- {,
+ {
"EventCode": "0x260AA",
"EventName": "PM_L3_P0_LCO_DATA",
"BriefDescription": "LCO sent with data port 0"
},
- {,
+ {
"EventCode": "0x4888",
"EventName": "PM_IC_PREF_REQ",
"BriefDescription": "Instruction prefetch requests"
},
- {,
+ {
"EventCode": "0xC898",
"EventName": "PM_LS3_UNALIGNED_LD",
"BriefDescription": "Load instructions whose data crosses a double-word boundary, which causes it to require an additional slice than than what normally would be required of the load of that size. If the load wraps from slice 3 to slice 0, thee is an additional 3-cycle penalty"
},
- {,
+ {
"EventCode": "0x488C",
"EventName": "PM_IC_PREF_WRITE",
"BriefDescription": "Instruction prefetch written into IL1"
},
- {,
+ {
"EventCode": "0xF89C",
"EventName": "PM_XLATE_MISS",
"BriefDescription": "The LSU requested a line from L2 for translation. It may be satisfied from any source beyond L2. Includes speculative instructions. Includes instruction, prefetch and demand"
},
- {,
+ {
"EventCode": "0x14158",
"EventName": "PM_MRK_DATA_FROM_L2_NO_CONFLICT_CYC",
"BriefDescription": "Duration in cycles to reload from local core's L2 without conflict due to a marked load"
},
- {,
+ {
"EventCode": "0x35156",
"EventName": "PM_MRK_DATA_FROM_L31_SHR_CYC",
"BriefDescription": "Duration in cycles to reload with Shared (S) data from another core's L3 on the same chip due to a marked load"
},
- {,
+ {
"EventCode": "0xC88C",
"EventName": "PM_LSU_DTLB_MISS_16G_1G",
"BriefDescription": "Data TLB Miss page size 16G (HPT) or 1G (Radix)"
},
- {,
+ {
"EventCode": "0x268A6",
"EventName": "PM_TM_RST_SC",
"BriefDescription": "TM snoop hits line in L3 that is TM_SC state and causes it to be invalidated"
},
- {,
+ {
"EventCode": "0x468A4",
"EventName": "PM_L3_TRANS_PF",
"BriefDescription": "L3 Transient prefetch received from L2"
},
- {,
+ {
"EventCode": "0x4094",
"EventName": "PM_IC_PREF_CANCEL_L2",
"BriefDescription": "L2 Squashed a demand or prefetch request"
},
- {,
+ {
"EventCode": "0x48AC",
"EventName": "PM_BR_MPRED_LSTACK",
"BriefDescription": "Conditional Branch Completed that was Mispredicted due to the Link Stack Target Prediction"
},
- {,
+ {
"EventCode": "0xE88C",
"EventName": "PM_LSU1_ERAT_HIT",
"BriefDescription": "Primary ERAT hit. There is no secondary ERAT"
},
- {,
+ {
"EventCode": "0xC0B4",
"EventName": "PM_LSU_FLUSH_WRK_ARND",
"BriefDescription": "LSU workaround flush. These flushes are setup with programmable scan only latches to perform various actions when the flush macro receives a trigger from the dbg macros. These actions include things like flushing the next op encountered for a particular thread or flushing the next op that is NTC op that is encountered on a particular slice. The kind of flush that the workaround is setup to perform is highly variable."
},
- {,
+ {
"EventCode": "0x34054",
"EventName": "PM_PARTIAL_ST_FIN",
"BriefDescription": "Any store finished by an LSU slice"
},
- {,
+ {
"EventCode": "0x5880",
"EventName": "PM_THRD_PRIO_6_7_CYC",
"BriefDescription": "Cycles thread running at priority level 6 or 7"
},
- {,
+ {
"EventCode": "0x4898",
"EventName": "PM_IC_DEMAND_L2_BR_REDIRECT",
"BriefDescription": "L2 I cache demand request due to branch Mispredict ( 15 cycle path)"
},
- {,
+ {
"EventCode": "0x4880",
"EventName": "PM_BANK_CONFLICT",
"BriefDescription": "Read blocked due to interleave conflict. The ifar logic will detect an interleave conflict and kill the data that was read that cycle."
},
- {,
+ {
"EventCode": "0x360B0",
"EventName": "PM_L3_P0_SYS_PUMP",
"BriefDescription": "L3 PF sent with sys scope port 0, counts even retried requests"
},
- {,
+ {
"EventCode": "0x3006A",
"EventName": "PM_IERAT_RELOAD_64K",
"BriefDescription": "IERAT Reloaded (Miss) for a 64k page"
},
- {,
+ {
"EventCode": "0xD8BC",
"EventName": "PM_LSU2_3_LRQF_FULL_CYC",
"BriefDescription": "Counts the number of cycles the LRQF is full. LRQF is the queue that holds loads between finish and completion. If it fills up, instructions stay in LRQ until completion, potentially backing up the LRQ"
},
- {,
+ {
"EventCode": "0x46086",
"EventName": "PM_L2_SN_M_RD_DONE",
"BriefDescription": "Snoop dispatched for a read and was M (true M)"
},
- {,
+ {
"EventCode": "0x40154",
"EventName": "PM_MRK_FAB_RSP_BKILL",
"BriefDescription": "Marked store had to do a bkill"
},
- {,
+ {
"EventCode": "0xF094",
"EventName": "PM_LSU2_L1_CAM_CANCEL",
"BriefDescription": "ls2 l1 tm cam cancel"
},
- {,
+ {
"EventCode": "0x2D014",
"EventName": "PM_CMPLU_STALL_LRQ_FULL",
"BriefDescription": "Finish stall because the NTF instruction was a load that was held in LSAQ (load-store address queue) because the LRQ (load-reorder queue) was full"
},
- {,
+ {
"EventCode": "0x3E05E",
"EventName": "PM_L3_CO_MEPF",
"BriefDescription": "L3 CO of line in Mep state (includes casthrough to memory). The Mepf state indicates that a line was brought in to satisfy an L3 prefetch request"
},
- {,
+ {
"EventCode": "0x460A2",
"EventName": "PM_L3_LAT_CI_HIT",
"BriefDescription": "L3 Lateral Castins Hit"
},
- {,
+ {
"EventCode": "0x3D14E",
"EventName": "PM_MRK_DATA_FROM_DL2L3_MOD",
"BriefDescription": "The processor's data cache was reloaded with Modified (M) data from another chip's L2 or L3 on a different Node or Group (Distant), as this chip due to a marked load"
},
- {,
+ {
"EventCode": "0x3D15E",
"EventName": "PM_MULT_MRK",
"BriefDescription": "mult marked instr"
},
- {,
+ {
"EventCode": "0x4084",
"EventName": "PM_EAT_FULL_CYC",
"BriefDescription": "Cycles No room in EAT"
},
- {,
+ {
"EventCode": "0x5098",
"EventName": "PM_LINK_STACK_WRONG_ADD_PRED",
"BriefDescription": "Link stack predicts wrong address, because of link stack design limitation or software violating the coding conventions"
},
- {,
+ {
"EventCode": "0x2C050",
"EventName": "PM_DATA_GRP_PUMP_CPRED",
"BriefDescription": "Initial and Final Pump Scope was group pump (prediction=correct) for a demand load"
},
- {,
+ {
"EventCode": "0xC0A4",
"EventName": "PM_LSU2_FALSE_LHS",
"BriefDescription": "False LHS match detected"
},
- {,
+ {
"EventCode": "0x58A0",
"EventName": "PM_LINK_STACK_CORRECT",
"BriefDescription": "Link stack predicts right address"
},
- {,
+ {
"EventCode": "0x36886",
"EventName": "PM_L2_SN_SX_I_DONE",
"BriefDescription": "Snoop dispatched and went from Sx to Ix"
},
- {,
+ {
"EventCode": "0x4E04A",
"EventName": "PM_DPTEG_FROM_OFF_CHIP_CACHE",
"BriefDescription": "A Page Table Entry was loaded into the TLB either shared or modified data from another core's L2/L3 on a different chip (remote or distant) due to a data side request. When using Radix Page Translation, this count excludes PDE reloads. Only PTE reloads are included"
},
- {,
+ {
"EventCode": "0x2C12C",
"EventName": "PM_MRK_DATA_FROM_DL4_CYC",
"BriefDescription": "Duration in cycles to reload from another chip's L4 on a different Node or Group (Distant) due to a marked load"
},
- {,
+ {
"EventCode": "0x4080",
"EventName": "PM_INST_FROM_L1",
"BriefDescription": "Instruction fetches from L1. L1 instruction hit"
},
- {,
+ {
"EventCode": "0xE898",
"EventName": "PM_LSU3_TM_L1_HIT",
"BriefDescription": "Load tm hit in L1"
},
- {,
+ {
"EventCode": "0x260A0",
"EventName": "PM_L3_CO_MEM",
"BriefDescription": "L3 CO to memory OR of port 0 and 1 (lossy = may undercount if two cresp come in the same cyc)"
},
- {,
+ {
"EventCode": "0x16082",
"EventName": "PM_L2_CASTOUT_MOD",
"BriefDescription": "L2 Castouts - Modified (M,Mu,Me)"
},
- {,
+ {
"EventCode": "0xC09C",
"EventName": "PM_LS0_LAUNCH_HELD_PREF",
"BriefDescription": "Number of times a load or store instruction was unable to launch/relaunch because a high priority prefetch used that relaunch cycle"
},
- {,
+ {
"EventCode": "0xC8B8",
"EventName": "PM_LSU_FLUSH_LARX_STCX",
"BriefDescription": "A larx is flushed because an older larx has an LMQ reservation for the same thread. A stcx is flushed because an older stcx is in the LMQ. The flush happens when the older larx/stcx relaunches"
},
- {,
+ {
"EventCode": "0x260A6",
"EventName": "PM_NON_TM_RST_SC",
"BriefDescription": "Non-TM snoop hits line in L3 that is TM_SC state and causes it to be invalidated"
},
- {,
+ {
"EventCode": "0x3608A",
"EventName": "PM_L2_RTY_ST",
"BriefDescription": "RC retries on PB for any store from core (excludes DCBFs)"
},
- {,
+ {
"EventCode": "0x24040",
"EventName": "PM_INST_FROM_L2_MEPF",
"BriefDescription": "The processor's Instruction cache was reloaded from local core's L2 hit without dispatch conflicts on Mepf state. due to an instruction fetch (not prefetch)"
},
- {,
+ {
"EventCode": "0x209C",
"EventName": "PM_TM_FAV_TBEGIN",
"BriefDescription": "Dispatch time Favored tbegin"
},
- {,
+ {
"EventCode": "0x2D01E",
"EventName": "PM_ICT_NOSLOT_DISP_HELD_ISSQ",
"BriefDescription": "Ict empty for this thread due to dispatch hold on this thread due to Issue q full, BRQ full, XVCF Full, Count cache, Link, Tar full"
},
- {,
+ {
"EventCode": "0x50A4",
"EventName": "PM_FLUSH_MPRED",
"BriefDescription": "Branch mispredict flushes. Includes target and address misprecition"
},
- {,
+ {
"EventCode": "0x1504C",
"EventName": "PM_IPTEG_FROM_LL4",
"BriefDescription": "A Page Table Entry was loaded into the TLB from the local chip's L4 cache due to a instruction side request"
},
- {,
+ {
"EventCode": "0x268A4",
"EventName": "PM_L3_LD_MISS",
"BriefDescription": "L3 Misses for demand LDs"
},
- {,
+ {
"EventCode": "0x26088",
"EventName": "PM_L2_GRP_GUESS_CORRECT",
"BriefDescription": "L2 guess grp (GS or NNS) and guess was correct (data intra-group AND ^on-chip)"
},
- {,
+ {
"EventCode": "0xD088",
"EventName": "PM_LSU0_LDMX_FIN",
"BriefDescription": "New P9 instruction LDMX. The definition of this new PMU event is (from the ldmx RFC02491): The thread has executed an ldmx instruction that accessed a doubleword that contains an effective address within an enabled section of the Load Monitored region. This event, therefore, should not occur if the FSCR has disabled the load monitored facility (FSCR[52]) or disabled the EBB facility (FSCR[56])."
},
- {,
+ {
"EventCode": "0xE8B4",
"EventName": "PM_LS1_TM_DISALLOW",
"BriefDescription": "A TM-ineligible instruction tries to execute inside a transaction and the LSU disallows it"
},
- {,
+ {
"EventCode": "0x1688C",
"EventName": "PM_RC_USAGE",
"BriefDescription": "Continuous 16 cycle (2to1) window where this signals rotates thru sampling each RC machine busy. PMU uses this wave to then do 16 cyc count to sample total number of machs running"
},
- {,
+ {
"EventCode": "0x3F054",
"EventName": "PM_RADIX_PWC_L4_PTE_FROM_L3MISS",
"BriefDescription": "A Page Table Entry was reloaded to a level 4 page walk cache from beyond the core's L3 data cache. This is the deepest level of PWC possible for a translation. The source could be local/remote/distant memory or another core's cache"
},
- {,
+ {
"EventCode": "0x2608A",
"EventName": "PM_ISIDE_DISP_FAIL_ADDR",
"BriefDescription": "All I-side-instruction-fetch dispatch attempts for this thread that failed due to an address collision conflict with an L2 machine already working on this line (e.g. ld-hit-stq or RC/CO/SN machines)"
},
- {,
+ {
"EventCode": "0x50B4",
"EventName": "PM_TAGE_CORRECT_TAKEN_CMPL",
"BriefDescription": "The TAGE overrode BHT direction prediction and it was correct. Counted at completion for taken branches only"
},
- {,
+ {
"EventCode": "0x2090",
"EventName": "PM_DISP_CLB_HELD_SB",
"BriefDescription": "Dispatch/CLB Hold: Scoreboard"
},
- {,
+ {
"EventCode": "0xE0B0",
"EventName": "PM_TM_FAIL_NON_TX_CONFLICT",
"BriefDescription": "Non transactional conflict from LSU, gets reported to TEXASR"
},
- {,
+ {
"EventCode": "0x201E0",
"EventName": "PM_MRK_DATA_FROM_MEMORY",
"BriefDescription": "The processor's data cache was reloaded from a memory location including L4 from local remote or distant due to a marked load"
},
- {,
+ {
"EventCode": "0x368A2",
"EventName": "PM_L3_L2_CO_MISS",
"BriefDescription": "L2 CO miss"
},
- {,
+ {
"EventCode": "0x3608C",
"EventName": "PM_CO0_BUSY",
"BriefDescription": "CO mach 0 Busy. Used by PMU to sample ave CO lifetime (mach0 used as sample point)"
},
- {,
+ {
"EventCode": "0x2C122",
"EventName": "PM_MRK_DATA_FROM_L3_DISP_CONFLICT_CYC",
"BriefDescription": "Duration in cycles to reload from local core's L3 with dispatch conflict due to a marked load"
},
- {,
+ {
"EventCode": "0x35154",
"EventName": "PM_MRK_DATA_FROM_L3_CYC",
"BriefDescription": "Duration in cycles to reload from local core's L3 due to a marked load"
},
- {,
+ {
"EventCode": "0x1D140",
"EventName": "PM_MRK_DATA_FROM_L31_MOD_CYC",
"BriefDescription": "Duration in cycles to reload with Modified (M) data from another core's L3 on the same chip due to a marked load"
},
- {,
+ {
"EventCode": "0x4404A",
"EventName": "PM_INST_FROM_OFF_CHIP_CACHE",
"BriefDescription": "The processor's Instruction cache was reloaded either shared or modified data from another core's L2/L3 on a different chip (remote or distant) due to an instruction fetch (not prefetch)"
},
- {,
+ {
"EventCode": "0x28AC",
"EventName": "PM_TM_FAIL_SELF",
"BriefDescription": "TM aborted because a self-induced conflict occurred in Suspended state, due to one of the following: a store to a storage location that was previously accessed transactionally; a dcbf, dcbi, or icbi specify- ing a block that was previously accessed transactionally; a dcbst specifying a block that was previously written transactionally; or a tlbie that specifies a translation that was pre- viously used transactionally"
},
- {,
+ {
"EventCode": "0x45056",
"EventName": "PM_SCALAR_FLOP_CMPL",
"BriefDescription": "Scalar flop operation completed"
},
- {,
+ {
"EventCode": "0x16092",
"EventName": "PM_L2_LD_MISS_128B",
"BriefDescription": "All successful D-side load dispatches that were an L2 miss (NOT Sx,Tx,Mx) for this thread and the RC calculated the request should be for 128B (i.e., M=0)"
},
- {,
+ {
"EventCode": "0x2E014",
"EventName": "PM_STCX_FIN",
"BriefDescription": "Number of stcx instructions finished. This includes instructions in the speculative path of a branch that may be flushed"
},
- {,
+ {
"EventCode": "0xD8AC",
"EventName": "PM_LWSYNC",
"BriefDescription": "An lwsync instruction was decoded and transferred"
},
- {,
+ {
"EventCode": "0x2094",
"EventName": "PM_TM_OUTER_TBEGIN",
"BriefDescription": "Completion time outer tbegin"
},
- {,
+ {
"EventCode": "0x160B4",
"EventName": "PM_L3_P0_LCO_RTY",
"BriefDescription": "L3 initiated LCO received retry on port 0 (can try 4 times)"
},
- {,
+ {
"EventCode": "0x36892",
"EventName": "PM_DSIDE_OTHER_64B_L2MEMACC",
"BriefDescription": "Valid when first beat of data comes in for an D-side fetch where data came EXCLUSIVELY from memory that was for hpc_read64, (RC had to fetch other 64B of a line from MC) i.e., number of times RC had to go to memory to get 'missing' 64B"
},
- {,
+ {
"EventCode": "0x20A8",
"EventName": "PM_TM_FAIL_FOOTPRINT_OVERFLOW",
"BriefDescription": "TM aborted because the tracking limit for transactional storage accesses was exceeded.. Asynchronous"
},
- {,
+ {
"EventCode": "0x30018",
"EventName": "PM_ICT_NOSLOT_DISP_HELD_HB_FULL",
"BriefDescription": "Ict empty for this thread due to dispatch holds because the History Buffer was full. Could be GPR/VSR/VMR/FPR/CR/XVF; CR; XVF (XER/VSCR/FPSCR)"
},
- {,
+ {
"EventCode": "0xC894",
"EventName": "PM_LS1_UNALIGNED_LD",
"BriefDescription": "Load instructions whose data crosses a double-word boundary, which causes it to require an additional slice than than what normally would be required of the load of that size. If the load wraps from slice 3 to slice 0, thee is an additional 3-cycle penalty"
},
- {,
+ {
"EventCode": "0x360A2",
"EventName": "PM_L3_L2_CO_HIT",
"BriefDescription": "L2 CO hits"
},
- {,
+ {
"EventCode": "0x36092",
"EventName": "PM_DSIDE_L2MEMACC",
"BriefDescription": "Valid when first beat of data comes in for an D-side fetch where data came EXCLUSIVELY from memory (excluding hpcread64 accesses), i.e., total memory accesses by RCs"
},
- {,
+ {
"EventCode": "0x10138",
"EventName": "PM_MRK_BR_2PATH",
"BriefDescription": "marked branches which are not strongly biased"
},
- {,
+ {
"EventCode": "0x2884",
"EventName": "PM_ISYNC",
"BriefDescription": "Isync completion count per thread"
},
- {,
+ {
"EventCode": "0x16882",
"EventName": "PM_L2_CASTOUT_SHR",
"BriefDescription": "L2 Castouts - Shared (Tx,Sx)"
},
- {,
+ {
"EventCode": "0x26092",
"EventName": "PM_L2_LD_MISS_64B",
"BriefDescription": "All successful D-side load dispatches that were an L2 miss (NOT Sx,Tx,Mx) for this thread and the RC calculated the request should be for 64B(i.e., M=1)"
},
- {,
+ {
"EventCode": "0x26080",
"EventName": "PM_L2_LD_MISS",
"BriefDescription": "All successful D-Side Load dispatches that were an L2 miss for this thread"
},
- {,
+ {
"EventCode": "0x3D14C",
"EventName": "PM_MRK_DATA_FROM_DMEM",
"BriefDescription": "The processor's data cache was reloaded from another chip's memory on the same Node or Group (Distant) due to a marked load"
},
- {,
+ {
"EventCode": "0x100FA",
"EventName": "PM_ANY_THRD_RUN_CYC",
"BriefDescription": "Cycles in which at least one thread has the run latch set"
},
- {,
+ {
"EventCode": "0x2C12A",
"EventName": "PM_MRK_DATA_FROM_RMEM_CYC",
"BriefDescription": "Duration in cycles to reload from another chip's memory on the same Node or Group ( Remote) due to a marked load"
},
- {,
+ {
"EventCode": "0x25048",
"EventName": "PM_IPTEG_FROM_LMEM",
"BriefDescription": "A Page Table Entry was loaded into the TLB from the local chip's Memory due to a instruction side request"
},
- {,
+ {
"EventCode": "0xD8A8",
"EventName": "PM_ISLB_MISS",
"BriefDescription": "Instruction SLB Miss - Total of all segment sizes"
},
- {,
+ {
"EventCode": "0x368AE",
"EventName": "PM_L3_P1_CO_RTY",
"BriefDescription": "L3 CO received retry port 1 (memory only), every retry counted"
},
- {,
+ {
"EventCode": "0x260A2",
"EventName": "PM_L3_CI_HIT",
"BriefDescription": "L3 Castins Hit (total count)"
},
- {,
+ {
"EventCode": "0x44054",
"EventName": "PM_VECTOR_LD_CMPL",
"BriefDescription": "Number of vector load instructions completed"
},
- {,
+ {
"EventCode": "0x1E05C",
"EventName": "PM_CMPLU_STALL_NESTED_TBEGIN",
"BriefDescription": "Completion stall because the ISU is updating the TEXASR to keep track of the nested tbegin. This is a short delay, and it includes ROT"
},
- {,
+ {
"EventCode": "0xC084",
"EventName": "PM_LS2_LD_VECTOR_FIN",
"BriefDescription": "LS2 finished load vector op"
},
- {,
+ {
"EventCode": "0x1608E",
"EventName": "PM_ST_CAUSED_FAIL",
"BriefDescription": "Non-TM Store caused any thread to fail"
},
- {,
+ {
"EventCode": "0x3080",
"EventName": "PM_ISU0_ISS_HOLD_ALL",
"BriefDescription": "All ISU rejects"
},
- {,
+ {
"EventCode": "0x1515A",
"EventName": "PM_SYNC_MRK_L2MISS",
"BriefDescription": "Marked L2 Miss that can throw a synchronous interrupt"
},
- {,
+ {
"EventCode": "0x26892",
"EventName": "PM_L2_ST_MISS_64B",
"BriefDescription": "All successful D-side store dispatches that were an L2 miss (NOT Sx,Tx,Mx) for this thread and the RC calculated the request should be for 64B (i.e., M=1)"
},
- {,
+ {
"EventCode": "0x2688C",
"EventName": "PM_CO_USAGE",
"BriefDescription": "Continuous 16 cycle (2to1) window where this signals rotates thru sampling each CO machine busy. PMU uses this wave to then do 16 cyc count to sample total number of machs running"
},
- {,
+ {
"EventCode": "0x48B8",
"EventName": "PM_BR_MPRED_TAKEN_TA",
"BriefDescription": "Conditional Branch Completed that was Mispredicted due to the Target Address Prediction from the Count Cache or Link Stack. Only XL-form branches that resolved Taken set this event."
},
- {,
+ {
"EventCode": "0x50B0",
"EventName": "PM_BTAC_BAD_RESULT",
"BriefDescription": "BTAC thinks branch will be taken but it is either predicted not-taken by the BHT, or the target address is wrong (less common). In both cases, a redirect will happen"
},
- {,
+ {
"EventCode": "0xD888",
"EventName": "PM_LSU1_LDMX_FIN",
"BriefDescription": "New P9 instruction LDMX. The definition of this new PMU event is (from the ldmx RFC02491): The thread has executed an ldmx instruction that accessed a doubleword that contains an effective address within an enabled section of the Load Monitored region. This event, therefore, should not occur if the FSCR has disabled the load monitored facility (FSCR[52]) or disabled the EBB facility (FSCR[56])."
},
- {,
+ {
"EventCode": "0x58B4",
"EventName": "PM_TAGE_CORRECT",
"BriefDescription": "The TAGE overrode BHT direction prediction and it was correct. Includes taken and not taken and is counted at execution time"
},
- {,
+ {
"EventCode": "0x3688C",
"EventName": "PM_SN_USAGE",
"BriefDescription": "Continuous 16 cycle (2to1) window where this signals rotates thru sampling each SN machine busy. PMU uses this wave to then do 16 cyc count to sample total number of machs running"
},
- {,
+ {
"EventCode": "0x36084",
"EventName": "PM_L2_RCST_DISP",
"BriefDescription": "All D-side store dispatch attempts for this thread"
},
- {,
+ {
"EventCode": "0x46084",
"EventName": "PM_L2_RCST_DISP_FAIL_OTHER",
"BriefDescription": "All D-side store dispatch attempts for this thread that failed due to reason other than address collision"
},
- {,
+ {
"EventCode": "0xF0AC",
"EventName": "PM_DC_PREF_STRIDED_CONF",
"BriefDescription": "A demand load referenced a line in an active strided prefetch stream. The stream could have been allocated through the hardware prefetch mechanism or through software."
},
- {,
+ {
"EventCode": "0x45054",
"EventName": "PM_FMA_CMPL",
"BriefDescription": "two flops operation completed (fmadd, fnmadd, fmsub, fnmsub) Scalar instructions only. "
},
- {,
+ {
"EventCode": "0x201E8",
"EventName": "PM_THRESH_EXC_512",
"BriefDescription": "Threshold counter exceeded a value of 512"
},
- {,
+ {
"EventCode": "0x36080",
"EventName": "PM_L2_INST",
"BriefDescription": "All successful I-side-instruction-fetch (e.g. i-demand, i-prefetch) dispatches for this thread"
},
- {,
+ {
"EventCode": "0x3504C",
"EventName": "PM_IPTEG_FROM_DL4",
"BriefDescription": "A Page Table Entry was loaded into the TLB from another chip's L4 on a different Node or Group (Distant) due to a instruction side request"
},
- {,
+ {
"EventCode": "0xD890",
"EventName": "PM_LS1_DC_COLLISIONS",
"BriefDescription": "Read-write data cache collisions"
},
- {,
+ {
"EventCode": "0x1688A",
"EventName": "PM_ISIDE_DISP",
"BriefDescription": "All I-side-instruction-fetch dispatch attempts for this thread"
},
- {,
+ {
"EventCode": "0x468AA",
"EventName": "PM_L3_P1_CO_L31",
"BriefDescription": "L3 CO to L3.1 (LCO) port 1 with or without data"
},
- {,
+ {
"EventCode": "0x28B0",
"EventName": "PM_DISP_HELD_TBEGIN",
"BriefDescription": "This outer tbegin transaction cannot be dispatched until the previous tend instruction completes"
},
- {,
+ {
"EventCode": "0xE8A0",
"EventName": "PM_LSU3_TM_L1_MISS",
"BriefDescription": "Load tm L1 miss"
},
- {,
+ {
"EventCode": "0x2C05E",
"EventName": "PM_INST_GRP_PUMP_MPRED",
"BriefDescription": "Final Pump Scope (Group) ended up either larger or smaller than Initial Pump Scope for an instruction fetch (demand only)"
},
- {,
+ {
"EventCode": "0xC8BC",
"EventName": "PM_STCX_SUCCESS_CMPL",
"BriefDescription": "Number of stcx instructions that completed successfully"
},
- {,
+ {
"EventCode": "0xE098",
"EventName": "PM_LSU2_TM_L1_HIT",
"BriefDescription": "Load tm hit in L1"
},
- {,
+ {
"EventCode": "0xE0B8",
"EventName": "PM_LS2_TM_DISALLOW",
"BriefDescription": "A TM-ineligible instruction tries to execute inside a transaction and the LSU disallows it"
},
- {,
+ {
"EventCode": "0x44044",
"EventName": "PM_INST_FROM_L31_ECO_MOD",
"BriefDescription": "The processor's Instruction cache was reloaded with Modified (M) data from another core's ECO L3 on the same chip due to an instruction fetch (not prefetch)"
},
- {,
+ {
"EventCode": "0x16886",
"EventName": "PM_CO_DISP_FAIL",
"BriefDescription": "CO dispatch failed due to all CO machines being busy"
},
- {,
+ {
"EventCode": "0x3D146",
"EventName": "PM_MRK_DATA_FROM_L3_NO_CONFLICT",
"BriefDescription": "The processor's data cache was reloaded from local core's L3 without conflict due to a marked load"
},
- {,
+ {
"EventCode": "0x16892",
"EventName": "PM_L2_ST_MISS_128B",
"BriefDescription": "All successful D-side store dispatches that were an L2 miss (NOT Sx,Tx,Mx) for this thread and the RC calculated the request should be for 128B (i.e., M=0)"
},
- {,
+ {
"EventCode": "0x26890",
"EventName": "PM_ISIDE_L2MEMACC",
"BriefDescription": "Valid when first beat of data comes in for an I-side fetch where data came from memory"
},
- {,
+ {
"EventCode": "0xD094",
"EventName": "PM_LS2_DC_COLLISIONS",
"BriefDescription": "Read-write data cache collisions"
},
- {,
+ {
"EventCode": "0x3C05E",
"EventName": "PM_MEM_RWITM",
"BriefDescription": "Memory Read With Intent to Modify for this thread"
},
- {,
+ {
"EventCode": "0xC090",
"EventName": "PM_LSU_STCX",
"BriefDescription": "STCX sent to nest, i.e. total"
},
- {,
+ {
"EventCode": "0x2C120",
"EventName": "PM_MRK_DATA_FROM_L2_NO_CONFLICT",
"BriefDescription": "The processor's data cache was reloaded from local core's L2 without conflict due to a marked load"
},
- {,
+ {
"EventCode": "0x36086",
"EventName": "PM_L2_RC_ST_DONE",
"BriefDescription": "Read-claim machine did store to line that was in Tx or Sx (Tagged or Shared state)"
},
- {,
+ {
"EventCode": "0xE8AC",
"EventName": "PM_TM_FAIL_TX_CONFLICT",
"BriefDescription": "Transactional conflict from LSU, gets reported to TEXASR"
},
- {,
+ {
"EventCode": "0x48A8",
"EventName": "PM_DECODE_FUSION_LD_ST_DISP",
"BriefDescription": "32-bit displacement D-form and 16-bit displacement X-form"
},
- {,
+ {
"EventCode": "0x3D144",
"EventName": "PM_MRK_DATA_FROM_L2_MEPF_CYC",
"BriefDescription": "Duration in cycles to reload from local core's L2 hit without dispatch conflicts on Mepf state. due to a marked load"
},
- {,
+ {
"EventCode": "0x44046",
"EventName": "PM_INST_FROM_L21_MOD",
"BriefDescription": "The processor's Instruction cache was reloaded with Modified (M) data from another core's L2 on the same chip due to an instruction fetch (not prefetch)"
},
- {,
+ {
"EventCode": "0x40B0",
"EventName": "PM_BR_PRED_TAKEN_CR",
"BriefDescription": "Conditional Branch that had its direction predicted. I-form branches do not set this event. In addition, B-form branches which do not use the BHT do not set this event - these are branches with BO-field set to 'always taken' and branches"
},
- {,
+ {
"EventCode": "0x15040",
"EventName": "PM_IPTEG_FROM_L2_NO_CONFLICT",
"BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L2 without conflict due to a instruction side request"
},
- {,
+ {
"EventCode": "0x460A6",
"EventName": "PM_RD_FORMING_SC",
"BriefDescription": "Doesn't occur"
},
- {,
+ {
"EventCode": "0x35042",
"EventName": "PM_IPTEG_FROM_L3_DISP_CONFLICT",
"BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L3 with dispatch conflict due to a instruction side request"
},
- {,
+ {
"EventCode": "0xF898",
"EventName": "PM_XLATE_RADIX_MODE",
"BriefDescription": "LSU reports every cycle the thread is in radix translation mode (as opposed to HPT mode)"
},
- {,
+ {
"EventCode": "0x2D142",
"EventName": "PM_MRK_DATA_FROM_L3_MEPF",
"BriefDescription": "The processor's data cache was reloaded from local core's L3 without dispatch conflicts hit on Mepf state. due to a marked load"
},
- {,
+ {
"EventCode": "0x160B0",
"EventName": "PM_L3_P0_NODE_PUMP",
"BriefDescription": "L3 PF sent with nodal scope port 0, counts even retried requests"
},
- {,
+ {
"EventCode": "0xD88C",
"EventName": "PM_LSU3_LDMX_FIN",
"BriefDescription": "New P9 instruction LDMX. The definition of this new PMU event is (from the ldmx RFC02491): The thread has executed an ldmx instruction that accessed a doubleword that contains an effective address within an enabled section of the Load Monitored region. This event, therefore, should not occur if the FSCR has disabled the load monitored facility (FSCR[52]) or disabled the EBB facility (FSCR[56])."
},
- {,
+ {
"EventCode": "0x36882",
"EventName": "PM_L2_LD_HIT",
"BriefDescription": "All successful D-side-Ld or I-side-instruction-fetch dispatches for this thread that were L2 hits"
},
- {,
+ {
"EventCode": "0x168AC",
"EventName": "PM_L3_CI_USAGE",
"BriefDescription": "Rotating sample of 16 CI or CO actives"
},
- {,
+ {
"EventCode": "0x20134",
"EventName": "PM_MRK_FXU_FIN",
"BriefDescription": "fxu marked instr finish"
},
- {,
+ {
"EventCode": "0x4608E",
"EventName": "PM_TM_CAP_OVERFLOW",
"BriefDescription": "TM Footprint Capacity Overflow"
},
- {,
+ {
"EventCode": "0x4F05C",
"EventName": "PM_RADIX_PWC_L2_PTE_FROM_L3MISS",
"BriefDescription": "A Page Table Entry was reloaded to a level 2 page walk cache from beyond the core's L3 data cache. This implies that level 3 and level 4 PWC accesses were not necessary for this translation. The source could be local/remote/distant memory or another core's cache"
},
- {,
+ {
"EventCode": "0x40014",
"EventName": "PM_PROBE_NOP_DISP",
"BriefDescription": "ProbeNops dispatched"
},
- {,
+ {
"EventCode": "0x10052",
"EventName": "PM_GRP_PUMP_MPRED_RTY",
"BriefDescription": "Final Pump Scope (Group) ended up larger than Initial Pump Scope (Chip) for all data types excluding data prefetch (demand load,inst prefetch,inst fetch,xlate)"
},
- {,
+ {
"EventCode": "0x2505E",
"EventName": "PM_BACK_BR_CMPL",
"BriefDescription": "Branch instruction completed with a target address less than current instruction address"
},
- {,
+ {
"EventCode": "0x2688A",
"EventName": "PM_ISIDE_DISP_FAIL_OTHER",
"BriefDescription": "All I-side-instruction-fetch dispatch attempts for this thread that failed due to reasons other than an address collision conflict with an L2 machine (e.g. no available RC/CO machines)"
},
- {,
+ {
"EventCode": "0x2001A",
"EventName": "PM_NTC_ALL_FIN",
"BriefDescription": "Cycles after instruction finished to instruction completed."
},
- {,
+ {
"EventCode": "0x3005A",
"EventName": "PM_ISQ_0_8_ENTRIES",
"BriefDescription": "Cycles in which 8 or less Issue Queue entries are in use. This is a shared event, not per thread"
},
- {,
+ {
"EventCode": "0x3515E",
"EventName": "PM_MRK_BACK_BR_CMPL",
"BriefDescription": "Marked branch instruction completed with a target address less than current instruction address"
},
- {,
+ {
"EventCode": "0xF890",
"EventName": "PM_LSU1_L1_CAM_CANCEL",
"BriefDescription": "ls1 l1 tm cam cancel"
},
- {,
+ {
"EventCode": "0x268AE",
"EventName": "PM_L3_P3_PF_RTY",
"BriefDescription": "L3 PF received retry port 3, every retry counted"
},
- {,
+ {
"EventCode": "0xE884",
"EventName": "PM_LS1_ERAT_MISS_PREF",
"BriefDescription": "LS1 Erat miss due to prefetch"
},
- {,
+ {
"EventCode": "0xE89C",
"EventName": "PM_LSU1_TM_L1_MISS",
"BriefDescription": "Load tm L1 miss"
},
- {,
+ {
"EventCode": "0x28A8",
"EventName": "PM_TM_FAIL_CONF_NON_TM",
"BriefDescription": "TM aborted because a conflict occurred with a non-transactional access by another processor"
},
- {,
+ {
"EventCode": "0x16890",
"EventName": "PM_L1PF_L2MEMACC",
"BriefDescription": "Valid when first beat of data comes in for an L1PF where data came from memory"
},
- {,
+ {
"EventCode": "0x4504C",
"EventName": "PM_IPTEG_FROM_DMEM",
"BriefDescription": "A Page Table Entry was loaded into the TLB from another chip's memory on the same Node or Group (Distant) due to a instruction side request"
},
- {,
+ {
"EventCode": "0x1002E",
"EventName": "PM_LMQ_MERGE",
"BriefDescription": "A demand miss collides with a prefetch for the same line"
},
- {,
+ {
"EventCode": "0x160B6",
"EventName": "PM_L3_WI0_BUSY",
"BriefDescription": "Rotating sample of 8 WI valid (duplicate)"
},
- {,
+ {
"EventCode": "0x368AC",
"EventName": "PM_L3_CO0_BUSY",
"BriefDescription": "Lifetime, sample of CO machine 0 valid"
},
- {,
+ {
"EventCode": "0x2E040",
"EventName": "PM_DPTEG_FROM_L2_MEPF",
"BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L2 hit without dispatch conflicts on Mepf state. due to a data side request. When using Radix Page Translation, this count excludes PDE reloads. Only PTE reloads are included"
},
- {,
+ {
"EventCode": "0x1D152",
"EventName": "PM_MRK_DATA_FROM_DL4",
"BriefDescription": "The processor's data cache was reloaded from another chip's L4 on a different Node or Group (Distant) due to a marked load"
},
- {,
+ {
"EventCode": "0x46880",
"EventName": "PM_ISIDE_MRU_TOUCH",
"BriefDescription": "I-side L2 MRU touch sent to L2 for this thread I-side L2 MRU touch commands sent to the L2 for this thread"
},
- {,
+ {
"EventCode": "0x508C",
"EventName": "PM_SHL_CREATED",
"BriefDescription": "Store-Hit-Load Table Entry Created"
},
- {,
+ {
"EventCode": "0x50B8",
"EventName": "PM_TAGE_OVERRIDE_WRONG",
"BriefDescription": "The TAGE overrode BHT direction prediction but it was incorrect. Counted at completion for taken branches only"
},
- {,
+ {
"EventCode": "0x160AE",
"EventName": "PM_L3_P0_PF_RTY",
"BriefDescription": "L3 PF received retry port 0, every retry counted"
},
- {,
+ {
"EventCode": "0x268B2",
"EventName": "PM_L3_LOC_GUESS_WRONG",
"BriefDescription": "Prefetch scope predictor selected LNS, but was wrong"
},
- {,
+ {
"EventCode": "0x36088",
"EventName": "PM_L2_SYS_GUESS_CORRECT",
"BriefDescription": "L2 guess system (VGS or RNS) and guess was correct (ie data beyond-group)"
},
- {,
+ {
"EventCode": "0x260AE",
"EventName": "PM_L3_P2_PF_RTY",
"BriefDescription": "L3 PF received retry port 2, every retry counted"
},
- {,
+ {
"EventCode": "0xD8B0",
"EventName": "PM_PTESYNC",
"BriefDescription": "A ptesync instruction was counted when the instruction is decoded and transmitted"
},
- {,
+ {
"EventCode": "0x26086",
"EventName": "PM_CO_TM_SC_FOOTPRINT",
"BriefDescription": "L2 did a cleanifdirty CO to the L3 (ie created an SC line in the L3) OR L2 TM_store hit dirty HPC line and L3 indicated SC line formed in L3 on RDR bus"
},
- {,
+ {
"EventCode": "0x1E05A",
"EventName": "PM_CMPLU_STALL_ANY_SYNC",
"BriefDescription": "Cycles in which the NTC sync instruction (isync, lwsync or hwsync) is not allowed to complete"
},
- {,
+ {
"EventCode": "0xF090",
"EventName": "PM_LSU0_L1_CAM_CANCEL",
"BriefDescription": "ls0 l1 tm cam cancel"
},
- {,
+ {
"EventCode": "0xC0A8",
"EventName": "PM_LSU_FLUSH_CI",
"BriefDescription": "Load was not issued to LSU as a cache inhibited (non-cacheable) load but it was later determined to be cache inhibited"
},
- {,
+ {
"EventCode": "0x20AC",
"EventName": "PM_TM_FAIL_CONF_TM",
"BriefDescription": "TM aborted because a conflict occurred with another transaction."
},
- {,
+ {
"EventCode": "0x588C",
"EventName": "PM_SHL_ST_DEP_CREATED",
"BriefDescription": "Store-Hit-Load Table Read Hit with entry Enabled"
},
- {,
+ {
"EventCode": "0x46882",
"EventName": "PM_L2_ST_HIT",
"BriefDescription": "All successful D-side store dispatches for this thread that were L2 hits"
},
- {,
+ {
"EventCode": "0x360AC",
"EventName": "PM_L3_SN0_BUSY",
"BriefDescription": "Lifetime, sample of snooper machine 0 valid"
},
- {,
+ {
"EventCode": "0x3005C",
"EventName": "PM_BFU_BUSY",
"BriefDescription": "Cycles in which all 4 Binary Floating Point units are busy. The BFU is running at capacity"
},
- {,
+ {
"EventCode": "0x48A0",
"EventName": "PM_BR_PRED_PCACHE",
"BriefDescription": "Conditional branch completed that used pattern cache prediction"
},
- {,
+ {
"EventCode": "0x26880",
"EventName": "PM_L2_ST_MISS",
"BriefDescription": "All successful D-Side Store dispatches that were an L2 miss for this thread"
},
- {,
+ {
"EventCode": "0xF8B4",
"EventName": "PM_DC_PREF_XCONS_ALLOC",
"BriefDescription": "Prefetch stream allocated in the Ultra conservative phase by either the hardware prefetch mechanism or software prefetch"
},
- {,
+ {
"EventCode": "0x35048",
"EventName": "PM_IPTEG_FROM_DL2L3_SHR",
"BriefDescription": "A Page Table Entry was loaded into the TLB with Shared (S) data from another chip's L2 or L3 on a different Node or Group (Distant), as this chip due to a instruction side request"
},
- {,
+ {
"EventCode": "0x260A8",
"EventName": "PM_L3_PF_HIT_L3",
"BriefDescription": "L3 PF hit in L3 (abandoned)"
},
- {,
+ {
"EventCode": "0x360B4",
"EventName": "PM_L3_PF0_BUSY",
"BriefDescription": "Lifetime, sample of PF machine 0 valid"
},
- {,
+ {
"EventCode": "0xC0B0",
"EventName": "PM_LSU_FLUSH_UE",
"BriefDescription": "Correctable ECC error on reload data, reported at critical data forward time"
},
- {,
+ {
"EventCode": "0x4013A",
"EventName": "PM_MRK_IC_MISS",
"BriefDescription": "Marked instruction experienced I cache miss"
},
- {,
+ {
"EventCode": "0x2088",
"EventName": "PM_FLUSH_DISP_SB",
"BriefDescription": "Dispatch Flush: Scoreboard"
},
- {,
+ {
"EventCode": "0x401E8",
"EventName": "PM_MRK_DATA_FROM_L2MISS",
"BriefDescription": "The processor's data cache was reloaded from a location other than the local core's L2 due to a marked load"
},
- {,
+ {
"EventCode": "0x3688E",
"EventName": "PM_TM_ST_CAUSED_FAIL",
"BriefDescription": "TM Store (fav or non-fav) caused another thread to fail"
},
- {,
+ {
"EventCode": "0x460B2",
"EventName": "PM_L3_SYS_GUESS_WRONG",
"BriefDescription": "Prefetch scope predictor selected VGS or RNS, but was wrong"
},
- {,
+ {
"EventCode": "0x58B8",
"EventName": "PM_TAGE_OVERRIDE_WRONG_SPEC",
"BriefDescription": "The TAGE overrode BHT direction prediction and it was correct. Includes taken and not taken and is counted at execution time"
},
- {,
+ {
"EventCode": "0xE890",
"EventName": "PM_LSU3_ERAT_HIT",
"BriefDescription": "Primary ERAT hit. There is no secondary ERAT"
},
- {,
+ {
"EventCode": "0x2898",
"EventName": "PM_TM_TABORT_TRECLAIM",
"BriefDescription": "Completion time tabortnoncd, tabortcd, treclaim"
},
- {,
+ {
"EventCode": "0x268A0",
"EventName": "PM_L3_CO_L31",
"BriefDescription": "L3 CO to L3.1 OR of port 0 and 1 (lossy = may undercount if two cresps come in the same cyc)"
},
- {,
+ {
"EventCode": "0x5080",
"EventName": "PM_THRD_PRIO_4_5_CYC",
"BriefDescription": "Cycles thread running at priority level 4 or 5"
},
- {,
+ {
"EventCode": "0x2505C",
"EventName": "PM_VSU_FIN",
"BriefDescription": "VSU instruction finished. Up to 4 per cycle"
},
- {,
+ {
"EventCode": "0x40A4",
"EventName": "PM_BR_PRED_CCACHE",
"BriefDescription": "Conditional Branch Completed that used the Count Cache for Target Prediction"
},
- {,
+ {
"EventCode": "0x2E04A",
"EventName": "PM_DPTEG_FROM_RL4",
"BriefDescription": "A Page Table Entry was loaded into the TLB from another chip's L4 on the same Node or Group ( Remote) due to a data side request. When using Radix Page Translation, this count excludes PDE reloads. Only PTE reloads are included"
},
- {,
+ {
"EventCode": "0x4D12E",
"EventName": "PM_MRK_DATA_FROM_DL2L3_MOD_CYC",
"BriefDescription": "Duration in cycles to reload with Modified (M) data from another chip's L2 or L3 on a different Node or Group (Distant), as this chip due to a marked load"
},
- {,
+ {
"EventCode": "0xC8B4",
"EventName": "PM_LSU_FLUSH_LHL_SHL",
"BriefDescription": "The instruction was flushed because of a sequential load/store consistency. If a load or store hits on an older load that has either been snooped (for loads) or has stale data (for stores)."
},
- {,
+ {
"EventCode": "0x58A4",
"EventName": "PM_FLUSH_LSU",
"BriefDescription": "LSU flushes. Includes all lsu flushes"
},
- {,
+ {
"EventCode": "0x1D150",
"EventName": "PM_MRK_DATA_FROM_DL2L3_SHR",
"BriefDescription": "The processor's data cache was reloaded with Shared (S) data from another chip's L2 or L3 on a different Node or Group (Distant), as this chip due to a marked load"
},
- {,
+ {
"EventCode": "0xC8A0",
"EventName": "PM_LSU1_FALSE_LHS",
"BriefDescription": "False LHS match detected"
},
- {,
+ {
"EventCode": "0x48BC",
"EventName": "PM_THRD_PRIO_2_3_CYC",
"BriefDescription": "Cycles thread running at priority level 2 or 3"
},
- {,
+ {
"EventCode": "0x368B2",
"EventName": "PM_L3_GRP_GUESS_WRONG_HIGH",
"BriefDescription": "Prefetch scope predictor selected GS or NNS, but was wrong because scope was VGS or RNS"
},
- {,
+ {
"EventCode": "0xE8BC",
"EventName": "PM_LS1_PTE_TABLEWALK_CYC",
"BriefDescription": "Cycles when a tablewalk is pending on this thread on table 1"
},
- {,
+ {
"EventCode": "0x1F152",
"EventName": "PM_MRK_FAB_RSP_BKILL_CYC",
"BriefDescription": "cycles L2 RC took for a bkill"
},
- {,
+ {
"EventCode": "0x4C124",
"EventName": "PM_MRK_DATA_FROM_L3_NO_CONFLICT_CYC",
"BriefDescription": "Duration in cycles to reload from local core's L3 without conflict due to a marked load"
},
- {,
+ {
"EventCode": "0x2F14A",
"EventName": "PM_MRK_DPTEG_FROM_RL4",
"BriefDescription": "A Page Table Entry was loaded into the TLB from another chip's L4 on the same Node or Group ( Remote) due to a marked data side request. When using Radix Page Translation, this count excludes PDE reloads. Only PTE reloads are included"
},
- {,
+ {
"EventCode": "0x26888",
"EventName": "PM_L2_GRP_GUESS_WRONG",
"BriefDescription": "L2 guess grp (GS or NNS) and guess was not correct (ie data on-chip OR beyond-group)"
},
- {,
+ {
"EventCode": "0xC0AC",
"EventName": "PM_LSU_FLUSH_EMSH",
"BriefDescription": "An ERAT miss was detected after a set-p hit. Erat tracker indicates fail due to tlbmiss and the instruction gets flushed because the instruction was working on the wrong address"
},
- {,
+ {
"EventCode": "0x260B2",
"EventName": "PM_L3_SYS_GUESS_CORRECT",
"BriefDescription": "Prefetch scope predictor selected VGS or RNS and was correct"
},
- {,
+ {
"EventCode": "0x1D146",
"EventName": "PM_MRK_DATA_FROM_MEMORY_CYC",
"BriefDescription": "Duration in cycles to reload from a memory location including L4 from local remote or distant due to a marked load"
},
- {,
+ {
"EventCode": "0xE094",
"EventName": "PM_LSU0_TM_L1_HIT",
"BriefDescription": "Load tm hit in L1"
},
- {,
+ {
"EventCode": "0x46888",
"EventName": "PM_L2_GROUP_PUMP",
"BriefDescription": "RC requests that were on group (aka nodel) pump attempts"
},
- {,
+ {
"EventCode": "0xC08C",
"EventName": "PM_LSU_DTLB_MISS_16M_2M",
"BriefDescription": "Data TLB Miss page size 16M (HPT) or 2M (Radix)"
},
- {,
+ {
"EventCode": "0x16080",
"EventName": "PM_L2_LD",
"BriefDescription": "All successful D-side Load dispatches for this thread (L2 miss + L2 hits)"
},
- {,
+ {
"EventCode": "0x4505C",
"EventName": "PM_MATH_FLOP_CMPL",
"BriefDescription": "Math flop instruction completed"
},
- {,
+ {
"EventCode": "0xC080",
"EventName": "PM_LS0_LD_VECTOR_FIN",
"BriefDescription": "LS0 finished load vector op"
},
- {,
+ {
"EventCode": "0x368B0",
"EventName": "PM_L3_P1_SYS_PUMP",
"BriefDescription": "L3 PF sent with sys scope port 1, counts even retried requests"
},
- {,
+ {
"EventCode": "0x1F146",
"EventName": "PM_MRK_DPTEG_FROM_L31_SHR",
"BriefDescription": "A Page Table Entry was loaded into the TLB with Shared (S) data from another core's L3 on the same chip due to a marked data side request.. When using Radix Page Translation, this count excludes PDE reloads. Only PTE reloads are included"
},
- {,
+ {
"EventCode": "0x2000C",
"EventName": "PM_THRD_ALL_RUN_CYC",
"BriefDescription": "Cycles in which all the threads have the run latch set"
},
- {,
+ {
"EventCode": "0xC0BC",
"EventName": "PM_LSU_FLUSH_OTHER",
"BriefDescription": "Other LSU flushes including: Sync (sync ack from L2 caused search of LRQ for oldest snooped load, This will either signal a Precise Flush of the oldest snooped loa or a Flush Next PPC); Data Valid Flush Next (several cases of this, one example is store and reload are lined up such that a store-hit-reload scenario exists and the CDF has already launched and has gotten bad/stale data); Bad Data Valid Flush Next (might be a few cases of this, one example is a larxa (D$ hit) return data and dval but can't allocate to LMQ (LMQ full or other reason). Already gave dval but can't watch it for snoop_hit_larx. Need to take the “bad dval” back and flush all younger ops)"
},
- {,
+ {
"EventCode": "0x5094",
"EventName": "PM_IC_MISS_ICBI",
"BriefDescription": "threaded version, IC Misses where we got EA dir hit but no sector valids were on. ICBI took line out"
},
- {,
+ {
"EventCode": "0xC8A8",
"EventName": "PM_LSU_FLUSH_ATOMIC",
"BriefDescription": "Quad-word loads (lq) are considered atomic because they always span at least 2 slices. If a snoop or store from another thread changes the data the load is accessing between the 2 or 3 pieces of the lq instruction, the lq will be flushed"
},
- {,
+ {
"EventCode": "0x1E04E",
"EventName": "PM_DPTEG_FROM_L2MISS",
"BriefDescription": "A Page Table Entry was loaded into the TLB from a location other than the local core's L2 due to a data side request. When using Radix Page Translation, this count excludes PDE reloads. Only PTE reloads are included"
},
- {,
+ {
"EventCode": "0x4D05E",
"EventName": "PM_BR_CMPL",
"BriefDescription": "Any Branch instruction completed"
},
- {,
+ {
"EventCode": "0x260B0",
"EventName": "PM_L3_P0_GRP_PUMP",
"BriefDescription": "L3 PF sent with grp scope port 0, counts even retried requests"
},
- {,
+ {
"EventCode": "0x30132",
"EventName": "PM_MRK_VSU_FIN",
"BriefDescription": "VSU marked instr finish"
},
- {,
+ {
"EventCode": "0x2D120",
"EventName": "PM_MRK_DATA_FROM_OFF_CHIP_CACHE",
"BriefDescription": "The processor's data cache was reloaded either shared or modified data from another core's L2/L3 on a different chip (remote or distant) due to a marked load"
},
- {,
+ {
"EventCode": "0x1E048",
"EventName": "PM_DPTEG_FROM_ON_CHIP_CACHE",
"BriefDescription": "A Page Table Entry was loaded into the TLB either shared or modified data from another core's L2/L3 on the same chip due to a data side request. When using Radix Page Translation, this count excludes PDE reloads. Only PTE reloads are included"
},
- {,
+ {
"EventCode": "0x16086",
"EventName": "PM_L2_SN_M_WR_DONE",
"BriefDescription": "SNP dispatched for a write and was M (true M); for DMA cacheinj this will pulse if rty/push is required (won't pulse if cacheinj is accepted)"
},
- {,
+ {
"EventCode": "0x489C",
"EventName": "PM_BR_CORECT_PRED_TAKEN_CMPL",
"BriefDescription": "Conditional Branch Completed in which the HW correctly predicted the direction as taken. Counted at completion time"
},
- {,
+ {
"EventCode": "0xF0B8",
"EventName": "PM_LS0_UNALIGNED_ST",
"BriefDescription": "Store instructions whose data crosses a double-word boundary, which causes it to require an additional slice than than what normally would be required of the Store of that size. If the Store wraps from slice 3 to slice 0, thee is an additional 3-cycle penalty"
},
- {,
+ {
"EventCode": "0x20132",
"EventName": "PM_MRK_DFU_FIN",
"BriefDescription": "Decimal Unit marked Instruction Finish"
},
- {,
+ {
"EventCode": "0x160A6",
"EventName": "PM_TM_SC_CO",
"BriefDescription": "L3 castout of line that was StoreCopy (original value of speculatively written line) in a Transaction"
},
- {,
+ {
"EventCode": "0xC8B0",
"EventName": "PM_LSU_FLUSH_LHS",
"BriefDescription": "Effective Address alias flush : no EA match but Real Address match. If the data has not yet been returned for this load, the instruction will just be rejected, but if it has returned data, it will be flushed"
},
- {,
+ {
"EventCode": "0x16084",
"EventName": "PM_L2_RCLD_DISP",
"BriefDescription": "All D-side-Ld or I-side-instruction-fetch dispatch attempts for this thread"
},
- {,
+ {
"EventCode": "0x3F150",
"EventName": "PM_MRK_ST_DRAIN_TO_L2DISP_CYC",
"BriefDescription": "cycles to drain st from core to L2"
},
- {,
+ {
"EventCode": "0x168A4",
"EventName": "PM_L3_MISS",
"BriefDescription": "L3 Misses (L2 miss also missing L3, including data/instrn/xlate)"
},
- {,
+ {
"EventCode": "0xF080",
"EventName": "PM_LSU_STCX_FAIL",
"BriefDescription": "The LSU detects the condition that a stcx instruction failed. No requirement to wait for a response from the nest"
},
- {,
+ {
"EventCode": "0x30038",
"EventName": "PM_CMPLU_STALL_DMISS_LMEM",
"BriefDescription": "Completion stall due to cache miss that resolves in local memory"
},
- {,
+ {
"EventCode": "0x28A4",
"EventName": "PM_MRK_TEND_FAIL",
"BriefDescription": "Nested or not nested tend failed for a marked tend instruction"
},
- {,
+ {
"EventCode": "0x100FC",
"EventName": "PM_LD_REF_L1",
"BriefDescription": "All L1 D cache load references counted at finish, gated by reject"
},
- {,
+ {
"EventCode": "0xC0A0",
"EventName": "PM_LSU0_FALSE_LHS",
"BriefDescription": "False LHS match detected"
},
- {,
+ {
"EventCode": "0x468A8",
"EventName": "PM_SN_MISS",
"BriefDescription": "Any port snooper L3 miss or collision. Up to 4 can happen in a cycle but we only count 1"
},
- {,
+ {
"EventCode": "0x36888",
"EventName": "PM_L2_SYS_GUESS_WRONG",
"BriefDescription": "L2 guess system (VGS or RNS) and guess was not correct (ie data ^beyond-group)"
},
- {,
+ {
"EventCode": "0x2080",
"EventName": "PM_EE_OFF_EXT_INT",
"BriefDescription": "CyclesMSR[EE] is off and external interrupts are active"
},
- {,
+ {
"EventCode": "0xE8B8",
"EventName": "PM_LS3_TM_DISALLOW",
"BriefDescription": "A TM-ineligible instruction tries to execute inside a transaction and the LSU disallows it"
},
- {,
+ {
"EventCode": "0x2688E",
"EventName": "PM_TM_FAV_CAUSED_FAIL",
"BriefDescription": "TM Load (fav) caused another thread to fail"
},
- {,
+ {
"EventCode": "0x16090",
"EventName": "PM_SN0_BUSY",
"BriefDescription": "SN mach 0 Busy. Used by PMU to sample ave SN lifetime (mach0 used as sample point)"
},
- {,
+ {
"EventCode": "0x360AE",
"EventName": "PM_L3_P0_CO_RTY",
"BriefDescription": "L3 CO received retry port 0 (memory only), every retry counted"
},
- {,
+ {
"EventCode": "0x168A8",
"EventName": "PM_L3_WI_USAGE",
"BriefDescription": "Lifetime, sample of Write Inject machine 0 valid"
},
- {,
+ {
"EventCode": "0x468A2",
"EventName": "PM_L3_LAT_CI_MISS",
"BriefDescription": "L3 Lateral Castins Miss"
},
- {,
+ {
"EventCode": "0x4090",
"EventName": "PM_IC_PREF_CANCEL_PAGE",
"BriefDescription": "Prefetch Canceled due to page boundary"
},
- {,
+ {
"EventCode": "0x460AA",
"EventName": "PM_L3_P0_CO_L31",
"BriefDescription": "L3 CO to L3.1 (LCO) port 0 with or without data"
},
- {,
+ {
"EventCode": "0x2880",
"EventName": "PM_FLUSH_DISP",
"BriefDescription": "Dispatch flush"
},
- {,
+ {
"EventCode": "0x168AE",
"EventName": "PM_L3_P1_PF_RTY",
"BriefDescription": "L3 PF received retry port 1, every retry counted"
},
- {,
+ {
"EventCode": "0x46082",
"EventName": "PM_L2_ST_DISP",
"BriefDescription": "All successful D-side store dispatches for this thread"
},
- {,
+ {
"EventCode": "0x36880",
"EventName": "PM_L2_INST_MISS",
"BriefDescription": "All successful I-side-instruction-fetch (e.g. i-demand, i-prefetch) dispatches for this thread that were an L2 miss"
},
- {,
+ {
"EventCode": "0xE084",
"EventName": "PM_LS0_ERAT_MISS_PREF",
"BriefDescription": "LS0 Erat miss due to prefetch"
},
- {,
+ {
"EventCode": "0x409C",
"EventName": "PM_BR_PRED",
"BriefDescription": "Conditional Branch Executed in which the HW predicted the Direction or Target. Includes taken and not taken and is counted at execution time"
},
- {,
+ {
"EventCode": "0x2D144",
"EventName": "PM_MRK_DATA_FROM_L31_MOD",
"BriefDescription": "The processor's data cache was reloaded with Modified (M) data from another core's L3 on the same chip due to a marked load"
},
- {,
+ {
"EventCode": "0x360A4",
"EventName": "PM_L3_CO_LCO",
"BriefDescription": "Total L3 COs occurred on LCO L3.1 (good cresp, may end up in mem on a retry)"
},
- {,
+ {
"EventCode": "0x4890",
"EventName": "PM_IC_PREF_CANCEL_HIT",
"BriefDescription": "Prefetch Canceled due to icache hit"
},
- {,
+ {
"EventCode": "0x268A8",
"EventName": "PM_RD_HIT_PF",
"BriefDescription": "RD machine hit L3 PF machine"
},
- {,
+ {
"EventCode": "0x16880",
"EventName": "PM_L2_ST",
"BriefDescription": "All successful D-side store dispatches for this thread (L2 miss + L2 hits)"
},
- {,
+ {
"EventCode": "0x4098",
"EventName": "PM_IC_DEMAND_L2_BHT_REDIRECT",
"BriefDescription": "L2 I cache demand request due to BHT redirect, branch redirect ( 2 bubbles 3 cycles)"
},
- {,
+ {
"EventCode": "0xD0B4",
"EventName": "PM_LSU0_SRQ_S0_VALID_CYC",
"BriefDescription": "Slot 0 of SRQ valid"
},
- {,
+ {
"EventCode": "0x160AA",
"EventName": "PM_L3_P0_LCO_NO_DATA",
"BriefDescription": "Dataless L3 LCO sent port 0"
},
- {,
+ {
"EventCode": "0x208C",
"EventName": "PM_CLB_HELD",
"BriefDescription": "CLB (control logic block - indicates quadword fetch block) Hold: Any Reason"
},
- {,
+ {
"EventCode": "0xF88C",
"EventName": "PM_LSU3_STORE_REJECT",
"BriefDescription": "All internal store rejects cause the instruction to go back to the SRQ and go to sleep until woken up to try again after the condition has been met"
},
- {,
+ {
"EventCode": "0x200F2",
"EventName": "PM_INST_DISP",
"BriefDescription": "# PPC Dispatched"
},
- {,
+ {
"EventCode": "0x4E05E",
"EventName": "PM_TM_OUTER_TBEGIN_DISP",
"BriefDescription": "Number of outer tbegin instructions dispatched. The dispatch unit determines whether the tbegin instruction is outer or nested. This is a speculative count, which includes flushed instructions"
},
- {,
+ {
"EventCode": "0x2D018",
"EventName": "PM_CMPLU_STALL_EXEC_UNIT",
"BriefDescription": "Completion stall due to execution units (FXU/VSU/CRU)"
},
- {,
+ {
"EventCode": "0x20B0",
"EventName": "PM_LSU_FLUSH_NEXT",
"BriefDescription": "LSU flush next reported at flush time. Sometimes these also come with an exception"
},
- {,
+ {
"EventCode": "0x3880",
"EventName": "PM_ISU2_ISS_HOLD_ALL",
"BriefDescription": "All ISU rejects"
},
- {,
+ {
"EventCode": "0xC884",
"EventName": "PM_LS3_LD_VECTOR_FIN",
"BriefDescription": "LS3 finished load vector op"
},
- {,
+ {
"EventCode": "0x360A8",
"EventName": "PM_L3_CO",
"BriefDescription": "L3 castout occurring (does not include casthrough or log writes (cinj/dmaw))"
},
- {,
+ {
"EventCode": "0x368A4",
"EventName": "PM_L3_CINJ",
"BriefDescription": "L3 castin of cache inject"
},
- {,
+ {
"EventCode": "0xC890",
"EventName": "PM_LSU_NCST",
"BriefDescription": "Asserts when a i=1 store op is sent to the nest. No record of issue pipe (LS0/LS1) is maintained so this is for both pipes. Probably don't need separate LS0 and LS1"
},
- {,
+ {
"EventCode": "0xD0B8",
"EventName": "PM_LSU_LMQ_FULL_CYC",
"BriefDescription": "Counts the number of cycles the LMQ is full"
},
- {,
+ {
"EventCode": "0x168B2",
"EventName": "PM_L3_GRP_GUESS_CORRECT",
"BriefDescription": "Prefetch scope predictor selected GS or NNS and was correct"
},
- {,
+ {
"EventCode": "0x48A4",
"EventName": "PM_STOP_FETCH_PENDING_CYC",
"BriefDescription": "Fetching is stopped due to an incoming instruction that will result in a flush"
},
- {,
+ {
"EventCode": "0x36884",
"EventName": "PM_L2_RCST_DISP_FAIL_ADDR",
"BriefDescription": "All D-side store dispatch attempts for this thread that failed due to address collision with RC/CO/SN/SQ"
},
- {,
+ {
"EventCode": "0x260AC",
"EventName": "PM_L3_PF_USAGE",
"BriefDescription": "Rotating sample of 32 PF actives"
diff --git a/tools/perf/pmu-events/arch/powerpc/power9/pipeline.json b/tools/perf/pmu-events/arch/powerpc/power9/pipeline.json
index b4772f54a271..d0265f255de2 100644
--- a/tools/perf/pmu-events/arch/powerpc/power9/pipeline.json
+++ b/tools/perf/pmu-events/arch/powerpc/power9/pipeline.json
@@ -1,530 +1,530 @@
[
- {,
+ {
"EventCode": "0x4D04C",
"EventName": "PM_DFU_BUSY",
"BriefDescription": "Cycles in which all 4 Decimal Floating Point units are busy. The DFU is running at capacity"
},
- {,
+ {
"EventCode": "0x100F6",
"EventName": "PM_IERAT_RELOAD",
"BriefDescription": "Number of I-ERAT reloads"
},
- {,
+ {
"EventCode": "0x201E2",
"EventName": "PM_MRK_LD_MISS_L1",
"BriefDescription": "Marked DL1 Demand Miss counted at exec time. Note that this count is per slice, so if a load spans multiple slices this event will increment multiple times for a single load."
},
- {,
+ {
"EventCode": "0x40010",
"EventName": "PM_PMC3_OVERFLOW",
"BriefDescription": "Overflow from counter 3"
},
- {,
+ {
"EventCode": "0x1005A",
"EventName": "PM_CMPLU_STALL_DFLONG",
"BriefDescription": "Finish stall because the NTF instruction was a multi-cycle instruction issued to the Decimal Floating Point execution pipe and waiting to finish. Includes decimal floating point instructions + 128 bit binary floating point instructions. Qualified by multicycle"
},
- {,
+ {
"EventCode": "0x4D140",
"EventName": "PM_MRK_DATA_FROM_ON_CHIP_CACHE",
"BriefDescription": "The processor's data cache was reloaded either shared or modified data from another core's L2/L3 on the same chip due to a marked load"
},
- {,
+ {
"EventCode": "0x3F14C",
"EventName": "PM_MRK_DPTEG_FROM_DL4",
"BriefDescription": "A Page Table Entry was loaded into the TLB from another chip's L4 on a different Node or Group (Distant) due to a marked data side request. When using Radix Page Translation, this count excludes PDE reloads. Only PTE reloads are included"
},
- {,
+ {
"EventCode": "0x1E040",
"EventName": "PM_DPTEG_FROM_L2_NO_CONFLICT",
"BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L2 without conflict due to a data side request. When using Radix Page Translation, this count excludes PDE reloads. Only PTE reloads are included"
},
- {,
+ {
"EventCode": "0x24052",
"EventName": "PM_FXU_IDLE",
"BriefDescription": "Cycles in which FXU0, FXU1, FXU2, and FXU3 are all idle"
},
- {,
+ {
"EventCode": "0x1E054",
"EventName": "PM_CMPLU_STALL",
"BriefDescription": "Nothing completed and ICT not empty"
},
- {,
+ {
"EventCode": "0x2",
"EventName": "PM_INST_CMPL",
"BriefDescription": "Number of PowerPC Instructions that completed."
},
- {,
+ {
"EventCode": "0x3D058",
"EventName": "PM_VSU_DP_FSQRT_FDIV",
"BriefDescription": "vector versions of fdiv,fsqrt"
},
- {,
+ {
"EventCode": "0x10006",
"EventName": "PM_DISP_HELD",
"BriefDescription": "Dispatch Held"
},
- {,
+ {
"EventCode": "0x200F8",
"EventName": "PM_EXT_INT",
"BriefDescription": "external interrupt"
},
- {,
+ {
"EventCode": "0x20008",
"EventName": "PM_ICT_EMPTY_CYC",
"BriefDescription": "Cycles in which the ICT is completely empty. No itags are assigned to any thread"
},
- {,
+ {
"EventCode": "0x4F146",
"EventName": "PM_MRK_DPTEG_FROM_L21_MOD",
"BriefDescription": "A Page Table Entry was loaded into the TLB with Modified (M) data from another core's L2 on the same chip due to a marked data side request. When using Radix Page Translation, this count excludes PDE reloads. Only PTE reloads are included"
},
- {,
+ {
"EventCode": "0x10056",
"EventName": "PM_MEM_READ",
"BriefDescription": "Reads from Memory from this thread (includes data/inst/xlate/l1prefetch/inst prefetch). Includes L4"
},
- {,
+ {
"EventCode": "0x3C04C",
"EventName": "PM_DATA_FROM_DL4",
"BriefDescription": "The processor's data cache was reloaded from another chip's L4 on a different Node or Group (Distant) due to a demand load"
},
- {,
+ {
"EventCode": "0x4E046",
"EventName": "PM_DPTEG_FROM_L21_MOD",
"BriefDescription": "A Page Table Entry was loaded into the TLB with Modified (M) data from another core's L2 on the same chip due to a data side request. When using Radix Page Translation, this count excludes PDE reloads. Only PTE reloads are included"
},
- {,
+ {
"EventCode": "0x2E016",
"EventName": "PM_NTC_ISSUE_HELD_ARB",
"BriefDescription": "The NTC instruction is being held at dispatch because it lost arbitration onto the issue pipe to another instruction (from the same thread or a different thread)"
},
- {,
+ {
"EventCode": "0x15156",
"EventName": "PM_SYNC_MRK_FX_DIVIDE",
"BriefDescription": "Marked fixed point divide that can cause a synchronous interrupt"
},
- {,
+ {
"EventCode": "0x1C056",
"EventName": "PM_DERAT_MISS_4K",
"BriefDescription": "Data ERAT Miss (Data TLB Access) page size 4K"
},
- {,
+ {
"EventCode": "0x2F142",
"EventName": "PM_MRK_DPTEG_FROM_L3_MEPF",
"BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L3 without dispatch conflicts hit on Mepf state. due to a marked data side request. When using Radix Page Translation, this count excludes PDE reloads. Only PTE reloads are included"
},
- {,
+ {
"EventCode": "0x4C15C",
"EventName": "PM_MRK_DERAT_MISS_16G_1G",
"BriefDescription": "Marked Data ERAT Miss (Data TLB Access) page size 16G (hpt mode) and 1G (radix mode)"
},
- {,
+ {
"EventCode": "0x10024",
"EventName": "PM_PMC5_OVERFLOW",
"BriefDescription": "Overflow from counter 5"
},
- {,
+ {
"EventCode": "0x4505E",
"EventName": "PM_FLOP_CMPL",
"BriefDescription": "Floating Point Operation Finished"
},
- {,
+ {
"EventCode": "0x2C018",
"EventName": "PM_CMPLU_STALL_DMISS_L21_L31",
"BriefDescription": "Completion stall by Dcache miss which resolved on chip ( excluding local L2/L3)"
},
- {,
+ {
"EventCode": "0x4006A",
"EventName": "PM_IERAT_RELOAD_16M",
"BriefDescription": "IERAT Reloaded (Miss) for a 16M page"
},
- {,
+ {
"EventCode": "0x4E010",
"EventName": "PM_ICT_NOSLOT_IC_L3MISS",
"BriefDescription": "Ict empty for this thread due to icache misses that were sourced from beyond the local L3. The source could be local/remote/distant memory or another core's cache"
},
- {,
+ {
"EventCode": "0x4D01C",
"EventName": "PM_ICT_NOSLOT_DISP_HELD_SYNC",
"BriefDescription": "Dispatch held due to a synchronizing instruction at dispatch"
},
- {,
+ {
"EventCode": "0x2D01A",
"EventName": "PM_ICT_NOSLOT_IC_MISS",
"BriefDescription": "Ict empty for this thread due to Icache Miss"
},
- {,
+ {
"EventCode": "0x4F14A",
"EventName": "PM_MRK_DPTEG_FROM_OFF_CHIP_CACHE",
"BriefDescription": "A Page Table Entry was loaded into the TLB either shared or modified data from another core's L2/L3 on a different chip (remote or distant) due to a marked data side request. When using Radix Page Translation, this count excludes PDE reloads. Only PTE reloads are included"
},
- {,
+ {
"EventCode": "0x30058",
"EventName": "PM_TLBIE_FIN",
"BriefDescription": "tlbie finished"
},
- {,
+ {
"EventCode": "0x100F8",
"EventName": "PM_ICT_NOSLOT_CYC",
"BriefDescription": "Number of cycles the ICT has no itags assigned to this thread"
},
- {,
+ {
"EventCode": "0x3E042",
"EventName": "PM_DPTEG_FROM_L3_DISP_CONFLICT",
"BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L3 with dispatch conflict due to a data side request. When using Radix Page Translation, this count excludes PDE reloads. Only PTE reloads are included"
},
- {,
+ {
"EventCode": "0x1F140",
"EventName": "PM_MRK_DPTEG_FROM_L2_NO_CONFLICT",
"BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L2 without conflict due to a marked data side request. When using Radix Page Translation, this count excludes PDE reloads. Only PTE reloads are included"
},
- {,
+ {
"EventCode": "0x1F058",
"EventName": "PM_RADIX_PWC_L2_PTE_FROM_L2",
"BriefDescription": "A Page Table Entry was reloaded to a level 2 page walk cache from the core's L2 data cache. This implies that level 3 and level 4 PWC accesses were not necessary for this translation"
},
- {,
+ {
"EventCode": "0x1D14A",
"EventName": "PM_MRK_DATA_FROM_RL2L3_MOD",
"BriefDescription": "The processor's data cache was reloaded with Modified (M) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to a marked load"
},
- {,
+ {
"EventCode": "0x10050",
"EventName": "PM_CHIP_PUMP_CPRED",
"BriefDescription": "Initial and Final Pump Scope was chip pump (prediction=correct) for all data types excluding data prefetch (demand load,inst prefetch,inst fetch,xlate)"
},
- {,
+ {
"EventCode": "0x45058",
"EventName": "PM_IC_MISS_CMPL",
"BriefDescription": "Non-speculative icache miss, counted at completion"
},
- {,
+ {
"EventCode": "0x2D150",
"EventName": "PM_MRK_DERAT_MISS_4K",
"BriefDescription": "Marked Data ERAT Miss (Data TLB Access) page size 4K"
},
- {,
+ {
"EventCode": "0x34058",
"EventName": "PM_ICT_NOSLOT_BR_MPRED_ICMISS",
"BriefDescription": "Ict empty for this thread due to Icache Miss and branch mispred"
},
- {,
+ {
"EventCode": "0x10022",
"EventName": "PM_PMC2_SAVED",
"BriefDescription": "PMC2 Rewind Value saved"
},
- {,
+ {
"EventCode": "0x2000A",
"EventName": "PM_HV_CYC",
"BriefDescription": "Cycles in which msr_hv is high. Note that this event does not take msr_pr into consideration"
},
- {,
+ {
"EventCode": "0x1F144",
"EventName": "PM_MRK_DPTEG_FROM_L3_NO_CONFLICT",
"BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L3 without conflict due to a marked data side request. When using Radix Page Translation, this count excludes PDE reloads. Only PTE reloads are included"
},
- {,
+ {
"EventCode": "0x300FC",
"EventName": "PM_DTLB_MISS",
"BriefDescription": "Data PTEG reload"
},
- {,
+ {
"EventCode": "0x2C046",
"EventName": "PM_DATA_FROM_RL2L3_MOD",
"BriefDescription": "The processor's data cache was reloaded with Modified (M) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to a demand load"
},
- {,
+ {
"EventCode": "0x20052",
"EventName": "PM_GRP_PUMP_MPRED",
"BriefDescription": "Final Pump Scope (Group) ended up either larger or smaller than Initial Pump Scope for all data types excluding data prefetch (demand load,inst prefetch,inst fetch,xlate)"
},
- {,
+ {
"EventCode": "0x3F05A",
"EventName": "PM_RADIX_PWC_L2_PDE_FROM_L3",
"BriefDescription": "A Page Directory Entry was reloaded to a level 2 page walk cache from the core's L3 data cache"
},
- {,
+ {
"EventCode": "0x1E04A",
"EventName": "PM_DPTEG_FROM_RL2L3_SHR",
"BriefDescription": "A Page Table Entry was loaded into the TLB with Shared (S) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to a data side request. When using Radix Page Translation, this count excludes PDE reloads. Only PTE reloads are included"
},
- {,
+ {
"EventCode": "0x10064",
"EventName": "PM_ICT_NOSLOT_DISP_HELD_TBEGIN",
"BriefDescription": "the NTC instruction is being held at dispatch because it is a tbegin instruction and there is an older tbegin in the pipeline that must complete before the younger tbegin can dispatch"
},
- {,
+ {
"EventCode": "0x2E046",
"EventName": "PM_DPTEG_FROM_RL2L3_MOD",
"BriefDescription": "A Page Table Entry was loaded into the TLB with Modified (M) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to a data side request. When using Radix Page Translation, this count excludes PDE reloads. Only PTE reloads are included"
},
- {,
+ {
"EventCode": "0x4F14C",
"EventName": "PM_MRK_DPTEG_FROM_DMEM",
"BriefDescription": "A Page Table Entry was loaded into the TLB from another chip's memory on the same Node or Group (Distant) due to a marked data side request. When using Radix Page Translation, this count excludes PDE reloads. Only PTE reloads are included"
},
- {,
+ {
"EventCode": "0x2E042",
"EventName": "PM_DPTEG_FROM_L3_MEPF",
"BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L3 without dispatch conflicts hit on Mepf state. due to a data side request. When using Radix Page Translation, this count excludes PDE reloads. Only PTE reloads are included"
},
- {,
+ {
"EventCode": "0x2D012",
"EventName": "PM_CMPLU_STALL_DFU",
"BriefDescription": "Finish stall because the NTF instruction was issued to the Decimal Floating Point execution pipe and waiting to finish. Includes decimal floating point instructions + 128 bit binary floating point instructions. Not qualified by multicycle"
},
- {,
+ {
"EventCode": "0x3C054",
"EventName": "PM_DERAT_MISS_16M_2M",
"BriefDescription": "Data ERAT Miss (Data TLB Access) page size 16M (HPT mode) or 2M (Radix mode)"
},
- {,
+ {
"EventCode": "0x4C04C",
"EventName": "PM_DATA_FROM_DMEM",
"BriefDescription": "The processor's data cache was reloaded from another chip's memory on the same Node or Group (Distant) due to a demand load"
},
- {,
+ {
"EventCode": "0x30022",
"EventName": "PM_PMC4_SAVED",
"BriefDescription": "PMC4 Rewind Value saved (matched condition)"
},
- {,
+ {
"EventCode": "0x200F4",
"EventName": "PM_RUN_CYC",
"BriefDescription": "Run_cycles"
},
- {,
+ {
"EventCode": "0x400F2",
"EventName": "PM_1PLUS_PPC_DISP",
"BriefDescription": "Cycles at least one Instr Dispatched"
},
- {,
+ {
"EventCode": "0x3D148",
"EventName": "PM_MRK_DATA_FROM_L21_MOD_CYC",
"BriefDescription": "Duration in cycles to reload with Modified (M) data from another core's L2 on the same chip due to a marked load"
},
- {,
+ {
"EventCode": "0x2F146",
"EventName": "PM_MRK_DPTEG_FROM_RL2L3_MOD",
"BriefDescription": "A Page Table Entry was loaded into the TLB with Modified (M) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to a marked data side request. When using Radix Page Translation, this count excludes PDE reloads. Only PTE reloads are included"
},
- {,
+ {
"EventCode": "0x4E01A",
"EventName": "PM_ICT_NOSLOT_DISP_HELD",
"BriefDescription": "Cycles in which the NTC instruction is held at dispatch for any reason"
},
- {,
+ {
"EventCode": "0x401EC",
"EventName": "PM_THRESH_EXC_2048",
"BriefDescription": "Threshold counter exceeded a value of 2048"
},
- {,
+ {
"EventCode": "0x35150",
"EventName": "PM_MRK_DATA_FROM_RL2L3_SHR",
"BriefDescription": "The processor's data cache was reloaded with Shared (S) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to a marked load"
},
- {,
+ {
"EventCode": "0x3E052",
"EventName": "PM_ICT_NOSLOT_IC_L3",
"BriefDescription": "Ict empty for this thread due to icache misses that were sourced from the local L3"
},
- {,
+ {
"EventCode": "0x2405A",
"EventName": "PM_NTC_FIN",
"BriefDescription": "Cycles in which the oldest instruction in the pipeline (NTC) finishes. This event is used to account for cycles in which work is being completed in the CPI stack"
},
- {,
+ {
"EventCode": "0x40052",
"EventName": "PM_PUMP_MPRED",
"BriefDescription": "Pump misprediction. Counts across all types of pumps for all data types excluding data prefetch (demand load,inst prefetch,inst fetch,xlate)"
},
- {,
+ {
"EventCode": "0x30056",
"EventName": "PM_TM_ABORTS",
"BriefDescription": "Number of TM transactions aborted"
},
- {,
+ {
"EventCode": "0x2404C",
"EventName": "PM_INST_FROM_MEMORY",
"BriefDescription": "The processor's Instruction cache was reloaded from a memory location including L4 from local remote or distant due to an instruction fetch (not prefetch)"
},
- {,
+ {
"EventCode": "0x30024",
"EventName": "PM_PMC6_OVERFLOW",
"BriefDescription": "Overflow from counter 6"
},
- {,
+ {
"EventCode": "0x10068",
"EventName": "PM_BRU_FIN",
"BriefDescription": "Branch Instruction Finished"
},
- {,
+ {
"EventCode": "0x3D154",
"EventName": "PM_MRK_DERAT_MISS_16M_2M",
"BriefDescription": "Marked Data ERAT Miss (Data TLB Access) page size 16M (hpt mode) or 2M (radix mode)"
},
- {,
+ {
"EventCode": "0x30020",
"EventName": "PM_PMC2_REWIND",
"BriefDescription": "PMC2 Rewind Event (did not match condition)"
},
- {,
+ {
"EventCode": "0x40064",
"EventName": "PM_DUMMY2_REMOVE_ME",
"BriefDescription": "Space holder for LS_PC_RELOAD_RA"
},
- {,
+ {
"EventCode": "0x3F148",
"EventName": "PM_MRK_DPTEG_FROM_DL2L3_SHR",
"BriefDescription": "A Page Table Entry was loaded into the TLB with Shared (S) data from another chip's L2 or L3 on a different Node or Group (Distant), as this chip due to a marked data side request. When using Radix Page Translation, this count excludes PDE reloads. Only PTE reloads are included"
},
- {,
+ {
"EventCode": "0x4D01E",
"EventName": "PM_ICT_NOSLOT_BR_MPRED",
"BriefDescription": "Ict empty for this thread due to branch mispred"
},
- {,
+ {
"EventCode": "0x1F148",
"EventName": "PM_MRK_DPTEG_FROM_ON_CHIP_CACHE",
"BriefDescription": "A Page Table Entry was loaded into the TLB either shared or modified data from another core's L2/L3 on the same chip due to a marked data side request.. When using Radix Page Translation, this count excludes PDE reloads. Only PTE reloads are included"
},
- {,
+ {
"EventCode": "0x3E046",
"EventName": "PM_DPTEG_FROM_L21_SHR",
"BriefDescription": "A Page Table Entry was loaded into the TLB with Shared (S) data from another core's L2 on the same chip due to a data side request. When using Radix Page Translation, this count excludes PDE reloads. Only PTE reloads are included"
},
- {,
+ {
"EventCode": "0x2F144",
"EventName": "PM_MRK_DPTEG_FROM_L31_MOD",
"BriefDescription": "A Page Table Entry was loaded into the TLB with Modified (M) data from another core's L3 on the same chip due to a marked data side request. When using Radix Page Translation, this count excludes PDE reloads. Only PTE reloads are included"
},
- {,
+ {
"EventCode": "0x14052",
"EventName": "PM_INST_GRP_PUMP_MPRED_RTY",
"BriefDescription": "Final Pump Scope (Group) ended up larger than Initial Pump Scope (Chip) for an instruction fetch"
},
- {,
+ {
"EventCode": "0xD0A8",
"EventName": "PM_DSLB_MISS",
"BriefDescription": "gate_and(sd_pc_c0_comp_valid AND sd_pc_c0_comp_thread(0:1)=tid,sd_pc_c0_comp_ppc_count(0:3)) + gate_and(sd_pc_c1_comp_valid AND sd_pc_c1_comp_thread(0:1)=tid,sd_pc_c1_comp_ppc_count(0:3))"
},
- {,
+ {
"EventCode": "0x4C058",
"EventName": "PM_MEM_CO",
"BriefDescription": "Memory castouts from this thread"
},
- {,
+ {
"EventCode": "0x40004",
"EventName": "PM_FXU_FIN",
"BriefDescription": "The fixed point unit Unit finished an instruction. Instructions that finish may not necessary complete."
},
- {,
+ {
"EventCode": "0x2C054",
"EventName": "PM_DERAT_MISS_64K",
"BriefDescription": "Data ERAT Miss (Data TLB Access) page size 64K"
},
- {,
+ {
"EventCode": "0x10018",
"EventName": "PM_IC_DEMAND_CYC",
"BriefDescription": "Icache miss demand cycles"
},
- {,
+ {
"EventCode": "0x2D14E",
"EventName": "PM_MRK_DATA_FROM_L21_SHR",
"BriefDescription": "The processor's data cache was reloaded with Shared (S) data from another core's L2 on the same chip due to a marked load"
},
- {,
+ {
"EventCode": "0x3405C",
"EventName": "PM_CMPLU_STALL_DPLONG",
"BriefDescription": "Finish stall because the NTF instruction was a scalar multi-cycle instruction issued to the Double Precision execution pipe and waiting to finish. Includes binary floating point instructions in 32 and 64 bit binary floating point format. Qualified by NOT vector AND multicycle"
},
- {,
+ {
"EventCode": "0x4D052",
"EventName": "PM_2FLOP_CMPL",
"BriefDescription": "DP vector version of fmul, fsub, fcmp, fsel, fabs, fnabs, fres ,fsqrte, fneg "
},
- {,
+ {
"EventCode": "0x1F142",
"EventName": "PM_MRK_DPTEG_FROM_L2",
"BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L2 due to a marked data side request. When using Radix Page Translation, this count excludes PDE reloads. Only PTE reloads are included"
},
- {,
+ {
"EventCode": "0x40062",
"EventName": "PM_DUMMY1_REMOVE_ME",
"BriefDescription": "Space holder for L2_PC_PM_MK_LDST_SCOPE_PRED_STATUS"
},
- {,
+ {
"EventCode": "0x4C012",
"EventName": "PM_CMPLU_STALL_ERAT_MISS",
"BriefDescription": "Finish stall because the NTF instruction was a load or store that suffered a translation miss"
},
- {,
+ {
"EventCode": "0x4D050",
"EventName": "PM_VSU_NON_FLOP_CMPL",
"BriefDescription": "Non FLOP operation completed"
},
- {,
+ {
"EventCode": "0x2E012",
"EventName": "PM_TM_TX_PASS_RUN_CYC",
"BriefDescription": "cycles spent in successful transactions"
},
- {,
+ {
"EventCode": "0x4D04E",
"EventName": "PM_VSU_FSQRT_FDIV",
"BriefDescription": "four flops operation (fdiv,fsqrt) Scalar Instructions only"
},
- {,
+ {
"EventCode": "0x4C120",
"EventName": "PM_MRK_DATA_FROM_L2_MEPF",
"BriefDescription": "The processor's data cache was reloaded from local core's L2 hit without dispatch conflicts on Mepf state. due to a marked load"
},
- {,
+ {
"EventCode": "0x10062",
"EventName": "PM_LD_L3MISS_PEND_CYC",
"BriefDescription": "Cycles L3 miss was pending for this thread"
},
- {,
+ {
"EventCode": "0x2F14C",
"EventName": "PM_MRK_DPTEG_FROM_MEMORY",
"BriefDescription": "A Page Table Entry was loaded into the TLB from a memory location including L4 from local remote or distant due to a marked data side request. When using Radix Page Translation, this count excludes PDE reloads. Only PTE reloads are included"
},
- {,
+ {
"EventCode": "0x14050",
"EventName": "PM_INST_CHIP_PUMP_CPRED",
"BriefDescription": "Initial and Final Pump Scope was chip pump (prediction=correct) for an instruction fetch"
},
- {,
+ {
"EventCode": "0x2000E",
"EventName": "PM_FXU_BUSY",
"BriefDescription": "Cycles in which all 4 FXUs are busy. The FXU is running at capacity"
},
- {,
+ {
"EventCode": "0x20066",
"EventName": "PM_TLB_MISS",
"BriefDescription": "TLB Miss (I + D)"
},
- {,
+ {
"EventCode": "0x10054",
"EventName": "PM_PUMP_CPRED",
"BriefDescription": "Pump prediction correct. Counts across all types of pumps for all data types excluding data prefetch (demand load,inst prefetch,inst fetch,xlate)"
},
- {,
+ {
"EventCode": "0x4D124",
"EventName": "PM_MRK_DATA_FROM_L31_SHR",
"BriefDescription": "The processor's data cache was reloaded with Shared (S) data from another core's L3 on the same chip due to a marked load"
},
- {,
+ {
"EventCode": "0x400F8",
"EventName": "PM_FLUSH",
"BriefDescription": "Flush (any type)"
},
- {,
+ {
"EventCode": "0x30004",
"EventName": "PM_CMPLU_STALL_EMQ_FULL",
"BriefDescription": "Finish stall because the next to finish instruction suffered an ERAT miss and the EMQ was full"
},
- {,
+ {
"EventCode": "0x1D154",
"EventName": "PM_MRK_DATA_FROM_L21_SHR_CYC",
"BriefDescription": "Duration in cycles to reload with Shared (S) data from another core's L2 on the same chip due to a marked load"
diff --git a/tools/perf/pmu-events/arch/powerpc/power9/pmc.json b/tools/perf/pmu-events/arch/powerpc/power9/pmc.json
index 8b3b0f3be664..9edab15cc447 100644
--- a/tools/perf/pmu-events/arch/powerpc/power9/pmc.json
+++ b/tools/perf/pmu-events/arch/powerpc/power9/pmc.json
@@ -1,117 +1,117 @@
[
- {,
+ {
"EventCode": "0x20036",
"EventName": "PM_BR_2PATH",
"BriefDescription": "Branches that are not strongly biased"
},
- {,
+ {
"EventCode": "0x40056",
"EventName": "PM_MEM_LOC_THRESH_LSU_HIGH",
"BriefDescription": "Local memory above threshold for LSU medium"
},
- {,
+ {
"EventCode": "0x40118",
"EventName": "PM_MRK_DCACHE_RELOAD_INTV",
"BriefDescription": "Combined Intervention event"
},
- {,
+ {
"EventCode": "0x4F148",
"EventName": "PM_MRK_DPTEG_FROM_DL2L3_MOD",
"BriefDescription": "A Page Table Entry was loaded into the TLB with Modified (M) data from another chip's L2 or L3 on a different Node or Group (Distant), as this chip due to a marked data side request. When using Radix Page Translation, this count excludes PDE reloads. Only PTE reloads are included"
},
- {,
+ {
"EventCode": "0x301E8",
"EventName": "PM_THRESH_EXC_64",
"BriefDescription": "Threshold counter exceeded a value of 64"
},
- {,
+ {
"EventCode": "0x4E04E",
"EventName": "PM_DPTEG_FROM_L3MISS",
"BriefDescription": "A Page Table Entry was loaded into the TLB from a location other than the local core's L3 due to a data side request. When using Radix Page Translation, this count excludes PDE reloads. Only PTE reloads are included"
},
- {,
+ {
"EventCode": "0x40050",
"EventName": "PM_SYS_PUMP_MPRED_RTY",
"BriefDescription": "Final Pump Scope (system) ended up larger than Initial Pump Scope (Chip/Group) for all data types excluding data prefetch (demand load,inst prefetch,inst fetch,xlate)"
},
- {,
+ {
"EventCode": "0x1F14E",
"EventName": "PM_MRK_DPTEG_FROM_L2MISS",
"BriefDescription": "A Page Table Entry was loaded into the TLB from a location other than the local core's L2 due to a marked data side request.. When using Radix Page Translation, this count excludes PDE reloads. Only PTE reloads are included"
},
- {,
+ {
"EventCode": "0x4D018",
"EventName": "PM_CMPLU_STALL_BRU",
"BriefDescription": "Completion stall due to a Branch Unit"
},
- {,
+ {
"EventCode": "0x45052",
"EventName": "PM_4FLOP_CMPL",
"BriefDescription": "4 FLOP instruction completed"
},
- {,
+ {
"EventCode": "0x3D142",
"EventName": "PM_MRK_DATA_FROM_LMEM",
"BriefDescription": "The processor's data cache was reloaded from the local chip's Memory due to a marked load"
},
- {,
+ {
"EventCode": "0x4C01E",
"EventName": "PM_CMPLU_STALL_CRYPTO",
"BriefDescription": "Finish stall because the NTF instruction was routed to the crypto execution pipe and was waiting to finish"
},
- {,
+ {
"EventCode": "0x3000C",
"EventName": "PM_FREQ_DOWN",
"BriefDescription": "Power Management: Below Threshold B"
},
- {,
+ {
"EventCode": "0x4D128",
"EventName": "PM_MRK_DATA_FROM_LMEM_CYC",
"BriefDescription": "Duration in cycles to reload from the local chip's Memory due to a marked load"
},
- {,
+ {
"EventCode": "0x4D054",
"EventName": "PM_8FLOP_CMPL",
"BriefDescription": "8 FLOP instruction completed"
},
- {,
+ {
"EventCode": "0x10026",
"EventName": "PM_TABLEWALK_CYC",
"BriefDescription": "Cycles when an instruction tablewalk is active"
},
- {,
+ {
"EventCode": "0x2C012",
"EventName": "PM_CMPLU_STALL_DCACHE_MISS",
"BriefDescription": "Finish stall because the NTF instruction was a load that missed the L1 and was waiting for the data to return from the nest"
},
- {,
+ {
"EventCode": "0x2E04C",
"EventName": "PM_DPTEG_FROM_MEMORY",
"BriefDescription": "A Page Table Entry was loaded into the TLB from a memory location including L4 from local remote or distant due to a data side request. When using Radix Page Translation, this count excludes PDE reloads. Only PTE reloads are included"
},
- {,
+ {
"EventCode": "0x3F142",
"EventName": "PM_MRK_DPTEG_FROM_L3_DISP_CONFLICT",
"BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L3 with dispatch conflict due to a marked data side request. When using Radix Page Translation, this count excludes PDE reloads. Only PTE reloads are included"
},
- {,
+ {
"EventCode": "0x4F142",
"EventName": "PM_MRK_DPTEG_FROM_L3",
"BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L3 due to a marked data side request. When using Radix Page Translation, this count excludes PDE reloads. Only PTE reloads are included"
},
- {,
+ {
"EventCode": "0x10060",
"EventName": "PM_TM_TRANS_RUN_CYC",
"BriefDescription": "run cycles in transactional state"
},
- {,
+ {
"EventCode": "0x1E04C",
"EventName": "PM_DPTEG_FROM_LL4",
"BriefDescription": "A Page Table Entry was loaded into the TLB from the local chip's L4 cache due to a data side request. When using Radix Page Translation, this count excludes PDE reloads. Only PTE reloads are included"
},
- {,
+ {
"EventCode": "0x45050",
"EventName": "PM_1FLOP_CMPL",
"BriefDescription": "one flop (fadd, fmul, fsub, fcmp, fsel, fabs, fnabs, fres, fsqrte, fneg) operation completed"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/powerpc/power9/translation.json b/tools/perf/pmu-events/arch/powerpc/power9/translation.json
index b27642676244..2360b5afec6b 100644
--- a/tools/perf/pmu-events/arch/powerpc/power9/translation.json
+++ b/tools/perf/pmu-events/arch/powerpc/power9/translation.json
@@ -1,227 +1,227 @@
[
- {,
+ {
"EventCode": "0x1E",
"EventName": "PM_CYC",
"BriefDescription": "Processor cycles"
},
- {,
+ {
"EventCode": "0x30010",
"EventName": "PM_PMC2_OVERFLOW",
"BriefDescription": "Overflow from counter 2"
},
- {,
+ {
"EventCode": "0x3C046",
"EventName": "PM_DATA_FROM_L21_SHR",
"BriefDescription": "The processor's data cache was reloaded with Shared (S) data from another core's L2 on the same chip due to a demand load"
},
- {,
+ {
"EventCode": "0x4D05C",
"EventName": "PM_DP_QP_FLOP_CMPL",
"BriefDescription": "Double-Precion or Quad-Precision instruction completed"
},
- {,
+ {
"EventCode": "0x4E04C",
"EventName": "PM_DPTEG_FROM_DMEM",
"BriefDescription": "A Page Table Entry was loaded into the TLB from another chip's memory on the same Node or Group (Distant) due to a data side request. When using Radix Page Translation, this count excludes PDE reloads. Only PTE reloads are included"
},
- {,
+ {
"EventCode": "0x20016",
"EventName": "PM_ST_FIN",
"BriefDescription": "Store finish count. Includes speculative activity"
},
- {,
+ {
"EventCode": "0x1504A",
"EventName": "PM_IPTEG_FROM_RL2L3_SHR",
"BriefDescription": "A Page Table Entry was loaded into the TLB with Shared (S) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to a instruction side request"
},
- {,
+ {
"EventCode": "0x40132",
"EventName": "PM_MRK_LSU_FIN",
"BriefDescription": "lsu marked instr PPC finish"
},
- {,
+ {
"EventCode": "0x3C05C",
"EventName": "PM_CMPLU_STALL_VFXU",
"BriefDescription": "Finish stall due to a vector fixed point instruction in the execution pipeline. These instructions get routed to the ALU, ALU2, and DIV pipes"
},
- {,
+ {
"EventCode": "0x30066",
"EventName": "PM_LSU_FIN",
"BriefDescription": "LSU Finished a PPC instruction (up to 4 per cycle)"
},
- {,
+ {
"EventCode": "0x2011C",
"EventName": "PM_MRK_NTC_CYC",
"BriefDescription": "Cycles during which the marked instruction is next to complete (completion is held up because the marked instruction hasn't completed yet)"
},
- {,
+ {
"EventCode": "0x3E048",
"EventName": "PM_DPTEG_FROM_DL2L3_SHR",
"BriefDescription": "A Page Table Entry was loaded into the TLB with Shared (S) data from another chip's L2 or L3 on a different Node or Group (Distant), as this chip due to a data side request. When using Radix Page Translation, this count excludes PDE reloads. Only PTE reloads are included"
},
- {,
+ {
"EventCode": "0x2E018",
"EventName": "PM_CMPLU_STALL_VFXLONG",
"BriefDescription": "Completion stall due to a long latency vector fixed point instruction (division, square root)"
},
- {,
+ {
"EventCode": "0x1C04E",
"EventName": "PM_DATA_FROM_L2MISS_MOD",
"BriefDescription": "The processor's data cache was reloaded from a location other than the local core's L2 due to a demand load"
},
- {,
+ {
"EventCode": "0x15048",
"EventName": "PM_IPTEG_FROM_ON_CHIP_CACHE",
"BriefDescription": "A Page Table Entry was loaded into the TLB either shared or modified data from another core's L2/L3 on the same chip due to a instruction side request"
},
- {,
+ {
"EventCode": "0x34046",
"EventName": "PM_INST_FROM_L21_SHR",
"BriefDescription": "The processor's Instruction cache was reloaded with Shared (S) data from another core's L2 on the same chip due to an instruction fetch (not prefetch)"
},
- {,
+ {
"EventCode": "0x1E058",
"EventName": "PM_STCX_FAIL",
"BriefDescription": "stcx failed"
},
- {,
+ {
"EventCode": "0x300F0",
"EventName": "PM_ST_MISS_L1",
"BriefDescription": "Store Missed L1"
},
- {,
+ {
"EventCode": "0x4C046",
"EventName": "PM_DATA_FROM_L21_MOD",
"BriefDescription": "The processor's data cache was reloaded with Modified (M) data from another core's L2 on the same chip due to a demand load"
},
- {,
+ {
"EventCode": "0x2504A",
"EventName": "PM_IPTEG_FROM_RL4",
"BriefDescription": "A Page Table Entry was loaded into the TLB from another chip's L4 on the same Node or Group ( Remote) due to a instruction side request"
},
- {,
+ {
"EventCode": "0x2003E",
"EventName": "PM_LSU_LMQ_SRQ_EMPTY_CYC",
"BriefDescription": "Cycles in which the LSU is empty for all threads (lmq and srq are completely empty)"
},
- {,
+ {
"EventCode": "0x201E6",
"EventName": "PM_THRESH_EXC_32",
"BriefDescription": "Threshold counter exceeded a value of 32"
},
- {,
+ {
"EventCode": "0x4405C",
"EventName": "PM_CMPLU_STALL_VDP",
"BriefDescription": "Finish stall because the NTF instruction was a vector instruction issued to the Double Precision execution pipe and waiting to finish. Includes binary floating point instructions in 32 and 64 bit binary floating point format. Not qualified multicycle. Qualified by vector"
},
- {,
+ {
"EventCode": "0x4D010",
"EventName": "PM_PMC1_SAVED",
"BriefDescription": "PMC1 Rewind Value saved"
},
- {,
+ {
"EventCode": "0x44042",
"EventName": "PM_INST_FROM_L3",
"BriefDescription": "The processor's Instruction cache was reloaded from local core's L3 due to an instruction fetch (not prefetch)"
},
- {,
+ {
"EventCode": "0x200FE",
"EventName": "PM_DATA_FROM_L2MISS",
"BriefDescription": "Demand LD - L2 Miss (not L2 hit)"
},
- {,
+ {
"EventCode": "0x2D14A",
"EventName": "PM_MRK_DATA_FROM_RL2L3_MOD_CYC",
"BriefDescription": "Duration in cycles to reload with Modified (M) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to a marked load"
},
- {,
+ {
"EventCode": "0x10028",
"EventName": "PM_STALL_END_ICT_EMPTY",
"BriefDescription": "The number a times the core transitioned from a stall to ICT-empty for this thread"
},
- {,
+ {
"EventCode": "0x2504C",
"EventName": "PM_IPTEG_FROM_MEMORY",
"BriefDescription": "A Page Table Entry was loaded into the TLB from a memory location including L4 from local remote or distant due to a instruction side request"
},
- {,
+ {
"EventCode": "0x4504A",
"EventName": "PM_IPTEG_FROM_OFF_CHIP_CACHE",
"BriefDescription": "A Page Table Entry was loaded into the TLB either shared or modified data from another core's L2/L3 on a different chip (remote or distant) due to a instruction side request"
},
- {,
+ {
"EventCode": "0x1404E",
"EventName": "PM_INST_FROM_L2MISS",
"BriefDescription": "The processor's Instruction cache was reloaded from a location other than the local core's L2 due to an instruction fetch (not prefetch)"
},
- {,
+ {
"EventCode": "0x34042",
"EventName": "PM_INST_FROM_L3_DISP_CONFLICT",
"BriefDescription": "The processor's Instruction cache was reloaded from local core's L3 with dispatch conflict due to an instruction fetch (not prefetch)"
},
- {,
+ {
"EventCode": "0x4E048",
"EventName": "PM_DPTEG_FROM_DL2L3_MOD",
"BriefDescription": "A Page Table Entry was loaded into the TLB with Modified (M) data from another chip's L2 or L3 on a different Node or Group (Distant), as this chip due to a data side request. When using Radix Page Translation, this count excludes PDE reloads. Only PTE reloads are included"
},
- {,
+ {
"EventCode": "0x200F0",
"EventName": "PM_ST_CMPL",
"BriefDescription": "Stores completed from S2Q (2nd-level store queue)."
},
- {,
+ {
"EventCode": "0x4E05C",
"EventName": "PM_LSU_REJECT_LHS",
"BriefDescription": "LSU Reject due to LHS (up to 4 per cycle)"
},
- {,
+ {
"EventCode": "0x14044",
"EventName": "PM_INST_FROM_L3_NO_CONFLICT",
"BriefDescription": "The processor's Instruction cache was reloaded from local core's L3 without conflict due to an instruction fetch (not prefetch)"
},
- {,
+ {
"EventCode": "0x3E04C",
"EventName": "PM_DPTEG_FROM_DL4",
"BriefDescription": "A Page Table Entry was loaded into the TLB from another chip's L4 on a different Node or Group (Distant) due to a data side request. When using Radix Page Translation, this count excludes PDE reloads. Only PTE reloads are included"
},
- {,
+ {
"EventCode": "0x1F15E",
"EventName": "PM_MRK_PROBE_NOP_CMPL",
"BriefDescription": "Marked probeNops completed"
},
- {,
+ {
"EventCode": "0x20018",
"EventName": "PM_ST_FWD",
"BriefDescription": "Store forwards that finished"
},
- {,
+ {
"EventCode": "0x1D142",
"EventName": "PM_MRK_DATA_FROM_L31_ECO_SHR_CYC",
"BriefDescription": "Duration in cycles to reload with Shared (S) data from another core's ECO L3 on the same chip due to a marked load"
},
- {,
+ {
"EventCode": "0x24042",
"EventName": "PM_INST_FROM_L3_MEPF",
"BriefDescription": "The processor's Instruction cache was reloaded from local core's L3 without dispatch conflicts hit on Mepf state. due to an instruction fetch (not prefetch)"
},
- {,
+ {
"EventCode": "0x25046",
"EventName": "PM_IPTEG_FROM_RL2L3_MOD",
"BriefDescription": "A Page Table Entry was loaded into the TLB with Modified (M) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to a instruction side request"
},
- {,
+ {
"EventCode": "0x3504A",
"EventName": "PM_IPTEG_FROM_RMEM",
"BriefDescription": "A Page Table Entry was loaded into the TLB from another chip's memory on the same Node or Group ( Remote) due to a instruction side request"
},
- {,
+ {
"EventCode": "0x3C05A",
"EventName": "PM_CMPLU_STALL_VDPLONG",
"BriefDescription": "Finish stall because the NTF instruction was a scalar multi-cycle instruction issued to the Double Precision execution pipe and waiting to finish. Includes binary floating point instructions in 32 and 64 bit binary floating point format. Qualified by NOT vector AND multicycle"
},
- {,
+ {
"EventCode": "0x2E01C",
"EventName": "PM_CMPLU_STALL_TLBIE",
"BriefDescription": "Finish stall because the NTF instruction was a tlbie waiting for response from L2"
}
-] \ No newline at end of file
+]
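
The two POWER9 hunks above are purely mechanical repairs: every event object opened with "{," (a stray comma evidently tolerated by perf's own event parser but rejected by strict JSON tools), and each file was missing its final newline. A minimal lint sketch that would catch both defects before they land; check_pmu_json.py is a hypothetical helper, not part of tools/perf:

#!/usr/bin/env python3
# check_pmu_json.py -- hypothetical lint helper, not part of tools/perf.
# Flags the two defects fixed above: JSON syntax errors such as the
# stray "{," token, and a missing newline at end of file.
import json
import sys

def check(path):
    with open(path, "rb") as f:
        raw = f.read()
    ok = True
    if not raw.endswith(b"\n"):
        print("%s: no newline at end of file" % path)
        ok = False
    try:
        json.loads(raw.decode("utf-8"))
    except ValueError as err:
        # "{," trips this with "Expecting property name enclosed in double quotes"
        print("%s: %s" % (path, err))
        ok = False
    return ok

if __name__ == "__main__":
    results = [check(p) for p in sys.argv[1:]]
    sys.exit(0 if all(results) else 1)
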
diff --git a/tools/perf/pmu-events/arch/x86/broadwell/bdw-metrics.json b/tools/perf/pmu-events/arch/x86/broadwell/bdw-metrics.json
index 212b117a8ffb..bc7151d639d7 100644
--- a/tools/perf/pmu-events/arch/x86/broadwell/bdw-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/broadwell/bdw-metrics.json
@@ -1,352 +1,352 @@
[
{
- "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)",
- "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.",
"BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
+ "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)",
"MetricGroup": "TopdownL1",
- "MetricName": "Frontend_Bound"
+ "MetricName": "Frontend_Bound",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound."
},
{
- "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
- "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
"BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
"MetricGroup": "TopdownL1_SMT",
- "MetricName": "Frontend_Bound_SMT"
+ "MetricName": "Frontend_Bound_SMT",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU."
},
{
- "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)",
- "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
"BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations",
+ "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)",
"MetricGroup": "TopdownL1",
- "MetricName": "Bad_Speculation"
+ "MetricName": "Bad_Speculation",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example."
},
{
- "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
- "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU.",
"BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
"MetricGroup": "TopdownL1_SMT",
- "MetricName": "Bad_Speculation_SMT"
+ "MetricName": "Bad_Speculation_SMT",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU."
},
{
- "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )",
- "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
"BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
+ "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )",
"MetricGroup": "TopdownL1",
- "MetricName": "Backend_Bound"
+ "MetricName": "Backend_Bound",
+ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound."
},
{
- "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )",
- "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
"BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )",
"MetricGroup": "TopdownL1_SMT",
- "MetricName": "Backend_Bound_SMT"
+ "MetricName": "Backend_Bound_SMT",
+ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. SMT version; use when SMT is enabled and measuring per logical CPU."
},
{
- "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)",
- "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. ",
"BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)",
"MetricGroup": "TopdownL1",
- "MetricName": "Retiring"
+ "MetricName": "Retiring",
+ "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. "
},
{
- "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
- "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. SMT version; use when SMT is enabled and measuring per logical CPU.",
"BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
"MetricGroup": "TopdownL1_SMT",
- "MetricName": "Retiring_SMT"
+ "MetricName": "Retiring_SMT",
+ "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. SMT version; use when SMT is enabled and measuring per logical CPU."
},
{
+ "BriefDescription": "Instructions Per Cycle (per Logical Processor)",
"MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
- "BriefDescription": "Instructions Per Cycle (per logical thread)",
"MetricGroup": "TopDownL1",
"MetricName": "IPC"
},
{
- "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
"BriefDescription": "Uops Per Instruction",
- "MetricGroup": "Pipeline;Retiring",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
+ "MetricGroup": "Pipeline;Retire",
"MetricName": "UPI"
},
{
- "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
"BriefDescription": "Instruction per taken branch",
- "MetricGroup": "Branches;PGO",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
+ "MetricGroup": "Branches;Fetch_BW;PGO",
"MetricName": "IpTB"
},
{
- "MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
"BriefDescription": "Branch instructions per taken branch. ",
+ "MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;PGO",
"MetricName": "BpTB"
},
{
- "MetricExpr": "min( 1 , IDQ.MITE_UOPS / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 16 * ( ICACHE.HIT + ICACHE.MISSES ) / 4.0 ) )",
"BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely (includes speculatively fetches) consumed by program instructions",
- "MetricGroup": "PGO",
+ "MetricExpr": "min( 1 , IDQ.MITE_UOPS / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 16 * ( ICACHE.HIT + ICACHE.MISSES ) / 4.0 ) )",
+ "MetricGroup": "PGO;IcMiss",
"MetricName": "IFetch_Line_Utilization"
},
{
- "MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ) )",
"BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
- "MetricGroup": "DSB;Frontend_Bandwidth",
+ "MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ) )",
+ "MetricGroup": "DSB;Fetch_BW",
"MetricName": "DSB_Coverage"
},
{
+ "BriefDescription": "Cycles Per Instruction (per Logical Processor)",
"MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
- "BriefDescription": "Cycles Per Instruction (threaded)",
"MetricGroup": "Pipeline;Summary",
"MetricName": "CPI"
},
{
+ "BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active.",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD",
- "BriefDescription": "Per-thread actual clocks when the logical processor is active.",
"MetricGroup": "Summary",
"MetricName": "CLKS"
},
{
+ "BriefDescription": "Total issue-pipeline slots (per-Physical Core)",
"MetricExpr": "4 * cycles",
- "BriefDescription": "Total issue-pipeline slots (per core)",
"MetricGroup": "TopDownL1",
"MetricName": "SLOTS"
},
{
+ "BriefDescription": "Total issue-pipeline slots (per-Physical Core)",
"MetricExpr": "4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
- "BriefDescription": "Total issue-pipeline slots (per core)",
"MetricGroup": "TopDownL1_SMT",
"MetricName": "SLOTS_SMT"
},
{
+ "BriefDescription": "Instructions per Load (lower number means higher occurance rate)",
"MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_LOADS",
- "BriefDescription": "Instructions per Load (lower number means loads are more frequent)",
- "MetricGroup": "Instruction_Type;L1_Bound",
+ "MetricGroup": "Instruction_Type",
"MetricName": "IpL"
},
{
+ "BriefDescription": "Instructions per Store (lower number means higher occurance rate)",
"MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_STORES",
- "BriefDescription": "Instructions per Store",
- "MetricGroup": "Instruction_Type;Store_Bound",
+ "MetricGroup": "Instruction_Type",
"MetricName": "IpS"
},
{
+ "BriefDescription": "Instructions per Branch (lower number means higher occurance rate)",
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES",
- "BriefDescription": "Instructions per Branch",
- "MetricGroup": "Branches;Instruction_Type;Port_5;Port_6",
+ "MetricGroup": "Branches;Instruction_Type",
"MetricName": "IpB"
},
{
+ "BriefDescription": "Instruction per (near) call (lower number means higher occurance rate)",
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_CALL",
- "BriefDescription": "Instruction per (near) call",
"MetricGroup": "Branches",
"MetricName": "IpCall"
},
{
- "MetricExpr": "INST_RETIRED.ANY",
"BriefDescription": "Total number of retired Instructions",
+ "MetricExpr": "INST_RETIRED.ANY",
"MetricGroup": "Summary",
"MetricName": "Instructions"
},
{
- "MetricExpr": "INST_RETIRED.ANY / cycles",
"BriefDescription": "Instructions Per Cycle (per physical core)",
+ "MetricExpr": "INST_RETIRED.ANY / cycles",
"MetricGroup": "SMT",
"MetricName": "CoreIPC"
},
{
- "MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"BriefDescription": "Instructions Per Cycle (per physical core)",
+ "MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"MetricGroup": "SMT",
"MetricName": "CoreIPC_SMT"
},
{
- "MetricExpr": "(( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / cycles",
"BriefDescription": "Floating Point Operations Per Cycle",
+ "MetricExpr": "(( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / cycles",
"MetricGroup": "FLOPS",
"MetricName": "FLOPc"
},
{
- "MetricExpr": "(( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"BriefDescription": "Floating Point Operations Per Cycle",
+ "MetricExpr": "(( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"MetricGroup": "FLOPS_SMT",
"MetricName": "FLOPc_SMT"
},
{
- "MetricExpr": "UOPS_EXECUTED.THREAD / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)",
"BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
- "MetricGroup": "Pipeline;Ports_Utilization",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)",
+ "MetricGroup": "Pipeline",
"MetricName": "ILP"
},
{
+ "BriefDescription": "Branch Misprediction Cost: Fraction of TopDown slots wasted per non-speculative branch misprediction (jeclear)",
"MetricExpr": "( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * cycles)) * (12 * ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT + BACLEARS.ANY ) / cycles) / (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * cycles)) ) * (4 * cycles) / BR_MISP_RETIRED.ALL_BRANCHES",
- "BriefDescription": "Branch Misprediction Cost: Fraction of TopDown slots wasted per branch misprediction (jeclear and baclear)",
- "MetricGroup": "Branch_Mispredicts",
+ "MetricGroup": "BrMispredicts",
"MetricName": "Branch_Misprediction_Cost"
},
{
+ "BriefDescription": "Branch Misprediction Cost: Fraction of TopDown slots wasted per non-speculative branch misprediction (jeclear)",
"MetricExpr": "( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) * (12 * ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT + BACLEARS.ANY ) / cycles) / (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) ) * (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) / BR_MISP_RETIRED.ALL_BRANCHES",
- "BriefDescription": "Branch Misprediction Cost: Fraction of TopDown slots wasted per branch misprediction (jeclear and baclear)",
- "MetricGroup": "Branch_Mispredicts_SMT",
+ "MetricGroup": "BrMispredicts_SMT",
"MetricName": "Branch_Misprediction_Cost_SMT"
},
{
- "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
"BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear)",
- "MetricGroup": "Branch_Mispredicts",
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "BrMispredicts",
"MetricName": "IpMispredict"
},
{
+ "BriefDescription": "Core actual clocks when any Logical Processor is active on the Physical Core",
"MetricExpr": "( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )",
- "BriefDescription": "Core actual clocks when any thread is active on the physical core",
"MetricGroup": "SMT",
"MetricName": "CORE_CLKS"
},
{
- "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_UOPS_RETIRED.L1_MISS + mem_load_uops_retired.hit_lfb )",
"BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads (in core cycles)",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_UOPS_RETIRED.L1_MISS + mem_load_uops_retired.hit_lfb )",
"MetricGroup": "Memory_Bound;Memory_Lat",
"MetricName": "Load_Miss_Real_Latency"
},
{
+ "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)",
"MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES",
- "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-thread)",
"MetricGroup": "Memory_Bound;Memory_BW",
"MetricName": "MLP"
},
{
- "MetricExpr": "( cpu@ITLB_MISSES.WALK_DURATION\\,cmask\\=1@ + cpu@DTLB_LOAD_MISSES.WALK_DURATION\\,cmask\\=1@ + cpu@DTLB_STORE_MISSES.WALK_DURATION\\,cmask\\=1@ + 7 * ( DTLB_STORE_MISSES.WALK_COMPLETED + DTLB_LOAD_MISSES.WALK_COMPLETED + ITLB_MISSES.WALK_COMPLETED ) ) / cycles",
"BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
+ "MetricExpr": "( cpu@ITLB_MISSES.WALK_DURATION\\,cmask\\=1@ + cpu@DTLB_LOAD_MISSES.WALK_DURATION\\,cmask\\=1@ + cpu@DTLB_STORE_MISSES.WALK_DURATION\\,cmask\\=1@ + 7 * ( DTLB_STORE_MISSES.WALK_COMPLETED + DTLB_LOAD_MISSES.WALK_COMPLETED + ITLB_MISSES.WALK_COMPLETED ) ) / cycles",
"MetricGroup": "TLB",
"MetricName": "Page_Walks_Utilization"
},
{
- "MetricExpr": "( cpu@ITLB_MISSES.WALK_DURATION\\,cmask\\=1@ + cpu@DTLB_LOAD_MISSES.WALK_DURATION\\,cmask\\=1@ + cpu@DTLB_STORE_MISSES.WALK_DURATION\\,cmask\\=1@ + 7 * ( DTLB_STORE_MISSES.WALK_COMPLETED + DTLB_LOAD_MISSES.WALK_COMPLETED + ITLB_MISSES.WALK_COMPLETED ) ) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
+ "MetricExpr": "( cpu@ITLB_MISSES.WALK_DURATION\\,cmask\\=1@ + cpu@DTLB_LOAD_MISSES.WALK_DURATION\\,cmask\\=1@ + cpu@DTLB_STORE_MISSES.WALK_DURATION\\,cmask\\=1@ + 7 * ( DTLB_STORE_MISSES.WALK_COMPLETED + DTLB_LOAD_MISSES.WALK_COMPLETED + ITLB_MISSES.WALK_COMPLETED ) ) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"MetricGroup": "TLB_SMT",
"MetricName": "Page_Walks_Utilization_SMT"
},
{
- "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
"BriefDescription": "Average data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
"MetricGroup": "Memory_BW",
"MetricName": "L1D_Cache_Fill_BW"
},
{
- "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
"BriefDescription": "Average data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
"MetricGroup": "Memory_BW",
"MetricName": "L2_Cache_Fill_BW"
},
{
- "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
"BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
"MetricGroup": "Memory_BW",
"MetricName": "L3_Cache_Fill_BW"
},
{
- "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L1_MISS / INST_RETIRED.ANY",
"BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
- "MetricGroup": "Cache_Misses;",
+ "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L1_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "Cache_Misses",
"MetricName": "L1MPKI"
},
{
- "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
"BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads",
- "MetricGroup": "Cache_Misses;",
+ "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "Cache_Misses",
"MetricName": "L2MPKI"
},
{
- "MetricExpr": "1000 * L2_RQSTS.MISS / INST_RETIRED.ANY",
"BriefDescription": "L2 cache misses per kilo instruction for all request types (including speculative)",
- "MetricGroup": "Cache_Misses;",
+ "MetricExpr": "1000 * L2_RQSTS.MISS / INST_RETIRED.ANY",
+ "MetricGroup": "Cache_Misses",
"MetricName": "L2MPKI_All"
},
{
- "MetricExpr": "1000 * ( L2_RQSTS.REFERENCES - L2_RQSTS.MISS ) / INST_RETIRED.ANY",
"BriefDescription": "L2 cache hits per kilo instruction for all request types (including speculative)",
- "MetricGroup": "Cache_Misses;",
+ "MetricExpr": "1000 * ( L2_RQSTS.REFERENCES - L2_RQSTS.MISS ) / INST_RETIRED.ANY",
+ "MetricGroup": "Cache_Misses",
"MetricName": "L2HPKI_All"
},
{
- "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L3_MISS / INST_RETIRED.ANY",
"BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads",
- "MetricGroup": "Cache_Misses;",
+ "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L3_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "Cache_Misses",
"MetricName": "L3MPKI"
},
{
- "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
"BriefDescription": "Average CPU Utilization",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
"MetricGroup": "Summary",
"MetricName": "CPU_Utilization"
},
{
- "MetricExpr": "( (( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / 1000000000 ) / duration_time",
"BriefDescription": "Giga Floating Point Operations Per Second",
+ "MetricExpr": "( (( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / 1000000000 ) / duration_time",
"MetricGroup": "FLOPS;Summary",
"MetricName": "GFLOPs"
},
{
- "MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
"BriefDescription": "Average Frequency Utilization relative nominal frequency",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
"MetricGroup": "Power",
"MetricName": "Turbo_Utilization"
},
{
+ "BriefDescription": "Fraction of cycles where both hardware Logical Processors were active",
"MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0",
- "BriefDescription": "Fraction of cycles where both hardware threads were active",
"MetricGroup": "SMT;Summary",
"MetricName": "SMT_2T_Utilization"
},
{
- "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
"BriefDescription": "Fraction of cycles spent in Kernel mode",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
"MetricGroup": "Summary",
"MetricName": "Kernel_Utilization"
},
{
- "MetricExpr": "64 * ( arb@event\\=0x81\\,umask\\=0x1@ + arb@event\\=0x84\\,umask\\=0x1@ ) / 1000000 / duration_time / 1000",
"BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
+ "MetricExpr": "64 * ( arb@event\\=0x81\\,umask\\=0x1@ + arb@event\\=0x84\\,umask\\=0x1@ ) / 1000000 / duration_time / 1000",
"MetricGroup": "Memory_BW",
"MetricName": "DRAM_BW_Use"
},
{
+ "BriefDescription": "C3 residency percent per core",
"MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
- "BriefDescription": "C3 residency percent per core",
"MetricName": "C3_Core_Residency"
},
{
+ "BriefDescription": "C6 residency percent per core",
"MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
- "BriefDescription": "C6 residency percent per core",
"MetricName": "C6_Core_Residency"
},
{
+ "BriefDescription": "C7 residency percent per core",
"MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
- "BriefDescription": "C7 residency percent per core",
"MetricName": "C7_Core_Residency"
},
{
+ "BriefDescription": "C2 residency percent per package",
"MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
- "BriefDescription": "C2 residency percent per package",
"MetricName": "C2_Pkg_Residency"
},
{
+ "BriefDescription": "C3 residency percent per package",
"MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
- "BriefDescription": "C3 residency percent per package",
"MetricName": "C3_Pkg_Residency"
},
{
+ "BriefDescription": "C6 residency percent per package",
"MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
- "BriefDescription": "C6 residency percent per package",
"MetricName": "C6_Pkg_Residency"
},
{
+ "BriefDescription": "C7 residency percent per package",
"MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
- "BriefDescription": "C7 residency percent per package",
"MetricName": "C7_Pkg_Residency"
}
]
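
The Broadwell metrics hunks above change no formulas in the first eight TopdownL1 entries; the churn there is each object's keys being re-emitted in alphabetical order (BriefDescription, MetricExpr, MetricGroup, MetricName, PublicDescription). The later entries additionally rename metric groups (Retiring becomes Retire, Frontend_Bandwidth becomes Fetch_BW, Branch_Mispredicts becomes BrMispredicts, and so on) and reword several descriptions around "Logical Processor" terminology. The key ordering is exactly what a round trip through a sorting JSON serializer produces; a sketch of such a normalization pass, assuming the metric files are plain JSON arrays as shown (sort_metrics_json.py is hypothetical, not the generator actually used for this commit):

#!/usr/bin/env python3
# sort_metrics_json.py -- hypothetical normalization pass. Re-emits a
# metrics file with each object's keys in alphabetical order, a fixed
# indent, and a trailing newline, matching the layout adopted above.
import json
import sys

def normalize(path):
    with open(path) as f:
        metrics = json.load(f)
    with open(path, "w") as f:
        json.dump(metrics, f, indent=4, sort_keys=True)
        f.write("\n")

for path in sys.argv[1:]:
    normalize(path)

As a sanity check on the reordered entries, note that the four TopdownL1 fractions are constructed to sum to 1: the Backend_Bound MetricExpr is literally 1 minus the Frontend_Bound, Bad_Speculation, and Retiring expressions.
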
diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/bdx-metrics.json b/tools/perf/pmu-events/arch/x86/broadwellx/bdx-metrics.json
index c6f9762f32c0..113d19e92678 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellx/bdx-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellx/bdx-metrics.json
@@ -1,370 +1,370 @@
[
{
- "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)",
- "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.",
"BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
+ "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)",
"MetricGroup": "TopdownL1",
- "MetricName": "Frontend_Bound"
+ "MetricName": "Frontend_Bound",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound."
},
{
- "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
- "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
"BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
"MetricGroup": "TopdownL1_SMT",
- "MetricName": "Frontend_Bound_SMT"
+ "MetricName": "Frontend_Bound_SMT",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU."
},
{
- "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)",
- "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
"BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations",
+ "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)",
"MetricGroup": "TopdownL1",
- "MetricName": "Bad_Speculation"
+ "MetricName": "Bad_Speculation",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example."
},
{
- "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
- "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU.",
"BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
"MetricGroup": "TopdownL1_SMT",
- "MetricName": "Bad_Speculation_SMT"
+ "MetricName": "Bad_Speculation_SMT",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU."
},
{
- "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )",
- "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
"BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
+ "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )",
"MetricGroup": "TopdownL1",
- "MetricName": "Backend_Bound"
+ "MetricName": "Backend_Bound",
+ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound."
},
{
- "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )",
- "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
"BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )",
"MetricGroup": "TopdownL1_SMT",
- "MetricName": "Backend_Bound_SMT"
+ "MetricName": "Backend_Bound_SMT",
+ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. SMT version; use when SMT is enabled and measuring per logical CPU."
},
{
- "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)",
- "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. ",
"BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)",
"MetricGroup": "TopdownL1",
- "MetricName": "Retiring"
+ "MetricName": "Retiring",
+ "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. "
},
{
- "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
- "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. SMT version; use when SMT is enabled and measuring per logical CPU.",
"BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
"MetricGroup": "TopdownL1_SMT",
- "MetricName": "Retiring_SMT"
+ "MetricName": "Retiring_SMT",
+ "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. SMT version; use when SMT is enabled and measuring per logical CPU."
},
{
+ "BriefDescription": "Instructions Per Cycle (per Logical Processor)",
"MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
- "BriefDescription": "Instructions Per Cycle (per logical thread)",
"MetricGroup": "TopDownL1",
"MetricName": "IPC"
},
{
- "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
"BriefDescription": "Uops Per Instruction",
- "MetricGroup": "Pipeline;Retiring",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
+ "MetricGroup": "Pipeline;Retire",
"MetricName": "UPI"
},
{
- "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
"BriefDescription": "Instruction per taken branch",
- "MetricGroup": "Branches;PGO",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
+ "MetricGroup": "Branches;Fetch_BW;PGO",
"MetricName": "IpTB"
},
{
- "MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
"BriefDescription": "Branch instructions per taken branch. ",
+ "MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;PGO",
"MetricName": "BpTB"
},
{
- "MetricExpr": "min( 1 , IDQ.MITE_UOPS / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 16 * ( ICACHE.HIT + ICACHE.MISSES ) / 4.0 ) )",
"BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely (includes speculatively fetches) consumed by program instructions",
- "MetricGroup": "PGO",
+ "MetricExpr": "min( 1 , IDQ.MITE_UOPS / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 16 * ( ICACHE.HIT + ICACHE.MISSES ) / 4.0 ) )",
+ "MetricGroup": "PGO;IcMiss",
"MetricName": "IFetch_Line_Utilization"
},
{
- "MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ) )",
"BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
- "MetricGroup": "DSB;Frontend_Bandwidth",
+ "MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ) )",
+ "MetricGroup": "DSB;Fetch_BW",
"MetricName": "DSB_Coverage"
},
{
+ "BriefDescription": "Cycles Per Instruction (per Logical Processor)",
"MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
- "BriefDescription": "Cycles Per Instruction (threaded)",
"MetricGroup": "Pipeline;Summary",
"MetricName": "CPI"
},
{
+ "BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active.",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD",
- "BriefDescription": "Per-thread actual clocks when the logical processor is active.",
"MetricGroup": "Summary",
"MetricName": "CLKS"
},
{
+ "BriefDescription": "Total issue-pipeline slots (per-Physical Core)",
"MetricExpr": "4 * cycles",
- "BriefDescription": "Total issue-pipeline slots (per core)",
"MetricGroup": "TopDownL1",
"MetricName": "SLOTS"
},
{
+ "BriefDescription": "Total issue-pipeline slots (per-Physical Core)",
"MetricExpr": "4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
- "BriefDescription": "Total issue-pipeline slots (per core)",
"MetricGroup": "TopDownL1_SMT",
"MetricName": "SLOTS_SMT"
},
{
+ "BriefDescription": "Instructions per Load (lower number means higher occurance rate)",
"MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_LOADS",
- "BriefDescription": "Instructions per Load (lower number means loads are more frequent)",
- "MetricGroup": "Instruction_Type;L1_Bound",
+ "MetricGroup": "Instruction_Type",
"MetricName": "IpL"
},
{
+ "BriefDescription": "Instructions per Store (lower number means higher occurance rate)",
"MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_STORES",
- "BriefDescription": "Instructions per Store",
- "MetricGroup": "Instruction_Type;Store_Bound",
+ "MetricGroup": "Instruction_Type",
"MetricName": "IpS"
},
{
+ "BriefDescription": "Instructions per Branch (lower number means higher occurance rate)",
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES",
- "BriefDescription": "Instructions per Branch",
- "MetricGroup": "Branches;Instruction_Type;Port_5;Port_6",
+ "MetricGroup": "Branches;Instruction_Type",
"MetricName": "IpB"
},
{
+ "BriefDescription": "Instruction per (near) call (lower number means higher occurance rate)",
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_CALL",
- "BriefDescription": "Instruction per (near) call",
"MetricGroup": "Branches",
"MetricName": "IpCall"
},
{
- "MetricExpr": "INST_RETIRED.ANY",
"BriefDescription": "Total number of retired Instructions",
+ "MetricExpr": "INST_RETIRED.ANY",
"MetricGroup": "Summary",
"MetricName": "Instructions"
},
{
- "MetricExpr": "INST_RETIRED.ANY / cycles",
"BriefDescription": "Instructions Per Cycle (per physical core)",
+ "MetricExpr": "INST_RETIRED.ANY / cycles",
"MetricGroup": "SMT",
"MetricName": "CoreIPC"
},
{
- "MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"BriefDescription": "Instructions Per Cycle (per physical core)",
+ "MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"MetricGroup": "SMT",
"MetricName": "CoreIPC_SMT"
},
{
- "MetricExpr": "(( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / cycles",
"BriefDescription": "Floating Point Operations Per Cycle",
+ "MetricExpr": "(( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / cycles",
"MetricGroup": "FLOPS",
"MetricName": "FLOPc"
},
{
- "MetricExpr": "(( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"BriefDescription": "Floating Point Operations Per Cycle",
+ "MetricExpr": "(( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"MetricGroup": "FLOPS_SMT",
"MetricName": "FLOPc_SMT"
},
{
- "MetricExpr": "UOPS_EXECUTED.THREAD / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)",
"BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
- "MetricGroup": "Pipeline;Ports_Utilization",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)",
+ "MetricGroup": "Pipeline",
"MetricName": "ILP"
},
{
+ "BriefDescription": "Branch Misprediction Cost: Fraction of TopDown slots wasted per non-speculative branch misprediction (jeclear)",
"MetricExpr": "( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * cycles)) * (12 * ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT + BACLEARS.ANY ) / cycles) / (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * cycles)) ) * (4 * cycles) / BR_MISP_RETIRED.ALL_BRANCHES",
- "BriefDescription": "Branch Misprediction Cost: Fraction of TopDown slots wasted per branch misprediction (jeclear and baclear)",
- "MetricGroup": "Branch_Mispredicts",
+ "MetricGroup": "BrMispredicts",
"MetricName": "Branch_Misprediction_Cost"
},
{
+ "BriefDescription": "Branch Misprediction Cost: Fraction of TopDown slots wasted per non-speculative branch misprediction (jeclear)",
"MetricExpr": "( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) * (12 * ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT + BACLEARS.ANY ) / cycles) / (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) ) * (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) / BR_MISP_RETIRED.ALL_BRANCHES",
- "BriefDescription": "Branch Misprediction Cost: Fraction of TopDown slots wasted per branch misprediction (jeclear and baclear)",
- "MetricGroup": "Branch_Mispredicts_SMT",
+ "MetricGroup": "BrMispredicts_SMT",
"MetricName": "Branch_Misprediction_Cost_SMT"
},
{
- "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
"BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear)",
- "MetricGroup": "Branch_Mispredicts",
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "BrMispredicts",
"MetricName": "IpMispredict"
},
{
+ "BriefDescription": "Core actual clocks when any Logical Processor is active on the Physical Core",
"MetricExpr": "( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )",
- "BriefDescription": "Core actual clocks when any thread is active on the physical core",
"MetricGroup": "SMT",
"MetricName": "CORE_CLKS"
},
{
- "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_UOPS_RETIRED.L1_MISS + mem_load_uops_retired.hit_lfb )",
"BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads (in core cycles)",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_UOPS_RETIRED.L1_MISS + mem_load_uops_retired.hit_lfb )",
"MetricGroup": "Memory_Bound;Memory_Lat",
"MetricName": "Load_Miss_Real_Latency"
},
{
+ "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)",
"MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES",
- "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-thread)",
"MetricGroup": "Memory_Bound;Memory_BW",
"MetricName": "MLP"
},
{
- "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION + 7 * ( DTLB_STORE_MISSES.WALK_COMPLETED + DTLB_LOAD_MISSES.WALK_COMPLETED + ITLB_MISSES.WALK_COMPLETED ) ) / ( 2 * cycles )",
"BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
+ "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION + 7 * ( DTLB_STORE_MISSES.WALK_COMPLETED + DTLB_LOAD_MISSES.WALK_COMPLETED + ITLB_MISSES.WALK_COMPLETED ) ) / ( 2 * cycles )",
"MetricGroup": "TLB",
"MetricName": "Page_Walks_Utilization"
},
{
- "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION + 7 * ( DTLB_STORE_MISSES.WALK_COMPLETED + DTLB_LOAD_MISSES.WALK_COMPLETED + ITLB_MISSES.WALK_COMPLETED ) ) / ( 2 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )) )",
"BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
+ "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION + 7 * ( DTLB_STORE_MISSES.WALK_COMPLETED + DTLB_LOAD_MISSES.WALK_COMPLETED + ITLB_MISSES.WALK_COMPLETED ) ) / ( 2 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )) )",
"MetricGroup": "TLB_SMT",
"MetricName": "Page_Walks_Utilization_SMT"
},
{
- "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
"BriefDescription": "Average data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
"MetricGroup": "Memory_BW",
"MetricName": "L1D_Cache_Fill_BW"
},
{
- "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
"BriefDescription": "Average data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
"MetricGroup": "Memory_BW",
"MetricName": "L2_Cache_Fill_BW"
},
{
- "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
"BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
"MetricGroup": "Memory_BW",
"MetricName": "L3_Cache_Fill_BW"
},
{
- "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L1_MISS / INST_RETIRED.ANY",
"BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
- "MetricGroup": "Cache_Misses;",
+ "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L1_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "Cache_Misses",
"MetricName": "L1MPKI"
},
{
- "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
"BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads",
- "MetricGroup": "Cache_Misses;",
+ "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "Cache_Misses",
"MetricName": "L2MPKI"
},
{
- "MetricExpr": "1000 * L2_RQSTS.MISS / INST_RETIRED.ANY",
"BriefDescription": "L2 cache misses per kilo instruction for all request types (including speculative)",
- "MetricGroup": "Cache_Misses;",
+ "MetricExpr": "1000 * L2_RQSTS.MISS / INST_RETIRED.ANY",
+ "MetricGroup": "Cache_Misses",
"MetricName": "L2MPKI_All"
},
{
- "MetricExpr": "1000 * ( L2_RQSTS.REFERENCES - L2_RQSTS.MISS ) / INST_RETIRED.ANY",
"BriefDescription": "L2 cache hits per kilo instruction for all request types (including speculative)",
- "MetricGroup": "Cache_Misses;",
+ "MetricExpr": "1000 * ( L2_RQSTS.REFERENCES - L2_RQSTS.MISS ) / INST_RETIRED.ANY",
+ "MetricGroup": "Cache_Misses",
"MetricName": "L2HPKI_All"
},
{
- "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L3_MISS / INST_RETIRED.ANY",
"BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads",
- "MetricGroup": "Cache_Misses;",
+ "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L3_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "Cache_Misses",
"MetricName": "L3MPKI"
},
{
- "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
"BriefDescription": "Average CPU Utilization",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
"MetricGroup": "Summary",
"MetricName": "CPU_Utilization"
},
{
- "MetricExpr": "( (( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / 1000000000 ) / duration_time",
"BriefDescription": "Giga Floating Point Operations Per Second",
+ "MetricExpr": "( (( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / 1000000000 ) / duration_time",
"MetricGroup": "FLOPS;Summary",
"MetricName": "GFLOPs"
},
{
- "MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
"BriefDescription": "Average Frequency Utilization relative nominal frequency",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
"MetricGroup": "Power",
"MetricName": "Turbo_Utilization"
},
{
+ "BriefDescription": "Fraction of cycles where both hardware Logical Processors were active",
"MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0",
- "BriefDescription": "Fraction of cycles where both hardware threads were active",
"MetricGroup": "SMT;Summary",
"MetricName": "SMT_2T_Utilization"
},
{
- "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
"BriefDescription": "Fraction of cycles spent in Kernel mode",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
"MetricGroup": "Summary",
"MetricName": "Kernel_Utilization"
},
{
- "MetricExpr": "( 64 * ( uncore_imc@cas_count_read@ + uncore_imc@cas_count_write@ ) / 1000000000 ) / duration_time",
"BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
+ "MetricExpr": "( 64 * ( uncore_imc@cas_count_read@ + uncore_imc@cas_count_write@ ) / 1000000000 ) / duration_time",
"MetricGroup": "Memory_BW",
"MetricName": "DRAM_BW_Use"
},
{
- "MetricExpr": "1000000000 * ( cbox@event\\=0x36\\,umask\\=0x3\\,filter_opc\\=0x182@ / cbox@event\\=0x35\\,umask\\=0x3\\,filter_opc\\=0x182@ ) / ( cbox_0@event\\=0x0@ / duration_time )",
"BriefDescription": "Average latency of data read request to external memory (in nanoseconds). Accounts for demand loads and L1/L2 prefetches",
+ "MetricExpr": "1000000000 * ( cbox@event\\=0x36\\,umask\\=0x3\\,filter_opc\\=0x182@ / cbox@event\\=0x35\\,umask\\=0x3\\,filter_opc\\=0x182@ ) / ( cbox_0@event\\=0x0@ / duration_time )",
"MetricGroup": "Memory_Lat",
"MetricName": "DRAM_Read_Latency"
},
{
- "MetricExpr": "cbox@event\\=0x36\\,umask\\=0x3\\,filter_opc\\=0x182@ / cbox@event\\=0x36\\,umask\\=0x3\\,filter_opc\\=0x182\\,thresh\\=1@",
"BriefDescription": "Average number of parallel data read requests to external memory. Accounts for demand loads and L1/L2 prefetches",
+ "MetricExpr": "cbox@event\\=0x36\\,umask\\=0x3\\,filter_opc\\=0x182@ / cbox@event\\=0x36\\,umask\\=0x3\\,filter_opc\\=0x182\\,thresh\\=1@",
"MetricGroup": "Memory_BW",
"MetricName": "DRAM_Parallel_Reads"
},
{
- "MetricExpr": "cbox_0@event\\=0x0@",
"BriefDescription": "Socket actual clocks when any core is active on that socket",
+ "MetricExpr": "cbox_0@event\\=0x0@",
"MetricGroup": "",
"MetricName": "Socket_CLKS"
},
{
+ "BriefDescription": "C3 residency percent per core",
"MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
- "BriefDescription": "C3 residency percent per core",
"MetricName": "C3_Core_Residency"
},
{
+ "BriefDescription": "C6 residency percent per core",
"MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
- "BriefDescription": "C6 residency percent per core",
"MetricName": "C6_Core_Residency"
},
{
+ "BriefDescription": "C7 residency percent per core",
"MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
- "BriefDescription": "C7 residency percent per core",
"MetricName": "C7_Core_Residency"
},
{
+ "BriefDescription": "C2 residency percent per package",
"MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
- "BriefDescription": "C2 residency percent per package",
"MetricName": "C2_Pkg_Residency"
},
{
+ "BriefDescription": "C3 residency percent per package",
"MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
- "BriefDescription": "C3 residency percent per package",
"MetricName": "C3_Pkg_Residency"
},
{
+ "BriefDescription": "C6 residency percent per package",
"MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
- "BriefDescription": "C6 residency percent per package",
"MetricName": "C6_Pkg_Residency"
},
{
+ "BriefDescription": "C7 residency percent per package",
"MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
- "BriefDescription": "C7 residency percent per package",
"MetricName": "C7_Pkg_Residency"
}
]
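
Each metric entry above pairs a "MetricExpr", written over raw PMU event names, with the "MetricName" that perf exposes on the command line (e.g. perf stat -M Retiring). As a rough illustration of how such an expression is evaluated, the following is a minimal Python sketch that computes IPC, SLOTS and Retiring from hypothetical event counts; all the numbers are invented placeholders, not output from any real run.

# Minimal sketch: evaluate a few of the TopDown MetricExpr formulas above
# against hypothetical raw event counts. All numbers are invented for
# illustration; real values would come from the PMU via perf.
counts = {
    "INST_RETIRED.ANY": 1_200_000,          # hypothetical count
    "CPU_CLK_UNHALTED.THREAD": 1_000_000,   # appears as "cycles" in MetricExpr
    "UOPS_RETIRED.RETIRE_SLOTS": 2_800_000, # hypothetical count
}

cycles = counts["CPU_CLK_UNHALTED.THREAD"]

# IPC = INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD
ipc = counts["INST_RETIRED.ANY"] / cycles

# SLOTS = 4 * cycles: four issue-pipeline slots per cycle on this core
slots = 4 * cycles

# Retiring = UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles): fraction of all
# issue slots that retired useful uops
retiring = counts["UOPS_RETIRED.RETIRE_SLOTS"] / slots

print(f"IPC      = {ipc:.2f}")       # IPC      = 1.20
print(f"SLOTS    = {slots}")         # SLOTS    = 4000000
print(f"Retiring = {retiring:.2%}")  # Retiring = 70.00%

In a real run, perf's metric code selects the events named in the expression, counts them, and evaluates the formula itself; requesting a MetricName such as Retiring via perf stat -M is enough, with no manual arithmetic needed.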
diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/cache.json b/tools/perf/pmu-events/arch/x86/cascadelakex/cache.json
index 143077c2caf4..3fba310a5012 100644
--- a/tools/perf/pmu-events/arch/x86/cascadelakex/cache.json
+++ b/tools/perf/pmu-events/arch/x86/cascadelakex/cache.json
@@ -1,10172 +1,9976 @@
[
{
- "EventCode": "0x24",
- "UMask": "0x21",
- "BriefDescription": "Demand Data Read miss L2, no rejects",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.DEMAND_DATA_RD_MISS",
- "PublicDescription": "Counts the number of demand Data Read requests that miss L2 cache. Only not rejected loads are counted.",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "UMask": "0x22",
- "BriefDescription": "RFO requests that miss L2 cache",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.RFO_MISS",
- "PublicDescription": "Counts the RFO (Read-for-Ownership) requests that miss L2 cache.",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "UMask": "0x24",
- "BriefDescription": "L2 cache misses when fetching instructions",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.CODE_RD_MISS",
- "PublicDescription": "Counts L2 cache misses when fetching instructions.",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "UMask": "0x27",
- "BriefDescription": "Demand requests that miss L2 cache",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.ALL_DEMAND_MISS",
- "PublicDescription": "Demand requests that miss L2 cache.",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "UMask": "0x38",
- "BriefDescription": "Requests from the L1/L2/L3 hardware prefetchers or Load software prefetches that miss L2 cache",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.PF_MISS",
- "PublicDescription": "Counts requests from the L1/L2/L3 hardware prefetchers or Load software prefetches that miss L2 cache.",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400028000",
+ "Offcore": "1",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0x24",
- "UMask": "0x3f",
- "BriefDescription": "All requests that miss L2 cache",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.MISS",
- "PublicDescription": "All requests that miss L2 cache.",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100400002",
+ "Offcore": "1",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0x24",
- "UMask": "0xc1",
- "BriefDescription": "Demand Data Read requests that hit L2 cache",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
- "PublicDescription": "Counts the number of demand Data Read requests, initiated by load instructions, that hit L2 cache",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00803C0004",
+ "Offcore": "1",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0x24",
- "UMask": "0xc2",
- "BriefDescription": "RFO requests that hit L2 cache",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_F.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.RFO_HIT",
- "PublicDescription": "Counts the RFO (Read-for-Ownership) requests that hit L2 cache.",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_F.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80200491",
+ "Offcore": "1",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0x24",
- "UMask": "0xc4",
- "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
+ "BriefDescription": "Demand requests that miss L2 cache",
"Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.CODE_RD_HIT",
- "PublicDescription": "Counts L2 cache hits when fetching instructions, code reads.",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0x24",
- "UMask": "0xd8",
- "BriefDescription": "Requests from the L1/L2/L3 hardware prefetchers or Load software prefetches that hit L2 cache",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.PF_HIT",
- "PublicDescription": "Counts requests from the L1/L2/L3 hardware prefetchers or Load software prefetches that hit L2 cache.",
+ "EventName": "L2_RQSTS.ALL_DEMAND_MISS",
+ "PublicDescription": "Demand requests that miss L2 cache.",
"SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x27"
},
{
- "EventCode": "0x24",
- "UMask": "0xe1",
- "BriefDescription": "Demand Data Read requests",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_M.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
- "PublicDescription": "Counts the number of demand Data Read requests (including requests from L1D hardware prefetchers). These loads may hit or miss L2 cache. Only non rejected loads are counted.",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_M.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80040491",
+ "Offcore": "1",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0x24",
- "UMask": "0xe2",
- "BriefDescription": "RFO requests to L2 cache",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_M.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.ALL_RFO",
- "PublicDescription": "Counts the total number of RFO (read for ownership) requests to L2 cache. L2 RFO requests include both L1D demand RFO misses as well as L1D RFO prefetches.",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_M.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200040010",
+ "Offcore": "1",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0x24",
- "UMask": "0xe4",
- "BriefDescription": "L2 code requests",
+ "BriefDescription": "Retired load instructions with L3 cache hits as data sources",
"Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.ALL_CODE_RD",
- "PublicDescription": "Counts the total number of L2 code requests.",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xD1",
+ "EventName": "MEM_LOAD_RETIRED.L3_HIT",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L3 cache.",
+ "SampleAfterValue": "50021",
+ "UMask": "0x4"
},
{
- "EventCode": "0x24",
- "UMask": "0xe7",
- "BriefDescription": "Demand requests to L2 cache",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.ALL_DEMAND_REFERENCES",
- "PublicDescription": "Demand requests to L2 cache.",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80400001",
+ "Offcore": "1",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0x24",
- "UMask": "0xf8",
- "BriefDescription": "Requests from the L1/L2/L3 hardware prefetchers or Load software prefetches",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.ALL_PF",
- "PublicDescription": "Counts the total number of requests from the L2 hardware prefetchers.",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x08003C0491",
+ "Offcore": "1",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0x24",
- "UMask": "0xff",
- "BriefDescription": "All L2 requests",
+ "BriefDescription": "L2 writebacks that access L2 cache",
"Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.REFERENCES",
- "PublicDescription": "All L2 requests.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xF0",
+ "EventName": "L2_TRANS.L2_WB",
+ "PublicDescription": "Counts L2 writebacks that access L2 cache.",
"SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x40"
},
{
- "EventCode": "0x2E",
- "UMask": "0x41",
- "BriefDescription": "Core-originated cacheable demand requests missed L3",
+ "BriefDescription": "L2 cache lines filling L2",
"Counter": "0,1,2,3",
- "EventName": "LONGEST_LAT_CACHE.MISS",
- "Errata": "SKL057",
- "PublicDescription": "Counts core-originated cacheable requests that miss the L3 cache (Longest Latency cache). Requests include data and code reads, Reads-for-Ownership (RFOs), speculative accesses and hardware prefetches from L1 and L2. It does not include all misses to the L3.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xF1",
+ "EventName": "L2_LINES_IN.ALL",
+ "PublicDescription": "Counts the number of L2 cache lines filling the L2. Counting does not cover rejects.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1f"
},
{
- "EventCode": "0x2E",
- "UMask": "0x4f",
- "BriefDescription": "Core-originated cacheable demand requests that refer to L3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_F.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "LONGEST_LAT_CACHE.REFERENCE",
- "Errata": "SKL057",
- "PublicDescription": "Counts core-originated cacheable requests to the L3 cache (Longest Latency cache). Requests include data and code reads, Reads-for-Ownership (RFOs), speculative accesses and hardware prefetches from L1 and L2. It does not include all accesses to the L3.",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_F.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080200002",
+ "Offcore": "1",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0x48",
- "UMask": "0x1",
- "BriefDescription": "Cycles with L1D load Misses outstanding.",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"Counter": "0,1,2,3",
- "EventName": "L1D_PEND_MISS.PENDING_CYCLES",
- "CounterMask": "1",
- "PublicDescription": "Counts duration of L1D miss outstanding in cycles.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x08007C0010",
+ "Offcore": "1",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0x48",
- "UMask": "0x1",
- "BriefDescription": "L1D miss outstandings duration in cycles",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.SUPPLIER_NONE.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "L1D_PEND_MISS.PENDING",
- "PublicDescription": "Counts duration of L1D miss outstanding, that is each cycle number of Fill Buffers (FB) outstanding required by Demand Reads. FB either is held by demand loads, or it is held by non-demand loads and gets hit at least once by demand. The valid outstanding interval is defined until the FB deallocation by one of the following ways: from FB allocation, if FB is allocated by demand from the demand Hit FB, if it is allocated by hardware or software prefetch.Note: In the L1D, a Demand Read contains cacheable or noncacheable demand loads, including ones causing cache-line splits and reads due to page walks resulted from any request type.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80020004",
+ "Offcore": "1",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0x48",
- "UMask": "0x1",
- "BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_M.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
- "AnyThread": "1",
- "CounterMask": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_M.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080040080",
+ "Offcore": "1",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0x48",
- "UMask": "0x2",
- "BriefDescription": "Number of times a request needed a FB entry but there was no entry available for it. That is the FB unavailability was dominant reason for blocking the request. A request includes cacheable/uncacheable demands that is load, store or SW prefetch.",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_E.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "L1D_PEND_MISS.FB_FULL",
- "PublicDescription": "Number of times a request needed a FB (Fill Buffer) entry but there was no entry available for it. A request includes cacheable/uncacheable demands that are load, store or SW prefetch instructions.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x08000807F7",
+ "Offcore": "1",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0x51",
- "UMask": "0x1",
- "BriefDescription": "L1D data line replacements",
+ "BriefDescription": "Counts the number of lines that are silently dropped by L2 cache when triggered by an L2 cache fill. These lines are typically in Shared state. A non-threaded event.",
"Counter": "0,1,2,3",
- "EventName": "L1D.REPLACEMENT",
- "PublicDescription": "Counts L1D data line replacements including opportunistic replacements, and replacements that require stall-for-replace or block-for-replace.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xF2",
+ "EventName": "L2_LINES_OUT.SILENT",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1"
},
{
- "EventCode": "0x60",
- "UMask": "0x1",
- "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_E.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
- "CounterMask": "1",
- "PublicDescription": "Counts cycles when offcore outstanding Demand Data Read transactions are present in the super queue (SQ). A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation).",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_E.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100080080",
+ "Offcore": "1",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0x60",
- "UMask": "0x1",
- "BriefDescription": "Offcore outstanding Demand Data Read transactions in uncore queue.",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_S.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD",
- "PublicDescription": "Counts the number of offcore outstanding Demand Data Read transactions in the super queue (SQ) every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor. See the corresponding Umask under OFFCORE_REQUESTS.Note: A prefetch promoted to Demand is counted from the promotion point.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_S.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080100020",
+ "Offcore": "1",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0x60",
- "UMask": "0x1",
- "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_E.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_GE_6",
- "CounterMask": "6",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200080004",
+ "Offcore": "1",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0x60",
- "UMask": "0x2",
- "BriefDescription": "Offcore outstanding Code Reads transactions in the SuperQueue (SQ), queue to uncore, every cycle.",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_CODE_RD",
- "PublicDescription": "Counts the number of offcore outstanding Code Reads transactions in the super queue every cycle. The 'Offcore outstanding' state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C0080",
+ "Offcore": "1",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0x60",
- "UMask": "0x2",
- "BriefDescription": "Cycles with offcore outstanding Code Reads transactions in the SuperQueue (SQ), queue to uncore.",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_F.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_CODE_RD",
- "CounterMask": "1",
- "PublicDescription": "Counts the number of offcore outstanding Code Reads transactions in the super queue every cycle. The 'Offcore outstanding' state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_F.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80200001",
+ "Offcore": "1",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0x60",
- "UMask": "0x4",
- "BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_RFO",
- "PublicDescription": "Counts the number of offcore outstanding RFO (store) transactions in the super queue (SQ) every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80020491",
+ "Offcore": "1",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0x60",
- "UMask": "0x4",
- "BriefDescription": "Cycles with offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore.",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
- "CounterMask": "1",
- "PublicDescription": "Counts the number of offcore outstanding demand rfo Reads transactions in the super queue every cycle. The 'Offcore outstanding' state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400100001",
+ "Offcore": "1",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0x60",
- "UMask": "0x8",
- "BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_F.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
- "CounterMask": "1",
- "PublicDescription": "Counts cycles when offcore outstanding cacheable Core Data Read transactions are present in the super queue. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_F.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100200004",
+ "Offcore": "1",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0x60",
- "UMask": "0x8",
- "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
- "PublicDescription": "Counts the number of offcore outstanding cacheable Core Data Read transactions in the super queue every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x04003C0100",
+ "Offcore": "1",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xB0",
- "UMask": "0x1",
- "BriefDescription": "Demand Data Read requests sent to uncore",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_S.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS.DEMAND_DATA_RD",
- "PublicDescription": "Counts the Demand Data Read requests sent to uncore. Use it in conjunction with OFFCORE_REQUESTS_OUTSTANDING to determine average latency in the uncore.",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_S.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80100080",
+ "Offcore": "1",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB0",
- "UMask": "0x2",
- "BriefDescription": "Cacheable and noncachaeble code read requests",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_F.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS.DEMAND_CODE_RD",
- "PublicDescription": "Counts both cacheable and non-cacheable code read requests.",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_F.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000200020",
+ "Offcore": "1",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB0",
- "UMask": "0x4",
- "BriefDescription": "Demand RFO requests including regular RFOs, locks, ItoM",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_F.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS.DEMAND_RFO",
- "PublicDescription": "Counts the demand RFO (read for ownership) requests including regular RFOs, locks, ItoM.",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_F.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80200100",
+ "Offcore": "1",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB0",
- "UMask": "0x8",
- "BriefDescription": "Demand and prefetch data reads",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_M.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS.ALL_DATA_RD",
- "PublicDescription": "Counts the demand and prefetch data reads. All Core Data Reads include cacheable 'Demands' and L2 prefetchers (not L3 prefetchers). Counting also covers reads due to page walks resulted from any request type.",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800040020",
+ "Offcore": "1",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB0",
- "UMask": "0x80",
- "BriefDescription": "Any memory transaction that reached the SQ.",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS.ALL_REQUESTS",
- "PublicDescription": "Counts memory transactions reached the super queue including requests initiated by the core, all L3 prefetches, page walks, etc..",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80020490",
+ "Offcore": "1",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB2",
- "UMask": "0x1",
- "BriefDescription": "Offcore requests buffer cannot take more entries for this thread core.",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_F.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_BUFFER.SQ_FULL",
- "PublicDescription": "Counts the number of cases when the offcore requests buffer cannot take more entries for the core. This can happen when the superqueue does not contain eligible entries, or when L1D writeback pending FIFO requests is full.Note: Writeback pending FIFO has six entries.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_F.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100200490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0xD0",
- "UMask": "0x11",
- "BriefDescription": "Retired load instructions that miss the STLB.",
- "Data_LA": "1",
- "PEBS": "1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.SUPPLIER_NONE.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "MEM_INST_RETIRED.STLB_MISS_LOADS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80028000",
+ "Offcore": "1",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0xD0",
- "UMask": "0x12",
- "BriefDescription": "Retired store instructions that miss the STLB.",
- "Data_LA": "1",
- "PEBS": "1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "MEM_INST_RETIRED.STLB_MISS_STORES",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F804007F7",
+ "Offcore": "1",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "L1_Hit_Indication": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0xD0",
- "UMask": "0x21",
- "BriefDescription": "Retired load instructions with locked access.",
- "Data_LA": "1",
- "PEBS": "1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_S.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "MEM_INST_RETIRED.LOCK_LOADS",
- "SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_S.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200100400",
+ "Offcore": "1",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xD0",
- "UMask": "0x41",
- "BriefDescription": "Retired load instructions that split across a cacheline boundary.",
- "Data_LA": "1",
- "PEBS": "1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.SUPPLIER_NONE.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "MEM_INST_RETIRED.SPLIT_LOADS",
- "PublicDescription": "Counts retired load instructions that split across a cacheline boundary.",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.SUPPLIER_NONE.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200020400",
+ "Offcore": "1",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0xD0",
- "UMask": "0x42",
- "BriefDescription": "Retired store instructions that split across a cacheline boundary.",
- "Data_LA": "1",
- "PEBS": "1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_M.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "MEM_INST_RETIRED.SPLIT_STORES",
- "PublicDescription": "Counts retired store instructions that split across a cacheline boundary.",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800040400",
+ "Offcore": "1",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "L1_Hit_Indication": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0xD0",
- "UMask": "0x81",
- "BriefDescription": "All retired load instructions.",
- "Data_LA": "1",
- "PEBS": "1",
+ "BriefDescription": "All requests that miss L2 cache",
"Counter": "0,1,2,3",
- "EventName": "MEM_INST_RETIRED.ALL_LOADS",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.MISS",
+ "PublicDescription": "All requests that miss L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x3f"
},
{
- "EventCode": "0xD0",
- "UMask": "0x82",
- "BriefDescription": "All retired store instructions.",
- "Data_LA": "1",
- "PEBS": "1",
+ "BriefDescription": "Retired load instructions whose data sources was forwarded from a remote cache",
"Counter": "0,1,2,3",
- "EventName": "MEM_INST_RETIRED.ALL_STORES",
- "SampleAfterValue": "2000003",
- "L1_Hit_Indication": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xD1",
- "UMask": "0x1",
- "BriefDescription": "Retired load instructions with L1 cache hits as data sources",
+ "CounterHTOff": "0,1,2,3",
"Data_LA": "1",
- "PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "MEM_LOAD_RETIRED.L1_HIT",
- "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L1 data cache. This event includes all SW prefetches and lock instructions regardless of the data source.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "EventCode": "0xD3",
+ "EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD",
+ "PublicDescription": "Retired load instructions whose data sources was forwarded from a remote cache.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x8"
},
{
- "EventCode": "0xD1",
- "UMask": "0x2",
- "BriefDescription": "Retired load instructions with L2 cache hits as data sources",
- "Data_LA": "1",
- "PEBS": "1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "MEM_LOAD_RETIRED.L2_HIT",
- "PublicDescription": "Retired load instructions with L2 cache hits as data sources.",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080400490",
+ "Offcore": "1",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xD1",
- "UMask": "0x4",
- "BriefDescription": "Retired load instructions with L3 cache hits as data sources",
- "Data_LA": "1",
- "PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "MEM_LOAD_RETIRED.L3_HIT",
- "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L3 cache. ",
- "SampleAfterValue": "50021",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0xD1",
- "UMask": "0x8",
- "BriefDescription": "Retired load instructions missed L1 cache as data sources",
- "Data_LA": "1",
- "PEBS": "1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "MEM_LOAD_RETIRED.L1_MISS",
- "PublicDescription": "Counts retired load instructions with at least one uop that missed in the L1 cache.",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C0400",
+ "Offcore": "1",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xD1",
- "UMask": "0x10",
- "BriefDescription": "Retired load instructions missed L2 cache as data sources",
- "Data_LA": "1",
- "PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "MEM_LOAD_RETIRED.L2_MISS",
- "PublicDescription": "Retired load instructions missed L2 cache as data sources.",
- "SampleAfterValue": "50021",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0xD1",
- "UMask": "0x20",
- "BriefDescription": "Retired load instructions missed L3 cache as data sources",
- "Data_LA": "1",
- "PEBS": "1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "MEM_LOAD_RETIRED.L3_MISS",
- "PublicDescription": "Counts retired load instructions with at least one uop that missed in the L3 cache. ",
- "SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xD1",
- "UMask": "0x40",
- "BriefDescription": "Retired load instructions which data sources were load missed L1 but hit FB due to preceding miss to the same cache line with data not ready",
- "Data_LA": "1",
- "PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "MEM_LOAD_RETIRED.FB_HIT",
- "PublicDescription": "Counts retired load instructions with at least one uop was load missed in L1 but hit FB (Fill Buffers) due to preceding miss to the same cache line with data not ready. ",
- "SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xD1",
- "UMask": "0x80",
- "BriefDescription": "Retired load instructions with local Intel\u00ae Optane\u2122 DC persistent memory as the data source where the data request missed all caches. Precise event.",
- "Data_LA": "1",
- "PEBS": "1",
- "ELLC": "1",
- "Counter": "0,1,2,3",
- "EventName": "MEM_LOAD_RETIRED.LOCAL_PMM",
- "PublicDescription": "Counts retired load instructions with local Intel\u00ae Optane\u2122 DC persistent memory as the data source and the data request missed L3 (AppDirect or Memory Mode) and DRAM cache(Memory Mode). Precise event",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100400490",
+ "Offcore": "1",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0xD2",
- "UMask": "0x1",
- "BriefDescription": "Retired load instructions which data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
- "Data_LA": "1",
- "PEBS": "1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS",
- "SampleAfterValue": "20011",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400020491",
+ "Offcore": "1",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xD2",
- "UMask": "0x2",
"BriefDescription": "Retired load instructions which data sources were L3 and cross-core snoop hits in on-pkg core cache",
- "Data_LA": "1",
- "PEBS": "1",
"Counter": "0,1,2,3",
- "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT",
- "PublicDescription": "Retired load instructions which data sources were L3 and cross-core snoop hits in on-pkg core cache.",
- "SampleAfterValue": "20011",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xD2",
- "UMask": "0x4",
- "BriefDescription": "Retired load instructions which data sources were HitM responses from shared L3",
+ "CounterHTOff": "0,1,2,3",
"Data_LA": "1",
+ "EventCode": "0xD2",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT",
"PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM",
- "PublicDescription": "Retired load instructions which data sources were HitM responses from shared L3.",
+ "PublicDescription": "Retired load instructions which data sources were L3 and cross-core snoop hits in on-pkg core cache.",
"SampleAfterValue": "20011",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x2"
},
{
- "EventCode": "0xD2",
- "UMask": "0x8",
- "BriefDescription": "Retired load instructions which data sources were hits in L3 without snoops required",
- "Data_LA": "1",
- "PEBS": "1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_M.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_NONE",
- "PublicDescription": "Retired load instructions which data sources were hits in L3 without snoops required.",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80040001",
+ "Offcore": "1",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xD3",
- "UMask": "0x1",
- "BriefDescription": "Retired load instructions which data sources missed L3 but serviced from local dram",
- "Data_LA": "1",
- "PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM",
- "PublicDescription": "Retired load instructions which data sources missed L3 but serviced from local DRAM.",
- "SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0xD3",
- "UMask": "0x2",
- "BriefDescription": "Retired load instructions which data sources missed L3 but serviced from remote dram",
- "Data_LA": "1",
- "PEBS": "1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM",
- "SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xD3",
- "UMask": "0x4",
- "BriefDescription": "Retired load instructions whose data sources was remote HITM",
- "Data_LA": "1",
- "PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM",
- "PublicDescription": "Retired load instructions whose data sources was remote HITM.",
- "SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400100122",
+ "Offcore": "1",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xD3",
- "UMask": "0x8",
- "BriefDescription": "Retired load instructions whose data sources was forwarded from a remote cache",
- "Data_LA": "1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"Counter": "0,1,2,3",
- "EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD",
- "PublicDescription": "Retired load instructions whose data sources was forwarded from a remote cache.",
- "SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x08007C0080",
+ "Offcore": "1",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xD3",
- "UMask": "0x10",
- "BriefDescription": "Retired load instructions with remote Intel\u00ae Optane\u2122 DC persistent memory as the data source where the data request missed all caches. Precise event.",
- "Data_LA": "1",
- "PEBS": "1",
- "ELLC": "1",
+ "BriefDescription": "Retired load instructions missed L1 cache as data sources",
"Counter": "0,1,2,3",
- "EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_PMM",
- "PublicDescription": "Counts retired load instructions with remote Intel\u00ae Optane\u2122 DC persistent memory as the data source and the data request missed L3 (AppDirect or Memory Mode) and DRAM cache(Memory Mode). Precise event",
- "SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xD4",
- "UMask": "0x4",
- "BriefDescription": "Retired instructions with at least 1 uncacheable load or lock.",
+ "CounterHTOff": "0,1,2,3",
"Data_LA": "1",
+ "EventCode": "0xD1",
+ "EventName": "MEM_LOAD_RETIRED.L1_MISS",
"PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "MEM_LOAD_MISC_RETIRED.UC",
- "SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xF0",
- "UMask": "0x40",
- "BriefDescription": "L2 writebacks that access L2 cache",
- "Counter": "0,1,2,3",
- "EventName": "L2_TRANS.L2_WB",
- "PublicDescription": "Counts L2 writebacks that access L2 cache.",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xF1",
- "UMask": "0x1f",
- "BriefDescription": "L2 cache lines filling L2",
- "Counter": "0,1,2,3",
- "EventName": "L2_LINES_IN.ALL",
- "PublicDescription": "Counts the number of L2 cache lines filling the L2. Counting does not cover rejects.",
+ "PublicDescription": "Counts retired load instructions with at least one uop that missed in the L1 cache.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
- "EventCode": "0xF2",
- "UMask": "0x1",
- "BriefDescription": "Counts the number of lines that are silently dropped by L2 cache when triggered by an L2 cache fill. These lines are typically in Shared state. A non-threaded event.",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_S.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "L2_LINES_OUT.SILENT",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xF2",
- "UMask": "0x2",
- "BriefDescription": "Counts the number of lines that are evicted by L2 cache when triggered by an L2 cache fill. Those lines can be either in modified state or clean state. Modified lines may either be written back to L3 or directly written to memory and not allocated in L3. Clean lines may either be allocated in L3 or dropped",
- "Counter": "0,1,2,3",
- "EventName": "L2_LINES_OUT.NON_SILENT",
- "PublicDescription": "Counts the number of lines that are evicted by L2 cache when triggered by an L2 cache fill. Those lines can be either in modified state or clean state. Modified lines may either be written back to L3 or directly written to memory and not allocated in L3. Clean lines may either be allocated in L3 or dropped.",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xF2",
- "UMask": "0x4",
- "BriefDescription": "This event is deprecated. Refer to new event L2_LINES_OUT.USELESS_HWPF",
+ "CounterHTOff": "0,1,2,3",
"Deprecated": "1",
- "Counter": "0,1,2,3",
- "EventName": "L2_LINES_OUT.USELESS_PREF",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xF2",
- "UMask": "0x4",
- "BriefDescription": "Counts the number of lines that have been hardware prefetched but not used and now evicted by L2 cache",
- "Counter": "0,1,2,3",
- "EventName": "L2_LINES_OUT.USELESS_HWPF",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xF4",
- "UMask": "0x10",
- "BriefDescription": "Number of cache line split locks sent to uncore.",
- "Counter": "0,1,2,3",
- "EventName": "SQ_MISC.SPLIT_LOCK",
- "PublicDescription": "Counts the number of cache line split locks sent to the uncore.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "Offcore": "1",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0080020001",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=SUPPLIER_NONE.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800108000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0100020001",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100020122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x0200020001",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=SUPPLIER_NONE.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200020080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0400020001",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_F.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_F.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80200010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0800020001",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_S.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080100002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x1000020001",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_S.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=SUPPLIER_NONE.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_S.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000100020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F80020001",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=SUPPLIER_NONE.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080400002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_M.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0080040001",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_M.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_HIT_M.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200048000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_M.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0100040001",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_HIT_M.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400080400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_M.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x0200040001",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_F.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_HIT_M.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_F.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000200122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0400040001",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_M.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100040001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0800040001",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_E.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_E.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200080122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_M.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x1000040001",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_HIT_M.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400020122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_M.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F80040001",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_F.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_HIT_M.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_F.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800200122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_E.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0080080001",
+ "AnyThread": "1",
+ "BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_HIT_E.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_E.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0100080001",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_HIT_E.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400208000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_E.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x0200080001",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_S.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_HIT_E.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_S.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000100010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0400080001",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000020080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0800080001",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_S.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_S.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080100491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_E.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x1000080001",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_F.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_HIT_E.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_F.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80200020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_E.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F80080001",
+ "BriefDescription": "Any memory transaction that reached the SQ.",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_HIT_E.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xB0",
+ "EventName": "OFFCORE_REQUESTS.ALL_REQUESTS",
+ "PublicDescription": "Counts memory transactions reached the super queue including requests initiated by the core, all L3 prefetches, page walks, etc..",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x80"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_S.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0080100001",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_F.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_HIT_S.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_F.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100200010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_S.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0100100001",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_F.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_HIT_S.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_F.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100200120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_S.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x0200100001",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_M.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_HIT_S.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_M.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200040120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0400100001",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_M.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100048000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0800100001",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.SUPPLIER_NONE.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000028000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_S.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x1000100001",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_F.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_HIT_S.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_F.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000200400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_S.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F80100001",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_HIT_S.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x04003C0010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_F.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0080200001",
+ "BriefDescription": "Retired instructions with at least 1 uncacheable load or lock.",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_HIT_F.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xD4",
+ "EventName": "MEM_LOAD_MISC_RETIRED.UC",
+ "PEBS": "1",
+ "SampleAfterValue": "100007",
+ "UMask": "0x4"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_F.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0100200001",
+ "BriefDescription": "Retired load instructions with remote Intel\u00ae Optane\u2122 DC persistent memory as the data source where the data request missed all caches. Precise event.",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_HIT_F.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1",
+ "ELLC": "1",
+ "EventCode": "0xD3",
+ "EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_PMM",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions with remote Intel\u00ae Optane\u2122 DC persistent memory as the data source and the data request missed L3 (AppDirect or Memory Mode) and DRAM cache(Memory Mode). Precise event",
+ "SampleAfterValue": "100007",
+ "UMask": "0x10"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_F.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x0200200001",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_HIT_F.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400080122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0400200001",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_E.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_E.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80080400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0800200001",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_E.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_E.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x02000807F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_F.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x1000200001",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_HIT_F.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x08007C0490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_F.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F80200001",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_E.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_HIT_F.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100080002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x00803C0001",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_HIT.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800080010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x01003C0001",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_HIT.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100400001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x02003C0001",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.ANY_RESPONSE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_HIT.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0000010490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x04003C0001",
+ "BriefDescription": "Retired load instructions which data sources were hits in L3 without snoops required",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xD2",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_NONE",
+ "PEBS": "1",
+ "PublicDescription": "Retired load instructions which data sources were hits in L3 without snoops required.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x8"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x08003C0001",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_HIT.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100400122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x10003C0001",
+ "BriefDescription": "Demand Data Read requests",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_HIT.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
+ "PublicDescription": "Counts the number of demand Data Read requests (including requests from L1D hardware prefetchers). These loads may hit or miss L2 cache. Only non rejected loads are counted.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xe1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F803C0001",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_HIT.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400200020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_RFO.SUPPLIER_NONE.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0080020002",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_M.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=SUPPLIER_NONE.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_M.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200040491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0100020002",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_M.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_M.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080040010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_RFO.SUPPLIER_NONE.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x0200020002",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=SUPPLIER_NONE.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0400020002",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.SUPPLIER_NONE.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200028000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0800020002",
+ "BriefDescription": "Cycles with offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore.",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
+ "PublicDescription": "Counts the number of offcore outstanding demand rfo Reads transactions in the super queue every cycle. The 'Offcore outstanding' state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_RFO.SUPPLIER_NONE.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x1000020002",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_S.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=SUPPLIER_NONE.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100100001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_RFO.SUPPLIER_NONE.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F80020002",
+ "BriefDescription": "Number of cache line split locks sent to uncore.",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=SUPPLIER_NONE.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xF4",
+ "EventName": "SQ_MISC.SPLIT_LOCK",
+ "PublicDescription": "Counts the number of cache line split locks sent to the uncore.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x10"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_M.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0080040002",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_F.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_HIT_M.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_F.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200200100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_M.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0100040002",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_S.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_HIT_M.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800100002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_M.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x0200040002",
+ "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_HIT_M.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.CODE_RD_HIT",
+ "PublicDescription": "Counts L2 cache hits when fetching instructions, code reads.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc4"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0400040002",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_F.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_F.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000200120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_M.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0800040002",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080020010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_M.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x1000040002",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_S.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_HIT_M.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_S.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10001007F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_M.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F80040002",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_HIT_M.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400040122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_E.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0080080002",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_HIT_E.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400100010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_E.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0100080002",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_S.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_HIT_E.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_S.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x02001007F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_E.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x0200080002",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_HIT_E.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800020120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0400080002",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_M.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_M.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800040120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_E.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0800080002",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400040020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_E.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x1000080002",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_S.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_HIT_E.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_S.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x08001007F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_E.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F80080002",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_HIT_E.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x08003C07F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_S.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0080100002",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_HIT_S.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x08007C0004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_S.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0100100002",
+ "BriefDescription": "Retired load instructions which data sources were HitM responses from shared L3",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_HIT_S.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xD2",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM",
+ "PEBS": "1",
+ "PublicDescription": "Retired load instructions which data sources were HitM responses from shared L3.",
+ "SampleAfterValue": "20011",
+ "UMask": "0x4"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_S.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x0200100002",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_F.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_HIT_S.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_F.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080200020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0400100002",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_F.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_F.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100200080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_S.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0800100002",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400048000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_S.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x1000100002",
+ "BriefDescription": "Retired store instructions that split across a cacheline boundary.",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_HIT_S.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xD0",
+ "EventName": "MEM_INST_RETIRED.SPLIT_STORES",
+ "L1_Hit_Indication": "1",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired store instructions that split across a cacheline boundary.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x42"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_S.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F80100002",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.ANY_RESPONSE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_HIT_S.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0000010122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_F.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0080200002",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_S.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_HIT_F.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_S.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000100120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_F.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0100200002",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.ANY_RESPONSE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_HIT_F.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0000010002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_F.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x0200200002",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_HIT_F.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000020491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0400200002",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_S.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_S.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200100120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_F.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0800200002",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.ANY_RESPONSE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0000010001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_F.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x1000200002",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_E.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_HIT_F.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_E.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080080400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_F.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F80200002",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_HIT_F.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080400080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x00803C0002",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_HIT.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x01003C0002",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_HIT.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100400080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x02003C0002",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_E.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_HIT.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_E.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800080120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x04003C0002",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800080001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x08003C0002",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_HIT.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800100491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x10003C0002",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_HIT.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400100002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F803C0002",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_HIT.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80400100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0080020004",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_E.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=SUPPLIER_NONE.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080080001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0100020004",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.ANY_RESPONSE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0000010004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x0200020004",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_S.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=SUPPLIER_NONE.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_S.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200100490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0400020004",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_M.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080040004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0800020004",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080400020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x1000020004",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=SUPPLIER_NONE.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x02003C8000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.SUPPLIER_NONE.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F80020004",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.SUPPLIER_NONE.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=SUPPLIER_NONE.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80020100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_M.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0080040004",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_F.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_HIT_M.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_F.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100200020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_M.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0100040004",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_F.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_HIT_M.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_F.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80200400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_M.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x0200040004",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_M.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_HIT_M.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_M.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080040120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0400040004",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x04003C0400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0800040004",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_M.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_M.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80040020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_M.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x1000040004",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_S.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_HIT_M.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_S.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80100120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_M.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F80040004",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_HIT_M.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80400002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_E.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0080080004",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_HIT_E.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400040491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_E.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0100080004",
+ "BriefDescription": "Number of times a request needed a FB entry but there was no entry available for it. That is the FB unavailability was dominant reason for blocking the request. A request includes cacheable/uncacheable demands that is load, store or SW prefetch.",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_HIT_E.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.FB_FULL",
+ "PublicDescription": "Number of times a request needed a FB (Fill Buffer) entry but there was no entry available for it. A request includes cacheable/uncacheable demands that are load, store or SW prefetch instructions.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_E.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x0200080004",
+ "BriefDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_HIT_E.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0400080004",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.SUPPLIER_NONE.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.SUPPLIER_NONE.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080020020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0800080004",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00803C0120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_E.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x1000080004",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_HIT_E.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400020010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_E.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F80080004",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_F.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_HIT_E.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_F.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80200490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_S.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0080100004",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_HIT_S.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x08003C0100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_S.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0100100004",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_HIT_S.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400040001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_S.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x0200100004",
+ "BriefDescription": "Offcore requests buffer cannot take more entries for this thread core.",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_HIT_S.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xB2",
+ "EventName": "OFFCORE_REQUESTS_BUFFER.SQ_FULL",
+ "PublicDescription": "Counts the number of cases when the offcore requests buffer cannot take more entries for the core. This can happen when the superqueue does not contain eligible entries, or when L1D writeback pending FIFO requests is full.Note: Writeback pending FIFO has six entries.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0400100004",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_F.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_F.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200200020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0800100004",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_F.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_F.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000200490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_S.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x1000100004",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_HIT_S.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C0100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_S.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F80100004",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_HIT_S.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080400491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_F.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0080200004",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.SUPPLIER_NONE.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_HIT_F.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.SUPPLIER_NONE.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10000207F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_F.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0100200004",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_HIT_F.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400080120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_F.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x0200200004",
+ "BriefDescription": "Retired load instructions missed L2 cache as data sources",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_HIT_F.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xD1",
+ "EventName": "MEM_LOAD_RETIRED.L2_MISS",
+ "PEBS": "1",
+ "PublicDescription": "Retired load instructions missed L2 cache as data sources.",
+ "SampleAfterValue": "50021",
+ "UMask": "0x10"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0400200004",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x02003C0122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0800200004",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_F.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_F.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080200400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_F.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x1000200004",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_HIT_F.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100400004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_F.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F80200004",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_HIT_F.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100020120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x00803C0004",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_F.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_HIT.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_F.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100200100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x01003C0004",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_M.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_HIT.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_M.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200040080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x02003C0004",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_E.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_HIT.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200088000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x04003C0004",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00803C8000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x08003C0004",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_F.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_HIT.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_F.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100200001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x10003C0004",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_E.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_HIT.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_E.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80080020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F803C0004",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_HIT.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80400490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0080020010",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=SUPPLIER_NONE.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80400400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0100020010",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100400020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x0200020010",
+ "BriefDescription": "Requests from the L1/L2/L3 hardware prefetchers or Load software prefetches that miss L2 cache",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=SUPPLIER_NONE.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.PF_MISS",
+ "PublicDescription": "Counts requests from the L1/L2/L3 hardware prefetchers or Load software prefetches that miss L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x38"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0400020010",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_M.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_M.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x08000407F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0800020010",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_F.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_F.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800200100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x1000020010",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_E.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=SUPPLIER_NONE.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80080004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F80020010",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_M.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=SUPPLIER_NONE.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_M.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200040400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_M.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0080040010",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_HIT_M.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400100004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_M.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0100040010",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_E.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_HIT_M.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_E.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200080080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_M.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x0200040010",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_M.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_HIT_M.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_M.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10000407F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0400040010",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.SUPPLIER_NONE.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.SUPPLIER_NONE.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00800207F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0800040010",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00803C0491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_M.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x1000040010",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.ANY_RESPONSE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_HIT_M.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0000010080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_M.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F80040010",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.SUPPLIER_NONE.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_HIT_M.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.SUPPLIER_NONE.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200020122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_E.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0080080010",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_HIT_E.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x04003C0122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_E.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0100080010",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_E.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_HIT_E.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80080001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_E.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x0200080010",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_HIT_E.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080020491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0400080010",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_M.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200040001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0800080010",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80400122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_E.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x1000080010",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.ANY_RESPONSE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_HIT_E.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0000010020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_E.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F80080010",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_HIT_E.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x08003C0490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_S.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0080100010",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.SUPPLIER_NONE.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_HIT_S.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.SUPPLIER_NONE.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000020020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_S.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0100100010",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_S.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_HIT_S.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_S.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200100122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_S.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x0200100010",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_F.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_HIT_S.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_F.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080200001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0400100010",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400020080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0800100010",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_M.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_M.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100040010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_S.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x1000100010",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_E.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_HIT_S.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_E.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200080100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_S.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F80100010",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_HIT_S.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080020490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_F.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0080200010",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_HIT_F.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x08000207F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_F.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0100200010",
+ "BriefDescription": "Retired load instructions missed L3 cache as data sources",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_HIT_F.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xD1",
+ "EventName": "MEM_LOAD_RETIRED.L3_MISS",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions with at least one uop that missed in the L3 cache.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x20"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_F.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x0200200010",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_F.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_HIT_F.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_F.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000200100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0400200010",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100020002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0800200010",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.SUPPLIER_NONE.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080028000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_F.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x1000200010",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_HIT_F.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100020491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_F.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F80200010",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_E.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_HIT_F.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_E.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80080120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x00803C0010",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_HIT.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x02003C0120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x01003C0010",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_HIT.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x01003C0010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x02003C0010",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.SUPPLIER_NONE.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_HIT.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.SUPPLIER_NONE.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80020020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x04003C0010",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800020100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x08003C0010",
+ "BriefDescription": "Core-originated cacheable demand requests missed L3",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_HIT.HIT_OTHER_CORE_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "Errata": "SKL057",
+ "EventCode": "0x2E",
+ "EventName": "LONGEST_LAT_CACHE.MISS",
+ "PublicDescription": "Counts core-originated cacheable requests that miss the L3 cache (Longest Latency cache). Requests include data and code reads, Reads-for-Ownership (RFOs), speculative accesses and hardware prefetches from L1 and L2. It does not include all misses to the L3.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x41"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x10003C0010",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_F.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_HIT.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_F.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080200490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F803C0010",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_F.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_HIT.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_F.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200200080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_RFO.SUPPLIER_NONE.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0080020020",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_E.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=SUPPLIER_NONE.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_E.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080080120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0100020020",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000020010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_RFO.SUPPLIER_NONE.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x0200020020",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=SUPPLIER_NONE.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x01003C0120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0400020020",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.SUPPLIER_NONE.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.SUPPLIER_NONE.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80020400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0800020020",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_S.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_S.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80100400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_RFO.SUPPLIER_NONE.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x1000020020",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=SUPPLIER_NONE.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C8000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_RFO.SUPPLIER_NONE.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F80020020",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT.SNOOP_HIT_WITH_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=SUPPLIER_NONE.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x08007C0400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_M.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0080040020",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_M.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_HIT_M.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_M.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000040120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_M.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0100040020",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_HIT_M.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x08007C0100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_M.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x0200040020",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_F.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_HIT_M.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_F.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100200002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0400040020",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x02003C0490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_M.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0800040020",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_F.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_F.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080200100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_M.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x1000040020",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_HIT_M.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x01004007F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_M.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F80040020",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_E.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_HIT_M.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_E.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100080120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_E.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0080080020",
+ "BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_HIT_E.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_RFO",
+ "PublicDescription": "Counts the number of offcore outstanding RFO (store) transactions in the super queue (SQ) every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_E.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0100080020",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_HIT_E.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x08007C0491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_E.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x0200080020",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_S.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_HIT_E.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_S.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000100100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0400080020",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.SUPPLIER_NONE.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000020100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_E.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0800080020",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_E.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_E.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080080010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_E.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x1000080020",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_HIT_E.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080400010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_E.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F80080020",
+ "BriefDescription": "Counts the number of lines that are evicted by L2 cache when triggered by an L2 cache fill. Those lines can be either in modified state or clean state. Modified lines may either be written back to L3 or directly written to memory and not allocated in L3. Clean lines may either be allocated in L3 or dropped",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_HIT_E.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xF2",
+ "EventName": "L2_LINES_OUT.NON_SILENT",
+ "PublicDescription": "Counts the number of lines that are evicted by L2 cache when triggered by an L2 cache fill. Those lines can be either in modified state or clean state. Modified lines may either be written back to L3 or directly written to memory and not allocated in L3. Clean lines may either be allocated in L3 or dropped.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_S.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0080100020",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_M.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_HIT_S.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_M.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000040010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_S.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0100100020",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_HIT_S.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00804007F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_S.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x0200100020",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_HIT_S.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x08007C0020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0400100020",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_S.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_S.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800100100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_S.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0800100020",
+ "BriefDescription": "RFO requests that hit L2 cache",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_HIT_S.HIT_OTHER_CORE_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.RFO_HIT",
+ "PublicDescription": "Counts the RFO (Read-for-Ownership) requests that hit L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc2"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_S.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x1000100020",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_HIT_S.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800100080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_S.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F80100020",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_HIT_S.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400200001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_F.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0080200020",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_HIT_F.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400020100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_F.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0100200020",
+ "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_HIT_F.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
+ "PublicDescription": "Counts the number of offcore outstanding cacheable Core Data Read transactions in the super queue every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_F.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x0200200020",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_HIT_F.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400108000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0400200020",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.ANY_RESPONSE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00000107F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_F.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0800200020",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_E.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_E.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F800807F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_F.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x1000200020",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_M.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_HIT_F.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80040004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_F.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F80200020",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_HIT_F.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200020001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x00803C0020",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_S.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_HIT.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_S.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100100122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x01003C0020",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_S.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_HIT.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_S.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080100122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x02003C0020",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_HIT.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C0001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x04003C0020",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_E.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80080002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x08003C0020",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_M.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_HIT.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_M.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80040010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x10003C0020",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_F.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_HIT.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_F.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200200010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F803C0020",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_S.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_HIT.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_S.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80100100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0080020080",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=SUPPLIER_NONE.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800200001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0100020080",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.OTHER.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080408000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x0200020080",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_M.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=SUPPLIER_NONE.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_M.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080040400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0400020080",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_M.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_M.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100040400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0800020080",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400080491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x1000020080",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=SUPPLIER_NONE.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F80020080",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_F.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=SUPPLIER_NONE.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_F.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80200080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_M.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0080040080",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_M.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_HIT_M.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_M.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100040122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_M.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0100040080",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_E.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_HIT_M.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080088000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_M.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x0200040080",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_HIT_M.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400040490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0400040080",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00803C0080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0800040080",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_S.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_S.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800100122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_M.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x1000040080",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_HIT_M.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100020004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_M.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F80040080",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_S.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_HIT_M.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_S.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080100080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_E.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0080080080",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_F.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_HIT_E.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_F.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000200080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_E.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0100080080",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_HIT_E.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x01003C0002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_E.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x0200080080",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_HIT_E.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400080100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0400080080",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_S.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_S.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F801007F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0800080080",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_S.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_S.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000100400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_E.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x1000080080",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.SUPPLIER_NONE.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_HIT_E.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.SUPPLIER_NONE.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000020400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_E.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F80080080",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_M.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_HIT_E.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_M.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100040490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_S.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0080100080",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_S.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_HIT_S.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80100002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_S.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0100100080",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_HIT_S.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x02003C0002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_S.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x0200100080",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_M.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_HIT_S.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080048000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0400100080",
+ "BriefDescription": "L1D data line replacements",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x51",
+ "EventName": "L1D.REPLACEMENT",
+ "PublicDescription": "Counts L1D data line replacements including opportunistic replacements, and replacements that require stall-for-replace or block-for-replace.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0800100080",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800020010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_S.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x1000100080",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_HIT_S.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x02003C0010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_S.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F80100080",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_HIT_S.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400200010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_F.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0080200080",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_E.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_HIT_F.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_E.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800080122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_F.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0100200080",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_HIT_F.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800200010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_F.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x0200200080",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_F.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_HIT_F.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_F.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80200122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0400200080",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800040490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0800200080",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_S.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000100002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_F.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x1000200080",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_E.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_HIT_F.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_E.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000080491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_F.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F80200080",
+ "BriefDescription": "Demand Data Read requests that hit L2 cache",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_HIT_F.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
+ "PublicDescription": "Counts the number of demand Data Read requests, initiated by load instructions, that hit L2 cache",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x00803C0080",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_HIT.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C07F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x01003C0080",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_S.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_HIT.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_S.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100100010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x02003C0080",
+ "BriefDescription": "All retired load instructions.",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_HIT.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xD0",
+ "EventName": "MEM_INST_RETIRED.ALL_LOADS",
+ "PEBS": "1",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x81"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x04003C0080",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400200400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x08003C0080",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_HIT.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x04003C0080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x10003C0080",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_S.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_HIT.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000108000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F803C0080",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_HIT.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200020010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.SUPPLIER_NONE.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0080020100",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_E.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=SUPPLIER_NONE.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_E.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000080010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0100020100",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800040491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.SUPPLIER_NONE.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x0200020100",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_F.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=SUPPLIER_NONE.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_F.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800208000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0400020100",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.ANY_RESPONSE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0000010010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0800020100",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080020080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.SUPPLIER_NONE.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x1000020100",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_F.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=SUPPLIER_NONE.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_F.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080208000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.SUPPLIER_NONE.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F80020100",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_S.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=SUPPLIER_NONE.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200100002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_M.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0080040100",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_S.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_HIT_M.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_S.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800100020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_M.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0100040100",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_S.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_HIT_M.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_S.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200100020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_M.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x0200040100",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_E.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_HIT_M.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_E.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80080100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0400040100",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400200002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_M.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0800040100",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_S.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_S.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080100120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_M.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x1000040100",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_HIT_M.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400200490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_M.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F80040100",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_HIT_M.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x01003C0100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_E.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0080080100",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_E.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_HIT_E.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_E.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080080491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_E.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0100080100",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_S.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_HIT_E.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_S.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000100122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_E.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x0200080100",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_M.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_HIT_E.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200040004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0400080100",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_M.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_M.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000040100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_E.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0800080100",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400080020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_E.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x1000080100",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_HIT_E.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x08003C0001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_E.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F80080100",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_HIT_E.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80020080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_S.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0080100100",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_E.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_HIT_S.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_E.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100080100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_S.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0100100100",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_F.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_HIT_S.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_F.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080200010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_S.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x0200100100",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_S.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_HIT_S.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80108000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0400100100",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x01003C0490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_S.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0800100100",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_E.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_E.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80080490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_S.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x1000100100",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_E.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_HIT_S.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_E.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10000807F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_S.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F80100100",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_HIT_S.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080020004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_F.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0080200100",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.SUPPLIER_NONE.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_HIT_F.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080020002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_F.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0100200100",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_M.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_HIT_F.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_M.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000040400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_F.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x0200200100",
+ "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_HIT_F.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
+ "PublicDescription": "Counts cycles when offcore outstanding Demand Data Read transactions are present in the super queue (SQ). A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0400200100",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_E.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_E.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100080020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_F.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0800200100",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x04003C0002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_F.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x1000200100",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_HIT_F.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00803C0400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_F.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F80200100",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_F.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_HIT_F.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_F.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200200001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x00803C0100",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_F.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_HIT.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_F.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000200010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x01003C0100",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.ANY_RESPONSE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_HIT.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0000010400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x02003C0100",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_E.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_HIT.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000088000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x04003C0100",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x08003C0004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x08003C0100",
+ "BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_HIT.HIT_OTHER_CORE_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
+ "PublicDescription": "Counts cycles when offcore outstanding cacheable Core Data Read transactions are present in the super queue. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x10003C0100",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_F.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_HIT.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_F.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800200020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F803C0100",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_E.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_HIT.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_E.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080080490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.SUPPLIER_NONE.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0080020400",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_E.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=SUPPLIER_NONE.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_E.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80080122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.SUPPLIER_NONE.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0100020400",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100400010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.SUPPLIER_NONE.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x0200020400",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_M.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=SUPPLIER_NONE.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_M.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80040490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0400020400",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400040120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0800020400",
+ "BriefDescription": "Retired load instructions that split across a cacheline boundary.",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xD0",
+ "EventName": "MEM_INST_RETIRED.SPLIT_LOADS",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions that split across a cacheline boundary.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x41"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.SUPPLIER_NONE.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x1000020400",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=SUPPLIER_NONE.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400040080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.SUPPLIER_NONE.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F80020400",
+ "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=SUPPLIER_NONE.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "CounterMask": "6",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_GE_6",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_M.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0080040400",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_HIT_M.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400100491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_M.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0100040400",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_M.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_HIT_M.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_M.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200040100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_M.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x0200040400",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_HIT_M.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C0122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0400040400",
+ "BriefDescription": "Retired store instructions that miss the STLB.",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xD0",
+ "EventName": "MEM_INST_RETIRED.STLB_MISS_STORES",
+ "L1_Hit_Indication": "1",
+ "PEBS": "1",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x12"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_M.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0800040400",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080400001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_M.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x1000040400",
+ "BriefDescription": "RFO requests to L2 cache",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_HIT_M.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.ALL_RFO",
+ "PublicDescription": "Counts the total number of RFO (read for ownership) requests to L2 cache. L2 RFO requests include both L1D demand RFO misses as well as L1D RFO prefetches.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xe2"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_M.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F80040400",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_HIT_M.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100400120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_E.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0080080400",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_HIT_E.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00803C07F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_E.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0100080400",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_HIT_E.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x04003C0004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_E.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x0200080400",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_S.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_HIT_E.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_S.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080100010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0400080400",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_E.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_E.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100080122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_E.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0800080400",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_M.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_M.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100040080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_E.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x1000080400",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_HIT_E.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C0020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_E.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F80080400",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_HIT_E.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x01003C0004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_S.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0080100400",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_M.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_HIT_S.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_M.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000040491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_S.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0100100400",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_HIT_S.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x01003C0080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_S.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x0200100400",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_S.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_HIT_S.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_S.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800100120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0400100400",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x04003C0020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_S.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0800100400",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800100490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_S.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x1000100400",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_HIT_S.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x01003C07F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_S.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F80100400",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_M.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_HIT_S.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_M.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200040020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_F.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0080200400",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_S.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_HIT_F.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_S.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x01001007F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_F.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0100200400",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_M.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_HIT_F.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_M.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80040122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_F.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x0200200400",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_HIT_F.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100028000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0400200400",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_M.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_M.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x02000407F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_F.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0800200400",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.SUPPLIER_NONE.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.SUPPLIER_NONE.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80020122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_F.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x1000200400",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_HIT_F.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400080490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_F.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F80200400",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_E.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_HIT_F.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_E.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000080400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x00803C0400",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_E.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_HIT.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000080004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x01003C0400",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_HIT.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x08007C0001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x02003C0400",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_S.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_HIT.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100100004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x04003C0400",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100400491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x08003C0400",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_E.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_HIT.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100080004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x10003C0400",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_HIT.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800020001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F803C0400",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_F.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_HIT.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_F.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080200004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.SUPPLIER_NONE.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0080028000",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=OTHER:response=SUPPLIER_NONE.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400088000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.SUPPLIER_NONE.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0100028000",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=OTHER:response=SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400040010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.SUPPLIER_NONE.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x0200028000",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_S.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=OTHER:response=SUPPLIER_NONE.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_S.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100100400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0400028000",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=OTHER:response=SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100020400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0800028000",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_M.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=OTHER:response=SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_M.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800040100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.SUPPLIER_NONE.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x1000028000",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_E.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=OTHER:response=SUPPLIER_NONE.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100088000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.SUPPLIER_NONE.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F80028000",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_S.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=OTHER:response=SUPPLIER_NONE.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_S.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000100491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_HIT_M.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0080048000",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_HIT_M.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400200080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_HIT_M.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0100048000",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.SUPPLIER_NONE.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_HIT_M.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.SUPPLIER_NONE.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200020020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_HIT_M.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x0200048000",
+ "BriefDescription": "Requests from the L1/L2/L3 hardware prefetchers or Load software prefetches",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_HIT_M.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.ALL_PF",
+ "PublicDescription": "Counts the total number of requests from the L2 hardware prefetchers.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xf8"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0400048000",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_F.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_F.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100200122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_HIT_M.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0800048000",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_S.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200100004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_HIT_M.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x1000048000",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_HIT_M.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400080001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_HIT_M.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F80048000",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_F.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_HIT_M.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_F.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100200400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_HIT_E.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0080088000",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_E.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_HIT_E.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_E.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100080491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_HIT_E.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0100088000",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_E.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_HIT_E.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_E.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000080490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_HIT_E.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x0200088000",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_M.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_HIT_E.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100040002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0400088000",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_E.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_E.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80080080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_HIT_E.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0800088000",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_F.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_F.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200200400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_HIT_E.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x1000088000",
+ "BriefDescription": "L2 code requests",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_HIT_E.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.ALL_CODE_RD",
+ "PublicDescription": "Counts the total number of L2 code requests.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xe4"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_HIT_E.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F80088000",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_HIT_E.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800100010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_HIT_S.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0080108000",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_HIT_S.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80020001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_HIT_S.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0100108000",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_E.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_HIT_S.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_E.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800080100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_HIT_S.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x0200108000",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_HIT_S.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x01003C0491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0400108000",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400040002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_HIT_S.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0800108000",
+ "BriefDescription": "Demand requests to L2 cache",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_HIT_S.HIT_OTHER_CORE_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.ALL_DEMAND_REFERENCES",
+ "PublicDescription": "Demand requests to L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xe7"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_HIT_S.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x1000108000",
+ "BriefDescription": "Retired load instructions which data sources missed L3 but serviced from remote dram",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_HIT_S.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xD3",
+ "EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM",
+ "PEBS": "1",
+ "SampleAfterValue": "100007",
+ "UMask": "0x2"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_HIT_S.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F80108000",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.SUPPLIER_NONE.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_HIT_S.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.SUPPLIER_NONE.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F800207F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_HIT_F.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0080208000",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_M.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_HIT_F.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_M.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x01000407F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_HIT_F.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0100208000",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_HIT_F.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800080490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_HIT_F.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x0200208000",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_S.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_HIT_F.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_S.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200100100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0400208000",
+ "BriefDescription": "L2 cache misses when fetching instructions",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.CODE_RD_MISS",
+ "PublicDescription": "Counts L2 cache misses when fetching instructions.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x24"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_HIT_F.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0800208000",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_S.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_S.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200100080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_HIT_F.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x1000208000",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_E.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_HIT_F.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_E.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000080120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_HIT_F.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F80208000",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_M.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_HIT_F.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_M.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080040100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_HIT.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x00803C8000",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_HIT.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800080491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_HIT.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x01003C8000",
+ "BriefDescription": "Offcore outstanding Demand Data Read transactions in uncore queue.",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_HIT.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD",
+ "PublicDescription": "Counts the number of offcore outstanding Demand Data Read transactions in the super queue (SQ) every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor. See the corresponding Umask under OFFCORE_REQUESTS.Note: A prefetch promoted to Demand is counted from the promotion point.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_HIT.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x02003C8000",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_HIT.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x08003C0122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x04003C8000",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400200004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_HIT.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x08003C8000",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_HIT.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x04000407F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_HIT.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x10003C8000",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_HIT.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400100020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_HIT.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F803C8000",
+ "BriefDescription": "Cycles with offcore outstanding Code Reads transactions in the SuperQueue (SQ), queue to uncore.",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_CODE_RD",
+ "PublicDescription": "Counts the number of offcore outstanding Code Reads transactions in the super queue every cycle. The 'Offcore outstanding' state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0080020490",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=SUPPLIER_NONE.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x01000207F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0100020490",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800020122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x0200020490",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_M.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=SUPPLIER_NONE.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000040002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0400020490",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00803C0100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0800020490",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_M.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_M.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80040080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x1000020490",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=SUPPLIER_NONE.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100020490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F80020490",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_S.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=SUPPLIER_NONE.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80100001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_M.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0080040490",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_HIT_M.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400040100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_M.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0100040490",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_S.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_HIT_M.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_S.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000100490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_M.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x0200040490",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_HIT_M.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800100004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0400040490",
+ "BriefDescription": "Core-originated cacheable demand requests that refer to L3",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "Errata": "SKL057",
+ "EventCode": "0x2E",
+ "EventName": "LONGEST_LAT_CACHE.REFERENCE",
+ "PublicDescription": "Counts core-originated cacheable requests to the L3 cache (Longest Latency cache). Requests include data and code reads, Reads-for-Ownership (RFOs), speculative accesses and hardware prefetches from L1 and L2. It does not include all accesses to the L3.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x4f"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0800040490",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_M.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200040002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_M.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x1000040490",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_HIT_M.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00803C0002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_M.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F80040490",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_E.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_HIT_M.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_E.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000080020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_E.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0080080490",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_HIT_E.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400020001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_E.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0100080490",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_F.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_HIT_E.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_F.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000208000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_E.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x0200080490",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_E.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_HIT_E.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_E.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200080020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0400080490",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x01003C0122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0800080490",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_F.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_F.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80200002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_E.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x1000080490",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_HIT_E.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100400100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_E.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F80080490",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_M.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_HIT_E.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_M.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100040491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_S.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0080100490",
+ "BriefDescription": "Retired load instructions that miss the STLB.",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_HIT_S.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xD0",
+ "EventName": "MEM_INST_RETIRED.STLB_MISS_LOADS",
+ "PEBS": "1",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x11"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_S.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0100100490",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_HIT_S.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100020100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_S.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x0200100490",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_F.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_HIT_S.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_F.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200200122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0400100490",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80400010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0800100490",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80400080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_S.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x1000100490",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_HIT_S.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C0010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_S.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F80100490",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_F.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_HIT_S.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_F.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80200004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_F.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0080200490",
+ "BriefDescription": "Retired load instructions with local Intel\u00ae Optane\u2122 DC persistent memory as the data source where the data request missed all caches. Precise event.",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_HIT_F.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1",
+ "ELLC": "1",
+ "EventCode": "0xD1",
+ "EventName": "MEM_LOAD_RETIRED.LOCAL_PMM",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions with local Intel\u00ae Optane\u2122 DC persistent memory as the data source and the data request missed L3 (AppDirect or Memory Mode) and DRAM cache(Memory Mode). Precise event",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x80"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_F.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0100200490",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_HIT_F.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x08003C0002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_F.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x0200200490",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_HIT_F.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800040010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0400200490",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.ANY_RESPONSE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0000010491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0800200490",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x08003C0400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_F.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x1000200490",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_HIT_F.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400200120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_F.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F80200490",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_S.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_HIT_F.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_S.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100100490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT.SNOOP_NONE",
+ "BriefDescription": "This event is deprecated. Refer to new event L2_LINES_OUT.USELESS_HWPF",
+ "Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
"Deprecated": "1",
- "MSRValue": "0x00803C0490",
+ "EventCode": "0xF2",
+ "EventName": "L2_LINES_OUT.USELESS_PREF",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_HIT.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00803C0122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x01003C0490",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_S.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_HIT.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_S.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200100491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x02003C0490",
+ "BriefDescription": "Demand Data Read miss L2, no rejects",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_HIT.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.DEMAND_DATA_RD_MISS",
+ "PublicDescription": "Counts the number of demand Data Read requests that miss L2 cache. Only not rejected loads are counted.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x21"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x04003C0490",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_E.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000080001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x08003C0490",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.SUPPLIER_NONE.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_HIT.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000020002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x10003C0490",
+ "BriefDescription": "Retired load instructions which data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_HIT.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xD2",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS",
+ "PEBS": "1",
+ "SampleAfterValue": "20011",
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F803C0490",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_HIT.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800028000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.SUPPLIER_NONE.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0080020120",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=SUPPLIER_NONE.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x08003C0080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0100020120",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x02003C0400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.SUPPLIER_NONE.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x0200020120",
+ "BriefDescription": "All L2 requests",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=SUPPLIER_NONE.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.REFERENCES",
+ "PublicDescription": "All L2 requests.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xff"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0400020120",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_S.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_S.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080100400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0800020120",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.SUPPLIER_NONE.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.SUPPLIER_NONE.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080020400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.SUPPLIER_NONE.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x1000020120",
+ "BriefDescription": "Cycles with L1D load Misses outstanding.",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=SUPPLIER_NONE.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES",
+ "PublicDescription": "Counts duration of L1D miss outstanding in cycles.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.SUPPLIER_NONE.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F80020120",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_S.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=SUPPLIER_NONE.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_S.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100100020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_M.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0080040120",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_S.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_HIT_M.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080108000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_M.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0100040120",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_HIT_M.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400080080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_M.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x0200040120",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_HIT_M.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400100120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0400040120",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x02003C0001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_M.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0800040120",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_M.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_M.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080040490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_M.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x1000040120",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_HIT_M.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800020002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_M.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F80040120",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_HIT_M.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_E.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0080080120",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_E.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_HIT_E.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_E.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080080100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_E.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0100080120",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.SUPPLIER_NONE.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_HIT_E.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.SUPPLIER_NONE.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80020120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_E.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x0200080120",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_M.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_HIT_E.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_M.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800040122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0400080120",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C0491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_E.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0800080120",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x04003C8000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_E.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x1000080120",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_S.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_HIT_E.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_S.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80100491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_E.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F80080120",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_HIT_E.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x02003C0100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_S.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0080100120",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_F.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_HIT_S.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_F.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x02002007F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_S.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0100100120",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_F.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_HIT_S.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_F.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080200080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_S.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x0200100120",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_HIT_S.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080400400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0400100120",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800020020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_S.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0800100120",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400020004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_S.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x1000100120",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_S.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_HIT_S.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_S.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00801007F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_S.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F80100120",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_HIT_S.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400100100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_F.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0080200120",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_HIT_F.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_F.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0100200120",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_HIT_F.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C0120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_F.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x0200200120",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.SUPPLIER_NONE.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_HIT_F.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.SUPPLIER_NONE.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200020120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0400200120",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_S.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100100002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_F.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0800200120",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_S.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200108000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_F.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x1000200120",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_HIT_F.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x02003C0080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_F.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F80200120",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_M.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_HIT_F.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_M.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80040120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x00803C0120",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_HIT.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800200491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x01003C0120",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_E.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_HIT.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_E.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000080080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x02003C0120",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.SUPPLIER_NONE.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_HIT.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200020002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x04003C0120",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400200491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x08003C0120",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_HIT.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080400122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x10003C0120",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_E.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_HIT.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800088000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F803C0120",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_M.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_HIT.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_M.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000040122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0080020491",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=SUPPLIER_NONE.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0100020491",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_F.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_F.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x01002007F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x0200020491",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=SUPPLIER_NONE.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x01003C0400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0400020491",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x02003C0491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0800020491",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_M.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800040002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x1000020491",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_E.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=SUPPLIER_NONE.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000080002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F80020491",
+ "BriefDescription": "Retired load instructions with locked access.",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=SUPPLIER_NONE.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xD0",
+ "EventName": "MEM_INST_RETIRED.LOCK_LOADS",
+ "PEBS": "1",
+ "SampleAfterValue": "100007",
+ "UMask": "0x21"
},
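
MEM_INST_RETIRED.LOCK_LOADS above is a precise (PEBS) event, and "Data_LA": "1" indicates that the PEBS record carries the data linear address, so sampled locked loads can be attributed to the data they touched (e.g. via perf mem record, or perf record with address sampling enabled). The 0x21 umask appears to combine a locked-access qualifier bit (0x20) with the load bit (0x01), mirroring ALL_STORES below, whose 0x82 pairs an all-accesses qualifier (0x80) with the store bit (0x02); this decomposition is inferred from the umasks in these files, not documented here.
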
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_M.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0080040491",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_HIT_M.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_M.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0100040491",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_HIT_M.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x04003C0001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_M.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x0200040491",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_E.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_HIT_M.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_E.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000080100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0400040491",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_M.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_M.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080040020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0800040491",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_F.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_F.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800200400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_M.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x1000040491",
+ "BriefDescription": "All retired store instructions.",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_HIT_M.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xD0",
+ "EventName": "MEM_INST_RETIRED.ALL_STORES",
+ "L1_Hit_Indication": "1",
+ "PEBS": "1",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x82"
},
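
MEM_INST_RETIRED.ALL_STORES additionally carries "L1_Hit_Indication": "1", which, as far as these files indicate, means the PEBS record for a sampled store reports whether the store hit the L1 data cache. The larger SampleAfterValue of 2000003 reflects how much more frequent plain stores are than the locked loads above, keeping the sampling overhead comparable.
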
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_M.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F80040491",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_E.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_HIT_M.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_E.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200080400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_E.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0080080491",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_HIT_E.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x01003C0001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_E.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0100080491",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_HIT_E.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400080004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_E.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x0200080491",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_S.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_HIT_E.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_S.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100100120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0400080491",
+ "BriefDescription": "Retired load instructions which data sources missed L3 but serviced from local dram",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xD3",
+ "EventName": "MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM",
+ "PEBS": "1",
+ "PublicDescription": "Retired load instructions which data sources missed L3 but serviced from local DRAM.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
},
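
MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM rounds out the non-offcore additions here: event 0xD3 classifies retired loads by where the data came from once the L3 missed, and umask 0x1 selects lines served from DRAM attached to the local socket. Together with its remote-DRAM sibling (defined elsewhere in these files) it is the usual starting point for NUMA placement analysis, and since it is PEBS with Data_LA it also feeds address-based tooling such as perf mem.
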
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0800080491",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800020491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_E.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x1000080491",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_HIT_E.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400040004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_E.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F80080491",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_S.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_HIT_E.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_S.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000100080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_S.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0080100491",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_HIT_S.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080400120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_S.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0100100491",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_HIT_S.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800040004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_S.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x0200100491",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_HIT_S.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x04000207F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0400100491",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000020490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0800100491",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_M.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_M.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000040490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_S.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x1000100491",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_E.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_HIT_S.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800080002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_S.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F80100491",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_S.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_HIT_S.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100108000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_F.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0080200491",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_M.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_HIT_F.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_M.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80040400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_F.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0100200491",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_F.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_HIT_F.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_F.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000200002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_F.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x0200200491",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_M.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_HIT_F.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000040001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0400200491",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_E.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_E.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800080020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0800200491",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_F.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x1000200491",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_E.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_HIT_F.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_E.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080080122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_F.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F80200491",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_M.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_HIT_F.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080040002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x00803C0491",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_M.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_HIT.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000048000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x01003C0491",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_E.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_HIT.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_E.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x01000807F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x02003C0491",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_HIT.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800020004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x04003C0491",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080020001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x08003C0491",
+ "BriefDescription": "Demand RFO requests including regular RFOs, locks, ItoM",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_HIT.HIT_OTHER_CORE_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xB0",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_RFO",
+ "PublicDescription": "Counts the demand RFO (read for ownership) requests including regular RFOs, locks, ItoM.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x4"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x10003C0491",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_M.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_HIT.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_M.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080040491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F803C0491",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_HIT.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80020010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.SUPPLIER_NONE.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0080020122",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=SUPPLIER_NONE.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400200100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0100020122",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_S.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_S.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100100080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.SUPPLIER_NONE.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x0200020122",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_M.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=SUPPLIER_NONE.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000040004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0400020122",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800040080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0800020122",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_M.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080040001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.SUPPLIER_NONE.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x1000020122",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.SUPPLIER_NONE.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=SUPPLIER_NONE.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.SUPPLIER_NONE.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080020120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.SUPPLIER_NONE.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F80020122",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.SUPPLIER_NONE.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=SUPPLIER_NONE.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.SUPPLIER_NONE.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x02000207F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_M.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0080040122",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_S.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_HIT_M.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200100001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_M.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0100040122",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_E.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_HIT_M.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_E.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800080400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_M.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x0200040122",
+ "BriefDescription": "Demand Data Read requests sent to uncore",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_HIT_M.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xB0",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_DATA_RD",
+ "PublicDescription": "Counts the Demand Data Read requests sent to uncore. Use it in conjunction with OFFCORE_REQUESTS_OUTSTANDING to determine average latency in the uncore.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
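
The DEMAND_DATA_RD description above suggests the usual occupancy-over-throughput estimate: dividing the OFFCORE_REQUESTS_OUTSTANDING occupancy count by this request count approximates the average uncore latency in core cycles. A hedged sketch, assuming both event aliases are exported on the target machine:

    # average demand-read latency ~= outstanding-cycles / requests
    perf stat -e offcore_requests_outstanding.demand_data_rd,offcore_requests.demand_data_rd -a -- sleep 1
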
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0400040122",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x04001007F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_M.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0800040122",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00803C0010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_M.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x1000040122",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_F.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_HIT_M.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_F.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200200004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_M.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F80040122",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_HIT_M.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800040001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_E.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0080080122",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_HIT_E.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_E.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0100080122",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_S.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_HIT_E.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_S.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080100100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_E.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x0200080122",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_HIT_E.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x08003C0010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0400080122",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800020490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_E.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0800080122",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_M.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100040004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_E.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x1000080122",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_HIT_E.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800200080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_E.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F80080122",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_F.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_HIT_E.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_F.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200200120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_S.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0080100122",
+ "BriefDescription": "Retired load instructions whose data sources was remote HITM",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_HIT_S.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xD3",
+ "EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM",
+ "PEBS": "1",
+ "PublicDescription": "Retired load instructions whose data sources was remote HITM.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x4"
},
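
Because MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM above is flagged PEBS-capable with Data_LA, it can be sampled precisely, and each sample carries the load's data linear address. A minimal sketch, assuming the event alias is available on the running kernel:

    # precise sampling, recording data addresses (-d) system-wide
    perf record -e mem_load_l3_miss_retired.remote_hitm:pp -d -a -- sleep 10
    perf report
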
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_S.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0100100122",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_E.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_HIT_S.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_E.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80080010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_S.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x0200100122",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.SUPPLIER_NONE.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_HIT_S.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200020100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0400100122",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_M.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_M.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80040100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_S.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0800100122",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.ANY_RESPONSE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.OTHER.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0000018000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_S.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x1000100122",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_HIT_S.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100020020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_S.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F80100122",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_F.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_HIT_S.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_F.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080200491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_F.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0080200122",
+ "BriefDescription": "RFO requests that miss L2 cache",
+ "Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.RFO_MISS",
+ "PublicDescription": "Counts the RFO (Read-for-Ownership) requests that miss L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x22"
+ },
+ {
+ "BriefDescription": "Counts the number of lines that have been hardware prefetched but not used and now evicted by L2 cache",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_HIT_F.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xF2",
+ "EventName": "L2_LINES_OUT.USELESS_HWPF",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_S.SNOOP_MISS",
+ "Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_S.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200100010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_F.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0100200122",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_E.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_HIT_F.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_E.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200080010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_F.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x0200200122",
+ "BriefDescription": "Requests from the L1/L2/L3 hardware prefetchers or Load software prefetches that hit L2 cache",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_HIT_F.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.PF_HIT",
+ "PublicDescription": "Counts requests from the L1/L2/L3 hardware prefetchers or Load software prefetches that hit L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xd8"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0400200122",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_S.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_S.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80100020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_F.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0800200122",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400020002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_F.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x1000200122",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_HIT_F.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x08003C0020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_F.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F80200122",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_HIT_F.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400100490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x00803C0122",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_HIT.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000020001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x01003C0122",
+ "BriefDescription": "Cacheable and noncachaeble code read requests",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_HIT.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xB0",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_CODE_RD",
+ "PublicDescription": "Counts both cacheable and non-cacheable code read requests.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x2"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x02003C0122",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_HIT.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80400120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x04003C0122",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_S.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_S.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80100010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x08003C0122",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_M.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_HIT.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_M.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200040122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x10003C0122",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.SUPPLIER_NONE.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_HIT.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.SUPPLIER_NONE.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000020122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F803C0122",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_HIT.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.OTHER.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100408000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.SUPPLIER_NONE.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x00800207F7",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=SUPPLIER_NONE.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x08007C0122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.SUPPLIER_NONE.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x01000207F7",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_F.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_F.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000200004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.SUPPLIER_NONE.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x02000207F7",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_E.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=SUPPLIER_NONE.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80088000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x04000207F7",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800080004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x08000207F7",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_S.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000100001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.SUPPLIER_NONE.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x10000207F7",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_F.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=SUPPLIER_NONE.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_F.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x08002007F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.SUPPLIER_NONE.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F800207F7",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_S.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=SUPPLIER_NONE.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_S.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100100100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_M.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x00800407F7",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_HIT_M.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100020001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_M.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x01000407F7",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_E.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_HIT_M.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_E.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080080080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_M.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x02000407F7",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.SUPPLIER_NONE.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_HIT_M.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80020002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x04000407F7",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x08003C0120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_M.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x08000407F7",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200020004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_M.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x10000407F7",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_HIT_M.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080400100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_M.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F800407F7",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_HIT_M.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x01003C0020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_E.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x00800807F7",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_M.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_HIT_E.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_M.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100040020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_E.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x01000807F7",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.SUPPLIER_NONE.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_HIT_E.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.SUPPLIER_NONE.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080020122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_E.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x02000807F7",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_M.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_HIT_E.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_M.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000040080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x04000807F7",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_E.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_E.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100080010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_E.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x08000807F7",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_M.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80040002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_E.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x10000807F7",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_HIT_E.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.OTHER.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80408000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_E.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F800807F7",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_M.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_HIT_E.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_M.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100040120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_S.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x00801007F7",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_HIT_S.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400200122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_S.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x01001007F7",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_F.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_HIT_S.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_F.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000200001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_S.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x02001007F7",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_HIT_S.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800100001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x04001007F7",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x02003C07F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_S.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x08001007F7",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT.SNOOP_HIT_WITH_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x08007C07F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_S.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x10001007F7",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_HIT_S.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x08007C0002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_S.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F801007F7",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_M.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_HIT_S.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_M.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200040490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_F.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x00802007F7",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_S.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_HIT_F.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080100004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_F.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x01002007F7",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_E.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_HIT_F.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200080001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_F.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x02002007F7",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.ANY_RESPONSE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_HIT_F.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0000010120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x04002007F7",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_M.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_M.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00800407F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_F.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x08002007F7",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_E.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100080001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_F.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x10002007F7",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_HIT_F.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C07F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_F.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F802007F7",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_HIT_F.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800020400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x00803C07F7",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_E.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_HIT.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_E.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00800807F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x01003C07F7",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_HIT.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200020491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x02003C07F7",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_S.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_HIT.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80100004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x04003C07F7",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_S.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_S.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800100400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x08003C07F7",
+ "BriefDescription": "Retired load instructions which data sources were load missed L1 but hit FB due to preceding miss to the same cache line with data not ready",
+ "Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xD1",
+ "EventName": "MEM_LOAD_RETIRED.FB_HIT",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions with at least one uop was load missed in L1 but hit FB (Fill Buffers) due to preceding miss to the same cache line with data not ready.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_F.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_HIT.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_F.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800200120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x10003C07F7",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_HIT.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400080010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F803C07F7",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_E.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_HIT.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200080002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.ANY_RESPONSE",
- "Deprecated": "1",
- "MSRValue": "0x0000010001",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_E.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=ANY_RESPONSE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_E.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200080490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x01003C0001",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_S.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_HIT.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000100004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
- "Deprecated": "1",
- "MSRValue": "0x08007C0001",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_E.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_HIT.SNOOP_HIT_WITH_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_E.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200080120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_RFO.ANY_RESPONSE",
- "Deprecated": "1",
- "MSRValue": "0x0000010002",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.SUPPLIER_NONE.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=ANY_RESPONSE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080020100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x01003C0002",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_M.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_HIT.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_M.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F800407F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
- "Deprecated": "1",
- "MSRValue": "0x08007C0002",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_HIT.SNOOP_HIT_WITH_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400040400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.ANY_RESPONSE",
- "Deprecated": "1",
- "MSRValue": "0x0000010004",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_E.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=ANY_RESPONSE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_E.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080080020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x01003C0004",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_F.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_HIT.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_F.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200208000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
- "Deprecated": "1",
- "MSRValue": "0x08007C0004",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_HIT.SNOOP_HIT_WITH_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400020490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.ANY_RESPONSE",
- "Deprecated": "1",
- "MSRValue": "0x0000010010",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_F.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=ANY_RESPONSE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_F.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080200122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x01003C0010",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_E.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_HIT.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080080002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
- "Deprecated": "1",
- "MSRValue": "0x08007C0010",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_HIT.SNOOP_HIT_WITH_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00803C0001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_RFO.ANY_RESPONSE",
- "Deprecated": "1",
- "MSRValue": "0x0000010020",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_F.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=ANY_RESPONSE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_F.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100200491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x01003C0020",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_E.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_HIT.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_E.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100080490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
- "Deprecated": "1",
- "MSRValue": "0x08007C0020",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_M.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_HIT.SNOOP_HIT_WITH_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80048000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.ANY_RESPONSE",
- "Deprecated": "1",
- "MSRValue": "0x0000010080",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=ANY_RESPONSE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800020080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x01003C0080",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_HIT.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C0490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
- "Deprecated": "1",
- "MSRValue": "0x08007C0080",
+ "BriefDescription": "Retired load instructions with L2 cache hits as data sources",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_HIT.SNOOP_HIT_WITH_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xD1",
+ "EventName": "MEM_LOAD_RETIRED.L2_HIT",
+ "PEBS": "1",
+ "PublicDescription": "Retired load instructions with L2 cache hits as data sources.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x2"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.ANY_RESPONSE",
- "Deprecated": "1",
- "MSRValue": "0x0000010100",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=ANY_RESPONSE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x02003C0020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x01003C0100",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_F.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_HIT.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_F.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200200490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
- "Deprecated": "1",
- "MSRValue": "0x08007C0100",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_HIT.SNOOP_HIT_WITH_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80400020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.ANY_RESPONSE",
- "Deprecated": "1",
- "MSRValue": "0x0000010400",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_F.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=ANY_RESPONSE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_F.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00802007F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x01003C0400",
+ "BriefDescription": "L1D miss outstandings duration in cycles",
+ "Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.PENDING",
+ "PublicDescription": "Counts duration of L1D miss outstanding, that is each cycle number of Fill Buffers (FB) outstanding required by Demand Reads. FB either is held by demand loads, or it is held by non-demand loads and gets hit at least once by demand. The valid outstanding interval is defined until the FB deallocation by one of the following ways: from FB allocation, if FB is allocated by demand from the demand Hit FB, if it is allocated by hardware or software prefetch.Note: In the L1D, a Demand Read contains cacheable or noncacheable demand loads, including ones causing cache-line splits and reads due to page walks resulted from any request type.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_HIT.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400080002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT.SNOOP_HIT_WITH_FWD",
- "Deprecated": "1",
- "MSRValue": "0x08007C0400",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_HIT.SNOOP_HIT_WITH_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800200004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.ANY_RESPONSE",
- "Deprecated": "1",
- "MSRValue": "0x0000018000",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=OTHER:response=ANY_RESPONSE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x02003C0004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_HIT.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x01003C8000",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_HIT.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C0002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_HIT.SNOOP_HIT_WITH_FWD",
- "Deprecated": "1",
- "MSRValue": "0x08007C8000",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_HIT.SNOOP_HIT_WITH_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x04003C0120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.ANY_RESPONSE",
- "Deprecated": "1",
- "MSRValue": "0x0000010490",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_S.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=ANY_RESPONSE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_S.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80100490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x01003C0490",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_HIT.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800200490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
- "Deprecated": "1",
- "MSRValue": "0x08007C0490",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_F.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_HIT.SNOOP_HIT_WITH_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_F.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80200120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.ANY_RESPONSE",
- "Deprecated": "1",
- "MSRValue": "0x0000010120",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_E.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=ANY_RESPONSE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_E.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100080400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x01003C0120",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_HIT.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x08007C0120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
- "Deprecated": "1",
- "MSRValue": "0x08007C0120",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_HIT.SNOOP_HIT_WITH_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100400400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.ANY_RESPONSE",
- "Deprecated": "1",
- "MSRValue": "0x0000010491",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_F.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=ANY_RESPONSE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_F.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080200120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x01003C0491",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_HIT.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000020004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
- "Deprecated": "1",
- "MSRValue": "0x08007C0491",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.SUPPLIER_NONE.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_HIT.SNOOP_HIT_WITH_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.SUPPLIER_NONE.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000020120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.ANY_RESPONSE",
- "Deprecated": "1",
- "MSRValue": "0x0000010122",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_F.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=ANY_RESPONSE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_F.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200200002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x01003C0122",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_HIT.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00803C0490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
- "Deprecated": "1",
- "MSRValue": "0x08007C0122",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_HIT.SNOOP_HIT_WITH_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x04003C0491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.ANY_RESPONSE",
- "Deprecated": "1",
- "MSRValue": "0x00000107F7",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_E.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=ANY_RESPONSE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_E.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80080491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x01003C07F7",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_HIT.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00803C0020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT.SNOOP_HIT_WITH_FWD",
- "Deprecated": "1",
- "MSRValue": "0x08007C07F7",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_HIT.SNOOP_HIT_WITH_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x04003C0490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0100400001",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0080400001",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x04003C07F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0100400002",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100020010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0080400002",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_S.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_S.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100100491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0100400004",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_F.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_F.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80208000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0080400004",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x04000807F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0100400010",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C0004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0080400010",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x01003C8000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0100400020",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80400004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0080400020",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400100080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0100400080",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400020020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0080400080",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400020120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0100400100",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080400004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0080400100",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80400491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0100400400",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_F.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_F.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10002007F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0080400400",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x08003C8000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0100408000",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=OTHER:response=PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800080080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0080408000",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=OTHER:response=PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200020490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0100400490",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400100400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0080400490",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400020400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0100400120",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_M.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_M.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000040020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0080400120",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_E.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_E.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000080122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0100400491",
+ "BriefDescription": "Retired load instructions with L1 cache hits as data sources",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xD1",
+ "EventName": "MEM_LOAD_RETIRED.L1_HIT",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L1 data cache. This event includes all SW prefetches and lock instructions regardless of the data source.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Demand and prefetch data reads",
+ "Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xB0",
+ "EventName": "OFFCORE_REQUESTS.ALL_DATA_RD",
+ "PublicDescription": "Counts the demand and prefetch data reads. All Core Data Reads include cacheable 'Demands' and L2 prefetchers (not L3 prefetchers). Counting also covers reads due to page walks resulted from any request type.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x8"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0080400491",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_M.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800048000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0100400122",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_F.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_F.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100208000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0080400122",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100020080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x01004007F7",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x00804007F7",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_F.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_F.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200200491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F80400001",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C8000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F80400002",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_F.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_F.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800200002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F80400004",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_M.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_M.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080040122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F80400010",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_E.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_E.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200080491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F80400020",
+ "BriefDescription": "Offcore outstanding Code Reads transactions in the SuperQueue (SQ), queue to uncore, every cycle.",
+ "Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_CODE_RD",
+ "PublicDescription": "Counts the number of offcore outstanding Code Reads transactions in the super queue every cycle. The 'Offcore outstanding' state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_F.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_F.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000200491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F80400080",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_S.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_S.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080100490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F80400100",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_M.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_M.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100040100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F80400400",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT.SNOOP_HIT_WITH_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x08007C8000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F80408000",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_F.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=OTHER:response=PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_F.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F802007F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F80400490",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_S.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080100001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F80400120",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_S.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_S.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80100122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F80400491",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x04002007F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F80400122",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.ANY_RESPONSE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0000010100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F804007F7",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_E.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080080004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
}
]
\ No newline at end of file
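All of the OFFCORE_RESPONSE entries in this file are programmed the same way: event code 0xB7 or 0xBB selects the offcore-response facility, and the companion MSR (0x1a6 or 0x1a7) takes the MSRValue, whose low 16 bits select the request types while the upper bits select the supplier and snoop responses. The sketch below recomposes two of the MSRValue encodings seen above; note the bit positions are inferred from the values in this file for Skylake/Cascade Lake server parts and are an assumption, not a statement of the architectural layout (consult the Intel SDM before relying on them).

  # Illustrative only: bit positions inferred from the MSRValue fields in this
  # cache.json (Cascade Lake server); not an authoritative layout.
  REQUEST = {                      # low 16 bits: request type
      "DEMAND_DATA_RD": 0, "DEMAND_RFO": 1, "DEMAND_CODE_RD": 2,
      "PF_L2_DATA_RD": 4, "PF_L2_RFO": 5, "PF_L3_DATA_RD": 7,
      "PF_L3_RFO": 8, "PF_L1D_AND_SW": 10, "OTHER": 15,
  }
  RESPONSE = {                     # supplier info
      "ANY_RESPONSE": 16, "SUPPLIER_NONE": 17, "L3_HIT_M": 18,
      "L3_HIT_E": 19, "L3_HIT_S": 20, "L3_HIT_F": 21,
  }
  SNOOP = {                        # snoop info
      "SNOOP_NONE": 31, "NO_SNOOP_NEEDED": 32, "SNOOP_MISS": 33,
      "HIT_OTHER_CORE_NO_FWD": 34, "HIT_OTHER_CORE_FWD": 35,
      "HITM_OTHER_CORE": 36,
  }

  def msr_value(requests, responses, snoops=()):
      """Compose an offcore-response MSR value from named bit fields."""
      val = 0
      for name in requests:
          val |= 1 << REQUEST[name]
      for name in responses:
          val |= 1 << RESPONSE[name]
      for name in snoops:
          val |= 1 << SNOOP[name]
      return val

  # OCR.ALL_RFO.L3_HIT_M.SNOOP_NONE: demand and prefetch RFOs that hit a
  # Modified line in L3 with no snoop sent -- matches 0x0080040122 above.
  all_rfo = ("DEMAND_RFO", "PF_L2_RFO", "PF_L3_RFO")
  assert msr_value(all_rfo, ("L3_HIT_M",), ("SNOOP_NONE",)) == 0x0080040122

  # OCR.PF_L2_RFO.L3_HIT_M.HITM_OTHER_CORE -- matches 0x1000040020 above.
  assert msr_value(("PF_L2_RFO",), ("L3_HIT_M",),
                   ("HITM_OTHER_CORE",)) == 0x1000040020

In practice perf composes these values from the JSON for you: once the event tables are built in, `perf list` on a Cascade Lake machine should show the OCR.* / OFFCORE_RESPONSE.* aliases, and `perf stat -e` accepts them by name.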
diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json b/tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json
index a382b115633d..2ba32af9bc36 100644
--- a/tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json
@@ -1,394 +1,412 @@
[
{
- "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)",
- "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.",
"BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
+ "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)",
"MetricGroup": "TopdownL1",
- "MetricName": "Frontend_Bound"
+ "MetricName": "Frontend_Bound",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound."
},
{
- "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
- "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
"BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
"MetricGroup": "TopdownL1_SMT",
- "MetricName": "Frontend_Bound_SMT"
+ "MetricName": "Frontend_Bound_SMT",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU."
},
{
- "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)",
- "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
"BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations",
+ "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)",
"MetricGroup": "TopdownL1",
- "MetricName": "Bad_Speculation"
+ "MetricName": "Bad_Speculation",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example."
},
{
- "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
- "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU.",
"BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
"MetricGroup": "TopdownL1_SMT",
- "MetricName": "Bad_Speculation_SMT"
+ "MetricName": "Bad_Speculation_SMT",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU."
},
{
- "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )",
- "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
"BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
+ "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )",
"MetricGroup": "TopdownL1",
- "MetricName": "Backend_Bound"
+ "MetricName": "Backend_Bound",
+ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound."
},
{
- "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )",
- "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
"BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )",
"MetricGroup": "TopdownL1_SMT",
- "MetricName": "Backend_Bound_SMT"
+ "MetricName": "Backend_Bound_SMT",
+ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. SMT version; use when SMT is enabled and measuring per logical CPU."
},
{
- "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)",
- "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. ",
"BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)",
"MetricGroup": "TopdownL1",
- "MetricName": "Retiring"
+ "MetricName": "Retiring",
+ "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. "
},
{
- "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
- "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. SMT version; use when SMT is enabled and measuring per logical CPU.",
"BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
"MetricGroup": "TopdownL1_SMT",
- "MetricName": "Retiring_SMT"
+ "MetricName": "Retiring_SMT",
+ "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. SMT version; use when SMT is enabled and measuring per logical CPU."
},
{
+ "BriefDescription": "Instructions Per Cycle (per Logical Processor)",
"MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
- "BriefDescription": "Instructions Per Cycle (per logical thread)",
"MetricGroup": "TopDownL1",
"MetricName": "IPC"
},
{
- "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
"BriefDescription": "Uops Per Instruction",
- "MetricGroup": "Pipeline;Retiring",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
+ "MetricGroup": "Pipeline;Retire",
"MetricName": "UPI"
},
{
- "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
"BriefDescription": "Instruction per taken branch",
- "MetricGroup": "Branches;PGO",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
+ "MetricGroup": "Branches;Fetch_BW;PGO",
"MetricName": "IpTB"
},
{
- "MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
"BriefDescription": "Branch instructions per taken branch. ",
+ "MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;PGO",
"MetricName": "BpTB"
},
{
- "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 64 * ( ICACHE_64B.IFTAG_HIT + ICACHE_64B.IFTAG_MISS ) / 4.1 ) )",
"BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely (includes speculatively fetches) consumed by program instructions",
- "MetricGroup": "PGO",
+ "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 64 * ( ICACHE_64B.IFTAG_HIT + ICACHE_64B.IFTAG_MISS ) / 4.1 ) )",
+ "MetricGroup": "PGO;IcMiss",
"MetricName": "IFetch_Line_Utilization"
},
{
- "MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ))",
"BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
- "MetricGroup": "DSB;Frontend_Bandwidth",
+ "MetricExpr": "IDQ.DSB_UOPS / (IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS)",
+ "MetricGroup": "DSB;Fetch_BW",
"MetricName": "DSB_Coverage"
},
{
+ "BriefDescription": "Cycles Per Instruction (per Logical Processor)",
"MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
- "BriefDescription": "Cycles Per Instruction (threaded)",
"MetricGroup": "Pipeline;Summary",
"MetricName": "CPI"
},
{
+ "BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active.",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD",
- "BriefDescription": "Per-thread actual clocks when the logical processor is active.",
"MetricGroup": "Summary",
"MetricName": "CLKS"
},
{
+ "BriefDescription": "Total issue-pipeline slots (per-Physical Core)",
"MetricExpr": "4 * cycles",
- "BriefDescription": "Total issue-pipeline slots (per core)",
"MetricGroup": "TopDownL1",
"MetricName": "SLOTS"
},
{
+ "BriefDescription": "Total issue-pipeline slots (per-Physical Core)",
"MetricExpr": "4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
- "BriefDescription": "Total issue-pipeline slots (per core)",
"MetricGroup": "TopDownL1_SMT",
"MetricName": "SLOTS_SMT"
},
{
+ "BriefDescription": "Instructions per Load (lower number means higher occurance rate)",
"MetricExpr": "INST_RETIRED.ANY / MEM_INST_RETIRED.ALL_LOADS",
- "BriefDescription": "Instructions per Load (lower number means loads are more frequent)",
- "MetricGroup": "Instruction_Type;L1_Bound",
+ "MetricGroup": "Instruction_Type",
"MetricName": "IpL"
},
{
+ "BriefDescription": "Instructions per Store (lower number means higher occurance rate)",
"MetricExpr": "INST_RETIRED.ANY / MEM_INST_RETIRED.ALL_STORES",
- "BriefDescription": "Instructions per Store",
- "MetricGroup": "Instruction_Type;Store_Bound",
+ "MetricGroup": "Instruction_Type",
"MetricName": "IpS"
},
{
+ "BriefDescription": "Instructions per Branch (lower number means higher occurance rate)",
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES",
- "BriefDescription": "Instructions per Branch",
- "MetricGroup": "Branches;Instruction_Type;Port_5;Port_6",
+ "MetricGroup": "Branches;Instruction_Type",
"MetricName": "IpB"
},
{
+ "BriefDescription": "Instruction per (near) call (lower number means higher occurance rate)",
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_CALL",
- "BriefDescription": "Instruction per (near) call",
"MetricGroup": "Branches",
"MetricName": "IpCall"
},
{
- "MetricExpr": "INST_RETIRED.ANY",
"BriefDescription": "Total number of retired Instructions",
+ "MetricExpr": "INST_RETIRED.ANY",
"MetricGroup": "Summary",
"MetricName": "Instructions"
},
{
- "MetricExpr": "INST_RETIRED.ANY / cycles",
"BriefDescription": "Instructions Per Cycle (per physical core)",
+ "MetricExpr": "INST_RETIRED.ANY / cycles",
"MetricGroup": "SMT",
"MetricName": "CoreIPC"
},
{
- "MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"BriefDescription": "Instructions Per Cycle (per physical core)",
+ "MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"MetricGroup": "SMT",
"MetricName": "CoreIPC_SMT"
},
{
- "MetricExpr": "(( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * ( FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE ) + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE )) / cycles",
"BriefDescription": "Floating Point Operations Per Cycle",
+ "MetricExpr": "(( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * ( FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE ) + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE )) / cycles",
"MetricGroup": "FLOPS",
"MetricName": "FLOPc"
},
{
- "MetricExpr": "(( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * ( FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE ) + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE )) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"BriefDescription": "Floating Point Operations Per Cycle",
+ "MetricExpr": "(( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * ( FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE ) + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE )) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"MetricGroup": "FLOPS_SMT",
"MetricName": "FLOPc_SMT"
},
{
- "MetricExpr": "UOPS_EXECUTED.THREAD / (( UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 ) if #SMT_on else UOPS_EXECUTED.CORE_CYCLES_GE_1)",
"BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
- "MetricGroup": "Pipeline;Ports_Utilization",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / (( UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 ) if #SMT_on else UOPS_EXECUTED.CORE_CYCLES_GE_1)",
+ "MetricGroup": "Pipeline",
"MetricName": "ILP"
},
{
+ "BriefDescription": "Branch Misprediction Cost: Fraction of TopDown slots wasted per non-speculative branch misprediction (jeclear)",
"MetricExpr": "( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * cycles)) * (( INT_MISC.CLEAR_RESTEER_CYCLES + 9 * BACLEARS.ANY ) / cycles) / (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * cycles)) ) * (4 * cycles) / BR_MISP_RETIRED.ALL_BRANCHES",
- "BriefDescription": "Branch Misprediction Cost: Fraction of TopDown slots wasted per branch misprediction (jeclear and baclear)",
- "MetricGroup": "Branch_Mispredicts",
+ "MetricGroup": "BrMispredicts",
"MetricName": "Branch_Misprediction_Cost"
},
{
+ "BriefDescription": "Branch Misprediction Cost: Fraction of TopDown slots wasted per non-speculative branch misprediction (jeclear)",
"MetricExpr": "( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) * (( INT_MISC.CLEAR_RESTEER_CYCLES + 9 * BACLEARS.ANY ) / cycles) / (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) ) * (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) / BR_MISP_RETIRED.ALL_BRANCHES",
- "BriefDescription": "Branch Misprediction Cost: Fraction of TopDown slots wasted per branch misprediction (jeclear and baclear)",
- "MetricGroup": "Branch_Mispredicts_SMT",
+ "MetricGroup": "BrMispredicts_SMT",
"MetricName": "Branch_Misprediction_Cost_SMT"
},
{
- "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
"BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear)",
- "MetricGroup": "Branch_Mispredicts",
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "BrMispredicts",
"MetricName": "IpMispredict"
},
{
+ "BriefDescription": "Core actual clocks when any Logical Processor is active on the Physical Core",
"MetricExpr": "( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )",
- "BriefDescription": "Core actual clocks when any thread is active on the physical core",
"MetricGroup": "SMT",
"MetricName": "CORE_CLKS"
},
{
- "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_RETIRED.L1_MISS + MEM_LOAD_RETIRED.FB_HIT )",
"BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads (in core cycles)",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_RETIRED.L1_MISS + MEM_LOAD_RETIRED.FB_HIT )",
"MetricGroup": "Memory_Bound;Memory_Lat",
"MetricName": "Load_Miss_Real_Latency"
},
{
+ "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)",
"MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES",
- "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-thread)",
"MetricGroup": "Memory_Bound;Memory_BW",
"MetricName": "MLP"
},
{
- "MetricExpr": "( ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING + EPT.WALK_PENDING ) / ( 2 * cycles )",
"BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
+ "MetricExpr": "( ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING + EPT.WALK_PENDING ) / ( 2 * cycles )",
"MetricGroup": "TLB",
"MetricName": "Page_Walks_Utilization"
},
{
- "MetricExpr": "( ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING + EPT.WALK_PENDING ) / ( 2 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )) )",
"BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
+ "MetricExpr": "( ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING + EPT.WALK_PENDING ) / ( 2 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )) )",
"MetricGroup": "TLB_SMT",
"MetricName": "Page_Walks_Utilization_SMT"
},
{
- "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
"BriefDescription": "Average data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
"MetricGroup": "Memory_BW",
"MetricName": "L1D_Cache_Fill_BW"
},
{
- "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
"BriefDescription": "Average data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
"MetricGroup": "Memory_BW",
"MetricName": "L2_Cache_Fill_BW"
},
{
- "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
"BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
"MetricGroup": "Memory_BW",
"MetricName": "L3_Cache_Fill_BW"
},
{
- "MetricExpr": "64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1000000000 / duration_time",
"BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1000000000 / duration_time",
"MetricGroup": "Memory_BW",
"MetricName": "L3_Cache_Access_BW"
},
{
- "MetricExpr": "1000 * MEM_LOAD_RETIRED.L1_MISS / INST_RETIRED.ANY",
"BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
- "MetricGroup": "Cache_Misses;",
+ "MetricExpr": "1000 * MEM_LOAD_RETIRED.L1_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "Cache_Misses",
"MetricName": "L1MPKI"
},
{
- "MetricExpr": "1000 * MEM_LOAD_RETIRED.L2_MISS / INST_RETIRED.ANY",
"BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads",
- "MetricGroup": "Cache_Misses;",
+ "MetricExpr": "1000 * MEM_LOAD_RETIRED.L2_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "Cache_Misses",
"MetricName": "L2MPKI"
},
{
- "MetricExpr": "1000 * L2_RQSTS.MISS / INST_RETIRED.ANY",
"BriefDescription": "L2 cache misses per kilo instruction for all request types (including speculative)",
- "MetricGroup": "Cache_Misses;",
+ "MetricExpr": "1000 * L2_RQSTS.MISS / INST_RETIRED.ANY",
+ "MetricGroup": "Cache_Misses",
"MetricName": "L2MPKI_All"
},
{
- "MetricExpr": "1000 * ( L2_RQSTS.REFERENCES - L2_RQSTS.MISS ) / INST_RETIRED.ANY",
"BriefDescription": "L2 cache hits per kilo instruction for all request types (including speculative)",
- "MetricGroup": "Cache_Misses;",
+ "MetricExpr": "1000 * ( L2_RQSTS.REFERENCES - L2_RQSTS.MISS ) / INST_RETIRED.ANY",
+ "MetricGroup": "Cache_Misses",
"MetricName": "L2HPKI_All"
},
{
- "MetricExpr": "1000 * MEM_LOAD_RETIRED.L3_MISS / INST_RETIRED.ANY",
"BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads",
- "MetricGroup": "Cache_Misses;",
+ "MetricExpr": "1000 * MEM_LOAD_RETIRED.L3_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "Cache_Misses",
"MetricName": "L3MPKI"
},
{
- "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
+ "BriefDescription": "Rate of silent evictions from the L2 cache per Kilo instruction where the evicted lines are dropped (no writeback to L3 or memory)",
+ "MetricExpr": "1000 * L2_LINES_OUT.SILENT / INST_RETIRED.ANY",
+ "MetricGroup": "",
+ "MetricName": "L2_Evictions_Silent_PKI"
+ },
+ {
+ "BriefDescription": "Rate of non silent evictions from the L2 cache per Kilo instruction",
+ "MetricExpr": "1000 * L2_LINES_OUT.NON_SILENT / INST_RETIRED.ANY",
+ "MetricGroup": "",
+ "MetricName": "L2_Evictions_NonSilent_PKI"
+ },
+ {
"BriefDescription": "Average CPU Utilization",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
"MetricGroup": "Summary",
"MetricName": "CPU_Utilization"
},
{
- "MetricExpr": "( (( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * ( FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE ) + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE )) / 1000000000 ) / duration_time",
"BriefDescription": "Giga Floating Point Operations Per Second",
+ "MetricExpr": "( (( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * ( FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE ) + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE )) / 1000000000 ) / duration_time",
"MetricGroup": "FLOPS;Summary",
"MetricName": "GFLOPs"
},
{
- "MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
"BriefDescription": "Average Frequency Utilization relative nominal frequency",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
"MetricGroup": "Power",
"MetricName": "Turbo_Utilization"
},
{
+ "BriefDescription": "Fraction of cycles where both hardware Logical Processors were active",
"MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0",
- "BriefDescription": "Fraction of cycles where both hardware threads were active",
"MetricGroup": "SMT;Summary",
"MetricName": "SMT_2T_Utilization"
},
{
- "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
"BriefDescription": "Fraction of cycles spent in Kernel mode",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
"MetricGroup": "Summary",
"MetricName": "Kernel_Utilization"
},
{
- "MetricExpr": "( 64 * ( uncore_imc@cas_count_read@ + uncore_imc@cas_count_write@ ) / 1000000000 ) / duration_time",
"BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
+ "MetricExpr": "( 64 * ( uncore_imc@cas_count_read@ + uncore_imc@cas_count_write@ ) / 1000000000 ) / duration_time",
"MetricGroup": "Memory_BW",
"MetricName": "DRAM_BW_Use"
},
{
- "MetricExpr": "1000000000 * ( cha@event\\=0x36\\\\\\,umask\\=0x21\\\\\\,config\\=0x40433@ / cha@event\\=0x35\\\\\\,umask\\=0x21\\\\\\,config\\=0x40433@ ) / ( cha_0@event\\=0x0@ / duration_time )",
"BriefDescription": "Average latency of data read request to external memory (in nanoseconds). Accounts for demand loads and L1/L2 prefetches",
+ "MetricExpr": "1000000000 * ( cha@event\\=0x36\\\\\\,umask\\=0x21@ / cha@event\\=0x35\\\\\\,umask\\=0x21@ ) / ( cha_0@event\\=0x0@ / duration_time )",
"MetricGroup": "Memory_Lat",
"MetricName": "DRAM_Read_Latency"
},
{
- "MetricExpr": "cha@event\\=0x36\\\\\\,umask\\=0x21\\\\\\,config\\=0x40433@ / cha@event\\=0x36\\\\\\,umask\\=0x21\\\\\\,thresh\\=1\\\\\\,config\\=0x40433@",
"BriefDescription": "Average number of parallel data read requests to external memory. Accounts for demand loads and L1/L2 prefetches",
+ "MetricExpr": "cha@event\\=0x36\\\\\\,umask\\=0x21@ / cha@event\\=0x36\\\\\\,umask\\=0x21\\\\\\,thresh\\=1@",
"MetricGroup": "Memory_BW",
"MetricName": "DRAM_Parallel_Reads"
},
{
- "MetricExpr": "( 1000000000 * ( imc@event\\=0xe0\\\\\\,umask\\=0x1@ / imc@event\\=0xe3@ ) / imc_0@event\\=0x0@ ) if 1 if 1 == 1 else 0 else 0",
"BriefDescription": "Average latency of data read request to external 3D X-Point memory [in nanoseconds]. Accounts for demand loads and L1/L2 data-read prefetches",
+ "MetricExpr": "( 1000000000 * ( imc@event\\=0xe0\\\\\\,umask\\=0x1@ / imc@event\\=0xe3@ ) / imc_0@event\\=0x0@ ) if 1 if 0 == 1 else 0 else 0",
"MetricGroup": "Memory_Lat",
"MetricName": "MEM_PMM_Read_Latency"
},
{
- "MetricExpr": "( ( 64 * imc@event\\=0xe3@ / 1000000000 ) / duration_time ) if 1 if 1 == 1 else 0 else 0",
"BriefDescription": "Average 3DXP Memory Bandwidth Use for reads [GB / sec]",
+ "MetricExpr": "( ( 64 * imc@event\\=0xe3@ / 1000000000 ) / duration_time ) if 1 if 0 == 1 else 0 else 0",
"MetricGroup": "Memory_BW",
"MetricName": "PMM_Read_BW"
},
{
- "MetricExpr": "( ( 64 * imc@event\\=0xe7@ / 1000000000 ) / duration_time ) if 1 if 1 == 1 else 0 else 0",
"BriefDescription": "Average 3DXP Memory Bandwidth Use for Writes [GB / sec]",
+ "MetricExpr": "( ( 64 * imc@event\\=0xe7@ / 1000000000 ) / duration_time ) if 1 if 0 == 1 else 0 else 0",
"MetricGroup": "Memory_BW",
"MetricName": "PMM_Write_BW"
},
{
- "MetricExpr": "cha_0@event\\=0x0@",
"BriefDescription": "Socket actual clocks when any core is active on that socket",
+ "MetricExpr": "cha_0@event\\=0x0@",
"MetricGroup": "",
"MetricName": "Socket_CLKS"
},
{
+ "BriefDescription": "Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions. )",
+ "MetricExpr": "INST_RETIRED.ANY / ( BR_INST_RETIRED.FAR_BRANCH / 2 )",
+ "MetricGroup": "",
+ "MetricName": "IpFarBranch"
+ },
+ {
+ "BriefDescription": "C3 residency percent per core",
"MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
- "BriefDescription": "C3 residency percent per core",
"MetricName": "C3_Core_Residency"
},
{
+ "BriefDescription": "C6 residency percent per core",
"MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
- "BriefDescription": "C6 residency percent per core",
"MetricName": "C6_Core_Residency"
},
{
+ "BriefDescription": "C7 residency percent per core",
"MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
- "BriefDescription": "C7 residency percent per core",
"MetricName": "C7_Core_Residency"
},
{
+ "BriefDescription": "C2 residency percent per package",
"MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
- "BriefDescription": "C2 residency percent per package",
"MetricName": "C2_Pkg_Residency"
},
{
+ "BriefDescription": "C3 residency percent per package",
"MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
- "BriefDescription": "C3 residency percent per package",
"MetricName": "C3_Pkg_Residency"
},
{
+ "BriefDescription": "C6 residency percent per package",
"MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
- "BriefDescription": "C6 residency percent per package",
"MetricName": "C6_Pkg_Residency"
},
{
+ "BriefDescription": "C7 residency percent per package",
"MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
- "BriefDescription": "C7 residency percent per package",
"MetricName": "C7_Pkg_Residency"
}
]
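
The metric definitions ending above are evaluated by perf as plain arithmetic over raw counter values. As an illustration only (Python, with invented sample counts rather than output from any real run), here is how two of them, GFLOPs and DRAM_BW_Use, reduce to code:

    # Minimal sketch: how perf's MetricExpr strings reduce to arithmetic
    # over event counts. All numbers below are made-up sample values.

    def gflops(counts, duration_s):
        # FLOP-weighted sum of FP_ARITH_INST_RETIRED.* counts, mirroring the
        # GFLOPs MetricExpr above: scalar ops contribute 1 FLOP each, packed
        # ops one FLOP per SIMD lane.
        flops = (1 * (counts["SCALAR_SINGLE"] + counts["SCALAR_DOUBLE"])
                 + 2 * counts["128B_PACKED_DOUBLE"]
                 + 4 * (counts["128B_PACKED_SINGLE"] + counts["256B_PACKED_DOUBLE"])
                 + 8 * (counts["256B_PACKED_SINGLE"] + counts["512B_PACKED_DOUBLE"])
                 + 16 * counts["512B_PACKED_SINGLE"])
        return flops / 1e9 / duration_s

    def dram_bw_gb_s(cas_read, cas_write, duration_s):
        # DRAM_BW_Use: every IMC CAS command transfers one 64-byte line.
        return 64 * (cas_read + cas_write) / 1e9 / duration_s

    counts = {"SCALAR_SINGLE": 0, "SCALAR_DOUBLE": 1_000_000_000,
              "128B_PACKED_DOUBLE": 0, "128B_PACKED_SINGLE": 0,
              "256B_PACKED_DOUBLE": 500_000_000, "256B_PACKED_SINGLE": 0,
              "512B_PACKED_DOUBLE": 0, "512B_PACKED_SINGLE": 0}
    print(gflops(counts, 1.0))                       # 3.0
    print(dram_bw_gb_s(2_000_000, 1_000_000, 1.0))   # 0.192

In a real session the same arithmetic is requested by metric name, e.g. "perf stat -M GFLOPs -a -- <workload>"; the JSON above is what teaches perf those names and expressions.
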
diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/floating-point.json b/tools/perf/pmu-events/arch/x86/cascadelakex/floating-point.json
index 91b38de138f2..3c0b95fd60ad 100644
--- a/tools/perf/pmu-events/arch/x86/cascadelakex/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/cascadelakex/floating-point.json
@@ -1,85 +1,85 @@
[
{
- "EventCode": "0xC7",
- "UMask": "0x1",
- "BriefDescription": "Number of SSE/AVX computational scalar double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computation. Applies to SSE* and AVX* scalar double precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "BriefDescription": "Number of SSE/AVX computational scalar single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
"Counter": "0,1,2,3",
- "EventName": "FP_ARITH_INST_RETIRED.SCALAR_DOUBLE",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0xC7",
- "UMask": "0x2",
- "BriefDescription": "Number of SSE/AVX computational scalar single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
- "Counter": "0,1,2,3",
"EventName": "FP_ARITH_INST_RETIRED.SCALAR_SINGLE",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0xC7",
- "UMask": "0x4",
- "BriefDescription": "Number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT14 RCP14 DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "BriefDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
"Counter": "0,1,2,3",
- "EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0xC7",
- "UMask": "0x8",
- "BriefDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 4 calculations per element.",
- "Counter": "0,1,2,3",
"EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
- "EventCode": "0xC7",
- "UMask": "0x10",
- "BriefDescription": "Number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 4 calculations per element.",
+ "BriefDescription": "Number of SSE/AVX computational 512-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
"Counter": "0,1,2,3",
- "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xC7",
+ "EventName": "FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x40"
},
{
- "EventCode": "0xC7",
- "UMask": "0x20",
- "BriefDescription": "Number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 8 calculations per element.",
+ "BriefDescription": "Number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
"Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xC7",
"EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x20"
},
{
- "EventCode": "0xC7",
- "UMask": "0x40",
- "BriefDescription": "Number of SSE/AVX computational 512-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 8 calculations per element.",
+ "BriefDescription": "Number of SSE/AVX computational scalar double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computation. Applies to SSE* and AVX* scalar double precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
"Counter": "0,1,2,3",
- "EventName": "FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xC7",
+ "EventName": "FP_ARITH_INST_RETIRED.SCALAR_DOUBLE",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xC7",
- "UMask": "0x80",
- "BriefDescription": "Number of SSE/AVX computational 512-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 16 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 16 calculations per element.",
+ "BriefDescription": "Number of SSE/AVX computational 512-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 16 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
"Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xC7",
"EventName": "FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x80"
},
{
- "EventCode": "0xCA",
- "UMask": "0x1e",
"BriefDescription": "Cycles with any input/output SSE or FP assist",
"Counter": "0,1,2,3",
- "EventName": "FP_ASSIST.ANY",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
+ "EventCode": "0xCA",
+ "EventName": "FP_ASSIST.ANY",
"PublicDescription": "Counts cycles with any input and output SSE or x87 FP assist. If an input and output assist are detected on the same cycle the event increments by 1.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1e"
+ },
+ {
+ "BriefDescription": "Number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT14 RCP14 DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xC7",
+ "EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xC7",
+ "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
}
] \ No newline at end of file
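
The reordered file above is easier to scan once you notice that every FP_ARITH_INST_RETIRED event is event 0xC7 with a single umask bit per precision/width combination, and that the "one operation per element" wording fixes the FLOP weight each count carries in the GFLOPs metric. A hypothetical Python table, assembled purely from the descriptions above, makes the pattern explicit:

    # Umask bit -> (event suffix, FLOPs per count) for event 0xC7, as
    # described in the JSON above. Illustrative only; perf consumes the
    # JSON directly and never builds such a table.
    FP_ARITH_UMASKS = {
        0x01: ("SCALAR_DOUBLE",      1),
        0x02: ("SCALAR_SINGLE",      1),
        0x04: ("128B_PACKED_DOUBLE", 2),   # 2 doubles per 128-bit op
        0x08: ("128B_PACKED_SINGLE", 4),   # 4 singles per 128-bit op
        0x10: ("256B_PACKED_DOUBLE", 4),
        0x20: ("256B_PACKED_SINGLE", 8),
        0x40: ("512B_PACKED_DOUBLE", 8),
        0x80: ("512B_PACKED_SINGLE", 16),
    }

    def flops_per_count(umask):
        # e.g. flops_per_count(0x10) == 4: one retired 256-bit
        # packed-double instruction performs four element operations.
        return FP_ARITH_UMASKS[umask][1]

Note the caveat repeated in each description: DPP and FM(N)ADD/SUB count twice because they perform two calculations per element, which is exactly what keeps the weighted sum honest; a fused multiply-add really is two FLOPs per element.
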
diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/frontend.json b/tools/perf/pmu-events/arch/x86/cascadelakex/frontend.json
index 954e64574ee2..3553472ad266 100644
--- a/tools/perf/pmu-events/arch/x86/cascadelakex/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/cascadelakex/frontend.json
@@ -1,482 +1,482 @@
[
{
- "EventCode": "0x79",
- "UMask": "0x4",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.MITE_CYCLES",
- "CounterMask": "1",
- "PublicDescription": "Counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may 'bypass' the IDQ.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x79",
- "UMask": "0x4",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.MITE_UOPS",
- "PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may 'bypass' the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x79",
- "UMask": "0x8",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.DSB_CYCLES",
- "CounterMask": "1",
- "PublicDescription": "Counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may 'bypass' the IDQ.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x79",
- "UMask": "0x8",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
+ "BriefDescription": "Cycles per thread when 4 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled",
"Counter": "0,1,2,3",
- "EventName": "IDQ.DSB_UOPS",
- "PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may 'bypass' the IDQ.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "CounterMask": "4",
+ "EventCode": "0x9C",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE",
+ "PublicDescription": "Counts, on the per-thread basis, cycles when no uops are delivered to Resource Allocation Table (RAT). IDQ_Uops_Not_Delivered.core =4.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0x79",
- "UMask": "0x10",
- "BriefDescription": "Cycles when uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "BriefDescription": "Retired Instructions who experienced Instruction L2 Cache true miss.",
"Counter": "0,1,2,3",
- "EventName": "IDQ.MS_DSB_CYCLES",
- "CounterMask": "1",
- "PublicDescription": "Counts cycles during which uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may 'bypass' the IDQ.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xC6",
+ "EventName": "FRONTEND_RETIRED.L2_MISS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x13",
+ "PEBS": "1",
+ "SampleAfterValue": "100007",
+ "TakenAlone": "1",
+ "UMask": "0x1"
},
{
- "EventCode": "0x79",
- "UMask": "0x18",
- "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop",
+ "BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
"Counter": "0,1,2,3",
- "EventName": "IDQ.ALL_DSB_CYCLES_ANY_UOPS",
- "CounterMask": "1",
- "PublicDescription": "Counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Count includes uops that may 'bypass' the IDQ.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0x79",
- "UMask": "0x18",
- "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 Uops",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.ALL_DSB_CYCLES_4_UOPS",
- "CounterMask": "4",
- "PublicDescription": "Counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Count includes uops that may 'bypass' the IDQ.",
+ "EventName": "IDQ.MS_MITE_UOPS",
+ "PublicDescription": "Counts the number of uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may 'bypass' the IDQ.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x20"
},
{
- "EventCode": "0x79",
- "UMask": "0x20",
- "BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 2 bubble-slots for a period of 2 cycles which was not interrupted by a back-end stall.",
"Counter": "0,1,2,3",
- "EventName": "IDQ.MS_MITE_UOPS",
- "PublicDescription": "Counts the number of uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may 'bypass' the IDQ.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xC6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_2",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x200206",
+ "PEBS": "1",
+ "SampleAfterValue": "100007",
+ "TakenAlone": "1",
+ "UMask": "0x1"
},
{
- "EventCode": "0x79",
- "UMask": "0x24",
- "BriefDescription": "Cycles MITE is delivering any Uop",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 3 bubble-slots for a period of 2 cycles which was not interrupted by a back-end stall.",
"Counter": "0,1,2,3",
- "EventName": "IDQ.ALL_MITE_CYCLES_ANY_UOPS",
- "CounterMask": "1",
- "PublicDescription": "Counts the number of cycles uops were delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. Counting includes uops that may 'bypass' the IDQ. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xC6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_3",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x300206",
+ "PEBS": "1",
+ "SampleAfterValue": "100007",
+ "TakenAlone": "1",
+ "UMask": "0x1"
},
{
- "EventCode": "0x79",
- "UMask": "0x24",
- "BriefDescription": "Cycles MITE is delivering 4 Uops",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 1 bubble-slot for a period of 2 cycles which was not interrupted by a back-end stall.",
"Counter": "0,1,2,3",
- "EventName": "IDQ.ALL_MITE_CYCLES_4_UOPS",
- "CounterMask": "4",
- "PublicDescription": "Counts the number of cycles 4 uops were delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. Counting includes uops that may 'bypass' the IDQ. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xC6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x100206",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after the front-end had at least 1 bubble-slot for a period of 2 cycles. A bubble-slot is an empty issue-pipeline slot while there was no RAT stall.",
+ "SampleAfterValue": "100007",
+ "TakenAlone": "1",
+ "UMask": "0x1"
},
{
- "EventCode": "0x79",
- "UMask": "0x30",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "BriefDescription": "Counts cycles FE delivered 4 uops or Resource Allocation Table (RAT) was stalling FE.",
"Counter": "0,1,2,3",
- "EventName": "IDQ.MS_CYCLES",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
- "PublicDescription": "Counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may 'bypass' the IDQ. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.",
+ "EventCode": "0x9C",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK",
+ "Invert": "1",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0x79",
- "UMask": "0x30",
"BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
"Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x79",
"EventName": "IDQ.MS_UOPS",
"PublicDescription": "Counts the total number of uops delivered by the Microcode Sequencer (MS). Any instruction over 4 uops will be delivered by the MS. Some instructions such as transcendentals may additionally generate uops from the MS.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x30"
},
{
- "EdgeDetect": "1",
- "EventCode": "0x79",
- "UMask": "0x30",
- "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer",
+ "BriefDescription": "Retired Instructions who experienced Instruction L1 Cache true miss.",
"Counter": "0,1,2,3",
- "EventName": "IDQ.MS_SWITCHES",
- "CounterMask": "1",
- "PublicDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xC6",
+ "EventName": "FRONTEND_RETIRED.L1I_MISS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x12",
+ "PEBS": "1",
+ "SampleAfterValue": "100007",
+ "TakenAlone": "1",
+ "UMask": "0x1"
},
{
- "EventCode": "0x80",
- "UMask": "0x4",
- "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache miss.",
+ "BriefDescription": "Cycles with less than 3 uops delivered by the front end.",
"Counter": "0,1,2,3",
- "EventName": "ICACHE_16B.IFDATA_STALL",
- "PublicDescription": "Cycles where a code line fetch is stalled due to an L1 instruction cache miss. The legacy decode pipeline works at a 16 Byte granularity.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EventCode": "0x9C",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_3_UOP_DELIV.CORE",
+ "PublicDescription": "Cycles with less than 3 uops delivered by the front-end.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0x83",
- "UMask": "0x1",
- "BriefDescription": "Instruction fetch tag lookups that hit in the instruction cache (L1I). Counts at 64-byte cache-line granularity.",
- "Counter": "0,1,2,3",
- "EventName": "ICACHE_64B.IFTAG_HIT",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x83",
- "UMask": "0x2",
- "BriefDescription": "Instruction fetch tag lookups that miss in the instruction cache (L1I). Counts at 64-byte cache-line granularity.",
- "Counter": "0,1,2,3",
- "EventName": "ICACHE_64B.IFTAG_MISS",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x83",
- "UMask": "0x4",
- "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache tag miss.",
- "Counter": "0,1,2,3",
- "EventName": "ICACHE_64B.IFTAG_STALL",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "Invert": "1",
- "EventCode": "0x9C",
- "UMask": "0x1",
- "BriefDescription": "Counts cycles FE delivered 4 uops or Resource Allocation Table (RAT) was stalling FE.",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path",
"Counter": "0,1,2,3",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.DSB_CYCLES",
+ "PublicDescription": "Counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may 'bypass' the IDQ.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
- "EventCode": "0x9C",
- "UMask": "0x1",
- "BriefDescription": "Cycles with less than 3 uops delivered by the front end.",
+ "BriefDescription": "Retired Instructions who experienced decode stream buffer (DSB - the decoded instruction-cache) miss.",
"Counter": "0,1,2,3",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_3_UOP_DELIV.CORE",
- "CounterMask": "1",
- "PublicDescription": "Cycles with less than 3 uops delivered by the front-end.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xC6",
+ "EventName": "FRONTEND_RETIRED.DSB_MISS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x11",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired Instructions that experienced DSB (Decode stream buffer i.e. the decoded instruction-cache) miss.",
+ "SampleAfterValue": "100007",
+ "TakenAlone": "1",
+ "UMask": "0x1"
},
{
- "EventCode": "0x9C",
- "UMask": "0x1",
- "BriefDescription": "Cycles with less than 2 uops delivered by the front end.",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
"Counter": "0,1,2,3",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_2_UOP_DELIV.CORE",
- "CounterMask": "2",
- "PublicDescription": "Cycles with less than 2 uops delivered by the front-end.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MITE_UOPS",
+ "PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may 'bypass' the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
- "EventCode": "0x9C",
- "UMask": "0x1",
- "BriefDescription": "Cycles per thread when 3 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
"Counter": "0,1,2,3",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_1_UOP_DELIV.CORE",
- "CounterMask": "3",
- "PublicDescription": "Counts, on the per-thread basis, cycles when less than 1 uop is delivered to Resource Allocation Table (RAT). IDQ_Uops_Not_Delivered.core >= 3.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_CYCLES",
+ "PublicDescription": "Counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may 'bypass' the IDQ. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x30"
},
{
- "EventCode": "0x9C",
- "UMask": "0x1",
- "BriefDescription": "Cycles per thread when 4 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path",
"Counter": "0,1,2,3",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE",
- "CounterMask": "4",
- "PublicDescription": "Counts, on the per-thread basis, cycles when no uops are delivered to Resource Allocation Table (RAT). IDQ_Uops_Not_Delivered.core =4.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MITE_CYCLES",
+ "PublicDescription": "Counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may 'bypass' the IDQ.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
- "EventCode": "0x9C",
- "UMask": "0x1",
"BriefDescription": "Uops not delivered to Resource Allocation Table (RAT) per thread when backend of the machine is not stalled",
"Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x9C",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
"PublicDescription": "Counts the number of uops not delivered to Resource Allocation Table (RAT) per thread adding 4 x when Resource Allocation Table (RAT) is not stalled and Instruction Decode Queue (IDQ) delivers x uops to Resource Allocation Table (RAT) (where x belongs to {0,1,2,3}). Counting does not cover cases when: a. IDQ-Resource Allocation Table (RAT) pipe serves the other thread. b. Resource Allocation Table (RAT) is stalled for the thread (including uop drops and clear BE conditions). c. Instruction Decode Queue (IDQ) delivers four uops.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xAB",
- "UMask": "0x2",
"BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles.",
"Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xAB",
"EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
"PublicDescription": "Counts Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles. These cycles do not include uops routed through because of the switch itself, for example, when Instruction Decode Queue (IDQ) pre-allocation is unavailable, or Instruction Decode Queue (IDQ) is full. SBD-to-MITE switch true penalty cycles happen after the merge mux (MM) receives Decode Stream Buffer (DSB) Sync-indication until receiving the first MITE uop. MM is placed before Instruction Decode Queue (IDQ) to merge uops being fed from the MITE and Decode Stream Buffer (DSB) paths. Decode Stream Buffer (DSB) inserts the Sync-indication whenever a Decode Stream Buffer (DSB)-to-MITE switch occurs.Penalty: A Decode Stream Buffer (DSB) hit followed by a Decode Stream Buffer (DSB) miss can cost up to six cycles in which no uops are delivered to the IDQ. Most often, such switches from the Decode Stream Buffer (DSB) to the legacy pipeline cost 02 cycles.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0xC6",
- "UMask": "0x1",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall.",
- "PEBS": "1",
- "MSRValue": "0x400406",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 8 cycles which was not interrupted by a back-end stall.",
"Counter": "0,1,2,3",
- "EventName": "FRONTEND_RETIRED.LATENCY_GE_4",
- "MSRIndex": "0x3F7",
- "TakenAlone": "1",
- "SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3"
- },
- {
+ "CounterHTOff": "0,1,2,3",
"EventCode": "0xC6",
- "UMask": "0x1",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 2 bubble-slots for a period of 2 cycles which was not interrupted by a back-end stall.",
- "PEBS": "1",
- "MSRValue": "0x200206",
- "Counter": "0,1,2,3",
- "EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_2",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_8",
"MSRIndex": "0x3F7",
- "TakenAlone": "1",
+ "MSRValue": "0x400806",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 8 cycles. During this period the front-end delivered no uops.",
"SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3"
+ "TakenAlone": "1",
+ "UMask": "0x1"
},
{
- "EventCode": "0xC6",
- "UMask": "0x1",
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 2 cycles which was not interrupted by a back-end stall.",
- "PEBS": "1",
- "MSRValue": "0x400206",
"Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xC6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_2",
"MSRIndex": "0x3F7",
- "TakenAlone": "1",
+ "MSRValue": "0x400206",
+ "PEBS": "1",
"SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3"
+ "TakenAlone": "1",
+ "UMask": "0x1"
},
{
- "EventCode": "0xC6",
- "UMask": "0x1",
- "BriefDescription": "Retired Instructions who experienced STLB (2nd level TLB) true miss.",
- "PEBS": "1",
- "MSRValue": "0x15",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall.",
"Counter": "0,1,2,3",
- "EventName": "FRONTEND_RETIRED.STLB_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xC6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_4",
"MSRIndex": "0x3F7",
- "PublicDescription": "Counts retired Instructions that experienced STLB (2nd level TLB) true miss. ",
- "TakenAlone": "1",
+ "MSRValue": "0x400406",
+ "PEBS": "1",
"SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3"
+ "TakenAlone": "1",
+ "UMask": "0x1"
},
{
- "EventCode": "0xC6",
- "UMask": "0x1",
- "BriefDescription": "Retired Instructions who experienced iTLB true miss.",
- "PEBS": "1",
- "MSRValue": "0x14",
+ "BriefDescription": "Cycles when uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
"Counter": "0,1,2,3",
- "EventName": "FRONTEND_RETIRED.ITLB_MISS",
- "MSRIndex": "0x3F7",
- "PublicDescription": "Counts retired Instructions that experienced iTLB (Instruction TLB) true miss.",
- "TakenAlone": "1",
- "SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_DSB_CYCLES",
+ "PublicDescription": "Counts cycles during which uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may 'bypass' the IDQ.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
},
{
- "EventCode": "0xC6",
- "UMask": "0x1",
- "BriefDescription": "Retired Instructions who experienced Instruction L2 Cache true miss.",
- "PEBS": "1",
- "MSRValue": "0x13",
+ "BriefDescription": "Instruction fetch tag lookups that miss in the instruction cache (L1I). Counts at 64-byte cache-line granularity.",
"Counter": "0,1,2,3",
- "EventName": "FRONTEND_RETIRED.L2_MISS",
- "MSRIndex": "0x3F7",
- "TakenAlone": "1",
- "SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x83",
+ "EventName": "ICACHE_64B.IFTAG_MISS",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2"
},
{
- "EventCode": "0xC6",
- "UMask": "0x1",
- "BriefDescription": "Retired Instructions who experienced Instruction L1 Cache true miss.",
- "PEBS": "1",
- "MSRValue": "0x12",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 128 cycles which was not interrupted by a back-end stall.",
"Counter": "0,1,2,3",
- "EventName": "FRONTEND_RETIRED.L1I_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xC6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_128",
"MSRIndex": "0x3F7",
- "TakenAlone": "1",
+ "MSRValue": "0x408006",
+ "PEBS": "1",
"SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3"
+ "TakenAlone": "1",
+ "UMask": "0x1"
},
{
- "EventCode": "0xC6",
- "UMask": "0x1",
- "BriefDescription": "Retired Instructions who experienced decode stream buffer (DSB - the decoded instruction-cache) miss.",
- "PEBS": "1",
- "MSRValue": "0x11",
+ "BriefDescription": "Cycles per thread when 3 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled",
"Counter": "0,1,2,3",
- "EventName": "FRONTEND_RETIRED.DSB_MISS",
- "MSRIndex": "0x3F7",
- "PublicDescription": "Counts retired Instructions that experienced DSB (Decode stream buffer i.e. the decoded instruction-cache) miss. ",
- "TakenAlone": "1",
- "SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "CounterMask": "3",
+ "EventCode": "0x9C",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_1_UOP_DELIV.CORE",
+ "PublicDescription": "Counts, on the per-thread basis, cycles when less than 1 uop is delivered to Resource Allocation Table (RAT). IDQ_Uops_Not_Delivered.core >= 3.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xC6",
- "UMask": "0x1",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 3 bubble-slots for a period of 2 cycles which was not interrupted by a back-end stall.",
- "PEBS": "1",
- "MSRValue": "0x300206",
+ "BriefDescription": "Instruction fetch tag lookups that hit in the instruction cache (L1I). Counts at 64-byte cache-line granularity.",
"Counter": "0,1,2,3",
- "EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_3",
- "MSRIndex": "0x3F7",
- "TakenAlone": "1",
- "SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x83",
+ "EventName": "ICACHE_64B.IFTAG_HIT",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xC6",
- "UMask": "0x1",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 1 bubble-slot for a period of 2 cycles which was not interrupted by a back-end stall.",
- "PEBS": "1",
- "MSRValue": "0x100206",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall.",
"Counter": "0,1,2,3",
- "EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xC6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_64",
"MSRIndex": "0x3F7",
- "PublicDescription": "Counts retired instructions that are delivered to the back-end after the front-end had at least 1 bubble-slot for a period of 2 cycles. A bubble-slot is an empty issue-pipeline slot while there was no RAT stall.",
- "TakenAlone": "1",
+ "MSRValue": "0x404006",
+ "PEBS": "1",
"SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3"
+ "TakenAlone": "1",
+ "UMask": "0x1"
},
{
- "EventCode": "0xC6",
- "UMask": "0x1",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall.",
- "PEBS": "1",
- "MSRValue": "0x420006",
+ "BriefDescription": "Retired Instructions who experienced STLB (2nd level TLB) true miss.",
"Counter": "0,1,2,3",
- "EventName": "FRONTEND_RETIRED.LATENCY_GE_512",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xC6",
+ "EventName": "FRONTEND_RETIRED.STLB_MISS",
"MSRIndex": "0x3F7",
- "TakenAlone": "1",
+ "MSRValue": "0x15",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired Instructions that experienced STLB (2nd level TLB) true miss.",
"SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3"
+ "TakenAlone": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
+ "Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x79",
+ "EventName": "IDQ.DSB_UOPS",
+ "PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may 'bypass' the IDQ.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
},
{
- "EventCode": "0xC6",
- "UMask": "0x1",
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall.",
- "PEBS": "1",
- "MSRValue": "0x410006",
"Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xC6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_256",
"MSRIndex": "0x3F7",
- "TakenAlone": "1",
+ "MSRValue": "0x410006",
+ "PEBS": "1",
"SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3"
+ "TakenAlone": "1",
+ "UMask": "0x1"
},
{
- "EventCode": "0xC6",
- "UMask": "0x1",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 128 cycles which was not interrupted by a back-end stall.",
- "PEBS": "1",
- "MSRValue": "0x408006",
+ "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache miss.",
"Counter": "0,1,2,3",
- "EventName": "FRONTEND_RETIRED.LATENCY_GE_128",
- "MSRIndex": "0x3F7",
- "TakenAlone": "1",
- "SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x80",
+ "EventName": "ICACHE_16B.IFDATA_STALL",
+ "PublicDescription": "Cycles where a code line fetch is stalled due to an L1 instruction cache miss. The legacy decode pipeline works at a 16 Byte granularity.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
},
{
- "EventCode": "0xC6",
- "UMask": "0x1",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall.",
- "PEBS": "1",
- "MSRValue": "0x404006",
+ "BriefDescription": "Cycles MITE is delivering any Uop",
"Counter": "0,1,2,3",
- "EventName": "FRONTEND_RETIRED.LATENCY_GE_64",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.ALL_MITE_CYCLES_ANY_UOPS",
+ "PublicDescription": "Counts the number of cycles uops were delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. Counting includes uops that may 'bypass' the IDQ. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x24"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 16 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xC6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_16",
"MSRIndex": "0x3F7",
- "TakenAlone": "1",
+ "MSRValue": "0x401006",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 16 cycles. During this period the front-end delivered no uops.",
"SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3"
+ "TakenAlone": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles with less than 2 uops delivered by the front end.",
+ "Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "CounterMask": "2",
+ "EventCode": "0x9C",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_2_UOP_DELIV.CORE",
+ "PublicDescription": "Cycles with less than 2 uops delivered by the front-end.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xC6",
- "UMask": "0x1",
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 32 cycles which was not interrupted by a back-end stall.",
- "PEBS": "1",
- "MSRValue": "0x402006",
"Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xC6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_32",
"MSRIndex": "0x3F7",
+ "MSRValue": "0x402006",
+ "PEBS": "1",
"PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 32 cycles. During this period the front-end delivered no uops.",
- "TakenAlone": "1",
"SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3"
+ "TakenAlone": "1",
+ "UMask": "0x1"
},
{
- "EventCode": "0xC6",
- "UMask": "0x1",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 16 cycles which was not interrupted by a back-end stall.",
- "PEBS": "1",
- "MSRValue": "0x401006",
+ "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer",
"Counter": "0,1,2,3",
- "EventName": "FRONTEND_RETIRED.LATENCY_GE_16",
- "MSRIndex": "0x3F7",
- "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 16 cycles. During this period the front-end delivered no uops.",
- "TakenAlone": "1",
- "SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_SWITCHES",
+ "PublicDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x30"
},
{
+ "BriefDescription": "Retired Instructions who experienced iTLB true miss.",
+ "Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3",
"EventCode": "0xC6",
- "UMask": "0x1",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 8 cycles which was not interrupted by a back-end stall.",
+ "EventName": "FRONTEND_RETIRED.ITLB_MISS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x14",
"PEBS": "1",
- "MSRValue": "0x400806",
+ "PublicDescription": "Counts retired Instructions that experienced iTLB (Instruction TLB) true miss.",
+ "SampleAfterValue": "100007",
+ "TakenAlone": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop",
"Counter": "0,1,2,3",
- "EventName": "FRONTEND_RETIRED.LATENCY_GE_8",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.ALL_DSB_CYCLES_ANY_UOPS",
+ "PublicDescription": "Counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Count includes uops that may 'bypass' the IDQ.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x18"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xC6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_512",
"MSRIndex": "0x3F7",
- "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 8 cycles. During this period the front-end delivered no uops.",
- "TakenAlone": "1",
+ "MSRValue": "0x420006",
+ "PEBS": "1",
"SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3"
+ "TakenAlone": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles MITE is delivering 4 Uops",
+ "Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "CounterMask": "4",
+ "EventCode": "0x79",
+ "EventName": "IDQ.ALL_MITE_CYCLES_4_UOPS",
+ "PublicDescription": "Counts the number of cycles 4 uops were delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. Counting includes uops that may 'bypass' the IDQ. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x24"
+ },
+ {
+ "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 Uops",
+ "Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "CounterMask": "4",
+ "EventCode": "0x79",
+ "EventName": "IDQ.ALL_DSB_CYCLES_4_UOPS",
+ "PublicDescription": "Counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Count includes uops that may 'bypass' the IDQ.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x18"
+ },
+ {
+ "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache tag miss.",
+ "Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x83",
+ "EventName": "ICACHE_64B.IFTAG_STALL",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4"
}
] \ No newline at end of file
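
All of the FRONTEND_RETIRED.* entries above share event 0xC6, umask 0x1 and PEBS; they differ only in the MSRValue the driver writes to MSR 0x3F7 (MSR_PEBS_FRONTEND, the MSRIndex given in each entry). Assuming the field layout the Intel SDM documents for that MSR (sub-event select in bits 7:0, IDQ bubble-length threshold in bits 19:8, bubble width in bits 22:20), a small Python decoder, offered as a sketch rather than a reference, makes the hex values legible:

    # Sketch of how the MSRValue fields for the 0xC6/FRONTEND_RETIRED
    # events above appear to be packed into MSR_PEBS_FRONTEND (0x3F7).
    # Field layout assumed from the Intel SDM; treat as illustration.

    def decode_pebs_frontend(msr_value):
        return {
            "evtsel": msr_value & 0xFF,            # 0x06 = IDQ bubbles; 0x11..0x15 = miss events
            "bubble_len": (msr_value >> 8) & 0xFFF,
            "bubble_width": (msr_value >> 20) & 0x7,
        }

    # LATENCY_GE_8: full-width (4-slot) IDQ bubbles lasting >= 8 cycles
    print(decode_pebs_frontend(0x400806))
    # -> {'evtsel': 6, 'bubble_len': 8, 'bubble_width': 4}

    # LATENCY_GE_2_BUBBLES_GE_3: >= 3 bubble-slots for >= 2 cycles
    print(decode_pebs_frontend(0x300206))
    # -> {'evtsel': 6, 'bubble_len': 2, 'bubble_width': 3}

Read this way, the LATENCY_GE_N family simply sweeps the bubble-length field (0x400206 through 0x420006 for thresholds 2 through 512), the BUBBLES_GE_K variants lower the width field, and the plain miss events (DSB_MISS 0x11, L1I_MISS 0x12, L2_MISS 0x13, ITLB_MISS 0x14, STLB_MISS 0x15) use only the select field.
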
diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/memory.json b/tools/perf/pmu-events/arch/x86/cascadelakex/memory.json
index dfee92596379..cc66a51c6a7b 100644
--- a/tools/perf/pmu-events/arch/x86/cascadelakex/memory.json
+++ b/tools/perf/pmu-events/arch/x86/cascadelakex/memory.json
@@ -1,9909 +1,9909 @@
[
{
- "EventCode": "0x54",
- "UMask": "0x1",
- "BriefDescription": "Number of times a transactional abort was signaled due to a data conflict on a transactionally accessed address",
- "Counter": "0,1,2,3",
- "EventName": "TX_MEM.ABORT_CONFLICT",
- "PublicDescription": "Number of times a TSX line had a cache conflict.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x54",
- "UMask": "0x2",
- "BriefDescription": "Number of times a transactional abort was signaled due to a data capacity limitation for transactional reads or writes.",
- "Counter": "0,1,2,3",
- "EventName": "TX_MEM.ABORT_CAPACITY",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x54",
- "UMask": "0x4",
- "BriefDescription": "Number of times a HLE transactional region aborted due to a non XRELEASE prefixed instruction writing to an elided lock in the elision buffer",
- "Counter": "0,1,2,3",
- "EventName": "TX_MEM.ABORT_HLE_STORE_TO_ELIDED_LOCK",
- "PublicDescription": "Number of times a TSX Abort was triggered due to a non-release/commit store to lock.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x54",
- "UMask": "0x8",
- "BriefDescription": "Number of times an HLE transactional execution aborted due to NoAllocatedElisionBuffer being non-zero.",
+ "BriefDescription": "ALL_READS & L3_MISS_LOCAL_DRAM & SNOOP_MISS_OR_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_NOT_EMPTY",
- "PublicDescription": "Number of times a TSX Abort was triggered due to commit but Lock Buffer not empty.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x54",
- "UMask": "0x10",
- "BriefDescription": "Number of times an HLE transactional execution aborted due to XRELEASE lock not satisfying the address and value requirements in the elision buffer",
- "Counter": "0,1,2,3",
- "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_MISMATCH",
- "PublicDescription": "Number of times a TSX Abort was triggered due to release/commit but data and address mismatch.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x54",
- "UMask": "0x20",
- "BriefDescription": "Number of times an HLE transactional execution aborted due to an unsupported read alignment from the elision buffer.",
- "Counter": "0,1,2,3",
- "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_UNSUPPORTED_ALIGNMENT",
- "PublicDescription": "Number of times a TSX Abort was triggered due to attempting an unsupported alignment from Lock Buffer.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x06040007F7",
+ "Offcore": "1",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0x54",
- "UMask": "0x40",
- "BriefDescription": "Number of times HLE lock could not be elided due to ElisionBufferAvailable being zero.",
+ "BriefDescription": "ALL_DATA_RD & L3_MISS_REMOTE_DRAM & SNOOP_MISS_OR_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "TX_MEM.HLE_ELISION_BUFFER_FULL",
- "PublicDescription": "Number of times we could not allocate Lock Buffer.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x063B800491",
+ "Offcore": "1",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0x5d",
- "UMask": "0x1",
- "BriefDescription": "Counts the number of times a class of instructions that may cause a transactional abort was executed. Since this is the count of execution, it may not always cause a transactional abort.",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & L3_MISS_LOCAL_DRAM & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "TX_EXEC.MISC1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0104000100",
+ "Offcore": "1",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0x5d",
- "UMask": "0x2",
- "BriefDescription": "Counts the number of times a class of instructions (e.g., vzeroupper) that may cause a transactional abort was executed inside a transactional region",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "TX_EXEC.MISC2",
- "PublicDescription": "Unfriendly TSX abort triggered by a vzeroupper instruction.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x023C000490",
+ "Offcore": "1",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0x5d",
- "UMask": "0x4",
- "BriefDescription": "Counts the number of times an instruction execution caused the transactional nest count supported to be exceeded",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "TX_EXEC.MISC3",
- "PublicDescription": "Unfriendly TSX abort triggered by a nest count that is too deep.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0210000020",
+ "Offcore": "1",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0x5d",
- "UMask": "0x8",
- "BriefDescription": "Counts the number of times a XBEGIN instruction was executed inside an HLE transactional region.",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "TX_EXEC.MISC4",
- "PublicDescription": "RTM region detected inside HLE.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x013C000100",
+ "Offcore": "1",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0x5d",
- "UMask": "0x10",
- "BriefDescription": "Counts the number of times an HLE XACQUIRE instruction was executed inside an RTM transactional region",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & L3_MISS_REMOTE_HOP1_DRAM & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "TX_EXEC.MISC5",
- "PublicDescription": "Counts the number of times an HLE XACQUIRE instruction was executed inside an RTM transactional region.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F90000400",
+ "Offcore": "1",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0x60",
- "UMask": "0x10",
- "BriefDescription": "Counts number of Offcore outstanding Demand Data Read requests that miss L3 cache in the superQ every cycle.",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.L3_MISS_DEMAND_DATA_RD",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0404000490",
+ "Offcore": "1",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0x60",
- "UMask": "0x10",
- "BriefDescription": "Cycles with at least 6 Demand Data Read requests that miss L3 cache in the superQ.",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.L3_MISS_DEMAND_DATA_RD_GE_6",
- "CounterMask": "6",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0604000010",
+ "Offcore": "1",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0x60",
- "UMask": "0x10",
- "BriefDescription": "Cycles with at least 1 Demand Data Read requests who miss L3 cache in the superQ.",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS.REMOTE_HITM",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_L3_MISS_DEMAND_DATA_RD",
- "CounterMask": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.REMOTE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x103FC00490",
+ "Offcore": "1",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xA3",
- "UMask": "0x2",
- "BriefDescription": "Cycles while L3 cache miss demand load is outstanding.",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L3_MISS",
- "CounterMask": "2",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0090000120",
+ "Offcore": "1",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xA3",
- "UMask": "0x6",
- "BriefDescription": "Execution stalls while L3 cache miss demand load is outstanding.",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"Counter": "0,1,2,3",
- "EventName": "CYCLE_ACTIVITY.STALLS_L3_MISS",
- "CounterMask": "6",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0084000100",
+ "Offcore": "1",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xB0",
- "UMask": "0x10",
- "BriefDescription": "Demand Data Read requests who miss L3 cache",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS.L3_MISS_DEMAND_DATA_RD",
- "PublicDescription": "Demand Data Read requests who miss L3 cache.",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x103C000100",
+ "Offcore": "1",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xC3",
- "UMask": "0x2",
- "BriefDescription": "Counts the number of machine clears due to memory order conflicts.",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & L3_MISS_REMOTE_HOP1_DRAM & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
- "Errata": "SKL089",
- "PublicDescription": "Counts the number of memory ordering Machine Clears detected. Memory Ordering Machine Clears can result from one of the following:a. memory disambiguation,b. external snoop, orc. cross SMT-HW-thread snoop (stores) hitting load buffer.",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F90000100",
+ "Offcore": "1",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xC8",
- "UMask": "0x1",
- "BriefDescription": "Number of times an HLE execution started.",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "HLE_RETIRED.START",
- "PublicDescription": "Number of times we entered an HLE region. Does not count nested transactions.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0804008000",
+ "Offcore": "1",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xC8",
- "UMask": "0x2",
- "BriefDescription": "Number of times an HLE execution successfully committed",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "HLE_RETIRED.COMMIT",
- "PublicDescription": "Number of times HLE commit succeeded.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x06040007F7",
+ "Offcore": "1",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xC8",
- "UMask": "0x4",
- "BriefDescription": "Number of times an HLE execution aborted due to any reasons (multiple categories may count as one). ",
- "PEBS": "1",
+ "BriefDescription": "ALL_DATA_RD & L3_MISS & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "HLE_RETIRED.ABORTED",
- "PublicDescription": "Number of times HLE abort was triggered.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x043C000491",
+ "Offcore": "1",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xC8",
- "UMask": "0x8",
- "BriefDescription": "Number of times an HLE execution aborted due to various memory events (e.g., read/write capacity and conflicts).",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "HLE_RETIRED.ABORTED_MEM",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x043C000010",
+ "Offcore": "1",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xC8",
- "UMask": "0x10",
- "BriefDescription": "Number of times an HLE execution aborted due to hardware timer expiration.",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & L3_MISS_LOCAL_DRAM & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "HLE_RETIRED.ABORTED_TIMER",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0404000080",
+ "Offcore": "1",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xC8",
- "UMask": "0x20",
- "BriefDescription": "Number of times an HLE execution aborted due to HLE-unfriendly instructions and certain unfriendly events (such as AD assists etc.).",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "HLE_RETIRED.ABORTED_UNFRIENDLY",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0804000010",
+ "Offcore": "1",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xC8",
- "UMask": "0x40",
- "BriefDescription": "Number of times an HLE execution aborted due to incompatible memory type",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "HLE_RETIRED.ABORTED_MEMTYPE",
- "PublicDescription": "Number of times an HLE execution aborted due to incompatible memory type.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x01040007F7",
+ "Offcore": "1",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xC8",
- "UMask": "0x80",
- "BriefDescription": "Number of times an HLE execution aborted due to unfriendly events (such as interrupts).",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & L3_MISS_LOCAL_DRAM & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "HLE_RETIRED.ABORTED_EVENTS",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0104000400",
+ "Offcore": "1",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xC9",
- "UMask": "0x1",
- "BriefDescription": "Number of times an RTM execution started.",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "RTM_RETIRED.START",
- "PublicDescription": "Number of times we entered an RTM region. Does not count nested transactions.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x103C000004",
+ "Offcore": "1",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xC9",
- "UMask": "0x2",
- "BriefDescription": "Number of times an RTM execution successfully committed",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & L3_MISS_REMOTE_HOP1_DRAM & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "RTM_RETIRED.COMMIT",
- "PublicDescription": "Number of times RTM commit succeeded.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0110000010",
+ "Offcore": "1",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xC9",
- "UMask": "0x4",
- "BriefDescription": "Number of times an RTM execution aborted due to any reasons (multiple categories may count as one). ",
- "PEBS": "1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "RTM_RETIRED.ABORTED",
- "PublicDescription": "Number of times RTM abort was triggered.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F84000491",
+ "Offcore": "1",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xC9",
- "UMask": "0x8",
"BriefDescription": "Number of times an RTM execution aborted due to various memory events (e.g. read/write capacity and conflicts)",
"Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xC9",
"EventName": "RTM_RETIRED.ABORTED_MEM",
"PublicDescription": "Number of times an RTM execution aborted due to various memory events (e.g. read/write capacity and conflicts).",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xC9",
- "UMask": "0x10",
- "BriefDescription": "Number of times an RTM execution aborted due to uncommon conditions.",
- "Counter": "0,1,2,3",
- "EventName": "RTM_RETIRED.ABORTED_TIMER",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xC9",
- "UMask": "0x20",
- "BriefDescription": "Number of times an RTM execution aborted due to HLE-unfriendly instructions",
- "Counter": "0,1,2,3",
- "EventName": "RTM_RETIRED.ABORTED_UNFRIENDLY",
- "PublicDescription": "Number of times an RTM execution aborted due to HLE-unfriendly instructions.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xC9",
- "UMask": "0x40",
- "BriefDescription": "Number of times an RTM execution aborted due to incompatible memory type",
- "Counter": "0,1,2,3",
- "EventName": "RTM_RETIRED.ABORTED_MEMTYPE",
- "PublicDescription": "Number of times an RTM execution aborted due to incompatible memory type.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xC9",
- "UMask": "0x80",
- "BriefDescription": "Number of times an RTM execution aborted due to none of the previous 4 categories (e.g. interrupt)",
- "Counter": "0,1,2,3",
- "EventName": "RTM_RETIRED.ABORTED_EVENTS",
- "PublicDescription": "Number of times an RTM execution aborted due to none of the previous 4 categories (e.g. interrupt).",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xCD",
- "UMask": "0x1",
- "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 512 cycles.",
- "PEBS": "2",
- "MSRValue": "0x200",
- "Counter": "0,1,2,3",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
- "MSRIndex": "0x3F6",
- "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 512 cycles. Reported latency may be longer than just the memory latency.",
- "TakenAlone": "1",
- "SampleAfterValue": "101",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xCD",
- "UMask": "0x1",
- "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 256 cycles.",
- "PEBS": "2",
- "MSRValue": "0x100",
- "Counter": "0,1,2,3",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
- "MSRIndex": "0x3F6",
- "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 256 cycles. Reported latency may be longer than just the memory latency.",
- "TakenAlone": "1",
- "SampleAfterValue": "503",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xCD",
- "UMask": "0x1",
- "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles.",
- "PEBS": "2",
- "MSRValue": "0x80",
- "Counter": "0,1,2,3",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
- "MSRIndex": "0x3F6",
- "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles. Reported latency may be longer than just the memory latency.",
- "TakenAlone": "1",
- "SampleAfterValue": "1009",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xCD",
- "UMask": "0x1",
- "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 64 cycles.",
- "PEBS": "2",
- "MSRValue": "0x40",
- "Counter": "0,1,2,3",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64",
- "MSRIndex": "0x3F6",
- "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 64 cycles. Reported latency may be longer than just the memory latency.",
- "TakenAlone": "1",
- "SampleAfterValue": "2003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xCD",
- "UMask": "0x1",
- "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 32 cycles.",
- "PEBS": "2",
- "MSRValue": "0x20",
- "Counter": "0,1,2,3",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32",
- "MSRIndex": "0x3F6",
- "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 32 cycles. Reported latency may be longer than just the memory latency.",
- "TakenAlone": "1",
- "SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x8"
},
{
- "EventCode": "0xCD",
- "UMask": "0x1",
- "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 16 cycles.",
- "PEBS": "2",
- "MSRValue": "0x10",
- "Counter": "0,1,2,3",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16",
- "MSRIndex": "0x3F6",
- "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 16 cycles. Reported latency may be longer than just the memory latency.",
- "TakenAlone": "1",
- "SampleAfterValue": "20011",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xCD",
- "UMask": "0x1",
- "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 8 cycles.",
- "PEBS": "2",
- "MSRValue": "0x8",
- "Counter": "0,1,2,3",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8",
- "MSRIndex": "0x3F6",
- "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 8 cycles. Reported latency may be longer than just the memory latency.",
- "TakenAlone": "1",
- "SampleAfterValue": "50021",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xCD",
- "UMask": "0x1",
- "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 4 cycles.",
- "PEBS": "2",
- "MSRValue": "0x4",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4",
- "MSRIndex": "0x3F6",
- "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 4 cycles. Reported latency may be longer than just the memory latency.",
- "TakenAlone": "1",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "Offcore": "1",
+ "CounterHTOff": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0084000001",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F84000010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0104000001",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10040007F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x0204000001",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x013C000002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0404000001",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0090000004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0804000001",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & L3_MISS & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FBC000400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x1004000001",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00BC000120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F84000001",
+ "BriefDescription": "ALL_READS & L3_MISS_REMOTE_HOP1_DRAM & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x08100007F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0090000001",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F84000100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0110000001",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FBC000120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x0210000001",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & L3_MISS_LOCAL_DRAM & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0104000020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0410000001",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0404000020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0810000001",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0204000400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x1010000001",
+ "BriefDescription": "ALL_PF_RFO & L3_MISS & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FBC000120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F90000001",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0404000002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x00BC000001",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & L3_MISS & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_MISS.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_MISS.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x083C000020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x013C000001",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_MISS.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0410008000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x023C000001",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_MISS.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0210000002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x043C000001",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x063B800002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x083C000001",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_MISS.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F840007F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x103C000001",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_MISS.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0210000491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3FBC000001",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_MISS.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0210000400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0084000002",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x023C000004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0104000002",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0104000100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x0204000002",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1004000490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0404000002",
+ "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & L3_MISS_REMOTE_HOP1_DRAM & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1010000004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0804000002",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x023C000010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x1004000002",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F84000100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F84000002",
+ "BriefDescription": "Number of times a HLE transactional region aborted due to a non XRELEASE prefixed instruction writing to an elided lock in the elision buffer",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_HLE_STORE_TO_ELIDED_LOCK",
+ "PublicDescription": "Number of times a TSX Abort was triggered due to a non-release/commit store to lock.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0090000002",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0604000004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0110000002",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x023C000020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x0210000002",
+ "BriefDescription": "Number of times a transactional abort was signaled due to a data conflict on a transactionally accessed address",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_CONFLICT",
+ "PublicDescription": "Number of times a TSX line had a cache conflict.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0410000002",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0210000400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0810000002",
+ "BriefDescription": "ALL_PF_RFO & L3_MISS_LOCAL_DRAM & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0804000120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x1010000002",
+ "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & L3_MISS_REMOTE_HOP1_DRAM & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0410000002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F90000002",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0104000004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x00BC000002",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_MISS.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x063B800100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x013C000002",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_MISS.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0804000120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x023C000002",
+ "BriefDescription": "ALL_RFO & L3_MISS & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_MISS.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_MISS.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x083C000122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x043C000002",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0810000122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x083C000002",
+ "BriefDescription": "Counts any other requests OTHER & L3_MISS_LOCAL_DRAM & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_MISS.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0104008000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x103C000002",
+ "BriefDescription": "Counts the number of times an HLE XACQUIRE instruction was executed inside an RTM transactional region",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_MISS.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x5d",
+ "EventName": "TX_EXEC.MISC5",
+ "PublicDescription": "Counts the number of times an HLE XACQUIRE instruction was executed inside an RTM transactional region.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3FBC000002",
+ "BriefDescription": "Counts the number of times a XBEGIN instruction was executed inside an HLE transactional region.",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_MISS.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x5d",
+ "EventName": "TX_EXEC.MISC4",
+ "PublicDescription": "RTM region detected inside HLE.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0084000004",
+ "BriefDescription": "Counts the number of times an instruction execution caused the transactional nest count supported to be exceeded",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x5d",
+ "EventName": "TX_EXEC.MISC3",
+ "PublicDescription": "Unfriendly TSX abort triggered by a nest count that is too deep.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0104000004",
+ "BriefDescription": "Counts the number of times a class of instructions (e.g., vzeroupper) that may cause a transactional abort was executed inside a transactional region",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x5d",
+ "EventName": "TX_EXEC.MISC2",
+ "PublicDescription": "Unfriendly TSX abort triggered by a vzeroupper instruction.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x0204000004",
+ "BriefDescription": "Counts the number of times a class of instructions that may cause a transactional abort was executed. Since this is the count of execution, it may not always cause a transactional abort.",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x5d",
+ "EventName": "TX_EXEC.MISC1",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0404000004",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0104000122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0804000004",
+ "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & L3_MISS_REMOTE_HOP1_DRAM & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1010000002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x1004000004",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x083FC00490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F84000004",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & L3_MISS_REMOTE_DRAM & SNOOP_MISS_OR_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x063B800010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0090000004",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0090000400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0110000004",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x043C000400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x0210000004",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0204000001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0410000004",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0084000010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0810000004",
+ "BriefDescription": "ALL_PF_DATA_RD & L3_MISS & REMOTE_HIT_FORWARD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x083FC00490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x1010000004",
+ "BriefDescription": "ALL_RFO & L3_MISS_REMOTE_DRAM & SNOOP_MISS_OR_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x063B800122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F90000004",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & L3_MISS_LOCAL_DRAM & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0404000400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x00BC000004",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & L3_MISS & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_MISS.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x043C000020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x013C000004",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & L3_MISS & SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_MISS.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x023C000400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x023C000004",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_MISS.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x063B8007F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x043C000004",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x013C000491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x083C000004",
+ "BriefDescription": "Counts demand data reads",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_MISS.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0084000001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x103C000004",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_MISS.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1010000002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3FBC000004",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_MISS.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00840007F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0084000010",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0090000490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0104000010",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0110000100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x0204000010",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10100007F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0404000010",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x013C000004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0804000010",
+ "BriefDescription": "Number of times an RTM execution aborted due to uncommon conditions.",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xC9",
+ "EventName": "RTM_RETIRED.ABORTED_TIMER",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x1004000010",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0604000002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F84000010",
+ "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & L3_MISS & REMOTE_HIT_FORWARD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x083FC00004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0090000010",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x083C000002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0110000010",
+ "BriefDescription": "ALL_READS & L3_MISS_REMOTE_HOP1_DRAM & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F900007F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x0210000010",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & L3_MISS_LOCAL_DRAM & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0804000100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0410000010",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0104000010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0810000010",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F84000001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x1010000010",
+ "BriefDescription": "ALL_READS & L3_MISS_LOCAL_DRAM & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x04040007F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F90000010",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0410000010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x00BC000010",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_MISS.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0204000004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x013C000010",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & L3_MISS & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_MISS.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x083C000010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x023C000010",
+ "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & L3_MISS & SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_MISS.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x023C000002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x043C000010",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & L3_MISS_LOCAL_DRAM & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0104000080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x083C000010",
+ "BriefDescription": "ALL_PF_DATA_RD & L3_MISS & REMOTE_HITM",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_MISS.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x103FC00490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x103C000010",
+ "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & L3_MISS_REMOTE_HOP1_DRAM & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_MISS.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0810000004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3FBC000010",
+ "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & L3_MISS & SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_MISS.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x023C000004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0084000020",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x023C000080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0104000020",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0604000400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x0204000020",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0404000080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0404000020",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0210000010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0804000020",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & L3_MISS & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_MISS.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x013C000400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x1004000020",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00BC000004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F84000020",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & L3_MISS & REMOTE_HIT_FORWARD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x083FC00400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0090000020",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0204000100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0110000020",
+ "BriefDescription": "Number of times an HLE execution successfully committed",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xC8",
+ "EventName": "HLE_RETIRED.COMMIT",
+ "PublicDescription": "Number of times HLE commit succeeded.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x0210000020",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0404000122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0410000020",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F84000002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0810000020",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0410000002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x1010000020",
+ "BriefDescription": "ALL_DATA_RD & L3_MISS_REMOTE_HOP1_DRAM & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0810000491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F90000020",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & L3_MISS_REMOTE_HOP1_DRAM & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1010000010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x00BC000020",
+ "BriefDescription": "ALL_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_MISS.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0204000491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x013C000020",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_MISS.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0210000001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x023C000020",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & L3_MISS & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_MISS.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x103C000010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x043C000020",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS.REMOTE_HITM",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x103FC00100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x083C000020",
+ "BriefDescription": "ALL_PF_DATA_RD & L3_MISS & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_MISS.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x103C000490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x103C000020",
+ "BriefDescription": "ALL_RFO & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_MISS.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0204000122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3FBC000020",
+ "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & L3_MISS & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_MISS.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x013C000002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0084000080",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & L3_MISS & SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_MISS.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00BC000080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0104000080",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00BC000100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x0204000080",
+ "BriefDescription": "ALL_READS & L3_MISS & SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x023C0007F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0404000080",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0404000004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0804000080",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS.REMOTE_HITM",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x103FC00122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x1004000080",
+ "BriefDescription": "ALL_READS & L3_MISS & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x043C0007F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F84000080",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0084000120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0090000080",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & L3_MISS & REMOTE_HIT_FORWARD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x083FC00020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0110000080",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F84008000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x0210000080",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0090000002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0410000080",
+ "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & L3_MISS_LOCAL_DRAM & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0804000004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0810000080",
+ "BriefDescription": "ALL_PF_RFO & L3_MISS_REMOTE_HOP1_DRAM & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0410000120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x1010000080",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0604000122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F90000080",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1004000080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x00BC000080",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_MISS.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1010000004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x013C000080",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & L3_MISS_REMOTE_HOP1_DRAM & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_MISS.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0410000010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x023C000080",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_MISS.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x063B800490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x043C000080",
+ "BriefDescription": "ALL_PF_DATA_RD & L3_MISS_REMOTE_HOP1_DRAM & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0110000490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x083C000080",
+ "BriefDescription": "Counts any other requests OTHER & L3_MISS_REMOTE_HOP1_DRAM & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_MISS.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0110008000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x103C000080",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & L3_MISS_REMOTE_DRAM & SNOOP_MISS_OR_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_MISS.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x063B800100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3FBC000080",
+ "BriefDescription": "Counts any other requests OTHER & L3_MISS & SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_MISS.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_MISS.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00BC008000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0084000100",
+ "BriefDescription": "ALL_RFO & L3_MISS_LOCAL_DRAM & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1004000122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0104000100",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x083FC00080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x0204000100",
+ "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & L3_MISS & REMOTE_HITM",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x103FC00002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0404000100",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0604000020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0804000100",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1004000120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x1004000100",
+ "BriefDescription": "ALL_READS & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F840007F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F84000100",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F90000004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0090000100",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x023C0007F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0110000100",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0110000122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x0210000100",
+ "BriefDescription": "ALL_PF_DATA_RD & L3_MISS & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FBC000490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0410000100",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FBC000001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0810000100",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x083FC00491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x1010000100",
+ "BriefDescription": "ALL_READS & L3_MISS_LOCAL_DRAM & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10040007F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F90000100",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x013C0007F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x00BC000100",
+ "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & L3_MISS_REMOTE_HOP1_DRAM & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_MISS.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F90000002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x013C000100",
+ "BriefDescription": "Counts any other requests OTHER & L3_MISS_LOCAL_DRAM & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_MISS.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1004008000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x023C000100",
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles.",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_MISS.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xCD",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x80",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "1009",
+ "TakenAlone": "1",
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x043C000100",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x083C0007F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x083C000100",
+ "BriefDescription": "Number of times HLE lock could not be elided due to ElisionBufferAvailable being zero.",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_MISS.HIT_OTHER_CORE_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.HLE_ELISION_BUFFER_FULL",
+ "PublicDescription": "Number of times we could not allocate Lock Buffer.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x40"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x103C000100",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_MISS.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0204008000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3FBC000100",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & L3_MISS & SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_MISS.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x023C000010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0084000400",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x043C000491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0104000400",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1010000020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x0204000400",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x103C000122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0404000400",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & L3_MISS_REMOTE_HOP1_DRAM & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0410000400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0804000400",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0084000122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x1004000400",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x083C000491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F84000400",
+ "BriefDescription": "ALL_PF_RFO & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F84000120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0090000400",
+ "BriefDescription": "ALL_PF_DATA_RD & L3_MISS & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x013C000490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0110000400",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FBC000490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x0210000400",
+ "BriefDescription": "ALL_PF_DATA_RD & L3_MISS_LOCAL_DRAM & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0804000490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0410000400",
+ "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & L3_MISS & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x083C000002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0810000400",
+ "BriefDescription": "ALL_DATA_RD & L3_MISS & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_DATA_RD.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FBC000491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x1010000400",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x103C000080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F90000400",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x023C000491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x00BC000400",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & L3_MISS_LOCAL_DRAM & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_MISS.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0404000100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x013C000400",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_MISS.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F90000120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x023C000400",
+ "BriefDescription": "ALL_PF_RFO & L3_MISS & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_MISS.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x043C000120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x043C000400",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FBC008000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x083C000400",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & L3_MISS & SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_MISS.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x023C000100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x103C000400",
+ "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & L3_MISS_LOCAL_DRAM & SNOOP_MISS_OR_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_MISS.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0604000002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3FBC000400",
+ "BriefDescription": "Counts any other requests",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_MISS.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0090008000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0084008000",
+ "BriefDescription": "ALL_RFO & L3_MISS_LOCAL_DRAM & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0104000122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0104008000",
+ "BriefDescription": "ALL_PF_DATA_RD & L3_MISS & SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x023C000490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x0204008000",
+ "BriefDescription": "ALL_RFO & L3_MISS_REMOTE_HOP1_DRAM & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F90000122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0404008000",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1010000080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0804008000",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x043C000490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x1004008000",
+ "BriefDescription": "ALL_PF_RFO & L3_MISS_REMOTE_HOP1_DRAM & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0810000120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F84008000",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0604000490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0090008000",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0090000001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0110008000",
+ "BriefDescription": "ALL_PF_RFO & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0204000120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x0210008000",
+ "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & L3_MISS & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FBC000001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0410008000",
+ "BriefDescription": "Number of times an RTM execution aborted due to incompatible memory type",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xC9",
+ "EventName": "RTM_RETIRED.ABORTED_MEMTYPE",
+ "PublicDescription": "Number of times an RTM execution aborted due to incompatible memory type.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x40"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0810008000",
+ "BriefDescription": "ALL_PF_RFO & L3_MISS_REMOTE_HOP1_DRAM & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1010000120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x1010008000",
+ "BriefDescription": "ALL_PF_DATA_RD & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F84000490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F90008000",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & L3_MISS & REMOTE_HIT_FORWARD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x083FC00010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_MISS.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x00BC008000",
+ "BriefDescription": "ALL_RFO & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_MISS.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0084000122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_MISS.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x013C008000",
+ "BriefDescription": "ALL_READS & L3_MISS_REMOTE_HOP1_DRAM & SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_MISS.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00900007F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_MISS.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x023C008000",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_MISS.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x103C000010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_MISS.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x043C008000",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F84000400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_MISS.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x083C008000",
+ "BriefDescription": "ALL_READS & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_MISS.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x02040007F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_MISS.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x103C008000",
+ "BriefDescription": "ALL_RFO & L3_MISS_REMOTE_HOP1_DRAM & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_MISS.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0410000122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_MISS.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3FBC008000",
+ "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & L3_MISS_LOCAL_DRAM & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_MISS.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1004000001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0084000490",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F90000080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0104000490",
+ "BriefDescription": "ALL_RFO & L3_MISS & REMOTE_HIT_FORWARD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x083FC00122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x0204000490",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0810000010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0404000490",
+ "BriefDescription": "ALL_READS & L3_MISS_REMOTE_DRAM & SNOOP_MISS_OR_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x063B8007F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0804000490",
+ "BriefDescription": "ALL_RFO & L3_MISS_REMOTE_HOP1_DRAM & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0810000122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x1004000490",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & L3_MISS & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_MISS.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x013C000010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F84000490",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & L3_MISS_REMOTE_HOP1_DRAM & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0410000020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0090000490",
+ "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & L3_MISS_LOCAL_DRAM & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0804000002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0110000490",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1004000004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x0210000490",
+ "BriefDescription": "ALL_PF_DATA_RD & L3_MISS_LOCAL_DRAM & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0104000490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0410000490",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x063B800080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0810000490",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FBC000122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x1010000490",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0604000100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F90000490",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0404008000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x00BC000490",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & L3_MISS_REMOTE_HOP1_DRAM & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_MISS.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0810000100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x013C000490",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_MISS.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x013C000490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x023C000490",
+ "BriefDescription": "Counts demand data reads",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_MISS.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0204000001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x043C000490",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FBC000400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x083C000490",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & L3_MISS & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_MISS.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x103C000100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x103C000490",
+ "BriefDescription": "Counts any other requests OTHER & L3_MISS_REMOTE_DRAM & SNOOP_MISS_OR_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_MISS.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x063B808000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3FBC000490",
+ "BriefDescription": "Number of times an HLE transactional execution aborted due to NoAllocatedElisionBuffer being non-zero.",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_MISS.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_NOT_EMPTY",
+ "PublicDescription": "Number of times a TSX Abort was triggered due to commit but Lock Buffer not empty.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0084000120",
+ "BriefDescription": "Counts any other requests",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0084008000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0104000120",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1004000001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x0204000120",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1004000100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0404000120",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x103C000002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0804000120",
+ "BriefDescription": "ALL_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_MISS_OR_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0604000491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x1004000120",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0210000120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F84000120",
+ "BriefDescription": "ALL_PF_DATA_RD & L3_MISS & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x083C000490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0090000120",
+ "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & L3_MISS_REMOTE_HOP1_DRAM & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F90000001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0110000120",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x043C000004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x0210000120",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1004000491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0410000120",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0084000490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0810000120",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & L3_MISS_REMOTE_HOP1_DRAM & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F90000010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x1010000120",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS.REMOTE_HIT_FORWARD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x083FC007F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F90000120",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0604008000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x00BC000120",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_MISS.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0084000010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x013C000120",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_MISS.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0204000002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x023C000120",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS.REMOTE_HITM",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_MISS.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x103FC007F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x043C000120",
+ "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & L3_MISS & SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00BC000001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x083C000120",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_MISS.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x023C000122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x103C000120",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_MISS.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0210000080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3FBC000120",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & L3_MISS_REMOTE_HOP1_DRAM & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_MISS.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0410000100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0084000491",
+ "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & L3_MISS_LOCAL_DRAM & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0404000004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0104000491",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0084000491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x0204000491",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00BC000002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0404000491",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x063B808000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0804000491",
+ "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & L3_MISS & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x043C000002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x1004000491",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0804000002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F84000491",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F84000010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0090000491",
+ "BriefDescription": "ALL_PF_DATA_RD & L3_MISS_LOCAL_DRAM & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1004000490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0110000491",
+ "BriefDescription": "Counts any other requests OTHER & L3_MISS & REMOTE_HIT_FORWARD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x083FC08000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x0210000491",
+ "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & L3_MISS_REMOTE_DRAM & SNOOP_MISS_OR_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x063B800002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0410000491",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & L3_MISS_LOCAL_DRAM & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1004000020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0810000491",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0084000001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x1010000491",
+ "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & L3_MISS_LOCAL_DRAM & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0404000002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F90000491",
+ "BriefDescription": "ALL_PF_RFO & L3_MISS_REMOTE_HOP1_DRAM & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F90000120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x00BC000491",
+ "BriefDescription": "ALL_READS & L3_MISS_REMOTE_HOP1_DRAM & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_MISS.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10100007F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x013C000491",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_MISS.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0084000400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x023C000491",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_MISS.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F90000100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x043C000491",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0204000002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x083C000491",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_MISS.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FBC000002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x103C000491",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS.REMOTE_HITM",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_MISS.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x103FC00080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3FBC000491",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_MISS.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x08040007F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0084000122",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0090000100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0104000122",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0090000400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x0204000122",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0090000020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0404000122",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F90008000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0804000122",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS.REMOTE_HITM",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x103FC00491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x1004000122",
+ "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F84000004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F84000122",
+ "BriefDescription": "Counts any other requests OTHER & L3_MISS_REMOTE_HOP1_DRAM & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0410008000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x0090000122",
+ "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & L3_MISS_LOCAL_DRAM & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1004000004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x0110000122",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x043C000120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x0210000122",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x103C000001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0410000122",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x01100007F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0810000122",
+ "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F84000001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x1010000122",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1004000010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F90000122",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & L3_MISS_REMOTE_HOP1_DRAM & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F90000080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x00BC000122",
+ "BriefDescription": "Counts any other requests OTHER & L3_MISS_LOCAL_DRAM & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_MISS.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0804008000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x013C000122",
+ "BriefDescription": "Counts demand data reads",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_MISS.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0210000001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x023C000122",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & L3_MISS & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_MISS.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_MISS.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x013C000020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x043C000122",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & L3_MISS_REMOTE_HOP1_DRAM & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0110000080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x083C000122",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_MISS.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0804000490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x103C000122",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_MISS.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x083C000020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3FBC000122",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & L3_MISS & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_MISS.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_MISS.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x013C000100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x00840007F7",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00BC000001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x01040007F7",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1010000100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x02040007F7",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FBC000100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x04040007F7",
+ "BriefDescription": "Number of times an RTM execution aborted due to HLE-unfriendly instructions",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xC9",
+ "EventName": "RTM_RETIRED.ABORTED_UNFRIENDLY",
+ "PublicDescription": "Number of times an RTM execution aborted due to HLE-unfriendly instructions.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x20"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x08040007F7",
+ "BriefDescription": "ALL_PF_RFO & L3_MISS & SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x023C000120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x10040007F7",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F90000020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F840007F7",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & L3_MISS & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x103C000080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x00900007F7",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0090000080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x01100007F7",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0210000002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x02100007F7",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F84000080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x04100007F7",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0210000490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x08100007F7",
+ "BriefDescription": "ALL_PF_RFO & L3_MISS_LOCAL_DRAM & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0104000120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x10100007F7",
+ "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & L3_MISS & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FBC000004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3F900007F7",
+ "BriefDescription": "ALL_READS & L3_MISS & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FBC0007F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS.SNOOP_NONE",
- "Deprecated": "1",
- "MSRValue": "0x00BC0007F7",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_MISS.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x083C000490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS.NO_SNOOP_NEEDED",
- "Deprecated": "1",
- "MSRValue": "0x013C0007F7",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS.REMOTE_HIT_FORWARD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_MISS.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x083FC00400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS.SNOOP_MISS",
- "Deprecated": "1",
- "MSRValue": "0x023C0007F7",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & L3_MISS & SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_MISS.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_MISS.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00BC000020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS.HIT_OTHER_CORE_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x043C0007F7",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F90000002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS.HIT_OTHER_CORE_FWD",
- "Deprecated": "1",
- "MSRValue": "0x083C0007F7",
+ "BriefDescription": "Number of times an RTM execution aborted due to any reasons (multiple categories may count as one).",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_MISS.HIT_OTHER_CORE_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xC9",
+ "EventName": "RTM_RETIRED.ABORTED",
+ "PEBS": "1",
+ "PublicDescription": "Number of times RTM abort was triggered.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS.HITM_OTHER_CORE",
- "Deprecated": "1",
- "MSRValue": "0x103C0007F7",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & L3_MISS_LOCAL_DRAM & SNOOP_MISS_OR_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_MISS.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0604000020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS.ANY_SNOOP",
- "Deprecated": "1",
- "MSRValue": "0x3FBC0007F7",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_MISS.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0104000490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x063B800001",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x043C000020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0604000001",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & L3_MISS_LOCAL_DRAM & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0404000020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x063B800002",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & L3_MISS_LOCAL_DRAM & SNOOP_MISS_OR_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0604000100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0604000002",
+ "BriefDescription": "Counts any other requests OTHER & L3_MISS_REMOTE_HOP1_DRAM & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1010008000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x063B800004",
+ "BriefDescription": "ALL_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0084000491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0604000004",
+ "BriefDescription": "ALL_PF_DATA_RD & L3_MISS_REMOTE_DRAM & SNOOP_MISS_OR_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x063B800490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x063B800010",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0804000100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0604000010",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & L3_MISS & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FBC000080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x063B800020",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0410000020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0604000020",
+ "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_MISS_OR_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0604000001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x063B800080",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0410000490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0604000080",
+ "BriefDescription": "Cycles with at least 1 Demand Data Read requests who miss L3 cache in the superQ.",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_L3_MISS_DEMAND_DATA_RD",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x063B800100",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x083C000010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0604000100",
+ "BriefDescription": "Counts the number of machine clears due to memory order conflicts.",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "Errata": "SKL089",
+ "EventCode": "0xC3",
+ "EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
+ "PublicDescription": "Counts the number of memory ordering Machine Clears detected. Memory Ordering Machine Clears can result from one of the following:a. memory disambiguation,b. external snoop, orc. cross SMT-HW-thread snoop (stores) hitting load buffer.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x2"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x063B800400",
+ "BriefDescription": "Number of times an HLE transactional execution aborted due to XRELEASE lock not satisfying the address and value requirements in the elision buffer",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_MISMATCH",
+ "PublicDescription": "Number of times a TSX Abort was triggered due to release/commit but data and address mismatch.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0604000400",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0110000010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x063B808000",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0204000020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0604008000",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0090000002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x063B800490",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & L3_MISS_LOCAL_DRAM & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1004000080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0604000490",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0090000010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x063B800120",
+ "BriefDescription": "ALL_RFO & L3_MISS_REMOTE_HOP1_DRAM & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1010000122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0604000120",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x083C000122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x063B800491",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & L3_MISS_REMOTE_HOP1_DRAM & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0810000080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0604000491",
+ "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & L3_MISS & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x083C000001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x063B800122",
+ "BriefDescription": "Counts any other requests OTHER & L3_MISS_REMOTE_HOP1_DRAM & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0810008000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x0604000122",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS.REMOTE_HIT_FORWARD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x083FC00122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x063B8007F7",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0090000080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
- "Deprecated": "1",
- "MSRValue": "0x06040007F7",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & L3_MISS & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x103C000020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS.REMOTE_HITM",
- "Deprecated": "1",
- "MSRValue": "0x103FC00001",
+ "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & L3_MISS_LOCAL_DRAM & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_MISS.REMOTE_HITM",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0104000002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS.REMOTE_HITM",
- "Deprecated": "1",
- "MSRValue": "0x103FC00002",
+ "BriefDescription": "ALL_READS & L3_MISS & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_MISS.REMOTE_HITM",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_MISS.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x013C0007F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS.REMOTE_HITM",
- "Deprecated": "1",
- "MSRValue": "0x103FC00004",
+ "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & L3_MISS_REMOTE_HOP1_DRAM & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_MISS.REMOTE_HITM",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0410000001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS.REMOTE_HITM",
- "Deprecated": "1",
- "MSRValue": "0x103FC00010",
+ "BriefDescription": "Number of times an RTM execution aborted due to none of the previous 4 categories (e.g. interrupt)",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_MISS.REMOTE_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xC9",
+ "EventName": "RTM_RETIRED.ABORTED_EVENTS",
+ "PublicDescription": "Number of times an RTM execution aborted due to none of the previous 4 categories (e.g. interrupt).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x80"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS.REMOTE_HITM",
- "Deprecated": "1",
- "MSRValue": "0x103FC00020",
+ "BriefDescription": "ALL_READS & L3_MISS_REMOTE_HOP1_DRAM & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_MISS.REMOTE_HITM",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x04100007F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS.REMOTE_HITM",
- "Deprecated": "1",
- "MSRValue": "0x103FC00080",
+ "BriefDescription": "ALL_PF_RFO & L3_MISS & SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_MISS.REMOTE_HITM",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_MISS.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00BC000120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS.REMOTE_HITM",
- "Deprecated": "1",
- "MSRValue": "0x103FC00100",
+ "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & L3_MISS & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_MISS.REMOTE_HITM",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x043C000001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS.REMOTE_HITM",
- "Deprecated": "1",
- "MSRValue": "0x103FC00400",
+ "BriefDescription": "ALL_RFO & L3_MISS & SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_MISS.REMOTE_HITM",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x023C000122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_MISS.REMOTE_HITM",
- "Deprecated": "1",
- "MSRValue": "0x103FC08000",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_MISS.REMOTE_HITM",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0110008000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS.REMOTE_HITM",
- "Deprecated": "1",
- "MSRValue": "0x103FC00490",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_MISS.REMOTE_HITM",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1010008000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS.REMOTE_HITM",
- "Deprecated": "1",
- "MSRValue": "0x103FC00120",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & L3_MISS_REMOTE_HOP1_DRAM & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_MISS.REMOTE_HITM",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0110000400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS.REMOTE_HITM",
- "Deprecated": "1",
- "MSRValue": "0x103FC00491",
+ "BriefDescription": "Execution stalls while L3 cache miss demand load is outstanding.",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_MISS.REMOTE_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "CounterMask": "6",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L3_MISS",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x6"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS.REMOTE_HITM",
- "Deprecated": "1",
- "MSRValue": "0x103FC00122",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_MISS.REMOTE_HITM",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0204000122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS.REMOTE_HITM",
- "Deprecated": "1",
- "MSRValue": "0x103FC007F7",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & L3_MISS & SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_MISS.REMOTE_HITM",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_MISS.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00BC000400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
- "Deprecated": "1",
- "MSRValue": "0x083FC00001",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_MISS.REMOTE_HIT_FORWARD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0090000100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS.REMOTE_HIT_FORWARD",
- "Deprecated": "1",
- "MSRValue": "0x083FC00002",
+ "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & L3_MISS & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_MISS.REMOTE_HIT_FORWARD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x103C000001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS.REMOTE_HIT_FORWARD",
- "Deprecated": "1",
- "MSRValue": "0x083FC00004",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & L3_MISS & REMOTE_HITM",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_MISS.REMOTE_HIT_FORWARD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x103FC00400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
- "Deprecated": "1",
- "MSRValue": "0x083FC00010",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_MISS.REMOTE_HIT_FORWARD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0204000400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS.REMOTE_HIT_FORWARD",
- "Deprecated": "1",
- "MSRValue": "0x083FC00020",
+ "BriefDescription": "Counts any other requests",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_MISS.REMOTE_HIT_FORWARD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0204008000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
- "Deprecated": "1",
- "MSRValue": "0x083FC00080",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_MISS.REMOTE_HIT_FORWARD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0090008000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS.REMOTE_HIT_FORWARD",
- "Deprecated": "1",
- "MSRValue": "0x083FC00100",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_MISS.REMOTE_HIT_FORWARD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x08100007F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS.REMOTE_HIT_FORWARD",
- "Deprecated": "1",
- "MSRValue": "0x083FC00400",
+ "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_MISS_OR_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_MISS.REMOTE_HIT_FORWARD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0604000004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_MISS.REMOTE_HIT_FORWARD",
- "Deprecated": "1",
- "MSRValue": "0x083FC08000",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_MISS.REMOTE_HIT_FORWARD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0604000491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
- "Deprecated": "1",
- "MSRValue": "0x083FC00490",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS.REMOTE_HIT_FORWARD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_MISS.REMOTE_HIT_FORWARD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x083FC00002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS.REMOTE_HIT_FORWARD",
- "Deprecated": "1",
- "MSRValue": "0x083FC00120",
+ "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & L3_MISS & SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_MISS.REMOTE_HIT_FORWARD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_MISS.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00BC000004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
- "Deprecated": "1",
- "MSRValue": "0x083FC00491",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_MISS.REMOTE_HIT_FORWARD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x063B800122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS.REMOTE_HIT_FORWARD",
- "Deprecated": "1",
- "MSRValue": "0x083FC00122",
+ "BriefDescription": "ALL_READS & L3_MISS & REMOTE_HIT_FORWARD",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_MISS.REMOTE_HIT_FORWARD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x083FC007F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS.REMOTE_HIT_FORWARD",
- "Deprecated": "1",
- "MSRValue": "0x083FC007F7",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_MISS.REMOTE_HIT_FORWARD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0110000001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand data reads",
- "MSRValue": "0x0084000001",
+ "BriefDescription": "ALL_RFO & L3_MISS & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_MISS.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x013C000122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand data reads TBD",
- "MSRValue": "0x0104000001",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & L3_MISS & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_MISS.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x083C000100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand data reads",
- "MSRValue": "0x0204000001",
+ "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & L3_MISS_REMOTE_HOP1_DRAM & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F90000004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand data reads TBD",
- "MSRValue": "0x0404000001",
+ "BriefDescription": "ALL_PF_RFO & L3_MISS & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_MISS.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x013C000120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand data reads TBD",
- "MSRValue": "0x0804000001",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x02100007F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand data reads TBD",
- "MSRValue": "0x1004000001",
+ "BriefDescription": "ALL_READS & L3_MISS & REMOTE_HITM",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x103FC007F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand data reads TBD",
- "MSRValue": "0x3F84000001",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0110000490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand data reads",
- "MSRValue": "0x0090000001",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & L3_MISS & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FBC000100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand data reads TBD",
- "MSRValue": "0x0110000001",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0810000004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand data reads",
- "MSRValue": "0x0210000001",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0204000490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand data reads TBD",
- "MSRValue": "0x0410000001",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F90000491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand data reads TBD",
- "MSRValue": "0x0810000001",
+ "BriefDescription": "ALL_DATA_RD & L3_MISS_LOCAL_DRAM & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0404000491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand data reads TBD",
- "MSRValue": "0x1010000001",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS.REMOTE_HITM",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x103FC00002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand data reads TBD",
- "MSRValue": "0x3F90000001",
+ "BriefDescription": "Number of times an HLE execution aborted due to hardware timer expiration.",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xC8",
+ "EventName": "HLE_RETIRED.ABORTED_TIMER",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand data reads TBD",
- "MSRValue": "0x00BC000001",
+ "BriefDescription": "ALL_RFO & L3_MISS & REMOTE_HITM",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_DATA_RD.L3_MISS.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x103FC00122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand data reads TBD TBD",
- "MSRValue": "0x013C000001",
+ "BriefDescription": "ALL_DATA_RD & L3_MISS_REMOTE_HOP1_DRAM & SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_DATA_RD.L3_MISS.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0090000491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand data reads TBD",
- "MSRValue": "0x023C000001",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & L3_MISS & REMOTE_HITM",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_DATA_RD.L3_MISS.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x103FC00080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand data reads TBD TBD",
- "MSRValue": "0x043C000001",
+ "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & L3_MISS_REMOTE_DRAM & SNOOP_MISS_OR_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x063B800004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand data reads TBD TBD",
- "MSRValue": "0x083C000001",
+ "BriefDescription": "ALL_PF_DATA_RD & L3_MISS_LOCAL_DRAM & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0404000490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand data reads TBD TBD",
- "MSRValue": "0x103C000001",
+ "BriefDescription": "Counts any other requests OTHER & L3_MISS_REMOTE_HOP1_DRAM & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_DATA_RD.L3_MISS.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F90008000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand data reads TBD TBD",
- "MSRValue": "0x3FBC000001",
+ "BriefDescription": "ALL_PF_DATA_RD & L3_MISS_REMOTE_HOP1_DRAM & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_DATA_RD.L3_MISS.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0410000490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "MSRValue": "0x0084000002",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FBC000010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) TBD",
- "MSRValue": "0x0104000002",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F84000020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "MSRValue": "0x0204000002",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0604000001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) TBD",
- "MSRValue": "0x0404000002",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0084000100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) TBD",
- "MSRValue": "0x0804000002",
+ "BriefDescription": "Counts any other requests OTHER & L3_MISS & SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x023C008000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) TBD",
- "MSRValue": "0x1004000002",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0404000400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) TBD",
- "MSRValue": "0x3F84000002",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x063B800491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "MSRValue": "0x0090000002",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0084000020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) TBD",
- "MSRValue": "0x0110000002",
+ "BriefDescription": "Counts any other requests OTHER & L3_MISS_LOCAL_DRAM & SNOOP_MISS_OR_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0604008000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "MSRValue": "0x0210000002",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & L3_MISS_REMOTE_HOP1_DRAM & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0110000100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) TBD",
- "MSRValue": "0x0410000002",
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 32 cycles.",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xCD",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x20",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 32 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "100007",
+ "TakenAlone": "1",
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) TBD",
- "MSRValue": "0x0810000002",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x083C000080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) TBD",
- "MSRValue": "0x1010000002",
+ "BriefDescription": "ALL_RFO & L3_MISS_REMOTE_HOP1_DRAM & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0110000122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) TBD",
- "MSRValue": "0x3F90000002",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & L3_MISS_REMOTE_HOP1_DRAM & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0810000020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) TBD",
- "MSRValue": "0x00BC000002",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & L3_MISS & REMOTE_HIT_FORWARD",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_RFO.L3_MISS.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x083FC00080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) TBD TBD",
- "MSRValue": "0x013C000002",
+ "BriefDescription": "Counts all demand code reads",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_RFO.L3_MISS.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0210000004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) TBD",
- "MSRValue": "0x023C000002",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_RFO.L3_MISS.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0104000080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) TBD TBD",
- "MSRValue": "0x043C000002",
+ "BriefDescription": "ALL_RFO & L3_MISS_REMOTE_HOP1_DRAM & SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0210000122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) TBD TBD",
- "MSRValue": "0x083C000002",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & L3_MISS & SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_RFO.L3_MISS.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_MISS.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00BC000100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) TBD TBD",
- "MSRValue": "0x103C000002",
+ "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & L3_MISS_REMOTE_HOP1_DRAM & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_RFO.L3_MISS.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0810000002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) TBD TBD",
- "MSRValue": "0x3FBC000002",
+ "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & L3_MISS & SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_RFO.L3_MISS.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x023C000001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand code reads",
- "MSRValue": "0x0084000004",
+ "BriefDescription": "Number of times an HLE execution aborted due to various memory events (e.g., read/write capacity and conflicts).",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xC8",
+ "EventName": "HLE_RETIRED.ABORTED_MEM",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand code reads TBD",
- "MSRValue": "0x0104000004",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0410000080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand code reads",
- "MSRValue": "0x0204000004",
+ "BriefDescription": "ALL_RFO & L3_MISS & SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_MISS.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00BC000122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand code reads TBD",
- "MSRValue": "0x0404000004",
+ "BriefDescription": "ALL_PF_RFO & L3_MISS_REMOTE_DRAM & SNOOP_MISS_OR_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x063B800120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand code reads TBD",
- "MSRValue": "0x0804000004",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS.REMOTE_HITM",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x103FC08000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand code reads TBD",
- "MSRValue": "0x1004000004",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F90000490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand code reads TBD",
- "MSRValue": "0x3F84000004",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0410000004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand code reads",
- "MSRValue": "0x0090000004",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x023C000002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand code reads TBD",
- "MSRValue": "0x0110000004",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & L3_MISS_LOCAL_DRAM & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0804000010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand code reads",
- "MSRValue": "0x0210000004",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0104000491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand code reads TBD",
- "MSRValue": "0x0410000004",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0090000010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand code reads TBD",
- "MSRValue": "0x0810000004",
+ "BriefDescription": "Counts any other requests OTHER & L3_MISS & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x043C008000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand code reads TBD",
- "MSRValue": "0x1010000004",
+ "BriefDescription": "Counts any other requests",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0210008000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand code reads TBD",
- "MSRValue": "0x3F90000004",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0084000080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand code reads TBD",
- "MSRValue": "0x00BC000004",
+ "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & L3_MISS & REMOTE_HIT_FORWARD",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_CODE_RD.L3_MISS.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x083FC00001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand code reads TBD TBD",
- "MSRValue": "0x013C000004",
+ "BriefDescription": "ALL_READS & L3_MISS & SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_CODE_RD.L3_MISS.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_MISS.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00BC0007F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand code reads TBD",
- "MSRValue": "0x023C000004",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_CODE_RD.L3_MISS.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0110000491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand code reads TBD TBD",
- "MSRValue": "0x043C000004",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_CODE_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FBC0007F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand code reads TBD TBD",
- "MSRValue": "0x083C000004",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_CODE_RD.L3_MISS.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0410000491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand code reads TBD TBD",
- "MSRValue": "0x103C000004",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS.REMOTE_HITM",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_CODE_RD.L3_MISS.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x103FC00001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand code reads TBD TBD",
- "MSRValue": "0x3FBC000004",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & L3_MISS_REMOTE_HOP1_DRAM & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_CODE_RD.L3_MISS.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0110000020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
- "MSRValue": "0x0084000010",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0410000001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
- "MSRValue": "0x0104000010",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0404000491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
- "MSRValue": "0x0204000010",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & L3_MISS_LOCAL_DRAM & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1004000010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
- "MSRValue": "0x0404000010",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0804000491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
- "MSRValue": "0x0804000010",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0084008000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
- "MSRValue": "0x1004000010",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0104000020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
- "MSRValue": "0x3F84000010",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F84000020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
- "MSRValue": "0x0090000010",
+ "BriefDescription": "ALL_PF_RFO & L3_MISS_LOCAL_DRAM & SNOOP_MISS_OR_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0604000120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
- "MSRValue": "0x0110000010",
+ "BriefDescription": "ALL_PF_RFO & L3_MISS_REMOTE_HOP1_DRAM & SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0090000120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
- "MSRValue": "0x0210000010",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & L3_MISS & REMOTE_HIT_FORWARD",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x083FC00100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
- "MSRValue": "0x0410000010",
+ "BriefDescription": "ALL_DATA_RD & L3_MISS & SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_DATA_RD.L3_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x023C000491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
- "MSRValue": "0x0810000010",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x063B800020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
- "MSRValue": "0x1010000010",
+ "BriefDescription": "ALL_PF_DATA_RD & L3_MISS & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x043C000490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
- "MSRValue": "0x3F90000010",
+ "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & L3_MISS_LOCAL_DRAM & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0104000004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
- "MSRValue": "0x00BC000010",
+ "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & L3_MISS_LOCAL_DRAM & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_DATA_RD.L3_MISS.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0404000001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD TBD",
- "MSRValue": "0x013C000010",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_DATA_RD.L3_MISS.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FBC000080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
- "MSRValue": "0x023C000010",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_DATA_RD.L3_MISS.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1004000122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD TBD",
- "MSRValue": "0x043C000010",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0804000020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD TBD",
- "MSRValue": "0x083C000010",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1010000001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD TBD",
- "MSRValue": "0x103C000010",
+ "BriefDescription": "ALL_RFO & L3_MISS_LOCAL_DRAM & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_DATA_RD.L3_MISS.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0404000122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD TBD",
- "MSRValue": "0x3FBC000010",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_DATA_RD.L3_MISS.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00BC008000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
- "MSRValue": "0x0084000020",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00BC000491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
- "MSRValue": "0x0104000020",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x013C000120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
- "MSRValue": "0x0204000020",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0210000020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
- "MSRValue": "0x0404000020",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0110000080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
- "MSRValue": "0x0804000020",
+ "BriefDescription": "ALL_PF_RFO & L3_MISS & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x103C000120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
- "MSRValue": "0x1004000020",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x103C000120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
- "MSRValue": "0x3F84000020",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00BC000400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
- "MSRValue": "0x0090000020",
+ "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & L3_MISS_REMOTE_HOP1_DRAM & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0110000001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
- "MSRValue": "0x0110000020",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & L3_MISS & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FBC000020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
- "MSRValue": "0x0210000020",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00BC0007F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
- "MSRValue": "0x0410000020",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & L3_MISS & SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x023C000020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
- "MSRValue": "0x0810000020",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0104000001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
- "MSRValue": "0x1010000020",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x043C000100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
- "MSRValue": "0x3F90000020",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x023C000001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
- "MSRValue": "0x00BC000020",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_RFO.L3_MISS.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x043C000080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD TBD",
- "MSRValue": "0x013C000020",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_RFO.L3_MISS.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x083C000004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
- "MSRValue": "0x023C000020",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_RFO.L3_MISS.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0404000001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD TBD",
- "MSRValue": "0x043C000020",
+ "BriefDescription": "ALL_DATA_RD & L3_MISS_REMOTE_HOP1_DRAM & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F90000491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD TBD",
- "MSRValue": "0x083C000020",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_RFO.L3_MISS.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x043C000122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD TBD",
- "MSRValue": "0x103C000020",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_RFO.L3_MISS.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0110000020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD TBD",
- "MSRValue": "0x3FBC000020",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & L3_MISS_LOCAL_DRAM & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_RFO.L3_MISS.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1004000400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
- "MSRValue": "0x0084000080",
+ "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & L3_MISS & SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00BC000002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
- "MSRValue": "0x0104000080",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & L3_MISS_REMOTE_HOP1_DRAM & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1010000020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
- "MSRValue": "0x0204000080",
+ "BriefDescription": "ALL_PF_RFO & L3_MISS_REMOTE_HOP1_DRAM & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0110000120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
- "MSRValue": "0x0404000080",
+ "BriefDescription": "Counts any other requests OTHER & L3_MISS & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x103C008000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
- "MSRValue": "0x0804000080",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0804000004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
- "MSRValue": "0x1004000080",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0104008000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
- "MSRValue": "0x3F84000080",
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 256 cycles.",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xCD",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x100",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 256 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "503",
+ "TakenAlone": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0204000004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
- "MSRValue": "0x0090000080",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0810000100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
- "MSRValue": "0x0110000080",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0210000100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
- "MSRValue": "0x0210000080",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0084000002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
- "MSRValue": "0x0410000080",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x063B800004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
- "MSRValue": "0x0810000080",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x013C000080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
- "MSRValue": "0x1010000080",
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 16 cycles.",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xCD",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x10",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 16 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "20011",
+ "TakenAlone": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x083C000400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
- "MSRValue": "0x3F90000080",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0084000020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
- "MSRValue": "0x00BC000080",
+ "BriefDescription": "Number of times an HLE transactional execution aborted due to an unsupported read alignment from the elision buffer.",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_DATA_RD.L3_MISS.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_UNSUPPORTED_ALIGNMENT",
+ "PublicDescription": "Number of times a TSX Abort was triggered due to attempting an unsupported alignment from Lock Buffer.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & L3_MISS & SNOOP_MISS",
+ "Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x023C000080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD TBD",
- "MSRValue": "0x013C000080",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_DATA_RD.L3_MISS.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0090000491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
- "MSRValue": "0x023C000080",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_DATA_RD.L3_MISS.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F84000400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD TBD",
- "MSRValue": "0x043C000080",
+ "BriefDescription": "Counts demand data reads",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0090000001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD TBD",
- "MSRValue": "0x083C000080",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & L3_MISS & SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_MISS.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00BC000010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD TBD",
- "MSRValue": "0x103C000080",
+ "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & L3_MISS_LOCAL_DRAM & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_DATA_RD.L3_MISS.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1004000002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD TBD",
- "MSRValue": "0x3FBC000080",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_DATA_RD.L3_MISS.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x063B800400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
- "MSRValue": "0x0084000100",
+ "BriefDescription": "Number of times a transactional abort was signaled due to a data capacity limitation for transactional reads or writes.",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_CAPACITY",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of times an HLE execution aborted due to incompatible memory type",
+ "Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xC8",
+ "EventName": "HLE_RETIRED.ABORTED_MEMTYPE",
+ "PublicDescription": "Number of times an HLE execution aborted due to incompatible memory type.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution started.",
+ "Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xC9",
+ "EventName": "RTM_RETIRED.START",
+ "PublicDescription": "Number of times we entered an RTM region. Does not count nested transactions.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "ALL_RFO & L3_MISS_LOCAL_DRAM & HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0804000122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
- "MSRValue": "0x0104000100",
+ "BriefDescription": "ALL_PF_DATA_RD & L3_MISS_REMOTE_HOP1_DRAM & SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0210000490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
- "MSRValue": "0x0204000100",
+ "BriefDescription": "Counts any other requests OTHER & L3_MISS_LOCAL_DRAM & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0404008000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
- "MSRValue": "0x0404000100",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0210000100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
- "MSRValue": "0x0804000100",
+ "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & L3_MISS_REMOTE_HOP1_DRAM & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1010000001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
- "MSRValue": "0x1004000100",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00BC000490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
- "MSRValue": "0x3F84000100",
+ "BriefDescription": "ALL_PF_DATA_RD & L3_MISS_REMOTE_HOP1_DRAM & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1010000490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
- "MSRValue": "0x0090000100",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0084000080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
- "MSRValue": "0x0110000100",
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 512 cycles.",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xCD",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x200",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 512 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "101",
+ "TakenAlone": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of times an HLE execution aborted due to HLE-unfriendly instructions and certain unfriendly events (such as AD assists etc.).",
+ "Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xC8",
+ "EventName": "HLE_RETIRED.ABORTED_UNFRIENDLY",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & L3_MISS_LOCAL_DRAM & HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0404000010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
- "MSRValue": "0x0210000100",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x103C000020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
- "MSRValue": "0x0410000100",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0810000080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
- "MSRValue": "0x0810000100",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1010000120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
- "MSRValue": "0x1010000100",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F84000490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
- "MSRValue": "0x3F90000100",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_MISS_OR_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0604000010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
- "MSRValue": "0x00BC000100",
+ "BriefDescription": "ALL_READS & L3_MISS_REMOTE_HOP1_DRAM & SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_RFO.L3_MISS.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x02100007F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD TBD",
- "MSRValue": "0x013C000100",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_RFO.L3_MISS.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x103C0007F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
- "MSRValue": "0x023C000100",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_RFO.L3_MISS.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x013C000020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD TBD",
- "MSRValue": "0x043C000100",
+ "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & L3_MISS & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_MISS.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x013C000004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD TBD",
- "MSRValue": "0x083C000100",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & L3_MISS & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_RFO.L3_MISS.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x103C000400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD TBD",
- "MSRValue": "0x103C000100",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_RFO.L3_MISS.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0110000002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD TBD",
- "MSRValue": "0x3FBC000100",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_RFO.L3_MISS.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1010000122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
- "MSRValue": "0x0084000400",
+ "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & L3_MISS & REMOTE_HITM",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x103FC00001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
- "MSRValue": "0x0104000400",
+ "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & L3_MISS & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x013C000001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
- "MSRValue": "0x0204000400",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & L3_MISS_REMOTE_DRAM & SNOOP_MISS_OR_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x063B800020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
- "MSRValue": "0x0404000400",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x04100007F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
- "MSRValue": "0x0804000400",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_MISS_OR_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0604000080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
- "MSRValue": "0x1004000400",
+ "BriefDescription": "ALL_RFO & L3_MISS & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FBC000122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
- "MSRValue": "0x3F84000400",
+ "BriefDescription": "ALL_PF_DATA_RD & L3_MISS_REMOTE_HOP1_DRAM & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F90000490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
- "MSRValue": "0x0090000400",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1010000490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
- "MSRValue": "0x0110000400",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FBC000004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
- "MSRValue": "0x0210000400",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS.REMOTE_HIT_FORWARD",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x083FC00120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
- "MSRValue": "0x0410000400",
+ "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & L3_MISS & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_MISS.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x083C000004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
- "MSRValue": "0x0810000400",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & L3_MISS & REMOTE_HITM",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x103FC00010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
- "MSRValue": "0x1010000400",
+ "BriefDescription": "Cycles with at least 6 Demand Data Read requests that miss L3 cache in the superQ.",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "CounterMask": "6",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.L3_MISS_DEMAND_DATA_RD_GE_6",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 64 cycles.",
+ "Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xCD",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x40",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 64 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "2003",
+ "TakenAlone": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "ALL_RFO & L3_MISS & HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x103C000122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
- "MSRValue": "0x3F90000400",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x043C000002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
- "MSRValue": "0x00BC000400",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L1D_AND_SW.L3_MISS.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0404000010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD TBD",
- "MSRValue": "0x013C000400",
+ "BriefDescription": "ALL_PF_DATA_RD & L3_MISS & SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L1D_AND_SW.L3_MISS.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00BC000490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
- "MSRValue": "0x023C000400",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS.REMOTE_HITM",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L1D_AND_SW.L3_MISS.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x103FC00120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD TBD",
- "MSRValue": "0x043C000400",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L1D_AND_SW.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0090000122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD TBD",
- "MSRValue": "0x083C000400",
+ "BriefDescription": "Counts any other requests OTHER & L3_MISS & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L1D_AND_SW.L3_MISS.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_MISS.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x013C008000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD TBD",
- "MSRValue": "0x103C000400",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L1D_AND_SW.L3_MISS.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0204000120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD TBD",
- "MSRValue": "0x3FBC000400",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L1D_AND_SW.L3_MISS.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x063B800010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts any other requests",
- "MSRValue": "0x0084008000",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OCR.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0204000491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts any other requests TBD",
- "MSRValue": "0x0104008000",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS.REMOTE_HIT_FORWARD",
"Counter": "0,1,2,3",
- "EventName": "OCR.OTHER.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x083FC00020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts any other requests",
- "MSRValue": "0x0204008000",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & L3_MISS_REMOTE_DRAM & SNOOP_MISS_OR_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x063B800400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts any other requests TBD",
- "MSRValue": "0x0404008000",
+ "BriefDescription": "Counts any other requests OTHER & L3_MISS & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.OTHER.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FBC008000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts any other requests TBD",
- "MSRValue": "0x0804008000",
+ "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & L3_MISS_REMOTE_HOP1_DRAM & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.OTHER.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0110000004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts any other requests TBD",
- "MSRValue": "0x1004008000",
+ "BriefDescription": "ALL_RFO & L3_MISS & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.OTHER.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x043C000122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts any other requests TBD",
- "MSRValue": "0x3F84008000",
+ "BriefDescription": "ALL_RFO & L3_MISS_LOCAL_DRAM & SNOOP_MISS_OR_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.OTHER.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0604000122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts any other requests",
- "MSRValue": "0x0090008000",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & L3_MISS & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.OTHER.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x083C000080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts any other requests TBD",
- "MSRValue": "0x0110008000",
+ "BriefDescription": "ALL_PF_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_MISS_OR_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.OTHER.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0604000490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts any other requests",
- "MSRValue": "0x0210008000",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OCR.OTHER.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0210000080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts any other requests TBD",
- "MSRValue": "0x0410008000",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS.REMOTE_HITM",
"Counter": "0,1,2,3",
- "EventName": "OCR.OTHER.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x103FC00020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts any other requests TBD",
- "MSRValue": "0x0810008000",
+ "BriefDescription": "ALL_PF_RFO & L3_MISS & REMOTE_HIT_FORWARD",
"Counter": "0,1,2,3",
- "EventName": "OCR.OTHER.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x083FC00120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts any other requests TBD",
- "MSRValue": "0x1010008000",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.OTHER.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0804000080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts any other requests TBD",
- "MSRValue": "0x3F90008000",
+ "BriefDescription": "ALL_PF_RFO & L3_MISS_LOCAL_DRAM & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.OTHER.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1004000120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts any other requests TBD",
- "MSRValue": "0x00BC008000",
+ "BriefDescription": "Counts any other requests OTHER & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.OTHER.L3_MISS.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F84008000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts any other requests TBD TBD",
- "MSRValue": "0x013C008000",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & L3_MISS & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.OTHER.L3_MISS.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x043C000100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts any other requests TBD",
- "MSRValue": "0x023C008000",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.OTHER.L3_MISS.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x103C008000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts any other requests TBD TBD",
- "MSRValue": "0x043C008000",
+ "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.OTHER.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F84000002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts any other requests TBD TBD",
- "MSRValue": "0x083C008000",
+ "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & L3_MISS & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.OTHER.L3_MISS.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x103C000002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts any other requests TBD TBD",
- "MSRValue": "0x103C008000",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS.REMOTE_HITM",
"Counter": "0,1,2,3",
- "EventName": "OCR.OTHER.L3_MISS.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x103FC00400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts any other requests TBD TBD",
- "MSRValue": "0x3FBC008000",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.OTHER.L3_MISS.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0410000120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD",
- "MSRValue": "0x0084000490",
+ "BriefDescription": "ALL_DATA_RD & L3_MISS_REMOTE_HOP1_DRAM & SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0210000491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0104000490",
+ "BriefDescription": "Cycles while L3 cache miss demand load is outstanding.",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "CounterMask": "2",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L3_MISS",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts any other requests OTHER & L3_MISS & REMOTE_HITM",
+ "Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x103FC08000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD",
- "MSRValue": "0x0204000490",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x013C000122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0404000490",
+ "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & L3_MISS_REMOTE_DRAM & SNOOP_MISS_OR_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x063B800001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0804000490",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x013C008000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x1004000490",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & L3_MISS & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x043C000080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x3F84000490",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F84000122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD",
- "MSRValue": "0x0090000490",
+ "BriefDescription": "ALL_PF_RFO & L3_MISS_REMOTE_HOP1_DRAM & SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0210000120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0110000490",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x043C0007F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD",
- "MSRValue": "0x0210000490",
+ "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & L3_MISS_REMOTE_HOP1_DRAM & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0110000002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0410000490",
+ "BriefDescription": "ALL_DATA_RD & L3_MISS & SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_DATA_RD.L3_MISS.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00BC000491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0810000490",
+ "BriefDescription": "ALL_DATA_RD & L3_MISS & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_DATA_RD.L3_MISS.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x013C000491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x1010000490",
+ "BriefDescription": "ALL_RFO & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F84000122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x3F90000490",
+ "BriefDescription": "ALL_PF_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0084000490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x00BC000490",
+ "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & L3_MISS_LOCAL_DRAM & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0804000001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD TBD",
- "MSRValue": "0x013C000490",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS.REMOTE_HIT_FORWARD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x083FC08000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x023C000490",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0810000120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD TBD",
- "MSRValue": "0x043C000490",
+ "BriefDescription": "ALL_DATA_RD & L3_MISS & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_DATA_RD.L3_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x103C000491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD TBD",
- "MSRValue": "0x083C000490",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FBC000020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD TBD",
- "MSRValue": "0x103C000490",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & L3_MISS_REMOTE_HOP1_DRAM & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1010000080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD TBD",
- "MSRValue": "0x3FBC000490",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x083C000001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD",
- "MSRValue": "0x0084000120",
+ "BriefDescription": "ALL_DATA_RD & L3_MISS_LOCAL_DRAM & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0104000491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0104000120",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1004008000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD",
- "MSRValue": "0x0204000120",
+ "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & L3_MISS_REMOTE_HOP1_DRAM & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0810000001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0404000120",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0210000010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0804000120",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & L3_MISS_LOCAL_DRAM & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0104000010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x1004000120",
+ "BriefDescription": "Number of times an RTM execution successfully committed",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xC9",
+ "EventName": "RTM_RETIRED.COMMIT",
+ "PublicDescription": "Number of times RTM commit succeeded.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & L3_MISS_LOCAL_DRAM & SNOOP_MISS_OR_NO_FWD",
+ "Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0604000400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x3F84000120",
+ "BriefDescription": "ALL_DATA_RD & L3_MISS_LOCAL_DRAM & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1004000491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD",
- "MSRValue": "0x0090000120",
+ "BriefDescription": "Number of times an HLE execution aborted due to unfriendly events (such as interrupts).",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xC8",
+ "EventName": "HLE_RETIRED.ABORTED_EVENTS",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Counts number of Offcore outstanding Demand Data Read requests that miss L3 cache in the superQ every cycle.",
+ "Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.L3_MISS_DEMAND_DATA_RD",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & L3_MISS & ANY_SNOOP",
+ "Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FBC000002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0110000120",
+ "BriefDescription": "ALL_PF_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0204000490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD",
- "MSRValue": "0x0210000120",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x04040007F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0410000120",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00BC000080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0810000120",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0804000001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x1010000120",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F900007F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x3F90000120",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F84000004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x00BC000120",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_RFO.L3_MISS.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x083C000120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD TBD",
- "MSRValue": "0x013C000120",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_RFO.L3_MISS.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00BC000010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x023C000120",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_RFO.L3_MISS.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x043C008000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD TBD",
- "MSRValue": "0x043C000120",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00BC000020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD TBD",
- "MSRValue": "0x083C000120",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS.REMOTE_HITM",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_RFO.L3_MISS.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x103FC00004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD TBD",
- "MSRValue": "0x103C000120",
+ "BriefDescription": "ALL_READS & L3_MISS_LOCAL_DRAM & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_RFO.L3_MISS.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x01040007F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD TBD",
- "MSRValue": "0x3FBC000120",
+ "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & L3_MISS & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_RFO.L3_MISS.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x043C000004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD",
- "MSRValue": "0x0084000491",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & L3_MISS_REMOTE_DRAM & SNOOP_MISS_OR_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x063B800080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0104000491",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x103C000400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD",
- "MSRValue": "0x0204000491",
+ "BriefDescription": "ALL_PF_RFO & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0084000120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0404000491",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0810000020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0804000491",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F84000080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x1004000491",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1010000010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x3F84000491",
+ "BriefDescription": "ALL_DATA_RD & L3_MISS & REMOTE_HITM",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_DATA_RD.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x103FC00491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD",
- "MSRValue": "0x0090000491",
+ "BriefDescription": "ALL_RFO & L3_MISS_REMOTE_HOP1_DRAM & SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0090000122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0110000491",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x043C000001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD",
- "MSRValue": "0x0210000491",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0084000400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0410000491",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x083FC00001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0810000491",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0404000120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x1010000491",
+ "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & L3_MISS & REMOTE_HITM",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x103FC00004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x3F90000491",
+ "BriefDescription": "ALL_DATA_RD & L3_MISS_LOCAL_DRAM & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0804000491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x00BC000491",
+ "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & L3_MISS & REMOTE_HIT_FORWARD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_DATA_RD.L3_MISS.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x083FC00002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD TBD",
- "MSRValue": "0x013C000491",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_DATA_RD.L3_MISS.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x023C000120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x023C000491",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_DATA_RD.L3_MISS.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x103C000490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD TBD",
- "MSRValue": "0x043C000491",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS.REMOTE_HIT_FORWARD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x083FC00100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD TBD",
- "MSRValue": "0x083C000491",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0084000002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD TBD",
- "MSRValue": "0x103C000491",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & L3_MISS & REMOTE_HITM",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_DATA_RD.L3_MISS.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x103FC00100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD TBD",
- "MSRValue": "0x3FBC000491",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_DATA_RD.L3_MISS.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x063B800001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD",
- "MSRValue": "0x0084000122",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0104000002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0104000122",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0204000080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD",
- "MSRValue": "0x0204000122",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0804000122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0404000122",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0810000002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0804000122",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0204000080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x1004000122",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0104000400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x3F84000122",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & L3_MISS & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_MISS.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x013C000080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD",
- "MSRValue": "0x0090000122",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F90000010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0110000122",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1004000400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD",
- "MSRValue": "0x0210000122",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x083C008000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0410000122",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0110000004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0810000122",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x013C000001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x1010000122",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & L3_MISS_REMOTE_HOP1_DRAM & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0810000400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x3F90000122",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & L3_MISS_LOCAL_DRAM & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0804000080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x00BC000122",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_RFO.L3_MISS.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0210008000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD TBD",
- "MSRValue": "0x013C000122",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_RFO.L3_MISS.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0204000100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x023C000122",
+ "BriefDescription": "ALL_DATA_RD & L3_MISS & REMOTE_HIT_FORWARD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_RFO.L3_MISS.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x083FC00491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD TBD",
- "MSRValue": "0x043C000122",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FBC000491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD TBD",
- "MSRValue": "0x083C000122",
+ "BriefDescription": "Demand Data Read requests who miss L3 cache",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_RFO.L3_MISS.HIT_OTHER_CORE_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xB0",
+ "EventName": "OFFCORE_REQUESTS.L3_MISS_DEMAND_DATA_RD",
+ "PublicDescription": "Demand Data Read requests who miss L3 cache.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x10"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD TBD",
- "MSRValue": "0x103C000122",
+ "BriefDescription": "ALL_READS & L3_MISS_REMOTE_HOP1_DRAM & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_RFO.L3_MISS.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x01100007F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD TBD",
- "MSRValue": "0x3FBC000122",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & L3_MISS_REMOTE_HOP1_DRAM & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_RFO.L3_MISS.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0810000010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD",
- "MSRValue": "0x00840007F7",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_READS.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x013C000400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x01040007F7",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_READS.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x02040007F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD",
- "MSRValue": "0x02040007F7",
+ "BriefDescription": "ALL_PF_RFO & L3_MISS_LOCAL_DRAM & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_READS.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0404000120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x04040007F7",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_READS.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0110000400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x08040007F7",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_READS.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00900007F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x10040007F7",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_READS.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F84000120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x3F840007F7",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_READS.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0410000100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD",
- "MSRValue": "0x00900007F7",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00BC000122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x01100007F7",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0810000490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD",
- "MSRValue": "0x02100007F7",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0604000080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x04100007F7",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & L3_MISS & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x043C000400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x08100007F7",
+ "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & L3_MISS & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x103C000004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x10100007F7",
+ "BriefDescription": "ALL_DATA_RD & L3_MISS_REMOTE_HOP1_DRAM & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1010000491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x3F900007F7",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x023C000400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x00BC0007F7",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & L3_MISS_LOCAL_DRAM & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_READS.L3_MISS.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0804000020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD TBD",
- "MSRValue": "0x013C0007F7",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_READS.L3_MISS.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0204000020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x023C0007F7",
+ "BriefDescription": "ALL_DATA_RD & L3_MISS_REMOTE_HOP1_DRAM & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_READS.L3_MISS.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0110000491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD TBD",
- "MSRValue": "0x043C0007F7",
+ "BriefDescription": "ALL_READS & L3_MISS_LOCAL_DRAM & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_READS.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x08040007F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD TBD",
- "MSRValue": "0x083C0007F7",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & L3_MISS_LOCAL_DRAM & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_READS.L3_MISS.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1004000100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD TBD",
- "MSRValue": "0x103C0007F7",
+ "BriefDescription": "Counts all demand code reads",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_READS.L3_MISS.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0084000004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD TBD",
- "MSRValue": "0x3FBC0007F7",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_READS.L3_MISS.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F90000122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand data reads TBD",
- "MSRValue": "0x063B800001",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS.REMOTE_HITM",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x103FC00010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand data reads TBD",
- "MSRValue": "0x0604000001",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1004000002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) TBD",
- "MSRValue": "0x063B800002",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0084000004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) TBD",
- "MSRValue": "0x0604000002",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0104000120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand code reads TBD",
- "MSRValue": "0x063B800004",
+ "BriefDescription": "Number of times an HLE execution aborted due to any reasons (multiple categories may count as one).",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xC8",
+ "EventName": "HLE_RETIRED.ABORTED",
+ "PEBS": "1",
+ "PublicDescription": "Number of times HLE abort was triggered.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS.SNOOP_MISS",
+ "Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x023C008000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand code reads TBD",
- "MSRValue": "0x0604000004",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x013C000010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
- "MSRValue": "0x063B800010",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0604000120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
- "MSRValue": "0x0604000010",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0090000020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
- "MSRValue": "0x063B800020",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0204000010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
- "MSRValue": "0x0604000020",
+ "BriefDescription": "Counts all demand code reads",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0090000004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
- "MSRValue": "0x063B800080",
+ "BriefDescription": "ALL_DATA_RD & L3_MISS & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x083C000491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
- "MSRValue": "0x0604000080",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1010000491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
- "MSRValue": "0x063B800100",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0804000400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
- "MSRValue": "0x0604000100",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x063B800120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
- "MSRValue": "0x063B800400",
+ "BriefDescription": "ALL_PF_DATA_RD & L3_MISS_REMOTE_HOP1_DRAM & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0810000490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
- "MSRValue": "0x0604000400",
+ "BriefDescription": "ALL_DATA_RD & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F84000491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts any other requests TBD",
- "MSRValue": "0x063B808000",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.OTHER.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0810000400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts any other requests TBD",
- "MSRValue": "0x0604008000",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & L3_MISS & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FBC000010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x063B800490",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0410000122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0604000490",
+ "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & L3_MISS_REMOTE_HOP1_DRAM & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0410000004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x063B800120",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0210000004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0604000120",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1010000400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x063B800491",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & L3_MISS_REMOTE_HOP1_DRAM & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0410000080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0604000491",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0810000491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x063B800122",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F90000001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0604000122",
+ "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & L3_MISS_LOCAL_DRAM & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0104000001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x063B8007F7",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_READS.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x083FC00010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x06040007F7",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS.HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_READS.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x103C000491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand data reads TBD",
- "MSRValue": "0x103FC00001",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & L3_MISS & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_DATA_RD.L3_MISS.REMOTE_HITM",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x043C000010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) TBD",
- "MSRValue": "0x103FC00002",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_RFO.L3_MISS.REMOTE_HITM",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x083C000100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand code reads TBD",
- "MSRValue": "0x103FC00004",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & L3_MISS_LOCAL_DRAM & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_CODE_RD.L3_MISS.REMOTE_HITM",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0804000400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
- "MSRValue": "0x103FC00010",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & L3_MISS_REMOTE_HOP1_DRAM & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_DATA_RD.L3_MISS.REMOTE_HITM",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F90000020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
- "MSRValue": "0x103FC00020",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_RFO.L3_MISS.REMOTE_HITM",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0210000122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
- "MSRValue": "0x103FC00080",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_DATA_RD.L3_MISS.REMOTE_HITM",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0810000001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
- "MSRValue": "0x103FC00100",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_RFO.L3_MISS.REMOTE_HITM",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F90000400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
- "MSRValue": "0x103FC00400",
+ "BriefDescription": "ALL_PF_DATA_RD & L3_MISS_REMOTE_HOP1_DRAM & SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L1D_AND_SW.L3_MISS.REMOTE_HITM",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0090000490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts any other requests TBD",
- "MSRValue": "0x103FC08000",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & L3_MISS & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.OTHER.L3_MISS.REMOTE_HITM",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_MISS.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x083C000400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x103FC00490",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS.REMOTE_HITM",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0110000120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x103FC00120",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & L3_MISS & REMOTE_HITM",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_RFO.L3_MISS.REMOTE_HITM",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x103FC00020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x103FC00491",
+ "BriefDescription": "Number of times an HLE execution started.",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_DATA_RD.L3_MISS.REMOTE_HITM",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xC8",
+ "EventName": "HLE_RETIRED.START",
+ "PublicDescription": "Number of times we entered an HLE region. Does not count nested transactions.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1004000020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x103FC00122",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & L3_MISS_REMOTE_HOP1_DRAM & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_RFO.L3_MISS.REMOTE_HITM",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1010000400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x103FC007F7",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS.REMOTE_HIT_FORWARD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_READS.L3_MISS.REMOTE_HITM",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x083FC00004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand data reads TBD",
- "MSRValue": "0x083FC00001",
+ "BriefDescription": "ALL_DATA_RD & L3_MISS_REMOTE_HOP1_DRAM & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0410000491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) TBD",
- "MSRValue": "0x083FC00002",
+ "BriefDescription": "ALL_PF_RFO & L3_MISS & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_RFO.L3_MISS.REMOTE_HIT_FORWARD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_MISS.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x083C000120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand code reads TBD",
- "MSRValue": "0x083FC00004",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_CODE_RD.L3_MISS.REMOTE_HIT_FORWARD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0410000400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
- "MSRValue": "0x083FC00010",
+ "BriefDescription": "ALL_READS & L3_MISS & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x103C0007F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
- "MSRValue": "0x083FC00020",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_RFO.L3_MISS.REMOTE_HIT_FORWARD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0404000100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
- "MSRValue": "0x083FC00080",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & L3_MISS_REMOTE_HOP1_DRAM & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1010000100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
- "MSRValue": "0x083FC00100",
+ "BriefDescription": "Counts any other requests OTHER & L3_MISS & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_RFO.L3_MISS.REMOTE_HIT_FORWARD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_MISS.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x083C008000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
- "MSRValue": "0x083FC00400",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L1D_AND_SW.L3_MISS.REMOTE_HIT_FORWARD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0810008000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts any other requests TBD",
- "MSRValue": "0x083FC08000",
+ "BriefDescription": "ALL_PF_RFO & L3_MISS & REMOTE_HITM",
"Counter": "0,1,2,3",
- "EventName": "OCR.OTHER.L3_MISS.REMOTE_HIT_FORWARD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x103FC00120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x083FC00490",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0204000010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x083FC00120",
+ "BriefDescription": "ALL_READS & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_RFO.L3_MISS.REMOTE_HIT_FORWARD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00840007F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x083FC00491",
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 4 cycles.",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xCD",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x4",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 4 cycles. Reported latency may be longer than just the memory latency.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "TakenAlone": "1",
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x083FC00122",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS.SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_RFO.L3_MISS.REMOTE_HIT_FORWARD",
+ "CounterHTOff": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x023C000100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x083FC007F7",
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 8 cycles.",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_READS.L3_MISS.REMOTE_HIT_FORWARD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xCD",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x8",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 8 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "50021",
+ "TakenAlone": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "ALL_READS & L3_MISS & HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_MISS.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x083C0007F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
}
]
\ No newline at end of file
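The OCR.* (offcore-response) entries in the hunk above are all variations on a single encoding: the event is selected with EventCode 0xB7 or 0xBB on a general-purpose counter, and the transaction to match is described by a bit mask written to MSR 0x1a6 or 0x1a7 (the MSRIndex pair). Judging from the MSRValue fields visible in this diff, that mask decomposes into a request-type field in the low bits, a supplier/response field in the middle bits, and a snoop-response field in the high bits. A minimal Python sketch of that composition, using only mask values read off the entries above (the tables and helper are illustrative, not part of perf or of this patch):

# Field masks as they appear in the MSRValue fields of this hunk.
REQUEST = {
    "DEMAND_DATA_RD": 0x0000000001,
    "DEMAND_RFO":     0x0000000002,
    "PF_L2_RFO":      0x0000000020,
    "PF_L1D_AND_SW":  0x0000000400,
    "ALL_READS":      0x00000007F7,
}
SUPPLIER = {
    "SUPPLIER_NONE":            0x0000020000,
    "L3_HIT_M":                 0x0000040000,
    "L3_HIT":                   0x00003C0000,
    # As used by the *.L3_MISS.REMOTE_* entries above; other L3_MISS
    # events in this hunk select a narrower supplier field.
    "L3_MISS":                  0x003FC00000,
    "L3_MISS_REMOTE_HOP1_DRAM": 0x0010000000,
}
SNOOP = {
    "SNOOP_NONE":      0x0080000000,
    "SNOOP_MISS":      0x0200000000,
    "HITM_OTHER_CORE": 0x1000000000,  # named REMOTE_HITM in the L3_MISS events
    "ANY_SNOOP":       0x3F80000000,
}

def ocr_msr_value(request: str, supplier: str, snoop: str) -> int:
    """OR the three fields into the value written to MSR 0x1a6/0x1a7."""
    return REQUEST[request] | SUPPLIER[supplier] | SNOOP[snoop]

# Reproduces OCR.PF_L2_RFO.L3_MISS.REMOTE_HITM from the hunk above.
assert ocr_msr_value("PF_L2_RFO", "L3_MISS", "HITM_OTHER_CORE") == 0x103FC00020

At the tool level, kernels that export /sys/bus/event_source/devices/cpu/format/offcore_rsp let the same event be requested directly, e.g. cpu/event=0xb7,umask=0x1,offcore_rsp=0x103FC00020/, which is what perf's pmu-events tables resolve the OCR.* names to.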
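The MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4 and ..._GT_8 entries above follow a different pattern: MSRIndex 0x3F6 is the load-latency threshold MSR, and the MSRValue (4 or 8) is the minimum dispatch-to-completion latency, in core cycles, that a randomly selected load must exceed to be sampled. "PEBS": "2" marks them as precise-sampling-only events, and "TakenAlone": "1" means they cannot be scheduled together with other events on the PMU. A short sketch of how such an event is typically requested, assuming the standard Intel event/umask/ldlat format fields exported by the kernel (the helper itself is illustrative):

def latency_event_cmdline(threshold_cycles: int) -> str:
    """Build a perf record line for a load-latency threshold event.

    EventCode 0xCD and UMask 0x1 come straight from the JSON entries
    above; ldlat feeds MSR 0x3F6 with the chosen cycle threshold.
    """
    return (f"perf record -e "
            f"cpu/event=0xcd,umask=0x1,ldlat={threshold_cycles}/pp")

print(latency_event_cmdline(8))  # analogous to LOAD_LATENCY_GT_8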
diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/other.json b/tools/perf/pmu-events/arch/x86/cascadelakex/other.json
index 73e27c48bd6e..05d13d53c374 100644
--- a/tools/perf/pmu-events/arch/x86/cascadelakex/other.json
+++ b/tools/perf/pmu-events/arch/x86/cascadelakex/other.json
@@ -1,8908 +1,8662 @@
[
{
- "EventCode": "0x09",
- "UMask": "0x1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & L3_HIT_S & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "MEMORY_DISAMBIGUATION.HISTORY_RESET",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x28",
- "UMask": "0x7",
- "BriefDescription": "Core cycles where the core was running in a manner where Turbo may be clipped to the Non-AVX turbo schedule.",
- "Counter": "0,1,2,3",
- "EventName": "CORE_POWER.LVL0_TURBO_LICENSE",
- "PublicDescription": "Core cycles where the core was running with power-delivery for baseline license level 0. This includes non-AVX codes, SSE, AVX 128-bit, and low-current AVX 256-bit codes.",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x28",
- "UMask": "0x18",
- "BriefDescription": "Core cycles where the core was running in a manner where Turbo may be clipped to the AVX2 turbo schedule.",
- "Counter": "0,1,2,3",
- "EventName": "CORE_POWER.LVL1_TURBO_LICENSE",
- "PublicDescription": "Core cycles where the core was running with power-delivery for license level 1. This includes high current AVX 256-bit instructions as well as low current AVX 512-bit instructions.",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x28",
- "UMask": "0x20",
- "BriefDescription": "Core cycles where the core was running in a manner where Turbo may be clipped to the AVX512 turbo schedule.",
- "Counter": "0,1,2,3",
- "EventName": "CORE_POWER.LVL2_TURBO_LICENSE",
- "PublicDescription": "Core cycles where the core was running with power-delivery for license level 2 (introduced in Skylake Server michroarchtecture). This includes high current AVX 512-bit instructions.",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x28",
- "UMask": "0x40",
- "BriefDescription": "Core cycles the core was throttled due to a pending power level request.",
- "Counter": "0,1,2,3",
- "EventName": "CORE_POWER.THROTTLE",
- "PublicDescription": "Core cycles the out-of-order engine was throttled due to a pending power level request.",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x32",
- "UMask": "0x1",
- "BriefDescription": "Number of PREFETCHNTA instructions executed.",
- "Counter": "0,1,2,3",
- "EventName": "SW_PREFETCH_ACCESS.NTA",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x32",
- "UMask": "0x2",
- "BriefDescription": "Number of PREFETCHT0 instructions executed.",
- "Counter": "0,1,2,3",
- "EventName": "SW_PREFETCH_ACCESS.T0",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x32",
- "UMask": "0x4",
- "BriefDescription": "Number of PREFETCHT1 or PREFETCHT2 instructions executed.",
- "Counter": "0,1,2,3",
- "EventName": "SW_PREFETCH_ACCESS.T1_T2",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x32",
- "UMask": "0x8",
- "BriefDescription": "Number of PREFETCHW instructions executed.",
- "Counter": "0,1,2,3",
- "EventName": "SW_PREFETCH_ACCESS.PREFETCHW",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xCB",
- "UMask": "0x1",
- "BriefDescription": "Number of hardware interrupts received by the processor.",
- "Counter": "0,1,2,3",
- "EventName": "HW_INTERRUPTS.RECEIVED",
- "PublicDescription": "Counts the number of hardware interruptions received by the processor.",
- "SampleAfterValue": "203",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xEF",
- "UMask": "0x1",
- "Counter": "0,1,2,3",
- "EventName": "CORE_SNOOP_RESPONSE.RSP_IHITI",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xEF",
- "UMask": "0x2",
- "Counter": "0,1,2,3",
- "EventName": "CORE_SNOOP_RESPONSE.RSP_IHITFSE",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xEF",
- "UMask": "0x4",
- "Counter": "0,1,2,3",
- "EventName": "CORE_SNOOP_RESPONSE.RSP_SHITFSE",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xEF",
- "UMask": "0x8",
- "Counter": "0,1,2,3",
- "EventName": "CORE_SNOOP_RESPONSE.RSP_SFWDM",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xEF",
- "UMask": "0x10",
- "Counter": "0,1,2,3",
- "EventName": "CORE_SNOOP_RESPONSE.RSP_IFWDM",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xEF",
- "UMask": "0x20",
- "Counter": "0,1,2,3",
- "EventName": "CORE_SNOOP_RESPONSE.RSP_IFWDFE",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xEF",
- "UMask": "0x40",
- "Counter": "0,1,2,3",
- "EventName": "CORE_SNOOP_RESPONSE.RSP_SFWDFE",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xFE",
- "UMask": "0x2",
- "BriefDescription": "Counts number of cache lines that are allocated and written back to L3 with the intention that they are more likely to be reused shortly",
- "Counter": "0,1,2,3",
- "EventName": "IDI_MISC.WB_UPGRADE",
- "PublicDescription": "Counts number of cache lines that are allocated and written back to L3 with the intention that they are more likely to be reused shortly.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xFE",
- "UMask": "0x4",
- "BriefDescription": "Counts number of cache lines that are dropped and not written back to L3 as they are deemed to be less likely to be reused shortly",
- "Counter": "0,1,2,3",
- "EventName": "IDI_MISC.WB_DOWNGRADE",
- "PublicDescription": "Counts number of cache lines that are dropped and not written back to L3 as they are deemed to be less likely to be reused shortly.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "Offcore": "1",
+ "CounterHTOff": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand data reads",
- "MSRValue": "0x0080020001",
- "Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400100010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand data reads TBD",
- "MSRValue": "0x0100020001",
+ "BriefDescription": "ALL_PF_RFO & L3_HIT_S & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT_S.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000100120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand data reads",
- "MSRValue": "0x0200020001",
+ "BriefDescription": "ALL_READS & L3_HIT_F & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_HIT_F.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x01002007F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand data reads TBD",
- "MSRValue": "0x0400020001",
+ "BriefDescription": "ALL_DATA_RD & L3_HIT & SNOOP_HIT_WITH_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x08007C0491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand data reads TBD",
- "MSRValue": "0x0800020001",
+ "BriefDescription": "ALL_RFO & SUPPLIER_NONE & SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.SUPPLIER_NONE.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200020122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand data reads TBD",
- "MSRValue": "0x1000020001",
+ "BriefDescription": "ALL_READS & L3_HIT_M & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_HIT_M.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x08000407F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand data reads TBD",
- "MSRValue": "0x3F80020001",
+ "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & L3_HIT_S & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_S.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000100004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand data reads",
- "MSRValue": "0x0080040001",
+ "BriefDescription": "Counts any other requests",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_M.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_HIT_E.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080088000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand data reads TBD",
- "MSRValue": "0x0100040001",
+ "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & L3_HIT_S & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_M.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_S.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100100001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand data reads",
- "MSRValue": "0x0200040001",
+ "BriefDescription": "ALL_RFO & L3_HIT_S & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_M.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_HIT_S.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100100122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand data reads TBD",
- "MSRValue": "0x0400040001",
+ "BriefDescription": "Counts any other requests OTHER & SUPPLIER_NONE & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.SUPPLIER_NONE.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000028000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand data reads TBD",
- "MSRValue": "0x0800040001",
+ "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & L3_HIT_M & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT_M.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100040002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand data reads TBD",
- "MSRValue": "0x1000040001",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & L3_HIT_F & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_M.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400200020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand data reads TBD",
- "MSRValue": "0x3F80040001",
+ "BriefDescription": "ALL_PF_RFO & L3_HIT_S & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_M.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT_S.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800100120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand data reads",
- "MSRValue": "0x0080080001",
+ "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & L3_HIT_E & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_E.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_E.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80080004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand data reads TBD",
- "MSRValue": "0x0100080001",
+ "BriefDescription": "ALL_PF_RFO & L3_HIT & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_E.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C0120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand data reads",
- "MSRValue": "0x0200080001",
+ "BriefDescription": "ALL_DATA_RD & L3_HIT_S & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_E.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT_S.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000100491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand data reads TBD",
- "MSRValue": "0x0400080001",
+ "BriefDescription": "ALL_PF_RFO & L3_HIT_S & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT_S.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80100120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand data reads TBD",
- "MSRValue": "0x0800080001",
+ "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & L3_HIT_S & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400100001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand data reads TBD",
- "MSRValue": "0x1000080001",
+ "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & L3_HIT & SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_E.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x02003C0001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand data reads TBD",
- "MSRValue": "0x3F80080001",
+ "BriefDescription": "Counts any other requests OTHER & L3_HIT_M & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_E.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_HIT_M.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000048000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand data reads",
- "MSRValue": "0x0080100001",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & SUPPLIER_NONE & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_S.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400020020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand data reads TBD",
- "MSRValue": "0x0100100001",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_S.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT_S.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080100002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand data reads",
- "MSRValue": "0x0200100001",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & L3_HIT_E & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_S.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400080080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand data reads TBD",
- "MSRValue": "0x0400100001",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & L3_HIT_F & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400200080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand data reads TBD",
- "MSRValue": "0x0800100001",
+ "BriefDescription": "ALL_READS & L3_HIT_M & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_HIT_M.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x01000407F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand data reads TBD",
- "MSRValue": "0x1000100001",
+ "BriefDescription": "Counts any other requests OTHER & L3_HIT_E & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_S.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_HIT_E.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80088000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand data reads TBD",
- "MSRValue": "0x3F80100001",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & L3_HIT & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_S.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x08003C0020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand data reads",
- "MSRValue": "0x0080200001",
+ "BriefDescription": "ALL_PF_RFO & L3_HIT_E & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_F.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT_E.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000080120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand data reads TBD",
- "MSRValue": "0x0100200001",
+ "BriefDescription": "ALL_PF_DATA_RD & L3_HIT & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_F.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand data reads",
- "MSRValue": "0x0200200001",
+ "BriefDescription": "ALL_RFO & SUPPLIER_NONE & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_F.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100020122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand data reads TBD",
- "MSRValue": "0x0400200001",
+ "BriefDescription": "ALL_DATA_RD & L3_HIT_F & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT_F.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000200491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand data reads TBD",
- "MSRValue": "0x0800200001",
+ "BriefDescription": "Counts any other requests OTHER & PMM_HIT_LOCAL_PMM & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80408000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand data reads TBD",
- "MSRValue": "0x1000200001",
+ "BriefDescription": "ALL_PF_DATA_RD & SUPPLIER_NONE & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_F.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000020490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand data reads TBD",
- "MSRValue": "0x3F80200001",
+ "BriefDescription": "ALL_READS & PMM_HIT_LOCAL_PMM & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_F.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F804007F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand data reads TBD",
- "MSRValue": "0x00803C0001",
+ "BriefDescription": "Counts number of cache lines that are dropped and not written back to L3 as they are deemed to be less likely to be reused shortly",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xFE",
+ "EventName": "IDI_MISC.WB_DOWNGRADE",
+ "PublicDescription": "Counts number of cache lines that are dropped and not written back to L3 as they are deemed to be less likely to be reused shortly.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x4"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand data reads TBD TBD",
- "MSRValue": "0x01003C0001",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT_E.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080080002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand data reads TBD",
- "MSRValue": "0x02003C0001",
+ "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & L3_HIT & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand data reads TBD TBD",
- "MSRValue": "0x04003C0001",
+ "BriefDescription": "ALL_PF_RFO & L3_HIT_E & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400080120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand data reads TBD TBD",
- "MSRValue": "0x08003C0001",
+ "BriefDescription": "Counts demand data reads",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_F.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200200001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand data reads TBD TBD",
- "MSRValue": "0x10003C0001",
+ "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & L3_HIT_M & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_M.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80040004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand data reads TBD TBD",
- "MSRValue": "0x3F803C0001",
+ "BriefDescription": "Counts any other requests OTHER & L3_HIT_S & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_HIT_S.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80108000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "MSRValue": "0x0080020002",
+ "BriefDescription": "Counts any other requests OTHER & L3_HIT & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_RFO.SUPPLIER_NONE.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x04003C8000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) TBD",
- "MSRValue": "0x0100020002",
+ "BriefDescription": "ALL_DATA_RD & PMM_HIT_LOCAL_PMM & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80400491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "MSRValue": "0x0200020002",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_RFO.SUPPLIER_NONE.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x08007C0010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) TBD",
- "MSRValue": "0x0400020002",
+ "BriefDescription": "Counts any other requests OTHER & L3_HIT_S & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_HIT_S.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100108000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) TBD",
- "MSRValue": "0x0800020002",
+ "BriefDescription": "ALL_READS & L3_HIT_F & SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_HIT_F.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x02002007F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) TBD",
- "MSRValue": "0x1000020002",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_RFO.SUPPLIER_NONE.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x08007C0400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) TBD",
- "MSRValue": "0x3F80020002",
+ "BriefDescription": "ALL_PF_DATA_RD & L3_HIT_M & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_RFO.SUPPLIER_NONE.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400040490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "MSRValue": "0x0080040002",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & L3_HIT_E & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_RFO.L3_HIT_M.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT_E.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000080020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) TBD",
- "MSRValue": "0x0100040002",
+ "BriefDescription": "Counts all demand code reads",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_RFO.L3_HIT_M.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080020004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "MSRValue": "0x0200040002",
+ "BriefDescription": "ALL_PF_RFO & L3_HIT_E & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_RFO.L3_HIT_M.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT_E.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800080120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) TBD",
- "MSRValue": "0x0400040002",
+ "BriefDescription": "ALL_RFO & L3_HIT_S & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_RFO.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400100122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) TBD",
- "MSRValue": "0x0800040002",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_RFO.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x08007C0100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) TBD",
- "MSRValue": "0x1000040002",
+ "BriefDescription": "ALL_PF_DATA_RD & L3_HIT_S & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_RFO.L3_HIT_M.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800100490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) TBD",
- "MSRValue": "0x3F80040002",
+ "BriefDescription": "Counts any other requests OTHER & L3_HIT & SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_RFO.L3_HIT_M.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x02003C8000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "MSRValue": "0x0080080002",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_RFO.L3_HIT_E.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_E.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200080400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) TBD",
- "MSRValue": "0x0100080002",
+ "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & L3_HIT_S & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_RFO.L3_HIT_E.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800100001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "MSRValue": "0x0200080002",
+ "BriefDescription": "ALL_RFO & L3_HIT & SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_RFO.L3_HIT_E.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x02003C0122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) TBD",
- "MSRValue": "0x0400080002",
+ "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & L3_HIT_M & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_RFO.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT_M.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000040002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) TBD",
- "MSRValue": "0x0800080002",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & L3_HIT_S & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_RFO.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_S.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80100080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) TBD",
- "MSRValue": "0x1000080002",
+ "BriefDescription": "ALL_RFO & L3_HIT_F & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_RFO.L3_HIT_E.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_HIT_F.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000200122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) TBD",
- "MSRValue": "0x3F80080002",
+ "BriefDescription": "Counts any other requests OTHER & L3_HIT_E & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_RFO.L3_HIT_E.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_HIT_E.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800088000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "MSRValue": "0x0080100002",
+ "BriefDescription": "ALL_DATA_RD & SUPPLIER_NONE & SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_RFO.L3_HIT_S.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200020491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) TBD",
- "MSRValue": "0x0100100002",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & L3_HIT_E & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_RFO.L3_HIT_S.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400080010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "MSRValue": "0x0200100002",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & L3_HIT & SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_RFO.L3_HIT_S.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00803C0400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) TBD",
- "MSRValue": "0x0400100002",
+ "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & L3_HIT_E & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_RFO.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT_E.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80080002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) TBD",
- "MSRValue": "0x0800100002",
+ "BriefDescription": "ALL_RFO & L3_HIT_M & SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_RFO.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_HIT_M.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200040122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) TBD",
- "MSRValue": "0x1000100002",
+ "BriefDescription": "ALL_RFO & L3_HIT & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_RFO.L3_HIT_S.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) TBD",
- "MSRValue": "0x3F80100002",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & L3_HIT & SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_RFO.L3_HIT_S.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00803C0080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "MSRValue": "0x0080200002",
+ "BriefDescription": "Counts demand data reads",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_RFO.L3_HIT_F.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_E.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080080001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) TBD",
- "MSRValue": "0x0100200002",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_RFO.L3_HIT_F.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_M.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200040400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "MSRValue": "0x0200200002",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs have any response type.",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_RFO.L3_HIT_F.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0000010020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) TBD",
- "MSRValue": "0x0400200002",
+ "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & SUPPLIER_NONE & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_RFO.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800020002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) TBD",
- "MSRValue": "0x0800200002",
+ "BriefDescription": "Counts all demand code reads",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_RFO.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_F.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200200004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) TBD",
- "MSRValue": "0x1000200002",
+ "BriefDescription": "Counts any other requests",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_RFO.L3_HIT_F.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.SUPPLIER_NONE.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200028000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) TBD",
- "MSRValue": "0x3F80200002",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & L3_HIT_E & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_RFO.L3_HIT_F.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT_E.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100080100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) TBD",
- "MSRValue": "0x00803C0002",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & PMM_HIT_LOCAL_PMM & SNOOP_NOT_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100400080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) TBD TBD",
- "MSRValue": "0x01003C0002",
+ "BriefDescription": "ALL_READS & L3_HIT_E & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_RFO.L3_HIT.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_HIT_E.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x08000807F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) TBD",
- "MSRValue": "0x02003C0002",
+ "BriefDescription": "ALL_RFO & L3_HIT_F & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400200122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) TBD TBD",
- "MSRValue": "0x04003C0002",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT_M.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080040020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) TBD TBD",
- "MSRValue": "0x08003C0002",
+ "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & L3_HIT_F & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_RFO.L3_HIT.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_F.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80200001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) TBD TBD",
- "MSRValue": "0x10003C0002",
+ "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & L3_HIT_M & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_RFO.L3_HIT.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800040001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) TBD TBD",
- "MSRValue": "0x3F803C0002",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & PMM_HIT_LOCAL_PMM & SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_RFO.L3_HIT.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080400400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand code reads",
- "MSRValue": "0x0080020004",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & L3_HIT & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand code reads TBD",
- "MSRValue": "0x0100020004",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & L3_HIT_M & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_CODE_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_M.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100040010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand code reads",
- "MSRValue": "0x0200020004",
+ "BriefDescription": "Counts any other requests OTHER & L3_HIT_M & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_HIT_M.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800048000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand code reads TBD",
- "MSRValue": "0x0400020004",
+ "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & L3_HIT_E & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_CODE_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_E.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000080001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand code reads TBD",
- "MSRValue": "0x0800020004",
+ "BriefDescription": "ALL_DATA_RD & L3_HIT_E & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_CODE_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT_E.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100080491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand code reads TBD",
- "MSRValue": "0x1000020004",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_CODE_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT_S.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080100020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand code reads TBD",
- "MSRValue": "0x3F80020004",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & L3_HIT_E & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_CODE_RD.SUPPLIER_NONE.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT_E.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100080020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand code reads",
- "MSRValue": "0x0080040004",
+ "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & L3_HIT & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_M.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C0002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand code reads TBD",
- "MSRValue": "0x0100040004",
+ "BriefDescription": "ALL_DATA_RD & SUPPLIER_NONE & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_M.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400020491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand code reads",
- "MSRValue": "0x0200040004",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & SUPPLIER_NONE & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_M.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100020010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand code reads TBD",
- "MSRValue": "0x0400040004",
+ "BriefDescription": "ALL_PF_DATA_RD & SUPPLIER_NONE & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800020490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand code reads TBD",
- "MSRValue": "0x0800040004",
+ "BriefDescription": "ALL_RFO & L3_HIT & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x01003C0122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand code reads TBD",
- "MSRValue": "0x1000040004",
+ "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & SUPPLIER_NONE & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_M.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100020001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand code reads TBD",
- "MSRValue": "0x3F80040004",
+ "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & L3_HIT & SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_M.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00803C0001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand code reads",
- "MSRValue": "0x0080080004",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_E.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.SUPPLIER_NONE.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200020100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand code reads TBD",
- "MSRValue": "0x0100080004",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_E.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_F.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200200010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand code reads",
- "MSRValue": "0x0200080004",
+ "BriefDescription": "ALL_PF_DATA_RD & L3_HIT_S & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_E.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400100490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand code reads TBD",
- "MSRValue": "0x0400080004",
+ "BriefDescription": "ALL_PF_DATA_RD & ANY_RESPONSE have any response type.",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0000010490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand code reads TBD",
- "MSRValue": "0x0800080004",
+ "BriefDescription": "Counts any other requests OTHER & L3_HIT_S & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_HIT_S.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800108000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand code reads TBD",
- "MSRValue": "0x1000080004",
+ "BriefDescription": "ALL_PF_RFO & L3_HIT & SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_E.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x02003C0120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand code reads TBD",
- "MSRValue": "0x3F80080004",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & L3_HIT_M & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_E.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400040400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand code reads",
- "MSRValue": "0x0080100004",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & SUPPLIER_NONE & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_S.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400020010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand code reads TBD",
- "MSRValue": "0x0100100004",
+ "BriefDescription": "Counts any other requests",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_S.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_HIT_M.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200048000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand code reads",
- "MSRValue": "0x0200100004",
+ "BriefDescription": "ALL_DATA_RD & L3_HIT_S & SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_S.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT_S.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200100491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand code reads TBD",
- "MSRValue": "0x0400100004",
+ "BriefDescription": "ALL_RFO & L3_HIT_M & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_HIT_M.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80040122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand code reads TBD",
- "MSRValue": "0x0800100004",
+ "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & L3_HIT_F & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT_F.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800200002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand code reads TBD",
- "MSRValue": "0x1000100004",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & SUPPLIER_NONE & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_S.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.SUPPLIER_NONE.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000020100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand code reads TBD",
- "MSRValue": "0x3F80100004",
+ "BriefDescription": "ALL_READS & L3_HIT_E & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_S.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_HIT_E.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F800807F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand code reads",
- "MSRValue": "0x0080200004",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & L3_HIT_E & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_F.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_E.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100080400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand code reads TBD",
- "MSRValue": "0x0100200004",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & SUPPLIER_NONE & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_F.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.SUPPLIER_NONE.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80020100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand code reads",
- "MSRValue": "0x0200200004",
+ "BriefDescription": "ALL_PF_DATA_RD & L3_HIT_F & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_F.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_F.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000200490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand code reads TBD",
- "MSRValue": "0x0400200004",
+ "BriefDescription": "ALL_READS & SUPPLIER_NONE & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x08000207F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand code reads TBD",
- "MSRValue": "0x0800200004",
+ "BriefDescription": "ALL_READS & SUPPLIER_NONE & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x04000207F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand code reads TBD",
- "MSRValue": "0x1000200004",
+ "BriefDescription": "ALL_PF_DATA_RD & L3_HIT_E & SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_F.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_E.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200080490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand code reads TBD",
- "MSRValue": "0x3F80200004",
+ "BriefDescription": "ALL_DATA_RD & L3_HIT_F & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_F.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400200491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand code reads TBD",
- "MSRValue": "0x00803C0004",
+ "BriefDescription": "ALL_PF_RFO & L3_HIT_E & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT_E.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100080120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand code reads TBD TBD",
- "MSRValue": "0x01003C0004",
+ "BriefDescription": "ALL_RFO & L3_HIT_E & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_CODE_RD.L3_HIT.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_HIT_E.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800080122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand code reads TBD",
- "MSRValue": "0x02003C0004",
+ "BriefDescription": "ALL_DATA_RD & L3_HIT_F & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT_F.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80200491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand code reads TBD TBD",
- "MSRValue": "0x04003C0004",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & L3_HIT & SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_CODE_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00803C0100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand code reads TBD TBD",
- "MSRValue": "0x08003C0004",
+ "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & L3_HIT_E & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_CODE_RD.L3_HIT.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800080001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand code reads TBD TBD",
- "MSRValue": "0x10003C0004",
+ "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & L3_HIT_E & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_CODE_RD.L3_HIT.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_E.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100080001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand code reads TBD TBD",
- "MSRValue": "0x3F803C0004",
+ "BriefDescription": "Counts any other requests OTHER & L3_HIT_E & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_CODE_RD.L3_HIT.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400088000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
- "MSRValue": "0x0080020010",
+ "BriefDescription": "ALL_RFO & L3_HIT_M & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_HIT_M.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000040122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
- "MSRValue": "0x0100020010",
+ "BriefDescription": "Counts demand data reads",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_M.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200040001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
- "MSRValue": "0x0200020010",
+ "BriefDescription": "ALL_RFO & ANY_RESPONSE have any response type.",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0000010122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
- "MSRValue": "0x0400020010",
+ "BriefDescription": "Counts all demand code reads have any response type.",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0000010004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
- "MSRValue": "0x0800020010",
+ "BriefDescription": "Counts all demand code reads",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_E.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200080004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
- "MSRValue": "0x1000020010",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT_M.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200040100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
- "MSRValue": "0x3F80020010",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x08007C0002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
- "MSRValue": "0x0080040010",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & PMM_HIT_LOCAL_PMM & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_M.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80400400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
- "MSRValue": "0x0100040010",
+ "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & L3_HIT & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_M.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x08003C0001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
- "MSRValue": "0x0200040010",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & L3_HIT & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_M.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C0020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
- "MSRValue": "0x0400040010",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & L3_HIT_E & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400080020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
- "MSRValue": "0x0800040010",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & PMM_HIT_LOCAL_PMM & SNOOP_NOT_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100400020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
- "MSRValue": "0x1000040010",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & L3_HIT_F & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_M.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT_F.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80200020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
- "MSRValue": "0x3F80040010",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & SUPPLIER_NONE & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_M.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000020080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
- "MSRValue": "0x0080080010",
+ "BriefDescription": "ALL_READS & L3_HIT & SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_E.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x02003C07F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
- "MSRValue": "0x0100080010",
+ "BriefDescription": "ALL_PF_DATA_RD & SUPPLIER_NONE & SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_E.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200020490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
- "MSRValue": "0x0200080010",
+ "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & L3_HIT_F & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_E.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400200001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
- "MSRValue": "0x0400080010",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & PMM_HIT_LOCAL_PMM & SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080400010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
- "MSRValue": "0x0800080010",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT_E.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200080100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
- "MSRValue": "0x1000080010",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_E.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT_F.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080200002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
- "MSRValue": "0x3F80080010",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_E.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.SUPPLIER_NONE.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080020020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
- "MSRValue": "0x0080100010",
+ "BriefDescription": "Counts any other requests OTHER & L3_HIT_E & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_S.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_HIT_E.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100088000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
- "MSRValue": "0x0100100010",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & L3_HIT_F & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_S.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400200100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
- "MSRValue": "0x0200100010",
+ "BriefDescription": "ALL_PF_DATA_RD & L3_HIT_F & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_S.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_F.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80200490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
- "MSRValue": "0x0400100010",
+ "BriefDescription": "ALL_PF_RFO & L3_HIT_M & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT_M.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800040120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
- "MSRValue": "0x0800100010",
+ "BriefDescription": "ALL_READS & SUPPLIER_NONE & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.SUPPLIER_NONE.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F800207F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
- "MSRValue": "0x1000100010",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & L3_HIT_F & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_S.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_F.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80200080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
- "MSRValue": "0x3F80100010",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & L3_HIT & SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_S.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00803C0010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
- "MSRValue": "0x0080200010",
+ "BriefDescription": "ALL_DATA_RD & L3_HIT_S & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_F.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT_S.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100100491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
- "MSRValue": "0x0100200010",
+ "BriefDescription": "ALL_PF_RFO & SUPPLIER_NONE & SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_F.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.SUPPLIER_NONE.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200020120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
- "MSRValue": "0x0200200010",
+ "BriefDescription": "ALL_RFO & L3_HIT_M & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_F.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400040122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
- "MSRValue": "0x0400200010",
+ "BriefDescription": "ALL_READS & SUPPLIER_NONE & SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.SUPPLIER_NONE.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00800207F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
- "MSRValue": "0x0800200010",
+ "BriefDescription": "ALL_DATA_RD & L3_HIT_S & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800100491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
- "MSRValue": "0x1000200010",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_F.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.SUPPLIER_NONE.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200020400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
- "MSRValue": "0x3F80200010",
+ "BriefDescription": "ALL_RFO & PMM_HIT_LOCAL_PMM & SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_F.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080400122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
- "MSRValue": "0x00803C0010",
+ "BriefDescription": "ALL_PF_DATA_RD & SUPPLIER_NONE & SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_DATA_RD.L3_HIT.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080020490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD TBD",
- "MSRValue": "0x01003C0010",
+ "BriefDescription": "ALL_READS & L3_HIT_E & SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_HIT_E.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x02000807F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
- "MSRValue": "0x02003C0010",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & PMM_HIT_LOCAL_PMM & SNOOP_NOT_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_DATA_RD.L3_HIT.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100400100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD TBD",
- "MSRValue": "0x04003C0010",
+ "BriefDescription": "ALL_DATA_RD & L3_HIT_M & SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT_M.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080040491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD TBD",
- "MSRValue": "0x08003C0010",
+ "BriefDescription": "ALL_RFO & PMM_HIT_LOCAL_PMM & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80400122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD TBD",
- "MSRValue": "0x10003C0010",
+ "BriefDescription": "ALL_RFO & L3_HIT_M & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_DATA_RD.L3_HIT.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_HIT_M.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800040122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD TBD",
- "MSRValue": "0x3F803C0010",
+ "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & L3_HIT_M & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_DATA_RD.L3_HIT.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400040004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
- "MSRValue": "0x0080020020",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_RFO.SUPPLIER_NONE.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_F.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080200010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
- "MSRValue": "0x0100020020",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & L3_HIT_M & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400040010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
- "MSRValue": "0x0200020020",
+ "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & L3_HIT & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_RFO.SUPPLIER_NONE.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x04003C0001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
- "MSRValue": "0x0400020020",
+ "BriefDescription": "ALL_PF_RFO & L3_HIT & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x08003C0120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
- "MSRValue": "0x0800020020",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & L3_HIT_F & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT_F.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800200100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
- "MSRValue": "0x1000020020",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & L3_HIT_S & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_RFO.SUPPLIER_NONE.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400100080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
- "MSRValue": "0x3F80020020",
+ "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & L3_HIT_E & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_RFO.SUPPLIER_NONE.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_E.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80080001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
- "MSRValue": "0x0080040020",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & L3_HIT_F & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_RFO.L3_HIT_M.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_F.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100200400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
- "MSRValue": "0x0100040020",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & L3_HIT_S & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_RFO.L3_HIT_M.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_S.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80100010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
- "MSRValue": "0x0200040020",
+ "BriefDescription": "Counts demand data reads",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_RFO.L3_HIT_M.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_F.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080200001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
- "MSRValue": "0x0400040020",
+ "BriefDescription": "Counts all demand code reads",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_RFO.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_E.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080080004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
- "MSRValue": "0x0800040020",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_RFO.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200020010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
- "MSRValue": "0x1000040020",
+ "BriefDescription": "ALL_PF_DATA_RD & L3_HIT_F & SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_RFO.L3_HIT_M.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_F.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200200490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
- "MSRValue": "0x3F80040020",
+ "BriefDescription": "ALL_PF_DATA_RD & L3_HIT_M & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_RFO.L3_HIT_M.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_M.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80040490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
- "MSRValue": "0x0080080020",
+ "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & SUPPLIER_NONE & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_RFO.L3_HIT_E.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000020004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
- "MSRValue": "0x0100080020",
+ "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & L3_HIT & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_RFO.L3_HIT_E.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x08003C0004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
- "MSRValue": "0x0200080020",
+ "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & SUPPLIER_NONE & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_RFO.L3_HIT_E.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400020004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
- "MSRValue": "0x0400080020",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & L3_HIT_S & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_RFO.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT_S.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000100020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
- "MSRValue": "0x0800080020",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_RFO.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT_M.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200040002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
- "MSRValue": "0x1000080020",
+ "BriefDescription": "ALL_RFO & L3_HIT_F & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_RFO.L3_HIT_E.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_HIT_F.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800200122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
- "MSRValue": "0x3F80080020",
+ "BriefDescription": "ALL_RFO & L3_HIT_E & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_RFO.L3_HIT_E.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400080122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
- "MSRValue": "0x0080100020",
+ "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & SUPPLIER_NONE & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_RFO.L3_HIT_S.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.SUPPLIER_NONE.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000020002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
- "MSRValue": "0x0100100020",
+ "BriefDescription": "Core cycles where the core was running in a manner where Turbo may be clipped to the Non-AVX turbo schedule.",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_RFO.L3_HIT_S.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x28",
+ "EventName": "CORE_POWER.LVL0_TURBO_LICENSE",
+ "PublicDescription": "Core cycles where the core was running with power-delivery for baseline license level 0. This includes non-AVX codes, SSE, AVX 128-bit, and low-current AVX 256-bit codes.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x7"
},
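The CORE_POWER.* entries interleaved with the offcore ones count cycles rather than transactions, so they are most useful as ratios. A small illustrative sketch follows; using unhalted thread cycles (perf's cycles event) as the denominator is an assumption of this sketch, not a metric defined by this file.

    # Illustrative ratio: share of unhalted cycles spent at baseline license
    # level 0. CPU_CLK_UNHALTED.THREAD (perf's "cycles") as the denominator
    # is an assumption of this sketch, not something this file prescribes.
    def lvl0_license_share(lvl0_cycles: int, unhalted_cycles: int) -> float:
        return lvl0_cycles / unhalted_cycles if unhalted_cycles else 0.0

    # Counts as collected by, e.g.:
    #   perf stat -e core_power.lvl0_turbo_license,cycles -- workload
    print(f"{lvl0_license_share(950_000_000, 1_000_000_000):.1%}")  # 95.0%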
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
- "MSRValue": "0x0200100020",
+ "BriefDescription": "Counts any other requests",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_RFO.L3_HIT_S.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.SUPPLIER_NONE.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080028000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
- "MSRValue": "0x0400100020",
+ "BriefDescription": "ALL_PF_DATA_RD & PMM_HIT_LOCAL_PMM & SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_RFO.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080400490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
- "MSRValue": "0x0800100020",
+ "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & L3_HIT_M & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_RFO.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT_M.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800040002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
- "MSRValue": "0x1000100020",
+ "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & L3_HIT_F & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_RFO.L3_HIT_S.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT_F.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100200002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
- "MSRValue": "0x3F80100020",
+ "BriefDescription": "ALL_RFO & SUPPLIER_NONE & SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_RFO.L3_HIT_S.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.SUPPLIER_NONE.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080020122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
- "MSRValue": "0x0080200020",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & PMM_HIT_LOCAL_PMM & SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_RFO.L3_HIT_F.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080400020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
- "MSRValue": "0x0100200020",
+ "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & L3_HIT_E & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_RFO.L3_HIT_F.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_E.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000080004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
- "MSRValue": "0x0200200020",
+ "BriefDescription": "ALL_DATA_RD & L3_HIT_S & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_RFO.L3_HIT_F.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT_S.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80100491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
- "MSRValue": "0x0400200020",
+ "BriefDescription": "ALL_READS & L3_HIT_F & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_RFO.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_HIT_F.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x08002007F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
- "MSRValue": "0x0800200020",
+ "BriefDescription": "ALL_DATA_RD & L3_HIT_F & SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_RFO.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT_F.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200200491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
- "MSRValue": "0x1000200020",
+ "BriefDescription": "ALL_PF_RFO & SUPPLIER_NONE & SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_RFO.L3_HIT_F.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.SUPPLIER_NONE.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080020120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
- "MSRValue": "0x3F80200020",
+ "BriefDescription": "ALL_PF_RFO & ANY_RESPONSE have any response type.",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_RFO.L3_HIT_F.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0000010120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
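Because every offcore entry here shares EventCode 0xB7, 0xBB and MSRIndex 0x1a6,0x1a7, the MSRValue is what perf ultimately programs into the dedicated MSR. A hedged sketch of how one of these JSON entries maps onto perf's raw event syntax, assuming the offcore_rsp format term that Intel core PMUs expose; picking the first of the two event codes is a simplification, since 0xB7 pairs with MSR 0x1a6 and 0xBB with MSR 0x1a7 and perf can schedule either.

    # Illustrative mapping from a JSON entry to perf's raw event syntax;
    # assumes the offcore_rsp format term exposed by Intel core PMUs.
    def perf_event_string(entry: dict) -> str:
        # Simplification: always take the first of the two event codes.
        event = entry["EventCode"].split(",")[0].strip()
        return (f"cpu/event={event},umask={entry['UMask']},"
                f"offcore_rsp={entry['MSRValue']}/")

    entry = {"EventCode": "0xB7, 0xBB", "UMask": "0x1",
             "MSRValue": "0x0000010120"}      # OCR.ALL_PF_RFO.ANY_RESPONSE
    print(perf_event_string(entry))
    # cpu/event=0xB7,umask=0x1,offcore_rsp=0x0000010120/
    # usable directly, e.g.:
    #   perf stat -e 'cpu/event=0xB7,umask=0x1,offcore_rsp=0x0000010120/' -- cmd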
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
- "MSRValue": "0x00803C0020",
+ "BriefDescription": "ALL_RFO & L3_HIT & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_RFO.L3_HIT.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x04003C0122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD TBD",
- "MSRValue": "0x01003C0020",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & L3_HIT_M & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_RFO.L3_HIT.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT_M.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000040100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
- "MSRValue": "0x02003C0020",
+ "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & L3_HIT_F & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_RFO.L3_HIT.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_F.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80200004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD TBD",
- "MSRValue": "0x04003C0020",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & L3_HIT_E & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_E.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80080010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD TBD",
- "MSRValue": "0x08003C0020",
+ "BriefDescription": "ALL_READS & L3_HIT_M & SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_RFO.L3_HIT.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_HIT_M.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x02000407F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD TBD",
- "MSRValue": "0x10003C0020",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & L3_HIT & SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_RFO.L3_HIT.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x02003C0020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD TBD",
- "MSRValue": "0x3F803C0020",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & L3_HIT_M & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_RFO.L3_HIT.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_M.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100040080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
- "MSRValue": "0x0080020080",
+ "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & L3_HIT & SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x02003C0002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
- "MSRValue": "0x0100020080",
+ "BriefDescription": "Core cycles the core was throttled due to a pending power level request.",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x28",
+ "EventName": "CORE_POWER.THROTTLE",
+ "PublicDescription": "Core cycles the out-of-order engine was throttled due to a pending power level request.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x40"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
- "MSRValue": "0x0200020080",
+ "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & L3_HIT & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
- "MSRValue": "0x0400020080",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT_F.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200200020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
- "MSRValue": "0x0800020080",
+ "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & L3_HIT_E & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800080004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
- "MSRValue": "0x1000020080",
+ "BriefDescription": "Counts all demand code reads",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200020004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
- "MSRValue": "0x3F80020080",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & L3_HIT_M & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_M.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80040400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
- "MSRValue": "0x0080040080",
+ "BriefDescription": "ALL_PF_RFO & L3_HIT_M & SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_M.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT_M.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080040120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
- "MSRValue": "0x0100040080",
+ "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & L3_HIT_S & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_M.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_S.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000100001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
- "MSRValue": "0x0200040080",
+ "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & L3_HIT_M & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_M.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT_M.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80040002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
- "MSRValue": "0x0400040080",
+ "BriefDescription": "Counts demand data reads hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x01003C0001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
- "MSRValue": "0x0800040080",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_M.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080040400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
- "MSRValue": "0x1000040080",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & L3_HIT_S & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_M.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_S.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000100010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
- "MSRValue": "0x3F80040080",
+ "BriefDescription": "ALL_RFO & SUPPLIER_NONE & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_M.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800020122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
- "MSRValue": "0x0080080080",
+ "BriefDescription": "Counts any other requests have any response type.",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_E.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0000018000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
- "MSRValue": "0x0100080080",
+ "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & L3_HIT_S & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_E.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400100004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
- "MSRValue": "0x0200080080",
+ "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & SUPPLIER_NONE & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_E.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800020004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
- "MSRValue": "0x0400080080",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & L3_HIT_E & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT_E.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80080020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
- "MSRValue": "0x0800080080",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & SUPPLIER_NONE & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80020010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
- "MSRValue": "0x1000080080",
+ "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & SUPPLIER_NONE & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_E.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100020002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
- "MSRValue": "0x3F80080080",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_E.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_E.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080080010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
- "MSRValue": "0x0080100080",
+ "BriefDescription": "ALL_PF_RFO & L3_HIT & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_S.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x01003C0120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
- "MSRValue": "0x0100100080",
+ "BriefDescription": "Counts demand data reads have any response type.",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_S.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0000010001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
- "MSRValue": "0x0200100080",
+ "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & L3_HIT & SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_S.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x02003C0004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
- "MSRValue": "0x0400100080",
+ "BriefDescription": "Counts any other requests",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_HIT_F.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080208000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
- "MSRValue": "0x0800100080",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & SUPPLIER_NONE & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.SUPPLIER_NONE.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80020020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
- "MSRValue": "0x1000100080",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & L3_HIT_S & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_DATA_RD.L3_HIT_S.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000100080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
- "MSRValue": "0x3F80100080",
+ "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & PMM_HIT_LOCAL_PMM & SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_S.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080400002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
- "MSRValue": "0x0080200080",
+ "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & L3_HIT_S & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_F.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT_S.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100100002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
- "MSRValue": "0x0100200080",
+ "BriefDescription": "ALL_PF_RFO & L3_HIT_M & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_F.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT_M.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100040120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
- "MSRValue": "0x0200200080",
+ "BriefDescription": "ALL_PF_RFO & L3_HIT_E & SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_F.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT_E.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200080120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
- "MSRValue": "0x0400200080",
+ "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & SUPPLIER_NONE & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80020001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
- "MSRValue": "0x0800200080",
+ "BriefDescription": "ALL_DATA_RD & ANY_RESPONSE have any response type.",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0000010491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
- "MSRValue": "0x1000200080",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & L3_HIT_E & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_F.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT_E.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000080100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
- "MSRValue": "0x3F80200080",
+ "BriefDescription": "ALL_RFO & L3_HIT_S & SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_F.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_HIT_S.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080100122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
- "MSRValue": "0x00803C0080",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_DATA_RD.L3_HIT.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT_E.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080080020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD TBD",
- "MSRValue": "0x01003C0080",
+ "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & L3_HIT_F & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800200004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
- "MSRValue": "0x02003C0080",
+ "BriefDescription": "ALL_PF_RFO & PMM_HIT_LOCAL_PMM & SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_DATA_RD.L3_HIT.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080400120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD TBD",
- "MSRValue": "0x04003C0080",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & SUPPLIER_NONE & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400020080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD TBD",
- "MSRValue": "0x08003C0080",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT_M.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080040002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD TBD",
- "MSRValue": "0x10003C0080",
+ "BriefDescription": "ALL_PF_RFO & L3_HIT_F & SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_DATA_RD.L3_HIT.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT_F.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080200120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD TBD",
- "MSRValue": "0x3F803C0080",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & L3_HIT_F & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_DATA_RD.L3_HIT.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT_F.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100200100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
- "MSRValue": "0x0080020100",
+ "BriefDescription": "ALL_PF_RFO & L3_HIT_E & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_RFO.SUPPLIER_NONE.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT_E.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80080120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
- "MSRValue": "0x0100020100",
+ "BriefDescription": "Counts any other requests OTHER & L3_HIT & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_HIT.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x08003C8000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
- "MSRValue": "0x0200020100",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_RFO.SUPPLIER_NONE.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT_M.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080040100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
- "MSRValue": "0x0400020100",
+ "BriefDescription": "ALL_PF_DATA_RD & L3_HIT_M & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800040490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
- "MSRValue": "0x0800020100",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & SUPPLIER_NONE & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
- },
- {
+ "MSRValue": "0x0800020100",
"Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
- "MSRValue": "0x1000020100",
- "Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_RFO.SUPPLIER_NONE.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
- "MSRValue": "0x3F80020100",
+ "BriefDescription": "ALL_PF_DATA_RD & L3_HIT_E & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_RFO.SUPPLIER_NONE.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "Offcore": "1",
+ "CounterHTOff": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
- "MSRValue": "0x0080040100",
- "Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_RFO.L3_HIT_M.SNOOP_NONE",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_E.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000080490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
- "MSRValue": "0x0100040100",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & L3_HIT_F & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_RFO.L3_HIT_M.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT_F.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000200020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
- "MSRValue": "0x0200040100",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & SUPPLIER_NONE & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_RFO.L3_HIT_M.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100020400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
- "MSRValue": "0x0400040100",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & L3_HIT_M & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_RFO.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400040100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
- "MSRValue": "0x0800040100",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & L3_HIT_F & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_RFO.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT_F.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000200100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
- "MSRValue": "0x1000040100",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & SUPPLIER_NONE & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_RFO.L3_HIT_M.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100020100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
- "MSRValue": "0x3F80040100",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_RFO.L3_HIT_M.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT_S.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080100100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
- "MSRValue": "0x0080080100",
+ "BriefDescription": "ALL_DATA_RD & L3_HIT & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_RFO.L3_HIT_E.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x01003C0491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
- "MSRValue": "0x0100080100",
+ "BriefDescription": "ALL_READS & L3_HIT & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_RFO.L3_HIT_E.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_HIT.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x08003C07F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
- "MSRValue": "0x0200080100",
+ "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & L3_HIT & SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_RFO.L3_HIT_E.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00803C0004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
- "MSRValue": "0x0400080100",
+ "BriefDescription": "ALL_DATA_RD & PMM_HIT_LOCAL_PMM & SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_RFO.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080400491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
- "MSRValue": "0x0800080100",
+ "BriefDescription": "ALL_DATA_RD & SUPPLIER_NONE & SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_RFO.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080020491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
- "MSRValue": "0x1000080100",
+ "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & PMM_HIT_LOCAL_PMM & SNOOP_NOT_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_RFO.L3_HIT_E.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100400002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
- "MSRValue": "0x3F80080100",
+ "BriefDescription": "ALL_PF_DATA_RD & L3_HIT & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_RFO.L3_HIT_E.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x08003C0490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
- "MSRValue": "0x0080100100",
+ "BriefDescription": "ALL_DATA_RD & L3_HIT & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_RFO.L3_HIT_S.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
- "MSRValue": "0x0100100100",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & L3_HIT_M & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_RFO.L3_HIT_S.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_M.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800040400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
- "MSRValue": "0x0200100100",
+ "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & L3_HIT_M & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_RFO.L3_HIT_S.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_M.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80040001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
- "MSRValue": "0x0400100100",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & L3_HIT_F & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_RFO.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_F.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800200400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
- "MSRValue": "0x0800100100",
+ "BriefDescription": "ALL_DATA_RD & L3_HIT & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_RFO.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x04003C0491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
- "MSRValue": "0x1000100100",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & L3_HIT & SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_RFO.L3_HIT_S.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x02003C0100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
- "MSRValue": "0x3F80100100",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & L3_HIT_S & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_RFO.L3_HIT_S.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_S.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100100010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
- "MSRValue": "0x0080200100",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & L3_HIT_S & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_RFO.L3_HIT_F.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT_S.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000100100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
- "MSRValue": "0x0100200100",
+ "BriefDescription": "ALL_DATA_RD & L3_HIT_E & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_RFO.L3_HIT_F.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT_E.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80080491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
- "MSRValue": "0x0200200100",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & PMM_HIT_LOCAL_PMM & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_RFO.L3_HIT_F.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80400100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
- "MSRValue": "0x0400200100",
+ "BriefDescription": "ALL_READS & L3_HIT_F & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_RFO.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_HIT_F.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10002007F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
- "MSRValue": "0x0800200100",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & L3_HIT & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_RFO.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x04003C0400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
- "MSRValue": "0x1000200100",
+ "BriefDescription": "ALL_PF_RFO & L3_HIT & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_RFO.L3_HIT_F.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
- "MSRValue": "0x3F80200100",
+ "BriefDescription": "Counts any other requests",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_RFO.L3_HIT_F.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_HIT_S.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200108000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
- "MSRValue": "0x00803C0100",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & L3_HIT & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_RFO.L3_HIT.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x08003C0100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD TBD",
- "MSRValue": "0x01003C0100",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & SUPPLIER_NONE & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_RFO.L3_HIT.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.SUPPLIER_NONE.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80020400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
- "MSRValue": "0x02003C0100",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads have any response type.",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_RFO.L3_HIT.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0000010010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD TBD",
- "MSRValue": "0x04003C0100",
+ "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & L3_HIT_S & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400100002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD TBD",
- "MSRValue": "0x08003C0100",
+ "BriefDescription": "ALL_DATA_RD & L3_HIT_M & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_RFO.L3_HIT.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT_M.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000040491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD TBD",
- "MSRValue": "0x10003C0100",
+ "BriefDescription": "Counts number of cache lines that are allocated and written back to L3 with the intention that they are more likely to be reused shortly",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_RFO.L3_HIT.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xFE",
+ "EventName": "IDI_MISC.WB_UPGRADE",
+ "PublicDescription": "Counts number of cache lines that are allocated and written back to L3 with the intention that they are more likely to be reused shortly.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x2"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD TBD",
- "MSRValue": "0x3F803C0100",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & L3_HIT_F & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_RFO.L3_HIT.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800200010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
- "MSRValue": "0x0080020400",
+ "BriefDescription": "Counts all demand code reads",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L1D_AND_SW.SUPPLIER_NONE.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x08007C0004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
- "MSRValue": "0x0100020400",
+ "BriefDescription": "Counts any other requests OTHER & L3_HIT_M & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L1D_AND_SW.SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_HIT_M.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100048000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
- "MSRValue": "0x0200020400",
+ "BriefDescription": "ALL_PF_DATA_RD & L3_HIT & SNOOP_HIT_WITH_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L1D_AND_SW.SUPPLIER_NONE.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x08007C0490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
- "MSRValue": "0x0400020400",
+ "BriefDescription": "ALL_PF_DATA_RD & L3_HIT_M & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L1D_AND_SW.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_M.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100040490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
- "MSRValue": "0x0800020400",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & L3_HIT_F & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L1D_AND_SW.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_F.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80200400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
- "MSRValue": "0x1000020400",
+ "BriefDescription": "ALL_DATA_RD & L3_HIT_F & SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L1D_AND_SW.SUPPLIER_NONE.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT_F.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080200491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
- "MSRValue": "0x3F80020400",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L1D_AND_SW.SUPPLIER_NONE.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_S.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200100010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
- "MSRValue": "0x0080040400",
+ "BriefDescription": "ALL_PF_RFO & L3_HIT_F & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_M.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT_F.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100200120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
- "MSRValue": "0x0100040400",
+ "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & L3_HIT_S & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_M.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_S.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100100004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
- "MSRValue": "0x0200040400",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_M.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_M.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080040010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
- "MSRValue": "0x0400040400",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & L3_HIT & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x04003C0020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
- "MSRValue": "0x0800040400",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & L3_HIT & SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x02003C0010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
- "MSRValue": "0x1000040400",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & PMM_HIT_LOCAL_PMM & SNOOP_NOT_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_M.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100400400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
- "MSRValue": "0x3F80040400",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & PMM_HIT_LOCAL_PMM & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_M.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80400020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
- "MSRValue": "0x0080080400",
+ "BriefDescription": "ALL_READS & L3_HIT & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_E.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C07F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
- "MSRValue": "0x0100080400",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & L3_HIT_S & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_E.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_S.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100100400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
- "MSRValue": "0x0200080400",
+ "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & L3_HIT_M & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_E.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_M.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100040004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
- "MSRValue": "0x0400080400",
+ "BriefDescription": "ALL_READS & L3_HIT & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x04003C07F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
- "MSRValue": "0x0800080400",
+ "BriefDescription": "ALL_PF_DATA_RD & L3_HIT_F & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_F.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100200490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
- "MSRValue": "0x1000080400",
+ "BriefDescription": "Counts any other requests OTHER & SUPPLIER_NONE & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_E.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100028000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
- "MSRValue": "0x3F80080400",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & L3_HIT_E & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_E.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_E.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80080080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
- "MSRValue": "0x0080100400",
+ "BriefDescription": "Counts any other requests OTHER & PMM_HIT_LOCAL_PMM & SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_S.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080408000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
- "MSRValue": "0x0100100400",
+ "BriefDescription": "ALL_READS & L3_HIT_S & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_S.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_HIT_S.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x08001007F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
- "MSRValue": "0x0200100400",
+ "BriefDescription": "Number of PREFETCHT1 or PREFETCHT2 instructions executed.",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_S.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x32",
+ "EventName": "SW_PREFETCH_ACCESS.T1_T2",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
},
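The SW_PREFETCH_ACCESS.T1_T2 entry just above is the odd one out in this hunk: a plain core event (EventCode 0x32, no Offcore/MSRIndex/MSRValue fields) counting executed PREFETCHT1/PREFETCHT2 instructions, which is also why it can run on all eight hyper-threading counters and carries the larger 2000003 sample-after value. Either kind of entry maps onto a raw perf event string in the obvious way; a sketch, assuming perf's usual "offcore_rsp" extra-register format field for this PMU (worth confirming under /sys/bus/event_source/devices/cpu/format/ on the target machine):

# Turn one of these JSON entries into a raw perf event string.
def perf_event_string(entry):
    # "0xB7, 0xBB" lists two usable event selects; take the first.
    event = int(entry["EventCode"].split(",")[0], 16)
    spec = f"cpu/event={event:#x},umask={int(entry['UMask'], 16):#x}"
    if "MSRValue" in entry:  # offcore-response events only
        spec += f",offcore_rsp={int(entry['MSRValue'], 16):#x}"
    return spec + "/"

ocr = {"EventCode": "0xB7, 0xBB", "UMask": "0x1", "MSRValue": "0x0100100004"}
swpf = {"EventCode": "0x32", "UMask": "0x4"}
print(perf_event_string(ocr))   # cpu/event=0xb7,umask=0x1,offcore_rsp=0x100100004/
print(perf_event_string(swpf))  # cpu/event=0x32,umask=0x4/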
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
- "MSRValue": "0x0400100400",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & PMM_HIT_LOCAL_PMM & SNOOP_NOT_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100400010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
- "MSRValue": "0x0800100400",
+ "BriefDescription": "Counts demand data reads",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080020001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
- "MSRValue": "0x1000100400",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & L3_HIT_M & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_S.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT_M.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80040020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
- "MSRValue": "0x3F80100400",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & SUPPLIER_NONE & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_S.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.SUPPLIER_NONE.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000020020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
- "MSRValue": "0x0080200400",
+ "BriefDescription": "ALL_DATA_RD & L3_HIT_M & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_F.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800040491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
- "MSRValue": "0x0100200400",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & L3_HIT_F & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_F.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT_F.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100200020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
- "MSRValue": "0x0200200400",
+ "BriefDescription": "ALL_RFO & L3_HIT_F & SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_F.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_HIT_F.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200200122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
- "MSRValue": "0x0400200400",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT_F.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080200020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
- "MSRValue": "0x0800200400",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & SUPPLIER_NONE & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.SUPPLIER_NONE.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000020400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
- "MSRValue": "0x1000200400",
+ "BriefDescription": "ALL_PF_RFO & L3_HIT & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_F.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x04003C0120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
- "MSRValue": "0x3F80200400",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads have any response type.",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_F.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0000010080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
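The *.ANY_RESPONSE entry above is the degenerate case of the encoding: bit 16 by itself accepts every response, so its MSRValue carries no supplier or snoop bits alongside the request mask. A one-line check against that entry, using the request bit inferred earlier:

# OCR.PF_L3_DATA_RD.ANY_RESPONSE: request bit 7 plus the ANY_RESPONSE bit.
assert 0x0000010080 == (1 << 16) | (1 << 7)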
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
- "MSRValue": "0x00803C0400",
+ "BriefDescription": "ALL_PF_DATA_RD & SUPPLIER_NONE & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L1D_AND_SW.L3_HIT.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80020490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD TBD",
- "MSRValue": "0x01003C0400",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & L3_HIT_M & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L1D_AND_SW.L3_HIT.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT_M.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80040100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
- "MSRValue": "0x02003C0400",
+ "BriefDescription": "ALL_RFO & SUPPLIER_NONE & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L1D_AND_SW.L3_HIT.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.SUPPLIER_NONE.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000020122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD TBD",
- "MSRValue": "0x04003C0400",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & L3_HIT & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L1D_AND_SW.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x04003C0010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD TBD",
- "MSRValue": "0x08003C0400",
+ "BriefDescription": "ALL_RFO & L3_HIT_F & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L1D_AND_SW.L3_HIT.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_HIT_F.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80200122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD TBD",
- "MSRValue": "0x10003C0400",
+ "BriefDescription": "ALL_PF_DATA_RD & L3_HIT_S & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L1D_AND_SW.L3_HIT.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_S.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000100490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD TBD",
- "MSRValue": "0x3F803C0400",
+ "BriefDescription": "ALL_RFO & L3_HIT_S & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L1D_AND_SW.L3_HIT.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_HIT_S.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80100122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts any other requests",
- "MSRValue": "0x0080028000",
+ "BriefDescription": "Counts any other requests OTHER & PMM_HIT_LOCAL_PMM & SNOOP_NOT_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.OTHER.SUPPLIER_NONE.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100408000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts any other requests TBD",
- "MSRValue": "0x0100028000",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & L3_HIT & SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OCR.OTHER.SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00803C0020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts any other requests",
- "MSRValue": "0x0200028000",
+ "BriefDescription": "Counts all demand data writes (RFOs) hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
"Counter": "0,1,2,3",
- "EventName": "OCR.OTHER.SUPPLIER_NONE.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x01003C0002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts any other requests TBD",
- "MSRValue": "0x0400028000",
+ "BriefDescription": "ALL_READS & L3_HIT_S & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.OTHER.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x04001007F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts any other requests TBD",
- "MSRValue": "0x0800028000",
+ "BriefDescription": "Counts any other requests OTHER & L3_HIT_M & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.OTHER.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_HIT_M.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80048000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts any other requests TBD",
- "MSRValue": "0x1000028000",
+ "BriefDescription": "ALL_PF_DATA_RD & L3_HIT_S & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.OTHER.SUPPLIER_NONE.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_S.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80100490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts any other requests TBD",
- "MSRValue": "0x3F80028000",
+ "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & L3_HIT_F & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.OTHER.SUPPLIER_NONE.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT_F.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000200002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts any other requests",
- "MSRValue": "0x0080048000",
+ "BriefDescription": "ALL_DATA_RD & L3_HIT_E & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.OTHER.L3_HIT_M.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT_E.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000080491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts any other requests TBD",
- "MSRValue": "0x0100048000",
+ "BriefDescription": "ALL_READS & L3_HIT_E & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.OTHER.L3_HIT_M.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_HIT_E.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x01000807F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts any other requests",
- "MSRValue": "0x0200048000",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & L3_HIT_S & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.OTHER.L3_HIT_M.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400100400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts any other requests TBD",
- "MSRValue": "0x0400048000",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"Counter": "0,1,2,3",
- "EventName": "OCR.OTHER.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT_F.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200200002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts any other requests TBD",
- "MSRValue": "0x0800048000",
+ "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & L3_HIT_S & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.OTHER.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_S.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80100004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts any other requests TBD",
- "MSRValue": "0x1000048000",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests have any response type.",
"Counter": "0,1,2,3",
- "EventName": "OCR.OTHER.L3_HIT_M.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0000010400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts any other requests TBD",
- "MSRValue": "0x3F80048000",
+ "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & L3_HIT_F & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.OTHER.L3_HIT_M.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_F.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100200001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
"BriefDescription": "Counts any other requests",
- "MSRValue": "0x0080088000",
"Counter": "0,1,2,3",
- "EventName": "OCR.OTHER.L3_HIT_E.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x08007C8000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
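One detail worth noting in the SNOOP_HIT_WITH_FWD entries (the one above, and further down OCR.ALL_READS.L3_HIT.SNOOP_HIT_WITH_FWD and OCR.ALL_RFO.L3_HIT.SNOOP_HIT_WITH_FWD): they use the same snoop bit 35 as the HIT_OTHER_CORE_FWD events but widen the supplier mask from 0x3C0000 to 0x7C0000, i.e. they additionally set the bit-22 supplier used elsewhere for PMM_HIT_LOCAL_PMM. This is inferred purely from the MSRValues in this diff:

# OCR.OTHER.L3_HIT.SNOOP_HIT_WITH_FWD from the entry above:
assert 0x08007C8000 == (1 << 35) | 0x3C0000 | (1 << 22) | (1 << 15)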
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts any other requests TBD",
- "MSRValue": "0x0100088000",
+ "BriefDescription": "ALL_PF_RFO & L3_HIT_S & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.OTHER.L3_HIT_E.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT_S.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100100120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts any other requests",
- "MSRValue": "0x0200088000",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & L3_HIT_M & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.OTHER.L3_HIT_E.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800040010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts any other requests TBD",
- "MSRValue": "0x0400088000",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"Counter": "0,1,2,3",
- "EventName": "OCR.OTHER.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_S.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080100080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts any other requests TBD",
- "MSRValue": "0x0800088000",
+ "BriefDescription": "ALL_RFO & SUPPLIER_NONE & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.OTHER.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400020122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts any other requests TBD",
- "MSRValue": "0x1000088000",
+ "BriefDescription": "Counts any other requests OTHER & L3_HIT_E & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
"EventName": "OCR.OTHER.L3_HIT_E.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000088000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts any other requests TBD",
- "MSRValue": "0x3F80088000",
+ "BriefDescription": "ALL_READS & L3_HIT & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.OTHER.L3_HIT_E.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C07F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts any other requests",
- "MSRValue": "0x0080108000",
+ "BriefDescription": "ALL_READS & PMM_HIT_LOCAL_PMM & SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OCR.OTHER.L3_HIT_S.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00804007F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts any other requests TBD",
- "MSRValue": "0x0100108000",
+ "BriefDescription": "Counts any other requests OTHER & SUPPLIER_NONE & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.OTHER.L3_HIT_S.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400028000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts any other requests",
- "MSRValue": "0x0200108000",
+ "BriefDescription": "ALL_PF_DATA_RD & L3_HIT & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.OTHER.L3_HIT_S.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C0490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts any other requests TBD",
- "MSRValue": "0x0400108000",
+ "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & PMM_HIT_LOCAL_PMM & SNOOP_NOT_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.OTHER.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100400004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts any other requests TBD",
- "MSRValue": "0x0800108000",
+ "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & L3_HIT & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.OTHER.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts any other requests TBD",
- "MSRValue": "0x1000108000",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"Counter": "0,1,2,3",
- "EventName": "OCR.OTHER.L3_HIT_S.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.SUPPLIER_NONE.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200020002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts any other requests TBD",
- "MSRValue": "0x3F80108000",
+ "BriefDescription": "ALL_READS & L3_HIT & SNOOP_HIT_WITH_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.OTHER.L3_HIT_S.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x08007C07F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts any other requests",
- "MSRValue": "0x0080208000",
+ "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & PMM_HIT_LOCAL_PMM & SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OCR.OTHER.L3_HIT_F.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080400004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts any other requests TBD",
- "MSRValue": "0x0100208000",
+ "BriefDescription": "Counts demand data reads",
"Counter": "0,1,2,3",
- "EventName": "OCR.OTHER.L3_HIT_F.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_S.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200100001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts any other requests",
- "MSRValue": "0x0200208000",
+ "BriefDescription": "ALL_READS & L3_HIT & SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OCR.OTHER.L3_HIT_F.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_HIT.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00803C07F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts any other requests TBD",
- "MSRValue": "0x0400208000",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & L3_HIT_E & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.OTHER.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_E.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000080080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts any other requests TBD",
- "MSRValue": "0x0800208000",
+ "BriefDescription": "ALL_PF_DATA_RD & SUPPLIER_NONE & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.OTHER.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100020490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts any other requests TBD",
- "MSRValue": "0x1000208000",
+ "BriefDescription": "Counts any other requests OTHER & SUPPLIER_NONE & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.OTHER.L3_HIT_F.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.SUPPLIER_NONE.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80028000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts any other requests TBD",
- "MSRValue": "0x3F80208000",
+ "BriefDescription": "ALL_PF_DATA_RD & L3_HIT & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.OTHER.L3_HIT_F.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x01003C0490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts any other requests TBD",
- "MSRValue": "0x00803C8000",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & L3_HIT_F & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.OTHER.L3_HIT.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT_F.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800200020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts any other requests TBD TBD",
- "MSRValue": "0x01003C8000",
+ "BriefDescription": "ALL_READS & L3_HIT_S & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.OTHER.L3_HIT.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_HIT_S.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x01001007F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts any other requests TBD",
- "MSRValue": "0x02003C8000",
+ "BriefDescription": "ALL_RFO & L3_HIT_S & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.OTHER.L3_HIT.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_HIT_S.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000100122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts any other requests TBD TBD",
- "MSRValue": "0x04003C8000",
+ "BriefDescription": "ALL_RFO & L3_HIT & SNOOP_HIT_WITH_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.OTHER.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x08007C0122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts any other requests TBD TBD",
- "MSRValue": "0x08003C8000",
+ "BriefDescription": "ALL_READS & L3_HIT_M & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.OTHER.L3_HIT.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_HIT_M.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F800407F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts any other requests TBD TBD",
- "MSRValue": "0x10003C8000",
+ "BriefDescription": "ALL_READS & L3_HIT & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.OTHER.L3_HIT.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x01003C07F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts any other requests TBD TBD",
- "MSRValue": "0x3F803C8000",
+ "BriefDescription": "ALL_READS & ANY_RESPONSE have any response type.",
"Counter": "0,1,2,3",
- "EventName": "OCR.OTHER.L3_HIT.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00000107F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
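The OCR.* entries above and below share one encoding scheme: the MSRValue programmed into MSR 0x1a6/0x1a7 concatenates a request-type field, a supplier/response field, and a snoop field, and the dotted EventName simply spells those fields out. A minimal sketch of the split, assuming the OFFCORE_RSP bit layout documented in the Intel SDM for Skylake-server parts (the exact field boundaries are an assumption to verify for your CPU):

    # Minimal sketch, not part of this commit: split an OFFCORE_RSP
    # MSRValue into the three fields the OCR.* event names are built from.
    # Bit boundaries assume the Intel SDM layout for Skylake-server parts.
    def decode_offcore_rsp(msr_value: int) -> dict:
        return {
            "request":  msr_value & 0xFFFF,          # bits 0-15, e.g. ALL_READS = 0x7F7
            "supplier": (msr_value >> 16) & 0x7FFF,  # bits 16-30, e.g. L3_HIT = 0x3C0000 >> 16
            "snoop":    (msr_value >> 31) & 0x7F,    # bits 31-37, e.g. ANY_SNOOP = 0x7F
        }

    # OCR.ALL_READS.ANY_RESPONSE above uses MSRValue 0x00000107F7:
    print(decode_offcore_rsp(0x00000107F7))  # request=0x7F7, supplier=0x1 (ANY_RESPONSE bit)

On Intel CPUs the same value can be handed to perf's raw-event syntax through the offcore_rsp format attribute, e.g. perf stat -e cpu/event=0xb7,umask=0x1,offcore_rsp=0x00000107F7/.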
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD",
- "MSRValue": "0x0080020490",
+ "BriefDescription": "ALL_DATA_RD & L3_HIT & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x08003C0491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0100020490",
+ "BriefDescription": "ALL_DATA_RD & L3_HIT_M & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT_M.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100040491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD",
- "MSRValue": "0x0200020490",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & SUPPLIER_NONE & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80020080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0400020490",
+ "BriefDescription": "Counts any other requests OTHER & L3_HIT_F & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_HIT_F.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000208000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0800020490",
+ "BriefDescription": "ALL_DATA_RD & L3_HIT & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C0491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x1000020490",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & L3_HIT_M & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_M.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000040400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x3F80020490",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & L3_HIT_E & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800080010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD",
- "MSRValue": "0x0080040490",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & PMM_HIT_LOCAL_PMM & SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_M.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080400100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0100040490",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & L3_HIT_S & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_M.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800100080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD",
- "MSRValue": "0x0200040490",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_M.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT_E.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080080100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0400040490",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_E.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200080010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0800040490",
+ "BriefDescription": "ALL_PF_RFO & PMM_HIT_LOCAL_PMM & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80400120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x1000040490",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_M.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_E.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080080400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x3F80040490",
+ "BriefDescription": "ALL_PF_DATA_RD & L3_HIT & SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_M.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x02003C0490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD",
- "MSRValue": "0x0080080490",
+ "BriefDescription": "Counts any other requests OTHER & L3_HIT & SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_E.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_HIT.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00803C8000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0100080490",
+ "BriefDescription": "Counts any other requests OTHER & SUPPLIER_NONE & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_E.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800028000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD",
- "MSRValue": "0x0200080490",
+ "BriefDescription": "ALL_DATA_RD & L3_HIT_E & SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_E.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT_E.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080080491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0400080490",
+ "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & L3_HIT_S & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT_S.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000100002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0800080490",
+ "BriefDescription": "ALL_READS & L3_HIT_S & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_HIT_S.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10001007F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x1000080490",
+ "BriefDescription": "ALL_READS & L3_HIT_F & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_E.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_HIT_F.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F802007F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x3F80080490",
+ "BriefDescription": "ALL_READS & L3_HIT_M & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_E.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x04000407F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD",
- "MSRValue": "0x0080100490",
+ "BriefDescription": "ALL_READS & L3_HIT_S & SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_S.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_HIT_S.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00801007F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0100100490",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & L3_HIT & SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_S.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x02003C0080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD",
- "MSRValue": "0x0200100490",
+ "BriefDescription": "ALL_PF_DATA_RD & L3_HIT_E & SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_S.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_E.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080080490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0400100490",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & SUPPLIER_NONE & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400020400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0800100490",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & L3_HIT_S & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_S.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800100400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x1000100490",
+ "BriefDescription": "ALL_READS & L3_HIT_E & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_S.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x04000807F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x3F80100490",
+ "BriefDescription": "Counts all demand code reads",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_S.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_S.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200100004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD",
- "MSRValue": "0x0080200490",
+ "BriefDescription": "ALL_DATA_RD & L3_HIT_S & SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_F.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT_S.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080100491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0100200490",
+ "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & SUPPLIER_NONE & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_F.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.SUPPLIER_NONE.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80020002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD",
- "MSRValue": "0x0200200490",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & L3_HIT & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_F.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C0080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0400200490",
+ "BriefDescription": "ALL_RFO & L3_HIT & SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_HIT.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00803C0122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0800200490",
+ "BriefDescription": "ALL_PF_DATA_RD & L3_HIT_M & SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_M.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080040490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x1000200490",
+ "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & L3_HIT & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_F.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x04003C0002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x3F80200490",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & SUPPLIER_NONE & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_F.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400020100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x00803C0490",
+ "BriefDescription": "ALL_PF_RFO & L3_HIT_M & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400040120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD TBD",
- "MSRValue": "0x01003C0490",
+ "BriefDescription": "ALL_READS & L3_HIT_E & SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_HIT_E.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00800807F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x02003C0490",
+ "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & SUPPLIER_NONE & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000020001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD TBD",
- "MSRValue": "0x04003C0490",
+ "BriefDescription": "ALL_READS & SUPPLIER_NONE & SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.SUPPLIER_NONE.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x02000207F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD TBD",
- "MSRValue": "0x08003C0490",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & L3_HIT_F & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_F.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000200080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD TBD",
- "MSRValue": "0x10003C0490",
+ "BriefDescription": "Number of PREFETCHT0 instructions executed.",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x32",
+ "EventName": "SW_PREFETCH_ACCESS.T0",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
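SW_PREFETCH_ACCESS.T0 is a plain core event rather than an offcore-response event, so only EventCode and UMask need programming. A hedged usage sketch, assuming a CPU whose PMU exposes this event (the symbolic name depends on the installed event tables; the raw form below always applies):

    perf stat -e cpu/event=0x32,umask=0x2/ -a sleep 1   # count PREFETCHT0 instructions system-wide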
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD TBD",
- "MSRValue": "0x3F803C0490",
+ "BriefDescription": "ALL_RFO & L3_HIT & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_HIT.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x08003C0122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD",
- "MSRValue": "0x0080020120",
+ "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & L3_HIT_F & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_RFO.SUPPLIER_NONE.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800200001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0100020120",
+ "BriefDescription": "ALL_DATA_RD & L3_HIT_S & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400100491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD",
- "MSRValue": "0x0200020120",
+ "BriefDescription": "ALL_READS & L3_HIT_M & SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_RFO.SUPPLIER_NONE.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_HIT_M.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00800407F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0400020120",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT_S.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200100020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0800020120",
+ "BriefDescription": "Core cycles where the core was running in a manner where Turbo may be clipped to the AVX512 turbo schedule.",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x28",
+ "EventName": "CORE_POWER.LVL2_TURBO_LICENSE",
+ "PublicDescription": "Core cycles where the core was running with power-delivery for license level 2 (introduced in Skylake Server michroarchtecture). This includes high current AVX 512-bit instructions.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x20"
},
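CORE_POWER.LVL2_TURBO_LICENSE counts cycles, so it is most meaningful as a fraction of total unhalted core cycles. A minimal sketch, assuming it is paired with CPU_CLK_UNHALTED.THREAD (the usual pairing for license-utilization metrics, not something this commit defines):

    # Hedged sketch: fraction of core cycles spent at power license level 2,
    # the high-current AVX-512 level described in the entry above.
    def lvl2_license_residency(lvl2_cycles: int, core_cycles: int) -> float:
        return lvl2_cycles / core_cycles if core_cycles else 0.0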
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x1000020120",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & L3_HIT & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_RFO.SUPPLIER_NONE.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x3F80020120",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_RFO.SUPPLIER_NONE.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x01003C0020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD",
- "MSRValue": "0x0080040120",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & L3_HIT_E & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_RFO.L3_HIT_M.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_E.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000080400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0100040120",
+ "BriefDescription": "ALL_PF_RFO & L3_HIT_F & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_RFO.L3_HIT_M.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400200120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD",
- "MSRValue": "0x0200040120",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & PMM_HIT_LOCAL_PMM & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_RFO.L3_HIT_M.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80400080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0400040120",
+ "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & SUPPLIER_NONE & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_RFO.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.SUPPLIER_NONE.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80020004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0800040120",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & L3_HIT_S & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_RFO.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_S.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80100400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x1000040120",
+ "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & L3_HIT_E & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_RFO.L3_HIT_M.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT_E.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800080002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x3F80040120",
+ "BriefDescription": "ALL_PF_RFO & L3_HIT_F & SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_RFO.L3_HIT_M.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT_F.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200200120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD",
- "MSRValue": "0x0080080120",
+ "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & L3_HIT & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_RFO.L3_HIT_E.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x04003C0004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0100080120",
+ "BriefDescription": "ALL_PF_DATA_RD & L3_HIT_F & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_RFO.L3_HIT_E.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400200490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD",
- "MSRValue": "0x0200080120",
+ "BriefDescription": "Counts any other requests OTHER & L3_HIT_F & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_RFO.L3_HIT_E.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_HIT_F.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80208000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0400080120",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_RFO.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_S.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080100400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0800080120",
+ "BriefDescription": "ALL_READS & SUPPLIER_NONE & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_RFO.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x01000207F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x1000080120",
+ "BriefDescription": "ALL_PF_DATA_RD & L3_HIT_F & SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_RFO.L3_HIT_E.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_F.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080200490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x3F80080120",
+ "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & L3_HIT_S & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_RFO.L3_HIT_E.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT_S.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800100002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD",
- "MSRValue": "0x0080100120",
+ "BriefDescription": "ALL_READS & L3_HIT_F & SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_RFO.L3_HIT_S.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_HIT_F.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00802007F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0100100120",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & L3_HIT & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_RFO.L3_HIT_S.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C0400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD",
- "MSRValue": "0x0200100120",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & L3_HIT & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_RFO.L3_HIT_S.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0400100120",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & L3_HIT_S & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_RFO.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400100100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0800100120",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & L3_HIT_F & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_RFO.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_F.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100200010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x1000100120",
+ "BriefDescription": "ALL_PF_RFO & L3_HIT_M & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_RFO.L3_HIT_S.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT_M.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000040120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x3F80100120",
+ "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & L3_HIT_F & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_RFO.L3_HIT_S.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_F.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000200004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD",
- "MSRValue": "0x0080200120",
+ "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & L3_HIT & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_RFO.L3_HIT_F.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x08003C0002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0100200120",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & L3_HIT_F & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_RFO.L3_HIT_F.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800200080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD",
- "MSRValue": "0x0200200120",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & L3_HIT_M & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_RFO.L3_HIT_F.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800040080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0400200120",
+ "BriefDescription": "ALL_PF_DATA_RD & PMM_HIT_LOCAL_PMM & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_RFO.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80400490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0800200120",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & SUPPLIER_NONE & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_RFO.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800020400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x1000200120",
+ "BriefDescription": "ALL_DATA_RD & L3_HIT & SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_RFO.L3_HIT_F.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00803C0491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x3F80200120",
+ "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & L3_HIT_S & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_RFO.L3_HIT_F.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT_S.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80100002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x00803C0120",
+ "BriefDescription": "ALL_RFO & L3_HIT_S & SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_RFO.L3_HIT.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_HIT_S.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200100122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD TBD",
- "MSRValue": "0x01003C0120",
+ "BriefDescription": "ALL_PF_DATA_RD & L3_HIT_E & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_RFO.L3_HIT.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_E.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80080490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x02003C0120",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_RFO.L3_HIT.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_E.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200080080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD TBD",
- "MSRValue": "0x04003C0120",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & L3_HIT_M & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400040020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD TBD",
- "MSRValue": "0x08003C0120",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & L3_HIT & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_RFO.L3_HIT.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C0100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD TBD",
- "MSRValue": "0x10003C0120",
+ "BriefDescription": "ALL_DATA_RD & L3_HIT_M & SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_RFO.L3_HIT.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT_M.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200040491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD TBD",
- "MSRValue": "0x3F803C0120",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_RFO.L3_HIT.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_M.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080040080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD",
- "MSRValue": "0x0080020491",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x01003C0100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0100020491",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & PMM_HIT_LOCAL_PMM & SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080400080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD",
- "MSRValue": "0x0200020491",
+ "BriefDescription": "ALL_PF_RFO & L3_HIT_F & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT_F.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800200120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0400020491",
+ "BriefDescription": "ALL_PF_RFO & L3_HIT_E & SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT_E.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080080120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0800020491",
+ "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & L3_HIT_E & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT_E.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000080002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x1000020491",
+ "BriefDescription": "ALL_PF_DATA_RD & L3_HIT_M & SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_M.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200040490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x3F80020491",
+ "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & L3_HIT_E & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT_E.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100080002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD",
- "MSRValue": "0x0080040491",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & SUPPLIER_NONE & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_DATA_RD.L3_HIT_M.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800020020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0100040491",
+ "BriefDescription": "Counts any other requests",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_DATA_RD.L3_HIT_M.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_HIT_M.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080048000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD",
- "MSRValue": "0x0200040491",
+ "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & L3_HIT_E & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_DATA_RD.L3_HIT_M.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400080002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0400040491",
+ "BriefDescription": "Counts demand data reads",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_E.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200080001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0800040491",
+ "BriefDescription": "ALL_RFO & L3_HIT_F & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_HIT_F.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100200122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x1000040491",
+ "BriefDescription": "ALL_PF_DATA_RD & L3_HIT_E & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_DATA_RD.L3_HIT_M.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_E.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100080490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x3F80040491",
+ "BriefDescription": "Counts all demand code reads",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_DATA_RD.L3_HIT_M.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_M.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200040004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD",
- "MSRValue": "0x0080080491",
+ "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & L3_HIT_M & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_DATA_RD.L3_HIT_E.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400040002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0100080491",
+ "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & L3_HIT_M & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_DATA_RD.L3_HIT_E.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_M.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000040001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD",
- "MSRValue": "0x0200080491",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & L3_HIT_F & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_DATA_RD.L3_HIT_E.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_F.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000200400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0400080491",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_S.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200100400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0800080491",
+ "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & PMM_HIT_LOCAL_PMM & SNOOP_NOT_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100400001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x1000080491",
+ "BriefDescription": "ALL_RFO & L3_HIT_M & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_DATA_RD.L3_HIT_E.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_HIT_M.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100040122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x3F80080491",
+ "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & PMM_HIT_LOCAL_PMM & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_DATA_RD.L3_HIT_E.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80400001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD",
- "MSRValue": "0x0080100491",
+ "BriefDescription": "ALL_DATA_RD & L3_HIT_F & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_DATA_RD.L3_HIT_S.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT_F.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100200491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0100100491",
+ "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & PMM_HIT_LOCAL_PMM & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_DATA_RD.L3_HIT_S.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80400002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD",
- "MSRValue": "0x0200100491",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & PMM_HIT_LOCAL_PMM & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_DATA_RD.L3_HIT_S.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80400010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0400100491",
+ "BriefDescription": "Counts any other requests OTHER & L3_HIT & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C8000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0800100491",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & L3_HIT_F & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_F.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100200080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x1000100491",
+ "BriefDescription": "ALL_PF_RFO & L3_HIT_S & SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_DATA_RD.L3_HIT_S.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT_S.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200100120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x3F80100491",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & L3_HIT & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_DATA_RD.L3_HIT_S.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD",
- "MSRValue": "0x0080200491",
+ "BriefDescription": "ALL_READS & L3_HIT_E & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_DATA_RD.L3_HIT_F.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_HIT_E.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10000807F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0100200491",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_DATA_RD.L3_HIT_F.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_S.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080100010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD",
- "MSRValue": "0x0200200491",
+ "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & PMM_HIT_LOCAL_PMM & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_DATA_RD.L3_HIT_F.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80400004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0400200491",
+ "BriefDescription": "ALL_DATA_RD & L3_HIT_E & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400080491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0800200491",
+ "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & SUPPLIER_NONE & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800020001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x1000200491",
+ "BriefDescription": "Counts any other requests OTHER & L3_HIT_F & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_DATA_RD.L3_HIT_F.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_HIT_F.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800208000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x3F80200491",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_DATA_RD.L3_HIT_F.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.SUPPLIER_NONE.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080020100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x00803C0491",
+ "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & L3_HIT & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_DATA_RD.L3_HIT.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C0001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD TBD",
- "MSRValue": "0x01003C0491",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_M.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200040080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x02003C0491",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & L3_HIT & SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_DATA_RD.L3_HIT.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x02003C0400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD TBD",
- "MSRValue": "0x04003C0491",
+ "BriefDescription": "ALL_PF_RFO & L3_HIT & SNOOP_HIT_WITH_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x08007C0120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD TBD",
- "MSRValue": "0x08003C0491",
+ "BriefDescription": "ALL_PF_RFO & SUPPLIER_NONE & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.SUPPLIER_NONE.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000020120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD TBD",
- "MSRValue": "0x10003C0491",
+ "BriefDescription": "Counts any other requests OTHER & L3_HIT_S & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_DATA_RD.L3_HIT.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400108000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD TBD",
- "MSRValue": "0x3F803C0491",
+ "BriefDescription": "ALL_PF_DATA_RD & L3_HIT & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_DATA_RD.L3_HIT.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x04003C0490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD",
- "MSRValue": "0x0080020122",
+ "BriefDescription": "Counts any other requests",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_RFO.SUPPLIER_NONE.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_HIT_S.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080108000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0100020122",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200020080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD",
- "MSRValue": "0x0200020122",
+ "BriefDescription": "ALL_READS & PMM_HIT_LOCAL_PMM & SNOOP_NOT_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_RFO.SUPPLIER_NONE.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x01004007F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0400020122",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & L3_HIT_M & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_M.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80040010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0800020122",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & L3_HIT_F & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_F.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80200010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x1000020122",
+ "BriefDescription": "Counts all demand code reads hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_RFO.SUPPLIER_NONE.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x01003C0004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x3F80020122",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_RFO.SUPPLIER_NONE.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT_F.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200200100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD",
- "MSRValue": "0x0080040122",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_RFO.L3_HIT_M.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_F.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080200080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0100040122",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_RFO.L3_HIT_M.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT_M.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200040020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD",
- "MSRValue": "0x0200040122",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & SUPPLIER_NONE & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_RFO.L3_HIT_M.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100020080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0400040122",
+ "BriefDescription": "ALL_READS & L3_HIT_M & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_RFO.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_HIT_M.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10000407F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0800040122",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_RFO.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_S.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200100080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x1000040122",
+ "BriefDescription": "ALL_READS & SUPPLIER_NONE & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_RFO.L3_HIT_M.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.SUPPLIER_NONE.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10000207F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x3F80040122",
+ "BriefDescription": "ALL_DATA_RD & L3_HIT_M & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_RFO.L3_HIT_M.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT_M.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80040491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD",
- "MSRValue": "0x0080080122",
+ "BriefDescription": "ALL_RFO & L3_HIT_E & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_RFO.L3_HIT_E.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_HIT_E.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100080122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0100080122",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & L3_HIT_F & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_RFO.L3_HIT_E.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400200400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD",
- "MSRValue": "0x0200080122",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & L3_HIT_F & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_RFO.L3_HIT_E.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT_F.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80200100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0400080122",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_RFO.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_M.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200040010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0800080122",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_RFO.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_E.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080080080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x1000080122",
+ "BriefDescription": "Counts all demand code reads",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_RFO.L3_HIT_E.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_F.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080200004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x3F80080122",
+ "BriefDescription": "Counts any other requests hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_RFO.L3_HIT_E.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x01003C8000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD",
- "MSRValue": "0x0080100122",
+ "BriefDescription": "ALL_PF_RFO & L3_HIT_F & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_RFO.L3_HIT_S.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT_F.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000200120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0100100122",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & L3_HIT_S & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_RFO.L3_HIT_S.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT_S.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100100100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD",
- "MSRValue": "0x0200100122",
+ "BriefDescription": "Counts demand data reads",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_RFO.L3_HIT_S.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x08007C0001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0400100122",
+ "BriefDescription": "ALL_RFO & L3_HIT & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_RFO.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C0122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0800100122",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & L3_HIT & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_RFO.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x04003C0100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x1000100122",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & L3_HIT_E & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_RFO.L3_HIT_S.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_E.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100080010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x3F80100122",
+ "BriefDescription": "Counts any other requests",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_RFO.L3_HIT_S.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_HIT_E.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200088000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD",
- "MSRValue": "0x0080200122",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & L3_HIT_E & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_RFO.L3_HIT_F.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_E.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80080400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0100200122",
+ "BriefDescription": "ALL_DATA_RD & SUPPLIER_NONE & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_RFO.L3_HIT_F.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80020491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD",
- "MSRValue": "0x0200200122",
+ "BriefDescription": "ALL_RFO & L3_HIT_M & SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_RFO.L3_HIT_F.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_HIT_M.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080040122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0400200122",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & SUPPLIER_NONE & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_RFO.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100020020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0800200122",
+ "BriefDescription": "ALL_PF_DATA_RD & PMM_HIT_LOCAL_PMM & SNOOP_NOT_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_RFO.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100400490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x1000200122",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_RFO.L3_HIT_F.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.SUPPLIER_NONE.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080020002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x3F80200122",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_RFO.L3_HIT_F.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT_S.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200100002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x00803C0122",
+ "BriefDescription": "ALL_RFO & SUPPLIER_NONE & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_RFO.L3_HIT.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.SUPPLIER_NONE.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80020122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD TBD",
- "MSRValue": "0x01003C0122",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & SUPPLIER_NONE & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_RFO.L3_HIT.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800020080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x02003C0122",
+ "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & SUPPLIER_NONE & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_RFO.L3_HIT.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400020001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD TBD",
- "MSRValue": "0x04003C0122",
+ "BriefDescription": "ALL_PF_DATA_RD & L3_HIT_M & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_M.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000040490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD TBD",
- "MSRValue": "0x08003C0122",
+ "BriefDescription": "Number of PREFETCHW instructions executed.",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_RFO.L3_HIT.HIT_OTHER_CORE_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x32",
+ "EventName": "SW_PREFETCH_ACCESS.PREFETCHW",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD TBD",
- "MSRValue": "0x10003C0122",
+ "BriefDescription": "ALL_PF_DATA_RD & L3_HIT_S & SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_RFO.L3_HIT.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_S.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200100490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD TBD",
- "MSRValue": "0x3F803C0122",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & L3_HIT_S & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_RFO.L3_HIT.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT_S.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800100020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD",
- "MSRValue": "0x00800207F7",
+ "BriefDescription": "ALL_DATA_RD & L3_HIT_E & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_READS.SUPPLIER_NONE.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800080491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x01000207F7",
+ "BriefDescription": "ALL_PF_RFO & L3_HIT_M & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_READS.SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT_M.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80040120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD",
- "MSRValue": "0x02000207F7",
+ "BriefDescription": "ALL_PF_RFO & L3_HIT_M & SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_READS.SUPPLIER_NONE.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT_M.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200040120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x04000207F7",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & SUPPLIER_NONE & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_READS.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000020010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x08000207F7",
+ "BriefDescription": "ALL_PF_DATA_RD & SUPPLIER_NONE & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_READS.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400020490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x10000207F7",
+ "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & L3_HIT & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_READS.SUPPLIER_NONE.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C0004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x3F800207F7",
+ "BriefDescription": "ALL_PF_DATA_RD & L3_HIT_F & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_READS.SUPPLIER_NONE.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800200490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD",
- "MSRValue": "0x00800407F7",
+ "BriefDescription": "ALL_RFO & L3_HIT_E & SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_READS.L3_HIT_M.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_HIT_E.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200080122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x01000407F7",
+ "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & L3_HIT_M & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_READS.L3_HIT_M.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400040001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD",
- "MSRValue": "0x02000407F7",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & L3_HIT & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_READS.L3_HIT_M.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x08003C0010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x04000407F7",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & L3_HIT_S & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_READS.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT_S.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800100100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x08000407F7",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_READS.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x01003C0080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x10000407F7",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & L3_HIT_M & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_READS.L3_HIT_M.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_M.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000040080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x3F800407F7",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & L3_HIT_E & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_READS.L3_HIT_M.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_E.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000080010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD",
- "MSRValue": "0x00800807F7",
+ "BriefDescription": "ALL_PF_RFO & SUPPLIER_NONE & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_READS.L3_HIT_E.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100020120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x01000807F7",
+ "BriefDescription": "ALL_RFO & L3_HIT_S & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_READS.L3_HIT_E.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_HIT_S.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800100122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD",
- "MSRValue": "0x02000807F7",
+ "BriefDescription": "ALL_PF_RFO & SUPPLIER_NONE & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_READS.L3_HIT_E.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400020120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x04000807F7",
+ "BriefDescription": "Core cycles where the core was running in a manner where Turbo may be clipped to the AVX2 turbo schedule.",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_READS.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x28",
+ "EventName": "CORE_POWER.LVL1_TURBO_LICENSE",
+ "PublicDescription": "Core cycles where the core was running with power-delivery for license level 1. This includes high current AVX 256-bit instructions as well as low current AVX 512-bit instructions.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x18"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x08000807F7",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & L3_HIT_M & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_READS.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT_M.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800040020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x10000807F7",
+ "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & L3_HIT_F & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_READS.L3_HIT_E.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400200004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x3F800807F7",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_READS.L3_HIT_E.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080020080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD",
- "MSRValue": "0x00801007F7",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & L3_HIT_M & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_READS.L3_HIT_S.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT_M.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000040020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x01001007F7",
+ "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & PMM_HIT_LOCAL_PMM & SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_READS.L3_HIT_S.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080400001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD",
- "MSRValue": "0x02001007F7",
+ "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & L3_HIT_E & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_READS.L3_HIT_S.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_E.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100080004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x04001007F7",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & L3_HIT_E & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_READS.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT_E.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80080100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x08001007F7",
+ "BriefDescription": "ALL_PF_DATA_RD & L3_HIT_S & SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_READS.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_S.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080100490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x10001007F7",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & L3_HIT_S & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_READS.L3_HIT_S.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400100020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x3F801007F7",
+ "BriefDescription": "ALL_RFO & L3_HIT_E & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_READS.L3_HIT_S.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_HIT_E.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80080122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD",
- "MSRValue": "0x00802007F7",
+ "BriefDescription": "ALL_PF_RFO & L3_HIT_S & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_READS.L3_HIT_F.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400100120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x01002007F7",
+ "BriefDescription": "ALL_DATA_RD & PMM_HIT_LOCAL_PMM & SNOOP_NOT_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_READS.L3_HIT_F.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100400491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD",
- "MSRValue": "0x02002007F7",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & L3_HIT_E & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_READS.L3_HIT_F.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_E.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100080080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x04002007F7",
+ "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & L3_HIT_F & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_READS.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT_F.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80200002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x08002007F7",
+ "BriefDescription": "ALL_PF_RFO & L3_HIT & SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_READS.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00803C0120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x10002007F7",
+ "BriefDescription": "Number of hardware interrupts received by the processor.",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_READS.L3_HIT_F.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xCB",
+ "EventName": "HW_INTERRUPTS.RECEIVED",
+ "PublicDescription": "Counts the number of hardware interruptions received by the processor.",
+ "SampleAfterValue": "203",
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x3F802007F7",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_READS.L3_HIT_F.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_F.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200200080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x00803C07F7",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_READS.L3_HIT.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.SUPPLIER_NONE.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200020020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD TBD",
- "MSRValue": "0x01003C07F7",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_READS.L3_HIT.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x08007C0080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x02003C07F7",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & L3_HIT & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_READS.L3_HIT.SNOOP_MISS",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD TBD",
- "MSRValue": "0x04003C07F7",
+ "BriefDescription": "Counts any other requests OTHER & L3_HIT_S & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_READS.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_HIT_S.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000108000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD TBD",
- "MSRValue": "0x08003C07F7",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & L3_HIT_S & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_READS.L3_HIT.HIT_OTHER_CORE_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_S.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100100080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD TBD",
- "MSRValue": "0x10003C07F7",
+ "BriefDescription": "Counts demand data reads",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_READS.L3_HIT.HITM_OTHER_CORE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_M.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080040001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD TBD",
- "MSRValue": "0x3F803C07F7",
+ "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & L3_HIT & SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_READS.L3_HIT.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00803C0002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand data reads have any response type.",
- "MSRValue": "0x0000010001",
+ "BriefDescription": "ALL_DATA_RD & L3_HIT_M & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400040491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand data reads hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
- "MSRValue": "0x01003C0001",
+ "BriefDescription": "ALL_PF_RFO & SUPPLIER_NONE & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800020120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand data reads",
- "MSRValue": "0x08007C0001",
+ "BriefDescription": "ALL_PF_DATA_RD & L3_HIT_S & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_S.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100100490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
"BriefDescription": "Counts all demand data writes (RFOs) have any response type.",
- "MSRValue": "0x0000010002",
"Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
- },
- {
+ "MSRValue": "0x0000010002",
"Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
- "MSRValue": "0x01003C0002",
- "Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_RFO.L3_HIT.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "MSRValue": "0x08007C0002",
+ "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & L3_HIT_M & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_M.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000040004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand code reads have any response type.",
- "MSRValue": "0x0000010004",
+ "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & SUPPLIER_NONE & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_CODE_RD.ANY_RESPONSE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100020004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand code reads hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
- "MSRValue": "0x01003C0004",
+ "BriefDescription": "Counts demand data reads",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_CODE_RD.L3_HIT.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_S.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080100001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand code reads",
- "MSRValue": "0x08007C0004",
+ "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & SUPPLIER_NONE & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400020002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads have any response type.",
- "MSRValue": "0x0000010010",
+ "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & L3_HIT_S & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_DATA_RD.ANY_RESPONSE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800100004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
- "MSRValue": "0x01003C0010",
+ "BriefDescription": "Counts any other requests OTHER & L3_HIT_F & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400208000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
- "MSRValue": "0x08007C0010",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs have any response type.",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0000010100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs have any response type.",
- "MSRValue": "0x0000010020",
+ "BriefDescription": "ALL_DATA_RD & L3_HIT_E & SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_RFO.ANY_RESPONSE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT_E.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200080491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
- "MSRValue": "0x01003C0020",
+ "BriefDescription": "ALL_PF_DATA_RD & L3_HIT_E & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_RFO.L3_HIT.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400080490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
- "MSRValue": "0x08007C0020",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & L3_HIT_M & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT_M.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100040100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads have any response type.",
- "MSRValue": "0x0000010080",
+ "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & L3_HIT_S & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_DATA_RD.ANY_RESPONSE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_S.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80100001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
- "MSRValue": "0x01003C0080",
+ "BriefDescription": "ALL_DATA_RD & L3_HIT & SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x02003C0491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
- "MSRValue": "0x08007C0080",
+ "BriefDescription": "Counts any other requests OTHER & L3_HIT_M & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400048000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs have any response type.",
- "MSRValue": "0x0000010100",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & L3_HIT & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_RFO.ANY_RESPONSE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C0010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
- "MSRValue": "0x01003C0100",
+ "BriefDescription": "Counts any other requests",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_RFO.L3_HIT.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_HIT_F.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200208000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
- "MSRValue": "0x08007C0100",
+ "BriefDescription": "ALL_PF_DATA_RD & L3_HIT & SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00803C0490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests have any response type.",
- "MSRValue": "0x0000010400",
+ "BriefDescription": "ALL_PF_RFO & SUPPLIER_NONE & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L1D_AND_SW.ANY_RESPONSE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.SUPPLIER_NONE.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80020120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
- "MSRValue": "0x01003C0400",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L1D_AND_SW.L3_HIT.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x01003C0010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
"BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
- "MSRValue": "0x08007C0400",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L1D_AND_SW.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_F.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080200400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts any other requests have any response type.",
- "MSRValue": "0x0000018000",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & L3_HIT_E & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.OTHER.ANY_RESPONSE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT_E.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800080020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts any other requests hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
- "MSRValue": "0x01003C8000",
+ "BriefDescription": "ALL_DATA_RD & L3_HIT_F & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.OTHER.L3_HIT.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800200491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts any other requests",
- "MSRValue": "0x08007C8000",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & L3_HIT_S & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.OTHER.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_S.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000100400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD have any response type.",
- "MSRValue": "0x0000010490",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_DATA_RD.ANY_RESPONSE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080020010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
- "MSRValue": "0x01003C0490",
+ "BriefDescription": "Counts all demand code reads",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_M.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080040004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD",
- "MSRValue": "0x08007C0490",
+ "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & L3_HIT_E & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400080004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD have any response type.",
- "MSRValue": "0x0000010120",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & L3_HIT_E & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_RFO.ANY_RESPONSE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400080100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
- "MSRValue": "0x01003C0120",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & L3_HIT_M & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_RFO.L3_HIT.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_M.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000040010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD",
- "MSRValue": "0x08007C0120",
+ "BriefDescription": "Counts demand data reads",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200020001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD have any response type.",
- "MSRValue": "0x0000010491",
+ "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & L3_HIT_E & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_DATA_RD.ANY_RESPONSE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400080001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
- "MSRValue": "0x01003C0491",
+ "BriefDescription": "ALL_READS & L3_HIT_F & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x04002007F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD",
- "MSRValue": "0x08007C0491",
+ "BriefDescription": "ALL_RFO & L3_HIT_E & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_HIT_E.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000080122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD have any response type.",
- "MSRValue": "0x0000010122",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_RFO.ANY_RESPONSE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x08007C0020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
- "MSRValue": "0x01003C0122",
+ "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & L3_HIT_F & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_RFO.L3_HIT.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400200002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD",
- "MSRValue": "0x08007C0122",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT_E.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200080020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD have any response type.",
- "MSRValue": "0x00000107F7",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_READS.ANY_RESPONSE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.SUPPLIER_NONE.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080020400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
- "MSRValue": "0x01003C07F7",
+ "BriefDescription": "ALL_DATA_RD & SUPPLIER_NONE & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_READS.L3_HIT.NO_SNOOP_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100020491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD",
- "MSRValue": "0x08007C07F7",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & L3_HIT_S & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_READS.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT_S.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80100100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand data reads TBD",
- "MSRValue": "0x0100400001",
+ "BriefDescription": "ALL_PF_RFO & L3_HIT_F & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT_F.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80200120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand data reads TBD",
- "MSRValue": "0x0080400001",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x01003C0400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) TBD",
- "MSRValue": "0x0100400002",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & L3_HIT_M & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400040080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) TBD",
- "MSRValue": "0x0080400002",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & L3_HIT_F & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_F.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000200010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand code reads TBD",
- "MSRValue": "0x0100400004",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & L3_HIT_M & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_CODE_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT_M.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100040020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand code reads TBD",
- "MSRValue": "0x0080400004",
+ "BriefDescription": "Counts any other requests OTHER & L3_HIT & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_CODE_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C8000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
- "MSRValue": "0x0100400010",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & L3_HIT_M & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT_M.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800040100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
- "MSRValue": "0x0080400010",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT_S.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200100100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
- "MSRValue": "0x0100400020",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & L3_HIT_E & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400080400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
- "MSRValue": "0x0080400020",
+ "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & L3_HIT_F & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_F.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000200001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
- "MSRValue": "0x0100400080",
+ "BriefDescription": "Number of PREFETCHNTA instructions executed.",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x32",
+ "EventName": "SW_PREFETCH_ACCESS.NTA",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
- "MSRValue": "0x0080400080",
+ "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & L3_HIT_F & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_F.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100200004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
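
As a worked check of the field layout, the OCR.DEMAND_CODE_RD.L3_HIT_F.NO_SNOOP_NEEDED entry above decomposes exactly as its name reads, using the bit positions from the decoder sketch earlier in this hunk:

    v = 0x0100200004
    assert v & 0x000000FFFF == 0x0000000004  # request:  DEMAND_CODE_RD
    assert v & 0x007FFF0000 == 0x0000200000  # supplier: L3_HIT_F
    assert v & 0x3F80000000 == 0x0100000000  # snoop:    NO_SNOOP_NEEDED
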
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
- "MSRValue": "0x0100400100",
+ "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & L3_HIT_M & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800040004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
- "MSRValue": "0x0080400100",
+ "BriefDescription": "ALL_PF_RFO & L3_HIT_S & SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT_S.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080100120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
- "MSRValue": "0x0100400400",
+ "BriefDescription": "Counts all demand code reads",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L1D_AND_SW.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_S.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080100004",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
- "MSRValue": "0x0080400400",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & L3_HIT_M & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L1D_AND_SW.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_M.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100040400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts any other requests TBD",
- "MSRValue": "0x0100408000",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & L3_HIT & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.OTHER.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x08003C0400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts any other requests TBD",
- "MSRValue": "0x0080408000",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
"Counter": "0,1,2,3",
- "EventName": "OCR.OTHER.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_F.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200200400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0100400490",
+ "BriefDescription": "ALL_RFO & L3_HIT_E & SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_HIT_E.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080080122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0080400490",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & L3_HIT_S & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT_S.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80100020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0100400120",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & L3_HIT_S & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT_S.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100100020",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0080400120",
+ "BriefDescription": "ALL_RFO & L3_HIT_F & SNOOP_NONE",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_HIT_F.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080200122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0100400491",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & L3_HIT & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x08003C0080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0080400491",
+ "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & L3_HIT_M & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_M.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100040001",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0100400122",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & L3_HIT_S & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800100010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0080400122",
+ "BriefDescription": "ALL_DATA_RD & SUPPLIER_NONE & HITM_OTHER_CORE",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000020491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x01004007F7",
+ "BriefDescription": "ALL_PF_DATA_RD & L3_HIT_E & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_READS.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800080490",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x00804007F7",
+ "BriefDescription": "Counts any other requests OTHER & L3_HIT_F & NO_SNOOP_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_READS.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_HIT_F.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100208000",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand data reads TBD",
- "MSRValue": "0x3F80400001",
+ "BriefDescription": "ALL_READS & L3_HIT_S & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_HIT_S.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F801007F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) TBD",
- "MSRValue": "0x3F80400002",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT_E.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200080002",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand code reads TBD",
- "MSRValue": "0x3F80400004",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & SUPPLIER_NONE & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.DEMAND_CODE_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800020010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
- "MSRValue": "0x3F80400010",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & L3_HIT & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x04003C0080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
- "MSRValue": "0x3F80400020",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L2_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT_F.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080200100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
- "MSRValue": "0x3F80400080",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & L3_HIT_E & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800080080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
- "MSRValue": "0x3F80400100",
+ "BriefDescription": "ALL_RFO & PMM_HIT_LOCAL_PMM & SNOOP_NOT_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L3_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100400122",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
- "MSRValue": "0x3F80400400",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & L3_HIT_E & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.PF_L1D_AND_SW.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT_E.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800080100",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts any other requests TBD",
- "MSRValue": "0x3F80408000",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & L3_HIT_M & ANY_SNOOP",
"Counter": "0,1,2,3",
- "EventName": "OCR.OTHER.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_M.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80040080",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x3F80400490",
+ "BriefDescription": "ALL_PF_RFO & PMM_HIT_LOCAL_PMM & SNOOP_NOT_NEEDED",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100400120",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x3F80400120",
+ "BriefDescription": "ALL_DATA_RD & SUPPLIER_NONE & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_PF_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800020491",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x3F80400491",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & L3_HIT_E & HIT_OTHER_CORE_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_E.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800080400",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x3F80400122",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & L3_HIT_F & HIT_OTHER_CORE_NO_FWD",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400200010",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x3F804007F7",
+ "BriefDescription": "ALL_READS & L3_HIT_S & SNOOP_MISS",
"Counter": "0,1,2,3",
- "EventName": "OCR.ALL_READS.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_HIT_S.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x02001007F7",
+ "Offcore": "1",
"PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
}
]
\ No newline at end of file
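
The PublicDescription shared by the OCR.* entries above states that an offcore response event can only be programmed with a specific event-select/counter pair plus a value in a dedicated MSR. A minimal sketch of what that means at the perf_event_open(2) level follows; it assumes a Cascade Lake part running Linux and borrows the encoding from the OCR.ALL_READS.L3_HIT_S.ANY_SNOOP entry in this file (EventCode 0xB7, UMask 0x1, MSRValue 0x3F801007F7). Per perf_event_open(2), the auxiliary MSR value for offcore response events is passed in attr.config1; nothing below is part of this patch itself.

/* Minimal sketch: count one offcore response event via perf_event_open(2).
 * Assumptions: Cascade Lake CPU, Linux. Encoding taken from the
 * OCR.ALL_READS.L3_HIT_S.ANY_SNOOP entry above; the MSRValue goes into
 * attr.config1, which the kernel programs into the OFFCORE_RSP MSR
 * (MSRIndex 0x1a6/0x1a7 in the entry). */
#include <linux/perf_event.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_RAW;
	attr.config = 0x1b7;            /* UMask 0x1 << 8 | EventCode 0xB7 */
	attr.config1 = 0x3F801007F7ULL; /* MSRValue -> OFFCORE_RSP MSR */
	attr.disabled = 1;
	attr.exclude_kernel = 1;

	int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... workload under measurement ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	uint64_t count;

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("OCR.ALL_READS.L3_HIT_S.ANY_SNOOP: %llu\n",
		       (unsigned long long)count);
	close(fd);
	return 0;
}

Once these JSON files are built into the perf tool, the same event is reachable by name, e.g. perf stat -e OCR.ALL_READS.L3_HIT_S.ANY_SNOOP on a Cascade Lake system.
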
diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/pipeline.json b/tools/perf/pmu-events/arch/x86/cascadelakex/pipeline.json
index 5b7df05f900c..5ec668f46ac1 100644
--- a/tools/perf/pmu-events/arch/x86/cascadelakex/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/cascadelakex/pipeline.json
@@ -1,969 +1,969 @@
[
{
- "EventCode": "0x00",
- "UMask": "0x1",
- "BriefDescription": "Instructions retired from execution.",
- "Counter": "Fixed counter 0",
- "EventName": "INST_RETIRED.ANY",
- "PublicDescription": "Counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, Counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers. Notes: INST_RETIRED.ANY is counted by a designated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. INST_RETIRED.ANY_P is counted by a programmable counter and it is an architectural performance event. Counting: Faulting executions of GETSEC/VM entry/VM Exit/MWait will not count as retired instructions.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "Fixed counter 0"
- },
- {
- "EventCode": "0x00",
- "UMask": "0x2",
- "BriefDescription": "Core cycles when the thread is not in halt state",
- "Counter": "Fixed counter 1",
- "EventName": "CPU_CLK_UNHALTED.THREAD",
- "PublicDescription": "Counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "Fixed counter 1"
+ "BriefDescription": "Far branch instructions retired.",
+ "Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "Errata": "SKL091",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.FAR_BRANCH",
+ "PEBS": "1",
+ "PublicDescription": "This event counts far branch instructions retired.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x40"
},
{
- "EventCode": "0x00",
- "UMask": "0x2",
- "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
- "Counter": "Fixed counter 1",
- "EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
- "AnyThread": "1",
+ "BriefDescription": "Total execution stalls.",
+ "Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "CounterMask": "4",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
"SampleAfterValue": "2000003",
- "CounterHTOff": "Fixed counter 1"
+ "UMask": "0x4"
},
{
- "EventCode": "0x00",
- "UMask": "0x3",
- "BriefDescription": "Reference cycles when the core is not in halt state.",
- "Counter": "Fixed counter 2",
- "EventName": "CPU_CLK_UNHALTED.REF_TSC",
- "PublicDescription": "Counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. This event has a constant ratio with the CPU_CLK_UNHALTED.REF_XCLK event. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. Note: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.",
+ "BriefDescription": "Number of slow LEA uops being allocated. A uop is generally considered SlowLea if it has 3 sources (e.g. 2 sources + immediate) regardless if as a result of LEA instruction or not.",
+ "Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x0E",
+ "EventName": "UOPS_ISSUED.SLOW_LEA",
"SampleAfterValue": "2000003",
- "CounterHTOff": "Fixed counter 2"
+ "UMask": "0x20"
},
{
- "EventCode": "0x03",
- "UMask": "0x2",
- "BriefDescription": "Loads blocked by overlapping with store buffer that cannot be forwarded .",
+ "BriefDescription": "Cycles with less than 10 actually retired uops.",
"Counter": "0,1,2,3",
- "EventName": "LD_BLOCKS.STORE_FORWARD",
- "PublicDescription": "Counts how many times the load operation got the true Block-on-Store blocking code preventing store forwarding. This includes cases when:a. preceding store conflicts with the load (incomplete overlap),b. store forwarding is impossible due to u-arch limitations,c. preceding lock RMW operations are not forwarded,d. store has the no-forward bit set (uncacheable/page-split/masked stores),e. all-blocking stores are used (mostly, fences and port I/O), and others.The most common case is a load blocked due to its address range overlapping with a preceding smaller uncompleted store. Note: This event does not take into account cases of out-of-SW-control (for example, SbTailHit), unknown physical STA, and cases of blocking loads on store due to being non-WB memory type or a lock. These cases are covered by other events. See the table of not supported store forwards in the Optimization Guide.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "CounterMask": "10",
+ "EventCode": "0xC2",
+ "EventName": "UOPS_RETIRED.TOTAL_CYCLES",
+ "Invert": "1",
+ "PublicDescription": "Number of cycles using always true condition (uops_ret < 16) applied to non PEBS uops retired event.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "EventCode": "0x03",
- "UMask": "0x8",
- "BriefDescription": "The number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use",
+ "BriefDescription": "Thread cycles when thread is not in halt state",
"Counter": "0,1,2,3",
- "EventName": "LD_BLOCKS.NO_SR",
- "PublicDescription": "The number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P",
+ "PublicDescription": "This is an architectural event that counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling. For this reason, this event may have a changing ratio with regards to wall clock time.",
+ "SampleAfterValue": "2000003"
},
{
- "EventCode": "0x07",
- "UMask": "0x1",
- "BriefDescription": "False dependencies in MOB due to partial compare on address.",
+ "BriefDescription": "Cycles without actually retired uops.",
"Counter": "0,1,2,3",
- "EventName": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS",
- "PublicDescription": "Counts false dependencies in MOB when the partial comparison upon loose net check and dependency was resolved by the Enhanced Loose net mechanism. This may not result in high performance penalties. Loose net checks can fail when loads and stores are 4k aliased.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EventCode": "0xC2",
+ "EventName": "UOPS_RETIRED.STALL_CYCLES",
+ "Invert": "1",
+ "PublicDescription": "This event counts cycles without actually retired uops.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "EventCode": "0x0D",
- "UMask": "0x1",
- "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for this thread (e.g. misprediction or memory nuke)",
+ "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core.",
"Counter": "0,1,2,3",
- "EventName": "INT_MISC.RECOVERY_CYCLES",
- "PublicDescription": "Core cycles the Resource allocator was stalled due to recovery from an earlier branch misprediction or machine clear event.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
+ "Invert": "1",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0x0D",
- "UMask": "0x1",
+ "AnyThread": "1",
"BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
"Counter": "0,1,2,3",
- "EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0x0D",
- "UMask": "0x80",
- "BriefDescription": "Cycles the issue-stage is waiting for front-end to fetch from resteered path following branch misprediction or machine clear events.",
- "Counter": "0,1,2,3",
- "EventName": "INT_MISC.CLEAR_RESTEER_CYCLES",
+ "EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "Invert": "1",
- "EventCode": "0x0E",
- "UMask": "0x1",
- "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread",
+ "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
"Counter": "0,1,2,3",
- "EventName": "UOPS_ISSUED.STALL_CYCLES",
- "CounterMask": "1",
- "PublicDescription": "Counts cycles during which the Resource Allocation Table (RAT) does not issue any Uops to the reservation station (RS) for the current thread.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "CounterMask": "4",
+ "EventCode": "0xA8",
+ "EventName": "LSD.CYCLES_4_UOPS",
+ "PublicDescription": "Counts the cycles when 4 uops are delivered by the LSD (Loop-stream detector).",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0x0E",
- "UMask": "0x1",
- "BriefDescription": "Uops that Resource Allocation Table (RAT) issues to Reservation Station (RS)",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_ISSUED.ANY",
- "PublicDescription": "Counts the number of uops that the Resource Allocation Table (RAT) issues to the Reservation Station (RS).",
+ "BriefDescription": "Core cycles when the thread is not in halt state",
+ "Counter": "Fixed counter 1",
+ "CounterHTOff": "Fixed counter 1",
+ "EventName": "CPU_CLK_UNHALTED.THREAD",
+ "PublicDescription": "Counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0x0E",
- "UMask": "0x2",
- "BriefDescription": "Uops inserted at issue-stage in order to preserve upper bits of vector registers.",
+ "AnyThread": "1",
+ "BriefDescription": "Core crystal clock cycles when at least one thread on the physical core is unhalted.",
"Counter": "0,1,2,3",
- "EventName": "UOPS_ISSUED.VECTOR_WIDTH_MISMATCH",
- "PublicDescription": "Counts the number of Blend Uops issued by the Resource Allocation Table (RAT) to the reservation station (RS) in order to preserve upper bits of vector registers. Starting with the Skylake microarchitecture, these Blend uops are needed since every Intel SSE instruction executed in Dirty Upper State needs to preserve bits 128-255 of the destination register. For more information, refer to Mixing Intel AVX and Intel SSE Code section of the Optimization Guide.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
+ "SampleAfterValue": "25003",
+ "UMask": "0x1"
},
{
- "EventCode": "0x0E",
- "UMask": "0x20",
- "BriefDescription": "Number of slow LEA uops being allocated. A uop is generally considered SlowLea if it has 3 sources (e.g. 2 sources + immediate) regardless if as a result of LEA instruction or not.",
+ "BriefDescription": "Direct and indirect near call instructions retired.",
"Counter": "0,1,2,3",
- "EventName": "UOPS_ISSUED.SLOW_LEA",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "Errata": "SKL091",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.NEAR_CALL",
+ "PEBS": "1",
+ "PublicDescription": "This event counts both direct and indirect near call instructions retired.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x2"
},
{
- "EventCode": "0x14",
- "UMask": "0x1",
"BriefDescription": "Cycles when divide unit is busy executing divide or square root operations. Accounts for integer and floating-point operations.",
"Counter": "0,1,2,3",
- "EventName": "ARITH.DIVIDER_ACTIVE",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
+ "EventCode": "0x14",
+ "EventName": "ARITH.DIVIDER_ACTIVE",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Core crystal clock cycles when this thread is unhalted and the other thread is halted.",
+ "Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0x3C",
- "UMask": "0x0",
- "BriefDescription": "Thread cycles when thread is not in halt state",
+ "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
+ "SampleAfterValue": "25003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of near branch instructions retired that were mispredicted and taken.",
"Counter": "0,1,2,3",
- "EventName": "CPU_CLK_UNHALTED.THREAD_P",
- "PublicDescription": "This is an architectural event that counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling. For this reason, this event may have a changing ratio with regards to wall clock time.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xC5",
+ "EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
+ "PEBS": "1",
+ "SampleAfterValue": "400009",
+ "UMask": "0x20"
},
{
- "EventCode": "0x3C",
- "UMask": "0x0",
- "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "BriefDescription": "Increments whenever there is an update to the LBR array.",
"Counter": "0,1,2,3",
- "EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
- "AnyThread": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xCC",
+ "EventName": "ROB_MISC_EVENTS.LBR_INSERTS",
+ "PublicDescription": "Increments when an entry is added to the Last Branch Record (LBR) array (or removed from the array in case of RETURNs in call stack mode). The event requires LBR enable via IA32_DEBUGCTL MSR and branch type selection via MSR_LBR_SELECT.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x20"
},
{
- "EdgeDetect": "1",
- "EventCode": "0x3C",
- "UMask": "0x0",
- "BriefDescription": "Counts when there is a transition from ring 1, 2 or 3 to ring 0.",
- "Counter": "0,1,2,3",
- "EventName": "CPU_CLK_UNHALTED.RING0_TRANS",
- "CounterMask": "1",
- "PublicDescription": "Counts when the Current Privilege Level (CPL) transitions from ring 1, 2 or 3 to ring 0 (Kernel).",
- "SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Instructions retired from execution.",
+ "Counter": "Fixed counter 0",
+ "CounterHTOff": "Fixed counter 0",
+ "EventName": "INST_RETIRED.ANY",
+ "PublicDescription": "Counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, Counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers. Notes: INST_RETIRED.ANY is counted by a designated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. INST_RETIRED.ANY_P is counted by a programmable counter and it is an architectural performance event. Counting: Faulting executions of GETSEC/VM entry/VM Exit/MWait will not count as retired instructions.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "EventCode": "0x3C",
- "UMask": "0x1",
- "BriefDescription": "Core crystal clock cycles when the thread is unhalted.",
+ "BriefDescription": "Conditional branch instructions retired.",
"Counter": "0,1,2,3",
- "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK",
- "SampleAfterValue": "25003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "Errata": "SKL091",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.CONDITIONAL",
+ "PEBS": "1",
+ "PublicDescription": "This event counts conditional branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x1"
},
{
- "EventCode": "0x3C",
- "UMask": "0x1",
- "BriefDescription": "Core crystal clock cycles when at least one thread on the physical core is unhalted.",
+ "BriefDescription": "Cycles the issue-stage is waiting for front-end to fetch from resteered path following branch misprediction or machine clear events.",
"Counter": "0,1,2,3",
- "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
- "AnyThread": "1",
- "SampleAfterValue": "25003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x0D",
+ "EventName": "INT_MISC.CLEAR_RESTEER_CYCLES",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x80"
},
{
- "EventCode": "0x3C",
- "UMask": "0x1",
- "BriefDescription": "Core crystal clock cycles when at least one thread on the physical core is unhalted.",
+ "BriefDescription": "Cycles where at least 1 uop was executed per-thread",
"Counter": "0,1,2,3",
- "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
- "AnyThread": "1",
- "SampleAfterValue": "25003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC",
+ "PublicDescription": "Cycles where at least 1 uop was executed per-thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "EventCode": "0x3C",
- "UMask": "0x1",
- "BriefDescription": "Core crystal clock cycles when the thread is unhalted.",
+ "BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
"Counter": "0,1,2,3",
- "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
- "SampleAfterValue": "25003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.STALL_CYCLES",
+ "Invert": "1",
+ "PublicDescription": "Counts cycles during which no uops were dispatched from the Reservation Station (RS) per thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "EventCode": "0x3C",
- "UMask": "0x2",
- "BriefDescription": "Core crystal clock cycles when this thread is unhalted and the other thread is halted.",
+ "BriefDescription": "Cycles total of 4 uops are executed on all ports and Reservation Station was not empty.",
"Counter": "0,1,2,3",
- "EventName": "CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE",
- "SampleAfterValue": "25003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xA6",
+ "EventName": "EXE_ACTIVITY.4_PORTS_UTIL",
+ "PublicDescription": "Cycles total of 4 uops are executed on all ports and Reservation Station (RS) was not empty.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
},
{
- "EventCode": "0x3C",
- "UMask": "0x2",
- "BriefDescription": "Core crystal clock cycles when this thread is unhalted and the other thread is halted.",
+ "BriefDescription": "Execution stalls while L2 cache miss demand load is outstanding.",
"Counter": "0,1,2,3",
- "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
- "SampleAfterValue": "25003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "CounterMask": "5",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x5"
},
{
- "EventCode": "0x4C",
- "UMask": "0x1",
- "BriefDescription": "Demand load dispatches that hit L1D fill buffer (FB) allocated for software prefetch.",
+ "BriefDescription": "Mispredicted macro branch instructions retired.",
"Counter": "0,1,2,3",
- "EventName": "LOAD_HIT_PRE.SW_PF",
- "PublicDescription": "Counts all not software-prefetch load dispatches that hit the fill buffer (FB) allocated for the software prefetch. It can also be incremented by some lock instructions. So it should only be used with profiling so that the locks can be excluded by ASM (Assembly File) inspection of the nearby instructions.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3",
+ "EventCode": "0xC5",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES_PEBS",
+ "PEBS": "2",
+ "PublicDescription": "This is a precise version of BR_MISP_RETIRED.ALL_BRANCHES that counts all mispredicted macro branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x4"
},
{
- "EventCode": "0x59",
- "UMask": "0x1",
- "BriefDescription": "Cycles where the pipeline is stalled due to serializing operations.",
+ "BriefDescription": "Cycles where at least 2 uops were executed per-thread",
"Counter": "0,1,2,3",
- "EventName": "PARTIAL_RAT_STALLS.SCOREBOARD",
- "PublicDescription": "This event counts cycles during which the microcode scoreboard stalls happen.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "CounterMask": "2",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC",
+ "PublicDescription": "Cycles where at least 2 uops were executed per-thread.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EdgeDetect": "1",
- "Invert": "1",
- "EventCode": "0x5E",
- "UMask": "0x1",
- "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
+ "BriefDescription": "Core crystal clock cycles when the thread is unhalted.",
"Counter": "0,1,2,3",
- "EventName": "RS_EVENTS.EMPTY_END",
- "CounterMask": "1",
- "PublicDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate front-end Latency Bound issues.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK",
+ "SampleAfterValue": "25003",
+ "UMask": "0x1"
},
{
- "EventCode": "0x5E",
- "UMask": "0x1",
- "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread",
+ "BriefDescription": "Core crystal clock cycles when this thread is unhalted and the other thread is halted.",
"Counter": "0,1,2,3",
- "EventName": "RS_EVENTS.EMPTY_CYCLES",
- "PublicDescription": "Counts cycles during which the reservation station (RS) is empty for the thread.; Note: In ST-mode, not active thread should drive 0. This is usually caused by severely costly branch mispredictions, or allocator/FE issues.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE",
+ "SampleAfterValue": "25003",
+ "UMask": "0x2"
},
{
- "EventCode": "0x87",
- "UMask": "0x1",
- "BriefDescription": "Stalls caused by changing prefix length of the instruction.",
+ "BriefDescription": "All (macro) branch instructions retired.",
"Counter": "0,1,2,3",
- "EventName": "ILD_STALL.LCP",
- "PublicDescription": "Counts cycles that the Instruction Length decoder (ILD) stalls occurred due to dynamically changing prefix length of the decoded instruction (by operand size prefix instruction 0x66, address size prefix instruction 0x67 or REX.W for Intel64). Count is proportional to the number of prefixes in a 16B-line. This may result in a three-cycle penalty for each LCP (Length changing prefix) in a 16-byte chunk.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "Errata": "SKL091",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "PublicDescription": "Counts all (macro) branch instructions retired.",
+ "SampleAfterValue": "400009"
},
{
- "EventCode": "0xA1",
- "UMask": "0x1",
"BriefDescription": "Cycles per thread when uops are executed in port 0",
"Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_0",
"PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 0.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xA1",
- "UMask": "0x2",
"BriefDescription": "Cycles per thread when uops are executed in port 1",
"Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_1",
"PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 1.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0xA1",
- "UMask": "0x4",
"BriefDescription": "Cycles per thread when uops are executed in port 2",
"Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_2",
"PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 2.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
- "EventCode": "0xA1",
- "UMask": "0x8",
"BriefDescription": "Cycles per thread when uops are executed in port 3",
"Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_3",
"PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 3.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
- "EventCode": "0xA1",
- "UMask": "0x10",
"BriefDescription": "Cycles per thread when uops are executed in port 4",
"Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_4",
"PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 4.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
- "EventCode": "0xA1",
- "UMask": "0x20",
"BriefDescription": "Cycles per thread when uops are executed in port 5",
"Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_5",
"PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 5.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x20"
},
{
- "EventCode": "0xA1",
- "UMask": "0x40",
"BriefDescription": "Cycles per thread when uops are executed in port 6",
"Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_6",
"PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 6.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x40"
},
{
- "EventCode": "0xA1",
- "UMask": "0x80",
"BriefDescription": "Cycles per thread when uops are executed in port 7",
"Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xA1",
"EventName": "UOPS_DISPATCHED_PORT.PORT_7",
"PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 7.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x80"
},
{
- "EventCode": "0xa2",
- "UMask": "0x1",
- "BriefDescription": "Resource-related stall cycles",
+ "BriefDescription": "Cycles total of 3 uops are executed on all ports and Reservation Station was not empty.",
"Counter": "0,1,2,3",
- "EventName": "RESOURCE_STALLS.ANY",
- "PublicDescription": "Counts resource-related stall cycles.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xA6",
+ "EventName": "EXE_ACTIVITY.3_PORTS_UTIL",
+ "PublicDescription": "Cycles total of 3 uops are executed on all ports and Reservation Station (RS) was not empty.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
- "EventCode": "0xA2",
- "UMask": "0x8",
- "BriefDescription": "Cycles stalled due to no store buffers available. (not including draining form sync).",
+ "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
"Counter": "0,1,2,3",
- "EventName": "RESOURCE_STALLS.SB",
- "PublicDescription": "Counts allocation stall cycles caused by the store buffer (SB) being full. This counts cycles that the pipeline back-end blocked uop delivery from the front-end.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0x5E",
+ "EventName": "RS_EVENTS.EMPTY_END",
+ "Invert": "1",
+ "PublicDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate front-end Latency Bound issues.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xA3",
- "UMask": "0x1",
- "BriefDescription": "Cycles while L2 cache miss demand load is outstanding.",
+ "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread",
"Counter": "0,1,2,3",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
+ "EventCode": "0x0E",
+ "EventName": "UOPS_ISSUED.STALL_CYCLES",
+ "Invert": "1",
+ "PublicDescription": "Counts cycles during which the Resource Allocation Table (RAT) does not issue any Uops to the reservation station (RS) for the current thread.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xA3",
- "UMask": "0x4",
- "BriefDescription": "Total execution stalls.",
+ "BriefDescription": "Number of Uops delivered by the LSD.",
"Counter": "0,1,2,3",
- "EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
- "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xA8",
+ "EventName": "LSD.UOPS",
+ "PublicDescription": "Number of uops delivered to the back-end by the LSD(Loop Stream Detector).",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xA3",
- "UMask": "0x5",
- "BriefDescription": "Execution stalls while L2 cache miss demand load is outstanding.",
+ "BriefDescription": "Stalls caused by changing prefix length of the instruction.",
"Counter": "0,1,2,3",
- "EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS",
- "CounterMask": "5",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x87",
+ "EventName": "ILD_STALL.LCP",
+ "PublicDescription": "Counts cycles that the Instruction Length decoder (ILD) stalls occurred due to dynamically changing prefix length of the decoded instruction (by operand size prefix instruction 0x66, address size prefix instruction 0x67 or REX.W for Intel64). Count is proportional to the number of prefixes in a 16B-line. This may result in a three-cycle penalty for each LCP (Length changing prefix) in a 16-byte chunk.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xA3",
- "UMask": "0x8",
- "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
+ "BriefDescription": "Taken branch instructions retired.",
"Counter": "0,1,2,3",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
- "CounterMask": "8",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "Errata": "SKL091",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.NEAR_TAKEN",
+ "PEBS": "1",
+ "PublicDescription": "This event counts taken branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x20"
},
{
- "EventCode": "0xA3",
- "UMask": "0xc",
- "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
+ "BriefDescription": "Uops that Resource Allocation Table (RAT) issues to Reservation Station (RS)",
"Counter": "0,1,2,3",
- "EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
- "CounterMask": "12",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x0E",
+ "EventName": "UOPS_ISSUED.ANY",
+ "PublicDescription": "Counts the number of uops that the Resource Allocation Table (RAT) issues to the Reservation Station (RS).",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xA3",
- "UMask": "0x10",
- "BriefDescription": "Cycles while memory subsystem has an outstanding load.",
+ "BriefDescription": "Resource-related stall cycles",
"Counter": "0,1,2,3",
- "EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY",
- "CounterMask": "16",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xa2",
+ "EventName": "RESOURCE_STALLS.ANY",
+ "PublicDescription": "Counts resource-related stall cycles.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xA3",
- "UMask": "0x14",
"BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
"Counter": "0,1,2,3",
- "EventName": "CYCLE_ACTIVITY.STALLS_MEM_ANY",
+ "CounterHTOff": "0,1,2,3",
"CounterMask": "20",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_MEM_ANY",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x14"
},
{
- "EventCode": "0xA6",
- "UMask": "0x1",
"BriefDescription": "Cycles where no uops were executed, the Reservation Station was not empty, the Store Buffer was full and there was no outstanding load.",
"Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xA6",
"EventName": "EXE_ACTIVITY.EXE_BOUND_0_PORTS",
"PublicDescription": "Counts cycles during which no uops were executed on all ports and Reservation Station (RS) was not empty.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xA6",
- "UMask": "0x2",
- "BriefDescription": "Cycles total of 1 uop is executed on all ports and Reservation Station was not empty.",
+ "BriefDescription": "Cycles stalled due to no store buffers available. (not including draining form sync).",
"Counter": "0,1,2,3",
- "EventName": "EXE_ACTIVITY.1_PORTS_UTIL",
- "PublicDescription": "Counts cycles during which a total of 1 uop was executed on all ports and Reservation Station (RS) was not empty.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xA2",
+ "EventName": "RESOURCE_STALLS.SB",
+ "PublicDescription": "Counts allocation stall cycles caused by the store buffer (SB) being full. This counts cycles that the pipeline back-end blocked uop delivery from the front-end.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
- "EventCode": "0xA6",
- "UMask": "0x4",
- "BriefDescription": "Cycles total of 2 uops are executed on all ports and Reservation Station was not empty.",
+ "BriefDescription": "Not taken branch instructions retired.",
"Counter": "0,1,2,3",
- "EventName": "EXE_ACTIVITY.2_PORTS_UTIL",
- "PublicDescription": "Counts cycles during which a total of 2 uops were executed on all ports and Reservation Station (RS) was not empty.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "Errata": "SKL091",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.NOT_TAKEN",
+ "PublicDescription": "This event counts not taken branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x10"
},
{
- "EventCode": "0xA6",
- "UMask": "0x8",
- "BriefDescription": "Cycles total of 3 uops are executed on all ports and Reservation Station was not empty.",
+ "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
"Counter": "0,1,2,3",
- "EventName": "EXE_ACTIVITY.3_PORTS_UTIL",
- "PublicDescription": "Cycles total of 3 uops are executed on all ports and Reservation Station (RS) was not empty.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "CounterMask": "12",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xc"
},
{
- "EventCode": "0xA6",
- "UMask": "0x10",
- "BriefDescription": "Cycles total of 4 uops are executed on all ports and Reservation Station was not empty.",
+ "BriefDescription": "Cycles total of 2 uops are executed on all ports and Reservation Station was not empty.",
"Counter": "0,1,2,3",
- "EventName": "EXE_ACTIVITY.4_PORTS_UTIL",
- "PublicDescription": "Cycles total of 4 uops are executed on all ports and Reservation Station (RS) was not empty.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xA6",
+ "EventName": "EXE_ACTIVITY.2_PORTS_UTIL",
+ "PublicDescription": "Counts cycles during which a total of 2 uops were executed on all ports and Reservation Station (RS) was not empty.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
- "EventCode": "0xA6",
- "UMask": "0x40",
- "BriefDescription": "Cycles where the Store Buffer was full and no outstanding load.",
+ "BriefDescription": "Number of instructions retired. General Counter - architectural event",
"Counter": "0,1,2,3",
- "EventName": "EXE_ACTIVITY.BOUND_ON_STORES",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "Errata": "SKL091, SKL044",
+ "EventCode": "0xC0",
+ "EventName": "INST_RETIRED.ANY_P",
+ "PublicDescription": "Counts the number of instructions (EOMs) retired. Counting covers macro-fused instructions individually (that is, increments by two).",
+ "SampleAfterValue": "2000003"
},
{
- "EventCode": "0xA8",
- "UMask": "0x1",
- "BriefDescription": "Number of Uops delivered by the LSD.",
+ "BriefDescription": "Counts the number of x87 uops dispatched.",
"Counter": "0,1,2,3",
- "EventName": "LSD.UOPS",
- "PublicDescription": "Number of uops delivered to the back-end by the LSD(Loop Stream Detector).",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.X87",
+ "PublicDescription": "Counts the number of x87 uops executed.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
- "EventCode": "0xA8",
- "UMask": "0x1",
- "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
- "MSRValue": "0x00",
+ "BriefDescription": "Demand load dispatches that hit L1D fill buffer (FB) allocated for software prefetch.",
"Counter": "0,1,2,3",
- "EventName": "LSD.CYCLES_4_UOPS",
- "CounterMask": "4",
- "PublicDescription": "Counts the cycles when 4 uops are delivered by the LSD (Loop-stream detector).",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x4C",
+ "EventName": "LOAD_HIT_PRE.SW_PF",
+ "PublicDescription": "Counts all not software-prefetch load dispatches that hit the fill buffer (FB) allocated for the software prefetch. It can also be incremented by some lock instructions. So it should only be used with profiling so that the locks can be excluded by ASM (Assembly File) inspection of the nearby instructions.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xA8",
- "UMask": "0x1",
- "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
+ "BriefDescription": "Mispredicted direct and indirect near call instructions retired.",
"Counter": "0,1,2,3",
- "EventName": "LSD.CYCLES_ACTIVE",
- "CounterMask": "1",
- "PublicDescription": "Counts the cycles when at least one uop is delivered by the LSD (Loop-stream detector).",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xC5",
+ "EventName": "BR_MISP_RETIRED.NEAR_CALL",
+ "PEBS": "1",
+ "PublicDescription": "Counts both taken and not taken retired mispredicted direct and indirect near calls, including both register and memory indirect.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x2"
},
{
- "EventCode": "0xB1",
- "UMask": "0x1",
- "BriefDescription": "Cycles where at least 4 uops were executed per-thread",
+ "BriefDescription": "Counts the number of uops to be executed per-thread each cycle.",
"Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC",
- "CounterMask": "4",
- "PublicDescription": "Cycles where at least 4 uops were executed per-thread.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.THREAD",
+ "PublicDescription": "Number of uops to be executed per-thread each cycle.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB1",
- "UMask": "0x1",
"BriefDescription": "Cycles where at least 3 uops were executed per-thread",
"Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
"CounterMask": "3",
- "PublicDescription": "Cycles where at least 3 uops were executed per-thread.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
"EventCode": "0xB1",
- "UMask": "0x1",
- "BriefDescription": "Cycles where at least 2 uops were executed per-thread",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC",
- "CounterMask": "2",
- "PublicDescription": "Cycles where at least 2 uops were executed per-thread.",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC",
+ "PublicDescription": "Cycles where at least 3 uops were executed per-thread.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB1",
- "UMask": "0x1",
- "BriefDescription": "Cycles where at least 1 uop was executed per-thread",
+ "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
"Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
- "PublicDescription": "Cycles where at least 1 uop was executed per-thread.",
+ "EventCode": "0xA8",
+ "EventName": "LSD.CYCLES_ACTIVE",
+ "PublicDescription": "Counts the cycles when at least one uop is delivered by the LSD (Loop-stream detector).",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "Invert": "1",
- "EventCode": "0xB1",
- "UMask": "0x1",
- "BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
+ "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for this thread (e.g. misprediction or memory nuke)",
"Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.STALL_CYCLES",
- "CounterMask": "1",
- "PublicDescription": "Counts cycles during which no uops were dispatched from the Reservation Station (RS) per thread.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x0D",
+ "EventName": "INT_MISC.RECOVERY_CYCLES",
+ "PublicDescription": "Core cycles the Resource allocator was stalled due to recovery from an earlier branch misprediction or machine clear event.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB1",
- "UMask": "0x1",
- "BriefDescription": "Counts the number of uops to be executed per-thread each cycle.",
+ "BriefDescription": "Core crystal clock cycles when the thread is unhalted.",
"Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.THREAD",
- "PublicDescription": "Number of uops to be executed per-thread each cycle.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
+ "SampleAfterValue": "25003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xB1",
- "UMask": "0x2",
- "BriefDescription": "Number of uops executed on the core.",
+ "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
"Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CORE",
- "PublicDescription": "Number of uops executed from any thread.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "CounterMask": "8",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
- "Invert": "1",
- "EventCode": "0xB1",
- "UMask": "0x2",
- "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
- "CounterMask": "1",
+ "BriefDescription": "Precise instruction retired event with HW to reduce effect of PEBS shadow in IP distribution",
+ "Counter": "1",
+ "CounterHTOff": "1",
+ "Errata": "SKL091, SKL044",
+ "EventCode": "0xC0",
+ "EventName": "INST_RETIRED.PREC_DIST",
+ "PEBS": "2",
+ "PublicDescription": "A version of INST_RETIRED that allows for a more unbiased distribution of samples across instructions retired. It utilizes the Precise Distribution of Instructions Retired (PDIR) feature to mitigate some bias in how retired instructions get sampled.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB1",
- "UMask": "0x2",
- "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
+ "BriefDescription": "Not taken branch instructions retired.",
"Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
- "CounterMask": "4",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "Errata": "SKL091",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.COND_NTAKEN",
+ "PublicDescription": "This event counts not taken branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x10"
},
{
- "EventCode": "0xB1",
- "UMask": "0x2",
"BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
"Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
"CounterMask": "3",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
"EventCode": "0xB1",
- "UMask": "0x2",
- "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
- "CounterMask": "2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0xB1",
- "UMask": "0x2",
"BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
"Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0xB1",
- "UMask": "0x10",
- "BriefDescription": "Counts the number of x87 uops dispatched.",
+ "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
"Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.X87",
- "PublicDescription": "Counts the number of x87 uops executed.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "CounterMask": "4",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0xC0",
- "UMask": "0x0",
- "BriefDescription": "Number of instructions retired. General Counter - architectural event",
+ "BriefDescription": "Number of times a microcode assist is invoked by HW other than FP-assist. Examples include AD (page Access Dirty) and AVX* related assists.",
"Counter": "0,1,2,3",
- "EventName": "INST_RETIRED.ANY_P",
- "Errata": "SKL091, SKL044",
- "PublicDescription": "Counts the number of instructions (EOMs) retired. Counting covers macro-fused instructions individually (that is, increments by two).",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xC1",
+ "EventName": "OTHER_ASSISTS.ANY",
+ "SampleAfterValue": "100003",
+ "UMask": "0x3f"
},
{
- "EventCode": "0xC0",
- "UMask": "0x1",
- "BriefDescription": "Precise instruction retired event with HW to reduce effect of PEBS shadow in IP distribution",
- "PEBS": "2",
- "Counter": "1",
- "EventName": "INST_RETIRED.PREC_DIST",
- "Errata": "SKL091, SKL044",
- "PublicDescription": "A version of INST_RETIRED that allows for a more unbiased distribution of samples across instructions retired. It utilizes the Precise Distribution of Instructions Retired (PDIR) feature to mitigate some bias in how retired instructions get sampled.",
+ "BriefDescription": "Cycles where the Store Buffer was full and no outstanding load.",
+ "Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xA6",
+ "EventName": "EXE_ACTIVITY.BOUND_ON_STORES",
"SampleAfterValue": "2000003",
- "CounterHTOff": "1"
+ "UMask": "0x40"
},
{
- "Invert": "1",
- "EventCode": "0xC0",
- "UMask": "0x1",
- "BriefDescription": "Number of cycles using always true condition applied to PEBS instructions retired event.",
- "PEBS": "2",
- "Counter": "0,2,3",
- "EventName": "INST_RETIRED.TOTAL_CYCLES_PS",
- "CounterMask": "10",
- "Errata": "SKL091, SKL044",
- "PublicDescription": "Number of cycles using an always true condition applied to PEBS instructions retired event. (inst_ret< 16)",
+ "BriefDescription": "Cycles while memory subsystem has an outstanding load.",
+ "Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "CounterMask": "16",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,2,3"
+ "UMask": "0x10"
},
{
- "EventCode": "0xC1",
- "UMask": "0x3f",
- "BriefDescription": "Number of times a microcode assist is invoked by HW other than FP-assist. Examples include AD (page Access Dirty) and AVX* related assists.",
+ "BriefDescription": "Number of retired PAUSE instructions (that do not end up with a VMExit to the VMM; TSX aborted Instructions may be counted). This event is not supported on first SKL and KBL products.",
"Counter": "0,1,2,3",
- "EventName": "OTHER_ASSISTS.ANY",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xCC",
+ "EventName": "ROB_MISC_EVENTS.PAUSE_INST",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x40"
},
{
- "Invert": "1",
- "EventCode": "0xC2",
- "UMask": "0x2",
- "BriefDescription": "Cycles with less than 10 actually retired uops.",
+ "BriefDescription": "Cycles where at least 4 uops were executed per-thread",
"Counter": "0,1,2,3",
- "EventName": "UOPS_RETIRED.TOTAL_CYCLES",
- "CounterMask": "10",
- "PublicDescription": "Number of cycles using always true condition (uops_ret < 16) applied to non PEBS uops retired event.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "CounterMask": "4",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC",
+ "PublicDescription": "Cycles where at least 4 uops were executed per-thread.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "Invert": "1",
- "EventCode": "0xC2",
- "UMask": "0x2",
- "BriefDescription": "Cycles without actually retired uops.",
+ "BriefDescription": "Cycles where the pipeline is stalled due to serializing operations.",
"Counter": "0,1,2,3",
- "EventName": "UOPS_RETIRED.STALL_CYCLES",
- "CounterMask": "1",
- "PublicDescription": "This event counts cycles without actually retired uops.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x59",
+ "EventName": "PARTIAL_RAT_STALLS.SCOREBOARD",
+ "PublicDescription": "This event counts cycles during which the microcode scoreboard stalls happen.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xC2",
- "UMask": "0x2",
"BriefDescription": "Retirement slots used.",
"Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xC2",
"EventName": "UOPS_RETIRED.RETIRE_SLOTS",
"PublicDescription": "Counts the retirement slots used.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EdgeDetect": "1",
- "EventCode": "0xC3",
- "UMask": "0x1",
- "BriefDescription": "Number of machine clears (nukes) of any type.",
+ "BriefDescription": "Uops inserted at issue-stage in order to preserve upper bits of vector registers.",
"Counter": "0,1,2,3",
- "EventName": "MACHINE_CLEARS.COUNT",
- "CounterMask": "1",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x0E",
+ "EventName": "UOPS_ISSUED.VECTOR_WIDTH_MISMATCH",
+ "PublicDescription": "Counts the number of Blend Uops issued by the Resource Allocation Table (RAT) to the reservation station (RS) in order to preserve upper bits of vector registers. Starting with the Skylake microarchitecture, these Blend uops are needed since every Intel SSE instruction executed in Dirty Upper State needs to preserve bits 128-255 of the destination register. For more information, refer to Mixing Intel AVX and Intel SSE Code section of the Optimization Guide.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "EventCode": "0xC3",
- "UMask": "0x4",
- "BriefDescription": "Self-modifying code (SMC) detected.",
+ "BriefDescription": "Return instructions retired.",
"Counter": "0,1,2,3",
- "EventName": "MACHINE_CLEARS.SMC",
- "PublicDescription": "Counts self-modifying code (SMC) detected, which causes a machine clear.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "Errata": "SKL091",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.NEAR_RETURN",
+ "PEBS": "1",
+ "PublicDescription": "This event counts return instructions retired.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x8"
},
{
- "EventCode": "0xC4",
- "UMask": "0x0",
- "BriefDescription": "All (macro) branch instructions retired.",
+ "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
"Counter": "0,1,2,3",
- "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
- "Errata": "SKL091",
- "PublicDescription": "Counts all (macro) branch instructions retired.",
- "SampleAfterValue": "400009",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "CounterMask": "2",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "EventCode": "0xC4",
- "UMask": "0x1",
- "BriefDescription": "Conditional branch instructions retired.",
- "PEBS": "1",
+ "BriefDescription": "Mispredicted conditional branch instructions retired.",
"Counter": "0,1,2,3",
- "EventName": "BR_INST_RETIRED.CONDITIONAL",
- "Errata": "SKL091",
- "PublicDescription": "This event counts conditional branch instructions retired.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xC5",
+ "EventName": "BR_MISP_RETIRED.CONDITIONAL",
+ "PEBS": "1",
+ "PublicDescription": "This event counts mispredicted conditional branch instructions retired.",
"SampleAfterValue": "400009",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xC4",
- "UMask": "0x2",
- "BriefDescription": "Direct and indirect near call instructions retired.",
- "PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "BR_INST_RETIRED.NEAR_CALL",
- "Errata": "SKL091",
- "PublicDescription": "This event counts both direct and indirect near call instructions retired.",
- "SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Number of cycles using always true condition applied to PEBS instructions retired event.",
+ "Counter": "0,2,3",
+ "CounterHTOff": "0,2,3",
+ "CounterMask": "10",
+ "Errata": "SKL091, SKL044",
+ "EventCode": "0xC0",
+ "EventName": "INST_RETIRED.TOTAL_CYCLES_PS",
+ "Invert": "1",
+ "PEBS": "2",
+ "PublicDescription": "Number of cycles using an always true condition applied to PEBS instructions retired event. (inst_ret< 16)",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xC4",
- "UMask": "0x4",
- "BriefDescription": "All (macro) branch instructions retired.",
- "PEBS": "2",
+ "BriefDescription": "False dependencies in MOB due to partial compare on address.",
"Counter": "0,1,2,3",
- "EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
- "Errata": "SKL091",
- "PublicDescription": "This is a precise version of BR_INST_RETIRED.ALL_BRANCHES that counts all (macro) branch instructions retired.",
- "SampleAfterValue": "400009",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x07",
+ "EventName": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS",
+ "PublicDescription": "Counts false dependencies in MOB when the partial comparison upon loose net check and dependency was resolved by the Enhanced Loose net mechanism. This may not result in high performance penalties. Loose net checks can fail when loads and stores are 4k aliased.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xC4",
- "UMask": "0x8",
- "BriefDescription": "Return instructions retired.",
- "PEBS": "1",
+ "BriefDescription": "Number of machine clears (nukes) of any type.",
"Counter": "0,1,2,3",
- "EventName": "BR_INST_RETIRED.NEAR_RETURN",
- "Errata": "SKL091",
- "PublicDescription": "This event counts return instructions retired.",
- "SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0xC3",
+ "EventName": "MACHINE_CLEARS.COUNT",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xC4",
- "UMask": "0x10",
- "BriefDescription": "Not taken branch instructions retired.",
+ "AnyThread": "1",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "Counter": "Fixed counter 1",
+ "CounterHTOff": "Fixed counter 1",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.",
"Counter": "0,1,2,3",
- "EventName": "BR_INST_RETIRED.NOT_TAKEN",
- "Errata": "SKL091",
- "PublicDescription": "This event counts not taken branch instructions retired.",
- "SampleAfterValue": "400009",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xE6",
+ "EventName": "BACLEARS.ANY",
+ "PublicDescription": "Counts the number of times the front-end is resteered when it finds a branch instruction in a fetch line. This occurs for the first time a branch instruction is fetched or when the branch is not tracked by the BPU (Branch Prediction Unit) anymore.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xC4",
- "UMask": "0x20",
- "BriefDescription": "Taken branch instructions retired.",
- "PEBS": "1",
+ "BriefDescription": "Loads blocked due to overlapping with a preceding store that cannot be forwarded.",
"Counter": "0,1,2,3",
- "EventName": "BR_INST_RETIRED.NEAR_TAKEN",
- "Errata": "SKL091",
- "PublicDescription": "This event counts taken branch instructions retired.",
- "SampleAfterValue": "400009",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.STORE_FORWARD",
+ "PublicDescription": "Counts the number of times where store forwarding was prevented for a load operation. The most common case is a load blocked due to the address of memory access (partially) overlapping with a preceding uncompleted store. Note: See the table of not supported store forwards in the Optimization Guide.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
},
{
- "EventCode": "0xC4",
- "UMask": "0x40",
- "BriefDescription": "Far branch instructions retired.",
- "PEBS": "1",
+ "AnyThread": "1",
+ "BriefDescription": "Core crystal clock cycles when at least one thread on the physical core is unhalted.",
"Counter": "0,1,2,3",
- "EventName": "BR_INST_RETIRED.FAR_BRANCH",
- "Errata": "SKL091",
- "PublicDescription": "This event counts far branch instructions retired.",
- "SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
+ "SampleAfterValue": "25003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Reference cycles when the core is not in halt state.",
+ "Counter": "Fixed counter 2",
+ "CounterHTOff": "Fixed counter 2",
+ "EventName": "CPU_CLK_UNHALTED.REF_TSC",
+ "PublicDescription": "Counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. This event has a constant ratio with the CPU_CLK_UNHALTED.REF_XCLK event. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. Note: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x3"
},
{
- "EventCode": "0xC5",
- "UMask": "0x0",
"BriefDescription": "All mispredicted macro branch instructions retired.",
"Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
"PublicDescription": "Counts all the retired branch instructions that were mispredicted by the processor. A branch misprediction occurs when the processor incorrectly predicts the destination of the branch. When the misprediction is discovered at execution, all the instructions executed in the wrong (speculative) path must be discarded, and the processor must start fetching from the correct path.",
- "SampleAfterValue": "400009",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "SampleAfterValue": "400009"
},
{
- "EventCode": "0xC5",
- "UMask": "0x1",
- "BriefDescription": "Mispredicted conditional branch instructions retired.",
- "PEBS": "1",
+ "BriefDescription": "The number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use",
"Counter": "0,1,2,3",
- "EventName": "BR_MISP_RETIRED.CONDITIONAL",
- "PublicDescription": "This event counts mispredicted conditional branch instructions retired.",
- "SampleAfterValue": "400009",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.NO_SR",
+ "PublicDescription": "The number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
},
{
- "EventCode": "0xC5",
- "UMask": "0x2",
- "BriefDescription": "Mispredicted direct and indirect near call instructions retired.",
- "PEBS": "1",
+ "BriefDescription": "Self-modifying code (SMC) detected.",
"Counter": "0,1,2,3",
- "EventName": "BR_MISP_RETIRED.NEAR_CALL",
- "PublicDescription": "Counts both taken and not taken retired mispredicted direct and indirect near calls, including both register and memory indirect.",
- "SampleAfterValue": "400009",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xC3",
+ "EventName": "MACHINE_CLEARS.SMC",
+ "PublicDescription": "Counts self-modifying code (SMC) detected, which causes a machine clear.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
},
{
- "EventCode": "0xC5",
- "UMask": "0x4",
- "BriefDescription": "Mispredicted macro branch instructions retired.",
- "PEBS": "2",
+ "AnyThread": "1",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
"Counter": "0,1,2,3",
- "EventName": "BR_MISP_RETIRED.ALL_BRANCHES_PEBS",
- "PublicDescription": "This is a precise version of BR_MISP_RETIRED.ALL_BRANCHES that counts all mispredicted macro branch instructions retired.",
- "SampleAfterValue": "400009",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
+ "SampleAfterValue": "2000003"
},
{
- "EventCode": "0xC5",
- "UMask": "0x20",
- "BriefDescription": "Number of near branch instructions retired that were mispredicted and taken.",
- "PEBS": "1",
+ "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread",
"Counter": "0,1,2,3",
- "EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x5E",
+ "EventName": "RS_EVENTS.EMPTY_CYCLES",
+ "PublicDescription": "Counts cycles during which the reservation station (RS) is empty for the thread.; Note: In ST-mode, not active thread should drive 0. This is usually caused by severely costly branch mispredictions, or allocator/FE issues.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts when there is a transition from ring 1, 2 or 3 to ring 0.",
+ "Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_UNHALTED.RING0_TRANS",
+ "PublicDescription": "Counts when the Current Privilege Level (CPL) transitions from ring 1, 2 or 3 to ring 0 (Kernel).",
+ "SampleAfterValue": "100007"
+ },
+ {
+ "BriefDescription": "All (macro) branch instructions retired.",
+ "Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3",
+ "Errata": "SKL091",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
+ "PEBS": "2",
+ "PublicDescription": "This is a precise version of BR_INST_RETIRED.ALL_BRANCHES that counts all (macro) branch instructions retired.",
"SampleAfterValue": "400009",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
- "EventCode": "0xCC",
- "UMask": "0x20",
- "BriefDescription": "Increments whenever there is an update to the LBR array.",
+ "BriefDescription": "Cycles total of 1 uop is executed on all ports and Reservation Station was not empty.",
"Counter": "0,1,2,3",
- "EventName": "ROB_MISC_EVENTS.LBR_INSERTS",
- "PublicDescription": "Increments when an entry is added to the Last Branch Record (LBR) array (or removed from the array in case of RETURNs in call stack mode). The event requires LBR enable via IA32_DEBUGCTL MSR and branch type selection via MSR_LBR_SELECT.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xA6",
+ "EventName": "EXE_ACTIVITY.1_PORTS_UTIL",
+ "PublicDescription": "Counts cycles during which a total of 1 uop was executed on all ports and Reservation Station (RS) was not empty.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0xCC",
- "UMask": "0x40",
- "BriefDescription": "Number of retired PAUSE instructions (that do not end up with a VMExit to the VMM; TSX aborted Instructions may be counted). This event is not supported on first SKL and KBL products.",
+ "BriefDescription": "Number of uops executed on the core.",
"Counter": "0,1,2,3",
- "EventName": "ROB_MISC_EVENTS.PAUSE_INST",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CORE",
+ "PublicDescription": "Number of uops executed from any thread.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0xE6",
- "UMask": "0x1",
- "BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.",
+ "BriefDescription": "Cycles while L2 cache miss demand load is outstanding.",
"Counter": "0,1,2,3",
- "EventName": "BACLEARS.ANY",
- "PublicDescription": "Counts the number of times the front-end is resteered when it finds a branch instruction in a fetch line. This occurs for the first time a branch instruction is fetched or when the branch is not tracked by the BPU (Branch Prediction Unit) anymore.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
}
] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-memory.json b/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-memory.json
index 22df833fe032..3fb5cdce842f 100644
--- a/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-memory.json
+++ b/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-memory.json
@@ -74,6 +74,22 @@
"Unit": "iMC"
},
{
+ "BriefDescription": "Write requests allocated in the PMM Write Pending Queue for Intel Optane DC persistent memory",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xE3",
+ "EventName": "UNC_M_PMM_RPQ_INSERTS",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write requests allocated in the PMM Write Pending Queue for Intel Optane DC persistent memory",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xE7",
+ "EventName": "UNC_M_PMM_WPQ_INSERTS",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
"BriefDescription": "Intel Optane DC persistent memory bandwidth read (MB/sec). Derived from unc_m_pmm_rpq_inserts",
"Counter": "0,1,2,3",
"EventCode": "0xE3",
@@ -103,6 +119,15 @@
"Unit": "iMC"
},
{
+ "BriefDescription": "Read Pending Queue Occupancy of all read requests for Intel Optane DC persistent memory",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xE0",
+ "EventName": "UNC_M_PMM_RPQ_OCCUPANCY.ALL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
"BriefDescription": "Intel Optane DC persistent memory read latency (ns). Derived from unc_m_pmm_rpq_occupancy.all",
"Counter": "0,1,2,3",
"EventCode": "0xE0",
@@ -113,5 +138,171 @@
"ScaleUnit": "6000000000ns",
"UMask": "0x1",
"Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Page Activate commands sent due to a write request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1",
+ "EventName": "UNC_M_ACT_COUNT.WR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts DRAM Page Activate commands sent on this channel due to a write request to the iMC (Memory Controller). Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS (Column Access Select) command.",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "All DRAM CAS Commands issued",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts all CAS (Column Address Select) commands issued to DRAM per memory channel. CAS commands are issued to specify the address to read or write on DRAM, so this event increments for every read and write. This event counts whether AutoPrecharge (which closes the DRAM Page automatically after a read/write) is enabled or not.",
+ "UMask": "0xF",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "All DRAM Read CAS Commands issued (does not include underfills)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.RD_REG",
+ "PerPkg": "1",
+ "PublicDescription": "Counts CAS (Column Access Select) regular read commands issued to DRAM on a per channel basis. CAS commands are issued to specify the address to read or write on DRAM, and this event increments for every regular read. This event only counts regular reads and does not includes underfill reads due to partial write requests. This event counts whether AutoPrecharge (which closes the DRAM Page automatically after a read/write) is enabled or not.",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Underfill Read CAS Commands issued",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.RD_UNDERFILL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts CAS (Column Access Select) underfill read commands issued to DRAM due to a partial write, on a per channel basis. CAS commands are issued to specify the address to read or write on DRAM, and this command counts underfill reads. Partial writes must be completed by first reading in the underfill from DRAM and then merging in the partial write data before writing the full line back to DRAM. This event will generally count about the same as the number of partial writes, but may be slightly less because of partials hitting in the WPQ (due to a previous write request).",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM CAS (Column Address Strobe) Commands.; DRAM WR_CAS (w/ and w/out auto-pre) in Write Major Mode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.WR_WMM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number or DRAM Write CAS commands issued on this channel while in Write-Major-Mode.",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "All commands for Intel Optane DC persistent memory",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xEA",
+ "EventName": "UNC_M_PMM_CMD1.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "All commands for Intel Optane DC persistent memory",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Regular reads(RPQ) commands for Intel Optane DC persistent memory",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xEA",
+ "EventName": "UNC_M_PMM_CMD1.RD",
+ "PerPkg": "1",
+ "PublicDescription": "All Reads - RPQ or Ufill",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Underfill read commands for Intel Optane DC persistent memory",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xEA",
+ "EventName": "UNC_M_PMM_CMD1.UFILL_RD",
+ "PerPkg": "1",
+ "PublicDescription": "Underfill reads",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write commands for Intel Optane DC persistent memory",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xEA",
+ "EventName": "UNC_M_PMM_CMD1.WR",
+ "PerPkg": "1",
+ "PublicDescription": "Writes",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Occupancy of all write requests for Intel Optane DC persistent memory",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xE4",
+ "EventName": "UNC_M_PMM_WPQ_OCCUPANCY.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Write Pending Queue Occupancy of all write requests for Intel Optane DC persistent memory",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Pending Queue Allocations",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x10",
+ "EventName": "UNC_M_RPQ_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of read requests allocated into the Read Pending Queue (RPQ). This queue is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the CHA to the iMC. The requests deallocate after the read CAS command has been issued to DRAM. This event counts both Isochronous and non-Isochronous requests which were issued to the RPQ.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Pending Queue Occupancy",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x80",
+ "EventName": "UNC_M_RPQ_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries in the Read Pending Queue (RPQ) at each cycle. This can then be used to calculate both the average occupancy of the queue (in conjunction with the number of cycles not empty) and the average latency in the queue (in conjunction with the number of allocations). The RPQ is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the CHA to the iMC. They deallocate from the RPQ after the CAS command has been issued to memory.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "All hits to Near Memory(DRAM cache) in Memory Mode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD3",
+ "EventName": "UNC_M_TAGCHK.HIT",
+ "PerPkg": "1",
+ "PublicDescription": "Tag Check; Hit",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "All Clean line misses to Near Memory(DRAM cache) in Memory Mode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD3",
+ "EventName": "UNC_M_TAGCHK.MISS_CLEAN",
+ "PerPkg": "1",
+ "PublicDescription": "Tag Check; Clean",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "All dirty line misses to Near Memory(DRAM cache) in Memory Mode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD3",
+ "EventName": "UNC_M_TAGCHK.MISS_DIRTY",
+ "PerPkg": "1",
+ "PublicDescription": "Tag Check; Dirty",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Allocations",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x20",
+ "EventName": "UNC_M_WPQ_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of writes requests allocated into the Write Pending Queue (WPQ). The WPQ is used to schedule writes out to the memory controller and to track the requests. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the CHA to the iMC (Memory Controller). The write requests deallocate after being issued to DRAM. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon they have 'posted' to the iMC.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Occupancy",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x81",
+ "EventName": "UNC_M_WPQ_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries in the Write Pending Queue (WPQ) at each cycle. This can then be used to calculate both the average queue occupancy (in conjunction with the number of cycles not empty) and the average latency (in conjunction with the number of allocations). The WPQ is used to schedule writes out to the memory controller and to track the requests. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the CHA to the iMC (memory controller). They deallocate after being issued to DRAM. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon they have 'posted' to the iMC. This is not to be confused with actually performing the write to DRAM. Therefore, the average latency for this queue is actually not useful for deconstruction intermediate write latencies. So, we provide filtering based on if the request has posted or not. By using the 'not posted' filter, we can track how long writes spent in the iMC before completions were sent to the HA. The 'posted' filter, on the other hand, provides information about how much queueing is actually happenning in the iMC for writes before they are actually issued to memory. High average occupancies will generally coincide with high write major mode counts. Is there a filter of sorts???",
+ "Unit": "iMC"
}
]
diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-other.json b/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-other.json
index cab355872dff..df355ba7acc8 100644
--- a/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-other.json
+++ b/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-other.json
@@ -119,47 +119,15 @@
"EventName": "UPI_DATA_BANDWIDTH_TX",
"PerPkg": "1",
"ScaleUnit": "7.11E-06Bytes",
- "UMask": "0x0F",
+ "UMask": "0xf",
"Unit": "UPI LL"
},
{
- "BriefDescription": "PCI Express bandwidth reading at IIO. Derived from unc_iio_data_req_of_cpu.mem_read.part0",
- "Counter": "0,1",
- "EventCode": "0x83",
- "EventName": "LLC_MISSES.PCIE_READ",
- "FCMask": "0x07",
- "Filter": "ch_mask=0x1f",
- "MetricExpr": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART0 + UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART1 + UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART2 + UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART3",
- "MetricName": "LLC_MISSES.PCIE_READ",
- "PerPkg": "1",
- "PortMask": "0x01",
- "ScaleUnit": "4Bytes",
- "UMask": "0x04",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "PCI Express bandwidth writing at IIO. Derived from unc_iio_data_req_of_cpu.mem_write.part0",
- "Counter": "0,1",
- "EventCode": "0x83",
- "EventName": "LLC_MISSES.PCIE_WRITE",
- "FCMask": "0x07",
- "Filter": "ch_mask=0x1f",
- "MetricExpr": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART0 +UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART1 +UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART2 +UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART3",
- "MetricName": "LLC_MISSES.PCIE_WRITE",
- "PerPkg": "1",
- "PortMask": "0x01",
- "ScaleUnit": "4Bytes",
- "UMask": "0x01",
- "Unit": "IIO"
- },
- {
"BriefDescription": "PCI Express bandwidth writing at IIO, part 0",
"Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART0",
"FCMask": "0x07",
- "MetricExpr": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART0 +UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART1 +UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART2 +UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART3",
- "MetricName": "LLC_MISSES.PCIE_WRITE",
"PerPkg": "1",
"PortMask": "0x01",
"ScaleUnit": "4Bytes",
@@ -203,13 +171,26 @@
"Unit": "IIO"
},
{
+ "BriefDescription": "PCI Express bandwidth writing at IIO. Derived from unc_iio_data_req_of_cpu.mem_write.part0",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "LLC_MISSES.PCIE_WRITE",
+ "FCMask": "0x07",
+ "Filter": "ch_mask=0x1f",
+ "MetricExpr": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART0 + UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART1 + UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART2 + UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART3",
+ "MetricName": "LLC_MISSES.PCIE_WRITE",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "ScaleUnit": "4Bytes",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
"BriefDescription": "PCI Express bandwidth reading at IIO, part 0",
"Counter": "0,1",
"EventCode": "0x83",
"EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART0",
"FCMask": "0x07",
- "MetricExpr": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART0 + UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART1 + UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART2 + UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART3",
- "MetricName": "LLC_MISSES.PCIE_READ",
"PerPkg": "1",
"PortMask": "0x01",
"ScaleUnit": "4Bytes",
@@ -251,5 +232,1539 @@
"ScaleUnit": "4Bytes",
"UMask": "0x04",
"Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCI Express bandwidth reading at IIO. Derived from unc_iio_data_req_of_cpu.mem_read.part0",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "LLC_MISSES.PCIE_READ",
+ "FCMask": "0x07",
+ "Filter": "ch_mask=0x1f",
+ "MetricExpr": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART0 + UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART1 + UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART2 + UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART3",
+ "MetricName": "LLC_MISSES.PCIE_READ",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "ScaleUnit": "4Bytes",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued; Multiple Core Requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.CORE_GTONE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
+ "UMask": "0x42",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued; Multiple Eviction",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.EVICT_GTONE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
+ "UMask": "0x82",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory state lookups; Snoop Not Needed",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x53",
+ "EventName": "UNC_CHA_DIR_LOOKUP.NO_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts transactions that looked into the multi-socket cacheline Directory state, and therefore did not send a snoop because the Directory indicated it was not needed",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory state lookups; Snoop Needed",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x53",
+ "EventName": "UNC_CHA_DIR_LOOKUP.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts transactions that looked into the multi-socket cacheline Directory state, and sent one or more snoops, because the Directory indicated it was needed",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory state updates; Directory Updated memory write from the HA pipe",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x54",
+ "EventName": "UNC_CHA_DIR_UPDATE.HA",
+ "PerPkg": "1",
+ "PublicDescription": "Counts only multi-socket cacheline Directory state updates memory writes issued from the HA pipe. This does not include memory write requests which are for I (Invalid) or E (Exclusive) cachelines.",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory state updates; Directory Updated memory write from TOR pipe",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x54",
+ "EventName": "UNC_CHA_DIR_UPDATE.TOR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts only multi-socket cacheline Directory state updates due to memory writes issued from the TOR pipe which are the result of remote transaction hitting the SF/LLC and returning data Core2Core. This does not include memory write requests which are for I (Invalid) or E (Exclusive) cachelines.",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "FaST wire asserted; Horizontal",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA5",
+ "EventName": "UNC_CHA_FAST_ASSERTED.HORZ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles either the local or incoming distress signals are asserted. Incoming distress includes up, dn and across.",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Read request from a remote socket which hit in the HitMe Cache to a line In the E state",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5F",
+ "EventName": "UNC_CHA_HITME_HIT.EX_RDS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts read requests from a remote socket which hit in the HitME cache (used to cache the multi-socket Directory state) to a line in the E(Exclusive) state. This includes the following read opcodes (RdCode, RdData, RdDataMigratory, RdCur, RdInv*, Inv*)",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Normal priority reads issued to the memory controller from the CHA",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x59",
+ "EventName": "UNC_CHA_IMC_READS_COUNT.NORMAL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when a normal (Non-Isochronous) read is issued to any of the memory controller channels from the CHA.",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA to iMC Full Line Writes Issued; Full Line Non-ISOCH",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5B",
+ "EventName": "UNC_CHA_IMC_WRITES_COUNT.FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when a normal (Non-Isochronous) full line write is issued from the CHA to the any of the memory controller channels.",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized; Lines in E state",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.TOTAL_E",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized; Lines in F State",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.TOTAL_F",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized; Lines in M state",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.TOTAL_M",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized; Lines in S State",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.TOTAL_S",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
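The four UNC_CHA_LLC_VICTIMS umasks select disjoint line states (M, E, S, F), so their sum approximates the total number of victimized lines. A small hypothetical helper, assuming the same event/umask encoding as the entries above:

    # Build perf event strings for the LLC victim events, one per line state.
    # The umask values mirror the JSON entries above; the helper name is invented.
    VICTIM_UMASKS = {"M": 0x01, "E": 0x02, "S": 0x04, "F": 0x08}

    def llc_victim_events():
        return ["uncore_cha/event=0x37,umask=%#x/" % um
                for um in VICTIM_UMASKS.values()]

    print(",".join(llc_victim_events()))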
+ {
+ "BriefDescription": "Number of times that an RFO hit in S state.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x39",
+ "EventName": "UNC_CHA_MISC.RFO_HIT_S",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when a RFO (the Read for Ownership issued before a write) request hit a cacheline in the S (Shared) state.",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Local requests for exclusive ownership of a cache line without receiving data",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.INVITOE_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of requests coming from a unit on this socket for exclusive ownership of a cache line without receiving data (INVITOE) to the CHA.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Local requests for exclusive ownership of a cache line without receiving data",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.INVITOE_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of requests coming from a remote socket for exclusive ownership of a cache line without receiving data (INVITOE) to the CHA.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Allocations; IRQ",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_CHA_RxC_INSERTS.IRQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of allocations per cycle into the specified Ingress queue.",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; PhyAddr Match",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x19",
+ "EventName": "UNC_CHA_RxC_IRQ1_REJECT.PA_MATCH",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Request Queue Rejects; PhyAddr Match",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Occupancy; IRQ",
+ "EventCode": "0x11",
+ "EventName": "UNC_CHA_RxC_OCCUPANCY.IRQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of entries in the specified Ingress queue in each cycle.",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop filter capacity evictions for E-state entries.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3D",
+ "EventName": "UNC_CHA_SF_EVICTION.E_STATE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts snoop filter capacity evictions for entries tracking exclusive lines in the cores cache. Snoop filter capacity evictions occur when the snoop filter is full and evicts an existing entry to track a new entry. Does not count clean evictions such as when a cores cache replaces a tracked cacheline with a new cacheline.",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop filter capacity evictions for M-state entries.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3D",
+ "EventName": "UNC_CHA_SF_EVICTION.M_STATE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts snoop filter capacity evictions for entries tracking modified lines in the cores cache. Snoop filter capacity evictions occur when the snoop filter is full and evicts an existing entry to track a new entry. Does not count clean evictions such as when a cores cache replaces a tracked cacheline with a new cacheline.",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop filter capacity evictions for S-state entries.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3D",
+ "EventName": "UNC_CHA_SF_EVICTION.S_STATE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts snoop filter capacity evictions for entries tracking shared lines in the cores cache. Snoop filter capacity evictions occur when the snoop filter is full and evicts an existing entry to track a new entry. Does not count clean evictions such as when a cores cache replaces a tracked cacheline with a new cacheline.",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
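Since the three UNC_CHA_SF_EVICTION umasks are disjoint bits, OR-ing them (0x07) should count all snoop filter capacity evictions in one counter, assuming these umask bits combine as is usual for uncore events; a sustained high rate suggests the snoop filter is being thrashed by the cores' combined footprint.

    import subprocess

    # Count all snoop filter capacity evictions (M|E|S) system-wide for one second.
    subprocess.run([
        "perf", "stat", "-a",
        "-e", "uncore_cha/event=0x3d,umask=0x07/",  # UNC_CHA_SF_EVICTION, M+E+S
        "sleep", "1",
    ], check=False)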
+ {
+ "BriefDescription": "RspCnflct* Snoop Responses Received",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5C",
+ "EventName": "UNC_CHA_SNOOP_RESP.RSPCNFLCTS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when a a transaction with the opcode type RspCnflct* Snoop Response was received. This is returned when a snoop finds an existing outstanding transaction in a remote caching agent. This triggers conflict resolution hardware. This covers both the opcode RspCnflct and RspCnflctWbI.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RspI Snoop Responses Received",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5C",
+ "EventName": "UNC_CHA_SNOOP_RESP.RSPI",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when a transaction with the opcode type RspI Snoop Response was received which indicates the remote cache does not have the data, or when the remote cache silently evicts data (such as when an RFO: the Read for Ownership issued before a write hits non-modified data).",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RspIFwd Snoop Responses Received",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5C",
+ "EventName": "UNC_CHA_SNOOP_RESP.RSPIFWD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when a a transaction with the opcode type RspIFwd Snoop Response was received which indicates a remote caching agent forwarded the data and the requesting agent is able to acquire the data in E (Exclusive) or M (modified) states. This is commonly returned with RFO (the Read for Ownership issued before a write) transactions. The snoop could have either been to a cacheline in the M,E,F (Modified, Exclusive or Forward) states.",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RspSFwd Snoop Responses Received",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5C",
+ "EventName": "UNC_CHA_SNOOP_RESP.RSPSFWD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when a a transaction with the opcode type RspSFwd Snoop Response was received which indicates a remote caching agent forwarded the data but held on to its current copy. This is common for data and code reads that hit in a remote socket in E (Exclusive) or F (Forward) state.",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Rsp*Fwd*WB Snoop Responses Received",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5C",
+ "EventName": "UNC_CHA_SNOOP_RESP.RSP_FWD_WB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when a transaction with the opcode type Rsp*Fwd*WB Snoop Response was received which indicates the data was written back to it's home socket, and the cacheline was forwarded to the requestor socket. This snoop response is only used in >= 4 socket systems. It is used when a snoop HITM's in a remote caching agent and it directly forwards data to a requestor, and simultaneously returns data to it's home socket to be written back to memory.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Rsp*WB Snoop Responses Received",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5C",
+ "EventName": "UNC_CHA_SNOOP_RESP.RSP_WBWB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when a transaction with the opcode type Rsp*WB Snoop Response was received which indicates which indicates the data was written back to it's home. This is returned when a non-RFO request hits a cacheline in the Modified state. The Cache can either downgrade the cacheline to a S (Shared) or I (Invalid) state depending on how the system has been configured. This reponse will also be sent when a cache requests E (Exclusive) ownership of a cache line without receiving data, because the cache must acquire ownership.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
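All six snoop responses share event 0x5C, so one run can collect the whole breakdown: a large RspIFwd/RspSFwd share indicates cache-to-cache forwarding, while Rsp*WB traffic points at dirty-data writebacks. A sketch under the same assumptions as the earlier examples:

    import subprocess

    # Collect the full UNC_CHA_SNOOP_RESP breakdown; umasks are from the entries above.
    RESPONSES = {"RSPI": 0x01, "RSPIFWD": 0x04, "RSPSFWD": 0x08,
                 "RSP_WBWB": 0x10, "RSP_FWD_WB": 0x20, "RSPCNFLCTS": 0x40}
    args = ["perf", "stat", "-a"]
    for umask in RESPONSES.values():
        args += ["-e", "uncore_cha/event=0x5c,umask=%#x/" % umask]
    subprocess.run(args + ["sleep", "1"], check=False)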
+ {
+ "BriefDescription": "Clockticks of the IIO Traffic Controller",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1",
+ "EventName": "UNC_IIO_CLOCKTICKS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts clockticks of the 1GHz trafiic controller clock in the IIO unit.",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for 4 bytes made by the CPU to IIO Part0",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Counts every read request for 4 bytes of data made by a unit on the main die (generally a core) or by another IIO unit to the MMIO space of a card on IIO Part0. In the general case, Part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for 4 bytes made by the CPU to IIO Part1",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Counts every read request for 4 bytes of data made by a unit on the main die (generally a core) or by another IIO unit to the MMIO space of a card on IIO Part1. In the general case, Part1 refers to a x4 PCIe card plugged into the second slot of a PCIe riser card, but it could refer to any x4 device attached to the IIO unit using lanes starting at lane 4 of the 16 lanes supported by the bus.",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for 4 bytes made by the CPU to IIO Part2",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Counts every read request for 4 bytes of data made by a unit on the main die (generally a core) or by another IIO unit to the MMIO space of a card on IIO Part2. In the general case, Part2 refers to a x4 or x8 PCIe card plugged into the third slot of a PCIe riser card, but it could refer to any x4 or x8 device attached to the IIO unit and using lanes starting at lane 8 of the 16 lanes supported by the bus.",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for 4 bytes made by the CPU to IIO Part3",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Counts every read request for 4 bytes of data made by a unit on the main die (generally a core) or by another IIO unit to the MMIO space of a card on IIO Part3. In the general case, Part3 refers to a x4 PCIe card plugged into the fourth slot of a PCIe riser card, but it could brefer to any device attached to the IIO unit using the lanes starting at lane 12 of the 16 lanes supported by the bus.",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of 4 bytes made to IIO Part0 by the CPU",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Counts every write request of 4 bytes of data made to the MMIO space of a card on IIO Part0 by a unit on the main die (generally a core) or by another IIO unit. In the general case, Part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of 4 bytes made to IIO Part1 by the CPU",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Counts every write request of 4 bytes of data made to the MMIO space of a card on IIO Part1 by a unit on the main die (generally a core) or by another IIO unit. In the general case, Part1 refers to a x4 PCIe card plugged into the second slot of a PCIe riser card, but it could refer to any x4 device attached to the IIO unit using lanes starting at lane 4 of the 16 lanes supported by the bus.",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of 4 bytes made to IIO Part2 by the CPU",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Counts every write request of 4 bytes of data made to the MMIO space of a card on IIO Part2 by a unit on the main die (generally a core) or by another IIO unit. In the general case, Part2 refers to a x4 or x8 PCIe card plugged into the third slot of a PCIe riser card, but it could refer to any x4 or x8 device attached to the IIO unit and using lanes starting at lane 8 of the 16 lanes supported by the bus.",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of 4 bytes made to IIO Part3 by the CPU",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Counts every write request of 4 bytes of data made to the MMIO space of a card on IIO Part3 by a unit on the main die (generally a core) or by another IIO unit. In the general case, Part3 refers to a x4 PCIe card plugged into the fourth slot of a PCIe riser card, but it could brefer to any device attached to the IIO unit using the lanes starting at lane 12 of the 16 lanes supported by the bus.",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
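Unlike the CHA events, the IIO events above carry two extra qualifiers: PortMask selects the part (port) and FCMask the flow class. On systems where perf exposes these as the ch_mask and fc_mask format terms (an assumption worth checking under /sys/bus/event_source/devices/uncore_iio_*/format), a Part0 read count could be programmed like this:

    import subprocess

    # Count CPU 4-byte MMIO reads to Part0 of IIO stack 0; ch_mask/fc_mask are
    # assumed to correspond to the PortMask/FCMask fields in the entries above.
    subprocess.run([
        "perf", "stat", "-a",
        "-e", "uncore_iio_0/event=0xc0,umask=0x04,ch_mask=0x01,fc_mask=0x07/",
        "sleep", "1",
    ], check=False)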
+ {
+ "BriefDescription": "Peer to peer read request for 4 bytes made by a different IIO unit to IIO Part0",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Counts ever peer to peer read request for 4 bytes of data made by a different IIO unit to the MMIO space of a card on IIO Part0. Does not include requests made by the same IIO unit. In the general case, Part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer read request for 4 bytes made by a different IIO unit to IIO Part1",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Counts ever peer to peer read request for 4 bytes of data made by a different IIO unit to the MMIO space of a card on IIO Part1. Does not include requests made by the same IIO unit. In the general case, Part1 refers to a x4 PCIe card plugged into the second slot of a PCIe riser card, but it could refer to any x4 device attached to the IIO unit using lanes starting at lane 4 of the 16 lanes supported by the bus.",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer read request for 4 bytes made by a different IIO unit to IIO Part2",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Counts ever peer to peer read request for 4 bytes of data made by a different IIO unit to the MMIO space of a card on IIO Part2. Does not include requests made by the same IIO unit. In the general case, Part2 refers to a x4 or x8 PCIe card plugged into the third slot of a PCIe riser card, but it could refer to any x4 or x8 device attached to the IIO unit and using lanes starting at lane 8 of the 16 lanes supported by the bus.",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer read request for 4 bytes made by a different IIO unit to IIO Part3",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Counts ever peer to peer read request for 4 bytes of data made by a different IIO unit to the MMIO space of a card on IIO Part3. Does not include requests made by the same IIO unit. In the general case, Part3 refers to a x4 PCIe card plugged into the fourth slot of a PCIe riser card, but it could brefer to any device attached to the IIO unit using the lanes starting at lane 12 of the 16 lanes supported by the bus.",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of 4 bytes made to IIO Part0 by a different IIO unit",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Counts every peer to peer write request of 4 bytes of data made to the MMIO space of a card on IIO Part0 by a different IIO unit. Does not include requests made by the same IIO unit. In the general case, Part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of 4 bytes made to IIO Part1 by a different IIO unit",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Counts every peer to peer write request of 4 bytes of data made to the MMIO space of a card on IIO Part1 by a different IIO unit. Does not include requests made by the same IIO unit. In the general case, Part1 refers to a x4 PCIe card plugged into the second slot of a PCIe riser card, but it could refer to any x4 device attached to the IIO unit using lanes starting at lane 4 of the 16 lanes supported by the bus.",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of 4 bytes made to IIO Part2 by a different IIO unit",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Counts every peer to peer write request of 4 bytes of data made to the MMIO space of a card on IIO Part2 by a different IIO unit. Does not include requests made by the same IIO unit. In the general case, Part2 refers to a x4 or x8 PCIe card plugged into the third slot of a PCIe riser card, but it could refer to any x4 or x8 device attached to the IIO unit and using lanes starting at lane 8 of the 16 lanes supported by the bus.",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of 4 bytes made to IIO Part3 by a different IIO unit",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Counts every peer to peer write request of 4 bytes of data made to the MMIO space of a card on IIO Part3 by a different IIO unit. Does not include requests made by the same IIO unit. In the general case, Part3 refers to a x4 PCIe card plugged into the fourth slot of a PCIe riser card, but it could brefer to any device attached to the IIO unit using the lanes starting at lane 12 of the 16 lanes supported by the bus.",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer read request for 4 bytes made by IIO Part0 to an IIO target",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Counts every peer to peer read request for 4 bytes of data made by IIO Part0 to the MMIO space of an IIO target. In the general case, Part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer read request for 4 bytes made by IIO Part1 to an IIO target",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Counts every peer to peer read request for 4 bytes of data made by IIO Part1 to the MMIO space of an IIO target. In the general case, Part1 refers to a x4 PCIe card plugged into the second slot of a PCIe riser card, but it could refer to any x4 device attached to the IIO unit using lanes starting at lane 4 of the 16 lanes supported by the bus.",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer read request for 4 bytes made by IIO Part2 to an IIO target",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Counts every peer to peer read request for 4 bytes of data made by IIO Part2 to the MMIO space of an IIO target. In the general case, Part2 refers to a x4 or x8 PCIe card plugged into the third slot of a PCIe riser card, but it could refer to any x4 or x8 device attached to the IIO unit and using lanes starting at lane 8 of the 16 lanes supported by the bus.",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer read request for 4 bytes made by IIO Part3 to an IIO target",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Counts every peer to peer read request for 4 bytes of data made by IIO Part3 to the MMIO space of an IIO target. In the general case, Part3 refers to a x4 PCIe card plugged into the fourth slot of a PCIe riser card, but it could brefer to any device attached to the IIO unit using the lanes starting at lane 12 of the 16 lanes supported by the bus.",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of 4 bytes made by IIO Part0 to an IIO target",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Counts every peer to peer write request of 4 bytes of data made by IIO Part0 to the MMIO space of an IIO target. In the general case, Part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of 4 bytes made by IIO Part0 to an IIO target",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Counts every peer to peer write request of 4 bytes of data made by IIO Part1 to the MMIO space of an IIO target. In the general case, Part1 refers to a x4 PCIe card plugged into the second slot of a PCIe riser card, but it could refer to any x4 device attached to the IIO unit using lanes starting at lane 4 of the 16 lanes supported by the bus.",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of 4 bytes made by IIO Part0 to an IIO target",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Counts every peer to peer write request of 4 bytes of data made by IIO Part2 to the MMIO space of an IIO target. In the general case, Part2 refers to a x4 or x8 PCIe card plugged into the third slot of a PCIe riser card, but it could refer to any x4 or x8 device attached to the IIO unit and using lanes starting at lane 8 of the 16 lanes supported by the bus.",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of 4 bytes made by IIO Part0 to an IIO target",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Counts every peer to peer write request of 4 bytes of data made by IIO Part3 to the MMIO space of an IIO target. In the general case, Part3 refers to a x4 PCIe card plugged into the fourth slot of a PCIe riser card, but it could brefer to any device attached to the IIO unit using the lanes starting at lane 12 of the 16 lanes supported by the bus.",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for up to a 64 byte transaction is made by the CPU to IIO Part0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Counts every read request for up to a 64 byte transaction of data made by a unit on the main die (generally a core) or by another IIO unit to the MMIO space of a card on IIO Part0. In the general case, part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for up to a 64 byte transaction is made by the CPU to IIO Part1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Counts every read request for up to a 64 byte transaction of data made by a unit on the main die (generally a core) or by another IIO unit to the MMIO space of a card on IIO Part1. In the general case, Part1 refers to a x4 PCIe card plugged into the second slot of a PCIe riser card, but it could refer to any x4 device attached to the IIO unit using lanes starting at lane 4 of the 16 lanes supported by the bus.",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for up to a 64 byte transaction is made by the CPU to IIO Part2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Counts every read request for up to a 64 byte transaction of data made by a unit on the main die (generally a core) or by another IIO unit to the MMIO space of a card on IIO Part2. In the general case, Part2 refers to a x4 or x8 PCIe card plugged into the third slot of a PCIe riser card, but it could refer to any x4 or x8 device attached to the IIO unit and using lanes starting at lane 8 of the 16 lanes supported by the bus.",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for up to a 64 byte transaction is made by the CPU to IIO Part3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Counts every read request for up to a 64 byte transaction of data made by a unit on the main die (generally a core) or by another IIO unit to the MMIO space of a card on IIO Part3. In the general case, Part3 refers to a x4 PCIe card plugged into the fourth slot of a PCIe riser card, but it could brefer to any device attached to the IIO unit using the lanes starting at lane 12 of the 16 lanes supported by the bus.",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of up to a 64 byte transaction is made to IIO Part0 by the CPU",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Counts every write request of up to a 64 byte transaction of data made to the MMIO space of a card on IIO Part0 by a unit on the main die (generally a core) or by another IIO unit. In the general case, Part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of up to a 64 byte transaction is made to IIO Part1 by the CPU",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Counts every write request of up to a 64 byte transaction of data made to the MMIO space of a card on IIO Part1 by a unit on the main die (generally a core) or by another IIO unit. In the general case, Part1 refers to a x4 PCIe card plugged into the second slot of a PCIe riser card, but it could refer to any x4 device attached to the IIO unit using lanes starting at lane 4 of the 16 lanes supported by the bus.",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of up to a 64 byte transaction is made to IIO Part2 by the CPU",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Counts every write request of up to a 64 byte transaction of data made to the MMIO space of a card on IIO Part2 by a unit on the main die (generally a core) or by another IIO unit. In the general case, Part2 refers to a x4 or x8 PCIe card plugged into the third slot of a PCIe riser card, but it could refer to any x4 or x8 device attached to the IIO unit and using lanes starting at lane 8 of the 16 lanes supported by the bus.",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of up to a 64 byte transaction is made to IIO Part3 by the CPU",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Counts every write request of up to a 64 byte transaction of data made to the MMIO space of a card on IIO Part3 by a unit on the main die (generally a core) or by another IIO unit. In the general case, Part3 refers to a x4 PCIe card plugged into the fourth slot of a PCIe riser card, but it could brefer to any device attached to the IIO unit using the lanes starting at lane 12 of the 16 lanes supported by the bus.",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer read request for up to a 64 byte transaction is made by a different IIO unit to IIO Part0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Counts every peer to peer read request for up to a 64 byte transaction of data made by a different IIO unit to the MMIO space of a card on IIO Part0. Does not include requests made by the same IIO unit. In the general case, part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer read request for up to a 64 byte transaction is made by a different IIO unit to IIO Part1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Counts every peer to peer read request for up to a 64 byte transaction of data made by a different IIO unit to the MMIO space of a card on IIO Part1. Does not include requests made by the same IIO unit. In the general case, Part1 refers to a x4 PCIe card plugged into the second slot of a PCIe riser card, but it could refer to any x4 device attached to the IIO unit using lanes starting at lane 4 of the 16 lanes supported by the bus.",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer read request for up to a 64 byte transaction is made by a different IIO unit to IIO Part2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Counts every peer to peer read request for up to a 64 byte transaction of data made by a different IIO unit to the MMIO space of a card on IIO Part2. Does not include requests made by the same IIO unit. In the general case, Part2 refers to a x4 or x8 PCIe card plugged into the third slot of a PCIe riser card, but it could refer to any x4 or x8 device attached to the IIO unit and using lanes starting at lane 8 of the 16 lanes supported by the bus.",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer read request for up to a 64 byte transaction is made by a different IIO unit to IIO Part3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Counts every peer to peer read request for up to a 64 byte transaction of data made by a different IIO unit to the MMIO space of a card on IIO Part3. Does not include requests made by the same IIO unit. In the general case, Part3 refers to a x4 PCIe card plugged into the fourth slot of a PCIe riser card, but it could brefer to any device attached to the IIO unit using the lanes starting at lane 12 of the 16 lanes supported by the bus.",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of up to a 64 byte transaction is made to IIO Part0 by a different IIO unit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Counts every peer to peer write request of up to a 64 byte transaction of data made to the MMIO space of a card on IIO Part0 by a different IIO unit. Does not include requests made by the same IIO unit. In the general case, Part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of up to a 64 byte transaction is made to IIO Part1 by a different IIO unit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Counts every peer to peer write request of up to a 64 byte transaction of data made to the MMIO space of a card on IIO Part1 by a different IIO unit. Does not include requests made by the same IIO unit. In the general case, Part1 refers to a x4 PCIe card plugged into the second slot of a PCIe riser card, but it could refer to any x4 device attached to the IIO unit using lanes starting at lane 4 of the 16 lanes supported by the bus.",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of up to a 64 byte transaction is made to IIO Part2 by a different IIO unit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Counts every peer to peer write request of up to a 64 byte transaction of data made to the MMIO space of a card on IIO Part2 by a different IIO unit. Does not include requests made by the same IIO unit. In the general case, Part2 refers to a x4 or x8 PCIe card plugged into the third slot of a PCIe riser card, but it could refer to any x4 or x8 device attached to the IIO unit and using lanes starting at lane 8 of the 16 lanes supported by the bus.",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of up to a 64 byte transaction is made to IIO Part3 by a different IIO unit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Counts every peer to peer write request of up to a 64 byte transaction of data made to the MMIO space of a card on IIO Part3 by a different IIO unit. Does not include requests made by the same IIO unit. In the general case, Part3 refers to a x4 PCIe card plugged into the fourth slot of a PCIe riser card, but it could brefer to any device attached to the IIO unit using the lanes starting at lane 12 of the 16 lanes supported by the bus.",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for up to a 64 byte transaction is made by IIO Part0 to Memory",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Counts every read request for up to a 64 byte transaction of data made by IIO Part0 to a unit on the main die (generally memory). In the general case, Part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for up to a 64 byte transaction is made by IIO Part1 to Memory",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Counts every read request for up to a 64 byte transaction of data made by IIO Part1 to a unit on the main die (generally memory). In the general case, Part1 refers to a x4 PCIe card plugged into the second slot of a PCIe riser card, but it could refer to any x4 device attached to the IIO unit using lanes starting at lane 4 of the 16 lanes supported by the bus.",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for up to a 64 byte transaction is made by IIO Part2 to Memory",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Counts every read request for up to a 64 byte transaction of data made by IIO Part2 to a unit on the main die (generally memory). In the general case, Part2 refers to a x4 or x8 PCIe card plugged into the third slot of a PCIe riser card, but it could refer to any x4 or x8 device attached to the IIO unit and using lanes starting at lane 8 of the 16 lanes supported by the bus.",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for up to a 64 byte transaction is made by IIO Part3 to Memory",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Counts every read request for up to a 64 byte transaction of data made by IIO Part3 to a unit on the main die (generally memory). In the general case, Part3 refers to a x4 PCIe card plugged into the fourth slot of a PCIe riser card, but it could brefer to any device attached to the IIO unit using the lanes starting at lane 12 of the 16 lanes supported by the bus.",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of up to a 64 byte transaction is made by IIO Part0 to Memory",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Counts every write request of up to a 64 byte transaction of data made by IIO Part0 to a unit on the main die (generally memory). In the general case, Part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of up to a 64 byte transaction is made by IIO Part1 to Memory",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Counts every write request of up to a 64 byte transaction of data made by IIO Part1 to a unit on the main die (generally memory). In the general case, Part1 refers to a x4 PCIe card plugged into the second slot of a PCIe riser card, but it could refer to any x4 device attached to the IIO unit using lanes starting at lane 4 of the 16 lanes supported by the bus.",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of up to a 64 byte transaction is made by IIO Part2 to Memory",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Counts every write request of up to a 64 byte transaction of data made by IIO Part2 to a unit on the main die (generally memory). In the general case, Part2 refers to a x4 or x8 PCIe card plugged into the third slot of a PCIe riser card, but it could refer to any x4 or x8 device attached to the IIO unit and using lanes starting at lane 8 of the 16 lanes supported by the bus.",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of up to a 64 byte transaction is made by IIO Part3 to Memory",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Counts every write request of up to a 64 byte transaction of data made by IIO Part3 to a unit on the main die (generally memory). In the general case, Part3 refers to a x4 PCIe card plugged into the fourth slot of a PCIe riser card, but it could brefer to any device attached to the IIO unit using the lanes starting at lane 12 of the 16 lanes supported by the bus.",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
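Because each 0x84 MEM_READ/MEM_WRITE count covers a transaction of up to 64 bytes, multiplying the counts by 64 yields an upper bound on a device's DMA bandwidth. A rough sketch, with the caveat that 64 bytes is a ceiling per transaction, not an exact size:

    import subprocess

    # Upper-bound DMA read/write bandwidth for the device on Part0 of IIO stack 0.
    cmd = ["perf", "stat", "-x,", "-a",
           "-e", "uncore_iio_0/event=0x84,umask=0x04,ch_mask=0x01,fc_mask=0x07/",
           "-e", "uncore_iio_0/event=0x84,umask=0x01,ch_mask=0x01,fc_mask=0x07/",
           "sleep", "1"]
    result = subprocess.run(cmd, capture_output=True, text=True, check=False)
    for line in result.stderr.splitlines():  # perf -x writes CSV rows to stderr
        count = line.split(",")[0].strip()
        if count.isdigit():
            print("<= %.1f MB/s" % (int(count) * 64 / 1e6))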
+ {
+ "BriefDescription": "Peer to peer read request of up to a 64 byte transaction is made by IIO Part0 to an IIO target",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Counts every peer to peer read request of up to a 64 byte transaction made by IIO Part0 to the MMIO space of an IIO target. In the general case, Part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer read request of up to a 64 byte transaction is made by IIO Part1 to an IIO target",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Counts every peer to peer read request of up to a 64 byte transaction made by IIO Part1 to the MMIO space of an IIO target. In the general case, Part1 refers to a x4 PCIe card plugged into the second slot of a PCIe riser card, but it could refer to any x4 device attached to the IIO unit using lanes starting at lane 4 of the 16 lanes supported by the bus.",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer read request of up to a 64 byte transaction is made by IIO Part2 to an IIO target",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Counts every peer to peer read request of up to a 64 byte transaction made by IIO Part2 to the MMIO space of an IIO target. In the general case, Part2 refers to a x4 or x8 PCIe card plugged into the third slot of a PCIe riser card, but it could refer to any x4 or x8 device attached to the IIO unit and using lanes starting at lane 8 of the 16 lanes supported by the bus.",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer read request of up to a 64 byte transaction is made by IIO Part3 to an IIO target",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Counts every peer to peer read request of up to a 64 byte transaction made by IIO Part3 to the MMIO space of an IIO target. In the general case, Part3 refers to a x4 PCIe card plugged into the fourth slot of a PCIe riser card, but it could brefer to any device attached to the IIO unit using the lanes starting at lane 12 of the 16 lanes supported by the bus.",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of up to a 64 byte transaction is made by IIO Part0 to an IIO target",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Counts every peer to peer write request of up to a 64 byte transaction of data made by IIO Part0 to the MMIO space of an IIO target. In the general case, Part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of up to a 64 byte transaction is made by IIO Part1 to an IIO target",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Counts every peer to peer write request of up to a 64 byte transaction of data made by IIO Part1 to the MMIO space of an IIO target.In the general case, Part1 refers to a x4 PCIe card plugged into the second slot of a PCIe riser card, but it could refer to any x4 device attached to the IIO unit using lanes starting at lane 4 of the 16 lanes supported by the bus.",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of up to a 64 byte transaction is made by IIO Part2 to an IIO target",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Counts every peer to peer write request of up to a 64 byte transaction of data made by IIO Part2 to the MMIO space of an IIO target. In the general case, Part2 refers to a x4 or x8 PCIe card plugged into the third slot of a PCIe riser card, but it could refer to any x4 or x8 device attached to the IIO unit and using lanes starting at lane 8 of the 16 lanes supported by the bus.",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of up to a 64 byte transaction is made by IIO Part3 to an IIO target",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Counts every peer to peer write request of up to a 64 byte transaction of data made by IIO Part3 to the MMIO space of an IIO target. In the general case, Part3 refers to a x4 PCIe card plugged into the fourth slot of a PCIe riser card, but it could brefer to any device attached to the IIO unit using the lanes starting at lane 12 of the 16 lanes supported by the bus.",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Traffic in which the M2M to iMC Bypass was not taken",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x22",
+ "EventName": "UNC_M2M_BYPASS_M2M_Egress.NOT_TAKEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts traffic in which the M2M (Mesh to Memory) to iMC (Memory Controller) bypass was not taken",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles when direct to core mode (which bypasses the CHA) was disabled",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2M_DIRECT2CORE_NOT_TAKEN_DIRSTATE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles when direct to core mode (which bypasses the CHA) was disabled",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Messages sent direct to core (bypassing the CHA)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x23",
+ "EventName": "UNC_M2M_DIRECT2CORE_TAKEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when messages were sent direct to core (bypassing the CHA)",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Number of reads in which direct to core transaction were overridden",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2M_DIRECT2CORE_TXN_OVERRIDE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts reads in which direct to core transactions (which would have bypassed the CHA) were overridden",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Number of reads in which direct to Intel UPI transactions were overridden",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x28",
+ "EventName": "UNC_M2M_DIRECT2UPI_NOT_TAKEN_CREDITS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts reads in which direct to Intel Ultra Path Interconnect (UPI) transactions (which would have bypassed the CHA) were overridden",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles when direct to Intel UPI was disabled",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x27",
+ "EventName": "UNC_M2M_DIRECT2UPI_NOT_TAKEN_DIRSTATE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles when the ability to send messages direct to the Intel Ultra Path Interconnect (bypassing the CHA) was disabled",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Messages sent direct to the Intel UPI",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x26",
+ "EventName": "UNC_M2M_DIRECT2UPI_TAKEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when messages were sent direct to the Intel Ultra Path Interconnect (bypassing the CHA)",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Number of reads that a message sent direct2 Intel UPI was overridden",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x29",
+ "EventName": "UNC_M2M_DIRECT2UPI_TXN_OVERRIDE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when a read message that was sent direct to the Intel Ultra Path Interconnect (bypassing the CHA) was overridden",
+ "Unit": "M2M"
+ },
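Comparing DIRECT2CORE_TAKEN against DIRECT2CORE_TXN_OVERRIDE (and likewise for the UPI variants) shows how often the CHA-bypassing fast paths actually fired. A sketch reusing the M2M event codes above; perf prefix-matches "uncore_m2m" across all M2M boxes:

    import subprocess

    # Measure direct-to-core deliveries taken vs. overridden, system-wide.
    subprocess.run([
        "perf", "stat", "-a",
        "-e", "uncore_m2m/event=0x23/",  # UNC_M2M_DIRECT2CORE_TAKEN
        "-e", "uncore_m2m/event=0x25/",  # UNC_M2M_DIRECT2CORE_TXN_OVERRIDE
        "sleep", "1",
    ], check=False)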
+ {
+ "BriefDescription": "Multi-socket cacheline Directory lookups (any state found)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2D",
+ "EventName": "UNC_M2M_DIRECTORY_LOOKUP.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when the M2M (Mesh to Memory) looks into the multi-socket cacheline Directory state, and found the cacheline marked in Any State (A, I, S or unused)",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory lookups (cacheline found in A state)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2D",
+ "EventName": "UNC_M2M_DIRECTORY_LOOKUP.STATE_A",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when the M2M (Mesh to Memory) looks into the multi-socket cacheline Directory state, and found the cacheline marked in the A (SnoopAll) state, indicating the cacheline is stored in another socket in any state, and we must snoop the other sockets to make sure we get the latest data. The data may be stored in any state in the local socket.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory lookup (cacheline found in I state)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2D",
+ "EventName": "UNC_M2M_DIRECTORY_LOOKUP.STATE_I",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when the M2M (Mesh to Memory) looks into the multi-socket cacheline Directory state , and found the cacheline marked in the I (Invalid) state indicating the cacheline is not stored in another socket, and so there is no need to snoop the other sockets for the latest data. The data may be stored in any state in the local socket.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory lookup (cacheline found in S state)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2D",
+ "EventName": "UNC_M2M_DIRECTORY_LOOKUP.STATE_S",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when the M2M (Mesh to Memory) looks into the multi-socket cacheline Directory state , and found the cacheline marked in the S (Shared) state indicating the cacheline is either stored in another socket in the S(hared) state , and so there is no need to snoop the other sockets for the latest data. The data may be stored in any state in the local socket.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory update from A to I",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2E",
+ "EventName": "UNC_M2M_DIRECTORY_UPDATE.A2I",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when the M2M (Mesh to Memory) updates the multi-socket cacheline Directory state from from A (SnoopAll) to I (Invalid)",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory update from A to S",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2E",
+ "EventName": "UNC_M2M_DIRECTORY_UPDATE.A2S",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when the M2M (Mesh to Memory) updates the multi-socket cacheline Directory state from from A (SnoopAll) to S (Shared)",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory update from/to Any state",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2E",
+ "EventName": "UNC_M2M_DIRECTORY_UPDATE.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when the M2M (Mesh to Memory) updates the multi-socket cacheline Directory to a new state",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory update from I to A",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2E",
+ "EventName": "UNC_M2M_DIRECTORY_UPDATE.I2A",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when the M2M (Mesh to Memory) updates the multi-socket cacheline Directory state from from I (Invalid) to A (SnoopAll)",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory update from I to S",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2E",
+ "EventName": "UNC_M2M_DIRECTORY_UPDATE.I2S",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when the M2M (Mesh to Memory) updates the multi-socket cacheline Directory state from from I (Invalid) to S (Shared)",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory update from S to A",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2E",
+ "EventName": "UNC_M2M_DIRECTORY_UPDATE.S2A",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when the M2M (Mesh to Memory) updates the multi-socket cacheline Directory state from from S (Shared) to A (SnoopAll)",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory update from S to I",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2E",
+ "EventName": "UNC_M2M_DIRECTORY_UPDATE.S2I",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when the M2M (Mesh to Memory) updates the multi-socket cacheline Directory state from from S (Shared) to I (Invalid)",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Reads to iMC issued",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2M_IMC_READS.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when the M2M (Mesh to Memory) issues reads to the iMC (Memory Controller).",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Reads to iMC issued at Normal Priority (Non-Isochronous)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2M_IMC_READS.NORMAL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when the M2M (Mesh to Memory) issues reads to the iMC (Memory Controller). It only counts normal priority non-isochronous reads.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Read requests to Intel Optane DC persistent memory issued to the iMC from M2M",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2M_IMC_READS.TO_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "M2M Reads Issued to iMC; All, regardless of priority.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Writes to iMC issued",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when the M2M (Mesh to Memory) issues writes to the iMC (Memory Controller).",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC; All, regardless of priority.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.NI",
+ "PerPkg": "1",
+ "PublicDescription": "M2M Writes Issued to iMC; All, regardless of priority.",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Partial Non-Isochronous writes to the iMC",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.PARTIAL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when the M2M (Mesh to Memory) issues partial writes to the iMC (Memory Controller). It only counts normal priority non-isochronous writes.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write requests to Intel Optane DC persistent memory issued to the iMC from M2M",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.TO_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "M2M Writes Issued to iMC; All, regardless of priority.",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefecth requests that got turn into a demand request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x56",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_PROMOTIONS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when the M2M (Mesh to Memory) promotes a outstanding request in the prefetch queue due to a subsequent demand read request that entered the M2M with the same address. Explanatory Side Note: The Prefecth queue is made of CAM (Content Addressable Memory)",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Inserts into the Memory Controller Prefetch Queue",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x57",
+ "EventName": "UNC_M2M_PREFCAM_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when the M2M (Mesh to Memory) recieves a prefetch request and inserts it into its outstanding prefetch queue. Explanatory Side Note: the prefect queue is made from CAM: Content Addressable Memory",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AD Ingress (from CMS) Queue Inserts",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1",
+ "EventName": "UNC_M2M_RxC_AD_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when the a new entry is Received(RxC) and then added to the AD (Address Ring) Ingress Queue from the CMS (Common Mesh Stop). This is generally used for reads, and",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AD Ingress (from CMS) Occupancy",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_M2M_RxC_AD_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "AD Ingress (from CMS) Occupancy",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Ingress (from CMS) Allocations",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5",
+ "EventName": "UNC_M2M_RxC_BL_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "BL Ingress (from CMS) Allocations",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Ingress (from CMS) Occupancy",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x6",
+ "EventName": "UNC_M2M_RxC_BL_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "BL Ingress (from CMS) Occupancy",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Dirty line read hits(Regular and RFO) to Near Memory(DRAM cache) in Memory Mode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2C",
+ "EventName": "UNC_M2M_TAG_HIT.NM_RD_HIT_DIRTY",
+ "PerPkg": "1",
+ "PublicDescription": "Tag Hit; Read Hit from NearMem, Dirty Line",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Clean line underfill read hits to Near Memory(DRAM cache) in Memory Mode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2C",
+ "EventName": "UNC_M2M_TAG_HIT.NM_UFILL_HIT_CLEAN",
+ "PerPkg": "1",
+ "PublicDescription": "Tag Hit; Underfill Rd Hit from NearMem, Clean Line",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Dirty line underfill read hits to Near Memory(DRAM cache) in Memory Mode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2C",
+ "EventName": "UNC_M2M_TAG_HIT.NM_UFILL_HIT_DIRTY",
+ "PerPkg": "1",
+ "PublicDescription": "Tag Hit; Underfill Rd Hit from NearMem, Dirty Line",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AD Egress (to CMS) Allocations",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9",
+ "EventName": "UNC_M2M_TxC_AD_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "AD Egress (to CMS) Allocations",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AD Egress (to CMS) Occupancy",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA",
+ "EventName": "UNC_M2M_TxC_AD_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "AD Egress (to CMS) Occupancy",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Allocations; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x15",
+ "EventName": "UNC_M2M_TxC_BL_INSERTS.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "BL Egress (to CMS) Allocations; All",
+ "UMask": "0x03",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Occupancy; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x16",
+ "EventName": "UNC_M2M_TxC_BL_OCCUPANCY.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "BL Egress (to CMS) Occupancy; All",
+ "UMask": "0x03",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetches generated by the flow control queue of the M3UPI unit.",
+ "Counter": "0,1,2",
+ "EventCode": "0x29",
+ "EventName": "UNC_M3UPI_UPI_PREFETCH_SPAWN",
+ "PerPkg": "1",
+ "PublicDescription": "Count cases where flow control queue that sits between the Intel Ultra Path Interconnect (UPI) and the mesh spawns a prefetch to the iMC (Memory Controller)",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Clocks of the Intel Ultra Path Interconnect (UPI)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1",
+ "EventName": "UNC_UPI_CLOCKTICKS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts clockticks of the fixed frequency clock controlling the Intel Ultra Path Interconnect (UPI). This clock runs at1/8th the 'GT/s' speed of the UPI link. For example, a 9.6GT/s link will have a fixed Frequency of 1.2 Ghz.",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Data Response packets that go direct to core",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x12",
+ "EventName": "UNC_UPI_DIRECT_ATTEMPTS.D2C",
+ "PerPkg": "1",
+ "PublicDescription": "Counts Data Response (DRS) packets that attempted to go direct to core bypassing the CHA.",
+ "UMask": "0x1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Data Response packets that go direct to Intel UPI",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x12",
+ "EventName": "UNC_UPI_DIRECT_ATTEMPTS.D2U",
+ "PerPkg": "1",
+ "PublicDescription": "Counts Data Response (DRS) packets that attempted to go direct to Intel Ultra Path Interconnect (UPI) bypassing the CHA .",
+ "UMask": "0x2",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Cycles Intel UPI is in L1 power mode (shutdown)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x21",
+ "EventName": "UNC_UPI_L1_POWER_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles when the Intel Ultra Path Interconnect (UPI) is in L1 power mode. L1 is a mode that totally shuts down the UPI link. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another, this event only coutns when both links are shutdown.",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Cycles the Rx of the Intel UPI is in L0p power mode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x25",
+ "EventName": "UNC_UPI_RxL0P_POWER_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles when the the receive side (Rx) of the Intel Ultra Path Interconnect(UPI) is in L0p power mode. L0p is a mode where we disable 60% of the UPI lanes, decreasing our bandwidth in order to save power.",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "FLITs received which bypassed the Slot0 Receive Buffer",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x31",
+ "EventName": "UNC_UPI_RxL_BYPASSED.SLOT0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts incoming FLITs (FLow control unITs) which bypassed the slot0 RxQ buffer (Receive Queue) and passed directly to the Egress. This is a latency optimization, and should generally be the common case. If this value is less than the number of FLITs transfered, it implies that there was queueing getting onto the ring, and thus the transactions saw higher latency.",
+ "UMask": "0x1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "FLITs received which bypassed the Slot0 Receive Buffer",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x31",
+ "EventName": "UNC_UPI_RxL_BYPASSED.SLOT1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts incoming FLITs (FLow control unITs) which bypassed the slot1 RxQ buffer (Receive Queue) and passed directly across the BGF and into the Egress. This is a latency optimization, and should generally be the common case. If this value is less than the number of FLITs transfered, it implies that there was queueing getting onto the ring, and thus the transactions saw higher latency.",
+ "UMask": "0x2",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "FLITs received which bypassed the Slot0 Recieve Buffer",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x31",
+ "EventName": "UNC_UPI_RxL_BYPASSED.SLOT2",
+ "PerPkg": "1",
+ "PublicDescription": "Counts incoming FLITs (FLow control unITs) whcih bypassed the slot2 RxQ buffer (Receive Queue) and passed directly to the Egress. This is a latency optimization, and should generally be the common case. If this value is less than the number of FLITs transfered, it implies that there was queueing getting onto the ring, and thus the transactions saw higher latency.",
+ "UMask": "0x4",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Valid data FLITs received from any slot",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_UPI_RxL_FLITS.ALL_DATA",
+ "PerPkg": "1",
+ "PublicDescription": "Counts valid data FLITs (80 bit FLow control unITs: 64bits of data) received from any of the 3 Intel Ultra Path Interconnect (UPI) Receive Queue slots on this UPI unit.",
+ "UMask": "0x0F",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Null FLITs received from any slot",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_UPI_RxL_FLITS.ALL_NULL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts null FLITs (80 bit FLow control unITs) received from any of the 3 Intel Ultra Path Interconnect (UPI) Receive Queue slots on this UPI unit.",
+ "UMask": "0x27",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Protocol header and credit FLITs received from any slot",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_UPI_RxL_FLITS.NON_DATA",
+ "PerPkg": "1",
+ "PublicDescription": "Counts protocol header and credit FLITs (80 bit FLow control unITs) received from any of the 3 UPI slots on this UPI unit.",
+ "UMask": "0x97",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Cycles in which the Tx of the Intel Ultra Path Interconnect (UPI) is in L0p power mode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x27",
+ "EventName": "UNC_UPI_TxL0P_POWER_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles when the transmit side (Tx) of the Intel Ultra Path Interconnect(UPI) is in L0p power mode. L0p is a mode where we disable 60% of the UPI lanes, decreasing our bandwidth in order to save power.",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "FLITs that bypassed the TxL Buffer",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_UPI_TxL_BYPASSED",
+ "PerPkg": "1",
+ "PublicDescription": "Counts incoming FLITs (FLow control unITs) which bypassed the TxL(transmit) FLIT buffer and pass directly out the UPI Link. Generally, when data is transmitted across the Intel Ultra Path Interconnect (UPI), it will bypass the TxQ and pass directly to the link. However, the TxQ will be used in L0p (Low Power) mode and (Link Layer Retry) LLR mode, increasing latency to transfer out to the link.",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Null FLITs transmitted from any slot",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_UPI_TxL_FLITS.ALL_NULL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts null FLITs (80 bit FLow control unITs) transmitted via any of the 3 Intel Ulra Path Interconnect (UPI) slots on this UPI unit.",
+ "UMask": "0x27",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent; Data",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_UPI_TxL_FLITS.DATA",
+ "PerPkg": "1",
+ "PublicDescription": "Shows legal flit time (hides impact of L0p and L0c).; Count Data Flits (which consume all slots), but how much to count is based on Slot0-2 mask, so count can be 0-3 depending on which slots are enabled for counting..",
+ "UMask": "0x8",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Idle FLITs transmitted",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_UPI_TxL_FLITS.IDLE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when the Intel Ultra Path Interconnect(UPI) transmits an idle FLIT(80 bit FLow control unITs). Every UPI cycle must be sending either data FLITs, protocol/credit FLITs or idle FLITs.",
+ "UMask": "0x47",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Protocol header and credit FLITs transmitted across any slot",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_UPI_TxL_FLITS.NON_DATA",
+ "PerPkg": "1",
+ "PublicDescription": "Counts protocol header and credit FLITs (80 bit FLow control unITs) transmitted across any of the 3 UPI (Ultra Path Interconnect) slots on this UPI unit.",
+ "UMask": "0x97",
+ "Unit": "UPI LL"
}
]
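The UPI link-layer events above compose into derived metrics: UNC_UPI_CLOCKTICKS fixes the time base (the UPI clock runs at 1/8th the link's GT/s rate, per the event description), and each data FLIT carries 64 bits of payload. A minimal post-processing sketch in Python, assuming counter values were collected separately (e.g. with perf stat) and a 9.6 GT/s link; the helper name and the sample numbers are illustrative, not part of the event files:

    def upi_rx_data_gbps(data_flits: float, clockticks: float, link_gts: float = 9.6) -> float:
        """Approximate received data bandwidth on one UPI link direction.

        data_flits  -- UNC_UPI_RxL_FLITS.ALL_DATA (64 data bits per 80-bit FLIT)
        clockticks  -- UNC_UPI_CLOCKTICKS (fixed clock at link_gts/8 GHz)
        """
        clock_hz = link_gts / 8.0 * 1e9      # e.g. 9.6 GT/s -> 1.2 GHz fixed clock
        seconds = clockticks / clock_hz      # measurement interval implied by the clock
        payload_bits = data_flits * 64       # payload bits carried by the data FLITs
        return payload_bits / seconds / 1e9  # Gbit/s

    # Illustrative: 1.2e9 clockticks is ~1 s at 9.6 GT/s.
    print(upi_rx_data_gbps(data_flits=1.0e9, clockticks=1.2e9))  # 64.0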
diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/virtual-memory.json b/tools/perf/pmu-events/arch/x86/cascadelakex/virtual-memory.json
index 579733168e23..d13b4111eb52 100644
--- a/tools/perf/pmu-events/arch/x86/cascadelakex/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/cascadelakex/virtual-memory.json
@@ -1,285 +1,284 @@
[
{
- "EventCode": "0x08",
- "UMask": "0x1",
- "BriefDescription": "Load misses in all DTLB levels that cause page walks",
+ "BriefDescription": "Page walk completed due to a demand data store to a 2M/4M page",
"Counter": "0,1,2,3",
- "EventName": "DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK",
- "PublicDescription": "Counts demand data loads that caused a page walk of any page size (4K/2M/4M/1G). This implies it missed in all TLB levels, but the walk need not have completed.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M",
+ "PublicDescription": "Counts page walks completed due to demand data stores whose address translations missed in the TLB and were mapped to 2M/4M pages. The page walks can end with or without a page fault.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
- "EventCode": "0x08",
- "UMask": "0x2",
- "BriefDescription": "Page walk completed due to a demand data load to a 4K page",
+ "BriefDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for an instruction fetch request. EPT page walk duration are excluded in Skylake.",
"Counter": "0,1,2,3",
- "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_4K",
- "PublicDescription": "Counts page walks completed due to demand data loads whose address translations missed in the TLB and were mapped to 4K pages. The page walks can end with or without a page fault.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.WALK_PENDING",
+ "PublicDescription": "Counts 1 per cycle for each PMH (Page Miss Handler) that is busy with a page walk for an instruction fetch request. EPT page walk duration are excluded in Skylake michroarchitecture.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
},
{
- "EventCode": "0x08",
- "UMask": "0x4",
- "BriefDescription": "Page walk completed due to a demand data load to a 2M/4M page",
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (All page sizes)",
"Counter": "0,1,2,3",
- "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M",
- "PublicDescription": "Counts page walks completed due to demand data loads whose address translations missed in the TLB and were mapped to 2M/4M pages. The page walks can end with or without a page fault.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Counts completed page walks (2M and 4M page sizes) caused by a code fetch. This implies it missed in the ITLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0xe"
},
{
- "EventCode": "0x08",
- "UMask": "0x8",
- "BriefDescription": "Page walk completed due to a demand data load to a 1G page",
+ "BriefDescription": "Load misses in all DTLB levels that cause page walks",
"Counter": "0,1,2,3",
- "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_1G",
- "PublicDescription": "Counts page walks completed due to demand data loads whose address translations missed in the TLB and were mapped to 4K pages. The page walks can end with or without a page fault.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0x08",
- "UMask": "0xe",
- "BriefDescription": "Load miss in all TLB levels causes a page walk that completes. (All page sizes)",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
- "PublicDescription": "Counts demand data loads that caused a completed page walk of any page size (4K/2M/4M/1G). This implies it missed in all TLB levels. The page walk can end with or without a fault.",
+ "EventName": "DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK",
+ "PublicDescription": "Counts demand data loads that caused a page walk of any page size (4K/2M/4M/1G). This implies it missed in all TLB levels, but the walk need not have completed.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0x08",
- "UMask": "0x10",
- "BriefDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a load. EPT page walk duration are excluded in Skylake.",
+ "BriefDescription": "STLB flush attempts",
"Counter": "0,1,2,3",
- "EventName": "DTLB_LOAD_MISSES.WALK_PENDING",
- "PublicDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a load. EPT page walk duration are excluded in Skylake microarchitecture.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xBD",
+ "EventName": "TLB_FLUSH.STLB_ANY",
+ "PublicDescription": "Counts the number of any STLB flush attempts (such as entire, VPID, PCID, InvPage, CR3 write, etc.).",
+ "SampleAfterValue": "100007",
+ "UMask": "0x20"
},
{
- "EventCode": "0x08",
- "UMask": "0x10",
"BriefDescription": "Cycles when at least one PMH is busy with a page walk for a load. EPT page walk duration are excluded in Skylake.",
"Counter": "0,1,2,3",
- "EventName": "DTLB_LOAD_MISSES.WALK_ACTIVE",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.WALK_ACTIVE",
"PublicDescription": "Counts cycles when at least one PMH (Page Miss Handler) is busy with a page walk for a load.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
- "EventCode": "0x08",
- "UMask": "0x20",
- "BriefDescription": "Loads that miss the DTLB and hit the STLB.",
+ "BriefDescription": "Page walk completed due to a demand data store to a 1G page",
"Counter": "0,1,2,3",
- "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
- "PublicDescription": "Counts loads that miss the DTLB (Data TLB) and hit the STLB (Second level TLB).",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_1G",
+ "PublicDescription": "Counts page walks completed due to demand data stores whose address translations missed in the TLB and were mapped to 1G pages. The page walks can end with or without a page fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
},
{
- "EventCode": "0x49",
- "UMask": "0x1",
"BriefDescription": "Store misses in all DTLB levels that cause page walks",
"Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.MISS_CAUSES_A_WALK",
"PublicDescription": "Counts demand data stores that caused a page walk of any page size (4K/2M/4M/1G). This implies it missed in all TLB levels, but the walk need not have completed.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0x49",
- "UMask": "0x2",
- "BriefDescription": "Page walk completed due to a demand data store to a 4K page",
+ "BriefDescription": "Flushing of the Instruction TLB (ITLB) pages, includes 4k/2M/4M pages.",
"Counter": "0,1,2,3",
- "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_4K",
- "PublicDescription": "Counts page walks completed due to demand data stores whose address translations missed in the TLB and were mapped to 4K pages. The page walks can end with or without a page fault.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xAE",
+ "EventName": "ITLB.ITLB_FLUSH",
+ "PublicDescription": "Counts the number of flushes of the big or small ITLB pages. Counting include both TLB Flush (covering all sets) and TLB Set Clear (set-specific).",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a store. EPT page walk duration are excluded in Skylake.",
+ "Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0x49",
- "UMask": "0x4",
- "BriefDescription": "Page walk completed due to a demand data store to a 2M/4M page",
+ "EventName": "DTLB_STORE_MISSES.WALK_PENDING",
+ "PublicDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a store. EPT page walk duration are excluded in Skylake microarchitecture.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Page walk completed due to a demand data load to a 4K page",
"Counter": "0,1,2,3",
- "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M",
- "PublicDescription": "Counts page walks completed due to demand data stores whose address translations missed in the TLB and were mapped to 2M/4M pages. The page walks can end with or without a page fault.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_4K",
+ "PublicDescription": "Counts page walks completed due to demand data loads whose address translations missed in the TLB and were mapped to 4K pages. The page walks can end with or without a page fault.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "EventCode": "0x49",
- "UMask": "0x8",
- "BriefDescription": "Page walk completed due to a demand data store to a 1G page",
+ "BriefDescription": "Page walk completed due to a demand data load to a 2M/4M page",
"Counter": "0,1,2,3",
- "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_1G",
- "PublicDescription": "Counts page walks completed due to demand data stores whose address translations missed in the TLB and were mapped to 1G pages. The page walks can end with or without a page fault.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M",
+ "PublicDescription": "Counts page walks completed due to demand data loads whose address translations missed in the TLB and were mapped to 2M/4M pages. The page walks can end with or without a page fault.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
},
{
- "EventCode": "0x49",
- "UMask": "0xe",
- "BriefDescription": "Store misses in all TLB levels causes a page walk that completes. (All page sizes)",
+ "BriefDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a load. EPT page walk duration are excluded in Skylake.",
"Counter": "0,1,2,3",
- "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
- "PublicDescription": "Counts demand data stores that caused a completed page walk of any page size (4K/2M/4M/1G). This implies it missed in all TLB levels. The page walk can end with or without a fault.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.WALK_PENDING",
+ "PublicDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a load. EPT page walk duration are excluded in Skylake microarchitecture.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
},
{
- "EventCode": "0x49",
- "UMask": "0x10",
- "BriefDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a store. EPT page walk duration are excluded in Skylake.",
+ "BriefDescription": "Counts 1 per cycle for each PMH that is busy with a EPT (Extended Page Table) walk for any request type.",
"Counter": "0,1,2,3",
- "EventName": "DTLB_STORE_MISSES.WALK_PENDING",
- "PublicDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a store. EPT page walk duration are excluded in Skylake microarchitecture.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x4F",
+ "EventName": "EPT.WALK_PENDING",
+ "PublicDescription": "Counts cycles for each PMH (Page Miss Handler) that is busy with an EPT (Extended Page Table) walk for any request type.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
- "EventCode": "0x49",
- "UMask": "0x10",
"BriefDescription": "Cycles when at least one PMH is busy with a page walk for a store. EPT page walk duration are excluded in Skylake.",
"Counter": "0,1,2,3",
- "EventName": "DTLB_STORE_MISSES.WALK_ACTIVE",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.WALK_ACTIVE",
"PublicDescription": "Counts cycles when at least one PMH (Page Miss Handler) is busy with a page walk for a store.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Misses at all ITLB levels that cause page walks",
+ "Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.MISS_CAUSES_A_WALK",
+ "PublicDescription": "Counts page walks of any page size (4K/2M/4M/1G) caused by a code fetch. This implies it missed in the ITLB and further levels of TLB, but the walk need not have completed.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0x49",
- "UMask": "0x20",
"BriefDescription": "Stores that miss the DTLB and hit the STLB.",
"Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.STLB_HIT",
"PublicDescription": "Stores that miss the DTLB (Data TLB) and hit the STLB (2nd Level TLB).",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x20"
},
{
- "EventCode": "0x4F",
- "UMask": "0x10",
- "BriefDescription": "Counts 1 per cycle for each PMH that is busy with a EPT (Extended Page Table) walk for any request type.",
+ "BriefDescription": "Load miss in all TLB levels causes a page walk that completes. (All page sizes)",
"Counter": "0,1,2,3",
- "EventName": "EPT.WALK_PENDING",
- "PublicDescription": "Counts cycles for each PMH (Page Miss Handler) that is busy with an EPT (Extended Page Table) walk for any request type.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Counts demand data loads that caused a completed page walk of any page size (4K/2M/4M/1G). This implies it missed in all TLB levels. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0xe"
},
{
- "EventCode": "0x85",
- "UMask": "0x1",
- "BriefDescription": "Misses at all ITLB levels that cause page walks",
+ "BriefDescription": "Page walk completed due to a demand data store to a 4K page",
"Counter": "0,1,2,3",
- "EventName": "ITLB_MISSES.MISS_CAUSES_A_WALK",
- "PublicDescription": "Counts page walks of any page size (4K/2M/4M/1G) caused by a code fetch. This implies it missed in the ITLB and further levels of TLB, but the walk need not have completed.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_4K",
+ "PublicDescription": "Counts page walks completed due to demand data stores whose address translations missed in the TLB and were mapped to 4K pages. The page walks can end with or without a page fault.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0x85",
- "UMask": "0x2",
"BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (4K)",
"Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_COMPLETED_4K",
"PublicDescription": "Counts completed page walks (4K page size) caused by a code fetch. This implies it missed in the ITLB and further levels of TLB. The page walk can end with or without a fault.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0x85",
- "UMask": "0x4",
- "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (2M/4M)",
- "Counter": "0,1,2,3",
- "EventName": "ITLB_MISSES.WALK_COMPLETED_2M_4M",
- "PublicDescription": "Counts code misses in all ITLB levels that caused a completed page walk (2M and 4M page sizes). The page walk can end with or without a fault.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x85",
- "UMask": "0x8",
"BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (1G)",
"Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_COMPLETED_1G",
"PublicDescription": "Counts store misses in all DTLB levels that cause a completed page walk (1G page size). The page walk can end with or without a fault.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
- "EventCode": "0x85",
- "UMask": "0xe",
- "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (All page sizes)",
+ "BriefDescription": "Instruction fetch requests that miss the ITLB and hit the STLB.",
"Counter": "0,1,2,3",
- "EventName": "ITLB_MISSES.WALK_COMPLETED",
- "PublicDescription": "Counts completed page walks (2M and 4M page sizes) caused by a code fetch. This implies it missed in the ITLB and further levels of TLB. The page walk can end with or without a fault.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.STLB_HIT",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x20"
},
{
- "EventCode": "0x85",
- "UMask": "0x10",
- "BriefDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for an instruction fetch request. EPT page walk duration are excluded in Skylake.",
+ "BriefDescription": "Page walk completed due to a demand data load to a 1G page",
"Counter": "0,1,2,3",
- "EventName": "ITLB_MISSES.WALK_PENDING",
- "PublicDescription": "Counts 1 per cycle for each PMH (Page Miss Handler) that is busy with a page walk for an instruction fetch request. EPT page walk duration are excluded in Skylake michroarchitecture.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_1G",
+ "PublicDescription": "Counts page walks completed due to demand data loads whose address translations missed in the TLB and were mapped to 4K pages. The page walks can end with or without a page fault.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
},
{
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (2M/4M)",
+ "Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0x85",
- "UMask": "0x10",
- "BriefDescription": "Cycles when at least one PMH is busy with a page walk for code (instruction fetch) request. EPT page walk duration are excluded in Skylake.",
- "MSRValue": "0x00",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_2M_4M",
+ "PublicDescription": "Counts code misses in all ITLB levels that caused a completed page walk (2M and 4M page sizes). The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Store misses in all TLB levels causes a page walk that completes. (All page sizes)",
"Counter": "0,1,2,3",
- "EventName": "ITLB_MISSES.WALK_ACTIVE",
- "CounterMask": "1",
- "PublicDescription": "Cycles when at least one PMH is busy with a page walk for code (instruction fetch) request. EPT page walk duration are excluded in Skylake microarchitecture.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Counts demand data stores that caused a completed page walk of any page size (4K/2M/4M/1G). This implies it missed in all TLB levels. The page walk can end with or without a fault.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xe"
},
{
- "EventCode": "0x85",
- "UMask": "0x20",
- "BriefDescription": "Instruction fetch requests that miss the ITLB and hit the STLB.",
+ "BriefDescription": "Cycles when at least one PMH is busy with a page walk for code (instruction fetch) request. EPT page walk duration are excluded in Skylake.",
"Counter": "0,1,2,3",
- "EventName": "ITLB_MISSES.STLB_HIT",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.WALK_ACTIVE",
+ "PublicDescription": "Cycles when at least one PMH is busy with a page walk for code (instruction fetch) request. EPT page walk duration are excluded in Skylake microarchitecture.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
- "EventCode": "0xAE",
- "UMask": "0x1",
- "BriefDescription": "Flushing of the Instruction TLB (ITLB) pages, includes 4k/2M/4M pages.",
+ "BriefDescription": "Loads that miss the DTLB and hit the STLB.",
"Counter": "0,1,2,3",
- "EventName": "ITLB.ITLB_FLUSH",
- "PublicDescription": "Counts the number of flushes of the big or small ITLB pages. Counting include both TLB Flush (covering all sets) and TLB Set Clear (set-specific).",
- "SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
+ "PublicDescription": "Counts loads that miss the DTLB (Data TLB) and hit the STLB (Second level TLB).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x20"
},
{
- "EventCode": "0xBD",
- "UMask": "0x1",
"BriefDescription": "DTLB flush attempts of the thread-specific entries",
"Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xBD",
"EventName": "TLB_FLUSH.DTLB_THREAD",
"PublicDescription": "Counts the number of DTLB flush attempts of the thread-specific entries.",
"SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xBD",
- "UMask": "0x20",
- "BriefDescription": "STLB flush attempts",
- "Counter": "0,1,2,3",
- "EventName": "TLB_FLUSH.STLB_ANY",
- "PublicDescription": "Counts the number of any STLB flush attempts (such as entire, VPID, PCID, InvPage, CR3 write, etc.).",
- "SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
}
] \ No newline at end of file
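The TLB events in the file above pair in a standard way: WALK_PENDING accumulates one count per cycle for each busy PMH, WALK_ACTIVE (same umask with CounterMask=1) counts cycles with at least one busy PMH, and WALK_COMPLETED counts finished walks, so pending divided by completed approximates the average walk latency. A sketch under those assumptions; the helper and the sample counts are hypothetical:

    def avg_page_walk_cycles(walk_pending: int, walk_completed: int) -> float:
        """Approximate core cycles per completed page walk.

        walk_pending   -- e.g. DTLB_LOAD_MISSES.WALK_PENDING (1/cycle per busy PMH)
        walk_completed -- e.g. DTLB_LOAD_MISSES.WALK_COMPLETED (all page sizes)
        """
        return walk_pending / walk_completed if walk_completed else 0.0

    # Hypothetical counts: 2.6M pending-cycles over 100k walks ~ 26 cycles/walk.
    print(avg_page_walk_cycles(2_600_000, 100_000))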
diff --git a/tools/perf/pmu-events/arch/x86/haswell/hsw-metrics.json b/tools/perf/pmu-events/arch/x86/haswell/hsw-metrics.json
index 21b27488b621..c80f16fde6d0 100644
--- a/tools/perf/pmu-events/arch/x86/haswell/hsw-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/haswell/hsw-metrics.json
@@ -1,322 +1,322 @@
[
{
- "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)",
- "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.",
"BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
+ "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)",
"MetricGroup": "TopdownL1",
- "MetricName": "Frontend_Bound"
+ "MetricName": "Frontend_Bound",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound."
},
{
- "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
- "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
"BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
"MetricGroup": "TopdownL1_SMT",
- "MetricName": "Frontend_Bound_SMT"
+ "MetricName": "Frontend_Bound_SMT",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU."
},
{
- "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)",
- "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
"BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations",
+ "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)",
"MetricGroup": "TopdownL1",
- "MetricName": "Bad_Speculation"
+ "MetricName": "Bad_Speculation",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example."
},
{
- "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
- "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU.",
"BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
"MetricGroup": "TopdownL1_SMT",
- "MetricName": "Bad_Speculation_SMT"
+ "MetricName": "Bad_Speculation_SMT",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU."
},
{
- "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )",
- "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
"BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
+ "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )",
"MetricGroup": "TopdownL1",
- "MetricName": "Backend_Bound"
+ "MetricName": "Backend_Bound",
+ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound."
},
{
- "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )",
- "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
"BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )",
"MetricGroup": "TopdownL1_SMT",
- "MetricName": "Backend_Bound_SMT"
+ "MetricName": "Backend_Bound_SMT",
+ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. SMT version; use when SMT is enabled and measuring per logical CPU."
},
{
- "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)",
- "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. ",
"BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)",
"MetricGroup": "TopdownL1",
- "MetricName": "Retiring"
+ "MetricName": "Retiring",
+ "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. "
},
{
- "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
- "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. SMT version; use when SMT is enabled and measuring per logical CPU.",
"BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
"MetricGroup": "TopdownL1_SMT",
- "MetricName": "Retiring_SMT"
+ "MetricName": "Retiring_SMT",
+ "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. SMT version; use when SMT is enabled and measuring per logical CPU."
},
{
+ "BriefDescription": "Instructions Per Cycle (per Logical Processor)",
"MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
- "BriefDescription": "Instructions Per Cycle (per logical thread)",
"MetricGroup": "TopDownL1",
"MetricName": "IPC"
},
{
- "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
"BriefDescription": "Uops Per Instruction",
- "MetricGroup": "Pipeline;Retiring",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
+ "MetricGroup": "Pipeline;Retire",
"MetricName": "UPI"
},
{
- "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
"BriefDescription": "Instruction per taken branch",
- "MetricGroup": "Branches;PGO",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
+ "MetricGroup": "Branches;Fetch_BW;PGO",
"MetricName": "IpTB"
},
{
- "MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
"BriefDescription": "Branch instructions per taken branch. ",
+ "MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;PGO",
"MetricName": "BpTB"
},
{
- "MetricExpr": "min( 1 , IDQ.MITE_UOPS / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 16 * ( ICACHE.HIT + ICACHE.MISSES ) / 4.0 ) )",
"BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely (includes speculatively fetches) consumed by program instructions",
- "MetricGroup": "PGO",
+ "MetricExpr": "min( 1 , IDQ.MITE_UOPS / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 16 * ( ICACHE.HIT + ICACHE.MISSES ) / 4.0 ) )",
+ "MetricGroup": "PGO;IcMiss",
"MetricName": "IFetch_Line_Utilization"
},
{
- "MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ) )",
"BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
- "MetricGroup": "DSB;Frontend_Bandwidth",
+ "MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ) )",
+ "MetricGroup": "DSB;Fetch_BW",
"MetricName": "DSB_Coverage"
},
{
+ "BriefDescription": "Cycles Per Instruction (per Logical Processor)",
"MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
- "BriefDescription": "Cycles Per Instruction (threaded)",
"MetricGroup": "Pipeline;Summary",
"MetricName": "CPI"
},
{
+ "BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active.",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD",
- "BriefDescription": "Per-thread actual clocks when the logical processor is active.",
"MetricGroup": "Summary",
"MetricName": "CLKS"
},
{
+ "BriefDescription": "Total issue-pipeline slots (per-Physical Core)",
"MetricExpr": "4 * cycles",
- "BriefDescription": "Total issue-pipeline slots (per core)",
"MetricGroup": "TopDownL1",
"MetricName": "SLOTS"
},
{
+ "BriefDescription": "Total issue-pipeline slots (per-Physical Core)",
"MetricExpr": "4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
- "BriefDescription": "Total issue-pipeline slots (per core)",
"MetricGroup": "TopDownL1_SMT",
"MetricName": "SLOTS_SMT"
},
{
+ "BriefDescription": "Instructions per Load (lower number means higher occurance rate)",
"MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_LOADS",
- "BriefDescription": "Instructions per Load (lower number means loads are more frequent)",
- "MetricGroup": "Instruction_Type;L1_Bound",
+ "MetricGroup": "Instruction_Type",
"MetricName": "IpL"
},
{
+ "BriefDescription": "Instructions per Store (lower number means higher occurance rate)",
"MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_STORES",
- "BriefDescription": "Instructions per Store",
- "MetricGroup": "Instruction_Type;Store_Bound",
+ "MetricGroup": "Instruction_Type",
"MetricName": "IpS"
},
{
+ "BriefDescription": "Instructions per Branch (lower number means higher occurance rate)",
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES",
- "BriefDescription": "Instructions per Branch",
- "MetricGroup": "Branches;Instruction_Type;Port_5;Port_6",
+ "MetricGroup": "Branches;Instruction_Type",
"MetricName": "IpB"
},
{
+ "BriefDescription": "Instruction per (near) call (lower number means higher occurance rate)",
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_CALL",
- "BriefDescription": "Instruction per (near) call",
"MetricGroup": "Branches",
"MetricName": "IpCall"
},
{
- "MetricExpr": "INST_RETIRED.ANY",
"BriefDescription": "Total number of retired Instructions",
+ "MetricExpr": "INST_RETIRED.ANY",
"MetricGroup": "Summary",
"MetricName": "Instructions"
},
{
- "MetricExpr": "INST_RETIRED.ANY / cycles",
"BriefDescription": "Instructions Per Cycle (per physical core)",
+ "MetricExpr": "INST_RETIRED.ANY / cycles",
"MetricGroup": "SMT",
"MetricName": "CoreIPC"
},
{
- "MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"BriefDescription": "Instructions Per Cycle (per physical core)",
+ "MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"MetricGroup": "SMT",
"MetricName": "CoreIPC_SMT"
},
{
- "MetricExpr": "( UOPS_EXECUTED.CORE / 2 / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@) ) if #SMT_on else UOPS_EXECUTED.CORE / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@)",
"BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
- "MetricGroup": "Pipeline;Ports_Utilization",
+ "MetricExpr": "( UOPS_EXECUTED.CORE / 2 / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@) ) if #SMT_on else UOPS_EXECUTED.CORE / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@)",
+ "MetricGroup": "Pipeline",
"MetricName": "ILP"
},
{
- "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
"BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear)",
- "MetricGroup": "Branch_Mispredicts",
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "BrMispredicts",
"MetricName": "IpMispredict"
},
{
+ "BriefDescription": "Core actual clocks when any Logical Processor is active on the Physical Core",
"MetricExpr": "( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )",
- "BriefDescription": "Core actual clocks when any thread is active on the physical core",
"MetricGroup": "SMT",
"MetricName": "CORE_CLKS"
},
{
- "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_UOPS_RETIRED.L1_MISS + mem_load_uops_retired.hit_lfb )",
"BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads (in core cycles)",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_UOPS_RETIRED.L1_MISS + mem_load_uops_retired.hit_lfb )",
"MetricGroup": "Memory_Bound;Memory_Lat",
"MetricName": "Load_Miss_Real_Latency"
},
{
+ "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)",
"MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES",
- "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-thread)",
"MetricGroup": "Memory_Bound;Memory_BW",
"MetricName": "MLP"
},
{
- "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / cycles",
"BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
+ "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / cycles",
"MetricGroup": "TLB",
"MetricName": "Page_Walks_Utilization"
},
{
- "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
+ "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"MetricGroup": "TLB_SMT",
"MetricName": "Page_Walks_Utilization_SMT"
},
{
- "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
"BriefDescription": "Average data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
"MetricGroup": "Memory_BW",
"MetricName": "L1D_Cache_Fill_BW"
},
{
- "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
"BriefDescription": "Average data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
"MetricGroup": "Memory_BW",
"MetricName": "L2_Cache_Fill_BW"
},
{
- "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
"BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
"MetricGroup": "Memory_BW",
"MetricName": "L3_Cache_Fill_BW"
},
{
- "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L1_MISS / INST_RETIRED.ANY",
"BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
- "MetricGroup": "Cache_Misses;",
+ "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L1_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "Cache_Misses",
"MetricName": "L1MPKI"
},
{
- "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
"BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads",
- "MetricGroup": "Cache_Misses;",
+ "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "Cache_Misses",
"MetricName": "L2MPKI"
},
{
- "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
"BriefDescription": "L2 cache misses per kilo instruction for all request types (including speculative)",
- "MetricGroup": "Cache_Misses;",
+ "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "Cache_Misses",
"MetricName": "L2MPKI_All"
},
{
- "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
"BriefDescription": "L2 cache hits per kilo instruction for all request types (including speculative)",
- "MetricGroup": "Cache_Misses;",
+ "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "Cache_Misses",
"MetricName": "L2HPKI_All"
},
{
- "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L3_MISS / INST_RETIRED.ANY",
"BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads",
- "MetricGroup": "Cache_Misses;",
+ "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L3_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "Cache_Misses",
"MetricName": "L3MPKI"
},
{
- "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
"BriefDescription": "Average CPU Utilization",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
"MetricGroup": "Summary",
"MetricName": "CPU_Utilization"
},
{
- "MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
"BriefDescription": "Average Frequency Utilization relative nominal frequency",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
"MetricGroup": "Power",
"MetricName": "Turbo_Utilization"
},
{
+ "BriefDescription": "Fraction of cycles where both hardware Logical Processors were active",
"MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0",
- "BriefDescription": "Fraction of cycles where both hardware threads were active",
"MetricGroup": "SMT;Summary",
"MetricName": "SMT_2T_Utilization"
},
{
- "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
"BriefDescription": "Fraction of cycles spent in Kernel mode",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
"MetricGroup": "Summary",
"MetricName": "Kernel_Utilization"
},
{
- "MetricExpr": "64 * ( arb@event\\=0x81\\,umask\\=0x1@ + arb@event\\=0x84\\,umask\\=0x1@ ) / 1000000 / duration_time / 1000",
"BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
+ "MetricExpr": "64 * ( arb@event\\=0x81\\,umask\\=0x1@ + arb@event\\=0x84\\,umask\\=0x1@ ) / 1000000 / duration_time / 1000",
"MetricGroup": "Memory_BW",
"MetricName": "DRAM_BW_Use"
},
{
+ "BriefDescription": "C3 residency percent per core",
"MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
- "BriefDescription": "C3 residency percent per core",
"MetricName": "C3_Core_Residency"
},
{
+ "BriefDescription": "C6 residency percent per core",
"MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
- "BriefDescription": "C6 residency percent per core",
"MetricName": "C6_Core_Residency"
},
{
+ "BriefDescription": "C7 residency percent per core",
"MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
- "BriefDescription": "C7 residency percent per core",
"MetricName": "C7_Core_Residency"
},
{
+ "BriefDescription": "C2 residency percent per package",
"MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
- "BriefDescription": "C2 residency percent per package",
"MetricName": "C2_Pkg_Residency"
},
{
+ "BriefDescription": "C3 residency percent per package",
"MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
- "BriefDescription": "C3 residency percent per package",
"MetricName": "C3_Pkg_Residency"
},
{
+ "BriefDescription": "C6 residency percent per package",
"MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
- "BriefDescription": "C6 residency percent per package",
"MetricName": "C6_Pkg_Residency"
},
{
+ "BriefDescription": "C7 residency percent per package",
"MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
- "BriefDescription": "C7 residency percent per package",
"MetricName": "C7_Pkg_Residency"
}
]
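The hunks above and below follow one mechanical pattern: each metric object's keys are re-emitted in alphabetical order (BriefDescription, MetricExpr, MetricGroup, MetricName, PublicDescription), with expressions unchanged apart from MetricGroup renames. A minimal Python sketch of that normalization, assuming plain alphabetical key sorting rather than the kernel's actual converter tooling:

    import json

    def normalize_metrics(path: str) -> str:
        """Re-emit a pmu-events metrics file with object keys sorted alphabetically."""
        with open(path) as f:
            metrics = json.load(f)  # a list of metric objects, as in these files
        # sort_keys=True yields BriefDescription, MetricExpr, MetricGroup,
        # MetricName, PublicDescription -- the ordering seen in these hunks.
        return json.dumps(metrics, indent=4, sort_keys=True) + "\n"

    # Hypothetical usage against one of the files touched here:
    # print(normalize_metrics("tools/perf/pmu-events/arch/x86/haswellx/hsx-metrics.json"))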
diff --git a/tools/perf/pmu-events/arch/x86/haswellx/hsx-metrics.json b/tools/perf/pmu-events/arch/x86/haswellx/hsx-metrics.json
index e5aac148c941..e501729c3dd1 100644
--- a/tools/perf/pmu-events/arch/x86/haswellx/hsx-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/haswellx/hsx-metrics.json
@@ -1,340 +1,340 @@
[
{
- "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)",
- "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.",
"BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
+ "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)",
"MetricGroup": "TopdownL1",
- "MetricName": "Frontend_Bound"
+ "MetricName": "Frontend_Bound",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound."
},
{
- "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
- "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
"BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
"MetricGroup": "TopdownL1_SMT",
- "MetricName": "Frontend_Bound_SMT"
+ "MetricName": "Frontend_Bound_SMT",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU."
},
{
- "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)",
- "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
"BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations",
+ "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)",
"MetricGroup": "TopdownL1",
- "MetricName": "Bad_Speculation"
+ "MetricName": "Bad_Speculation",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example."
},
{
- "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
- "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU.",
"BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
"MetricGroup": "TopdownL1_SMT",
- "MetricName": "Bad_Speculation_SMT"
+ "MetricName": "Bad_Speculation_SMT",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU."
},
{
- "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )",
- "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
"BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
+ "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )",
"MetricGroup": "TopdownL1",
- "MetricName": "Backend_Bound"
+ "MetricName": "Backend_Bound",
+ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound."
},
{
- "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )",
- "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
"BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )",
"MetricGroup": "TopdownL1_SMT",
- "MetricName": "Backend_Bound_SMT"
+ "MetricName": "Backend_Bound_SMT",
+ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. SMT version; use when SMT is enabled and measuring per logical CPU."
},
{
- "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)",
- "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. ",
"BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)",
"MetricGroup": "TopdownL1",
- "MetricName": "Retiring"
+ "MetricName": "Retiring",
+ "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. "
},
{
- "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
- "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. SMT version; use when SMT is enabled and measuring per logical CPU.",
"BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
"MetricGroup": "TopdownL1_SMT",
- "MetricName": "Retiring_SMT"
+ "MetricName": "Retiring_SMT",
+ "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. SMT version; use when SMT is enabled and measuring per logical CPU."
},
{
+ "BriefDescription": "Instructions Per Cycle (per Logical Processor)",
"MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
- "BriefDescription": "Instructions Per Cycle (per logical thread)",
"MetricGroup": "TopDownL1",
"MetricName": "IPC"
},
{
- "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
"BriefDescription": "Uops Per Instruction",
- "MetricGroup": "Pipeline;Retiring",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
+ "MetricGroup": "Pipeline;Retire",
"MetricName": "UPI"
},
{
- "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
"BriefDescription": "Instruction per taken branch",
- "MetricGroup": "Branches;PGO",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
+ "MetricGroup": "Branches;Fetch_BW;PGO",
"MetricName": "IpTB"
},
{
- "MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
"BriefDescription": "Branch instructions per taken branch. ",
+ "MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;PGO",
"MetricName": "BpTB"
},
{
- "MetricExpr": "min( 1 , IDQ.MITE_UOPS / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 16 * ( ICACHE.HIT + ICACHE.MISSES ) / 4.0 ) )",
"BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely (includes speculatively fetches) consumed by program instructions",
- "MetricGroup": "PGO",
+ "MetricExpr": "min( 1 , IDQ.MITE_UOPS / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 16 * ( ICACHE.HIT + ICACHE.MISSES ) / 4.0 ) )",
+ "MetricGroup": "PGO;IcMiss",
"MetricName": "IFetch_Line_Utilization"
},
{
- "MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ) )",
"BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
- "MetricGroup": "DSB;Frontend_Bandwidth",
+ "MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ) )",
+ "MetricGroup": "DSB;Fetch_BW",
"MetricName": "DSB_Coverage"
},
{
+ "BriefDescription": "Cycles Per Instruction (per Logical Processor)",
"MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
- "BriefDescription": "Cycles Per Instruction (threaded)",
"MetricGroup": "Pipeline;Summary",
"MetricName": "CPI"
},
{
+ "BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active.",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD",
- "BriefDescription": "Per-thread actual clocks when the logical processor is active.",
"MetricGroup": "Summary",
"MetricName": "CLKS"
},
{
+ "BriefDescription": "Total issue-pipeline slots (per-Physical Core)",
"MetricExpr": "4 * cycles",
- "BriefDescription": "Total issue-pipeline slots (per core)",
"MetricGroup": "TopDownL1",
"MetricName": "SLOTS"
},
{
+ "BriefDescription": "Total issue-pipeline slots (per-Physical Core)",
"MetricExpr": "4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
- "BriefDescription": "Total issue-pipeline slots (per core)",
"MetricGroup": "TopDownL1_SMT",
"MetricName": "SLOTS_SMT"
},
{
+ "BriefDescription": "Instructions per Load (lower number means higher occurance rate)",
"MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_LOADS",
- "BriefDescription": "Instructions per Load (lower number means loads are more frequent)",
- "MetricGroup": "Instruction_Type;L1_Bound",
+ "MetricGroup": "Instruction_Type",
"MetricName": "IpL"
},
{
+ "BriefDescription": "Instructions per Store (lower number means higher occurance rate)",
"MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_STORES",
- "BriefDescription": "Instructions per Store",
- "MetricGroup": "Instruction_Type;Store_Bound",
+ "MetricGroup": "Instruction_Type",
"MetricName": "IpS"
},
{
+ "BriefDescription": "Instructions per Branch (lower number means higher occurance rate)",
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES",
- "BriefDescription": "Instructions per Branch",
- "MetricGroup": "Branches;Instruction_Type;Port_5;Port_6",
+ "MetricGroup": "Branches;Instruction_Type",
"MetricName": "IpB"
},
{
+ "BriefDescription": "Instruction per (near) call (lower number means higher occurance rate)",
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_CALL",
- "BriefDescription": "Instruction per (near) call",
"MetricGroup": "Branches",
"MetricName": "IpCall"
},
{
- "MetricExpr": "INST_RETIRED.ANY",
"BriefDescription": "Total number of retired Instructions",
+ "MetricExpr": "INST_RETIRED.ANY",
"MetricGroup": "Summary",
"MetricName": "Instructions"
},
{
- "MetricExpr": "INST_RETIRED.ANY / cycles",
"BriefDescription": "Instructions Per Cycle (per physical core)",
+ "MetricExpr": "INST_RETIRED.ANY / cycles",
"MetricGroup": "SMT",
"MetricName": "CoreIPC"
},
{
- "MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"BriefDescription": "Instructions Per Cycle (per physical core)",
+ "MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"MetricGroup": "SMT",
"MetricName": "CoreIPC_SMT"
},
{
- "MetricExpr": "( UOPS_EXECUTED.CORE / 2 / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@) ) if #SMT_on else UOPS_EXECUTED.CORE / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@)",
"BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
- "MetricGroup": "Pipeline;Ports_Utilization",
+ "MetricExpr": "( UOPS_EXECUTED.CORE / 2 / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@) ) if #SMT_on else UOPS_EXECUTED.CORE / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@)",
+ "MetricGroup": "Pipeline",
"MetricName": "ILP"
},
{
- "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
"BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear)",
- "MetricGroup": "Branch_Mispredicts",
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "BrMispredicts",
"MetricName": "IpMispredict"
},
{
+ "BriefDescription": "Core actual clocks when any Logical Processor is active on the Physical Core",
"MetricExpr": "( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )",
- "BriefDescription": "Core actual clocks when any thread is active on the physical core",
"MetricGroup": "SMT",
"MetricName": "CORE_CLKS"
},
{
- "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_UOPS_RETIRED.L1_MISS + mem_load_uops_retired.hit_lfb )",
"BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads (in core cycles)",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_UOPS_RETIRED.L1_MISS + mem_load_uops_retired.hit_lfb )",
"MetricGroup": "Memory_Bound;Memory_Lat",
"MetricName": "Load_Miss_Real_Latency"
},
{
+ "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)",
"MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES",
- "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-thread)",
"MetricGroup": "Memory_Bound;Memory_BW",
"MetricName": "MLP"
},
{
- "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / cycles",
"BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
+ "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / cycles",
"MetricGroup": "TLB",
"MetricName": "Page_Walks_Utilization"
},
{
- "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
+ "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"MetricGroup": "TLB_SMT",
"MetricName": "Page_Walks_Utilization_SMT"
},
{
- "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
"BriefDescription": "Average data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
"MetricGroup": "Memory_BW",
"MetricName": "L1D_Cache_Fill_BW"
},
{
- "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
"BriefDescription": "Average data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
"MetricGroup": "Memory_BW",
"MetricName": "L2_Cache_Fill_BW"
},
{
- "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
"BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
"MetricGroup": "Memory_BW",
"MetricName": "L3_Cache_Fill_BW"
},
{
- "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L1_MISS / INST_RETIRED.ANY",
"BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
- "MetricGroup": "Cache_Misses;",
+ "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L1_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "Cache_Misses",
"MetricName": "L1MPKI"
},
{
- "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
"BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads",
- "MetricGroup": "Cache_Misses;",
+ "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "Cache_Misses",
"MetricName": "L2MPKI"
},
{
- "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
"BriefDescription": "L2 cache misses per kilo instruction for all request types (including speculative)",
- "MetricGroup": "Cache_Misses;",
+ "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "Cache_Misses",
"MetricName": "L2MPKI_All"
},
{
- "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
"BriefDescription": "L2 cache hits per kilo instruction for all request types (including speculative)",
- "MetricGroup": "Cache_Misses;",
+ "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "Cache_Misses",
"MetricName": "L2HPKI_All"
},
{
- "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L3_MISS / INST_RETIRED.ANY",
"BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads",
- "MetricGroup": "Cache_Misses;",
+ "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L3_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "Cache_Misses",
"MetricName": "L3MPKI"
},
{
- "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
"BriefDescription": "Average CPU Utilization",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
"MetricGroup": "Summary",
"MetricName": "CPU_Utilization"
},
{
- "MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
"BriefDescription": "Average Frequency Utilization relative nominal frequency",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
"MetricGroup": "Power",
"MetricName": "Turbo_Utilization"
},
{
+ "BriefDescription": "Fraction of cycles where both hardware Logical Processors were active",
"MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0",
- "BriefDescription": "Fraction of cycles where both hardware threads were active",
"MetricGroup": "SMT;Summary",
"MetricName": "SMT_2T_Utilization"
},
{
- "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
"BriefDescription": "Fraction of cycles spent in Kernel mode",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
"MetricGroup": "Summary",
"MetricName": "Kernel_Utilization"
},
{
- "MetricExpr": "( 64 * ( uncore_imc@cas_count_read@ + uncore_imc@cas_count_write@ ) / 1000000000 ) / duration_time",
"BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
+ "MetricExpr": "( 64 * ( uncore_imc@cas_count_read@ + uncore_imc@cas_count_write@ ) / 1000000000 ) / duration_time",
"MetricGroup": "Memory_BW",
"MetricName": "DRAM_BW_Use"
},
{
- "MetricExpr": "1000000000 * ( cbox@event\\=0x36\\,umask\\=0x3\\,filter_opc\\=0x182@ / cbox@event\\=0x35\\,umask\\=0x3\\,filter_opc\\=0x182@ ) / ( cbox_0@event\\=0x0@ / duration_time )",
"BriefDescription": "Average latency of data read request to external memory (in nanoseconds). Accounts for demand loads and L1/L2 prefetches",
+ "MetricExpr": "1000000000 * ( cbox@event\\=0x36\\,umask\\=0x3\\,filter_opc\\=0x182@ / cbox@event\\=0x35\\,umask\\=0x3\\,filter_opc\\=0x182@ ) / ( cbox_0@event\\=0x0@ / duration_time )",
"MetricGroup": "Memory_Lat",
"MetricName": "DRAM_Read_Latency"
},
{
- "MetricExpr": "cbox@event\\=0x36\\,umask\\=0x3\\,filter_opc\\=0x182@ / cbox@event\\=0x36\\,umask\\=0x3\\,filter_opc\\=0x182\\,thresh\\=1@",
"BriefDescription": "Average number of parallel data read requests to external memory. Accounts for demand loads and L1/L2 prefetches",
+ "MetricExpr": "cbox@event\\=0x36\\,umask\\=0x3\\,filter_opc\\=0x182@ / cbox@event\\=0x36\\,umask\\=0x3\\,filter_opc\\=0x182\\,thresh\\=1@",
"MetricGroup": "Memory_BW",
"MetricName": "DRAM_Parallel_Reads"
},
{
- "MetricExpr": "cbox_0@event\\=0x0@",
"BriefDescription": "Socket actual clocks when any core is active on that socket",
+ "MetricExpr": "cbox_0@event\\=0x0@",
"MetricGroup": "",
"MetricName": "Socket_CLKS"
},
{
+ "BriefDescription": "C3 residency percent per core",
"MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
- "BriefDescription": "C3 residency percent per core",
"MetricName": "C3_Core_Residency"
},
{
+ "BriefDescription": "C6 residency percent per core",
"MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
- "BriefDescription": "C6 residency percent per core",
"MetricName": "C6_Core_Residency"
},
{
+ "BriefDescription": "C7 residency percent per core",
"MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
- "BriefDescription": "C7 residency percent per core",
"MetricName": "C7_Core_Residency"
},
{
+ "BriefDescription": "C2 residency percent per package",
"MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
- "BriefDescription": "C2 residency percent per package",
"MetricName": "C2_Pkg_Residency"
},
{
+ "BriefDescription": "C3 residency percent per package",
"MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
- "BriefDescription": "C3 residency percent per package",
"MetricName": "C3_Pkg_Residency"
},
{
+ "BriefDescription": "C6 residency percent per package",
"MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
- "BriefDescription": "C6 residency percent per package",
"MetricName": "C6_Pkg_Residency"
},
{
+ "BriefDescription": "C7 residency percent per package",
"MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
- "BriefDescription": "C7 residency percent per package",
"MetricName": "C7_Pkg_Residency"
}
]
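Across these files, every *_SMT variant is built from the same core-clock estimate: CORE_CLKS = ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ), and SLOTS_SMT = 4 * CORE_CLKS, so per-logical-CPU counts scale back to the physical core. A small sketch with hypothetical counter readings (illustrative numbers only, not taken from this patch):

    def core_clks(thread_clks: float, one_thread_active: float, ref_xclk: float) -> float:
        # (CPU_CLK_UNHALTED.THREAD / 2) splits per-thread clocks across the two
        # siblings; the (1 + ONE_THREAD_ACTIVE / REF_XCLK) term adds back the
        # fraction of time when only one Logical Processor was active.
        return (thread_clks / 2) * (1 + one_thread_active / ref_xclk)

    # Hypothetical counter readings:
    clks = core_clks(thread_clks=2.0e9, one_thread_active=4.0e7, ref_xclk=8.0e7)
    slots_smt = 4 * clks  # four issue-pipeline slots per core cycle on these parts
    print(f"CORE_CLKS ~= {clks:.3e}, SLOTS_SMT ~= {slots_smt:.3e}")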
diff --git a/tools/perf/pmu-events/arch/x86/ivybridge/ivb-metrics.json b/tools/perf/pmu-events/arch/x86/ivybridge/ivb-metrics.json
index bc4d5fc284a0..e2446966b651 100644
--- a/tools/perf/pmu-events/arch/x86/ivybridge/ivb-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/ivybridge/ivb-metrics.json
@@ -1,340 +1,340 @@
[
{
- "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)",
- "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.",
"BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
+ "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)",
"MetricGroup": "TopdownL1",
- "MetricName": "Frontend_Bound"
+ "MetricName": "Frontend_Bound",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound."
},
{
- "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
- "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
"BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
"MetricGroup": "TopdownL1_SMT",
- "MetricName": "Frontend_Bound_SMT"
+ "MetricName": "Frontend_Bound_SMT",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU."
},
{
- "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)",
- "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
"BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations",
+ "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)",
"MetricGroup": "TopdownL1",
- "MetricName": "Bad_Speculation"
+ "MetricName": "Bad_Speculation",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example."
},
{
- "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
- "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU.",
"BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
"MetricGroup": "TopdownL1_SMT",
- "MetricName": "Bad_Speculation_SMT"
+ "MetricName": "Bad_Speculation_SMT",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU."
},
{
- "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )",
- "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
"BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
+ "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )",
"MetricGroup": "TopdownL1",
- "MetricName": "Backend_Bound"
+ "MetricName": "Backend_Bound",
+ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound."
},
{
- "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )",
- "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
"BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )",
"MetricGroup": "TopdownL1_SMT",
- "MetricName": "Backend_Bound_SMT"
+ "MetricName": "Backend_Bound_SMT",
+ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. SMT version; use when SMT is enabled and measuring per logical CPU."
},
{
- "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)",
- "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. ",
"BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)",
"MetricGroup": "TopdownL1",
- "MetricName": "Retiring"
+ "MetricName": "Retiring",
+ "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. "
},
{
- "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
- "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. SMT version; use when SMT is enabled and measuring per logical CPU.",
"BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
"MetricGroup": "TopdownL1_SMT",
- "MetricName": "Retiring_SMT"
+ "MetricName": "Retiring_SMT",
+ "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. SMT version; use when SMT is enabled and measuring per logical CPU."
},
{
+ "BriefDescription": "Instructions Per Cycle (per Logical Processor)",
"MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
- "BriefDescription": "Instructions Per Cycle (per logical thread)",
"MetricGroup": "TopDownL1",
"MetricName": "IPC"
},
{
- "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
"BriefDescription": "Uops Per Instruction",
- "MetricGroup": "Pipeline;Retiring",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
+ "MetricGroup": "Pipeline;Retire",
"MetricName": "UPI"
},
{
- "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
"BriefDescription": "Instruction per taken branch",
- "MetricGroup": "Branches;PGO",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
+ "MetricGroup": "Branches;Fetch_BW;PGO",
"MetricName": "IpTB"
},
{
- "MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
"BriefDescription": "Branch instructions per taken branch. ",
+ "MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;PGO",
"MetricName": "BpTB"
},
{
- "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 32 * ( ICACHE.HIT + ICACHE.MISSES ) / 4 ) )",
"BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely (includes speculatively fetches) consumed by program instructions",
- "MetricGroup": "PGO",
+ "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 32 * ( ICACHE.HIT + ICACHE.MISSES ) / 4 ) )",
+ "MetricGroup": "PGO;IcMiss",
"MetricName": "IFetch_Line_Utilization"
},
{
- "MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ) )",
"BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
- "MetricGroup": "DSB;Frontend_Bandwidth",
+ "MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ) )",
+ "MetricGroup": "DSB;Fetch_BW",
"MetricName": "DSB_Coverage"
},
{
+ "BriefDescription": "Cycles Per Instruction (per Logical Processor)",
"MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
- "BriefDescription": "Cycles Per Instruction (threaded)",
"MetricGroup": "Pipeline;Summary",
"MetricName": "CPI"
},
{
+ "BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active.",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD",
- "BriefDescription": "Per-thread actual clocks when the logical processor is active.",
"MetricGroup": "Summary",
"MetricName": "CLKS"
},
{
+ "BriefDescription": "Total issue-pipeline slots (per-Physical Core)",
"MetricExpr": "4 * cycles",
- "BriefDescription": "Total issue-pipeline slots (per core)",
"MetricGroup": "TopDownL1",
"MetricName": "SLOTS"
},
{
+ "BriefDescription": "Total issue-pipeline slots (per-Physical Core)",
"MetricExpr": "4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
- "BriefDescription": "Total issue-pipeline slots (per core)",
"MetricGroup": "TopDownL1_SMT",
"MetricName": "SLOTS_SMT"
},
{
+ "BriefDescription": "Instructions per Load (lower number means higher occurance rate)",
"MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_LOADS",
- "BriefDescription": "Instructions per Load (lower number means loads are more frequent)",
- "MetricGroup": "Instruction_Type;L1_Bound",
+ "MetricGroup": "Instruction_Type",
"MetricName": "IpL"
},
{
+ "BriefDescription": "Instructions per Store (lower number means higher occurance rate)",
"MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_STORES",
- "BriefDescription": "Instructions per Store",
- "MetricGroup": "Instruction_Type;Store_Bound",
+ "MetricGroup": "Instruction_Type",
"MetricName": "IpS"
},
{
+ "BriefDescription": "Instructions per Branch (lower number means higher occurance rate)",
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES",
- "BriefDescription": "Instructions per Branch",
- "MetricGroup": "Branches;Instruction_Type;Port_5;Port_6",
+ "MetricGroup": "Branches;Instruction_Type",
"MetricName": "IpB"
},
{
+ "BriefDescription": "Instruction per (near) call (lower number means higher occurance rate)",
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_CALL",
- "BriefDescription": "Instruction per (near) call",
"MetricGroup": "Branches",
"MetricName": "IpCall"
},
{
- "MetricExpr": "INST_RETIRED.ANY",
"BriefDescription": "Total number of retired Instructions",
+ "MetricExpr": "INST_RETIRED.ANY",
"MetricGroup": "Summary",
"MetricName": "Instructions"
},
{
- "MetricExpr": "INST_RETIRED.ANY / cycles",
"BriefDescription": "Instructions Per Cycle (per physical core)",
+ "MetricExpr": "INST_RETIRED.ANY / cycles",
"MetricGroup": "SMT",
"MetricName": "CoreIPC"
},
{
- "MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"BriefDescription": "Instructions Per Cycle (per physical core)",
+ "MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"MetricGroup": "SMT",
"MetricName": "CoreIPC_SMT"
},
{
- "MetricExpr": "(( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / cycles",
"BriefDescription": "Floating Point Operations Per Cycle",
+ "MetricExpr": "(( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / cycles",
"MetricGroup": "FLOPS",
"MetricName": "FLOPc"
},
{
- "MetricExpr": "(( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"BriefDescription": "Floating Point Operations Per Cycle",
+ "MetricExpr": "(( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"MetricGroup": "FLOPS_SMT",
"MetricName": "FLOPc_SMT"
},
{
- "MetricExpr": "UOPS_EXECUTED.THREAD / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)",
"BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
- "MetricGroup": "Pipeline;Ports_Utilization",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)",
+ "MetricGroup": "Pipeline",
"MetricName": "ILP"
},
{
- "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
"BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear)",
- "MetricGroup": "Branch_Mispredicts",
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "BrMispredicts",
"MetricName": "IpMispredict"
},
{
+ "BriefDescription": "Core actual clocks when any Logical Processor is active on the Physical Core",
"MetricExpr": "( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )",
- "BriefDescription": "Core actual clocks when any thread is active on the physical core",
"MetricGroup": "SMT",
"MetricName": "CORE_CLKS"
},
{
- "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_UOPS_RETIRED.L1_MISS + mem_load_uops_retired.hit_lfb )",
"BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads (in core cycles)",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_UOPS_RETIRED.L1_MISS + mem_load_uops_retired.hit_lfb )",
"MetricGroup": "Memory_Bound;Memory_Lat",
"MetricName": "Load_Miss_Real_Latency"
},
{
+ "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)",
"MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES",
- "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-thread)",
"MetricGroup": "Memory_Bound;Memory_BW",
"MetricName": "MLP"
},
{
- "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / cycles",
"BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
+ "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / cycles",
"MetricGroup": "TLB",
"MetricName": "Page_Walks_Utilization"
},
{
- "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
+ "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"MetricGroup": "TLB_SMT",
"MetricName": "Page_Walks_Utilization_SMT"
},
{
- "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
"BriefDescription": "Average data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
"MetricGroup": "Memory_BW",
"MetricName": "L1D_Cache_Fill_BW"
},
{
- "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
"BriefDescription": "Average data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
"MetricGroup": "Memory_BW",
"MetricName": "L2_Cache_Fill_BW"
},
{
- "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
"BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
"MetricGroup": "Memory_BW",
"MetricName": "L3_Cache_Fill_BW"
},
{
- "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L1_MISS / INST_RETIRED.ANY",
"BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
- "MetricGroup": "Cache_Misses;",
+ "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L1_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "Cache_Misses",
"MetricName": "L1MPKI"
},
{
- "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
"BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads",
- "MetricGroup": "Cache_Misses;",
+ "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "Cache_Misses",
"MetricName": "L2MPKI"
},
{
- "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
"BriefDescription": "L2 cache misses per kilo instruction for all request types (including speculative)",
- "MetricGroup": "Cache_Misses;",
+ "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "Cache_Misses",
"MetricName": "L2MPKI_All"
},
{
- "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
"BriefDescription": "L2 cache hits per kilo instruction for all request types (including speculative)",
- "MetricGroup": "Cache_Misses;",
+ "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "Cache_Misses",
"MetricName": "L2HPKI_All"
},
{
- "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.LLC_MISS / INST_RETIRED.ANY",
"BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads",
- "MetricGroup": "Cache_Misses;",
+ "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.LLC_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "Cache_Misses",
"MetricName": "L3MPKI"
},
{
- "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
"BriefDescription": "Average CPU Utilization",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
"MetricGroup": "Summary",
"MetricName": "CPU_Utilization"
},
{
- "MetricExpr": "( (( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / 1000000000 ) / duration_time",
"BriefDescription": "Giga Floating Point Operations Per Second",
+ "MetricExpr": "( (( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / 1000000000 ) / duration_time",
"MetricGroup": "FLOPS;Summary",
"MetricName": "GFLOPs"
},
{
- "MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
"BriefDescription": "Average Frequency Utilization relative nominal frequency",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
"MetricGroup": "Power",
"MetricName": "Turbo_Utilization"
},
{
+ "BriefDescription": "Fraction of cycles where both hardware Logical Processors were active",
"MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0",
- "BriefDescription": "Fraction of cycles where both hardware threads were active",
"MetricGroup": "SMT;Summary",
"MetricName": "SMT_2T_Utilization"
},
{
- "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
"BriefDescription": "Fraction of cycles spent in Kernel mode",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
"MetricGroup": "Summary",
"MetricName": "Kernel_Utilization"
},
{
- "MetricExpr": "64 * ( arb@event\\=0x81\\,umask\\=0x1@ + arb@event\\=0x84\\,umask\\=0x1@ ) / 1000000 / duration_time / 1000",
"BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
+ "MetricExpr": "64 * ( arb@event\\=0x81\\,umask\\=0x1@ + arb@event\\=0x84\\,umask\\=0x1@ ) / 1000000 / duration_time / 1000",
"MetricGroup": "Memory_BW",
"MetricName": "DRAM_BW_Use"
},
{
+ "BriefDescription": "C3 residency percent per core",
"MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
- "BriefDescription": "C3 residency percent per core",
"MetricName": "C3_Core_Residency"
},
{
+ "BriefDescription": "C6 residency percent per core",
"MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
- "BriefDescription": "C6 residency percent per core",
"MetricName": "C6_Core_Residency"
},
{
+ "BriefDescription": "C7 residency percent per core",
"MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
- "BriefDescription": "C7 residency percent per core",
"MetricName": "C7_Core_Residency"
},
{
+ "BriefDescription": "C2 residency percent per package",
"MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
- "BriefDescription": "C2 residency percent per package",
"MetricName": "C2_Pkg_Residency"
},
{
+ "BriefDescription": "C3 residency percent per package",
"MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
- "BriefDescription": "C3 residency percent per package",
"MetricName": "C3_Pkg_Residency"
},
{
+ "BriefDescription": "C6 residency percent per package",
"MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
- "BriefDescription": "C6 residency percent per package",
"MetricName": "C6_Pkg_Residency"
},
{
+ "BriefDescription": "C7 residency percent per package",
"MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
- "BriefDescription": "C7 residency percent per package",
"MetricName": "C7_Pkg_Residency"
}
]
diff --git a/tools/perf/pmu-events/arch/x86/ivytown/ivt-metrics.json b/tools/perf/pmu-events/arch/x86/ivytown/ivt-metrics.json
index f3874b5f9995..9294769dec64 100644
--- a/tools/perf/pmu-events/arch/x86/ivytown/ivt-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/ivytown/ivt-metrics.json
@@ -1,346 +1,346 @@
[
{
- "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)",
- "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.",
"BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
+ "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)",
"MetricGroup": "TopdownL1",
- "MetricName": "Frontend_Bound"
+ "MetricName": "Frontend_Bound",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound."
},
{
- "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
- "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
"BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
"MetricGroup": "TopdownL1_SMT",
- "MetricName": "Frontend_Bound_SMT"
+ "MetricName": "Frontend_Bound_SMT",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU."
},
{
- "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)",
- "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
"BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations",
+ "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)",
"MetricGroup": "TopdownL1",
- "MetricName": "Bad_Speculation"
+ "MetricName": "Bad_Speculation",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example."
},
{
- "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
- "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU.",
"BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
"MetricGroup": "TopdownL1_SMT",
- "MetricName": "Bad_Speculation_SMT"
+ "MetricName": "Bad_Speculation_SMT",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU."
},
{
- "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )",
- "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
"BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
+ "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )",
"MetricGroup": "TopdownL1",
- "MetricName": "Backend_Bound"
+ "MetricName": "Backend_Bound",
+ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound."
},
{
- "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )",
- "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
"BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )",
"MetricGroup": "TopdownL1_SMT",
- "MetricName": "Backend_Bound_SMT"
+ "MetricName": "Backend_Bound_SMT",
+ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. SMT version; use when SMT is enabled and measuring per logical CPU."
},
{
- "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)",
- "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. ",
"BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)",
"MetricGroup": "TopdownL1",
- "MetricName": "Retiring"
+ "MetricName": "Retiring",
+ "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. "
},
{
- "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
- "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. SMT version; use when SMT is enabled and measuring per logical CPU.",
"BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
"MetricGroup": "TopdownL1_SMT",
- "MetricName": "Retiring_SMT"
+ "MetricName": "Retiring_SMT",
+ "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. SMT version; use when SMT is enabled and measuring per logical CPU."
},
{
+ "BriefDescription": "Instructions Per Cycle (per Logical Processor)",
"MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
- "BriefDescription": "Instructions Per Cycle (per logical thread)",
"MetricGroup": "TopDownL1",
"MetricName": "IPC"
},
{
- "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
"BriefDescription": "Uops Per Instruction",
- "MetricGroup": "Pipeline;Retiring",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
+ "MetricGroup": "Pipeline;Retire",
"MetricName": "UPI"
},
{
- "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
"BriefDescription": "Instruction per taken branch",
- "MetricGroup": "Branches;PGO",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
+ "MetricGroup": "Branches;Fetch_BW;PGO",
"MetricName": "IpTB"
},
{
- "MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
"BriefDescription": "Branch instructions per taken branch. ",
+ "MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;PGO",
"MetricName": "BpTB"
},
{
- "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 32 * ( ICACHE.HIT + ICACHE.MISSES ) / 4 ) )",
"BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely (includes speculatively fetches) consumed by program instructions",
- "MetricGroup": "PGO",
+ "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 32 * ( ICACHE.HIT + ICACHE.MISSES ) / 4 ) )",
+ "MetricGroup": "PGO;IcMiss",
"MetricName": "IFetch_Line_Utilization"
},
{
- "MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ) )",
"BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
- "MetricGroup": "DSB;Frontend_Bandwidth",
+ "MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ) )",
+ "MetricGroup": "DSB;Fetch_BW",
"MetricName": "DSB_Coverage"
},
{
+ "BriefDescription": "Cycles Per Instruction (per Logical Processor)",
"MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
- "BriefDescription": "Cycles Per Instruction (threaded)",
"MetricGroup": "Pipeline;Summary",
"MetricName": "CPI"
},
{
+ "BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active.",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD",
- "BriefDescription": "Per-thread actual clocks when the logical processor is active.",
"MetricGroup": "Summary",
"MetricName": "CLKS"
},
{
+ "BriefDescription": "Total issue-pipeline slots (per-Physical Core)",
"MetricExpr": "4 * cycles",
- "BriefDescription": "Total issue-pipeline slots (per core)",
"MetricGroup": "TopDownL1",
"MetricName": "SLOTS"
},
{
+ "BriefDescription": "Total issue-pipeline slots (per-Physical Core)",
"MetricExpr": "4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
- "BriefDescription": "Total issue-pipeline slots (per core)",
"MetricGroup": "TopDownL1_SMT",
"MetricName": "SLOTS_SMT"
},
{
+ "BriefDescription": "Instructions per Load (lower number means higher occurance rate)",
"MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_LOADS",
- "BriefDescription": "Instructions per Load (lower number means loads are more frequent)",
- "MetricGroup": "Instruction_Type;L1_Bound",
+ "MetricGroup": "Instruction_Type",
"MetricName": "IpL"
},
{
+ "BriefDescription": "Instructions per Store (lower number means higher occurance rate)",
"MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_STORES",
- "BriefDescription": "Instructions per Store",
- "MetricGroup": "Instruction_Type;Store_Bound",
+ "MetricGroup": "Instruction_Type",
"MetricName": "IpS"
},
{
+ "BriefDescription": "Instructions per Branch (lower number means higher occurance rate)",
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES",
- "BriefDescription": "Instructions per Branch",
- "MetricGroup": "Branches;Instruction_Type;Port_5;Port_6",
+ "MetricGroup": "Branches;Instruction_Type",
"MetricName": "IpB"
},
{
+ "BriefDescription": "Instruction per (near) call (lower number means higher occurance rate)",
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_CALL",
- "BriefDescription": "Instruction per (near) call",
"MetricGroup": "Branches",
"MetricName": "IpCall"
},
{
- "MetricExpr": "INST_RETIRED.ANY",
"BriefDescription": "Total number of retired Instructions",
+ "MetricExpr": "INST_RETIRED.ANY",
"MetricGroup": "Summary",
"MetricName": "Instructions"
},
{
- "MetricExpr": "INST_RETIRED.ANY / cycles",
"BriefDescription": "Instructions Per Cycle (per physical core)",
+ "MetricExpr": "INST_RETIRED.ANY / cycles",
"MetricGroup": "SMT",
"MetricName": "CoreIPC"
},
{
- "MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"BriefDescription": "Instructions Per Cycle (per physical core)",
+ "MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"MetricGroup": "SMT",
"MetricName": "CoreIPC_SMT"
},
{
- "MetricExpr": "(( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / cycles",
"BriefDescription": "Floating Point Operations Per Cycle",
+ "MetricExpr": "(( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / cycles",
"MetricGroup": "FLOPS",
"MetricName": "FLOPc"
},
{
- "MetricExpr": "(( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"BriefDescription": "Floating Point Operations Per Cycle",
+ "MetricExpr": "(( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"MetricGroup": "FLOPS_SMT",
"MetricName": "FLOPc_SMT"
},
{
- "MetricExpr": "UOPS_EXECUTED.THREAD / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)",
"BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
- "MetricGroup": "Pipeline;Ports_Utilization",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)",
+ "MetricGroup": "Pipeline",
"MetricName": "ILP"
},
{
- "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
"BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear)",
- "MetricGroup": "Branch_Mispredicts",
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "BrMispredicts",
"MetricName": "IpMispredict"
},
{
+ "BriefDescription": "Core actual clocks when any Logical Processor is active on the Physical Core",
"MetricExpr": "( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )",
- "BriefDescription": "Core actual clocks when any thread is active on the physical core",
"MetricGroup": "SMT",
"MetricName": "CORE_CLKS"
},
{
- "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_UOPS_RETIRED.L1_MISS + mem_load_uops_retired.hit_lfb )",
"BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads (in core cycles)",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_UOPS_RETIRED.L1_MISS + mem_load_uops_retired.hit_lfb )",
"MetricGroup": "Memory_Bound;Memory_Lat",
"MetricName": "Load_Miss_Real_Latency"
},
{
+ "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)",
"MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES",
- "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-thread)",
"MetricGroup": "Memory_Bound;Memory_BW",
"MetricName": "MLP"
},
{
- "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / cycles",
"BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
+ "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / cycles",
"MetricGroup": "TLB",
"MetricName": "Page_Walks_Utilization"
},
{
- "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
+ "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"MetricGroup": "TLB_SMT",
"MetricName": "Page_Walks_Utilization_SMT"
},
{
- "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
"BriefDescription": "Average data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
"MetricGroup": "Memory_BW",
"MetricName": "L1D_Cache_Fill_BW"
},
{
- "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
"BriefDescription": "Average data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
"MetricGroup": "Memory_BW",
"MetricName": "L2_Cache_Fill_BW"
},
{
- "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
"BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
"MetricGroup": "Memory_BW",
"MetricName": "L3_Cache_Fill_BW"
},
{
- "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L1_MISS / INST_RETIRED.ANY",
"BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
- "MetricGroup": "Cache_Misses;",
+ "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L1_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "Cache_Misses",
"MetricName": "L1MPKI"
},
{
- "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
"BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads",
- "MetricGroup": "Cache_Misses;",
+ "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "Cache_Misses",
"MetricName": "L2MPKI"
},
{
- "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
"BriefDescription": "L2 cache misses per kilo instruction for all request types (including speculative)",
- "MetricGroup": "Cache_Misses;",
+ "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "Cache_Misses",
"MetricName": "L2MPKI_All"
},
{
- "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
"BriefDescription": "L2 cache hits per kilo instruction for all request types (including speculative)",
- "MetricGroup": "Cache_Misses;",
+ "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "Cache_Misses",
"MetricName": "L2HPKI_All"
},
{
- "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.LLC_MISS / INST_RETIRED.ANY",
"BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads",
- "MetricGroup": "Cache_Misses;",
+ "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.LLC_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "Cache_Misses",
"MetricName": "L3MPKI"
},
{
- "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
"BriefDescription": "Average CPU Utilization",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
"MetricGroup": "Summary",
"MetricName": "CPU_Utilization"
},
{
- "MetricExpr": "( (( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / 1000000000 ) / duration_time",
"BriefDescription": "Giga Floating Point Operations Per Second",
+ "MetricExpr": "( (( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / 1000000000 ) / duration_time",
"MetricGroup": "FLOPS;Summary",
"MetricName": "GFLOPs"
},
{
- "MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
"BriefDescription": "Average Frequency Utilization relative nominal frequency",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
"MetricGroup": "Power",
"MetricName": "Turbo_Utilization"
},
{
+ "BriefDescription": "Fraction of cycles where both hardware Logical Processors were active",
"MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0",
- "BriefDescription": "Fraction of cycles where both hardware threads were active",
"MetricGroup": "SMT;Summary",
"MetricName": "SMT_2T_Utilization"
},
{
- "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
"BriefDescription": "Fraction of cycles spent in Kernel mode",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
"MetricGroup": "Summary",
"MetricName": "Kernel_Utilization"
},
{
- "MetricExpr": "( 64 * ( uncore_imc@cas_count_read@ + uncore_imc@cas_count_write@ ) / 1000000000 ) / duration_time",
"BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
+ "MetricExpr": "( 64 * ( uncore_imc@cas_count_read@ + uncore_imc@cas_count_write@ ) / 1000000000 ) / duration_time",
"MetricGroup": "Memory_BW",
"MetricName": "DRAM_BW_Use"
},
{
- "MetricExpr": "cbox_0@event\\=0x0@",
"BriefDescription": "Socket actual clocks when any core is active on that socket",
+ "MetricExpr": "cbox_0@event\\=0x0@",
"MetricGroup": "",
"MetricName": "Socket_CLKS"
},
{
+ "BriefDescription": "C3 residency percent per core",
"MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
- "BriefDescription": "C3 residency percent per core",
"MetricName": "C3_Core_Residency"
},
{
+ "BriefDescription": "C6 residency percent per core",
"MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
- "BriefDescription": "C6 residency percent per core",
"MetricName": "C6_Core_Residency"
},
{
+ "BriefDescription": "C7 residency percent per core",
"MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
- "BriefDescription": "C7 residency percent per core",
"MetricName": "C7_Core_Residency"
},
{
+ "BriefDescription": "C2 residency percent per package",
"MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
- "BriefDescription": "C2 residency percent per package",
"MetricName": "C2_Pkg_Residency"
},
{
+ "BriefDescription": "C3 residency percent per package",
"MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
- "BriefDescription": "C3 residency percent per package",
"MetricName": "C3_Pkg_Residency"
},
{
+ "BriefDescription": "C6 residency percent per package",
"MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
- "BriefDescription": "C6 residency percent per package",
"MetricName": "C6_Pkg_Residency"
},
{
+ "BriefDescription": "C7 residency percent per package",
"MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
- "BriefDescription": "C7 residency percent per package",
"MetricName": "C7_Pkg_Residency"
}
]
diff --git a/tools/perf/pmu-events/arch/x86/jaketown/jkt-metrics.json b/tools/perf/pmu-events/arch/x86/jaketown/jkt-metrics.json
index 98c73e430b05..603ff9c2e9a1 100644
--- a/tools/perf/pmu-events/arch/x86/jaketown/jkt-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/jaketown/jkt-metrics.json
@@ -1,232 +1,232 @@
[
{
- "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)",
- "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.",
"BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
+ "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)",
"MetricGroup": "TopdownL1",
- "MetricName": "Frontend_Bound"
+ "MetricName": "Frontend_Bound",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound."
},
{
- "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
- "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
"BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
"MetricGroup": "TopdownL1_SMT",
- "MetricName": "Frontend_Bound_SMT"
+ "MetricName": "Frontend_Bound_SMT",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU."
},
{
- "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)",
- "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
"BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations",
+ "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)",
"MetricGroup": "TopdownL1",
- "MetricName": "Bad_Speculation"
+ "MetricName": "Bad_Speculation",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example."
},
{
- "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
- "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU.",
"BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
"MetricGroup": "TopdownL1_SMT",
- "MetricName": "Bad_Speculation_SMT"
+ "MetricName": "Bad_Speculation_SMT",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU."
},
{
- "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )",
- "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
"BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
+ "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )",
"MetricGroup": "TopdownL1",
- "MetricName": "Backend_Bound"
+ "MetricName": "Backend_Bound",
+ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound."
},
{
- "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )",
- "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
"BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )",
"MetricGroup": "TopdownL1_SMT",
- "MetricName": "Backend_Bound_SMT"
+ "MetricName": "Backend_Bound_SMT",
+ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. SMT version; use when SMT is enabled and measuring per logical CPU."
},
{
- "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)",
- "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. ",
"BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)",
"MetricGroup": "TopdownL1",
- "MetricName": "Retiring"
+ "MetricName": "Retiring",
+ "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. "
},
{
- "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
- "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. SMT version; use when SMT is enabled and measuring per logical CPU.",
"BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
"MetricGroup": "TopdownL1_SMT",
- "MetricName": "Retiring_SMT"
+ "MetricName": "Retiring_SMT",
+ "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. SMT version; use when SMT is enabled and measuring per logical CPU."
},
{
+ "BriefDescription": "Instructions Per Cycle (per Logical Processor)",
"MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
- "BriefDescription": "Instructions Per Cycle (per logical thread)",
"MetricGroup": "TopDownL1",
"MetricName": "IPC"
},
{
- "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
"BriefDescription": "Uops Per Instruction",
- "MetricGroup": "Pipeline;Retiring",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
+ "MetricGroup": "Pipeline;Retire",
"MetricName": "UPI"
},
{
- "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 32 * ( ICACHE.HIT + ICACHE.MISSES ) / 4 ) )",
"BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely (includes speculatively fetches) consumed by program instructions",
- "MetricGroup": "PGO",
+ "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 32 * ( ICACHE.HIT + ICACHE.MISSES ) / 4 ) )",
+ "MetricGroup": "PGO;IcMiss",
"MetricName": "IFetch_Line_Utilization"
},
{
- "MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ) )",
"BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
- "MetricGroup": "DSB;Frontend_Bandwidth",
+ "MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ) )",
+ "MetricGroup": "DSB;Fetch_BW",
"MetricName": "DSB_Coverage"
},
{
+ "BriefDescription": "Cycles Per Instruction (per Logical Processor)",
"MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
- "BriefDescription": "Cycles Per Instruction (threaded)",
"MetricGroup": "Pipeline;Summary",
"MetricName": "CPI"
},
{
+ "BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active.",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD",
- "BriefDescription": "Per-thread actual clocks when the logical processor is active.",
"MetricGroup": "Summary",
"MetricName": "CLKS"
},
{
+ "BriefDescription": "Total issue-pipeline slots (per-Physical Core)",
"MetricExpr": "4 * cycles",
- "BriefDescription": "Total issue-pipeline slots (per core)",
"MetricGroup": "TopDownL1",
"MetricName": "SLOTS"
},
{
+ "BriefDescription": "Total issue-pipeline slots (per-Physical Core)",
"MetricExpr": "4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
- "BriefDescription": "Total issue-pipeline slots (per core)",
"MetricGroup": "TopDownL1_SMT",
"MetricName": "SLOTS_SMT"
},
{
- "MetricExpr": "INST_RETIRED.ANY",
"BriefDescription": "Total number of retired Instructions",
+ "MetricExpr": "INST_RETIRED.ANY",
"MetricGroup": "Summary",
"MetricName": "Instructions"
},
{
- "MetricExpr": "INST_RETIRED.ANY / cycles",
"BriefDescription": "Instructions Per Cycle (per physical core)",
+ "MetricExpr": "INST_RETIRED.ANY / cycles",
"MetricGroup": "SMT",
"MetricName": "CoreIPC"
},
{
- "MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"BriefDescription": "Instructions Per Cycle (per physical core)",
+ "MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"MetricGroup": "SMT",
"MetricName": "CoreIPC_SMT"
},
{
- "MetricExpr": "(( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / cycles",
"BriefDescription": "Floating Point Operations Per Cycle",
+ "MetricExpr": "(( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / cycles",
"MetricGroup": "FLOPS",
"MetricName": "FLOPc"
},
{
- "MetricExpr": "(( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"BriefDescription": "Floating Point Operations Per Cycle",
+ "MetricExpr": "(( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"MetricGroup": "FLOPS_SMT",
"MetricName": "FLOPc_SMT"
},
{
- "MetricExpr": "UOPS_DISPATCHED.THREAD / (( cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@)",
"BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
- "MetricGroup": "Pipeline;Ports_Utilization",
+ "MetricExpr": "UOPS_DISPATCHED.THREAD / (( cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@)",
+ "MetricGroup": "Pipeline",
"MetricName": "ILP"
},
{
+ "BriefDescription": "Core actual clocks when any Logical Processor is active on the Physical Core",
"MetricExpr": "( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )",
- "BriefDescription": "Core actual clocks when any thread is active on the physical core",
"MetricGroup": "SMT",
"MetricName": "CORE_CLKS"
},
{
- "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
"BriefDescription": "Average CPU Utilization",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
"MetricGroup": "Summary",
"MetricName": "CPU_Utilization"
},
{
- "MetricExpr": "( (( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / 1000000000 ) / duration_time",
"BriefDescription": "Giga Floating Point Operations Per Second",
+ "MetricExpr": "( (( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / 1000000000 ) / duration_time",
"MetricGroup": "FLOPS;Summary",
"MetricName": "GFLOPs"
},
{
- "MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
"BriefDescription": "Average Frequency Utilization relative nominal frequency",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
"MetricGroup": "Power",
"MetricName": "Turbo_Utilization"
},
{
+ "BriefDescription": "Fraction of cycles where both hardware Logical Processors were active",
"MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0",
- "BriefDescription": "Fraction of cycles where both hardware threads were active",
"MetricGroup": "SMT;Summary",
"MetricName": "SMT_2T_Utilization"
},
{
- "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
"BriefDescription": "Fraction of cycles spent in Kernel mode",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
"MetricGroup": "Summary",
"MetricName": "Kernel_Utilization"
},
{
- "MetricExpr": "( 64 * ( uncore_imc@cas_count_read@ + uncore_imc@cas_count_write@ ) / 1000000000 ) / duration_time",
"BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
+ "MetricExpr": "( 64 * ( uncore_imc@cas_count_read@ + uncore_imc@cas_count_write@ ) / 1000000000 ) / duration_time",
"MetricGroup": "Memory_BW",
"MetricName": "DRAM_BW_Use"
},
{
- "MetricExpr": "cbox_0@event\\=0x0@",
"BriefDescription": "Socket actual clocks when any core is active on that socket",
+ "MetricExpr": "cbox_0@event\\=0x0@",
"MetricGroup": "",
"MetricName": "Socket_CLKS"
},
{
+ "BriefDescription": "C3 residency percent per core",
"MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
- "BriefDescription": "C3 residency percent per core",
"MetricName": "C3_Core_Residency"
},
{
+ "BriefDescription": "C6 residency percent per core",
"MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
- "BriefDescription": "C6 residency percent per core",
"MetricName": "C6_Core_Residency"
},
{
+ "BriefDescription": "C7 residency percent per core",
"MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
- "BriefDescription": "C7 residency percent per core",
"MetricName": "C7_Core_Residency"
},
{
+ "BriefDescription": "C2 residency percent per package",
"MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
- "BriefDescription": "C2 residency percent per package",
"MetricName": "C2_Pkg_Residency"
},
{
+ "BriefDescription": "C3 residency percent per package",
"MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
- "BriefDescription": "C3 residency percent per package",
"MetricName": "C3_Pkg_Residency"
},
{
+ "BriefDescription": "C6 residency percent per package",
"MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
- "BriefDescription": "C6 residency percent per package",
"MetricName": "C6_Pkg_Residency"
},
{
+ "BriefDescription": "C7 residency percent per package",
"MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
- "BriefDescription": "C7 residency percent per package",
"MetricName": "C7_Pkg_Residency"
}
]
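
The four TopdownL1 metrics in the file above partition all issue slots: Frontend_Bound, Bad_Speculation and Retiring are each a counter ratio over SLOTS (4 * cycles), and Backend_Bound is defined as the remainder, so the four fractions always sum to 1. The following is a minimal Python sketch of that arithmetic, not part of the patch; the function name and the hypothetical counter totals are illustrative only, chosen as 'perf stat'-style raw counts.

    def topdown_l1(idq_uops_not_delivered, uops_issued, uops_retired_slots,
                   recovery_cycles, cycles):
        """Level-1 Top-Down breakdown as defined by the metrics above."""
        slots = 4 * cycles                        # SLOTS: 4 issue slots/cycle
        frontend_bound = idq_uops_not_delivered / slots
        bad_speculation = (uops_issued - uops_retired_slots
                           + 4 * recovery_cycles) / slots
        retiring = uops_retired_slots / slots
        # Backend_Bound is the remainder, per the "1 - (...)" MetricExpr.
        backend_bound = 1 - (frontend_bound + bad_speculation + retiring)
        return {"Frontend_Bound": frontend_bound,
                "Bad_Speculation": bad_speculation,
                "Backend_Bound": backend_bound,
                "Retiring": retiring}

    # Hypothetical counter totals; the fractions sum to 1 by construction:
    # {'Frontend_Bound': 0.2, 'Bad_Speculation': 0.15,
    #  'Backend_Bound': 0.15, 'Retiring': 0.5}
    print(topdown_l1(idq_uops_not_delivered=0.8e9, uops_issued=2.4e9,
                     uops_retired_slots=2.0e9, recovery_cycles=0.05e9,
                     cycles=1.0e9))
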
diff --git a/tools/perf/pmu-events/arch/x86/sandybridge/snb-metrics.json b/tools/perf/pmu-events/arch/x86/sandybridge/snb-metrics.json
index cfeba5067bab..c6b485b3a2cb 100644
--- a/tools/perf/pmu-events/arch/x86/sandybridge/snb-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/sandybridge/snb-metrics.json
@@ -1,226 +1,226 @@
[
{
- "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)",
- "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.",
"BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
+ "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)",
"MetricGroup": "TopdownL1",
- "MetricName": "Frontend_Bound"
+ "MetricName": "Frontend_Bound",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound."
},
{
- "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
- "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
"BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
"MetricGroup": "TopdownL1_SMT",
- "MetricName": "Frontend_Bound_SMT"
+ "MetricName": "Frontend_Bound_SMT",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU."
},
{
- "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)",
- "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
"BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations",
+ "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)",
"MetricGroup": "TopdownL1",
- "MetricName": "Bad_Speculation"
+ "MetricName": "Bad_Speculation",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example."
},
{
- "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
- "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU.",
"BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
"MetricGroup": "TopdownL1_SMT",
- "MetricName": "Bad_Speculation_SMT"
+ "MetricName": "Bad_Speculation_SMT",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU."
},
{
- "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )",
- "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
"BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
+ "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )",
"MetricGroup": "TopdownL1",
- "MetricName": "Backend_Bound"
+ "MetricName": "Backend_Bound",
+ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound."
},
{
- "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )",
- "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
"BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )",
"MetricGroup": "TopdownL1_SMT",
- "MetricName": "Backend_Bound_SMT"
+ "MetricName": "Backend_Bound_SMT",
+ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. SMT version; use when SMT is enabled and measuring per logical CPU."
},
{
- "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)",
- "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. ",
"BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)",
"MetricGroup": "TopdownL1",
- "MetricName": "Retiring"
+ "MetricName": "Retiring",
+ "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. "
},
{
- "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
- "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. SMT version; use when SMT is enabled and measuring per logical CPU.",
"BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
"MetricGroup": "TopdownL1_SMT",
- "MetricName": "Retiring_SMT"
+ "MetricName": "Retiring_SMT",
+ "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. SMT version; use when SMT is enabled and measuring per logical CPU."
},
{
+ "BriefDescription": "Instructions Per Cycle (per Logical Processor)",
"MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
- "BriefDescription": "Instructions Per Cycle (per logical thread)",
"MetricGroup": "TopDownL1",
"MetricName": "IPC"
},
{
- "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
"BriefDescription": "Uops Per Instruction",
- "MetricGroup": "Pipeline;Retiring",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
+ "MetricGroup": "Pipeline;Retire",
"MetricName": "UPI"
},
{
- "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 32 * ( ICACHE.HIT + ICACHE.MISSES ) / 4 ) )",
"BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely (includes speculatively fetches) consumed by program instructions",
- "MetricGroup": "PGO",
+ "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 32 * ( ICACHE.HIT + ICACHE.MISSES ) / 4 ) )",
+ "MetricGroup": "PGO;IcMiss",
"MetricName": "IFetch_Line_Utilization"
},
{
- "MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ) )",
"BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
- "MetricGroup": "DSB;Frontend_Bandwidth",
+ "MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ) )",
+ "MetricGroup": "DSB;Fetch_BW",
"MetricName": "DSB_Coverage"
},
{
+ "BriefDescription": "Cycles Per Instruction (per Logical Processor)",
"MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
- "BriefDescription": "Cycles Per Instruction (threaded)",
"MetricGroup": "Pipeline;Summary",
"MetricName": "CPI"
},
{
+ "BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active.",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD",
- "BriefDescription": "Per-thread actual clocks when the logical processor is active.",
"MetricGroup": "Summary",
"MetricName": "CLKS"
},
{
+ "BriefDescription": "Total issue-pipeline slots (per-Physical Core)",
"MetricExpr": "4 * cycles",
- "BriefDescription": "Total issue-pipeline slots (per core)",
"MetricGroup": "TopDownL1",
"MetricName": "SLOTS"
},
{
+ "BriefDescription": "Total issue-pipeline slots (per-Physical Core)",
"MetricExpr": "4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
- "BriefDescription": "Total issue-pipeline slots (per core)",
"MetricGroup": "TopDownL1_SMT",
"MetricName": "SLOTS_SMT"
},
{
- "MetricExpr": "INST_RETIRED.ANY",
"BriefDescription": "Total number of retired Instructions",
+ "MetricExpr": "INST_RETIRED.ANY",
"MetricGroup": "Summary",
"MetricName": "Instructions"
},
{
- "MetricExpr": "INST_RETIRED.ANY / cycles",
"BriefDescription": "Instructions Per Cycle (per physical core)",
+ "MetricExpr": "INST_RETIRED.ANY / cycles",
"MetricGroup": "SMT",
"MetricName": "CoreIPC"
},
{
- "MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"BriefDescription": "Instructions Per Cycle (per physical core)",
+ "MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"MetricGroup": "SMT",
"MetricName": "CoreIPC_SMT"
},
{
- "MetricExpr": "(( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / cycles",
"BriefDescription": "Floating Point Operations Per Cycle",
+ "MetricExpr": "(( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / cycles",
"MetricGroup": "FLOPS",
"MetricName": "FLOPc"
},
{
- "MetricExpr": "(( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"BriefDescription": "Floating Point Operations Per Cycle",
+ "MetricExpr": "(( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"MetricGroup": "FLOPS_SMT",
"MetricName": "FLOPc_SMT"
},
{
- "MetricExpr": "UOPS_DISPATCHED.THREAD / (( cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@)",
"BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
- "MetricGroup": "Pipeline;Ports_Utilization",
+ "MetricExpr": "UOPS_DISPATCHED.THREAD / (( cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@)",
+ "MetricGroup": "Pipeline",
"MetricName": "ILP"
},
{
+ "BriefDescription": "Core actual clocks when any Logical Processor is active on the Physical Core",
"MetricExpr": "( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )",
- "BriefDescription": "Core actual clocks when any thread is active on the physical core",
"MetricGroup": "SMT",
"MetricName": "CORE_CLKS"
},
{
- "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
"BriefDescription": "Average CPU Utilization",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
"MetricGroup": "Summary",
"MetricName": "CPU_Utilization"
},
{
- "MetricExpr": "( (( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / 1000000000 ) / duration_time",
"BriefDescription": "Giga Floating Point Operations Per Second",
+ "MetricExpr": "( (( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / 1000000000 ) / duration_time",
"MetricGroup": "FLOPS;Summary",
"MetricName": "GFLOPs"
},
{
- "MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
"BriefDescription": "Average Frequency Utilization relative nominal frequency",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
"MetricGroup": "Power",
"MetricName": "Turbo_Utilization"
},
{
+ "BriefDescription": "Fraction of cycles where both hardware Logical Processors were active",
"MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0",
- "BriefDescription": "Fraction of cycles where both hardware threads were active",
"MetricGroup": "SMT;Summary",
"MetricName": "SMT_2T_Utilization"
},
{
- "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
"BriefDescription": "Fraction of cycles spent in Kernel mode",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
"MetricGroup": "Summary",
"MetricName": "Kernel_Utilization"
},
{
- "MetricExpr": "64 * ( arb@event\\=0x81\\,umask\\=0x1@ + arb@event\\=0x84\\,umask\\=0x1@ ) / 1000000 / duration_time / 1000",
"BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
+ "MetricExpr": "64 * ( arb@event\\=0x81\\,umask\\=0x1@ + arb@event\\=0x84\\,umask\\=0x1@ ) / 1000000 / duration_time / 1000",
"MetricGroup": "Memory_BW",
"MetricName": "DRAM_BW_Use"
},
{
+ "BriefDescription": "C3 residency percent per core",
"MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
- "BriefDescription": "C3 residency percent per core",
"MetricName": "C3_Core_Residency"
},
{
+ "BriefDescription": "C6 residency percent per core",
"MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
- "BriefDescription": "C6 residency percent per core",
"MetricName": "C6_Core_Residency"
},
{
+ "BriefDescription": "C7 residency percent per core",
"MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
- "BriefDescription": "C7 residency percent per core",
"MetricName": "C7_Core_Residency"
},
{
+ "BriefDescription": "C2 residency percent per package",
"MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
- "BriefDescription": "C2 residency percent per package",
"MetricName": "C2_Pkg_Residency"
},
{
+ "BriefDescription": "C3 residency percent per package",
"MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
- "BriefDescription": "C3 residency percent per package",
"MetricName": "C3_Pkg_Residency"
},
{
+ "BriefDescription": "C6 residency percent per package",
"MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
- "BriefDescription": "C6 residency percent per package",
"MetricName": "C6_Pkg_Residency"
},
{
+ "BriefDescription": "C7 residency percent per package",
"MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
- "BriefDescription": "C7 residency percent per package",
"MetricName": "C7_Pkg_Residency"
}
]
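
In each of the *_SMT metric variants above, the plain cycles denominator is replaced by the CORE_CLKS estimate, which reconstructs Physical Core clocks from one Logical Processor's counters. A small sketch of that estimate follows, again with illustrative names only, assuming raw counter totals:

    def core_clks(clk_unhalted_thread, one_thread_active, ref_xclk):
        # CORE_CLKS: halve the per-Logical-Processor clock count, then scale
        # it back up by the fraction of reference cycles where only one
        # sibling Logical Processor was active.
        return (clk_unhalted_thread / 2) * (1 + one_thread_active / ref_xclk)

    def slots_smt(clk_unhalted_thread, one_thread_active, ref_xclk):
        # SLOTS_SMT: 4 issue slots per Physical Core cycle.
        return 4 * core_clks(clk_unhalted_thread, one_thread_active, ref_xclk)

At the two extremes this behaves as expected: with both siblings always active, ONE_THREAD_ACTIVE is 0 and CORE_CLKS collapses to CPU_CLK_UNHALTED.THREAD / 2; with the sibling always idle, the ratio approaches 1 and CORE_CLKS approaches CPU_CLK_UNHALTED.THREAD.
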
diff --git a/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json b/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json
index 2c95417a4dae..0ca539bb60f6 100644
--- a/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json
@@ -1,364 +1,370 @@
[
{
- "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)",
- "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.",
"BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
+ "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)",
"MetricGroup": "TopdownL1",
- "MetricName": "Frontend_Bound"
+ "MetricName": "Frontend_Bound",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound."
},
{
- "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
- "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
"BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
"MetricGroup": "TopdownL1_SMT",
- "MetricName": "Frontend_Bound_SMT"
+ "MetricName": "Frontend_Bound_SMT",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU."
},
{
- "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)",
- "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
"BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations",
+ "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)",
"MetricGroup": "TopdownL1",
- "MetricName": "Bad_Speculation"
+ "MetricName": "Bad_Speculation",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example."
},
{
- "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
- "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU.",
"BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
"MetricGroup": "TopdownL1_SMT",
- "MetricName": "Bad_Speculation_SMT"
+ "MetricName": "Bad_Speculation_SMT",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU."
},
{
- "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )",
- "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
"BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
+ "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )",
"MetricGroup": "TopdownL1",
- "MetricName": "Backend_Bound"
+ "MetricName": "Backend_Bound",
+ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound."
},
{
- "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )",
- "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
"BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )",
"MetricGroup": "TopdownL1_SMT",
- "MetricName": "Backend_Bound_SMT"
+ "MetricName": "Backend_Bound_SMT",
+ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. SMT version; use when SMT is enabled and measuring per logical CPU."
},
{
- "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)",
- "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. ",
"BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)",
"MetricGroup": "TopdownL1",
- "MetricName": "Retiring"
+ "MetricName": "Retiring",
+ "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessarily mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided."
},
{
- "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
- "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. SMT version; use when SMT is enabled and measuring per logical CPU.",
"BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
"MetricGroup": "TopdownL1_SMT",
- "MetricName": "Retiring_SMT"
+ "MetricName": "Retiring_SMT",
+ "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessarily mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. SMT version; use when SMT is enabled and measuring per logical CPU."
},
{
+ "BriefDescription": "Instructions Per Cycle (per Logical Processor)",
"MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
- "BriefDescription": "Instructions Per Cycle (per logical thread)",
"MetricGroup": "TopDownL1",
"MetricName": "IPC"
},
{
- "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
"BriefDescription": "Uops Per Instruction",
- "MetricGroup": "Pipeline;Retiring",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
+ "MetricGroup": "Pipeline;Retire",
"MetricName": "UPI"
},
{
- "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
"BriefDescription": "Instruction per taken branch",
- "MetricGroup": "Branches;PGO",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
+ "MetricGroup": "Branches;Fetch_BW;PGO",
"MetricName": "IpTB"
},
{
- "MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
"BriefDescription": "Branch instructions per taken branch. ",
+ "MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;PGO",
"MetricName": "BpTB"
},
{
- "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 64 * ( ICACHE_64B.IFTAG_HIT + ICACHE_64B.IFTAG_MISS ) / 4.1 ) )",
"BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely (includes speculatively fetches) consumed by program instructions",
- "MetricGroup": "PGO",
+ "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 64 * ( ICACHE_64B.IFTAG_HIT + ICACHE_64B.IFTAG_MISS ) / 4.1 ) )",
+ "MetricGroup": "PGO;IcMiss",
"MetricName": "IFetch_Line_Utilization"
},
{
- "MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ))",
"BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
- "MetricGroup": "DSB;Frontend_Bandwidth",
+ "MetricExpr": "IDQ.DSB_UOPS / (IDQ.DSB_UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS)",
+ "MetricGroup": "DSB;Fetch_BW",
"MetricName": "DSB_Coverage"
},
{
+ "BriefDescription": "Cycles Per Instruction (per Logical Processor)",
"MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
- "BriefDescription": "Cycles Per Instruction (threaded)",
"MetricGroup": "Pipeline;Summary",
"MetricName": "CPI"
},
{
+ "BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active.",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD",
- "BriefDescription": "Per-thread actual clocks when the logical processor is active.",
"MetricGroup": "Summary",
"MetricName": "CLKS"
},
{
+ "BriefDescription": "Total issue-pipeline slots (per-Physical Core)",
"MetricExpr": "4 * cycles",
- "BriefDescription": "Total issue-pipeline slots (per core)",
"MetricGroup": "TopDownL1",
"MetricName": "SLOTS"
},
{
+ "BriefDescription": "Total issue-pipeline slots (per-Physical Core)",
"MetricExpr": "4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
- "BriefDescription": "Total issue-pipeline slots (per core)",
"MetricGroup": "TopDownL1_SMT",
"MetricName": "SLOTS_SMT"
},
{
+ "BriefDescription": "Instructions per Load (lower number means higher occurrence rate)",
"MetricExpr": "INST_RETIRED.ANY / MEM_INST_RETIRED.ALL_LOADS",
- "BriefDescription": "Instructions per Load (lower number means loads are more frequent)",
- "MetricGroup": "Instruction_Type;L1_Bound",
+ "MetricGroup": "Instruction_Type",
"MetricName": "IpL"
},
{
+ "BriefDescription": "Instructions per Store (lower number means higher occurrence rate)",
"MetricExpr": "INST_RETIRED.ANY / MEM_INST_RETIRED.ALL_STORES",
- "BriefDescription": "Instructions per Store",
- "MetricGroup": "Instruction_Type;Store_Bound",
+ "MetricGroup": "Instruction_Type",
"MetricName": "IpS"
},
{
+ "BriefDescription": "Instructions per Branch (lower number means higher occurrence rate)",
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES",
- "BriefDescription": "Instructions per Branch",
- "MetricGroup": "Branches;Instruction_Type;Port_5;Port_6",
+ "MetricGroup": "Branches;Instruction_Type",
"MetricName": "IpB"
},
{
+ "BriefDescription": "Instructions per (near) call (lower number means higher occurrence rate)",
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_CALL",
- "BriefDescription": "Instruction per (near) call",
"MetricGroup": "Branches",
"MetricName": "IpCall"
},
{
- "MetricExpr": "INST_RETIRED.ANY",
"BriefDescription": "Total number of retired Instructions",
+ "MetricExpr": "INST_RETIRED.ANY",
"MetricGroup": "Summary",
"MetricName": "Instructions"
},
{
- "MetricExpr": "INST_RETIRED.ANY / cycles",
"BriefDescription": "Instructions Per Cycle (per physical core)",
+ "MetricExpr": "INST_RETIRED.ANY / cycles",
"MetricGroup": "SMT",
"MetricName": "CoreIPC"
},
{
- "MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"BriefDescription": "Instructions Per Cycle (per physical core)",
+ "MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"MetricGroup": "SMT",
"MetricName": "CoreIPC_SMT"
},
{
- "MetricExpr": "(( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / cycles",
"BriefDescription": "Floating Point Operations Per Cycle",
+ "MetricExpr": "(( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / cycles",
"MetricGroup": "FLOPS",
"MetricName": "FLOPc"
},
{
- "MetricExpr": "(( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"BriefDescription": "Floating Point Operations Per Cycle",
+ "MetricExpr": "(( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"MetricGroup": "FLOPS_SMT",
"MetricName": "FLOPc_SMT"
},
{
- "MetricExpr": "UOPS_EXECUTED.THREAD / (( UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 ) if #SMT_on else UOPS_EXECUTED.CORE_CYCLES_GE_1)",
"BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
- "MetricGroup": "Pipeline;Ports_Utilization",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / (( UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 ) if #SMT_on else UOPS_EXECUTED.CORE_CYCLES_GE_1)",
+ "MetricGroup": "Pipeline",
"MetricName": "ILP"
},
{
+ "BriefDescription": "Branch Misprediction Cost: Fraction of TopDown slots wasted per non-speculative branch misprediction (jeclear)",
"MetricExpr": "( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * cycles)) * (( INT_MISC.CLEAR_RESTEER_CYCLES + 9 * BACLEARS.ANY ) / cycles) / (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * cycles)) ) * (4 * cycles) / BR_MISP_RETIRED.ALL_BRANCHES",
- "BriefDescription": "Branch Misprediction Cost: Fraction of TopDown slots wasted per branch misprediction (jeclear and baclear)",
- "MetricGroup": "Branch_Mispredicts",
+ "MetricGroup": "BrMispredicts",
"MetricName": "Branch_Misprediction_Cost"
},
{
+ "BriefDescription": "Branch Misprediction Cost: Fraction of TopDown slots wasted per non-speculative branch misprediction (jeclear)",
"MetricExpr": "( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) * (( INT_MISC.CLEAR_RESTEER_CYCLES + 9 * BACLEARS.ANY ) / cycles) / (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) ) * (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) / BR_MISP_RETIRED.ALL_BRANCHES",
- "BriefDescription": "Branch Misprediction Cost: Fraction of TopDown slots wasted per branch misprediction (jeclear and baclear)",
- "MetricGroup": "Branch_Mispredicts_SMT",
+ "MetricGroup": "BrMispredicts_SMT",
"MetricName": "Branch_Misprediction_Cost_SMT"
},
{
- "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
"BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear)",
- "MetricGroup": "Branch_Mispredicts",
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "BrMispredicts",
"MetricName": "IpMispredict"
},
{
+ "BriefDescription": "Core actual clocks when any Logical Processor is active on the Physical Core",
"MetricExpr": "( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )",
- "BriefDescription": "Core actual clocks when any thread is active on the physical core",
"MetricGroup": "SMT",
"MetricName": "CORE_CLKS"
},
{
- "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_RETIRED.L1_MISS + MEM_LOAD_RETIRED.FB_HIT )",
"BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads (in core cycles)",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_RETIRED.L1_MISS + MEM_LOAD_RETIRED.FB_HIT )",
"MetricGroup": "Memory_Bound;Memory_Lat",
"MetricName": "Load_Miss_Real_Latency"
},
{
+ "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand loads when there is at least one such miss. Per-Logical Processor)",
"MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES",
- "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-thread)",
"MetricGroup": "Memory_Bound;Memory_BW",
"MetricName": "MLP"
},
{
- "MetricExpr": "( ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING + EPT.WALK_PENDING ) / ( 2 * cycles )",
"BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
+ "MetricExpr": "( ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING + EPT.WALK_PENDING ) / ( 2 * cycles )",
"MetricGroup": "TLB",
"MetricName": "Page_Walks_Utilization"
},
{
- "MetricExpr": "( ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING + EPT.WALK_PENDING ) / ( 2 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )) )",
"BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
+ "MetricExpr": "( ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING + EPT.WALK_PENDING ) / ( 2 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )) )",
"MetricGroup": "TLB_SMT",
"MetricName": "Page_Walks_Utilization_SMT"
},
{
- "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
"BriefDescription": "Average data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
"MetricGroup": "Memory_BW",
"MetricName": "L1D_Cache_Fill_BW"
},
{
- "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
"BriefDescription": "Average data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
"MetricGroup": "Memory_BW",
"MetricName": "L2_Cache_Fill_BW"
},
{
- "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
"BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
"MetricGroup": "Memory_BW",
"MetricName": "L3_Cache_Fill_BW"
},
{
- "MetricExpr": "64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1000000000 / duration_time",
"BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1000000000 / duration_time",
"MetricGroup": "Memory_BW",
"MetricName": "L3_Cache_Access_BW"
},
{
- "MetricExpr": "1000 * MEM_LOAD_RETIRED.L1_MISS / INST_RETIRED.ANY",
"BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
- "MetricGroup": "Cache_Misses;",
+ "MetricExpr": "1000 * MEM_LOAD_RETIRED.L1_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "Cache_Misses",
"MetricName": "L1MPKI"
},
{
- "MetricExpr": "1000 * MEM_LOAD_RETIRED.L2_MISS / INST_RETIRED.ANY",
"BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads",
- "MetricGroup": "Cache_Misses;",
+ "MetricExpr": "1000 * MEM_LOAD_RETIRED.L2_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "Cache_Misses",
"MetricName": "L2MPKI"
},
{
- "MetricExpr": "1000 * L2_RQSTS.MISS / INST_RETIRED.ANY",
"BriefDescription": "L2 cache misses per kilo instruction for all request types (including speculative)",
- "MetricGroup": "Cache_Misses;",
+ "MetricExpr": "1000 * L2_RQSTS.MISS / INST_RETIRED.ANY",
+ "MetricGroup": "Cache_Misses",
"MetricName": "L2MPKI_All"
},
{
- "MetricExpr": "1000 * ( L2_RQSTS.REFERENCES - L2_RQSTS.MISS ) / INST_RETIRED.ANY",
"BriefDescription": "L2 cache hits per kilo instruction for all request types (including speculative)",
- "MetricGroup": "Cache_Misses;",
+ "MetricExpr": "1000 * ( L2_RQSTS.REFERENCES - L2_RQSTS.MISS ) / INST_RETIRED.ANY",
+ "MetricGroup": "Cache_Misses",
"MetricName": "L2HPKI_All"
},
{
- "MetricExpr": "1000 * MEM_LOAD_RETIRED.L3_MISS / INST_RETIRED.ANY",
"BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads",
- "MetricGroup": "Cache_Misses;",
+ "MetricExpr": "1000 * MEM_LOAD_RETIRED.L3_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "Cache_Misses",
"MetricName": "L3MPKI"
},
{
- "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
"BriefDescription": "Average CPU Utilization",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
"MetricGroup": "Summary",
"MetricName": "CPU_Utilization"
},
{
- "MetricExpr": "( (( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / 1000000000 ) / duration_time",
"BriefDescription": "Giga Floating Point Operations Per Second",
+ "MetricExpr": "( (( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / 1000000000 ) / duration_time",
"MetricGroup": "FLOPS;Summary",
"MetricName": "GFLOPs"
},
{
- "MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
"BriefDescription": "Average Frequency Utilization relative nominal frequency",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
"MetricGroup": "Power",
"MetricName": "Turbo_Utilization"
},
{
+ "BriefDescription": "Fraction of cycles where both hardware Logical Processors were active",
"MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0",
- "BriefDescription": "Fraction of cycles where both hardware threads were active",
"MetricGroup": "SMT;Summary",
"MetricName": "SMT_2T_Utilization"
},
{
- "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
"BriefDescription": "Fraction of cycles spent in Kernel mode",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
"MetricGroup": "Summary",
"MetricName": "Kernel_Utilization"
},
{
- "MetricExpr": "64 * ( arb@event\\=0x81\\,umask\\=0x1@ + arb@event\\=0x84\\,umask\\=0x1@ ) / 1000000 / duration_time / 1000",
"BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
+ "MetricExpr": "64 * ( arb@event\\=0x81\\,umask\\=0x1@ + arb@event\\=0x84\\,umask\\=0x1@ ) / 1000000 / duration_time / 1000",
"MetricGroup": "Memory_BW",
"MetricName": "DRAM_BW_Use"
},
{
- "MetricExpr": "arb@event\\=0x80\\,umask\\=0x2@ / arb@event\\=0x80\\,umask\\=0x2\\,thresh\\=1@",
"BriefDescription": "Average number of parallel data read requests to external memory. Accounts for demand loads and L1/L2 prefetches",
+ "MetricExpr": "arb@event\\=0x80\\,umask\\=0x2@ / arb@event\\=0x80\\,umask\\=0x2\\,thresh\\=1@",
"MetricGroup": "Memory_BW",
"MetricName": "DRAM_Parallel_Reads"
},
{
+ "BriefDescription": "Instructions per Far Branch (Far Branches apply upon transition from application to operating system, handling interrupts, exceptions)",
+ "MetricExpr": "INST_RETIRED.ANY / ( BR_INST_RETIRED.FAR_BRANCH / 2 )",
+ "MetricGroup": "",
+ "MetricName": "IpFarBranch"
+ },
+ {
+ "BriefDescription": "C3 residency percent per core",
"MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
- "BriefDescription": "C3 residency percent per core",
"MetricName": "C3_Core_Residency"
},
{
+ "BriefDescription": "C6 residency percent per core",
"MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
- "BriefDescription": "C6 residency percent per core",
"MetricName": "C6_Core_Residency"
},
{
+ "BriefDescription": "C7 residency percent per core",
"MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
- "BriefDescription": "C7 residency percent per core",
"MetricName": "C7_Core_Residency"
},
{
+ "BriefDescription": "C2 residency percent per package",
"MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
- "BriefDescription": "C2 residency percent per package",
"MetricName": "C2_Pkg_Residency"
},
{
+ "BriefDescription": "C3 residency percent per package",
"MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
- "BriefDescription": "C3 residency percent per package",
"MetricName": "C3_Pkg_Residency"
},
{
+ "BriefDescription": "C6 residency percent per package",
"MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
- "BriefDescription": "C6 residency percent per package",
"MetricName": "C6_Pkg_Residency"
},
{
+ "BriefDescription": "C7 residency percent per package",
"MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
- "BriefDescription": "C7 residency percent per package",
"MetricName": "C7_Pkg_Residency"
}
]
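
The TopdownL1 expressions in these metric files compose in a fixed way: CORE_CLKS gives the clock base (with the SMT form derived from CPU_CLK_UNHALTED.THREAD and the one-thread-active ratio), SLOTS budgets 4 issue slots per core clock, three of the level-1 fractions divide counter deltas by SLOTS, and Backend_Bound is defined as the remainder. A minimal Python sketch of that evaluation, assuming "cycles" in the expressions resolves to CPU_CLK_UNHALTED.THREAD and using hypothetical counter values rather than measured data:

def topdown_l1(c, smt_on=False):
    """Return the four TopDown level-1 fractions from a dict of counter deltas."""
    if smt_on:
        # SMT form of CORE_CLKS, as used by the *_SMT metrics above.
        core_clks = (c["CPU_CLK_UNHALTED.THREAD"] / 2) * (
            1 + c["CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE"]
              / c["CPU_CLK_UNHALTED.REF_XCLK"])
        recovery = c["INT_MISC.RECOVERY_CYCLES_ANY"] / 2
    else:
        core_clks = c["CPU_CLK_UNHALTED.THREAD"]  # assumed meaning of "cycles"
        recovery = c["INT_MISC.RECOVERY_CYCLES"]
    slots = 4 * core_clks  # the SLOTS metric: 4 issue slots per core clock
    frontend = c["IDQ_UOPS_NOT_DELIVERED.CORE"] / slots
    bad_spec = (c["UOPS_ISSUED.ANY"] - c["UOPS_RETIRED.RETIRE_SLOTS"]
                + 4 * recovery) / slots
    retiring = c["UOPS_RETIRED.RETIRE_SLOTS"] / slots
    backend = 1 - (frontend + bad_spec + retiring)  # Backend_Bound is the remainder
    return {"Frontend_Bound": frontend, "Bad_Speculation": bad_spec,
            "Retiring": retiring, "Backend_Bound": backend}

# Hypothetical counter deltas, for illustration only:
sample = {
    "CPU_CLK_UNHALTED.THREAD": 1000000,
    "IDQ_UOPS_NOT_DELIVERED.CORE": 800000,
    "UOPS_ISSUED.ANY": 2600000,
    "UOPS_RETIRED.RETIRE_SLOTS": 2400000,
    "INT_MISC.RECOVERY_CYCLES": 50000,
}
print(topdown_l1(sample))  # the four fractions sum to 1 by construction

Because Backend_Bound is computed as 1 minus the other three fractions, it needs no dedicated event of its own; that is why its MetricExpr simply re-embeds the Frontend_Bound, Bad_Speculation and Retiring expressions.
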
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json b/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json
index 35b255fa6a79..047d7e11aa6f 100644
--- a/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json
@@ -1,376 +1,394 @@
[
{
- "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)",
- "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.",
"BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
+ "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)",
"MetricGroup": "TopdownL1",
- "MetricName": "Frontend_Bound"
+ "MetricName": "Frontend_Bound",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible for fetching operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound."
},
{
- "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
- "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
"BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
"MetricGroup": "TopdownL1_SMT",
- "MetricName": "Frontend_Bound_SMT"
+ "MetricName": "Frontend_Bound_SMT",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible for fetching operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU."
},
{
- "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)",
- "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
"BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations",
+ "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)",
"MetricGroup": "TopdownL1",
- "MetricName": "Bad_Speculation"
+ "MetricName": "Bad_Speculation",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This includes slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to mispredicted branches is categorized under the Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example."
},
{
- "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
- "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU.",
"BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
"MetricGroup": "TopdownL1_SMT",
- "MetricName": "Bad_Speculation_SMT"
+ "MetricName": "Bad_Speculation_SMT",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This includes slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to mispredicted branches is categorized under the Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU."
},
{
- "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )",
- "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
"BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
+ "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )",
"MetricGroup": "TopdownL1",
- "MetricName": "Backend_Bound"
+ "MetricName": "Backend_Bound",
+ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound."
},
{
- "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )",
- "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
"BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )",
"MetricGroup": "TopdownL1_SMT",
- "MetricName": "Backend_Bound_SMT"
+ "MetricName": "Backend_Bound_SMT",
+ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. SMT version; use when SMT is enabled and measuring per logical CPU."
},
{
- "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)",
- "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. ",
"BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)",
"MetricGroup": "TopdownL1",
- "MetricName": "Retiring"
+ "MetricName": "Retiring",
+ "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessarily mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided."
},
{
- "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
- "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. SMT version; use when SMT is enabled and measuring per logical CPU.",
"BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
"MetricGroup": "TopdownL1_SMT",
- "MetricName": "Retiring_SMT"
+ "MetricName": "Retiring_SMT",
+ "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessarily mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. SMT version; use when SMT is enabled and measuring per logical CPU."
},
{
+ "BriefDescription": "Instructions Per Cycle (per Logical Processor)",
"MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
- "BriefDescription": "Instructions Per Cycle (per logical thread)",
"MetricGroup": "TopDownL1",
"MetricName": "IPC"
},
{
- "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
"BriefDescription": "Uops Per Instruction",
- "MetricGroup": "Pipeline;Retiring",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
+ "MetricGroup": "Pipeline;Retire",
"MetricName": "UPI"
},
{
- "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
"BriefDescription": "Instruction per taken branch",
- "MetricGroup": "Branches;PGO",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
+ "MetricGroup": "Branches;Fetch_BW;PGO",
"MetricName": "IpTB"
},
{
- "MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
"BriefDescription": "Branch instructions per taken branch. ",
+ "MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;PGO",
"MetricName": "BpTB"
},
{
- "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 64 * ( ICACHE_64B.IFTAG_HIT + ICACHE_64B.IFTAG_MISS ) / 4.1 ) )",
"BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely (includes speculatively fetches) consumed by program instructions",
- "MetricGroup": "PGO",
+ "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 64 * ( ICACHE_64B.IFTAG_HIT + ICACHE_64B.IFTAG_MISS ) / 4.1 ) )",
+ "MetricGroup": "PGO;IcMiss",
"MetricName": "IFetch_Line_Utilization"
},
{
- "MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ))",
"BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
- "MetricGroup": "DSB;Frontend_Bandwidth",
+ "MetricExpr": "IDQ.DSB_UOPS / (IDQ.DSB_UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS)",
+ "MetricGroup": "DSB;Fetch_BW",
"MetricName": "DSB_Coverage"
},
{
+ "BriefDescription": "Cycles Per Instruction (per Logical Processor)",
"MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
- "BriefDescription": "Cycles Per Instruction (threaded)",
"MetricGroup": "Pipeline;Summary",
"MetricName": "CPI"
},
{
+ "BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active.",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD",
- "BriefDescription": "Per-thread actual clocks when the logical processor is active.",
"MetricGroup": "Summary",
"MetricName": "CLKS"
},
{
+ "BriefDescription": "Total issue-pipeline slots (per-Physical Core)",
"MetricExpr": "4 * cycles",
- "BriefDescription": "Total issue-pipeline slots (per core)",
"MetricGroup": "TopDownL1",
"MetricName": "SLOTS"
},
{
+ "BriefDescription": "Total issue-pipeline slots (per-Physical Core)",
"MetricExpr": "4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
- "BriefDescription": "Total issue-pipeline slots (per core)",
"MetricGroup": "TopDownL1_SMT",
"MetricName": "SLOTS_SMT"
},
{
+ "BriefDescription": "Instructions per Load (lower number means higher occurrence rate)",
"MetricExpr": "INST_RETIRED.ANY / MEM_INST_RETIRED.ALL_LOADS",
- "BriefDescription": "Instructions per Load (lower number means loads are more frequent)",
- "MetricGroup": "Instruction_Type;L1_Bound",
+ "MetricGroup": "Instruction_Type",
"MetricName": "IpL"
},
{
+ "BriefDescription": "Instructions per Store (lower number means higher occurrence rate)",
"MetricExpr": "INST_RETIRED.ANY / MEM_INST_RETIRED.ALL_STORES",
- "BriefDescription": "Instructions per Store",
- "MetricGroup": "Instruction_Type;Store_Bound",
+ "MetricGroup": "Instruction_Type",
"MetricName": "IpS"
},
{
+ "BriefDescription": "Instructions per Branch (lower number means higher occurrence rate)",
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES",
- "BriefDescription": "Instructions per Branch",
- "MetricGroup": "Branches;Instruction_Type;Port_5;Port_6",
+ "MetricGroup": "Branches;Instruction_Type",
"MetricName": "IpB"
},
{
+ "BriefDescription": "Instructions per (near) call (lower number means higher occurrence rate)",
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_CALL",
- "BriefDescription": "Instruction per (near) call",
"MetricGroup": "Branches",
"MetricName": "IpCall"
},
{
- "MetricExpr": "INST_RETIRED.ANY",
"BriefDescription": "Total number of retired Instructions",
+ "MetricExpr": "INST_RETIRED.ANY",
"MetricGroup": "Summary",
"MetricName": "Instructions"
},
{
- "MetricExpr": "INST_RETIRED.ANY / cycles",
"BriefDescription": "Instructions Per Cycle (per physical core)",
+ "MetricExpr": "INST_RETIRED.ANY / cycles",
"MetricGroup": "SMT",
"MetricName": "CoreIPC"
},
{
- "MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"BriefDescription": "Instructions Per Cycle (per physical core)",
+ "MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"MetricGroup": "SMT",
"MetricName": "CoreIPC_SMT"
},
{
- "MetricExpr": "(( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * ( FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE ) + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE )) / cycles",
"BriefDescription": "Floating Point Operations Per Cycle",
+ "MetricExpr": "(( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * ( FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE ) + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE )) / cycles",
"MetricGroup": "FLOPS",
"MetricName": "FLOPc"
},
{
- "MetricExpr": "(( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * ( FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE ) + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE )) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"BriefDescription": "Floating Point Operations Per Cycle",
+ "MetricExpr": "(( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * ( FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE ) + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE )) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
"MetricGroup": "FLOPS_SMT",
"MetricName": "FLOPc_SMT"
},
{
- "MetricExpr": "UOPS_EXECUTED.THREAD / (( UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 ) if #SMT_on else UOPS_EXECUTED.CORE_CYCLES_GE_1)",
"BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
- "MetricGroup": "Pipeline;Ports_Utilization",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / (( UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 ) if #SMT_on else UOPS_EXECUTED.CORE_CYCLES_GE_1)",
+ "MetricGroup": "Pipeline",
"MetricName": "ILP"
},
{
+ "BriefDescription": "Branch Misprediction Cost: Fraction of TopDown slots wasted per non-speculative branch misprediction (jeclear)",
"MetricExpr": "( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * cycles)) * (( INT_MISC.CLEAR_RESTEER_CYCLES + 9 * BACLEARS.ANY ) / cycles) / (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * cycles)) ) * (4 * cycles) / BR_MISP_RETIRED.ALL_BRANCHES",
- "BriefDescription": "Branch Misprediction Cost: Fraction of TopDown slots wasted per branch misprediction (jeclear and baclear)",
- "MetricGroup": "Branch_Mispredicts",
+ "MetricGroup": "BrMispredicts",
"MetricName": "Branch_Misprediction_Cost"
},
{
+ "BriefDescription": "Branch Misprediction Cost: Fraction of TopDown slots wasted per non-speculative branch misprediction (jeclear)",
"MetricExpr": "( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) * (( INT_MISC.CLEAR_RESTEER_CYCLES + 9 * BACLEARS.ANY ) / cycles) / (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) ) * (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) / BR_MISP_RETIRED.ALL_BRANCHES",
- "BriefDescription": "Branch Misprediction Cost: Fraction of TopDown slots wasted per branch misprediction (jeclear and baclear)",
- "MetricGroup": "Branch_Mispredicts_SMT",
+ "MetricGroup": "BrMispredicts_SMT",
"MetricName": "Branch_Misprediction_Cost_SMT"
},
{
- "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
"BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear)",
- "MetricGroup": "Branch_Mispredicts",
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "BrMispredicts",
"MetricName": "IpMispredict"
},
{
+ "BriefDescription": "Core actual clocks when any Logical Processor is active on the Physical Core",
"MetricExpr": "( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )",
- "BriefDescription": "Core actual clocks when any thread is active on the physical core",
"MetricGroup": "SMT",
"MetricName": "CORE_CLKS"
},
{
- "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_RETIRED.L1_MISS + MEM_LOAD_RETIRED.FB_HIT )",
"BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads (in core cycles)",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_RETIRED.L1_MISS + MEM_LOAD_RETIRED.FB_HIT )",
"MetricGroup": "Memory_Bound;Memory_Lat",
"MetricName": "Load_Miss_Real_Latency"
},
{
+ "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand loads when there is at least one such miss. Per-Logical Processor)",
"MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES",
- "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-thread)",
"MetricGroup": "Memory_Bound;Memory_BW",
"MetricName": "MLP"
},
{
- "MetricExpr": "( ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING + EPT.WALK_PENDING ) / ( 2 * cycles )",
"BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
+ "MetricExpr": "( ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING + EPT.WALK_PENDING ) / ( 2 * cycles )",
"MetricGroup": "TLB",
"MetricName": "Page_Walks_Utilization"
},
{
- "MetricExpr": "( ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING + EPT.WALK_PENDING ) / ( 2 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )) )",
"BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
+ "MetricExpr": "( ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING + EPT.WALK_PENDING ) / ( 2 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )) )",
"MetricGroup": "TLB_SMT",
"MetricName": "Page_Walks_Utilization_SMT"
},
{
- "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
"BriefDescription": "Average data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
"MetricGroup": "Memory_BW",
"MetricName": "L1D_Cache_Fill_BW"
},
{
- "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
"BriefDescription": "Average data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
"MetricGroup": "Memory_BW",
"MetricName": "L2_Cache_Fill_BW"
},
{
- "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
"BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
"MetricGroup": "Memory_BW",
"MetricName": "L3_Cache_Fill_BW"
},
{
- "MetricExpr": "64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1000000000 / duration_time",
"BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1000000000 / duration_time",
"MetricGroup": "Memory_BW",
"MetricName": "L3_Cache_Access_BW"
},
{
- "MetricExpr": "1000 * MEM_LOAD_RETIRED.L1_MISS / INST_RETIRED.ANY",
"BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
- "MetricGroup": "Cache_Misses;",
+ "MetricExpr": "1000 * MEM_LOAD_RETIRED.L1_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "Cache_Misses",
"MetricName": "L1MPKI"
},
{
- "MetricExpr": "1000 * MEM_LOAD_RETIRED.L2_MISS / INST_RETIRED.ANY",
"BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads",
- "MetricGroup": "Cache_Misses;",
+ "MetricExpr": "1000 * MEM_LOAD_RETIRED.L2_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "Cache_Misses",
"MetricName": "L2MPKI"
},
{
- "MetricExpr": "1000 * L2_RQSTS.MISS / INST_RETIRED.ANY",
"BriefDescription": "L2 cache misses per kilo instruction for all request types (including speculative)",
- "MetricGroup": "Cache_Misses;",
+ "MetricExpr": "1000 * L2_RQSTS.MISS / INST_RETIRED.ANY",
+ "MetricGroup": "Cache_Misses",
"MetricName": "L2MPKI_All"
},
{
- "MetricExpr": "1000 * ( L2_RQSTS.REFERENCES - L2_RQSTS.MISS ) / INST_RETIRED.ANY",
"BriefDescription": "L2 cache hits per kilo instruction for all request types (including speculative)",
- "MetricGroup": "Cache_Misses;",
+ "MetricExpr": "1000 * ( L2_RQSTS.REFERENCES - L2_RQSTS.MISS ) / INST_RETIRED.ANY",
+ "MetricGroup": "Cache_Misses",
"MetricName": "L2HPKI_All"
},
{
- "MetricExpr": "1000 * MEM_LOAD_RETIRED.L3_MISS / INST_RETIRED.ANY",
"BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads",
- "MetricGroup": "Cache_Misses;",
+ "MetricExpr": "1000 * MEM_LOAD_RETIRED.L3_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "Cache_Misses",
"MetricName": "L3MPKI"
},
{
- "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
+ "BriefDescription": "Rate of silent evictions from the L2 cache per kilo instruction where the evicted lines are dropped (no writeback to L3 or memory)",
+ "MetricExpr": "1000 * L2_LINES_OUT.SILENT / INST_RETIRED.ANY",
+ "MetricGroup": "",
+ "MetricName": "L2_Evictions_Silent_PKI"
+ },
+ {
+ "BriefDescription": "Rate of non silent evictions from the L2 cache per Kilo instruction",
+ "MetricExpr": "1000 * L2_LINES_OUT.NON_SILENT / INST_RETIRED.ANY",
+ "MetricGroup": "",
+ "MetricName": "L2_Evictions_NonSilent_PKI"
+ },
+ {
"BriefDescription": "Average CPU Utilization",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
"MetricGroup": "Summary",
"MetricName": "CPU_Utilization"
},
{
- "MetricExpr": "( (( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * ( FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE ) + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE )) / 1000000000 ) / duration_time",
"BriefDescription": "Giga Floating Point Operations Per Second",
+ "MetricExpr": "( (( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * ( FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE ) + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE )) / 1000000000 ) / duration_time",
"MetricGroup": "FLOPS;Summary",
"MetricName": "GFLOPs"
},
{
- "MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
"BriefDescription": "Average Frequency Utilization relative nominal frequency",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
"MetricGroup": "Power",
"MetricName": "Turbo_Utilization"
},
{
+ "BriefDescription": "Fraction of cycles where both hardware Logical Processors were active",
"MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0",
- "BriefDescription": "Fraction of cycles where both hardware threads were active",
"MetricGroup": "SMT;Summary",
"MetricName": "SMT_2T_Utilization"
},
{
- "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
"BriefDescription": "Fraction of cycles spent in Kernel mode",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
"MetricGroup": "Summary",
"MetricName": "Kernel_Utilization"
},
{
- "MetricExpr": "( 64 * ( uncore_imc@cas_count_read@ + uncore_imc@cas_count_write@ ) / 1000000000 ) / duration_time",
"BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
+ "MetricExpr": "( 64 * ( uncore_imc@cas_count_read@ + uncore_imc@cas_count_write@ ) / 1000000000 ) / duration_time",
"MetricGroup": "Memory_BW",
"MetricName": "DRAM_BW_Use"
},
{
- "MetricExpr": "1000000000 * ( cha@event\\=0x36\\\\\\,umask\\=0x21\\\\\\,config\\=0x40433@ / cha@event\\=0x35\\\\\\,umask\\=0x21\\\\\\,config\\=0x40433@ ) / ( cha_0@event\\=0x0@ / duration_time )",
"BriefDescription": "Average latency of data read request to external memory (in nanoseconds). Accounts for demand loads and L1/L2 prefetches",
+ "MetricExpr": "1000000000 * ( cha@event\\=0x36\\\\\\,umask\\=0x21@ / cha@event\\=0x35\\\\\\,umask\\=0x21@ ) / ( cha_0@event\\=0x0@ / duration_time )",
"MetricGroup": "Memory_Lat",
"MetricName": "DRAM_Read_Latency"
},
{
- "MetricExpr": "cha@event\\=0x36\\\\\\,umask\\=0x21\\\\\\,config\\=0x40433@ / cha@event\\=0x36\\\\\\,umask\\=0x21\\\\\\,thresh\\=1\\\\\\,config\\=0x40433@",
"BriefDescription": "Average number of parallel data read requests to external memory. Accounts for demand loads and L1/L2 prefetches",
+ "MetricExpr": "cha@event\\=0x36\\\\\\,umask\\=0x21@ / cha@event\\=0x36\\\\\\,umask\\=0x21\\\\\\,thresh\\=1@",
"MetricGroup": "Memory_BW",
"MetricName": "DRAM_Parallel_Reads"
},
{
- "MetricExpr": "cha_0@event\\=0x0@",
"BriefDescription": "Socket actual clocks when any core is active on that socket",
+ "MetricExpr": "cha_0@event\\=0x0@",
"MetricGroup": "",
"MetricName": "Socket_CLKS"
},
{
+ "BriefDescription": "Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions. )",
+ "MetricExpr": "INST_RETIRED.ANY / ( BR_INST_RETIRED.FAR_BRANCH / 2 )",
+ "MetricGroup": "",
+ "MetricName": "IpFarBranch"
+ },
+ {
+ "BriefDescription": "C3 residency percent per core",
"MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
- "BriefDescription": "C3 residency percent per core",
"MetricName": "C3_Core_Residency"
},
{
+ "BriefDescription": "C6 residency percent per core",
"MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
- "BriefDescription": "C6 residency percent per core",
"MetricName": "C6_Core_Residency"
},
{
+ "BriefDescription": "C7 residency percent per core",
"MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
- "BriefDescription": "C7 residency percent per core",
"MetricName": "C7_Core_Residency"
},
{
+ "BriefDescription": "C2 residency percent per package",
"MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
- "BriefDescription": "C2 residency percent per package",
"MetricName": "C2_Pkg_Residency"
},
{
+ "BriefDescription": "C3 residency percent per package",
"MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
- "BriefDescription": "C3 residency percent per package",
"MetricName": "C3_Pkg_Residency"
},
{
+ "BriefDescription": "C6 residency percent per package",
"MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
- "BriefDescription": "C6 residency percent per package",
"MetricName": "C6_Pkg_Residency"
},
{
+ "BriefDescription": "C7 residency percent per package",
"MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
- "BriefDescription": "C7 residency percent per package",
"MetricName": "C7_Pkg_Residency"
}
]
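Each entry above pairs a MetricExpr, evaluated by perf's expression parser over the raw event counts, with the MetricName that can be requested with something like perf stat -M L1MPKI. As a minimal sketch of what that evaluation amounts to (the counter values are made up for illustration; the real parser in tools/perf also handles the @-quoted PMU syntax and literals such as #SMT_on):

# Rough sketch of evaluating the L1MPKI metric above.
counts = {
    "MEM_LOAD_RETIRED.L1_MISS": 1200000,   # hypothetical counts
    "INST_RETIRED.ANY": 900000000,
}

def l1mpki(c):
    # MetricExpr: "1000 * MEM_LOAD_RETIRED.L1_MISS / INST_RETIRED.ANY"
    return 1000.0 * c["MEM_LOAD_RETIRED.L1_MISS"] / c["INST_RETIRED.ANY"]

print(l1mpki(counts))  # ~1.33 L1 misses per kilo instruction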
diff --git a/tools/perf/pmu-events/jevents.c b/tools/perf/pmu-events/jevents.c
index e2837260ca4d..079c77b6a2fd 100644
--- a/tools/perf/pmu-events/jevents.c
+++ b/tools/perf/pmu-events/jevents.c
@@ -322,7 +322,8 @@ static int print_events_table_entry(void *data, char *name, char *event,
char *desc, char *long_desc,
char *pmu, char *unit, char *perpkg,
char *metric_expr,
- char *metric_name, char *metric_group)
+ char *metric_name, char *metric_group,
+ char *deprecated)
{
struct perf_entry_data *pd = data;
FILE *outfp = pd->outfp;
@@ -354,6 +355,8 @@ static int print_events_table_entry(void *data, char *name, char *event,
fprintf(outfp, "\t.metric_name = \"%s\",\n", metric_name);
if (metric_group)
fprintf(outfp, "\t.metric_group = \"%s\",\n", metric_group);
+ if (deprecated)
+ fprintf(outfp, "\t.deprecated = \"%s\",\n", deprecated);
fprintf(outfp, "},\n");
return 0;
@@ -371,6 +374,7 @@ struct event_struct {
char *metric_expr;
char *metric_name;
char *metric_group;
+ char *deprecated;
};
#define ADD_EVENT_FIELD(field) do { if (field) { \
@@ -398,6 +402,7 @@ struct event_struct {
op(metric_expr); \
op(metric_name); \
op(metric_group); \
+ op(deprecated); \
} while (0)
static LIST_HEAD(arch_std_events);
@@ -416,7 +421,8 @@ static void free_arch_std_events(void)
static int save_arch_std_events(void *data, char *name, char *event,
char *desc, char *long_desc, char *pmu,
char *unit, char *perpkg, char *metric_expr,
- char *metric_name, char *metric_group)
+ char *metric_name, char *metric_group,
+ char *deprecated)
{
struct event_struct *es;
@@ -479,7 +485,8 @@ static int
try_fixup(const char *fn, char *arch_std, char **event, char **desc,
char **name, char **long_desc, char **pmu, char **filter,
char **perpkg, char **unit, char **metric_expr, char **metric_name,
- char **metric_group, unsigned long long eventcode)
+ char **metric_group, unsigned long long eventcode,
+ char **deprecated)
{
/* try to find matching event from arch standard values */
struct event_struct *es;
@@ -507,7 +514,8 @@ int json_events(const char *fn,
char *long_desc,
char *pmu, char *unit, char *perpkg,
char *metric_expr,
- char *metric_name, char *metric_group),
+ char *metric_name, char *metric_group,
+ char *deprecated),
void *data)
{
int err;
@@ -536,6 +544,7 @@ int json_events(const char *fn,
char *metric_expr = NULL;
char *metric_name = NULL;
char *metric_group = NULL;
+ char *deprecated = NULL;
char *arch_std = NULL;
unsigned long long eventcode = 0;
struct msrmap *msr = NULL;
@@ -614,6 +623,8 @@ int json_events(const char *fn,
addfield(map, &unit, "", "", val);
} else if (json_streq(map, field, "PerPkg")) {
addfield(map, &perpkg, "", "", val);
+ } else if (json_streq(map, field, "Deprecated")) {
+ addfield(map, &deprecated, "", "", val);
} else if (json_streq(map, field, "MetricName")) {
addfield(map, &metric_name, "", "", val);
} else if (json_streq(map, field, "MetricGroup")) {
@@ -658,12 +669,14 @@ int json_events(const char *fn,
err = try_fixup(fn, arch_std, &event, &desc, &name,
&long_desc, &pmu, &filter, &perpkg,
&unit, &metric_expr, &metric_name,
- &metric_group, eventcode);
+ &metric_group, eventcode,
+ &deprecated);
if (err)
goto free_strings;
}
err = func(data, name, real_event(name, event), desc, long_desc,
- pmu, unit, perpkg, metric_expr, metric_name, metric_group);
+ pmu, unit, perpkg, metric_expr, metric_name,
+ metric_group, deprecated);
free_strings:
free(event);
free(desc);
@@ -673,6 +686,7 @@ free_strings:
free(pmu);
free(filter);
free(perpkg);
+ free(deprecated);
free(unit);
free(metric_expr);
free(metric_name);
@@ -758,6 +772,7 @@ static int process_mapfile(FILE *outfp, char *fpath)
char *line, *p;
int line_num;
char *tblname;
+ int ret = 0;
pr_info("%s: Processing mapfile %s\n", prog, fpath);
@@ -769,6 +784,7 @@ static int process_mapfile(FILE *outfp, char *fpath)
if (!mapfp) {
pr_info("%s: Error %s opening %s\n", prog, strerror(errno),
fpath);
+ free(line);
return -1;
}
@@ -795,7 +811,8 @@ static int process_mapfile(FILE *outfp, char *fpath)
/* TODO Deal with lines longer than 16K */
pr_info("%s: Mapfile %s: line %d too long, aborting\n",
prog, fpath, line_num);
- return -1;
+ ret = -1;
+ goto out;
}
line[strlen(line)-1] = '\0';
@@ -825,7 +842,9 @@ static int process_mapfile(FILE *outfp, char *fpath)
out:
print_mapping_table_suffix(outfp);
- return 0;
+ fclose(mapfp);
+ free(line);
+ return ret;
}
/*
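Besides the new field, these jevents.c hunks are leak fixes: process_mapfile() previously returned early without closing mapfp or freeing line, and the error paths in main() leaked eventsfp. The pattern adopted, a single exit point that releases everything, is the C idiom for what Python spells as try/finally. A sketch of the same discipline (illustrative only, reusing the 16K line limit from the TODO above):

# Every return path releases the file, mirroring the single cleanup
# point the patch introduces in process_mapfile().
def process_mapfile(path):
    mapfp = open(path)
    try:
        for line_num, line in enumerate(mapfp, 1):
            if len(line) > 16384:
                print("line %d too long, aborting" % line_num)
                return -1   # cleanup below still runs
        return 0
    finally:
        mapfp.close()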
@@ -1122,6 +1141,7 @@ int main(int argc, char *argv[])
goto empty_map;
} else if (rc < 0) {
/* Make build fail */
+ fclose(eventsfp);
free_arch_std_events();
return 1;
} else if (rc) {
@@ -1134,6 +1154,7 @@ int main(int argc, char *argv[])
goto empty_map;
} else if (rc < 0) {
/* Make build fail */
+ fclose(eventsfp);
free_arch_std_events();
return 1;
} else if (rc) {
@@ -1151,6 +1172,8 @@ int main(int argc, char *argv[])
if (process_mapfile(eventsfp, mapfile)) {
pr_info("%s: Error processing mapfile %s\n", prog, mapfile);
/* Make build fail */
+ fclose(eventsfp);
+ free_arch_std_events();
return 1;
}
diff --git a/tools/perf/pmu-events/jevents.h b/tools/perf/pmu-events/jevents.h
index 4684c673c445..5cda49a42143 100644
--- a/tools/perf/pmu-events/jevents.h
+++ b/tools/perf/pmu-events/jevents.h
@@ -7,7 +7,8 @@ int json_events(const char *fn,
char *long_desc,
char *pmu,
char *unit, char *perpkg, char *metric_expr,
- char *metric_name, char *metric_group),
+ char *metric_name, char *metric_group,
+ char *deprecated),
void *data);
char *get_cpu_str(void);
diff --git a/tools/perf/pmu-events/pmu-events.h b/tools/perf/pmu-events/pmu-events.h
index 92a4d15ee0b9..caeb577d36c9 100644
--- a/tools/perf/pmu-events/pmu-events.h
+++ b/tools/perf/pmu-events/pmu-events.h
@@ -17,6 +17,7 @@ struct pmu_event {
const char *metric_expr;
const char *metric_name;
const char *metric_group;
+ const char *deprecated;
};
/*
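Seen end to end, the hunks above thread a single new JSON field, "Deprecated", from the event files through json_events() and try_fixup() in jevents.c, into the callback signature in jevents.h, and out as the new .deprecated member of struct pmu_event. A rough sketch of the mapping with a made-up event entry (hypothetical name; only the plumbing visible in this diff is assumed):

# Hypothetical JSON event entry; "Deprecated" is the field the diff parses.
event = {"EventName": "OLD_EVENT", "Deprecated": "1"}

# jevents would emit it as a C initializer, mirroring the fprintf() above:
if "Deprecated" in event:
    print('\t.deprecated = "%s",' % event["Deprecated"])

How perf consumes the field is outside this diff; presumably deprecated events can be de-emphasized or hidden when listing.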
diff --git a/tools/perf/scripts/python/exported-sql-viewer.py b/tools/perf/scripts/python/exported-sql-viewer.py
index 61b3911d91e6..26d7be785288 100755
--- a/tools/perf/scripts/python/exported-sql-viewer.py
+++ b/tools/perf/scripts/python/exported-sql-viewer.py
@@ -105,6 +105,9 @@ except ImportError:
glb_nsz = 16
import re
import os
+import random
+import copy
+import math
pyside_version_1 = True
if not "--pyside-version-1" in sys.argv:
@@ -341,6 +344,15 @@ def LookupCreateModel(model_name, create_fn):
model_cache_lock.release()
return model
+def LookupModel(model_name):
+ model_cache_lock.acquire()
+ try:
+ model = model_cache[model_name]
+ except KeyError:
+ model = None
+ model_cache_lock.release()
+ return model
+
# Find bar
class FindBar():
@@ -625,7 +637,7 @@ class CallGraphRootItem(CallGraphLevelItemBase):
self.query_done = True
if_has_calls = ""
if IsSelectable(glb.db, "comms", columns = "has_calls"):
- if_has_calls = " WHERE has_calls = TRUE"
+ if_has_calls = " WHERE has_calls = " + glb.dbref.TRUE
query = QSqlQuery(glb.db)
QueryExec(query, "SELECT id, comm FROM comms" + if_has_calls)
while query.next():
@@ -785,15 +797,16 @@ class CallGraphModel(CallGraphModelBase):
class CallTreeLevelTwoPlusItemBase(CallGraphLevelItemBase):
- def __init__(self, glb, params, row, comm_id, thread_id, calls_id, time, insn_cnt, cyc_cnt, branch_count, parent_item):
+ def __init__(self, glb, params, row, comm_id, thread_id, calls_id, call_time, time, insn_cnt, cyc_cnt, branch_count, parent_item):
super(CallTreeLevelTwoPlusItemBase, self).__init__(glb, params, row, parent_item)
self.comm_id = comm_id
self.thread_id = thread_id
self.calls_id = calls_id
+ self.call_time = call_time
+ self.time = time
self.insn_cnt = insn_cnt
self.cyc_cnt = cyc_cnt
self.branch_count = branch_count
- self.time = time
def Select(self):
self.query_done = True
@@ -830,17 +843,17 @@ class CallTreeLevelTwoPlusItemBase(CallGraphLevelItemBase):
class CallTreeLevelThreeItem(CallTreeLevelTwoPlusItemBase):
- def __init__(self, glb, params, row, comm_id, thread_id, calls_id, name, dso, count, time, insn_cnt, cyc_cnt, branch_count, parent_item):
- super(CallTreeLevelThreeItem, self).__init__(glb, params, row, comm_id, thread_id, calls_id, time, insn_cnt, cyc_cnt, branch_count, parent_item)
+ def __init__(self, glb, params, row, comm_id, thread_id, calls_id, name, dso, call_time, time, insn_cnt, cyc_cnt, branch_count, parent_item):
+ super(CallTreeLevelThreeItem, self).__init__(glb, params, row, comm_id, thread_id, calls_id, call_time, time, insn_cnt, cyc_cnt, branch_count, parent_item)
dso = dsoname(dso)
if self.params.have_ipc:
insn_pcnt = PercentToOneDP(insn_cnt, parent_item.insn_cnt)
cyc_pcnt = PercentToOneDP(cyc_cnt, parent_item.cyc_cnt)
br_pcnt = PercentToOneDP(branch_count, parent_item.branch_count)
ipc = CalcIPC(cyc_cnt, insn_cnt)
- self.data = [ name, dso, str(count), str(time), PercentToOneDP(time, parent_item.time), str(insn_cnt), insn_pcnt, str(cyc_cnt), cyc_pcnt, ipc, str(branch_count), br_pcnt ]
+ self.data = [ name, dso, str(call_time), str(time), PercentToOneDP(time, parent_item.time), str(insn_cnt), insn_pcnt, str(cyc_cnt), cyc_pcnt, ipc, str(branch_count), br_pcnt ]
else:
- self.data = [ name, dso, str(count), str(time), PercentToOneDP(time, parent_item.time), str(branch_count), PercentToOneDP(branch_count, parent_item.branch_count) ]
+ self.data = [ name, dso, str(call_time), str(time), PercentToOneDP(time, parent_item.time), str(branch_count), PercentToOneDP(branch_count, parent_item.branch_count) ]
self.dbid = calls_id
# Call tree data model level two item
@@ -848,7 +861,7 @@ class CallTreeLevelThreeItem(CallTreeLevelTwoPlusItemBase):
class CallTreeLevelTwoItem(CallTreeLevelTwoPlusItemBase):
def __init__(self, glb, params, row, comm_id, thread_id, pid, tid, parent_item):
- super(CallTreeLevelTwoItem, self).__init__(glb, params, row, comm_id, thread_id, 0, 0, 0, 0, 0, parent_item)
+ super(CallTreeLevelTwoItem, self).__init__(glb, params, row, comm_id, thread_id, 0, 0, 0, 0, 0, 0, parent_item)
if self.params.have_ipc:
self.data = [str(pid) + ":" + str(tid), "", "", "", "", "", "", "", "", "", "", ""]
else:
@@ -905,7 +918,7 @@ class CallTreeRootItem(CallGraphLevelItemBase):
self.query_done = True
if_has_calls = ""
if IsSelectable(glb.db, "comms", columns = "has_calls"):
- if_has_calls = " WHERE has_calls = TRUE"
+ if_has_calls = " WHERE has_calls = " + glb.dbref.TRUE
query = QSqlQuery(glb.db)
QueryExec(query, "SELECT id, comm FROM comms" + if_has_calls)
while query.next():
@@ -971,20 +984,41 @@ class CallTreeModel(CallGraphModelBase):
ids.insert(0, query.value(1))
return ids
-# Vertical widget layout
+# Horizontal layout
-class VBox():
+class HBoxLayout(QHBoxLayout):
- def __init__(self, w1, w2, w3=None):
- self.vbox = QWidget()
- self.vbox.setLayout(QVBoxLayout())
+ def __init__(self, *children):
+ super(HBoxLayout, self).__init__()
+
+ self.layout().setContentsMargins(0, 0, 0, 0)
+ for child in children:
+ if child.isWidgetType():
+ self.layout().addWidget(child)
+ else:
+ self.layout().addLayout(child)
+
+# Vertical layout
+
+class VBoxLayout(QVBoxLayout):
- self.vbox.layout().setContentsMargins(0, 0, 0, 0)
+ def __init__(self, *children):
+ super(VBoxLayout, self).__init__()
- self.vbox.layout().addWidget(w1)
- self.vbox.layout().addWidget(w2)
- if w3:
- self.vbox.layout().addWidget(w3)
+ self.layout().setContentsMargins(0, 0, 0, 0)
+ for child in children:
+ if child.isWidgetType():
+ self.layout().addWidget(child)
+ else:
+ self.layout().addLayout(child)
+
+# Vertical layout widget
+
+class VBox():
+
+ def __init__(self, *children):
+ self.vbox = QWidget()
+ self.vbox.setLayout(VBoxLayout(*children))
def Widget(self):
return self.vbox
@@ -1063,7 +1097,7 @@ class CallGraphWindow(TreeWindowBase):
class CallTreeWindow(TreeWindowBase):
- def __init__(self, glb, parent=None):
+ def __init__(self, glb, parent=None, thread_at_time=None):
super(CallTreeWindow, self).__init__(parent)
self.model = LookupCreateModel("Call Tree", lambda x=glb: CallTreeModel(x))
@@ -1081,6 +1115,1343 @@ class CallTreeWindow(TreeWindowBase):
AddSubWindow(glb.mainwindow.mdi_area, self, "Call Tree")
+ if thread_at_time:
+ self.DisplayThreadAtTime(*thread_at_time)
+
+ def DisplayThreadAtTime(self, comm_id, thread_id, time):
+ parent = QModelIndex()
+ for dbid in (comm_id, thread_id):
+ found = False
+ n = self.model.rowCount(parent)
+ for row in xrange(n):
+ child = self.model.index(row, 0, parent)
+ if child.internalPointer().dbid == dbid:
+ found = True
+ self.view.setCurrentIndex(child)
+ parent = child
+ break
+ if not found:
+ return
+ found = False
+ while True:
+ n = self.model.rowCount(parent)
+ if not n:
+ return
+ last_child = None
+ for row in xrange(n):
+ child = self.model.index(row, 0, parent)
+ child_call_time = child.internalPointer().call_time
+ if child_call_time < time:
+ last_child = child
+ elif child_call_time == time:
+ self.view.setCurrentIndex(child)
+ return
+ elif child_call_time > time:
+ break
+ if not last_child:
+ if not found:
+ child = self.model.index(0, 0, parent)
+ self.view.setCurrentIndex(child)
+ return
+ found = True
+ self.view.setCurrentIndex(last_child)
+ parent = last_child
+
+# ExecComm() gets the comm_id of the command string that was set when the process exec'd, i.e. the program name
+
+def ExecComm(db, thread_id, time):
+ query = QSqlQuery(db)
+ QueryExec(query, "SELECT comm_threads.comm_id, comms.c_time, comms.exec_flag"
+ " FROM comm_threads"
+ " INNER JOIN comms ON comms.id = comm_threads.comm_id"
+ " WHERE comm_threads.thread_id = " + str(thread_id) +
+ " ORDER BY comms.c_time, comms.id")
+ first = None
+ last = None
+ while query.next():
+ if first is None:
+ first = query.value(0)
+ if query.value(2) and Decimal(query.value(1)) <= Decimal(time):
+ last = query.value(0)
+ if last is not None:
+ return last
+ return first
+
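The comment above describes what ExecComm() returns; the rule is easier to see restated over plain tuples: prefer the most recent comm that exec'd at or before the given time, and fall back to the thread's first comm when no exec qualifies. A sketch with invented rows:

# Same rule as ExecComm(), minus the SQL; rows are (comm_id, c_time,
# exec_flag), already ordered by c_time, id.
def exec_comm(rows, time):
    first = last = None
    for comm_id, c_time, exec_flag in rows:
        if first is None:
            first = comm_id
        if exec_flag and c_time <= time:
            last = comm_id
    return first if last is None else last

print(exec_comm([(7, 100, 0), (9, 150, 1), (12, 400, 1)], 300))  # 9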
+# Container for (x, y) data
+
+class XY():
+ def __init__(self, x=0, y=0):
+ self.x = x
+ self.y = y
+
+ def __str__(self):
+ return "XY({}, {})".format(str(self.x), str(self.y))
+
+# Container for sub-range data
+
+class Subrange():
+ def __init__(self, lo=0, hi=0):
+ self.lo = lo
+ self.hi = hi
+
+ def __str__(self):
+ return "Subrange({}, {})".format(str(self.lo), str(self.hi))
+
+# Graph data region base class
+
+class GraphDataRegion(object):
+
+ def __init__(self, key, title = "", ordinal = ""):
+ self.key = key
+ self.title = title
+ self.ordinal = ordinal
+
+# Function to sort GraphDataRegion
+
+def GraphDataRegionOrdinal(data_region):
+ return data_region.ordinal
+
+# Attributes for a graph region
+
+class GraphRegionAttribute():
+
+ def __init__(self, colour):
+ self.colour = colour
+
+# Switch graph data region represents a task
+
+class SwitchGraphDataRegion(GraphDataRegion):
+
+ def __init__(self, key, exec_comm_id, pid, tid, comm, thread_id, comm_id):
+ super(SwitchGraphDataRegion, self).__init__(key)
+
+ self.title = str(pid) + " / " + str(tid) + " " + comm
+ # Order graph legend within exec comm by pid / tid / time
+ self.ordinal = str(pid).rjust(16) + str(exec_comm_id).rjust(8) + str(tid).rjust(16)
+ self.exec_comm_id = exec_comm_id
+ self.pid = pid
+ self.tid = tid
+ self.comm = comm
+ self.thread_id = thread_id
+ self.comm_id = comm_id
+
+# Graph data point
+
+class GraphDataPoint():
+
+ def __init__(self, data, index, x, y, altx=None, alty=None, hregion=None, vregion=None):
+ self.data = data
+ self.index = index
+ self.x = x
+ self.y = y
+ self.altx = altx
+ self.alty = alty
+ self.hregion = hregion
+ self.vregion = vregion
+
+# Graph data (single graph) base class
+
+class GraphData(object):
+
+ def __init__(self, collection, xbase=Decimal(0), ybase=Decimal(0)):
+ self.collection = collection
+ self.points = []
+ self.xbase = xbase
+ self.ybase = ybase
+ self.title = ""
+
+ def AddPoint(self, x, y, altx=None, alty=None, hregion=None, vregion=None):
+ index = len(self.points)
+
+ x = float(Decimal(x) - self.xbase)
+ y = float(Decimal(y) - self.ybase)
+
+ self.points.append(GraphDataPoint(self, index, x, y, altx, alty, hregion, vregion))
+
+ def XToData(self, x):
+ return Decimal(x) + self.xbase
+
+ def YToData(self, y):
+ return Decimal(y) + self.ybase
+
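The Decimal xbase/ybase handling in AddPoint() is not incidental: perf timestamps are nanoseconds since boot, large enough to exceed a double's 53-bit mantissa, so points are stored as small float offsets from the base and converted back with XToData()/YToData(). A quick demonstration (values arbitrary):

from decimal import Decimal

t = Decimal("1234567890123456789")      # ns-scale timestamp
base = Decimal("1234567890000000000")
print(float(t))         # ~1.2345678901234568e+18 -- low bits lost
print(float(t - base))  # 123456789.0 -- exact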
+# Switch graph data (for one CPU)
+
+class SwitchGraphData(GraphData):
+
+ def __init__(self, db, collection, cpu, xbase):
+ super(SwitchGraphData, self).__init__(collection, xbase)
+
+ self.cpu = cpu
+ self.title = "CPU " + str(cpu)
+ self.SelectSwitches(db)
+
+ def SelectComms(self, db, thread_id, last_comm_id, start_time, end_time):
+ query = QSqlQuery(db)
+ QueryExec(query, "SELECT id, c_time"
+ " FROM comms"
+ " WHERE c_thread_id = " + str(thread_id) +
+ " AND exec_flag = " + self.collection.glb.dbref.TRUE +
+ " AND c_time >= " + str(start_time) +
+ " AND c_time <= " + str(end_time) +
+ " ORDER BY c_time, id")
+ while query.next():
+ comm_id = query.value(0)
+ if comm_id == last_comm_id:
+ continue
+ time = query.value(1)
+ hregion = self.HRegion(db, thread_id, comm_id, time)
+ self.AddPoint(time, 1000, None, None, hregion)
+
+ def SelectSwitches(self, db):
+ last_time = None
+ last_comm_id = None
+ last_thread_id = None
+ query = QSqlQuery(db)
+ QueryExec(query, "SELECT time, thread_out_id, thread_in_id, comm_out_id, comm_in_id, flags"
+ " FROM context_switches"
+ " WHERE machine_id = " + str(self.collection.machine_id) +
+ " AND cpu = " + str(self.cpu) +
+ " ORDER BY time, id")
+ while query.next():
+ flags = int(query.value(5))
+ if flags & 1:
+ # Schedule-out: detect and add exec's
+ if last_thread_id == query.value(1) and last_comm_id is not None and last_comm_id != query.value(3):
+ self.SelectComms(db, last_thread_id, last_comm_id, last_time, query.value(0))
+ continue
+ # Schedule-in: add data point
+ if len(self.points) == 0:
+ start_time = self.collection.glb.StartTime(self.collection.machine_id)
+ hregion = self.HRegion(db, query.value(1), query.value(3), start_time)
+ self.AddPoint(start_time, 1000, None, None, hregion)
+ time = query.value(0)
+ comm_id = query.value(4)
+ thread_id = query.value(2)
+ hregion = self.HRegion(db, thread_id, comm_id, time)
+ self.AddPoint(time, 1000, None, None, hregion)
+ last_time = time
+ last_comm_id = comm_id
+ last_thread_id = thread_id
+
+ def NewHRegion(self, db, key, thread_id, comm_id, time):
+ exec_comm_id = ExecComm(db, thread_id, time)
+ query = QSqlQuery(db)
+ QueryExec(query, "SELECT pid, tid FROM threads WHERE id = " + str(thread_id))
+ if query.next():
+ pid = query.value(0)
+ tid = query.value(1)
+ else:
+ pid = -1
+ tid = -1
+ query = QSqlQuery(db)
+ QueryExec(query, "SELECT comm FROM comms WHERE id = " + str(comm_id))
+ if query.next():
+ comm = query.value(0)
+ else:
+ comm = ""
+ return SwitchGraphDataRegion(key, exec_comm_id, pid, tid, comm, thread_id, comm_id)
+
+ def HRegion(self, db, thread_id, comm_id, time):
+ key = str(thread_id) + ":" + str(comm_id)
+ hregion = self.collection.LookupHRegion(key)
+ if hregion is None:
+ hregion = self.NewHRegion(db, key, thread_id, comm_id, time)
+ self.collection.AddHRegion(key, hregion)
+ return hregion
+
+# Graph data collection (multiple related graphs) base class
+
+class GraphDataCollection(object):
+
+ def __init__(self, glb):
+ self.glb = glb
+ self.data = []
+ self.hregions = {}
+ self.xrangelo = None
+ self.xrangehi = None
+ self.yrangelo = None
+ self.yrangehi = None
+ self.dp = XY(0, 0)
+
+ def AddGraphData(self, data):
+ self.data.append(data)
+
+ def LookupHRegion(self, key):
+ if key in self.hregions:
+ return self.hregions[key]
+ return None
+
+ def AddHRegion(self, key, hregion):
+ self.hregions[key] = hregion
+
+# Switch graph data collection (SwitchGraphData for each CPU)
+
+class SwitchGraphDataCollection(GraphDataCollection):
+
+ def __init__(self, glb, db, machine_id):
+ super(SwitchGraphDataCollection, self).__init__(glb)
+
+ self.machine_id = machine_id
+ self.cpus = self.SelectCPUs(db)
+
+ self.xrangelo = glb.StartTime(machine_id)
+ self.xrangehi = glb.FinishTime(machine_id)
+
+ self.yrangelo = Decimal(0)
+ self.yrangehi = Decimal(1000)
+
+ for cpu in self.cpus:
+ self.AddGraphData(SwitchGraphData(db, self, cpu, self.xrangelo))
+
+ def SelectCPUs(self, db):
+ cpus = []
+ query = QSqlQuery(db)
+ QueryExec(query, "SELECT DISTINCT cpu"
+ " FROM context_switches"
+ " WHERE machine_id = " + str(self.machine_id))
+ while query.next():
+ cpus.append(int(query.value(0)))
+ return sorted(cpus)
+
+# Switch graph data graphics item displays the graphed data
+
+class SwitchGraphDataGraphicsItem(QGraphicsItem):
+
+ def __init__(self, data, graph_width, graph_height, attrs, event_handler, parent=None):
+ super(SwitchGraphDataGraphicsItem, self).__init__(parent)
+
+ self.data = data
+ self.graph_width = graph_width
+ self.graph_height = graph_height
+ self.attrs = attrs
+ self.event_handler = event_handler
+ self.setAcceptHoverEvents(True)
+
+ def boundingRect(self):
+ return QRectF(0, 0, self.graph_width, self.graph_height)
+
+ def PaintPoint(self, painter, last, x):
+ if not(last is None or last.hregion.pid == 0 or x < self.attrs.subrange.x.lo):
+ if last.x < self.attrs.subrange.x.lo:
+ x0 = self.attrs.subrange.x.lo
+ else:
+ x0 = last.x
+ if x > self.attrs.subrange.x.hi:
+ x1 = self.attrs.subrange.x.hi
+ else:
+ x1 = x - 1
+ x0 = self.attrs.XToPixel(x0)
+ x1 = self.attrs.XToPixel(x1)
+
+ y0 = self.attrs.YToPixel(last.y)
+
+ colour = self.attrs.region_attributes[last.hregion.key].colour
+
+ width = x1 - x0 + 1
+ if width < 2:
+ painter.setPen(colour)
+ painter.drawLine(x0, self.graph_height - y0, x0, self.graph_height)
+ else:
+ painter.fillRect(x0, self.graph_height - y0, width, self.graph_height - 1, colour)
+
+ def paint(self, painter, option, widget):
+ last = None
+ for point in self.data.points:
+ self.PaintPoint(painter, last, point.x)
+ if point.x > self.attrs.subrange.x.hi:
+ break
+ last = point
+ self.PaintPoint(painter, last, self.attrs.subrange.x.hi + 1)
+
+ def BinarySearchPoint(self, target):
+ lower_pos = 0
+ higher_pos = len(self.data.points)
+ while True:
+ pos = int((lower_pos + higher_pos) / 2)
+ val = self.data.points[pos].x
+ if target >= val:
+ lower_pos = pos
+ else:
+ higher_pos = pos
+ if higher_pos <= lower_pos + 1:
+ return lower_pos
+
+ def XPixelToData(self, x):
+ x = self.attrs.PixelToX(x)
+ if x < self.data.points[0].x:
+ x = 0
+ pos = 0
+ low = True
+ else:
+ pos = self.BinarySearchPoint(x)
+ low = False
+ return (low, pos, self.data.XToData(x))
+
+ def EventToData(self, event):
+ no_data = (None,) * 4
+ if len(self.data.points) < 1:
+ return no_data
+ x = event.pos().x()
+ if x < 0:
+ return no_data
+ low0, pos0, time_from = self.XPixelToData(x)
+ low1, pos1, time_to = self.XPixelToData(x + 1)
+ hregions = set()
+ hregion_times = []
+ if not low1:
+ for i in xrange(pos0, pos1 + 1):
+ hregion = self.data.points[i].hregion
+ hregions.add(hregion)
+ if i == pos0:
+ time = time_from
+ else:
+ time = self.data.XToData(self.data.points[i].x)
+ hregion_times.append((hregion, time))
+ return (time_from, time_to, hregions, hregion_times)
+
+ def hoverMoveEvent(self, event):
+ time_from, time_to, hregions, hregion_times = self.EventToData(event)
+ if time_from is not None:
+ self.event_handler.PointEvent(self.data.cpu, time_from, time_to, hregions)
+
+ def hoverLeaveEvent(self, event):
+ self.event_handler.NoPointEvent()
+
+ def mousePressEvent(self, event):
+ if event.button() != Qt.RightButton:
+ super(SwitchGraphDataGraphicsItem, self).mousePressEvent(event)
+ return
+ time_from, time_to, hregions, hregion_times = self.EventToData(event)
+ if hregion_times:
+ self.event_handler.RightClickEvent(self.data.cpu, hregion_times, event.screenPos())
+
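BinarySearchPoint() above is a rightmost-match binary search: given a target known to be >= points[0].x (XPixelToData() handles the other case), it returns the index of the last point whose x does not exceed the target, i.e. the point in effect at that pixel. The same loop on plain integers (illustrative data):

def rightmost_le(xs, target):
    # Mirrors BinarySearchPoint(): callers guarantee target >= xs[0].
    lo, hi = 0, len(xs)
    while True:
        pos = (lo + hi) // 2
        if target >= xs[pos]:
            lo = pos
        else:
            hi = pos
        if hi <= lo + 1:
            return lo

print(rightmost_le([0, 10, 20, 30], 25))  # 2, i.e. the point at x == 20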
+# X-axis graphics item
+
+class XAxisGraphicsItem(QGraphicsItem):
+
+ def __init__(self, width, parent=None):
+ super(XAxisGraphicsItem, self).__init__(parent)
+
+ self.width = width
+ self.max_mark_sz = 4
+ self.height = self.max_mark_sz + 1
+
+ def boundingRect(self):
+ return QRectF(0, 0, self.width, self.height)
+
+ def Step(self):
+ attrs = self.parentItem().attrs
+ subrange = attrs.subrange.x
+ t = subrange.hi - subrange.lo
+ s = (3.0 * t) / self.width
+ n = 1.0
+ while s > n:
+ n = n * 10.0
+ return n
+
+ def PaintMarks(self, painter, at_y, lo, hi, step, i):
+ attrs = self.parentItem().attrs
+ x = lo
+ while x <= hi:
+ xp = attrs.XToPixel(x)
+ if i % 10:
+ if i % 5:
+ sz = 1
+ else:
+ sz = 2
+ else:
+ sz = self.max_mark_sz
+ i = 0
+ painter.drawLine(xp, at_y, xp, at_y + sz)
+ x += step
+ i += 1
+
+ def paint(self, painter, option, widget):
+ # Using QPainter::drawLine(int x1, int y1, int x2, int y2) so x2 = width - 1
+ painter.drawLine(0, 0, self.width - 1, 0)
+ n = self.Step()
+ attrs = self.parentItem().attrs
+ subrange = attrs.subrange.x
+ if subrange.lo:
+ x_offset = n - (subrange.lo % n)
+ else:
+ x_offset = 0.0
+ x = subrange.lo + x_offset
+ i = (x / n) % 10
+ self.PaintMarks(painter, 0, x, subrange.hi, n, i)
+
+ def ScaleDimensions(self):
+ n = self.Step()
+ attrs = self.parentItem().attrs
+ lo = attrs.subrange.x.lo
+ hi = (n * 10.0) + lo
+ width = attrs.XToPixel(hi)
+ if width > 500:
+ width = 0
+ return (n, lo, hi, width)
+
+ def PaintScale(self, painter, at_x, at_y):
+ n, lo, hi, width = self.ScaleDimensions()
+ if not width:
+ return
+ painter.drawLine(at_x, at_y, at_x + width, at_y)
+ self.PaintMarks(painter, at_y, lo, hi, n, 0)
+
+ def ScaleWidth(self):
+ n, lo, hi, width = self.ScaleDimensions()
+ return width
+
+ def ScaleHeight(self):
+ return self.height
+
+ def ScaleUnit(self):
+ return self.Step() * 10
+
+# Scale graphics item base class
+
+class ScaleGraphicsItem(QGraphicsItem):
+
+ def __init__(self, axis, parent=None):
+ super(ScaleGraphicsItem, self).__init__(parent)
+ self.axis = axis
+
+ def boundingRect(self):
+ scale_width = self.axis.ScaleWidth()
+ if not scale_width:
+ return QRectF()
+ return QRectF(0, 0, self.axis.ScaleWidth() + 100, self.axis.ScaleHeight())
+
+ def paint(self, painter, option, widget):
+ scale_width = self.axis.ScaleWidth()
+ if not scale_width:
+ return
+ self.axis.PaintScale(painter, 0, 5)
+ x = scale_width + 4
+ painter.drawText(QPointF(x, 10), self.Text())
+
+ def Unit(self):
+ return self.axis.ScaleUnit()
+
+ def Text(self):
+ return ""
+
+# Switch graph scale graphics item
+
+class SwitchScaleGraphicsItem(ScaleGraphicsItem):
+
+ def __init__(self, axis, parent=None):
+ super(SwitchScaleGraphicsItem, self).__init__(axis, parent)
+
+ def Text(self):
+ unit = self.Unit()
+ if unit >= 1000000000:
+ unit = int(unit / 1000000000)
+ us = "s"
+ elif unit >= 1000000:
+ unit = int(unit / 1000000)
+ us = "ms"
+ elif unit >= 1000:
+ unit = int(unit / 1000)
+ us = "us"
+ else:
+ unit = int(unit)
+ us = "ns"
+ return " = " + str(unit) + " " + us
+
+# Switch graph graphics item contains graph title, scale, x/y-axis, and the graphed data
+
+class SwitchGraphGraphicsItem(QGraphicsItem):
+
+ def __init__(self, collection, data, attrs, event_handler, first, parent=None):
+ super(SwitchGraphGraphicsItem, self).__init__(parent)
+ self.collection = collection
+ self.data = data
+ self.attrs = attrs
+ self.event_handler = event_handler
+
+ margin = 20
+ title_width = 50
+
+ self.title_graphics = QGraphicsSimpleTextItem(data.title, self)
+
+ self.title_graphics.setPos(margin, margin)
+ graph_width = attrs.XToPixel(attrs.subrange.x.hi) + 1
+ graph_height = attrs.YToPixel(attrs.subrange.y.hi) + 1
+
+ self.graph_origin_x = margin + title_width + margin
+ self.graph_origin_y = graph_height + margin
+
+ x_axis_size = 1
+ y_axis_size = 1
+ self.yline = QGraphicsLineItem(0, 0, 0, graph_height, self)
+
+ self.x_axis = XAxisGraphicsItem(graph_width, self)
+ self.x_axis.setPos(self.graph_origin_x, self.graph_origin_y + 1)
+
+ if first:
+ self.scale_item = SwitchScaleGraphicsItem(self.x_axis, self)
+ self.scale_item.setPos(self.graph_origin_x, self.graph_origin_y + 10)
+
+ self.yline.setPos(self.graph_origin_x - y_axis_size, self.graph_origin_y - graph_height)
+
+ self.axis_point = QGraphicsLineItem(0, 0, 0, 0, self)
+ self.axis_point.setPos(self.graph_origin_x - 1, self.graph_origin_y + 1)
+
+ self.width = self.graph_origin_x + graph_width + margin
+ self.height = self.graph_origin_y + margin
+
+ self.graph = SwitchGraphDataGraphicsItem(data, graph_width, graph_height, attrs, event_handler, self)
+ self.graph.setPos(self.graph_origin_x, self.graph_origin_y - graph_height)
+
+ if parent and 'EnableRubberBand' in dir(parent):
+ parent.EnableRubberBand(self.graph_origin_x, self.graph_origin_x + graph_width - 1, self)
+
+ def boundingRect(self):
+ return QRectF(0, 0, self.width, self.height)
+
+ def paint(self, painter, option, widget):
+ pass
+
+ def RBXToPixel(self, x):
+ return self.attrs.PixelToX(x - self.graph_origin_x)
+
+ def RBXRangeToPixel(self, x0, x1):
+ return (self.RBXToPixel(x0), self.RBXToPixel(x1 + 1))
+
+ def RBPixelToTime(self, x):
+ if x < self.data.points[0].x:
+ return self.data.XToData(0)
+ return self.data.XToData(x)
+
+ def RBEventTimes(self, x0, x1):
+ x0, x1 = self.RBXRangeToPixel(x0, x1)
+ time_from = self.RBPixelToTime(x0)
+ time_to = self.RBPixelToTime(x1)
+ return (time_from, time_to)
+
+ def RBEvent(self, x0, x1):
+ time_from, time_to = self.RBEventTimes(x0, x1)
+ self.event_handler.RangeEvent(time_from, time_to)
+
+ def RBMoveEvent(self, x0, x1):
+ if x1 < x0:
+ x0, x1 = x1, x0
+ self.RBEvent(x0, x1)
+
+ def RBReleaseEvent(self, x0, x1, selection_state):
+ if x1 < x0:
+ x0, x1 = x1, x0
+ x0, x1 = self.RBXRangeToPixel(x0, x1)
+ self.event_handler.SelectEvent(x0, x1, selection_state)
+
+# Graphics item to draw a vertical bracket (used to highlight "forward" sub-range)
+
+class VerticalBracketGraphicsItem(QGraphicsItem):
+
+ def __init__(self, parent=None):
+ super(VerticalBracketGraphicsItem, self).__init__(parent)
+
+ self.width = 0
+ self.height = 0
+ self.hide()
+
+ def SetSize(self, width, height):
+ self.width = width + 1
+ self.height = height + 1
+
+ def boundingRect(self):
+ return QRectF(0, 0, self.width, self.height)
+
+ def paint(self, painter, option, widget):
+ colour = QColor(255, 255, 0, 32)
+ painter.fillRect(0, 0, self.width, self.height, colour)
+ x1 = self.width - 1
+ y1 = self.height - 1
+ painter.drawLine(0, 0, x1, 0)
+ painter.drawLine(0, 0, 0, 3)
+ painter.drawLine(x1, 0, x1, 3)
+ painter.drawLine(0, y1, x1, y1)
+ painter.drawLine(0, y1, 0, y1 - 3)
+ painter.drawLine(x1, y1, x1, y1 - 3)
+
+# Graphics item to contain graphs arranged vertically
+
+class VerticalGraphSetGraphicsItem(QGraphicsItem):
+
+ def __init__(self, collection, attrs, event_handler, child_class, parent=None):
+ super(VerticalGraphSetGraphicsItem, self).__init__(parent)
+
+ self.collection = collection
+
+ self.top = 10
+
+ self.width = 0
+ self.height = self.top
+
+ self.rubber_band = None
+ self.rb_enabled = False
+
+ first = True
+ for data in collection.data:
+ child = child_class(collection, data, attrs, event_handler, first, self)
+ child.setPos(0, self.height + 1)
+ rect = child.boundingRect()
+ if rect.right() > self.width:
+ self.width = rect.right()
+ self.height = self.height + rect.bottom() + 1
+ first = False
+
+ self.bracket = VerticalBracketGraphicsItem(self)
+
+ def EnableRubberBand(self, xlo, xhi, rb_event_handler):
+ if self.rb_enabled:
+ return
+ self.rb_enabled = True
+ self.rb_in_view = False
+ self.setAcceptedMouseButtons(Qt.LeftButton)
+ self.rb_xlo = xlo
+ self.rb_xhi = xhi
+ self.rb_event_handler = rb_event_handler
+ self.mousePressEvent = self.MousePressEvent
+ self.mouseMoveEvent = self.MouseMoveEvent
+ self.mouseReleaseEvent = self.MouseReleaseEvent
+
+ def boundingRect(self):
+ return QRectF(0, 0, self.width, self.height)
+
+ def paint(self, painter, option, widget):
+ pass
+
+ def RubberBandParent(self):
+ scene = self.scene()
+ view = scene.views()[0]
+ viewport = view.viewport()
+ return viewport
+
+ def RubberBandSetGeometry(self, rect):
+ scene_rectf = self.mapRectToScene(QRectF(rect))
+ scene = self.scene()
+ view = scene.views()[0]
+ poly = view.mapFromScene(scene_rectf)
+ self.rubber_band.setGeometry(poly.boundingRect())
+
+ def SetSelection(self, selection_state):
+ if self.rubber_band:
+ if selection_state:
+ self.RubberBandSetGeometry(selection_state)
+ self.rubber_band.show()
+ else:
+ self.rubber_band.hide()
+
+ def SetBracket(self, rect):
+ if rect:
+ x, y, width, height = rect.x(), rect.y(), rect.width(), rect.height()
+ self.bracket.setPos(x, y)
+ self.bracket.SetSize(width, height)
+ self.bracket.show()
+ else:
+ self.bracket.hide()
+
+ def RubberBandX(self, event):
+ x = event.pos().toPoint().x()
+ if x < self.rb_xlo:
+ x = self.rb_xlo
+ elif x > self.rb_xhi:
+ x = self.rb_xhi
+ else:
+ self.rb_in_view = True
+ return x
+
+ def RubberBandRect(self, x):
+ if self.rb_origin.x() <= x:
+ width = x - self.rb_origin.x()
+ rect = QRect(self.rb_origin, QSize(width, self.height))
+ else:
+ width = self.rb_origin.x() - x
+ top_left = QPoint(self.rb_origin.x() - width, self.rb_origin.y())
+ rect = QRect(top_left, QSize(width, self.height))
+ return rect
+
+ def MousePressEvent(self, event):
+ self.rb_in_view = False
+ x = self.RubberBandX(event)
+ self.rb_origin = QPoint(x, self.top)
+ if self.rubber_band is None:
+ self.rubber_band = QRubberBand(QRubberBand.Rectangle, self.RubberBandParent())
+ self.RubberBandSetGeometry(QRect(self.rb_origin, QSize(0, self.height)))
+ if self.rb_in_view:
+ self.rubber_band.show()
+ self.rb_event_handler.RBMoveEvent(x, x)
+ else:
+ self.rubber_band.hide()
+
+ def MouseMoveEvent(self, event):
+ x = self.RubberBandX(event)
+ rect = self.RubberBandRect(x)
+ self.RubberBandSetGeometry(rect)
+ if self.rb_in_view:
+ self.rubber_band.show()
+ self.rb_event_handler.RBMoveEvent(self.rb_origin.x(), x)
+
+ def MouseReleaseEvent(self, event):
+ x = self.RubberBandX(event)
+ if self.rb_in_view:
+ selection_state = self.RubberBandRect(x)
+ else:
+ selection_state = None
+ self.rb_event_handler.RBReleaseEvent(self.rb_origin.x(), x, selection_state)
+
+# Switch graph legend data model
+
+class SwitchGraphLegendModel(QAbstractTableModel):
+
+ def __init__(self, collection, region_attributes, parent=None):
+ super(SwitchGraphLegendModel, self).__init__(parent)
+
+ self.region_attributes = region_attributes
+
+ self.child_items = sorted(collection.hregions.values(), key=GraphDataRegionOrdinal)
+ self.child_count = len(self.child_items)
+
+ self.highlight_set = set()
+
+ self.column_headers = ("pid", "tid", "comm")
+
+ def rowCount(self, parent):
+ return self.child_count
+
+ def headerData(self, section, orientation, role):
+ if role != Qt.DisplayRole:
+ return None
+ if orientation != Qt.Horizontal:
+ return None
+ return self.columnHeader(section)
+
+ def index(self, row, column, parent):
+ return self.createIndex(row, column, self.child_items[row])
+
+ def columnCount(self, parent=None):
+ return len(self.column_headers)
+
+ def columnHeader(self, column):
+ return self.column_headers[column]
+
+ def data(self, index, role):
+ if role == Qt.BackgroundRole:
+ child = self.child_items[index.row()]
+ if child in self.highlight_set:
+ return self.region_attributes[child.key].colour
+ return None
+ if role == Qt.ForegroundRole:
+ child = self.child_items[index.row()]
+ if child in self.highlight_set:
+ return QColor(255, 255, 255)
+ return self.region_attributes[child.key].colour
+ if role != Qt.DisplayRole:
+ return None
+ hregion = self.child_items[index.row()]
+ col = index.column()
+ if col == 0:
+ return hregion.pid
+ if col == 1:
+ return hregion.tid
+ if col == 2:
+ return hregion.comm
+ return None
+
+ def SetHighlight(self, row, set_highlight):
+ child = self.child_items[row]
+ top_left = self.createIndex(row, 0, child)
+ bottom_right = self.createIndex(row, len(self.column_headers) - 1, child)
+ self.dataChanged.emit(top_left, bottom_right)
+
+ def Highlight(self, highlight_set):
+ for row in xrange(self.child_count):
+ child = self.child_items[row]
+ if child in self.highlight_set:
+ if child not in highlight_set:
+ self.SetHighlight(row, False)
+ elif child in highlight_set:
+ self.SetHighlight(row, True)
+ self.highlight_set = highlight_set
+
+# Switch graph legend is a table
+
+class SwitchGraphLegend(QWidget):
+
+ def __init__(self, collection, region_attributes, parent=None):
+ super(SwitchGraphLegend, self).__init__(parent)
+
+ self.data_model = SwitchGraphLegendModel(collection, region_attributes)
+
+ self.model = QSortFilterProxyModel()
+ self.model.setSourceModel(self.data_model)
+
+ self.view = QTableView()
+ self.view.setModel(self.model)
+ self.view.setEditTriggers(QAbstractItemView.NoEditTriggers)
+ self.view.verticalHeader().setVisible(False)
+ self.view.sortByColumn(-1, Qt.AscendingOrder)
+ self.view.setSortingEnabled(True)
+ self.view.resizeColumnsToContents()
+ self.view.resizeRowsToContents()
+
+ self.vbox = VBoxLayout(self.view)
+ self.setLayout(self.vbox)
+
+ sz1 = self.view.columnWidth(0) + self.view.columnWidth(1) + self.view.columnWidth(2) + 2
+ sz1 = sz1 + self.view.verticalScrollBar().sizeHint().width()
+ self.saved_size = sz1
+
+ def resizeEvent(self, event):
+ self.saved_size = self.size().width()
+ super(SwitchGraphLegend, self).resizeEvent(event)
+
+ def Highlight(self, highlight_set):
+ self.data_model.Highlight(highlight_set)
+ self.update()
+
+ def changeEvent(self, event):
+ if event.type() == QEvent.FontChange:
+ self.view.resizeRowsToContents()
+ self.view.resizeColumnsToContents()
+ # Need to resize rows again after column resize
+ self.view.resizeRowsToContents()
+ super(SwitchGraphLegend, self).changeEvent(event)
+
+# Random colour generation
+
+def RGBColourTooLight(r, g, b):
+ if g > 230:
+ return True
+ if g <= 160:
+ return False
+ if r <= 180 and g <= 180:
+ return False
+ if r < 60:
+ return False
+ return True
+
+def GenerateColours(x):
+ cs = [0]
+ for i in xrange(1, x):
+ cs.append(int((255.0 / i) + 0.5))
+ colours = []
+ for r in cs:
+ for g in cs:
+ for b in cs:
+ # Exclude black and colours that look too light against a white background
+ if (r, g, b) == (0, 0, 0) or RGBColourTooLight(r, g, b):
+ continue
+ colours.append(QColor(r, g, b))
+ return colours
+
+def GenerateNColours(n):
+ for x in xrange(2, n + 2):
+ colours = GenerateColours(x)
+ if len(colours) >= n:
+ return colours
+ return []
+
+def GenerateNRandomColours(n, seed):
+ colours = GenerateNColours(n)
+ random.seed(seed)
+ random.shuffle(colours)
+ return colours
+
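The generators above enumerate colour cubes of growing resolution until enough usable colours exist: GenerateColours(x) builds x channel values (0 plus descending fractions of 255), takes all combinations, and drops black plus anything RGBColourTooLight() rejects against the white background. For example, x == 2 gives channel values [0, 255], of which only blue, red and magenta survive, so GenerateNColours(3) already stops there. Usage, assuming the functions above are in scope:

cols = GenerateNRandomColours(3, 1013)  # seeded shuffle, so reproducible
print([c.name() for c in cols])         # '#0000ff', '#ff0000', '#ff00ff' in shuffled order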
+# Graph attributes, in particular the scale and subrange that change when zooming
+
+class GraphAttributes():
+
+ def __init__(self, scale, subrange, region_attributes, dp):
+ self.scale = scale
+ self.subrange = subrange
+ self.region_attributes = region_attributes
+ # Rounding avoids errors due to finite floating point precision
+ self.dp = dp # data decimal places
+ self.Update()
+
+ def XToPixel(self, x):
+ return int(round((x - self.subrange.x.lo) * self.scale.x, self.pdp.x))
+
+ def YToPixel(self, y):
+ return int(round((y - self.subrange.y.lo) * self.scale.y, self.pdp.y))
+
+ def PixelToXRounded(self, px):
+ return round((round(px, 0) / self.scale.x), self.dp.x) + self.subrange.x.lo
+
+ def PixelToYRounded(self, py):
+ return round((round(py, 0) / self.scale.y), self.dp.y) + self.subrange.y.lo
+
+ def PixelToX(self, px):
+ x = self.PixelToXRounded(px)
+ if self.pdp.x == 0:
+ rt = self.XToPixel(x)
+ if rt > px:
+ return x - 1
+ return x
+
+ def PixelToY(self, py):
+ y = self.PixelToYRounded(py)
+ if self.pdp.y == 0:
+ rt = self.YToPixel(y)
+ if rt > py:
+ return y - 1
+ return y
+
+ def ToPDP(self, dp, scale):
+ # Calculate pixel decimal places:
+ # (10 ** dp) is the minimum delta in the data
+ # scale it to get the minimum delta in pixels
+ # log10 gives the number of decimal places, as a negative number
+ # subtract 1 to divide by 10
+ # round down to the next lower negative number
+ # negate to get the number of decimal places as a positive number
+ x = math.log10((10 ** dp) * scale)
+ if x < 0:
+ x -= 1
+ x = -int(math.floor(x) - 0.1)
+ else:
+ x = 0
+ return x
+
+ def Update(self):
+ x = self.ToPDP(self.dp.x, self.scale.x)
+ y = self.ToPDP(self.dp.y, self.scale.y)
+ self.pdp = XY(x, y) # pixel decimal places
+
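One concrete case of the pixel-decimal-places arithmetic in ToPDP(): integer data (dp == 0) drawn at 0.001 pixels per data unit, i.e. zoomed far out. The minimum data delta maps to 0.001 px, log10 of that is -3, subtracting 1 gives -4, and flooring and negating yields 4, so pixel coordinates are rounded to four decimal places:

import math

dp, scale = 0, 0.001
x = math.log10((10 ** dp) * scale)   # -3.0
x -= 1                               # -4.0
pdp = -int(math.floor(x) - 0.1)      # 4
print(pdp)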
+# Switch graph splitter which divides the CPU graphs from the legend
+
+class SwitchGraphSplitter(QSplitter):
+
+ def __init__(self, parent=None):
+ super(SwitchGraphSplitter, self).__init__(parent)
+
+ self.first_time = False
+
+ def resizeEvent(self, ev):
+ if self.first_time:
+ self.first_time = False
+ sz1 = self.widget(1).view.columnWidth(0) + self.widget(1).view.columnWidth(1) + self.widget(1).view.columnWidth(2) + 2
+ sz1 = sz1 + self.widget(1).view.verticalScrollBar().sizeHint().width()
+ sz0 = self.size().width() - self.handleWidth() - sz1
+ self.setSizes([sz0, sz1])
+ elif self.widget(1).saved_size is not None:
+ sz1 = self.widget(1).saved_size
+ sz0 = self.size().width() - self.handleWidth() - sz1
+ self.setSizes([sz0, sz1])
+ super(SwitchGraphSplitter, self).resizeEvent(ev)
+
+# Graph widget base class
+
+class GraphWidget(QWidget):
+
+ graph_title_changed = Signal(object)
+
+ def __init__(self, parent=None):
+ super(GraphWidget, self).__init__(parent)
+
+ def GraphTitleChanged(self, title):
+ self.graph_title_changed.emit(title)
+
+ def Title(self):
+ return ""
+
+# Display time in s, ms, us or ns
+
+def ToTimeStr(val):
+ val = Decimal(val)
+ if val >= 1000000000:
+ return "{} s".format((val / 1000000000).quantize(Decimal("0.000000001")))
+ if val >= 1000000:
+ return "{} ms".format((val / 1000000).quantize(Decimal("0.000001")))
+ if val >= 1000:
+ return "{} us".format((val / 1000).quantize(Decimal("0.001")))
+ return "{} ns".format(val.quantize(Decimal("1")))
+
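ToTimeStr() above picks the largest unit that fits and quantizes to nanosecond resolution expressed in that unit, so no precision is dropped. For instance (nanosecond inputs chosen arbitrarily):

print(ToTimeStr(950))         # 950 ns
print(ToTimeStr(1234567))     # 1.234567 ms
print(ToTimeStr(2500000000))  # 2.500000000 s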
+# Switch (i.e. context switch, i.e. Time Chart by CPU) graph widget, which contains the CPU graphs, the legend and the control buttons
+
+class SwitchGraphWidget(GraphWidget):
+
+ def __init__(self, glb, collection, parent=None):
+ super(SwitchGraphWidget, self).__init__(parent)
+
+ self.glb = glb
+ self.collection = collection
+
+ self.back_state = []
+ self.forward_state = []
+ self.selection_state = (None, None)
+ self.fwd_rect = None
+ self.start_time = self.glb.StartTime(collection.machine_id)
+
+ i = 0
+ hregions = collection.hregions.values()
+ colours = GenerateNRandomColours(len(hregions), 1013)
+ region_attributes = {}
+ for hregion in hregions:
+ if hregion.pid == 0 and hregion.tid == 0:
+ region_attributes[hregion.key] = GraphRegionAttribute(QColor(0, 0, 0))
+ else:
+ region_attributes[hregion.key] = GraphRegionAttribute(colours[i])
+ i = i + 1
+
+ # Default to entire range
+ xsubrange = Subrange(0.0, float(collection.xrangehi - collection.xrangelo) + 1.0)
+ ysubrange = Subrange(0.0, float(collection.yrangehi - collection.yrangelo) + 1.0)
+ subrange = XY(xsubrange, ysubrange)
+
+ scale = self.GetScaleForRange(subrange)
+
+ self.attrs = GraphAttributes(scale, subrange, region_attributes, collection.dp)
+
+ self.item = VerticalGraphSetGraphicsItem(collection, self.attrs, self, SwitchGraphGraphicsItem)
+
+ self.scene = QGraphicsScene()
+ self.scene.addItem(self.item)
+
+ self.view = QGraphicsView(self.scene)
+ self.view.centerOn(0, 0)
+ self.view.setAlignment(Qt.AlignLeft | Qt.AlignTop)
+
+ self.legend = SwitchGraphLegend(collection, region_attributes)
+
+ self.splitter = SwitchGraphSplitter()
+ self.splitter.addWidget(self.view)
+ self.splitter.addWidget(self.legend)
+
+ self.point_label = QLabel("")
+ self.point_label.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Fixed)
+
+ self.back_button = QToolButton()
+ self.back_button.setIcon(self.style().standardIcon(QStyle.SP_ArrowLeft))
+ self.back_button.setDisabled(True)
+ self.back_button.released.connect(lambda: self.Back())
+
+ self.forward_button = QToolButton()
+ self.forward_button.setIcon(self.style().standardIcon(QStyle.SP_ArrowRight))
+ self.forward_button.setDisabled(True)
+ self.forward_button.released.connect(lambda: self.Forward())
+
+ self.zoom_button = QToolButton()
+ self.zoom_button.setText("Zoom")
+ self.zoom_button.setDisabled(True)
+ self.zoom_button.released.connect(lambda: self.Zoom())
+
+ self.hbox = HBoxLayout(self.back_button, self.forward_button, self.zoom_button, self.point_label)
+
+ self.vbox = VBoxLayout(self.splitter, self.hbox)
+
+ self.setLayout(self.vbox)
+
+ def GetScaleForRangeX(self, xsubrange):
+ # Default graph 1000 pixels wide
+ dflt = 1000.0
+ r = xsubrange.hi - xsubrange.lo
+ return dflt / r
+
+ def GetScaleForRangeY(self, ysubrange):
+ # Default graph 50 pixels high
+ dflt = 50.0
+ r = ysubrange.hi - ysubrange.lo
+ return dflt / r
+
+ def GetScaleForRange(self, subrange):
+ # Default graph 1000 pixels wide, 50 pixels high
+ xscale = self.GetScaleForRangeX(subrange.x)
+ yscale = self.GetScaleForRangeY(subrange.y)
+ return XY(xscale, yscale)
+
+ def PointEvent(self, cpu, time_from, time_to, hregions):
+ text = "CPU: " + str(cpu)
+ time_from = time_from.quantize(Decimal(1))
+ rel_time_from = time_from - self.glb.StartTime(self.collection.machine_id)
+ text = text + " Time: " + str(time_from) + " (+" + ToTimeStr(rel_time_from) + ")"
+ self.point_label.setText(text)
+ self.legend.Highlight(hregions)
+
+ def RightClickEvent(self, cpu, hregion_times, pos):
+ if not IsSelectable(self.glb.db, "calls", "WHERE parent_id >= 0"):
+ return
+ menu = QMenu(self.view)
+ for hregion, time in hregion_times:
+ thread_at_time = (hregion.exec_comm_id, hregion.thread_id, time)
+ menu_text = "Show Call Tree for {} {}:{} at {}".format(hregion.comm, hregion.pid, hregion.tid, time)
+ menu.addAction(CreateAction(menu_text, "Show Call Tree", lambda a=None, args=thread_at_time: self.RightClickSelect(args), self.view))
+ menu.exec_(pos)
+
+ def RightClickSelect(self, args):
+ CallTreeWindow(self.glb, self.glb.mainwindow, thread_at_time=args)
+
+ def NoPointEvent(self):
+ self.point_label.setText("")
+ self.legend.Highlight({})
+
+ def RangeEvent(self, time_from, time_to):
+ time_from = time_from.quantize(Decimal(1))
+ time_to = time_to.quantize(Decimal(1))
+ if time_to <= time_from:
+ self.point_label.setText("")
+ return
+ rel_time_from = time_from - self.start_time
+ rel_time_to = time_to - self.start_time
+ text = " Time: " + str(time_from) + " (+" + ToTimeStr(rel_time_from) + ") to: " + str(time_to) + " (+" + ToTimeStr(rel_time_to) + ")"
+ text = text + " duration: " + ToTimeStr(time_to - time_from)
+ self.point_label.setText(text)
+
+ def BackState(self):
+ return (self.attrs.subrange, self.attrs.scale, self.selection_state, self.fwd_rect)
+
+ def PushBackState(self):
+ state = copy.deepcopy(self.BackState())
+ self.back_state.append(state)
+ self.back_button.setEnabled(True)
+
+ def PopBackState(self):
+ self.attrs.subrange, self.attrs.scale, self.selection_state, self.fwd_rect = self.back_state.pop()
+ self.attrs.Update()
+ if not self.back_state:
+ self.back_button.setDisabled(True)
+
+ def PushForwardState(self):
+ state = copy.deepcopy(self.BackState())
+ self.forward_state.append(state)
+ self.forward_button.setEnabled(True)
+
+ def PopForwardState(self):
+ self.attrs.subrange, self.attrs.scale, self.selection_state, self.fwd_rect = self.forward_state.pop()
+ self.attrs.Update()
+ if not self.forward_state:
+ self.forward_button.setDisabled(True)
+
+ def Title(self):
+ time_from = self.collection.xrangelo + Decimal(self.attrs.subrange.x.lo)
+ time_to = self.collection.xrangelo + Decimal(self.attrs.subrange.x.hi)
+ rel_time_from = time_from - self.start_time
+ rel_time_to = time_to - self.start_time
+ title = "+" + ToTimeStr(rel_time_from) + " to +" + ToTimeStr(rel_time_to)
+ title = title + " (" + ToTimeStr(time_to - time_from) + ")"
+ return title
+
+ def Update(self):
+ selected_subrange, selection_state = self.selection_state
+ self.item.SetSelection(selection_state)
+ self.item.SetBracket(self.fwd_rect)
+ self.zoom_button.setDisabled(selected_subrange is None)
+ self.GraphTitleChanged(self.Title())
+ self.item.update(self.item.boundingRect())
+
+ def Back(self):
+ if not self.back_state:
+ return
+ self.PushForwardState()
+ self.PopBackState()
+ self.Update()
+
+ def Forward(self):
+ if not self.forward_state:
+ return
+ self.PushBackState()
+ self.PopForwardState()
+ self.Update()
+
+ def SelectEvent(self, x0, x1, selection_state):
+ if selection_state is None:
+ selected_subrange = None
+ else:
+ if x1 - x0 < 1.0:
+ x1 += 1.0
+ selected_subrange = Subrange(x0, x1)
+ self.selection_state = (selected_subrange, selection_state)
+ self.zoom_button.setDisabled(selected_subrange is None)
+
+ def Zoom(self):
+ selected_subrange, selection_state = self.selection_state
+ if selected_subrange is None:
+ return
+ self.fwd_rect = selection_state
+ self.item.SetSelection(None)
+ self.PushBackState()
+ self.attrs.subrange.x = selected_subrange
+ self.forward_state = []
+ self.forward_button.setDisabled(True)
+ self.selection_state = (None, None)
+ self.fwd_rect = None
+ self.attrs.scale.x = self.GetScaleForRangeX(self.attrs.subrange.x)
+ self.attrs.Update()
+ self.Update()
+
+# Slow initialization - perform non-GUI initialization in a separate thread and put up a modal message box while waiting
+
+class SlowInitClass():
+
+ def __init__(self, glb, title, init_fn):
+ self.init_fn = init_fn
+ self.done = False
+ self.result = None
+
+ self.msg_box = QMessageBox(glb.mainwindow)
+ self.msg_box.setText("Initializing " + title + ". Please wait.")
+ self.msg_box.setWindowTitle("Initializing " + title)
+ self.msg_box.setWindowIcon(glb.mainwindow.style().standardIcon(QStyle.SP_MessageBoxInformation))
+
+ self.init_thread = Thread(self.ThreadFn, glb)
+ self.init_thread.done.connect(lambda: self.Done(), Qt.QueuedConnection)
+
+ self.init_thread.start()
+
+ def Done(self):
+ self.msg_box.done(0)
+
+ def ThreadFn(self, glb):
+ conn_name = "SlowInitClass" + str(os.getpid())
+ db, dbname = glb.dbref.Open(conn_name)
+ self.result = self.init_fn(db)
+ self.done = True
+ return (True, 0)
+
+ def Result(self):
+ while not self.done:
+ self.msg_box.exec_()
+ self.init_thread.wait()
+ return self.result
+
+def SlowInit(glb, title, init_fn):
+ init = SlowInitClass(glb, title, init_fn)
+ return init.Result()
+
+# Time chart by CPU window
+
+class TimeChartByCPUWindow(QMdiSubWindow):
+
+ def __init__(self, glb, parent=None):
+ super(TimeChartByCPUWindow, self).__init__(parent)
+
+ self.glb = glb
+ self.machine_id = glb.HostMachineId()
+ self.collection_name = "SwitchGraphDataCollection " + str(self.machine_id)
+
+ collection = LookupModel(self.collection_name)
+ if collection is None:
+ collection = SlowInit(glb, "Time Chart", self.Init)
+
+ self.widget = SwitchGraphWidget(glb, collection, self)
+ self.view = self.widget
+
+ self.base_title = "Time Chart by CPU"
+ self.setWindowTitle(self.base_title + self.widget.Title())
+ self.widget.graph_title_changed.connect(self.GraphTitleChanged)
+
+ self.setWidget(self.widget)
+
+ AddSubWindow(glb.mainwindow.mdi_area, self, self.windowTitle())
+
+ def Init(self, db):
+ return LookupCreateModel(self.collection_name, lambda : SwitchGraphDataCollection(self.glb, db, self.machine_id))
+
+ def GraphTitleChanged(self, title):
+ self.setWindowTitle(self.base_title + " : " + title)
+
# Child data item finder
class ChildDataItemFinder():
@@ -2058,10 +3429,8 @@ class SampleTimeRangesDataItem(LineEditDataItem):
QueryExec(query, "SELECT id, time FROM samples ORDER BY id DESC LIMIT 1")
if query.next():
self.last_id = int(query.value(0))
- self.last_time = int(query.value(1))
- QueryExec(query, "SELECT time FROM samples WHERE time != 0 ORDER BY id LIMIT 1")
- if query.next():
- self.first_time = int(query.value(0))
+ self.first_time = int(glb.HostStartTime())
+ self.last_time = int(glb.HostFinishTime())
if placeholder_text:
placeholder_text += ", between " + str(self.first_time) + " and " + str(self.last_time)
@@ -2954,7 +4323,9 @@ p.c2 {
<p class=c2><a href=#allbranches>1.3 All branches</a></p>
<p class=c2><a href=#selectedbranches>1.4 Selected branches</a></p>
<p class=c2><a href=#topcallsbyelapsedtime>1.5 Top calls by elapsed time</a></p>
-<p class=c1><a href=#tables>2. Tables</a></p>
+<p class=c1><a href=#charts>2. Charts</a></p>
+<p class=c2><a href=#timechartbycpu>2.1 Time chart by CPU</a></p>
+<p class=c1><a href=#tables>3. Tables</a></p>
<h1 id=reports>1. Reports</h1>
<h2 id=callgraph>1.1 Context-Sensitive Call Graph</h2>
The result is a GUI window with a tree representing a context-sensitive
@@ -3042,7 +4413,29 @@ N.B. Due to the granularity of timestamps, there could be no branches in any giv
The Top calls by elapsed time report displays calls in descending order of time elapsed between when the function was called and when it returned.
The data is reduced by various selection criteria. A dialog box displays available criteria which are AND'ed together.
If not all data is fetched, a Fetch bar is provided. Ctrl-F displays a Find bar.
-<h1 id=tables>2. Tables</h1>
+<h1 id=charts>2. Charts</h1>
+<h2 id=timechartbycpu>2.1 Time chart by CPU</h2>
+This chart displays context switch information when that data is available. Refer to context_switches_view on the Tables menu.
+<h3>Features</h3>
+<ol>
+<li>Mouse over to highlight the task and show the time</li>
+<li>Drag the mouse to select a region and zoom by pushing the Zoom button</li>
+<li>Go back and forward by pressing the arrow buttons</li>
+<li>If call information is available, right-click to show a call tree opened to that task and time.
+Note, the call tree may take some time to appear, and there may not be call information for the task or time selected.
+</li>
+</ol>
+<h3>Important</h3>
+The graph can be misleading in the following respects:
+<ol>
+<li>The graph shows the first task on each CPU as running from the beginning of the time range.
+Because tracing might start on different CPUs at different times, that is not necessarily the case.
+Refer to context_switches_view on the Tables menu to understand what data the graph is based upon.</li>
+<li>Similarly, the last task on each CPU can be shown as running longer than it really did.
+Again, refer to context_switches_view on the Tables menu to understand what data the graph is based upon.</li>
+<li>When the mouse is over a task, the highlighted task might not be visible on the legend without scrolling if the legend does not fit fully in the window</li>
+</ol>
+<h1 id=tables>3. Tables</h1>
The Tables menu shows all tables and views in the database. Most tables have an associated view
which displays the information in a more friendly way. Not all data for large tables is fetched
immediately. More records can be fetched using the Fetch bar provided. Columns can be sorted,
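The chart help above points at context_switches_view whenever the graph looks surprising, and querying the underlying table directly is the quickest sanity check. A hedged sketch using Python's sqlite3 module (time and machine_id are the only columns this patch itself queries; cpu is an assumption about the exported schema):

	import sqlite3

	conn = sqlite3.connect("perf.db")  # database produced by export-to-sqlite.py
	rows = conn.execute(
		"SELECT machine_id, time, cpu FROM context_switches"
		" WHERE time != 0 ORDER BY time LIMIT 10")
	for row in rows:
		print(row)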
@@ -3238,6 +4631,10 @@ class MainWindow(QMainWindow):
if IsSelectable(glb.db, "calls"):
reports_menu.addAction(CreateAction("&Top calls by elapsed time", "Create a new window displaying top calls by elapsed time", self.NewTopCalls, self))
+ if IsSelectable(glb.db, "context_switches"):
+ charts_menu = menu.addMenu("&Charts")
+ charts_menu.addAction(CreateAction("&Time chart by CPU", "Create a new window displaying time charts by CPU", self.TimeChartByCPU, self))
+
self.TableMenu(GetTableList(glb), menu)
self.window_menu = WindowMenu(self.mdi_area, menu)
@@ -3298,6 +4695,9 @@ class MainWindow(QMainWindow):
label = "Selected branches" if branches_events == 1 else "Selected branches " + "(id=" + dbid + ")"
reports_menu.addAction(CreateAction(label, "Create a new window displaying branch events", lambda a=None,x=dbid: self.NewSelectedBranchView(x), self))
+ def TimeChartByCPU(self):
+ TimeChartByCPUWindow(self.glb, self)
+
def TableMenu(self, tables, menu):
table_menu = menu.addMenu("&Tables")
for table in tables:
@@ -3470,6 +4870,9 @@ class Glb():
self.have_disassembler = True
except:
self.have_disassembler = False
+ self.host_machine_id = 0
+ self.host_start_time = 0
+ self.host_finish_time = 0
def FileFromBuildId(self, build_id):
file_name = self.buildid_dir + build_id[0:2] + "/" + build_id[2:] + "/elf"
@@ -3502,6 +4905,110 @@ class Glb():
except:
pass
+ def GetHostMachineId(self):
+ query = QSqlQuery(self.db)
+ QueryExec(query, "SELECT id FROM machines WHERE pid = -1")
+ if query.next():
+ self.host_machine_id = query.value(0)
+ else:
+ self.host_machine_id = 0
+ return self.host_machine_id
+
+ def HostMachineId(self):
+ if self.host_machine_id:
+ return self.host_machine_id
+ return self.GetHostMachineId()
+
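HostMachineId() caches its result and relies on the exporter's convention that the host is the machines row whose pid is -1. The same lookup via the SelectValue() helper added just below (an equivalent restatement, not what the patch uses):

	host_id = glb.SelectValue("SELECT id FROM machines WHERE pid = -1")
	if host_id is None:
		host_id = 0  # no host row in this database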
+ def SelectValue(self, sql):
+ query = QSqlQuery(self.db)
+ try:
+ QueryExec(query, sql)
+ except:
+ return None
+ if query.next():
+ return Decimal(query.value(0))
+ return None
+
+ def SwitchesMinTime(self, machine_id):
+ return self.SelectValue("SELECT time"
+ " FROM context_switches"
+ " WHERE time != 0 AND machine_id = " + str(machine_id) +
+ " ORDER BY id LIMIT 1")
+
+ def SwitchesMaxTime(self, machine_id):
+ return self.SelectValue("SELECT time"
+ " FROM context_switches"
+ " WHERE time != 0 AND machine_id = " + str(machine_id) +
+ " ORDER BY id DESC LIMIT 1")
+
+ def SamplesMinTime(self, machine_id):
+ return self.SelectValue("SELECT time"
+ " FROM samples"
+ " WHERE time != 0 AND machine_id = " + str(machine_id) +
+ " ORDER BY id LIMIT 1")
+
+ def SamplesMaxTime(self, machine_id):
+ return self.SelectValue("SELECT time"
+ " FROM samples"
+ " WHERE time != 0 AND machine_id = " + str(machine_id) +
+ " ORDER BY id DESC LIMIT 1")
+
+ def CallsMinTime(self, machine_id):
+ return self.SelectValue("SELECT calls.call_time"
+ " FROM calls"
+ " INNER JOIN threads ON threads.thread_id = calls.thread_id"
+ " WHERE calls.call_time != 0 AND threads.machine_id = " + str(machine_id) +
+ " ORDER BY calls.id LIMIT 1")
+
+ def CallsMaxTime(self, machine_id):
+ return self.SelectValue("SELECT calls.return_time"
+ " FROM calls"
+ " INNER JOIN threads ON threads.thread_id = calls.thread_id"
+ " WHERE calls.return_time != 0 AND threads.machine_id = " + str(machine_id) +
+ " ORDER BY calls.return_time DESC LIMIT 1")
+
+ def GetStartTime(self, machine_id):
+ t0 = self.SwitchesMinTime(machine_id)
+ t1 = self.SamplesMinTime(machine_id)
+ t2 = self.CallsMinTime(machine_id)
+ if t0 is None or (t1 is not None and t1 < t0):
+ t0 = t1
+ if t0 is None or (t2 is not None and t2 < t0):
+ t0 = t2
+ return t0
+
+ def GetFinishTime(self, machine_id):
+ t0 = self.SwitchesMaxTime(machine_id)
+ t1 = self.SamplesMaxTime(machine_id)
+ t2 = self.CallsMaxTime(machine_id)
+ if t0 is None or (t1 is not None and t1 > t0):
+ t0 = t1
+ if t0 is None or (t2 is not None and t2 > t0):
+ t0 = t2
+ return t0
+
+ def HostStartTime(self):
+ if self.host_start_time:
+ return self.host_start_time
+ self.host_start_time = self.GetStartTime(self.HostMachineId())
+ return self.host_start_time
+
+ def HostFinishTime(self):
+ if self.host_finish_time:
+ return self.host_finish_time
+ self.host_finish_time = self.GetFinishTime(self.HostMachineId())
+ return self.host_finish_time
+
+ def StartTime(self, machine_id):
+ if machine_id == self.HostMachineId():
+ return self.HostStartTime()
+ return self.GetStartTime(machine_id)
+
+ def FinishTime(self, machine_id):
+ if machine_id == self.HostMachineId():
+ return self.HostFinishTime()
+ return self.GetFinishTime(machine_id)
+
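GetStartTime() and GetFinishTime() above simply take the minimum and maximum over whichever of the three sources (context switches, samples, calls) returned data, treating None as "no value"; SelectValue() hands the times back as Decimal, presumably so nanosecond timestamps are not rounded by double precision. The same min/max logic, restated compactly (equivalent, not what the patch uses):

	def MinIgnoringNone(*values):
		present = [v for v in values if v is not None]
		return min(present) if present else None

	# GetStartTime(machine_id) is then equivalent to:
	#   MinIgnoringNone(SwitchesMinTime(machine_id),
	#                   SamplesMinTime(machine_id),
	#                   CallsMinTime(machine_id))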
# Database reference
class DBRef():
@@ -3509,6 +5016,12 @@ class DBRef():
def __init__(self, is_sqlite3, dbname):
self.is_sqlite3 = is_sqlite3
self.dbname = dbname
+ self.TRUE = "TRUE"
+ self.FALSE = "FALSE"
+ # SQLite prior to version 3.23 does not support TRUE and FALSE
+ if self.is_sqlite3:
+ self.TRUE = "1"
+ self.FALSE = "0"
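Callers can then splice dbref.TRUE / dbref.FALSE into boolean predicates and get SQL that runs on both PostgreSQL and pre-3.23 SQLite. A hedged usage sketch (the comms.exec_flag column is assumed here purely for illustration):

	dbref = DBRef(is_sqlite3=True, dbname="perf.db")
	# Yields "... = 1" on SQLite, "... = TRUE" on PostgreSQL
	sql = "SELECT id FROM comms WHERE exec_flag = " + dbref.TRUE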
def Open(self, connection_name):
dbname = self.dbname
diff --git a/tools/perf/tests/attr/base-record b/tools/perf/tests/attr/base-record
index efd0157b9d22..645009c08b3c 100644
--- a/tools/perf/tests/attr/base-record
+++ b/tools/perf/tests/attr/base-record
@@ -5,7 +5,7 @@ group_fd=-1
flags=0|8
cpu=*
type=0|1
-size=112
+size=120
config=0
sample_period=*
sample_type=263
diff --git a/tools/perf/tests/attr/base-stat b/tools/perf/tests/attr/base-stat
index 4d0c2e42b64e..b0f42c34882e 100644
--- a/tools/perf/tests/attr/base-stat
+++ b/tools/perf/tests/attr/base-stat
@@ -5,7 +5,7 @@ group_fd=-1
flags=0|8
cpu=*
type=0
-size=112
+size=120
config=0
sample_period=0
sample_type=65536
diff --git a/tools/perf/tests/backward-ring-buffer.c b/tools/perf/tests/backward-ring-buffer.c
index 338cd9faa835..15cea518f5ad 100644
--- a/tools/perf/tests/backward-ring-buffer.c
+++ b/tools/perf/tests/backward-ring-buffer.c
@@ -13,6 +13,7 @@
#include "util/mmap.h"
#include <errno.h>
#include <linux/string.h>
+#include <perf/mmap.h>
#define NR_ITERS 111
@@ -37,8 +38,8 @@ static int count_samples(struct evlist *evlist, int *sample_count,
struct mmap *map = &evlist->overwrite_mmap[i];
union perf_event *event;
- perf_mmap__read_init(map);
- while ((event = perf_mmap__read_event(map)) != NULL) {
+ perf_mmap__read_init(&map->core);
+ while ((event = perf_mmap__read_event(&map->core)) != NULL) {
const u32 type = event->header.type;
switch (type) {
@@ -53,7 +54,7 @@ static int count_samples(struct evlist *evlist, int *sample_count,
return TEST_FAIL;
}
}
- perf_mmap__read_done(map);
+ perf_mmap__read_done(&map->core);
}
return TEST_OK;
}
@@ -147,6 +148,15 @@ int test__backward_ring_buffer(struct test *test __maybe_unused, int subtest __m
goto out_delete_evlist;
}
+ evlist__close(evlist);
+
+ err = evlist__open(evlist);
+ if (err < 0) {
+ pr_debug("perf_evlist__open: %s\n",
+ str_error_r(errno, sbuf, sizeof(sbuf)));
+ goto out_delete_evlist;
+ }
+
err = do_test(evlist, 1, &sample_count, &comm_count);
if (err != TEST_OK)
goto out_delete_evlist;
diff --git a/tools/perf/tests/bp_account.c b/tools/perf/tests/bp_account.c
index 016bba2c142d..d0b935356274 100644
--- a/tools/perf/tests/bp_account.c
+++ b/tools/perf/tests/bp_account.c
@@ -10,11 +10,7 @@
#include <unistd.h>
#include <string.h>
#include <sys/ioctl.h>
-#include <time.h>
#include <fcntl.h>
-#include <signal.h>
-#include <sys/mman.h>
-#include <linux/compiler.h>
#include <linux/hw_breakpoint.h>
#include "tests.h"
@@ -192,3 +188,19 @@ int test__bp_accounting(struct test *test __maybe_unused, int subtest __maybe_un
return bp_accounting(wp_cnt, share);
}
+
+bool test__bp_account_is_supported(void)
+{
+ /*
+ * PowerPC and S390 do not support creation of instruction
+ * breakpoints using the perf_event interface.
+ *
+ * Just disable the test for these architectures until these
+ * issues are resolved.
+ */
+#if defined(__powerpc__) || defined(__s390x__)
+ return false;
+#else
+ return true;
+#endif
+}
diff --git a/tools/perf/tests/bp_signal.c b/tools/perf/tests/bp_signal.c
index c1c2c13de254..415903b48578 100644
--- a/tools/perf/tests/bp_signal.c
+++ b/tools/perf/tests/bp_signal.c
@@ -49,14 +49,6 @@ asm (
"__test_function:\n"
"incq (%rdi)\n"
"ret\n");
-#elif defined (__aarch64__)
-extern void __test_function(volatile long *ptr);
-asm (
- ".globl __test_function\n"
- "__test_function:\n"
- "str x30, [x0]\n"
- "ret\n");
-
#else
static void __test_function(volatile long *ptr)
{
@@ -302,10 +294,15 @@ bool test__bp_signal_is_supported(void)
* stepping into the SIGIO handler and getting stuck on the
* breakpointed instruction.
*
+ * Since arm64 has the same issue with arm for the single-step
+ * handling, this case also gets stuck on the breakpointed
+ * instruction.
+ *
* Just disable the test for these architectures until these
* issues are resolved.
*/
-#if defined(__powerpc__) || defined(__s390x__) || defined(__arm__)
+#if defined(__powerpc__) || defined(__s390x__) || defined(__arm__) || \
+ defined(__aarch64__)
return false;
#else
return true;
diff --git a/tools/perf/tests/bpf.c b/tools/perf/tests/bpf.c
index 1eb0bffaed6c..5d20bf8397f0 100644
--- a/tools/perf/tests/bpf.c
+++ b/tools/perf/tests/bpf.c
@@ -15,6 +15,7 @@
#include <linux/string.h>
#include <api/fs/fs.h>
#include <bpf/bpf.h>
+#include <perf/mmap.h>
#include "tests.h"
#include "llvm.h"
#include "debug.h"
@@ -184,16 +185,16 @@ static int do_test(struct bpf_object *obj, int (*func)(void),
struct mmap *md;
md = &evlist->mmap[i];
- if (perf_mmap__read_init(md) < 0)
+ if (perf_mmap__read_init(&md->core) < 0)
continue;
- while ((event = perf_mmap__read_event(md)) != NULL) {
+ while ((event = perf_mmap__read_event(&md->core)) != NULL) {
const u32 type = event->header.type;
if (type == PERF_RECORD_SAMPLE)
count ++;
}
- perf_mmap__read_done(md);
+ perf_mmap__read_done(&md->core);
}
if (count != expect) {
diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c
index 55774baffc2a..8b286e9b7549 100644
--- a/tools/perf/tests/builtin-test.c
+++ b/tools/perf/tests/builtin-test.c
@@ -121,7 +121,7 @@ static struct test generic_tests[] = {
{
.desc = "Breakpoint accounting",
.func = test__bp_accounting,
- .is_supported = test__bp_signal_is_supported,
+ .is_supported = test__bp_account_is_supported,
},
{
.desc = "Watchpoint",
diff --git a/tools/perf/tests/code-reading.c b/tools/perf/tests/code-reading.c
index f5764a3890b9..1f017e1b2a55 100644
--- a/tools/perf/tests/code-reading.c
+++ b/tools/perf/tests/code-reading.c
@@ -10,6 +10,7 @@
#include <sys/param.h>
#include <perf/cpumap.h>
#include <perf/evlist.h>
+#include <perf/mmap.h>
#include "debug.h"
#include "dso.h"
@@ -425,16 +426,16 @@ static int process_events(struct machine *machine, struct evlist *evlist,
for (i = 0; i < evlist->core.nr_mmaps; i++) {
md = &evlist->mmap[i];
- if (perf_mmap__read_init(md) < 0)
+ if (perf_mmap__read_init(&md->core) < 0)
continue;
- while ((event = perf_mmap__read_event(md)) != NULL) {
+ while ((event = perf_mmap__read_event(&md->core)) != NULL) {
ret = process_event(machine, evlist, event, state);
- perf_mmap__consume(md);
+ perf_mmap__consume(&md->core);
if (ret < 0)
return ret;
}
- perf_mmap__read_done(md);
+ perf_mmap__read_done(&md->core);
}
return 0;
}
diff --git a/tools/perf/tests/dwarf-unwind.c b/tools/perf/tests/dwarf-unwind.c
index 4f4ecbcbe87e..779ce280a0e9 100644
--- a/tools/perf/tests/dwarf-unwind.c
+++ b/tools/perf/tests/dwarf-unwind.c
@@ -59,7 +59,7 @@ int test_dwarf_unwind__krava_1(struct thread *thread);
static int unwind_entry(struct unwind_entry *entry, void *arg)
{
unsigned long *cnt = (unsigned long *) arg;
- char *symbol = entry->sym ? entry->sym->name : NULL;
+ char *symbol = entry->ms.sym ? entry->ms.sym->name : NULL;
static const char *funcs[MAX_STACK] = {
"test__arch_unwind_sample",
"test_dwarf_unwind__thread",
diff --git a/tools/perf/tests/keep-tracking.c b/tools/perf/tests/keep-tracking.c
index 92c7d591bcac..50a0c9fcde7d 100644
--- a/tools/perf/tests/keep-tracking.c
+++ b/tools/perf/tests/keep-tracking.c
@@ -5,6 +5,7 @@
#include <sys/prctl.h>
#include <perf/cpumap.h>
#include <perf/evlist.h>
+#include <perf/mmap.h>
#include "debug.h"
#include "parse-events.h"
@@ -38,17 +39,17 @@ static int find_comm(struct evlist *evlist, const char *comm)
found = 0;
for (i = 0; i < evlist->core.nr_mmaps; i++) {
md = &evlist->mmap[i];
- if (perf_mmap__read_init(md) < 0)
+ if (perf_mmap__read_init(&md->core) < 0)
continue;
- while ((event = perf_mmap__read_event(md)) != NULL) {
+ while ((event = perf_mmap__read_event(&md->core)) != NULL) {
if (event->header.type == PERF_RECORD_COMM &&
(pid_t)event->comm.pid == getpid() &&
(pid_t)event->comm.tid == getpid() &&
strcmp(event->comm.comm, comm) == 0)
found += 1;
- perf_mmap__consume(md);
+ perf_mmap__consume(&md->core);
}
- perf_mmap__read_done(md);
+ perf_mmap__read_done(&md->core);
}
return found;
}
diff --git a/tools/perf/tests/map_groups.c b/tools/perf/tests/map_groups.c
index 594fdaca4f71..6b9f1cdcbe5b 100644
--- a/tools/perf/tests/map_groups.c
+++ b/tools/perf/tests/map_groups.c
@@ -18,17 +18,16 @@ static int check_maps(struct map_def *merged, unsigned int size, struct map_grou
struct map *map;
unsigned int i = 0;
- map = map_groups__first(mg);
- while (map) {
+ map_groups__for_each_entry(mg, map) {
+ if (i > 0)
+ TEST_ASSERT_VAL("less maps expected", (map && i < size) || (!map && i == size));
+
TEST_ASSERT_VAL("wrong map start", map->start == merged[i].start);
TEST_ASSERT_VAL("wrong map end", map->end == merged[i].end);
TEST_ASSERT_VAL("wrong map name", !strcmp(map->dso->name, merged[i].name));
- TEST_ASSERT_VAL("wrong map refcnt", refcount_read(&map->refcnt) == 2);
+ TEST_ASSERT_VAL("wrong map refcnt", refcount_read(&map->refcnt) == 1);
i++;
- map = map_groups__next(map);
-
- TEST_ASSERT_VAL("less maps expected", (map && i < size) || (!map && i == size));
}
return TEST_OK;
diff --git a/tools/perf/tests/mmap-basic.c b/tools/perf/tests/mmap-basic.c
index 3a22dce991ba..5f4c0dbb4715 100644
--- a/tools/perf/tests/mmap-basic.c
+++ b/tools/perf/tests/mmap-basic.c
@@ -16,6 +16,7 @@
#include <linux/kernel.h>
#include <linux/string.h>
#include <perf/evlist.h>
+#include <perf/mmap.h>
/*
* This test will generate random numbers of calls to some getpid syscalls,
@@ -113,10 +114,10 @@ int test__basic_mmap(struct test *test __maybe_unused, int subtest __maybe_unuse
}
md = &evlist->mmap[0];
- if (perf_mmap__read_init(md) < 0)
+ if (perf_mmap__read_init(&md->core) < 0)
goto out_init;
- while ((event = perf_mmap__read_event(md)) != NULL) {
+ while ((event = perf_mmap__read_event(&md->core)) != NULL) {
struct perf_sample sample;
if (event->header.type != PERF_RECORD_SAMPLE) {
@@ -139,9 +140,9 @@ int test__basic_mmap(struct test *test __maybe_unused, int subtest __maybe_unuse
goto out_delete_evlist;
}
nr_events[evsel->idx]++;
- perf_mmap__consume(md);
+ perf_mmap__consume(&md->core);
}
- perf_mmap__read_done(md);
+ perf_mmap__read_done(&md->core);
out_init:
err = 0;
diff --git a/tools/perf/tests/openat-syscall-tp-fields.c b/tools/perf/tests/openat-syscall-tp-fields.c
index 2b5c46813053..c6b2d7aab608 100644
--- a/tools/perf/tests/openat-syscall-tp-fields.c
+++ b/tools/perf/tests/openat-syscall-tp-fields.c
@@ -13,6 +13,7 @@
#include "debug.h"
#include "util/mmap.h"
#include <errno.h>
+#include <perf/mmap.h>
#ifndef O_DIRECTORY
#define O_DIRECTORY 00200000
@@ -92,10 +93,10 @@ int test__syscall_openat_tp_fields(struct test *test __maybe_unused, int subtest
struct mmap *md;
md = &evlist->mmap[i];
- if (perf_mmap__read_init(md) < 0)
+ if (perf_mmap__read_init(&md->core) < 0)
continue;
- while ((event = perf_mmap__read_event(md)) != NULL) {
+ while ((event = perf_mmap__read_event(&md->core)) != NULL) {
const u32 type = event->header.type;
int tp_flags;
struct perf_sample sample;
@@ -103,7 +104,7 @@ int test__syscall_openat_tp_fields(struct test *test __maybe_unused, int subtest
++nr_events;
if (type != PERF_RECORD_SAMPLE) {
- perf_mmap__consume(md);
+ perf_mmap__consume(&md->core);
continue;
}
@@ -123,7 +124,7 @@ int test__syscall_openat_tp_fields(struct test *test __maybe_unused, int subtest
goto out_ok;
}
- perf_mmap__read_done(md);
+ perf_mmap__read_done(&md->core);
}
if (nr_events == before)
diff --git a/tools/perf/tests/parse-events.c b/tools/perf/tests/parse-events.c
index 25e0ed2eedfc..091c3aeccc27 100644
--- a/tools/perf/tests/parse-events.c
+++ b/tools/perf/tests/parse-events.c
@@ -1768,10 +1768,11 @@ static struct terms_test test__terms[] = {
static int test_event(struct evlist_test *e)
{
- struct parse_events_error err = { .idx = 0, };
+ struct parse_events_error err;
struct evlist *evlist;
int ret;
+ bzero(&err, sizeof(err));
if (e->valid && !e->valid()) {
pr_debug("... SKIP");
return 0;
diff --git a/tools/perf/tests/perf-record.c b/tools/perf/tests/perf-record.c
index 437426be29e9..2195fc205e72 100644
--- a/tools/perf/tests/perf-record.c
+++ b/tools/perf/tests/perf-record.c
@@ -6,6 +6,7 @@
#include <pthread.h>
#include <sched.h>
+#include <perf/mmap.h>
#include "evlist.h"
#include "evsel.h"
#include "debug.h"
@@ -170,10 +171,10 @@ int test__PERF_RECORD(struct test *test __maybe_unused, int subtest __maybe_unus
struct mmap *md;
md = &evlist->mmap[i];
- if (perf_mmap__read_init(md) < 0)
+ if (perf_mmap__read_init(&md->core) < 0)
continue;
- while ((event = perf_mmap__read_event(md)) != NULL) {
+ while ((event = perf_mmap__read_event(&md->core)) != NULL) {
const u32 type = event->header.type;
const char *name = perf_event__name(type);
@@ -276,9 +277,9 @@ int test__PERF_RECORD(struct test *test __maybe_unused, int subtest __maybe_unus
++errs;
}
- perf_mmap__consume(md);
+ perf_mmap__consume(&md->core);
}
- perf_mmap__read_done(md);
+ perf_mmap__read_done(&md->core);
}
/*
diff --git a/tools/perf/tests/sample-parsing.c b/tools/perf/tests/sample-parsing.c
index 3a02426db9a6..2762e1155238 100644
--- a/tools/perf/tests/sample-parsing.c
+++ b/tools/perf/tests/sample-parsing.c
@@ -150,6 +150,15 @@ static bool samples_same(const struct perf_sample *s1,
if (type & PERF_SAMPLE_PHYS_ADDR)
COMP(phys_addr);
+ if (type & PERF_SAMPLE_AUX) {
+ COMP(aux_sample.size);
+ if (memcmp(s1->aux_sample.data, s2->aux_sample.data,
+ s1->aux_sample.size)) {
+ pr_debug("Samples differ at 'aux_sample'\n");
+ return false;
+ }
+ }
+
return true;
}
@@ -182,6 +191,7 @@ static int do_test(u64 sample_type, u64 sample_regs, u64 read_format)
u64 regs[64];
const u64 raw_data[] = {0x123456780a0b0c0dULL, 0x1102030405060708ULL};
const u64 data[] = {0x2211443366558877ULL, 0, 0xaabbccddeeff4321ULL};
+ const u64 aux_data[] = {0xa55a, 0, 0xeeddee, 0x0282028202820282};
struct perf_sample sample = {
.ip = 101,
.pid = 102,
@@ -218,6 +228,10 @@ static int do_test(u64 sample_type, u64 sample_regs, u64 read_format)
.regs = regs,
},
.phys_addr = 113,
+ .aux_sample = {
+ .size = sizeof(aux_data),
+ .data = (void *)aux_data,
+ },
};
struct sample_read_value values[] = {{1, 5}, {9, 3}, {2, 7}, {6, 4},};
struct perf_sample sample_out;
@@ -317,7 +331,7 @@ int test__sample_parsing(struct test *test __maybe_unused, int subtest __maybe_u
* were added. Please actually update the test rather than just change
* the condition below.
*/
- if (PERF_SAMPLE_MAX > PERF_SAMPLE_PHYS_ADDR << 1) {
+ if (PERF_SAMPLE_MAX > PERF_SAMPLE_AUX << 1) {
pr_debug("sample format has changed, some new PERF_SAMPLE_ bit was introduced - test needs updating\n");
return -1;
}
diff --git a/tools/perf/tests/sw-clock.c b/tools/perf/tests/sw-clock.c
index 84519df87f30..bfb9986093d8 100644
--- a/tools/perf/tests/sw-clock.c
+++ b/tools/perf/tests/sw-clock.c
@@ -15,6 +15,7 @@
#include "util/mmap.h"
#include "util/thread_map.h"
#include <perf/evlist.h>
+#include <perf/mmap.h>
#define NR_LOOPS 10000000
@@ -99,10 +100,10 @@ static int __test__sw_clock_freq(enum perf_sw_ids clock_id)
evlist__disable(evlist);
md = &evlist->mmap[0];
- if (perf_mmap__read_init(md) < 0)
+ if (perf_mmap__read_init(&md->core) < 0)
goto out_init;
- while ((event = perf_mmap__read_event(md)) != NULL) {
+ while ((event = perf_mmap__read_event(&md->core)) != NULL) {
struct perf_sample sample;
if (event->header.type != PERF_RECORD_SAMPLE)
@@ -117,9 +118,9 @@ static int __test__sw_clock_freq(enum perf_sw_ids clock_id)
total_periods += sample.period;
nr_samples++;
next_event:
- perf_mmap__consume(md);
+ perf_mmap__consume(&md->core);
}
- perf_mmap__read_done(md);
+ perf_mmap__read_done(&md->core);
out_init:
if ((u64) nr_samples == total_periods) {
diff --git a/tools/perf/tests/switch-tracking.c b/tools/perf/tests/switch-tracking.c
index ffa592e0020e..fcb0d03dba4e 100644
--- a/tools/perf/tests/switch-tracking.c
+++ b/tools/perf/tests/switch-tracking.c
@@ -8,6 +8,7 @@
#include <linux/zalloc.h>
#include <perf/cpumap.h>
#include <perf/evlist.h>
+#include <perf/mmap.h>
#include "debug.h"
#include "parse-events.h"
@@ -269,17 +270,17 @@ static int process_events(struct evlist *evlist,
for (i = 0; i < evlist->core.nr_mmaps; i++) {
md = &evlist->mmap[i];
- if (perf_mmap__read_init(md) < 0)
+ if (perf_mmap__read_init(&md->core) < 0)
continue;
- while ((event = perf_mmap__read_event(md)) != NULL) {
+ while ((event = perf_mmap__read_event(&md->core)) != NULL) {
cnt += 1;
ret = add_event(evlist, &events, event);
- perf_mmap__consume(md);
+ perf_mmap__consume(&md->core);
if (ret < 0)
goto out_free_nodes;
}
- perf_mmap__read_done(md);
+ perf_mmap__read_done(&md->core);
}
events_array = calloc(cnt, sizeof(struct event_node));
diff --git a/tools/perf/tests/task-exit.c b/tools/perf/tests/task-exit.c
index bce3a4cb4c89..adaff9044331 100644
--- a/tools/perf/tests/task-exit.c
+++ b/tools/perf/tests/task-exit.c
@@ -12,6 +12,7 @@
#include <linux/string.h>
#include <perf/cpumap.h>
#include <perf/evlist.h>
+#include <perf/mmap.h>
static int exited;
static int nr_exit;
@@ -53,6 +54,7 @@ int test__task_exit(struct test *test __maybe_unused, int subtest __maybe_unused
struct perf_cpu_map *cpus;
struct perf_thread_map *threads;
struct mmap *md;
+ int retry_count = 0;
signal(SIGCHLD, sig_handler);
@@ -110,6 +112,7 @@ int test__task_exit(struct test *test __maybe_unused, int subtest __maybe_unused
if (evlist__mmap(evlist, 128) < 0) {
pr_debug("failed to mmap events: %d (%s)\n", errno,
str_error_r(errno, sbuf, sizeof(sbuf)));
+ err = -1;
goto out_delete_evlist;
}
@@ -117,20 +120,27 @@ int test__task_exit(struct test *test __maybe_unused, int subtest __maybe_unused
retry:
md = &evlist->mmap[0];
- if (perf_mmap__read_init(md) < 0)
+ if (perf_mmap__read_init(&md->core) < 0)
goto out_init;
- while ((event = perf_mmap__read_event(md)) != NULL) {
+ while ((event = perf_mmap__read_event(&md->core)) != NULL) {
if (event->header.type == PERF_RECORD_EXIT)
nr_exit++;
- perf_mmap__consume(md);
+ perf_mmap__consume(&md->core);
}
- perf_mmap__read_done(md);
+ perf_mmap__read_done(&md->core);
out_init:
if (!exited || !nr_exit) {
evlist__poll(evlist, -1);
+
+ if (retry_count++ > 1000) {
+ pr_debug("Failed after retrying 1000 times\n");
+ err = -1;
+ goto out_free_maps;
+ }
+
goto retry;
}
diff --git a/tools/perf/tests/tests.h b/tools/perf/tests/tests.h
index 72912eb473cb..9837b6e93023 100644
--- a/tools/perf/tests/tests.h
+++ b/tools/perf/tests/tests.h
@@ -111,6 +111,7 @@ int test__map_groups__merge_in(struct test *t, int subtest);
int test__time_utils(struct test *t, int subtest);
bool test__bp_signal_is_supported(void);
+bool test__bp_account_is_supported(void);
bool test__wp_is_supported(void);
#if defined(__arm__) || defined(__aarch64__)
diff --git a/tools/perf/tests/vmlinux-kallsyms.c b/tools/perf/tests/vmlinux-kallsyms.c
index aa296ffea6d1..ff649078da9a 100644
--- a/tools/perf/tests/vmlinux-kallsyms.c
+++ b/tools/perf/tests/vmlinux-kallsyms.c
@@ -182,7 +182,7 @@ next_pair:
header_printed = false;
- for (map = maps__first(maps); map; map = map__next(map)) {
+ maps__for_each_entry(maps, map) {
struct map *
/*
* If it is the kernel, kallsyms is always "[kernel.kallsyms]", while
@@ -207,7 +207,7 @@ next_pair:
header_printed = false;
- for (map = maps__first(maps); map; map = map__next(map)) {
+ maps__for_each_entry(maps, map) {
struct map *pair;
mem_start = vmlinux_map->unmap_ip(vmlinux_map, map->start);
@@ -237,7 +237,7 @@ next_pair:
maps = machine__kernel_maps(&kallsyms);
- for (map = maps__first(maps); map; map = map__next(map)) {
+ maps__for_each_entry(maps, map) {
if (!map->priv) {
if (!header_printed) {
pr_info("WARN: Maps only in kallsyms:\n");
diff --git a/tools/perf/trace/beauty/Build b/tools/perf/trace/beauty/Build
index afa75a76f6b8..433dc39053a7 100644
--- a/tools/perf/trace/beauty/Build
+++ b/tools/perf/trace/beauty/Build
@@ -17,3 +17,4 @@ perf-y += sockaddr.o
perf-y += socket.o
perf-y += statx.o
perf-y += sync_file_range.o
+perf-y += tracepoints/
diff --git a/tools/perf/trace/beauty/beauty.h b/tools/perf/trace/beauty/beauty.h
index 7e06605f7c76..5a61043c2ff7 100644
--- a/tools/perf/trace/beauty/beauty.h
+++ b/tools/perf/trace/beauty/beauty.h
@@ -5,9 +5,10 @@
#include <linux/kernel.h>
#include <linux/types.h>
#include <sys/types.h>
+#include <stdbool.h>
struct strarray {
- int offset;
+ u64 offset;
int nr_entries;
const char *prefix;
const char **entries;
@@ -27,8 +28,12 @@ struct strarray {
}
size_t strarray__scnprintf(struct strarray *sa, char *bf, size_t size, const char *intfmt, bool show_prefix, int val);
+size_t strarray__scnprintf_suffix(struct strarray *sa, char *bf, size_t size, const char *intfmt, bool show_suffix, int val);
size_t strarray__scnprintf_flags(struct strarray *sa, char *bf, size_t size, bool show_prefix, unsigned long flags);
+bool strarray__strtoul(struct strarray *sa, char *bf, size_t size, u64 *ret);
+bool strarray__strtoul_flags(struct strarray *sa, char *bf, size_t size, u64 *ret);
+
struct trace;
struct thread;
@@ -51,6 +56,8 @@ struct strarrays {
size_t strarrays__scnprintf(struct strarrays *sas, char *bf, size_t size, const char *intfmt, bool show_prefix, int val);
+bool strarrays__strtoul(struct strarrays *sas, char *bf, size_t size, u64 *ret);
+
size_t pid__scnprintf_fd(struct trace *trace, pid_t pid, int fd, char *bf, size_t size);
extern struct strarray strarray__socket_families;
@@ -78,8 +85,11 @@ struct augmented_arg {
u64 value[];
};
+struct syscall_arg_fmt;
+
/**
* @val: value of syscall argument being formatted
+ * @len: for tracepoint dynamic arrays, if fmt->nr_entries == 0, then it's not a fixed array; look at arg->len
* @args: All the args, use syscall_args__val(arg, nth) to access one
* @augmented_args: Extra data that can be collected, for instance, with eBPF for expanding the pathname for open, etc
* @augmented_args_size: augmented_args total payload size
@@ -94,6 +104,7 @@ struct augmented_arg {
struct syscall_arg {
unsigned long val;
unsigned char *args;
+ struct syscall_arg_fmt *fmt;
struct {
struct augmented_arg *args;
int size;
@@ -101,6 +112,7 @@ struct syscall_arg {
struct thread *thread;
struct trace *trace;
void *parm;
+ u16 len;
u8 idx;
u8 mask;
bool show_string_prefix;
@@ -111,6 +123,27 @@ unsigned long syscall_arg__val(struct syscall_arg *arg, u8 idx);
size_t syscall_arg__scnprintf_strarray_flags(char *bf, size_t size, struct syscall_arg *arg);
#define SCA_STRARRAY_FLAGS syscall_arg__scnprintf_strarray_flags
+bool syscall_arg__strtoul_strarray(char *bf, size_t size, struct syscall_arg *arg, u64 *ret);
+#define STUL_STRARRAY syscall_arg__strtoul_strarray
+
+bool syscall_arg__strtoul_strarray_flags(char *bf, size_t size, struct syscall_arg *arg, u64 *ret);
+#define STUL_STRARRAY_FLAGS syscall_arg__strtoul_strarray_flags
+
+bool syscall_arg__strtoul_strarrays(char *bf, size_t size, struct syscall_arg *arg, u64 *ret);
+#define STUL_STRARRAYS syscall_arg__strtoul_strarrays
+
+size_t syscall_arg__scnprintf_x86_irq_vectors(char *bf, size_t size, struct syscall_arg *arg);
+#define SCA_X86_IRQ_VECTORS syscall_arg__scnprintf_x86_irq_vectors
+
+bool syscall_arg__strtoul_x86_irq_vectors(char *bf, size_t size, struct syscall_arg *arg, u64 *ret);
+#define STUL_X86_IRQ_VECTORS syscall_arg__strtoul_x86_irq_vectors
+
+size_t syscall_arg__scnprintf_x86_MSR(char *bf, size_t size, struct syscall_arg *arg);
+#define SCA_X86_MSR syscall_arg__scnprintf_x86_MSR
+
+bool syscall_arg__strtoul_x86_MSR(char *bf, size_t size, struct syscall_arg *arg, u64 *ret);
+#define STUL_X86_MSR syscall_arg__strtoul_x86_MSR
+
size_t syscall_arg__scnprintf_strarrays(char *bf, size_t size, struct syscall_arg *arg);
#define SCA_STRARRAYS syscall_arg__scnprintf_strarrays
diff --git a/tools/perf/trace/beauty/mmap.c b/tools/perf/trace/beauty/mmap.c
index 859a8a9db2c6..9fa771a90d79 100644
--- a/tools/perf/trace/beauty/mmap.c
+++ b/tools/perf/trace/beauty/mmap.c
@@ -33,11 +33,11 @@ static size_t syscall_arg__scnprintf_mmap_prot(char *bf, size_t size,
#define SCA_MMAP_PROT syscall_arg__scnprintf_mmap_prot
-static size_t mmap__scnprintf_flags(unsigned long flags, char *bf, size_t size, bool show_prefix)
-{
#include "trace/beauty/generated/mmap_flags_array.c"
static DEFINE_STRARRAY(mmap_flags, "MAP_");
+static size_t mmap__scnprintf_flags(unsigned long flags, char *bf, size_t size, bool show_prefix)
+{
return strarray__scnprintf_flags(&strarray__mmap_flags, bf, size, show_prefix, flags);
}
diff --git a/tools/perf/trace/beauty/tracepoints/Build b/tools/perf/trace/beauty/tracepoints/Build
new file mode 100644
index 000000000000..e35087fdd108
--- /dev/null
+++ b/tools/perf/trace/beauty/tracepoints/Build
@@ -0,0 +1,2 @@
+perf-y += x86_irq_vectors.o
+perf-y += x86_msr.o
diff --git a/tools/perf/trace/beauty/tracepoints/x86_irq_vectors.c b/tools/perf/trace/beauty/tracepoints/x86_irq_vectors.c
new file mode 100644
index 000000000000..8eb9bc8534ac
--- /dev/null
+++ b/tools/perf/trace/beauty/tracepoints/x86_irq_vectors.c
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: LGPL-2.1
+/*
+ * trace/beauty/x86_irq_vectors.c
+ *
+ * Copyright (C) 2019, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
+ */
+
+#include "trace/beauty/beauty.h"
+
+#include "trace/beauty/generated/x86_arch_irq_vectors_array.c"
+
+static DEFINE_STRARRAY(x86_irq_vectors, "_VECTOR");
+
+static size_t x86_irq_vectors__scnprintf(unsigned long vector, char *bf, size_t size, bool show_prefix)
+{
+ return strarray__scnprintf_suffix(&strarray__x86_irq_vectors, bf, size, "%#x", show_prefix, vector);
+}
+
+size_t syscall_arg__scnprintf_x86_irq_vectors(char *bf, size_t size, struct syscall_arg *arg)
+{
+ unsigned long vector = arg->val;
+
+ return x86_irq_vectors__scnprintf(vector, bf, size, arg->show_string_prefix);
+}
+
+bool syscall_arg__strtoul_x86_irq_vectors(char *bf, size_t size, struct syscall_arg *arg __maybe_unused, u64 *ret)
+{
+ return strarray__strtoul(&strarray__x86_irq_vectors, bf, size, ret);
+}
diff --git a/tools/perf/trace/beauty/tracepoints/x86_irq_vectors.sh b/tools/perf/trace/beauty/tracepoints/x86_irq_vectors.sh
new file mode 100755
index 000000000000..f920003723b3
--- /dev/null
+++ b/tools/perf/trace/beauty/tracepoints/x86_irq_vectors.sh
@@ -0,0 +1,27 @@
+#!/bin/sh
+# SPDX-License-Identifier: LGPL-2.1
+# (C) 2019, Arnaldo Carvalho de Melo <acme@redhat.com>
+
+if [ $# -ne 1 ] ; then
+ arch_x86_header_dir=tools/arch/x86/include/asm/
+else
+ arch_x86_header_dir=$1
+fi
+
+x86_irq_vectors=${arch_x86_header_dir}/irq_vectors.h
+
+# FIRST_EXTERNAL_VECTOR is not that useful by itself, so find its number
+# and then substitute it into whatever uses it that is useful, which at
+# the time of writing of this script was IRQ_MOVE_CLEANUP_VECTOR.
+
+first_external_regex='^#define[[:space:]]+FIRST_EXTERNAL_VECTOR[[:space:]]+(0x[[:xdigit:]]+)$'
+first_external_vector=$(egrep ${first_external_regex} ${x86_irq_vectors} | sed -r "s/${first_external_regex}/\1/g")
+
+printf "static const char *x86_irq_vectors[] = {\n"
+regex='^#define[[:space:]]+([[:alnum:]_]+)_VECTOR[[:space:]]+(0x[[:xdigit:]]+)$'
+sed -r "s/FIRST_EXTERNAL_VECTOR/${first_external_vector}/g" ${x86_irq_vectors} | \
+egrep ${regex} | \
+ sed -r "s/${regex}/\2 \1/g" | sort -n | \
+ xargs printf "\t[%s] = \"%s\",\n"
+printf "};\n\n"
+
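The pipeline above boils down to: resolve FIRST_EXTERNAL_VECTOR to its literal value, extract every (name, number) pair from *_VECTOR defines, and emit a C array indexed by vector number. A hedged Python equivalent of the same extraction (same default header path as the script; assumes both kinds of define are present):

	import re

	text = open("tools/arch/x86/include/asm/irq_vectors.h").read()
	m = re.search(r'^#define\s+FIRST_EXTERNAL_VECTOR\s+(0x[0-9a-fA-F]+)$',
		      text, re.M)
	text = text.replace("FIRST_EXTERNAL_VECTOR", m.group(1))

	vectors = re.findall(r'^#define\s+(\w+)_VECTOR\s+(0x[0-9a-fA-F]+)$',
			     text, re.M)
	print("static const char *x86_irq_vectors[] = {")
	for name, num in sorted(vectors, key=lambda v: int(v[1], 16)):
		print('\t[%s] = "%s",' % (num, name))
	print("};")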
diff --git a/tools/perf/trace/beauty/tracepoints/x86_msr.c b/tools/perf/trace/beauty/tracepoints/x86_msr.c
new file mode 100644
index 000000000000..6b8f129579d6
--- /dev/null
+++ b/tools/perf/trace/beauty/tracepoints/x86_msr.c
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: LGPL-2.1
+/*
+ * trace/beauty/x86_msr.c
+ *
+ * Copyright (C) 2019, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
+ */
+
+#include "trace/beauty/beauty.h"
+
+#include "trace/beauty/generated/x86_arch_MSRs_array.c"
+
+static DEFINE_STRARRAY(x86_MSRs, "MSR_");
+static DEFINE_STRARRAY_OFFSET(x86_64_specific_MSRs, "MSR_", x86_64_specific_MSRs_offset);
+static DEFINE_STRARRAY_OFFSET(x86_AMD_V_KVM_MSRs, "MSR_", x86_AMD_V_KVM_MSRs_offset);
+
+static struct strarray *x86_MSRs_tables[] = {
+ &strarray__x86_MSRs,
+ &strarray__x86_64_specific_MSRs,
+ &strarray__x86_AMD_V_KVM_MSRs,
+};
+
+static DEFINE_STRARRAYS(x86_MSRs_tables);
+
+static size_t x86_MSR__scnprintf(unsigned long msr, char *bf, size_t size, bool show_prefix)
+{
+ return strarrays__scnprintf(&strarrays__x86_MSRs_tables, bf, size, "%#x", show_prefix, msr);
+}
+
+size_t syscall_arg__scnprintf_x86_MSR(char *bf, size_t size, struct syscall_arg *arg)
+{
+ unsigned long flags = arg->val;
+
+ return x86_MSR__scnprintf(flags, bf, size, arg->show_string_prefix);
+}
+
+bool syscall_arg__strtoul_x86_MSR(char *bf, size_t size, struct syscall_arg *arg __maybe_unused, u64 *ret)
+{
+ return strarrays__strtoul(&strarrays__x86_MSRs_tables, bf, size, ret);
+}
diff --git a/tools/perf/trace/beauty/tracepoints/x86_msr.sh b/tools/perf/trace/beauty/tracepoints/x86_msr.sh
new file mode 100755
index 000000000000..831c02cf0586
--- /dev/null
+++ b/tools/perf/trace/beauty/tracepoints/x86_msr.sh
@@ -0,0 +1,40 @@
+#!/bin/sh
+# SPDX-License-Identifier: LGPL-2.1
+
+if [ $# -ne 1 ] ; then
+ arch_x86_header_dir=tools/arch/x86/include/asm/
+else
+ arch_x86_header_dir=$1
+fi
+
+x86_msr_index=${arch_x86_header_dir}/msr-index.h
+
+# Support all of them later, perhaps with a hash table; for now keep
+# just the ones starting with 0x00000 so as to have a simple
+# array.
+
+printf "static const char *x86_MSRs[] = {\n"
+regex='^[[:space:]]*#[[:space:]]*define[[:space:]]+MSR_([[:alnum:]][[:alnum:]_]+)[[:space:]]+(0x00000[[:xdigit:]]+)[[:space:]]*.*'
+egrep $regex ${x86_msr_index} | egrep -v 'MSR_(ATOM|P[46]|AMD64|IA32_TSCDEADLINE|IDT_FCR4)' | \
+ sed -r "s/$regex/\2 \1/g" | sort -n | \
+ xargs printf "\t[%s] = \"%s\",\n"
+printf "};\n\n"
+
+# Remove MSR_K6_WHCR, which clashes with MSR_LSTAR
+regex='^[[:space:]]*#[[:space:]]*define[[:space:]]+MSR_([[:alnum:]][[:alnum:]_]+)[[:space:]]+(0xc0000[[:xdigit:]]+)[[:space:]]*.*'
+printf "#define x86_64_specific_MSRs_offset "
+egrep $regex ${x86_msr_index} | sed -r "s/$regex/\2/g" | sort -n | head -1
+printf "static const char *x86_64_specific_MSRs[] = {\n"
+egrep $regex ${x86_msr_index} | \
+ sed -r "s/$regex/\2 \1/g" | egrep -vw 'K6_WHCR' | sort -n | \
+ xargs printf "\t[%s - x86_64_specific_MSRs_offset] = \"%s\",\n"
+printf "};\n\n"
+
+regex='^[[:space:]]*#[[:space:]]*define[[:space:]]+MSR_([[:alnum:]][[:alnum:]_]+)[[:space:]]+(0xc0010[[:xdigit:]]+)[[:space:]]*.*'
+printf "#define x86_AMD_V_KVM_MSRs_offset "
+egrep $regex ${x86_msr_index} | sed -r "s/$regex/\2/g" | sort -n | head -1
+printf "static const char *x86_AMD_V_KVM_MSRs[] = {\n"
+egrep $regex ${x86_msr_index} | \
+ sed -r "s/$regex/\2 \1/g" | sort -n | \
+ xargs printf "\t[%s - x86_AMD_V_KVM_MSRs_offset] = \"%s\",\n"
+printf "};\n"
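The offset trick in the second and third tables keeps the high, clustered MSR numbers (0xc0000xxx, 0xc0010xxx) usable as plain C array indexes: each name lands at [value - offset], where the offset is emitted as the smallest matching MSR. A toy illustration of that indexing scheme (the MSR values here are examples, not generated output):

	msrs = {0xc0000080: "EFER", 0xc0000081: "STAR", 0xc0000084: "SYSCALL_MASK"}
	offset = min(msrs)                         # cf. x86_64_specific_MSRs_offset
	table = [None] * (max(msrs) - offset + 1)
	for value, name in msrs.items():
		table[value - offset] = name       # cf. [%s - offset] = "%s"

	def lookup(msr):
		index = msr - offset
		return table[index] if 0 <= index < len(table) else None

	print(lookup(0xc0000081))                  # -> STAR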
diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c
index 82207db8f97c..992705c78bd0 100644
--- a/tools/perf/ui/browsers/annotate.c
+++ b/tools/perf/ui/browsers/annotate.c
@@ -410,7 +410,7 @@ static bool annotate_browser__callq(struct annotate_browser *browser,
struct evsel *evsel,
struct hist_browser_timer *hbt)
{
- struct map_symbol *ms = browser->b.priv;
+ struct map_symbol *ms = browser->b.priv, target_ms;
struct disasm_line *dl = disasm_line(browser->selection);
struct annotation *notes;
char title[SYM_TITLE_MAX_SIZE];
@@ -430,8 +430,11 @@ static bool annotate_browser__callq(struct annotate_browser *browser,
return true;
}
+ target_ms.mg = ms->mg;
+ target_ms.map = ms->map;
+ target_ms.sym = dl->ops.target.sym;
pthread_mutex_unlock(&notes->lock);
- symbol__tui_annotate(dl->ops.target.sym, ms->map, evsel, hbt, browser->opts);
+ symbol__tui_annotate(&target_ms, evsel, hbt, browser->opts);
sym_title(ms->sym, ms->map, title, sizeof(title), browser->opts->percent_type);
ui_browser__show_title(&browser->b, title);
return true;
@@ -874,7 +877,7 @@ int map_symbol__tui_annotate(struct map_symbol *ms, struct evsel *evsel,
struct hist_browser_timer *hbt,
struct annotation_options *opts)
{
- return symbol__tui_annotate(ms->sym, ms->map, evsel, hbt, opts);
+ return symbol__tui_annotate(ms, evsel, hbt, opts);
}
int hist_entry__tui_annotate(struct hist_entry *he, struct evsel *evsel,
@@ -888,16 +891,12 @@ int hist_entry__tui_annotate(struct hist_entry *he, struct evsel *evsel,
return map_symbol__tui_annotate(&he->ms, evsel, hbt, opts);
}
-int symbol__tui_annotate(struct symbol *sym, struct map *map,
- struct evsel *evsel,
+int symbol__tui_annotate(struct map_symbol *ms, struct evsel *evsel,
struct hist_browser_timer *hbt,
struct annotation_options *opts)
{
+ struct symbol *sym = ms->sym;
struct annotation *notes = symbol__annotation(sym);
- struct map_symbol ms = {
- .map = map,
- .sym = sym,
- };
struct annotate_browser browser = {
.b = {
.refresh = annotate_browser__refresh,
@@ -905,7 +904,7 @@ int symbol__tui_annotate(struct symbol *sym, struct map *map,
.write = annotate_browser__write,
.filter = disasm_line__filter,
.extra_title_lines = 1, /* for hists__scnprintf_title() */
- .priv = &ms,
+ .priv = ms,
.use_navkeypressed = true,
},
.opts = opts,
@@ -915,13 +914,13 @@ int symbol__tui_annotate(struct symbol *sym, struct map *map,
if (sym == NULL)
return -1;
- if (map->dso->annotate_warned)
+ if (ms->map->dso->annotate_warned)
return -1;
- err = symbol__annotate2(sym, map, evsel, opts, &browser.arch);
+ err = symbol__annotate2(ms, evsel, opts, &browser.arch);
if (err) {
char msg[BUFSIZ];
- symbol__strerror_disassemble(sym, map, err, msg, sizeof(msg));
+ symbol__strerror_disassemble(ms, err, msg, sizeof(msg));
ui__error("Couldn't annotate %s:\n%s", sym->name, msg);
goto out_free_offsets;
}
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
index 7a7187e069b4..d4d3558fdef4 100644
--- a/tools/perf/ui/browsers/hists.c
+++ b/tools/perf/ui/browsers/hists.c
@@ -26,6 +26,7 @@
#include "../../util/sort.h"
#include "../../util/top.h"
#include "../../util/thread.h"
+#include "../../util/block-info.h"
#include "../../arch/common.h"
#include "../../perf.h"
@@ -1783,7 +1784,11 @@ static unsigned int hist_browser__refresh(struct ui_browser *browser)
continue;
}
- percent = hist_entry__get_percent_limit(h);
+ if (symbol_conf.report_individual_block)
+ percent = block_info__total_cycles_percent(h);
+ else
+ percent = hist_entry__get_percent_limit(h);
+
if (percent < hb->min_pcnt)
continue;
@@ -2380,7 +2385,11 @@ do_annotate(struct hist_browser *browser, struct popup_action *act)
if (!notes->src)
return 0;
- evsel = hists_to_evsel(browser->hists);
+ if (browser->block_evsel)
+ evsel = browser->block_evsel;
+ else
+ evsel = hists_to_evsel(browser->hists);
+
err = map_symbol__tui_annotate(&act->ms, evsel, browser->hbt,
browser->annotation_opts);
he = hist_browser__selected_entry(browser);
@@ -2400,16 +2409,15 @@ do_annotate(struct hist_browser *browser, struct popup_action *act)
static int
add_annotate_opt(struct hist_browser *browser __maybe_unused,
struct popup_action *act, char **optstr,
- struct map *map, struct symbol *sym)
+ struct map_symbol *ms)
{
- if (sym == NULL || map->dso->annotate_warned)
+ if (ms->sym == NULL || ms->map->dso->annotate_warned)
return 0;
- if (asprintf(optstr, "Annotate %s", sym->name) < 0)
+ if (asprintf(optstr, "Annotate %s", ms->sym->name) < 0)
return 0;
- act->ms.map = map;
- act->ms.sym = sym;
+ act->ms = *ms;
act->fn = do_annotate;
return 1;
}
@@ -3110,20 +3118,17 @@ static int perf_evsel__hists_browse(struct evsel *evsel, int nr_events,
nr_options += add_annotate_opt(browser,
&actions[nr_options],
&options[nr_options],
- bi->from.map,
- bi->from.sym);
- if (bi->to.sym != bi->from.sym)
+ &bi->from.ms);
+ if (bi->to.ms.sym != bi->from.ms.sym)
nr_options += add_annotate_opt(browser,
&actions[nr_options],
&options[nr_options],
- bi->to.map,
- bi->to.sym);
+ &bi->to.ms);
} else {
nr_options += add_annotate_opt(browser,
&actions[nr_options],
&options[nr_options],
- browser->selection->map,
- browser->selection->sym);
+ browser->selection);
}
skip_annotation:
nr_options += add_thread_opt(browser, &actions[nr_options],
@@ -3443,3 +3448,75 @@ single_entry:
warn_lost_event,
annotation_opts);
}
+
+static int block_hists_browser__title(struct hist_browser *browser, char *bf,
+ size_t size)
+{
+ struct hists *hists = evsel__hists(browser->block_evsel);
+ const char *evname = perf_evsel__name(browser->block_evsel);
+ unsigned long nr_samples = hists->stats.nr_events[PERF_RECORD_SAMPLE];
+ int ret;
+
+ ret = scnprintf(bf, size, "# Samples: %lu", nr_samples);
+ if (evname)
+ scnprintf(bf + ret, size - ret, " of event '%s'", evname);
+
+ return 0;
+}
+
+int block_hists_tui_browse(struct block_hist *bh, struct evsel *evsel,
+ float min_percent, struct perf_env *env,
+ struct annotation_options *annotation_opts)
+{
+ struct hists *hists = &bh->block_hists;
+ struct hist_browser *browser;
+ int key = -1;
+ struct popup_action action;
+ static const char help[] =
+ " q Quit \n";
+
+ browser = hist_browser__new(hists);
+ if (!browser)
+ return -1;
+
+ browser->block_evsel = evsel;
+ browser->title = block_hists_browser__title;
+ browser->min_pcnt = min_percent;
+ browser->env = env;
+ browser->annotation_opts = annotation_opts;
+
+ /* reset abort key so that it can get Ctrl-C as a key */
+ SLang_reset_tty();
+ SLang_init_tty(0, 0, 0);
+
+ memset(&action, 0, sizeof(action));
+
+ while (1) {
+ key = hist_browser__run(browser, "? - help", true);
+
+ switch (key) {
+ case 'q':
+ goto out;
+ case '?':
+ ui_browser__help_window(&browser->b, help);
+ break;
+ case 'a':
+ case K_ENTER:
+ if (!browser->selection ||
+ !browser->selection->sym) {
+ continue;
+ }
+
+ action.ms.map = browser->selection->map;
+ action.ms.sym = browser->selection->sym;
+ do_annotate(browser, &action);
+ continue;
+ default:
+ break;
+ }
+ }
+
+out:
+ hist_browser__delete(browser);
+ return 0;
+}
diff --git a/tools/perf/ui/browsers/hists.h b/tools/perf/ui/browsers/hists.h
index 91d3e18b50aa..078f2f2c7abd 100644
--- a/tools/perf/ui/browsers/hists.h
+++ b/tools/perf/ui/browsers/hists.h
@@ -5,6 +5,7 @@
#include "ui/browser.h"
struct annotation_options;
+struct evsel;
struct hist_browser {
struct ui_browser b;
@@ -15,6 +16,7 @@ struct hist_browser {
struct pstack *pstack;
struct perf_env *env;
struct annotation_options *annotation_opts;
+ struct evsel *block_evsel;
int print_seq;
bool show_dso;
bool show_headers;
diff --git a/tools/perf/ui/gtk/annotate.c b/tools/perf/ui/gtk/annotate.c
index 8e744af24f7c..22cc240f7371 100644
--- a/tools/perf/ui/gtk/annotate.c
+++ b/tools/perf/ui/gtk/annotate.c
@@ -54,10 +54,10 @@ static int perf_gtk__get_percent(char *buf, size_t size, struct symbol *sym,
return ret;
}
-static int perf_gtk__get_offset(char *buf, size_t size, struct symbol *sym,
- struct map *map, struct disasm_line *dl)
+static int perf_gtk__get_offset(char *buf, size_t size, struct map_symbol *ms,
+ struct disasm_line *dl)
{
- u64 start = map__rip_2objdump(map, sym->start);
+ u64 start = map__rip_2objdump(ms->map, ms->sym->start);
strcpy(buf, "");
@@ -91,10 +91,11 @@ static int perf_gtk__get_line(char *buf, size_t size, struct disasm_line *dl)
return ret;
}
-static int perf_gtk__annotate_symbol(GtkWidget *window, struct symbol *sym,
- struct map *map, struct evsel *evsel,
+static int perf_gtk__annotate_symbol(GtkWidget *window, struct map_symbol *ms,
+ struct evsel *evsel,
struct hist_browser_timer *hbt __maybe_unused)
{
+ struct symbol *sym = ms->sym;
struct disasm_line *pos, *n;
struct annotation *notes;
GType col_types[MAX_ANN_COLS];
@@ -144,7 +145,7 @@ static int perf_gtk__annotate_symbol(GtkWidget *window, struct symbol *sym,
if (ret)
gtk_list_store_set(store, &iter, ANN_COL__PERCENT, s, -1);
- if (perf_gtk__get_offset(s, sizeof(s), sym, map, pos))
+ if (perf_gtk__get_offset(s, sizeof(s), ms, pos))
gtk_list_store_set(store, &iter, ANN_COL__OFFSET, s, -1);
if (perf_gtk__get_line(s, sizeof(s), pos))
gtk_list_store_set(store, &iter, ANN_COL__LINE, s, -1);
@@ -160,23 +161,23 @@ static int perf_gtk__annotate_symbol(GtkWidget *window, struct symbol *sym,
return 0;
}
-static int symbol__gtk_annotate(struct symbol *sym, struct map *map,
- struct evsel *evsel,
+static int symbol__gtk_annotate(struct map_symbol *ms, struct evsel *evsel,
struct hist_browser_timer *hbt)
{
+ struct symbol *sym = ms->sym;
GtkWidget *window;
GtkWidget *notebook;
GtkWidget *scrolled_window;
GtkWidget *tab_label;
int err;
- if (map->dso->annotate_warned)
+ if (ms->map->dso->annotate_warned)
return -1;
- err = symbol__annotate(sym, map, evsel, 0, &annotation__default_options, NULL);
+ err = symbol__annotate(ms, evsel, 0, &annotation__default_options, NULL);
if (err) {
char msg[BUFSIZ];
- symbol__strerror_disassemble(sym, map, err, msg, sizeof(msg));
+ symbol__strerror_disassemble(ms, err, msg, sizeof(msg));
ui__error("Couldn't annotate %s: %s\n", sym->name, msg);
return -1;
}
@@ -234,7 +235,7 @@ static int symbol__gtk_annotate(struct symbol *sym, struct map *map,
gtk_notebook_append_page(GTK_NOTEBOOK(notebook), scrolled_window,
tab_label);
- perf_gtk__annotate_symbol(scrolled_window, sym, map, evsel, hbt);
+ perf_gtk__annotate_symbol(scrolled_window, ms, evsel, hbt);
return 0;
}
@@ -242,7 +243,7 @@ int hist_entry__gtk_annotate(struct hist_entry *he,
struct evsel *evsel,
struct hist_browser_timer *hbt)
{
- return symbol__gtk_annotate(he->ms.sym, he->ms.map, evsel, hbt);
+ return symbol__gtk_annotate(&he->ms, evsel, hbt);
}
void perf_gtk__show_annotations(void)
diff --git a/tools/perf/ui/stdio/hist.c b/tools/perf/ui/stdio/hist.c
index 5365606e9dad..132056c7d5b7 100644
--- a/tools/perf/ui/stdio/hist.c
+++ b/tools/perf/ui/stdio/hist.c
@@ -15,6 +15,7 @@
#include "../../util/srcline.h"
#include "../../util/string2.h"
#include "../../util/thread.h"
+#include "../../util/block-info.h"
#include <linux/ctype.h>
#include <linux/zalloc.h>
@@ -558,6 +559,25 @@ static int hist_entry__block_fprintf(struct hist_entry *he,
return ret;
}
+static int hist_entry__individual_block_fprintf(struct hist_entry *he,
+ char *bf, size_t size,
+ FILE *fp)
+{
+ int ret = 0;
+
+ struct perf_hpp hpp = {
+ .buf = bf,
+ .size = size,
+ .skip = false,
+ };
+
+ hist_entry__snprintf(he, &hpp);
+ if (!hpp.skip)
+ ret += fprintf(fp, "%s\n", bf);
+
+ return ret;
+}
+
static int hist_entry__fprintf(struct hist_entry *he, size_t size,
char *bf, size_t bfsz, FILE *fp,
bool ignore_callchains)
@@ -580,6 +600,9 @@ static int hist_entry__fprintf(struct hist_entry *he, size_t size,
if (symbol_conf.report_block)
return hist_entry__block_fprintf(he, bf, size, fp);
+ if (symbol_conf.report_individual_block)
+ return hist_entry__individual_block_fprintf(he, bf, size, fp);
+
hist_entry__snprintf(he, &hpp);
ret = fprintf(fp, "%s\n", bf);
@@ -834,7 +857,11 @@ size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
if (h->filtered)
continue;
- percent = hist_entry__get_percent_limit(h);
+ if (symbol_conf.report_individual_block)
+ percent = block_info__total_cycles_percent(h);
+ else
+ percent = hist_entry__get_percent_limit(h);
+
if (percent < min_pcnt)
continue;
diff --git a/tools/perf/util/Build b/tools/perf/util/Build
index 8dcfca1a882f..b8e05a147b2b 100644
--- a/tools/perf/util/Build
+++ b/tools/perf/util/Build
@@ -1,4 +1,5 @@
perf-y += annotate.o
+perf-y += block-info.o
perf-y += block-range.o
perf-y += build-id.o
perf-y += cacheline.o
@@ -95,6 +96,7 @@ perf-y += cloexec.o
perf-y += call-path.o
perf-y += rwsem.o
perf-y += thread-stack.o
+perf-y += spark.o
perf-$(CONFIG_AUXTRACE) += auxtrace.o
perf-$(CONFIG_AUXTRACE) += intel-pt-decoder/
perf-$(CONFIG_AUXTRACE) += intel-pt.o
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index e42bf572358c..5ea9a4534848 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -43,6 +43,7 @@
#include <linux/string.h>
#include <bpf/libbpf.h>
#include <subcmd/parse-options.h>
+#include <subcmd/run-command.h>
/* FIXME: For the HE_COLORSET */
#include "ui/browser.h"
@@ -242,7 +243,7 @@ static int call__parse(struct arch *arch, struct ins_operands *ops, struct map_s
char *endptr, *tok, *name;
struct map *map = ms->map;
struct addr_map_symbol target = {
- .map = map,
+ .ms = { .map = map, },
};
ops->target.addr = strtoull(ops->raw, &endptr, 16);
@@ -270,9 +271,9 @@ static int call__parse(struct arch *arch, struct ins_operands *ops, struct map_s
find_target:
target.addr = map__objdump_2mem(map, ops->target.addr);
- if (map_groups__find_ams(&target) == 0 &&
- map__rip_2objdump(target.map, map->map_ip(target.map, target.addr)) == ops->target.addr)
- ops->target.sym = target.sym;
+ if (map_groups__find_ams(ms->mg, &target) == 0 &&
+ map__rip_2objdump(target.ms.map, map->map_ip(target.ms.map, target.addr)) == ops->target.addr)
+ ops->target.sym = target.ms.sym;
return 0;
@@ -331,7 +332,7 @@ static int jump__parse(struct arch *arch, struct ins_operands *ops, struct map_s
struct map *map = ms->map;
struct symbol *sym = ms->sym;
struct addr_map_symbol target = {
- .map = map,
+ .ms = { .map = map, },
};
const char *c = strchr(ops->raw, ',');
u64 start, end;
@@ -390,9 +391,9 @@ static int jump__parse(struct arch *arch, struct ins_operands *ops, struct map_s
* Actual navigation will come next, with further understanding of how
* the symbol searching and disassembly should be done.
*/
- if (map_groups__find_ams(&target) == 0 &&
- map__rip_2objdump(target.map, map->map_ip(target.map, target.addr)) == ops->target.addr)
- ops->target.sym = target.sym;
+ if (map_groups__find_ams(ms->mg, &target) == 0 &&
+ map__rip_2objdump(target.ms.map, map->map_ip(target.ms.map, target.addr)) == ops->target.addr)
+ ops->target.sym = target.ms.sym;
if (!ops->target.outside) {
ops->target.offset = target.addr - start;
@@ -853,6 +854,10 @@ static int __symbol__account_cycles(struct cyc_hist *ch,
ch[offset].start < start)
return 0;
}
+
+ if (ch[offset].num < NUM_SPARKS)
+ ch[offset].cycles_spark[ch[offset].num] = cycles;
+
ch[offset].have_start = have_start;
ch[offset].start = start;
ch[offset].cycles += cycles;
@@ -860,14 +865,15 @@ static int __symbol__account_cycles(struct cyc_hist *ch,
return 0;
}
-static int __symbol__inc_addr_samples(struct symbol *sym, struct map *map,
+static int __symbol__inc_addr_samples(struct map_symbol *ms,
struct annotated_source *src, int evidx, u64 addr,
struct perf_sample *sample)
{
+ struct symbol *sym = ms->sym;
unsigned offset;
struct sym_hist *h;
- pr_debug3("%s: addr=%#" PRIx64 "\n", __func__, map->unmap_ip(map, addr));
+ pr_debug3("%s: addr=%#" PRIx64 "\n", __func__, ms->map->unmap_ip(ms->map, addr));
if ((addr < sym->start || addr >= sym->end) &&
(addr != sym->end || sym->start != sym->end)) {
@@ -934,17 +940,17 @@ alloc_histograms:
return notes->src;
}
-static int symbol__inc_addr_samples(struct symbol *sym, struct map *map,
+static int symbol__inc_addr_samples(struct map_symbol *ms,
struct evsel *evsel, u64 addr,
struct perf_sample *sample)
{
+ struct symbol *sym = ms->sym;
struct annotated_source *src;
if (sym == NULL)
return 0;
src = symbol__hists(sym, evsel->evlist->core.nr_entries);
- return (src) ? __symbol__inc_addr_samples(sym, map, src, evsel->idx,
- addr, sample) : 0;
+ return src ? __symbol__inc_addr_samples(ms, src, evsel->idx, addr, sample) : 0;
}
static int symbol__account_cycles(u64 addr, u64 start,
@@ -992,17 +998,17 @@ int addr_map_symbol__account_cycles(struct addr_map_symbol *ams,
* it starts on the function start.
*/
if (start &&
- (start->sym == ams->sym ||
- (ams->sym &&
- start->addr == ams->sym->start + ams->map->start)))
+ (start->ms.sym == ams->ms.sym ||
+ (ams->ms.sym &&
+ start->addr == ams->ms.sym->start + ams->ms.map->start)))
saddr = start->al_addr;
if (saddr == 0)
pr_debug2("BB with bad start: addr %"PRIx64" start %"PRIx64" sym %"PRIx64" saddr %"PRIx64"\n",
ams->addr,
start ? start->addr : 0,
- ams->sym ? ams->sym->start + ams->map->start : 0,
+ ams->ms.sym ? ams->ms.sym->start + ams->ms.map->start : 0,
saddr);
- err = symbol__account_cycles(ams->al_addr, saddr, ams->sym, cycles);
+ err = symbol__account_cycles(ams->al_addr, saddr, ams->ms.sym, cycles);
if (err)
pr_debug2("account_cycles failed %d\n", err);
return err;
@@ -1088,13 +1094,13 @@ void annotation__compute_ipc(struct annotation *notes, size_t size)
int addr_map_symbol__inc_samples(struct addr_map_symbol *ams, struct perf_sample *sample,
struct evsel *evsel)
{
- return symbol__inc_addr_samples(ams->sym, ams->map, evsel, ams->al_addr, sample);
+ return symbol__inc_addr_samples(&ams->ms, evsel, ams->al_addr, sample);
}
int hist_entry__inc_addr_samples(struct hist_entry *he, struct perf_sample *sample,
struct evsel *evsel, u64 ip)
{
- return symbol__inc_addr_samples(he->ms.sym, he->ms.map, evsel, ip, sample);
+ return symbol__inc_addr_samples(&he->ms, evsel, ip, sample);
}
static void disasm_line__init_ins(struct disasm_line *dl, struct arch *arch, struct map_symbol *ms)
@@ -1485,44 +1491,26 @@ annotation_line__print(struct annotation_line *al, struct symbol *sym, u64 start
* means that it's not a disassembly line so should be treated differently.
* The ops.raw part will be parsed further according to type of the instruction.
*/
-static int symbol__parse_objdump_line(struct symbol *sym, FILE *file,
+static int symbol__parse_objdump_line(struct symbol *sym,
struct annotate_args *args,
- int *line_nr)
+ char *parsed_line, int *line_nr)
{
struct map *map = args->ms.map;
struct annotation *notes = symbol__annotation(sym);
struct disasm_line *dl;
- char *line = NULL, *parsed_line, *tmp, *tmp2;
- size_t line_len;
+ char *tmp;
s64 line_ip, offset = -1;
regmatch_t match[2];
- if (getline(&line, &line_len, file) < 0)
- return -1;
-
- if (!line)
- return -1;
-
- line_ip = -1;
- parsed_line = strim(line);
-
/* /filename:linenr ? Save line number and ignore. */
if (regexec(&file_lineno, parsed_line, 2, match, 0) == 0) {
*line_nr = atoi(parsed_line + match[1].rm_so);
return 0;
}
- tmp = skip_spaces(parsed_line);
- if (*tmp) {
- /*
- * Parse hexa addresses followed by ':'
- */
- line_ip = strtoull(tmp, &tmp2, 16);
- if (*tmp2 != ':' || tmp == tmp2 || tmp2[1] == '\0')
- line_ip = -1;
- }
-
- if (line_ip != -1) {
+ /* Process hex address followed by ':'. */
+ line_ip = strtoull(parsed_line, &tmp, 16);
+ if (parsed_line != tmp && tmp[0] == ':' && tmp[1] != '\0') {
u64 start = map__rip_2objdump(map, sym->start),
end = map__rip_2objdump(map, sym->end);
@@ -1530,7 +1518,7 @@ static int symbol__parse_objdump_line(struct symbol *sym, FILE *file,
if ((u64)line_ip < start || (u64)line_ip >= end)
offset = -1;
else
- parsed_line = tmp2 + 1;
+ parsed_line = tmp + 1;
}
args->offset = offset;
@@ -1539,7 +1527,6 @@ static int symbol__parse_objdump_line(struct symbol *sym, FILE *file,
args->ms.sym = sym;
dl = disasm_line__new(args);
- free(line);
(*line_nr)++;
if (dl == NULL)
@@ -1554,13 +1541,13 @@ static int symbol__parse_objdump_line(struct symbol *sym, FILE *file,
/* kcore has no symbols, so add the call target symbol */
if (dl->ins.ops && ins__is_call(&dl->ins) && !dl->ops.target.sym) {
struct addr_map_symbol target = {
- .map = map,
.addr = dl->ops.target.addr,
+ .ms = { .map = map, },
};
- if (!map_groups__find_ams(&target) &&
- target.sym->start == target.al_addr)
- dl->ops.target.sym = target.sym;
+ if (!map_groups__find_ams(args->ms.mg, &target) &&
+ target.ms.sym->start == target.al_addr)
+ dl->ops.target.sym = target.ms.sym;
}
annotation_line__add(&dl->al, &notes->src->source);
@@ -1597,10 +1584,9 @@ static void delete_last_nop(struct symbol *sym)
}
}
-int symbol__strerror_disassemble(struct symbol *sym __maybe_unused, struct map *map,
- int errnum, char *buf, size_t buflen)
+int symbol__strerror_disassemble(struct map_symbol *ms, int errnum, char *buf, size_t buflen)
{
- struct dso *dso = map->dso;
+ struct dso *dso = ms->map->dso;
BUG_ON(buflen == 0);
@@ -1857,6 +1843,67 @@ static int symbol__disassemble_bpf(struct symbol *sym __maybe_unused,
}
#endif // defined(HAVE_LIBBFD_SUPPORT) && defined(HAVE_LIBBPF_SUPPORT)
+/*
+ * Possibly create a new version of line with tabs expanded. Returns the
+ * existing or new line; storage is updated if a new line is allocated. If
+ * allocation fails then NULL is returned.
+ */
+static char *expand_tabs(char *line, char **storage, size_t *storage_len)
+{
+ size_t i, src, dst, len, new_storage_len, num_tabs;
+ char *new_line;
+ size_t line_len = strlen(line);
+
+ for (num_tabs = 0, i = 0; i < line_len; i++)
+ if (line[i] == '\t')
+ num_tabs++;
+
+ if (num_tabs == 0)
+ return line;
+
+ /*
+	 * Space for the line and '\0'; the leading and trailing spaces have
+	 * already been trimmed by the caller. Each tab may expand to at most
+	 * 8 spaces, i.e. 7 additional characters.
+ */
+ new_storage_len = line_len + 1 + (num_tabs * 7);
+
+ new_line = malloc(new_storage_len);
+ if (new_line == NULL) {
+ pr_err("Failure allocating memory for tab expansion\n");
+ return NULL;
+ }
+
+ /*
+ * Copy regions starting at src and expand tabs. If there are two
+ * adjacent tabs then 'src == i', the memcpy is of size 0 and the spaces
+ * are inserted.
+ */
+ for (i = 0, src = 0, dst = 0; i < line_len && num_tabs; i++) {
+ if (line[i] == '\t') {
+ len = i - src;
+ memcpy(&new_line[dst], &line[src], len);
+ dst += len;
+ new_line[dst++] = ' ';
+ while (dst % 8 != 0)
+ new_line[dst++] = ' ';
+ src = i + 1;
+ num_tabs--;
+ }
+ }
+
+ /* Expand the last region. */
+ len = line_len - src;
+ memcpy(&new_line[dst], &line[src], len);
+ dst += len;
+ new_line[dst] = '\0';
+
+ free(*storage);
+ *storage = new_line;
+ *storage_len = new_storage_len;
+	return new_line;
+}
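
To make the tab-stop rule above concrete, here is a minimal standalone sketch
(not part of the patch; demo_expand() is an invented name) of the same
expansion: each '\t' becomes at least one space and then pads to the next
multiple of 8 output columns, matching the "dst % 8" loop above.

    #include <stdio.h>
    #include <string.h>

    /* Expand tabs to 8-column tab stops, as expand_tabs() does. */
    static void demo_expand(const char *in, char *out)
    {
            size_t dst = 0;

            for (size_t i = 0; i < strlen(in); i++) {
                    if (in[i] == '\t') {
                            out[dst++] = ' ';
                            while (dst % 8 != 0)
                                    out[dst++] = ' ';
                    } else {
                            out[dst++] = in[i];
                    }
            }
            out[dst] = '\0';
    }

    int main(void)
    {
            char buf[128];

            demo_expand("mov\t%rdi,%rax", buf);
            printf("[%s]\n", buf); /* operand column starts at column 8 */
            return 0;
    }
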
+
static int symbol__disassemble(struct symbol *sym, struct annotate_args *args)
{
struct annotation_options *opts = args->options;
@@ -1868,10 +1915,19 @@ static int symbol__disassemble(struct symbol *sym, struct annotate_args *args)
struct kcore_extract kce;
bool delete_extract = false;
bool decomp = false;
- int stdout_fd[2];
int lineno = 0;
int nline;
- pid_t pid;
+ char *line;
+ size_t line_len;
+ const char *objdump_argv[] = {
+ "/bin/sh",
+ "-c",
+ NULL, /* Will be the objdump command to run. */
+ "--",
+ NULL, /* Will be the symfs path. */
+ NULL,
+ };
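
A note on the argv layout above: with "sh -c", the word after the command
string becomes $0 and the one after that $1, so "--" lands in $0 and
symfs_filename in $1, which the objdump command string below references as
"$1" to keep filenames with unusual characters intact. An illustrative
one-liner (not part of the patch) showing the binding:

    $ sh -c 'echo "script=$0 file=$1"' -- /tmp/vmlinux
    script=-- file=/tmp/vmlinux
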
+ struct child_process objdump_process;
int err = dso__disassemble_filename(dso, symfs_filename, sizeof(symfs_filename));
if (err)
@@ -1901,7 +1957,7 @@ static int symbol__disassemble(struct symbol *sym, struct annotate_args *args)
if (dso__decompress_kmodule_path(dso, symfs_filename,
tmp, sizeof(tmp)) < 0)
- goto out;
+ return -1;
decomp = true;
strcpy(symfs_filename, tmp);
@@ -1910,13 +1966,13 @@ static int symbol__disassemble(struct symbol *sym, struct annotate_args *args)
err = asprintf(&command,
"%s %s%s --start-address=0x%016" PRIx64
" --stop-address=0x%016" PRIx64
- " -l -d %s %s -C \"$1\" 2>/dev/null|grep -v \"$1:\"|expand",
+ " -l -d %s %s -C \"$1\"",
opts->objdump_path ?: "objdump",
opts->disassembler_style ? "-M " : "",
opts->disassembler_style ?: "",
map__rip_2objdump(map, sym->start),
map__rip_2objdump(map, sym->end),
- opts->show_asm_raw ? "" : "--no-show-raw",
+ opts->show_asm_raw ? "" : "--no-show-raw-insn",
opts->annotate_src ? "-S" : "");
if (err < 0) {
@@ -1926,55 +1982,73 @@ static int symbol__disassemble(struct symbol *sym, struct annotate_args *args)
pr_debug("Executing: %s\n", command);
- err = -1;
- if (pipe(stdout_fd) < 0) {
- pr_err("Failure creating the pipe to run %s\n", command);
- goto out_free_command;
- }
-
- pid = fork();
- if (pid < 0) {
- pr_err("Failure forking to run %s\n", command);
- goto out_close_stdout;
- }
+ objdump_argv[2] = command;
+ objdump_argv[4] = symfs_filename;
- if (pid == 0) {
- close(stdout_fd[0]);
- dup2(stdout_fd[1], 1);
- close(stdout_fd[1]);
- execl("/bin/sh", "sh", "-c", command, "--", symfs_filename,
- NULL);
- perror(command);
- exit(-1);
+	/* Create a pipe to read the child's stdout from. */
+ memset(&objdump_process, 0, sizeof(objdump_process));
+ objdump_process.argv = objdump_argv;
+ objdump_process.out = -1;
+ if (start_command(&objdump_process)) {
+ pr_err("Failure starting to run %s\n", command);
+ err = -1;
+ goto out_free_command;
}
- close(stdout_fd[1]);
-
- file = fdopen(stdout_fd[0], "r");
+ file = fdopen(objdump_process.out, "r");
if (!file) {
pr_err("Failure creating FILE stream for %s\n", command);
/*
		 * If we were using debug info, we should retry with the
		 * original binary.
*/
- goto out_free_command;
+ err = -1;
+ goto out_close_stdout;
}
+ /* Storage for getline. */
+ line = NULL;
+ line_len = 0;
+
nline = 0;
while (!feof(file)) {
+ const char *match;
+ char *expanded_line;
+
+ if (getline(&line, &line_len, file) < 0 || !line)
+ break;
+
+ /* Skip lines containing "filename:" */
+ match = strstr(line, symfs_filename);
+ if (match && match[strlen(symfs_filename)] == ':')
+ continue;
+
+ expanded_line = strim(line);
+ expanded_line = expand_tabs(expanded_line, &line, &line_len);
+ if (!expanded_line)
+ break;
+
/*
		 * The source code line number (lineno) needs to be kept
		 * across calls to symbol__parse_objdump_line(), so that it
		 * can be associated with the instructions until the next one.
* See disasm_line__new() and struct disasm_line::line_nr.
*/
- if (symbol__parse_objdump_line(sym, file, args, &lineno) < 0)
+ if (symbol__parse_objdump_line(sym, args, expanded_line,
+ &lineno) < 0)
break;
nline++;
}
+ free(line);
+
+ err = finish_command(&objdump_process);
+ if (err)
+ pr_err("Error running %s\n", command);
- if (nline == 0)
+ if (nline == 0) {
+ err = -1;
pr_err("No output from %s\n", command);
+ }
/*
	 * kallsyms does not have symbol sizes, so there may be a nop at the end.
@@ -1984,23 +2058,21 @@ static int symbol__disassemble(struct symbol *sym, struct annotate_args *args)
delete_last_nop(sym);
fclose(file);
- err = 0;
+
+out_close_stdout:
+ close(objdump_process.out);
+
out_free_command:
free(command);
-out_remove_tmp:
- close(stdout_fd[0]);
+out_remove_tmp:
if (decomp)
unlink(symfs_filename);
if (delete_extract)
kcore_extract__delete(&kce);
-out:
- return err;
-out_close_stdout:
- close(stdout_fd[1]);
- goto out_free_command;
+ return err;
}
static void calc_percent(struct sym_hist *sym_hist,
@@ -2071,11 +2143,10 @@ void symbol__calc_percent(struct symbol *sym, struct evsel *evsel)
annotation__calc_percent(notes, evsel, symbol__size(sym));
}
-int symbol__annotate(struct symbol *sym, struct map *map,
- struct evsel *evsel, size_t privsize,
- struct annotation_options *options,
- struct arch **parch)
+int symbol__annotate(struct map_symbol *ms, struct evsel *evsel, size_t privsize,
+ struct annotation_options *options, struct arch **parch)
{
+ struct symbol *sym = ms->sym;
struct annotation *notes = symbol__annotation(sym);
struct annotate_args args = {
.privsize = privsize,
@@ -2105,9 +2176,8 @@ int symbol__annotate(struct symbol *sym, struct map *map,
}
}
- args.ms.map = map;
- args.ms.sym = sym;
- notes->start = map__rip_2objdump(map, sym->start);
+ args.ms = *ms;
+ notes->start = map__rip_2objdump(ms->map, sym->start);
return symbol__disassemble(sym, &args);
}
@@ -2263,10 +2333,11 @@ static int annotated_source__addr_fmt_width(struct list_head *lines, u64 start)
return 0;
}
-int symbol__annotate_printf(struct symbol *sym, struct map *map,
- struct evsel *evsel,
+int symbol__annotate_printf(struct map_symbol *ms, struct evsel *evsel,
struct annotation_options *opts)
{
+ struct map *map = ms->map;
+ struct symbol *sym = ms->sym;
struct dso *dso = map->dso;
char *filename;
const char *d_filename;
@@ -2670,30 +2741,29 @@ static void annotation__calc_lines(struct annotation *notes, struct map *map,
resort_source_line(root, &tmp_root);
}
-static void symbol__calc_lines(struct symbol *sym, struct map *map,
- struct rb_root *root,
+static void symbol__calc_lines(struct map_symbol *ms, struct rb_root *root,
struct annotation_options *opts)
{
- struct annotation *notes = symbol__annotation(sym);
+ struct annotation *notes = symbol__annotation(ms->sym);
- annotation__calc_lines(notes, map, root, opts);
+ annotation__calc_lines(notes, ms->map, root, opts);
}
-int symbol__tty_annotate2(struct symbol *sym, struct map *map,
- struct evsel *evsel,
+int symbol__tty_annotate2(struct map_symbol *ms, struct evsel *evsel,
struct annotation_options *opts)
{
- struct dso *dso = map->dso;
+ struct dso *dso = ms->map->dso;
+ struct symbol *sym = ms->sym;
struct rb_root source_line = RB_ROOT;
struct hists *hists = evsel__hists(evsel);
char buf[1024];
- if (symbol__annotate2(sym, map, evsel, opts, NULL) < 0)
+ if (symbol__annotate2(ms, evsel, opts, NULL) < 0)
return -1;
if (opts->print_lines) {
srcline_full_filename = opts->full_path;
- symbol__calc_lines(sym, map, &source_line, opts);
+ symbol__calc_lines(ms, &source_line, opts);
print_summary(&source_line, dso->long_name);
}
@@ -2707,25 +2777,25 @@ int symbol__tty_annotate2(struct symbol *sym, struct map *map,
return 0;
}
-int symbol__tty_annotate(struct symbol *sym, struct map *map,
- struct evsel *evsel,
+int symbol__tty_annotate(struct map_symbol *ms, struct evsel *evsel,
struct annotation_options *opts)
{
- struct dso *dso = map->dso;
+ struct dso *dso = ms->map->dso;
+ struct symbol *sym = ms->sym;
struct rb_root source_line = RB_ROOT;
- if (symbol__annotate(sym, map, evsel, 0, opts, NULL) < 0)
+ if (symbol__annotate(ms, evsel, 0, opts, NULL) < 0)
return -1;
symbol__calc_percent(sym, evsel);
if (opts->print_lines) {
srcline_full_filename = opts->full_path;
- symbol__calc_lines(sym, map, &source_line, opts);
+ symbol__calc_lines(ms, &source_line, opts);
print_summary(&source_line, dso->long_name);
}
- symbol__annotate_printf(sym, map, evsel, opts);
+ symbol__annotate_printf(ms, evsel, opts);
annotated_source__purge(symbol__annotation(sym)->src);
@@ -2979,9 +3049,10 @@ void annotation_line__write(struct annotation_line *al, struct annotation *notes
wops->write_graph);
}
-int symbol__annotate2(struct symbol *sym, struct map *map, struct evsel *evsel,
+int symbol__annotate2(struct map_symbol *ms, struct evsel *evsel,
struct annotation_options *options, struct arch **parch)
{
+ struct symbol *sym = ms->sym;
struct annotation *notes = symbol__annotation(sym);
size_t size = symbol__size(sym);
int nr_pcnt = 1, err;
@@ -2993,7 +3064,7 @@ int symbol__annotate2(struct symbol *sym, struct map *map, struct evsel *evsel,
if (perf_evsel__is_group_event(evsel))
nr_pcnt = evsel->core.nr_members;
- err = symbol__annotate(sym, map, evsel, 0, options, parch);
+ err = symbol__annotate(ms, evsel, 0, options, parch);
if (err)
goto out_free_offsets;
diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h
index d76fd0e81f46..7075d98f69d9 100644
--- a/tools/perf/util/annotate.h
+++ b/tools/perf/util/annotate.h
@@ -11,6 +11,7 @@
#include <pthread.h>
#include <asm/bug.h>
#include "symbol_conf.h"
+#include "spark.h"
struct hist_browser_timer;
struct hist_entry;
@@ -235,6 +236,7 @@ struct cyc_hist {
u64 cycles_aggr;
u64 cycles_max;
u64 cycles_min;
+ s64 cycles_spark[NUM_SPARKS];
u32 num;
u32 num_aggr;
u8 have_start;
@@ -347,11 +349,11 @@ int hist_entry__inc_addr_samples(struct hist_entry *he, struct perf_sample *samp
struct annotated_source *symbol__hists(struct symbol *sym, int nr_hists);
void symbol__annotate_zero_histograms(struct symbol *sym);
-int symbol__annotate(struct symbol *sym, struct map *map,
+int symbol__annotate(struct map_symbol *ms,
struct evsel *evsel, size_t privsize,
struct annotation_options *options,
struct arch **parch);
-int symbol__annotate2(struct symbol *sym, struct map *map,
+int symbol__annotate2(struct map_symbol *ms,
struct evsel *evsel,
struct annotation_options *options,
struct arch **parch);
@@ -378,11 +380,9 @@ enum symbol_disassemble_errno {
__SYMBOL_ANNOTATE_ERRNO__END,
};
-int symbol__strerror_disassemble(struct symbol *sym, struct map *map,
- int errnum, char *buf, size_t buflen);
+int symbol__strerror_disassemble(struct map_symbol *ms, int errnum, char *buf, size_t buflen);
-int symbol__annotate_printf(struct symbol *sym, struct map *map,
- struct evsel *evsel,
+int symbol__annotate_printf(struct map_symbol *ms, struct evsel *evsel,
struct annotation_options *options);
void symbol__annotate_zero_histogram(struct symbol *sym, int evidx);
void symbol__annotate_decay_histogram(struct symbol *sym, int evidx);
@@ -393,20 +393,16 @@ int map_symbol__annotation_dump(struct map_symbol *ms, struct evsel *evsel,
bool ui__has_annotation(void);
-int symbol__tty_annotate(struct symbol *sym, struct map *map,
- struct evsel *evsel, struct annotation_options *opts);
+int symbol__tty_annotate(struct map_symbol *ms, struct evsel *evsel, struct annotation_options *opts);
-int symbol__tty_annotate2(struct symbol *sym, struct map *map,
- struct evsel *evsel, struct annotation_options *opts);
+int symbol__tty_annotate2(struct map_symbol *ms, struct evsel *evsel, struct annotation_options *opts);
#ifdef HAVE_SLANG_SUPPORT
-int symbol__tui_annotate(struct symbol *sym, struct map *map,
- struct evsel *evsel,
+int symbol__tui_annotate(struct map_symbol *ms, struct evsel *evsel,
struct hist_browser_timer *hbt,
struct annotation_options *opts);
#else
-static inline int symbol__tui_annotate(struct symbol *sym __maybe_unused,
- struct map *map __maybe_unused,
+static inline int symbol__tui_annotate(struct map_symbol *ms __maybe_unused,
struct evsel *evsel __maybe_unused,
struct hist_browser_timer *hbt __maybe_unused,
struct annotation_options *opts __maybe_unused)
diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
index 8470dfe9fe97..eb087e7df6f4 100644
--- a/tools/perf/util/auxtrace.c
+++ b/tools/perf/util/auxtrace.c
@@ -31,6 +31,7 @@
#include "map.h"
#include "pmu.h"
#include "evsel.h"
+#include "evsel_config.h"
#include "symbol.h"
#include "util/synthetic-events.h"
#include "thread_map.h"
@@ -57,6 +58,72 @@
#include "symbol/kallsyms.h"
#include <internal/lib.h>
+static struct perf_pmu *perf_evsel__find_pmu(struct evsel *evsel)
+{
+ struct perf_pmu *pmu = NULL;
+
+ while ((pmu = perf_pmu__scan(pmu)) != NULL) {
+ if (pmu->type == evsel->core.attr.type)
+ break;
+ }
+
+ return pmu;
+}
+
+static bool perf_evsel__is_aux_event(struct evsel *evsel)
+{
+ struct perf_pmu *pmu = perf_evsel__find_pmu(evsel);
+
+ return pmu && pmu->auxtrace;
+}
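
For context: an AUX area event here is one whose PMU advertises auxtrace
capability (the pmu->auxtrace test above), e.g. Intel PT. The helper
perf_evsel__find_pmu() is not new; it is moved up from later in this file
(see the removal hunk further down) so these predicates can sit next to the
new sampling code.
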
+
+/*
+ * Make a group from 'leader' to 'last', requiring that the events were not
+ * already grouped to a different leader.
+ */
+static int perf_evlist__regroup(struct evlist *evlist,
+ struct evsel *leader,
+ struct evsel *last)
+{
+ struct evsel *evsel;
+ bool grp;
+
+ if (!perf_evsel__is_group_leader(leader))
+ return -EINVAL;
+
+ grp = false;
+ evlist__for_each_entry(evlist, evsel) {
+ if (grp) {
+ if (!(evsel->leader == leader ||
+ (evsel->leader == evsel &&
+ evsel->core.nr_members <= 1)))
+ return -EINVAL;
+ } else if (evsel == leader) {
+ grp = true;
+ }
+ if (evsel == last)
+ break;
+ }
+
+ grp = false;
+ evlist__for_each_entry(evlist, evsel) {
+ if (grp) {
+ if (evsel->leader != leader) {
+ evsel->leader = leader;
+ if (leader->core.nr_members < 1)
+ leader->core.nr_members = 1;
+ leader->core.nr_members += 1;
+ }
+ } else if (evsel == leader) {
+ grp = true;
+ }
+ if (evsel == last)
+ break;
+ }
+
+ return 0;
+}
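
In effect, for an evlist ordered [A, B, C] where A is a group leader and B
and C are ungrouped singletons, perf_evlist__regroup(evlist, A, C) first
verifies that B and C do not already belong to some other group, then points
their ->leader at A and grows A's nr_members, turning the three events into
one group led by A.
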
+
static bool auxtrace__dont_decode(struct perf_session *session)
{
return !session->itrace_synth_opts ||
@@ -597,6 +664,132 @@ int auxtrace_parse_snapshot_options(struct auxtrace_record *itr,
return -EINVAL;
}
+/*
+ * Event record size is 16-bit which results in a maximum size of about 64KiB.
+ * Allow about 4KiB for the rest of the sample record, to give a maximum
+ * AUX area sample size of 60KiB.
+ */
+#define MAX_AUX_SAMPLE_SIZE (60 * 1024)
+
+/* Arbitrary default size if no other default provided */
+#define DEFAULT_AUX_SAMPLE_SIZE (4 * 1024)
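
The arithmetic behind the limit: a 16-bit record size caps a record at 65536
bytes, and reserving about 4KiB (4096 bytes) for the non-AUX part of the
sample leaves 65536 - 4096 = 61440 bytes, which is exactly the 60 * 1024
used above.
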
+
+static int auxtrace_validate_aux_sample_size(struct evlist *evlist,
+ struct record_opts *opts)
+{
+ struct evsel *evsel;
+ bool has_aux_leader = false;
+ u32 sz;
+
+ evlist__for_each_entry(evlist, evsel) {
+ sz = evsel->core.attr.aux_sample_size;
+ if (perf_evsel__is_group_leader(evsel)) {
+ has_aux_leader = perf_evsel__is_aux_event(evsel);
+ if (sz) {
+ if (has_aux_leader)
+ pr_err("Cannot add AUX area sampling to an AUX area event\n");
+ else
+ pr_err("Cannot add AUX area sampling to a group leader\n");
+ return -EINVAL;
+ }
+ }
+ if (sz > MAX_AUX_SAMPLE_SIZE) {
+ pr_err("AUX area sample size %u too big, max. %d\n",
+ sz, MAX_AUX_SAMPLE_SIZE);
+ return -EINVAL;
+ }
+ if (sz) {
+ if (!has_aux_leader) {
+ pr_err("Cannot add AUX area sampling because group leader is not an AUX area event\n");
+ return -EINVAL;
+ }
+ perf_evsel__set_sample_bit(evsel, AUX);
+ opts->auxtrace_sample_mode = true;
+ } else {
+ perf_evsel__reset_sample_bit(evsel, AUX);
+ }
+ }
+
+ if (!opts->auxtrace_sample_mode) {
+ pr_err("AUX area sampling requires an AUX area event group leader plus other events to which to add samples\n");
+ return -EINVAL;
+ }
+
+ if (!perf_can_aux_sample()) {
+ pr_err("AUX area sampling is not supported by kernel\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int auxtrace_parse_sample_options(struct auxtrace_record *itr,
+ struct evlist *evlist,
+ struct record_opts *opts, const char *str)
+{
+ struct perf_evsel_config_term *term;
+ struct evsel *aux_evsel;
+ bool has_aux_sample_size = false;
+ bool has_aux_leader = false;
+ struct evsel *evsel;
+ char *endptr;
+ unsigned long sz;
+
+ if (!str)
+ goto no_opt;
+
+ if (!itr) {
+ pr_err("No AUX area event to sample\n");
+ return -EINVAL;
+ }
+
+ sz = strtoul(str, &endptr, 0);
+ if (*endptr || sz > UINT_MAX) {
+ pr_err("Bad AUX area sampling option: '%s'\n", str);
+ return -EINVAL;
+ }
+
+ if (!sz)
+ sz = itr->default_aux_sample_size;
+
+ if (!sz)
+ sz = DEFAULT_AUX_SAMPLE_SIZE;
+
+ /* Set aux_sample_size based on --aux-sample option */
+ evlist__for_each_entry(evlist, evsel) {
+ if (perf_evsel__is_group_leader(evsel)) {
+ has_aux_leader = perf_evsel__is_aux_event(evsel);
+ } else if (has_aux_leader) {
+ evsel->core.attr.aux_sample_size = sz;
+ }
+ }
+no_opt:
+ aux_evsel = NULL;
+ /* Override with aux_sample_size from config term */
+ evlist__for_each_entry(evlist, evsel) {
+ if (perf_evsel__is_aux_event(evsel))
+ aux_evsel = evsel;
+ term = perf_evsel__get_config_term(evsel, AUX_SAMPLE_SIZE);
+ if (term) {
+ has_aux_sample_size = true;
+ evsel->core.attr.aux_sample_size = term->val.aux_sample_size;
+ /* If possible, group with the AUX event */
+ if (aux_evsel && evsel->core.attr.aux_sample_size)
+ perf_evlist__regroup(evlist, aux_evsel, evsel);
+ }
+ }
+
+ if (!str && !has_aux_sample_size)
+ return 0;
+
+ if (!itr) {
+ pr_err("No AUX area event to sample\n");
+ return -EINVAL;
+ }
+
+ return auxtrace_validate_aux_sample_size(evlist, opts);
+}
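
Together with the "--aux-sample" record option referenced in the comment
above, the parsing here would be exercised roughly as in this illustrative
invocation (assuming an Intel PT capable system; the events are just an
example):

    $ perf record -e '{intel_pt//u,branch-misses:u}' --aux-sample=8192 -- ./workload

The AUX area event leads the group and each following event gets
attr.aux_sample_size = 8192; with no size given, the code above falls back
to the itr default and then to DEFAULT_AUX_SAMPLE_SIZE (4KiB).
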
+
struct auxtrace_record *__weak
auxtrace_record__init(struct evlist *evlist __maybe_unused, int *err)
{
@@ -811,6 +1004,113 @@ struct auxtrace_buffer *auxtrace_buffer__next(struct auxtrace_queue *queue,
}
}
+struct auxtrace_queue *auxtrace_queues__sample_queue(struct auxtrace_queues *queues,
+ struct perf_sample *sample,
+ struct perf_session *session)
+{
+ struct perf_sample_id *sid;
+ unsigned int idx;
+ u64 id;
+
+ id = sample->id;
+ if (!id)
+ return NULL;
+
+ sid = perf_evlist__id2sid(session->evlist, id);
+ if (!sid)
+ return NULL;
+
+ idx = sid->idx;
+
+ if (idx >= queues->nr_queues)
+ return NULL;
+
+ return &queues->queue_array[idx];
+}
+
+int auxtrace_queues__add_sample(struct auxtrace_queues *queues,
+ struct perf_session *session,
+ struct perf_sample *sample, u64 data_offset,
+ u64 reference)
+{
+ struct auxtrace_buffer buffer = {
+ .pid = -1,
+ .data_offset = data_offset,
+ .reference = reference,
+ .size = sample->aux_sample.size,
+ };
+ struct perf_sample_id *sid;
+ u64 id = sample->id;
+ unsigned int idx;
+
+ if (!id)
+ return -EINVAL;
+
+ sid = perf_evlist__id2sid(session->evlist, id);
+ if (!sid)
+ return -ENOENT;
+
+ idx = sid->idx;
+ buffer.tid = sid->tid;
+ buffer.cpu = sid->cpu;
+
+ return auxtrace_queues__add_buffer(queues, session, idx, &buffer, NULL);
+}
+
+struct queue_data {
+ bool samples;
+ bool events;
+};
+
+static int auxtrace_queue_data_cb(struct perf_session *session,
+ union perf_event *event, u64 offset,
+ void *data)
+{
+ struct queue_data *qd = data;
+ struct perf_sample sample;
+ int err;
+
+ if (qd->events && event->header.type == PERF_RECORD_AUXTRACE) {
+ if (event->header.size < sizeof(struct perf_record_auxtrace))
+ return -EINVAL;
+ offset += event->header.size;
+ return session->auxtrace->queue_data(session, NULL, event,
+ offset);
+ }
+
+ if (!qd->samples || event->header.type != PERF_RECORD_SAMPLE)
+ return 0;
+
+ err = perf_evlist__parse_sample(session->evlist, event, &sample);
+ if (err)
+ return err;
+
+ if (!sample.aux_sample.size)
+ return 0;
+
+ offset += sample.aux_sample.data - (void *)event;
+
+ return session->auxtrace->queue_data(session, &sample, NULL, offset);
+}
+
+int auxtrace_queue_data(struct perf_session *session, bool samples, bool events)
+{
+ struct queue_data qd = {
+ .samples = samples,
+ .events = events,
+ };
+
+ if (auxtrace__dont_decode(session))
+ return 0;
+
+ if (!session->auxtrace || !session->auxtrace->queue_data)
+ return -EINVAL;
+
+ return perf_session__peek_events(session, session->header.data_offset,
+ session->header.data_size,
+ auxtrace_queue_data_cb, &qd);
+}
+
void *auxtrace_buffer__get_data(struct auxtrace_buffer *buffer, int fd)
{
size_t adj = buffer->data_offset & (page_size - 1);
@@ -1457,6 +1757,34 @@ int auxtrace_cache__add(struct auxtrace_cache *c, u32 key,
return 0;
}
+static struct auxtrace_cache_entry *auxtrace_cache__rm(struct auxtrace_cache *c,
+ u32 key)
+{
+ struct auxtrace_cache_entry *entry;
+ struct hlist_head *hlist;
+ struct hlist_node *n;
+
+ if (!c)
+ return NULL;
+
+ hlist = &c->hashtable[hash_32(key, c->bits)];
+ hlist_for_each_entry_safe(entry, n, hlist, hash) {
+ if (entry->key == key) {
+ hlist_del(&entry->hash);
+ return entry;
+ }
+ }
+
+ return NULL;
+}
+
+void auxtrace_cache__remove(struct auxtrace_cache *c, u32 key)
+{
+ struct auxtrace_cache_entry *entry = auxtrace_cache__rm(c, key);
+
+ auxtrace_cache__free_entry(c, entry);
+}
+
void *auxtrace_cache__lookup(struct auxtrace_cache *c, u32 key)
{
struct auxtrace_cache_entry *entry;
@@ -2152,18 +2480,6 @@ out_exit:
return err;
}
-static struct perf_pmu *perf_evsel__find_pmu(struct evsel *evsel)
-{
- struct perf_pmu *pmu = NULL;
-
- while ((pmu = perf_pmu__scan(pmu)) != NULL) {
- if (pmu->type == evsel->core.attr.type)
- break;
- }
-
- return pmu;
-}
-
static int perf_evsel__nr_addr_filter(struct evsel *evsel)
{
struct perf_pmu *pmu = perf_evsel__find_pmu(evsel);
@@ -2208,6 +2524,16 @@ int auxtrace__process_event(struct perf_session *session, union perf_event *even
return session->auxtrace->process_event(session, event, sample, tool);
}
+void auxtrace__dump_auxtrace_sample(struct perf_session *session,
+ struct perf_sample *sample)
+{
+ if (!session->auxtrace || !session->auxtrace->dump_auxtrace_sample ||
+ auxtrace__dont_decode(session))
+ return;
+
+ session->auxtrace->dump_auxtrace_sample(session, sample);
+}
+
int auxtrace__flush_events(struct perf_session *session, struct perf_tool *tool)
{
if (!session->auxtrace)
diff --git a/tools/perf/util/auxtrace.h b/tools/perf/util/auxtrace.h
index f201f36bc35f..749d72cd9c7b 100644
--- a/tools/perf/util/auxtrace.h
+++ b/tools/perf/util/auxtrace.h
@@ -141,6 +141,9 @@ struct auxtrace_index {
* struct auxtrace - session callbacks to allow AUX area data decoding.
* @process_event: lets the decoder see all session events
* @process_auxtrace_event: process a PERF_RECORD_AUXTRACE event
+ * @queue_data: queue an AUX sample or PERF_RECORD_AUXTRACE event for later
+ * processing
+ * @dump_auxtrace_sample: dump AUX area sample data
* @flush_events: process any remaining data
* @free_events: free resources associated with event processing
* @free: free resources associated with the session
@@ -153,6 +156,11 @@ struct auxtrace {
int (*process_auxtrace_event)(struct perf_session *session,
union perf_event *event,
struct perf_tool *tool);
+ int (*queue_data)(struct perf_session *session,
+ struct perf_sample *sample, union perf_event *event,
+ u64 data_offset);
+ void (*dump_auxtrace_sample)(struct perf_session *session,
+ struct perf_sample *sample);
int (*flush_events)(struct perf_session *session,
struct perf_tool *tool);
void (*free_events)(struct perf_session *session);
@@ -313,6 +321,7 @@ struct auxtrace_mmap_params {
* @reference: provide a 64-bit reference number for auxtrace_event
* @read_finish: called after reading from an auxtrace mmap
* @alignment: alignment (if any) for AUX area data
+ * @default_aux_sample_size: default sample size for --aux-sample option
*/
struct auxtrace_record {
int (*recording_options)(struct auxtrace_record *itr,
@@ -336,6 +345,7 @@ struct auxtrace_record {
u64 (*reference)(struct auxtrace_record *itr);
int (*read_finish)(struct auxtrace_record *itr, int idx);
unsigned int alignment;
+ unsigned int default_aux_sample_size;
};
/**
@@ -462,9 +472,19 @@ int auxtrace_queues__add_event(struct auxtrace_queues *queues,
struct perf_session *session,
union perf_event *event, off_t data_offset,
struct auxtrace_buffer **buffer_ptr);
+struct auxtrace_queue *
+auxtrace_queues__sample_queue(struct auxtrace_queues *queues,
+ struct perf_sample *sample,
+ struct perf_session *session);
+int auxtrace_queues__add_sample(struct auxtrace_queues *queues,
+ struct perf_session *session,
+ struct perf_sample *sample, u64 data_offset,
+ u64 reference);
void auxtrace_queues__free(struct auxtrace_queues *queues);
int auxtrace_queues__process_index(struct auxtrace_queues *queues,
struct perf_session *session);
+int auxtrace_queue_data(struct perf_session *session, bool samples,
+ bool events);
struct auxtrace_buffer *auxtrace_buffer__next(struct auxtrace_queue *queue,
struct auxtrace_buffer *buffer);
void *auxtrace_buffer__get_data(struct auxtrace_buffer *buffer, int fd);
@@ -489,6 +509,7 @@ void *auxtrace_cache__alloc_entry(struct auxtrace_cache *c);
void auxtrace_cache__free_entry(struct auxtrace_cache *c, void *entry);
int auxtrace_cache__add(struct auxtrace_cache *c, u32 key,
struct auxtrace_cache_entry *entry);
+void auxtrace_cache__remove(struct auxtrace_cache *c, u32 key);
void *auxtrace_cache__lookup(struct auxtrace_cache *c, u32 key);
struct auxtrace_record *auxtrace_record__init(struct evlist *evlist,
@@ -497,6 +518,9 @@ struct auxtrace_record *auxtrace_record__init(struct evlist *evlist,
int auxtrace_parse_snapshot_options(struct auxtrace_record *itr,
struct record_opts *opts,
const char *str);
+int auxtrace_parse_sample_options(struct auxtrace_record *itr,
+ struct evlist *evlist,
+ struct record_opts *opts, const char *str);
int auxtrace_record__options(struct auxtrace_record *itr,
struct evlist *evlist,
struct record_opts *opts);
@@ -549,6 +573,8 @@ int auxtrace_parse_filters(struct evlist *evlist);
int auxtrace__process_event(struct perf_session *session, union perf_event *event,
struct perf_sample *sample, struct perf_tool *tool);
+void auxtrace__dump_auxtrace_sample(struct perf_session *session,
+ struct perf_sample *sample);
int auxtrace__flush_events(struct perf_session *session, struct perf_tool *tool);
void auxtrace__free_events(struct perf_session *session);
void auxtrace__free(struct perf_session *session);
@@ -648,6 +674,18 @@ int auxtrace_parse_snapshot_options(struct auxtrace_record *itr __maybe_unused,
}
static inline
+int auxtrace_parse_sample_options(struct auxtrace_record *itr __maybe_unused,
+ struct evlist *evlist __maybe_unused,
+ struct record_opts *opts __maybe_unused,
+ const char *str)
+{
+ if (!str)
+ return 0;
+ pr_err("AUX area tracing not supported\n");
+ return -EINVAL;
+}
+
+static inline
int auxtrace__process_event(struct perf_session *session __maybe_unused,
union perf_event *event __maybe_unused,
struct perf_sample *sample __maybe_unused,
@@ -657,6 +695,12 @@ int auxtrace__process_event(struct perf_session *session __maybe_unused,
}
static inline
+void auxtrace__dump_auxtrace_sample(struct perf_session *session __maybe_unused,
+ struct perf_sample *sample __maybe_unused)
+{
+}
+
+static inline
int auxtrace__flush_events(struct perf_session *session __maybe_unused,
struct perf_tool *tool __maybe_unused)
{
diff --git a/tools/perf/util/block-info.c b/tools/perf/util/block-info.c
new file mode 100644
index 000000000000..c4b030bf6ec2
--- /dev/null
+++ b/tools/perf/util/block-info.c
@@ -0,0 +1,477 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <stdlib.h>
+#include <string.h>
+#include <linux/zalloc.h>
+#include "block-info.h"
+#include "sort.h"
+#include "annotate.h"
+#include "symbol.h"
+#include "dso.h"
+#include "map.h"
+#include "srcline.h"
+#include "evlist.h"
+#include "hist.h"
+#include "ui/browsers/hists.h"
+
+static struct block_header_column {
+ const char *name;
+ int width;
+} block_columns[PERF_HPP_REPORT__BLOCK_MAX_INDEX] = {
+ [PERF_HPP_REPORT__BLOCK_TOTAL_CYCLES_PCT] = {
+ .name = "Sampled Cycles%",
+ .width = 15,
+ },
+ [PERF_HPP_REPORT__BLOCK_LBR_CYCLES] = {
+ .name = "Sampled Cycles",
+ .width = 14,
+ },
+ [PERF_HPP_REPORT__BLOCK_CYCLES_PCT] = {
+ .name = "Avg Cycles%",
+ .width = 11,
+ },
+ [PERF_HPP_REPORT__BLOCK_AVG_CYCLES] = {
+ .name = "Avg Cycles",
+ .width = 10,
+ },
+ [PERF_HPP_REPORT__BLOCK_RANGE] = {
+ .name = "[Program Block Range]",
+ .width = 70,
+ },
+ [PERF_HPP_REPORT__BLOCK_DSO] = {
+ .name = "Shared Object",
+ .width = 20,
+ }
+};
+
+struct block_info *block_info__get(struct block_info *bi)
+{
+ if (bi)
+ refcount_inc(&bi->refcnt);
+ return bi;
+}
+
+void block_info__put(struct block_info *bi)
+{
+ if (bi && refcount_dec_and_test(&bi->refcnt))
+ free(bi);
+}
+
+struct block_info *block_info__new(void)
+{
+ struct block_info *bi = zalloc(sizeof(*bi));
+
+ if (bi)
+ refcount_set(&bi->refcnt, 1);
+ return bi;
+}
+
+int64_t block_info__cmp(struct perf_hpp_fmt *fmt __maybe_unused,
+ struct hist_entry *left, struct hist_entry *right)
+{
+ struct block_info *bi_l = left->block_info;
+ struct block_info *bi_r = right->block_info;
+ int cmp;
+
+ if (!bi_l->sym || !bi_r->sym) {
+ if (!bi_l->sym && !bi_r->sym)
+ return 0;
+ else if (!bi_l->sym)
+ return -1;
+ else
+ return 1;
+ }
+
+ if (bi_l->sym == bi_r->sym) {
+ if (bi_l->start == bi_r->start) {
+ if (bi_l->end == bi_r->end)
+ return 0;
+ else
+ return (int64_t)(bi_r->end - bi_l->end);
+ } else
+ return (int64_t)(bi_r->start - bi_l->start);
+ } else {
+ cmp = strcmp(bi_l->sym->name, bi_r->sym->name);
+ return cmp;
+ }
+
+ if (bi_l->sym->start != bi_r->sym->start)
+ return (int64_t)(bi_r->sym->start - bi_l->sym->start);
+
+ return (int64_t)(bi_r->sym->end - bi_l->sym->end);
+}
+
+static void init_block_info(struct block_info *bi, struct symbol *sym,
+ struct cyc_hist *ch, int offset,
+ u64 total_cycles)
+{
+ bi->sym = sym;
+ bi->start = ch->start;
+ bi->end = offset;
+ bi->cycles = ch->cycles;
+ bi->cycles_aggr = ch->cycles_aggr;
+ bi->num = ch->num;
+ bi->num_aggr = ch->num_aggr;
+ bi->total_cycles = total_cycles;
+
+ memcpy(bi->cycles_spark, ch->cycles_spark,
+ NUM_SPARKS * sizeof(u64));
+}
+
+int block_info__process_sym(struct hist_entry *he, struct block_hist *bh,
+ u64 *block_cycles_aggr, u64 total_cycles)
+{
+ struct annotation *notes;
+ struct cyc_hist *ch;
+ static struct addr_location al;
+ u64 cycles = 0;
+
+ if (!he->ms.map || !he->ms.sym)
+ return 0;
+
+ memset(&al, 0, sizeof(al));
+ al.map = he->ms.map;
+ al.sym = he->ms.sym;
+
+ notes = symbol__annotation(he->ms.sym);
+ if (!notes || !notes->src || !notes->src->cycles_hist)
+ return 0;
+ ch = notes->src->cycles_hist;
+ for (unsigned int i = 0; i < symbol__size(he->ms.sym); i++) {
+ if (ch[i].num_aggr) {
+ struct block_info *bi;
+ struct hist_entry *he_block;
+
+ bi = block_info__new();
+ if (!bi)
+ return -1;
+
+ init_block_info(bi, he->ms.sym, &ch[i], i,
+ total_cycles);
+ cycles += bi->cycles_aggr / bi->num_aggr;
+
+ he_block = hists__add_entry_block(&bh->block_hists,
+ &al, bi);
+ if (!he_block) {
+ block_info__put(bi);
+ return -1;
+ }
+ }
+ }
+
+ if (block_cycles_aggr)
+ *block_cycles_aggr += cycles;
+
+ return 0;
+}
+
+static int block_column_header(struct perf_hpp_fmt *fmt,
+ struct perf_hpp *hpp,
+ struct hists *hists __maybe_unused,
+ int line __maybe_unused,
+ int *span __maybe_unused)
+{
+ struct block_fmt *block_fmt = container_of(fmt, struct block_fmt, fmt);
+
+ return scnprintf(hpp->buf, hpp->size, "%*s", block_fmt->width,
+ block_fmt->header);
+}
+
+static int block_column_width(struct perf_hpp_fmt *fmt,
+ struct perf_hpp *hpp __maybe_unused,
+ struct hists *hists __maybe_unused)
+{
+ struct block_fmt *block_fmt = container_of(fmt, struct block_fmt, fmt);
+
+ return block_fmt->width;
+}
+
+static int block_total_cycles_pct_entry(struct perf_hpp_fmt *fmt,
+ struct perf_hpp *hpp,
+ struct hist_entry *he)
+{
+ struct block_fmt *block_fmt = container_of(fmt, struct block_fmt, fmt);
+ struct block_info *bi = he->block_info;
+ double ratio = 0.0;
+ char buf[16];
+
+ if (block_fmt->total_cycles)
+ ratio = (double)bi->cycles / (double)block_fmt->total_cycles;
+
+ sprintf(buf, "%.2f%%", 100.0 * ratio);
+
+ return scnprintf(hpp->buf, hpp->size, "%*s", block_fmt->width, buf);
+}
+
+static int64_t block_total_cycles_pct_sort(struct perf_hpp_fmt *fmt,
+ struct hist_entry *left,
+ struct hist_entry *right)
+{
+ struct block_fmt *block_fmt = container_of(fmt, struct block_fmt, fmt);
+ struct block_info *bi_l = left->block_info;
+ struct block_info *bi_r = right->block_info;
+ double l, r;
+
+ if (block_fmt->total_cycles) {
+ l = ((double)bi_l->cycles /
+ (double)block_fmt->total_cycles) * 100000.0;
+ r = ((double)bi_r->cycles /
+ (double)block_fmt->total_cycles) * 100000.0;
+ return (int64_t)l - (int64_t)r;
+ }
+
+ return 0;
+}
+
+static void cycles_string(u64 cycles, char *buf, int size)
+{
+ if (cycles >= 1000000)
+ scnprintf(buf, size, "%.1fM", (double)cycles / 1000000.0);
+ else if (cycles >= 1000)
+ scnprintf(buf, size, "%.1fK", (double)cycles / 1000.0);
+ else
+ scnprintf(buf, size, "%1d", cycles);
+}
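
So, for example, cycles_string() renders 950 as "950", 61440 as "61.4K" and
2500000 as "2.5M".
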
+
+static int block_cycles_lbr_entry(struct perf_hpp_fmt *fmt,
+ struct perf_hpp *hpp, struct hist_entry *he)
+{
+ struct block_fmt *block_fmt = container_of(fmt, struct block_fmt, fmt);
+ struct block_info *bi = he->block_info;
+ char cycles_buf[16];
+
+ cycles_string(bi->cycles_aggr, cycles_buf, sizeof(cycles_buf));
+
+ return scnprintf(hpp->buf, hpp->size, "%*s", block_fmt->width,
+ cycles_buf);
+}
+
+static int block_cycles_pct_entry(struct perf_hpp_fmt *fmt,
+ struct perf_hpp *hpp, struct hist_entry *he)
+{
+ struct block_fmt *block_fmt = container_of(fmt, struct block_fmt, fmt);
+ struct block_info *bi = he->block_info;
+ double ratio = 0.0;
+ u64 avg;
+ char buf[16];
+
+ if (block_fmt->block_cycles && bi->num_aggr) {
+ avg = bi->cycles_aggr / bi->num_aggr;
+ ratio = (double)avg / (double)block_fmt->block_cycles;
+ }
+
+ sprintf(buf, "%.2f%%", 100.0 * ratio);
+
+ return scnprintf(hpp->buf, hpp->size, "%*s", block_fmt->width, buf);
+}
+
+static int block_avg_cycles_entry(struct perf_hpp_fmt *fmt,
+ struct perf_hpp *hpp,
+ struct hist_entry *he)
+{
+ struct block_fmt *block_fmt = container_of(fmt, struct block_fmt, fmt);
+ struct block_info *bi = he->block_info;
+ char cycles_buf[16];
+
+ cycles_string(bi->cycles_aggr / bi->num_aggr, cycles_buf,
+ sizeof(cycles_buf));
+
+ return scnprintf(hpp->buf, hpp->size, "%*s", block_fmt->width,
+ cycles_buf);
+}
+
+static int block_range_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
+ struct hist_entry *he)
+{
+ struct block_fmt *block_fmt = container_of(fmt, struct block_fmt, fmt);
+ struct block_info *bi = he->block_info;
+ char buf[128];
+ char *start_line, *end_line;
+
+ symbol_conf.disable_add2line_warn = true;
+
+ start_line = map__srcline(he->ms.map, bi->sym->start + bi->start,
+ he->ms.sym);
+
+ end_line = map__srcline(he->ms.map, bi->sym->start + bi->end,
+ he->ms.sym);
+
+ if ((start_line != SRCLINE_UNKNOWN) && (end_line != SRCLINE_UNKNOWN)) {
+ scnprintf(buf, sizeof(buf), "[%s -> %s]",
+ start_line, end_line);
+ } else {
+ scnprintf(buf, sizeof(buf), "[%7lx -> %7lx]",
+ bi->start, bi->end);
+ }
+
+ free_srcline(start_line);
+ free_srcline(end_line);
+
+ return scnprintf(hpp->buf, hpp->size, "%*s", block_fmt->width, buf);
+}
+
+static int block_dso_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
+ struct hist_entry *he)
+{
+ struct block_fmt *block_fmt = container_of(fmt, struct block_fmt, fmt);
+ struct map *map = he->ms.map;
+
+ if (map && map->dso) {
+ return scnprintf(hpp->buf, hpp->size, "%*s", block_fmt->width,
+ map->dso->short_name);
+ }
+
+ return scnprintf(hpp->buf, hpp->size, "%*s", block_fmt->width,
+ "[unknown]");
+}
+
+static void init_block_header(struct block_fmt *block_fmt)
+{
+ struct perf_hpp_fmt *fmt = &block_fmt->fmt;
+
+ BUG_ON(block_fmt->idx >= PERF_HPP_REPORT__BLOCK_MAX_INDEX);
+
+ block_fmt->header = block_columns[block_fmt->idx].name;
+ block_fmt->width = block_columns[block_fmt->idx].width;
+
+ fmt->header = block_column_header;
+ fmt->width = block_column_width;
+}
+
+static void hpp_register(struct block_fmt *block_fmt, int idx,
+ struct perf_hpp_list *hpp_list)
+{
+ struct perf_hpp_fmt *fmt = &block_fmt->fmt;
+
+ block_fmt->idx = idx;
+ INIT_LIST_HEAD(&fmt->list);
+ INIT_LIST_HEAD(&fmt->sort_list);
+
+ switch (idx) {
+ case PERF_HPP_REPORT__BLOCK_TOTAL_CYCLES_PCT:
+ fmt->entry = block_total_cycles_pct_entry;
+ fmt->cmp = block_info__cmp;
+ fmt->sort = block_total_cycles_pct_sort;
+ break;
+ case PERF_HPP_REPORT__BLOCK_LBR_CYCLES:
+ fmt->entry = block_cycles_lbr_entry;
+ break;
+ case PERF_HPP_REPORT__BLOCK_CYCLES_PCT:
+ fmt->entry = block_cycles_pct_entry;
+ break;
+ case PERF_HPP_REPORT__BLOCK_AVG_CYCLES:
+ fmt->entry = block_avg_cycles_entry;
+ break;
+ case PERF_HPP_REPORT__BLOCK_RANGE:
+ fmt->entry = block_range_entry;
+ break;
+ case PERF_HPP_REPORT__BLOCK_DSO:
+ fmt->entry = block_dso_entry;
+ break;
+ default:
+ return;
+ }
+
+ init_block_header(block_fmt);
+ perf_hpp_list__column_register(hpp_list, fmt);
+}
+
+static void register_block_columns(struct perf_hpp_list *hpp_list,
+ struct block_fmt *block_fmts)
+{
+ for (int i = 0; i < PERF_HPP_REPORT__BLOCK_MAX_INDEX; i++)
+ hpp_register(&block_fmts[i], i, hpp_list);
+}
+
+static void init_block_hist(struct block_hist *bh, struct block_fmt *block_fmts)
+{
+ __hists__init(&bh->block_hists, &bh->block_list);
+ perf_hpp_list__init(&bh->block_list);
+ bh->block_list.nr_header_lines = 1;
+
+ register_block_columns(&bh->block_list, block_fmts);
+
+ perf_hpp_list__register_sort_field(&bh->block_list,
+ &block_fmts[PERF_HPP_REPORT__BLOCK_TOTAL_CYCLES_PCT].fmt);
+}
+
+static void process_block_report(struct hists *hists,
+ struct block_report *block_report,
+ u64 total_cycles)
+{
+ struct rb_node *next = rb_first_cached(&hists->entries);
+ struct block_hist *bh = &block_report->hist;
+ struct hist_entry *he;
+
+ init_block_hist(bh, block_report->fmts);
+
+ while (next) {
+ he = rb_entry(next, struct hist_entry, rb_node);
+ block_info__process_sym(he, bh, &block_report->cycles,
+ total_cycles);
+ next = rb_next(&he->rb_node);
+ }
+
+ for (int i = 0; i < PERF_HPP_REPORT__BLOCK_MAX_INDEX; i++) {
+ block_report->fmts[i].total_cycles = total_cycles;
+ block_report->fmts[i].block_cycles = block_report->cycles;
+ }
+
+ hists__output_resort(&bh->block_hists, NULL);
+}
+
+struct block_report *block_info__create_report(struct evlist *evlist,
+ u64 total_cycles)
+{
+ struct block_report *block_reports;
+ int nr_hists = evlist->core.nr_entries, i = 0;
+ struct evsel *pos;
+
+ block_reports = calloc(nr_hists, sizeof(struct block_report));
+ if (!block_reports)
+ return NULL;
+
+ evlist__for_each_entry(evlist, pos) {
+ struct hists *hists = evsel__hists(pos);
+
+ process_block_report(hists, &block_reports[i], total_cycles);
+ i++;
+ }
+
+ return block_reports;
+}
+
+int report__browse_block_hists(struct block_hist *bh, float min_percent,
+ struct evsel *evsel, struct perf_env *env,
+ struct annotation_options *annotation_opts)
+{
+ int ret;
+
+ switch (use_browser) {
+ case 0:
+ symbol_conf.report_individual_block = true;
+ hists__fprintf(&bh->block_hists, true, 0, 0, min_percent,
+ stdout, true);
+ hists__delete_entries(&bh->block_hists);
+ return 0;
+ case 1:
+ symbol_conf.report_individual_block = true;
+ ret = block_hists_tui_browse(bh, evsel, min_percent,
+ env, annotation_opts);
+ hists__delete_entries(&bh->block_hists);
+ return ret;
+ default:
+ return -1;
+ }
+
+ return 0;
+}
+
+float block_info__total_cycles_percent(struct hist_entry *he)
+{
+ struct block_info *bi = he->block_info;
+
+ if (bi->total_cycles)
+ return bi->cycles * 100.0 / bi->total_cycles;
+
+ return 0.0;
+}
diff --git a/tools/perf/util/block-info.h b/tools/perf/util/block-info.h
new file mode 100644
index 000000000000..bef0d75e9819
--- /dev/null
+++ b/tools/perf/util/block-info.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __PERF_BLOCK_H
+#define __PERF_BLOCK_H
+
+#include <linux/types.h>
+#include <linux/refcount.h>
+#include "hist.h"
+#include "symbol.h"
+#include "sort.h"
+#include "ui/ui.h"
+
+struct block_info {
+ struct symbol *sym;
+ u64 start;
+ u64 end;
+ u64 cycles;
+ u64 cycles_aggr;
+ s64 cycles_spark[NUM_SPARKS];
+ u64 total_cycles;
+ int num;
+ int num_aggr;
+ refcount_t refcnt;
+};
+
+struct block_fmt {
+ struct perf_hpp_fmt fmt;
+ int idx;
+ int width;
+ const char *header;
+ u64 total_cycles;
+ u64 block_cycles;
+};
+
+enum {
+ PERF_HPP_REPORT__BLOCK_TOTAL_CYCLES_PCT,
+ PERF_HPP_REPORT__BLOCK_LBR_CYCLES,
+ PERF_HPP_REPORT__BLOCK_CYCLES_PCT,
+ PERF_HPP_REPORT__BLOCK_AVG_CYCLES,
+ PERF_HPP_REPORT__BLOCK_RANGE,
+ PERF_HPP_REPORT__BLOCK_DSO,
+ PERF_HPP_REPORT__BLOCK_MAX_INDEX
+};
+
+struct block_report {
+ struct block_hist hist;
+ u64 cycles;
+ struct block_fmt fmts[PERF_HPP_REPORT__BLOCK_MAX_INDEX];
+};
+
+struct block_hist;
+
+struct block_info *block_info__new(void);
+struct block_info *block_info__get(struct block_info *bi);
+void block_info__put(struct block_info *bi);
+
+static inline void __block_info__zput(struct block_info **bi)
+{
+ block_info__put(*bi);
+ *bi = NULL;
+}
+
+#define block_info__zput(bi) __block_info__zput(&bi)
+
+int64_t block_info__cmp(struct perf_hpp_fmt *fmt __maybe_unused,
+ struct hist_entry *left, struct hist_entry *right);
+
+int block_info__process_sym(struct hist_entry *he, struct block_hist *bh,
+ u64 *block_cycles_aggr, u64 total_cycles);
+
+struct block_report *block_info__create_report(struct evlist *evlist,
+ u64 total_cycles);
+
+int report__browse_block_hists(struct block_hist *bh, float min_percent,
+ struct evsel *evsel, struct perf_env *env,
+ struct annotation_options *annotation_opts);
+
+float block_info__total_cycles_percent(struct hist_entry *he);
+
+#endif /* __PERF_BLOCK_H */
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c
index 9a9b56ed3f0a..5cefce33b66b 100644
--- a/tools/perf/util/callchain.c
+++ b/tools/perf/util/callchain.c
@@ -582,8 +582,8 @@ fill_node(struct callchain_node *node, struct callchain_cursor *cursor)
return -1;
}
call->ip = cursor_node->ip;
- call->ms.sym = cursor_node->sym;
- call->ms.map = map__get(cursor_node->map);
+ call->ms = cursor_node->ms;
+ map__get(call->ms.map);
call->srcline = cursor_node->srcline;
if (cursor_node->branch) {
@@ -720,21 +720,21 @@ static enum match_result match_chain(struct callchain_cursor_node *node,
/* otherwise fall-back to symbol-based comparison below */
__fallthrough;
case CCKEY_FUNCTION:
- if (node->sym && cnode->ms.sym) {
+ if (node->ms.sym && cnode->ms.sym) {
/*
* Compare inlined frames based on their symbol name
* because different inlined frames will have the same
* symbol start. Otherwise do a faster comparison based
* on the symbol start address.
*/
- if (cnode->ms.sym->inlined || node->sym->inlined) {
+ if (cnode->ms.sym->inlined || node->ms.sym->inlined) {
match = match_chain_strings(cnode->ms.sym->name,
- node->sym->name);
+ node->ms.sym->name);
if (match != MATCH_ERROR)
break;
} else {
match = match_chain_dso_addresses(cnode->ms.map, cnode->ms.sym->start,
- node->map, node->sym->start);
+ node->ms.map, node->ms.sym->start);
break;
}
}
@@ -742,7 +742,7 @@ static enum match_result match_chain(struct callchain_cursor_node *node,
__fallthrough;
case CCKEY_ADDRESS:
default:
- match = match_chain_dso_addresses(cnode->ms.map, cnode->ip, node->map, node->ip);
+ match = match_chain_dso_addresses(cnode->ms.map, cnode->ip, node->ms.map, node->ip);
break;
}
@@ -1004,8 +1004,7 @@ merge_chain_branch(struct callchain_cursor *cursor,
int err = 0;
list_for_each_entry_safe(list, next_list, &src->val, list) {
- callchain_cursor_append(cursor, list->ip,
- list->ms.map, list->ms.sym,
+ callchain_cursor_append(cursor, list->ip, &list->ms,
false, NULL, 0, 0, 0, list->srcline);
list_del_init(&list->list);
map__zput(list->ms.map);
@@ -1044,7 +1043,7 @@ int callchain_merge(struct callchain_cursor *cursor,
}
int callchain_cursor_append(struct callchain_cursor *cursor,
- u64 ip, struct map *map, struct symbol *sym,
+ u64 ip, struct map_symbol *ms,
bool branch, struct branch_flags *flags,
int nr_loop_iter, u64 iter_cycles, u64 branch_from,
const char *srcline)
@@ -1060,9 +1059,9 @@ int callchain_cursor_append(struct callchain_cursor *cursor,
}
node->ip = ip;
- map__zput(node->map);
- node->map = map__get(map);
- node->sym = sym;
+ map__zput(node->ms.map);
+ node->ms = *ms;
+ map__get(node->ms.map);
node->branch = branch;
node->nr_loop_iter = nr_loop_iter;
node->iter_cycles = iter_cycles;
@@ -1107,8 +1106,9 @@ int hist_entry__append_callchain(struct hist_entry *he, struct perf_sample *samp
int fill_callchain_info(struct addr_location *al, struct callchain_cursor_node *node,
bool hide_unresolved)
{
- al->map = node->map;
- al->sym = node->sym;
+ al->mg = node->ms.mg;
+ al->map = node->ms.map;
+ al->sym = node->ms.sym;
al->srcline = node->srcline;
al->addr = node->ip;
@@ -1119,8 +1119,8 @@ int fill_callchain_info(struct addr_location *al, struct callchain_cursor_node *
goto out;
}
- if (al->map->groups == &al->machine->kmaps) {
- if (machine__is_host(al->machine)) {
+ if (al->mg == &al->mg->machine->kmaps) {
+ if (machine__is_host(al->mg->machine)) {
al->cpumode = PERF_RECORD_MISC_KERNEL;
al->level = 'k';
} else {
@@ -1128,7 +1128,7 @@ int fill_callchain_info(struct addr_location *al, struct callchain_cursor_node *
al->level = 'g';
}
} else {
- if (machine__is_host(al->machine)) {
+ if (machine__is_host(al->mg->machine)) {
al->cpumode = PERF_RECORD_MISC_USER;
al->level = '.';
} else if (perf_guest) {
@@ -1571,7 +1571,7 @@ int callchain_cursor__copy(struct callchain_cursor *dst,
if (node == NULL)
break;
- rc = callchain_cursor_append(dst, node->ip, node->map, node->sym,
+ rc = callchain_cursor_append(dst, node->ip, &node->ms,
node->branch, &node->branch_flags,
node->nr_loop_iter,
node->iter_cycles,
@@ -1597,5 +1597,5 @@ void callchain_cursor_reset(struct callchain_cursor *cursor)
cursor->last = &cursor->first;
for (node = cursor->first; node != NULL; node = node->next)
- map__zput(node->map);
+ map__zput(node->ms.map);
}
diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h
index 83398e5bbe4b..706bb7bbe1e1 100644
--- a/tools/perf/util/callchain.h
+++ b/tools/perf/util/callchain.h
@@ -141,8 +141,7 @@ struct callchain_list {
*/
struct callchain_cursor_node {
u64 ip;
- struct map *map;
- struct symbol *sym;
+ struct map_symbol ms;
const char *srcline;
bool branch;
struct branch_flags branch_flags;
@@ -195,7 +194,7 @@ int callchain_merge(struct callchain_cursor *cursor,
void callchain_cursor_reset(struct callchain_cursor *cursor);
int callchain_cursor_append(struct callchain_cursor *cursor, u64 ip,
- struct map *map, struct symbol *sym,
+ struct map_symbol *ms,
bool branch, struct branch_flags *flags,
int nr_loop_iter, u64 iter_cycles, u64 branch_from,
const char *srcline);
diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c
index a22c1114e880..983b7388f22b 100644
--- a/tools/perf/util/cpumap.c
+++ b/tools/perf/util/cpumap.c
@@ -206,6 +206,11 @@ int cpu_map__get_core_id(int cpu)
return ret ?: value;
}
+int cpu_map__get_node_id(int cpu)
+{
+ return cpu__get_node(cpu);
+}
+
int cpu_map__get_core(struct perf_cpu_map *map, int idx, void *data)
{
int cpu, s_die;
@@ -235,6 +240,14 @@ int cpu_map__get_core(struct perf_cpu_map *map, int idx, void *data)
return (s_die << 16) | (cpu & 0xffff);
}
+int cpu_map__get_node(struct perf_cpu_map *map, int idx, void *data __maybe_unused)
+{
+ if (idx < 0 || idx >= map->nr)
+ return -1;
+
+ return cpu_map__get_node_id(map->map[idx]);
+}
+
int cpu_map__build_socket_map(struct perf_cpu_map *cpus, struct perf_cpu_map **sockp)
{
return cpu_map__build_map(cpus, sockp, cpu_map__get_socket, NULL);
@@ -250,6 +263,11 @@ int cpu_map__build_core_map(struct perf_cpu_map *cpus, struct perf_cpu_map **cor
return cpu_map__build_map(cpus, corep, cpu_map__get_core, NULL);
}
+int cpu_map__build_node_map(struct perf_cpu_map *cpus, struct perf_cpu_map **numap)
+{
+ return cpu_map__build_map(cpus, numap, cpu_map__get_node, NULL);
+}
+
/* setup simple routines to easily access node numbers given a cpu number */
static int get_max_num(char *path, int *max)
{
diff --git a/tools/perf/util/cpumap.h b/tools/perf/util/cpumap.h
index 2553bef1279d..57943f3685f8 100644
--- a/tools/perf/util/cpumap.h
+++ b/tools/perf/util/cpumap.h
@@ -20,9 +20,12 @@ int cpu_map__get_die_id(int cpu);
int cpu_map__get_die(struct perf_cpu_map *map, int idx, void *data);
int cpu_map__get_core_id(int cpu);
int cpu_map__get_core(struct perf_cpu_map *map, int idx, void *data);
+int cpu_map__get_node_id(int cpu);
+int cpu_map__get_node(struct perf_cpu_map *map, int idx, void *data);
int cpu_map__build_socket_map(struct perf_cpu_map *cpus, struct perf_cpu_map **sockp);
int cpu_map__build_die_map(struct perf_cpu_map *cpus, struct perf_cpu_map **diep);
int cpu_map__build_core_map(struct perf_cpu_map *cpus, struct perf_cpu_map **corep);
+int cpu_map__build_node_map(struct perf_cpu_map *cpus, struct perf_cpu_map **nodep);
const struct perf_cpu_map *cpu_map__online(void); /* thread unsafe */
static inline int cpu_map__socket(struct perf_cpu_map *sock, int s)
diff --git a/tools/perf/util/cs-etm.c b/tools/perf/util/cs-etm.c
index 4ba0f871f086..f5f855fff412 100644
--- a/tools/perf/util/cs-etm.c
+++ b/tools/perf/util/cs-etm.c
@@ -110,7 +110,7 @@ static int cs_etm__decode_data_block(struct cs_etm_queue *etmq);
 * encode the etm queue number as the upper 16 bits and the channel as
 * the lower 16 bits.
*/
-#define TO_CS_QUEUE_NR(queue_nr, trace_id_chan) \
+#define TO_CS_QUEUE_NR(queue_nr, trace_chan_id) \
(queue_nr << 16 | trace_chan_id)
#define TO_QUEUE_NR(cs_queue_nr) (cs_queue_nr >> 16)
#define TO_TRACE_CHAN_ID(cs_queue_nr) (cs_queue_nr & 0x0000ffff)
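
Worked example of the (now consistently named) macros: queue_nr 3 with
trace_chan_id 0x1c packs to (3 << 16) | 0x1c = 0x0003001c, from which
TO_QUEUE_NR() recovers 3 and TO_TRACE_CHAN_ID() recovers 0x1c.
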
@@ -819,7 +819,7 @@ static int cs_etm__setup_queue(struct cs_etm_auxtrace *etm,
* Note that packets decoded above are still in the traceID's packet
* queue and will be processed in cs_etm__process_queues().
*/
- cs_queue_nr = TO_CS_QUEUE_NR(queue_nr, trace_id_chan);
+ cs_queue_nr = TO_CS_QUEUE_NR(queue_nr, trace_chan_id);
ret = auxtrace_heap__add(&etm->heap, cs_queue_nr, timestamp);
out:
return ret;
diff --git a/tools/perf/util/data.c b/tools/perf/util/data.c
index 88fba2ba549f..c47aa34fdc0a 100644
--- a/tools/perf/util/data.c
+++ b/tools/perf/util/data.c
@@ -76,6 +76,13 @@ int perf_data__open_dir(struct perf_data *data)
DIR *dir;
int nr = 0;
+ /*
+	 * A directory containing a single regular perf data file that is
+	 * already open means there is nothing more to do here.
+ */
+ if (perf_data__is_single_file(data))
+ return 0;
+
if (WARN_ON(!data->is_dir))
return -EINVAL;
@@ -96,7 +103,7 @@ int perf_data__open_dir(struct perf_data *data)
if (stat(path, &st))
continue;
- if (!S_ISREG(st.st_mode) || strncmp(dent->d_name, "data", 4))
+ if (!S_ISREG(st.st_mode) || strncmp(dent->d_name, "data.", 5))
continue;
ret = -ENOMEM;
@@ -306,7 +313,7 @@ static int open_dir(struct perf_data *data)
* So far we open only the header, so we can read the data version and
* layout.
*/
- if (asprintf(&data->file.path, "%s/header", data->path) < 0)
+ if (asprintf(&data->file.path, "%s/data", data->path) < 0)
return -1;
if (perf_data__is_write(data) &&
@@ -406,7 +413,7 @@ unsigned long perf_data__size(struct perf_data *data)
u64 size = data->file.size;
int i;
- if (!data->is_dir)
+ if (perf_data__is_single_file(data))
return size;
for (i = 0; i < data->dir.nr; i++) {
@@ -417,3 +424,36 @@ unsigned long perf_data__size(struct perf_data *data)
return size;
}
+
+int perf_data__make_kcore_dir(struct perf_data *data, char *buf, size_t buf_sz)
+{
+ int ret;
+
+ if (!data->is_dir)
+ return -1;
+
+ ret = snprintf(buf, buf_sz, "%s/kcore_dir", data->path);
+ if (ret < 0 || (size_t)ret >= buf_sz)
+ return -1;
+
+ return mkdir(buf, S_IRWXU);
+}
+
+char *perf_data__kallsyms_name(struct perf_data *data)
+{
+ char *kallsyms_name;
+ struct stat st;
+
+ if (!data->is_dir)
+ return NULL;
+
+ if (asprintf(&kallsyms_name, "%s/kcore_dir/kallsyms", data->path) < 0)
+ return NULL;
+
+ if (stat(kallsyms_name, &st)) {
+ free(kallsyms_name);
+ return NULL;
+ }
+
+ return kallsyms_name;
+}
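
With these helpers, a perf.data directory can now carry kcore-related files
alongside the data files; an illustrative layout:

    perf.data/
        data               <- contains the header; opened via open_dir() above
        data.0 ... data.N  <- data files matched by the "data." check above
        kcore_dir/         <- created by perf_data__make_kcore_dir()
            kallsyms       <- located by perf_data__kallsyms_name()
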
diff --git a/tools/perf/util/data.h b/tools/perf/util/data.h
index 259868a39019..75947ef6bc17 100644
--- a/tools/perf/util/data.h
+++ b/tools/perf/util/data.h
@@ -9,6 +9,11 @@ enum perf_data_mode {
PERF_DATA_MODE_READ,
};
+enum perf_dir_version {
+ PERF_DIR_SINGLE_FILE = 0,
+ PERF_DIR_VERSION = 1,
+};
+
struct perf_data_file {
char *path;
int fd;
@@ -50,6 +55,11 @@ static inline bool perf_data__is_dir(struct perf_data *data)
return data->is_dir;
}
+static inline bool perf_data__is_single_file(struct perf_data *data)
+{
+ return data->dir.version == PERF_DIR_SINGLE_FILE;
+}
+
static inline int perf_data__fd(struct perf_data *data)
{
return data->file.fd;
@@ -77,4 +87,6 @@ int perf_data__open_dir(struct perf_data *data);
void perf_data__close_dir(struct perf_data *data);
int perf_data__update_dir(struct perf_data *data);
unsigned long perf_data__size(struct perf_data *data);
+int perf_data__make_kcore_dir(struct perf_data *data, char *buf, size_t buf_sz);
+char *perf_data__kallsyms_name(struct perf_data *data);
#endif /* __PERF_DATA_H */
diff --git a/tools/perf/util/db-export.c b/tools/perf/util/db-export.c
index 752227b265e7..d029faf9fc9f 100644
--- a/tools/perf/util/db-export.c
+++ b/tools/perf/util/db-export.c
@@ -181,7 +181,7 @@ static int db_ids_from_al(struct db_export *dbe, struct addr_location *al,
if (al->map) {
struct dso *dso = al->map->dso;
- err = db_export__dso(dbe, dso, al->machine);
+ err = db_export__dso(dbe, dso, al->mg->machine);
if (err)
return err;
*dso_db_id = dso->db_id;
@@ -249,9 +249,9 @@ static struct call_path *call_path_from_sample(struct db_export *dbe,
* constructing an addr_location struct and then passing it to
* db_ids_from_al() to perform the export.
*/
- al.sym = node->sym;
- al.map = node->map;
- al.machine = machine;
+ al.sym = node->ms.sym;
+ al.map = node->ms.map;
+ al.mg = thread->mg;
al.addr = node->ip;
if (al.map && !al.sym)
@@ -360,13 +360,13 @@ int db_export__sample(struct db_export *dbe, union perf_event *event,
if (err)
return err;
- err = db_export__machine(dbe, al->machine);
+ err = db_export__machine(dbe, al->mg->machine);
if (err)
return err;
- main_thread = thread__main_thread(al->machine, thread);
+ main_thread = thread__main_thread(al->mg->machine, thread);
- err = db_export__threads(dbe, thread, main_thread, al->machine, &comm);
+ err = db_export__threads(dbe, thread, main_thread, al->mg->machine, &comm);
if (err)
goto out_put;
@@ -380,7 +380,7 @@ int db_export__sample(struct db_export *dbe, union perf_event *event,
goto out_put;
if (dbe->cpr) {
- struct call_path *cp = call_path_from_sample(dbe, al->machine,
+ struct call_path *cp = call_path_from_sample(dbe, al->mg->machine,
thread, sample,
evsel);
if (cp) {
diff --git a/tools/perf/util/debug.c b/tools/perf/util/debug.c
index e55114f0336f..adb656745ecc 100644
--- a/tools/perf/util/debug.c
+++ b/tools/perf/util/debug.c
@@ -24,6 +24,7 @@
#include <linux/ctype.h>
int verbose;
+int debug_peo_args;
bool dump_trace = false, quiet = false;
int debug_ordered_events;
static int redirect_to_stderr;
@@ -180,6 +181,7 @@ static struct debug_variable {
{ .name = "ordered-events", .ptr = &debug_ordered_events},
{ .name = "stderr", .ptr = &redirect_to_stderr},
{ .name = "data-convert", .ptr = &debug_data_convert },
+ { .name = "perf-event-open", .ptr = &debug_peo_args },
{ .name = NULL, }
};
diff --git a/tools/perf/util/debug.h b/tools/perf/util/debug.h
index d25ae1c4cee9..f1734abd98dd 100644
--- a/tools/perf/util/debug.h
+++ b/tools/perf/util/debug.h
@@ -8,6 +8,7 @@
#include <linux/compiler.h>
extern int verbose;
+extern int debug_peo_args;
extern bool quiet, dump_trace;
extern int debug_ordered_events;
extern int debug_data_convert;
@@ -30,6 +31,14 @@ extern int debug_data_convert;
#define pr_debug3(fmt, ...) pr_debugN(3, pr_fmt(fmt), ##__VA_ARGS__)
#define pr_debug4(fmt, ...) pr_debugN(4, pr_fmt(fmt), ##__VA_ARGS__)
+/* Special macro to print perf_event_open arguments/return value. */
+#define pr_debug2_peo(fmt, ...) { \
+ if (debug_peo_args) \
+ pr_debugN(0, pr_fmt(fmt), ##__VA_ARGS__); \
+ else \
+ pr_debugN(2, pr_fmt(fmt), ##__VA_ARGS__); \
+}
+
#define pr_time_N(n, var, t, fmt, ...) \
eprintf_time(n, var, t, fmt, ##__VA_ARGS__)
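The net effect of pr_debug2_peo(): when the perf-event-open debug variable is set (reachable via perf's --debug option as perf --debug perf-event-open, if I read the option plumbing right), these messages print unconditionally at level 0; otherwise they keep their old level-2 behaviour. A standalone model of that dispatch, not taken from the tree:

#include <stdio.h>

static int verbose;        /* models tools/perf's 'verbose' */
static int debug_peo_args; /* models the new debug variable */

#define pr_debugN(n, fmt, ...) \
	do { if (verbose >= (n)) fprintf(stderr, fmt, ##__VA_ARGS__); } while (0)

/* Same shape as pr_debug2_peo(): level 0 when the knob is on, else level 2. */
#define pr_debug2_peo(fmt, ...) \
	do { \
		if (debug_peo_args) \
			pr_debugN(0, fmt, ##__VA_ARGS__); \
		else \
			pr_debugN(2, fmt, ##__VA_ARGS__); \
	} while (0)

int main(void)
{
	debug_peo_args = 1;
	pr_debug2_peo("sys_perf_event_open: pid %d\n", 0); /* printed at level 0 */
	return 0;
}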
diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c
index e11ddf86f2b3..91f21239608b 100644
--- a/tools/perf/util/dso.c
+++ b/tools/perf/util/dso.c
@@ -768,7 +768,7 @@ dso_cache__free(struct dso *dso)
pthread_mutex_unlock(&dso->lock);
}
-static struct dso_cache *dso_cache__find(struct dso *dso, u64 offset)
+static struct dso_cache *__dso_cache__find(struct dso *dso, u64 offset)
{
const struct rb_root *root = &dso->data.cache;
struct rb_node * const *p = &root->rb_node;
@@ -827,14 +827,16 @@ out:
return cache;
}
-static ssize_t
-dso_cache__memcpy(struct dso_cache *cache, u64 offset,
- u8 *data, u64 size)
+static ssize_t dso_cache__memcpy(struct dso_cache *cache, u64 offset, u8 *data,
+ u64 size, bool out)
{
u64 cache_offset = offset - cache->offset;
u64 cache_size = min(cache->size - cache_offset, size);
- memcpy(data, cache->data + cache_offset, cache_size);
+ if (out)
+ memcpy(data, cache->data + cache_offset, cache_size);
+ else
+ memcpy(cache->data + cache_offset, data, cache_size);
return cache_size;
}
@@ -863,63 +865,73 @@ out:
return ret;
}
-static ssize_t
-dso_cache__read(struct dso *dso, struct machine *machine,
- u64 offset, u8 *data, ssize_t size)
+static struct dso_cache *dso_cache__populate(struct dso *dso,
+ struct machine *machine,
+ u64 offset, ssize_t *ret)
{
u64 cache_offset = offset & DSO__DATA_CACHE_MASK;
struct dso_cache *cache;
struct dso_cache *old;
- ssize_t ret;
cache = zalloc(sizeof(*cache) + DSO__DATA_CACHE_SIZE);
- if (!cache)
- return -ENOMEM;
+ if (!cache) {
+ *ret = -ENOMEM;
+ return NULL;
+ }
if (dso->binary_type == DSO_BINARY_TYPE__BPF_PROG_INFO)
- ret = bpf_read(dso, cache_offset, cache->data);
+ *ret = bpf_read(dso, cache_offset, cache->data);
else
- ret = file_read(dso, machine, cache_offset, cache->data);
+ *ret = file_read(dso, machine, cache_offset, cache->data);
- if (ret > 0) {
- cache->offset = cache_offset;
- cache->size = ret;
+ if (*ret <= 0) {
+ free(cache);
+ return NULL;
+ }
- old = dso_cache__insert(dso, cache);
- if (old) {
- /* we lose the race */
- free(cache);
- cache = old;
- }
+ cache->offset = cache_offset;
+ cache->size = *ret;
- ret = dso_cache__memcpy(cache, offset, data, size);
+ old = dso_cache__insert(dso, cache);
+ if (old) {
+ /* we lose the race */
+ free(cache);
+ cache = old;
}
- if (ret <= 0)
- free(cache);
+ return cache;
+}
- return ret;
+static struct dso_cache *dso_cache__find(struct dso *dso,
+ struct machine *machine,
+ u64 offset,
+ ssize_t *ret)
+{
+ struct dso_cache *cache = __dso_cache__find(dso, offset);
+
+ return cache ? cache : dso_cache__populate(dso, machine, offset, ret);
}
-static ssize_t dso_cache_read(struct dso *dso, struct machine *machine,
- u64 offset, u8 *data, ssize_t size)
+static ssize_t dso_cache_io(struct dso *dso, struct machine *machine,
+ u64 offset, u8 *data, ssize_t size, bool out)
{
struct dso_cache *cache;
+ ssize_t ret = 0;
- cache = dso_cache__find(dso, offset);
- if (cache)
- return dso_cache__memcpy(cache, offset, data, size);
- else
- return dso_cache__read(dso, machine, offset, data, size);
+ cache = dso_cache__find(dso, machine, offset, &ret);
+ if (!cache)
+ return ret;
+
+ return dso_cache__memcpy(cache, offset, data, size, out);
}
/*
* Reads and caches dso data DSO__DATA_CACHE_SIZE size chunks
* in the rb_tree. Any read to already cached data is served
- * by cached data.
+ * by cached data. Writes update the cache only, not the backing file.
*/
-static ssize_t cached_read(struct dso *dso, struct machine *machine,
- u64 offset, u8 *data, ssize_t size)
+static ssize_t cached_io(struct dso *dso, struct machine *machine,
+ u64 offset, u8 *data, ssize_t size, bool out)
{
ssize_t r = 0;
u8 *p = data;
@@ -927,7 +939,7 @@ static ssize_t cached_read(struct dso *dso, struct machine *machine,
do {
ssize_t ret;
- ret = dso_cache_read(dso, machine, offset, p, size);
+ ret = dso_cache_io(dso, machine, offset, p, size, out);
if (ret < 0)
return ret;
@@ -1011,8 +1023,9 @@ off_t dso__data_size(struct dso *dso, struct machine *machine)
return dso->data.file_size;
}
-static ssize_t data_read_offset(struct dso *dso, struct machine *machine,
- u64 offset, u8 *data, ssize_t size)
+static ssize_t data_read_write_offset(struct dso *dso, struct machine *machine,
+ u64 offset, u8 *data, ssize_t size,
+ bool out)
{
if (dso__data_file_size(dso, machine))
return -1;
@@ -1024,7 +1037,7 @@ static ssize_t data_read_offset(struct dso *dso, struct machine *machine,
if (offset + size < offset)
return -1;
- return cached_read(dso, machine, offset, data, size);
+ return cached_io(dso, machine, offset, data, size, out);
}
/**
@@ -1044,7 +1057,7 @@ ssize_t dso__data_read_offset(struct dso *dso, struct machine *machine,
if (dso->data.status == DSO_DATA_STATUS_ERROR)
return -1;
- return data_read_offset(dso, machine, offset, data, size);
+ return data_read_write_offset(dso, machine, offset, data, size, true);
}
/**
@@ -1065,6 +1078,46 @@ ssize_t dso__data_read_addr(struct dso *dso, struct map *map,
return dso__data_read_offset(dso, machine, offset, data, size);
}
+/**
+ * dso__data_write_cache_offs - Write data to dso data cache at file offset
+ * @dso: dso object
+ * @machine: machine object
+ * @offset: file offset
+ * @data: buffer to write
+ * @size: size of the @data buffer
+ *
+ * Write into the dso file data cache, but do not change the file itself.
+ */
+ssize_t dso__data_write_cache_offs(struct dso *dso, struct machine *machine,
+ u64 offset, const u8 *data_in, ssize_t size)
+{
+ u8 *data = (u8 *)data_in; /* cast away const to use same fns for r/w */
+
+ if (dso->data.status == DSO_DATA_STATUS_ERROR)
+ return -1;
+
+ return data_read_write_offset(dso, machine, offset, data, size, false);
+}
+
+/**
+ * dso__data_write_cache_addr - Write data to dso data cache at dso address
+ * @dso: dso object
+ * @machine: machine object
+ * @addr: virtual memory address
+ * @data: buffer to write
+ * @size: size of the @data buffer
+ *
+ * External interface to write into the dso file data cache, without
+ * changing the file itself.
+ */
+ssize_t dso__data_write_cache_addr(struct dso *dso, struct map *map,
+ struct machine *machine, u64 addr,
+ const u8 *data, ssize_t size)
+{
+ u64 offset = map->map_ip(map, addr);
+ return dso__data_write_cache_offs(dso, machine, offset, data, size);
+}
+
struct map *dso__new_map(const char *name)
{
struct map *map = NULL;
@@ -1096,7 +1149,7 @@ struct dso *machine__findnew_kernel(struct machine *machine, const char *name,
return dso;
}
-void dso__set_long_name(struct dso *dso, const char *name, bool name_allocated)
+static void dso__set_long_name_id(struct dso *dso, const char *name, struct dso_id *id, bool name_allocated)
{
struct rb_root *root = dso->root;
@@ -1109,8 +1162,8 @@ void dso__set_long_name(struct dso *dso, const char *name, bool name_allocated)
if (root) {
rb_erase(&dso->rb_node, root);
/*
- * __dsos__findnew_link_by_longname() isn't guaranteed to add it
- * back, so a clean removal is required here.
+ * __dsos__findnew_link_by_longname_id() isn't guaranteed to
+ * add it back, so a clean removal is required here.
*/
RB_CLEAR_NODE(&dso->rb_node);
dso->root = NULL;
@@ -1121,7 +1174,12 @@ void dso__set_long_name(struct dso *dso, const char *name, bool name_allocated)
dso->long_name_allocated = name_allocated;
if (root)
- __dsos__findnew_link_by_longname(root, dso, NULL);
+ __dsos__findnew_link_by_longname_id(root, dso, NULL, id);
+}
+
+void dso__set_long_name(struct dso *dso, const char *name, bool name_allocated)
+{
+ dso__set_long_name_id(dso, name, NULL, name_allocated);
}
void dso__set_short_name(struct dso *dso, const char *name, bool name_allocated)
@@ -1162,13 +1220,15 @@ void dso__set_sorted_by_name(struct dso *dso)
dso->sorted_by_name = true;
}
-struct dso *dso__new(const char *name)
+struct dso *dso__new_id(const char *name, struct dso_id *id)
{
struct dso *dso = calloc(1, sizeof(*dso) + strlen(name) + 1);
if (dso != NULL) {
strcpy(dso->name, name);
- dso__set_long_name(dso, dso->name, false);
+ if (id)
+ dso->id = *id;
+ dso__set_long_name_id(dso, dso->name, id, false);
dso__set_short_name(dso, dso->name, false);
dso->symbols = dso->symbol_names = RB_ROOT_CACHED;
dso->data.cache = RB_ROOT;
@@ -1199,6 +1259,11 @@ struct dso *dso__new(const char *name)
return dso;
}
+struct dso *dso__new(const char *name)
+{
+ return dso__new_id(name, NULL);
+}
+
void dso__delete(struct dso *dso)
{
if (!RB_EMPTY_NODE(&dso->rb_node))
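The whole read/write unification above pivots on the single 'out' flag: true copies cache bytes out to the caller (a read), false copies caller bytes into the cache (a write that never reaches the backing file). A toy model of the direction switch, independent of the perf types:

#include <string.h>
#include <sys/types.h>

/* Toy model of dso_cache__memcpy()'s direction flag; assumes off < cache_size. */
static ssize_t cache_memcpy(char *cache, size_t cache_size, size_t off,
			    char *data, size_t size, int out)
{
	size_t n = size < cache_size - off ? size : cache_size - off;

	if (out)
		memcpy(data, cache + off, n);	/* read:  cache -> caller */
	else
		memcpy(cache + off, data, n);	/* write: caller -> cache */
	return (ssize_t)n;
}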
diff --git a/tools/perf/util/dso.h b/tools/perf/util/dso.h
index e4dddb76770d..2db64b79617a 100644
--- a/tools/perf/util/dso.h
+++ b/tools/perf/util/dso.h
@@ -122,6 +122,16 @@ enum dso_load_errno {
#define DSO__DATA_CACHE_SIZE 4096
#define DSO__DATA_CACHE_MASK ~(DSO__DATA_CACHE_SIZE - 1)
+/*
+ * Data identifying the DSO's backing storage; comes from PERF_RECORD_MMAP2 meta events
+ */
+struct dso_id {
+ u32 maj;
+ u32 min;
+ u64 ino;
+ u64 ino_generation;
+};
+
struct dso_cache {
struct rb_node rb_node;
u64 offset;
@@ -196,6 +206,7 @@ struct dso {
u64 db_id;
};
struct nsinfo *nsinfo;
+ struct dso_id id;
refcount_t refcnt;
char name[0];
};
@@ -214,9 +225,11 @@ static inline void dso__set_loaded(struct dso *dso)
dso->loaded = true;
}
+struct dso *dso__new_id(const char *name, struct dso_id *id);
struct dso *dso__new(const char *name);
void dso__delete(struct dso *dso);
+int dso__cmp_id(struct dso *a, struct dso *b);
void dso__set_short_name(struct dso *dso, const char *name, bool name_allocated);
void dso__set_long_name(struct dso *dso, const char *name, bool name_allocated);
@@ -285,6 +298,8 @@ void dso__set_module_info(struct dso *dso, struct kmod_path *m,
* dso__data_size
* dso__data_read_offset
* dso__data_read_addr
+ * dso__data_write_cache_offs
+ * dso__data_write_cache_addr
*
* Please refer to the dso.c object code for each function and
* arguments documentation. Following text tries to explain the
@@ -332,6 +347,11 @@ ssize_t dso__data_read_addr(struct dso *dso, struct map *map,
struct machine *machine, u64 addr,
u8 *data, ssize_t size);
bool dso__data_status_seen(struct dso *dso, enum dso_data_status_seen by);
+ssize_t dso__data_write_cache_offs(struct dso *dso, struct machine *machine,
+ u64 offset, const u8 *data, ssize_t size);
+ssize_t dso__data_write_cache_addr(struct dso *dso, struct map *map,
+ struct machine *machine, u64 addr,
+ const u8 *data, ssize_t size);
struct map *dso__new_map(const char *name);
struct dso *machine__findnew_kernel(struct machine *machine, const char *name,
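struct dso_id mirrors the identity fields PERF_RECORD_MMAP2 already carries, so two binaries with the same path (say, a file replaced on disk between mmaps) no longer collapse into a single dso. A hedged sketch of how an mmap2 consumer might fill one in; the function is hypothetical, the field names follow the mmap2 record layout:

/* Hypothetical MMAP2 consumer; field names follow the mmap2 record layout. */
static struct dso *dso_from_mmap2(struct machine *machine,
				  union perf_event *event)
{
	struct dso_id id = {
		.maj		= event->mmap2.maj,
		.min		= event->mmap2.min,
		.ino		= event->mmap2.ino,
		.ino_generation	= event->mmap2.ino_generation,
	};

	return dsos__findnew_id(&machine->dsos, event->mmap2.filename, &id);
}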
diff --git a/tools/perf/util/dsos.c b/tools/perf/util/dsos.c
index 3ea80d203587..591707c69c39 100644
--- a/tools/perf/util/dsos.c
+++ b/tools/perf/util/dsos.c
@@ -9,6 +9,40 @@
#include <string.h>
#include <symbol.h> // filename__read_build_id
+static int __dso_id__cmp(struct dso_id *a, struct dso_id *b)
+{
+ if (a->maj > b->maj) return -1;
+ if (a->maj < b->maj) return 1;
+
+ if (a->min > b->min) return -1;
+ if (a->min < b->min) return 1;
+
+ if (a->ino > b->ino) return -1;
+ if (a->ino < b->ino) return 1;
+
+ if (a->ino_generation > b->ino_generation) return -1;
+ if (a->ino_generation < b->ino_generation) return 1;
+
+ return 0;
+}
+
+static int dso_id__cmp(struct dso_id *a, struct dso_id *b)
+{
+ /*
+ * The second argument is always dso->id, so it is zeroed if not set;
+ * passing NULL for 'a' is taken to mean a zeroed id.
+ */
+ if (a == NULL)
+ return 0;
+
+ return __dso_id__cmp(a, b);
+}
+
+int dso__cmp_id(struct dso *a, struct dso *b)
+{
+ return __dso_id__cmp(&a->id, &b->id);
+}
+
bool __dsos__read_build_ids(struct list_head *head, bool with_hits)
{
bool have_build_id = false;
@@ -34,12 +68,30 @@ bool __dsos__read_build_ids(struct list_head *head, bool with_hits)
return have_build_id;
}
+static int __dso__cmp_long_name(const char *long_name, struct dso_id *id, struct dso *b)
+{
+ int rc = strcmp(long_name, b->long_name);
+ return rc ?: dso_id__cmp(id, &b->id);
+}
+
+static int __dso__cmp_short_name(const char *short_name, struct dso_id *id, struct dso *b)
+{
+ int rc = strcmp(short_name, b->short_name);
+ return rc ?: dso_id__cmp(id, &b->id);
+}
+
+static int dso__cmp_short_name(struct dso *a, struct dso *b)
+{
+ return __dso__cmp_short_name(a->short_name, &a->id, b);
+}
+
/*
* Find a matching entry and/or link current entry to RB tree.
* Either one of the dso or name parameter must be non-NULL or the
* function will not work.
*/
-struct dso *__dsos__findnew_link_by_longname(struct rb_root *root, struct dso *dso, const char *name)
+struct dso *__dsos__findnew_link_by_longname_id(struct rb_root *root, struct dso *dso,
+ const char *name, struct dso_id *id)
{
struct rb_node **p = &root->rb_node;
struct rb_node *parent = NULL;
@@ -51,7 +103,7 @@ struct dso *__dsos__findnew_link_by_longname(struct rb_root *root, struct dso *d
*/
while (*p) {
struct dso *this = rb_entry(*p, struct dso, rb_node);
- int rc = strcmp(name, this->long_name);
+ int rc = __dso__cmp_long_name(name, id, this);
parent = *p;
if (rc == 0) {
@@ -67,7 +119,7 @@ struct dso *__dsos__findnew_link_by_longname(struct rb_root *root, struct dso *d
* In this case, the short name should be different.
* Comparing the short names to differentiate the DSOs.
*/
- rc = strcmp(dso->short_name, this->short_name);
+ rc = dso__cmp_short_name(dso, this);
if (rc == 0) {
pr_err("Duplicated dso name: %s\n", name);
return NULL;
@@ -90,7 +142,7 @@ struct dso *__dsos__findnew_link_by_longname(struct rb_root *root, struct dso *d
void __dsos__add(struct dsos *dsos, struct dso *dso)
{
list_add_tail(&dso->node, &dsos->head);
- __dsos__findnew_link_by_longname(&dsos->root, dso, NULL);
+ __dsos__findnew_link_by_longname_id(&dsos->root, dso, NULL, &dso->id);
/*
* It is now in the linked list, grab a reference, then garbage collect
* this when needing memory, by looking at LRU dso instances in the
@@ -121,26 +173,27 @@ void dsos__add(struct dsos *dsos, struct dso *dso)
up_write(&dsos->lock);
}
-struct dso *__dsos__find(struct dsos *dsos, const char *name, bool cmp_short)
+static struct dso *__dsos__findnew_by_longname_id(struct rb_root *root, const char *name, struct dso_id *id)
+{
+ return __dsos__findnew_link_by_longname_id(root, NULL, name, id);
+}
+
+static struct dso *__dsos__find_id(struct dsos *dsos, const char *name, struct dso_id *id, bool cmp_short)
{
struct dso *pos;
if (cmp_short) {
list_for_each_entry(pos, &dsos->head, node)
- if (strcmp(pos->short_name, name) == 0)
+ if (__dso__cmp_short_name(name, id, pos) == 0)
return pos;
return NULL;
}
- return __dsos__findnew_by_longname(&dsos->root, name);
+ return __dsos__findnew_by_longname_id(&dsos->root, name, id);
}
-struct dso *dsos__find(struct dsos *dsos, const char *name, bool cmp_short)
+struct dso *__dsos__find(struct dsos *dsos, const char *name, bool cmp_short)
{
- struct dso *dso;
- down_read(&dsos->lock);
- dso = __dsos__find(dsos, name, cmp_short);
- up_read(&dsos->lock);
- return dso;
+ return __dsos__find_id(dsos, name, NULL, cmp_short);
}
static void dso__set_basename(struct dso *dso)
@@ -175,9 +228,9 @@ static void dso__set_basename(struct dso *dso)
dso__set_short_name(dso, base, true);
}
-struct dso *__dsos__addnew(struct dsos *dsos, const char *name)
+static struct dso *__dsos__addnew_id(struct dsos *dsos, const char *name, struct dso_id *id)
{
- struct dso *dso = dso__new(name);
+ struct dso *dso = dso__new_id(name, id);
if (dso != NULL) {
__dsos__add(dsos, dso);
@@ -188,18 +241,22 @@ struct dso *__dsos__addnew(struct dsos *dsos, const char *name)
return dso;
}
-struct dso *__dsos__findnew(struct dsos *dsos, const char *name)
+struct dso *__dsos__addnew(struct dsos *dsos, const char *name)
{
- struct dso *dso = __dsos__find(dsos, name, false);
+ return __dsos__addnew_id(dsos, name, NULL);
+}
- return dso ? dso : __dsos__addnew(dsos, name);
+static struct dso *__dsos__findnew_id(struct dsos *dsos, const char *name, struct dso_id *id)
+{
+ struct dso *dso = __dsos__find_id(dsos, name, id, false);
+ return dso ? dso : __dsos__addnew_id(dsos, name, id);
}
-struct dso *dsos__findnew(struct dsos *dsos, const char *name)
+struct dso *dsos__findnew_id(struct dsos *dsos, const char *name, struct dso_id *id)
{
struct dso *dso;
down_write(&dsos->lock);
- dso = dso__get(__dsos__findnew(dsos, name));
+ dso = dso__get(__dsos__findnew_id(dsos, name, id));
up_write(&dsos->lock);
return dso;
}
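The rc ?: dso_id__cmp(...) in the comparators above uses GCC's binary conditional: the expression evaluates to rc when rc is non-zero, and to the second comparison otherwise, so names compare first and the dso_id only breaks ties. The portable equivalent, for reference:

	int rc = strcmp(long_name, b->long_name);
	return rc != 0 ? rc : dso_id__cmp(id, &b->id);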
diff --git a/tools/perf/util/dsos.h b/tools/perf/util/dsos.h
index 32f1fbee0feb..5dbec2bc6966 100644
--- a/tools/perf/util/dsos.h
+++ b/tools/perf/util/dsos.h
@@ -9,6 +9,7 @@
#include "rwsem.h"
struct dso;
+struct dso_id;
/*
* DSOs are put into both a list for fast iteration and rbtree for fast
@@ -24,16 +25,11 @@ void __dsos__add(struct dsos *dsos, struct dso *dso);
void dsos__add(struct dsos *dsos, struct dso *dso);
struct dso *__dsos__addnew(struct dsos *dsos, const char *name);
struct dso *__dsos__find(struct dsos *dsos, const char *name, bool cmp_short);
-struct dso *dsos__find(struct dsos *dsos, const char *name, bool cmp_short);
-struct dso *__dsos__findnew(struct dsos *dsos, const char *name);
-struct dso *dsos__findnew(struct dsos *dsos, const char *name);
-struct dso *__dsos__findnew_link_by_longname(struct rb_root *root, struct dso *dso, const char *name);
-
-static inline struct dso *__dsos__findnew_by_longname(struct rb_root *root, const char *name)
-{
- return __dsos__findnew_link_by_longname(root, NULL, name);
-}
+struct dso *dsos__findnew_id(struct dsos *dsos, const char *name, struct dso_id *id);
+
+struct dso *__dsos__findnew_link_by_longname_id(struct rb_root *root, struct dso *dso,
+ const char *name, struct dso_id *id);
bool __dsos__read_build_ids(struct list_head *head, bool with_hits);
diff --git a/tools/perf/util/dwarf-aux.c b/tools/perf/util/dwarf-aux.c
index df6cee5c071f..aa898014ad12 100644
--- a/tools/perf/util/dwarf-aux.c
+++ b/tools/perf/util/dwarf-aux.c
@@ -59,6 +59,51 @@ const char *cu_get_comp_dir(Dwarf_Die *cu_die)
return dwarf_formstring(&attr);
}
+/* Unlike dwarf_getsrc_die(), cu_getsrc_die() only returns a statement line */
+static Dwarf_Line *cu_getsrc_die(Dwarf_Die *cu_die, Dwarf_Addr addr)
+{
+ Dwarf_Addr laddr;
+ Dwarf_Lines *lines;
+ Dwarf_Line *line;
+ size_t nlines, l, u, n;
+ bool flag;
+
+ if (dwarf_getsrclines(cu_die, &lines, &nlines) != 0 ||
+ nlines == 0)
+ return NULL;
+
+ /* Lines are sorted by address, use binary search */
+ l = 0; u = nlines - 1;
+ while (l < u) {
+ n = u - (u - l) / 2;
+ line = dwarf_onesrcline(lines, n);
+ if (!line || dwarf_lineaddr(line, &laddr) != 0)
+ return NULL;
+ if (addr < laddr)
+ u = n - 1;
+ else
+ l = n;
+ }
+ /* Going backward to find the lowest line */
+ do {
+ line = dwarf_onesrcline(lines, --l);
+ if (!line || dwarf_lineaddr(line, &laddr) != 0)
+ return NULL;
+ } while (laddr == addr);
+ l++;
+ /* Going forward to find the statement line */
+ do {
+ line = dwarf_onesrcline(lines, l++);
+ if (!line || dwarf_lineaddr(line, &laddr) != 0 ||
+ dwarf_linebeginstatement(line, &flag) != 0)
+ return NULL;
+ if (laddr > addr)
+ return NULL;
+ } while (!flag);
+
+ return line;
+}
+
/**
* cu_find_lineinfo - Get a line number and file name for given address
* @cu_die: a CU DIE
@@ -72,17 +117,26 @@ int cu_find_lineinfo(Dwarf_Die *cu_die, unsigned long addr,
const char **fname, int *lineno)
{
Dwarf_Line *line;
- Dwarf_Addr laddr;
+ Dwarf_Die die_mem;
+ Dwarf_Addr faddr;
+
+ if (die_find_realfunc(cu_die, (Dwarf_Addr)addr, &die_mem)
+ && die_entrypc(&die_mem, &faddr) == 0 &&
+ faddr == addr) {
+ *fname = dwarf_decl_file(&die_mem);
+ dwarf_decl_line(&die_mem, lineno);
+ goto out;
+ }
- line = dwarf_getsrc_die(cu_die, (Dwarf_Addr)addr);
- if (line && dwarf_lineaddr(line, &laddr) == 0 &&
- addr == (unsigned long)laddr && dwarf_lineno(line, lineno) == 0) {
+ line = cu_getsrc_die(cu_die, (Dwarf_Addr)addr);
+ if (line && dwarf_lineno(line, lineno) == 0) {
*fname = dwarf_linesrc(line, NULL, NULL);
if (!*fname)
/* line number is useless without filename */
*lineno = 0;
}
+out:
return *lineno ?: -ENOENT;
}
@@ -308,20 +362,50 @@ bool die_is_func_def(Dwarf_Die *dw_die)
}
/**
+ * die_entrypc - Returns entry PC (the lowest address) of a DIE
+ * @dw_die: a DIE
+ * @addr: where to store entry PC
+ *
+ * Since dwarf_entrypc() does not return the entry PC if the DIE has only an
+ * address range, we have to use this to retrieve the lowest address from
+ * the address range attribute.
+ */
+int die_entrypc(Dwarf_Die *dw_die, Dwarf_Addr *addr)
+{
+ Dwarf_Addr base, end;
+
+ if (!addr)
+ return -EINVAL;
+
+ if (dwarf_entrypc(dw_die, addr) == 0)
+ return 0;
+
+ return dwarf_ranges(dw_die, 0, &base, addr, &end) < 0 ? -ENOENT : 0;
+}
+
+/**
* die_is_func_instance - Ensure that this DIE is an instance of a subprogram
* @dw_die: a DIE
*
* Ensure that this DIE is an instance (which has an entry address).
- * This returns true if @dw_die is a function instance. If not, you need to
- * call die_walk_instances() to find actual instances.
+ * This returns true if @dw_die is a function instance. If not, @dw_die
+ * must be a prototype. You can use die_walk_instances() to find actual
+ * instances.
**/
bool die_is_func_instance(Dwarf_Die *dw_die)
{
Dwarf_Addr tmp;
+ Dwarf_Attribute attr_mem;
+ int tag = dwarf_tag(dw_die);
+
+ if (tag != DW_TAG_subprogram &&
+ tag != DW_TAG_inlined_subroutine)
+ return false;
- /* Actually gcc optimizes non-inline as like as inlined */
- return !dwarf_func_inline(dw_die) && dwarf_entrypc(dw_die, &tmp) == 0;
+ return dwarf_entrypc(dw_die, &tmp) == 0 ||
+ dwarf_attr(dw_die, DW_AT_ranges, &attr_mem) != NULL;
}
+
/**
* die_get_data_member_location - Get the data-member offset
* @mb_die: a DIE of a member of a data structure
@@ -598,6 +682,9 @@ static int __die_walk_instances_cb(Dwarf_Die *inst, void *data)
Dwarf_Die *origin;
int tmp;
+ if (!die_is_func_instance(inst))
+ return DIE_FIND_CB_CONTINUE;
+
attr = dwarf_attr(inst, DW_AT_abstract_origin, &attr_mem);
if (attr == NULL)
return DIE_FIND_CB_CONTINUE;
@@ -669,15 +756,14 @@ static int __die_walk_funclines_cb(Dwarf_Die *in_die, void *data)
if (dwarf_tag(in_die) == DW_TAG_inlined_subroutine) {
fname = die_get_call_file(in_die);
lineno = die_get_call_lineno(in_die);
- if (fname && lineno > 0 && dwarf_entrypc(in_die, &addr) == 0) {
+ if (fname && lineno > 0 && die_entrypc(in_die, &addr) == 0) {
lw->retval = lw->callback(fname, lineno, addr, lw->data);
if (lw->retval != 0)
return DIE_FIND_CB_END;
}
+ if (!lw->recursive)
+ return DIE_FIND_CB_SIBLING;
}
- if (!lw->recursive)
- /* Don't need to search recursively */
- return DIE_FIND_CB_SIBLING;
if (addr) {
fname = dwarf_decl_file(in_die);
@@ -710,7 +796,7 @@ static int __die_walk_funclines(Dwarf_Die *sp_die, bool recursive,
/* Handle function declaration line */
fname = dwarf_decl_file(sp_die);
if (fname && dwarf_decl_line(sp_die, &lineno) == 0 &&
- dwarf_entrypc(sp_die, &addr) == 0) {
+ die_entrypc(sp_die, &addr) == 0) {
lw.retval = callback(fname, lineno, addr, data);
if (lw.retval != 0)
goto done;
@@ -724,6 +810,10 @@ static int __die_walk_culines_cb(Dwarf_Die *sp_die, void *data)
{
struct __line_walk_param *lw = data;
+ /*
+ * Since an inlined function can include another inlined function in
+ * the same file, we need to walk it recursively.
+ */
lw->retval = __die_walk_funclines(sp_die, true, lw->callback, lw->data);
if (lw->retval != 0)
return DWARF_CB_ABORT;
@@ -748,11 +838,12 @@ int die_walk_lines(Dwarf_Die *rt_die, line_walk_callback_t callback, void *data)
Dwarf_Lines *lines;
Dwarf_Line *line;
Dwarf_Addr addr;
- const char *fname, *decf = NULL;
+ const char *fname, *decf = NULL, *inf = NULL;
int lineno, ret = 0;
int decl = 0, inl;
Dwarf_Die die_mem, *cu_die;
size_t nlines, i;
+ bool flag;
/* Get the CU die */
if (dwarf_tag(rt_die) != DW_TAG_compile_unit) {
@@ -783,6 +874,12 @@ int die_walk_lines(Dwarf_Die *rt_die, line_walk_callback_t callback, void *data)
"Possible error in debuginfo.\n");
continue;
}
+ /* Skip end-of-sequence */
+ if (dwarf_lineendsequence(line, &flag) != 0 || flag)
+ continue;
+ /* Skip non-statement line-info */
+ if (dwarf_linebeginstatement(line, &flag) != 0 || !flag)
+ continue;
/* Filter lines based on address */
if (rt_die != cu_die) {
/*
@@ -792,13 +889,21 @@ int die_walk_lines(Dwarf_Die *rt_die, line_walk_callback_t callback, void *data)
*/
if (!dwarf_haspc(rt_die, addr))
continue;
+
if (die_find_inlinefunc(rt_die, addr, &die_mem)) {
+ /* Call-site check */
+ inf = die_get_call_file(&die_mem);
+ if (inf && decf && !strcmp(inf, decf) &&
+ die_get_call_lineno(&die_mem) == lineno)
+ goto found;
+
dwarf_decl_line(&die_mem, &inl);
if (inl != decl ||
decf != dwarf_decl_file(&die_mem))
continue;
}
}
+found:
/* Get source line */
fname = dwarf_linesrc(line, NULL, NULL);
@@ -813,8 +918,9 @@ int die_walk_lines(Dwarf_Die *rt_die, line_walk_callback_t callback, void *data)
*/
if (rt_die != cu_die)
/*
- * Don't need walk functions recursively, because nested
- * inlined functions don't have lines of the specified DIE.
+ * No need to walk inlined functions recursively, because
+ * inner inlined functions don't have the lines of the
+ * specified function.
*/
ret = __die_walk_funclines(rt_die, false, callback, data);
else {
@@ -989,7 +1095,7 @@ static int die_get_var_innermost_scope(Dwarf_Die *sp_die, Dwarf_Die *vr_die,
bool first = true;
const char *name;
- ret = dwarf_entrypc(sp_die, &entry);
+ ret = die_entrypc(sp_die, &entry);
if (ret)
return ret;
@@ -1052,7 +1158,7 @@ int die_get_var_range(Dwarf_Die *sp_die, Dwarf_Die *vr_die, struct strbuf *buf)
bool first = true;
const char *name;
- ret = dwarf_entrypc(sp_die, &entry);
+ ret = die_entrypc(sp_die, &entry);
if (ret)
return ret;
diff --git a/tools/perf/util/dwarf-aux.h b/tools/perf/util/dwarf-aux.h
index f204e5892403..506006e0cf66 100644
--- a/tools/perf/util/dwarf-aux.h
+++ b/tools/perf/util/dwarf-aux.h
@@ -29,6 +29,9 @@ int cu_walk_functions_at(Dwarf_Die *cu_die, Dwarf_Addr addr,
/* Get DW_AT_linkage_name (should be NULL for C binary) */
const char *die_get_linkage_name(Dwarf_Die *dw_die);
+/* Get the lowest PC in DIE (including range list) */
+int die_entrypc(Dwarf_Die *dw_die, Dwarf_Addr *addr);
+
/* Ensure that this DIE is a subprogram and definition (not declaration) */
bool die_is_func_def(Dwarf_Die *dw_die);
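The loop in cu_getsrc_die() is the classic "greatest index whose address is <= addr" binary search; the midpoint n = u - (u - l) / 2 rounds up, so l = n always makes progress and the loop ends with l == u. The same invariant over a plain sorted array, as a standalone sketch:

#include <stddef.h>
#include <sys/types.h>

/* Greatest index i with a[i] <= key, or -1; a[] sorted ascending. */
static ssize_t addr_floor(const unsigned long *a, size_t n, unsigned long key)
{
	size_t l, u;

	if (n == 0 || a[0] > key)
		return -1;
	l = 0;
	u = n - 1;
	while (l < u) {
		size_t m = u - (u - l) / 2; /* rounds up: 'l = m' always progresses */

		if (key < a[m])
			u = m - 1;
		else
			l = m;
	}
	return (ssize_t)l;
}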
diff --git a/tools/perf/util/env.c b/tools/perf/util/env.c
index 3baca06786fb..6242a9215df7 100644
--- a/tools/perf/util/env.c
+++ b/tools/perf/util/env.c
@@ -2,6 +2,7 @@
#include "cpumap.h"
#include "debug.h"
#include "env.h"
+#include "util/header.h"
#include <linux/ctype.h>
#include <linux/zalloc.h>
#include "bpf-event.h"
@@ -179,6 +180,7 @@ void perf_env__exit(struct perf_env *env)
zfree(&env->sibling_threads);
zfree(&env->pmu_mappings);
zfree(&env->cpu);
+ zfree(&env->numa_map);
for (i = 0; i < env->nr_numa_nodes; i++)
perf_cpu_map__put(env->numa_nodes[i].map);
@@ -256,6 +258,21 @@ int perf_env__read_cpu_topology_map(struct perf_env *env)
return 0;
}
+int perf_env__read_cpuid(struct perf_env *env)
+{
+ char cpuid[128];
+ int err = get_cpuid(cpuid, sizeof(cpuid));
+
+ if (err)
+ return err;
+
+ free(env->cpuid);
+ env->cpuid = strdup(cpuid);
+ if (env->cpuid == NULL)
+ return ENOMEM;
+ return 0;
+}
+
static int perf_env__read_arch(struct perf_env *env)
{
struct utsname uts;
@@ -338,3 +355,42 @@ const char *perf_env__arch(struct perf_env *env)
return normalize_arch(arch_name);
}
+
+
+int perf_env__numa_node(struct perf_env *env, int cpu)
+{
+ if (!env->nr_numa_map) {
+ struct numa_node *nn;
+ int i, nr = 0;
+
+ for (i = 0; i < env->nr_numa_nodes; i++) {
+ nn = &env->numa_nodes[i];
+ nr = max(nr, perf_cpu_map__max(nn->map));
+ }
+
+ nr++;
+
+ /*
+ * Initialize the numa_map array so that lookups for cpus missing
+ * from every node return node -1.
+ */
+ env->numa_map = malloc(nr * sizeof(int));
+ if (!env->numa_map)
+ return -1;
+
+ for (i = 0; i < nr; i++)
+ env->numa_map[i] = -1;
+
+ env->nr_numa_map = nr;
+
+ for (i = 0; i < env->nr_numa_nodes; i++) {
+ int tmp, j;
+
+ nn = &env->numa_nodes[i];
+ perf_cpu_map__for_each_cpu(j, tmp, nn->map)
+ env->numa_map[j] = i;
+ }
+ }
+
+ return cpu >= 0 && cpu < env->nr_numa_map ? env->numa_map[cpu] : -1;
+}
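perf_env__numa_node() builds its cpu-to-node reverse map lazily on first use, sized one past the largest cpu seen in any node mask, so every later lookup is a bounds-checked array read. Hedged usage from a hypothetical consumer:

	int node = perf_env__numa_node(env, sample->cpu);

	if (node < 0)
		pr_debug("cpu %d is not in any recorded NUMA node\n", sample->cpu);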
diff --git a/tools/perf/util/env.h b/tools/perf/util/env.h
index db40906e2937..11d05ae3606a 100644
--- a/tools/perf/util/env.h
+++ b/tools/perf/util/env.h
@@ -87,6 +87,10 @@ struct perf_env {
struct rb_root btfs;
u32 btfs_cnt;
} bpf_progs;
+
+ /* For fast cpu to numa node lookup via perf_env__numa_node */
+ int *numa_map;
+ int nr_numa_map;
};
enum perf_compress_type {
@@ -104,6 +108,7 @@ void perf_env__exit(struct perf_env *env);
int perf_env__set_cmdline(struct perf_env *env, int argc, const char *argv[]);
+int perf_env__read_cpuid(struct perf_env *env);
int perf_env__read_cpu_topology_map(struct perf_env *env);
void cpu_cache_level__free(struct cpu_cache_level *cache);
@@ -119,4 +124,6 @@ struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
__u32 prog_id);
void perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node);
struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id);
+
+int perf_env__numa_node(struct perf_env *env, int cpu);
#endif /* __PERF_ENV_H */
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index fc1e5a991008..0141b26bae47 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -461,7 +461,7 @@ struct map *thread__find_map(struct thread *thread, u8 cpumode, u64 addr,
struct machine *machine = mg->machine;
bool load_map = false;
- al->machine = machine;
+ al->mg = mg;
al->thread = thread;
al->addr = addr;
al->cpumode = cpumode;
@@ -474,13 +474,13 @@ struct map *thread__find_map(struct thread *thread, u8 cpumode, u64 addr,
if (cpumode == PERF_RECORD_MISC_KERNEL && perf_host) {
al->level = 'k';
- mg = &machine->kmaps;
+ al->mg = mg = &machine->kmaps;
load_map = true;
} else if (cpumode == PERF_RECORD_MISC_USER && perf_host) {
al->level = '.';
} else if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) {
al->level = 'g';
- mg = &machine->kmaps;
+ al->mg = mg = &machine->kmaps;
load_map = true;
} else if (cpumode == PERF_RECORD_MISC_GUEST_USER && perf_guest) {
al->level = 'u';
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index a0a0c91cde4a..85223159737c 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -114,6 +114,11 @@ enum {
#define MAX_INSN 16
+struct aux_sample {
+ u64 size;
+ void *data;
+};
+
struct perf_sample {
u64 ip;
u32 pid, tid;
@@ -142,6 +147,7 @@ struct perf_sample {
struct regs_dump intr_regs;
struct stack_dump user_stack;
struct sample_read read;
+ struct aux_sample aux_sample;
};
#define PERF_MEM_DATA_SRC_NONE \
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index de79c735e441..fdce590d2278 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -21,6 +21,7 @@
#include "../perf.h"
#include "asm/bug.h"
#include "bpf-event.h"
+#include "util/string2.h"
#include <signal.h>
#include <unistd.h>
#include <sched.h>
@@ -42,6 +43,7 @@
#include <perf/evlist.h>
#include <perf/evsel.h>
#include <perf/cpumap.h>
+#include <perf/mmap.h>
#include <internal/xyarray.h>
@@ -57,7 +59,6 @@ void evlist__init(struct evlist *evlist, struct perf_cpu_map *cpus,
{
perf_evlist__init(&evlist->core);
perf_evlist__set_maps(&evlist->core, cpus, threads);
- fdarray__init(&evlist->core.pollfd, 64);
evlist->workload.pid = -1;
evlist->bkw_mmap_state = BKW_MMAP_NOTREADY;
}
@@ -138,7 +139,7 @@ void evlist__exit(struct evlist *evlist)
{
zfree(&evlist->mmap);
zfree(&evlist->overwrite_mmap);
- fdarray__exit(&evlist->core.pollfd);
+ perf_evlist__exit(&evlist->core);
}
void evlist__delete(struct evlist *evlist)
@@ -148,10 +149,6 @@ void evlist__delete(struct evlist *evlist)
evlist__munmap(evlist);
evlist__close(evlist);
- perf_cpu_map__put(evlist->core.cpus);
- perf_thread_map__put(evlist->core.threads);
- evlist->core.cpus = NULL;
- evlist->core.threads = NULL;
evlist__purge(evlist);
evlist__exit(evlist);
free(evlist);
@@ -186,6 +183,30 @@ void perf_evlist__splice_list_tail(struct evlist *evlist,
}
}
+int __evlist__set_tracepoints_handlers(struct evlist *evlist,
+ const struct evsel_str_handler *assocs, size_t nr_assocs)
+{
+ struct evsel *evsel;
+ size_t i;
+ int err;
+
+ for (i = 0; i < nr_assocs; i++) {
+ // If the handler's event isn't in this evlist, just ignore it.
+ evsel = perf_evlist__find_tracepoint_by_name(evlist, assocs[i].name);
+ if (evsel == NULL)
+ continue;
+
+ err = -EEXIST;
+ if (evsel->handler != NULL)
+ goto out;
+ evsel->handler = assocs[i].handler;
+ }
+
+ err = 0;
+out:
+ return err;
+}
+
void __perf_evlist__set_leader(struct list_head *list)
{
struct evsel *evsel, *leader;
@@ -403,19 +424,9 @@ int evlist__add_pollfd(struct evlist *evlist, int fd)
return perf_evlist__add_pollfd(&evlist->core, fd, NULL, POLLIN);
}
-static void perf_evlist__munmap_filtered(struct fdarray *fda, int fd,
- void *arg __maybe_unused)
-{
- struct mmap *map = fda->priv[fd].ptr;
-
- if (map)
- perf_mmap__put(map);
-}
-
int evlist__filter_pollfd(struct evlist *evlist, short revents_and_mask)
{
- return fdarray__filter(&evlist->core.pollfd, revents_and_mask,
- perf_evlist__munmap_filtered, NULL);
+ return perf_evlist__filter_pollfd(&evlist->core, revents_and_mask);
}
int evlist__poll(struct evlist *evlist, int timeout)
@@ -423,22 +434,6 @@ int evlist__poll(struct evlist *evlist, int timeout)
return perf_evlist__poll(&evlist->core, timeout);
}
-static void perf_evlist__set_sid_idx(struct evlist *evlist,
- struct evsel *evsel, int idx, int cpu,
- int thread)
-{
- struct perf_sample_id *sid = SID(evsel, cpu, thread);
- sid->idx = idx;
- if (evlist->core.cpus && cpu >= 0)
- sid->cpu = evlist->core.cpus->map[cpu];
- else
- sid->cpu = -1;
- if (!evsel->core.system_wide && evlist->core.threads && thread >= 0)
- sid->tid = perf_thread_map__pid(evlist->core.threads, thread);
- else
- sid->tid = -1;
-}
-
struct perf_sample_id *perf_evlist__id2sid(struct evlist *evlist, u64 id)
{
struct hlist_head *head;
@@ -577,11 +572,11 @@ static void evlist__munmap_nofree(struct evlist *evlist)
if (evlist->mmap)
for (i = 0; i < evlist->core.nr_mmaps; i++)
- perf_mmap__munmap(&evlist->mmap[i]);
+ perf_mmap__munmap(&evlist->mmap[i].core);
if (evlist->overwrite_mmap)
for (i = 0; i < evlist->core.nr_mmaps; i++)
- perf_mmap__munmap(&evlist->overwrite_mmap[i]);
+ perf_mmap__munmap(&evlist->overwrite_mmap[i].core);
}
void evlist__munmap(struct evlist *evlist)
@@ -591,22 +586,26 @@ void evlist__munmap(struct evlist *evlist)
zfree(&evlist->overwrite_mmap);
}
+static void perf_mmap__unmap_cb(struct perf_mmap *map)
+{
+ struct mmap *m = container_of(map, struct mmap, core);
+
+ mmap__munmap(m);
+}
+
static struct mmap *evlist__alloc_mmap(struct evlist *evlist,
bool overwrite)
{
int i;
struct mmap *map;
- evlist->core.nr_mmaps = perf_cpu_map__nr(evlist->core.cpus);
- if (perf_cpu_map__empty(evlist->core.cpus))
- evlist->core.nr_mmaps = perf_thread_map__nr(evlist->core.threads);
map = zalloc(evlist->core.nr_mmaps * sizeof(struct mmap));
if (!map)
return NULL;
for (i = 0; i < evlist->core.nr_mmaps; i++) {
- map[i].core.fd = -1;
- map[i].core.overwrite = overwrite;
+ struct perf_mmap *prev = i ? &map[i - 1].core : NULL;
+
/*
* When the perf_mmap() call is made we grab one refcount, plus
* one extra to let perf_mmap__consume() get the last
@@ -616,151 +615,56 @@ static struct mmap *evlist__alloc_mmap(struct evlist *evlist,
* Each PERF_EVENT_IOC_SET_OUTPUT points to this mmap and
* thus does perf_mmap__get() on it.
*/
- refcount_set(&map[i].core.refcnt, 0);
+ perf_mmap__init(&map[i].core, prev, overwrite, perf_mmap__unmap_cb);
}
+
return map;
}
-static bool
-perf_evlist__should_poll(struct evlist *evlist __maybe_unused,
- struct evsel *evsel)
+static void
+perf_evlist__mmap_cb_idx(struct perf_evlist *_evlist,
+ struct perf_mmap_param *_mp,
+ int idx, bool per_cpu)
{
- if (evsel->core.attr.write_backward)
- return false;
- return true;
+ struct evlist *evlist = container_of(_evlist, struct evlist, core);
+ struct mmap_params *mp = container_of(_mp, struct mmap_params, core);
+
+ auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, idx, per_cpu);
}
-static int evlist__mmap_per_evsel(struct evlist *evlist, int idx,
- struct mmap_params *mp, int cpu_idx,
- int thread, int *_output, int *_output_overwrite)
+static struct perf_mmap*
+perf_evlist__mmap_cb_get(struct perf_evlist *_evlist, bool overwrite, int idx)
{
- struct evsel *evsel;
- int revent;
- int evlist_cpu = cpu_map__cpu(evlist->core.cpus, cpu_idx);
-
- evlist__for_each_entry(evlist, evsel) {
- struct mmap *maps = evlist->mmap;
- int *output = _output;
- int fd;
- int cpu;
-
- mp->prot = PROT_READ | PROT_WRITE;
- if (evsel->core.attr.write_backward) {
- output = _output_overwrite;
- maps = evlist->overwrite_mmap;
-
- if (!maps) {
- maps = evlist__alloc_mmap(evlist, true);
- if (!maps)
- return -1;
- evlist->overwrite_mmap = maps;
- if (evlist->bkw_mmap_state == BKW_MMAP_NOTREADY)
- perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING);
- }
- mp->prot &= ~PROT_WRITE;
- }
-
- if (evsel->core.system_wide && thread)
- continue;
+ struct evlist *evlist = container_of(_evlist, struct evlist, core);
+ struct mmap *maps;
- cpu = perf_cpu_map__idx(evsel->core.cpus, evlist_cpu);
- if (cpu == -1)
- continue;
-
- fd = FD(evsel, cpu, thread);
+ maps = overwrite ? evlist->overwrite_mmap : evlist->mmap;
- if (*output == -1) {
- *output = fd;
+ if (!maps) {
+ maps = evlist__alloc_mmap(evlist, overwrite);
+ if (!maps)
+ return NULL;
- if (perf_mmap__mmap(&maps[idx], mp, *output, evlist_cpu) < 0)
- return -1;
+ if (overwrite) {
+ evlist->overwrite_mmap = maps;
+ if (evlist->bkw_mmap_state == BKW_MMAP_NOTREADY)
+ perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING);
} else {
- if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0)
- return -1;
-
- perf_mmap__get(&maps[idx]);
- }
-
- revent = perf_evlist__should_poll(evlist, evsel) ? POLLIN : 0;
-
- /*
- * The system_wide flag causes a selected event to be opened
- * always without a pid. Consequently it will never get a
- * POLLHUP, but it is used for tracking in combination with
- * other events, so it should not need to be polled anyway.
- * Therefore don't add it for polling.
- */
- if (!evsel->core.system_wide &&
- perf_evlist__add_pollfd(&evlist->core, fd, &maps[idx], revent) < 0) {
- perf_mmap__put(&maps[idx]);
- return -1;
- }
-
- if (evsel->core.attr.read_format & PERF_FORMAT_ID) {
- if (perf_evlist__id_add_fd(&evlist->core, &evsel->core, cpu, thread,
- fd) < 0)
- return -1;
- perf_evlist__set_sid_idx(evlist, evsel, idx, cpu,
- thread);
+ evlist->mmap = maps;
}
}
- return 0;
+ return &maps[idx].core;
}
-static int evlist__mmap_per_cpu(struct evlist *evlist,
- struct mmap_params *mp)
+static int
+perf_evlist__mmap_cb_mmap(struct perf_mmap *_map, struct perf_mmap_param *_mp,
+ int output, int cpu)
{
- int cpu, thread;
- int nr_cpus = perf_cpu_map__nr(evlist->core.cpus);
- int nr_threads = perf_thread_map__nr(evlist->core.threads);
-
- pr_debug2("perf event ring buffer mmapped per cpu\n");
- for (cpu = 0; cpu < nr_cpus; cpu++) {
- int output = -1;
- int output_overwrite = -1;
-
- auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, cpu,
- true);
+ struct mmap *map = container_of(_map, struct mmap, core);
+ struct mmap_params *mp = container_of(_mp, struct mmap_params, core);
- for (thread = 0; thread < nr_threads; thread++) {
- if (evlist__mmap_per_evsel(evlist, cpu, mp, cpu,
- thread, &output, &output_overwrite))
- goto out_unmap;
- }
- }
-
- return 0;
-
-out_unmap:
- evlist__munmap_nofree(evlist);
- return -1;
-}
-
-static int evlist__mmap_per_thread(struct evlist *evlist,
- struct mmap_params *mp)
-{
- int thread;
- int nr_threads = perf_thread_map__nr(evlist->core.threads);
-
- pr_debug2("perf event ring buffer mmapped per thread\n");
- for (thread = 0; thread < nr_threads; thread++) {
- int output = -1;
- int output_overwrite = -1;
-
- auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, thread,
- false);
-
- if (evlist__mmap_per_evsel(evlist, thread, mp, 0, thread,
- &output, &output_overwrite))
- goto out_unmap;
- }
-
- return 0;
-
-out_unmap:
- evlist__munmap_nofree(evlist);
- return -1;
+ return mmap__mmap(map, mp, output, cpu);
}
unsigned long perf_event_mlock_kb_in_pages(void)
@@ -890,43 +794,30 @@ int evlist__mmap_ex(struct evlist *evlist, unsigned int pages,
bool auxtrace_overwrite, int nr_cblocks, int affinity, int flush,
int comp_level)
{
- struct evsel *evsel;
- const struct perf_cpu_map *cpus = evlist->core.cpus;
- const struct perf_thread_map *threads = evlist->core.threads;
/*
* Delay setting mp.prot: set it before calling perf_mmap__mmap.
* Its value is decided by evsel's write_backward.
* So &mp should not be passed through const pointer.
*/
- struct mmap_params mp = { .nr_cblocks = nr_cblocks, .affinity = affinity, .flush = flush,
- .comp_level = comp_level };
-
- if (!evlist->mmap)
- evlist->mmap = evlist__alloc_mmap(evlist, false);
- if (!evlist->mmap)
- return -ENOMEM;
-
- if (evlist->core.pollfd.entries == NULL && perf_evlist__alloc_pollfd(&evlist->core) < 0)
- return -ENOMEM;
+ struct mmap_params mp = {
+ .nr_cblocks = nr_cblocks,
+ .affinity = affinity,
+ .flush = flush,
+ .comp_level = comp_level
+ };
+ struct perf_evlist_mmap_ops ops = {
+ .idx = perf_evlist__mmap_cb_idx,
+ .get = perf_evlist__mmap_cb_get,
+ .mmap = perf_evlist__mmap_cb_mmap,
+ };
evlist->core.mmap_len = evlist__mmap_size(pages);
pr_debug("mmap size %zuB\n", evlist->core.mmap_len);
- mp.mask = evlist->core.mmap_len - page_size - 1;
auxtrace_mmap_params__init(&mp.auxtrace_mp, evlist->core.mmap_len,
auxtrace_pages, auxtrace_overwrite);
- evlist__for_each_entry(evlist, evsel) {
- if ((evsel->core.attr.read_format & PERF_FORMAT_ID) &&
- evsel->core.sample_id == NULL &&
- perf_evsel__alloc_id(&evsel->core, perf_cpu_map__nr(cpus), threads->nr) < 0)
- return -ENOMEM;
- }
-
- if (perf_cpu_map__empty(cpus))
- return evlist__mmap_per_thread(evlist, &mp);
-
- return evlist__mmap_per_cpu(evlist, &mp);
+ return perf_evlist__mmap_ops(&evlist->core, &ops, &mp.core);
}
int evlist__mmap(struct evlist *evlist, unsigned int pages)
@@ -1029,6 +920,9 @@ int perf_evlist__set_tp_filter(struct evlist *evlist, const char *filter)
struct evsel *evsel;
int err = 0;
+ if (filter == NULL)
+ return -1;
+
evlist__for_each_entry(evlist, evsel) {
if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
continue;
@@ -1041,16 +935,35 @@ int perf_evlist__set_tp_filter(struct evlist *evlist, const char *filter)
return err;
}
-int perf_evlist__set_tp_filter_pids(struct evlist *evlist, size_t npids, pid_t *pids)
+int perf_evlist__append_tp_filter(struct evlist *evlist, const char *filter)
+{
+ struct evsel *evsel;
+ int err = 0;
+
+ if (filter == NULL)
+ return -1;
+
+ evlist__for_each_entry(evlist, evsel) {
+ if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
+ continue;
+
+ err = perf_evsel__append_tp_filter(evsel, filter);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+char *asprintf__tp_filter_pids(size_t npids, pid_t *pids)
{
char *filter;
- int ret = -1;
size_t i;
for (i = 0; i < npids; ++i) {
if (i == 0) {
if (asprintf(&filter, "common_pid != %d", pids[i]) < 0)
- return -1;
+ return NULL;
} else {
char *tmp;
@@ -1062,9 +975,18 @@ int perf_evlist__set_tp_filter_pids(struct evlist *evlist, size_t npids, pid_t *
}
}
- ret = perf_evlist__set_tp_filter(evlist, filter);
+ return filter;
out_free:
free(filter);
+ return NULL;
+}
+
+int perf_evlist__set_tp_filter_pids(struct evlist *evlist, size_t npids, pid_t *pids)
+{
+ char *filter = asprintf__tp_filter_pids(npids, pids);
+ int ret = perf_evlist__set_tp_filter(evlist, filter);
+
+ free(filter);
return ret;
}
@@ -1073,6 +995,20 @@ int perf_evlist__set_tp_filter_pid(struct evlist *evlist, pid_t pid)
return perf_evlist__set_tp_filter_pids(evlist, 1, &pid);
}
+int perf_evlist__append_tp_filter_pids(struct evlist *evlist, size_t npids, pid_t *pids)
+{
+ char *filter = asprintf__tp_filter_pids(npids, pids);
+ int ret = perf_evlist__append_tp_filter(evlist, filter);
+
+ free(filter);
+ return ret;
+}
+
+int perf_evlist__append_tp_filter_pid(struct evlist *evlist, pid_t pid)
+{
+ return perf_evlist__append_tp_filter_pids(evlist, 1, &pid);
+}
+
bool perf_evlist__valid_sample_type(struct evlist *evlist)
{
struct evsel *pos;
@@ -1729,9 +1665,9 @@ static void *perf_evlist__poll_thread(void *arg)
struct mmap *map = &evlist->mmap[i];
union perf_event *event;
- if (perf_mmap__read_init(map))
+ if (perf_mmap__read_init(&map->core))
continue;
- while ((event = perf_mmap__read_event(map)) != NULL) {
+ while ((event = perf_mmap__read_event(&map->core)) != NULL) {
struct evsel *evsel = perf_evlist__event2evsel(evlist, event);
if (evsel && evsel->side_band.cb)
@@ -1739,10 +1675,10 @@ static void *perf_evlist__poll_thread(void *arg)
else
pr_warning("cannot locate proper evsel for the side band event\n");
- perf_mmap__consume(map);
+ perf_mmap__consume(&map->core);
got_data = true;
}
- perf_mmap__read_done(map);
+ perf_mmap__read_done(&map->core);
}
if (draining && !got_data)
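With the filter builder factored out, asprintf__tp_filter_pids() for pids {1, 2} yields the tracepoint filter string "common_pid != 1 && common_pid != 2" (the "&&" joins happen in the asprintf branch elided from this hunk), and the new append variants AND it onto whatever filter the events already carry. A hedged usage sketch for keeping a tool's own pid out of its tracepoint samples:

	#include <unistd.h>

	/* Hypothetical: exclude the tool's own pid from tracepoint samples. */
	pid_t self = getpid();

	if (perf_evlist__append_tp_filter_pids(evlist, 1, &self))
		pr_err("failed to append tracepoint pid filter\n");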
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index 7cfe75522ba5..3655b9ebb147 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -118,6 +118,13 @@ void perf_evlist__stop_sb_thread(struct evlist *evlist);
int perf_evlist__add_newtp(struct evlist *evlist,
const char *sys, const char *name, void *handler);
+int __evlist__set_tracepoints_handlers(struct evlist *evlist,
+ const struct evsel_str_handler *assocs,
+ size_t nr_assocs);
+
+#define evlist__set_tracepoints_handlers(evlist, array) \
+ __evlist__set_tracepoints_handlers(evlist, array, ARRAY_SIZE(array))
+
void __perf_evlist__set_sample_bit(struct evlist *evlist,
enum perf_event_sample_format bit);
void __perf_evlist__reset_sample_bit(struct evlist *evlist,
@@ -133,6 +140,11 @@ int perf_evlist__set_tp_filter(struct evlist *evlist, const char *filter);
int perf_evlist__set_tp_filter_pid(struct evlist *evlist, pid_t pid);
int perf_evlist__set_tp_filter_pids(struct evlist *evlist, size_t npids, pid_t *pids);
+int perf_evlist__append_tp_filter(struct evlist *evlist, const char *filter);
+
+int perf_evlist__append_tp_filter_pid(struct evlist *evlist, pid_t pid);
+int perf_evlist__append_tp_filter_pids(struct evlist *evlist, size_t npids, pid_t *pids);
+
struct evsel *
perf_evlist__find_tracepoint_by_id(struct evlist *evlist, int id);
@@ -164,6 +176,7 @@ void perf_evlist__set_id_pos(struct evlist *evlist);
bool perf_can_sample_identifier(void);
bool perf_can_record_switch_events(void);
bool perf_can_record_cpu_wide(void);
+bool perf_can_aux_sample(void);
void perf_evlist__config(struct evlist *evlist, struct record_opts *opts,
struct callchain_param *callchain);
int record_opts__config(struct record_opts *opts);
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index abc7fda4a0fe..f4dea055b080 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -846,6 +846,11 @@ static void apply_config_terms(struct evsel *evsel,
case PERF_EVSEL__CONFIG_TERM_AUX_OUTPUT:
attr->aux_output = term->val.aux_output ? 1 : 0;
break;
+ case PERF_EVSEL__CONFIG_TERM_AUX_SAMPLE_SIZE:
+ /* Already applied by auxtrace */
+ break;
+ case PERF_EVSEL__CONFIG_TERM_CFG_CHG:
+ break;
default:
break;
}
@@ -905,6 +910,19 @@ static bool is_dummy_event(struct evsel *evsel)
(evsel->core.attr.config == PERF_COUNT_SW_DUMMY);
}
+struct perf_evsel_config_term *__perf_evsel__get_config_term(struct evsel *evsel,
+ enum evsel_term_type type)
+{
+ struct perf_evsel_config_term *term, *found_term = NULL;
+
+ list_for_each_entry(term, &evsel->config_terms, list) {
+ if (term->type == type)
+ found_term = term;
+ }
+
+ return found_term;
+}
+
/*
* The enable_on_exec/disabled value strategy:
*
@@ -1524,7 +1542,7 @@ static int __open_attr__fprintf(FILE *fp, const char *name, const char *val,
static void display_attr(struct perf_event_attr *attr)
{
- if (verbose >= 2) {
+ if (verbose >= 2 || debug_peo_args) {
fprintf(stderr, "%.60s\n", graph_dotted_line);
fprintf(stderr, "perf_event_attr:\n");
perf_event_attr__fprintf(stderr, attr, __open_attr__fprintf, NULL);
@@ -1540,7 +1558,7 @@ static int perf_event_open(struct evsel *evsel,
int fd;
while (1) {
- pr_debug2("sys_perf_event_open: pid %d cpu %d group_fd %d flags %#lx",
+ pr_debug2_peo("sys_perf_event_open: pid %d cpu %d group_fd %d flags %#lx",
pid, cpu, group_fd, flags);
fd = sys_perf_event_open(&evsel->core.attr, pid, cpu, group_fd, flags);
@@ -1560,9 +1578,9 @@ static int perf_event_open(struct evsel *evsel,
break;
}
- pr_debug2("\nsys_perf_event_open failed, error %d\n", -ENOTSUP);
+ pr_debug2_peo("\nsys_perf_event_open failed, error %d\n", -ENOTSUP);
evsel->core.attr.precise_ip--;
- pr_debug2("decreasing precise_ip by one (%d)\n", evsel->core.attr.precise_ip);
+ pr_debug2_peo("decreasing precise_ip by one (%d)\n", evsel->core.attr.precise_ip);
display_attr(&evsel->core.attr);
}
@@ -1574,7 +1592,7 @@ int evsel__open(struct evsel *evsel, struct perf_cpu_map *cpus,
{
int cpu, thread, nthreads;
unsigned long flags = PERF_FLAG_FD_CLOEXEC;
- int pid = -1, err;
+ int pid = -1, err, old_errno;
enum { NO_CHANGE, SET_TO_MAX, INCREASED_MAX } set_rlimit = NO_CHANGE;
if ((perf_missing_features.write_backward && evsel->core.attr.write_backward) ||
@@ -1681,12 +1699,12 @@ retry_open:
continue;
}
- pr_debug2("\nsys_perf_event_open failed, error %d\n",
+ pr_debug2_peo("\nsys_perf_event_open failed, error %d\n",
err);
goto try_fallback;
}
- pr_debug2(" = %d\n", fd);
+ pr_debug2_peo(" = %d\n", fd);
if (evsel->bpf_fd >= 0) {
int evt_fd = fd;
@@ -1727,8 +1745,8 @@ try_fallback:
*/
if (err == -EMFILE && set_rlimit < INCREASED_MAX) {
struct rlimit l;
- int old_errno = errno;
+ old_errno = errno;
if (getrlimit(RLIMIT_NOFILE, &l) == 0) {
if (set_rlimit == NO_CHANGE)
l.rlim_cur = l.rlim_max;
@@ -1754,71 +1772,74 @@ try_fallback:
*/
if (!perf_missing_features.aux_output && evsel->core.attr.aux_output) {
perf_missing_features.aux_output = true;
- pr_debug2("Kernel has no attr.aux_output support, bailing out\n");
+ pr_debug2_peo("Kernel has no attr.aux_output support, bailing out\n");
goto out_close;
} else if (!perf_missing_features.bpf && evsel->core.attr.bpf_event) {
perf_missing_features.bpf = true;
- pr_debug2("switching off bpf_event\n");
+ pr_debug2_peo("switching off bpf_event\n");
goto fallback_missing_features;
} else if (!perf_missing_features.ksymbol && evsel->core.attr.ksymbol) {
perf_missing_features.ksymbol = true;
- pr_debug2("switching off ksymbol\n");
+ pr_debug2_peo("switching off ksymbol\n");
goto fallback_missing_features;
} else if (!perf_missing_features.write_backward && evsel->core.attr.write_backward) {
perf_missing_features.write_backward = true;
- pr_debug2("switching off write_backward\n");
+ pr_debug2_peo("switching off write_backward\n");
goto out_close;
} else if (!perf_missing_features.clockid_wrong && evsel->core.attr.use_clockid) {
perf_missing_features.clockid_wrong = true;
- pr_debug2("switching off clockid\n");
+ pr_debug2_peo("switching off clockid\n");
goto fallback_missing_features;
} else if (!perf_missing_features.clockid && evsel->core.attr.use_clockid) {
perf_missing_features.clockid = true;
- pr_debug2("switching off use_clockid\n");
+ pr_debug2_peo("switching off use_clockid\n");
goto fallback_missing_features;
} else if (!perf_missing_features.cloexec && (flags & PERF_FLAG_FD_CLOEXEC)) {
perf_missing_features.cloexec = true;
- pr_debug2("switching off cloexec flag\n");
+ pr_debug2_peo("switching off cloexec flag\n");
goto fallback_missing_features;
} else if (!perf_missing_features.mmap2 && evsel->core.attr.mmap2) {
perf_missing_features.mmap2 = true;
- pr_debug2("switching off mmap2\n");
+ pr_debug2_peo("switching off mmap2\n");
goto fallback_missing_features;
} else if (!perf_missing_features.exclude_guest &&
(evsel->core.attr.exclude_guest || evsel->core.attr.exclude_host)) {
perf_missing_features.exclude_guest = true;
- pr_debug2("switching off exclude_guest, exclude_host\n");
+ pr_debug2_peo("switching off exclude_guest, exclude_host\n");
goto fallback_missing_features;
} else if (!perf_missing_features.sample_id_all) {
perf_missing_features.sample_id_all = true;
- pr_debug2("switching off sample_id_all\n");
+ pr_debug2_peo("switching off sample_id_all\n");
goto retry_sample_id;
} else if (!perf_missing_features.lbr_flags &&
(evsel->core.attr.branch_sample_type &
(PERF_SAMPLE_BRANCH_NO_CYCLES |
PERF_SAMPLE_BRANCH_NO_FLAGS))) {
perf_missing_features.lbr_flags = true;
- pr_debug2("switching off branch sample type no (cycles/flags)\n");
+ pr_debug2_peo("switching off branch sample type no (cycles/flags)\n");
goto fallback_missing_features;
} else if (!perf_missing_features.group_read &&
evsel->core.attr.inherit &&
(evsel->core.attr.read_format & PERF_FORMAT_GROUP) &&
perf_evsel__is_group_leader(evsel)) {
perf_missing_features.group_read = true;
- pr_debug2("switching off group read\n");
+ pr_debug2_peo("switching off group read\n");
goto fallback_missing_features;
}
out_close:
if (err)
threads->err_thread = thread;
+ old_errno = errno;
do {
while (--thread >= 0) {
- close(FD(evsel, cpu, thread));
+ if (FD(evsel, cpu, thread) >= 0)
+ close(FD(evsel, cpu, thread));
FD(evsel, cpu, thread) = -1;
}
thread = nthreads;
} while (--cpu >= 0);
+ errno = old_errno;
return err;
}
@@ -2206,6 +2227,19 @@ int perf_evsel__parse_sample(struct evsel *evsel, union perf_event *event,
array++;
}
+ if (type & PERF_SAMPLE_AUX) {
+ OVERFLOW_CHECK_u64(array);
+ sz = *array++;
+
+ OVERFLOW_CHECK(array, sz, max_size);
+ /* Undo swap of data */
+ if (swapped)
+ mem_bswap_64((char *)array, sz);
+ data->aux_sample.size = sz;
+ data->aux_sample.data = (char *)array;
+ array = (void *)array + sz;
+ }
+
return 0;
}
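The PERF_SAMPLE_AUX branch above consumes a two-part tail: a u64 byte count followed by that many bytes of raw AUX payload, which aux_sample.data aliases in place rather than copying. A minimal standalone reader over the same layout, assuming a host-endian buffer:

#include <stdint.h>
#include <stddef.h>
#include <string.h>

struct aux_view { uint64_t size; const uint8_t *data; };

/* Sketch: parse the PERF_SAMPLE_AUX tail from 'p' of 'len' bytes. */
static int parse_aux_tail(const uint8_t *p, size_t len, struct aux_view *out)
{
	uint64_t sz;

	if (len < sizeof(sz))
		return -1;
	memcpy(&sz, p, sizeof(sz));
	if (sz > len - sizeof(sz))
		return -1;                  /* mirrors OVERFLOW_CHECK() */
	out->size = sz;
	out->data = p + sizeof(sz);         /* aliases the event buffer */
	return 0;
}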
diff --git a/tools/perf/util/evsel_config.h b/tools/perf/util/evsel_config.h
index 8a7648037c18..1f8d2fe0b66e 100644
--- a/tools/perf/util/evsel_config.h
+++ b/tools/perf/util/evsel_config.h
@@ -25,6 +25,8 @@ enum evsel_term_type {
PERF_EVSEL__CONFIG_TERM_BRANCH,
PERF_EVSEL__CONFIG_TERM_PERCORE,
PERF_EVSEL__CONFIG_TERM_AUX_OUTPUT,
+ PERF_EVSEL__CONFIG_TERM_AUX_SAMPLE_SIZE,
+ PERF_EVSEL__CONFIG_TERM_CFG_CHG,
};
struct perf_evsel_config_term {
@@ -44,7 +46,18 @@ struct perf_evsel_config_term {
unsigned long max_events;
bool percore;
bool aux_output;
+ u32 aux_sample_size;
+ u64 cfg_chg;
} val;
bool weak;
};
+
+struct evsel;
+
+struct perf_evsel_config_term *__perf_evsel__get_config_term(struct evsel *evsel,
+ enum evsel_term_type type);
+
+#define perf_evsel__get_config_term(evsel, type) \
+ __perf_evsel__get_config_term(evsel, PERF_EVSEL__CONFIG_TERM_ ## type)
+
#endif // __PERF_EVSEL_CONFIG_H
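/*
 * A minimal usage sketch for the accessor above: the macro pastes the
 * short term name onto the PERF_EVSEL__CONFIG_TERM_ prefix, so
 *
 *	struct perf_evsel_config_term *term =
 *		perf_evsel__get_config_term(evsel, AUX_SAMPLE_SIZE);
 *
 * expands to __perf_evsel__get_config_term(evsel,
 * PERF_EVSEL__CONFIG_TERM_AUX_SAMPLE_SIZE), and term->val.aux_sample_size
 * holds the requested size when term is non-NULL.
 */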
diff --git a/tools/perf/util/evsel_fprintf.c b/tools/perf/util/evsel_fprintf.c
index 028df7afb0dc..3b4842840db0 100644
--- a/tools/perf/util/evsel_fprintf.c
+++ b/tools/perf/util/evsel_fprintf.c
@@ -125,13 +125,18 @@ int sample__fprintf_callchain(struct perf_sample *sample, int left_alignment,
callchain_cursor_commit(cursor);
while (1) {
+ struct symbol *sym;
+ struct map *map;
u64 addr = 0;
node = callchain_cursor_current(cursor);
if (!node)
break;
- if (node->sym && node->sym->ignore && print_skip_ignored)
+ sym = node->ms.sym;
+ map = node->ms.map;
+
+ if (sym && sym->ignore && print_skip_ignored)
goto next;
printed += fprintf(fp, "%-*.*s", left_alignment, left_alignment, " ");
@@ -142,42 +147,42 @@ int sample__fprintf_callchain(struct perf_sample *sample, int left_alignment,
if (print_ip)
printed += fprintf(fp, "%c%16" PRIx64, s, node->ip);
- if (node->map)
- addr = node->map->map_ip(node->map, node->ip);
+ if (map)
+ addr = map->map_ip(map, node->ip);
if (print_sym) {
printed += fprintf(fp, " ");
node_al.addr = addr;
- node_al.map = node->map;
+ node_al.map = map;
if (print_symoffset) {
- printed += __symbol__fprintf_symname_offs(node->sym, &node_al,
+ printed += __symbol__fprintf_symname_offs(sym, &node_al,
print_unknown_as_addr,
true, fp);
} else {
- printed += __symbol__fprintf_symname(node->sym, &node_al,
+ printed += __symbol__fprintf_symname(sym, &node_al,
print_unknown_as_addr, fp);
}
}
- if (print_dso && (!node->sym || !node->sym->inlined)) {
+ if (print_dso && (!sym || !sym->inlined)) {
printed += fprintf(fp, " (");
- printed += map__fprintf_dsoname(node->map, fp);
+ printed += map__fprintf_dsoname(map, fp);
printed += fprintf(fp, ")");
}
if (print_srcline)
- printed += map__fprintf_srcline(node->map, addr, "\n ", fp);
+ printed += map__fprintf_srcline(map, addr, "\n ", fp);
- if (node->sym && node->sym->inlined)
+ if (sym && sym->inlined)
printed += fprintf(fp, " (inlined)");
if (!print_oneline)
printed += fprintf(fp, "\n");
/* Add srccode here too? */
- if (bt_stop_list && node->sym &&
- strlist__has_entry(bt_stop_list, node->sym->name)) {
+ if (bt_stop_list && sym &&
+ strlist__has_entry(bt_stop_list, sym->name)) {
break;
}
diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h
index ca53a929e9fd..840f95cee349 100644
--- a/tools/perf/util/header.h
+++ b/tools/perf/util/header.h
@@ -52,10 +52,6 @@ enum perf_header_version {
PERF_HEADER_VERSION_2,
};
-enum perf_dir_version {
- PERF_DIR_VERSION = 1,
-};
-
struct perf_file_section {
u64 offset;
u64 size;
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index 7b6eaf5e0bda..0a8d72ae93ca 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -18,6 +18,7 @@
#include "srcline.h"
#include "symbol.h"
#include "thread.h"
+#include "block-info.h"
#include "ui/progress.h"
#include <errno.h>
#include <math.h>
@@ -80,6 +81,8 @@ void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
int symlen;
u16 len;
+ if (h->block_info)
+ return;
/*
* +4 accounts for '[x] ' priv level info
* +2 accounts for 0x prefix on raw addresses
@@ -109,13 +112,13 @@ void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);
if (h->branch_info) {
- if (h->branch_info->from.sym) {
- symlen = (int)h->branch_info->from.sym->namelen + 4;
+ if (h->branch_info->from.ms.sym) {
+ symlen = (int)h->branch_info->from.ms.sym->namelen + 4;
if (verbose > 0)
symlen += BITS_PER_LONG / 4 + 2 + 3;
hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
- symlen = dso__name_len(h->branch_info->from.map->dso);
+ symlen = dso__name_len(h->branch_info->from.ms.map->dso);
hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
} else {
symlen = unresolved_col_width + 4 + 2;
@@ -123,13 +126,13 @@ void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
}
- if (h->branch_info->to.sym) {
- symlen = (int)h->branch_info->to.sym->namelen + 4;
+ if (h->branch_info->to.ms.sym) {
+ symlen = (int)h->branch_info->to.ms.sym->namelen + 4;
if (verbose > 0)
symlen += BITS_PER_LONG / 4 + 2 + 3;
hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
- symlen = dso__name_len(h->branch_info->to.map->dso);
+ symlen = dso__name_len(h->branch_info->to.ms.map->dso);
hists__new_col_len(hists, HISTC_DSO_TO, symlen);
} else {
symlen = unresolved_col_width + 4 + 2;
@@ -146,8 +149,8 @@ void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
}
if (h->mem_info) {
- if (h->mem_info->daddr.sym) {
- symlen = (int)h->mem_info->daddr.sym->namelen + 4
+ if (h->mem_info->daddr.ms.sym) {
+ symlen = (int)h->mem_info->daddr.ms.sym->namelen + 4
+ unresolved_col_width + 2;
hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
symlen);
@@ -161,8 +164,8 @@ void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
symlen);
}
- if (h->mem_info->iaddr.sym) {
- symlen = (int)h->mem_info->iaddr.sym->namelen + 4
+ if (h->mem_info->iaddr.ms.sym) {
+ symlen = (int)h->mem_info->iaddr.ms.sym->namelen + 4
+ unresolved_col_width + 2;
hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
symlen);
@@ -172,8 +175,8 @@ void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
symlen);
}
- if (h->mem_info->daddr.map) {
- symlen = dso__name_len(h->mem_info->daddr.map->dso);
+ if (h->mem_info->daddr.ms.map) {
+ symlen = dso__name_len(h->mem_info->daddr.ms.map->dso);
hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
symlen);
} else {
@@ -440,13 +443,13 @@ static int hist_entry__init(struct hist_entry *he,
memcpy(he->branch_info, template->branch_info,
sizeof(*he->branch_info));
- map__get(he->branch_info->from.map);
- map__get(he->branch_info->to.map);
+ map__get(he->branch_info->from.ms.map);
+ map__get(he->branch_info->to.ms.map);
}
if (he->mem_info) {
- map__get(he->mem_info->iaddr.map);
- map__get(he->mem_info->daddr.map);
+ map__get(he->mem_info->iaddr.ms.map);
+ map__get(he->mem_info->daddr.ms.map);
}
if (hist_entry__has_callchains(he) && symbol_conf.use_callchain)
@@ -489,13 +492,13 @@ err_rawdata:
err_infos:
if (he->branch_info) {
- map__put(he->branch_info->from.map);
- map__put(he->branch_info->to.map);
+ map__put(he->branch_info->from.ms.map);
+ map__put(he->branch_info->to.ms.map);
zfree(&he->branch_info);
}
if (he->mem_info) {
- map__put(he->mem_info->iaddr.map);
- map__put(he->mem_info->daddr.map);
+ map__put(he->mem_info->iaddr.ms.map);
+ map__put(he->mem_info->daddr.ms.map);
}
err:
map__zput(he->ms.map);
@@ -689,6 +692,7 @@ __hists__add_entry(struct hists *hists,
.ino = ns ? ns->link_info[CGROUP_NS_INDEX].ino : 0,
},
.ms = {
+ .mg = al->mg,
.map = al->map,
.sym = al->sym,
},
@@ -755,6 +759,11 @@ struct hist_entry *hists__add_entry_block(struct hists *hists,
struct hist_entry entry = {
.block_info = block_info,
.hists = hists,
+ .ms = {
+ .mg = al->mg,
+ .map = al->map,
+ .sym = al->sym,
+ },
}, *he = hists__findnew_entry(hists, &entry, al, false);
return he;
@@ -886,8 +895,9 @@ iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
if (iter->curr >= iter->total)
return 0;
- al->map = bi[i].to.map;
- al->sym = bi[i].to.sym;
+ al->mg = bi[i].to.ms.mg;
+ al->map = bi[i].to.ms.map;
+ al->sym = bi[i].to.ms.sym;
al->addr = bi[i].to.addr;
return 1;
}
@@ -905,7 +915,7 @@ iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *a
bi = iter->priv;
- if (iter->hide_unresolved && !(bi[i].from.sym && bi[i].to.sym))
+ if (iter->hide_unresolved && !(bi[i].from.ms.sym && bi[i].to.ms.sym))
goto out;
/*
@@ -1062,6 +1072,7 @@ iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
.comm = thread__comm(al->thread),
.ip = al->addr,
.ms = {
+ .mg = al->mg,
.map = al->map,
.sym = al->sym,
},
@@ -1244,16 +1255,16 @@ void hist_entry__delete(struct hist_entry *he)
map__zput(he->ms.map);
if (he->branch_info) {
- map__zput(he->branch_info->from.map);
- map__zput(he->branch_info->to.map);
+ map__zput(he->branch_info->from.ms.map);
+ map__zput(he->branch_info->to.ms.map);
free_srcline(he->branch_info->srcline_from);
free_srcline(he->branch_info->srcline_to);
zfree(&he->branch_info);
}
if (he->mem_info) {
- map__zput(he->mem_info->iaddr.map);
- map__zput(he->mem_info->daddr.map);
+ map__zput(he->mem_info->iaddr.ms.map);
+ map__zput(he->mem_info->daddr.ms.map);
mem_info__zput(he->mem_info);
}
@@ -2569,7 +2580,8 @@ int hists__unlink(struct hists *hists)
}
void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
- struct perf_sample *sample, bool nonany_branch_mode)
+ struct perf_sample *sample, bool nonany_branch_mode,
+ u64 *total_cycles)
{
struct branch_info *bi;
@@ -2596,6 +2608,9 @@ void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
nonany_branch_mode ? NULL : prev,
bi[i].flags.cycles);
prev = &bi[i].to;
+
+ if (total_cycles)
+ *total_cycles += bi[i].flags.cycles;
}
free(bi);
}
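/*
 * A minimal caller sketch for the new total_cycles out-parameter;
 * passing NULL keeps the old behaviour, otherwise each branch entry's
 * cycle count is accumulated into the caller's running total:
 *
 *	u64 total_cycles = 0;
 *
 *	hist__account_cycles(sample->branch_stack, &al, sample,
 *			     nonany_branch_mode, &total_cycles);
 */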
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index 6a186b668303..45286900aacb 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -449,6 +449,8 @@ enum rstype {
A_SOURCE
};
+struct block_hist;
+
#ifdef HAVE_SLANG_SUPPORT
#include "../ui/keysyms.h"
void attr_to_script(char *buf, struct perf_event_attr *attr);
@@ -474,6 +476,10 @@ void run_script(char *cmd);
int res_sample_browse(struct res_sample *res_samples, int num_res,
struct evsel *evsel, enum rstype rstype);
void res_sample_init(void);
+
+int block_hists_tui_browse(struct block_hist *bh, struct evsel *evsel,
+ float min_percent, struct perf_env *env,
+ struct annotation_options *annotation_opts);
#else
static inline
int perf_evlist__tui_browse_hists(struct evlist *evlist __maybe_unused,
@@ -518,6 +524,15 @@ static inline int res_sample_browse(struct res_sample *res_samples __maybe_unuse
static inline void res_sample_init(void) {}
+static inline int block_hists_tui_browse(struct block_hist *bh __maybe_unused,
+ struct evsel *evsel __maybe_unused,
+ float min_percent __maybe_unused,
+ struct perf_env *env __maybe_unused,
+ struct annotation_options *annotation_opts __maybe_unused)
+{
+ return 0;
+}
+
#define K_LEFT -1000
#define K_RIGHT -2000
#define K_SWITCH_INPUT_DATA -3000
@@ -527,7 +542,8 @@ unsigned int hists__sort_list_width(struct hists *hists);
unsigned int hists__overhead_width(struct hists *hists);
void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
- struct perf_sample *sample, bool nonany_branch_mode);
+ struct perf_sample *sample, bool nonany_branch_mode,
+ u64 *total_cycles);
struct option;
int parse_filter_percentage(const struct option *opt, const char *arg, int unset);
diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
index a1c9eb6d4f40..409afc611be9 100644
--- a/tools/perf/util/intel-pt.c
+++ b/tools/perf/util/intel-pt.c
@@ -233,6 +233,16 @@ static void intel_pt_log_event(union perf_event *event)
perf_event__fprintf(event, f);
}
+static void intel_pt_dump_sample(struct perf_session *session,
+ struct perf_sample *sample)
+{
+ struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
+ auxtrace);
+
+ printf("\n");
+ intel_pt_dump(pt, sample->aux_sample.data, sample->aux_sample.size);
+}
+
static int intel_pt_do_fix_overlap(struct intel_pt *pt, struct auxtrace_buffer *a,
struct auxtrace_buffer *b)
{
@@ -836,6 +846,18 @@ static bool intel_pt_have_tsc(struct intel_pt *pt)
return have_tsc;
}
+static bool intel_pt_sampling_mode(struct intel_pt *pt)
+{
+ struct evsel *evsel;
+
+ evlist__for_each_entry(pt->session->evlist, evsel) {
+ if ((evsel->core.attr.sample_type & PERF_SAMPLE_AUX) &&
+ evsel->core.attr.aux_sample_size)
+ return true;
+ }
+ return false;
+}
+
static u64 intel_pt_ns_to_ticks(const struct intel_pt *pt, u64 ns)
{
u64 quot, rem;
@@ -2320,6 +2342,56 @@ static int intel_pt_process_timeless_queues(struct intel_pt *pt, pid_t tid,
return 0;
}
+static void intel_pt_sample_set_pid_tid_cpu(struct intel_pt_queue *ptq,
+ struct auxtrace_queue *queue,
+ struct perf_sample *sample)
+{
+ struct machine *m = ptq->pt->machine;
+
+ ptq->pid = sample->pid;
+ ptq->tid = sample->tid;
+ ptq->cpu = queue->cpu;
+
+ intel_pt_log("queue %u cpu %d pid %d tid %d\n",
+ ptq->queue_nr, ptq->cpu, ptq->pid, ptq->tid);
+
+ thread__zput(ptq->thread);
+
+ if (ptq->tid == -1)
+ return;
+
+ if (ptq->pid == -1) {
+ ptq->thread = machine__find_thread(m, -1, ptq->tid);
+ if (ptq->thread)
+ ptq->pid = ptq->thread->pid_;
+ return;
+ }
+
+ ptq->thread = machine__findnew_thread(m, ptq->pid, ptq->tid);
+}
+
+static int intel_pt_process_timeless_sample(struct intel_pt *pt,
+ struct perf_sample *sample)
+{
+ struct auxtrace_queue *queue;
+ struct intel_pt_queue *ptq;
+ u64 ts = 0;
+
+ queue = auxtrace_queues__sample_queue(&pt->queues, sample, pt->session);
+ if (!queue)
+ return -EINVAL;
+
+ ptq = queue->priv;
+ if (!ptq)
+ return 0;
+
+ ptq->stop = false;
+ ptq->time = sample->time;
+ intel_pt_sample_set_pid_tid_cpu(ptq, queue, sample);
+ intel_pt_run_decoder(ptq, &ts);
+ return 0;
+}
+
static int intel_pt_lost(struct intel_pt *pt, struct perf_sample *sample)
{
return intel_pt_synth_error(pt, INTEL_PT_ERR_LOST, sample->cpu,
@@ -2550,7 +2622,11 @@ static int intel_pt_process_event(struct perf_session *session,
}
if (pt->timeless_decoding) {
- if (event->header.type == PERF_RECORD_EXIT) {
+ if (pt->sampling_mode) {
+ if (sample->aux_sample.size)
+ err = intel_pt_process_timeless_sample(pt,
+ sample);
+ } else if (event->header.type == PERF_RECORD_EXIT) {
err = intel_pt_process_timeless_queues(pt,
event->fork.tid,
sample->time);
@@ -2676,6 +2752,28 @@ static int intel_pt_process_auxtrace_event(struct perf_session *session,
return 0;
}
+static int intel_pt_queue_data(struct perf_session *session,
+ struct perf_sample *sample,
+ union perf_event *event, u64 data_offset)
+{
+ struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
+ auxtrace);
+ u64 timestamp;
+
+ if (event) {
+ return auxtrace_queues__add_event(&pt->queues, session, event,
+ data_offset, NULL);
+ }
+
+ if (sample->time && sample->time != (u64)-1)
+ timestamp = perf_time_to_tsc(sample->time, &pt->tc);
+ else
+ timestamp = 0;
+
+ return auxtrace_queues__add_sample(&pt->queues, session, sample,
+ data_offset, timestamp);
+}
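/*
 * A minimal sketch of the time-to-TSC conversion used above, assuming
 * the usual mult/shift parameters carried in pt->tc (time_zero,
 * time_mult, time_shift):
 *
 *	t = ns - tc->time_zero;
 *	quot = t / tc->time_mult;
 *	rem  = t % tc->time_mult;
 *	tsc = (quot << tc->time_shift) +
 *	      (rem << tc->time_shift) / tc->time_mult;
 *
 * so that queued samples sort in the same TSC domain as AUX data.
 */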
+
struct intel_pt_synth {
struct perf_tool dummy_tool;
struct perf_session *session;
@@ -3178,7 +3276,7 @@ int intel_pt_process_auxtrace_info(union perf_event *event,
if (pt->timeless_decoding && !pt->tc.time_mult)
pt->tc.time_mult = 1;
pt->have_tsc = intel_pt_have_tsc(pt);
- pt->sampling_mode = false;
+ pt->sampling_mode = intel_pt_sampling_mode(pt);
pt->est_tsc = !pt->timeless_decoding;
pt->unknown_thread = thread__new(999999999, 999999999);
@@ -3205,6 +3303,8 @@ int intel_pt_process_auxtrace_info(union perf_event *event,
pt->auxtrace.process_event = intel_pt_process_event;
pt->auxtrace.process_auxtrace_event = intel_pt_process_auxtrace_event;
+ pt->auxtrace.queue_data = intel_pt_queue_data;
+ pt->auxtrace.dump_auxtrace_sample = intel_pt_dump_sample;
pt->auxtrace.flush_events = intel_pt_flush;
pt->auxtrace.free_events = intel_pt_free_events;
pt->auxtrace.free = intel_pt_free;
@@ -3282,7 +3382,10 @@ int intel_pt_process_auxtrace_info(union perf_event *event,
intel_pt_setup_pebs_events(pt);
- err = auxtrace_queues__process_index(&pt->queues, session);
+ if (pt->sampling_mode || list_empty(&session->auxtrace_index))
+ err = auxtrace_queue_data(session, true, true);
+ else
+ err = auxtrace_queues__process_index(&pt->queues, session);
if (err)
goto err_delete_thread;
diff --git a/tools/perf/util/llvm-utils.c b/tools/perf/util/llvm-utils.c
index 8b14e4a7f1dc..eae47c2509eb 100644
--- a/tools/perf/util/llvm-utils.c
+++ b/tools/perf/util/llvm-utils.c
@@ -418,10 +418,9 @@ void llvm__dump_obj(const char *path, void *obj_buf, size_t size)
goto out;
}
- pr_info("LLVM: dumping %s\n", obj_path);
+ pr_debug("LLVM: dumping %s\n", obj_path);
if (fwrite(obj_buf, size, 1, fp) != 1)
- pr_warning("WARNING: failed to write to file '%s': %s, skip object dumping\n",
- obj_path, strerror(errno));
+ pr_debug("WARNING: failed to write to file '%s': %s, skip object dumping\n", obj_path, strerror(errno));
fclose(fp);
out:
free(obj_path);
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 70a9f8716a4b..e2a312c649f0 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -42,6 +42,11 @@
static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock);
+static struct dso *machine__kernel_dso(struct machine *machine)
+{
+ return machine->vmlinux_map->dso;
+}
+
static void dsos__init(struct dsos *dsos)
{
INIT_LIST_HEAD(&dsos->head);
@@ -767,45 +772,16 @@ int machine__process_ksymbol(struct machine *machine __maybe_unused,
return machine__process_ksymbol_register(machine, event, sample);
}
-static void dso__adjust_kmod_long_name(struct dso *dso, const char *filename)
-{
- const char *dup_filename;
-
- if (!filename || !dso || !dso->long_name)
- return;
- if (dso->long_name[0] != '[')
- return;
- if (!strchr(filename, '/'))
- return;
-
- dup_filename = strdup(filename);
- if (!dup_filename)
- return;
-
- dso__set_long_name(dso, dup_filename, true);
-}
-
-struct map *machine__findnew_module_map(struct machine *machine, u64 start,
- const char *filename)
+static struct map *machine__addnew_module_map(struct machine *machine, u64 start,
+ const char *filename)
{
struct map *map = NULL;
- struct dso *dso = NULL;
struct kmod_path m;
+ struct dso *dso;
if (kmod_path__parse_name(&m, filename))
return NULL;
- map = map_groups__find_by_name(&machine->kmaps, m.name);
- if (map) {
- /*
- * If the map's dso is an offline module, give dso__load()
- * a chance to find the file path of that module by fixing
- * long_name.
- */
- dso__adjust_kmod_long_name(map->dso, filename);
- goto out;
- }
-
dso = machine__findnew_module_dso(machine, &m, filename);
if (dso == NULL)
goto out;
@@ -861,7 +837,7 @@ size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp)
{
int i;
size_t printed = 0;
- struct dso *kdso = machine__kernel_map(machine)->dso;
+ struct dso *kdso = machine__kernel_dso(machine);
if (kdso->has_build_id) {
char filename[PATH_MAX];
@@ -1057,7 +1033,7 @@ int machine__map_x86_64_entry_trampolines(struct machine *machine,
* In the vmlinux case, pgoff is a virtual address which must now be
* mapped to a vmlinux offset.
*/
- for (map = maps__first(maps); map; map = map__next(map)) {
+ maps__for_each_entry(maps, map) {
struct kmap *kmap = __map__kmap(map);
struct map *dest_map;
@@ -1404,7 +1380,7 @@ static int machine__create_module(void *arg, const char *name, u64 start,
if (arch__fix_module_text_start(&start, &size, name) < 0)
return -1;
- map = machine__findnew_module_map(machine, start, name);
+ map = machine__addnew_module_map(machine, start, name);
if (map == NULL)
return -1;
map->end = start + size;
@@ -1543,8 +1519,7 @@ static bool perf_event__is_extra_kernel_mmap(struct machine *machine,
static int machine__process_extra_kernel_map(struct machine *machine,
union perf_event *event)
{
- struct map *kernel_map = machine__kernel_map(machine);
- struct dso *kernel = kernel_map ? kernel_map->dso : NULL;
+ struct dso *kernel = machine__kernel_dso(machine);
struct extra_kernel_map xm = {
.start = event->mmap.start,
.end = event->mmap.start + event->mmap.len,
@@ -1580,8 +1555,8 @@ static int machine__process_kernel_mmap_event(struct machine *machine,
strlen(machine->mmap_name) - 1) == 0;
if (event->mmap.filename[0] == '/' ||
(!is_kernel_mmap && event->mmap.filename[0] == '[')) {
- map = machine__findnew_module_map(machine, event->mmap.start,
- event->mmap.filename);
+ map = machine__addnew_module_map(machine, event->mmap.start,
+ event->mmap.filename);
if (map == NULL)
goto out_problem;
@@ -1676,6 +1651,12 @@ int machine__process_mmap2_event(struct machine *machine,
{
struct thread *thread;
struct map *map;
+ struct dso_id dso_id = {
+ .maj = event->mmap2.maj,
+ .min = event->mmap2.min,
+ .ino = event->mmap2.ino,
+ .ino_generation = event->mmap2.ino_generation,
+ };
int ret = 0;
if (dump_trace)
@@ -1696,10 +1677,7 @@ int machine__process_mmap2_event(struct machine *machine,
map = map__new(machine, event->mmap2.start,
event->mmap2.len, event->mmap2.pgoff,
- event->mmap2.maj,
- event->mmap2.min, event->mmap2.ino,
- event->mmap2.ino_generation,
- event->mmap2.prot,
+ &dso_id, event->mmap2.prot,
event->mmap2.flags,
event->mmap2.filename, thread);
@@ -1752,9 +1730,7 @@ int machine__process_mmap_event(struct machine *machine, union perf_event *event
map = map__new(machine, event->mmap.start,
event->mmap.len, event->mmap.pgoff,
- 0, 0, 0, 0, prot, 0,
- event->mmap.filename,
- thread);
+ NULL, prot, 0, event->mmap.filename, thread);
if (map == NULL)
goto out_problem_map;
@@ -1964,8 +1940,9 @@ static void ip__resolve_ams(struct thread *thread,
ams->addr = ip;
ams->al_addr = al.addr;
- ams->sym = al.sym;
- ams->map = al.map;
+ ams->ms.mg = al.mg;
+ ams->ms.sym = al.sym;
+ ams->ms.map = al.map;
ams->phys_addr = 0;
}
@@ -1981,8 +1958,9 @@ static void ip__resolve_data(struct thread *thread,
ams->addr = addr;
ams->al_addr = al.addr;
- ams->sym = al.sym;
- ams->map = al.map;
+ ams->ms.mg = al.mg;
+ ams->ms.sym = al.sym;
+ ams->ms.map = al.map;
ams->phys_addr = phys_addr;
}
@@ -2002,8 +1980,9 @@ struct mem_info *sample__resolve_mem(struct perf_sample *sample,
return mi;
}
-static char *callchain_srcline(struct map *map, struct symbol *sym, u64 ip)
+static char *callchain_srcline(struct map_symbol *ms, u64 ip)
{
+ struct map *map = ms->map;
char *srcline = NULL;
if (!map || callchain_param.key == CCKEY_FUNCTION)
@@ -2015,7 +1994,7 @@ static char *callchain_srcline(struct map *map, struct symbol *sym, u64 ip)
bool show_addr = callchain_param.key == CCKEY_ADDRESS;
srcline = get_srcline(map->dso, map__rip_2objdump(map, ip),
- sym, show_sym, show_addr, ip);
+ ms->sym, show_sym, show_addr, ip);
srcline__tree_insert(&map->dso->srclines, ip, srcline);
}
@@ -2038,6 +2017,7 @@ static int add_callchain_ip(struct thread *thread,
struct iterations *iter,
u64 branch_from)
{
+ struct map_symbol ms;
struct addr_location al;
int nr_loop_iter = 0;
u64 iter_cycles = 0;
@@ -2095,8 +2075,11 @@ static int add_callchain_ip(struct thread *thread,
iter_cycles = iter->cycles;
}
- srcline = callchain_srcline(al.map, al.sym, al.addr);
- return callchain_cursor_append(cursor, ip, al.map, al.sym,
+ ms.mg = al.mg;
+ ms.map = al.map;
+ ms.sym = al.sym;
+ srcline = callchain_srcline(&ms, al.addr);
+ return callchain_cursor_append(cursor, ip, &ms,
branch, flags, nr_loop_iter,
iter_cycles, branch_from, srcline);
}
@@ -2403,7 +2386,7 @@ static int thread__resolve_callchain_sample(struct thread *thread,
}
check_calls:
- if (callchain_param.order != ORDER_CALLEE) {
+ if (chain && callchain_param.order != ORDER_CALLEE) {
err = find_prev_cpumode(chain, thread, cursor, parent, root_al,
&cpumode, chain->nr - first_call);
if (err)
@@ -2444,9 +2427,10 @@ check_calls:
return 0;
}
-static int append_inlines(struct callchain_cursor *cursor,
- struct map *map, struct symbol *sym, u64 ip)
+static int append_inlines(struct callchain_cursor *cursor, struct map_symbol *ms, u64 ip)
{
+ struct symbol *sym = ms->sym;
+ struct map *map = ms->map;
struct inline_node *inline_node;
struct inline_list *ilist;
u64 addr;
@@ -2467,8 +2451,11 @@ static int append_inlines(struct callchain_cursor *cursor,
}
list_for_each_entry(ilist, &inline_node->val, list) {
- ret = callchain_cursor_append(cursor, ip, map,
- ilist->symbol, false,
+ struct map_symbol ilist_ms = {
+ .map = map,
+ .sym = ilist->symbol,
+ };
+ ret = callchain_cursor_append(cursor, ip, &ilist_ms, false,
NULL, 0, 0, 0, ilist->srcline);
if (ret != 0)
@@ -2484,22 +2471,21 @@ static int unwind_entry(struct unwind_entry *entry, void *arg)
const char *srcline = NULL;
u64 addr = entry->ip;
- if (symbol_conf.hide_unresolved && entry->sym == NULL)
+ if (symbol_conf.hide_unresolved && entry->ms.sym == NULL)
return 0;
- if (append_inlines(cursor, entry->map, entry->sym, entry->ip) == 0)
+ if (append_inlines(cursor, &entry->ms, entry->ip) == 0)
return 0;
/*
* Convert entry->ip from a virtual address to an offset in
* its corresponding binary.
*/
- if (entry->map)
- addr = map__map_ip(entry->map, entry->ip);
+ if (entry->ms.map)
+ addr = map__map_ip(entry->ms.map, entry->ip);
- srcline = callchain_srcline(entry->map, entry->sym, addr);
- return callchain_cursor_append(cursor, entry->ip,
- entry->map, entry->sym,
+ srcline = callchain_srcline(&entry->ms, addr);
+ return callchain_cursor_append(cursor, entry->ip, &entry->ms,
false, NULL, 0, 0, 0, srcline);
}
@@ -2725,9 +2711,14 @@ out:
return addr_cpumode;
}
+struct dso *machine__findnew_dso_id(struct machine *machine, const char *filename, struct dso_id *id)
+{
+ return dsos__findnew_id(&machine->dsos, filename, id);
+}
+
struct dso *machine__findnew_dso(struct machine *machine, const char *filename)
{
- return dsos__findnew(&machine->dsos, filename);
+ return machine__findnew_dso_id(machine, filename, NULL);
}
char *machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp)
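/*
 * A minimal sketch of the dso_id aggregate used above, assuming it
 * simply bundles the MMAP2 identity fields that map__new() previously
 * took as four separate arguments (the plain MMAP path passes NULL,
 * meaning no identity is known):
 *
 *	struct dso_id {
 *		u32 maj;
 *		u32 min;
 *		u64 ino;
 *		u64 ino_generation;
 *	};
 */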
diff --git a/tools/perf/util/machine.h b/tools/perf/util/machine.h
index 18e13c0ccd6a..499be204830d 100644
--- a/tools/perf/util/machine.h
+++ b/tools/perf/util/machine.h
@@ -11,6 +11,7 @@
struct addr_location;
struct branch_stack;
struct dso;
+struct dso_id;
struct evsel;
struct perf_sample;
struct symbol;
@@ -202,6 +203,7 @@ int machine__nr_cpus_avail(struct machine *machine);
struct thread *__machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid);
struct thread *machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid);
+struct dso *machine__findnew_dso_id(struct machine *machine, const char *filename, struct dso_id *id);
struct dso *machine__findnew_dso(struct machine *machine, const char *filename);
size_t machine__fprintf(struct machine *machine, FILE *fp);
@@ -221,8 +223,6 @@ struct symbol *machine__find_kernel_symbol_by_name(struct machine *machine,
return map_groups__find_symbol_by_name(&machine->kmaps, name, mapp);
}
-struct map *machine__findnew_module_map(struct machine *machine, u64 start,
- const char *filename);
int arch__fix_module_text_start(u64 *start, u64 *size, const char *name);
int machine__load_kallsyms(struct machine *machine, const char *filename);
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
index eec9b282c047..744bfbaf35cf 100644
--- a/tools/perf/util/map.c
+++ b/tools/perf/util/map.c
@@ -26,7 +26,6 @@
#include "ui/ui.h"
static void __maps__insert(struct maps *maps, struct map *map);
-static void __maps__insert_name(struct maps *maps, struct map *map);
static inline int is_anon_memory(const char *filename, u32 flags)
{
@@ -140,14 +139,13 @@ void map__init(struct map *map, u64 start, u64 end, u64 pgoff, struct dso *dso)
map->map_ip = map__map_ip;
map->unmap_ip = map__unmap_ip;
RB_CLEAR_NODE(&map->rb_node);
- map->groups = NULL;
map->erange_warned = false;
refcount_set(&map->refcnt, 1);
}
struct map *map__new(struct machine *machine, u64 start, u64 len,
- u64 pgoff, u32 d_maj, u32 d_min, u64 ino,
- u64 ino_gen, u32 prot, u32 flags, char *filename,
+ u64 pgoff, struct dso_id *id,
+ u32 prot, u32 flags, char *filename,
struct thread *thread)
{
struct map *map = malloc(sizeof(*map));
@@ -163,11 +161,6 @@ struct map *map__new(struct machine *machine, u64 start, u64 len,
anon = is_anon_memory(filename, flags);
vdso = is_vdso_map(filename);
no_dso = is_no_dso_memory(filename);
-
- map->maj = d_maj;
- map->min = d_min;
- map->ino = ino;
- map->ino_generation = ino_gen;
map->prot = prot;
map->flags = flags;
nsi = nsinfo__get(thread->nsinfo);
@@ -197,7 +190,7 @@ struct map *map__new(struct machine *machine, u64 start, u64 len,
pgoff = 0;
dso = machine__findnew_vdso(machine, thread);
} else
- dso = machine__findnew_dso(machine, filename);
+ dso = machine__findnew_dso_id(machine, filename, id);
if (dso == NULL)
goto out_delete;
@@ -244,18 +237,11 @@ struct map *map__new2(u64 start, struct dso *dso)
return map;
}
-/*
- * Use this and __map__is_kmodule() for map instances that are in
- * machine->kmaps, and thus have map->groups->machine all properly set, to
- * disambiguate between the kernel and modules.
- *
- * When the need arises, introduce map__is_{kernel,kmodule)() that
- * checks (map->groups != NULL && map->groups->machine != NULL &&
- * map->dso->kernel) before calling __map__is_{kernel,kmodule}())
- */
bool __map__is_kernel(const struct map *map)
{
- return machine__kernel_map(map->groups->machine) == map;
+ if (!map->dso->kernel)
+ return false;
+ return machine__kernel_map(map__kmaps((struct map *)map)->machine) == map;
}
bool __map__is_extra_kernel_map(const struct map *map)
@@ -288,7 +274,7 @@ bool map__has_symbols(const struct map *map)
static void map__exit(struct map *map)
{
- BUG_ON(!RB_EMPTY_NODE(&map->rb_node));
+ BUG_ON(refcount_read(&map->refcnt) != 0);
dso__zput(map->dso);
}
@@ -395,7 +381,6 @@ struct map *map__clone(struct map *from)
refcount_set(&map->refcnt, 1);
RB_CLEAR_NODE(&map->rb_node);
dso__get(map->dso);
- map->groups = NULL;
}
return map;
@@ -575,7 +560,6 @@ u64 map__objdump_2mem(struct map *map, u64 ip)
static void maps__init(struct maps *maps)
{
maps->entries = RB_ROOT;
- maps->names = RB_ROOT;
init_rwsem(&maps->lock);
}
@@ -583,39 +567,72 @@ void map_groups__init(struct map_groups *mg, struct machine *machine)
{
maps__init(&mg->maps);
mg->machine = machine;
+ mg->last_search_by_name = NULL;
+ mg->nr_maps = 0;
+ mg->maps_by_name = NULL;
refcount_set(&mg->refcnt, 1);
}
-void map_groups__insert(struct map_groups *mg, struct map *map)
+static void __map_groups__free_maps_by_name(struct map_groups *mg)
{
- maps__insert(&mg->maps, map);
- map->groups = mg;
+ /*
+ * Free the name-sorted array so the next search falls back to the rbtree and rebuilds it
+ */
+ zfree(&mg->maps_by_name);
+ mg->nr_maps_allocated = 0;
}
-static void __maps__purge(struct maps *maps)
+void map_groups__insert(struct map_groups *mg, struct map *map)
{
- struct rb_root *root = &maps->entries;
- struct rb_node *next = rb_first(root);
+ struct maps *maps = &mg->maps;
- while (next) {
- struct map *pos = rb_entry(next, struct map, rb_node);
+ down_write(&maps->lock);
+ __maps__insert(maps, map);
+ ++mg->nr_maps;
- next = rb_next(&pos->rb_node);
- rb_erase_init(&pos->rb_node, root);
- map__put(pos);
+ /*
+ * If a search by name has already been performed, then the just
+ * inserted map must be added to the array and the array re-sorted.
+ */
+ if (mg->maps_by_name) {
+ if (mg->nr_maps > mg->nr_maps_allocated) {
+ int nr_allocate = mg->nr_maps * 2;
+ struct map **maps_by_name = realloc(mg->maps_by_name, nr_allocate * sizeof(map));
+
+ if (maps_by_name == NULL) {
+ __map_groups__free_maps_by_name(mg);
+ return;
+ }
+
+ mg->maps_by_name = maps_by_name;
+ mg->nr_maps_allocated = nr_allocate;
+ }
+ mg->maps_by_name[mg->nr_maps - 1] = map;
+ __map_groups__sort_by_name(mg);
}
+ up_write(&maps->lock);
}
-static void __maps__purge_names(struct maps *maps)
+void map_groups__remove(struct map_groups *mg, struct map *map)
{
- struct rb_root *root = &maps->names;
- struct rb_node *next = rb_first(root);
+ struct maps *maps = &mg->maps;
+ down_write(&maps->lock);
+ if (mg->last_search_by_name == map)
+ mg->last_search_by_name = NULL;
- while (next) {
- struct map *pos = rb_entry(next, struct map, rb_node_name);
+ __maps__remove(maps, map);
+ --mg->nr_maps;
+ if (mg->maps_by_name)
+ __map_groups__free_maps_by_name(mg);
+ up_write(&maps->lock);
+}
- next = rb_next(&pos->rb_node_name);
- rb_erase_init(&pos->rb_node_name, root);
+static void __maps__purge(struct maps *maps)
+{
+ struct map *pos, *next;
+
+ maps__for_each_entry_safe(maps, pos, next) {
+ rb_erase_init(&pos->rb_node, &maps->entries);
map__put(pos);
}
}
@@ -624,7 +641,6 @@ static void maps__exit(struct maps *maps)
{
down_write(&maps->lock);
__maps__purge(maps);
- __maps__purge_names(maps);
up_write(&maps->lock);
}
@@ -687,13 +703,11 @@ struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name,
struct map **mapp)
{
struct symbol *sym;
- struct rb_node *nd;
+ struct map *pos;
down_read(&maps->lock);
- for (nd = rb_first(&maps->entries); nd; nd = rb_next(nd)) {
- struct map *pos = rb_entry(nd, struct map, rb_node);
-
+ maps__for_each_entry(maps, pos) {
sym = map__find_symbol_by_name(pos, name);
if (sym == NULL)
@@ -720,31 +734,30 @@ struct symbol *map_groups__find_symbol_by_name(struct map_groups *mg,
return maps__find_symbol_by_name(&mg->maps, name, mapp);
}
-int map_groups__find_ams(struct addr_map_symbol *ams)
+int map_groups__find_ams(struct map_groups *mg, struct addr_map_symbol *ams)
{
- if (ams->addr < ams->map->start || ams->addr >= ams->map->end) {
- if (ams->map->groups == NULL)
+ if (ams->addr < ams->ms.map->start || ams->addr >= ams->ms.map->end) {
+ if (mg == NULL)
return -1;
- ams->map = map_groups__find(ams->map->groups, ams->addr);
- if (ams->map == NULL)
+ ams->ms.map = map_groups__find(mg, ams->addr);
+ if (ams->ms.map == NULL)
return -1;
}
- ams->al_addr = ams->map->map_ip(ams->map, ams->addr);
- ams->sym = map__find_symbol(ams->map, ams->al_addr);
+ ams->al_addr = ams->ms.map->map_ip(ams->ms.map, ams->addr);
+ ams->ms.sym = map__find_symbol(ams->ms.map, ams->al_addr);
- return ams->sym ? 0 : -1;
+ return ams->ms.sym ? 0 : -1;
}
static size_t maps__fprintf(struct maps *maps, FILE *fp)
{
size_t printed = 0;
- struct rb_node *nd;
+ struct map *pos;
down_read(&maps->lock);
- for (nd = rb_first(&maps->entries); nd; nd = rb_next(nd)) {
- struct map *pos = rb_entry(nd, struct map, rb_node);
+ maps__for_each_entry(maps, pos) {
printed += fprintf(fp, "Map:");
printed += map__fprintf(pos, fp);
if (verbose > 2) {
@@ -766,12 +779,11 @@ size_t map_groups__fprintf(struct map_groups *mg, FILE *fp)
static void __map_groups__insert(struct map_groups *mg, struct map *map)
{
__maps__insert(&mg->maps, map);
- __maps__insert_name(&mg->maps, map);
- map->groups = mg;
}
-static int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp)
+int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map, FILE *fp)
{
+ struct maps *maps = &mg->maps;
struct rb_root *root;
struct rb_node *next, *first;
int err = 0;
@@ -836,7 +848,7 @@ static int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp
}
before->end = map->start;
- __map_groups__insert(pos->groups, before);
+ __map_groups__insert(mg, before);
if (verbose >= 2 && !use_browser)
map__fprintf(before, fp);
map__put(before);
@@ -853,7 +865,7 @@ static int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp
after->start = map->end;
after->pgoff += map->end - pos->start;
assert(pos->map_ip(pos, map->end) == after->map_ip(after, map->end));
- __map_groups__insert(pos->groups, after);
+ __map_groups__insert(mg, after);
if (verbose >= 2 && !use_browser)
map__fprintf(after, fp);
map__put(after);
@@ -871,12 +883,6 @@ out:
return err;
}
-int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map,
- FILE *fp)
-{
- return maps__fixup_overlappings(&mg->maps, map, fp);
-}
-
/*
* XXX This should not really _copy_ the maps, but refcount them.
*/
@@ -889,7 +895,7 @@ int map_groups__clone(struct thread *thread, struct map_groups *parent)
down_read(&maps->lock);
- for (map = maps__first(maps); map; map = map__next(map)) {
+ maps__for_each_entry(maps, map) {
struct map *new = map__clone(map);
if (new == NULL)
goto out_unlock;
@@ -929,42 +935,17 @@ static void __maps__insert(struct maps *maps, struct map *map)
map__get(map);
}
-static void __maps__insert_name(struct maps *maps, struct map *map)
-{
- struct rb_node **p = &maps->names.rb_node;
- struct rb_node *parent = NULL;
- struct map *m;
- int rc;
-
- while (*p != NULL) {
- parent = *p;
- m = rb_entry(parent, struct map, rb_node_name);
- rc = strcmp(m->dso->short_name, map->dso->short_name);
- if (rc < 0)
- p = &(*p)->rb_left;
- else
- p = &(*p)->rb_right;
- }
- rb_link_node(&map->rb_node_name, parent, p);
- rb_insert_color(&map->rb_node_name, &maps->names);
- map__get(map);
-}
-
void maps__insert(struct maps *maps, struct map *map)
{
down_write(&maps->lock);
__maps__insert(maps, map);
- __maps__insert_name(maps, map);
up_write(&maps->lock);
}
-static void __maps__remove(struct maps *maps, struct map *map)
+void __maps__remove(struct maps *maps, struct map *map)
{
rb_erase_init(&map->rb_node, &maps->entries);
map__put(map);
-
- rb_erase_init(&map->rb_node_name, &maps->names);
- map__put(map);
}
void maps__remove(struct maps *maps, struct map *map)
@@ -1007,7 +988,7 @@ struct map *maps__first(struct maps *maps)
return NULL;
}
-struct map *map__next(struct map *map)
+static struct map *__map__next(struct map *map)
{
struct rb_node *next = rb_next(&map->rb_node);
@@ -1016,6 +997,11 @@ struct map *map__next(struct map *map)
return NULL;
}
+struct map *map__next(struct map *map)
+{
+ return map ? __map__next(map) : NULL;
+}
+
struct kmap *__map__kmap(struct map *map)
{
if (!map->dso || !map->dso->kernel)
diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h
index c3614195ddc7..5e8899883231 100644
--- a/tools/perf/util/map.h
+++ b/tools/perf/util/map.h
@@ -23,18 +23,13 @@ struct map {
struct rb_node rb_node;
struct list_head node;
};
- struct rb_node rb_node_name;
u64 start;
u64 end;
- bool erange_warned;
- u32 priv;
+ bool erange_warned:1;
+ bool priv:1;
u32 prot;
- u32 flags;
u64 pgoff;
u64 reloc;
- u32 maj, min; /* only valid for MMAP2 record */
- u64 ino; /* only valid for MMAP2 record */
- u64 ino_generation;/* only valid for MMAP2 record */
/* ip -> dso rip */
u64 (*map_ip)(struct map *, u64);
@@ -42,8 +37,8 @@ struct map {
u64 (*unmap_ip)(struct map *, u64);
struct dso *dso;
- struct map_groups *groups;
refcount_t refcnt;
+ u32 flags;
};
struct kmap;
@@ -110,9 +105,11 @@ struct thread;
void map__init(struct map *map,
u64 start, u64 end, u64 pgoff, struct dso *dso);
+
+struct dso_id;
+
struct map *map__new(struct machine *machine, u64 start, u64 len,
- u64 pgoff, u32 d_maj, u32 d_min, u64 ino,
- u64 ino_gen, u32 prot, u32 flags,
+ u64 pgoff, struct dso_id *id, u32 prot, u32 flags,
char *filename, struct thread *thread);
struct map *map__new2(u64 start, struct dso *dso);
void map__delete(struct map *map);
diff --git a/tools/perf/util/map_groups.h b/tools/perf/util/map_groups.h
index 77252e14008f..63ed211fe241 100644
--- a/tools/perf/util/map_groups.h
+++ b/tools/perf/util/map_groups.h
@@ -16,21 +16,32 @@ struct thread;
struct maps {
struct rb_root entries;
- struct rb_root names;
struct rw_semaphore lock;
};
void maps__insert(struct maps *maps, struct map *map);
void maps__remove(struct maps *maps, struct map *map);
+void __maps__remove(struct maps *maps, struct map *map);
struct map *maps__find(struct maps *maps, u64 addr);
struct map *maps__first(struct maps *maps);
struct map *map__next(struct map *map);
+
+#define maps__for_each_entry(maps, map) \
+ for (map = maps__first(maps); map; map = map__next(map))
+
+#define maps__for_each_entry_safe(maps, map, next) \
+ for (map = maps__first(maps), next = map__next(map); map; map = next, next = map__next(map))
+
struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name, struct map **mapp);
struct map_groups {
struct maps maps;
struct machine *machine;
+ struct map *last_search_by_name;
+ struct map **maps_by_name;
refcount_t refcnt;
+ unsigned int nr_maps;
+ unsigned int nr_maps_allocated;
#ifdef HAVE_LIBUNWIND_SUPPORT
void *addr_space;
struct unwind_libunwind_ops *unwind_libunwind_ops;
@@ -64,29 +75,25 @@ size_t map_groups__fprintf(struct map_groups *mg, FILE *fp);
void map_groups__insert(struct map_groups *mg, struct map *map);
-static inline void map_groups__remove(struct map_groups *mg, struct map *map)
-{
- maps__remove(&mg->maps, map);
-}
+void map_groups__remove(struct map_groups *mg, struct map *map);
static inline struct map *map_groups__find(struct map_groups *mg, u64 addr)
{
return maps__find(&mg->maps, addr);
}
-struct map *map_groups__first(struct map_groups *mg);
+#define map_groups__for_each_entry(mg, map) \
+ for (map = maps__first(&mg->maps); map; map = map__next(map))
-static inline struct map *map_groups__next(struct map *map)
-{
- return map__next(map);
-}
+#define map_groups__for_each_entry_safe(mg, map, next) \
+ for (map = maps__first(&mg->maps), next = map__next(map); map; map = next, next = map__next(map))
struct symbol *map_groups__find_symbol(struct map_groups *mg, u64 addr, struct map **mapp);
struct symbol *map_groups__find_symbol_by_name(struct map_groups *mg, const char *name, struct map **mapp);
struct addr_map_symbol;
-int map_groups__find_ams(struct addr_map_symbol *ams);
+int map_groups__find_ams(struct map_groups *mg, struct addr_map_symbol *ams);
int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map, FILE *fp);
@@ -94,4 +101,6 @@ struct map *map_groups__find_by_name(struct map_groups *mg, const char *name);
int map_groups__merge_in(struct map_groups *kmaps, struct map *new_map);
+void __map_groups__sort_by_name(struct map_groups *mg);
+
#endif // __PERF_MAP_GROUPS_H
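/*
 * A minimal usage sketch for the iterators above (the caller is assumed
 * to hold maps->lock for reading, as maps__fprintf() does):
 *
 *	struct map *pos;
 *
 *	map_groups__for_each_entry(mg, pos)
 *		printed += map__fprintf(pos, fp);
 */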
diff --git a/tools/perf/util/map_symbol.h b/tools/perf/util/map_symbol.h
index 5a1aed9f6bb4..2964d971aeab 100644
--- a/tools/perf/util/map_symbol.h
+++ b/tools/perf/util/map_symbol.h
@@ -4,17 +4,18 @@
#include <linux/types.h>
+struct map_groups;
struct map;
struct symbol;
struct map_symbol {
+ struct map_groups *mg;
struct map *map;
struct symbol *sym;
};
struct addr_map_symbol {
- struct map *map;
- struct symbol *sym;
+ struct map_symbol ms;
u64 addr;
u64 al_addr;
u64 phys_addr;
diff --git a/tools/perf/util/mem-events.c b/tools/perf/util/mem-events.c
index 3d391583f2ae..aa29589f6904 100644
--- a/tools/perf/util/mem-events.c
+++ b/tools/perf/util/mem-events.c
@@ -410,7 +410,7 @@ do { \
return -1;
}
- if (!mi->daddr.map || !mi->iaddr.map) {
+ if (!mi->daddr.ms.map || !mi->iaddr.ms.map) {
stats->nomap++;
return -1;
}
diff --git a/tools/perf/util/metricgroup.c b/tools/perf/util/metricgroup.c
index a7c0424dbda3..6a4d350d5cdb 100644
--- a/tools/perf/util/metricgroup.c
+++ b/tools/perf/util/metricgroup.c
@@ -523,7 +523,7 @@ int metricgroup__parse_groups(const struct option *opt,
if (ret)
return ret;
pr_debug("adding %s\n", extra_events.buf);
- memset(&parse_error, 0, sizeof(struct parse_events_error));
+ bzero(&parse_error, sizeof(parse_error));
ret = parse_events(perf_evlist, extra_events.buf, &parse_error);
if (ret) {
parse_events_print_error(&parse_error, extra_events.buf);
diff --git a/tools/perf/util/mmap.c b/tools/perf/util/mmap.c
index a35dc57d5995..063d1b93c53d 100644
--- a/tools/perf/util/mmap.c
+++ b/tools/perf/util/mmap.c
@@ -13,6 +13,7 @@
#include <stdlib.h>
#include <string.h>
#include <unistd.h> // sysconf()
+#include <perf/mmap.h>
#ifdef HAVE_LIBNUMA_SUPPORT
#include <numaif.h>
#endif
@@ -23,116 +24,9 @@
#include "../perf.h"
#include <internal/lib.h> /* page_size */
-size_t perf_mmap__mmap_len(struct mmap *map)
+size_t mmap__mmap_len(struct mmap *map)
{
- return map->core.mask + 1 + page_size;
-}
-
-/* When check_messup is true, 'end' must points to a good entry */
-static union perf_event *perf_mmap__read(struct mmap *map,
- u64 *startp, u64 end)
-{
- unsigned char *data = map->core.base + page_size;
- union perf_event *event = NULL;
- int diff = end - *startp;
-
- if (diff >= (int)sizeof(event->header)) {
- size_t size;
-
- event = (union perf_event *)&data[*startp & map->core.mask];
- size = event->header.size;
-
- if (size < sizeof(event->header) || diff < (int)size)
- return NULL;
-
- /*
- * Event straddles the mmap boundary -- header should always
- * be inside due to u64 alignment of output.
- */
- if ((*startp & map->core.mask) + size != ((*startp + size) & map->core.mask)) {
- unsigned int offset = *startp;
- unsigned int len = min(sizeof(*event), size), cpy;
- void *dst = map->core.event_copy;
-
- do {
- cpy = min(map->core.mask + 1 - (offset & map->core.mask), len);
- memcpy(dst, &data[offset & map->core.mask], cpy);
- offset += cpy;
- dst += cpy;
- len -= cpy;
- } while (len);
-
- event = (union perf_event *)map->core.event_copy;
- }
-
- *startp += size;
- }
-
- return event;
-}
-
-/*
- * Read event from ring buffer one by one.
- * Return one event for each call.
- *
- * Usage:
- * perf_mmap__read_init()
- * while(event = perf_mmap__read_event()) {
- * //process the event
- * perf_mmap__consume()
- * }
- * perf_mmap__read_done()
- */
-union perf_event *perf_mmap__read_event(struct mmap *map)
-{
- union perf_event *event;
-
- /*
- * Check if event was unmapped due to a POLLHUP/POLLERR.
- */
- if (!refcount_read(&map->core.refcnt))
- return NULL;
-
- /* non-overwirte doesn't pause the ringbuffer */
- if (!map->core.overwrite)
- map->core.end = perf_mmap__read_head(map);
-
- event = perf_mmap__read(map, &map->core.start, map->core.end);
-
- if (!map->core.overwrite)
- map->core.prev = map->core.start;
-
- return event;
-}
-
-static bool perf_mmap__empty(struct mmap *map)
-{
- return perf_mmap__read_head(map) == map->core.prev && !map->auxtrace_mmap.base;
-}
-
-void perf_mmap__get(struct mmap *map)
-{
- refcount_inc(&map->core.refcnt);
-}
-
-void perf_mmap__put(struct mmap *map)
-{
- BUG_ON(map->core.base && refcount_read(&map->core.refcnt) == 0);
-
- if (refcount_dec_and_test(&map->core.refcnt))
- perf_mmap__munmap(map);
-}
-
-void perf_mmap__consume(struct mmap *map)
-{
- if (!map->core.overwrite) {
- u64 old = map->core.prev;
-
- perf_mmap__write_tail(map, old);
- }
-
- if (refcount_read(&map->core.refcnt) == 1 && perf_mmap__empty(map))
- perf_mmap__put(map);
+ return perf_mmap__mmap_len(&map->core);
}
int __weak auxtrace_mmap__mmap(struct auxtrace_mmap *mm __maybe_unused,
@@ -170,7 +64,7 @@ static int perf_mmap__aio_enabled(struct mmap *map)
#ifdef HAVE_LIBNUMA_SUPPORT
static int perf_mmap__aio_alloc(struct mmap *map, int idx)
{
- map->aio.data[idx] = mmap(NULL, perf_mmap__mmap_len(map), PROT_READ|PROT_WRITE,
+ map->aio.data[idx] = mmap(NULL, mmap__mmap_len(map), PROT_READ|PROT_WRITE,
MAP_PRIVATE|MAP_ANONYMOUS, 0, 0);
if (map->aio.data[idx] == MAP_FAILED) {
map->aio.data[idx] = NULL;
@@ -183,7 +77,7 @@ static int perf_mmap__aio_alloc(struct mmap *map, int idx)
static void perf_mmap__aio_free(struct mmap *map, int idx)
{
if (map->aio.data[idx]) {
- munmap(map->aio.data[idx], perf_mmap__mmap_len(map));
+ munmap(map->aio.data[idx], mmap__mmap_len(map));
map->aio.data[idx] = NULL;
}
}
@@ -196,7 +90,7 @@ static int perf_mmap__aio_bind(struct mmap *map, int idx, int cpu, int affinity)
if (affinity != PERF_AFFINITY_SYS && cpu__max_node() > 1) {
data = map->aio.data[idx];
- mmap_len = perf_mmap__mmap_len(map);
+ mmap_len = mmap__mmap_len(map);
node_mask = 1UL << cpu__get_node(cpu);
if (mbind(data, mmap_len, MPOL_BIND, &node_mask, 1, 0)) {
pr_err("Failed to bind [%p-%p] AIO buffer to node %d: error %m\n",
@@ -210,7 +104,7 @@ static int perf_mmap__aio_bind(struct mmap *map, int idx, int cpu, int affinity)
#else /* !HAVE_LIBNUMA_SUPPORT */
static int perf_mmap__aio_alloc(struct mmap *map, int idx)
{
- map->aio.data[idx] = malloc(perf_mmap__mmap_len(map));
+ map->aio.data[idx] = malloc(mmap__mmap_len(map));
if (map->aio.data[idx] == NULL)
return -1;
@@ -311,19 +205,13 @@ static void perf_mmap__aio_munmap(struct mmap *map __maybe_unused)
}
#endif
-void perf_mmap__munmap(struct mmap *map)
+void mmap__munmap(struct mmap *map)
{
perf_mmap__aio_munmap(map);
if (map->data != NULL) {
- munmap(map->data, perf_mmap__mmap_len(map));
+ munmap(map->data, mmap__mmap_len(map));
map->data = NULL;
}
- if (map->core.base != NULL) {
- munmap(map->core.base, perf_mmap__mmap_len(map));
- map->core.base = NULL;
- map->core.fd = -1;
- refcount_set(&map->core.refcnt, 0);
- }
auxtrace_mmap__munmap(&map->auxtrace_mmap);
}
@@ -353,34 +241,13 @@ static void perf_mmap__setup_affinity_mask(struct mmap *map, struct mmap_params
CPU_SET(map->core.cpu, &map->affinity_mask);
}
-int perf_mmap__mmap(struct mmap *map, struct mmap_params *mp, int fd, int cpu)
+int mmap__mmap(struct mmap *map, struct mmap_params *mp, int fd, int cpu)
{
- /*
- * The last one will be done at perf_mmap__consume(), so that we
- * make sure we don't prevent tools from consuming every last event in
- * the ring buffer.
- *
- * I.e. we can get the POLLHUP meaning that the fd doesn't exist
- * anymore, but the last events for it are still in the ring buffer,
- * waiting to be consumed.
- *
- * Tools can chose to ignore this at their own discretion, but the
- * evlist layer can't just drop it when filtering events in
- * perf_evlist__filter_pollfd().
- */
- refcount_set(&map->core.refcnt, 2);
- map->core.prev = 0;
- map->core.mask = mp->mask;
- map->core.base = mmap(NULL, perf_mmap__mmap_len(map), mp->prot,
- MAP_SHARED, fd, 0);
- if (map->core.base == MAP_FAILED) {
+ if (perf_mmap__mmap(&map->core, &mp->core, fd, cpu)) {
pr_debug2("failed to mmap perf event ring buffer, error %d\n",
errno);
- map->core.base = NULL;
return -1;
}
- map->core.fd = fd;
- map->core.cpu = cpu;
perf_mmap__setup_affinity_mask(map, mp);
@@ -389,7 +256,7 @@ int perf_mmap__mmap(struct mmap *map, struct mmap_params *mp, int fd, int cpu)
map->comp_level = mp->comp_level;
if (map->comp_level && !perf_mmap__aio_enabled(map)) {
- map->data = mmap(NULL, perf_mmap__mmap_len(map), PROT_READ|PROT_WRITE,
+ map->data = mmap(NULL, mmap__mmap_len(map), PROT_READ|PROT_WRITE,
MAP_PRIVATE|MAP_ANONYMOUS, 0, 0);
if (map->data == MAP_FAILED) {
pr_debug2("failed to mmap data buffer, error %d\n",
@@ -406,96 +273,16 @@ int perf_mmap__mmap(struct mmap *map, struct mmap_params *mp, int fd, int cpu)
return perf_mmap__aio_mmap(map, mp);
}
-static int overwrite_rb_find_range(void *buf, int mask, u64 *start, u64 *end)
-{
- struct perf_event_header *pheader;
- u64 evt_head = *start;
- int size = mask + 1;
-
- pr_debug2("%s: buf=%p, start=%"PRIx64"\n", __func__, buf, *start);
- pheader = (struct perf_event_header *)(buf + (*start & mask));
- while (true) {
- if (evt_head - *start >= (unsigned int)size) {
- pr_debug("Finished reading overwrite ring buffer: rewind\n");
- if (evt_head - *start > (unsigned int)size)
- evt_head -= pheader->size;
- *end = evt_head;
- return 0;
- }
-
- pheader = (struct perf_event_header *)(buf + (evt_head & mask));
-
- if (pheader->size == 0) {
- pr_debug("Finished reading overwrite ring buffer: get start\n");
- *end = evt_head;
- return 0;
- }
-
- evt_head += pheader->size;
- pr_debug3("move evt_head: %"PRIx64"\n", evt_head);
- }
- WARN_ONCE(1, "Shouldn't get here\n");
- return -1;
-}
-
-/*
- * Report the start and end of the available data in ringbuffer
- */
-static int __perf_mmap__read_init(struct mmap *md)
-{
- u64 head = perf_mmap__read_head(md);
- u64 old = md->core.prev;
- unsigned char *data = md->core.base + page_size;
- unsigned long size;
-
- md->core.start = md->core.overwrite ? head : old;
- md->core.end = md->core.overwrite ? old : head;
-
- if ((md->core.end - md->core.start) < md->core.flush)
- return -EAGAIN;
-
- size = md->core.end - md->core.start;
- if (size > (unsigned long)(md->core.mask) + 1) {
- if (!md->core.overwrite) {
- WARN_ONCE(1, "failed to keep up with mmap data. (warn only once)\n");
-
- md->core.prev = head;
- perf_mmap__consume(md);
- return -EAGAIN;
- }
-
- /*
- * Backward ring buffer is full. We still have a chance to read
- * most of data from it.
- */
- if (overwrite_rb_find_range(data, md->core.mask, &md->core.start, &md->core.end))
- return -EINVAL;
- }
-
- return 0;
-}
-
-int perf_mmap__read_init(struct mmap *map)
-{
- /*
- * Check if event was unmapped due to a POLLHUP/POLLERR.
- */
- if (!refcount_read(&map->core.refcnt))
- return -ENOENT;
-
- return __perf_mmap__read_init(map);
-}
-
int perf_mmap__push(struct mmap *md, void *to,
int push(struct mmap *map, void *to, void *buf, size_t size))
{
- u64 head = perf_mmap__read_head(md);
+ u64 head = perf_mmap__read_head(&md->core);
unsigned char *data = md->core.base + page_size;
unsigned long size;
void *buf;
int rc = 0;
- rc = perf_mmap__read_init(md);
+ rc = perf_mmap__read_init(&md->core);
if (rc < 0)
return (rc == -EAGAIN) ? 1 : -1;
@@ -522,24 +309,7 @@ int perf_mmap__push(struct mmap *md, void *to,
}
md->core.prev = head;
- perf_mmap__consume(md);
+ perf_mmap__consume(&md->core);
out:
return rc;
}
-
-/*
- * Mandatory for overwrite mode
- * The direction of overwrite mode is backward.
- * The last perf_mmap__read() will set tail to map->core.prev.
- * Need to correct the map->core.prev to head which is the end of next read.
- */
-void perf_mmap__read_done(struct mmap *map)
-{
- /*
- * Check if event was unmapped due to a POLLHUP/POLLERR.
- */
- if (!refcount_read(&map->core.refcnt))
- return;
-
- map->core.prev = perf_mmap__read_head(map);
-}
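/*
 * The read side of the ring buffer API moves to libperf (<perf/mmap.h>);
 * the loop documented in the removed comment is unchanged apart from
 * taking the embedded struct perf_mmap. A minimal sketch, assuming
 * libperf keeps the same function names:
 *
 *	if (perf_mmap__read_init(&map->core) == 0) {
 *		while ((event = perf_mmap__read_event(&map->core))) {
 *			// process the event
 *			perf_mmap__consume(&map->core);
 *		}
 *		perf_mmap__read_done(&map->core);
 *	}
 */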
diff --git a/tools/perf/util/mmap.h b/tools/perf/util/mmap.h
index e567c1c875bd..bee4e83f7109 100644
--- a/tools/perf/util/mmap.h
+++ b/tools/perf/util/mmap.h
@@ -37,37 +37,19 @@ struct mmap {
};
struct mmap_params {
- int prot, mask, nr_cblocks, affinity, flush, comp_level;
+ struct perf_mmap_param core;
+ int nr_cblocks, affinity, flush, comp_level;
struct auxtrace_mmap_params auxtrace_mp;
};
-int perf_mmap__mmap(struct mmap *map, struct mmap_params *mp, int fd, int cpu);
-void perf_mmap__munmap(struct mmap *map);
-
-void perf_mmap__get(struct mmap *map);
-void perf_mmap__put(struct mmap *map);
-
-void perf_mmap__consume(struct mmap *map);
-
-static inline u64 perf_mmap__read_head(struct mmap *mm)
-{
- return ring_buffer_read_head(mm->core.base);
-}
-
-static inline void perf_mmap__write_tail(struct mmap *md, u64 tail)
-{
- ring_buffer_write_tail(md->core.base, tail);
-}
+int mmap__mmap(struct mmap *map, struct mmap_params *mp, int fd, int cpu);
+void mmap__munmap(struct mmap *map);
union perf_event *perf_mmap__read_forward(struct mmap *map);
-union perf_event *perf_mmap__read_event(struct mmap *map);
-
int perf_mmap__push(struct mmap *md, void *to,
int push(struct mmap *map, void *to, void *buf, size_t size));
-size_t perf_mmap__mmap_len(struct mmap *map);
+size_t mmap__mmap_len(struct mmap *map);
-int perf_mmap__read_init(struct mmap *md);
-void perf_mmap__read_done(struct mmap *map);
#endif /*__PERF_MMAP_H */
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index b5e2adef49de..ed7c008b9c8b 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -182,6 +182,37 @@ static int tp_event_has_id(const char *dir_path, struct dirent *evt_dir)
#define MAX_EVENT_LENGTH 512
+void parse_events__handle_error(struct parse_events_error *err, int idx,
+ char *str, char *help)
+{
+ if (WARN(!str, "WARNING: failed to provide error string\n")) {
+ free(help);
+ return;
+ }
+ switch (err->num_errors) {
+ case 0:
+ err->idx = idx;
+ err->str = str;
+ err->help = help;
+ break;
+ case 1:
+ err->first_idx = err->idx;
+ err->idx = idx;
+ err->first_str = err->str;
+ err->str = str;
+ err->first_help = err->help;
+ err->help = help;
+ break;
+ default:
+ WARN_ONCE(1, "WARNING: multiple event parsing errors\n");
+ free(err->str);
+ err->str = str;
+ free(err->help);
+ err->help = help;
+ break;
+ }
+ err->num_errors++;
+}
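/*
 * A minimal caller sketch: ownership of both strings passes to the
 * error struct, so callers hand over strdup()/asprintf() buffers:
 *
 *	parse_events__handle_error(err, term->err_term,
 *				   strdup("unknown term"), NULL);
 *
 * The first error is preserved in first_idx/first_str/first_help once a
 * second arrives; later errors replace str/help and bump num_errors.
 */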
struct tracepoint_path *tracepoint_id_to_path(u64 config)
{
@@ -480,6 +511,7 @@ int parse_events_add_cache(struct list_head *list, int *idx,
static void tracepoint_error(struct parse_events_error *e, int err,
const char *sys, const char *name)
{
+ const char *str;
char help[BUFSIZ];
if (!e)
@@ -493,18 +525,18 @@ static void tracepoint_error(struct parse_events_error *e, int err,
switch (err) {
case EACCES:
- e->str = strdup("can't access trace events");
+ str = "can't access trace events";
break;
case ENOENT:
- e->str = strdup("unknown tracepoint");
+ str = "unknown tracepoint";
break;
default:
- e->str = strdup("failed to add tracepoint");
+ str = "failed to add tracepoint";
break;
}
tracing_path__strerror_open_tp(err, help, sizeof(help), sys, name);
- e->help = strdup(help);
+ parse_events__handle_error(e, 0, strdup(str), strdup(help));
}
static int add_tracepoint(struct list_head *list, int *idx,
@@ -932,11 +964,11 @@ static int check_type_val(struct parse_events_term *term,
return 0;
if (err) {
- err->idx = term->err_val;
- if (type == PARSE_EVENTS__TERM_TYPE_NUM)
- err->str = strdup("expected numeric value");
- else
- err->str = strdup("expected string value");
+ parse_events__handle_error(err, term->err_val,
+ type == PARSE_EVENTS__TERM_TYPE_NUM
+ ? strdup("expected numeric value")
+ : strdup("expected string value"),
+ NULL);
}
return -EINVAL;
}
@@ -965,6 +997,7 @@ static const char *config_term_names[__PARSE_EVENTS__TERM_TYPE_NR] = {
[PARSE_EVENTS__TERM_TYPE_DRV_CFG] = "driver-config",
[PARSE_EVENTS__TERM_TYPE_PERCORE] = "percore",
[PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT] = "aux-output",
+ [PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE] = "aux-sample-size",
};
static bool config_term_shrinked;
@@ -972,8 +1005,11 @@ static bool config_term_shrinked;
static bool
config_term_avail(int term_type, struct parse_events_error *err)
{
+ char *err_str;
+
if (term_type < 0 || term_type >= __PARSE_EVENTS__TERM_TYPE_NR) {
- err->str = strdup("Invalid term_type");
+ parse_events__handle_error(err, -1,
+ strdup("Invalid term_type"), NULL);
return false;
}
if (!config_term_shrinked)
@@ -992,9 +1028,9 @@ config_term_avail(int term_type, struct parse_events_error *err)
return false;
/* term_type is validated so indexing is safe */
- if (asprintf(&err->str, "'%s' is not usable in 'perf stat'",
- config_term_names[term_type]) < 0)
- err->str = NULL;
+ if (asprintf(&err_str, "'%s' is not usable in 'perf stat'",
+ config_term_names[term_type]) >= 0)
+ parse_events__handle_error(err, -1, err_str, NULL);
return false;
}
}
@@ -1036,17 +1072,20 @@ do { \
case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
CHECK_TYPE_VAL(STR);
if (strcmp(term->val.str, "no") &&
- parse_branch_str(term->val.str, &attr->branch_sample_type)) {
- err->str = strdup("invalid branch sample type");
- err->idx = term->err_val;
+ parse_branch_str(term->val.str,
+ &attr->branch_sample_type)) {
+ parse_events__handle_error(err, term->err_val,
+ strdup("invalid branch sample type"),
+ NULL);
return -EINVAL;
}
break;
case PARSE_EVENTS__TERM_TYPE_TIME:
CHECK_TYPE_VAL(NUM);
if (term->val.num > 1) {
- err->str = strdup("expected 0 or 1");
- err->idx = term->err_val;
+ parse_events__handle_error(err, term->err_val,
+ strdup("expected 0 or 1"),
+ NULL);
return -EINVAL;
}
break;
@@ -1080,18 +1119,28 @@ do { \
case PARSE_EVENTS__TERM_TYPE_PERCORE:
CHECK_TYPE_VAL(NUM);
if ((unsigned int)term->val.num > 1) {
- err->str = strdup("expected 0 or 1");
- err->idx = term->err_val;
+ parse_events__handle_error(err, term->err_val,
+ strdup("expected 0 or 1"),
+ NULL);
return -EINVAL;
}
break;
case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
CHECK_TYPE_VAL(NUM);
break;
+ case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
+ CHECK_TYPE_VAL(NUM);
+ if (term->val.num > UINT_MAX) {
+ parse_events__handle_error(err, term->err_val,
+ strdup("too big"),
+ NULL);
+ return -EINVAL;
+ }
+ break;
default:
- err->str = strdup("unknown term");
- err->idx = term->err_term;
- err->help = parse_events_formats_error_string(NULL);
+ parse_events__handle_error(err, term->err_term,
+ strdup("unknown term"),
+ parse_events_formats_error_string(NULL));
return -EINVAL;
}
@@ -1139,12 +1188,13 @@ static int config_term_tracepoint(struct perf_event_attr *attr,
case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
+ case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
return config_term_common(attr, term, err);
default:
if (err) {
- err->idx = term->err_term;
- err->str = strdup("unknown term");
- err->help = strdup("valid terms: call-graph,stack-size\n");
+ parse_events__handle_error(err, term->err_term,
+ strdup("unknown term"),
+ strdup("valid terms: call-graph,stack-size\n"));
}
return -EINVAL;
}
@@ -1234,11 +1284,47 @@ do { \
case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
ADD_CONFIG_TERM(AUX_OUTPUT, aux_output, term->val.num ? 1 : 0);
break;
+ case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
+ ADD_CONFIG_TERM(AUX_SAMPLE_SIZE, aux_sample_size, term->val.num);
+ break;
+ default:
+ break;
+ }
+ }
+ return 0;
+}
+
+/*
+ * Add PERF_EVSEL__CONFIG_TERM_CFG_CHG where cfg_chg will have a bit set for
+ * each bit of attr->config that the user has changed.
+ */
+static int get_config_chgs(struct perf_pmu *pmu, struct list_head *head_config,
+ struct list_head *head_terms)
+{
+ struct parse_events_term *term;
+ u64 bits = 0;
+ int type;
+
+ list_for_each_entry(term, head_config, list) {
+ switch (term->type_term) {
+ case PARSE_EVENTS__TERM_TYPE_USER:
+ type = perf_pmu__format_type(&pmu->format, term->config);
+ if (type != PERF_PMU_FORMAT_VALUE_CONFIG)
+ continue;
+ bits |= perf_pmu__format_bits(&pmu->format, term->config);
+ break;
+ case PARSE_EVENTS__TERM_TYPE_CONFIG:
+ bits = ~(u64)0;
+ break;
default:
break;
}
}
-#undef ADD_EVSEL_CONFIG
+
+ if (bits)
+ ADD_CONFIG_TERM(CFG_CHG, cfg_chg, bits);
+
+#undef ADD_CONFIG_TERM
return 0;
}
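
get_config_chgs() ORs together the format bits of every user-set term so later code can tell exactly which attr->config bits the user overrode; a raw config=N term taints all 64 bits. A toy sketch of that accumulation, with field offsets invented for illustration:

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t bits = 0;
	int user_wrote_raw_config = 0;	/* i.e. a config=N term was seen */

	bits |= 0xffULL << 0;		/* e.g. a format field "event:0-7" */
	bits |= 0x1ULL << 23;		/* e.g. a format field "inv:23"    */
	if (user_wrote_raw_config)
		bits = ~(uint64_t)0;	/* raw config touches every bit    */

	printf("cfg_chg mask: %#llx\n", (unsigned long long)bits);
	return 0;
}
```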
@@ -1323,10 +1409,12 @@ int parse_events_add_pmu(struct parse_events_state *parse_state,
pmu = perf_pmu__find(name);
if (!pmu) {
- if (asprintf(&err->str,
+ char *err_str;
+
+ if (asprintf(&err_str,
"Cannot find PMU `%s'. Missing kernel support?",
- name) < 0)
- err->str = NULL;
+ name) >= 0)
+ parse_events__handle_error(err, 0, err_str, NULL);
return -EINVAL;
}
@@ -1365,8 +1453,22 @@ int parse_events_add_pmu(struct parse_events_state *parse_state,
if (get_config_terms(head_config, &config_terms))
return -ENOMEM;
- if (perf_pmu__config(pmu, &attr, head_config, parse_state->error))
+ /*
+ * When using default config, record which bits of attr->config were
+ * changed by the user.
+ */
+ if (pmu->default_config && get_config_chgs(pmu, head_config, &config_terms))
+ return -ENOMEM;
+
+ if (perf_pmu__config(pmu, &attr, head_config, parse_state->error)) {
+ struct perf_evsel_config_term *pos, *tmp;
+
+ list_for_each_entry_safe(pos, tmp, &config_terms, list) {
+ list_del_init(&pos->list);
+ free(pos);
+ }
return -EINVAL;
+ }
evsel = __add_event(list, &parse_state->idx, &attr,
get_config_name(head_config), pmu,
@@ -1389,7 +1491,6 @@ int parse_events_add_pmu(struct parse_events_state *parse_state,
int parse_events_multi_pmu_add(struct parse_events_state *parse_state,
char *str, struct list_head **listp)
{
- struct list_head *head;
struct parse_events_term *term;
struct list_head *list;
struct perf_pmu *pmu = NULL;
@@ -1406,19 +1507,30 @@ int parse_events_multi_pmu_add(struct parse_events_state *parse_state,
list_for_each_entry(alias, &pmu->aliases, list) {
if (!strcasecmp(alias->name, str)) {
+ struct list_head *head;
+ char *config;
+
head = malloc(sizeof(struct list_head));
if (!head)
return -1;
INIT_LIST_HEAD(head);
- if (parse_events_term__num(&term, PARSE_EVENTS__TERM_TYPE_USER,
- str, 1, false, &str, NULL) < 0)
+ config = strdup(str);
+ if (!config)
return -1;
+ if (parse_events_term__num(&term,
+ PARSE_EVENTS__TERM_TYPE_USER,
+ config, 1, false, &config,
+ NULL) < 0) {
+ free(list);
+ free(config);
+ return -1;
+ }
list_add_tail(&term->list, head);
if (!parse_events_add_pmu(parse_state, list,
pmu->name, head,
true, true)) {
- pr_debug("%s -> %s/%s/\n", str,
+ pr_debug("%s -> %s/%s/\n", config,
pmu->name, alias->str);
ok++;
}
@@ -1427,8 +1539,10 @@ int parse_events_multi_pmu_add(struct parse_events_state *parse_state,
}
}
}
- if (!ok)
+ if (!ok) {
+ free(list);
return -1;
+ }
*listp = list;
return 0;
}
@@ -1927,15 +2041,20 @@ int parse_events(struct evlist *evlist, const char *str,
ret = parse_events__scanner(str, &parse_state, PE_START_EVENTS);
perf_pmu__parse_cleanup();
+
+ if (!ret && list_empty(&parse_state.list)) {
+ WARN_ONCE(true, "WARNING: event parser found nothing\n");
+ return -1;
+ }
+
+ /*
+ * Add the list to the evlist even on error, so callers can clean up.
+ */
+ perf_evlist__splice_list_tail(evlist, &parse_state.list);
+
if (!ret) {
struct evsel *last;
- if (list_empty(&parse_state.list)) {
- WARN_ONCE(true, "WARNING: event parser found nothing\n");
- return -1;
- }
-
- perf_evlist__splice_list_tail(evlist, &parse_state.list);
evlist->nr_groups += parse_state.nr_groups;
last = evlist__last(evlist);
last->cmdline_group_boundary = true;
@@ -1960,15 +2079,14 @@ static int get_term_width(void)
return ws.ws_col > MAX_WIDTH ? MAX_WIDTH : ws.ws_col;
}
-void parse_events_print_error(struct parse_events_error *err,
- const char *event)
+static void __parse_events_print_error(int err_idx, const char *err_str,
+ const char *err_help, const char *event)
{
const char *str = "invalid or unsupported event: ";
char _buf[MAX_WIDTH];
char *buf = (char *) event;
int idx = 0;
-
- if (err->str) {
+ if (err_str) {
/* -2 for extra '' in the final fprintf */
int width = get_term_width() - 2;
int len_event = strlen(event);
@@ -1991,8 +2109,8 @@ void parse_events_print_error(struct parse_events_error *err,
buf = _buf;
/* We're cutting from the beginning. */
- if (err->idx > max_err_idx)
- cut = err->idx - max_err_idx;
+ if (err_idx > max_err_idx)
+ cut = err_idx - max_err_idx;
strncpy(buf, event + cut, max_len);
@@ -2005,16 +2123,33 @@ void parse_events_print_error(struct parse_events_error *err,
buf[max_len] = 0;
}
- idx = len_str + err->idx - cut;
+ idx = len_str + err_idx - cut;
}
fprintf(stderr, "%s'%s'\n", str, buf);
if (idx) {
- fprintf(stderr, "%*s\\___ %s\n", idx + 1, "", err->str);
- if (err->help)
- fprintf(stderr, "\n%s\n", err->help);
- zfree(&err->str);
- zfree(&err->help);
+ fprintf(stderr, "%*s\\___ %s\n", idx + 1, "", err_str);
+ if (err_help)
+ fprintf(stderr, "\n%s\n", err_help);
+ }
+}
+
+void parse_events_print_error(struct parse_events_error *err,
+ const char *event)
+{
+ if (!err->num_errors)
+ return;
+
+ __parse_events_print_error(err->idx, err->str, err->help, event);
+ zfree(&err->str);
+ zfree(&err->help);
+
+ if (err->num_errors > 1) {
+ fputs("\nInitial error:\n", stderr);
+ __parse_events_print_error(err->first_idx, err->first_str,
+ err->first_help, event);
+ zfree(&err->first_str);
+ zfree(&err->first_help);
}
}
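
With two or more accumulated errors, parse_events_print_error() now prints the most recent error first and then replays the initial one. Illustrative output for a hypothetical event string (markers follow the format strings above; the exact messages and columns are invented):

```
invalid or unsupported event: 'cycles/bad=1/'
                      \___ unknown term

Initial error:
invalid or unsupported event: 'cycles/bad=1/'
              \___ expected numeric value
```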
@@ -2024,8 +2159,11 @@ int parse_events_option(const struct option *opt, const char *str,
int unset __maybe_unused)
{
struct evlist *evlist = *(struct evlist **)opt->value;
- struct parse_events_error err = { .idx = 0, };
- int ret = parse_events(evlist, str, &err);
+ struct parse_events_error err;
+ int ret;
+
+ bzero(&err, sizeof(err));
+ ret = parse_events(evlist, str, &err);
if (ret) {
parse_events_print_error(&err, str);
@@ -2600,7 +2738,7 @@ out_enomem:
* Print the help text for the event symbols:
*/
void print_events(const char *event_glob, bool name_only, bool quiet_flag,
- bool long_desc, bool details_flag)
+ bool long_desc, bool details_flag, bool deprecated)
{
print_symbol_events(event_glob, PERF_TYPE_HARDWARE,
event_symbols_hw, PERF_COUNT_HW_MAX, name_only);
@@ -2612,7 +2750,7 @@ void print_events(const char *event_glob, bool name_only, bool quiet_flag,
print_hwcache_events(event_glob, name_only);
print_pmu_events(event_glob, name_only, quiet_flag, long_desc,
- details_flag);
+ details_flag, deprecated);
if (event_glob != NULL)
return;
@@ -2718,30 +2856,63 @@ int parse_events_term__sym_hw(struct parse_events_term **term,
char *config, unsigned idx)
{
struct event_symbol *sym;
+ char *str;
struct parse_events_term temp = {
.type_val = PARSE_EVENTS__TERM_TYPE_STR,
.type_term = PARSE_EVENTS__TERM_TYPE_USER,
- .config = config ?: (char *) "event",
+ .config = config,
};
+ if (!temp.config) {
+ temp.config = strdup("event");
+ if (!temp.config)
+ return -ENOMEM;
+ }
BUG_ON(idx >= PERF_COUNT_HW_MAX);
sym = &event_symbols_hw[idx];
- return new_term(term, &temp, (char *) sym->symbol, 0);
+ str = strdup(sym->symbol);
+ if (!str)
+ return -ENOMEM;
+ return new_term(term, &temp, str, 0);
}
int parse_events_term__clone(struct parse_events_term **new,
struct parse_events_term *term)
{
+ char *str;
struct parse_events_term temp = {
.type_val = term->type_val,
.type_term = term->type_term,
- .config = term->config,
+ .config = NULL,
.err_term = term->err_term,
.err_val = term->err_val,
};
- return new_term(new, &temp, term->val.str, term->val.num);
+ if (term->config) {
+ temp.config = strdup(term->config);
+ if (!temp.config)
+ return -ENOMEM;
+ }
+ if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM)
+ return new_term(new, &temp, NULL, term->val.num);
+
+ str = strdup(term->val.str);
+ if (!str)
+ return -ENOMEM;
+ return new_term(new, &temp, str, 0);
+}
+
+void parse_events_term__delete(struct parse_events_term *term)
+{
+ if (term->array.nr_ranges)
+ zfree(&term->array.ranges);
+
+ if (term->type_val != PARSE_EVENTS__TERM_TYPE_NUM)
+ zfree(&term->val.str);
+
+ zfree(&term->config);
+ free(term);
}
int parse_events_copy_term_list(struct list_head *old,
@@ -2774,10 +2945,8 @@ void parse_events_terms__purge(struct list_head *terms)
struct parse_events_term *term, *h;
list_for_each_entry_safe(term, h, terms, list) {
- if (term->array.nr_ranges)
- zfree(&term->array.ranges);
list_del_init(&term->list);
- free(term);
+ parse_events_term__delete(term);
}
}
@@ -2797,13 +2966,10 @@ void parse_events__clear_array(struct parse_events_array *a)
void parse_events_evlist_error(struct parse_events_state *parse_state,
int idx, const char *str)
{
- struct parse_events_error *err = parse_state->error;
-
- if (!err)
+ if (!parse_state->error)
return;
- err->idx = idx;
- err->str = strdup(str);
- WARN_ONCE(!err->str, "WARNING: failed to allocate error string");
+
+ parse_events__handle_error(parse_state->error, idx, strdup(str), NULL);
}
static void config_terms_list(char *buf, size_t buf_sz)
diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h
index 616ca1eda0eb..27596cbd0ba0 100644
--- a/tools/perf/util/parse-events.h
+++ b/tools/perf/util/parse-events.h
@@ -77,6 +77,7 @@ enum {
PARSE_EVENTS__TERM_TYPE_DRV_CFG,
PARSE_EVENTS__TERM_TYPE_PERCORE,
PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT,
+ PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE,
__PARSE_EVENTS__TERM_TYPE_NR,
};
@@ -110,9 +111,13 @@ struct parse_events_term {
};
struct parse_events_error {
+ int num_errors; /* number of errors encountered */
int idx; /* index in the parsed string */
char *str; /* string to display at the index */
char *help; /* optional help string */
+ int first_idx;/* as above, but for the first encountered error */
+ char *first_str;
+ char *first_help;
};
struct parse_events_state {
@@ -124,6 +129,8 @@ struct parse_events_state {
struct list_head *terms;
};
+void parse_events__handle_error(struct parse_events_error *err, int idx,
+ char *str, char *help);
void parse_events__shrink_config_terms(void);
int parse_events__is_hardcoded_term(struct parse_events_term *term);
int parse_events_term__num(struct parse_events_term **term,
@@ -137,6 +144,7 @@ int parse_events_term__sym_hw(struct parse_events_term **term,
char *config, unsigned idx);
int parse_events_term__clone(struct parse_events_term **new,
struct parse_events_term *term);
+void parse_events_term__delete(struct parse_events_term *term);
void parse_events_terms__delete(struct list_head *terms);
void parse_events_terms__purge(struct list_head *terms);
void parse_events__clear_array(struct parse_events_array *a);
@@ -195,7 +203,7 @@ void parse_events_evlist_error(struct parse_events_state *parse_state,
int idx, const char *str);
void print_events(const char *event_glob, bool name_only, bool quiet,
- bool long_desc, bool details_flag);
+ bool long_desc, bool details_flag, bool deprecated);
struct event_symbol {
const char *symbol;
diff --git a/tools/perf/util/parse-events.l b/tools/perf/util/parse-events.l
index 7469497cd28e..7b1c8ee537cf 100644
--- a/tools/perf/util/parse-events.l
+++ b/tools/perf/util/parse-events.l
@@ -285,6 +285,7 @@ overwrite { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_OVERWRITE); }
no-overwrite { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_NOOVERWRITE); }
percore { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_PERCORE); }
aux-output { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT); }
+aux-sample-size { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE); }
, { return ','; }
"/" { BEGIN(INITIAL); return '/'; }
{name_minus} { return str(yyscanner, PE_NAME); }
diff --git a/tools/perf/util/parse-events.y b/tools/perf/util/parse-events.y
index 48126ae4cd13..e2eea4e601b4 100644
--- a/tools/perf/util/parse-events.y
+++ b/tools/perf/util/parse-events.y
@@ -12,6 +12,7 @@
#include <stdio.h>
#include <linux/compiler.h>
#include <linux/types.h>
+#include <linux/zalloc.h>
#include "pmu.h"
#include "evsel.h"
#include "parse-events.h"
@@ -25,12 +26,28 @@ do { \
YYABORT; \
} while (0)
-#define ALLOC_LIST(list) \
-do { \
- list = malloc(sizeof(*list)); \
- ABORT_ON(!list); \
- INIT_LIST_HEAD(list); \
-} while (0)
+static struct list_head *alloc_list(void)
+{
+ struct list_head *list;
+
+ list = malloc(sizeof(*list));
+ if (!list)
+ return NULL;
+
+ INIT_LIST_HEAD(list);
+ return list;
+}
+
+static void free_list_evsel(struct list_head *list_evsel)
+{
+ struct evsel *evsel, *tmp;
+
+ list_for_each_entry_safe(evsel, tmp, list_evsel, core.node) {
+ list_del_init(&evsel->core.node);
+ perf_evsel__delete(evsel);
+ }
+ free(list_evsel);
+}
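
alloc_list() and free_list_evsel() exist so the grammar actions can fail without leaking: allocate the head, attempt the action, and on failure unlink and free whatever was already added before aborting. The same shape in a runnable toy, with plain nodes standing in for evsels:

```c
#include <stdio.h>
#include <stdlib.h>

struct node { struct node *next; };
struct head { struct node *first; };

static void free_all(struct head *h)
{
	struct node *n = h->first;

	while (n) {			/* unlink and free every element */
		struct node *tmp = n->next;
		free(n);
		n = tmp;
	}
	free(h);			/* then free the head itself */
}

static int action_that_fails(struct head *h)
{
	struct node *n = calloc(1, sizeof(*n));

	if (!n)
		return -1;
	n->next = h->first;		/* partial work before failing */
	h->first = n;
	return -1;			/* simulate a parse error */
}

int main(void)
{
	struct head *h = calloc(1, sizeof(*h));

	if (!h)
		return 1;
	if (action_that_fails(h)) {
		free_all(h);		/* no leak on the error path */
		fprintf(stderr, "aborted cleanly\n");
		return 1;
	}
	free_all(h);
	return 0;
}
```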
static void inc_group_count(struct list_head *list,
struct parse_events_state *parse_state)
@@ -61,6 +78,7 @@ static void inc_group_count(struct list_head *list,
%type <num> PE_VALUE_SYM_TOOL
%type <num> PE_RAW
%type <num> PE_TERM
+%type <num> value_sym
%type <str> PE_NAME
%type <str> PE_BPF_OBJECT
%type <str> PE_BPF_SOURCE
@@ -71,37 +89,43 @@ static void inc_group_count(struct list_head *list,
%type <str> PE_EVENT_NAME
%type <str> PE_PMU_EVENT_PRE PE_PMU_EVENT_SUF PE_KERNEL_PMU_EVENT
%type <str> PE_DRV_CFG_TERM
-%type <num> value_sym
-%type <head> event_config
-%type <head> opt_event_config
-%type <head> opt_pmu_config
+%destructor { free ($$); } <str>
%type <term> event_term
-%type <head> event_pmu
-%type <head> event_legacy_symbol
-%type <head> event_legacy_cache
-%type <head> event_legacy_mem
-%type <head> event_legacy_tracepoint
+%destructor { parse_events_term__delete ($$); } <term>
+%type <list_terms> event_config
+%type <list_terms> opt_event_config
+%type <list_terms> opt_pmu_config
+%destructor { parse_events_terms__delete ($$); } <list_terms>
+%type <list_evsel> event_pmu
+%type <list_evsel> event_legacy_symbol
+%type <list_evsel> event_legacy_cache
+%type <list_evsel> event_legacy_mem
+%type <list_evsel> event_legacy_tracepoint
+%type <list_evsel> event_legacy_numeric
+%type <list_evsel> event_legacy_raw
+%type <list_evsel> event_bpf_file
+%type <list_evsel> event_def
+%type <list_evsel> event_mod
+%type <list_evsel> event_name
+%type <list_evsel> event
+%type <list_evsel> events
+%type <list_evsel> group_def
+%type <list_evsel> group
+%type <list_evsel> groups
+%destructor { free_list_evsel ($$); } <list_evsel>
%type <tracepoint_name> tracepoint_name
-%type <head> event_legacy_numeric
-%type <head> event_legacy_raw
-%type <head> event_bpf_file
-%type <head> event_def
-%type <head> event_mod
-%type <head> event_name
-%type <head> event
-%type <head> events
-%type <head> group_def
-%type <head> group
-%type <head> groups
+%destructor { free ($$.sys); free ($$.event); } <tracepoint_name>
%type <array> array
%type <array> array_term
%type <array> array_terms
+%destructor { free ($$.ranges); } <array>
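
The %destructor directives make the leak fixes in this grammar systematic: whenever bison discards a symbol during error recovery, the matching destructor reclaims it, so only the success path has to hand ownership onward. A minimal illustrative grammar fragment, standalone and not part of parse-events.y:

```yacc
%{
#include <stdlib.h>
%}
%union { char *str; }
%token <str> NAME
/* Reclaim strdup()'d token values that error recovery throws away. */
%destructor { free($$); } <str>
%%
input: NAME { free($1); /* success path owns and frees the value */ } ;
```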
%union
{
char *str;
u64 num;
- struct list_head *head;
+ struct list_head *list_evsel;
+ struct list_head *list_terms;
struct parse_events_term *term;
struct tracepoint_name {
char *sys;
@@ -120,6 +144,7 @@ start_events: groups
{
struct parse_events_state *parse_state = _parse_state;
+ /* frees $1 */
parse_events_update_lists($1, &parse_state->list);
}
@@ -129,6 +154,7 @@ groups ',' group
struct list_head *list = $1;
struct list_head *group = $3;
+ /* frees $3 */
parse_events_update_lists(group, list);
$$ = list;
}
@@ -138,6 +164,7 @@ groups ',' event
struct list_head *list = $1;
struct list_head *event = $3;
+ /* frees $3 */
parse_events_update_lists(event, list);
$$ = list;
}
@@ -150,8 +177,14 @@ group:
group_def ':' PE_MODIFIER_EVENT
{
struct list_head *list = $1;
+ int err;
- ABORT_ON(parse_events__modifier_group(list, $3));
+ err = parse_events__modifier_group(list, $3);
+ free($3);
+ if (err) {
+ free_list_evsel(list);
+ YYABORT;
+ }
$$ = list;
}
|
@@ -164,6 +197,7 @@ PE_NAME '{' events '}'
inc_group_count(list, _parse_state);
parse_events__set_leader($1, list, _parse_state);
+ free($1);
$$ = list;
}
|
@@ -182,6 +216,7 @@ events ',' event
struct list_head *event = $3;
struct list_head *list = $1;
+ /* frees $3 */
parse_events_update_lists(event, list);
$$ = list;
}
@@ -194,13 +229,19 @@ event_mod:
event_name PE_MODIFIER_EVENT
{
struct list_head *list = $1;
+ int err;
/*
* Apply modifier on all events added by single event definition
* (there could be more events added for multiple tracepoint
* definitions via '*?').
*/
- ABORT_ON(parse_events__modifier_event(list, $2, false));
+ err = parse_events__modifier_event(list, $2, false);
+ free($2);
+ if (err) {
+ free_list_evsel(list);
+ YYABORT;
+ }
$$ = list;
}
|
@@ -209,8 +250,14 @@ event_name
event_name:
PE_EVENT_NAME event_def
{
- ABORT_ON(parse_events_name($2, $1));
+ int err;
+
+ err = parse_events_name($2, $1);
free($1);
+ if (err) {
+ free_list_evsel($2);
+ YYABORT;
+ }
$$ = $2;
}
|
@@ -230,22 +277,34 @@ PE_NAME opt_pmu_config
{
struct parse_events_state *parse_state = _parse_state;
struct parse_events_error *error = parse_state->error;
- struct list_head *list, *orig_terms, *terms;
+ struct list_head *list = NULL, *orig_terms = NULL, *terms = NULL;
+ char *pattern = NULL;
+
+#define CLEANUP_YYABORT \
+ do { \
+ parse_events_terms__delete($2); \
+ parse_events_terms__delete(orig_terms); \
+ free(list); \
+ free($1); \
+ free(pattern); \
+ YYABORT; \
+ } while(0)
if (parse_events_copy_term_list($2, &orig_terms))
- YYABORT;
+ CLEANUP_YYABORT;
if (error)
error->idx = @1.first_column;
- ALLOC_LIST(list);
+ list = alloc_list();
+ if (!list)
+ CLEANUP_YYABORT;
if (parse_events_add_pmu(_parse_state, list, $1, $2, false, false)) {
struct perf_pmu *pmu = NULL;
int ok = 0;
- char *pattern;
if (asprintf(&pattern, "%s*", $1) < 0)
- YYABORT;
+ CLEANUP_YYABORT;
while ((pmu = perf_pmu__scan(pmu)) != NULL) {
char *name = pmu->name;
@@ -254,31 +313,32 @@ PE_NAME opt_pmu_config
strncmp($1, "uncore_", 7))
name += 7;
if (!fnmatch(pattern, name, 0)) {
- if (parse_events_copy_term_list(orig_terms, &terms)) {
- free(pattern);
- YYABORT;
- }
+ if (parse_events_copy_term_list(orig_terms, &terms))
+ CLEANUP_YYABORT;
if (!parse_events_add_pmu(_parse_state, list, pmu->name, terms, true, false))
ok++;
parse_events_terms__delete(terms);
}
}
- free(pattern);
-
if (!ok)
- YYABORT;
+ CLEANUP_YYABORT;
}
parse_events_terms__delete($2);
parse_events_terms__delete(orig_terms);
+ free($1);
$$ = list;
+#undef CLEANUP_YYABORT
}
|
PE_KERNEL_PMU_EVENT sep_dc
{
struct list_head *list;
+ int err;
- if (parse_events_multi_pmu_add(_parse_state, $1, &list) < 0)
+ err = parse_events_multi_pmu_add(_parse_state, $1, &list);
+ free($1);
+ if (err < 0)
YYABORT;
$$ = list;
}
@@ -289,6 +349,8 @@ PE_PMU_EVENT_PRE '-' PE_PMU_EVENT_SUF sep_dc
char pmu_name[128];
snprintf(&pmu_name, 128, "%s-%s", $1, $3);
+ free($1);
+ free($3);
if (parse_events_multi_pmu_add(_parse_state, pmu_name, &list) < 0)
YYABORT;
$$ = list;
@@ -305,10 +367,16 @@ value_sym '/' event_config '/'
struct list_head *list;
int type = $1 >> 16;
int config = $1 & 255;
+ int err;
- ALLOC_LIST(list);
- ABORT_ON(parse_events_add_numeric(_parse_state, list, type, config, $3));
+ list = alloc_list();
+ ABORT_ON(!list);
+ err = parse_events_add_numeric(_parse_state, list, type, config, $3);
parse_events_terms__delete($3);
+ if (err) {
+ free_list_evsel(list);
+ YYABORT;
+ }
$$ = list;
}
|
@@ -318,7 +386,8 @@ value_sym sep_slash_slash_dc
int type = $1 >> 16;
int config = $1 & 255;
- ALLOC_LIST(list);
+ list = alloc_list();
+ ABORT_ON(!list);
ABORT_ON(parse_events_add_numeric(_parse_state, list, type, config, NULL));
$$ = list;
}
@@ -327,7 +396,8 @@ PE_VALUE_SYM_TOOL sep_slash_slash_dc
{
struct list_head *list;
- ALLOC_LIST(list);
+ list = alloc_list();
+ ABORT_ON(!list);
ABORT_ON(parse_events_add_tool(_parse_state, list, $1));
$$ = list;
}
@@ -338,10 +408,19 @@ PE_NAME_CACHE_TYPE '-' PE_NAME_CACHE_OP_RESULT '-' PE_NAME_CACHE_OP_RESULT opt_e
struct parse_events_state *parse_state = _parse_state;
struct parse_events_error *error = parse_state->error;
struct list_head *list;
+ int err;
- ALLOC_LIST(list);
- ABORT_ON(parse_events_add_cache(list, &parse_state->idx, $1, $3, $5, error, $6));
+ list = alloc_list();
+ ABORT_ON(!list);
+ err = parse_events_add_cache(list, &parse_state->idx, $1, $3, $5, error, $6);
parse_events_terms__delete($6);
+ free($1);
+ free($3);
+ free($5);
+ if (err) {
+ free_list_evsel(list);
+ YYABORT;
+ }
$$ = list;
}
|
@@ -350,10 +429,18 @@ PE_NAME_CACHE_TYPE '-' PE_NAME_CACHE_OP_RESULT opt_event_config
struct parse_events_state *parse_state = _parse_state;
struct parse_events_error *error = parse_state->error;
struct list_head *list;
+ int err;
- ALLOC_LIST(list);
- ABORT_ON(parse_events_add_cache(list, &parse_state->idx, $1, $3, NULL, error, $4));
+ list = alloc_list();
+ ABORT_ON(!list);
+ err = parse_events_add_cache(list, &parse_state->idx, $1, $3, NULL, error, $4);
parse_events_terms__delete($4);
+ free($1);
+ free($3);
+ if (err) {
+ free_list_evsel(list);
+ YYABORT;
+ }
$$ = list;
}
|
@@ -362,10 +449,17 @@ PE_NAME_CACHE_TYPE opt_event_config
struct parse_events_state *parse_state = _parse_state;
struct parse_events_error *error = parse_state->error;
struct list_head *list;
+ int err;
- ALLOC_LIST(list);
- ABORT_ON(parse_events_add_cache(list, &parse_state->idx, $1, NULL, NULL, error, $2));
+ list = alloc_list();
+ ABORT_ON(!list);
+ err = parse_events_add_cache(list, &parse_state->idx, $1, NULL, NULL, error, $2);
parse_events_terms__delete($2);
+ free($1);
+ if (err) {
+ free_list_evsel(list);
+ YYABORT;
+ }
$$ = list;
}
@@ -374,10 +468,17 @@ PE_PREFIX_MEM PE_VALUE '/' PE_VALUE ':' PE_MODIFIER_BP sep_dc
{
struct parse_events_state *parse_state = _parse_state;
struct list_head *list;
-
- ALLOC_LIST(list);
- ABORT_ON(parse_events_add_breakpoint(list, &parse_state->idx,
- (void *) $2, $6, $4));
+ int err;
+
+ list = alloc_list();
+ ABORT_ON(!list);
+ err = parse_events_add_breakpoint(list, &parse_state->idx,
+ (void *) $2, $6, $4);
+ free($6);
+ if (err) {
+ free(list);
+ YYABORT;
+ }
$$ = list;
}
|
@@ -386,9 +487,13 @@ PE_PREFIX_MEM PE_VALUE '/' PE_VALUE sep_dc
struct parse_events_state *parse_state = _parse_state;
struct list_head *list;
- ALLOC_LIST(list);
- ABORT_ON(parse_events_add_breakpoint(list, &parse_state->idx,
- (void *) $2, NULL, $4));
+ list = alloc_list();
+ ABORT_ON(!list);
+ if (parse_events_add_breakpoint(list, &parse_state->idx,
+ (void *) $2, NULL, $4)) {
+ free(list);
+ YYABORT;
+ }
$$ = list;
}
|
@@ -396,10 +501,17 @@ PE_PREFIX_MEM PE_VALUE ':' PE_MODIFIER_BP sep_dc
{
struct parse_events_state *parse_state = _parse_state;
struct list_head *list;
-
- ALLOC_LIST(list);
- ABORT_ON(parse_events_add_breakpoint(list, &parse_state->idx,
- (void *) $2, $4, 0));
+ int err;
+
+ list = alloc_list();
+ ABORT_ON(!list);
+ err = parse_events_add_breakpoint(list, &parse_state->idx,
+ (void *) $2, $4, 0);
+ free($4);
+ if (err) {
+ free(list);
+ YYABORT;
+ }
$$ = list;
}
|
@@ -408,9 +520,13 @@ PE_PREFIX_MEM PE_VALUE sep_dc
struct parse_events_state *parse_state = _parse_state;
struct list_head *list;
- ALLOC_LIST(list);
- ABORT_ON(parse_events_add_breakpoint(list, &parse_state->idx,
- (void *) $2, NULL, 0));
+ list = alloc_list();
+ ABORT_ON(!list);
+ if (parse_events_add_breakpoint(list, &parse_state->idx,
+ (void *) $2, NULL, 0)) {
+ free(list);
+ YYABORT;
+ }
$$ = list;
}
@@ -420,28 +536,35 @@ tracepoint_name opt_event_config
struct parse_events_state *parse_state = _parse_state;
struct parse_events_error *error = parse_state->error;
struct list_head *list;
+ int err;
- ALLOC_LIST(list);
+ list = alloc_list();
+ ABORT_ON(!list);
if (error)
error->idx = @1.first_column;
- if (parse_events_add_tracepoint(list, &parse_state->idx, $1.sys, $1.event,
- error, $2))
- return -1;
+ err = parse_events_add_tracepoint(list, &parse_state->idx, $1.sys, $1.event,
+ error, $2);
+ parse_events_terms__delete($2);
+ free($1.sys);
+ free($1.event);
+ if (err) {
+ free(list);
+ YYABORT;
+ }
$$ = list;
}
tracepoint_name:
PE_NAME '-' PE_NAME ':' PE_NAME
{
- char sys_name[128];
struct tracepoint_name tracepoint;
- snprintf(&sys_name, 128, "%s-%s", $1, $3);
- tracepoint.sys = &sys_name;
+ ABORT_ON(asprintf(&tracepoint.sys, "%s-%s", $1, $3) < 0);
tracepoint.event = $5;
-
+ free($1);
+ free($3);
$$ = tracepoint;
}
|
@@ -456,10 +579,16 @@ event_legacy_numeric:
PE_VALUE ':' PE_VALUE opt_event_config
{
struct list_head *list;
+ int err;
- ALLOC_LIST(list);
- ABORT_ON(parse_events_add_numeric(_parse_state, list, (u32)$1, $3, $4));
+ list = alloc_list();
+ ABORT_ON(!list);
+ err = parse_events_add_numeric(_parse_state, list, (u32)$1, $3, $4);
parse_events_terms__delete($4);
+ if (err) {
+ free(list);
+ YYABORT;
+ }
$$ = list;
}
@@ -467,10 +596,16 @@ event_legacy_raw:
PE_RAW opt_event_config
{
struct list_head *list;
+ int err;
- ALLOC_LIST(list);
- ABORT_ON(parse_events_add_numeric(_parse_state, list, PERF_TYPE_RAW, $1, $2));
+ list = alloc_list();
+ ABORT_ON(!list);
+ err = parse_events_add_numeric(_parse_state, list, PERF_TYPE_RAW, $1, $2);
parse_events_terms__delete($2);
+ if (err) {
+ free(list);
+ YYABORT;
+ }
$$ = list;
}
@@ -479,20 +614,33 @@ PE_BPF_OBJECT opt_event_config
{
struct parse_events_state *parse_state = _parse_state;
struct list_head *list;
+ int err;
- ALLOC_LIST(list);
- ABORT_ON(parse_events_load_bpf(parse_state, list, $1, false, $2));
+ list = alloc_list();
+ ABORT_ON(!list);
+ err = parse_events_load_bpf(parse_state, list, $1, false, $2);
parse_events_terms__delete($2);
+ free($1);
+ if (err) {
+ free(list);
+ YYABORT;
+ }
$$ = list;
}
|
PE_BPF_SOURCE opt_event_config
{
struct list_head *list;
+ int err;
- ALLOC_LIST(list);
- ABORT_ON(parse_events_load_bpf(_parse_state, list, $1, true, $2));
+ list = alloc_list();
+ ABORT_ON(!list);
+ err = parse_events_load_bpf(_parse_state, list, $1, true, $2);
parse_events_terms__delete($2);
+ if (err) {
+ free(list);
+ YYABORT;
+ }
$$ = list;
}
@@ -525,6 +673,10 @@ opt_pmu_config:
start_terms: event_config
{
struct parse_events_state *parse_state = _parse_state;
+ if (parse_state->terms) {
+ parse_events_terms__delete ($1);
+ YYABORT;
+ }
parse_state->terms = $1;
}
@@ -534,7 +686,10 @@ event_config ',' event_term
struct list_head *head = $1;
struct parse_events_term *term = $3;
- ABORT_ON(!head);
+ if (!head) {
+ parse_events_term__delete(term);
+ YYABORT;
+ }
list_add_tail(&term->list, head);
$$ = $1;
}
@@ -555,8 +710,12 @@ PE_NAME '=' PE_NAME
{
struct parse_events_term *term;
- ABORT_ON(parse_events_term__str(&term, PARSE_EVENTS__TERM_TYPE_USER,
- $1, $3, &@1, &@3));
+ if (parse_events_term__str(&term, PARSE_EVENTS__TERM_TYPE_USER,
+ $1, $3, &@1, &@3)) {
+ free($1);
+ free($3);
+ YYABORT;
+ }
$$ = term;
}
|
@@ -564,8 +723,11 @@ PE_NAME '=' PE_VALUE
{
struct parse_events_term *term;
- ABORT_ON(parse_events_term__num(&term, PARSE_EVENTS__TERM_TYPE_USER,
- $1, $3, false, &@1, &@3));
+ if (parse_events_term__num(&term, PARSE_EVENTS__TERM_TYPE_USER,
+ $1, $3, false, &@1, &@3)) {
+ free($1);
+ YYABORT;
+ }
$$ = term;
}
|
@@ -574,7 +736,10 @@ PE_NAME '=' PE_VALUE_SYM_HW
struct parse_events_term *term;
int config = $3 & 255;
- ABORT_ON(parse_events_term__sym_hw(&term, $1, config));
+ if (parse_events_term__sym_hw(&term, $1, config)) {
+ free($1);
+ YYABORT;
+ }
$$ = term;
}
|
@@ -582,8 +747,11 @@ PE_NAME
{
struct parse_events_term *term;
- ABORT_ON(parse_events_term__num(&term, PARSE_EVENTS__TERM_TYPE_USER,
- $1, 1, true, &@1, NULL));
+ if (parse_events_term__num(&term, PARSE_EVENTS__TERM_TYPE_USER,
+ $1, 1, true, &@1, NULL)) {
+ free($1);
+ YYABORT;
+ }
$$ = term;
}
|
@@ -600,7 +768,10 @@ PE_TERM '=' PE_NAME
{
struct parse_events_term *term;
- ABORT_ON(parse_events_term__str(&term, (int)$1, NULL, $3, &@1, &@3));
+ if (parse_events_term__str(&term, (int)$1, NULL, $3, &@1, &@3)) {
+ free($3);
+ YYABORT;
+ }
$$ = term;
}
|
@@ -624,9 +795,13 @@ PE_NAME array '=' PE_NAME
{
struct parse_events_term *term;
- ABORT_ON(parse_events_term__str(&term, PARSE_EVENTS__TERM_TYPE_USER,
- $1, $4, &@1, &@4));
-
+ if (parse_events_term__str(&term, PARSE_EVENTS__TERM_TYPE_USER,
+ $1, $4, &@1, &@4)) {
+ free($1);
+ free($4);
+ free($2.ranges);
+ YYABORT;
+ }
term->array = $2;
$$ = term;
}
@@ -635,8 +810,12 @@ PE_NAME array '=' PE_VALUE
{
struct parse_events_term *term;
- ABORT_ON(parse_events_term__num(&term, PARSE_EVENTS__TERM_TYPE_USER,
- $1, $4, false, &@1, &@4));
+ if (parse_events_term__num(&term, PARSE_EVENTS__TERM_TYPE_USER,
+ $1, $4, false, &@1, &@4)) {
+ free($1);
+ free($2.ranges);
+ YYABORT;
+ }
term->array = $2;
$$ = term;
}
@@ -644,9 +823,15 @@ PE_NAME array '=' PE_VALUE
PE_DRV_CFG_TERM
{
struct parse_events_term *term;
+ char *config = strdup($1);
- ABORT_ON(parse_events_term__str(&term, PARSE_EVENTS__TERM_TYPE_DRV_CFG,
- $1, $1, &@1, NULL));
+ ABORT_ON(!config);
+ if (parse_events_term__str(&term, PARSE_EVENTS__TERM_TYPE_DRV_CFG,
+ config, $1, &@1, NULL)) {
+ free($1);
+ free(config);
+ YYABORT;
+ }
$$ = term;
}
@@ -668,14 +853,12 @@ array_terms ',' array_term
struct parse_events_array new_array;
new_array.nr_ranges = $1.nr_ranges + $3.nr_ranges;
- new_array.ranges = malloc(sizeof(new_array.ranges[0]) *
- new_array.nr_ranges);
+ new_array.ranges = realloc($1.ranges,
+ sizeof(new_array.ranges[0]) *
+ new_array.nr_ranges);
ABORT_ON(!new_array.ranges);
- memcpy(&new_array.ranges[0], $1.ranges,
- $1.nr_ranges * sizeof(new_array.ranges[0]));
memcpy(&new_array.ranges[$1.nr_ranges], $3.ranges,
$3.nr_ranges * sizeof(new_array.ranges[0]));
- free($1.ranges);
free($3.ranges);
$$ = new_array;
}
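
The array_terms change replaces a malloc+memcpy+free of the left-hand ranges with a single realloc that extends them in place; only the right-hand ranges still need copying and freeing. The same pattern in isolation, a sketch with plain ints standing in for the range structs:

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	size_t n1 = 2, n2 = 3;
	int *a = malloc(n1 * sizeof(*a));
	int b[] = { 3, 4, 5 };
	int *tmp;

	if (!a)
		return 1;
	a[0] = 1;
	a[1] = 2;
	/* Extend the left operand in place instead of copying it. */
	tmp = realloc(a, (n1 + n2) * sizeof(*a));
	if (!tmp) {
		free(a);
		return 1;
	}
	a = tmp;
	memcpy(a + n1, b, n2 * sizeof(*a));	/* only 'b' is copied */
	for (size_t i = 0; i < n1 + n2; i++)
		printf("%d ", a[i]);
	putchar('\n');
	free(a);
	return 0;
}
```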
diff --git a/tools/perf/util/parse-regs-options.c b/tools/perf/util/parse-regs-options.c
index ef46c2848808..e687497b3aac 100644
--- a/tools/perf/util/parse-regs-options.c
+++ b/tools/perf/util/parse-regs-options.c
@@ -13,7 +13,7 @@ static int
__parse_regs(const struct option *opt, const char *str, int unset, bool intr)
{
uint64_t *mode = (uint64_t *)opt->value;
- const struct sample_reg *r;
+ const struct sample_reg *r = NULL;
char *s, *os = NULL, *p;
int ret = -1;
uint64_t mask;
@@ -46,19 +46,23 @@ __parse_regs(const struct option *opt, const char *str, int unset, bool intr)
if (!strcmp(s, "?")) {
fprintf(stderr, "available registers: ");
+#ifdef HAVE_PERF_REGS_SUPPORT
for (r = sample_reg_masks; r->name; r++) {
if (r->mask & mask)
fprintf(stderr, "%s ", r->name);
}
+#endif
fputc('\n', stderr);
/* just printing available regs */
return -1;
}
+#ifdef HAVE_PERF_REGS_SUPPORT
for (r = sample_reg_masks; r->name; r++) {
if ((r->mask & mask) && !strcasecmp(s, r->name))
break;
}
- if (!r->name) {
+#endif
+ if (!r || !r->name) {
ui__warning("Unknown register \"%s\", check man page or run \"perf record %s?\"\n",
s, intr ? "-I" : "--user-regs=");
goto error;
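
sample_reg_masks now only exists when HAVE_PERF_REGS_SUPPORT is defined, so the lookup loop is compiled out and r stays at its NULL initializer; the !r check then covers both "no table built in" and "name not found". A self-contained sketch of that guard, with the table contents invented:

```c
#include <stdio.h>
#include <strings.h>

struct sample_reg { const char *name; unsigned long long mask; };

#ifdef HAVE_PERF_REGS_SUPPORT
/* Only builds with regs support provide this table. */
static const struct sample_reg sample_reg_masks[] = {
	{ "ip", 1ULL << 0 }, { "sp", 1ULL << 1 }, { NULL, 0 },
};
#endif

static int reg_is_known(const char *s)
{
	const struct sample_reg *r = NULL;

#ifdef HAVE_PERF_REGS_SUPPORT
	for (r = sample_reg_masks; r->name; r++)
		if (!strcasecmp(s, r->name))
			break;
#endif
	return r && r->name;	/* false if no table or no match */
}

int main(void)
{
	printf("ip known: %d\n", reg_is_known("ip"));
	return 0;
}
```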
diff --git a/tools/perf/util/perf_event_attr_fprintf.c b/tools/perf/util/perf_event_attr_fprintf.c
index d4ad3f04923a..651203126c71 100644
--- a/tools/perf/util/perf_event_attr_fprintf.c
+++ b/tools/perf/util/perf_event_attr_fprintf.c
@@ -34,7 +34,7 @@ static void __p_sample_type(char *buf, size_t size, u64 value)
bit_name(PERIOD), bit_name(STREAM_ID), bit_name(RAW),
bit_name(BRANCH_STACK), bit_name(REGS_USER), bit_name(STACK_USER),
bit_name(IDENTIFIER), bit_name(REGS_INTR), bit_name(DATA_SRC),
- bit_name(WEIGHT), bit_name(PHYS_ADDR),
+ bit_name(WEIGHT), bit_name(PHYS_ADDR), bit_name(AUX),
{ .name = NULL, }
};
#undef bit_name
@@ -143,6 +143,7 @@ int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr,
PRINT_ATTRf(sample_regs_intr, p_hex);
PRINT_ATTRf(aux_watermark, p_unsigned);
PRINT_ATTRf(sample_max_stack, p_unsigned);
+ PRINT_ATTRf(aux_sample_size, p_unsigned);
return ret;
}
diff --git a/tools/perf/util/perf_regs.c b/tools/perf/util/perf_regs.c
index 2774cec1f15f..5ee47ae1509c 100644
--- a/tools/perf/util/perf_regs.c
+++ b/tools/perf/util/perf_regs.c
@@ -3,10 +3,6 @@
#include "perf_regs.h"
#include "event.h"
-const struct sample_reg __weak sample_reg_masks[] = {
- SMPL_REG_END
-};
-
int __weak arch_sdt_arg_parse_op(char *old_op __maybe_unused,
char **new_op __maybe_unused)
{
diff --git a/tools/perf/util/perf_regs.h b/tools/perf/util/perf_regs.h
index 47fe34e5f7d5..e014c2c038f4 100644
--- a/tools/perf/util/perf_regs.h
+++ b/tools/perf/util/perf_regs.h
@@ -15,8 +15,6 @@ struct sample_reg {
#define SMPL_REG2(n, b) { .name = #n, .mask = 3ULL << (b) }
#define SMPL_REG_END { .name = NULL }
-extern const struct sample_reg sample_reg_masks[];
-
enum {
SDT_ARG_VALID = 0,
SDT_ARG_SKIP,
@@ -27,6 +25,8 @@ uint64_t arch__intr_reg_mask(void);
uint64_t arch__user_reg_mask(void);
#ifdef HAVE_PERF_REGS_SUPPORT
+extern const struct sample_reg sample_reg_masks[];
+
#include <perf_regs.h>
#define DWARF_MINIMAL_REGS ((1ULL << PERF_REG_IP) | (1ULL << PERF_REG_SP))
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index 5608da82ad23..e8d348988026 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -308,7 +308,8 @@ static int __perf_pmu__new_alias(struct list_head *list, char *dir, char *name,
char *long_desc, char *topic,
char *unit, char *perpkg,
char *metric_expr,
- char *metric_name)
+ char *metric_name,
+ char *deprecated)
{
struct parse_events_term *term;
struct perf_pmu_alias *alias;
@@ -325,6 +326,7 @@ static int __perf_pmu__new_alias(struct list_head *list, char *dir, char *name,
alias->unit[0] = '\0';
alias->per_pkg = false;
alias->snapshot = false;
+ alias->deprecated = false;
ret = parse_events_terms(&alias->terms, val);
if (ret) {
@@ -379,6 +381,9 @@ static int __perf_pmu__new_alias(struct list_head *list, char *dir, char *name,
alias->per_pkg = perpkg && sscanf(perpkg, "%d", &num) == 1 && num == 1;
alias->str = strdup(newval);
+ if (deprecated)
+ alias->deprecated = true;
+
if (!perf_pmu_merge_alias(alias, list))
list_add_tail(&alias->list, list);
@@ -400,7 +405,7 @@ static int perf_pmu__new_alias(struct list_head *list, char *dir, char *name, FI
strim(buf);
return __perf_pmu__new_alias(list, dir, name, NULL, buf, NULL, NULL, NULL,
- NULL, NULL, NULL);
+ NULL, NULL, NULL, NULL);
}
static inline bool pmu_alias_info_file(char *name)
@@ -787,7 +792,8 @@ new_alias:
(char *)pe->long_desc, (char *)pe->topic,
(char *)pe->unit, (char *)pe->perpkg,
(char *)pe->metric_expr,
- (char *)pe->metric_name);
+ (char *)pe->metric_name,
+ (char *)pe->deprecated);
}
}
@@ -925,6 +931,16 @@ __u64 perf_pmu__format_bits(struct list_head *formats, const char *name)
return bits;
}
+int perf_pmu__format_type(struct list_head *formats, const char *name)
+{
+ struct perf_pmu_format *format = pmu_find_format(formats, name);
+
+ if (!format)
+ return -1;
+
+ return format->value;
+}
+
/*
* Sets value based on the format definition (format parameter)
* and unformatted value (value parameter).
@@ -1044,9 +1060,9 @@ static int pmu_config_term(struct list_head *formats,
if (err) {
char *pmu_term = pmu_formats_string(formats);
- err->idx = term->err_term;
- err->str = strdup("unknown term");
- err->help = parse_events_formats_error_string(pmu_term);
+ parse_events__handle_error(err, term->err_term,
+ strdup("unknown term"),
+ parse_events_formats_error_string(pmu_term));
free(pmu_term);
}
return -EINVAL;
@@ -1074,8 +1090,9 @@ static int pmu_config_term(struct list_head *formats,
if (term->no_value &&
bitmap_weight(format->bits, PERF_PMU_FORMAT_BITS) > 1) {
if (err) {
- err->idx = term->err_val;
- err->str = strdup("no value assigned for term");
+ parse_events__handle_error(err, term->err_val,
+ strdup("no value assigned for term"),
+ NULL);
}
return -EINVAL;
}
@@ -1088,8 +1105,9 @@ static int pmu_config_term(struct list_head *formats,
term->config, term->val.str);
}
if (err) {
- err->idx = term->err_val;
- err->str = strdup("expected numeric value");
+ parse_events__handle_error(err, term->err_val,
+ strdup("expected numeric value"),
+ NULL);
}
return -EINVAL;
}
@@ -1102,11 +1120,15 @@ static int pmu_config_term(struct list_head *formats,
max_val = pmu_format_max_value(format->bits);
if (val > max_val) {
if (err) {
- err->idx = term->err_val;
- if (asprintf(&err->str,
- "value too big for format, maximum is %llu",
- (unsigned long long)max_val) < 0)
- err->str = strdup("value too big for format");
+ char *err_str;
+
+ parse_events__handle_error(err, term->err_val,
+ asprintf(&err_str,
+ "value too big for format, maximum is %llu",
+ (unsigned long long)max_val) < 0
+ ? strdup("value too big for format")
+ : err_str,
+ NULL);
return -EINVAL;
}
/*
@@ -1248,7 +1270,7 @@ int perf_pmu__check_alias(struct perf_pmu *pmu, struct list_head *head_terms,
info->metric_name = alias->metric_name;
list_del_init(&term->list);
- free(term);
+ parse_events_term__delete(term);
}
/*
@@ -1383,7 +1405,7 @@ static void wordwrap(char *s, int start, int max, int corr)
}
void print_pmu_events(const char *event_glob, bool name_only, bool quiet_flag,
- bool long_desc, bool details_flag)
+ bool long_desc, bool details_flag, bool deprecated)
{
struct perf_pmu *pmu;
struct perf_pmu_alias *alias;
@@ -1414,6 +1436,9 @@ void print_pmu_events(const char *event_glob, bool name_only, bool quiet_flag,
format_alias(buf, sizeof(buf), pmu, alias);
bool is_cpu = !strcmp(pmu->name, "cpu");
+ if (alias->deprecated && !deprecated)
+ continue;
+
if (event_glob != NULL &&
!(strglobmatch_nocase(name, event_glob) ||
(!is_cpu && strglobmatch_nocase(alias->name,
diff --git a/tools/perf/util/pmu.h b/tools/perf/util/pmu.h
index f36ade6df76d..6737e3d5d568 100644
--- a/tools/perf/util/pmu.h
+++ b/tools/perf/util/pmu.h
@@ -26,6 +26,7 @@ struct perf_pmu {
__u32 type;
bool selectable;
bool is_uncore;
+ bool auxtrace;
int max_precise;
struct perf_event_attr *default_config;
struct perf_cpu_map *cpus;
@@ -57,6 +58,7 @@ struct perf_pmu_alias {
double scale;
bool per_pkg;
bool snapshot;
+ bool deprecated;
char *metric_expr;
char *metric_name;
};
@@ -70,6 +72,7 @@ int perf_pmu__config_terms(struct list_head *formats,
struct list_head *head_terms,
bool zero, struct parse_events_error *error);
__u64 perf_pmu__format_bits(struct list_head *formats, const char *name);
+int perf_pmu__format_type(struct list_head *formats, const char *name);
int perf_pmu__check_alias(struct perf_pmu *pmu, struct list_head *head_terms,
struct perf_pmu_info *info);
struct list_head *perf_pmu__alias(struct perf_pmu *pmu,
@@ -85,7 +88,8 @@ int perf_pmu__format_parse(char *dir, struct list_head *head);
struct perf_pmu *perf_pmu__scan(struct perf_pmu *pmu);
void print_pmu_events(const char *event_glob, bool name_only, bool quiet,
- bool long_desc, bool details_flag);
+ bool long_desc, bool details_flag,
+ bool deprecated);
bool pmu_have_event(const char *pname, const char *name);
int perf_pmu__scan_file(struct perf_pmu *pmu, const char *name, const char *fmt, ...) __scanf(3, 4);
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index 91cab5f669d2..52b2d165453a 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -46,7 +46,7 @@
#define PERFPROBE_GROUP "probe"
bool probe_event_dry_run; /* Dry run flag */
-struct probe_conf probe_conf;
+struct probe_conf probe_conf = { .magic_num = DEFAULT_PROBE_MAGIC_NUM };
#define semantic_error(msg ...) pr_err("Semantic error :" msg)
@@ -153,7 +153,7 @@ static struct map *kernel_get_module_map(const char *module)
return map__get(pos);
}
- for (pos = maps__first(maps); pos; pos = map__next(pos)) {
+ maps__for_each_entry(maps, pos) {
/* short_name is "[module]" */
if (strncmp(pos->dso->short_name + 1, module,
pos->dso->short_name_len - 2) == 0 &&
@@ -1679,6 +1679,14 @@ int parse_perf_probe_command(const char *cmd, struct perf_probe_event *pev)
if (ret < 0)
goto out;
+ /* Generate event name if needed */
+ if (!pev->event && pev->point.function && pev->point.line
+ && !pev->point.lazy_line && !pev->point.offset) {
+ if (asprintf(&pev->event, "%s_L%d", pev->point.function,
+ pev->point.line) < 0)
+ return -ENOMEM;
+ }
+
/* Copy arguments and ensure return probe has no C argument */
pev->nargs = argc - 1;
pev->args = zalloc(sizeof(struct perf_probe_arg) * pev->nargs);
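
With this hunk, a probe specified only as FUNC:LINE gets an auto-generated event name of the form FUNC_L<line>. A hypothetical session, with the output shape approximated from perf probe's usual messages:

```
$ perf probe -a 'vfs_read:10'
Added new event:
  probe:vfs_read_L10   (on vfs_read:10)
```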
@@ -2730,8 +2738,13 @@ static int probe_trace_event__set_name(struct probe_trace_event *tev,
if (tev->event == NULL || tev->group == NULL)
return -ENOMEM;
- /* Add added event name to namelist */
- strlist__add(namelist, event);
+ /*
+ * Add the new event name to the namelist only if multiprobe events
+ * are NOT supported, since in that case each subsequent probe must
+ * use a fresh event name.
+ */
+ if (!multiprobe_event_is_supported())
+ strlist__add(namelist, event);
return 0;
}
diff --git a/tools/perf/util/probe-event.h b/tools/perf/util/probe-event.h
index 96a319cd2378..4f0eb3a20c36 100644
--- a/tools/perf/util/probe-event.h
+++ b/tools/perf/util/probe-event.h
@@ -16,10 +16,13 @@ struct probe_conf {
bool no_inlines;
bool cache;
int max_probes;
+ unsigned long magic_num;
};
extern struct probe_conf probe_conf;
extern bool probe_event_dry_run;
+#define DEFAULT_PROBE_MAGIC_NUM 0xdeade12d /* u32: 3735937325 */
+
struct symbol;
/* kprobe-tracer and uprobe-tracer tracing point */
diff --git a/tools/perf/util/probe-file.c b/tools/perf/util/probe-file.c
index b659466ea498..5003ba403345 100644
--- a/tools/perf/util/probe-file.c
+++ b/tools/perf/util/probe-file.c
@@ -1007,6 +1007,8 @@ enum ftrace_readme {
FTRACE_README_KRETPROBE_OFFSET,
FTRACE_README_UPROBE_REF_CTR,
FTRACE_README_USER_ACCESS,
+ FTRACE_README_MULTIPROBE_EVENT,
+ FTRACE_README_IMMEDIATE_VALUE,
FTRACE_README_END,
};
@@ -1020,6 +1022,8 @@ static struct {
DEFINE_TYPE(FTRACE_README_KRETPROBE_OFFSET, "*place (kretprobe): *"),
DEFINE_TYPE(FTRACE_README_UPROBE_REF_CTR, "*ref_ctr_offset*"),
DEFINE_TYPE(FTRACE_README_USER_ACCESS, "*[u]<offset>*"),
+ DEFINE_TYPE(FTRACE_README_MULTIPROBE_EVENT, "*Create/append/*"),
+ DEFINE_TYPE(FTRACE_README_IMMEDIATE_VALUE, "*\\imm-value,*"),
};
static bool scan_ftrace_readme(enum ftrace_readme type)
@@ -1085,3 +1089,13 @@ bool user_access_is_supported(void)
{
return scan_ftrace_readme(FTRACE_README_USER_ACCESS);
}
+
+bool multiprobe_event_is_supported(void)
+{
+ return scan_ftrace_readme(FTRACE_README_MULTIPROBE_EVENT);
+}
+
+bool immediate_value_is_supported(void)
+{
+ return scan_ftrace_readme(FTRACE_README_IMMEDIATE_VALUE);
+}
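
Both new capability checks reduce to pattern-matching the kernel's ftrace README, the same trick the existing checks use. Roughly, using a strstr-based stand-in for the per-line glob match the real scan_ftrace_readme() performs:

```c
#include <stdio.h>
#include <string.h>

/* Stand-in: the real code globs each README line against a pattern. */
static int readme_advertises(const char *readme, const char *needle)
{
	return strstr(readme, needle) != NULL;
}

int main(void)
{
	/* Hypothetical README excerpt from a kernel with both features. */
	const char *readme =
		"  Create/append/ and delete probe events\n"
		"  args: ... \\imm-value, \\\"imm-string\"\n";

	printf("multiprobe: %d\n",
	       readme_advertises(readme, "Create/append/"));
	printf("immediate values: %d\n",
	       readme_advertises(readme, "\\imm-value,"));
	return 0;
}
```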
diff --git a/tools/perf/util/probe-file.h b/tools/perf/util/probe-file.h
index 986c1c94f64f..0dba88c0f5f0 100644
--- a/tools/perf/util/probe-file.h
+++ b/tools/perf/util/probe-file.h
@@ -71,6 +71,8 @@ bool probe_type_is_available(enum probe_type type);
bool kretprobe_offset_is_supported(void);
bool uprobe_ref_ctr_is_supported(void);
bool user_access_is_supported(void);
+bool multiprobe_event_is_supported(void);
+bool immediate_value_is_supported(void);
#else /* ! HAVE_LIBELF_SUPPORT */
static inline struct probe_cache *probe_cache__new(const char *tgt __maybe_unused, struct nsinfo *nsi __maybe_unused)
{
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index cd9f95e5044e..c470c49a804f 100644
--- a/tools/perf/util/probe-finder.c
+++ b/tools/perf/util/probe-finder.c
@@ -177,6 +177,17 @@ static int convert_variable_location(Dwarf_Die *vr_die, Dwarf_Addr addr,
if (dwarf_attr(vr_die, DW_AT_external, &attr) != NULL)
goto static_var;
+ /* Constant value */
+ if (dwarf_attr(vr_die, DW_AT_const_value, &attr) &&
+ immediate_value_is_supported()) {
+ Dwarf_Sword snum;
+
+ dwarf_formsdata(&attr, &snum);
+ ret = asprintf(&tvar->value, "\\%ld", (long)snum);
+
+ return ret < 0 ? -ENOMEM : 0;
+ }
+
/* TODO: handle more than 1 exprs */
if (dwarf_attr(vr_die, DW_AT_location, &attr) == NULL)
return -EINVAL; /* Broken DIE ? */
@@ -525,6 +536,14 @@ next:
return 0;
}
+static void print_var_not_found(const char *varname)
+{
+ pr_err("Failed to find the location of the '%s' variable at this address.\n"
+ " Perhaps it has been optimized out.\n"
+ " Use -V with the --range option to show '%s' location range.\n",
+ varname, varname);
+}
+
/* Show a variable in kprobe event format */
static int convert_variable(Dwarf_Die *vr_die, struct probe_finder *pf)
{
@@ -536,11 +555,11 @@ static int convert_variable(Dwarf_Die *vr_die, struct probe_finder *pf)
ret = convert_variable_location(vr_die, pf->addr, pf->fb_ops,
&pf->sp_die, pf->machine, pf->tvar);
+ if (ret == -ENOENT && pf->skip_empty_arg)
+ /* This can be found in other place. skip it */
+ return 0;
if (ret == -ENOENT || ret == -EINVAL) {
- pr_err("Failed to find the location of the '%s' variable at this address.\n"
- " Perhaps it has been optimized out.\n"
- " Use -V with the --range option to show '%s' location range.\n",
- pf->pvar->var, pf->pvar->var);
+ print_var_not_found(pf->pvar->var);
} else if (ret == -ENOTSUP)
pr_err("Sorry, we don't support this variable location yet.\n");
else if (ret == 0 && pf->pvar->field) {
@@ -587,6 +606,8 @@ static int find_variable(Dwarf_Die *sc_die, struct probe_finder *pf)
/* Search again in global variables */
if (!die_find_variable_at(&pf->cu_die, pf->pvar->var,
0, &vr_die)) {
+ if (pf->skip_empty_arg)
+ return 0;
pr_warning("Failed to find '%s' in this function.\n",
pf->pvar->var);
ret = -ENOENT;
@@ -604,38 +625,26 @@ static int convert_to_trace_point(Dwarf_Die *sp_die, Dwfl_Module *mod,
const char *function,
struct probe_trace_point *tp)
{
- Dwarf_Addr eaddr, highaddr;
+ Dwarf_Addr eaddr;
GElf_Sym sym;
const char *symbol;
/* Verify the address is correct */
- if (dwarf_entrypc(sp_die, &eaddr) != 0) {
- pr_warning("Failed to get entry address of %s\n",
- dwarf_diename(sp_die));
- return -ENOENT;
- }
- if (dwarf_highpc(sp_die, &highaddr) != 0) {
- pr_warning("Failed to get end address of %s\n",
- dwarf_diename(sp_die));
- return -ENOENT;
- }
- if (paddr > highaddr) {
- pr_warning("Offset specified is greater than size of %s\n",
+ if (!dwarf_haspc(sp_die, paddr)) {
+ pr_warning("Specified offset is out of %s\n",
dwarf_diename(sp_die));
return -EINVAL;
}
- symbol = dwarf_diename(sp_die);
+ /* Try to get actual symbol name from symtab */
+ symbol = dwfl_module_addrsym(mod, paddr, &sym, NULL);
if (!symbol) {
- /* Try to get the symbol name from symtab */
- symbol = dwfl_module_addrsym(mod, paddr, &sym, NULL);
- if (!symbol) {
- pr_warning("Failed to find symbol at 0x%lx\n",
- (unsigned long)paddr);
- return -ENOENT;
- }
- eaddr = sym.st_value;
+ pr_warning("Failed to find symbol at 0x%lx\n",
+ (unsigned long)paddr);
+ return -ENOENT;
}
+ eaddr = sym.st_value;
+
tp->offset = (unsigned long)(paddr - eaddr);
tp->address = (unsigned long)paddr;
tp->symbol = strdup(symbol);
@@ -756,6 +765,16 @@ static int find_best_scope_cb(Dwarf_Die *fn_die, void *data)
return 0;
}
+/* Return innermost DIE */
+static int find_inner_scope_cb(Dwarf_Die *fn_die, void *data)
+{
+ struct find_scope_param *fsp = data;
+
+ memcpy(fsp->die_mem, fn_die, sizeof(Dwarf_Die));
+ fsp->found = true;
+ return 1;
+}
+
/* Find an appropriate scope fits to given conditions */
static Dwarf_Die *find_best_scope(struct probe_finder *pf, Dwarf_Die *die_mem)
{
@@ -767,12 +786,50 @@ static Dwarf_Die *find_best_scope(struct probe_finder *pf, Dwarf_Die *die_mem)
.die_mem = die_mem,
.found = false,
};
+ int ret;
- cu_walk_functions_at(&pf->cu_die, pf->addr, find_best_scope_cb, &fsp);
+ ret = cu_walk_functions_at(&pf->cu_die, pf->addr, find_best_scope_cb,
+ &fsp);
+ if (!ret && !fsp.found)
+ cu_walk_functions_at(&pf->cu_die, pf->addr,
+ find_inner_scope_cb, &fsp);
return fsp.found ? die_mem : NULL;
}
+static int verify_representive_line(struct probe_finder *pf, const char *fname,
+ int lineno, Dwarf_Addr addr)
+{
+ const char *__fname, *__func = NULL;
+ Dwarf_Die die_mem;
+ int __lineno;
+
+ /* Verify line number and address by reverse search */
+ if (cu_find_lineinfo(&pf->cu_die, addr, &__fname, &__lineno) < 0)
+ return 0;
+
+ pr_debug2("Reversed line: %s:%d\n", __fname, __lineno);
+ if (strcmp(fname, __fname) || lineno == __lineno)
+ return 0;
+
+ pr_warning("This line is sharing the address with other lines.\n");
+
+ if (pf->pev->point.function) {
+ /* Find best match function name and lines */
+ pf->addr = addr;
+ if (find_best_scope(pf, &die_mem)
+ && die_match_name(&die_mem, pf->pev->point.function)
+ && dwarf_decl_line(&die_mem, &lineno) == 0) {
+ __func = dwarf_diename(&die_mem);
+ __lineno -= lineno;
+ }
+ }
+ pr_warning("Please try to probe at %s:%d instead.\n",
+ __func ? : __fname, __lineno);
+
+ return -ENOENT;
+}
+
static int probe_point_line_walker(const char *fname, int lineno,
Dwarf_Addr addr, void *data)
{
@@ -783,6 +840,9 @@ static int probe_point_line_walker(const char *fname, int lineno,
if (lineno != pf->lno || strtailcmp(fname, pf->fname) != 0)
return 0;
+ if (verify_representive_line(pf, fname, lineno, addr))
+ return -ENOENT;
+
pf->addr = addr;
sc_die = find_best_scope(pf, &die_mem);
if (!sc_die) {
@@ -942,7 +1002,7 @@ static int probe_point_inline_cb(Dwarf_Die *in_die, void *data)
ret = find_probe_point_lazy(in_die, pf);
else {
/* Get probe address */
- if (dwarf_entrypc(in_die, &addr) != 0) {
+ if (die_entrypc(in_die, &addr) != 0) {
pr_warning("Failed to get entry address of %s.\n",
dwarf_diename(in_die));
return -ENOENT;
@@ -994,7 +1054,7 @@ static int probe_point_search_cb(Dwarf_Die *sp_die, void *data)
param->retval = find_probe_point_by_line(pf);
} else if (die_is_func_instance(sp_die)) {
/* Instances always have the entry address */
- dwarf_entrypc(sp_die, &pf->addr);
+ die_entrypc(sp_die, &pf->addr);
/* But in some case the entry address is 0 */
if (pf->addr == 0) {
pr_debug("%s has no entry PC. Skipped\n",
@@ -1334,6 +1394,44 @@ end:
return ret;
}
+static int fill_empty_trace_arg(struct perf_probe_event *pev,
+ struct probe_trace_event *tevs, int ntevs)
+{
+ char **valp;
+ char *type;
+ int i, j, ret;
+
+ for (i = 0; i < pev->nargs; i++) {
+ type = NULL;
+ for (j = 0; j < ntevs; j++) {
+ if (tevs[j].args[i].value) {
+ type = tevs[j].args[i].type;
+ break;
+ }
+ }
+ if (j == ntevs) {
+ print_var_not_found(pev->args[i].var);
+ return -ENOENT;
+ }
+ for (j = 0; j < ntevs; j++) {
+ valp = &tevs[j].args[i].value;
+ if (*valp)
+ continue;
+
+ ret = asprintf(valp, "\\%lx", probe_conf.magic_num);
+ if (ret < 0)
+ return -ENOMEM;
+ /* Note that type can be NULL */
+ if (type) {
+ tevs[j].args[i].type = strdup(type);
+ if (!tevs[j].args[i].type)
+ return -ENOMEM;
+ }
+ }
+ }
+ return 0;
+}
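
fill_empty_trace_arg() lets a multiprobe succeed when a variable is live at some sites but optimized out at others: the empty slots get the magic number as an immediate value, following the "\\%lx" format above. Hypothetically, the generated definitions could look like:

```
p:probe/foo _text+0x10 arg1=%di:s32
p:probe/foo _text+0x40 arg1=\deade12d:s32    <- 'arg1' optimized out here
```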
+
/* Find probe_trace_events specified by perf_probe_event from debuginfo */
int debuginfo__find_trace_events(struct debuginfo *dbg,
struct perf_probe_event *pev,
@@ -1352,7 +1450,13 @@ int debuginfo__find_trace_events(struct debuginfo *dbg,
tf.tevs = *tevs;
tf.ntevs = 0;
+ if (pev->nargs != 0 && immediate_value_is_supported())
+ tf.pf.skip_empty_arg = true;
+
ret = debuginfo__find_probes(dbg, &tf.pf);
+ if (ret >= 0 && tf.pf.skip_empty_arg)
+ ret = fill_empty_trace_arg(pev, tf.tevs, tf.ntevs);
+
if (ret < 0) {
for (i = 0; i < tf.ntevs; i++)
clear_probe_trace_event(&tf.tevs[i]);
@@ -1425,6 +1529,18 @@ error:
return DIE_FIND_CB_END;
}
+static bool available_var_finder_overlap(struct available_var_finder *af)
+{
+ int i;
+
+ for (i = 0; i < af->nvls; i++) {
+ if (af->pf.addr == af->vls[i].point.address)
+ return true;
+ }
+ return false;
+}
+
/* Add a found vars into available variables list */
static int add_available_vars(Dwarf_Die *sc_die, struct probe_finder *pf)
{
@@ -1435,6 +1551,14 @@ static int add_available_vars(Dwarf_Die *sc_die, struct probe_finder *pf)
Dwarf_Die die_mem;
int ret;
+ /*
+ * For some reason (e.g. a different column assigned to the same
+ * address), this callback can be called with an address that has
+ * already been processed. Ignore such duplicates.
+ */
+ if (available_var_finder_overlap(af))
+ return 0;
+
/* Check number of tevs */
if (af->nvls == af->max_vls) {
pr_warning("Too many( > %d) probe point found.\n", af->max_vls);
@@ -1578,7 +1702,7 @@ int debuginfo__find_probe_point(struct debuginfo *dbg, unsigned long addr,
/* Get function entry information */
func = basefunc = dwarf_diename(&spdie);
if (!func ||
- dwarf_entrypc(&spdie, &baseaddr) != 0 ||
+ die_entrypc(&spdie, &baseaddr) != 0 ||
dwarf_decl_line(&spdie, &baseline) != 0) {
lineno = 0;
goto post;
@@ -1595,7 +1719,7 @@ int debuginfo__find_probe_point(struct debuginfo *dbg, unsigned long addr,
while (die_find_top_inlinefunc(&spdie, (Dwarf_Addr)addr,
&indie)) {
/* There is an inline function */
- if (dwarf_entrypc(&indie, &_addr) == 0 &&
+ if (die_entrypc(&indie, &_addr) == 0 &&
_addr == addr) {
/*
* addr is at an inline function entry.
@@ -1675,12 +1799,19 @@ static int line_range_walk_cb(const char *fname, int lineno,
void *data)
{
struct line_finder *lf = data;
+ const char *__fname;
+ int __lineno;
int err;
if ((strtailcmp(fname, lf->fname) != 0) ||
(lf->lno_s > lineno || lf->lno_e < lineno))
return 0;
+ /* Make sure this line is reversible (its address maps back to it) */
+ if (cu_find_lineinfo(&lf->cu_die, addr, &__fname, &__lineno) > 0
+ && (lineno != __lineno || strcmp(fname, __fname)))
+ return 0;
+
err = line_range_add_line(fname, lineno, lf->lr);
if (err < 0 && err != -EEXIST)
return err;
diff --git a/tools/perf/util/probe-finder.h b/tools/perf/util/probe-finder.h
index 670c477bf8cf..11be10080613 100644
--- a/tools/perf/util/probe-finder.h
+++ b/tools/perf/util/probe-finder.h
@@ -87,6 +87,7 @@ struct probe_finder {
unsigned int machine; /* Target machine arch */
struct perf_probe_arg *pvar; /* Current target variable */
struct probe_trace_arg *tvar; /* Current result variable */
+ bool skip_empty_arg; /* Skip non-exist args */
};
struct trace_event_finder {
diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c
index 02460362256d..83212c65848b 100644
--- a/tools/perf/util/python.c
+++ b/tools/perf/util/python.c
@@ -6,6 +6,7 @@
#include <linux/err.h>
#include <perf/cpumap.h>
#include <traceevent/event-parse.h>
+#include <perf/mmap.h>
#include "evlist.h"
#include "callchain.h"
#include "evsel.h"
@@ -64,6 +65,7 @@ struct perf_env perf_env;
* implementing 'verbose' and 'eprintf'.
*/
int verbose;
+int debug_peo_args;
int eprintf(int level, int var, const char *fmt, ...);
@@ -1022,10 +1024,10 @@ static PyObject *pyrf_evlist__read_on_cpu(struct pyrf_evlist *pevlist,
if (!md)
return NULL;
- if (perf_mmap__read_init(md) < 0)
+ if (perf_mmap__read_init(&md->core) < 0)
goto end;
- event = perf_mmap__read_event(md);
+ event = perf_mmap__read_event(&md->core);
if (event != NULL) {
PyObject *pyevent = pyrf_event__new(event);
struct pyrf_event *pevent = (struct pyrf_event *)pyevent;
@@ -1045,7 +1047,7 @@ static PyObject *pyrf_evlist__read_on_cpu(struct pyrf_evlist *pevlist,
err = perf_evsel__parse_sample(evsel, event, &pevent->sample);
/* Consume the event only after we parsed it out. */
- perf_mmap__consume(md);
+ perf_mmap__consume(&md->core);
if (err)
return PyErr_Format(PyExc_OSError,
diff --git a/tools/perf/util/record.c b/tools/perf/util/record.c
index 8579505c29a4..7def66168503 100644
--- a/tools/perf/util/record.c
+++ b/tools/perf/util/record.c
@@ -136,6 +136,37 @@ bool perf_can_record_cpu_wide(void)
return true;
}
+/*
+ * Architectures are expected to know if AUX area sampling is supported by the
+ * hardware. Here we check for kernel support.
+ */
+bool perf_can_aux_sample(void)
+{
+ struct perf_event_attr attr = {
+ .size = sizeof(struct perf_event_attr),
+ .exclude_kernel = 1,
+ /*
+ * Non-zero value causes the kernel to calculate the effective
+ * attribute size up to that byte.
+ */
+ .aux_sample_size = 1,
+ };
+ int fd;
+
+ fd = sys_perf_event_open(&attr, -1, 0, -1, 0);
+ /*
+ * If the kernel attribute is big enough to contain aux_sample_size
+ * then we assume that it is supported. We are relying on the kernel to
+ * validate the attribute size before anything else that could be wrong.
+ */
+ if (fd < 0 && errno == E2BIG)
+ return false;
+ if (fd >= 0)
+ close(fd);
+
+ return true;
+}
+
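The probe above leans on the ABI property its comment spells out: perf_event_open() validates the declared attribute size before anything else, so an attribute that extends past the fields the running kernel understands fails with E2BIG. A caller would gate AUX area sampling on it along the following lines (an illustrative sketch, not part of this patch; the error message is made up):

    if (opts->auxtrace_sample_mode && !perf_can_aux_sample()) {
            pr_err("AUX area sampling is not supported by this kernel\n");
            return -EINVAL;
    }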
void perf_evlist__config(struct evlist *evlist, struct record_opts *opts,
struct callchain_param *callchain)
{
diff --git a/tools/perf/util/record.h b/tools/perf/util/record.h
index 00275afc524d..5421fd2ad383 100644
--- a/tools/perf/util/record.h
+++ b/tools/perf/util/record.h
@@ -32,6 +32,7 @@ struct record_opts {
bool full_auxtrace;
bool auxtrace_snapshot_mode;
bool auxtrace_snapshot_on_exit;
+ bool auxtrace_sample_mode;
bool record_namespaces;
bool record_switch_events;
bool all_kernel;
@@ -44,6 +45,7 @@ struct record_opts {
bool strict_freq;
bool sample_id;
bool no_bpf_event;
+ bool kcore;
unsigned int freq;
unsigned int mmap_pages;
unsigned int auxtrace_mmap_pages;
@@ -55,6 +57,7 @@ struct record_opts {
u64 user_interval;
size_t auxtrace_snapshot_size;
const char *auxtrace_snapshot_opts;
+ const char *auxtrace_sample_opts;
bool sample_transaction;
unsigned initial_delay;
bool use_clockid;
diff --git a/tools/perf/util/scripting-engines/trace-event-perl.c b/tools/perf/util/scripting-engines/trace-event-perl.c
index 741f040648b5..0e608a5ef599 100644
--- a/tools/perf/util/scripting-engines/trace-event-perl.c
+++ b/tools/perf/util/scripting-engines/trace-event-perl.c
@@ -294,17 +294,17 @@ static SV *perl_process_callchain(struct perf_sample *sample,
goto exit;
}
- if (node->sym) {
+ if (node->ms.sym) {
HV *sym = newHV();
if (!sym) {
hv_undef(elem);
goto exit;
}
- if (!hv_stores(sym, "start", newSVuv(node->sym->start)) ||
- !hv_stores(sym, "end", newSVuv(node->sym->end)) ||
- !hv_stores(sym, "binding", newSVuv(node->sym->binding)) ||
- !hv_stores(sym, "name", newSVpvn(node->sym->name,
- node->sym->namelen)) ||
+ if (!hv_stores(sym, "start", newSVuv(node->ms.sym->start)) ||
+ !hv_stores(sym, "end", newSVuv(node->ms.sym->end)) ||
+ !hv_stores(sym, "binding", newSVuv(node->ms.sym->binding)) ||
+ !hv_stores(sym, "name", newSVpvn(node->ms.sym->name,
+ node->ms.sym->namelen)) ||
!hv_stores(elem, "sym", newRV_noinc((SV*)sym))) {
hv_undef(sym);
hv_undef(elem);
@@ -312,8 +312,8 @@ static SV *perl_process_callchain(struct perf_sample *sample,
}
}
- if (node->map) {
- struct map *map = node->map;
+ if (node->ms.map) {
+ struct map *map = node->ms.map;
const char *dsoname = "[unknown]";
if (map && map->dso) {
if (symbol_conf.show_kernel_path && map->dso->long_name)
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
index 93c03b39cd9c..9581a904af29 100644
--- a/tools/perf/util/scripting-engines/trace-event-python.c
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
@@ -428,24 +428,24 @@ static PyObject *python_process_callchain(struct perf_sample *sample,
pydict_set_item_string_decref(pyelem, "ip",
PyLong_FromUnsignedLongLong(node->ip));
- if (node->sym) {
+ if (node->ms.sym) {
PyObject *pysym = PyDict_New();
if (!pysym)
Py_FatalError("couldn't create Python dictionary");
pydict_set_item_string_decref(pysym, "start",
- PyLong_FromUnsignedLongLong(node->sym->start));
+ PyLong_FromUnsignedLongLong(node->ms.sym->start));
pydict_set_item_string_decref(pysym, "end",
- PyLong_FromUnsignedLongLong(node->sym->end));
+ PyLong_FromUnsignedLongLong(node->ms.sym->end));
pydict_set_item_string_decref(pysym, "binding",
- _PyLong_FromLong(node->sym->binding));
+ _PyLong_FromLong(node->ms.sym->binding));
pydict_set_item_string_decref(pysym, "name",
- _PyUnicode_FromStringAndSize(node->sym->name,
- node->sym->namelen));
+ _PyUnicode_FromStringAndSize(node->ms.sym->name,
+ node->ms.sym->namelen));
pydict_set_item_string_decref(pyelem, "sym", pysym);
}
- if (node->map) {
- const char *dsoname = get_dsoname(node->map);
+ if (node->ms.map) {
+ const char *dsoname = get_dsoname(node->ms.map);
pydict_set_item_string_decref(pyelem, "dso",
_PyUnicode_FromString(dsoname));
@@ -1127,7 +1127,7 @@ static void python_export_sample_table(struct db_export *dbe,
tuple_set_u64(t, 0, es->db_id);
tuple_set_u64(t, 1, es->evsel->db_id);
- tuple_set_u64(t, 2, es->al->machine->db_id);
+ tuple_set_u64(t, 2, es->al->mg->machine->db_id);
tuple_set_u64(t, 3, es->al->thread->db_id);
tuple_set_u64(t, 4, es->comm_db_id);
tuple_set_u64(t, 5, es->dso_db_id);
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 061bb4d6a3f5..d0d7d25b23e3 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -227,9 +227,13 @@ struct perf_session *perf_session__new(struct perf_data *data,
/* Open the directory data. */
if (data->is_dir) {
ret = perf_data__open_dir(data);
- if (ret)
- goto out_delete;
+ if (ret)
+ goto out_delete;
}
+
+ if (!symbol_conf.kallsyms_name &&
+ !symbol_conf.vmlinux_name)
+ symbol_conf.kallsyms_name = perf_data__kallsyms_name(data);
}
} else {
session->machines.host.env = &perf_env;
@@ -748,6 +752,7 @@ do { \
bswap_field_32(sample_stack_user);
bswap_field_32(aux_watermark);
bswap_field_16(sample_max_stack);
+ bswap_field_32(aux_sample_size);
/*
* After read_format are bitfields. Check read_format because
@@ -1491,8 +1496,13 @@ static int perf_session__deliver_event(struct perf_session *session,
if (ret > 0)
return 0;
- return machines__deliver_event(&session->machines, session->evlist,
- event, &sample, tool, file_offset);
+ ret = machines__deliver_event(&session->machines, session->evlist,
+ event, &sample, tool, file_offset);
+
+ if (dump_trace && sample.aux_sample.size)
+ auxtrace__dump_auxtrace_sample(session, &sample);
+
+ return ret;
}
static s64 perf_session__process_user_event(struct perf_session *session,
@@ -1649,6 +1659,34 @@ out_parse_sample:
return 0;
}
+int perf_session__peek_events(struct perf_session *session, u64 offset,
+ u64 size, peek_events_cb_t cb, void *data)
+{
+ u64 max_offset = offset + size;
+ char buf[PERF_SAMPLE_MAX_SIZE];
+ union perf_event *event;
+ int err;
+
+ do {
+ err = perf_session__peek_event(session, offset, buf,
+ PERF_SAMPLE_MAX_SIZE, &event,
+ NULL);
+ if (err)
+ return err;
+
+ err = cb(session, event, offset, data);
+ if (err)
+ return err;
+
+ offset += event->header.size;
+ if (event->header.type == PERF_RECORD_AUXTRACE)
+ offset += event->auxtrace.size;
+
+ } while (offset < max_offset);
+
+ return err;
+}
+
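For illustration, a minimal callback matching the peek_events_cb_t signature added in session.h below (a sketch with hypothetical names, not part of the patch):

    static int count_events_cb(struct perf_session *session __maybe_unused,
                               union perf_event *event __maybe_unused,
                               u64 offset __maybe_unused, void *data)
    {
            int *nr = data;

            (*nr)++;
            return 0;	/* a non-zero return stops the walk and is propagated */
    }

    /* Count the events in 'size' bytes of the file starting at 'offset'. */
    int nr = 0;
    err = perf_session__peek_events(session, offset, size, count_events_cb, &nr);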
static s64 perf_session__process_event(struct perf_session *session,
union perf_event *event, u64 file_offset)
{
@@ -1954,8 +1992,8 @@ out_err:
}
static union perf_event *
-fetch_mmaped_event(struct perf_session *session,
- u64 head, size_t mmap_size, char *buf)
+prefetch_event(char *buf, u64 head, size_t mmap_size,
+ bool needs_swap, union perf_event *error)
{
union perf_event *event;
@@ -1967,20 +2005,32 @@ fetch_mmaped_event(struct perf_session *session,
return NULL;
event = (union perf_event *)(buf + head);
+ if (needs_swap)
+ perf_event_header__bswap(&event->header);
- if (session->header.needs_swap)
+ if (head + event->header.size <= mmap_size)
+ return event;
+
+ /* We're not fetching the event so swap back again */
+ if (needs_swap)
perf_event_header__bswap(&event->header);
- if (head + event->header.size > mmap_size) {
- /* We're not fetching the event so swap back again */
- if (session->header.needs_swap)
- perf_event_header__bswap(&event->header);
- pr_debug("%s: head=%#" PRIx64 " event->header_size=%#x, mmap_size=%#zx: fuzzed perf.data?\n",
- __func__, head, event->header.size, mmap_size);
- return ERR_PTR(-EINVAL);
- }
+ pr_debug("%s: head=%#" PRIx64 " event->header_size=%#x, mmap_size=%#zx:"
+ " fuzzed or compressed perf.data?\n",__func__, head, event->header.size, mmap_size);
+
+ return error;
+}
+
+static union perf_event *
+fetch_mmaped_event(u64 head, size_t mmap_size, char *buf, bool needs_swap)
+{
+ return prefetch_event(buf, head, mmap_size, needs_swap, ERR_PTR(-EINVAL));
+}
- return event;
+static union perf_event *
+fetch_decomp_event(u64 head, size_t mmap_size, char *buf, bool needs_swap)
+{
+ return prefetch_event(buf, head, mmap_size, needs_swap, NULL);
}
static int __perf_session__process_decomp_events(struct perf_session *session)
@@ -1993,10 +2043,8 @@ static int __perf_session__process_decomp_events(struct perf_session *session)
return 0;
while (decomp->head < decomp->size && !session_done()) {
- union perf_event *event = fetch_mmaped_event(session, decomp->head, decomp->size, decomp->data);
-
- if (IS_ERR(event))
- return PTR_ERR(event);
+ union perf_event *event = fetch_decomp_event(decomp->head, decomp->size, decomp->data,
+ session->header.needs_swap);
if (!event)
break;
@@ -2096,7 +2144,7 @@ remap:
}
more:
- event = fetch_mmaped_event(session, head, mmap_size, buf);
+ event = fetch_mmaped_event(head, mmap_size, buf, session->header.needs_swap);
if (IS_ERR(event))
return PTR_ERR(event);
@@ -2355,35 +2403,6 @@ void perf_session__fprintf_info(struct perf_session *session, FILE *fp,
fprintf(fp, "# ========\n#\n");
}
-
-int __perf_session__set_tracepoints_handlers(struct perf_session *session,
- const struct evsel_str_handler *assocs,
- size_t nr_assocs)
-{
- struct evsel *evsel;
- size_t i;
- int err;
-
- for (i = 0; i < nr_assocs; i++) {
- /*
- * Adding a handler for an event not in the session,
- * just ignore it.
- */
- evsel = perf_evlist__find_tracepoint_by_name(session->evlist, assocs[i].name);
- if (evsel == NULL)
- continue;
-
- err = -EEXIST;
- if (evsel->handler != NULL)
- goto out;
- evsel->handler = assocs[i].handler;
- }
-
- err = 0;
-out:
- return err;
-}
-
int perf_event__process_id_index(struct perf_session *session,
union perf_event *event)
{
diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h
index b4c9428c18f0..f76480166d38 100644
--- a/tools/perf/util/session.h
+++ b/tools/perf/util/session.h
@@ -64,6 +64,11 @@ int perf_session__peek_event(struct perf_session *session, off_t file_offset,
void *buf, size_t buf_sz,
union perf_event **event_ptr,
struct perf_sample *sample);
+typedef int (*peek_events_cb_t)(struct perf_session *session,
+ union perf_event *event, u64 offset,
+ void *data);
+int perf_session__peek_events(struct perf_session *session, u64 offset,
+ u64 size, peek_events_cb_t cb, void *data);
int perf_session__process_events(struct perf_session *session);
@@ -120,12 +125,8 @@ void perf_session__fprintf_info(struct perf_session *s, FILE *fp, bool full);
struct evsel_str_handler;
-int __perf_session__set_tracepoints_handlers(struct perf_session *session,
- const struct evsel_str_handler *assocs,
- size_t nr_assocs);
-
#define perf_session__set_tracepoints_handlers(session, array) \
- __perf_session__set_tracepoints_handlers(session, array, ARRAY_SIZE(array))
+ __evlist__set_tracepoints_handlers(session->evlist, array, ARRAY_SIZE(array))
extern volatile int session_done;
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
index 43d1d410854a..345b5ccc90f6 100644
--- a/tools/perf/util/sort.c
+++ b/tools/perf/util/sort.c
@@ -287,10 +287,12 @@ sort__sym_sort(struct hist_entry *left, struct hist_entry *right)
return strcmp(right->ms.sym->name, left->ms.sym->name);
}
-static int _hist_entry__sym_snprintf(struct map *map, struct symbol *sym,
+static int _hist_entry__sym_snprintf(struct map_symbol *ms,
u64 ip, char level, char *bf, size_t size,
unsigned int width)
{
+ struct symbol *sym = ms->sym;
+ struct map *map = ms->map;
size_t ret = 0;
if (verbose > 0) {
@@ -325,7 +327,7 @@ static int _hist_entry__sym_snprintf(struct map *map, struct symbol *sym,
static int hist_entry__sym_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
{
- return _hist_entry__sym_snprintf(he->ms.map, he->ms.sym, he->ip,
+ return _hist_entry__sym_snprintf(&he->ms, he->ip,
he->level, bf, size, width);
}
@@ -386,7 +388,7 @@ struct sort_entry sort_srcline = {
static char *addr_map_symbol__srcline(struct addr_map_symbol *ams)
{
- return map__srcline(ams->map, ams->al_addr, ams->sym);
+ return map__srcline(ams->ms.map, ams->al_addr, ams->ms.sym);
}
static int64_t
@@ -769,15 +771,15 @@ sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right)
if (!left->branch_info || !right->branch_info)
return cmp_null(left->branch_info, right->branch_info);
- return _sort__dso_cmp(left->branch_info->from.map,
- right->branch_info->from.map);
+ return _sort__dso_cmp(left->branch_info->from.ms.map,
+ right->branch_info->from.ms.map);
}
static int hist_entry__dso_from_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
{
if (he->branch_info)
- return _hist_entry__dso_snprintf(he->branch_info->from.map,
+ return _hist_entry__dso_snprintf(he->branch_info->from.ms.map,
bf, size, width);
else
return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
@@ -791,8 +793,8 @@ static int hist_entry__dso_from_filter(struct hist_entry *he, int type,
if (type != HIST_FILTER__DSO)
return -1;
- return dso && (!he->branch_info || !he->branch_info->from.map ||
- he->branch_info->from.map->dso != dso);
+ return dso && (!he->branch_info || !he->branch_info->from.ms.map ||
+ he->branch_info->from.ms.map->dso != dso);
}
static int64_t
@@ -801,15 +803,15 @@ sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right)
if (!left->branch_info || !right->branch_info)
return cmp_null(left->branch_info, right->branch_info);
- return _sort__dso_cmp(left->branch_info->to.map,
- right->branch_info->to.map);
+ return _sort__dso_cmp(left->branch_info->to.ms.map,
+ right->branch_info->to.ms.map);
}
static int hist_entry__dso_to_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
{
if (he->branch_info)
- return _hist_entry__dso_snprintf(he->branch_info->to.map,
+ return _hist_entry__dso_snprintf(he->branch_info->to.ms.map,
bf, size, width);
else
return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
@@ -823,8 +825,8 @@ static int hist_entry__dso_to_filter(struct hist_entry *he, int type,
if (type != HIST_FILTER__DSO)
return -1;
- return dso && (!he->branch_info || !he->branch_info->to.map ||
- he->branch_info->to.map->dso != dso);
+ return dso && (!he->branch_info || !he->branch_info->to.ms.map ||
+ he->branch_info->to.ms.map->dso != dso);
}
static int64_t
@@ -839,10 +841,10 @@ sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right)
from_l = &left->branch_info->from;
from_r = &right->branch_info->from;
- if (!from_l->sym && !from_r->sym)
+ if (!from_l->ms.sym && !from_r->ms.sym)
return _sort__addr_cmp(from_l->addr, from_r->addr);
- return _sort__sym_cmp(from_l->sym, from_r->sym);
+ return _sort__sym_cmp(from_l->ms.sym, from_r->ms.sym);
}
static int64_t
@@ -856,10 +858,10 @@ sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right)
to_l = &left->branch_info->to;
to_r = &right->branch_info->to;
- if (!to_l->sym && !to_r->sym)
+ if (!to_l->ms.sym && !to_r->ms.sym)
return _sort__addr_cmp(to_l->addr, to_r->addr);
- return _sort__sym_cmp(to_l->sym, to_r->sym);
+ return _sort__sym_cmp(to_l->ms.sym, to_r->ms.sym);
}
static int hist_entry__sym_from_snprintf(struct hist_entry *he, char *bf,
@@ -868,8 +870,7 @@ static int hist_entry__sym_from_snprintf(struct hist_entry *he, char *bf,
if (he->branch_info) {
struct addr_map_symbol *from = &he->branch_info->from;
- return _hist_entry__sym_snprintf(from->map, from->sym, from->addr,
- he->level, bf, size, width);
+ return _hist_entry__sym_snprintf(&from->ms, from->addr, he->level, bf, size, width);
}
return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
@@ -881,8 +882,7 @@ static int hist_entry__sym_to_snprintf(struct hist_entry *he, char *bf,
if (he->branch_info) {
struct addr_map_symbol *to = &he->branch_info->to;
- return _hist_entry__sym_snprintf(to->map, to->sym, to->addr,
- he->level, bf, size, width);
+ return _hist_entry__sym_snprintf(&to->ms, to->addr, he->level, bf, size, width);
}
return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
@@ -896,8 +896,8 @@ static int hist_entry__sym_from_filter(struct hist_entry *he, int type,
if (type != HIST_FILTER__SYMBOL)
return -1;
- return sym && !(he->branch_info && he->branch_info->from.sym &&
- strstr(he->branch_info->from.sym->name, sym));
+ return sym && !(he->branch_info && he->branch_info->from.ms.sym &&
+ strstr(he->branch_info->from.ms.sym->name, sym));
}
static int hist_entry__sym_to_filter(struct hist_entry *he, int type,
@@ -908,8 +908,8 @@ static int hist_entry__sym_to_filter(struct hist_entry *he, int type,
if (type != HIST_FILTER__SYMBOL)
return -1;
- return sym && !(he->branch_info && he->branch_info->to.sym &&
- strstr(he->branch_info->to.sym->name, sym));
+ return sym && !(he->branch_info && he->branch_info->to.ms.sym &&
+ strstr(he->branch_info->to.ms.sym->name, sym));
}
struct sort_entry sort_dso_from = {
@@ -1017,16 +1017,13 @@ static int hist_entry__daddr_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
{
uint64_t addr = 0;
- struct map *map = NULL;
- struct symbol *sym = NULL;
+ struct map_symbol *ms = NULL;
if (he->mem_info) {
addr = he->mem_info->daddr.addr;
- map = he->mem_info->daddr.map;
- sym = he->mem_info->daddr.sym;
+ ms = &he->mem_info->daddr.ms;
}
- return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size,
- width);
+ return _hist_entry__sym_snprintf(ms, addr, he->level, bf, size, width);
}
int64_t
@@ -1046,16 +1043,13 @@ static int hist_entry__iaddr_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
{
uint64_t addr = 0;
- struct map *map = NULL;
- struct symbol *sym = NULL;
+ struct map_symbol *ms = NULL;
if (he->mem_info) {
addr = he->mem_info->iaddr.addr;
- map = he->mem_info->iaddr.map;
- sym = he->mem_info->iaddr.sym;
+ ms = &he->mem_info->iaddr.ms;
}
- return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size,
- width);
+ return _hist_entry__sym_snprintf(ms, addr, he->level, bf, size, width);
}
static int64_t
@@ -1065,9 +1059,9 @@ sort__dso_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
struct map *map_r = NULL;
if (left->mem_info)
- map_l = left->mem_info->daddr.map;
+ map_l = left->mem_info->daddr.ms.map;
if (right->mem_info)
- map_r = right->mem_info->daddr.map;
+ map_r = right->mem_info->daddr.ms.map;
return _sort__dso_cmp(map_l, map_r);
}
@@ -1078,7 +1072,7 @@ static int hist_entry__dso_daddr_snprintf(struct hist_entry *he, char *bf,
struct map *map = NULL;
if (he->mem_info)
- map = he->mem_info->daddr.map;
+ map = he->mem_info->daddr.ms.map;
return _hist_entry__dso_snprintf(map, bf, size, width);
}
@@ -1200,6 +1194,7 @@ sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right)
{
u64 l, r;
struct map *l_map, *r_map;
+ int rc;
if (!left->mem_info) return -1;
if (!right->mem_info) return 1;
@@ -1208,8 +1203,8 @@ sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right)
if (left->cpumode > right->cpumode) return -1;
if (left->cpumode < right->cpumode) return 1;
- l_map = left->mem_info->daddr.map;
- r_map = right->mem_info->daddr.map;
+ l_map = left->mem_info->daddr.ms.map;
+ r_map = right->mem_info->daddr.ms.map;
/* if both are NULL, jump to sort on al_addr instead */
if (!l_map && !r_map)
@@ -1218,18 +1213,9 @@ sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right)
if (!l_map) return -1;
if (!r_map) return 1;
- if (l_map->maj > r_map->maj) return -1;
- if (l_map->maj < r_map->maj) return 1;
-
- if (l_map->min > r_map->min) return -1;
- if (l_map->min < r_map->min) return 1;
-
- if (l_map->ino > r_map->ino) return -1;
- if (l_map->ino < r_map->ino) return 1;
-
- if (l_map->ino_generation > r_map->ino_generation) return -1;
- if (l_map->ino_generation < r_map->ino_generation) return 1;
-
+ rc = dso__cmp_id(l_map->dso, r_map->dso);
+ if (rc)
+ return rc;
/*
* Addresses with no major/minor numbers are assumed to be
* anonymous in userspace. Sort those on pid then address.
@@ -1240,8 +1226,8 @@ sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right)
if ((left->cpumode != PERF_RECORD_MISC_KERNEL) &&
(!(l_map->flags & MAP_SHARED)) &&
- !l_map->maj && !l_map->min && !l_map->ino &&
- !l_map->ino_generation) {
+ !l_map->dso->id.maj && !l_map->dso->id.min &&
+ !l_map->dso->id.ino && !l_map->dso->id.ino_generation) {
/* userspace anonymous */
if (left->thread->pid_ > right->thread->pid_) return -1;
@@ -1264,27 +1250,26 @@ static int hist_entry__dcacheline_snprintf(struct hist_entry *he, char *bf,
{
uint64_t addr = 0;
- struct map *map = NULL;
- struct symbol *sym = NULL;
+ struct map_symbol *ms = NULL;
char level = he->level;
if (he->mem_info) {
+ struct map *map = he->mem_info->daddr.ms.map;
+
addr = cl_address(he->mem_info->daddr.al_addr);
- map = he->mem_info->daddr.map;
- sym = he->mem_info->daddr.sym;
+ ms = &he->mem_info->daddr.ms;
/* print [s] for shared data mmaps */
if ((he->cpumode != PERF_RECORD_MISC_KERNEL) &&
map && !(map->prot & PROT_EXEC) &&
(map->flags & MAP_SHARED) &&
- (map->maj || map->min || map->ino ||
- map->ino_generation))
+ (map->dso->id.maj || map->dso->id.min ||
+ map->dso->id.ino || map->dso->id.ino_generation))
level = 's';
else if (!map)
level = 'X';
}
- return _hist_entry__sym_snprintf(map, sym, addr, level, bf, size,
- width);
+ return _hist_entry__sym_snprintf(ms, addr, level, bf, size, width);
}
struct sort_entry sort_mispredict = {
diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h
index 7b93f34ac1f4..5aff9542d9b7 100644
--- a/tools/perf/util/sort.h
+++ b/tools/perf/util/sort.h
@@ -10,6 +10,8 @@
#include "callchain.h"
#include "values.h"
#include "hist.h"
+#include "stat.h"
+#include "spark.h"
struct option;
struct thread;
@@ -71,6 +73,8 @@ struct hist_entry_diff {
/* PERF_HPP_DIFF__CYCLES */
s64 cycles;
};
+ struct stats stats;
+ unsigned long svals[NUM_SPARKS];
};
struct hist_entry_ops {
diff --git a/tools/perf/util/spark.c b/tools/perf/util/spark.c
new file mode 100644
index 000000000000..70272a8b81a6
--- /dev/null
+++ b/tools/perf/util/spark.c
@@ -0,0 +1,34 @@
+#include <stdio.h>
+#include <limits.h>
+#include <string.h>
+#include <stdlib.h>
+#include "spark.h"
+#include "stat.h"
+
+#define SPARK_SHIFT 8
+
+/* Print a sparkline into bf for the numval values in val. */
+int print_spark(char *bf, int size, unsigned long *val, int numval)
+{
+ static const char *ticks[NUM_SPARKS] = {
+ "▁", "▂", "▃", "▄", "▅", "▆", "▇", "█"
+ };
+ int i, printed = 0;
+ unsigned long min = ULONG_MAX, max = 0, f;
+
+ for (i = 0; i < numval; i++) {
+ if (val[i] < min)
+ min = val[i];
+ if (val[i] > max)
+ max = val[i];
+ }
+ f = ((max - min) << SPARK_SHIFT) / (NUM_SPARKS - 1);
+ if (f < 1)
+ f = 1;
+ for (i = 0; i < numval; i++) {
+ printed += scnprintf(bf + printed, size - printed, "%s",
+ ticks[((val[i] - min) << SPARK_SHIFT) / f]);
+ }
+
+ return printed;
+}
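A hedged usage sketch: print_spark() linearly maps each value's offset from the series minimum onto the eight tick glyphs, with SPARK_SHIFT adding fixed-point precision to the scale factor, and appends the glyphs to bf:

    char bf[64];
    unsigned long vals[NUM_SPARKS] = { 1, 2, 4, 8, 16, 32, 64, 128 };

    /* Illustrative only: renders a rising sparkline, roughly "▁▁▁▁▁▂▄█". */
    print_spark(bf, sizeof(bf), vals, NUM_SPARKS);
    printf("%s\n", bf);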
diff --git a/tools/perf/util/spark.h b/tools/perf/util/spark.h
new file mode 100644
index 000000000000..25402d7d7a64
--- /dev/null
+++ b/tools/perf/util/spark.h
@@ -0,0 +1,8 @@
+#ifndef SPARK_H
+#define SPARK_H 1
+
+#define NUM_SPARKS 8
+
+int print_spark(char *bf, int size, unsigned long *val, int numval);
+
+#endif
diff --git a/tools/perf/util/stat-display.c b/tools/perf/util/stat-display.c
index ed3b0ac2f785..bc31fccc0057 100644
--- a/tools/perf/util/stat-display.c
+++ b/tools/perf/util/stat-display.c
@@ -100,6 +100,15 @@ static void aggr_printout(struct perf_stat_config *config,
nr,
config->csv_sep);
break;
+ case AGGR_NODE:
+ fprintf(config->output, "N%*d%s%*d%s",
+ config->csv_output ? 0 : -5,
+ id,
+ config->csv_sep,
+ config->csv_output ? 0 : 4,
+ nr,
+ config->csv_sep);
+ break;
case AGGR_NONE:
if (evsel->percore) {
fprintf(config->output, "S%d-D%d-C%*d%s",
@@ -965,6 +974,11 @@ static void print_interval(struct perf_stat_config *config,
if ((num_print_interval == 0 && !config->csv_output) || config->interval_clear) {
switch (config->aggr_mode) {
+ case AGGR_NODE:
+ fprintf(output, "# time node cpus");
+ if (!metric_only)
+ fprintf(output, " counts %*s events\n", unit_width, "unit");
+ break;
case AGGR_SOCKET:
fprintf(output, "# time socket cpus");
if (!metric_only)
@@ -1188,6 +1202,7 @@ perf_evlist__print_counters(struct evlist *evlist,
case AGGR_CORE:
case AGGR_DIE:
case AGGR_SOCKET:
+ case AGGR_NODE:
print_aggr(config, evlist, prefix);
break;
case AGGR_THREAD:
diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c
index ebdd130557fb..332cb730785b 100644
--- a/tools/perf/util/stat.c
+++ b/tools/perf/util/stat.c
@@ -299,6 +299,7 @@ process_counter_values(struct perf_stat_config *config, struct evsel *evsel,
case AGGR_CORE:
case AGGR_DIE:
case AGGR_SOCKET:
+ case AGGR_NODE:
case AGGR_NONE:
if (!evsel->snapshot)
perf_evsel__compute_deltas(evsel, cpu, thread, count);
@@ -490,6 +491,16 @@ int create_perf_stat_counter(struct evsel *evsel,
if (config->identifier)
attr->sample_type = PERF_SAMPLE_IDENTIFIER;
+ if (config->all_user) {
+ attr->exclude_kernel = 1;
+ attr->exclude_user = 0;
+ }
+
+ if (config->all_kernel) {
+ attr->exclude_kernel = 0;
+ attr->exclude_user = 1;
+ }
+
/*
* Disabling all counters initially, they will be enabled
* either manually by us or by kernel via enable_on_exec
diff --git a/tools/perf/util/stat.h b/tools/perf/util/stat.h
index edbeb2f63e8d..bfa9aaf36ce6 100644
--- a/tools/perf/util/stat.h
+++ b/tools/perf/util/stat.h
@@ -47,6 +47,7 @@ enum aggr_mode {
AGGR_CORE,
AGGR_THREAD,
AGGR_UNSET,
+ AGGR_NODE,
};
enum {
@@ -106,6 +107,8 @@ struct perf_stat_config {
bool big_num;
bool no_merge;
bool walltime_run_table;
+ bool all_kernel;
+ bool all_user;
FILE *output;
unsigned int interval;
unsigned int timeout;
diff --git a/tools/perf/util/string2.h b/tools/perf/util/string2.h
index 708805f5573e..73df616ced43 100644
--- a/tools/perf/util/string2.h
+++ b/tools/perf/util/string2.h
@@ -4,6 +4,7 @@
#include <linux/string.h>
#include <linux/types.h>
+#include <sys/types.h> // pid_t
#include <stddef.h>
#include <string.h>
@@ -32,6 +33,8 @@ static inline char *asprintf_expr_not_in_ints(const char *var, size_t nints, int
return asprintf_expr_inout_ints(var, false, nints, ints);
}
+char *asprintf__tp_filter_pids(size_t npids, pid_t *pids);
+
char *strpbrk_esc(char *str, const char *stopset);
char *strdup_esc(const char *str);
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index 66f4be1df573..16776d5fbaea 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -934,7 +934,7 @@ static int dso__process_kernel_symbol(struct dso *dso, struct map *map,
* we still are sure to have a reference to this DSO via
* *curr_map->dso.
*/
- dsos__add(&map->groups->machine->dsos, curr_dso);
+ dsos__add(&kmaps->machine->dsos, curr_dso);
/* kmaps already got it */
map__put(curr_map);
dso__set_loaded(curr_dso);
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index a8f80e427674..db9667aacb88 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -242,28 +242,24 @@ void symbols__fixup_end(struct rb_root_cached *symbols)
void map_groups__fixup_end(struct map_groups *mg)
{
struct maps *maps = &mg->maps;
- struct map *next, *curr;
+ struct map *prev = NULL, *curr;
down_write(&maps->lock);
- curr = maps__first(maps);
- if (curr == NULL)
- goto out_unlock;
+ maps__for_each_entry(maps, curr) {
+ if (prev != NULL && !prev->end)
+ prev->end = curr->start;
- for (next = map__next(curr); next; next = map__next(curr)) {
- if (!curr->end)
- curr->end = next->start;
- curr = next;
+ prev = curr;
}
/*
* We still don't have the actual symbols, so guess the
* last map's final address.
*/
- if (!curr->end)
+ if (curr && !curr->end)
curr->end = ~0ULL;
-out_unlock:
up_write(&maps->lock);
}
@@ -1053,11 +1049,6 @@ out_delete_from:
return ret;
}
-struct map *map_groups__first(struct map_groups *mg)
-{
- return maps__first(&mg->maps);
-}
-
static int do_validate_kcore_modules(const char *filename,
struct map_groups *kmaps)
{
@@ -1069,13 +1060,10 @@ static int do_validate_kcore_modules(const char *filename,
if (err)
return err;
- old_map = map_groups__first(kmaps);
- while (old_map) {
- struct map *next = map_groups__next(old_map);
+ map_groups__for_each_entry(kmaps, old_map) {
struct module_info *mi;
if (!__map__is_kmodule(old_map)) {
- old_map = next;
continue;
}
@@ -1085,8 +1073,6 @@ static int do_validate_kcore_modules(const char *filename,
err = -EINVAL;
goto out;
}
-
- old_map = next;
}
out:
delete_modules(&modules);
@@ -1189,9 +1175,7 @@ int map_groups__merge_in(struct map_groups *kmaps, struct map *new_map)
struct map *old_map;
LIST_HEAD(merged);
- for (old_map = map_groups__first(kmaps); old_map;
- old_map = map_groups__next(old_map)) {
-
+ map_groups__for_each_entry(kmaps, old_map) {
/* no overlap with this one */
if (new_map->end < old_map->start ||
new_map->start >= old_map->end)
@@ -1264,7 +1248,7 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
{
struct map_groups *kmaps = map__kmaps(map);
struct kcore_mapfn_data md;
- struct map *old_map, *new_map, *replacement_map = NULL;
+ struct map *old_map, *new_map, *replacement_map = NULL, *next;
struct machine *machine;
bool is_64_bit;
int err, fd;
@@ -1311,10 +1295,7 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
}
/* Remove old maps */
- old_map = map_groups__first(kmaps);
- while (old_map) {
- struct map *next = map_groups__next(old_map);
-
+ map_groups__for_each_entry_safe(kmaps, old_map, next) {
/*
* We need to preserve eBPF maps even if they are
* covered by kcore, because we need to access
@@ -1322,7 +1303,6 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
*/
if (old_map != map && !__map__is_bpf_prog(old_map))
map_groups__remove(kmaps, old_map);
- old_map = next;
}
machine->trampolines_mapped = false;
@@ -1550,7 +1530,7 @@ static bool dso__is_compatible_symtab_type(struct dso *dso, bool kmod,
case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP:
/*
* kernel modules know their symtab type - it's set when
- * creating a module dso in machine__findnew_module_map().
+ * creating a module dso in machine__addnew_module_map().
*/
return kmod && dso->symtab_type == type;
@@ -1608,7 +1588,7 @@ int dso__load(struct dso *dso, struct map *map)
char *name;
int ret = -1;
u_int i;
- struct machine *machine;
+ struct machine *machine = NULL;
char *root_dir = (char *) "";
int ss_pos = 0;
struct symsrc ss_[2];
@@ -1637,17 +1617,13 @@ int dso__load(struct dso *dso, struct map *map)
goto out;
}
- if (map->groups && map->groups->machine)
- machine = map->groups->machine;
- else
- machine = NULL;
-
if (dso->kernel) {
if (dso->kernel == DSO_TYPE_KERNEL)
ret = dso__load_kernel_sym(dso, map);
else if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
ret = dso__load_guest_kernel_sym(dso, map);
+ machine = map__kmaps(map)->machine;
if (machine__is(machine, "x86_64"))
machine__map_x86_64_entry_trampolines(machine, dso);
goto out;
@@ -1784,28 +1760,82 @@ out:
return ret;
}
+static int map__strcmp(const void *a, const void *b)
+{
+ const struct map *ma = *(const struct map **)a, *mb = *(const struct map **)b;
+ return strcmp(ma->dso->short_name, mb->dso->short_name);
+}
+
+static int map__strcmp_name(const void *name, const void *b)
+{
+ const struct map *map = *(const struct map **)b;
+ return strcmp(name, map->dso->short_name);
+}
+
+void __map_groups__sort_by_name(struct map_groups *mg)
+{
+ qsort(mg->maps_by_name, mg->nr_maps, sizeof(struct map *), map__strcmp);
+}
+
+static int map__groups__sort_by_name_from_rbtree(struct map_groups *mg)
+{
+ struct map *map;
+ struct map **maps_by_name = realloc(mg->maps_by_name, mg->nr_maps * sizeof(map));
+ int i = 0;
+
+ if (maps_by_name == NULL)
+ return -1;
+
+ mg->maps_by_name = maps_by_name;
+ mg->nr_maps_allocated = mg->nr_maps;
+
+ maps__for_each_entry(&mg->maps, map)
+ maps_by_name[i++] = map;
+
+ __map_groups__sort_by_name(mg);
+ return 0;
+}
+
+static struct map *__map_groups__find_by_name(struct map_groups *mg, const char *name)
+{
+ struct map **mapp;
+
+ if (mg->maps_by_name == NULL &&
+ map__groups__sort_by_name_from_rbtree(mg))
+ return NULL;
+
+ mapp = bsearch(name, mg->maps_by_name, mg->nr_maps, sizeof(*mapp), map__strcmp_name);
+ if (mapp)
+ return *mapp;
+ return NULL;
+}
+
struct map *map_groups__find_by_name(struct map_groups *mg, const char *name)
{
struct maps *maps = &mg->maps;
struct map *map;
- struct rb_node *node;
down_read(&maps->lock);
- for (node = maps->names.rb_node; node; ) {
- int rc;
-
- map = rb_entry(node, struct map, rb_node_name);
-
- rc = strcmp(map->dso->short_name, name);
- if (rc < 0)
- node = node->rb_left;
- else if (rc > 0)
- node = node->rb_right;
- else
+ if (mg->last_search_by_name && strcmp(mg->last_search_by_name->dso->short_name, name) == 0) {
+ map = mg->last_search_by_name;
+ goto out_unlock;
+ }
+ /*
+ * If we have mg->maps_by_name, then the name isn't in the rbtree,
+ * as mg->maps_by_name mirrors the rbtree when lookups by name are
+ * made.
+ */
+ map = __map_groups__find_by_name(mg, name);
+ if (map || mg->maps_by_name != NULL)
+ goto out_unlock;
+ /* Fallback to traversing the rbtree... */
+ maps__for_each_entry(maps, map)
+ if (strcmp(map->dso->short_name, name) == 0) {
+ mg->last_search_by_name = map;
goto out_unlock;
- }
+ }
map = NULL;
@@ -2047,15 +2077,9 @@ static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map)
{
int err;
const char *kallsyms_filename = NULL;
- struct machine *machine;
+ struct machine *machine = map__kmaps(map)->machine;
char path[PATH_MAX];
- if (!map->groups) {
- pr_debug("Guest kernel map hasn't the point to groups\n");
- return -1;
- }
- machine = map->groups->machine;
-
if (machine__is_default_guest(machine)) {
/*
* if the user specified a vmlinux filename, use it and only
@@ -2371,25 +2395,3 @@ struct mem_info *mem_info__new(void)
refcount_set(&mi->refcnt, 1);
return mi;
}
-
-struct block_info *block_info__get(struct block_info *bi)
-{
- if (bi)
- refcount_inc(&bi->refcnt);
- return bi;
-}
-
-void block_info__put(struct block_info *bi)
-{
- if (bi && refcount_dec_and_test(&bi->refcnt))
- free(bi);
-}
-
-struct block_info *block_info__new(void)
-{
- struct block_info *bi = zalloc(sizeof(*bi));
-
- if (bi)
- refcount_set(&bi->refcnt, 1);
- return bi;
-}
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index 0b0c6b5b1899..0b718cc9fb28 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -11,6 +11,7 @@
#include <stdio.h>
#include "path.h"
#include "symbol_conf.h"
+#include "spark.h"
#ifdef HAVE_LIBELF_SUPPORT
#include <libelf.h>
@@ -105,20 +106,9 @@ struct ref_reloc_sym {
u64 unrelocated_addr;
};
-struct block_info {
- struct symbol *sym;
- u64 start;
- u64 end;
- u64 cycles;
- u64 cycles_aggr;
- int num;
- int num_aggr;
- refcount_t refcnt;
-};
-
struct addr_location {
- struct machine *machine;
struct thread *thread;
+ struct map_groups *mg;
struct map *map;
struct symbol *sym;
const char *srcline;
@@ -289,16 +279,4 @@ static inline void __mem_info__zput(struct mem_info **mi)
#define mem_info__zput(mi) __mem_info__zput(&mi)
-struct block_info *block_info__new(void);
-struct block_info *block_info__get(struct block_info *bi);
-void block_info__put(struct block_info *bi);
-
-static inline void __block_info__zput(struct block_info **bi)
-{
- block_info__put(*bi);
- *bi = NULL;
-}
-
-#define block_info__zput(bi) __block_info__zput(&bi)
-
#endif /* __PERF_SYMBOL */
diff --git a/tools/perf/util/symbol_conf.h b/tools/perf/util/symbol_conf.h
index e6880789864c..10f1ec3e0349 100644
--- a/tools/perf/util/symbol_conf.h
+++ b/tools/perf/util/symbol_conf.h
@@ -40,6 +40,7 @@ struct symbol_conf {
raw_trace,
report_hierarchy,
report_block,
+ report_individual_block,
inline_name,
disable_add2line_warn;
const char *vmlinux_name,
diff --git a/tools/perf/util/synthetic-events.c b/tools/perf/util/synthetic-events.c
index 807cbca403a7..48c3f8b9c852 100644
--- a/tools/perf/util/synthetic-events.c
+++ b/tools/perf/util/synthetic-events.c
@@ -438,7 +438,7 @@ int perf_event__synthesize_modules(struct perf_tool *tool, perf_event__handler_t
else
event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
- for (pos = maps__first(maps); pos; pos = map__next(pos)) {
+ maps__for_each_entry(maps, pos) {
size_t size;
if (!__map__is_kmodule(pos))
@@ -1228,6 +1228,11 @@ size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
if (type & PERF_SAMPLE_PHYS_ADDR)
result += sizeof(u64);
+ if (type & PERF_SAMPLE_AUX) {
+ result += sizeof(u64);
+ result += sample->aux_sample.size;
+ }
+
return result;
}
@@ -1396,6 +1401,13 @@ int perf_event__synthesize_sample(union perf_event *event, u64 type, u64 read_fo
array++;
}
+ if (type & PERF_SAMPLE_AUX) {
+ sz = sample->aux_sample.size;
+ *array++ = sz;
+ memcpy(array, sample->aux_sample.data, sz);
+ array = (void *)array + sz;
+ }
+
return 0;
}
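The wire format for PERF_SAMPLE_AUX is thus a u64 size followed by that many raw bytes of AUX data. A hedged sketch of the reader side, mirroring (not quoting) the sample parser that must undo this:

    if (type & PERF_SAMPLE_AUX) {
            sample->aux_sample.size = *array++;
            sample->aux_sample.data = (char *)array;
            array = (void *)array + sample->aux_sample.size;
    }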
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
index b64e9e049636..0a277a920970 100644
--- a/tools/perf/util/thread.c
+++ b/tools/perf/util/thread.c
@@ -350,7 +350,7 @@ static int __thread__prepare_access(struct thread *thread)
down_read(&maps->lock);
- for (map = maps__first(maps); map; map = map__next(map)) {
+ maps__for_each_entry(maps, map) {
err = unwind__prepare_access(thread->mg, map, &initialized);
if (err || initialized)
break;
diff --git a/tools/perf/util/time-utils.c b/tools/perf/util/time-utils.c
index 9796a2e43f67..302443921681 100644
--- a/tools/perf/util/time-utils.c
+++ b/tools/perf/util/time-utils.c
@@ -458,10 +458,11 @@ bool perf_time__ranges_skip_sample(struct perf_time_interval *ptime_buf,
return true;
}
-int perf_time__parse_for_ranges(const char *time_str,
+int perf_time__parse_for_ranges_reltime(const char *time_str,
struct perf_session *session,
struct perf_time_interval **ranges,
- int *range_size, int *range_num)
+ int *range_size, int *range_num,
+ bool reltime)
{
bool has_percent = strchr(time_str, '%');
struct perf_time_interval *ptime_range;
@@ -471,7 +472,7 @@ int perf_time__parse_for_ranges(const char *time_str,
if (!ptime_range)
return -ENOMEM;
- if (has_percent) {
+ if (has_percent || reltime) {
if (session->evlist->first_sample_time == 0 &&
session->evlist->last_sample_time == 0) {
pr_err("HINT: no first/last sample time found in perf data.\n"
@@ -479,7 +480,9 @@ int perf_time__parse_for_ranges(const char *time_str,
"(if '--buildid-all' is enabled, please set '--timestamp-boundary').\n");
goto error;
}
+ }
+ if (has_percent) {
num = perf_time__percent_parse_str(
ptime_range, size,
time_str,
@@ -492,6 +495,15 @@ int perf_time__parse_for_ranges(const char *time_str,
if (num < 0)
goto error_invalid;
+ if (reltime) {
+ int i;
+
+ for (i = 0; i < num; i++) {
+ ptime_range[i].start += session->evlist->first_sample_time;
+ ptime_range[i].end += session->evlist->first_sample_time;
+ }
+ }
+
*range_size = size;
*range_num = num;
*ranges = ptime_range;
@@ -504,6 +516,15 @@ error:
return ret;
}
+int perf_time__parse_for_ranges(const char *time_str,
+ struct perf_session *session,
+ struct perf_time_interval **ranges,
+ int *range_size, int *range_num)
+{
+ return perf_time__parse_for_ranges_reltime(time_str, session, ranges,
+ range_size, range_num, false);
+}
+
int timestamp__scnprintf_usec(u64 timestamp, char *buf, size_t sz)
{
u64 sec = timestamp / NSEC_PER_SEC;
diff --git a/tools/perf/util/time-utils.h b/tools/perf/util/time-utils.h
index 4f42988eb2f7..1142b0bddd5e 100644
--- a/tools/perf/util/time-utils.h
+++ b/tools/perf/util/time-utils.h
@@ -26,6 +26,11 @@ bool perf_time__ranges_skip_sample(struct perf_time_interval *ptime_buf,
struct perf_session;
+int perf_time__parse_for_ranges_reltime(const char *str, struct perf_session *session,
+ struct perf_time_interval **ranges,
+ int *range_size, int *range_num,
+ bool reltime);
+
int perf_time__parse_for_ranges(const char *str, struct perf_session *session,
struct perf_time_interval **ranges,
int *range_size, int *range_num);
diff --git a/tools/perf/util/unwind-libdw.c b/tools/perf/util/unwind-libdw.c
index 15f6e46d7124..d2a8df01c4a7 100644
--- a/tools/perf/util/unwind-libdw.c
+++ b/tools/perf/util/unwind-libdw.c
@@ -80,9 +80,10 @@ static int entry(u64 ip, struct unwind_info *ui)
if (__report_module(&al, ip, ui))
return -1;
- e->ip = ip;
- e->map = al.map;
- e->sym = al.sym;
+ e->ip = ip;
+ e->ms.mg = al.mg;
+ e->ms.map = al.map;
+ e->ms.sym = al.sym;
pr_debug("unwind: %s:ip = 0x%" PRIx64 " (0x%" PRIx64 ")\n",
al.sym ? al.sym->name : "''",
diff --git a/tools/perf/util/unwind-libunwind-local.c b/tools/perf/util/unwind-libunwind-local.c
index 1800887b2255..6d53347d6744 100644
--- a/tools/perf/util/unwind-libunwind-local.c
+++ b/tools/perf/util/unwind-libunwind-local.c
@@ -575,9 +575,10 @@ static int entry(u64 ip, struct thread *thread,
struct unwind_entry e;
struct addr_location al;
- e.sym = thread__find_symbol(thread, PERF_RECORD_MISC_USER, ip, &al);
- e.ip = ip;
- e.map = al.map;
+ e.ms.sym = thread__find_symbol(thread, PERF_RECORD_MISC_USER, ip, &al);
+ e.ip = ip;
+ e.ms.map = al.map;
+ e.ms.mg = al.mg;
pr_debug("unwind: %s:ip = 0x%" PRIx64 " (0x%" PRIx64 ")\n",
al.sym ? al.sym->name : "''",
diff --git a/tools/perf/util/unwind.h b/tools/perf/util/unwind.h
index 3a7d00c20d86..50337c966979 100644
--- a/tools/perf/util/unwind.h
+++ b/tools/perf/util/unwind.h
@@ -4,17 +4,15 @@
#include <linux/compiler.h>
#include <linux/types.h>
+#include "util/map_symbol.h"
-struct map;
struct map_groups;
struct perf_sample;
-struct symbol;
struct thread;
struct unwind_entry {
- struct map *map;
- struct symbol *sym;
- u64 ip;
+ struct map_symbol ms;
+ u64 ip;
};
typedef int (*unwind_entry_cb_t)(struct unwind_entry *entry, void *arg);
diff --git a/tools/perf/util/util.c b/tools/perf/util/util.c
index ae56c766eda1..969ae560dad9 100644
--- a/tools/perf/util/util.c
+++ b/tools/perf/util/util.c
@@ -182,14 +182,31 @@ static int rm_rf_depth_pat(const char *path, int depth, const char **pat)
return rmdir(path);
}
+static int rm_rf_kcore_dir(const char *path)
+{
+ char kcore_dir_path[PATH_MAX];
+ const char *pat[] = {
+ "kcore",
+ "kallsyms",
+ "modules",
+ NULL,
+ };
+
+ snprintf(kcore_dir_path, sizeof(kcore_dir_path), "%s/kcore_dir", path);
+
+ return rm_rf_depth_pat(kcore_dir_path, 0, pat);
+}
+
int rm_rf_perf_data(const char *path)
{
const char *pat[] = {
- "header",
+ "data",
"data.*",
NULL,
};
+ rm_rf_kcore_dir(path);
+
return rm_rf_depth_pat(path, 0, pat);
}
diff --git a/tools/perf/util/vdso.c b/tools/perf/util/vdso.c
index ba4b4395f35d..6e00793c10ee 100644
--- a/tools/perf/util/vdso.c
+++ b/tools/perf/util/vdso.c
@@ -142,9 +142,9 @@ static enum dso_type machine__thread_dso_type(struct machine *machine,
struct thread *thread)
{
enum dso_type dso_type = DSO__TYPE_UNKNOWN;
- struct map *map = map_groups__first(thread->mg);
+ struct map *map;
- for (; map ; map = map_groups__next(map)) {
+ map_groups__for_each_entry(thread->mg, map) {
struct dso *dso = map->dso;
if (!dso || dso->long_name[0] != '/')
continue;
diff --git a/tools/power/cpupower/ToDo b/tools/power/cpupower/ToDo
index 6e8b89f282e6..b196a139a3e4 100644
--- a/tools/power/cpupower/ToDo
+++ b/tools/power/cpupower/ToDo
@@ -8,3 +8,17 @@ ToDos sorted by priority:
- Add another c1e debug idle monitor
-> Is by design racy with BIOS, but could be added
with a --force option and some "be careful" messages
+- Add cpu_start()/cpu_stop() callbacks for monitor
+ -> This is to move the per_cpu logic from inside the
+ monitor to outside it. This can be given higher
+ priority in fork_it.
+- Fork as many processes as there are CPUs in case the
+ per_cpu_schedule flag is set.
+ -> Bind each forked process to a cpu.
+ -> Execute start measures via the forked processes on
+ each cpu.
+ -> Run test executable in a forked process.
+ -> Execute stop measures via the forked processes on
+ each cpu.
+ This would be ideal, as it would not introduce noise
+ into the tested executable.
diff --git a/tools/power/cpupower/utils/cpupower-info.c b/tools/power/cpupower/utils/cpupower-info.c
index 4c9d342b70ff..d3755ea70d4d 100644
--- a/tools/power/cpupower/utils/cpupower-info.c
+++ b/tools/power/cpupower/utils/cpupower-info.c
@@ -10,6 +10,7 @@
#include <errno.h>
#include <string.h>
#include <getopt.h>
+#include <sys/utsname.h>
#include "helpers/helpers.h"
#include "helpers/sysfs.h"
@@ -30,6 +31,7 @@ int cmd_info(int argc, char **argv)
extern char *optarg;
extern int optind, opterr, optopt;
unsigned int cpu;
+ struct utsname uts;
union {
struct {
@@ -39,6 +41,13 @@ int cmd_info(int argc, char **argv)
} params = {};
int ret = 0;
+ ret = uname(&uts);
+ if (!ret && (!strcmp(uts.machine, "ppc64le") ||
+ !strcmp(uts.machine, "ppc64"))) {
+ fprintf(stderr, _("Subcommand not supported on POWER.\n"));
+ return ret;
+ }
+
setlocale(LC_ALL, "");
textdomain(PACKAGE);
diff --git a/tools/power/cpupower/utils/cpupower-set.c b/tools/power/cpupower/utils/cpupower-set.c
index 3cd95c6cb974..3cca6f715dd9 100644
--- a/tools/power/cpupower/utils/cpupower-set.c
+++ b/tools/power/cpupower/utils/cpupower-set.c
@@ -10,6 +10,7 @@
#include <errno.h>
#include <string.h>
#include <getopt.h>
+#include <sys/utsname.h>
#include "helpers/helpers.h"
#include "helpers/sysfs.h"
@@ -31,6 +32,7 @@ int cmd_set(int argc, char **argv)
extern char *optarg;
extern int optind, opterr, optopt;
unsigned int cpu;
+ struct utsname uts;
union {
struct {
@@ -41,6 +43,13 @@ int cmd_set(int argc, char **argv)
int perf_bias = 0;
int ret = 0;
+ ret = uname(&uts);
+ if (!ret && (!strcmp(uts.machine, "ppc64le") ||
+ !strcmp(uts.machine, "ppc64"))) {
+ fprintf(stderr, _("Subcommand not supported on POWER.\n"));
+ return ret;
+ }
+
setlocale(LC_ALL, "");
textdomain(PACKAGE);
diff --git a/tools/power/cpupower/utils/helpers/cpuid.c b/tools/power/cpupower/utils/helpers/cpuid.c
index 5cc39d4e23ed..73bfafc60e9b 100644
--- a/tools/power/cpupower/utils/helpers/cpuid.c
+++ b/tools/power/cpupower/utils/helpers/cpuid.c
@@ -131,6 +131,10 @@ out:
if (ext_cpuid_level >= 0x80000007 &&
(cpuid_edx(0x80000007) & (1 << 9)))
cpu_info->caps |= CPUPOWER_CAP_AMD_CBP;
+
+ if (ext_cpuid_level >= 0x80000008 &&
+ cpuid_ebx(0x80000008) & (1 << 4))
+ cpu_info->caps |= CPUPOWER_CAP_AMD_RDPRU;
}
if (cpu_info->vendor == X86_VENDOR_INTEL) {
diff --git a/tools/power/cpupower/utils/helpers/helpers.h b/tools/power/cpupower/utils/helpers/helpers.h
index 357b19bb136e..c258eeccd05f 100644
--- a/tools/power/cpupower/utils/helpers/helpers.h
+++ b/tools/power/cpupower/utils/helpers/helpers.h
@@ -69,6 +69,7 @@ enum cpupower_cpu_vendor {X86_VENDOR_UNKNOWN = 0, X86_VENDOR_INTEL,
#define CPUPOWER_CAP_HAS_TURBO_RATIO 0x00000010
#define CPUPOWER_CAP_IS_SNB 0x00000020
#define CPUPOWER_CAP_INTEL_IDA 0x00000040
+#define CPUPOWER_CAP_AMD_RDPRU 0x00000080
#define CPUPOWER_AMD_CPBDIS 0x02000000
diff --git a/tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c b/tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c
index 3f893b99b337..33dc34db4f3c 100644
--- a/tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c
+++ b/tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c
@@ -328,7 +328,7 @@ struct cpuidle_monitor amd_fam14h_monitor = {
.stop = amd_fam14h_stop,
.do_register = amd_fam14h_register,
.unregister = amd_fam14h_unregister,
- .needs_root = 1,
+ .flags.needs_root = 1,
.overflow_s = OVERFLOW_MS / 1000,
};
#endif /* #if defined(__i386__) || defined(__x86_64__) */
diff --git a/tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c b/tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c
index f634aeb65c5f..3c4cee160b0e 100644
--- a/tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c
+++ b/tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c
@@ -207,6 +207,6 @@ struct cpuidle_monitor cpuidle_sysfs_monitor = {
.stop = cpuidle_stop,
.do_register = cpuidle_register,
.unregister = cpuidle_unregister,
- .needs_root = 0,
+ .flags.needs_root = 0,
.overflow_s = UINT_MAX,
};
diff --git a/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c b/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c
index d3c3e6e7aa26..6d44fec55ad5 100644
--- a/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c
+++ b/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c
@@ -408,7 +408,7 @@ int cmd_monitor(int argc, char **argv)
dprint("Try to register: %s\n", all_monitors[num]->name);
test_mon = all_monitors[num]->do_register();
if (test_mon) {
- if (test_mon->needs_root && !run_as_root) {
+ if (test_mon->flags.needs_root && !run_as_root) {
fprintf(stderr, _("Available monitor %s needs "
"root access\n"), test_mon->name);
continue;
diff --git a/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.h b/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.h
index a2d901d3bfaf..5b5eb1da0cce 100644
--- a/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.h
+++ b/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.h
@@ -60,7 +60,10 @@ struct cpuidle_monitor {
struct cpuidle_monitor* (*do_register) (void);
void (*unregister)(void);
unsigned int overflow_s;
- int needs_root;
+ struct {
+ unsigned int needs_root:1;
+ unsigned int per_cpu_schedule:1;
+ } flags;
};
extern long long timespec_diff_us(struct timespec start, struct timespec end);
diff --git a/tools/power/cpupower/utils/idle_monitor/hsw_ext_idle.c b/tools/power/cpupower/utils/idle_monitor/hsw_ext_idle.c
index 7c7451d3f494..97ad3233a521 100644
--- a/tools/power/cpupower/utils/idle_monitor/hsw_ext_idle.c
+++ b/tools/power/cpupower/utils/idle_monitor/hsw_ext_idle.c
@@ -39,7 +39,6 @@ static cstate_t hsw_ext_cstates[HSW_EXT_CSTATE_COUNT] = {
{
.name = "PC9",
.desc = N_("Processor Package C9"),
- .desc = N_("Processor Package C2"),
.id = PC9,
.range = RANGE_PACKAGE,
.get_count_percent = hsw_ext_get_count_percent,
@@ -188,7 +187,7 @@ struct cpuidle_monitor intel_hsw_ext_monitor = {
.stop = hsw_ext_stop,
.do_register = hsw_ext_register,
.unregister = hsw_ext_unregister,
- .needs_root = 1,
+ .flags.needs_root = 1,
.overflow_s = 922000000 /* 922337203 seconds TSC overflow
at 20GHz */
};
diff --git a/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c b/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
index 44806a6dae11..e7d48cb563c0 100644
--- a/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
+++ b/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
@@ -19,6 +19,10 @@
#define MSR_APERF 0xE8
#define MSR_MPERF 0xE7
+#define RDPRU ".byte 0x0f, 0x01, 0xfd"
+#define RDPRU_ECX_MPERF 0
+#define RDPRU_ECX_APERF 1
+
#define MSR_TSC 0x10
#define MSR_AMD_HWCR 0xc0010015
@@ -86,15 +90,51 @@ static int mperf_get_tsc(unsigned long long *tsc)
return ret;
}
+static int get_aperf_mperf(int cpu, unsigned long long *aval,
+ unsigned long long *mval)
+{
+ unsigned long low_a, high_a;
+ unsigned long low_m, high_m;
+ int ret;
+
+ /*
+ * Running on the cpu whose registers we read keeps the MSR
+ * accesses local, so APERF/MPERF cannot drift apart due to
+ * the IPI latency that remote read_msr()s would introduce.
+ */
+ if (mperf_monitor.flags.per_cpu_schedule) {
+ if (bind_cpu(cpu))
+ return 1;
+ }
+
+ if (cpupower_cpu_info.caps & CPUPOWER_CAP_AMD_RDPRU) {
+ asm volatile(RDPRU
+ : "=a" (low_a), "=d" (high_a)
+ : "c" (RDPRU_ECX_APERF));
+ asm volatile(RDPRU
+ : "=a" (low_m), "=d" (high_m)
+ : "c" (RDPRU_ECX_MPERF));
+
+ *aval = ((low_a) | (high_a) << 32);
+ *mval = ((low_m) | (high_m) << 32);
+
+ return 0;
+ }
+
+ ret = read_msr(cpu, MSR_APERF, aval);
+ ret |= read_msr(cpu, MSR_MPERF, mval);
+
+ return ret;
+}
+
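For context on why APERF and MPERF are read as a pair: the ratio of their deltas over a measurement interval gives the average non-idle frequency relative to the base frequency. A hedged helper sketch (not part of the patch):

    /*
     * Average busy-frequency ratio between two snapshots; unsigned
     * subtraction tolerates a single counter wrap.
     */
    static double aperf_mperf_ratio(unsigned long long aprev, unsigned long long acur,
                                    unsigned long long mprev, unsigned long long mcur)
    {
            unsigned long long da = acur - aprev;
            unsigned long long dm = mcur - mprev;

            return dm ? (double)da / (double)dm : 0.0;
    }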
static int mperf_init_stats(unsigned int cpu)
{
- unsigned long long val;
+ unsigned long long aval, mval;
int ret;
- ret = read_msr(cpu, MSR_APERF, &val);
- aperf_previous_count[cpu] = val;
- ret |= read_msr(cpu, MSR_MPERF, &val);
- mperf_previous_count[cpu] = val;
+ ret = get_aperf_mperf(cpu, &aval, &mval);
+ aperf_previous_count[cpu] = aval;
+ mperf_previous_count[cpu] = mval;
is_valid[cpu] = !ret;
return 0;
@@ -102,13 +142,12 @@ static int mperf_init_stats(unsigned int cpu)
static int mperf_measure_stats(unsigned int cpu)
{
- unsigned long long val;
+ unsigned long long aval, mval;
int ret;
- ret = read_msr(cpu, MSR_APERF, &val);
- aperf_current_count[cpu] = val;
- ret |= read_msr(cpu, MSR_MPERF, &val);
- mperf_current_count[cpu] = val;
+ ret = get_aperf_mperf(cpu, &aval, &mval);
+ aperf_current_count[cpu] = aval;
+ mperf_current_count[cpu] = mval;
is_valid[cpu] = !ret;
return 0;
@@ -305,6 +344,9 @@ struct cpuidle_monitor *mperf_register(void)
if (init_maxfreq_mode())
return NULL;
+ if (cpupower_cpu_info.vendor == X86_VENDOR_AMD)
+ mperf_monitor.flags.per_cpu_schedule = 1;
+
/* Free this at program termination */
is_valid = calloc(cpu_count, sizeof(int));
mperf_previous_count = calloc(cpu_count, sizeof(unsigned long long));
@@ -333,7 +375,7 @@ struct cpuidle_monitor mperf_monitor = {
.stop = mperf_stop,
.do_register = mperf_register,
.unregister = mperf_unregister,
- .needs_root = 1,
+ .flags.needs_root = 1,
.overflow_s = 922000000 /* 922337203 seconds TSC overflow
at 20GHz */
};
diff --git a/tools/power/cpupower/utils/idle_monitor/nhm_idle.c b/tools/power/cpupower/utils/idle_monitor/nhm_idle.c
index be7256696a37..114271165182 100644
--- a/tools/power/cpupower/utils/idle_monitor/nhm_idle.c
+++ b/tools/power/cpupower/utils/idle_monitor/nhm_idle.c
@@ -208,7 +208,7 @@ struct cpuidle_monitor intel_nhm_monitor = {
.stop = nhm_stop,
.do_register = intel_nhm_register,
.unregister = intel_nhm_unregister,
- .needs_root = 1,
+ .flags.needs_root = 1,
.overflow_s = 922000000 /* 922337203 seconds TSC overflow
at 20GHz */
};
diff --git a/tools/power/cpupower/utils/idle_monitor/snb_idle.c b/tools/power/cpupower/utils/idle_monitor/snb_idle.c
index 968333571cad..df8b223cc096 100644
--- a/tools/power/cpupower/utils/idle_monitor/snb_idle.c
+++ b/tools/power/cpupower/utils/idle_monitor/snb_idle.c
@@ -192,7 +192,7 @@ struct cpuidle_monitor intel_snb_monitor = {
.stop = snb_stop,
.do_register = snb_register,
.unregister = snb_unregister,
- .needs_root = 1,
+ .flags.needs_root = 1,
.overflow_s = 922000000 /* 922337203 seconds TSC overflow
at 20GHz */
};
diff --git a/tools/testing/kunit/.gitignore b/tools/testing/kunit/.gitignore
new file mode 100644
index 000000000000..c791ff59a37a
--- /dev/null
+++ b/tools/testing/kunit/.gitignore
@@ -0,0 +1,3 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod] \ No newline at end of file
diff --git a/tools/testing/kunit/configs/all_tests.config b/tools/testing/kunit/configs/all_tests.config
new file mode 100644
index 000000000000..9235b7d42d38
--- /dev/null
+++ b/tools/testing/kunit/configs/all_tests.config
@@ -0,0 +1,3 @@
+CONFIG_KUNIT=y
+CONFIG_KUNIT_TEST=y
+CONFIG_KUNIT_EXAMPLE_TEST=y
diff --git a/tools/testing/kunit/kunit.py b/tools/testing/kunit/kunit.py
new file mode 100755
index 000000000000..efe06d621983
--- /dev/null
+++ b/tools/testing/kunit/kunit.py
@@ -0,0 +1,138 @@
+#!/usr/bin/python3
+# SPDX-License-Identifier: GPL-2.0
+#
+# A thin wrapper on top of the KUnit Kernel
+#
+# Copyright (C) 2019, Google LLC.
+# Author: Felix Guo <felixguoxiuping@gmail.com>
+# Author: Brendan Higgins <brendanhiggins@google.com>
+
+import argparse
+import sys
+import os
+import time
+import shutil
+
+from collections import namedtuple
+from enum import Enum, auto
+
+import kunit_config
+import kunit_kernel
+import kunit_parser
+
+KunitResult = namedtuple('KunitResult', ['status','result'])
+
+KunitRequest = namedtuple('KunitRequest', ['raw_output','timeout', 'jobs', 'build_dir', 'defconfig'])
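+# KunitResult pairs an overall KunitStatus with either an error string or
+# a parsed kunit_parser.TestResult; KunitRequest carries the command-line
+# options that drive one configure/build/run cycle.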
+
+class KunitStatus(Enum):
+ SUCCESS = auto()
+ CONFIG_FAILURE = auto()
+ BUILD_FAILURE = auto()
+ TEST_FAILURE = auto()
+
+def create_default_kunitconfig():
+ if not os.path.exists(kunit_kernel.KUNITCONFIG_PATH):
+ shutil.copyfile('arch/um/configs/kunit_defconfig',
+ kunit_kernel.KUNITCONFIG_PATH)
+
+def run_tests(linux: kunit_kernel.LinuxSourceTree,
+ request: KunitRequest) -> KunitResult:
+ if request.defconfig:
+ create_default_kunitconfig()
+
+ config_start = time.time()
+ success = linux.build_reconfig(request.build_dir)
+ config_end = time.time()
+ if not success:
+ return KunitResult(KunitStatus.CONFIG_FAILURE, 'could not configure kernel')
+
+ kunit_parser.print_with_timestamp('Building KUnit Kernel ...')
+
+ build_start = time.time()
+ success = linux.build_um_kernel(request.jobs, request.build_dir)
+ build_end = time.time()
+ if not success:
+ return KunitResult(KunitStatus.BUILD_FAILURE, 'could not build kernel')
+
+ kunit_parser.print_with_timestamp('Starting KUnit Kernel ...')
+ test_start = time.time()
+
+ test_result = kunit_parser.TestResult(kunit_parser.TestStatus.SUCCESS,
+ [],
+ 'Tests not parsed.')
+ if request.raw_output:
+ kunit_parser.raw_output(
+ linux.run_kernel(timeout=request.timeout,
+ build_dir=request.build_dir))
+ else:
+ kunit_output = linux.run_kernel(timeout=request.timeout,
+ build_dir=request.build_dir)
+ test_result = kunit_parser.parse_run_tests(kunit_output)
+ test_end = time.time()
+
+ kunit_parser.print_with_timestamp((
+ 'Elapsed time: %.3fs total, %.3fs configuring, %.3fs ' +
+ 'building, %.3fs running\n') % (
+ test_end - config_start,
+ config_end - config_start,
+ build_end - build_start,
+ test_end - test_start))
+
+ if test_result.status != kunit_parser.TestStatus.SUCCESS:
+ return KunitResult(KunitStatus.TEST_FAILURE, test_result)
+ else:
+ return KunitResult(KunitStatus.SUCCESS, test_result)
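+# Typical invocation, as a sketch, from the root of a kernel source tree:
+#   $ ./tools/testing/kunit/kunit.py run --defconfig --timeout=60
+# run_tests() then configures, builds, and boots the UML kernel, and
+# parses the TAP output produced by the KUnit tests.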
+
+def main(argv, linux=None):
+ parser = argparse.ArgumentParser(
+ description='Helps write and run KUnit tests.')
+ subparser = parser.add_subparsers(dest='subcommand')
+
+ run_parser = subparser.add_parser('run', help='Runs KUnit tests.')
+ run_parser.add_argument('--raw_output', help='don\'t format output from kernel',
+ action='store_true')
+
+ run_parser.add_argument('--timeout',
+ help='maximum number of seconds to allow for all tests '
+ 'to run. This does not include time taken to build the '
+ 'tests.',
+ type=int,
+ default=300,
+ metavar='timeout')
+
+ run_parser.add_argument('--jobs',
+ help='As in the make command, "Specifies the number of '
+ 'jobs (commands) to run simultaneously."',
+ type=int, default=8, metavar='jobs')
+
+ run_parser.add_argument('--build_dir',
+ help='As in the make command, it specifies the build '
+ 'directory.',
+ type=str, default=None, metavar='build_dir')
+
+ run_parser.add_argument('--defconfig',
+ help='Uses a default kunitconfig.',
+ action='store_true')
+
+ cli_args = parser.parse_args(argv)
+
+ if cli_args.subcommand == 'run':
+ if cli_args.defconfig:
+ create_default_kunitconfig()
+
+ if not linux:
+ linux = kunit_kernel.LinuxSourceTree()
+
+ request = KunitRequest(cli_args.raw_output,
+ cli_args.timeout,
+ cli_args.jobs,
+ cli_args.build_dir,
+ cli_args.defconfig)
+ result = run_tests(linux, request)
+ if result.status != KunitStatus.SUCCESS:
+ sys.exit(1)
+ else:
+ parser.print_help()
+
+if __name__ == '__main__':
+ main(sys.argv[1:])
diff --git a/tools/testing/kunit/kunit_config.py b/tools/testing/kunit/kunit_config.py
new file mode 100644
index 000000000000..ebf3942b23f5
--- /dev/null
+++ b/tools/testing/kunit/kunit_config.py
@@ -0,0 +1,66 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Builds a .config from a kunitconfig.
+#
+# Copyright (C) 2019, Google LLC.
+# Author: Felix Guo <felixguoxiuping@gmail.com>
+# Author: Brendan Higgins <brendanhiggins@google.com>
+
+import collections
+import re
+
+CONFIG_IS_NOT_SET_PATTERN = r'^# CONFIG_\w+ is not set$'
+CONFIG_PATTERN = r'^CONFIG_\w+=\S+$'
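+# A kunitconfig line is either an assignment ("CONFIG_FOO=y") or an
+# explicit "# CONFIG_FOO is not set" marker; blank lines and other
+# comments are skipped, and anything else is a parse error.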
+
+KconfigEntryBase = collections.namedtuple('KconfigEntry', ['raw_entry'])
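+# KconfigEntry wraps one raw kunitconfig line; as a namedtuple it is
+# hashable and compares by value, which lets Kconfig.entries() use sets.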
+
+
+class KconfigEntry(KconfigEntryBase):
+
+ def __str__(self) -> str:
+ return self.raw_entry
+
+
+class KconfigParseError(Exception):
+ """Error parsing Kconfig defconfig or .config."""
+
+
+class Kconfig(object):
+ """Represents defconfig or .config specified using the Kconfig language."""
+
+ def __init__(self):
+ self._entries = []
+
+ def entries(self):
+ return set(self._entries)
+
+ def add_entry(self, entry: KconfigEntry) -> None:
+ self._entries.append(entry)
+
+ def is_subset_of(self, other: 'Kconfig') -> bool:
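+ """True when every entry here also appears in other; used to decide
+ whether the generated .config must be rebuilt."""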
+ return self.entries().issubset(other.entries())
+
+ def write_to_file(self, path: str) -> None:
+ with open(path, 'w') as f:
+ for entry in self.entries():
+ f.write(str(entry) + '\n')
+
+ def parse_from_string(self, blob: str) -> None:
+ """Parses a string containing KconfigEntrys and populates this Kconfig."""
+ self._entries = []
+ is_not_set_matcher = re.compile(CONFIG_IS_NOT_SET_PATTERN)
+ config_matcher = re.compile(CONFIG_PATTERN)
+ for line in blob.split('\n'):
+ line = line.strip()
+ if not line:
+ continue
+ elif config_matcher.match(line) or is_not_set_matcher.match(line):
+ self._entries.append(KconfigEntry(line))
+ elif line[0] == '#':
+ continue
+ else:
+ raise KconfigParseError('Failed to parse: ' + line)
+
+ def read_from_file(self, path: str) -> None:
+ with open(path, 'r') as f:
+ self.parse_from_string(f.read())
diff --git a/tools/testing/kunit/kunit_kernel.py b/tools/testing/kunit/kunit_kernel.py
new file mode 100644
index 000000000000..bf3876835331
--- /dev/null
+++ b/tools/testing/kunit/kunit_kernel.py
@@ -0,0 +1,149 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Runs UML kernel, collects output, and handles errors.
+#
+# Copyright (C) 2019, Google LLC.
+# Author: Felix Guo <felixguoxiuping@gmail.com>
+# Author: Brendan Higgins <brendanhiggins@google.com>
+
+
+import logging
+import subprocess
+import os
+
+import kunit_config
+
+KCONFIG_PATH = '.config'
+KUNITCONFIG_PATH = 'kunitconfig'
+
+class ConfigError(Exception):
+ """Represents an error trying to configure the Linux kernel."""
+
+
+class BuildError(Exception):
+ """Represents an error trying to build the Linux kernel."""
+
+
+class LinuxSourceTreeOperations(object):
+ """An abstraction over command line operations performed on a source tree."""
+
+ def make_mrproper(self):
+ try:
+ subprocess.check_output(['make', 'mrproper'])
+ except OSError as e:
+ raise ConfigError('Could not call make command: ' + str(e))
+ except subprocess.CalledProcessError as e:
+ raise ConfigError(e.output)
+
+ def make_olddefconfig(self, build_dir):
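+ # olddefconfig fills in any symbols missing from the written .config
+ # with their Kconfig defaults; ARCH=um targets User Mode Linux.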
+ command = ['make', 'ARCH=um', 'olddefconfig']
+ if build_dir:
+ command += ['O=' + build_dir]
+ try:
+ subprocess.check_output(command)
+ except OSError as e:
+ raise ConfigError('Could not call make command: ' + str(e))
+ except subprocess.CalledProcessError as e:
+ raise ConfigError(e.output)
+
+ def make(self, jobs, build_dir):
+ command = ['make', 'ARCH=um', '--jobs=' + str(jobs)]
+ if build_dir:
+ command += ['O=' + build_dir]
+ try:
+ subprocess.check_output(command)
+ except OSError as e:
+ raise BuildError('Could not execute make: ' + str(e))
+ except subprocess.CalledProcessError as e:
+ raise BuildError(e.output)
+
+ def linux_bin(self, params, timeout, build_dir):
+ """Runs the Linux UML binary. Must be named 'linux'."""
+ linux_bin = './linux'
+ if build_dir:
+ linux_bin = os.path.join(build_dir, 'linux')
+ process = subprocess.Popen(
+ [linux_bin] + params,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ process.wait(timeout=timeout)
+ return process
+
+
+def get_kconfig_path(build_dir):
+ kconfig_path = KCONFIG_PATH
+ if build_dir:
+ kconfig_path = os.path.join(build_dir, KCONFIG_PATH)
+ return kconfig_path
+
+class LinuxSourceTree(object):
+ """Represents a Linux kernel source tree with KUnit tests."""
+
+ def __init__(self):
+ self._kconfig = kunit_config.Kconfig()
+ self._kconfig.read_from_file(KUNITCONFIG_PATH)
+ self._ops = LinuxSourceTreeOperations()
+
+ def clean(self):
+ try:
+ self._ops.make_mrproper()
+ except ConfigError as e:
+ logging.error(e)
+ return False
+ return True
+
+ def build_config(self, build_dir):
+ kconfig_path = get_kconfig_path(build_dir)
+ if build_dir and not os.path.exists(build_dir):
+ os.mkdir(build_dir)
+ self._kconfig.write_to_file(kconfig_path)
+ try:
+ self._ops.make_olddefconfig(build_dir)
+ except ConfigError as e:
+ logging.error(e)
+ return False
+ validated_kconfig = kunit_config.Kconfig()
+ validated_kconfig.read_from_file(kconfig_path)
+ if not self._kconfig.is_subset_of(validated_kconfig):
+ logging.error('Provided Kconfig is not contained in validated .config!')
+ return False
+ return True
+
+ def build_reconfig(self, build_dir):
+ """Creates a new .config if it is not a subset of the kunitconfig."""
+ kconfig_path = get_kconfig_path(build_dir)
+ if os.path.exists(kconfig_path):
+ existing_kconfig = kunit_config.Kconfig()
+ existing_kconfig.read_from_file(kconfig_path)
+ if not self._kconfig.is_subset_of(existing_kconfig):
+ print('Regenerating .config ...')
+ os.remove(kconfig_path)
+ return self.build_config(build_dir)
+ else:
+ return True
+ else:
+ print('Generating .config ...')
+ return self.build_config(build_dir)
+
+ def build_um_kernel(self, jobs, build_dir):
+ try:
+ self._ops.make_olddefconfig(build_dir)
+ self._ops.make(jobs, build_dir)
+ except (ConfigError, BuildError) as e:
+ logging.error(e)
+ return False
+ used_kconfig = kunit_config.Kconfig()
+ used_kconfig.read_from_file(get_kconfig_path(build_dir))
+ if not self._kconfig.is_subset_of(used_kconfig):
+ logging.error('Provided Kconfig is not contained in final config!')
+ return False
+ return True
+
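+ # Note: run_kernel() is a generator that tees each line of kernel
+ # output into test.log while yielding it for incremental parsing.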
+ def run_kernel(self, args=None, timeout=None, build_dir=None):
+ args = (args or []) + ['mem=256M']
+ process = self._ops.linux_bin(args, timeout, build_dir)
+ with open('test.log', 'w') as f:
+ for line in process.stdout:
+ f.write(line.rstrip().decode('ascii') + '\n')
+ yield line.rstrip().decode('ascii')
diff --git a/tools/testing/kunit/kunit_parser.py b/tools/testing/kunit/kunit_parser.py
new file mode 100644
index 000000000000..4ffbae0f6732
--- /dev/null
+++ b/tools/testing/kunit/kunit_parser.py
@@ -0,0 +1,310 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Parses test results from a kernel dmesg log.
+#
+# Copyright (C) 2019, Google LLC.
+# Author: Felix Guo <felixguoxiuping@gmail.com>
+# Author: Brendan Higgins <brendanhiggins@google.com>
+
+import re
+
+from collections import namedtuple
+from datetime import datetime
+from enum import Enum, auto
+from functools import reduce
+from typing import List
+
+TestResult = namedtuple('TestResult', ['status','suites','log'])
+
+class TestSuite(object):
+ def __init__(self):
+ self.status = None
+ self.name = None
+ self.cases = []
+
+ def __str__(self):
+ return 'TestSuite(' + str(self.status) + ',' + str(self.name) + ',' + str(self.cases) + ')'
+
+ def __repr__(self):
+ return str(self)
+
+class TestCase(object):
+ def __init__(self):
+ self.status = None
+ self.name = ''
+ self.log = []
+
+ def __str__(self):
+ return 'TestCase(' + str(self.status) + ',' + self.name + ',' + str(self.log) + ')'
+
+ def __repr__(self):
+ return str(self)
+
+class TestStatus(Enum):
+ SUCCESS = auto()
+ FAILURE = auto()
+ TEST_CRASHED = auto()
+ NO_TESTS = auto()
+
+kunit_start_re = re.compile(r'^TAP version [0-9]+$')
+kunit_end_re = re.compile('List of all partitions:')
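+# KUnit output is bracketed by the TAP header and the first line UML
+# prints after the tests ("List of all partitions:" from the failed
+# attempt to mount a root filesystem).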
+
+def isolate_kunit_output(kernel_output):
+ started = False
+ for line in kernel_output:
+ if kunit_start_re.match(line):
+ started = True
+ yield line
+ elif kunit_end_re.match(line):
+ break
+ elif started:
+ yield line
+
+def raw_output(kernel_output):
+ for line in kernel_output:
+ print(line)
+
+DIVIDER = '=' * 60
+
+RESET = '\033[0;0m'
+
+def red(text):
+ return '\033[1;31m' + text + RESET
+
+def yellow(text):
+ return '\033[1;33m' + text + RESET
+
+def green(text):
+ return '\033[1;32m' + text + RESET
+
+def print_with_timestamp(message):
+ print('[%s] %s' % (datetime.now().strftime('%H:%M:%S'), message))
+
+def format_suite_divider(message):
+ return '======== ' + message + ' ========'
+
+def print_suite_divider(message):
+ print_with_timestamp(DIVIDER)
+ print_with_timestamp(format_suite_divider(message))
+
+def print_log(log):
+ for m in log:
+ print_with_timestamp(m)
+
+TAP_ENTRIES = re.compile(r'^(TAP|\t?ok|\t?not ok|\t?[0-9]+\.\.[0-9]+|\t?#).*$')
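+# Lines matching TAP_ENTRIES belong to the TAP protocol itself; every
+# other line is diagnostic output, either skipped or logged against the
+# current test case.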
+
+def consume_non_diagnostic(lines: List[str]) -> None:
+ while lines and not TAP_ENTRIES.match(lines[0]):
+ lines.pop(0)
+
+def save_non_diagnostic(lines: List[str], test_case: TestCase) -> None:
+ while lines and not TAP_ENTRIES.match(lines[0]):
+ test_case.log.append(lines[0])
+ lines.pop(0)
+
+OkNotOkResult = namedtuple('OkNotOkResult', ['is_ok','description', 'text'])
+
+OK_NOT_OK_SUBTEST = re.compile(r'^\t(ok|not ok) [0-9]+ - (.*)$')
+
+OK_NOT_OK_MODULE = re.compile(r'^(ok|not ok) [0-9]+ - (.*)$')
+
+def parse_ok_not_ok_test_case(lines: List[str],
+ test_case: TestCase,
+ expecting_test_case: bool) -> bool:
+ save_non_diagnostic(lines, test_case)
+ if not lines:
+ if expecting_test_case:
+ test_case.status = TestStatus.TEST_CRASHED
+ return True
+ else:
+ return False
+ line = lines[0]
+ match = OK_NOT_OK_SUBTEST.match(line)
+ if match:
+ test_case.log.append(lines.pop(0))
+ test_case.name = match.group(2)
+ if test_case.status == TestStatus.TEST_CRASHED:
+ return True
+ if match.group(1) == 'ok':
+ test_case.status = TestStatus.SUCCESS
+ else:
+ test_case.status = TestStatus.FAILURE
+ return True
+ else:
+ return False
+
+SUBTEST_DIAGNOSTIC = re.compile(r'^\t# .*?: (.*)$')
+DIAGNOSTIC_CRASH_MESSAGE = 'kunit test case crashed!'
+
+def parse_diagnostic(lines: List[str], test_case: TestCase) -> bool:
+ save_non_diagnostic(lines, test_case)
+ if not lines:
+ return False
+ line = lines[0]
+ match = SUBTEST_DIAGNOSTIC.match(line)
+ if match:
+ test_case.log.append(lines.pop(0))
+ if match.group(1) == DIAGNOSTIC_CRASH_MESSAGE:
+ test_case.status = TestStatus.TEST_CRASHED
+ return True
+ else:
+ return False
+
+def parse_test_case(lines: List[str], expecting_test_case: bool) -> TestCase:
+ test_case = TestCase()
+ save_non_diagnostic(lines, test_case)
+ while parse_diagnostic(lines, test_case):
+ pass
+ if parse_ok_not_ok_test_case(lines, test_case, expecting_test_case):
+ return test_case
+ else:
+ return None
+
+SUBTEST_HEADER = re.compile(r'^\t# Subtest: (.*)$')
+
+def parse_subtest_header(lines: List[str]) -> str:
+ consume_non_diagnostic(lines)
+ if not lines:
+ return None
+ match = SUBTEST_HEADER.match(lines[0])
+ if match:
+ lines.pop(0)
+ return match.group(1)
+ else:
+ return None
+
+SUBTEST_PLAN = re.compile(r'\t[0-9]+\.\.([0-9]+)')
+
+def parse_subtest_plan(lines: List[str]) -> int:
+ consume_non_diagnostic(lines)
+ match = SUBTEST_PLAN.match(lines[0]) if lines else None
+ if match:
+ lines.pop(0)
+ return int(match.group(1))
+ else:
+ return None
+
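+# Severity order is TEST_CRASHED > FAILURE > other non-SUCCESS > SUCCESS;
+# taking the maximum lets errors bubble up from cases to suites to the run.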
+def max_status(left: TestStatus, right: TestStatus) -> TestStatus:
+ if left == TestStatus.TEST_CRASHED or right == TestStatus.TEST_CRASHED:
+ return TestStatus.TEST_CRASHED
+ elif left == TestStatus.FAILURE or right == TestStatus.FAILURE:
+ return TestStatus.FAILURE
+ elif left != TestStatus.SUCCESS:
+ return left
+ elif right != TestStatus.SUCCESS:
+ return right
+ else:
+ return TestStatus.SUCCESS
+
+def parse_ok_not_ok_test_suite(lines: List[str], test_suite: TestSuite) -> bool:
+ consume_non_diagnostic(lines)
+ if not lines:
+ test_suite.status = TestStatus.TEST_CRASHED
+ return False
+ line = lines[0]
+ match = OK_NOT_OK_MODULE.match(line)
+ if match:
+ lines.pop(0)
+ if match.group(1) == 'ok':
+ test_suite.status = TestStatus.SUCCESS
+ else:
+ test_suite.status = TestStatus.FAILURE
+ return True
+ else:
+ return False
+
+def bubble_up_errors(to_status, status_container_list) -> TestStatus:
+ status_list = map(to_status, status_container_list)
+ return reduce(max_status, status_list, TestStatus.SUCCESS)
+
+def bubble_up_test_case_errors(test_suite: TestSuite) -> TestStatus:
+ max_test_case_status = bubble_up_errors(lambda x: x.status, test_suite.cases)
+ return max_status(max_test_case_status, test_suite.status)
+
+def parse_test_suite(lines: List[str]) -> TestSuite:
+ if not lines:
+ return None
+ consume_non_diagnostic(lines)
+ test_suite = TestSuite()
+ test_suite.status = TestStatus.SUCCESS
+ name = parse_subtest_header(lines)
+ if not name:
+ return None
+ test_suite.name = name
+ expected_test_case_num = parse_subtest_plan(lines)
+ if not expected_test_case_num:
+ return None
+ test_case = parse_test_case(lines, expected_test_case_num > 0)
+ expected_test_case_num -= 1
+ while test_case:
+ test_suite.cases.append(test_case)
+ test_case = parse_test_case(lines, expected_test_case_num > 0)
+ expected_test_case_num -= 1
+ if parse_ok_not_ok_test_suite(lines, test_suite):
+ test_suite.status = bubble_up_test_case_errors(test_suite)
+ return test_suite
+ elif not lines:
+ print_with_timestamp(red('[ERROR] ') + 'ran out of lines before end token')
+ return test_suite
+ else:
+ print('failed to parse end of suite: ' + lines[0])
+ return None
+
+TAP_HEADER = re.compile(r'^TAP version 14$')
+
+def parse_tap_header(lines: List[str]) -> bool:
+ consume_non_diagnostic(lines)
+ if lines and TAP_HEADER.match(lines[0]):
+ lines.pop(0)
+ return True
+ else:
+ return False
+
+def bubble_up_suite_errors(test_suite_list: List[TestSuite]) -> TestStatus:
+ return bubble_up_errors(lambda x: x.status, test_suite_list)
+
+def parse_test_result(lines: List[str]) -> TestResult:
+ if not lines:
+ return TestResult(TestStatus.NO_TESTS, [], lines)
+ consume_non_diagnostic(lines)
+ if not parse_tap_header(lines):
+ return None
+ test_suites = []
+ test_suite = parse_test_suite(lines)
+ while test_suite:
+ test_suites.append(test_suite)
+ test_suite = parse_test_suite(lines)
+ return TestResult(bubble_up_suite_errors(test_suites), test_suites, lines)
+
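+# Top-level entry point: isolate the KUnit slice of the kernel log, build
+# the TestResult tree, and print a colorized per-suite/per-case summary.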
+def parse_run_tests(kernel_output) -> TestResult:
+ total_tests = 0
+ failed_tests = 0
+ crashed_tests = 0
+ test_result = parse_test_result(list(isolate_kunit_output(kernel_output)))
+ for test_suite in test_result.suites:
+ if test_suite.status == TestStatus.SUCCESS:
+ print_suite_divider(green('[PASSED] ') + test_suite.name)
+ elif test_suite.status == TestStatus.TEST_CRASHED:
+ print_suite_divider(red('[CRASHED] ' + test_suite.name))
+ else:
+ print_suite_divider(red('[FAILED] ') + test_suite.name)
+ for test_case in test_suite.cases:
+ total_tests += 1
+ if test_case.status == TestStatus.SUCCESS:
+ print_with_timestamp(green('[PASSED] ') + test_case.name)
+ elif test_case.status == TestStatus.TEST_CRASHED:
+ crashed_tests += 1
+ print_with_timestamp(red('[CRASHED] ' + test_case.name))
+ print_log(map(yellow, test_case.log))
+ print_with_timestamp('')
+ else:
+ failed_tests += 1
+ print_with_timestamp(red('[FAILED] ') + test_case.name)
+ print_log(map(yellow, test_case.log))
+ print_with_timestamp('')
+ print_with_timestamp(DIVIDER)
+ fmt = green if test_result.status == TestStatus.SUCCESS else red
+ print_with_timestamp(
+ fmt('Testing complete. %d tests run. %d failed. %d crashed.' %
+ (total_tests, failed_tests, crashed_tests)))
+ return test_result
diff --git a/tools/testing/kunit/kunit_tool_test.py b/tools/testing/kunit/kunit_tool_test.py
new file mode 100755
index 000000000000..4a12baa0cd4e
--- /dev/null
+++ b/tools/testing/kunit/kunit_tool_test.py
@@ -0,0 +1,206 @@
+#!/usr/bin/python3
+# SPDX-License-Identifier: GPL-2.0
+#
+# A collection of tests for tools/testing/kunit/kunit.py
+#
+# Copyright (C) 2019, Google LLC.
+# Author: Brendan Higgins <brendanhiggins@google.com>
+
+import unittest
+from unittest import mock
+
+import tempfile, shutil # Handling test_tmpdir
+
+import os
+
+import kunit_config
+import kunit_parser
+import kunit_kernel
+import kunit
+
+test_tmpdir = ''
+
+def setUpModule():
+ global test_tmpdir
+ test_tmpdir = tempfile.mkdtemp()
+
+def tearDownModule():
+ shutil.rmtree(test_tmpdir)
+
+def get_absolute_path(path):
+ return os.path.join(os.path.dirname(__file__), path)
+
+class KconfigTest(unittest.TestCase):
+
+ def test_is_subset_of(self):
+ kconfig0 = kunit_config.Kconfig()
+ self.assertTrue(kconfig0.is_subset_of(kconfig0))
+
+ kconfig1 = kunit_config.Kconfig()
+ kconfig1.add_entry(kunit_config.KconfigEntry('CONFIG_TEST=y'))
+ self.assertTrue(kconfig1.is_subset_of(kconfig1))
+ self.assertTrue(kconfig0.is_subset_of(kconfig1))
+ self.assertFalse(kconfig1.is_subset_of(kconfig0))
+
+ def test_read_from_file(self):
+ kconfig = kunit_config.Kconfig()
+ kconfig_path = get_absolute_path(
+ 'test_data/test_read_from_file.kconfig')
+
+ kconfig.read_from_file(kconfig_path)
+
+ expected_kconfig = kunit_config.Kconfig()
+ expected_kconfig.add_entry(
+ kunit_config.KconfigEntry('CONFIG_UML=y'))
+ expected_kconfig.add_entry(
+ kunit_config.KconfigEntry('CONFIG_MMU=y'))
+ expected_kconfig.add_entry(
+ kunit_config.KconfigEntry('CONFIG_TEST=y'))
+ expected_kconfig.add_entry(
+ kunit_config.KconfigEntry('CONFIG_EXAMPLE_TEST=y'))
+ expected_kconfig.add_entry(
+ kunit_config.KconfigEntry('# CONFIG_MK8 is not set'))
+
+ self.assertEqual(kconfig.entries(), expected_kconfig.entries())
+
+ def test_write_to_file(self):
+ kconfig_path = os.path.join(test_tmpdir, '.config')
+
+ expected_kconfig = kunit_config.Kconfig()
+ expected_kconfig.add_entry(
+ kunit_config.KconfigEntry('CONFIG_UML=y'))
+ expected_kconfig.add_entry(
+ kunit_config.KconfigEntry('CONFIG_MMU=y'))
+ expected_kconfig.add_entry(
+ kunit_config.KconfigEntry('CONFIG_TEST=y'))
+ expected_kconfig.add_entry(
+ kunit_config.KconfigEntry('CONFIG_EXAMPLE_TEST=y'))
+ expected_kconfig.add_entry(
+ kunit_config.KconfigEntry('# CONFIG_MK8 is not set'))
+
+ expected_kconfig.write_to_file(kconfig_path)
+
+ actual_kconfig = kunit_config.Kconfig()
+ actual_kconfig.read_from_file(kconfig_path)
+
+ self.assertEqual(actual_kconfig.entries(),
+ expected_kconfig.entries())
+
+class KUnitParserTest(unittest.TestCase):
+
+ def assertContains(self, needle, haystack):
+ for line in haystack:
+ if needle in line:
+ return
+ raise AssertionError('"' +
+ str(needle) + '" not found in "' + str(haystack) + '"!')
+
+ def test_output_isolated_correctly(self):
+ log_path = get_absolute_path(
+ 'test_data/test_output_isolated_correctly.log')
+ file = open(log_path)
+ result = kunit_parser.isolate_kunit_output(file.readlines())
+ self.assertContains('TAP version 14\n', result)
+ self.assertContains(' # Subtest: example', result)
+ self.assertContains(' 1..2', result)
+ self.assertContains(' ok 1 - example_simple_test', result)
+ self.assertContains(' ok 2 - example_mock_test', result)
+ self.assertContains('ok 1 - example', result)
+ file.close()
+
+ def test_parse_successful_test_log(self):
+ all_passed_log = get_absolute_path(
+ 'test_data/test_is_test_passed-all_passed.log')
+ file = open(all_passed_log)
+ result = kunit_parser.parse_run_tests(file.readlines())
+ self.assertEqual(
+ kunit_parser.TestStatus.SUCCESS,
+ result.status)
+ file.close()
+
+ def test_parse_failed_test_log(self):
+ failed_log = get_absolute_path(
+ 'test_data/test_is_test_passed-failure.log')
+ file = open(failed_log)
+ result = kunit_parser.parse_run_tests(file.readlines())
+ self.assertEqual(
+ kunit_parser.TestStatus.FAILURE,
+ result.status)
+ file.close()
+
+ def test_no_tests(self):
+ empty_log = get_absolute_path(
+ 'test_data/test_is_test_passed-no_tests_run.log')
+ file = open(empty_log)
+ result = kunit_parser.parse_run_tests(
+ kunit_parser.isolate_kunit_output(file.readlines()))
+ self.assertEqual(0, len(result.suites))
+ self.assertEqual(
+ kunit_parser.TestStatus.NO_TESTS,
+ result.status)
+ file.close()
+
+ def test_crashed_test(self):
+ crashed_log = get_absolute_path(
+ 'test_data/test_is_test_passed-crash.log')
+ file = open(crashed_log)
+ result = kunit_parser.parse_run_tests(file.readlines())
+ self.assertEqual(
+ kunit_parser.TestStatus.TEST_CRASHED,
+ result.status)
+ file.close()
+
+class StrContains(str):
+ def __eq__(self, other):
+ return self in other
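+# Overriding __eq__ lets an assertion such as
+# print_mock.assert_any_call(StrContains('Testing complete.')) match any
+# printed line that contains the given substring.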
+
+class KUnitMainTest(unittest.TestCase):
+ def setUp(self):
+ path = get_absolute_path('test_data/test_is_test_passed-all_passed.log')
+ file = open(path)
+ all_passed_log = file.readlines()
+ self.print_patch = mock.patch('builtins.print')
+ self.print_mock = self.print_patch.start()
+ self.linux_source_mock = mock.Mock()
+ self.linux_source_mock.build_reconfig = mock.Mock(return_value=True)
+ self.linux_source_mock.build_um_kernel = mock.Mock(return_value=True)
+ self.linux_source_mock.run_kernel = mock.Mock(return_value=all_passed_log)
+
+ def tearDown(self):
+ self.print_patch.stop()
+
+ def test_run_passes_args_pass(self):
+ kunit.main(['run'], self.linux_source_mock)
+ assert self.linux_source_mock.build_reconfig.call_count == 1
+ assert self.linux_source_mock.run_kernel.call_count == 1
+ self.print_mock.assert_any_call(StrContains('Testing complete.'))
+
+ def test_run_passes_args_fail(self):
+ self.linux_source_mock.run_kernel = mock.Mock(return_value=[])
+ with self.assertRaises(SystemExit) as e:
+ kunit.main(['run'], self.linux_source_mock)
+ assert type(e.exception) == SystemExit
+ assert e.exception.code == 1
+ assert self.linux_source_mock.build_reconfig.call_count == 1
+ assert self.linux_source_mock.run_kernel.call_count == 1
+ self.print_mock.assert_any_call(StrContains(' 0 tests run'))
+
+ def test_run_raw_output(self):
+ self.linux_source_mock.run_kernel = mock.Mock(return_value=[])
+ kunit.main(['run', '--raw_output'], self.linux_source_mock)
+ assert self.linux_source_mock.build_reconfig.call_count == 1
+ assert self.linux_source_mock.run_kernel.call_count == 1
+ for kall in self.print_mock.call_args_list:
+ assert kall != mock.call(StrContains('Testing complete.'))
+ assert kall != mock.call(StrContains(' 0 tests run'))
+
+ def test_run_timeout(self):
+ timeout = 3453
+ kunit.main(['run', '--timeout', str(timeout)], self.linux_source_mock)
+ assert self.linux_source_mock.build_reconfig.call_count == 1
+ self.linux_source_mock.run_kernel.assert_called_once_with(timeout=timeout)
+ self.print_mock.assert_any_call(StrContains('Testing complete.'))
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/tools/testing/kunit/test_data/test_is_test_passed-all_passed.log b/tools/testing/kunit/test_data/test_is_test_passed-all_passed.log
new file mode 100644
index 000000000000..62ebc0288355
--- /dev/null
+++ b/tools/testing/kunit/test_data/test_is_test_passed-all_passed.log
@@ -0,0 +1,32 @@
+TAP version 14
+ # Subtest: sysctl_test
+ 1..8
+ # sysctl_test_dointvec_null_tbl_data: sysctl_test_dointvec_null_tbl_data passed
+ ok 1 - sysctl_test_dointvec_null_tbl_data
+ # sysctl_test_dointvec_table_maxlen_unset: sysctl_test_dointvec_table_maxlen_unset passed
+ ok 2 - sysctl_test_dointvec_table_maxlen_unset
+ # sysctl_test_dointvec_table_len_is_zero: sysctl_test_dointvec_table_len_is_zero passed
+ ok 3 - sysctl_test_dointvec_table_len_is_zero
+ # sysctl_test_dointvec_table_read_but_position_set: sysctl_test_dointvec_table_read_but_position_set passed
+ ok 4 - sysctl_test_dointvec_table_read_but_position_set
+ # sysctl_test_dointvec_happy_single_positive: sysctl_test_dointvec_happy_single_positive passed
+ ok 5 - sysctl_test_dointvec_happy_single_positive
+ # sysctl_test_dointvec_happy_single_negative: sysctl_test_dointvec_happy_single_negative passed
+ ok 6 - sysctl_test_dointvec_happy_single_negative
+ # sysctl_test_dointvec_single_less_int_min: sysctl_test_dointvec_single_less_int_min passed
+ ok 7 - sysctl_test_dointvec_single_less_int_min
+ # sysctl_test_dointvec_single_greater_int_max: sysctl_test_dointvec_single_greater_int_max passed
+ ok 8 - sysctl_test_dointvec_single_greater_int_max
+kunit sysctl_test: all tests passed
+ok 1 - sysctl_test
+ # Subtest: example
+ 1..2
+init_suite
+ # example_simple_test: initializing
+ # example_simple_test: example_simple_test passed
+ ok 1 - example_simple_test
+ # example_mock_test: initializing
+ # example_mock_test: example_mock_test passed
+ ok 2 - example_mock_test
+kunit example: all tests passed
+ok 2 - example
diff --git a/tools/testing/kunit/test_data/test_is_test_passed-crash.log b/tools/testing/kunit/test_data/test_is_test_passed-crash.log
new file mode 100644
index 000000000000..0b249870c8be
--- /dev/null
+++ b/tools/testing/kunit/test_data/test_is_test_passed-crash.log
@@ -0,0 +1,69 @@
+printk: console [tty0] enabled
+printk: console [mc-1] enabled
+TAP version 14
+ # Subtest: sysctl_test
+ 1..8
+ # sysctl_test_dointvec_null_tbl_data: sysctl_test_dointvec_null_tbl_data passed
+ ok 1 - sysctl_test_dointvec_null_tbl_data
+ # sysctl_test_dointvec_table_maxlen_unset: sysctl_test_dointvec_table_maxlen_unset passed
+ ok 2 - sysctl_test_dointvec_table_maxlen_unset
+ # sysctl_test_dointvec_table_len_is_zero: sysctl_test_dointvec_table_len_is_zero passed
+ ok 3 - sysctl_test_dointvec_table_len_is_zero
+ # sysctl_test_dointvec_table_read_but_position_set: sysctl_test_dointvec_table_read_but_position_set passed
+ ok 4 - sysctl_test_dointvec_table_read_but_position_set
+ # sysctl_test_dointvec_happy_single_positive: sysctl_test_dointvec_happy_single_positive passed
+ ok 5 - sysctl_test_dointvec_happy_single_positive
+ # sysctl_test_dointvec_happy_single_negative: sysctl_test_dointvec_happy_single_negative passed
+ ok 6 - sysctl_test_dointvec_happy_single_negative
+ # sysctl_test_dointvec_single_less_int_min: sysctl_test_dointvec_single_less_int_min passed
+ ok 7 - sysctl_test_dointvec_single_less_int_min
+ # sysctl_test_dointvec_single_greater_int_max: sysctl_test_dointvec_single_greater_int_max passed
+ ok 8 - sysctl_test_dointvec_single_greater_int_max
+kunit sysctl_test: all tests passed
+ok 1 - sysctl_test
+ # Subtest: example
+ 1..2
+init_suite
+ # example_simple_test: initializing
+Stack:
+ 6016f7db 6f81bd30 6f81bdd0 60021450
+ 6024b0e8 60021440 60018bbe 16f81bdc0
+ 00000001 6f81bd30 6f81bd20 6f81bdd0
+Call Trace:
+ [<6016f7db>] ? kunit_try_run_case+0xab/0xf0
+ [<60021450>] ? set_signals+0x0/0x60
+ [<60021440>] ? get_signals+0x0/0x10
+ [<60018bbe>] ? kunit_um_run_try_catch+0x5e/0xc0
+ [<60021450>] ? set_signals+0x0/0x60
+ [<60021440>] ? get_signals+0x0/0x10
+ [<60018bb3>] ? kunit_um_run_try_catch+0x53/0xc0
+ [<6016f321>] ? kunit_run_case_catch_errors+0x121/0x1a0
+ [<60018b60>] ? kunit_um_run_try_catch+0x0/0xc0
+ [<600189e0>] ? kunit_um_throw+0x0/0x180
+ [<6016f730>] ? kunit_try_run_case+0x0/0xf0
+ [<6016f600>] ? kunit_catch_run_case+0x0/0x130
+ [<6016edd0>] ? kunit_vprintk+0x0/0x30
+ [<6016ece0>] ? kunit_fail+0x0/0x40
+ [<6016eca0>] ? kunit_abort+0x0/0x40
+ [<6016ed20>] ? kunit_printk_emit+0x0/0xb0
+ [<6016f200>] ? kunit_run_case_catch_errors+0x0/0x1a0
+ [<6016f46e>] ? kunit_run_tests+0xce/0x260
+ [<6005b390>] ? unregister_console+0x0/0x190
+ [<60175b70>] ? suite_kunit_initexample_test_suite+0x0/0x20
+ [<60001cbb>] ? do_one_initcall+0x0/0x197
+ [<60001d47>] ? do_one_initcall+0x8c/0x197
+ [<6005cd20>] ? irq_to_desc+0x0/0x30
+ [<60002005>] ? kernel_init_freeable+0x1b3/0x272
+ [<6005c5ec>] ? printk+0x0/0x9b
+ [<601c0086>] ? kernel_init+0x26/0x160
+ [<60014442>] ? new_thread_handler+0x82/0xc0
+
+ # example_simple_test: kunit test case crashed!
+ # example_simple_test: example_simple_test failed
+ not ok 1 - example_simple_test
+ # example_mock_test: initializing
+ # example_mock_test: example_mock_test passed
+ ok 2 - example_mock_test
+kunit example: one or more tests failed
+not ok 2 - example
+List of all partitions:
diff --git a/tools/testing/kunit/test_data/test_is_test_passed-failure.log b/tools/testing/kunit/test_data/test_is_test_passed-failure.log
new file mode 100644
index 000000000000..9e89d32d5667
--- /dev/null
+++ b/tools/testing/kunit/test_data/test_is_test_passed-failure.log
@@ -0,0 +1,36 @@
+TAP version 14
+ # Subtest: sysctl_test
+ 1..8
+ # sysctl_test_dointvec_null_tbl_data: sysctl_test_dointvec_null_tbl_data passed
+ ok 1 - sysctl_test_dointvec_null_tbl_data
+ # sysctl_test_dointvec_table_maxlen_unset: sysctl_test_dointvec_table_maxlen_unset passed
+ ok 2 - sysctl_test_dointvec_table_maxlen_unset
+ # sysctl_test_dointvec_table_len_is_zero: sysctl_test_dointvec_table_len_is_zero passed
+ ok 3 - sysctl_test_dointvec_table_len_is_zero
+ # sysctl_test_dointvec_table_read_but_position_set: sysctl_test_dointvec_table_read_but_position_set passed
+ ok 4 - sysctl_test_dointvec_table_read_but_position_set
+ # sysctl_test_dointvec_happy_single_positive: sysctl_test_dointvec_happy_single_positive passed
+ ok 5 - sysctl_test_dointvec_happy_single_positive
+ # sysctl_test_dointvec_happy_single_negative: sysctl_test_dointvec_happy_single_negative passed
+ ok 6 - sysctl_test_dointvec_happy_single_negative
+ # sysctl_test_dointvec_single_less_int_min: sysctl_test_dointvec_single_less_int_min passed
+ ok 7 - sysctl_test_dointvec_single_less_int_min
+ # sysctl_test_dointvec_single_greater_int_max: sysctl_test_dointvec_single_greater_int_max passed
+ ok 8 - sysctl_test_dointvec_single_greater_int_max
+kunit sysctl_test: all tests passed
+ok 1 - sysctl_test
+ # Subtest: example
+ 1..2
+init_suite
+ # example_simple_test: initializing
+ # example_simple_test: EXPECTATION FAILED at lib/kunit/example-test.c:30
+ Expected 1 + 1 == 3, but
+ 1 + 1 == 2
+ 3 == 3
+ # example_simple_test: example_simple_test failed
+ not ok 1 - example_simple_test
+ # example_mock_test: initializing
+ # example_mock_test: example_mock_test passed
+ ok 2 - example_mock_test
+kunit example: one or more tests failed
+not ok 2 - example
diff --git a/tools/testing/kunit/test_data/test_is_test_passed-no_tests_run.log b/tools/testing/kunit/test_data/test_is_test_passed-no_tests_run.log
new file mode 100644
index 000000000000..ba69f5c94b75
--- /dev/null
+++ b/tools/testing/kunit/test_data/test_is_test_passed-no_tests_run.log
@@ -0,0 +1,75 @@
+Core dump limits :
+ soft - 0
+ hard - NONE
+Checking environment variables for a tempdir...none found
+Checking if /dev/shm is on tmpfs...OK
+Checking PROT_EXEC mmap in /dev/shm...OK
+Adding 24743936 bytes to physical memory to account for exec-shield gap
+Linux version 4.12.0-rc3-00010-g7319eb35f493-dirty (brendanhiggins@mactruck.svl.corp.google.com) (gcc version 7.3.0 (Debian 7.3.0-5) ) #29 Thu Mar 15 14:57:19 PDT 2018
+Built 1 zonelists in Zone order, mobility grouping on. Total pages: 14038
+Kernel command line: root=98:0
+PID hash table entries: 256 (order: -1, 2048 bytes)
+Dentry cache hash table entries: 8192 (order: 4, 65536 bytes)
+Inode-cache hash table entries: 4096 (order: 3, 32768 bytes)
+Memory: 27868K/56932K available (1681K kernel code, 480K rwdata, 400K rodata, 89K init, 205K bss, 29064K reserved, 0K cma-reserved)
+SLUB: HWalign=64, Order=0-3, MinObjects=0, CPUs=1, Nodes=1
+NR_IRQS:15
+clocksource: timer: mask: 0xffffffffffffffff max_cycles: 0x1cd42e205, max_idle_ns: 881590404426 ns
+Calibrating delay loop... 7384.26 BogoMIPS (lpj=36921344)
+pid_max: default: 32768 minimum: 301
+Mount-cache hash table entries: 512 (order: 0, 4096 bytes)
+Mountpoint-cache hash table entries: 512 (order: 0, 4096 bytes)
+Checking that host ptys support output SIGIO...Yes
+Checking that host ptys support SIGIO on close...No, enabling workaround
+Using 2.6 host AIO
+clocksource: jiffies: mask: 0xffffffff max_cycles: 0xffffffff, max_idle_ns: 19112604462750000 ns
+futex hash table entries: 256 (order: 0, 6144 bytes)
+clocksource: Switched to clocksource timer
+console [stderr0] disabled
+mconsole (version 2) initialized on /usr/local/google/home/brendanhiggins/.uml/6Ijecl/mconsole
+Checking host MADV_REMOVE support...OK
+workingset: timestamp_bits=62 max_order=13 bucket_order=0
+Block layer SCSI generic (bsg) driver version 0.4 loaded (major 254)
+io scheduler noop registered
+io scheduler deadline registered
+io scheduler cfq registered (default)
+io scheduler mq-deadline registered
+io scheduler kyber registered
+Initialized stdio console driver
+Using a channel type which is configured out of UML
+setup_one_line failed for device 1 : Configuration failed
+Using a channel type which is configured out of UML
+setup_one_line failed for device 2 : Configuration failed
+Using a channel type which is configured out of UML
+setup_one_line failed for device 3 : Configuration failed
+Using a channel type which is configured out of UML
+setup_one_line failed for device 4 : Configuration failed
+Using a channel type which is configured out of UML
+setup_one_line failed for device 5 : Configuration failed
+Using a channel type which is configured out of UML
+setup_one_line failed for device 6 : Configuration failed
+Using a channel type which is configured out of UML
+setup_one_line failed for device 7 : Configuration failed
+Using a channel type which is configured out of UML
+setup_one_line failed for device 8 : Configuration failed
+Using a channel type which is configured out of UML
+setup_one_line failed for device 9 : Configuration failed
+Using a channel type which is configured out of UML
+setup_one_line failed for device 10 : Configuration failed
+Using a channel type which is configured out of UML
+setup_one_line failed for device 11 : Configuration failed
+Using a channel type which is configured out of UML
+setup_one_line failed for device 12 : Configuration failed
+Using a channel type which is configured out of UML
+setup_one_line failed for device 13 : Configuration failed
+Using a channel type which is configured out of UML
+setup_one_line failed for device 14 : Configuration failed
+Using a channel type which is configured out of UML
+setup_one_line failed for device 15 : Configuration failed
+Console initialized on /dev/tty0
+console [tty0] enabled
+console [mc-1] enabled
+List of all partitions:
+No filesystem could mount root, tried:
+
+Kernel panic - not syncing: VFS: Unable to mount root fs on unknown-block(98,0)
diff --git a/tools/testing/kunit/test_data/test_output_isolated_correctly.log b/tools/testing/kunit/test_data/test_output_isolated_correctly.log
new file mode 100644
index 000000000000..94a6b3aeaa92
--- /dev/null
+++ b/tools/testing/kunit/test_data/test_output_isolated_correctly.log
@@ -0,0 +1,106 @@
+Linux version 5.1.0-rc7-00061-g04652f1cb4aa0 (brendanhiggins@mactruck.svl.corp.google.com) (gcc version 7.3.0 (Debian 7.3.0-18)) #163 Wed May 8 16:18:20 PDT 2019
+Built 1 zonelists, mobility grouping on. Total pages: 69906
+Kernel command line: mem=256M root=98:0
+Dentry cache hash table entries: 65536 (order: 7, 524288 bytes)
+Inode-cache hash table entries: 32768 (order: 6, 262144 bytes)
+Memory: 254468K/283500K available (1734K kernel code, 489K rwdata, 396K rodata, 85K init, 216K bss, 29032K reserved, 0K cma-reserved)
+SLUB: HWalign=64, Order=0-3, MinObjects=0, CPUs=1, Nodes=1
+NR_IRQS: 15
+clocksource: timer: mask: 0xffffffffffffffff max_cycles: 0x1cd42e205, max_idle_ns: 881590404426 ns
+------------[ cut here ]------------
+WARNING: CPU: 0 PID: 0 at kernel/time/clockevents.c:458 clockevents_register_device+0x143/0x160
+posix-timer cpumask == cpu_all_mask, using cpu_possible_mask instead
+CPU: 0 PID: 0 Comm: swapper Not tainted 5.1.0-rc7-00061-g04652f1cb4aa0 #163
+Stack:
+ 6005cc00 60233e18 60233e60 60233e18
+ 60233e60 00000009 00000000 6002a1b4
+ 1ca00000000 60071c23 60233e78 100000000000062
+Call Trace:
+ [<600214c5>] ? os_is_signal_stack+0x15/0x30
+ [<6005c5ec>] ? printk+0x0/0x9b
+ [<6001597e>] ? show_stack+0xbe/0x1c0
+ [<6005cc00>] ? __printk_safe_exit+0x0/0x40
+ [<6002a1b4>] ? __warn+0x144/0x170
+ [<60071c23>] ? clockevents_register_device+0x143/0x160
+ [<60021440>] ? get_signals+0x0/0x10
+ [<6005c5ec>] ? printk+0x0/0x9b
+ [<6002a27b>] ? warn_slowpath_fmt+0x9b/0xb0
+ [<6005c5ec>] ? printk+0x0/0x9b
+ [<6002a1e0>] ? warn_slowpath_fmt+0x0/0xb0
+ [<6005c5ec>] ? printk+0x0/0x9b
+ [<60021440>] ? get_signals+0x0/0x10
+ [<600213f0>] ? block_signals+0x0/0x20
+ [<60071c23>] ? clockevents_register_device+0x143/0x160
+ [<60021440>] ? get_signals+0x0/0x10
+ [<600213f0>] ? block_signals+0x0/0x20
+ [<6005c5ec>] ? printk+0x0/0x9b
+ [<60001bc8>] ? start_kernel+0x477/0x56a
+ [<600036f1>] ? start_kernel_proc+0x46/0x4d
+ [<60014442>] ? new_thread_handler+0x82/0xc0
+
+random: get_random_bytes called from print_oops_end_marker+0x4c/0x60 with crng_init=0
+---[ end trace c83434852b3702d3 ]---
+Calibrating delay loop... 6958.28 BogoMIPS (lpj=34791424)
+pid_max: default: 32768 minimum: 301
+Mount-cache hash table entries: 1024 (order: 1, 8192 bytes)
+Mountpoint-cache hash table entries: 1024 (order: 1, 8192 bytes)
+*** VALIDATE proc ***
+Checking that host ptys support output SIGIO...Yes
+Checking that host ptys support SIGIO on close...No, enabling workaround
+clocksource: jiffies: mask: 0xffffffff max_cycles: 0xffffffff, max_idle_ns: 19112604462750000 ns
+futex hash table entries: 256 (order: 0, 6144 bytes)
+clocksource: Switched to clocksource timer
+printk: console [stderr0] disabled
+mconsole (version 2) initialized on /usr/local/google/home/brendanhiggins/.uml/VZ2qMm/mconsole
+Checking host MADV_REMOVE support...OK
+workingset: timestamp_bits=62 max_order=16 bucket_order=0
+Block layer SCSI generic (bsg) driver version 0.4 loaded (major 254)
+io scheduler mq-deadline registered
+io scheduler kyber registered
+Initialized stdio console driver
+Using a channel type which is configured out of UML
+setup_one_line failed for device 1 : Configuration failed
+Using a channel type which is configured out of UML
+setup_one_line failed for device 2 : Configuration failed
+Using a channel type which is configured out of UML
+setup_one_line failed for device 3 : Configuration failed
+Using a channel type which is configured out of UML
+setup_one_line failed for device 4 : Configuration failed
+Using a channel type which is configured out of UML
+setup_one_line failed for device 5 : Configuration failed
+Using a channel type which is configured out of UML
+setup_one_line failed for device 6 : Configuration failed
+Using a channel type which is configured out of UML
+setup_one_line failed for device 7 : Configuration failed
+Using a channel type which is configured out of UML
+setup_one_line failed for device 8 : Configuration failed
+Using a channel type which is configured out of UML
+setup_one_line failed for device 9 : Configuration failed
+Using a channel type which is configured out of UML
+setup_one_line failed for device 10 : Configuration failed
+Using a channel type which is configured out of UML
+setup_one_line failed for device 11 : Configuration failed
+Using a channel type which is configured out of UML
+setup_one_line failed for device 12 : Configuration failed
+Using a channel type which is configured out of UML
+setup_one_line failed for device 13 : Configuration failed
+Using a channel type which is configured out of UML
+setup_one_line failed for device 14 : Configuration failed
+Using a channel type which is configured out of UML
+setup_one_line failed for device 15 : Configuration failed
+Console initialized on /dev/tty0
+printk: console [tty0] enabled
+printk: console [mc-1] enabled
+TAP version 14
+ # Subtest: example
+ 1..2
+init_suite
+ # example_simple_test: initializing
+ # example_simple_test: example_simple_test passed
+ ok 1 - example_simple_test
+ # example_mock_test: initializing
+ # example_mock_test: example_mock_test passed
+ ok 2 - example_mock_test
+kunit example: all tests passed
+ok 1 - example
+List of all partitions:
diff --git a/tools/testing/kunit/test_data/test_read_from_file.kconfig b/tools/testing/kunit/test_data/test_read_from_file.kconfig
new file mode 100644
index 000000000000..d2a4928ac773
--- /dev/null
+++ b/tools/testing/kunit/test_data/test_read_from_file.kconfig
@@ -0,0 +1,17 @@
+#
+# Automatically generated file; DO NOT EDIT.
+# User Mode Linux/x86 4.12.0-rc3 Kernel Configuration
+#
+CONFIG_UML=y
+CONFIG_MMU=y
+
+#
+# UML-specific options
+#
+
+#
+# Host processor type and features
+#
+# CONFIG_MK8 is not set
+CONFIG_TEST=y
+CONFIG_EXAMPLE_TEST=y
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index 4cdbae6f4e61..0fb95f25944d 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -1,9 +1,11 @@
# SPDX-License-Identifier: GPL-2.0
TARGETS = android
+TARGETS += arm64
TARGETS += bpf
TARGETS += breakpoints
TARGETS += capabilities
TARGETS += cgroup
+TARGETS += clone3
TARGETS += cpufreq
TARGETS += cpu-hotplug
TARGETS += drivers/dma-buf
@@ -86,10 +88,10 @@ override LDFLAGS =
endif
ifneq ($(O),)
- BUILD := $(O)
+ BUILD := $(abs_objtree)
else
ifneq ($(KBUILD_OUTPUT),)
- BUILD := $(KBUILD_OUTPUT)/kselftest
+ BUILD := $(abs_objtree)/kselftest
else
BUILD := $(shell pwd)
DEFAULT_INSTALL_HDR_PATH := 1
@@ -102,6 +104,7 @@ include $(top_srcdir)/scripts/subarch.include
ARCH ?= $(SUBARCH)
export KSFT_KHDR_INSTALL_DONE := 1
export BUILD
+#$(info abs_objtree = $(abs_objtree) BUILD = $(BUILD))
# build and run gpio when output directory is the src dir.
# gpio has dependency on tools/gpio and builds tools/gpio
@@ -190,6 +193,7 @@ install: all
ifdef INSTALL_PATH
@# Ask all targets to install their files
mkdir -p $(INSTALL_PATH)/kselftest
+ install -m 744 kselftest/module.sh $(INSTALL_PATH)/kselftest/
install -m 744 kselftest/runner.sh $(INSTALL_PATH)/kselftest/
install -m 744 kselftest/prefix.pl $(INSTALL_PATH)/kselftest/
@for TARGET in $(TARGETS); do \
@@ -213,7 +217,7 @@ ifdef INSTALL_PATH
@# included in the generated runlist.
for TARGET in $(TARGETS); do \
BUILD_TARGET=$$BUILD/$$TARGET; \
- [ ! -d $$INSTALL_PATH/$$TARGET ] && echo "Skipping non-existent dir: $$TARGET" && continue; \
+ [ ! -d $(INSTALL_PATH)/$$TARGET ] && echo "Skipping non-existent dir: $$TARGET" && continue; \
echo "[ -w /dev/kmsg ] && echo \"kselftest: Running tests in $$TARGET\" >> /dev/kmsg" >> $(ALL_SCRIPT); \
echo "cd $$TARGET" >> $(ALL_SCRIPT); \
echo -n "run_many" >> $(ALL_SCRIPT); \
diff --git a/tools/testing/selftests/arm64/Makefile b/tools/testing/selftests/arm64/Makefile
index f9f79fb272f0..93b567d23c8b 100644
--- a/tools/testing/selftests/arm64/Makefile
+++ b/tools/testing/selftests/arm64/Makefile
@@ -1,12 +1,66 @@
# SPDX-License-Identifier: GPL-2.0
-# ARCH can be overridden by the user for cross compiling
+# When ARCH is not overridden for cross-compiling, look up the machine
ARCH ?= $(shell uname -m 2>/dev/null || echo not)
ifneq (,$(filter $(ARCH),aarch64 arm64))
-CFLAGS += -I../../../../usr/include/
-TEST_GEN_PROGS := tags_test
-TEST_PROGS := run_tags_test.sh
+ARM64_SUBTARGETS ?= tags signal
+else
+ARM64_SUBTARGETS :=
endif
-include ../lib.mk
+CFLAGS := -Wall -O2 -g
+
+# A proper top_srcdir is needed by KSFT(lib.mk)
+top_srcdir = $(realpath ../../../../)
+
+# Additional include paths needed by kselftest.h and local headers
+CFLAGS += -I$(top_srcdir)/tools/testing/selftests/
+
+# Guessing where the Kernel headers could have been installed
+# depending on ENV config
+ifeq ($(KBUILD_OUTPUT),)
+khdr_dir = $(top_srcdir)/usr/include
+else
+# the KSFT preferred location when KBUILD_OUTPUT is set
+khdr_dir = $(KBUILD_OUTPUT)/kselftest/usr/include
+endif
+
+CFLAGS += -I$(khdr_dir)
+
+export CFLAGS
+export top_srcdir
+
+all:
+ @for DIR in $(ARM64_SUBTARGETS); do \
+ BUILD_TARGET=$(OUTPUT)/$$DIR; \
+ mkdir -p $$BUILD_TARGET; \
+ make OUTPUT=$$BUILD_TARGET -C $$DIR $@; \
+ done
+
+install: all
+ @for DIR in $(ARM64_SUBTARGETS); do \
+ BUILD_TARGET=$(OUTPUT)/$$DIR; \
+ make OUTPUT=$$BUILD_TARGET -C $$DIR $@; \
+ done
+
+run_tests: all
+ @for DIR in $(ARM64_SUBTARGETS); do \
+ BUILD_TARGET=$(OUTPUT)/$$DIR; \
+ make OUTPUT=$$BUILD_TARGET -C $$DIR $@; \
+ done
+
+# Avoid any output on non-arm64 when running emit_tests
+emit_tests: all
+ @for DIR in $(ARM64_SUBTARGETS); do \
+ BUILD_TARGET=$(OUTPUT)/$$DIR; \
+ make OUTPUT=$$BUILD_TARGET -C $$DIR $@; \
+ done
+
+clean:
+ @for DIR in $(ARM64_SUBTARGETS); do \
+ BUILD_TARGET=$(OUTPUT)/$$DIR; \
+ make OUTPUT=$$BUILD_TARGET -C $$DIR $@; \
+ done
+
+.PHONY: all clean install run_tests emit_tests
diff --git a/tools/testing/selftests/arm64/README b/tools/testing/selftests/arm64/README
new file mode 100644
index 000000000000..a1badd882102
--- /dev/null
+++ b/tools/testing/selftests/arm64/README
@@ -0,0 +1,25 @@
+KSelfTest ARM64
+===============
+
+- These tests are arm64-specific and so are not built or run, but skipped
+ completely, when the env-variable ARCH is set to anything other than
+ 'arm64' and `uname -m` reports other than 'aarch64'.
+
+- Provided the above holds, the arm64 KSFT tests can be run within the
+ KSelfTest framework using the standard Linux top-level makefile targets:
+
+ $ make TARGETS=arm64 kselftest-clean
+ $ make TARGETS=arm64 kselftest
+
+ or
+
+ $ make -C tools/testing/selftests TARGETS=arm64 \
+ INSTALL_PATH=<your-installation-path> install
+
+ or, alternatively, only specific arm64/ subtargets can be picked:
+
+ $ make -C tools/testing/selftests TARGETS=arm64 ARM64_SUBTARGETS="tags signal" \
+ INSTALL_PATH=<your-installation-path> install
+
+ Further details on building and running KSFT can be found in:
+ Documentation/dev-tools/kselftest.rst
diff --git a/tools/testing/selftests/arm64/signal/.gitignore b/tools/testing/selftests/arm64/signal/.gitignore
new file mode 100644
index 000000000000..3c5b4e8ff894
--- /dev/null
+++ b/tools/testing/selftests/arm64/signal/.gitignore
@@ -0,0 +1,3 @@
+mangle_*
+fake_sigreturn_*
+!*.[ch]
diff --git a/tools/testing/selftests/arm64/signal/Makefile b/tools/testing/selftests/arm64/signal/Makefile
new file mode 100644
index 000000000000..b497cfea4643
--- /dev/null
+++ b/tools/testing/selftests/arm64/signal/Makefile
@@ -0,0 +1,32 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (C) 2019 ARM Limited
+
+# Additional include paths needed by kselftest.h and local headers
+CFLAGS += -D_GNU_SOURCE -std=gnu99 -I.
+
+SRCS := $(filter-out testcases/testcases.c,$(wildcard testcases/*.c))
+PROGS := $(patsubst %.c,%,$(SRCS))
+
+# Generated binaries to be installed by top KSFT script
+TEST_GEN_PROGS := $(notdir $(PROGS))
+
+# Get Kernel headers installed and use them.
+KSFT_KHDR_INSTALL := 1
+
+# Including KSFT lib.mk here will also mangle the TEST_GEN_PROGS list
+# to account for any OUTPUT target-dirs optionally provided by
+# the toplevel makefile
+include ../../lib.mk
+
+$(TEST_GEN_PROGS): $(PROGS)
+ cp $(PROGS) $(OUTPUT)/
+
+clean:
+ $(CLEAN)
+ rm -f $(PROGS)
+
+# Common test-unit targets to build common-layout test-cases executables
+# Needs secondary expansion to properly include the testcase c-file in pre-reqs
+.SECONDEXPANSION:
+$(PROGS): test_signals.c test_signals_utils.c testcases/testcases.c signals.S $$@.c test_signals.h test_signals_utils.h testcases/testcases.h
+ $(CC) $(CFLAGS) $^ -o $@
diff --git a/tools/testing/selftests/arm64/signal/README b/tools/testing/selftests/arm64/signal/README
new file mode 100644
index 000000000000..967a531b245c
--- /dev/null
+++ b/tools/testing/selftests/arm64/signal/README
@@ -0,0 +1,59 @@
+KSelfTest arm64/signal/
+=======================
+
+Signals Tests
++++++++++++++
+
+- Tests are built around a common main compilation unit: this shared main
+ enforces the standard sequence of operations needed to perform a single
+ signal test (setup/trigger/run/result/cleanup)
+
+- The above-mentioned ops are configurable on a test-by-test basis: each test
+ is described (and configured) using the descriptor test_signals.h::struct tdescr
+
+- Each signal testcase is compiled into its own executable: a separate
+ executable is used for each test since many tests complete successfully
+ by receiving some kind of fatal signal from the Kernel, so it's safer
+ to run each test unit in its own standalone process, so as to start each
+ test from a clean slate.
+
+- New tests can simply be defined in the testcases/ dir by providing a proper
+ struct tdescr that overrides whichever defaults we wish to change (as of now,
+ providing a custom run method is mandatory, though)
+
+- Signals' test-cases hereafter defined belong currently to two
+ principal families:
+
+ - 'mangle_' tests: a real signal (SIGUSR1) is raised and used as a trigger
+ and then the test case code modifies the signal frame from inside the
+ signal handler itself.
+
+ - 'fake_sigreturn_' tests: a brand new custom artificial sigframe structure
+ is placed on the stack and a sigreturn syscall is called to simulate a
+ real signal return. These tests usually do not use a trigger; they are
+ just fired using some simple included assembly trampoline code.
+
+ - Most of these tests pass successfully if the process gets killed by
+ some fatal signal: usually SIGSEGV or SIGBUS. While writing this kind
+ of test it is in fact extremely easy to end up injecting other,
+ unrelated SEGV bugs into the testcases, so it becomes tricky to be
+ really sure that the tests address what they are meant to address and
+ are not instead falling apart due to unplanned bugs in the test code.
+ To ease the life of such a test-developer, a few helpers are provided:
+
+ - a couple of ASSERT_BAD/GOOD_CONTEXT() macros to easily parse a ucontext_t
+ and verify if it is indeed GOOD or BAD (depending on what we were
+ expecting), using the same logic/perspective as in the arm64 Kernel signals
+ routines.
+
+ - a sanity mechanism to be used in 'fake_sigreturn_'-like tests: enabled by
+ default, it verifies that the test execution has at least successfully
+ progressed up to the stage of triggering the fake sigreturn call.
+
+ In both cases test results are expected in terms of:
+ - some fatal signal sent by the Kernel to the test process
+ or
+ - analyzing some final regs state
diff --git a/tools/testing/selftests/arm64/signal/signals.S b/tools/testing/selftests/arm64/signal/signals.S
new file mode 100644
index 000000000000..9f8c1aefc3b9
--- /dev/null
+++ b/tools/testing/selftests/arm64/signal/signals.S
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2019 ARM Limited */
+
+#include <asm/unistd.h>
+
+.section .rodata, "a"
+call_fmt:
+ .asciz "Calling sigreturn with fake sigframe sized:%zd at SP @%08lX\n"
+
+.text
+
+.globl fake_sigreturn
+
+/* fake_sigreturn x0:&sigframe, x1:sigframe_size, x2:misalign_bytes */
+fake_sigreturn:
+ stp x29, x30, [sp, #-16]!
+ mov x29, sp
+
+ mov x20, x0
+ mov x21, x1
+ mov x22, x2
+
+ /* create 16-byte aligned space on the stack for the fake sigframe */
+ add x0, x21, x22
+ add x0, x0, #15
+ bic x0, x0, #15 /* round_up(sigframe_size + misalign_bytes, 16) */
+ sub sp, sp, x0
+ add x23, sp, x22 /* new sigframe base with misalignment if any */
+
+ ldr x0, =call_fmt
+ mov x1, x21
+ mov x2, x23
+ bl printf
+
+ /* memcpy the provided content, while still keeping SP aligned */
+ mov x0, x23
+ mov x1, x20
+ mov x2, x21
+ bl memcpy
+
+ /*
+ * Here saving a last minute SP to current->token acts as a marker:
+ * if we got here, we are successfully faking a sigreturn; in other
+ * words we are sure no bad fatal signal has been raised till now
+ * for unrelated reasons, so we should consider the possibly observed
+ * fatal signal like SEGV coming from Kernel restore_sigframe() and
+ * triggered as expected from our test-case.
+ * For simplicity this assumes that current field 'token' is laid out
+ * as first in struct tdescr
+ */
+ ldr x0, current
+ str x23, [x0]
+ /* finally move SP to the misaligned address...if any was requested */
+ mov sp, x23
+
+ mov x8, #__NR_rt_sigreturn
+ svc #0
+
+ /*
+ * The above sigreturn should not return...looping here leads to a timeout
+ * and ensures a proper and clean test failure, instead of jumping around
+ * on a potentially corrupted stack.
+ */
+ b .
diff --git a/tools/testing/selftests/arm64/signal/test_signals.c b/tools/testing/selftests/arm64/signal/test_signals.c
new file mode 100644
index 000000000000..416b1ff43199
--- /dev/null
+++ b/tools/testing/selftests/arm64/signal/test_signals.c
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 ARM Limited
+ *
+ * Generic test wrapper for arm64 signal tests.
+ *
+ * Each test provides its own "tde" struct tdescr descriptor to link with
+ * this wrapper. The framework provides common helpers.
+ */
+#include <kselftest.h>
+
+#include "test_signals.h"
+#include "test_signals_utils.h"
+
+struct tdescr *current;
+
+int main(int argc, char *argv[])
+{
+ current = &tde;
+
+ ksft_print_msg("%s :: %s\n", current->name, current->descr);
+ if (test_setup(current) && test_init(current)) {
+ test_run(current);
+ test_cleanup(current);
+ }
+ test_result(current);
+
+ return current->result;
+}
diff --git a/tools/testing/selftests/arm64/signal/test_signals.h b/tools/testing/selftests/arm64/signal/test_signals.h
new file mode 100644
index 000000000000..f96baf1cef1a
--- /dev/null
+++ b/tools/testing/selftests/arm64/signal/test_signals.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2019 ARM Limited */
+
+#ifndef __TEST_SIGNALS_H__
+#define __TEST_SIGNALS_H__
+
+#include <signal.h>
+#include <stdbool.h>
+#include <ucontext.h>
+
+/*
+ * Use the ARCH specific and sanitized Kernel headers installed by the
+ * KSFT framework, as requested by setting the KSFT_KHDR_INSTALL flag
+ * in our Makefile.
+ */
+#include <asm/ptrace.h>
+#include <asm/hwcap.h>
+
+#define __stringify_1(x...) #x
+#define __stringify(x...) __stringify_1(x)
+
+#define get_regval(regname, out) \
+{ \
+ asm volatile("mrs %0, " __stringify(regname) \
+ : "=r" (out) \
+ : \
+ : "memory"); \
+}
+
+/*
+ * Feature flags used in tdescr.feats_required to specify
+ * the features required by the test
+ */
+enum {
+ FSSBS_BIT,
+ FMAX_END
+};
+
+#define FEAT_SSBS (1UL << FSSBS_BIT)
+
+/*
+ * A descriptor used to describe and configure a test case.
+ * Fields with a non-trivial meaning are described inline in the following.
+ */
+struct tdescr {
+ /* KEEP THIS FIELD FIRST for easier lookup from assembly */
+ void *token;
+	/* when true, token-based sanity checking is skipped in the handler */
+ bool sanity_disabled;
+	/* just a name for the test-case; mandatory field */
+ char *name;
+ char *descr;
+ unsigned long feats_required;
+ /* bitmask of effectively supported feats: populated at run-time */
+ unsigned long feats_supported;
+ bool initialized;
+ unsigned int minsigstksz;
+ /* signum used as a test trigger. Zero if no trigger-signal is used */
+ int sig_trig;
+ /*
+ * signum considered as a successful test completion.
+ * Zero when no signal is expected on success
+ */
+ int sig_ok;
+ /* signum expected on unsupported CPU features. */
+ int sig_unsupp;
+	/* a timeout, in seconds, for test completion */
+ unsigned int timeout;
+ bool triggered;
+ bool pass;
+ unsigned int result;
+ /* optional sa_flags for the installed handler */
+ int sa_flags;
+ ucontext_t saved_uc;
+ /* used by get_current_ctx() */
+ size_t live_sz;
+ ucontext_t *live_uc;
+ volatile sig_atomic_t live_uc_valid;
+ /* optional test private data */
+ void *priv;
+
+	/* a custom setup: called instead of default_setup */
+ int (*setup)(struct tdescr *td);
+ /* a custom init: called by default test init after test_setup */
+ bool (*init)(struct tdescr *td);
+ /* a custom cleanup function called before test exits */
+ void (*cleanup)(struct tdescr *td);
+ /* an optional function to be used as a trigger for starting test */
+ int (*trigger)(struct tdescr *td);
+ /*
+ * the actual test-core: invoked differently depending on the
+ * presence of the trigger function above; this is mandatory
+ */
+ int (*run)(struct tdescr *td, siginfo_t *si, ucontext_t *uc);
+	/* an optional function for custom result processing */
+ void (*check_result)(struct tdescr *td);
+};
+
+extern struct tdescr tde;
+#endif
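As a side note, the get_regval() macro above reads a system register with an MRS instruction; a usage sketch (the choice of ID_AA64ISAR0_EL1, an ID register that the arm64 Kernel traps and emulates for EL0, is an assumption of this example):

unsigned long isar0;

/* expands to: mrs %0, ID_AA64ISAR0_EL1 -- trapped and emulated by Kernel */
get_regval(ID_AA64ISAR0_EL1, isar0);
printf("ID_AA64ISAR0_EL1: 0x%016lx\n", isar0);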
diff --git a/tools/testing/selftests/arm64/signal/test_signals_utils.c b/tools/testing/selftests/arm64/signal/test_signals_utils.c
new file mode 100644
index 000000000000..2de6e5ed5e25
--- /dev/null
+++ b/tools/testing/selftests/arm64/signal/test_signals_utils.c
@@ -0,0 +1,328 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2019 ARM Limited */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <signal.h>
+#include <string.h>
+#include <unistd.h>
+#include <assert.h>
+#include <sys/auxv.h>
+#include <linux/auxvec.h>
+#include <ucontext.h>
+
+#include <asm/unistd.h>
+
+#include <kselftest.h>
+
+#include "test_signals.h"
+#include "test_signals_utils.h"
+#include "testcases/testcases.h"
+
+
+extern struct tdescr *current;
+
+static int sig_copyctx = SIGTRAP;
+
+static char const *const feats_names[FMAX_END] = {
+ " SSBS ",
+};
+
+#define MAX_FEATS_SZ 128
+static char feats_string[MAX_FEATS_SZ];
+
+static inline char *feats_to_string(unsigned long feats)
+{
+ size_t flen = MAX_FEATS_SZ - 1;
+
+ for (int i = 0; i < FMAX_END; i++) {
+ if (feats & (1UL << i)) {
+ size_t tlen = strlen(feats_names[i]);
+
+ assert(flen > tlen);
+ flen -= tlen;
+ strncat(feats_string, feats_names[i], flen);
+ }
+ }
+
+ return feats_string;
+}
+
+static void unblock_signal(int signum)
+{
+ sigset_t sset;
+
+ sigemptyset(&sset);
+ sigaddset(&sset, signum);
+ sigprocmask(SIG_UNBLOCK, &sset, NULL);
+}
+
+static void default_result(struct tdescr *td, bool force_exit)
+{
+ if (td->result == KSFT_SKIP) {
+ fprintf(stderr, "==>> completed. SKIP.\n");
+ } else if (td->pass) {
+ fprintf(stderr, "==>> completed. PASS(1)\n");
+ td->result = KSFT_PASS;
+ } else {
+ fprintf(stdout, "==>> completed. FAIL(0)\n");
+ td->result = KSFT_FAIL;
+ }
+
+ if (force_exit)
+ exit(td->result);
+}
+
+/*
+ * The following handle_signal_* helpers are used by main default_handler
+ * and are meant to return true when signal is handled successfully:
+ * when false is returned instead, it means that the signal was somehow
+ * unexpected in that context and it was NOT handled; default_handler will
+ * take care of such unexpected situations.
+ */
+
+static bool handle_signal_unsupported(struct tdescr *td,
+ siginfo_t *si, void *uc)
+{
+ if (feats_ok(td))
+ return false;
+
+ /* Mangling PC to avoid loops on original SIGILL */
+ ((ucontext_t *)uc)->uc_mcontext.pc += 4;
+
+ if (!td->initialized) {
+ fprintf(stderr,
+ "Got SIG_UNSUPP @test_init. Ignore.\n");
+ } else {
+ fprintf(stderr,
+ "-- RX SIG_UNSUPP on unsupported feat...OK\n");
+ td->pass = 1;
+ default_result(current, 1);
+ }
+
+ return true;
+}
+
+static bool handle_signal_trigger(struct tdescr *td,
+ siginfo_t *si, void *uc)
+{
+ td->triggered = 1;
+ /* ->run was asserted NON-NULL in test_setup() already */
+ td->run(td, si, uc);
+
+ return true;
+}
+
+static bool handle_signal_ok(struct tdescr *td,
+ siginfo_t *si, void *uc)
+{
+ /*
+	 * it's a bug in the test code when this assert fails:
+ * if sig_trig was defined, it must have been used before getting here.
+ */
+ assert(!td->sig_trig || td->triggered);
+ fprintf(stderr,
+ "SIG_OK -- SP:0x%llX si_addr@:%p si_code:%d token@:%p offset:%ld\n",
+ ((ucontext_t *)uc)->uc_mcontext.sp,
+ si->si_addr, si->si_code, td->token, td->token - si->si_addr);
+ /*
+	 * fake_sigreturn tests, which do not set sanity_disabled, set the
+	 * token field, at the very last moment, to the SP address used to
+	 * place the fake sigframe: so token==0 means we never made it to
+	 * the end, segfaulting well before that, and the test is probably broken.
+ */
+ if (!td->sanity_disabled && !td->token) {
+ fprintf(stdout,
+ "current->token ZEROED...test is probably broken!\n");
+ abort();
+ }
+ /*
+	 * Try to narrow down the SEGV to the ones generated by the Kernel itself
+ * via arm64_notify_segfault(). This is a best-effort check anyway, and
+ * the si_code check may need to change if this aspect of the kernel
+ * ABI changes.
+ */
+ if (td->sig_ok == SIGSEGV && si->si_code != SEGV_ACCERR) {
+ fprintf(stdout,
+ "si_code != SEGV_ACCERR...test is probably broken!\n");
+ abort();
+ }
+ td->pass = 1;
+ /*
+ * Some tests can lead to SEGV loops: in such a case we want to
+	 * terminate immediately by exiting straight away; some others are not
+ * supposed to outlive the signal handler code, due to the content of
+ * the fake sigframe which caused the signal itself.
+ */
+ default_result(current, 1);
+
+ return true;
+}
+
+static bool handle_signal_copyctx(struct tdescr *td,
+ siginfo_t *si, void *uc)
+{
+ /* Mangling PC to avoid loops on original BRK instr */
+ ((ucontext_t *)uc)->uc_mcontext.pc += 4;
+ memcpy(td->live_uc, uc, td->live_sz);
+ ASSERT_GOOD_CONTEXT(td->live_uc);
+ td->live_uc_valid = 1;
+ fprintf(stderr,
+ "GOOD CONTEXT grabbed from sig_copyctx handler\n");
+
+ return true;
+}
+
+static void default_handler(int signum, siginfo_t *si, void *uc)
+{
+ if (current->sig_unsupp && signum == current->sig_unsupp &&
+ handle_signal_unsupported(current, si, uc)) {
+ fprintf(stderr, "Handled SIG_UNSUPP\n");
+ } else if (current->sig_trig && signum == current->sig_trig &&
+ handle_signal_trigger(current, si, uc)) {
+ fprintf(stderr, "Handled SIG_TRIG\n");
+ } else if (current->sig_ok && signum == current->sig_ok &&
+ handle_signal_ok(current, si, uc)) {
+ fprintf(stderr, "Handled SIG_OK\n");
+ } else if (signum == sig_copyctx && current->live_uc &&
+ handle_signal_copyctx(current, si, uc)) {
+ fprintf(stderr, "Handled SIG_COPYCTX\n");
+ } else {
+ if (signum == SIGALRM && current->timeout) {
+ fprintf(stderr, "-- Timeout !\n");
+ } else {
+ fprintf(stderr,
+ "-- RX UNEXPECTED SIGNAL: %d\n", signum);
+ }
+ default_result(current, 1);
+ }
+}
+
+static int default_setup(struct tdescr *td)
+{
+ struct sigaction sa;
+
+ sa.sa_sigaction = default_handler;
+ sa.sa_flags = SA_SIGINFO | SA_RESTART;
+ sa.sa_flags |= td->sa_flags;
+ sigemptyset(&sa.sa_mask);
+ /* uncatchable signals naturally skipped ... */
+ for (int sig = 1; sig < 32; sig++)
+ sigaction(sig, &sa, NULL);
+ /*
+	 * The default disposition of RT signals is Term, but they cannot be
+ * generated by the Kernel in response to our tests; so just catch
+ * them all and report them as UNEXPECTED signals.
+ */
+ for (int sig = SIGRTMIN; sig <= SIGRTMAX; sig++)
+ sigaction(sig, &sa, NULL);
+
+ /* just in case...unblock explicitly all we need */
+ if (td->sig_trig)
+ unblock_signal(td->sig_trig);
+ if (td->sig_ok)
+ unblock_signal(td->sig_ok);
+ if (td->sig_unsupp)
+ unblock_signal(td->sig_unsupp);
+
+ if (td->timeout) {
+ unblock_signal(SIGALRM);
+ alarm(td->timeout);
+ }
+ fprintf(stderr, "Registered handlers for all signals.\n");
+
+ return 1;
+}
+
+static inline int default_trigger(struct tdescr *td)
+{
+ return !raise(td->sig_trig);
+}
+
+int test_init(struct tdescr *td)
+{
+ if (td->sig_trig == sig_copyctx) {
+ fprintf(stdout,
+ "Signal %d is RESERVED, cannot be used as a trigger. Aborting\n",
+ sig_copyctx);
+ return 0;
+ }
+ /* just in case */
+ unblock_signal(sig_copyctx);
+
+ td->minsigstksz = getauxval(AT_MINSIGSTKSZ);
+ if (!td->minsigstksz)
+ td->minsigstksz = MINSIGSTKSZ;
+	fprintf(stderr, "Detected MINSIGSTKSZ:%d\n", td->minsigstksz);
+
+ if (td->feats_required) {
+ td->feats_supported = 0;
+ /*
+ * Checking for CPU required features using both the
+ * auxval and the arm64 MRS Emulation to read sysregs.
+ */
+ if (getauxval(AT_HWCAP) & HWCAP_SSBS)
+ td->feats_supported |= FEAT_SSBS;
+ if (feats_ok(td))
+ fprintf(stderr,
+ "Required Features: [%s] supported\n",
+ feats_to_string(td->feats_required &
+ td->feats_supported));
+ else
+ fprintf(stderr,
+ "Required Features: [%s] NOT supported\n",
+ feats_to_string(td->feats_required &
+ ~td->feats_supported));
+ }
+
+ /* Perform test specific additional initialization */
+ if (td->init && !td->init(td)) {
+ fprintf(stderr, "FAILED Testcase initialization.\n");
+ return 0;
+ }
+ td->initialized = 1;
+ fprintf(stderr, "Testcase initialized.\n");
+
+ return 1;
+}
+
+int test_setup(struct tdescr *td)
+{
+ /* assert core invariants symptom of a rotten testcase */
+ assert(current);
+ assert(td);
+ assert(td->name);
+ assert(td->run);
+
+ /* Default result is FAIL if test setup fails */
+ td->result = KSFT_FAIL;
+ if (td->setup)
+ return td->setup(td);
+ else
+ return default_setup(td);
+}
+
+int test_run(struct tdescr *td)
+{
+ if (td->sig_trig) {
+ if (td->trigger)
+ return td->trigger(td);
+ else
+ return default_trigger(td);
+ } else {
+ return td->run(td, NULL, NULL);
+ }
+}
+
+void test_result(struct tdescr *td)
+{
+ if (td->initialized && td->result != KSFT_SKIP && td->check_result)
+ td->check_result(td);
+ default_result(td, 0);
+}
+
+void test_cleanup(struct tdescr *td)
+{
+ if (td->cleanup)
+ td->cleanup(td);
+}
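To make the feature-check arithmetic used above (and in feats_ok() from test_signals_utils.h) concrete, a worked sketch with assumed values:

/* Assume the test requires SSBS but the CPU does not support it */
unsigned long required  = FEAT_SSBS;	/* 0b1 */
unsigned long supported = 0;		/* populated at run-time */

/* (0b1 & 0b0) == 0b0 != 0b1  ->  check fails, SIG_UNSUPP path is taken */
bool ok = (required & supported) == required;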
diff --git a/tools/testing/selftests/arm64/signal/test_signals_utils.h b/tools/testing/selftests/arm64/signal/test_signals_utils.h
new file mode 100644
index 000000000000..6772b5c8d274
--- /dev/null
+++ b/tools/testing/selftests/arm64/signal/test_signals_utils.h
@@ -0,0 +1,120 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2019 ARM Limited */
+
+#ifndef __TEST_SIGNALS_UTILS_H__
+#define __TEST_SIGNALS_UTILS_H__
+
+#include <assert.h>
+#include <stdio.h>
+#include <string.h>
+
+#include "test_signals.h"
+
+int test_init(struct tdescr *td);
+int test_setup(struct tdescr *td);
+void test_cleanup(struct tdescr *td);
+int test_run(struct tdescr *td);
+void test_result(struct tdescr *td);
+
+static inline bool feats_ok(struct tdescr *td)
+{
+ return (td->feats_required & td->feats_supported) == td->feats_required;
+}
+
+/*
+ * Obtaining a valid and full-blown ucontext_t from userspace is tricky:
+ * libc getcontext() does not save all the regs and messes with some of
+ * them (pstate value in particular is not reliable).
+ *
+ * Here we use a service signal to grab the ucontext_t from inside a
+ * dedicated signal handler, since there, it is populated by Kernel
+ * itself in setup_sigframe(). The grabbed context is then stored and
+ * made available in td->live_uc.
+ *
+ * The service-signal used is a SIGTRAP induced by a 'brk' instruction,
+ * because here we have to avoid triggering the signal via a syscall,
+ * since that would cause any SVE sigframe content (if any) to be removed.
+ *
+ * Anyway this function really serves a dual purpose:
+ *
+ * 1. grab a valid sigcontext into td->live_uc for result analysis: in
+ * such case it returns 1.
+ *
+ * 2. detect if, somehow, a previously grabbed live_uc context has been
+ * actively used with a sigreturn: in that case execution would have
+ * magically resumed in the middle of this function itself (seen_already==1);
+ * return 0 then, since we have not, in fact, just grabbed a fresh
+ * context.
+ *
+ * This latter case is useful to detect when a fake_sigreturn test-case has
+ * unexpectedly survived without hitting a SEGV.
+ *
+ * Note that the case of runtime dynamically sized sigframes (like in SVE
+ * context) is still NOT addressed: sigframe size is supposed to be fixed
+ * at sizeof(ucontext_t).
+ */
+static __always_inline bool get_current_context(struct tdescr *td,
+ ucontext_t *dest_uc)
+{
+ static volatile bool seen_already;
+
+ assert(td && dest_uc);
+	/* it's a genuine invocation...reinitialize */
+ seen_already = 0;
+ td->live_uc_valid = 0;
+ td->live_sz = sizeof(*dest_uc);
+ memset(dest_uc, 0x00, td->live_sz);
+ td->live_uc = dest_uc;
+ /*
+ * Grab ucontext_t triggering a SIGTRAP.
+ *
+ * Note that:
+ * - live_uc_valid is declared volatile sig_atomic_t in
+ * struct tdescr since it will be changed inside the
+ * sig_copyctx handler
+ * - the additional 'memory' clobber is there to avoid possible
+ * compiler's assumption on live_uc_valid and the content
+ * pointed by dest_uc, which are all changed inside the signal
+ * handler
+ * - BRK causes a debug exception which is handled by the Kernel
+ * and finally causes the SIGTRAP signal to be delivered to this
+ * test thread. Since such delivery happens on the ret_to_user()
+ * /do_notify_resume() debug exception return-path, we are sure
+ * that the registered SIGTRAP handler has been run to completion
+ * before the execution path is restored here: as a consequence
+ * we can be sure that the volatile sig_atomic_t live_uc_valid
+ * carries a meaningful result. Being in a single thread context
+ * we'll also be sure that any access to memory modified by the
+ * handler (namely ucontext_t) will be visible once returned.
+ * - note that since we are using a breakpoint instruction here
+ * to cause a SIGTRAP, the ucontext_t grabbed from the signal
+ * handler would naturally contain a PC pointing exactly to this
+ * BRK line, which means that, on return from the signal handler,
+ * or if we place the ucontext_t on the stack to fake a sigreturn,
+ * we'll end up in an infinite loop of BRK-SIGTRAP-handler.
+ * For this reason we take care to artificially move forward the
+ * PC to the next instruction while inside the signal handler.
+ */
+ asm volatile ("brk #666"
+ : "+m" (*dest_uc)
+ :
+ : "memory");
+
+ /*
+ * If we get here with seen_already==1 it implies the td->live_uc
+ * context has been used to get back here....this probably means
+ * a test has failed to cause a SEGV...anyway live_uc does not
+ * point to a just acquired copy of ucontext_t...so return 0
+ */
+ if (seen_already) {
+ fprintf(stdout,
+ "Unexpected successful sigreturn detected: live_uc is stale !\n");
+ return 0;
+ }
+ seen_already = 1;
+
+ return td->live_uc_valid;
+}
+
+int fake_sigreturn(void *sigframe, size_t sz, int misalign_bytes);
+#endif
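Typical usage, mirroring the testcases added below: grab a live context into a static sigframe and bail out if the grab did not happen (or if a stale context brought execution back here):

static struct fake_sigframe sf;	/* defined in testcases.h */

if (!get_current_context(td, &sf.uc))
	return 1;	/* stale context or failed grab */

/* sf.uc now holds a Kernel-populated ucontext_t, ready to be mangled */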
diff --git a/tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_bad_magic.c b/tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_bad_magic.c
new file mode 100644
index 000000000000..8dc600a7d4fd
--- /dev/null
+++ b/tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_bad_magic.c
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 ARM Limited
+ *
+ * Place a fake sigframe on the stack including a BAD Unknown magic
+ * record: on sigreturn Kernel must spot this attempt and the test
+ * case is expected to be terminated via SEGV.
+ */
+
+#include <signal.h>
+#include <ucontext.h>
+
+#include "test_signals_utils.h"
+#include "testcases.h"
+
+struct fake_sigframe sf;
+
+static int fake_sigreturn_bad_magic_run(struct tdescr *td,
+ siginfo_t *si, ucontext_t *uc)
+{
+ struct _aarch64_ctx *shead = GET_SF_RESV_HEAD(sf), *head;
+
+ /* just to fill the ucontext_t with something real */
+ if (!get_current_context(td, &sf.uc))
+ return 1;
+
+ /* need at least 2*HDR_SZ space: KSFT_BAD_MAGIC + terminator. */
+ head = get_starting_head(shead, HDR_SZ * 2, GET_SF_RESV_SIZE(sf), NULL);
+ if (!head)
+ return 0;
+
+ /*
+	 * use a well known NON existent bad magic...something
+	 * we can be pretty sure won't ever be defined in the Kernel
+ */
+ head->magic = KSFT_BAD_MAGIC;
+ head->size = HDR_SZ;
+ write_terminator_record(GET_RESV_NEXT_HEAD(head));
+
+ ASSERT_BAD_CONTEXT(&sf.uc);
+ fake_sigreturn(&sf, sizeof(sf), 0);
+
+ return 1;
+}
+
+struct tdescr tde = {
+ .name = "FAKE_SIGRETURN_BAD_MAGIC",
+ .descr = "Trigger a sigreturn with a sigframe with a bad magic",
+ .sig_ok = SIGSEGV,
+ .timeout = 3,
+ .run = fake_sigreturn_bad_magic_run,
+};
diff --git a/tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_bad_size.c b/tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_bad_size.c
new file mode 100644
index 000000000000..b3c362100666
--- /dev/null
+++ b/tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_bad_size.c
@@ -0,0 +1,77 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 ARM Limited
+ *
+ * Place a fake sigframe on the stack including a bad record overflowing
+ * the __reserved space: on sigreturn Kernel must spot this attempt and
+ * the test case is expected to be terminated via SEGV.
+ */
+
+#include <signal.h>
+#include <ucontext.h>
+
+#include "test_signals_utils.h"
+#include "testcases.h"
+
+struct fake_sigframe sf;
+
+#define MIN_SZ_ALIGN 16
+
+static int fake_sigreturn_bad_size_run(struct tdescr *td,
+ siginfo_t *si, ucontext_t *uc)
+{
+ size_t resv_sz, need_sz, offset;
+ struct _aarch64_ctx *shead = GET_SF_RESV_HEAD(sf), *head;
+
+ /* just to fill the ucontext_t with something real */
+ if (!get_current_context(td, &sf.uc))
+ return 1;
+
+ resv_sz = GET_SF_RESV_SIZE(sf);
+ /* at least HDR_SZ + bad sized esr_context needed */
+ need_sz = sizeof(struct esr_context) + HDR_SZ;
+ head = get_starting_head(shead, need_sz, resv_sz, &offset);
+ if (!head)
+ return 0;
+
+ /*
+ * Use an esr_context to build a fake header with a
+	 * size greater than the free __reserved area minus HDR_SZ;
+ * using ESR_MAGIC here since it is not checked for size nor
+ * is limited to one instance.
+ *
+ * At first inject an additional normal esr_context
+ */
+ head->magic = ESR_MAGIC;
+ head->size = sizeof(struct esr_context);
+ /* and terminate properly */
+ write_terminator_record(GET_RESV_NEXT_HEAD(head));
+ ASSERT_GOOD_CONTEXT(&sf.uc);
+
+ /*
+ * now mess with fake esr_context size: leaving less space than
+ * needed while keeping size value 16-aligned
+ *
+ * It must trigger a SEGV from Kernel on:
+ *
+ * resv_sz - offset < sizeof(*head)
+ */
+ /* at first set the maximum good 16-aligned size */
+ head->size = (resv_sz - offset - need_sz + MIN_SZ_ALIGN) & ~0xfUL;
+ /* plus a bit more of 16-aligned sized stuff */
+ head->size += MIN_SZ_ALIGN;
+ /* and terminate properly */
+ write_terminator_record(GET_RESV_NEXT_HEAD(head));
+ ASSERT_BAD_CONTEXT(&sf.uc);
+ fake_sigreturn(&sf, sizeof(sf), 0);
+
+ return 1;
+}
+
+struct tdescr tde = {
+ .name = "FAKE_SIGRETURN_BAD_SIZE",
+	.descr = "Triggers a sigreturn with an overrun __reserved area",
+ .sig_ok = SIGSEGV,
+ .timeout = 3,
+ .run = fake_sigreturn_bad_size_run,
+};
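A worked example of the size mangling above, under assumed numbers (resv_sz = 4096 as per the arm64 __reserved area, offset = 592 purely hypothetical, need_sz = 24 assuming an 8-byte HDR_SZ plus a 16-byte esr_context, MIN_SZ_ALIGN = 16):

size_t sz;

sz  = (4096 - 592 - 24 + 16) & ~0xfUL;	/* 3496 & ~0xf = 3488: max good size */
sz += 16;				/* 3504: offset + sz now == resv_sz */

/* Kernel-side walk: resv_sz - (offset + sz) == 0 < sizeof(*head) -> SEGV */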
diff --git a/tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_bad_size_for_magic0.c b/tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_bad_size_for_magic0.c
new file mode 100644
index 000000000000..a44b88bfc81a
--- /dev/null
+++ b/tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_bad_size_for_magic0.c
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 ARM Limited
+ *
+ * Place a fake sigframe on the stack including a badly sized terminator
+ * record: on sigreturn Kernel must spot this attempt and the test case
+ * is expected to be terminated via SEGV.
+ */
+
+#include <signal.h>
+#include <ucontext.h>
+
+#include "test_signals_utils.h"
+#include "testcases.h"
+
+struct fake_sigframe sf;
+
+static int fake_sigreturn_bad_size_for_magic0_run(struct tdescr *td,
+ siginfo_t *si, ucontext_t *uc)
+{
+ struct _aarch64_ctx *shead = GET_SF_RESV_HEAD(sf), *head;
+
+ /* just to fill the ucontext_t with something real */
+ if (!get_current_context(td, &sf.uc))
+ return 1;
+
+ /* at least HDR_SZ for the badly sized terminator. */
+ head = get_starting_head(shead, HDR_SZ, GET_SF_RESV_SIZE(sf), NULL);
+ if (!head)
+ return 0;
+
+ head->magic = 0;
+ head->size = HDR_SZ;
+ ASSERT_BAD_CONTEXT(&sf.uc);
+ fake_sigreturn(&sf, sizeof(sf), 0);
+
+ return 1;
+}
+
+struct tdescr tde = {
+ .name = "FAKE_SIGRETURN_BAD_SIZE_FOR_TERMINATOR",
+	.descr = "Trigger a sigreturn using a non-zero size terminator",
+ .sig_ok = SIGSEGV,
+ .timeout = 3,
+ .run = fake_sigreturn_bad_size_for_magic0_run,
+};
diff --git a/tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_duplicated_fpsimd.c b/tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_duplicated_fpsimd.c
new file mode 100644
index 000000000000..afe8915f0998
--- /dev/null
+++ b/tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_duplicated_fpsimd.c
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 ARM Limited
+ *
+ * Place a fake sigframe on the stack including an additional FPSIMD
+ * record: on sigreturn Kernel must spot this attempt and the test
+ * case is expected to be terminated via SEGV.
+ */
+
+#include <signal.h>
+#include <ucontext.h>
+
+#include "test_signals_utils.h"
+#include "testcases.h"
+
+struct fake_sigframe sf;
+
+static int fake_sigreturn_duplicated_fpsimd_run(struct tdescr *td,
+ siginfo_t *si, ucontext_t *uc)
+{
+ struct _aarch64_ctx *shead = GET_SF_RESV_HEAD(sf), *head;
+
+ /* just to fill the ucontext_t with something real */
+ if (!get_current_context(td, &sf.uc))
+ return 1;
+
+ head = get_starting_head(shead, sizeof(struct fpsimd_context) + HDR_SZ,
+ GET_SF_RESV_SIZE(sf), NULL);
+ if (!head)
+ return 0;
+
+ /* Add a spurious fpsimd_context */
+ head->magic = FPSIMD_MAGIC;
+ head->size = sizeof(struct fpsimd_context);
+ /* and terminate */
+ write_terminator_record(GET_RESV_NEXT_HEAD(head));
+
+ ASSERT_BAD_CONTEXT(&sf.uc);
+ fake_sigreturn(&sf, sizeof(sf), 0);
+
+ return 1;
+}
+
+struct tdescr tde = {
+ .name = "FAKE_SIGRETURN_DUPLICATED_FPSIMD",
+ .descr = "Triggers a sigreturn including two fpsimd_context",
+ .sig_ok = SIGSEGV,
+ .timeout = 3,
+ .run = fake_sigreturn_duplicated_fpsimd_run,
+};
diff --git a/tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_misaligned_sp.c b/tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_misaligned_sp.c
new file mode 100644
index 000000000000..1e089e66f9f3
--- /dev/null
+++ b/tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_misaligned_sp.c
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 ARM Limited
+ *
+ * Place a fake sigframe on the stack at a misaligned SP: on sigreturn
+ * Kernel must spot this attempt and the test case is expected to be
+ * terminated via SEGV.
+ */
+
+#include <signal.h>
+#include <ucontext.h>
+
+#include "test_signals_utils.h"
+#include "testcases.h"
+
+struct fake_sigframe sf;
+
+static int fake_sigreturn_misaligned_run(struct tdescr *td,
+ siginfo_t *si, ucontext_t *uc)
+{
+ /* just to fill the ucontext_t with something real */
+ if (!get_current_context(td, &sf.uc))
+ return 1;
+
+ /* Forcing sigframe on misaligned SP (16 + 3) */
+ fake_sigreturn(&sf, sizeof(sf), 3);
+
+ return 1;
+}
+
+struct tdescr tde = {
+ .name = "FAKE_SIGRETURN_MISALIGNED_SP",
+ .descr = "Triggers a sigreturn with a misaligned sigframe",
+ .sig_ok = SIGSEGV,
+ .timeout = 3,
+ .run = fake_sigreturn_misaligned_run,
+};
diff --git a/tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_missing_fpsimd.c b/tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_missing_fpsimd.c
new file mode 100644
index 000000000000..08ecd8073a1a
--- /dev/null
+++ b/tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_missing_fpsimd.c
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 ARM Limited
+ *
+ * Place a fake sigframe on the stack missing the mandatory FPSIMD
+ * record: on sigreturn Kernel must spot this attempt and the test
+ * case is expected to be terminated via SEGV.
+ */
+
+#include <stdio.h>
+#include <signal.h>
+#include <ucontext.h>
+
+#include "test_signals_utils.h"
+#include "testcases.h"
+
+struct fake_sigframe sf;
+
+static int fake_sigreturn_missing_fpsimd_run(struct tdescr *td,
+ siginfo_t *si, ucontext_t *uc)
+{
+ size_t resv_sz, offset;
+ struct _aarch64_ctx *head = GET_SF_RESV_HEAD(sf);
+
+ /* just to fill the ucontext_t with something real */
+ if (!get_current_context(td, &sf.uc))
+ return 1;
+
+ resv_sz = GET_SF_RESV_SIZE(sf);
+ head = get_header(head, FPSIMD_MAGIC, resv_sz, &offset);
+ if (head && resv_sz - offset >= HDR_SZ) {
+ fprintf(stderr, "Mangling template header. Spare space:%zd\n",
+ resv_sz - offset);
+		/* Just overwrite fpsimd_context */
+ write_terminator_record(head);
+
+ ASSERT_BAD_CONTEXT(&sf.uc);
+ fake_sigreturn(&sf, sizeof(sf), 0);
+ }
+
+ return 1;
+}
+
+struct tdescr tde = {
+ .name = "FAKE_SIGRETURN_MISSING_FPSIMD",
+ .descr = "Triggers a sigreturn with a missing fpsimd_context",
+ .sig_ok = SIGSEGV,
+ .timeout = 3,
+ .run = fake_sigreturn_missing_fpsimd_run,
+};
diff --git a/tools/testing/selftests/arm64/signal/testcases/mangle_pstate_invalid_compat_toggle.c b/tools/testing/selftests/arm64/signal/testcases/mangle_pstate_invalid_compat_toggle.c
new file mode 100644
index 000000000000..2cb118b0ba05
--- /dev/null
+++ b/tools/testing/selftests/arm64/signal/testcases/mangle_pstate_invalid_compat_toggle.c
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 ARM Limited
+ *
+ * Try to mangle the ucontext from inside a signal handler, toggling
+ * the execution state bit: this attempt must be spotted by Kernel and
+ * the test case is expected to be terminated via SEGV.
+ */
+
+#include "test_signals_utils.h"
+#include "testcases.h"
+
+static int mangle_invalid_pstate_run(struct tdescr *td, siginfo_t *si,
+ ucontext_t *uc)
+{
+ ASSERT_GOOD_CONTEXT(uc);
+
+ /* This config should trigger a SIGSEGV by Kernel */
+ uc->uc_mcontext.pstate ^= PSR_MODE32_BIT;
+
+ return 1;
+}
+
+struct tdescr tde = {
+ .sanity_disabled = true,
+ .name = "MANGLE_PSTATE_INVALID_STATE_TOGGLE",
+ .descr = "Mangling uc_mcontext with INVALID STATE_TOGGLE",
+ .sig_trig = SIGUSR1,
+ .sig_ok = SIGSEGV,
+ .run = mangle_invalid_pstate_run,
+};
diff --git a/tools/testing/selftests/arm64/signal/testcases/mangle_pstate_invalid_daif_bits.c b/tools/testing/selftests/arm64/signal/testcases/mangle_pstate_invalid_daif_bits.c
new file mode 100644
index 000000000000..434b82597007
--- /dev/null
+++ b/tools/testing/selftests/arm64/signal/testcases/mangle_pstate_invalid_daif_bits.c
@@ -0,0 +1,35 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 ARM Limited
+ *
+ * Try to mangle the ucontext from inside a signal handler, mangling the
+ * DAIF bits in an illegal manner: this attempt must be spotted by Kernel
+ * and the test case is expected to be terminated via SEGV.
+ *
+ */
+
+#include "test_signals_utils.h"
+#include "testcases.h"
+
+static int mangle_invalid_pstate_run(struct tdescr *td, siginfo_t *si,
+ ucontext_t *uc)
+{
+ ASSERT_GOOD_CONTEXT(uc);
+
+ /*
+ * This config should trigger a SIGSEGV by Kernel when it checks
+ * the sigframe consistency in valid_user_regs() routine.
+ */
+ uc->uc_mcontext.pstate |= PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT;
+
+ return 1;
+}
+
+struct tdescr tde = {
+ .sanity_disabled = true,
+ .name = "MANGLE_PSTATE_INVALID_DAIF_BITS",
+ .descr = "Mangling uc_mcontext with INVALID DAIF_BITS",
+ .sig_trig = SIGUSR1,
+ .sig_ok = SIGSEGV,
+ .run = mangle_invalid_pstate_run,
+};
diff --git a/tools/testing/selftests/arm64/signal/testcases/mangle_pstate_invalid_mode_el1h.c b/tools/testing/selftests/arm64/signal/testcases/mangle_pstate_invalid_mode_el1h.c
new file mode 100644
index 000000000000..95f821abdf46
--- /dev/null
+++ b/tools/testing/selftests/arm64/signal/testcases/mangle_pstate_invalid_mode_el1h.c
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 ARM Limited
+ *
+ * Try to mangle the ucontext from inside a signal handler, toggling
+ * the mode bit to escalate exception level: this attempt must be spotted
+ * by Kernel and the test case is expected to be terminated via SEGV.
+ */
+
+#include "test_signals_utils.h"
+#include "testcases.h"
+
+#include "mangle_pstate_invalid_mode_template.h"
+
+DEFINE_TESTCASE_MANGLE_PSTATE_INVALID_MODE(1h);
diff --git a/tools/testing/selftests/arm64/signal/testcases/mangle_pstate_invalid_mode_el1t.c b/tools/testing/selftests/arm64/signal/testcases/mangle_pstate_invalid_mode_el1t.c
new file mode 100644
index 000000000000..cc222d8a618a
--- /dev/null
+++ b/tools/testing/selftests/arm64/signal/testcases/mangle_pstate_invalid_mode_el1t.c
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 ARM Limited
+ *
+ * Try to mangle the ucontext from inside a signal handler, toggling
+ * the mode bit to escalate exception level: this attempt must be spotted
+ * by Kernel and the test case is expected to be terminated via SEGV.
+ */
+
+#include "test_signals_utils.h"
+#include "testcases.h"
+
+#include "mangle_pstate_invalid_mode_template.h"
+
+DEFINE_TESTCASE_MANGLE_PSTATE_INVALID_MODE(1t);
diff --git a/tools/testing/selftests/arm64/signal/testcases/mangle_pstate_invalid_mode_el2h.c b/tools/testing/selftests/arm64/signal/testcases/mangle_pstate_invalid_mode_el2h.c
new file mode 100644
index 000000000000..2188add7d28c
--- /dev/null
+++ b/tools/testing/selftests/arm64/signal/testcases/mangle_pstate_invalid_mode_el2h.c
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 ARM Limited
+ *
+ * Try to mangle the ucontext from inside a signal handler, toggling
+ * the mode bit to escalate exception level: this attempt must be spotted
+ * by Kernel and the test case is expected to be terminated via SEGV.
+ */
+
+#include "test_signals_utils.h"
+#include "testcases.h"
+
+#include "mangle_pstate_invalid_mode_template.h"
+
+DEFINE_TESTCASE_MANGLE_PSTATE_INVALID_MODE(2h);
diff --git a/tools/testing/selftests/arm64/signal/testcases/mangle_pstate_invalid_mode_el2t.c b/tools/testing/selftests/arm64/signal/testcases/mangle_pstate_invalid_mode_el2t.c
new file mode 100644
index 000000000000..df32dd5a479c
--- /dev/null
+++ b/tools/testing/selftests/arm64/signal/testcases/mangle_pstate_invalid_mode_el2t.c
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 ARM Limited
+ *
+ * Try to mangle the ucontext from inside a signal handler, toggling
+ * the mode bit to escalate exception level: this attempt must be spotted
+ * by Kernel and the test case is expected to be terminated via SEGV.
+ */
+
+#include "test_signals_utils.h"
+#include "testcases.h"
+
+#include "mangle_pstate_invalid_mode_template.h"
+
+DEFINE_TESTCASE_MANGLE_PSTATE_INVALID_MODE(2t);
diff --git a/tools/testing/selftests/arm64/signal/testcases/mangle_pstate_invalid_mode_el3h.c b/tools/testing/selftests/arm64/signal/testcases/mangle_pstate_invalid_mode_el3h.c
new file mode 100644
index 000000000000..9e6829b7e5db
--- /dev/null
+++ b/tools/testing/selftests/arm64/signal/testcases/mangle_pstate_invalid_mode_el3h.c
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 ARM Limited
+ *
+ * Try to mangle the ucontext from inside a signal handler, toggling
+ * the mode bit to escalate exception level: this attempt must be spotted
+ * by Kernel and the test case is expected to be terminated via SEGV.
+ */
+
+#include "test_signals_utils.h"
+#include "testcases.h"
+
+#include "mangle_pstate_invalid_mode_template.h"
+
+DEFINE_TESTCASE_MANGLE_PSTATE_INVALID_MODE(3h);
diff --git a/tools/testing/selftests/arm64/signal/testcases/mangle_pstate_invalid_mode_el3t.c b/tools/testing/selftests/arm64/signal/testcases/mangle_pstate_invalid_mode_el3t.c
new file mode 100644
index 000000000000..5685a4f10d06
--- /dev/null
+++ b/tools/testing/selftests/arm64/signal/testcases/mangle_pstate_invalid_mode_el3t.c
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 ARM Limited
+ *
+ * Try to mangle the ucontext from inside a signal handler, toggling
+ * the mode bit to escalate exception level: this attempt must be spotted
+ * by Kernel and the test case is expected to be terminated via SEGV.
+ */
+
+#include "test_signals_utils.h"
+#include "testcases.h"
+
+#include "mangle_pstate_invalid_mode_template.h"
+
+DEFINE_TESTCASE_MANGLE_PSTATE_INVALID_MODE(3t);
diff --git a/tools/testing/selftests/arm64/signal/testcases/mangle_pstate_invalid_mode_template.h b/tools/testing/selftests/arm64/signal/testcases/mangle_pstate_invalid_mode_template.h
new file mode 100644
index 000000000000..f5bf1804d858
--- /dev/null
+++ b/tools/testing/selftests/arm64/signal/testcases/mangle_pstate_invalid_mode_template.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 ARM Limited
+ *
+ * Utility macro to ease definition of testcases toggling mode EL
+ */
+
+#define DEFINE_TESTCASE_MANGLE_PSTATE_INVALID_MODE(_mode) \
+ \
+static int mangle_invalid_pstate_run(struct tdescr *td, siginfo_t *si, \
+ ucontext_t *uc) \
+{ \
+ ASSERT_GOOD_CONTEXT(uc); \
+ \
+ uc->uc_mcontext.pstate &= ~PSR_MODE_MASK; \
+ uc->uc_mcontext.pstate |= PSR_MODE_EL ## _mode; \
+ \
+ return 1; \
+} \
+ \
+struct tdescr tde = { \
+ .sanity_disabled = true, \
+ .name = "MANGLE_PSTATE_INVALID_MODE_EL"#_mode, \
+ .descr = "Mangling uc_mcontext INVALID MODE EL"#_mode, \
+ .sig_trig = SIGUSR1, \
+ .sig_ok = SIGSEGV, \
+ .run = mangle_invalid_pstate_run, \
+}
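For reference, DEFINE_TESTCASE_MANGLE_PSTATE_INVALID_MODE(1h) expands, modulo whitespace, to:

static int mangle_invalid_pstate_run(struct tdescr *td, siginfo_t *si,
				     ucontext_t *uc)
{
	ASSERT_GOOD_CONTEXT(uc);

	uc->uc_mcontext.pstate &= ~PSR_MODE_MASK;
	uc->uc_mcontext.pstate |= PSR_MODE_EL1h;

	return 1;
}

struct tdescr tde = {
	.sanity_disabled = true,
	.name = "MANGLE_PSTATE_INVALID_MODE_EL1h",
	.descr = "Mangling uc_mcontext INVALID MODE EL1h",
	.sig_trig = SIGUSR1,
	.sig_ok = SIGSEGV,
	.run = mangle_invalid_pstate_run,
};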
diff --git a/tools/testing/selftests/arm64/signal/testcases/testcases.c b/tools/testing/selftests/arm64/signal/testcases/testcases.c
new file mode 100644
index 000000000000..61ebcdf63831
--- /dev/null
+++ b/tools/testing/selftests/arm64/signal/testcases/testcases.c
@@ -0,0 +1,196 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2019 ARM Limited */
+#include "testcases.h"
+
+struct _aarch64_ctx *get_header(struct _aarch64_ctx *head, uint32_t magic,
+ size_t resv_sz, size_t *offset)
+{
+ size_t offs = 0;
+ struct _aarch64_ctx *found = NULL;
+
+ if (!head || resv_sz < HDR_SZ)
+ return found;
+
+ while (offs <= resv_sz - HDR_SZ &&
+ head->magic != magic && head->magic) {
+ offs += head->size;
+ head = GET_RESV_NEXT_HEAD(head);
+ }
+ if (head->magic == magic) {
+ found = head;
+ if (offset)
+ *offset = offs;
+ }
+
+ return found;
+}
+
+bool validate_extra_context(struct extra_context *extra, char **err)
+{
+ struct _aarch64_ctx *term;
+
+ if (!extra || !err)
+ return false;
+
+ fprintf(stderr, "Validating EXTRA...\n");
+ term = GET_RESV_NEXT_HEAD(extra);
+ if (!term || term->magic || term->size) {
+ *err = "Missing terminator after EXTRA context";
+ return false;
+ }
+ if (extra->datap & 0x0fUL)
+ *err = "Extra DATAP misaligned";
+ else if (extra->size & 0x0fUL)
+ *err = "Extra SIZE misaligned";
+ else if (extra->datap != (uint64_t)term + sizeof(*term))
+ *err = "Extra DATAP misplaced (not contiguous)";
+ if (*err)
+ return false;
+
+ return true;
+}
+
+bool validate_reserved(ucontext_t *uc, size_t resv_sz, char **err)
+{
+ bool terminated = false;
+ size_t offs = 0;
+ int flags = 0;
+ struct extra_context *extra = NULL;
+ struct _aarch64_ctx *head =
+ (struct _aarch64_ctx *)uc->uc_mcontext.__reserved;
+
+ if (!err)
+ return false;
+ /* Walk till the end terminator verifying __reserved contents */
+ while (head && !terminated && offs < resv_sz) {
+ if ((uint64_t)head & 0x0fUL) {
+ *err = "Misaligned HEAD";
+ return false;
+ }
+
+ switch (head->magic) {
+ case 0:
+ if (head->size)
+ *err = "Bad size for terminator";
+ else
+ terminated = true;
+ break;
+ case FPSIMD_MAGIC:
+ if (flags & FPSIMD_CTX)
+ *err = "Multiple FPSIMD_MAGIC";
+ else if (head->size !=
+ sizeof(struct fpsimd_context))
+ *err = "Bad size for fpsimd_context";
+ flags |= FPSIMD_CTX;
+ break;
+ case ESR_MAGIC:
+ if (head->size != sizeof(struct esr_context))
+ *err = "Bad size for esr_context";
+ break;
+ case SVE_MAGIC:
+ if (flags & SVE_CTX)
+ *err = "Multiple SVE_MAGIC";
+ else if (head->size !=
+ sizeof(struct sve_context))
+ *err = "Bad size for sve_context";
+ flags |= SVE_CTX;
+ break;
+ case EXTRA_MAGIC:
+ if (flags & EXTRA_CTX)
+ *err = "Multiple EXTRA_MAGIC";
+ else if (head->size !=
+ sizeof(struct extra_context))
+ *err = "Bad size for extra_context";
+ flags |= EXTRA_CTX;
+ extra = (struct extra_context *)head;
+ break;
+ case KSFT_BAD_MAGIC:
+ /*
+ * This is a BAD magic header defined
+ * artificially by a testcase and surely
+ * unknown to the Kernel parse_user_sigframe().
+ * It MUST cause a Kernel induced SEGV
+ */
+ *err = "BAD MAGIC !";
+ break;
+ default:
+ /*
+ * A still unknown Magic: potentially freshly added
+ * to the Kernel code and still unknown to the
+ * tests.
+ */
+ fprintf(stdout,
+ "SKIP Unknown MAGIC: 0x%X - Is KSFT arm64/signal up to date ?\n",
+ head->magic);
+ break;
+ }
+
+ if (*err)
+ return false;
+
+ offs += head->size;
+ if (resv_sz < offs + sizeof(*head)) {
+ *err = "HEAD Overrun";
+ return false;
+ }
+
+ if (flags & EXTRA_CTX)
+ if (!validate_extra_context(extra, err))
+ return false;
+
+ head = GET_RESV_NEXT_HEAD(head);
+ }
+
+ if (terminated && !(flags & FPSIMD_CTX)) {
+ *err = "Missing FPSIMD";
+ return false;
+ }
+
+ return true;
+}
+
+/*
+ * This function walks through the records inside the provided reserved area
+ * trying to find enough space to fit @need_sz bytes: if not enough space is
+ * available and an extra_context record is present, it throws away the
+ * extra_context record.
+ *
+ * It returns a pointer to a new header where it is possible to start storing
+ * our need_sz bytes.
+ *
+ * @shead: points to the start of reserved area
+ * @need_sz: needed bytes
+ * @resv_sz: reserved area size in bytes
+ * @offset: if not null, this will be filled with the offset of the return
+ * head pointer from @shead
+ *
+ * @return: pointer to a new head where to start storing need_sz bytes, or
+ * NULL if space could not be made available.
+ */
+struct _aarch64_ctx *get_starting_head(struct _aarch64_ctx *shead,
+ size_t need_sz, size_t resv_sz,
+ size_t *offset)
+{
+ size_t offs = 0;
+ struct _aarch64_ctx *head;
+
+ head = get_terminator(shead, resv_sz, &offs);
+	/* no terminator found...no need to update offset, if any */
+ if (!head)
+ return head;
+ if (resv_sz - offs < need_sz) {
+ fprintf(stderr, "Low on space:%zd. Discarding extra_context.\n",
+ resv_sz - offs);
+ head = get_header(shead, EXTRA_MAGIC, resv_sz, &offs);
+ if (!head || resv_sz - offs < need_sz) {
+ fprintf(stderr,
+ "Failed to reclaim space on sigframe.\n");
+ return NULL;
+ }
+ }
+
+ fprintf(stderr, "Available space:%zd\n", resv_sz - offs);
+ if (offset)
+ *offset = offs;
+ return head;
+}
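A usage sketch for the walker above: looking up the esr_context record inside a previously grabbed sigframe (sf is assumed to be a populated struct fake_sigframe):

size_t offset;
struct _aarch64_ctx *head = GET_SF_RESV_HEAD(sf);

head = get_header(head, ESR_MAGIC, GET_SF_RESV_SIZE(sf), &offset);
if (head)
	fprintf(stderr, "esr_context found at offset:%zd\n", offset);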
diff --git a/tools/testing/selftests/arm64/signal/testcases/testcases.h b/tools/testing/selftests/arm64/signal/testcases/testcases.h
new file mode 100644
index 000000000000..ad884c135314
--- /dev/null
+++ b/tools/testing/selftests/arm64/signal/testcases/testcases.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2019 ARM Limited */
+#ifndef __TESTCASES_H__
+#define __TESTCASES_H__
+
+#include <stddef.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <ucontext.h>
+#include <signal.h>
+
+/* Architecture specific sigframe definitions */
+#include <asm/sigcontext.h>
+
+#define FPSIMD_CTX (1 << 0)
+#define SVE_CTX (1 << 1)
+#define EXTRA_CTX (1 << 2)
+
+#define KSFT_BAD_MAGIC 0xdeadbeef
+
+#define HDR_SZ \
+ sizeof(struct _aarch64_ctx)
+
+#define GET_SF_RESV_HEAD(sf) \
+ (struct _aarch64_ctx *)(&(sf).uc.uc_mcontext.__reserved)
+
+#define GET_SF_RESV_SIZE(sf) \
+ sizeof((sf).uc.uc_mcontext.__reserved)
+
+#define GET_UCP_RESV_SIZE(ucp) \
+ sizeof((ucp)->uc_mcontext.__reserved)
+
+#define ASSERT_BAD_CONTEXT(uc) do { \
+ char *err = NULL; \
+ if (!validate_reserved((uc), GET_UCP_RESV_SIZE((uc)), &err)) { \
+ if (err) \
+ fprintf(stderr, \
+ "Using badly built context - ERR: %s\n",\
+ err); \
+ } else { \
+ abort(); \
+ } \
+} while (0)
+
+#define ASSERT_GOOD_CONTEXT(uc) do { \
+ char *err = NULL; \
+ if (!validate_reserved((uc), GET_UCP_RESV_SIZE((uc)), &err)) { \
+ if (err) \
+ fprintf(stderr, \
+ "Detected BAD context - ERR: %s\n", err);\
+ abort(); \
+ } else { \
+ fprintf(stderr, "uc context validated.\n"); \
+ } \
+} while (0)
+
+/*
+ * A simple record-walker for the __reserved area: it walks through
+ * assuming only that it will find proper struct _aarch64_ctx header
+ * descriptors.
+ *
+ * It makes no assumptions on the content and ordering of the records,
+ * and any needed bounds checking must be enforced by the caller if
+ * wanted: this way it can be used by the caller on maliciously built
+ * bad contexts.
+ *
+ * head->size accounts for both the payload and the _aarch64_ctx header size!
+ */
+#define GET_RESV_NEXT_HEAD(h) \
+ (struct _aarch64_ctx *)((char *)(h) + (h)->size)
+
+struct fake_sigframe {
+ siginfo_t info;
+ ucontext_t uc;
+};
+
+
+bool validate_reserved(ucontext_t *uc, size_t resv_sz, char **err);
+
+bool validate_extra_context(struct extra_context *extra, char **err);
+
+struct _aarch64_ctx *get_header(struct _aarch64_ctx *head, uint32_t magic,
+ size_t resv_sz, size_t *offset);
+
+static inline struct _aarch64_ctx *get_terminator(struct _aarch64_ctx *head,
+ size_t resv_sz,
+ size_t *offset)
+{
+ return get_header(head, 0, resv_sz, offset);
+}
+
+static inline void write_terminator_record(struct _aarch64_ctx *tail)
+{
+ if (tail) {
+ tail->magic = 0;
+ tail->size = 0;
+ }
+}
+
+struct _aarch64_ctx *get_starting_head(struct _aarch64_ctx *shead,
+ size_t need_sz, size_t resv_sz,
+ size_t *offset);
+#endif
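A minimal dump loop built on GET_RESV_NEXT_HEAD(), for illustration; it assumes a well-formed, properly terminated __reserved area (a malformed size field would make it walk out of bounds):

struct _aarch64_ctx *h = GET_SF_RESV_HEAD(sf);

while (h->magic) {	/* magic == 0 marks the terminator */
	fprintf(stderr, "record magic:0x%X size:%u\n", h->magic, h->size);
	h = GET_RESV_NEXT_HEAD(h);
}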
diff --git a/tools/testing/selftests/arm64/.gitignore b/tools/testing/selftests/arm64/tags/.gitignore
index e8fae8d61ed6..e8fae8d61ed6 100644
--- a/tools/testing/selftests/arm64/.gitignore
+++ b/tools/testing/selftests/arm64/tags/.gitignore
diff --git a/tools/testing/selftests/arm64/tags/Makefile b/tools/testing/selftests/arm64/tags/Makefile
new file mode 100644
index 000000000000..41cb75070511
--- /dev/null
+++ b/tools/testing/selftests/arm64/tags/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+
+CFLAGS += -I../../../../../usr/include/
+TEST_GEN_PROGS := tags_test
+TEST_PROGS := run_tags_test.sh
+
+include ../../lib.mk
diff --git a/tools/testing/selftests/arm64/run_tags_test.sh b/tools/testing/selftests/arm64/tags/run_tags_test.sh
index 745f11379930..745f11379930 100755
--- a/tools/testing/selftests/arm64/run_tags_test.sh
+++ b/tools/testing/selftests/arm64/tags/run_tags_test.sh
diff --git a/tools/testing/selftests/arm64/tags_test.c b/tools/testing/selftests/arm64/tags/tags_test.c
index 5701163460ef..5701163460ef 100644
--- a/tools/testing/selftests/arm64/tags_test.c
+++ b/tools/testing/selftests/arm64/tags/tags_test.c
diff --git a/tools/testing/selftests/bpf/.gitignore b/tools/testing/selftests/bpf/.gitignore
index 7470327edcfe..4865116b96c7 100644
--- a/tools/testing/selftests/bpf/.gitignore
+++ b/tools/testing/selftests/bpf/.gitignore
@@ -7,11 +7,10 @@ FEATURE-DUMP.libbpf
fixdep
test_align
test_dev_cgroup
-test_progs
+/test_progs*
test_tcpbpf_user
test_verifier_log
feature
-test_libbpf_open
test_sock
test_sock_addr
test_sock_fields
@@ -33,9 +32,10 @@ test_tcpnotify_user
test_libbpf
test_tcp_check_syncookie_user
test_sysctl
-alu32
libbpf.pc
libbpf.so.*
test_hashmap
test_btf_dump
xdping
+/no_alu32
+/bpf_gcc
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index 6889c19a628c..085678d88ef8 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -2,10 +2,12 @@
include ../../../../scripts/Kbuild.include
include ../../../scripts/Makefile.arch
-LIBDIR := ../../../lib
+CURDIR := $(abspath .)
+LIBDIR := $(abspath ../../../lib)
BPFDIR := $(LIBDIR)/bpf
-APIDIR := ../../../include/uapi
-GENDIR := ../../../../include/generated
+TOOLSDIR := $(abspath ../../../include)
+APIDIR := $(TOOLSDIR)/uapi
+GENDIR := $(abspath ../../../../include/generated)
GENHDR := $(GENDIR)/autoconf.h
ifneq ($(wildcard $(GENHDR)),)
@@ -15,11 +17,10 @@ endif
CLANG ?= clang
LLC ?= llc
LLVM_OBJCOPY ?= llvm-objcopy
-LLVM_READELF ?= llvm-readelf
-BTF_PAHOLE ?= pahole
BPF_GCC ?= $(shell command -v bpf-gcc;)
-CFLAGS += -g -Wall -O2 -I$(APIDIR) -I$(LIBDIR) -I$(BPFDIR) -I$(GENDIR) $(GENFLAGS) -I../../../include \
- -Dbpf_prog_load=bpf_prog_test_load \
+CFLAGS += -g -Wall -O2 $(GENFLAGS) -I$(APIDIR) -I$(LIBDIR) -I$(BPFDIR) \
+ -I$(GENDIR) -I$(TOOLSDIR) -I$(CURDIR) \
+ -Dbpf_prog_load=bpf_prog_test_load \
-Dbpf_load_program=bpf_test_load_program
LDLIBS += -lcap -lelf -lrt -lpthread
@@ -27,33 +28,21 @@ LDLIBS += -lcap -lelf -lrt -lpthread
TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs \
test_align test_verifier_log test_dev_cgroup test_tcpbpf_user \
test_sock test_btf test_sockmap get_cgroup_id_user test_socket_cookie \
- test_cgroup_storage test_select_reuseport test_section_names \
+ test_cgroup_storage test_select_reuseport \
test_netcnt test_tcpnotify_user test_sock_fields test_sysctl test_hashmap \
- test_btf_dump test_cgroup_attach xdping
-
-BPF_OBJ_FILES = $(patsubst %.c,%.o, $(notdir $(wildcard progs/*.c)))
-TEST_GEN_FILES = $(BPF_OBJ_FILES)
-
-BTF_C_FILES = $(wildcard progs/btf_dump_test_case_*.c)
-TEST_FILES = $(BTF_C_FILES)
-
-# Also test sub-register code-gen if LLVM has eBPF v3 processor support which
-# contains both ALU32 and JMP32 instructions.
-SUBREG_CODEGEN := $(shell echo "int cal(int a) { return a > 0; }" | \
- $(CLANG) -target bpf -O2 -emit-llvm -S -x c - -o - | \
- $(LLC) -mattr=+alu32 -mcpu=v3 2>&1 | \
- grep 'if w')
-ifneq ($(SUBREG_CODEGEN),)
-TEST_GEN_FILES += $(patsubst %.o,alu32/%.o, $(BPF_OBJ_FILES))
-endif
+ test_cgroup_attach test_progs-no_alu32
+# Also test bpf-gcc, if present
ifneq ($(BPF_GCC),)
-TEST_GEN_FILES += $(patsubst %.o,bpf_gcc/%.o, $(BPF_OBJ_FILES))
+TEST_GEN_PROGS += test_progs-bpf_gcc
endif
+TEST_GEN_FILES =
+TEST_FILES = test_lwt_ip_encap.o \
+ test_tc_edt.o
+
# Order correspond to 'make run_tests' order
TEST_PROGS := test_kmod.sh \
- test_libbpf.sh \
test_xdp_redirect.sh \
test_xdp_meta.sh \
test_xdp_veth.sh \
@@ -80,27 +69,33 @@ TEST_PROGS_EXTENDED := with_addr.sh \
test_xdp_vlan.sh
# Compile but not part of 'make run_tests'
-TEST_GEN_PROGS_EXTENDED = test_libbpf_open test_sock_addr test_skb_cgroup_id_user \
+TEST_GEN_PROGS_EXTENDED = test_sock_addr test_skb_cgroup_id_user \
flow_dissector_load test_flow_dissector test_tcp_check_syncookie_user \
- test_lirc_mode2_user
+ test_lirc_mode2_user xdping
-include ../lib.mk
+TEST_CUSTOM_PROGS = urandom_read
-# NOTE: $(OUTPUT) won't get default value if used before lib.mk
-TEST_CUSTOM_PROGS = $(OUTPUT)/urandom_read
-all: $(TEST_CUSTOM_PROGS)
+include ../lib.mk
-$(OUTPUT)/urandom_read: $(OUTPUT)/%: %.c
+# Define simple and short `make test_progs`, `make test_sysctl`, etc targets
+# to build individual tests.
+# NOTE: Semicolon at the end is critical to override lib.mk's default static
+# rule for binaries.
+$(notdir $(TEST_GEN_PROGS) \
+ $(TEST_PROGS) \
+ $(TEST_PROGS_EXTENDED) \
+ $(TEST_GEN_PROGS_EXTENDED) \
+ $(TEST_CUSTOM_PROGS)): %: $(OUTPUT)/% ;
+
+$(OUTPUT)/urandom_read: urandom_read.c
$(CC) -o $@ $< -Wl,--build-id
$(OUTPUT)/test_stub.o: test_stub.c
- $(CC) $(TEST_PROGS_CFLAGS) $(CFLAGS) -c -o $@ $<
+ $(CC) -c $(CFLAGS) -o $@ $<
BPFOBJ := $(OUTPUT)/libbpf.a
-$(TEST_GEN_PROGS): $(OUTPUT)/test_stub.o $(BPFOBJ)
-
-$(TEST_GEN_PROGS_EXTENDED): $(OUTPUT)/test_stub.o $(OUTPUT)/libbpf.a
+$(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED): $(OUTPUT)/test_stub.o $(BPFOBJ)
$(OUTPUT)/test_dev_cgroup: cgroup_helpers.c
$(OUTPUT)/test_skb_cgroup_id_user: cgroup_helpers.c
@@ -110,7 +105,6 @@ $(OUTPUT)/test_socket_cookie: cgroup_helpers.c
$(OUTPUT)/test_sockmap: cgroup_helpers.c
$(OUTPUT)/test_tcpbpf_user: cgroup_helpers.c
$(OUTPUT)/test_tcpnotify_user: cgroup_helpers.c trace_helpers.c
-$(OUTPUT)/test_progs: cgroup_helpers.c trace_helpers.c
$(OUTPUT)/get_cgroup_id_user: cgroup_helpers.c
$(OUTPUT)/test_cgroup_storage: cgroup_helpers.c
$(OUTPUT)/test_netcnt: cgroup_helpers.c
@@ -126,15 +120,9 @@ force:
$(BPFOBJ): force
$(MAKE) -C $(BPFDIR) OUTPUT=$(OUTPUT)/
-PROBE := $(shell $(LLC) -march=bpf -mcpu=probe -filetype=null /dev/null 2>&1)
-
-# Let newer LLVM versions transparently probe the kernel for availability
-# of full BPF instruction set.
-ifeq ($(PROBE),)
- CPU ?= probe
-else
- CPU ?= generic
-endif
+BPF_HELPERS := $(BPFDIR)/bpf_helper_defs.h $(wildcard $(BPFDIR)/bpf_*.h)
+$(BPFDIR)/bpf_helper_defs.h:
+ $(MAKE) -C $(BPFDIR) OUTPUT=$(OUTPUT)/ bpf_helper_defs.h
# Get Clang's default includes on this system, as opposed to those seen by
# '-target bpf'. This fixes "missing" files on some architectures/distros,
@@ -146,9 +134,16 @@ define get_sys_includes
$(shell $(1) -v -E - </dev/null 2>&1 \
| sed -n '/<...> search starts here:/,/End of search list./{ s| \(/.*\)|-idirafter \1|p }')
endef
+
+# Determine target endianness.
+IS_LITTLE_ENDIAN = $(shell $(CC) -dM -E - </dev/null | \
+ grep 'define __BYTE_ORDER__ __ORDER_LITTLE_ENDIAN__')
+MENDIAN=$(if $(IS_LITTLE_ENDIAN),-mlittle-endian,-mbig-endian)
+
CLANG_SYS_INCLUDES = $(call get_sys_includes,$(CLANG))
-BPF_CFLAGS = -I. -I./include/uapi -I../../../include/uapi \
- -I$(OUTPUT)/../usr/include -D__TARGET_ARCH_$(SRCARCH)
+BPF_CFLAGS = -g -D__TARGET_ARCH_$(SRCARCH) $(MENDIAN) \
+ -I. -I./include/uapi -I$(APIDIR) \
+ -I$(BPFDIR) -I$(abspath $(OUTPUT)/../usr/include)
CLANG_CFLAGS = $(CLANG_SYS_INCLUDES) \
-Wno-compare-distinct-pointer-types
@@ -156,167 +151,172 @@ CLANG_CFLAGS = $(CLANG_SYS_INCLUDES) \
$(OUTPUT)/test_l4lb_noinline.o: BPF_CFLAGS += -fno-inline
$(OUTPUT)/test_xdp_noinline.o: BPF_CFLAGS += -fno-inline
-$(OUTPUT)/test_queue_map.o: test_queue_stack_map.h
-$(OUTPUT)/test_stack_map.o: test_queue_stack_map.h
-
$(OUTPUT)/flow_dissector_load.o: flow_dissector_load.h
-$(OUTPUT)/test_progs.o: flow_dissector_load.h
-
-BTF_LLC_PROBE := $(shell $(LLC) -march=bpf -mattr=help 2>&1 | grep dwarfris)
-BTF_PAHOLE_PROBE := $(shell $(BTF_PAHOLE) --help 2>&1 | grep BTF)
-BTF_OBJCOPY_PROBE := $(shell $(LLVM_OBJCOPY) --help 2>&1 | grep -i 'usage.*llvm')
-BTF_LLVM_PROBE := $(shell echo "int main() { return 0; }" | \
- $(CLANG) -target bpf -O2 -g -c -x c - -o ./llvm_btf_verify.o; \
- $(LLVM_READELF) -S ./llvm_btf_verify.o | grep BTF; \
- /bin/rm -f ./llvm_btf_verify.o)
-
-ifneq ($(BTF_LLVM_PROBE),)
- BPF_CFLAGS += -g
-else
-ifneq ($(BTF_LLC_PROBE),)
-ifneq ($(BTF_PAHOLE_PROBE),)
-ifneq ($(BTF_OBJCOPY_PROBE),)
- BPF_CFLAGS += -g
- LLC_FLAGS += -mattr=dwarfris
- DWARF2BTF = y
-endif
-endif
-endif
-endif
-TEST_PROGS_CFLAGS := -I. -I$(OUTPUT)
-TEST_MAPS_CFLAGS := -I. -I$(OUTPUT)
-TEST_VERIFIER_CFLAGS := -I. -I$(OUTPUT) -Iverifier
-
-ifneq ($(SUBREG_CODEGEN),)
-ALU32_BUILD_DIR = $(OUTPUT)/alu32
-TEST_CUSTOM_PROGS += $(ALU32_BUILD_DIR)/test_progs_32
-$(ALU32_BUILD_DIR):
- mkdir -p $@
-
-$(ALU32_BUILD_DIR)/urandom_read: $(OUTPUT)/urandom_read | $(ALU32_BUILD_DIR)
- cp $< $@
-
-$(ALU32_BUILD_DIR)/test_progs_32: test_progs.c $(OUTPUT)/libbpf.a\
- $(ALU32_BUILD_DIR)/urandom_read \
- | $(ALU32_BUILD_DIR)
- $(CC) $(TEST_PROGS_CFLAGS) $(CFLAGS) \
- -o $(ALU32_BUILD_DIR)/test_progs_32 \
- test_progs.c test_stub.c cgroup_helpers.c trace_helpers.c prog_tests/*.c \
- $(OUTPUT)/libbpf.a $(LDLIBS)
-
-$(ALU32_BUILD_DIR)/test_progs_32: $(PROG_TESTS_H)
-$(ALU32_BUILD_DIR)/test_progs_32: prog_tests/*.c
-
-$(ALU32_BUILD_DIR)/%.o: progs/%.c $(ALU32_BUILD_DIR)/test_progs_32 \
- | $(ALU32_BUILD_DIR)
- ($(CLANG) $(BPF_CFLAGS) $(CLANG_CFLAGS) -O2 -target bpf -emit-llvm \
- -c $< -o - || echo "clang failed") | \
- $(LLC) -march=bpf -mattr=+alu32 -mcpu=$(CPU) $(LLC_FLAGS) \
- -filetype=obj -o $@
-ifeq ($(DWARF2BTF),y)
- $(BTF_PAHOLE) -J $@
-endif
+# Build BPF object using Clang
+# $1 - input .c file
+# $2 - output .o file
+# $3 - CFLAGS
+# $4 - LDFLAGS
+define CLANG_BPF_BUILD_RULE
+ ($(CLANG) $3 -O2 -target bpf -emit-llvm \
+ -c $1 -o - || echo "BPF obj compilation failed") | \
+ $(LLC) -mattr=dwarfris -march=bpf -mcpu=probe $4 -filetype=obj -o $2
+endef
+# Similar to CLANG_BPF_BUILD_RULE, but with disabled alu32
+define CLANG_NOALU32_BPF_BUILD_RULE
+ ($(CLANG) $3 -O2 -target bpf -emit-llvm \
+ -c $1 -o - || echo "BPF obj compilation failed") | \
+ $(LLC) -march=bpf -mcpu=v2 $4 -filetype=obj -o $2
+endef
+# Similar to CLANG_BPF_BUILD_RULE, but using native Clang and bpf LLC
+define CLANG_NATIVE_BPF_BUILD_RULE
+ ($(CLANG) $3 -O2 -emit-llvm \
+ -c $1 -o - || echo "BPF obj compilation failed") | \
+ $(LLC) -march=bpf -mcpu=probe $4 -filetype=obj -o $2
+endef
+# Build BPF object using GCC
+define GCC_BPF_BUILD_RULE
+ $(BPF_GCC) $3 $4 -O2 -c $1 -o $2
+endef
+
+# Set up extra TRUNNER_XXX "temporary" variables in the environment (relies on
+# $eval()) and pass control to DEFINE_TEST_RUNNER_RULES.
+# Parameters:
+# $1 - test runner base binary name (e.g., test_progs)
+# $2 - test runner extra "flavor" (e.g., no_alu32, gcc-bpf, etc)
+define DEFINE_TEST_RUNNER
+
+TRUNNER_OUTPUT := $(OUTPUT)$(if $2,/)$2
+TRUNNER_BINARY := $1$(if $2,-)$2
+TRUNNER_TEST_OBJS := $$(patsubst %.c,$$(TRUNNER_OUTPUT)/%.test.o, \
+ $$(notdir $$(wildcard $(TRUNNER_TESTS_DIR)/*.c)))
+TRUNNER_EXTRA_OBJS := $$(patsubst %.c,$$(TRUNNER_OUTPUT)/%.o, \
+ $$(filter %.c,$(TRUNNER_EXTRA_SOURCES)))
+TRUNNER_EXTRA_HDRS := $$(filter %.h,$(TRUNNER_EXTRA_SOURCES))
+TRUNNER_TESTS_HDR := $(TRUNNER_TESTS_DIR)/tests.h
+TRUNNER_BPF_OBJS := $$(patsubst %.c,$$(TRUNNER_OUTPUT)/%.o, \
+ $$(notdir $$(wildcard $(TRUNNER_BPF_PROGS_DIR)/*.c)))
+
+# Evaluate rules now with extra TRUNNER_XXX variables above already defined
+$$(eval $$(call DEFINE_TEST_RUNNER_RULES,$1,$2))
+
+endef
+
+# Using TRUNNER_XXX variables, provided by callers of DEFINE_TEST_RUNNER and
+# set up by DEFINE_TEST_RUNNER itself, create test runner build rules with:
+# $1 - test runner base binary name (e.g., test_progs)
+# $2 - test runner extra "flavor" (e.g., no_alu32, gcc-bpf, etc)
+define DEFINE_TEST_RUNNER_RULES
+
+ifeq ($($(TRUNNER_OUTPUT)-dir),)
+$(TRUNNER_OUTPUT)-dir := y
+$(TRUNNER_OUTPUT):
+ mkdir -p $$@
endif
-ifneq ($(BPF_GCC),)
-GCC_SYS_INCLUDES = $(call get_sys_includes,gcc)
-IS_LITTLE_ENDIAN = $(shell $(CC) -dM -E - </dev/null | \
- grep 'define __BYTE_ORDER__ __ORDER_LITTLE_ENDIAN__')
-ifeq ($(IS_LITTLE_ENDIAN),)
-MENDIAN=-mbig-endian
-else
-MENDIAN=-mlittle-endian
+# ensure we set up BPF objects generation rule just once for a given
+# input/output directory combination
+ifeq ($($(TRUNNER_BPF_PROGS_DIR)$(if $2,-)$2-bpfobjs),)
+$(TRUNNER_BPF_PROGS_DIR)$(if $2,-)$2-bpfobjs := y
+$(TRUNNER_BPF_OBJS): $(TRUNNER_OUTPUT)/%.o: \
+ $(TRUNNER_BPF_PROGS_DIR)/%.c \
+ $(TRUNNER_BPF_PROGS_DIR)/*.h \
+ $$(BPF_HELPERS) | $(TRUNNER_OUTPUT)
+ $$(call $(TRUNNER_BPF_BUILD_RULE),$$<,$$@, \
+ $(TRUNNER_BPF_CFLAGS), \
+ $(TRUNNER_BPF_LDFLAGS))
endif
-BPF_GCC_CFLAGS = $(GCC_SYS_INCLUDES) $(MENDIAN)
-BPF_GCC_BUILD_DIR = $(OUTPUT)/bpf_gcc
-TEST_CUSTOM_PROGS += $(BPF_GCC_BUILD_DIR)/test_progs_bpf_gcc
-$(BPF_GCC_BUILD_DIR):
- mkdir -p $@
-
-$(BPF_GCC_BUILD_DIR)/urandom_read: $(OUTPUT)/urandom_read | $(BPF_GCC_BUILD_DIR)
- cp $< $@
-
-$(BPF_GCC_BUILD_DIR)/test_progs_bpf_gcc: $(OUTPUT)/test_progs \
- | $(BPF_GCC_BUILD_DIR)
- cp $< $@
-
-$(BPF_GCC_BUILD_DIR)/%.o: progs/%.c $(BPF_GCC_BUILD_DIR)/test_progs_bpf_gcc \
- | $(BPF_GCC_BUILD_DIR)
- $(BPF_GCC) $(BPF_CFLAGS) $(BPF_GCC_CFLAGS) -O2 -c $< -o $@
+
+# ensure we set up the tests.h header generation rule just once
+ifeq ($($(TRUNNER_TESTS_DIR)-tests-hdr),)
+$(TRUNNER_TESTS_DIR)-tests-hdr := y
+$(TRUNNER_TESTS_HDR): $(TRUNNER_TESTS_DIR)/*.c
+ $$(shell ( cd $(TRUNNER_TESTS_DIR); \
+ echo '/* Generated header, do not edit */'; \
+ ls *.c 2> /dev/null | \
+ sed -e 's@\([^\.]*\)\.c@DEFINE_TEST(\1)@'; \
+ ) > $$@)
endif
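For orientation, the generated tests.h is just a list of DEFINE_TEST(name) invocations, one per prog_tests/*.c file, which the runner expands with different macro definitions. A minimal sketch of that consuming pattern, assuming a hypothetical run_all_tests() driver (test_progs.c's actual driver differs):

/* prog_tests/tests.h, as emitted by the rule above, would contain e.g.:
 *
 *   DEFINE_TEST(attach_probe)
 *   DEFINE_TEST(core_reloc)
 */
#define DEFINE_TEST(name) extern void test_##name(void);
#include "tests.h"
#undef DEFINE_TEST

static void run_all_tests(void)	/* hypothetical driver */
{
#define DEFINE_TEST(name) test_##name();
#include "tests.h"
#undef DEFINE_TEST
}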
-# Have one program compiled without "-target bpf" to test whether libbpf loads
-# it successfully
-$(OUTPUT)/test_xdp.o: progs/test_xdp.c
- ($(CLANG) $(BPF_CFLAGS) $(CLANG_CFLAGS) -O2 -emit-llvm -c $< -o - || \
- echo "clang failed") | \
- $(LLC) -march=bpf -mcpu=$(CPU) $(LLC_FLAGS) -filetype=obj -o $@
-ifeq ($(DWARF2BTF),y)
- $(BTF_PAHOLE) -J $@
+# compile individual test files
+# Note: we cd into the output directory to ensure embedded BPF objects are found
+$(TRUNNER_TEST_OBJS): $(TRUNNER_OUTPUT)/%.test.o: \
+ $(TRUNNER_TESTS_DIR)/%.c \
+ $(TRUNNER_EXTRA_HDRS) \
+ $(TRUNNER_BPF_OBJS) \
+ $$(BPFOBJ) | $(TRUNNER_OUTPUT)
+ cd $$(@D) && $$(CC) $$(CFLAGS) -c $(CURDIR)/$$< $$(LDLIBS) -o $$(@F)
+
+$(TRUNNER_EXTRA_OBJS): $(TRUNNER_OUTPUT)/%.o: \
+ %.c \
+ $(TRUNNER_EXTRA_HDRS) \
+ $(TRUNNER_TESTS_HDR) \
+ $$(BPFOBJ) | $(TRUNNER_OUTPUT)
+ $$(CC) $$(CFLAGS) -c $$< $$(LDLIBS) -o $$@
+
+$(TRUNNER_BINARY)-extras: $(TRUNNER_EXTRA_FILES) | $(TRUNNER_OUTPUT)
+ifneq ($2,)
+ # only copy extra resources if in flavored build
+ cp -a $$^ $(TRUNNER_OUTPUT)/
endif
-$(OUTPUT)/%.o: progs/%.c
- ($(CLANG) $(BPF_CFLAGS) $(CLANG_CFLAGS) -O2 -target bpf -emit-llvm \
- -c $< -o - || echo "clang failed") | \
- $(LLC) -march=bpf -mcpu=$(CPU) $(LLC_FLAGS) -filetype=obj -o $@
-ifeq ($(DWARF2BTF),y)
- $(BTF_PAHOLE) -J $@
+$(OUTPUT)/$(TRUNNER_BINARY): $(TRUNNER_TEST_OBJS) \
+ $(TRUNNER_EXTRA_OBJS) $$(BPFOBJ) \
+ | $(TRUNNER_BINARY)-extras
+ $$(CC) $$(CFLAGS) $$(filter %.a %.o,$$^) $$(LDLIBS) -o $$@
+
+endef
+
+# Define test_progs test runner.
+TRUNNER_TESTS_DIR := prog_tests
+TRUNNER_BPF_PROGS_DIR := progs
+TRUNNER_EXTRA_SOURCES := test_progs.c cgroup_helpers.c trace_helpers.c \
+ flow_dissector_load.h
+TRUNNER_EXTRA_FILES := $(OUTPUT)/urandom_read \
+ $(wildcard progs/btf_dump_test_case_*.c)
+TRUNNER_BPF_BUILD_RULE := CLANG_BPF_BUILD_RULE
+TRUNNER_BPF_CFLAGS := -I. -I$(OUTPUT) $(BPF_CFLAGS) $(CLANG_CFLAGS)
+TRUNNER_BPF_LDFLAGS := -mattr=+alu32
+$(eval $(call DEFINE_TEST_RUNNER,test_progs))
+
+# Define test_progs-no_alu32 test runner.
+TRUNNER_BPF_BUILD_RULE := CLANG_NOALU32_BPF_BUILD_RULE
+TRUNNER_BPF_LDFLAGS :=
+$(eval $(call DEFINE_TEST_RUNNER,test_progs,no_alu32))
+
+# Define test_progs BPF-GCC-flavored test runner.
+ifneq ($(BPF_GCC),)
+TRUNNER_BPF_BUILD_RULE := GCC_BPF_BUILD_RULE
+TRUNNER_BPF_CFLAGS := $(BPF_CFLAGS) $(call get_sys_includes,gcc)
+TRUNNER_BPF_LDFLAGS :=
+$(eval $(call DEFINE_TEST_RUNNER,test_progs,bpf_gcc))
endif
-PROG_TESTS_DIR = $(OUTPUT)/prog_tests
-$(PROG_TESTS_DIR):
- mkdir -p $@
-PROG_TESTS_H := $(PROG_TESTS_DIR)/tests.h
-PROG_TESTS_FILES := $(wildcard prog_tests/*.c)
-test_progs.c: $(PROG_TESTS_H)
-$(OUTPUT)/test_progs: CFLAGS += $(TEST_PROGS_CFLAGS)
-$(OUTPUT)/test_progs: test_progs.c $(PROG_TESTS_FILES) | $(PROG_TESTS_H)
-$(PROG_TESTS_H): $(PROG_TESTS_FILES) | $(PROG_TESTS_DIR)
- $(shell ( cd prog_tests/; \
- echo '/* Generated header, do not edit */'; \
- ls *.c 2> /dev/null | \
- sed -e 's@\([^\.]*\)\.c@DEFINE_TEST(\1)@'; \
- ) > $(PROG_TESTS_H))
-
-MAP_TESTS_DIR = $(OUTPUT)/map_tests
-$(MAP_TESTS_DIR):
- mkdir -p $@
-MAP_TESTS_H := $(MAP_TESTS_DIR)/tests.h
-MAP_TESTS_FILES := $(wildcard map_tests/*.c)
-test_maps.c: $(MAP_TESTS_H)
-$(OUTPUT)/test_maps: CFLAGS += $(TEST_MAPS_CFLAGS)
-$(OUTPUT)/test_maps: test_maps.c $(MAP_TESTS_FILES) | $(MAP_TESTS_H)
-$(MAP_TESTS_H): $(MAP_TESTS_FILES) | $(MAP_TESTS_DIR)
- $(shell ( cd map_tests/; \
- echo '/* Generated header, do not edit */'; \
- echo '#ifdef DECLARE'; \
- ls *.c 2> /dev/null | \
- sed -e 's@\([^\.]*\)\.c@extern void test_\1(void);@'; \
- echo '#endif'; \
- echo '#ifdef CALL'; \
- ls *.c 2> /dev/null | \
- sed -e 's@\([^\.]*\)\.c@test_\1();@'; \
- echo '#endif' \
- ) > $(MAP_TESTS_H))
-
-VERIFIER_TESTS_DIR = $(OUTPUT)/verifier
-$(VERIFIER_TESTS_DIR):
- mkdir -p $@
-VERIFIER_TESTS_H := $(VERIFIER_TESTS_DIR)/tests.h
-VERIFIER_TEST_FILES := $(wildcard verifier/*.c)
-test_verifier.c: $(VERIFIER_TESTS_H)
-$(OUTPUT)/test_verifier: CFLAGS += $(TEST_VERIFIER_CFLAGS)
-$(OUTPUT)/test_verifier: test_verifier.c | $(VERIFIER_TEST_FILES) $(VERIFIER_TESTS_H)
-$(VERIFIER_TESTS_H): $(VERIFIER_TEST_FILES) | $(VERIFIER_TESTS_DIR)
+# Define test_maps test runner.
+TRUNNER_TESTS_DIR := map_tests
+TRUNNER_BPF_PROGS_DIR := progs
+TRUNNER_EXTRA_SOURCES := test_maps.c
+TRUNNER_EXTRA_FILES :=
+TRUNNER_BPF_BUILD_RULE := $$(error no BPF objects should be built)
+TRUNNER_BPF_CFLAGS :=
+TRUNNER_BPF_LDFLAGS :=
+$(eval $(call DEFINE_TEST_RUNNER,test_maps))
+
+# Define test_verifier test runner.
+# It is much simpler than test_maps/test_progs and sufficiently different from
+# them (e.g., tests.h uses a completely different pattern) that it's worth just
+# defining all the rules explicitly.
+verifier/tests.h: verifier/*.c
$(shell ( cd verifier/; \
echo '/* Generated header, do not edit */'; \
echo '#ifdef FILL_ARRAY'; \
- ls *.c 2> /dev/null | \
- sed -e 's@\(.*\)@#include \"\1\"@'; \
+ ls *.c 2> /dev/null | sed -e 's@\(.*\)@#include \"\1\"@'; \
echo '#endif' \
- ) > $(VERIFIER_TESTS_H))
+ ) > verifier/tests.h)
+$(OUTPUT)/test_verifier: test_verifier.c verifier/tests.h $(BPFOBJ) | $(OUTPUT)
+ $(CC) $(CFLAGS) $(filter %.a %.o %.c,$^) $(LDLIBS) -o $@
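The FILL_ARRAY guard lets test_verifier.c splice every verifier/*.c file into a single array initializer. A hedged sketch of that consumption; the struct here is illustrative, the real struct bpf_test in test_verifier.c has many more fields:

struct bpf_test {
	const char *descr;
	/* ... remaining fields of the real test descriptor ... */
};

static struct bpf_test tests[] = {
/* each verifier/*.c file contributes one or more { ... }, initializers */
#define FILL_ARRAY
#include <verifier/tests.h>
#undef FILL_ARRAY
};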
-EXTRA_CLEAN := $(TEST_CUSTOM_PROGS) $(ALU32_BUILD_DIR) $(BPF_GCC_BUILD_DIR) \
- $(VERIFIER_TESTS_H) $(PROG_TESTS_H) $(MAP_TESTS_H) \
- feature
+EXTRA_CLEAN := $(TEST_CUSTOM_PROGS) \
+ prog_tests/tests.h map_tests/tests.h verifier/tests.h \
+ feature $(OUTPUT)/*.o $(OUTPUT)/no_alu32 $(OUTPUT)/bpf_gcc
diff --git a/tools/testing/selftests/bpf/bpf_helpers.h b/tools/testing/selftests/bpf/bpf_helpers.h
deleted file mode 100644
index 54a50699bbfd..000000000000
--- a/tools/testing/selftests/bpf/bpf_helpers.h
+++ /dev/null
@@ -1,535 +0,0 @@
-/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
-#ifndef __BPF_HELPERS__
-#define __BPF_HELPERS__
-
-#define __uint(name, val) int (*name)[val]
-#define __type(name, val) val *name
-
-/* helper macro to print out debug messages */
-#define bpf_printk(fmt, ...) \
-({ \
- char ____fmt[] = fmt; \
- bpf_trace_printk(____fmt, sizeof(____fmt), \
- ##__VA_ARGS__); \
-})
-
-#ifdef __clang__
-
-/* helper macro to place programs, maps, license in
- * different sections in elf_bpf file. Section names
- * are interpreted by elf_bpf loader
- */
-#define SEC(NAME) __attribute__((section(NAME), used))
-
-/* helper functions called from eBPF programs written in C */
-static void *(*bpf_map_lookup_elem)(void *map, const void *key) =
- (void *) BPF_FUNC_map_lookup_elem;
-static int (*bpf_map_update_elem)(void *map, const void *key, const void *value,
- unsigned long long flags) =
- (void *) BPF_FUNC_map_update_elem;
-static int (*bpf_map_delete_elem)(void *map, const void *key) =
- (void *) BPF_FUNC_map_delete_elem;
-static int (*bpf_map_push_elem)(void *map, const void *value,
- unsigned long long flags) =
- (void *) BPF_FUNC_map_push_elem;
-static int (*bpf_map_pop_elem)(void *map, void *value) =
- (void *) BPF_FUNC_map_pop_elem;
-static int (*bpf_map_peek_elem)(void *map, void *value) =
- (void *) BPF_FUNC_map_peek_elem;
-static int (*bpf_probe_read)(void *dst, int size, const void *unsafe_ptr) =
- (void *) BPF_FUNC_probe_read;
-static unsigned long long (*bpf_ktime_get_ns)(void) =
- (void *) BPF_FUNC_ktime_get_ns;
-static int (*bpf_trace_printk)(const char *fmt, int fmt_size, ...) =
- (void *) BPF_FUNC_trace_printk;
-static void (*bpf_tail_call)(void *ctx, void *map, int index) =
- (void *) BPF_FUNC_tail_call;
-static unsigned long long (*bpf_get_smp_processor_id)(void) =
- (void *) BPF_FUNC_get_smp_processor_id;
-static unsigned long long (*bpf_get_current_pid_tgid)(void) =
- (void *) BPF_FUNC_get_current_pid_tgid;
-static unsigned long long (*bpf_get_current_uid_gid)(void) =
- (void *) BPF_FUNC_get_current_uid_gid;
-static int (*bpf_get_current_comm)(void *buf, int buf_size) =
- (void *) BPF_FUNC_get_current_comm;
-static unsigned long long (*bpf_perf_event_read)(void *map,
- unsigned long long flags) =
- (void *) BPF_FUNC_perf_event_read;
-static int (*bpf_clone_redirect)(void *ctx, int ifindex, int flags) =
- (void *) BPF_FUNC_clone_redirect;
-static int (*bpf_redirect)(int ifindex, int flags) =
- (void *) BPF_FUNC_redirect;
-static int (*bpf_redirect_map)(void *map, int key, int flags) =
- (void *) BPF_FUNC_redirect_map;
-static int (*bpf_perf_event_output)(void *ctx, void *map,
- unsigned long long flags, void *data,
- int size) =
- (void *) BPF_FUNC_perf_event_output;
-static int (*bpf_get_stackid)(void *ctx, void *map, int flags) =
- (void *) BPF_FUNC_get_stackid;
-static int (*bpf_probe_write_user)(void *dst, const void *src, int size) =
- (void *) BPF_FUNC_probe_write_user;
-static int (*bpf_current_task_under_cgroup)(void *map, int index) =
- (void *) BPF_FUNC_current_task_under_cgroup;
-static int (*bpf_skb_get_tunnel_key)(void *ctx, void *key, int size, int flags) =
- (void *) BPF_FUNC_skb_get_tunnel_key;
-static int (*bpf_skb_set_tunnel_key)(void *ctx, void *key, int size, int flags) =
- (void *) BPF_FUNC_skb_set_tunnel_key;
-static int (*bpf_skb_get_tunnel_opt)(void *ctx, void *md, int size) =
- (void *) BPF_FUNC_skb_get_tunnel_opt;
-static int (*bpf_skb_set_tunnel_opt)(void *ctx, void *md, int size) =
- (void *) BPF_FUNC_skb_set_tunnel_opt;
-static unsigned long long (*bpf_get_prandom_u32)(void) =
- (void *) BPF_FUNC_get_prandom_u32;
-static int (*bpf_xdp_adjust_head)(void *ctx, int offset) =
- (void *) BPF_FUNC_xdp_adjust_head;
-static int (*bpf_xdp_adjust_meta)(void *ctx, int offset) =
- (void *) BPF_FUNC_xdp_adjust_meta;
-static int (*bpf_get_socket_cookie)(void *ctx) =
- (void *) BPF_FUNC_get_socket_cookie;
-static int (*bpf_setsockopt)(void *ctx, int level, int optname, void *optval,
- int optlen) =
- (void *) BPF_FUNC_setsockopt;
-static int (*bpf_getsockopt)(void *ctx, int level, int optname, void *optval,
- int optlen) =
- (void *) BPF_FUNC_getsockopt;
-static int (*bpf_sock_ops_cb_flags_set)(void *ctx, int flags) =
- (void *) BPF_FUNC_sock_ops_cb_flags_set;
-static int (*bpf_sk_redirect_map)(void *ctx, void *map, int key, int flags) =
- (void *) BPF_FUNC_sk_redirect_map;
-static int (*bpf_sk_redirect_hash)(void *ctx, void *map, void *key, int flags) =
- (void *) BPF_FUNC_sk_redirect_hash;
-static int (*bpf_sock_map_update)(void *map, void *key, void *value,
- unsigned long long flags) =
- (void *) BPF_FUNC_sock_map_update;
-static int (*bpf_sock_hash_update)(void *map, void *key, void *value,
- unsigned long long flags) =
- (void *) BPF_FUNC_sock_hash_update;
-static int (*bpf_perf_event_read_value)(void *map, unsigned long long flags,
- void *buf, unsigned int buf_size) =
- (void *) BPF_FUNC_perf_event_read_value;
-static int (*bpf_perf_prog_read_value)(void *ctx, void *buf,
- unsigned int buf_size) =
- (void *) BPF_FUNC_perf_prog_read_value;
-static int (*bpf_override_return)(void *ctx, unsigned long rc) =
- (void *) BPF_FUNC_override_return;
-static int (*bpf_msg_redirect_map)(void *ctx, void *map, int key, int flags) =
- (void *) BPF_FUNC_msg_redirect_map;
-static int (*bpf_msg_redirect_hash)(void *ctx,
- void *map, void *key, int flags) =
- (void *) BPF_FUNC_msg_redirect_hash;
-static int (*bpf_msg_apply_bytes)(void *ctx, int len) =
- (void *) BPF_FUNC_msg_apply_bytes;
-static int (*bpf_msg_cork_bytes)(void *ctx, int len) =
- (void *) BPF_FUNC_msg_cork_bytes;
-static int (*bpf_msg_pull_data)(void *ctx, int start, int end, int flags) =
- (void *) BPF_FUNC_msg_pull_data;
-static int (*bpf_msg_push_data)(void *ctx, int start, int end, int flags) =
- (void *) BPF_FUNC_msg_push_data;
-static int (*bpf_msg_pop_data)(void *ctx, int start, int cut, int flags) =
- (void *) BPF_FUNC_msg_pop_data;
-static int (*bpf_bind)(void *ctx, void *addr, int addr_len) =
- (void *) BPF_FUNC_bind;
-static int (*bpf_xdp_adjust_tail)(void *ctx, int offset) =
- (void *) BPF_FUNC_xdp_adjust_tail;
-static int (*bpf_skb_get_xfrm_state)(void *ctx, int index, void *state,
- int size, int flags) =
- (void *) BPF_FUNC_skb_get_xfrm_state;
-static int (*bpf_sk_select_reuseport)(void *ctx, void *map, void *key, __u32 flags) =
- (void *) BPF_FUNC_sk_select_reuseport;
-static int (*bpf_get_stack)(void *ctx, void *buf, int size, int flags) =
- (void *) BPF_FUNC_get_stack;
-static int (*bpf_fib_lookup)(void *ctx, struct bpf_fib_lookup *params,
- int plen, __u32 flags) =
- (void *) BPF_FUNC_fib_lookup;
-static int (*bpf_lwt_push_encap)(void *ctx, unsigned int type, void *hdr,
- unsigned int len) =
- (void *) BPF_FUNC_lwt_push_encap;
-static int (*bpf_lwt_seg6_store_bytes)(void *ctx, unsigned int offset,
- void *from, unsigned int len) =
- (void *) BPF_FUNC_lwt_seg6_store_bytes;
-static int (*bpf_lwt_seg6_action)(void *ctx, unsigned int action, void *param,
- unsigned int param_len) =
- (void *) BPF_FUNC_lwt_seg6_action;
-static int (*bpf_lwt_seg6_adjust_srh)(void *ctx, unsigned int offset,
- unsigned int len) =
- (void *) BPF_FUNC_lwt_seg6_adjust_srh;
-static int (*bpf_rc_repeat)(void *ctx) =
- (void *) BPF_FUNC_rc_repeat;
-static int (*bpf_rc_keydown)(void *ctx, unsigned int protocol,
- unsigned long long scancode, unsigned int toggle) =
- (void *) BPF_FUNC_rc_keydown;
-static unsigned long long (*bpf_get_current_cgroup_id)(void) =
- (void *) BPF_FUNC_get_current_cgroup_id;
-static void *(*bpf_get_local_storage)(void *map, unsigned long long flags) =
- (void *) BPF_FUNC_get_local_storage;
-static unsigned long long (*bpf_skb_cgroup_id)(void *ctx) =
- (void *) BPF_FUNC_skb_cgroup_id;
-static unsigned long long (*bpf_skb_ancestor_cgroup_id)(void *ctx, int level) =
- (void *) BPF_FUNC_skb_ancestor_cgroup_id;
-static struct bpf_sock *(*bpf_sk_lookup_tcp)(void *ctx,
- struct bpf_sock_tuple *tuple,
- int size, unsigned long long netns_id,
- unsigned long long flags) =
- (void *) BPF_FUNC_sk_lookup_tcp;
-static struct bpf_sock *(*bpf_skc_lookup_tcp)(void *ctx,
- struct bpf_sock_tuple *tuple,
- int size, unsigned long long netns_id,
- unsigned long long flags) =
- (void *) BPF_FUNC_skc_lookup_tcp;
-static struct bpf_sock *(*bpf_sk_lookup_udp)(void *ctx,
- struct bpf_sock_tuple *tuple,
- int size, unsigned long long netns_id,
- unsigned long long flags) =
- (void *) BPF_FUNC_sk_lookup_udp;
-static int (*bpf_sk_release)(struct bpf_sock *sk) =
- (void *) BPF_FUNC_sk_release;
-static int (*bpf_skb_vlan_push)(void *ctx, __be16 vlan_proto, __u16 vlan_tci) =
- (void *) BPF_FUNC_skb_vlan_push;
-static int (*bpf_skb_vlan_pop)(void *ctx) =
- (void *) BPF_FUNC_skb_vlan_pop;
-static int (*bpf_rc_pointer_rel)(void *ctx, int rel_x, int rel_y) =
- (void *) BPF_FUNC_rc_pointer_rel;
-static void (*bpf_spin_lock)(struct bpf_spin_lock *lock) =
- (void *) BPF_FUNC_spin_lock;
-static void (*bpf_spin_unlock)(struct bpf_spin_lock *lock) =
- (void *) BPF_FUNC_spin_unlock;
-static struct bpf_sock *(*bpf_sk_fullsock)(struct bpf_sock *sk) =
- (void *) BPF_FUNC_sk_fullsock;
-static struct bpf_tcp_sock *(*bpf_tcp_sock)(struct bpf_sock *sk) =
- (void *) BPF_FUNC_tcp_sock;
-static struct bpf_sock *(*bpf_get_listener_sock)(struct bpf_sock *sk) =
- (void *) BPF_FUNC_get_listener_sock;
-static int (*bpf_skb_ecn_set_ce)(void *ctx) =
- (void *) BPF_FUNC_skb_ecn_set_ce;
-static int (*bpf_tcp_check_syncookie)(struct bpf_sock *sk,
- void *ip, int ip_len, void *tcp, int tcp_len) =
- (void *) BPF_FUNC_tcp_check_syncookie;
-static int (*bpf_sysctl_get_name)(void *ctx, char *buf,
- unsigned long long buf_len,
- unsigned long long flags) =
- (void *) BPF_FUNC_sysctl_get_name;
-static int (*bpf_sysctl_get_current_value)(void *ctx, char *buf,
- unsigned long long buf_len) =
- (void *) BPF_FUNC_sysctl_get_current_value;
-static int (*bpf_sysctl_get_new_value)(void *ctx, char *buf,
- unsigned long long buf_len) =
- (void *) BPF_FUNC_sysctl_get_new_value;
-static int (*bpf_sysctl_set_new_value)(void *ctx, const char *buf,
- unsigned long long buf_len) =
- (void *) BPF_FUNC_sysctl_set_new_value;
-static int (*bpf_strtol)(const char *buf, unsigned long long buf_len,
- unsigned long long flags, long *res) =
- (void *) BPF_FUNC_strtol;
-static int (*bpf_strtoul)(const char *buf, unsigned long long buf_len,
- unsigned long long flags, unsigned long *res) =
- (void *) BPF_FUNC_strtoul;
-static void *(*bpf_sk_storage_get)(void *map, struct bpf_sock *sk,
- void *value, __u64 flags) =
- (void *) BPF_FUNC_sk_storage_get;
-static int (*bpf_sk_storage_delete)(void *map, struct bpf_sock *sk) =
- (void *)BPF_FUNC_sk_storage_delete;
-static int (*bpf_send_signal)(unsigned sig) = (void *)BPF_FUNC_send_signal;
-static long long (*bpf_tcp_gen_syncookie)(struct bpf_sock *sk, void *ip,
- int ip_len, void *tcp, int tcp_len) =
- (void *) BPF_FUNC_tcp_gen_syncookie;
-
-/* llvm builtin functions that eBPF C program may use to
- * emit BPF_LD_ABS and BPF_LD_IND instructions
- */
-struct sk_buff;
-unsigned long long load_byte(void *skb,
- unsigned long long off) asm("llvm.bpf.load.byte");
-unsigned long long load_half(void *skb,
- unsigned long long off) asm("llvm.bpf.load.half");
-unsigned long long load_word(void *skb,
- unsigned long long off) asm("llvm.bpf.load.word");
-
-/* a helper structure used by eBPF C program
- * to describe map attributes to elf_bpf loader
- */
-struct bpf_map_def {
- unsigned int type;
- unsigned int key_size;
- unsigned int value_size;
- unsigned int max_entries;
- unsigned int map_flags;
- unsigned int inner_map_idx;
- unsigned int numa_node;
-};
-
-#else
-
-#include <bpf-helpers.h>
-
-#endif
-
-#define BPF_ANNOTATE_KV_PAIR(name, type_key, type_val) \
- struct ____btf_map_##name { \
- type_key key; \
- type_val value; \
- }; \
- struct ____btf_map_##name \
- __attribute__ ((section(".maps." #name), used)) \
- ____btf_map_##name = { }
-
-static int (*bpf_skb_load_bytes)(void *ctx, int off, void *to, int len) =
- (void *) BPF_FUNC_skb_load_bytes;
-static int (*bpf_skb_load_bytes_relative)(void *ctx, int off, void *to, int len, __u32 start_header) =
- (void *) BPF_FUNC_skb_load_bytes_relative;
-static int (*bpf_skb_store_bytes)(void *ctx, int off, void *from, int len, int flags) =
- (void *) BPF_FUNC_skb_store_bytes;
-static int (*bpf_l3_csum_replace)(void *ctx, int off, int from, int to, int flags) =
- (void *) BPF_FUNC_l3_csum_replace;
-static int (*bpf_l4_csum_replace)(void *ctx, int off, int from, int to, int flags) =
- (void *) BPF_FUNC_l4_csum_replace;
-static int (*bpf_csum_diff)(void *from, int from_size, void *to, int to_size, int seed) =
- (void *) BPF_FUNC_csum_diff;
-static int (*bpf_skb_under_cgroup)(void *ctx, void *map, int index) =
- (void *) BPF_FUNC_skb_under_cgroup;
-static int (*bpf_skb_change_head)(void *, int len, int flags) =
- (void *) BPF_FUNC_skb_change_head;
-static int (*bpf_skb_pull_data)(void *, int len) =
- (void *) BPF_FUNC_skb_pull_data;
-static unsigned int (*bpf_get_cgroup_classid)(void *ctx) =
- (void *) BPF_FUNC_get_cgroup_classid;
-static unsigned int (*bpf_get_route_realm)(void *ctx) =
- (void *) BPF_FUNC_get_route_realm;
-static int (*bpf_skb_change_proto)(void *ctx, __be16 proto, __u64 flags) =
- (void *) BPF_FUNC_skb_change_proto;
-static int (*bpf_skb_change_type)(void *ctx, __u32 type) =
- (void *) BPF_FUNC_skb_change_type;
-static unsigned int (*bpf_get_hash_recalc)(void *ctx) =
- (void *) BPF_FUNC_get_hash_recalc;
-static unsigned long long (*bpf_get_current_task)(void) =
- (void *) BPF_FUNC_get_current_task;
-static int (*bpf_skb_change_tail)(void *ctx, __u32 len, __u64 flags) =
- (void *) BPF_FUNC_skb_change_tail;
-static long long (*bpf_csum_update)(void *ctx, __u32 csum) =
- (void *) BPF_FUNC_csum_update;
-static void (*bpf_set_hash_invalid)(void *ctx) =
- (void *) BPF_FUNC_set_hash_invalid;
-static int (*bpf_get_numa_node_id)(void) =
- (void *) BPF_FUNC_get_numa_node_id;
-static int (*bpf_probe_read_str)(void *ctx, __u32 size,
- const void *unsafe_ptr) =
- (void *) BPF_FUNC_probe_read_str;
-static unsigned int (*bpf_get_socket_uid)(void *ctx) =
- (void *) BPF_FUNC_get_socket_uid;
-static unsigned int (*bpf_set_hash)(void *ctx, __u32 hash) =
- (void *) BPF_FUNC_set_hash;
-static int (*bpf_skb_adjust_room)(void *ctx, __s32 len_diff, __u32 mode,
- unsigned long long flags) =
- (void *) BPF_FUNC_skb_adjust_room;
-
-/* Scan the ARCH passed in from ARCH env variable (see Makefile) */
-#if defined(__TARGET_ARCH_x86)
- #define bpf_target_x86
- #define bpf_target_defined
-#elif defined(__TARGET_ARCH_s390)
- #define bpf_target_s390
- #define bpf_target_defined
-#elif defined(__TARGET_ARCH_arm)
- #define bpf_target_arm
- #define bpf_target_defined
-#elif defined(__TARGET_ARCH_arm64)
- #define bpf_target_arm64
- #define bpf_target_defined
-#elif defined(__TARGET_ARCH_mips)
- #define bpf_target_mips
- #define bpf_target_defined
-#elif defined(__TARGET_ARCH_powerpc)
- #define bpf_target_powerpc
- #define bpf_target_defined
-#elif defined(__TARGET_ARCH_sparc)
- #define bpf_target_sparc
- #define bpf_target_defined
-#else
- #undef bpf_target_defined
-#endif
-
-/* Fall back to what the compiler says */
-#ifndef bpf_target_defined
-#if defined(__x86_64__)
- #define bpf_target_x86
-#elif defined(__s390__)
- #define bpf_target_s390
-#elif defined(__arm__)
- #define bpf_target_arm
-#elif defined(__aarch64__)
- #define bpf_target_arm64
-#elif defined(__mips__)
- #define bpf_target_mips
-#elif defined(__powerpc__)
- #define bpf_target_powerpc
-#elif defined(__sparc__)
- #define bpf_target_sparc
-#endif
-#endif
-
-#if defined(bpf_target_x86)
-
-#ifdef __KERNEL__
-#define PT_REGS_PARM1(x) ((x)->di)
-#define PT_REGS_PARM2(x) ((x)->si)
-#define PT_REGS_PARM3(x) ((x)->dx)
-#define PT_REGS_PARM4(x) ((x)->cx)
-#define PT_REGS_PARM5(x) ((x)->r8)
-#define PT_REGS_RET(x) ((x)->sp)
-#define PT_REGS_FP(x) ((x)->bp)
-#define PT_REGS_RC(x) ((x)->ax)
-#define PT_REGS_SP(x) ((x)->sp)
-#define PT_REGS_IP(x) ((x)->ip)
-#else
-#ifdef __i386__
-/* i386 kernel is built with -mregparm=3 */
-#define PT_REGS_PARM1(x) ((x)->eax)
-#define PT_REGS_PARM2(x) ((x)->edx)
-#define PT_REGS_PARM3(x) ((x)->ecx)
-#define PT_REGS_PARM4(x) 0
-#define PT_REGS_PARM5(x) 0
-#define PT_REGS_RET(x) ((x)->esp)
-#define PT_REGS_FP(x) ((x)->ebp)
-#define PT_REGS_RC(x) ((x)->eax)
-#define PT_REGS_SP(x) ((x)->esp)
-#define PT_REGS_IP(x) ((x)->eip)
-#else
-#define PT_REGS_PARM1(x) ((x)->rdi)
-#define PT_REGS_PARM2(x) ((x)->rsi)
-#define PT_REGS_PARM3(x) ((x)->rdx)
-#define PT_REGS_PARM4(x) ((x)->rcx)
-#define PT_REGS_PARM5(x) ((x)->r8)
-#define PT_REGS_RET(x) ((x)->rsp)
-#define PT_REGS_FP(x) ((x)->rbp)
-#define PT_REGS_RC(x) ((x)->rax)
-#define PT_REGS_SP(x) ((x)->rsp)
-#define PT_REGS_IP(x) ((x)->rip)
-#endif
-#endif
-
-#elif defined(bpf_target_s390)
-
-/* s390 provides user_pt_regs instead of struct pt_regs to userspace */
-struct pt_regs;
-#define PT_REGS_S390 const volatile user_pt_regs
-#define PT_REGS_PARM1(x) (((PT_REGS_S390 *)(x))->gprs[2])
-#define PT_REGS_PARM2(x) (((PT_REGS_S390 *)(x))->gprs[3])
-#define PT_REGS_PARM3(x) (((PT_REGS_S390 *)(x))->gprs[4])
-#define PT_REGS_PARM4(x) (((PT_REGS_S390 *)(x))->gprs[5])
-#define PT_REGS_PARM5(x) (((PT_REGS_S390 *)(x))->gprs[6])
-#define PT_REGS_RET(x) (((PT_REGS_S390 *)(x))->gprs[14])
-/* Works only with CONFIG_FRAME_POINTER */
-#define PT_REGS_FP(x) (((PT_REGS_S390 *)(x))->gprs[11])
-#define PT_REGS_RC(x) (((PT_REGS_S390 *)(x))->gprs[2])
-#define PT_REGS_SP(x) (((PT_REGS_S390 *)(x))->gprs[15])
-#define PT_REGS_IP(x) (((PT_REGS_S390 *)(x))->psw.addr)
-
-#elif defined(bpf_target_arm)
-
-#define PT_REGS_PARM1(x) ((x)->uregs[0])
-#define PT_REGS_PARM2(x) ((x)->uregs[1])
-#define PT_REGS_PARM3(x) ((x)->uregs[2])
-#define PT_REGS_PARM4(x) ((x)->uregs[3])
-#define PT_REGS_PARM5(x) ((x)->uregs[4])
-#define PT_REGS_RET(x) ((x)->uregs[14])
-#define PT_REGS_FP(x) ((x)->uregs[11]) /* Works only with CONFIG_FRAME_POINTER */
-#define PT_REGS_RC(x) ((x)->uregs[0])
-#define PT_REGS_SP(x) ((x)->uregs[13])
-#define PT_REGS_IP(x) ((x)->uregs[12])
-
-#elif defined(bpf_target_arm64)
-
-/* arm64 provides struct user_pt_regs instead of struct pt_regs to userspace */
-struct pt_regs;
-#define PT_REGS_ARM64 const volatile struct user_pt_regs
-#define PT_REGS_PARM1(x) (((PT_REGS_ARM64 *)(x))->regs[0])
-#define PT_REGS_PARM2(x) (((PT_REGS_ARM64 *)(x))->regs[1])
-#define PT_REGS_PARM3(x) (((PT_REGS_ARM64 *)(x))->regs[2])
-#define PT_REGS_PARM4(x) (((PT_REGS_ARM64 *)(x))->regs[3])
-#define PT_REGS_PARM5(x) (((PT_REGS_ARM64 *)(x))->regs[4])
-#define PT_REGS_RET(x) (((PT_REGS_ARM64 *)(x))->regs[30])
-/* Works only with CONFIG_FRAME_POINTER */
-#define PT_REGS_FP(x) (((PT_REGS_ARM64 *)(x))->regs[29])
-#define PT_REGS_RC(x) (((PT_REGS_ARM64 *)(x))->regs[0])
-#define PT_REGS_SP(x) (((PT_REGS_ARM64 *)(x))->sp)
-#define PT_REGS_IP(x) (((PT_REGS_ARM64 *)(x))->pc)
-
-#elif defined(bpf_target_mips)
-
-#define PT_REGS_PARM1(x) ((x)->regs[4])
-#define PT_REGS_PARM2(x) ((x)->regs[5])
-#define PT_REGS_PARM3(x) ((x)->regs[6])
-#define PT_REGS_PARM4(x) ((x)->regs[7])
-#define PT_REGS_PARM5(x) ((x)->regs[8])
-#define PT_REGS_RET(x) ((x)->regs[31])
-#define PT_REGS_FP(x) ((x)->regs[30]) /* Works only with CONFIG_FRAME_POINTER */
-#define PT_REGS_RC(x) ((x)->regs[1])
-#define PT_REGS_SP(x) ((x)->regs[29])
-#define PT_REGS_IP(x) ((x)->cp0_epc)
-
-#elif defined(bpf_target_powerpc)
-
-#define PT_REGS_PARM1(x) ((x)->gpr[3])
-#define PT_REGS_PARM2(x) ((x)->gpr[4])
-#define PT_REGS_PARM3(x) ((x)->gpr[5])
-#define PT_REGS_PARM4(x) ((x)->gpr[6])
-#define PT_REGS_PARM5(x) ((x)->gpr[7])
-#define PT_REGS_RC(x) ((x)->gpr[3])
-#define PT_REGS_SP(x) ((x)->sp)
-#define PT_REGS_IP(x) ((x)->nip)
-
-#elif defined(bpf_target_sparc)
-
-#define PT_REGS_PARM1(x) ((x)->u_regs[UREG_I0])
-#define PT_REGS_PARM2(x) ((x)->u_regs[UREG_I1])
-#define PT_REGS_PARM3(x) ((x)->u_regs[UREG_I2])
-#define PT_REGS_PARM4(x) ((x)->u_regs[UREG_I3])
-#define PT_REGS_PARM5(x) ((x)->u_regs[UREG_I4])
-#define PT_REGS_RET(x) ((x)->u_regs[UREG_I7])
-#define PT_REGS_RC(x) ((x)->u_regs[UREG_I0])
-#define PT_REGS_SP(x) ((x)->u_regs[UREG_FP])
-
-/* Should this also be a bpf_target check for the sparc case? */
-#if defined(__arch64__)
-#define PT_REGS_IP(x) ((x)->tpc)
-#else
-#define PT_REGS_IP(x) ((x)->pc)
-#endif
-
-#endif
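The PT_REGS_* accessors defined above are the usual way kprobe programs read function arguments. A hedged usage sketch under the old-style declarations from this header; the probed function and argument choice are illustrative:

SEC("kprobe/ksys_write")
int trace_write(struct pt_regs *ctx)
{
	/* PT_REGS_PARM1() picks the first-argument register for the
	 * target arch selected by the #if ladder above */
	unsigned int fd = PT_REGS_PARM1(ctx);

	bpf_printk("write to fd %u\n", fd);
	return 0;
}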
-
-#if defined(bpf_target_powerpc)
-#define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ (ip) = (ctx)->link; })
-#define BPF_KRETPROBE_READ_RET_IP BPF_KPROBE_READ_RET_IP
-#elif defined(bpf_target_sparc)
-#define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ (ip) = PT_REGS_RET(ctx); })
-#define BPF_KRETPROBE_READ_RET_IP BPF_KPROBE_READ_RET_IP
-#else
-#define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ \
- bpf_probe_read(&(ip), sizeof(ip), (void *)PT_REGS_RET(ctx)); })
-#define BPF_KRETPROBE_READ_RET_IP(ip, ctx) ({ \
- bpf_probe_read(&(ip), sizeof(ip), \
- (void *)(PT_REGS_FP(ctx) + sizeof(ip))); })
-#endif
-
-/*
- * BPF_CORE_READ abstracts away bpf_probe_read() call and captures offset
- * relocation for source address using __builtin_preserve_access_index()
- * built-in, provided by Clang.
- *
- * __builtin_preserve_access_index() takes as an argument an expression of
- * taking an address of a field within struct/union. It makes compiler emit
- * a relocation, which records BTF type ID describing root struct/union and an
- * accessor string which describes exact embedded field that was used to take
- * an address. See detailed description of this relocation format and
- * semantics in comments to struct bpf_offset_reloc in libbpf_internal.h.
- *
- * This relocation allows libbpf to adjust BPF instruction to use correct
- * actual field offset, based on target kernel BTF type that matches original
- * (local) BTF, used to record relocation.
- */
-#define BPF_CORE_READ(dst, src) \
- bpf_probe_read((dst), sizeof(*(src)), \
- __builtin_preserve_access_index(src))
-
-#endif
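As the comment above describes, BPF_CORE_READ records a field-offset relocation against the local BTF. A minimal sketch of this two-argument form in the style of the CO-RE selftests, assuming a local task_struct flavor that carries only the field being read:

/* local "flavor" definition; only the fields actually accessed need to
 * exist, the real offset is relocated against the running kernel's BTF */
struct task_struct {
	int pid;
};

SEC("raw_tracepoint/sys_enter")
int probe_enter(void *ctx)
{
	struct task_struct *task = (void *)bpf_get_current_task();
	int pid = 0;

	/* emits a relocation via __builtin_preserve_access_index(),
	 * fixed up by libbpf at load time */
	BPF_CORE_READ(&pid, &task->pid);
	return 0;
}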
diff --git a/tools/testing/selftests/bpf/bpf_legacy.h b/tools/testing/selftests/bpf/bpf_legacy.h
new file mode 100644
index 000000000000..6f8988738bc1
--- /dev/null
+++ b/tools/testing/selftests/bpf/bpf_legacy.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
+#ifndef __BPF_LEGACY__
+#define __BPF_LEGACY__
+
+/*
+ * legacy bpf_map_def with extra fields supported only by bpf_load(); do not
+ * use it outside of samples/bpf
+ */
+struct bpf_map_def_legacy {
+ unsigned int type;
+ unsigned int key_size;
+ unsigned int value_size;
+ unsigned int max_entries;
+ unsigned int map_flags;
+ unsigned int inner_map_idx;
+ unsigned int numa_node;
+};
+
+#define BPF_ANNOTATE_KV_PAIR(name, type_key, type_val) \
+ struct ____btf_map_##name { \
+ type_key key; \
+ type_val value; \
+ }; \
+ struct ____btf_map_##name \
+ __attribute__ ((section(".maps." #name), used)) \
+ ____btf_map_##name = { }
+
+/* llvm builtin functions that an eBPF C program may use to
+ * emit BPF_LD_ABS and BPF_LD_IND instructions
+ */
+unsigned long long load_byte(void *skb,
+ unsigned long long off) asm("llvm.bpf.load.byte");
+unsigned long long load_half(void *skb,
+ unsigned long long off) asm("llvm.bpf.load.half");
+unsigned long long load_word(void *skb,
+ unsigned long long off) asm("llvm.bpf.load.word");
+
+#endif
+
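The load_* builtins above compile down to the legacy BPF_LD_ABS instructions. A hedged sketch of classic usage in a socket filter; the offset assumes an untagged Ethernet + IPv4 frame, and SEC()/struct __sk_buff come from the usual bpf headers:

#include <linux/if_ether.h>
#include <linux/in.h>

SEC("socket")
int count_tcp(struct __sk_buff *skb)
{
	/* absolute byte load via llvm.bpf.load.byte; offset 9 into the
	 * IPv4 header is the protocol field */
	return load_byte(skb, ETH_HLEN + 9) == IPPROTO_TCP;
}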
diff --git a/tools/testing/selftests/bpf/bpf_trace_helpers.h b/tools/testing/selftests/bpf/bpf_trace_helpers.h
new file mode 100644
index 000000000000..c76a214a53b0
--- /dev/null
+++ b/tools/testing/selftests/bpf/bpf_trace_helpers.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __BPF_TRACE_HELPERS_H
+#define __BPF_TRACE_HELPERS_H
+
+#include "bpf_helpers.h"
+
+#define __BPF_MAP_0(i, m, v, ...) v
+#define __BPF_MAP_1(i, m, v, t, a, ...) m(t, a, ctx[i])
+#define __BPF_MAP_2(i, m, v, t, a, ...) m(t, a, ctx[i]), __BPF_MAP_1(i+1, m, v, __VA_ARGS__)
+#define __BPF_MAP_3(i, m, v, t, a, ...) m(t, a, ctx[i]), __BPF_MAP_2(i+1, m, v, __VA_ARGS__)
+#define __BPF_MAP_4(i, m, v, t, a, ...) m(t, a, ctx[i]), __BPF_MAP_3(i+1, m, v, __VA_ARGS__)
+#define __BPF_MAP_5(i, m, v, t, a, ...) m(t, a, ctx[i]), __BPF_MAP_4(i+1, m, v, __VA_ARGS__)
+#define __BPF_MAP_6(i, m, v, t, a, ...) m(t, a, ctx[i]), __BPF_MAP_5(i+1, m, v, __VA_ARGS__)
+#define __BPF_MAP_7(i, m, v, t, a, ...) m(t, a, ctx[i]), __BPF_MAP_6(i+1, m, v, __VA_ARGS__)
+#define __BPF_MAP_8(i, m, v, t, a, ...) m(t, a, ctx[i]), __BPF_MAP_7(i+1, m, v, __VA_ARGS__)
+#define __BPF_MAP_9(i, m, v, t, a, ...) m(t, a, ctx[i]), __BPF_MAP_8(i+1, m, v, __VA_ARGS__)
+#define __BPF_MAP_10(i, m, v, t, a, ...) m(t, a, ctx[i]), __BPF_MAP_9(i+1, m, v, __VA_ARGS__)
+#define __BPF_MAP_11(i, m, v, t, a, ...) m(t, a, ctx[i]), __BPF_MAP_10(i+1, m, v, __VA_ARGS__)
+#define __BPF_MAP_12(i, m, v, t, a, ...) m(t, a, ctx[i]), __BPF_MAP_11(i+1, m, v, __VA_ARGS__)
+#define __BPF_MAP(n, ...) __BPF_MAP_##n(0, __VA_ARGS__)
+
+/* On BPF, sizeof(void *) is always 8, so ctx values can be cast directly
+ * to pointer-sized argument types; no intermediate cast to long is needed
+ * to avoid a compiler warning.
+ */
+#define __BPF_CAST(t, a, ctx) (t) ctx
+#define __BPF_V void
+#define __BPF_N
+
+#define __BPF_DECL_ARGS(t, a, ctx) t a
+
+#define BPF_TRACE_x(x, sec_name, fname, ret_type, ...) \
+static __always_inline ret_type \
+____##fname(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__)); \
+ \
+SEC(sec_name) \
+ret_type fname(__u64 *ctx) \
+{ \
+ return ____##fname(__BPF_MAP(x, __BPF_CAST, __BPF_N, __VA_ARGS__));\
+} \
+ \
+static __always_inline \
+ret_type ____##fname(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__))
+
+#define BPF_TRACE_0(sec, fname, ...) BPF_TRACE_x(0, sec, fname, int, __VA_ARGS__)
+#define BPF_TRACE_1(sec, fname, ...) BPF_TRACE_x(1, sec, fname, int, __VA_ARGS__)
+#define BPF_TRACE_2(sec, fname, ...) BPF_TRACE_x(2, sec, fname, int, __VA_ARGS__)
+#define BPF_TRACE_3(sec, fname, ...) BPF_TRACE_x(3, sec, fname, int, __VA_ARGS__)
+#define BPF_TRACE_4(sec, fname, ...) BPF_TRACE_x(4, sec, fname, int, __VA_ARGS__)
+#define BPF_TRACE_5(sec, fname, ...) BPF_TRACE_x(5, sec, fname, int, __VA_ARGS__)
+#define BPF_TRACE_6(sec, fname, ...) BPF_TRACE_x(6, sec, fname, int, __VA_ARGS__)
+#define BPF_TRACE_7(sec, fname, ...) BPF_TRACE_x(7, sec, fname, int, __VA_ARGS__)
+#define BPF_TRACE_8(sec, fname, ...) BPF_TRACE_x(8, sec, fname, int, __VA_ARGS__)
+#define BPF_TRACE_9(sec, fname, ...) BPF_TRACE_x(9, sec, fname, int, __VA_ARGS__)
+#define BPF_TRACE_10(sec, fname, ...) BPF_TRACE_x(10, sec, fname, int, __VA_ARGS__)
+#define BPF_TRACE_11(sec, fname, ...) BPF_TRACE_x(11, sec, fname, int, __VA_ARGS__)
+#define BPF_TRACE_12(sec, fname, ...) BPF_TRACE_x(12, sec, fname, int, __VA_ARGS__)
+
+#endif
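A hedged sketch of what these wrappers buy: a typed function body instead of raw ctx[] indexing. The section name and argument types mirror the fentry selftests elsewhere in this series, but this particular handler is illustrative:

BPF_TRACE_2("fentry/bpf_fentry_test2", test2, int, a, __u64, b)
{
	/* a and b arrive as ctx[0]/ctx[1], cast for us by __BPF_CAST above */
	return a == 2 && b == 3 ? 0 : 1;
}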
diff --git a/tools/testing/selftests/bpf/cgroup_helpers.c b/tools/testing/selftests/bpf/cgroup_helpers.c
index e95c33e333a4..0fb910df5387 100644
--- a/tools/testing/selftests/bpf/cgroup_helpers.c
+++ b/tools/testing/selftests/bpf/cgroup_helpers.c
@@ -41,7 +41,7 @@
*
* If successful, 0 is returned.
*/
-int enable_all_controllers(char *cgroup_path)
+static int enable_all_controllers(char *cgroup_path)
{
char path[PATH_MAX + 1];
char buf[PATH_MAX];
@@ -98,7 +98,7 @@ int enable_all_controllers(char *cgroup_path)
*/
int setup_cgroup_environment(void)
{
- char cgroup_workdir[PATH_MAX + 1];
+ char cgroup_workdir[PATH_MAX - 24];
format_cgroup_path(cgroup_workdir, "");
diff --git a/tools/testing/selftests/bpf/prog_tests/attach_probe.c b/tools/testing/selftests/bpf/prog_tests/attach_probe.c
index 5ecc267d98b0..a83111a32d4a 100644
--- a/tools/testing/selftests/bpf/prog_tests/attach_probe.c
+++ b/tools/testing/selftests/bpf/prog_tests/attach_probe.c
@@ -1,6 +1,24 @@
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
+#define EMBED_FILE(NAME, PATH) \
+asm ( \
+" .pushsection \".rodata\", \"a\", @progbits \n" \
+" .global "#NAME"_data \n" \
+#NAME"_data: \n" \
+" .incbin \"" PATH "\" \n" \
+#NAME"_data_end: \n" \
+" .global "#NAME"_size \n" \
+" .type "#NAME"_size, @object \n" \
+" .size "#NAME"_size, 4 \n" \
+" .align 4, \n" \
+#NAME"_size: \n" \
+" .int "#NAME"_data_end - "#NAME"_data \n" \
+" .popsection \n" \
+); \
+extern char NAME##_data[]; \
+extern int NAME##_size;
+
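EMBED_FILE splices the named file's bytes into .rodata at build time and exposes them through the two symbols it declares. A hedged sketch of consuming them, with a hypothetical file name (the test below does the same for test_attach_probe.o):

EMBED_FILE(myobj, "myobj.o");	/* hypothetical file name */

static struct bpf_object *open_embedded(void)
{
	/* hand the in-memory ELF image straight to libbpf */
	return bpf_object__open_mem(myobj_data, myobj_size, NULL);
}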
ssize_t get_base_addr() {
size_t start;
char buf[256];
@@ -21,6 +39,8 @@ ssize_t get_base_addr() {
return -EINVAL;
}
+EMBED_FILE(probe, "test_attach_probe.o");
+
void test_attach_probe(void)
{
const char *kprobe_name = "kprobe/sys_nanosleep";
@@ -29,11 +49,15 @@ void test_attach_probe(void)
const char *uretprobe_name = "uretprobe/trigger_func";
const int kprobe_idx = 0, kretprobe_idx = 1;
const int uprobe_idx = 2, uretprobe_idx = 3;
- const char *file = "./test_attach_probe.o";
+ const char *obj_name = "attach_probe";
+ DECLARE_LIBBPF_OPTS(bpf_object_open_opts, open_opts,
+ .object_name = obj_name,
+ .relaxed_maps = true,
+ );
struct bpf_program *kprobe_prog, *kretprobe_prog;
struct bpf_program *uprobe_prog, *uretprobe_prog;
struct bpf_object *obj;
- int err, prog_fd, duration = 0, res;
+ int err, duration = 0, res;
struct bpf_link *kprobe_link = NULL;
struct bpf_link *kretprobe_link = NULL;
struct bpf_link *uprobe_link = NULL;
@@ -48,11 +72,16 @@ void test_attach_probe(void)
return;
uprobe_offset = (size_t)&get_base_addr - base_addr;
- /* load programs */
- err = bpf_prog_load(file, BPF_PROG_TYPE_KPROBE, &obj, &prog_fd);
- if (CHECK(err, "obj_load", "err %d errno %d\n", err, errno))
+ /* open object */
+ obj = bpf_object__open_mem(probe_data, probe_size, &open_opts);
+ if (CHECK(IS_ERR(obj), "obj_open_mem", "err %ld\n", PTR_ERR(obj)))
return;
+ if (CHECK(strcmp(bpf_object__name(obj), obj_name), "obj_name",
+ "wrong obj name '%s', expected '%s'\n",
+ bpf_object__name(obj), obj_name))
+ goto cleanup;
+
kprobe_prog = bpf_object__find_program_by_title(obj, kprobe_name);
if (CHECK(!kprobe_prog, "find_probe",
"prog '%s' not found\n", kprobe_name))
@@ -70,6 +99,11 @@ void test_attach_probe(void)
"prog '%s' not found\n", uretprobe_name))
goto cleanup;
+ /* create maps && load programs */
+ err = bpf_object__load(obj);
+ if (CHECK(err, "obj_load", "err %d\n", err))
+ goto cleanup;
+
/* load maps */
results_map_fd = bpf_find_map(__func__, obj, "results_map");
if (CHECK(results_map_fd < 0, "find_results_map",
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c b/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c
index 1c01ee2600a9..9486c13af6b2 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c
@@ -15,6 +15,8 @@ static int libbpf_debug_print(enum libbpf_print_level level,
return 0;
}
+extern int extra_prog_load_log_flags;
+
static int check_load(const char *file, enum bpf_prog_type type)
{
struct bpf_prog_load_attr attr;
@@ -24,7 +26,7 @@ static int check_load(const char *file, enum bpf_prog_type type)
memset(&attr, 0, sizeof(struct bpf_prog_load_attr));
attr.file = file;
attr.prog_type = type;
- attr.log_level = 4;
+ attr.log_level = 4 | extra_prog_load_log_flags;
attr.prog_flags = BPF_F_TEST_RND_HI32;
err = bpf_prog_load_xattr(&attr, &obj, &prog_fd);
bpf_object__close(obj);
diff --git a/tools/testing/selftests/bpf/test_btf_dump.c b/tools/testing/selftests/bpf/prog_tests/btf_dump.c
index 6e75dd3cb14f..7390d3061065 100644
--- a/tools/testing/selftests/bpf/test_btf_dump.c
+++ b/tools/testing/selftests/bpf/prog_tests/btf_dump.c
@@ -1,36 +1,26 @@
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <unistd.h>
-#include <errno.h>
-#include <linux/err.h>
-#include <btf.h>
-
-#define CHECK(condition, format...) ({ \
- int __ret = !!(condition); \
- if (__ret) { \
- fprintf(stderr, "%s:%d:FAIL ", __func__, __LINE__); \
- fprintf(stderr, format); \
- } \
- __ret; \
-})
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+#include <bpf/btf.h>
+
+static int duration = 0;
void btf_dump_printf(void *ctx, const char *fmt, va_list args)
{
vfprintf(ctx, fmt, args);
}
-struct btf_dump_test_case {
+static struct btf_dump_test_case {
const char *name;
+ const char *file;
struct btf_dump_opts opts;
} btf_dump_test_cases[] = {
- {.name = "btf_dump_test_case_syntax", .opts = {}},
- {.name = "btf_dump_test_case_ordering", .opts = {}},
- {.name = "btf_dump_test_case_padding", .opts = {}},
- {.name = "btf_dump_test_case_packing", .opts = {}},
- {.name = "btf_dump_test_case_bitfields", .opts = {}},
- {.name = "btf_dump_test_case_multidim", .opts = {}},
- {.name = "btf_dump_test_case_namespacing", .opts = {}},
+ {"btf_dump: syntax", "btf_dump_test_case_syntax", {}},
+ {"btf_dump: ordering", "btf_dump_test_case_ordering", {}},
+ {"btf_dump: padding", "btf_dump_test_case_padding", {}},
+ {"btf_dump: packing", "btf_dump_test_case_packing", {}},
+ {"btf_dump: bitfields", "btf_dump_test_case_bitfields", {}},
+ {"btf_dump: multidim", "btf_dump_test_case_multidim", {}},
+ {"btf_dump: namespacing", "btf_dump_test_case_namespacing", {}},
};
static int btf_dump_all_types(const struct btf *btf,
@@ -55,55 +45,51 @@ done:
return err;
}
-int test_btf_dump_case(int n, struct btf_dump_test_case *test_case)
+static int test_btf_dump_case(int n, struct btf_dump_test_case *t)
{
char test_file[256], out_file[256], diff_cmd[1024];
struct btf *btf = NULL;
int err = 0, fd = -1;
FILE *f = NULL;
- fprintf(stderr, "Test case #%d (%s): ", n, test_case->name);
-
- snprintf(test_file, sizeof(test_file), "%s.o", test_case->name);
+ snprintf(test_file, sizeof(test_file), "%s.o", t->file);
btf = btf__parse_elf(test_file, NULL);
- if (CHECK(IS_ERR(btf),
+ if (CHECK(IS_ERR(btf), "btf_parse_elf",
"failed to load test BTF: %ld\n", PTR_ERR(btf))) {
err = -PTR_ERR(btf);
btf = NULL;
goto done;
}
- snprintf(out_file, sizeof(out_file),
- "/tmp/%s.output.XXXXXX", test_case->name);
+ snprintf(out_file, sizeof(out_file), "/tmp/%s.output.XXXXXX", t->file);
fd = mkstemp(out_file);
- if (CHECK(fd < 0, "failed to create temp output file: %d\n", fd)) {
+ if (CHECK(fd < 0, "create_tmp", "failed to create file: %d\n", fd)) {
err = fd;
goto done;
}
f = fdopen(fd, "w");
- if (CHECK(f == NULL, "failed to open temp output file: %s(%d)\n",
+ if (CHECK(f == NULL, "open_tmp", "failed to open file: %s(%d)\n",
strerror(errno), errno)) {
close(fd);
goto done;
}
- test_case->opts.ctx = f;
- err = btf_dump_all_types(btf, &test_case->opts);
+ t->opts.ctx = f;
+ err = btf_dump_all_types(btf, &t->opts);
fclose(f);
close(fd);
- if (CHECK(err, "failure during C dumping: %d\n", err)) {
+ if (CHECK(err, "btf_dump", "failure during C dumping: %d\n", err)) {
goto done;
}
- snprintf(test_file, sizeof(test_file), "progs/%s.c", test_case->name);
+ snprintf(test_file, sizeof(test_file), "progs/%s.c", t->file);
if (access(test_file, R_OK) == -1)
/*
* When the test is run with O=, kselftest copies TEST_FILES
* without preserving the directory structure.
*/
- snprintf(test_file, sizeof(test_file), "%s.c",
- test_case->name);
+ snprintf(test_file, sizeof(test_file), "%s.c", t->file);
/*
* Diff test output and expected test output, contained between
* START-EXPECTED-OUTPUT and END-EXPECTED-OUTPUT lines in test case.
@@ -118,33 +104,27 @@ int test_btf_dump_case(int n, struct btf_dump_test_case *test_case)
"out {sub(/^[ \\t]*\\*/, \"\"); print}' '%s' | diff -u - '%s'",
test_file, out_file);
err = system(diff_cmd);
- if (CHECK(err,
+ if (CHECK(err, "diff",
"differing test output, output=%s, err=%d, diff cmd:\n%s\n",
out_file, err, diff_cmd))
goto done;
remove(out_file);
- fprintf(stderr, "OK\n");
done:
btf__free(btf);
return err;
}
-int main() {
- int test_case_cnt, i, err, failed = 0;
-
- test_case_cnt = sizeof(btf_dump_test_cases) /
- sizeof(btf_dump_test_cases[0]);
+void test_btf_dump() {
+ int i;
- for (i = 0; i < test_case_cnt; i++) {
- err = test_btf_dump_case(i, &btf_dump_test_cases[i]);
- if (err)
- failed++;
- }
+ for (i = 0; i < ARRAY_SIZE(btf_dump_test_cases); i++) {
+ struct btf_dump_test_case *t = &btf_dump_test_cases[i];
- fprintf(stderr, "%d tests succeeded, %d tests failed.\n",
- test_case_cnt - failed, failed);
+ if (!test__start_subtest(t->name))
+ continue;
- return failed;
+ test_btf_dump_case(i, &btf_dump_test_cases[i]);
+ }
}
diff --git a/tools/testing/selftests/bpf/prog_tests/core_reloc.c b/tools/testing/selftests/bpf/prog_tests/core_reloc.c
index f3863f976a48..05fe85281ff7 100644
--- a/tools/testing/selftests/bpf/prog_tests/core_reloc.c
+++ b/tools/testing/selftests/bpf/prog_tests/core_reloc.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include "progs/core_reloc_types.h"
+#include <sys/mman.h>
+#include <sys/syscall.h>
#define STRUCT_TO_CHAR_PTR(struct_name) (const char *)&(struct struct_name)
@@ -174,6 +176,82 @@
.fails = true, \
}
+#define EXISTENCE_CASE_COMMON(name) \
+ .case_name = #name, \
+ .bpf_obj_file = "test_core_reloc_existence.o", \
+ .btf_src_file = "btf__core_reloc_" #name ".o", \
+ .relaxed_core_relocs = true
+
+#define EXISTENCE_ERR_CASE(name) { \
+ EXISTENCE_CASE_COMMON(name), \
+ .fails = true, \
+}
+
+#define BITFIELDS_CASE_COMMON(objfile, test_name_prefix, name) \
+ .case_name = test_name_prefix#name, \
+ .bpf_obj_file = objfile, \
+ .btf_src_file = "btf__core_reloc_" #name ".o"
+
+#define BITFIELDS_CASE(name, ...) { \
+ BITFIELDS_CASE_COMMON("test_core_reloc_bitfields_probed.o", \
+ "direct:", name), \
+ .input = STRUCT_TO_CHAR_PTR(core_reloc_##name) __VA_ARGS__, \
+ .input_len = sizeof(struct core_reloc_##name), \
+ .output = STRUCT_TO_CHAR_PTR(core_reloc_bitfields_output) \
+ __VA_ARGS__, \
+ .output_len = sizeof(struct core_reloc_bitfields_output), \
+}, { \
+ BITFIELDS_CASE_COMMON("test_core_reloc_bitfields_direct.o", \
+ "probed:", name), \
+ .input = STRUCT_TO_CHAR_PTR(core_reloc_##name) __VA_ARGS__, \
+ .input_len = sizeof(struct core_reloc_##name), \
+ .output = STRUCT_TO_CHAR_PTR(core_reloc_bitfields_output) \
+ __VA_ARGS__, \
+ .output_len = sizeof(struct core_reloc_bitfields_output), \
+ .direct_raw_tp = true, \
+}
+
+
+#define BITFIELDS_ERR_CASE(name) { \
+ BITFIELDS_CASE_COMMON("test_core_reloc_bitfields_probed.o", \
+ "probed:", name), \
+ .fails = true, \
+}, { \
+ BITFIELDS_CASE_COMMON("test_core_reloc_bitfields_direct.o", \
+ "direct:", name), \
+ .direct_raw_tp = true, \
+ .fails = true, \
+}
+
+#define SIZE_CASE_COMMON(name) \
+ .case_name = #name, \
+ .bpf_obj_file = "test_core_reloc_size.o", \
+ .btf_src_file = "btf__core_reloc_" #name ".o", \
+ .relaxed_core_relocs = true
+
+#define SIZE_OUTPUT_DATA(type) \
+ STRUCT_TO_CHAR_PTR(core_reloc_size_output) { \
+ .int_sz = sizeof(((type *)0)->int_field), \
+ .struct_sz = sizeof(((type *)0)->struct_field), \
+ .union_sz = sizeof(((type *)0)->union_field), \
+ .arr_sz = sizeof(((type *)0)->arr_field), \
+ .arr_elem_sz = sizeof(((type *)0)->arr_field[0]), \
+ .ptr_sz = sizeof(((type *)0)->ptr_field), \
+ .enum_sz = sizeof(((type *)0)->enum_field), \
+ }
+
+#define SIZE_CASE(name) { \
+ SIZE_CASE_COMMON(name), \
+ .input_len = 0, \
+ .output = SIZE_OUTPUT_DATA(struct core_reloc_##name), \
+ .output_len = sizeof(struct core_reloc_size_output), \
+}
+
+#define SIZE_ERR_CASE(name) { \
+ SIZE_CASE_COMMON(name), \
+ .fails = true, \
+}
+
struct core_reloc_test_case {
const char *case_name;
const char *bpf_obj_file;
@@ -183,6 +261,8 @@ struct core_reloc_test_case {
const char *output;
int output_len;
bool fails;
+ bool relaxed_core_relocs;
+ bool direct_raw_tp;
};
static struct core_reloc_test_case test_cases[] = {
@@ -193,8 +273,12 @@ static struct core_reloc_test_case test_cases[] = {
.btf_src_file = NULL, /* load from /lib/modules/$(uname -r) */
.input = "",
.input_len = 0,
- .output = "\1", /* true */
- .output_len = 1,
+ .output = STRUCT_TO_CHAR_PTR(core_reloc_kernel_output) {
+ .valid = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, },
+ .comm = "test_progs",
+ .comm_len = sizeof("test_progs"),
+ },
+ .output_len = sizeof(struct core_reloc_kernel_output),
},
/* validate BPF program can use multiple flavors to match against
@@ -255,12 +339,6 @@ static struct core_reloc_test_case test_cases[] = {
INTS_CASE(ints___bool),
INTS_CASE(ints___reverse_sign),
- INTS_ERR_CASE(ints___err_bitfield),
- INTS_ERR_CASE(ints___err_wrong_sz_8),
- INTS_ERR_CASE(ints___err_wrong_sz_16),
- INTS_ERR_CASE(ints___err_wrong_sz_32),
- INTS_ERR_CASE(ints___err_wrong_sz_64),
-
/* validate edge cases of capturing relocations */
{
.case_name = "misc",
@@ -279,43 +357,155 @@ static struct core_reloc_test_case test_cases[] = {
},
.output_len = sizeof(struct core_reloc_misc_output),
},
+
+ /* validate field existence checks */
+ {
+ EXISTENCE_CASE_COMMON(existence),
+ .input = STRUCT_TO_CHAR_PTR(core_reloc_existence) {
+ .a = 1,
+ .b = 2,
+ .c = 3,
+ .arr = { 4 },
+ .s = { .x = 5 },
+ },
+ .input_len = sizeof(struct core_reloc_existence),
+ .output = STRUCT_TO_CHAR_PTR(core_reloc_existence_output) {
+ .a_exists = 1,
+ .b_exists = 1,
+ .c_exists = 1,
+ .arr_exists = 1,
+ .s_exists = 1,
+ .a_value = 1,
+ .b_value = 2,
+ .c_value = 3,
+ .arr_value = 4,
+ .s_value = 5,
+ },
+ .output_len = sizeof(struct core_reloc_existence_output),
+ },
+ {
+ EXISTENCE_CASE_COMMON(existence___minimal),
+ .input = STRUCT_TO_CHAR_PTR(core_reloc_existence___minimal) {
+ .a = 42,
+ },
+ .input_len = sizeof(struct core_reloc_existence___minimal),
+ .output = STRUCT_TO_CHAR_PTR(core_reloc_existence_output) {
+ .a_exists = 1,
+ .b_exists = 0,
+ .c_exists = 0,
+ .arr_exists = 0,
+ .s_exists = 0,
+ .a_value = 42,
+ .b_value = 0xff000002u,
+ .c_value = 0xff000003u,
+ .arr_value = 0xff000004u,
+ .s_value = 0xff000005u,
+ },
+ .output_len = sizeof(struct core_reloc_existence_output),
+ },
+
+ EXISTENCE_ERR_CASE(existence__err_int_sz),
+ EXISTENCE_ERR_CASE(existence__err_int_type),
+ EXISTENCE_ERR_CASE(existence__err_int_kind),
+ EXISTENCE_ERR_CASE(existence__err_arr_kind),
+ EXISTENCE_ERR_CASE(existence__err_arr_value_type),
+ EXISTENCE_ERR_CASE(existence__err_struct_type),
+
+ /* bitfield relocation checks */
+ BITFIELDS_CASE(bitfields, {
+ .ub1 = 1,
+ .ub2 = 2,
+ .ub7 = 96,
+ .sb4 = -7,
+ .sb20 = -0x76543,
+ .u32 = 0x80000000,
+ .s32 = -0x76543210,
+ }),
+ BITFIELDS_CASE(bitfields___bit_sz_change, {
+ .ub1 = 6,
+ .ub2 = 0xABCDE,
+ .ub7 = 1,
+ .sb4 = -1,
+ .sb20 = -0x17654321,
+ .u32 = 0xBEEF,
+ .s32 = -0x3FEDCBA987654321,
+ }),
+ BITFIELDS_CASE(bitfields___bitfield_vs_int, {
+ .ub1 = 0xFEDCBA9876543210,
+ .ub2 = 0xA6,
+ .ub7 = -0x7EDCBA987654321,
+ .sb4 = -0x6123456789ABCDE,
+ .sb20 = 0xD00D,
+ .u32 = -0x76543,
+ .s32 = 0x0ADEADBEEFBADB0B,
+ }),
+ BITFIELDS_CASE(bitfields___just_big_enough, {
+ .ub1 = 0xF,
+ .ub2 = 0x0812345678FEDCBA,
+ }),
+ BITFIELDS_ERR_CASE(bitfields___err_too_big_bitfield),
+
+ /* size relocation checks */
+ SIZE_CASE(size),
+ SIZE_CASE(size___diff_sz),
};
struct data {
char in[256];
char out[256];
+ uint64_t my_pid_tgid;
};
+static size_t roundup_page(size_t sz)
+{
+ long page_size = sysconf(_SC_PAGE_SIZE);
+ return (sz + page_size - 1) / page_size * page_size;
+}
+
void test_core_reloc(void)
{
- const char *probe_name = "raw_tracepoint/sys_enter";
+ const size_t mmap_sz = roundup_page(sizeof(struct data));
struct bpf_object_load_attr load_attr = {};
struct core_reloc_test_case *test_case;
+ const char *tp_name, *probe_name;
int err, duration = 0, i, equal;
struct bpf_link *link = NULL;
struct bpf_map *data_map;
struct bpf_program *prog;
struct bpf_object *obj;
- const int zero = 0;
- struct data data;
+ uint64_t my_pid_tgid;
+ struct data *data;
+ void *mmap_data = NULL;
+
+ my_pid_tgid = getpid() | ((uint64_t)syscall(SYS_gettid) << 32);
for (i = 0; i < ARRAY_SIZE(test_cases); i++) {
test_case = &test_cases[i];
-
if (!test__start_subtest(test_case->case_name))
continue;
- obj = bpf_object__open(test_case->bpf_obj_file);
- if (CHECK(IS_ERR_OR_NULL(obj), "obj_open",
- "failed to open '%s': %ld\n",
+ DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
+ .relaxed_core_relocs = test_case->relaxed_core_relocs,
+ );
+
+ obj = bpf_object__open_file(test_case->bpf_obj_file, &opts);
+ if (CHECK(IS_ERR(obj), "obj_open", "failed to open '%s': %ld\n",
test_case->bpf_obj_file, PTR_ERR(obj)))
continue;
+ /* for typed raw tracepoints (tp_btf), NULL must be passed as the
+ * tracepoint name; it is derived from the program's section name */
+ if (test_case->direct_raw_tp) {
+ probe_name = "tp_btf/sys_enter";
+ tp_name = NULL;
+ } else {
+ probe_name = "raw_tracepoint/sys_enter";
+ tp_name = "sys_enter";
+ }
+
prog = bpf_object__find_program_by_title(obj, probe_name);
if (CHECK(!prog, "find_probe",
"prog '%s' not found\n", probe_name))
goto cleanup;
- bpf_program__set_type(prog, BPF_PROG_TYPE_RAW_TRACEPOINT);
load_attr.obj = obj;
load_attr.log_level = 0;
@@ -332,33 +522,32 @@ void test_core_reloc(void)
goto cleanup;
}
- link = bpf_program__attach_raw_tracepoint(prog, "sys_enter");
- if (CHECK(IS_ERR(link), "attach_raw_tp", "err %ld\n",
- PTR_ERR(link)))
- goto cleanup;
-
data_map = bpf_object__find_map_by_name(obj, "test_cor.bss");
if (CHECK(!data_map, "find_data_map", "data map not found\n"))
goto cleanup;
- memset(&data, 0, sizeof(data));
- memcpy(data.in, test_case->input, test_case->input_len);
+ mmap_data = mmap(NULL, mmap_sz, PROT_READ | PROT_WRITE,
+ MAP_SHARED, bpf_map__fd(data_map), 0);
+ if (CHECK(mmap_data == MAP_FAILED, "mmap",
+ ".bss mmap failed: %d", errno)) {
+ mmap_data = NULL;
+ goto cleanup;
+ }
+ data = mmap_data;
+
+ memset(mmap_data, 0, sizeof(*data));
+ memcpy(data->in, test_case->input, test_case->input_len);
+ data->my_pid_tgid = my_pid_tgid;
- err = bpf_map_update_elem(bpf_map__fd(data_map),
- &zero, &data, 0);
- if (CHECK(err, "update_data_map",
- "failed to update .data map: %d\n", err))
+ link = bpf_program__attach_raw_tracepoint(prog, tp_name);
+ if (CHECK(IS_ERR(link), "attach_raw_tp", "err %ld\n",
+ PTR_ERR(link)))
goto cleanup;
/* trigger test run */
usleep(1);
- err = bpf_map_lookup_elem(bpf_map__fd(data_map), &zero, &data);
- if (CHECK(err, "get_result",
- "failed to get output data: %d\n", err))
- goto cleanup;
-
- equal = memcmp(data.out, test_case->output,
+ equal = memcmp(data->out, test_case->output,
test_case->output_len) == 0;
if (CHECK(!equal, "check_result",
"input/output data don't match\n")) {
@@ -370,12 +559,16 @@ void test_core_reloc(void)
}
for (j = 0; j < test_case->output_len; j++) {
printf("output byte #%d: EXP 0x%02hhx GOT 0x%02hhx\n",
- j, test_case->output[j], data.out[j]);
+ j, test_case->output[j], data->out[j]);
}
goto cleanup;
}
cleanup:
+ if (mmap_data) {
+ CHECK_FAIL(munmap(mmap_data, mmap_sz));
+ mmap_data = NULL;
+ }
if (!IS_ERR_OR_NULL(link)) {
bpf_link__destroy(link);
link = NULL;
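The switch above from bpf_map_update_elem()/bpf_map_lookup_elem() to mmap() relies on global-data maps being memory-mappable. A hedged standalone sketch of the pattern, reusing the names from this test (struct data, data_map, roundup_page) purely for illustration:

	struct data *d;
	const size_t sz = roundup_page(sizeof(*d));	/* page-align, as above */

	d = mmap(NULL, sz, PROT_READ | PROT_WRITE, MAP_SHARED,
		 bpf_map__fd(data_map), 0);
	if (d == MAP_FAILED)
		return;		/* map not mmapable, or kernel lacks support */

	d->my_pid_tgid = my_pid_tgid;	/* immediately visible to the BPF prog */
	/* ... trigger the program, then read d->out directly ... */
	munmap(d, sz);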
diff --git a/tools/testing/selftests/bpf/prog_tests/fentry_fexit.c b/tools/testing/selftests/bpf/prog_tests/fentry_fexit.c
new file mode 100644
index 000000000000..40bcff2cc274
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/fentry_fexit.c
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 Facebook */
+#include <test_progs.h>
+
+void test_fentry_fexit(void)
+{
+ struct bpf_prog_load_attr attr_fentry = {
+ .file = "./fentry_test.o",
+ };
+ struct bpf_prog_load_attr attr_fexit = {
+ .file = "./fexit_test.o",
+ };
+
+ struct bpf_object *obj_fentry = NULL, *obj_fexit = NULL, *pkt_obj;
+ struct bpf_map *data_map_fentry, *data_map_fexit;
+ char fentry_name[] = "fentry/bpf_fentry_testX";
+ char fexit_name[] = "fexit/bpf_fentry_testX";
+ int err, pkt_fd, kfree_skb_fd, i;
+ struct bpf_link *link[12] = {};
+ struct bpf_program *prog[12];
+ __u32 duration, retval;
+ const int zero = 0;
+ u64 result[12];
+
+ err = bpf_prog_load("./test_pkt_access.o", BPF_PROG_TYPE_SCHED_CLS,
+ &pkt_obj, &pkt_fd);
+ if (CHECK(err, "prog_load sched cls", "err %d errno %d\n", err, errno))
+ return;
+ err = bpf_prog_load_xattr(&attr_fentry, &obj_fentry, &kfree_skb_fd);
+ if (CHECK(err, "prog_load fail", "err %d errno %d\n", err, errno))
+ goto close_prog;
+ err = bpf_prog_load_xattr(&attr_fexit, &obj_fexit, &kfree_skb_fd);
+ if (CHECK(err, "prog_load fail", "err %d errno %d\n", err, errno))
+ goto close_prog;
+
+ for (i = 0; i < 6; i++) {
+ fentry_name[sizeof(fentry_name) - 2] = '1' + i;
+ prog[i] = bpf_object__find_program_by_title(obj_fentry, fentry_name);
+ if (CHECK(!prog[i], "find_prog", "prog %s not found\n", fentry_name))
+ goto close_prog;
+ link[i] = bpf_program__attach_trace(prog[i]);
+ if (CHECK(IS_ERR(link[i]), "attach_trace", "failed to link\n"))
+ goto close_prog;
+ }
+ data_map_fentry = bpf_object__find_map_by_name(obj_fentry, "fentry_t.bss");
+ if (CHECK(!data_map_fentry, "find_data_map", "data map not found\n"))
+ goto close_prog;
+
+ for (i = 6; i < 12; i++) {
+ fexit_name[sizeof(fexit_name) - 2] = '1' + i - 6;
+ prog[i] = bpf_object__find_program_by_title(obj_fexit, fexit_name);
+ if (CHECK(!prog[i], "find_prog", "prog %s not found\n", fexit_name))
+ goto close_prog;
+ link[i] = bpf_program__attach_trace(prog[i]);
+ if (CHECK(IS_ERR(link[i]), "attach_trace", "failed to link\n"))
+ goto close_prog;
+ }
+ data_map_fexit = bpf_object__find_map_by_name(obj_fexit, "fexit_te.bss");
+ if (CHECK(!data_map_fexit, "find_data_map", "data map not found\n"))
+ goto close_prog;
+
+ err = bpf_prog_test_run(pkt_fd, 1, &pkt_v6, sizeof(pkt_v6),
+ NULL, NULL, &retval, &duration);
+ CHECK(err || retval, "ipv6",
+ "err %d errno %d retval %d duration %d\n",
+ err, errno, retval, duration);
+
+ err = bpf_map_lookup_elem(bpf_map__fd(data_map_fentry), &zero, &result);
+ if (CHECK(err, "get_result",
+ "failed to get output data: %d\n", err))
+ goto close_prog;
+
+ err = bpf_map_lookup_elem(bpf_map__fd(data_map_fexit), &zero, result + 6);
+ if (CHECK(err, "get_result",
+ "failed to get output data: %d\n", err))
+ goto close_prog;
+
+ for (i = 0; i < 12; i++)
+ if (CHECK(result[i] != 1, "result", "bpf_fentry_test%d failed err %ld\n",
+ i % 6 + 1, result[i]))
+ goto close_prog;
+
+close_prog:
+ for (i = 0; i < 12; i++)
+ if (!IS_ERR_OR_NULL(link[i]))
+ bpf_link__destroy(link[i]);
+ bpf_object__close(obj_fentry);
+ bpf_object__close(obj_fexit);
+ bpf_object__close(pkt_obj);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/fentry_test.c b/tools/testing/selftests/bpf/prog_tests/fentry_test.c
new file mode 100644
index 000000000000..9fb103193878
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/fentry_test.c
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 Facebook */
+#include <test_progs.h>
+
+void test_fentry_test(void)
+{
+ struct bpf_prog_load_attr attr = {
+ .file = "./fentry_test.o",
+ };
+
+ char prog_name[] = "fentry/bpf_fentry_testX";
+ struct bpf_object *obj = NULL, *pkt_obj;
+ int err, pkt_fd, kfree_skb_fd, i;
+ struct bpf_link *link[6] = {};
+ struct bpf_program *prog[6];
+ __u32 duration, retval;
+ struct bpf_map *data_map;
+ const int zero = 0;
+ u64 result[6];
+
+ err = bpf_prog_load("./test_pkt_access.o", BPF_PROG_TYPE_SCHED_CLS,
+ &pkt_obj, &pkt_fd);
+ if (CHECK(err, "prog_load sched cls", "err %d errno %d\n", err, errno))
+ return;
+ err = bpf_prog_load_xattr(&attr, &obj, &kfree_skb_fd);
+ if (CHECK(err, "prog_load fail", "err %d errno %d\n", err, errno))
+ goto close_prog;
+
+ for (i = 0; i < 6; i++) {
+ prog_name[sizeof(prog_name) - 2] = '1' + i;
+ prog[i] = bpf_object__find_program_by_title(obj, prog_name);
+ if (CHECK(!prog[i], "find_prog", "prog %s not found\n", prog_name))
+ goto close_prog;
+ link[i] = bpf_program__attach_trace(prog[i]);
+ if (CHECK(IS_ERR(link[i]), "attach_trace", "failed to link\n"))
+ goto close_prog;
+ }
+ data_map = bpf_object__find_map_by_name(obj, "fentry_t.bss");
+ if (CHECK(!data_map, "find_data_map", "data map not found\n"))
+ goto close_prog;
+
+ err = bpf_prog_test_run(pkt_fd, 1, &pkt_v6, sizeof(pkt_v6),
+ NULL, NULL, &retval, &duration);
+ CHECK(err || retval, "ipv6",
+ "err %d errno %d retval %d duration %d\n",
+ err, errno, retval, duration);
+
+ err = bpf_map_lookup_elem(bpf_map__fd(data_map), &zero, &result);
+ if (CHECK(err, "get_result",
+ "failed to get output data: %d\n", err))
+ goto close_prog;
+
+ for (i = 0; i < 6; i++)
+ if (CHECK(result[i] != 1, "result", "bpf_fentry_test%d failed err %ld\n",
+ i + 1, result[i]))
+ goto close_prog;
+
+close_prog:
+ for (i = 0; i < 6; i++)
+ if (!IS_ERR_OR_NULL(link[i]))
+ bpf_link__destroy(link[i]);
+ bpf_object__close(obj);
+ bpf_object__close(pkt_obj);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c b/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c
new file mode 100644
index 000000000000..15c7378362dd
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c
@@ -0,0 +1,76 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 Facebook */
+#include <test_progs.h>
+
+#define PROG_CNT 3
+
+void test_fexit_bpf2bpf(void)
+{
+ const char *prog_name[PROG_CNT] = {
+ "fexit/test_pkt_access",
+ "fexit/test_pkt_access_subprog1",
+ "fexit/test_pkt_access_subprog2",
+ };
+ struct bpf_object *obj = NULL, *pkt_obj;
+ int err, pkt_fd, i;
+ struct bpf_link *link[PROG_CNT] = {};
+ struct bpf_program *prog[PROG_CNT];
+ __u32 duration, retval;
+ struct bpf_map *data_map;
+ const int zero = 0;
+ u64 result[PROG_CNT];
+
+ err = bpf_prog_load("./test_pkt_access.o", BPF_PROG_TYPE_UNSPEC,
+ &pkt_obj, &pkt_fd);
+ if (CHECK(err, "prog_load sched cls", "err %d errno %d\n", err, errno))
+ return;
+ DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
+ .attach_prog_fd = pkt_fd,
+ );
+
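+ /* Setting attach_prog_fd at open time targets the fexit programs at
+ * another BPF program (test_pkt_access) rather than a kernel function.
+ */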
+ obj = bpf_object__open_file("./fexit_bpf2bpf.o", &opts);
+ if (CHECK(IS_ERR_OR_NULL(obj), "obj_open",
+ "failed to open fexit_bpf2bpf: %ld\n",
+ PTR_ERR(obj)))
+ goto close_prog;
+
+ err = bpf_object__load(obj);
+ if (CHECK(err, "obj_load", "err %d\n", err))
+ goto close_prog;
+
+ for (i = 0; i < PROG_CNT; i++) {
+ prog[i] = bpf_object__find_program_by_title(obj, prog_name[i]);
+ if (CHECK(!prog[i], "find_prog", "prog %s not found\n", prog_name[i]))
+ goto close_prog;
+ link[i] = bpf_program__attach_trace(prog[i]);
+ if (CHECK(IS_ERR(link[i]), "attach_trace", "failed to link\n"))
+ goto close_prog;
+ }
+ data_map = bpf_object__find_map_by_name(obj, "fexit_bp.bss");
+ if (CHECK(!data_map, "find_data_map", "data map not found\n"))
+ goto close_prog;
+
+ err = bpf_prog_test_run(pkt_fd, 1, &pkt_v6, sizeof(pkt_v6),
+ NULL, NULL, &retval, &duration);
+ CHECK(err || retval, "ipv6",
+ "err %d errno %d retval %d duration %d\n",
+ err, errno, retval, duration);
+
+ err = bpf_map_lookup_elem(bpf_map__fd(data_map), &zero, &result);
+ if (CHECK(err, "get_result",
+ "failed to get output data: %d\n", err))
+ goto close_prog;
+
+ for (i = 0; i < PROG_CNT; i++)
+ if (CHECK(result[i] != 1, "result", "%s failed err %ld\n",
+ prog_name[i], result[i]))
+ goto close_prog;
+
+close_prog:
+ for (i = 0; i < PROG_CNT; i++)
+ if (!IS_ERR_OR_NULL(link[i]))
+ bpf_link__destroy(link[i]);
+ if (!IS_ERR_OR_NULL(obj))
+ bpf_object__close(obj);
+ bpf_object__close(pkt_obj);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_stress.c b/tools/testing/selftests/bpf/prog_tests/fexit_stress.c
new file mode 100644
index 000000000000..3b9dbf7433f0
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/fexit_stress.c
@@ -0,0 +1,76 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 Facebook */
+#include <test_progs.h>
+
+/* x86-64 fits 55 JITed and 43 interpreted progs into half page */
+#define CNT 40
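+/* 40 stays safely below both limits above: 40 < 43 < 55 */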
+
+void test_fexit_stress(void)
+{
+ char test_skb[128] = {};
+ int fexit_fd[CNT] = {};
+ int link_fd[CNT] = {};
+ __u32 duration = 0;
+ char error[4096];
+ __u32 prog_ret;
+ int err, i, filter_fd;
+
+ const struct bpf_insn trace_program[] = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ };
+
+ struct bpf_load_program_attr load_attr = {
+ .prog_type = BPF_PROG_TYPE_TRACING,
+ .license = "GPL",
+ .insns = trace_program,
+ .insns_cnt = sizeof(trace_program) / sizeof(struct bpf_insn),
+ .expected_attach_type = BPF_TRACE_FEXIT,
+ };
+
+ const struct bpf_insn skb_program[] = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ };
+
+ struct bpf_load_program_attr skb_load_attr = {
+ .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
+ .license = "GPL",
+ .insns = skb_program,
+ .insns_cnt = sizeof(skb_program) / sizeof(struct bpf_insn),
+ };
+
+ err = libbpf_find_vmlinux_btf_id("bpf_fentry_test1",
+ load_attr.expected_attach_type);
+ if (CHECK(err <= 0, "find_vmlinux_btf_id", "failed: %d\n", err))
+ goto out;
+ load_attr.attach_btf_id = err;
+
+ for (i = 0; i < CNT; i++) {
+ fexit_fd[i] = bpf_load_program_xattr(&load_attr, error, sizeof(error));
+ if (CHECK(fexit_fd[i] < 0, "fexit loaded",
+ "failed: %d errno %d\n", fexit_fd[i], errno))
+ goto out;
+ link_fd[i] = bpf_raw_tracepoint_open(NULL, fexit_fd[i]);
+ if (CHECK(link_fd[i] < 0, "fexit attach failed",
+ "prog %d failed: %d err %d\n", i, link_fd[i], errno))
+ goto out;
+ }
+
+ filter_fd = bpf_load_program_xattr(&skb_load_attr, error, sizeof(error));
+ if (CHECK(filter_fd < 0, "test_program_loaded", "failed: %d errno %d\n",
+ filter_fd, errno))
+ goto out;
+
+ err = bpf_prog_test_run(filter_fd, 1, test_skb, sizeof(test_skb), 0,
+ 0, &prog_ret, 0);
+ close(filter_fd);
+ CHECK_FAIL(err);
+out:
+ for (i = 0; i < CNT; i++) {
+ if (link_fd[i])
+ close(link_fd[i]);
+ if (fexit_fd[i])
+ close(fexit_fd[i]);
+ }
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_test.c b/tools/testing/selftests/bpf/prog_tests/fexit_test.c
new file mode 100644
index 000000000000..f99013222c74
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/fexit_test.c
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 Facebook */
+#include <test_progs.h>
+
+void test_fexit_test(void)
+{
+ struct bpf_prog_load_attr attr = {
+ .file = "./fexit_test.o",
+ };
+
+ char prog_name[] = "fexit/bpf_fentry_testX";
+ struct bpf_object *obj = NULL, *pkt_obj;
+ int err, pkt_fd, fexit_fd, i;
+ struct bpf_link *link[6] = {};
+ struct bpf_program *prog[6];
+ __u32 duration, retval;
+ struct bpf_map *data_map;
+ const int zero = 0;
+ u64 result[6];
+
+ err = bpf_prog_load("./test_pkt_access.o", BPF_PROG_TYPE_SCHED_CLS,
+ &pkt_obj, &pkt_fd);
+ if (CHECK(err, "prog_load sched cls", "err %d errno %d\n", err, errno))
+ return;
+ err = bpf_prog_load_xattr(&attr, &obj, &fexit_fd);
+ if (CHECK(err, "prog_load fail", "err %d errno %d\n", err, errno))
+ goto close_prog;
+
+ for (i = 0; i < 6; i++) {
+ prog_name[sizeof(prog_name) - 2] = '1' + i;
+ prog[i] = bpf_object__find_program_by_title(obj, prog_name);
+ if (CHECK(!prog[i], "find_prog", "prog %s not found\n", prog_name))
+ goto close_prog;
+ link[i] = bpf_program__attach_trace(prog[i]);
+ if (CHECK(IS_ERR(link[i]), "attach_trace", "failed to link\n"))
+ goto close_prog;
+ }
+ data_map = bpf_object__find_map_by_name(obj, "fexit_te.bss");
+ if (CHECK(!data_map, "find_data_map", "data map not found\n"))
+ goto close_prog;
+
+ err = bpf_prog_test_run(pkt_fd, 1, &pkt_v6, sizeof(pkt_v6),
+ NULL, NULL, &retval, &duration);
+ CHECK(err || retval, "ipv6",
+ "err %d errno %d retval %d duration %d\n",
+ err, errno, retval, duration);
+
+ err = bpf_map_lookup_elem(bpf_map__fd(data_map), &zero, &result);
+ if (CHECK(err, "get_result",
+ "failed to get output data: %d\n", err))
+ goto close_prog;
+
+ for (i = 0; i < 6; i++)
+ if (CHECK(result[i] != 1, "result", "bpf_fentry_test%d failed err %ld\n",
+ i + 1, result[i]))
+ goto close_prog;
+
+close_prog:
+ for (i = 0; i < 6; i++)
+ if (!IS_ERR_OR_NULL(link[i]))
+ bpf_link__destroy(link[i]);
+ bpf_object__close(obj);
+ bpf_object__close(pkt_obj);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/flow_dissector_reattach.c b/tools/testing/selftests/bpf/prog_tests/flow_dissector_reattach.c
new file mode 100644
index 000000000000..1f51ba66b98b
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/flow_dissector_reattach.c
@@ -0,0 +1,140 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test that the flow_dissector program can be updated with a single
+ * syscall by attaching a new program that replaces the existing one.
+ *
+ * Corner case - the same program cannot be attached twice.
+ */
+
+#define _GNU_SOURCE
+#include <errno.h>
+#include <fcntl.h>
+#include <sched.h>
+#include <stdbool.h>
+#include <unistd.h>
+
+#include <linux/bpf.h>
+#include <bpf/bpf.h>
+
+#include "test_progs.h"
+
+static bool is_attached(int netns)
+{
+ __u32 cnt;
+ int err;
+
+ err = bpf_prog_query(netns, BPF_FLOW_DISSECTOR, 0, NULL, NULL, &cnt);
+ if (CHECK_FAIL(err)) {
+ perror("bpf_prog_query");
+ return true; /* fail-safe */
+ }
+
+ return cnt > 0;
+}
+
+static int load_prog(void)
+{
+ struct bpf_insn prog[] = {
+ BPF_MOV64_IMM(BPF_REG_0, BPF_OK),
+ BPF_EXIT_INSN(),
+ };
+ int fd;
+
+ fd = bpf_load_program(BPF_PROG_TYPE_FLOW_DISSECTOR, prog,
+ ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
+ if (CHECK_FAIL(fd < 0))
+ perror("bpf_load_program");
+
+ return fd;
+}
+
+static void do_flow_dissector_reattach(void)
+{
+ int prog_fd[2] = { -1, -1 };
+ int err;
+
+ prog_fd[0] = load_prog();
+ if (prog_fd[0] < 0)
+ return;
+
+ prog_fd[1] = load_prog();
+ if (prog_fd[1] < 0)
+ goto out_close;
+
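+ /* For BPF_FLOW_DISSECTOR the target_fd argument is unused and must be
+ * zero; the program attaches to the caller's network namespace.
+ */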
+ err = bpf_prog_attach(prog_fd[0], 0, BPF_FLOW_DISSECTOR, 0);
+ if (CHECK_FAIL(err)) {
+ perror("bpf_prog_attach-0");
+ goto out_close;
+ }
+
+ /* Expect success when attaching a different program */
+ err = bpf_prog_attach(prog_fd[1], 0, BPF_FLOW_DISSECTOR, 0);
+ if (CHECK_FAIL(err)) {
+ perror("bpf_prog_attach-1");
+ goto out_detach;
+ }
+
+ /* Expect failure when attaching the same program twice */
+ err = bpf_prog_attach(prog_fd[1], 0, BPF_FLOW_DISSECTOR, 0);
+ if (CHECK_FAIL(!err || errno != EINVAL))
+ perror("bpf_prog_attach-2");
+
+out_detach:
+ err = bpf_prog_detach(0, BPF_FLOW_DISSECTOR);
+ if (CHECK_FAIL(err))
+ perror("bpf_prog_detach");
+
+out_close:
+ close(prog_fd[1]);
+ close(prog_fd[0]);
+}
+
+void test_flow_dissector_reattach(void)
+{
+ int init_net, self_net, err;
+
+ self_net = open("/proc/self/ns/net", O_RDONLY);
+ if (CHECK_FAIL(self_net < 0)) {
+ perror("open(/proc/self/ns/net");
+ return;
+ }
+
+ init_net = open("/proc/1/ns/net", O_RDONLY);
+ if (CHECK_FAIL(init_net < 0)) {
+ perror("open(/proc/1/ns/net)");
+ goto out_close;
+ }
+
+ err = setns(init_net, CLONE_NEWNET);
+ if (CHECK_FAIL(err)) {
+ perror("setns(/proc/1/ns/net)");
+ goto out_close;
+ }
+
+ if (is_attached(init_net)) {
+ test__skip();
+ printf("Can't test with flow dissector attached to init_net\n");
+ goto out_setns;
+ }
+
+ /* First run tests in root network namespace */
+ do_flow_dissector_reattach();
+
+ /* Then repeat tests in a non-root namespace */
+ err = unshare(CLONE_NEWNET);
+ if (CHECK_FAIL(err)) {
+ perror("unshare(CLONE_NEWNET)");
+ goto out_setns;
+ }
+ do_flow_dissector_reattach();
+
+out_setns:
+ /* Move back to netns we started in. */
+ err = setns(self_net, CLONE_NEWNET);
+ if (CHECK_FAIL(err))
+ perror("setns(/proc/self/ns/net)");
+
+out_close:
+ close(init_net);
+ close(self_net);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/kfree_skb.c b/tools/testing/selftests/bpf/prog_tests/kfree_skb.c
new file mode 100644
index 000000000000..7507c8f689bc
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/kfree_skb.c
@@ -0,0 +1,154 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+struct meta {
+ int ifindex;
+ __u32 cb32_0;
+ __u8 cb8_0;
+};
+
+static union {
+ __u32 cb32[5];
+ __u8 cb8[20];
+} cb = {
+ .cb32[0] = 0x81828384,
+};
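+/* On a little-endian host cb8[0] aliases the low byte of cb32[0], i.e. 0x84 */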
+
+static void on_sample(void *ctx, int cpu, void *data, __u32 size)
+{
+ struct meta *meta = (struct meta *)data;
+ struct ipv6_packet *pkt_v6 = data + sizeof(*meta);
+ int duration = 0;
+
+ if (CHECK(size != 72 + sizeof(*meta), "check_size", "size %u != %zu\n",
+ size, 72 + sizeof(*meta)))
+ return;
+ if (CHECK(meta->ifindex != 1, "check_meta_ifindex",
+ "meta->ifindex = %d\n", meta->ifindex))
+ /* spurious kfree_skb not on loopback device */
+ return;
+ if (CHECK(meta->cb8_0 != cb.cb8[0], "check_cb8_0", "cb8_0 %x != %x\n",
+ meta->cb8_0, cb.cb8[0]))
+ return;
+ if (CHECK(meta->cb32_0 != cb.cb32[0], "check_cb32_0",
+ "cb32_0 %x != %x\n",
+ meta->cb32_0, cb.cb32[0]))
+ return;
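+ /* 0xdd86 is ETH_P_IPV6 (0x86dd) as stored big-endian on the wire,
+ * read back on a little-endian host
+ */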
+ if (CHECK(pkt_v6->eth.h_proto != 0xdd86, "check_eth",
+ "h_proto %x\n", pkt_v6->eth.h_proto))
+ return;
+ if (CHECK(pkt_v6->iph.nexthdr != 6, "check_ip",
+ "iph.nexthdr %x\n", pkt_v6->iph.nexthdr))
+ return;
+ if (CHECK(pkt_v6->tcp.doff != 5, "check_tcp",
+ "tcp.doff %x\n", pkt_v6->tcp.doff))
+ return;
+
+ *(bool *)ctx = true;
+}
+
+void test_kfree_skb(void)
+{
+ struct __sk_buff skb = {};
+ struct bpf_prog_test_run_attr tattr = {
+ .data_in = &pkt_v6,
+ .data_size_in = sizeof(pkt_v6),
+ .ctx_in = &skb,
+ .ctx_size_in = sizeof(skb),
+ };
+ struct bpf_prog_load_attr attr = {
+ .file = "./kfree_skb.o",
+ };
+
+ struct bpf_link *link = NULL, *link_fentry = NULL, *link_fexit = NULL;
+ struct bpf_map *perf_buf_map, *global_data;
+ struct bpf_program *prog, *fentry, *fexit;
+ struct bpf_object *obj, *obj2 = NULL;
+ struct perf_buffer_opts pb_opts = {};
+ struct perf_buffer *pb = NULL;
+ int err, kfree_skb_fd;
+ bool passed = false;
+ __u32 duration = 0;
+ const int zero = 0;
+ bool test_ok[2];
+
+ err = bpf_prog_load("./test_pkt_access.o", BPF_PROG_TYPE_SCHED_CLS,
+ &obj, &tattr.prog_fd);
+ if (CHECK(err, "prog_load sched cls", "err %d errno %d\n", err, errno))
+ return;
+
+ err = bpf_prog_load_xattr(&attr, &obj2, &kfree_skb_fd);
+ if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno))
+ goto close_prog;
+
+ prog = bpf_object__find_program_by_title(obj2, "tp_btf/kfree_skb");
+ if (CHECK(!prog, "find_prog", "prog kfree_skb not found\n"))
+ goto close_prog;
+ fentry = bpf_object__find_program_by_title(obj2, "fentry/eth_type_trans");
+ if (CHECK(!fentry, "find_prog", "prog eth_type_trans not found\n"))
+ goto close_prog;
+ fexit = bpf_object__find_program_by_title(obj2, "fexit/eth_type_trans");
+ if (CHECK(!fexit, "find_prog", "prog eth_type_trans not found\n"))
+ goto close_prog;
+
+ global_data = bpf_object__find_map_by_name(obj2, "kfree_sk.bss");
+ if (CHECK(!global_data, "find global data", "not found\n"))
+ goto close_prog;
+
+ link = bpf_program__attach_raw_tracepoint(prog, NULL);
+ if (CHECK(IS_ERR(link), "attach_raw_tp", "err %ld\n", PTR_ERR(link)))
+ goto close_prog;
+ link_fentry = bpf_program__attach_trace(fentry);
+ if (CHECK(IS_ERR(link_fentry), "attach fentry", "err %ld\n",
+ PTR_ERR(link_fentry)))
+ goto close_prog;
+ link_fexit = bpf_program__attach_trace(fexit);
+ if (CHECK(IS_ERR(link_fexit), "attach fexit", "err %ld\n",
+ PTR_ERR(link_fexit)))
+ goto close_prog;
+
+ perf_buf_map = bpf_object__find_map_by_name(obj2, "perf_buf_map");
+ if (CHECK(!perf_buf_map, "find_perf_buf_map", "not found\n"))
+ goto close_prog;
+
+ /* set up perf buffer */
+ pb_opts.sample_cb = on_sample;
+ pb_opts.ctx = &passed;
+ pb = perf_buffer__new(bpf_map__fd(perf_buf_map), 1, &pb_opts);
+ if (CHECK(IS_ERR(pb), "perf_buf__new", "err %ld\n", PTR_ERR(pb)))
+ goto close_prog;
+
+ memcpy(skb.cb, &cb, sizeof(cb));
+ err = bpf_prog_test_run_xattr(&tattr);
+ duration = tattr.duration;
+ CHECK(err || tattr.retval, "ipv6",
+ "err %d errno %d retval %d duration %d\n",
+ err, errno, tattr.retval, duration);
+
+ /* read perf buffer */
+ err = perf_buffer__poll(pb, 100);
+ if (CHECK(err < 0, "perf_buffer__poll", "err %d\n", err))
+ goto close_prog;
+
+ /* make sure kfree_skb program was triggered
+ * and it sent expected skb into ring buffer
+ */
+ CHECK_FAIL(!passed);
+
+ err = bpf_map_lookup_elem(bpf_map__fd(global_data), &zero, test_ok);
+ if (CHECK(err, "get_result",
+ "failed to get output data: %d\n", err))
+ goto close_prog;
+
+ CHECK_FAIL(!test_ok[0] || !test_ok[1]);
+close_prog:
+ perf_buffer__free(pb);
+ if (!IS_ERR_OR_NULL(link))
+ bpf_link__destroy(link);
+ if (!IS_ERR_OR_NULL(link_fentry))
+ bpf_link__destroy(link_fentry);
+ if (!IS_ERR_OR_NULL(link_fexit))
+ bpf_link__destroy(link_fexit);
+ bpf_object__close(obj);
+ bpf_object__close(obj2);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/mmap.c b/tools/testing/selftests/bpf/prog_tests/mmap.c
new file mode 100644
index 000000000000..051a6d48762c
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/mmap.c
@@ -0,0 +1,220 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+#include <sys/mman.h>
+
+struct map_data {
+ __u64 val[512 * 4];
+};
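+/* 512 * 4 __u64s = 16KB, i.e. four pages assuming a 4KB page size, which
+ * the page-granular mmap() tests below rely on
+ */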
+
+struct bss_data {
+ __u64 in_val;
+ __u64 out_val;
+};
+
+static size_t roundup_page(size_t sz)
+{
+ long page_size = sysconf(_SC_PAGE_SIZE);
+ return (sz + page_size - 1) / page_size * page_size;
+}
+
+void test_mmap(void)
+{
+ const char *file = "test_mmap.o";
+ const char *probe_name = "raw_tracepoint/sys_enter";
+ const char *tp_name = "sys_enter";
+ const size_t bss_sz = roundup_page(sizeof(struct bss_data));
+ const size_t map_sz = roundup_page(sizeof(struct map_data));
+ const int zero = 0, one = 1, two = 2, far = 1500;
+ const long page_size = sysconf(_SC_PAGE_SIZE);
+ int err, duration = 0, i, data_map_fd;
+ struct bpf_program *prog;
+ struct bpf_object *obj;
+ struct bpf_link *link = NULL;
+ struct bpf_map *data_map, *bss_map;
+ void *bss_mmaped = NULL, *map_mmaped = NULL, *tmp1, *tmp2;
+ volatile struct bss_data *bss_data;
+ volatile struct map_data *map_data;
+ __u64 val = 0;
+
+ obj = bpf_object__open_file(file, NULL);
+ if (CHECK(IS_ERR(obj), "obj_open", "failed to open '%s': %ld\n",
+ file, PTR_ERR(obj)))
+ return;
+ prog = bpf_object__find_program_by_title(obj, probe_name);
+ if (CHECK(!prog, "find_probe", "prog '%s' not found\n", probe_name))
+ goto cleanup;
+ err = bpf_object__load(obj);
+ if (CHECK(err, "obj_load", "failed to load prog '%s': %d\n",
+ probe_name, err))
+ goto cleanup;
+
+ bss_map = bpf_object__find_map_by_name(obj, "test_mma.bss");
+ if (CHECK(!bss_map, "find_bss_map", ".bss map not found\n"))
+ goto cleanup;
+ data_map = bpf_object__find_map_by_name(obj, "data_map");
+ if (CHECK(!data_map, "find_data_map", "data_map map not found\n"))
+ goto cleanup;
+ data_map_fd = bpf_map__fd(data_map);
+
+ bss_mmaped = mmap(NULL, bss_sz, PROT_READ | PROT_WRITE, MAP_SHARED,
+ bpf_map__fd(bss_map), 0);
+ if (CHECK(bss_mmaped == MAP_FAILED, "bss_mmap",
+ ".bss mmap failed: %d\n", errno)) {
+ bss_mmaped = NULL;
+ goto cleanup;
+ }
+ /* map as R/W first */
+ map_mmaped = mmap(NULL, map_sz, PROT_READ | PROT_WRITE, MAP_SHARED,
+ data_map_fd, 0);
+ if (CHECK(map_mmaped == MAP_FAILED, "data_mmap",
+ "data_map mmap failed: %d\n", errno)) {
+ map_mmaped = NULL;
+ goto cleanup;
+ }
+
+ bss_data = bss_mmaped;
+ map_data = map_mmaped;
+
+ CHECK_FAIL(bss_data->in_val);
+ CHECK_FAIL(bss_data->out_val);
+ CHECK_FAIL(map_data->val[0]);
+ CHECK_FAIL(map_data->val[1]);
+ CHECK_FAIL(map_data->val[2]);
+ CHECK_FAIL(map_data->val[far]);
+
+ link = bpf_program__attach_raw_tracepoint(prog, tp_name);
+ if (CHECK(IS_ERR(link), "attach_raw_tp", "err %ld\n", PTR_ERR(link)))
+ goto cleanup;
+
+ bss_data->in_val = 123;
+ val = 111;
+ CHECK_FAIL(bpf_map_update_elem(data_map_fd, &zero, &val, 0));
+
+ usleep(1);
+
+ CHECK_FAIL(bss_data->in_val != 123);
+ CHECK_FAIL(bss_data->out_val != 123);
+ CHECK_FAIL(map_data->val[0] != 111);
+ CHECK_FAIL(map_data->val[1] != 222);
+ CHECK_FAIL(map_data->val[2] != 123);
+ CHECK_FAIL(map_data->val[far] != 3 * 123);
+
+ CHECK_FAIL(bpf_map_lookup_elem(data_map_fd, &zero, &val));
+ CHECK_FAIL(val != 111);
+ CHECK_FAIL(bpf_map_lookup_elem(data_map_fd, &one, &val));
+ CHECK_FAIL(val != 222);
+ CHECK_FAIL(bpf_map_lookup_elem(data_map_fd, &two, &val));
+ CHECK_FAIL(val != 123);
+ CHECK_FAIL(bpf_map_lookup_elem(data_map_fd, &far, &val));
+ CHECK_FAIL(val != 3 * 123);
+
+ /* data_map freeze should fail due to R/W mmap() */
+ err = bpf_map_freeze(data_map_fd);
+ if (CHECK(!err || errno != EBUSY, "no_freeze",
+ "data_map freeze succeeded: err=%d, errno=%d\n", err, errno))
+ goto cleanup;
+
+ /* unmap R/W mapping */
+ err = munmap(map_mmaped, map_sz);
+ map_mmaped = NULL;
+ if (CHECK(err, "data_map_munmap", "data_map munmap failed: %d\n", errno))
+ goto cleanup;
+
+ /* re-map as R/O now */
+ map_mmaped = mmap(NULL, map_sz, PROT_READ, MAP_SHARED, data_map_fd, 0);
+ if (CHECK(map_mmaped == MAP_FAILED, "data_mmap",
+ "data_map R/O mmap failed: %d\n", errno)) {
+ map_mmaped = NULL;
+ goto cleanup;
+ }
+ map_data = map_mmaped;
+
+ /* map/unmap in a loop to test ref counting */
+ for (i = 0; i < 10; i++) {
+ int flags = i % 2 ? PROT_READ : PROT_WRITE;
+ void *p;
+
+ p = mmap(NULL, map_sz, flags, MAP_SHARED, data_map_fd, 0);
+ if (CHECK_FAIL(p == MAP_FAILED))
+ goto cleanup;
+ err = munmap(p, map_sz);
+ if (CHECK_FAIL(err))
+ goto cleanup;
+ }
+
+ /* data_map freeze should now succeed due to no R/W mapping */
+ err = bpf_map_freeze(data_map_fd);
+ if (CHECK(err, "freeze", "data_map freeze failed: err=%d, errno=%d\n",
+ err, errno))
+ goto cleanup;
+
+ /* mapping as R/W now should fail */
+ tmp1 = mmap(NULL, map_sz, PROT_READ | PROT_WRITE, MAP_SHARED,
+ data_map_fd, 0);
+ if (CHECK(tmp1 != MAP_FAILED, "data_mmap", "mmap succeeded\n")) {
+ munmap(tmp1, map_sz);
+ goto cleanup;
+ }
+
+ bss_data->in_val = 321;
+ usleep(1);
+ CHECK_FAIL(bss_data->in_val != 321);
+ CHECK_FAIL(bss_data->out_val != 321);
+ CHECK_FAIL(map_data->val[0] != 111);
+ CHECK_FAIL(map_data->val[1] != 222);
+ CHECK_FAIL(map_data->val[2] != 321);
+ CHECK_FAIL(map_data->val[far] != 3 * 321);
+
+ /* check some more advanced mmap() manipulations */
+
+ /* map all but last page: pages 1-3 mapped */
+ tmp1 = mmap(NULL, 3 * page_size, PROT_READ, MAP_SHARED,
+ data_map_fd, 0);
+ if (CHECK(tmp1 == MAP_FAILED, "adv_mmap1", "errno %d\n", errno))
+ goto cleanup;
+
+ /* unmap second page: pages 1, 3 mapped */
+ err = munmap(tmp1 + page_size, page_size);
+ if (CHECK(err, "adv_mmap2", "errno %d\n", errno)) {
+ munmap(tmp1, map_sz);
+ goto cleanup;
+ }
+
+ /* map page 2 back */
+ tmp2 = mmap(tmp1 + page_size, page_size, PROT_READ,
+ MAP_SHARED | MAP_FIXED, data_map_fd, 0);
+ if (CHECK(tmp2 == MAP_FAILED, "adv_mmap3", "errno %d\n", errno)) {
+ munmap(tmp1, page_size);
+ munmap(tmp1 + 2*page_size, page_size);
+ goto cleanup;
+ }
+ CHECK(tmp1 + page_size != tmp2, "adv_mmap4",
+ "tmp1: %p, tmp2: %p\n", tmp1, tmp2);
+
+ /* re-map all 4 pages */
+ tmp2 = mmap(tmp1, 4 * page_size, PROT_READ, MAP_SHARED | MAP_FIXED,
+ data_map_fd, 0);
+ if (CHECK(tmp2 == MAP_FAILED, "adv_mmap5", "errno %d\n", errno)) {
+ munmap(tmp1, 3 * page_size); /* unmap page 1 */
+ goto cleanup;
+ }
+ CHECK(tmp1 != tmp2, "adv_mmap6", "tmp1: %p, tmp2: %p\n", tmp1, tmp2);
+
+ map_data = tmp2;
+ CHECK_FAIL(bss_data->in_val != 321);
+ CHECK_FAIL(bss_data->out_val != 321);
+ CHECK_FAIL(map_data->val[0] != 111);
+ CHECK_FAIL(map_data->val[1] != 222);
+ CHECK_FAIL(map_data->val[2] != 321);
+ CHECK_FAIL(map_data->val[far] != 3 * 321);
+
+ munmap(tmp2, 4 * page_size);
+cleanup:
+ if (bss_mmaped)
+ CHECK_FAIL(munmap(bss_mmaped, bss_sz));
+ if (map_mmaped)
+ CHECK_FAIL(munmap(map_mmaped, map_sz));
+ if (!IS_ERR_OR_NULL(link))
+ bpf_link__destroy(link);
+ bpf_object__close(obj);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/pinning.c b/tools/testing/selftests/bpf/prog_tests/pinning.c
new file mode 100644
index 000000000000..041952524c55
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/pinning.c
@@ -0,0 +1,224 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <test_progs.h>
+
+static __u32 get_map_id(struct bpf_object *obj, const char *name)
+{
+ struct bpf_map_info map_info = {};
+ __u32 map_info_len, duration = 0;
+ struct bpf_map *map;
+ int err;
+
+ map_info_len = sizeof(map_info);
+
+ map = bpf_object__find_map_by_name(obj, name);
+ if (CHECK(!map, "find map", "NULL map"))
+ return 0;
+
+ err = bpf_obj_get_info_by_fd(bpf_map__fd(map),
+ &map_info, &map_info_len);
+ CHECK(err, "get map info", "err %d errno %d", err, errno);
+ return map_info.id;
+}
+
+void test_pinning(void)
+{
+ const char *file_invalid = "./test_pinning_invalid.o";
+ const char *custpinpath = "/sys/fs/bpf/custom/pinmap";
+ const char *nopinpath = "/sys/fs/bpf/nopinmap";
+ const char *nopinpath2 = "/sys/fs/bpf/nopinmap2";
+ const char *custpath = "/sys/fs/bpf/custom";
+ const char *pinpath = "/sys/fs/bpf/pinmap";
+ const char *file = "./test_pinning.o";
+ __u32 map_id, map_id2, duration = 0;
+ struct stat statbuf = {};
+ struct bpf_object *obj;
+ struct bpf_map *map;
+ int err;
+ DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
+ .pin_root_path = custpath,
+ );
+
+ /* check that opening fails with invalid pinning value in map def */
+ obj = bpf_object__open_file(file_invalid, NULL);
+ err = libbpf_get_error(obj);
+ if (CHECK(err != -EINVAL, "invalid open", "err %d errno %d\n", err, errno)) {
+ obj = NULL;
+ goto out;
+ }
+
+ /* open the valid object file */
+ obj = bpf_object__open_file(file, NULL);
+ err = libbpf_get_error(obj);
+ if (CHECK(err, "default open", "err %d errno %d\n", err, errno)) {
+ obj = NULL;
+ goto out;
+ }
+
+ err = bpf_object__load(obj);
+ if (CHECK(err, "default load", "err %d errno %d\n", err, errno))
+ goto out;
+
+ /* check that pinmap was pinned */
+ err = stat(pinpath, &statbuf);
+ if (CHECK(err, "stat pinpath", "err %d errno %d\n", err, errno))
+ goto out;
+
+ /* check that nopinmap was *not* pinned */
+ err = stat(nopinpath, &statbuf);
+ if (CHECK(!err || errno != ENOENT, "stat nopinpath",
+ "err %d errno %d\n", err, errno))
+ goto out;
+
+ /* check that nopinmap2 was *not* pinned */
+ err = stat(nopinpath2, &statbuf);
+ if (CHECK(!err || errno != ENOENT, "stat nopinpath2",
+ "err %d errno %d\n", err, errno))
+ goto out;
+
+ map_id = get_map_id(obj, "pinmap");
+ if (!map_id)
+ goto out;
+
+ bpf_object__close(obj);
+
+ obj = bpf_object__open_file(file, NULL);
+ if (CHECK_FAIL(libbpf_get_error(obj))) {
+ obj = NULL;
+ goto out;
+ }
+
+ err = bpf_object__load(obj);
+ if (CHECK(err, "default load", "err %d errno %d\n", err, errno))
+ goto out;
+
+ /* check that same map ID was reused for second load */
+ map_id2 = get_map_id(obj, "pinmap");
+ if (CHECK(map_id != map_id2, "check reuse",
+ "err %d errno %d id %d id2 %d\n", err, errno, map_id, map_id2))
+ goto out;
+
+ /* should be no-op to re-pin same map */
+ map = bpf_object__find_map_by_name(obj, "pinmap");
+ if (CHECK(!map, "find map", "NULL map"))
+ goto out;
+
+ err = bpf_map__pin(map, NULL);
+ if (CHECK(err, "re-pin map", "err %d errno %d\n", err, errno))
+ goto out;
+
+ /* but error to pin at different location */
+ err = bpf_map__pin(map, "/sys/fs/bpf/other");
+ if (CHECK(!err, "pin map different", "err %d errno %d\n", err, errno))
+ goto out;
+
+ /* unpin maps with a pin_path set */
+ err = bpf_object__unpin_maps(obj, NULL);
+ if (CHECK(err, "unpin maps", "err %d errno %d\n", err, errno))
+ goto out;
+
+ /* and re-pin them... */
+ err = bpf_object__pin_maps(obj, NULL);
+ if (CHECK(err, "pin maps", "err %d errno %d\n", err, errno))
+ goto out;
+
+ /* set pinning path of other map and re-pin all */
+ map = bpf_object__find_map_by_name(obj, "nopinmap");
+ if (CHECK(!map, "find map", "NULL map"))
+ goto out;
+
+ err = bpf_map__set_pin_path(map, custpinpath);
+ if (CHECK(err, "set pin path", "err %d errno %d\n", err, errno))
+ goto out;
+
+ /* should only pin the one unpinned map */
+ err = bpf_object__pin_maps(obj, NULL);
+ if (CHECK(err, "pin maps", "err %d errno %d\n", err, errno))
+ goto out;
+
+ /* check that nopinmap was pinned at the custom path */
+ err = stat(custpinpath, &statbuf);
+ if (CHECK(err, "stat custpinpath", "err %d errno %d\n", err, errno))
+ goto out;
+
+ /* remove the custom pin path to re-test it with auto-pinning below */
+ err = unlink(custpinpath);
+ if (CHECK(err, "unlink custpinpath", "err %d errno %d\n", err, errno))
+ goto out;
+
+ err = rmdir(custpath);
+ if (CHECK(err, "rmdir custpindir", "err %d errno %d\n", err, errno))
+ goto out;
+
+ bpf_object__close(obj);
+
+ /* open the valid object file again */
+ obj = bpf_object__open_file(file, NULL);
+ err = libbpf_get_error(obj);
+ if (CHECK(err, "default open", "err %d errno %d\n", err, errno)) {
+ obj = NULL;
+ goto out;
+ }
+
+ /* set pin paths so that nopinmap2 will attempt to reuse the map at
+ * pinpath (which will fail), but not before pinmap has already been
+ * reused
+ */
+ bpf_object__for_each_map(map, obj) {
+ if (!strcmp(bpf_map__name(map), "nopinmap"))
+ err = bpf_map__set_pin_path(map, nopinpath2);
+ else if (!strcmp(bpf_map__name(map), "nopinmap2"))
+ err = bpf_map__set_pin_path(map, pinpath);
+ else
+ continue;
+
+ if (CHECK(err, "set pin path", "err %d errno %d\n", err, errno))
+ goto out;
+ }
+
+ /* should fail because of map parameter mismatch */
+ err = bpf_object__load(obj);
+ if (CHECK(err != -EINVAL, "param mismatch load", "err %d errno %d\n", err, errno))
+ goto out;
+
+ /* nopinmap2 should have been pinned and cleaned up again */
+ err = stat(nopinpath2, &statbuf);
+ if (CHECK(!err || errno != ENOENT, "stat nopinpath2",
+ "err %d errno %d\n", err, errno))
+ goto out;
+
+ /* pinmap should still be there */
+ err = stat(pinpath, &statbuf);
+ if (CHECK(err, "stat pinpath", "err %d errno %d\n", err, errno))
+ goto out;
+
+ bpf_object__close(obj);
+
+ /* test auto-pinning at custom path with open opt */
+ obj = bpf_object__open_file(file, &opts);
+ if (CHECK_FAIL(libbpf_get_error(obj))) {
+ obj = NULL;
+ goto out;
+ }
+
+ err = bpf_object__load(obj);
+ if (CHECK(err, "custom load", "err %d errno %d\n", err, errno))
+ goto out;
+
+ /* check that pinmap was pinned at the custom path */
+ err = stat(custpinpath, &statbuf);
+ if (CHECK(err, "stat custpinpath", "err %d errno %d\n", err, errno))
+ goto out;
+
+out:
+ unlink(pinpath);
+ unlink(nopinpath);
+ unlink(nopinpath2);
+ unlink(custpinpath);
+ rmdir(custpath);
+ if (obj)
+ bpf_object__close(obj);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/probe_user.c b/tools/testing/selftests/bpf/prog_tests/probe_user.c
new file mode 100644
index 000000000000..8a3187dec048
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/probe_user.c
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+void test_probe_user(void)
+{
+#define kprobe_name "__sys_connect"
+ const char *prog_name = "kprobe/" kprobe_name;
+ const char *obj_file = "./test_probe_user.o";
+ DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts, );
+ int err, results_map_fd, sock_fd, duration = 0;
+ struct sockaddr curr, orig, tmp;
+ struct sockaddr_in *in = (struct sockaddr_in *)&curr;
+ struct bpf_link *kprobe_link = NULL;
+ struct bpf_program *kprobe_prog;
+ struct bpf_object *obj;
+ static const int zero = 0;
+
+ obj = bpf_object__open_file(obj_file, &opts);
+ if (CHECK(IS_ERR(obj), "obj_open_file", "err %ld\n", PTR_ERR(obj)))
+ return;
+
+ kprobe_prog = bpf_object__find_program_by_title(obj, prog_name);
+ if (CHECK(!kprobe_prog, "find_probe",
+ "prog '%s' not found\n", prog_name))
+ goto cleanup;
+
+ err = bpf_object__load(obj);
+ if (CHECK(err, "obj_load", "err %d\n", err))
+ goto cleanup;
+
+ results_map_fd = bpf_find_map(__func__, obj, "test_pro.bss");
+ if (CHECK(results_map_fd < 0, "find_bss_map",
+ "err %d\n", results_map_fd))
+ goto cleanup;
+
+ kprobe_link = bpf_program__attach_kprobe(kprobe_prog, false,
+ kprobe_name);
+ if (CHECK(IS_ERR(kprobe_link), "attach_kprobe",
+ "err %ld\n", PTR_ERR(kprobe_link))) {
+ kprobe_link = NULL;
+ goto cleanup;
+ }
+
+ memset(&curr, 0, sizeof(curr));
+ in->sin_family = AF_INET;
+ in->sin_port = htons(5555);
+ in->sin_addr.s_addr = inet_addr("255.255.255.255");
+ memcpy(&orig, &curr, sizeof(curr));
+
+ sock_fd = socket(AF_INET, SOCK_STREAM, 0);
+ if (CHECK(sock_fd < 0, "create_sock_fd", "err %d\n", sock_fd))
+ goto cleanup;
+
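+ /* connect() to 255.255.255.255 is expected to fail; its return value
+ * is ignored since only the kprobe side effects matter here
+ */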
+ connect(sock_fd, &curr, sizeof(curr));
+ close(sock_fd);
+
+ err = bpf_map_lookup_elem(results_map_fd, &zero, &tmp);
+ if (CHECK(err, "get_kprobe_res",
+ "failed to get kprobe res: %d\n", err))
+ goto cleanup;
+
+ in = (struct sockaddr_in *)&tmp;
+ if (CHECK(memcmp(&tmp, &orig, sizeof(orig)), "check_kprobe_res",
+ "wrong kprobe res from probe read: %s:%u\n",
+ inet_ntoa(in->sin_addr), ntohs(in->sin_port)))
+ goto cleanup;
+
+ memset(&tmp, 0xab, sizeof(tmp));
+
+ in = (struct sockaddr_in *)&curr;
+ if (CHECK(memcmp(&curr, &tmp, sizeof(tmp)), "check_kprobe_res",
+ "wrong kprobe res from probe write: %s:%u\n",
+ inet_ntoa(in->sin_addr), ntohs(in->sin_port)))
+ goto cleanup;
+cleanup:
+ bpf_link__destroy(kprobe_link);
+ bpf_object__close(obj);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/rdonly_maps.c b/tools/testing/selftests/bpf/prog_tests/rdonly_maps.c
new file mode 100644
index 000000000000..d90acc13d1ec
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/rdonly_maps.c
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+struct bss {
+ unsigned did_run;
+ unsigned iters;
+ unsigned sum;
+};
+
+struct rdonly_map_subtest {
+ const char *subtest_name;
+ const char *prog_name;
+ unsigned exp_iters;
+ unsigned exp_sum;
+};
+
+void test_rdonly_maps(void)
+{
+ const char *prog_name_skip_loop = "raw_tracepoint/sys_enter:skip_loop";
+ const char *prog_name_part_loop = "raw_tracepoint/sys_enter:part_loop";
+ const char *prog_name_full_loop = "raw_tracepoint/sys_enter:full_loop";
+ const char *file = "test_rdonly_maps.o";
+ struct rdonly_map_subtest subtests[] = {
+ { "skip loop", prog_name_skip_loop, 0, 0 },
+ { "part loop", prog_name_part_loop, 3, 2 + 3 + 4 },
+ { "full loop", prog_name_full_loop, 4, 2 + 3 + 4 + 5 },
+ };
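+ /* The expected sums assume the read-only array in test_rdonly_maps.o
+ * holds the values 2, 3, 4, 5, ..., so "part loop" sums 2 + 3 + 4.
+ */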
+ int i, err, zero = 0, duration = 0;
+ struct bpf_link *link = NULL;
+ struct bpf_program *prog;
+ struct bpf_map *bss_map;
+ struct bpf_object *obj;
+ struct bss bss;
+
+ obj = bpf_object__open_file(file, NULL);
+ if (CHECK(IS_ERR(obj), "obj_open", "err %ld\n", PTR_ERR(obj)))
+ return;
+
+ err = bpf_object__load(obj);
+ if (CHECK(err, "obj_load", "err %d errno %d\n", err, errno))
+ goto cleanup;
+
+ bss_map = bpf_object__find_map_by_name(obj, "test_rdo.bss");
+ if (CHECK(!bss_map, "find_bss_map", "failed\n"))
+ goto cleanup;
+
+ for (i = 0; i < ARRAY_SIZE(subtests); i++) {
+ const struct rdonly_map_subtest *t = &subtests[i];
+
+ if (!test__start_subtest(t->subtest_name))
+ continue;
+
+ prog = bpf_object__find_program_by_title(obj, t->prog_name);
+ if (CHECK(!prog, "find_prog", "prog '%s' not found\n",
+ t->prog_name))
+ goto cleanup;
+
+ memset(&bss, 0, sizeof(bss));
+ err = bpf_map_update_elem(bpf_map__fd(bss_map), &zero, &bss, 0);
+ if (CHECK(err, "set_bss", "failed to set bss data: %d\n", err))
+ goto cleanup;
+
+ link = bpf_program__attach_raw_tracepoint(prog, "sys_enter");
+ if (CHECK(IS_ERR(link), "attach_prog", "prog '%s', err %ld\n",
+ t->prog_name, PTR_ERR(link))) {
+ link = NULL;
+ goto cleanup;
+ }
+
+ /* trigger probe */
+ usleep(1);
+
+ bpf_link__destroy(link);
+ link = NULL;
+
+ err = bpf_map_lookup_elem(bpf_map__fd(bss_map), &zero, &bss);
+ if (CHECK(err, "get_bss", "failed to get bss data: %d\n", err))
+ goto cleanup;
+ if (CHECK(bss.did_run == 0, "check_run",
+ "prog '%s' didn't run?\n", t->prog_name))
+ goto cleanup;
+ if (CHECK(bss.iters != t->exp_iters, "check_iters",
+ "prog '%s' iters: %d, expected: %d\n",
+ t->prog_name, bss.iters, t->exp_iters))
+ goto cleanup;
+ if (CHECK(bss.sum != t->exp_sum, "check_sum",
+ "prog '%s' sum: %d, expected: %d\n",
+ t->prog_name, bss.sum, t->exp_sum))
+ goto cleanup;
+ }
+
+cleanup:
+ bpf_link__destroy(link);
+ bpf_object__close(obj);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/reference_tracking.c b/tools/testing/selftests/bpf/prog_tests/reference_tracking.c
index 5c78e2b5a917..fc0d7f4f02cf 100644
--- a/tools/testing/selftests/bpf/prog_tests/reference_tracking.c
+++ b/tools/testing/selftests/bpf/prog_tests/reference_tracking.c
@@ -3,16 +3,26 @@
void test_reference_tracking(void)
{
- const char *file = "./test_sk_lookup_kern.o";
+ const char *file = "test_sk_lookup_kern.o";
+ const char *obj_name = "ref_track";
+ DECLARE_LIBBPF_OPTS(bpf_object_open_opts, open_opts,
+ .object_name = obj_name,
+ .relaxed_maps = true,
+ );
struct bpf_object *obj;
struct bpf_program *prog;
__u32 duration = 0;
int err = 0;
- obj = bpf_object__open(file);
+ obj = bpf_object__open_file(file, &open_opts);
if (CHECK_FAIL(IS_ERR(obj)))
return;
+ if (CHECK(strcmp(bpf_object__name(obj), obj_name), "obj_name",
+ "wrong obj name '%s', expected '%s'\n",
+ bpf_object__name(obj), obj_name))
+ goto cleanup;
+
bpf_object__for_each_program(prog, obj) {
const char *title;
@@ -21,7 +31,8 @@ void test_reference_tracking(void)
if (strstr(title, ".text") != NULL)
continue;
- bpf_program__set_type(prog, BPF_PROG_TYPE_SCHED_CLS);
+ if (!test__start_subtest(title))
+ continue;
/* Expect verifier failure if test name has 'fail' */
if (strstr(title, "fail") != NULL) {
@@ -35,5 +46,7 @@ void test_reference_tracking(void)
}
CHECK(err, title, "\n");
}
+
+cleanup:
bpf_object__close(obj);
}
diff --git a/tools/testing/selftests/bpf/test_section_names.c b/tools/testing/selftests/bpf/prog_tests/section_names.c
index 29833aeaf0de..9d9351dc2ded 100644
--- a/tools/testing/selftests/bpf/test_section_names.c
+++ b/tools/testing/selftests/bpf/prog_tests/section_names.c
@@ -1,10 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2018 Facebook
+#include <test_progs.h>
-#include <err.h>
-#include <bpf/libbpf.h>
-
-#include "bpf_util.h"
+static int duration = 0;
struct sec_name_test {
const char sec_name[32];
@@ -20,19 +18,23 @@ struct sec_name_test {
};
static struct sec_name_test tests[] = {
- {"InvAliD", {-EINVAL, 0, 0}, {-EINVAL, 0} },
- {"cgroup", {-EINVAL, 0, 0}, {-EINVAL, 0} },
+ {"InvAliD", {-ESRCH, 0, 0}, {-EINVAL, 0} },
+ {"cgroup", {-ESRCH, 0, 0}, {-EINVAL, 0} },
{"socket", {0, BPF_PROG_TYPE_SOCKET_FILTER, 0}, {-EINVAL, 0} },
{"kprobe/", {0, BPF_PROG_TYPE_KPROBE, 0}, {-EINVAL, 0} },
+ {"uprobe/", {0, BPF_PROG_TYPE_KPROBE, 0}, {-EINVAL, 0} },
{"kretprobe/", {0, BPF_PROG_TYPE_KPROBE, 0}, {-EINVAL, 0} },
+ {"uretprobe/", {0, BPF_PROG_TYPE_KPROBE, 0}, {-EINVAL, 0} },
{"classifier", {0, BPF_PROG_TYPE_SCHED_CLS, 0}, {-EINVAL, 0} },
{"action", {0, BPF_PROG_TYPE_SCHED_ACT, 0}, {-EINVAL, 0} },
{"tracepoint/", {0, BPF_PROG_TYPE_TRACEPOINT, 0}, {-EINVAL, 0} },
+ {"tp/", {0, BPF_PROG_TYPE_TRACEPOINT, 0}, {-EINVAL, 0} },
{
"raw_tracepoint/",
{0, BPF_PROG_TYPE_RAW_TRACEPOINT, 0},
{-EINVAL, 0},
},
+ {"raw_tp/", {0, BPF_PROG_TYPE_RAW_TRACEPOINT, 0}, {-EINVAL, 0} },
{"xdp", {0, BPF_PROG_TYPE_XDP, 0}, {-EINVAL, 0} },
{"perf_event", {0, BPF_PROG_TYPE_PERF_EVENT, 0}, {-EINVAL, 0} },
{"lwt_in", {0, BPF_PROG_TYPE_LWT_IN, 0}, {-EINVAL, 0} },
@@ -146,7 +148,7 @@ static struct sec_name_test tests[] = {
},
};
-static int test_prog_type_by_name(const struct sec_name_test *test)
+static void test_prog_type_by_name(const struct sec_name_test *test)
{
enum bpf_attach_type expected_attach_type;
enum bpf_prog_type prog_type;
@@ -155,79 +157,47 @@ static int test_prog_type_by_name(const struct sec_name_test *test)
rc = libbpf_prog_type_by_name(test->sec_name, &prog_type,
&expected_attach_type);
- if (rc != test->expected_load.rc) {
- warnx("prog: unexpected rc=%d for %s", rc, test->sec_name);
- return -1;
- }
+ CHECK(rc != test->expected_load.rc, "check_code",
+ "prog: unexpected rc=%d for %s", rc, test->sec_name);
if (rc)
- return 0;
-
- if (prog_type != test->expected_load.prog_type) {
- warnx("prog: unexpected prog_type=%d for %s", prog_type,
- test->sec_name);
- return -1;
- }
+ return;
- if (expected_attach_type != test->expected_load.expected_attach_type) {
- warnx("prog: unexpected expected_attach_type=%d for %s",
- expected_attach_type, test->sec_name);
- return -1;
- }
+ CHECK(prog_type != test->expected_load.prog_type, "check_prog_type",
+ "prog: unexpected prog_type=%d for %s",
+ prog_type, test->sec_name);
- return 0;
+ CHECK(expected_attach_type != test->expected_load.expected_attach_type,
+ "check_attach_type", "prog: unexpected expected_attach_type=%d for %s",
+ expected_attach_type, test->sec_name);
}
-static int test_attach_type_by_name(const struct sec_name_test *test)
+static void test_attach_type_by_name(const struct sec_name_test *test)
{
enum bpf_attach_type attach_type;
int rc;
rc = libbpf_attach_type_by_name(test->sec_name, &attach_type);
- if (rc != test->expected_attach.rc) {
- warnx("attach: unexpected rc=%d for %s", rc, test->sec_name);
- return -1;
- }
+ CHECK(rc != test->expected_attach.rc, "check_ret",
+ "attach: unexpected rc=%d for %s", rc, test->sec_name);
if (rc)
- return 0;
-
- if (attach_type != test->expected_attach.attach_type) {
- warnx("attach: unexpected attach_type=%d for %s", attach_type,
- test->sec_name);
- return -1;
- }
+ return;
- return 0;
+ CHECK(attach_type != test->expected_attach.attach_type,
+ "check_attach_type", "attach: unexpected attach_type=%d for %s",
+ attach_type, test->sec_name);
}
-static int run_test_case(const struct sec_name_test *test)
+void test_section_names(void)
{
- if (test_prog_type_by_name(test))
- return -1;
- if (test_attach_type_by_name(test))
- return -1;
- return 0;
-}
-
-static int run_tests(void)
-{
- int passes = 0;
- int fails = 0;
int i;
for (i = 0; i < ARRAY_SIZE(tests); ++i) {
- if (run_test_case(&tests[i]))
- ++fails;
- else
- ++passes;
- }
- printf("Summary: %d PASSED, %d FAILED\n", passes, fails);
- return fails ? -1 : 0;
-}
+ struct sec_name_test *test = &tests[i];
-int main(int argc, char **argv)
-{
- return run_tests();
+ test_prog_type_by_name(test);
+ test_attach_type_by_name(test);
+ }
}
diff --git a/tools/testing/selftests/bpf/prog_tests/skb_ctx.c b/tools/testing/selftests/bpf/prog_tests/skb_ctx.c
index e95baa32e277..a2eb8db8dafb 100644
--- a/tools/testing/selftests/bpf/prog_tests/skb_ctx.c
+++ b/tools/testing/selftests/bpf/prog_tests/skb_ctx.c
@@ -10,6 +10,7 @@ void test_skb_ctx(void)
.cb[3] = 4,
.cb[4] = 5,
.priority = 6,
+ .tstamp = 7,
};
struct bpf_prog_test_run_attr tattr = {
.data_in = &pkt_v4,
@@ -86,4 +87,8 @@ void test_skb_ctx(void)
"ctx_out_priority",
"skb->priority == %d, expected %d\n",
skb.priority, 7);
+ CHECK_ATTR(skb.tstamp != 8,
+ "ctx_out_tstamp",
+ "skb->tstamp == %lld, expected %d\n",
+ skb.tstamp, 8);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/tailcalls.c b/tools/testing/selftests/bpf/prog_tests/tailcalls.c
new file mode 100644
index 000000000000..bb8fe646dd9f
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/tailcalls.c
@@ -0,0 +1,487 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+/* test_tailcall_1 checks basic functionality by patching multiple locations
+ * in a single program for a single tail call slot with nop->jmp, jmp->nop
+ * and jmp->jmp rewrites. Also checks for nop->nop.
+ */
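+/* A minimal sketch of what the BPF side (tailcall1.o, not shown here) is
+ * assumed to look like; the names are illustrative:
+ *
+ *	struct {
+ *		__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+ *		__uint(max_entries, 3);
+ *		__uint(key_size, sizeof(__u32));
+ *		__uint(value_size, sizeof(__u32));
+ *	} jmp_table SEC(".maps");
+ *
+ *	SEC("classifier/0")
+ *	int bpf_func_0(struct __sk_buff *skb) { return 0; }
+ *	// ... likewise classifier/1 and classifier/2 ...
+ *
+ *	SEC("classifier")
+ *	int entry(struct __sk_buff *skb)
+ *	{
+ *		bpf_tail_call(skb, &jmp_table, 0);	// patched slot
+ *		return 3;	// fallthrough when the slot is empty
+ *	}
+ */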
+static void test_tailcall_1(void)
+{
+ int err, map_fd, prog_fd, main_fd, i, j;
+ struct bpf_map *prog_array;
+ struct bpf_program *prog;
+ struct bpf_object *obj;
+ __u32 retval, duration;
+ char prog_name[32];
+ char buff[128] = {};
+
+ err = bpf_prog_load("tailcall1.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
+ &prog_fd);
+ if (CHECK_FAIL(err))
+ return;
+
+ prog = bpf_object__find_program_by_title(obj, "classifier");
+ if (CHECK_FAIL(!prog))
+ goto out;
+
+ main_fd = bpf_program__fd(prog);
+ if (CHECK_FAIL(main_fd < 0))
+ goto out;
+
+ prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
+ if (CHECK_FAIL(!prog_array))
+ goto out;
+
+ map_fd = bpf_map__fd(prog_array);
+ if (CHECK_FAIL(map_fd < 0))
+ goto out;
+
+ for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
+ snprintf(prog_name, sizeof(prog_name), "classifier/%i", i);
+
+ prog = bpf_object__find_program_by_title(obj, prog_name);
+ if (CHECK_FAIL(!prog))
+ goto out;
+
+ prog_fd = bpf_program__fd(prog);
+ if (CHECK_FAIL(prog_fd < 0))
+ goto out;
+
+ err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
+ if (CHECK_FAIL(err))
+ goto out;
+ }
+
+ for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
+ err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
+ &duration, &retval, NULL);
+ CHECK(err || retval != i, "tailcall",
+ "err %d errno %d retval %d\n", err, errno, retval);
+
+ err = bpf_map_delete_elem(map_fd, &i);
+ if (CHECK_FAIL(err))
+ goto out;
+ }
+
+ err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
+ &duration, &retval, NULL);
+ CHECK(err || retval != 3, "tailcall", "err %d errno %d retval %d\n",
+ err, errno, retval);
+
+ for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
+ snprintf(prog_name, sizeof(prog_name), "classifier/%i", i);
+
+ prog = bpf_object__find_program_by_title(obj, prog_name);
+ if (CHECK_FAIL(!prog))
+ goto out;
+
+ prog_fd = bpf_program__fd(prog);
+ if (CHECK_FAIL(prog_fd < 0))
+ goto out;
+
+ err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
+ if (CHECK_FAIL(err))
+ goto out;
+ }
+
+ err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
+ &duration, &retval, NULL);
+ CHECK(err || retval != 0, "tailcall", "err %d errno %d retval %d\n",
+ err, errno, retval);
+
+ for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
+ j = bpf_map__def(prog_array)->max_entries - 1 - i;
+ snprintf(prog_name, sizeof(prog_name), "classifier/%i", j);
+
+ prog = bpf_object__find_program_by_title(obj, prog_name);
+ if (CHECK_FAIL(!prog))
+ goto out;
+
+ prog_fd = bpf_program__fd(prog);
+ if (CHECK_FAIL(prog_fd < 0))
+ goto out;
+
+ err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
+ if (CHECK_FAIL(err))
+ goto out;
+ }
+
+ for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
+ j = bpf_map__def(prog_array)->max_entries - 1 - i;
+
+ err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
+ &duration, &retval, NULL);
+ CHECK(err || retval != j, "tailcall",
+ "err %d errno %d retval %d\n", err, errno, retval);
+
+ err = bpf_map_delete_elem(map_fd, &i);
+ if (CHECK_FAIL(err))
+ goto out;
+ }
+
+ err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
+ &duration, &retval, NULL);
+ CHECK(err || retval != 3, "tailcall", "err %d errno %d retval %d\n",
+ err, errno, retval);
+
+ for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
+ err = bpf_map_delete_elem(map_fd, &i);
+ if (CHECK_FAIL(err >= 0 || errno != ENOENT))
+ goto out;
+
+ err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
+ &duration, &retval, NULL);
+ CHECK(err || retval != 3, "tailcall",
+ "err %d errno %d retval %d\n", err, errno, retval);
+ }
+
+out:
+ bpf_object__close(obj);
+}
+
+/* test_tailcall_2 checks that patching multiple programs for a single
+ * tail call slot works. It also jumps through several programs and tests
+ * the tail call limit counter.
+ */
+static void test_tailcall_2(void)
+{
+ int err, map_fd, prog_fd, main_fd, i;
+ struct bpf_map *prog_array;
+ struct bpf_program *prog;
+ struct bpf_object *obj;
+ __u32 retval, duration;
+ char prog_name[32];
+ char buff[128] = {};
+
+ err = bpf_prog_load("tailcall2.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
+ &prog_fd);
+ if (CHECK_FAIL(err))
+ return;
+
+ prog = bpf_object__find_program_by_title(obj, "classifier");
+ if (CHECK_FAIL(!prog))
+ goto out;
+
+ main_fd = bpf_program__fd(prog);
+ if (CHECK_FAIL(main_fd < 0))
+ goto out;
+
+ prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
+ if (CHECK_FAIL(!prog_array))
+ goto out;
+
+ map_fd = bpf_map__fd(prog_array);
+ if (CHECK_FAIL(map_fd < 0))
+ goto out;
+
+ for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
+ snprintf(prog_name, sizeof(prog_name), "classifier/%i", i);
+
+ prog = bpf_object__find_program_by_title(obj, prog_name);
+ if (CHECK_FAIL(!prog))
+ goto out;
+
+ prog_fd = bpf_program__fd(prog);
+ if (CHECK_FAIL(prog_fd < 0))
+ goto out;
+
+ err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
+ if (CHECK_FAIL(err))
+ goto out;
+ }
+
+ err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
+ &duration, &retval, NULL);
+ CHECK(err || retval != 2, "tailcall", "err %d errno %d retval %d\n",
+ err, errno, retval);
+
+ i = 2;
+ err = bpf_map_delete_elem(map_fd, &i);
+ if (CHECK_FAIL(err))
+ goto out;
+
+ err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
+ &duration, &retval, NULL);
+ CHECK(err || retval != 1, "tailcall", "err %d errno %d retval %d\n",
+ err, errno, retval);
+
+ i = 0;
+ err = bpf_map_delete_elem(map_fd, &i);
+ if (CHECK_FAIL(err))
+ goto out;
+
+ err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
+ &duration, &retval, NULL);
+ CHECK(err || retval != 3, "tailcall", "err %d errno %d retval %d\n",
+ err, errno, retval);
+out:
+ bpf_object__close(obj);
+}
+
+/* test_tailcall_3 checks that the count value of the tail call limit
+ * enforcement matches with expectations.
+ */
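+/* The BPF side is assumed to tail-call itself, bumping a global counter on
+ * each entry: the program runs once, then tail-calls MAX_TAIL_CALL_CNT (32)
+ * more times before the kernel stops it, so the counter reads 33 below.
+ */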
+static void test_tailcall_3(void)
+{
+ int err, map_fd, prog_fd, main_fd, data_fd, i, val;
+ struct bpf_map *prog_array, *data_map;
+ struct bpf_program *prog;
+ struct bpf_object *obj;
+ __u32 retval, duration;
+ char buff[128] = {};
+
+ err = bpf_prog_load("tailcall3.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
+ &prog_fd);
+ if (CHECK_FAIL(err))
+ return;
+
+ prog = bpf_object__find_program_by_title(obj, "classifier");
+ if (CHECK_FAIL(!prog))
+ goto out;
+
+ main_fd = bpf_program__fd(prog);
+ if (CHECK_FAIL(main_fd < 0))
+ goto out;
+
+ prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
+ if (CHECK_FAIL(!prog_array))
+ goto out;
+
+ map_fd = bpf_map__fd(prog_array);
+ if (CHECK_FAIL(map_fd < 0))
+ goto out;
+
+ prog = bpf_object__find_program_by_title(obj, "classifier/0");
+ if (CHECK_FAIL(!prog))
+ goto out;
+
+ prog_fd = bpf_program__fd(prog);
+ if (CHECK_FAIL(prog_fd < 0))
+ goto out;
+
+ i = 0;
+ err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
+ if (CHECK_FAIL(err))
+ goto out;
+
+ err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
+ &duration, &retval, NULL);
+ CHECK(err || retval != 1, "tailcall", "err %d errno %d retval %d\n",
+ err, errno, retval);
+
+ data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
+ if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
+ goto out;
+
+ data_fd = bpf_map__fd(data_map);
+ if (CHECK_FAIL(data_fd < 0))
+ goto out;
+
+ i = 0;
+ err = bpf_map_lookup_elem(data_fd, &i, &val);
+ CHECK(err || val != 33, "tailcall count", "err %d errno %d count %d\n",
+ err, errno, val);
+
+ i = 0;
+ err = bpf_map_delete_elem(map_fd, &i);
+ if (CHECK_FAIL(err))
+ goto out;
+
+ err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
+ &duration, &retval, NULL);
+ CHECK(err || retval != 0, "tailcall", "err %d errno %d retval %d\n",
+ err, errno, retval);
+out:
+ bpf_object__close(obj);
+}
+
+/* test_tailcall_4 checks that the kernel properly selects an indirect jump
+ * when the tail call key is not known at verification time. The key is
+ * passed in via global data, selecting different targets whose return
+ * values we can compare.
+ */
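+/* Sketch of the assumed key selection on the BPF side: the key lives in
+ * .bss, so the verifier cannot treat it as a known constant, e.g.:
+ *
+ *	int selector = 0;	// updated from user space via tailcall.bss
+ *
+ *	SEC("classifier")
+ *	int entry(struct __sk_buff *skb)
+ *	{
+ *		bpf_tail_call(skb, &jmp_table, selector);
+ *		return 3;
+ *	}
+ */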
+static void test_tailcall_4(void)
+{
+ int err, map_fd, prog_fd, main_fd, data_fd, i;
+ struct bpf_map *prog_array, *data_map;
+ struct bpf_program *prog;
+ struct bpf_object *obj;
+ __u32 retval, duration;
+ static const int zero = 0;
+ char buff[128] = {};
+ char prog_name[32];
+
+ err = bpf_prog_load("tailcall4.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
+ &prog_fd);
+ if (CHECK_FAIL(err))
+ return;
+
+ prog = bpf_object__find_program_by_title(obj, "classifier");
+ if (CHECK_FAIL(!prog))
+ goto out;
+
+ main_fd = bpf_program__fd(prog);
+ if (CHECK_FAIL(main_fd < 0))
+ goto out;
+
+ prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
+ if (CHECK_FAIL(!prog_array))
+ goto out;
+
+ map_fd = bpf_map__fd(prog_array);
+ if (CHECK_FAIL(map_fd < 0))
+ goto out;
+
+ data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
+ if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
+ goto out;
+
+ data_fd = bpf_map__fd(data_map);
+ if (CHECK_FAIL(data_fd < 0))
+ goto out;
+
+ for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
+ snprintf(prog_name, sizeof(prog_name), "classifier/%i", i);
+
+ prog = bpf_object__find_program_by_title(obj, prog_name);
+ if (CHECK_FAIL(!prog))
+ goto out;
+
+ prog_fd = bpf_program__fd(prog);
+ if (CHECK_FAIL(prog_fd < 0))
+ goto out;
+
+ err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
+ if (CHECK_FAIL(err))
+ goto out;
+ }
+
+ for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
+ err = bpf_map_update_elem(data_fd, &zero, &i, BPF_ANY);
+ if (CHECK_FAIL(err))
+ goto out;
+
+ err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
+ &duration, &retval, NULL);
+ CHECK(err || retval != i, "tailcall",
+ "err %d errno %d retval %d\n", err, errno, retval);
+ }
+
+ for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
+ err = bpf_map_update_elem(data_fd, &zero, &i, BPF_ANY);
+ if (CHECK_FAIL(err))
+ goto out;
+
+ err = bpf_map_delete_elem(map_fd, &i);
+ if (CHECK_FAIL(err))
+ goto out;
+
+ err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
+ &duration, &retval, NULL);
+ CHECK(err || retval != 3, "tailcall",
+ "err %d errno %d retval %d\n", err, errno, retval);
+ }
+out:
+ bpf_object__close(obj);
+}
+
+/* test_tailcall_5 checks, much like test_tailcall_4, that the kernel
+ * generates an indirect jump when the key is constant but differs between
+ * branches.
+ */
+static void test_tailcall_5(void)
+{
+ int err, map_fd, prog_fd, main_fd, data_fd, i, key[] = { 1111, 1234, 5678 };
+ struct bpf_map *prog_array, *data_map;
+ struct bpf_program *prog;
+ struct bpf_object *obj;
+ __u32 retval, duration;
+ static const int zero = 0;
+ char buff[128] = {};
+ char prog_name[32];
+
+ err = bpf_prog_load("tailcall5.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
+ &prog_fd);
+ if (CHECK_FAIL(err))
+ return;
+
+ prog = bpf_object__find_program_by_title(obj, "classifier");
+ if (CHECK_FAIL(!prog))
+ goto out;
+
+ main_fd = bpf_program__fd(prog);
+ if (CHECK_FAIL(main_fd < 0))
+ goto out;
+
+ prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
+ if (CHECK_FAIL(!prog_array))
+ goto out;
+
+ map_fd = bpf_map__fd(prog_array);
+ if (CHECK_FAIL(map_fd < 0))
+ goto out;
+
+ data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
+ if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
+ goto out;
+
+ data_fd = bpf_map__fd(data_map);
+ if (CHECK_FAIL(data_fd < 0))
+ goto out;
+
+ for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
+ snprintf(prog_name, sizeof(prog_name), "classifier/%i", i);
+
+ prog = bpf_object__find_program_by_title(obj, prog_name);
+ if (CHECK_FAIL(!prog))
+ goto out;
+
+ prog_fd = bpf_program__fd(prog);
+ if (CHECK_FAIL(prog_fd < 0))
+ goto out;
+
+ err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
+ if (CHECK_FAIL(err))
+ goto out;
+ }
+
+ for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
+ err = bpf_map_update_elem(data_fd, &zero, &key[i], BPF_ANY);
+ if (CHECK_FAIL(err))
+ goto out;
+
+ err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
+ &duration, &retval, NULL);
+ CHECK(err || retval != i, "tailcall",
+ "err %d errno %d retval %d\n", err, errno, retval);
+ }
+
+ for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
+ err = bpf_map_update_elem(data_fd, &zero, &key[i], BPF_ANY);
+ if (CHECK_FAIL(err))
+ goto out;
+
+ err = bpf_map_delete_elem(map_fd, &i);
+ if (CHECK_FAIL(err))
+ goto out;
+
+ err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
+ &duration, &retval, NULL);
+ CHECK(err || retval != 3, "tailcall",
+ "err %d errno %d retval %d\n", err, errno, retval);
+ }
+out:
+ bpf_object__close(obj);
+}
+
+void test_tailcalls(void)
+{
+ if (test__start_subtest("tailcall_1"))
+ test_tailcall_1();
+ if (test__start_subtest("tailcall_2"))
+ test_tailcall_2();
+ if (test__start_subtest("tailcall_3"))
+ test_tailcall_3();
+ if (test__start_subtest("tailcall_4"))
+ test_tailcall_4();
+ if (test__start_subtest("tailcall_5"))
+ test_tailcall_5();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/test_overhead.c b/tools/testing/selftests/bpf/prog_tests/test_overhead.c
new file mode 100644
index 000000000000..c32aa28bd93f
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/test_overhead.c
@@ -0,0 +1,142 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2019 Facebook */
+#define _GNU_SOURCE
+#include <sched.h>
+#include <test_progs.h>
+
+#define MAX_CNT 100000
+
+static __u64 time_get_ns(void)
+{
+ struct timespec ts;
+
+ clock_gettime(CLOCK_MONOTONIC, &ts);
+ return ts.tv_sec * 1000000000ull + ts.tv_nsec;
+}
+
+static int test_task_rename(const char *prog)
+{
+ int i, fd, duration = 0, err;
+ char buf[] = "test\n";
+ __u64 start_time;
+
+ fd = open("/proc/self/comm", O_WRONLY|O_TRUNC);
+ if (CHECK(fd < 0, "open /proc", "err %d", errno))
+ return -1;
+ start_time = time_get_ns();
+ for (i = 0; i < MAX_CNT; i++) {
+ err = write(fd, buf, sizeof(buf));
+ if (err < 0) {
+ CHECK(err < 0, "task rename", "err %d", errno);
+ close(fd);
+ return -1;
+ }
+ }
+ printf("task_rename %s\t%lluK events per sec\n", prog,
+ MAX_CNT * 1000000ll / (time_get_ns() - start_time));
+ close(fd);
+ return 0;
+}
+
+static void test_run(const char *prog)
+{
+ test_task_rename(prog);
+}
+
+static void setaffinity(void)
+{
+ cpu_set_t cpuset;
+ int cpu = 0;
+
+ CPU_ZERO(&cpuset);
+ CPU_SET(cpu, &cpuset);
+ sched_setaffinity(0, sizeof(cpuset), &cpuset);
+}
+
+void test_test_overhead(void)
+{
+ const char *kprobe_name = "kprobe/__set_task_comm";
+ const char *kretprobe_name = "kretprobe/__set_task_comm";
+ const char *raw_tp_name = "raw_tp/task_rename";
+ const char *fentry_name = "fentry/__set_task_comm";
+ const char *fexit_name = "fexit/__set_task_comm";
+ const char *kprobe_func = "__set_task_comm";
+ struct bpf_program *kprobe_prog, *kretprobe_prog, *raw_tp_prog;
+ struct bpf_program *fentry_prog, *fexit_prog;
+ struct bpf_object *obj;
+ struct bpf_link *link;
+ int err, duration = 0;
+
+ obj = bpf_object__open_file("./test_overhead.o", NULL);
+ if (CHECK(IS_ERR(obj), "obj_open_file", "err %ld\n", PTR_ERR(obj)))
+ return;
+
+ kprobe_prog = bpf_object__find_program_by_title(obj, kprobe_name);
+ if (CHECK(!kprobe_prog, "find_probe",
+ "prog '%s' not found\n", kprobe_name))
+ goto cleanup;
+ kretprobe_prog = bpf_object__find_program_by_title(obj, kretprobe_name);
+ if (CHECK(!kretprobe_prog, "find_probe",
+ "prog '%s' not found\n", kretprobe_name))
+ goto cleanup;
+ raw_tp_prog = bpf_object__find_program_by_title(obj, raw_tp_name);
+ if (CHECK(!raw_tp_prog, "find_probe",
+ "prog '%s' not found\n", raw_tp_name))
+ goto cleanup;
+ fentry_prog = bpf_object__find_program_by_title(obj, fentry_name);
+ if (CHECK(!fentry_prog, "find_probe",
+ "prog '%s' not found\n", fentry_name))
+ goto cleanup;
+ fexit_prog = bpf_object__find_program_by_title(obj, fexit_name);
+ if (CHECK(!fexit_prog, "find_probe",
+ "prog '%s' not found\n", fexit_name))
+ goto cleanup;
+
+ err = bpf_object__load(obj);
+ if (CHECK(err, "obj_load", "err %d\n", err))
+ goto cleanup;
+
+ setaffinity();
+
+ /* baseline run */
+ test_run("base");
+
+ /* attach kprobe */
+ link = bpf_program__attach_kprobe(kprobe_prog, false /* retprobe */,
+ kprobe_func);
+ if (CHECK(IS_ERR(link), "attach_kprobe", "err %ld\n", PTR_ERR(link)))
+ goto cleanup;
+ test_run("kprobe");
+ bpf_link__destroy(link);
+
+ /* attach kretprobe */
+ link = bpf_program__attach_kprobe(kretprobe_prog, true /* retprobe */,
+ kprobe_func);
+ if (CHECK(IS_ERR(link), "attach kretprobe", "err %ld\n", PTR_ERR(link)))
+ goto cleanup;
+ test_run("kretprobe");
+ bpf_link__destroy(link);
+
+ /* attach raw_tp */
+ link = bpf_program__attach_raw_tracepoint(raw_tp_prog, "task_rename");
+ if (CHECK(IS_ERR(link), "attach raw_tp", "err %ld\n", PTR_ERR(link)))
+ goto cleanup;
+ test_run("raw_tp");
+ bpf_link__destroy(link);
+
+ /* attach fentry */
+ link = bpf_program__attach_trace(fentry_prog);
+ if (CHECK(IS_ERR(link), "attach fentry", "err %ld\n", PTR_ERR(link)))
+ goto cleanup;
+ test_run("fentry");
+ bpf_link__destroy(link);
+
+ /* attach fexit */
+ link = bpf_program__attach_trace(fexit_prog);
+ if (CHECK(IS_ERR(link), "attach fexit", "err %ld\n", PTR_ERR(link)))
+ goto cleanup;
+ test_run("fexit");
+ bpf_link__destroy(link);
+cleanup:
+ bpf_object__close(obj);
+}
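
As a side note, the attach/destroy pattern above generalizes to any single program. A minimal sketch, assuming the object from this test is already open:

#include <bpf/libbpf.h>

static int attach_one(struct bpf_object *obj)
{
	struct bpf_program *prog;
	struct bpf_link *link;

	prog = bpf_object__find_program_by_title(obj,
						 "kprobe/__set_task_comm");
	if (!prog)
		return -1;

	/* false = entry probe; passing true would request a kretprobe */
	link = bpf_program__attach_kprobe(prog, false, "__set_task_comm");
	if (libbpf_get_error(link))
		return -1;

	/* ... run the workload being measured ... */

	bpf_link__destroy(link);	/* detaches the probe */
	return 0;
}
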
diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___err_wrong_val_type.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___err_wrong_val_type.c
new file mode 100644
index 000000000000..f5a7c832d0f2
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___err_wrong_val_type.c
@@ -0,0 +1,3 @@
+#include "core_reloc_types.h"
+
+void f(struct core_reloc_arrays___err_wrong_val_type x) {}
diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___err_wrong_val_type1.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___err_wrong_val_type1.c
deleted file mode 100644
index 795a5b729176..000000000000
--- a/tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___err_wrong_val_type1.c
+++ /dev/null
@@ -1,3 +0,0 @@
-#include "core_reloc_types.h"
-
-void f(struct core_reloc_arrays___err_wrong_val_type1 x) {}
diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___err_wrong_val_type2.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___err_wrong_val_type2.c
deleted file mode 100644
index 3af74b837c4d..000000000000
--- a/tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___err_wrong_val_type2.c
+++ /dev/null
@@ -1,3 +0,0 @@
-#include "core_reloc_types.h"
-
-void f(struct core_reloc_arrays___err_wrong_val_type2 x) {}
diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_bitfields.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_bitfields.c
new file mode 100644
index 000000000000..cff6f1836cc5
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_bitfields.c
@@ -0,0 +1,3 @@
+#include "core_reloc_types.h"
+
+void f(struct core_reloc_bitfields x) {}
diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_bitfields___bit_sz_change.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_bitfields___bit_sz_change.c
new file mode 100644
index 000000000000..a1cd157d5451
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_bitfields___bit_sz_change.c
@@ -0,0 +1,3 @@
+#include "core_reloc_types.h"
+
+void f(struct core_reloc_bitfields___bit_sz_change x) {}
diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_bitfields___bitfield_vs_int.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_bitfields___bitfield_vs_int.c
new file mode 100644
index 000000000000..3f2c7b07c456
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_bitfields___bitfield_vs_int.c
@@ -0,0 +1,3 @@
+#include "core_reloc_types.h"
+
+void f(struct core_reloc_bitfields___bitfield_vs_int x) {}
diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_bitfields___err_too_big_bitfield.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_bitfields___err_too_big_bitfield.c
new file mode 100644
index 000000000000..f9746d6be399
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_bitfields___err_too_big_bitfield.c
@@ -0,0 +1,3 @@
+#include "core_reloc_types.h"
+
+void f(struct core_reloc_bitfields___err_too_big_bitfield x) {}
diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_bitfields___just_big_enough.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_bitfields___just_big_enough.c
new file mode 100644
index 000000000000..e7c75a6953dd
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_bitfields___just_big_enough.c
@@ -0,0 +1,3 @@
+#include "core_reloc_types.h"
+
+void f(struct core_reloc_bitfields___just_big_enough x) {}
diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence.c
new file mode 100644
index 000000000000..0b62315ad46c
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence.c
@@ -0,0 +1,3 @@
+#include "core_reloc_types.h"
+
+void f(struct core_reloc_existence x) {}
diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_arr_kind.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_arr_kind.c
new file mode 100644
index 000000000000..dd0ffa518f36
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_arr_kind.c
@@ -0,0 +1,3 @@
+#include "core_reloc_types.h"
+
+void f(struct core_reloc_existence___err_wrong_arr_kind x) {}
diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_arr_value_type.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_arr_value_type.c
new file mode 100644
index 000000000000..bc83372088ad
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_arr_value_type.c
@@ -0,0 +1,3 @@
+#include "core_reloc_types.h"
+
+void f(struct core_reloc_existence___err_wrong_arr_value_type x) {}
diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_kind.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_kind.c
new file mode 100644
index 000000000000..917bec41be08
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_kind.c
@@ -0,0 +1,3 @@
+#include "core_reloc_types.h"
+
+void f(struct core_reloc_existence___err_wrong_int_kind x) {}
diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_sz.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_sz.c
new file mode 100644
index 000000000000..6ec7e6ec1c91
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_sz.c
@@ -0,0 +1,3 @@
+#include "core_reloc_types.h"
+
+void f(struct core_reloc_existence___err_wrong_int_sz x) {}
diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_type.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_type.c
new file mode 100644
index 000000000000..7bbcacf2b0d1
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_type.c
@@ -0,0 +1,3 @@
+#include "core_reloc_types.h"
+
+void f(struct core_reloc_existence___err_wrong_int_type x) {}
diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_struct_type.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_struct_type.c
new file mode 100644
index 000000000000..f384dd38ec70
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_struct_type.c
@@ -0,0 +1,3 @@
+#include "core_reloc_types.h"
+
+void f(struct core_reloc_existence___err_wrong_struct_type x) {}
diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___minimal.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___minimal.c
new file mode 100644
index 000000000000..aec2dec20e90
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___minimal.c
@@ -0,0 +1,3 @@
+#include "core_reloc_types.h"
+
+void f(struct core_reloc_existence___minimal x) {}
diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_bitfield.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_bitfield.c
deleted file mode 100644
index 50369e8320a0..000000000000
--- a/tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_bitfield.c
+++ /dev/null
@@ -1,3 +0,0 @@
-#include "core_reloc_types.h"
-
-void f(struct core_reloc_ints___err_bitfield x) {}
diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_wrong_sz_16.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_wrong_sz_16.c
deleted file mode 100644
index 823bac13d641..000000000000
--- a/tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_wrong_sz_16.c
+++ /dev/null
@@ -1,3 +0,0 @@
-#include "core_reloc_types.h"
-
-void f(struct core_reloc_ints___err_wrong_sz_16 x) {}
diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_wrong_sz_32.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_wrong_sz_32.c
deleted file mode 100644
index b44f3be18535..000000000000
--- a/tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_wrong_sz_32.c
+++ /dev/null
@@ -1,3 +0,0 @@
-#include "core_reloc_types.h"
-
-void f(struct core_reloc_ints___err_wrong_sz_32 x) {}
diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_wrong_sz_64.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_wrong_sz_64.c
deleted file mode 100644
index 9a3dd2099c0f..000000000000
--- a/tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_wrong_sz_64.c
+++ /dev/null
@@ -1,3 +0,0 @@
-#include "core_reloc_types.h"
-
-void f(struct core_reloc_ints___err_wrong_sz_64 x) {}
diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_wrong_sz_8.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_wrong_sz_8.c
deleted file mode 100644
index 9f11ef5f6e88..000000000000
--- a/tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_wrong_sz_8.c
+++ /dev/null
@@ -1,3 +0,0 @@
-#include "core_reloc_types.h"
-
-void f(struct core_reloc_ints___err_wrong_sz_8 x) {}
diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_size.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_size.c
new file mode 100644
index 000000000000..3c80903da5a4
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_size.c
@@ -0,0 +1,3 @@
+#include "core_reloc_types.h"
+
+void f(struct core_reloc_size x) {}
diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_size___diff_sz.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_size___diff_sz.c
new file mode 100644
index 000000000000..6dbd14436b52
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_size___diff_sz.c
@@ -0,0 +1,3 @@
+#include "core_reloc_types.h"
+
+void f(struct core_reloc_size___diff_sz x) {}
diff --git a/tools/testing/selftests/bpf/progs/btf_dump_test_case_padding.c b/tools/testing/selftests/bpf/progs/btf_dump_test_case_padding.c
index 3a62119c7498..35c512818a56 100644
--- a/tools/testing/selftests/bpf/progs/btf_dump_test_case_padding.c
+++ b/tools/testing/selftests/bpf/progs/btf_dump_test_case_padding.c
@@ -62,6 +62,10 @@ struct padded_a_lot {
* long: 64;
* long: 64;
* int b;
+ * long: 32;
+ * long: 64;
+ * long: 64;
+ * long: 64;
*};
*
*/
@@ -95,7 +99,6 @@ struct zone_padding {
struct zone {
int a;
short b;
- short: 16;
struct zone_padding __pad__;
};
diff --git a/tools/testing/selftests/bpf/progs/core_reloc_types.h b/tools/testing/selftests/bpf/progs/core_reloc_types.h
index f686a8138d90..9311489e14b2 100644
--- a/tools/testing/selftests/bpf/progs/core_reloc_types.h
+++ b/tools/testing/selftests/bpf/progs/core_reloc_types.h
@@ -1,5 +1,14 @@
#include <stdint.h>
#include <stdbool.h>
+/*
+ * KERNEL
+ */
+
+struct core_reloc_kernel_output {
+ int valid[10];
+ char comm[sizeof("test_progs")];
+ int comm_len;
+};
/*
* FLAVORS
@@ -377,14 +386,7 @@ struct core_reloc_arrays___err_non_array {
struct core_reloc_arrays_substruct d[1][2];
};
-struct core_reloc_arrays___err_wrong_val_type1 {
- char a[5]; /* char instead of int */
- char b[2][3][4];
- struct core_reloc_arrays_substruct c[3];
- struct core_reloc_arrays_substruct d[1][2];
-};
-
-struct core_reloc_arrays___err_wrong_val_type2 {
+struct core_reloc_arrays___err_wrong_val_type {
int a[5];
char b[2][3][4];
int c[3]; /* value is not a struct */
@@ -580,67 +582,6 @@ struct core_reloc_ints___bool {
int64_t s64_field;
};
-struct core_reloc_ints___err_bitfield {
- uint8_t u8_field;
- int8_t s8_field;
- uint16_t u16_field;
- int16_t s16_field;
- uint32_t u32_field: 32; /* bitfields are not supported */
- int32_t s32_field;
- uint64_t u64_field;
- int64_t s64_field;
-};
-
-struct core_reloc_ints___err_wrong_sz_8 {
- uint16_t u8_field; /* not 8-bit anymore */
- int16_t s8_field; /* not 8-bit anymore */
-
- uint16_t u16_field;
- int16_t s16_field;
- uint32_t u32_field;
- int32_t s32_field;
- uint64_t u64_field;
- int64_t s64_field;
-};
-
-struct core_reloc_ints___err_wrong_sz_16 {
- uint8_t u8_field;
- int8_t s8_field;
-
- uint32_t u16_field; /* not 16-bit anymore */
- int32_t s16_field; /* not 16-bit anymore */
-
- uint32_t u32_field;
- int32_t s32_field;
- uint64_t u64_field;
- int64_t s64_field;
-};
-
-struct core_reloc_ints___err_wrong_sz_32 {
- uint8_t u8_field;
- int8_t s8_field;
- uint16_t u16_field;
- int16_t s16_field;
-
- uint64_t u32_field; /* not 32-bit anymore */
- int64_t s32_field; /* not 32-bit anymore */
-
- uint64_t u64_field;
- int64_t s64_field;
-};
-
-struct core_reloc_ints___err_wrong_sz_64 {
- uint8_t u8_field;
- int8_t s8_field;
- uint16_t u16_field;
- int16_t s16_field;
- uint32_t u32_field;
- int32_t s32_field;
-
- uint32_t u64_field; /* not 64-bit anymore */
- int32_t s64_field; /* not 64-bit anymore */
-};
-
/*
* MISC
*/
@@ -665,3 +606,162 @@ struct core_reloc_misc_extensible {
int c;
int d;
};
+
+/*
+ * EXISTENCE
+ */
+struct core_reloc_existence_output {
+ int a_exists;
+ int a_value;
+ int b_exists;
+ int b_value;
+ int c_exists;
+ int c_value;
+ int arr_exists;
+ int arr_value;
+ int s_exists;
+ int s_value;
+};
+
+struct core_reloc_existence {
+ int a;
+ struct {
+ int b;
+ };
+ int c;
+ int arr[1];
+ struct {
+ int x;
+ } s;
+};
+
+struct core_reloc_existence___minimal {
+ int a;
+};
+
+struct core_reloc_existence___err_wrong_int_sz {
+ short a;
+};
+
+struct core_reloc_existence___err_wrong_int_type {
+ int b[1];
+};
+
+struct core_reloc_existence___err_wrong_int_kind {
+ struct { int x; } c;
+};
+
+struct core_reloc_existence___err_wrong_arr_kind {
+ int arr;
+};
+
+struct core_reloc_existence___err_wrong_arr_value_type {
+ short arr[1];
+};
+
+struct core_reloc_existence___err_wrong_struct_type {
+ int s;
+};
+
+/*
+ * BITFIELDS
+ */
+/* bitfield read results, all as plain integers */
+struct core_reloc_bitfields_output {
+ int64_t ub1;
+ int64_t ub2;
+ int64_t ub7;
+ int64_t sb4;
+ int64_t sb20;
+ int64_t u32;
+ int64_t s32;
+};
+
+struct core_reloc_bitfields {
+ /* unsigned bitfields */
+ uint8_t ub1: 1;
+ uint8_t ub2: 2;
+ uint32_t ub7: 7;
+ /* signed bitfields */
+ int8_t sb4: 4;
+ int32_t sb20: 20;
+ /* non-bitfields */
+ uint32_t u32;
+ int32_t s32;
+};
+
+/* different bit sizes (both up and down) */
+struct core_reloc_bitfields___bit_sz_change {
+ /* unsigned bitfields */
+ uint16_t ub1: 3; /* 1 -> 3 */
+ uint32_t ub2: 20; /* 2 -> 20 */
+ uint8_t ub7: 1; /* 7 -> 1 */
+ /* signed bitfields */
+ int8_t sb4: 1; /* 4 -> 1 */
+ int32_t sb20: 30; /* 20 -> 30 */
+ /* non-bitfields */
+ uint16_t u32; /* 32 -> 16 */
+ int64_t s32; /* 32 -> 64 */
+};
+
+/* turn bitfield into non-bitfield and vice versa */
+struct core_reloc_bitfields___bitfield_vs_int {
+ uint64_t ub1; /* 3 -> 64 non-bitfield */
+ uint8_t ub2; /* 20 -> 8 non-bitfield */
+ int64_t ub7; /* 7 -> 64 non-bitfield signed */
+ int64_t sb4; /* 4 -> 64 non-bitfield signed */
+ uint64_t sb20; /* 20 -> 16 non-bitfield unsigned */
+ int32_t u32: 20; /* 32 non-bitfield -> 20 bitfield */
+ uint64_t s32: 60; /* 32 non-bitfield -> 60 bitfield */
+};
+
+struct core_reloc_bitfields___just_big_enough {
+ uint64_t ub1: 4;
+ uint64_t ub2: 60; /* packed tightly */
+ uint32_t ub7;
+ uint32_t sb4;
+ uint32_t sb20;
+ uint32_t u32;
+ uint32_t s32;
+} __attribute__((packed));
+
+struct core_reloc_bitfields___err_too_big_bitfield {
+ uint64_t ub1: 4;
+ uint64_t ub2: 61; /* packed tightly */
+ uint32_t ub7;
+ uint32_t sb4;
+ uint32_t sb20;
+ uint32_t u32;
+ uint32_t s32;
+} __attribute__((packed));
+
+/*
+ * SIZE
+ */
+struct core_reloc_size_output {
+ int int_sz;
+ int struct_sz;
+ int union_sz;
+ int arr_sz;
+ int arr_elem_sz;
+ int ptr_sz;
+ int enum_sz;
+};
+
+struct core_reloc_size {
+ int int_field;
+ struct { int x; } struct_field;
+ union { int x; } union_field;
+ int arr_field[4];
+ void *ptr_field;
+ enum { VALUE = 123 } enum_field;
+};
+
+struct core_reloc_size___diff_sz {
+ uint64_t int_field;
+ struct { int x; int y; int z; } struct_field;
+ union { int x; char bla[123]; } union_field;
+ char arr_field[10];
+ void *ptr_field;
+ enum { OTHER_VALUE = 0xFFFFFFFFFFFFFFFF } enum_field;
+};
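
The SIZE variants above are consumed via field-size relocations. A sketch of roughly how a companion program (in the spirit of the other test_core_reloc_*.c files; the actual one is not shown in this hunk) would fill core_reloc_size_output with bpf_core_field_size():

#include <linux/bpf.h>
#include <stdint.h>
#include "bpf_helpers.h"
#include "bpf_core_read.h"

struct {
	char in[256];
	char out[256];
} data = {};

SEC("raw_tracepoint/sys_enter")
int test_core_size(void *ctx)
{
	struct core_reloc_size *in = (void *)&data.in;
	struct core_reloc_size_output *out = (void *)&data.out;

	/* each result is the field's byte size in the target's layout */
	out->int_sz = bpf_core_field_size(in->int_field);
	out->struct_sz = bpf_core_field_size(in->struct_field);
	out->union_sz = bpf_core_field_size(in->union_field);
	out->arr_sz = bpf_core_field_size(in->arr_field);
	out->arr_elem_sz = bpf_core_field_size(in->arr_field[0]);
	out->ptr_sz = bpf_core_field_size(in->ptr_field);
	out->enum_sz = bpf_core_field_size(in->enum_field);
	return 0;
}

char _license[] SEC("license") = "GPL";
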
diff --git a/tools/testing/selftests/bpf/progs/fentry_test.c b/tools/testing/selftests/bpf/progs/fentry_test.c
new file mode 100644
index 000000000000..d2af9f039df5
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/fentry_test.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 Facebook */
+#include <linux/bpf.h>
+#include "bpf_helpers.h"
+#include "bpf_trace_helpers.h"
+
+char _license[] SEC("license") = "GPL";
+
+static volatile __u64 test1_result;
+BPF_TRACE_1("fentry/bpf_fentry_test1", test1, int, a)
+{
+ test1_result = a == 1;
+ return 0;
+}
+
+static volatile __u64 test2_result;
+BPF_TRACE_2("fentry/bpf_fentry_test2", test2, int, a, __u64, b)
+{
+ test2_result = a == 2 && b == 3;
+ return 0;
+}
+
+static volatile __u64 test3_result;
+BPF_TRACE_3("fentry/bpf_fentry_test3", test3, char, a, int, b, __u64, c)
+{
+ test3_result = a == 4 && b == 5 && c == 6;
+ return 0;
+}
+
+static volatile __u64 test4_result;
+BPF_TRACE_4("fentry/bpf_fentry_test4", test4,
+ void *, a, char, b, int, c, __u64, d)
+{
+ test4_result = a == (void *)7 && b == 8 && c == 9 && d == 10;
+ return 0;
+}
+
+static volatile __u64 test5_result;
+BPF_TRACE_5("fentry/bpf_fentry_test5", test5,
+ __u64, a, void *, b, short, c, int, d, __u64, e)
+{
+ test5_result = a == 11 && b == (void *)12 && c == 13 && d == 14 &&
+ e == 15;
+ return 0;
+}
+
+static volatile __u64 test6_result;
+BPF_TRACE_6("fentry/bpf_fentry_test6", test6,
+ __u64, a, void *, b, short, c, int, d, void *, e, __u64, f)
+{
+ test6_result = a == 16 && b == (void *)17 && c == 18 && d == 19 &&
+ e == (void *)20 && f == 21;
+ return 0;
+}
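
The BPF_TRACE_n() macros come from bpf_trace_helpers.h. Roughly — a simplified sketch, not the literal header — the two-argument variant declares a typed inner function plus a SEC()-annotated wrapper that casts 64-bit context slots to the declared types:

/* sketch: fentry/fexit programs receive their arguments as an array
 * of 64-bit slots; the macro hides the per-argument casts.
 */
#define SKETCH_TRACE_2(sec, name, t1, a1, t2, a2)		\
static __always_inline int ____##name(t1 a1, t2 a2);		\
								\
SEC(sec)							\
int name(__u64 *ctx)						\
{								\
	return ____##name((t1)ctx[0], (t2)ctx[1]);		\
}								\
								\
static __always_inline int ____##name(t1 a1, t2 a2)
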
diff --git a/tools/testing/selftests/bpf/progs/fexit_bpf2bpf.c b/tools/testing/selftests/bpf/progs/fexit_bpf2bpf.c
new file mode 100644
index 000000000000..525d47d7b589
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/fexit_bpf2bpf.c
@@ -0,0 +1,82 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 Facebook */
+#include <linux/bpf.h>
+#include "bpf_helpers.h"
+#include "bpf_trace_helpers.h"
+
+struct sk_buff {
+ unsigned int len;
+};
+
+static volatile __u64 test_result;
+BPF_TRACE_2("fexit/test_pkt_access", test_main,
+ struct sk_buff *, skb, int, ret)
+{
+ int len;
+
+ __builtin_preserve_access_index(({
+ len = skb->len;
+ }));
+ if (len != 74 || ret != 0)
+ return 0;
+ test_result = 1;
+ return 0;
+}
+
+static volatile __u64 test_result_subprog1;
+BPF_TRACE_2("fexit/test_pkt_access_subprog1", test_subprog1,
+ struct sk_buff *, skb, int, ret)
+{
+ int len;
+
+ __builtin_preserve_access_index(({
+ len = skb->len;
+ }));
+ if (len != 74 || ret != 148)
+ return 0;
+ test_result_subprog1 = 1;
+ return 0;
+}
+
+/* Though test_pkt_access_subprog2() is defined in C as:
+ * static __attribute__ ((noinline))
+ * int test_pkt_access_subprog2(int val, volatile struct __sk_buff *skb)
+ * {
+ * return skb->len * val;
+ * }
+ * LLVM optimizations remove the 'int val' argument and generate BPF assembly:
+ * r0 = *(u32 *)(r1 + 0)
+ * w0 <<= 1
+ * exit
+ * In such cases the verifier falls back to conservative mode and the
+ * tracing program can access arguments and the return value as u64
+ * instead of their accurate types.
+ */
+struct args_subprog2 {
+ __u64 args[5];
+ __u64 ret;
+};
+static volatile __u64 test_result_subprog2;
+SEC("fexit/test_pkt_access_subprog2")
+int test_subprog2(struct args_subprog2 *ctx)
+{
+ struct sk_buff *skb = (void *)ctx->args[0];
+ __u64 ret;
+ int len;
+
+ bpf_probe_read_kernel(&len, sizeof(len),
+ __builtin_preserve_access_index(&skb->len));
+
+ ret = ctx->ret;
+ /* bpf_prog_load() loads "test_pkt_access.o" with BPF_F_TEST_RND_HI32
+ * which randomizes upper 32 bits after BPF_ALU32 insns.
+ * Hence after 'w0 <<= 1' upper bits of $rax are random.
+ * That is expected and correct. Trim them.
+ */
+ ret = (__u32) ret;
+ if (len != 74 || ret != 148)
+ return 0;
+ test_result_subprog2 = 1;
+ return 0;
+}
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/fexit_test.c b/tools/testing/selftests/bpf/progs/fexit_test.c
new file mode 100644
index 000000000000..2487e98edb34
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/fexit_test.c
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 Facebook */
+#include <linux/bpf.h>
+#include "bpf_helpers.h"
+#include "bpf_trace_helpers.h"
+
+char _license[] SEC("license") = "GPL";
+
+static volatile __u64 test1_result;
+BPF_TRACE_2("fexit/bpf_fentry_test1", test1, int, a, int, ret)
+{
+ test1_result = a == 1 && ret == 2;
+ return 0;
+}
+
+static volatile __u64 test2_result;
+BPF_TRACE_3("fexit/bpf_fentry_test2", test2, int, a, __u64, b, int, ret)
+{
+ test2_result = a == 2 && b == 3 && ret == 5;
+ return 0;
+}
+
+static volatile __u64 test3_result;
+BPF_TRACE_4("fexit/bpf_fentry_test3", test3, char, a, int, b, __u64, c, int, ret)
+{
+ test3_result = a == 4 && b == 5 && c == 6 && ret == 15;
+ return 0;
+}
+
+static volatile __u64 test4_result;
+BPF_TRACE_5("fexit/bpf_fentry_test4", test4,
+ void *, a, char, b, int, c, __u64, d, int, ret)
+{
+
+ test4_result = a == (void *)7 && b == 8 && c == 9 && d == 10 &&
+ ret == 34;
+ return 0;
+}
+
+static volatile __u64 test5_result;
+BPF_TRACE_6("fexit/bpf_fentry_test5", test5,
+ __u64, a, void *, b, short, c, int, d, __u64, e, int, ret)
+{
+ test5_result = a == 11 && b == (void *)12 && c == 13 && d == 14 &&
+ e == 15 && ret == 65;
+ return 0;
+}
+
+static volatile __u64 test6_result;
+BPF_TRACE_7("fexit/bpf_fentry_test6", test6,
+ __u64, a, void *, b, short, c, int, d, void *, e, __u64, f,
+ int, ret)
+{
+ test6_result = a == 16 && b == (void *)17 && c == 18 && d == 19 &&
+ e == (void *)20 && f == 21 && ret == 111;
+ return 0;
+}
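
The expected ret values are not arbitrary: they mirror the bpf_fentry_test* stubs in net/bpf/test_run.c, which simply combine their arguments, e.g.:

/* from net/bpf/test_run.c (for reference) */
int noinline bpf_fentry_test1(int a)
{
	return a + 1;		/* 1 + 1 == 2 */
}

int noinline bpf_fentry_test2(int a, u64 b)
{
	return a + b;		/* 2 + 3 == 5 */
}

/* test3..test6 follow the same pattern, summing all arguments */
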
diff --git a/tools/testing/selftests/bpf/progs/kfree_skb.c b/tools/testing/selftests/bpf/progs/kfree_skb.c
new file mode 100644
index 000000000000..974d6f3bb319
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/kfree_skb.c
@@ -0,0 +1,153 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2019 Facebook
+#include <linux/bpf.h>
+#include <stdbool.h>
+#include "bpf_helpers.h"
+#include "bpf_endian.h"
+#include "bpf_trace_helpers.h"
+
+char _license[] SEC("license") = "GPL";
+struct {
+ __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
+ __uint(key_size, sizeof(int));
+ __uint(value_size, sizeof(int));
+} perf_buf_map SEC(".maps");
+
+#define _(P) (__builtin_preserve_access_index(P))
+
+/* define a few structs that the bpf program needs to access */
+struct callback_head {
+ struct callback_head *next;
+ void (*func)(struct callback_head *head);
+};
+struct dev_ifalias {
+ struct callback_head rcuhead;
+};
+
+struct net_device /* same as kernel's struct net_device */ {
+ int ifindex;
+ struct dev_ifalias *ifalias;
+};
+
+typedef struct {
+ int counter;
+} atomic_t;
+typedef struct refcount_struct {
+ atomic_t refs;
+} refcount_t;
+
+struct sk_buff {
+ /* field names and sizes should match those in the kernel */
+ unsigned int len, data_len;
+ __u16 mac_len, hdr_len, queue_mapping;
+ struct net_device *dev;
+ /* order of the fields doesn't matter */
+ refcount_t users;
+ unsigned char *data;
+ char __pkt_type_offset[0];
+ char cb[48];
+};
+
+struct meta {
+ int ifindex;
+ __u32 cb32_0;
+ __u8 cb8_0;
+};
+
+/* TRACE_EVENT(kfree_skb,
+ * TP_PROTO(struct sk_buff *skb, void *location),
+ */
+BPF_TRACE_2("tp_btf/kfree_skb", trace_kfree_skb,
+ struct sk_buff *, skb, void *, location)
+{
+ struct net_device *dev;
+ struct callback_head *ptr;
+ void *func;
+ int users;
+ unsigned char *data;
+ unsigned short pkt_data;
+ struct meta meta = {};
+ char pkt_type;
+ __u32 *cb32;
+ __u8 *cb8;
+
+ __builtin_preserve_access_index(({
+ users = skb->users.refs.counter;
+ data = skb->data;
+ dev = skb->dev;
+ ptr = dev->ifalias->rcuhead.next;
+ func = ptr->func;
+ cb8 = (__u8 *)&skb->cb;
+ cb32 = (__u32 *)&skb->cb;
+ }));
+
+ meta.ifindex = _(dev->ifindex);
+ meta.cb8_0 = cb8[8];
+ meta.cb32_0 = cb32[2];
+
+ bpf_probe_read_kernel(&pkt_type, sizeof(pkt_type), _(&skb->__pkt_type_offset));
+ pkt_type &= 7;
+
+ /* read eth proto */
+ bpf_probe_read_kernel(&pkt_data, sizeof(pkt_data), data + 12);
+
+ bpf_printk("rcuhead.next %llx func %llx\n", ptr, func);
+ bpf_printk("skb->len %d users %d pkt_type %x\n",
+ _(skb->len), users, pkt_type);
+ bpf_printk("skb->queue_mapping %d\n", _(skb->queue_mapping));
+ bpf_printk("dev->ifindex %d data %llx pkt_data %x\n",
+ meta.ifindex, data, pkt_data);
+ bpf_printk("cb8_0:%x cb32_0:%x\n", meta.cb8_0, meta.cb32_0);
+
+ if (users != 1 || pkt_data != bpf_htons(0x86dd) || meta.ifindex != 1)
+ /* raw tp ignores return value */
+ return 0;
+
+ /* send the first 72 bytes of the packet to user space */
+ bpf_skb_output(skb, &perf_buf_map, (72ull << 32) | BPF_F_CURRENT_CPU,
+ &meta, sizeof(meta));
+ return 0;
+}
+
+static volatile struct {
+ bool fentry_test_ok;
+ bool fexit_test_ok;
+} result;
+
+BPF_TRACE_3("fentry/eth_type_trans", fentry_eth_type_trans,
+ struct sk_buff *, skb, struct net_device *, dev,
+ unsigned short, protocol)
+{
+ int len, ifindex;
+
+ __builtin_preserve_access_index(({
+ len = skb->len;
+ ifindex = dev->ifindex;
+ }));
+
+ /* fentry sees full packet including L2 header */
+ if (len != 74 || ifindex != 1)
+ return 0;
+ result.fentry_test_ok = true;
+ return 0;
+}
+
+BPF_TRACE_3("fexit/eth_type_trans", fexit_eth_type_trans,
+ struct sk_buff *, skb, struct net_device *, dev,
+ unsigned short, protocol)
+{
+ int len, ifindex;
+
+ __builtin_preserve_access_index(({
+ len = skb->len;
+ ifindex = dev->ifindex;
+ }));
+
+ /* fexit sees the packet without the L2 header, which eth_type_trans
+ * should have consumed.
+ */
+ if (len != 60 || protocol != bpf_htons(0x86dd) || ifindex != 1)
+ return 0;
+ result.fexit_test_ok = true;
+ return 0;
+}
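
On the user-space side, samples pushed through perf_buf_map by bpf_skb_output() are drained with libbpf's perf_buffer API. A minimal sketch (function names here are illustrative):

#include <bpf/libbpf.h>
#include <stdio.h>

static void on_sample(void *ctx, int cpu, void *data, __u32 size)
{
	/* the first bytes are struct meta, followed by the 72-byte
	 * packet prefix requested by the program above */
	printf("cpu %d: %u bytes\n", cpu, size);
}

static int drain(int perf_buf_map_fd)
{
	struct perf_buffer_opts opts = { .sample_cb = on_sample };
	struct perf_buffer *pb;

	pb = perf_buffer__new(perf_buf_map_fd, 1 /* pages per CPU */, &opts);
	if (libbpf_get_error(pb))
		return -1;

	perf_buffer__poll(pb, 100 /* timeout, ms */);
	perf_buffer__free(pb);
	return 0;
}
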
diff --git a/tools/testing/selftests/bpf/progs/loop1.c b/tools/testing/selftests/bpf/progs/loop1.c
index 7cdb7f878310..40ac722a9da5 100644
--- a/tools/testing/selftests/bpf/progs/loop1.c
+++ b/tools/testing/selftests/bpf/progs/loop1.c
@@ -7,6 +7,7 @@
#include <stdbool.h>
#include <linux/bpf.h>
#include "bpf_helpers.h"
+#include "bpf_tracing.h"
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/loop2.c b/tools/testing/selftests/bpf/progs/loop2.c
index 9b2f808a2863..bb80f29aa7f7 100644
--- a/tools/testing/selftests/bpf/progs/loop2.c
+++ b/tools/testing/selftests/bpf/progs/loop2.c
@@ -7,6 +7,7 @@
#include <stdbool.h>
#include <linux/bpf.h>
#include "bpf_helpers.h"
+#include "bpf_tracing.h"
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/loop3.c b/tools/testing/selftests/bpf/progs/loop3.c
index d727657d51e2..2b9165a7afe1 100644
--- a/tools/testing/selftests/bpf/progs/loop3.c
+++ b/tools/testing/selftests/bpf/progs/loop3.c
@@ -7,6 +7,7 @@
#include <stdbool.h>
#include <linux/bpf.h>
#include "bpf_helpers.h"
+#include "bpf_tracing.h"
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/pyperf.h b/tools/testing/selftests/bpf/progs/pyperf.h
index 003fe106fc70..71d383cc9b85 100644
--- a/tools/testing/selftests/bpf/progs/pyperf.h
+++ b/tools/testing/selftests/bpf/progs/pyperf.h
@@ -72,9 +72,9 @@ static __always_inline void *get_thread_state(void *tls_base, PidData *pidData)
void* thread_state;
int key;
- bpf_probe_read(&key, sizeof(key), (void*)(long)pidData->tls_key_addr);
- bpf_probe_read(&thread_state, sizeof(thread_state),
- tls_base + 0x310 + key * 0x10 + 0x08);
+ bpf_probe_read_user(&key, sizeof(key), (void*)(long)pidData->tls_key_addr);
+ bpf_probe_read_user(&thread_state, sizeof(thread_state),
+ tls_base + 0x310 + key * 0x10 + 0x08);
return thread_state;
}
@@ -82,31 +82,33 @@ static __always_inline bool get_frame_data(void *frame_ptr, PidData *pidData,
FrameData *frame, Symbol *symbol)
{
// read data from PyFrameObject
- bpf_probe_read(&frame->f_back,
- sizeof(frame->f_back),
- frame_ptr + pidData->offsets.PyFrameObject_back);
- bpf_probe_read(&frame->f_code,
- sizeof(frame->f_code),
- frame_ptr + pidData->offsets.PyFrameObject_code);
+ bpf_probe_read_user(&frame->f_back,
+ sizeof(frame->f_back),
+ frame_ptr + pidData->offsets.PyFrameObject_back);
+ bpf_probe_read_user(&frame->f_code,
+ sizeof(frame->f_code),
+ frame_ptr + pidData->offsets.PyFrameObject_code);
// read data from PyCodeObject
if (!frame->f_code)
return false;
- bpf_probe_read(&frame->co_filename,
- sizeof(frame->co_filename),
- frame->f_code + pidData->offsets.PyCodeObject_filename);
- bpf_probe_read(&frame->co_name,
- sizeof(frame->co_name),
- frame->f_code + pidData->offsets.PyCodeObject_name);
+ bpf_probe_read_user(&frame->co_filename,
+ sizeof(frame->co_filename),
+ frame->f_code + pidData->offsets.PyCodeObject_filename);
+ bpf_probe_read_user(&frame->co_name,
+ sizeof(frame->co_name),
+ frame->f_code + pidData->offsets.PyCodeObject_name);
// read actual names into symbol
if (frame->co_filename)
- bpf_probe_read_str(&symbol->file,
- sizeof(symbol->file),
- frame->co_filename + pidData->offsets.String_data);
+ bpf_probe_read_user_str(&symbol->file,
+ sizeof(symbol->file),
+ frame->co_filename +
+ pidData->offsets.String_data);
if (frame->co_name)
- bpf_probe_read_str(&symbol->name,
- sizeof(symbol->name),
- frame->co_name + pidData->offsets.String_data);
+ bpf_probe_read_user_str(&symbol->name,
+ sizeof(symbol->name),
+ frame->co_name +
+ pidData->offsets.String_data);
return true;
}
@@ -174,9 +176,9 @@ static __always_inline int __on_event(struct pt_regs *ctx)
event->kernel_stack_id = bpf_get_stackid(ctx, &stackmap, 0);
void* thread_state_current = (void*)0;
- bpf_probe_read(&thread_state_current,
- sizeof(thread_state_current),
- (void*)(long)pidData->current_state_addr);
+ bpf_probe_read_user(&thread_state_current,
+ sizeof(thread_state_current),
+ (void*)(long)pidData->current_state_addr);
struct task_struct* task = (struct task_struct*)bpf_get_current_task();
void* tls_base = (void*)task;
@@ -188,11 +190,13 @@ static __always_inline int __on_event(struct pt_regs *ctx)
if (pidData->use_tls) {
uint64_t pthread_created;
uint64_t pthread_self;
- bpf_probe_read(&pthread_self, sizeof(pthread_self), tls_base + 0x10);
+ bpf_probe_read_user(&pthread_self, sizeof(pthread_self),
+ tls_base + 0x10);
- bpf_probe_read(&pthread_created,
- sizeof(pthread_created),
- thread_state + pidData->offsets.PyThreadState_thread);
+ bpf_probe_read_user(&pthread_created,
+ sizeof(pthread_created),
+ thread_state +
+ pidData->offsets.PyThreadState_thread);
event->pthread_match = pthread_created == pthread_self;
} else {
event->pthread_match = 1;
@@ -204,9 +208,10 @@ static __always_inline int __on_event(struct pt_regs *ctx)
Symbol sym = {};
int cur_cpu = bpf_get_smp_processor_id();
- bpf_probe_read(&frame_ptr,
- sizeof(frame_ptr),
- thread_state + pidData->offsets.PyThreadState_frame);
+ bpf_probe_read_user(&frame_ptr,
+ sizeof(frame_ptr),
+ thread_state +
+ pidData->offsets.PyThreadState_frame);
int32_t* symbol_counter = bpf_map_lookup_elem(&symbolmap, &sym);
if (symbol_counter == NULL)
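
The mechanical bpf_probe_read() -> bpf_probe_read_user() conversion in this hunk reflects the helper split introduced in this series: the legacy helper guesses the address space, while the new variants name it explicitly. In sketch form (a hypothetical probe with a placeholder pointer, not part of the patch):

SEC("kprobe/do_nanosleep")
int split_example(struct pt_regs *ctx)
{
	__u64 val;
	const void *user_ptr = (const void *)0x1000;	/* placeholder */

	/* user-space memory: TLS blocks, PyFrameObject, etc. */
	bpf_probe_read_user(&val, sizeof(val), user_ptr);
	/* kernel memory keeps going through the _kernel variant */
	bpf_probe_read_kernel(&val, sizeof(val), ctx);
	return 0;
}
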
diff --git a/tools/testing/selftests/bpf/progs/sockopt_sk.c b/tools/testing/selftests/bpf/progs/sockopt_sk.c
index 9a3d1c79e6fe..1bafbb944e37 100644
--- a/tools/testing/selftests/bpf/progs/sockopt_sk.c
+++ b/tools/testing/selftests/bpf/progs/sockopt_sk.c
@@ -14,13 +14,12 @@ struct sockopt_sk {
__u8 val;
};
-struct bpf_map_def SEC("maps") socket_storage_map = {
- .type = BPF_MAP_TYPE_SK_STORAGE,
- .key_size = sizeof(int),
- .value_size = sizeof(struct sockopt_sk),
- .map_flags = BPF_F_NO_PREALLOC,
-};
-BPF_ANNOTATE_KV_PAIR(socket_storage_map, int, struct sockopt_sk);
+struct {
+ __uint(type, BPF_MAP_TYPE_SK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, struct sockopt_sk);
+} socket_storage_map SEC(".maps");
SEC("cgroup/getsockopt")
int _getsockopt(struct bpf_sockopt *ctx)
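
For reference, the BTF-defined map syntax used here encodes every attribute purely in type information, via two small macros from the selftests' bpf_helpers.h:

#define __uint(name, val) int (*name)[val]   /* attribute value in array size */
#define __type(name, val) typeof(val) *name  /* key/value types via pointer   */

libbpf recovers both from the object's BTF, which is what lets __type() carry real key/value types instead of the old BPF_ANNOTATE_KV_PAIR() side channel.
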
diff --git a/tools/testing/selftests/bpf/progs/strobemeta.h b/tools/testing/selftests/bpf/progs/strobemeta.h
index 067eb625d01c..4bf16e0a1b0e 100644
--- a/tools/testing/selftests/bpf/progs/strobemeta.h
+++ b/tools/testing/selftests/bpf/progs/strobemeta.h
@@ -98,7 +98,7 @@ struct strobe_map_raw {
/*
* having volatile doesn't change anything on BPF side, but clang
* emits warnings for passing `volatile const char *` into
- * bpf_probe_read_str that expects just `const char *`
+ * bpf_probe_read_user_str that expects just `const char *`
*/
const char* tag;
/*
@@ -309,18 +309,18 @@ static __always_inline void *calc_location(struct strobe_value_loc *loc,
dtv_t *dtv;
void *tls_ptr;
- bpf_probe_read(&tls_index, sizeof(struct tls_index),
- (void *)loc->offset);
+ bpf_probe_read_user(&tls_index, sizeof(struct tls_index),
+ (void *)loc->offset);
/* valid module index is always positive */
if (tls_index.module > 0) {
/* dtv = ((struct tcbhead *)tls_base)->dtv[tls_index.module] */
- bpf_probe_read(&dtv, sizeof(dtv),
- &((struct tcbhead *)tls_base)->dtv);
+ bpf_probe_read_user(&dtv, sizeof(dtv),
+ &((struct tcbhead *)tls_base)->dtv);
dtv += tls_index.module;
} else {
dtv = NULL;
}
- bpf_probe_read(&tls_ptr, sizeof(void *), dtv);
+ bpf_probe_read_user(&tls_ptr, sizeof(void *), dtv);
/* if pointer has (void *)-1 value, then TLS wasn't initialized yet */
return tls_ptr && tls_ptr != (void *)-1
? tls_ptr + tls_index.offset
@@ -336,7 +336,7 @@ static __always_inline void read_int_var(struct strobemeta_cfg *cfg,
if (!location)
return;
- bpf_probe_read(value, sizeof(struct strobe_value_generic), location);
+ bpf_probe_read_user(value, sizeof(struct strobe_value_generic), location);
data->int_vals[idx] = value->val;
if (value->header.len)
data->int_vals_set_mask |= (1 << idx);
@@ -356,13 +356,13 @@ static __always_inline uint64_t read_str_var(struct strobemeta_cfg *cfg,
if (!location)
return 0;
- bpf_probe_read(value, sizeof(struct strobe_value_generic), location);
- len = bpf_probe_read_str(payload, STROBE_MAX_STR_LEN, value->ptr);
+ bpf_probe_read_user(value, sizeof(struct strobe_value_generic), location);
+ len = bpf_probe_read_user_str(payload, STROBE_MAX_STR_LEN, value->ptr);
/*
- * if bpf_probe_read_str returns error (<0), due to casting to
+ * if bpf_probe_read_user_str returns error (<0), due to casting to
* unsigned int, it will become a big number, so the next check is
* sufficient to check for errors AND prove to the BPF verifier that
- * bpf_probe_read_str won't return anything bigger than
+ * bpf_probe_read_user_str won't return anything bigger than
* STROBE_MAX_STR_LEN
*/
if (len > STROBE_MAX_STR_LEN)
@@ -391,8 +391,8 @@ static __always_inline void *read_map_var(struct strobemeta_cfg *cfg,
if (!location)
return payload;
- bpf_probe_read(value, sizeof(struct strobe_value_generic), location);
- if (bpf_probe_read(&map, sizeof(struct strobe_map_raw), value->ptr))
+ bpf_probe_read_user(value, sizeof(struct strobe_value_generic), location);
+ if (bpf_probe_read_user(&map, sizeof(struct strobe_map_raw), value->ptr))
return payload;
descr->id = map.id;
@@ -402,7 +402,7 @@ static __always_inline void *read_map_var(struct strobemeta_cfg *cfg,
data->req_meta_valid = 1;
}
- len = bpf_probe_read_str(payload, STROBE_MAX_STR_LEN, map.tag);
+ len = bpf_probe_read_user_str(payload, STROBE_MAX_STR_LEN, map.tag);
if (len <= STROBE_MAX_STR_LEN) {
descr->tag_len = len;
payload += len;
@@ -418,15 +418,15 @@ static __always_inline void *read_map_var(struct strobemeta_cfg *cfg,
break;
descr->key_lens[i] = 0;
- len = bpf_probe_read_str(payload, STROBE_MAX_STR_LEN,
- map.entries[i].key);
+ len = bpf_probe_read_user_str(payload, STROBE_MAX_STR_LEN,
+ map.entries[i].key);
if (len <= STROBE_MAX_STR_LEN) {
descr->key_lens[i] = len;
payload += len;
}
descr->val_lens[i] = 0;
- len = bpf_probe_read_str(payload, STROBE_MAX_STR_LEN,
- map.entries[i].val);
+ len = bpf_probe_read_user_str(payload, STROBE_MAX_STR_LEN,
+ map.entries[i].val);
if (len <= STROBE_MAX_STR_LEN) {
descr->val_lens[i] = len;
payload += len;
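
Distilled, the read-into-unsigned pattern this hunk keeps relying on looks like this (the bound name is illustrative; __always_inline and the helper come from the selftests' bpf_helpers.h):

#define MAX_LEN 256	/* stands in for STROBE_MAX_STR_LEN */

static __always_inline void *append_str(void *payload, const void *user_ptr)
{
	uint64_t len;

	/* negative errors wrap to huge unsigned values here ... */
	len = bpf_probe_read_user_str(payload, MAX_LEN, user_ptr);
	/* ... so one compare rejects errors AND proves len <= MAX_LEN
	 * to the verifier before the pointer arithmetic below */
	if (len > MAX_LEN)
		return payload;
	return payload + len;
}
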
diff --git a/tools/testing/selftests/bpf/progs/tailcall1.c b/tools/testing/selftests/bpf/progs/tailcall1.c
new file mode 100644
index 000000000000..63531e1a9fa4
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/tailcall1.c
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+
+#include "bpf_helpers.h"
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+ __uint(max_entries, 3);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(__u32));
+} jmp_table SEC(".maps");
+
+#define TAIL_FUNC(x) \
+ SEC("classifier/" #x) \
+ int bpf_func_##x(struct __sk_buff *skb) \
+ { \
+ return x; \
+ }
+TAIL_FUNC(0)
+TAIL_FUNC(1)
+TAIL_FUNC(2)
+
+SEC("classifier")
+int entry(struct __sk_buff *skb)
+{
+ /* Multiple locations to make sure we patch
+ * all of them.
+ */
+ bpf_tail_call(skb, &jmp_table, 0);
+ bpf_tail_call(skb, &jmp_table, 0);
+ bpf_tail_call(skb, &jmp_table, 0);
+ bpf_tail_call(skb, &jmp_table, 0);
+
+ bpf_tail_call(skb, &jmp_table, 1);
+ bpf_tail_call(skb, &jmp_table, 1);
+ bpf_tail_call(skb, &jmp_table, 1);
+ bpf_tail_call(skb, &jmp_table, 1);
+
+ bpf_tail_call(skb, &jmp_table, 2);
+ bpf_tail_call(skb, &jmp_table, 2);
+ bpf_tail_call(skb, &jmp_table, 2);
+ bpf_tail_call(skb, &jmp_table, 2);
+
+ return 3;
+}
+
+char __license[] SEC("license") = "GPL";
+int _version SEC("version") = 1;
diff --git a/tools/testing/selftests/bpf/progs/tailcall2.c b/tools/testing/selftests/bpf/progs/tailcall2.c
new file mode 100644
index 000000000000..21c85c477210
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/tailcall2.c
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+
+#include "bpf_helpers.h"
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+ __uint(max_entries, 5);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(__u32));
+} jmp_table SEC(".maps");
+
+SEC("classifier/0")
+int bpf_func_0(struct __sk_buff *skb)
+{
+ bpf_tail_call(skb, &jmp_table, 1);
+ return 0;
+}
+
+SEC("classifier/1")
+int bpf_func_1(struct __sk_buff *skb)
+{
+ bpf_tail_call(skb, &jmp_table, 2);
+ return 1;
+}
+
+SEC("classifier/2")
+int bpf_func_2(struct __sk_buff *skb)
+{
+ return 2;
+}
+
+SEC("classifier/3")
+int bpf_func_3(struct __sk_buff *skb)
+{
+ bpf_tail_call(skb, &jmp_table, 4);
+ return 3;
+}
+
+SEC("classifier/4")
+int bpf_func_4(struct __sk_buff *skb)
+{
+ bpf_tail_call(skb, &jmp_table, 3);
+ return 4;
+}
+
+SEC("classifier")
+int entry(struct __sk_buff *skb)
+{
+ bpf_tail_call(skb, &jmp_table, 0);
+ /* Check multi-prog update. */
+ bpf_tail_call(skb, &jmp_table, 2);
+ /* Check tail call limit. */
+ bpf_tail_call(skb, &jmp_table, 3);
+ return 3;
+}
+
+char __license[] SEC("license") = "GPL";
+int _version SEC("version") = 1;
diff --git a/tools/testing/selftests/bpf/progs/tailcall3.c b/tools/testing/selftests/bpf/progs/tailcall3.c
new file mode 100644
index 000000000000..1ecae198b8c1
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/tailcall3.c
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+
+#include "bpf_helpers.h"
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+ __uint(max_entries, 1);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(__u32));
+} jmp_table SEC(".maps");
+
+static volatile int count;
+
+SEC("classifier/0")
+int bpf_func_0(struct __sk_buff *skb)
+{
+ count++;
+ bpf_tail_call(skb, &jmp_table, 0);
+ return 1;
+}
+
+SEC("classifier")
+int entry(struct __sk_buff *skb)
+{
+ bpf_tail_call(skb, &jmp_table, 0);
+ return 0;
+}
+
+char __license[] SEC("license") = "GPL";
+int _version SEC("version") = 1;
diff --git a/tools/testing/selftests/bpf/progs/tailcall4.c b/tools/testing/selftests/bpf/progs/tailcall4.c
new file mode 100644
index 000000000000..499388758119
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/tailcall4.c
@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+
+#include "bpf_helpers.h"
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+ __uint(max_entries, 3);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(__u32));
+} jmp_table SEC(".maps");
+
+static volatile int selector;
+
+#define TAIL_FUNC(x) \
+ SEC("classifier/" #x) \
+ int bpf_func_##x(struct __sk_buff *skb) \
+ { \
+ return x; \
+ }
+TAIL_FUNC(0)
+TAIL_FUNC(1)
+TAIL_FUNC(2)
+
+SEC("classifier")
+int entry(struct __sk_buff *skb)
+{
+ bpf_tail_call(skb, &jmp_table, selector);
+ return 3;
+}
+
+char __license[] SEC("license") = "GPL";
+int _version SEC("version") = 1;
diff --git a/tools/testing/selftests/bpf/progs/tailcall5.c b/tools/testing/selftests/bpf/progs/tailcall5.c
new file mode 100644
index 000000000000..49c64eb53f19
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/tailcall5.c
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+
+#include "bpf_helpers.h"
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+ __uint(max_entries, 3);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(__u32));
+} jmp_table SEC(".maps");
+
+static volatile int selector;
+
+#define TAIL_FUNC(x) \
+ SEC("classifier/" #x) \
+ int bpf_func_##x(struct __sk_buff *skb) \
+ { \
+ return x; \
+ }
+TAIL_FUNC(0)
+TAIL_FUNC(1)
+TAIL_FUNC(2)
+
+SEC("classifier")
+int entry(struct __sk_buff *skb)
+{
+ int idx = 0;
+
+ if (selector == 1234)
+ idx = 1;
+ else if (selector == 5678)
+ idx = 2;
+
+ bpf_tail_call(skb, &jmp_table, idx);
+ return 3;
+}
+
+char __license[] SEC("license") = "GPL";
+int _version SEC("version") = 1;
diff --git a/tools/testing/selftests/bpf/progs/tcp_rtt.c b/tools/testing/selftests/bpf/progs/tcp_rtt.c
index 233bdcb1659e..2cf813a06b83 100644
--- a/tools/testing/selftests/bpf/progs/tcp_rtt.c
+++ b/tools/testing/selftests/bpf/progs/tcp_rtt.c
@@ -13,13 +13,12 @@ struct tcp_rtt_storage {
__u32 icsk_retransmits;
};
-struct bpf_map_def SEC("maps") socket_storage_map = {
- .type = BPF_MAP_TYPE_SK_STORAGE,
- .key_size = sizeof(int),
- .value_size = sizeof(struct tcp_rtt_storage),
- .map_flags = BPF_F_NO_PREALLOC,
-};
-BPF_ANNOTATE_KV_PAIR(socket_storage_map, int, struct tcp_rtt_storage);
+struct {
+ __uint(type, BPF_MAP_TYPE_SK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, struct tcp_rtt_storage);
+} socket_storage_map SEC(".maps");
SEC("sockops")
int _sockops(struct bpf_sock_ops *ctx)
diff --git a/tools/testing/selftests/bpf/progs/test_attach_probe.c b/tools/testing/selftests/bpf/progs/test_attach_probe.c
index 63a8dfef893b..534621e38906 100644
--- a/tools/testing/selftests/bpf/progs/test_attach_probe.c
+++ b/tools/testing/selftests/bpf/progs/test_attach_probe.c
@@ -49,4 +49,3 @@ int handle_uprobe_return(struct pt_regs *ctx)
}
char _license[] SEC("license") = "GPL";
-__u32 _version SEC("version") = 1;
diff --git a/tools/testing/selftests/bpf/progs/test_btf_haskv.c b/tools/testing/selftests/bpf/progs/test_btf_haskv.c
index e5c79fe0ffdb..62ad7e22105e 100644
--- a/tools/testing/selftests/bpf/progs/test_btf_haskv.c
+++ b/tools/testing/selftests/bpf/progs/test_btf_haskv.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2018 Facebook */
#include <linux/bpf.h>
#include "bpf_helpers.h"
+#include "bpf_legacy.h"
int _version SEC("version") = 1;
@@ -25,7 +26,7 @@ struct dummy_tracepoint_args {
};
__attribute__((noinline))
-static int test_long_fname_2(struct dummy_tracepoint_args *arg)
+int test_long_fname_2(struct dummy_tracepoint_args *arg)
{
struct ipv_counts *counts;
int key = 0;
@@ -43,7 +44,7 @@ static int test_long_fname_2(struct dummy_tracepoint_args *arg)
}
__attribute__((noinline))
-static int test_long_fname_1(struct dummy_tracepoint_args *arg)
+int test_long_fname_1(struct dummy_tracepoint_args *arg)
{
return test_long_fname_2(arg);
}
diff --git a/tools/testing/selftests/bpf/progs/test_btf_newkv.c b/tools/testing/selftests/bpf/progs/test_btf_newkv.c
index 5ee3622ddebb..fb8d91a1dbe0 100644
--- a/tools/testing/selftests/bpf/progs/test_btf_newkv.c
+++ b/tools/testing/selftests/bpf/progs/test_btf_newkv.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2018 Facebook */
#include <linux/bpf.h>
#include "bpf_helpers.h"
+#include "bpf_legacy.h"
int _version SEC("version") = 1;
@@ -33,7 +34,7 @@ struct dummy_tracepoint_args {
};
__attribute__((noinline))
-static int test_long_fname_2(struct dummy_tracepoint_args *arg)
+int test_long_fname_2(struct dummy_tracepoint_args *arg)
{
struct ipv_counts *counts;
int key = 0;
@@ -56,7 +57,7 @@ static int test_long_fname_2(struct dummy_tracepoint_args *arg)
}
__attribute__((noinline))
-static int test_long_fname_1(struct dummy_tracepoint_args *arg)
+int test_long_fname_1(struct dummy_tracepoint_args *arg)
{
return test_long_fname_2(arg);
}
diff --git a/tools/testing/selftests/bpf/progs/test_btf_nokv.c b/tools/testing/selftests/bpf/progs/test_btf_nokv.c
index 434188c37774..3f4422044759 100644
--- a/tools/testing/selftests/bpf/progs/test_btf_nokv.c
+++ b/tools/testing/selftests/bpf/progs/test_btf_nokv.c
@@ -23,7 +23,7 @@ struct dummy_tracepoint_args {
};
__attribute__((noinline))
-static int test_long_fname_2(struct dummy_tracepoint_args *arg)
+int test_long_fname_2(struct dummy_tracepoint_args *arg)
{
struct ipv_counts *counts;
int key = 0;
@@ -41,7 +41,7 @@ static int test_long_fname_2(struct dummy_tracepoint_args *arg)
}
__attribute__((noinline))
-static int test_long_fname_1(struct dummy_tracepoint_args *arg)
+int test_long_fname_1(struct dummy_tracepoint_args *arg)
{
return test_long_fname_2(arg);
}
diff --git a/tools/testing/selftests/bpf/progs/test_core_reloc_arrays.c b/tools/testing/selftests/bpf/progs/test_core_reloc_arrays.c
index bf67f0fdf743..89951b684282 100644
--- a/tools/testing/selftests/bpf/progs/test_core_reloc_arrays.c
+++ b/tools/testing/selftests/bpf/progs/test_core_reloc_arrays.c
@@ -4,13 +4,14 @@
#include <linux/bpf.h>
#include <stdint.h>
#include "bpf_helpers.h"
+#include "bpf_core_read.h"
char _license[] SEC("license") = "GPL";
-static volatile struct data {
+struct {
char in[256];
char out[256];
-} data;
+} data = {};
struct core_reloc_arrays_output {
int a2;
@@ -31,6 +32,8 @@ struct core_reloc_arrays {
struct core_reloc_arrays_substruct d[1][2];
};
+#define CORE_READ(dst, src) bpf_core_read(dst, sizeof(*(dst)), src)
+
SEC("raw_tracepoint/sys_enter")
int test_core_arrays(void *ctx)
{
@@ -38,16 +41,16 @@ int test_core_arrays(void *ctx)
struct core_reloc_arrays_output *out = (void *)&data.out;
/* in->a[2] */
- if (BPF_CORE_READ(&out->a2, &in->a[2]))
+ if (CORE_READ(&out->a2, &in->a[2]))
return 1;
/* in->b[1][2][3] */
- if (BPF_CORE_READ(&out->b123, &in->b[1][2][3]))
+ if (CORE_READ(&out->b123, &in->b[1][2][3]))
return 1;
/* in->c[1].c */
- if (BPF_CORE_READ(&out->c1c, &in->c[1].c))
+ if (CORE_READ(&out->c1c, &in->c[1].c))
return 1;
/* in->d[0][0].d */
- if (BPF_CORE_READ(&out->d00d, &in->d[0][0].d))
+ if (CORE_READ(&out->d00d, &in->d[0][0].d))
return 1;
return 0;
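
The CORE_READ() wrapper adopted across these files is a thin layer over bpf_core_read(), which in bpf_core_read.h is essentially a probe read whose source pointer carries a CO-RE relocation (sketched from memory, modulo exact formatting):

#define bpf_core_read(dst, sz, src)					\
	bpf_probe_read(dst, sz,						\
		       (const void *)__builtin_preserve_access_index(src))
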
diff --git a/tools/testing/selftests/bpf/progs/test_core_reloc_bitfields_direct.c b/tools/testing/selftests/bpf/progs/test_core_reloc_bitfields_direct.c
new file mode 100644
index 000000000000..edc0f7c9e56d
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_core_reloc_bitfields_direct.c
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2019 Facebook
+
+#include <linux/bpf.h>
+#include <stdint.h>
+#include "bpf_helpers.h"
+#include "bpf_core_read.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+ char in[256];
+ char out[256];
+} data = {};
+
+struct core_reloc_bitfields {
+ /* unsigned bitfields */
+ uint8_t ub1: 1;
+ uint8_t ub2: 2;
+ uint32_t ub7: 7;
+ /* signed bitfields */
+ int8_t sb4: 4;
+ int32_t sb20: 20;
+ /* non-bitfields */
+ uint32_t u32;
+ int32_t s32;
+};
+
+/* bitfield read results, all as plain integers */
+struct core_reloc_bitfields_output {
+ int64_t ub1;
+ int64_t ub2;
+ int64_t ub7;
+ int64_t sb4;
+ int64_t sb20;
+ int64_t u32;
+ int64_t s32;
+};
+
+struct pt_regs;
+
+struct trace_sys_enter {
+ struct pt_regs *regs;
+ long id;
+};
+
+SEC("tp_btf/sys_enter")
+int test_core_bitfields_direct(void *ctx)
+{
+ struct core_reloc_bitfields *in = (void *)&data.in;
+ struct core_reloc_bitfields_output *out = (void *)&data.out;
+
+ out->ub1 = BPF_CORE_READ_BITFIELD(in, ub1);
+ out->ub2 = BPF_CORE_READ_BITFIELD(in, ub2);
+ out->ub7 = BPF_CORE_READ_BITFIELD(in, ub7);
+ out->sb4 = BPF_CORE_READ_BITFIELD(in, sb4);
+ out->sb20 = BPF_CORE_READ_BITFIELD(in, sb20);
+ out->u32 = BPF_CORE_READ_BITFIELD(in, u32);
+ out->s32 = BPF_CORE_READ_BITFIELD(in, s32);
+
+ return 0;
+}
+
diff --git a/tools/testing/selftests/bpf/progs/test_core_reloc_bitfields_probed.c b/tools/testing/selftests/bpf/progs/test_core_reloc_bitfields_probed.c
new file mode 100644
index 000000000000..6c20e433558b
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_core_reloc_bitfields_probed.c
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2019 Facebook
+
+#include <linux/bpf.h>
+#include <stdint.h>
+#include "bpf_helpers.h"
+#include "bpf_core_read.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+ char in[256];
+ char out[256];
+} data = {};
+
+struct core_reloc_bitfields {
+ /* unsigned bitfields */
+ uint8_t ub1: 1;
+ uint8_t ub2: 2;
+ uint32_t ub7: 7;
+ /* signed bitfields */
+ int8_t sb4: 4;
+ int32_t sb20: 20;
+ /* non-bitfields */
+ uint32_t u32;
+ int32_t s32;
+};
+
+/* bitfield read results, all as plain integers */
+struct core_reloc_bitfields_output {
+ int64_t ub1;
+ int64_t ub2;
+ int64_t ub7;
+ int64_t sb4;
+ int64_t sb20;
+ int64_t u32;
+ int64_t s32;
+};
+
+SEC("raw_tracepoint/sys_enter")
+int test_core_bitfields(void *ctx)
+{
+ struct core_reloc_bitfields *in = (void *)&data.in;
+ struct core_reloc_bitfields_output *out = (void *)&data.out;
+ uint64_t res;
+
+ out->ub1 = BPF_CORE_READ_BITFIELD_PROBED(in, ub1);
+ out->ub2 = BPF_CORE_READ_BITFIELD_PROBED(in, ub2);
+ out->ub7 = BPF_CORE_READ_BITFIELD_PROBED(in, ub7);
+ out->sb4 = BPF_CORE_READ_BITFIELD_PROBED(in, sb4);
+ out->sb20 = BPF_CORE_READ_BITFIELD_PROBED(in, sb20);
+ out->u32 = BPF_CORE_READ_BITFIELD_PROBED(in, u32);
+ out->s32 = BPF_CORE_READ_BITFIELD_PROBED(in, s32);
+
+ return 0;
+}
+
diff --git a/tools/testing/selftests/bpf/progs/test_core_reloc_existence.c b/tools/testing/selftests/bpf/progs/test_core_reloc_existence.c
new file mode 100644
index 000000000000..1b7f0ae49cfb
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_core_reloc_existence.c
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2019 Facebook
+
+#include <linux/bpf.h>
+#include <stdint.h>
+#include "bpf_helpers.h"
+#include "bpf_core_read.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+ char in[256];
+ char out[256];
+} data = {};
+
+struct core_reloc_existence_output {
+ int a_exists;
+ int a_value;
+ int b_exists;
+ int b_value;
+ int c_exists;
+ int c_value;
+ int arr_exists;
+ int arr_value;
+ int s_exists;
+ int s_value;
+};
+
+struct core_reloc_existence {
+ struct {
+ int x;
+ } s;
+ int arr[1];
+ int a;
+ struct {
+ int b;
+ };
+ int c;
+};
+
+SEC("raw_tracepoint/sys_enter")
+int test_core_existence(void *ctx)
+{
+ struct core_reloc_existence *in = (void *)&data.in;
+ struct core_reloc_existence_output *out = (void *)&data.out;
+
+ out->a_exists = bpf_core_field_exists(in->a);
+ if (bpf_core_field_exists(in->a))
+ out->a_value = BPF_CORE_READ(in, a);
+ else
+ out->a_value = 0xff000001u;
+
+ out->b_exists = bpf_core_field_exists(in->b);
+ if (bpf_core_field_exists(in->b))
+ out->b_value = BPF_CORE_READ(in, b);
+ else
+ out->b_value = 0xff000002u;
+
+ out->c_exists = bpf_core_field_exists(in->c);
+ if (bpf_core_field_exists(in->c))
+ out->c_value = BPF_CORE_READ(in, c);
+ else
+ out->c_value = 0xff000003u;
+
+ out->arr_exists = bpf_core_field_exists(in->arr);
+ if (bpf_core_field_exists(in->arr))
+ out->arr_value = BPF_CORE_READ(in, arr[0]);
+ else
+ out->arr_value = 0xff000004u;
+
+ out->s_exists = bpf_core_field_exists(in->s);
+ if (bpf_core_field_exists(in->s))
+ out->s_value = BPF_CORE_READ(in, s.x);
+ else
+ out->s_value = 0xff000005u;
+
+ return 0;
+}
+
diff --git a/tools/testing/selftests/bpf/progs/test_core_reloc_flavors.c b/tools/testing/selftests/bpf/progs/test_core_reloc_flavors.c
index 9fda73e87972..b5dbeef540fd 100644
--- a/tools/testing/selftests/bpf/progs/test_core_reloc_flavors.c
+++ b/tools/testing/selftests/bpf/progs/test_core_reloc_flavors.c
@@ -4,13 +4,14 @@
#include <linux/bpf.h>
#include <stdint.h>
#include "bpf_helpers.h"
+#include "bpf_core_read.h"
char _license[] SEC("license") = "GPL";
-static volatile struct data {
+struct {
char in[256];
char out[256];
-} data;
+} data = {};
struct core_reloc_flavors {
int a;
@@ -39,6 +40,8 @@ struct core_reloc_flavors___weird {
};
};
+#define CORE_READ(dst, src) bpf_core_read(dst, sizeof(*(dst)), src)
+
SEC("raw_tracepoint/sys_enter")
int test_core_flavors(void *ctx)
{
@@ -48,13 +51,13 @@ int test_core_flavors(void *ctx)
struct core_reloc_flavors *out = (void *)&data.out;
/* read a using weird layout */
- if (BPF_CORE_READ(&out->a, &in_weird->a))
+ if (CORE_READ(&out->a, &in_weird->a))
return 1;
/* read b using reversed layout */
- if (BPF_CORE_READ(&out->b, &in_rev->b))
+ if (CORE_READ(&out->b, &in_rev->b))
return 1;
/* read c using original layout */
- if (BPF_CORE_READ(&out->c, &in_orig->c))
+ if (CORE_READ(&out->c, &in_orig->c))
return 1;
return 0;
diff --git a/tools/testing/selftests/bpf/progs/test_core_reloc_ints.c b/tools/testing/selftests/bpf/progs/test_core_reloc_ints.c
index d99233c8008a..c78ab6d28a14 100644
--- a/tools/testing/selftests/bpf/progs/test_core_reloc_ints.c
+++ b/tools/testing/selftests/bpf/progs/test_core_reloc_ints.c
@@ -4,13 +4,14 @@
#include <linux/bpf.h>
#include <stdint.h>
#include "bpf_helpers.h"
+#include "bpf_core_read.h"
char _license[] SEC("license") = "GPL";
-static volatile struct data {
+struct {
char in[256];
char out[256];
-} data;
+} data = {};
struct core_reloc_ints {
uint8_t u8_field;
@@ -23,20 +24,22 @@ struct core_reloc_ints {
int64_t s64_field;
};
+#define CORE_READ(dst, src) bpf_core_read(dst, sizeof(*(dst)), src)
+
SEC("raw_tracepoint/sys_enter")
int test_core_ints(void *ctx)
{
struct core_reloc_ints *in = (void *)&data.in;
struct core_reloc_ints *out = (void *)&data.out;
- if (BPF_CORE_READ(&out->u8_field, &in->u8_field) ||
- BPF_CORE_READ(&out->s8_field, &in->s8_field) ||
- BPF_CORE_READ(&out->u16_field, &in->u16_field) ||
- BPF_CORE_READ(&out->s16_field, &in->s16_field) ||
- BPF_CORE_READ(&out->u32_field, &in->u32_field) ||
- BPF_CORE_READ(&out->s32_field, &in->s32_field) ||
- BPF_CORE_READ(&out->u64_field, &in->u64_field) ||
- BPF_CORE_READ(&out->s64_field, &in->s64_field))
+ if (CORE_READ(&out->u8_field, &in->u8_field) ||
+ CORE_READ(&out->s8_field, &in->s8_field) ||
+ CORE_READ(&out->u16_field, &in->u16_field) ||
+ CORE_READ(&out->s16_field, &in->s16_field) ||
+ CORE_READ(&out->u32_field, &in->u32_field) ||
+ CORE_READ(&out->s32_field, &in->s32_field) ||
+ CORE_READ(&out->u64_field, &in->u64_field) ||
+ CORE_READ(&out->s64_field, &in->s64_field))
return 1;
return 0;
diff --git a/tools/testing/selftests/bpf/progs/test_core_reloc_kernel.c b/tools/testing/selftests/bpf/progs/test_core_reloc_kernel.c
index 37e02aa3f0c8..270de441b60a 100644
--- a/tools/testing/selftests/bpf/progs/test_core_reloc_kernel.c
+++ b/tools/testing/selftests/bpf/progs/test_core_reloc_kernel.c
@@ -4,32 +4,92 @@
#include <linux/bpf.h>
#include <stdint.h>
#include "bpf_helpers.h"
+#include "bpf_core_read.h"
char _license[] SEC("license") = "GPL";
-static volatile struct data {
+struct {
char in[256];
char out[256];
-} data;
+ uint64_t my_pid_tgid;
+} data = {};
+
+struct core_reloc_kernel_output {
+ int valid[10];
+ /* we have test_progs[-flavor], so cut flavor part */
+ char comm[sizeof("test_progs")];
+ int comm_len;
+};
struct task_struct {
int pid;
int tgid;
+ char comm[16];
+ struct task_struct *group_leader;
};
+#define CORE_READ(dst, src) bpf_core_read(dst, sizeof(*(dst)), src)
+
SEC("raw_tracepoint/sys_enter")
int test_core_kernel(void *ctx)
{
struct task_struct *task = (void *)bpf_get_current_task();
+ struct core_reloc_kernel_output *out = (void *)&data.out;
uint64_t pid_tgid = bpf_get_current_pid_tgid();
+ uint32_t real_tgid = (uint32_t)pid_tgid;
int pid, tgid;
- if (BPF_CORE_READ(&pid, &task->pid) ||
- BPF_CORE_READ(&tgid, &task->tgid))
+ if (data.my_pid_tgid != pid_tgid)
+ return 0;
+
+ if (CORE_READ(&pid, &task->pid) ||
+ CORE_READ(&tgid, &task->tgid))
return 1;
/* validate pid + tgid matches */
- data.out[0] = (((uint64_t)pid << 32) | tgid) == pid_tgid;
+ out->valid[0] = (((uint64_t)pid << 32) | tgid) == pid_tgid;
+
+ /* test variadic BPF_CORE_READ macros */
+ out->valid[1] = BPF_CORE_READ(task,
+ tgid) == real_tgid;
+ out->valid[2] = BPF_CORE_READ(task,
+ group_leader,
+ tgid) == real_tgid;
+ out->valid[3] = BPF_CORE_READ(task,
+ group_leader, group_leader,
+ tgid) == real_tgid;
+ out->valid[4] = BPF_CORE_READ(task,
+ group_leader, group_leader, group_leader,
+ tgid) == real_tgid;
+ out->valid[5] = BPF_CORE_READ(task,
+ group_leader, group_leader, group_leader,
+ group_leader,
+ tgid) == real_tgid;
+ out->valid[6] = BPF_CORE_READ(task,
+ group_leader, group_leader, group_leader,
+ group_leader, group_leader,
+ tgid) == real_tgid;
+ out->valid[7] = BPF_CORE_READ(task,
+ group_leader, group_leader, group_leader,
+ group_leader, group_leader, group_leader,
+ tgid) == real_tgid;
+ out->valid[8] = BPF_CORE_READ(task,
+ group_leader, group_leader, group_leader,
+ group_leader, group_leader, group_leader,
+ group_leader,
+ tgid) == real_tgid;
+ out->valid[9] = BPF_CORE_READ(task,
+ group_leader, group_leader, group_leader,
+ group_leader, group_leader, group_leader,
+ group_leader, group_leader,
+ tgid) == real_tgid;
+
+ /* test BPF_CORE_READ_STR_INTO() returns correct code and contents */
+ out->comm_len = BPF_CORE_READ_STR_INTO(
+ &out->comm, task,
+ group_leader, group_leader, group_leader, group_leader,
+ group_leader, group_leader, group_leader, group_leader,
+ comm);
return 0;
}
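Each variadic BPF_CORE_READ() above costs one relocated probe read per pointer
hop, so the deepest valid[9] case chains nine reads (eight group_leader hops
plus the final tgid). Hand-expanded sketch of the two-hop form:

    /* BPF_CORE_READ(task, group_leader, tgid), expanded by hand */
    struct task_struct *leader;
    int tgid;
    bpf_core_read(&leader, sizeof(leader), &task->group_leader);
    bpf_core_read(&tgid, sizeof(tgid), &leader->tgid);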
diff --git a/tools/testing/selftests/bpf/progs/test_core_reloc_misc.c b/tools/testing/selftests/bpf/progs/test_core_reloc_misc.c
index c59984bd3e23..292a5c4ee76a 100644
--- a/tools/testing/selftests/bpf/progs/test_core_reloc_misc.c
+++ b/tools/testing/selftests/bpf/progs/test_core_reloc_misc.c
@@ -4,13 +4,14 @@
#include <linux/bpf.h>
#include <stdint.h>
#include "bpf_helpers.h"
+#include "bpf_core_read.h"
char _license[] SEC("license") = "GPL";
-static volatile struct data {
+struct {
char in[256];
char out[256];
-} data;
+} data = {};
struct core_reloc_misc_output {
int a, b, c;
@@ -32,6 +33,8 @@ struct core_reloc_misc_extensible {
int b;
};
+#define CORE_READ(dst, src) bpf_core_read(dst, sizeof(*(dst)), src)
+
SEC("raw_tracepoint/sys_enter")
int test_core_misc(void *ctx)
{
@@ -41,15 +44,15 @@ int test_core_misc(void *ctx)
struct core_reloc_misc_output *out = (void *)&data.out;
/* record two different relocations with the same accessor string */
- if (BPF_CORE_READ(&out->a, &in_a->a1) || /* accessor: 0:0 */
- BPF_CORE_READ(&out->b, &in_b->b1)) /* accessor: 0:0 */
+ if (CORE_READ(&out->a, &in_a->a1) || /* accessor: 0:0 */
+ CORE_READ(&out->b, &in_b->b1)) /* accessor: 0:0 */
return 1;
/* Validate relocations capture array-only accesses for structs with
* fixed header, but with potentially extendable tail. This will read
* first 4 bytes of 2nd element of in_ext array of potentially
* variably sized struct core_reloc_misc_extensible. */
- if (BPF_CORE_READ(&out->c, &in_ext[2])) /* accessor: 2 */
+ if (CORE_READ(&out->c, &in_ext[2])) /* accessor: 2 */
return 1;
return 0;
diff --git a/tools/testing/selftests/bpf/progs/test_core_reloc_mods.c b/tools/testing/selftests/bpf/progs/test_core_reloc_mods.c
index f98b942c062b..0b28bfacc8fd 100644
--- a/tools/testing/selftests/bpf/progs/test_core_reloc_mods.c
+++ b/tools/testing/selftests/bpf/progs/test_core_reloc_mods.c
@@ -4,13 +4,14 @@
#include <linux/bpf.h>
#include <stdint.h>
#include "bpf_helpers.h"
+#include "bpf_core_read.h"
char _license[] SEC("license") = "GPL";
-static volatile struct data {
+struct {
char in[256];
char out[256];
-} data;
+} data = {};
struct core_reloc_mods_output {
int a, b, c, d, e, f, g, h;
@@ -41,20 +42,22 @@ struct core_reloc_mods {
core_reloc_mods_substruct_t h;
};
+#define CORE_READ(dst, src) bpf_core_read(dst, sizeof(*(dst)), src)
+
SEC("raw_tracepoint/sys_enter")
int test_core_mods(void *ctx)
{
struct core_reloc_mods *in = (void *)&data.in;
struct core_reloc_mods_output *out = (void *)&data.out;
- if (BPF_CORE_READ(&out->a, &in->a) ||
- BPF_CORE_READ(&out->b, &in->b) ||
- BPF_CORE_READ(&out->c, &in->c) ||
- BPF_CORE_READ(&out->d, &in->d) ||
- BPF_CORE_READ(&out->e, &in->e[2]) ||
- BPF_CORE_READ(&out->f, &in->f[1]) ||
- BPF_CORE_READ(&out->g, &in->g.x) ||
- BPF_CORE_READ(&out->h, &in->h.y))
+ if (CORE_READ(&out->a, &in->a) ||
+ CORE_READ(&out->b, &in->b) ||
+ CORE_READ(&out->c, &in->c) ||
+ CORE_READ(&out->d, &in->d) ||
+ CORE_READ(&out->e, &in->e[2]) ||
+ CORE_READ(&out->f, &in->f[1]) ||
+ CORE_READ(&out->g, &in->g.x) ||
+ CORE_READ(&out->h, &in->h.y))
return 1;
return 0;
diff --git a/tools/testing/selftests/bpf/progs/test_core_reloc_nesting.c b/tools/testing/selftests/bpf/progs/test_core_reloc_nesting.c
index 3ca30cec2b39..39279bf0c9db 100644
--- a/tools/testing/selftests/bpf/progs/test_core_reloc_nesting.c
+++ b/tools/testing/selftests/bpf/progs/test_core_reloc_nesting.c
@@ -4,13 +4,14 @@
#include <linux/bpf.h>
#include <stdint.h>
#include "bpf_helpers.h"
+#include "bpf_core_read.h"
char _license[] SEC("license") = "GPL";
-static volatile struct data {
+struct {
char in[256];
char out[256];
-} data;
+} data = {};
struct core_reloc_nesting_substruct {
int a;
@@ -30,15 +31,17 @@ struct core_reloc_nesting {
} b;
};
+#define CORE_READ(dst, src) bpf_core_read(dst, sizeof(*(dst)), src)
+
SEC("raw_tracepoint/sys_enter")
int test_core_nesting(void *ctx)
{
struct core_reloc_nesting *in = (void *)&data.in;
struct core_reloc_nesting *out = (void *)&data.out;
- if (BPF_CORE_READ(&out->a.a.a, &in->a.a.a))
+ if (CORE_READ(&out->a.a.a, &in->a.a.a))
return 1;
- if (BPF_CORE_READ(&out->b.b.b, &in->b.b.b))
+ if (CORE_READ(&out->b.b.b, &in->b.b.b))
return 1;
return 0;
diff --git a/tools/testing/selftests/bpf/progs/test_core_reloc_primitives.c b/tools/testing/selftests/bpf/progs/test_core_reloc_primitives.c
index add52f23ab35..ea57973cdd19 100644
--- a/tools/testing/selftests/bpf/progs/test_core_reloc_primitives.c
+++ b/tools/testing/selftests/bpf/progs/test_core_reloc_primitives.c
@@ -4,13 +4,14 @@
#include <linux/bpf.h>
#include <stdint.h>
#include "bpf_helpers.h"
+#include "bpf_core_read.h"
char _license[] SEC("license") = "GPL";
-static volatile struct data {
+struct {
char in[256];
char out[256];
-} data;
+} data = {};
enum core_reloc_primitives_enum {
A = 0,
@@ -25,17 +26,19 @@ struct core_reloc_primitives {
int (*f)(const char *);
};
+#define CORE_READ(dst, src) bpf_core_read(dst, sizeof(*(dst)), src)
+
SEC("raw_tracepoint/sys_enter")
int test_core_primitives(void *ctx)
{
struct core_reloc_primitives *in = (void *)&data.in;
struct core_reloc_primitives *out = (void *)&data.out;
- if (BPF_CORE_READ(&out->a, &in->a) ||
- BPF_CORE_READ(&out->b, &in->b) ||
- BPF_CORE_READ(&out->c, &in->c) ||
- BPF_CORE_READ(&out->d, &in->d) ||
- BPF_CORE_READ(&out->f, &in->f))
+ if (CORE_READ(&out->a, &in->a) ||
+ CORE_READ(&out->b, &in->b) ||
+ CORE_READ(&out->c, &in->c) ||
+ CORE_READ(&out->d, &in->d) ||
+ CORE_READ(&out->f, &in->f))
return 1;
return 0;
diff --git a/tools/testing/selftests/bpf/progs/test_core_reloc_ptr_as_arr.c b/tools/testing/selftests/bpf/progs/test_core_reloc_ptr_as_arr.c
index 526b7ddc7ea1..d1eb59d4ea64 100644
--- a/tools/testing/selftests/bpf/progs/test_core_reloc_ptr_as_arr.c
+++ b/tools/testing/selftests/bpf/progs/test_core_reloc_ptr_as_arr.c
@@ -4,25 +4,28 @@
#include <linux/bpf.h>
#include <stdint.h>
#include "bpf_helpers.h"
+#include "bpf_core_read.h"
char _license[] SEC("license") = "GPL";
-static volatile struct data {
+struct {
char in[256];
char out[256];
-} data;
+} data = {};
struct core_reloc_ptr_as_arr {
int a;
};
+#define CORE_READ(dst, src) bpf_core_read(dst, sizeof(*(dst)), src)
+
SEC("raw_tracepoint/sys_enter")
int test_core_ptr_as_arr(void *ctx)
{
struct core_reloc_ptr_as_arr *in = (void *)&data.in;
struct core_reloc_ptr_as_arr *out = (void *)&data.out;
- if (BPF_CORE_READ(&out->a, &in[2].a))
+ if (CORE_READ(&out->a, &in[2].a))
return 1;
return 0;
diff --git a/tools/testing/selftests/bpf/progs/test_core_reloc_size.c b/tools/testing/selftests/bpf/progs/test_core_reloc_size.c
new file mode 100644
index 000000000000..9e091124d3bd
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_core_reloc_size.c
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2019 Facebook
+
+#include <linux/bpf.h>
+#include <stdint.h>
+#include "bpf_helpers.h"
+#include "bpf_core_read.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+ char in[256];
+ char out[256];
+} data = {};
+
+struct core_reloc_size_output {
+ int int_sz;
+ int struct_sz;
+ int union_sz;
+ int arr_sz;
+ int arr_elem_sz;
+ int ptr_sz;
+ int enum_sz;
+};
+
+struct core_reloc_size {
+ int int_field;
+ struct { int x; } struct_field;
+ union { int x; } union_field;
+ int arr_field[4];
+ void *ptr_field;
+ enum { VALUE = 123 } enum_field;
+};
+
+SEC("raw_tracepoint/sys_enter")
+int test_core_size(void *ctx)
+{
+ struct core_reloc_size *in = (void *)&data.in;
+ struct core_reloc_size_output *out = (void *)&data.out;
+
+ out->int_sz = bpf_core_field_size(in->int_field);
+ out->struct_sz = bpf_core_field_size(in->struct_field);
+ out->union_sz = bpf_core_field_size(in->union_field);
+ out->arr_sz = bpf_core_field_size(in->arr_field);
+ out->arr_elem_sz = bpf_core_field_size(in->arr_field[0]);
+ out->ptr_sz = bpf_core_field_size(in->ptr_field);
+ out->enum_sz = bpf_core_field_size(in->enum_field);
+
+ return 0;
+}
+
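As with bpf_core_field_exists(), the bpf_core_field_size() results are
resolved from the target BTF at load time rather than computed at run time; a
sketch assuming this period's bpf_core_read.h:

    #define bpf_core_field_size(field) \
            __builtin_preserve_field_info(field, BPF_FIELD_BYTE_SIZE)

so e.g. arr_sz reports the size of arr_field in the target type, which need
not match the 16 bytes seen at compile time.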
diff --git a/tools/testing/selftests/bpf/progs/test_get_stack_rawtp.c b/tools/testing/selftests/bpf/progs/test_get_stack_rawtp.c
index f8ffa3f3d44b..6a4a8f57f174 100644
--- a/tools/testing/selftests/bpf/progs/test_get_stack_rawtp.c
+++ b/tools/testing/selftests/bpf/progs/test_get_stack_rawtp.c
@@ -47,12 +47,11 @@ struct {
* issue and avoid complicated C programming massaging.
* This is an acceptable workaround since there is one entry here.
*/
-typedef __u64 raw_stack_trace_t[2 * MAX_STACK_RAWTP];
struct {
__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
__uint(max_entries, 1);
__type(key, __u32);
- __type(value, raw_stack_trace_t);
+ __type(value, __u64[2 * MAX_STACK_RAWTP]);
} rawdata_map SEC(".maps");
SEC("raw_tracepoint/sys_enter")
@@ -100,4 +99,3 @@ int bpf_prog1(void *ctx)
}
char _license[] SEC("license") = "GPL";
-__u32 _version SEC("version") = 1; /* ignored by tracepoints, required by libbpf.a */
diff --git a/tools/testing/selftests/bpf/progs/test_mmap.c b/tools/testing/selftests/bpf/progs/test_mmap.c
new file mode 100644
index 000000000000..0d2ec9fbcf61
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_mmap.c
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2019 Facebook
+
+#include <linux/bpf.h>
+#include <stdint.h>
+#include "bpf_helpers.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 512 * 4); /* at least 4 pages of data */
+ __uint(map_flags, BPF_F_MMAPABLE);
+ __type(key, __u32);
+ __type(value, __u64);
+} data_map SEC(".maps");
+
+static volatile __u64 in_val;
+static volatile __u64 out_val;
+
+SEC("raw_tracepoint/sys_enter")
+int test_mmap(void *ctx)
+{
+ int zero = 0, one = 1, two = 2, far = 1500;
+ __u64 val, *p;
+
+ out_val = in_val;
+
+ /* data_map[2] = in_val; */
+ bpf_map_update_elem(&data_map, &two, (const void *)&in_val, 0);
+
+ /* data_map[1] = data_map[0] * 2; */
+ p = bpf_map_lookup_elem(&data_map, &zero);
+ if (p) {
+ val = (*p) * 2;
+ bpf_map_update_elem(&data_map, &one, &val, 0);
+ }
+
+ /* data_map[far] = in_val * 3; */
+ val = in_val * 3;
+ bpf_map_update_elem(&data_map, &far, &val, 0);
+
+ return 0;
+}
+
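BPF_F_MMAPABLE is what lets user space map this array directly instead of
going through bpf_map_{lookup,update}_elem() syscalls. The matching user-space
side could look roughly like this (sketch, not part of this patch; assumes the
object has already been opened and loaded as obj):

    #include <sys/mman.h>
    struct bpf_map *map = bpf_object__find_map_by_name(obj, "data_map");
    /* 512 * 4 entries x 8 bytes = 4 pages, matching max_entries above */
    __u64 *arr = mmap(NULL, 512 * 4 * sizeof(__u64),
                      PROT_READ | PROT_WRITE, MAP_SHARED,
                      bpf_map__fd(map), 0);

after which arr[0..2] and arr[1500] alias the slots the program touches.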
diff --git a/tools/testing/selftests/bpf/progs/test_overhead.c b/tools/testing/selftests/bpf/progs/test_overhead.c
new file mode 100644
index 000000000000..96c0124a04ba
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_overhead.c
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 Facebook */
+#include <linux/bpf.h>
+#include "bpf_helpers.h"
+#include "bpf_tracing.h"
+#include "bpf_trace_helpers.h"
+
+SEC("kprobe/__set_task_comm")
+int prog1(struct pt_regs *ctx)
+{
+ return 0;
+}
+
+SEC("kretprobe/__set_task_comm")
+int prog2(struct pt_regs *ctx)
+{
+ return 0;
+}
+
+SEC("raw_tp/task_rename")
+int prog3(struct bpf_raw_tracepoint_args *ctx)
+{
+ return 0;
+}
+
+struct task_struct;
+BPF_TRACE_3("fentry/__set_task_comm", prog4,
+ struct task_struct *, tsk, const char *, buf, __u8, exec)
+{
+ return 0;
+}
+
+BPF_TRACE_3("fexit/__set_task_comm", prog5,
+ struct task_struct *, tsk, const char *, buf, __u8, exec)
+{
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_perf_buffer.c b/tools/testing/selftests/bpf/progs/test_perf_buffer.c
index 876c27deb65a..07c09ca5546a 100644
--- a/tools/testing/selftests/bpf/progs/test_perf_buffer.c
+++ b/tools/testing/selftests/bpf/progs/test_perf_buffer.c
@@ -22,4 +22,3 @@ int handle_sys_nanosleep_entry(struct pt_regs *ctx)
}
char _license[] SEC("license") = "GPL";
-__u32 _version SEC("version") = 1;
diff --git a/tools/testing/selftests/bpf/progs/test_pinning.c b/tools/testing/selftests/bpf/progs/test_pinning.c
new file mode 100644
index 000000000000..f20e7e00373f
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_pinning.c
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include "bpf_helpers.h"
+
+int _version SEC("version") = 1;
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u64);
+ __uint(pinning, LIBBPF_PIN_BY_NAME);
+} pinmap SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u64);
+} nopinmap SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u64);
+ __uint(pinning, LIBBPF_PIN_NONE);
+} nopinmap2 SEC(".maps");
+
+char _license[] SEC("license") = "GPL";
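LIBBPF_PIN_BY_NAME tells libbpf to pin the map at (or reuse an existing pin
from) <pin_root_path>/<map name>, /sys/fs/bpf/pinmap by default, while
LIBBPF_PIN_NONE and an absent pinning attribute both mean no automatic
pinning. The pin root can be overridden when the object is opened (sketch,
assuming this period's open options):

    DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
                        .pin_root_path = "/sys/fs/bpf/custom");
    struct bpf_object *obj = bpf_object__open_file("test_pinning.o", &opts);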
diff --git a/tools/testing/selftests/bpf/progs/test_pinning_invalid.c b/tools/testing/selftests/bpf/progs/test_pinning_invalid.c
new file mode 100644
index 000000000000..51b38abe7ba1
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_pinning_invalid.c
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include "bpf_helpers.h"
+
+int _version SEC("version") = 1;
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u64);
+ __uint(pinning, 2); /* invalid */
+} nopinmap3 SEC(".maps");
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_pkt_access.c b/tools/testing/selftests/bpf/progs/test_pkt_access.c
index 7cf42d14103f..3a7b4b607ed3 100644
--- a/tools/testing/selftests/bpf/progs/test_pkt_access.c
+++ b/tools/testing/selftests/bpf/progs/test_pkt_access.c
@@ -17,8 +17,38 @@
#define barrier() __asm__ __volatile__("": : :"memory")
int _version SEC("version") = 1;
-SEC("test1")
-int process(struct __sk_buff *skb)
+/* llvm will optimize both subprograms into exactly the same BPF assembly
+ *
+ * Disassembly of section .text:
+ *
+ * 0000000000000000 test_pkt_access_subprog1:
+ * ; return skb->len * 2;
+ * 0: 61 10 00 00 00 00 00 00 r0 = *(u32 *)(r1 + 0)
+ * 1: 64 00 00 00 01 00 00 00 w0 <<= 1
+ * 2: 95 00 00 00 00 00 00 00 exit
+ *
+ * 0000000000000018 test_pkt_access_subprog2:
+ * ; return skb->len * val;
+ * 3: 61 10 00 00 00 00 00 00 r0 = *(u32 *)(r1 + 0)
+ * 4: 64 00 00 00 01 00 00 00 w0 <<= 1
+ * 5: 95 00 00 00 00 00 00 00 exit
+ *
+ * Which makes it an interesting test for BTF-enabled verifier.
+ */
+static __attribute__ ((noinline))
+int test_pkt_access_subprog1(volatile struct __sk_buff *skb)
+{
+ return skb->len * 2;
+}
+
+static __attribute__ ((noinline))
+int test_pkt_access_subprog2(int val, volatile struct __sk_buff *skb)
+{
+ return skb->len * val;
+}
+
+SEC("classifier/test_pkt_access")
+int test_pkt_access(struct __sk_buff *skb)
{
void *data_end = (void *)(long)skb->data_end;
void *data = (void *)(long)skb->data;
@@ -48,6 +78,10 @@ int process(struct __sk_buff *skb)
tcp = (struct tcphdr *)((void *)(ip6h) + ihl_len);
}
+ if (test_pkt_access_subprog1(skb) != skb->len * 2)
+ return TC_ACT_SHOT;
+ if (test_pkt_access_subprog2(2, skb) != skb->len * 2)
+ return TC_ACT_SHOT;
if (tcp) {
if (((void *)(tcp) + 20) > data_end || proto != 6)
return TC_ACT_SHOT;
diff --git a/tools/testing/selftests/bpf/progs/test_probe_user.c b/tools/testing/selftests/bpf/progs/test_probe_user.c
new file mode 100644
index 000000000000..1871e2ece0c4
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_probe_user.c
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/ptrace.h>
+#include <linux/bpf.h>
+
+#include <netinet/in.h>
+
+#include "bpf_helpers.h"
+#include "bpf_tracing.h"
+
+static struct sockaddr_in old;
+
+SEC("kprobe/__sys_connect")
+int handle_sys_connect(struct pt_regs *ctx)
+{
+ void *ptr = (void *)PT_REGS_PARM2(ctx);
+ struct sockaddr_in new;
+
+ bpf_probe_read_user(&old, sizeof(old), ptr);
+ __builtin_memset(&new, 0xab, sizeof(new));
+ bpf_probe_write_user(ptr, &new, sizeof(new));
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
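bpf_probe_read_user() is one half of the probe_read split: the older
bpf_probe_read() does not say which address space it targets (a problem on
architectures where user and kernel addresses overlap), while the _user and
_kernel variants make it explicit; the test_tcp_estats change further below
switches to the kernel-side variant for the same reason. Sketch of the
contrast:

    bpf_probe_read_user(&old, sizeof(old), user_ptr);    /* user memory */
    bpf_probe_read_kernel(&val, sizeof(val), kern_ptr);  /* kernel memory */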
diff --git a/tools/testing/selftests/bpf/test_queue_stack_map.h b/tools/testing/selftests/bpf/progs/test_queue_stack_map.h
index 0e014d3b2b36..0e014d3b2b36 100644
--- a/tools/testing/selftests/bpf/test_queue_stack_map.h
+++ b/tools/testing/selftests/bpf/progs/test_queue_stack_map.h
diff --git a/tools/testing/selftests/bpf/progs/test_rdonly_maps.c b/tools/testing/selftests/bpf/progs/test_rdonly_maps.c
new file mode 100644
index 000000000000..52d94e8b214d
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_rdonly_maps.c
@@ -0,0 +1,83 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2019 Facebook
+
+#include <linux/ptrace.h>
+#include <linux/bpf.h>
+#include "bpf_helpers.h"
+
+static volatile const struct {
+ unsigned a[4];
+ /*
+ * if the struct's size is a multiple of 16, the compiler will put it
+ * into the .rodata.cst16 section, which is not recognized by libbpf;
+ * work around this by ensuring the struct's size is not a multiple of 16
+ */
+ char _y;
+} rdonly_values = { .a = {2, 3, 4, 5} };
+
+static volatile struct {
+ unsigned did_run;
+ unsigned iters;
+ unsigned sum;
+} res;
+
+SEC("raw_tracepoint/sys_enter:skip_loop")
+int skip_loop(struct pt_regs *ctx)
+{
+ /* prevent the compiler from optimizing everything out */
+ unsigned * volatile p = (void *)&rdonly_values.a;
+ unsigned iters = 0, sum = 0;
+
+ /* we should never enter this loop */
+ while (*p & 1) {
+ iters++;
+ sum += *p;
+ p++;
+ }
+ res.did_run = 1;
+ res.iters = iters;
+ res.sum = sum;
+ return 0;
+}
+
+SEC("raw_tracepoint/sys_enter:part_loop")
+int part_loop(struct pt_regs *ctx)
+{
+ /* prevent the compiler from optimizing everything out */
+ unsigned * volatile p = (void *)&rdonly_values.a;
+ unsigned iters = 0, sum = 0;
+
+ /* validate verifier can derive loop termination */
+ while (*p < 5) {
+ iters++;
+ sum += *p;
+ p++;
+ }
+ res.did_run = 1;
+ res.iters = iters;
+ res.sum = sum;
+ return 0;
+}
+
+SEC("raw_tracepoint/sys_enter:full_loop")
+int full_loop(struct pt_regs *ctx)
+{
+ /* prevent the compiler from optimizing everything out */
+ unsigned * volatile p = (void *)&rdonly_values.a;
+ int i = sizeof(rdonly_values.a) / sizeof(rdonly_values.a[0]);
+ unsigned iters = 0, sum = 0;
+
+ /* validate verifier can allow full loop as well */
+ while (i > 0) {
+ iters++;
+ sum += *p;
+ p++;
+ i--;
+ }
+ res.did_run = 1;
+ res.iters = iters;
+ res.sum = sum;
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
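The point of these three programs is that the "static volatile const" struct
lands in .rodata, which libbpf loads as a frozen read-only map whose contents
the verifier may treat as known constants. With .a = {2, 3, 4, 5} that gives
(reasoning sketch):

    /* skip_loop: *p starts at 2, (2 & 1) == 0 -> body provably never runs
     * part_loop: 2, 3 and 4 are < 5; 5 is not -> exactly 3 iterations
     * full_loop: i counts down from 4 to 0    -> exactly 4 iterations */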
diff --git a/tools/testing/selftests/bpf/progs/test_seg6_loop.c b/tools/testing/selftests/bpf/progs/test_seg6_loop.c
index c4d104428643..69880c1e7700 100644
--- a/tools/testing/selftests/bpf/progs/test_seg6_loop.c
+++ b/tools/testing/selftests/bpf/progs/test_seg6_loop.c
@@ -132,8 +132,10 @@ static __always_inline int is_valid_tlv_boundary(struct __sk_buff *skb,
*pad_off = 0;
// we can only go as far as ~10 TLVs due to the BPF max stack size
+ // workaround: define induction variable "i" as "long" instead
+ // of "int" to prevent alu32 sub-register spilling.
#pragma clang loop unroll(disable)
- for (int i = 0; i < 100; i++) {
+ for (long i = 0; i < 100; i++) {
struct sr6_tlv_t tlv;
if (cur_off == *tlv_off)
diff --git a/tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c b/tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c
index e21cd736c196..cb49ccb707d1 100644
--- a/tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c
+++ b/tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c
@@ -53,7 +53,7 @@ static struct bpf_sock_tuple *get_tuple(void *data, __u64 nh_off,
return result;
}
-SEC("sk_lookup_success")
+SEC("classifier/sk_lookup_success")
int bpf_sk_lookup_test0(struct __sk_buff *skb)
{
void *data_end = (void *)(long)skb->data_end;
@@ -78,7 +78,7 @@ int bpf_sk_lookup_test0(struct __sk_buff *skb)
return sk ? TC_ACT_OK : TC_ACT_UNSPEC;
}
-SEC("sk_lookup_success_simple")
+SEC("classifier/sk_lookup_success_simple")
int bpf_sk_lookup_test1(struct __sk_buff *skb)
{
struct bpf_sock_tuple tuple = {};
@@ -90,7 +90,7 @@ int bpf_sk_lookup_test1(struct __sk_buff *skb)
return 0;
}
-SEC("fail_use_after_free")
+SEC("classifier/fail_use_after_free")
int bpf_sk_lookup_uaf(struct __sk_buff *skb)
{
struct bpf_sock_tuple tuple = {};
@@ -105,7 +105,7 @@ int bpf_sk_lookup_uaf(struct __sk_buff *skb)
return family;
}
-SEC("fail_modify_sk_pointer")
+SEC("classifier/fail_modify_sk_pointer")
int bpf_sk_lookup_modptr(struct __sk_buff *skb)
{
struct bpf_sock_tuple tuple = {};
@@ -120,7 +120,7 @@ int bpf_sk_lookup_modptr(struct __sk_buff *skb)
return 0;
}
-SEC("fail_modify_sk_or_null_pointer")
+SEC("classifier/fail_modify_sk_or_null_pointer")
int bpf_sk_lookup_modptr_or_null(struct __sk_buff *skb)
{
struct bpf_sock_tuple tuple = {};
@@ -134,7 +134,7 @@ int bpf_sk_lookup_modptr_or_null(struct __sk_buff *skb)
return 0;
}
-SEC("fail_no_release")
+SEC("classifier/fail_no_release")
int bpf_sk_lookup_test2(struct __sk_buff *skb)
{
struct bpf_sock_tuple tuple = {};
@@ -143,7 +143,7 @@ int bpf_sk_lookup_test2(struct __sk_buff *skb)
return 0;
}
-SEC("fail_release_twice")
+SEC("classifier/fail_release_twice")
int bpf_sk_lookup_test3(struct __sk_buff *skb)
{
struct bpf_sock_tuple tuple = {};
@@ -155,7 +155,7 @@ int bpf_sk_lookup_test3(struct __sk_buff *skb)
return 0;
}
-SEC("fail_release_unchecked")
+SEC("classifier/fail_release_unchecked")
int bpf_sk_lookup_test4(struct __sk_buff *skb)
{
struct bpf_sock_tuple tuple = {};
@@ -172,7 +172,7 @@ void lookup_no_release(struct __sk_buff *skb)
bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0);
}
-SEC("fail_no_release_subcall")
+SEC("classifier/fail_no_release_subcall")
int bpf_sk_lookup_test5(struct __sk_buff *skb)
{
lookup_no_release(skb);
diff --git a/tools/testing/selftests/bpf/progs/test_skb_ctx.c b/tools/testing/selftests/bpf/progs/test_skb_ctx.c
index 7a80960d7df1..2a9f4c736ebc 100644
--- a/tools/testing/selftests/bpf/progs/test_skb_ctx.c
+++ b/tools/testing/selftests/bpf/progs/test_skb_ctx.c
@@ -16,6 +16,7 @@ int process(struct __sk_buff *skb)
skb->cb[i]++;
}
skb->priority++;
+ skb->tstamp++;
return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/test_stacktrace_map.c b/tools/testing/selftests/bpf/progs/test_stacktrace_map.c
index fa0be3e10a10..3b7e1dca8829 100644
--- a/tools/testing/selftests/bpf/progs/test_stacktrace_map.c
+++ b/tools/testing/selftests/bpf/progs/test_stacktrace_map.c
@@ -74,4 +74,3 @@ int oncpu(struct sched_switch_args *ctx)
}
char _license[] SEC("license") = "GPL";
-__u32 _version SEC("version") = 1; /* ignored by tracepoints, required by libbpf.a */
diff --git a/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c b/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c
index 608a06871572..d22e438198cf 100644
--- a/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c
+++ b/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c
@@ -44,7 +44,10 @@ int sysctl_tcp_mem(struct bpf_sysctl *ctx)
unsigned long tcp_mem[TCP_MEM_LOOPS] = {};
char value[MAX_VALUE_STR_LEN];
unsigned char i, off = 0;
- int ret;
+ /* a workaround to prevent the compiler from generating
+ * code the verifier cannot handle yet.
+ */
+ volatile int ret;
if (ctx->write)
return 0;
diff --git a/tools/testing/selftests/bpf/progs/test_tcp_estats.c b/tools/testing/selftests/bpf/progs/test_tcp_estats.c
index c8c595da38d4..87b7d934ce73 100644
--- a/tools/testing/selftests/bpf/progs/test_tcp_estats.c
+++ b/tools/testing/selftests/bpf/progs/test_tcp_estats.c
@@ -38,7 +38,7 @@
#include <sys/socket.h>
#include "bpf_helpers.h"
-#define _(P) ({typeof(P) val = 0; bpf_probe_read(&val, sizeof(val), &P); val;})
+#define _(P) ({typeof(P) val = 0; bpf_probe_read_kernel(&val, sizeof(val), &P); val;})
#define TCP_ESTATS_MAGIC 0xBAADBEEF
/* This test case needs "sock" and "pt_regs" data structure.
diff --git a/tools/testing/selftests/bpf/test_bpftool_build.sh b/tools/testing/selftests/bpf/test_bpftool_build.sh
index 4ba5a34bff56..ac349a5cea7e 100755
--- a/tools/testing/selftests/bpf/test_bpftool_build.sh
+++ b/tools/testing/selftests/bpf/test_bpftool_build.sh
@@ -1,18 +1,6 @@
#!/bin/bash
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
-ERROR=0
-TMPDIR=
-
-# If one build fails, continue but return non-0 on exit.
-return_value() {
- if [ -d "$TMPDIR" ] ; then
- rm -rf -- $TMPDIR
- fi
- exit $ERROR
-}
-trap return_value EXIT
-
case $1 in
-h|--help)
echo -e "$0 [-j <n>]"
@@ -20,7 +8,7 @@ case $1 in
echo -e ""
echo -e "\tOptions:"
echo -e "\t\t-j <n>:\tPass -j flag to 'make'."
- exit
+ exit 0
;;
esac
@@ -32,6 +20,22 @@ SCRIPT_REL_PATH=$(realpath --relative-to=$PWD $0)
SCRIPT_REL_DIR=$(dirname $SCRIPT_REL_PATH)
KDIR_ROOT_DIR=$(realpath $PWD/$SCRIPT_REL_DIR/../../../../)
cd $KDIR_ROOT_DIR
+if [ ! -e tools/bpf/bpftool/Makefile ]; then
+ echo -e "skip: bpftool files not found!\n"
+ exit 0
+fi
+
+ERROR=0
+TMPDIR=
+
+# If one build fails, continue but return non-0 on exit.
+return_value() {
+ if [ -d "$TMPDIR" ] ; then
+ rm -rf -- $TMPDIR
+ fi
+ exit $ERROR
+}
+trap return_value EXIT
check() {
local dir=$(realpath $1)
diff --git a/tools/testing/selftests/bpf/test_flow_dissector.sh b/tools/testing/selftests/bpf/test_flow_dissector.sh
index e2d06191bd35..a8485ae103d1 100755
--- a/tools/testing/selftests/bpf/test_flow_dissector.sh
+++ b/tools/testing/selftests/bpf/test_flow_dissector.sh
@@ -18,19 +18,55 @@ fi
# this is the case and run it with in_netns.sh if it is being run in the root
# namespace.
if [[ -z $(ip netns identify $$) ]]; then
+ err=0
+ if bpftool="$(which bpftool)"; then
+ echo "Testing global flow dissector..."
+
+ $bpftool prog loadall ./bpf_flow.o /sys/fs/bpf/flow \
+ type flow_dissector
+
+ if ! unshare --net $bpftool prog attach pinned \
+ /sys/fs/bpf/flow/flow_dissector flow_dissector; then
+ echo "Unexpected unsuccessful attach in namespace" >&2
+ err=1
+ fi
+
+ $bpftool prog attach pinned /sys/fs/bpf/flow/flow_dissector \
+ flow_dissector
+
+ if unshare --net $bpftool prog attach pinned \
+ /sys/fs/bpf/flow/flow_dissector flow_dissector; then
+ echo "Unexpected successful attach in namespace" >&2
+ err=1
+ fi
+
+ if ! $bpftool prog detach pinned \
+ /sys/fs/bpf/flow/flow_dissector flow_dissector; then
+ echo "Failed to detach flow dissector" >&2
+ err=1
+ fi
+
+ rm -rf /sys/fs/bpf/flow
+ else
+ echo "Skipping root flow dissector test, bpftool not found" >&2
+ fi
+
+ # Run the rest of the tests in a net namespace.
../net/in_netns.sh "$0" "$@"
- exit $?
-fi
+ err=$(( $err + $? ))
-# Determine selftest success via shell exit code
-exit_handler()
-{
- if (( $? == 0 )); then
+ if (( $err == 0 )); then
echo "selftests: $TESTNAME [PASS]";
else
echo "selftests: $TESTNAME [FAILED]";
fi
+ exit $err
+fi
+
+# Determine selftest success via shell exit code
+exit_handler()
+{
set +e
# Cleanup
diff --git a/tools/testing/selftests/bpf/test_libbpf.sh b/tools/testing/selftests/bpf/test_libbpf.sh
deleted file mode 100755
index 2989b2e2d856..000000000000
--- a/tools/testing/selftests/bpf/test_libbpf.sh
+++ /dev/null
@@ -1,43 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0
-
-export TESTNAME=test_libbpf
-
-# Determine selftest success via shell exit code
-exit_handler()
-{
- if [ $? -eq 0 ]; then
- echo "selftests: $TESTNAME [PASS]";
- else
- echo "$TESTNAME: failed at file $LAST_LOADED" 1>&2
- echo "selftests: $TESTNAME [FAILED]";
- fi
-}
-
-libbpf_open_file()
-{
- LAST_LOADED=$1
- if [ -n "$VERBOSE" ]; then
- ./test_libbpf_open $1
- else
- ./test_libbpf_open --quiet $1
- fi
-}
-
-# Exit script immediately (well catched by trap handler) if any
-# program/thing exits with a non-zero status.
-set -e
-
-# (Use 'trap -l' to list meaning of numbers)
-trap exit_handler 0 2 3 6 9
-
-libbpf_open_file test_l4lb.o
-
-# Load a program with BPF-to-BPF calls
-libbpf_open_file test_l4lb_noinline.o
-
-# Load a program compiled without the "-target bpf" flag
-libbpf_open_file test_xdp.o
-
-# Success
-exit 0
diff --git a/tools/testing/selftests/bpf/test_libbpf_open.c b/tools/testing/selftests/bpf/test_libbpf_open.c
deleted file mode 100644
index 9e9db202d218..000000000000
--- a/tools/testing/selftests/bpf/test_libbpf_open.c
+++ /dev/null
@@ -1,144 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0
- * Copyright (c) 2018 Jesper Dangaard Brouer, Red Hat Inc.
- */
-static const char *__doc__ =
- "Libbpf test program for loading BPF ELF object files";
-
-#include <stdlib.h>
-#include <stdio.h>
-#include <string.h>
-#include <stdarg.h>
-#include <bpf/libbpf.h>
-#include <getopt.h>
-
-#include "bpf_rlimit.h"
-
-static const struct option long_options[] = {
- {"help", no_argument, NULL, 'h' },
- {"debug", no_argument, NULL, 'D' },
- {"quiet", no_argument, NULL, 'q' },
- {0, 0, NULL, 0 }
-};
-
-static void usage(char *argv[])
-{
- int i;
-
- printf("\nDOCUMENTATION:\n%s\n\n", __doc__);
- printf(" Usage: %s (options-see-below) BPF_FILE\n", argv[0]);
- printf(" Listing options:\n");
- for (i = 0; long_options[i].name != 0; i++) {
- printf(" --%-12s", long_options[i].name);
- printf(" short-option: -%c",
- long_options[i].val);
- printf("\n");
- }
- printf("\n");
-}
-
-static bool debug = 0;
-static int libbpf_debug_print(enum libbpf_print_level level,
- const char *fmt, va_list args)
-{
- if (level == LIBBPF_DEBUG && !debug)
- return 0;
-
- fprintf(stderr, "[%d] ", level);
- return vfprintf(stderr, fmt, args);
-}
-
-#define EXIT_FAIL_LIBBPF EXIT_FAILURE
-#define EXIT_FAIL_OPTION 2
-
-int test_walk_progs(struct bpf_object *obj, bool verbose)
-{
- struct bpf_program *prog;
- int cnt = 0;
-
- bpf_object__for_each_program(prog, obj) {
- cnt++;
- if (verbose)
- printf("Prog (count:%d) section_name: %s\n", cnt,
- bpf_program__title(prog, false));
- }
- return 0;
-}
-
-int test_walk_maps(struct bpf_object *obj, bool verbose)
-{
- struct bpf_map *map;
- int cnt = 0;
-
- bpf_object__for_each_map(map, obj) {
- cnt++;
- if (verbose)
- printf("Map (count:%d) name: %s\n", cnt,
- bpf_map__name(map));
- }
- return 0;
-}
-
-int test_open_file(char *filename, bool verbose)
-{
- struct bpf_object *bpfobj = NULL;
- long err;
-
- if (verbose)
- printf("Open BPF ELF-file with libbpf: %s\n", filename);
-
- /* Load BPF ELF object file and check for errors */
- bpfobj = bpf_object__open(filename);
- err = libbpf_get_error(bpfobj);
- if (err) {
- char err_buf[128];
- libbpf_strerror(err, err_buf, sizeof(err_buf));
- if (verbose)
- printf("Unable to load eBPF objects in file '%s': %s\n",
- filename, err_buf);
- return EXIT_FAIL_LIBBPF;
- }
- test_walk_progs(bpfobj, verbose);
- test_walk_maps(bpfobj, verbose);
-
- if (verbose)
- printf("Close BPF ELF-file with libbpf: %s\n",
- bpf_object__name(bpfobj));
- bpf_object__close(bpfobj);
-
- return 0;
-}
-
-int main(int argc, char **argv)
-{
- char filename[1024] = { 0 };
- bool verbose = 1;
- int longindex = 0;
- int opt;
-
- libbpf_set_print(libbpf_debug_print);
-
- /* Parse commands line args */
- while ((opt = getopt_long(argc, argv, "hDq",
- long_options, &longindex)) != -1) {
- switch (opt) {
- case 'D':
- debug = 1;
- break;
- case 'q': /* Use in scripting mode */
- verbose = 0;
- break;
- case 'h':
- default:
- usage(argv);
- return EXIT_FAIL_OPTION;
- }
- }
- if (optind >= argc) {
- usage(argv);
- printf("ERROR: Expected BPF_FILE argument after options\n");
- return EXIT_FAIL_OPTION;
- }
- snprintf(filename, sizeof(filename), "%s", argv[optind]);
-
- return test_open_file(filename, verbose);
-}
diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c
index e1f1becda529..02eae1e864c2 100644
--- a/tools/testing/selftests/bpf/test_maps.c
+++ b/tools/testing/selftests/bpf/test_maps.c
@@ -1142,7 +1142,6 @@ out_sockmap:
#define MAPINMAP_PROG "./test_map_in_map.o"
static void test_map_in_map(void)
{
- struct bpf_program *prog;
struct bpf_object *obj;
struct bpf_map *map;
int mim_fd, fd, err;
@@ -1179,9 +1178,6 @@ static void test_map_in_map(void)
goto out_map_in_map;
}
- bpf_object__for_each_program(prog, obj) {
- bpf_program__set_xdp(prog);
- }
bpf_object__load(obj);
map = bpf_object__find_map_by_name(obj, "mim_array");
@@ -1717,9 +1713,9 @@ static void run_all_tests(void)
test_map_in_map();
}
-#define DECLARE
+#define DEFINE_TEST(name) extern void test_##name(void);
#include <map_tests/tests.h>
-#undef DECLARE
+#undef DEFINE_TEST
int main(void)
{
@@ -1731,9 +1727,9 @@ int main(void)
map_flags = BPF_F_NO_PREALLOC;
run_all_tests();
-#define CALL
+#define DEFINE_TEST(name) test_##name();
#include <map_tests/tests.h>
-#undef CALL
+#undef DEFINE_TEST
printf("test_maps: OK, %d SKIPPED\n", skips);
return 0;
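The DECLARE/CALL pair is replaced with the same X-macro convention test_progs
uses: map_tests/tests.h is generated at build time as a list of
DEFINE_TEST(name) entries, and each inclusion site chooses the expansion. With
a hypothetical generated header such as:

    /* hypothetical map_tests/tests.h contents */
    DEFINE_TEST(map_in_map)
    DEFINE_TEST(htab_map_batch_ops)

the first inclusion emits extern declarations and the second emits the calls,
so adding a test needs no edit to test_maps.c itself.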
diff --git a/tools/testing/selftests/bpf/test_offload.py b/tools/testing/selftests/bpf/test_offload.py
index 1afa22c88e42..8294ae3ffb3c 100755
--- a/tools/testing/selftests/bpf/test_offload.py
+++ b/tools/testing/selftests/bpf/test_offload.py
@@ -335,13 +335,22 @@ class NetdevSimDev:
"""
Class for netdevsim bus device and its attributes.
"""
+ @staticmethod
+ def ctrl_write(path, val):
+ fullpath = os.path.join("/sys/bus/netdevsim/", path)
+ try:
+ with open(fullpath, "w") as f:
+ f.write(val)
+ except OSError as e:
+ log("WRITE %s: %r" % (fullpath, val), -e.errno)
+ raise e
+ log("WRITE %s: %r" % (fullpath, val), 0)
def __init__(self, port_count=1):
addr = 0
while True:
try:
- with open("/sys/bus/netdevsim/new_device", "w") as f:
- f.write("%u %u" % (addr, port_count))
+ self.ctrl_write("new_device", "%u %u" % (addr, port_count))
except OSError as e:
if e.errno == errno.ENOSPC:
addr += 1
@@ -403,14 +412,13 @@ class NetdevSimDev:
return progs
def remove(self):
- with open("/sys/bus/netdevsim/del_device", "w") as f:
- f.write("%u" % self.addr)
+ self.ctrl_write("del_device", "%u" % (self.addr, ))
devs.remove(self)
def remove_nsim(self, nsim):
self.nsims.remove(nsim)
- with open("/sys/bus/netdevsim/devices/netdevsim%u/del_port" % self.addr ,"w") as f:
- f.write("%u" % nsim.port_index)
+ self.ctrl_write("devices/netdevsim%u/del_port" % (self.addr, ),
+ "%u" % (nsim.port_index, ))
class NetdevSim:
"""
diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c
index af75a1c7a458..7fa7d08a8104 100644
--- a/tools/testing/selftests/bpf/test_progs.c
+++ b/tools/testing/selftests/bpf/test_progs.c
@@ -20,7 +20,7 @@ struct prog_test_def {
bool tested;
bool need_cgroup_cleanup;
- const char *subtest_name;
+ char *subtest_name;
int subtest_num;
/* store counts before subtest started */
@@ -45,7 +45,7 @@ static void dump_test_log(const struct prog_test_def *test, bool failed)
fflush(stdout); /* exports env.log_buf & env.log_cnt */
- if (env.verbose || test->force_log || failed) {
+ if (env.verbosity > VERBOSE_NONE || test->force_log || failed) {
if (env.log_cnt) {
env.log_buf[env.log_cnt] = '\0';
fprintf(env.stdout, "%s", env.log_buf);
@@ -81,16 +81,17 @@ void test__end_subtest()
fprintf(env.stdout, "#%d/%d %s:%s\n",
test->test_num, test->subtest_num,
test->subtest_name, sub_error_cnt ? "FAIL" : "OK");
+
+ free(test->subtest_name);
+ test->subtest_name = NULL;
}
bool test__start_subtest(const char *name)
{
struct prog_test_def *test = env.test;
- if (test->subtest_name) {
+ if (test->subtest_name)
test__end_subtest();
- test->subtest_name = NULL;
- }
test->subtest_num++;
@@ -104,7 +105,13 @@ bool test__start_subtest(const char *name)
if (!should_run(&env.subtest_selector, test->subtest_num, name))
return false;
- test->subtest_name = name;
+ test->subtest_name = strdup(name);
+ if (!test->subtest_name) {
+ fprintf(env.stderr,
+ "Subtest #%d: failed to copy subtest name!\n",
+ test->subtest_num);
+ return false;
+ }
env.test->old_error_cnt = env.test->error_cnt;
return true;
@@ -306,7 +313,7 @@ void *spin_lock_thread(void *arg)
}
/* extern declarations for test funcs */
-#define DEFINE_TEST(name) extern void test_##name();
+#define DEFINE_TEST(name) extern void test_##name(void);
#include <prog_tests/tests.h>
#undef DEFINE_TEST
@@ -339,14 +346,14 @@ static const struct argp_option opts[] = {
{ "verifier-stats", ARG_VERIFIER_STATS, NULL, 0,
"Output verifier statistics", },
{ "verbose", ARG_VERBOSE, "LEVEL", OPTION_ARG_OPTIONAL,
- "Verbose output (use -vv for extra verbose output)" },
+ "Verbose output (use -vv or -vvv for progressively verbose output)" },
{},
};
static int libbpf_print_fn(enum libbpf_print_level level,
const char *format, va_list args)
{
- if (!env.very_verbose && level == LIBBPF_DEBUG)
+ if (env.verbosity < VERBOSE_VERY && level == LIBBPF_DEBUG)
return 0;
vprintf(format, args);
return 0;
@@ -412,6 +419,8 @@ int parse_num_list(const char *s, struct test_selector *sel)
return 0;
}
+extern int extra_prog_load_log_flags;
+
static error_t parse_arg(int key, char *arg, struct argp_state *state)
{
struct test_env *env = state->input;
@@ -453,9 +462,14 @@ static error_t parse_arg(int key, char *arg, struct argp_state *state)
env->verifier_stats = true;
break;
case ARG_VERBOSE:
+ env->verbosity = VERBOSE_NORMAL;
if (arg) {
if (strcmp(arg, "v") == 0) {
- env->very_verbose = true;
+ env->verbosity = VERBOSE_VERY;
+ extra_prog_load_log_flags = 1;
+ } else if (strcmp(arg, "vv") == 0) {
+ env->verbosity = VERBOSE_SUPER;
+ extra_prog_load_log_flags = 2;
} else {
fprintf(stderr,
"Unrecognized verbosity setting ('%s'), only -v and -vv are supported\n",
@@ -463,7 +477,6 @@ static error_t parse_arg(int key, char *arg, struct argp_state *state)
return -EINVAL;
}
}
- env->verbose = true;
break;
case ARGP_KEY_ARG:
argp_usage(state);
@@ -482,7 +495,7 @@ static void stdio_hijack(void)
env.stdout = stdout;
env.stderr = stderr;
- if (env.verbose) {
+ if (env.verbosity > VERBOSE_NONE) {
/* nothing to do, output to stdout by default */
return;
}
@@ -518,6 +531,33 @@ static void stdio_restore(void)
#endif
}
+/*
+ * Determine if test_progs is running as a "flavored" test runner and switch
+ * into the corresponding sub-directory to load the correct BPF objects.
+ *
+ * This is done by looking at the executable name. If it contains a
+ * "-flavor" suffix, then we are running as a flavored test runner.
+ */
+int cd_flavor_subdir(const char *exec_name)
+{
+ /* General form of argv[0] passed here is:
+ * some/path/to/test_progs[-flavor], where -flavor part is optional.
+ * First cut out "test_progs[-flavor]" part, then extract "flavor"
+ * part, if it's there.
+ */
+ const char *flavor = strrchr(exec_name, '/');
+
+ if (!flavor)
+ return 0;
+ flavor++;
+ flavor = strrchr(flavor, '-');
+ if (!flavor)
+ return 0;
+ flavor++;
+ printf("Switching to flavor '%s' subdirectory...\n", flavor);
+ return chdir(flavor);
+}
+
int main(int argc, char **argv)
{
static const struct argp argp = {
@@ -531,6 +571,10 @@ int main(int argc, char **argv)
if (err)
return err;
+ err = cd_flavor_subdir(argv[0]);
+ if (err)
+ return err;
+
libbpf_set_print(libbpf_print_fn);
srand(time(NULL));
diff --git a/tools/testing/selftests/bpf/test_progs.h b/tools/testing/selftests/bpf/test_progs.h
index 0c48f64f732b..8477df835979 100644
--- a/tools/testing/selftests/bpf/test_progs.h
+++ b/tools/testing/selftests/bpf/test_progs.h
@@ -39,6 +39,13 @@ typedef __u16 __sum16;
#include "trace_helpers.h"
#include "flow_dissector_load.h"
+enum verbosity {
+ VERBOSE_NONE,
+ VERBOSE_NORMAL,
+ VERBOSE_VERY,
+ VERBOSE_SUPER,
+};
+
struct test_selector {
const char *name;
bool *num_set;
@@ -49,8 +56,7 @@ struct test_env {
struct test_selector test_selector;
struct test_selector subtest_selector;
bool verifier_stats;
- bool verbose;
- bool very_verbose;
+ enum verbosity verbosity;
bool jit_enabled;
diff --git a/tools/testing/selftests/bpf/test_stub.c b/tools/testing/selftests/bpf/test_stub.c
index 84e81a89e2f9..47e132726203 100644
--- a/tools/testing/selftests/bpf/test_stub.c
+++ b/tools/testing/selftests/bpf/test_stub.c
@@ -5,6 +5,8 @@
#include <bpf/libbpf.h>
#include <string.h>
+int extra_prog_load_log_flags = 0;
+
int bpf_prog_test_load(const char *file, enum bpf_prog_type type,
struct bpf_object **pobj, int *prog_fd)
{
@@ -15,6 +17,7 @@ int bpf_prog_test_load(const char *file, enum bpf_prog_type type,
attr.prog_type = type;
attr.expected_attach_type = 0;
attr.prog_flags = BPF_F_TEST_RND_HI32;
+ attr.log_level = extra_prog_load_log_flags;
return bpf_prog_load_xattr(&attr, pobj, prog_fd);
}
@@ -35,6 +38,7 @@ int bpf_test_load_program(enum bpf_prog_type type, const struct bpf_insn *insns,
load_attr.license = license;
load_attr.kern_version = kern_version;
load_attr.prog_flags = BPF_F_TEST_RND_HI32;
+ load_attr.log_level = extra_prog_load_log_flags;
return bpf_load_program_xattr(&load_attr, log_buf, log_buf_sz);
}
diff --git a/tools/testing/selftests/bpf/test_sysctl.c b/tools/testing/selftests/bpf/test_sysctl.c
index 7c6e5b173f33..40bd93a6e7ae 100644
--- a/tools/testing/selftests/bpf/test_sysctl.c
+++ b/tools/testing/selftests/bpf/test_sysctl.c
@@ -121,6 +121,29 @@ static struct sysctl_test tests[] = {
.result = OP_EPERM,
},
{
+ .descr = "ctx:write sysctl:write read ok narrow",
+ .insns = {
+ /* u64 w = (u16)write & 1; */
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+ BPF_LDX_MEM(BPF_H, BPF_REG_7, BPF_REG_1,
+ offsetof(struct bpf_sysctl, write)),
+#else
+ BPF_LDX_MEM(BPF_H, BPF_REG_7, BPF_REG_1,
+ offsetof(struct bpf_sysctl, write) + 2),
+#endif
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_7, 1),
+ /* return 1 - w; */
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_7),
+ BPF_EXIT_INSN(),
+ },
+ .attach_type = BPF_CGROUP_SYSCTL,
+ .sysctl = "kernel/domainname",
+ .open_flags = O_WRONLY,
+ .newval = "(none)", /* same as default, should fail anyway */
+ .result = OP_EPERM,
+ },
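The #if above keeps the narrow two-byte load pointed at the least significant
half of the four-byte "write" field: offset +0 on little-endian, +2 on
big-endian. In C terms the instruction sequence is roughly:

    __u64 w = *(volatile __u16 *)&ctx->write;  /* narrow 2-byte ctx load */
    w &= 1;                                    /* w is 1 for a write */
    return 1 - w;                              /* deny (EPERM) on write */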
+ {
.descr = "ctx:write sysctl:read write reject",
.insns = {
/* write = X */
diff --git a/tools/testing/selftests/bpf/test_tc_tunnel.sh b/tools/testing/selftests/bpf/test_tc_tunnel.sh
index ff0d31d38061..7c76b841b17b 100755
--- a/tools/testing/selftests/bpf/test_tc_tunnel.sh
+++ b/tools/testing/selftests/bpf/test_tc_tunnel.sh
@@ -62,6 +62,10 @@ cleanup() {
if [[ -f "${infile}" ]]; then
rm "${infile}"
fi
+
+ if [[ -n $server_pid ]]; then
+ kill $server_pid 2> /dev/null
+ fi
}
server_listen() {
@@ -77,6 +81,7 @@ client_connect() {
verify_data() {
wait "${server_pid}"
+ server_pid=
# sha1sum returns two fields [sha1] [filepath]
# convert to bash array and access first elem
insum=($(sha1sum ${infile}))
diff --git a/tools/testing/selftests/bpf/verifier/jmp32.c b/tools/testing/selftests/bpf/verifier/jmp32.c
index f0961c58581e..bf0322eb5346 100644
--- a/tools/testing/selftests/bpf/verifier/jmp32.c
+++ b/tools/testing/selftests/bpf/verifier/jmp32.c
@@ -744,3 +744,86 @@
.result = ACCEPT,
.retval = 2,
},
+{
+ "jgt32: range bound deduction, reg op imm",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
+ BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
+ BPF_EMIT_CALL(BPF_FUNC_get_cgroup_classid),
+ BPF_JMP32_IMM(BPF_JGT, BPF_REG_0, 1, 5),
+ BPF_MOV32_REG(BPF_REG_6, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 32),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_6, 32),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_6),
+ BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
+ BPF_MOV32_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_hash_48b = { 4 },
+ .result = ACCEPT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "jgt32: range bound deduction, reg1 op reg2, reg1 unknown",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
+ BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
+ BPF_EMIT_CALL(BPF_FUNC_get_cgroup_classid),
+ BPF_MOV32_IMM(BPF_REG_2, 1),
+ BPF_JMP32_REG(BPF_JGT, BPF_REG_0, BPF_REG_2, 5),
+ BPF_MOV32_REG(BPF_REG_6, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 32),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_6, 32),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_6),
+ BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
+ BPF_MOV32_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_hash_48b = { 4 },
+ .result = ACCEPT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "jle32: range bound deduction, reg1 op reg2, reg2 unknown",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
+ BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
+ BPF_EMIT_CALL(BPF_FUNC_get_cgroup_classid),
+ BPF_MOV32_IMM(BPF_REG_2, 1),
+ BPF_JMP32_REG(BPF_JLE, BPF_REG_2, BPF_REG_0, 5),
+ BPF_MOV32_REG(BPF_REG_6, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 32),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_6, 32),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_6),
+ BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
+ BPF_MOV32_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_hash_48b = { 4 },
+ .result = ACCEPT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
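All three new cases model the same C-level pattern, where a 32-bit conditional
jump must bound a sub-register before it is used as a pointer offset (rough
sketch):

    __u32 r = bpf_get_cgroup_classid(skb);  /* unknown 32-bit value */
    if (r > 1)                              /* jgt32 bounds r to [0, 1] */
        return 0;
    val[r] = 0;                             /* safe only if the bound held */

checking that the verifier deduces the [0, 1] range from BPF_JGT/BPF_JLE in
the 32-bit jump class rather than rejecting the byte store.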
diff --git a/tools/testing/selftests/bpf/verifier/loops1.c b/tools/testing/selftests/bpf/verifier/loops1.c
index 1fc4e61e9f9f..1af37187dc12 100644
--- a/tools/testing/selftests/bpf/verifier/loops1.c
+++ b/tools/testing/selftests/bpf/verifier/loops1.c
@@ -187,3 +187,20 @@
.prog_type = BPF_PROG_TYPE_XDP,
.retval = 55,
},
+{
+ "taken loop with back jump to 1st insn, 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 10),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 1),
+ BPF_JMP32_IMM(BPF_JNE, BPF_REG_1, 0, -3),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .retval = 55,
+},
diff --git a/tools/testing/selftests/breakpoints/breakpoint_test_arm64.c b/tools/testing/selftests/breakpoints/breakpoint_test_arm64.c
index 58ed5eeab709..ad41ea69001b 100644
--- a/tools/testing/selftests/breakpoints/breakpoint_test_arm64.c
+++ b/tools/testing/selftests/breakpoints/breakpoint_test_arm64.c
@@ -109,7 +109,7 @@ static bool set_watchpoint(pid_t pid, int size, int wp)
return false;
}
-static bool arun_test(int wr_size, int wp_size, int wr, int wp)
+static bool run_test(int wr_size, int wp_size, int wr, int wp)
{
int status;
siginfo_t siginfo;
diff --git a/tools/testing/selftests/cgroup/Makefile b/tools/testing/selftests/cgroup/Makefile
index 8d369b6a2069..66aafe1f5746 100644
--- a/tools/testing/selftests/cgroup/Makefile
+++ b/tools/testing/selftests/cgroup/Makefile
@@ -1,8 +1,10 @@
# SPDX-License-Identifier: GPL-2.0
-CFLAGS += -Wall
+CFLAGS += -Wall -pthread
all:
+TEST_FILES := with_stress.sh
+TEST_PROGS := test_stress.sh
TEST_GEN_PROGS = test_memcontrol
TEST_GEN_PROGS += test_core
TEST_GEN_PROGS += test_freezer
diff --git a/tools/testing/selftests/cgroup/cgroup_util.c b/tools/testing/selftests/cgroup/cgroup_util.c
index bdb69599c4bd..8f7131dcf1ff 100644
--- a/tools/testing/selftests/cgroup/cgroup_util.c
+++ b/tools/testing/selftests/cgroup/cgroup_util.c
@@ -158,6 +158,22 @@ long cg_read_key_long(const char *cgroup, const char *control, const char *key)
return atol(ptr + strlen(key));
}
+long cg_read_lc(const char *cgroup, const char *control)
+{
+ char buf[PAGE_SIZE];
+ const char delim[] = "\n";
+ char *line;
+ long cnt = 0;
+
+ if (cg_read(cgroup, control, buf, sizeof(buf)))
+ return -1;
+
+ for (line = strtok(buf, delim); line; line = strtok(NULL, delim))
+ cnt++;
+
+ return cnt;
+}
+
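cg_read_lc() counts newline-delimited entries in a control file; usage from
test_core.c further below:

    /* all 13 worker threads plus the main thread must have migrated */
    if (cg_read_lc(dst, "cgroup.threads") != n_threads + 1)
        goto cleanup;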
int cg_write(const char *cgroup, const char *control, char *buf)
{
char path[PATH_MAX];
@@ -282,10 +298,12 @@ int cg_enter(const char *cgroup, int pid)
int cg_enter_current(const char *cgroup)
{
- char pidbuf[64];
+ return cg_write(cgroup, "cgroup.procs", "0");
+}
- snprintf(pidbuf, sizeof(pidbuf), "%d", getpid());
- return cg_write(cgroup, "cgroup.procs", pidbuf);
+int cg_enter_current_thread(const char *cgroup)
+{
+ return cg_write(cgroup, "cgroup.threads", "0");
}
int cg_run(const char *cgroup,
@@ -410,11 +428,25 @@ int set_oom_adj_score(int pid, int score)
return 0;
}
-char proc_read_text(int pid, const char *item, char *buf, size_t size)
+ssize_t proc_read_text(int pid, bool thread, const char *item, char *buf, size_t size)
{
char path[PATH_MAX];
- snprintf(path, sizeof(path), "/proc/%d/%s", pid, item);
+ if (!pid)
+ snprintf(path, sizeof(path), "/proc/%s/%s",
+ thread ? "thread-self" : "self", item);
+ else
+ snprintf(path, sizeof(path), "/proc/%d/%s", pid, item);
return read_text(path, buf, size);
}
+
+int proc_read_strstr(int pid, bool thread, const char *item, const char *needle)
+{
+ char buf[PAGE_SIZE];
+
+ if (proc_read_text(pid, thread, item, buf, sizeof(buf)) < 0)
+ return -1;
+
+ return strstr(buf, needle) ? 0 : -1;
+}
diff --git a/tools/testing/selftests/cgroup/cgroup_util.h b/tools/testing/selftests/cgroup/cgroup_util.h
index c72f28046bfa..49c54fbdb229 100644
--- a/tools/testing/selftests/cgroup/cgroup_util.h
+++ b/tools/testing/selftests/cgroup/cgroup_util.h
@@ -1,4 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
+#include <stdbool.h>
#include <stdlib.h>
#define PAGE_SIZE 4096
@@ -29,12 +30,14 @@ extern int cg_read_strstr(const char *cgroup, const char *control,
const char *needle);
extern long cg_read_long(const char *cgroup, const char *control);
long cg_read_key_long(const char *cgroup, const char *control, const char *key);
+extern long cg_read_lc(const char *cgroup, const char *control);
extern int cg_write(const char *cgroup, const char *control, char *buf);
extern int cg_run(const char *cgroup,
int (*fn)(const char *cgroup, void *arg),
void *arg);
extern int cg_enter(const char *cgroup, int pid);
extern int cg_enter_current(const char *cgroup);
+extern int cg_enter_current_thread(const char *cgroup);
extern int cg_run_nowait(const char *cgroup,
int (*fn)(const char *cgroup, void *arg),
void *arg);
@@ -45,4 +48,5 @@ extern int is_swap_enabled(void);
extern int set_oom_adj_score(int pid, int score);
extern int cg_wait_for_proc_count(const char *cgroup, int count);
extern int cg_killall(const char *cgroup);
-extern char proc_read_text(int pid, const char *item, char *buf, size_t size);
+extern ssize_t proc_read_text(int pid, bool thread, const char *item, char *buf, size_t size);
+extern int proc_read_strstr(int pid, bool thread, const char *item, const char *needle);
diff --git a/tools/testing/selftests/cgroup/test_core.c b/tools/testing/selftests/cgroup/test_core.c
index 79053a4f4783..c5ca669feb2b 100644
--- a/tools/testing/selftests/cgroup/test_core.c
+++ b/tools/testing/selftests/cgroup/test_core.c
@@ -5,6 +5,9 @@
#include <unistd.h>
#include <stdio.h>
#include <errno.h>
+#include <signal.h>
+#include <string.h>
+#include <pthread.h>
#include "../kselftest.h"
#include "cgroup_util.h"
@@ -354,6 +357,147 @@ cleanup:
return ret;
}
+static void *dummy_thread_fn(void *arg)
+{
+ return (void *)(size_t)pause();
+}
+
+/*
+ * Test threadgroup migration.
+ * All threads of a process are migrated together.
+ */
+static int test_cgcore_proc_migration(const char *root)
+{
+ int ret = KSFT_FAIL;
+ int t, c_threads, n_threads = 13;
+ char *src = NULL, *dst = NULL;
+ pthread_t threads[n_threads];
+
+ src = cg_name(root, "cg_src");
+ dst = cg_name(root, "cg_dst");
+ if (!src || !dst)
+ goto cleanup;
+
+ if (cg_create(src))
+ goto cleanup;
+ if (cg_create(dst))
+ goto cleanup;
+
+ if (cg_enter_current(src))
+ goto cleanup;
+
+ for (c_threads = 0; c_threads < n_threads; ++c_threads) {
+ if (pthread_create(&threads[c_threads], NULL, dummy_thread_fn, NULL))
+ goto cleanup;
+ }
+
+ cg_enter_current(dst);
+ if (cg_read_lc(dst, "cgroup.threads") != n_threads + 1)
+ goto cleanup;
+
+ ret = KSFT_PASS;
+
+cleanup:
+ for (t = 0; t < c_threads; ++t) {
+ pthread_cancel(threads[t]);
+ }
+
+ for (t = 0; t < c_threads; ++t) {
+ pthread_join(threads[t], NULL);
+ }
+
+ cg_enter_current(root);
+
+ if (dst)
+ cg_destroy(dst);
+ if (src)
+ cg_destroy(src);
+ free(dst);
+ free(src);
+ return ret;
+}
+
+static void *migrating_thread_fn(void *arg)
+{
+ int g, i, n_iterations = 1000;
+ char **grps = arg;
+ char lines[3][PATH_MAX];
+
+ for (g = 1; g < 3; ++g)
+ snprintf(lines[g], sizeof(lines[g]), "0::%s", grps[g] + strlen(grps[0]));
+
+ for (i = 0; i < n_iterations; ++i) {
+ cg_enter_current_thread(grps[(i % 2) + 1]);
+
+ if (proc_read_strstr(0, 1, "cgroup", lines[(i % 2) + 1]))
+ return (void *)-1;
+ }
+ return NULL;
+}
+
+/*
+ * Test single thread migration.
+ * Threaded cgroups allow successful migration of a thread.
+ */
+static int test_cgcore_thread_migration(const char *root)
+{
+ int ret = KSFT_FAIL;
+ char *dom = NULL;
+ char line[PATH_MAX];
+ char *grps[3] = { (char *)root, NULL, NULL };
+ pthread_t thr;
+ void *retval;
+
+ dom = cg_name(root, "cg_dom");
+ grps[1] = cg_name(root, "cg_dom/cg_src");
+ grps[2] = cg_name(root, "cg_dom/cg_dst");
+ if (!grps[1] || !grps[2] || !dom)
+ goto cleanup;
+
+ if (cg_create(dom))
+ goto cleanup;
+ if (cg_create(grps[1]))
+ goto cleanup;
+ if (cg_create(grps[2]))
+ goto cleanup;
+
+ if (cg_write(grps[1], "cgroup.type", "threaded"))
+ goto cleanup;
+ if (cg_write(grps[2], "cgroup.type", "threaded"))
+ goto cleanup;
+
+ if (cg_enter_current(grps[1]))
+ goto cleanup;
+
+ if (pthread_create(&thr, NULL, migrating_thread_fn, grps))
+ goto cleanup;
+
+ if (pthread_join(thr, &retval))
+ goto cleanup;
+
+ if (retval)
+ goto cleanup;
+
+ snprintf(line, sizeof(line), "0::%s", grps[1] + strlen(grps[0]));
+ if (proc_read_strstr(0, 1, "cgroup", line))
+ goto cleanup;
+
+ ret = KSFT_PASS;
+
+cleanup:
+ cg_enter_current(root);
+ if (grps[2])
+ cg_destroy(grps[2]);
+ if (grps[1])
+ cg_destroy(grps[1]);
+ if (dom)
+ cg_destroy(dom);
+ free(grps[2]);
+ free(grps[1]);
+ free(dom);
+ return ret;
+}
+
#define T(x) { x, #x }
struct corecg_test {
int (*fn)(const char *root);
@@ -366,6 +510,8 @@ struct corecg_test {
T(test_cgcore_parent_becomes_threaded),
T(test_cgcore_invalid_domain),
T(test_cgcore_populated),
+ T(test_cgcore_proc_migration),
+ T(test_cgcore_thread_migration),
};
#undef T
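The two migration tests above lean on cg_enter_current_thread(), defined in cgroup_util.c rather than in this hunk. Assuming it mirrors cg_enter_current() but writes the caller's TID to cgroup.threads — the cgroup v2 interface for moving a single thread between threaded cgroups — a sketch would be:

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>
#include "cgroup_util.h"

/* Sketch: move only the calling thread; cg_write() is the helper
 * declared in cgroup_util.h. */
static int cg_enter_current_thread_sketch(const char *cgroup)
{
	char tid[32];

	snprintf(tid, sizeof(tid), "%d", (int)syscall(SYS_gettid));
	return cg_write(cgroup, "cgroup.threads", tid);
}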
diff --git a/tools/testing/selftests/cgroup/test_freezer.c b/tools/testing/selftests/cgroup/test_freezer.c
index 0fc1b6d4b0f9..23d8fa4a3e4e 100644
--- a/tools/testing/selftests/cgroup/test_freezer.c
+++ b/tools/testing/selftests/cgroup/test_freezer.c
@@ -72,6 +72,7 @@ static int cg_prepare_for_wait(const char *cgroup)
if (ret == -1) {
debug("Error: inotify_add_watch() failed\n");
close(fd);
+ fd = -1;
}
return fd;
@@ -701,7 +702,7 @@ static int proc_check_stopped(int pid)
char buf[PAGE_SIZE];
int len;
- len = proc_read_text(pid, "stat", buf, sizeof(buf));
+ len = proc_read_text(pid, 0, "stat", buf, sizeof(buf));
if (len == -1) {
debug("Can't get %d stat\n", pid);
return -1;
diff --git a/tools/testing/selftests/cgroup/test_stress.sh b/tools/testing/selftests/cgroup/test_stress.sh
new file mode 100755
index 000000000000..15d9d5896394
--- /dev/null
+++ b/tools/testing/selftests/cgroup/test_stress.sh
@@ -0,0 +1,4 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+./with_stress.sh -s subsys -s fork ./test_core
diff --git a/tools/testing/selftests/cgroup/with_stress.sh b/tools/testing/selftests/cgroup/with_stress.sh
new file mode 100755
index 000000000000..e28c35008f5b
--- /dev/null
+++ b/tools/testing/selftests/cgroup/with_stress.sh
@@ -0,0 +1,101 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
+stress_fork()
+{
+ while true ; do
+ /usr/bin/true
+ sleep 0.01
+ done
+}
+
+stress_subsys()
+{
+ local verb=+
+ while true ; do
+ echo $verb$subsys_ctrl >$sysfs/cgroup.subtree_control
+ [ $verb = "+" ] && verb=- || verb=+
+ # incommensurable period with other stresses
+ sleep 0.011
+ done
+}
+
+init_and_check()
+{
+ sysfs=`mount -t cgroup2 | head -1 | awk '{ print $3 }'`
+ if [ ! -d "$sysfs" ]; then
+ echo "Skipping: cgroup2 is not mounted" >&2
+ exit $ksft_skip
+ fi
+
+ if ! echo +$subsys_ctrl >$sysfs/cgroup.subtree_control ; then
+ echo "Skipping: cannot enable $subsys_ctrl in $sysfs" >&2
+ exit $ksft_skip
+ fi
+
+ if ! echo -$subsys_ctrl >$sysfs/cgroup.subtree_control ; then
+ echo "Skipping: cannot disable $subsys_ctrl in $sysfs" >&2
+ exit $ksft_skip
+ fi
+}
+
+declare -a stresses
+declare -a stress_pids
+duration=5
+rc=0
+subsys_ctrl=cpuset
+sysfs=
+
+while getopts c:d:hs: opt; do
+ case $opt in
+ c)
+ subsys_ctrl=$OPTARG
+ ;;
+ d)
+ duration=$OPTARG
+ ;;
+ h)
+ echo "Usage $0 [ -s stress ] ... [ -d duration ] [-c controller] cmd args .."
+ echo -e "\t default duration $duration seconds"
+ echo -e "\t default controller $subsys_ctrl"
+ exit
+ ;;
+ s)
+ func=stress_$OPTARG
+ if [ "x$(type -t $func)" != "xfunction" ] ; then
+ echo "Unknown stress $OPTARG"
+ exit 1
+ fi
+ stresses+=($func)
+ ;;
+ esac
+done
+shift $((OPTIND - 1))
+
+init_and_check
+
+for s in ${stresses[*]} ; do
+ $s &
+ stress_pids+=($!)
+done
+
+
+time=0
+start=$(date +%s)
+
+while [ $time -lt $duration ] ; do
+ $*
+ rc=$?
+ [ $rc -eq 0 ] || break
+ time=$(($(date +%s) - $start))
+done
+
+for pid in ${stress_pids[*]} ; do
+ kill -SIGTERM $pid
+ wait $pid
+done
+
+exit $rc
diff --git a/tools/testing/selftests/clone3/.gitignore b/tools/testing/selftests/clone3/.gitignore
new file mode 100644
index 000000000000..0dc4f32c6cb8
--- /dev/null
+++ b/tools/testing/selftests/clone3/.gitignore
@@ -0,0 +1,3 @@
+clone3
+clone3_clear_sighand
+clone3_set_tid
diff --git a/tools/testing/selftests/clone3/Makefile b/tools/testing/selftests/clone3/Makefile
new file mode 100644
index 000000000000..cf976c732906
--- /dev/null
+++ b/tools/testing/selftests/clone3/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+CFLAGS += -g -I../../../../usr/include/
+
+TEST_GEN_PROGS := clone3 clone3_clear_sighand clone3_set_tid
+
+include ../lib.mk
diff --git a/tools/testing/selftests/clone3/clone3.c b/tools/testing/selftests/clone3/clone3.c
new file mode 100644
index 000000000000..f14c269a5a18
--- /dev/null
+++ b/tools/testing/selftests/clone3/clone3.c
@@ -0,0 +1,202 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Based on Christian Brauner's clone3() example */
+
+#define _GNU_SOURCE
+#include <errno.h>
+#include <inttypes.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/syscall.h>
+#include <sys/types.h>
+#include <sys/un.h>
+#include <sys/wait.h>
+#include <unistd.h>
+#include <sched.h>
+
+#include "../kselftest.h"
+#include "clone3_selftests.h"
+
+/*
+ * Different sizes of struct clone_args
+ */
+#ifndef CLONE3_ARGS_SIZE_V0
+#define CLONE3_ARGS_SIZE_V0 64
+#endif
+
+enum test_mode {
+ CLONE3_ARGS_NO_TEST,
+ CLONE3_ARGS_ALL_0,
+ CLONE3_ARGS_INVAL_EXIT_SIGNAL_BIG,
+ CLONE3_ARGS_INVAL_EXIT_SIGNAL_NEG,
+ CLONE3_ARGS_INVAL_EXIT_SIGNAL_CSIG,
+ CLONE3_ARGS_INVAL_EXIT_SIGNAL_NSIG,
+};
+
+static int call_clone3(uint64_t flags, size_t size, enum test_mode test_mode)
+{
+ struct clone_args args = {
+ .flags = flags,
+ .exit_signal = SIGCHLD,
+ };
+
+ struct clone_args_extended {
+ struct clone_args args;
+ __aligned_u64 excess_space[2];
+ } args_ext;
+
+ pid_t pid = -1;
+ int status;
+
+ memset(&args_ext, 0, sizeof(args_ext));
+ if (size > sizeof(struct clone_args))
+ args_ext.excess_space[1] = 1;
+
+ if (size == 0)
+ size = sizeof(struct clone_args);
+
+ switch (test_mode) {
+ case CLONE3_ARGS_ALL_0:
+ args.flags = 0;
+ args.exit_signal = 0;
+ break;
+ case CLONE3_ARGS_INVAL_EXIT_SIGNAL_BIG:
+ args.exit_signal = 0xbadc0ded00000000ULL;
+ break;
+ case CLONE3_ARGS_INVAL_EXIT_SIGNAL_NEG:
+ args.exit_signal = 0x0000000080000000ULL;
+ break;
+ case CLONE3_ARGS_INVAL_EXIT_SIGNAL_CSIG:
+ args.exit_signal = 0x0000000000000100ULL;
+ break;
+ case CLONE3_ARGS_INVAL_EXIT_SIGNAL_NSIG:
+ args.exit_signal = 0x00000000000000f0ULL;
+ break;
+ }
+
+ memcpy(&args_ext.args, &args, sizeof(struct clone_args));
+
+ pid = sys_clone3((struct clone_args *)&args_ext, size);
+ if (pid < 0) {
+ ksft_print_msg("%s - Failed to create new process\n",
+ strerror(errno));
+ return -errno;
+ }
+
+ if (pid == 0) {
+ ksft_print_msg("I am the child, my PID is %d\n", getpid());
+ _exit(EXIT_SUCCESS);
+ }
+
+ ksft_print_msg("I am the parent (%d). My child's pid is %d\n",
+ getpid(), pid);
+
+ if (waitpid(-1, &status, __WALL) < 0) {
+ ksft_print_msg("Child returned %s\n", strerror(errno));
+ return -errno;
+ }
+ if (WEXITSTATUS(status))
+ return WEXITSTATUS(status);
+
+ return 0;
+}
+
+static void test_clone3(uint64_t flags, size_t size, int expected,
+ enum test_mode test_mode)
+{
+ int ret;
+
+ ksft_print_msg(
+ "[%d] Trying clone3() with flags %#" PRIx64 " (size %zu)\n",
+ getpid(), flags, size);
+ ret = call_clone3(flags, size, test_mode);
+ ksft_print_msg("[%d] clone3() with flags says: %d expected %d\n",
+ getpid(), ret, expected);
+ if (ret != expected)
+ ksft_test_result_fail(
+ "[%d] Result (%d) is different than expected (%d)\n",
+ getpid(), ret, expected);
+ else
+ ksft_test_result_pass(
+ "[%d] Result (%d) matches expectation (%d)\n",
+ getpid(), ret, expected);
+}
+
+int main(int argc, char *argv[])
+{
+ pid_t pid;
+
+ uid_t uid = getuid();
+
+ test_clone3_supported();
+ ksft_print_header();
+ ksft_set_plan(17);
+
+ /* Just a simple clone3() should return 0. */
+ test_clone3(0, 0, 0, CLONE3_ARGS_NO_TEST);
+
+ /* Do a clone3() in a new PID NS. */
+ if (uid == 0)
+ test_clone3(CLONE_NEWPID, 0, 0, CLONE3_ARGS_NO_TEST);
+ else
+ ksft_test_result_skip("Skipping clone3() with CLONE_NEWPID\n");
+
+ /* Do a clone3() with CLONE3_ARGS_SIZE_V0. */
+ test_clone3(0, CLONE3_ARGS_SIZE_V0, 0, CLONE3_ARGS_NO_TEST);
+
+ /* Do a clone3() with CLONE3_ARGS_SIZE_V0 - 8 */
+ test_clone3(0, CLONE3_ARGS_SIZE_V0 - 8, -EINVAL, CLONE3_ARGS_NO_TEST);
+
+ /* Do a clone3() with sizeof(struct clone_args) + 8 */
+ test_clone3(0, sizeof(struct clone_args) + 8, 0, CLONE3_ARGS_NO_TEST);
+
+ /* Do a clone3() with exit_signal having highest 32 bits non-zero */
+ test_clone3(0, 0, -EINVAL, CLONE3_ARGS_INVAL_EXIT_SIGNAL_BIG);
+
+ /* Do a clone3() with negative 32-bit exit_signal */
+ test_clone3(0, 0, -EINVAL, CLONE3_ARGS_INVAL_EXIT_SIGNAL_NEG);
+
+ /* Do a clone3() with exit_signal not fitting into CSIGNAL mask */
+ test_clone3(0, 0, -EINVAL, CLONE3_ARGS_INVAL_EXIT_SIGNAL_CSIG);
+
+ /* Do a clone3() with NSIG < exit_signal < CSIG */
+ test_clone3(0, 0, -EINVAL, CLONE3_ARGS_INVAL_EXIT_SIGNAL_NSIG);
+
+ test_clone3(0, sizeof(struct clone_args) + 8, 0, CLONE3_ARGS_ALL_0);
+
+ test_clone3(0, sizeof(struct clone_args) + 16, -E2BIG,
+ CLONE3_ARGS_ALL_0);
+
+ test_clone3(0, sizeof(struct clone_args) * 2, -E2BIG,
+ CLONE3_ARGS_ALL_0);
+
+ /* Do a clone3() with > page size */
+ test_clone3(0, getpagesize() + 8, -E2BIG, CLONE3_ARGS_NO_TEST);
+
+ /* Do a clone3() with CLONE3_ARGS_SIZE_V0 in a new PID NS. */
+ if (uid == 0)
+ test_clone3(CLONE_NEWPID, CLONE3_ARGS_SIZE_V0, 0,
+ CLONE3_ARGS_NO_TEST);
+ else
+ ksft_test_result_skip("Skipping clone3() with CLONE_NEWPID\n");
+
+ /* Do a clone3() with CLONE3_ARGS_SIZE_V0 - 8 in a new PID NS */
+ test_clone3(CLONE_NEWPID, CLONE3_ARGS_SIZE_V0 - 8, -EINVAL,
+ CLONE3_ARGS_NO_TEST);
+
+ /* Do a clone3() with sizeof(struct clone_args) + 8 in a new PID NS */
+ if (uid == 0)
+ test_clone3(CLONE_NEWPID, sizeof(struct clone_args) + 8, 0,
+ CLONE3_ARGS_NO_TEST);
+ else
+ ksft_test_result_skip("Skipping clone3() with CLONE_NEWPID\n");
+
+ /* Do a clone3() with > page size in a new PID NS */
+ test_clone3(CLONE_NEWPID, getpagesize() + 8, -E2BIG,
+ CLONE3_ARGS_NO_TEST);
+
+ return !ksft_get_fail_cnt() ? ksft_exit_pass() : ksft_exit_fail();
+}
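The size-probing cases above encode the clone3() extensibility rule: a userspace struct larger than the kernel's native one is accepted only when every excess byte is zero, and rejected with -E2BIG otherwise — which is why args_ext poisons excess_space[1] for oversized calls. A sketch of that rule as seen from userspace, with ksize standing in for the kernel's struct size:

#include <errno.h>
#include <stddef.h>

/* Sketch of the rule exercised by the CLONE3_ARGS_ALL_0 cases. */
static int accept_user_struct(const unsigned char *ubuf, size_t usize,
			      size_t ksize)
{
	size_t i;

	for (i = ksize; i < usize; i++)
		if (ubuf[i] != 0)
			return -E2BIG;	/* non-zero tail: unknown extension */
	return 0;			/* zero tail: treated as known struct */
}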
diff --git a/tools/testing/selftests/clone3/clone3_clear_sighand.c b/tools/testing/selftests/clone3/clone3_clear_sighand.c
new file mode 100644
index 000000000000..9e1af8aa7698
--- /dev/null
+++ b/tools/testing/selftests/clone3/clone3_clear_sighand.c
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#define _GNU_SOURCE
+#include <errno.h>
+#include <sched.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <sys/syscall.h>
+#include <sys/wait.h>
+
+#include "../kselftest.h"
+#include "clone3_selftests.h"
+
+#ifndef CLONE_CLEAR_SIGHAND
+#define CLONE_CLEAR_SIGHAND 0x100000000ULL
+#endif
+
+static void nop_handler(int signo)
+{
+}
+
+static int wait_for_pid(pid_t pid)
+{
+ int status, ret;
+
+again:
+ ret = waitpid(pid, &status, 0);
+ if (ret == -1) {
+ if (errno == EINTR)
+ goto again;
+
+ return -1;
+ }
+
+ if (!WIFEXITED(status))
+ return -1;
+
+ return WEXITSTATUS(status);
+}
+
+static void test_clone3_clear_sighand(void)
+{
+ int ret;
+ pid_t pid;
+ struct clone_args args = {};
+ struct sigaction act;
+
+ /*
+ * Check that CLONE_CLEAR_SIGHAND and CLONE_SIGHAND are mutually
+ * exclusive.
+ */
+ args.flags |= CLONE_CLEAR_SIGHAND | CLONE_SIGHAND;
+ args.exit_signal = SIGCHLD;
+ pid = sys_clone3(&args, sizeof(args));
+ if (pid > 0)
+ ksft_exit_fail_msg(
+ "clone3(CLONE_CLEAR_SIGHAND | CLONE_SIGHAND) succeeded\n");
+
+ act.sa_handler = nop_handler;
+ ret = sigemptyset(&act.sa_mask);
+ if (ret < 0)
+ ksft_exit_fail_msg("%s - sigemptyset() failed\n",
+ strerror(errno));
+
+ act.sa_flags = 0;
+
+ /* Register signal handler for SIGUSR1 */
+ ret = sigaction(SIGUSR1, &act, NULL);
+ if (ret < 0)
+ ksft_exit_fail_msg(
+ "%s - sigaction(SIGUSR1, &act, NULL) failed\n",
+ strerror(errno));
+
+ /* Register signal handler for SIGUSR2 */
+ ret = sigaction(SIGUSR2, &act, NULL);
+ if (ret < 0)
+ ksft_exit_fail_msg(
+ "%s - sigaction(SIGUSR2, &act, NULL) failed\n",
+ strerror(errno));
+
+ /* Check that CLONE_CLEAR_SIGHAND works. */
+ args.flags = CLONE_CLEAR_SIGHAND;
+ pid = sys_clone3(&args, sizeof(args));
+ if (pid < 0)
+ ksft_exit_fail_msg("%s - clone3(CLONE_CLEAR_SIGHAND) failed\n",
+ strerror(errno));
+
+ if (pid == 0) {
+ ret = sigaction(SIGUSR1, NULL, &act);
+ if (ret < 0)
+ exit(EXIT_FAILURE);
+
+ if (act.sa_handler != SIG_DFL)
+ exit(EXIT_FAILURE);
+
+ ret = sigaction(SIGUSR2, NULL, &act);
+ if (ret < 0)
+ exit(EXIT_FAILURE);
+
+ if (act.sa_handler != SIG_DFL)
+ exit(EXIT_FAILURE);
+
+ exit(EXIT_SUCCESS);
+ }
+
+ ret = wait_for_pid(pid);
+ if (ret)
+ ksft_exit_fail_msg(
+ "Failed to clear signal handler for child process\n");
+
+ ksft_test_result_pass("Cleared signal handlers for child process\n");
+}
+
+int main(int argc, char **argv)
+{
+ ksft_print_header();
+ test_clone3_supported();
+
+ ksft_set_plan(1);
+
+ test_clone3_clear_sighand();
+
+ return ksft_exit_pass();
+}
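Before CLONE_CLEAR_SIGHAND, getting a child with default signal dispositions meant resetting each handler after fork(), with a window in which an inherited handler could still run. A sketch of that manual fallback, which the new flag replaces atomically:

#define _GNU_SOURCE
#include <signal.h>

/* Sketch: per-signal reset that CLONE_CLEAR_SIGHAND performs in one
 * step at process creation. */
static void clear_sighand_fallback(void)
{
	struct sigaction sa = { .sa_handler = SIG_DFL };
	int sig;

	sigemptyset(&sa.sa_mask);
	for (sig = 1; sig < NSIG; sig++)
		sigaction(sig, &sa, NULL);	/* SIGKILL/SIGSTOP fail; ignored */
}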
diff --git a/tools/testing/selftests/clone3/clone3_selftests.h b/tools/testing/selftests/clone3/clone3_selftests.h
new file mode 100644
index 000000000000..a3f2c8ad8bcc
--- /dev/null
+++ b/tools/testing/selftests/clone3/clone3_selftests.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _CLONE3_SELFTESTS_H
+#define _CLONE3_SELFTESTS_H
+
+#define _GNU_SOURCE
+#include <sched.h>
+#include <stdint.h>
+#include <syscall.h>
+#include <linux/types.h>
+
+#define ptr_to_u64(ptr) ((__u64)((uintptr_t)(ptr)))
+
+#ifndef __NR_clone3
+#define __NR_clone3 -1
+struct clone_args {
+ __aligned_u64 flags;
+ __aligned_u64 pidfd;
+ __aligned_u64 child_tid;
+ __aligned_u64 parent_tid;
+ __aligned_u64 exit_signal;
+ __aligned_u64 stack;
+ __aligned_u64 stack_size;
+ __aligned_u64 tls;
+ __aligned_u64 set_tid;
+ __aligned_u64 set_tid_size;
+};
+#endif
+
+static pid_t sys_clone3(struct clone_args *args, size_t size)
+{
+ fflush(stdout);
+ fflush(stderr);
+ return syscall(__NR_clone3, args, size);
+}
+
+static inline void test_clone3_supported(void)
+{
+ pid_t pid;
+ struct clone_args args = {};
+
+ if (__NR_clone3 < 0)
+ ksft_exit_skip("clone3() syscall is not supported\n");
+
+ /* Set to something that will always cause EINVAL. */
+ args.exit_signal = -1;
+ pid = sys_clone3(&args, sizeof(args));
+ if (!pid)
+ exit(EXIT_SUCCESS);
+
+ if (pid > 0) {
+ wait(NULL);
+ ksft_exit_fail_msg(
+ "Managed to create child process with invalid exit_signal\n");
+ }
+
+ if (errno == ENOSYS)
+ ksft_exit_skip("clone3() syscall is not supported\n");
+
+ ksft_print_msg("clone3() syscall supported\n");
+}
+
+#endif /* _CLONE3_SELFTESTS_H */
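A minimal, self-contained use of this header — effectively a fork() expressed through clone3() — might look like:

#define _GNU_SOURCE
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/wait.h>
#include <unistd.h>

#include "../kselftest.h"
#include "clone3_selftests.h"

int main(void)
{
	struct clone_args args = {
		.exit_signal = SIGCHLD,	/* reap the child as with fork() */
	};
	pid_t pid = sys_clone3(&args, sizeof(args));

	if (pid < 0)
		return 1;
	if (pid == 0)			/* child */
		_exit(EXIT_SUCCESS);
	return waitpid(pid, NULL, 0) < 0;
}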
diff --git a/tools/testing/selftests/clone3/clone3_set_tid.c b/tools/testing/selftests/clone3/clone3_set_tid.c
new file mode 100644
index 000000000000..25beb22f35b5
--- /dev/null
+++ b/tools/testing/selftests/clone3/clone3_set_tid.c
@@ -0,0 +1,397 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Based on Christian Brauner's clone3() example.
+ * These tests are assuming to be running in the host's
+ * PID namespace.
+ */
+
+#define _GNU_SOURCE
+#include <errno.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdbool.h>
+#include <sys/syscall.h>
+#include <sys/types.h>
+#include <sys/un.h>
+#include <sys/wait.h>
+#include <unistd.h>
+#include <sched.h>
+
+#include "../kselftest.h"
+#include "clone3_selftests.h"
+
+#ifndef MAX_PID_NS_LEVEL
+#define MAX_PID_NS_LEVEL 32
+#endif
+
+static int pipe_1[2];
+static int pipe_2[2];
+
+static void child_exit(int ret)
+{
+ fflush(stdout);
+ fflush(stderr);
+ _exit(ret);
+}
+
+static int call_clone3_set_tid(pid_t *set_tid,
+ size_t set_tid_size,
+ int flags,
+ int expected_pid,
+ bool wait_for_it)
+{
+ int status;
+ pid_t pid = -1;
+
+ struct clone_args args = {
+ .flags = flags,
+ .exit_signal = SIGCHLD,
+ .set_tid = ptr_to_u64(set_tid),
+ .set_tid_size = set_tid_size,
+ };
+
+ pid = sys_clone3(&args, sizeof(struct clone_args));
+ if (pid < 0) {
+ ksft_print_msg("%s - Failed to create new process\n",
+ strerror(errno));
+ return -errno;
+ }
+
+ if (pid == 0) {
+ int ret;
+ char tmp = 0;
+ int exit_code = EXIT_SUCCESS;
+
+ ksft_print_msg("I am the child, my PID is %d (expected %d)\n",
+ getpid(), set_tid[0]);
+ if (wait_for_it) {
+ ksft_print_msg("[%d] Child is ready and waiting\n",
+ getpid());
+
+ /* Signal the parent that the child is ready */
+ close(pipe_1[0]);
+ ret = write(pipe_1[1], &tmp, 1);
+ if (ret != 1) {
+ ksft_print_msg(
+ "Writing to pipe returned %d", ret);
+ exit_code = EXIT_FAILURE;
+ }
+ close(pipe_1[1]);
+ close(pipe_2[1]);
+ ret = read(pipe_2[0], &tmp, 1);
+ if (ret != 1) {
+ ksft_print_msg(
+ "Reading from pipe returned %d", ret);
+ exit_code = EXIT_FAILURE;
+ }
+ close(pipe_2[0]);
+ }
+
+ if (set_tid[0] != getpid())
+ child_exit(EXIT_FAILURE);
+ child_exit(exit_code);
+ }
+
+ if (expected_pid == 0 || expected_pid == pid) {
+ ksft_print_msg("I am the parent (%d). My child's pid is %d\n",
+ getpid(), pid);
+ } else {
+ ksft_print_msg(
+ "Expected child pid %d does not match actual pid %d\n",
+ expected_pid, pid);
+ return -1;
+ }
+
+ if (waitpid(pid, &status, 0) < 0) {
+ ksft_print_msg("Child returned %s\n", strerror(errno));
+ return -errno;
+ }
+
+ if (!WIFEXITED(status))
+ return -1;
+
+ return WEXITSTATUS(status);
+}
+
+static void test_clone3_set_tid(pid_t *set_tid,
+ size_t set_tid_size,
+ int flags,
+ int expected,
+ int expected_pid,
+ bool wait_for_it)
+{
+ int ret;
+
+ ksft_print_msg(
+ "[%d] Trying clone3() with CLONE_SET_TID to %d and 0x%x\n",
+ getpid(), set_tid[0], flags);
+ ret = call_clone3_set_tid(set_tid, set_tid_size, flags, expected_pid,
+ wait_for_it);
+ ksft_print_msg(
+ "[%d] clone3() with CLONE_SET_TID %d says :%d - expected %d\n",
+ getpid(), set_tid[0], ret, expected);
+ if (ret != expected)
+ ksft_test_result_fail(
+ "[%d] Result (%d) is different than expected (%d)\n",
+ getpid(), ret, expected);
+ else
+ ksft_test_result_pass(
+ "[%d] Result (%d) matches expectation (%d)\n",
+ getpid(), ret, expected);
+}
+int main(int argc, char *argv[])
+{
+ FILE *f;
+ char buf;
+ char *line;
+ int status;
+ int ret = -1;
+ size_t len = 0;
+ int pid_max = 0;
+ uid_t uid = getuid();
+ char proc_path[100] = {0};
+ pid_t pid, ns1, ns2, ns3, ns_pid;
+ pid_t set_tid[MAX_PID_NS_LEVEL * 2];
+
+ ksft_print_header();
+ test_clone3_supported();
+ ksft_set_plan(29);
+
+ if (pipe(pipe_1) < 0 || pipe(pipe_2) < 0)
+ ksft_exit_fail_msg("pipe() failed\n");
+
+ f = fopen("/proc/sys/kernel/pid_max", "r");
+ if (f == NULL)
+ ksft_exit_fail_msg(
+ "%s - Could not open /proc/sys/kernel/pid_max\n",
+ strerror(errno));
+ fscanf(f, "%d", &pid_max);
+ fclose(f);
+ ksft_print_msg("/proc/sys/kernel/pid_max %d\n", pid_max);
+
+ /* Try invalid settings */
+ memset(&set_tid, 0, sizeof(set_tid));
+ test_clone3_set_tid(set_tid, MAX_PID_NS_LEVEL + 1, 0, -EINVAL, 0, 0);
+
+ test_clone3_set_tid(set_tid, MAX_PID_NS_LEVEL * 2, 0, -EINVAL, 0, 0);
+
+ test_clone3_set_tid(set_tid, MAX_PID_NS_LEVEL * 2 + 1, 0,
+ -EINVAL, 0, 0);
+
+ test_clone3_set_tid(set_tid, MAX_PID_NS_LEVEL * 42, 0, -EINVAL, 0, 0);
+
+ /*
+ * This can actually work if this test is running in a MAX_PID_NS_LEVEL - 1
+ * nested PID namespace.
+ */
+ test_clone3_set_tid(set_tid, MAX_PID_NS_LEVEL - 1, 0, -EINVAL, 0, 0);
+
+ memset(&set_tid, 0xff, sizeof(set_tid));
+ test_clone3_set_tid(set_tid, MAX_PID_NS_LEVEL + 1, 0, -EINVAL, 0, 0);
+
+ test_clone3_set_tid(set_tid, MAX_PID_NS_LEVEL * 2, 0, -EINVAL, 0, 0);
+
+ test_clone3_set_tid(set_tid, MAX_PID_NS_LEVEL * 2 + 1, 0,
+ -EINVAL, 0, 0);
+
+ test_clone3_set_tid(set_tid, MAX_PID_NS_LEVEL * 42, 0, -EINVAL, 0, 0);
+
+ /*
+ * This can actually work if this test is running in a MAX_PID_NS_LEVEL - 1
+ * nested PID namespace.
+ */
+ test_clone3_set_tid(set_tid, MAX_PID_NS_LEVEL - 1, 0, -EINVAL, 0, 0);
+
+ memset(&set_tid, 0, sizeof(set_tid));
+ /* Try with an invalid PID */
+ set_tid[0] = 0;
+ test_clone3_set_tid(set_tid, 1, 0, -EINVAL, 0, 0);
+
+ set_tid[0] = -1;
+ test_clone3_set_tid(set_tid, 1, 0, -EINVAL, 0, 0);
+
+ /* Claim that the set_tid array actually contains 2 elements. */
+ test_clone3_set_tid(set_tid, 2, 0, -EINVAL, 0, 0);
+
+ /* Try it in a new PID namespace */
+ if (uid == 0)
+ test_clone3_set_tid(set_tid, 1, CLONE_NEWPID, -EINVAL, 0, 0);
+ else
+ ksft_test_result_skip("Clone3() with set_tid requires root\n");
+
+ /* Try with a valid PID (1); this should return -EEXIST. */
+ set_tid[0] = 1;
+ if (uid == 0)
+ test_clone3_set_tid(set_tid, 1, 0, -EEXIST, 0, 0);
+ else
+ ksft_test_result_skip("Clone3() with set_tid requires root\n");
+
+ /* Try it in a new PID namespace */
+ if (uid == 0)
+ test_clone3_set_tid(set_tid, 1, CLONE_NEWPID, 0, 0, 0);
+ else
+ ksft_test_result_skip("Clone3() with set_tid requires root\n");
+
+ /* pid_max should fail everywhere */
+ set_tid[0] = pid_max;
+ test_clone3_set_tid(set_tid, 1, 0, -EINVAL, 0, 0);
+
+ if (uid == 0)
+ test_clone3_set_tid(set_tid, 1, CLONE_NEWPID, -EINVAL, 0, 0);
+ else
+ ksft_test_result_skip("Clone3() with set_tid requires root\n");
+
+ if (uid != 0) {
+ /*
+ * All remaining tests require root. Tell the framework
+ * that all those tests are skipped as non-root.
+ */
+ ksft_cnt.ksft_xskip += ksft_plan - ksft_test_num();
+ goto out;
+ }
+
+ /* Find the current active PID */
+ pid = fork();
+ if (pid == 0) {
+ ksft_print_msg("Child has PID %d\n", getpid());
+ child_exit(EXIT_SUCCESS);
+ }
+ if (waitpid(pid, &status, 0) < 0)
+ ksft_exit_fail_msg("Waiting for child %d failed", pid);
+
+ /* After the child has finished, its PID should be free. */
+ set_tid[0] = pid;
+ test_clone3_set_tid(set_tid, 1, 0, 0, 0, 0);
+
+ /* This should fail as there is no PID 1 in that namespace */
+ test_clone3_set_tid(set_tid, 1, CLONE_NEWPID, -EINVAL, 0, 0);
+
+ /*
+ * Create a process with PID 1 in the newly created, most deeply
+ * nested PID namespace and PID 'pid' in the parent PID namespace.
+ * This needs to work.
+ */
+ set_tid[0] = 1;
+ set_tid[1] = pid;
+ test_clone3_set_tid(set_tid, 2, CLONE_NEWPID, 0, pid, 0);
+
+ ksft_print_msg("unshare PID namespace\n");
+ if (unshare(CLONE_NEWPID) == -1)
+ ksft_exit_fail_msg("unshare(CLONE_NEWPID) failed: %s\n",
+ strerror(errno));
+
+ set_tid[0] = pid;
+
+ /* This should fail as there is no PID 1 in that namespace */
+ test_clone3_set_tid(set_tid, 1, 0, -EINVAL, 0, 0);
+
+ /* Let's create a PID 1 */
+ ns_pid = fork();
+ if (ns_pid == 0) {
+ /*
+ * This and the next test cases check that all PIDs are
+ * released on error paths.
+ */
+ set_tid[0] = 43;
+ set_tid[1] = -1;
+ test_clone3_set_tid(set_tid, 2, 0, -EINVAL, 0, 0);
+
+ set_tid[0] = 43;
+ set_tid[1] = pid;
+ test_clone3_set_tid(set_tid, 2, 0, 0, 43, 0);
+
+ ksft_print_msg("Child in PID namespace has PID %d\n", getpid());
+ set_tid[0] = 2;
+ test_clone3_set_tid(set_tid, 1, 0, 0, 2, 0);
+
+ set_tid[0] = 1;
+ set_tid[1] = -1;
+ set_tid[2] = pid;
+ /* This should fail as there is an invalid PID at level '1'. */
+ test_clone3_set_tid(set_tid, 3, CLONE_NEWPID, -EINVAL, 0, 0);
+
+ set_tid[0] = 1;
+ set_tid[1] = 42;
+ set_tid[2] = pid;
+ /*
+ * This should fail as there are not enough active PID
+ * namespaces. Again, this assumes the test is running in the
+ * host's PID namespace and is not itself nested.
+ */
+ test_clone3_set_tid(set_tid, 4, CLONE_NEWPID, -EINVAL, 0, 0);
+
+ /*
+ * This should work and from the parent we should see
+ * something like 'NSpid: pid 42 1'.
+ */
+ test_clone3_set_tid(set_tid, 3, CLONE_NEWPID, 0, 42, true);
+
+ child_exit(ksft_cnt.ksft_fail);
+ }
+
+ close(pipe_1[1]);
+ close(pipe_2[0]);
+ while (read(pipe_1[0], &buf, 1) > 0) {
+ ksft_print_msg("[%d] Child is ready and waiting\n", getpid());
+ break;
+ }
+
+ snprintf(proc_path, sizeof(proc_path), "/proc/%d/status", pid);
+ f = fopen(proc_path, "r");
+ if (f == NULL)
+ ksft_exit_fail_msg(
+ "%s - Could not open %s\n",
+ strerror(errno), proc_path);
+
+ while (getline(&line, &len, f) != -1) {
+ if (strstr(line, "NSpid")) {
+ int i;
+
+ /* Verify that all generated PIDs are as expected. */
+ i = sscanf(line, "NSpid:\t%d\t%d\t%d",
+ &ns3, &ns2, &ns1);
+ if (i != 3) {
+ ksft_print_msg(
+ "Unexpected 'NSPid:' entry: %s",
+ line);
+ ns1 = ns2 = ns3 = 0;
+ }
+ break;
+ }
+ }
+ fclose(f);
+ free(line);
+ close(pipe_2[0]);
+
+ /* Tell the clone3()'d child to finish. */
+ write(pipe_2[1], &buf, 1);
+ close(pipe_2[1]);
+
+ if (waitpid(ns_pid, &status, 0) < 0) {
+ ksft_print_msg("Child returned %s\n", strerror(errno));
+ ret = -errno;
+ goto out;
+ }
+
+ if (!WIFEXITED(status))
+ ksft_test_result_fail("Child error\n");
+
+ ksft_cnt.ksft_pass += 6 - (ksft_cnt.ksft_fail - WEXITSTATUS(status));
+ ksft_cnt.ksft_fail = WEXITSTATUS(status);
+
+ if (ns3 == pid && ns2 == 42 && ns1 == 1)
+ ksft_test_result_pass(
+ "PIDs in all namespaces as expected (%d,%d,%d)\n",
+ ns3, ns2, ns1);
+ else
+ ksft_test_result_fail(
+ "PIDs in all namespaces not as expected (%d,%d,%d)\n",
+ ns3, ns2, ns1);
+out:
+ ret = 0;
+
+ return !ret ? ksft_exit_pass() : ksft_exit_fail();
+}
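The set_tid array is aimed at checkpoint/restore: recreating a task with the same PID at every PID namespace level, index 0 being the innermost namespace. A hedged sketch of a single-level restore (privileged — at the time of this test it needs CAP_SYS_ADMIN in the owning user namespace):

#define _GNU_SOURCE
#include <signal.h>
#include <unistd.h>

#include "../kselftest.h"
#include "clone3_selftests.h"

/* Sketch: respawn a child under one specific PID in the current
 * (innermost) PID namespace. */
static pid_t respawn_with_pid(pid_t want)
{
	pid_t set_tid[1] = { want };
	struct clone_args args = {
		.exit_signal = SIGCHLD,
		.set_tid = ptr_to_u64(set_tid),
		.set_tid_size = 1,	/* one entry: innermost level only */
	};

	return sys_clone3(&args, sizeof(args));
}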
diff --git a/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_l2_drops.sh b/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_l2_drops.sh
index 126caf28b529..58cdbfb608e9 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_l2_drops.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_l2_drops.sh
@@ -92,46 +92,6 @@ cleanup()
vrf_cleanup
}
-l2_drops_test()
-{
- local trap_name=$1; shift
- local group_name=$1; shift
-
- # This is the common part of all the tests. It checks that stats are
- # initially idle, then non-idle after changing the trap action and
- # finally idle again. It also makes sure the packets are dropped and
- # never forwarded.
- devlink_trap_stats_idle_test $trap_name
- check_err $? "Trap stats not idle with initial drop action"
- devlink_trap_group_stats_idle_test $group_name
- check_err $? "Trap group stats not idle with initial drop action"
-
- devlink_trap_action_set $trap_name "trap"
-
- devlink_trap_stats_idle_test $trap_name
- check_fail $? "Trap stats idle after setting action to trap"
- devlink_trap_group_stats_idle_test $group_name
- check_fail $? "Trap group stats idle after setting action to trap"
-
- devlink_trap_action_set $trap_name "drop"
-
- devlink_trap_stats_idle_test $trap_name
- check_err $? "Trap stats not idle after setting action to drop"
- devlink_trap_group_stats_idle_test $group_name
- check_err $? "Trap group stats not idle after setting action to drop"
-
- tc_check_packets "dev $swp2 egress" 101 0
- check_err $? "Packets were not dropped"
-}
-
-l2_drops_cleanup()
-{
- local mz_pid=$1; shift
-
- kill $mz_pid && wait $mz_pid &> /dev/null
- tc filter del dev $swp2 egress protocol ip pref 1 handle 101 flower
-}
-
source_mac_is_multicast_test()
{
local trap_name="source_mac_is_multicast"
@@ -147,11 +107,11 @@ source_mac_is_multicast_test()
RET=0
- l2_drops_test $trap_name $group_name
+ devlink_trap_drop_test $trap_name $group_name $swp2
log_test "Source MAC is multicast"
- l2_drops_cleanup $mz_pid
+ devlink_trap_drop_cleanup $mz_pid $swp2 ip
}
__vlan_tag_mismatch_test()
@@ -172,7 +132,7 @@ __vlan_tag_mismatch_test()
$MZ $h1 "$opt" -c 0 -p 100 -a own -b $dmac -t ip -d 1msec -q &
mz_pid=$!
- l2_drops_test $trap_name $group_name
+ devlink_trap_drop_test $trap_name $group_name $swp2
# Add PVID and make sure packets are no longer dropped.
bridge vlan add vid 1 dev $swp1 pvid untagged master
@@ -188,7 +148,7 @@ __vlan_tag_mismatch_test()
devlink_trap_action_set $trap_name "drop"
- l2_drops_cleanup $mz_pid
+ devlink_trap_drop_cleanup $mz_pid $swp2 ip
}
vlan_tag_mismatch_untagged_test()
@@ -233,7 +193,7 @@ ingress_vlan_filter_test()
$MZ $h1 -Q $vid -c 0 -p 100 -a own -b $dmac -t ip -d 1msec -q &
mz_pid=$!
- l2_drops_test $trap_name $group_name
+ devlink_trap_drop_test $trap_name $group_name $swp2
# Add the VLAN on the bridge port and make sure packets are no longer
# dropped.
@@ -252,7 +212,7 @@ ingress_vlan_filter_test()
log_test "Ingress VLAN filter"
- l2_drops_cleanup $mz_pid
+ devlink_trap_drop_cleanup $mz_pid $swp2 ip
bridge vlan del vid $vid dev $swp1 master
bridge vlan del vid $vid dev $swp2 master
@@ -277,7 +237,7 @@ __ingress_stp_filter_test()
$MZ $h1 -Q $vid -c 0 -p 100 -a own -b $dmac -t ip -d 1msec -q &
mz_pid=$!
- l2_drops_test $trap_name $group_name
+ devlink_trap_drop_test $trap_name $group_name $swp2
# Change STP state to forwarding and make sure packets are no longer
# dropped.
@@ -294,7 +254,7 @@ __ingress_stp_filter_test()
devlink_trap_action_set $trap_name "drop"
- l2_drops_cleanup $mz_pid
+ devlink_trap_drop_cleanup $mz_pid $swp2 ip
bridge vlan del vid $vid dev $swp1 master
bridge vlan del vid $vid dev $swp2 master
@@ -348,7 +308,7 @@ port_list_is_empty_uc_test()
$MZ $h1 -c 0 -p 100 -a own -b $dmac -t ip -d 1msec -q &
mz_pid=$!
- l2_drops_test $trap_name $group_name
+ devlink_trap_drop_test $trap_name $group_name $swp2
# Allow packets to be flooded to one port.
ip link set dev $swp2 type bridge_slave flood on
@@ -366,7 +326,7 @@ port_list_is_empty_uc_test()
log_test "Port list is empty - unicast"
- l2_drops_cleanup $mz_pid
+ devlink_trap_drop_cleanup $mz_pid $swp2 ip
ip link set dev $swp1 type bridge_slave flood on
}
@@ -394,7 +354,7 @@ port_list_is_empty_mc_test()
$MZ $h1 -c 0 -p 100 -a own -b $dmac -t ip -B $dip -d 1msec -q &
mz_pid=$!
- l2_drops_test $trap_name $group_name
+ devlink_trap_drop_test $trap_name $group_name $swp2
# Allow packets to be flooded to one port.
ip link set dev $swp2 type bridge_slave mcast_flood on
@@ -412,7 +372,7 @@ port_list_is_empty_mc_test()
log_test "Port list is empty - multicast"
- l2_drops_cleanup $mz_pid
+ devlink_trap_drop_cleanup $mz_pid $swp2 ip
ip link set dev $swp1 type bridge_slave mcast_flood on
}
@@ -441,7 +401,7 @@ port_loopback_filter_uc_test()
$MZ $h1 -c 0 -p 100 -a own -b $dmac -t ip -d 1msec -q &
mz_pid=$!
- l2_drops_test $trap_name $group_name
+ devlink_trap_drop_test $trap_name $group_name $swp2
# Allow packets to be flooded.
ip link set dev $swp2 type bridge_slave flood on
@@ -459,7 +419,7 @@ port_loopback_filter_uc_test()
log_test "Port loopback filter - unicast"
- l2_drops_cleanup $mz_pid
+ devlink_trap_drop_cleanup $mz_pid $swp2 ip
}
port_loopback_filter_test()
diff --git a/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_l3_drops.sh b/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_l3_drops.sh
new file mode 100755
index 000000000000..b4efb023ae51
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_l3_drops.sh
@@ -0,0 +1,563 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Test devlink-trap L3 drops functionality over mlxsw. Each registered L3 drop
+# packet trap is tested to make sure it is triggered under the right
+# conditions.
+
+# +---------------------------------+
+# | H1 (vrf) |
+# | + $h1 |
+# | | 192.0.2.1/24 |
+# | | 2001:db8:1::1/64 |
+# | | |
+# | | default via 192.0.2.2 |
+# | | default via 2001:db8:1::2 |
+# +----|----------------------------+
+# |
+# +----|----------------------------------------------------------------------+
+# | SW | |
+# | + $rp1 |
+# | 192.0.2.2/24 |
+# | 2001:db8:1::2/64 |
+# | |
+# | 2001:db8:2::2/64 |
+# | 198.51.100.2/24 |
+# | + $rp2 |
+# | | |
+# +----|----------------------------------------------------------------------+
+# |
+# +----|----------------------------+
+# | | default via 198.51.100.2 |
+# | | default via 2001:db8:2::2 |
+# | | |
+# | | 2001:db8:2::1/64 |
+# | | 198.51.100.1/24 |
+# | + $h2 |
+# | H2 (vrf) |
+# +---------------------------------+
+
+lib_dir=$(dirname $0)/../../../net/forwarding
+
+ALL_TESTS="
+ non_ip_test
+ uc_dip_over_mc_dmac_test
+ dip_is_loopback_test
+ sip_is_mc_test
+ sip_is_loopback_test
+ ip_header_corrupted_test
+ ipv4_sip_is_limited_bc_test
+ ipv6_mc_dip_reserved_scope_test
+ ipv6_mc_dip_interface_local_scope_test
+ blackhole_route_test
+"
+
+NUM_NETIFS=4
+source $lib_dir/lib.sh
+source $lib_dir/tc_common.sh
+source $lib_dir/devlink_lib.sh
+
+h1_create()
+{
+ simple_if_init $h1 192.0.2.1/24 2001:db8:1::1/64
+
+ ip -4 route add default vrf v$h1 nexthop via 192.0.2.2
+ ip -6 route add default vrf v$h1 nexthop via 2001:db8:1::2
+}
+
+h1_destroy()
+{
+ ip -6 route del default vrf v$h1 nexthop via 2001:db8:1::2
+ ip -4 route del default vrf v$h1 nexthop via 192.0.2.2
+
+ simple_if_fini $h1 192.0.2.1/24 2001:db8:1::1/64
+}
+
+h2_create()
+{
+ simple_if_init $h2 $h2_ipv4/24 $h2_ipv6/64
+
+ ip -4 route add default vrf v$h2 nexthop via 198.51.100.2
+ ip -6 route add default vrf v$h2 nexthop via 2001:db8:2::2
+}
+
+h2_destroy()
+{
+ ip -6 route del default vrf v$h2 nexthop via 2001:db8:2::2
+ ip -4 route del default vrf v$h2 nexthop via 198.51.100.2
+
+ simple_if_fini $h2 $h2_ipv4/24 $h2_ipv6/64
+}
+
+router_create()
+{
+ ip link set dev $rp1 up
+ ip link set dev $rp2 up
+
+ tc qdisc add dev $rp2 clsact
+
+ __addr_add_del $rp1 add 192.0.2.2/24 2001:db8:1::2/64
+ __addr_add_del $rp2 add 198.51.100.2/24 2001:db8:2::2/64
+}
+
+router_destroy()
+{
+ __addr_add_del $rp2 del 198.51.100.2/24 2001:db8:2::2/64
+ __addr_add_del $rp1 del 192.0.2.2/24 2001:db8:1::2/64
+
+ tc qdisc del dev $rp2 clsact
+}
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ rp1=${NETIFS[p2]}
+
+ rp2=${NETIFS[p3]}
+ h2=${NETIFS[p4]}
+
+ h1mac=$(mac_get $h1)
+ rp1mac=$(mac_get $rp1)
+
+ h1_ipv4=192.0.2.1
+ h2_ipv4=198.51.100.1
+ h1_ipv6=2001:db8:1::1
+ h2_ipv6=2001:db8:2::1
+
+ vrf_prepare
+ forwarding_enable
+
+ h1_create
+ h2_create
+
+ router_create
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ router_destroy
+
+ h2_destroy
+ h1_destroy
+
+ forwarding_restore
+ vrf_cleanup
+}
+
+ping_check()
+{
+ trap_name=$1; shift
+
+ devlink_trap_action_set $trap_name "trap"
+ ping_do $h1 $h2_ipv4
+ check_err $? "Packets that should not be trapped were trapped"
+ devlink_trap_action_set $trap_name "drop"
+}
+
+non_ip_test()
+{
+ local trap_name="non_ip"
+ local group_name="l3_drops"
+ local mz_pid
+
+ RET=0
+
+ ping_check $trap_name
+
+ tc filter add dev $rp2 egress protocol ip pref 1 handle 101 \
+ flower dst_ip $h2_ipv4 action drop
+
+ # Generate non-IP packets to the router
+ $MZ $h1 -c 0 -p 100 -d 1msec -B $h2_ipv4 -q "$rp1mac $h1mac \
+ 00:00 de:ad:be:ef" &
+ mz_pid=$!
+
+ devlink_trap_drop_test $trap_name $group_name $rp2
+
+ log_test "Non IP"
+
+ devlink_trap_drop_cleanup $mz_pid $rp2 "ip"
+}
+
+__uc_dip_over_mc_dmac_test()
+{
+ local desc=$1; shift
+ local proto=$1; shift
+ local dip=$1; shift
+ local flags=${1:-""}; shift
+ local trap_name="uc_dip_over_mc_dmac"
+ local group_name="l3_drops"
+ local dmac=01:02:03:04:05:06
+ local mz_pid
+
+ RET=0
+
+ ping_check $trap_name
+
+ tc filter add dev $rp2 egress protocol $proto pref 1 handle 101 \
+ flower ip_proto udp src_port 54321 dst_port 12345 action drop
+
+ # Generate IP packets with a unicast IP and a multicast destination MAC
+ $MZ $h1 $flags -t udp "sp=54321,dp=12345" -c 0 -p 100 -b $dmac \
+ -B $dip -d 1msec -q &
+ mz_pid=$!
+
+ devlink_trap_drop_test $trap_name $group_name $rp2
+
+ log_test "Unicast destination IP over multicast destination MAC: $desc"
+
+ devlink_trap_drop_cleanup $mz_pid $rp2 $proto
+}
+
+uc_dip_over_mc_dmac_test()
+{
+ __uc_dip_over_mc_dmac_test "IPv4" "ip" $h2_ipv4
+ __uc_dip_over_mc_dmac_test "IPv6" "ipv6" $h2_ipv6 "-6"
+}
+
+__sip_is_loopback_test()
+{
+ local desc=$1; shift
+ local proto=$1; shift
+ local sip=$1; shift
+ local dip=$1; shift
+ local flags=${1:-""}; shift
+ local trap_name="sip_is_loopback_address"
+ local group_name="l3_drops"
+ local mz_pid
+
+ RET=0
+
+ ping_check $trap_name
+
+ tc filter add dev $rp2 egress protocol $proto pref 1 handle 101 \
+ flower src_ip $sip action drop
+
+ # Generate packets with loopback source IP
+ $MZ $h1 $flags -t udp "sp=54321,dp=12345" -c 0 -p 100 -A $sip \
+ -b $rp1mac -B $dip -d 1msec -q &
+ mz_pid=$!
+
+ devlink_trap_drop_test $trap_name $group_name $rp2
+
+ log_test "Source IP is loopback address: $desc"
+
+ devlink_trap_drop_cleanup $mz_pid $rp2 $proto
+}
+
+sip_is_loopback_test()
+{
+ __sip_is_loopback_test "IPv4" "ip" "127.0.0.0/8" $h2_ipv4
+ __sip_is_loopback_test "IPv6" "ipv6" "::1" $h2_ipv6 "-6"
+}
+
+__dip_is_loopback_test()
+{
+ local desc=$1; shift
+ local proto=$1; shift
+ local dip=$1; shift
+ local flags=${1:-""}; shift
+ local trap_name="dip_is_loopback_address"
+ local group_name="l3_drops"
+ local mz_pid
+
+ RET=0
+
+ ping_check $trap_name
+
+ tc filter add dev $rp2 egress protocol $proto pref 1 handle 101 \
+ flower dst_ip $dip action drop
+
+ # Generate packets with loopback destination IP
+ $MZ $h1 $flags -t udp "sp=54321,dp=12345" -c 0 -p 100 -b $rp1mac \
+ -B $dip -d 1msec -q &
+ mz_pid=$!
+
+ devlink_trap_drop_test $trap_name $group_name $rp2
+
+ log_test "Destination IP is loopback address: $desc"
+
+ devlink_trap_drop_cleanup $mz_pid $rp2 $proto
+}
+
+dip_is_loopback_test()
+{
+ __dip_is_loopback_test "IPv4" "ip" "127.0.0.0/8"
+ __dip_is_loopback_test "IPv6" "ipv6" "::1" "-6"
+}
+
+__sip_is_mc_test()
+{
+ local desc=$1; shift
+ local proto=$1; shift
+ local sip=$1; shift
+ local dip=$1; shift
+ local flags=${1:-""}; shift
+ local trap_name="sip_is_mc"
+ local group_name="l3_drops"
+ local mz_pid
+
+ RET=0
+
+ ping_check $trap_name
+
+ tc filter add dev $rp2 egress protocol $proto pref 1 handle 101 \
+ flower src_ip $sip action drop
+
+ # Generate packets with multicast source IP
+ $MZ $h1 $flags -t udp "sp=54321,dp=12345" -c 0 -p 100 -A $sip \
+ -b $rp1mac -B $dip -d 1msec -q &
+ mz_pid=$!
+
+ devlink_trap_drop_test $trap_name $group_name $rp2
+
+ log_test "Source IP is multicast: $desc"
+
+ devlink_trap_drop_cleanup $mz_pid $rp2 $proto
+}
+
+sip_is_mc_test()
+{
+ __sip_is_mc_test "IPv4" "ip" "239.1.1.1" $h2_ipv4
+ __sip_is_mc_test "IPv6" "ipv6" "FF02::2" $h2_ipv6 "-6"
+}
+
+ipv4_sip_is_limited_bc_test()
+{
+ local trap_name="ipv4_sip_is_limited_bc"
+ local group_name="l3_drops"
+ local sip=255.255.255.255
+ local mz_pid
+
+ RET=0
+
+ ping_check $trap_name
+
+ tc filter add dev $rp2 egress protocol ip pref 1 handle 101 \
+ flower src_ip $sip action drop
+
+ # Generate packets with limited broadcast source IP
+ $MZ $h1 -t udp "sp=54321,dp=12345" -c 0 -p 100 -A $sip -b $rp1mac \
+ -B $h2_ipv4 -d 1msec -q &
+ mz_pid=$!
+
+ devlink_trap_drop_test $trap_name $group_name $rp2
+
+ log_test "IPv4 source IP is limited broadcast"
+
+ devlink_trap_drop_cleanup $mz_pid $rp2 "ip"
+}
+
+ipv4_payload_get()
+{
+ local ipver=$1; shift
+ local ihl=$1; shift
+ local checksum=$1; shift
+
+ p=$(:
+ )"08:00:"$( : ETH type
+ )"$ipver"$( : IP version
+ )"$ihl:"$( : IHL
+ )"00:"$( : IP TOS
+ )"00:F4:"$( : IP total length
+ )"00:00:"$( : IP identification
+ )"20:00:"$( : IP flags + frag off
+ )"30:"$( : IP TTL
+ )"01:"$( : IP proto
+ )"$checksum:"$( : IP header csum
+ )"$h1_ipv4:"$( : IP saddr
+ )"$h2_ipv4:"$( : IP daddr
+ )
+ echo $p
+}
+
+__ipv4_header_corrupted_test()
+{
+ local desc=$1; shift
+ local ipver=$1; shift
+ local ihl=$1; shift
+ local checksum=$1; shift
+ local trap_name="ip_header_corrupted"
+ local group_name="l3_drops"
+ local payload
+ local mz_pid
+
+ RET=0
+
+ ping_check $trap_name
+
+ tc filter add dev $rp2 egress protocol ip pref 1 handle 101 \
+ flower dst_ip $h2_ipv4 action drop
+
+ payload=$(ipv4_payload_get $ipver $ihl $checksum)
+
+ # Generate packets with corrupted IP header
+ $MZ $h1 -c 0 -d 1msec -a $h1mac -b $rp1mac -q p=$payload &
+ mz_pid=$!
+
+ devlink_trap_drop_test $trap_name $group_name $rp2
+
+ log_test "IP header corrupted: $desc: IPv4"
+
+ devlink_trap_drop_cleanup $mz_pid $rp2 "ip"
+}
+
+ipv6_payload_get()
+{
+ local ipver=$1; shift
+
+ p=$(:
+ )"86:DD:"$( : ETH type
+ )"$ipver"$( : IP version
+ )"0:0:"$( : Traffic class
+ )"0:00:00:"$( : Flow label
+ )"00:00:"$( : Payload length
+ )"01:"$( : Next header
+ )"04:"$( : Hop limit
+ )"$h1_ipv6:"$( : IP saddr
+ )"$h2_ipv6:"$( : IP daddr
+ )
+ echo $p
+}
+
+__ipv6_header_corrupted_test()
+{
+ local desc=$1; shift
+ local ipver=$1; shift
+ local trap_name="ip_header_corrupted"
+ local group_name="l3_drops"
+ local payload
+ local mz_pid
+
+ RET=0
+
+ ping_check $trap_name
+
+ tc filter add dev $rp2 egress protocol ip pref 1 handle 101 \
+ flower dst_ip $h2_ipv4 action drop
+
+ payload=$(ipv6_payload_get $ipver)
+
+ # Generate packets with corrupted IP header
+ $MZ $h1 -c 0 -d 1msec -a $h1mac -b $rp1mac -q p=$payload &
+ mz_pid=$!
+
+ devlink_trap_drop_test $trap_name $group_name $rp2
+
+ log_test "IP header corrupted: $desc: IPv6"
+
+ devlink_trap_drop_cleanup $mz_pid $rp2 "ip"
+}
+
+ip_header_corrupted_test()
+{
+ # Each test uses one wrong value. The three values below are correct.
+ local ipv="4"
+ local ihl="5"
+ local checksum="00:F4"
+
+ __ipv4_header_corrupted_test "wrong IP version" 5 $ihl $checksum
+ __ipv4_header_corrupted_test "wrong IHL" $ipv 4 $checksum
+ __ipv4_header_corrupted_test "wrong checksum" $ipv $ihl "00:00"
+ __ipv6_header_corrupted_test "wrong IP version" 5
+}
+
+ipv6_mc_dip_reserved_scope_test()
+{
+ local trap_name="ipv6_mc_dip_reserved_scope"
+ local group_name="l3_drops"
+ local dip=FF00::
+ local mz_pid
+
+ RET=0
+
+ ping_check $trap_name
+
+ tc filter add dev $rp2 egress protocol ipv6 pref 1 handle 101 \
+ flower dst_ip $dip action drop
+
+ # Generate packets with reserved scope destination IP
+ $MZ $h1 -6 -t udp "sp=54321,dp=12345" -c 0 -p 100 -b \
+ "33:33:00:00:00:00" -B $dip -d 1msec -q &
+ mz_pid=$!
+
+ devlink_trap_drop_test $trap_name $group_name $rp2
+
+ log_test "IPv6 multicast destination IP reserved scope"
+
+ devlink_trap_drop_cleanup $mz_pid $rp2 "ipv6"
+}
+
+ipv6_mc_dip_interface_local_scope_test()
+{
+ local trap_name="ipv6_mc_dip_interface_local_scope"
+ local group_name="l3_drops"
+ local dip=FF01::
+ local mz_pid
+
+ RET=0
+
+ ping_check $trap_name
+
+ tc filter add dev $rp2 egress protocol ipv6 pref 1 handle 101 \
+ flower dst_ip $dip action drop
+
+ # Generate packets with interface local scope destination IP
+ $MZ $h1 -6 -t udp "sp=54321,dp=12345" -c 0 -p 100 -b \
+ "33:33:00:00:00:00" -B $dip -d 1msec -q &
+ mz_pid=$!
+
+ devlink_trap_drop_test $trap_name $group_name $rp2
+
+ log_test "IPv6 multicast destination IP interface-local scope"
+
+ devlink_trap_drop_cleanup $mz_pid $rp2 "ipv6"
+}
+
+__blackhole_route_test()
+{
+ local flags=$1; shift
+ local subnet=$1; shift
+ local proto=$1; shift
+ local dip=$1; shift
+ local ip_proto=${1:-"icmp"}; shift
+ local trap_name="blackhole_route"
+ local group_name="l3_drops"
+ local mz_pid
+
+ RET=0
+
+ ping_check $trap_name
+
+ ip -$flags route add blackhole $subnet
+ tc filter add dev $rp2 egress protocol $proto pref 1 handle 101 \
+ flower skip_hw dst_ip $dip ip_proto $ip_proto action drop
+
+ # Generate packets to the blackhole route
+ $MZ $h1 -$flags -t udp "sp=54321,dp=12345" -c 0 -p 100 -b $rp1mac \
+ -B $dip -d 1msec -q &
+ mz_pid=$!
+
+ devlink_trap_drop_test $trap_name $group_name $rp2
+ log_test "Blackhole route: IPv$flags"
+
+ devlink_trap_drop_cleanup $mz_pid $rp2 $proto
+ ip -$flags route del blackhole $subnet
+}
+
+blackhole_route_test()
+{
+ __blackhole_route_test "4" "198.51.100.0/30" "ip" $h2_ipv4
+ __blackhole_route_test "6" "2001:db8:2::/120" "ipv6" $h2_ipv6 "icmpv6"
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+
+tests_run
+
+exit $EXIT_STATUS
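ipv4_payload_get() above hand-assembles a raw IPv4 header, with the header checksum injected as a precomputed byte pair. For reference, a small C sketch of the RFC 1071 one's-complement sum such a value comes from, computed over the 20-byte header with the checksum field zeroed:

#include <stddef.h>
#include <stdint.h>

/* Sketch: 16-bit one's-complement checksum over an IPv4 header. */
static uint16_t ipv4_hdr_csum(const uint8_t *hdr, size_t len)
{
	uint32_t sum = 0;
	size_t i;

	for (i = 0; i + 1 < len; i += 2)
		sum += ((uint32_t)hdr[i] << 8) | hdr[i + 1];
	while (sum >> 16)		/* fold carries back in */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}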
diff --git a/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_l3_exceptions.sh b/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_l3_exceptions.sh
new file mode 100755
index 000000000000..2bc6df42d597
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_l3_exceptions.sh
@@ -0,0 +1,557 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Test devlink-trap L3 exceptions functionality over mlxsw.
+# Check all exception traps to make sure they are triggered under the right
+# conditions.
+
+# +---------------------------------+
+# | H1 (vrf) |
+# | + $h1 |
+# | | 192.0.2.1/24 |
+# | | 2001:db8:1::1/64 |
+# | | |
+# | | default via 192.0.2.2 |
+# | | default via 2001:db8:1::2 |
+# +----|----------------------------+
+# |
+# +----|----------------------------------------------------------------------+
+# | SW | |
+# | + $rp1 |
+# | 192.0.2.2/24 |
+# | 2001:db8:1::2/64 |
+# | |
+# | 2001:db8:2::2/64 |
+# | 198.51.100.2/24 |
+# | + $rp2 |
+# | | |
+# +----|----------------------------------------------------------------------+
+# |
+# +----|----------------------------+
+# | | default via 198.51.100.2 |
+# | | default via 2001:db8:2::2 |
+# | | |
+# | | 2001:db8:2::1/64 |
+# | | 198.51.100.1/24 |
+# | + $h2 |
+# | H2 (vrf) |
+# +---------------------------------+
+
+lib_dir=$(dirname $0)/../../../net/forwarding
+
+ALL_TESTS="
+ mtu_value_is_too_small_test
+ ttl_value_is_too_small_test
+ mc_reverse_path_forwarding_test
+ reject_route_test
+ unresolved_neigh_test
+ ipv4_lpm_miss_test
+ ipv6_lpm_miss_test
+"
+
+NUM_NETIFS=4
+source $lib_dir/lib.sh
+source $lib_dir/tc_common.sh
+source $lib_dir/devlink_lib.sh
+
+require_command $MCD
+require_command $MC_CLI
+table_name=selftests
+
+h1_create()
+{
+ simple_if_init $h1 192.0.2.1/24 2001:db8:1::1/64
+
+ ip -4 route add default vrf v$h1 nexthop via 192.0.2.2
+ ip -6 route add default vrf v$h1 nexthop via 2001:db8:1::2
+
+ tc qdisc add dev $h1 clsact
+}
+
+h1_destroy()
+{
+ tc qdisc del dev $h1 clsact
+
+ ip -6 route del default vrf v$h1 nexthop via 2001:db8:1::2
+ ip -4 route del default vrf v$h1 nexthop via 192.0.2.2
+
+ simple_if_fini $h1 192.0.2.1/24 2001:db8:1::1/64
+}
+
+h2_create()
+{
+ simple_if_init $h2 198.51.100.1/24 2001:db8:2::1/64
+
+ ip -4 route add default vrf v$h2 nexthop via 198.51.100.2
+ ip -6 route add default vrf v$h2 nexthop via 2001:db8:2::2
+}
+
+h2_destroy()
+{
+ ip -6 route del default vrf v$h2 nexthop via 2001:db8:2::2
+ ip -4 route del default vrf v$h2 nexthop via 198.51.100.2
+
+ simple_if_fini $h2 198.51.100.1/24 2001:db8:2::1/64
+}
+
+router_create()
+{
+ ip link set dev $rp1 up
+ ip link set dev $rp2 up
+
+ tc qdisc add dev $rp2 clsact
+
+ __addr_add_del $rp1 add 192.0.2.2/24 2001:db8:1::2/64
+ __addr_add_del $rp2 add 198.51.100.2/24 2001:db8:2::2/64
+}
+
+router_destroy()
+{
+ __addr_add_del $rp2 del 198.51.100.2/24 2001:db8:2::2/64
+ __addr_add_del $rp1 del 192.0.2.2/24 2001:db8:1::2/64
+
+ tc qdisc del dev $rp2 clsact
+}
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ rp1=${NETIFS[p2]}
+
+ rp2=${NETIFS[p3]}
+ h2=${NETIFS[p4]}
+
+ rp1mac=$(mac_get $rp1)
+
+ start_mcd
+
+ vrf_prepare
+ forwarding_enable
+
+ h1_create
+ h2_create
+
+ router_create
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ router_destroy
+
+ h2_destroy
+ h1_destroy
+
+ forwarding_restore
+ vrf_cleanup
+
+ kill_mcd
+}
+
+ping_check()
+{
+ ping_do $h1 198.51.100.1
+ check_err $? "Packets that should not be trapped were trapped"
+}
+
+trap_action_check()
+{
+ local trap_name=$1; shift
+ local expected_action=$1; shift
+
+ action=$(devlink_trap_action_get $trap_name)
+ if [ "$action" != $expected_action ]; then
+ check_err 1 "Trap $trap_name has wrong action: $action"
+ fi
+}
+
+mtu_value_is_too_small_test()
+{
+ local trap_name="mtu_value_is_too_small"
+ local group_name="l3_drops"
+ local expected_action="trap"
+ local mz_pid
+
+ RET=0
+
+ ping_check $trap_name
+ trap_action_check $trap_name $expected_action
+
+ # type - Destination Unreachable
+ # code - Fragmentation Needed and Don't Fragment was Set
+ tc filter add dev $h1 ingress protocol ip pref 1 handle 101 \
+ flower skip_hw ip_proto icmp type 3 code 4 action pass
+
+ mtu_set $rp2 1300
+
+ # Generate IP packets bigger than the router's MTU with the
+ # don't-fragment flag on.
+ $MZ $h1 -t udp "sp=54321,dp=12345,df" -p 1400 -c 0 -d 1msec -b $rp1mac \
+ -B 198.51.100.1 -q &
+ mz_pid=$!
+
+ devlink_trap_exception_test $trap_name $group_name
+
+ tc_check_packets_hitting "dev $h1 ingress" 101
+ check_err $? "Packets were not received to h1"
+
+ log_test "MTU value is too small"
+
+ mtu_restore $rp2
+
+ kill $mz_pid && wait $mz_pid &> /dev/null
+ tc filter del dev $h1 ingress protocol ip pref 1 handle 101 flower
+}
+
+__ttl_value_is_too_small_test()
+{
+ local ttl_val=$1; shift
+ local trap_name="ttl_value_is_too_small"
+ local group_name="l3_drops"
+ local expected_action="trap"
+ local mz_pid
+
+ RET=0
+
+ ping_check $trap_name
+ trap_action_check $trap_name $expected_action
+
+ # type - Time Exceeded
+ # code - Time to Live exceeded in Transit
+ tc filter add dev $h1 ingress protocol ip pref 1 handle 101 \
+ flower skip_hw ip_proto icmp type 11 code 0 action pass
+
+ # Generate IP packets with a small TTL
+ $MZ $h1 -t udp "ttl=$ttl_val,sp=54321,dp=12345" -c 0 -d 1msec \
+ -b $rp1mac -B 198.51.100.1 -q &
+ mz_pid=$!
+
+ devlink_trap_exception_test $trap_name $group_name
+
+ tc_check_packets_hitting "dev $h1 ingress" 101
+ check_err $? "Packets were not received to h1"
+
+ log_test "TTL value is too small: TTL=$ttl_val"
+
+ kill $mz_pid && wait $mz_pid &> /dev/null
+ tc filter del dev $h1 ingress protocol ip pref 1 handle 101 flower
+}
+
+ttl_value_is_too_small_test()
+{
+ __ttl_value_is_too_small_test 0
+ __ttl_value_is_too_small_test 1
+}
+
+start_mcd()
+{
+ SMCROUTEDIR="$(mktemp -d)"
+ for ((i = 1; i <= $NUM_NETIFS; ++i)); do
+ echo "phyint ${NETIFS[p$i]} enable" >> \
+ $SMCROUTEDIR/$table_name.conf
+ done
+
+ $MCD -N -I $table_name -f $SMCROUTEDIR/$table_name.conf \
+ -P $SMCROUTEDIR/$table_name.pid
+}
+
+kill_mcd()
+{
+ pkill $MCD
+ rm -rf $SMCROUTEDIR
+}
+
+__mc_reverse_path_forwarding_test()
+{
+ local desc=$1; shift
+ local src_ip=$1; shift
+ local dst_ip=$1; shift
+ local dst_mac=$1; shift
+ local proto=$1; shift
+ local flags=${1:-""}; shift
+ local trap_name="mc_reverse_path_forwarding"
+ local group_name="l3_drops"
+ local expected_action="trap"
+ local mz_pid
+
+ RET=0
+
+ ping_check $trap_name
+ trap_action_check $trap_name $expected_action
+
+ tc filter add dev $rp2 egress protocol $proto pref 1 handle 101 \
+ flower dst_ip $dst_ip ip_proto udp action drop
+
+ $MC_CLI -I $table_name add $rp1 $src_ip $dst_ip $rp2
+
+ # Generate packets to a multicast address.
+ $MZ $h2 $flags -t udp "sp=54321,dp=12345" -c 0 -p 128 \
+ -a 00:11:22:33:44:55 -b $dst_mac \
+ -A $src_ip -B $dst_ip -q &
+
+ mz_pid=$!
+
+ devlink_trap_exception_test $trap_name $group_name
+
+ tc_check_packets "dev $rp2 egress" 101 0
+ check_err $? "Packets were not dropped"
+
+ log_test "Multicast reverse path forwarding: $desc"
+
+ kill $mz_pid && wait $mz_pid &> /dev/null
+ tc filter del dev $rp2 egress protocol $proto pref 1 handle 101 flower
+}
+
+mc_reverse_path_forwarding_test()
+{
+ __mc_reverse_path_forwarding_test "IPv4" "192.0.2.1" "225.1.2.3" \
+ "01:00:5e:01:02:03" "ip"
+ __mc_reverse_path_forwarding_test "IPv6" "2001:db8:1::1" "ff0e::3" \
+ "33:33:00:00:00:03" "ipv6" "-6"
+}
+
+__reject_route_test()
+{
+ local desc=$1; shift
+ local dst_ip=$1; shift
+ local proto=$1; shift
+ local ip_proto=$1; shift
+ local type=$1; shift
+ local code=$1; shift
+ local unreachable=$1; shift
+ local flags=${1:-""}; shift
+ local trap_name="reject_route"
+ local group_name="l3_drops"
+ local expected_action="trap"
+ local mz_pid
+
+ RET=0
+
+ ping_check $trap_name
+ trap_action_check $trap_name $expected_action
+
+ tc filter add dev $h1 ingress protocol $proto pref 1 handle 101 flower \
+ skip_hw ip_proto $ip_proto type $type code $code action pass
+
+ ip route add unreachable $unreachable
+
+ # Generate packets to h2. The destination IP is unreachable.
+ $MZ $flags $h1 -t udp "sp=54321,dp=12345" -c 0 -d 1msec -b $rp1mac \
+ -B $dst_ip -q &
+ mz_pid=$!
+
+ devlink_trap_exception_test $trap_name $group_name
+
+ tc_check_packets_hitting "dev $h1 ingress" 101
+ check_err $? "ICMP packet was not received to h1"
+
+ log_test "Reject route: $desc"
+
+ kill $mz_pid && wait $mz_pid &> /dev/null
+ ip route del unreachable $unreachable
+ tc filter del dev $h1 ingress protocol $proto pref 1 handle 101 flower
+}
+
+reject_route_test()
+{
+ # type - Destination Unreachable
+ # code - Host Unreachable
+ __reject_route_test "IPv4" 198.51.100.1 "ip" "icmp" 3 1 \
+ "198.51.100.0/26"
+ # type - Destination Unreachable
+ # code - No Route
+ __reject_route_test "IPv6" 2001:db8:2::1 "ipv6" "icmpv6" 1 0 \
+ "2001:db8:2::0/66" "-6"
+}
+
+__host_miss_test()
+{
+ local desc=$1; shift
+ local dip=$1; shift
+ local trap_name="unresolved_neigh"
+ local group_name="l3_drops"
+ local expected_action="trap"
+ local mz_pid
+
+ RET=0
+
+ ping_check $trap_name
+ trap_action_check $trap_name $expected_action
+
+ ip neigh flush dev $rp2
+
+ t0_packets=$(devlink_trap_rx_packets_get $trap_name)
+
+ # Generate packets to h2 (will incur an unresolved neighbour).
+ # The ping should pass and the devlink counters should increase.
+ ping_do $h1 $dip
+ check_err $? "ping failed: $desc"
+
+ t1_packets=$(devlink_trap_rx_packets_get $trap_name)
+
+ if [[ $t0_packets -eq $t1_packets ]]; then
+ check_err 1 "Trap counter did not increase"
+ fi
+
+ log_test "Unresolved neigh: host miss: $desc"
+}
+
+__invalid_nexthop_test()
+{
+ local desc=$1; shift
+ local dip=$1; shift
+ local extra_add=$1; shift
+ local subnet=$1; shift
+ local via_add=$1; shift
+ local trap_name="unresolved_neigh"
+ local group_name="l3_drops"
+ local expected_action="trap"
+ local mz_pid
+
+ RET=0
+
+ ping_check $trap_name
+ trap_action_check $trap_name $expected_action
+
+ ip address add $extra_add/$subnet dev $h2
+
+ # Check that a correct route does not trigger unresolved_neigh
+ ip $flags route add $dip via $extra_add dev $rp2
+
+ # Generate packets in order to discover all neighbours.
+ # Otherwise, the unresolved_neigh counters would increase
+ # during neighbour discovery and the check below would fail
+ # for the wrong reason.
+ ping_do $h1 $dip
+
+ t0_packets=$(devlink_trap_rx_packets_get $trap_name)
+ ping_do $h1 $dip
+ t1_packets=$(devlink_trap_rx_packets_get $trap_name)
+
+ if [[ $t0_packets -ne $t1_packets ]]; then
+ check_err 1 "Trap counter increased when it should not"
+ fi
+
+ ip $flags route del $dip via $extra_add dev $rp2
+
+ # Check that a route to a nonexistent nexthop triggers
+ # unresolved_neigh
+ ip $flags route add $dip via $via_add dev $h2
+
+ t0_packets=$(devlink_trap_rx_packets_get $trap_name)
+ ping_do $h1 $dip
+ t1_packets=$(devlink_trap_rx_packets_get $trap_name)
+
+ if [[ $t0_packets -eq $t1_packets ]]; then
+ check_err 1 "Trap counter did not increase"
+ fi
+
+ ip $flags route del $dip via $via_add dev $h2
+ ip address del $extra_add/$subnet dev $h2
+ log_test "Unresolved neigh: nexthop does not exist: $desc"
+}
+
+unresolved_neigh_test()
+{
+ __host_miss_test "IPv4" 198.51.100.1
+ __host_miss_test "IPv6" 2001:db8:2::1
+ __invalid_nexthop_test "IPv4" 198.51.100.1 198.51.100.3 24 198.51.100.4
+ __invalid_nexthop_test "IPv6" 2001:db8:2::1 2001:db8:2::3 64 \
+ 2001:db8:2::4
+}
+
+vrf_without_routes_create()
+{
+ # Creating the VRF takes the links down and then up again.
+ # By default, IPv6 addresses are not retained once a link goes down.
+ # Retain the IPv6 addresses using sysctl configuration.
+ sysctl_set net.ipv6.conf.$rp1.keep_addr_on_down 1
+ sysctl_set net.ipv6.conf.$rp2.keep_addr_on_down 1
+
+ ip link add dev vrf1 type vrf table 101
+ ip link set dev $rp1 master vrf1
+ ip link set dev $rp2 master vrf1
+ ip link set dev vrf1 up
+
+ # Wait for rp1 and rp2 to be up
+ setup_wait
+}
+
+vrf_without_routes_destroy()
+{
+ ip link set dev $rp1 nomaster
+ ip link set dev $rp2 nomaster
+ ip link del dev vrf1
+
+ sysctl_restore net.ipv6.conf.$rp2.keep_addr_on_down
+ sysctl_restore net.ipv6.conf.$rp1.keep_addr_on_down
+
+ # Wait for interfaces to be up
+ setup_wait
+}
+
+ipv4_lpm_miss_test()
+{
+ local trap_name="ipv4_lpm_miss"
+ local group_name="l3_drops"
+ local expected_action="trap"
+ local mz_pid
+
+ RET=0
+
+ ping_check $trap_name
+ trap_action_check $trap_name $expected_action
+
+ # Create a VRF without a default route
+ vrf_without_routes_create
+
+ # Generate packets through a VRF without a matching route.
+ $MZ $h1 -t udp "sp=54321,dp=12345" -c 0 -d 1msec -b $rp1mac \
+ -B 203.0.113.1 -q &
+ mz_pid=$!
+
+ devlink_trap_exception_test $trap_name $group_name
+
+ log_test "LPM miss: IPv4"
+
+ kill $mz_pid && wait $mz_pid &> /dev/null
+ vrf_without_routes_destroy
+}
+
+ipv6_lpm_miss_test()
+{
+ local trap_name="ipv6_lpm_miss"
+ local group_name="l3_drops"
+ local expected_action="trap"
+ local mz_pid
+
+ RET=0
+
+ ping_check $trap_name
+ trap_action_check $trap_name $expected_action
+
+ # Create a VRF without a default route
+ vrf_without_routes_create
+
+ # Generate packets through a VRF without a matching route.
+ $MZ -6 $h1 -t udp "sp=54321,dp=12345" -c 0 -d 1msec -b $rp1mac \
+ -B 2001:db8::1 -q &
+ mz_pid=$!
+
+ devlink_trap_exception_test $trap_name $group_name
+
+ log_test "LPM miss: IPv6"
+
+ kill $mz_pid && wait $mz_pid &> /dev/null
+ vrf_without_routes_destroy
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/mirror_gre_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/mirror_gre_scale.sh
new file mode 100644
index 000000000000..f7c168decd1e
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/mirror_gre_scale.sh
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0
+source ../mirror_gre_scale.sh
+
+mirror_gre_get_target()
+{
+ local should_fail=$1; shift
+ local target
+
+ target=$(devlink_resource_size_get span_agents)
+
+ if ((! should_fail)); then
+ echo $target
+ else
+ echo $((target + 1))
+ fi
+}
diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/resource_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/resource_scale.sh
new file mode 100755
index 000000000000..7b2acba82a49
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/resource_scale.sh
@@ -0,0 +1,54 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+lib_dir=$(dirname $0)/../../../../net/forwarding
+
+NUM_NETIFS=6
+source $lib_dir/lib.sh
+source $lib_dir/tc_common.sh
+source $lib_dir/devlink_lib.sh
+
+if [ "$DEVLINK_VIDDID" != "15b3:cf6c" ]; then
+ echo "SKIP: test is tailored for Mellanox Spectrum-2"
+ exit 1
+fi
+
+current_test=""
+
+cleanup()
+{
+ pre_cleanup
+ if [ -n "$current_test" ]; then
+ ${current_test}_cleanup
+ fi
+ # Need to reload in order to avoid router abort.
+ devlink_reload
+}
+
+trap cleanup EXIT
+
+ALL_TESTS="router tc_flower mirror_gre"
+for current_test in ${TESTS:-$ALL_TESTS}; do
+ source ${current_test}_scale.sh
+
+ num_netifs_var=${current_test^^}_NUM_NETIFS
+ num_netifs=${!num_netifs_var:-$NUM_NETIFS}
+
+ for should_fail in 0 1; do
+ RET=0
+ target=$(${current_test}_get_target "$should_fail")
+ ${current_test}_setup_prepare
+ setup_wait $num_netifs
+ ${current_test}_test "$target" "$should_fail"
+ ${current_test}_cleanup
+ devlink_reload
+ if [[ "$should_fail" -eq 0 ]]; then
+ log_test "'$current_test' $target"
+ else
+ log_test "'$current_test' overflow $target"
+ fi
+ done
+done
+current_test=""
+
+exit "$RET"
diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/router_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/router_scale.sh
new file mode 100644
index 000000000000..1897e163e3ab
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/router_scale.sh
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0
+source ../router_scale.sh
+
+router_get_target()
+{
+ local should_fail=$1
+ local target
+
+ target=$(devlink_resource_size_get kvd)
+
+ if [[ $should_fail -eq 0 ]]; then
+ target=$((target * 85 / 100))
+ else
+ target=$((target + 1))
+ fi
+
+ echo $target
+}
diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower_scale.sh
new file mode 100644
index 000000000000..a0795227216e
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower_scale.sh
@@ -0,0 +1,20 @@
+# SPDX-License-Identifier: GPL-2.0
+source ../tc_flower_scale.sh
+
+tc_flower_get_target()
+{
+ local should_fail=$1; shift
+
+ # The driver associates a counter with each tc filter, which means the
+ # number of supported filters is bounded by the number of available
+ # counters.
+ # Currently, the driver supports 12K (12,288) flow counters and six of
+ # these are used for multicast routing.
+ local target=12282
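+ # 12,288 total counters - 6 reserved for multicast routing = 12,282.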
+
+ if ((! should_fail)); then
+ echo $target
+ else
+ echo $((target + 1))
+ fi
+}
diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum/mirror_gre_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum/mirror_gre_scale.sh
index 8d2186c7c62b..f7c168decd1e 100644
--- a/tools/testing/selftests/drivers/net/mlxsw/spectrum/mirror_gre_scale.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum/mirror_gre_scale.sh
@@ -4,10 +4,13 @@ source ../mirror_gre_scale.sh
mirror_gre_get_target()
{
local should_fail=$1; shift
+ local target
+
+ target=$(devlink_resource_size_get span_agents)
if ((! should_fail)); then
- echo 3
+ echo $target
else
- echo 4
+ echo $((target + 1))
fi
}
diff --git a/tools/testing/selftests/drivers/net/mlxsw/vxlan.sh b/tools/testing/selftests/drivers/net/mlxsw/vxlan.sh
index ae6146ec5afd..4632f51af7ab 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/vxlan.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/vxlan.sh
@@ -112,14 +112,16 @@ sanitization_single_dev_mcast_group_test()
RET=0
ip link add dev br0 type bridge mcast_snooping 0
+ ip link add name dummy1 up type dummy
ip link add name vxlan0 up type vxlan id 10 nolearning noudpcsum \
ttl 20 tos inherit local 198.51.100.1 dstport 4789 \
- dev $swp2 group 239.0.0.1
+ dev dummy1 group 239.0.0.1
sanitization_single_dev_test_fail
ip link del dev vxlan0
+ ip link del dev dummy1
ip link del dev br0
log_test "vxlan device with a multicast group"
@@ -181,13 +183,15 @@ sanitization_single_dev_local_interface_test()
RET=0
ip link add dev br0 type bridge mcast_snooping 0
+ ip link add name dummy1 up type dummy
ip link add name vxlan0 up type vxlan id 10 nolearning noudpcsum \
- ttl 20 tos inherit local 198.51.100.1 dstport 4789 dev $swp2
+ ttl 20 tos inherit local 198.51.100.1 dstport 4789 dev dummy1
sanitization_single_dev_test_fail
ip link del dev vxlan0
+ ip link del dev dummy1
ip link del dev br0
log_test "vxlan device with local interface"
diff --git a/tools/testing/selftests/drivers/net/netdevsim/devlink.sh b/tools/testing/selftests/drivers/net/netdevsim/devlink.sh
index 115837355eaf..025a84c2ab5a 100755
--- a/tools/testing/selftests/drivers/net/netdevsim/devlink.sh
+++ b/tools/testing/selftests/drivers/net/netdevsim/devlink.sh
@@ -3,7 +3,9 @@
lib_dir=$(dirname $0)/../../../net/forwarding
-ALL_TESTS="fw_flash_test params_test regions_test"
+ALL_TESTS="fw_flash_test params_test regions_test reload_test \
+ netns_reload_test resource_test dev_info_test \
+ empty_reporter_test dummy_reporter_test"
NUM_NETIFS=0
source $lib_dir/lib.sh
@@ -142,6 +144,305 @@ regions_test()
log_test "regions test"
}
+reload_test()
+{
+ RET=0
+
+ devlink dev reload $DL_HANDLE
+ check_err $? "Failed to reload"
+
+ echo "y"> $DEBUGFS_DIR/fail_reload
+ check_err $? "Failed to setup devlink reload to fail"
+
+ devlink dev reload $DL_HANDLE
+ check_fail $? "Unexpected success of devlink reload"
+
+ echo "n"> $DEBUGFS_DIR/fail_reload
+ check_err $? "Failed to setup devlink reload not to fail"
+
+ devlink dev reload $DL_HANDLE
+ check_err $? "Failed to reload after set not to fail"
+
+ echo "y"> $DEBUGFS_DIR/dont_allow_reload
+ check_err $? "Failed to forbid devlink reload"
+
+ devlink dev reload $DL_HANDLE
+ check_fail $? "Unexpected success of devlink reload"
+
+ echo "n"> $DEBUGFS_DIR/dont_allow_reload
+ check_err $? "Failed to re-enable devlink reload"
+
+ devlink dev reload $DL_HANDLE
+ check_err $? "Failed to reload after re-enable"
+
+ log_test "reload test"
+}
+
+netns_reload_test()
+{
+ RET=0
+
+ ip netns add testns1
+ check_err $? "Failed add netns \"testns1\""
+ ip netns add testns2
+ check_err $? "Failed add netns \"testns2\""
+
+ devlink dev reload $DL_HANDLE netns testns1
+ check_err $? "Failed to reload into netns \"testns1\""
+
+ devlink -N testns1 dev reload $DL_HANDLE netns testns2
+ check_err $? "Failed to reload from netns \"testns1\" into netns \"testns2\""
+
+ ip netns del testns2
+ ip netns del testns1
+
+ log_test "netns reload test"
+}
+
+DUMMYDEV="dummytest"
+
+res_val_get()
+{
+ local netns=$1
+ local parentname=$2
+ local name=$3
+ local type=$4
+
+ cmd_jq "devlink -N $netns resource show $DL_HANDLE -j" \
+ ".[][][] | select(.name == \"$parentname\").resources[] \
+ | select(.name == \"$name\").$type"
+}
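+
+# Example (illustrative): "res_val_get testns1 IPv4 fib occ" prints the
+# current FIB occupancy from "devlink -N testns1 resource show".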
+
+resource_test()
+{
+ RET=0
+
+ ip netns add testns1
+ check_err $? "Failed add netns \"testns1\""
+ ip netns add testns2
+ check_err $? "Failed add netns \"testns2\""
+
+ devlink dev reload $DL_HANDLE netns testns1
+ check_err $? "Failed to reload into netns \"testns1\""
+
+ # Create a dummy device to add the address and routes on.
+
+ ip -n testns1 link add name $DUMMYDEV type dummy
+ check_err $? "Failed create dummy device"
+ ip -n testns1 link set $DUMMYDEV up
+ check_err $? "Failed bring up dummy device"
+ ip -n testns1 a a 192.0.1.1/24 dev $DUMMYDEV
+ check_err $? "Failed add an IP address to dummy device"
+
+ local occ=$(res_val_get testns1 IPv4 fib occ)
+ local limit=$((occ+1))
+
+ # Set the FIB size limit to accommodate only one more route.
+
+ devlink -N testns1 resource set $DL_HANDLE path IPv4/fib size $limit
+ check_err $? "Failed to set IPv4/fib resource size"
+ local size_new=$(res_val_get testns1 IPv4 fib size_new)
+ [ "$size_new" -eq "$limit" ]
+ check_err $? "Unexpected \"size_new\" value (got $size_new, expected $limit)"
+
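+ # "size_new" is the pending value; it only takes effect as "size"
+ # after a devlink reload.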
+ devlink -N testns1 dev reload $DL_HANDLE
+ check_err $? "Failed to reload"
+ local size=$(res_val_get testns1 IPv4 fib size)
+ [ "$size" -eq "$limit" ]
+ check_err $? "Unexpected \"size\" value (got $size, expected $limit)"
+
+ # Insert two routes: the first should succeed, the second should
+ # fail because it exceeds the limit.
+
+ ip -n testns1 r a 192.0.2.0/24 via 192.0.1.2
+ check_err $? "Failed to add route"
+
+ ip -n testns1 r a 192.0.3.0/24 via 192.0.1.2
+ check_fail $? "Unexpected successful route add over limit"
+
+ # Now create another dummy device in the second network namespace
+ # and insert two routes. That exceeds the limit of the netdevsim
+ # instance in the first namespace. Then move the netdevsim instance
+ # into the second namespace and expect the reload to fail.
+
+ ip -n testns2 link add name $DUMMYDEV type dummy
+ check_err $? "Failed create dummy device"
+ ip -n testns2 link set $DUMMYDEV up
+ check_err $? "Failed bring up dummy device"
+ ip -n testns2 a a 192.0.1.1/24 dev $DUMMYDEV
+ check_err $? "Failed add an IP address to dummy device"
+ ip -n testns2 r a 192.0.2.0/24 via 192.0.1.2
+ check_err $? "Failed to add route"
+ ip -n testns2 r a 192.0.3.0/24 via 192.0.1.2
+ check_err $? "Failed to add route"
+
+ devlink -N testns1 dev reload $DL_HANDLE netns testns2
+ check_fail $? "Unexpected successful reload from netns \"testns1\" into netns \"testns2\""
+
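+ # The leading space in ' -1' is presumably there so the negative size
+ # is not parsed as a command-line option; -1 resets the limit.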
+ devlink -N testns2 resource set $DL_HANDLE path IPv4/fib size ' -1'
+ check_err $? "Failed to reset IPv4/fib resource size"
+
+ devlink -N testns2 dev reload $DL_HANDLE netns 1
+ check_err $? "Failed to reload devlink back"
+
+ ip netns del testns2
+ ip netns del testns1
+
+ log_test "resource test"
+}
+
+info_get()
+{
+ local name=$1
+
+ cmd_jq "devlink dev info $DL_HANDLE -j" ".[][][\"$name\"]" "-e"
+}
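+
+# Example (illustrative): info_get "driver" prints the driver name
+# reported by "devlink dev info -j".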
+
+dev_info_test()
+{
+ RET=0
+
+ driver=$(info_get "driver")
+ check_err $? "Failed to get driver name"
+ [ "$driver" == "netdevsim" ]
+ check_err $? "Unexpected driver name $driver"
+
+ log_test "dev_info test"
+}
+
+empty_reporter_test()
+{
+ RET=0
+
+ devlink health show $DL_HANDLE reporter empty >/dev/null
+ check_err $? "Failed show empty reporter"
+
+ devlink health dump show $DL_HANDLE reporter empty >/dev/null
+ check_err $? "Failed show dump of empty reporter"
+
+ devlink health diagnose $DL_HANDLE reporter empty >/dev/null
+ check_err $? "Failed diagnose empty reporter"
+
+ devlink health recover $DL_HANDLE reporter empty
+ check_err $? "Failed recover empty reporter"
+
+ log_test "empty reporter test"
+}
+
+check_reporter_info()
+{
+ local name=$1
+ local expected_state=$2
+ local expected_error=$3
+ local expected_recover=$4
+ local expected_grace_period=$5
+ local expected_auto_recover=$6
+
+ local show=$(devlink health show $DL_HANDLE reporter $name -j | jq -e -r ".[][][]")
+ check_err $? "Failed show $name reporter"
+
+ local state=$(echo $show | jq -r ".state")
+ [ "$state" == "$expected_state" ]
+ check_err $? "Unexpected \"state\" value (got $state, expected $expected_state)"
+
+ local error=$(echo $show | jq -r ".error")
+ [ "$error" == "$expected_error" ]
+ check_err $? "Unexpected \"error\" value (got $error, expected $expected_error)"
+
+ local recover=$(echo $show | jq -r ".recover")
+ [ "$recover" == "$expected_recover" ]
+ check_err $? "Unexpected \"recover\" value (got $recover, expected $expected_recover)"
+
+ local grace_period=$(echo $show | jq -r ".grace_period")
+ check_err $? "Failed get $name reporter grace_period"
+ [ "$grace_period" == "$expected_grace_period" ]
+ check_err $? "Unexpected \"grace_period\" value (got $grace_period, expected $expected_grace_period)"
+
+ local auto_recover=$(echo $show | jq -r ".auto_recover")
+ [ "$auto_recover" == "$expected_auto_recover" ]
+ check_err $? "Unexpected \"auto_recover\" value (got $auto_recover, expected $expected_auto_recover)"
+}
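+
+# Usage: check_reporter_info <name> <state> <error> <recover> <grace_period> <auto_recover>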
+
+dummy_reporter_test()
+{
+ RET=0
+
+ check_reporter_info dummy healthy 0 0 0 false
+
+ local BREAK_MSG="foo bar"
+ echo "$BREAK_MSG"> $DEBUGFS_DIR/health/break_health
+ check_err $? "Failed to break dummy reporter"
+
+ check_reporter_info dummy error 1 0 0 false
+
+ local dump=$(devlink health dump show $DL_HANDLE reporter dummy -j)
+ check_err $? "Failed show dump of dummy reporter"
+
+ local dump_break_msg=$(echo $dump | jq -r ".break_message")
+ [ "$dump_break_msg" == "$BREAK_MSG" ]
+ check_err $? "Unexpected dump break message value (got $dump_break_msg, expected $BREAK_MSG)"
+
+ devlink health dump clear $DL_HANDLE reporter dummy
+ check_err $? "Failed clear dump of dummy reporter"
+
+ devlink health recover $DL_HANDLE reporter dummy
+ check_err $? "Failed recover dummy reporter"
+
+ check_reporter_info dummy healthy 1 1 0 false
+
+ devlink health set $DL_HANDLE reporter dummy auto_recover true
+ check_err $? "Failed to dummy reporter auto_recover option"
+
+ check_reporter_info dummy healthy 1 1 0 true
+
+ echo "$BREAK_MSG"> $DEBUGFS_DIR/health/break_health
+ check_err $? "Failed to break dummy reporter"
+
+ check_reporter_info dummy healthy 2 2 0 true
+
+ local diagnose=$(devlink health diagnose $DL_HANDLE reporter dummy -j -p)
+ check_err $? "Failed show diagnose of dummy reporter"
+
+ local rcvrd_break_msg=$(echo $diagnose | jq -r ".recovered_break_message")
+ [ "$rcvrd_break_msg" == "$BREAK_MSG" ]
+ check_err $? "Unexpected recovered break message value (got $rcvrd_break_msg, expected $BREAK_MSG)"
+
+ devlink health set $DL_HANDLE reporter dummy grace_period 10
+ check_err $? "Failed to dummy reporter grace_period option"
+
+ check_reporter_info dummy healthy 2 2 10 true
+
+ echo "Y"> $DEBUGFS_DIR/health/fail_recover
+ check_err $? "Failed set dummy reporter recovery to fail"
+
+ echo "$BREAK_MSG"> $DEBUGFS_DIR/health/break_health
+ check_fail $? "Unexpected success of dummy reporter break"
+
+ check_reporter_info dummy error 3 2 10 true
+
+ devlink health recover $DL_HANDLE reporter dummy
+ check_fail $? "Unexpected success of dummy reporter recover"
+
+ echo "N"> $DEBUGFS_DIR/health/fail_recover
+ check_err $? "Failed set dummy reporter recovery to be successful"
+
+ devlink health recover $DL_HANDLE reporter dummy
+ check_err $? "Failed recover dummy reporter"
+
+ check_reporter_info dummy healthy 3 3 10 true
+
+ echo 8192> $DEBUGFS_DIR/health/binary_len
+ check_fail $? "Failed set dummy reporter binary len to 8192"
+
+ local dump=$(devlink health dump show $DL_HANDLE reporter dummy -j)
+ check_err $? "Failed show dump of dummy reporter"
+
+ devlink health dump clear $DL_HANDLE reporter dummy
+ check_err $? "Failed clear dump of dummy reporter"
+
+ log_test "dummy reporter test"
+}
+
setup_prepare()
{
modprobe netdevsim
diff --git a/tools/testing/selftests/drivers/net/netdevsim/devlink_in_netns.sh b/tools/testing/selftests/drivers/net/netdevsim/devlink_in_netns.sh
new file mode 100755
index 000000000000..7effd35369e1
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/netdevsim/devlink_in_netns.sh
@@ -0,0 +1,72 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+lib_dir=$(dirname $0)/../../../net/forwarding
+
+ALL_TESTS="check_devlink_test check_ports_test"
+NUM_NETIFS=0
+source $lib_dir/lib.sh
+
+BUS_ADDR=10
+PORT_COUNT=4
+DEV_NAME=netdevsim$BUS_ADDR
+SYSFS_NET_DIR=/sys/bus/netdevsim/devices/$DEV_NAME/net/
+DL_HANDLE=netdevsim/$DEV_NAME
+NETNS_NAME=testns1
+
+port_netdev_get()
+{
+ local port_index=$1
+
+ cmd_jq "devlink -N $NETNS_NAME port show -j" \
+ ".[][\"$DL_HANDLE/$port_index\"].netdev" "-e"
+}
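+
+# Example (illustrative): "port_netdev_get 0" prints the netdev name of
+# port $DL_HANDLE/0 as reported by "devlink port show".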
+
+check_ports_test()
+{
+ RET=0
+
+ for i in $(seq 0 $((PORT_COUNT - 1))); do
+ netdev_name=$(port_netdev_get $i)
+ check_err $? "Failed to get netdev name for port $DL_HANDLE/$i"
+ ip -n $NETNS_NAME link show $netdev_name &> /dev/null
+ check_err $? "Failed to find netdev $netdev_name"
+ done
+
+ log_test "check ports test"
+}
+
+check_devlink_test()
+{
+ RET=0
+
+ devlink -N $NETNS_NAME dev show $DL_HANDLE &> /dev/null
+ check_err $? "Failed to show devlink instance"
+
+ log_test "check devlink test"
+}
+
+setup_prepare()
+{
+ modprobe netdevsim
+ ip netns add $NETNS_NAME
+ ip netns exec $NETNS_NAME \
+ echo "$BUS_ADDR $PORT_COUNT" > /sys/bus/netdevsim/new_device
+ while [ ! -d $SYSFS_NET_DIR ] ; do :; done
+}
+
+cleanup()
+{
+ pre_cleanup
+ echo "$BUS_ADDR" > /sys/bus/netdevsim/del_device
+ ip netns del $NETNS_NAME
+ modprobe -r netdevsim
+}
+
+trap cleanup EXIT
+
+setup_prepare
+
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/gen_kselftest_tar.sh b/tools/testing/selftests/gen_kselftest_tar.sh
index a27e2eec3586..8b2b6088540d 100755
--- a/tools/testing/selftests/gen_kselftest_tar.sh
+++ b/tools/testing/selftests/gen_kselftest_tar.sh
@@ -38,16 +38,21 @@ main()
esac
fi
- install_dir=./kselftest
+ # Create working directory.
+ dest=`pwd`
+ install_work="$dest"/kselftest_install
+ install_name=kselftest
+ install_dir="$install_work"/"$install_name"
+ mkdir -p "$install_dir"
-# Run install using INSTALL_KSFT_PATH override to generate install
-# directory
-./kselftest_install.sh
-tar $copts kselftest${ext} $install_dir
-echo "Kselftest archive kselftest${ext} created!"
+ # Run install using INSTALL_KSFT_PATH override to generate install
+ # directory
+ ./kselftest_install.sh "$install_dir"
+ (cd "$install_work"; tar $copts "$dest"/kselftest${ext} $install_name)
+ echo "Kselftest archive kselftest${ext} created!"
-# clean up install directory
-rm -rf kselftest
+ # clean up top-level install work directory
+ rm -rf "$install_work"
}
main "$@"
diff --git a/tools/testing/selftests/kselftest_module.sh b/tools/testing/selftests/kselftest/module.sh
index 18e1c7992d30..18e1c7992d30 100755
--- a/tools/testing/selftests/kselftest_module.sh
+++ b/tools/testing/selftests/kselftest/module.sh
diff --git a/tools/testing/selftests/kselftest_install.sh b/tools/testing/selftests/kselftest_install.sh
index e2e1911d62d5..407af7da7037 100755
--- a/tools/testing/selftests/kselftest_install.sh
+++ b/tools/testing/selftests/kselftest_install.sh
@@ -6,30 +6,30 @@
# Author: Shuah Khan <shuahkh@osg.samsung.com>
# Copyright (C) 2015 Samsung Electronics Co., Ltd.
-install_loc=`pwd`
-
main()
{
- if [ $(basename $install_loc) != "selftests" ]; then
+ base_dir=`pwd`
+ install_dir="$base_dir"/kselftest_install
+
+ # Make sure we're in the selftests top-level directory.
+ if [ $(basename "$base_dir") != "selftests" ]; then
echo "$0: Please run it in selftests directory ..."
exit 1;
fi
+
+ # Only allow installation into an existing location.
if [ "$#" -eq 0 ]; then
- echo "$0: Installing in default location - $install_loc ..."
+ echo "$0: Installing in default location - $install_dir ..."
elif [ ! -d "$1" ]; then
echo "$0: $1 doesn't exist!!"
exit 1;
else
- install_loc=$1
- echo "$0: Installing in specified location - $install_loc ..."
+ install_dir="$1"
+ echo "$0: Installing in specified location - $install_dir ..."
fi
- install_dir=$install_loc/kselftest_install
-
-# Create install directory
- mkdir -p $install_dir
-# Build tests
- KSFT_INSTALL_PATH=$install_dir make install
+ # Build tests
+ KSFT_INSTALL_PATH="$install_dir" make install
}
main "$@"
diff --git a/tools/testing/selftests/kvm/.gitignore b/tools/testing/selftests/kvm/.gitignore
index 409c1fa75e03..30072c3f52fb 100644
--- a/tools/testing/selftests/kvm/.gitignore
+++ b/tools/testing/selftests/kvm/.gitignore
@@ -13,6 +13,7 @@
/x86_64/vmx_dirty_log_test
/x86_64/vmx_set_nested_state_test
/x86_64/vmx_tsc_adjust_test
+/x86_64/xss_msr_test
/clear_dirty_log_test
/dirty_log_test
/kvm_create_max_vcpus
diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index c5ec868fa1e5..3138a916574a 100644
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -25,6 +25,7 @@ TEST_GEN_PROGS_x86_64 += x86_64/vmx_close_while_nested_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_dirty_log_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_set_nested_state_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_tsc_adjust_test
+TEST_GEN_PROGS_x86_64 += x86_64/xss_msr_test
TEST_GEN_PROGS_x86_64 += clear_dirty_log_test
TEST_GEN_PROGS_x86_64 += dirty_log_test
TEST_GEN_PROGS_x86_64 += kvm_create_max_vcpus
diff --git a/tools/testing/selftests/kvm/include/x86_64/processor.h b/tools/testing/selftests/kvm/include/x86_64/processor.h
index ff234018219c..635ee6c33ad2 100644
--- a/tools/testing/selftests/kvm/include/x86_64/processor.h
+++ b/tools/testing/selftests/kvm/include/x86_64/processor.h
@@ -308,6 +308,8 @@ struct kvm_x86_state *vcpu_save_state(struct kvm_vm *vm, uint32_t vcpuid);
void vcpu_load_state(struct kvm_vm *vm, uint32_t vcpuid,
struct kvm_x86_state *state);
+struct kvm_msr_list *kvm_get_msr_index_list(void);
+
struct kvm_cpuid2 *kvm_get_supported_cpuid(void);
void vcpu_set_cpuid(struct kvm_vm *vm, uint32_t vcpuid,
struct kvm_cpuid2 *cpuid);
@@ -322,10 +324,13 @@ kvm_get_supported_cpuid_entry(uint32_t function)
}
uint64_t vcpu_get_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index);
+int _vcpu_set_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index,
+ uint64_t msr_value);
void vcpu_set_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index,
uint64_t msr_value);
-uint32_t kvm_get_cpuid_max(void);
+uint32_t kvm_get_cpuid_max_basic(void);
+uint32_t kvm_get_cpuid_max_extended(void);
void kvm_get_cpu_address_width(unsigned int *pa_bits, unsigned int *va_bits);
/*
diff --git a/tools/testing/selftests/kvm/kvm_create_max_vcpus.c b/tools/testing/selftests/kvm/kvm_create_max_vcpus.c
index 231d79e57774..6f38c3dc0d56 100644
--- a/tools/testing/selftests/kvm/kvm_create_max_vcpus.c
+++ b/tools/testing/selftests/kvm/kvm_create_max_vcpus.c
@@ -29,12 +29,9 @@ void test_vcpu_creation(int first_vcpu_id, int num_vcpus)
vm = vm_create(VM_MODE_DEFAULT, DEFAULT_GUEST_PHY_PAGES, O_RDWR);
- for (i = 0; i < num_vcpus; i++) {
- int vcpu_id = first_vcpu_id + i;
-
+ for (i = first_vcpu_id; i < first_vcpu_id + num_vcpus; i++)
/* This asserts that the vCPU was created. */
- vm_vcpu_add(vm, vcpu_id);
- }
+ vm_vcpu_add(vm, i);
kvm_vm_free(vm);
}
diff --git a/tools/testing/selftests/kvm/lib/assert.c b/tools/testing/selftests/kvm/lib/assert.c
index 4911fc77d0f6..d1cf9f6e0e6b 100644
--- a/tools/testing/selftests/kvm/lib/assert.c
+++ b/tools/testing/selftests/kvm/lib/assert.c
@@ -55,7 +55,7 @@ static void test_dump_stack(void)
#pragma GCC diagnostic pop
}
-static pid_t gettid(void)
+static pid_t _gettid(void)
{
return syscall(SYS_gettid);
}
@@ -72,7 +72,7 @@ test_assert(bool exp, const char *exp_str,
fprintf(stderr, "==== Test Assertion Failure ====\n"
" %s:%u: %s\n"
" pid=%d tid=%d - %s\n",
- file, line, exp_str, getpid(), gettid(),
+ file, line, exp_str, getpid(), _gettid(),
strerror(errno));
test_dump_stack();
if (fmt) {
diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c
index 6698cb741e10..683d3bdb8f6a 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/processor.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c
@@ -869,7 +869,7 @@ uint64_t vcpu_get_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index)
return buffer.entry.data;
}
-/* VCPU Set MSR
+/* _VCPU Set MSR
*
* Input Args:
* vm - Virtual Machine
@@ -879,12 +879,12 @@ uint64_t vcpu_get_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index)
*
* Output Args: None
*
- * Return: On success, nothing. On failure a TEST_ASSERT is produced.
+ * Return: The result of KVM_SET_MSRS.
*
- * Set value of MSR for VCPU.
+ * Sets the value of an MSR for the given VCPU.
*/
-void vcpu_set_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index,
- uint64_t msr_value)
+int _vcpu_set_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index,
+ uint64_t msr_value)
{
struct vcpu *vcpu = vcpu_find(vm, vcpuid);
struct {
@@ -899,6 +899,29 @@ void vcpu_set_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index,
buffer.entry.index = msr_index;
buffer.entry.data = msr_value;
r = ioctl(vcpu->fd, KVM_SET_MSRS, &buffer.header);
+ return r;
+}
+
+/* VCPU Set MSR
+ *
+ * Input Args:
+ * vm - Virtual Machine
+ * vcpuid - VCPU ID
+ * msr_index - Index of MSR
+ * msr_value - New value of MSR
+ *
+ * Output Args: None
+ *
+ * Return: On success, nothing. On failure a TEST_ASSERT is produced.
+ *
+ * Set value of MSR for VCPU.
+ */
+void vcpu_set_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index,
+ uint64_t msr_value)
+{
+ int r;
+
+ r = _vcpu_set_msr(vm, vcpuid, msr_index, msr_value);
TEST_ASSERT(r == 1, "KVM_SET_MSRS IOCTL failed,\n"
" rc: %i errno: %i", r, errno);
}
@@ -1000,19 +1023,45 @@ struct kvm_x86_state {
struct kvm_msrs msrs;
};
-static int kvm_get_num_msrs(struct kvm_vm *vm)
+static int kvm_get_num_msrs_fd(int kvm_fd)
{
struct kvm_msr_list nmsrs;
int r;
nmsrs.nmsrs = 0;
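/* With nmsrs = 0, KVM fails with E2BIG and fills in the required count. */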
- r = ioctl(vm->kvm_fd, KVM_GET_MSR_INDEX_LIST, &nmsrs);
+ r = ioctl(kvm_fd, KVM_GET_MSR_INDEX_LIST, &nmsrs);
TEST_ASSERT(r == -1 && errno == E2BIG, "Unexpected result from KVM_GET_MSR_INDEX_LIST probe, r: %i",
r);
return nmsrs.nmsrs;
}
+static int kvm_get_num_msrs(struct kvm_vm *vm)
+{
+ return kvm_get_num_msrs_fd(vm->kvm_fd);
+}
+
+struct kvm_msr_list *kvm_get_msr_index_list(void)
+{
+ struct kvm_msr_list *list;
+ int nmsrs, r, kvm_fd;
+
+ kvm_fd = open(KVM_DEV_PATH, O_RDONLY);
+ if (kvm_fd < 0)
+ exit(KSFT_SKIP);
+
+ nmsrs = kvm_get_num_msrs_fd(kvm_fd);
+ list = malloc(sizeof(*list) + nmsrs * sizeof(list->indices[0]));
+ list->nmsrs = nmsrs;
+ r = ioctl(kvm_fd, KVM_GET_MSR_INDEX_LIST, list);
+ close(kvm_fd);
+
+ TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_MSR_INDEX_LIST, r: %i",
+ r);
+
+ return list;
+}
+
struct kvm_x86_state *vcpu_save_state(struct kvm_vm *vm, uint32_t vcpuid)
{
struct vcpu *vcpu = vcpu_find(vm, vcpuid);
@@ -1158,7 +1207,12 @@ bool is_intel_cpu(void)
return (ebx == chunk[0] && edx == chunk[1] && ecx == chunk[2]);
}
-uint32_t kvm_get_cpuid_max(void)
+uint32_t kvm_get_cpuid_max_basic(void)
+{
+ return kvm_get_supported_cpuid_entry(0)->eax;
+}
+
+uint32_t kvm_get_cpuid_max_extended(void)
{
return kvm_get_supported_cpuid_entry(0x80000000)->eax;
}
@@ -1169,7 +1223,7 @@ void kvm_get_cpu_address_width(unsigned int *pa_bits, unsigned int *va_bits)
bool pae;
/* SDM 4.1.4 */
- if (kvm_get_cpuid_max() < 0x80000008) {
+ if (kvm_get_cpuid_max_extended() < 0x80000008) {
pae = kvm_get_supported_cpuid_entry(1)->edx & (1 << 6);
*pa_bits = pae ? 36 : 32;
*va_bits = 32;
diff --git a/tools/testing/selftests/kvm/s390x/sync_regs_test.c b/tools/testing/selftests/kvm/s390x/sync_regs_test.c
index d5290b4ad636..b705637ca14b 100644
--- a/tools/testing/selftests/kvm/s390x/sync_regs_test.c
+++ b/tools/testing/selftests/kvm/s390x/sync_regs_test.c
@@ -25,12 +25,15 @@
static void guest_code(void)
{
- register u64 stage asm("11") = 0;
-
- for (;;) {
- GUEST_SYNC(0);
- asm volatile ("ahi %0,1" : : "r"(stage));
- }
+ /*
+ * We embed diag 501 here instead of doing a ucall to keep the
+ * compiler from clobbering r11 by the time the ucall is made.
+ */
+ asm volatile (
+ "0: diag 0,0,0x501\n"
+ " ahi 11,1\n"
+ " j 0b\n"
+ );
}
#define REG_COMPARE(reg) \
diff --git a/tools/testing/selftests/kvm/x86_64/xss_msr_test.c b/tools/testing/selftests/kvm/x86_64/xss_msr_test.c
new file mode 100644
index 000000000000..851ea81b9d9f
--- /dev/null
+++ b/tools/testing/selftests/kvm/x86_64/xss_msr_test.c
@@ -0,0 +1,76 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019, Google LLC.
+ *
+ * Tests for the IA32_XSS MSR.
+ */
+
+#define _GNU_SOURCE /* for program_invocation_short_name */
+#include <sys/ioctl.h>
+
+#include "test_util.h"
+#include "kvm_util.h"
+#include "vmx.h"
+
+#define VCPU_ID 1
+#define MSR_BITS 64
+
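+/* Bit 3 of CPUID.(EAX=0DH, ECX=1):EAX enumerates XSAVES/IA32_XSS support. */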
+#define X86_FEATURE_XSAVES (1<<3)
+
+bool is_supported_msr(u32 msr_index)
+{
+ struct kvm_msr_list *list;
+ bool found = false;
+ int i;
+
+ list = kvm_get_msr_index_list();
+ for (i = 0; i < list->nmsrs; ++i) {
+ if (list->indices[i] == msr_index) {
+ found = true;
+ break;
+ }
+ }
+
+ free(list);
+ return found;
+}
+
+int main(int argc, char *argv[])
+{
+ struct kvm_cpuid_entry2 *entry;
+ bool xss_supported = false;
+ struct kvm_vm *vm;
+ uint64_t xss_val;
+ int i, r;
+
+ /* Create VM */
+ vm = vm_create_default(VCPU_ID, 0, 0);
+
+ if (kvm_get_cpuid_max_basic() >= 0xd) {
+ entry = kvm_get_supported_cpuid_index(0xd, 1);
+ xss_supported = entry && !!(entry->eax & X86_FEATURE_XSAVES);
+ }
+ if (!xss_supported) {
+ printf("IA32_XSS is not supported by the vCPU.\n");
+ exit(KSFT_SKIP);
+ }
+
+ xss_val = vcpu_get_msr(vm, VCPU_ID, MSR_IA32_XSS);
+ TEST_ASSERT(xss_val == 0,
+ "MSR_IA32_XSS should be initialized to zero\n");
+
+ vcpu_set_msr(vm, VCPU_ID, MSR_IA32_XSS, xss_val);
+ /*
+ * At present, KVM only supports a guest IA32_XSS value of 0. Verify
+ * that trying to set the guest IA32_XSS to an unsupported value fails.
+ * Should a non-zero value ever succeed in the future, also check that
+ * IA32_XSS appears in KVM_GET_MSR_INDEX_LIST.
+ */
+ for (i = 0; i < MSR_BITS; ++i) {
+ r = _vcpu_set_msr(vm, VCPU_ID, MSR_IA32_XSS, 1ull << i);
+ TEST_ASSERT(r == 0 || is_supported_msr(MSR_IA32_XSS),
+ "IA32_XSS was able to be set, but was not found in KVM_GET_MSR_INDEX_LIST.\n");
+ }
+
+ kvm_vm_free(vm);
+}
diff --git a/tools/testing/selftests/lib/bitmap.sh b/tools/testing/selftests/lib/bitmap.sh
index 5511dddc5c2d..00a416fbc0ef 100755
--- a/tools/testing/selftests/lib/bitmap.sh
+++ b/tools/testing/selftests/lib/bitmap.sh
@@ -1,3 +1,3 @@
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0
-$(dirname $0)/../kselftest_module.sh "bitmap" test_bitmap
+$(dirname $0)/../kselftest/module.sh "bitmap" test_bitmap
diff --git a/tools/testing/selftests/lib/prime_numbers.sh b/tools/testing/selftests/lib/prime_numbers.sh
index 43b28f24e453..370b79a9cb2e 100755
--- a/tools/testing/selftests/lib/prime_numbers.sh
+++ b/tools/testing/selftests/lib/prime_numbers.sh
@@ -1,4 +1,4 @@
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0
# Checks fast/slow prime_number generation for inconsistencies
-$(dirname $0)/../kselftest_module.sh "prime numbers" prime_numbers selftest=65536
+$(dirname $0)/../kselftest/module.sh "prime numbers" prime_numbers selftest=65536
diff --git a/tools/testing/selftests/lib/printf.sh b/tools/testing/selftests/lib/printf.sh
index 2ffa61da0296..05f4544e87f9 100755
--- a/tools/testing/selftests/lib/printf.sh
+++ b/tools/testing/selftests/lib/printf.sh
@@ -1,4 +1,4 @@
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0
# Tests the printf infrastructure using test_printf kernel module.
-$(dirname $0)/../kselftest_module.sh "printf" test_printf
+$(dirname $0)/../kselftest/module.sh "printf" test_printf
diff --git a/tools/testing/selftests/lib/strscpy.sh b/tools/testing/selftests/lib/strscpy.sh
index 71f2be6afba6..be60ef6e1a7f 100755
--- a/tools/testing/selftests/lib/strscpy.sh
+++ b/tools/testing/selftests/lib/strscpy.sh
@@ -1,3 +1,3 @@
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0+
-$(dirname $0)/../kselftest_module.sh "strscpy*" test_strscpy
+$(dirname $0)/../kselftest/module.sh "strscpy*" test_strscpy
diff --git a/tools/testing/selftests/livepatch/Makefile b/tools/testing/selftests/livepatch/Makefile
index fd405402c3ff..1cf40a9e7185 100644
--- a/tools/testing/selftests/livepatch/Makefile
+++ b/tools/testing/selftests/livepatch/Makefile
@@ -4,6 +4,7 @@ TEST_PROGS_EXTENDED := functions.sh
TEST_PROGS := \
test-livepatch.sh \
test-callbacks.sh \
- test-shadow-vars.sh
+ test-shadow-vars.sh \
+ test-state.sh
include ../lib.mk
diff --git a/tools/testing/selftests/livepatch/settings b/tools/testing/selftests/livepatch/settings
new file mode 100644
index 000000000000..e7b9417537fb
--- /dev/null
+++ b/tools/testing/selftests/livepatch/settings
@@ -0,0 +1 @@
+timeout=0
diff --git a/tools/testing/selftests/livepatch/test-state.sh b/tools/testing/selftests/livepatch/test-state.sh
new file mode 100755
index 000000000000..dc2908c22c26
--- /dev/null
+++ b/tools/testing/selftests/livepatch/test-state.sh
@@ -0,0 +1,180 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (C) 2019 SUSE
+
+. $(dirname $0)/functions.sh
+
+MOD_LIVEPATCH=test_klp_state
+MOD_LIVEPATCH2=test_klp_state2
+MOD_LIVEPATCH3=test_klp_state3
+
+set_dynamic_debug
+
+
+# TEST: Loading and removing a module that modifies the system state
+
+echo -n "TEST: system state modification ... "
+dmesg -C
+
+load_lp $MOD_LIVEPATCH
+disable_lp $MOD_LIVEPATCH
+unload_lp $MOD_LIVEPATCH
+
+check_result "% modprobe $MOD_LIVEPATCH
+livepatch: enabling patch '$MOD_LIVEPATCH'
+livepatch: '$MOD_LIVEPATCH': initializing patching transition
+$MOD_LIVEPATCH: pre_patch_callback: vmlinux
+$MOD_LIVEPATCH: allocate_loglevel_state: allocating space to store console_loglevel
+livepatch: '$MOD_LIVEPATCH': starting patching transition
+livepatch: '$MOD_LIVEPATCH': completing patching transition
+$MOD_LIVEPATCH: post_patch_callback: vmlinux
+$MOD_LIVEPATCH: fix_console_loglevel: fixing console_loglevel
+livepatch: '$MOD_LIVEPATCH': patching complete
+% echo 0 > /sys/kernel/livepatch/$MOD_LIVEPATCH/enabled
+livepatch: '$MOD_LIVEPATCH': initializing unpatching transition
+$MOD_LIVEPATCH: pre_unpatch_callback: vmlinux
+$MOD_LIVEPATCH: restore_console_loglevel: restoring console_loglevel
+livepatch: '$MOD_LIVEPATCH': starting unpatching transition
+livepatch: '$MOD_LIVEPATCH': completing unpatching transition
+$MOD_LIVEPATCH: post_unpatch_callback: vmlinux
+$MOD_LIVEPATCH: free_loglevel_state: freeing space for the stored console_loglevel
+livepatch: '$MOD_LIVEPATCH': unpatching complete
+% rmmod $MOD_LIVEPATCH"
+
+
+# TEST: Take over system state change by a cumulative patch
+
+echo -n "TEST: taking over system state modification ... "
+dmesg -C
+
+load_lp $MOD_LIVEPATCH
+load_lp $MOD_LIVEPATCH2
+unload_lp $MOD_LIVEPATCH
+disable_lp $MOD_LIVEPATCH2
+unload_lp $MOD_LIVEPATCH2
+
+check_result "% modprobe $MOD_LIVEPATCH
+livepatch: enabling patch '$MOD_LIVEPATCH'
+livepatch: '$MOD_LIVEPATCH': initializing patching transition
+$MOD_LIVEPATCH: pre_patch_callback: vmlinux
+$MOD_LIVEPATCH: allocate_loglevel_state: allocating space to store console_loglevel
+livepatch: '$MOD_LIVEPATCH': starting patching transition
+livepatch: '$MOD_LIVEPATCH': completing patching transition
+$MOD_LIVEPATCH: post_patch_callback: vmlinux
+$MOD_LIVEPATCH: fix_console_loglevel: fixing console_loglevel
+livepatch: '$MOD_LIVEPATCH': patching complete
+% modprobe $MOD_LIVEPATCH2
+livepatch: enabling patch '$MOD_LIVEPATCH2'
+livepatch: '$MOD_LIVEPATCH2': initializing patching transition
+$MOD_LIVEPATCH2: pre_patch_callback: vmlinux
+$MOD_LIVEPATCH2: allocate_loglevel_state: space to store console_loglevel already allocated
+livepatch: '$MOD_LIVEPATCH2': starting patching transition
+livepatch: '$MOD_LIVEPATCH2': completing patching transition
+$MOD_LIVEPATCH2: post_patch_callback: vmlinux
+$MOD_LIVEPATCH2: fix_console_loglevel: taking over the console_loglevel change
+livepatch: '$MOD_LIVEPATCH2': patching complete
+% rmmod $MOD_LIVEPATCH
+% echo 0 > /sys/kernel/livepatch/$MOD_LIVEPATCH2/enabled
+livepatch: '$MOD_LIVEPATCH2': initializing unpatching transition
+$MOD_LIVEPATCH2: pre_unpatch_callback: vmlinux
+$MOD_LIVEPATCH2: restore_console_loglevel: restoring console_loglevel
+livepatch: '$MOD_LIVEPATCH2': starting unpatching transition
+livepatch: '$MOD_LIVEPATCH2': completing unpatching transition
+$MOD_LIVEPATCH2: post_unpatch_callback: vmlinux
+$MOD_LIVEPATCH2: free_loglevel_state: freeing space for the stored console_loglevel
+livepatch: '$MOD_LIVEPATCH2': unpatching complete
+% rmmod $MOD_LIVEPATCH2"
+
+
+# TEST: Take over system state change by a cumulative patch
+
+echo -n "TEST: compatible cumulative livepatches ... "
+dmesg -C
+
+load_lp $MOD_LIVEPATCH2
+load_lp $MOD_LIVEPATCH3
+unload_lp $MOD_LIVEPATCH2
+load_lp $MOD_LIVEPATCH2
+disable_lp $MOD_LIVEPATCH2
+unload_lp $MOD_LIVEPATCH2
+unload_lp $MOD_LIVEPATCH3
+
+check_result "% modprobe $MOD_LIVEPATCH2
+livepatch: enabling patch '$MOD_LIVEPATCH2'
+livepatch: '$MOD_LIVEPATCH2': initializing patching transition
+$MOD_LIVEPATCH2: pre_patch_callback: vmlinux
+$MOD_LIVEPATCH2: allocate_loglevel_state: allocating space to store console_loglevel
+livepatch: '$MOD_LIVEPATCH2': starting patching transition
+livepatch: '$MOD_LIVEPATCH2': completing patching transition
+$MOD_LIVEPATCH2: post_patch_callback: vmlinux
+$MOD_LIVEPATCH2: fix_console_loglevel: fixing console_loglevel
+livepatch: '$MOD_LIVEPATCH2': patching complete
+% modprobe $MOD_LIVEPATCH3
+livepatch: enabling patch '$MOD_LIVEPATCH3'
+livepatch: '$MOD_LIVEPATCH3': initializing patching transition
+$MOD_LIVEPATCH3: pre_patch_callback: vmlinux
+$MOD_LIVEPATCH3: allocate_loglevel_state: space to store console_loglevel already allocated
+livepatch: '$MOD_LIVEPATCH3': starting patching transition
+livepatch: '$MOD_LIVEPATCH3': completing patching transition
+$MOD_LIVEPATCH3: post_patch_callback: vmlinux
+$MOD_LIVEPATCH3: fix_console_loglevel: taking over the console_loglevel change
+livepatch: '$MOD_LIVEPATCH3': patching complete
+% rmmod $MOD_LIVEPATCH2
+% modprobe $MOD_LIVEPATCH2
+livepatch: enabling patch '$MOD_LIVEPATCH2'
+livepatch: '$MOD_LIVEPATCH2': initializing patching transition
+$MOD_LIVEPATCH2: pre_patch_callback: vmlinux
+$MOD_LIVEPATCH2: allocate_loglevel_state: space to store console_loglevel already allocated
+livepatch: '$MOD_LIVEPATCH2': starting patching transition
+livepatch: '$MOD_LIVEPATCH2': completing patching transition
+$MOD_LIVEPATCH2: post_patch_callback: vmlinux
+$MOD_LIVEPATCH2: fix_console_loglevel: taking over the console_loglevel change
+livepatch: '$MOD_LIVEPATCH2': patching complete
+% echo 0 > /sys/kernel/livepatch/$MOD_LIVEPATCH2/enabled
+livepatch: '$MOD_LIVEPATCH2': initializing unpatching transition
+$MOD_LIVEPATCH2: pre_unpatch_callback: vmlinux
+$MOD_LIVEPATCH2: restore_console_loglevel: restoring console_loglevel
+livepatch: '$MOD_LIVEPATCH2': starting unpatching transition
+livepatch: '$MOD_LIVEPATCH2': completing unpatching transition
+$MOD_LIVEPATCH2: post_unpatch_callback: vmlinux
+$MOD_LIVEPATCH2: free_loglevel_state: freeing space for the stored console_loglevel
+livepatch: '$MOD_LIVEPATCH2': unpatching complete
+% rmmod $MOD_LIVEPATCH2
+% rmmod $MOD_LIVEPATCH3"
+
+
+# TEST: Failure caused by incompatible cumulative livepatches
+
+echo -n "TEST: incompatible cumulative livepatches ... "
+dmesg -C
+
+load_lp $MOD_LIVEPATCH2
+load_failing_mod $MOD_LIVEPATCH
+disable_lp $MOD_LIVEPATCH2
+unload_lp $MOD_LIVEPATCH2
+
+check_result "% modprobe $MOD_LIVEPATCH2
+livepatch: enabling patch '$MOD_LIVEPATCH2'
+livepatch: '$MOD_LIVEPATCH2': initializing patching transition
+$MOD_LIVEPATCH2: pre_patch_callback: vmlinux
+$MOD_LIVEPATCH2: allocate_loglevel_state: allocating space to store console_loglevel
+livepatch: '$MOD_LIVEPATCH2': starting patching transition
+livepatch: '$MOD_LIVEPATCH2': completing patching transition
+$MOD_LIVEPATCH2: post_patch_callback: vmlinux
+$MOD_LIVEPATCH2: fix_console_loglevel: fixing console_loglevel
+livepatch: '$MOD_LIVEPATCH2': patching complete
+% modprobe $MOD_LIVEPATCH
+livepatch: Livepatch patch ($MOD_LIVEPATCH) is not compatible with the already installed livepatches.
+modprobe: ERROR: could not insert '$MOD_LIVEPATCH': Invalid argument
+% echo 0 > /sys/kernel/livepatch/$MOD_LIVEPATCH2/enabled
+livepatch: '$MOD_LIVEPATCH2': initializing unpatching transition
+$MOD_LIVEPATCH2: pre_unpatch_callback: vmlinux
+$MOD_LIVEPATCH2: restore_console_loglevel: restoring console_loglevel
+livepatch: '$MOD_LIVEPATCH2': starting unpatching transition
+livepatch: '$MOD_LIVEPATCH2': completing unpatching transition
+$MOD_LIVEPATCH2: post_unpatch_callback: vmlinux
+$MOD_LIVEPATCH2: free_loglevel_state: freeing space for the stored console_loglevel
+livepatch: '$MOD_LIVEPATCH2': unpatching complete
+% rmmod $MOD_LIVEPATCH2"
+
+exit 0
diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile
index 0bd6b23c97ef..a8e04d665b69 100644
--- a/tools/testing/selftests/net/Makefile
+++ b/tools/testing/selftests/net/Makefile
@@ -10,7 +10,7 @@ TEST_PROGS += fib_tests.sh fib-onlink-tests.sh pmtu.sh udpgso.sh ip_defrag.sh
TEST_PROGS += udpgso_bench.sh fib_rule_tests.sh msg_zerocopy.sh psock_snd.sh
TEST_PROGS += udpgro_bench.sh udpgro.sh test_vxlan_under_vrf.sh reuseport_addr_any.sh
TEST_PROGS += test_vxlan_fdb_changelink.sh so_txtime.sh ipv6_flowlabel.sh
-TEST_PROGS += tcp_fastopen_backup_key.sh fcnal-test.sh l2tp.sh
+TEST_PROGS += tcp_fastopen_backup_key.sh fcnal-test.sh l2tp.sh traceroute.sh
TEST_PROGS_EXTENDED := in_netns.sh
TEST_GEN_FILES = socket nettest
TEST_GEN_FILES += psock_fanout psock_tpacket msg_zerocopy reuseport_addr_any
diff --git a/tools/testing/selftests/net/altnames.sh b/tools/testing/selftests/net/altnames.sh
new file mode 100755
index 000000000000..4254ddc3f70b
--- /dev/null
+++ b/tools/testing/selftests/net/altnames.sh
@@ -0,0 +1,75 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+lib_dir=$(dirname $0)/forwarding
+
+ALL_TESTS="altnames_test"
+NUM_NETIFS=0
+source $lib_dir/lib.sh
+
+DUMMY_DEV=dummytest
+SHORT_NAME=shortname
+LONG_NAME=someveryveryveryveryveryverylongname
+
+altnames_test()
+{
+ RET=0
+ local output
+ local name
+
+ ip link property add $DUMMY_DEV altname $SHORT_NAME
+ check_err $? "Failed to add short alternative name"
+
+ output=$(ip -j -p link show $SHORT_NAME)
+ check_err $? "Failed to do link show with short alternative name"
+
+ name=$(echo $output | jq -e -r ".[0].altnames[0]")
+ check_err $? "Failed to get short alternative name from link show JSON"
+
+ [ "$name" == "$SHORT_NAME" ]
+ check_err $? "Got unexpected short alternative name from link show JSON"
+
+ ip -j -p link show $DUMMY_DEV &>/dev/null
+ check_err $? "Failed to do link show with original name"
+
+ ip link property add $DUMMY_DEV altname $LONG_NAME
+ check_err $? "Failed to add long alternative name"
+
+ output=$(ip -j -p link show $LONG_NAME)
+ check_err $? "Failed to do link show with long alternative name"
+
+ name=$(echo $output | jq -e -r ".[0].altnames[1]")
+ check_err $? "Failed to get long alternative name from link show JSON"
+
+ [ "$name" == "$LONG_NAME" ]
+ check_err $? "Got unexpected long alternative name from link show JSON"
+
+ ip link property del $DUMMY_DEV altname $SHORT_NAME
+ check_err $? "Failed to add short alternative name"
+
+ ip -j -p link show $SHORT_NAME &>/dev/null
+ check_fail $? "Unexpected success while trying to do link show with deleted short alternative name"
+
+ # The long name is intentionally left in place; it is removed along with the device.
+
+ log_test "altnames test"
+}
+
+setup_prepare()
+{
+ ip link add name $DUMMY_DEV type dummy
+}
+
+cleanup()
+{
+ pre_cleanup
+ ip link del name $DUMMY_DEV
+}
+
+trap cleanup EXIT
+
+setup_prepare
+
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/fib_tests.sh b/tools/testing/selftests/net/fib_tests.sh
index 76c1897e6352..6dd403103800 100755
--- a/tools/testing/selftests/net/fib_tests.sh
+++ b/tools/testing/selftests/net/fib_tests.sh
@@ -9,7 +9,7 @@ ret=0
ksft_skip=4
# all tests in this script. Can be overridden with -t option
-TESTS="unregister down carrier nexthop suppress ipv6_rt ipv4_rt ipv6_addr_metric ipv4_addr_metric ipv6_route_metrics ipv4_route_metrics ipv4_route_v6_gw rp_filter"
+TESTS="unregister down carrier nexthop suppress ipv6_rt ipv4_rt ipv6_addr_metric ipv4_addr_metric ipv6_route_metrics ipv4_route_metrics ipv4_route_v6_gw rp_filter ipv4_del_addr"
VERBOSE=0
PAUSE_ON_FAIL=no
@@ -1500,6 +1500,55 @@ ipv4_route_metrics_test()
route_cleanup
}
+ipv4_del_addr_test()
+{
+ echo
+ echo "IPv4 delete address route tests"
+
+ setup
+
+ set -e
+ $IP li add dummy1 type dummy
+ $IP li set dummy1 up
+ $IP li add dummy2 type dummy
+ $IP li set dummy2 up
+ $IP li add red type vrf table 1111
+ $IP li set red up
+ $IP ro add vrf red unreachable default
+ $IP li set dummy2 vrf red
+
+ $IP addr add dev dummy1 172.16.104.1/24
+ $IP addr add dev dummy1 172.16.104.11/24
+ $IP addr add dev dummy2 172.16.104.1/24
+ $IP addr add dev dummy2 172.16.104.11/24
+ $IP route add 172.16.105.0/24 via 172.16.104.2 src 172.16.104.11
+ $IP route add vrf red 172.16.105.0/24 via 172.16.104.2 src 172.16.104.11
+ set +e
+
+ # Removing an address from a device in a VRF should only remove the route from the VRF table.
+ $IP addr del dev dummy2 172.16.104.11/24
+ $IP ro ls vrf red | grep -q 172.16.105.0/24
+ log_test $? 1 "Route removed from VRF when source address deleted"
+
+ $IP ro ls | grep -q 172.16.105.0/24
+ log_test $? 0 "Route in default VRF not removed"
+
+ $IP addr add dev dummy2 172.16.104.11/24
+ $IP route add vrf red 172.16.105.0/24 via 172.16.104.2 src 172.16.104.11
+
+ $IP addr del dev dummy1 172.16.104.11/24
+ $IP ro ls | grep -q 172.16.105.0/24
+ log_test $? 1 "Route removed in default VRF when source address deleted"
+
+ $IP ro ls vrf red | grep -q 172.16.105.0/24
+ log_test $? 0 "Route in VRF is not removed by address delete"
+
+ $IP li del dummy1
+ $IP li del dummy2
+ cleanup
+}
+
ipv4_route_v6_gw_test()
{
local rc
@@ -1633,6 +1682,7 @@ do
ipv4_route_test|ipv4_rt) ipv4_route_test;;
ipv6_addr_metric) ipv6_addr_metric_test;;
ipv4_addr_metric) ipv4_addr_metric_test;;
+ ipv4_del_addr) ipv4_del_addr_test;;
ipv6_route_metrics) ipv6_route_metrics_test;;
ipv4_route_metrics) ipv4_route_metrics_test;;
ipv4_route_v6_gw) ipv4_route_v6_gw_test;;
diff --git a/tools/testing/selftests/net/forwarding/devlink_lib.sh b/tools/testing/selftests/net/forwarding/devlink_lib.sh
index 13d03a6d85ba..40b076983239 100644
--- a/tools/testing/selftests/net/forwarding/devlink_lib.sh
+++ b/tools/testing/selftests/net/forwarding/devlink_lib.sh
@@ -355,3 +355,58 @@ devlink_trap_group_stats_idle_test()
return 1
fi
}
+
+devlink_trap_exception_test()
+{
+ local trap_name=$1; shift
+ local group_name=$1; shift
+
+ devlink_trap_stats_idle_test $trap_name
+ check_fail $? "Trap stats idle when packets should have been trapped"
+
+ devlink_trap_group_stats_idle_test $group_name
+ check_fail $? "Trap group idle when packets should have been trapped"
+}
+
+devlink_trap_drop_test()
+{
+ local trap_name=$1; shift
+ local group_name=$1; shift
+ local dev=$1; shift
+
+ # This is the common part of all the tests. It checks that stats are
+ # initially idle, then non-idle after changing the trap action and
+ # finally idle again. It also makes sure the packets are dropped and
+ # never forwarded.
+ devlink_trap_stats_idle_test $trap_name
+ check_err $? "Trap stats not idle with initial drop action"
+ devlink_trap_group_stats_idle_test $group_name
+ check_err $? "Trap group stats not idle with initial drop action"
+
+ devlink_trap_action_set $trap_name "trap"
+ devlink_trap_stats_idle_test $trap_name
+ check_fail $? "Trap stats idle after setting action to trap"
+ devlink_trap_group_stats_idle_test $group_name
+ check_fail $? "Trap group stats idle after setting action to trap"
+
+ devlink_trap_action_set $trap_name "drop"
+
+ devlink_trap_stats_idle_test $trap_name
+ check_err $? "Trap stats not idle after setting action to drop"
+ devlink_trap_group_stats_idle_test $group_name
+ check_err $? "Trap group stats not idle after setting action to drop"
+
+ tc_check_packets "dev $dev egress" 101 0
+ check_err $? "Packets were not dropped"
+}
+
+devlink_trap_drop_cleanup()
+{
+ local mz_pid=$1; shift
+ local dev=$1; shift
+ local proto=$1; shift
+
+ kill $mz_pid && wait $mz_pid &> /dev/null
+ tc filter del dev $dev egress protocol $proto pref 1 handle 101 flower
+}
diff --git a/tools/testing/selftests/net/forwarding/ethtool.sh b/tools/testing/selftests/net/forwarding/ethtool.sh
new file mode 100755
index 000000000000..eb8e2a23bbb4
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/ethtool.sh
@@ -0,0 +1,318 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+ALL_TESTS="
+ same_speeds_autoneg_off
+ different_speeds_autoneg_off
+ combination_of_neg_on_and_off
+ advertise_subset_of_speeds
+ check_highest_speed_is_chosen
+ different_speeds_autoneg_on
+"
+NUM_NETIFS=2
+source lib.sh
+source ethtool_lib.sh
+
+h1_create()
+{
+ simple_if_init $h1 192.0.2.1/24
+}
+
+h1_destroy()
+{
+ simple_if_fini $h1 192.0.2.1/24
+}
+
+h2_create()
+{
+ simple_if_init $h2 192.0.2.2/24
+}
+
+h2_destroy()
+{
+ simple_if_fini $h2 192.0.2.2/24
+}
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ h2=${NETIFS[p2]}
+
+ h1_create
+ h2_create
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ h2_destroy
+ h1_destroy
+}
+
+different_speeds_get()
+{
+ local dev1=$1; shift
+ local dev2=$1; shift
+ local with_mode=$1; shift
+ local adver=$1; shift
+
+ local -a speeds_arr
+
+ speeds_arr=($(common_speeds_get $dev1 $dev2 $with_mode $adver))
+ if [[ ${#speeds_arr[@]} < 2 ]]; then
+ check_err 1 "cannot check different speeds. There are not enough speeds"
+ fi
+
+ echo ${speeds_arr[0]} ${speeds_arr[1]}
+}
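+
+# Example (illustrative): "different_speeds_get $h1 $h2 0 0" prints two
+# distinct speeds supported by both interfaces, e.g. "1000 10000".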
+
+same_speeds_autoneg_off()
+{
+ # Check that when each of the reported speeds is forced, the links come
+ # up and are operational.
+ local -a speeds_arr=($(common_speeds_get $h1 $h2 0 0))
+
+ for speed in "${speeds_arr[@]}"; do
+ RET=0
+ ethtool_set $h1 speed $speed autoneg off
+ ethtool_set $h2 speed $speed autoneg off
+
+ setup_wait_dev_with_timeout $h1
+ setup_wait_dev_with_timeout $h2
+ ping_do $h1 192.0.2.2
+ check_err $? "speed $speed autoneg off"
+ log_test "force of same speed autoneg off"
+ log_info "speed = $speed"
+ done
+
+ ethtool -s $h2 autoneg on
+ ethtool -s $h1 autoneg on
+}
+
+different_speeds_autoneg_off()
+{
+ # Test that when we force different speeds, links are not up and ping
+ # fails.
+ RET=0
+
+ local -a speeds_arr=($(different_speeds_get $h1 $h2 0 0))
+ local speed1=${speeds_arr[0]}
+ local speed2=${speeds_arr[1]}
+
+ ethtool_set $h1 speed $speed1 autoneg off
+ ethtool_set $h2 speed $speed2 autoneg off
+
+ setup_wait_dev_with_timeout $h1
+ setup_wait_dev_with_timeout $h2
+ ping_do $h1 192.0.2.2
+ check_fail $? "ping with different speeds"
+
+ log_test "force of different speeds autoneg off"
+
+ ethtool -s $h2 autoneg on
+ ethtool -s $h1 autoneg on
+}
+
+combination_of_neg_on_and_off()
+{
+ # Test that when one device is forced to a speed supported by both
+ # endpoints and the other device is configured to autoneg on, the links
+ # are up and ping passes.
+ local -a speeds_arr=($(common_speeds_get $h1 $h2 0 1))
+
+ for speed in "${speeds_arr[@]}"; do
+ RET=0
+ ethtool_set $h1 speed $speed autoneg off
+
+ setup_wait_dev_with_timeout $h1
+ setup_wait_dev_with_timeout $h2
+ ping_do $h1 192.0.2.2
+ check_err $? "h1-speed=$speed autoneg off, h2 autoneg on"
+ log_test "one side with autoneg off and another with autoneg on"
+ log_info "force speed = $speed"
+ done
+
+ ethtool -s $h1 autoneg on
+}
+
+hex_speed_value_get()
+{
+ local speed=$1; shift
+
+ local shift_size=${speed_values[$speed]}
+ speed=$((0x1 << $"shift_size"))
+ printf "%#x" "$speed"
+}
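+
+# Worked example (values illustrative): with speed_values[1000baseT/Full]=5,
+# "hex_speed_value_get 1000baseT/Full" prints 0x20 (1 << 5).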
+
+subset_of_common_speeds_get()
+{
+ local dev1=$1; shift
+ local dev2=$1; shift
+ local adver=$1; shift
+
+ local -a speeds_arr=($(common_speeds_get $dev1 $dev2 0 $adver))
+ local speed_to_advertise=0
+ local speed_to_remove=${speeds_arr[0]}
+ speed_to_remove+='base'
+
+ local -a speeds_mode_arr=($(common_speeds_get $dev1 $dev2 1 $adver))
+
+ for speed in ${speeds_mode_arr[@]}; do
+ if [[ $speed != $speed_to_remove* ]]; then
+ speed=$(hex_speed_value_get $speed)
+ speed_to_advertise=$(($speed_to_advertise | \
+ $speed))
+ fi
+ done
+
+ # Convert to hex.
+ printf "%#x" "$speed_to_advertise"
+}
+
+speed_to_advertise_get()
+{
+ # Print the hex value composed by OR-ing all the link modes that
+ # correspond to the provided speed.
+ local speed_without_mode=$1; shift
+ local supported_speeds=("$@"); shift
+ local speed_to_advertise=0
+
+ speed_without_mode+='base'
+
+ for speed in ${supported_speeds[@]}; do
+ if [[ $speed == $speed_without_mode* ]]; then
+ speed=$(hex_speed_value_get $speed)
+ speed_to_advertise=$(($speed_to_advertise | \
+ $speed))
+ fi
+ done
+
+ # Convert to hex.
+ printf "%#x" "$speed_to_advertise"
+}
+
+advertise_subset_of_speeds()
+{
+ # Test that when one device advertises a subset of speeds and another
+ # advertises a specific speed (but all modes of this speed), the links
+ # are up and ping passes.
+ RET=0
+
+ local speed_1_to_advertise=$(subset_of_common_speeds_get $h1 $h2 1)
+ ethtool_set $h1 advertise $speed_1_to_advertise
+
+ if [ $RET != 0 ]; then
+ log_test "advertise subset of speeds"
+ return
+ fi
+
+ local -a speeds_arr_without_mode=($(common_speeds_get $h1 $h2 0 1))
+ # Check only speeds that h1 advertised. Remove the first speed.
+ unset speeds_arr_without_mode[0]
+ local -a speeds_arr_with_mode=($(common_speeds_get $h1 $h2 1 1))
+
+ for speed_value in ${speeds_arr_without_mode[@]}; do
+ RET=0
+ local speed_2_to_advertise=$(speed_to_advertise_get $speed_value \
+ "${speeds_arr_with_mode[@]}")
+ ethtool_set $h2 advertise $speed_2_to_advertise
+
+ setup_wait_dev_with_timeout $h1
+ setup_wait_dev_with_timeout $h2
+ ping_do $h1 192.0.2.2
+ check_err $? "h1=$speed_1_to_advertise, h2=$speed_2_to_advertise ($speed_value)"
+
+ log_test "advertise subset of speeds"
+ log_info "h1=$speed_1_to_advertise, h2=$speed_2_to_advertise"
+ done
+
+ ethtool -s $h2 autoneg on
+ ethtool -s $h1 autoneg on
+}
+
+check_highest_speed_is_chosen()
+{
+ # Test that when one device advertises a subset of speeds, the other
+ # chooses the highest speed. This test checks configuration without
+ # traffic.
+ RET=0
+
+ local max_speed
+ local chosen_speed
+ local speed_to_advertise=$(subset_of_common_speeds_get $h1 $h2 1)
+
+ ethtool_set $h1 advertise $speed_to_advertise
+
+ if [ $RET != 0 ]; then
+ log_test "check highest speed"
+ return
+ fi
+
+ local -a speeds_arr=($(common_speeds_get $h1 $h2 0 1))
+ # Remove the first speed, h1 does not advertise this speed.
+ unset speeds_arr[0]
+
+ max_speed=${speeds_arr[0]}
+ for current in ${speeds_arr[@]}; do
+ if [[ $current -gt $max_speed ]]; then
+ max_speed=$current
+ fi
+ done
+
+ setup_wait_dev_with_timeout $h1
+ setup_wait_dev_with_timeout $h2
+ chosen_speed=$(ethtool $h1 | grep 'Speed:')
+ chosen_speed=${chosen_speed%"Mb/s"*}
+ chosen_speed=${chosen_speed#*"Speed: "}
+ ((chosen_speed == max_speed))
+ check_err $? "h1 advertise $speed_to_advertise, h2 sync to speed $chosen_speed"
+
+ log_test "check highest speed"
+
+ ethtool -s $h2 autoneg on
+ ethtool -s $h1 autoneg on
+}
+
+different_speeds_autoneg_on()
+{
+ # Test that when we configure links to advertise different speeds,
+ # links are not up and ping fails.
+ RET=0
+
+ local -a speeds=($(different_speeds_get $h1 $h2 1 1))
+ local speed1=${speeds[0]}
+ local speed2=${speeds[1]}
+
+ speed1=$(hex_speed_value_get $speed1)
+ speed2=$(hex_speed_value_get $speed2)
+
+ ethtool_set $h1 advertise $speed1
+ ethtool_set $h2 advertise $speed2
+
+ if (($RET)); then
+ setup_wait_dev_with_timeout $h1
+ setup_wait_dev_with_timeout $h2
+ ping_do $h1 192.0.2.2
+ check_fail $? "ping with different speeds autoneg on"
+ fi
+
+ log_test "advertise different speeds autoneg on"
+
+ ethtool -s $h2 autoneg on
+ ethtool -s $h1 autoneg on
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+
+declare -gA speed_values
+eval "speed_values=($(speeds_arr_get))"
+
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/ethtool_lib.sh b/tools/testing/selftests/net/forwarding/ethtool_lib.sh
new file mode 100755
index 000000000000..925d229a59d8
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/ethtool_lib.sh
@@ -0,0 +1,69 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+speeds_arr_get()
+{
+ cmd='/ETHTOOL_LINK_MODE_[^[:space:]]*_BIT[[:space:]]+=[[:space:]]+/ \
+ {sub(/,$/, "") \
+ sub(/ETHTOOL_LINK_MODE_/,"") \
+ sub(/_BIT/,"") \
+ sub(/_Full/,"/Full") \
+ sub(/_Half/,"/Half");\
+ print "["$1"]="$3}'
+
+ awk "${cmd}" /usr/include/linux/ethtool.h
+}
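+
+# Emits associative-array entries such as [10baseT/Half]=0 (illustrative),
+# mapping each ETHTOOL_LINK_MODE_*_BIT name to its bit index.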
+
+ethtool_set()
+{
+ local cmd="$@"
+ local out=$(ethtool -s $cmd 2>&1 | wc -l)
+
+ check_err $out "error in configuration. $cmd"
+}
+
+dev_speeds_get()
+{
+ local dev=$1; shift
+ local with_mode=$1; shift
+ local adver=$1; shift
+ local speeds_str
+
+ if (($adver)); then
+ mode="Advertised link modes"
+ else
+ mode="Supported link modes"
+ fi
+
+ speeds_str=$(ethtool "$dev" | \
+ # Snip everything before the link modes section.
+ sed -n '/'"$mode"':/,$p' | \
+ # Quit processing the rest at the start of the next section.
+ # When checking, skip the header of this section (hence the 2,).
+ sed -n '2,${/^[\t][^ \t]/q};p' | \
+ # Drop the section header of the current section.
+ cut -d':' -f2)
+
+ local -a speeds_arr=($speeds_str)
+ if [[ $with_mode -eq 0 ]]; then
+ for ((i=0; i<${#speeds_arr[@]}; i++)); do
+ speeds_arr[$i]=${speeds_arr[$i]%base*}
+ done
+ fi
+ echo ${speeds_arr[@]}
+}
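+
+# A sketch of typical (device-dependent) results:
+#   dev_speeds_get swp1 1 0  ->  "1000baseKX/Full 10000baseKR/Full"
+#   dev_speeds_get swp1 0 0  ->  "1000 10000"
+# With with_mode=0 everything from "base" onwards is stripped, leaving
+# only the numeric speeds.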
+
+common_speeds_get()
+{
+ dev1=$1; shift
+ dev2=$1; shift
+ with_mode=$1; shift
+ adver=$1; shift
+
+ local -a dev1_speeds=($(dev_speeds_get $dev1 $with_mode $adver))
+ local -a dev2_speeds=($(dev_speeds_get $dev2 $with_mode $adver))
+
+ comm -12 \
+ <(printf '%s\n' "${dev1_speeds[@]}" | sort -u) \
+ <(printf '%s\n' "${dev2_speeds[@]}" | sort -u)
+}
diff --git a/tools/testing/selftests/net/forwarding/lib.sh b/tools/testing/selftests/net/forwarding/lib.sh
index 85c587a03c8a..1f64e7348f69 100644
--- a/tools/testing/selftests/net/forwarding/lib.sh
+++ b/tools/testing/selftests/net/forwarding/lib.sh
@@ -18,6 +18,8 @@ NETIF_CREATE=${NETIF_CREATE:=yes}
MCD=${MCD:=smcrouted}
MC_CLI=${MC_CLI:=smcroutectl}
PING_TIMEOUT=${PING_TIMEOUT:=5}
+WAIT_TIMEOUT=${WAIT_TIMEOUT:=20}
+INTERFACE_TIMEOUT=${INTERFACE_TIMEOUT:=600}
relative_path="${BASH_SOURCE%/*}"
if [[ "$relative_path" == "${BASH_SOURCE}" ]]; then
@@ -226,24 +228,45 @@ log_info()
setup_wait_dev()
{
local dev=$1; shift
+ local wait_time=${1:-$WAIT_TIME}; shift
- while true; do
+ setup_wait_dev_with_timeout "$dev" $INTERFACE_TIMEOUT $wait_time
+
+ if (($?)); then
+ check_err 1
+ log_test setup_wait_dev ": Interface $dev does not come up."
+ exit 1
+ fi
+}
+
+setup_wait_dev_with_timeout()
+{
+ local dev=$1; shift
+ local max_iterations=${1:-$WAIT_TIMEOUT}; shift
+ local wait_time=${1:-$WAIT_TIME}; shift
+ local i
+
+ for ((i = 1; i <= $max_iterations; ++i)); do
ip link show dev $dev up \
| grep 'state UP' &> /dev/null
if [[ $? -ne 0 ]]; then
sleep 1
else
- break
+ sleep $wait_time
+ return 0
fi
done
+
+ return 1
}
setup_wait()
{
local num_netifs=${1:-$NUM_NETIFS}
+ local i
for ((i = 1; i <= num_netifs; ++i)); do
- setup_wait_dev ${NETIFS[p$i]}
+ setup_wait_dev ${NETIFS[p$i]} 0
done
# Make sure links are ready.
@@ -254,6 +277,7 @@ cmd_jq()
{
local cmd=$1
local jq_exp=$2
+ local jq_opts=$3
local ret
local output
@@ -263,7 +287,11 @@ cmd_jq()
if [[ $ret -ne 0 ]]; then
return $ret
fi
- output=$(echo $output | jq -r "$jq_exp")
+ output=$(echo $output | jq -r $jq_opts "$jq_exp")
+ ret=$?
+ if [[ $ret -ne 0 ]]; then
+ return $ret
+ fi
echo $output
# return success only in case of non-empty output
[ ! -z "$output" ]
diff --git a/tools/testing/selftests/net/forwarding/tc_common.sh b/tools/testing/selftests/net/forwarding/tc_common.sh
index 315e934358d4..d93589bd4d1d 100644
--- a/tools/testing/selftests/net/forwarding/tc_common.sh
+++ b/tools/testing/selftests/net/forwarding/tc_common.sh
@@ -14,3 +14,14 @@ tc_check_packets()
select(.options.actions[0].stats.packets == $count)" \
&> /dev/null
}
+
+tc_check_packets_hitting()
+{
+ local id=$1
+ local handle=$2
+
+ cmd_jq "tc -j -s filter show $id" \
+ ".[] | select(.options.handle == $handle) | \
+ select(.options.actions[0].stats.packets > 0)" \
+ &> /dev/null
+}
diff --git a/tools/testing/selftests/net/so_txtime.c b/tools/testing/selftests/net/so_txtime.c
index 53f598f06647..34df4c8882af 100644
--- a/tools/testing/selftests/net/so_txtime.c
+++ b/tools/testing/selftests/net/so_txtime.c
@@ -105,8 +105,8 @@ static void do_recv_one(int fdr, struct timed_send *ts)
tstop = (gettime_ns() - glob_tstart) / 1000;
texpect = ts->delay_us >= 0 ? ts->delay_us : 0;
- fprintf(stderr, "payload:%c delay:%ld expected:%ld (us)\n",
- rbuf[0], tstop, texpect);
+ fprintf(stderr, "payload:%c delay:%lld expected:%lld (us)\n",
+ rbuf[0], (long long)tstop, (long long)texpect);
if (rbuf[0] != ts->data)
error(1, 0, "payload mismatch. expected %c", ts->data);
diff --git a/tools/testing/selftests/net/tcp_mmap.c b/tools/testing/selftests/net/tcp_mmap.c
index 31ced79f4f25..35505b31e5cc 100644
--- a/tools/testing/selftests/net/tcp_mmap.c
+++ b/tools/testing/selftests/net/tcp_mmap.c
@@ -71,7 +71,7 @@
#define MSG_ZEROCOPY 0x4000000
#endif
-#define FILE_SZ (1UL << 35)
+#define FILE_SZ (1ULL << 35)
static int cfg_family = AF_INET6;
static socklen_t cfg_alen = sizeof(struct sockaddr_in6);
static int cfg_port = 8787;
@@ -82,7 +82,9 @@ static int zflg; /* zero copy option. (MSG_ZEROCOPY for sender, mmap() for recei
static int xflg; /* hash received data (simple xor) (-h option) */
static int keepflag; /* -k option: receiver shall keep all received file in memory (no munmap() calls) */
-static int chunk_size = 512*1024;
+static size_t chunk_size = 512*1024;
+
+static size_t map_align;
unsigned long htotal;
@@ -118,6 +120,9 @@ void hash_zone(void *zone, unsigned int length)
htotal = temp;
}
+#define ALIGN_UP(x, align_to) (((x) + ((align_to)-1)) & ~((align_to)-1))
+#define ALIGN_PTR_UP(p, ptr_align_to) ((typeof(p))ALIGN_UP((unsigned long)(p), ptr_align_to))
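+/* For example (a sketch): ALIGN_UP(0x201000, 1UL << 21) == 0x400000;
+ * ALIGN_PTR_UP() applies the same rounding to a pointer value. The
+ * alignment must be a power of two.
+ */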
+
void *child_thread(void *arg)
{
unsigned long total_mmap = 0, total = 0;
@@ -126,6 +131,7 @@ void *child_thread(void *arg)
int flags = MAP_SHARED;
struct timeval t0, t1;
char *buffer = NULL;
+ void *raddr = NULL;
void *addr = NULL;
double throughput;
struct rusage ru;
@@ -142,9 +148,13 @@ void *child_thread(void *arg)
goto error;
}
if (zflg) {
- addr = mmap(NULL, chunk_size, PROT_READ, flags, fd, 0);
- if (addr == (void *)-1)
+ raddr = mmap(NULL, chunk_size + map_align, PROT_READ, flags, fd, 0);
+ if (raddr == (void *)-1) {
+ perror("mmap");
zflg = 0;
+ } else {
+ addr = ALIGN_PTR_UP(raddr, map_align);
+ }
}
while (1) {
struct pollfd pfd = { .fd = fd, .events = POLLIN, };
@@ -155,7 +165,7 @@ void *child_thread(void *arg)
socklen_t zc_len = sizeof(zc);
int res;
- zc.address = (__u64)addr;
+ zc.address = (__u64)((unsigned long)addr);
zc.length = chunk_size;
zc.recv_skip_hint = 0;
res = getsockopt(fd, IPPROTO_TCP, TCP_ZEROCOPY_RECEIVE,
@@ -222,7 +232,7 @@ error:
free(buffer);
close(fd);
if (zflg)
- munmap(addr, chunk_size);
+ munmap(raddr, chunk_size + map_align);
pthread_exit(0);
}
@@ -270,6 +280,11 @@ static void setup_sockaddr(int domain, const char *str_addr,
static void do_accept(int fdlisten)
{
+ pthread_attr_t attr;
+
+ pthread_attr_init(&attr);
+ pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
+
if (setsockopt(fdlisten, SOL_SOCKET, SO_RCVLOWAT,
&chunk_size, sizeof(chunk_size)) == -1) {
perror("setsockopt SO_RCVLOWAT");
@@ -288,7 +303,7 @@ static void do_accept(int fdlisten)
perror("accept");
continue;
}
- res = pthread_create(&th, NULL, child_thread,
+ res = pthread_create(&th, &attr, child_thread,
(void *)(unsigned long)fd);
if (res) {
errno = res;
@@ -298,18 +313,42 @@ static void do_accept(int fdlisten)
}
}
+/* Each thread should reserve a big enough vma to avoid
+ * spinlock collisions in its page-table (ptl) locks.
+ * This size is 2MB on x86_64, and is exported in /proc/meminfo.
+ */
+static unsigned long default_huge_page_size(void)
+{
+ FILE *f = fopen("/proc/meminfo", "r");
+ unsigned long hps = 0;
+ size_t linelen = 0;
+ char *line = NULL;
+
+ if (!f)
+ return 0;
+ while (getline(&line, &linelen, f) > 0) {
+ if (sscanf(line, "Hugepagesize: %lu kB", &hps) == 1) {
+ hps <<= 10;
+ break;
+ }
+ }
+ free(line);
+ fclose(f);
+ return hps;
+}
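+
+/* For example, a typical x86_64 /proc/meminfo contains the line
+ * "Hugepagesize:       2048 kB", for which this returns
+ * 2048 << 10 == 2097152 bytes (2MB).
+ */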
+
int main(int argc, char *argv[])
{
struct sockaddr_storage listenaddr, addr;
unsigned int max_pacing_rate = 0;
- unsigned long total = 0;
+ size_t total = 0;
char *host = NULL;
int fd, c, on = 1;
char *buffer;
int sflg = 0;
int mss = 0;
- while ((c = getopt(argc, argv, "46p:svr:w:H:zxkP:M:")) != -1) {
+ while ((c = getopt(argc, argv, "46p:svr:w:H:zxkP:M:C:a:")) != -1) {
switch (c) {
case '4':
cfg_family = PF_INET;
@@ -349,10 +388,24 @@ int main(int argc, char *argv[])
case 'P':
max_pacing_rate = atoi(optarg) ;
break;
+ case 'C':
+ chunk_size = atol(optarg);
+ break;
+ case 'a':
+ map_align = atol(optarg);
+ break;
default:
exit(1);
}
}
+ if (!map_align) {
+ map_align = default_huge_page_size();
+ /* If /proc/meminfo does not help, fall back to the
+ * default x86_64 huge page size.
+ */
+ if (!map_align)
+ map_align = 2*1024*1024;
+ }
if (sflg) {
int fdlisten = socket(cfg_family, SOCK_STREAM, 0);
@@ -417,7 +470,7 @@ int main(int argc, char *argv[])
zflg = 0;
}
while (total < FILE_SZ) {
- long wr = FILE_SZ - total;
+ ssize_t wr = FILE_SZ - total;
if (wr > chunk_size)
wr = chunk_size;
diff --git a/tools/testing/selftests/net/traceroute.sh b/tools/testing/selftests/net/traceroute.sh
new file mode 100755
index 000000000000..de9ca97abc30
--- /dev/null
+++ b/tools/testing/selftests/net/traceroute.sh
@@ -0,0 +1,322 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Run traceroute/traceroute6 tests
+#
+
+VERBOSE=0
+PAUSE_ON_FAIL=no
+
+################################################################################
+#
+log_test()
+{
+ local rc=$1
+ local expected=$2
+ local msg="$3"
+
+ if [ ${rc} -eq ${expected} ]; then
+ printf "TEST: %-60s [ OK ]\n" "${msg}"
+ nsuccess=$((nsuccess+1))
+ else
+ ret=1
+ nfail=$((nfail+1))
+ printf "TEST: %-60s [FAIL]\n" "${msg}"
+ if [ "${PAUSE_ON_FAIL}" = "yes" ]; then
+ echo
+ echo "hit enter to continue, 'q' to quit"
+ read a
+ [ "$a" = "q" ] && exit 1
+ fi
+ fi
+}
+
+run_cmd()
+{
+ local ns
+ local cmd
+ local out
+ local rc
+
+ ns="$1"
+ shift
+ cmd="$*"
+
+ if [ "$VERBOSE" = "1" ]; then
+ printf " COMMAND: $cmd\n"
+ fi
+
+ out=$(eval ip netns exec ${ns} ${cmd} 2>&1)
+ rc=$?
+ if [ "$VERBOSE" = "1" -a -n "$out" ]; then
+ echo " $out"
+ fi
+
+ [ "$VERBOSE" = "1" ] && echo
+
+ return $rc
+}
+
+################################################################################
+# create namespaces and interconnects
+
+create_ns()
+{
+ local ns=$1
+ local addr=$2
+ local addr6=$3
+
+ [ -z "${addr}" ] && addr="-"
+ [ -z "${addr6}" ] && addr6="-"
+
+ ip netns add ${ns}
+
+ ip netns exec ${ns} ip link set lo up
+ if [ "${addr}" != "-" ]; then
+ ip netns exec ${ns} ip addr add dev lo ${addr}
+ fi
+ if [ "${addr6}" != "-" ]; then
+ ip netns exec ${ns} ip -6 addr add dev lo ${addr6}
+ fi
+
+ ip netns exec ${ns} ip ro add unreachable default metric 8192
+ ip netns exec ${ns} ip -6 ro add unreachable default metric 8192
+
+ ip netns exec ${ns} sysctl -qw net.ipv4.ip_forward=1
+ ip netns exec ${ns} sysctl -qw net.ipv6.conf.all.keep_addr_on_down=1
+ ip netns exec ${ns} sysctl -qw net.ipv6.conf.all.forwarding=1
+ ip netns exec ${ns} sysctl -qw net.ipv6.conf.default.forwarding=1
+ ip netns exec ${ns} sysctl -qw net.ipv6.conf.default.accept_dad=0
+}
+
+# create veth pair to connect namespaces and apply addresses.
+connect_ns()
+{
+ local ns1=$1
+ local ns1_dev=$2
+ local ns1_addr=$3
+ local ns1_addr6=$4
+ local ns2=$5
+ local ns2_dev=$6
+ local ns2_addr=$7
+ local ns2_addr6=$8
+
+ ip netns exec ${ns1} ip li add ${ns1_dev} type veth peer name tmp
+ ip netns exec ${ns1} ip li set ${ns1_dev} up
+ ip netns exec ${ns1} ip li set tmp netns ${ns2} name ${ns2_dev}
+ ip netns exec ${ns2} ip li set ${ns2_dev} up
+
+ if [ "${ns1_addr}" != "-" ]; then
+ ip netns exec ${ns1} ip addr add dev ${ns1_dev} ${ns1_addr}
+ fi
+
+ if [ "${ns2_addr}" != "-" ]; then
+ ip netns exec ${ns2} ip addr add dev ${ns2_dev} ${ns2_addr}
+ fi
+
+ if [ "${ns1_addr6}" != "-" ]; then
+ ip netns exec ${ns1} ip addr add dev ${ns1_dev} ${ns1_addr6}
+ fi
+
+ if [ "${ns2_addr6}" != "-" ]; then
+ ip netns exec ${ns2} ip addr add dev ${ns2_dev} ${ns2_addr6}
+ fi
+}
+
+################################################################################
+# traceroute6 test
+#
+# Verify that in this scenario
+#
+# ------------------------ N2
+# | |
+# ------ ------ N3 ----
+# | R1 | | R2 |------|H2|
+# ------ ------ ----
+# | |
+# ------------------------ N1
+# |
+# ----
+# |H1|
+# ----
+#
+# where H1's default route goes through R1 and R1's default route goes
+# through R2 over N2, traceroute6 from H1 to H2 reports R2's address
+# on N2 and not N1.
+#
+# Addresses are assigned as follows:
+#
+# N1: 2000:101::/64
+# N2: 2000:102::/64
+# N3: 2000:103::/64
+#
+# R1's host part of address: 1
+# R2's host part of address: 2
+# H1's host part of address: 3
+# H2's host part of address: 4
+#
+# For example:
+# the IPv6 address of R1's interface on N2 is 2000:102::1/64
+
+cleanup_traceroute6()
+{
+ local ns
+
+ for ns in host-1 host-2 router-1 router-2
+ do
+ ip netns del ${ns} 2>/dev/null
+ done
+}
+
+setup_traceroute6()
+{
+ brdev=br0
+
+ # start clean
+ cleanup_traceroute6
+
+ set -e
+ create_ns host-1
+ create_ns host-2
+ create_ns router-1
+ create_ns router-2
+
+ # Setup N3
+ connect_ns router-2 eth3 - 2000:103::2/64 host-2 eth3 - 2000:103::4/64
+ ip netns exec host-2 ip route add default via 2000:103::2
+
+ # Setup N2
+ connect_ns router-1 eth2 - 2000:102::1/64 router-2 eth2 - 2000:102::2/64
+ ip netns exec router-1 ip route add default via 2000:102::2
+
+ # Setup N1. host-1 and router-2 connect to a bridge in router-1.
+ ip netns exec router-1 ip link add name ${brdev} type bridge
+ ip netns exec router-1 ip link set ${brdev} up
+ ip netns exec router-1 ip addr add 2000:101::1/64 dev ${brdev}
+
+ connect_ns host-1 eth0 - 2000:101::3/64 router-1 eth0 - -
+ ip netns exec router-1 ip link set dev eth0 master ${brdev}
+ ip netns exec host-1 ip route add default via 2000:101::1
+
+ connect_ns router-2 eth1 - 2000:101::2/64 router-1 eth1 - -
+ ip netns exec router-1 ip link set dev eth1 master ${brdev}
+
+ # Prime the network
+ ip netns exec host-1 ping6 -c5 2000:103::4 >/dev/null 2>&1
+
+ set +e
+}
+
+run_traceroute6()
+{
+ if [ ! -x "$(command -v traceroute6)" ]; then
+ echo "SKIP: Could not run IPV6 test without traceroute6"
+ return
+ fi
+
+ setup_traceroute6
+
+ # traceroute6 host-2 from host-1 (expects 2000:102::2)
+ run_cmd host-1 "traceroute6 2000:103::4 | grep -q 2000:102::2"
+ log_test $? 0 "IPV6 traceroute"
+
+ cleanup_traceroute6
+}
+
+################################################################################
+# traceroute test
+#
+# Verify that traceroute from H1 to H2 shows 1.0.1.1 in this scenario
+#
+# 1.0.3.1/24
+# ---- 1.0.1.3/24 1.0.1.1/24 ---- 1.0.2.1/24 1.0.2.4/24 ----
+# |H1|--------------------------|R1|--------------------------|H2|
+# ---- N1 ---- N2 ----
+#
+# where net.ipv4.icmp_errors_use_inbound_ifaddr is set on R1, and
+# 1.0.3.1/24 and 1.0.1.1/24 are R1's primary and secondary addresses
+# on N1, respectively.
+#
+
+cleanup_traceroute()
+{
+ local ns
+
+ for ns in host-1 host-2 router
+ do
+ ip netns del ${ns} 2>/dev/null
+ done
+}
+
+setup_traceroute()
+{
+ # start clean
+ cleanup_traceroute
+
+ set -e
+ create_ns host-1
+ create_ns host-2
+ create_ns router
+
+ connect_ns host-1 eth0 1.0.1.3/24 - \
+ router eth1 1.0.3.1/24 -
+ ip netns exec host-1 ip route add default via 1.0.1.1
+
+ ip netns exec router ip addr add 1.0.1.1/24 dev eth1
+ ip netns exec router sysctl -qw \
+ net.ipv4.icmp_errors_use_inbound_ifaddr=1
+
+ connect_ns host-2 eth0 1.0.2.4/24 - \
+ router eth2 1.0.2.1/24 -
+ ip netns exec host-2 ip route add default via 1.0.2.1
+
+ # Prime the network
+ ip netns exec host-1 ping -c5 1.0.2.4 >/dev/null 2>&1
+
+ set +e
+}
+
+run_traceroute()
+{
+ if [ ! -x "$(command -v traceroute)" ]; then
+ echo "SKIP: Could not run IPV4 test without traceroute"
+ return
+ fi
+
+ setup_traceroute
+
+ # traceroute host-2 from host-1 (expects 1.0.1.1). Takes a while.
+ run_cmd host-1 "traceroute 1.0.2.4 | grep -q 1.0.1.1"
+ log_test $? 0 "IPV4 traceroute"
+
+ cleanup_traceroute
+}
+
+################################################################################
+# Run tests
+
+run_tests()
+{
+ run_traceroute6
+ run_traceroute
+}
+
+################################################################################
+# main
+
+declare -i nfail=0
+declare -i nsuccess=0
+
+while getopts :pv o
+do
+ case $o in
+ p) PAUSE_ON_FAIL=yes;;
+ v) VERBOSE=$(($VERBOSE + 1));;
+ *) exit 1;;
+ esac
+done
+
+run_tests
+
+printf "\nTests passed: %3d\n" ${nsuccess}
+printf "Tests failed: %3d\n" ${nfail}
diff --git a/tools/testing/selftests/net/udpgso.c b/tools/testing/selftests/net/udpgso.c
index 614b31aad168..c66da6ffd6d8 100644
--- a/tools/testing/selftests/net/udpgso.c
+++ b/tools/testing/selftests/net/udpgso.c
@@ -440,7 +440,8 @@ static bool __send_one(int fd, struct msghdr *msg, int flags)
if (ret == -1)
error(1, errno, "sendmsg");
if (ret != msg->msg_iov->iov_len)
- error(1, 0, "sendto: %d != %lu", ret, msg->msg_iov->iov_len);
+ error(1, 0, "sendto: %d != %llu", ret,
+ (unsigned long long)msg->msg_iov->iov_len);
if (msg->msg_flags)
error(1, 0, "sendmsg: return flags 0x%x\n", msg->msg_flags);
diff --git a/tools/testing/selftests/net/udpgso_bench_tx.c b/tools/testing/selftests/net/udpgso_bench_tx.c
index ada99496634a..17512a43885e 100644
--- a/tools/testing/selftests/net/udpgso_bench_tx.c
+++ b/tools/testing/selftests/net/udpgso_bench_tx.c
@@ -405,7 +405,8 @@ static int send_udp_segment(int fd, char *data)
if (ret == -1)
error(1, errno, "sendmsg");
if (ret != iov.iov_len)
- error(1, 0, "sendmsg: %u != %lu\n", ret, iov.iov_len);
+ error(1, 0, "sendmsg: %u != %llu\n", ret,
+ (unsigned long long)iov.iov_len);
return 1;
}
diff --git a/tools/testing/selftests/netfilter/Makefile b/tools/testing/selftests/netfilter/Makefile
index 4144984ebee5..de1032b5ddea 100644
--- a/tools/testing/selftests/netfilter/Makefile
+++ b/tools/testing/selftests/netfilter/Makefile
@@ -2,6 +2,6 @@
# Makefile for netfilter selftests
TEST_PROGS := nft_trans_stress.sh nft_nat.sh bridge_brouter.sh \
- conntrack_icmp_related.sh nft_flowtable.sh
+ conntrack_icmp_related.sh nft_flowtable.sh ipvs.sh
include ../lib.mk
diff --git a/tools/testing/selftests/netfilter/ipvs.sh b/tools/testing/selftests/netfilter/ipvs.sh
new file mode 100755
index 000000000000..c3b8f90c497e
--- /dev/null
+++ b/tools/testing/selftests/netfilter/ipvs.sh
@@ -0,0 +1,228 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+#
+# End-to-end ipvs test suite
+# Topology:
+#--------------------------------------------------------------+
+# | |
+# ns0 | ns1 |
+# ----------- | ----------- ----------- |
+# | veth01 | --------- | veth10 | | veth12 | |
+# ----------- peer ----------- ----------- |
+# | | | |
+# ----------- | | |
+# | br0 | |----------------- peer |--------------|
+# ----------- | | |
+# | | | |
+# ---------- peer ---------- ----------- |
+# | veth02 | --------- | veth20 | | veth21 | |
+# ---------- | ---------- ----------- |
+# | ns2 |
+# | |
+#--------------------------------------------------------------+
+#
+# We assume that all required network drivers are loaded.
+#
+
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+ret=0
+GREEN='\033[0;92m'
+RED='\033[0;31m'
+NC='\033[0m' # No Color
+
+readonly port=8080
+
+readonly vip_v4=207.175.44.110
+readonly cip_v4=10.0.0.2
+readonly gip_v4=10.0.0.1
+readonly dip_v4=172.16.0.1
+readonly rip_v4=172.16.0.2
+readonly sip_v4=10.0.0.3
+
+readonly infile="$(mktemp)"
+readonly outfile="$(mktemp)"
+readonly datalen=32
+
+sysipvsnet="/proc/sys/net/ipv4/vs/"
+if [ ! -d $sysipvsnet ]; then
+ modprobe -q ip_vs
+ if [ $? -ne 0 ]; then
+ echo "skip: could not run test without ipvs module"
+ exit $ksft_skip
+ fi
+fi
+
+ip -Version > /dev/null 2>&1
+if [ $? -ne 0 ]; then
+ echo "SKIP: Could not run test without ip tool"
+ exit $ksft_skip
+fi
+
+ipvsadm -v > /dev/null 2>&1
+if [ $? -ne 0 ]; then
+ echo "SKIP: Could not run test without ipvsadm"
+ exit $ksft_skip
+fi
+
+setup() {
+ ip netns add ns0
+ ip netns add ns1
+ ip netns add ns2
+
+ ip link add veth01 netns ns0 type veth peer name veth10 netns ns1
+ ip link add veth02 netns ns0 type veth peer name veth20 netns ns2
+ ip link add veth12 netns ns1 type veth peer name veth21 netns ns2
+
+ ip netns exec ns0 ip link set veth01 up
+ ip netns exec ns0 ip link set veth02 up
+ ip netns exec ns0 ip link add br0 type bridge
+ ip netns exec ns0 ip link set veth01 master br0
+ ip netns exec ns0 ip link set veth02 master br0
+ ip netns exec ns0 ip link set br0 up
+ ip netns exec ns0 ip addr add ${cip_v4}/24 dev br0
+
+ ip netns exec ns1 ip link set lo up
+ ip netns exec ns1 ip link set veth10 up
+ ip netns exec ns1 ip addr add ${gip_v4}/24 dev veth10
+ ip netns exec ns1 ip link set veth12 up
+ ip netns exec ns1 ip addr add ${dip_v4}/24 dev veth12
+
+ ip netns exec ns2 ip link set lo up
+ ip netns exec ns2 ip link set veth21 up
+ ip netns exec ns2 ip addr add ${rip_v4}/24 dev veth21
+ ip netns exec ns2 ip link set veth20 up
+ ip netns exec ns2 ip addr add ${sip_v4}/24 dev veth20
+
+ sleep 1
+
+ dd if=/dev/urandom of="${infile}" bs="${datalen}" count=1 status=none
+}
+
+cleanup() {
+ for i in 0 1 2
+ do
+ ip netns del ns$i > /dev/null 2>&1
+ done
+
+ if [ -f "${outfile}" ]; then
+ rm "${outfile}"
+ fi
+ if [ -f "${infile}" ]; then
+ rm "${infile}"
+ fi
+}
+
+server_listen() {
+ ip netns exec ns2 nc -l -p ${port} > "${outfile}" &
+ server_pid=$!
+ sleep 0.2
+}
+
+client_connect() {
+ ip netns exec ns0 timeout 2 nc -w 1 ${vip_v4} ${port} < "${infile}"
+}
+
+verify_data() {
+ wait "${server_pid}"
+ cmp "$infile" "$outfile" 2>/dev/null
+}
+
+test_service() {
+ server_listen
+ client_connect
+ verify_data
+}
+
+
+test_dr() {
+ ip netns exec ns0 ip route add ${vip_v4} via ${gip_v4} dev br0
+
+ ip netns exec ns1 sysctl -qw net.ipv4.ip_forward=1
+ ip netns exec ns1 ipvsadm -A -t ${vip_v4}:${port} -s rr
+ ip netns exec ns1 ipvsadm -a -t ${vip_v4}:${port} -r ${rip_v4}:${port}
+ ip netns exec ns1 ip addr add ${vip_v4}/32 dev lo:1
+
+ # Prevent the real server from answering ARP for the VIP (DR mode).
+ ip netns exec ns2 sysctl -qw net.ipv4.conf.all.arp_ignore=1
+ ip netns exec ns2 sysctl -qw net.ipv4.conf.all.arp_announce=2
+ # Disable reverse path filtering so the VIP traffic is not dropped.
+ ip netns exec ns2 sysctl -qw net.ipv4.conf.all.rp_filter=0
+ ip netns exec ns2 sysctl -qw net.ipv4.conf.veth21.rp_filter=0
+ ip netns exec ns2 ip addr add ${vip_v4}/32 dev lo:1
+
+ test_service
+}
+
+test_nat() {
+ ip netns exec ns0 ip route add ${vip_v4} via ${gip_v4} dev br0
+
+ ip netns exec ns1 sysctl -qw net.ipv4.ip_forward=1
+ ip netns exec ns1 ipvsadm -A -t ${vip_v4}:${port} -s rr
+ ip netns exec ns1 ipvsadm -a -m -t ${vip_v4}:${port} -r ${rip_v4}:${port}
+ ip netns exec ns1 ip addr add ${vip_v4}/32 dev lo:1
+
+ ip netns exec ns2 ip link del veth20
+ ip netns exec ns2 ip route add default via ${dip_v4} dev veth21
+
+ test_service
+}
+
+test_tun() {
+ ip netns exec ns0 ip route add ${vip_v4} via ${gip_v4} dev br0
+
+ ip netns exec ns1 modprobe ipip
+ ip netns exec ns1 ip link set tunl0 up
+ ip netns exec ns1 sysctl -qw net.ipv4.ip_forward=0
+ ip netns exec ns1 sysctl -qw net.ipv4.conf.all.send_redirects=0
+ ip netns exec ns1 sysctl -qw net.ipv4.conf.default.send_redirects=0
+ ip netns exec ns1 ipvsadm -A -t ${vip_v4}:${port} -s rr
+ ip netns exec ns1 ipvsadm -a -i -t ${vip_v4}:${port} -r ${rip_v4}:${port}
+ ip netns exec ns1 ip addr add ${vip_v4}/32 dev lo:1
+
+ ip netns exec ns2 modprobe ipip
+ ip netns exec ns2 ip link set tunl0 up
+ ip netns exec ns2 sysctl -qw net.ipv4.conf.all.arp_ignore=1
+ ip netns exec ns2 sysctl -qw net.ipv4.conf.all.arp_announce=2
+ ip netns exec ns2 sysctl -qw net.ipv4.conf.all.rp_filter=0
+ ip netns exec ns2 sysctl -qw net.ipv4.conf.tunl0.rp_filter=0
+ ip netns exec ns2 sysctl -qw net.ipv4.conf.veth21.rp_filter=0
+ ip netns exec ns2 ip addr add ${vip_v4}/32 dev lo:1
+
+ test_service
+}
+
+run_tests() {
+ local errors=
+
+ echo "Testing DR mode..."
+ cleanup
+ setup
+ test_dr
+ errors=$(( $errors + $? ))
+
+ echo "Testing NAT mode..."
+ cleanup
+ setup
+ test_nat
+ errors=$(( $errors + $? ))
+
+ echo "Testing Tunnel mode..."
+ cleanup
+ setup
+ test_tun
+ errors=$(( $errors + $? ))
+
+ return $errors
+}
+
+trap cleanup EXIT
+
+run_tests
+
+if [ $? -ne 0 ]; then
+ echo -e "$(basename $0): ${RED}FAIL${NC}"
+ exit 1
+fi
+echo -e "$(basename $0): ${GREEN}PASS${NC}"
+exit 0
diff --git a/tools/testing/selftests/pidfd/Makefile b/tools/testing/selftests/pidfd/Makefile
index 7550f08822a3..43db1b98e845 100644
--- a/tools/testing/selftests/pidfd/Makefile
+++ b/tools/testing/selftests/pidfd/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
CFLAGS += -g -I../../../../usr/include/ -pthread
-TEST_GEN_PROGS := pidfd_test pidfd_open_test pidfd_poll_test pidfd_wait
+TEST_GEN_PROGS := pidfd_test pidfd_fdinfo_test pidfd_open_test pidfd_poll_test pidfd_wait
include ../lib.mk
diff --git a/tools/testing/selftests/pidfd/pidfd_fdinfo_test.c b/tools/testing/selftests/pidfd/pidfd_fdinfo_test.c
new file mode 100644
index 000000000000..22558524f71c
--- /dev/null
+++ b/tools/testing/selftests/pidfd/pidfd_fdinfo_test.c
@@ -0,0 +1,296 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#define _GNU_SOURCE
+#include <assert.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <linux/types.h>
+#include <sched.h>
+#include <signal.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <syscall.h>
+#include <sys/mount.h>
+#include <sys/wait.h>
+
+#include "pidfd.h"
+#include "../kselftest.h"
+
+struct error {
+ int code;
+ char msg[512];
+};
+
+static int error_set(struct error *err, int code, const char *fmt, ...)
+{
+ va_list args;
+ int r;
+
+ if (code == PIDFD_PASS || !err || err->code != PIDFD_PASS)
+ return code;
+
+ err->code = code;
+ va_start(args, fmt);
+ r = vsnprintf(err->msg, sizeof(err->msg), fmt, args);
+ assert((size_t)r < sizeof(err->msg));
+ va_end(args);
+
+ return code;
+}
+
+static void error_report(struct error *err, const char *test_name)
+{
+ switch (err->code) {
+ case PIDFD_ERROR:
+ ksft_exit_fail_msg("%s test: Fatal: %s\n", test_name, err->msg);
+ break;
+
+ case PIDFD_FAIL:
+ /* will be: not ok %d # error %s test: %s */
+ ksft_test_result_error("%s test: %s\n", test_name, err->msg);
+ break;
+
+ case PIDFD_SKIP:
+ /* will be: not ok %d # SKIP %s test: %s */
+ ksft_test_result_skip("%s test: %s\n", test_name, err->msg);
+ break;
+
+ case PIDFD_XFAIL:
+ ksft_test_result_pass("%s test: Expected failure: %s\n",
+ test_name, err->msg);
+ break;
+
+ case PIDFD_PASS:
+ ksft_test_result_pass("%s test: Passed\n");
+ break;
+
+ default:
+ ksft_exit_fail_msg("%s test: Unknown code: %d %s\n",
+ test_name, err->code, err->msg);
+ break;
+ }
+}
+
+static inline int error_check(struct error *err, const char *test_name)
+{
+ /* In case of error we bail out and terminate the test program */
+ if (err->code == PIDFD_ERROR)
+ error_report(err, test_name);
+
+ return err->code;
+}
+
+struct child {
+ pid_t pid;
+ int fd;
+};
+
+static struct child clone_newns(int (*fn)(void *), void *args,
+ struct error *err)
+{
+ static int flags = CLONE_PIDFD | CLONE_NEWPID | CLONE_NEWNS | SIGCHLD;
+ size_t stack_size = 1024;
+ char *stack[1024] = { 0 };
+ struct child ret;
+
+ if (!(flags & CLONE_NEWUSER) && geteuid() != 0)
+ flags |= CLONE_NEWUSER;
+
+#ifdef __ia64__
+ ret.pid = __clone2(fn, stack, stack_size, flags, args, &ret.fd);
+#else
+ ret.pid = clone(fn, stack + stack_size, flags, args, &ret.fd);
+#endif
+
+ if (ret.pid < 0) {
+ error_set(err, PIDFD_ERROR, "clone failed (ret %d, errno %d)",
+ ret.fd, errno);
+ return ret;
+ }
+
+ ksft_print_msg("New child: %d, fd: %d\n", ret.pid, ret.fd);
+
+ return ret;
+}
+
+static inline void child_close(struct child *child)
+{
+ close(child->fd);
+}
+
+static inline int child_join(struct child *child, struct error *err)
+{
+ int r;
+
+ r = wait_for_pid(child->pid);
+ if (r < 0)
+ error_set(err, PIDFD_ERROR, "waitpid failed (ret %d, errno %d)",
+ r, errno);
+ else if (r > 0)
+ error_set(err, r, "child %d reported: %d", child->pid, r);
+
+ return r;
+}
+
+static inline int child_join_close(struct child *child, struct error *err)
+{
+ child_close(child);
+ return child_join(child, err);
+}
+
+static inline void trim_newline(char *str)
+{
+ char *pos = strrchr(str, '\n');
+
+ if (pos)
+ *pos = '\0';
+}
+
+static int verify_fdinfo(int pidfd, struct error *err, const char *prefix,
+ size_t prefix_len, const char *expect, ...)
+{
+ char buffer[512] = {0, };
+ char path[512] = {0, };
+ va_list args;
+ FILE *f;
+ char *line = NULL;
+ size_t n = 0;
+ int found = 0;
+ int r;
+
+ va_start(args, expect);
+ r = vsnprintf(buffer, sizeof(buffer), expect, args);
+ assert((size_t)r < sizeof(buffer));
+ va_end(args);
+
+ snprintf(path, sizeof(path), "/proc/self/fdinfo/%d", pidfd);
+ f = fopen(path, "re");
+ if (!f)
+ return error_set(err, PIDFD_ERROR, "fdinfo open failed for %d",
+ pidfd);
+
+ while (getline(&line, &n, f) != -1) {
+ char *val;
+
+ if (strncmp(line, prefix, prefix_len))
+ continue;
+
+ found = 1;
+
+ val = line + prefix_len;
+ r = strcmp(val, buffer);
+ if (r != 0) {
+ trim_newline(line);
+ trim_newline(buffer);
+ error_set(err, PIDFD_FAIL, "%s '%s' != '%s'",
+ prefix, val, buffer);
+ }
+ break;
+ }
+
+ free(line);
+ fclose(f);
+
+ if (found == 0)
+ return error_set(err, PIDFD_FAIL, "%s not found for fd %d",
+ prefix, pidfd);
+
+ return PIDFD_PASS;
+}
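+
+/* A sketch of the fdinfo lines being matched, assuming a pidfd for
+ * pid 6084 whose pid namespace is a descendant of ours:
+ *   Pid:    6084
+ *   NSpid:  6084    1
+ */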
+
+static int child_fdinfo_nspid_test(void *args)
+{
+ struct error err;
+ int pidfd;
+ int r;
+
+ /* if we got no fd for the sibling, we are done */
+ if (!args)
+ return PIDFD_PASS;
+
+ /* Verify that we cannot resolve the pidfd for a process in a
+ * sibling pid namespace, i.e. a pid namespace that is neither
+ * our own nor a descendant of it.
+ */
+ r = mount(NULL, "/", NULL, MS_REC | MS_PRIVATE, 0);
+ if (r < 0) {
+ ksft_print_msg("Failed to remount / private\n");
+ return PIDFD_ERROR;
+ }
+
+ (void)umount2("/proc", MNT_DETACH);
+ r = mount("proc", "/proc", "proc", 0, NULL);
+ if (r < 0) {
+ ksft_print_msg("Failed to remount /proc\n");
+ return PIDFD_ERROR;
+ }
+
+ pidfd = *(int *)args;
+ r = verify_fdinfo(pidfd, &err, "NSpid:", 6, "\t0\n");
+
+ if (r != PIDFD_PASS)
+ ksft_print_msg("NSpid fdinfo check failed: %s\n", err.msg);
+
+ return r;
+}
+
+static void test_pidfd_fdinfo_nspid(void)
+{
+ struct child a, b;
+ struct error err = {0, };
+ const char *test_name = "pidfd check for NSpid in fdinfo";
+
+ /* Create a new child in a new pid and mount namespace */
+ a = clone_newns(child_fdinfo_nspid_test, NULL, &err);
+ error_check(&err, test_name);
+
+ /* Pass the pidfd representing the first child to the second
+ * child, which will be in a sibling pid namespace, so the
+ * fdinfo NSpid entry for the pidfd should contain only '0'.
+ */
+ b = clone_newns(child_fdinfo_nspid_test, &a.fd, &err);
+ error_check(&err, test_name);
+
+ /* The children will have pid 1 in the new pid namespace,
+ * so the line must be 'NSpid:\t<pid>\t1'.
+ */
+ verify_fdinfo(a.fd, &err, "NSpid:", 6, "\t%d\t%d\n", a.pid, 1);
+ verify_fdinfo(b.fd, &err, "NSpid:", 6, "\t%d\t%d\n", b.pid, 1);
+
+ /* wait for the process, check the exit status and set
+ * 'err' accordingly, if it is not already set.
+ */
+ child_join_close(&a, &err);
+ child_join_close(&b, &err);
+
+ error_report(&err, test_name);
+}
+
+static void test_pidfd_dead_fdinfo(void)
+{
+ struct child a;
+ struct error err = {0, };
+ const char *test_name = "pidfd check fdinfo for dead process";
+
+ /* Create a new child in a new pid and mount namespace */
+ a = clone_newns(child_fdinfo_nspid_test, NULL, &err);
+ error_check(&err, test_name);
+ child_join(&a, &err);
+
+ verify_fdinfo(a.fd, &err, "Pid:", 4, "\t-1\n");
+ verify_fdinfo(a.fd, &err, "NSpid:", 6, "\t-1\n");
+ child_close(&a);
+ error_report(&err, test_name);
+}
+
+int main(int argc, char **argv)
+{
+ ksft_print_header();
+ ksft_set_plan(2);
+
+ test_pidfd_fdinfo_nspid();
+ test_pidfd_dead_fdinfo();
+
+ return ksft_exit_pass();
+}
diff --git a/tools/testing/selftests/proc/proc-self-map-files-002.c b/tools/testing/selftests/proc/proc-self-map-files-002.c
index 47b7473dedef..e6aa00a183bc 100644
--- a/tools/testing/selftests/proc/proc-self-map-files-002.c
+++ b/tools/testing/selftests/proc/proc-self-map-files-002.c
@@ -47,7 +47,11 @@ static void fail(const char *fmt, unsigned long a, unsigned long b)
int main(void)
{
const int PAGE_SIZE = sysconf(_SC_PAGESIZE);
- const unsigned long va_max = 1UL << 32;
+ /*
+ * va_max must be sufficiently larger than vm.mmap_min_addr, which
+ * is 64KB or 32KB by default (depending on CONFIG_LSM_MMAP_MIN_ADDR).
+ */
+ const unsigned long va_max = 1UL << 20;
unsigned long va;
void *p;
int fd;
diff --git a/tools/testing/selftests/ptp/testptp.c b/tools/testing/selftests/ptp/testptp.c
index bd4a7247b44f..c0dd10257df5 100644
--- a/tools/testing/selftests/ptp/testptp.c
+++ b/tools/testing/selftests/ptp/testptp.c
@@ -44,6 +44,46 @@ static int clock_adjtime(clockid_t id, struct timex *tx)
}
#endif
+static void show_flag_test(int rq_index, unsigned int flags, int err)
+{
+ printf("PTP_EXTTS_REQUEST%c flags 0x%08x : (%d) %s\n",
+ rq_index ? '1' + rq_index : ' ',
+ flags, err, strerror(errno));
+ /* sigh, uClibc ... */
+ errno = 0;
+}
+
+static void do_flag_test(int fd, unsigned int index)
+{
+ struct ptp_extts_request extts_request;
+ unsigned long request[2] = {
+ PTP_EXTTS_REQUEST,
+ PTP_EXTTS_REQUEST2,
+ };
+ unsigned int enable_flags[5] = {
+ PTP_ENABLE_FEATURE,
+ PTP_ENABLE_FEATURE | PTP_RISING_EDGE,
+ PTP_ENABLE_FEATURE | PTP_FALLING_EDGE,
+ PTP_ENABLE_FEATURE | PTP_RISING_EDGE | PTP_FALLING_EDGE,
+ PTP_ENABLE_FEATURE | (PTP_EXTTS_VALID_FLAGS + 1),
+ };
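+ /* The last combination sets a bit above PTP_EXTTS_VALID_FLAGS, so
+ * PTP_EXTTS_REQUEST2 is expected to reject it, while the original
+ * PTP_EXTTS_REQUEST ioctl ignores unrecognized flag bits.
+ */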
+ int err, i, j;
+
+ memset(&extts_request, 0, sizeof(extts_request));
+ extts_request.index = index;
+
+ for (i = 0; i < 2; i++) {
+ for (j = 0; j < 5; j++) {
+ extts_request.flags = enable_flags[j];
+ err = ioctl(fd, request[i], &extts_request);
+ show_flag_test(i, extts_request.flags, err);
+
+ extts_request.flags = 0;
+ err = ioctl(fd, request[i], &extts_request);
+ }
+ }
+}
+
static clockid_t get_clockid(int fd)
{
#define CLOCKFD 3
@@ -96,7 +136,8 @@ static void usage(char *progname)
" -s set the ptp clock time from the system time\n"
" -S set the system time from the ptp clock time\n"
" -t val shift the ptp clock time by 'val' seconds\n"
- " -T val set the ptp clock time to 'val' seconds\n",
+ " -T val set the ptp clock time to 'val' seconds\n"
+ " -z test combinations of rising/falling external time stamp flags\n",
progname);
}
@@ -122,6 +163,7 @@ int main(int argc, char *argv[])
int adjtime = 0;
int capabilities = 0;
int extts = 0;
+ int flagtest = 0;
int gettime = 0;
int index = 0;
int list_pins = 0;
@@ -138,7 +180,7 @@ int main(int argc, char *argv[])
progname = strrchr(argv[0], '/');
progname = progname ? 1+progname : argv[0];
- while (EOF != (c = getopt(argc, argv, "cd:e:f:ghi:k:lL:p:P:sSt:T:v"))) {
+ while (EOF != (c = getopt(argc, argv, "cd:e:f:ghi:k:lL:p:P:sSt:T:z"))) {
switch (c) {
case 'c':
capabilities = 1;
@@ -191,6 +233,9 @@ int main(int argc, char *argv[])
settime = 3;
seconds = atoi(optarg);
break;
+ case 'z':
+ flagtest = 1;
+ break;
case 'h':
usage(progname);
return 0;
@@ -322,6 +367,10 @@ int main(int argc, char *argv[])
}
}
+ if (flagtest) {
+ do_flag_test(fd, index);
+ }
+
if (list_pins) {
int n_pins = 0;
if (ioctl(fd, PTP_CLOCK_GETCAPS, &caps)) {
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TASKS03 b/tools/testing/selftests/rcutorture/configs/rcu/TASKS03
index 28568b72a31b..ea4399020c6c 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/TASKS03
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TASKS03
@@ -1,8 +1,5 @@
CONFIG_SMP=y
CONFIG_NR_CPUS=2
-CONFIG_HOTPLUG_CPU=n
-CONFIG_SUSPEND=n
-CONFIG_HIBERNATION=n
CONFIG_PREEMPT_NONE=n
CONFIG_PREEMPT_VOLUNTARY=n
CONFIG_PREEMPT=y
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE02 b/tools/testing/selftests/rcutorture/configs/rcu/TREE02
index 35e639e39366..65daee4fbf5a 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/TREE02
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE02
@@ -9,9 +9,6 @@ CONFIG_NO_HZ_IDLE=y
CONFIG_NO_HZ_FULL=n
CONFIG_RCU_FAST_NO_HZ=n
CONFIG_RCU_TRACE=n
-CONFIG_HOTPLUG_CPU=n
-CONFIG_SUSPEND=n
-CONFIG_HIBERNATION=n
CONFIG_RCU_FANOUT=3
CONFIG_RCU_FANOUT_LEAF=3
CONFIG_RCU_NOCB_CPU=n
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE04 b/tools/testing/selftests/rcutorture/configs/rcu/TREE04
index 24c9f6012e35..f6d6a40c0576 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/TREE04
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE04
@@ -9,9 +9,6 @@ CONFIG_NO_HZ_IDLE=n
CONFIG_NO_HZ_FULL=y
CONFIG_RCU_FAST_NO_HZ=y
CONFIG_RCU_TRACE=y
-CONFIG_HOTPLUG_CPU=n
-CONFIG_SUSPEND=n
-CONFIG_HIBERNATION=n
CONFIG_RCU_FANOUT=4
CONFIG_RCU_FANOUT_LEAF=3
CONFIG_DEBUG_LOCK_ALLOC=n
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE06 b/tools/testing/selftests/rcutorture/configs/rcu/TREE06
index 05a4eec3f27b..bf4980d606b5 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/TREE06
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE06
@@ -9,9 +9,6 @@ CONFIG_NO_HZ_IDLE=y
CONFIG_NO_HZ_FULL=n
CONFIG_RCU_FAST_NO_HZ=n
CONFIG_RCU_TRACE=n
-CONFIG_HOTPLUG_CPU=n
-CONFIG_SUSPEND=n
-CONFIG_HIBERNATION=n
CONFIG_RCU_FANOUT=6
CONFIG_RCU_FANOUT_LEAF=6
CONFIG_RCU_NOCB_CPU=n
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE08 b/tools/testing/selftests/rcutorture/configs/rcu/TREE08
index fb1c763c10c5..c810c5276a89 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/TREE08
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE08
@@ -9,9 +9,6 @@ CONFIG_NO_HZ_IDLE=y
CONFIG_NO_HZ_FULL=n
CONFIG_RCU_FAST_NO_HZ=n
CONFIG_RCU_TRACE=n
-CONFIG_HOTPLUG_CPU=n
-CONFIG_SUSPEND=n
-CONFIG_HIBERNATION=n
CONFIG_RCU_FANOUT=3
CONFIG_RCU_FANOUT_LEAF=2
CONFIG_RCU_NOCB_CPU=y
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE09 b/tools/testing/selftests/rcutorture/configs/rcu/TREE09
index 6710e749d9de..8523a7515cbf 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/TREE09
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE09
@@ -8,9 +8,6 @@ CONFIG_HZ_PERIODIC=n
CONFIG_NO_HZ_IDLE=y
CONFIG_NO_HZ_FULL=n
CONFIG_RCU_TRACE=n
-CONFIG_HOTPLUG_CPU=n
-CONFIG_SUSPEND=n
-CONFIG_HIBERNATION=n
CONFIG_RCU_NOCB_CPU=n
CONFIG_DEBUG_LOCK_ALLOC=n
CONFIG_RCU_BOOST=n
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TRIVIAL b/tools/testing/selftests/rcutorture/configs/rcu/TRIVIAL
index 4d8eb5bfb6f6..5d546efa68e8 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/TRIVIAL
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TRIVIAL
@@ -6,9 +6,6 @@ CONFIG_PREEMPT=n
CONFIG_HZ_PERIODIC=n
CONFIG_NO_HZ_IDLE=y
CONFIG_NO_HZ_FULL=n
-CONFIG_HOTPLUG_CPU=n
-CONFIG_SUSPEND=n
-CONFIG_HIBERNATION=n
CONFIG_DEBUG_LOCK_ALLOC=n
CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
CONFIG_RCU_EXPERT=y
diff --git a/tools/testing/selftests/rcutorture/doc/TREE_RCU-kconfig.txt b/tools/testing/selftests/rcutorture/doc/TREE_RCU-kconfig.txt
index af6fca03602f..1b96d68473b8 100644
--- a/tools/testing/selftests/rcutorture/doc/TREE_RCU-kconfig.txt
+++ b/tools/testing/selftests/rcutorture/doc/TREE_RCU-kconfig.txt
@@ -6,7 +6,6 @@ Kconfig Parameters:
CONFIG_DEBUG_LOCK_ALLOC -- Do three, covering CONFIG_PROVE_LOCKING & not.
CONFIG_DEBUG_OBJECTS_RCU_HEAD -- Do one.
-CONFIG_HOTPLUG_CPU -- Do half. (Every second.)
CONFIG_HZ_PERIODIC -- Do one.
CONFIG_NO_HZ_IDLE -- Do those not otherwise specified. (Groups of two.)
CONFIG_NO_HZ_FULL -- Do two, one with partial CPU enablement.
diff --git a/tools/testing/selftests/sync/sync.c b/tools/testing/selftests/sync/sync.c
index f3d599f249b9..7741c0518d18 100644
--- a/tools/testing/selftests/sync/sync.c
+++ b/tools/testing/selftests/sync/sync.c
@@ -109,7 +109,7 @@ static struct sync_file_info *sync_file_info(int fd)
return NULL;
}
- info->sync_fence_info = (uint64_t)fence_info;
+ info->sync_fence_info = (uint64_t)(unsigned long)fence_info;
err = ioctl(fd, SYNC_IOC_FILE_INFO, info);
if (err < 0) {
@@ -124,7 +124,7 @@ static struct sync_file_info *sync_file_info(int fd)
static void sync_file_info_free(struct sync_file_info *info)
{
- free((void *)info->sync_fence_info);
+ free((void *)(unsigned long)info->sync_fence_info);
free(info);
}
@@ -152,7 +152,7 @@ int sync_fence_count_with_status(int fd, int status)
if (!info)
return -1;
- fence_info = (struct sync_fence_info *)info->sync_fence_info;
+ fence_info = (struct sync_fence_info *)(unsigned long)info->sync_fence_info;
for (i = 0 ; i < info->num_fences ; i++) {
if (fence_info[i].status == status)
count++;
diff --git a/tools/testing/selftests/tc-testing/config b/tools/testing/selftests/tc-testing/config
index 7c551968d184..477bc61b374a 100644
--- a/tools/testing/selftests/tc-testing/config
+++ b/tools/testing/selftests/tc-testing/config
@@ -1,3 +1,12 @@
+#
+# Core Netfilter Configuration
+#
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_MARK=y
+CONFIG_NF_CONNTRACK_ZONES=y
+CONFIG_NF_CONNTRACK_LABELS=y
+CONFIG_NF_NAT=m
+
CONFIG_NET_SCHED=y
#
@@ -42,6 +51,7 @@ CONFIG_NET_ACT_CTINFO=m
CONFIG_NET_ACT_SKBMOD=m
CONFIG_NET_ACT_IFE=m
CONFIG_NET_ACT_TUNNEL_KEY=m
+CONFIG_NET_ACT_CT=m
CONFIG_NET_ACT_MPLS=m
CONFIG_NET_IFE_SKBMARK=m
CONFIG_NET_IFE_SKBPRIO=m
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/csum.json b/tools/testing/selftests/tc-testing/tc-tests/actions/csum.json
index ddabb2fbb7c7..88ec134872e4 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/csum.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/csum.json
@@ -525,5 +525,29 @@
"teardown": [
"$TC actions flush action csum"
]
+ },
+ {
+ "id": "eaf0",
+ "name": "Add csum iph action with no_percpu flag",
+ "category": [
+ "actions",
+ "csum"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action csum",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action csum iph no_percpu",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action csum",
+ "matchPattern": "action order [0-9]*: csum \\(iph\\) action pass.*no_percpu",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action csum"
+ ]
}
]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/ct.json b/tools/testing/selftests/tc-testing/tc-tests/actions/ct.json
index 62b82fe10c89..4202e95e27b9 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/ct.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/ct.json
@@ -24,6 +24,30 @@
]
},
{
+ "id": "e38c",
+ "name": "Add simple ct action with cookie",
+ "category": [
+ "actions",
+ "ct"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action ct",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action ct index 42 cookie deadbeef",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action ct",
+ "matchPattern": "action order [0-9]*: ct zone 0 pipe.*index 42 ref.*cookie deadbeef",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action ct"
+ ]
+ },
+ {
"id": "9f20",
"name": "Add ct clear action",
"category": [
@@ -48,6 +72,30 @@
]
},
{
+ "id": "0bc1",
+ "name": "Add ct clear action with cookie of max length",
+ "category": [
+ "actions",
+ "ct"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action ct",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action ct clear index 42 cookie aa11bb22cc33dd44ee55ff66aa11b1b2",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action ct",
+ "matchPattern": "action order [0-9]*: ct clear pipe.*index 42 ref.*cookie aa11bb22cc33dd44ee55ff66aa11b1b2",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action ct"
+ ]
+ },
+ {
"id": "5bea",
"name": "Try ct with zone",
"category": [
@@ -310,5 +358,53 @@
"teardown": [
"$TC actions flush action ct"
]
+ },
+ {
+ "id": "2faa",
+ "name": "Try ct with mark + mask and cookie",
+ "category": [
+ "actions",
+ "ct"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action ct",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action ct mark 0x42/0xf0 index 42 cookie aa11bb22cc33dd44ee55ff66aa11b1b2",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action ct",
+ "matchPattern": "action order [0-9]*: ct mark 66/0xf0 zone 0 pipe.*index 42 ref.*cookie aa11bb22cc33dd44ee55ff66aa11b1b2",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action ct"
+ ]
+ },
+ {
+ "id": "3991",
+ "name": "Add simple ct action with no_percpu flag",
+ "category": [
+ "actions",
+ "ct"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action ct",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action ct no_percpu",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action ct",
+ "matchPattern": "action order [0-9]*: ct zone 0 pipe.*no_percpu",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action ct"
+ ]
}
]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/gact.json b/tools/testing/selftests/tc-testing/tc-tests/actions/gact.json
index 814b7a8a478b..b24494c6f546 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/gact.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/gact.json
@@ -585,5 +585,29 @@
"teardown": [
"$TC actions flush action gact"
]
+ },
+ {
+ "id": "95ad",
+ "name": "Add gact pass action with no_percpu flag",
+ "category": [
+ "actions",
+ "gact"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action gact",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action pass no_percpu",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action gact",
+ "matchPattern": "action order [0-9]*: gact action pass.*no_percpu",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action gact"
+ ]
}
]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/mirred.json b/tools/testing/selftests/tc-testing/tc-tests/actions/mirred.json
index 2232b21e2510..12a2fe0e1472 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/mirred.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/mirred.json
@@ -553,5 +553,29 @@
"matchPattern": "^[ \t]+index [0-9]+ ref",
"matchCount": "0",
"teardown": []
+ },
+ {
+ "id": "31e3",
+ "name": "Add mirred mirror to egress action with no_percpu flag",
+ "category": [
+ "actions",
+ "mirred"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action mirred",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action mirred egress mirror dev lo no_percpu",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action mirred",
+ "matchPattern": "action order [0-9]*: mirred \\(Egress Mirror to device lo\\).*no_percpu",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action mirred"
+ ]
}
]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/mpls.json b/tools/testing/selftests/tc-testing/tc-tests/actions/mpls.json
index e31a080edc49..866f0efd0859 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/mpls.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/mpls.json
@@ -168,6 +168,54 @@
]
},
{
+ "id": "09d2",
+ "name": "Add mpls dec_ttl action with opcode and cookie",
+ "category": [
+ "actions",
+ "mpls"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action mpls",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action mpls dec_ttl pipe index 8 cookie aabbccddeeff",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action mpls",
+ "matchPattern": "action order [0-9]+: mpls.*dec_ttl pipe.*index 8 ref.*cookie aabbccddeeff",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action mpls"
+ ]
+ },
+ {
+ "id": "c170",
+ "name": "Add mpls dec_ttl action with opcode and cookie of max length",
+ "category": [
+ "actions",
+ "mpls"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action mpls",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action mpls dec_ttl continue index 8 cookie aa11bb22cc33dd44ee55ff66aa11b1b2",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action mpls",
+ "matchPattern": "action order [0-9]+: mpls.*dec_ttl continue.*index 8 ref.*cookie aa11bb22cc33dd44ee55ff66aa11b1b2",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action mpls"
+ ]
+ },
+ {
"id": "9118",
"name": "Add mpls dec_ttl action with invalid opcode",
"category": [
@@ -302,6 +350,30 @@
]
},
{
+ "id": "91fb",
+ "name": "Add mpls pop action with ip proto and cookie",
+ "category": [
+ "actions",
+ "mpls"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action mpls",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action mpls pop protocol ipv4 cookie 12345678",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action mpls",
+ "matchPattern": "action order [0-9]+: mpls.*pop.*protocol.*ip.*pipe.*ref 1.*cookie 12345678",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action mpls"
+ ]
+ },
+ {
"id": "92fe",
"name": "Add mpls pop action with mpls proto",
"category": [
@@ -508,6 +580,30 @@
]
},
{
+ "id": "7c34",
+ "name": "Add mpls push action with label, tc ttl and cookie of max length",
+ "category": [
+ "actions",
+ "mpls"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action mpls",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action mpls push label 20 tc 3 ttl 128 cookie aa11bb22cc33dd44ee55ff66aa11b1b2",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action mpls",
+ "matchPattern": "action order [0-9]+: mpls.*push.*protocol.*mpls_uc.*label.*20.*tc.*3.*ttl.*128.*pipe.*ref 1.*cookie aa11bb22cc33dd44ee55ff66aa11b1b2",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action mpls"
+ ]
+ },
+ {
"id": "16eb",
"name": "Add mpls push action with label and bos",
"category": [
@@ -828,6 +924,30 @@
]
},
{
+ "id": "77c1",
+ "name": "Add mpls mod action with mpls ttl and cookie",
+ "category": [
+ "actions",
+ "mpls"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action mpls",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action mpls mod ttl 128 cookie aa11bb22cc33dd44ee55ff66aa11b1b2",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action mpls",
+ "matchPattern": "action order [0-9]+: mpls.*modify.*ttl.*128.*pipe.*ref 1.*cookie aa11bb22cc33dd44ee55ff66aa11b1b2",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action mpls"
+ ]
+ },
+ {
"id": "b80f",
"name": "Add mpls mod action with mpls max ttl",
"category": [
@@ -1037,6 +1157,31 @@
]
},
{
+ "id": "95a9",
+ "name": "Replace existing mpls push action with new label, tc, ttl and cookie",
+ "category": [
+ "actions",
+ "mpls"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action mpls",
+ 0,
+ 1,
+ 255
+ ],
+ "$TC actions add action mpls push label 20 tc 3 ttl 128 index 1 cookie aa11bb22cc33dd44ee55ff66aa11b1b2"
+ ],
+ "cmdUnderTest": "$TC actions replace action mpls push label 30 tc 2 ttl 125 pipe index 1 cookie aa11bb22cc33",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions get action mpls index 1",
+ "matchPattern": "action order [0-9]+: mpls.*push.*protocol.*mpls_uc.*label.*30 tc 2 ttl 125 pipe.*index 1.*cookie aa11bb22cc33",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action mpls"
+ ]
+ },
+ {
"id": "6cce",
"name": "Delete mpls pop action",
"category": [
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/pedit.json b/tools/testing/selftests/tc-testing/tc-tests/actions/pedit.json
index 0d319f1d01db..f8ea6f5fa8e9 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/pedit.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/pedit.json
@@ -349,6 +349,281 @@
]
},
{
+ "id": "1762",
+ "name": "Add pedit action with RAW_OP offset u8 clear value",
+ "category": [
+ "actions",
+ "pedit",
+ "raw_op"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action pedit",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action pedit munge offset 0 u8 clear",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action pedit",
+ "matchPattern": "action order [0-9]+:.*pedit.*keys 1.*key #0.*at 0: val 00000000 mask 00ffffff",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action pedit"
+ ]
+ },
+ {
+ "id": "bcee",
+ "name": "Add pedit action with RAW_OP offset u8 retain value",
+ "category": [
+ "actions",
+ "pedit",
+ "raw_op"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action pedit",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action pedit munge offset 0 u8 set 0x11 retain 0x0f",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action pedit",
+ "matchPattern": "action order [0-9]+:.*pedit.*keys 1.*key #0.*at 0: val 01000000 mask f0ffffff",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action pedit"
+ ]
+ },
+ {
+ "id": "e89f",
+ "name": "Add pedit action with RAW_OP offset u16 retain value",
+ "category": [
+ "actions",
+ "pedit",
+ "raw_op"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action pedit",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action pedit munge offset 0 u16 set 0x1122 retain 0xff00",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action pedit",
+ "matchPattern": "action order [0-9]+:.*pedit.*keys 1.*key #0.*at 0: val 11000000 mask 00ffffff",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action pedit"
+ ]
+ },
+ {
+ "id": "c282",
+ "name": "Add pedit action with RAW_OP offset u32 clear value",
+ "category": [
+ "actions",
+ "pedit",
+ "raw_op"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action pedit",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action pedit munge offset 0 u32 clear",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action pedit",
+ "matchPattern": "action order [0-9]+:.*pedit.*keys 1.*key #0.*at 0: val 00000000 mask 00000000",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action pedit"
+ ]
+ },
+ {
+ "id": "c422",
+ "name": "Add pedit action with RAW_OP offset u16 invert value",
+ "category": [
+ "actions",
+ "pedit",
+ "raw_op"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action pedit",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action pedit munge offset 12 u16 invert",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action pedit",
+ "matchPattern": "action order [0-9]+:.*pedit.*keys 1.*key #0.*at 12: val ffff0000 mask ffffffff",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action pedit"
+ ]
+ },
+ {
+ "id": "d3d3",
+ "name": "Add pedit action with RAW_OP offset u32 invert value",
+ "category": [
+ "actions",
+ "pedit",
+ "raw_op"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action pedit",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action pedit munge offset 12 u32 invert",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action pedit",
+ "matchPattern": "action order [0-9]+:.*pedit.*keys 1.*key #0.*at 12: val ffffffff mask ffffffff",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action pedit"
+ ]
+ },
+ {
+ "id": "57e5",
+ "name": "Add pedit action with RAW_OP offset u8 preserve value",
+ "category": [
+ "actions",
+ "pedit",
+ "raw_op"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action pedit",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action pedit munge offset 0 u8 preserve",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action pedit",
+ "matchPattern": "action order [0-9]+:.*pedit.*keys 1.*key #0.*at 0: val 00000000 mask ffffffff",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action pedit"
+ ]
+ },
+ {
+ "id": "99e0",
+ "name": "Add pedit action with RAW_OP offset u16 preserve value",
+ "category": [
+ "actions",
+ "pedit",
+ "raw_op"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action pedit",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action pedit munge offset 0 u16 preserve",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action pedit",
+ "matchPattern": "action order [0-9]+:.*pedit.*keys 1.*key #0.*at 0: val 00000000 mask ffffffff",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action pedit"
+ ]
+ },
+ {
+ "id": "1892",
+ "name": "Add pedit action with RAW_OP offset u32 preserve value",
+ "category": [
+ "actions",
+ "pedit",
+ "raw_op"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action pedit",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action pedit munge offset 0 u32 preserve",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action pedit",
+ "matchPattern": "action order [0-9]+:.*pedit.*keys 1.*key #0.*at 0: val 00000000 mask ffffffff",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action pedit"
+ ]
+ },
+ {
+ "id": "4b60",
+ "name": "Add pedit action with RAW_OP negative offset u16/u32 set value",
+ "category": [
+ "actions",
+ "pedit",
+ "raw_op"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action pedit",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action pedit munge offset -14 u16 set 0x0000 munge offset -12 u32 set 0x00000100 munge offset -8 u32 set 0x0aaf0100 munge offset -4 u32 set 0x0008eb06 pipe",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action pedit",
+ "matchPattern": "action order [0-9]+:.*pedit.*keys 4.*key #0.*at -16: val 00000000 mask ffff0000.*key #1.*at -12: val 00000100 mask 00000000.*key #2.*at -8: val 0aaf0100 mask 00000000.*key #3.*at -4: val 0008eb06 mask 00000000",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action pedit"
+ ]
+ },
+ {
+ "id": "a5a7",
+ "name": "Add pedit action with LAYERED_OP eth set src",
+ "category": [
+ "actions",
+ "pedit",
+ "layered_op"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action pedit",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action pedit ex munge eth src set 11:22:33:44:55:66",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action pedit",
+ "matchPattern": "action order [0-9]+: pedit action pass keys 2.*key #0 at eth\\+4: val 00001122 mask ffff0000.*key #1 at eth\\+8: val 33445566 mask 00000000",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action pedit"
+ ]
+ },
+ {
"id": "86d4",
"name": "Add pedit action with LAYERED_OP eth set src & dst",
"category": [
@@ -374,6 +649,31 @@
]
},
{
+ "id": "f8a9",
+ "name": "Add pedit action with LAYERED_OP eth set dst",
+ "category": [
+ "actions",
+ "pedit",
+ "layered_op"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action pedit",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action pedit ex munge eth dst set 11:22:33:44:55:66",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action pedit",
+ "matchPattern": "action order [0-9]+: pedit action pass keys 2.*key #0 at eth\\+0: val 11223344 mask 00000000.*key #1 at eth\\+4: val 55660000 mask 0000ffff",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action pedit"
+ ]
+ },
+ {
"id": "c715",
"name": "Add pedit action with LAYERED_OP eth set src (INVALID)",
"category": [
@@ -399,6 +699,31 @@
]
},
{
+ "id": "8131",
+ "name": "Add pedit action with LAYERED_OP eth set dst (INVALID)",
+ "category": [
+ "actions",
+ "pedit",
+ "layered_op"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action pedit",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action pedit ex munge eth dst set %e:11:m2:33:x4:-5",
+ "expExitCode": "255",
+ "verifyCmd": "/bin/true",
+ "matchPattern": " ",
+ "matchCount": "0",
+ "teardown": [
+ "$TC actions flush action pedit"
+ ]
+ },
+ {
"id": "ba22",
"name": "Add pedit action with LAYERED_OP eth type set/clear sequence",
"category": [
@@ -424,6 +749,179 @@
]
},
{
+ "id": "dec4",
+ "name": "Add pedit action with LAYERED_OP eth set type (INVALID)",
+ "category": [
+ "actions",
+ "pedit",
+ "layered_op"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action pedit",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action pedit ex munge eth type set 0xabcdef",
+ "expExitCode": "255",
+ "verifyCmd": "$TC actions list action pedit",
+ "matchPattern": "action order [0-9]+: pedit action pass keys 1.*key #0 at eth+12: val ",
+ "matchCount": "0",
+ "teardown": []
+ },
+ {
+ "id": "ab06",
+ "name": "Add pedit action with LAYERED_OP eth add type",
+ "category": [
+ "actions",
+ "pedit",
+ "layered_op"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action pedit",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action pedit ex munge eth type add 0x1",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action pedit",
+ "matchPattern": "action order [0-9]+: pedit action pass keys 1.*key #0 at eth\\+12: add 00010000 mask 0000ffff",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action pedit"
+ ]
+ },
+ {
+ "id": "918d",
+ "name": "Add pedit action with LAYERED_OP eth invert src",
+ "category": [
+ "actions",
+ "pedit",
+ "layered_op"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action pedit",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action pedit ex munge eth src invert",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action pedit",
+ "matchPattern": "action order [0-9]+: pedit action pass keys 2.*key #0 at eth\\+4: val 0000ff00 mask ffff0000.*key #1 at eth\\+8: val 00000000 mask 00000000",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action pedit"
+ ]
+ },
+ {
+ "id": "a8d4",
+ "name": "Add pedit action with LAYERED_OP eth invert dst",
+ "category": [
+ "actions",
+ "pedit",
+ "layered_op"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action pedit",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action pedit ex munge eth dst invert",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action pedit",
+ "matchPattern": "action order [0-9]+: pedit action pass keys 2.*key #0 at eth\\+0: val ff000000 mask 00000000.*key #1 at eth\\+4: val 00000000 mask 0000ffff",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action pedit"
+ ]
+ },
+ {
+ "id": "ee13",
+ "name": "Add pedit action with LAYERED_OP eth invert type",
+ "category": [
+ "actions",
+ "pedit",
+ "layered_op"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action pedit",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action pedit ex munge eth type invert",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action pedit",
+ "matchPattern": "action order [0-9]+: pedit action pass keys 1.*key #0 at eth\\+12: val ffff0000 mask ffffffff",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action pedit"
+ ]
+ },
+ {
+ "id": "7588",
+ "name": "Add pedit action with LAYERED_OP ip set src",
+ "category": [
+ "actions",
+ "pedit",
+ "layered_op"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action pedit",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action pedit munge ip src set 1.1.1.1",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action pedit",
+ "matchPattern": "action order [0-9]+: pedit action pass keys 1.*key #0 at 12: val 01010101 mask 00000000",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action pedit"
+ ]
+ },
+ {
+ "id": "0fa7",
+ "name": "Add pedit action with LAYERED_OP ip set dst",
+ "category": [
+ "actions",
+ "pedit",
+ "layered_op"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action pedit",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action pedit munge ip dst set 2.2.2.2",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action pedit",
+ "matchPattern": "action order [0-9]+: pedit action pass keys 1.*key #0 at 16: val 02020202 mask 00000000",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action pedit"
+ ]
+ },
+ {
"id": "5810",
"name": "Add pedit action with LAYERED_OP ip set src & dst",
"category": [
@@ -599,6 +1097,206 @@
]
},
{
+ "id": "cc8a",
+ "name": "Add pedit action with LAYERED_OP ip set tos",
+ "category": [
+ "actions",
+ "pedit",
+ "layered_op"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action pedit",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action pedit munge ip tos set 0x4 continue",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action pedit",
+ "matchPattern": "action order [0-9]+: pedit action continue keys 1.*key #0 at 0: val 00040000 mask ff00ffff",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action pedit"
+ ]
+ },
+ {
+ "id": "7a17",
+ "name": "Add pedit action with LAYERED_OP ip set precedence",
+ "category": [
+ "actions",
+ "pedit",
+ "layered_op"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action pedit",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action pedit munge ip precedence set 3 jump 2",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action pedit",
+ "matchPattern": "action order [0-9]+: pedit action jump 2 keys 1.*key #0 at 0: val 00030000 mask ff00ffff",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action pedit"
+ ]
+ },
+ {
+ "id": "c3b6",
+ "name": "Add pedit action with LAYERED_OP ip add tos",
+ "category": [
+ "actions",
+ "pedit",
+ "layered_op"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action pedit",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action pedit ex munge ip tos add 0x1 pass",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action pedit",
+ "matchPattern": "action order [0-9]+: pedit action pass keys 1.*key #0 at ipv4\\+0: add 00010000 mask ff00ffff",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action pedit"
+ ]
+ },
+ {
+ "id": "43d3",
+ "name": "Add pedit action with LAYERED_OP ip add precedence",
+ "category": [
+ "actions",
+ "pedit",
+ "layered_op"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action pedit",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action pedit ex munge ip precedence add 0x1 pipe",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action pedit",
+ "matchPattern": "action order [0-9]+: pedit action pipe keys 1.*key #0 at ipv4\\+0: add 00010000 mask ff00ffff",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action pedit"
+ ]
+ },
+ {
+ "id": "438e",
+ "name": "Add pedit action with LAYERED_OP ip clear tos",
+ "category": [
+ "actions",
+ "pedit",
+ "layered_op"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action pedit",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action pedit munge ip tos clear continue",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action pedit",
+ "matchPattern": "action order [0-9]+: pedit action continue keys 1.*key #0 at 0: val 00000000 mask ff00ffff",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action pedit"
+ ]
+ },
+ {
+ "id": "6b1b",
+ "name": "Add pedit action with LAYERED_OP ip clear precedence",
+ "category": [
+ "actions",
+ "pedit",
+ "layered_op"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action pedit",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action pedit munge ip precedence clear jump 2",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action pedit",
+ "matchPattern": "action order [0-9]+: pedit action jump 2 keys 1.*key #0 at 0: val 00000000 mask ff00ffff",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action pedit"
+ ]
+ },
+ {
+ "id": "824a",
+ "name": "Add pedit action with LAYERED_OP ip invert tos",
+ "category": [
+ "actions",
+ "pedit",
+ "layered_op"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action pedit",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action pedit munge ip tos invert pipe",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action pedit",
+ "matchPattern": "action order [0-9]+: pedit action pipe keys 1.*key #0 at 0: val 00ff0000 mask ffffffff",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action pedit"
+ ]
+ },
+ {
+ "id": "106f",
+ "name": "Add pedit action with LAYERED_OP ip invert precedence",
+ "category": [
+ "actions",
+ "pedit",
+ "layered_op"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action pedit",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action pedit munge ip precedence invert reclassify",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action pedit",
+ "matchPattern": "action order [0-9]+: pedit action reclassify keys 1.*key #0 at 0: val 00ff0000 mask ffffffff",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action pedit"
+ ]
+ },
+ {
"id": "6829",
"name": "Add pedit action with LAYERED_OP beyond ip set dport & sport",
"category": [
@@ -674,6 +1372,56 @@
]
},
{
+ "id": "815c",
+ "name": "Add pedit action with LAYERED_OP ip6 set src",
+ "category": [
+ "actions",
+ "pedit",
+ "layered_op"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action pedit",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action pedit ex munge ip6 src set 2001:0db8:0:f101::1",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action pedit",
+ "matchPattern": "action order [0-9]+: pedit action pass keys 4.*key #0 at ipv6\\+8: val 20010db8 mask 00000000.*key #1 at ipv6\\+12: val 0000f101 mask 00000000.*key #2 at ipv6\\+16: val 00000000 mask 00000000.*key #3 at ipv6\\+20: val 00000001 mask 00000000",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action pedit"
+ ]
+ },
+ {
+ "id": "4dae",
+ "name": "Add pedit action with LAYERED_OP ip6 set dst",
+ "category": [
+ "actions",
+ "pedit",
+ "layered_op"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action pedit",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action pedit ex munge ip6 dst set 2001:0db8:0:f101::1",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action pedit",
+ "matchPattern": "action order [0-9]+: pedit action pass keys 4.*key #0 at ipv6\\+24: val 20010db8 mask 00000000.*key #1 at ipv6\\+28: val 0000f101 mask 00000000.*key #2 at ipv6\\+32: val 00000000 mask 00000000.*key #3 at ipv6\\+36: val 00000001 mask 00000000",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action pedit"
+ ]
+ },
+ {
"id": "fc1f",
"name": "Add pedit action with LAYERED_OP ip6 set src & dst",
"category": [
@@ -950,5 +1698,4 @@
"$TC actions flush action pedit"
]
}
-
]
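The expected val/mask pairs in the pedit matchPatterns above all follow from the one transform act_pedit applies to each 32-bit word, new = (old & mask) ^ val: "set" dumps as val=<value> mask=00000000, "preserve" as val=00000000 mask=ffffffff, and "invert" as val=ffffffff mask=ffffffff; for sub-word ops the mask additionally keeps the neighbouring bytes, which is why the u16 set at offset -14 dumps as mask ffff0000 at -16. A minimal user-space sketch of that rule (illustrative only, not the kernel code itself):

#include <stdint.h>
#include <stdio.h>

/* Per-word transform used by act_pedit: new = (old & mask) ^ val. */
static uint32_t pedit_word(uint32_t old, uint32_t mask, uint32_t val)
{
	return (old & mask) ^ val;
}

int main(void)
{
	uint32_t old = 0x11223344;

	/* set: mask clears every bit, val supplies the replacement */
	printf("set      -> %08x\n", pedit_word(old, 0x00000000, 0xdeadbeef));
	/* preserve: keep every bit, xor with zero is a no-op */
	printf("preserve -> %08x\n", pedit_word(old, 0xffffffff, 0x00000000));
	/* invert: keep every bit, then flip them all */
	printf("invert   -> %08x\n", pedit_word(old, 0xffffffff, 0xffffffff));
	return 0;
}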
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json b/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json
index 28453a445fdb..fbeb9197697d 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json
@@ -909,5 +909,29 @@
"teardown": [
"$TC actions flush action tunnel_key"
]
+ },
+ {
+ "id": "0cd2",
+ "name": "Add tunnel_key set action with no_percpu flag",
+ "category": [
+ "actions",
+ "tunnel_key"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 20.20.20.2 id 1 no_percpu",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action tunnel_key",
+ "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 10.10.10.1.*dst_ip 20.20.20.2.*key_id 1.*no_percpu",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action tunnel_key"
+ ]
}
]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/vlan.json b/tools/testing/selftests/tc-testing/tc-tests/actions/vlan.json
index 6503b1ce091f..41d783254b08 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/vlan.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/vlan.json
@@ -807,5 +807,29 @@
"matchPattern": "^[ \t]+index [0-9]+ ref",
"matchCount": "0",
"teardown": []
+ },
+ {
+ "id": "1a3d",
+ "name": "Add vlan pop action with no_percpu flag",
+ "category": [
+ "actions",
+ "vlan"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action vlan",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action vlan pop no_percpu",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action vlan",
+ "matchPattern": "action order [0-9]+: vlan.*pop.*no_percpu",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action vlan"
+ ]
}
]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/filters/basic.json b/tools/testing/selftests/tc-testing/tc-tests/filters/basic.json
new file mode 100644
index 000000000000..76ae03a64506
--- /dev/null
+++ b/tools/testing/selftests/tc-testing/tc-tests/filters/basic.json
@@ -0,0 +1,325 @@
+[
+ {
+ "id": "7a92",
+ "name": "Add basic filter with cmp ematch u8/link layer and default action",
+ "category": [
+ "filter",
+ "basic"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 basic match 'cmp(u8 at 0 layer link mask 0xff gt 10)' classid 1:1",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip basic",
+ "matchPattern": "^filter parent ffff: protocol ip pref 1 basic.*handle 0x1 flowid 1:1.*cmp\\(u8 at 0 layer 0 mask 0xff gt 10\\)",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "2e8a",
+ "name": "Add basic filter with cmp ematch u8/link layer with trans flag and default action",
+ "category": [
+ "filter",
+ "basic"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 basic match 'cmp(u8 at 0 layer link mask 0xff trans gt 10)' classid 1:1",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip basic",
+ "matchPattern": "^filter parent ffff: protocol ip pref 1 basic.*handle 0x1 flowid 1:1.*cmp\\(u8 at 0 layer 0 mask 0xff trans gt 10\\)",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "4d9f",
+ "name": "Add basic filter with cmp ematch u16/link layer and a single action",
+ "category": [
+ "filter",
+ "basic"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 basic match 'cmp(u16 at 0 layer 0 mask 0xff00 lt 3)' action pass",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip basic",
+ "matchPattern": "^filter parent ffff: protocol ip pref 1 basic.*handle 0x1.*cmp\\(u16 at 0 layer 0 mask 0xff00 lt 3\\).*action.*gact action pass",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "4943",
+ "name": "Add basic filter with cmp ematch u32/link layer and miltiple actions",
+ "category": [
+ "filter",
+ "basic"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 basic match 'cmp(u32 at 4 layer link mask 0xff00ff00 eq 3)' action skbedit mark 7 pipe action gact drop",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip basic",
+ "matchPattern": "^filter parent ffff: protocol ip pref 1 basic.*handle 0x1.*cmp\\(u32 at 4 layer 0 mask 0xff00ff00 eq 3\\).*action.*skbedit.*mark 7 pipe.*action.*gact action drop",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "7559",
+ "name": "Add basic filter with cmp ematch u8/network layer and default action",
+ "category": [
+ "filter",
+ "basic"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 0xab protocol ip prio 11 basic match 'cmp(u8 at 0 layer 1 mask 0xff gt 10)' classid 1:1",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 0xab prio 11 protocol ip basic",
+ "matchPattern": "^filter parent ffff: protocol ip pref 11 basic.*handle 0xab flowid 1:1.*cmp\\(u8 at 0 layer 1 mask 0xff gt 10\\)",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "aff4",
+ "name": "Add basic filter with cmp ematch u8/network layer with trans flag and default action",
+ "category": [
+ "filter",
+ "basic"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 0xab protocol ip prio 11 basic match 'cmp(u8 at 0 layer 1 mask 0xff trans gt 10)' classid 1:1",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 0xab prio 11 protocol ip basic",
+ "matchPattern": "^filter parent ffff: protocol ip pref 11 basic.*handle 0xab flowid 1:1.*cmp\\(u8 at 0 layer 1 mask 0xff trans gt 10\\)",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "c732",
+ "name": "Add basic filter with cmp ematch u16/network layer and a single action",
+ "category": [
+ "filter",
+ "basic"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 0x100 protocol ip prio 100 basic match 'cmp(u16 at 0 layer network mask 0xff00 lt 3)' action pass",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 0x100 prio 100 protocol ip basic",
+ "matchPattern": "^filter parent ffff: protocol ip pref 100 basic.*handle 0x100.*cmp\\(u16 at 0 layer 1 mask 0xff00 lt 3\\).*action.*gact action pass",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "32d8",
+ "name": "Add basic filter with cmp ematch u32/network layer and miltiple actions",
+ "category": [
+ "filter",
+ "basic"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 0x112233 protocol ip prio 7 basic match 'cmp(u32 at 4 layer network mask 0xff00ff00 eq 3)' action skbedit mark 7 pipe action gact drop",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 0x112233 prio 7 protocol ip basic",
+ "matchPattern": "^filter parent ffff: protocol ip pref 7 basic.*handle 0x112233.*cmp\\(u32 at 4 layer 1 mask 0xff00ff00 eq 3\\).*action.*skbedit.*mark 7 pipe.*action.*gact action drop",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "6f5e",
+ "name": "Add basic filter with cmp ematch u8/transport layer and default action",
+ "category": [
+ "filter",
+ "basic"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 basic match 'cmp(u8 at 0 layer transport mask 0xff gt 10)' classid 1:1",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip basic",
+ "matchPattern": "^filter parent ffff: protocol ip pref 1 basic.*handle 0x1 flowid 1:1.*cmp\\(u8 at 0 layer 2 mask 0xff gt 10\\)",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "0752",
+ "name": "Add basic filter with cmp ematch u8/transport layer with trans flag and default action",
+ "category": [
+ "filter",
+ "basic"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 basic match 'cmp(u8 at 0 layer transport mask 0xff trans gt 10)' classid 1:1",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip basic",
+ "matchPattern": "^filter parent ffff: protocol ip pref 1 basic.*handle 0x1 flowid 1:1.*cmp\\(u8 at 0 layer 2 mask 0xff trans gt 10\\)",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "7e07",
+ "name": "Add basic filter with cmp ematch u16/transport layer and a single action",
+ "category": [
+ "filter",
+ "basic"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 basic match 'cmp(u16 at 0 layer 2 mask 0xff00 lt 3)' action pass",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip basic",
+ "matchPattern": "^filter parent ffff: protocol ip pref 1 basic.*handle 0x1.*cmp\\(u16 at 0 layer 2 mask 0xff00 lt 3\\).*action.*gact action pass",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "62d7",
+ "name": "Add basic filter with cmp ematch u32/transport layer and miltiple actions",
+ "category": [
+ "filter",
+ "basic"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 basic match 'cmp(u32 at 4 layer transport mask 0xff00ff00 eq 3)' action skbedit mark 7 pipe action gact drop",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip basic",
+ "matchPattern": "^filter parent ffff: protocol ip pref 1 basic.*handle 0x1.*cmp\\(u32 at 4 layer 2 mask 0xff00ff00 eq 3\\).*action.*skbedit.*mark 7 pipe.*action.*gact action drop",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "304b",
+ "name": "Add basic filter with NOT cmp ematch rule and default action",
+ "category": [
+ "filter",
+ "basic"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 basic match 'not cmp(u8 at 0 layer link mask 0xff eq 3)' classid 1:1",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip basic",
+ "matchPattern": "^filter parent ffff: protocol ip pref 1 basic.*handle 0x1 flowid 1:1.*NOT cmp\\(u8 at 0 layer 0 mask 0xff eq 3\\)",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "8ecb",
+ "name": "Add basic filter with two ANDed cmp ematch rules and single action",
+ "category": [
+ "filter",
+ "basic"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 basic match 'cmp(u8 at 0 layer link mask 0xff eq 3) and cmp(u16 at 8 layer link mask 0x00ff gt 7)' action gact drop",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip basic",
+ "matchPattern": "^filter parent ffff: protocol ip pref 1 basic.*handle 0x1.*cmp\\(u8 at 0 layer 0 mask 0xff eq 3\\).*AND cmp\\(u16 at 8 layer 0 mask 0xff gt 7\\).*action.*gact action drop",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "b1ad",
+ "name": "Add basic filter with two ORed cmp ematch rules and single action",
+ "category": [
+ "filter",
+ "basic"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 basic match 'cmp(u8 at 0 layer link mask 0xff eq 3) or cmp(u16 at 8 layer link mask 0x00ff gt 7)' action gact drop",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip basic",
+ "matchPattern": "^filter parent ffff: protocol ip pref 1 basic.*handle 0x1.*cmp\\(u8 at 0 layer 0 mask 0xff eq 3\\).*OR cmp\\(u16 at 8 layer 0 mask 0xff gt 7\\).*action.*gact action drop",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "4600",
+ "name": "Add basic filter with two ANDed cmp ematch rules and one ORed ematch rule and single action",
+ "category": [
+ "filter",
+ "basic"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 basic match 'cmp(u8 at 0 layer link mask 0xff eq 3) and cmp(u16 at 8 layer link mask 0x00ff gt 7) or cmp(u32 at 4 layer network mask 0xa0a0 lt 3)' action gact drop",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip basic",
+ "matchPattern": "^filter parent ffff: protocol ip pref 1 basic.*handle 0x1.*cmp\\(u8 at 0 layer 0 mask 0xff eq 3\\).*AND cmp\\(u16 at 8 layer 0 mask 0xff gt 7\\).*OR cmp\\(u32 at 4 layer 1 mask 0xa0a0 lt 3\\).*action.*gact action drop",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "bc59",
+ "name": "Add basic filter with two ANDed cmp ematch rules and one NOT ORed ematch rule and single action",
+ "category": [
+ "filter",
+ "basic"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 basic match 'cmp(u8 at 0 layer link mask 0xff eq 3) and cmp(u16 at 8 layer link mask 0x00ff gt 7) or not cmp(u32 at 4 layer network mask 0xa0a0 lt 3)' action gact drop",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip basic",
+ "matchPattern": "^filter parent ffff: protocol ip pref 1 basic.*handle 0x1.*cmp\\(u8 at 0 layer 0 mask 0xff eq 3\\).*AND cmp\\(u16 at 8 layer 0 mask 0xff gt 7\\).*OR NOT cmp\\(u32 at 4 layer 1 mask 0xa0a0 lt 3\\).*action.*gact action drop",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ }
+]
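In every matchPattern above, the symbolic layer keyword from the command line comes back as a number: link dumps as layer 0, network as layer 1, transport as layer 2. The mapping mirrors the TCF_LAYER_* constants from include/uapi/linux/pkt_cls.h; a tiny sketch:

#include <stdio.h>

/* Mirrors the TCF_LAYER_* values in include/uapi/linux/pkt_cls.h. */
enum {
	TCF_LAYER_LINK,		/* 'layer link'      -> layer 0 */
	TCF_LAYER_NETWORK,	/* 'layer network'   -> layer 1 */
	TCF_LAYER_TRANSPORT,	/* 'layer transport' -> layer 2 */
};

int main(void)
{
	printf("link=%d network=%d transport=%d\n",
	       TCF_LAYER_LINK, TCF_LAYER_NETWORK, TCF_LAYER_TRANSPORT);
	return 0;
}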
diff --git a/tools/testing/selftests/vm/Makefile b/tools/testing/selftests/vm/Makefile
index 9534dc2bc929..7f9a8a8c31da 100644
--- a/tools/testing/selftests/vm/Makefile
+++ b/tools/testing/selftests/vm/Makefile
@@ -1,5 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
# Makefile for vm selftests
+uname_M := $(shell uname -m 2>/dev/null || echo not)
+ARCH ?= $(shell echo $(uname_M) | sed -e 's/aarch64.*/arm64/')
CFLAGS = -Wall -I ../../../../usr/include $(EXTRA_CFLAGS)
LDLIBS = -lrt
@@ -16,8 +18,11 @@ TEST_GEN_FILES += on-fault-limit
TEST_GEN_FILES += thuge-gen
TEST_GEN_FILES += transhuge-stress
TEST_GEN_FILES += userfaultfd
+
+ifneq (,$(filter $(ARCH),arm64 ia64 mips64 parisc64 ppc64 riscv64 s390x sh64 sparc64 x86_64))
TEST_GEN_FILES += va_128TBswitch
TEST_GEN_FILES += virtual_address_range
+endif
TEST_PROGS := run_vmtests
diff --git a/tools/testing/selftests/vm/run_vmtests b/tools/testing/selftests/vm/run_vmtests
index 951c507a27f7..a692ea828317 100755
--- a/tools/testing/selftests/vm/run_vmtests
+++ b/tools/testing/selftests/vm/run_vmtests
@@ -58,6 +58,14 @@ else
exit 1
fi
+# filter 64-bit architectures
+ARCH64STR="arm64 ia64 mips64 parisc64 ppc64 riscv64 s390x sh64 sparc64 x86_64"
+if [ -z "$ARCH" ]; then
+ ARCH=`uname -m 2>/dev/null | sed -e 's/aarch64.*/arm64/'`
+fi
+VADDR64=0
+echo "$ARCH64STR" | grep $ARCH && VADDR64=1
+
mkdir $mnt
mount -t hugetlbfs none $mnt
@@ -189,6 +197,7 @@ else
echo "[PASS]"
fi
+if [ $VADDR64 -ne 0 ]; then
echo "-----------------------------"
echo "running virtual_address_range"
echo "-----------------------------"
@@ -210,6 +219,7 @@ if [ $? -ne 0 ]; then
else
echo "[PASS]"
fi
+fi # VADDR64
echo "------------------------------------"
echo "running vmalloc stability smoke test"
diff --git a/tools/testing/selftests/x86/ioperm.c b/tools/testing/selftests/x86/ioperm.c
index 01de41c1b725..57ec5e99edb9 100644
--- a/tools/testing/selftests/x86/ioperm.c
+++ b/tools/testing/selftests/x86/ioperm.c
@@ -131,6 +131,17 @@ int main(void)
printf("[RUN]\tchild: check that we inherited permissions\n");
expect_ok(0x80);
expect_gp(0xed);
+ printf("[RUN]\tchild: Extend permissions to 0x81\n");
+ if (ioperm(0x81, 1, 1) != 0) {
+ printf("[FAIL]\tioperm(0x81, 1, 1) failed (%d)", errno);
+ return 1;
+ }
+ printf("[RUN]\tchild: Drop permissions to 0x80\n");
+ if (ioperm(0x80, 1, 0) != 0) {
+ printf("[FAIL]\tioperm(0x80, 1, 0) failed (%d)", errno);
+ return 1;
+ }
+ expect_gp(0x80);
return 0;
} else {
int status;
@@ -146,8 +157,11 @@ int main(void)
}
}
- /* Test the capability checks. */
+ /* Verify that the child dropping 0x80 did not affect the parent */
+ printf("\tVerify that unsharing the bitmap worked\n");
+ expect_ok(0x80);
+ /* Test the capability checks. */
printf("\tDrop privileges\n");
if (setresuid(1, 1, 1) != 0) {
printf("[WARN]\tDropping privileges failed\n");
diff --git a/tools/testing/selftests/x86/iopl.c b/tools/testing/selftests/x86/iopl.c
index 6aa27f34644c..bab2f6e06b63 100644
--- a/tools/testing/selftests/x86/iopl.c
+++ b/tools/testing/selftests/x86/iopl.c
@@ -35,6 +35,16 @@ static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
}
+static void clearhandler(int sig)
+{
+ struct sigaction sa;
+ memset(&sa, 0, sizeof(sa));
+ sa.sa_handler = SIG_DFL;
+ sigemptyset(&sa.sa_mask);
+ if (sigaction(sig, &sa, 0))
+ err(1, "sigaction");
+}
+
static jmp_buf jmpbuf;
static void sigsegv(int sig, siginfo_t *si, void *ctx_void)
@@ -42,25 +52,128 @@ static void sigsegv(int sig, siginfo_t *si, void *ctx_void)
siglongjmp(jmpbuf, 1);
}
+static bool try_outb(unsigned short port)
+{
+ bool ret;
+
+ sethandler(SIGSEGV, sigsegv, SA_RESETHAND);
+ if (sigsetjmp(jmpbuf, 1) != 0) {
+ ret = false;
+ } else {
+ asm volatile ("outb %%al, %w[port]"
+ : : [port] "Nd" (port), "a" (0));
+ ret = true;
+ }
+ clearhandler(SIGSEGV);
+ return ret;
+}
+
+static void expect_ok_outb(unsigned short port)
+{
+ if (!try_outb(port)) {
+ printf("[FAIL]\toutb to 0x%02hx failed\n", port);
+ exit(1);
+ }
+
+ printf("[OK]\toutb to 0x%02hx worked\n", port);
+}
+
+static void expect_gp_outb(unsigned short port)
+{
+ if (try_outb(port)) {
+ printf("[FAIL]\toutb to 0x%02hx worked\n", port);
+ nerrs++;
+ return;
+ }
+
+ printf("[OK]\toutb to 0x%02hx failed\n", port);
+}
+
+static bool try_cli(void)
+{
+ bool ret;
+
+ sethandler(SIGSEGV, sigsegv, SA_RESETHAND);
+ if (sigsetjmp(jmpbuf, 1) != 0) {
+ ret = false;
+ } else {
+ asm volatile ("cli");
+ ret = true;
+ }
+ clearhandler(SIGSEGV);
+ return ret;
+}
+
+static bool try_sti(void)
+{
+ bool ret;
+
+ sethandler(SIGSEGV, sigsegv, SA_RESETHAND);
+ if (sigsetjmp(jmpbuf, 1) != 0) {
+ ret = false;
+ } else {
+ asm volatile ("sti");
+ ret = true;
+ }
+ clearhandler(SIGSEGV);
+ return ret;
+}
+
+static void expect_gp_sti(void)
+{
+ if (try_sti()) {
+ printf("[FAIL]\tSTI worked\n");
+ nerrs++;
+ } else {
+ printf("[OK]\tSTI faulted\n");
+ }
+}
+
+static void expect_gp_cli(void)
+{
+ if (try_cli()) {
+ printf("[FAIL]\tCLI worked\n");
+ nerrs++;
+ } else {
+ printf("[OK]\tCLI faulted\n");
+ }
+}
+
int main(void)
{
cpu_set_t cpuset;
+
CPU_ZERO(&cpuset);
CPU_SET(0, &cpuset);
if (sched_setaffinity(0, sizeof(cpuset), &cpuset) != 0)
err(1, "sched_setaffinity to CPU 0");
/* Probe for iopl support. Note that iopl(0) works even as nonroot. */
- if (iopl(3) != 0) {
+ switch (iopl(3)) {
+ case 0:
+ break;
+ case -ENOSYS:
+ printf("[OK]\tiopl() not supported\n");
+ return 0;
+ default:
printf("[OK]\tiopl(3) failed (%d) -- try running as root\n",
errno);
return 0;
}
- /* Restore our original state prior to starting the test. */
+ /* Make sure that CLI/STI are blocked even with IOPL level 3 */
+ expect_gp_cli();
+ expect_gp_sti();
+ expect_ok_outb(0x80);
+
+ /* Establish an I/O bitmap to test the restore */
+ if (ioperm(0x80, 1, 1) != 0)
+ err(1, "ioperm(0x80, 1, 1) failed\n");
+
+ /* Restore our original state prior to starting the fork test. */
if (iopl(0) != 0)
err(1, "iopl(0)");
+ /*
+ * Verify that IOPL emulation is disabled and the I/O bitmap still
+ * works.
+ */
+ expect_ok_outb(0x80);
+ expect_gp_outb(0xed);
+ /* Drop the I/O bitmap */
+ if (ioperm(0x80, 1, 0) != 0)
+ err(1, "ioperm(0x80, 1, 0) failed\n");
+
pid_t child = fork();
if (child == -1)
err(1, "fork");
@@ -90,14 +203,9 @@ int main(void)
printf("[RUN]\tparent: write to 0x80 (should fail)\n");
- sethandler(SIGSEGV, sigsegv, 0);
- if (sigsetjmp(jmpbuf, 1) != 0) {
- printf("[OK]\twrite was denied\n");
- } else {
- asm volatile ("outb %%al, $0x80" : : "a" (0));
- printf("[FAIL]\twrite was allowed\n");
- nerrs++;
- }
+ expect_gp_outb(0x80);
+ expect_gp_cli();
+ expect_gp_sti();
/* Test the capability checks. */
printf("\tiopl(3)\n");
@@ -133,4 +241,3 @@ int main(void)
done:
return nerrs ? 1 : 0;
}
-
diff --git a/tools/testing/selftests/x86/mov_ss_trap.c b/tools/testing/selftests/x86/mov_ss_trap.c
index 3c3a022654f3..6da0ac3f0135 100644
--- a/tools/testing/selftests/x86/mov_ss_trap.c
+++ b/tools/testing/selftests/x86/mov_ss_trap.c
@@ -257,7 +257,8 @@ int main()
err(1, "sigaltstack");
sethandler(SIGSEGV, handle_and_longjmp, SA_RESETHAND | SA_ONSTACK);
nr = SYS_getpid;
- asm volatile ("mov %[ss], %%ss; SYSENTER" : "+a" (nr)
+ /* Clear EBP first to make sure we segfault cleanly. */
+ asm volatile ("xorl %%ebp, %%ebp; mov %[ss], %%ss; SYSENTER" : "+a" (nr)
: [ss] "m" (ss) : "flags", "rcx"
#ifdef __x86_64__
, "r11"
diff --git a/tools/testing/selftests/x86/sigreturn.c b/tools/testing/selftests/x86/sigreturn.c
index 3e49a7873f3e..57c4f67f16ef 100644
--- a/tools/testing/selftests/x86/sigreturn.c
+++ b/tools/testing/selftests/x86/sigreturn.c
@@ -451,6 +451,19 @@ static void sigusr1(int sig, siginfo_t *info, void *ctx_void)
ctx->uc_mcontext.gregs[REG_SP] = (unsigned long)0x8badf00d5aadc0deULL;
ctx->uc_mcontext.gregs[REG_CX] = 0;
+#ifdef __i386__
+ /*
+ * Make sure the kernel doesn't inadvertently use DS or ES-relative
+ * accesses in a region where user DS or ES is loaded.
+ *
+ * Skip this for 64-bit builds because long mode doesn't care about
+ * DS and ES and skipping it increases test coverage a little bit,
+ * since 64-bit kernels can still run the 32-bit build.
+ */
+ ctx->uc_mcontext.gregs[REG_DS] = 0;
+ ctx->uc_mcontext.gregs[REG_ES] = 0;
+#endif
+
memcpy(&requested_regs, &ctx->uc_mcontext.gregs, sizeof(gregset_t));
requested_regs[REG_CX] = *ssptr(ctx); /* The asm code does this. */
diff --git a/tools/usb/usbip/libsrc/usbip_host_common.c b/tools/usb/usbip/libsrc/usbip_host_common.c
index 2813aa821c82..d1d8ba2a4a40 100644
--- a/tools/usb/usbip/libsrc/usbip_host_common.c
+++ b/tools/usb/usbip/libsrc/usbip_host_common.c
@@ -57,7 +57,7 @@ static int32_t read_attr_usbip_status(struct usbip_usb_device *udev)
}
value = atoi(status);
-
+ close(fd);
return value;
}
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index e2bb5bd60227..f182b2380345 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -80,7 +80,7 @@ static inline bool userspace_irqchip(struct kvm *kvm)
static void soft_timer_start(struct hrtimer *hrt, u64 ns)
{
hrtimer_start(hrt, ktime_add_ns(ktime_get(), ns),
- HRTIMER_MODE_ABS);
+ HRTIMER_MODE_ABS_HARD);
}
static void soft_timer_cancel(struct hrtimer *hrt)
@@ -697,11 +697,11 @@ void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
update_vtimer_cntvoff(vcpu, kvm_phys_timer_read());
ptimer->cntvoff = 0;
- hrtimer_init(&timer->bg_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+ hrtimer_init(&timer->bg_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
timer->bg_timer.function = kvm_bg_timer_expire;
- hrtimer_init(&vtimer->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
- hrtimer_init(&ptimer->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+ hrtimer_init(&vtimer->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
+ hrtimer_init(&ptimer->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
vtimer->hrtimer.function = kvm_hrtimer_expire;
ptimer->hrtimer.function = kvm_hrtimer_expire;
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index 86c6aa1cb58e..12e0280291ce 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -40,6 +40,10 @@
#include <asm/kvm_coproc.h>
#include <asm/sections.h>
+#include <kvm/arm_hypercalls.h>
+#include <kvm/arm_pmu.h>
+#include <kvm/arm_psci.h>
+
#ifdef REQUIRES_VIRT
__asm__(".arch_extension virt");
#endif
@@ -98,6 +102,26 @@ int kvm_arch_check_processor_compat(void)
return 0;
}
+int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
+ struct kvm_enable_cap *cap)
+{
+ int r;
+
+ if (cap->flags)
+ return -EINVAL;
+
+ switch (cap->cap) {
+ case KVM_CAP_ARM_NISV_TO_USER:
+ r = 0;
+ kvm->arch.return_nisv_io_abort_to_user = true;
+ break;
+ default:
+ r = -EINVAL;
+ break;
+ }
+
+ return r;
+}
/**
* kvm_arch_init_vm - initializes a VM data structure
@@ -197,6 +221,8 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_IMMEDIATE_EXIT:
case KVM_CAP_VCPU_EVENTS:
case KVM_CAP_ARM_IRQ_LINE_LAYOUT_2:
+ case KVM_CAP_ARM_NISV_TO_USER:
+ case KVM_CAP_ARM_INJECT_EXT_DABT:
r = 1;
break;
case KVM_CAP_ARM_SET_DEVICE_ADDR:
@@ -322,20 +348,24 @@ void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
/*
* If we're about to block (most likely because we've just hit a
* WFI), we need to sync back the state of the GIC CPU interface
- * so that we have the lastest PMR and group enables. This ensures
+ * so that we have the latest PMR and group enables. This ensures
* that kvm_arch_vcpu_runnable has up-to-date data to decide
* whether we have pending interrupts.
+ *
+ * For the same reason, we want to tell GICv4 that we need
+ * doorbells to be signalled, should an interrupt become pending.
*/
preempt_disable();
kvm_vgic_vmcr_sync(vcpu);
+ vgic_v4_put(vcpu, true);
preempt_enable();
-
- kvm_vgic_v4_enable_doorbell(vcpu);
}
void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
{
- kvm_vgic_v4_disable_doorbell(vcpu);
+ preempt_disable();
+ vgic_v4_load(vcpu);
+ preempt_enable();
}
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
@@ -351,6 +381,8 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
kvm_arm_reset_debug_ptr(vcpu);
+ kvm_arm_pvtime_vcpu_init(&vcpu->arch);
+
return kvm_vgic_vcpu_init(vcpu);
}
@@ -380,11 +412,13 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
kvm_vcpu_load_sysregs(vcpu);
kvm_arch_vcpu_load_fp(vcpu);
kvm_vcpu_pmu_restore_guest(vcpu);
+ if (kvm_arm_is_pvtime_enabled(&vcpu->arch))
+ kvm_make_request(KVM_REQ_RECORD_STEAL, vcpu);
if (single_task_running())
- vcpu_clear_wfe_traps(vcpu);
+ vcpu_clear_wfx_traps(vcpu);
else
- vcpu_set_wfe_traps(vcpu);
+ vcpu_set_wfx_traps(vcpu);
vcpu_ptrauth_setup_lazy(vcpu);
}
@@ -645,6 +679,9 @@ static void check_vcpu_requests(struct kvm_vcpu *vcpu)
* that a VCPU sees new virtual interrupts.
*/
kvm_check_request(KVM_REQ_IRQ_PENDING, vcpu);
+
+ if (kvm_check_request(KVM_REQ_RECORD_STEAL, vcpu))
+ kvm_update_stolen_time(vcpu);
}
}
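The new kvm_vm_ioctl_enable_cap() above is reached through the generic KVM_ENABLE_CAP ioctl on the VM file descriptor. A hedged user-space sketch of a VMM opting in to KVM_CAP_ARM_NISV_TO_USER (vm_fd is assumed to come from KVM_CREATE_VM, with headers from a kernel carrying this series):

#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

int enable_nisv_to_user(int vm_fd)
{
	struct kvm_enable_cap cap;

	memset(&cap, 0, sizeof(cap));
	cap.cap = KVM_CAP_ARM_NISV_TO_USER;	/* flags/args stay zero */

	/* On success, non-decodable MMIO aborts exit to user space with
	 * KVM_EXIT_ARM_NISV instead of failing inside the kernel. */
	return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
}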
diff --git a/virt/kvm/arm/hypercalls.c b/virt/kvm/arm/hypercalls.c
new file mode 100644
index 000000000000..550dfa3e53cd
--- /dev/null
+++ b/virt/kvm/arm/hypercalls.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2019 Arm Ltd.
+
+#include <linux/arm-smccc.h>
+#include <linux/kvm_host.h>
+
+#include <asm/kvm_emulate.h>
+
+#include <kvm/arm_hypercalls.h>
+#include <kvm/arm_psci.h>
+
+int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
+{
+ u32 func_id = smccc_get_function(vcpu);
+ long val = SMCCC_RET_NOT_SUPPORTED;
+ u32 feature;
+ gpa_t gpa;
+
+ switch (func_id) {
+ case ARM_SMCCC_VERSION_FUNC_ID:
+ val = ARM_SMCCC_VERSION_1_1;
+ break;
+ case ARM_SMCCC_ARCH_FEATURES_FUNC_ID:
+ feature = smccc_get_arg1(vcpu);
+ switch (feature) {
+ case ARM_SMCCC_ARCH_WORKAROUND_1:
+ switch (kvm_arm_harden_branch_predictor()) {
+ case KVM_BP_HARDEN_UNKNOWN:
+ break;
+ case KVM_BP_HARDEN_WA_NEEDED:
+ val = SMCCC_RET_SUCCESS;
+ break;
+ case KVM_BP_HARDEN_NOT_REQUIRED:
+ val = SMCCC_RET_NOT_REQUIRED;
+ break;
+ }
+ break;
+ case ARM_SMCCC_ARCH_WORKAROUND_2:
+ switch (kvm_arm_have_ssbd()) {
+ case KVM_SSBD_FORCE_DISABLE:
+ case KVM_SSBD_UNKNOWN:
+ break;
+ case KVM_SSBD_KERNEL:
+ val = SMCCC_RET_SUCCESS;
+ break;
+ case KVM_SSBD_FORCE_ENABLE:
+ case KVM_SSBD_MITIGATED:
+ val = SMCCC_RET_NOT_REQUIRED;
+ break;
+ }
+ break;
+ case ARM_SMCCC_HV_PV_TIME_FEATURES:
+ val = SMCCC_RET_SUCCESS;
+ break;
+ }
+ break;
+ case ARM_SMCCC_HV_PV_TIME_FEATURES:
+ val = kvm_hypercall_pv_features(vcpu);
+ break;
+ case ARM_SMCCC_HV_PV_TIME_ST:
+ gpa = kvm_init_stolen_time(vcpu);
+ if (gpa != GPA_INVALID)
+ val = gpa;
+ break;
+ default:
+ return kvm_psci_call(vcpu);
+ }
+
+ smccc_set_retval(vcpu, val, 0, 0, 0);
+ return 1;
+}
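All of these services share the SMCCC calling convention that smccc_get_function()/smccc_set_retval() mirror on the host side: the guest places the function ID in x0 and arguments in x1..x3, traps with HVC, and reads the result from x0. A rough guest-side sketch of a one-argument fast call over the HVC conduit (real guests should use the arm_smccc_* helpers from linux/arm-smccc.h):

/* Rough arm64 guest-side SMCCC fast call over HVC; sketch only. */
static inline long smccc_hvc1(unsigned long func_id, unsigned long arg1)
{
	register unsigned long x0 asm("x0") = func_id;
	register unsigned long x1 asm("x1") = arg1;

	/* x0..x3 carry results per SMCCC v1.1; clobber the unused pair. */
	asm volatile("hvc #0"
		     : "+r" (x0), "+r" (x1)
		     : : "x2", "x3", "memory");
	return x0;
}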
diff --git a/virt/kvm/arm/mmio.c b/virt/kvm/arm/mmio.c
index 6af5c91337f2..70d3b449692c 100644
--- a/virt/kvm/arm/mmio.c
+++ b/virt/kvm/arm/mmio.c
@@ -167,7 +167,14 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
if (ret)
return ret;
} else {
- kvm_err("load/store instruction decoding not implemented\n");
+ if (vcpu->kvm->arch.return_nisv_io_abort_to_user) {
+ run->exit_reason = KVM_EXIT_ARM_NISV;
+ run->arm_nisv.esr_iss = kvm_vcpu_dabt_iss_nisv_sanitized(vcpu);
+ run->arm_nisv.fault_ipa = fault_ipa;
+ return 0;
+ }
+
+ kvm_pr_unimpl("Data abort outside memslots with no valid syndrome info\n");
return -ENOSYS;
}
diff --git a/virt/kvm/arm/psci.c b/virt/kvm/arm/psci.c
index 87927f7e1ee7..17e2bdd4b76f 100644
--- a/virt/kvm/arm/psci.c
+++ b/virt/kvm/arm/psci.c
@@ -15,6 +15,7 @@
#include <asm/kvm_host.h>
#include <kvm/arm_psci.h>
+#include <kvm/arm_hypercalls.h>
/*
* This is an implementation of the Power State Coordination Interface
@@ -23,38 +24,6 @@
#define AFFINITY_MASK(level) ~((0x1UL << ((level) * MPIDR_LEVEL_BITS)) - 1)
-static u32 smccc_get_function(struct kvm_vcpu *vcpu)
-{
- return vcpu_get_reg(vcpu, 0);
-}
-
-static unsigned long smccc_get_arg1(struct kvm_vcpu *vcpu)
-{
- return vcpu_get_reg(vcpu, 1);
-}
-
-static unsigned long smccc_get_arg2(struct kvm_vcpu *vcpu)
-{
- return vcpu_get_reg(vcpu, 2);
-}
-
-static unsigned long smccc_get_arg3(struct kvm_vcpu *vcpu)
-{
- return vcpu_get_reg(vcpu, 3);
-}
-
-static void smccc_set_retval(struct kvm_vcpu *vcpu,
- unsigned long a0,
- unsigned long a1,
- unsigned long a2,
- unsigned long a3)
-{
- vcpu_set_reg(vcpu, 0, a0);
- vcpu_set_reg(vcpu, 1, a1);
- vcpu_set_reg(vcpu, 2, a2);
- vcpu_set_reg(vcpu, 3, a3);
-}
-
static unsigned long psci_affinity_mask(unsigned long affinity_level)
{
if (affinity_level <= 3)
@@ -373,7 +342,7 @@ static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
* Errors:
* -EINVAL: Unrecognized PSCI function
*/
-static int kvm_psci_call(struct kvm_vcpu *vcpu)
+int kvm_psci_call(struct kvm_vcpu *vcpu)
{
switch (kvm_psci_version(vcpu, vcpu->kvm)) {
case KVM_ARM_PSCI_1_0:
@@ -387,55 +356,6 @@ static int kvm_psci_call(struct kvm_vcpu *vcpu)
};
}
-int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
-{
- u32 func_id = smccc_get_function(vcpu);
- u32 val = SMCCC_RET_NOT_SUPPORTED;
- u32 feature;
-
- switch (func_id) {
- case ARM_SMCCC_VERSION_FUNC_ID:
- val = ARM_SMCCC_VERSION_1_1;
- break;
- case ARM_SMCCC_ARCH_FEATURES_FUNC_ID:
- feature = smccc_get_arg1(vcpu);
- switch(feature) {
- case ARM_SMCCC_ARCH_WORKAROUND_1:
- switch (kvm_arm_harden_branch_predictor()) {
- case KVM_BP_HARDEN_UNKNOWN:
- break;
- case KVM_BP_HARDEN_WA_NEEDED:
- val = SMCCC_RET_SUCCESS;
- break;
- case KVM_BP_HARDEN_NOT_REQUIRED:
- val = SMCCC_RET_NOT_REQUIRED;
- break;
- }
- break;
- case ARM_SMCCC_ARCH_WORKAROUND_2:
- switch (kvm_arm_have_ssbd()) {
- case KVM_SSBD_FORCE_DISABLE:
- case KVM_SSBD_UNKNOWN:
- break;
- case KVM_SSBD_KERNEL:
- val = SMCCC_RET_SUCCESS;
- break;
- case KVM_SSBD_FORCE_ENABLE:
- case KVM_SSBD_MITIGATED:
- val = SMCCC_RET_NOT_REQUIRED;
- break;
- }
- break;
- }
- break;
- default:
- return kvm_psci_call(vcpu);
- }
-
- smccc_set_retval(vcpu, val, 0, 0, 0);
- return 1;
-}
-
int kvm_arm_get_fw_num_regs(struct kvm_vcpu *vcpu)
{
return 3; /* PSCI version and two workaround registers */
diff --git a/virt/kvm/arm/pvtime.c b/virt/kvm/arm/pvtime.c
new file mode 100644
index 000000000000..1e0f4c284888
--- /dev/null
+++ b/virt/kvm/arm/pvtime.c
@@ -0,0 +1,131 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2019 Arm Ltd.
+
+#include <linux/arm-smccc.h>
+#include <linux/kvm_host.h>
+
+#include <asm/kvm_mmu.h>
+#include <asm/pvclock-abi.h>
+
+#include <kvm/arm_hypercalls.h>
+
+void kvm_update_stolen_time(struct kvm_vcpu *vcpu)
+{
+ struct kvm *kvm = vcpu->kvm;
+ u64 steal;
+ __le64 steal_le;
+ u64 offset;
+ int idx;
+ u64 base = vcpu->arch.steal.base;
+
+ if (base == GPA_INVALID)
+ return;
+
+ /* Let's do the local bookkeeping */
+ steal = vcpu->arch.steal.steal;
+ steal += current->sched_info.run_delay - vcpu->arch.steal.last_steal;
+ vcpu->arch.steal.last_steal = current->sched_info.run_delay;
+ vcpu->arch.steal.steal = steal;
+
+ steal_le = cpu_to_le64(steal);
+ idx = srcu_read_lock(&kvm->srcu);
+ offset = offsetof(struct pvclock_vcpu_stolen_time, stolen_time);
+ kvm_put_guest(kvm, base + offset, steal_le, u64);
+ srcu_read_unlock(&kvm->srcu, idx);
+}
+
+long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu)
+{
+ u32 feature = smccc_get_arg1(vcpu);
+ long val = SMCCC_RET_NOT_SUPPORTED;
+
+ switch (feature) {
+ case ARM_SMCCC_HV_PV_TIME_FEATURES:
+ case ARM_SMCCC_HV_PV_TIME_ST:
+ val = SMCCC_RET_SUCCESS;
+ break;
+ }
+
+ return val;
+}
+
+gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu)
+{
+ struct pvclock_vcpu_stolen_time init_values = {};
+ struct kvm *kvm = vcpu->kvm;
+ u64 base = vcpu->arch.steal.base;
+ int idx;
+
+ if (base == GPA_INVALID)
+ return base;
+
+ /*
+ * Start counting stolen time from the time the guest requests
+ * the feature enabled.
+ */
+ vcpu->arch.steal.steal = 0;
+ vcpu->arch.steal.last_steal = current->sched_info.run_delay;
+
+ idx = srcu_read_lock(&kvm->srcu);
+ kvm_write_guest(kvm, base, &init_values, sizeof(init_values));
+ srcu_read_unlock(&kvm->srcu, idx);
+
+ return base;
+}
+
+int kvm_arm_pvtime_set_attr(struct kvm_vcpu *vcpu,
+ struct kvm_device_attr *attr)
+{
+ u64 __user *user = (u64 __user *)attr->addr;
+ struct kvm *kvm = vcpu->kvm;
+ u64 ipa;
+ int ret = 0;
+ int idx;
+
+ if (attr->attr != KVM_ARM_VCPU_PVTIME_IPA)
+ return -ENXIO;
+
+ if (get_user(ipa, user))
+ return -EFAULT;
+ if (!IS_ALIGNED(ipa, 64))
+ return -EINVAL;
+ if (vcpu->arch.steal.base != GPA_INVALID)
+ return -EEXIST;
+
+ /* Check the address is in a valid memslot */
+ idx = srcu_read_lock(&kvm->srcu);
+ if (kvm_is_error_hva(gfn_to_hva(kvm, ipa >> PAGE_SHIFT)))
+ ret = -EINVAL;
+ srcu_read_unlock(&kvm->srcu, idx);
+
+ if (!ret)
+ vcpu->arch.steal.base = ipa;
+
+ return ret;
+}
+
+int kvm_arm_pvtime_get_attr(struct kvm_vcpu *vcpu,
+ struct kvm_device_attr *attr)
+{
+ u64 __user *user = (u64 __user *)attr->addr;
+ u64 ipa;
+
+ if (attr->attr != KVM_ARM_VCPU_PVTIME_IPA)
+ return -ENXIO;
+
+ ipa = vcpu->arch.steal.base;
+
+ if (put_user(ipa, user))
+ return -EFAULT;
+ return 0;
+}
+
+int kvm_arm_pvtime_has_attr(struct kvm_vcpu *vcpu,
+ struct kvm_device_attr *attr)
+{
+ switch (attr->attr) {
+ case KVM_ARM_VCPU_PVTIME_IPA:
+ return 0;
+ }
+ return -ENXIO;
+}
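The IPA that kvm_init_stolen_time() hands back points at a 64-byte per-vCPU record which the host keeps current via kvm_put_guest() and the guest only ever reads. A sketch of the guest-side view, with the layout assumed from the asm/pvclock-abi.h this series adds (little-endian fields, per Arm DEN 0057A):

#include <stdint.h>

/* Assumed guest-side mirror of struct pvclock_vcpu_stolen_time. */
struct pvclock_vcpu_stolen_time {
	uint32_t revision;	/* little-endian; 0 for this revision */
	uint32_t attributes;	/* little-endian; 0, no attributes yet */
	uint64_t stolen_time;	/* little-endian; stolen time in ns */
	uint8_t  padding[48];	/* pads the record to 64 bytes */
};

/* On a little-endian guest the running total can be read directly. */
static inline uint64_t
read_stolen_ns(const volatile struct pvclock_vcpu_stolen_time *st)
{
	return st->stolen_time;
}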
diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c
index 6f50c429196d..b3c5de48064c 100644
--- a/virt/kvm/arm/vgic/vgic-init.c
+++ b/virt/kvm/arm/vgic/vgic-init.c
@@ -203,6 +203,7 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
INIT_LIST_HEAD(&vgic_cpu->ap_list_head);
raw_spin_lock_init(&vgic_cpu->ap_list_lock);
+ atomic_set(&vgic_cpu->vgic_v3.its_vpe.vlpi_count, 0);
/*
* Enable and configure all SGIs to be edge-triggered and
diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
index 2be6b66b3856..98c7360d9fb7 100644
--- a/virt/kvm/arm/vgic/vgic-its.c
+++ b/virt/kvm/arm/vgic/vgic-its.c
@@ -360,7 +360,10 @@ static int update_affinity(struct vgic_irq *irq, struct kvm_vcpu *vcpu)
if (ret)
return ret;
+ if (map.vpe)
+ atomic_dec(&map.vpe->vlpi_count);
map.vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
+ atomic_inc(&map.vpe->vlpi_count);
ret = its_map_vlpi(irq->host_irq, &map);
}
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
index 8d69f007dd0c..f45635a6f0ec 100644
--- a/virt/kvm/arm/vgic/vgic-v3.c
+++ b/virt/kvm/arm/vgic/vgic-v3.c
@@ -357,14 +357,14 @@ retry:
}
/**
- * vgic_its_save_pending_tables - Save the pending tables into guest RAM
+ * vgic_v3_save_pending_tables - Save the pending tables into guest RAM
* kvm lock and all vcpu lock must be held
*/
int vgic_v3_save_pending_tables(struct kvm *kvm)
{
struct vgic_dist *dist = &kvm->arch.vgic;
- int last_byte_offset = -1;
struct vgic_irq *irq;
+ gpa_t last_ptr = ~(gpa_t)0;
int ret;
u8 val;
@@ -384,11 +384,11 @@ int vgic_v3_save_pending_tables(struct kvm *kvm)
bit_nr = irq->intid % BITS_PER_BYTE;
ptr = pendbase + byte_offset;
- if (byte_offset != last_byte_offset) {
+ if (ptr != last_ptr) {
ret = kvm_read_guest_lock(kvm, ptr, &val, 1);
if (ret)
return ret;
- last_byte_offset = byte_offset;
+ last_ptr = ptr;
}
stored = val & (1U << bit_nr);
@@ -664,6 +664,8 @@ void vgic_v3_load(struct kvm_vcpu *vcpu)
if (has_vhe())
__vgic_v3_activate_traps(vcpu);
+
+ WARN_ON(vgic_v4_load(vcpu));
}
void vgic_v3_vmcr_sync(struct kvm_vcpu *vcpu)
@@ -676,6 +678,8 @@ void vgic_v3_vmcr_sync(struct kvm_vcpu *vcpu)
void vgic_v3_put(struct kvm_vcpu *vcpu)
{
+ WARN_ON(vgic_v4_put(vcpu, false));
+
vgic_v3_vmcr_sync(vcpu);
kvm_call_hyp(__vgic_v3_save_aprs, vcpu);
diff --git a/virt/kvm/arm/vgic/vgic-v4.c b/virt/kvm/arm/vgic/vgic-v4.c
index 477af6aebb97..46f875589c47 100644
--- a/virt/kvm/arm/vgic/vgic-v4.c
+++ b/virt/kvm/arm/vgic/vgic-v4.c
@@ -85,6 +85,10 @@ static irqreturn_t vgic_v4_doorbell_handler(int irq, void *info)
{
struct kvm_vcpu *vcpu = info;
+ /* We got the message, no need to fire again */
+ if (!irqd_irq_disabled(&irq_to_desc(irq)->irq_data))
+ disable_irq_nosync(irq);
+
vcpu->arch.vgic_cpu.vgic_v3.its_vpe.pending_last = true;
kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
kvm_vcpu_kick(vcpu);
@@ -192,20 +196,30 @@ void vgic_v4_teardown(struct kvm *kvm)
its_vm->vpes = NULL;
}
-int vgic_v4_sync_hwstate(struct kvm_vcpu *vcpu)
+int vgic_v4_put(struct kvm_vcpu *vcpu, bool need_db)
{
- if (!vgic_supports_direct_msis(vcpu->kvm))
+ struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
+ struct irq_desc *desc = irq_to_desc(vpe->irq);
+
+ if (!vgic_supports_direct_msis(vcpu->kvm) || !vpe->resident)
return 0;
- return its_schedule_vpe(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe, false);
+ /*
+ * If blocking, a doorbell is required. Undo the nested
+ * disable_irq() calls...
+ */
+ while (need_db && irqd_irq_disabled(&desc->irq_data))
+ enable_irq(vpe->irq);
+
+ return its_schedule_vpe(vpe, false);
}
-int vgic_v4_flush_hwstate(struct kvm_vcpu *vcpu)
+int vgic_v4_load(struct kvm_vcpu *vcpu)
{
- int irq = vcpu->arch.vgic_cpu.vgic_v3.its_vpe.irq;
+ struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
int err;
- if (!vgic_supports_direct_msis(vcpu->kvm))
+ if (!vgic_supports_direct_msis(vcpu->kvm) || vpe->resident)
return 0;
/*
@@ -214,11 +228,14 @@ int vgic_v4_flush_hwstate(struct kvm_vcpu *vcpu)
* doc in drivers/irqchip/irq-gic-v4.c to understand how this
* turns into a VMOVP command at the ITS level.
*/
- err = irq_set_affinity(irq, cpumask_of(smp_processor_id()));
+ err = irq_set_affinity(vpe->irq, cpumask_of(smp_processor_id()));
if (err)
return err;
- err = its_schedule_vpe(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe, true);
+ /* Disable the doorbell, as we're about to enter the guest */
+ disable_irq_nosync(vpe->irq);
+
+ err = its_schedule_vpe(vpe, true);
if (err)
return err;
@@ -226,9 +243,7 @@ int vgic_v4_flush_hwstate(struct kvm_vcpu *vcpu)
* Now that the VPE is resident, let's get rid of a potential
* doorbell interrupt that would still be pending.
*/
- err = irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING, false);
-
- return err;
+ return irq_set_irqchip_state(vpe->irq, IRQCHIP_STATE_PENDING, false);
}
static struct vgic_its *vgic_get_its(struct kvm *kvm,
@@ -266,7 +281,7 @@ int kvm_vgic_v4_set_forwarding(struct kvm *kvm, int virq,
mutex_lock(&its->its_lock);
- /* Perform then actual DevID/EventID -> LPI translation. */
+ /* Perform the actual DevID/EventID -> LPI translation. */
ret = vgic_its_resolve_lpi(kvm, its, irq_entry->msi.devid,
irq_entry->msi.data, &irq);
if (ret)
@@ -294,6 +309,7 @@ int kvm_vgic_v4_set_forwarding(struct kvm *kvm, int virq,
irq->hw = true;
irq->host_irq = virq;
+ atomic_inc(&map.vpe->vlpi_count);
out:
mutex_unlock(&its->its_lock);
@@ -327,6 +343,7 @@ int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int virq,
WARN_ON(!(irq->hw && irq->host_irq == virq));
if (irq->hw) {
+ atomic_dec(&irq->target_vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count);
irq->hw = false;
ret = its_unmap_vlpi(virq);
}
@@ -335,21 +352,3 @@ out:
mutex_unlock(&its->its_lock);
return ret;
}
-
-void kvm_vgic_v4_enable_doorbell(struct kvm_vcpu *vcpu)
-{
- if (vgic_supports_direct_msis(vcpu->kvm)) {
- int irq = vcpu->arch.vgic_cpu.vgic_v3.its_vpe.irq;
- if (irq)
- enable_irq(irq);
- }
-}
-
-void kvm_vgic_v4_disable_doorbell(struct kvm_vcpu *vcpu)
-{
- if (vgic_supports_direct_msis(vcpu->kvm)) {
- int irq = vcpu->arch.vgic_cpu.vgic_v3.its_vpe.irq;
- if (irq)
- disable_irq(irq);
- }
-}
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
index 45a870cb63f5..99b02ca730a8 100644
--- a/virt/kvm/arm/vgic/vgic.c
+++ b/virt/kvm/arm/vgic/vgic.c
@@ -857,8 +857,6 @@ void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
- WARN_ON(vgic_v4_sync_hwstate(vcpu));
-
/* An empty ap_list_head implies used_lrs == 0 */
if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head))
return;
@@ -882,8 +880,6 @@ static inline void vgic_restore_state(struct kvm_vcpu *vcpu)
/* Flush our emulation state into the GIC hardware before entering the guest. */
void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
{
- WARN_ON(vgic_v4_flush_hwstate(vcpu));
-
/*
* If there are no virtual interrupts active or pending for this
* VCPU, then there is no work to do and we can bail out without
diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h
index 83066a81b16a..c7fefd6b1c80 100644
--- a/virt/kvm/arm/vgic/vgic.h
+++ b/virt/kvm/arm/vgic/vgic.h
@@ -316,7 +316,5 @@ void vgic_its_invalidate_cache(struct kvm *kvm);
bool vgic_supports_direct_msis(struct kvm *kvm);
int vgic_v4_init(struct kvm *kvm);
void vgic_v4_teardown(struct kvm *kvm);
-int vgic_v4_sync_hwstate(struct kvm_vcpu *vcpu);
-int vgic_v4_flush_hwstate(struct kvm_vcpu *vcpu);
#endif
diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c
index 8ffd07e2a160..00c747dbc82e 100644
--- a/virt/kvm/coalesced_mmio.c
+++ b/virt/kvm/coalesced_mmio.c
@@ -110,14 +110,11 @@ static const struct kvm_io_device_ops coalesced_mmio_ops = {
int kvm_coalesced_mmio_init(struct kvm *kvm)
{
struct page *page;
- int ret;
- ret = -ENOMEM;
page = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!page)
- goto out_err;
+ return -ENOMEM;
- ret = 0;
kvm->coalesced_mmio_ring = page_address(page);
/*
@@ -128,8 +125,7 @@ int kvm_coalesced_mmio_init(struct kvm *kvm)
spin_lock_init(&kvm->ring_lock);
INIT_LIST_HEAD(&kvm->coalesced_zones);
-out_err:
- return ret;
+ return 0;
}
void kvm_coalesced_mmio_free(struct kvm *kvm)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index d6f0696d98ef..00268290dcbd 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -50,6 +50,7 @@
#include <linux/bsearch.h>
#include <linux/io.h>
#include <linux/lockdep.h>
+#include <linux/kthread.h>
#include <asm/processor.h>
#include <asm/ioctl.h>
@@ -121,9 +122,22 @@ static long kvm_vcpu_compat_ioctl(struct file *file, unsigned int ioctl,
unsigned long arg);
#define KVM_COMPAT(c) .compat_ioctl = (c)
#else
+/*
+ * For architectures that don't implement a compat infrastructure,
+ * adopt a double line of defense:
+ * - Prevent a compat task from opening /dev/kvm
+ * - If the open has been done by a 64-bit task, and the KVM fd is
+ * then passed to a compat task, let the ioctls fail.
+ */
static long kvm_no_compat_ioctl(struct file *file, unsigned int ioctl,
unsigned long arg) { return -EINVAL; }
-#define KVM_COMPAT(c) .compat_ioctl = kvm_no_compat_ioctl
+
+static int kvm_no_compat_open(struct inode *inode, struct file *file)
+{
+ return is_compat_task() ? -ENODEV : 0;
+}
+#define KVM_COMPAT(c) .compat_ioctl = kvm_no_compat_ioctl, \
+ .open = kvm_no_compat_open
#endif
static int hardware_enable_all(void);
static void hardware_disable_all(void);
@@ -149,10 +163,30 @@ __weak int kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
return 0;
}
+bool kvm_is_zone_device_pfn(kvm_pfn_t pfn)
+{
+ /*
+ * The metadata used by is_zone_device_page() to determine whether or
+ * not a page is ZONE_DEVICE is guaranteed to be valid if and only if
+ * the page has been pinned, e.g. via get_user_pages(). WARN if the
+ * page_count() is zero to help detect bad usage of this helper.
+ */
+ if (!pfn_valid(pfn) || WARN_ON_ONCE(!page_count(pfn_to_page(pfn))))
+ return false;
+
+ return is_zone_device_page(pfn_to_page(pfn));
+}
+
bool kvm_is_reserved_pfn(kvm_pfn_t pfn)
{
+ /*
+ * ZONE_DEVICE pages currently set PG_reserved, but from a refcounting
+ * perspective they are "normal" pages, albeit with slightly different
+ * usage rules.
+ */
if (pfn_valid(pfn))
- return PageReserved(pfn_to_page(pfn));
+ return PageReserved(pfn_to_page(pfn)) &&
+ !kvm_is_zone_device_pfn(pfn);
return true;
}
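Net effect of this hunk plus the new helper above: every pfn now lands in one of three classes. The decision logic, restated compactly (illustrative only, not a new function):

	if (!pfn_valid(pfn))
		reserved = true;     /* no struct page backing at all */
	else if (kvm_is_zone_device_pfn(pfn))
		reserved = false;    /* PG_reserved is set, but the page is refcounted */
	else
		reserved = PageReserved(pfn_to_page(pfn));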
@@ -625,6 +659,23 @@ static int kvm_create_vm_debugfs(struct kvm *kvm, int fd)
return 0;
}
+/*
+ * Called after the VM is otherwise initialized, but just before adding it to
+ * the vm_list.
+ */
+int __weak kvm_arch_post_init_vm(struct kvm *kvm)
+{
+ return 0;
+}
+
+/*
+ * Called just after removing the VM from the vm_list, but before doing any
+ * other destruction.
+ */
+void __weak kvm_arch_pre_destroy_vm(struct kvm *kvm)
+{
+}
+
static struct kvm *kvm_create_vm(unsigned long type)
{
struct kvm *kvm = kvm_arch_alloc_vm();
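Because the hooks are __weak no-ops, an architecture opts in simply by supplying strong definitions. A hedged sketch of such an override, using kvm_vm_create_worker_thread() added at the end of this file; my_worker_fn and kvm->arch.worker are hypothetical names:

	int kvm_arch_post_init_vm(struct kvm *kvm)
	{
		int err;

		/* VM fully built here, but not yet visible on vm_list. */
		err = kvm_vm_create_worker_thread(kvm, my_worker_fn, 0,
						  "kvm-worker", &kvm->arch.worker);
		if (!err)
			kthread_unpark(kvm->arch.worker);
		return err;
	}

	void kvm_arch_pre_destroy_vm(struct kvm *kvm)
	{
		/* Already off vm_list; all other VM state is still intact. */
		if (kvm->arch.worker)
			kthread_stop(kvm->arch.worker);
	}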
@@ -645,6 +696,12 @@ static struct kvm *kvm_create_vm(unsigned long type)
BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX);
+ if (init_srcu_struct(&kvm->srcu))
+ goto out_err_no_srcu;
+ if (init_srcu_struct(&kvm->irq_srcu))
+ goto out_err_no_irq_srcu;
+
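+ /* users_count must be live before any failure that unwinds through out_err_no_arch_destroy_vm, which dec-and-tests it. */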
+ refcount_set(&kvm->users_count, 1);
for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
struct kvm_memslots *slots = kvm_alloc_memslots();
@@ -662,7 +719,6 @@ static struct kvm *kvm_create_vm(unsigned long type)
goto out_err_no_arch_destroy_vm;
}
- refcount_set(&kvm->users_count, 1);
r = kvm_arch_init_vm(kvm, type);
if (r)
goto out_err_no_arch_destroy_vm;
@@ -675,13 +731,12 @@ static struct kvm *kvm_create_vm(unsigned long type)
INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
#endif
- if (init_srcu_struct(&kvm->srcu))
- goto out_err_no_srcu;
- if (init_srcu_struct(&kvm->irq_srcu))
- goto out_err_no_irq_srcu;
-
r = kvm_init_mmu_notifier(kvm);
if (r)
+ goto out_err_no_mmu_notifier;
+
+ r = kvm_arch_post_init_vm(kvm);
+ if (r)
goto out_err;
mutex_lock(&kvm_lock);
@@ -693,19 +748,24 @@ static struct kvm *kvm_create_vm(unsigned long type)
return kvm;
out_err:
- cleanup_srcu_struct(&kvm->irq_srcu);
-out_err_no_irq_srcu:
- cleanup_srcu_struct(&kvm->srcu);
-out_err_no_srcu:
+#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
+ if (kvm->mmu_notifier.ops)
+ mmu_notifier_unregister(&kvm->mmu_notifier, current->mm);
+#endif
+out_err_no_mmu_notifier:
hardware_disable_all();
out_err_no_disable:
kvm_arch_destroy_vm(kvm);
- WARN_ON_ONCE(!refcount_dec_and_test(&kvm->users_count));
out_err_no_arch_destroy_vm:
+ WARN_ON_ONCE(!refcount_dec_and_test(&kvm->users_count));
for (i = 0; i < KVM_NR_BUSES; i++)
kfree(kvm_get_bus(kvm, i));
for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
kvm_free_memslots(kvm, __kvm_memslots(kvm, i));
+ cleanup_srcu_struct(&kvm->irq_srcu);
+out_err_no_irq_srcu:
+ cleanup_srcu_struct(&kvm->srcu);
+out_err_no_srcu:
kvm_arch_free_vm(kvm);
mmdrop(current->mm);
return ERR_PTR(r);
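The reshuffled unwind keeps the usual invariant: labels tear down in exact reverse order of initialization, and each out_err_no_X label means "X itself was never set up, so skip its cleanup". The idiom, reduced to two resources (names illustrative):

	r = init_a();
	if (r)
		goto out_no_a;          /* A failed: nothing to undo */
	r = init_b();
	if (r)
		goto out_no_b;          /* B failed: undo A only */
	return 0;

out_no_b:
	fini_a();
out_no_a:
	return r;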
@@ -737,6 +797,8 @@ static void kvm_destroy_vm(struct kvm *kvm)
mutex_lock(&kvm_lock);
list_del(&kvm->vm_list);
mutex_unlock(&kvm_lock);
+ kvm_arch_pre_destroy_vm(kvm);
+
kvm_free_irq_routing(kvm);
for (i = 0; i < KVM_NR_BUSES; i++) {
struct kvm_io_bus *bus = kvm_get_bus(kvm, i);
@@ -776,6 +838,18 @@ void kvm_put_kvm(struct kvm *kvm)
}
EXPORT_SYMBOL_GPL(kvm_put_kvm);
+/*
+ * Used to put a reference that was taken on behalf of an object associated
+ * with a user-visible file descriptor, e.g. a vcpu or device, if installation
+ * of the new file descriptor fails and the reference cannot be transferred to
+ * its final owner. In such cases, the caller is still actively using @kvm and
+ * will fail miserably if the refcount unexpectedly hits zero.
+ */
+void kvm_put_kvm_no_destroy(struct kvm *kvm)
+{
+ WARN_ON(refcount_dec_and_test(&kvm->users_count));
+}
+EXPORT_SYMBOL_GPL(kvm_put_kvm_no_destroy);
static int kvm_vm_release(struct inode *inode, struct file *filp)
{
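The helper covers one narrow window: a reference taken with kvm_get_kvm() on behalf of a to-be-installed fd must be dropped when installing that fd fails, while the caller is still using the VM. The intended pattern, sketched (create_some_fd and destroy_obj are illustrative; the vcpu and device hunks below are the real instances):

	kvm_get_kvm(kvm);                     /* ref owned by the future fd */
	r = create_some_fd(obj);
	if (r < 0) {
		kvm_put_kvm_no_destroy(kvm);  /* drop the borrowed ref ... */
		goto destroy_obj;             /* ... but kvm must survive  */
	}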
@@ -1857,7 +1931,7 @@ EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty);
void kvm_set_pfn_dirty(kvm_pfn_t pfn)
{
- if (!kvm_is_reserved_pfn(pfn)) {
+ if (!kvm_is_reserved_pfn(pfn) && !kvm_is_zone_device_pfn(pfn)) {
struct page *page = pfn_to_page(pfn);
SetPageDirty(page);
@@ -1867,7 +1941,7 @@ EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);
void kvm_set_pfn_accessed(kvm_pfn_t pfn)
{
- if (!kvm_is_reserved_pfn(pfn))
+ if (!kvm_is_reserved_pfn(pfn) && !kvm_is_zone_device_pfn(pfn))
mark_page_accessed(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);
@@ -2677,17 +2751,18 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
goto unlock_vcpu_destroy;
}
- BUG_ON(kvm->vcpus[atomic_read(&kvm->online_vcpus)]);
+ vcpu->vcpu_idx = atomic_read(&kvm->online_vcpus);
+ BUG_ON(kvm->vcpus[vcpu->vcpu_idx]);
/* Now it's all set up, let userspace reach it */
kvm_get_kvm(kvm);
r = create_vcpu_fd(vcpu);
if (r < 0) {
- kvm_put_kvm(kvm);
+ kvm_put_kvm_no_destroy(kvm);
goto unlock_vcpu_destroy;
}
- kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu;
+ kvm->vcpus[vcpu->vcpu_idx] = vcpu;
/*
* Pairs with smp_rmb() in kvm_get_vcpu. Write kvm->vcpus
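The hunk's trailing context truncates the comment; the pairing is with the smp_rmb() in kvm_get_vcpu(): the vcpus[] store must be visible before the incremented online_vcpus value that licenses readers to index it. Both sides, sketched (reader side paraphrased from kvm_get_vcpu(), bounds handling simplified):

	/* Writer (this path): */
	kvm->vcpus[vcpu->vcpu_idx] = vcpu;
	smp_wmb();                        /* publish the pointer ...        */
	atomic_inc(&kvm->online_vcpus);   /* ... before the new count       */

	/* Reader (kvm_get_vcpu(), sketched): */
	if (i >= atomic_read(&kvm->online_vcpus))
		return NULL;
	smp_rmb();                        /* pairs with the smp_wmb() above */
	return kvm->vcpus[i];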
@@ -3053,14 +3128,14 @@ struct kvm_device *kvm_device_from_filp(struct file *filp)
return filp->private_data;
}
-static struct kvm_device_ops *kvm_device_ops_table[KVM_DEV_TYPE_MAX] = {
+static const struct kvm_device_ops *kvm_device_ops_table[KVM_DEV_TYPE_MAX] = {
#ifdef CONFIG_KVM_MPIC
[KVM_DEV_TYPE_FSL_MPIC_20] = &kvm_mpic_ops,
[KVM_DEV_TYPE_FSL_MPIC_42] = &kvm_mpic_ops,
#endif
};
-int kvm_register_device_ops(struct kvm_device_ops *ops, u32 type)
+int kvm_register_device_ops(const struct kvm_device_ops *ops, u32 type)
{
if (type >= ARRAY_SIZE(kvm_device_ops_table))
return -ENOSPC;
@@ -3081,7 +3156,7 @@ void kvm_unregister_device_ops(u32 type)
static int kvm_ioctl_create_device(struct kvm *kvm,
struct kvm_create_device *cd)
{
- struct kvm_device_ops *ops = NULL;
+ const struct kvm_device_ops *ops = NULL;
struct kvm_device *dev;
bool test = cd->flags & KVM_CREATE_DEVICE_TEST;
int type;
@@ -3121,7 +3196,7 @@ static int kvm_ioctl_create_device(struct kvm *kvm,
kvm_get_kvm(kvm);
ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC);
if (ret < 0) {
- kvm_put_kvm(kvm);
+ kvm_put_kvm_no_destroy(kvm);
mutex_lock(&kvm->lock);
list_del(&dev->vm_node);
mutex_unlock(&kvm->lock);
@@ -4279,12 +4354,12 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
r = kvm_arch_hardware_setup();
if (r < 0)
- goto out_free_0a;
+ goto out_free_1;
for_each_online_cpu(cpu) {
smp_call_function_single(cpu, check_processor_compat, &r, 1);
if (r < 0)
- goto out_free_1;
+ goto out_free_2;
}
r = cpuhp_setup_state_nocalls(CPUHP_AP_KVM_STARTING, "kvm/cpu:starting",
@@ -4341,9 +4416,8 @@ out_free_3:
unregister_reboot_notifier(&kvm_reboot_notifier);
cpuhp_remove_state_nocalls(CPUHP_AP_KVM_STARTING);
out_free_2:
-out_free_1:
kvm_arch_hardware_unsetup();
-out_free_0a:
+out_free_1:
free_cpumask_var(cpus_hardware_enabled);
out_free_0:
kvm_irqfd_exit();
@@ -4371,3 +4445,86 @@ void kvm_exit(void)
kvm_vfio_ops_exit();
}
EXPORT_SYMBOL_GPL(kvm_exit);
+
+struct kvm_vm_worker_thread_context {
+ struct kvm *kvm;
+ struct task_struct *parent;
+ struct completion init_done;
+ kvm_vm_thread_fn_t thread_fn;
+ uintptr_t data;
+ int err;
+};
+
+static int kvm_vm_worker_thread(void *context)
+{
+ /*
+ * The init_context is allocated on the stack of the parent thread, so
+ * we have to locally copy anything that is needed beyond initialization.
+ */
+ struct kvm_vm_worker_thread_context *init_context = context;
+ struct kvm *kvm = init_context->kvm;
+ kvm_vm_thread_fn_t thread_fn = init_context->thread_fn;
+ uintptr_t data = init_context->data;
+ int err;
+
+ err = kthread_park(current);
+ /* kthread_park(current) is never supposed to return an error */
+ WARN_ON(err != 0);
+ if (err)
+ goto init_complete;
+
+ err = cgroup_attach_task_all(init_context->parent, current);
+ if (err) {
+ kvm_err("%s: cgroup_attach_task_all failed with err %d\n",
+ __func__, err);
+ goto init_complete;
+ }
+
+ set_user_nice(current, task_nice(init_context->parent));
+
+init_complete:
+ init_context->err = err;
+ complete(&init_context->init_done);
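+ /* init_context points into the parent's stack, which may unwind once init_done completes. */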
+ init_context = NULL;
+
+ if (err)
+ return err;
+
+ /* Wait to be woken up by the spawner before proceeding. */
+ kthread_parkme();
+
+ if (!kthread_should_stop())
+ err = thread_fn(kvm, data);
+
+ return err;
+}
+
+int kvm_vm_create_worker_thread(struct kvm *kvm, kvm_vm_thread_fn_t thread_fn,
+ uintptr_t data, const char *name,
+ struct task_struct **thread_ptr)
+{
+ struct kvm_vm_worker_thread_context init_context = {};
+ struct task_struct *thread;
+
+ *thread_ptr = NULL;
+ init_context.kvm = kvm;
+ init_context.parent = current;
+ init_context.thread_fn = thread_fn;
+ init_context.data = data;
+ init_completion(&init_context.init_done);
+
+ thread = kthread_run(kvm_vm_worker_thread, &init_context,
+ "%s-%d", name, task_pid_nr(current));
+ if (IS_ERR(thread))
+ return PTR_ERR(thread);
+
+ /* kthread_run is never supposed to return NULL */
+ WARN_ON(thread == NULL);
+
+ wait_for_completion(&init_context.init_done);
+
+ if (!init_context.err)
+ *thread_ptr = thread;
+
+ return init_context.err;
+}
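The worker is created parked: by the time the spawner gets it back, the thread has attached itself to the parent's cgroups and inherited its nice value, but has not yet entered thread_fn. Callers kthread_unpark() it to start work and kthread_stop() it to tear it down (kthread_stop() copes even with a still-parked thread). The handshake, as a timeline of the code above:

	/*
	 *   parent                          worker
	 *   ------                          ------
	 *   kthread_run()              -->  kthread_park(current)
	 *   wait_for_completion()           cgroup_attach_task_all()
	 *                                   set_user_nice()
	 *                              <--  complete(&init_done)
	 *   read init_context.err           kthread_parkme()   [sleeps]
	 *   kthread_unpark(thread)     -->  thread_fn(kvm, data)
	 */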